author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-27 11:08:07 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-27 11:08:07 +0000
commit     c69cb8cc094cc916adbc516b09e944cd3d137c01 (patch)
tree       f2878ec41fb6d0e3613906c6722fc02b934eeb80
parent     Initial commit. (diff)
download   netdata-c69cb8cc094cc916adbc516b09e944cd3d137c01.tar.xz
           netdata-c69cb8cc094cc916adbc516b09e944cd3d137c01.zip
Adding upstream version 1.29.3. (tag: upstream/1.29.3, upstream)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat
-rw-r--r--  .clang-format  118
-rw-r--r--  .codacy.yml  16
-rw-r--r--  .codeclimate.yml  99
-rw-r--r--  .csslintrc  2
l---------  .dockerignore  1
-rw-r--r--  .eslintignore  1
-rw-r--r--  .eslintrc  213
-rw-r--r--  .gitattributes  2
-rw-r--r--  .github/CODEOWNERS  66
-rw-r--r--  .github/ISSUE_TEMPLATE.md  15
-rw-r--r--  .github/ISSUE_TEMPLATE/bug_report.md  57
-rw-r--r--  .github/ISSUE_TEMPLATE/config.yml  14
-rw-r--r--  .github/PULL_REQUEST_TEMPLATE.md  28
-rw-r--r--  .github/dependabot.yml  9
-rw-r--r--  .github/dockerfiles/Dockerfile.build_test  11
-rw-r--r--  .github/dockerfiles/Dockerfile.clang  18
-rw-r--r--  .github/labeler.yml  101
-rwxr-xr-x  .github/scripts/build-artifacts.sh  81
-rwxr-xr-x  .github/scripts/build-static-x86_64.sh  58
-rwxr-xr-x  .github/scripts/bump-packaging-version.sh  6
-rw-r--r--  .github/scripts/functions.sh  69
-rwxr-xr-x  .github/scripts/run_install_with_dist_file.sh  38
-rw-r--r--  .github/stale.yml  18
-rw-r--r--  .github/workflows/build-and-install.yml  201
-rw-r--r--  .github/workflows/checks.yml  106
-rw-r--r--  .github/workflows/coverity.yml  34
-rw-r--r--  .github/workflows/docker.yml  59
-rw-r--r--  .github/workflows/docs.yml  25
-rw-r--r--  .github/workflows/labeler.yml  16
-rw-r--r--  .github/workflows/review.yml  94
-rw-r--r--  .github/workflows/tests.yml  88
-rw-r--r--  .gitignore  210
-rw-r--r--  .lgtm.yml  24
-rw-r--r--  .mlc_config.json  20
-rw-r--r--  .remarkignore  1
-rw-r--r--  .remarkrc.js  111
-rw-r--r--  .squash.yml  7
-rw-r--r--  .travis.yml  460
-rw-r--r--  .travis/README.md  149
-rwxr-xr-x  .travis/check_changelog_last_modification.sh  38
-rwxr-xr-x  .travis/create_artifacts.sh  67
-rwxr-xr-x  .travis/create_changelog.sh  46
-rwxr-xr-x  .travis/draft_release.sh  65
-rw-r--r--  .travis/gcs-credentials.json.enc  bin  0 -> 2320 bytes
-rwxr-xr-x  .travis/generate_changelog_and_tag_release.sh  64
-rwxr-xr-x  .travis/generate_changelog_for_nightlies.sh  50
-rwxr-xr-x  .travis/nightlies.sh  51
-rw-r--r--  .travis/package_management/build.sh  32
-rwxr-xr-x  .travis/package_management/build_judy.sh  36
-rwxr-xr-x  .travis/package_management/build_libuv.sh  36
-rwxr-xr-x  .travis/package_management/build_package_in_container.sh  82
-rwxr-xr-x  .travis/package_management/common.py  182
-rwxr-xr-x  .travis/package_management/configure_deb_lxc_environment.py  90
-rwxr-xr-x  .travis/package_management/configure_rpm_lxc_environment.py  102
-rwxr-xr-x  .travis/package_management/create_lxc_for_build.sh  101
-rw-r--r--  .travis/package_management/functions.sh  33
-rwxr-xr-x  .travis/package_management/old_package_purging.sh  93
-rwxr-xr-x  .travis/package_management/package_cloud_wrapper.sh  48
-rwxr-xr-x  .travis/package_management/prepare_packages.sh  63
-rwxr-xr-x  .travis/package_management/trigger_deb_lxc_build.py  86
-rwxr-xr-x  .travis/package_management/trigger_rpm_lxc_build.py  55
-rwxr-xr-x  .travis/package_management/yank_stale_pkg.sh  35
-rwxr-xr-x  .travis/tagger.sh  61
-rwxr-xr-x  .travis/trigger_docker_build.sh  19
-rwxr-xr-x  .travis/trigger_package_generation.sh  57
-rw-r--r--  .travis/utils.sh  29
-rw-r--r--  .yamllint.yml  34
-rw-r--r--  BREAKING_CHANGES.md  11
-rw-r--r--  BUILD.md  365
-rw-r--r--  CHANGELOG.md  388
-rw-r--r--  CMakeLists.txt  1364
-rw-r--r--  CODE_OF_CONDUCT.md  80
-rw-r--r--  CONTRIBUTING.md  241
-rw-r--r--  CONTRIBUTORS.md  136
l---------  Dockerfile  1
-rw-r--r--  Dockerfile.test  74
-rw-r--r--  HISTORICAL_CHANGELOG.md  650
-rw-r--r--  LICENSE  684
-rw-r--r--  Makefile.am  1027
-rw-r--r--  README.md  229
-rw-r--r--  REDISTRIBUTED.md  183
-rw-r--r--  SECURITY.md  51
-rw-r--r--  aclk/README.md  151
-rw-r--r--  aclk/legacy/Makefile.am  19
-rw-r--r--  aclk/legacy/aclk_common.c  259
-rw-r--r--  aclk/legacy/aclk_common.h  72
-rw-r--r--  aclk/legacy/aclk_lws_https_client.c  246
-rw-r--r--  aclk/legacy/aclk_lws_https_client.h  18
-rw-r--r--  aclk/legacy/aclk_lws_wss_client.c  621
-rw-r--r--  aclk/legacy/aclk_lws_wss_client.h  92
-rw-r--r--  aclk/legacy/aclk_query.c  789
-rw-r--r--  aclk/legacy/aclk_query.h  40
-rw-r--r--  aclk/legacy/aclk_rrdhost_state.h  42
-rw-r--r--  aclk/legacy/aclk_rx_msgs.c  365
-rw-r--r--  aclk/legacy/aclk_rx_msgs.h  13
-rw-r--r--  aclk/legacy/aclk_stats.c  298
-rw-r--r--  aclk/legacy/aclk_stats.h  91
-rw-r--r--  aclk/legacy/agent_cloud_link.c  1683
-rw-r--r--  aclk/legacy/agent_cloud_link.h  93
-rw-r--r--  aclk/legacy/mqtt.c  366
-rw-r--r--  aclk/legacy/mqtt.h  25
-rw-r--r--  aclk/legacy/tests/fake-charts.d.plugin  24
-rw-r--r--  aclk/legacy/tests/install-fake-charts.d.sh.in  6
-rwxr-xr-x  aclk/legacy/tests/launch-paho.sh  4
-rw-r--r--  aclk/legacy/tests/paho-inspection.py  59
-rw-r--r--  aclk/legacy/tests/paho.Dockerfile  14
-rw-r--r--  backends/Makefile.am  22
-rw-r--r--  backends/README.md  236
-rw-r--r--  backends/TIMESCALE.md  57
-rw-r--r--  backends/WALKTHROUGH.md  258
-rw-r--r--  backends/aws_kinesis/Makefile.am  12
-rw-r--r--  backends/aws_kinesis/README.md  53
-rw-r--r--  backends/aws_kinesis/aws_kinesis.c  94
-rw-r--r--  backends/aws_kinesis/aws_kinesis.conf  10
-rw-r--r--  backends/aws_kinesis/aws_kinesis.h  14
-rw-r--r--  backends/aws_kinesis/aws_kinesis_put_record.cc  87
-rw-r--r--  backends/aws_kinesis/aws_kinesis_put_record.h  25
-rw-r--r--  backends/backends.c  1243
-rw-r--r--  backends/backends.h  97
-rw-r--r--  backends/graphite/Makefile.am  4
-rw-r--r--  backends/graphite/graphite.c  90
-rw-r--r--  backends/graphite/graphite.h  35
-rw-r--r--  backends/json/Makefile.am  4
-rw-r--r--  backends/json/json.c  152
-rw-r--r--  backends/json/json.h  34
-rw-r--r--  backends/mongodb/Makefile.am  8
-rw-r--r--  backends/mongodb/README.md  41
-rw-r--r--  backends/mongodb/mongodb.c  189
-rw-r--r--  backends/mongodb/mongodb.conf  12
-rw-r--r--  backends/mongodb/mongodb.h  16
-rwxr-xr-x  backends/nc-backend.sh  158
-rw-r--r--  backends/opentsdb/Makefile.am  4
-rw-r--r--  backends/opentsdb/README.md  38
-rw-r--r--  backends/opentsdb/opentsdb.c  205
-rw-r--r--  backends/opentsdb/opentsdb.h  58
-rw-r--r--  backends/prometheus/Makefile.am  12
-rw-r--r--  backends/prometheus/README.md  457
-rw-r--r--  backends/prometheus/backend_prometheus.c  797
-rw-r--r--  backends/prometheus/backend_prometheus.h  37
-rw-r--r--  backends/prometheus/remote_write/Makefile.am  14
-rw-r--r--  backends/prometheus/remote_write/README.md  41
-rw-r--r--  backends/prometheus/remote_write/remote_write.cc  120
-rw-r--r--  backends/prometheus/remote_write/remote_write.h  30
-rw-r--r--  backends/prometheus/remote_write/remote_write.proto  29
-rwxr-xr-x  build-artifacts.sh  27
-rw-r--r--  build/m4/ax_c___atomic.m4  36
-rw-r--r--  build/m4/ax_c__generic.m4  28
-rw-r--r--  build/m4/ax_c_lto.m4  21
-rw-r--r--  build/m4/ax_c_mallinfo.m4  24
-rw-r--r--  build/m4/ax_c_mallopt.m4  20
-rw-r--r--  build/m4/ax_c_statement_expressions.m4  23
-rw-r--r--  build/m4/ax_check_compile_flag.m4  50
-rw-r--r--  build/m4/ax_check_enable_debug.m4  122
-rw-r--r--  build/m4/ax_gcc_func_attribute.m4  223
-rw-r--r--  build/m4/ax_pthread.m4  308
-rw-r--r--  build/m4/jemalloc.m4  75
-rw-r--r--  build/m4/tcmalloc.m4  45
-rw-r--r--  build/subst.inc  20
-rw-r--r--  build_external/README.md  128
-rwxr-xr-x  build_external/bin/clean-install.sh  54
-rwxr-xr-x  build_external/bin/make-install.sh  8
-rw-r--r--  build_external/clean-install-arch-debug.Dockerfile  62
-rw-r--r--  build_external/clean-install-arch-extras.Dockerfile  58
-rw-r--r--  build_external/clean-install-arch.Dockerfile  54
-rw-r--r--  build_external/clean-install.Dockerfile  39
-rw-r--r--  build_external/make-install.Dockerfile  11
-rw-r--r--  build_external/scenarios/aclk-testing/agent-compose.yml  19
-rw-r--r--  build_external/scenarios/aclk-testing/agent-valgrind-compose.yml  19
-rw-r--r--  build_external/scenarios/aclk-testing/agent_netdata.conf  7158
-rw-r--r--  build_external/scenarios/aclk-testing/configureVerneMQ.Dockerfile  8
-rw-r--r--  build_external/scenarios/aclk-testing/paho-compose.yml  6
-rw-r--r--  build_external/scenarios/aclk-testing/paho-inspection.py  33
-rw-r--r--  build_external/scenarios/aclk-testing/paho.Dockerfile  12
-rw-r--r--  build_external/scenarios/aclk-testing/vernemq-compose.yml  7
-rw-r--r--  build_external/scenarios/aclk-testing/vernemq.conf  68
-rw-r--r--  build_external/scenarios/gaps_hi/child-compose.yml  14
-rw-r--r--  build_external/scenarios/gaps_hi/child_guid  1
-rw-r--r--  build_external/scenarios/gaps_hi/child_stream.conf  11
-rw-r--r--  build_external/scenarios/gaps_hi/middle-compose.yml  13
-rw-r--r--  build_external/scenarios/gaps_hi/middle_guid  1
-rw-r--r--  build_external/scenarios/gaps_hi/middle_stream.conf  23
-rw-r--r--  build_external/scenarios/gaps_hi/min.conf  6
-rw-r--r--  build_external/scenarios/gaps_hi/parent-compose.yml  13
-rw-r--r--  build_external/scenarios/gaps_hi/parent_guid  1
-rw-r--r--  build_external/scenarios/gaps_hi/parent_stream.conf  11
-rw-r--r--  build_external/scenarios/gaps_lo/child-compose.yml  15
-rw-r--r--  build_external/scenarios/gaps_lo/child_guid  1
-rw-r--r--  build_external/scenarios/gaps_lo/child_stream.conf  11
-rw-r--r--  build_external/scenarios/gaps_lo/middle-compose.yml  14
-rw-r--r--  build_external/scenarios/gaps_lo/middle_guid  1
-rw-r--r--  build_external/scenarios/gaps_lo/middle_stream.conf  20
-rw-r--r--  build_external/scenarios/gaps_lo/mostly_off.conf  965
-rw-r--r--  build_external/scenarios/gaps_lo/parent-compose.yml  13
-rw-r--r--  build_external/scenarios/gaps_lo/parent_guid  1
-rw-r--r--  build_external/scenarios/gaps_lo/parent_stream.conf  12
-rw-r--r--  build_external/scenarios/only-agent/docker-compose.yml  8
-rw-r--r--  build_external/scenarios/parent-child/child_stream.conf  10
-rw-r--r--  build_external/scenarios/parent-child/docker-compose.yml  23
-rw-r--r--  build_external/scenarios/parent-child/parent_stream.conf  7
-rw-r--r--  claim/Makefile.am  21
-rw-r--r--  claim/README.md  403
-rw-r--r--  claim/claim.c  194
-rw-r--r--  claim/claim.h  16
-rwxr-xr-x  claim/netdata-claim.sh.in  401
-rw-r--r--  cli/Makefile.am  8
-rw-r--r--  cli/README.md  34
-rw-r--r--  cli/cli.c  201
-rw-r--r--  cli/cli.h  8
-rw-r--r--  collectors/COLLECTORS.md  538
-rw-r--r--  collectors/Makefile.am  41
-rw-r--r--  collectors/QUICKSTART.md  125
-rw-r--r--  collectors/README.md  52
-rw-r--r--  collectors/REFERENCE.md  186
-rw-r--r--  collectors/all.h  344
-rw-r--r--  collectors/apps.plugin/Makefile.am  12
-rw-r--r--  collectors/apps.plugin/README.md  399
-rw-r--r--  collectors/apps.plugin/apps_groups.conf  316
-rw-r--r--  collectors/apps.plugin/apps_plugin.c  4226
-rw-r--r--  collectors/cgroups.plugin/Makefile.am  21
-rw-r--r--  collectors/cgroups.plugin/README.md  233
-rwxr-xr-x  collectors/cgroups.plugin/cgroup-name.sh.in  470
-rwxr-xr-x  collectors/cgroups.plugin/cgroup-network-helper.sh  297
-rw-r--r--  collectors/cgroups.plugin/cgroup-network.c  714
-rw-r--r--  collectors/cgroups.plugin/sys_fs_cgroup.c  4051
-rw-r--r--  collectors/cgroups.plugin/sys_fs_cgroup.h  33
-rw-r--r--  collectors/cgroups.plugin/tests/test_cgroups_plugin.c  110
-rw-r--r--  collectors/cgroups.plugin/tests/test_cgroups_plugin.h  16
-rw-r--r--  collectors/cgroups.plugin/tests/test_doubles.c  162
-rw-r--r--  collectors/charts.d.plugin/Makefile.am  50
-rw-r--r--  collectors/charts.d.plugin/README.md  198
-rw-r--r--  collectors/charts.d.plugin/ap/Makefile.inc  13
-rw-r--r--  collectors/charts.d.plugin/ap/README.md  99
-rw-r--r--  collectors/charts.d.plugin/ap/ap.chart.sh  179
-rw-r--r--  collectors/charts.d.plugin/ap/ap.conf  23
-rw-r--r--  collectors/charts.d.plugin/apcupsd/Makefile.inc  13
-rw-r--r--  collectors/charts.d.plugin/apcupsd/README.md  21
-rw-r--r--  collectors/charts.d.plugin/apcupsd/apcupsd.chart.sh  213
-rw-r--r--  collectors/charts.d.plugin/apcupsd/apcupsd.conf  25
-rw-r--r--  collectors/charts.d.plugin/charts.d.conf  47
-rwxr-xr-x  collectors/charts.d.plugin/charts.d.dryrun-helper.sh  72
-rwxr-xr-x  collectors/charts.d.plugin/charts.d.plugin.in  732
-rw-r--r--  collectors/charts.d.plugin/example/Makefile.inc  13
-rw-r--r--  collectors/charts.d.plugin/example/README.md  10
-rw-r--r--  collectors/charts.d.plugin/example/example.chart.sh  123
-rw-r--r--  collectors/charts.d.plugin/example/example.conf  21
-rw-r--r--  collectors/charts.d.plugin/libreswan/Makefile.inc  13
-rw-r--r--  collectors/charts.d.plugin/libreswan/README.md  56
-rw-r--r--  collectors/charts.d.plugin/libreswan/libreswan.chart.sh  187
-rw-r--r--  collectors/charts.d.plugin/libreswan/libreswan.conf  29
-rw-r--r--  collectors/charts.d.plugin/loopsleepms.sh.inc  227
-rw-r--r--  collectors/charts.d.plugin/nut/Makefile.inc  13
-rw-r--r--  collectors/charts.d.plugin/nut/README.md  74
-rw-r--r--  collectors/charts.d.plugin/nut/nut.chart.sh  232
-rw-r--r--  collectors/charts.d.plugin/nut/nut.conf  33
-rw-r--r--  collectors/charts.d.plugin/opensips/Makefile.inc  13
-rw-r--r--  collectors/charts.d.plugin/opensips/README.md  19
-rw-r--r--  collectors/charts.d.plugin/opensips/opensips.chart.sh  324
-rw-r--r--  collectors/charts.d.plugin/opensips/opensips.conf  21
-rw-r--r--  collectors/charts.d.plugin/sensors/Makefile.inc  13
-rw-r--r--  collectors/charts.d.plugin/sensors/README.md  67
-rw-r--r--  collectors/charts.d.plugin/sensors/sensors.chart.sh  250
-rw-r--r--  collectors/charts.d.plugin/sensors/sensors.conf  32
-rw-r--r--  collectors/checks.plugin/Makefile.am  8
-rw-r--r--  collectors/checks.plugin/README.md  10
-rw-r--r--  collectors/checks.plugin/plugin_checks.c  129
-rw-r--r--  collectors/checks.plugin/plugin_checks.h  29
-rw-r--r--  collectors/cups.plugin/Makefile.am  8
-rw-r--r--  collectors/cups.plugin/README.md  62
-rw-r--r--  collectors/cups.plugin/cups_plugin.c  479
-rw-r--r--  collectors/diskspace.plugin/Makefile.am  8
-rw-r--r--  collectors/diskspace.plugin/README.md  43
-rw-r--r--  collectors/diskspace.plugin/plugin_diskspace.c  469
-rw-r--r--  collectors/diskspace.plugin/plugin_diskspace.h  34
-rw-r--r--  collectors/ebpf.plugin/Makefile.am  25
-rw-r--r--  collectors/ebpf.plugin/README.md  400
-rw-r--r--  collectors/ebpf.plugin/ebpf.c  1977
-rw-r--r--  collectors/ebpf.plugin/ebpf.conf  45
-rw-r--r--  collectors/ebpf.plugin/ebpf.h  207
-rw-r--r--  collectors/ebpf.plugin/ebpf_apps.c  1082
-rw-r--r--  collectors/ebpf.plugin/ebpf_apps.h  430
-rw-r--r--  collectors/ebpf.plugin/ebpf_kernel_reject_list.txt  1
-rw-r--r--  collectors/ebpf.plugin/ebpf_process.c  1057
-rw-r--r--  collectors/ebpf.plugin/ebpf_process.h  139
-rw-r--r--  collectors/ebpf.plugin/ebpf_socket.c  1920
-rw-r--r--  collectors/ebpf.plugin/ebpf_socket.h  275
-rw-r--r--  collectors/ebpf.plugin/reset_netdata_trace.sh.in  9
-rw-r--r--  collectors/fping.plugin/Makefile.am  24
-rw-r--r--  collectors/fping.plugin/README.md  102
-rw-r--r--  collectors/fping.plugin/fping.conf  44
-rwxr-xr-x  collectors/fping.plugin/fping.plugin.in  200
-rw-r--r--  collectors/freebsd.plugin/Makefile.am  8
-rw-r--r--  collectors/freebsd.plugin/README.md  12
-rw-r--r--  collectors/freebsd.plugin/freebsd_devstat.c  789
-rw-r--r--  collectors/freebsd.plugin/freebsd_getifaddrs.c  627
-rw-r--r--  collectors/freebsd.plugin/freebsd_getmntinfo.c  305
-rw-r--r--  collectors/freebsd.plugin/freebsd_ipfw.c  372
-rw-r--r--  collectors/freebsd.plugin/freebsd_kstat_zfs.c  304
-rw-r--r--  collectors/freebsd.plugin/freebsd_sysctl.c  3240
-rw-r--r--  collectors/freebsd.plugin/plugin_freebsd.c  175
-rw-r--r--  collectors/freebsd.plugin/plugin_freebsd.h  74
-rw-r--r--  collectors/freeipmi.plugin/Makefile.am  8
-rw-r--r--  collectors/freeipmi.plugin/README.md  187
-rw-r--r--  collectors/freeipmi.plugin/freeipmi_plugin.c  1871
-rw-r--r--  collectors/idlejitter.plugin/Makefile.am  8
-rw-r--r--  collectors/idlejitter.plugin/README.md  20
-rw-r--r--  collectors/idlejitter.plugin/plugin_idlejitter.c  92
-rw-r--r--  collectors/idlejitter.plugin/plugin_idlejitter.h  21
-rw-r--r--  collectors/ioping.plugin/Makefile.am  24
-rw-r--r--  collectors/ioping.plugin/README.md  86
-rw-r--r--  collectors/ioping.plugin/ioping.conf  40
-rwxr-xr-x  collectors/ioping.plugin/ioping.plugin.in  212
-rw-r--r--  collectors/macos.plugin/Makefile.am  8
-rw-r--r--  collectors/macos.plugin/README.md  12
-rw-r--r--  collectors/macos.plugin/macos_fw.c  687
-rw-r--r--  collectors/macos.plugin/macos_mach_smi.c  241
-rw-r--r--  collectors/macos.plugin/macos_sysctl.c  1526
-rw-r--r--  collectors/macos.plugin/plugin_macos.c  69
-rw-r--r--  collectors/macos.plugin/plugin_macos.h  43
-rw-r--r--  collectors/nfacct.plugin/Makefile.am  8
-rw-r--r--  collectors/nfacct.plugin/README.md  54
-rw-r--r--  collectors/nfacct.plugin/plugin_nfacct.c  944
-rw-r--r--  collectors/node.d.plugin/Makefile.am  61
-rw-r--r--  collectors/node.d.plugin/README.md  236
-rw-r--r--  collectors/node.d.plugin/fronius/Makefile.inc  13
-rw-r--r--  collectors/node.d.plugin/fronius/README.md  135
-rw-r--r--  collectors/node.d.plugin/fronius/fronius.node.js  400
-rw-r--r--  collectors/node.d.plugin/named/Makefile.inc  13
-rw-r--r--  collectors/node.d.plugin/named/README.md  348
-rw-r--r--  collectors/node.d.plugin/named/named.node.js  610
-rw-r--r--  collectors/node.d.plugin/node.d.conf  39
-rwxr-xr-x  collectors/node.d.plugin/node.d.plugin.in  303
-rw-r--r--  collectors/node.d.plugin/node_modules/asn1-ber.js  7
-rw-r--r--  collectors/node.d.plugin/node_modules/extend.js  88
-rw-r--r--  collectors/node.d.plugin/node_modules/lib/ber/errors.js  10
-rw-r--r--  collectors/node.d.plugin/node_modules/lib/ber/index.js  18
-rw-r--r--  collectors/node.d.plugin/node_modules/lib/ber/reader.js  270
-rw-r--r--  collectors/node.d.plugin/node_modules/lib/ber/types.js  35
-rw-r--r--  collectors/node.d.plugin/node_modules/lib/ber/writer.js  318
-rw-r--r--  collectors/node.d.plugin/node_modules/net-snmp.js  3452
-rw-r--r--  collectors/node.d.plugin/node_modules/netdata.js  654
-rw-r--r--  collectors/node.d.plugin/node_modules/pixl-xml.js  607
-rw-r--r--  collectors/node.d.plugin/sma_webbox/Makefile.inc  13
-rw-r--r--  collectors/node.d.plugin/sma_webbox/README.md  33
-rw-r--r--  collectors/node.d.plugin/sma_webbox/sma_webbox.node.js  239
-rw-r--r--  collectors/node.d.plugin/snmp/Makefile.inc  13
-rw-r--r--  collectors/node.d.plugin/snmp/README.md  432
-rw-r--r--  collectors/node.d.plugin/snmp/snmp.node.js  527
-rw-r--r--  collectors/node.d.plugin/stiebeleltron/Makefile.inc  13
-rw-r--r--  collectors/node.d.plugin/stiebeleltron/README.md  525
-rw-r--r--  collectors/node.d.plugin/stiebeleltron/stiebeleltron.node.js  197
-rw-r--r--  collectors/perf.plugin/Makefile.am  8
-rw-r--r--  collectors/perf.plugin/README.md  80
-rw-r--r--  collectors/perf.plugin/perf_plugin.c  1348
-rw-r--r--  collectors/plugins.d/Makefile.am  11
-rw-r--r--  collectors/plugins.d/README.md  484
-rw-r--r--  collectors/plugins.d/plugins_d.c  403
-rw-r--r--  collectors/plugins.d/plugins_d.h  84
-rw-r--r--  collectors/plugins.d/pluginsd_parser.c  738
-rw-r--r--  collectors/plugins.d/pluginsd_parser.h  40
-rw-r--r--  collectors/proc.plugin/Makefile.am  8
-rw-r--r--  collectors/proc.plugin/README.md  571
-rw-r--r--  collectors/proc.plugin/ipc.c  576
-rw-r--r--  collectors/proc.plugin/plugin_proc.c  230
-rw-r--r--  collectors/proc.plugin/plugin_proc.h  83
-rw-r--r--  collectors/proc.plugin/proc_diskstats.c  1700
-rw-r--r--  collectors/proc.plugin/proc_interrupts.c  248
-rw-r--r--  collectors/proc.plugin/proc_loadavg.c  131
-rw-r--r--  collectors/proc.plugin/proc_mdstat.c  647
-rw-r--r--  collectors/proc.plugin/proc_meminfo.c  528
-rw-r--r--  collectors/proc.plugin/proc_net_dev.c  1127
-rw-r--r--  collectors/proc.plugin/proc_net_ip_vs_stats.c  133
-rw-r--r--  collectors/proc.plugin/proc_net_netstat.c  869
-rw-r--r--  collectors/proc.plugin/proc_net_rpc_nfs.c  454
-rw-r--r--  collectors/proc.plugin/proc_net_rpc_nfsd.c  1006
-rw-r--r--  collectors/proc.plugin/proc_net_sctp_snmp.c  373
-rw-r--r--  collectors/proc.plugin/proc_net_snmp.c  1130
-rw-r--r--  collectors/proc.plugin/proc_net_snmp6.c  1293
-rw-r--r--  collectors/proc.plugin/proc_net_sockstat.c  538
-rw-r--r--  collectors/proc.plugin/proc_net_sockstat6.c  283
-rw-r--r--  collectors/proc.plugin/proc_net_softnet_stat.c  151
-rw-r--r--  collectors/proc.plugin/proc_net_stat_conntrack.c  351
-rw-r--r--  collectors/proc.plugin/proc_net_stat_synproxy.c  189
-rw-r--r--  collectors/proc.plugin/proc_net_wireless.c  453
-rw-r--r--  collectors/proc.plugin/proc_pagetypeinfo.c  333
-rw-r--r--  collectors/proc.plugin/proc_pressure.c  178
-rw-r--r--  collectors/proc.plugin/proc_pressure.h  31
-rw-r--r--  collectors/proc.plugin/proc_self_mountinfo.c  403
-rw-r--r--  collectors/proc.plugin/proc_self_mountinfo.h  57
-rw-r--r--  collectors/proc.plugin/proc_softirqs.c  242
-rw-r--r--  collectors/proc.plugin/proc_spl_kstat_zfs.c  196
-rw-r--r--  collectors/proc.plugin/proc_stat.c  1069
-rw-r--r--  collectors/proc.plugin/proc_sys_kernel_random_entropy_avail.c  49
-rw-r--r--  collectors/proc.plugin/proc_uptime.c  46
-rw-r--r--  collectors/proc.plugin/proc_vmstat.c  263
-rw-r--r--  collectors/proc.plugin/sys_block_zram.c  292
-rw-r--r--  collectors/proc.plugin/sys_class_infiniband.c  704
-rw-r--r--  collectors/proc.plugin/sys_class_power_supply.c  410
-rw-r--r--  collectors/proc.plugin/sys_devices_system_edac_mc.c  208
-rw-r--r--  collectors/proc.plugin/sys_devices_system_node.c  164
-rw-r--r--  collectors/proc.plugin/sys_fs_btrfs.c  734
-rw-r--r--  collectors/proc.plugin/sys_kernel_mm_ksm.c  201
-rw-r--r--  collectors/proc.plugin/zfs_common.c  772
-rw-r--r--  collectors/proc.plugin/zfs_common.h  115
-rw-r--r--  collectors/python.d.plugin/Makefile.am  253
-rw-r--r--  collectors/python.d.plugin/README.md  268
-rw-r--r--  collectors/python.d.plugin/adaptec_raid/Makefile.inc  13
-rw-r--r--  collectors/python.d.plugin/adaptec_raid/README.md  80
-rw-r--r--  collectors/python.d.plugin/adaptec_raid/adaptec_raid.chart.py  245
-rw-r--r--  collectors/python.d.plugin/adaptec_raid/adaptec_raid.conf  53
-rw-r--r--  collectors/python.d.plugin/alarms/Makefile.inc  13
-rw-r--r--  collectors/python.d.plugin/alarms/README.md  58
-rw-r--r--  collectors/python.d.plugin/alarms/alarms.chart.py  71
-rw-r--r--  collectors/python.d.plugin/alarms/alarms.conf  50
-rw-r--r--  collectors/python.d.plugin/am2320/Makefile.inc  8
-rw-r--r--  collectors/python.d.plugin/am2320/README.md  54
-rw-r--r--  collectors/python.d.plugin/am2320/am2320.chart.py  68
-rw-r--r--  collectors/python.d.plugin/am2320/am2320.conf  68
-rw-r--r--  collectors/python.d.plugin/anomalies/Makefile.inc  13
-rw-r--r--  collectors/python.d.plugin/anomalies/README.md  231
-rw-r--r--  collectors/python.d.plugin/anomalies/anomalies.chart.py  349
-rw-r--r--  collectors/python.d.plugin/anomalies/anomalies.conf  181
-rw-r--r--  collectors/python.d.plugin/apache/Makefile.inc  13
-rw-r--r--  collectors/python.d.plugin/apache/README.md  82
-rw-r--r--  collectors/python.d.plugin/apache/apache.chart.py  159
-rw-r--r--  collectors/python.d.plugin/apache/apache.conf  85
-rw-r--r--  collectors/python.d.plugin/beanstalk/Makefile.inc  13
-rw-r--r--  collectors/python.d.plugin/beanstalk/README.md  133
-rw-r--r--  collectors/python.d.plugin/beanstalk/beanstalk.chart.py  252
-rw-r--r--  collectors/python.d.plugin/beanstalk/beanstalk.conf  78
-rw-r--r--  collectors/python.d.plugin/bind_rndc/Makefile.inc  13
-rw-r--r--  collectors/python.d.plugin/bind_rndc/README.md  79
-rw-r--r--  collectors/python.d.plugin/bind_rndc/bind_rndc.chart.py  252
-rw-r--r--  collectors/python.d.plugin/bind_rndc/bind_rndc.conf  110
-rw-r--r--  collectors/python.d.plugin/boinc/Makefile.inc  13
-rw-r--r--  collectors/python.d.plugin/boinc/README.md  41
-rw-r--r--  collectors/python.d.plugin/boinc/boinc.chart.py  168
-rw-r--r--  collectors/python.d.plugin/boinc/boinc.conf  66
-rw-r--r--  collectors/python.d.plugin/ceph/Makefile.inc  13
-rw-r--r--  collectors/python.d.plugin/ceph/README.md  48
-rw-r--r--  collectors/python.d.plugin/ceph/ceph.chart.py  374
-rw-r--r--  collectors/python.d.plugin/ceph/ceph.conf  75
-rw-r--r--  collectors/python.d.plugin/chrony/Makefile.inc  13
-rw-r--r--  collectors/python.d.plugin/chrony/README.md  61
-rw-r--r--  collectors/python.d.plugin/chrony/chrony.chart.py  118
-rw-r--r--  collectors/python.d.plugin/chrony/chrony.conf  77
-rw-r--r--  collectors/python.d.plugin/couchdb/Makefile.inc  13
-rw-r--r--  collectors/python.d.plugin/couchdb/README.md  53
-rw-r--r--  collectors/python.d.plugin/couchdb/couchdb.chart.py  398
-rw-r--r--  collectors/python.d.plugin/couchdb/couchdb.conf  89
-rw-r--r--  collectors/python.d.plugin/dns_query_time/Makefile.inc  13
-rw-r--r--  collectors/python.d.plugin/dns_query_time/README.md  29
-rw-r--r--  collectors/python.d.plugin/dns_query_time/dns_query_time.chart.py  149
-rw-r--r--  collectors/python.d.plugin/dns_query_time/dns_query_time.conf  69
-rw-r--r--  collectors/python.d.plugin/dnsdist/Makefile.inc  13
-rw-r--r--  collectors/python.d.plugin/dnsdist/README.md  72
-rw-r--r--  collectors/python.d.plugin/dnsdist/dnsdist.chart.py  131
-rw-r--r--  collectors/python.d.plugin/dnsdist/dnsdist.conf  83
-rw-r--r--  collectors/python.d.plugin/dockerd/Makefile.inc  13
-rw-r--r--  collectors/python.d.plugin/dockerd/README.md  46
-rw-r--r--  collectors/python.d.plugin/dockerd/dockerd.chart.py  86
-rw-r--r--  collectors/python.d.plugin/dockerd/dockerd.conf  77
-rw-r--r--  collectors/python.d.plugin/dovecot/Makefile.inc  13
-rw-r--r--  collectors/python.d.plugin/dovecot/README.md  105
-rw-r--r--  collectors/python.d.plugin/dovecot/dovecot.chart.py  143
-rw-r--r--  collectors/python.d.plugin/dovecot/dovecot.conf  98
-rw-r--r--  collectors/python.d.plugin/elasticsearch/Makefile.inc  13
-rw-r--r--  collectors/python.d.plugin/elasticsearch/README.md  94
-rw-r--r--  collectors/python.d.plugin/elasticsearch/elasticsearch.chart.py  774
-rw-r--r--  collectors/python.d.plugin/elasticsearch/elasticsearch.conf  83
-rw-r--r--  collectors/python.d.plugin/energid/Makefile.inc  13
-rw-r--r--  collectors/python.d.plugin/energid/README.md  77
-rw-r--r--  collectors/python.d.plugin/energid/energid.chart.py  163
-rw-r--r--  collectors/python.d.plugin/energid/energid.conf  90
-rw-r--r--  collectors/python.d.plugin/example/Makefile.inc  13
-rw-r--r--  collectors/python.d.plugin/example/README.md  11
-rw-r--r--  collectors/python.d.plugin/example/example.chart.py  48
-rw-r--r--  collectors/python.d.plugin/example/example.conf  68
-rw-r--r--  collectors/python.d.plugin/exim/Makefile.inc  13
-rw-r--r--  collectors/python.d.plugin/exim/README.md  41
-rw-r--r--  collectors/python.d.plugin/exim/exim.chart.py  39
-rw-r--r--  collectors/python.d.plugin/exim/exim.conf  91
-rw-r--r--  collectors/python.d.plugin/fail2ban/Makefile.inc  13
-rw-r--r--  collectors/python.d.plugin/fail2ban/README.md  41
-rw-r--r--  collectors/python.d.plugin/fail2ban/fail2ban.chart.py  204
-rw-r--r--  collectors/python.d.plugin/fail2ban/fail2ban.conf  68
-rw-r--r--  collectors/python.d.plugin/freeradius/Makefile.inc  13
-rw-r--r--  collectors/python.d.plugin/freeradius/README.md  90
-rw-r--r--  collectors/python.d.plugin/freeradius/freeradius.chart.py  177
-rw-r--r--  collectors/python.d.plugin/freeradius/freeradius.conf  80
-rw-r--r--  collectors/python.d.plugin/gearman/Makefile.inc  13
-rw-r--r--  collectors/python.d.plugin/gearman/README.md  51
-rw-r--r--  collectors/python.d.plugin/gearman/gearman.chart.py  243
-rw-r--r--  collectors/python.d.plugin/gearman/gearman.conf  72
-rw-r--r--  collectors/python.d.plugin/go_expvar/Makefile.inc  13
-rw-r--r--  collectors/python.d.plugin/go_expvar/README.md  319
-rw-r--r--  collectors/python.d.plugin/go_expvar/go_expvar.chart.py  253
-rw-r--r--  collectors/python.d.plugin/go_expvar/go_expvar.conf  108
-rw-r--r--  collectors/python.d.plugin/haproxy/Makefile.inc  13
-rw-r--r--  collectors/python.d.plugin/haproxy/README.md  67
-rw-r--r--  collectors/python.d.plugin/haproxy/haproxy.chart.py  360
-rw-r--r--  collectors/python.d.plugin/haproxy/haproxy.conf  83
-rw-r--r--  collectors/python.d.plugin/hddtemp/Makefile.inc  13
-rw-r--r--  collectors/python.d.plugin/hddtemp/README.md  38
-rw-r--r--  collectors/python.d.plugin/hddtemp/hddtemp.chart.py  99
-rw-r--r--  collectors/python.d.plugin/hddtemp/hddtemp.conf  95
-rw-r--r--  collectors/python.d.plugin/hpssa/Makefile.inc  13
-rw-r--r--  collectors/python.d.plugin/hpssa/README.md  81
-rw-r--r--  collectors/python.d.plugin/hpssa/hpssa.chart.py  395
-rw-r--r--  collectors/python.d.plugin/hpssa/hpssa.conf  61
-rw-r--r--  collectors/python.d.plugin/httpcheck/Makefile.inc  13
-rw-r--r--  collectors/python.d.plugin/httpcheck/README.md  59
-rw-r--r--  collectors/python.d.plugin/httpcheck/httpcheck.chart.py  125
-rw-r--r--  collectors/python.d.plugin/httpcheck/httpcheck.conf  107
-rw-r--r--  collectors/python.d.plugin/icecast/Makefile.inc  13
-rw-r--r--  collectors/python.d.plugin/icecast/README.md  44
-rw-r--r--  collectors/python.d.plugin/icecast/icecast.chart.py  94
-rw-r--r--  collectors/python.d.plugin/icecast/icecast.conf  81
-rw-r--r--  collectors/python.d.plugin/ipfs/Makefile.inc  13
-rw-r--r--  collectors/python.d.plugin/ipfs/README.md  51
-rw-r--r--  collectors/python.d.plugin/ipfs/ipfs.chart.py  149
-rw-r--r--  collectors/python.d.plugin/ipfs/ipfs.conf  82
-rw-r--r--  collectors/python.d.plugin/isc_dhcpd/Makefile.inc  13
-rw-r--r--  collectors/python.d.plugin/isc_dhcpd/README.md  57
-rw-r--r--  collectors/python.d.plugin/isc_dhcpd/isc_dhcpd.chart.py  269
-rw-r--r--  collectors/python.d.plugin/isc_dhcpd/isc_dhcpd.conf  80
-rw-r--r--  collectors/python.d.plugin/litespeed/Makefile.inc  13
-rw-r--r--  collectors/python.d.plugin/litespeed/README.md  72
-rw-r--r--  collectors/python.d.plugin/litespeed/litespeed.chart.py  188
-rw-r--r--  collectors/python.d.plugin/litespeed/litespeed.conf  72
-rw-r--r--  collectors/python.d.plugin/logind/Makefile.inc  13
-rw-r--r--  collectors/python.d.plugin/logind/README.md  86
-rw-r--r--  collectors/python.d.plugin/logind/logind.chart.py  85
-rw-r--r--  collectors/python.d.plugin/logind/logind.conf  60
-rw-r--r--  collectors/python.d.plugin/megacli/Makefile.inc  13
-rw-r--r--  collectors/python.d.plugin/megacli/README.md  85
-rw-r--r--  collectors/python.d.plugin/megacli/megacli.chart.py  278
-rw-r--r--  collectors/python.d.plugin/megacli/megacli.conf  60
-rw-r--r--  collectors/python.d.plugin/memcached/Makefile.inc  13
-rw-r--r--  collectors/python.d.plugin/memcached/README.md  99
-rw-r--r--  collectors/python.d.plugin/memcached/memcached.chart.py  197
-rw-r--r--  collectors/python.d.plugin/memcached/memcached.conf  90
-rw-r--r--  collectors/python.d.plugin/mongodb/Makefile.inc  13
-rw-r--r--  collectors/python.d.plugin/mongodb/README.md  210
-rw-r--r--  collectors/python.d.plugin/mongodb/mongodb.chart.py  786
-rw-r--r--  collectors/python.d.plugin/mongodb/mongodb.conf  102
-rw-r--r--  collectors/python.d.plugin/monit/Makefile.inc  13
-rw-r--r--  collectors/python.d.plugin/monit/README.md  52
-rw-r--r--  collectors/python.d.plugin/monit/monit.chart.py  360
-rw-r--r--  collectors/python.d.plugin/monit/monit.conf  86
-rw-r--r--  collectors/python.d.plugin/mysql/Makefile.inc  13
-rw-r--r--  collectors/python.d.plugin/mysql/README.md  396
-rw-r--r--  collectors/python.d.plugin/mysql/mysql.chart.py  976
-rw-r--r--  collectors/python.d.plugin/mysql/mysql.conf  293
-rw-r--r--  collectors/python.d.plugin/nginx/Makefile.inc  13
-rw-r--r--  collectors/python.d.plugin/nginx/README.md  65
-rw-r--r--  collectors/python.d.plugin/nginx/nginx.chart.py  71
-rw-r--r--  collectors/python.d.plugin/nginx/nginx.conf  107
-rw-r--r--  collectors/python.d.plugin/nginx_plus/Makefile.inc  13
-rw-r--r--  collectors/python.d.plugin/nginx_plus/README.md  165
-rw-r--r--  collectors/python.d.plugin/nginx_plus/nginx_plus.chart.py  487
-rw-r--r--  collectors/python.d.plugin/nginx_plus/nginx_plus.conf  85
-rw-r--r--  collectors/python.d.plugin/nsd/Makefile.inc  13
-rw-r--r--  collectors/python.d.plugin/nsd/README.md  68
-rw-r--r--  collectors/python.d.plugin/nsd/nsd.chart.py  105
-rw-r--r--  collectors/python.d.plugin/nsd/nsd.conf  91
-rw-r--r--  collectors/python.d.plugin/ntpd/Makefile.inc  13
-rw-r--r--  collectors/python.d.plugin/ntpd/README.md  90
-rw-r--r--  collectors/python.d.plugin/ntpd/ntpd.chart.py  385
-rw-r--r--  collectors/python.d.plugin/ntpd/ntpd.conf  89
-rw-r--r--  collectors/python.d.plugin/nvidia_smi/Makefile.inc  13
-rw-r--r--  collectors/python.d.plugin/nvidia_smi/README.md  58
-rw-r--r--  collectors/python.d.plugin/nvidia_smi/nvidia_smi.chart.py  550
-rw-r--r--  collectors/python.d.plugin/nvidia_smi/nvidia_smi.conf  68
-rw-r--r--  collectors/python.d.plugin/openldap/Makefile.inc  13
-rw-r--r--  collectors/python.d.plugin/openldap/README.md  79
-rw-r--r--  collectors/python.d.plugin/openldap/openldap.chart.py  216
-rw-r--r--  collectors/python.d.plugin/openldap/openldap.conf  75
-rw-r--r--  collectors/python.d.plugin/oracledb/Makefile.inc  13
-rw-r--r--  collectors/python.d.plugin/oracledb/README.md  97
-rw-r--r--  collectors/python.d.plugin/oracledb/oracledb.chart.py  831
-rw-r--r--  collectors/python.d.plugin/oracledb/oracledb.conf  84
-rw-r--r--  collectors/python.d.plugin/ovpn_status_log/Makefile.inc  13
-rw-r--r--  collectors/python.d.plugin/ovpn_status_log/README.md  50
-rw-r--r--  collectors/python.d.plugin/ovpn_status_log/ovpn_status_log.chart.py  136
-rw-r--r--  collectors/python.d.plugin/ovpn_status_log/ovpn_status_log.conf  97
-rw-r--r--  collectors/python.d.plugin/phpfpm/Makefile.inc  13
-rw-r--r--  collectors/python.d.plugin/phpfpm/README.md  51
-rw-r--r--  collectors/python.d.plugin/phpfpm/phpfpm.chart.py  174
-rw-r--r--  collectors/python.d.plugin/phpfpm/phpfpm.conf  88
-rw-r--r--  collectors/python.d.plugin/portcheck/Makefile.inc  13
-rw-r--r--  collectors/python.d.plugin/portcheck/README.md  52
-rw-r--r--  collectors/python.d.plugin/portcheck/portcheck.chart.py  157
-rw-r--r--  collectors/python.d.plugin/portcheck/portcheck.conf  74
-rw-r--r--  collectors/python.d.plugin/postfix/Makefile.inc  13
-rw-r--r--  collectors/python.d.plugin/postfix/README.md  27
-rw-r--r--  collectors/python.d.plugin/postfix/postfix.chart.py  52
-rw-r--r--  collectors/python.d.plugin/postfix/postfix.conf  72
-rw-r--r--  collectors/python.d.plugin/postgres/Makefile.inc  13
-rw-r--r--  collectors/python.d.plugin/postgres/README.md  99
-rw-r--r--  collectors/python.d.plugin/postgres/postgres.chart.py  1235
-rw-r--r--  collectors/python.d.plugin/postgres/postgres.conf  141
-rw-r--r--  collectors/python.d.plugin/powerdns/Makefile.inc  13
-rw-r--r--  collectors/python.d.plugin/powerdns/README.md  104
-rw-r--r--  collectors/python.d.plugin/powerdns/powerdns.chart.py  153
-rw-r--r--  collectors/python.d.plugin/powerdns/powerdns.conf  76
-rw-r--r--  collectors/python.d.plugin/proxysql/Makefile.inc  13
-rw-r--r--  collectors/python.d.plugin/proxysql/README.md  106
-rw-r--r--  collectors/python.d.plugin/proxysql/proxysql.chart.py  352
-rw-r--r--  collectors/python.d.plugin/proxysql/proxysql.conf  116
-rw-r--r--  collectors/python.d.plugin/puppet/Makefile.inc  13
-rw-r--r--  collectors/python.d.plugin/puppet/README.md  67
-rw-r--r--  collectors/python.d.plugin/puppet/puppet.chart.py  121
-rw-r--r--  collectors/python.d.plugin/puppet/puppet.conf  94
-rw-r--r--  collectors/python.d.plugin/python.d.conf  109
-rw-r--r--  collectors/python.d.plugin/python.d.plugin.in  852
-rw-r--r--  collectors/python.d.plugin/python_modules/__init__.py  0
-rw-r--r--  collectors/python.d.plugin/python_modules/bases/FrameworkServices/ExecutableService.py  91
-rw-r--r--  collectors/python.d.plugin/python_modules/bases/FrameworkServices/LogService.py  82
-rw-r--r--  collectors/python.d.plugin/python_modules/bases/FrameworkServices/MySQLService.py  163
-rw-r--r--  collectors/python.d.plugin/python_modules/bases/FrameworkServices/SimpleService.py  256
-rw-r--r--  collectors/python.d.plugin/python_modules/bases/FrameworkServices/SocketService.py  336
-rw-r--r--  collectors/python.d.plugin/python_modules/bases/FrameworkServices/UrlService.py  207
-rw-r--r--  collectors/python.d.plugin/python_modules/bases/FrameworkServices/__init__.py  0
-rw-r--r--  collectors/python.d.plugin/python_modules/bases/__init__.py  0
-rw-r--r--  collectors/python.d.plugin/python_modules/bases/charts.py  414
-rw-r--r--  collectors/python.d.plugin/python_modules/bases/collection.py  117
-rw-r--r--  collectors/python.d.plugin/python_modules/bases/loaders.py  46
-rw-r--r--  collectors/python.d.plugin/python_modules/bases/loggers.py  206
-rw-r--r--  collectors/python.d.plugin/python_modules/pyyaml2/__init__.py  316
-rw-r--r--  collectors/python.d.plugin/python_modules/pyyaml2/composer.py  140
-rw-r--r--  collectors/python.d.plugin/python_modules/pyyaml2/constructor.py  676
-rw-r--r--  collectors/python.d.plugin/python_modules/pyyaml2/cyaml.py  86
-rw-r--r--  collectors/python.d.plugin/python_modules/pyyaml2/dumper.py  63
-rw-r--r--  collectors/python.d.plugin/python_modules/pyyaml2/emitter.py  1141
-rw-r--r--  collectors/python.d.plugin/python_modules/pyyaml2/error.py  76
-rw-r--r--  collectors/python.d.plugin/python_modules/pyyaml2/events.py  87
-rw-r--r--  collectors/python.d.plugin/python_modules/pyyaml2/loader.py  41
-rw-r--r--  collectors/python.d.plugin/python_modules/pyyaml2/nodes.py  50
-rw-r--r--  collectors/python.d.plugin/python_modules/pyyaml2/parser.py  590
-rw-r--r--  collectors/python.d.plugin/python_modules/pyyaml2/reader.py  191
-rw-r--r--  collectors/python.d.plugin/python_modules/pyyaml2/representer.py  485
-rw-r--r--  collectors/python.d.plugin/python_modules/pyyaml2/resolver.py  225
-rw-r--r--  collectors/python.d.plugin/python_modules/pyyaml2/scanner.py  1458
-rw-r--r--  collectors/python.d.plugin/python_modules/pyyaml2/serializer.py  112
-rw-r--r--  collectors/python.d.plugin/python_modules/pyyaml2/tokens.py  105
-rw-r--r--  collectors/python.d.plugin/python_modules/pyyaml3/__init__.py  313
-rw-r--r--  collectors/python.d.plugin/python_modules/pyyaml3/composer.py  140
-rw-r--r--  collectors/python.d.plugin/python_modules/pyyaml3/constructor.py  687
-rw-r--r--  collectors/python.d.plugin/python_modules/pyyaml3/cyaml.py  86
-rw-r--r--  collectors/python.d.plugin/python_modules/pyyaml3/dumper.py  63
-rw-r--r--  collectors/python.d.plugin/python_modules/pyyaml3/emitter.py  1138
-rw-r--r--  collectors/python.d.plugin/python_modules/pyyaml3/error.py  76
-rw-r--r--  collectors/python.d.plugin/python_modules/pyyaml3/events.py  87
-rw-r--r--  collectors/python.d.plugin/python_modules/pyyaml3/loader.py  41
-rw-r--r--  collectors/python.d.plugin/python_modules/pyyaml3/nodes.py  50
-rw-r--r--  collectors/python.d.plugin/python_modules/pyyaml3/parser.py  590
-rw-r--r--  collectors/python.d.plugin/python_modules/pyyaml3/reader.py  193
-rw-r--r--  collectors/python.d.plugin/python_modules/pyyaml3/representer.py  375
-rw-r--r--  collectors/python.d.plugin/python_modules/pyyaml3/resolver.py  225
-rw-r--r--  collectors/python.d.plugin/python_modules/pyyaml3/scanner.py  1449
-rw-r--r--  collectors/python.d.plugin/python_modules/pyyaml3/serializer.py  112
-rw-r--r--  collectors/python.d.plugin/python_modules/pyyaml3/tokens.py  105
-rw-r--r--  collectors/python.d.plugin/python_modules/third_party/__init__.py  0
-rw-r--r--  collectors/python.d.plugin/python_modules/third_party/boinc_client.py  515
-rw-r--r--  collectors/python.d.plugin/python_modules/third_party/filelock.py  451
-rw-r--r--  collectors/python.d.plugin/python_modules/third_party/lm_sensors.py  327
-rw-r--r--  collectors/python.d.plugin/python_modules/third_party/mcrcon.py  74
-rw-r--r--  collectors/python.d.plugin/python_modules/third_party/monotonic.py  201
-rw-r--r--  collectors/python.d.plugin/python_modules/third_party/ordereddict.py  110
-rw-r--r--  collectors/python.d.plugin/python_modules/urllib3/__init__.py  98
-rw-r--r--  collectors/python.d.plugin/python_modules/urllib3/_collections.py  315
-rw-r--r--  collectors/python.d.plugin/python_modules/urllib3/connection.py  374
-rw-r--r--  collectors/python.d.plugin/python_modules/urllib3/connectionpool.py  900
-rw-r--r--  collectors/python.d.plugin/python_modules/urllib3/contrib/__init__.py  0
-rw-r--r--  collectors/python.d.plugin/python_modules/urllib3/contrib/_securetransport/__init__.py  0
-rw-r--r--  collectors/python.d.plugin/python_modules/urllib3/contrib/_securetransport/bindings.py  591
-rw-r--r--  collectors/python.d.plugin/python_modules/urllib3/contrib/_securetransport/low_level.py  344
-rw-r--r--  collectors/python.d.plugin/python_modules/urllib3/contrib/appengine.py  297
-rw-r--r--  collectors/python.d.plugin/python_modules/urllib3/contrib/ntlmpool.py  113
-rw-r--r--  collectors/python.d.plugin/python_modules/urllib3/contrib/pyopenssl.py  458
-rw-r--r--  collectors/python.d.plugin/python_modules/urllib3/contrib/securetransport.py  808
-rw-r--r--  collectors/python.d.plugin/python_modules/urllib3/contrib/socks.py  189
-rw-r--r--  collectors/python.d.plugin/python_modules/urllib3/exceptions.py  247
-rw-r--r--  collectors/python.d.plugin/python_modules/urllib3/fields.py  179
-rw-r--r--  collectors/python.d.plugin/python_modules/urllib3/filepost.py  95
-rw-r--r--  collectors/python.d.plugin/python_modules/urllib3/packages/__init__.py  5
-rw-r--r--  collectors/python.d.plugin/python_modules/urllib3/packages/backports/__init__.py  0
-rw-r--r--  collectors/python.d.plugin/python_modules/urllib3/packages/backports/makefile.py  54
-rw-r--r--  collectors/python.d.plugin/python_modules/urllib3/packages/ordered_dict.py  260
-rw-r--r--  collectors/python.d.plugin/python_modules/urllib3/packages/six.py  852
-rw-r--r--  collectors/python.d.plugin/python_modules/urllib3/packages/ssl_match_hostname/__init__.py  20
-rw-r--r--  collectors/python.d.plugin/python_modules/urllib3/packages/ssl_match_hostname/_implementation.py  156
-rw-r--r--  collectors/python.d.plugin/python_modules/urllib3/poolmanager.py  441
-rw-r--r--  collectors/python.d.plugin/python_modules/urllib3/request.py  149
-rw-r--r--  collectors/python.d.plugin/python_modules/urllib3/response.py  623
-rw-r--r--  collectors/python.d.plugin/python_modules/urllib3/util/__init__.py  55
-rw-r--r--  collectors/python.d.plugin/python_modules/urllib3/util/connection.py  131
-rw-r--r--  collectors/python.d.plugin/python_modules/urllib3/util/request.py  119
-rw-r--r--  collectors/python.d.plugin/python_modules/urllib3/util/response.py  82
-rw-r--r--  collectors/python.d.plugin/python_modules/urllib3/util/retry.py  402
-rw-r--r--  collectors/python.d.plugin/python_modules/urllib3/util/selectors.py  582
-rw-r--r--  collectors/python.d.plugin/python_modules/urllib3/util/ssl_.py  338
-rw-r--r--  collectors/python.d.plugin/python_modules/urllib3/util/timeout.py  243
-rw-r--r--  collectors/python.d.plugin/python_modules/urllib3/util/url.py  231
-rw-r--r--  collectors/python.d.plugin/python_modules/urllib3/util/wait.py  41
-rw-r--r--  collectors/python.d.plugin/rabbitmq/Makefile.inc  13
-rw-r--r--  collectors/python.d.plugin/rabbitmq/README.md  116
-rw-r--r--  collectors/python.d.plugin/rabbitmq/rabbitmq.chart.py  443
-rw-r--r--  collectors/python.d.plugin/rabbitmq/rabbitmq.conf  86
-rw-r--r--  collectors/python.d.plugin/redis/Makefile.inc  13
-rw-r--r--  collectors/python.d.plugin/redis/README.md  64
-rw-r--r--  collectors/python.d.plugin/redis/redis.chart.py  268
-rw-r--r--  collectors/python.d.plugin/redis/redis.conf  110
-rw-r--r--  collectors/python.d.plugin/rethinkdbs/Makefile.inc  13
-rw-r--r--  collectors/python.d.plugin/rethinkdbs/README.md  53
-rw-r--r--  collectors/python.d.plugin/rethinkdbs/rethinkdbs.chart.py  247
-rw-r--r--  collectors/python.d.plugin/rethinkdbs/rethinkdbs.conf  76
-rw-r--r--  collectors/python.d.plugin/retroshare/Makefile.inc  13
-rw-r--r--  collectors/python.d.plugin/retroshare/README.md  47
-rw-r--r--  collectors/python.d.plugin/retroshare/retroshare.chart.py  78
-rw-r--r--  collectors/python.d.plugin/retroshare/retroshare.conf  72
-rw-r--r--  collectors/python.d.plugin/riakkv/Makefile.inc  13
-rw-r--r--  collectors/python.d.plugin/riakkv/README.md  128
-rw-r--r--  collectors/python.d.plugin/riakkv/riakkv.chart.py  334
-rw-r--r--  collectors/python.d.plugin/riakkv/riakkv.conf  68
-rw-r--r--  collectors/python.d.plugin/samba/Makefile.inc  13
-rw-r--r--  collectors/python.d.plugin/samba/README.md  121
-rw-r--r--  collectors/python.d.plugin/samba/samba.chart.py  144
-rw-r--r--  collectors/python.d.plugin/samba/samba.conf  60
-rw-r--r--  collectors/python.d.plugin/sensors/Makefile.inc  13
-rw-r--r--  collectors/python.d.plugin/sensors/README.md  31
-rw-r--r--  collectors/python.d.plugin/sensors/sensors.chart.py  163
-rw-r--r--  collectors/python.d.plugin/sensors/sensors.conf  61
-rw-r--r--  collectors/python.d.plugin/smartd_log/Makefile.inc  13
-rw-r--r--  collectors/python.d.plugin/smartd_log/README.md  125
-rw-r--r--  collectors/python.d.plugin/smartd_log/smartd_log.chart.py  752
-rw-r--r--  collectors/python.d.plugin/smartd_log/smartd_log.conf  67
-rw-r--r--  collectors/python.d.plugin/spigotmc/Makefile.inc  13
-rw-r--r--  collectors/python.d.plugin/spigotmc/README.md  38
-rw-r--r--  collectors/python.d.plugin/spigotmc/spigotmc.chart.py  167
-rw-r--r--  collectors/python.d.plugin/spigotmc/spigotmc.conf  66
-rw-r--r--  collectors/python.d.plugin/springboot/Makefile.inc  13
-rw-r--r--  collectors/python.d.plugin/springboot/README.md  145
-rw-r--r--  collectors/python.d.plugin/springboot/springboot.chart.py  160
-rw-r--r--  collectors/python.d.plugin/springboot/springboot.conf  118
-rw-r--r--  collectors/python.d.plugin/squid/Makefile.inc  13
-rw-r--r--  collectors/python.d.plugin/squid/README.md  58
-rw-r--r--  collectors/python.d.plugin/squid/squid.chart.py  123
-rw-r--r--  collectors/python.d.plugin/squid/squid.conf  167
-rw-r--r--  collectors/python.d.plugin/tomcat/Makefile.inc  13
-rw-r--r--  collectors/python.d.plugin/tomcat/README.md  53
-rw-r--r--  collectors/python.d.plugin/tomcat/tomcat.chart.py  199
-rw-r--r--  collectors/python.d.plugin/tomcat/tomcat.conf  89
-rw-r--r--  collectors/python.d.plugin/tor/Makefile.inc  13
-rw-r--r--  collectors/python.d.plugin/tor/README.md  64
-rw-r--r--  collectors/python.d.plugin/tor/tor.chart.py  107
-rw-r--r--  collectors/python.d.plugin/tor/tor.conf  77
-rw-r--r--  collectors/python.d.plugin/traefik/Makefile.inc  13
-rw-r--r--  collectors/python.d.plugin/traefik/README.md  74
-rw-r--r--  collectors/python.d.plugin/traefik/traefik.chart.py  198
-rw-r--r--  collectors/python.d.plugin/traefik/traefik.conf  77
-rw-r--r--  collectors/python.d.plugin/uwsgi/Makefile.inc  13
-rw-r--r--  collectors/python.d.plugin/uwsgi/README.md  52
-rw-r--r--  collectors/python.d.plugin/uwsgi/uwsgi.chart.py  177
-rw-r--r--  collectors/python.d.plugin/uwsgi/uwsgi.conf  92
-rw-r--r--  collectors/python.d.plugin/varnish/Makefile.inc  13
-rw-r--r--  collectors/python.d.plugin/varnish/README.md  65
-rw-r--r--  collectors/python.d.plugin/varnish/varnish.chart.py  385
-rw-r--r--  collectors/python.d.plugin/varnish/varnish.conf  66
-rw-r--r--  collectors/python.d.plugin/w1sensor/Makefile.inc  13
-rw-r--r--  collectors/python.d.plugin/w1sensor/README.md  28
-rw-r--r--  collectors/python.d.plugin/w1sensor/w1sensor.chart.py  97
-rw-r--r--  collectors/python.d.plugin/w1sensor/w1sensor.conf  72
-rw-r--r--  collectors/python.d.plugin/web_log/Makefile.inc  13
-rw-r--r--  collectors/python.d.plugin/web_log/README.md  219
-rw-r--r--  collectors/python.d.plugin/web_log/web_log.chart.py  1194
-rw-r--r--  collectors/python.d.plugin/web_log/web_log.conf  219
-rw-r--r--  collectors/slabinfo.plugin/Makefile.am  14
-rw-r--r--  collectors/slabinfo.plugin/README.md  29
-rw-r--r--  collectors/slabinfo.plugin/slabinfo.c  418
-rw-r--r--  collectors/statsd.plugin/Makefile.am  21
-rw-r--r--  collectors/statsd.plugin/README.md  605
-rw-r--r--  collectors/statsd.plugin/example.conf  64
-rw-r--r--  collectors/statsd.plugin/statsd.c  2560
-rw-r--r--  collectors/statsd.plugin/statsd.h  25
-rw-r--r--  collectors/tc.plugin/Makefile.am  20
-rw-r--r--  collectors/tc.plugin/README.md  205
-rw-r--r--  collectors/tc.plugin/plugin_tc.c  1163
-rw-r--r--  collectors/tc.plugin/plugin_tc.h  31
-rwxr-xr-x  collectors/tc.plugin/tc-qos-helper.sh.in  297
-rw-r--r--  collectors/xenstat.plugin/Makefile.am  8
-rw-r--r--  collectors/xenstat.plugin/README.md  53
-rw-r--r--  collectors/xenstat.plugin/xenstat_plugin.c  1106
-rw-r--r--  configs.signatures  764
-rw-r--r--  configure.ac  1625
-rw-r--r--  contrib/README.md  61
-rw-r--r--  contrib/debian/changelog  6
-rw-r--r--  contrib/debian/compat  1
-rw-r--r--  contrib/debian/control  62
-rw-r--r--  contrib/debian/copyright  10
-rwxr-xr-x  contrib/debian/install_go.sh  98
-rw-r--r--  contrib/debian/netdata.default  7
-rw-r--r--  contrib/debian/netdata.docs  1
-rwxr-xr-x  contrib/debian/netdata.init  56
-rw-r--r--  contrib/debian/netdata.install  1
-rw-r--r--  contrib/debian/netdata.lintian-overrides  16
-rw-r--r--  contrib/debian/netdata.postinst  74
-rw-r--r--  contrib/debian/netdata.postrm  53
-rw-r--r--  contrib/debian/netdata.preinst  18
-rwxr-xr-x  contrib/debian/rules  127
-rw-r--r--  contrib/debian/source/format  1
-rwxr-xr-x  contrib/rhel/build-netdata-rpm.sh  56
-rw-r--r--  contrib/sles11/README.md  17
-rwxr-xr-x  contrib/sles11/alarm-notify-basic.bash3.sh  755
-rw-r--r--  contrib/sles11/netdata-alarms-bash3.patch  10
-rw-r--r--  contrib/sles11/netdata-automake-no-dist-xz.patch  13
-rw-r--r--  contrib/sles11/netdata-python-plugin-sles11.patch  28
-rwxr-xr-x  contrib/sles11/netdata.init  65
-rwxr-xr-x  coverity-scan.sh  204
-rwxr-xr-x  cppcheck.sh  35
-rw-r--r--  daemon/Makefile.am  23
-rw-r--r--  daemon/README.md  554
-rwxr-xr-x  daemon/anonymous-statistics.sh.in  104
-rw-r--r--  daemon/buildinfo.c  229
-rw-r--r--  daemon/buildinfo.h  8
-rw-r--r--  daemon/commands.c  726
-rw-r--r--  daemon/commands.h  81
-rw-r--r--  daemon/common.c  19
-rw-r--r--  daemon/common.h  101
-rw-r--r--  daemon/config/README.md  229
-rw-r--r--  daemon/daemon.c  501
-rw-r--r--  daemon/daemon.h  18
-rw-r--r--  daemon/get-kubernetes-labels.sh.in  41
-rw-r--r--  daemon/global_statistics.c  950
-rw-r--r--  daemon/global_statistics.h  23
-rw-r--r--  daemon/main.c  1532
-rw-r--r--  daemon/main.h  48
-rw-r--r--  daemon/signals.c  285
-rw-r--r--  daemon/signals.h  13
-rwxr-xr-x  daemon/system-info.sh  406
-rw-r--r--  daemon/unit_test.c  2235
-rw-r--r--  daemon/unit_test.h  19
-rw-r--r--  database/Makefile.am  12
-rw-r--r--  database/README.md  215
-rw-r--r--  database/engine/Makefile.am  12
-rw-r--r--  database/engine/README.md  259
-rw-r--r--  database/engine/datafile.c  460
-rw-r--r--  database/engine/datafile.h  67
-rw-r--r--  database/engine/journalfile.c  515
-rw-r--r--  database/engine/journalfile.h  49
-rw-r--r--  database/engine/metadata_log/Makefile.am  8
-rw-r--r--  database/engine/metadata_log/README.md  0
-rw-r--r--  database/engine/metadata_log/compaction.c  86
-rw-r--r--  database/engine/metadata_log/compaction.h  14
-rw-r--r--  database/engine/metadata_log/logfile.c  453
-rw-r--r--  database/engine/metadata_log/logfile.h  39
-rw-r--r--  database/engine/metadata_log/metadatalog.h  28
-rwxr-xr-x  database/engine/metadata_log/metadatalogapi.c  39
-rw-r--r--  database/engine/metadata_log/metadatalogapi.h  12
-rw-r--r--  database/engine/metadata_log/metadatalogprotocol.h  53
-rwxr-xr-x  database/engine/metadata_log/metalogpluginsd.c  140
-rw-r--r--  database/engine/metadata_log/metalogpluginsd.h  33
-rw-r--r--  database/engine/pagecache.c  1178
-rw-r--r--  database/engine/pagecache.h  231
-rw-r--r--  database/engine/rrddiskprotocol.h  119
-rw-r--r--  database/engine/rrdengine.c  1266
-rw-r--r--  database/engine/rrdengine.h  233
-rwxr-xr-x  database/engine/rrdengineapi.c  999
-rw-r--r--  database/engine/rrdengineapi.h  63
-rw-r--r--  database/engine/rrdenginelib.c  294
-rw-r--r--  database/engine/rrdenginelib.h  144
-rw-r--r--  database/engine/rrdenglocking.c  241
-rw-r--r--  database/engine/rrdenglocking.h  17
-rw-r--r--  database/rrd.c  160
-rw-r--r--  database/rrd.h  1318
-rw-r--r--  database/rrdcalc.c  767
-rw-r--r--  database/rrdcalc.h  182
-rw-r--r--  database/rrdcalctemplate.c  149
-rw-r--r--  database/rrdcalctemplate.h  89
-rw-r--r--  database/rrddim.c  631
-rw-r--r--  database/rrddimvar.c  217
-rw-r--r--  database/rrddimvar.h  56
-rw-r--r--  database/rrdfamily.c  61
-rw-r--r--  database/rrdhost.c  1606
-rw-r--r--  database/rrdlabels.c  203
-rw-r--r--  database/rrdset.c  1973
-rw-r--r--  database/rrdsetvar.c  189
-rw-r--r--  database/rrdsetvar.h  44
-rw-r--r--  database/rrdvar.c  305
-rw-r--r--  database/rrdvar.h  66
-rw-r--r--  database/sqlite/sqlite3.c  230536
-rw-r--r--  database/sqlite/sqlite3.h  12174
-rw-r--r--  database/sqlite/sqlite_functions.c  1022
-rw-r--r--  database/sqlite/sqlite_functions.h  62
-rw-r--r--  diagrams/Makefile.am  24
-rwxr-xr-x  diagrams/build.sh  21
-rw-r--r--  diagrams/config.puml  46
-rw-r--r--  diagrams/data_structures/README.md  18
-rw-r--r--  diagrams/data_structures/netdata_config.svg  2
-rw-r--r--  diagrams/data_structures/registry.svg  2
-rw-r--r--  diagrams/data_structures/rrd.svg  2
-rw-r--r--  diagrams/data_structures/src/netdata_config.xml  1
-rw-r--r--  diagrams/data_structures/src/registry.xml  1
-rw-r--r--  diagrams/data_structures/src/rrd.xml  1
-rw-r--r--  diagrams/data_structures/src/web.xml  1
-rw-r--r--  diagrams/data_structures/web.svg  2
-rw-r--r--  diagrams/netdata-for-ephemeral-nodes.xml  1
-rw-r--r--  diagrams/netdata-overview.xml  1
-rw-r--r--  diagrams/netdata-proxies-example.xml  1
-rw-r--r--  diagrams/registry.puml  40
-rw-r--r--  docs/Add-more-charts-to-netdata.md  16
-rw-r--r--  docs/Demo-Sites.md  25
-rw-r--r--  docs/Donations-netdata-has-received.md  29
-rw-r--r--  docs/README.md  17
-rw-r--r--  docs/Running-behind-apache.md  373
-rw-r--r--  docs/Running-behind-caddy.md  34
-rw-r--r--  docs/Running-behind-haproxy.md  293
-rw-r--r--  docs/Running-behind-lighttpd.md  71
-rw-r--r--  docs/Running-behind-nginx.md  264
-rw-r--r--  docs/a-github-star-is-important.md  24
-rw-r--r--  docs/agent-cloud.md  79
-rw-r--r--  docs/anonymous-statistics.md  109
-rw-r--r--  docs/collect/application-metrics.md  80
-rw-r--r--  docs/collect/container-metrics.md  99
-rw-r--r--  docs/collect/enable-configure.md  66
-rw-r--r--  docs/collect/how-collectors-work.md  80
-rw-r--r--  docs/collect/system-metrics.md  65
-rw-r--r--  docs/configure/common-changes.md  214
-rw-r--r--  docs/configure/nodes.md  165
-rw-r--r--  docs/configure/secure-nodes.md  123
-rw-r--r--  docs/configure/start-stop-restart.md  98
-rw-r--r--  docs/contributing/contributing-documentation.md  109
-rw-r--r--  docs/contributing/style-guide.md  492
-rw-r--r--  docs/export/enable-connector.md  93
-rw-r--r--  docs/export/external-databases.md  90
-rw-r--r--  docs/get/README.md  158
-rw-r--r--  docs/getting-started.md  240
-rw-r--r--  docs/guides/collect-apache-nginx-web-logs.md  161
-rw-r--r--  docs/guides/collect-unbound-metrics.md  138
-rw-r--r--  docs/guides/configure/performance.md  235
-rw-r--r--  docs/guides/deploy/ansible.md  174
-rw-r--r--  docs/guides/export/export-netdata-metrics-graphite.md  184
-rw-r--r--  docs/guides/longer-metrics-storage.md  160
-rw-r--r--  docs/guides/monitor-cockroachdb.md  136
-rw-r--r--  docs/guides/monitor-hadoop-cluster.md  204
-rw-r--r--  docs/guides/monitor/anomaly-detection.md  191
-rw-r--r--  docs/guides/monitor/dimension-templates.md  176
-rw-r--r--  docs/guides/monitor/kubernetes-k8s-netdata.md  278
-rw-r--r--  docs/guides/monitor/pi-hole-raspberry-pi.md  163
-rw-r--r--  docs/guides/monitor/process.md  299
-rw-r--r--  docs/guides/monitor/stop-notifications-alarms.md  92
-rw-r--r--  docs/guides/monitor/visualize-monitor-anomalies.md  147
-rw-r--r--  docs/guides/step-by-step/step-00.md  115
-rw-r--r--  docs/guides/step-by-step/step-01.md  156
-rw-r--r--  docs/guides/step-by-step/step-02.md  208
-rw-r--r--  docs/guides/step-by-step/step-03.md  91
-rw-r--r--  docs/guides/step-by-step/step-04.md  144
-rw-r--r--  docs/guides/step-by-step/step-05.md  343
-rw-r--r--  docs/guides/step-by-step/step-06.md  122
-rw-r--r--  docs/guides/step-by-step/step-07.md  114
-rw-r--r--  docs/guides/step-by-step/step-08.md  395
-rw-r--r--  docs/guides/step-by-step/step-09.md  164
-rw-r--r--  docs/guides/step-by-step/step-10.md  230
-rw-r--r--  docs/guides/step-by-step/step-99.md  51
-rw-r--r--  docs/guides/troubleshoot/monitor-debug-applications-ebpf.md  268
-rw-r--r--  docs/guides/using-host-labels.md  212
-rw-r--r--  docs/monitor/configure-alarms.md  148
-rw-r--r--  docs/monitor/enable-notifications.md  144
-rw-r--r--  docs/monitor/view-active-alarms.md  75
-rw-r--r--  docs/netdata-for-IoT.md  74
-rw-r--r--  docs/netdata-security.md  229
-rw-r--r--  docs/overview/netdata-monitoring-stack.md  62
-rw-r--r--  docs/overview/what-is-netdata.md  76
-rw-r--r--  docs/overview/why-netdata.md  63
-rw-r--r--  docs/privacy-policy.md  134
-rw-r--r--  docs/quickstart/infrastructure.md  184
-rw-r--r--  docs/quickstart/single-node.md  96
-rw-r--r--  docs/store/change-metrics-storage.md  72
-rw-r--r--  docs/store/distributed-data-architecture.md  71
-rw-r--r--  docs/terms-of-use.md  166
-rw-r--r--  docs/visualize/create-dashboards.md  64
-rw-r--r--  docs/visualize/interact-dashboards-charts.md  127
-rw-r--r--  docs/visualize/overview-infrastructure.md  109
-rw-r--r--  docs/why-netdata/1s-granularity.md  59
-rw-r--r--  docs/why-netdata/README.md  35
-rw-r--r--  docs/why-netdata/immediate-results.md  46
-rw-r--r--  docs/why-netdata/meaningful-presentation.md  68
-rw-r--r--  docs/why-netdata/unlimited-metrics.md  49
-rw-r--r--  exporting/Makefile.am  29
-rw-r--r--  exporting/README.md  312
-rw-r--r--  exporting/TIMESCALE.md  69
-rw-r--r--  exporting/WALKTHROUGH.md  259
-rw-r--r--  exporting/aws_kinesis/Makefile.am  8
-rw-r--r--  exporting/aws_kinesis/README.md  58
-rw-r--r--  exporting/aws_kinesis/aws_kinesis.c  217
-rw-r--r--  exporting/aws_kinesis/aws_kinesis.h  16
-rw-r--r--  exporting/aws_kinesis/aws_kinesis_put_record.cc  151
-rw-r--r--  exporting/aws_kinesis/aws_kinesis_put_record.h  35
-rw-r--r--  exporting/check_filters.c  78
-rw-r--r--  exporting/clean_connectors.c  81
-rw-r--r--  exporting/exporting.conf  89
-rw-r--r--  exporting/exporting_engine.c  143
-rw-r--r--  exporting/exporting_engine.h  304
-rw-r--r--  exporting/graphite/Makefile.am  4
-rw-r--r--  exporting/graphite/README.md  30
-rw-r--r--  exporting/graphite/graphite.c  228
-rw-r--r--  exporting/graphite/graphite.h  18
-rw-r--r--  exporting/init_connectors.c  145
-rw-r--r--  exporting/json/Makefile.am  4
-rw-r--r--  exporting/json/README.md  30
-rw-r--r--  exporting/json/json.c  362
-rw-r--r--  exporting/json/json.h  21
-rw-r--r--  exporting/mongodb/Makefile.am  8
-rw-r--r--  exporting/mongodb/README.md  38
-rw-r--r--  exporting/mongodb/mongodb.c  387
-rw-r--r--  exporting/mongodb/mongodb.h  35
-rwxr-xr-x  exporting/nc-exporting.sh  158
-rw-r--r--  exporting/opentsdb/Makefile.am  8
-rw-r--r--  exporting/opentsdb/README.md  30
-rw-r--r--  exporting/opentsdb/opentsdb.c  422
-rw-r--r--  exporting/opentsdb/opentsdb.h  26
-rw-r--r--  exporting/process_data.c  426
-rw-r--r--  exporting/prometheus/Makefile.am  12
-rw-r--r--  exporting/prometheus/README.md  461
-rw-r--r--  exporting/prometheus/prometheus.c  916
-rw-r--r--  exporting/prometheus/prometheus.h  41
-rw-r--r--  exporting/prometheus/remote_write/Makefile.am  14
-rw-r--r--  exporting/prometheus/remote_write/README.md  51
-rw-r--r--  exporting/prometheus/remote_write/remote_write.c  345
-rw-r--r--  exporting/prometheus/remote_write/remote_write.h  25
-rw-r--r--  exporting/prometheus/remote_write/remote_write.proto  29
-rw-r--r--  exporting/prometheus/remote_write/remote_write_request.cc  186
-rw-r--r--  exporting/prometheus/remote_write/remote_write_request.h  33
-rw-r--r--  exporting/pubsub/Makefile.am  8
-rw-r--r--  exporting/pubsub/README.md  45
-rw-r--r--  exporting/pubsub/pubsub.c  194
-rw-r--r--  exporting/pubsub/pubsub.h  14
-rw-r--r--  exporting/pubsub/pubsub_publish.cc  258
-rw-r--r--  exporting/pubsub/pubsub_publish.h  37
-rw-r--r--  exporting/read_config.c  503
-rw-r--r--  exporting/send_data.c  440
-rw-r--r--  exporting/send_internal_metrics.c  172
-rw-r--r--  exporting/tests/Makefile.am  4
-rw-r--r--  exporting/tests/exporting_doubles.c  395
-rw-r--r--  exporting/tests/exporting_fixtures.c  163
-rw-r--r--  exporting/tests/netdata_doubles.c  247
-rw-r--r--  exporting/tests/system_doubles.c  61
-rw-r--r--  exporting/tests/test_exporting_engine.c  1939
-rw-r--r--  exporting/tests/test_exporting_engine.h  203
-rw-r--r--  health/Makefile.am  111
-rw-r--r--  health/QUICKSTART.md  143
-rw-r--r--  health/README.md  38
-rw-r--r--  health/REFERENCE.md  797
-rw-r--r--  health/health.c  1047
-rw-r--r--  health/health.d/adaptec_raid.conf  24
-rw-r--r--  health/health.d/am2320.conf  12
-rw-r--r--  health/health.d/anomalies.conf  17
-rw-r--r--  health/health.d/apache.conf  14
-rw-r--r--  health/health.d/apcupsd.conf  40
-rw-r--r--  health/health.d/apps_plugin.conf  15
-rw-r--r--  health/health.d/backend.conf  56
-rw-r--r--  health/health.d/bcache.conf  22
-rw-r--r--  health/health.d/beanstalkd.conf  36
-rw-r--r--  health/health.d/bind_rndc.conf  9
-rw-r--r--  health/health.d/boinc.conf  62
-rw-r--r--  health/health.d/btrfs.conf  57
-rw-r--r--  health/health.d/ceph.conf  13
-rw-r--r--  health/health.d/cgroups.conf  41
-rw-r--r--  health/health.d/cockroachdb.conf  91
-rw-r--r--  health/health.d/couchdb.conf  13
-rw-r--r--  health/health.d/cpu.conf  55
-rw-r--r--  health/health.d/dbengine.conf  50
-rw-r--r--  health/health.d/disks.conf  167
-rw-r--r--  health/health.d/dns_query.conf  12
-rw-r--r--  health/health.d/dnsmasq_dhcp.conf  12
-rw-r--r--  health/health.d/dockerd.conf  8
-rw-r--r--  health/health.d/elasticsearch.conf  12
-rw-r--r--  health/health.d/entropy.conf  16
-rw-r--r--  health/health.d/exporting.conf  34
-rw-r--r--  health/health.d/fping.conf  53
-rw-r--r--  health/health.d/fronius.conf  11
-rw-r--r--  health/health.d/gearman.conf  22
-rw-r--r--  health/health.d/haproxy.conf  27
-rw-r--r--  health/health.d/hdfs.conf  75
-rw-r--r--  health/health.d/httpcheck.conf  99
-rw-r--r--  health/health.d/ioping.conf  13
-rw-r--r--  health/health.d/ipc.conf  28
-rw-r--r--  health/health.d/ipfs.conf  11
-rw-r--r--  health/health.d/ipmi.conf  20
-rw-r--r--  health/health.d/isc_dhcpd.conf  10
-rw-r--r--  health/health.d/kubelet.conf  115
-rw-r--r--  health/health.d/lighttpd.conf  14
-rw-r--r--  health/health.d/linux_power_supply.conf  12
-rw-r--r--  health/health.d/load.conf  56
-rw-r--r--  health/health.d/mdstat.conf  38
-rw-r--r--  health/health.d/megacli.conf  48
-rw-r--r--  health/health.d/memcached.conf  52
-rw-r--r--  health/health.d/memory.conf  38
-rw-r--r--  health/health.d/mongodb.conf  13
-rw-r--r--  health/health.d/mysql.conf  146
-rw-r--r--  health/health.d/named.conf  14
-rw-r--r--  health/health.d/net.conf  195
-rw-r--r--  health/health.d/netfilter.conf  29
-rw-r--r--  health/health.d/nginx.conf  14
-rw-r--r--  health/health.d/nginx_plus.conf  14
-rw-r--r--  health/health.d/phpfpm.conf  14
-rw-r--r--  health/health.d/pihole.conf  65
-rw-r--r--  health/health.d/portcheck.conf  46
-rw-r--r--  health/health.d/postgres.conf  13
-rw-r--r--  health/health.d/processes.conf  13
-rw-r--r--  health/health.d/pulsar.conf  13
-rw-r--r--  health/health.d/qos.conf  18
-rw-r--r--  health/health.d/ram.conf  64
-rw-r--r--  health/health.d/redis.conf  34
-rw-r--r--  health/health.d/retroshare.conf  25
-rw-r--r--  health/health.d/riakkv.conf  80
-rw-r--r--  health/health.d/scaleio.conf  38
-rw-r--r--  health/health.d/softnet.conf  40
-rw-r--r--  health/health.d/squid.conf  14
-rw-r--r--  health/health.d/stiebeleltron.conf  11
-rw-r--r--  health/health.d/swap.conf  43
-rw-r--r--  health/health.d/tcp_conn.conf  19
-rw-r--r--  health/health.d/tcp_listen.conf  83
-rw-r--r--  health/health.d/tcp_mem.conf  20
-rw-r--r--  health/health.d/tcp_orphans.conf  21
-rw-r--r--  health/health.d/tcp_resets.conf  67
-rw-r--r--  health/health.d/udp_errors.conf  49
-rw-r--r--  health/health.d/unbound.conf  35
-rw-r--r--  health/health.d/varnish.conf  9
-rw-r--r--  health/health.d/vcsa.conf  122
-rw-r--r--  health/health.d/vernemq.conf  399
-rw-r--r--  health/health.d/vsphere.conf  157
-rw-r--r--  health/health.d/web_log.conf  385
-rw-r--r--  health/health.d/whoisquery.conf  24
-rw-r--r--  health/health.d/wmi.conf  130
-rw-r--r--  health/health.d/x509check.conf  32
-rw-r--r--  health/health.d/zfs.conf  10
-rw-r--r--health/health.d/zookeeper.conf14
-rw-r--r--health/health.h112
-rw-r--r--health/health_config.c1035
-rw-r--r--health/health_json.c368
-rw-r--r--health/health_log.c573
-rw-r--r--health/notifications/Makefile.am51
-rw-r--r--health/notifications/README.md86
-rwxr-xr-xhealth/notifications/alarm-email.sh7
-rwxr-xr-xhealth/notifications/alarm-notify.sh.in2759
-rwxr-xr-xhealth/notifications/alarm-test.sh12
-rw-r--r--health/notifications/alerta/Makefile.inc12
-rw-r--r--health/notifications/alerta/README.md94
-rw-r--r--health/notifications/awssns/Makefile.inc12
-rw-r--r--health/notifications/awssns/README.md43
-rw-r--r--health/notifications/custom/Makefile.inc12
-rw-r--r--health/notifications/custom/README.md88
-rw-r--r--health/notifications/discord/Makefile.inc12
-rw-r--r--health/notifications/discord/README.md50
-rw-r--r--health/notifications/dynatrace/Makefile.inc12
-rw-r--r--health/notifications/dynatrace/README.md36
-rw-r--r--health/notifications/email/Makefile.inc12
-rw-r--r--health/notifications/email/README.md63
-rw-r--r--health/notifications/flock/Makefile.inc12
-rw-r--r--health/notifications/flock/README.md37
-rw-r--r--health/notifications/hangouts/Makefile.inc12
-rw-r--r--health/notifications/hangouts/README.md55
-rwxr-xr-xhealth/notifications/health_alarm_notify.conf1251
-rw-r--r--health/notifications/health_email_recipients.conf2
-rw-r--r--health/notifications/irc/Makefile.inc12
-rw-r--r--health/notifications/irc/README.md78
-rw-r--r--health/notifications/kavenegar/Makefile.inc12
-rw-r--r--health/notifications/kavenegar/README.md46
-rw-r--r--health/notifications/matrix/Makefile.inc12
-rw-r--r--health/notifications/matrix/README.md58
-rw-r--r--health/notifications/messagebird/Makefile.inc12
-rw-r--r--health/notifications/messagebird/README.md45
-rw-r--r--health/notifications/opsgenie/Makefile.inc12
-rw-r--r--health/notifications/opsgenie/README.md62
-rw-r--r--health/notifications/pagerduty/Makefile.inc12
-rw-r--r--health/notifications/pagerduty/README.md46
-rw-r--r--health/notifications/prowl/Makefile.inc12
-rw-r--r--health/notifications/prowl/README.md29
-rw-r--r--health/notifications/pushbullet/Makefile.inc12
-rw-r--r--health/notifications/pushbullet/README.md48
-rw-r--r--health/notifications/pushover/Makefile.inc12
-rw-r--r--health/notifications/pushover/README.md23
-rw-r--r--health/notifications/rocketchat/Makefile.inc12
-rw-r--r--health/notifications/rocketchat/README.md52
-rw-r--r--health/notifications/slack/Makefile.inc12
-rw-r--r--health/notifications/slack/README.md50
-rw-r--r--health/notifications/smstools3/Makefile.inc12
-rw-r--r--health/notifications/smstools3/README.md44
-rw-r--r--health/notifications/stackpulse/Makefile.inc12
-rw-r--r--health/notifications/stackpulse/README.md80
-rw-r--r--health/notifications/syslog/Makefile.inc12
-rw-r--r--health/notifications/syslog/README.md34
-rw-r--r--health/notifications/telegram/Makefile.inc12
-rw-r--r--health/notifications/telegram/README.md45
-rw-r--r--health/notifications/twilio/Makefile.inc12
-rw-r--r--health/notifications/twilio/README.md47
-rw-r--r--health/notifications/web/Makefile.inc12
-rw-r--r--health/notifications/web/README.md13
-rw-r--r--libnetdata/Makefile.am32
-rw-r--r--libnetdata/README.md10
-rw-r--r--libnetdata/adaptive_resortable_list/Makefile.am8
-rw-r--r--libnetdata/adaptive_resortable_list/README.md99
-rw-r--r--libnetdata/adaptive_resortable_list/adaptive_resortable_list.c280
-rw-r--r--libnetdata/adaptive_resortable_list/adaptive_resortable_list.h138
-rw-r--r--libnetdata/avl/Makefile.am8
-rw-r--r--libnetdata/avl/README.md17
-rw-r--r--libnetdata/avl/avl.c420
-rw-r--r--libnetdata/avl/avl.h92
-rw-r--r--libnetdata/buffer/Makefile.am8
-rw-r--r--libnetdata/buffer/README.md17
-rw-r--r--libnetdata/buffer/buffer.c426
-rw-r--r--libnetdata/buffer/buffer.h86
-rw-r--r--libnetdata/circular_buffer/Makefile.am8
-rw-r--r--libnetdata/circular_buffer/README.md12
-rw-r--r--libnetdata/circular_buffer/circular_buffer.c85
-rw-r--r--libnetdata/circular_buffer/circular_buffer.h16
-rw-r--r--libnetdata/clocks/Makefile.am8
-rw-r--r--libnetdata/clocks/README.md5
-rw-r--r--libnetdata/clocks/clocks.c300
-rw-r--r--libnetdata/clocks/clocks.h169
-rw-r--r--libnetdata/config/Makefile.am8
-rw-r--r--libnetdata/config/README.md54
-rw-r--r--libnetdata/config/appconfig.c883
-rw-r--r--libnetdata/config/appconfig.h209
-rw-r--r--libnetdata/dictionary/Makefile.am8
-rw-r--r--libnetdata/dictionary/README.md5
-rw-r--r--libnetdata/dictionary/dictionary.c329
-rw-r--r--libnetdata/dictionary/dictionary.h49
-rw-r--r--libnetdata/ebpf/Makefile.am8
-rw-r--r--libnetdata/ebpf/README.md5
-rw-r--r--libnetdata/ebpf/ebpf.c324
-rw-r--r--libnetdata/ebpf/ebpf.h105
-rw-r--r--libnetdata/eval/Makefile.am8
-rw-r--r--libnetdata/eval/README.md1
-rw-r--r--libnetdata/eval/eval.c1190
-rw-r--r--libnetdata/eval/eval.h88
-rw-r--r--libnetdata/health/Makefile.am7
-rw-r--r--libnetdata/health/health.c173
-rw-r--r--libnetdata/health/health.h55
-rw-r--r--libnetdata/inlined.h318
-rw-r--r--libnetdata/json/Makefile.am8
-rw-r--r--libnetdata/json/README.md10
-rw-r--r--libnetdata/json/jsmn.c328
-rw-r--r--libnetdata/json/jsmn.h75
-rw-r--r--libnetdata/json/json.c556
-rw-r--r--libnetdata/json/json.h72
-rw-r--r--libnetdata/libnetdata.c1493
-rw-r--r--libnetdata/libnetdata.h340
-rw-r--r--libnetdata/locks/Makefile.am8
-rw-r--r--libnetdata/locks/README.md5
-rw-r--r--libnetdata/locks/locks.c343
-rw-r--r--libnetdata/locks/locks.h75
-rw-r--r--libnetdata/log/Makefile.am8
-rw-r--r--libnetdata/log/README.md5
-rw-r--r--libnetdata/log/log.c872
-rw-r--r--libnetdata/log/log.h109
-rw-r--r--libnetdata/os.c217
-rw-r--r--libnetdata/os.h70
-rw-r--r--libnetdata/popen/Makefile.am8
-rw-r--r--libnetdata/popen/README.md5
-rw-r--r--libnetdata/popen/popen.c337
-rw-r--r--libnetdata/popen/popen.h23
-rw-r--r--libnetdata/procfile/Makefile.am8
-rw-r--r--libnetdata/procfile/README.md67
-rw-r--r--libnetdata/procfile/procfile.c472
-rw-r--r--libnetdata/procfile/procfile.h106
-rw-r--r--libnetdata/required_dummies.h38
-rw-r--r--libnetdata/simple_pattern/Makefile.am8
-rw-r--r--libnetdata/simple_pattern/README.md43
-rw-r--r--libnetdata/simple_pattern/simple_pattern.c365
-rw-r--r--libnetdata/simple_pattern/simple_pattern.h40
-rw-r--r--libnetdata/socket/Makefile.am8
-rw-r--r--libnetdata/socket/README.md5
-rw-r--r--libnetdata/socket/security.c385
-rw-r--r--libnetdata/socket/security.h55
-rw-r--r--libnetdata/socket/socket.c1733
-rw-r--r--libnetdata/socket/socket.h207
-rw-r--r--libnetdata/statistical/Makefile.am8
-rw-r--r--libnetdata/statistical/README.md5
-rw-r--r--libnetdata/statistical/statistical.c452
-rw-r--r--libnetdata/statistical/statistical.h26
-rw-r--r--libnetdata/storage_number/Makefile.am12
-rw-r--r--libnetdata/storage_number/README.md17
-rw-r--r--libnetdata/storage_number/storage_number.c252
-rw-r--r--libnetdata/storage_number/storage_number.h94
-rw-r--r--libnetdata/storage_number/tests/Makefile.am4
-rw-r--r--libnetdata/storage_number/tests/test_storage_number.c52
-rw-r--r--libnetdata/string/utf8.h9
-rw-r--r--libnetdata/tests/Makefile.am4
-rw-r--r--libnetdata/tests/test_str2ld.c48
-rw-r--r--libnetdata/threads/Makefile.am8
-rw-r--r--libnetdata/threads/README.md5
-rw-r--r--libnetdata/threads/threads.c241
-rw-r--r--libnetdata/threads/threads.h42
-rw-r--r--libnetdata/url/Makefile.am8
-rw-r--r--libnetdata/url/README.md5
-rw-r--r--libnetdata/url/url.c391
-rw-r--r--libnetdata/url/url.h35
-rwxr-xr-xnetdata-installer.sh1909
-rw-r--r--netdata.cppcheck17
-rw-r--r--netdata.spec.in577
-rw-r--r--package-lock.json6320
-rw-r--r--package.json57
-rw-r--r--packaging/DISTRIBUTIONS.md256
-rw-r--r--packaging/Dockerfile.packager42
-rwxr-xr-xpackaging/build_package_install_test.sh37
-rwxr-xr-xpackaging/bundle-dashboard.sh14
-rwxr-xr-xpackaging/bundle-ebpf.sh14
-rwxr-xr-xpackaging/bundle-judy.sh23
-rwxr-xr-xpackaging/bundle-libbpf.sh18
-rwxr-xr-xpackaging/bundle-lws.sh17
-rwxr-xr-xpackaging/bundle-mosquitto.sh12
-rwxr-xr-xpackaging/check-kernel-config.sh73
-rw-r--r--packaging/dashboard.checksums1
-rw-r--r--packaging/dashboard.version1
-rw-r--r--packaging/docker/Dockerfile114
-rw-r--r--packaging/docker/README.md461
-rwxr-xr-xpackaging/docker/health.sh17
-rwxr-xr-xpackaging/docker/run.sh30
-rw-r--r--packaging/ebpf.checksums3
-rw-r--r--packaging/ebpf.version1
-rw-r--r--packaging/go.d.checksums16
-rw-r--r--packaging/go.d.version1
-rw-r--r--packaging/installer/README.md253
-rw-r--r--packaging/installer/REINSTALL.md68
-rw-r--r--packaging/installer/UNINSTALL.md54
-rw-r--r--packaging/installer/UPDATE.md153
-rw-r--r--packaging/installer/functions.sh1054
-rwxr-xr-xpackaging/installer/install-required-packages.sh2175
-rwxr-xr-xpackaging/installer/kickstart-static64.sh368
-rwxr-xr-xpackaging/installer/kickstart.sh491
-rw-r--r--packaging/installer/methods/alpine.md36
-rw-r--r--packaging/installer/methods/cloud-providers.md130
-rw-r--r--packaging/installer/methods/freebsd.md108
-rw-r--r--packaging/installer/methods/freenas.md24
-rw-r--r--packaging/installer/methods/kickstart-64.md96
-rw-r--r--packaging/installer/methods/kickstart.md79
-rw-r--r--packaging/installer/methods/kubernetes.md209
-rw-r--r--packaging/installer/methods/macos.md90
-rw-r--r--packaging/installer/methods/manual.md229
-rw-r--r--packaging/installer/methods/offline.md90
-rw-r--r--packaging/installer/methods/packages.md48
-rw-r--r--packaging/installer/methods/pfsense.md84
-rw-r--r--packaging/installer/methods/source.md291
-rw-r--r--packaging/installer/methods/synology.md52
-rwxr-xr-xpackaging/installer/netdata-uninstaller.sh499
-rwxr-xr-xpackaging/installer/netdata-updater.sh428
-rw-r--r--packaging/jsonc.checksums1
-rw-r--r--packaging/jsonc.version1
-rw-r--r--packaging/judy.checksums1
-rw-r--r--packaging/judy.version1
-rw-r--r--packaging/libbpf.checksums1
-rw-r--r--packaging/libbpf.version1
-rw-r--r--packaging/libwebsockets.checksums1
-rw-r--r--packaging/libwebsockets.version1
-rw-r--r--packaging/maintainers/README.md86
-rw-r--r--packaging/makeself/README.md54
-rwxr-xr-xpackaging/makeself/build-x86_64-static.sh48
-rwxr-xr-xpackaging/makeself/build.sh57
-rwxr-xr-xpackaging/makeself/functions.sh62
-rwxr-xr-xpackaging/makeself/install-alpine-packages.sh51
-rwxr-xr-xpackaging/makeself/install-or-update.sh269
-rwxr-xr-xpackaging/makeself/jobs/10-prepare-destination.install.sh17
-rwxr-xr-xpackaging/makeself/jobs/20-openssl.install.sh20
-rwxr-xr-xpackaging/makeself/jobs/50-bash-5.0.install.sh33
-rwxr-xr-xpackaging/makeself/jobs/50-curl-7.73.0.install.sh36
-rwxr-xr-xpackaging/makeself/jobs/50-fping-5.0.install.sh30
-rwxr-xr-xpackaging/makeself/jobs/50-ioping-1.2.install.sh18
-rwxr-xr-xpackaging/makeself/jobs/70-netdata-git.install.sh50
-rwxr-xr-xpackaging/makeself/jobs/99-makeself.install.sh101
-rwxr-xr-xpackaging/makeself/makeself-header.sh556
-rw-r--r--packaging/makeself/makeself-help-header.txt49
-rw-r--r--packaging/makeself/makeself-license.txt44
-rw-r--r--packaging/makeself/makeself.lsm16
-rwxr-xr-xpackaging/makeself/makeself.sh621
-rw-r--r--packaging/makeself/openssl.version1
-rwxr-xr-xpackaging/makeself/post-installer.sh11
-rwxr-xr-xpackaging/makeself/run-all-jobs.sh42
-rw-r--r--packaging/mosquitto.checksums1
-rw-r--r--packaging/mosquitto.version1
-rwxr-xr-xpackaging/scripts/install.sh58
-rwxr-xr-xpackaging/scripts/test.sh27
-rw-r--r--packaging/version1
-rw-r--r--parser/Makefile.am9
-rw-r--r--parser/README.md149
-rw-r--r--parser/parser.c320
-rw-r--r--parser/parser.h114
-rw-r--r--registry/Makefile.am8
-rw-r--r--registry/README.md203
-rw-r--r--registry/registry.c435
-rw-r--r--registry/registry.h83
-rw-r--r--registry/registry_db.c346
-rw-r--r--registry/registry_init.c147
-rw-r--r--registry/registry_internals.c332
-rw-r--r--registry/registry_internals.h89
-rw-r--r--registry/registry_log.c136
-rw-r--r--registry/registry_machine.c104
-rw-r--r--registry/registry_machine.h43
-rw-r--r--registry/registry_person.c267
-rw-r--r--registry/registry_person.h62
-rw-r--r--registry/registry_url.c88
-rw-r--r--registry/registry_url.h35
-rw-r--r--spawn/Makefile.am9
-rw-r--r--spawn/README.md0
-rw-r--r--spawn/spawn.c290
-rw-r--r--spawn/spawn.h109
-rw-r--r--spawn/spawn_client.c241
-rw-r--r--spawn/spawn_server.c377
-rw-r--r--streaming/Makefile.am12
-rw-r--r--streaming/README.md624
-rw-r--r--streaming/receiver.c499
-rw-r--r--streaming/rrdpush.c731
-rw-r--r--streaming/rrdpush.h117
-rw-r--r--streaming/sender.c723
-rw-r--r--streaming/stream.conf205
-rw-r--r--system/Makefile.am57
-rwxr-xr-xsystem/edit-config.in83
-rw-r--r--system/netdata-freebsd.in52
-rw-r--r--system/netdata-init-d.in94
-rw-r--r--system/netdata-lsb.in114
-rw-r--r--system/netdata-openrc.in60
-rw-r--r--system/netdata-updater.service.in8
-rw-r--r--system/netdata-updater.timer12
-rw-r--r--system/netdata.conf28
-rw-r--r--system/netdata.crontab.in1
-rw-r--r--system/netdata.logrotate.in12
-rw-r--r--system/netdata.plist.in15
-rw-r--r--system/netdata.service.in77
-rw-r--r--system/netdata.service.v235.in48
-rw-r--r--tests/Makefile.am45
-rw-r--r--tests/README.md148
-rw-r--r--tests/acls/acl.sh.in126
-rw-r--r--tests/acls/netdata.cfg20
-rw-r--r--tests/acls/netdata.ssl.cfg24
-rw-r--r--tests/alarm_repetition/alarm.sh.in89
-rw-r--r--tests/alarm_repetition/netdata.conf_with_repetition57
-rw-r--r--tests/alarm_repetition/netdata.conf_without_repetition57
-rw-r--r--tests/alarm_repetition/ram_with_repetition.conf64
-rw-r--r--tests/alarm_repetition/ram_without_repetition.conf63
-rw-r--r--tests/api/fuzzer.py378
-rw-r--r--tests/backends/prometheus-avg-oldunits.txt148
-rw-r--r--tests/backends/prometheus-avg.txt148
-rw-r--r--tests/backends/prometheus-raw.txt156
-rwxr-xr-xtests/backends/prometheus.bats31
-rw-r--r--tests/health_mgmtapi/README.md15
-rw-r--r--tests/health_mgmtapi/expected_list/ALARM_CPU_IOWAIT-list.json1
-rw-r--r--tests/health_mgmtapi/expected_list/ALARM_CPU_USAGE-list.json1
-rw-r--r--tests/health_mgmtapi/expected_list/CONTEXT_SYSTEM_CPU-list.json1
-rw-r--r--tests/health_mgmtapi/expected_list/DISABLE-list.json1
-rw-r--r--tests/health_mgmtapi/expected_list/DISABLE_ALL-list.json1
-rw-r--r--tests/health_mgmtapi/expected_list/DISABLE_ALL_ERROR-list.json1
-rw-r--r--tests/health_mgmtapi/expected_list/DISABLE_SYSTEM_LOAD-list.json1
-rw-r--r--tests/health_mgmtapi/expected_list/FAMILIES_LOAD-list.json1
-rw-r--r--tests/health_mgmtapi/expected_list/HOSTS-list.json1
-rw-r--r--tests/health_mgmtapi/expected_list/RESET-list.json1
-rw-r--r--tests/health_mgmtapi/expected_list/SILENCE-list.json1
-rw-r--r--tests/health_mgmtapi/expected_list/SILENCE_2-list.json1
-rw-r--r--tests/health_mgmtapi/expected_list/SILENCE_3-list.json1
-rw-r--r--tests/health_mgmtapi/expected_list/SILENCE_ALARM_CPU_USAGE-list.json1
-rw-r--r--tests/health_mgmtapi/expected_list/SILENCE_ALARM_CPU_USAGE_LOAD_TRIGGER-list.json1
-rw-r--r--tests/health_mgmtapi/expected_list/SILENCE_ALL-list.json1
-rwxr-xr-xtests/health_mgmtapi/health-cmdapi-test.sh.in226
-rwxr-xr-xtests/installer/checksums.sh54
-rwxr-xr-xtests/installer/slack.sh65
-rw-r--r--tests/k6/data.js67
-rwxr-xr-xtests/lifecycle.bats61
-rw-r--r--tests/node.d/fronius.chart.spec.js162
-rw-r--r--tests/node.d/fronius.parse.spec.js333
-rw-r--r--tests/node.d/fronius.process.spec.js75
-rw-r--r--tests/node.d/fronius.validation.spec.js155
-rw-r--r--tests/profile/Makefile51
-rw-r--r--tests/profile/benchmark-dictionary.c130
-rw-r--r--tests/profile/benchmark-line-parsing.c702
-rw-r--r--tests/profile/benchmark-procfile-parser.c329
-rw-r--r--tests/profile/benchmark-registry.c227
-rw-r--r--tests/profile/benchmark-value-pairs.c623
-rw-r--r--tests/profile/statsd-stress.c151
-rw-r--r--tests/profile/test-eval.c299
-rwxr-xr-xtests/run-unit-tests.sh39
-rwxr-xr-xtests/stress.sh83
-rw-r--r--tests/template_dimension/system_cpu.conf.alarm_foreach8
-rw-r--r--tests/template_dimension/system_cpu.conf.alarm_foreach_sp8
-rw-r--r--tests/template_dimension/system_cpu.conf.template_alarm26
-rw-r--r--tests/template_dimension/system_cpu.conf.template_foreach8
-rw-r--r--tests/template_dimension/system_cpu.conf.template_foreach_sp8
-rw-r--r--tests/template_dimension/system_cpu.conf.unique_alarm26
-rw-r--r--tests/template_dimension/template_dim.sh.in88
-rwxr-xr-xtests/updater_checks.bats66
-rwxr-xr-xtests/updater_checks.sh71
-rw-r--r--tests/urls/request.sh.in307
-rw-r--r--tests/web/easypiechart.chart.spec.js39
-rw-r--r--tests/web/easypiechart.percentage.spec.js142
-rw-r--r--tests/web/fixtures/easypiechart.chart.fixture1.html6
-rw-r--r--tests/web/karma.conf.js110
-rw-r--r--tests/web/lib/jasmine-jquery.js841
-rw-r--r--web/Makefile.am22
-rw-r--r--web/README.md236
-rw-r--r--web/api/Makefile.am21
-rw-r--r--web/api/README.md14
-rw-r--r--web/api/badges/Makefile.am8
-rw-r--r--web/api/badges/README.md363
-rw-r--r--web/api/badges/web_buffer_svg.c1140
-rw-r--r--web/api/badges/web_buffer_svg.h16
-rw-r--r--web/api/exporters/Makefile.am13
-rw-r--r--web/api/exporters/README.md10
-rw-r--r--web/api/exporters/allmetrics.c127
-rw-r--r--web/api/exporters/allmetrics.h12
-rw-r--r--web/api/exporters/prometheus/Makefile.am8
-rw-r--r--web/api/exporters/prometheus/README.md10
-rw-r--r--web/api/exporters/shell/Makefile.am8
-rw-r--r--web/api/exporters/shell/README.md71
-rw-r--r--web/api/exporters/shell/allmetrics_shell.c161
-rw-r--r--web/api/exporters/shell/allmetrics_shell.h21
-rw-r--r--web/api/formatters/Makefile.am15
-rw-r--r--web/api/formatters/README.md78
-rw-r--r--web/api/formatters/charts2json.c192
-rw-r--r--web/api/formatters/charts2json.h12
-rw-r--r--web/api/formatters/csv/Makefile.am8
-rw-r--r--web/api/formatters/csv/README.md144
-rw-r--r--web/api/formatters/csv/csv.c143
-rw-r--r--web/api/formatters/csv/csv.h12
-rw-r--r--web/api/formatters/json/Makefile.am8
-rw-r--r--web/api/formatters/json/README.md156
-rw-r--r--web/api/formatters/json/json.c248
-rw-r--r--web/api/formatters/json/json.h10
-rw-r--r--web/api/formatters/json_wrapper.c295
-rw-r--r--web/api/formatters/json_wrapper.h11
-rw-r--r--web/api/formatters/rrd2json.c401
-rw-r--r--web/api/formatters/rrd2json.h92
-rw-r--r--web/api/formatters/rrdset2json.c161
-rw-r--r--web/api/formatters/rrdset2json.h10
-rw-r--r--web/api/formatters/ssv/Makefile.am8
-rw-r--r--web/api/formatters/ssv/README.md59
-rw-r--r--web/api/formatters/ssv/ssv.c45
-rw-r--r--web/api/formatters/ssv/ssv.h10
-rw-r--r--web/api/formatters/value/Makefile.am8
-rw-r--r--web/api/formatters/value/README.md24
-rw-r--r--web/api/formatters/value/value.c94
-rw-r--r--web/api/formatters/value/value.h10
-rw-r--r--web/api/health/Makefile.am8
-rw-r--r--web/api/health/README.md225
-rw-r--r--web/api/health/health_cmdapi.c205
-rw-r--r--web/api/health/health_cmdapi.h31
-rw-r--r--web/api/netdata-swagger.json2065
-rw-r--r--web/api/netdata-swagger.yaml1611
-rw-r--r--web/api/queries/Makefile.am20
-rw-r--r--web/api/queries/README.md176
-rw-r--r--web/api/queries/average/Makefile.am8
-rw-r--r--web/api/queries/average/README.md46
-rw-r--r--web/api/queries/average/average.c62
-rw-r--r--web/api/queries/average/average.h15
-rw-r--r--web/api/queries/des/Makefile.am8
-rw-r--r--web/api/queries/des/README.md73
-rw-r--r--web/api/queries/des/des.c139
-rw-r--r--web/api/queries/des/des.h17
-rw-r--r--web/api/queries/incremental_sum/Makefile.am8
-rw-r--r--web/api/queries/incremental_sum/README.md41
-rw-r--r--web/api/queries/incremental_sum/incremental_sum.c69
-rw-r--r--web/api/queries/incremental_sum/incremental_sum.h15
-rw-r--r--web/api/queries/max/Makefile.am8
-rw-r--r--web/api/queries/max/README.md38
-rw-r--r--web/api/queries/max/max.c60
-rw-r--r--web/api/queries/max/max.h15
-rw-r--r--web/api/queries/median/Makefile.am8
-rw-r--r--web/api/queries/median/README.md45
-rw-r--r--web/api/queries/median/median.c79
-rw-r--r--web/api/queries/median/median.h15
-rw-r--r--web/api/queries/min/Makefile.am8
-rw-r--r--web/api/queries/min/README.md38
-rw-r--r--web/api/queries/min/min.c60
-rw-r--r--web/api/queries/min/min.h15
-rw-r--r--web/api/queries/query.c1636
-rw-r--r--web/api/queries/query.h24
-rw-r--r--web/api/queries/rrdr.c144
-rw-r--r--web/api/queries/rrdr.h115
-rw-r--r--web/api/queries/ses/Makefile.am8
-rw-r--r--web/api/queries/ses/README.md61
-rw-r--r--web/api/queries/ses/ses.c92
-rw-r--r--web/api/queries/ses/ses.h17
-rw-r--r--web/api/queries/stddev/Makefile.am8
-rw-r--r--web/api/queries/stddev/README.md93
-rw-r--r--web/api/queries/stddev/stddev.c176
-rw-r--r--web/api/queries/stddev/stddev.h18
-rw-r--r--web/api/queries/sum/Makefile.am8
-rw-r--r--web/api/queries/sum/README.md41
-rw-r--r--web/api/queries/sum/sum.c58
-rw-r--r--web/api/queries/sum/sum.h15
-rw-r--r--web/api/tests/valid_urls.c790
-rw-r--r--web/api/tests/web_api.c474
-rw-r--r--web/api/web_api_v1.c1063
-rw-r--r--web/api/web_api_v1.h36
-rw-r--r--web/gui/.well-known/dnt/cookies14
-rw-r--r--web/gui/Makefile.am189
-rw-r--r--web/gui/README.md169
-rw-r--r--web/gui/browserconfig.xml2
-rw-r--r--web/gui/confluence/README.md1019
-rw-r--r--web/gui/console.html72
-rw-r--r--web/gui/css/bootstrap-3.3.7.css6758
-rw-r--r--web/gui/css/bootstrap-slate-flat-3.3.7.css7101
-rw-r--r--web/gui/css/bootstrap-slider-10.0.0.min.css22
-rw-r--r--web/gui/css/bootstrap-theme-3.3.7.min.css7
-rw-r--r--web/gui/css/bootstrap-toggle-2.2.2.min.css29
-rw-r--r--web/gui/css/c3-0.4.18.min.css2
-rw-r--r--web/gui/css/morris-0.5.1.css3
-rw-r--r--web/gui/custom/README.md663
-rw-r--r--web/gui/dash-example.html1028
-rw-r--r--web/gui/dashboard.css757
-rw-r--r--web/gui/dashboard.html699
-rw-r--r--web/gui/dashboard.js10377
-rw-r--r--web/gui/dashboard.slate.css775
-rw-r--r--web/gui/dashboard_info.js3781
-rw-r--r--web/gui/dashboard_info_custom_example.js62
-rw-r--r--web/gui/demo.html51
-rw-r--r--web/gui/demo2.html143
-rw-r--r--web/gui/demosites.html1469
-rw-r--r--web/gui/demosites2.html1112
-rw-r--r--web/gui/favicon.icobin0 -> 1150 bytes
-rw-r--r--web/gui/fonts/glyphicons-halflings-regular.eotbin0 -> 20127 bytes
-rw-r--r--web/gui/fonts/glyphicons-halflings-regular.svg289
-rw-r--r--web/gui/fonts/glyphicons-halflings-regular.ttfbin0 -> 45404 bytes
-rw-r--r--web/gui/fonts/glyphicons-halflings-regular.woffbin0 -> 23424 bytes
-rw-r--r--web/gui/fonts/glyphicons-halflings-regular.woff2bin0 -> 18028 bytes
-rw-r--r--web/gui/goto-host-from-alarm.html251
-rw-r--r--web/gui/images/alert-128-orange.pngbin0 -> 3477 bytes
-rw-r--r--web/gui/images/alert-128-red.pngbin0 -> 3743 bytes
-rw-r--r--web/gui/images/alert-multi-size-orange.icobin0 -> 112374 bytes
-rw-r--r--web/gui/images/alert-multi-size-red.icobin0 -> 112458 bytes
-rw-r--r--web/gui/images/android-icon-144x144.pngbin0 -> 2721 bytes
-rw-r--r--web/gui/images/android-icon-192x192.pngbin0 -> 3923 bytes
-rw-r--r--web/gui/images/android-icon-36x36.pngbin0 -> 539 bytes
-rw-r--r--web/gui/images/android-icon-48x48.pngbin0 -> 762 bytes
-rw-r--r--web/gui/images/android-icon-72x72.pngbin0 -> 1153 bytes
-rw-r--r--web/gui/images/android-icon-96x96.pngbin0 -> 1747 bytes
-rw-r--r--web/gui/images/animated.gifbin0 -> 389597 bytes
-rw-r--r--web/gui/images/apple-icon-114x114.pngbin0 -> 3651 bytes
-rw-r--r--web/gui/images/apple-icon-120x120.pngbin0 -> 2315 bytes
-rw-r--r--web/gui/images/apple-icon-144x144.pngbin0 -> 4001 bytes
-rw-r--r--web/gui/images/apple-icon-152x152.pngbin0 -> 5026 bytes
-rw-r--r--web/gui/images/apple-icon-180x180.pngbin0 -> 3645 bytes
-rw-r--r--web/gui/images/apple-icon-57x57.pngbin0 -> 1250 bytes
-rw-r--r--web/gui/images/apple-icon-60x60.pngbin0 -> 1052 bytes
-rw-r--r--web/gui/images/apple-icon-72x72.pngbin0 -> 1427 bytes
-rw-r--r--web/gui/images/apple-icon-76x76.pngbin0 -> 1711 bytes
-rw-r--r--web/gui/images/apple-icon-precomposed.pngbin0 -> 3926 bytes
-rw-r--r--web/gui/images/apple-icon.pngbin0 -> 3926 bytes
-rw-r--r--web/gui/images/banner-icon-144x144.pngbin0 -> 2724 bytes
-rw-r--r--web/gui/images/check-mark-2-128-green.pngbin0 -> 3771 bytes
-rw-r--r--web/gui/images/check-mark-2-multi-size-green.icobin0 -> 111893 bytes
-rw-r--r--web/gui/images/favicon-128.pngbin0 -> 2436 bytes
-rw-r--r--web/gui/images/favicon-16x16.pngbin0 -> 285 bytes
-rw-r--r--web/gui/images/favicon-196x196.pngbin0 -> 10025 bytes
-rw-r--r--web/gui/images/favicon-32x32.pngbin0 -> 454 bytes
-rw-r--r--web/gui/images/favicon-96x96.pngbin0 -> 1925 bytes
-rw-r--r--web/gui/images/favicon.icobin0 -> 1150 bytes
-rw-r--r--web/gui/images/ms-icon-144x144.pngbin0 -> 4001 bytes
-rw-r--r--web/gui/images/ms-icon-150x150.pngbin0 -> 2867 bytes
-rw-r--r--web/gui/images/ms-icon-310x150.pngbin0 -> 3632 bytes
-rw-r--r--web/gui/images/ms-icon-310x310.pngbin0 -> 7215 bytes
-rw-r--r--web/gui/images/ms-icon-36x36.pngbin0 -> 536 bytes
-rw-r--r--web/gui/images/ms-icon-70x70.pngbin0 -> 2436 bytes
-rw-r--r--web/gui/images/netdata-logomark.svg8
-rw-r--r--web/gui/images/netdata.svg18
-rw-r--r--web/gui/images/packaging-beta-tag.svg42
-rw-r--r--web/gui/images/post.pngbin0 -> 9043 bytes
-rw-r--r--web/gui/images/seo-performance-128.pngbin0 -> 1828 bytes
-rw-r--r--web/gui/index.html1322
-rw-r--r--web/gui/infographic.html171
-rw-r--r--web/gui/lib/bootstrap-3.3.7.min.js8
-rw-r--r--web/gui/lib/bootstrap-slider-10.0.0.min.js6
-rw-r--r--web/gui/lib/bootstrap-table-1.11.0.min.js9
-rw-r--r--web/gui/lib/bootstrap-table-export-1.11.0.min.js8
-rw-r--r--web/gui/lib/bootstrap-toggle-2.2.2.min.js10
-rw-r--r--web/gui/lib/clipboard-polyfill-be05dad.js9
-rw-r--r--web/gui/lib/d3-4.12.2.min.js3
-rw-r--r--web/gui/lib/d3pie-0.2.1-netdata-3.js2124
-rw-r--r--web/gui/lib/dygraph-c91c859.min.js7
-rw-r--r--web/gui/lib/dygraph-smooth-plotter-c91c859.js141
-rw-r--r--web/gui/lib/fontawesome-all-5.0.1.min.js6
-rw-r--r--web/gui/lib/gauge-1.3.2.min.js2
-rw-r--r--web/gui/lib/jquery-2.2.4.min.js5
-rw-r--r--web/gui/lib/jquery.easypiechart-97b5824.min.js10
-rw-r--r--web/gui/lib/jquery.peity-3.2.0.min.js14
-rw-r--r--web/gui/lib/jquery.sparkline-2.1.2.min.js6
-rw-r--r--web/gui/lib/lz-string-1.4.4.min.js2
-rw-r--r--web/gui/lib/pako-1.0.6.min.js2
-rw-r--r--web/gui/lib/perfect-scrollbar-0.6.15.min.js3
-rw-r--r--web/gui/lib/tableExport-1.6.0.min.js55
-rw-r--r--web/gui/main.css764
-rw-r--r--web/gui/main.js5155
-rw-r--r--web/gui/manifest.json41
-rw-r--r--web/gui/old/index.html1321
-rw-r--r--web/gui/refresh-badges.js98
-rw-r--r--web/gui/robots.txt7
-rw-r--r--web/gui/sitemap.xml9
-rw-r--r--web/gui/src/dashboard.js/alarms.js422
-rw-r--r--web/gui/src/dashboard.js/boot.js142
-rw-r--r--web/gui/src/dashboard.js/chart-registry.js94
-rw-r--r--web/gui/src/dashboard.js/charting.js492
-rw-r--r--web/gui/src/dashboard.js/charting/_c3.js114
-rw-r--r--web/gui/src/dashboard.js/charting/_morris.js81
-rw-r--r--web/gui/src/dashboard.js/charting/_raphael.js48
-rw-r--r--web/gui/src/dashboard.js/charting/d3.js43
-rw-r--r--web/gui/src/dashboard.js/charting/d3pie.js341
-rw-r--r--web/gui/src/dashboard.js/charting/dygraph.js1065
-rw-r--r--web/gui/src/dashboard.js/charting/easy-pie-chart.js281
-rw-r--r--web/gui/src/dashboard.js/charting/gauge.js406
-rw-r--r--web/gui/src/dashboard.js/charting/google-charts.js132
-rw-r--r--web/gui/src/dashboard.js/charting/peity.js62
-rw-r--r--web/gui/src/dashboard.js/charting/sparkline.js155
-rw-r--r--web/gui/src/dashboard.js/charting/textonly.js18
-rw-r--r--web/gui/src/dashboard.js/colors.js34
-rw-r--r--web/gui/src/dashboard.js/common.js249
-rw-r--r--web/gui/src/dashboard.js/compatibility.js31
-rw-r--r--web/gui/src/dashboard.js/dependencies.js21
-rw-r--r--web/gui/src/dashboard.js/epilogue.js.inc1
-rw-r--r--web/gui/src/dashboard.js/error-handling.js52
-rw-r--r--web/gui/src/dashboard.js/localstorage.js173
-rw-r--r--web/gui/src/dashboard.js/main.js4279
-rw-r--r--web/gui/src/dashboard.js/options.js225
-rw-r--r--web/gui/src/dashboard.js/prologue.js.inc84
-rw-r--r--web/gui/src/dashboard.js/registry.js311
-rw-r--r--web/gui/src/dashboard.js/server-detection.js29
-rw-r--r--web/gui/src/dashboard.js/themes.js92
-rw-r--r--web/gui/src/dashboard.js/timeout.js100
-rw-r--r--web/gui/src/dashboard.js/units-conversion.js528
-rw-r--r--web/gui/src/dashboard.js/utils.js432
-rw-r--r--web/gui/src/dashboard.js/xss.js84
-rw-r--r--web/gui/static/img/netdata-logomark.svg3
-rw-r--r--web/gui/tv.html279
-rw-r--r--web/server/Makefile.am12
-rw-r--r--web/server/README.md245
-rw-r--r--web/server/static/Makefile.am11
-rw-r--r--web/server/static/README.md17
-rw-r--r--web/server/static/static-threaded.c503
-rw-r--r--web/server/static/static-threaded.h10
-rw-r--r--web/server/web_client.c2035
-rw-r--r--web/server/web_client.h217
-rw-r--r--web/server/web_client_cache.c269
-rw-r--r--web/server/web_client_cache.h31
-rw-r--r--web/server/web_server.c159
-rw-r--r--web/server/web_server.h64
1742 files changed, 601905 insertions, 0 deletions
diff --git a/.clang-format b/.clang-format
new file mode 100644
index 0000000..50d4af0
--- /dev/null
+++ b/.clang-format
@@ -0,0 +1,118 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# clang-format configuration file. Intended for clang-format >= 4.
+#
+# For more information, see:
+#
+# Documentation/process/clang-format.rst
+# https://clang.llvm.org/docs/ClangFormat.html
+# https://clang.llvm.org/docs/ClangFormatStyleOptions.html
+#
+---
+AccessModifierOffset: -4
+AlignAfterOpenBracket: AlwaysBreak
+AlignConsecutiveAssignments: false
+AlignConsecutiveDeclarations: false
+#AlignEscapedNewlines: Left # Unknown to clang-format-4.0
+AlignOperands: true
+AlignTrailingComments: true
+AllowAllParametersOfDeclarationOnNextLine: true
+AllowShortBlocksOnASingleLine: false
+AllowShortCaseLabelsOnASingleLine: false
+AllowShortFunctionsOnASingleLine: None
+AllowShortIfStatementsOnASingleLine: false
+AllowShortLoopsOnASingleLine: false
+AlwaysBreakAfterDefinitionReturnType: None
+AlwaysBreakAfterReturnType: None
+AlwaysBreakBeforeMultilineStrings: false
+AlwaysBreakTemplateDeclarations: false
+BinPackArguments: true
+BinPackParameters: true
+BraceWrapping:
+ AfterClass: false
+ AfterControlStatement: false
+ AfterEnum: false
+ AfterFunction: true
+ AfterNamespace: true
+ AfterObjCDeclaration: false
+ AfterStruct: false
+ AfterUnion: false
+ #AfterExternBlock: false # Unknown to clang-format-5.0
+ BeforeCatch: false
+ BeforeElse: false
+ IndentBraces: false
+ #SplitEmptyFunction: true # Unknown to clang-format-4.0
+ #SplitEmptyRecord: true # Unknown to clang-format-4.0
+ #SplitEmptyNamespace: true # Unknown to clang-format-4.0
+BreakBeforeBinaryOperators: None
+BreakBeforeBraces: Custom
+#BreakBeforeInheritanceComma: false # Unknown to clang-format-4.0
+BreakBeforeTernaryOperators: false
+BreakConstructorInitializersBeforeComma: false
+#BreakConstructorInitializers: BeforeComma # Unknown to clang-format-4.0
+BreakAfterJavaFieldAnnotations: false
+BreakStringLiterals: false
+ColumnLimit: 120
+CommentPragmas: '^ IWYU pragma:'
+#CompactNamespaces: false # Unknown to clang-format-4.0
+ConstructorInitializerAllOnOneLineOrOnePerLine: false
+ConstructorInitializerIndentWidth: 4
+ContinuationIndentWidth: 4
+Cpp11BracedListStyle: false
+DerivePointerAlignment: false
+DisableFormat: false
+ExperimentalAutoDetectBinPacking: false
+#FixNamespaceComments: false # Unknown to clang-format-4.0
+
+#IncludeBlocks: Preserve # Unknown to clang-format-5.0
+IncludeCategories:
+ - Regex: '.*'
+ Priority: 1
+IncludeIsMainRegex: '(Test)?$'
+IndentCaseLabels: true
+#IndentPPDirectives: None # Unknown to clang-format-5.0
+IndentWidth: 4
+IndentWrappedFunctionNames: false
+JavaScriptQuotes: Leave
+JavaScriptWrapImports: true
+KeepEmptyLinesAtTheStartOfBlocks: false
+MacroBlockBegin: ''
+MacroBlockEnd: ''
+MaxEmptyLinesToKeep: 1
+NamespaceIndentation: Inner
+#ObjCBinPackProtocolList: Auto # Unknown to clang-format-5.0
+ObjCBlockIndentWidth: 4
+ObjCSpaceAfterProperty: true
+ObjCSpaceBeforeProtocolList: true
+
+# Taken from git's rules
+#PenaltyBreakAssignment: 10 # Unknown to clang-format-4.0
+PenaltyBreakBeforeFirstCallParameter: 30
+PenaltyBreakComment: 10
+PenaltyBreakFirstLessLess: 0
+PenaltyBreakString: 10
+PenaltyExcessCharacter: 100
+PenaltyReturnTypeOnItsOwnLine: 60
+
+PointerAlignment: Right
+ReflowComments: false
+SortIncludes: false
+#SortUsingDeclarations: false # Unknown to clang-format-4.0
+SpaceAfterCStyleCast: false
+SpaceAfterTemplateKeyword: true
+SpaceBeforeAssignmentOperators: true
+#SpaceBeforeCtorInitializerColon: true # Unknown to clang-format-5.0
+#SpaceBeforeInheritanceColon: true # Unknown to clang-format-5.0
+SpaceBeforeParens: ControlStatements
+#SpaceBeforeRangeBasedForLoopColon: true # Unknown to clang-format-5.0
+SpaceInEmptyParentheses: false
+SpacesBeforeTrailingComments: 1
+SpacesInAngles: false
+SpacesInContainerLiterals: false
+SpacesInCStyleCastParentheses: false
+SpacesInParentheses: false
+SpacesInSquareBrackets: false
+Standard: Cpp03
+TabWidth: 4
+UseTab: Never
+...
diff --git a/.codacy.yml b/.codacy.yml
new file mode 100644
index 0000000..3d88a25
--- /dev/null
+++ b/.codacy.yml
@@ -0,0 +1,16 @@
+---
+exclude_paths:
+ - collectors/python.d.plugin/python_modules/pyyaml2/**
+ - collectors/python.d.plugin/python_modules/pyyaml3/**
+ - collectors/python.d.plugin/python_modules/urllib3/**
+ - collectors/python.d.plugin/python_modules/third_party/**
+ - collectors/node.d.plugin/node_modules/**
+ - contrib/**
+ - packaging/makeself/**
+ - web/gui/css/**
+ - web/gui/lib/**
+ - web/gui/old/**
+ - web/gui/src/**
+ - web/gui/main.js
+ - tests/**
+ - aclk/tests/**
diff --git a/.codeclimate.yml b/.codeclimate.yml
new file mode 100644
index 0000000..8a11c84
--- /dev/null
+++ b/.codeclimate.yml
@@ -0,0 +1,99 @@
+version: "2"
+checks:
+ argument-count:
+ enabled: false
+ config:
+ threshold: 10
+ complex-logic:
+ enabled: false
+ config:
+ threshold: 10
+ file-lines:
+ enabled: false
+ config:
+ threshold: 5000
+ method-complexity:
+ enabled: false
+ config:
+ threshold: 20
+ method-count:
+ enabled: false
+ config:
+ threshold: 50
+ method-lines:
+ enabled: false
+ config:
+ threshold: 250
+ nested-control-flow:
+ enabled: false
+ config:
+ threshold: 4
+ return-statements:
+ enabled: false
+ config:
+ threshold: 4
+ similar-code:
+ enabled: false
+ identical-code:
+ enabled: false
+plugins:
+ csslint:
+ enabled: true
+ duplication:
+ enabled: false
+ config:
+ languages:
+ - javascript:
+ mass_threshold: 100
+ - python:
+ python_version: 3
+ mass_threshold: 100
+ checks:
+ Similar code:
+ enabled: false
+ Identical code:
+ enabled: false
+ eslint:
+ enabled: true
+ checks:
+ max-statements:
+ enabled: false
+ complexity:
+ enabled: false
+ no-eval:
+ enabled: false
+ no-extend-native:
+ enabled: false
+ no-void:
+ enabled: false
+ no-alert:
+ enabled: false
+ no-undef-init:
+ enabled: false
+ fixme:
+ enabled: false
+ phpmd:
+ enabled: true
+ radon:
+ enabled: true
+ checks:
+ Complexity:
+ enabled: false
+exclude_patterns:
+ - ".gitignore"
+ - ".githooks/"
+ - "tests/"
+ - "m4/"
+ - "web/css/"
+ - "web/lib/"
+ - "web/fonts/"
+ - "web/old/"
+ - "collectors/python.d.plugin/python_modules/pyyaml2/"
+ - "collectors/python.d.plugin/python_modules/pyyaml3/"
+ - "collectors/python.d.plugin/python_modules/urllib3/"
+ - "collectors/node.d.plugin/node_modules/lib/"
+ - "collectors/node.d.plugin/node_modules/asn1-ber.js"
+ - "collectors/node.d.plugin/node_modules/extend.js"
+ - "collectors/node.d.plugin/node_modules/pixl-xml.js"
+ - "collectors/node.d.plugin/node_modules/net-snmp.js"
+
diff --git a/.csslintrc b/.csslintrc
new file mode 100644
index 0000000..aacba95
--- /dev/null
+++ b/.csslintrc
@@ -0,0 +1,2 @@
+--exclude-exts=.min.css
+--ignore=adjoining-classes,box-model,ids,order-alphabetical,unqualified-attributes
diff --git a/.dockerignore b/.dockerignore
new file mode 120000
index 0000000..3e4e48b
--- /dev/null
+++ b/.dockerignore
@@ -0,0 +1 @@
+.gitignore \ No newline at end of file
diff --git a/.eslintignore b/.eslintignore
new file mode 100644
index 0000000..96212a3
--- /dev/null
+++ b/.eslintignore
@@ -0,0 +1 @@
+**/*{.,-}min.js
diff --git a/.eslintrc b/.eslintrc
new file mode 100644
index 0000000..9faa375
--- /dev/null
+++ b/.eslintrc
@@ -0,0 +1,213 @@
+ecmaFeatures:
+ modules: true
+ jsx: true
+
+env:
+ amd: true
+ browser: true
+ es6: true
+ jquery: true
+ node: true
+
+# http://eslint.org/docs/rules/
+rules:
+ # Possible Errors
+ comma-dangle: [2, never]
+ no-cond-assign: 2
+ no-console: 0
+ no-constant-condition: 2
+ no-control-regex: 2
+ no-debugger: 2
+ no-dupe-args: 2
+ no-dupe-keys: 2
+ no-duplicate-case: 2
+ no-empty: 2
+ no-empty-character-class: 2
+ no-ex-assign: 2
+ no-extra-boolean-cast: 2
+ no-extra-parens: 0
+ no-extra-semi: 2
+ no-func-assign: 2
+ no-inner-declarations: [2, functions]
+ no-invalid-regexp: 2
+ no-irregular-whitespace: 2
+ no-negated-in-lhs: 2
+ no-obj-calls: 2
+ no-regex-spaces: 2
+ no-sparse-arrays: 2
+ no-unexpected-multiline: 2
+ no-unreachable: 2
+ use-isnan: 2
+ valid-jsdoc: 0
+ valid-typeof: 2
+
+ # Best Practices
+ accessor-pairs: 2
+ block-scoped-var: 0
+ complexity: [2, 6]
+ consistent-return: 0
+ curly: 0
+ default-case: 0
+ dot-location: 0
+ dot-notation: 0
+ eqeqeq: 2
+ guard-for-in: 2
+ no-alert: 2
+ no-caller: 2
+ no-case-declarations: 2
+ no-div-regex: 2
+ no-else-return: 0
+ no-empty-label: 2
+ no-empty-pattern: 2
+ no-eq-null: 2
+ no-eval: 2
+ no-extend-native: 2
+ no-extra-bind: 2
+ no-fallthrough: 2
+ no-floating-decimal: 0
+ no-implicit-coercion: 0
+ no-implied-eval: 2
+ no-invalid-this: 0
+ no-iterator: 2
+ no-labels: 0
+ no-lone-blocks: 2
+ no-loop-func: 2
+ no-magic-number: 0
+ no-multi-spaces: 0
+ no-multi-str: 0
+ no-native-reassign: 2
+ no-new-func: 2
+ no-new-wrappers: 2
+ no-new: 2
+ no-octal-escape: 2
+ no-octal: 2
+ no-proto: 2
+ no-redeclare: 2
+ no-return-assign: 2
+ no-script-url: 2
+ no-self-compare: 2
+ no-sequences: 0
+ no-throw-literal: 0
+ no-unused-expressions: 2
+ no-useless-call: 2
+ no-useless-concat: 2
+ no-void: 2
+ no-warning-comments: 0
+ no-with: 2
+ radix: 2
+ vars-on-top: 0
+ wrap-iife: 2
+ yoda: 0
+
+ # Strict
+ strict: 0
+
+ # Variables
+ init-declarations: 0
+ no-catch-shadow: 2
+ no-delete-var: 2
+ no-label-var: 2
+ no-shadow-restricted-names: 2
+ no-shadow: 0
+ no-undef-init: 2
+ no-undef: 0
+ no-undefined: 0
+ no-unused-vars: 0
+ no-use-before-define: 0
+
+ # Node.js and CommonJS
+ callback-return: 2
+ global-require: 2
+ handle-callback-err: 2
+ no-mixed-requires: 0
+ no-new-require: 0
+ no-path-concat: 2
+ no-process-exit: 2
+ no-restricted-modules: 0
+ no-sync: 0
+
+ # Stylistic Issues
+ array-bracket-spacing: 0
+ block-spacing: 0
+ brace-style: 0
+ camelcase: 0
+ comma-spacing: 0
+ comma-style: 0
+ computed-property-spacing: 0
+ consistent-this: 0
+ eol-last: 0
+ func-names: 0
+ func-style: 0
+ id-length: 0
+ id-match: 0
+ indent: 0
+ jsx-quotes: 0
+ key-spacing: 0
+ linebreak-style: 0
+ lines-around-comment: 0
+ max-depth: 0
+ max-len: 0
+ max-nested-callbacks: 0
+ max-params: 0
+ max-statements: [2, 30]
+ new-cap: 0
+ new-parens: 0
+ newline-after-var: 0
+ no-array-constructor: 0
+ no-bitwise: 0
+ no-continue: 0
+ no-inline-comments: 0
+ no-lonely-if: 0
+ no-mixed-spaces-and-tabs: 0
+ no-multiple-empty-lines: 0
+ no-negated-condition: 0
+ no-nested-ternary: 0
+ no-new-object: 0
+ no-plusplus: 0
+ no-restricted-syntax: 0
+ no-spaced-func: 0
+ no-ternary: 0
+ no-trailing-spaces: 0
+ no-underscore-dangle: 0
+ no-unneeded-ternary: 0
+ object-curly-spacing: 0
+ one-var: 0
+ operator-assignment: 0
+ operator-linebreak: 0
+ padded-blocks: 0
+ quote-props: 0
+ quotes: 0
+ require-jsdoc: 0
+ semi-spacing: 0
+ semi: 0
+ sort-vars: 0
+ space-after-keywords: 0
+ space-before-blocks: 0
+ space-before-function-paren: 0
+ space-before-keywords: 0
+ space-in-parens: 0
+ space-infix-ops: 0
+ space-return-throw-case: 0
+ space-unary-ops: 0
+ spaced-comment: 0
+ wrap-regex: 0
+
+ # ECMAScript 6
+ arrow-body-style: 0
+ arrow-parens: 0
+ arrow-spacing: 0
+ constructor-super: 0
+ generator-star-spacing: 0
+ no-arrow-condition: 0
+ no-class-assign: 0
+ no-const-assign: 0
+ no-dupe-class-members: 0
+ no-this-before-super: 0
+ no-var: 0
+ object-shorthand: 0
+ prefer-arrow-callback: 0
+ prefer-const: 0
+ prefer-reflect: 0
+ prefer-spread: 0
+ prefer-template: 0
+ require-yield: 0
diff --git a/.gitattributes b/.gitattributes
new file mode 100644
index 0000000..45ec515
--- /dev/null
+++ b/.gitattributes
@@ -0,0 +1,2 @@
+*.c diff=cpp
+*.h diff=cpp
diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
new file mode 100644
index 0000000..5d0484c
--- /dev/null
+++ b/.github/CODEOWNERS
@@ -0,0 +1,66 @@
+# Files which shouldn't be changed manually are owned by @netdatabot.
+# This way we prevent modifications which would be overwritten by automation.
+
+# Global (default) code owner
+* @ktsaou @Ferroin
+
+# Ownership by directory structure
+.travis/ @Ferroin @knatsakis @kaskavel @vkalintiris
+.github/ @Ferroin @knatsakis @kaskavel @vkalintiris
+aclk/ @stelfrag @underhood
+backends/ @thiagoftsm @vlvkobal
+backends/graphite/ @thiagoftsm @vlvkobal
+backends/json/ @thiagoftsm @vlvkobal
+backends/opentsdb/ @thiagoftsm @vlvkobal
+backends/prometheus/ @vlvkobal @thiagoftsm
+build/ @Ferroin @knatsakis
+contrib/debian @Ferroin @knatsakis @vkalintiris
+collectors/ @vlvkobal @mfundul
+collectors/charts.d.plugin/ @ilyam8 @Ferroin
+collectors/freebsd.plugin/ @vlvkobal @thiagoftsm
+collectors/ebpf.plugin/ @thiagoftsm @vlvkobal
+collectors/macos.plugin/ @vlvkobal @thiagoftsm
+collectors/node.d.plugin/ @jacekkolasa
+collectors/node.d.plugin/fronius/ @ccremer
+collectors/node.d.plugin/snmp/ @jacekkolasa
+collectors/node.d.plugin/stiebeleltron/ @ccremer
+collectors/python.d.plugin/ @ilyam8
+collectors/cups.plugin/ @simonnagl @vlvkobal @thiagoftsm
+exporting/ @vlvkobal @thiagoftsm
+daemon/ @thiagoftsm @mfundul
+database/ @mfundul @thiagoftsm
+docs/ @joelhans
+health/ @thiagoftsm @vlvkobal
+health/health.d/ @thiagoftsm @vlvkobal
+health/notifications/ @Ferroin @thiagoftsm
+libnetdata/ @thiagoftsm @mfundul
+packaging/ @Ferroin @knatsakis @vkalintiris
+registry/ @jacekkolasa
+streaming/ @thiagoftsm @vlvkobal
+system/ @Ferroin @knatsakis @vkalintiris
+tests/ @Ferroin @knatsakis @kaskavel
+web/ @thiagoftsm @mfundul @vlvkobal
+web/gui/ @jacekkolasa
+
+# Ownership by filetype (overrides ownership by directory)
+*.am @Ferroin
+*.md @joelhans
+Dockerfile* @Ferroin @knatsakis
+
+# Ownership of specific files
+.gitignore @Ferroin @knatsakis
+.travis.yml @Ferroin @knatsakis @kaskavel @vkalintiris
+.lgtm.yml @Ferroin @knatsakis
+.eslintrc @Ferroin @knatsakis
+.eslintignore @Ferroin @knatsakis
+.csslintrc @Ferroin @knatsakis
+.codeclimate.yml @Ferroin @knatsakis
+.codacy.yml @Ferroin @knatsakis
+.yamllint.yml @Ferroin @knatsakis
+netdata.spec.in @Ferroin @knatsakis @vkalintiris
+netdata-installer.sh @Ferroin @knatsakis @vkalintiris
+package.json @jacekkolasa @Ferroin @knatsakis
+packaging/version @netdatabot @Ferroin @knatsakis
+
+LICENSE.md @joelhans @Ferroin
+CHANGELOG.md @netdatabot @Ferroin
diff --git a/.github/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE.md
new file mode 100644
index 0000000..bd939ba
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE.md
@@ -0,0 +1,15 @@
+---
+about: General issue template
+labels: "needs triage", "no changelog"
+---
+
+<!---
+This is a generic issue template. We usually prefer contributors to use one
+of the 3 other, more specific issue templates (bug report, feature request,
+question), which allows our automation to classify the issue so you can get a
+response faster. However, if your issue doesn't fall into any of those 3
+categories, use this generic template.
+--->
+
+#### Summary
+
diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md
new file mode 100644
index 0000000..ecfd9fc
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/bug_report.md
@@ -0,0 +1,57 @@
+---
+name: "Bug report: Netdata Agent"
+about: Submit a report and help us improve our free and open-source Netdata Agent
+labels: bug, needs triage
+---
+
+<!--
+When creating a bug report please:
+- Verify first that your issue is not already reported on GitHub.
+- Test if the latest release and master branch are affected too.
+-->
+
+##### Bug report summary
+<!-- Provide a clear and concise description of the bug you're experiencing. -->
+
+##### OS / Environment
+<!--
+Provide as much information about your environment (which operating system and distribution you're using, if Netdata is running in a container, etc.)
+as possible to allow us to reproduce this bug faster.
+
+To get this information, execute the following commands based on your operating system:
+- uname -a; grep -Hv "^#" /etc/*release # Linux
+- uname -a; uname -K # BSD
+- uname -a; sw_vers # macOS
+
+Place the output from the command in the code section below.
+ -->
+```
+
+```
+
+##### Netdata version
+<!--
+Provide output of `netdata -V`.
+
+If Netdata is running, execute: $(ps aux | grep -E -o "[a-zA-Z/]+netdata ") -V
+ -->
+
+
+##### Component Name
+<!--
+Let us know which component is affected by the bug. Our code is organized by component,
+so the component name matches the top-level directory of the repository.
+For example, a bug in the dashboard would be under the web component.
+-->
+
+##### Steps To Reproduce
+<!--
+Describe how you found this bug and how we can reproduce it, preferably with a minimal test-case scenario.
+If you'd like to attach larger files, use gist.github.com and paste in links.
+-->
+
+1. ...
+2. ...
+
+##### Expected behavior
+<!-- Provide a clear and concise description of what you expected to happen. -->
diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml
new file mode 100644
index 0000000..e6ea7d3
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/config.yml
@@ -0,0 +1,14 @@
+blank_issues_enabled: false
+contact_links:
+ - name: "Bug report: Netdata Cloud"
+ url: https://github.com/netdata/netdata-cloud/issues/new/choose
+ about: Create a report to help us improve our web application
+ - name: Help
+ url: https://community.netdata.cloud/c/support/13
+ about: We offer community-driven support on our community forums
+ - name: Feature request
+ url: https://community.netdata.cloud/c/feature-requests/7
+ about: Make a feature request for Netdata Cloud or the Netdata Agent
+ - name: Community
+ url: https://netdata.cloud/community
+ about: If you don't know where to start, visit our community page!
diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md
new file mode 100644
index 0000000..33f7736
--- /dev/null
+++ b/.github/PULL_REQUEST_TEMPLATE.md
@@ -0,0 +1,28 @@
+<!--
+Describe the change in the "Summary" section, including rationale and design decisions.
+Include "Fixes #nnn" if you are fixing an existing issue.
+
+In the "Component Name" section, state which component this PR changes. This
+will help us review your PR more quickly.
+
+In "Test Plan", provide enough detail on how you plan to test this PR so that a reviewer can validate your tests. If our CI already covers sufficient tests, state which tests cover the change.
+
+If you have more information to add, write it in the "Additional
+Information" section. This is usually used to help others understand your
+motivation behind this change. A step-by-step reproduction of the problem is
+helpful if there is no related issue.
+-->
+
+##### Summary
+
+##### Component Name
+
+##### Test Plan
+
+<!---
+Provide enough detail so that your reviewer can understand which test-cases you
+have covered, and recreate them if necessary. If sufficient tests are covered
+by our CI, then state which tests cover the change.
+-->
+
+##### Additional Information
diff --git a/.github/dependabot.yml b/.github/dependabot.yml
new file mode 100644
index 0000000..b02b155
--- /dev/null
+++ b/.github/dependabot.yml
@@ -0,0 +1,9 @@
+version: 2
+updates:
+ - package-ecosystem: github-actions
+ directory: /
+ schedule:
+ interval: weekly
+ labels:
+ - "no changelog"
+ - "area/ci"
diff --git a/.github/dockerfiles/Dockerfile.build_test b/.github/dockerfiles/Dockerfile.build_test
new file mode 100644
index 0000000..1dc3e30
--- /dev/null
+++ b/.github/dockerfiles/Dockerfile.build_test
@@ -0,0 +1,11 @@
+ARG BASE
+
+FROM ${BASE}
+
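+# PRE is promoted to an environment variable; presumably consumed by the prep-cmd.sh step below.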
+ARG PRE
+ENV PRE=${PRE}
+
+COPY . /netdata
+
+RUN /bin/sh /netdata/prep-cmd.sh
+RUN /netdata/packaging/installer/install-required-packages.sh --dont-wait --non-interactive netdata-all
diff --git a/.github/dockerfiles/Dockerfile.clang b/.github/dockerfiles/Dockerfile.clang
new file mode 100644
index 0000000..62bb019
--- /dev/null
+++ b/.github/dockerfiles/Dockerfile.clang
@@ -0,0 +1,18 @@
+FROM debian:buster AS build
+
+# Disable apt/dpkg interactive mode
+ENV DEBIAN_FRONTEND=noninteractive
+
+# Install all build dependencies
+COPY packaging/installer/install-required-packages.sh /tmp/install-required-packages.sh
+RUN /tmp/install-required-packages.sh --dont-wait --non-interactive netdata-all
+
+# Install Clang and set as default CC
+RUN apt-get install -y clang && \
+ update-alternatives --install /usr/bin/cc cc /usr/bin/clang 100
+
+WORKDIR /netdata
+COPY . .
+
+# Build Netdata
+RUN ./netdata-installer.sh --dont-wait --dont-start-it --disable-go --require-cloud
diff --git a/.github/labeler.yml b/.github/labeler.yml
new file mode 100644
index 0000000..544b420
--- /dev/null
+++ b/.github/labeler.yml
@@ -0,0 +1,101 @@
+# This configures label matching for PRs.
+#
+# The keys are labels, and the values are lists of minimatch patterns
+# to which those labels apply.
+#
+# NOTE: This can only add labels, not remove them.
+# NOTE: Due to YAML syntax limitations, patterns or labels which start
+# with a character that is part of the standard YAML syntax must be
+# quoted.
+#
+# Please keep the labels sorted and deduplicated.
+
+ACLK:
+ - aclk/*
+ - aclk/**/*
+
+area/backends:
+ - backends/*
+ - backends/**/*
+
+area/exporting:
+ - exporting/*
+ - exporting/**/*
+
+area/build:
+ - build/*
+ - build/**/*
+ - build_external/*
+ - build_external/**/*
+ - CMakeLists.txt
+ - configure.ac
+ - Makefile.am
+ - "**/Makefile.am"
+
+area/ci:
+ - .travis/*
+ - .travis/**/*
+ - .github/*
+ - .github/**/*
+
+area/collectors:
+ - collectors/*
+ - collectors/**/*
+
+area/daemon:
+ - daemon/*
+ - daemon/**/*
+
+area/database:
+ - database/*
+ - database/**/*
+
+area/docs:
+ - "**/*.md"
+ - diagrams/*
+ - diagrams/**/*
+
+area/external/python:
+ - collectors/python.d.plugin/*
+ - collectors/python.d.plugin/**/*
+
+area/external:
+ - collectors/charts.d.plugin/*
+ - collectors/charts.d.plugin/**/*
+ - collectors/node.d.plugin/*
+ - collectors/node.d.plugin/**/*
+
+area/health:
+ - health/*
+ - health/**/*
+
+area/packaging:
+ - contrib/*
+ - contrib/**/*
+ - packaging/*
+ - packaging/**/*
+ - system/*
+ - system/**/*
+ - Dockerfile*
+ - netdata-installer.sh
+ - netdata.spec.in
+
+area/registry:
+ - registry/*
+ - registry/**/*
+
+area/streaming:
+ - streaming/*
+ - streaming/**/*
+
+area/tests:
+ - tests/*
+ - tests/**/*
+ - daemon/unit_test*
+ - coverity-scan.sh
+ - cppcheck.sh
+ - netdata.cppcheck
+
+area/web:
+ - web/*
+ - web/**/*
diff --git a/.github/scripts/build-artifacts.sh b/.github/scripts/build-artifacts.sh
new file mode 100755
index 0000000..e635765
--- /dev/null
+++ b/.github/scripts/build-artifacts.sh
@@ -0,0 +1,81 @@
+#!/bin/sh
+#
+# Builds the netdata-vX.Y.Z-xxxx.tar.gz source tarball (dist)
+# and netdata-vX.Y.Z-xxxx.gz.run (static x86_64) artifacts.
+
+set -e
+
+# shellcheck source=.github/scripts/functions.sh
+. "$(dirname "$0")/functions.sh"
+
+NAME="${NAME:-netdata}"
+VERSION="${VERSION:-"$(git describe)"}"
+BASENAME="$NAME-$VERSION"
+
+prepare_build() {
+ progress "Preparing build"
+ (
+ test -d artifacts || mkdir -p artifacts
+ echo "${VERSION}" > packaging/version
+ ) >&2
+}
+
+build_dist() {
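+ # Clean the tree, regenerate the autotools build system, and place the source tarball in artifacts/.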
+ progress "Building dist"
+ (
+ command -v git > /dev/null && [ -d .git ] && git clean -d -f
+ autoreconf -ivf
+ ./configure \
+ --prefix=/usr \
+ --sysconfdir=/etc \
+ --localstatedir=/var \
+ --libexecdir=/usr/libexec \
+ --with-zlib \
+ --with-math \
+ --with-user=netdata \
+ CFLAGS=-O2
+ make dist
+ mv "${BASENAME}.tar.gz" artifacts/
+ ) >&2
+}
+
+build_static_x86_64() {
+ progress "Building static x86_64"
+ (
+ command -v git > /dev/null && [ -d .git ] && git clean -d -f
+ USER="" ./packaging/makeself/build-x86_64-static.sh
+ ) >&2
+}
+
+prepare_assets() {
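+ # Publish "latest" aliases for the versioned artifacts and generate a checksum manifest.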
+ progress "Preparing assets"
+ (
+ cp packaging/version artifacts/latest-version.txt
+
+ cd artifacts || exit 1
+ ln -f "${BASENAME}.tar.gz" netdata-latest.tar.gz
+ ln -f "${BASENAME}.gz.run" netdata-latest.gz.run
+ sha256sum -b ./* > "sha256sums.txt"
+ ) >&2
+}
+
+steps="prepare_build build_dist build_static_x86_64"
+steps="$steps prepare_assets"
+
+_main() {
+ for step in $steps; do
+ if ! run "$step"; then
+ if [ -t 1 ]; then
+ debug
+ else
+ fail "Build failed"
+ fi
+ fi
+ done
+
+ echo "🎉 All Done!"
+}
+
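+# Run the pipeline only when the script is executed directly, not when it is sourced into a bash shell.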
+if [ -n "$0" ] && [ x"$0" != x"-bash" ]; then
+ _main "$@"
+fi
diff --git a/.github/scripts/build-static-x86_64.sh b/.github/scripts/build-static-x86_64.sh
new file mode 100755
index 0000000..2676b63
--- /dev/null
+++ b/.github/scripts/build-static-x86_64.sh
@@ -0,0 +1,58 @@
+#!/bin/sh
+#
+# Builds the netdata-vX.Y.Z-xxxx.gz.run (static x86_64) artifact.
+
+set -e
+
+# shellcheck source=.github/scripts/functions.sh
+. "$(dirname "$0")/functions.sh"
+
+NAME="${NAME:-netdata}"
+VERSION="${VERSION:-"$(git describe)"}"
+BASENAME="$NAME-$VERSION"
+
+prepare_build() {
+ progress "Preparing build"
+ (
+ test -d artifacts || mkdir -p artifacts
+ ) >&2
+}
+
+build_static_x86_64() {
+ progress "Building static x86_64"
+ (
+ USER="" ./packaging/makeself/build-x86_64-static.sh
+ ) >&2
+}
+
+prepare_assets() {
+ progress "Preparing assets"
+ (
+ cp packaging/version artifacts/latest-version.txt
+
+ cd artifacts || exit 1
+ ln -s "${BASENAME}.gz.run" netdata-latest.gz.run
+ sha256sum -b ./* > "sha256sums.txt"
+ ) >&2
+}
+
+steps="prepare_build build_static_x86_64"
+steps="$steps prepare_assets"
+
+_main() {
+ for step in $steps; do
+ if ! run "$step"; then
+ if [ -t 1 ]; then
+ debug
+ else
+ fail "Build failed"
+ fi
+ fi
+ done
+
+ echo "🎉 All Done!"
+}
+
+if [ -n "$0" ] && [ x"$0" != x"-bash" ]; then
+ _main "$@"
+fi
diff --git a/.github/scripts/bump-packaging-version.sh b/.github/scripts/bump-packaging-version.sh
new file mode 100755
index 0000000..bffcb0c
--- /dev/null
+++ b/.github/scripts/bump-packaging-version.sh
@@ -0,0 +1,6 @@
+#!/bin/sh
+
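+# Record the current 'git describe' output in packaging/version and commit it.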
+VERSION="$(git describe)"
+echo "$VERSION" > packaging/version
+git add -A
+git commit -m "[netdata nightly] $VERSION"
diff --git a/.github/scripts/functions.sh b/.github/scripts/functions.sh
new file mode 100644
index 0000000..7cd2e08
--- /dev/null
+++ b/.github/scripts/functions.sh
@@ -0,0 +1,69 @@
+#!/bin/sh
+
+# This file is sourced by the .github/scripts/build-*.sh scripts.
+
+set -e
+
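+# Emit an ANSI SGR escape sequence. Note that the terminating 'm' is not
+# printed here; callers supply it as part of the background argument,
+# e.g. 'color 37 42m 1'.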
+color() {
+ fg="$1"
+ bg="${2}"
+ ft="${3:-0}"
+
+ printf "\33[%s;%s;%s" "$ft" "$fg" "$bg"
+}
+
+color_reset() {
+ printf "\033[0m"
+}
+
+ok() {
+ if [ -t 1 ]; then
+ printf "%s[ OK ]%s\n" "$(color 37 42m 1)" "$(color_reset)"
+ else
+ printf "%s\n" "[ OK ]"
+ fi
+}
+
+err() {
+ if [ -t 1 ]; then
+ printf "%s[ ERR ]%s\n" "$(color 37 41m 1)" "$(color_reset)"
+ else
+ printf "%s\n" "[ ERR ]"
+ fi
+}
+
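+# Run a single step, capturing its stderr in a temporary logfile. Print
+# [ OK ] or [ ERR ] and, on failure, the last 100 lines of the log.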
+run() {
+ retval=0
+ logfile="$(mktemp -t "run-XXXXXX")"
+ if "$@" 2> "$logfile"; then
+ ok
+ else
+ retval=$?
+ err
+ tail -n 100 "$logfile" || true
+ fi
+ rm -rf "$logfile"
+ return $retval
+}
+
+progress() {
+ printf "%-40s" "$(printf "%s ... " "$1")"
+}
+
+log() {
+ printf "%s\n" "$1"
+}
+
+error() {
+ log "ERROR: ${1}"
+}
+
+fail() {
+ log "FATAL: ${1}"
+ exit 1
+}
+
+debug() {
+ log "Dropping into a shell for debugging ..."
+ exec /bin/sh
+}
diff --git a/.github/scripts/run_install_with_dist_file.sh b/.github/scripts/run_install_with_dist_file.sh
new file mode 100755
index 0000000..9453dff
--- /dev/null
+++ b/.github/scripts/run_install_with_dist_file.sh
@@ -0,0 +1,38 @@
+#!/usr/bin/env bash
+#
+# This script validates a netdata installation built from the 'make dist' source tarball
+#
+# Copyright: SPDX-License-Identifier: GPL-3.0-or-later
+#
+# Author : Pavlos Emm. Katsoulakis <paul@netdata.cloud>
+
+set -e
+
+if [ $# -ne 1 ]; then
+ printf >&2 "Usage: %s <dist_file>\n" "$(basename "$0")"
+ exit 1
+fi
+
+distfile="${1}"
+shift
+
+printf >&2 "Opening dist archive %s ... " "${distfile}"
+tar -xovf "${distfile}"
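+# Derive the extracted directory name by stripping the .tar.gz suffix
+# (e.g. netdata-v1.29.3.tar.gz -> netdata-v1.29.3).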
+distdir="$(echo "${distfile}" | cut -d. -f1,2,3)"
+if [ ! -d "${distdir}" ]; then
+  printf >&2 "ERROR: %s is not a directory\n" "${distdir}"
+  exit 2
+fi
+cp -a packaging/installer/install-required-packages.sh "${distdir}/install-required-packages.sh"
+
+printf >&2 "Entering %s and starting docker run ..." "${distdir}"
+
+pushd "${distdir}" || exit 1
+docker run \
+ -v "${PWD}:/netdata" \
+ -w /netdata \
+ "ubuntu:latest" \
+  /bin/bash -c "./install-required-packages.sh --dont-wait --non-interactive netdata && apt-get install -y wget && ./netdata-installer.sh --dont-wait --require-cloud --disable-telemetry --install /tmp && echo \"Validating netdata instance is running\" && wget -O - 'http://127.0.0.1:19999/api/v1/info' | grep version"
+popd || exit 1
+
+echo "All Done!"
diff --git a/.github/stale.yml b/.github/stale.yml
new file mode 100644
index 0000000..abf927a
--- /dev/null
+++ b/.github/stale.yml
@@ -0,0 +1,18 @@
+---
+only: issues
+limitPerRun: 30
+daysUntilStale: 30
+daysUntilClose: 7
+exemptLabels:
+ - bug
+ - help wanted
+ - feature request
+exemptProjects: true
+exemptMilestones: true
+staleLabel: stale
+markComment: >
+ This issue has been inactive for 30 days.
+ It will be closed in one week, unless it is updated.
+closeComment: >
+  This issue has been automatically closed due to an extended period of inactivity.
+ Please reopen if it is still valid. Thank you for your contributions.
diff --git a/.github/workflows/build-and-install.yml b/.github/workflows/build-and-install.yml
new file mode 100644
index 0000000..26a144a
--- /dev/null
+++ b/.github/workflows/build-and-install.yml
@@ -0,0 +1,201 @@
+---
+name: Builder
+on:
+ push:
+ branches:
+ - master
+ pull_request:
+jobs:
+ static-build:
+ name: Build (x86_64)
+ runs-on: ubuntu-latest
+ steps:
+ - name: Git clone repository
+ uses: actions/checkout@v2
+ - run: |
+ git fetch --prune --unshallow --tags
+ - name: Build
+ run: |
+ .github/scripts/build-static-x86_64.sh
+ source-build:
+ name: Build & Install
+ strategy:
+ fail-fast: false
+ matrix:
+ distro:
+ - 'alpine:edge'
+ - 'alpine:3.13'
+ - 'alpine:3.12'
+ - 'alpine:3.11'
+ - 'alpine:3.10'
+ - 'archlinux:latest'
+ - 'centos:8'
+ - 'centos:7'
+ - 'clearlinux:latest'
+ - 'debian:10'
+ - 'debian:9'
+ - 'fedora:33'
+ - 'fedora:32'
+ - 'opensuse/leap:15.2'
+ - 'opensuse/tumbleweed:latest'
+ - 'ubuntu:20.10'
+ - 'ubuntu:20.04'
+ - 'ubuntu:18.04'
+ - 'ubuntu:16.04'
+ include:
+ - distro: 'alpine:edge'
+ pre: 'apk add -U bash'
+ rmjsonc: 'apk del json-c-dev'
+ - distro: 'alpine:3.13'
+ pre: 'apk add -U bash'
+ rmjsonc: 'apk del json-c-dev'
+ - distro: 'alpine:3.12'
+ pre: 'apk add -U bash'
+ rmjsonc: 'apk del json-c-dev'
+ - distro: 'alpine:3.11'
+ pre: 'apk add -U bash'
+ rmjsonc: 'apk del json-c-dev'
+ - distro: 'alpine:3.10'
+ pre: 'apk add -U bash'
+ rmjsonc: 'apk del json-c-dev'
+
+ - distro: 'archlinux:latest'
+ pre: 'pacman --noconfirm -Syu && pacman --noconfirm -Sy grep libffi'
+
+ - distro: 'centos:8'
+ rmjsonc: 'dnf remove -y json-c-devel'
+
+ - distro: 'debian:10'
+ pre: 'apt-get update'
+ rmjsonc: 'apt-get remove -y libjson-c-dev'
+ - distro: 'debian:9'
+ pre: 'apt-get update'
+ rmjsonc: 'apt-get remove -y libjson-c-dev'
+
+ - distro: 'fedora:33'
+ rmjsonc: 'dnf remove -y json-c-devel'
+ - distro: 'fedora:32'
+ rmjsonc: 'dnf remove -y json-c-devel'
+
+ - distro: 'opensuse/leap:15.2'
+ rmjsonc: 'zypper rm -y libjson-c-devel'
+ - distro: 'opensuse/tumbleweed:latest'
+ rmjsonc: 'zypper rm -y libjson-c-devel'
+
+ - distro: 'ubuntu:20.10'
+ pre: 'apt-get update'
+ rmjsonc: 'apt-get remove -y libjson-c-dev'
+ - distro: 'ubuntu:20.04'
+ pre: 'apt-get update'
+ rmjsonc: 'apt-get remove -y libjson-c-dev'
+ - distro: 'ubuntu:18.04'
+ pre: 'apt-get update'
+ rmjsonc: 'apt-get remove -y libjson-c-dev'
+ - distro: 'ubuntu:16.04'
+ pre: 'apt-get update'
+ rmjsonc: 'apt-get remove -y libjson-c-dev'
+ runs-on: ubuntu-latest
+ steps:
+ - name: Git clone repository
+ uses: actions/checkout@v2
+ - name: install-required-packages.sh on ${{ matrix.distro }}
+ env:
+ PRE: ${{ matrix.pre }}
+ RMJSONC: ${{ matrix.rmjsonc }}
+ run: |
+ echo $PRE > ./prep-cmd.sh
+ echo $RMJSONC > ./rmjsonc.sh && chmod +x ./rmjsonc.sh
+ docker build . -f .github/dockerfiles/Dockerfile.build_test -t test --build-arg BASE=${{ matrix.distro }}
+ - name: Regular build on ${{ matrix.distro }}
+ run: |
+ docker run -w /netdata test /bin/sh -c 'autoreconf -ivf && ./configure && make -j2'
+ - name: netdata-installer on ${{ matrix.distro }}, disable cloud
+ run: |
+ docker run -w /netdata test /bin/sh -c './netdata-installer.sh --dont-wait --dont-start-it --disable-cloud'
+ - name: netdata-installer on ${{ matrix.distro }}, require cloud
+ run: |
+ docker run -w /netdata test /bin/sh -c './netdata-installer.sh --dont-wait --dont-start-it --require-cloud'
+ - name: netdata-installer on ${{ matrix.distro }}, require cloud, no JSON-C
+ if: matrix.rmjsonc != ''
+ run: |
+ docker run -w /netdata test \
+ /bin/sh -c '/netdata/rmjsonc.sh && ./netdata-installer.sh --dont-wait --dont-start-it --require-cloud'
+ aws-kinesis-build:
+ name: With AWS Kinesis SDK
+ strategy:
+ fail-fast: false
+ matrix:
+ distro:
+ - 'centos:8'
+ - 'debian:buster'
+ - 'fedora:32'
+ - 'ubuntu:20.04'
+ include:
+ - distro: 'centos:8'
+ pre: >-
+ yum -y update &&
+ yum -y groupinstall 'Development Tools' &&
+ yum -y install libcurl-devel openssl-devel libuuid-devel
+ build_kinesis: >-
+ git clone https://github.com/aws/aws-sdk-cpp.git &&
+ cmake -DCMAKE_INSTALL_PREFIX=/usr
+ -DBUILD_ONLY=kinesis
+ ./aws-sdk-cpp &&
+ make &&
+ make install
+ - distro: 'debian:buster'
+ pre: >-
+ apt-get update &&
+ DEBIAN_FRONTEND=noninteractive apt-get install -y build-essential &&
+ DEBIAN_FRONTEND=noninteractive apt-get install -y libcurl4-openssl-dev libssl-dev uuid-dev zlib1g-dev libpulse-dev
+ build_kinesis: >-
+ git clone https://github.com/aws/aws-sdk-cpp.git &&
+ cmake -DCMAKE_INSTALL_PREFIX=/usr
+ -DBUILD_ONLY=kinesis
+ ./aws-sdk-cpp &&
+ make &&
+ make install
+ - distro: 'fedora:32'
+ pre: >-
+ dnf -y update &&
+ dnf -y groupinstall 'Development Tools' &&
+ dnf -y install libcurl-devel openssl-devel libuuid-devel
+ build_kinesis: >-
+ git clone https://github.com/aws/aws-sdk-cpp.git &&
+ cmake -DCMAKE_INSTALL_PREFIX=/usr
+ -DBUILD_ONLY=kinesis
+ ./aws-sdk-cpp &&
+ make &&
+ make install
+ - distro: 'ubuntu:20.04'
+ pre: >-
+ apt-get update &&
+ DEBIAN_FRONTEND=noninteractive apt-get install -y build-essential &&
+ DEBIAN_FRONTEND=noninteractive apt-get install -y libcurl4-openssl-dev libssl-dev uuid-dev zlib1g-dev libpulse-dev
+ build_kinesis: >-
+ git clone https://github.com/aws/aws-sdk-cpp.git &&
+ cmake -DCMAKE_INSTALL_PREFIX=/usr
+ -DBUILD_ONLY=kinesis
+ ./aws-sdk-cpp &&
+ make &&
+ make install
+ runs-on: ubuntu-latest
+ steps:
+ - name: Git clone repository
+ uses: actions/checkout@v2
+ - name: install-required-packages.sh on ${{ matrix.distro }}
+ env:
+ PRE: ${{ matrix.pre }}
+ BUILD_KINESIS: ${{ matrix.build_kinesis }}
+ run: |
+ echo $PRE > ./prep-cmd.sh
+ echo $BUILD_KINESIS > ./build-kinesis.sh && chmod +x ./build-kinesis.sh
+ docker build . -f .github/dockerfiles/Dockerfile.build_test -t test --build-arg BASE=${{ matrix.distro }}
+ - name: Build on ${{ matrix.distro }}
+ env:
+ RUNCMD: >-
+ ./build-kinesis.sh &&
+ ./netdata-installer.sh --dont-wait --dont-start-it --enable-backend-kinesis
+ run: |
+ docker run -w /netdata test \
+ /bin/sh -c "$RUNCMD"
diff --git a/.github/workflows/checks.yml b/.github/workflows/checks.yml
new file mode 100644
index 0000000..cf494e9
--- /dev/null
+++ b/.github/workflows/checks.yml
@@ -0,0 +1,106 @@
+---
+name: Checks
+on:
+ push:
+ branches:
+ - master
+ pull_request:
+jobs:
+ checksum-checks:
+ name: Checksums
+ runs-on: ubuntu-latest
+ steps:
+ - name: Git clone repository
+ uses: actions/checkout@v2
+ - name: Run checksum checks on kickstart files
+ env:
+ LOCAL_ONLY: "true"
+ run: |
+ ./tests/installer/checksums.sh
+ dashboard-checks:
+ name: Dashboard
+ runs-on: ubuntu-latest
+ steps:
+ - name: Git clone repository
+ uses: actions/checkout@v2
+ - name: Install required packages
+ run: |
+ ./packaging/installer/install-required-packages.sh --dont-wait --non-interactive netdata
+ - name: Backup dashboard.js
+ run: |
+ cp web/gui/dashboard.js /tmp/dashboard.js
+ - name: Regenerate dashboard.js
+ run: |
+ autoreconf -ivf
+ ./configure --enable-maintainer-mode
+ make dist
+ - name: Compare generated Dashboard vs. Backed up Dashboard
+ run: |
+ diff -sNrdu /tmp/dashboard.js web/gui/dashboard.js
+ libressl-checks:
+ name: LibreSSL
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v2
+ - name: Build
+ run: >
+ docker run -v "$PWD":/netdata -w /netdata alpine:latest /bin/sh -c
+ 'apk add bash;
+ ./packaging/installer/install-required-packages.sh --dont-wait --non-interactive netdata;
+ apk del openssl openssl-dev;
+ apk add libressl libressl-dev;
+ autoreconf -ivf;
+ ./configure;
+ make;'
+ clang-checks:
+ name: Clang
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v2
+ - name: Build
+ run: |
+ docker build -f .github/dockerfiles/Dockerfile.clang .
+ dist-checks:
+ name: Dist
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v2
+ - name: Prepare environment
+ run: |
+ ./packaging/installer/install-required-packages.sh --dont-wait --non-interactive netdata
+ sudo apt-get install -y libjson-c-dev libipmimonitoring-dev libcups2-dev libsnappy-dev \
+ libprotobuf-dev libprotoc-dev libssl-dev protobuf-compiler \
+ libnetfilter-acct-dev
+ - name: Configure
+ run: |
+ autoreconf -ivf
+ ./configure \
+ --with-zlib \
+ --with-math \
+ --with-user=netdata \
+ CFLAGS=-O2
+ - name: Make dist
+ run: |
+ make dist
+ - name: Verify & Set distfile
+ run: |
+ ls -lah netdata-*.tar.gz
+ echo "DISTFILE=$(ls netdata-*.tar.gz)" >> $GITHUB_ENV
+ - name: Run run_install_with_dist_file.sh
+ run: |
+ ./.github/scripts/run_install_with_dist_file.sh "${DISTFILE}"
+ gitignore-check:
+ name: .gitignore
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v2
+ - name: Prepare environment
+ run: ./packaging/installer/install-required-packages.sh --dont-wait --non-interactive netdata
+ - name: Build netdata
+ run: ./netdata-installer.sh --dont-start-it --disable-telemetry --dont-wait --install /tmp/install
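+      # A non-empty porcelain status means the build created files that are
+      # neither ignored nor committed, i.e. .gitignore needs updating.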
+ - name: Check that repo is clean
+ run: if [ "$(git status --porcelain=v1 | wc -l)" -gt 0 ] ; then exit 1 ; fi
diff --git a/.github/workflows/coverity.yml b/.github/workflows/coverity.yml
new file mode 100644
index 0000000..926257d
--- /dev/null
+++ b/.github/workflows/coverity.yml
@@ -0,0 +1,34 @@
+---
+# Runs coverity-scan.sh every 24h on `master`
+name: Coverity Scan
+on:
+ schedule:
+ - cron: '0 1 * * *'
+ pull_request:
+ paths:
+ - .github/workflows/coverity.yml
+ - coverity-scan.sh
+jobs:
+ coverity:
+ if: github.repository == 'netdata/netdata'
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v2
+ - name: Prepare environment
+ env:
+ DEBIAN_FRONTEND: 'noninteractive'
+ run: |
+ ./packaging/installer/install-required-packages.sh \
+ --dont-wait --non-interactive netdata
+ sudo apt-get install -y libjson-c-dev libipmimonitoring-dev \
+ libcups2-dev libsnappy-dev libprotobuf-dev \
+ libprotoc-dev libssl-dev protobuf-compiler \
+ libnetfilter-acct-dev
+ - name: Run coverity-scan
+ env:
+ REPOSITORY: 'netdata/netdata'
+ COVERITY_SCAN_TOKEN: ${{ secrets.COVERITY_SCAN_TOKEN }}
+ COVERITY_SCAN_SUBMIT_MAIL: ${{ secrets.COVERITY_SCAN_SUBMIT_MAIL }}
+ run: |
+ ./coverity-scan.sh --with-install
diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml
new file mode 100644
index 0000000..04f91bf
--- /dev/null
+++ b/.github/workflows/docker.yml
@@ -0,0 +1,59 @@
+---
+name: Docker
+on:
+ push:
+ branches:
+ - master
+ paths:
+ - '.github/workflows/docker.yml'
+ - 'netdata-installer.sh'
+ - 'packaging/**'
+ pull_request:
+ paths:
+ - '.github/workflows/docker.yml'
+ - 'netdata-installer.sh'
+ - 'packaging/**'
+ workflow_dispatch:
+ inputs:
+ version:
+        description: Version Tag
+ default: nightly
+ required: true
+jobs:
+ docker-build:
+ name: Docker Build
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v2
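+      # Exactly one of the next three steps runs, deciding whether to push
+      # images and which tags to apply based on how the workflow was triggered.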
+ - name: Determine if we should push changes and which tags to use
+ if: github.event_name == 'workflow_dispatch' && github.event.inputs.version != 'nightly'
+ run: |
+ echo "publish=true" >> $GITHUB_ENV
+ echo "tags=netdata/netdata:latest,netdata/netdata:stable,netdata/netdata:${{ github.event.inputs.version }}" >> $GITHUB_ENV
+ - name: Determine if we should push changes and which tags to use
+ if: github.event_name == 'workflow_dispatch' && github.event.inputs.version == 'nightly'
+ run: |
+ echo "publish=true" >> $GITHUB_ENV
+ echo "tags=netdata/netdata:latest,netdata/netdata:edge" >> $GITHUB_ENV
+ - name: Determine if we should push changes and which tags to use
+ if: github.event_name != 'workflow_dispatch'
+ run: |
+ echo "publish=false" >> $GITHUB_ENV
+ echo "tags=netdata/netdata:test" >> $GITHUB_ENV
+ - name: Setup QEMU
+ uses: docker/setup-qemu-action@v1
+ - name: Setup Buildx
+ uses: docker/setup-buildx-action@v1
+ - name: Docker Hub Login
+ if: github.event_name == 'workflow_dispatch'
+ uses: docker/login-action@v1
+ with:
+ username: ${{ secrets.DOCKER_HUB_USERNAME }}
+ password: ${{ secrets.DOCKER_HUB_PASSWORD }}
+ - name: Docker Build
+ uses: docker/build-push-action@v2
+ with:
+ platforms: linux/amd64,linux/i386,linux/arm/v7,linux/arm64
+ push: ${{ env.publish }}
+ tags: ${{ env.tags }}
diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml
new file mode 100644
index 0000000..2a4fe87
--- /dev/null
+++ b/.github/workflows/docs.yml
@@ -0,0 +1,25 @@
+---
+name: Docs
+on:
+ push:
+ branches:
+ - master
+ paths:
+ - '**.md'
+ pull_request:
+ paths:
+ - '**.md'
+jobs:
+ markdown-link-check:
+ name: Broken Links
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v2
+ - name: Run link check
+ uses: gaurav-nelson/github-action-markdown-link-check@v1
+ with:
+ use-quiet-mode: 'no'
+ use-verbose-mode: 'yes'
+ check-modified-files-only: 'yes'
+ config-file: '.mlc_config.json'
diff --git a/.github/workflows/labeler.yml b/.github/workflows/labeler.yml
new file mode 100644
index 0000000..24842e7
--- /dev/null
+++ b/.github/workflows/labeler.yml
@@ -0,0 +1,16 @@
+---
+# Handles labelling of PRs.
+name: Pull Request Labeler
+on:
+ schedule:
+ - cron: '*/5 * * * *'
+jobs:
+ labeler:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: docker://docker.io/ilyam8/periodic-pr-labeler:v0.1.0
+ if: github.repository == 'netdata/netdata'
+ env:
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ GITHUB_REPOSITORY: ${{ github.repository }}
+ LABEL_MAPPINGS_FILE: .github/labeler.yml
diff --git a/.github/workflows/review.yml b/.github/workflows/review.yml
new file mode 100644
index 0000000..ca8f6de
--- /dev/null
+++ b/.github/workflows/review.yml
@@ -0,0 +1,94 @@
+---
+# Runs various ReviewDog-based checks against PRs, suggesting changes to improve quality
+name: Review
+on:
+ pull_request:
+env:
+ run_eslint: 0
+ run_hadolint: 0
+ run_shellcheck: 0
+ run_yamllint: 0
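+# Each job flips its run_* flag to 1 only when the PR touches matching files,
+# so the corresponding linter is skipped otherwise.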
+jobs:
+ eslint:
+ name: eslint
+ runs-on: ubuntu-latest
+ steps:
+ - name: Git clone repository
+ uses: actions/checkout@v2
+ with:
+ fetch-depth: 0
+ - name: Check files
+ run: |
+ if git diff --name-only origin/${{ github.base_ref }} HEAD | grep -Eq '*\.js|node\.d\.plugin\.in' ; then
+ echo 'run_eslint=1' >> $GITHUB_ENV
+ fi
+ - name: Run eslint
+ if: env.run_eslint == 1
+ uses: reviewdog/action-eslint@v1
+ with:
+ github_token: ${{ secrets.GITHUB_TOKEN }}
+ reporter: github-pr-check
+ eslint_flags: '.'
+
+ hadolint:
+ name: hadolint
+ runs-on: ubuntu-latest
+ steps:
+ - name: Git clone repository
+ uses: actions/checkout@v2
+ with:
+ fetch-depth: 0
+ - name: Check files
+ run: |
+ if git diff --name-only origin/${{ github.base_ref }} HEAD | grep -Eq '*Dockerfile*' ; then
+ echo 'run_hadolint=1' >> $GITHUB_ENV
+ fi
+ - name: Run hadolint
+ if: env.run_hadolint == 1
+ uses: reviewdog/action-hadolint@v1
+ with:
+ github_token: ${{ secrets.GITHUB_TOKEN }}
+ reporter: github-pr-check
+
+ shellcheck:
+ name: shellcheck
+ runs-on: ubuntu-latest
+ steps:
+ - name: Git clone repository
+ uses: actions/checkout@v2
+ with:
+ fetch-depth: 0
+ - name: Check files
+ run: |
+ if git diff --name-only origin/${{ github.base_ref }} HEAD | grep -Eq '*\.sh.*' ; then
+ echo 'run_shellcheck=1' >> $GITHUB_ENV
+ fi
+ - name: Run shellcheck
+ if: env.run_shellcheck == 1
+ uses: reviewdog/action-shellcheck@v1
+ with:
+ github_token: ${{ secrets.GITHUB_TOKEN }}
+ reporter: github-pr-check
+ path: "."
+ pattern: "*.sh*"
+ exclude: "./.git/*"
+
+ yamllint:
+ name: yamllint
+ runs-on: ubuntu-latest
+ steps:
+ - name: Git clone repository
+ uses: actions/checkout@v2
+ with:
+ fetch-depth: 0
+ - name: Check files
+ run: |
+ if git diff --name-only origin/${{ github.base_ref }} HEAD | grep -Eq '*\.ya?ml|python\.d/.*\.conf' ; then
+ echo 'run_yamllint=1' >> $GITHUB_ENV
+ fi
+ - name: Run yamllint
+ if: env.run_yamllint == 1
+ uses: reviewdog/action-yamllint@v1
+ with:
+ github_token: ${{ secrets.GITHUB_TOKEN }}
+ reporter: github-pr-check
diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml
new file mode 100644
index 0000000..ef6bfbc
--- /dev/null
+++ b/.github/workflows/tests.yml
@@ -0,0 +1,88 @@
+---
+# Runs Tests on Pushes to `master` and Pull Requests
+name: Tests
+on:
+ push:
+ branches:
+ - master
+ paths:
+ - 'CMakeLists.txt'
+ - '**.c'
+ - '**.h'
+ pull_request:
+ paths:
+ - 'CMakeLists.txt'
+ - '**.c'
+ - '**.h'
+jobs:
+ unit-tests-legacy:
+ name: Unit Tests (legacy)
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v2
+ - name: Prepare environment
+ run: |
+ ./packaging/installer/install-required-packages.sh --dont-wait --non-interactive netdata-all
+ sudo apt-get install -y libjson-c-dev libipmimonitoring-dev libcups2-dev libsnappy-dev \
+ libprotobuf-dev libprotoc-dev libssl-dev protobuf-compiler \
+ libnetfilter-acct-dev
+ - name: Run ./tests/run-unit-tests.sh
+ env:
+ CFLAGS: "-O1 -DNETDATA_INTERNAL_CHECKS=1 -DNETDATA_VERIFY_LOCKS=1"
+ run: |
+ ./tests/run-unit-tests.sh
+
+ unit-tests-cmocka:
+ name: Unit Tests (cmocka)
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v2
+ - name: Prepare environment
+ run: |
+ ./packaging/installer/install-required-packages.sh --dont-wait --non-interactive netdata-all
+ sudo apt-get install -y libjson-c-dev libipmimonitoring-dev libcups2-dev libsnappy-dev \
+ libprotobuf-dev libprotoc-dev libssl-dev protobuf-compiler \
+ libnetfilter-acct-dev libmongoc-dev libcmocka-dev
+ - name: Configure
+ run: |
+ autoreconf -ivf
+ ./configure
+ # XXX: Work-around for bug with libbson-1.0 in Ubuntu 18.04
+ # See: https://bugs.launchpad.net/ubuntu/+source/libmongoc/+bug/1790771
+ # https://jira.mongodb.org/browse/CDRIVER-2818
+ - name: Fix libbson
+ run: |
+ pushd /usr/lib || exit 1
+ sudo ln -s /usr/include .
+ popd || exit 1
+ - name: Build
+ run: |
+ mkdir build-tmp
+ cd build-tmp
+ cmake \
+ -D UNIT_TESTING=1 \
+ -D BUILD_TESTING=1 \
+ -D CMAKE_BUILD_TYPE="Debug" \
+ -D BSON_LIBRARY=/usr/lib/x86_64-linux-gnu/libbson-1.0.so \
+ -D MONGOC_LIBRARY=/usr/lib/x86_64-linux-gnu/libmongoc-1.0.so \
+ ..
+ make
+ - name: Run ctest
+ run: |
+ cd build-tmp
+ ctest
+ - name: Prepare Artifacts
+ if: always()
+ run: |
+ mkdir logs
+ pushd build-tmp || exit 1
+ find . -type f -name '*.log' -exec cp {} ../logs/ \;
+ popd || exit 1
+ - name: Upload Artifacts
+ uses: actions/upload-artifact@v2.2.2
+ if: always()
+ with:
+ name: logs
+ path: logs
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..2049bf4
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,210 @@
+# Secrets
+gcs-credentials.json
+
+.deps
+.libs
+.dirstamp
+.project
+.pydevproject
+
+*.o
+*.a
+config.h.in
+Makefile.in
+*~
+.*.swp
+*.old
+*.log
+*.pyc
+
+Makefile
+aclocal.m4
+autom4te.cache
+compile
+config.guess
+config.h
+config.status
+config.sub
+configure
+depcomp
+install-sh
+libtool
+ltmain.sh
+missing
+stamp-h1
+netdata.spec
+sha256sums.txt
+
+# netdata binaries
+netdata
+netdatacli
+!netdata/
+upload/
+artifacts/
+
+apps.plugin
+!apps.plugin/
+
+freeipmi.plugin
+!freeipmi.plugin/
+
+cups.plugin
+!cups.plugin/
+
+nfacct.plugin
+!nfacct.plugin/
+
+xenstat.plugin
+!xenstat.plugin/
+
+perf.plugin
+!perf.plugin/
+
+slabinfo.plugin
+!slabinfo.plugin/
+
+cgroup-network
+!cgroup-network/
+
+ebpf.plugin
+collectors/ebpf.plugin/reset_netdata_trace.sh
+!ebpf.plugin/
+
+# protoc generated files
+*.pb.cc
+*.pb.h
+
+# installation artifacts
+packaging/installer/.environment.sh
+*.tar.*
+*.run
+
+# netdata makeself downloads
+packaging/makeself/tmp/
+
+# coverity
+cov-int/
+netdata-coverity-analysis.tgz
+.coverity-scan.conf
+
+.cproject/
+.idea/
+.vscode/
+.project/
+.settings/
+README
+TODO.md
+TODO.txt
+
+web/gui/chart-info/
+web/gui/control.html
+web/gui/datasource.css
+web/gui/gadget.xml
+web/gui/index_new.html
+web/gui/version.txt
+
+# related to karma/javascript/node
+/node_modules/
+/coverage/
+
+system/netdata-lsb
+system/netdata-openrc
+system/netdata-init-d
+system/netdata.logrotate
+system/netdata.service
+system/netdata.service.*
+system/netdata-updater.service
+!system/netdata.service.in
+!system/netdata.service.*.in
+system/netdata.plist
+system/netdata-freebsd
+system/edit-config
+system/netdata.crontab
+
+daemon/anonymous-statistics.sh
+daemon/get-kubernetes-labels.sh
+
+health/notifications/alarm-notify.sh
+claim/netdata-claim.sh
+collectors/cgroups.plugin/cgroup-name.sh
+collectors/tc.plugin/tc-qos-helper.sh
+collectors/charts.d.plugin/charts.d.plugin
+collectors/node.d.plugin/node.d.plugin
+collectors/python.d.plugin/python.d.plugin
+collectors/fping.plugin/fping.plugin
+collectors/ioping.plugin/ioping.plugin
+collectors/go.d.plugin
+web/netdata-switch-dashboard.sh
+
+# installer generated files
+/netdata-uninstaller.sh
+/netdata-updater.sh
+
+# cmake files
+cmake-build-debug/
+cmake-build-release/
+CMakeCache.txt
+CMakeFiles/
+cmake_install.cmake
+.cmake
+compile_commands.json
+
+# jetbrains IDE
+.jetbrains*
+
+.DS_Store
+webcopylocal*
+
+# converted diagrams
+diagrams/*.png
+diagrams/*.svg
+diagrams/*.atxt
+diagrams/plantuml.jar
+
+# cppcheck
+cppcheck-build/
+
+venv/
+
+# debugging / profiling
+makeself/debug/
+tests/profile/benchmark-dictionary
+tests/profile/benchmark-registry
+tests/profile/test-eval
+tests/profile/benchmark-line-parsing
+tests/profile/benchmark-procfile-parser
+tests/profile/benchmark-value-pairs
+tests/profile/statsd-stress
+tests/health_mgmtapi/health-cmdapi-test.sh
+oprofile_data/
+vgcore.*
+callgrind.out.*
+gmon.out
+gmon.txt
+sitespeed-result/
+tests/acls/acl.sh
+tests/urls/request.sh
+tests/alarm_repetition/alarm.sh
+tests/template_dimension/template_dim.sh
+aclk/legacy/tests/install-fake-charts.d.sh
+
+# tests and temp files
+test-driver
+**/tests/*_testdriver
+**/tests/*_testdriver.trs
+python.d/python-modules-installer.sh
+
+# documentation generated files
+docs/generator/src
+docs/generator/build
+docs/generator/doc
+docs/generator/localization
+docs/generator/mkdocs.yml
+
+.environment.sh
+
+#CLion files
+netdata.cbp
+
+# External dependencies
+externaldeps/
diff --git a/.lgtm.yml b/.lgtm.yml
new file mode 100644
index 0000000..38f6675
--- /dev/null
+++ b/.lgtm.yml
@@ -0,0 +1,24 @@
+---
+# LGTM does a good job at classifying files, but sometimes it needs some help.
+# To classify files which shouldn't be checked we need to define where such
+# files are located and manually assign them one of possible categories:
+# docs, generated, library, template, test
+# More information can be found in lgtm documentation:
+# https://help.semmle.com/lgtm-enterprise/user/help/file-classification.html#built-in-tags
+# https://lgtm.com/help/lgtm/lgtm.yml-configuration-file
+path_classifiers:
+ library:
+ - collectors/python.d.plugin/python_modules/third_party/
+ - collectors/python.d.plugin/python_modules/urllib3/
+ - collectors/python.d.plugin/python_modules/pyyaml2/
+ - collectors/python.d.plugin/python_modules/pyyaml3/
+ - collectors/node.d.plugin/node_modules/lib/
+ - collectors/node.d.plugin/node_modules/asn1-ber.js
+ - collectors/node.d.plugin/node_modules/extend.js
+ - collectors/node.d.plugin/node_modules/net-snmp.js
+ - collectors/node.d.plugin/node_modules/pixl-xml.js
+ - web/gui/lib/
+ - web/gui/src/
+ - web/gui/css/
+ test:
+ - tests/
diff --git a/.mlc_config.json b/.mlc_config.json
new file mode 100644
index 0000000..8164bc4
--- /dev/null
+++ b/.mlc_config.json
@@ -0,0 +1,20 @@
+{
+ "ignorePatterns": [
+ {
+ "pattern": "^https:\/\/pi-hole\.net\b"
+ },
+ {
+ "pattern": "^https:\/\/docs\.stackpulse\.io\b"
+ }
+ ],
+ "replacementPatterns": [
+ {
+ "pattern": "^/",
+ "replacement": "/github/workspace/"
+ }
+ ],
+ "aliveStatusCodes": [
+ 200,
+ 429
+ ]
+}
diff --git a/.remarkignore b/.remarkignore
new file mode 100644
index 0000000..83b6947
--- /dev/null
+++ b/.remarkignore
@@ -0,0 +1 @@
+CHANGELOG.md \ No newline at end of file
diff --git a/.remarkrc.js b/.remarkrc.js
new file mode 100644
index 0000000..55793e4
--- /dev/null
+++ b/.remarkrc.js
@@ -0,0 +1,111 @@
+// Source: https://github.com/codacy/codacy-remark-lint/raw/master/.remarkrc.js
+
+exports.settings = {
+ gfm: true,
+ commonmark: true,
+ looseTable: false,
+ spacedTable: false,
+ paddedTable: false,
+ fences: true,
+ rule: "-",
+ ruleRepetition: 3,
+ emphasis: "*",
+ strong: "*",
+ bullet: "-",
+ listItemIndent: "1",
+ incrementListMarker: true
+};
+
+const remarkPresetLintMarkdownStyleGuide = {
+ plugins: require("remark-preset-lint-markdown-style-guide").plugins.filter(
+ function(elem) {
+ return elem != require("remark-lint-no-duplicate-headings");
+ }
+ )
+};
+
+exports.plugins = [
+ require("remark-preset-lint-consistent"),
+ require("remark-preset-lint-recommended"),
+ remarkPresetLintMarkdownStyleGuide,
+ [require("remark-lint-no-dead-urls"), { skipOffline: true }],
+ require("remark-lint-heading-whitespace"),
+ [require("remark-lint-maximum-line-length"), 120],
+ [require("remark-lint-maximum-heading-length"), 120],
+ [require("remark-lint-list-item-indent"), "tab-size"],
+ [require("remark-lint-list-item-spacing"), false],
+ [require("remark-lint-strong-marker"), "*"],
+ [require("remark-lint-emphasis-marker"), "_"],
+ [require("remark-lint-unordered-list-marker-style"), "-"],
+ [require("remark-lint-ordered-list-marker-style"), "."],
+ [require("remark-lint-ordered-list-marker-value"), "ordered"],
+ /*[
+ require("remark-lint-write-good"),
+ [
+ "warn",
+ {
+ passive: false,
+ illusion: true,
+ so: true,
+ thereIs: true,
+ weasel: true,
+ adverb: true,
+ tooWordy: true,
+ cliches: true,
+ eprime: false
+ }
+ ]
+ ],*/
+ require("remark-validate-links"),
+ require("remark-frontmatter"),
+ /*[
+ require("remark-retext"),
+ require("unified")().use({
+ plugins: [
+ require("retext-english"),
+ require("retext-syntax-urls"),
+ [
+ require("retext-spell"),
+ {
+ ignoreLiteral: true,
+ dictionary: require("dictionary-en-us"),
+ ...personalDictionary
+ }
+ ],
+ [
+ require("retext-sentence-spacing"),
+ {
+ preferred: 1
+ }
+ ],
+ require("retext-repeated-words"),
+ require("retext-usage"),
+ require("retext-indefinite-article"),
+ require("retext-redundant-acronyms"),
+ [
+ require("retext-contractions"),
+ {
+ straight: true,
+ allowLiteral: true
+ }
+ ],
+ require("retext-diacritics"),
+ [
+ require("retext-quotes"),
+ {
+ preferred: "straight"
+ }
+ ],
+ require("retext-equality"),
+ require("retext-passive"),
+ require("retext-profanities"),
+ [
+ require("retext-readability"),
+ {
+ age: 20
+ }
+ ]
+ ]
+ })
+ ]*/
+];
diff --git a/.squash.yml b/.squash.yml
new file mode 100644
index 0000000..58e5910
--- /dev/null
+++ b/.squash.yml
@@ -0,0 +1,7 @@
+deployments:
+ netdata:
+ auto_deploy_on_commits: true
+ filename: ./packaging/docker/Dockerfile
+ context_path: ./
+ port_forwarding: 80:19999
+ run_options: -v /proc:/host/proc:ro -v /sys:/host/sys:ro -v /var/run/docker.sock:/var/run/docker.sock:ro --cap-add SYS_PTRACE --security-opt apparmor=unconfined
diff --git a/.travis.yml b/.travis.yml
new file mode 100644
index 0000000..8c95f5d
--- /dev/null
+++ b/.travis.yml
@@ -0,0 +1,460 @@
+---
+dist: focal
+language: c
+
+addons:
+ apt:
+ packages: ['moreutils']
+
+env:
+ global:
+ - RELEASE_CHANNEL=nightly
+
+before_install:
+ - exec > >(ts -s '%H:%M:%.S ') 2>&1
+ - source .travis/utils.sh
+
+# Install dependencies for all, once
+#
+install:
+ - sudo apt-get install -y libuv1-dev liblz4-dev libjudy-dev libcap2-bin zlib1g-dev uuid-dev fakeroot libipmimonitoring-dev libmnl-dev libnetfilter-acct-dev gnupg python3-pip
+  - sudo pip install git-semver==0.3.2 # 11/Sep/2019: git-semver tip was broken, so we pin the last known-good release
+ - source tests/installer/slack.sh
+ - export NOTIF_CHANNEL="automation-beta"
+ - if [ "${TRAVIS_REPO_SLUG}" = "netdata/netdata" ]; then export NOTIF_CHANNEL="automation"; fi;
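+  # packaging/version holds the 'git describe' output (e.g. v1.29.3-42-gdeadbee);
+  # keeping only the first '-'-separated field yields the base tag (e.g. v1.29.3).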
+ - export BUILD_VERSION="$(cat packaging/version | cut -d'-' -f1)"
+ - export LATEST_RELEASE_VERSION="$(cat packaging/version | cut -d'-' -f1)"
+ - export LATEST_RELEASE_DATE="$(git log -1 --format=%aD "${LATEST_RELEASE_VERSION}" | cat)"
+ - if [[ "${TRAVIS_COMMIT_MESSAGE}" = *"[Build latest]"* ]]; then export BUILD_VERSION="$(cat packaging/version | cut -d'-' -f1,2 | sed -e 's/-/./g').latest"; fi;
+ - export DEPLOY_REPO="netdata" # Default production packaging repository
+ - export PACKAGING_USER="netdata" # Standard package cloud account
+ - if [[ "${TRAVIS_COMMIT_MESSAGE}" = *"[Build latest]"* ]]; then export DEPLOY_REPO="netdata-edge"; fi;
+ - export PACKAGE_CLOUD_RETENTION_DAYS=30
+ - if [ ! "${TRAVIS_REPO_SLUG}" = "netdata/netdata" ]; then export DEPLOY_REPO="netdata-devel"; fi;
+ # These are release-related artifacts and have to be evaluated before we start doing conditional checks inside stages
+ - source ".travis/tagger.sh"
+ - export GIT_TAG="$(git tag --points-at)"
+
+
+# Setup notification system
+#
+notifications:
+ webhooks:
+ urls:
+ - https://app.fossa.io/hooks/travisci
+
+# Define the stage sequence and conditionals
+#
+stages:
+ # Mandatory runs, we always want these executed
+ - name: Build process
+ if: commit_message =~ /^((?!\[Package (amd64|arm64|i386) (DEB|RPM)( .*)?\]).)*$/
+
+ # Nightly operations
+ - name: Nightly operations
+ if: branch = master AND type = cron AND env(RUN_NIGHTLY) = yes
+
+ - name: Nightly release
+ if: branch = master AND type = cron AND env(RUN_NIGHTLY) = yes
+ - name: Trigger deb and rpm package build (nightly release)
+ if: branch = master AND type = cron AND env(RUN_NIGHTLY) = yes
+
+ # Scheduled releases
+ - name: Support activities on main branch
+ if: branch = master AND type != pull_request AND type != cron AND repo = netdata/netdata
+
+ # We don't run on release candidates
+ - name: Publish for release
+ if: >-
+ branch = master
+ AND type != pull_request
+ AND type != cron
+ AND tag !~ /(-rc)/
+ AND commit_message =~ /\[netdata (release candidate|(major|minor|patch) release)\]/
+ - name: Trigger deb and rpm package build (release)
+ if: >-
+ branch = master
+ AND type != pull_request
+ AND type != cron
+ AND tag !~ /(-rc)/
+ AND commit_message =~ /\[netdata (release candidate|(major|minor|patch) release)\]/
+
+ # Build DEB packages under special conditions
+ - name: Package ubuntu/* and debian/*
+ if: type != cron AND type != pull_request AND branch = master
+
+ # Build RPM packages under special conditions
+ - name: Package centos, fedora and opensuse
+ if: type != cron AND type != pull_request AND branch = master
+
+
+# Define stage implementation details
+#
+jobs:
+ # This is a hook to help us introduce "soft" errors on our process
+ allow_failures:
+ - env: ALLOW_SOFT_FAILURE_HERE=true
+ include:
+ # Ensure netdata code builds successfully
+ - stage: Build process
+
+ name: Standard netdata build
+ script: fakeroot ./netdata-installer.sh --install $HOME --dont-wait --dont-start-it --enable-plugin-nfacct --enable-plugin-freeipmi --disable-lto
+ env: CFLAGS='-O1 -Wall -Wextra -Wformat-signedness -fstack-protector-all -fno-common -DNETDATA_INTERNAL_CHECKS=1 -D_FORTIFY_SOURCE=2 -DNETDATA_VERIFY_LOCKS=1'
+      after_failure: post_message "TRAVIS_MESSAGE" "<!here> standard netdata build is failing (Still don't know which one, will improve soon)"
+
+ - name: Build/Install for ubuntu 20.04 (not containerized)
+ script: fakeroot ./netdata-installer.sh --dont-wait --dont-start-it --install $HOME
+      after_failure: post_message "TRAVIS_MESSAGE" "Build/Install failed on ubuntu 20.04"
+
+ - name: Build/install for CentOS 7 (Containerized)
+ script: docker run -it -v "${PWD}:/netdata:rw" -w /netdata "netdata/os-test:centos7" ./netdata-installer.sh --dont-wait --dont-start-it --install /tmp
+ after_failure: post_message "TRAVIS_MESSAGE" "Build/Install failed on CentOS 7"
+
+ - name: DEB package test
+ git:
+ depth: false
+ before_install:
+ - sudo apt-get install -y wget lxc python3-lxc lxc-templates dh-make git-buildpackage build-essential libdistro-info-perl
+ before_script:
+ - export PACKAGES_DIRECTORY="$(mktemp -d -t netdata-packaging-contents-dir-XXXXXX)" && echo "Created packaging directory ${PACKAGES_DIRECTORY}"
+ script:
+ - echo "GIT Branch:" && git branch
+ - echo "Last commit:" && git log -1
+ - echo "GIT Describe:" && git describe
+ - echo "packaging/version:" && cat packaging/version
+ - echo "Creating LXC environment for the build" && sudo -E .travis/package_management/create_lxc_for_build.sh
+ - echo "Building package in container" && sudo -E .travis/package_management/build_package_in_container.sh
+ - sudo chown -R root:travis "/var/lib/lxc"
+ - sudo chmod -R 750 "/var/lib/lxc"
+ - echo "Preparing DEB packaging contents for upload" && sudo -E .travis/package_management/prepare_packages.sh
+ env:
+ - BUILDER_NAME="builder" BUILD_DISTRO="debian" BUILD_RELEASE="buster" BUILD_STRING="debian/buster"
+ - PACKAGE_TYPE="deb" REPO_TOOL="apt-get"
+ after_failure: post_message "TRAVIS_MESSAGE" "Netdata DEB package build check failed."
+
+ - name: RPM package test
+ git:
+ depth: false
+ before_install:
+ - sudo apt-get install -y wget lxc lxc-templates python3-lxc
+ before_script:
+ - export PACKAGES_DIRECTORY="$(mktemp -d -t netdata-packaging-contents-dir-XXXXXX)" && echo "Created packaging directory ${PACKAGES_DIRECTORY}"
+ script:
+ - echo "GIT Branch:" && git branch
+ - echo "Last commit:" && git log -1
+ - echo "GIT Describe:" && git describe
+ - echo "packaging/version:" && cat packaging/version
+ - echo "Creating LXC environment for the build" && sudo -E .travis/package_management/create_lxc_for_build.sh
+ - echo "Building package in container" && sudo -E .travis/package_management/build_package_in_container.sh
+ - sudo chmod -R 755 "/var/lib/lxc"
+ - echo "Preparing RPM packaging contents for upload" && sudo -E .travis/package_management/prepare_packages.sh
+ env:
+ - BUILDER_NAME="builder" BUILD_DISTRO="fedora" BUILD_RELEASE="32" BUILD_STRING="fedora/32"
+ - PACKAGE_TYPE="rpm" REPO_TOOL="dnf"
+ after_failure: post_message "TRAVIS_MESSAGE" "Netdata RPM package build check failed."
+
+ - stage: Support activities on main branch
+ name: Generate changelog for release (only on special and tagged commit msg)
+ before_script: post_message "TRAVIS_MESSAGE" "Support activities on main branch initiated" "${NOTIF_CHANNEL}"
+ script:
+ - echo "GIT Branch:" && git branch
+ - echo "Last commit:" && git log -1
+ - echo "GIT Describe:" && git describe
+ - echo "packaging/version:" && cat packaging/version
+ - if [[ -z "${GIT_TAG}" ]]; then echo "Running set tag for release" && set_tag_for_release; fi;
+ - .travis/generate_changelog_and_tag_release.sh
+ after_failure: post_message "TRAVIS_MESSAGE" "<!here> Changelog generation and tag of release, failed"
+ git:
+ depth: false
+ if: commit_message =~ /\[netdata (release candidate|(major|minor|patch) release)\]/ AND tag !~ /(-rc)/ OR (env(GIT_TAG) IS present AND NOT env(GIT_TAG) IS blank)
+
+ # ###### Packaging workflow section ######
+ # References:
+ # https://us.images.linuxcontainers.org
+ # https://packagecloud.io/docs#install_repo
+
+ # TODO: This section is stale, will be aligned with the RPM implementation when we get to DEB packaging
+ - stage: Package ubuntu/* and debian/*
+ _template: &DEB_TEMPLATE
+ git:
+ depth: false
+ before_install:
+ - sudo apt-get install -y wget lxc python3-lxc lxc-templates dh-make git-buildpackage build-essential libdistro-info-perl
+ - source tests/installer/slack.sh
+ before_script:
+ - export PACKAGES_DIRECTORY="$(mktemp -d -t netdata-packaging-contents-dir-XXXXXX)" && echo "Created packaging directory ${PACKAGES_DIRECTORY}"
+ script:
+ - echo "GIT Branch:" && git branch
+ - echo "Last commit:" && git log -1
+ - echo "GIT Describe:" && git describe
+ - echo "packaging/version:" && cat packaging/version
+ - echo "Creating LXC environment for the build" && sudo -E .travis/package_management/create_lxc_for_build.sh
+ - echo "Building package in container" && sudo -E .travis/package_management/build_package_in_container.sh
+ - sudo chown -R root:travis "/var/lib/lxc"
+ - sudo chmod -R 750 "/var/lib/lxc"
+ - echo "Preparing DEB packaging contents for upload" && sudo -E .travis/package_management/prepare_packages.sh
+ after_failure: post_message "TRAVIS_MESSAGE" "Failed to build DEB for ${BUILD_STRING}.${BUILD_ARCH}"
+ before_deploy:
+ - .travis/package_management/yank_stale_pkg.sh "${PACKAGES_DIRECTORY}" "${BUILD_STRING}" || echo "No stale DEB found"
+ deploy:
+ - provider: packagecloud
+ repository: "${DEPLOY_REPO}"
+ username: "${PACKAGING_USER}"
+ token: "${PKG_CLOUD_TOKEN}"
+ dist: "${BUILD_STRING}"
+ local_dir: "${PACKAGES_DIRECTORY}"
+ skip_cleanup: true
+ on:
+            # Only deploy on ${USER}/netdata, master branch, when the packages directory is created
+ repo: ${TRAVIS_REPO_SLUG}
+ branch: "master"
+ condition: -d "${PACKAGES_DIRECTORY}"
+ after_deploy:
+ - if [ -n "${BUILDER_NAME}" ]; then rm -rf /home/${BUILDER_NAME}/* && echo "Cleared /home/${BUILDER_NAME} directory" || echo "Failed to clean /home/${BUILDER_NAME} directory"; fi;
+ - if [ -d "${PACKAGES_DIRECTORY}" ]; then rm -rf "${PACKAGES_DIRECTORY}"; fi;
+
+ name: "Build & Publish DEB package for ubuntu/bionic"
+ <<: *DEB_TEMPLATE
+ if: commit_message =~ /\[Package (amd64|arm64|i386) DEB( Ubuntu)?\]/
+ env:
+ - BUILDER_NAME="builder" BUILD_DISTRO="ubuntu" BUILD_RELEASE="bionic" BUILD_STRING="ubuntu/bionic"
+ - PACKAGE_TYPE="deb" REPO_TOOL="apt-get"
+ - ALLOW_SOFT_FAILURE_HERE=true
+
+ - name: "Build & Publish DEB package for ubuntu/xenial"
+ <<: *DEB_TEMPLATE
+ if: commit_message =~ /\[Package (amd64|arm64|i386) DEB( Ubuntu)?\]/
+ env:
+ - BUILDER_NAME="builder" BUILD_DISTRO="ubuntu" BUILD_RELEASE="xenial" BUILD_STRING="ubuntu/xenial"
+ - PACKAGE_TYPE="deb" REPO_TOOL="apt-get"
+ - ALLOW_SOFT_FAILURE_HERE=true
+
+ - name: "Build & Publish DEB package for ubuntu/focal"
+ <<: *DEB_TEMPLATE
+ if: commit_message =~ /\[Package (amd64|arm64) DEB( Ubuntu)?\]/
+ env:
+ - BUILDER_NAME="builder" BUILD_DISTRO="ubuntu" BUILD_RELEASE="focal" BUILD_STRING="ubuntu/focal"
+ - PACKAGE_TYPE="deb" REPO_TOOL="apt-get"
+ - ALLOW_SOFT_FAILURE_HERE=true
+
+ - name: "Build & Publish DEB package for ubuntu/groovy"
+ <<: *DEB_TEMPLATE
+ if: commit_message =~ /\[Package (amd64|arm64) DEB( Ubuntu)?\]/
+ env:
+ - BUILDER_NAME="builder" BUILD_DISTRO="ubuntu" BUILD_RELEASE="groovy" BUILD_STRING="ubuntu/groovy"
+ - PACKAGE_TYPE="deb" REPO_TOOL="apt-get"
+ - ALLOW_SOFT_FAILURE_HERE=true
+
+ - name: "Build & Publish DEB package for debian/buster"
+ <<: *DEB_TEMPLATE
+ if: commit_message =~ /\[Package (amd64|arm64|i386) DEB( Debian)?\]/
+ env:
+ - BUILDER_NAME="builder" BUILD_DISTRO="debian" BUILD_RELEASE="buster" BUILD_STRING="debian/buster"
+ - PACKAGE_TYPE="deb" REPO_TOOL="apt-get"
+ - ALLOW_SOFT_FAILURE_HERE=true
+
+ - name: "Build & Publish DEB package for debian/stretch"
+ <<: *DEB_TEMPLATE
+ if: commit_message =~ /\[Package (amd64|arm64|i386) DEB( Debian)?\]/
+ env:
+ - BUILDER_NAME="builder" BUILD_DISTRO="debian" BUILD_RELEASE="stretch" BUILD_STRING="debian/stretch"
+ - PACKAGE_TYPE="deb" REPO_TOOL="apt-get"
+ - ALLOW_SOFT_FAILURE_HERE=true
+
+ - stage: Package centos, fedora and opensuse
+ _template: &RPM_TEMPLATE
+ git:
+ depth: false
+ before_install:
+ - sudo apt-get install -y wget lxc lxc-templates python3-lxc
+ - source tests/installer/slack.sh
+ before_script:
+ - export PACKAGES_DIRECTORY="$(mktemp -d -t netdata-packaging-contents-dir-XXXXXX)" && echo "Created packaging directory ${PACKAGES_DIRECTORY}"
+ script:
+ - echo "GIT Branch:" && git branch
+ - echo "Last commit:" && git log -1
+ - echo "GIT Describe:" && git describe
+ - echo "packaging/version:" && cat packaging/version
+ - echo "Creating LXC environment for the build" && sudo -E .travis/package_management/create_lxc_for_build.sh
+ - echo "Building package in container" && sudo -E .travis/package_management/build_package_in_container.sh
+ - sudo chmod -R 755 "/var/lib/lxc"
+ - echo "Preparing RPM packaging contents for upload" && sudo -E .travis/package_management/prepare_packages.sh
+ after_failure: post_message "TRAVIS_MESSAGE" "Failed to build RPM for ${BUILD_STRING}.${BUILD_ARCH}"
+ before_deploy:
+ - .travis/package_management/yank_stale_pkg.sh "${PACKAGES_DIRECTORY}" "${BUILD_STRING}" || echo "No stale RPM found"
+ deploy:
+ - provider: packagecloud
+ repository: "${DEPLOY_REPO}"
+ username: "${PACKAGING_USER}"
+ token: "${PKG_CLOUD_TOKEN}"
+ dist: "${BUILD_STRING}"
+ local_dir: "${PACKAGES_DIRECTORY}"
+ skip_cleanup: true
+ on:
+ # Only deploy on ${USER}/netdata, master branch, when packages directory is created
+ repo: ${TRAVIS_REPO_SLUG}
+ branch: "master"
+ condition: -d "${PACKAGES_DIRECTORY}"
+ after_deploy:
+ - if [ -n "${BUILDER_NAME}" ]; then rm -rf /home/${BUILDER_NAME}/* && echo "Cleared /home/${BUILDER_NAME} directory" || echo "Failed to clean /home/${BUILDER_NAME} directory"; fi;
+ - if [ -d "${PACKAGES_DIRECTORY}" ]; then rm -rf "${PACKAGES_DIRECTORY}"; fi;
+
+ name: "Build & Publish RPM package for Enterprise Linux 7"
+ <<: *RPM_TEMPLATE
+ if: commit_message =~ /\[Package (amd64|arm64) RPM( Enterprise Linux)?\]/
+ env:
+ - BUILDER_NAME="builder" BUILD_DISTRO="centos" BUILD_RELEASE="7" BUILD_STRING="el/7"
+ - PACKAGE_TYPE="rpm" REPO_TOOL="yum"
+ - ALLOW_SOFT_FAILURE_HERE=true
+
+ - name: "Build & Publish RPM package for Fedora 32"
+ <<: *RPM_TEMPLATE
+ if: commit_message =~ /\[Package (amd64|arm64) RPM( Fedora)?\]/
+ env:
+ - BUILDER_NAME="builder" BUILD_DISTRO="fedora" BUILD_RELEASE="32" BUILD_STRING="fedora/32"
+ - PACKAGE_TYPE="rpm" REPO_TOOL="dnf"
+ - ALLOW_SOFT_FAILURE_HERE=true
+
+ - name: "Build & Publish RPM package for Fedora 33"
+ <<: *RPM_TEMPLATE
+ if: commit_message =~ /\[Package (amd64|arm64) RPM( Fedora)?\]/
+ env:
+ - BUILDER_NAME="builder" BUILD_DISTRO="fedora" BUILD_RELEASE="33" BUILD_STRING="fedora/33"
+ - PACKAGE_TYPE="rpm" REPO_TOOL="dnf"
+ - ALLOW_SOFT_FAILURE_HERE=true
+
+ - name: "Build & Publish RPM package for openSUSE 15.2"
+ <<: *RPM_TEMPLATE
+ if: commit_message =~ /\[Package (amd64|arm64) RPM( openSUSE)?\]/
+ env:
+ - BUILDER_NAME="builder" BUILD_DISTRO="opensuse" BUILD_RELEASE="15.2" BUILD_STRING="opensuse/15.2"
+ - PACKAGE_TYPE="rpm" REPO_TOOL="zypper"
+ - ALLOW_SOFT_FAILURE_HERE=true
+ # ###### End of packaging workflow section ###### #
+ # ############################################### #
+
+
+ # We only publish if a TAG has been set during packaging
+ - stage: Publish for release
+ name: Create release draft
+ git:
+ depth: false
+ env:
+ - RELEASE_CHANNEL=stable
+ before_script: post_message "TRAVIS_MESSAGE" "Drafting release on github" "${NOTIF_CHANNEL}"
+ script:
+ - echo "GIT Branch:" && git branch
+ - echo "Last commit:" && git log -1
+ - echo "GIT Describe:" && git describe
+ - echo "packaging/version:" && cat packaging/version
+ - echo "Generating release artifacts" && .travis/create_artifacts.sh # Could/should be a common storage to put this and share between jobs
+ - .travis/draft_release.sh
+ after_failure: post_message "TRAVIS_MESSAGE" "<!here> Draft release submission failed"
+
+ - name: Trigger Docker image build and publish
+ script:
+ - git checkout "${TRAVIS_BRANCH}" && export BUILD_VERSION="$(cat packaging/version | cut -d'-' -f1)"
+ - .travis/trigger_docker_build.sh "${GITHUB_TOKEN}" "${BUILD_VERSION}"
+ after_failure: post_message "TRAVIS_MESSAGE" "<!here> Failed to trigger docker build during release" "${NOTIF_CHANNEL}"
+
+ - stage: Trigger deb and rpm package build (release)
+ name: Trigger deb and rpm package build
+ script: .travis/trigger_package_generation.sh
+ after_failure: post_message "TRAVIS_MESSAGE" "<!here> Failed to trigger deb and rpm package build during release" "${NOTIF_CHANNEL}"
+
+
+ # This is the nightly pre-execution step (Jobs, preparatory steps for nightly, etc)
+ - stage: Nightly operations
+ name: Kickstart files integrity testing (extended)
+ script: ./tests/installer/checksums.sh
+
+ # This is generating the changelog for nightly release and publish it
+ - name: Generate nightly changelog
+ script:
+ - ".travis/nightlies.sh"
+ - ".travis/check_changelog_last_modification.sh"
+ after_failure: post_message "TRAVIS_MESSAGE" "<!here> Nightly changelog generation failed"
+ git:
+ depth: false
+
+ - name: Clean up package cloud nightly repository from old versions
+ script:
+ - DEPLOY_REPO="netdata-edge" .travis/package_management/old_package_purging.sh
+ - DEPLOY_REPO="netdata-devel" .travis/package_management/old_package_purging.sh
+
+ # This is the nightly execution step
+ #
+ - stage: Nightly release
+ _template: &NIGHTLY_TEMPLATE
+ git:
+ depth: false
+ script:
+ - echo "GIT Branch:" && git branch
+ - echo "Last commit:" && git log -1
+ - echo "GIT Describe:" && git describe
+ - echo "packaging/version:" && cat packaging/version
+        - "echo '{\"experimental\": true}' | sudo tee /etc/docker/daemon.json && sudo systemctl restart docker"
+ - packaging/docker/check_login.sh
+ && tick packaging/docker/build.sh
+ && packaging/docker/publish.sh
+ after_failure: post_message "TRAVIS_MESSAGE" "<!here> Nightly docker image publish failed"
+
+ name: Create nightly release artifacts, publish to GCS
+ script:
+ - echo "GIT Branch:" && git branch
+ - echo "Last commit:" && git log -1
+ - echo "GIT Describe:" && git describe
+ - echo "packaging/version:" && cat packaging/version
+ - .travis/create_artifacts.sh
+ after_failure: post_message "TRAVIS_MESSAGE" "<!here> Nightly artifacts generation failed"
+ git:
+ depth: false
+ before_deploy:
+ echo "Preparing creds under ${TRAVIS_REPO_SLUG}";
+ if [ "${TRAVIS_REPO_SLUG}" == "netdata/netdata" ]; then
+ openssl aes-256-cbc -K $encrypted_8daf19481253_key -iv $encrypted_8daf19481253_iv -in .travis/gcs-credentials.json.enc -out .travis/gcs-credentials.json -d;
+ else
+ echo "Beta deployment stage in progress";
+ openssl aes-256-cbc -K $encrypted_8daf19481253_key -iv $encrypted_8daf19481253_iv -in .travis/gcs-credentials.json.enc -out .travis/gcs-credentials.json -d;
+ fi;
+ deploy:
+ # Beta storage, used for testing purposes
+ - provider: gcs
+ edge:
+ branch: gcs-ng
+ project_id: netdata-storage
+ credentials: .travis/gcs-credentials.json
+ bucket: "netdata-dev-nightlies"
+ skip_cleanup: true
+ local_dir: "artifacts"
+ on:
+          # Beta storage: only deploy from forks, master branch, when the artifacts directory is created
+ repo: ${TRAVIS_REPO_SLUG}
+ branch: master
+ condition: -d "artifacts" && ${TRAVIS_REPO_SLUG} != "netdata/netdata"
+
+ # Production storage
+ - provider: gcs
+ edge:
+ branch: gcs-ng
+ project_id: netdata-storage
+ credentials: .travis/gcs-credentials.json
+ bucket: "netdata-nightlies"
+ skip_cleanup: true
+ local_dir: "artifacts"
+ on:
+ # Only deploy on netdata/netdata, master branch, when artifacts directory is created
+ repo: netdata/netdata
+ branch: master
+ condition: -d "artifacts" && ${TRAVIS_REPO_SLUG} = "netdata/netdata"
+ after_deploy: rm -f .travis/gcs-credentials.json
+
+ - name: Trigger Docker image build and publish
+ script: .travis/trigger_docker_build.sh "${GITHUB_TOKEN}" "nightly"
+ after_failure: post_message "TRAVIS_MESSAGE" "<!here> Failed to trigger docker build during nightly release" "${NOTIF_CHANNEL}"
+
+ - stage: Trigger deb and rpm package build (nightly release)
+ name: Trigger deb and rpm package build
+ script: .travis/trigger_package_generation.sh "[Build latest]"
+ after_failure: post_message "TRAVIS_MESSAGE" "<!here> Failed to trigger deb and rpm package build during nightly release" "${NOTIF_CHANNEL}"
diff --git a/.travis/README.md b/.travis/README.md
new file mode 100644
index 0000000..8927dd4
--- /dev/null
+++ b/.travis/README.md
@@ -0,0 +1,149 @@
+<!--
+---
+title: "Description of CI build configuration"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/.travis/README.md
+---
+-->
+
+# Description of CI build configuration
+
+## Variables needed by travis
+
+- GITHUB_TOKEN - GitHub token with push access to repository
+- DOCKER_USERNAME - Username (netdatabot) with write access to docker hub repository
+- DOCKER_PWD - Password to docker hub
+- encrypted_8daf19481253_key - key needed by openssl to decrypt GCS credentials file
+- encrypted_8daf19481253_iv - IV needed by openssl to decrypt GCS credentials file
+- COVERITY_SCAN_TOKEN - Token to allow coverity test analysis uploads
+- SLACK_USERNAME - This is required for the Slack notifications triggered by the Travis pipeline
+- SLACK_CHANNEL - This is the channel that Travis will post messages to
+- SLACK_NOTIFY_WEBHOOK_URL - This is the incoming webhook URL provided by the Slack integration. Visit Apps integration in Slack to generate the required hook
+- SLACK_BOT_NAME - This is the name your bot will appear with on Slack
+
+## CI workflow details
+Our CI pipeline is designed to help us identify and mitigate risks at all stages of implementation.
+To accommodate this need, we use [Travis CI](http://www.travis-ci.com) as our CI/CD tool.
+Our main areas of concern are:
+1) Only push code that works. That means failing fast, so that we can improve before the code reaches the public.
+
+2) Reduce time to market to a minimum, by streamlining the release process.
+   That means a lot of testing, consistency checks, and validations.
+
+3) Keep generated artifacts consistent. We should not allow broken software to reach the public.
+   When it does, it is embarrassing and hard to clean up.
+
+4) We are an innovative company, so we love to automate :)
+
+
+Having said that, here's a brief introduction to Netdata's improved CI/CD pipeline with Travis.
+Our CI/CD lifecycle contains three different execution entry points:
+1) A user opens a pull request against netdata/master: Travis runs a pipeline on the branch under that PR.
+2) A merge or commit happens on netdata/master. This triggers Travis to run, with two distinct cases in this scenario:
+   a) A user merges a pull request into netdata/master: Travis runs on master, after the merge.
+   b) A user pushes a commit/merge with a special keyword (mentioned later).
+      This triggers a release for either minor, major or release candidate versions, depending on the keyword.
+3) A scheduled job runs on master once per day: Travis runs on master at the scheduled interval.
+
+To accommodate all three entry points, our CI/CD workflow has a set of steps that run for all of them.
+Once all these steps succeed, the pipeline executes an additional subset of steps for entry points 2 and 3.
+In Travis terms these "steps" are "stages", and within each stage we execute a set of activities called "jobs".
+
+### Always run: Stages that run on all three execution entry points
+
+## Code quality, linting, syntax, code style
+At this early stage we iterate through a set of basic quality control checks:
+- Shell checking: Run linters for our various BASH scripts
+- Checksum validators: Run validators to ensure our installers and documentation are in sync
+- Dashboard validator: We ship a pre-generated dashboard.js script file, and we validate that it is up to date
+
+## Build process
+At this stage, basically, we build :-)
+We do a baseline check of our build artifacts to guarantee they are not broken.
+Briefly, our activities include:
+- Verify the Docker image builds successfully
+- Run the standard Netdata installer, to make sure we build & run properly
+- Do the same through 'make dist', as this is our stable channel for our kickstart files
+
+## Artifacts validation
+At this point we know our software builds; next we go through a set of checks to guarantee
+that our product meets certain expectations. At the current stage, we focus on basic capabilities
+like installing on different distributions, running the full install-run-update-install lifecycle, and so on.
+We are still enriching this with more use cases, to get closer to full stability of our software.
+Briefly, we currently evaluate the following activities:
+- Basic software unit testing (only run when changes happen that require it)
+- Non-containerized build and install on Ubuntu 14.04
+- Non-containerized build and install on Ubuntu 18.04
+- Running the full Netdata lifecycle (install, update, uninstall) on Ubuntu 18.04
+- Build and install on CentOS 7
+(More to come)
+
+### Nightly operations: Stages that run daily under a cronjob
+The nightly stages cover the daily activities that produce our latest nightly releases.
+We also maintain a couple of cronjobs that run during the night to provide us with deeper insights,
+for example Coverity scanning or extended kickstart checksum checks.
+
+## Nightly operations
+At this stage we run the scheduled jobs: the nightly changelog generator, Coverity scans,
+the labeler for our issues, and extended kickstart file checksum validations.
+
+## Nightly release
+During this stage we build and publish the latest Docker images, prepare the nightly artifacts,
+and deploy those artifacts to our Google Cloud storage.
+
+
+### Publishing
+Publishing is responsible for executing the major/minor/patch releases and is separated
+into two stages: packaging preparation and publishing.
+
+## Packaging for release
+During packaging we prepare the release changelog information and run the labeler.
+
+## Publish for release
+The publishing stage is the most complex part of the release. This is the stage where we generate and publish Docker images,
+prepare the release artifacts, and draft the release.
+
+### Package Management workflows
+As part of our goal to provide the best support to our customers, we have created a set of CI workflows that automatically produce
+DEB and RPM packages for multiple distributions. These workflows are implemented under the templated stages '_DEB_TEMPLATE' and '_RPM_TEMPLATE'.
+We currently plan to actively support the following operating systems, and intend to further expand this list following our users' needs.
+
+### Operating systems supported
+The following distributions are supported:
+- Debian versions
+ - Buster (TBD - not released yet, check [debian releases](https://www.debian.org/releases/) for details)
+ - Stretch
+ - Jessie
+ - Wheezy
+
+- Ubuntu versions
+ - Disco
+ - Cosmic
+ - Bionic
+  - Artful
+
+- Enterprise Linux versions (Covers Red Hat, CentOS, and Amazon Linux with version 6)
+ - Version 8 (TBD)
+ - Version 7
+ - Version 6
+
+- Fedora versions
+ - Version 31 (TBD)
+ - Version 30
+ - Version 29
+ - Version 28
+
+- openSUSE versions
+ - 15.1
+ - 15.0
+
+- Gentoo distributions
+ - TBD
+
+### Architectures supported
+We plan to support the amd64, x86 and arm64 architectures. As of June 2019 only amd64 and x86 are available, as we are still working on solving issues with the arm64 architecture.
+
+Package deployment can be triggered manually by executing an empty commit with the following message pattern: `[Package PACKAGE_TYPE PACKAGE_ARCH] DESCRIBE_THE_REASONING_HERE`.
+The Travis YAML configuration allows the user to combine package type and architecture as necessary to regenerate the current stable release (for example tag v1.15.0, as of the 4th of May 2019).
+Sample patterns to trigger building of all packages for the amd64 architecture:
+- '[Package amd64 RPM]': Build & publish all available amd64 RPM packages
+- '[Package amd64 DEB]': Build & publish all available amd64 DEB packages
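+
+A concrete trigger might look like the following sketch (assuming push rights to the repository):
+
+```bash
+# Illustrative only: ask CI to rebuild and publish the amd64 DEB packages
+git commit --allow-empty -m "[Package amd64 DEB] Rebuild packages for the current stable release"
+git push
+```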
diff --git a/.travis/check_changelog_last_modification.sh b/.travis/check_changelog_last_modification.sh
new file mode 100755
index 0000000..d6f8a16
--- /dev/null
+++ b/.travis/check_changelog_last_modification.sh
@@ -0,0 +1,38 @@
+#!/usr/bin/env bash
+#
+# This scriptlet validates the age of the nightly changelog and notifies us if it gets too old
+#
+# Copyright: SPDX-License-Identifier: GPL-3.0-or-later
+#
+# Author : Pavlos Emm. Katsoulakis (paul@netdata.cloud)
+
+set -e
+
+# If we are not in netdata git repo, at the top level directory, fail
+TOP_LEVEL=$(basename "$(git rev-parse --show-toplevel)")
+CWD=$(git rev-parse --show-cdup || echo "")
+if [ -n "${CWD}" ] || [ ! "${TOP_LEVEL}" == "netdata" ]; then
+ echo "Run as .travis/$(basename "$0") from top level directory of netdata git repository"
+ echo "Changelog age checker exited abnormally"
+ exit 1
+fi
+
+source tests/installer/slack.sh || echo "I could not load slack library"
+
+LAST_MODIFICATION="$(git log -1 --pretty="format:%at" CHANGELOG.md)"
+CURRENT_TIME="$(date +"%s")"
+TWO_DAYS_IN_SECONDS=172800
+
+DIFF=$((CURRENT_TIME - LAST_MODIFICATION))
+
+echo "Checking CHANGELOG.md last modification time on GIT.."
+echo "CHANGELOG.md timestamp: ${LAST_MODIFICATION}"
+echo "Current timestamp: ${CURRENT_TIME}"
+echo "Diff: ${DIFF}"
+
+if [ ${DIFF} -gt ${TWO_DAYS_IN_SECONDS} ]; then
+ echo "CHANGELOG.md is more than two days old!"
+ post_message "TRAVIS_MESSAGE" "Hi <!here>, CHANGELOG.md was found more than two days old (Diff: ${DIFF} seconds)" "${NOTIF_CHANNEL}"
+else
+ echo "CHANGELOG.md is less than two days old, fine"
+fi
diff --git a/.travis/create_artifacts.sh b/.travis/create_artifacts.sh
new file mode 100755
index 0000000..32b9a0b
--- /dev/null
+++ b/.travis/create_artifacts.sh
@@ -0,0 +1,67 @@
+#!/usr/bin/env bash
+#
+# Artifacts creation script.
+# This script generates two things:
+# 1) The static binary that can run on all Linux distros (with built-in dependencies, etc.)
+# 2) The distribution source tarball
+#
+# Copyright: SPDX-License-Identifier: GPL-3.0-or-later
+#
+# Author: Paul Emm. Katsoulakis <paul@netdata.cloud>
+#
+# shellcheck disable=SC2230
+
+set -e
+
+# If we are not in netdata git repo, at the top level directory, fail
+TOP_LEVEL=$(basename "$(git rev-parse --show-toplevel)")
+CWD=$(git rev-parse --show-cdup || echo "")
+if [ -n "${CWD}" ] || [ ! "${TOP_LEVEL}" == "netdata" ]; then
+ echo "Run as .travis/$(basename "$0") from top level directory of netdata git repository"
+ exit 1
+fi
+
+if [ ! "${TRAVIS_REPO_SLUG}" == "netdata/netdata" ]; then
+ echo "Beta mode on ${TRAVIS_REPO_SLUG}, not running anything here"
+ exit 0
+fi
+
+echo "--- Initialize git configuration ---"
+git checkout "${1-master}"
+git pull
+
+if [ "${RELEASE_CHANNEL}" == stable ]; then
+ echo "--- Set default release channel to stable ---"
+ sed -i 's/^RELEASE_CHANNEL="nightly" *#/RELEASE_CHANNEL="stable" #/' \
+ netdata-installer.sh \
+ packaging/makeself/install-or-update.sh
+fi
+
+# Everything from this directory will be uploaded to GCS
+mkdir -p artifacts
+BASENAME="netdata-$(git describe)"
+
+# Make sure stdout is in blocking mode. If we don't, then conda create will barf during downloads.
+# See https://github.com/travis-ci/travis-ci/issues/4704#issuecomment-348435959 for details.
+python -c 'import os,sys,fcntl; flags = fcntl.fcntl(sys.stdout, fcntl.F_GETFL); fcntl.fcntl(sys.stdout, fcntl.F_SETFL, flags&~os.O_NONBLOCK);'
+echo "--- Create tarball ---"
+command -v git > /dev/null && [ -d .git ] && git clean -d -f
+autoreconf -ivf
+./configure --prefix=/usr --sysconfdir=/etc --localstatedir=/var --libexecdir=/usr/libexec --with-zlib --with-math --with-user=netdata CFLAGS=-O2
+make dist
+mv "${BASENAME}.tar.gz" artifacts/
+
+echo "--- Create self-extractor ---"
+command -v git > /dev/null && [ -d .git ] && git clean -d -f
+./packaging/makeself/build-x86_64-static.sh
+
+# Needed for GCS
+echo "--- Copy artifacts to separate directory ---"
+#shellcheck disable=SC2164
+cp packaging/version artifacts/latest-version.txt
+cd artifacts
+ln -s "${BASENAME}.tar.gz" netdata-latest.tar.gz
+ln -s "${BASENAME}.gz.run" netdata-latest.gz.run
+sha256sum -b ./* > "sha256sums.txt"
+echo "checksums:"
+cat sha256sums.txt
diff --git a/.travis/create_changelog.sh b/.travis/create_changelog.sh
new file mode 100755
index 0000000..8d4c12e
--- /dev/null
+++ b/.travis/create_changelog.sh
@@ -0,0 +1,46 @@
+#!/usr/bin/env bash
+#
+# Copyright: SPDX-License-Identifier: GPL-3.0-or-later
+#
+# Author : Pavlos Emm. Katsoulakis (paul@netdata.cloud)
+set -e
+
+# If we are not in netdata git repo, at the top level directory, fail
+TOP_LEVEL=$(basename "$(git rev-parse --show-toplevel)")
+CWD=$(git rev-parse --show-cdup || echo "")
+if [ -n "$CWD" ] || [ ! "${TOP_LEVEL}" == "netdata" ]; then
+ echo "Run as .travis/$(basename "$0") from top level directory of netdata git repository"
+ echo "Changelog creation aborted"
+ exit 1
+fi
+
+ORGANIZATION=$(echo "$TRAVIS_REPO_SLUG" | awk -F '/' '{print $1}')
+PROJECT=$(echo "$TRAVIS_REPO_SLUG" | awk -F '/' '{print $2}')
+GIT_MAIL=${GIT_MAIL:-"bot@netdata.cloud"}
+GIT_USER=${GIT_USER:-"netdatabot"}
+
+if [ -z ${GIT_TAG+x} ]; then
+ OPTS=""
+else
+ OPTS="--future-release ${GIT_TAG}"
+fi
+
+if [ ! "${TRAVIS_REPO_SLUG}" == "netdata/netdata" ]; then
+ echo "Beta mode on ${TRAVIS_REPO_SLUG}, nothing else to do here"
+ exit 0
+fi
+
+echo "--- Creating changelog ---"
+git checkout master
+git pull
+
+docker run -it -v "$(pwd)":/project markmandel/github-changelog-generator:latest \
+ --user "${ORGANIZATION}" \
+ --project "${PROJECT}" \
+ --token "${GITHUB_TOKEN}" \
+ --since-tag "v1.10.0" \
+ --unreleased-label "**Next release**" \
+ --no-issues \
+ --exclude-labels "stale,duplicate,question,invalid,wontfix,discussion,no changelog" \
+ --max-issues 500 \
+ --bug-labels IGNOREBUGS ${OPTS}
diff --git a/.travis/draft_release.sh b/.travis/draft_release.sh
new file mode 100755
index 0000000..ddc0f9a
--- /dev/null
+++ b/.travis/draft_release.sh
@@ -0,0 +1,65 @@
+#!/bin/bash
+#
+# Draft release generator.
+# This utility is responsible for submitting a draft release to github repo
+# It is agnostic of other processes, when executed it will draft a release,
+# based on the most recent reachable tag.
+#
+# Requirements:
+# - GITHUB_TOKEN variable set with GitHub token. Access level: repo.public_repo
+# - artifacts directory in place
+# - The directory is created by create_artifacts.sh mechanism
+# - The artifacts need to be created with the same tag, obviously
+#
+# Copyright: SPDX-License-Identifier: GPL-3.0-or-later
+#
+# Author: Pavlos Emm. Katsoulakis <paul@netdata.cloud>
+
+set -e
+
+if [ ! -f .gitignore ]; then
+ echo "Run as ./travis/$(basename "$0") from top level directory of git repository"
+ exit 1
+fi
+
+echo "--- Initialize git configuration ---"
+git checkout master
+git pull
+
+
+if [[ $(git describe) =~ -rc* ]]; then
+ echo "This is a release candidate tag, we do not generate a release draft"
+ exit 0
+fi
+
+# Load the tag, if any
+GIT_TAG=$(git describe)
+
+if [ ! "${TRAVIS_REPO_SLUG}" == "netdata/netdata" ]; then
+  echo "Beta mode on ${TRAVIS_REPO_SLUG}, I was about to run for release (${GIT_TAG}), but I am emulating, so bye"
+ exit 0
+fi
+
+echo "---- CREATING RELEASE DRAFT WITH ASSETS -----"
+# Download hub
+HUB_VERSION=${HUB_VERSION:-"2.5.1"}
+wget "https://github.com/github/hub/releases/download/v${HUB_VERSION}/hub-linux-amd64-${HUB_VERSION}.tgz" -O "/tmp/hub-linux-amd64-${HUB_VERSION}.tgz"
+tar -C /tmp -xvf "/tmp/hub-linux-amd64-${HUB_VERSION}.tgz"
+export PATH=$PATH:"/tmp/hub-linux-amd64-${HUB_VERSION}/bin"
+
+# Create a release draft
+if [ -z ${GIT_TAG+x} ]; then
+ echo "Variable GIT_TAG is not set. Something went terribly wrong! Exiting."
+ exit 1
+fi
+if [ "${GIT_TAG}" != "$(git tag --points-at)" ]; then
+ echo "ERROR! Current commit is not tagged. Stopping release creation."
+ exit 1
+fi
+until hub release create --draft \
+ -a "artifacts/netdata-${GIT_TAG}.tar.gz" \
+ -a "artifacts/netdata-${GIT_TAG}.gz.run" \
+ -a "artifacts/sha256sums.txt" \
+ -m "${GIT_TAG}" "${GIT_TAG}"; do
+ sleep 5
+done
diff --git a/.travis/gcs-credentials.json.enc b/.travis/gcs-credentials.json.enc
new file mode 100644
index 0000000..5d1e7b2
--- /dev/null
+++ b/.travis/gcs-credentials.json.enc
Binary files differ
diff --git a/.travis/generate_changelog_and_tag_release.sh b/.travis/generate_changelog_and_tag_release.sh
new file mode 100755
index 0000000..bf5555b
--- /dev/null
+++ b/.travis/generate_changelog_and_tag_release.sh
@@ -0,0 +1,64 @@
+#!/bin/bash
+#
+# Script to automatically do a couple of things:
+# - generate a new tag according to semver (https://semver.org/)
+# - generate CHANGELOG.md by using https://github.com/skywinder/github-changelog-generator
+#
+# Tags are generated by searching for a keyword in the last commit message. Keywords are:
+# - [patch] or [fix] to bump patch number
+# - [minor], [feature] or [feat] to bump minor number
+# - [major] or [breaking change] to bump major number
+# All keywords MUST be surrounded with square braces.
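+#
+# Example (illustrative): a commit message such as
+#   "Improve the cgroups collector [minor]"
+# would bump the minor version on the next generated tag.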
+#
+# Script uses git mechanisms for locking, so it can be used in parallel builds
+#
+# Requirements:
+# - GITHUB_TOKEN variable set with GitHub token. Access level: repo.public_repo
+# - docker
+#
+# This is a modified version of:
+# https://github.com/paulfantom/travis-helper/blob/master/releasing/releaser.sh
+#
+# Copyright: SPDX-License-Identifier: GPL-3.0-or-later
+#
+# Author: Pavlos Emm. Katsoulakis <paul@netdata.cloud>
+# Author: Pawel Krupa (@paulfantom)
+set -e
+
+if [ ! -f .gitignore ]; then
+ echo "Run as ./travis/$(basename "$0") from top level directory of git repository"
+ exit 1
+fi
+
+echo "--- Changelog generator script starting ---"
+# If we don't have a produced TAG, something went wrong upstream, so bail out
+if [ -z "${GIT_TAG}" ]; then
+  echo "GIT_TAG is empty, that is not supposed to happen (Value: $GIT_TAG)"
+ exit 1
+fi
+
+if [ ! "${TRAVIS_REPO_SLUG}" == "netdata/netdata" ]; then
+ echo "Beta mode on ${TRAVIS_REPO_SLUG}, nothing to do on the changelog generator and tagging script for (${GIT_TAG}), bye"
+ exit 0
+fi
+
+echo "--- Initialize git configuration ---"
+export GIT_MAIL="bot@netdata.cloud"
+export GIT_USER="netdatabot"
+git checkout master
+git pull
+
+echo "---- UPDATE VERSION FILE ----"
+echo "$GIT_TAG" >packaging/version
+git add packaging/version
+
+echo "---- Create CHANGELOG -----"
+./.travis/create_changelog.sh
+git add CHANGELOG.md
+
+echo "---- COMMIT AND PUSH CHANGES ----"
+git commit -m "[ci skip] release $GIT_TAG" --author "${GIT_USER} <${GIT_MAIL}>"
+git tag "$GIT_TAG" -a -m "Automatic tag generation for travis build no. $TRAVIS_BUILD_NUMBER"
+git push "https://${GITHUB_TOKEN}:@$(git config --get remote.origin.url | sed -e 's/^https:\/\///')"
+git push "https://${GITHUB_TOKEN}:@$(git config --get remote.origin.url | sed -e 's/^https:\/\///')" --tags
+# After those operations output of command `git describe` should be identical with a value of GIT_TAG
diff --git a/.travis/generate_changelog_for_nightlies.sh b/.travis/generate_changelog_for_nightlies.sh
new file mode 100755
index 0000000..59173af
--- /dev/null
+++ b/.travis/generate_changelog_for_nightlies.sh
@@ -0,0 +1,50 @@
+#!/usr/bin/env bash
+#
+# Changelog generation scriptlet, for nightlies
+#
+# Copyright: SPDX-License-Identifier: GPL-3.0-or-later
+#
+# Author : Pawel Krupa (paulfantom)
+# Author : Pavlos Emm. Katsoulakis (paul@netdata.cloud)
+set -e
+
+# If we are not in netdata git repo, at the top level directory, fail
+TOP_LEVEL=$(basename "$(git rev-parse --show-toplevel)")
+CWD=$(git rev-parse --show-cdup || echo "")
+if [ -n "$CWD" ] || [ ! "${TOP_LEVEL}" == "netdata" ]; then
+ echo "Run as .travis/$(basename "$0") from top level directory of netdata git repository"
+ echo "Changelog generation process aborted"
+ exit 1
+fi
+
+LAST_TAG="$1"
+COMMITS_SINCE_RELEASE="$2"
+NEW_VERSION="${LAST_TAG}-$((COMMITS_SINCE_RELEASE + 1))-nightly"
+GIT_MAIL=${GIT_MAIL:-"bot@netdata.cloud"}
+GIT_USER=${GIT_USER:-"netdatabot"}
+PUSH_URL=$(git config --get remote.origin.url | sed -e 's/^https:\/\///')
+FAIL=0
+
+if [ ! "${TRAVIS_REPO_SLUG}" == "netdata/netdata" ]; then
+ echo "Beta mode on ${TRAVIS_REPO_SLUG}, nothing else to do here"
+ exit 0
+fi
+
+echo "Running changelog creation mechanism"
+.travis/create_changelog.sh
+
+echo "Changelog created! Adding packaging/version(${NEW_VERSION}) and CHANGELOG.md to the repository"
+echo "${NEW_VERSION}" > packaging/version
+git add packaging/version && echo "1) Added packaging/version to repository" || FAIL=1
+git add CHANGELOG.md && echo "2) Added changelog file to repository" || FAIL=1
+git commit -m '[ci skip] create nightly packages and update changelog' --author "${GIT_USER} <${GIT_MAIL}>" && echo "3) Committed changes to repository" || FAIL=1
+git push "https://${GITHUB_TOKEN}:@${PUSH_URL}" && echo "4) Pushed changes to remote ${PUSH_URL}" || FAIL=1
+
+# In case of a failure, wrap it up and bail out cleanly
+if [ $FAIL -eq 1 ]; then
+ git clean -xfd
+ echo "Changelog generation failed during github UPDATE!"
+ exit 1
+fi
+
+echo "Changelog generation completed successfully!"
diff --git a/.travis/nightlies.sh b/.travis/nightlies.sh
new file mode 100755
index 0000000..e60ee27
--- /dev/null
+++ b/.travis/nightlies.sh
@@ -0,0 +1,51 @@
+#!/usr/bin/env bash
+#
+# This is the nightly changelog generation script
+# It is responsible for two major activities:
+# 1) Update packaging/version with the current nightly version
+# 2) Generate the changelog for the mentioned version
+#
+# Copyright: SPDX-License-Identifier: GPL-3.0-or-later
+#
+# Author : Pawel Krupa (paulfantom)
+# Author : Pavlos Emm. Katsoulakis (paul@netdata.cloud)
+set -e
+FAIL=0
+
+source tests/installer/slack.sh || echo "Could not load slack library"
+
+# If we are not in netdata git repo, at the top level directory, fail
+TOP_LEVEL=$(basename "$(git rev-parse --show-toplevel)")
+CWD=$(git rev-parse --show-cdup || echo "")
+if [ -n "${CWD}" ] || [ ! "${TOP_LEVEL}" == "netdata" ]; then
+ echo "Run as .travis/$(basename "$0") from top level directory of netdata git repository"
+ echo "Changelog generation process aborted"
+ exit 1
+fi
+
+LAST_TAG=$(git describe --abbrev=0 --tags)
+COMMITS_SINCE_RELEASE=$(git rev-list "${LAST_TAG}"..HEAD --count)
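+# packaging/version normally holds the previous nightly version, e.g. "v1.15.0-105-nightly";
+# extract the middle (count) field from it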
+PREVIOUS_NIGHTLY_COUNT="$(rev <packaging/version | cut -d- -f 2 | rev)"
+
+# If no commits since release, just stop
+if [ "${COMMITS_SINCE_RELEASE}" == "${PREVIOUS_NIGHTLY_COUNT}" ]; then
+  echo "No changes since last nightly release, nothing else to do"
+ exit 0
+fi
+
+if [ ! "${TRAVIS_REPO_SLUG}" == "netdata/netdata" ]; then
+ echo "Beta mode -- nothing to do for ${TRAVIS_REPO_SLUG} on the nightlies script, bye"
+ exit 0
+fi
+
+echo "--- Running Changelog generation ---"
+echo "We got $COMMITS_SINCE_RELEASE changes since $LAST_TAG, re-generating changelog"
+NIGHTLIES_CHANGELOG_FAILED=0
+.travis/generate_changelog_for_nightlies.sh "${LAST_TAG}" "${COMMITS_SINCE_RELEASE}" || NIGHTLIES_CHANGELOG_FAILED=1
+
+if [ ${NIGHTLIES_CHANGELOG_FAILED} -eq 1 ]; then
+ echo "Changelog generation has failed, this is a soft error, process continues"
+ post_message "TRAVIS_MESSAGE" "Changelog generation job for nightlies failed, possibly due to github issues" "${NOTIF_CHANNEL}" || echo "Slack notification failed"
+fi
+
+exit "${FAIL}"
diff --git a/.travis/package_management/build.sh b/.travis/package_management/build.sh
new file mode 100644
index 0000000..bafaecc
--- /dev/null
+++ b/.travis/package_management/build.sh
@@ -0,0 +1,32 @@
+#!/usr/bin/env bash
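+#
+# Builds the DEB package inside the build container.
+# Usage (arguments illustrative): ./build.sh <unpacked-netdata-source-dir> <latest-release-version>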
+
+UNPACKAGED_NETDATA_PATH="$1"
+LATEST_RELEASE_VERSION="$2"
+
+if [ -z "${LATEST_RELEASE_VERSION}" ]; then
+ echo "Parameter 'LATEST_RELEASE_VERSION' not defined"
+ exit 1
+fi
+
+if [ -z "${UNPACKAGED_NETDATA_PATH}" ]; then
+ echo "Parameter 'UNPACKAGED_NETDATA_PATH' not defined"
+ exit 1
+fi
+
+echo "Running changelog generation mechanism since ${LATEST_RELEASE_VERSION}"
+
+echo "Entering ${UNPACKAGED_NETDATA_PATH}"
+cd "${UNPACKAGED_NETDATA_PATH}"
+
+echo "Linking debian -> contrib/debian"
+ln -sf contrib/debian debian
+
+echo "Executing dpkg-buildpackage"
+# The pre/post clean options appeared after dpkg 1.18.8; it is simpler to check the help output for their existence than to parse the version
+if dpkg-buildpackage --help | grep "\-\-post\-clean" 2> /dev/null > /dev/null; then
+ dpkg-buildpackage --post-clean --pre-clean --build=binary -us -uc
+else
+ dpkg-buildpackage -b -us -uc
+fi
+
+echo "DEB build script completed!"
diff --git a/.travis/package_management/build_judy.sh b/.travis/package_management/build_judy.sh
new file mode 100755
index 0000000..202ea04
--- /dev/null
+++ b/.travis/package_management/build_judy.sh
@@ -0,0 +1,36 @@
+#!/usr/bin/env bash
+#
+# Build Judy from source; you need to run this script as root.
+#
+# Copyright: SPDX-License-Identifier: GPL-3.0-or-later
+#
+# Author : Pavlos Emm. Katsoulakis (paul@netdata.cloud)
+set -e
+JUDY_VER="1.0.5"
+JUDY_DIR="/opt/judy-${JUDY_VER}"
+
+# If we are not in netdata git repo, at the top level directory, fail
+TOP_LEVEL=$(basename "$(git rev-parse --show-toplevel)")
+CWD=$(git rev-parse --show-cdup)
+if [ -n "$CWD" ] || [ ! "${TOP_LEVEL}" == "netdata" ]; then
+ echo "Run as .travis/package_management/$(basename "$0") from top level directory of netdata git repository"
+ echo "Build Judy package from source code failed"
+ exit 1
+fi
+
+echo "Fetching judy source tarball"
+wget -O /opt/judy.tar.gz http://downloads.sourceforge.net/project/judy/judy/Judy-${JUDY_VER}/Judy-${JUDY_VER}.tar.gz
+
+echo "Entering /opt directory and extracting tarball"
+cd /opt && tar -xf judy.tar.gz && rm judy.tar.gz
+
+echo "Entering ${JUDY_DIR}"
+cd "${JUDY_DIR}"
+
+echo "Running configure"
+CFLAGS="-O2 -s" CXXFLAGS="-O2 -s" ./configure
+
+echo "Compiling and installing"
+make && make install
+
+echo "Done, enjoy Judy!"
diff --git a/.travis/package_management/build_libuv.sh b/.travis/package_management/build_libuv.sh
new file mode 100755
index 0000000..c30eede
--- /dev/null
+++ b/.travis/package_management/build_libuv.sh
@@ -0,0 +1,36 @@
+#!/usr/bin/env bash
+#
+# Build libuv from source; you need to run this script as root.
+#
+# Copyright: SPDX-License-Identifier: GPL-3.0-or-later
+#
+# Author : Pavlos Emm. Katsoulakis <paul@netdata.cloud>
+set -e
+LIBUV_VERSION="v1.32.0"
+# Their folder is libuv-1.32.0 while the tarball version is v1.32.0, so fix that until they fix it...
+LIBUV_DIR="/opt/libuv-${LIBUV_VERSION/v/}"
+
+# If we are not in netdata git repo, at the top level directory, fail
+TOP_LEVEL=$(basename "$(git rev-parse --show-toplevel)")
+CWD=$(git rev-parse --show-cdup)
+if [ -n "$CWD" ] || [ ! "${TOP_LEVEL}" == "netdata" ]; then
+ echo "Run as .travis/package_management/$(basename "$0") from top level directory of netdata git repository"
+ echo "Build libuv package from source code failed"
+ exit 1
+fi
+
+echo "Fetching libuv from github"
+wget -O /opt/libuv.tar.gz "https://github.com/libuv/libuv/archive/${LIBUV_VERSION}.tar.gz"
+
+echo "Entering /opt and extracting source"
+cd /opt && tar -xf libuv.tar.gz && rm libuv.tar.gz
+
+echo "Entering ${LIBUV_DIR}"
+cd "${LIBUV_DIR}"
+
+echo "Compiling and installing"
+sh autogen.sh
+./configure
+make && make check && make install
+
+echo "Done, enjoy libuv!"
diff --git a/.travis/package_management/build_package_in_container.sh b/.travis/package_management/build_package_in_container.sh
new file mode 100755
index 0000000..95a68e7
--- /dev/null
+++ b/.travis/package_management/build_package_in_container.sh
@@ -0,0 +1,82 @@
+#!/usr/bin/env bash
+#
+# Entry point for package build process
+#
+# Copyright: SPDX-License-Identifier: GPL-3.0-or-later
+#
+# Author : Pavlos Emm. Katsoulakis (paul@netdata.cloud)
+#shellcheck disable=SC1091
+set -e
+
+# If we are not in netdata git repo, at the top level directory, fail
+TOP_LEVEL=$(basename "$(git rev-parse --show-toplevel)")
+CWD=$(git rev-parse --show-cdup)
+if [ -n "$CWD" ] || [ ! "${TOP_LEVEL}" == "netdata" ]; then
+ echo "Run as .travis/package_management/$(basename "$0") from top level directory of netdata git repository"
+ echo "Docker build process aborted"
+ exit 1
+fi
+
+source .travis/package_management/functions.sh || (echo "Failed to load packaging library" && exit 1)
+
+# Check for presence of mandatory environment variables
+if [ -z "${BUILD_STRING}" ]; then
+ echo "No Distribution was defined. Make sure BUILD_STRING is set on the environment before running this script"
+ exit 1
+fi
+
+if [ -z "${BUILDER_NAME}" ]; then
+ echo "No builder account and container name defined. Make sure BUILDER_NAME is set on the environment before running this script"
+ exit 1
+fi
+
+if [ -z "${BUILD_DISTRO}" ]; then
+ echo "No build distro information defined. Make sure BUILD_DISTRO is set on the environment before running this script"
+ exit 1
+fi
+
+if [ -z "${BUILD_RELEASE}" ]; then
+ echo "No build release information defined. Make sure BUILD_RELEASE is set on the environment before running this script"
+ exit 1
+fi
+
+if [ -z "${PACKAGE_TYPE}" ]; then
+  echo "No package type defined. Make sure PACKAGE_TYPE is set on the environment before running this script"
+ exit 1
+fi
+
+# Detect architecture and load extra variables needed
+detect_arch_from_commit
+
+case "${BUILD_ARCH}" in
+"all")
+  echo "* * * Building all architectures: amd64, arm64 and i386 * * *"
+ echo "Building for amd64.."
+ export CONTAINER_NAME="${BUILDER_NAME}-${BUILD_DISTRO}${BUILD_RELEASE}-amd64"
+ export LXC_CONTAINER_ROOT="/var/lib/lxc/${CONTAINER_NAME}/rootfs"
+ .travis/package_management/trigger_${PACKAGE_TYPE}_lxc_build.py "${CONTAINER_NAME}"
+
+ echo "Building for arm64.."
+ export CONTAINER_NAME="${BUILDER_NAME}-${BUILD_DISTRO}${BUILD_RELEASE}-arm64"
+ export LXC_CONTAINER_ROOT="/var/lib/lxc/${CONTAINER_NAME}/rootfs"
+ .travis/package_management/trigger_${PACKAGE_TYPE}_lxc_build.py "${CONTAINER_NAME}"
+
+ echo "Building for i386.."
+ export CONTAINER_NAME="${BUILDER_NAME}-${BUILD_DISTRO}${BUILD_RELEASE}-i386"
+ export LXC_CONTAINER_ROOT="/var/lib/lxc/${CONTAINER_NAME}/rootfs"
+ .travis/package_management/trigger_${PACKAGE_TYPE}_lxc_build.py "${CONTAINER_NAME}"
+
+ ;;
+"amd64"|"arm64"|"i386")
+ echo "Building for ${BUILD_ARCH}.."
+ export CONTAINER_NAME="${BUILDER_NAME}-${BUILD_DISTRO}${BUILD_RELEASE}-${BUILD_ARCH}"
+ export LXC_CONTAINER_ROOT="/var/lib/lxc/${CONTAINER_NAME}/rootfs"
+ .travis/package_management/trigger_${PACKAGE_TYPE}_lxc_build.py "${CONTAINER_NAME}"
+ ;;
+*)
+ echo "Unknown build architecture '${BUILD_ARCH}', nothing to do for build"
+ exit 1
+ ;;
+esac
+
+echo "Build process completed!"
diff --git a/.travis/package_management/common.py b/.travis/package_management/common.py
new file mode 100755
index 0000000..4cc04b9
--- /dev/null
+++ b/.travis/package_management/common.py
@@ -0,0 +1,182 @@
+#
+#
+# Python library with commonly used functions within the package management scope
+#
+# Author : Pavlos Emm. Katsoulakis <paul@netdata.cloud>
+
+import lxc
+import subprocess
+import os
+import sys
+import tempfile
+import shutil
+
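+# Illustrative behaviour of fetch_version (input formats assumed from our versioning scheme):
+#   fetch_version("v1.15.0.104.latest") -> ("1.15.104", None)    # nightly-style input, no tag to check out
+#   fetch_version("v1.15.0")            -> ("1.15.0", "1.15.0")  # stable input, tag equals the friendly version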
+def fetch_version(orig_build_version):
+ tag = None
+ friendly_version = ""
+
+ # TODO: Checksum validations
+ if str(orig_build_version).count(".latest") == 1:
+ version_list=str(orig_build_version).replace('v', '').split('.')
+ minor = version_list[3] if int(version_list[2]) == 0 else (version_list[2] + version_list[3])
+ friendly_version='.'.join(version_list[0:2]) + "." + minor
+ else:
+ friendly_version = orig_build_version.replace('v', '')
+ tag = friendly_version # Go to stable tag
+ print("Version set to %s from %s" % (friendly_version, orig_build_version))
+
+ return friendly_version, tag
+
+def replace_tag(tag_name, spec, new_tag_content):
+ print("Fixing tag %s in %s" % (tag_name, spec))
+
+ ifp = open(spec, "r")
+ config = ifp.readlines()
+ ifp.close()
+
+ source_line = -1
+ for line in config:
+ if str(line).count(tag_name + ":") > 0:
+ source_line = config.index(line)
+ print("Found line: %s in item %d" % (line, source_line))
+ break
+
+ if source_line >= 0:
+ print("Replacing line %s with %s in spec file" %(config[source_line], new_tag_content))
+ config[source_line] = "%s: %s\n" % (tag_name, new_tag_content)
+ config_str = ''.join(config)
+ ofp = open(spec, 'w')
+ ofp.write(config_str)
+ ofp.close()
+
+def run_command(container, command):
+ print("Running command: %s" % command)
+ command_result = container.attach_wait(lxc.attach_run_command, command, stdout=sys.stdout.buffer, stderr=sys.stdout.buffer)
+
+ if command_result != 0:
+ raise Exception("Command failed with exit code %d" % command_result)
+
+def run_command_in_host(cmd, cwd=None):
+ print("Issue command in host: %s, cwd:%s" % (str(cmd), str(cwd)))
+
+ proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=cwd)
+ o, e = proc.communicate()
+ print('Output: ' + o.decode('ascii'))
+ print('Error: ' + e.decode('ascii'))
+ print('code: ' + str(proc.returncode))
+
+def prepare_repo(container):
+ if str(os.environ["REPO_TOOL"]).count("zypper") == 1:
+ run_command(container, [os.environ["REPO_TOOL"], "clean", "-a"])
+ run_command(container, [os.environ["REPO_TOOL"], "--no-gpg-checks", "update", "-y"])
+
+ elif str(os.environ["REPO_TOOL"]).count("yum") == 1:
+ run_command(container, [os.environ["REPO_TOOL"], "clean", "all"])
+ run_command(container, [os.environ["REPO_TOOL"], "update", "-y"])
+
+ if os.environ["BUILD_STRING"].count("el/7") == 1 and os.environ["BUILD_ARCH"].count("i386") == 1:
+ print ("Skipping epel-release install for %s-%s" % (os.environ["BUILD_STRING"], os.environ["BUILD_ARCH"]))
+ else:
+ run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "epel-release"])
+
+ elif str(os.environ["REPO_TOOL"]).count("apt-get") == 1:
+ if str(os.environ["BUILD_STRING"]).count("debian/jessie") == 1:
+ run_command(container, ["bash", "-c", "echo deb http://archive.debian.org/debian/ jessie-backports main contrib non-free >> /etc/apt/sources.list.d/99-archived.list"])
+ run_command(container, [os.environ["REPO_TOOL"], "update", "-y", '-o', 'Acquire::Check-Valid-Until=false'])
+ else:
+ run_command(container, [os.environ["REPO_TOOL"], "update", "-y"])
+ else:
+ run_command(container, [os.environ["REPO_TOOL"], "update", "-y"])
+
+ run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "sudo"])
+ run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "wget"])
+ run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "bash"])
+
+def install_common_dependendencies(container):
+ if str(os.environ["REPO_TOOL"]).count("zypper") == 1:
+ run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "gcc-c++"])
+ run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "json-glib-devel"])
+ run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "freeipmi-devel"])
+ run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "cups-devel"])
+ run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "snappy-devel"])
+ run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "protobuf-devel"])
+ run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "protobuf-c"])
+
+ elif str(os.environ["REPO_TOOL"]).count("yum") == 1:
+ run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "gcc-c++"])
+ run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "json-c-devel"])
+ run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "freeipmi-devel"])
+ run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "cups-devel"])
+ run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "snappy-devel"])
+ run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "protobuf-devel"])
+ run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "protobuf-c-devel"])
+ run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "protobuf-compiler"])
+ run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "libwebsockets-devel"])
+
+ elif str(os.environ["REPO_TOOL"]).count("apt-get") == 1:
+ run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "g++"])
+ run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "libipmimonitoring-dev"])
+ run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "libjson-c-dev"])
+ run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "libcups2-dev"])
+ run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "libsnappy-dev"])
+ run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "libprotobuf-dev"])
+ run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "libprotoc-dev"])
+ run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "protobuf-compiler"])
+ if os.environ["BUILD_STRING"].count("debian/jessie") == 1:
+ run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "snappy"])
+ else:
+ run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "gcc-c++"])
+ run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "cups-devel"])
+ run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "freeipmi-devel"])
+ run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "json-c-devel"])
+ run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "snappy-devel"])
+ run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "protobuf-devel"])
+ run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "protobuf-c-devel"])
+ run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "protobuf-compiler"])
+ run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "libwebsockets-devel"])
+
+ if os.environ["BUILD_STRING"].count("el/6") <= 0:
+ run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "autogen"])
+
+def prepare_version_source(dest_archive, pkg_friendly_version, tag=None):
+ print(".0 Preparing local implementation tarball for version %s" % pkg_friendly_version)
+ tar_file = os.environ['LXC_CONTAINER_ROOT'] + dest_archive
+
+ print(".0 Copy repo to prepare it for tarball generation")
+ tmp_src = tempfile.mkdtemp(prefix='netdata-source-')
+ run_command_in_host(['cp', '-r', '.', tmp_src])
+
+ if tag is not None:
+ print(".1 Checking out tag %s" % tag)
+ run_command_in_host(['git', 'fetch', '--all'], tmp_src)
+
+ # TODO: Keep in mind that tricky 'v' there, needs to be removed once we clear our versioning scheme
+ run_command_in_host(['git', 'checkout', 'v%s' % pkg_friendly_version], tmp_src)
+
+ print(".2 Tagging the code with version: %s" % pkg_friendly_version)
+ run_command_in_host(['git', 'tag', '-a', pkg_friendly_version, '-m', 'Tagging while packaging on %s' % os.environ["CONTAINER_NAME"]], tmp_src)
+
+ print(".3 Run autoreconf -ivf")
+ run_command_in_host(['autoreconf', '-ivf'], tmp_src)
+
+ print(".4 Run configure")
+ run_command_in_host(['./configure', '--prefix=/usr', '--sysconfdir=/etc', '--localstatedir=/var', '--libdir=/usr/lib', '--libexecdir=/usr/libexec', '--with-math', '--with-zlib', '--with-user=netdata'], tmp_src)
+
+ print(".5 Run make dist")
+ run_command_in_host(['make', 'dist'], tmp_src)
+
+    print(".6 Copy generated tarball to desired path")
+ generated_tarball = '%s/netdata-%s.tar.gz' % (tmp_src, pkg_friendly_version)
+
+ if os.path.exists(generated_tarball):
+ run_command_in_host(['sudo', 'cp', generated_tarball, tar_file])
+
+ print(".7 Fixing permissions on tarball")
+ run_command_in_host(['sudo', 'chmod', '777', tar_file])
+
+        print(".8 Bring over netdata.spec, remove temp directory")
+ run_command_in_host(['cp', '%s/netdata.spec' % tmp_src, 'netdata.spec'])
+ shutil.rmtree(tmp_src)
+ else:
+ print("I could not find (%s) on the disk, stopping the build. Kindly check the logs and try again" % generated_tarball)
+ sys.exit(1)
diff --git a/.travis/package_management/configure_deb_lxc_environment.py b/.travis/package_management/configure_deb_lxc_environment.py
new file mode 100755
index 0000000..627493b
--- /dev/null
+++ b/.travis/package_management/configure_deb_lxc_environment.py
@@ -0,0 +1,90 @@
+#!/usr/bin/env python3
+#
+# Prepare the build environment within the container
+# The script attaches to the running container and does the following:
+# 1) Create the container
+# 2) Start the container up
+# 3) Create the builder user
+# 4) Prepare the environment for DEB build
+#
+# Copyright: SPDX-License-Identifier: GPL-3.0-or-later
+#
+# Author : Pavlos Emm. Katsoulakis <paul@netdata.cloud>
+
+import common
+import os
+import sys
+import lxc
+
+if len(sys.argv) != 2:
+ print('You need to provide a container name to get things started')
+ sys.exit(1)
+container_name=sys.argv[1]
+
+# Setup the container object
+print("Defining container %s" % container_name)
+container = lxc.Container(container_name)
+if not container.defined:
+ raise Exception("Container %s not defined!" % container_name)
+
+# Start the container
+if not container.start():
+ raise Exception("Failed to start the container")
+
+if not container.running or not container.state == "RUNNING":
+ raise Exception('Container %s is not running, configuration process aborted ' % container_name)
+
+# Wait for connectivity
+print("Waiting for container connectivity to start configuration sequence")
+if not container.get_ips(timeout=30):
+ raise Exception("Timeout while waiting for container")
+
+build_path = "/home/%s" % os.environ['BUILDER_NAME']
+
+# Run the required activities now
+# 1. Create the builder user
+print("1. Adding user %s" % os.environ['BUILDER_NAME'])
+common.run_command(container, ["useradd", "-m", os.environ['BUILDER_NAME']])
+
+# Fetch package dependencies for the build
+print("2. Preparing repo on LXC container")
+common.prepare_repo(container)
+
+print("2.1 Install .DEB build support packages")
+common.run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "dpkg-dev"])
+common.run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "libdistro-info-perl"])
+common.run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "dh-make"])
+common.run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "dh-systemd"])
+common.run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "dh-autoreconf"])
+common.run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "git-buildpackage"])
+
+print("2.2 Add more dependencies")
+common.run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "libnetfilter-acct-dev"])
+common.run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "libcups2-dev"])
+
+print ("3.1 Run install-required-packages scriptlet")
+common.run_command(container, ["wget", "-T", "15", "-O", "%s/.install-required-packages.sh" % build_path, "https://raw.githubusercontent.com/netdata/netdata/master/packaging/installer/install-required-packages.sh"])
+common.run_command(container, ["bash", "%s/.install-required-packages.sh" % build_path, "netdata", "--dont-wait", "--non-interactive"])
+
+print("3.2 Installing package dependencies within LXC container")
+common.install_common_dependendencies(container)
+
+friendly_version=""
+dest_archive=""
+download_url=""
+tag = None
+friendly_version, tag = common.fetch_version(os.environ['BUILD_VERSION'])
+
+tar_file="%s/netdata-%s.tar.gz" % (os.path.dirname(dest_archive), friendly_version)
+
+print("5. I will be building version '%s' of netdata." % os.environ['BUILD_VERSION'])
+dest_archive="%s/netdata-%s.tar.gz" % (build_path, friendly_version)
+
+common.prepare_version_source(dest_archive, friendly_version, tag=tag)
+
+print("6. Installing build.sh script to build path")
+common.run_command_in_host(['sudo', 'cp', '.travis/package_management/build.sh', "%s/%s/build.sh" % (os.environ['LXC_CONTAINER_ROOT'], build_path)])
+common.run_command_in_host(['sudo', 'chmod', '777', "%s/%s/build.sh" % (os.environ['LXC_CONTAINER_ROOT'], build_path)])
+common.run_command_in_host(['sudo', 'ln', '-sf', 'contrib/debian', 'debian'])
+
+print("Done!")
diff --git a/.travis/package_management/configure_rpm_lxc_environment.py b/.travis/package_management/configure_rpm_lxc_environment.py
new file mode 100755
index 0000000..4dca0bf
--- /dev/null
+++ b/.travis/package_management/configure_rpm_lxc_environment.py
@@ -0,0 +1,102 @@
+#!/usr/bin/env python3
+#
+#
+# Prepare the build environment within the container
+# The script attaches to the running container and does the following:
+# 1) Create the container
+# 2) Start the container up
+# 3) Create the builder user
+# 4) Prepare the environment for RPM build
+#
+# Copyright: SPDX-License-Identifier: GPL-3.0-or-later
+#
+# Author : Pavlos Emm. Katsoulakis <paul@netdata.cloud>
+
+import common
+import os
+import sys
+import lxc
+
+if len(sys.argv) != 2:
+ print('You need to provide a container name to get things started')
+ sys.exit(1)
+container_name=sys.argv[1]
+
+# Setup the container object
+print("Defining container %s" % container_name)
+container = lxc.Container(container_name)
+if not container.defined:
+ raise Exception("Container %s not defined!" % container_name)
+
+# Start the container
+if not container.start():
+ raise Exception("Failed to start the container")
+
+if not container.running or not container.state == "RUNNING":
+ raise Exception('Container %s is not running, configuration process aborted ' % container_name)
+
+# Wait for connectivity
+print("Waiting for container connectivity to start configuration sequence")
+if not container.get_ips(timeout=30):
+ raise Exception("Timeout while waiting for container")
+
+# Run the required activities now
+# Create the builder user
+print("1. Adding user %s" % os.environ['BUILDER_NAME'])
+common.run_command(container, ["useradd", "-m", os.environ['BUILDER_NAME']])
+
+# Fetch package dependencies for the build
+print("2.1 Preparing repo on LXC container")
+common.prepare_repo(container)
+
+common.run_command(container, ["wget", "-T", "15", "-O", "/home/%s/.install-required-packages.sh" % (os.environ['BUILDER_NAME']), "https://raw.githubusercontent.com/netdata/netdata/master/packaging/installer/install-required-packages.sh"])
+common.run_command(container, ["bash", "/home/%s/.install-required-packages.sh" % (os.environ['BUILDER_NAME']), "netdata", "--dont-wait", "--non-interactive"])
+
+# Exceptional cases, not available everywhere
+#
+print("2.2 Running uncommon dependencies and preparing LXC environment")
+# Not on Centos-7
+if os.environ["BUILD_STRING"].count("el/7") <= 0:
+ common.run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "libnetfilter_acct-devel"])
+
+# Not on Centos-6
+if os.environ["BUILD_STRING"].count("el/6") <= 0:
+ common.run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "autoconf-archive"])
+
+print("2.3 Installing common dependencies")
+common.install_common_dependendencies(container)
+
+print("3. Setting up macros")
+common.run_command(container, ["sudo", "-u", os.environ['BUILDER_NAME'], "/bin/echo", "'%_topdir %(echo /home/" + os.environ['BUILDER_NAME'] + ")/rpmbuild' > /home/" + os.environ['BUILDER_NAME'] + "/.rpmmacros"])
+
+print("4. Create rpmbuild directory")
+common.run_command(container, ["sudo", "-u", os.environ['BUILDER_NAME'], "mkdir", "-p", "/home/" + os.environ['BUILDER_NAME'] + "/rpmbuild/BUILD"])
+common.run_command(container, ["sudo", "-u", os.environ['BUILDER_NAME'], "mkdir", "-p", "/home/" + os.environ['BUILDER_NAME'] + "/rpmbuild/RPMS"])
+common.run_command(container, ["sudo", "-u", os.environ['BUILDER_NAME'], "mkdir", "-p", "/home/" + os.environ['BUILDER_NAME'] + "/rpmbuild/SOURCES"])
+common.run_command(container, ["sudo", "-u", os.environ['BUILDER_NAME'], "mkdir", "-p", "/home/" + os.environ['BUILDER_NAME'] + "/rpmbuild/SPECS"])
+common.run_command(container, ["sudo", "-u", os.environ['BUILDER_NAME'], "mkdir", "-p", "/home/" + os.environ['BUILDER_NAME'] + "/rpmbuild/SRPMS"])
+common.run_command(container, ["sudo", "-u", os.environ['BUILDER_NAME'], "ls", "-ltrR", "/home/" + os.environ['BUILDER_NAME'] + "/rpmbuild"])
+
+# Download the source
+rpm_friendly_version=""
+dest_archive=""
+download_url=""
+spec_file="/home/%s/rpmbuild/SPECS/netdata.spec" % os.environ['BUILDER_NAME']
+tag = None
+rpm_friendly_version, tag = common.fetch_version(os.environ['BUILD_VERSION'])
+tar_file="%s/netdata-%s.tar.gz" % (os.path.dirname(dest_archive), rpm_friendly_version)
+
+print("5. I will be building version '%s' of netdata." % os.environ['BUILD_VERSION'])
+dest_archive="/home/%s/rpmbuild/SOURCES/netdata-%s.tar.gz" % (os.environ['BUILDER_NAME'], rpm_friendly_version)
+
+common.prepare_version_source(dest_archive, rpm_friendly_version, tag=tag)
+
+# Extract the spec file in place
+print("6. Extract spec file from the source")
+common.run_command_in_host(['sudo', 'cp', 'netdata.spec', os.environ['LXC_CONTAINER_ROOT'] + spec_file])
+common.run_command_in_host(['sudo', 'chmod', '777', os.environ['LXC_CONTAINER_ROOT'] + spec_file])
+
+print("7. Temporary hack: Change Source0 to %s on spec file %s" % (dest_archive, spec_file))
+common.replace_tag("Source0", os.environ['LXC_CONTAINER_ROOT'] + spec_file, tar_file)
+
+print('Done!')
diff --git a/.travis/package_management/create_lxc_for_build.sh b/.travis/package_management/create_lxc_for_build.sh
new file mode 100755
index 0000000..d733687
--- /dev/null
+++ b/.travis/package_management/create_lxc_for_build.sh
@@ -0,0 +1,101 @@
+#!/usr/bin/env bash
+#
+# This script generates an LXC container and starts it up
+# Once the script completes successfully, a container has become available for usage
+# The container image to be used and the container name to be set, are part of variables
+# that must be present for the script to work
+#
+# Copyright: SPDX-License-Identifier: GPL-3.0-or-later
+#
+# Author : Pavlos Emm. Katsoulakis (paul@netdata.cloud)
+# shellcheck disable=SC1091
+set -e
+
+source .travis/package_management/functions.sh || (echo "Failed to load packaging library" && exit 1)
+
+# If we are not in netdata git repo, at the top level directory, fail
+TOP_LEVEL=$(basename "$(git rev-parse --show-toplevel)")
+CWD=$(git rev-parse --show-cdup)
+if [ -n "$CWD" ] || [ ! "${TOP_LEVEL}" == "netdata" ]; then
+ echo "Run as .travis/package_management/$(basename "$0") from top level directory of netdata git repository"
+ echo "LXC Container creation aborted"
+ exit 1
+fi
+
+# Check for presence of mandatory environment variables
+if [ -z "${BUILD_STRING}" ]; then
+ echo "No Distribution was defined. Make sure BUILD_STRING is set on the environment before running this script"
+ exit 1
+fi
+
+if [ -z "${BUILDER_NAME}" ]; then
+ echo "No builder account and container name defined. Make sure BUILDER_NAME is set on the environment before running this script"
+ exit 1
+fi
+
+if [ -z "${BUILD_DISTRO}" ]; then
+ echo "No build distro information defined. Make sure BUILD_DISTRO is set on the environment before running this script"
+ exit 1
+fi
+
+if [ -z "${BUILD_RELEASE}" ]; then
+ echo "No build release information defined. Make sure BUILD_RELEASE is set on the environment before running this script"
+ exit 1
+fi
+
+if [ -z "${PACKAGE_TYPE}" ]; then
+  echo "No package type defined. Make sure PACKAGE_TYPE is set on the environment before running this script"
+ exit 1
+fi
+
+# Detect architecture and load extra variables needed
+detect_arch_from_commit
+
+echo "Creating LXC container ${BUILDER_NAME}/${BUILD_STRING}/${BUILD_ARCH}...."
+
+case "${BUILD_ARCH}" in
+"all")
+ # i386
+ echo "Creating LXC Container for i386.."
+ export CONTAINER_NAME="${BUILDER_NAME}-${BUILD_DISTRO}${BUILD_RELEASE}-i386"
+ export LXC_CONTAINER_ROOT="/var/lib/lxc/${CONTAINER_NAME}/rootfs"
+ lxc-create -n "${CONTAINER_NAME}" -t "download" -- --dist "${BUILD_DISTRO}" --release "${BUILD_RELEASE}" --arch "i386" --no-validate
+
+ echo "Container(s) ready. Configuring container(s).."
+ .travis/package_management/configure_${PACKAGE_TYPE}_lxc_environment.py "${CONTAINER_NAME}"
+
+ # amd64
+ echo "Creating LXC Container for amd64.."
+ export CONTAINER_NAME="${BUILDER_NAME}-${BUILD_DISTRO}${BUILD_RELEASE}-amd64"
+ export LXC_CONTAINER_ROOT="/var/lib/lxc/${CONTAINER_NAME}/rootfs"
+ lxc-create -n "${CONTAINER_NAME}" -t "download" -- --dist "${BUILD_DISTRO}" --release "${BUILD_RELEASE}" --arch "amd64" --no-validate
+
+ echo "Container(s) ready. Configuring container(s).."
+ .travis/package_management/configure_${PACKAGE_TYPE}_lxc_environment.py "${CONTAINER_NAME}"
+
+ # arm64
+ echo "Creating LXC Container for arm64.."
+ export CONTAINER_NAME="${BUILDER_NAME}-${BUILD_DISTRO}${BUILD_RELEASE}-arm64"
+ export LXC_CONTAINER_ROOT="/var/lib/lxc/${CONTAINER_NAME}/rootfs"
+ lxc-create -n "${CONTAINER_NAME}" -t "download" -- --dist "${BUILD_DISTRO}" --release "${BUILD_RELEASE}" --arch "arm64" --no-validate
+
+ echo "Container(s) ready. Configuring container(s).."
+ .travis/package_management/configure_${PACKAGE_TYPE}_lxc_environment.py "${CONTAINER_NAME}"
+ ;;
+"i386"|"amd64"|"arm64")
+	# i386, amd64 or arm64
+ echo "Creating LXC Container for ${BUILD_ARCH}.."
+ export CONTAINER_NAME="${BUILDER_NAME}-${BUILD_DISTRO}${BUILD_RELEASE}-${BUILD_ARCH}"
+ export LXC_CONTAINER_ROOT="/var/lib/lxc/${CONTAINER_NAME}/rootfs"
+ lxc-create -n "${CONTAINER_NAME}" -t "download" -- --dist "${BUILD_DISTRO}" --release "${BUILD_RELEASE}" --arch "${BUILD_ARCH}" --no-validate
+
+ echo "Container(s) ready. Configuring container(s).."
+ .travis/package_management/configure_${PACKAGE_TYPE}_lxc_environment.py "${CONTAINER_NAME}"
+ ;;
+*)
+ echo "Unknown BUILD_ARCH value '${BUILD_ARCH}' given, process failed"
+ exit 1
+ ;;
+esac
+
+echo "..LXC creation complete!"
diff --git a/.travis/package_management/functions.sh b/.travis/package_management/functions.sh
new file mode 100644
index 0000000..0c00d2f
--- /dev/null
+++ b/.travis/package_management/functions.sh
@@ -0,0 +1,33 @@
+# no-shebang-needed-its-a-library
+#
+# Utility functions for packaging in travis CI
+#
+# Copyright: SPDX-License-Identifier: GPL-3.0-or-later
+#
+# Author : Pavlos Emm. Katsoulakis (paul@netdata.cloud)
+#shellcheck disable=SC2148
+set -e
+
+function detect_arch_from_commit {
+ case "${TRAVIS_COMMIT_MESSAGE}" in
+ "[Package amd64"*)
+ export BUILD_ARCH="amd64"
+ ;;
+ "[Package i386"*)
+ export BUILD_ARCH="i386"
+ ;;
+ "[Package ALL"*)
+ export BUILD_ARCH="all"
+ ;;
+ "[Package arm64"*)
+ export BUILD_ARCH="arm64"
+ ;;
+
+ *)
+ echo "Unknown build architecture in '${TRAVIS_COMMIT_MESSAGE}'. Assuming amd64"
+ export BUILD_ARCH="amd64"
+ ;;
+ esac
+
+ echo "Detected build architecture ${BUILD_ARCH}"
+}
diff --git a/.travis/package_management/old_package_purging.sh b/.travis/package_management/old_package_purging.sh
new file mode 100755
index 0000000..89ecd75
--- /dev/null
+++ b/.travis/package_management/old_package_purging.sh
@@ -0,0 +1,93 @@
+#!/usr/bin/env bash
+#
+# Script to handle package cloud retention policy
+# Our open source subscription is limited,
+# so we use this script to control the number of packages maintained historically
+#
+# Dependencies:
+# - PACKAGE_CLOUD_RETENTION_DAYS
+# This is to indicate for how many days back we want to maintain the various RPM and DEB packages on package cloud
+#
+# Copyright : SPDX-License-Identifier: GPL-3.0-or-later
+#
+# Author : Pavlos Emm. Katsoulakis <paul@netdata.cloud>
+#
+set -e
+
+delete_files_for_version() {
+ local v="$1"
+
+ # Delete the selected filenames in version
+ FILES_IN_VERSION=$(jq --sort-keys --arg v "${v}" '.[] | select ( .version | contains($v))' "${PKG_LIST_FILE}" | grep filename | cut -d':' -f 2)
+
+ # Iterate through the files and delete them
+ for pkg in ${FILES_IN_VERSION/\\n/}; do
+ pkg=${pkg/,/}
+ pkg=${pkg/\"/}
+ pkg=${pkg/\"/}
+ echo "Attempting yank on ${pkg}.."
+ .travis/package_management/package_cloud_wrapper.sh yank "${PACKAGING_USER}/${DEPLOY_REPO}" "${pkg}" || echo "Nothing to yank or error on ${pkg}"
+ done
+}
+
+# If we are not in netdata git repo, at the top level directory, fail
+TOP_LEVEL=$(basename "$(git rev-parse --show-toplevel)")
+CWD=$(git rev-parse --show-cdup)
+if [ -n "$CWD" ] || [ ! "${TOP_LEVEL}" == "netdata" ]; then
+ echo "Run as .travis/package_management/$(basename "$0") from top level directory of netdata git repository"
+ echo "Old packages yanking cancelled"
+ exit 1
+fi
+
+if [ -z "${PACKAGING_USER}" ]; then
+ echo "No PACKAGING_USER variable found"
+ exit 1
+fi
+
+if [ -z "${DEPLOY_REPO}" ]; then
+ echo "No DEPLOY_REPO variable found"
+ exit 1
+fi
+
+if [ -z "${PKG_CLOUD_TOKEN}" ]; then
+ echo "No PKG_CLOUD_TOKEN variable found"
+ exit 1
+fi
+
+if [ -z "${PACKAGE_CLOUD_RETENTION_DAYS}" ]; then
+ echo "No PACKAGE_CLOUD_RETENTION_DAYS variable found"
+ exit 1
+fi
+
+TMP_DIR="$(mktemp -d /tmp/netdata-old-package-yanking-XXXXXX)"
+PKG_LIST_FILE="${TMP_DIR}/complete_package_list.json"
+DATE_EPOCH="1970-01-01"
+DATE_UNTIL_TO_DELETE=$(date --date="${PACKAGE_CLOUD_RETENTION_DAYS} day ago" +%Y-%m-%d)
+
+
+echo "Created temp directory: ${TMP_DIR}"
+echo "We will be purging contents up until ${DATE_UNTIL_TO_DELETE}"
+
+echo "Calling package could to retrieve all available packages on ${PACKAGING_USER}/${DEPLOY_REPO}"
+curl -sS "https://${PKG_CLOUD_TOKEN}:@packagecloud.io/api/v1/repos/${PACKAGING_USER}/${DEPLOY_REPO}/packages.json" > "${PKG_LIST_FILE}"
+
+# Get versions within the desired duration
+#
+VERSIONS_TO_PURGE=$(jq --arg s "${DATE_EPOCH}" --arg e "${DATE_UNTIL_TO_DELETE}" '
+[($s, $e) | strptime("%Y-%m-%d")[0:3]] as $r
+ | map(select(
+ (.created_at[:19] | strptime("%Y-%m-%dT%H:%M:%S")[0:3]) as $d
+ | $d >= $r[0] and $d <= $r[1]
+))' "${PKG_LIST_FILE}" | grep '"version":' | sort -u | sed -e 's/ //g' | cut -d':' -f2)
+
+echo "We will be deleting the following versions: ${VERSIONS_TO_PURGE}"
+for v in ${VERSIONS_TO_PURGE/\n//}; do
+ v=${v/\"/}
+ v=${v/\"/}
+ v=${v/,/}
+ echo "Remove all files for version $v"
+ delete_files_for_version "${v}"
+done
+
+# Done, clean up
+[ -d "${TMP_DIR}" ] && rm -rf "${TMP_DIR}"
diff --git a/.travis/package_management/package_cloud_wrapper.sh b/.travis/package_management/package_cloud_wrapper.sh
new file mode 100755
index 0000000..48a372d
--- /dev/null
+++ b/.travis/package_management/package_cloud_wrapper.sh
@@ -0,0 +1,48 @@
+#!/usr/bin/env bash
+#
+# This is a tool to help removal of packages from packagecloud.io
+# It utilizes the package_cloud utility provided from packagecloud.io
+#
+# Depends on:
+# 1) package cloud gem (detects absence and installs it)
+#
+# Requires:
+# 1) PKG_CLOUD_TOKEN variable exported
+# 2) To properly install package_cloud when not found, it requires: ruby gcc gcc-c++ ruby-devel
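+#
+# Example invocation (as used by old_package_purging.sh):
+#   .travis/package_management/package_cloud_wrapper.sh yank "${PACKAGING_USER}/${DEPLOY_REPO}" some-package.deb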
+#
+# Copyright: SPDX-License-Identifier: GPL-3.0-or-later
+#
+# Author : Pavlos Emm. Katsoulakis (paul@netdata.cloud)
+#shellcheck disable=SC2068,SC2145
+set -e
+PKG_CLOUD_CONFIG="$HOME/.package_cloud_configuration.cfg"
+
+# If we are not in netdata git repo, at the top level directory, fail
+TOP_LEVEL=$(basename "$(git rev-parse --show-toplevel)")
+CWD=$(git rev-parse --show-cdup)
+if [ -n "$CWD" ] || [ ! "${TOP_LEVEL}" == "netdata" ]; then
+ echo "Run as .travis/package_management/$(basename "$0") from top level directory of netdata git repository"
+ echo "Docker build process aborted"
+ exit 1
+fi
+
+# Install dependency if not there
+if ! command -v package_cloud > /dev/null 2>&1; then
+ echo "No package cloud gem found, installing"
+ gem install -V package_cloud || (echo "Package cloud installation failed. you might want to check if required dependencies are there (ruby gcc gcc-c++ ruby-devel)" && exit 1)
+else
+ echo "Found package_cloud gem, continuing"
+fi
+
+# Check for required token and prepare config
+if [ -z "${PKG_CLOUD_TOKEN}" ]; then
+ echo "Please set PKG_CLOUD_TOKEN to be able to use ${0}"
+ exit 1
+fi
+echo "{\"url\":\"https://packagecloud.io\",\"token\":\"${PKG_CLOUD_TOKEN}\"}" > "${PKG_CLOUD_CONFIG}"
+
+echo "Executing package_cloud with config ${PKG_CLOUD_CONFIG} and parameters $@"
+package_cloud $@ --config="${PKG_CLOUD_CONFIG}"
+
+rm -rf "${PKG_CLOUD_CONFIG}"
+echo "Done!"
diff --git a/.travis/package_management/prepare_packages.sh b/.travis/package_management/prepare_packages.sh
new file mode 100755
index 0000000..12ed07c
--- /dev/null
+++ b/.travis/package_management/prepare_packages.sh
@@ -0,0 +1,63 @@
+#!/usr/bin/env bash
+#
+# Utility that gathers generated packages,
+# puts them together in a local folder for deploy facility to pick up
+#
+# Copyright: SPDX-License-Identifier: GPL-3.0-or-later
+#
+# Author : Pavlos Emm. Katsoulakis (paul@netdata.cloud)
+#shellcheck disable=SC2068
+set -e
+
+# If we are not in netdata git repo, at the top level directory, fail
+TOP_LEVEL=$(basename "$(git rev-parse --show-toplevel)")
+CWD=$(git rev-parse --show-cdup)
+if [ -n "$CWD" ] || [ ! "${TOP_LEVEL}" == "netdata" ]; then
+ echo "Run as .travis/package_management/$(basename "$0") from top level directory of netdata git repository"
+ echo "Package preparation aborted"
+ exit 1
+fi
+
+export LXC_ROOT="/var/lib/lxc"
+
+# Go through the containers created for packaging and pick up all generated packages
+CREATED_CONTAINERS=$(ls -A "${LXC_ROOT}")
+for d in ${CREATED_CONTAINERS[@]}; do
+ echo "Picking up packaging contents from ${d}"
+
+ # Pick up any RPMS from builder
+ RPM_BUILD_PATH="${LXC_ROOT}/${d}/rootfs/home/${BUILDER_NAME}/rpmbuild"
+ if [ -d "${RPM_BUILD_PATH}" ]; then
+ echo "Checking folder ${RPM_BUILD_PATH} for RPMS and SRPMS"
+
+ if [ -d "${RPM_BUILD_PATH}/RPMS" ]; then
+			echo "Found RPMS in '${RPM_BUILD_PATH}', copying over the following:"
+ ls -ltrR "${RPM_BUILD_PATH}/RPMS"
+ [[ -d "${RPM_BUILD_PATH}/RPMS/x86_64" ]] && cp -r "${RPM_BUILD_PATH}"/RPMS/x86_64/* "${PACKAGES_DIRECTORY}"
+ [[ -d "${RPM_BUILD_PATH}/RPMS/i386" ]] && cp -r "${RPM_BUILD_PATH}"/RPMS/i386/* "${PACKAGES_DIRECTORY}"
+ [[ -d "${RPM_BUILD_PATH}/RPMS/i686" ]] && cp -r "${RPM_BUILD_PATH}"/RPMS/i686/* "${PACKAGES_DIRECTORY}"
+ fi
+
+ if [ -d "${RPM_BUILD_PATH}/SRPMS" ]; then
+			echo "Found SRPMS in '${RPM_BUILD_PATH}', copying over the following:"
+ ls -ltrR "${RPM_BUILD_PATH}/SRPMS"
+ [[ -d "${RPM_BUILD_PATH}/SRPMS/x86_64" ]] && cp -r "${RPM_BUILD_PATH}"/SRPMS/x86_64/* "${PACKAGES_DIRECTORY}"
+ [[ -d "${RPM_BUILD_PATH}/SRPMS/i386" ]] && cp -r "${RPM_BUILD_PATH}"/SRPMS/i386/* "${PACKAGES_DIRECTORY}"
+ [[ -d "${RPM_BUILD_PATH}/SRPMS/i686" ]] && cp -r "${RPM_BUILD_PATH}"/SRPMS/i686/* "${PACKAGES_DIRECTORY}"
+ fi
+ else
+ DEB_BUILD_PATH="${LXC_ROOT}/${d}/rootfs/home/${BUILDER_NAME}"
+ echo "Checking folder ${DEB_BUILD_PATH} for DEB packages"
+ if [ -d "${DEB_BUILD_PATH}" ]; then
+ cp "${DEB_BUILD_PATH}"/netdata*.ddeb "${PACKAGES_DIRECTORY}" || echo "Could not copy any .ddeb files"
+ cp "${DEB_BUILD_PATH}"/netdata*.deb "${PACKAGES_DIRECTORY}" || echo "Could not copy any .deb files"
+ cp "${DEB_BUILD_PATH}"/netdata*.buildinfo "${PACKAGES_DIRECTORY}" || echo "Could not copy any .buildinfo files"
+ cp "${DEB_BUILD_PATH}"/netdata*.changes "${PACKAGES_DIRECTORY}" || echo "Could not copy any .changes files"
+ else
+			echo "Folder ${DEB_BUILD_PATH} does not exist or is not a directory, nothing to do for package preparation"
+ fi
+ fi
+done
+
+chmod -R 777 "${PACKAGES_DIRECTORY}"
+echo "Packaging contents ready to ship!"
diff --git a/.travis/package_management/trigger_deb_lxc_build.py b/.travis/package_management/trigger_deb_lxc_build.py
new file mode 100755
index 0000000..464a771
--- /dev/null
+++ b/.travis/package_management/trigger_deb_lxc_build.py
@@ -0,0 +1,86 @@
+#!/usr/bin/env python3
+#
+# This script is responsible for running the DEB build on the running container
+#
+# Copyright: SPDX-License-Identifier: GPL-3.0-or-later
+#
+# Author : Pavlos Emm. Katsoulakis <paul@netdata.cloud>
+
+import common
+import os
+import sys
+import lxc
+
+if len(sys.argv) != 2:
+ print('You need to provide a container name to get things started')
+ sys.exit(1)
+container_name = sys.argv[1]
+
+# Load the container, bail out if it's not there
+print("Starting up container %s" % container_name)
+container = lxc.Container(container_name)
+if not container.defined:
+ raise Exception("Container %s does not exist!" % container_name)
+
+# Check if the container is running, attempt to start it up in case it's not running
+if not container.running or not container.state == "RUNNING":
+ print('Container %s is not running, attempting to start it up' % container_name)
+
+ # Start the container
+ if not container.start():
+ raise Exception("Failed to start the container")
+
+ if not container.running or not container.state == "RUNNING":
+ raise Exception('Container %s is not running, build process aborted' % container_name)
+
+# Wait for connectivity
+if not container.get_ips(timeout=30):
+ raise Exception("Timeout while waiting for container")
+
+build_path = "/home/%s" % os.environ['BUILDER_NAME']
+
+print("Setting up EMAIL and DEBFULLNAME variables required by the build tools")
+os.environ["EMAIL"] = "bot@netdata.cloud"
+os.environ["DEBFULLNAME"] = "Netdata builder"
+
+# Run the build process on the container
+new_version, tag = common.fetch_version(os.environ['BUILD_VERSION'])
+print("Starting DEB build process for version %s" % new_version)
+
+netdata_tarball = "%s/netdata-%s.tar.gz" % (build_path, new_version)
+unpacked_netdata = netdata_tarball.replace(".tar.gz", "")
+
+print("Extracting tarball %s" % netdata_tarball)
+common.run_command(container, ["sudo", "-u", os.environ['BUILDER_NAME'], "tar", "xf", netdata_tarball, "-C", build_path])
+
+print("Checking version consistency")
+since_version = os.environ["LATEST_RELEASE_VERSION"]
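+# When building a new stable release the latest tag equals the version being built, so
+# step since_version back by one patch (or one minor when the patch is 0), e.g. 1.29.3 -> 1.29.2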
+if str(since_version).replace('v', '') == str(new_version) and str(new_version).count('.') == 2:
+ s = since_version.split('.')
+ if int(s[2]) > 0:
+ patch_prev = str(int(s[2]) - 1)
+ since_version = s[0] + '.' + s[1] + '.' + patch_prev
+ else:
+ prev = str(int(s[1]) - 1)
+ since_version = s[0] + '.' + prev + '.' + s[2]
+
+ print("We seem to be building a new stable release, reduce by one since_version option. New since_version:%s" % since_version)
+
+print("Fixing changelog tags")
+changelog_in_host = "contrib/debian/changelog"
+common.run_command_in_host(['sed', '-i', 's/PREVIOUS_PACKAGE_VERSION/%s-1/g' % since_version.replace("v", ""), changelog_in_host])
+common.run_command_in_host(['sed', '-i', 's/PREVIOUS_PACKAGE_DATE/%s/g' % os.environ["LATEST_RELEASE_DATE"], changelog_in_host])
+
+print("Executing gbp dch command..")
+common.run_command_in_host(['gbp', 'dch', '--release', '--ignore-branch', '--spawn-editor=snapshot', '--since=%s' % since_version, '--new-version=%s' % new_version])
+
+print("Copying over changelog to the destination machine")
+common.run_command_in_host(['sudo', 'cp', 'debian/changelog', "%s/%s/netdata-%s/contrib/debian/" % (os.environ['LXC_CONTAINER_ROOT'], build_path, new_version)])
+
+print("Running debian build script since %s" % since_version)
+common.run_command(container, ["sudo", "-u", os.environ['BUILDER_NAME'], "%s/build.sh" % build_path, unpacked_netdata, new_version])
+
+print("Listing contents on build path")
+common.run_command(container, ["sudo", "-u", os.environ['BUILDER_NAME'], "ls", "-ltr", build_path])
+
+print('Done!')
diff --git a/.travis/package_management/trigger_rpm_lxc_build.py b/.travis/package_management/trigger_rpm_lxc_build.py
new file mode 100755
index 0000000..f9e109c
--- /dev/null
+++ b/.travis/package_management/trigger_rpm_lxc_build.py
@@ -0,0 +1,55 @@
+#!/usr/bin/env python3
+#
+# This script is responsible for running the RPM build on the running container
+#
+# Copyright: SPDX-License-Identifier: GPL-3.0-or-later
+#
+# Author : Pavlos Emm. Katsoulakis <paul@netdata.cloud>
+
+import common
+import os
+import sys
+import lxc
+
+if len(sys.argv) != 2:
+ print('You need to provide a container name to get things started')
+ sys.exit(1)
+container_name = sys.argv[1]
+
+# Load the container, bail out if it's not there
+print("Starting up container %s" % container_name)
+container = lxc.Container(container_name)
+if not container.defined:
+ raise Exception("Container %s does not exist!" % container_name)
+
+# Check if the container is running, attempt to start it up in case it's not running
+if not container.running or not container.state == "RUNNING":
+ print('Container %s is not running, attempting to start it up' % container_name)
+
+ # Start the container
+ if not container.start():
+ raise Exception("Failed to start the container")
+
+ if not container.running or not container.state == "RUNNING":
+ raise Exception('Container %s is not running, build process aborted' % container_name)
+
+# Wait for connectivity
+if not container.get_ips(timeout=30):
+ raise Exception("Timeout while waiting for container")
+
+print("Adding builder specific dependencies to the LXC container")
+common.run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "rpm-build"])
+common.run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "rpm-devel"])
+common.run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "rpmlint"])
+common.run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "make"])
+common.run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "python"])
+common.run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "bash"])
+common.run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "diffutils"])
+common.run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "patch"])
+common.run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "rpmdevtools"])
+
+# Run the build process on the container
+print("Starting RPM build process")
+common.run_command(container, ["sudo", "-u", os.environ['BUILDER_NAME'], "rpmbuild", "-ba", "--rebuild", "/home/%s/rpmbuild/SPECS/netdata.spec" % os.environ['BUILDER_NAME']])
+
+print('Done!')
diff --git a/.travis/package_management/yank_stale_pkg.sh b/.travis/package_management/yank_stale_pkg.sh
new file mode 100755
index 0000000..3f76697
--- /dev/null
+++ b/.travis/package_management/yank_stale_pkg.sh
@@ -0,0 +1,35 @@
+#!/usr/bin/env bash
+#
+# This script is responsible for the removal of stale RPM/DEB files.
+# It runs on the pre-deploy step and takes care of the removal of the files
+# prior to the upload of the freshly built ones
+#
+# Copyright: SPDX-License-Identifier: GPL-3.0-or-later
+#
+# Author : Pavlos Emm. Katsoulakis (paul@netdata.cloud)
+#shellcheck disable=SC2010,SC2068
+set -e
+
+# If we are not in netdata git repo, at the top level directory, fail
+TOP_LEVEL=$(basename "$(git rev-parse --show-toplevel)")
+CWD=$(git rev-parse --show-cdup)
+if [ -n "$CWD" ] || [ ! "${TOP_LEVEL}" == "netdata" ]; then
+ echo "Run as .travis/package_management/$(basename "$0") from top level directory of netdata git repository"
+ echo "Package yanking cancelled"
+ exit 1
+fi
+
+PACKAGES_DIR="$1"
+DISTRO="$2"
+PACKAGES_LIST="$(ls -AR "${PACKAGES_DIR}" | grep -e '\.rpm' -e '\.deb' -e '\.ddeb' )"
+
+if [ ! -d "${PACKAGES_DIR}" ] || [ -z "${PACKAGES_LIST}" ]; then
+ echo "Folder ${PACKAGES_DIR} does not seem to be a valid directory or is empty. No packages to check for yanking"
+ exit 1
+fi
+
+for pkg in ${PACKAGES_LIST[@]}; do
+ echo "Attempting yank on ${pkg}.."
+ .travis/package_management/package_cloud_wrapper.sh yank "${PACKAGING_USER}/${DEPLOY_REPO}/${DISTRO}" "${pkg}" || echo "Nothing to yank or error on ${pkg}"
+done
+
diff --git a/.travis/tagger.sh b/.travis/tagger.sh
new file mode 100755
index 0000000..a775a82
--- /dev/null
+++ b/.travis/tagger.sh
@@ -0,0 +1,61 @@
+# BASH library
+#
+# Tags are generated by searching for a keyword in the last commit message. Keywords are:
+# - [netdata patch release] to bump the patch number
+# - [netdata minor release] to bump the minor number
+# - [netdata major release] to bump the major number
+# - [netdata release candidate] to generate or advance a release-candidate tag
+# All keywords MUST be surrounded with square brackets.
+#
+# Requirements:
+# - GITHUB_TOKEN variable set with GitHub token. Access level: repo.public_repo
+# - git-semver python package (pip install git-semver)
+#
+# Original script is available at https://github.com/paulfantom/travis-helper/blob/master/releasing/releaser.sh
+#
+# Copyright: SPDX-License-Identifier: GPL-3.0-or-later
+#
+# Author : Pawel Krupa (paulfantom)
+# Author : Pavlos Emm. Katsoulakis (paul@netdata.cloud)
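+#
+# Usage (hypothetical, the file is meant to be sourced by other CI scripts):
+#   source .travis/tagger.sh && set_tag_for_release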
+
+# Figure out what the new release candidate tag will be, based only on previous ones.
+# This assumes that RELEASES are in format of "v0.1.2" and prereleases (RCs) are using "v0.1.2-rc0"
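+# e.g. last tag 0.1.2-rc3 (as reported by git semver) -> v0.1.2-rc4; last tag 0.1.2 -> v0.2.0-rc0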
+function set_tag_release_candidate() {
+ LAST_TAG=$(git semver)
+ echo "${0}: Last tag found is: ${LAST_TAG}"
+
+ if [[ $LAST_TAG =~ -rc[0-9]+$ ]]; then
+ VERSION=$(echo "$LAST_TAG" | cut -d'-' -f 1)
+ LAST_RC=$(echo "$LAST_TAG" | cut -d'c' -f 2)
+ RC=$((LAST_RC + 1))
+ else
+ VERSION="$(git semver --next-minor)"
+ RC=0
+ echo "${0}: Warning: Will set version to ${VERSION} (Last tag: ${LAST_TAG}) while tagged for release candidate generation"
+ fi
+
+ GIT_TAG="v${VERSION}-rc${RC}"
+ echo "${0}: Generated a new tag, set to: (${GIT_TAG})"
+}
+
+function set_tag_for_release() {
+ echo "${0}: Checking for tag existence"
+ if [ -z "${GIT_TAG}" ]; then
+ echo "${0}: No tag was found, generating a new tag"
+ git semver
+
+ echo "${0}: Last commit message: ${TRAVIS_COMMIT_MESSAGE}"
+
+ # Figure out next tag based on commit message
+ case "${TRAVIS_COMMIT_MESSAGE}" in
+ *"[netdata patch release]"*) GIT_TAG="v$(git semver --next-patch)" ;;
+ *"[netdata minor release]"*) GIT_TAG="v$(git semver --next-minor)" ;;
+ *"[netdata major release]"*) GIT_TAG="v$(git semver --next-major)" ;;
+ *"[netdata release candidate]"*) set_tag_release_candidate ;;
+ *)
+ echo "${0}: Keyword not detected. Nothing to set for GIT_TAG"
+ ;;
+ esac
+ else
+ echo "${0}: We seem to already have a GIT_TAG set to (${GIT_TAG})"
+ fi
+}
diff --git a/.travis/trigger_docker_build.sh b/.travis/trigger_docker_build.sh
new file mode 100755
index 0000000..7a3dd50
--- /dev/null
+++ b/.travis/trigger_docker_build.sh
@@ -0,0 +1,19 @@
+#!/bin/sh
+
+token="${1}"
+version="${2}"
+
+resp="$(curl -X POST \
+ -H 'Accept: application/vnd.github.v3+json' \
+ -H "Authorization: Bearer ${token}" \
+ "https://api.github.com/repos/netdata/netdata/actions/workflows/docker.yml/dispatches" \
+ -d "{\"ref\": \"master\", \"inputs\": {\"version\": \"${version}\"}}")"
+
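+# The workflow dispatch endpoint returns HTTP 204 with an empty body on success,
+# so any output from curl indicates an error response.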
+if [ -z "${resp}" ]; then
+ echo "Successfully triggered Docker image build."
+ exit 0
+else
+ echo "Failed to trigger Docker image build. Output:"
+ echo "${resp}"
+ exit 1
+fi
diff --git a/.travis/trigger_package_generation.sh b/.travis/trigger_package_generation.sh
new file mode 100755
index 0000000..b58832c
--- /dev/null
+++ b/.travis/trigger_package_generation.sh
@@ -0,0 +1,57 @@
+#!/usr/bin/env bash
+#
+# Trigger .RPM and .DEB package generation processes
+#
+# Copyright: SPDX-License-Identifier: GPL-3.0-or-later
+#
+# Author: Pavlos Emm. Katsoulakis <paul@netdata.cloud>
+set -e
+WAIT_TIME=15
+BUILD_NIGHTLY="$1"
+
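+# Each empty commit created below carries a "[Package <arch> <pkg>]" marker in its
+# message, which the CI configuration keys on to start the corresponding package build.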
+commit_change() {
+ local ARCH="$1"
+ local PKG="$2"
+ local GIT_MAIL="bot@netdata.cloud"
+ local GIT_USER="netdatabot"
+
+ echo "---- Committing ${ARCH} .${PKG} package generation ----"
+ git commit --allow-empty --author "${GIT_USER} <${GIT_MAIL}>" -m "[Package ${ARCH} ${PKG}]${BUILD_NIGHTLY} Package build process trigger"
+}
+
+push_change() {
+ echo "---- Push changes to repository ----"
+ git push "https://${GITHUB_TOKEN}:@$(git config --get remote.origin.url | sed -e 's/^https:\/\///')"
+}
+
+# If we are not in netdata git repo, at the top level directory, fail
+TOP_LEVEL=$(basename "$(git rev-parse --show-toplevel)")
+CWD=$(git rev-parse --show-cdup || echo "")
+if [ -n "${CWD}" ] || [ ! "${TOP_LEVEL}" == "netdata" ]; then
+ echo "Run as .travis/$(basename "$0") from top level directory of netdata git repository"
+ echo "Changelog generation process aborted"
+ exit 1
+fi
+
+echo "--- Initialize git configuration ---"
+git checkout master
+git fetch --all
+git pull
+
+commit_change "amd64" "DEB"
+push_change
+
+echo "---- Waiting for ${WAIT_TIME} seconds before triggering next process ----"
+sleep "${WAIT_TIME}"
+
+commit_change "i386" "DEB"
+push_change
+
+echo "---- Waiting for ${WAIT_TIME} seconds before triggering next process ----"
+sleep "${WAIT_TIME}"
+
+commit_change "amd64" "RPM"
+push_change
+
+echo "---- Done! ----"
diff --git a/.travis/utils.sh b/.travis/utils.sh
new file mode 100644
index 0000000..d084a7a
--- /dev/null
+++ b/.travis/utils.sh
@@ -0,0 +1,29 @@
+#!/usr/bin/env bash
+
+# Prevent Travis from timing out after 10 minutes of no output by emitting a newline every 5 minutes while the wrapped command runs
+tick() {
+ (while true; do sleep 300; echo; done) &
+ local PID=$!
+ disown
+
+ "$@"
+ local RET=$?
+
+ kill $PID
+ return $RET
+}
+export -f tick
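+# Example (hypothetical): tick ./netdata-installer.sh --dont-wait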
+
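+# Retry a command up to $1 times, sleeping with exponential backoff (1s, 2s, 4s, ...)
+# between attempts. Example (hypothetical): retry 3 curl -sSLO https://example.com/artifact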
+retry() {
+ local tries=$1
+ shift
+
+ local i=0
+ while [ "$i" -lt "$tries" ]; do
+ "$@" && return 0
+ sleep $((2**((i++))))
+ done
+
+ return 1
+}
+export -f retry
diff --git a/.yamllint.yml b/.yamllint.yml
new file mode 100644
index 0000000..09d99aa
--- /dev/null
+++ b/.yamllint.yml
@@ -0,0 +1,34 @@
+---
+yaml-files:
+ - '*.yaml'
+ - '*.yml'
+ - '.yamllint'
+ - 'collectors/python.d.plugin/*.conf'
+ - 'collectors/python.d.plugin/*/*.conf'
+
+rules:
+ line-length:
+ max: 120
+ level: warning
+ braces: enable
+ brackets: enable
+ colons: enable
+ commas: enable
+ comments: disable
+ comments-indentation: disable
+ document-end: disable
+ document-start: disable
+ empty-lines: enable
+ empty-values: disable
+ hyphens: enable
+ indentation: enable
+ key-duplicates: enable
+ key-ordering: disable
+ new-line-at-end-of-file: enable
+ new-lines: enable
+ octal-values: enable
+ quoted-strings: disable
+ trailing-spaces: enable
+ truthy:
+ check-keys: false
+ allowed-values: ["true", "false", "yes", "no"]
diff --git a/BREAKING_CHANGES.md b/BREAKING_CHANGES.md
new file mode 100644
index 0000000..20d0556
--- /dev/null
+++ b/BREAKING_CHANGES.md
@@ -0,0 +1,11 @@
+<!--
+title: "Breaking changes"
+description: "On occasion, the Netdata team must make significant changes to the open-source Netdata Agent. We publish those breaking changes here for reference."
+custom_edit_url: https://github.com/netdata/netdata/edit/master/BREAKING_CHANGES.md
+-->
+
+# Breaking changes
+
+- remove deprecated bash modules (`apache`, `cpu_apps`, `cpufreq`, `exim`, `hddtemp`, `load_average`, `mem_apps`, `mysql`, `nginx`, `phpfpm`, `postfix`, `squid`, `tomcat`) [[#7962](https://github.com/netdata/netdata/pull/7962)]
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2BREAKING_CHANGES&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/BUILD.md b/BUILD.md
new file mode 100644
index 0000000..049c86d
--- /dev/null
+++ b/BUILD.md
@@ -0,0 +1,365 @@
+<!--
+title: "The build system"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/BUILD.md
+-->
+
+# The build system
+
+We are currently migrating from `autotools` to `CMake` as a build-system. This document
+describes how we intend to perform this migration, and will be updated after the migration
+to explain how the new `CMake` configuration works.
+
+## Stages during the build
+
+1. The `netdata-installer.sh` script takes in arguments and environment settings to control
+ the build.
+2. The configure step: `autoreconf -ivf ; ./configure` passing arguments into the configure
+ script. This becomes `generation-time` in CMake. This includes package / system detection
+ and configuration resulting in the `config.h` in the source root.
+3. The build step: recurse through the generated Makefiles and build the executable.
+4. The first install step: calls `make install` to handle all the install steps put into
+ the Makefiles by the configure step (puts binaries / libraries / config into target
+ tree structure).
+5. The second install step: the rest of the installer after the make install handles
+ system-level configuration (privilege setting, user / groups, fetch/build/install `go.d`
+ plugins, telemetry, installing the service for startup, the uninstaller, auto-updates).
+
+The ideal migration result is to replace all of this with the following steps:
+```
+mkdir build ; cd build ; cmake .. -D... ; cmake --build . --target install
+```
+
+The `-D...` indicates where the command-line arguments for configuration are passed into
+`CMake`.
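+
+As a concrete sketch of such an invocation (the option names are illustrative, assuming the
+flags discussed below end up in the final CMakeLists.txt):
+```
+mkdir build ; cd build
+cmake .. -DENABLE_DBENGINE=ON -DWITH_JEMALLOC=Detect
+cmake --build . --target install
+```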
+
+## CMake generation time
+
+At generation time we need to solve the following issues:
+
+### Feature flags
+
+Every command-line switch on the installer and the configure script needs to become an
+argument to the CMake generation; we can do this with variables in the CMake cache:
+
+CMakeLists.txt:
+```
+option(ENABLE_DBENGINE "Enable the dbengine storage" ON)
+...
+if(${ENABLE_DBENGINE})
+...
+endif()
+```
+
+Command-line interface
+```
+cmake -DENABLE_DBENGINE=ON
+```
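+
+To make the flag visible to the C code, the cached option can be propagated into `config.h`
+at generation time (a minimal sketch, reusing the `configure_file` approach shown later in
+this document):
+```
+# CMakeLists.txt
+configure_file(cmake/config.in config.h)
+
+# cmake/config.in
+#cmakedefine01 ENABLE_DBENGINE
+```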
+
+### Dependency detection
+
+We have a mixture of soft- and hard-dependencies on libraries. For most of these we expect
+`pkg-config` information, for some we manually probe for libraries and include files. We
+should treat all of the external dependencies consistently:
+
+1. Default to autodetect using `pkg-config` (e.g. the standard `jemalloc` drops a `.pc`
+ into the system but we do not check for it).
+2. If no `.pc` is found perform a manual search for libraries under known names, and
+ check for accessible symbols inside them.
+3. Check that include paths work.
+4. Allow a command-line override (e.g. `-DWITH_JEMALLOC=/...`).
+5. If none of the above work then fail the install if the dependency is hard, otherwise
+ indicate it is not present in the `config.h`.
+
+Before doing any dependency detection we need to determine which search paths are
+really in use for the current compiler; after the `project` declaration we can use:
+```
+execute_process(COMMAND ${CMAKE_C_COMPILER} "--print-search-dirs"
+ COMMAND grep "^libraries:"
+ COMMAND sed "s/^libraries: =//"
+ COMMAND tr ":" " "
+ COMMAND tr -d "\n"
+ OUTPUT_VARIABLE CC_SEARCH_DIRS
+ RESULTS_VARIABLE CC_SEARCH_RES)
+string(REGEX MATCH "^[0-9]+" CC_SEARCH_RES ${CC_SEARCH_RES})
+#string(STRIP "${CC_SEARCH_RES}" CC_SEARCH_RES)
+if(0 LESS ${CC_SEARCH_RES})
+ message(STATUS "Warning - cannot determine standard compiler library paths")
+ # Note: we will probably need a different method for Windows...
+endif()
+
+```
+
+The output format for this switch works on both `Clang` and `gcc`, and it also includes
+the include search path, which can be extracted in a similar way. Standard advice here
+is to list the `ldconfig` cache or use the `-V` flag to check, but this does not work
+consistently across platforms - in particular `gcc` will reconfigure `ld` when it is
+called to match gcc's internal view of the search paths. During experiments each of these
+alternatives missed / added unused paths. Dumping the compiler's own estimate of the
+search paths seems to work consistently across clang/gcc/linux/freebsd configurations.
+
+The default behaviour in CMake is to search across predefined paths (e.g. `CMAKE_LIBRARY_PATH`)
+that are based on heuristics about the current platform. Most projects using CMake seem
+to overwrite this with their own estimates.
+
+We can use the extracted paths as a base, add our own heuristics based on OS and then
+`set(CMAKE_LIBRARY_PATH ${OUR_OWN_LIB_SEARCH})` to get the best results. Roughly we do
+the following for each external dependency:
+```
+set(WITH_JSONC "Detect" CACHE STRING "Manually set the path to a json-c installation")
+...
+if(${WITH_JSONC} STREQUAL "Detect")
+ pkg_check_modules(JSONC json-c) # Don't set the REQUIRED flag
+ if(JSONC_FOUND)
+ message(STATUS "libjsonc found through .pc -> ${JSONC_CFLAGS_OTHER} ${JSONC_LIBRARIES}")
+ # ... setup using JSONC_CFLAGS_OTHER JSONC_LIBRARIES and JSONC_INCLUDE_DIRS
+ else()
+ find_library(LIB_JSONC
+ NAMES json-c libjson-c
+ PATHS ${CMAKE_LIBRARY_PATH}) # Includes our additions by this point
+ if(${LIB_JSONC} STREQUAL "LIB_JSONC-NOTFOUND")
+ message(STATUS "Library json-c not installed, disabling")
+ else()
+ check_library_exists(${LIB_JSONC} json_object_get_type "" HAVE_JSONC)
+ # ... setup using heuristics for CFLAGS and check include files are available
+ endif()
+ endif()
+else()
+ # ... use explicit path as base to check for library and includes ...
+endif()
+
+```
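+
+For the final `else()` branch above, one possible shape of the explicit-path probe (a sketch
+only, the real heuristics still need to be worked out while prototyping):
+```
+find_library(LIB_JSONC NAMES json-c libjson-c
+ PATHS "${WITH_JSONC}/lib" NO_DEFAULT_PATH)
+find_file(HAVE_JSONC_H json/json.h
+ PATHS "${WITH_JSONC}/include" NO_DEFAULT_PATH)
+```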
+
+For checking the include path we have two options. If we overwrite the `CMAKE_`... variables
+to change the internal search path we can use:
+```
+CHECK_INCLUDE_FILE(json/json.h HAVE_JSONC_H)
+```
+Or we can build a custom search path and then use:
+```
+find_file(HAVE_JSONC_H json/json.h PATHS ${OUR_INCLUDE_PATHS})
+```
+
+Note: we may have cases where there is no `.pc` but we have access to a `.cmake` (e.g. AWS SDK, mongodb, cmocka) - these need to be checked / pulled inside the repo while building a prototype.
+
+### Compiler compatibility checks
+
+In CMakeLists.txt:
+
+```
+CHECK_INCLUDE_FILE(sys/prctl.h HAVE_PRCTL_H)
+configure_file(cmake/config.in config.h)
+```
+
+In cmake/config.in:
+
+```
+#cmakedefine HAVE_PRCTL_H 1
+```
+
+If we want to check explicitly if something compiles (e.g. the accept4 check, or the
+`strerror_r` typing issue) then we set the `CMAKE_`... paths and then use:
+```
+check_c_source_compiles(
+ "
+ #include <string.h>
+ int main() { char x = *strerror_r(0, &x, sizeof(x)); return 0; }
+ "
+ STRERROR_R_CHAR_P)
+
+```
+This produces a bool that we can use inside CMake or propagate into the `config.h`.
+
+We can handle the atomic checks with:
+```
+check_c_source_compiles(
+ "
+ int main (int argc, char **argv)
+ {
+ volatile unsigned long ul1 = 1, ul2 = 0, ul3 = 2;
+ __atomic_load_n(&ul1, __ATOMIC_SEQ_CST);
+ __atomic_compare_exchange(&ul1, &ul2, &ul3, 1, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
+ __atomic_fetch_add(&ul1, 1, __ATOMIC_SEQ_CST);
+ __atomic_fetch_sub(&ul3, 1, __ATOMIC_SEQ_CST);
+ __atomic_or_fetch(&ul1, ul2, __ATOMIC_SEQ_CST);
+ __atomic_and_fetch(&ul1, ul2, __ATOMIC_SEQ_CST);
+ volatile unsigned long long ull1 = 1, ull2 = 0, ull3 = 2;
+ __atomic_load_n(&ull1, __ATOMIC_SEQ_CST);
+ __atomic_compare_exchange(&ull1, &ull2, &ull3, 1, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
+ __atomic_fetch_add(&ull1, 1, __ATOMIC_SEQ_CST);
+ __atomic_fetch_sub(&ull3, 1, __ATOMIC_SEQ_CST);
+ __atomic_or_fetch(&ull1, ull2, __ATOMIC_SEQ_CST);
+ __atomic_and_fetch(&ull1, ull2, __ATOMIC_SEQ_CST);
+ return 0;
+ }
+ "
+ HAVE_C__ATOMIC)
+```
+
+For the specific problem of getting the correct type signature in log.c for the `strerror_r`
+calls we can replicate what we have now, or we can delete this code completely and use a
+better solution that is documented [here](http://www.club.cc.cmu.edu/~cmccabe/blog_strerror.html).
+To replicate what we have now:
+```
+check_c_source_compiles(
+ "
+ #include <string.h>
+ int main() { char x = *strerror_r(0, &x, sizeof(x)); return 0; }
+ "
+ STRERROR_R_CHAR_P)
+
+check_c_source_compiles(
+ "
+ #include <string.h>
+ int main() { int x = strerror_r(0, &x, sizeof(x)); return 0; }
+ "
+ STRERROR_R_INT)
+
+if("${STRERROR_R_CHAR_P}" OR "${STRERROR_R_INT}")
+ set(HAVE_DECL_STRERROR_R 1)
+endif()
+message(STATUS "Result was ${HAVE_DECL_STRERROR_R}")
+
+```
+
+Note: I did not find an explicit way to select compiler when both `clang` and `gcc` are
+present. We might have an implicit way (like redirecting `cc`) but we should put one in.
+
+
+
+### Debugging problems in test compilations
+
+Test compilations attempt to feed a test-input into the targeted compiler and result
+in a yes/no decision; this is similar to `AC_LANG_SOURCE(.... if test $ac_...` in `.m4`.
+We have two techniques to use in CMake:
+```
+cmake_minimum_required(VERSION 3.1.0)
+include(CheckCCompilerFlag)
+project(empty C)
+
+check_c_source_compiles(
+ "
+ #include <string.h>
+ int main() { char x = *strerror_r(0, &x, sizeof(x)); return 0; }
+ "
+ STRERROR_R_CHAR_P)
+
+try_compile(HAVE_JEMALLOC ${CMAKE_CURRENT_BINARY_DIR}
+ ${CMAKE_CURRENT_SOURCE_DIR}/quickdemo.c
+ LINK_LIBRARIES jemalloc)
+```
+
+The `check_c_source_compiles` approach is light-weight:
+
+* Inline source for the test, easy to follow.
+* Build errors are reported in `CMakeFiles/CMakeError.log`
+
+But we cannot alter the include-paths / library-paths / compiler-flags specifically for
+the test without overwriting the current CMake settings. The alternative approach is
+slightly more heavy-weight:
+
+* Can't inline source for `try_compile` - it requires a `.c` file in the tree.
+* Build errors are not shown, the recovery process for them is somewhat difficult.
+
+```
+rm -rf * && cmake .. --debug-trycompile
+grep jemal CMakeFiles/CMakeTmp/CMakeFiles/*dir/*
+cd CMakeFiles/CMakeTmp/CMakeFiles/cmTC_d6f0e.dir # for example
+cmake --build ../..
+```
+
+This implies that we can do this to diagnose problems / develop test-programs, but we
+have to make them *bullet-proof* as we cannot expose this to end-users. This means that
+the results of the compilation must be *crisp* - exactly yes/no if the feature we are
+testing is supported.
+
+### System configuration checks
+
+For any system configuration checks that fall outside of the above scope (includes, libraries,
+packages, test-compilation checks) we have a fall-back that we can use to plug any holes
+that we need, e.g. to pull out the packaging strings, inside the `CMakeLists.txt`:
+```
+execute_process(COMMAND cat ${CMAKE_CURRENT_SOURCE_DIR}/packaging/version
+ COMMAND tr -d '\n'
+ OUTPUT_VARIABLE VERSION_FROM_FILE)
+message(STATUS "Packaging version ${VERSION_FROM_FILE}")
+```
+and this in the `config.h.in`:
+```
+#define VERSION_FROM_FILE "@VERSION_FROM_FILE@"
+```
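+
+(The `tr -d '\n'` pipe could probably be dropped in favour of passing
+`OUTPUT_STRIP_TRAILING_WHITESPACE` to `execute_process`.)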
+
+## CMake build time
+
+We have a working definition of the targets that is in use with CLion and works on modern
+CMake (3.15). It breaks on older CMake versions (e.g. 3.7) with an error message (issue #7091).
+No PoC yet to fix this, but it looks like changing the target properties should do it (in the
+worst case we can drop the separate object completely and merge the sources directly into
+the final target).
+
+Steps needed for building a prototype:
+
+1. Pick a reasonable configuration.
+2. Use the PoC techniques above to do a full generation of `CMAKE_` variables in the cache
+ according to the feature options and dependencies.
+3. Push these into the project variables.
+4. Work on it until the build succeeds in at least one known configuration.
+5. Smoke-test that the output is valid (i.e. the executable loads and runs, and we can
+ access the dashboard).
+6. Do a full comparison of the `config.h` generated by autotools against the CMake version
+ and document / fix any deviations.
+
+## CMake install target
+
+I've only looked at this superficially as we do not have a prototype yet, but both the
+first-stage install steps (in `make install`) and the second-stage ones (in
+`netdata-installer.sh`) look feasible.
+
+## General issues
+
+* We need to choose a minimum CMake version that is an available package across all of our
+ supported environments. There is currently a build issue #7091 that documents a problem
+ in the compilation phase (we cannot link in libnetdata as an object on old CMake versions
+ and need to find a different way to express this).
+
+* The default variable-expansion / comparisons in CMake are awkward; we need this to make
+ them sane:
+ ```
+ cmake_policy(SET CMP0054 "NEW")
+ ```
+* Default paths for libs / includes are not comprehensive on most environments; we still need
+ some heuristics for common locations, e.g. `/usr/local` on FreeBSD.
+
+## Recommendations
+
+We should follow these steps:
+
+1. Build a prototype.
+2. Build a test-environment to check the prototype against environments / configurations that
+ the team uses.
+3. Perform an "internal" release - merge the new CMake into master, but not announce it or
+ offer to support it.
+4. Check it works for the team internally.
+5. Do a soft-release: offer it externally as a replacement option for autotools.
+6. Gather feedback and usage reports on a wider range of configurations.
+7. Do a hard-release: switch over the preferred build-system in the installation instructions.
+8. Gather feedback and usage reports on a wider range of configurations (again).
+9. Deprecate / remove the autotools build-system completely (so that we can support a single
+ build-system).
+
+Some smaller miscellaneous suggestions:
+
+1. Remove the `_Generic` / `strerror_r` config to make the system simpler (use the technique
+ from the blog post to make the standard version re-entrant so that it is thread-safe).
+2. Pull in jemalloc by source into the repo if it is our preferred malloc implementation.
+
+## Background
+
+* [Stack overflow starting point](https://stackoverflow.com/questions/7132862/how-do-i-convert-an-autotools-project-to-a-cmake-project#7680240)
+* [CMake wiki including previous autotools conversions](https://gitlab.kitware.com/cmake/community/wikis/Home)
+* [Commands section in old CMake docs](https://cmake.org/cmake/help/v2.8.8/cmake.html#section_Commands)
+* [try_compile in newer CMake docs](https://cmake.org/cmake/help/v3.7/command/try_compile.html)
+* [configure_file in newer CMake docs](https://cmake.org/cmake/help/v3.7/command/configure_file.html?highlight=configure_file)
+* [header checks in CMake](https://stackoverflow.com/questions/647892/how-to-check-header-files-and-library-functions-in-cmake-like-it-is-done-in-auto)
+* [how to write platform checks](https://gitlab.kitware.com/cmake/community/wikis/doc/tutorials/How-To-Write-Platform-Checks)
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2FBUILD&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/CHANGELOG.md b/CHANGELOG.md
new file mode 100644
index 0000000..1caf3ed
--- /dev/null
+++ b/CHANGELOG.md
@@ -0,0 +1,388 @@
+# Changelog
+
+## [v1.29.3](https://github.com/netdata/netdata/tree/v1.29.3) (2021-02-23)
+
+[Full Changelog](https://github.com/netdata/netdata/compare/v1.29.2...v1.29.3)
+
+**Merged pull requests:**
+
+- Invalidate RRDSETVAR pointers on obsoletion. [\#10667](https://github.com/netdata/netdata/pull/10667) ([mfundul](https://github.com/mfundul))
+- Fixed condition controlling use of static LWS in RPM builds. [\#10661](https://github.com/netdata/netdata/pull/10661) ([Ferroin](https://github.com/Ferroin))
+- fix wrong link on docs Netdata Agent Daemon [\#10659](https://github.com/netdata/netdata/pull/10659) ([OdysLam](https://github.com/OdysLam))
+- Fix broken links in docs and add collectors to list [\#10651](https://github.com/netdata/netdata/pull/10651) ([joelhans](https://github.com/joelhans))
+- Statsd dashboard [\#10640](https://github.com/netdata/netdata/pull/10640) ([OdysLam](https://github.com/OdysLam))
+
+## [v1.29.2](https://github.com/netdata/netdata/tree/v1.29.2) (2021-02-18)
+
+[Full Changelog](https://github.com/netdata/netdata/compare/v1.29.1...v1.29.2)
+
+**Merged pull requests:**
+
+- Fix the context filtering on the data query endpoint [\#10652](https://github.com/netdata/netdata/pull/10652) ([stelfrag](https://github.com/stelfrag))
+- fix container/host detection in system-info script [\#10647](https://github.com/netdata/netdata/pull/10647) ([ilyam8](https://github.com/ilyam8))
+- Enable apps.plugin aggregation debug messages [\#10645](https://github.com/netdata/netdata/pull/10645) ([vlvkobal](https://github.com/vlvkobal))
+- add small delay to the ipv4\_tcp\_resets alarms [\#10644](https://github.com/netdata/netdata/pull/10644) ([ilyam8](https://github.com/ilyam8))
+- collectors/proc: fix collecting operstate for virtual network interfaces [\#10633](https://github.com/netdata/netdata/pull/10633) ([ilyam8](https://github.com/ilyam8))
+- fix sendmail unrecognized option F error [\#10631](https://github.com/netdata/netdata/pull/10631) ([ilyam8](https://github.com/ilyam8))
+- Fix typo in web/gui/readme.md [\#10623](https://github.com/netdata/netdata/pull/10623) ([OdysLam](https://github.com/OdysLam))
+- add freeswitch to apps\_groups [\#10621](https://github.com/netdata/netdata/pull/10621) ([fayak](https://github.com/fayak))
+- Add ACLK proxy setting as host label [\#10619](https://github.com/netdata/netdata/pull/10619) ([underhood](https://github.com/underhood))
+- dashboard@v2.13.6 [\#10618](https://github.com/netdata/netdata/pull/10618) ([jacekkolasa](https://github.com/jacekkolasa))
+- Disable stock alarms [\#10617](https://github.com/netdata/netdata/pull/10617) ([thiagoftsm](https://github.com/thiagoftsm))
+- Fixes \#10597 raw binary data should never be printed [\#10603](https://github.com/netdata/netdata/pull/10603) ([rda0](https://github.com/rda0))
+- collectors/proc: change ksm mem chart type to stacked [\#10598](https://github.com/netdata/netdata/pull/10598) ([ilyam8](https://github.com/ilyam8))
+- ACLK reduce excessive logging [\#10596](https://github.com/netdata/netdata/pull/10596) ([underhood](https://github.com/underhood))
+- add k8s\_cluster\_id host label [\#10588](https://github.com/netdata/netdata/pull/10588) ([ilyam8](https://github.com/ilyam8))
+- add resetting CapabilityBoundingSet workaround to the python.d collectors \(that use `sudo`\) readmes [\#10587](https://github.com/netdata/netdata/pull/10587) ([ilyam8](https://github.com/ilyam8))
+- collectors/elasticsearch: document `scheme` option [\#10572](https://github.com/netdata/netdata/pull/10572) ([vjt](https://github.com/vjt))
+- Update claiming docs for Docker containers. [\#10570](https://github.com/netdata/netdata/pull/10570) ([Ferroin](https://github.com/Ferroin))
+- health: make Opsgenie API URL configurable [\#10561](https://github.com/netdata/netdata/pull/10561) ([tinyhammers](https://github.com/tinyhammers))
+- Allow the REMOVED alarm status via ACLK if the previous status was WARN/CRIT [\#10533](https://github.com/netdata/netdata/pull/10533) ([stelfrag](https://github.com/stelfrag))
+- Change eBPF plugin internal [\#10442](https://github.com/netdata/netdata/pull/10442) ([thiagoftsm](https://github.com/thiagoftsm))
+
+## [v1.29.1](https://github.com/netdata/netdata/tree/v1.29.1) (2021-02-09)
+
+[Full Changelog](https://github.com/netdata/netdata/compare/v1.29.0...v1.29.1)
+
+**Merged pull requests:**
+
+- Fix crash during shutdown of cgroups internal plugin. [\#10614](https://github.com/netdata/netdata/pull/10614) ([mfundul](https://github.com/mfundul))
+- Update latest release on main README [\#10590](https://github.com/netdata/netdata/pull/10590) ([joelhans](https://github.com/joelhans))
+
+## [v1.29.0](https://github.com/netdata/netdata/tree/v1.29.0) (2021-02-03)
+
+[Full Changelog](https://github.com/netdata/netdata/compare/v1.28.0...v1.29.0)
+
+**Merged pull requests:**
+
+- Fixed Netdata Cloud support in RPM packages. [\#10578](https://github.com/netdata/netdata/pull/10578) ([Ferroin](https://github.com/Ferroin))
+- Fix container detection from systemd-detect-virt [\#10569](https://github.com/netdata/netdata/pull/10569) ([cakrit](https://github.com/cakrit))
+- dashboard v2.13.0 [\#10565](https://github.com/netdata/netdata/pull/10565) ([jacekkolasa](https://github.com/jacekkolasa))
+- bytes after last '}' trip JSON parser [\#10563](https://github.com/netdata/netdata/pull/10563) ([underhood](https://github.com/underhood))
+- Fix prometheus remote write header [\#10560](https://github.com/netdata/netdata/pull/10560) ([vlvkobal](https://github.com/vlvkobal))
+- fix minor vulnerability alert, updating socket-io dependency [\#10557](https://github.com/netdata/netdata/pull/10557) ([jacekkolasa](https://github.com/jacekkolasa))
+- Fix values in Prometheus export for metrics, collected by the Prometheus collector [\#10551](https://github.com/netdata/netdata/pull/10551) ([vlvkobal](https://github.com/vlvkobal))
+- Properly handle saved temporary directory on updates. [\#10550](https://github.com/netdata/netdata/pull/10550) ([Ferroin](https://github.com/Ferroin))
+- installer: update go.d.plugin version to v0.27.0 [\#10544](https://github.com/netdata/netdata/pull/10544) ([ilyam8](https://github.com/ilyam8))
+- Update README.md on postgres collector [\#10532](https://github.com/netdata/netdata/pull/10532) ([OdysLam](https://github.com/OdysLam))
+- fix postgres password bug and change default config [\#10531](https://github.com/netdata/netdata/pull/10531) ([OdysLam](https://github.com/OdysLam))
+- Make some tweaks/improvements to conf docs [\#10528](https://github.com/netdata/netdata/pull/10528) ([joelhans](https://github.com/joelhans))
+- Spelling python plugin [\#10525](https://github.com/netdata/netdata/pull/10525) ([jsoref](https://github.com/jsoref))
+- Reduce the number of alarm updates on ACLK [\#10524](https://github.com/netdata/netdata/pull/10524) ([stelfrag](https://github.com/stelfrag))
+- dashboard@v2.12.5 [\#10520](https://github.com/netdata/netdata/pull/10520) ([jacekkolasa](https://github.com/jacekkolasa))
+- Remove unused entries from structures [\#10519](https://github.com/netdata/netdata/pull/10519) ([stelfrag](https://github.com/stelfrag))
+- Mark internal functions as static in health code. [\#10518](https://github.com/netdata/netdata/pull/10518) ([vkalintiris](https://github.com/vkalintiris))
+- Remove unused struct in health code. [\#10517](https://github.com/netdata/netdata/pull/10517) ([vkalintiris](https://github.com/vkalintiris))
+- Fix coverity issue CID 365322 [\#10516](https://github.com/netdata/netdata/pull/10516) ([stelfrag](https://github.com/stelfrag))
+- health/mysql: fix `mysql.slave\_status` alarm for go mysql collector [\#10513](https://github.com/netdata/netdata/pull/10513) ([ilyam8](https://github.com/ilyam8))
+- Spelling md [\#10508](https://github.com/netdata/netdata/pull/10508) ([jsoref](https://github.com/jsoref))
+- Switched to using system libwebsockets for RPM builds. [\#10507](https://github.com/netdata/netdata/pull/10507) ([Ferroin](https://github.com/Ferroin))
+- Add link to specific feedback megathread for the anomalies collector [\#10506](https://github.com/netdata/netdata/pull/10506) ([andrewm4894](https://github.com/andrewm4894))
+- Add link to specific feedback megathread for the anomalies collector [\#10505](https://github.com/netdata/netdata/pull/10505) ([andrewm4894](https://github.com/andrewm4894))
+- Fix broken dbengine stress tests. [\#10502](https://github.com/netdata/netdata/pull/10502) ([mfundul](https://github.com/mfundul))
+- add `\_is\_k8s\_node` label to the host labels [\#10501](https://github.com/netdata/netdata/pull/10501) ([ilyam8](https://github.com/ilyam8))
+- Fix segmentation fault in the agent [\#10498](https://github.com/netdata/netdata/pull/10498) ([mfundul](https://github.com/mfundul))
+- Fixed handling of TLS config so that cURL works in all cases. [\#10491](https://github.com/netdata/netdata/pull/10491) ([Ferroin](https://github.com/Ferroin))
+- Bump ini from 1.3.5 to 1.3.8 [\#10489](https://github.com/netdata/netdata/pull/10489) ([dependabot[bot]](https://github.com/apps/dependabot))
+- health: make mdstat\_mismatch\_cnt alarm less strict [\#10488](https://github.com/netdata/netdata/pull/10488) ([ilyam8](https://github.com/ilyam8))
+- Mention PostgreSQL Prometheus Adapter in the documentation [\#10487](https://github.com/netdata/netdata/pull/10487) ([vlvkobal](https://github.com/vlvkobal))
+- Fix memory allocation when computing standard deviation [\#10484](https://github.com/netdata/netdata/pull/10484) ([stelfrag](https://github.com/stelfrag))
+- Support multiple chart label keys in data queries [\#10483](https://github.com/netdata/netdata/pull/10483) ([stelfrag](https://github.com/stelfrag))
+- Claiming retry/backoff [\#10482](https://github.com/netdata/netdata/pull/10482) ([underhood](https://github.com/underhood))
+- Add guide: Monitor and visualize anomalies with Netdata [\#10480](https://github.com/netdata/netdata/pull/10480) ([joelhans](https://github.com/joelhans))
+- Truncate excessive information from titles for apps and cgroups [\#10479](https://github.com/netdata/netdata/pull/10479) ([vlvkobal](https://github.com/vlvkobal))
+- Add vkalintiris to CODEOWNERS for CI, packaging, and installer code. [\#10478](https://github.com/netdata/netdata/pull/10478) ([Ferroin](https://github.com/Ferroin))
+- GitHub action markdown link check update [\#10474](https://github.com/netdata/netdata/pull/10474) ([jsoref](https://github.com/jsoref))
+- Fix for older compilers [\#10470](https://github.com/netdata/netdata/pull/10470) ([underhood](https://github.com/underhood))
+- Fixes for SEO housekeeping/improvements [\#10468](https://github.com/netdata/netdata/pull/10468) ([joelhans](https://github.com/joelhans))
+- Update README.md [\#10467](https://github.com/netdata/netdata/pull/10467) ([OdysLam](https://github.com/OdysLam))
+- Update pfsense.md [\#10466](https://github.com/netdata/netdata/pull/10466) ([OdysLam](https://github.com/OdysLam))
+- Fixed function name in updater script. [\#10462](https://github.com/netdata/netdata/pull/10462) ([Ferroin](https://github.com/Ferroin))
+- Fixed bundling of libwebsockets in binary packages. [\#10460](https://github.com/netdata/netdata/pull/10460) ([Ferroin](https://github.com/Ferroin))
+- Anomalies collector custom model bugfix for issue \#10456 [\#10459](https://github.com/netdata/netdata/pull/10459) ([andrewm4894](https://github.com/andrewm4894))
+- Add missing section to Netdata style guide [\#10453](https://github.com/netdata/netdata/pull/10453) ([joelhans](https://github.com/joelhans))
+- Add guide: Detect anomalies in nodes and applications with Netdata [\#10451](https://github.com/netdata/netdata/pull/10451) ([joelhans](https://github.com/joelhans))
+- Updated messages about checksum validation failures on install. [\#10448](https://github.com/netdata/netdata/pull/10448) ([Ferroin](https://github.com/Ferroin))
+- Fixed handling of environment file in updater script. [\#10447](https://github.com/netdata/netdata/pull/10447) ([Ferroin](https://github.com/Ferroin))
+- Exclude autofs by default in diskspace plugin [\#10441](https://github.com/netdata/netdata/pull/10441) ([nabijaczleweli](https://github.com/nabijaczleweli))
+- New eBPF kernel [\#10434](https://github.com/netdata/netdata/pull/10434) ([thiagoftsm](https://github.com/thiagoftsm))
+- Update and improve the Netdata style guide [\#10433](https://github.com/netdata/netdata/pull/10433) ([joelhans](https://github.com/joelhans))
+- Change HDDtemp to report None instead of 0 [\#10429](https://github.com/netdata/netdata/pull/10429) ([slavox](https://github.com/slavox))
+- Use bash shell as user netdata for debug [\#10425](https://github.com/netdata/netdata/pull/10425) ([Steve8291](https://github.com/Steve8291))
+- Qick and dirty fix for \#10420 [\#10424](https://github.com/netdata/netdata/pull/10424) ([skibbipl](https://github.com/skibbipl))
+- Add instructions on enabling explicitly disabled collectors [\#10418](https://github.com/netdata/netdata/pull/10418) ([joelhans](https://github.com/joelhans))
+- Change links at bottom of all install docs [\#10416](https://github.com/netdata/netdata/pull/10416) ([joelhans](https://github.com/joelhans))
+- Improve configuration docs with common changes and start/stop/restart directions [\#10415](https://github.com/netdata/netdata/pull/10415) ([joelhans](https://github.com/joelhans))
+- Add Realtek network cards to the list of physical interfaces on FreeBSD [\#10414](https://github.com/netdata/netdata/pull/10414) ([vlvkobal](https://github.com/vlvkobal))
+- Update main README with release news [\#10412](https://github.com/netdata/netdata/pull/10412) ([joelhans](https://github.com/joelhans))
+- Small updates, improvements, and housekeeping to docs [\#10405](https://github.com/netdata/netdata/pull/10405) ([joelhans](https://github.com/joelhans))
+- python.d/fail2ban: Add handling "yes" and "no" as bool, match flexible spaces [\#10400](https://github.com/netdata/netdata/pull/10400) ([grinapo](https://github.com/grinapo))
+- Dispatch cgroup discovery into another thread [\#10399](https://github.com/netdata/netdata/pull/10399) ([vlvkobal](https://github.com/vlvkobal))
+- Added instructions on which file to edit. [\#10398](https://github.com/netdata/netdata/pull/10398) ([kdvlr](https://github.com/kdvlr))
+- Fix data source option for Prometheus web API in exporting configuration [\#10397](https://github.com/netdata/netdata/pull/10397) ([vlvkobal](https://github.com/vlvkobal))
+- ACLK collector list use mguid instead of hostname [\#10394](https://github.com/netdata/netdata/pull/10394) ([underhood](https://github.com/underhood))
+- Docs housekeeping for SEO and syntax, part 1 [\#10388](https://github.com/netdata/netdata/pull/10388) ([joelhans](https://github.com/joelhans))
+- Persist `$TMPDIR` from installer to updater. [\#10384](https://github.com/netdata/netdata/pull/10384) ([Ferroin](https://github.com/Ferroin))
+- Add centralized Cloud notifications to core docs [\#10374](https://github.com/netdata/netdata/pull/10374) ([joelhans](https://github.com/joelhans))
+- Change linting standard for Markdown lists [\#10371](https://github.com/netdata/netdata/pull/10371) ([joelhans](https://github.com/joelhans))
+- Move ACLK Legacy into subfolder [\#10265](https://github.com/netdata/netdata/pull/10265) ([underhood](https://github.com/underhood))
+
+## [v1.28.0](https://github.com/netdata/netdata/tree/v1.28.0) (2020-12-18)
+
+[Full Changelog](https://github.com/netdata/netdata/compare/v1.27.0...v1.28.0)
+
+**Merged pull requests:**
+
+- Fix locking after on\_connect failure [\#10401](https://github.com/netdata/netdata/pull/10401) ([stelfrag](https://github.com/stelfrag))
+
+## [v1.27.0](https://github.com/netdata/netdata/tree/v1.27.0) (2020-12-17)
+
+[Full Changelog](https://github.com/netdata/netdata/compare/v1.26.0...v1.27.0)
+
+**Merged pull requests:**
+
+- Fixed option parsing in kickstart.sh. [\#10396](https://github.com/netdata/netdata/pull/10396) ([Ferroin](https://github.com/Ferroin))
+- invalid\_addr\_cleanup: Initialize variables [\#10395](https://github.com/netdata/netdata/pull/10395) ([thiagoftsm](https://github.com/thiagoftsm))
+- Fix a buffer overflow when extracting information from a STREAM connection [\#10391](https://github.com/netdata/netdata/pull/10391) ([stelfrag](https://github.com/stelfrag))
+- fix typo in performance.md [\#10386](https://github.com/netdata/netdata/pull/10386) ([OdysLam](https://github.com/OdysLam))
+- Fix a lock check [\#10385](https://github.com/netdata/netdata/pull/10385) ([vlvkobal](https://github.com/vlvkobal))
+- dashboard v2.11 [\#10383](https://github.com/netdata/netdata/pull/10383) ([jacekkolasa](https://github.com/jacekkolasa))
+- Fixed handling of dependencies on Gentoo. [\#10382](https://github.com/netdata/netdata/pull/10382) ([Ferroin](https://github.com/Ferroin))
+- Fix issue with chart metadata sent multiple times over ACLK [\#10381](https://github.com/netdata/netdata/pull/10381) ([stelfrag](https://github.com/stelfrag))
+- Update macos.md [\#10379](https://github.com/netdata/netdata/pull/10379) ([ktsaou](https://github.com/ktsaou))
+- python.d/alarms: fix sending chart definition on every data collection [\#10378](https://github.com/netdata/netdata/pull/10378) ([ilyam8](https://github.com/ilyam8))
+- python.d/alarms: add alarms obsoletion and disable by default [\#10375](https://github.com/netdata/netdata/pull/10375) ([ilyam8](https://github.com/ilyam8))
+- add two more insignificant warnings to suppress in anomalies collector [\#10369](https://github.com/netdata/netdata/pull/10369) ([andrewm4894](https://github.com/andrewm4894))
+- add paragraph in anomalies collector README to ask for feedback [\#10363](https://github.com/netdata/netdata/pull/10363) ([andrewm4894](https://github.com/andrewm4894))
+- Fix hostname configuration in the exporting engine [\#10361](https://github.com/netdata/netdata/pull/10361) ([vlvkobal](https://github.com/vlvkobal))
+- New ebpf charts [\#10360](https://github.com/netdata/netdata/pull/10360) ([thiagoftsm](https://github.com/thiagoftsm))
+- installer: update go.d.plugin version to v0.26.2 [\#10355](https://github.com/netdata/netdata/pull/10355) ([ilyam8](https://github.com/ilyam8))
+- Fixed handling of self-updating in updater script. [\#10352](https://github.com/netdata/netdata/pull/10352) ([Ferroin](https://github.com/Ferroin))
+- update alarms collector readme image to use one from the related netdata PR [\#10348](https://github.com/netdata/netdata/pull/10348) ([andrewm4894](https://github.com/andrewm4894))
+- Add documentation for time & date picker in Agent and Cloud [\#10347](https://github.com/netdata/netdata/pull/10347) ([joelhans](https://github.com/joelhans))
+- Use `glibtoolize` on macOS instead of regular `libtoolize`. [\#10346](https://github.com/netdata/netdata/pull/10346) ([Ferroin](https://github.com/Ferroin))
+- Fix handling of Python dependency for RPM package. [\#10345](https://github.com/netdata/netdata/pull/10345) ([Ferroin](https://github.com/Ferroin))
+- Fixed handling of PowerTools repo on CentOS 8. [\#10344](https://github.com/netdata/netdata/pull/10344) ([Ferroin](https://github.com/Ferroin))
+- Fix backend options [\#10343](https://github.com/netdata/netdata/pull/10343) ([vlvkobal](https://github.com/vlvkobal))
+- Add guide: Monitor any process in real-time with Netdata [\#10338](https://github.com/netdata/netdata/pull/10338) ([joelhans](https://github.com/joelhans))
+- Added patch to build LWS properly on macOS. [\#10333](https://github.com/netdata/netdata/pull/10333) ([Ferroin](https://github.com/Ferroin))
+- Added number of allocated/stored objects within each Varnish storage [\#10329](https://github.com/netdata/netdata/pull/10329) ([ernestojpg](https://github.com/ernestojpg))
+- health: disable 'used\_file\_descriptors' alarm [\#10328](https://github.com/netdata/netdata/pull/10328) ([ilyam8](https://github.com/ilyam8))
+- Fix exporting config [\#10323](https://github.com/netdata/netdata/pull/10323) ([vlvkobal](https://github.com/vlvkobal))
+- Fix a compilation warning [\#10320](https://github.com/netdata/netdata/pull/10320) ([vlvkobal](https://github.com/vlvkobal))
+- installer: update go.d.plugin version to v0.26.1 [\#10319](https://github.com/netdata/netdata/pull/10319) ([ilyam8](https://github.com/ilyam8))
+- Improve core documentation to align with recent Netdata Cloud releases [\#10318](https://github.com/netdata/netdata/pull/10318) ([joelhans](https://github.com/joelhans))
+- Added support for MSE \(Massive Storage Engine\) in Varnish-Plus [\#10317](https://github.com/netdata/netdata/pull/10317) ([ernestojpg](https://github.com/ernestojpg))
+- dashboard v2.10.1 [\#10314](https://github.com/netdata/netdata/pull/10314) ([jacekkolasa](https://github.com/jacekkolasa))
+- fix UUID\_STR\_LEN undefined on MacOS [\#10313](https://github.com/netdata/netdata/pull/10313) ([underhood](https://github.com/underhood))
+- python.d/nvidia\_smi: fix gpu data filtering [\#10312](https://github.com/netdata/netdata/pull/10312) ([ilyam8](https://github.com/ilyam8))
+- Add new collectors to supported collectors list [\#10310](https://github.com/netdata/netdata/pull/10310) ([joelhans](https://github.com/joelhans))
+- Added numerous improvements to our Docker image. [\#10308](https://github.com/netdata/netdata/pull/10308) ([Ferroin](https://github.com/Ferroin))
+- Update README.md [\#10303](https://github.com/netdata/netdata/pull/10303) ([ktsaou](https://github.com/ktsaou))
+- Add optional info on how to also add some default alarms as part of s… [\#10302](https://github.com/netdata/netdata/pull/10302) ([andrewm4894](https://github.com/andrewm4894))
+- Update UPDATE.md [\#10301](https://github.com/netdata/netdata/pull/10301) ([ysamouhos](https://github.com/ysamouhos))
+- HAProxy spelling correction [\#10300](https://github.com/netdata/netdata/pull/10300) ([autoalan](https://github.com/autoalan))
+- eBPF synchronization [\#10299](https://github.com/netdata/netdata/pull/10299) ([thiagoftsm](https://github.com/thiagoftsm))
+- python.d: always create a runtime chart on `create` call [\#10296](https://github.com/netdata/netdata/pull/10296) ([ilyam8](https://github.com/ilyam8))
+- Update macOS instructions with cmake [\#10295](https://github.com/netdata/netdata/pull/10295) ([joelhans](https://github.com/joelhans))
+- dbengine extent cache [\#10293](https://github.com/netdata/netdata/pull/10293) ([mfundul](https://github.com/mfundul))
+- add privacy information about aclk connection [\#10292](https://github.com/netdata/netdata/pull/10292) ([OdysLam](https://github.com/OdysLam))
+- Fixed the data endpoint so that the context param is correctly applied to children [\#10290](https://github.com/netdata/netdata/pull/10290) ([stelfrag](https://github.com/stelfrag))
+- installer: update go.d.plugin version to v0.26.0 [\#10284](https://github.com/netdata/netdata/pull/10284) ([ilyam8](https://github.com/ilyam8))
+- use new libmosquitto release \(with MacOS libMosq fix\) [\#10283](https://github.com/netdata/netdata/pull/10283) ([underhood](https://github.com/underhood))
+- Address coverity errors \(CID 364045,364046\) [\#10282](https://github.com/netdata/netdata/pull/10282) ([stelfrag](https://github.com/stelfrag))
+- health/web\_log: remove `crit` from unmatched alarms [\#10280](https://github.com/netdata/netdata/pull/10280) ([ilyam8](https://github.com/ilyam8))
+- Fix compilation with https disabled. Fixes \#10278 [\#10279](https://github.com/netdata/netdata/pull/10279) ([KickerTom](https://github.com/KickerTom))
+- Fix race condition in rrdset\_first\_entry\_t\(\) and rrdset\_last\_entry\_t\(\) [\#10276](https://github.com/netdata/netdata/pull/10276) ([mfundul](https://github.com/mfundul))
+- Fix host name when syslog is used [\#10275](https://github.com/netdata/netdata/pull/10275) ([thiagoftsm](https://github.com/thiagoftsm))
+- Add guide: How to optimize Netdata's performance [\#10271](https://github.com/netdata/netdata/pull/10271) ([joelhans](https://github.com/joelhans))
+- Document the Agent reinstallation process [\#10270](https://github.com/netdata/netdata/pull/10270) ([joelhans](https://github.com/joelhans))
+- fix bug\_report.md syntax error [\#10269](https://github.com/netdata/netdata/pull/10269) ([OdysLam](https://github.com/OdysLam))
+- python.d/nvidia\_smi: use `pwd` lib to get username if not inside a container [\#10268](https://github.com/netdata/netdata/pull/10268) ([ilyam8](https://github.com/ilyam8))
+- Add kernel to blacklist [\#10262](https://github.com/netdata/netdata/pull/10262) ([thiagoftsm](https://github.com/thiagoftsm))
+- Made the update script significantly more robust and user friendly. [\#10261](https://github.com/netdata/netdata/pull/10261) ([Ferroin](https://github.com/Ferroin))
+- new issue templates [\#10259](https://github.com/netdata/netdata/pull/10259) ([OdysLam](https://github.com/OdysLam))
+- Docs: Point users to proper configure doc [\#10254](https://github.com/netdata/netdata/pull/10254) ([joelhans](https://github.com/joelhans))
+- Docs: Cleanup and fix broken links [\#10253](https://github.com/netdata/netdata/pull/10253) ([joelhans](https://github.com/joelhans))
+- Update CONTRIBUTING.md [\#10252](https://github.com/netdata/netdata/pull/10252) ([joelhans](https://github.com/joelhans))
+- updated 3rd party static dependencies and use alpine 3.12 [\#10241](https://github.com/netdata/netdata/pull/10241) ([ktsaou](https://github.com/ktsaou))
+- Fix streaming buffer size [\#10240](https://github.com/netdata/netdata/pull/10240) ([vlvkobal](https://github.com/vlvkobal))
+- dashboard v2.9.2 [\#10239](https://github.com/netdata/netdata/pull/10239) ([jacekkolasa](https://github.com/jacekkolasa))
+- database: avoid endless loop when cleaning obsolete charts [\#10236](https://github.com/netdata/netdata/pull/10236) ([hexchain](https://github.com/hexchain))
+- Update ansible.md [\#10232](https://github.com/netdata/netdata/pull/10232) ([voriol](https://github.com/voriol))
+- Disable chart obsoletion code for archived chart creation. [\#10231](https://github.com/netdata/netdata/pull/10231) ([mfundul](https://github.com/mfundul))
+- add `nvidia\_smi` collector data to the dashboard\_info.js [\#10230](https://github.com/netdata/netdata/pull/10230) ([ilyam8](https://github.com/ilyam8))
+- health: convert `elasticsearch\_last\_collected` alarm to template [\#10226](https://github.com/netdata/netdata/pull/10226) ([ilyam8](https://github.com/ilyam8))
+- streaming: fix a typo in the README.md [\#10225](https://github.com/netdata/netdata/pull/10225) ([ilyam8](https://github.com/ilyam8))
+- collectors/xenstat.plugin: recieved =\> received [\#10224](https://github.com/netdata/netdata/pull/10224) ([ilyam8](https://github.com/ilyam8))
+- dashboard\_info.js: fix a typo \(vernemq\) [\#10223](https://github.com/netdata/netdata/pull/10223) ([ilyam8](https://github.com/ilyam8))
+- Fix chart filtering [\#10218](https://github.com/netdata/netdata/pull/10218) ([vlvkobal](https://github.com/vlvkobal))
+- Don't stop Prometheus remote write collector when data is not available for dimension formatting [\#10217](https://github.com/netdata/netdata/pull/10217) ([vlvkobal](https://github.com/vlvkobal))
+- Fix coverity issues [\#10216](https://github.com/netdata/netdata/pull/10216) ([vlvkobal](https://github.com/vlvkobal))
+- Fixed bug in auto-updater for FreeBSD \(\#10198\) [\#10204](https://github.com/netdata/netdata/pull/10204) ([abrbon](https://github.com/abrbon))
+- New ebpf release [\#10202](https://github.com/netdata/netdata/pull/10202) ([thiagoftsm](https://github.com/thiagoftsm))
+- Add guide: Deploy Netdata with Ansible [\#10199](https://github.com/netdata/netdata/pull/10199) ([joelhans](https://github.com/joelhans))
+- Add allocated space metrics to oracledb charts [\#10197](https://github.com/netdata/netdata/pull/10197) ([jurgenhaas](https://github.com/jurgenhaas))
+- File descr alarm v01 [\#10192](https://github.com/netdata/netdata/pull/10192) ([Ancairon](https://github.com/Ancairon))
+- Update CoC and widen scope to community [\#10186](https://github.com/netdata/netdata/pull/10186) ([OdysLam](https://github.com/OdysLam))
+- Migrate metadata log to SQLite [\#10139](https://github.com/netdata/netdata/pull/10139) ([stelfrag](https://github.com/stelfrag))
+- Kubernetes labels [\#10107](https://github.com/netdata/netdata/pull/10107) ([ilyam8](https://github.com/ilyam8))
+- Remove Docker example from update docs and add section to claim troubleshooting [\#10103](https://github.com/netdata/netdata/pull/10103) ([joelhans](https://github.com/joelhans))
+- Anomalies collector [\#10060](https://github.com/netdata/netdata/pull/10060) ([andrewm4894](https://github.com/andrewm4894))
+- Alarms collector [\#10042](https://github.com/netdata/netdata/pull/10042) ([andrewm4894](https://github.com/andrewm4894))
+- ACLK allow child query [\#10030](https://github.com/netdata/netdata/pull/10030) ([underhood](https://github.com/underhood))
+
+## [v1.26.0](https://github.com/netdata/netdata/tree/v1.26.0) (2020-10-14)
+
+[Full Changelog](https://github.com/netdata/netdata/compare/before_rebase...v1.26.0)
+
+**Merged pull requests:**
+
+- Fix systemd comment syntax [\#10066](https://github.com/netdata/netdata/pull/10066) ([HolgerHees](https://github.com/HolgerHees))
+- health/portcheck: add `failed` dim to the `connection\_fails` alarm [\#10048](https://github.com/netdata/netdata/pull/10048) ([ilyam8](https://github.com/ilyam8))
+- installer: update go.d.plugin version to v0.23.0 [\#10046](https://github.com/netdata/netdata/pull/10046) ([ilyam8](https://github.com/ilyam8))
+- Rename NETDATA\_PORT to NETDATA\_LISTENER\_PORT [\#10045](https://github.com/netdata/netdata/pull/10045) ([knatsakis](https://github.com/knatsakis))
+- small docs update - adding note about using `nolock` when debugging [\#10036](https://github.com/netdata/netdata/pull/10036) ([andrewm4894](https://github.com/andrewm4894))
+- Fixed the data endpoint to prioritize chart over context if both are present [\#10032](https://github.com/netdata/netdata/pull/10032) ([stelfrag](https://github.com/stelfrag))
+- python.d/rabbitmq: Add chart for churn rates [\#10031](https://github.com/netdata/netdata/pull/10031) ([chadknutson](https://github.com/chadknutson))
+- Fixed gauges for go web\_log module [\#10029](https://github.com/netdata/netdata/pull/10029) ([hamedbrd](https://github.com/hamedbrd))
+- Fixed incorrect condition in updater type detection. [\#10028](https://github.com/netdata/netdata/pull/10028) ([Ferroin](https://github.com/Ferroin))
+- Fix README exporting link [\#10020](https://github.com/netdata/netdata/pull/10020) ([Dim-P](https://github.com/Dim-P))
+- Clean up and better cross-link new docsv2 documents [\#10015](https://github.com/netdata/netdata/pull/10015) ([joelhans](https://github.com/joelhans))
+- collector infiniband: fix file descriptor leak [\#10013](https://github.com/netdata/netdata/pull/10013) ([Saruspete](https://github.com/Saruspete))
+
+## [before_rebase](https://github.com/netdata/netdata/tree/before_rebase) (2020-09-24)
+
+[Full Changelog](https://github.com/netdata/netdata/compare/v1.25.0...before_rebase)
+
+## [v1.25.0](https://github.com/netdata/netdata/tree/v1.25.0) (2020-09-15)
+
+[Full Changelog](https://github.com/netdata/netdata/compare/poc2...v1.25.0)
+
+## [poc2](https://github.com/netdata/netdata/tree/poc2) (2020-08-25)
+
+[Full Changelog](https://github.com/netdata/netdata/compare/v1.24.0...poc2)
+
+## [v1.24.0](https://github.com/netdata/netdata/tree/v1.24.0) (2020-08-10)
+
+[Full Changelog](https://github.com/netdata/netdata/compare/v1.23.2...v1.24.0)
+
+## [v1.23.2](https://github.com/netdata/netdata/tree/v1.23.2) (2020-07-16)
+
+[Full Changelog](https://github.com/netdata/netdata/compare/v1.23.1...v1.23.2)
+
+## [v1.23.1](https://github.com/netdata/netdata/tree/v1.23.1) (2020-07-01)
+
+[Full Changelog](https://github.com/netdata/netdata/compare/v1.23.0...v1.23.1)
+
+## [v1.23.0](https://github.com/netdata/netdata/tree/v1.23.0) (2020-06-25)
+
+[Full Changelog](https://github.com/netdata/netdata/compare/v1.22.1...v1.23.0)
+
+## [v1.22.1](https://github.com/netdata/netdata/tree/v1.22.1) (2020-05-12)
+
+[Full Changelog](https://github.com/netdata/netdata/compare/v1.22.0...v1.22.1)
+
+## [v1.22.0](https://github.com/netdata/netdata/tree/v1.22.0) (2020-05-11)
+
+[Full Changelog](https://github.com/netdata/netdata/compare/v1.21.1...v1.22.0)
+
+## [v1.21.1](https://github.com/netdata/netdata/tree/v1.21.1) (2020-04-13)
+
+[Full Changelog](https://github.com/netdata/netdata/compare/v1.21.0...v1.21.1)
+
+## [v1.21.0](https://github.com/netdata/netdata/tree/v1.21.0) (2020-04-06)
+
+[Full Changelog](https://github.com/netdata/netdata/compare/v1.20.0...v1.21.0)
+
+## [v1.20.0](https://github.com/netdata/netdata/tree/v1.20.0) (2020-02-21)
+
+[Full Changelog](https://github.com/netdata/netdata/compare/v1.19.0...v1.20.0)
+
+## [v1.19.0](https://github.com/netdata/netdata/tree/v1.19.0) (2019-11-27)
+
+[Full Changelog](https://github.com/netdata/netdata/compare/v1.18.1...v1.19.0)
+
+## [v1.18.1](https://github.com/netdata/netdata/tree/v1.18.1) (2019-10-18)
+
+[Full Changelog](https://github.com/netdata/netdata/compare/v1.18.0...v1.18.1)
+
+## [v1.18.0](https://github.com/netdata/netdata/tree/v1.18.0) (2019-10-10)
+
+[Full Changelog](https://github.com/netdata/netdata/compare/v1.17.1...v1.18.0)
+
+## [v1.17.1](https://github.com/netdata/netdata/tree/v1.17.1) (2019-09-12)
+
+[Full Changelog](https://github.com/netdata/netdata/compare/v1.17.0...v1.17.1)
+
+## [v1.17.0](https://github.com/netdata/netdata/tree/v1.17.0) (2019-09-03)
+
+[Full Changelog](https://github.com/netdata/netdata/compare/issue_4934...v1.17.0)
+
+## [issue_4934](https://github.com/netdata/netdata/tree/issue_4934) (2019-08-03)
+
+[Full Changelog](https://github.com/netdata/netdata/compare/v1.16.1...issue_4934)
+
+## [v1.16.1](https://github.com/netdata/netdata/tree/v1.16.1) (2019-07-31)
+
+[Full Changelog](https://github.com/netdata/netdata/compare/v1.16.0...v1.16.1)
+
+## [v1.16.0](https://github.com/netdata/netdata/tree/v1.16.0) (2019-07-08)
+
+[Full Changelog](https://github.com/netdata/netdata/compare/v1.15.0...v1.16.0)
+
+## [v1.15.0](https://github.com/netdata/netdata/tree/v1.15.0) (2019-05-22)
+
+[Full Changelog](https://github.com/netdata/netdata/compare/v1.14.0...v1.15.0)
+
+## [v1.14.0](https://github.com/netdata/netdata/tree/v1.14.0) (2019-04-18)
+
+[Full Changelog](https://github.com/netdata/netdata/compare/v1.14.0-rc0...v1.14.0)
+
+## [v1.14.0-rc0](https://github.com/netdata/netdata/tree/v1.14.0-rc0) (2019-03-30)
+
+[Full Changelog](https://github.com/netdata/netdata/compare/v1.13.0...v1.14.0-rc0)
+
+## [v1.13.0](https://github.com/netdata/netdata/tree/v1.13.0) (2019-03-14)
+
+[Full Changelog](https://github.com/netdata/netdata/compare/v1.12.2...v1.13.0)
+
+## [v1.12.2](https://github.com/netdata/netdata/tree/v1.12.2) (2019-02-28)
+
+[Full Changelog](https://github.com/netdata/netdata/compare/v1.12.1...v1.12.2)
+
+## [v1.12.1](https://github.com/netdata/netdata/tree/v1.12.1) (2019-02-21)
+
+[Full Changelog](https://github.com/netdata/netdata/compare/v1.12.0...v1.12.1)
+
+## [v1.12.0](https://github.com/netdata/netdata/tree/v1.12.0) (2019-02-06)
+
+[Full Changelog](https://github.com/netdata/netdata/compare/v1.12.0-rc3...v1.12.0)
+
+## [v1.12.0-rc3](https://github.com/netdata/netdata/tree/v1.12.0-rc3) (2019-01-17)
+
+[Full Changelog](https://github.com/netdata/netdata/compare/v1.12.0-rc2...v1.12.0-rc3)
+
+## [v1.12.0-rc2](https://github.com/netdata/netdata/tree/v1.12.0-rc2) (2019-01-03)
+
+[Full Changelog](https://github.com/netdata/netdata/compare/v1.12.0-rc1...v1.12.0-rc2)
+
+## [v1.12.0-rc1](https://github.com/netdata/netdata/tree/v1.12.0-rc1) (2018-12-19)
+
+[Full Changelog](https://github.com/netdata/netdata/compare/v1.12.0-rc0...v1.12.0-rc1)
+
+## [v1.12.0-rc0](https://github.com/netdata/netdata/tree/v1.12.0-rc0) (2018-12-06)
+
+[Full Changelog](https://github.com/netdata/netdata/compare/v1.11.1...v1.12.0-rc0)
+
+## [v1.11.1](https://github.com/netdata/netdata/tree/v1.11.1) (2018-11-22)
+
+[Full Changelog](https://github.com/netdata/netdata/compare/v1.11.0...v1.11.1)
+
+## [v1.11.0](https://github.com/netdata/netdata/tree/v1.11.0) (2018-11-02)
+
+[Full Changelog](https://github.com/netdata/netdata/compare/v1.10.0...v1.11.0)
+
+
+
+\* *This Changelog was automatically generated by [github_changelog_generator](https://github.com/github-changelog-generator/github-changelog-generator)*
diff --git a/CMakeLists.txt b/CMakeLists.txt
new file mode 100644
index 0000000..415c673
--- /dev/null
+++ b/CMakeLists.txt
@@ -0,0 +1,1364 @@
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+# This file is only used for development (netdata in CLion)
+# It can build netdata, but you are on your own...
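+#
+# A typical configure-and-build sequence (an untested sketch; assumes the
+# in-tree build layout this file was written for):
+#
+#   cmake -DCMAKE_BUILD_TYPE=Debug .
+#   cmake --build . -- -j$(nproc)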
+
+cmake_minimum_required(VERSION 3.1.0)
+project(netdata C CXX)
+
+find_package(Threads REQUIRED)
+find_package(PkgConfig REQUIRED)
+
+# default is "Debug"
+#set(CMAKE_BUILD_TYPE "Release")
+
+# set this to see the compilation commands
+# set(CMAKE_VERBOSE_MAKEFILE 1)
+
+
+# -----------------------------------------------------------------------------
+# Set compilation options according to build type
+
+IF("${CMAKE_BUILD_TYPE}" MATCHES "Debug")
+ message(STATUS "building for: debugging")
+
+ ## unfortunately these produce errors
+ #include(CheckCXXCompilerFlag)
+ #CHECK_CXX_COMPILER_FLAG("-Wformat-signedness" CXX_FORMAT_SIGNEDNESS)
+ #CHECK_CXX_COMPILER_FLAG("-Werror=format-security" CXX_FORMAT_SECURITY)
+ #CHECK_CXX_COMPILER_FLAG("-fstack-protector-all" CXX_STACK_PROTECTOR)
+ set(CXX_FORMAT_SIGNEDNESS "-Wformat-signedness")
+ set(CXX_FORMAT_SECURITY "-Werror=format-security")
+ set(CXX_STACK_PROTECTOR "-fstack-protector-all")
+ set(CXX_FLAGS_DEBUG "-O0")
+ set(CMAKE_C_STANDARD 99)
+ set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -O1 -ggdb -Wall -Wextra -DNETDATA_INTERNAL_CHECKS=1 -DNETDATA_VERIFY_LOCKS=1 ${CXX_FORMAT_SIGNEDNESS} ${CXX_FORMAT_SECURITY} ${CXX_STACK_PROTECTOR} ${CXX_FLAGS_DEBUG}")
+ELSE()
+ message(STATUS "building for: release")
+ cmake_policy(SET CMP0069 "NEW")
+ include(CheckIPOSupported)
+ check_ipo_supported(RESULT ipo_supported OUTPUT error)
+ IF(${ipo_supported})
+ message(STATUS "link time optimization: supported")
+ set(CMAKE_INTERPROCEDURAL_OPTIMIZATION TRUE)
+ ELSE()
+ message(STATUS "link time optimization: not supported")
+ ENDIF()
+ENDIF()
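+# Any build type other than "Debug" takes the release path above, e.g.:
+#   cmake -DCMAKE_BUILD_TYPE=Release .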
+
+
+# -----------------------------------------------------------------------------
+# O/S Detection
+
+# these are defined in common.h too
+SET(LINUX False)
+SET(FREEBSD False)
+SET(MACOS False)
+
+# Detect the operating system
+IF(${CMAKE_SYSTEM_NAME} MATCHES "Darwin")
+ SET(TARGET_OS_NAME "macos")
+ SET(TARGET_OS 3)
+ SET(MACOS True)
+ELSEIF(${CMAKE_SYSTEM_NAME} MATCHES "FreeBSD")
+ SET(TARGET_OS_NAME "freebsd")
+ SET(TARGET_OS 2)
+ SET(FREEBSD True)
+ELSE()
+ SET(TARGET_OS_NAME "linux")
+ SET(TARGET_OS 1)
+ SET(LINUX True)
+ENDIF()
+
+# show the operating system on the console
+message(STATUS "operating system: ${TARGET_OS_NAME} (TARGET_OS=${TARGET_OS})")
+
+
+# -----------------------------------------------------------------------------
+# Detect libuuid
+
+pkg_check_modules(UUID REQUIRED uuid)
+set(NETDATA_COMMON_CFLAGS ${NETDATA_COMMON_CFLAGS} ${UUID_CFLAGS_OTHER})
+set(NETDATA_COMMON_LIBRARIES ${NETDATA_COMMON_LIBRARIES} ${UUID_LIBRARIES})
+set(NETDATA_COMMON_INCLUDE_DIRS ${NETDATA_COMMON_INCLUDE_DIRS} ${UUID_INCLUDE_DIRS})
+
+# -----------------------------------------------------------------------------
+# Detect libz
+
+pkg_check_modules(ZLIB REQUIRED zlib)
+set(NETDATA_COMMON_CFLAGS ${NETDATA_COMMON_CFLAGS} ${ZLIB_CFLAGS_OTHER})
+set(NETDATA_COMMON_LIBRARIES ${NETDATA_COMMON_LIBRARIES} ${ZLIB_LIBRARIES})
+set(NETDATA_COMMON_INCLUDE_DIRS ${NETDATA_COMMON_INCLUDE_DIRS} ${ZLIB_INCLUDE_DIRS})
+
+# -----------------------------------------------------------------------------
+# libuv multi-platform support library with a focus on asynchronous I/O
+
+pkg_check_modules(LIBUV REQUIRED libuv)
+set(NETDATA_COMMON_CFLAGS ${NETDATA_COMMON_CFLAGS} ${LIBUV_CFLAGS_OTHER})
+set(NETDATA_COMMON_LIBRARIES ${NETDATA_COMMON_LIBRARIES} ${LIBUV_LIBRARIES})
+set(NETDATA_COMMON_INCLUDE_DIRS ${NETDATA_COMMON_INCLUDE_DIRS} ${LIBUV_INCLUDE_DIRS})
+
+# -----------------------------------------------------------------------------
+# lz4 Extremely Fast Compression algorithm
+
+pkg_check_modules(LIBLZ4 REQUIRED liblz4)
+set(NETDATA_COMMON_CFLAGS ${NETDATA_COMMON_CFLAGS} ${LIBLZ4_CFLAGS_OTHER})
+set(NETDATA_COMMON_LIBRARIES ${NETDATA_COMMON_LIBRARIES} ${LIBLZ4_LIBRARIES})
+set(NETDATA_COMMON_INCLUDE_DIRS ${NETDATA_COMMON_INCLUDE_DIRS} ${LIBLZ4_INCLUDE_DIRS})
+
+# -----------------------------------------------------------------------------
+# Judy General purpose dynamic array
+
+# pkg-config cannot find Judy on Ubuntu (the distribution package appears to ship no .pc file), so detect it manually below
+#pkg_check_modules(JUDY REQUIRED Judy)
+#set(NETDATA_COMMON_CFLAGS ${NETDATA_COMMON_CFLAGS} ${JUDY_CFLAGS_OTHER})
+#set(NETDATA_COMMON_LIBRARIES ${NETDATA_COMMON_LIBRARIES} ${JUDY_LIBRARIES})
+#set(NETDATA_COMMON_INCLUDE_DIRS ${NETDATA_COMMON_INCLUDE_DIRS} ${JUDY_INCLUDE_DIRS})
+set(NETDATA_COMMON_LIBRARIES ${NETDATA_COMMON_LIBRARIES} "-lJudy")
+set(CMAKE_REQUIRED_LIBRARIES "Judy")
+include(CheckSymbolExists)
+check_symbol_exists("JudyLLast" "Judy.h" HAVE_JUDY)
+IF(HAVE_JUDY)
+ message(STATUS "Judy library found")
+ELSE()
+ message( FATAL_ERROR "libJudy required but not found. Try installing 'libjudy-dev' or 'Judy-devel'." )
+ENDIF()
+
+# -----------------------------------------------------------------------------
+# OpenSSL Cryptography and SSL/TLS Toolkit
+
+pkg_check_modules(OPENSSL REQUIRED openssl)
+set(NETDATA_COMMON_CFLAGS ${NETDATA_COMMON_CFLAGS} ${OPENSSL_CFLAGS_OTHER})
+set(NETDATA_COMMON_LIBRARIES ${NETDATA_COMMON_LIBRARIES} ${OPENSSL_LIBRARIES})
+set(NETDATA_COMMON_INCLUDE_DIRS ${NETDATA_COMMON_INCLUDE_DIRS} ${OPENSSL_INCLUDE_DIRS})
+
+# -----------------------------------------------------------------------------
+# JSON-C, used by the health engine
+
+pkg_check_modules(JSON REQUIRED json-c)
+set(NETDATA_COMMON_CFLAGS ${NETDATA_COMMON_CFLAGS} ${JSON_CFLAGS_OTHER})
+set(NETDATA_COMMON_LIBRARIES ${NETDATA_COMMON_LIBRARIES} ${JSON_LIBRARIES})
+set(NETDATA_COMMON_INCLUDE_DIRS ${NETDATA_COMMON_INCLUDE_DIRS} ${JSON_INCLUDE_DIRS})
+
+
+# -----------------------------------------------------------------------------
+# Detect libcap
+
+IF(LINUX)
+ pkg_check_modules(CAP QUIET libcap)
+ # later we use:
+ # ${CAP_LIBRARIES}
+ # ${CAP_CFLAGS_OTHER}
+ # ${CAP_INCLUDE_DIRS}
+ENDIF(LINUX)
+
+
+# -----------------------------------------------------------------------------
+# Detect libipmimonitoring
+
+IF(LINUX)
+ pkg_check_modules(IPMI libipmimonitoring)
+ # later we use:
+ # ${IPMI_LIBRARIES}
+ # ${IPMI_CFLAGS_OTHER}
+ # ${IPMI_INCLUDE_DIRS}
+ENDIF(LINUX)
+
+
+# -----------------------------------------------------------------------------
+# Detect libmnl
+IF(LINUX)
+ pkg_check_modules(MNL libmnl)
+ # later we use:
+ # ${MNL_LIBRARIES}
+ # ${MNL_CFLAGS_OTHER}
+ # ${MNL_INCLUDE_DIRS}
+ENDIF(LINUX)
+
+
+# -----------------------------------------------------------------------------
+# Detect libnetfilter_acct
+
+pkg_check_modules(NFACCT libnetfilter_acct)
+# later we use:
+# ${NFACCT_LIBRARIES}
+# ${NFACCT_CFLAGS_OTHER}
+# ${NFACCT_INCLUDE_DIRS}
+
+
+# -----------------------------------------------------------------------------
+# Detect libxenstat
+
+pkg_check_modules(XENSTAT libxenstat)
+# later we use:
+# ${XENSTAT_LIBRARIES}
+# ${XENSTAT_CFLAGS_OTHER}
+# ${XENSTAT_INCLUDE_DIRS}
+
+
+# -----------------------------------------------------------------------------
+# Detect macOS IOKit and Foundation frameworks
+
+IF(MACOS)
+ find_library(IOKIT IOKit)
+ find_library(FOUNDATION Foundation)
+ # later we use:
+ # ${FOUNDATION}
+ # ${IOKIT}
+ENDIF(MACOS)
+
+
+# -----------------------------------------------------------------------------
+# Detect libcrypto
+
+pkg_check_modules(CRYPTO libcrypto)
+# later we use:
+# ${CRYPTO_LIBRARIES}
+# ${CRYPTO_CFLAGS_OTHER}
+# ${CRYPTO_INCLUDE_DIRS}
+
+
+# -----------------------------------------------------------------------------
+# Detect libssl
+
+pkg_check_modules(SSL libssl)
+# later we use:
+# ${SSL_LIBRARIES}
+# ${SSL_CFLAGS_OTHER}
+# ${SSL_INCLUDE_DIRS}
+
+
+# -----------------------------------------------------------------------------
+# Detect libcurl
+
+pkg_check_modules(CURL libcurl)
+# later we use:
+# ${CURL_LIBRARIES}
+# ${CURL_CFLAGS_OTHER}
+# ${CURL_INCLUDE_DIRS}
+
+# -----------------------------------------------------------------------------
+# Detect libaws-checksums
+
+find_library(HAVE_AWS_CHECKSUMS aws-checksums)
+# later we use:
+# ${HAVE_AWS_CHECKSUMS}
+
+# -----------------------------------------------------------------------------
+# Detect libaws-c-common
+
+find_library(HAVE_AWS_COMMON aws-c-common)
+# later we use:
+# ${HAVE_AWS_COMMON}
+
+# -----------------------------------------------------------------------------
+# Detect libaws-c-event-stream
+
+find_library(HAVE_AWS_EVENT_STREAM aws-c-event-stream)
+# later we use:
+# ${HAVE_AWS_EVENT_STREAM}
+
+# -----------------------------------------------------------------------------
+# Detect libaws-cpp-sdk-core
+
+pkg_check_modules(AWS_CORE aws-cpp-sdk-core)
+# later we use:
+# ${AWS_CORE_LIBRARIES}
+# ${AWS_CORE_CFLAGS_OTHER}
+# ${AWS_CORE_INCLUDE_DIRS}
+
+# -----------------------------------------------------------------------------
+# Detect libaws-cpp-sdk-kinesis
+
+pkg_check_modules(KINESIS aws-cpp-sdk-kinesis)
+# later we use:
+# ${KINESIS_LIBRARIES}
+# ${KINESIS_CFLAGS_OTHER}
+# ${KINESIS_INCLUDE_DIRS}
+
+# -----------------------------------------------------------------------------
+# Detect libgrpc
+
+pkg_check_modules(GRPC grpc)
+# later we use:
+# ${GRPC_LIBRARIES}
+# ${GRPC_CFLAGS_OTHER}
+# ${GRPC_INCLUDE_DIRS}
+
+# -----------------------------------------------------------------------------
+# Detect libgoogleapis_cpp_pubsub_protos
+
+pkg_check_modules(PUBSUB googleapis_cpp_pubsub_protos)
+# later we use:
+# ${PUBSUB_LIBRARIES}
+# ${PUBSUB_CFLAGS_OTHER}
+# ${PUBSUB_INCLUDE_DIRS}
+
+# -----------------------------------------------------------------------------
+# Detect libprotobuf
+
+pkg_check_modules(PROTOBUF protobuf>=3)
+# later we use:
+# ${PROTOBUF_LIBRARIES}
+# ${PROTOBUF_CFLAGS_OTHER}
+# ${PROTOBUF_INCLUDE_DIRS}
+
+
+# -----------------------------------------------------------------------------
+# Detect libsnappy
+
+pkg_check_modules(SNAPPY snappy)
+# later we use:
+# ${SNAPPY_LIBRARIES}
+# ${SNAPPY_CFLAGS_OTHER}
+# ${SNAPPY_INCLUDE_DIRS}
+
+
+# -----------------------------------------------------------------------------
+# Detect libmongoc
+
+find_package(libmongoc-1.0)
+# later we use:
+# ${MONGOC_LIBRARIES}
+# ${MONGOC_INCLUDE_DIRS}
+
+
+# -----------------------------------------------------------------------------
+# Detect libbpf
+IF(LINUX AND EXISTS "${CMAKE_SOURCE_DIR}/externaldeps/libbpf/libbpf.a")
+ message(STATUS "libbpf library found")
+ pkg_check_modules(ELF REQUIRED libelf)
+ # later we use:
+ # ${ELF_LIBRARIES}
+ # ${ELF_CFLAGS_OTHER}
+ # ${ELF_INCLUDE_DIRS}
+ IF(ELF_LIBRARIES)
+ list(APPEND NETDATA_COMMON_CFLAGS ${ELF_CFLAGS_OTHER})
+ list(INSERT NETDATA_COMMON_LIBRARIES 0
+ ${CMAKE_SOURCE_DIR}/externaldeps/libbpf/libbpf.a
+ ${ELF_LIBRARIES})
+ list(APPEND NETDATA_COMMON_INCLUDE_DIRS ${ELF_INCLUDE_DIRS})
+ include_directories(BEFORE ${CMAKE_SOURCE_DIR}/externaldeps/libbpf/include)
+ set(ENABLE_PLUGIN_EBPF True)
+ ELSE(ELF_LIBRARIES)
+ set(ENABLE_PLUGIN_EBPF False)
+ message(STATUS "ebpf plugin: disabled (requires libelf)")
+ ENDIF(ELF_LIBRARIES)
+ENDIF()
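+# (The static libbpf archive and its headers are expected to have been staged
+# under externaldeps/libbpf/ beforehand; this file does not fetch them.)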
+
+
+# -----------------------------------------------------------------------------
+# netdata files
+
+set(LIBNETDATA_FILES
+ libnetdata/adaptive_resortable_list/adaptive_resortable_list.c
+ libnetdata/adaptive_resortable_list/adaptive_resortable_list.h
+ libnetdata/config/appconfig.c
+ libnetdata/config/appconfig.h
+ libnetdata/avl/avl.c
+ libnetdata/avl/avl.h
+ libnetdata/buffer/buffer.c
+ libnetdata/buffer/buffer.h
+ libnetdata/clocks/clocks.c
+ libnetdata/clocks/clocks.h
+ libnetdata/dictionary/dictionary.c
+ libnetdata/dictionary/dictionary.h
+ libnetdata/eval/eval.c
+ libnetdata/eval/eval.h
+ libnetdata/inlined.h
+ libnetdata/libnetdata.c
+ libnetdata/libnetdata.h
+ libnetdata/required_dummies.h
+ libnetdata/locks/locks.c
+ libnetdata/locks/locks.h
+ libnetdata/log/log.c
+ libnetdata/log/log.h
+ libnetdata/os.c
+ libnetdata/os.h
+ libnetdata/popen/popen.c
+ libnetdata/popen/popen.h
+ libnetdata/procfile/procfile.c
+ libnetdata/procfile/procfile.h
+ libnetdata/simple_pattern/simple_pattern.c
+ libnetdata/simple_pattern/simple_pattern.h
+ libnetdata/socket/socket.c
+ libnetdata/socket/socket.h
+ libnetdata/statistical/statistical.c
+ libnetdata/statistical/statistical.h
+ libnetdata/storage_number/storage_number.c
+ libnetdata/storage_number/storage_number.h
+ libnetdata/threads/threads.c
+ libnetdata/threads/threads.h
+ libnetdata/url/url.c
+ libnetdata/url/url.h
+ libnetdata/json/json.c
+ libnetdata/json/json.h
+ libnetdata/json/jsmn.c
+ libnetdata/json/jsmn.h
+ libnetdata/health/health.c
+ libnetdata/health/health.h
+ libnetdata/string/utf8.h
+ libnetdata/socket/security.c
+ libnetdata/socket/security.h
+ libnetdata/circular_buffer/circular_buffer.c
+ libnetdata/circular_buffer/circular_buffer.h)
+
+IF(ENABLE_PLUGIN_EBPF)
+ list(APPEND LIBNETDATA_FILES
+ libnetdata/ebpf/ebpf.c
+ libnetdata/ebpf/ebpf.h)
+ENDIF()
+
+add_library(libnetdata OBJECT ${LIBNETDATA_FILES})
+
+set(APPS_PLUGIN_FILES
+ collectors/apps.plugin/apps_plugin.c)
+
+set(CHECKS_PLUGIN_FILES
+ collectors/checks.plugin/plugin_checks.c
+ collectors/checks.plugin/plugin_checks.h
+ )
+
+set(FREEBSD_PLUGIN_FILES
+ collectors/freebsd.plugin/plugin_freebsd.c
+ collectors/freebsd.plugin/plugin_freebsd.h
+ collectors/freebsd.plugin/freebsd_sysctl.c
+ collectors/freebsd.plugin/freebsd_getmntinfo.c
+ collectors/freebsd.plugin/freebsd_getifaddrs.c
+ collectors/freebsd.plugin/freebsd_devstat.c
+ collectors/freebsd.plugin/freebsd_kstat_zfs.c
+ collectors/freebsd.plugin/freebsd_ipfw.c
+ collectors/proc.plugin/zfs_common.c
+ collectors/proc.plugin/zfs_common.h
+ )
+
+set(HEALTH_PLUGIN_FILES
+ health/health.c
+ health/health.h
+ health/health_config.c
+ health/health_json.c
+ health/health_log.c)
+
+set(IDLEJITTER_PLUGIN_FILES
+ collectors/idlejitter.plugin/plugin_idlejitter.c
+ collectors/idlejitter.plugin/plugin_idlejitter.h
+ )
+
+set(CGROUPS_PLUGIN_FILES
+ collectors/cgroups.plugin/sys_fs_cgroup.c
+ collectors/cgroups.plugin/sys_fs_cgroup.h
+ )
+
+set(CGROUP_NETWORK_FILES
+ collectors/cgroups.plugin/cgroup-network.c
+ )
+
+set(DISKSPACE_PLUGIN_FILES
+ collectors/diskspace.plugin/plugin_diskspace.h
+ collectors/diskspace.plugin/plugin_diskspace.c
+ )
+
+set(FREEIPMI_PLUGIN_FILES
+ collectors/freeipmi.plugin/freeipmi_plugin.c
+ )
+
+set(NFACCT_PLUGIN_FILES
+ collectors/nfacct.plugin/plugin_nfacct.c
+ )
+
+set(XENSTAT_PLUGIN_FILES
+ collectors/xenstat.plugin/xenstat_plugin.c
+ )
+
+set(PERF_PLUGIN_FILES
+ collectors/perf.plugin/perf_plugin.c
+ )
+
+set(SLABINFO_PLUGIN_FILES
+ collectors/slabinfo.plugin/slabinfo.c
+ )
+
+set(EBPF_PROCESS_PLUGIN_FILES
+ collectors/ebpf.plugin/ebpf.c
+ collectors/ebpf.plugin/ebpf.h
+ collectors/ebpf.plugin/ebpf_process.c
+ collectors/ebpf.plugin/ebpf_process.h
+ collectors/ebpf.plugin/ebpf_socket.c
+ collectors/ebpf.plugin/ebpf_socket.h
+ collectors/ebpf.plugin/ebpf_apps.c
+ collectors/ebpf.plugin/ebpf_apps.h
+ )
+
+set(PROC_PLUGIN_FILES
+ collectors/proc.plugin/ipc.c
+ collectors/proc.plugin/plugin_proc.c
+ collectors/proc.plugin/plugin_proc.h
+ collectors/proc.plugin/proc_diskstats.c
+ collectors/proc.plugin/proc_mdstat.c
+ collectors/proc.plugin/proc_interrupts.c
+ collectors/proc.plugin/proc_softirqs.c
+ collectors/proc.plugin/proc_loadavg.c
+ collectors/proc.plugin/proc_meminfo.c
+ collectors/proc.plugin/proc_pagetypeinfo.c
+ collectors/proc.plugin/proc_net_dev.c
+ collectors/proc.plugin/proc_net_wireless.c
+ collectors/proc.plugin/proc_net_ip_vs_stats.c
+ collectors/proc.plugin/proc_net_netstat.c
+ collectors/proc.plugin/proc_net_rpc_nfs.c
+ collectors/proc.plugin/proc_net_rpc_nfsd.c
+ collectors/proc.plugin/proc_net_snmp.c
+ collectors/proc.plugin/proc_net_snmp6.c
+ collectors/proc.plugin/proc_net_sctp_snmp.c
+ collectors/proc.plugin/proc_net_sockstat.c
+ collectors/proc.plugin/proc_net_sockstat6.c
+ collectors/proc.plugin/proc_net_softnet_stat.c
+ collectors/proc.plugin/proc_net_stat_conntrack.c
+ collectors/proc.plugin/proc_net_stat_synproxy.c
+ collectors/proc.plugin/proc_self_mountinfo.c
+ collectors/proc.plugin/proc_self_mountinfo.h
+ collectors/proc.plugin/zfs_common.c
+ collectors/proc.plugin/zfs_common.h
+ collectors/proc.plugin/proc_spl_kstat_zfs.c
+ collectors/proc.plugin/proc_stat.c
+ collectors/proc.plugin/proc_sys_kernel_random_entropy_avail.c
+ collectors/proc.plugin/proc_vmstat.c
+ collectors/proc.plugin/proc_uptime.c
+ collectors/proc.plugin/proc_pressure.c
+ collectors/proc.plugin/proc_pressure.h
+ collectors/proc.plugin/sys_kernel_mm_ksm.c
+ collectors/proc.plugin/sys_block_zram.c
+ collectors/proc.plugin/sys_devices_system_edac_mc.c
+ collectors/proc.plugin/sys_devices_system_node.c
+ collectors/proc.plugin/sys_class_infiniband.c
+ collectors/proc.plugin/sys_fs_btrfs.c
+ collectors/proc.plugin/sys_class_power_supply.c
+ )
+
+set(TC_PLUGIN_FILES
+ collectors/tc.plugin/plugin_tc.c
+ collectors/tc.plugin/plugin_tc.h
+ )
+
+set(MACOS_PLUGIN_FILES
+ collectors/macos.plugin/plugin_macos.c
+ collectors/macos.plugin/plugin_macos.h
+ collectors/macos.plugin/macos_sysctl.c
+ collectors/macos.plugin/macos_mach_smi.c
+ collectors/macos.plugin/macos_fw.c
+ )
+
+set(PLUGINSD_PLUGIN_FILES
+ collectors/plugins.d/plugins_d.c
+ collectors/plugins.d/plugins_d.h
+ collectors/plugins.d/pluginsd_parser.c
+ collectors/plugins.d/pluginsd_parser.h
+ )
+
+set(PARSER_PLUGIN_FILES
+ parser/parser.c
+ parser/parser.h
+ )
+
+set(REGISTRY_PLUGIN_FILES
+ registry/registry.c
+ registry/registry.h
+ registry/registry_db.c
+ registry/registry_init.c
+ registry/registry_internals.c
+ registry/registry_internals.h
+ registry/registry_log.c
+ registry/registry_machine.c
+ registry/registry_machine.h
+ registry/registry_person.c
+ registry/registry_person.h
+ registry/registry_url.c
+ registry/registry_url.h
+ )
+
+set(STATSD_PLUGIN_FILES
+ collectors/statsd.plugin/statsd.c
+ collectors/statsd.plugin/statsd.h
+ )
+
+set(RRD_PLUGIN_FILES
+ database/rrdcalc.c
+ database/rrdcalc.h
+ database/rrdcalctemplate.c
+ database/rrdcalctemplate.h
+ database/rrddim.c
+ database/rrddimvar.c
+ database/rrddimvar.h
+ database/rrdfamily.c
+ database/rrdhost.c
+ database/rrdlabels.c
+ database/rrd.c
+ database/rrd.h
+ database/rrdset.c
+ database/rrdsetvar.c
+ database/rrdsetvar.h
+ database/rrdvar.c
+ database/rrdvar.h
+ database/sqlite/sqlite_functions.c
+ database/sqlite/sqlite_functions.h
+ database/sqlite/sqlite3.c
+ database/sqlite/sqlite3.h
+ database/engine/rrdengine.c
+ database/engine/rrdengine.h
+ database/engine/rrddiskprotocol.h
+ database/engine/datafile.c
+ database/engine/datafile.h
+ database/engine/journalfile.c
+ database/engine/journalfile.h
+ database/engine/rrdenginelib.c
+ database/engine/rrdenginelib.h
+ database/engine/rrdengineapi.c
+ database/engine/rrdengineapi.h
+ database/engine/pagecache.c
+ database/engine/pagecache.h
+ database/engine/rrdenglocking.c
+ database/engine/rrdenglocking.h
+ database/engine/metadata_log/metadatalog.h
+ database/engine/metadata_log/metadatalogapi.c
+ database/engine/metadata_log/metadatalogapi.h
+ database/engine/metadata_log/logfile.h
+ database/engine/metadata_log/logfile.c
+ database/engine/metadata_log/metadatalogprotocol.h
+ database/engine/metadata_log/metalogpluginsd.c
+ database/engine/metadata_log/metalogpluginsd.h
+ database/engine/metadata_log/compaction.c
+ database/engine/metadata_log/compaction.h
+ )
+
+set(WEB_PLUGIN_FILES
+ web/server/web_client.c
+ web/server/web_client.h
+ web/server/web_server.c
+ web/server/web_server.h
+ web/server/static/static-threaded.c
+ web/server/static/static-threaded.h
+ web/server/web_client_cache.c
+ web/server/web_client_cache.h
+ )
+
+set(API_PLUGIN_FILES
+ web/api/web_api_v1.c
+ web/api/web_api_v1.h
+ web/api/badges/web_buffer_svg.c
+ web/api/badges/web_buffer_svg.h
+ web/api/exporters/allmetrics.c
+ web/api/exporters/allmetrics.h
+ web/api/exporters/shell/allmetrics_shell.c
+ web/api/exporters/shell/allmetrics_shell.h
+ web/api/queries/rrdr.c
+ web/api/queries/rrdr.h
+ web/api/queries/query.c
+ web/api/queries/query.h
+ web/api/queries/average/average.c
+ web/api/queries/average/average.h
+ web/api/queries/incremental_sum/incremental_sum.c
+ web/api/queries/incremental_sum/incremental_sum.h
+ web/api/queries/max/max.c
+ web/api/queries/max/max.h
+ web/api/queries/min/min.c
+ web/api/queries/min/min.h
+ web/api/queries/sum/sum.c
+ web/api/queries/sum/sum.h
+ web/api/queries/median/median.c
+ web/api/queries/median/median.h
+ web/api/queries/stddev/stddev.c
+ web/api/queries/stddev/stddev.h
+ web/api/queries/ses/ses.c
+ web/api/queries/ses/ses.h
+ web/api/queries/des/des.c
+ web/api/queries/des/des.h
+ web/api/formatters/rrd2json.c
+ web/api/formatters/rrd2json.h
+ web/api/formatters/csv/csv.c
+ web/api/formatters/csv/csv.h
+ web/api/formatters/json/json.c
+ web/api/formatters/json/json.h
+ web/api/formatters/ssv/ssv.c
+ web/api/formatters/ssv/ssv.h
+ web/api/formatters/value/value.c
+ web/api/formatters/value/value.h
+ web/api/formatters/json_wrapper.c
+ web/api/formatters/json_wrapper.h
+ web/api/formatters/charts2json.c
+ web/api/formatters/charts2json.h
+ web/api/formatters/rrdset2json.c
+ web/api/formatters/rrdset2json.h
+ web/api/health/health_cmdapi.c
+ )
+
+set(STREAMING_PLUGIN_FILES
+ streaming/rrdpush.c
+ streaming/rrdpush.h
+ streaming/receiver.c
+ streaming/sender.c
+ )
+
+set(BACKENDS_PLUGIN_FILES
+ backends/backends.c
+ backends/backends.h
+ backends/graphite/graphite.c
+ backends/graphite/graphite.h
+ backends/json/json.c
+ backends/json/json.h
+ backends/opentsdb/opentsdb.c
+ backends/opentsdb/opentsdb.h
+ backends/prometheus/backend_prometheus.c
+ backends/prometheus/backend_prometheus.h
+ )
+
+set(CLAIM_PLUGIN_FILES
+ claim/claim.c
+ claim/claim.h
+ aclk/legacy/aclk_rrdhost_state.h
+ aclk/legacy/aclk_common.c
+ aclk/legacy/aclk_common.h
+ )
+
+set(ACLK_PLUGIN_FILES
+ aclk/legacy/agent_cloud_link.c
+ aclk/legacy/agent_cloud_link.h
+ aclk/legacy/aclk_query.c
+ aclk/legacy/aclk_query.h
+ aclk/legacy/aclk_lws_wss_client.c
+ aclk/legacy/aclk_lws_wss_client.h
+ aclk/legacy/aclk_lws_https_client.c
+ aclk/legacy/aclk_lws_https_client.h
+ aclk/legacy/mqtt.c
+ aclk/legacy/mqtt.h
+ aclk/legacy/aclk_stats.c
+ aclk/legacy/aclk_stats.h
+ aclk/legacy/aclk_rx_msgs.c
+ aclk/legacy/aclk_rx_msgs.h
+ )
+
+set(SPAWN_PLUGIN_FILES
+ spawn/spawn.c
+ spawn/spawn_server.c
+ spawn/spawn_client.c
+ spawn/spawn.h
+ )
+
+set(ACLK_STATIC_LIBS
+ ${CMAKE_SOURCE_DIR}/externaldeps/mosquitto/libmosquitto.a
+ ${CMAKE_SOURCE_DIR}/externaldeps/libwebsockets/libwebsockets.a
+ )
+
+set(EXPORTING_ENGINE_FILES
+ exporting/exporting_engine.c
+ exporting/exporting_engine.h
+ exporting/graphite/graphite.c
+ exporting/graphite/graphite.h
+ exporting/json/json.c
+ exporting/json/json.h
+ exporting/opentsdb/opentsdb.c
+ exporting/opentsdb/opentsdb.h
+ exporting/prometheus/prometheus.c
+ exporting/prometheus/prometheus.h
+ exporting/read_config.c
+ exporting/clean_connectors.c
+ exporting/init_connectors.c
+ exporting/process_data.c
+ exporting/check_filters.c
+ exporting/send_data.c
+ exporting/send_internal_metrics.c
+ )
+
+set(PROMETHEUS_REMOTE_WRITE_EXPORTING_FILES
+ exporting/prometheus/remote_write/remote_write.c
+ exporting/prometheus/remote_write/remote_write.h
+ exporting/prometheus/remote_write/remote_write_request.cc
+ exporting/prometheus/remote_write/remote_write_request.h
+ )
+
+set(KINESIS_EXPORTING_FILES
+ exporting/aws_kinesis/aws_kinesis.c
+ exporting/aws_kinesis/aws_kinesis.h
+ exporting/aws_kinesis/aws_kinesis_put_record.cc
+ exporting/aws_kinesis/aws_kinesis_put_record.h
+ )
+
+set(PUBSUB_EXPORTING_FILES
+ exporting/pubsub/pubsub.c
+ exporting/pubsub/pubsub.h
+ exporting/pubsub/pubsub_publish.cc
+ exporting/pubsub/pubsub_publish.h
+ )
+
+set(MONGODB_EXPORTING_FILES
+ exporting/mongodb/mongodb.c
+ exporting/mongodb/mongodb.h
+ )
+
+set(KINESIS_BACKEND_FILES
+ backends/aws_kinesis/aws_kinesis.c
+ backends/aws_kinesis/aws_kinesis.h
+ backends/aws_kinesis/aws_kinesis_put_record.cc
+ backends/aws_kinesis/aws_kinesis_put_record.h
+ )
+
+set(PROMETHEUS_REMOTE_WRITE_BACKEND_FILES
+ backends/prometheus/remote_write/remote_write.cc
+ backends/prometheus/remote_write/remote_write.h
+ )
+
+set(MONGODB_BACKEND_FILES
+ backends/mongodb/mongodb.c
+ backends/mongodb/mongodb.h
+ )
+
+set(DAEMON_FILES
+ daemon/buildinfo.c
+ daemon/buildinfo.h
+ daemon/common.c
+ daemon/common.h
+ daemon/daemon.c
+ daemon/daemon.h
+ daemon/global_statistics.c
+ daemon/global_statistics.h
+ daemon/main.c
+ daemon/main.h
+ daemon/signals.c
+ daemon/signals.h
+ daemon/commands.c
+ daemon/commands.h
+ daemon/unit_test.c
+ daemon/unit_test.h
+ )
+
+set(NETDATA_FILES
+ collectors/all.h
+ ${DAEMON_FILES}
+ ${API_PLUGIN_FILES}
+ ${BACKENDS_PLUGIN_FILES}
+ ${EXPORTING_ENGINE_FILES}
+ ${CHECKS_PLUGIN_FILES}
+ ${HEALTH_PLUGIN_FILES}
+ ${IDLEJITTER_PLUGIN_FILES}
+ ${PLUGINSD_PLUGIN_FILES}
+ ${RRD_PLUGIN_FILES}
+ ${REGISTRY_PLUGIN_FILES}
+ ${STATSD_PLUGIN_FILES}
+ ${STREAMING_PLUGIN_FILES}
+ ${WEB_PLUGIN_FILES}
+ ${CLAIM_PLUGIN_FILES}
+ ${SPAWN_PLUGIN_FILES}
+ ${PARSER_PLUGIN_FILES}
+)
+
+set(NETDATACLI_FILES
+ daemon/commands.h
+ cli/cli.c
+ cli/cli.h
+ )
+
+include_directories(AFTER .)
+
+add_definitions(
+ -DHAVE_CONFIG_H
+ -DTARGET_OS=${TARGET_OS}
+ -DCACHE_DIR="/var/cache/netdata"
+ -DCONFIG_DIR="/etc/netdata"
+ -DLIBCONFIG_DIR="/usr/lib/netdata/conf.d"
+ -DLOG_DIR="/var/log/netdata"
+ -DPLUGINS_DIR="/usr/libexec/netdata/plugins.d"
+ -DWEB_DIR="/usr/share/netdata/web"
+ -DVARLIB_DIR="/var/lib/netdata"
+)
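+# NOTE: the directories above are compile-time constants mirroring the default
+# system-wide install layout; they are not CMake cache options, so adjust them
+# here if your development install uses a different prefix.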
+
+# -----------------------------------------------------------------------------
+# kinesis backend
+
+IF(KINESIS_LIBRARIES AND AWS_CORE_LIBRARIES AND HAVE_AWS_EVENT_STREAM AND HAVE_AWS_COMMON AND HAVE_AWS_CHECKSUMS AND
+ CRYPTO_LIBRARIES AND SSL_LIBRARIES AND CURL_LIBRARIES)
+ SET(ENABLE_BACKEND_KINESIS True)
+ELSE()
+ SET(ENABLE_BACKEND_KINESIS False)
+ENDIF()
+
+IF(ENABLE_BACKEND_KINESIS)
+ message(STATUS "kinesis backend: enabled")
+ list(APPEND NETDATA_FILES ${KINESIS_BACKEND_FILES} ${KINESIS_EXPORTING_FILES})
+ list(APPEND NETDATA_COMMON_LIBRARIES ${KINESIS_LIBRARIES} ${AWS_CORE_LIBRARIES}
+ ${CRYPTO_LIBRARIES} ${SSL_LIBRARIES} ${CURL_LIBRARIES})
+ list(APPEND NETDATA_COMMON_INCLUDE_DIRS ${KINESIS_INCLUDE_DIRS} ${AWS_CORE_INCLUDE_DIRS}
+ ${CRYPTO_INCLUDE_DIRS} ${SSL_INCLUDE_DIRS} ${CURL_INCLUDE_DIRS})
+ list(APPEND NETDATA_COMMON_CFLAGS ${CRYPTO_CFLAGS_OTHER} ${SSL_CFLAGS_OTHER} ${CURL_CFLAGS_OTHER})
+ELSE()
+ message(STATUS "kinesis backend: disabled (requires AWS SDK for C++)")
+ENDIF()
+
+# -----------------------------------------------------------------------------
+# Pub/Sub exporting connector
+
+IF(GRPC_LIBRARIES AND PUBSUB_LIBRARIES)
+ SET(ENABLE_EXPORTING_PUBSUB True)
+ELSE()
+ SET(ENABLE_EXPORTING_PUBSUB False)
+ENDIF()
+
+IF(ENABLE_EXPORTING_PUBSUB)
+ message(STATUS "pubsub exporting connector: enabled")
+ list(APPEND NETDATA_FILES ${PUBSUB_EXPORTING_FILES})
+ list(APPEND NETDATA_COMMON_LIBRARIES ${GRPC_LIBRARIES} ${PUBSUB_LIBRARIES})
+ list(APPEND NETDATA_COMMON_INCLUDE_DIRS ${GRPC_INCLUDE_DIRS} ${PUBSUB_INCLUDE_DIRS})
+ list(APPEND NETDATA_COMMON_CFLAGS ${GRPC_CFLAGS_OTHER} ${PUBSUB_CFLAGS_OTHER})
+ELSE()
+ message(STATUS "pubsub exporting connector: disabled (requires grpc and googleapis)")
+ENDIF()
+
+# -----------------------------------------------------------------------------
+# prometheus remote write backend
+
+IF(PROTOBUF_LIBRARIES AND SNAPPY_LIBRARIES)
+ SET(ENABLE_BACKEND_PROMETHEUS_REMOTE_WRITE True)
+ELSE()
+ SET(ENABLE_BACKEND_PROMETHEUS_REMOTE_WRITE False)
+ENDIF()
+
+IF(ENABLE_BACKEND_PROMETHEUS_REMOTE_WRITE)
+ message(STATUS "prometheus remote write backend: enabled")
+
+ find_package(Protobuf REQUIRED)
+ protobuf_generate_cpp(PROTO_SRCS PROTO_HDRS exporting/prometheus/remote_write/remote_write.proto)
+
+ list(APPEND NETDATA_FILES ${PROMETHEUS_REMOTE_WRITE_BACKEND_FILES} ${PROMETHEUS_REMOTE_WRITE_EXPORTING_FILES} ${PROTO_SRCS} ${PROTO_HDRS})
+ list(APPEND NETDATA_COMMON_LIBRARIES ${PROTOBUF_LIBRARIES} ${SNAPPY_LIBRARIES})
+ list(APPEND NETDATA_COMMON_INCLUDE_DIRS ${PROTOBUF_INCLUDE_DIRS} ${SNAPPY_INCLUDE_DIRS} ${CMAKE_CURRENT_BINARY_DIR})
+ list(APPEND NETDATA_COMMON_CFLAGS ${PROTOBUF_CFLAGS_OTHER} ${SNAPPY_CFLAGS_OTHER})
+ELSE()
+ message(STATUS "prometheus remote write backend: disabled (requires protobuf and snappy libraries)")
+ENDIF()
+
+# -----------------------------------------------------------------------------
+# mongodb backend
+
+IF(libmongoc-1.0_FOUND)
+ message(STATUS "mongodb backend: enabled")
+
+ list(APPEND NETDATA_FILES ${MONGODB_BACKEND_FILES} ${MONGODB_EXPORTING_FILES})
+ list(APPEND NETDATA_COMMON_LIBRARIES ${MONGOC_LIBRARIES})
+ list(APPEND NETDATA_COMMON_INCLUDE_DIRS ${MONGOC_INCLUDE_DIRS})
+ELSE()
+ message(STATUS "mongodb backend: disabled (requires mongoc library)")
+ENDIF()
+
+set(NETDATA_COMMON_LIBRARIES ${NETDATA_COMMON_LIBRARIES} m ${CMAKE_THREAD_LIBS_INIT})
+
+set(ACLK_CAN_BUILD 1)
+if(NOT EXISTS "${CMAKE_SOURCE_DIR}/externaldeps/mosquitto/libmosquitto.a")
+ message(WARNING "Static build of mosquitto not found. Disabling ACLK")
+ set(ACLK_CAN_BUILD 0)
+ENDIF()
+
+if(NOT EXISTS "${CMAKE_SOURCE_DIR}/externaldeps/libwebsockets/libwebsockets.a")
+ message(WARNING "Static build of libwebsockets not found. Disabling ACLK")
+ set(ACLK_CAN_BUILD 0)
+ENDIF()
+
+IF(ACLK_CAN_BUILD)
+ message(STATUS "agent-cloud-link: enabled")
+ list(APPEND NETDATA_FILES ${ACLK_PLUGIN_FILES})
+ list(APPEND NETDATA_COMMON_LIBRARIES ${ACLK_STATIC_LIBS})
+ include_directories(BEFORE ${CMAKE_SOURCE_DIR}/externaldeps/libwebsockets/include)
+ IF(LINUX AND CAP_FOUND)
+ list(APPEND NETDATA_COMMON_LIBRARIES ${CAP_LIBRARIES})
+ list(APPEND NETDATA_COMMON_INCLUDE_DIRS ${CAP_INCLUDE_DIRS})
+ list(APPEND NETDATA_COMMON_CFLAGS ${CAP_CFLAGS_OTHER})
+ ENDIF()
+ELSE()
+ message(STATUS "agent-cloud-link: disabled")
+ENDIF()
+
+# -----------------------------------------------------------------------------
+# netdata
+
+IF(LINUX)
+ add_executable(netdata config.h ${NETDATA_FILES}
+ ${CGROUPS_PLUGIN_FILES}
+ ${DISKSPACE_PLUGIN_FILES}
+ ${PROC_PLUGIN_FILES}
+ ${TC_PLUGIN_FILES}
+ )
+ target_link_libraries (netdata libnetdata ${NETDATA_COMMON_LIBRARIES})
+ target_include_directories(netdata PUBLIC ${NETDATA_COMMON_INCLUDE_DIRS})
+ target_compile_options(netdata PUBLIC ${NETDATA_COMMON_CFLAGS})
+
+ SET(ENABLE_PLUGIN_CGROUP_NETWORK True)
+ SET(ENABLE_PLUGIN_APPS True)
+ SET(ENABLE_PLUGIN_PERF True)
+ SET(ENABLE_PLUGIN_SLABINFO True)
+
+ELSEIF(FREEBSD)
+ add_executable(netdata config.h ${NETDATA_FILES} ${FREEBSD_PLUGIN_FILES})
+ target_link_libraries (netdata libnetdata ${NETDATA_COMMON_LIBRARIES})
+ target_include_directories(netdata PUBLIC ${NETDATA_COMMON_INCLUDE_DIRS})
+ target_compile_options(netdata PUBLIC ${NETDATA_COMMON_CFLAGS})
+ SET(ENABLE_PLUGIN_CGROUP_NETWORK False)
+ SET(ENABLE_PLUGIN_APPS True)
+ SET(ENABLE_PLUGIN_PERF False)
+ SET(ENABLE_PLUGIN_SLABINFO False)
+ SET(ENABLE_PLUGIN_EBPF False)
+
+ELSEIF(MACOS)
+ add_executable(netdata config.h ${NETDATA_FILES} ${MACOS_PLUGIN_FILES})
+ target_link_libraries (netdata libnetdata ${NETDATA_COMMON_LIBRARIES} ${IOKIT} ${FOUNDATION})
+ target_include_directories(netdata PUBLIC ${NETDATA_COMMON_INCLUDE_DIRS})
+ target_compile_options(netdata PUBLIC ${NETDATA_COMMON_CFLAGS})
+ SET(ENABLE_PLUGIN_CGROUP_NETWORK False)
+ SET(ENABLE_PLUGIN_APPS False)
+ SET(ENABLE_PLUGIN_PERF False)
+ SET(ENABLE_PLUGIN_SLABINFO False)
+ SET(ENABLE_PLUGIN_EBPF False)
+
+ENDIF()
+
+IF(ENABLE_BACKEND_KINESIS OR ENABLE_EXPORTING_PUBSUB OR ENABLE_BACKEND_PROMETHEUS_REMOTE_WRITE)
+ set_property(TARGET netdata PROPERTY CXX_STANDARD 11)
+ set_property(TARGET netdata PROPERTY CMAKE_CXX_STANDARD_REQUIRED ON)
+ENDIF()
+
+IF(IPMI_LIBRARIES)
+ SET(ENABLE_PLUGIN_FREEIPMI True)
+ELSE()
+ SET(ENABLE_PLUGIN_FREEIPMI False)
+ENDIF()
+
+IF(LINUX AND MNL_LIBRARIES AND NFACCT_LIBRARIES)
+ SET(ENABLE_PLUGIN_NFACCT True)
+ELSE()
+ SET(ENABLE_PLUGIN_NFACCT False)
+ENDIF()
+
+IF(XENSTAT_LIBRARIES)
+ SET(ENABLE_PLUGIN_XENSTAT True)
+ELSE()
+ SET(ENABLE_PLUGIN_XENSTAT False)
+ENDIF()
+
+
+# -----------------------------------------------------------------------------
+# netdatacli
+
+add_executable(netdatacli config.h ${NETDATACLI_FILES})
+target_link_libraries (netdatacli libnetdata ${NETDATA_COMMON_LIBRARIES})
+target_include_directories(netdatacli PUBLIC ${NETDATA_COMMON_INCLUDE_DIRS})
+target_compile_options(netdatacli PUBLIC ${NETDATA_COMMON_CFLAGS})
+
+
+# -----------------------------------------------------------------------------
+# apps.plugin
+
+IF(ENABLE_PLUGIN_APPS)
+ message(STATUS "apps.plugin: enabled")
+ add_executable(apps.plugin config.h ${APPS_PLUGIN_FILES})
+ target_link_libraries (apps.plugin libnetdata ${NETDATA_COMMON_LIBRARIES} ${CAP_LIBRARIES})
+ target_include_directories(apps.plugin PUBLIC ${NETDATA_COMMON_INCLUDE_DIRS} ${CAP_INCLUDE_DIRS})
+ target_compile_options(apps.plugin PUBLIC ${NETDATA_COMMON_CFLAGS} ${CAP_CFLAGS_OTHER})
+ELSE()
+ message(STATUS "apps.plugin: disabled")
+ENDIF()
+
+
+# -----------------------------------------------------------------------------
+# freeipmi.plugin
+
+IF(ENABLE_PLUGIN_FREEIPMI)
+ message(STATUS "freeipmi.plugin: enabled")
+ add_executable(freeipmi.plugin config.h ${FREEIPMI_PLUGIN_FILES})
+ target_link_libraries (freeipmi.plugin libnetdata ${NETDATA_COMMON_LIBRARIES} ${IPMI_LIBRARIES})
+ target_include_directories(freeipmi.plugin PUBLIC ${NETDATA_COMMON_INCLUDE_DIRS} ${IPMI_INCLUDE_DIRS})
+ target_compile_options(freeipmi.plugin PUBLIC ${NETDATA_COMMON_CFLAGS} ${IPMI_CFLAGS_OTHER})
+ELSE()
+ message(STATUS "freeipmi.plugin: disabled (depends on libipmimonitoring)")
+ENDIF()
+
+
+# -----------------------------------------------------------------------------
+# nfacct.plugin
+
+IF(ENABLE_PLUGIN_NFACCT)
+ message(STATUS "nfacct.plugin: enabled")
+ add_executable(nfacct.plugin config.h ${NFACCT_PLUGIN_FILES})
+ target_link_libraries (nfacct.plugin libnetdata ${NETDATA_COMMON_LIBRARIES} ${MNL_LIBRARIES} ${NFACCT_LIBRARIES})
+ target_include_directories(nfacct.plugin PUBLIC ${NETDATA_COMMON_INCLUDE_DIRS} ${MNL_INCLUDE_DIRS} ${NFACCT_INCLUDE_DIRS})
+ target_compile_options(nfacct.plugin PUBLIC ${NETDATA_COMMON_CFLAGS} ${MNL_CFLAGS_OTHER} ${NFACCT_CFLAGS_OTHER})
+ELSE()
+ message(STATUS "nfacct.plugin: disabled (requires libmnl and libnetfilter_acct)")
+ENDIF()
+
+
+# -----------------------------------------------------------------------------
+# xenstat.plugin
+
+IF(ENABLE_PLUGIN_XENSTAT)
+ message(STATUS "xenstat.plugin: enabled")
+ add_executable(xenstat.plugin config.h ${XENSTAT_PLUGIN_FILES})
+ target_link_libraries (xenstat.plugin libnetdata ${NETDATA_COMMON_LIBRARIES} ${XENSTAT_LIBRARIES})
+ target_include_directories(xenstat.plugin PUBLIC ${NETDATA_COMMON_INCLUDE_DIRS} ${XENSTAT_INCLUDE_DIRS})
+ target_compile_options(xenstat.plugin PUBLIC ${NETDATA_COMMON_CFLAGS} ${XENSTAT_CFLAGS_OTHER})
+ELSE()
+ message(STATUS "xenstat.plugin: disabled (requires libxenstat)")
+ENDIF()
+
+
+# -----------------------------------------------------------------------------
+# perf.plugin
+
+IF(ENABLE_PLUGIN_PERF)
+ message(STATUS "perf.plugin: enabled")
+ add_executable(perf.plugin config.h ${PERF_PLUGIN_FILES})
+ target_link_libraries (perf.plugin libnetdata ${NETDATA_COMMON_LIBRARIES})
+ target_include_directories(perf.plugin PUBLIC ${NETDATA_COMMON_INCLUDE_DIRS})
+ target_compile_options(perf.plugin PUBLIC ${NETDATA_COMMON_CFLAGS})
+ELSE()
+ message(STATUS "perf.plugin: disabled")
+ENDIF()
+
+
+# -----------------------------------------------------------------------------
+# ebpf_process.plugin
+
+IF(ENABLE_PLUGIN_EBPF)
+ message(STATUS "ebpf.plugin: enabled")
+ add_executable(ebpf.plugin config.h ${EBPF_PROCESS_PLUGIN_FILES})
+ target_link_libraries (ebpf.plugin libnetdata ${NETDATA_COMMON_LIBRARIES})
+ target_include_directories(ebpf.plugin PUBLIC ${NETDATA_COMMON_INCLUDE_DIRS})
+ target_compile_options(ebpf.plugin PUBLIC ${NETDATA_COMMON_CFLAGS})
+ELSE()
+ message(STATUS "ebpf.plugin: disabled")
+ENDIF()
+
+
+# -----------------------------------------------------------------------------
+# slabinfo.plugin
+
+IF(ENABLE_PLUGIN_SLABINFO)
+ message(STATUS "slabinfo.plugin: enabled")
+ add_executable(slabinfo.plugin config.h ${SLABINFO_PLUGIN_FILES})
+ target_link_libraries (slabinfo.plugin libnetdata ${NETDATA_COMMON_LIBRARIES})
+ target_include_directories(slabinfo.plugin PUBLIC ${NETDATA_COMMON_INCLUDE_DIRS})
+ target_compile_options(slabinfo.plugin PUBLIC ${NETDATA_COMMON_CFLAGS})
+ELSE()
+ message(STATUS "slabinfo.plugin: disabled")
+ENDIF()
+
+
+# -----------------------------------------------------------------------------
+# cgroup-network
+
+IF(ENABLE_PLUGIN_CGROUP_NETWORK)
+ message(STATUS "cgroup-network: enabled")
+ add_executable(cgroup-network config.h ${CGROUP_NETWORK_FILES})
+ target_link_libraries (cgroup-network libnetdata ${NETDATA_COMMON_LIBRARIES})
+ target_include_directories(cgroup-network PUBLIC ${NETDATA_COMMON_INCLUDE_DIRS})
+ target_compile_options(cgroup-network PUBLIC ${NETDATA_COMMON_CFLAGS})
+ELSE()
+ message(STATUS "cgroup-network: disabled (requires Linux)")
+ENDIF()
+
+
+# -----------------------------------------------------------------------------
+# Unit tests
+
+if(UNIT_TESTING)
+ message(STATUS "Looking for CMocka which is required for unit testing")
+ find_package(CMocka REQUIRED)
+ include(CTest)
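+  # Typical invocation (a sketch; UNIT_TESTING matches the check above):
+  #   cmake -DUNIT_TESTING=1 . && cmake --build . && ctest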
+
+if(BUILD_TESTING)
+ add_executable(str2ld_testdriver libnetdata/tests/test_str2ld.c)
+ target_link_libraries(str2ld_testdriver libnetdata ${NETDATA_COMMON_LIBRARIES} ${CMOCKA_LIBRARIES})
+ add_test(NAME test_str2ld COMMAND str2ld_testdriver)
+
+ add_executable(storage_number_testdriver libnetdata/storage_number/tests/test_storage_number.c)
+ target_link_libraries(storage_number_testdriver libnetdata ${NETDATA_COMMON_LIBRARIES} ${CMOCKA_LIBRARIES})
+ add_test(NAME test_storage_number COMMAND storage_number_testdriver)
+
+ set(EXPORTING_ENGINE_TEST_FILES
+ exporting/tests/test_exporting_engine.c
+ exporting/tests/test_exporting_engine.h
+ exporting/tests/exporting_fixtures.c
+ exporting/tests/exporting_doubles.c
+ exporting/tests/netdata_doubles.c
+ exporting/tests/system_doubles.c
+ )
+ set(TEST_NAME exporting_engine)
+ set(PROMETHEUS_REMOTE_WRITE_LINK_OPTIONS)
+ set(KINESIS_LINK_OPTIONS)
+ set(PUBSUB_LINK_OPTIONS)
+ set(MONGODB_LINK_OPTIONS)
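+  # The test drivers replace their collaborators at link time via the GNU ld
+  # --wrap option: -Wl,--wrap=foo redirects every call to foo() to __wrap_foo(),
+  # which the cmocka doubles (e.g. exporting/tests/exporting_doubles.c)
+  # implement. A linker supporting --wrap, such as GNU ld, is required.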
+if(ENABLE_BACKEND_PROMETHEUS_REMOTE_WRITE)
+ list(APPEND EXPORTING_ENGINE_FILES ${PROMETHEUS_REMOTE_WRITE_EXPORTING_FILES} ${PROTO_SRCS} ${PROTO_HDRS})
+ list(
+ APPEND PROMETHEUS_REMOTE_WRITE_LINK_OPTIONS
+ -Wl,--wrap=init_write_request
+ -Wl,--wrap=add_host_info
+ -Wl,--wrap=add_label
+ -Wl,--wrap=add_metric
+ )
+endif()
+if(ENABLE_BACKEND_KINESIS)
+ list(APPEND EXPORTING_ENGINE_FILES ${KINESIS_EXPORTING_FILES})
+ list(
+ APPEND KINESIS_LINK_OPTIONS
+ -Wl,--wrap=aws_sdk_init
+ -Wl,--wrap=kinesis_init
+ -Wl,--wrap=kinesis_put_record
+ -Wl,--wrap=kinesis_get_result
+ )
+endif()
+if(ENABLE_EXPORTING_PUBSUB)
+ list(APPEND EXPORTING_ENGINE_FILES ${PUBSUB_EXPORTING_FILES})
+ list(
+ APPEND PUBSUB_LINK_OPTIONS
+ -Wl,--wrap=pubsub_init
+ -Wl,--wrap=pubsub_add_message
+ -Wl,--wrap=pubsub_publish
+ -Wl,--wrap=pubsub_get_result
+ )
+endif()
+if(MONGOC_LIBRARIES)
+ list(APPEND EXPORTING_ENGINE_FILES ${MONGODB_EXPORTING_FILES})
+ list(
+ APPEND MONGODB_LINK_OPTIONS
+ -Wl,--wrap=mongoc_init
+ -Wl,--wrap=mongoc_uri_new_with_error
+ -Wl,--wrap=mongoc_uri_get_option_as_int32
+ -Wl,--wrap=mongoc_uri_set_option_as_int32
+ -Wl,--wrap=mongoc_client_new_from_uri
+ -Wl,--wrap=mongoc_client_set_appname
+ -Wl,--wrap=mongoc_client_get_collection
+ -Wl,--wrap=mongoc_uri_destroy
+ -Wl,--wrap=mongoc_collection_insert_many
+ )
+endif()
+ add_executable(${TEST_NAME}_testdriver ${EXPORTING_ENGINE_TEST_FILES} ${EXPORTING_ENGINE_FILES})
+ target_compile_options(
+ ${TEST_NAME}_testdriver
+ PRIVATE
+ -DUNIT_TESTING
+ )
+ target_include_directories(
+ ${TEST_NAME}_testdriver
+ PUBLIC
+ ${CMAKE_CURRENT_BINARY_DIR}
+ ${NETDATA_COMMON_INCLUDE_DIRS}
+ )
+ target_link_options(
+ ${TEST_NAME}_testdriver
+ PRIVATE
+ -Wl,--wrap=read_exporting_config
+ -Wl,--wrap=init_connectors
+ -Wl,--wrap=mark_scheduled_instances
+ -Wl,--wrap=rrdhost_is_exportable
+ -Wl,--wrap=rrdset_is_exportable
+ -Wl,--wrap=exporting_calculate_value_from_stored_data
+ -Wl,--wrap=prepare_buffers
+ -Wl,--wrap=send_internal_metrics
+ -Wl,--wrap=now_realtime_sec
+ -Wl,--wrap=uv_thread_set_name_np
+ -Wl,--wrap=uv_thread_create
+ -Wl,--wrap=uv_mutex_lock
+ -Wl,--wrap=uv_mutex_unlock
+ -Wl,--wrap=uv_cond_signal
+ -Wl,--wrap=uv_cond_wait
+ -Wl,--wrap=strdupz
+ -Wl,--wrap=info_int
+ -Wl,--wrap=recv
+ -Wl,--wrap=send
+ -Wl,--wrap=connect_to_one_of
+ -Wl,--wrap=create_main_rusage_chart
+ -Wl,--wrap=send_main_rusage
+ -Wl,--wrap=simple_connector_end_batch
+ ${PROMETHEUS_REMOTE_WRITE_LINK_OPTIONS}
+ ${KINESIS_LINK_OPTIONS}
+ ${PUBSUB_LINK_OPTIONS}
+ ${MONGODB_LINK_OPTIONS}
+ )
+ target_link_libraries(${TEST_NAME}_testdriver libnetdata ${NETDATA_COMMON_LIBRARIES} ${CMOCKA_LIBRARIES})
+ add_test(NAME test_${TEST_NAME} COMMAND ${TEST_NAME}_testdriver)
+
+ set(WEB_API_TEST_FILES
+ web/api/tests/web_api.c
+ web/server/web_client.c
+ )
+ add_executable(web_api_testdriver ${WEB_API_TEST_FILES})
+ target_link_options(
+ web_api_testdriver
+ PRIVATE
+ -Wl,--wrap=rrdhost_find_by_hostname
+ -Wl,--wrap=finished_web_request_statistics
+ -Wl,--wrap=config_get
+ -Wl,--wrap=web_client_api_request_v1
+ -Wl,--wrap=rrdhost_find_by_guid
+ -Wl,--wrap=rrdset_find_byname
+ -Wl,--wrap=sql_create_host_by_uuid
+ -Wl,--wrap=rrdset_find
+ -Wl,--wrap=rrdpush_receiver_thread_spawn
+ -Wl,--wrap=debug_int
+ -Wl,--wrap=error_int
+ -Wl,--wrap=info_int
+ -Wl,--wrap=fatal_int
+ )
+ target_link_libraries(web_api_testdriver libnetdata ${NETDATA_COMMON_LIBRARIES} ${CMOCKA_LIBRARIES})
+# add_test(NAME test_web_api COMMAND web_api_testdriver)
+
+ set(VALID_URLS_TEST_FILES
+ web/api/tests/valid_urls.c
+ web/server/web_client.c
+ )
+ add_executable(valid_urls_testdriver ${VALID_URLS_TEST_FILES})
+ target_link_options(
+ valid_urls_testdriver
+ PRIVATE
+ -Wl,--wrap=rrdhost_find_by_hostname
+ -Wl,--wrap=finished_web_request_statistics
+ -Wl,--wrap=config_get
+ -Wl,--wrap=web_client_api_request_v1
+ -Wl,--wrap=rrdhost_find_by_guid
+ -Wl,--wrap=rrdset_find_byname
+ -Wl,--wrap=sql_create_host_by_uuid
+ -Wl,--wrap=rrdset_find
+ -Wl,--wrap=rrdpush_receiver_thread_spawn
+ -Wl,--wrap=debug_int
+ -Wl,--wrap=error_int
+ -Wl,--wrap=info_int
+ -Wl,--wrap=fatal_int
+ -Wl,--wrap=mysendfile
+        )
+  # REMOVE_MYSENDFILE is a preprocessor definition, not a linker flag, so it
+  # must be passed to the compiler rather than the linker.
+  target_compile_options(valid_urls_testdriver PRIVATE -DREMOVE_MYSENDFILE)
+ target_link_libraries(valid_urls_testdriver libnetdata ${NETDATA_COMMON_LIBRARIES} ${CMOCKA_LIBRARIES})
+# add_test(NAME test_valid_urls COMMAND valid_urls_testdriver)
+
+ set(CGROUPS_TEST_FILES
+ collectors/cgroups.plugin/tests/test_cgroups_plugin.c
+ collectors/cgroups.plugin/tests/test_cgroups_plugin.h
+ collectors/cgroups.plugin/tests/test_doubles.c
+ database/rrdlabels.c
+ database/rrd.h
+ )
+ add_executable(cgroups_testdriver ${CGROUPS_TEST_FILES} ${CGROUPS_PLUGIN_FILES})
+ target_link_options(
+ cgroups_testdriver
+ PRIVATE
+ -Wl,--wrap=add_label_to_list
+ )
+ target_link_libraries(cgroups_testdriver libnetdata ${NETDATA_COMMON_LIBRARIES} ${CMOCKA_LIBRARIES})
+ add_test(NAME test_cgroups COMMAND cgroups_testdriver)
+
+ set_target_properties(
+ str2ld_testdriver
+ storage_number_testdriver
+ exporting_engine_testdriver
+ web_api_testdriver
+ valid_urls_testdriver
+ cgroups_testdriver
+ PROPERTIES RUNTIME_OUTPUT_DIRECTORY tests
+ )
+
+endif()
+endif()
diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md
new file mode 100644
index 0000000..12e959d
--- /dev/null
+++ b/CODE_OF_CONDUCT.md
@@ -0,0 +1,80 @@
+<!--
+title: "Netdata Community Code of Conduct"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/CODE_OF_CONDUCT.md
+-->
+
+# Netdata Community Code of Conduct
+
+## Our Pledge
+
+In the interest of fostering an open and welcoming environment, we as
+contributors, maintainers and community members pledge to making participation in our project and
+our community a harassment-free experience for everyone, regardless of age, body
+size, disability, ethnicity, sex characteristics, gender identity and expression,
+level of experience, education, socio-economic status, nationality, personal
+appearance, race, religion, or sexual identity and orientation.
+
+## Our Standards
+
+Examples of behavior that contributes to creating a positive environment
+include:
+
+- Using welcoming and inclusive language
+- Being respectful of differing viewpoints and experiences
+- Gracefully accepting constructive criticism
+- Focusing on what is best for the community
+- Showing empathy towards other community members
+
+Examples of unacceptable behavior by participants include:
+
+- The use of sexualized language or imagery and unwelcome sexual attention or
+ advances
+- Trolling, insulting/derogatory comments, and personal or political attacks
+- Public or private harassment
+- Publishing others' private information, such as a physical or electronic
+ address, without explicit permission
+- Other conduct which could reasonably be considered inappropriate in a
+ professional setting
+
+## Our Responsibilities
+
+Project maintainers are responsible for clarifying the standards of acceptable
+behavior and are expected to take appropriate and fair corrective action in
+response to any instances of unacceptable behavior.
+
+Project maintainers have the right and responsibility to remove, edit, or
+reject comments, commits, code, documentation edits, issues, community posts, and other contributions
+that are not aligned to this Code of Conduct, or to ban temporarily or
+permanently any contributor for other behaviors that they deem inappropriate,
+threatening, offensive, or harmful.
+
+## Scope
+
+This Code of Conduct applies both within project spaces and in public spaces
+when an individual is representing the project or its community. Examples of
+representing a project or community include using an official project e-mail
+address, posting via an official social media account, or acting as an appointed
+representative at an online or offline event. Representation of a project may be
+further defined and clarified by project maintainers.
+
+## Enforcement
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be
+reported by contacting Netdata at info@netdata.cloud. All
+complaints will be reviewed and investigated and will result in a response that
+is deemed necessary and appropriate to the circumstances. The project team is
+obligated to maintain confidentiality with regard to the reporter of an incident.
+Further details of specific enforcement policies may be posted separately.
+
+Project maintainers who do not follow or enforce the Code of Conduct in good
+faith may face temporary or permanent repercussions as determined by other
+members of the project's leadership.
+
+## Attribution
+
+This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,
+available at <https://www.contributor-covenant.org/version/1/4/code-of-conduct.html>
+
+[homepage]: https://www.contributor-covenant.org
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2FCODE_OF_CONDUCT&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
new file mode 100644
index 0000000..b9e4418
--- /dev/null
+++ b/CONTRIBUTING.md
@@ -0,0 +1,241 @@
+<!--
+title: "Contributing"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/CONTRIBUTING.md
+sidebar_label: "Contributing handbook"
+-->
+
+# Contributing
+
+Thank you for considering contributing to Netdata.
+
+We love to receive contributions. Maintaining a platform for monitoring everything imaginable requires a broad understanding of a plethora of technologies, systems and applications. We rely on community contributions and user feedback to continue providing the best monitoring solution out there.
+
+There are many ways to contribute, with varying skill requirements, explained in detail in the following sections.
+Specific GitHub issues we need help with can be seen [here](https://github.com/netdata/netdata/issues?q=is%3Aissue+is%3Aopen+sort%3Aupdated-desc+label%3A%22help+wanted%22). Some of them are also labeled as "good first issue".
+
+## All Netdata Users
+
+### Give Netdata a GitHub star
+
+This is the minimum open-source users should contribute back to the projects they use. GitHub stars help the project gain visibility and stand out. So, if you use Netdata, consider pressing that button. **It really matters**.
+
+### Join the Netdata Community
+
+We have launched a [discussion board](https://community.netdata.cloud) where you can find many of us.
+
+### Spread the word
+
+Community growth allows the project to attract new talent willing to contribute. This talent then develops new features and improves the project. These new features and improvements attract more users, and so on: it is a loop. So, post about Netdata, present it at local meetups you attend, and let your online social networks (Twitter, Facebook, Reddit, etc.) know you are using it. **The more people involved, the faster the project evolves**.
+
+### Provide feedback
+
+Is there anything that bothers you about Netdata? Did you experience an issue while installing it or using it? Would you like to see it evolve to match your requirements? Let us know by making a post at the [Netdata Community](https://community.netdata.cloud/category/4/feedback) to discuss it or [open a GitHub issue](https://github.com/netdata/netdata/issues). Feedback is very important for open-source projects. We can't commit to doing everything, but your feedback influences [our roadmap](https://community.netdata.cloud/category/1/announcements-and-roadmap) significantly. **We rely on your feedback to make Netdata better**.
+
+### Translate some documentation
+
+The [Netdata localization project](https://github.com/netdata/localization) contains instructions on how to provide translations for parts of our documentation. Translating the entire documentation is a daunting task, but you can contribute as much as you like, even a single file. The Chinese translation effort has already begun and we are looking forward to more contributions.
+
+### Sponsor a part of Netdata
+
+Netdata is a complex system, with many integrations for the various collectors, backends and notification endpoints. As a result, we rely on help from "sponsors", a concept similar to "power users" or "product owners". To become a sponsor, just let us know in any GitHub issue and we will record your GitHub username in a "CONTRIBUTORS.md" in the appropriate directory.
+
+#### Sponsor a collector
+
+Netdata is all about simplicity and meaningful presentation. A "sponsor" for a collector does the following:
+
+- Assists the devs with feedback on the charts.
+- Specifies the alarms that would make sense for each metric.
+- When the implementation passes QA, tests the implementation in production.
+- Uses the charts and alarms in their day-to-day work and provides additional feedback.
+- Requests additional improvements as things change (e.g. new versions of an API are available).
+
+#### Sponsor an exporting connector
+
+We already support various [exporting connectors](/exporting/README.md), and we intend to support more. A "sponsor" for a connector:
+
+- Suggests ways in which the information in Netdata could best be exposed to the particular endpoint, to facilitate meaningful presentation.
+- When the implementation passes QA, tests the implementation in production.
+- Uses the backend in their day-to-day work and provides additional feedback after the backend is delivered.
+- Requests additional improvements as things change (e.g. new versions of the backend API are available).
+
+#### Sponsor a notification method
+
+Netdata delivers alarms via various [notification methods](health/notifications). A "sponsor" for a notification method:
+
+- Points the devs to the documentation for the API and identifies any unusual features of interest (e.g. the ability in Slack to send a notification either to a channel or to a user).
+- Uses the notification method in production and provides feedback.
+- Requests additional improvements as things change (e.g. new versions of the API are available).
+
+## Experienced Users
+
+### Help other users
+
+As the project grows, an increasing share of our time is spent supporting this community of users: answering questions, helping users understand how Netdata works, and helping them find their way with it. Helping other users is crucial. It allows the developers and maintainers of the project to focus on improving it.
+
+### Improve documentation
+
+Our documentation is in need of constant improvement and expansion. As Netdata's features grow, we need to clearly explain how each feature works and document all the possible configurations. And as Netdata's community grows, we need to improve existing documentation to make it more accessible to people of all skill levels.
+
+We also need to produce beginner-level tutorials on using Netdata to monitor common applications, web servers, and more.
+
+Start with the [guide for contributing to documentation](/docs/contributing/contributing-documentation.md), and then review the [documentation style guide](/docs/contributing/style-guide.md) for specifics on how we write our documentation.
+
+Don't be afraid to submit a pull request with your corrections or additions! We need a lot of help and are willing to guide new contributors through the process.
+
+## Developers
+
+We expect most contributions to be for new data collection plugins. You can read about how external plugins work [here](collectors/plugins.d/). Additional instructions are available for [Node.js plugins](collectors/node.d.plugin) and [Python plugins](collectors/python.d.plugin).
+
+Of course we appreciate contributions for any other part of the Netdata agent, including the [daemon](daemon), [backends for long term archiving](backends/), innovative ways of using the [REST API](web/api) to create cool [Custom Dashboards](web/gui/custom/) or to include Netdata charts in other applications, similarly to what can be done with [Confluence](web/gui/confluence/).
+
+If you are working on the C source code, please be aware that we use a standard build configuration. This
+is meant to keep the source tree clean and free of warnings. When you are preparing to work on the code:
+```
+CFLAGS="-O1 -ggdb -Wall -Wextra -Wformat-signedness -fstack-protector-all -DNETDATA_INTERNAL_CHECKS=1 -D_FORTIFY_SOURCE=2 -DNETDATA_VERIFY_LOCKS=1" ./netdata-installer.sh --disable-lto --dont-wait
+```
+
+Typically we will enable LTO during production builds. The reasons for configuring it this way are:
+
+| CFLAG / argument | Reasoning |
+| ---------------- | --------- |
+| `-O1` | This makes the debugger easier to use, as it disables optimisations that break the relationship between the source code and the state seen in the debugger |
+| `-ggdb` | Enable debugging symbols in gdb format (this also works with clang / lldb) |
+| `-Wall -Wextra -Wformat-signedness` | Really, definitely, absolutely all the warnings |
+| `-DNETDATA_INTERNAL_CHECKS=1` | This enables the debug.log and turns on the macro that outputs to it |
+| `-D_FORTIFY_SOURCE=2` | Enable buffer-overflow checks on string-processing functions |
+| `-DNETDATA_VERIFY_LOCKS=1` | Enable extra checks and debug |
+| `--disable-lto` | We enable LTO for production builds, but disable it during development, as it can hide errors about missing symbols that have been pruned. |
+
+Before submitting a PR we run through this checklist:
+
+* Compilation warnings
+* `valgrind`
+* `./netdata-installer.sh`
+* `make dist`
+* `packaging/makeself/build-x86_64-static.sh`
+* `clang-format -style=file`
+
+Please be aware that the linting pass at the end is currently messy as we are transitioning between code styles
+across most of our code-base, but we prefer new contributions that match the linting style.
+
+### Contributions Ground Rules
+
+#### Code of Conduct and CLA
+
+We expect all contributors to abide by the [Contributor Covenant Code of Conduct](CODE_OF_CONDUCT.md). For a pull request to be accepted, you will also need to accept the [Netdata contributors license agreement](CONTRIBUTORS.md), as part of the PR process.
+
+#### Performance and efficiency
+
+Everything in Netdata is about efficiency. We need Netdata to always be the most lightweight monitoring solution available. We will not merge PRs that are not optimal in resource utilization and efficiency.
+
+Of course, there are cases where such technical excellence is either not reasonable or not feasible. In these cases, we may require the submitted feature or code to be disabled by default.
+
+#### Meaningful metrics
+
+Unlike other monitoring solutions, Netdata requires all collected metrics to have some structure attached to them. So, Netdata metrics have a name and units, belong to a chart that has a title, a family and a context, belong to an application, and so on.
+
+This structure is what makes Netdata different. Most other monitoring solutions collect bulk metrics as name-value pairs and then expect their users to give meaning to these metrics during visualization. This does not work. It is neither practical nor reasonable to hand someone 2000 metrics and expect them to visualize the data in a meaningful way.
+
+So, Netdata requires all metrics to have a meaning at the time they are collected. We will not merge PRs that loosely collect just a "bunch of metrics", but we are very keen to help you fix this.
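+
+As a rough illustration, this structure is visible in the protocol used by [external plugins](collectors/plugins.d/): a chart is declared with its title, units, family and context before any values are sent. A minimal sketch, with made-up chart and dimension names:
+
+```
+CHART example.random '' 'A random number' 'value' random random.number line 90000 1
+DIMENSION random1 '' absolute 1 1
+BEGIN example.random
+SET random1 = 42
+END
+```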
+
+#### Automated Testing
+
+Netdata is too large an application to have end-to-end automated testing. But automated testing is required for its crucial functions.
+
+Generally, all pull requests should be coupled with automated testing scenarios. However, since we do not currently have a framework in place for testing every little bit of it, we require automated tests for the parts of Netdata that seem too risky to change without them.
+
+Of course, manual testing is always required.
+
+#### Netdata is a distributed application
+
+Netdata is a distributed monitoring application. A few basic features can become quite complicated for such applications. We may reject features that alter or influence the nature of Netdata, though we usually discuss the requirements with contributors and help them adapt their code to be better suited for Netdata.
+
+#### Operating systems supported
+
+Netdata should be running everywhere, on every production system out there.
+
+Although we focus on **supported operating systems**, we still want Netdata to run even on non-supported systems. This, of course, may require some more manual work from the users (to prepare their environment, or enable certain flags, etc).
+
+If your contribution limits the number of supported operating systems, we will ask you to improve it.
+
+#### Documentation
+
+Your contributions should be bundled with related documentation to help users understand how to use the features you introduce.
+
+#### Maintenance
+
+When you contribute code to Netdata, you are automatically accepting that you will be responsible for maintaining that code in the future. So, if users need help, or report bugs, we will invite you to the related GitHub issues to help them, or to fix the issues or bugs in your contributions.
+
+#### Code Style
+
+The single most important rule when writing code is this: *check the surrounding code and try to imitate it*. [Reference](https://developer.gnome.org/programming-guidelines/stable/c-coding-style.html.en)
+
+We use several different languages and have had contributions from several people with different styles. When in doubt, you can check similar existing code.
+
+For C contributions in particular, we try to respect the [Linux kernel style](https://www.kernel.org/doc/html/v4.10/process/coding-style.html), with the following exceptions:
+
+- Use 4 space indentation instead of 8
+- We occasionally have multiple statements on a single line (e.g. `if (a) b;`)
+- Allow max line length of 120 chars
+- Allow opening brace at the end of a function declaration: `function() {`.
+- Allow trailing comments
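+
+For illustration, here is a minimal sketch that follows these conventions (the names are hypothetical, not taken from the Netdata sources):
+
+```c
+struct chart {
+    int enabled;                            // trailing comments are allowed
+};
+
+// 4-space indentation, opening brace at the end of the function declaration:
+static int count_enabled_charts(struct chart *charts, int max) {
+    int enabled = 0;
+
+    for (int i = 0; i < max; i++)
+        if (charts[i].enabled) enabled++;   // short one-line statements like `if (a) b;` are tolerated
+
+    return enabled;
+}
+```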
+
+### Your first pull request
+
+There are several guides for pull requests, such as the following:
+
+- <https://thenewstack.io/getting-legit-with-git-and-github-your-first-pull-request/>
+- <https://github.com/firstcontributions/first-contributions#first-contributions>
+
+However, it's not always that simple. Our [PR approval process](#pr-approval-process) and the several merges we do every day may cause your fork to fall behind the Netdata master. If you worked on something that has changed in the meantime, you will be required to do a git rebase to bring your fork to the correct state. A very easy-to-follow guide on how to do it, without learning all the intricacies of GitHub, can be found [here](https://medium.com/@ruthmpardee/git-fork-workflow-using-rebase-587a144be470).
+
+One thing you will need to do only for your first pull request in Netdata is to accept the CLA. Until you do, the automated check for the CLA acceptance will be showing as failed.
+
+#### PR Guidelines
+
+PR Titles:
+
+- Must follow the [Imperative Mood](https://en.wikipedia.org/wiki/Imperative_mood)
+- Must be no more than ~50 characters (_longer description in the PR_)
+
+PR Descriptions:
+
+- Must clearly contain sufficient information regarding the content of the PR, including area/component, test plan, etc.
+- Must reference an existing issue.
+
+Some PR title examples:
+
+- Fix bug in Netdata installer for FreeBSD 11.2
+- Update docs for other installation methods
+- Add new collector for Prometheus endpoints
+- Add 4.19 Kernel variant for eBPF
+- Fix typo in README
+- Refactor code for better maintainability
+- etc
+
+The key idea here is to start with a verb that describes what you are doing in the PR.
+
+For good examples have a look at other projects like:
+
+- <https://github.com/facebook/react/commits/master>
+- <https://github.com/tensorflow/tensorflow/commits/master>
+- <https://github.com/vuejs/vue/commits/dev>
+- <https://github.com/microsoft/vscode/commits/master>
+- Also see the Linux kernel and Git projects as good examples.
+
+#### Commit messages when PRs are merged
+
+When a PR gets squashed and merged into master, the title of the commit message (first line) must be the PR title
+followed by the PR number.
+
+The body of the commit message should be a short description of the work, preferably taken from the connected issue.
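+
+For example, a squashed commit for a hypothetical PR (the title and PR number below are made up):
+
+```
+Add new collector for Prometheus endpoints (#1234)
+
+Implements a collector that scrapes Prometheus endpoints, as described in the connected issue.
+```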
+
+### PR approval process
+
+Each PR automatically [requires a review](https://help.github.com/articles/about-required-reviews-for-pull-requests/) from the code owners specified in `.github/CODEOWNERS`. Depending on the files contained in your PR, several people may be needed to approve it.
+
+We also have a series of automated checks running, such as linters to check code quality and QA tests. If you get an error or warning in any of those checks, you will need to click on the link included in the check to identify the root cause, so you can fix it.
+
+If you wish to open a PR but are not quite ready for the code to be reviewed, you can open it as a Draft PR (click the dropdown on the **Create PR** button and select **Draft PR**). This will prevent reviewers from being notified initially so that you can keep working on the PR. Once you're ready, you can click the **Ready for Review** button near the bottom of the PR to mark it ready and notify the relevant reviewers.
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2FCONTRIBUTING&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/CONTRIBUTORS.md b/CONTRIBUTORS.md
new file mode 100644
index 0000000..7d7d521
--- /dev/null
+++ b/CONTRIBUTORS.md
@@ -0,0 +1,136 @@
+<!--
+SPDX-License-Identifier: GPL-3.0-or-later
+-->
+
+# Netdata contributors license agreement
+
+**Thank you for contributing to Netdata!**
+
+This agreement is part of the legal framework of the open-source ecosystem
+that adds some red tape, but protects both the contributor and the project.
+
+To understand why this is needed, please read [a well-written chapter from
+Karl Fogel’s Producing Open Source Software on CLAs](https://producingoss.com/en/contributor-agreements.html).
+
+By signing this agreement, you do not change your rights to use your own
+contributions for any other purpose.
+
+## copyright license
+
+The Contributor (_you_) grants Netdata Inc. a perpetual, worldwide, non-exclusive,
+no-charge, royalty-free, irrevocable copyright license to reproduce,
+prepare derivative works of, publicly display, publicly perform, sublicense,
+and distribute his contributions and such derivative works.
+
+## copyright transfer
+
+The Contributor (_you_) hereby assigns Netdata Inc. copyright in his
+contributions, to be licensed under the same terms as the rest of the code.
+
+> _Note: this means we may re-license Netdata (your contributions included)
+> any way we see fit, without asking your permission.
+> We intend to keep the Netdata agent forever FOSS.
+> But open-source licenses have significant differences and in our attempt to
+> help Netdata grow we may have to distribute it under a different license.
+> For example, CNCF, the Cloud Native Computing Foundation, requires Netdata
+> to be licensed under Apache-2.0 for it to be accepted as a member of the
+> Foundation. We want to be free to do it._
+
+## original work
+
+The Contributor (_you_) represents that each of his contributions is his
+original creation and that he is legally entitled to grant the above license.
+
+> _Note: if you are committing third party code, please make sure the third party
+> license or any other restrictions are also included with your commits.
+> Netdata includes many third party libraries and tools and this is not a
+> problem, provided that the license of the third party code is compatible with
+> the one we use for Netdata._
+
+## signature
+
+Since Sep 17th 2018, we use <https://cla-assistant.io/netdata/netdata> for signing the CLA, on all pull requests.
+Old contributors can sign the CLA at any time using this link.
+
+## HISTORICAL SIGNATURES
+
+(they have been imported to <https://cla-assistant.io/netdata/netdata> already)
+
+The Contributor (_you_) signs this agreement by adding his personal data in
+this document and committing it to the project repo
+(the same way contributions are submitted to the project).
+
+By signing once, all contributions (past and future) of The Contributor (_you_),
+are subject to this agreement.
+
+> _Note: so you have to:_
+>
+> 1. add your github username and name in this file
+> 2. commit it to the repo with a PR, using the same github username, or include this change in your first PR.
+
+# Netdata contributors
+
+This is the list of contributors that have signed this agreement:
+
+|username|name|email (optional)|
+|:------:|:--:|:---------------|
+|@lets00|Luís Eduardo|leduardo@lsd.ufcg.edu.br|
+|@ktsaou|Costa Tsaousis|costa@tsaousis.gr|
+|@tycho|Steven Noonan|steven@uplinklabs.net|
+|@philwhineray|Phil Whineray||
+|@paulfantom|Paweł Krupa|pawel@krupa.net.pl|
+|@Ferroin|Austin S. Hemmelgarn|ahferroin7@gmail.com|
+|@glensc|Elan Ruusamäe||
+|@l2isbad|Ilya Mashchenko|ilyamaschenko@gmail.com|
+|@rlefevre|Rémi Lefèvre||
+|@vlvkobal|Vladimir Kobal|vlad@prokk.net|
+|@simonnagl|Simon Nagl||
+|@manosf|Emmanouil Fokas|manosf@protonmail.com|
+|@user501254|Ashesh Singh|user501254@gmail.com|
+|@t-h-e|Stefan Forstenlechner||
+|@facetoe|Facetoe||
+|@ntlug|Christopher Cox|ccox@endlessnow.com|
+|@alonbl|Alon Bar-Lev|alon.barlev@gmail.com|
+|@Wing924|Wei He|weihe924stephen@gmail.com|
+|@NeonSludge|Kirill Buev|kirill.buev@gmx.com|
+|@kmlucy|Kyle Lucy|kmlucy@gmail.com|
+|@RicardoSette|Ricardo Sette|ricardosette@freebsdbrasil.com.br|
+|@383c57|Shinichi Tagashira||
+|@davidak|David Kleuker|netdata-contributors+vyff@davidak.de|
+|@ccremer|Christian Cremer||
+|@jimcooley|Jim Cooley|jim.cooley@healthvana.com|
+|@Chocobo1|Mike Tzou||
+|@vinyasmusic|Vinyas Malagaudanavar|vinyasmusic@gmail.com|
+|@cosmix|Dimosthenis Kaponis||
+|@shadycuz|Levi Blaney|shadycuz+spam@gmail.com|
+|@Flums|Philip Gabrielsen|philip@digno.no|
+|@domschl|Dominik Schlösser|dominik.schloesser@gmail.com|
+|@tioumen|Guillaume Hospital||
+|@arch273|Jacob Ayres||
+|@x4FF3|David Fuellgraf||
+|@jasonwbarnett|Jason Barnett||
+|@ecowed|Ed Wade||
+|@wungad|Rob Man||
+|@rda0|Sven Mäder|maeder@phys.ethz.ch|
+|@alibo|Ali Borhani|aliborhani1@gmail.com|
+|@Nani-o|Sofiane Medjkoune|sofiane@medjkoune.fr|
+|@n0guest|Evgeniy K.|ask@osshelp.ru|
+|@amichelic|Adalbert Michelic||
+|@abalabahaha|abalabahaha|hi@abal.moe|
+|@illes|Illes S.||
+|@plasticrake|Patrick Seal||
+|@jonfairbanks|Jon Fairbanks||
+|@pjz|Paul Jimenez|pj@place.org|
+|@jgrossiord|Julien Grossiord|julien@grossiord.net|
+|@pohzipohzi|Poh Zi How||
+|@vladmovchan|Vladyslav Movchan|vladislav.movchan@gmail.com|
+|@gmosx|George Moschovitis||
+|@adherzog|Adam Herzog|adam@adamherzog.com|
+|@skrzyp1|Jerzy S.||
+|@akwan|Alan Kwan||
+|@underhood|Timotej Šiškovič||
+|@stevenh|Steven Hartland|steven.hartland@multiplay.co.uk|
+|@dpsy4|Dave Sitek||
+|@devinrsmith|Devin Smith||
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2FCONTRIBUTORS&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/Dockerfile b/Dockerfile
new file mode 120000
index 0000000..96ebbda
--- /dev/null
+++ b/Dockerfile
@@ -0,0 +1 @@
+./packaging/docker/Dockerfile \ No newline at end of file
diff --git a/Dockerfile.test b/Dockerfile.test
new file mode 100644
index 0000000..df57ea0
--- /dev/null
+++ b/Dockerfile.test
@@ -0,0 +1,74 @@
+# NOTE: This Dockerfile (`Dockerfile.test`) should only be used for dev/testing
+# and *NOT* in production. Use the top-level `Dockerfile` which points to
+# `./packaging/docker/Dockerfile`.
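+#
+# A typical local test build and run might look like this (the image tag is
+# arbitrary, chosen only for illustration):
+#   docker build -f Dockerfile.test -t netdata:test .
+#   docker run -p 19999:19999 netdata:test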
+#
+# TODO: Create a netdata/package-builder:alpine9
+#FROM netdata/package-builder:alpine AS build
+FROM alpine:3.9 AS build
+
+# Install Dependencies
+RUN apk add --no-cache -U alpine-sdk bash curl libuv-dev zlib-dev \
+ util-linux-dev libmnl-dev gcc make git autoconf \
+ automake pkgconfig python logrotate openssl-dev \
+ cmake
+
+# Pass optional ./netdata-installer.sh args with --build-arg INSTALLER_ARGS=...
+ARG INSTALLER_ARGS=""
+
+# Copy Sources
+# Can also bind-mount sources with:
+# $ docker run -v $PWD:/netdata
+
+WORKDIR /netdata
+COPY . .
+
+# Build
+RUN ./netdata-installer.sh --dont-wait --dont-start-it --disable-go "${INSTALLER_ARGS}"
+
+FROM alpine:3.9 AS runtime
+
+# Install runtime dependencies
+RUN apk --no-cache -U add curl bash libuv zlib util-linux libmnl python
+
+# Create netdata user/group
+RUN addgroup -S netdata && \
+ adduser -D -S -h /var/empty -s /bin/false -G netdata netdata
+
+# Copy binary from build layer
+COPY --from=build /usr/sbin/netdata /usr/sbin/netdata
+
+# Copy configuration files from build layer
+COPY --from=build /etc/netdata/ /etc/netdata/
+COPY --from=build /usr/lib/netdata/ /usr/lib/netdata/
+
+# Copy assets from build layer
+COPY --from=build /usr/share/netdata/ /usr/share/netdata/
+
+# Create some directories netdata needs
+RUN mkdir -p \
+ /etc/netdata \
+ /var/log/netdata \
+ /var/lib/netdata \
+ /var/cache/netdata \
+ /usr/lib/netdata/conf.d \
+ /usr/libexec/netdata/plugins.d
+
+# Fix permissions/ownerships
+RUN chown -R netdata:netdata \
+ /etc/netdata/ \
+ /usr/lib/netdata/ \
+ /usr/share/netdata/ \
+ /var/log/netdata \
+ /var/lib/netdata \
+ /var/cache/netdata \
+ /usr/libexec/netdata
+
+VOLUME /etc/netdata
+VOLUME /var/lib/netdata
+VOLUME /var/log/netdata
+
+EXPOSE 19999/tcp
+
+USER netdata
+
+CMD ["/usr/sbin/netdata", "-D"]
diff --git a/HISTORICAL_CHANGELOG.md b/HISTORICAL_CHANGELOG.md
new file mode 100644
index 0000000..16ef786
--- /dev/null
+++ b/HISTORICAL_CHANGELOG.md
@@ -0,0 +1,650 @@
+netdata (1.10.0) - 2018-03-27
+
+ Please check full changelog at github.
+ <https://github.com/netdata/netdata/releases>
+
+netdata (1.9.0) - 2017-12-17
+
+ Please check full changelog at github.
+ <https://github.com/netdata/netdata/releases>
+
+netdata (1.8.0) - 2017-09-17
+
+ This is mainly a bugfix release.
+ Please check full changelog at github.
+
+netdata (1.7.0) - 2017-07-16
+
+- netdata is still spreading fast
+
+ we are at 320.000 users and 132.000 servers
+
+ Almost 100k new users, 52k new installations and 800k docker pulls
+ since the previous release, 4 and a half months ago.
+
+ netdata user base grows at about 1000 new users and 600 new servers
+ per day. Thank you. You are awesome.
+
+- The next release (v1.8) will be focused on providing a global health
+ monitoring service, for all netdata users, for free.
+
+- netdata is now a (very fast) fully featured statsd server and the
+ only one with automatic visualization: push a statsd metric and hit
+ F5 on the netdata dashboard: your metric visualized. It also supports
+ synthetic charts, defined by you, so that you can correlate and
+ visualize your application the way you like it.
+
+- netdata got new installation options
+ It is now easier than ever to install netdata - we also distribute a
+ statically linked netdata x86_64 binary, including key dependencies
+ (like bash, curl, etc) that can run everywhere a Linux kernel runs
+ (CoreOS, CirrOS, etc).
+
+- metrics streaming and replication has been improved significantly.
+ All known issues have been solved and key enhancements have been added.
+ Headless collectors and proxies can now send metrics to backends when
+ data source = as collected.
+
+- backends have got quite a few enhancements, including host tags and
+ metrics filtering at the netdata side;
+ prometheus support has been re-written to utilize more prometheus
+ features and provide more flexibility and integration options.
+
+- netdata now monitors ZFS (on Linux and FreeBSD), ElasticSearch,
+ RabbitMQ, Go applications (via expvar), ipfw (on FreeBSD 11), samba,
+ squid logs (with web_log plugin).
+
+- netdata dashboard loading times have been improved significantly
+ (hit F5 a few times on a netdata dashboard - it is now amazingly fast),
+ to support dashboards with thousands of charts.
+
+- netdata alarms now support custom hooks, so you can run whatever you
+ like in parallel with netdata alarms.
+
+- As usual, this release brings dozens of more improvements, enhancements
+ and compatibility fixes.
+
+netdata (1.6.0) - 2017-03-20
+
+- birthday release: 1 year netdata
+
+ netdata was first published on March 30th, 2016.
+ It has been a crazy year since then:
+
+ 225.000 unique netdata users
+ currently, at 1.000 new unique users per day
+
+ 80.000 unique netdata installations
+  currently, at 500 new installations per day
+
+ 610.000 docker pulls on docker hub
+
+ 4.000.000 netdata sessions served
+ currently, at 15.000 sessions served per day
+
+ 20.000 github stars
+
+ ```
+ Thank you!
+ You are awesome!
+ ```
+
+- central netdata is here
+
+ This is the first release that supports real-time streaming of
+ metrics between netdata servers.
+
+ netdata can now be:
+
+ - autonomous host monitoring
+ (like it always has been)
+
+ - headless data collector
+ (collect and stream metrics in real-time to another netdata)
+
+ - headless proxy
+ (collect metrics from multiple netdata and stream them to another netdata)
+
+ - store and forward proxy
+ (like headless proxy, but with a local database)
+
+ - central database
+ (metrics from multiple hosts are aggregated)
+
+ metrics databases can be configured on all nodes and each node maintaining
+ a database may have a different retention policy and possibly run
+ (even different) alarms on them.
+
+- monitoring ephemeral nodes
+
+ netdata now supports monitoring autoscaled ephemeral nodes,
+ that are started and stopped on demand (their IP is not known).
+
+ When the ephemeral nodes start streaming metrics to the central
+  netdata, the central netdata will register them at the "my-netdata"
+ menu on the dashboard.
+
+ For more information check:
+ <https://github.com/netdata/netdata/tree/master/streaming#monitoring-ephemeral-nodes>
+
+- monitoring ephemeral containers and VM guests
+
+ netdata now cleans up container, guest VM, network interfaces and mounted
+ disk metrics, disabling automatically their alarms too.
+
+ For more information check:
+ <https://github.com/netdata/netdata/tree/master/collectors/cgroups.plugin#monitoring-ephemeral-containers>
+
+- apps.plugin ported for FreeBSD
+
+ @vlvkobal has ported "apps.plugin" to FreeBSD. netdata can now provide
+ "Applications", "Users" and "User Groups" on FreeBSD.
+
+- web_log plugin
+
+ @l2isbad has done a wonderful job creating a unified web log parsing plugin
+ for all kinds of web server logs. With it, netdata provides real-time
+ performance information and health monitoring alarms for web applications
+ and web sites!
+
+ For more information check:
+ <https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/web_log#web_log>
+
+- backends
+
+ netdata can now archive metrics to `JSON` backends
+ (both push, by @lfdominguez, and pull modes).
+
+- IPMI monitoring
+
+ netdata now has an IPMI plugin (based on freeipmi)
+ for monitoring server hardware.
+
+ The plugin creates (up to) 8 charts:
+
+ 1. number of sensors by state
+ 2. number of events in SEL
+ 3. Temperatures CELSIUS
+ 4. Temperatures FAHRENHEIT
+ 5. Voltages
+ 6. Currents
+ 7. Power
+ 8. Fans
+
+ It also supports alarms (including the number of sensors in critical state).
+
+ For more information, check:
+ <https://github.com/netdata/netdata/tree/master/collectors/freeipmi.plugin>
+
+- new plugins
+
+  @l2isbad builds python data collection plugins for netdata at a wonderful
+ rate! He rocks!
+
+ - **web_log** for monitoring in real-time all kinds of web server log files @l2isbad
+ - **freeipmi** for monitoring IPMI (server hardware)
+ - **nsd** (the [name server daemon](https://www.nlnetlabs.nl/projects/nsd/)) @383c57
+ - **mongodb** @l2isbad
+ - **smartd_log** (monitoring disk S.M.A.R.T. values) @l2isbad
+
+- improved plugins
+
+ - **nfacct** reworked and now collects connection tracker information using netlink.
+ - **ElasticSearch** re-worked @l2isbad
+ - **mysql** re-worked to allow faster development of custom mysql based plugins (MySQLService) @l2isbad
+ - **SNMP**
+ - **tomcat** @NMcCloud
+ - **ap** (monitoring hostapd access points)
+ - **php_fpm** @l2isbad
+ - **postgres** @l2isbad
+ - **isc_dhcpd** @l2isbad
+ - **bind_rndc** @l2isbad
+ - **numa**
+ - **apps.plugin** improvements and freebsd support @vlvkobal
+ - **fail2ban** @l2isbad
+ - **freeradius** @l2isbad
+ - **nut** (monitoring UPSes)
+ - **tc** (Linux QoS) now works on qdiscs instead of classes for the same result (a lot faster) @t-h-e
+ - **varnish** @l2isbad
+
+- new and improved alarms
+ - **web_log**, many alarms to detect common web site/API issues
+ - **fping**, alarms to detect packet loss, disconnects and unusually high latency
+ - **cpu**, cpu utilization alarm now ignores `nice`
+
+- new and improved alarm notification methods
+ - **HipChat** to allow hosted HipChat @frei-style
+ - **discordapp** @lowfive
+
+- dashboard improvements
+ - dashboard now works on HiDPi screens
+ - dashboard now shows version of netdata
+ - dashboard now resets charts properly
+ - dashboard updated to use latest gauge.js release
+
+- other improvements
+ - thanks to @rlefevre netdata now uses a lot of different high resolution system clocks.
+
+ netdata has received a lot more improvements from many more contributors!
+
+ Thank you all!
+
+netdata (1.5.0) - 2017-01-22
+
+- yet another release that makes netdata the fastest
+ netdata ever!
+
+- netdata runs on FreeBSD, FreeNAS and MacOS !
+
+ Vladimir Kobal (@vlvkobal) has done a magnificent work
+ porting netdata to FreeBSD and MacOS.
+
+ Everything works: cpu, memory, disks performance, disks space,
+ network interfaces, interrupts, IPv4 metrics, IPv6 metrics
+ processes, context switches, softnet, IPC queues,
+ IPC semaphores, IPC shared memory, uptime, etc. Wow!
+
+- netdata supports data archiving to backend databases:
+
+ - Graphite
+ - OpenTSDB
+ - Prometheus
+
+ and of course all the compatible ones
+ (KairosDB, InfluxDB, Blueflood, etc)
+
+- new plugins:
+
+ Ilya Mashchenko (@l2isbad) has created most of the python
+ data collection plugins in this release !
+
+ - systemd Services (using cgroups!)
+ - FPing (yes, network latency in netdata!)
+ - postgres databases @facetoe, @moumoul
+  - Varnish disk cache (v3 and v4) @l2isbad
+ - ElasticSearch @l2isbad
+ - HAproxy @l2isbad
+ - FreeRadius @l2isbad, @lgz
+ - mdstat (RAID) @l2isbad
+ - ISC bind (via rndc) @l2isbad
+ - ISC dhcpd @l2isbad, @lgz
+ - Fail2Ban @l2isbad
+ - OpenVPN status log @l2isbad, @lgz
+ - NUMA memory @tycho
+ - CPU Idle @tycho
+ - gunicorn log @deltaskelta
+ - ECC memory hardware errors
+ - IPC semaphores
+ - uptime plugin (with a nice badge too)
+
+- improved plugins:
+
+ - netfilter conntrack
+ - mysql (replication) @l2isbad
+ - ipfs @pjz
+ - cpufreq @tycho
+ - hddtemp @l2isbad
+ - sensors @l2isbad
+ - nginx @leolovenet
+ - nginx_log @paulfantom
+ - phpfpm @leolovenet
+ - redis @leolovenet
+ - dovecot @justohall
+ - cgroups
+ - disk space
+ - apps.plugin
+ - /proc/interrupts @rlefevre
+ - /proc/softirqs @rlefevre
+ - /proc/vmstat (system memory charts)
+ - /proc/net/snmp6 (IPv6 charts)
+ - /proc/self/meminfo (system memory charts)
+ - /proc/net/dev (network interfaces)
+ - tc (linux QoS)
+
+- new/improved alarms:
+
+ - MySQL / MariaDB alarms (incl. replication)
+ - IPFS alarms
+ - HAproxy alarms
+ - UDP buffer alarms
+ - TCP AttemptFails
+ - ECC memory alarms
+ - netfilter connections alarms
+ - SNMP
+
+- new alarm notifications:
+
+ - messagebird.com @tech-no-logical
+ - pagerduty.com @jimcooley
+ - pushbullet.com @tperalta82
+ - twilio.com @shadycuz
+ - HipChat
+ - kafka
+
+- shell integration
+
+ - shell scripts can now query netdata easily!
+
+- dashboard improvements:
+ - dashboard is now faster on firefox, safari, opera, edge
+ (edge is still the slowest)
+  - dashboard now has slightly bigger fonts
+ - SHIFT + mouse wheel to zoom charts, works on all browsers
+ - perfect-scrollbar on the dashboard
+ - dashboard 4K resolution fixes
+ - dashboard compatibility fixes for embedding charts in
+ third party web sites
+ - charts on custom dashboards can have common min/max
+ even if they come from different netdata servers
+ - alarm log is now saved and loaded back so that
+ the alarm history is available at the dashboard
+
+- other improvements:
+  - python.d.plugin has received way too many improvements
+ from many contributors!
+ - charts.d.plugin can now be forked to support
+ multiple independent instances
+ - registry has been re-factored to lower its memory
+ requirements (required for the public registry)
+ - simple patterns in cgroups, disks and alarms
+ - netdata-installer.sh can now correctly install
+ netdata in containers
+ - supplied logrotate script compatibility fixes
+ - spec cleanup @breed808
+ - clocks and timers reworked @rlefevre
+
+ netdata has received a lot more improvements from many more
+ contributors!
+
+ Thank you all guys!
+
+netdata (1.4.0) - 2016-10-04
+
+ At a glance:
+
+- the fastest netdata ever (with a better look too)!
+
+- improved IoT and containers support!
+
+- alarms improved in almost every way!
+
+- new plugins:
+ softnet netdev,
+ extended TCP metrics,
+ UDPLite
+ NFS v2, v3 client (server was there already),
+ NFS v4 server & client,
+ APCUPSd,
+ RetroShare
+
+- improved plugins:
+ mysql,
+ cgroups,
+ hddtemp,
+ sensors,
+ phpfpm,
+ tc (QoS)
+
+ In detail:
+
+- improved alarms
+
+ Many new alarms have been added to detect common kernel
+ configuration errors and old alarms have been re-worked
+ to avoid notification floods.
+
+ Alarms now support notification hysteresis (both static
+ and dynamic), notification self-cancellation, dynamic
+ thresholds based on current alarm status
+
+- improved alarm notifications
+
+ netdata now supports:
+
+ - email notifications
+ - slack.com notifications on slack channels
+ - pushover.net notifications (mobile push notifications)
+ - telegram.org notifications
+
+ For all the above methods, netdata supports role-based
+ notifications, with multiple recipients for each role
+ and severity filtering per recipient!
+
+ Also, netdata support HTML5 notifications, while the
+ dashboard is open in a browser window (no need to be
+ the active one).
+
+ All notifications are now clickable to get to the chart
+ that raised the alarm.
+
+- improved IoT support!
+
+ netdata builds and runs with musl libc and runs on systems
+ based on busybox.
+
+- improved containers support!
+
+ netdata runs on alpine linux (a low profile linux distribution
+ used in containers).
+
+- Dozens of other improvements and bugfixes
+
+netdata (1.3.0) - 2016-08-28
+
+ At a glance:
+
+- netdata has health monitoring / alarms!
+- netdata has badges that can be embedded anywhere!
+- netdata plugins are now written in Python!
+- new plugins: redis, memcached, nginx_log, ipfs, apache_cache
+
+ IMPORTANT:
+ Since netdata now uses Python plugins, new packages are
+  required to be installed on a system to allow it to work.
+ For more information, please check the installation page:
+
+ <https://github.com/netdata/netdata/tree/master/installer#installation>
+
+ In detail:
+
+- netdata has alarms!
+
+ Based on the POLL we made on github
+ (<https://github.com/netdata/netdata/issues/436>),
+ health monitoring was the winner. So here it is!
+
+ netdata now has a powerful health monitoring system embedded.
+ Please check the wiki page:
+
+ <https://github.com/netdata/netdata/tree/master/health>
+
+- netdata has badges!
+
+ netdata can generate badges with live information from the
+ collected metrics.
+ Please check the wiki page:
+
+ <https://github.com/netdata/netdata/tree/master/web/api/badges>
+
+- netdata plugins are now written in Python!
+
+ Thanks to the great work of Paweł Krupa (@paulfantom), most BASH
+ plugins have been ported to Python.
+
+ The new python.d.plugin supports both python2 and python3 and
+ data collection from multiple sources for all modules.
+
+ The following pre-existing modules have been ported to Python:
+
+ - apache
+ - cpufreq
+ - example
+ - exim
+ - hddtemp
+ - mysql
+ - nginx
+ - phpfpm
+ - postfix
+ - sensors
+ - squid
+ - tomcat
+
+ The following new modules have been added:
+
+ - apache_cache
+ - dovecot
+ - ipfs
+ - memcached
+ - nginx_log
+ - redis
+
+- other data collectors:
+
+ - Thanks to @simonnagl netdata now reports disk space usage.
+
+- dashboards now transfer certain settings from server to server
+ when changing servers via the my-netdata menu.
+
+ The settings transferred are the dashboard theme, the online
+ help status and current pan and zoom timeframe of the dashboard.
+
+- API improvements:
+
+ - reduction functions now support 'min', 'sum' and 'incremental-sum'.
+
+ - netdata now offers a multi-threaded and a single threaded
+ web server (single threaded is better for IoT).
+
+- apps.plugin improvements:
+
+ - can now run with command line argument 'without-files'
+ to prevent it from enumerating all the open files/sockets/pipes
+ of all running processes.
+
+  - apps.plugin now scales the collected values to match the
+    total system usage.
+
+ - apps.plugin can now report guest CPU usage per process.
+
+ - repeating errors are now logged once per process.
+
+- netdata now runs with IDLE process priority (lower than nice 19)
+
+- netdata now instructs the kernel to kill it first when the system
+  starves for memory.
+
+- netdata listens for signals:
+
+ - SIGHUP to netdata instructs it to re-open its log files
+ (new logrotate files added too).
+
+ - SIGUSR1 to netdata saves the database
+
+ - SIGUSR2 to netdata reloads health / alarms configuration
+
+- netdata can now bind to multiple IPs and ports.
+
+- netdata now has new systemd service file (it starts as user
+ netdata and does not fork).
+
+- Dozens of other improvements and bugfixes
+
+netdata (1.2.0) - 2016-05-16
+
+ At a glance:
+
+- netdata is now 30% faster
+- netdata now has a registry (my-netdata dashboard menu)
+- netdata now monitors Linux Containers (docker, lxc, etc)
+
+ IMPORTANT:
+ This version requires libuuid. The package you need is:
+
+- uuid-dev (debian/ubuntu), or
+- libuuid-devel (centos/fedora/redhat)
+
+ In detail:
+
+- netdata is now 30% faster !
+
+ - Patches submitted by @fredericopissarra improved overall
+ netdata performance by 10%.
+
+ - A new improved search function in the internal indexes
+ made all searches faster by 50%, resulting in about
+ 20% better performance for the core of netdata.
+
+ - More efficient threads locking in key components
+ contributed to the overall efficiency.
+
+- netdata now has a CENTRAL REGISTRY !
+
+ The central registry tracks all your netdata servers
+ and bookmarks them for you at the 'my-netdata' menu
+ on all dashboards.
+
+ Every netdata can act as a registry, but there is also
+ a global registry provided for free for all netdata users!
+
+- netdata now monitors CONTAINERS !
+
+ docker, lxc, or anything else. For each container it monitors
+ CPU, RAM, DISK I/O (network interfaces were already monitored)
+
+- apps.plugin: now uses linux capabilities by default
+ without setuid to root
+
+- netdata has now an improved signal handler
+ thanks to @simonnagl
+
+- API: new improved CORS support
+
+- SNMP: counter64 support fixed
+
+- MYSQL: more charts, about QCache, MyISAM key cache,
+ InnoDB buffer pools, open files
+
+- DISK charts now show mount point when available
+
+- Dashboard: improved support for older web browsers
+ and mobile web browsers (thanks to @simonnagl)
+
+- Multi-server dashboards now allow de-coupled refreshes for
+ each chart, so that if one netdata has a network latency
+ the other charts are not affected
+
+- Several other minor improvements and bugfixes
+
+netdata (1.1.0) - 2016-04-20
+
+ Dozens of commits that improve netdata in several ways:
+
+- Data collection: added IPv6 monitoring
+- Data collection: added SYNPROXY DDoS protection monitoring
+- Data collection: apps.plugin: added charts for users and user groups
+- Data collection: apps.plugin: grouping of processes now support patterns
+- Data collection: apps.plugin: now it is faster, after the new features added
+- Data collection: better auto-detection of partitions for disk monitoring
+- Data collection: better fireqos integration for QoS monitoring
+- Data collection: squid monitoring now uses squidclient
+- Data collection: SNMP monitoring now supports 64bit counters
+- API: fixed issues in CSV output generation
+- API: netdata can now be restricted to listen on a specific IP
+- Core and apps.plugin: error log flood protection
+- Dashboard: better error handling when the netdata server is unreachable
+- Dashboard: each chart now has a toolbox
+- Dashboard: on-line help support
+- Dashboard: check for netdata updates button
+- Dashboard: added example /tv.html dashboard
+- Packaging: now compiles with musl libc (alpine linux)
+- Packaging: added debian packaging
+- Packaging: support non-root installations
+- Packaging: the installer generates uninstall script
+
+netdata (1.0.0) - 2016-03-22
+
+- first public release
+
+netdata (1.0.0-rc.1) - 2015-11-28
+
+- initial packaging
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000..1351f44
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,684 @@
+ GNU GENERAL PUBLIC LICENSE
+ Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The GNU General Public License is a free, copyleft license for
+software and other kinds of works.
+
+ The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works. By contrast,
+the GNU General Public License is intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users. We, the Free Software Foundation, use the
+GNU General Public License for most of our software; it applies also to
+any other work released this way by its authors. You can apply it to
+your programs, too.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
+
+ To protect your rights, we need to prevent others from denying you
+these rights or asking you to surrender the rights. Therefore, you have
+certain responsibilities if you distribute copies of the software, or if
+you modify it: responsibilities to respect the freedom of others.
+
+ For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must pass on to the recipients the same
+freedoms that you received. You must make sure that they, too, receive
+or can get the source code. And you must show them these terms so they
+know their rights.
+
+ Developers that use the GNU GPL protect your rights with two steps:
+(1) assert copyright on the software, and (2) offer you this License
+giving you legal permission to copy, distribute and/or modify it.
+
+ For the developers' and authors' protection, the GPL clearly explains
+that there is no warranty for this free software. For both users' and
+authors' sake, the GPL requires that modified versions be marked as
+changed, so that their problems will not be attributed erroneously to
+authors of previous versions.
+
+ Some devices are designed to deny users access to install or run
+modified versions of the software inside them, although the manufacturer
+can do so. This is fundamentally incompatible with the aim of
+protecting users' freedom to change the software. The systematic
+pattern of such abuse occurs in the area of products for individuals to
+use, which is precisely where it is most unacceptable. Therefore, we
+have designed this version of the GPL to prohibit the practice for those
+products. If such problems arise substantially in other domains, we
+stand ready to extend this provision to those domains in future versions
+of the GPL, as needed to protect the freedom of users.
+
+ Finally, every program is threatened constantly by software patents.
+States should not allow patents to restrict development and use of
+software on general-purpose computers, but in those that do, we wish to
+avoid the special danger that patents applied to a free program could
+make it effectively proprietary. To prevent this, the GPL assures that
+patents cannot be used to render the program non-free.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+
+ TERMS AND CONDITIONS
+
+ 0. Definitions.
+
+ "This License" refers to version 3 of the GNU General Public License.
+
+ "Copyright" also means copyright-like laws that apply to other kinds of
+works, such as semiconductor masks.
+
+ "The Program" refers to any copyrightable work licensed under this
+License. Each licensee is addressed as "you". "Licensees" and
+"recipients" may be individuals or organizations.
+
+ To "modify" a work means to copy from or adapt all or part of the work
+in a fashion requiring copyright permission, other than the making of an
+exact copy. The resulting work is called a "modified version" of the
+earlier work or a work "based on" the earlier work.
+
+ A "covered work" means either the unmodified Program or a work based
+on the Program.
+
+ To "propagate" a work means to do anything with it that, without
+permission, would make you directly or secondarily liable for
+infringement under applicable copyright law, except executing it on a
+computer or modifying a private copy. Propagation includes copying,
+distribution (with or without modification), making available to the
+public, and in some countries other activities as well.
+
+ To "convey" a work means any kind of propagation that enables other
+parties to make or receive copies. Mere interaction with a user through
+a computer network, with no transfer of a copy, is not conveying.
+
+ An interactive user interface displays "Appropriate Legal Notices"
+to the extent that it includes a convenient and prominently visible
+feature that (1) displays an appropriate copyright notice, and (2)
+tells the user that there is no warranty for the work (except to the
+extent that warranties are provided), that licensees may convey the
+work under this License, and how to view a copy of this License. If
+the interface presents a list of user commands or options, such as a
+menu, a prominent item in the list meets this criterion.
+
+ 1. Source Code.
+
+ The "source code" for a work means the preferred form of the work
+for making modifications to it. "Object code" means any non-source
+form of a work.
+
+ A "Standard Interface" means an interface that either is an official
+standard defined by a recognized standards body, or, in the case of
+interfaces specified for a particular programming language, one that
+is widely used among developers working in that language.
+
+ The "System Libraries" of an executable work include anything, other
+than the work as a whole, that (a) is included in the normal form of
+packaging a Major Component, but which is not part of that Major
+Component, and (b) serves only to enable use of the work with that
+Major Component, or to implement a Standard Interface for which an
+implementation is available to the public in source code form. A
+"Major Component", in this context, means a major essential component
+(kernel, window system, and so on) of the specific operating system
+(if any) on which the executable work runs, or a compiler used to
+produce the work, or an object code interpreter used to run it.
+
+ The "Corresponding Source" for a work in object code form means all
+the source code needed to generate, install, and (for an executable
+work) run the object code and to modify the work, including scripts to
+control those activities. However, it does not include the work's
+System Libraries, or general-purpose tools or generally available free
+programs which are used unmodified in performing those activities but
+which are not part of the work. For example, Corresponding Source
+includes interface definition files associated with source files for
+the work, and the source code for shared libraries and dynamically
+linked subprograms that the work is specifically designed to require,
+such as by intimate data communication or control flow between those
+subprograms and other parts of the work.
+
+ The Corresponding Source need not include anything that users
+can regenerate automatically from other parts of the Corresponding
+Source.
+
+ The Corresponding Source for a work in source code form is that
+same work.
+
+ 2. Basic Permissions.
+
+ All rights granted under this License are granted for the term of
+copyright on the Program, and are irrevocable provided the stated
+conditions are met. This License explicitly affirms your unlimited
+permission to run the unmodified Program. The output from running a
+covered work is covered by this License only if the output, given its
+content, constitutes a covered work. This License acknowledges your
+rights of fair use or other equivalent, as provided by copyright law.
+
+ You may make, run and propagate covered works that you do not
+convey, without conditions so long as your license otherwise remains
+in force. You may convey covered works to others for the sole purpose
+of having them make modifications exclusively for you, or provide you
+with facilities for running those works, provided that you comply with
+the terms of this License in conveying all material for which you do
+not control copyright. Those thus making or running the covered works
+for you must do so exclusively on your behalf, under your direction
+and control, on terms that prohibit them from making any copies of
+your copyrighted material outside their relationship with you.
+
+ Conveying under any other circumstances is permitted solely under
+the conditions stated below. Sublicensing is not allowed; section 10
+makes it unnecessary.
+
+ 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+ No covered work shall be deemed part of an effective technological
+measure under any applicable law fulfilling obligations under article
+11 of the WIPO copyright treaty adopted on 20 December 1996, or
+similar laws prohibiting or restricting circumvention of such
+measures.
+
+ When you convey a covered work, you waive any legal power to forbid
+circumvention of technological measures to the extent such circumvention
+is effected by exercising rights under this License with respect to
+the covered work, and you disclaim any intention to limit operation or
+modification of the work as a means of enforcing, against the work's
+users, your or third parties' legal rights to forbid circumvention of
+technological measures.
+
+ 4. Conveying Verbatim Copies.
+
+ You may convey verbatim copies of the Program's source code as you
+receive it, in any medium, provided that you conspicuously and
+appropriately publish on each copy an appropriate copyright notice;
+keep intact all notices stating that this License and any
+non-permissive terms added in accord with section 7 apply to the code;
+keep intact all notices of the absence of any warranty; and give all
+recipients a copy of this License along with the Program.
+
+ You may charge any price or no price for each copy that you convey,
+and you may offer support or warranty protection for a fee.
+
+ 5. Conveying Modified Source Versions.
+
+ You may convey a work based on the Program, or the modifications to
+produce it from the Program, in the form of source code under the
+terms of section 4, provided that you also meet all of these conditions:
+
+ a) The work must carry prominent notices stating that you modified
+ it, and giving a relevant date.
+
+ b) The work must carry prominent notices stating that it is
+ released under this License and any conditions added under section
+ 7. This requirement modifies the requirement in section 4 to
+ "keep intact all notices".
+
+ c) You must license the entire work, as a whole, under this
+ License to anyone who comes into possession of a copy. This
+ License will therefore apply, along with any applicable section 7
+ additional terms, to the whole of the work, and all its parts,
+ regardless of how they are packaged. This License gives no
+ permission to license the work in any other way, but it does not
+ invalidate such permission if you have separately received it.
+
+ d) If the work has interactive user interfaces, each must display
+ Appropriate Legal Notices; however, if the Program has interactive
+ interfaces that do not display Appropriate Legal Notices, your
+ work need not make them do so.
+
+ A compilation of a covered work with other separate and independent
+works, which are not by their nature extensions of the covered work,
+and which are not combined with it such as to form a larger program,
+in or on a volume of a storage or distribution medium, is called an
+"aggregate" if the compilation and its resulting copyright are not
+used to limit the access or legal rights of the compilation's users
+beyond what the individual works permit. Inclusion of a covered work
+in an aggregate does not cause this License to apply to the other
+parts of the aggregate.
+
+ 6. Conveying Non-Source Forms.
+
+ You may convey a covered work in object code form under the terms
+of sections 4 and 5, provided that you also convey the
+machine-readable Corresponding Source under the terms of this License,
+in one of these ways:
+
+ a) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by the
+ Corresponding Source fixed on a durable physical medium
+ customarily used for software interchange.
+
+ b) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by a
+ written offer, valid for at least three years and valid for as
+ long as you offer spare parts or customer support for that product
+ model, to give anyone who possesses the object code either (1) a
+ copy of the Corresponding Source for all the software in the
+ product that is covered by this License, on a durable physical
+ medium customarily used for software interchange, for a price no
+ more than your reasonable cost of physically performing this
+ conveying of source, or (2) access to copy the
+ Corresponding Source from a network server at no charge.
+
+ c) Convey individual copies of the object code with a copy of the
+ written offer to provide the Corresponding Source. This
+ alternative is allowed only occasionally and noncommercially, and
+ only if you received the object code with such an offer, in accord
+ with subsection 6b.
+
+ d) Convey the object code by offering access from a designated
+ place (gratis or for a charge), and offer equivalent access to the
+ Corresponding Source in the same way through the same place at no
+ further charge. You need not require recipients to copy the
+ Corresponding Source along with the object code. If the place to
+ copy the object code is a network server, the Corresponding Source
+ may be on a different server (operated by you or a third party)
+ that supports equivalent copying facilities, provided you maintain
+ clear directions next to the object code saying where to find the
+ Corresponding Source. Regardless of what server hosts the
+ Corresponding Source, you remain obligated to ensure that it is
+ available for as long as needed to satisfy these requirements.
+
+ e) Convey the object code using peer-to-peer transmission, provided
+ you inform other peers where the object code and Corresponding
+ Source of the work are being offered to the general public at no
+ charge under subsection 6d.
+
+ A separable portion of the object code, whose source code is excluded
+from the Corresponding Source as a System Library, need not be
+included in conveying the object code work.
+
+ A "User Product" is either (1) a "consumer product", which means any
+tangible personal property which is normally used for personal, family,
+or household purposes, or (2) anything designed or sold for incorporation
+into a dwelling. In determining whether a product is a consumer product,
+doubtful cases shall be resolved in favor of coverage. For a particular
+product received by a particular user, "normally used" refers to a
+typical or common use of that class of product, regardless of the status
+of the particular user or of the way in which the particular user
+actually uses, or expects or is expected to use, the product. A product
+is a consumer product regardless of whether the product has substantial
+commercial, industrial or non-consumer uses, unless such uses represent
+the only significant mode of use of the product.
+
+ "Installation Information" for a User Product means any methods,
+procedures, authorization keys, or other information required to install
+and execute modified versions of a covered work in that User Product from
+a modified version of its Corresponding Source. The information must
+suffice to ensure that the continued functioning of the modified object
+code is in no case prevented or interfered with solely because
+modification has been made.
+
+ If you convey an object code work under this section in, or with, or
+specifically for use in, a User Product, and the conveying occurs as
+part of a transaction in which the right of possession and use of the
+User Product is transferred to the recipient in perpetuity or for a
+fixed term (regardless of how the transaction is characterized), the
+Corresponding Source conveyed under this section must be accompanied
+by the Installation Information. But this requirement does not apply
+if neither you nor any third party retains the ability to install
+modified object code on the User Product (for example, the work has
+been installed in ROM).
+
+ The requirement to provide Installation Information does not include a
+requirement to continue to provide support service, warranty, or updates
+for a work that has been modified or installed by the recipient, or for
+the User Product in which it has been modified or installed. Access to a
+network may be denied when the modification itself materially and
+adversely affects the operation of the network or violates the rules and
+protocols for communication across the network.
+
+ Corresponding Source conveyed, and Installation Information provided,
+in accord with this section must be in a format that is publicly
+documented (and with an implementation available to the public in
+source code form), and must require no special password or key for
+unpacking, reading or copying.
+
+ 7. Additional Terms.
+
+ "Additional permissions" are terms that supplement the terms of this
+License by making exceptions from one or more of its conditions.
+Additional permissions that are applicable to the entire Program shall
+be treated as though they were included in this License, to the extent
+that they are valid under applicable law. If additional permissions
+apply only to part of the Program, that part may be used separately
+under those permissions, but the entire Program remains governed by
+this License without regard to the additional permissions.
+
+ When you convey a copy of a covered work, you may at your option
+remove any additional permissions from that copy, or from any part of
+it. (Additional permissions may be written to require their own
+removal in certain cases when you modify the work.) You may place
+additional permissions on material, added by you to a covered work,
+for which you have or can give appropriate copyright permission.
+
+ Notwithstanding any other provision of this License, for material you
+add to a covered work, you may (if authorized by the copyright holders of
+that material) supplement the terms of this License with terms:
+
+ a) Disclaiming warranty or limiting liability differently from the
+ terms of sections 15 and 16 of this License; or
+
+ b) Requiring preservation of specified reasonable legal notices or
+ author attributions in that material or in the Appropriate Legal
+ Notices displayed by works containing it; or
+
+ c) Prohibiting misrepresentation of the origin of that material, or
+ requiring that modified versions of such material be marked in
+ reasonable ways as different from the original version; or
+
+ d) Limiting the use for publicity purposes of names of licensors or
+ authors of the material; or
+
+ e) Declining to grant rights under trademark law for use of some
+ trade names, trademarks, or service marks; or
+
+ f) Requiring indemnification of licensors and authors of that
+ material by anyone who conveys the material (or modified versions of
+ it) with contractual assumptions of liability to the recipient, for
+ any liability that these contractual assumptions directly impose on
+ those licensors and authors.
+
+ All other non-permissive additional terms are considered "further
+restrictions" within the meaning of section 10. If the Program as you
+received it, or any part of it, contains a notice stating that it is
+governed by this License along with a term that is a further
+restriction, you may remove that term. If a license document contains
+a further restriction but permits relicensing or conveying under this
+License, you may add to a covered work material governed by the terms
+of that license document, provided that the further restriction does
+not survive such relicensing or conveying.
+
+ If you add terms to a covered work in accord with this section, you
+must place, in the relevant source files, a statement of the
+additional terms that apply to those files, or a notice indicating
+where to find the applicable terms.
+
+ Additional terms, permissive or non-permissive, may be stated in the
+form of a separately written license, or stated as exceptions;
+the above requirements apply either way.
+
+ 8. Termination.
+
+ You may not propagate or modify a covered work except as expressly
+provided under this License. Any attempt otherwise to propagate or
+modify it is void, and will automatically terminate your rights under
+this License (including any patent licenses granted under the third
+paragraph of section 11).
+
+ However, if you cease all violation of this License, then your
+license from a particular copyright holder is reinstated (a)
+provisionally, unless and until the copyright holder explicitly and
+finally terminates your license, and (b) permanently, if the copyright
+holder fails to notify you of the violation by some reasonable means
+prior to 60 days after the cessation.
+
+ Moreover, your license from a particular copyright holder is
+reinstated permanently if the copyright holder notifies you of the
+violation by some reasonable means, this is the first time you have
+received notice of violation of this License (for any work) from that
+copyright holder, and you cure the violation prior to 30 days after
+your receipt of the notice.
+
+ Termination of your rights under this section does not terminate the
+licenses of parties who have received copies or rights from you under
+this License. If your rights have been terminated and not permanently
+reinstated, you do not qualify to receive new licenses for the same
+material under section 10.
+
+ 9. Acceptance Not Required for Having Copies.
+
+ You are not required to accept this License in order to receive or
+run a copy of the Program. Ancillary propagation of a covered work
+occurring solely as a consequence of using peer-to-peer transmission
+to receive a copy likewise does not require acceptance. However,
+nothing other than this License grants you permission to propagate or
+modify any covered work. These actions infringe copyright if you do
+not accept this License. Therefore, by modifying or propagating a
+covered work, you indicate your acceptance of this License to do so.
+
+ 10. Automatic Licensing of Downstream Recipients.
+
+ Each time you convey a covered work, the recipient automatically
+receives a license from the original licensors, to run, modify and
+propagate that work, subject to this License. You are not responsible
+for enforcing compliance by third parties with this License.
+
+ An "entity transaction" is a transaction transferring control of an
+organization, or substantially all assets of one, or subdividing an
+organization, or merging organizations. If propagation of a covered
+work results from an entity transaction, each party to that
+transaction who receives a copy of the work also receives whatever
+licenses to the work the party's predecessor in interest had or could
+give under the previous paragraph, plus a right to possession of the
+Corresponding Source of the work from the predecessor in interest, if
+the predecessor has it or can get it with reasonable efforts.
+
+ You may not impose any further restrictions on the exercise of the
+rights granted or affirmed under this License. For example, you may
+not impose a license fee, royalty, or other charge for exercise of
+rights granted under this License, and you may not initiate litigation
+(including a cross-claim or counterclaim in a lawsuit) alleging that
+any patent claim is infringed by making, using, selling, offering for
+sale, or importing the Program or any portion of it.
+
+ 11. Patents.
+
+ A "contributor" is a copyright holder who authorizes use under this
+License of the Program or a work on which the Program is based. The
+work thus licensed is called the contributor's "contributor version".
+
+ A contributor's "essential patent claims" are all patent claims
+owned or controlled by the contributor, whether already acquired or
+hereafter acquired, that would be infringed by some manner, permitted
+by this License, of making, using, or selling its contributor version,
+but do not include claims that would be infringed only as a
+consequence of further modification of the contributor version. For
+purposes of this definition, "control" includes the right to grant
+patent sublicenses in a manner consistent with the requirements of
+this License.
+
+ Each contributor grants you a non-exclusive, worldwide, royalty-free
+patent license under the contributor's essential patent claims, to
+make, use, sell, offer for sale, import and otherwise run, modify and
+propagate the contents of its contributor version.
+
+ In the following three paragraphs, a "patent license" is any express
+agreement or commitment, however denominated, not to enforce a patent
+(such as an express permission to practice a patent or covenant not to
+sue for patent infringement). To "grant" such a patent license to a
+party means to make such an agreement or commitment not to enforce a
+patent against the party.
+
+ If you convey a covered work, knowingly relying on a patent license,
+and the Corresponding Source of the work is not available for anyone
+to copy, free of charge and under the terms of this License, through a
+publicly available network server or other readily accessible means,
+then you must either (1) cause the Corresponding Source to be so
+available, or (2) arrange to deprive yourself of the benefit of the
+patent license for this particular work, or (3) arrange, in a manner
+consistent with the requirements of this License, to extend the patent
+license to downstream recipients. "Knowingly relying" means you have
+actual knowledge that, but for the patent license, your conveying the
+covered work in a country, or your recipient's use of the covered work
+in a country, would infringe one or more identifiable patents in that
+country that you have reason to believe are valid.
+
+ If, pursuant to or in connection with a single transaction or
+arrangement, you convey, or propagate by procuring conveyance of, a
+covered work, and grant a patent license to some of the parties
+receiving the covered work authorizing them to use, propagate, modify
+or convey a specific copy of the covered work, then the patent license
+you grant is automatically extended to all recipients of the covered
+work and works based on it.
+
+ A patent license is "discriminatory" if it does not include within
+the scope of its coverage, prohibits the exercise of, or is
+conditioned on the non-exercise of one or more of the rights that are
+specifically granted under this License. You may not convey a covered
+work if you are a party to an arrangement with a third party that is
+in the business of distributing software, under which you make payment
+to the third party based on the extent of your activity of conveying
+the work, and under which the third party grants, to any of the
+parties who would receive the covered work from you, a discriminatory
+patent license (a) in connection with copies of the covered work
+conveyed by you (or copies made from those copies), or (b) primarily
+for and in connection with specific products or compilations that
+contain the covered work, unless you entered into that arrangement,
+or that patent license was granted, prior to 28 March 2007.
+
+ Nothing in this License shall be construed as excluding or limiting
+any implied license or other defenses to infringement that may
+otherwise be available to you under applicable patent law.
+
+ 12. No Surrender of Others' Freedom.
+
+ If conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot convey a
+covered work so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you may
+not convey it at all. For example, if you agree to terms that obligate you
+to collect a royalty for further conveying from those to whom you convey
+the Program, the only way you could satisfy both those terms and this
+License would be to refrain entirely from conveying the Program.
+
+ 13. Use with the GNU Affero General Public License.
+
+ Notwithstanding any other provision of this License, you have
+permission to link or combine any covered work with a work licensed
+under version 3 of the GNU Affero General Public License into a single
+combined work, and to convey the resulting work. The terms of this
+License will continue to apply to the part which is the covered work,
+but the special requirements of the GNU Affero General Public License,
+section 13, concerning interaction through a network will apply to the
+combination as such.
+
+ 14. Revised Versions of this License.
+
+ The Free Software Foundation may publish revised and/or new versions of
+the GNU General Public License from time to time. Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+ Each version is given a distinguishing version number. If the
+Program specifies that a certain numbered version of the GNU General
+Public License "or any later version" applies to it, you have the
+option of following the terms and conditions either of that numbered
+version or of any later version published by the Free Software
+Foundation. If the Program does not specify a version number of the
+GNU General Public License, you may choose any version ever published
+by the Free Software Foundation.
+
+ If the Program specifies that a proxy can decide which future
+versions of the GNU General Public License can be used, that proxy's
+public statement of acceptance of a version permanently authorizes you
+to choose that version for the Program.
+
+ Later license versions may give you additional or different
+permissions. However, no additional obligations are imposed on any
+author or copyright holder as a result of your choosing to follow a
+later version.
+
+ 15. Disclaimer of Warranty.
+
+ THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+ 16. Limitation of Liability.
+
+ IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+ 17. Interpretation of Sections 15 and 16.
+
+ If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+ {one line to give the program's name and a brief idea of what it does.}
+ Copyright (C) {year} {name of author}
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+ If the program does terminal interaction, make it output a short
+notice like this when it starts in an interactive mode:
+
+ {project} Copyright (C) {year} {fullname}
+ This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+ This is free software, and you are welcome to redistribute it
+ under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License. Of course, your program's commands
+might be different; for a GUI interface, you would use an "about box".
+
+ You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU GPL, see
+<http://www.gnu.org/licenses/>.
+
+ The GNU General Public License does not permit incorporating your program
+into proprietary programs. If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library. If this is what you want to do, use the GNU Lesser General
+Public License instead of this License. But first, please read
+<http://www.gnu.org/philosophy/why-not-lgpl.html>.
+
+---------------------------------------------------------------------------
+
+Note:
+Individual files contain the following tag instead of the full license text.
+
+ SPDX-License-Identifier: GPL-3.0-or-later
+
+This enables machine processing of license information based on the SPDX
+License Identifiers, which are available here: http://spdx.org/licenses/
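+
+For example, the Makefile.am in this tree begins with the comment line
+
+    # SPDX-License-Identifier: GPL-3.0-or-later
+
+in place of the full license text above.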
diff --git a/Makefile.am b/Makefile.am
new file mode 100644
index 0000000..1846d1e
--- /dev/null
+++ b/Makefile.am
@@ -0,0 +1,1027 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = foreign subdir-objects 1.11
+ACLOCAL_AMFLAGS = -I build/m4
+
+MAINTAINERCLEANFILES = \
+ config.log config.status \
+ $(srcdir)/Makefile.in \
+ $(srcdir)/config.h.in $(srcdir)/config.h.in~ $(srcdir)/configure \
+ $(srcdir)/install-sh $(srcdir)/ltmain.sh $(srcdir)/missing \
+ $(srcdir)/compile $(srcdir)/depcomp $(srcdir)/aclocal.m4 \
+ $(srcdir)/config.guess $(srcdir)/config.sub \
+ $(srcdir)/m4/ltsugar.m4 $(srcdir)/m4/libtool.m4 \
+ $(srcdir)/m4/ltversion.m4 $(srcdir)/m4/lt~obsolete.m4 \
+ $(srcdir)/m4/ltoptions.m4 \
+ $(srcdir)/pkcs11-helper.spec $(srcdir)/config-w32-vc.h
+
+CLEANFILES = \
+ $(srcdir)/$(distdir).tar.gz
+
+EXTRA_DIST = \
+ .gitignore \
+ .csslintrc \
+ .eslintignore \
+ .eslintrc \
+ .github/CODEOWNERS \
+ build/m4/jemalloc.m4 \
+ build/m4/ax_c___atomic.m4 \
+ build/m4/ax_check_enable_debug.m4 \
+ build/m4/ax_c_mallinfo.m4 \
+ build/m4/ax_gcc_func_attribute.m4 \
+ build/m4/ax_check_compile_flag.m4 \
+ build/m4/ax_c_statement_expressions.m4 \
+ build/m4/ax_pthread.m4 \
+ build/m4/ax_c_lto.m4 \
+ build/m4/ax_c_mallopt.m4 \
+ build/m4/tcmalloc.m4 \
+ build/m4/ax_c__generic.m4 \
+ README.md \
+ CONTRIBUTORS.md \
+ CODE_OF_CONDUCT.md \
+ LICENSE \
+ REDISTRIBUTED.md \
+ CONTRIBUTING.md \
+ $(NULL)
+
+SUBDIRS = \
+ diagrams \
+ system \
+ tests \
+ $(NULL)
+
+dist_noinst_DATA = \
+ CHANGELOG.md \
+ cppcheck.sh \
+ configs.signatures \
+ contrib \
+ netdata.cppcheck \
+ netdata.spec \
+ package.json \
+ docs \
+ packaging/version \
+ packaging/dashboard.version \
+ packaging/dashboard.checksums \
+ packaging/go.d.version \
+ packaging/go.d.checksums \
+ packaging/jsonc.version \
+ packaging/jsonc.checksums \
+ packaging/judy.version \
+ packaging/judy.checksums \
+ packaging/libwebsockets.version \
+ packaging/libwebsockets.checksums \
+ packaging/mosquitto.version \
+ packaging/mosquitto.checksums \
+ packaging/bundle-dashboard.sh \
+ packaging/bundle-ebpf.sh \
+ packaging/bundle-judy.sh \
+ packaging/bundle-libbpf.sh \
+ packaging/bundle-mosquitto.sh \
+ packaging/check-kernel-config.sh \
+ packaging/libbpf.checksums \
+ packaging/libbpf.version \
+ packaging/ebpf.checksums \
+ packaging/ebpf.version \
+ packaging/bundle-lws.sh \
+ packaging/installer/README.md \
+ packaging/installer/UNINSTALL.md \
+ packaging/installer/UPDATE.md \
+ $(NULL)
+
+# Not yet integrated within the build; these should eventually become
+# proper init.d/openrc/systemd units.
+dist_noinst_SCRIPTS = \
+ coverity-scan.sh \
+ packaging/installer/netdata-updater.sh \
+ packaging/installer/netdata-uninstaller.sh \
+ packaging/installer/kickstart.sh \
+ packaging/installer/kickstart-static64.sh \
+ packaging/installer/functions.sh \
+    netdata-installer.sh \
+    $(NULL)
+
+# -----------------------------------------------------------------------------
+# Compile netdata binaries
+
+SUBDIRS += \
+ backends \
+ collectors \
+ daemon \
+ database \
+ exporting \
+ health \
+ libnetdata \
+ registry \
+ streaming \
+ web \
+ claim \
+ parser \
+ aclk/legacy \
+ spawn \
+ $(NULL)
+
+
+AM_CFLAGS = \
+ $(OPTIONAL_MATH_CFLAGS) \
+ $(OPTIONAL_NFACCT_CFLAGS) \
+ $(OPTIONAL_ZLIB_CFLAGS) \
+ $(OPTIONAL_UUID_CFLAGS) \
+ $(OPTIONAL_MQTT_CFLAGS) \
+ $(OPTIONAL_LIBCAP_LIBS) \
+ $(OPTIONAL_IPMIMONITORING_CFLAGS) \
+ $(OPTIONAL_CUPS_CFLAGS) \
+ $(OPTIONAL_XENSTAT_CFLAGS) \
+ $(OPTIONAL_BPF_CFLAGS) \
+ $(NULL)
+
+sbin_PROGRAMS =
+plugins_PROGRAMS =
+
+LIBNETDATA_FILES = \
+ libnetdata/adaptive_resortable_list/adaptive_resortable_list.c \
+ libnetdata/adaptive_resortable_list/adaptive_resortable_list.h \
+ libnetdata/config/appconfig.c \
+ libnetdata/config/appconfig.h \
+ libnetdata/avl/avl.c \
+ libnetdata/avl/avl.h \
+ libnetdata/buffer/buffer.c \
+ libnetdata/buffer/buffer.h \
+ libnetdata/circular_buffer/circular_buffer.c \
+ libnetdata/circular_buffer/circular_buffer.h \
+ libnetdata/clocks/clocks.c \
+ libnetdata/clocks/clocks.h \
+ libnetdata/dictionary/dictionary.c \
+ libnetdata/dictionary/dictionary.h \
+ libnetdata/eval/eval.c \
+ libnetdata/eval/eval.h \
+ libnetdata/inlined.h \
+ libnetdata/libnetdata.c \
+ libnetdata/libnetdata.h \
+ libnetdata/required_dummies.h \
+ libnetdata/locks/locks.c \
+ libnetdata/locks/locks.h \
+ libnetdata/log/log.c \
+ libnetdata/log/log.h \
+ libnetdata/popen/popen.c \
+ libnetdata/popen/popen.h \
+ libnetdata/procfile/procfile.c \
+ libnetdata/procfile/procfile.h \
+ libnetdata/os.c \
+ libnetdata/os.h \
+ libnetdata/simple_pattern/simple_pattern.c \
+ libnetdata/simple_pattern/simple_pattern.h \
+ libnetdata/socket/socket.c \
+ libnetdata/socket/socket.h \
+ libnetdata/socket/security.c \
+ libnetdata/socket/security.h \
+ libnetdata/statistical/statistical.c \
+ libnetdata/statistical/statistical.h \
+ libnetdata/storage_number/storage_number.c \
+ libnetdata/storage_number/storage_number.h \
+ libnetdata/threads/threads.c \
+ libnetdata/threads/threads.h \
+ libnetdata/url/url.c \
+ libnetdata/url/url.h \
+ libnetdata/json/json.c \
+ libnetdata/json/json.h \
+ libnetdata/json/jsmn.c \
+ libnetdata/json/jsmn.h \
+ libnetdata/health/health.c \
+ libnetdata/health/health.h \
+ libnetdata/string/utf8.h \
+ $(NULL)
+
+if ENABLE_PLUGIN_EBPF
+ LIBNETDATA_FILES += \
+ libnetdata/ebpf/ebpf.c \
+ libnetdata/ebpf/ebpf.h \
+ $(NULL)
+endif
+
+APPS_PLUGIN_FILES = \
+ collectors/apps.plugin/apps_plugin.c \
+ $(LIBNETDATA_FILES) \
+ $(NULL)
+
+CHECKS_PLUGIN_FILES = \
+ collectors/checks.plugin/plugin_checks.c \
+ collectors/checks.plugin/plugin_checks.h \
+ $(NULL)
+
+FREEBSD_PLUGIN_FILES = \
+ collectors/freebsd.plugin/plugin_freebsd.c \
+ collectors/freebsd.plugin/plugin_freebsd.h \
+ collectors/freebsd.plugin/freebsd_sysctl.c \
+ collectors/freebsd.plugin/freebsd_getmntinfo.c \
+ collectors/freebsd.plugin/freebsd_getifaddrs.c \
+ collectors/freebsd.plugin/freebsd_devstat.c \
+ collectors/freebsd.plugin/freebsd_kstat_zfs.c \
+ collectors/freebsd.plugin/freebsd_ipfw.c \
+ collectors/proc.plugin/zfs_common.c \
+ collectors/proc.plugin/zfs_common.h \
+ $(NULL)
+
+HEALTH_PLUGIN_FILES = \
+ health/health.c \
+ health/health.h \
+ health/health_config.c \
+ health/health_json.c \
+ health/health_log.c \
+ $(NULL)
+
+IDLEJITTER_PLUGIN_FILES = \
+ collectors/idlejitter.plugin/plugin_idlejitter.c \
+ collectors/idlejitter.plugin/plugin_idlejitter.h \
+ $(NULL)
+
+CGROUPS_PLUGIN_FILES = \
+ collectors/cgroups.plugin/sys_fs_cgroup.c \
+ collectors/cgroups.plugin/sys_fs_cgroup.h \
+ $(NULL)
+
+CGROUP_NETWORK_FILES = \
+ collectors/cgroups.plugin/cgroup-network.c \
+ $(LIBNETDATA_FILES) \
+ $(NULL)
+
+DISKSPACE_PLUGIN_FILES = \
+ collectors/diskspace.plugin/plugin_diskspace.h \
+ collectors/diskspace.plugin/plugin_diskspace.c \
+ $(NULL)
+
+FREEIPMI_PLUGIN_FILES = \
+ collectors/freeipmi.plugin/freeipmi_plugin.c \
+ $(LIBNETDATA_FILES) \
+ $(NULL)
+
+CUPS_PLUGIN_FILES = \
+ collectors/cups.plugin/cups_plugin.c \
+ $(LIBNETDATA_FILES) \
+ $(NULL)
+
+NFACCT_PLUGIN_FILES = \
+ collectors/nfacct.plugin/plugin_nfacct.c \
+ $(LIBNETDATA_FILES) \
+ $(NULL)
+
+SLABINFO_PLUGIN_FILES = \
+ collectors/slabinfo.plugin/slabinfo.c \
+ $(LIBNETDATA_FILES) \
+ $(NULL)
+
+XENSTAT_PLUGIN_FILES = \
+ collectors/xenstat.plugin/xenstat_plugin.c \
+ $(LIBNETDATA_FILES) \
+ $(NULL)
+
+PERF_PLUGIN_FILES = \
+ collectors/perf.plugin/perf_plugin.c \
+ $(LIBNETDATA_FILES) \
+ $(NULL)
+
+EBPF_PLUGIN_FILES = \
+ collectors/ebpf.plugin/ebpf.c \
+ collectors/ebpf.plugin/ebpf_process.c \
+ collectors/ebpf.plugin/ebpf_process.h \
+ collectors/ebpf.plugin/ebpf_socket.c \
+ collectors/ebpf.plugin/ebpf_socket.h \
+ collectors/ebpf.plugin/ebpf.h \
+ collectors/ebpf.plugin/ebpf_apps.c \
+ collectors/ebpf.plugin/ebpf_apps.h \
+ $(LIBNETDATA_FILES) \
+ $(NULL)
+
+PROC_PLUGIN_FILES = \
+ collectors/proc.plugin/ipc.c \
+ collectors/proc.plugin/plugin_proc.c \
+ collectors/proc.plugin/plugin_proc.h \
+ collectors/proc.plugin/proc_diskstats.c \
+ collectors/proc.plugin/proc_mdstat.c \
+ collectors/proc.plugin/proc_interrupts.c \
+ collectors/proc.plugin/proc_softirqs.c \
+ collectors/proc.plugin/proc_loadavg.c \
+ collectors/proc.plugin/proc_meminfo.c \
+ collectors/proc.plugin/proc_pagetypeinfo.c \
+ collectors/proc.plugin/proc_pressure.c \
+ collectors/proc.plugin/proc_pressure.h \
+ collectors/proc.plugin/proc_net_dev.c \
+ collectors/proc.plugin/proc_net_wireless.c \
+ collectors/proc.plugin/proc_net_ip_vs_stats.c \
+ collectors/proc.plugin/proc_net_netstat.c \
+ collectors/proc.plugin/proc_net_rpc_nfs.c \
+ collectors/proc.plugin/proc_net_rpc_nfsd.c \
+ collectors/proc.plugin/proc_net_snmp.c \
+ collectors/proc.plugin/proc_net_snmp6.c \
+ collectors/proc.plugin/proc_net_sctp_snmp.c \
+ collectors/proc.plugin/proc_net_sockstat.c \
+ collectors/proc.plugin/proc_net_sockstat6.c \
+ collectors/proc.plugin/proc_net_softnet_stat.c \
+ collectors/proc.plugin/proc_net_stat_conntrack.c \
+ collectors/proc.plugin/proc_net_stat_synproxy.c \
+ collectors/proc.plugin/proc_self_mountinfo.c \
+ collectors/proc.plugin/proc_self_mountinfo.h \
+ collectors/proc.plugin/zfs_common.c \
+ collectors/proc.plugin/zfs_common.h \
+ collectors/proc.plugin/proc_spl_kstat_zfs.c \
+ collectors/proc.plugin/proc_stat.c \
+ collectors/proc.plugin/proc_sys_kernel_random_entropy_avail.c \
+ collectors/proc.plugin/proc_vmstat.c \
+ collectors/proc.plugin/proc_uptime.c \
+ collectors/proc.plugin/sys_kernel_mm_ksm.c \
+ collectors/proc.plugin/sys_block_zram.c \
+ collectors/proc.plugin/sys_devices_system_edac_mc.c \
+ collectors/proc.plugin/sys_devices_system_node.c \
+ collectors/proc.plugin/sys_fs_btrfs.c \
+ collectors/proc.plugin/sys_class_power_supply.c \
+ collectors/proc.plugin/sys_class_infiniband.c \
+ $(NULL)
+
+TC_PLUGIN_FILES = \
+ collectors/tc.plugin/plugin_tc.c \
+ collectors/tc.plugin/plugin_tc.h \
+ $(NULL)
+
+MACOS_PLUGIN_FILES = \
+ collectors/macos.plugin/plugin_macos.c \
+ collectors/macos.plugin/plugin_macos.h \
+ collectors/macos.plugin/macos_sysctl.c \
+ collectors/macos.plugin/macos_mach_smi.c \
+ collectors/macos.plugin/macos_fw.c \
+ $(NULL)
+
+PLUGINSD_PLUGIN_FILES = \
+ collectors/plugins.d/plugins_d.c \
+ collectors/plugins.d/plugins_d.h \
+ collectors/plugins.d/pluginsd_parser.c \
+ collectors/plugins.d/pluginsd_parser.h \
+ $(NULL)
+
+RRD_PLUGIN_FILES = \
+ database/rrdcalc.c \
+ database/rrdcalc.h \
+ database/rrdcalctemplate.c \
+ database/rrdcalctemplate.h \
+ database/rrddim.c \
+ database/rrddimvar.c \
+ database/rrddimvar.h \
+ database/rrdfamily.c \
+ database/rrdhost.c \
+ database/rrdlabels.c \
+ database/rrd.c \
+ database/rrd.h \
+ database/rrdset.c \
+ database/rrdsetvar.c \
+ database/rrdsetvar.h \
+ database/rrdvar.c \
+ database/rrdvar.h \
+ $(NULL)
+
+if ENABLE_DBENGINE
+ RRD_PLUGIN_FILES += \
+ database/sqlite/sqlite_functions.c \
+ database/sqlite/sqlite_functions.h \
+ database/sqlite/sqlite3.c \
+ database/sqlite/sqlite3.h \
+ database/engine/rrdengine.c \
+ database/engine/rrdengine.h \
+ database/engine/rrddiskprotocol.h \
+ database/engine/datafile.c \
+ database/engine/datafile.h \
+ database/engine/journalfile.c \
+ database/engine/journalfile.h \
+ database/engine/rrdenginelib.c \
+ database/engine/rrdenginelib.h \
+ database/engine/rrdengineapi.c \
+ database/engine/rrdengineapi.h \
+ database/engine/pagecache.c \
+ database/engine/pagecache.h \
+ database/engine/rrdenglocking.c \
+ database/engine/rrdenglocking.h \
+ database/engine/metadata_log/metadatalog.h \
+ database/engine/metadata_log/metadatalogapi.c \
+ database/engine/metadata_log/metadatalogapi.h \
+ database/engine/metadata_log/logfile.h \
+ database/engine/metadata_log/logfile.c \
+ database/engine/metadata_log/metadatalogprotocol.h \
+ database/engine/metadata_log/metalogpluginsd.c \
+ database/engine/metadata_log/metalogpluginsd.h \
+ database/engine/metadata_log/compaction.c \
+ database/engine/metadata_log/compaction.h \
+ $(NULL)
+endif
+
+API_PLUGIN_FILES = \
+ web/api/badges/web_buffer_svg.c \
+ web/api/badges/web_buffer_svg.h \
+ web/api/exporters/allmetrics.c \
+ web/api/exporters/allmetrics.h \
+ web/api/exporters/shell/allmetrics_shell.c \
+ web/api/exporters/shell/allmetrics_shell.h \
+ web/api/queries/average/average.c \
+ web/api/queries/average/average.h \
+ web/api/queries/des/des.c \
+ web/api/queries/des/des.h \
+ web/api/queries/incremental_sum/incremental_sum.c \
+ web/api/queries/incremental_sum/incremental_sum.h \
+ web/api/queries/max/max.c \
+ web/api/queries/max/max.h \
+ web/api/queries/median/median.c \
+ web/api/queries/median/median.h \
+ web/api/queries/min/min.c \
+ web/api/queries/min/min.h \
+ web/api/queries/query.c \
+ web/api/queries/query.h \
+ web/api/queries/rrdr.c \
+ web/api/queries/rrdr.h \
+ web/api/queries/ses/ses.c \
+ web/api/queries/ses/ses.h \
+ web/api/queries/stddev/stddev.c \
+ web/api/queries/stddev/stddev.h \
+ web/api/queries/sum/sum.c \
+ web/api/queries/sum/sum.h \
+ web/api/formatters/rrd2json.c \
+ web/api/formatters/rrd2json.h \
+ web/api/formatters/csv/csv.c \
+ web/api/formatters/csv/csv.h \
+ web/api/formatters/json/json.c \
+ web/api/formatters/json/json.h \
+ web/api/formatters/ssv/ssv.c \
+ web/api/formatters/ssv/ssv.h \
+ web/api/formatters/value/value.c \
+ web/api/formatters/value/value.h \
+ web/api/formatters/json_wrapper.c \
+ web/api/formatters/json_wrapper.h \
+ web/api/formatters/charts2json.c \
+ web/api/formatters/charts2json.h \
+ web/api/formatters/rrdset2json.c \
+ web/api/formatters/rrdset2json.h \
+ web/api/health/health_cmdapi.c \
+ web/api/health/health_cmdapi.h \
+ web/api/web_api_v1.c \
+ web/api/web_api_v1.h \
+ $(NULL)
+
+STREAMING_PLUGIN_FILES = \
+ streaming/rrdpush.c \
+ streaming/sender.c \
+ streaming/receiver.c \
+ streaming/rrdpush.h \
+ $(NULL)
+
+REGISTRY_PLUGIN_FILES = \
+ registry/registry.c \
+ registry/registry.h \
+ registry/registry_db.c \
+ registry/registry_init.c \
+ registry/registry_internals.c \
+ registry/registry_internals.h \
+ registry/registry_log.c \
+ registry/registry_machine.c \
+ registry/registry_machine.h \
+ registry/registry_person.c \
+ registry/registry_person.h \
+ registry/registry_url.c \
+ registry/registry_url.h \
+ $(NULL)
+
+STATSD_PLUGIN_FILES = \
+ collectors/statsd.plugin/statsd.c \
+ collectors/statsd.plugin/statsd.h \
+ $(NULL)
+
+WEB_PLUGIN_FILES = \
+ web/server/web_client.c \
+ web/server/web_client.h \
+ web/server/web_server.c \
+ web/server/web_server.h \
+ web/server/web_client_cache.c \
+ web/server/web_client_cache.h \
+ web/server/static/static-threaded.c \
+ web/server/static/static-threaded.h \
+ $(NULL)
+
+BACKENDS_PLUGIN_FILES = \
+ backends/backends.c \
+ backends/backends.h \
+ backends/graphite/graphite.c \
+ backends/graphite/graphite.h \
+ backends/json/json.c \
+ backends/json/json.h \
+ backends/opentsdb/opentsdb.c \
+ backends/opentsdb/opentsdb.h \
+ backends/prometheus/backend_prometheus.c \
+ backends/prometheus/backend_prometheus.h \
+ $(NULL)
+
+CLAIM_FILES = \
+ claim/claim.c \
+ claim/claim.h \
+ $(NULL)
+
+PARSER_FILES = \
+ parser/parser.c \
+ parser/parser.h \
+ $(NULL)
+
+ACLK_FILES = \
+ aclk/legacy/aclk_rrdhost_state.h \
+ aclk/legacy/aclk_common.c \
+ aclk/legacy/aclk_common.h \
+ aclk/legacy/aclk_stats.c \
+ aclk/legacy/aclk_stats.h \
+ $(NULL)
+
+if ENABLE_ACLK
+ACLK_FILES += \
+ aclk/legacy/agent_cloud_link.c \
+ aclk/legacy/agent_cloud_link.h \
+ aclk/legacy/aclk_query.c \
+ aclk/legacy/aclk_query.h \
+ aclk/legacy/mqtt.c \
+ aclk/legacy/mqtt.h \
+ aclk/legacy/aclk_rx_msgs.c \
+ aclk/legacy/aclk_rx_msgs.h \
+ aclk/legacy/aclk_lws_wss_client.c \
+ aclk/legacy/aclk_lws_wss_client.h \
+ aclk/legacy/aclk_lws_https_client.c \
+ aclk/legacy/aclk_lws_https_client.h \
+ $(NULL)
+endif
+
+SPAWN_PLUGIN_FILES = \
+ spawn/spawn.c \
+ spawn/spawn_server.c \
+ spawn/spawn_client.c \
+ spawn/spawn.h \
+ $(NULL)
+
+EXPORTING_ENGINE_FILES = \
+ exporting/exporting_engine.c \
+ exporting/exporting_engine.h \
+ exporting/graphite/graphite.c \
+ exporting/graphite/graphite.h \
+ exporting/json/json.c \
+ exporting/json/json.h \
+ exporting/opentsdb/opentsdb.c \
+ exporting/opentsdb/opentsdb.h \
+ exporting/prometheus/prometheus.c \
+ exporting/prometheus/prometheus.h \
+ exporting/read_config.c \
+ exporting/clean_connectors.c \
+ exporting/init_connectors.c \
+ exporting/process_data.c \
+ exporting/check_filters.c \
+ exporting/send_data.c \
+ exporting/send_internal_metrics.c \
+ $(NULL)
+
+PROMETHEUS_REMOTE_WRITE_EXPORTING_FILES = \
+ exporting/prometheus/remote_write/remote_write.c \
+ exporting/prometheus/remote_write/remote_write.h \
+ exporting/prometheus/remote_write/remote_write_request.cc \
+ exporting/prometheus/remote_write/remote_write_request.h \
+ exporting/prometheus/remote_write/remote_write.proto \
+ $(NULL)
+
+KINESIS_EXPORTING_FILES = \
+ exporting/aws_kinesis/aws_kinesis.c \
+ exporting/aws_kinesis/aws_kinesis.h \
+ exporting/aws_kinesis/aws_kinesis_put_record.cc \
+ exporting/aws_kinesis/aws_kinesis_put_record.h \
+ $(NULL)
+
+PUBSUB_EXPORTING_FILES = \
+ exporting/pubsub/pubsub.c \
+ exporting/pubsub/pubsub.h \
+ exporting/pubsub/pubsub_publish.cc \
+ exporting/pubsub/pubsub_publish.h \
+ $(NULL)
+
+MONGODB_EXPORTING_FILES = \
+ exporting/mongodb/mongodb.c \
+ exporting/mongodb/mongodb.h \
+ $(NULL)
+
+KINESIS_BACKEND_FILES = \
+ backends/aws_kinesis/aws_kinesis.c \
+ backends/aws_kinesis/aws_kinesis.h \
+ backends/aws_kinesis/aws_kinesis_put_record.cc \
+ backends/aws_kinesis/aws_kinesis_put_record.h \
+ $(NULL)
+
+PROMETHEUS_REMOTE_WRITE_BACKEND_FILES = \
+ backends/prometheus/remote_write/remote_write.cc \
+ backends/prometheus/remote_write/remote_write.h \
+ $(NULL)
+
+MONGODB_BACKEND_FILES = \
+ backends/mongodb/mongodb.c \
+ backends/mongodb/mongodb.h \
+ $(NULL)
+
+DAEMON_FILES = \
+ daemon/buildinfo.c \
+ daemon/buildinfo.h \
+ daemon/common.c \
+ daemon/common.h \
+ daemon/daemon.c \
+ daemon/daemon.h \
+ daemon/global_statistics.c \
+ daemon/global_statistics.h \
+ daemon/main.c \
+ daemon/main.h \
+ daemon/signals.c \
+ daemon/signals.h \
+ daemon/commands.c \
+ daemon/commands.h \
+ daemon/unit_test.c \
+ daemon/unit_test.h \
+ $(NULL)
+
+NETDATA_FILES = \
+ collectors/all.h \
+ $(DAEMON_FILES) \
+ $(LIBNETDATA_FILES) \
+ $(API_PLUGIN_FILES) \
+ $(BACKENDS_PLUGIN_FILES) \
+ $(EXPORTING_ENGINE_FILES) \
+ $(CHECKS_PLUGIN_FILES) \
+ $(HEALTH_PLUGIN_FILES) \
+ $(IDLEJITTER_PLUGIN_FILES) \
+ $(PLUGINSD_PLUGIN_FILES) \
+ $(REGISTRY_PLUGIN_FILES) \
+ $(RRD_PLUGIN_FILES) \
+ $(STREAMING_PLUGIN_FILES) \
+ $(STATSD_PLUGIN_FILES) \
+ $(WEB_PLUGIN_FILES) \
+ $(CLAIM_FILES) \
+ $(PARSER_FILES) \
+ $(ACLK_FILES) \
+ $(SPAWN_PLUGIN_FILES) \
+ $(NULL)
+
+if FREEBSD
+ NETDATA_FILES += \
+ $(FREEBSD_PLUGIN_FILES) \
+ $(NULL)
+endif
+
+if MACOS
+ NETDATA_FILES += \
+ $(MACOS_PLUGIN_FILES) \
+ $(NULL)
+endif
+
+if LINUX
+ NETDATA_FILES += \
+ $(CGROUPS_PLUGIN_FILES) \
+ $(DISKSPACE_PLUGIN_FILES) \
+ $(PROC_PLUGIN_FILES) \
+ $(TC_PLUGIN_FILES) \
+ $(NULL)
+
+endif
+
+NETDATA_COMMON_LIBS = \
+ $(OPTIONAL_MATH_LIBS) \
+ $(OPTIONAL_BPF_LIBS) \
+ $(OPTIONAL_ZLIB_LIBS) \
+ $(OPTIONAL_SSL_LIBS) \
+ $(OPTIONAL_UUID_LIBS) \
+ $(OPTIONAL_MQTT_LIBS) \
+ $(OPTIONAL_UV_LIBS) \
+ $(OPTIONAL_LZ4_LIBS) \
+ $(OPTIONAL_JUDY_LIBS) \
+ $(OPTIONAL_JSONC_LIBS) \
+ $(NULL)
+
+if LINK_STATIC_JSONC
+ NETDATA_COMMON_LIBS += externaldeps/jsonc/libjson-c.a
+endif
+
+NETDATACLI_FILES = \
+ daemon/commands.h \
+ $(LIBNETDATA_FILES) \
+ cli/cli.c \
+ cli/cli.h \
+ $(NULL)
+
+sbin_PROGRAMS += netdata
+netdata_SOURCES = $(NETDATA_FILES)
+
+if ENABLE_ACLK
+netdata_LDADD = \
+ externaldeps/mosquitto/libmosquitto.a \
+ $(OPTIONAL_LIBCAP_LIBS) \
+ $(OPTIONAL_LWS_LIBS) \
+ $(NETDATA_COMMON_LIBS) \
+ $(NULL)
+else
+netdata_LDADD = \
+ $(NETDATA_COMMON_LIBS) \
+ $(NULL)
+endif
+
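+# Some of the optional exporting/backend connectors above are implemented in
+# C++ (.cc sources), so the netdata binary may need the C++ link driver: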
+if ENABLE_CXX_LINKER
+ netdata_LINK = $(CXXLD) $(CXXFLAGS) $(LDFLAGS) -o $@
+else
+ netdata_LINK = $(CCLD) $(CFLAGS) $(LDFLAGS) -o $@
+endif
+
+sbin_PROGRAMS += netdatacli
+netdatacli_SOURCES = $(NETDATACLI_FILES)
+netdatacli_LDADD = \
+ $(NETDATA_COMMON_LIBS) \
+ $(NULL)
+if ENABLE_CXX_LINKER
+ netdatacli_LINK = $(CXXLD) $(CXXFLAGS) $(LDFLAGS) -o $@
+else
+ netdatacli_LINK = $(CCLD) $(CFLAGS) $(LDFLAGS) -o $@
+endif
+
+if ENABLE_PLUGIN_APPS
+ plugins_PROGRAMS += apps.plugin
+ apps_plugin_SOURCES = $(APPS_PLUGIN_FILES)
+ apps_plugin_LDADD = \
+ $(NETDATA_COMMON_LIBS) \
+ $(OPTIONAL_LIBCAP_LIBS) \
+ $(NULL)
+endif
+
+if ENABLE_PLUGIN_CGROUP_NETWORK
+ plugins_PROGRAMS += cgroup-network
+ cgroup_network_SOURCES = $(CGROUP_NETWORK_FILES)
+ cgroup_network_LDADD = \
+ $(NETDATA_COMMON_LIBS) \
+ $(NULL)
+endif
+
+if ENABLE_PLUGIN_FREEIPMI
+ plugins_PROGRAMS += freeipmi.plugin
+ freeipmi_plugin_SOURCES = $(FREEIPMI_PLUGIN_FILES)
+ freeipmi_plugin_LDADD = \
+ $(NETDATA_COMMON_LIBS) \
+ $(OPTIONAL_IPMIMONITORING_LIBS) \
+ $(NULL)
+endif
+
+if ENABLE_PLUGIN_EBPF
+ plugins_PROGRAMS += ebpf.plugin
+ ebpf_plugin_SOURCES = $(EBPF_PLUGIN_FILES)
+ ebpf_plugin_LDADD = \
+ $(NETDATA_COMMON_LIBS) \
+ $(NULL)
+endif
+
+if ENABLE_PLUGIN_CUPS
+ plugins_PROGRAMS += cups.plugin
+ cups_plugin_SOURCES = $(CUPS_PLUGIN_FILES)
+ cups_plugin_LDADD = \
+ $(NETDATA_COMMON_LIBS) \
+ $(OPTIONAL_CUPS_LIBS) \
+ $(NULL)
+endif
+
+if ENABLE_PLUGIN_NFACCT
+ plugins_PROGRAMS += nfacct.plugin
+ nfacct_plugin_SOURCES = $(NFACCT_PLUGIN_FILES)
+ nfacct_plugin_LDADD = \
+ $(NETDATA_COMMON_LIBS) \
+ $(OPTIONAL_NFACCT_LIBS) \
+ $(NULL)
+endif
+
+if ENABLE_PLUGIN_XENSTAT
+ plugins_PROGRAMS += xenstat.plugin
+ xenstat_plugin_SOURCES = $(XENSTAT_PLUGIN_FILES)
+ xenstat_plugin_LDADD = \
+ $(NETDATA_COMMON_LIBS) \
+ $(OPTIONAL_XENSTAT_LIBS) \
+ $(NULL)
+endif
+
+if ENABLE_PLUGIN_PERF
+ plugins_PROGRAMS += perf.plugin
+ perf_plugin_SOURCES = $(PERF_PLUGIN_FILES)
+ perf_plugin_LDADD = \
+ $(NETDATA_COMMON_LIBS) \
+ $(NULL)
+endif
+
+if ENABLE_PLUGIN_SLABINFO
+ plugins_PROGRAMS += slabinfo.plugin
+ slabinfo_plugin_SOURCES = $(SLABINFO_PLUGIN_FILES)
+ slabinfo_plugin_LDADD = \
+ $(NETDATA_COMMON_LIBS) \
+ $(NULL)
+endif
+
+if ENABLE_BACKEND_KINESIS
+ netdata_SOURCES += $(KINESIS_BACKEND_FILES) $(KINESIS_EXPORTING_FILES)
+ netdata_LDADD += $(OPTIONAL_KINESIS_LIBS)
+endif
+
+if ENABLE_EXPORTING_PUBSUB
+ netdata_SOURCES += $(PUBSUB_EXPORTING_FILES)
+ netdata_LDADD += $(OPTIONAL_PUBSUB_LIBS)
+endif
+
+if ENABLE_BACKEND_PROMETHEUS_REMOTE_WRITE
+ netdata_SOURCES += $(PROMETHEUS_REMOTE_WRITE_BACKEND_FILES) $(PROMETHEUS_REMOTE_WRITE_EXPORTING_FILES)
+ netdata_LDADD += $(OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS)
+ BUILT_SOURCES = \
+ exporting/prometheus/remote_write/remote_write.pb.cc \
+ exporting/prometheus/remote_write/remote_write.pb.h \
+ $(NULL)
+ nodist_netdata_SOURCES = $(BUILT_SOURCES)
+
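+# Generate the protobuf C++ sources for the remote write exporter from the
+# .proto definition; $(PROTOC) is the protoc compiler found by configure.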
+exporting/prometheus/remote_write/remote_write.pb.cc \
+exporting/prometheus/remote_write/remote_write.pb.h: exporting/prometheus/remote_write/remote_write.proto
+ $(PROTOC) --proto_path=$(srcdir) --cpp_out=$(builddir) $^
+
+endif
+
+if ENABLE_BACKEND_MONGODB
+ netdata_SOURCES += $(MONGODB_BACKEND_FILES) $(MONGODB_EXPORTING_FILES)
+ netdata_LDADD += $(OPTIONAL_MONGOC_LIBS)
+endif
+
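+# The unit-test drivers below are built and run through Automake's standard
+# "make check" target. A minimal sketch (the configure switch name is an
+# assumption):
+#   ./configure --enable-unit-tests && make check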
+if ENABLE_UNITTESTS
+ check_PROGRAMS = \
+ libnetdata/tests/str2ld_testdriver \
+ libnetdata/storage_number/tests/storage_number_testdriver \
+ exporting/tests/exporting_engine_testdriver \
+ web/api/tests/web_api_testdriver \
+ web/api/tests/valid_urls_testdriver \
+    collectors/cgroups.plugin/tests/cgroups_testdriver \
+ $(NULL)
+
+ TESTS = $(check_PROGRAMS)
+
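+# -Wl,--wrap=SYMBOL redirects every call to SYMBOL into __wrap_SYMBOL (the
+# original remains reachable as __real_SYMBOL), letting the test drivers
+# substitute mock implementations for the wrapped functions.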
+ web_api_tests_valid_urls_testdriver_LDFLAGS = \
+ -Wl,--wrap=rrdhost_find_by_hostname \
+ -Wl,--wrap=finished_web_request_statistics \
+ -Wl,--wrap=config_get \
+ -Wl,--wrap=web_client_api_request_v1 \
+ -Wl,--wrap=rrdhost_find_by_guid \
+ -Wl,--wrap=rrdset_find_byname \
+ -Wl,--wrap=rrdset_find \
+ -Wl,--wrap=rrdpush_receiver_thread_spawn \
+ -Wl,--wrap=debug_int \
+ -Wl,--wrap=error_int \
+ -Wl,--wrap=info_int \
+ -Wl,--wrap=fatal_int \
+ -Wl,--wrap=mysendfile \
+ -DREMOVE_MYSENDFILE \
+ $(TEST_LDFLAGS) \
+ $(NULL)
+ web_api_tests_valid_urls_testdriver_SOURCES = \
+ web/api/tests/valid_urls.c \
+ web/server/web_client.c \
+ $(LIBNETDATA_FILES) \
+ $(NULL)
+ web_api_tests_valid_urls_testdriver_LDADD = $(NETDATA_COMMON_LIBS) $(TEST_LIBS)
+
+ web_api_tests_web_api_testdriver_LDFLAGS = \
+ -Wl,--wrap=rrdhost_find_by_hostname \
+ -Wl,--wrap=finished_web_request_statistics \
+ -Wl,--wrap=config_get \
+ -Wl,--wrap=web_client_api_request_v1 \
+ -Wl,--wrap=rrdhost_find_by_guid \
+ -Wl,--wrap=rrdset_find_byname \
+ -Wl,--wrap=rrdset_find \
+ -Wl,--wrap=rrdpush_receiver_thread_spawn \
+ -Wl,--wrap=debug_int \
+ -Wl,--wrap=error_int \
+ -Wl,--wrap=info_int \
+ -Wl,--wrap=fatal_int \
+ $(TEST_LDFLAGS) \
+ $(NULL)
+ web_api_tests_web_api_testdriver_SOURCES = \
+ web/api/tests/web_api.c \
+ web/server/web_client.c \
+ $(LIBNETDATA_FILES) \
+ $(NULL)
+ web_api_tests_web_api_testdriver_LDADD = $(NETDATA_COMMON_LIBS) $(TEST_LIBS)
+
+ libnetdata_tests_str2ld_testdriver_SOURCES = \
+ libnetdata/tests/test_str2ld.c \
+ $(LIBNETDATA_FILES) \
+ $(NULL)
+ libnetdata_tests_str2ld_testdriver_LDADD = $(NETDATA_COMMON_LIBS) $(TEST_LIBS)
+
+ libnetdata_storage_number_tests_storage_number_testdriver_SOURCES = \
+ libnetdata/storage_number/tests/test_storage_number.c \
+ $(LIBNETDATA_FILES) \
+ $(NULL)
+ libnetdata_storage_number_tests_storage_number_testdriver_LDADD = $(NETDATA_COMMON_LIBS) $(TEST_LIBS)
+
+ EXPORTING_ENGINE_TEST_FILES = \
+ exporting/tests/test_exporting_engine.c \
+ exporting/tests/test_exporting_engine.h \
+ exporting/tests/exporting_fixtures.c \
+ exporting/tests/exporting_doubles.c \
+ exporting/tests/netdata_doubles.c \
+ exporting/tests/system_doubles.c \
+ $(NULL)
+ exporting_tests_exporting_engine_testdriver_SOURCES = \
+ $(EXPORTING_ENGINE_TEST_FILES) \
+ $(EXPORTING_ENGINE_FILES) \
+ $(LIBNETDATA_FILES) \
+ $(NULL)
+ exporting_tests_exporting_engine_testdriver_CFLAGS = \
+ $(AM_CFLAGS) \
+ -DUNIT_TESTING \
+ $(NULL)
+ exporting_tests_exporting_engine_testdriver_LDFLAGS = \
+ -Wl,--wrap=read_exporting_config \
+ -Wl,--wrap=init_connectors \
+ -Wl,--wrap=mark_scheduled_instances \
+ -Wl,--wrap=rrdhost_is_exportable \
+ -Wl,--wrap=rrdset_is_exportable \
+ -Wl,--wrap=exporting_calculate_value_from_stored_data \
+ -Wl,--wrap=prepare_buffers \
+ -Wl,--wrap=send_internal_metrics \
+ -Wl,--wrap=now_realtime_sec \
+ -Wl,--wrap=uv_thread_set_name_np \
+ -Wl,--wrap=uv_thread_create \
+ -Wl,--wrap=uv_mutex_lock \
+ -Wl,--wrap=uv_mutex_unlock \
+ -Wl,--wrap=uv_cond_signal \
+ -Wl,--wrap=uv_cond_wait \
+ -Wl,--wrap=strdupz \
+ -Wl,--wrap=info_int \
+ -Wl,--wrap=recv \
+ -Wl,--wrap=send \
+ -Wl,--wrap=connect_to_one_of \
+ -Wl,--wrap=create_main_rusage_chart \
+ -Wl,--wrap=send_main_rusage \
+ -Wl,--wrap=simple_connector_end_batch \
+ $(TEST_LDFLAGS) \
+ $(NULL)
+ exporting_tests_exporting_engine_testdriver_LDADD = $(NETDATA_COMMON_LIBS) $(TEST_LIBS)
+if ENABLE_BACKEND_PROMETHEUS_REMOTE_WRITE
+ exporting_tests_exporting_engine_testdriver_SOURCES += $(PROMETHEUS_REMOTE_WRITE_EXPORTING_FILES)
+ exporting_tests_exporting_engine_testdriver_LDADD += $(OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS)
+ exporting_tests_exporting_engine_testdriver_LDFLAGS += \
+ -Wl,--wrap=init_write_request \
+ -Wl,--wrap=add_host_info \
+ -Wl,--wrap=add_label \
+ -Wl,--wrap=add_metric \
+ $(NULL)
+ nodist_exporting_tests_exporting_engine_testdriver_SOURCES = $(BUILT_SOURCES)
+endif
+if ENABLE_BACKEND_KINESIS
+ exporting_tests_exporting_engine_testdriver_SOURCES += $(KINESIS_EXPORTING_FILES)
+ exporting_tests_exporting_engine_testdriver_LDADD += $(OPTIONAL_KINESIS_LIBS)
+ exporting_tests_exporting_engine_testdriver_LDFLAGS += \
+ -Wl,--wrap=aws_sdk_init \
+ -Wl,--wrap=kinesis_init \
+ -Wl,--wrap=kinesis_put_record \
+ -Wl,--wrap=kinesis_get_result \
+ $(NULL)
+endif
+if ENABLE_EXPORTING_PUBSUB
+ exporting_tests_exporting_engine_testdriver_SOURCES += $(PUBSUB_EXPORTING_FILES)
+ exporting_tests_exporting_engine_testdriver_LDADD += $(OPTIONAL_PUBSUB_LIBS)
+ exporting_tests_exporting_engine_testdriver_LDFLAGS += \
+ -Wl,--wrap=pubsub_init \
+ -Wl,--wrap=pubsub_add_message \
+ -Wl,--wrap=pubsub_publish \
+ -Wl,--wrap=pubsub_get_result \
+ $(NULL)
+endif
+if ENABLE_BACKEND_MONGODB
+ exporting_tests_exporting_engine_testdriver_SOURCES += $(MONGODB_EXPORTING_FILES)
+ exporting_tests_exporting_engine_testdriver_LDADD += $(OPTIONAL_MONGOC_LIBS)
+ exporting_tests_exporting_engine_testdriver_LDFLAGS += \
+ -Wl,--wrap=mongoc_init \
+ -Wl,--wrap=mongoc_uri_new_with_error \
+ -Wl,--wrap=mongoc_uri_get_option_as_int32 \
+ -Wl,--wrap=mongoc_uri_set_option_as_int32 \
+ -Wl,--wrap=mongoc_client_new_from_uri \
+ -Wl,--wrap=mongoc_client_set_appname \
+ -Wl,--wrap=mongoc_client_get_collection \
+ -Wl,--wrap=mongoc_uri_destroy \
+ -Wl,--wrap=mongoc_collection_insert_many \
+ $(NULL)
+endif
+
+ collectors_cgroups_plugin_tests_cgroups_testdriver_SOURCES = \
+ collectors/cgroups.plugin/tests/test_cgroups_plugin.c \
+ collectors/cgroups.plugin/tests/test_cgroups_plugin.h \
+ collectors/cgroups.plugin/tests/test_doubles.c \
+ $(CGROUPS_PLUGIN_FILES) \
+ database/rrdlabels.c \
+ database/rrd.h \
+ $(LIBNETDATA_FILES) \
+ $(NULL)
+ collectors_cgroups_plugin_tests_cgroups_testdriver_LDADD = $(NETDATA_COMMON_LIBS) $(TEST_LIBS)
+ collectors_cgroups_plugin_tests_cgroups_testdriver_LDFLAGS = \
+ -Wl,--wrap=add_label_to_list \
+ $(NULL)
+
+endif
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..33c0616
--- /dev/null
+++ b/README.md
@@ -0,0 +1,229 @@
+<p align="center"><a href="https://netdata.cloud"><img src="https://user-images.githubusercontent.com/1153921/95268672-a3665100-07ec-11eb-8078-db619486d6ad.png" alt="Netdata" width="300" /></a></p>
+
+<h3 align="center">Netdata is high-fidelity infrastructure monitoring and troubleshooting.<br />Open-source, free, preconfigured, opinionated, and always real-time.</h3>
+<br />
+<p align="center">
+ <a href="https://github.com/netdata/netdata/releases/latest"><img src="https://img.shields.io/github/release/netdata/netdata.svg" alt="Latest release"></a>
+ <a href="https://travis-ci.com/netdata/netdata"><img src="https://travis-ci.com/netdata/netdata.svg?branch=master" alt="Build status"></a>
+ <a href="https://bestpractices.coreinfrastructure.org/projects/2231"><img src="https://bestpractices.coreinfrastructure.org/projects/2231/badge" alt="CII Best Practices"></a>
+ <a href="https://www.gnu.org/licenses/gpl-3.0"><img src="https://img.shields.io/badge/License-GPL%20v3%2B-blue.svg" alt="License: GPL v3+"></a>
+ <a href="<>"><img src="https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Freadme&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3" alt="analytics"></a>
+ <br />
+ <a href="https://codeclimate.com/github/netdata/netdata"><img src="https://codeclimate.com/github/netdata/netdata/badges/gpa.svg" alt="Code Climate"></a>
+ <a href="https://www.codacy.com/app/netdata/netdata?utm_source=github.com&utm_medium=referral&utm_content=netdata/netdata&utm_campaign=Badge_Grade"><img src="https://api.codacy.com/project/badge/Grade/a994873f30d045b9b4b83606c3eb3498" alt="Codacy"></a>
+ <a href="https://lgtm.com/projects/g/netdata/netdata/context:cpp"><img src="https://img.shields.io/lgtm/grade/cpp/g/netdata/netdata.svg?logo=lgtm" alt="LGTM C"></a>
+  <a href="https://lgtm.com/projects/g/netdata/netdata/context:javascript"><img src="https://img.shields.io/lgtm/grade/javascript/g/netdata/netdata.svg?logo=lgtm" alt="LGTM JS"></a>
+ <a href="https://lgtm.com/projects/g/netdata/netdata/context:python"><img src="https://img.shields.io/lgtm/grade/python/g/netdata/netdata.svg?logo=lgtm" alt="LGTM PYTHON"></a>
+</p>
+
+<img src="https://user-images.githubusercontent.com/1153921/95269366-1b814680-07ee-11eb-8ff4-c1b0b8758499.png" alt="---" style="max-width: 100%;" />
+
+Netdata's **distributed, real-time monitoring Agent** collects thousands of metrics from systems, hardware, containers,
+and applications with zero configuration. It runs permanently on all your physical/virtual servers, containers, cloud
+deployments, and edge/IoT devices, and is perfectly safe to install on your systems mid-incident without any
+preparation.
+
+You can install Netdata on most Linux distributions (Ubuntu, Debian, CentOS, and more), container platforms (Kubernetes
+clusters, Docker), and many other operating systems (FreeBSD, macOS). No `sudo` required.
+
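+To get a sense of the installation flow, the one-line kickstart script that
+ships in this repo (`packaging/installer/kickstart.sh`) is typically invoked
+like this (a sketch; see [Get Netdata](#get-netdata) for the authoritative
+command):
+
+```bash
+# Download and run the kickstart installer (URL as commonly documented).
+bash <(curl -Ss https://my-netdata.io/kickstart.sh)
+```
+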
+Netdata is designed by system administrators, DevOps engineers, and developers to collect everything, help you visualize
+metrics, troubleshoot complex performance problems, and make data interoperable with the rest of your monitoring stack.
+
+People get addicted to Netdata. Once you use it on your systems, there's no going back! _You've been warned..._
+
+![Users who are addicted to
+Netdata](https://user-images.githubusercontent.com/1153921/96495792-2e881380-11fd-11eb-85a3-53d3a84dcb29.png)
+
+> **[Latest release](https://github.com/netdata/netdata/releases/latest): v1.29.0, February 2, 2021**
+>
+> The v1.29.0 release of the Netdata Agent is a maintenance release that brings incremental but necessary improvements
+> that make your monitoring experience more robust. We've pushed improvements and bug fixes to the installation and
+> update scripts, enriched our library of collectors, and focused on fixing bugs reported by the community.
+
+## Menu
+
+- [Features](#features)
+- [Get Netdata](#get-netdata)
+- [How it works](#how-it-works)
+- [Documentation](#documentation)
+- [Community](#community)
+- [Contribute](#contribute)
+- [License](#license)
+- [Is it any good?](#is-it-any-good)
+
+## Features
+
+![Netdata in
+action](https://user-images.githubusercontent.com/1153921/101513938-fae28380-3939-11eb-9434-8ad86a39be62.gif)
+
+Here's what you can expect from Netdata:
+
+- **1s granularity**: The highest possible resolution for all metrics.
+- **Unlimited metrics**: Netdata collects all the available metrics—the more, the better.
+- **1% CPU utilization of a single core**: It's unbelievably optimized.
+- **A few MB of RAM**: The highly-efficient database engine stores per-second metrics in RAM and then "spills"
+  historical metrics to disk for long-term storage.
+- **Minimal disk I/O**: While running, Netdata only writes historical metrics and reads `error` and `access` logs.
+- **Zero configuration**: Netdata auto-detects everything, and can collect up to 10,000 metrics per server out of the
+ box.
+- **Zero maintenance**: You just run it. Netdata does the rest.
+- **Stunningly fast, interactive visualizations**: The dashboard responds to queries in less than 1ms per metric to
+ synchronize charts as you pan through time, zoom in on anomalies, and more.
+- **Visual anomaly detection**: Our UI/UX emphasizes the relationships between charts to help you detect the root
+ cause of anomalies.
+- **Scales to infinity**: You can install it on all your servers, containers, VMs, and IoT devices. Metrics are not
+ centralized by default, so there is no limit.
+- **Several operating modes**: Autonomous host monitoring (the default), headless data collector, forwarding proxy,
+ store and forward proxy, central multi-host monitoring, in all possible configurations. Use different metrics
+ retention policies per node and run with or without health monitoring.
+
+Netdata works with tons of applications, notifications platforms, and other time-series databases:
+
+- **300+ system, container, and application endpoints**: Collectors autodetect metrics from default endpoints and
+ immediately visualize them into meaningful charts designed for troubleshooting. See [everything we
+ support](https://learn.netdata.cloud/docs/agent/collectors/collectors).
+- **20+ notification platforms**: Netdata's health watchdog sends warning and critical alarms to your [favorite
+ platform](https://learn.netdata.cloud/docs/monitor/enable-notifications) to inform you of anomalies just seconds
+ after they affect your node.
+- **30+ external time-series databases**: Export resampled metrics as they're collected to other [local- and
+ Cloud-based databases](https://learn.netdata.cloud/docs/export/external-databases) for best-in-class
+ interoperability.
+
+> 💡 **Want to leverage the monitoring power of Netdata across your entire infrastructure**? View metrics from
+> any number of distributed nodes in a single interface and unlock even more
+> [features](https://learn.netdata.cloud/docs/overview/why-netdata) with [Netdata
+> Cloud](https://learn.netdata.cloud/docs/overview/what-is-netdata#netdata-cloud).
+
+## Get Netdata
+
+<p align="center">
+ <a href="https://registry.my-netdata.io/#menu_netdata_submenu_registry"><img src="https://registry.my-netdata.io/api/v1/badge.svg?chart=netdata.registry_entries&dimensions=persons&label=user%20base&units=M&value_color=blue&precision=2&divide=1000000&v43" alt="User base"></a>
+ <a href="https://registry.my-netdata.io/#menu_netdata_submenu_registry"><img src="https://registry.my-netdata.io/api/v1/badge.svg?chart=netdata.registry_entries&dimensions=machines&label=servers%20monitored&units=k&divide=1000&value_color=orange&precision=2&v43" alt="Servers monitored"></a>
+ <a href="https://registry.my-netdata.io/#menu_netdata_submenu_registry"><img src="https://registry.my-netdata.io/api/v1/badge.svg?chart=netdata.registry_sessions&label=sessions%20served&units=M&value_color=yellowgreen&precision=2&divide=1000000&v43" alt="Sessions served"></a>
+ <a href="https://hub.docker.com/r/netdata/netdata"><img src="https://frankfurt.my-netdata.io/api/v1/badge.svg?chart=dockerhub.pulls_sum&divide=1000000&precision=1&units=M&label=docker+hub+pulls" alt="Docker Hub pulls"></a>
+ <br />
+ <a href="https://registry.my-netdata.io/#menu_netdata_submenu_registry"><img src="https://registry.my-netdata.io/api/v1/badge.svg?chart=netdata.registry_entries&dimensions=persons&after=-86400&options=unaligned&group=incremental-sum&label=new%20users%20today&units=null&value_color=blue&precision=0&v42" alt="New users today"></a>
+ <a href="https://registry.my-netdata.io/#menu_netdata_submenu_registry"><img src="https://registry.my-netdata.io/api/v1/badge.svg?chart=netdata.registry_entries&dimensions=machines&group=incremental-sum&after=-86400&options=unaligned&label=servers%20added%20today&units=null&value_color=orange&precision=0&v42" alt="New machines today"></a>
+ <a href="https://registry.my-netdata.io/#menu_netdata_submenu_registry"><img src="https://registry.my-netdata.io/api/v1/badge.svg?chart=netdata.registry_sessions&after=-86400&group=incremental-sum&options=unaligned&label=sessions%20served%20today&units=null&value_color=yellowgreen&precision=0&v42" alt="Sessions today"></a>
+ <a href="https://hub.docker.com/r/netdata/netdata"><img src="https://frankfurt.my-netdata.io/api/v1/badge.svg?chart=dockerhub.pulls_sum&divide=1000&precision=1&units=k&label=docker+hub+pulls&after=-86400&group=incremental-sum&label=docker%20hub%20pulls%20today" alt="Docker Hub pulls today"></a>
+</p>
+
+To install Netdata from source on most Linux systems (physical, virtual, container, IoT, edge), run our [one-line
+installation script](https://learn.netdata.cloud/docs/agent/packaging/installer/methods/packages). This script downloads
+and builds all dependencies, including those required to connect to [Netdata Cloud](https://netdata.cloud/cloud) if you
+choose, and enables [automatic nightly
+updates](https://learn.netdata.cloud/docs/agent/packaging/installer#nightly-vs-stable-releases) and [anonymous
+statistics](https://learn.netdata.cloud/docs/agent/anonymous-statistics).
+
+```bash
+bash <(curl -Ss https://my-netdata.io/kickstart.sh)
+```
+
+To view the Netdata dashboard, navigate to `http://localhost:19999`, or `http://NODE:19999`.
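+
+If you'd like to verify that the Agent is running before opening the dashboard, you can query its REST API
+(`/api/v1/info` returns a JSON document describing the node):
+
+```bash
+# A quick sanity check against a locally running Agent
+curl http://localhost:19999/api/v1/info
+```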
+
+### Docker
+
+You can also try out Netdata's capabilities in a [Docker
+container](https://learn.netdata.cloud/docs/agent/packaging/docker/):
+
+```bash
+docker run -d --name=netdata \
+ -p 19999:19999 \
+ -v netdataconfig:/etc/netdata \
+ -v netdatalib:/var/lib/netdata \
+ -v netdatacache:/var/cache/netdata \
+ -v /etc/passwd:/host/etc/passwd:ro \
+ -v /etc/group:/host/etc/group:ro \
+ -v /proc:/host/proc:ro \
+ -v /sys:/host/sys:ro \
+ -v /etc/os-release:/host/etc/os-release:ro \
+ --restart unless-stopped \
+ --cap-add SYS_PTRACE \
+ --security-opt apparmor=unconfined \
+ netdata/netdata
+```
+
+To view the Netdata dashboard, navigate to `http://localhost:19999`, or `http://NODE:19999`.
+
+### Other operating systems
+
+See our documentation for [additional operating
+systems](/packaging/installer/README.md#have-a-different-operating-system-or-want-to-try-another-method), including
+[Kubernetes](/packaging/installer/methods/kubernetes.md), [`.deb`/`.rpm`
+packages](/packaging/installer/methods/packages.md), and more.
+
+### Post-installation
+
+When you're finished with installation, check out our [single-node](/docs/quickstart/single-node.md) or
+[infrastructure](/docs/quickstart/infrastructure.md) monitoring quickstart guides based on your use case.
+
+Or, skip straight to [configuring the Netdata Agent](/docs/configure/nodes.md).
+
+Read through Netdata's [documentation](https://learn.netdata.cloud/docs), which is structured based on actions and
+solutions, to enable features like health monitoring, alarm notifications, long-term metrics storage, exporting to
+external databases, and more.
+
+## How it works
+
+Netdata is a highly efficient, highly modular, metrics management engine. Its lockless design makes it ideal for
+concurrent operations on the metrics.
+
+![Diagram of Netdata's core
+functionality](https://user-images.githubusercontent.com/1153921/95367248-5f755980-0889-11eb-827f-9b7aa02a556e.png)
+
+The result is a highly efficient, low-latency system, supporting multiple readers and one writer on each metric.
+
+## Infographic
+
+This is a high-level overview of Netdata features and architecture. Click on it to view an interactive version, which
+has links to our documentation.
+
+[![An infographic of how Netdata
+works](https://user-images.githubusercontent.com/43294513/60951037-8ba5d180-a2f8-11e9-906e-e27356f168bc.png)](https://my-netdata.io/infographic.html)
+
+## Documentation
+
+Netdata's documentation is available at [**Netdata Learn**](https://learn.netdata.cloud).
+
+This site also hosts a number of [guides](https://learn.netdata.cloud/guides) to help newer users better understand how
+to collect metrics, troubleshoot via charts, export to external databases, and more.
+
+## Community
+
+You can find most of the Netdata team in our [community forum](https://community.netdata.cloud). It's the best place to
+ask questions, find resources, and get to know the Netdata team.
+
+You can also find Netdata on:
+
+- [Facebook](https://www.facebook.com/linuxnetdata/)
+- [Twitter](https://twitter.com/linuxnetdata)
+- [StackShare](https://stackshare.io/netdata)
+- [Product Hunt](https://www.producthunt.com/posts/netdata-monitoring-agent/)
+- [Repology](https://repology.org/metapackage/netdata/versions)
+
+## Contribute
+
+We welcome [contributions](/CONTRIBUTING.md) to our code and to our
+[documentation](/docs/contributing/contributing-documentation.md). Feel free to join the team!
+
+To report bugs or get help, use [GitHub's issues](https://github.com/netdata/netdata/issues).
+
+Package maintainers should read the guide on [building Netdata from source](/packaging/installer/methods/source.md) for
+instructions on building each Netdata component from source and preparing a package.
+
+## License
+
+The Netdata Agent is [GPLv3+](/LICENSE). Netdata re-distributes other open-source tools and libraries. Please check the
+[third party licenses](/REDISTRIBUTED.md).
+
+## Is it any good?
+
+Yes.
+
+_When people first hear about a new product, they frequently ask if it is any good. A Hacker News user
+[remarked](https://news.ycombinator.com/item?id=3067434):_
+
+> Note to self: Starting immediately, all raganwald projects will have a “Is it any good?” section in the readme, and
+> the answer shall be “yes.”
+
+So, we follow the tradition...
diff --git a/REDISTRIBUTED.md b/REDISTRIBUTED.md
new file mode 100644
index 0000000..fb256b5
--- /dev/null
+++ b/REDISTRIBUTED.md
@@ -0,0 +1,183 @@
+<!--
+title: "Redistributed software"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/REDISTRIBUTED.md
+-->
+
+# Redistributed software
+
+Netdata copyright info:
+ Copyright 2016-2018, Costa Tsaousis.
+ Copyright 2018, Netdata Inc.
+ Released under [GPL v3 or later](https://raw.githubusercontent.com/netdata/netdata/master/LICENSE).
+
+Netdata uses SPDX license tags to identify the license for its files.
+Individual licenses referenced in the tags are available on the [SPDX project site](http://spdx.org/licenses/).
+
+Netdata redistributes the following third-party software.
+We have decided to redistribute all these, instead of using them
+through a CDN, to allow Netdata to work in cases where Internet
+connectivity is not available.
+
+- [Dygraphs](http://dygraphs.com/)
+
+ Copyright 2009, Dan Vanderkam
+ [MIT License](http://dygraphs.com/legal.html)
+
+- [Easy Pie Chart](https://rendro.github.io/easy-pie-chart/)
+
+ Copyright 2013, Robert Fleischmann
+ [MIT License](https://github.com/rendro/easy-pie-chart/blob/master/LICENSE)
+
+- [Gauge.js](http://bernii.github.io/gauge.js/)
+
+ Copyright, Bernard Kobos
+ [MIT License](https://github.com/getgauge/gauge-js/blob/master/LICENSE)
+
+- [d3pie](https://github.com/benkeen/d3pie)
+
+ Copyright (c) 2014-2015 Benjamin Keen
+ [MIT License](https://github.com/benkeen/d3pie/blob/master/LICENSE)
+
+- [jQuery Sparklines](http://omnipotent.net/jquery.sparkline/)
+
+ Copyright 2009-2012, Splunk Inc.
+ [New BSD License](http://opensource.org/licenses/BSD-3-Clause)
+
+- [Peity](http://benpickles.github.io/peity/)
+
+ Copyright 2009-2015, Ben Pickles
+ [MIT License](https://github.com/benpickles/peity/blob/master/LICENCE)
+
+- [morris.js](http://morrisjs.github.io/morris.js/)
+
+ Copyright 2013, Olly Smith
+ [Simplified BSD License](http://morrisjs.github.io/morris.js/)
+
+- [Raphaël](http://dmitrybaranovskiy.github.io/raphael/)
+
+ Copyright 2008, Dmitry Baranovskiy
+ [MIT License](http://dmitrybaranovskiy.github.io/raphael/license.html)
+
+- [C3](http://c3js.org/)
+
+ Copyright 2013, Masayuki Tanaka
+ [MIT License](https://github.com/masayuki0812/c3/blob/master/LICENSE)
+
+- [D3](http://d3js.org/)
+
+ Copyright 2015, Mike Bostock
+ [BSD License](http://opensource.org/licenses/BSD-3-Clause)
+
+- [jQuery](https://jquery.org/)
+
+ Copyright 2015, jQuery Foundation
+ [MIT License](https://jquery.org/license/)
+
+- [Bootstrap](http://getbootstrap.com/getting-started/)
+
+ Copyright 2015, Twitter
+ [MIT License](https://github.com/twbs/bootstrap/blob/v4-dev/LICENSE)
+
+- [Bootstrap Toggle](http://www.bootstraptoggle.com/)
+
+ Copyright (c) 2011-2014 Min Hur, The New York Times Company
+ [MIT License](https://github.com/minhur/bootstrap-toggle/blob/master/LICENSE)
+
+- [Bootstrap-slider](http://seiyria.com/bootstrap-slider/)
+
+ Copyright 2017 Kyle Kemp, Rohit Kalkur, and contributors
+ [MIT License](https://github.com/seiyria/bootstrap-slider/blob/master/LICENSE.md)
+
+- [bootstrap-table](http://bootstrap-table.wenzhixin.net.cn/)
+
+ Copyright (c) 2012-2016 Zhixin Wen [wenzhixin2010@gmail.com](mailto:wenzhixin2010@gmail.com)
+ [MIT License](https://github.com/wenzhixin/bootstrap-table/blob/master/LICENSE)
+
+- [tableExport.jquery.plugin](https://github.com/hhurz/tableExport.jquery.plugin)
+
+ Copyright (c) 2015,2016 hhurz
+ [MIT License](https://github.com/hhurz/tableExport.jquery.plugin/blob/master/LICENSE)
+
+- [perfect-scrollbar](https://github.com/noraesae/perfect-scrollbar)
+
+ Copyright 2016, Hyunje Alex Jun and other contributors
+ [MIT License](https://github.com/noraesae/perfect-scrollbar/blob/master/LICENSE)
+
+- [FontAwesome](https://fortawesome.github.io/Font-Awesome/)
+
+ Created by Dave Gandy
+ Font license: [SIL OFL 1.1](http://scripts.sil.org/OFL)
+ Icon license [Creative Commons Attribution 4.0 (CC-BY 4.0)](https://creativecommons.org/licenses/by/4.0/)
+ Code license: [MIT License](http://opensource.org/licenses/mit-license.html)
+
+- [node-extend](https://github.com/justmoon/node-extend)
+
+ Copyright 2014, Stefan Thomas
+ [MIT License](https://github.com/justmoon/node-extend/blob/master/LICENSE)
+
+- [node-net-snmp](https://github.com/stephenwvickers/node-net-snmp)
+
+ Copyright 2013, Stephen Vickers
+ [MIT License](https://github.com/nospaceships/node-net-snmp#license)
+
+- [node-asn1-ber](https://github.com/stephenwvickers/node-asn1-ber)
+
+ Copyright 2017, Stephen Vickers
+ Copyright 2011, Mark Cavage
+ [MIT License](https://github.com/nospaceships/node-asn1-ber#license)
+
+- [pixl-xml](https://github.com/jhuckaby/pixl-xml)
+
+ Copyright 2015, Joseph Huckaby
+ [MIT License](https://github.com/jhuckaby/pixl-xml#license)
+
+- [sensors](https://github.com/paroj/sensors.py)
+
+ Copyright 2014, Pavel Rojtberg
+ [LGPL 2.1 License](http://opensource.org/licenses/LGPL-2.1)
+
+- [PyYAML](https://github.com/yaml/pyyaml)
+
+ Copyright 2006, Kirill Simonov
+ [MIT License](https://github.com/yaml/pyyaml/blob/master/LICENSE)
+
+- [urllib3](https://github.com/shazow/urllib3)
+
+ Copyright 2008-2016 Andrey Petrov and [contributors](https://github.com/shazow/urllib3/blob/master/CONTRIBUTORS.txt)
+ [MIT License](https://github.com/shazow/urllib3/blob/master/LICENSE.txt)
+
+- [lz-string](http://pieroxy.net/blog/pages/lz-string/index.html)
+
+ Copyright 2013 Pieroxy
+ [WTFPL License](http://pieroxy.net/blog/pages/lz-string/index.html#inline_menu_10)
+
+- [pako](http://nodeca.github.io/pako/)
+
+ Copyright 2014-2017 Vitaly Puzrin and Andrei Tuputcyn
+ [MIT License](https://github.com/nodeca/pako/blob/master/LICENSE)
+
+- [clipboard-polyfill](https://github.com/lgarron/clipboard-polyfill)
+
+ Copyright (c) 2014 Lucas Garron
+ [MIT License](https://github.com/lgarron/clipboard-polyfill/blob/master/LICENSE.md)
+
+- [Utilities for writing code that runs on Python 2 and 3](https://raw.githubusercontent.com/netdata/netdata/master/collectors/python.d.plugin/python_modules/urllib3/packages/six.py)
+
+ Copyright (c) 2010-2015 Benjamin Peterson
+ [MIT License](https://github.com/benjaminp/six/blob/master/LICENSE)
+
+- [mcrcon](https://github.com/barneygale/MCRcon)
+
+ Copyright (C) 2015 Barnaby Gale
+ [MIT License](https://raw.githubusercontent.com/barneygale/MCRcon/master/COPYING.txt)
+
+- [monotonic](https://github.com/atdt/monotonic)
+
+ Copyright 2014, 2015, 2016 Ori Livneh [ori@wikimedia.org](mailto:ori@wikimedia.org)
+ [Apache-2.0](http://www.apache.org/licenses/LICENSE-2.0)
+
+- [filelock](https://github.com/benediktschmitt/py-filelock)
+
+ Copyright 2015, Benedikt Schmitt
+ [Unlicense License](https://unlicense.org/)
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2FREDISTRIBUTED&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/SECURITY.md b/SECURITY.md
new file mode 100644
index 0000000..e823e22
--- /dev/null
+++ b/SECURITY.md
@@ -0,0 +1,51 @@
+<!--
+title: "Security Policy"
+description: "The Netdata team maintains and adheres to a formal process any time a member of the community reports a security vulnerability."
+custom_edit_url: https://github.com/netdata/netdata/edit/master/SECURITY.md
+-->
+
+# Security Policy
+
+## Supported Versions
+
+| Version | Supported |
+| ------- | --------- |
+| Latest | Yes |
+
+## Reporting a Vulnerability
+
+We're extremely grateful to security researchers and users who report vulnerabilities to the Netdata Open Source Community. All reports are thoroughly investigated by a set of community volunteers.
+
+To make a report, please send an email to **security@netdata.cloud** with
+the vulnerability details and the details expected for [all Netdata bug
+reports](https://github.com/netdata/netdata/blob/c1f4c6cf503995cd4d896c5821b00d55afcbde87/.github/ISSUE_TEMPLATE/bug_report.md).
+
+### When Should I Report a Vulnerability?
+
+- You think you discovered a potential security vulnerability in Netdata
+- You are unsure how a vulnerability affects Netdata
+- You think you discovered a vulnerability in another project that Netdata depends on (e.g., Python, Node.js, etc.)
+
+### When Should I NOT Report a Vulnerability?
+
+- You need help tuning Netdata for security
+- You need help applying security related updates
+- Your issue is not security related
+
+### Security Vulnerability Response
+
+Each report is acknowledged and analyzed by Netdata Team members within 3 working days. This will set off a Security Release Process.
+
+Any vulnerability information shared with the Netdata Team stays within the Netdata project and will not be disseminated to other projects unless it is necessary to get the issue fixed.
+
+As the security issue moves from triage, to identified fix, to release planning we will keep the reporter updated.
+
+### Public Disclosure Timing
+
+A public disclosure date is negotiated by the Netdata team and the bug submitter. We prefer to fully disclose the bug as soon as possible once a user mitigation is available. It is reasonable to delay disclosure when the bug or the fix is not yet fully understood, the solution is not well-tested, or for vendor coordination. The timeframe for disclosure is from immediate (especially if it's already publicly known) to a few weeks. As a basic default, we expect report date to disclosure date to be on the order of 7 days. The Netdata team holds the final say when setting a disclosure date.
+
+### Security Announcements
+
+Every time a security issue is fixed in Netdata, we immediately release a new version. To get notified of all security incidents, please subscribe to our releases on GitHub.
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2FSECURITY&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/aclk/README.md b/aclk/README.md
new file mode 100644
index 0000000..e279f01
--- /dev/null
+++ b/aclk/README.md
@@ -0,0 +1,151 @@
+<!--
+title: "Agent-Cloud link (ACLK)"
+description: "The Agent-Cloud link (ACLK) is the mechanism responsible for connecting a Netdata agent to Netdata Cloud."
+date: 2020-05-11
+custom_edit_url: https://github.com/netdata/netdata/edit/master/aclk/README.md
+-->
+
+# Agent-Cloud link (ACLK)
+
+The Agent-Cloud link (ACLK) is the mechanism responsible for securely connecting a Netdata Agent to your web browser
+through Netdata Cloud. The ACLK establishes an outgoing secure WebSocket (WSS) connection to Netdata Cloud on port
+`443`. The ACLK is encrypted, safe, and _is only established if you claim your node_.
+
+The Cloud App lives at `app.netdata.cloud`, which currently resolves to `35.196.244.138`. However, this IP or range of
+IPs can change without notice. Watch this page for updates.
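+
+If you need to check that a node can reach Netdata Cloud (for example, from behind a restrictive firewall),
+verifying DNS resolution and outbound TLS on port `443` is usually enough. A minimal sketch, assuming `curl`
+is available on the node:
+
+```bash
+# Resolve the Cloud endpoint, then confirm an outbound TLS connection on port 443
+getent hosts app.netdata.cloud
+curl -sSI https://app.netdata.cloud >/dev/null && echo "port 443 reachable"
+```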
+
+For a guide to claiming a node using the ACLK, plus additional troubleshooting and reference information, read our [get
+started with Cloud](https://learn.netdata.cloud/docs/cloud/get-started) guide or the full [claiming
+documentation](/claim/README.md).
+
+## Data privacy
+
+Privacy is very important to us. We firmly believe that your data belongs to you. This is why **we don't store any metric data in Netdata Cloud**.
+
+All the data you see in your web browser when using Netdata Cloud is streamed directly from the Netdata Agent to the Netdata Cloud dashboard. The data passes through our systems, but it is not stored.
+
+We do, however, store a limited amount of *metadata* to be able to offer the stunning visualizations and advanced functionality of Netdata Cloud.
+
+### Metadata
+
+The information we store in Netdata Cloud is the following (using the publicly available demo server `frankfurt.my-netdata.io` as an example):
+
+- The email address you used to sign up or sign in
+- For each node claimed to your Spaces in Netdata Cloud:
+ - Hostname (as it appears in Netdata Cloud)
+ - Information shown in `/api/v1/info`. For example: [https://frankfurt.my-netdata.io/api/v1/info](https://frankfurt.my-netdata.io/api/v1/info).
+ - The chart metadata shown in `/api/v1/charts`. For example: [https://frankfurt.my-netdata.io/api/v1/charts](https://frankfurt.my-netdata.io/api/v1/charts).
+ - Alarm configurations shown in `/api/v1/alarms?all`. For example: [https://frankfurt.my-netdata.io/api/v1/alarms?all](https://frankfurt.my-netdata.io/api/v1/alarms?all).
+ - Active alarms shown in `/api/v1/alarms`. For example: [https://frankfurt.my-netdata.io/api/v1/alarms](https://frankfurt.my-netdata.io/api/v1/alarms).
+
+How we use them:
+
+- The data is stored in our production database on Google Cloud, and some of it is also used in BigQuery, our data lake, for analytics purposes. These analytics are crucial for our product development process.
+- Your email address is used to identify you in relation to product use and to enrich our tools, such as our CRM, with product-use data.
+- This data is only available to Netdata and never to a third party.
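+
+You can inspect exactly what this metadata looks like for your own node by querying the same endpoints on the
+Agent's local API, for example:
+
+```bash
+# Node information and chart metadata, as stored by Netdata Cloud
+curl http://localhost:19999/api/v1/info
+curl http://localhost:19999/api/v1/charts
+# Alarm configurations and currently raised alarms
+curl "http://localhost:19999/api/v1/alarms?all"
+curl http://localhost:19999/api/v1/alarms
+```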
+
+## Enable and configure the ACLK
+
+The ACLK is enabled by default, with its settings automatically configured and stored in the Agent's memory. No file is
+created at `/var/lib/netdata/cloud.d/cloud.conf` until you either claim a node or create it yourself. The default
+configuration uses two settings:
+
+```conf
+[global]
+ enabled = yes
+ cloud base url = https://app.netdata.cloud
+```
+
+If your Agent needs to use a proxy to access the internet, you must [set up a proxy for
+claiming](/claim/README.md#claim-through-a-proxy).
+
+You can configure the following keys in the `[cloud]` section of `netdata.conf`:
+
+```conf
+[cloud]
+ statistics = yes
+ query thread count = 2
+```
+
+- `statistics` enables/disables ACLK-related statistics and their charts. You can disable this to save some space in the database and slightly reduce the memory usage of the Netdata Agent.
+- `query thread count` specifies the number of threads used to process Cloud queries. Increasing this setting is useful for nodes with many children (streaming), which can be expected to handle more (and/or more complicated) queries.
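+
+For example, to turn off the ACLK statistics charts on a small node (a sketch, assuming the default
+`/etc/netdata` configuration directory and the `edit-config` helper shipped with the Agent):
+
+```bash
+cd /etc/netdata
+sudo ./edit-config netdata.conf
+# then set, under the [cloud] section:
+#   statistics = no
+```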
+
+## Disable the ACLK
+
+You have two options if you prefer to disable the ACLK and not use Netdata Cloud.
+
+### Disable at installation
+
+You can pass the `--disable-cloud` parameter to the Agent installation when using a kickstart script
+([kickstart.sh](/packaging/installer/methods/kickstart.md) or
+[kickstart-static64.sh](/packaging/installer/methods/kickstart-64.md)), or a [manual installation from
+Git](/packaging/installer/methods/manual.md).
+
+When you pass this parameter, the installer does not download or compile any extra libraries. Once running, the Agent
+kills the thread responsible for the ACLK and claiming behavior, and behaves as though the ACLK, and thus Netdata Cloud,
+does not exist.
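+
+For example, using the one-line kickstart installer (a sketch; parameters are passed through to the script):
+
+```bash
+bash <(curl -Ss https://my-netdata.io/kickstart.sh) --disable-cloud
+```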
+
+### Disable at runtime
+
+You can change a runtime setting in your `cloud.conf` file to disable the ACLK. This setting only stops the Agent from
+attempting any connection via the ACLK, but does not prevent the installer from downloading and compiling the ACLK's
+dependencies.
+
+The file typically exists at `/var/lib/netdata/cloud.d/cloud.conf`, but can change if you set a prefix during
+installation. To disable the ACLK, open that file and change the `enabled` setting to `no`:
+
+```conf
+[global]
+ enabled = no
+```
+
+If the file at `/var/lib/netdata/cloud.d/cloud.conf` doesn't exist, you need to create it.
+
+Copy and paste the first two lines below, which move into the directory and start a heredoc that writes to `cloud.conf`:
+
+```bash
+cd /var/lib/netdata/cloud.d
+cat > cloud.conf << EOF
+```
+
+Then copy and paste the following lines. The final line must contain only `EOF`; hit **Enter** after it to return to your normal prompt with the newly-created file.
+
+```bash
+[global]
+ enabled = no
+ cloud base url = https://app.netdata.cloud
+EOF
+```
+
+You also need to change the file's permissions. Use `grep "run as user" /etc/netdata/netdata.conf` to figure out which
+user your Agent runs as (typically `netdata`), and replace `netdata:netdata` as shown below if necessary:
+
+```bash
+sudo chmod 0770 cloud.conf
+sudo chown netdata:netdata cloud.conf
+```
+
+Restart your Agent to disable the ACLK.
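+
+On most systemd-based systems, that means:
+
+```bash
+sudo systemctl restart netdata
+```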
+
+### Re-enable the ACLK
+
+If you first disable the ACLK and any Cloud functionality and then decide you would like to use Cloud, you must either
+[reinstall Netdata](/packaging/installer/REINSTALL.md) with Cloud enabled or change the runtime setting in your
+`cloud.conf` file.
+
+If you passed `--disable-cloud` to `netdata-installer.sh` during installation, you must
+[reinstall](/packaging/installer/REINSTALL.md) your Agent. Use the same method as before, but pass `--require-cloud` to
+the installer. When installation finishes you can [claim your node](/claim/README.md#how-to-claim-a-node).
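+
+For example, re-running the kickstart installer with Cloud support required (a sketch using the same one-line
+installer as above):
+
+```bash
+bash <(curl -Ss https://my-netdata.io/kickstart.sh) --require-cloud
+```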
+
+If you changed the runtime setting in your `/var/lib/netdata/cloud.d/cloud.conf` file, edit the file again and change
+`enabled` to `yes`:
+
+```conf
+[global]
+ enabled = yes
+```
+
+Restart your Agent and [claim your node](/claim/README.md#how-to-claim-a-node).
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Faclk%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
+
diff --git a/aclk/legacy/Makefile.am b/aclk/legacy/Makefile.am
new file mode 100644
index 0000000..1cd876b
--- /dev/null
+++ b/aclk/legacy/Makefile.am
@@ -0,0 +1,19 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+CLEANFILES = \
+ tests/install-fake-charts.d.sh \
+ $(NULL)
+
+include $(top_srcdir)/build/subst.inc
+SUFFIXES = .in
+
+#sbin_SCRIPTS = \
+# tests/install-fake-charts.d.sh \
+# $(NULL)
+
+dist_noinst_SCRIPTS = tests/install-fake-charts.d.sh
+dist_noinst_DATA = tests/install-fake-charts.d.sh.in
+
diff --git a/aclk/legacy/aclk_common.c b/aclk/legacy/aclk_common.c
new file mode 100644
index 0000000..d7188b1
--- /dev/null
+++ b/aclk/legacy/aclk_common.c
@@ -0,0 +1,259 @@
+#include "aclk_common.h"
+
+#include "../../daemon/common.h"
+
+#ifdef ENABLE_ACLK
+#include <libwebsockets.h>
+#endif
+
+netdata_mutex_t aclk_shared_state_mutex = NETDATA_MUTEX_INITIALIZER;
+
+int aclk_disable_runtime = 0;
+int aclk_kill_link = 0;
+
+struct aclk_shared_state aclk_shared_state = {
+ .version_neg = 0,
+ .version_neg_wait_till = 0
+};
+
+struct {
+ ACLK_PROXY_TYPE type;
+ const char *url_str;
+} supported_proxy_types[] = {
+ { .type = PROXY_TYPE_SOCKS5, .url_str = "socks5" ACLK_PROXY_PROTO_ADDR_SEPARATOR },
+ { .type = PROXY_TYPE_SOCKS5, .url_str = "socks5h" ACLK_PROXY_PROTO_ADDR_SEPARATOR },
+ { .type = PROXY_TYPE_HTTP, .url_str = "http" ACLK_PROXY_PROTO_ADDR_SEPARATOR },
+ { .type = PROXY_TYPE_UNKNOWN, .url_str = NULL },
+};
+
+const char *aclk_proxy_type_to_s(ACLK_PROXY_TYPE *type)
+{
+ switch (*type) {
+ case PROXY_DISABLED:
+ return "disabled";
+ case PROXY_TYPE_HTTP:
+ return "HTTP";
+ case PROXY_TYPE_SOCKS5:
+ return "SOCKS";
+ default:
+ return "Unknown";
+ }
+}
+
+static inline ACLK_PROXY_TYPE aclk_find_proxy(const char *string)
+{
+ int i = 0;
+ while (supported_proxy_types[i].url_str) {
+ if (!strncmp(supported_proxy_types[i].url_str, string, strlen(supported_proxy_types[i].url_str)))
+ return supported_proxy_types[i].type;
+ i++;
+ }
+ return PROXY_TYPE_UNKNOWN;
+}
+
+ACLK_PROXY_TYPE aclk_verify_proxy(const char *string)
+{
+ if (!string)
+ return PROXY_TYPE_UNKNOWN;
+
+ while (*string == 0x20 && *string!=0) // Help coverity (compiler will remove)
+ string++;
+
+ if (!*string)
+ return PROXY_TYPE_UNKNOWN;
+
+ return aclk_find_proxy(string);
+}
+
+// helper function to censor user&password
+// for logging purposes
+void safe_log_proxy_censor(char *proxy)
+{
+ size_t length = strlen(proxy);
+ char *auth = proxy + length - 1;
+ char *cur;
+
+ while ((auth >= proxy) && (*auth != '@'))
+ auth--;
+
+ //if not found or @ is first char do nothing
+ if (auth <= proxy)
+ return;
+
+ cur = strstr(proxy, ACLK_PROXY_PROTO_ADDR_SEPARATOR);
+ if (!cur)
+ cur = proxy;
+ else
+ cur += strlen(ACLK_PROXY_PROTO_ADDR_SEPARATOR);
+
+ while (cur < auth) {
+ *cur = 'X';
+ cur++;
+ }
+}
+
+static inline void safe_log_proxy_error(char *str, const char *proxy)
+{
+ char *log = strdupz(proxy);
+ safe_log_proxy_censor(log);
+ error("%s Provided Value:\"%s\"", str, log);
+ freez(log);
+}
+
+static inline int check_socks_environment(const char **proxy)
+{
+ char *tmp = getenv("socks_proxy");
+
+ if (!tmp)
+ return 1;
+
+ if (aclk_verify_proxy(tmp) == PROXY_TYPE_SOCKS5) {
+ *proxy = tmp;
+ return 0;
+ }
+
+ safe_log_proxy_error(
+ "Environment var \"socks_proxy\" defined but of unknown format. Supported syntax: \"socks5[h]://[user:pass@]host:ip\".",
+ tmp);
+ return 1;
+}
+
+static inline int check_http_environment(const char **proxy)
+{
+ char *tmp = getenv("http_proxy");
+
+ if (!tmp)
+ return 1;
+
+ if (aclk_verify_proxy(tmp) == PROXY_TYPE_HTTP) {
+ *proxy = tmp;
+ return 0;
+ }
+
+ safe_log_proxy_error(
+ "Environment var \"http_proxy\" defined but of unknown format. Supported syntax: \"http[s]://[user:pass@]host:ip\".",
+ tmp);
+ return 1;
+}
+
+const char *aclk_lws_wss_get_proxy_setting(ACLK_PROXY_TYPE *type)
+{
+ const char *proxy = config_get(CONFIG_SECTION_CLOUD, ACLK_PROXY_CONFIG_VAR, ACLK_PROXY_ENV);
+ *type = PROXY_DISABLED;
+
+ if (strcmp(proxy, "none") == 0)
+ return proxy;
+
+ if (strcmp(proxy, ACLK_PROXY_ENV) == 0) {
+ if (check_socks_environment(&proxy) == 0) {
+#ifdef LWS_WITH_SOCKS5
+ *type = PROXY_TYPE_SOCKS5;
+ return proxy;
+#else
+ safe_log_proxy_error("socks_proxy environment variable set to use SOCKS5 proxy "
+ "but Libwebsockets used doesn't have SOCKS5 support built in. "
+ "Ignoring and checking for other options.",
+ proxy);
+#endif
+ }
+ if (check_http_environment(&proxy) == 0)
+ *type = PROXY_TYPE_HTTP;
+ return proxy;
+ }
+
+ *type = aclk_verify_proxy(proxy);
+#ifndef LWS_WITH_SOCKS5
+ if (*type == PROXY_TYPE_SOCKS5) {
+ safe_log_proxy_error(
+ "Config var \"" ACLK_PROXY_CONFIG_VAR
+ "\" set to use SOCKS5 proxy but Libwebsockets used is built without support for SOCKS proxy. ACLK will be disabled.",
+ proxy);
+ }
+#endif
+ if (*type == PROXY_TYPE_UNKNOWN) {
+ *type = PROXY_DISABLED;
+ safe_log_proxy_error(
+ "Config var \"" ACLK_PROXY_CONFIG_VAR
+ "\" defined but of unknown format. Supported syntax: \"socks5[h]://[user:pass@]host:ip\".",
+ proxy);
+ }
+
+ return proxy;
+}
+
+// helper function to read settings only once (static)
+// as claiming, challenge/response and ACLK
+// read the same thing, no need to parse again
+const char *aclk_get_proxy(ACLK_PROXY_TYPE *type)
+{
+ static const char *proxy = NULL;
+ static ACLK_PROXY_TYPE proxy_type = PROXY_NOT_SET;
+
+ if (proxy_type == PROXY_NOT_SET)
+ proxy = aclk_lws_wss_get_proxy_setting(&proxy_type);
+
+ *type = proxy_type;
+ return proxy;
+}
+
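+// Parse an ACLK base URL of the form "https://host[:port][/path]" into a
+// freshly allocated hostname and a port number (defaulting to 443 when the
+// URL specifies none). Plain "http://" URLs are rejected, as the ACLK is
+// TLS-only. Returns 0 on success, 1 on failure.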
+int aclk_decode_base_url(char *url, char **aclk_hostname, int *aclk_port)
+{
+ int pos = 0;
+ if (!strncmp("https://", url, 8)) {
+ pos = 8;
+ } else if (!strncmp("http://", url, 7)) {
+ error("Cannot connect ACLK over %s -> unencrypted link is not supported", url);
+ return 1;
+ }
+ int host_end = pos;
+ while (url[host_end] != 0 && url[host_end] != '/' && url[host_end] != ':')
+ host_end++;
+ if (url[host_end] == 0) {
+ *aclk_hostname = strdupz(url + pos);
+ *aclk_port = 443;
+ info("Setting ACLK target host=%s port=%d from %s", *aclk_hostname, *aclk_port, url);
+ return 0;
+ }
+ if (url[host_end] == ':') {
+ *aclk_hostname = callocz(host_end - pos + 1, 1);
+ strncpy(*aclk_hostname, url + pos, host_end - pos);
+ int port_end = host_end + 1;
+ while (url[port_end] >= '0' && url[port_end] <= '9')
+ port_end++;
+ if (port_end - host_end > 6) {
+ error("Port specified in %s is invalid", url);
+ freez(*aclk_hostname);
+ *aclk_hostname = NULL;
+ return 1; // an invalid port is a failure, not a success
+ }
+ *aclk_port = atoi(&url[host_end+1]);
+ }
+ if (url[host_end] == '/') {
+ *aclk_port = 443;
+ *aclk_hostname = callocz(1, host_end - pos + 1);
+ strncpy(*aclk_hostname, url+pos, host_end - pos);
+ }
+ info("Setting ACLK target host=%s port=%d from %s", *aclk_hostname, *aclk_port, url);
+ return 0;
+}
+
+struct label *add_aclk_host_labels(struct label *label) {
+#ifdef ENABLE_ACLK
+ ACLK_PROXY_TYPE aclk_proxy;
+ char *proxy_str;
+ aclk_get_proxy(&aclk_proxy);
+
+ switch(aclk_proxy) {
+ case PROXY_TYPE_SOCKS5:
+ proxy_str = "SOCKS5";
+ break;
+ case PROXY_TYPE_HTTP:
+ proxy_str = "HTTP";
+ break;
+ default:
+ proxy_str = "none";
+ break;
+ }
+ return add_label_to_list(label, "_aclk_proxy", proxy_str, LABEL_SOURCE_AUTO);
+#else
+ return label;
+#endif
+}
diff --git a/aclk/legacy/aclk_common.h b/aclk/legacy/aclk_common.h
new file mode 100644
index 0000000..eedb5b5
--- /dev/null
+++ b/aclk/legacy/aclk_common.h
@@ -0,0 +1,72 @@
+#ifndef ACLK_COMMON_H
+#define ACLK_COMMON_H
+
+#include "aclk_rrdhost_state.h"
+#include "../../daemon/common.h"
+
+extern netdata_mutex_t aclk_shared_state_mutex;
+#define ACLK_SHARED_STATE_LOCK netdata_mutex_lock(&aclk_shared_state_mutex)
+#define ACLK_SHARED_STATE_UNLOCK netdata_mutex_unlock(&aclk_shared_state_mutex)
+
+// minimum and maximum supported version of ACLK
+// in this version of agent
+#define ACLK_VERSION_MIN 2
+#define ACLK_VERSION_MAX 3
+
+// Version negotiation messages have their own versioning
+// this is also used for LWT message as we set that up
+// before version negotiation
+#define ACLK_VERSION_NEG_VERSION 1
+
+// Maximum time to wait for version negotiation before aborting
+// and defaulting to oldest supported version
+#define VERSION_NEG_TIMEOUT 3
+
+#if ACLK_VERSION_MIN > ACLK_VERSION_MAX
+#error "ACLK_VERSION_MAX must be >= than ACLK_VERSION_MIN"
+#endif
+
+// Define ACLK Feature Version Boundaries Here
+#define ACLK_V_COMPRESSION 2
+#define ACLK_V_CHILDRENSTATE 3
+
+#define ACLK_IS_HOST_INITIALIZING(host) (host->aclk_state.state == ACLK_HOST_INITIALIZING)
+#define ACLK_IS_HOST_POPCORNING(host) (ACLK_IS_HOST_INITIALIZING(host) && host->aclk_state.t_last_popcorn_update)
+
+extern struct aclk_shared_state {
+ // optimization to avoid looping through hosts
+ // every time Query Thread wakes up
+ RRDHOST *next_popcorn_host;
+
+ // read only while ACLK connected
+ // protected by a lock otherwise
+ int version_neg;
+ usec_t version_neg_wait_till;
+} aclk_shared_state;
+
+typedef enum aclk_proxy_type {
+ PROXY_TYPE_UNKNOWN = 0,
+ PROXY_TYPE_SOCKS5,
+ PROXY_TYPE_HTTP,
+ PROXY_DISABLED,
+ PROXY_NOT_SET,
+} ACLK_PROXY_TYPE;
+
+extern int aclk_kill_link; // Tells the agent to tear down the link
+extern int aclk_disable_runtime;
+
+const char *aclk_proxy_type_to_s(ACLK_PROXY_TYPE *type);
+
+#define ACLK_PROXY_PROTO_ADDR_SEPARATOR "://"
+#define ACLK_PROXY_ENV "env"
+#define ACLK_PROXY_CONFIG_VAR "proxy"
+
+ACLK_PROXY_TYPE aclk_verify_proxy(const char *string);
+const char *aclk_lws_wss_get_proxy_setting(ACLK_PROXY_TYPE *type);
+void safe_log_proxy_censor(char *proxy);
+int aclk_decode_base_url(char *url, char **aclk_hostname, int *aclk_port);
+const char *aclk_get_proxy(ACLK_PROXY_TYPE *type);
+
+struct label *add_aclk_host_labels(struct label *label);
+
+#endif //ACLK_COMMON_H
diff --git a/aclk/legacy/aclk_lws_https_client.c b/aclk/legacy/aclk_lws_https_client.c
new file mode 100644
index 0000000..c1856ed
--- /dev/null
+++ b/aclk/legacy/aclk_lws_https_client.c
@@ -0,0 +1,246 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#define ACLK_LWS_HTTPS_CLIENT_INTERNAL
+#include "aclk_lws_https_client.h"
+
+#include "aclk_common.h"
+
+#include "aclk_lws_wss_client.h"
+
+#define SMALL_BUFFER 16
+
+struct simple_hcc_data {
+ char *data;
+ size_t data_size;
+ size_t written;
+ char lws_work_buffer[1024 + LWS_PRE];
+ char *payload;
+ int response_code;
+ int done;
+};
+
+static int simple_https_client_callback(struct lws *wsi, enum lws_callback_reasons reason, void *user, void *in, size_t len)
+{
+ UNUSED(user);
+ int n;
+ char *ptr;
+ char buffer[SMALL_BUFFER];
+ struct simple_hcc_data *perconn_data = lws_get_opaque_user_data(wsi);
+
+ switch (reason) {
+ case LWS_CALLBACK_RECEIVE_CLIENT_HTTP_READ:
+ debug(D_ACLK, "LWS_CALLBACK_RECEIVE_CLIENT_HTTP_READ");
+ if (perconn_data->data_size - 1 - perconn_data->written < len)
+ return 1;
+ memcpy(&perconn_data->data[perconn_data->written], in, len);
+ perconn_data->written += len;
+ return 0;
+ case LWS_CALLBACK_RECEIVE_CLIENT_HTTP:
+ debug(D_ACLK, "LWS_CALLBACK_RECEIVE_CLIENT_HTTP");
+ if(!perconn_data) {
+ error("Missing Per Connect Data");
+ return -1;
+ }
+ n = sizeof(perconn_data->lws_work_buffer) - LWS_PRE;
+ ptr = perconn_data->lws_work_buffer + LWS_PRE;
+ if (lws_http_client_read(wsi, &ptr, &n) < 0)
+ return -1;
+ perconn_data->data[perconn_data->written] = '\0';
+ return 0;
+ case LWS_CALLBACK_WSI_DESTROY:
+ debug(D_ACLK, "LWS_CALLBACK_WSI_DESTROY");
+ if(perconn_data)
+ perconn_data->done = 1;
+ return 0;
+ case LWS_CALLBACK_ESTABLISHED_CLIENT_HTTP:
+ debug(D_ACLK, "LWS_CALLBACK_ESTABLISHED_CLIENT_HTTP");
+ if(perconn_data)
+ perconn_data->response_code = lws_http_client_http_response(wsi);
+ return 0;
+ case LWS_CALLBACK_CLOSED_CLIENT_HTTP:
+ debug(D_ACLK, "LWS_CALLBACK_CLOSED_CLIENT_HTTP");
+ return 0;
+ case LWS_CALLBACK_OPENSSL_LOAD_EXTRA_CLIENT_VERIFY_CERTS:
+ debug(D_ACLK, "LWS_CALLBACK_OPENSSL_LOAD_EXTRA_CLIENT_VERIFY_CERTS");
+ return 0;
+ case LWS_CALLBACK_CLIENT_APPEND_HANDSHAKE_HEADER:
+ debug(D_ACLK, "LWS_CALLBACK_CLIENT_APPEND_HANDSHAKE_HEADER");
+ if(perconn_data && perconn_data->payload) {
+ unsigned char **p = (unsigned char **)in, *end = (*p) + len;
+ snprintfz(buffer, SMALL_BUFFER, "%zu", strlen(perconn_data->payload));
+ if (lws_add_http_header_by_token(wsi,
+ WSI_TOKEN_HTTP_CONTENT_LENGTH,
+ (unsigned char *)buffer, strlen(buffer), p, end))
+ return -1;
+ if (lws_add_http_header_by_token(wsi,
+ WSI_TOKEN_HTTP_CONTENT_TYPE,
+ (unsigned char *)ACLK_CONTENT_TYPE_JSON,
+ strlen(ACLK_CONTENT_TYPE_JSON), p, end))
+ return -1;
+ lws_client_http_body_pending(wsi, 1);
+ lws_callback_on_writable(wsi);
+ }
+ return 0;
+ case LWS_CALLBACK_CLIENT_HTTP_WRITEABLE:
+ debug(D_ACLK, "LWS_CALLBACK_CLIENT_HTTP_WRITEABLE");
+ if(perconn_data && perconn_data->payload) {
+ n = strlen(perconn_data->payload);
+ if(perconn_data->data_size < (size_t)LWS_PRE + n + 1) {
+ error("Buffer given is not big enough");
+ return 1;
+ }
+
+ memcpy(&perconn_data->data[LWS_PRE], perconn_data->payload, n);
+ if(n != lws_write(wsi, (unsigned char*)&perconn_data->data[LWS_PRE], n, LWS_WRITE_HTTP)) {
+ error("lws_write error");
+ perconn_data->data[0] = 0;
+ return 1;
+ }
+ lws_client_http_body_pending(wsi, 0);
+ // clean for subsequent reply read
+ perconn_data->data[0] = 0;
+ }
+ return 0;
+ case LWS_CALLBACK_CLIENT_HTTP_BIND_PROTOCOL:
+ debug(D_ACLK, "LWS_CALLBACK_CLIENT_HTTP_BIND_PROTOCOL");
+ return 0;
+ case LWS_CALLBACK_WSI_CREATE:
+ debug(D_ACLK, "LWS_CALLBACK_WSI_CREATE");
+ return 0;
+ case LWS_CALLBACK_PROTOCOL_INIT:
+ debug(D_ACLK, "LWS_CALLBACK_PROTOCOL_INIT");
+ return 0;
+ case LWS_CALLBACK_CLIENT_HTTP_DROP_PROTOCOL:
+ debug(D_ACLK, "LWS_CALLBACK_CLIENT_HTTP_DROP_PROTOCOL");
+ return 0;
+ case LWS_CALLBACK_SERVER_NEW_CLIENT_INSTANTIATED:
+ debug(D_ACLK, "LWS_CALLBACK_SERVER_NEW_CLIENT_INSTANTIATED");
+ return 0;
+ case LWS_CALLBACK_GET_THREAD_ID:
+ debug(D_ACLK, "LWS_CALLBACK_GET_THREAD_ID");
+ return 0;
+ case LWS_CALLBACK_EVENT_WAIT_CANCELLED:
+ debug(D_ACLK, "LWS_CALLBACK_EVENT_WAIT_CANCELLED");
+ return 0;
+ case LWS_CALLBACK_OPENSSL_PERFORM_SERVER_CERT_VERIFICATION:
+ debug(D_ACLK, "LWS_CALLBACK_OPENSSL_PERFORM_SERVER_CERT_VERIFICATION");
+ return 0;
+ case LWS_CALLBACK_CLIENT_FILTER_PRE_ESTABLISH:
+ debug(D_ACLK, "LWS_CALLBACK_CLIENT_FILTER_PRE_ESTABLISH");
+ return 0;
+ default:
+ debug(D_ACLK, "Unknown callback %d", (int)reason);
+ return 0;
+ }
+}
+
+static const struct lws_protocols protocols[] = {
+ {
+ "http",
+ simple_https_client_callback,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0
+ },
+ { NULL, NULL, 0, 0, 0, 0, 0 }
+};
+
+static void simple_hcc_log_divert(int level, const char *line)
+{
+ UNUSED(level);
+ error("Libwebsockets: %s", line);
+}
+
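+// Perform a one-shot HTTPS request ("GET", "POST", ...) against host:port/url.
+// The response body is written into the caller-provided buffer `b` of size
+// `b_size`; `payload`, when non-NULL, is sent as an "application/json" body.
+// Returns 0 when the server replies with a 2xx status code, nonzero otherwise.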
+int aclk_send_https_request(char *method, char *host, int port, char *url, char *b, size_t b_size, char *payload)
+{
+ info("%s %s", __func__, method);
+
+ struct lws_context_creation_info info;
+ struct lws_client_connect_info i;
+ struct lws_context *context;
+
+ struct simple_hcc_data *data = callocz(1, sizeof(struct simple_hcc_data));
+ data->data = b;
+ data->data[0] = 0;
+ data->data_size = b_size;
+ data->payload = payload;
+
+ int n = 0;
+ time_t timestamp;
+
+ struct lws_vhost *vhost;
+
+ memset(&info, 0, sizeof info);
+
+ info.options = LWS_SERVER_OPTION_DO_SSL_GLOBAL_INIT;
+ info.port = CONTEXT_PORT_NO_LISTEN;
+ info.protocols = protocols;
+
+
+ context = lws_create_context(&info);
+ if (!context) {
+ error("Error creating LWS context");
+ freez(data);
+ return 1;
+ }
+
+ lws_set_log_level(LLL_ERR | LLL_WARN, simple_hcc_log_divert);
+
+ lws_service(context, 0);
+
+ memset(&i, 0, sizeof i); /* otherwise uninitialized garbage */
+ i.context = context;
+
+#ifdef ACLK_SSL_ALLOW_SELF_SIGNED
+ i.ssl_connection = LCCSCF_USE_SSL | LCCSCF_ALLOW_SELFSIGNED | LCCSCF_SKIP_SERVER_CERT_HOSTNAME_CHECK | LCCSCF_ALLOW_INSECURE;
+ info("Disabling SSL certificate checks");
+#else
+ i.ssl_connection = LCCSCF_USE_SSL;
+#endif
+#if defined(HAVE_X509_VERIFY_PARAM_set1_host) && HAVE_X509_VERIFY_PARAM_set1_host == 0
+#warning DISABLING SSL HOSTNAME VALIDATION BECAUSE IT IS NOT AVAILABLE ON THIS SYSTEM.
+ i.ssl_connection |= LCCSCF_SKIP_SERVER_CERT_HOSTNAME_CHECK;
+#endif
+
+ i.port = port;
+ i.address = host;
+ i.path = url;
+
+ i.host = i.address;
+ i.origin = i.address;
+ i.method = method;
+ i.opaque_user_data = data;
+ i.alpn = "http/1.1";
+
+ i.protocol = protocols[0].name;
+
+ vhost = lws_get_vhost_by_name(context, "default");
+ if(!vhost)
+ fatal("Could not find the default LWS vhost.");
+
+ //set up proxy
+ aclk_wss_set_proxy(vhost);
+
+ lws_client_connect_via_info(&i);
+
+ // libwebsockets handles connection timeouts already;
+ // this adds additional safety in case of a bug in LWS
+ timestamp = now_monotonic_sec();
+ while( n >= 0 && !data->done && !netdata_exit) {
+ n = lws_service(context, 0);
+ if( now_monotonic_sec() - timestamp > SEND_HTTPS_REQUEST_TIMEOUT ) {
+ data->data[0] = 0;
+ data->done = 1;
+ error("Servicing LWS took too long.");
+ }
+ }
+
+ lws_context_destroy(context);
+
+ n = data->response_code;
+
+ freez(data);
+ return (n < 200 || n >= 300);
+}
diff --git a/aclk/legacy/aclk_lws_https_client.h b/aclk/legacy/aclk_lws_https_client.h
new file mode 100644
index 0000000..811809d
--- /dev/null
+++ b/aclk/legacy/aclk_lws_https_client.h
@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_LWS_HTTPS_CLIENT_H
+#define NETDATA_LWS_HTTPS_CLIENT_H
+
+#include "../../daemon/common.h"
+#include "libnetdata/libnetdata.h"
+
+#define DATAMAXLEN 1024*16
+
+#ifdef ACLK_LWS_HTTPS_CLIENT_INTERNAL
+#define ACLK_CONTENT_TYPE_JSON "application/json"
+#define SEND_HTTPS_REQUEST_TIMEOUT 30
+#endif
+
+int aclk_send_https_request(char *method, char *host, int port, char *url, char *b, size_t b_size, char *payload);
+
+#endif /* NETDATA_LWS_HTTPS_CLIENT_H */
diff --git a/aclk/legacy/aclk_lws_wss_client.c b/aclk/legacy/aclk_lws_wss_client.c
new file mode 100644
index 0000000..f06df3f
--- /dev/null
+++ b/aclk/legacy/aclk_lws_wss_client.c
@@ -0,0 +1,621 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "aclk_lws_wss_client.h"
+
+#include "libnetdata/libnetdata.h"
+#include "../../daemon/common.h"
+#include "aclk_common.h"
+#include "aclk_stats.h"
+
+extern int aclk_shutting_down;
+
+static int aclk_lws_wss_callback(struct lws *wsi, enum lws_callback_reasons reason, void *user, void *in, size_t len);
+
+struct aclk_lws_wss_perconnect_data {
+ int todo;
+};
+
+static struct aclk_lws_wss_engine_instance *engine_instance = NULL;
+
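+// Report the current sizes of the ACLK I/O queues (used by the stats thread):
+// the number of queued write buffers, the bytes still to be written, and the
+// number of elements waiting in the read ring. Any output pointer may be NULL
+// if the caller is not interested in that value.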
+void lws_wss_check_queues(size_t *write_len, size_t *write_len_bytes, size_t *read_len)
+{
+ if (write_len != NULL && write_len_bytes != NULL)
+ {
+ *write_len = 0;
+ *write_len_bytes = 0;
+ if (engine_instance != NULL)
+ {
+ aclk_lws_mutex_lock(&engine_instance->write_buf_mutex);
+
+ struct lws_wss_packet_buffer *write_b;
+ size_t w,wb;
+ for(w=0, wb=0, write_b = engine_instance->write_buffer_head; write_b != NULL; write_b = write_b->next)
+ {
+ w++;
+ wb += write_b->data_size - write_b->written;
+ }
+ *write_len = w;
+ *write_len_bytes = wb;
+ aclk_lws_mutex_unlock(&engine_instance->write_buf_mutex);
+ }
+ }
+ else if (write_len != NULL)
+ {
+ *write_len = 0;
+ if (engine_instance != NULL)
+ {
+ aclk_lws_mutex_lock(&engine_instance->write_buf_mutex);
+
+ struct lws_wss_packet_buffer *write_b;
+ size_t w;
+ for(w=0, write_b = engine_instance->write_buffer_head; write_b != NULL; write_b = write_b->next)
+ w++;
+ *write_len = w;
+ aclk_lws_mutex_unlock(&engine_instance->write_buf_mutex);
+ }
+ }
+ if (read_len != NULL)
+ {
+ *read_len = 0;
+ if (engine_instance != NULL)
+ {
+ aclk_lws_mutex_lock(&engine_instance->read_buf_mutex);
+ *read_len = lws_ring_get_count_waiting_elements(engine_instance->read_ringbuffer, NULL);
+ aclk_lws_mutex_unlock(&engine_instance->read_buf_mutex);
+ }
+ }
+}
+
+static inline struct lws_wss_packet_buffer *lws_wss_packet_buffer_new(void *data, size_t size)
+{
+ struct lws_wss_packet_buffer *new = callocz(1, sizeof(struct lws_wss_packet_buffer));
+ if (data) {
+ new->data = mallocz(LWS_PRE + size);
+ memcpy(new->data + LWS_PRE, data, size);
+ new->data_size = size;
+ new->written = 0;
+ }
+ return new;
+}
+
+static inline void lws_wss_packet_buffer_append(struct lws_wss_packet_buffer **list, struct lws_wss_packet_buffer *item)
+{
+ struct lws_wss_packet_buffer *tail = *list;
+ if (!*list) {
+ *list = item;
+ return;
+ }
+ while (tail->next) {
+ tail = tail->next;
+ }
+ tail->next = item;
+}
+
+static inline struct lws_wss_packet_buffer *lws_wss_packet_buffer_pop(struct lws_wss_packet_buffer **list)
+{
+ struct lws_wss_packet_buffer *ret = *list;
+ if (ret != NULL)
+ *list = ret->next;
+
+ return ret;
+}
+
+static inline void lws_wss_packet_buffer_free(struct lws_wss_packet_buffer *item)
+{
+ freez(item->data);
+ freez(item);
+}
+
+static inline void _aclk_lws_wss_read_buffer_clear(struct lws_ring *ringbuffer)
+{
+ size_t elems = lws_ring_get_count_waiting_elements(ringbuffer, NULL);
+ if (elems > 0)
+ lws_ring_consume(ringbuffer, NULL, NULL, elems);
+}
+
+static inline void _aclk_lws_wss_write_buffer_clear(struct lws_wss_packet_buffer **list)
+{
+ struct lws_wss_packet_buffer *i;
+ while ((i = lws_wss_packet_buffer_pop(list)) != NULL) {
+ lws_wss_packet_buffer_free(i);
+ }
+ *list = NULL;
+}
+
+static inline void aclk_lws_wss_clear_io_buffers()
+{
+ aclk_lws_mutex_lock(&engine_instance->read_buf_mutex);
+ _aclk_lws_wss_read_buffer_clear(engine_instance->read_ringbuffer);
+ aclk_lws_mutex_unlock(&engine_instance->read_buf_mutex);
+ aclk_lws_mutex_lock(&engine_instance->write_buf_mutex);
+ _aclk_lws_wss_write_buffer_clear(&engine_instance->write_buffer_head);
+ aclk_lws_mutex_unlock(&engine_instance->write_buf_mutex);
+}
+
+static const struct lws_protocols protocols[] = { { "aclk-wss", aclk_lws_wss_callback,
+ sizeof(struct aclk_lws_wss_perconnect_data), 32768*4, 0, 0, 32768*4 },
+ { NULL, NULL, 0, 0, 0, 0, 0 } };
+
+static void aclk_lws_wss_log_divert(int level, const char *line)
+{
+ switch (level) {
+ case LLL_ERR:
+ error("Libwebsockets Error: %s", line);
+ break;
+ case LLL_WARN:
+ debug(D_ACLK, "Libwebsockets Warn: %s", line);
+ break;
+ default:
+ error("Libwebsockets try to log with unknown log level (%d), msg: %s", level, line);
+ }
+}
+
+static int aclk_lws_wss_client_init( char *target_hostname, int target_port)
+{
+ static int lws_logging_initialized = 0;
+
+ if (unlikely(!lws_logging_initialized)) {
+ lws_set_log_level(LLL_ERR | LLL_WARN, aclk_lws_wss_log_divert);
+ lws_logging_initialized = 1;
+ }
+
+ if (!target_hostname)
+ return 1;
+
+ engine_instance = callocz(1, sizeof(struct aclk_lws_wss_engine_instance));
+
+ engine_instance->host = target_hostname;
+ engine_instance->port = target_port;
+
+
+ aclk_lws_mutex_init(&engine_instance->write_buf_mutex);
+ aclk_lws_mutex_init(&engine_instance->read_buf_mutex);
+
+ engine_instance->read_ringbuffer = lws_ring_create(1, ACLK_LWS_WSS_RECV_BUFF_SIZE_BYTES, NULL);
+ if (!engine_instance->read_ringbuffer)
+ goto failure_cleanup;
+
+ return 0;
+
+failure_cleanup:
+ freez(engine_instance);
+ return 1;
+}
+
+void aclk_lws_wss_destroy_context()
+{
+ if (!engine_instance)
+ return;
+ if (!engine_instance->lws_context)
+ return;
+ lws_context_destroy(engine_instance->lws_context);
+ engine_instance->lws_context = NULL;
+}
+
+
+void aclk_lws_wss_client_destroy()
+{
+ if (engine_instance == NULL)
+ return;
+
+ aclk_lws_wss_destroy_context();
+ engine_instance->lws_wsi = NULL;
+
+ aclk_lws_wss_clear_io_buffers();
+
+#ifdef ACLK_LWS_MOSQUITTO_IO_CALLS_MULTITHREADED
+ pthread_mutex_destroy(&engine_instance->write_buf_mutex);
+ pthread_mutex_destroy(&engine_instance->read_buf_mutex);
+#endif
+}
+
+#ifdef LWS_WITH_SOCKS5
+static int aclk_wss_set_socks(struct lws_vhost *vhost, const char *socks)
+{
+ char *proxy = strstr(socks, ACLK_PROXY_PROTO_ADDR_SEPARATOR);
+
+ if (!proxy)
+ return -1;
+
+ proxy += strlen(ACLK_PROXY_PROTO_ADDR_SEPARATOR);
+
+ if (!*proxy)
+ return -1;
+
+ return lws_set_socks(vhost, proxy);
+}
+#endif
+
+void aclk_wss_set_proxy(struct lws_vhost *vhost)
+{
+ const char *proxy;
+ ACLK_PROXY_TYPE proxy_type;
+ char *log;
+
+ proxy = aclk_get_proxy(&proxy_type);
+
+#ifdef LWS_WITH_SOCKS5
+ lws_set_socks(vhost, ":");
+#endif
+ lws_set_proxy(vhost, ":");
+
+ if (proxy_type == PROXY_TYPE_UNKNOWN) {
+ error("Unknown proxy type");
+ return;
+ }
+
+ if (proxy_type == PROXY_TYPE_SOCKS5 || proxy_type == PROXY_TYPE_HTTP) {
+ log = strdupz(proxy);
+ safe_log_proxy_censor(log);
+ info("Connecting using %s proxy:\"%s\"", aclk_proxy_type_to_s(&proxy_type), log);
+ freez(log);
+ }
+ if (proxy_type == PROXY_TYPE_SOCKS5) {
+#ifdef LWS_WITH_SOCKS5
+ if (aclk_wss_set_socks(vhost, proxy))
+ error("LWS failed to accept socks proxy.");
+ return;
+#else
+ fatal("We have no SOCKS5 support but we made it here. Programming error!");
+#endif
+ }
+ if (proxy_type == PROXY_TYPE_HTTP) {
+ if (lws_set_proxy(vhost, proxy))
+ error("LWS failed to accept http proxy.");
+ return;
+ }
+ if (proxy_type != PROXY_DISABLED)
+ error("Unknown proxy type");
+}
+
+// Return code indicates if connection attempt has started async.
+int aclk_lws_wss_connect(char *host, int port)
+{
+ struct lws_client_connect_info i;
+ struct lws_vhost *vhost;
+ int n;
+
+ if (!engine_instance) {
+ if (aclk_lws_wss_client_init(host, port))
+ return 1; // Propagate failure
+ }
+
+ if (!engine_instance->lws_context)
+ {
+ // First time through (on this connection), create the context
+ struct lws_context_creation_info info;
+ memset(&info, 0, sizeof(struct lws_context_creation_info));
+ info.options = LWS_SERVER_OPTION_DO_SSL_GLOBAL_INIT;
+ info.port = CONTEXT_PORT_NO_LISTEN;
+ info.protocols = protocols;
+ engine_instance->lws_context = lws_create_context(&info);
+ if (!engine_instance->lws_context)
+ {
+ error("Failed to create lws_context, ACLK will not function");
+ return 1;
+ }
+ return 0;
+ // PROTOCOL_INIT callback will call again.
+ }
+
+ for (n = 0; n < ACLK_LWS_CALLBACK_HISTORY; n++)
+ engine_instance->lws_callback_history[n] = 0;
+
+ if (engine_instance->lws_wsi) {
+ error("Already Connected. Only one connection supported at a time.");
+ return 0;
+ }
+
+ memset(&i, 0, sizeof(i));
+ i.context = engine_instance->lws_context;
+ i.port = engine_instance->port;
+ i.address = engine_instance->host;
+ i.path = "/mqtt";
+ i.host = engine_instance->host;
+ i.protocol = "mqtt";
+
+ // from LWS docu:
+ // If option LWS_SERVER_OPTION_EXPLICIT_VHOSTS is given, no vhost is
+ // created; you're expected to create your own vhosts afterwards using
+ // lws_create_vhost(). Otherwise a vhost named "default" is also created
+ // using the information in the vhost-related members, for compatibility.
+ vhost = lws_get_vhost_by_name(engine_instance->lws_context, "default");
+ if(!vhost)
+ fatal("Could not find the default LWS vhost.");
+
+ aclk_wss_set_proxy(vhost);
+
+#ifdef ACLK_SSL_ALLOW_SELF_SIGNED
+ i.ssl_connection = LCCSCF_USE_SSL | LCCSCF_ALLOW_SELFSIGNED | LCCSCF_SKIP_SERVER_CERT_HOSTNAME_CHECK | LCCSCF_ALLOW_INSECURE;
+ info("Disabling SSL certificate checks");
+#else
+ i.ssl_connection = LCCSCF_USE_SSL;
+#endif
+#if defined(HAVE_X509_VERIFY_PARAM_set1_host) && HAVE_X509_VERIFY_PARAM_set1_host == 0
+#warning DISABLING SSL HOSTNAME VALIDATION BECAUSE IT IS NOT AVAILABLE ON THIS SYSTEM.
+ i.ssl_connection |= LCCSCF_SKIP_SERVER_CERT_HOSTNAME_CHECK;
+#endif
+ lws_client_connect_via_info(&i);
+ return 0;
+}
+
+static inline int received_data_to_ringbuff(struct lws_ring *buffer, void *data, size_t len)
+{
+ if (lws_ring_insert(buffer, data, len) != len) {
+ error("ACLK_LWS_WSS_CLIENT: receive buffer full. Closing connection to prevent flooding.");
+ return 0;
+ }
+ return 1;
+}
+
+static const char *aclk_lws_callback_name(enum lws_callback_reasons reason)
+{
+ switch (reason) {
+ case LWS_CALLBACK_CLIENT_WRITEABLE:
+ return "LWS_CALLBACK_CLIENT_WRITEABLE";
+ case LWS_CALLBACK_CLIENT_RECEIVE:
+ return "LWS_CALLBACK_CLIENT_RECEIVE";
+ case LWS_CALLBACK_PROTOCOL_INIT:
+ return "LWS_CALLBACK_PROTOCOL_INIT";
+ case LWS_CALLBACK_SERVER_NEW_CLIENT_INSTANTIATED:
+ return "LWS_CALLBACK_SERVER_NEW_CLIENT_INSTANTIATED";
+ case LWS_CALLBACK_USER:
+ return "LWS_CALLBACK_USER";
+ case LWS_CALLBACK_CLIENT_CONNECTION_ERROR:
+ return "LWS_CALLBACK_CLIENT_CONNECTION_ERROR";
+ case LWS_CALLBACK_CLIENT_CLOSED:
+ return "LWS_CALLBACK_CLIENT_CLOSED";
+ case LWS_CALLBACK_WS_PEER_INITIATED_CLOSE:
+ return "LWS_CALLBACK_WS_PEER_INITIATED_CLOSE";
+ case LWS_CALLBACK_WSI_DESTROY:
+ return "LWS_CALLBACK_WSI_DESTROY";
+ case LWS_CALLBACK_CLIENT_ESTABLISHED:
+ return "LWS_CALLBACK_CLIENT_ESTABLISHED";
+ case LWS_CALLBACK_OPENSSL_PERFORM_SERVER_CERT_VERIFICATION:
+ return "LWS_CALLBACK_OPENSSL_PERFORM_SERVER_CERT_VERIFICATION";
+ case LWS_CALLBACK_EVENT_WAIT_CANCELLED:
+ return "LWS_CALLBACK_EVENT_WAIT_CANCELLED";
+ default:
+ // Not using an internal buffer here for thread-safety with unknown calling context.
+#ifdef ACLK_TRP_DEBUG_VERBOSE
+ error("Unknown LWS callback %u", reason);
+#endif
+ return "unknown";
+ }
+}
+
+void aclk_lws_wss_fail_report()
+{
+ int i;
+ int anything_to_send = 0;
+ BUFFER *buf;
+
+ if (netdata_anonymous_statistics_enabled <= 0)
+ return;
+
+ // Estimate: most callback IDs are 1-99, plus ',' separators and the trailing '\0'.
+ buf = buffer_create((ACLK_LWS_CALLBACK_HISTORY * 2) + 10);
+
+ for (i = 0; i < ACLK_LWS_CALLBACK_HISTORY; i++)
+ if (engine_instance->lws_callback_history[i]) {
+ buffer_sprintf(buf, "%s%d", (i ? "," : ""), engine_instance->lws_callback_history[i]);
+ anything_to_send = 1;
+ }
+
+ if (anything_to_send)
+ send_statistics("ACLK_CONN_FAIL", "FAIL", buffer_tostring(buf));
+
+ buffer_free(buf);
+}
+
+static int aclk_lws_wss_callback(struct lws *wsi, enum lws_callback_reasons reason, void *user, void *in, size_t len)
+{
+ UNUSED(user);
+ struct lws_wss_packet_buffer *data;
+ int retval = 0;
+ static int lws_shutting_down = 0;
+ int i;
+
+ for (i = ACLK_LWS_CALLBACK_HISTORY - 1; i > 0; i--)
+ engine_instance->lws_callback_history[i] = engine_instance->lws_callback_history[i - 1];
+ engine_instance->lws_callback_history[0] = (int)reason;
+
+ if (unlikely(aclk_shutting_down && !lws_shutting_down)) {
+ lws_shutting_down = 1;
+ retval = -1;
+ engine_instance->upstream_reconnect_request = 0;
+ }
+
+ // Callback servicing is forced when we are closed from above.
+ if (engine_instance->upstream_reconnect_request) {
+ error("Closing lws connectino due to libmosquitto error.");
+ char *upstream_connection_error = "MQTT protocol error. Closing underlying wss connection.";
+ lws_close_reason(
+ wsi, LWS_CLOSE_STATUS_PROTOCOL_ERR, (unsigned char *)upstream_connection_error,
+ strlen(upstream_connection_error));
+ retval = -1;
+ engine_instance->upstream_reconnect_request = 0;
+ }
+
+ // Don't log to info - volume is proportional to message flow on ACLK.
+ switch (reason) {
+ case LWS_CALLBACK_CLIENT_WRITEABLE:
+ aclk_lws_mutex_lock(&engine_instance->write_buf_mutex);
+ data = engine_instance->write_buffer_head;
+ if (likely(data)) {
+ size_t bytes_left = data->data_size - data->written;
+ if ( bytes_left > FRAGMENT_SIZE)
+ bytes_left = FRAGMENT_SIZE;
+ int n = lws_write(wsi, data->data + LWS_PRE + data->written, bytes_left, LWS_WRITE_BINARY);
+ if (n>=0) {
+ data->written += n;
+ if (aclk_stats_enabled) {
+ ACLK_STATS_LOCK;
+ aclk_metrics_per_sample.write_q_consumed += n;
+ ACLK_STATS_UNLOCK;
+ }
+ }
+ //error("lws_write(req=%u,written=%u) %zu of %zu",bytes_left, rc, data->written,data->data_size,rc);
+ if (data->written == data->data_size)
+ {
+ lws_wss_packet_buffer_pop(&engine_instance->write_buffer_head);
+ lws_wss_packet_buffer_free(data);
+ }
+ if (engine_instance->write_buffer_head)
+ lws_callback_on_writable(engine_instance->lws_wsi);
+ }
+ aclk_lws_mutex_unlock(&engine_instance->write_buf_mutex);
+ return retval;
+
+ case LWS_CALLBACK_CLIENT_RECEIVE:
+ aclk_lws_mutex_lock(&engine_instance->read_buf_mutex);
+ if (!received_data_to_ringbuff(engine_instance->read_ringbuffer, in, len))
+ retval = 1;
+ aclk_lws_mutex_unlock(&engine_instance->read_buf_mutex);
+ if (aclk_stats_enabled) {
+ ACLK_STATS_LOCK;
+ aclk_metrics_per_sample.read_q_added += len;
+ ACLK_STATS_UNLOCK;
+ }
+
+ // Note to future self: do not call this while the read lock is held, as it will
+ // eventually need to acquire the same lock in aclk_lws_wss_client_read().
+ aclk_lws_connection_data_received();
+ return retval;
+
+ case LWS_CALLBACK_WSI_CREATE:
+ case LWS_CALLBACK_CLIENT_FILTER_PRE_ESTABLISH:
+ case LWS_CALLBACK_CLIENT_APPEND_HANDSHAKE_HEADER:
+ case LWS_CALLBACK_OPENSSL_LOAD_EXTRA_CLIENT_VERIFY_CERTS:
+ case LWS_CALLBACK_GET_THREAD_ID: // ?
+ case LWS_CALLBACK_EVENT_WAIT_CANCELLED:
+ case LWS_CALLBACK_OPENSSL_PERFORM_SERVER_CERT_VERIFICATION:
+ // Expected and safe to ignore.
+#ifdef ACLK_TRP_DEBUG_VERBOSE
+ debug(D_ACLK, "Ignoring expected callback from LWS: %s", aclk_lws_callback_name(reason));
+#endif
+ return retval;
+
+ default:
+ // Pass to next switch, this case removes compiler warnings.
+ break;
+ }
+ // Log to info - volume is proportional to connection attempts.
+#ifdef ACLK_TRP_DEBUG_VERBOSE
+ info("Processing callback %s", aclk_lws_callback_name(reason));
+#endif
+ switch (reason) {
+ case LWS_CALLBACK_PROTOCOL_INIT:
+ aclk_lws_wss_connect(engine_instance->host, engine_instance->port); // Makes the outgoing connection
+ break;
+ case LWS_CALLBACK_SERVER_NEW_CLIENT_INSTANTIATED:
+ if (engine_instance->lws_wsi != NULL && engine_instance->lws_wsi != wsi)
+ error("Multiple connections on same WSI? %p vs %p", engine_instance->lws_wsi, wsi);
+ engine_instance->lws_wsi = wsi;
+ break;
+ case LWS_CALLBACK_CLIENT_CONNECTION_ERROR:
+ error(
+ "Could not connect MQTT over WSS server \"%s:%d\". LwsReason:\"%s\"", engine_instance->host,
+ engine_instance->port, (in ? (char *)in : "not given"));
+ // Fall-through
+ case LWS_CALLBACK_CLIENT_CLOSED:
+ case LWS_CALLBACK_WS_PEER_INITIATED_CLOSE:
+ engine_instance->lws_wsi = NULL; // inside libwebsockets lws_close_free_wsi is called after callback
+ aclk_lws_connection_closed();
+ return -1; // the callback response is ignored, hope the above remains true
+ case LWS_CALLBACK_WSI_DESTROY:
+ aclk_lws_wss_clear_io_buffers();
+ if (!engine_instance->websocket_connection_up)
+ aclk_lws_wss_fail_report();
+ engine_instance->lws_wsi = NULL;
+ engine_instance->websocket_connection_up = 0;
+ aclk_lws_connection_closed();
+ break;
+ case LWS_CALLBACK_CLIENT_ESTABLISHED:
+ engine_instance->websocket_connection_up = 1;
+ aclk_lws_connection_established(engine_instance->host, engine_instance->port);
+ break;
+
+ default:
+#ifdef ACLK_TRP_DEBUG_VERBOSE
+ error("Unexpected callback from libwebsockets %s", aclk_lws_callback_name(reason));
+#endif
+ break;
+ }
+ return retval; // 0 = OK, otherwise the connection should be closed!
+}
+
+int aclk_lws_wss_client_write(void *buf, size_t count)
+{
+ if (engine_instance && engine_instance->lws_wsi && engine_instance->websocket_connection_up) {
+ aclk_lws_mutex_lock(&engine_instance->write_buf_mutex);
+ lws_wss_packet_buffer_append(&engine_instance->write_buffer_head, lws_wss_packet_buffer_new(buf, count));
+ aclk_lws_mutex_unlock(&engine_instance->write_buf_mutex);
+
+ if (aclk_stats_enabled) {
+ ACLK_STATS_LOCK;
+ aclk_metrics_per_sample.write_q_added += count;
+ ACLK_STATS_UNLOCK;
+ }
+
+ lws_callback_on_writable(engine_instance->lws_wsi);
+ return count;
+ }
+ return 0;
+}
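+
+// Usage sketch (hypothetical buffer). The write is asynchronous: the data is
+// queued under write_buf_mutex and drained in FRAGMENT_SIZE chunks by the
+// LWS_CALLBACK_CLIENT_WRITEABLE handler above.
+//
+//     char msg[] = "mqtt packet bytes";
+//     if (aclk_lws_wss_client_write(msg, sizeof(msg)) == 0)
+//         error("write rejected - no established websocket connection");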
+
+int aclk_lws_wss_client_read(void *buf, size_t count)
+{
+ size_t data_to_be_read = count;
+
+ aclk_lws_mutex_lock(&engine_instance->read_buf_mutex);
+ size_t readable_byte_count = lws_ring_get_count_waiting_elements(engine_instance->read_ringbuffer, NULL);
+ if (unlikely(readable_byte_count == 0)) {
+ errno = EAGAIN;
+ data_to_be_read = -1;
+ goto abort;
+ }
+
+ if (readable_byte_count < data_to_be_read)
+ data_to_be_read = readable_byte_count;
+
+ data_to_be_read = lws_ring_consume(engine_instance->read_ringbuffer, NULL, buf, data_to_be_read);
+ if (data_to_be_read == readable_byte_count)
+ engine_instance->data_to_read = 0;
+
+ if (aclk_stats_enabled) {
+ ACLK_STATS_LOCK;
+ aclk_metrics_per_sample.read_q_consumed += data_to_be_read;
+ ACLK_STATS_UNLOCK;
+ }
+
+abort:
+ aclk_lws_mutex_unlock(&engine_instance->read_buf_mutex);
+ return data_to_be_read;
+}
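+
+// Usage sketch (hypothetical caller). An empty ring buffer is reported the way a
+// non-blocking socket would report it - return -1 with errno set to EAGAIN - so
+// the mosquitto-facing reader can simply retry later.
+//
+//     char buf[1024];
+//     int n = aclk_lws_wss_client_read(buf, sizeof(buf));
+//     if (n < 0 && errno == EAGAIN)
+//         return; // nothing buffered yet, not an error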
+
+void aclk_lws_wss_service_loop()
+{
+ if (engine_instance)
+ {
+ /*if (engine_instance->lws_wsi) {
+ lws_cancel_service(engine_instance->lws_context);
+ lws_callback_on_writable(engine_instance->lws_wsi);
+ }*/
+ lws_service(engine_instance->lws_context, 0);
+ }
+}
+
+// In case the MQTT connection disconnects while the lws transport is still operational,
+// we should drop the connection and reconnect.
+// This function should be called when that happens, to notify lws of the situation.
+void aclk_lws_wss_mqtt_layer_disconect_notif()
+{
+ if (!engine_instance)
+ return;
+ if (engine_instance->lws_wsi && engine_instance->websocket_connection_up) {
+ engine_instance->upstream_reconnect_request = 1;
+ lws_callback_on_writable(
+ engine_instance->lws_wsi); //here we just do it to ensure we get callback called from lws, we don't need any actual data to be written.
+ }
+}
diff --git a/aclk/legacy/aclk_lws_wss_client.h b/aclk/legacy/aclk_lws_wss_client.h
new file mode 100644
index 0000000..584a3cf
--- /dev/null
+++ b/aclk/legacy/aclk_lws_wss_client.h
@@ -0,0 +1,92 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef ACLK_LWS_WSS_CLIENT_H
+#define ACLK_LWS_WSS_CLIENT_H
+
+#include <libwebsockets.h>
+
+#include "libnetdata/libnetdata.h"
+
+// This is a define because ideally the ACLK at a high level
+// can do mosquitto writes and reads from one thread only,
+// which is a cleaner implementation IMHO.
+// In that case these mutexes are not necessary and life
+// is simpler.
+#define ACLK_LWS_MOSQUITTO_IO_CALLS_MULTITHREADED 1
+
+#define ACLK_LWS_WSS_RECV_BUFF_SIZE_BYTES (128 * 1024)
+
+#define ACLK_LWS_CALLBACK_HISTORY 10
+
+#ifdef ACLK_LWS_MOSQUITTO_IO_CALLS_MULTITHREADED
+#define aclk_lws_mutex_init(x) netdata_mutex_init(x)
+#define aclk_lws_mutex_lock(x) netdata_mutex_lock(x)
+#define aclk_lws_mutex_unlock(x) netdata_mutex_unlock(x)
+#else
+#define aclk_lws_mutex_init(x)
+#define aclk_lws_mutex_lock(x)
+#define aclk_lws_mutex_unlock(x)
+#endif
+
+struct aclk_lws_wss_engine_callbacks {
+ void (*connection_established_callback)();
+ void (*data_rcvd_callback)();
+ void (*data_writable_callback)();
+ void (*connection_closed)();
+};
+
+struct lws_wss_packet_buffer {
+ unsigned char *data;
+ size_t data_size, written;
+ struct lws_wss_packet_buffer *next;
+};
+
+struct aclk_lws_wss_engine_instance {
+ //target host/port for connection
+ char *host;
+ int port;
+
+ //internal data
+ struct lws_context *lws_context;
+ struct lws *lws_wsi;
+
+#ifdef ACLK_LWS_MOSQUITTO_IO_CALLS_MULTITHREADED
+ netdata_mutex_t write_buf_mutex;
+ netdata_mutex_t read_buf_mutex;
+#endif
+
+ struct lws_wss_packet_buffer *write_buffer_head;
+ struct lws_ring *read_ringbuffer;
+
+ //flags to be read by the engine user
+ int websocket_connection_up;
+
+ // currently this is by default disabled
+
+ int data_to_read;
+ int upstream_reconnect_request;
+
+ int lws_callback_history[ACLK_LWS_CALLBACK_HISTORY];
+};
+
+void aclk_lws_wss_client_destroy();
+void aclk_lws_wss_destroy_context();
+
+int aclk_lws_wss_connect(char *host, int port);
+
+int aclk_lws_wss_client_write(void *buf, size_t count);
+int aclk_lws_wss_client_read(void *buf, size_t count);
+void aclk_lws_wss_service_loop();
+
+void aclk_lws_wss_mqtt_layer_disconect_notif();
+
+// Notifications inside the layer above
+void aclk_lws_connection_established();
+void aclk_lws_connection_data_received();
+void aclk_lws_connection_closed();
+void lws_wss_check_queues(size_t *write_len, size_t *write_len_bytes, size_t *read_len);
+
+void aclk_wss_set_proxy(struct lws_vhost *vhost);
+
+#define FRAGMENT_SIZE 4096
+#endif
diff --git a/aclk/legacy/aclk_query.c b/aclk/legacy/aclk_query.c
new file mode 100644
index 0000000..7ab534f
--- /dev/null
+++ b/aclk/legacy/aclk_query.c
@@ -0,0 +1,789 @@
+#include "aclk_common.h"
+#include "aclk_query.h"
+#include "aclk_stats.h"
+#include "aclk_rx_msgs.h"
+
+#define WEB_HDR_ACCEPT_ENC "Accept-Encoding:"
+
+pthread_cond_t query_cond_wait = PTHREAD_COND_INITIALIZER;
+pthread_mutex_t query_lock_wait = PTHREAD_MUTEX_INITIALIZER;
+#define QUERY_THREAD_LOCK pthread_mutex_lock(&query_lock_wait)
+#define QUERY_THREAD_UNLOCK pthread_mutex_unlock(&query_lock_wait)
+
+volatile int aclk_connected = 0;
+
+#ifndef __GNUC__
+#pragma region ACLK_QUEUE
+#endif
+
+static netdata_mutex_t queue_mutex = NETDATA_MUTEX_INITIALIZER;
+#define ACLK_QUEUE_LOCK netdata_mutex_lock(&queue_mutex)
+#define ACLK_QUEUE_UNLOCK netdata_mutex_unlock(&queue_mutex)
+
+struct aclk_query {
+ usec_t created;
+ usec_t created_boot_time;
+ time_t run_after; // Delay run until after this time
+ ACLK_CMD cmd; // What command is this
+ char *topic; // Topic to respond to
+ char *data; // Internal data (NULL if request from the cloud)
+ char *msg_id; // msg_id generated by the cloud (NULL if internal)
+ char *query; // The actual query
+ u_char deleted; // Marked deleted, awaiting garbage collection
+ struct aclk_query *next;
+};
+
+struct aclk_query_queue {
+ struct aclk_query *aclk_query_head;
+ struct aclk_query *aclk_query_tail;
+ unsigned int count;
+} aclk_queue = { .aclk_query_head = NULL, .aclk_query_tail = NULL, .count = 0 };
+
+
+unsigned int aclk_query_size()
+{
+ int r;
+ ACLK_QUEUE_LOCK;
+ r = aclk_queue.count;
+ ACLK_QUEUE_UNLOCK;
+ return r;
+}
+
+/*
+ * Free a query structure when done
+ */
+static void aclk_query_free(struct aclk_query *this_query)
+{
+ if (unlikely(!this_query))
+ return;
+
+ freez(this_query->topic);
+ if (likely(this_query->query))
+ freez(this_query->query);
+ if(this_query->data && this_query->cmd == ACLK_CMD_CLOUD_QUERY_2) {
+ struct aclk_cloud_req_v2 *del = (struct aclk_cloud_req_v2 *)this_query->data;
+ freez(del->data);
+ freez(del);
+ }
+ if (likely(this_query->msg_id))
+ freez(this_query->msg_id);
+ freez(this_query);
+}
+
+/*
+ * Get the next query to process - returns NULL if nothing is pending.
+ * The caller must free the memory by calling aclk_query_free(), which
+ * releases the topic, the query, and the structure itself.
+ */
+static struct aclk_query *aclk_queue_pop()
+{
+ struct aclk_query *this_query;
+
+ ACLK_QUEUE_LOCK;
+
+ if (likely(!aclk_queue.aclk_query_head)) {
+ ACLK_QUEUE_UNLOCK;
+ return NULL;
+ }
+
+ this_query = aclk_queue.aclk_query_head;
+
+ // Get rid of the deleted entries
+ while (this_query && this_query->deleted) {
+ aclk_queue.count--;
+
+ aclk_queue.aclk_query_head = aclk_queue.aclk_query_head->next;
+
+ if (likely(!aclk_queue.aclk_query_head)) {
+ aclk_queue.aclk_query_tail = NULL;
+ }
+
+ aclk_query_free(this_query);
+
+ this_query = aclk_queue.aclk_query_head;
+ }
+
+ if (likely(!this_query)) {
+ ACLK_QUEUE_UNLOCK;
+ return NULL;
+ }
+
+ if (!this_query->deleted && this_query->run_after > now_realtime_sec()) {
+ info("Query %s will run in %ld seconds", this_query->query, this_query->run_after - now_realtime_sec());
+ ACLK_QUEUE_UNLOCK;
+ return NULL;
+ }
+
+ aclk_queue.count--;
+ aclk_queue.aclk_query_head = aclk_queue.aclk_query_head->next;
+
+ if (likely(!aclk_queue.aclk_query_head)) {
+ aclk_queue.aclk_query_tail = NULL;
+ }
+
+ ACLK_QUEUE_UNLOCK;
+ return this_query;
+}
+
+// Returns the entry after which we need to create a new entry to run at the specified time
+// If NULL is returned we need to add to HEAD
+// The caller must hold ACLK_QUEUE_LOCK before calling this
+
+static struct aclk_query *aclk_query_find_position(time_t time_to_run)
+{
+ struct aclk_query *tmp_query, *last_query;
+
+ // Quick check if we will add to the end
+ if (likely(aclk_queue.aclk_query_tail)) {
+ if (aclk_queue.aclk_query_tail->run_after <= time_to_run)
+ return aclk_queue.aclk_query_tail;
+ }
+
+ last_query = NULL;
+ tmp_query = aclk_queue.aclk_query_head;
+
+ while (tmp_query) {
+ if (tmp_query->run_after > time_to_run)
+ return last_query;
+ last_query = tmp_query;
+ tmp_query = tmp_query->next;
+ }
+ return last_query;
+}
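+
+// Worked example (hypothetical run_after values): for a queue scheduled at
+// [5, 10, 20] and time_to_run = 15, this returns the entry scheduled at 10,
+// so the new entry is linked in as [5, 10, 15, 20]. For time_to_run = 3 it
+// returns NULL, meaning the new entry must become the HEAD.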
+
+// The caller must hold ACLK_QUEUE_LOCK before calling this
+static struct aclk_query *
+aclk_query_find(char *topic, void *data, char *msg_id, char *query, ACLK_CMD cmd, struct aclk_query **last_query)
+{
+ struct aclk_query *tmp_query, *prev_query;
+ UNUSED(cmd);
+
+ tmp_query = aclk_queue.aclk_query_head;
+ prev_query = NULL;
+ while (tmp_query) {
+ if (likely(!tmp_query->deleted)) {
+ if (strcmp(tmp_query->topic, topic) == 0 && (!query || strcmp(tmp_query->query, query) == 0)) {
+ if ((!data || data == tmp_query->data) &&
+ (!msg_id || (msg_id && strcmp(msg_id, tmp_query->msg_id) == 0))) {
+ if (likely(last_query))
+ *last_query = prev_query;
+ return tmp_query;
+ }
+ }
+ }
+ prev_query = tmp_query;
+ tmp_query = tmp_query->next;
+ }
+ return NULL;
+}
+
+/*
+ * Add a query to execute; the result will be sent to the specified topic
+ */
+
+int aclk_queue_query(char *topic, void *data, char *msg_id, char *query, int run_after, int internal, ACLK_CMD aclk_cmd)
+{
+ struct aclk_query *new_query, *tmp_query;
+
+ // Ignore all commands while we wait for the agent to initialize
+ if (unlikely(!aclk_connected))
+ return 1;
+
+ run_after = now_realtime_sec() + run_after;
+
+ ACLK_QUEUE_LOCK;
+ struct aclk_query *last_query = NULL;
+
+ tmp_query = aclk_query_find(topic, data, msg_id, query, aclk_cmd, &last_query);
+ if (unlikely(tmp_query)) {
+ if (tmp_query->run_after == run_after) {
+ ACLK_QUEUE_UNLOCK;
+ QUERY_THREAD_WAKEUP;
+ return 0;
+ }
+
+ if (last_query)
+ last_query->next = tmp_query->next;
+ else
+ aclk_queue.aclk_query_head = tmp_query->next;
+
+ debug(D_ACLK, "Removing double entry");
+ aclk_query_free(tmp_query);
+ aclk_queue.count--;
+ }
+
+ if (aclk_stats_enabled) {
+ ACLK_STATS_LOCK;
+ aclk_metrics_per_sample.queries_queued++;
+ ACLK_STATS_UNLOCK;
+ }
+
+ new_query = callocz(1, sizeof(struct aclk_query));
+ new_query->cmd = aclk_cmd;
+ if (internal) {
+ new_query->topic = strdupz(topic);
+ if (likely(query))
+ new_query->query = strdupz(query);
+ } else {
+ new_query->topic = topic;
+ new_query->query = query;
+ new_query->msg_id = msg_id;
+ }
+
+ new_query->data = data;
+ new_query->next = NULL;
+ new_query->created = now_realtime_usec();
+ new_query->created_boot_time = now_boottime_usec();
+ new_query->run_after = run_after;
+
+ debug(D_ACLK, "Added query (%s) (%s)", topic, query ? query : "");
+
+ tmp_query = aclk_query_find_position(run_after);
+
+ if (tmp_query) {
+ new_query->next = tmp_query->next;
+ tmp_query->next = new_query;
+ if (tmp_query == aclk_queue.aclk_query_tail)
+ aclk_queue.aclk_query_tail = new_query;
+ aclk_queue.count++;
+ ACLK_QUEUE_UNLOCK;
+ QUERY_THREAD_WAKEUP;
+ return 0;
+ }
+
+ new_query->next = aclk_queue.aclk_query_head;
+ aclk_queue.aclk_query_head = new_query;
+ aclk_queue.count++;
+
+ ACLK_QUEUE_UNLOCK;
+ QUERY_THREAD_WAKEUP;
+ return 0;
+}
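+
+// Usage sketch (hypothetical topic and command). Internal callers pass
+// internal = 1 so the topic and query strings are duplicated; for cloud
+// requests (internal = 0) the queue takes ownership of the pointers instead.
+//
+//     // run "/api/v1/info" 5 seconds from now, replying on "example/topic"
+//     if (aclk_queue_query("example/topic", NULL, NULL, "/api/v1/info", 5, 1, ACLK_CMD_CLOUD))
+//         error("ACLK not connected - query dropped");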
+
+#ifndef __GNUC__
+#pragma endregion
+#endif
+
+#ifndef __GNUC__
+#pragma region Helper Functions
+#endif
+
+/*
+ * Take a buffer, encode it and rewrite it
+ *
+ */
+
+static char *aclk_encode_response(char *src, size_t content_size, int keep_newlines)
+{
+ char *tmp_buffer = mallocz(content_size * 2);
+ char *dst = tmp_buffer;
+ while (content_size > 0) {
+ switch (*src) {
+ case '\n':
+ if (keep_newlines)
+ {
+ *dst++ = '\\';
+ *dst++ = 'n';
+ }
+ break;
+ case '\t':
+ break;
+ case 0x01 ... 0x08:
+ case 0x0b ... 0x1F:
+ *dst++ = '\\';
+ *dst++ = 'u';
+ *dst++ = '0';
+ *dst++ = '0';
+ *dst++ = (*src < 0x0F) ? '0' : '1';
+ *dst++ = to_hex(*src);
+ break;
+ case '\"':
+ *dst++ = '\\';
+ *dst++ = *src;
+ break;
+ default:
+ *dst++ = *src;
+ }
+ src++;
+ content_size--;
+ }
+ *dst = '\0';
+
+ return tmp_buffer;
+}
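+
+// Worked example (hypothetical input): with keep_newlines = 1, the string
+//     he said "hi"<LF>
+// becomes
+//     he said \"hi\"\n
+// while control characters such as 0x01 are emitted as \u0001 and tabs are
+// dropped entirely.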
+
+#ifndef __GNUC__
+#pragma endregion
+#endif
+
+#ifndef __GNUC__
+#pragma region ACLK_QUERY
+#endif
+
+static usec_t aclk_web_api_request_v1(RRDHOST *host, struct web_client *w, char *url, usec_t q_created)
+{
+ usec_t t = now_boottime_usec();
+ aclk_metric_mat_update(&aclk_metrics_per_sample.cloud_q_recvd_to_processed, t - q_created);
+
+ w->response.code = web_client_api_request_v1(host, w, url);
+ t = now_boottime_usec() - t;
+
+ aclk_metric_mat_update(&aclk_metrics_per_sample.cloud_q_db_query_time, t);
+
+ return t;
+}
+
+static int aclk_execute_query(struct aclk_query *this_query)
+{
+ if (strncmp(this_query->query, "/api/v1/", 8) == 0) {
+ struct web_client *w = (struct web_client *)callocz(1, sizeof(struct web_client));
+ w->response.data = buffer_create(NETDATA_WEB_RESPONSE_INITIAL_SIZE);
+ w->response.header = buffer_create(NETDATA_WEB_RESPONSE_HEADER_SIZE);
+ w->response.header_output = buffer_create(NETDATA_WEB_RESPONSE_HEADER_SIZE);
+ strcpy(w->origin, "*"); // Simulate web_client_create_on_fd()
+ w->cookie1[0] = 0; // Simulate web_client_create_on_fd()
+ w->cookie2[0] = 0; // Simulate web_client_create_on_fd()
+ w->acl = 0x1f;
+
+ char *mysep = strchr(this_query->query, '?');
+ if (mysep) {
+ strncpyz(w->decoded_query_string, mysep, NETDATA_WEB_REQUEST_URL_SIZE);
+ *mysep = '\0';
+ } else
+ strncpyz(w->decoded_query_string, this_query->query, NETDATA_WEB_REQUEST_URL_SIZE);
+
+ mysep = strrchr(this_query->query, '/');
+
+ // TODO: handle bad response perhaps in a different way. For now it goes into the payload
+ aclk_web_api_request_v1(localhost, w, mysep ? mysep + 1 : "noop", this_query->created_boot_time);
+ now_realtime_timeval(&w->tv_ready);
+ w->response.data->date = w->tv_ready.tv_sec;
+ web_client_build_http_header(w); // TODO: this function should offset from date, not tv_ready
+ BUFFER *local_buffer = buffer_create(NETDATA_WEB_RESPONSE_INITIAL_SIZE);
+ buffer_flush(local_buffer);
+ local_buffer->contenttype = CT_APPLICATION_JSON;
+
+ aclk_create_header(local_buffer, "http", this_query->msg_id, 0, 0, aclk_shared_state.version_neg);
+ buffer_strcat(local_buffer, ",\n\t\"payload\": ");
+ char *encoded_response = aclk_encode_response(w->response.data->buffer, w->response.data->len, 0);
+ char *encoded_header = aclk_encode_response(w->response.header_output->buffer, w->response.header_output->len, 1);
+
+ buffer_sprintf(
+ local_buffer, "{\n\"code\": %d,\n\"body\": \"%s\",\n\"headers\": \"%s\"\n}",
+ w->response.code, encoded_response, encoded_header);
+
+ buffer_sprintf(local_buffer, "\n}");
+
+ debug(D_ACLK, "Response:%s", encoded_header);
+
+ aclk_send_message(this_query->topic, local_buffer->buffer, this_query->msg_id);
+
+ buffer_free(w->response.data);
+ buffer_free(w->response.header);
+ buffer_free(w->response.header_output);
+ freez(w);
+ buffer_free(local_buffer);
+ freez(encoded_response);
+ freez(encoded_header);
+ return 0;
+ }
+ return 1;
+}
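+
+// Illustrative shape of the v1 reply (hypothetical values; the envelope keys
+// written by aclk_create_header() are elided as "..."):
+//
+//     { ...,
+//       "payload": {
+//         "code": 200,
+//         "body": "...escaped response body...",
+//         "headers": "...escaped response headers..."
+//     }}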
+
+static int aclk_execute_query_v2(struct aclk_query *this_query)
+{
+ int retval = 0;
+ usec_t t;
+ BUFFER *local_buffer = NULL;
+ struct aclk_cloud_req_v2 *cloud_req = (struct aclk_cloud_req_v2 *)this_query->data;
+
+#ifdef NETDATA_WITH_ZLIB
+ int z_ret;
+ BUFFER *z_buffer = buffer_create(NETDATA_WEB_RESPONSE_INITIAL_SIZE);
+ char *start, *end;
+#endif
+
+ struct web_client *w = (struct web_client *)callocz(1, sizeof(struct web_client));
+ w->response.data = buffer_create(NETDATA_WEB_RESPONSE_INITIAL_SIZE);
+ w->response.header = buffer_create(NETDATA_WEB_RESPONSE_HEADER_SIZE);
+ w->response.header_output = buffer_create(NETDATA_WEB_RESPONSE_HEADER_SIZE);
+ strcpy(w->origin, "*"); // Simulate web_client_create_on_fd()
+ w->cookie1[0] = 0; // Simulate web_client_create_on_fd()
+ w->cookie2[0] = 0; // Simulate web_client_create_on_fd()
+ w->acl = 0x1f;
+
+ char *mysep = strchr(this_query->query, '?');
+ if (mysep) {
+ url_decode_r(w->decoded_query_string, mysep, NETDATA_WEB_REQUEST_URL_SIZE + 1);
+ *mysep = '\0';
+ } else
+ url_decode_r(w->decoded_query_string, this_query->query, NETDATA_WEB_REQUEST_URL_SIZE + 1);
+
+ mysep = strrchr(this_query->query, '/');
+
+ // execute the query
+ t = aclk_web_api_request_v1(cloud_req->host, w, mysep ? mysep + 1 : "noop", this_query->created_boot_time);
+
+#ifdef NETDATA_WITH_ZLIB
+ // check if gzip encoding can and should be used
+ if ((start = strstr(cloud_req->data, WEB_HDR_ACCEPT_ENC))) {
+ start += strlen(WEB_HDR_ACCEPT_ENC);
+ end = strstr(start, "\x0D\x0A");
+ start = strstr(start, "gzip");
+
+ if (start && start < end) {
+ w->response.zstream.zalloc = Z_NULL;
+ w->response.zstream.zfree = Z_NULL;
+ w->response.zstream.opaque = Z_NULL;
+ if(deflateInit2(&w->response.zstream, web_gzip_level, Z_DEFLATED, 15 + 16, 8, web_gzip_strategy) == Z_OK) {
+ w->response.zinitialized = 1;
+ w->response.zoutput = 1;
+ } else
+ error("Failed to initialize zlib. Proceeding without compression.");
+ }
+ }
+
+ if (w->response.data->len && w->response.zinitialized) {
+ w->response.zstream.next_in = (Bytef *)w->response.data->buffer;
+ w->response.zstream.avail_in = w->response.data->len;
+ do {
+ w->response.zstream.avail_out = NETDATA_WEB_RESPONSE_ZLIB_CHUNK_SIZE;
+ w->response.zstream.next_out = w->response.zbuffer;
+ z_ret = deflate(&w->response.zstream, Z_FINISH);
+ if(z_ret < 0) {
+ if(w->response.zstream.msg)
+ error("Error compressing body. ZLIB error: \"%s\"", w->response.zstream.msg);
+ else
+ error("Unknown error during zlib compression.");
+ retval = 1;
+ goto cleanup;
+ }
+ int bytes_to_cpy = NETDATA_WEB_RESPONSE_ZLIB_CHUNK_SIZE - w->response.zstream.avail_out;
+ buffer_need_bytes(z_buffer, bytes_to_cpy);
+ memcpy(&z_buffer->buffer[z_buffer->len], w->response.zbuffer, bytes_to_cpy);
+ z_buffer->len += bytes_to_cpy;
+ } while(z_ret != Z_STREAM_END);
+ // so that web_client_build_http_header()
+ // puts the correct content length into the header
+ buffer_free(w->response.data);
+ w->response.data = z_buffer;
+ z_buffer = NULL;
+ }
+#endif
+
+ now_realtime_timeval(&w->tv_ready);
+ w->response.data->date = w->tv_ready.tv_sec;
+ web_client_build_http_header(w);
+ local_buffer = buffer_create(NETDATA_WEB_RESPONSE_INITIAL_SIZE);
+ local_buffer->contenttype = CT_APPLICATION_JSON;
+
+ aclk_create_header(local_buffer, "http", this_query->msg_id, 0, 0, aclk_shared_state.version_neg);
+ buffer_sprintf(local_buffer, ",\"t-exec\": %llu,\"t-rx\": %llu,\"http-code\": %d", t, this_query->created, w->response.code);
+ buffer_strcat(local_buffer, "}\x0D\x0A\x0D\x0A");
+ buffer_strcat(local_buffer, w->response.header_output->buffer);
+
+ if (w->response.data->len) {
+#ifdef NETDATA_WITH_ZLIB
+ if (w->response.zinitialized) {
+ buffer_need_bytes(local_buffer, w->response.data->len);
+ memcpy(&local_buffer->buffer[local_buffer->len], w->response.data->buffer, w->response.data->len);
+ local_buffer->len += w->response.data->len;
+ } else {
+#endif
+ buffer_strcat(local_buffer, w->response.data->buffer);
+#ifdef NETDATA_WITH_ZLIB
+ }
+#endif
+ }
+
+ aclk_send_message_bin(this_query->topic, local_buffer->buffer, local_buffer->len, this_query->msg_id);
+
+cleanup:
+#ifdef NETDATA_WITH_ZLIB
+ if(w->response.zinitialized)
+ deflateEnd(&w->response.zstream);
+ buffer_free(z_buffer);
+#endif
+ buffer_free(w->response.data);
+ buffer_free(w->response.header);
+ buffer_free(w->response.header_output);
+ freez(w);
+ buffer_free(local_buffer);
+ return retval;
+}
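+
+// Illustrative v2 reply framing (hypothetical values). Unlike v1, the reply is
+// binary: a JSON envelope terminated by CRLF CRLF, followed by the raw HTTP
+// headers and the (possibly gzip-compressed) body:
+//
+//     { ..., "t-exec": 1234, "t-rx": 1612345678901234, "http-code": 200 }<CRLF><CRLF>
+//     HTTP/1.1 200 OK<CRLF>...response headers...<CRLF><CRLF>
+//     <body bytes>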
+
+#define ACLK_HOST_PTR_COMPULSORY(x) \
+ if (unlikely(!host)) { \
+ errno = 0; \
+ error(x " needs host pointer"); \
+ break; \
+ }
+
+/*
+ * This function will fetch the next pending command and process it
+ *
+ */
+static int aclk_process_query(struct aclk_query_thread *t_info)
+{
+ struct aclk_query *this_query;
+ static long int query_count = 0;
+ ACLK_METADATA_STATE meta_state;
+ RRDHOST *host;
+
+ if (!aclk_connected)
+ return 0;
+
+ this_query = aclk_queue_pop();
+ if (likely(!this_query)) {
+ return 0;
+ }
+
+ if (unlikely(this_query->deleted)) {
+ debug(D_ACLK, "Garbage collect query %s:%s", this_query->topic, this_query->query);
+ aclk_query_free(this_query);
+ return 1;
+ }
+ query_count++;
+
+ host = (RRDHOST*)this_query->data;
+
+ debug(
+ D_ACLK, "Query #%ld (%s) size=%zu in queue %llu ms", query_count, this_query->topic,
+ this_query->query ? strlen(this_query->query) : 0, (now_realtime_usec() - this_query->created)/USEC_PER_MS);
+
+ switch (this_query->cmd) {
+ case ACLK_CMD_ONCONNECT:
+ ACLK_HOST_PTR_COMPULSORY("ACLK_CMD_ONCONNECT");
+#if ACLK_VERSION_MIN < ACLK_V_CHILDRENSTATE
+ if (host != localhost && aclk_shared_state.version_neg < ACLK_V_CHILDRENSTATE) {
+ error("We are not allowed to send connect message in ACLK version before %d", ACLK_V_CHILDRENSTATE);
+ break;
+ }
+#else
+#warning "This check became unnecessary. Remove"
+#endif
+
+ debug(D_ACLK, "EXECUTING on connect metadata command for host \"%s\" GUID \"%s\"",
+ host->hostname,
+ host->machine_guid);
+
+ rrdhost_aclk_state_lock(host);
+ meta_state = host->aclk_state.metadata;
+ host->aclk_state.metadata = ACLK_METADATA_SENT;
+ rrdhost_aclk_state_unlock(host);
+ aclk_send_metadata(meta_state, host);
+ break;
+
+ case ACLK_CMD_CHART:
+ ACLK_HOST_PTR_COMPULSORY("ACLK_CMD_CHART");
+
+ debug(D_ACLK, "EXECUTING a chart update command");
+ aclk_send_single_chart(host, this_query->query);
+ break;
+
+ case ACLK_CMD_CHARTDEL:
+ ACLK_HOST_PTR_COMPULSORY("ACLK_CMD_CHARTDEL");
+
+ debug(D_ACLK, "EXECUTING a chart delete command");
+ //TODO: This sends the info metadata for now
+ aclk_send_info_metadata(ACLK_METADATA_SENT, host);
+ break;
+
+ case ACLK_CMD_ALARM:
+ debug(D_ACLK, "EXECUTING an alarm update command");
+ aclk_send_message(this_query->topic, this_query->query, this_query->msg_id);
+ break;
+
+ case ACLK_CMD_CLOUD:
+ debug(D_ACLK, "EXECUTING a cloud command");
+ aclk_execute_query(this_query);
+ break;
+ case ACLK_CMD_CLOUD_QUERY_2:
+ debug(D_ACLK, "EXECUTING Cloud Query v2");
+ aclk_execute_query_v2(this_query);
+ break;
+
+ case ACLK_CMD_CHILD_CONNECT:
+ case ACLK_CMD_CHILD_DISCONNECT:
+ ACLK_HOST_PTR_COMPULSORY("ACLK_CMD_CHILD_CONNECT/ACLK_CMD_CHILD_DISCONNECT");
+
+ debug(
+ D_ACLK, "Execution Child %s command",
+ this_query->cmd == ACLK_CMD_CHILD_CONNECT ? "connect" : "disconnect");
+ aclk_send_info_child_connection(host, this_query->cmd);
+ break;
+
+ default:
+ errno = 0;
+ error("Unknown ACLK Query Command");
+ break;
+ }
+ debug(D_ACLK, "Query #%ld (%s) done", query_count, this_query->topic);
+
+ if (aclk_stats_enabled) {
+ ACLK_STATS_LOCK;
+ aclk_metrics_per_sample.queries_dispatched++;
+ aclk_queries_per_thread[t_info->idx]++;
+ ACLK_STATS_UNLOCK;
+ }
+
+ aclk_query_free(this_query);
+
+ return 1;
+}
+
+void aclk_query_threads_cleanup(struct aclk_query_threads *query_threads)
+{
+ if (query_threads && query_threads->thread_list) {
+ for (int i = 0; i < query_threads->count; i++) {
+ netdata_thread_join(query_threads->thread_list[i].thread, NULL);
+ }
+ freez(query_threads->thread_list);
+ }
+
+ struct aclk_query *this_query;
+
+ do {
+ this_query = aclk_queue_pop();
+ aclk_query_free(this_query);
+ } while (this_query);
+}
+
+#define TASK_LEN_MAX 16
+void aclk_query_threads_start(struct aclk_query_threads *query_threads)
+{
+ info("Starting %d query threads.", query_threads->count);
+
+ char thread_name[TASK_LEN_MAX];
+ query_threads->thread_list = callocz(query_threads->count, sizeof(struct aclk_query_thread));
+ for (int i = 0; i < query_threads->count; i++) {
+ query_threads->thread_list[i].idx = i; //thread needs to know its index for statistics
+
+ if(unlikely(snprintf(thread_name, TASK_LEN_MAX, "%s_%d", ACLK_THREAD_NAME, i) < 0))
+ error("snprintf encoding error");
+ netdata_thread_create(
+ &query_threads->thread_list[i].thread, thread_name, NETDATA_THREAD_OPTION_JOINABLE, aclk_query_main_thread,
+ &query_threads->thread_list[i]);
+ }
+}
+
+/**
+ * Checks and updates the popcorning state of an rrdhost.
+ * Returns the actual (possibly updated) popcorning state.
+ */
+
+ACLK_POPCORNING_STATE aclk_host_popcorn_check(RRDHOST *host)
+{
+ rrdhost_aclk_state_lock(host);
+ ACLK_POPCORNING_STATE ret = host->aclk_state.state;
+ if (host->aclk_state.state != ACLK_HOST_INITIALIZING){
+ rrdhost_aclk_state_unlock(host);
+ return ret;
+ }
+
+ if (!host->aclk_state.t_last_popcorn_update){
+ rrdhost_aclk_state_unlock(host);
+ return ret;
+ }
+
+ time_t t_diff = now_monotonic_sec() - host->aclk_state.t_last_popcorn_update;
+
+ if (t_diff >= ACLK_STABLE_TIMEOUT) {
+ host->aclk_state.state = ACLK_HOST_STABLE;
+ host->aclk_state.t_last_popcorn_update = 0;
+ rrdhost_aclk_state_unlock(host);
+ info("Host \"%s\" stable, ACLK popcorning finished. Last interrupt was %ld seconds ago", host->hostname, t_diff);
+ return ACLK_HOST_STABLE;
+ }
+
+ rrdhost_aclk_state_unlock(host);
+ return ret;
+}
+
+/**
+ * Main query processing thread
+ *
+ * On startup, wait for the agent collectors to initialize.
+ * Expect at least ACLK_STABLE_TIMEOUT seconds with no new
+ * collectors coming in before marking the agent as stable
+ * (set agent_state = AGENT_STABLE).
+ */
+void *aclk_query_main_thread(void *ptr)
+{
+ struct aclk_query_thread *info = ptr;
+
+ while (!netdata_exit) {
+ if(aclk_host_popcorn_check(localhost) == ACLK_HOST_STABLE) {
+#ifdef ACLK_DEBUG
+ _dump_collector_list();
+#endif
+ break;
+ }
+ sleep_usec(USEC_PER_SEC * 1);
+ }
+
+ while (!netdata_exit) {
+ if(aclk_disable_runtime) {
+ sleep(1);
+ continue;
+ }
+ ACLK_SHARED_STATE_LOCK;
+ if (unlikely(!aclk_shared_state.version_neg)) {
+ if (!aclk_shared_state.version_neg_wait_till || aclk_shared_state.version_neg_wait_till > now_monotonic_usec()) {
+ ACLK_SHARED_STATE_UNLOCK;
+ info("Waiting for ACLK Version Negotiation message from Cloud");
+ sleep(1);
+ continue;
+ }
+ errno = 0;
+ error("ACLK version negotiation failed. No reply to \"hello\" with \"version\" from cloud in time of %ds."
+ " Reverting to default ACLK version of %d.", VERSION_NEG_TIMEOUT, ACLK_VERSION_MIN);
+ aclk_shared_state.version_neg = ACLK_VERSION_MIN;
+ aclk_set_rx_handlers(aclk_shared_state.version_neg);
+ }
+ ACLK_SHARED_STATE_UNLOCK;
+
+ rrdhost_aclk_state_lock(localhost);
+ if (unlikely(localhost->aclk_state.metadata == ACLK_METADATA_REQUIRED)) {
+ if (unlikely(aclk_queue_query("on_connect", localhost, NULL, NULL, 0, 1, ACLK_CMD_ONCONNECT))) {
+ rrdhost_aclk_state_unlock(localhost);
+ errno = 0;
+ error("ACLK failed to queue on_connect command");
+ sleep(1);
+ continue;
+ }
+ localhost->aclk_state.metadata = ACLK_METADATA_CMD_QUEUED;
+ }
+ rrdhost_aclk_state_unlock(localhost);
+
+ ACLK_SHARED_STATE_LOCK;
+ if (aclk_shared_state.next_popcorn_host && aclk_host_popcorn_check(aclk_shared_state.next_popcorn_host) == ACLK_HOST_STABLE) {
+ aclk_queue_query("on_connect", aclk_shared_state.next_popcorn_host, NULL, NULL, 0, 1, ACLK_CMD_ONCONNECT);
+ aclk_shared_state.next_popcorn_host = NULL;
+ aclk_update_next_child_to_popcorn();
+ }
+ ACLK_SHARED_STATE_UNLOCK;
+
+ while (aclk_process_query(info)) {
+ // Process all commands
+ };
+
+ QUERY_THREAD_LOCK;
+
+ // TODO: Need to check if there are queries awaiting already
+ if (unlikely(pthread_cond_wait(&query_cond_wait, &query_lock_wait)))
+ sleep_usec(USEC_PER_SEC * 1);
+
+ QUERY_THREAD_UNLOCK;
+ }
+
+ return NULL;
+}
+
+#ifndef __GNUC__
+#pragma endregion
+#endif
diff --git a/aclk/legacy/aclk_query.h b/aclk/legacy/aclk_query.h
new file mode 100644
index 0000000..53eef13
--- /dev/null
+++ b/aclk/legacy/aclk_query.h
@@ -0,0 +1,40 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_ACLK_QUERY_H
+#define NETDATA_ACLK_QUERY_H
+
+#include "libnetdata/libnetdata.h"
+#include "web/server/web_client.h"
+
+#define ACLK_STABLE_TIMEOUT 3 // Minimum delay to mark AGENT as stable
+
+extern pthread_cond_t query_cond_wait;
+extern pthread_mutex_t query_lock_wait;
+#define QUERY_THREAD_WAKEUP pthread_cond_signal(&query_cond_wait)
+#define QUERY_THREAD_WAKEUP_ALL pthread_cond_broadcast(&query_cond_wait)
+
+extern volatile int aclk_connected;
+
+struct aclk_query_thread {
+ netdata_thread_t thread;
+ int idx;
+};
+
+struct aclk_query_threads {
+ struct aclk_query_thread *thread_list;
+ int count;
+};
+
+struct aclk_cloud_req_v2 {
+ char *data;
+ RRDHOST *host;
+};
+
+void *aclk_query_main_thread(void *ptr);
+int aclk_queue_query(char *token, void *data, char *msg_type, char *query, int run_after, int internal, ACLK_CMD cmd);
+
+void aclk_query_threads_start(struct aclk_query_threads *query_threads);
+void aclk_query_threads_cleanup(struct aclk_query_threads *query_threads);
+unsigned int aclk_query_size();
+
+#endif //NETDATA_ACLK_QUERY_H
diff --git a/aclk/legacy/aclk_rrdhost_state.h b/aclk/legacy/aclk_rrdhost_state.h
new file mode 100644
index 0000000..7ab3a50
--- /dev/null
+++ b/aclk/legacy/aclk_rrdhost_state.h
@@ -0,0 +1,42 @@
+#ifndef ACLK_RRDHOST_STATE_H
+#define ACLK_RRDHOST_STATE_H
+
+#include "../../libnetdata/libnetdata.h"
+
+typedef enum aclk_cmd {
+ ACLK_CMD_CLOUD,
+ ACLK_CMD_ONCONNECT,
+ ACLK_CMD_INFO,
+ ACLK_CMD_CHART,
+ ACLK_CMD_CHARTDEL,
+ ACLK_CMD_ALARM,
+ ACLK_CMD_CLOUD_QUERY_2,
+ ACLK_CMD_CHILD_CONNECT,
+ ACLK_CMD_CHILD_DISCONNECT
+} ACLK_CMD;
+
+typedef enum aclk_metadata_state {
+ ACLK_METADATA_REQUIRED,
+ ACLK_METADATA_CMD_QUEUED,
+ ACLK_METADATA_SENT
+} ACLK_METADATA_STATE;
+
+typedef enum aclk_agent_state {
+ ACLK_HOST_INITIALIZING,
+ ACLK_HOST_STABLE
+} ACLK_POPCORNING_STATE;
+
+typedef struct aclk_rrdhost_state {
+ char *claimed_id; // Claimed ID if host has one otherwise NULL
+
+#ifdef ENABLE_ACLK
+ // per child popcorning
+ ACLK_POPCORNING_STATE state;
+ ACLK_METADATA_STATE metadata;
+
+ time_t timestamp_created;
+ time_t t_last_popcorn_update;
+#endif /* ENABLE_ACLK */
+} aclk_rrdhost_state;
+
+#endif /* ACLK_RRDHOST_STATE_H */
diff --git a/aclk/legacy/aclk_rx_msgs.c b/aclk/legacy/aclk_rx_msgs.c
new file mode 100644
index 0000000..99fa9d9
--- /dev/null
+++ b/aclk/legacy/aclk_rx_msgs.c
@@ -0,0 +1,365 @@
+
+#include "aclk_rx_msgs.h"
+
+#include "aclk_common.h"
+#include "aclk_stats.h"
+#include "aclk_query.h"
+
+#ifndef UUID_STR_LEN
+#define UUID_STR_LEN 37
+#endif
+
+static inline int aclk_extract_v2_data(char *payload, char **data)
+{
+ char* ptr = strstr(payload, ACLK_V2_PAYLOAD_SEPARATOR);
+ if(!ptr)
+ return 1;
+ ptr += strlen(ACLK_V2_PAYLOAD_SEPARATOR);
+ *data = strdupz(ptr);
+ return 0;
+}
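+
+// Illustrative v2 payload layout (hypothetical content): a JSON dictionary,
+// the separator, then the raw HTTP request that this function copies into *data:
+//
+//     {"type":"http","msg-id":"..."}<ACLK_V2_PAYLOAD_SEPARATOR>GET /api/v1/info HTTP/1.1 ...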
+
+#define ACLK_GET_REQ "GET "
+#define ACLK_CHILD_REQ "/host/"
+#define ACLK_CLOUD_REQ_V2_PREFIX "/api/v1/"
+#define STRNCMP_CONSTANT_PREFIX(str, const_pref) strncmp(str, const_pref, strlen(const_pref))
+static inline int aclk_v2_payload_get_query(struct aclk_cloud_req_v2 *cloud_req, struct aclk_request *req)
+{
+ const char *start, *end, *ptr;
+ char uuid_str[UUID_STR_LEN];
+ uuid_t uuid;
+
+ errno = 0;
+
+ if(STRNCMP_CONSTANT_PREFIX(cloud_req->data, ACLK_GET_REQ)) {
+ error("Only accepting GET HTTP requests from CLOUD");
+ return 1;
+ }
+ start = ptr = cloud_req->data + strlen(ACLK_GET_REQ);
+
+ if(!STRNCMP_CONSTANT_PREFIX(ptr, ACLK_CHILD_REQ)) {
+ ptr += strlen(ACLK_CHILD_REQ);
+ if(strlen(ptr) < UUID_STR_LEN) {
+ error("the child id in URL too short \"%s\"", start);
+ return 1;
+ }
+
+ strncpyz(uuid_str, ptr, UUID_STR_LEN - 1);
+
+ for(int i = 0; i < UUID_STR_LEN && uuid_str[i]; i++)
+ uuid_str[i] = tolower(uuid_str[i]);
+
+ if(ptr[0] && uuid_parse(uuid_str, uuid)) {
+ error("Got Child query (/host/XXX/...) host id \"%s\" doesn't look like valid GUID", uuid_str);
+ return 1;
+ }
+ ptr += UUID_STR_LEN - 1;
+
+ cloud_req->host = rrdhost_find_by_guid(uuid_str, 0);
+ if(!cloud_req->host) {
+ error("Cannot find host with GUID \"%s\"", uuid_str);
+ return 1;
+ }
+ }
+
+ if(STRNCMP_CONSTANT_PREFIX(ptr, ACLK_CLOUD_REQ_V2_PREFIX)) {
+ error("Only accepting requests that start with \"%s\" from CLOUD.", ACLK_CLOUD_REQ_V2_PREFIX);
+ return 1;
+ }
+
+ if(!(end = strstr(ptr, " HTTP/1.1\x0D\x0A"))) {
+ errno = 0;
+ error("Doesn't look like HTTP GET request.");
+ return 1;
+ }
+
+ req->payload = mallocz((end - start) + 1);
+ strncpyz(req->payload, start, end - start);
+
+ return 0;
+}
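+
+// Worked example (hypothetical GUID): for the payload
+//     GET /host/12345678-1234-1234-1234-123456789abc/api/v1/charts HTTP/1.1 ...
+// the GUID selects cloud_req->host via rrdhost_find_by_guid() and req->payload
+// becomes
+//     /host/12345678-1234-1234-1234-123456789abc/api/v1/charts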
+
+#define HTTP_CHECK_AGENT_INITIALIZED() rrdhost_aclk_state_lock(localhost);\
+ if (unlikely(localhost->aclk_state.state == ACLK_HOST_INITIALIZING)) {\
+ debug(D_ACLK, "Ignoring \"http\" cloud request; agent not in stable state");\
+ rrdhost_aclk_state_unlock(localhost);\
+ return 1;\
+ }\
+ rrdhost_aclk_state_unlock(localhost);
+
+/*
+ * Parse the incoming payload and queue a command if valid
+ */
+static int aclk_handle_cloud_request_v1(struct aclk_request *cloud_to_agent, char *raw_payload)
+{
+ UNUSED(raw_payload);
+ HTTP_CHECK_AGENT_INITIALIZED();
+
+ errno = 0;
+ if (unlikely(cloud_to_agent->version != 1)) {
+ error(
+ "Received \"http\" message from Cloud with version %d, but ACLK version %d is used",
+ cloud_to_agent->version,
+ aclk_shared_state.version_neg);
+ return 1;
+ }
+
+ if (unlikely(!cloud_to_agent->payload)) {
+ error("payload missing");
+ return 1;
+ }
+
+ if (unlikely(!cloud_to_agent->callback_topic)) {
+ error("callback_topic missing");
+ return 1;
+ }
+
+ if (unlikely(!cloud_to_agent->msg_id)) {
+ error("msg_id missing");
+ return 1;
+ }
+
+ if (unlikely(aclk_queue_query(cloud_to_agent->callback_topic, NULL, cloud_to_agent->msg_id, cloud_to_agent->payload, 0, 0, ACLK_CMD_CLOUD)))
+ debug(D_ACLK, "ACLK failed to queue incoming \"http\" message");
+
+ return 0;
+}
+
+static int aclk_handle_cloud_request_v2(struct aclk_request *cloud_to_agent, char *raw_payload)
+{
+ HTTP_CHECK_AGENT_INITIALIZED();
+
+ struct aclk_cloud_req_v2 *cloud_req;
+ char *data;
+
+ errno = 0;
+ if (cloud_to_agent->version < ACLK_V_COMPRESSION) {
+ error(
+ "This handler cannot reply to request with version older than %d, received %d.",
+ ACLK_V_COMPRESSION,
+ cloud_to_agent->version);
+ return 1;
+ }
+
+ if (unlikely(aclk_extract_v2_data(raw_payload, &data))) {
+ error("Error extracting payload expected after the JSON dictionary.");
+ return 1;
+ }
+
+ cloud_req = mallocz(sizeof(struct aclk_cloud_req_v2));
+ cloud_req->data = data;
+ cloud_req->host = localhost;
+
+ if (unlikely(aclk_v2_payload_get_query(cloud_req, cloud_to_agent))) {
+ error("Could not extract payload from query");
+ goto cleanup;
+ }
+
+ if (unlikely(!cloud_to_agent->callback_topic)) {
+ error("Missing callback_topic");
+ goto cleanup;
+ }
+
+ if (unlikely(!cloud_to_agent->msg_id)) {
+ error("Missing msg_id");
+ goto cleanup;
+ }
+
+ // aclk_queue_query takes ownership of data pointer
+ if (unlikely(aclk_queue_query(
+ cloud_to_agent->callback_topic, cloud_req, cloud_to_agent->msg_id, cloud_to_agent->payload, 0, 0,
+ ACLK_CMD_CLOUD_QUERY_2))) {
+ error("ACLK failed to queue incoming \"http\" v2 message");
+ goto cleanup;
+ }
+
+ return 0;
+cleanup:
+ freez(cloud_req->data);
+ freez(cloud_req);
+ return 1;
+}
+
+// This handles the "version" message from the cloud, used to negotiate
+// the protocol version we will use
+static int aclk_handle_version_response(struct aclk_request *cloud_to_agent, char *raw_payload)
+{
+ UNUSED(raw_payload);
+ int version = -1;
+ errno = 0;
+
+ if (unlikely(cloud_to_agent->version != ACLK_VERSION_NEG_VERSION)) {
+ error(
+ "Unsuported version of \"version\" message from cloud. Expected %d, Got %d",
+ ACLK_VERSION_NEG_VERSION,
+ cloud_to_agent->version);
+ return 1;
+ }
+ if (unlikely(!cloud_to_agent->min_version)) {
+ error("Min version missing or 0");
+ return 1;
+ }
+ if (unlikely(!cloud_to_agent->max_version)) {
+ error("Max version missing or 0");
+ return 1;
+ }
+ if (unlikely(cloud_to_agent->max_version < cloud_to_agent->min_version)) {
+ error(
+ "Max version (%d) must be >= than min version (%d)", cloud_to_agent->max_version,
+ cloud_to_agent->min_version);
+ return 1;
+ }
+
+ if (unlikely(cloud_to_agent->min_version > ACLK_VERSION_MAX)) {
+ error(
+ "Agent too old for this cloud. Minimum version required by cloud %d."
+ " Maximum version supported by this agent %d.",
+ cloud_to_agent->min_version, ACLK_VERSION_MAX);
+ aclk_kill_link = 1;
+ aclk_disable_runtime = 1;
+ return 1;
+ }
+ if (unlikely(cloud_to_agent->max_version < ACLK_VERSION_MIN)) {
+ error(
+ "Cloud version is too old for this agent. Maximum version supported by cloud %d."
+ " Minimum (oldest) version supported by this agent %d.",
+ cloud_to_agent->max_version, ACLK_VERSION_MIN);
+ aclk_kill_link = 1;
+ return 1;
+ }
+
+ version = MIN(cloud_to_agent->max_version, ACLK_VERSION_MAX);
+
+ ACLK_SHARED_STATE_LOCK;
+ if (unlikely(now_monotonic_usec() > aclk_shared_state.version_neg_wait_till)) {
+ errno = 0;
+ error("The \"version\" message came too late ignoring.");
+ goto err_cleanup;
+ }
+ if (unlikely(aclk_shared_state.version_neg)) {
+ errno = 0;
+ error("Version has already been set to %d", aclk_shared_state.version_neg);
+ goto err_cleanup;
+ }
+ aclk_shared_state.version_neg = version;
+ ACLK_SHARED_STATE_UNLOCK;
+
+ info("Choosing version %d of ACLK", version);
+
+ aclk_set_rx_handlers(version);
+
+ return 0;
+
+err_cleanup:
+ ACLK_SHARED_STATE_UNLOCK;
+ return 1;
+}
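+
+// Worked negotiation example (hypothetical version numbers): if the cloud
+// offers min_version = 1 and max_version = 4 while this agent supports up to
+// ACLK_VERSION_MAX = 3, the chosen version is MIN(4, 3) = 3.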
+
+typedef struct aclk_incoming_msg_type {
+ char *name;
+ int (*fnc)(struct aclk_request *, char *);
+} aclk_incoming_msg_type;
+
+aclk_incoming_msg_type aclk_incoming_msg_types_v1[] = {
+ { .name = "http", .fnc = aclk_handle_cloud_request_v1 },
+ { .name = "version", .fnc = aclk_handle_version_response },
+ { .name = NULL, .fnc = NULL }
+};
+
+aclk_incoming_msg_type aclk_incoming_msg_types_compression[] = {
+ { .name = "http", .fnc = aclk_handle_cloud_request_v2 },
+ { .name = "version", .fnc = aclk_handle_version_response },
+ { .name = NULL, .fnc = NULL }
+};
+
+struct aclk_incoming_msg_type *aclk_incoming_msg_types = aclk_incoming_msg_types_v1;
+
+void aclk_set_rx_handlers(int version)
+{
+ if(version >= ACLK_V_COMPRESSION) {
+ aclk_incoming_msg_types = aclk_incoming_msg_types_compression;
+ return;
+ }
+
+ aclk_incoming_msg_types = aclk_incoming_msg_types_v1;
+}
+
+int aclk_handle_cloud_message(char *payload)
+{
+ struct aclk_request cloud_to_agent;
+ memset(&cloud_to_agent, 0, sizeof(struct aclk_request));
+
+ if (aclk_stats_enabled) {
+ ACLK_STATS_LOCK;
+ aclk_metrics_per_sample.cloud_req_recvd++;
+ ACLK_STATS_UNLOCK;
+ }
+
+ if (unlikely(!payload)) {
+ errno = 0;
+ error("ACLK incoming message is empty");
+ goto err_cleanup_nojson;
+ }
+
+ debug(D_ACLK, "ACLK incoming message (%s)", payload);
+
+ int rc = json_parse(payload, &cloud_to_agent, cloud_to_agent_parse);
+
+ if (unlikely(rc != JSON_OK)) {
+ errno = 0;
+ error("Malformed json request (%s)", payload);
+ goto err_cleanup;
+ }
+
+ if (!cloud_to_agent.type_id) {
+ errno = 0;
+ error("Cloud message is missing compulsory key \"type\"");
+ goto err_cleanup;
+ }
+
+ if (!aclk_shared_state.version_neg && strcmp(cloud_to_agent.type_id, "version")) {
+ error("Only \"version\" message is allowed before popcorning and version negotiation is finished. Ignoring");
+ goto err_cleanup;
+ }
+
+ for (int i = 0; aclk_incoming_msg_types[i].name; i++) {
+ if (strcmp(cloud_to_agent.type_id, aclk_incoming_msg_types[i].name) == 0) {
+ if (likely(!aclk_incoming_msg_types[i].fnc(&cloud_to_agent, payload))) {
+ // On success the handler is supposed to clean up after itself,
+ // or, as in the case of aclk_handle_cloud_request, take
+ // ownership of the pointers (done to avoid copying);
+ // see what the `aclk_queue_query` parameter `internal` does.
+
+ // NEVER CONTINUE THIS LOOP AFTER CALLING THE HANDLER!!!
+ // Message handlers (namely aclk_handle_version_response)
+ // can freely change what aclk_incoming_msg_types points to,
+ // so either exit or restart this for loop.
+ freez(cloud_to_agent.type_id);
+ return 0;
+ }
+ goto err_cleanup;
+ }
+ }
+
+ errno = 0;
+ error("Unknown message type from Cloud \"%s\"", cloud_to_agent.type_id);
+
+err_cleanup:
+ if (cloud_to_agent.payload)
+ freez(cloud_to_agent.payload);
+ if (cloud_to_agent.type_id)
+ freez(cloud_to_agent.type_id);
+ if (cloud_to_agent.msg_id)
+ freez(cloud_to_agent.msg_id);
+ if (cloud_to_agent.callback_topic)
+ freez(cloud_to_agent.callback_topic);
+
+err_cleanup_nojson:
+ if (aclk_stats_enabled) {
+ ACLK_STATS_LOCK;
+ aclk_metrics_per_sample.cloud_req_err++;
+ ACLK_STATS_UNLOCK;
+ }
+
+ return 1;
+}
diff --git a/aclk/legacy/aclk_rx_msgs.h b/aclk/legacy/aclk_rx_msgs.h
new file mode 100644
index 0000000..3095e41
--- /dev/null
+++ b/aclk/legacy/aclk_rx_msgs.h
@@ -0,0 +1,13 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_ACLK_RX_MSGS_H
+#define NETDATA_ACLK_RX_MSGS_H
+
+#include "../../daemon/common.h"
+#include "libnetdata/libnetdata.h"
+
+int aclk_handle_cloud_message(char *payload);
+void aclk_set_rx_handlers(int version);
+
+
+#endif /* NETDATA_ACLK_RX_MSGS_H */
diff --git a/aclk/legacy/aclk_stats.c b/aclk/legacy/aclk_stats.c
new file mode 100644
index 0000000..2a57cd6
--- /dev/null
+++ b/aclk/legacy/aclk_stats.c
@@ -0,0 +1,298 @@
+#include "aclk_stats.h"
+
+netdata_mutex_t aclk_stats_mutex = NETDATA_MUTEX_INITIALIZER;
+
+int aclk_stats_enabled;
+
+int query_thread_count;
+
+// data the ACLK stats need per query thread
+struct aclk_qt_data {
+ RRDDIM *dim;
+} *aclk_qt_data = NULL;
+
+uint32_t *aclk_queries_per_thread = NULL;
+uint32_t *aclk_queries_per_thread_sample = NULL;
+
+struct aclk_metrics aclk_metrics = {
+ .online = 0,
+};
+
+struct aclk_metrics_per_sample aclk_metrics_per_sample;
+
+struct aclk_mat_metrics aclk_mat_metrics = {
+#ifdef NETDATA_INTERNAL_CHECKS
+ .latency = { .name = "aclk_latency_mqtt",
+ .prio = 200002,
+ .st = NULL,
+ .rd_avg = NULL,
+ .rd_max = NULL,
+ .rd_total = NULL,
+ .unit = "ms",
+ .title = "ACLK Message Publish Latency" },
+#endif
+
+ .cloud_q_db_query_time = { .name = "aclk_db_query_time",
+ .prio = 200006,
+ .st = NULL,
+ .rd_avg = NULL,
+ .rd_max = NULL,
+ .rd_total = NULL,
+ .unit = "us",
+ .title = "Time it took to process cloud requested DB queries" },
+
+ .cloud_q_recvd_to_processed = { .name = "aclk_cloud_q_recvd_to_processed",
+ .prio = 200007,
+ .st = NULL,
+ .rd_avg = NULL,
+ .rd_max = NULL,
+ .rd_total = NULL,
+ .unit = "us",
+ .title = "Time from receiving the Cloud Query until it was picked up "
+ "by query thread (just before passing to the database)." }
+};
+
+void aclk_metric_mat_update(struct aclk_metric_mat_data *metric, usec_t measurement)
+{
+ if (aclk_stats_enabled) {
+ ACLK_STATS_LOCK;
+ if (metric->max < measurement)
+ metric->max = measurement;
+
+ metric->total += measurement;
+ metric->count++;
+ ACLK_STATS_UNLOCK;
+ }
+}
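+
+// Worked example (hypothetical measurements): after recording 10, 20 and 30,
+// the sample holds total = 60, count = 3 and max = 30; the stats thread later
+// derives the average (60 / 3 = 20) in aclk_stats_mat_metric_process().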
+
+static void aclk_stats_collect(struct aclk_metrics_per_sample *per_sample, struct aclk_metrics *permanent)
+{
+ static RRDSET *st_aclkstats = NULL;
+ static RRDDIM *rd_online_status = NULL;
+
+ if (unlikely(!st_aclkstats)) {
+ st_aclkstats = rrdset_create_localhost(
+ "netdata", "aclk_status", NULL, "aclk", NULL, "ACLK/Cloud connection status",
+ "connected", "netdata", "stats", 200000, localhost->rrd_update_every, RRDSET_TYPE_LINE);
+
+ rd_online_status = rrddim_add(st_aclkstats, "online", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ } else
+ rrdset_next(st_aclkstats);
+
+ rrddim_set_by_pointer(st_aclkstats, rd_online_status, per_sample->offline_during_sample ? 0 : permanent->online);
+
+ rrdset_done(st_aclkstats);
+}
+
+static void aclk_stats_query_queue(struct aclk_metrics_per_sample *per_sample)
+{
+ static RRDSET *st_query_thread = NULL;
+ static RRDDIM *rd_queued = NULL;
+ static RRDDIM *rd_dispatched = NULL;
+
+ if (unlikely(!st_query_thread)) {
+ st_query_thread = rrdset_create_localhost(
+ "netdata", "aclk_query_per_second", NULL, "aclk", NULL, "ACLK Queries per second", "queries/s",
+ "netdata", "stats", 200001, localhost->rrd_update_every, RRDSET_TYPE_AREA);
+
+ rd_queued = rrddim_add(st_query_thread, "added", NULL, 1, localhost->rrd_update_every, RRD_ALGORITHM_ABSOLUTE);
+ rd_dispatched = rrddim_add(st_query_thread, "dispatched", NULL, -1, localhost->rrd_update_every, RRD_ALGORITHM_ABSOLUTE);
+ } else
+ rrdset_next(st_query_thread);
+
+ rrddim_set_by_pointer(st_query_thread, rd_queued, per_sample->queries_queued);
+ rrddim_set_by_pointer(st_query_thread, rd_dispatched, per_sample->queries_dispatched);
+
+ rrdset_done(st_query_thread);
+}
+
+static void aclk_stats_write_q(struct aclk_metrics_per_sample *per_sample)
+{
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_wq_add = NULL;
+ static RRDDIM *rd_wq_consumed = NULL;
+
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "netdata", "aclk_write_q", NULL, "aclk", NULL, "Write Queue Mosq->Libwebsockets", "kB/s",
+ "netdata", "stats", 200003, localhost->rrd_update_every, RRDSET_TYPE_AREA);
+
+ rd_wq_add = rrddim_add(st, "added", NULL, 1, 1024 * localhost->rrd_update_every, RRD_ALGORITHM_ABSOLUTE);
+ rd_wq_consumed = rrddim_add(st, "consumed", NULL, 1, -1024 * localhost->rrd_update_every, RRD_ALGORITHM_ABSOLUTE);
+ } else
+ rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_wq_add, per_sample->write_q_added);
+ rrddim_set_by_pointer(st, rd_wq_consumed, per_sample->write_q_consumed);
+
+ rrdset_done(st);
+}
+
+static void aclk_stats_read_q(struct aclk_metrics_per_sample *per_sample)
+{
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_rq_add = NULL;
+ static RRDDIM *rd_rq_consumed = NULL;
+
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "netdata", "aclk_read_q", NULL, "aclk", NULL, "Read Queue Libwebsockets->Mosq", "kB/s",
+ "netdata", "stats", 200004, localhost->rrd_update_every, RRDSET_TYPE_AREA);
+
+ rd_rq_add = rrddim_add(st, "added", NULL, 1, 1024 * localhost->rrd_update_every, RRD_ALGORITHM_ABSOLUTE);
+ rd_rq_consumed = rrddim_add(st, "consumed", NULL, 1, -1024 * localhost->rrd_update_every, RRD_ALGORITHM_ABSOLUTE);
+ } else
+ rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_rq_add, per_sample->read_q_added);
+ rrddim_set_by_pointer(st, rd_rq_consumed, per_sample->read_q_consumed);
+
+ rrdset_done(st);
+}
+
+static void aclk_stats_cloud_req(struct aclk_metrics_per_sample *per_sample)
+{
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_rq_rcvd = NULL;
+ static RRDDIM *rd_rq_err = NULL;
+
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "netdata", "aclk_cloud_req", NULL, "aclk", NULL, "Requests received from cloud", "req/s",
+ "netdata", "stats", 200005, localhost->rrd_update_every, RRDSET_TYPE_STACKED);
+
+ rd_rq_rcvd = rrddim_add(st, "received", NULL, 1, localhost->rrd_update_every, RRD_ALGORITHM_ABSOLUTE);
+ rd_rq_err = rrddim_add(st, "malformed", NULL, 1, localhost->rrd_update_every, RRD_ALGORITHM_ABSOLUTE);
+ } else
+ rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_rq_rcvd, per_sample->cloud_req_recvd - per_sample->cloud_req_err);
+ rrddim_set_by_pointer(st, rd_rq_err, per_sample->cloud_req_err);
+
+ rrdset_done(st);
+}
+
+#define MAX_DIM_NAME 16
+static void aclk_stats_query_threads(uint32_t *queries_per_thread)
+{
+ static RRDSET *st = NULL;
+
+ char dim_name[MAX_DIM_NAME];
+
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "netdata", "aclk_query_threads", NULL, "aclk", NULL, "Queries Processed Per Thread", "req/s",
+ "netdata", "stats", 200007, localhost->rrd_update_every, RRDSET_TYPE_STACKED);
+
+ for (int i = 0; i < query_thread_count; i++) {
+ if (snprintf(dim_name, MAX_DIM_NAME, "Query %d", i) < 0)
+ error("snprintf encoding error");
+ aclk_qt_data[i].dim = rrddim_add(st, dim_name, NULL, 1, localhost->rrd_update_every, RRD_ALGORITHM_ABSOLUTE);
+ }
+ } else
+ rrdset_next(st);
+
+ for (int i = 0; i < query_thread_count; i++) {
+ rrddim_set_by_pointer(st, aclk_qt_data[i].dim, queries_per_thread[i]);
+ }
+
+ rrdset_done(st);
+}
+
+static void aclk_stats_mat_metric_process(struct aclk_metric_mat *metric, struct aclk_metric_mat_data *data)
+{
+ if(unlikely(!metric->st)) {
+ metric->st = rrdset_create_localhost(
+ "netdata", metric->name, NULL, "aclk", NULL, metric->title, metric->unit, "netdata", "stats", metric->prio,
+ localhost->rrd_update_every, RRDSET_TYPE_LINE);
+
+ metric->rd_avg = rrddim_add(metric->st, "avg", NULL, 1, localhost->rrd_update_every, RRD_ALGORITHM_ABSOLUTE);
+ metric->rd_max = rrddim_add(metric->st, "max", NULL, 1, localhost->rrd_update_every, RRD_ALGORITHM_ABSOLUTE);
+ metric->rd_total = rrddim_add(metric->st, "total", NULL, 1, localhost->rrd_update_every, RRD_ALGORITHM_ABSOLUTE);
+ } else
+ rrdset_next(metric->st);
+
+ if(data->count)
+ rrddim_set_by_pointer(metric->st, metric->rd_avg, roundf((float)data->total / data->count));
+ else
+ rrddim_set_by_pointer(metric->st, metric->rd_avg, 0);
+ rrddim_set_by_pointer(metric->st, metric->rd_max, data->max);
+ rrddim_set_by_pointer(metric->st, metric->rd_total, data->total);
+
+ rrdset_done(metric->st);
+}
+
+void aclk_stats_thread_cleanup()
+{
+ freez(aclk_qt_data);
+ freez(aclk_queries_per_thread);
+ freez(aclk_queries_per_thread_sample);
+}
+
+void *aclk_stats_main_thread(void *ptr)
+{
+ struct aclk_stats_thread *args = ptr;
+
+ query_thread_count = args->query_thread_count;
+ aclk_qt_data = callocz(query_thread_count, sizeof(struct aclk_qt_data));
+ aclk_queries_per_thread = callocz(query_thread_count, sizeof(uint32_t));
+ aclk_queries_per_thread_sample = callocz(query_thread_count, sizeof(uint32_t));
+
+ heartbeat_t hb;
+ heartbeat_init(&hb);
+ usec_t step_ut = localhost->rrd_update_every * USEC_PER_SEC;
+
+ memset(&aclk_metrics_per_sample, 0, sizeof(struct aclk_metrics_per_sample));
+
+ struct aclk_metrics_per_sample per_sample;
+ struct aclk_metrics permanent;
+
+ while (!netdata_exit) {
+ netdata_thread_testcancel();
+ // ------------------------------------------------------------------------
+ // Wait for the next iteration point.
+
+ heartbeat_next(&hb, step_ut);
+ if (netdata_exit) break;
+
+ ACLK_STATS_LOCK;
+ // avoid holding the lock longer than necessary, especially
+ // during the database rrd* operations below
+ memcpy(&per_sample, &aclk_metrics_per_sample, sizeof(struct aclk_metrics_per_sample));
+ memcpy(&permanent, &aclk_metrics, sizeof(struct aclk_metrics));
+ memset(&aclk_metrics_per_sample, 0, sizeof(struct aclk_metrics_per_sample));
+
+ memcpy(aclk_queries_per_thread_sample, aclk_queries_per_thread, sizeof(uint32_t) * query_thread_count);
+ memset(aclk_queries_per_thread, 0, sizeof(uint32_t) * query_thread_count);
+ ACLK_STATS_UNLOCK;
+
+ aclk_stats_collect(&per_sample, &permanent);
+ aclk_stats_query_queue(&per_sample);
+
+ aclk_stats_write_q(&per_sample);
+ aclk_stats_read_q(&per_sample);
+
+ aclk_stats_cloud_req(&per_sample);
+ aclk_stats_query_threads(aclk_queries_per_thread_sample);
+
+#ifdef NETDATA_INTERNAL_CHECKS
+ aclk_stats_mat_metric_process(&aclk_mat_metrics.latency, &per_sample.latency);
+#endif
+ aclk_stats_mat_metric_process(&aclk_mat_metrics.cloud_q_db_query_time, &per_sample.cloud_q_db_query_time);
+ aclk_stats_mat_metric_process(&aclk_mat_metrics.cloud_q_recvd_to_processed, &per_sample.cloud_q_recvd_to_processed);
+ }
+
+ return 0;
+}
+
+void aclk_stats_upd_online(int online) {
+ if(!aclk_stats_enabled)
+ return;
+
+ ACLK_STATS_LOCK;
+ aclk_metrics.online = online;
+
+ if(!online)
+ aclk_metrics_per_sample.offline_during_sample = 1;
+ ACLK_STATS_UNLOCK;
+}
diff --git a/aclk/legacy/aclk_stats.h b/aclk/legacy/aclk_stats.h
new file mode 100644
index 0000000..7e74fdf
--- /dev/null
+++ b/aclk/legacy/aclk_stats.h
@@ -0,0 +1,91 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_ACLK_STATS_H
+#define NETDATA_ACLK_STATS_H
+
+#include "../../daemon/common.h"
+#include "libnetdata/libnetdata.h"
+#include "aclk_common.h"
+
+#define ACLK_STATS_THREAD_NAME "ACLK_Stats"
+
+extern netdata_mutex_t aclk_stats_mutex;
+
+#define ACLK_STATS_LOCK netdata_mutex_lock(&aclk_stats_mutex)
+#define ACLK_STATS_UNLOCK netdata_mutex_unlock(&aclk_stats_mutex)
+
+extern int aclk_stats_enabled;
+
+struct aclk_stats_thread {
+ netdata_thread_t *thread;
+ int query_thread_count;
+};
+
+// preserve between samples
+struct aclk_metrics {
+ volatile uint8_t online;
+};
+
+// mat = max, average, total
+struct aclk_metric_mat_data {
+ volatile uint32_t total;
+ volatile uint32_t count;
+ volatile uint32_t max;
+};
+
+// mat = max, average, total
+struct aclk_metric_mat {
+ char *name;
+ char *title;
+ RRDSET *st;
+ RRDDIM *rd_avg;
+ RRDDIM *rd_max;
+ RRDDIM *rd_total;
+ long prio;
+ char *unit;
+};
+
+extern struct aclk_mat_metrics {
+#ifdef NETDATA_INTERNAL_CHECKS
+ struct aclk_metric_mat latency;
+#endif
+ struct aclk_metric_mat cloud_q_db_query_time;
+ struct aclk_metric_mat cloud_q_recvd_to_processed;
+} aclk_mat_metrics;
+
+void aclk_metric_mat_update(struct aclk_metric_mat_data *metric, usec_t measurement);
+
+// reset to 0 on every sample
+extern struct aclk_metrics_per_sample {
+ /* in the unlikely event of the ACLK disconnecting
+ and reconnecting within one sampling interval,
+ we still want to record the disconnection, even
+ though it will then appear longer on the graph */
+ volatile uint8_t offline_during_sample;
+
+ volatile uint32_t queries_queued;
+ volatile uint32_t queries_dispatched;
+
+ volatile uint32_t write_q_added;
+ volatile uint32_t write_q_consumed;
+
+ volatile uint32_t read_q_added;
+ volatile uint32_t read_q_consumed;
+
+ volatile uint32_t cloud_req_recvd;
+ volatile uint32_t cloud_req_err;
+
+#ifdef NETDATA_INTERNAL_CHECKS
+ struct aclk_metric_mat_data latency;
+#endif
+ struct aclk_metric_mat_data cloud_q_db_query_time;
+ struct aclk_metric_mat_data cloud_q_recvd_to_processed;
+} aclk_metrics_per_sample;
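+
+/* Producers bump these counters while holding the stats lock,
+ * e.g. (illustrative sketch):
+ *
+ *   if (aclk_stats_enabled) {
+ *       ACLK_STATS_LOCK;
+ *       aclk_metrics_per_sample.queries_queued++;
+ *       ACLK_STATS_UNLOCK;
+ *   }
+ */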
+
+extern uint32_t *aclk_queries_per_thread;
+
+void *aclk_stats_main_thread(void *ptr);
+void aclk_stats_thread_cleanup();
+void aclk_stats_upd_online(int online);
+
+#endif /* NETDATA_ACLK_STATS_H */
diff --git a/aclk/legacy/agent_cloud_link.c b/aclk/legacy/agent_cloud_link.c
new file mode 100644
index 0000000..e51a013
--- /dev/null
+++ b/aclk/legacy/agent_cloud_link.c
@@ -0,0 +1,1683 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "libnetdata/libnetdata.h"
+#include "agent_cloud_link.h"
+#include "aclk_lws_https_client.h"
+#include "aclk_query.h"
+#include "aclk_common.h"
+#include "aclk_stats.h"
+
+#ifdef ENABLE_ACLK
+#include <libwebsockets.h>
+#endif
+
+int aclk_shutting_down = 0;
+
+// Other global state
+static int aclk_subscribed = 0;
+static int aclk_disable_single_updates = 0;
+static char *aclk_username = NULL;
+static char *aclk_password = NULL;
+
+static char *global_base_topic = NULL;
+static int aclk_connecting = 0;
+int aclk_force_reconnect = 0; // Indication from lower layers
+usec_t aclk_session_us = 0; // Used by the mqtt layer
+time_t aclk_session_sec = 0; // Used by the mqtt layer
+
+static netdata_mutex_t aclk_mutex = NETDATA_MUTEX_INITIALIZER;
+static netdata_mutex_t collector_mutex = NETDATA_MUTEX_INITIALIZER;
+
+#define ACLK_LOCK netdata_mutex_lock(&aclk_mutex)
+#define ACLK_UNLOCK netdata_mutex_unlock(&aclk_mutex)
+
+#define COLLECTOR_LOCK netdata_mutex_lock(&collector_mutex)
+#define COLLECTOR_UNLOCK netdata_mutex_unlock(&collector_mutex)
+
+void lws_wss_check_queues(size_t *write_len, size_t *write_len_bytes, size_t *read_len);
+void aclk_lws_wss_destroy_context();
+/*
+ * Maintain a list of collectors and their chart counts.
+ * If all the charts of a collector are deleted,
+ * a new metadata dataset must be sent to the cloud.
+ */
+struct _collector {
+ time_t created;
+ uint32_t count; //chart count
+ uint32_t hostname_hash;
+ uint32_t plugin_hash;
+ uint32_t module_hash;
+ char *hostname;
+ char *plugin_name;
+ char *module_name;
+ struct _collector *next;
+};
+
+struct _collector *collector_list = NULL;
+
+char *create_uuid()
+{
+ uuid_t uuid;
+ char *uuid_str = mallocz(36 + 1);
+
+ uuid_generate(uuid);
+ uuid_unparse(uuid, uuid_str);
+
+ return uuid_str;
+}
+
+int cloud_to_agent_parse(JSON_ENTRY *e)
+{
+ struct aclk_request *data = e->callback_data;
+
+ switch (e->type) {
+ case JSON_OBJECT:
+ case JSON_ARRAY:
+ break;
+ case JSON_STRING:
+ if (!strcmp(e->name, "msg-id")) {
+ data->msg_id = strdupz(e->data.string);
+ break;
+ }
+ if (!strcmp(e->name, "type")) {
+ data->type_id = strdupz(e->data.string);
+ break;
+ }
+ if (!strcmp(e->name, "callback-topic")) {
+ data->callback_topic = strdupz(e->data.string);
+ break;
+ }
+ if (!strcmp(e->name, "payload")) {
+ if (likely(e->data.string)) {
+ size_t len = strlen(e->data.string);
+ data->payload = mallocz(len+1);
+ if (!url_decode_r(data->payload, e->data.string, len + 1))
+ strcpy(data->payload, e->data.string);
+ }
+ break;
+ }
+ break;
+ case JSON_NUMBER:
+ if (!strcmp(e->name, "version")) {
+ data->version = e->data.number;
+ break;
+ }
+ if (!strcmp(e->name, "min-version")) {
+ data->min_version = e->data.number;
+ break;
+ }
+ if (!strcmp(e->name, "max-version")) {
+ data->max_version = e->data.number;
+ break;
+ }
+
+ break;
+
+ case JSON_BOOLEAN:
+ break;
+
+ case JSON_NULL:
+ break;
+ }
+ return 0;
+}
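+
+/* An example of a cloud request this parser fills struct aclk_request from
+ * (illustrative; the exact field set depends on the message type, and the
+ * payload arrives URL-encoded):
+ *
+ *   {
+ *     "msg-id": "00000000-0000-0000-0000-000000000000",
+ *     "type": "http",
+ *     "callback-topic": "cloud/response-topic",
+ *     "payload": "api%2Fv1%2Finfo",
+ *     "version": 2
+ *   }
+ */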
+
+
+static RSA *aclk_private_key = NULL;
+static int create_private_key()
+{
+ if (aclk_private_key != NULL)
+ RSA_free(aclk_private_key);
+ aclk_private_key = NULL;
+ char filename[FILENAME_MAX + 1];
+ snprintfz(filename, FILENAME_MAX, "%s/cloud.d/private.pem", netdata_configured_varlib_dir);
+
+ long bytes_read;
+ char *private_key = read_by_filename(filename, &bytes_read);
+ if (!private_key) {
+ error("Claimed agent cannot establish ACLK - unable to load private key '%s' failed.", filename);
+ return 1;
+ }
+ debug(D_ACLK, "Claimed agent loaded private key len=%ld bytes", bytes_read);
+
+ BIO *key_bio = BIO_new_mem_buf(private_key, -1);
+ if (key_bio==NULL) {
+ error("Claimed agent cannot establish ACLK - failed to create BIO for key");
+ goto biofailed;
+ }
+
+ aclk_private_key = PEM_read_bio_RSAPrivateKey(key_bio, NULL, NULL, NULL);
+ BIO_free(key_bio);
+ if (aclk_private_key!=NULL)
+ {
+ freez(private_key);
+ return 0;
+ }
+ char err[512];
+ ERR_error_string_n(ERR_get_error(), err, sizeof(err));
+ error("Claimed agent cannot establish ACLK - cannot create private key: %s", err);
+
+biofailed:
+ freez(private_key);
+ return 1;
+}
+
+/*
+ * Compute the delay (in milliseconds) before the next reconnection attempt
+ * after a connection failure.
+ *
+ * mode 0 resets the backoff state; call it when a connection is established
+ * mode 1 returns the next sleep time in [0 .. ACLK_MAX_BACKOFF_DELAY * 1000] ms
+ */
+unsigned long int aclk_reconnect_delay(int mode)
+{
+ static int fail = -1;
+ unsigned long int delay;
+
+ if (!mode || fail == -1) {
+ srandom(time(NULL));
+ fail = mode - 1;
+ return 0;
+ }
+
+ delay = (1 << fail);
+
+ if (delay >= ACLK_MAX_BACKOFF_DELAY) {
+ delay = ACLK_MAX_BACKOFF_DELAY * 1000;
+ } else {
+ fail++;
+ delay = (delay * 1000) + (random() % 1000);
+ }
+
+ return delay;
+}
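+
+/* Example progression (illustrative; the jitter is random() % 1000 ms):
+ *
+ *   aclk_reconnect_delay(1);   // first failure seeds the state -> 0
+ *   aclk_reconnect_delay(1);   // -> 1000 + jitter ms
+ *   aclk_reconnect_delay(1);   // -> 2000 + jitter ms
+ *   aclk_reconnect_delay(1);   // -> 4000 + jitter ms
+ *   // ...doubling until capped at ACLK_MAX_BACKOFF_DELAY * 1000 ms
+ *   aclk_reconnect_delay(0);   // connection established -> reset, returns 0
+ */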
+
+// This will give the base topic to which the agent will publish messages.
+// Subtopics will be sent under the base topic, e.g. base_topic/subtopic.
+// This is called during the connection; we delete any previous topic
+// in case the user has changed the agent id and reclaimed.
+
+char *create_publish_base_topic()
+{
+ char *agent_id = is_agent_claimed();
+ if (unlikely(!agent_id))
+ return NULL;
+
+ ACLK_LOCK;
+
+ if (global_base_topic)
+ freez(global_base_topic);
+ char tmp_topic[ACLK_MAX_TOPIC + 1], *tmp;
+
+ snprintf(tmp_topic, ACLK_MAX_TOPIC, ACLK_TOPIC_STRUCTURE, agent_id);
+ tmp = strchr(tmp_topic, '\n');
+ if (unlikely(tmp))
+ *tmp = '\0';
+ global_base_topic = strdupz(tmp_topic);
+
+ ACLK_UNLOCK;
+ freez(agent_id);
+ return global_base_topic;
+}
+
+/*
+ * Build a topic based on sub_topic and final_topic.
+ * If the sub topic starts with '/', assume it is an absolute topic.
+ */
+
+char *get_topic(char *sub_topic, char *final_topic, int max_size)
+{
+ int rc;
+
+ if (likely(sub_topic && sub_topic[0] == '/'))
+ return sub_topic;
+
+ if (unlikely(!global_base_topic))
+ return sub_topic;
+
+ rc = snprintf(final_topic, max_size, "%s/%s", global_base_topic, sub_topic);
+ if (unlikely(rc >= max_size))
+ debug(D_ACLK, "Topic has been truncated to [%s] instead of [%s/%s]", final_topic, global_base_topic, sub_topic);
+
+ return final_topic;
+}
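+
+/* Usage sketch (assuming global_base_topic is "/agent/<claim-id>"):
+ *
+ *   char buf[ACLK_MAX_TOPIC + 1];
+ *   char *topic = get_topic("outbound/meta", buf, ACLK_MAX_TOPIC);
+ *   // topic -> "/agent/<claim-id>/outbound/meta"
+ *   // a sub_topic that already starts with '/' is returned unchanged
+ */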
+
+#ifndef __GNUC__
+#pragma region ACLK Internal Collector Tracking
+#endif
+
+/*
+ * Free a collector structure
+ */
+
+static void _free_collector(struct _collector *collector)
+{
+ if (likely(collector->plugin_name))
+ freez(collector->plugin_name);
+
+ if (likely(collector->module_name))
+ freez(collector->module_name);
+
+ if (likely(collector->hostname))
+ freez(collector->hostname);
+
+ freez(collector);
+}
+
+/*
+ * This will report the collector list
+ *
+ */
+#ifdef ACLK_DEBUG
+static void _dump_collector_list()
+{
+ struct _collector *tmp_collector;
+
+ COLLECTOR_LOCK;
+
+ info("DUMPING ALL COLLECTORS");
+
+ if (unlikely(!collector_list || !collector_list->next)) {
+ COLLECTOR_UNLOCK;
+ info("DUMPING ALL COLLECTORS -- nothing found");
+ return;
+ }
+
+ // Note that the first entry is "dummy"
+ tmp_collector = collector_list->next;
+
+ while (tmp_collector) {
+ info(
+ "COLLECTOR %s : [%s:%s] count = %u", tmp_collector->hostname,
+ tmp_collector->plugin_name ? tmp_collector->plugin_name : "",
+ tmp_collector->module_name ? tmp_collector->module_name : "", tmp_collector->count);
+
+ tmp_collector = tmp_collector->next;
+ }
+ info("DUMPING ALL COLLECTORS DONE");
+ COLLECTOR_UNLOCK;
+}
+#endif
+
+/*
+ * This will clean up the collector list.
+ */
+static void _reset_collector_list()
+{
+ struct _collector *tmp_collector, *next_collector;
+
+ COLLECTOR_LOCK;
+
+ if (unlikely(!collector_list || !collector_list->next)) {
+ COLLECTOR_UNLOCK;
+ return;
+ }
+
+ // Note that the first entry is "dummy"
+ tmp_collector = collector_list->next;
+ collector_list->count = 0;
+ collector_list->next = NULL;
+
+ // We broke the link; we can unlock
+ COLLECTOR_UNLOCK;
+
+ while (tmp_collector) {
+ next_collector = tmp_collector->next;
+ _free_collector(tmp_collector);
+ tmp_collector = next_collector;
+ }
+}
+
+/*
+ * Find a collector (if it exists)
+ * Must lock before calling this
+ * If last_collector is not null, it will return the previous collector in the linked
+ * list (used in collector delete)
+ */
+static struct _collector *_find_collector(
+ const char *hostname, const char *plugin_name, const char *module_name, struct _collector **last_collector)
+{
+ struct _collector *tmp_collector, *prev_collector;
+ uint32_t plugin_hash;
+ uint32_t module_hash;
+ uint32_t hostname_hash;
+
+ if (unlikely(!collector_list)) {
+ collector_list = callocz(1, sizeof(struct _collector));
+ return NULL;
+ }
+
+ if (unlikely(!collector_list->next))
+ return NULL;
+
+ plugin_hash = plugin_name ? simple_hash(plugin_name) : 1;
+ module_hash = module_name ? simple_hash(module_name) : 1;
+ hostname_hash = simple_hash(hostname);
+
+ // Note that the first entry is "dummy"
+ tmp_collector = collector_list->next;
+ prev_collector = collector_list;
+ while (tmp_collector) {
+ if (plugin_hash == tmp_collector->plugin_hash && module_hash == tmp_collector->module_hash &&
+ hostname_hash == tmp_collector->hostname_hash && (!strcmp(hostname, tmp_collector->hostname)) &&
+ (!plugin_name || !tmp_collector->plugin_name || !strcmp(plugin_name, tmp_collector->plugin_name)) &&
+ (!module_name || !tmp_collector->module_name || !strcmp(module_name, tmp_collector->module_name))) {
+ if (unlikely(last_collector))
+ *last_collector = prev_collector;
+
+ return tmp_collector;
+ }
+
+ prev_collector = tmp_collector;
+ tmp_collector = tmp_collector->next;
+ }
+
+ return tmp_collector;
+}
+
+/*
+ * Called to delete a collector
+ * It will reduce the count (chart_count) and will remove it
+ * from the linked list if the count reaches zero
+ * The structure will be returned to the caller to free
+ * the resources
+ *
+ */
+static struct _collector *_del_collector(const char *hostname, const char *plugin_name, const char *module_name)
+{
+ struct _collector *tmp_collector, *prev_collector = NULL;
+
+ tmp_collector = _find_collector(hostname, plugin_name, module_name, &prev_collector);
+
+ if (likely(tmp_collector)) {
+ --tmp_collector->count;
+ if (unlikely(!tmp_collector->count))
+ prev_collector->next = tmp_collector->next;
+ }
+ return tmp_collector;
+}
+
+/*
+ * Add a new collector (plugin / module) to the list
+ * If it already exists just update the chart count
+ *
+ * Lock before calling
+ */
+static struct _collector *_add_collector(const char *hostname, const char *plugin_name, const char *module_name)
+{
+ struct _collector *tmp_collector;
+
+ tmp_collector = _find_collector(hostname, plugin_name, module_name, NULL);
+
+ if (unlikely(!tmp_collector)) {
+ tmp_collector = callocz(1, sizeof(struct _collector));
+ tmp_collector->hostname_hash = simple_hash(hostname);
+ tmp_collector->plugin_hash = plugin_name ? simple_hash(plugin_name) : 1;
+ tmp_collector->module_hash = module_name ? simple_hash(module_name) : 1;
+
+ tmp_collector->hostname = strdupz(hostname);
+ tmp_collector->plugin_name = plugin_name ? strdupz(plugin_name) : NULL;
+ tmp_collector->module_name = module_name ? strdupz(module_name) : NULL;
+
+ tmp_collector->next = collector_list->next;
+ collector_list->next = tmp_collector;
+ }
+ tmp_collector->count++;
+ debug(
+ D_ACLK, "ADD COLLECTOR %s [%s:%s] -- chart %u", hostname, plugin_name ? plugin_name : "*",
+ module_name ? module_name : "*", tmp_collector->count);
+ return tmp_collector;
+}
+
+#ifndef __GNUC__
+#pragma endregion
+#endif
+
+/* Avoids the need to scan through all RRDHOSTs
+ * every time any query thread wakes up
+ * (i.e. every time we need to check child popcorn expiry).
+ * Call with ACLK_SHARED_STATE_LOCK held.
+ */
+void aclk_update_next_child_to_popcorn(void)
+{
+ RRDHOST *host;
+ int any = 0;
+
+ rrd_rdlock();
+ rrdhost_foreach_read(host) {
+ if (unlikely(host == localhost || rrdhost_flag_check(host, RRDHOST_FLAG_ARCHIVED)))
+ continue;
+
+ rrdhost_aclk_state_lock(host);
+ if (!ACLK_IS_HOST_POPCORNING(host)) {
+ rrdhost_aclk_state_unlock(host);
+ continue;
+ }
+
+ any = 1;
+
+ if (unlikely(!aclk_shared_state.next_popcorn_host)) {
+ aclk_shared_state.next_popcorn_host = host;
+ rrdhost_aclk_state_unlock(host);
+ continue;
+ }
+
+ if (aclk_shared_state.next_popcorn_host->aclk_state.t_last_popcorn_update > host->aclk_state.t_last_popcorn_update)
+ aclk_shared_state.next_popcorn_host = host;
+
+ rrdhost_aclk_state_unlock(host);
+ }
+ if(!any)
+ aclk_shared_state.next_popcorn_host = NULL;
+
+ rrd_unlock();
+}
+
+/* If the host is popcorning, bump its timer.
+ * Return 1 if popcorning or initializing (host not stable),
+ * 0 otherwise.
+ */
+static int aclk_popcorn_check_bump(RRDHOST *host)
+{
+ time_t now = now_monotonic_sec();
+ int updated = 0, ret;
+ ACLK_SHARED_STATE_LOCK;
+ rrdhost_aclk_state_lock(host);
+
+ ret = ACLK_IS_HOST_INITIALIZING(host);
+ if (unlikely(ACLK_IS_HOST_POPCORNING(host))) {
+ if(now != host->aclk_state.t_last_popcorn_update) {
+ updated = 1;
+ info("Restarting ACLK popcorn timer for host \"%s\" with GUID \"%s\"", host->hostname, host->machine_guid);
+ }
+ host->aclk_state.t_last_popcorn_update = now;
+ rrdhost_aclk_state_unlock(host);
+
+ if (host != localhost && updated)
+ aclk_update_next_child_to_popcorn();
+
+ ACLK_SHARED_STATE_UNLOCK;
+ return ret;
+ }
+
+ rrdhost_aclk_state_unlock(host);
+ ACLK_SHARED_STATE_UNLOCK;
+ return ret;
+}
+
+inline static int aclk_host_initializing(RRDHOST *host)
+{
+ rrdhost_aclk_state_lock(host);
+ int ret = ACLK_IS_HOST_INITIALIZING(host);
+ rrdhost_aclk_state_unlock(host);
+ return ret;
+}
+
+static void aclk_start_host_popcorning(RRDHOST *host)
+{
+ time_t now = now_monotonic_sec();
+ info("Starting ACLK popcorn timer for host \"%s\" with GUID \"%s\"", host->hostname, host->machine_guid);
+ ACLK_SHARED_STATE_LOCK;
+ rrdhost_aclk_state_lock(host);
+ if (host == localhost && !ACLK_IS_HOST_INITIALIZING(host)) {
+ errno = 0;
+ error("Localhost is allowed to do popcorning only once after startup!");
+ rrdhost_aclk_state_unlock(host);
+ ACLK_SHARED_STATE_UNLOCK;
+ return;
+ }
+
+ host->aclk_state.state = ACLK_HOST_INITIALIZING;
+ host->aclk_state.metadata = ACLK_METADATA_REQUIRED;
+ host->aclk_state.t_last_popcorn_update = now;
+ rrdhost_aclk_state_unlock(host);
+ if (host != localhost)
+ aclk_update_next_child_to_popcorn();
+ ACLK_SHARED_STATE_UNLOCK;
+}
+
+static void aclk_stop_host_popcorning(RRDHOST *host)
+{
+ ACLK_SHARED_STATE_LOCK;
+ rrdhost_aclk_state_lock(host);
+ if (!ACLK_IS_HOST_POPCORNING(host)) {
+ rrdhost_aclk_state_unlock(host);
+ ACLK_SHARED_STATE_UNLOCK;
+ return;
+ }
+
+ info("Host Disconnected before ACLK popcorning finished. Canceling. Host \"%s\" GUID:\"%s\"", host->hostname, host->machine_guid);
+ host->aclk_state.t_last_popcorn_update = 0;
+ host->aclk_state.metadata = ACLK_METADATA_REQUIRED;
+ rrdhost_aclk_state_unlock(host);
+
+ if(host == aclk_shared_state.next_popcorn_host) {
+ aclk_shared_state.next_popcorn_host = NULL;
+ aclk_update_next_child_to_popcorn();
+ }
+ ACLK_SHARED_STATE_UNLOCK;
+}
+
+/*
+ * Add a new collector to the list
+ * If it exists, update the chart count
+ */
+void aclk_add_collector(RRDHOST *host, const char *plugin_name, const char *module_name)
+{
+ struct _collector *tmp_collector;
+ if (unlikely(!netdata_ready)) {
+ return;
+ }
+
+ COLLECTOR_LOCK;
+
+ tmp_collector = _add_collector(host->machine_guid, plugin_name, module_name);
+
+ if (unlikely(tmp_collector->count != 1)) {
+ COLLECTOR_UNLOCK;
+ return;
+ }
+
+ COLLECTOR_UNLOCK;
+
+ if(aclk_popcorn_check_bump(host))
+ return;
+
+ if (unlikely(aclk_queue_query("collector", host, NULL, NULL, 0, 1, ACLK_CMD_ONCONNECT)))
+ debug(D_ACLK, "ACLK failed to queue on_connect command on collector addition");
+}
+
+/*
+ * Delete a collector from the list
+ * If the chart count reaches zero the collector will be removed
+ * from the list by calling del_collector.
+ *
+ * This function will release the memory used and schedule
+ * a cloud update
+ */
+void aclk_del_collector(RRDHOST *host, const char *plugin_name, const char *module_name)
+{
+ struct _collector *tmp_collector;
+ if (unlikely(!netdata_ready)) {
+ return;
+ }
+
+ COLLECTOR_LOCK;
+
+ tmp_collector = _del_collector(host->machine_guid, plugin_name, module_name);
+
+ if (unlikely(!tmp_collector || tmp_collector->count)) {
+ COLLECTOR_UNLOCK;
+ return;
+ }
+
+ debug(
+ D_ACLK, "DEL COLLECTOR [%s:%s] -- charts %u", plugin_name ? plugin_name : "*", module_name ? module_name : "*",
+ tmp_collector->count);
+
+ COLLECTOR_UNLOCK;
+
+ _free_collector(tmp_collector);
+
+ if (aclk_popcorn_check_bump(host))
+ return;
+
+ if (unlikely(aclk_queue_query("collector", host, NULL, NULL, 0, 1, ACLK_CMD_ONCONNECT)))
+ debug(D_ACLK, "ACLK failed to queue on_connect command on collector deletion");
+}
+
+static void aclk_graceful_disconnect()
+{
+ size_t write_q, write_q_bytes, read_q;
+ time_t event_loop_timeout;
+
+ // Send a graceful disconnect message
+ BUFFER *b = buffer_create(512);
+ aclk_create_header(b, "disconnect", NULL, 0, 0, aclk_shared_state.version_neg);
+ buffer_strcat(b, ",\n\t\"payload\": \"graceful\"}");
+ aclk_send_message(ACLK_METADATA_TOPIC, (char*)buffer_tostring(b), NULL);
+ buffer_free(b);
+
+ event_loop_timeout = now_realtime_sec() + 5;
+ write_q = 1;
+ while (write_q && event_loop_timeout > now_realtime_sec()) {
+ _link_event_loop();
+ lws_wss_check_queues(&write_q, &write_q_bytes, &read_q);
+ }
+
+ aclk_shutting_down = 1;
+ _link_shutdown();
+ aclk_lws_wss_mqtt_layer_disconect_notif();
+
+ write_q = 1;
+ event_loop_timeout = now_realtime_sec() + 5;
+ while (write_q && event_loop_timeout > now_realtime_sec()) {
+ _link_event_loop();
+ lws_wss_check_queues(&write_q, &write_q_bytes, &read_q);
+ }
+ aclk_shutting_down = 0;
+}
+
+#ifndef __GNUC__
+#pragma region Incoming Msg Parsing
+#endif
+
+struct dictionary_singleton {
+ char *key;
+ char *result;
+};
+
+int json_extract_singleton(JSON_ENTRY *e)
+{
+ struct dictionary_singleton *data = e->callback_data;
+
+ switch (e->type) {
+ case JSON_OBJECT:
+ case JSON_ARRAY:
+ break;
+ case JSON_STRING:
+ if (!strcmp(e->name, data->key)) {
+ data->result = strdupz(e->data.string);
+ break;
+ }
+ break;
+ case JSON_NUMBER:
+ case JSON_BOOLEAN:
+ case JSON_NULL:
+ break;
+ }
+ return 0;
+}
+
+#ifndef __GNUC__
+#pragma endregion
+#endif
+
+
+#ifndef __GNUC__
+#pragma region Challenge Response
+#endif
+
+// Base-64 decoder.
+// Note: This is non-validating; invalid input will be decoded without an error.
+// Challenges are packed into JSON strings, so we do not need to skip newlines.
+// Size errors (i.e. invalid input size or insufficient output space) are caught.
+size_t base64_decode(unsigned char *input, size_t input_size, unsigned char *output, size_t output_size)
+{
+ static char lookup[256];
+ static int first_time=1;
+ if (first_time)
+ {
+ first_time = 0;
+ for(int i=0; i<256; i++)
+ lookup[i] = -1;
+ for(int i='A'; i<='Z'; i++)
+ lookup[i] = i-'A';
+ for(int i='a'; i<='z'; i++)
+ lookup[i] = i-'a' + 26;
+ for(int i='0'; i<='9'; i++)
+ lookup[i] = i-'0' + 52;
+ lookup['+'] = 62;
+ lookup['/'] = 63;
+ }
+ if ((input_size & 3) != 0)
+ {
+ error("Can't decode base-64 input length %zu", input_size);
+ return 0;
+ }
+ size_t unpadded_size = (input_size/4) * 3;
+ if ( unpadded_size > output_size )
+ {
+ error("Output buffer size %zu is too small to decode %zu into", output_size, input_size);
+ return 0;
+ }
+ // Don't check padding within full quantums
+ for (size_t i = 0 ; i < input_size-4 ; i+=4 )
+ {
+ uint32_t value = (lookup[input[0]] << 18) + (lookup[input[1]] << 12) + (lookup[input[2]] << 6) + lookup[input[3]];
+ output[0] = value >> 16;
+ output[1] = value >> 8;
+ output[2] = value;
+ //error("Decoded %c %c %c %c -> %02x %02x %02x", input[0], input[1], input[2], input[3], output[0], output[1], output[2]);
+ output += 3;
+ input += 4;
+ }
+ // Handle padding only in last quantum
+ if (input[2] == '=') {
+ uint32_t value = (lookup[input[0]] << 6) + lookup[input[1]];
+ output[0] = value >> 4;
+ //error("Decoded %c %c %c %c -> %02x", input[0], input[1], input[2], input[3], output[0]);
+ return unpadded_size-2;
+ }
+ else if (input[3] == '=') {
+ uint32_t value = (lookup[input[0]] << 12) + (lookup[input[1]] << 6) + lookup[input[2]];
+ output[0] = value >> 10;
+ output[1] = value >> 2;
+ //error("Decoded %c %c %c %c -> %02x %02x", input[0], input[1], input[2], input[3], output[0], output[1]);
+ return unpadded_size-1;
+ }
+ else
+ {
+ uint32_t value = (input[0] << 18) + (input[1] << 12) + (input[2]<<6) + input[3];
+ output[0] = value >> 16;
+ output[1] = value >> 8;
+ output[2] = value;
+ //error("Decoded %c %c %c %c -> %02x %02x %02x", input[0], input[1], input[2], input[3], output[0], output[1], output[2]);
+ return unpadded_size;
+ }
+}
+
+size_t base64_encode(unsigned char *input, size_t input_size, char *output, size_t output_size)
+{
+ uint32_t value;
+ static char lookup[] = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
+ "abcdefghijklmnopqrstuvwxyz"
+ "0123456789+/";
+ if ((input_size/3+1)*4 >= output_size)
+ {
+ error("Output buffer for encoding size=%zu is not large enough for %zu-bytes input", output_size, input_size);
+ return 0;
+ }
+ size_t count = 0;
+ while (input_size>3)
+ {
+ value = ((input[0] << 16) + (input[1] << 8) + input[2]) & 0xffffff;
+ output[0] = lookup[value >> 18];
+ output[1] = lookup[(value >> 12) & 0x3f];
+ output[2] = lookup[(value >> 6) & 0x3f];
+ output[3] = lookup[value & 0x3f];
+ //error("Base-64 encode (%04x) -> %c %c %c %c\n", value, output[0], output[1], output[2], output[3]);
+ output += 4;
+ input += 3;
+ input_size -= 3;
+ count += 4;
+ }
+ switch (input_size)
+ {
+ case 2:
+ value = (input[0] << 10) + (input[1] << 2);
+ output[0] = lookup[(value >> 12) & 0x3f];
+ output[1] = lookup[(value >> 6) & 0x3f];
+ output[2] = lookup[value & 0x3f];
+ output[3] = '=';
+ //error("Base-64 encode (%06x) -> %c %c %c %c\n", (value>>2)&0xffff, output[0], output[1], output[2], output[3]);
+ count += 4;
+ break;
+ case 1:
+ value = input[0] << 4;
+ output[0] = lookup[(value >> 6) & 0x3f];
+ output[1] = lookup[value & 0x3f];
+ output[2] = '=';
+ output[3] = '=';
+ //error("Base-64 encode (%06x) -> %c %c %c %c\n", value, output[0], output[1], output[2], output[3]);
+ count += 4;
+ break;
+ case 0:
+ break;
+ }
+ return count;
+}
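+
+/* Round-trip sketch for the two helpers above (illustrative):
+ *
+ *   unsigned char raw[] = { 'h', 'i' };
+ *   char enc[8];
+ *   size_t n = base64_encode(raw, sizeof(raw), enc, sizeof(enc));
+ *   // enc contains "aGk=", n == 4
+ *   unsigned char dec[4];
+ *   size_t m = base64_decode((unsigned char *)enc, n, dec, sizeof(dec));
+ *   // m == 2, dec holds { 'h', 'i' }
+ */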
+
+
+
+int private_decrypt(unsigned char * enc_data, int data_len, unsigned char *decrypted)
+{
+ int result = RSA_private_decrypt( data_len, enc_data, decrypted, aclk_private_key, RSA_PKCS1_OAEP_PADDING);
+ if (result == -1) {
+ char err[512];
+ ERR_error_string_n(ERR_get_error(), err, sizeof(err));
+ error("Decryption of the challenge failed: %s", err);
+ }
+ return result;
+}
+
+void aclk_get_challenge(char *aclk_hostname, int port)
+{
+ char *data_buffer = mallocz(NETDATA_WEB_RESPONSE_INITIAL_SIZE);
+ debug(D_ACLK, "Performing challenge-response sequence");
+ if (aclk_password != NULL)
+ {
+ freez(aclk_password);
+ aclk_password = NULL;
+ }
+ // curl http://cloud-iam-agent-service:8080/api/v1/auth/node/00000000-0000-0000-0000-000000000000/challenge
+ // TODO - target host?
+ char *agent_id = is_agent_claimed();
+ if (agent_id == NULL)
+ {
+ error("Agent was not claimed - cannot perform challenge/response");
+ goto CLEANUP;
+ }
+ char url[1024];
+ sprintf(url, "/api/v1/auth/node/%s/challenge", agent_id);
+ info("Retrieving challenge from cloud: %s %d %s", aclk_hostname, port, url);
+ if(aclk_send_https_request("GET", aclk_hostname, port, url, data_buffer, NETDATA_WEB_RESPONSE_INITIAL_SIZE, NULL))
+ {
+ error("Challenge failed: %s", data_buffer);
+ goto CLEANUP;
+ }
+ struct dictionary_singleton challenge = { .key = "challenge", .result = NULL };
+
+ debug(D_ACLK, "Challenge response from cloud: %s", data_buffer);
+ if ( json_parse(data_buffer, &challenge, json_extract_singleton) != JSON_OK)
+ {
+ freez(challenge.result);
+ error("Could not parse the json response with the challenge: %s", data_buffer);
+ goto CLEANUP;
+ }
+ if (challenge.result == NULL ) {
+ error("Could not retrieve challenge from auth response: %s", data_buffer);
+ goto CLEANUP;
+ }
+
+
+ size_t challenge_len = strlen(challenge.result);
+ unsigned char decoded[512];
+ size_t decoded_len = base64_decode((unsigned char*)challenge.result, challenge_len, decoded, sizeof(decoded));
+
+ unsigned char plaintext[4096]={};
+ int decrypted_length = private_decrypt(decoded, decoded_len, plaintext);
+ freez(challenge.result);
+ if (decrypted_length == -1) // decryption failed; do not feed -1 into base64_encode()
+ goto CLEANUP;
+ char encoded[512];
+ size_t encoded_len = base64_encode(plaintext, decrypted_length, encoded, sizeof(encoded));
+ encoded[encoded_len] = 0;
+ debug(D_ACLK, "Encoded len=%zu Decryption len=%d: '%s'", encoded_len, decrypted_length, encoded);
+
+ char response_json[4096]={};
+ sprintf(response_json, "{\"response\":\"%s\"}", encoded);
+ debug(D_ACLK, "Password phase: %s",response_json);
+ // TODO - host
+ sprintf(url, "/api/v1/auth/node/%s/password", agent_id);
+ if(aclk_send_https_request("POST", aclk_hostname, port, url, data_buffer, NETDATA_WEB_RESPONSE_INITIAL_SIZE, response_json))
+ {
+ error("Challenge-response failed: %s", data_buffer);
+ goto CLEANUP;
+ }
+
+ debug(D_ACLK, "Password response from cloud: %s", data_buffer);
+
+ struct dictionary_singleton password = { .key = "password", .result = NULL };
+ if ( json_parse(data_buffer, &password, json_extract_singleton) != JSON_OK)
+ {
+ freez(password.result);
+ error("Could not parse the json response with the password: %s", data_buffer);
+ goto CLEANUP;
+ }
+
+ if (password.result == NULL ) {
+ error("Could not retrieve password from auth response");
+ goto CLEANUP;
+ }
+ if (aclk_password != NULL )
+ freez(aclk_password);
+ aclk_password = password.result;
+ if (aclk_username != NULL)
+ freez(aclk_username);
+ aclk_username = agent_id;
+ agent_id = NULL;
+
+CLEANUP:
+ if (agent_id != NULL)
+ freez(agent_id);
+ freez(data_buffer);
+ return;
+}
+
+#ifndef __GNUC__
+#pragma endregion
+#endif
+
+static void aclk_try_to_connect(char *hostname, int port)
+{
+ int rc;
+
+// this is useful for developers working on ACLK
+// it allows connecting the agent to any MQTT broker
+// for debugging, development and testing purposes
+#ifndef ACLK_DISABLE_CHALLENGE
+ if (!aclk_private_key) {
+ error("Cannot try to establish the agent cloud link - no private key available!");
+ return;
+ }
+#endif
+
+ info("Attempting to establish the agent cloud link");
+#ifdef ACLK_DISABLE_CHALLENGE
+ error("Agent built with ACLK_DISABLE_CHALLENGE. This is for testing "
+ "and development purposes only. Warranty void. Won't be able "
+ "to connect to Netdata Cloud.");
+ if (aclk_password == NULL)
+ aclk_password = strdupz("anon");
+#else
+ aclk_get_challenge(hostname, port);
+ if (aclk_password == NULL)
+ return;
+#endif
+
+ aclk_connecting = 1;
+ create_publish_base_topic();
+
+ ACLK_SHARED_STATE_LOCK;
+ aclk_shared_state.version_neg = 0;
+ aclk_shared_state.version_neg_wait_till = 0;
+ ACLK_SHARED_STATE_UNLOCK;
+
+ rc = mqtt_attempt_connection(hostname, port, aclk_username, aclk_password);
+ if (unlikely(rc)) {
+ error("Failed to initialize the agent cloud link library");
+ }
+}
+
+// Sends "hello" message to negotiate ACLK version with cloud
+static inline void aclk_hello_msg()
+{
+ BUFFER *buf = buffer_create(NETDATA_WEB_RESPONSE_HEADER_SIZE);
+
+ char *msg_id = create_uuid();
+
+ ACLK_SHARED_STATE_LOCK;
+ aclk_shared_state.version_neg = 0;
+ aclk_shared_state.version_neg_wait_till = now_monotonic_usec() + USEC_PER_SEC * VERSION_NEG_TIMEOUT;
+ ACLK_SHARED_STATE_UNLOCK;
+
+ // The hello message is versioned separately from the rest of the protocol
+ aclk_create_header(buf, "hello", msg_id, 0, 0, ACLK_VERSION_NEG_VERSION);
+ buffer_sprintf(buf, ",\"min-version\":%d,\"max-version\":%d}", ACLK_VERSION_MIN, ACLK_VERSION_MAX);
+ aclk_send_message(ACLK_METADATA_TOPIC, buf->buffer, msg_id);
+ freez(msg_id);
+ buffer_free(buf);
+}
+
+/**
+ * Main agent cloud link thread
+ *
+ * This thread will simply call the main event loop that handles
+ * pending requests - both inbound and outbound
+ *
+ * @param ptr is a pointer to the netdata_static_thread structure.
+ *
+ * @return It always returns NULL
+ */
+void *aclk_main(void *ptr)
+{
+ struct netdata_static_thread *static_thread = (struct netdata_static_thread *)ptr;
+ struct aclk_query_threads query_threads;
+ struct aclk_stats_thread *stats_thread = NULL;
+ time_t last_periodic_query_wakeup = 0;
+
+ query_threads.thread_list = NULL;
+
+ // This thread is unusual in that it cannot be cancelled by cancel_main_threads()
+ // as it must notify the far end that it shut down gracefully, to avoid the LWT.
+ netdata_thread_disable_cancelability();
+
+#if defined( DISABLE_CLOUD ) || !defined( ENABLE_ACLK)
+ info("Killing ACLK thread -> cloud functionality has been disabled");
+ static_thread->enabled = NETDATA_MAIN_THREAD_EXITED;
+ return NULL;
+#endif
+
+#ifndef LWS_WITH_SOCKS5
+ ACLK_PROXY_TYPE proxy_type;
+ aclk_get_proxy(&proxy_type);
+ if(proxy_type == PROXY_TYPE_SOCKS5) {
+ error("Disabling ACLK due to requested SOCKS5 proxy.");
+ static_thread->enabled = NETDATA_MAIN_THREAD_EXITED;
+ return NULL;
+ }
+#endif
+
+ info("Waiting for netdata to be ready");
+ while (!netdata_ready) {
+ sleep_usec(USEC_PER_MS * 300);
+ }
+
+ info("Waiting for Cloud to be enabled");
+ while (!netdata_cloud_setting) {
+ sleep_usec(USEC_PER_SEC * 1);
+ if (netdata_exit) {
+ static_thread->enabled = NETDATA_MAIN_THREAD_EXITED;
+ return NULL;
+ }
+ }
+
+ query_threads.count = MIN(processors/2, 6);
+ query_threads.count = MAX(query_threads.count, 2);
+ query_threads.count = config_get_number(CONFIG_SECTION_CLOUD, "query thread count", query_threads.count);
+ if(query_threads.count < 1) {
+ error("You need at least one query thread. Overriding configured setting of \"%d\"", query_threads.count);
+ query_threads.count = 1;
+ config_set_number(CONFIG_SECTION_CLOUD, "query thread count", query_threads.count);
+ }
+
+ //start localhost popcorning
+ aclk_start_host_popcorning(localhost);
+
+ aclk_stats_enabled = config_get_boolean(CONFIG_SECTION_CLOUD, "statistics", CONFIG_BOOLEAN_YES);
+ if (aclk_stats_enabled) {
+ stats_thread = callocz(1, sizeof(struct aclk_stats_thread));
+ stats_thread->thread = mallocz(sizeof(netdata_thread_t));
+ stats_thread->query_thread_count = query_threads.count;
+ netdata_thread_create(
+ stats_thread->thread, ACLK_STATS_THREAD_NAME, NETDATA_THREAD_OPTION_JOINABLE, aclk_stats_main_thread,
+ stats_thread);
+ }
+
+ char *aclk_hostname = NULL; // Initializers are over-written but prevent gcc complaining about clobbering.
+ int port_num = 0;
+ info("Waiting for netdata to be claimed");
+ while(1) {
+ char *agent_id = is_agent_claimed();
+ while (likely(!agent_id)) {
+ sleep_usec(USEC_PER_SEC * 1);
+ if (netdata_exit)
+ goto exited;
+ agent_id = is_agent_claimed();
+ }
+ freez(agent_id);
+ // The NULL return means the value was never initialised, but this value has been initialized in post_conf_load.
+ // We trap the impossible NULL here to keep the linter happy without using a fatal() in the code.
+ char *cloud_base_url = appconfig_get(&cloud_config, CONFIG_SECTION_GLOBAL, "cloud base url", NULL);
+ if (cloud_base_url == NULL) {
+ error("Do not move the cloud base url out of post_conf_load!!");
+ goto exited;
+ }
+ if (aclk_decode_base_url(cloud_base_url, &aclk_hostname, &port_num))
+ error("Agent is claimed but the configuration is invalid, please fix");
+ else if (!create_private_key() && !_mqtt_lib_init())
+ break;
+
+ for (int i=0; i<60; i++) {
+ if (netdata_exit)
+ goto exited;
+
+ sleep_usec(USEC_PER_SEC * 1);
+ }
+ }
+
+ usec_t reconnect_expiry = 0; // In usecs
+
+ while (!netdata_exit) {
+ static int first_init = 0;
+ /* size_t write_q, write_q_bytes, read_q;
+ lws_wss_check_queues(&write_q, &write_q_bytes, &read_q);*/
+
+ if (aclk_disable_runtime && !aclk_connected) {
+ sleep(1);
+ continue;
+ }
+
+ if (aclk_kill_link) { // User has reloaded the claiming state
+ aclk_kill_link = 0;
+ aclk_graceful_disconnect();
+ create_private_key();
+ continue;
+ }
+
+ if (aclk_force_reconnect) {
+ aclk_lws_wss_destroy_context();
+ aclk_force_reconnect = 0;
+ }
+ if (unlikely(!netdata_exit && !aclk_connected && !aclk_force_reconnect)) {
+ if (unlikely(!first_init)) {
+ aclk_try_to_connect(aclk_hostname, port_num);
+ first_init = 1;
+ } else {
+ if (aclk_connecting == 0) {
+ if (reconnect_expiry == 0) {
+ unsigned long int delay = aclk_reconnect_delay(1);
+ reconnect_expiry = now_realtime_usec() + delay * 1000;
+ info("Retrying to establish the ACLK connection in %.3f seconds", delay / 1000.0);
+ }
+ if (now_realtime_usec() >= reconnect_expiry) {
+ reconnect_expiry = 0;
+ aclk_try_to_connect(aclk_hostname, port_num);
+ }
+ sleep_usec(USEC_PER_MS * 100);
+ }
+ }
+ if (aclk_connecting) {
+ _link_event_loop();
+ sleep_usec(USEC_PER_MS * 100);
+ }
+ continue;
+ }
+
+ _link_event_loop();
+ if (unlikely(!aclk_connected || aclk_force_reconnect))
+ continue;
+ /*static int stress_counter = 0;
+ if (write_q_bytes==0 && stress_counter ++ >5)
+ {
+ aclk_send_stress_test(8000000);
+ stress_counter = 0;
+ }*/
+
+ if (unlikely(!aclk_subscribed)) {
+ aclk_subscribed = !aclk_subscribe(ACLK_COMMAND_TOPIC, 1);
+ aclk_hello_msg();
+ }
+
+ if (unlikely(!query_threads.thread_list)) {
+ aclk_query_threads_start(&query_threads);
+ }
+
+ time_t now = now_monotonic_sec();
+ if(aclk_connected && last_periodic_query_wakeup < now) {
+ // to make `aclk_queue_query()` param `run_after` work
+ // also makes per child popcorning work
+ last_periodic_query_wakeup = now;
+ QUERY_THREAD_WAKEUP;
+ }
+ } // forever
+exited:
+ // Wakeup query thread to cleanup
+ QUERY_THREAD_WAKEUP_ALL;
+
+ freez(aclk_username);
+ freez(aclk_password);
+ freez(aclk_hostname);
+ if (aclk_private_key != NULL)
+ RSA_free(aclk_private_key);
+
+ static_thread->enabled = NETDATA_MAIN_THREAD_EXITING;
+
+ char *agent_id = is_agent_claimed();
+ if (agent_id && aclk_connected) {
+ freez(agent_id);
+ // Wakeup thread to cleanup
+ QUERY_THREAD_WAKEUP;
+ aclk_graceful_disconnect();
+ }
+
+ aclk_query_threads_cleanup(&query_threads);
+
+ _reset_collector_list();
+ freez(collector_list);
+
+ if(aclk_stats_enabled) {
+ netdata_thread_join(*stats_thread->thread, NULL);
+ aclk_stats_thread_cleanup();
+ freez(stats_thread->thread);
+ freez(stats_thread);
+ }
+
+ /*
+ * this must be last -> once all static threads signal
+ * THREAD_EXITED, rrdengine will deallocate the RRDSETs
+ * and RRDDIMs still used by the running stats thread.
+ * see netdata_cleanup_and_exit() for reference
+ */
+ static_thread->enabled = NETDATA_MAIN_THREAD_EXITED;
+ return NULL;
+}
+
+/*
+ * Send a message to the cloud, using a base topic and sub_topic.
+ * The final topic will be in the form <base_topic>/<sub_topic>.
+ * If base_topic is missing then the global_base_topic will be used (if available).
+ */
+int aclk_send_message_bin(char *sub_topic, const void *message, size_t len, char *msg_id)
+{
+ int rc;
+ int mid;
+ char topic[ACLK_MAX_TOPIC + 1];
+ char *final_topic;
+
+ UNUSED(msg_id);
+
+ if (!aclk_connected)
+ return 0;
+
+ if (unlikely(!message))
+ return 0;
+
+ final_topic = get_topic(sub_topic, topic, ACLK_MAX_TOPIC);
+
+ if (unlikely(!final_topic)) {
+ errno = 0;
+ error("Unable to build outgoing topic; truncated?");
+ return 1;
+ }
+
+ ACLK_LOCK;
+ rc = _link_send_message(final_topic, message, len, &mid);
+ // TODO: link the msg_id with the mid so we can trace it
+ ACLK_UNLOCK;
+
+ if (unlikely(rc)) {
+ errno = 0;
+ error("Failed to send message, error code %d (%s)", rc, _link_strerror(rc));
+ }
+
+ return rc;
+}
+
+int aclk_send_message(char *sub_topic, char *message, char *msg_id)
+{
+ return aclk_send_message_bin(sub_topic, message, strlen(message), msg_id);
+}
+
+/*
+ * Subscribe to a topic in the cloud
+ * The final subscription will be in the form
+ * /agent/claim_id/<sub_topic>
+ */
+int aclk_subscribe(char *sub_topic, int qos)
+{
+ int rc;
+ char topic[ACLK_MAX_TOPIC + 1];
+ char *final_topic;
+
+ final_topic = get_topic(sub_topic, topic, ACLK_MAX_TOPIC);
+ if (unlikely(!final_topic)) {
+ errno = 0;
+ error("Unable to build outgoing topic; truncated?");
+ return 1;
+ }
+
+ if (!aclk_connected) {
+ error("Cannot subscribe to %s - not connected!", topic);
+ return 1;
+ }
+
+ ACLK_LOCK;
+ rc = _link_subscribe(final_topic, qos);
+ ACLK_UNLOCK;
+
+ // TODO: Add better handling -- error will flood the logfile here
+ if (unlikely(rc)) {
+ errno = 0;
+ error("Failed subscribe to command topic %d (%s)", rc, _link_strerror(rc));
+ }
+
+ return rc;
+}
+
+// This is called from a callback when the link goes up
+void aclk_connect()
+{
+ info("Connection detected (%u queued queries)", aclk_query_size());
+
+ aclk_stats_upd_online(1);
+
+ aclk_connected = 1;
+ aclk_reconnect_delay(0);
+
+ QUERY_THREAD_WAKEUP;
+ return;
+}
+
+// This is called from a callback when the link goes down
+void aclk_disconnect()
+{
+ if (likely(aclk_connected))
+ info("Disconnect detected (%u queued queries)", aclk_query_size());
+
+ aclk_stats_upd_online(0);
+
+ aclk_subscribed = 0;
+ rrdhost_aclk_state_lock(localhost);
+ localhost->aclk_state.metadata = ACLK_METADATA_REQUIRED;
+ rrdhost_aclk_state_unlock(localhost);
+ aclk_connected = 0;
+ aclk_connecting = 0;
+ aclk_force_reconnect = 1;
+}
+
+inline void aclk_create_header(BUFFER *dest, char *type, char *msg_id, time_t ts_secs, usec_t ts_us, int version)
+{
+ uuid_t uuid;
+ char uuid_str[36 + 1];
+
+ if (unlikely(!msg_id)) {
+ uuid_generate(uuid);
+ uuid_unparse(uuid, uuid_str);
+ msg_id = uuid_str;
+ }
+
+ if (ts_secs == 0) {
+ ts_us = now_realtime_usec();
+ ts_secs = ts_us / USEC_PER_SEC;
+ ts_us = ts_us % USEC_PER_SEC;
+ }
+
+ buffer_sprintf(
+ dest,
+ "{\t\"type\": \"%s\",\n"
+ "\t\"msg-id\": \"%s\",\n"
+ "\t\"timestamp\": %ld,\n"
+ "\t\"timestamp-offset-usec\": %llu,\n"
+ "\t\"connect\": %ld,\n"
+ "\t\"connect-offset-usec\": %llu,\n"
+ "\t\"version\": %d",
+ type, msg_id, ts_secs, ts_us, aclk_session_sec, aclk_session_us, version);
+
+ debug(D_ACLK, "Sending v%d msgid [%s] type [%s] time [%ld]", version, msg_id, type, ts_secs);
+}
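+
+/* The emitted header fragment looks roughly like this (values illustrative;
+ * callers append the payload and the closing brace):
+ *
+ *   {    "type": "hello",
+ *        "msg-id": "<uuid>",
+ *        "timestamp": 1612345678,
+ *        "timestamp-offset-usec": 250000,
+ *        "connect": 1612340000,
+ *        "connect-offset-usec": 100000,
+ *        "version": 2
+ */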
+
+
+/*
+ * This will send alarm information which includes
+ * configured alarms
+ * alarm_log
+ * active alarms
+ */
+void health_active_log_alarms_2json(RRDHOST *host, BUFFER *wb);
+
+void aclk_send_alarm_metadata(ACLK_METADATA_STATE metadata_submitted)
+{
+ BUFFER *local_buffer = buffer_create(NETDATA_WEB_RESPONSE_INITIAL_SIZE);
+
+ char *msg_id = create_uuid();
+ buffer_flush(local_buffer);
+ local_buffer->contenttype = CT_APPLICATION_JSON;
+
+ debug(D_ACLK, "Metadata alarms start");
+
+ // on_connect messages are also sent on a health reload. If the on_connect message is real,
+ // we use the session time as the fake timestamp to indicate that it starts the session. If
+ // it is a fake on_connect message, we use the real timestamp to indicate it is within the
+ // existing session.
+
+ if (metadata_submitted == ACLK_METADATA_SENT)
+ aclk_create_header(local_buffer, "connect_alarms", msg_id, 0, 0, aclk_shared_state.version_neg);
+ else
+ aclk_create_header(local_buffer, "connect_alarms", msg_id, aclk_session_sec, aclk_session_us, aclk_shared_state.version_neg);
+ buffer_strcat(local_buffer, ",\n\t\"payload\": ");
+
+
+ buffer_sprintf(local_buffer, "{\n\t \"configured-alarms\" : ");
+ health_alarms2json(localhost, local_buffer, 1);
+ debug(D_ACLK, "Metadata %s with configured alarms has %zu bytes", msg_id, local_buffer->len);
+ // buffer_sprintf(local_buffer, ",\n\t \"alarm-log\" : ");
+ // health_alarm_log2json(localhost, local_buffer, 0);
+ // debug(D_ACLK, "Metadata %s with alarm_log has %zu bytes", msg_id, local_buffer->len);
+ buffer_sprintf(local_buffer, ",\n\t \"alarms-active\" : ");
+ health_active_log_alarms_2json(localhost, local_buffer);
+ //debug(D_ACLK, "Metadata message %s", local_buffer->buffer);
+
+
+
+ buffer_sprintf(local_buffer, "\n}\n}");
+ aclk_send_message(ACLK_ALARMS_TOPIC, local_buffer->buffer, msg_id);
+
+ freez(msg_id);
+ buffer_free(local_buffer);
+}
+
+/*
+ * This will send the agent metadata
+ * /api/v1/info
+ * charts
+ */
+int aclk_send_info_metadata(ACLK_METADATA_STATE metadata_submitted, RRDHOST *host)
+{
+ BUFFER *local_buffer = buffer_create(NETDATA_WEB_RESPONSE_INITIAL_SIZE);
+
+ debug(D_ACLK, "Metadata /info start");
+
+ char *msg_id = create_uuid();
+ buffer_flush(local_buffer);
+ local_buffer->contenttype = CT_APPLICATION_JSON;
+
+ // on_connect messages are also sent on a health reload. If the on_connect message is real,
+ // we use the session time as the fake timestamp to indicate that it starts the session. If
+ // it is a fake on_connect message, we use the real timestamp to indicate it is within the
+ // existing session.
+ if (metadata_submitted == ACLK_METADATA_SENT)
+ aclk_create_header(local_buffer, "update", msg_id, 0, 0, aclk_shared_state.version_neg);
+ else
+ aclk_create_header(local_buffer, "connect", msg_id, aclk_session_sec, aclk_session_us, aclk_shared_state.version_neg);
+ buffer_strcat(local_buffer, ",\n\t\"payload\": ");
+
+ buffer_sprintf(local_buffer, "{\n\t \"info\" : ");
+ web_client_api_request_v1_info_fill_buffer(host, local_buffer);
+ debug(D_ACLK, "Metadata %s with info has %zu bytes", msg_id, local_buffer->len);
+
+ buffer_sprintf(local_buffer, ", \n\t \"charts\" : ");
+ charts2json(host, local_buffer, 1, 0);
+ buffer_sprintf(local_buffer, "\n}\n}");
+ debug(D_ACLK, "Metadata %s with chart has %zu bytes", msg_id, local_buffer->len);
+
+ aclk_send_message(ACLK_METADATA_TOPIC, local_buffer->buffer, msg_id);
+
+ freez(msg_id);
+ buffer_free(local_buffer);
+ return 0;
+}
+
+int aclk_send_info_child_connection(RRDHOST *host, ACLK_CMD cmd)
+{
+ BUFFER *local_buffer = buffer_create(NETDATA_WEB_RESPONSE_INITIAL_SIZE);
+ local_buffer->contenttype = CT_APPLICATION_JSON;
+
+ if(aclk_shared_state.version_neg < ACLK_V_CHILDRENSTATE)
+ fatal("This function should not be called if ACLK version is less than %d (current %d)", ACLK_V_CHILDRENSTATE, aclk_shared_state.version_neg);
+
+ debug(D_ACLK, "Sending Child Disconnect");
+
+ char *msg_id = create_uuid();
+
+ aclk_create_header(local_buffer, cmd == ACLK_CMD_CHILD_CONNECT ? "child_connect" : "child_disconnect", msg_id, 0, 0, aclk_shared_state.version_neg);
+
+ buffer_strcat(local_buffer, ",\"payload\":");
+
+ buffer_sprintf(local_buffer, "{\"guid\":\"%s\",\"claim_id\":", host->machine_guid);
+ rrdhost_aclk_state_lock(host);
+ if(host->aclk_state.claimed_id)
+ buffer_sprintf(local_buffer, "\"%s\"}}", host->aclk_state.claimed_id);
+ else
+ buffer_strcat(local_buffer, "null}}");
+
+ rrdhost_aclk_state_unlock(host);
+
+ aclk_send_message(ACLK_METADATA_TOPIC, local_buffer->buffer, msg_id);
+
+ freez(msg_id);
+ buffer_free(local_buffer);
+ return 0;
+}
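+
+/* The resulting message looks like (illustrative):
+ *
+ *   { ...header fields..., "payload": { "guid": "<machine-guid>",
+ *                                       "claim_id": "<claim-id or null>" } }
+ */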
+
+void aclk_host_state_update(RRDHOST *host, ACLK_CMD cmd)
+{
+#if ACLK_VERSION_MIN < ACLK_V_CHILDRENSTATE
+ if (aclk_shared_state.version_neg < ACLK_V_CHILDRENSTATE)
+ return;
+#else
+#warning "This check became unnecessary. Remove"
+#endif
+
+ if (unlikely(aclk_host_initializing(localhost)))
+ return;
+
+ switch (cmd) {
+ case ACLK_CMD_CHILD_CONNECT:
+ debug(D_ACLK, "Child Connected %s %s.", host->hostname, host->machine_guid);
+ aclk_start_host_popcorning(host);
+ aclk_queue_query("add_child", host, NULL, NULL, 0, 1, ACLK_CMD_CHILD_CONNECT);
+ break;
+ case ACLK_CMD_CHILD_DISCONNECT:
+ debug(D_ACLK, "Child Disconnected %s %s.", host->hostname, host->machine_guid);
+ aclk_stop_host_popcorning(host);
+ aclk_queue_query("del_child", host, NULL, NULL, 0, 1, ACLK_CMD_CHILD_DISCONNECT);
+ break;
+ default:
+ error("Unknown command for aclk_host_state_update %d.", (int)cmd);
+ }
+}
+
+void aclk_send_stress_test(size_t size)
+{
+ char *buffer = mallocz(size);
+ if (buffer != NULL)
+ {
+ for(size_t i=0; i<size; i++)
+ buffer[i] = 'x';
+ buffer[size-1] = 0;
+ time_t time_created = now_realtime_sec();
+ sprintf(buffer,"{\"type\":\"stress\", \"timestamp\":%ld,\"payload\":", time_created);
+ buffer[strlen(buffer)] = '"';
+ buffer[size-2] = '}';
+ buffer[size-3] = '"';
+ aclk_send_message(ACLK_METADATA_TOPIC, buffer, NULL);
+ error("Sending stress of size %zu at time %ld", size, time_created);
+ }
+ freez(buffer); // mallocz()-allocated memory must be released with freez()
+}
+
+// Send info metadata message to the cloud if the link is established
+// or on request
+int aclk_send_metadata(ACLK_METADATA_STATE state, RRDHOST *host)
+{
+ aclk_send_info_metadata(state, host);
+
+ if(host == localhost)
+ aclk_send_alarm_metadata(state);
+
+ return 0;
+}
+
+void aclk_single_update_disable()
+{
+ aclk_disable_single_updates = 1;
+}
+
+void aclk_single_update_enable()
+{
+ aclk_disable_single_updates = 0;
+}
+
+// Triggered by a health reload; sends the alarm metadata
+void aclk_alarm_reload()
+{
+ if (unlikely(aclk_host_initializing(localhost)))
+ return;
+
+ if (unlikely(aclk_queue_query("on_connect", localhost, NULL, NULL, 0, 1, ACLK_CMD_ONCONNECT))) {
+ if (likely(aclk_connected)) {
+ errno = 0;
+ error("ACLK failed to queue on_connect command on alarm reload");
+ }
+ }
+}
+//rrd_stats_api_v1_chart(RRDSET *st, BUFFER *buf)
+
+int aclk_send_single_chart(RRDHOST *host, char *chart)
+{
+ RRDSET *st = rrdset_find(host, chart);
+ if (!st)
+ st = rrdset_find_byname(host, chart);
+ if (!st) {
+ info("FAILED to find chart %s", chart);
+ return 1;
+ }
+
+ BUFFER *local_buffer = buffer_create(NETDATA_WEB_RESPONSE_INITIAL_SIZE);
+ char *msg_id = create_uuid();
+ buffer_flush(local_buffer);
+ local_buffer->contenttype = CT_APPLICATION_JSON;
+
+ aclk_create_header(local_buffer, "chart", msg_id, 0, 0, aclk_shared_state.version_neg);
+ buffer_strcat(local_buffer, ",\n\t\"payload\": ");
+
+ rrdset2json(st, local_buffer, NULL, NULL, 1);
+ buffer_sprintf(local_buffer, "\t\n}");
+
+ aclk_send_message(ACLK_CHART_TOPIC, local_buffer->buffer, msg_id);
+
+ freez(msg_id);
+ buffer_free(local_buffer);
+ return 0;
+}
+
+int aclk_update_chart(RRDHOST *host, char *chart_name, ACLK_CMD aclk_cmd)
+{
+#ifndef ENABLE_ACLK
+ UNUSED(host);
+ UNUSED(chart_name);
+ return 0;
+#else
+ if (unlikely(!netdata_ready))
+ return 0;
+
+ if (!netdata_cloud_setting)
+ return 0;
+
+ if (aclk_shared_state.version_neg < ACLK_V_CHILDRENSTATE && host != localhost)
+ return 0;
+
+ if (aclk_host_initializing(localhost))
+ return 0;
+
+ if (unlikely(aclk_disable_single_updates))
+ return 0;
+
+ if (aclk_popcorn_check_bump(host))
+ return 0;
+
+ if (unlikely(aclk_queue_query("_chart", host, NULL, chart_name, 0, 1, aclk_cmd))) {
+ if (likely(aclk_connected)) {
+ errno = 0;
+ error("ACLK failed to queue chart_update command");
+ }
+ }
+
+ return 0;
+#endif
+}
+
+int aclk_update_alarm(RRDHOST *host, ALARM_ENTRY *ae)
+{
+ BUFFER *local_buffer = NULL;
+
+ if (unlikely(!netdata_ready))
+ return 0;
+
+ if (host != localhost)
+ return 0;
+
+ if(unlikely(aclk_host_initializing(localhost)))
+ return 0;
+
+ /*
+ * Check if individual updates have been disabled
+ * This will be the case when we do health reload
+ * and all the alarms will be dropped and recreated.
+ * At the end of the health reload the complete alarm metadata
+ * info will be sent
+ */
+ if (unlikely(aclk_disable_single_updates))
+ return 0;
+
+ local_buffer = buffer_create(NETDATA_WEB_RESPONSE_INITIAL_SIZE);
+ char *msg_id = create_uuid();
+
+ buffer_flush(local_buffer);
+ aclk_create_header(local_buffer, "status-change", msg_id, 0, 0, aclk_shared_state.version_neg);
+ buffer_strcat(local_buffer, ",\n\t\"payload\": ");
+
+ netdata_rwlock_rdlock(&host->health_log.alarm_log_rwlock);
+ health_alarm_entry2json_nolock(local_buffer, ae, host);
+ netdata_rwlock_unlock(&host->health_log.alarm_log_rwlock);
+
+ buffer_sprintf(local_buffer, "\n}");
+
+ if (unlikely(aclk_queue_query(ACLK_ALARMS_TOPIC, NULL, msg_id, local_buffer->buffer, 0, 1, ACLK_CMD_ALARM))) {
+ if (likely(aclk_connected)) {
+ errno = 0;
+ error("ACLK failed to queue alarm_command on alarm_update");
+ }
+ }
+
+ freez(msg_id);
+ buffer_free(local_buffer);
+
+ return 0;
+}
diff --git a/aclk/legacy/agent_cloud_link.h b/aclk/legacy/agent_cloud_link.h
new file mode 100644
index 0000000..e777e0b
--- /dev/null
+++ b/aclk/legacy/agent_cloud_link.h
@@ -0,0 +1,93 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_AGENT_CLOUD_LINK_H
+#define NETDATA_AGENT_CLOUD_LINK_H
+
+#include "../../daemon/common.h"
+#include "mqtt.h"
+#include "aclk_common.h"
+
+#define ACLK_THREAD_NAME "ACLK_Query"
+#define ACLK_CHART_TOPIC "outbound/meta"
+#define ACLK_ALARMS_TOPIC "outbound/alarms"
+#define ACLK_METADATA_TOPIC "outbound/meta"
+#define ACLK_COMMAND_TOPIC "inbound/cmd"
+#define ACLK_TOPIC_STRUCTURE "/agent/%s"
+
+#define ACLK_MAX_BACKOFF_DELAY 1024 // maximum backoff delay in seconds
+
+#define ACLK_INITIALIZATION_WAIT 60 // Wait for link to initialize in seconds (per msg)
+#define ACLK_INITIALIZATION_SLEEP_WAIT 1 // Wait time @ spin lock for MQTT initialization in seconds
+#define ACLK_QOS 1
+#define ACLK_PING_INTERVAL 60
+#define ACLK_LOOP_TIMEOUT 5 // seconds to wait for operations in the library loop
+
+#define ACLK_MAX_TOPIC 255
+
+#define ACLK_RECONNECT_DELAY 1 // reconnect delay -- with backoff strategy for now
+#define ACLK_DEFAULT_PORT 9002
+#define ACLK_DEFAULT_HOST "localhost"
+
+#define ACLK_V2_PAYLOAD_SEPARATOR "\x0D\x0A\x0D\x0A"
+
+struct aclk_request {
+ char *type_id;
+ char *msg_id;
+ char *callback_topic;
+ char *payload;
+ int version;
+ int min_version;
+ int max_version;
+};
+
+typedef enum aclk_init_action { ACLK_INIT, ACLK_REINIT } ACLK_INIT_ACTION;
+
+void *aclk_main(void *ptr);
+
+#define NETDATA_ACLK_HOOK \
+ { .name = "ACLK_Main", \
+ .config_section = NULL, \
+ .config_name = NULL, \
+ .enabled = 1, \
+ .thread = NULL, \
+ .init_routine = NULL, \
+ .start_routine = aclk_main },
+
+extern int aclk_send_message(char *sub_topic, char *message, char *msg_id);
+extern int aclk_send_message_bin(char *sub_topic, const void *message, size_t len, char *msg_id);
+
+extern char *is_agent_claimed(void);
+extern void aclk_lws_wss_mqtt_layer_disconect_notif();
+char *create_uuid();
+
+// callbacks for agent cloud link
+int aclk_subscribe(char *topic, int qos);
+int cloud_to_agent_parse(JSON_ENTRY *e);
+void aclk_disconnect();
+void aclk_connect();
+
+int aclk_send_metadata(ACLK_METADATA_STATE state, RRDHOST *host);
+int aclk_send_info_metadata(ACLK_METADATA_STATE metadata_submitted, RRDHOST *host);
+void aclk_send_alarm_metadata(ACLK_METADATA_STATE metadata_submitted);
+
+int aclk_wait_for_initialization();
+char *create_publish_base_topic();
+
+int aclk_send_single_chart(RRDHOST *host, char *chart);
+int aclk_update_chart(RRDHOST *host, char *chart_name, ACLK_CMD aclk_cmd);
+int aclk_update_alarm(RRDHOST *host, ALARM_ENTRY *ae);
+void aclk_create_header(BUFFER *dest, char *type, char *msg_id, time_t ts_secs, usec_t ts_us, int version);
+int aclk_handle_cloud_message(char *payload);
+void aclk_add_collector(RRDHOST *host, const char *plugin_name, const char *module_name);
+void aclk_del_collector(RRDHOST *host, const char *plugin_name, const char *module_name);
+void aclk_alarm_reload();
+unsigned long int aclk_reconnect_delay(int mode);
+extern void health_alarm_entry2json_nolock(BUFFER *wb, ALARM_ENTRY *ae, RRDHOST *host);
+void aclk_single_update_enable();
+void aclk_single_update_disable();
+
+void aclk_host_state_update(RRDHOST *host, ACLK_CMD cmd);
+int aclk_send_info_child_connection(RRDHOST *host, ACLK_CMD cmd);
+void aclk_update_next_child_to_popcorn(void);
+
+#endif //NETDATA_AGENT_CLOUD_LINK_H
diff --git a/aclk/legacy/mqtt.c b/aclk/legacy/mqtt.c
new file mode 100644
index 0000000..6f38a20
--- /dev/null
+++ b/aclk/legacy/mqtt.c
@@ -0,0 +1,366 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include <libnetdata/json/json.h>
+#include "../../daemon/common.h"
+#include "mqtt.h"
+#include "aclk_lws_wss_client.h"
+#include "aclk_stats.h"
+#include "aclk_rx_msgs.h"
+
+extern usec_t aclk_session_us;
+extern time_t aclk_session_sec;
+
+inline const char *_link_strerror(int rc)
+{
+ return mosquitto_strerror(rc);
+}
+
+#ifdef NETDATA_INTERNAL_CHECKS
+static struct timeval sendTimes[1024];
+#endif
+
+static struct mosquitto *mosq = NULL;
+
+
+void mqtt_message_callback(struct mosquitto *mosq, void *obj, const struct mosquitto_message *msg)
+{
+ UNUSED(mosq);
+ UNUSED(obj);
+
+ aclk_handle_cloud_message(msg->payload);
+}
+
+void publish_callback(struct mosquitto *mosq, void *obj, int rc)
+{
+ UNUSED(mosq);
+ UNUSED(obj);
+ UNUSED(rc);
+#ifdef NETDATA_INTERNAL_CHECKS
+ struct timeval now, *orig;
+ now_realtime_timeval(&now);
+ orig = &sendTimes[ rc & 0x3ff ];
+ int64_t diff = (now.tv_sec - orig->tv_sec) * USEC_PER_SEC + (now.tv_usec - orig->tv_usec);
+ diff /= 1000;
+
+ info("Publish_callback: mid=%d latency=%" PRId64 "ms", rc, diff);
+
+ aclk_metric_mat_update(&aclk_metrics_per_sample.latency, diff);
+#endif
+ return;
+}
+
+void connect_callback(struct mosquitto *mosq, void *obj, int rc)
+{
+ UNUSED(mosq);
+ UNUSED(obj);
+ UNUSED(rc);
+
+ info("Connection to cloud estabilished");
+ aclk_connect();
+
+ return;
+}
+
+void disconnect_callback(struct mosquitto *mosq, void *obj, int rc)
+{
+ UNUSED(mosq);
+ UNUSED(obj);
+ UNUSED(rc);
+
+ if (netdata_exit)
+ info("Connection to cloud terminated due to agent shutdown");
+ else {
+ errno = 0;
+ error("Connection to cloud failed");
+ }
+ aclk_disconnect();
+
+ aclk_lws_wss_mqtt_layer_disconect_notif();
+
+ return;
+}
+
+void _show_mqtt_info()
+{
+ int libmosq_major, libmosq_minor, libmosq_revision, libmosq_version;
+ libmosq_version = mosquitto_lib_version(&libmosq_major, &libmosq_minor, &libmosq_revision);
+
+ info(
+ "Detected libmosquitto library version %d, %d.%d.%d", libmosq_version, libmosq_major, libmosq_minor,
+ libmosq_revision);
+}
+
+size_t _mqtt_external_write_hook(void *buf, size_t count)
+{
+ return aclk_lws_wss_client_write(buf, count);
+}
+
+size_t _mqtt_external_read_hook(void *buf, size_t count)
+{
+ return aclk_lws_wss_client_read(buf, count);
+}
+
+int _mqtt_lib_init()
+{
+ int rc;
+ //int libmosq_major, libmosq_minor, libmosq_revision, libmosq_version;
+ /* Commenting out now as it is unused - do not delete, this is needed for the on-prem version.
+ char *ca_crt;
+ char *server_crt;
+ char *server_key;
+
+ // show library info so can have it in the logfile
+ //libmosq_version = mosquitto_lib_version(&libmosq_major, &libmosq_minor, &libmosq_revision);
+ ca_crt = config_get(CONFIG_SECTION_CLOUD, "link cert", "*");
+ server_crt = config_get(CONFIG_SECTION_CLOUD, "link server cert", "*");
+ server_key = config_get(CONFIG_SECTION_CLOUD, "link server key", "*");
+
+ if (ca_crt[0] == '*') {
+ freez(ca_crt);
+ ca_crt = NULL;
+ }
+
+ if (server_crt[0] == '*') {
+ freez(server_crt);
+ server_crt = NULL;
+ }
+
+ if (server_key[0] == '*') {
+ freez(server_key);
+ server_key = NULL;
+ }
+ */
+
+ // info(
+ // "Detected libmosquitto library version %d, %d.%d.%d", libmosq_version, libmosq_major, libmosq_minor,
+ // libmosq_revision);
+
+ rc = mosquitto_lib_init();
+ if (unlikely(rc != MOSQ_ERR_SUCCESS)) {
+ error("Failed to initialize MQTT (libmosquitto library)");
+ return 1;
+ }
+ return 0;
+}
+
+static int _mqtt_create_connection(char *username, char *password)
+{
+ if (mosq != NULL)
+ mosquitto_destroy(mosq);
+ mosq = mosquitto_new(username, true, NULL);
+ if (unlikely(!mosq)) {
+ mosquitto_lib_cleanup();
+ error("MQTT new structure -- %s", mosquitto_strerror(errno));
+ return MOSQ_ERR_UNKNOWN;
+ }
+
+ // Record the session start time to allow a nominal LWT timestamp
+ usec_t now = now_realtime_usec();
+ aclk_session_sec = now / USEC_PER_SEC;
+ aclk_session_us = now % USEC_PER_SEC;
+
+ _link_set_lwt("outbound/meta", 2);
+
+ mosquitto_connect_callback_set(mosq, connect_callback);
+ mosquitto_disconnect_callback_set(mosq, disconnect_callback);
+ mosquitto_publish_callback_set(mosq, publish_callback);
+
+ info("Using challenge-response: %s / %s", username, password);
+ mosquitto_username_pw_set(mosq, username, password);
+
+ int rc = mosquitto_threaded_set(mosq, 1);
+ if (unlikely(rc != MOSQ_ERR_SUCCESS))
+ error("Failed to tune the thread model for libmoquitto (%s)", mosquitto_strerror(rc));
+
+#if defined(LIBMOSQUITTO_VERSION_NUMBER) && (LIBMOSQUITTO_VERSION_NUMBER >= 1006000)
+ rc = mosquitto_int_option(mosq, MQTT_PROTOCOL_V311, 0);
+ if (unlikely(rc != MOSQ_ERR_SUCCESS))
+ error("MQTT protocol specification rc = %d (%s)", rc, mosquitto_strerror(rc));
+
+ rc = mosquitto_int_option(mosq, MOSQ_OPT_SEND_MAXIMUM, 1);
+ info("MQTT in flight messages set to 1 -- %s", mosquitto_strerror(rc));
+#endif
+
+ return rc;
+}
+
+static int _link_mqtt_connect(char *aclk_hostname, int aclk_port)
+{
+ int rc;
+
+ rc = mosquitto_connect_async(mosq, aclk_hostname, aclk_port, ACLK_PING_INTERVAL);
+
+ if (unlikely(rc != MOSQ_ERR_SUCCESS))
+ error(
+ "Failed to establish link to [%s:%d] MQTT status = %d (%s)", aclk_hostname, aclk_port, rc,
+ mosquitto_strerror(rc));
+ else
+ info("Establishing MQTT link to [%s:%d]", aclk_hostname, aclk_port);
+
+ return rc;
+}
+
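+// With the LWS transport libmosquitto does not own the socket, so its network
+// loop is driven manually: mosquitto_loop_misc() performs keepalive and retry
+// housekeeping, and mosquitto_loop_write() flushes queued outgoing packets.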
+static inline void _link_mosquitto_write()
+{
+ int rc;
+
+ if (unlikely(!mosq)) {
+ return;
+ }
+
+ rc = mosquitto_loop_misc(mosq);
+ if (unlikely(rc != MOSQ_ERR_SUCCESS))
+ debug(D_ACLK, "ACLK: failure during mosquitto_loop_misc %s", mosquitto_strerror(rc));
+
+ if (likely(mosquitto_want_write(mosq))) {
+ rc = mosquitto_loop_write(mosq, 1);
+ if (rc != MOSQ_ERR_SUCCESS)
+ debug(D_ACLK, "ACLK: failure during mosquitto_loop_write %s", mosquitto_strerror(rc));
+ }
+}
+
+void aclk_lws_connection_established(char *hostname, int port)
+{
+ _link_mqtt_connect(hostname, port); // Parameters only used for logging, lower layer connected.
+ _link_mosquitto_write();
+}
+
+void aclk_lws_connection_data_received()
+{
+ int rc = mosquitto_loop_read(mosq, 1);
+ if (rc != MOSQ_ERR_SUCCESS)
+ debug(D_ACLK, "ACLK: failure during mosquitto_loop_read %s", mosquitto_strerror(rc));
+}
+
+void aclk_lws_connection_closed()
+{
+    aclk_disconnect();
+}
+
+
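+// Bring the link up in two layers: first the LWS websocket transport, then the
+// MQTT session that libmosquitto tunnels over it through the external
+// read/write hooks registered below.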
+int mqtt_attempt_connection(char *aclk_hostname, int aclk_port, char *username, char *password)
+{
+ if(aclk_lws_wss_connect(aclk_hostname, aclk_port))
+ return MOSQ_ERR_UNKNOWN;
+ aclk_lws_wss_service_loop();
+
+ int rc = _mqtt_create_connection(username, password);
+    if (rc != MOSQ_ERR_SUCCESS)
+ return rc;
+
+ mosquitto_external_callbacks_set(mosq, _mqtt_external_write_hook, _mqtt_external_read_hook);
+ return rc;
+}
+
+inline int _link_event_loop()
+{
+
+ // TODO: Check if we need to flush undelivered messages from libmosquitto on new connection attempts (QoS=1).
+ _link_mosquitto_write();
+ aclk_lws_wss_service_loop();
+
+    // when using LWS as the transport we do not want
+    // libmosquitto to reconnect by itself
+ return MOSQ_ERR_SUCCESS;
+}
+
+void _link_shutdown()
+{
+ int rc;
+
+ if (likely(!mosq))
+ return;
+
+ rc = mosquitto_disconnect(mosq);
+ switch (rc) {
+ case MOSQ_ERR_SUCCESS:
+ info("MQTT disconnected from broker");
+ break;
+ default:
+ info("MQTT invalid structure");
+ break;
+ };
+}
+
+
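+// Register the MQTT Last Will and Testament: a "disconnect" message, stamped
+// just after the session start time, that the broker publishes on our behalf
+// if the connection drops without a clean shutdown.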
+int _link_set_lwt(char *sub_topic, int qos)
+{
+ int rc;
+ char topic[ACLK_MAX_TOPIC + 1];
+ char *final_topic;
+
+ final_topic = get_topic(sub_topic, topic, ACLK_MAX_TOPIC);
+ if (unlikely(!final_topic)) {
+ errno = 0;
+ error("Unable to build outgoing topic; truncated?");
+ return 1;
+ }
+
+ usec_t lwt_time = aclk_session_sec * USEC_PER_SEC + aclk_session_us + 1;
+ BUFFER *b = buffer_create(512);
+ aclk_create_header(b, "disconnect", NULL, lwt_time / USEC_PER_SEC, lwt_time % USEC_PER_SEC, ACLK_VERSION_NEG_VERSION);
+ buffer_strcat(b, ", \"payload\": \"unexpected\" }");
+ rc = mosquitto_will_set(mosq, topic, buffer_strlen(b), buffer_tostring(b), qos, 0);
+ buffer_free(b);
+
+ return rc;
+}
+
+int _link_subscribe(char *topic, int qos)
+{
+ int rc;
+
+ if (unlikely(!mosq))
+ return 1;
+
+ mosquitto_message_callback_set(mosq, mqtt_message_callback);
+
+ rc = mosquitto_subscribe(mosq, NULL, topic, qos);
+ if (unlikely(rc)) {
+ errno = 0;
+ error("Failed to register subscription %d (%s)", rc, mosquitto_strerror(rc));
+ return 1;
+ }
+
+ _link_mosquitto_write();
+ return 0;
+}
+
+/*
+ * Send a message to the cloud on a specific topic
+ *
+ */
+
+int _link_send_message(char *topic, const void *message, size_t len, int *mid)
+{
+ int rc;
+ size_t write_q, write_q_bytes, read_q;
+
+ rc = mosquitto_pub_topic_check(topic);
+
+ if (unlikely(rc != MOSQ_ERR_SUCCESS))
+ return rc;
+
+ lws_wss_check_queues(&write_q, &write_q_bytes, &read_q);
+ rc = mosquitto_publish(mosq, mid, topic, len, message, ACLK_QOS, 0);
+
+#ifdef NETDATA_INTERNAL_CHECKS
+ char msg_head[64];
+ memset(msg_head, 0, sizeof(msg_head));
+ strncpy(msg_head, (char*)message, 60);
+ for (size_t i = 0; i < sizeof(msg_head); i++)
+ if(msg_head[i] == '\n') msg_head[i] = ' ';
+ info("Sending MQTT len=%d mid=%d wq=%zu (%zu-bytes) readq=%zu: %s", (int)len,
+ *mid, write_q, write_q_bytes, read_q, msg_head);
+ now_realtime_timeval(&sendTimes[ *mid & 0x3ff ]);
+#endif
+
+ // TODO: Add better handling -- error will flood the logfile here
+ if (unlikely(rc != MOSQ_ERR_SUCCESS)) {
+ errno = 0;
+ error("MQTT message failed : %s", mosquitto_strerror(rc));
+ }
+ _link_mosquitto_write();
+ return rc;
+}
diff --git a/aclk/legacy/mqtt.h b/aclk/legacy/mqtt.h
new file mode 100644
index 0000000..cc4765d
--- /dev/null
+++ b/aclk/legacy/mqtt.h
@@ -0,0 +1,25 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_MQTT_H
+#define NETDATA_MQTT_H
+
+#ifdef ENABLE_ACLK
+#include "externaldeps/mosquitto/mosquitto.h"
+#endif
+
+void _show_mqtt_info();
+int _link_event_loop();
+void _link_shutdown();
+int mqtt_attempt_connection(char *aclk_hostname, int aclk_port, char *username, char *password);
+//int _link_lib_init();
+int _mqtt_lib_init();
+int _link_subscribe(char *topic, int qos);
+int _link_send_message(char *topic, const void *message, size_t len, int *mid);
+const char *_link_strerror(int rc);
+int _link_set_lwt(char *topic, int qos);
+
+
+int aclk_handle_cloud_message(char *);
+extern char *get_topic(char *sub_topic, char *final_topic, int max_size);
+
+#endif //NETDATA_MQTT_H
diff --git a/aclk/legacy/tests/fake-charts.d.plugin b/aclk/legacy/tests/fake-charts.d.plugin
new file mode 100644
index 0000000..a13c6ba
--- /dev/null
+++ b/aclk/legacy/tests/fake-charts.d.plugin
@@ -0,0 +1,24 @@
+#!/usr/bin/env bash
+
+sleep 45 # Wait until popcorning finishes
+
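+# Drive the netdata external plugin protocol by hand: CHART/DIMENSION declare
+# chart metadata, BEGIN/SET/END submit one data point per collection interval.
+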
+echo "CHART aclk_test.newcol '' 'Generate new collector/chart event' 'units' aclk_test aclk_test lines 900001 1"
+sleep 5
+echo "DIMENSION aclk1 '' percentage-of-absolute 1 1"
+sleep 5
+echo "BEGIN aclk_test.newcol 1000000"
+echo "SET aclk1 = 3"
+echo "END"
+sleep 5
+echo "DIMENSION aclk2 '' percentage-of-absolute 1 1"
+sleep 5
+echo "BEGIN aclk_test.newcol 1000000"
+echo "SET aclk1 = 3"
+echo "SET aclk2 = 3"
+echo "END"
+sleep 5
+echo "CHART aclk_test2.newcol '' 'Generate new collector/chart event' 'units' aclk_test aclk_test lines 900001 1"
+echo "DIMENSION aclk1 '' percentage-of-absolute 1 1"
+
+sleep 5
+exit 0 # Signal that we are done
diff --git a/aclk/legacy/tests/install-fake-charts.d.sh.in b/aclk/legacy/tests/install-fake-charts.d.sh.in
new file mode 100644
index 0000000..ac002a2
--- /dev/null
+++ b/aclk/legacy/tests/install-fake-charts.d.sh.in
@@ -0,0 +1,6 @@
+#!/usr/bin/env bash
+
+TARGET="@pluginsdir_POST@"
+BASE="$(cd "$(dirname "$0")" && pwd)"
+
+cp "$BASE/fake-charts.d.plugin" "$TARGET/charts.d.plugin"
diff --git a/aclk/legacy/tests/launch-paho.sh b/aclk/legacy/tests/launch-paho.sh
new file mode 100755
index 0000000..1c2cb5f
--- /dev/null
+++ b/aclk/legacy/tests/launch-paho.sh
@@ -0,0 +1,4 @@
+#!/usr/bin/env bash
+
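+# Resolve this machine's LAN IP by pinging "<hostname>.local" and bake it into
+# the image so the container knows where to reach the agent.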
+docker build -f paho.Dockerfile . --build-arg "HOST_HOSTNAME=$(ping -c1 "$(hostname).local" | head -n1 | grep -o '[0-9]*\.[0-9]*\.[0-9]*\.[0-9]*')" -t paho-client
+docker run -it paho-client
diff --git a/aclk/legacy/tests/paho-inspection.py b/aclk/legacy/tests/paho-inspection.py
new file mode 100644
index 0000000..20ab523
--- /dev/null
+++ b/aclk/legacy/tests/paho-inspection.py
@@ -0,0 +1,59 @@
+import ssl
+import paho.mqtt.client as mqtt
+import json
+import time
+import sys
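+
+# ACLK test helper: connect to the agent's MQTT-over-websockets endpoint,
+# subscribe to /agent/# and print every message with decoding and timing info.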
+
+def on_connect(mqttc, obj, flags, rc):
+    if rc == 0:
+        print("Successful connection", flush=True)
+    else:
+        print(f"Connection error rc={rc}", flush=True)
+    mqttc.subscribe("/agent/#", 0)
+
+# paho-mqtt v1 invokes on_disconnect with (client, userdata, rc)
+def on_disconnect(mqttc, obj, rc):
+    print("disconnected rc: " + str(rc), flush=True)
+
+def on_message(mqttc, obj, msg):
+ print(f"{msg.topic} {len(msg.payload)}-bytes qos={msg.qos}", flush=True)
+ try:
+ print(f"Trying decode of {msg.payload[:60]}",flush=True)
+ api_msg = json.loads(msg.payload)
+ except Exception as e:
+ print(e,flush=True)
+ return
+ ts = api_msg["timestamp"]
+ mtype = api_msg["type"]
+ print(f"Message {mtype} time={ts} size {len(api_msg)}", flush=True)
+ now = time.time()
+ print(f"Current {now} -> Delay {now-ts}", flush=True)
+ if mtype=="disconnect":
+ print(f"Message dump: {api_msg}", flush=True)
+
+def on_publish(mqttc, obj, mid):
+ print("mid: "+str(mid), flush=True)
+
+def on_subscribe(mqttc, obj, mid, granted_qos):
+ print("Subscribed: "+str(mid)+" "+str(granted_qos), flush=True)
+
+def on_log(mqttc, obj, level, string):
+ print(string)
+
+print(f"Starting paho-inspection on {sys.argv[1]}", flush=True)
+mqttc = mqtt.Client(transport='websockets',client_id="paho")
+#mqttc.tls_set(certfile="server.crt", keyfile="server.key", cert_reqs=ssl.CERT_REQUIRED, tls_version=ssl.PROTOCOL_TLS, ciphers=None)
+#mqttc.tls_set(ca_certs="server.crt", cert_reqs=ssl.CERT_REQUIRED, tls_version=ssl.PROTOCOL_TLS, ciphers=None)
+mqttc.tls_set(cert_reqs=ssl.CERT_NONE, tls_version=ssl.PROTOCOL_TLS, ciphers=None)
+mqttc.tls_insecure_set(True)
+mqttc.on_message = on_message
+mqttc.on_connect = on_connect
+mqttc.on_disconnect = on_disconnect
+mqttc.on_publish = on_publish
+mqttc.on_subscribe = on_subscribe
+mqttc.username_pw_set("paho","paho")
+mqttc.connect(sys.argv[1], 8443, 60)
+
+#mqttc.publish("/agent/mine","Test1")
+#mqttc.subscribe("$SYS/#", 0)
+print("Connected succesfully, monitoring /agent/#", flush=True)
+mqttc.loop_forever()
diff --git a/aclk/legacy/tests/paho.Dockerfile b/aclk/legacy/tests/paho.Dockerfile
new file mode 100644
index 0000000..d67cc4c
--- /dev/null
+++ b/aclk/legacy/tests/paho.Dockerfile
@@ -0,0 +1,14 @@
+FROM archlinux/base:latest
+
+RUN pacman -Syyu --noconfirm
+RUN pacman --noconfirm --needed -S python-pip
+
+RUN pip install paho-mqtt
+
+RUN mkdir -p /opt/paho
+COPY paho-inspection.py /opt/paho/
+
+WORKDIR /opt/paho
+ARG HOST_HOSTNAME
+RUN echo $HOST_HOSTNAME >host
+CMD ["/bin/bash", "-c", "/usr/sbin/python paho-inspection.py $(cat host)"]
diff --git a/backends/Makefile.am b/backends/Makefile.am
new file mode 100644
index 0000000..dace013
--- /dev/null
+++ b/backends/Makefile.am
@@ -0,0 +1,22 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+SUBDIRS = \
+ graphite \
+ json \
+ opentsdb \
+ prometheus \
+ aws_kinesis \
+ mongodb \
+ $(NULL)
+
+dist_noinst_DATA = \
+ README.md \
+ WALKTHROUGH.md \
+ $(NULL)
+
+dist_noinst_SCRIPTS = \
+ nc-backend.sh \
+ $(NULL)
diff --git a/backends/README.md b/backends/README.md
new file mode 100644
index 0000000..8d53fd6
--- /dev/null
+++ b/backends/README.md
@@ -0,0 +1,236 @@
+<!--
+title: "Metrics long term archiving"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/backends/README.md
+-->
+
+# Metrics long term archiving
+
+> ⚠️ The backends system is now deprecated in favor of the [exporting engine](/exporting/README.md).
+
+Netdata supports backends for archiving metrics, or for providing long-term dashboards using Grafana or other tools,
+like this:
+
+![image](https://cloud.githubusercontent.com/assets/2662304/20649711/29f182ba-b4ce-11e6-97c8-ab2c0ab59833.png)
+
+Since Netdata collects thousands of metrics per server per second, which would easily congest any backend server when
+several Netdata servers are sending data to it, Netdata allows sending metrics at a lower frequency, by resampling them.
+
+So, although Netdata collects metrics every second, it can send to the backend servers averages or sums every X seconds
+(though, it can send them per second if you need it to).
+
+## features
+
+1. Supported backends
+
+ - **graphite** (`plaintext interface`, used by **Graphite**, **InfluxDB**, **KairosDB**, **Blueflood**,
+ **ElasticSearch** via logstash tcp input and the graphite codec, etc)
+
+ metrics are sent to the backend server as `prefix.hostname.chart.dimension`. `prefix` is configured below,
+ `hostname` is the hostname of the machine (can also be configured).
+
+ - **opentsdb** (`telnet or HTTP interfaces`, used by **OpenTSDB**, **InfluxDB**, **KairosDB**, etc)
+
+ metrics are sent to opentsdb as `prefix.chart.dimension` with tag `host=hostname`.
+
+ - **json** document DBs
+
+ metrics are sent to a document db, `JSON` formatted.
+
+ - **prometheus** is described at [prometheus page](/backends/prometheus/README.md) since it pulls data from
+ Netdata.
+
+ - **prometheus remote write** (a binary snappy-compressed protocol buffer encoding over HTTP used by
+ **Elasticsearch**, **Gnocchi**, **Graphite**, **InfluxDB**, **Kafka**, **OpenTSDB**, **PostgreSQL/TimescaleDB**,
+ **Splunk**, **VictoriaMetrics**, and a lot of other [storage
+ providers](https://prometheus.io/docs/operating/integrations/#remote-endpoints-and-storage))
+
+ metrics are labeled in the same format that Netdata uses for the [plaintext prometheus
+ protocol](/backends/prometheus/README.md). Notes on using the remote write backend are [here](/backends/prometheus/remote_write/README.md).
+
+ - **TimescaleDB** via [community-built connector](/backends/TIMESCALE.md) that takes JSON streams from a Netdata
+ client and writes them to a TimescaleDB table.
+
+ - **AWS Kinesis Data Streams**
+
+ metrics are sent to the service in `JSON` format.
+
+ - **MongoDB**
+
+ metrics are sent to the database in `JSON` format.
+
+2. Only one backend may be active at a time.
+
+3. Netdata can filter metrics (at the chart level), to send only a subset of the collected metrics.
+
+4. Netdata supports three modes of operation for all backends:
+
+ - `as-collected` sends to backends the metrics as they are collected, in the units they are collected. So,
+ counters are sent as counters and gauges are sent as gauges, much like all data collectors do. For example, to
+ calculate CPU utilization in this format, you need to know how to convert kernel ticks to percentage.
+
+ - `average` sends to backends normalized metrics from the Netdata database. In this mode, all metrics are sent as
+ gauges, in the units Netdata uses. This abstracts data collection and simplifies visualization, but you will not
+ be able to copy and paste queries from other sources to convert units. For example, CPU utilization percentage
+ is calculated by Netdata, so Netdata will convert ticks to percentage and send the average percentage to the
+ backend.
+
+ - `sum` or `volume`: the sum of the interpolated values shown on the Netdata graphs is sent to the backend. So, if
+ Netdata is configured to send data to the backend every 10 seconds, the sum of the 10 values shown on the
+ Netdata charts will be used.
+
+ Time-series databases generally recommend collecting the raw values (`as-collected`). If you plan to invest in
+ building your monitoring around a time-series database and you already know (or will invest in learning) how to
+ convert units and normalize the metrics in Grafana or other visualization tools, we suggest using `as-collected`
+ (see the worked example after this list).
+
+ If, on the other hand, you just need long term archiving of Netdata metrics and you plan to mainly work with
+ Netdata, we suggest using `average`. It decouples visualization from data collection, so it will generally be a lot
+ simpler. Furthermore, if you use `average`, the charts shown in the backend will match exactly what you see in
+ Netdata, which is not necessarily true for the other modes of operation.
+
+5. This code is smart enough not to slow down Netdata, regardless of the speed of the backend server.
+
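+To make the three modes concrete, here is a small, purely illustrative example: assume a dimension is collected once
+per second, `update every = 10` is set, and the ten values Netdata stored for an interval are 1 through 10. Then:
+
+- `as-collected` sends the values in the units the collector reported them (counters stay counters);
+- `average` sends (1 + 2 + … + 10) / 10 = 5.5, in the units Netdata shows on its dashboard;
+- `sum` (or `volume`) sends 1 + 2 + … + 10 = 55.
+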
+## configuration
+
+In `/etc/netdata/netdata.conf` you should have something like this (if not, download the latest version of `netdata.conf`
+from your Netdata):
+
+```conf
+[backend]
+ enabled = yes | no
+ type = graphite | opentsdb:telnet | opentsdb:http | opentsdb:https | prometheus_remote_write | json | kinesis | mongodb
+ host tags = list of TAG=VALUE
+ destination = space separated list of [PROTOCOL:]HOST[:PORT] - the first working will be used, or a region for kinesis
+ data source = average | sum | as collected
+ prefix = Netdata
+ hostname = my-name
+ update every = 10
+ buffer on failures = 10
+ timeout ms = 20000
+ send charts matching = *
+ send hosts matching = localhost *
+ send names instead of ids = yes
+```
+
+- `enabled = yes | no`, enables or disables sending data to a backend
+
+- `type = graphite | opentsdb:telnet | opentsdb:http | opentsdb:https | json | kinesis | mongodb`, selects the backend
+ type
+
+- `destination = host1 host2 host3 ...`, accepts **a space separated list** of hostnames, IPs (IPv4 and IPv6) and
+ ports to connect to. Netdata will use the **first available** to send the metrics.
+
+ The format of each item in this list is: `[PROTOCOL:]IP[:PORT]`.
+
+ `PROTOCOL` can be `udp` or `tcp`. `tcp` is the default and the only one supported by the current backends.
+
+ `IP` can be `XX.XX.XX.XX` (IPv4), or `[XX:XX...XX:XX]` (IPv6). For IPv6 you need to enclose the IP in `[]` to
+ separate it from the port.
+
+ `PORT` can be a number or a service name. If omitted, the default port for the backend will be used
+ (graphite = 2003, opentsdb = 4242).
+
+ Example IPv4:
+
+```conf
+ destination = 10.11.14.2:4242 10.11.14.3:4242 10.11.14.4:4242
+```
+
+ Example IPv6 and IPv4 together:
+
+```conf
+ destination = [ffff:...:0001]:2003 10.11.12.1:2003
+```
+
+ When multiple servers are defined, Netdata will try the next one when the first one fails. This allows you to
+ load-balance across servers: list your backend servers in a different order on each Netdata.
+
+ Netdata also ships `nc-backend.sh`, a script that can be used as a fallback backend to save the
+ metrics to disk and push them to the time-series database when it becomes available again. It can also be used to
+ monitor / trace / debug the metrics Netdata generates.
+
+ For kinesis backend `destination` should be set to an AWS region (for example, `us-east-1`).
+
+ The MongoDB backend doesn't use the `destination` option for its configuration. It uses the `mongodb.conf`
+ [configuration file](/backends/mongodb/README.md) instead.
+
+- `data source = as collected`, or `data source = average`, or `data source = sum`, selects the kind of data that will
+ be sent to the backend.
+
+- `hostname = my-name`, is the hostname to be used for sending data to the backend server. By default this is
+ `[global].hostname`.
+
+- `prefix = Netdata`, is the prefix to add to all metrics.
+
+- `update every = 10`, is the number of seconds between sending data to the backend. Netdata will add some randomness
+ to this number, to prevent stressing the backend server when many Netdata servers send data to the same backend.
+ This randomness does not affect the quality of the data, only the time they are sent.
+
+- `buffer on failures = 10`, is the number of iterations (each iteration is `[backend].update every` seconds) to
+ buffer data, when the backend is not available. If the backend fails to receive the data after that many failures,
+ data loss on the backend is expected (Netdata will also log it).
+
+- `timeout ms = 20000`, is the timeout in milliseconds to wait for the backend server to process the data. By default
+ this is `2 * update_every * 1000`.
+
+- `send hosts matching = localhost *` includes one or more space separated patterns, using `*` as wildcard (any number
+ of times within each pattern). The patterns are checked against the hostname (the localhost is always checked as
+ `localhost`), allowing us to filter which hosts will be sent to the backend when this Netdata is a central Netdata
+ aggregating multiple hosts. A pattern starting with `!` gives a negative match. So to match all hosts named `*db*`
+ except hosts containing `*child*`, use `!*child* *db*` (so, the order is important: the first pattern
+ matching the hostname will be used - positive or negative).
+
+- `send charts matching = *` includes one or more space separated patterns, using `*` as wildcard (any number of times
+ within each pattern). The patterns are checked against both chart id and chart name. A pattern starting with `!`
+ gives a negative match. So to match all charts named `apps.*` except charts ending in `*reads`, use `!*reads
+ apps.*` (so, the order is important: the first pattern matching the chart id or the chart name will be used -
+ positive or negative).
+
+- `send names instead of ids = yes | no` controls the metric names Netdata should send to backend. Netdata supports
+ names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names are
+ human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they
+ are different: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.
+
+- `host tags = list of TAG=VALUE` defines tags that should be appended on all metrics for the given host. These are
+ currently only sent to graphite, json, opentsdb and prometheus. Please use the appropriate format for each
+ time-series db. For example opentsdb likes them like `TAG1=VALUE1 TAG2=VALUE2`, but prometheus likes `tag1="value1",
+ tag2="value2"`. Host tags are mirrored with database replication (streaming of metrics between Netdata servers).
+
+ Starting from Netdata v1.20 the host tags are parsed in accordance with the configured backend type and stored as
+ host labels, so that they can be reused in API responses and exporting connectors. The parsing is supported for the
+ graphite, json, opentsdb, and prometheus (default) backend types. You can check how the host tags were parsed using
+ the `/api/v1/info` API call.
+
+## monitoring operation
+
+Netdata provides 5 charts:
+
+1. **Buffered metrics**, the number of metrics Netdata added to the buffer for dispatching them to the
+ backend server.
+
+2. **Buffered data size**, the amount of data (in KB) Netdata added to the buffer.
+
+3. ~~**Backend latency**, the time the backend server needed to process the data Netdata sent. If there was a
+ re-connection involved, this includes the connection time.~~ (this chart has been removed, because it only measures
+ the time Netdata needs to give the data to the O/S - since the backend servers do not ack the reception, Netdata
+ does not have any means to measure this properly).
+
+4. **Backend operations**, the number of operations performed by Netdata.
+
+5. **Backend thread CPU usage**, the CPU resources consumed by the Netdata thread that is responsible for sending the
+ metrics to the backend server.
+
+![image](https://cloud.githubusercontent.com/assets/2662304/20463536/eb196084-af3d-11e6-8ee5-ddbd3b4d8449.png)
+
+## alarms
+
+Netdata adds 4 alarms:
+
+1. `backend_last_buffering`, number of seconds since the last successful buffering of backend data
+2. `backend_metrics_sent`, percentage of metrics sent to the backend server
+3. `backend_metrics_lost`, number of metrics lost due to repeating failures to contact the backend server
+4. ~~`backend_slow`, the percentage of time between iterations needed by the backend to process the data sent by
+ Netdata~~ (this was misleading and has been removed).
+
+![image](https://cloud.githubusercontent.com/assets/2662304/20463779/a46ed1c2-af43-11e6-91a5-07ca4533cac3.png)
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fbackends%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/backends/TIMESCALE.md b/backends/TIMESCALE.md
new file mode 100644
index 0000000..05a3c3b
--- /dev/null
+++ b/backends/TIMESCALE.md
@@ -0,0 +1,57 @@
+<!--
+title: "Writing metrics to TimescaleDB"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/backends/TIMESCALE.md
+-->
+
+# Writing metrics to TimescaleDB
+
+Thanks to Netdata's community of developers and system administrators, and Mahlon Smith
+([GitHub](https://github.com/mahlonsmith)/[Website](http://www.martini.nu/)) in particular, Netdata now supports
+archiving metrics directly to TimescaleDB.
+
+What's TimescaleDB? Here's how their team defines the project on their [GitHub page](https://github.com/timescale/timescaledb):
+
+> TimescaleDB is an open-source database designed to make SQL scalable for time-series data. It is engineered up from
+> PostgreSQL, providing automatic partitioning across time and space (partitioning key), as well as full SQL support.
+
+## Quickstart
+
+To get started archiving metrics to TimescaleDB right away, check out Mahlon's [`netdata-timescale-relay`
+repository](https://github.com/mahlonsmith/netdata-timescale-relay) on GitHub.
+
+This small program takes JSON streams from a Netdata client and writes them to a PostgreSQL (aka TimescaleDB) table.
+You'll run this program in parallel with Netdata, and after a short [configuration
+process](https://github.com/mahlonsmith/netdata-timescale-relay#configuration), your metrics should start populating
+TimescaleDB.
+
+Finally, another member of Netdata's community has built a project that quickly launches Netdata, TimescaleDB, and
+Grafana in easy-to-manage Docker containers. Rune Juhl Jacobsen's
+[project](https://github.com/runejuhl/grafana-timescaledb) uses a `Makefile` to create everything, which makes it
+perfect for testing and experimentation.
+
+## Netdata&#8596;TimescaleDB in action
+
+Aside from creating incredible contributions to Netdata, Mahlon works at [LAIKA](https://www.laika.com/), an
+Oregon-based animation studio that's helped create acclaimed films like _Coraline_ and _Kubo and the Two Strings_.
+
+As part of his work to maintain the company's infrastructure of render farms, workstations, and virtual machines, he's
+using Netdata, `netdata-timescale-relay`, and TimescaleDB to store Netdata metrics alongside other data from other
+sources.
+
+> LAIKA is a long-time PostgreSQL user and added TimescaleDB to their infrastructure in 2018 to help manage and store
+> their IT metrics and time-series data. So far, the tool has been in production at LAIKA for over a year and helps them
+> with their use case of time-based logging, where they record over 8 million metrics an hour for netdata content alone.
+
+By archiving Netdata metrics to a backend like TimescaleDB, LAIKA can consolidate metrics data from distributed machines
+efficiently. Mahlon can then correlate Netdata metrics with other sources directly in TimescaleDB.
+
+And, because LAIKA will soon be storing years' worth of Netdata metrics data in TimescaleDB, they can analyze long-term
+metrics as their films move from concept to final cut.
+
+Read the full blog post from LAIKA at the [TimescaleDB
+blog](https://blog.timescale.com/blog/writing-it-metrics-from-netdata-to-timescaledb/amp/).
+
+Thank you to Mahlon, Rune, TimescaleDB, and the members of the Netdata community that requested and then built this
+backend connection between Netdata and TimescaleDB!
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fbackends%2FTIMESCALE&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/backends/WALKTHROUGH.md b/backends/WALKTHROUGH.md
new file mode 100644
index 0000000..76dd62f
--- /dev/null
+++ b/backends/WALKTHROUGH.md
@@ -0,0 +1,258 @@
+<!--
+title: "Netdata, Prometheus, Grafana stack"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/backends/WALKTHROUGH.md
+-->
+
+# Netdata, Prometheus, Grafana stack
+
+## Intro
+
+In this article I will walk you through the basics of getting Netdata, Prometheus and Grafana all working together and
+monitoring your application servers. This article uses docker on your local workstation. We will be working
+with docker in an ad-hoc way, launching containers that run ‘/bin/bash’ and attaching a TTY to them. I use docker here
+in a purely academic fashion and do not condone running Netdata in a container. I picked this method so that individuals
+without cloud accounts or access to VMs can try this out, and for its speed of deployment.
+
+## Why Netdata, Prometheus, and Grafana
+
+Some time ago I was introduced to Netdata by a coworker. We were attempting to troubleshoot python code which seemed to
+be bottlenecked. I was instantly impressed by the amount of metrics Netdata exposes to you. I quickly added Netdata to
+my set of go-to tools when troubleshooting systems performance.
+
+Some time later I was introduced to Prometheus. Prometheus is a monitoring application which flips the normal
+architecture around and polls REST endpoints for its metrics. This architectural change greatly simplifies and decreases
+the time necessary to begin monitoring your applications. Compared to traditional monitoring solutions, the time spent on
+designing the infrastructure is greatly reduced. Running a single Prometheus server per application becomes feasible
+with the help of Grafana.
+
+Grafana has been the go-to graphing tool for… some time now. It’s awesome, anyone that has used it knows it’s awesome.
+We can point Grafana at Prometheus and use Prometheus as a data source. This allows a pretty simple overall monitoring
+architecture: Install Netdata on your application servers, point Prometheus at Netdata, and then point Grafana at
+Prometheus.
+
+I’m omitting an important ingredient in this stack in order to keep this tutorial simple, and that is service discovery.
+My personal preference is to use Consul. Prometheus can plug into Consul and automatically begin to scrape new hosts
+that register a Netdata client with Consul.
+
+At the end of this tutorial you will understand how each technology fits together to create a modern monitoring stack.
+This stack will offer you visibility into your application and systems performance.
+
+## Getting Started - Netdata
+
+To begin let’s create our container which we will install Netdata on. We need to run a container, forward the necessary
+port that Netdata listens on, and attach a tty so we can interact with the bash shell on the container. But before we do
+this we want name resolution between the two containers to work. In order to accomplish this we will create a
+user-defined network and attach both containers to this network. The first command we should run is:
+
+```sh
+docker network create --driver bridge netdata-tutorial
+```
+
+With this user-defined network created, we can now launch the container we will install Netdata on and point it to this
+network.
+
+```sh
+docker run -it --name netdata --hostname netdata --network=netdata-tutorial -p 19999:19999 centos:latest '/bin/bash'
+```
+
+This command creates an interactive tty session (-it), gives the container both a name in relation to the docker daemon
+and a hostname (this is so you know what container is which when working in the shells and docker maps hostname
+resolution to this container), forwards the local port 19999 to the container’s port 19999 (-p 19999:19999), sets the
+command to run (/bin/bash) and then chooses the base container image (centos:latest). After running this you should be
+sitting inside the shell of the container.
+
+After we have entered the shell we can install Netdata. This process could not be easier. If you take a look at [this
+link](../packaging/installer/README.md), the Netdata devs give us several one-liners to install Netdata. I have not had
+any issues with these one-liners and their bootstrapping scripts so far (if you run into anything, do share). Run
+the following command in your container.
+
+```sh
+bash <(curl -Ss https://my-netdata.io/kickstart.sh) --dont-wait
+```
+
+After the install completes you should be able to hit the Netdata dashboard at <http://localhost:19999/> (replace
+localhost if you’re doing this on a VM or have the docker container hosted on a machine not on your local system). If
+this is your first time using Netdata I suggest you take a look around. The amount of time I’ve spent digging through
+/proc and calculating my own metrics has been greatly reduced by this tool. Take it all in.
+
+Next I want to draw your attention to a particular endpoint. Navigate to
+<http://localhost:19999/api/v1/allmetrics?format=prometheus&help=yes> in your browser. This is the endpoint which
+publishes all the metrics in a format which Prometheus understands. Let’s take a look at one of these metrics.
+`netdata_system_cpu_percentage_average{chart="system.cpu",family="cpu",dimension="system"} 0.0831255 1501271696000` This
+metric represents several things, which I will cover in more detail in the section on Prometheus. For now, understand
+that this metric `netdata_system_cpu_percentage_average` has several labels: (chart, family, dimension). This
+corresponds with the first cpu chart you see on the Netdata dashboard.
+
+![](https://github.com/ldelossa/NetdataTutorial/raw/master/Screen%20Shot%202017-07-28%20at%204.00.45%20PM.png)
+
+This CHART is called ‘system.cpu’, the FAMILY is ‘cpu’, and the DIMENSION we are observing is ‘system’. You can begin to
+draw links between the charts in Netdata and the Prometheus metrics format in this manner.
+
+## Prometheus
+
+We will be installing prometheus in a container for the purpose of demonstration. While prometheus does have an official
+container, I would like to walk through the install process and setup on a fresh container. This will allow anyone
+reading to migrate this tutorial to a VM or server of any sort.
+
+Let’s start another container in the same fashion as we did the Netdata container.
+
+```sh
+docker run -it --name prometheus --hostname prometheus \
+    --network=netdata-tutorial -p 9090:9090 centos:latest '/bin/bash'
+```
+
+This should drop you into a shell once again. Once there, quickly install your favorite editor, as we will be editing
+files later in this tutorial.
+
+```sh
+yum install vim -y
+```
+
+Prometheus provides a tarball of their latest stable versions [here](https://prometheus.io/download/).
+
+Let’s download the latest version and install it into your container.
+
+```sh
+cd /tmp && curl -s https://api.github.com/repos/prometheus/prometheus/releases/latest \
+| grep "browser_download_url.*linux-amd64.tar.gz" \
+| cut -d '"' -f 4 \
+| wget -qi -
+
+mkdir /opt/prometheus
+
+sudo tar -xvf /tmp/prometheus-*linux-amd64.tar.gz -C /opt/prometheus --strip=1
+```
+
+This should get prometheus installed into the container. Let’s test that we can run prometheus and connect to its web
+interface.
+
+```sh
+/opt/prometheus/prometheus
+```
+
+Now attempt to go to <http://localhost:9090/>. You should be presented with the prometheus homepage. This is a good
+point to talk about Prometheus’s data model, which can be viewed here: <https://prometheus.io/docs/concepts/data_model/>
+As explained there, we have two key elements in Prometheus metrics: the ‘metric’ and its ‘labels’. Labels allow for
+granularity between metrics. Let’s use our previous example to further explain.
+
+```conf
+netdata_system_cpu_percentage_average{chart="system.cpu",family="cpu",dimension="system"} 0.0831255 1501271696000
+```
+
+Here our metric is ‘netdata_system_cpu_percentage_average’ and our labels are ‘chart’, ‘family’, and ‘dimension’. The
+last two values constitute the actual metric value for the metric type (gauge, counter, etc…). We can begin graphing
+system metrics with this information, but first we need to hook up Prometheus to poll Netdata stats.
+
+Let’s move our attention to Prometheus’s configuration. Prometheus gets its config from the file located (in our example)
+at `/opt/prometheus/prometheus.yml`. I won’t spend an extensive amount of time going over the configuration values
+documented here: <https://prometheus.io/docs/operating/configuration/>. We will be adding a new “job” under
+“scrape_configs”. Let’s make the “scrape_configs” section look like this (we can use the DNS name ‘netdata’ due to the
+custom user-defined network we created in docker beforehand).
+
+```yaml
+scrape_configs:
+ # The job name is added as a label `job=<job_name>` to any timeseries scraped from this config.
+ - job_name: 'prometheus'
+
+ # metrics_path defaults to '/metrics'
+ # scheme defaults to 'http'.
+
+ static_configs:
+ - targets: ['localhost:9090']
+
+ - job_name: 'netdata'
+
+ metrics_path: /api/v1/allmetrics
+ params:
+ format: [ prometheus ]
+
+ static_configs:
+ - targets: ['netdata:19999']
+```
+
+Let’s start prometheus once again by running `/opt/prometheus/prometheus`. If we now navigate to prometheus at
+‘<http://localhost:9090/targets’> we should see our target being successfully scraped. If we now go back to the
+Prometheus’s homepage and begin to type ‘netdata\_’ Prometheus should auto complete metrics it is now scraping.
+
+![](https://github.com/ldelossa/NetdataTutorial/raw/master/Screen%20Shot%202017-07-28%20at%205.13.43%20PM.png)
+
+Let’s now start exploring how we can graph some metrics. Back in our NetData container let’s get the CPU spinning with a
+pointless busy loop. On the shell do the following:
+
+```sh
+[root@netdata /]# while true; do echo "HOT HOT HOT CPU"; done
+```
+
+Our NetData cpu graph should be showing some activity. Let’s represent this in Prometheus. In order to do this let’s
+keep our metrics page open for reference: <http://localhost:19999/api/v1/allmetrics?format=prometheus&help=yes> We are
+setting out to graph the data in the CPU chart, so let’s search for “system.cpu” in the metrics page above. We come across
+a section of metrics with the first comments `# COMMENT homogeneous chart "system.cpu", context "system.cpu", family
+"cpu", units "percentage"` followed by the metrics. This is a good start; now let us drill down to the specific metric we
+would like to graph.
+
+```conf
+# COMMENT
+netdata_system_cpu_percentage_average: dimension "system", value is percentage, gauge, dt 1501275951 to 1501275951 inclusive
+netdata_system_cpu_percentage_average{chart="system.cpu",family="cpu",dimension="system"} 0.0000000 1501275951000
+```
+
+Here we learn that the metric name we care about is ‘netdata_system_cpu_percentage_average’, so throw this into Prometheus
+and see what we get. We should see something similar to this (I shut off my busy loop):
+
+![](https://github.com/ldelossa/NetdataTutorial/raw/master/Screen%20Shot%202017-07-28%20at%205.47.53%20PM.png)
+
+This is a good step toward what we want. Also make note that Prometheus will tag on an ‘instance’ label for us, which
+corresponds to our statically defined job in the configuration file. This allows us to tailor our queries to specific
+instances. Now we need to isolate the dimension we want in our query. To do this, let us refine the query slightly by
+querying the dimension as well. Place this into our query text box:
+`netdata_system_cpu_percentage_average{dimension="system"}` We now wind up with the following graph.
+
+![](https://github.com/ldelossa/NetdataTutorial/raw/master/Screen%20Shot%202017-07-28%20at%205.54.40%20PM.png)
+
+Awesome, this is exactly what we wanted. If you haven’t caught on yet, we can emulate entire charts from NetData by using
+the `chart` label. If you’d like, you can combine the ‘chart’ and ‘instance’ labels to create per-instance charts.
+Let’s give this a try: `netdata_system_cpu_percentage_average{chart="system.cpu", instance="netdata:19999"}`
+
+This is the basics of using Prometheus to query NetData. I’d advise everyone at this point to read [this
+page](../backends/prometheus/#using-netdata-with-prometheus). The key point here is that NetData can export metrics from
+its internal DB or can send metrics “as-collected” by specifying the ‘source=as-collected’ url parameter, like so:
+<http://localhost:19999/api/v1/allmetrics?format=prometheus&help=yes&types=yes&source=as-collected> If you choose to use
+this method you will need to use Prometheus's set of functions here: <https://prometheus.io/docs/querying/functions/> to
+obtain useful metrics, as you are now dealing with raw counters from the system. For example, you will have to use the
+`irate()` function over a counter to get that metric's rate per second. If your graphing needs are met by using the
+metrics returned by NetData's internal database (not specifying any source= url parameter) then use that. If you find
+limitations, then consider re-writing your queries using the raw data and using Prometheus functions to get the desired
+chart.
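+
+As a hedged illustration (the exact metric name depends on the charts your agent exposes, so check the `allmetrics`
+page first), a raw as-collected CPU counter could be turned into a per-second rate with a query such as:
+
+```conf
+irate(netdata_system_cpu_total{chart="system.cpu",dimension="user"}[5m])
+```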
+
+## Grafana
+
+Finally we make it to grafana. This is the easiest part in my opinion. This time we will actually run the official
+grafana docker container as all configuration we need to do is done via the GUI. Let’s run the following command:
+
+```sh
+docker run -i -p 3000:3000 --network=netdata-tutorial grafana/grafana
+```
+
+This will get grafana running at <http://localhost:3000/>. Let’s go there and log in using the credentials Admin:Admin.
+
+The first thing we want to do is click ‘Add data source’. Let’s make it look like the following screenshot:
+
+![](https://github.com/ldelossa/NetdataTutorial/raw/master/Screen%20Shot%202017-07-28%20at%206.36.55%20PM.png)
+
+With this completed let’s graph! Create a new Dashboard by clicking on the top left Grafana Icon and create a new graph
+in that dashboard. Fill in the query like we did above and save.
+
+![](https://github.com/ldelossa/NetdataTutorial/raw/master/Screen%20Shot%202017-07-28%20at%206.39.38%20PM.png)
+
+## Conclusion
+
+There you have it, a complete systems monitoring stack which is very easy to deploy. From here I would begin to
+understand how Prometheus and a service discovery mechanism such as Consul can play together nicely. My current prod
+deployments automatically register Netdata services into Consul and Prometheus automatically begins to scrape them. Once
+achieved you do not have to think about the monitoring system until Prometheus cannot keep up with your scale. Once this
+happens there are options presented in the Prometheus documentation for solving this. Hope this was helpful, happy
+monitoring.
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fbackends%2FWALKTHROUGH&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/backends/aws_kinesis/Makefile.am b/backends/aws_kinesis/Makefile.am
new file mode 100644
index 0000000..1fec72c
--- /dev/null
+++ b/backends/aws_kinesis/Makefile.am
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
+
+dist_libconfig_DATA = \
+ aws_kinesis.conf \
+ $(NULL)
diff --git a/backends/aws_kinesis/README.md b/backends/aws_kinesis/README.md
new file mode 100644
index 0000000..a2b6825
--- /dev/null
+++ b/backends/aws_kinesis/README.md
@@ -0,0 +1,53 @@
+<!--
+title: "Using Netdata with AWS Kinesis Data Streams"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/backends/aws_kinesis/README.md
+-->
+
+# Using Netdata with AWS Kinesis Data Streams
+
+## Prerequisites
+
+To use AWS Kinesis as a backend, the AWS SDK for C++ should first be
+[installed](https://docs.aws.amazon.com/en_us/sdk-for-cpp/v1/developer-guide/setup.html). `libcrypto`, `libssl`,
+and `libcurl` are also required to compile Netdata with Kinesis support enabled. Next, Netdata should be re-installed
+from source. The installer will detect that the required libraries are now available.
+
+If the AWS SDK for C++ is being installed from source, it is useful to set `-DBUILD_ONLY="kinesis"`. Otherwise, the
+build process could take a very long time. Note that the default installation path for the libraries is
+`/usr/local/lib64`. Many Linux distributions don't include this path as a default one for library searches, so it is
+advisable to use the following options to `cmake` while building the AWS SDK:
+
+```sh
+cmake -DCMAKE_INSTALL_LIBDIR=/usr/lib -DCMAKE_INSTALL_INCLUDEDIR=/usr/include -DBUILD_SHARED_LIBS=OFF -DBUILD_ONLY=kinesis <aws-sdk-cpp sources>
+```
+
+## Configuration
+
+To enable sending data to the Kinesis backend, set the following options in `netdata.conf`:
+
+```conf
+[backend]
+ enabled = yes
+ type = kinesis
+ destination = us-east-1
+```
+
+Set the `destination` option to an AWS region.
+
+In the Netdata configuration directory run `./edit-config aws_kinesis.conf` and set AWS credentials and stream name:
+
+```conf
+# AWS credentials
+aws_access_key_id = your_access_key_id
+aws_secret_access_key = your_secret_access_key
+
+# destination stream
+stream name = your_stream_name
+```
+
+Alternatively, AWS credentials can be set for the `netdata` user using the AWS SDK for C++ [standard methods](https://docs.aws.amazon.com/sdk-for-cpp/v1/developer-guide/credentials.html).
+
+A partition key for every record is computed automatically by Netdata, in order to distribute records evenly across the
+available shards.
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fbackends%2Faws_kinesis%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/backends/aws_kinesis/aws_kinesis.c b/backends/aws_kinesis/aws_kinesis.c
new file mode 100644
index 0000000..b1ea478
--- /dev/null
+++ b/backends/aws_kinesis/aws_kinesis.c
@@ -0,0 +1,94 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#define BACKENDS_INTERNALS
+#include "aws_kinesis.h"
+
+#define CONFIG_FILE_LINE_MAX ((CONFIG_MAX_NAME + CONFIG_MAX_VALUE + 1024) * 2)
+
+// ----------------------------------------------------------------------------
+// kinesis backend
+
+// read the aws_kinesis.conf file
+int read_kinesis_conf(const char *path, char **access_key_id_p, char **secret_access_key_p, char **stream_name_p)
+{
+ char *access_key_id = *access_key_id_p;
+ char *secret_access_key = *secret_access_key_p;
+ char *stream_name = *stream_name_p;
+
+ if(unlikely(access_key_id)) freez(access_key_id);
+ if(unlikely(secret_access_key)) freez(secret_access_key);
+ if(unlikely(stream_name)) freez(stream_name);
+ access_key_id = NULL;
+ secret_access_key = NULL;
+ stream_name = NULL;
+
+ int line = 0;
+
+ char filename[FILENAME_MAX + 1];
+ snprintfz(filename, FILENAME_MAX, "%s/aws_kinesis.conf", path);
+
+ char buffer[CONFIG_FILE_LINE_MAX + 1], *s;
+
+ debug(D_BACKEND, "BACKEND: opening config file '%s'", filename);
+
+ FILE *fp = fopen(filename, "r");
+ if(!fp) {
+ return 1;
+ }
+
+ while(fgets(buffer, CONFIG_FILE_LINE_MAX, fp) != NULL) {
+ buffer[CONFIG_FILE_LINE_MAX] = '\0';
+ line++;
+
+ s = trim(buffer);
+ if(!s || *s == '#') {
+ debug(D_BACKEND, "BACKEND: ignoring line %d of file '%s', it is empty.", line, filename);
+ continue;
+ }
+
+ char *name = s;
+ char *value = strchr(s, '=');
+ if(unlikely(!value)) {
+ error("BACKEND: ignoring line %d ('%s') of file '%s', there is no = in it.", line, s, filename);
+ continue;
+ }
+ *value = '\0';
+ value++;
+
+ name = trim(name);
+ value = trim(value);
+
+ if(unlikely(!name || *name == '#')) {
+ error("BACKEND: ignoring line %d of file '%s', name is empty.", line, filename);
+ continue;
+ }
+
+ if(!value)
+ value = "";
+ else
+ value = strip_quotes(value);
+
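+        // the name[0]/name[4] character tests are a cheap pre-filter that
+        // skips the full strcmp() for most non-matching keys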
+ if(name[0] == 'a' && name[4] == 'a' && !strcmp(name, "aws_access_key_id")) {
+ access_key_id = strdupz(value);
+ }
+ else if(name[0] == 'a' && name[4] == 's' && !strcmp(name, "aws_secret_access_key")) {
+ secret_access_key = strdupz(value);
+ }
+ else if(name[0] == 's' && !strcmp(name, "stream name")) {
+ stream_name = strdupz(value);
+ }
+ }
+
+ fclose(fp);
+
+ if(unlikely(!stream_name || !*stream_name)) {
+ error("BACKEND: stream name is a mandatory Kinesis parameter but it is not configured");
+ return 1;
+ }
+
+ *access_key_id_p = access_key_id;
+ *secret_access_key_p = secret_access_key;
+ *stream_name_p = stream_name;
+
+ return 0;
+}
diff --git a/backends/aws_kinesis/aws_kinesis.conf b/backends/aws_kinesis/aws_kinesis.conf
new file mode 100644
index 0000000..cc54b5f
--- /dev/null
+++ b/backends/aws_kinesis/aws_kinesis.conf
@@ -0,0 +1,10 @@
+# AWS Kinesis Data Streams backend configuration
+#
+# All options in this file are mandatory
+
+# AWS credentials
+aws_access_key_id =
+aws_secret_access_key =
+
+# destination stream
+stream name =
\ No newline at end of file
diff --git a/backends/aws_kinesis/aws_kinesis.h b/backends/aws_kinesis/aws_kinesis.h
new file mode 100644
index 0000000..50a4631
--- /dev/null
+++ b/backends/aws_kinesis/aws_kinesis.h
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_BACKEND_KINESIS_H
+#define NETDATA_BACKEND_KINESIS_H
+
+#include "backends/backends.h"
+#include "aws_kinesis_put_record.h"
+
+#define KINESIS_PARTITION_KEY_MAX 256
+#define KINESIS_RECORD_MAX 1024 * 1024
+
+extern int read_kinesis_conf(const char *path, char **auth_key_id_p, char **secure_key_p, char **stream_name_p);
+
+#endif //NETDATA_BACKEND_KINESIS_H
diff --git a/backends/aws_kinesis/aws_kinesis_put_record.cc b/backends/aws_kinesis/aws_kinesis_put_record.cc
new file mode 100644
index 0000000..a8ba4aa
--- /dev/null
+++ b/backends/aws_kinesis/aws_kinesis_put_record.cc
@@ -0,0 +1,87 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include <aws/core/Aws.h>
+#include <aws/core/client/ClientConfiguration.h>
+#include <aws/core/auth/AWSCredentials.h>
+#include <aws/core/utils/Outcome.h>
+#include <aws/kinesis/KinesisClient.h>
+#include <aws/kinesis/model/PutRecordRequest.h>
+#include "aws_kinesis_put_record.h"
+
+using namespace Aws;
+
+static SDKOptions options;
+
+static Kinesis::KinesisClient *client;
+
+struct request_outcome {
+ Kinesis::Model::PutRecordOutcomeCallable future_outcome;
+ size_t data_len;
+};
+
+static Vector<request_outcome> request_outcomes;
+
+void backends_kinesis_init(const char *region, const char *access_key_id, const char *secret_key, const long timeout) {
+ InitAPI(options);
+
+ Client::ClientConfiguration config;
+
+ config.region = region;
+ config.requestTimeoutMs = timeout;
+ config.connectTimeoutMs = timeout;
+
+ if(access_key_id && *access_key_id && secret_key && *secret_key) {
+ client = New<Kinesis::KinesisClient>("client", Auth::AWSCredentials(access_key_id, secret_key), config);
+ } else {
+ client = New<Kinesis::KinesisClient>("client", config);
+ }
+}
+
+void backends_kinesis_shutdown() {
+ Delete(client);
+
+ ShutdownAPI(options);
+}
+
+int backends_kinesis_put_record(const char *stream_name, const char *partition_key,
+ const char *data, size_t data_len) {
+ Kinesis::Model::PutRecordRequest request;
+
+ request.SetStreamName(stream_name);
+ request.SetPartitionKey(partition_key);
+ request.SetData(Utils::ByteBuffer((unsigned char*) data, data_len));
+
+ request_outcomes.push_back({client->PutRecordCallable(request), data_len});
+
+ return 0;
+}
+
+int backends_kinesis_get_result(char *error_message, size_t *sent_bytes, size_t *lost_bytes) {
+ Kinesis::Model::PutRecordOutcome outcome;
+ *sent_bytes = 0;
+ *lost_bytes = 0;
+
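+    // Drain the list of in-flight PutRecord futures: harvest those that are
+    // ready (or deferred), account their bytes as sent or lost, and leave the
+    // rest pending for the next call.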
+ for(auto request_outcome = request_outcomes.begin(); request_outcome != request_outcomes.end(); ) {
+ std::future_status status = request_outcome->future_outcome.wait_for(std::chrono::microseconds(100));
+
+ if(status == std::future_status::ready || status == std::future_status::deferred) {
+ outcome = request_outcome->future_outcome.get();
+ *sent_bytes += request_outcome->data_len;
+
+ if(!outcome.IsSuccess()) {
+ *lost_bytes += request_outcome->data_len;
+ outcome.GetError().GetMessage().copy(error_message, ERROR_LINE_MAX);
+ }
+
+            request_outcome = request_outcomes.erase(request_outcome);
+ } else {
+ ++request_outcome;
+ }
+ }
+
+ if(*lost_bytes) {
+ return 1;
+ }
+
+ return 0;
+}
\ No newline at end of file
diff --git a/backends/aws_kinesis/aws_kinesis_put_record.h b/backends/aws_kinesis/aws_kinesis_put_record.h
new file mode 100644
index 0000000..fa3d034
--- /dev/null
+++ b/backends/aws_kinesis/aws_kinesis_put_record.h
@@ -0,0 +1,25 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_BACKEND_KINESIS_PUT_RECORD_H
+#define NETDATA_BACKEND_KINESIS_PUT_RECORD_H
+
+#define ERROR_LINE_MAX 1023
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+void backends_kinesis_init(const char *region, const char *access_key_id, const char *secret_key, const long timeout);
+
+void backends_kinesis_shutdown();
+
+int backends_kinesis_put_record(const char *stream_name, const char *partition_key,
+ const char *data, size_t data_len);
+
+int backends_kinesis_get_result(char *error_message, size_t *sent_bytes, size_t *lost_bytes);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif //NETDATA_BACKEND_KINESIS_PUT_RECORD_H
diff --git a/backends/backends.c b/backends/backends.c
new file mode 100644
index 0000000..6bf583e
--- /dev/null
+++ b/backends/backends.c
@@ -0,0 +1,1243 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#define BACKENDS_INTERNALS
+#include "backends.h"
+
+// ----------------------------------------------------------------------------
+// How backends work in netdata:
+//
+// 1. There is an independent thread that runs at the required interval
+// (for example, once every 10 seconds)
+//
+// 2. Every time it wakes, it calls the backend formatting functions to build
+// a buffer of data. This is a very fast, memory only operation.
+//
+// 3. If the buffer already includes data, the new data are appended.
+// If the buffer becomes too big, because the data cannot be sent, a
+// log is written and the buffer is discarded.
+//
+// 4. Then it tries to send all the data. It blocks until all the data are sent
+// or the socket returns an error.
+// If the time required for this is above the interval, it starts skipping
+// intervals, but the calculated values include the entire database, without
+// gaps (it remembers the timestamps and continues from where it stopped).
+//
+// 5. Repeats the above forever.
+//
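+// A minimal sketch of that cycle (simplified; format_and_append() and
+// send_buffer() are hypothetical stand-ins for the code inlined in
+// backends_main() below, which also handles SSL, Kinesis, MongoDB and the
+// monitoring charts):
+//
+//     while(!netdata_exit) {
+//         heartbeat_next(&hb, step_ut);          // 1. wake up on the interval
+//         format_and_append(b, after, before);   // 2, 3. append new data to the buffer
+//         if(send_buffer(sock, b) == 0)          // 4. try to send everything
+//             buffer_flush(b);                   //    on success, empty the buffer
+//     }
+//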
+
+const char *global_backend_prefix = "netdata";
+int global_backend_update_every = 10;
+BACKEND_OPTIONS global_backend_options = BACKEND_SOURCE_DATA_AVERAGE | BACKEND_OPTION_SEND_NAMES;
+const char *global_backend_source = NULL;
+
+// ----------------------------------------------------------------------------
+// helper functions for backends
+
+size_t backend_name_copy(char *d, const char *s, size_t usable) {
+ size_t n;
+
+ for(n = 0; *s && n < usable ; d++, s++, n++) {
+ char c = *s;
+
+ if(c != '.' && !isalnum(c)) *d = '_';
+ else *d = c;
+ }
+ *d = '\0';
+
+ return n;
+}
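+
+// Example: backend_name_copy() keeps dots and alphanumerics and maps everything
+// else to '_', so a chart id like "net.eth0-lo" is copied as "net.eth0_lo"
+// before it is used as a metric name component.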
+
+// calculate the SUM or AVERAGE of a dimension, for any timeframe
+// may return NAN if the database does not have any value in the given timeframe
+
+calculated_number backend_calculate_value_from_stored_data(
+ RRDSET *st // the chart
+ , RRDDIM *rd // the dimension
+ , time_t after // the start timestamp
+ , time_t before // the end timestamp
+ , BACKEND_OPTIONS backend_options // BACKEND_SOURCE_* bitmap
+ , time_t *first_timestamp // the first point of the database used in this response
+ , time_t *last_timestamp // the timestamp that should be reported to backend
+) {
+ RRDHOST *host = st->rrdhost;
+ (void)host;
+
+ // find the edges of the rrd database for this chart
+ time_t first_t = rd->state->query_ops.oldest_time(rd);
+ time_t last_t = rd->state->query_ops.latest_time(rd);
+ time_t update_every = st->update_every;
+ struct rrddim_query_handle handle;
+ storage_number n;
+
+ // step back a little, to make sure we have complete data collection
+ // for all metrics
+ after -= update_every * 2;
+ before -= update_every * 2;
+
+ // align the time-frame
+ after = after - (after % update_every);
+ before = before - (before % update_every);
+
+ // for before, lose another iteration
+ // the latest point will be reported the next time
+ before -= update_every;
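+
+ // worked example: with update_every = 10, after = 105 and before = 125,
+ // stepping back two iterations gives after = 85, before = 105; aligning
+ // to the update_every grid gives after = 80, before = 100; dropping the
+ // latest point gives before = 90, so the range [80, 90] is queried and
+ // 90 is reported to the backend as the last timestamp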
+
+ if(unlikely(after > before))
+ // this can happen when update_every > before - after
+ after = before;
+
+ if(unlikely(after < first_t))
+ after = first_t;
+
+ if(unlikely(before > last_t))
+ before = last_t;
+
+ if(unlikely(before < first_t || after > last_t)) {
+ // the chart has not been updated in the wanted timeframe
+ debug(D_BACKEND, "BACKEND: %s.%s.%s: aligned timeframe %lu to %lu is outside the chart's database range %lu to %lu",
+ host->hostname, st->id, rd->id,
+ (unsigned long)after, (unsigned long)before,
+ (unsigned long)first_t, (unsigned long)last_t
+ );
+ return NAN;
+ }
+
+ *first_timestamp = after;
+ *last_timestamp = before;
+
+ size_t counter = 0;
+ calculated_number sum = 0;
+
+/*
+ long start_at_slot = rrdset_time2slot(st, before),
+ stop_at_slot = rrdset_time2slot(st, after),
+ slot, stop_now = 0;
+
+ for(slot = start_at_slot; !stop_now ; slot--) {
+
+ if(unlikely(slot < 0)) slot = st->entries - 1;
+ if(unlikely(slot == stop_at_slot)) stop_now = 1;
+
+ storage_number n = rd->values[slot];
+
+ if(unlikely(!does_storage_number_exist(n))) {
+ // not collected
+ continue;
+ }
+
+ calculated_number value = unpack_storage_number(n);
+ sum += value;
+
+ counter++;
+ }
+*/
+ for(rd->state->query_ops.init(rd, &handle, after, before) ; !rd->state->query_ops.is_finished(&handle) ; ) {
+ time_t curr_t;
+ n = rd->state->query_ops.next_metric(&handle, &curr_t);
+
+ if(unlikely(!does_storage_number_exist(n))) {
+ // not collected
+ continue;
+ }
+
+ calculated_number value = unpack_storage_number(n);
+ sum += value;
+
+ counter++;
+ }
+ rd->state->query_ops.finalize(&handle);
+ if(unlikely(!counter)) {
+ debug(D_BACKEND, "BACKEND: %s.%s.%s: no values stored in database for range %lu to %lu",
+ host->hostname, st->id, rd->id,
+ (unsigned long)after, (unsigned long)before
+ );
+ return NAN;
+ }
+
+ if(unlikely(BACKEND_OPTIONS_DATA_SOURCE(backend_options) == BACKEND_SOURCE_DATA_SUM))
+ return sum;
+
+ return sum / (calculated_number)counter;
+}
+
+
+// discard a response received by a backend
+// after logging a sample of it to error.log
+
+int discard_response(BUFFER *b, const char *backend) {
+ char sample[1024];
+ const char *s = buffer_tostring(b);
+ char *d = sample, *e = &sample[sizeof(sample) - 1];
+
+ for(; *s && d < e ;s++) {
+ char c = *s;
+ if(unlikely(!isprint(c))) c = ' ';
+ *d++ = c;
+ }
+ *d = '\0';
+
+ info("BACKEND: received %zu bytes from %s backend. Ignoring them. Sample: '%s'", buffer_strlen(b), backend, sample);
+ buffer_flush(b);
+ return 0;
+}
+
+
+// ----------------------------------------------------------------------------
+// the backend thread
+
+static SIMPLE_PATTERN *charts_pattern = NULL;
+static SIMPLE_PATTERN *hosts_pattern = NULL;
+
+inline int backends_can_send_rrdset(BACKEND_OPTIONS backend_options, RRDSET *st) {
+ RRDHOST *host = st->rrdhost;
+ (void)host;
+
+ if(unlikely(rrdset_flag_check(st, RRDSET_FLAG_BACKEND_IGNORE)))
+ return 0;
+
+ if(unlikely(!rrdset_flag_check(st, RRDSET_FLAG_BACKEND_SEND))) {
+ // we have not checked this chart
+ if(simple_pattern_matches(charts_pattern, st->id) || simple_pattern_matches(charts_pattern, st->name))
+ rrdset_flag_set(st, RRDSET_FLAG_BACKEND_SEND);
+ else {
+ rrdset_flag_set(st, RRDSET_FLAG_BACKEND_IGNORE);
+ debug(D_BACKEND, "BACKEND: not sending chart '%s' of host '%s', because it is disabled for backends.", st->id, host->hostname);
+ return 0;
+ }
+ }
+
+ if(unlikely(!rrdset_is_available_for_backends(st))) {
+ debug(D_BACKEND, "BACKEND: not sending chart '%s' of host '%s', because it is not available for backends.", st->id, host->hostname);
+ return 0;
+ }
+
+ if(unlikely(st->rrd_memory_mode == RRD_MEMORY_MODE_NONE && !(BACKEND_OPTIONS_DATA_SOURCE(backend_options) == BACKEND_SOURCE_DATA_AS_COLLECTED))) {
+ debug(D_BACKEND, "BACKEND: not sending chart '%s' of host '%s' because its memory mode is '%s' and the backend requires database access.", st->id, host->hostname, rrd_memory_mode_name(host->rrd_memory_mode));
+ return 0;
+ }
+
+ return 1;
+}
+
+inline BACKEND_OPTIONS backend_parse_data_source(const char *source, BACKEND_OPTIONS backend_options) {
+ if(!strcmp(source, "raw") || !strcmp(source, "as collected") || !strcmp(source, "as-collected") || !strcmp(source, "as_collected") || !strcmp(source, "ascollected")) {
+ backend_options |= BACKEND_SOURCE_DATA_AS_COLLECTED;
+ backend_options &= ~(BACKEND_OPTIONS_SOURCE_BITS ^ BACKEND_SOURCE_DATA_AS_COLLECTED);
+ }
+ else if(!strcmp(source, "average")) {
+ backend_options |= BACKEND_SOURCE_DATA_AVERAGE;
+ backend_options &= ~(BACKEND_OPTIONS_SOURCE_BITS ^ BACKEND_SOURCE_DATA_AVERAGE);
+ }
+ else if(!strcmp(source, "sum") || !strcmp(source, "volume")) {
+ backend_options |= BACKEND_SOURCE_DATA_SUM;
+ backend_options &= ~(BACKEND_OPTIONS_SOURCE_BITS ^ BACKEND_SOURCE_DATA_SUM);
+ }
+ else {
+ error("BACKEND: invalid data source method '%s'.", source);
+ }
+
+ return backend_options;
+}
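+
+// Worked example of the bit arithmetic above: BACKEND_OPTIONS_SOURCE_BITS is
+// the OR of the three source flags (bits 0-2). Selecting "average" sets
+// BACKEND_SOURCE_DATA_AVERAGE (1 << 1); then ~(SOURCE_BITS ^ AVERAGE) keeps
+// every bit except AS_COLLECTED (1 << 0) and SUM (1 << 2), so the AND clears
+// the other two source bits while leaving option bits like
+// BACKEND_OPTION_SEND_NAMES (1 << 16) untouched.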
+
+static void backends_main_cleanup(void *ptr) {
+ struct netdata_static_thread *static_thread = (struct netdata_static_thread *)ptr;
+ static_thread->enabled = NETDATA_MAIN_THREAD_EXITING;
+
+ info("cleaning up...");
+
+ static_thread->enabled = NETDATA_MAIN_THREAD_EXITED;
+}
+
+/**
+ * Set Kinesis variables
+ *
+ * Set the variables necessary to work with this specific backend.
+ *
+ * @param default_port the default port of the backend
+ * @param brc function called to check the result.
+ * @param brf function called to format the message to the backend
+ */
+void backend_set_kinesis_variables(int *default_port,
+ backend_response_checker_t brc,
+ backend_request_formatter_t brf)
+{
+ (void)default_port;
+#ifndef HAVE_KINESIS
+ (void)brc;
+ (void)brf;
+#endif
+
+#if HAVE_KINESIS
+ *brc = process_json_response;
+ if (BACKEND_OPTIONS_DATA_SOURCE(global_backend_options) == BACKEND_SOURCE_DATA_AS_COLLECTED)
+ *brf = backends_format_dimension_collected_json_plaintext;
+ else
+ *brf = backends_format_dimension_stored_json_plaintext;
+#endif
+}
+
+/**
+ * Set Prometheus variables
+ *
+ * Set the variables necessary to work with this specific backend.
+ *
+ * @param default_port the default port of the backend
+ * @param brc function called to check the result.
+ * @param brf function called to format the message to the backend
+ */
+void backend_set_prometheus_variables(int *default_port,
+ backend_response_checker_t brc,
+ backend_request_formatter_t brf)
+{
+ (void)default_port;
+ (void)brf;
+#ifndef ENABLE_PROMETHEUS_REMOTE_WRITE
+ (void)brc;
+#endif
+
+#if ENABLE_PROMETHEUS_REMOTE_WRITE
+ *brc = backends_process_prometheus_remote_write_response;
+#endif /* ENABLE_PROMETHEUS_REMOTE_WRITE */
+}
+
+/**
+ * Set MongoDB variables
+ *
+ * Set the variables necessary to work with this specific backend.
+ *
+ * @param default_port the default port of the backend
+ * @param brc function called to check the result.
+ * @param brf function called to format the message to the backend
+ */
+void backend_set_mongodb_variables(int *default_port,
+ backend_response_checker_t brc,
+ backend_request_formatter_t brf)
+{
+ (void)default_port;
+#ifndef HAVE_MONGOC
+ (void)brc;
+ (void)brf;
+#endif
+
+#if HAVE_MONGOC
+ *brc = process_json_response;
+ if(BACKEND_OPTIONS_DATA_SOURCE(global_backend_options) == BACKEND_SOURCE_DATA_AS_COLLECTED)
+ *brf = backends_format_dimension_collected_json_plaintext;
+ else
+ *brf = backends_format_dimension_stored_json_plaintext;
+#endif
+}
+
+/**
+ * Set JSON variables
+ *
+ * Set the variables necessary to work with this specific backend.
+ *
+ * @param default_port the default port of the backend
+ * @param brc function called to check the result.
+ * @param brf function called to format the message to the backend
+ */
+void backend_set_json_variables(int *default_port,
+ backend_response_checker_t brc,
+ backend_request_formatter_t brf)
+{
+ *default_port = 5448;
+ *brc = process_json_response;
+
+ if (BACKEND_OPTIONS_DATA_SOURCE(global_backend_options) == BACKEND_SOURCE_DATA_AS_COLLECTED)
+ *brf = backends_format_dimension_collected_json_plaintext;
+ else
+ *brf = backends_format_dimension_stored_json_plaintext;
+}
+
+/**
+ * Set OpenTSDB HTTP variables
+ *
+ * Set the variables necessary to work with this specific backend.
+ *
+ * @param default_port the default port of the backend
+ * @param brc function called to check the result.
+ * @param brf function called to format the message to the backend
+ */
+void backend_set_opentsdb_http_variables(int *default_port,
+ backend_response_checker_t brc,
+ backend_request_formatter_t brf)
+{
+ *default_port = 4242;
+ *brc = process_opentsdb_response;
+
+ if(BACKEND_OPTIONS_DATA_SOURCE(global_backend_options) == BACKEND_SOURCE_DATA_AS_COLLECTED)
+ *brf = backends_format_dimension_collected_opentsdb_http;
+ else
+ *brf = backends_format_dimension_stored_opentsdb_http;
+
+}
+
+/**
+ * Set OpenTSDB Telnet variables
+ *
+ * Set the variables necessary to work with this specific backend.
+ *
+ * @param default_port the default port of the backend
+ * @param brc function called to check the result.
+ * @param brf function called to format the message to the backend
+ */
+void backend_set_opentsdb_telnet_variables(int *default_port,
+ backend_response_checker_t brc,
+ backend_request_formatter_t brf)
+{
+ *default_port = 4242;
+ *brc = process_opentsdb_response;
+
+ if(BACKEND_OPTIONS_DATA_SOURCE(global_backend_options) == BACKEND_SOURCE_DATA_AS_COLLECTED)
+ *brf = backends_format_dimension_collected_opentsdb_telnet;
+ else
+ *brf = backends_format_dimension_stored_opentsdb_telnet;
+}
+
+/**
+ * Set Graphite variables
+ *
+ * Set the variables necessary to work with this specific backend.
+ *
+ * @param default_port the default port of the backend
+ * @param brc function called to check the result.
+ * @param brf function called to format the message to the backend
+ */
+void backend_set_graphite_variables(int *default_port,
+ backend_response_checker_t brc,
+ backend_request_formatter_t brf)
+{
+ *default_port = 2003;
+ *brc = process_graphite_response;
+
+ if(BACKEND_OPTIONS_DATA_SOURCE(global_backend_options) == BACKEND_SOURCE_DATA_AS_COLLECTED)
+ *brf = backends_format_dimension_collected_graphite_plaintext;
+ else
+ *brf = backends_format_dimension_stored_graphite_plaintext;
+}
+
+/**
+ * Select Type
+ *
+ * Select the backend type based on the user input.
+ *
+ * @param type is the string that defines the backend type
+ *
+ * @return It returns the backend type.
+ */
+BACKEND_TYPE backend_select_type(const char *type) {
+ if(!strcmp(type, "graphite") || !strcmp(type, "graphite:plaintext")) {
+ return BACKEND_TYPE_GRAPHITE;
+ }
+ else if(!strcmp(type, "opentsdb") || !strcmp(type, "opentsdb:telnet")) {
+ return BACKEND_TYPE_OPENTSDB_USING_TELNET;
+ }
+ else if(!strcmp(type, "opentsdb:http") || !strcmp(type, "opentsdb:https")) {
+ return BACKEND_TYPE_OPENTSDB_USING_HTTP;
+ }
+ else if (!strcmp(type, "json") || !strcmp(type, "json:plaintext")) {
+ return BACKEND_TYPE_JSON;
+ }
+ else if (!strcmp(type, "prometheus_remote_write")) {
+ return BACKEND_TYPE_PROMETHEUS_REMOTE_WRITE;
+ }
+ else if (!strcmp(type, "kinesis") || !strcmp(type, "kinesis:plaintext")) {
+ return BACKEND_TYPE_KINESIS;
+ }
+ else if (!strcmp(type, "mongodb") || !strcmp(type, "mongodb:plaintext")) {
+ return BACKEND_TYPE_MONGODB;
+ }
+
+ return BACKEND_TYPE_UNKNOWN;
+}
+
+/**
+ * Backend main
+ *
+ * The main thread used to control the backends.
+ *
+ * @param ptr a pointer to netdata_static_structure.
+ *
+ * @return It always returns NULL.
+ */
+void *backends_main(void *ptr) {
+ netdata_thread_cleanup_push(backends_main_cleanup, ptr);
+
+ int default_port = 0;
+ int sock = -1;
+ BUFFER *b = buffer_create(1), *response = buffer_create(1);
+ int (*backend_request_formatter)(BUFFER *, const char *, RRDHOST *, const char *, RRDSET *, RRDDIM *, time_t, time_t, BACKEND_OPTIONS) = NULL;
+ int (*backend_response_checker)(BUFFER *) = NULL;
+
+#if HAVE_KINESIS
+ int do_kinesis = 0;
+ char *kinesis_auth_key_id = NULL, *kinesis_secure_key = NULL, *kinesis_stream_name = NULL;
+#endif
+
+#if ENABLE_PROMETHEUS_REMOTE_WRITE
+ int do_prometheus_remote_write = 0;
+ BUFFER *http_request_header = NULL;
+#endif
+
+#if HAVE_MONGOC
+ int do_mongodb = 0;
+ char *mongodb_uri = NULL;
+ char *mongodb_database = NULL;
+ char *mongodb_collection = NULL;
+
+ // set the default socket timeout in ms
+ int32_t mongodb_default_socket_timeout = (global_backend_update_every >= 2) ? (int32_t)(global_backend_update_every * MSEC_PER_SEC - 500) : 1000;
+
+#endif
+
+#ifdef ENABLE_HTTPS
+ struct netdata_ssl opentsdb_ssl = {NULL , NETDATA_SSL_START};
+#endif
+
+ // ------------------------------------------------------------------------
+ // collect configuration options
+
+ struct timeval timeout = {
+ .tv_sec = 0,
+ .tv_usec = 0
+ };
+ int enabled = config_get_boolean(CONFIG_SECTION_BACKEND, "enabled", 0);
+ const char *source = config_get(CONFIG_SECTION_BACKEND, "data source", "average");
+ const char *type = config_get(CONFIG_SECTION_BACKEND, "type", "graphite");
+ const char *destination = config_get(CONFIG_SECTION_BACKEND, "destination", "localhost");
+ global_backend_prefix = config_get(CONFIG_SECTION_BACKEND, "prefix", "netdata");
+ const char *hostname = config_get(CONFIG_SECTION_BACKEND, "hostname", localhost->hostname);
+ global_backend_update_every = (int)config_get_number(CONFIG_SECTION_BACKEND, "update every", global_backend_update_every);
+ int buffer_on_failures = (int)config_get_number(CONFIG_SECTION_BACKEND, "buffer on failures", 10);
+ long timeoutms = config_get_number(CONFIG_SECTION_BACKEND, "timeout ms", global_backend_update_every * 2 * 1000);
+
+ if(config_get_boolean(CONFIG_SECTION_BACKEND, "send names instead of ids", (global_backend_options & BACKEND_OPTION_SEND_NAMES)))
+ global_backend_options |= BACKEND_OPTION_SEND_NAMES;
+ else
+ global_backend_options &= ~BACKEND_OPTION_SEND_NAMES;
+
+ charts_pattern = simple_pattern_create(config_get(CONFIG_SECTION_BACKEND, "send charts matching", "*"), NULL, SIMPLE_PATTERN_EXACT);
+ hosts_pattern = simple_pattern_create(config_get(CONFIG_SECTION_BACKEND, "send hosts matching", "localhost *"), NULL, SIMPLE_PATTERN_EXACT);
+
+#if ENABLE_PROMETHEUS_REMOTE_WRITE
+ const char *remote_write_path = config_get(CONFIG_SECTION_BACKEND, "remote write URL path", "/receive");
+#endif
+
+ // ------------------------------------------------------------------------
+ // validate configuration options
+ // and prepare for sending data to our backend
+
+ global_backend_options = backend_parse_data_source(source, global_backend_options);
+ global_backend_source = source;
+
+ if(timeoutms < 1) {
+ error("BACKEND: invalid timeout %ld ms given. Assuming %d ms.", timeoutms, global_backend_update_every * 2 * 1000);
+ timeoutms = global_backend_update_every * 2 * 1000;
+ }
+ timeout.tv_sec = (timeoutms * 1000) / 1000000;
+ timeout.tv_usec = (timeoutms * 1000) % 1000000;
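+ // e.g. with the default 10 second update interval, timeoutms = 20000,
+ // which becomes { .tv_sec = 20, .tv_usec = 0 }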
+
+ if(!enabled || global_backend_update_every < 1)
+ goto cleanup;
+
+ // ------------------------------------------------------------------------
+ // select the backend type
+ BACKEND_TYPE work_type = backend_select_type(type);
+ if (work_type == BACKEND_TYPE_UNKNOWN) {
+ error("BACKEND: Unknown backend type '%s'", type);
+ goto cleanup;
+ }
+
+ switch (work_type) {
+ case BACKEND_TYPE_OPENTSDB_USING_HTTP: {
+#ifdef ENABLE_HTTPS
+ if (!strcmp(type, "opentsdb:https")) {
+ security_start_ssl(NETDATA_SSL_CONTEXT_EXPORTING);
+ }
+#endif
+ backend_set_opentsdb_http_variables(&default_port,&backend_response_checker,&backend_request_formatter);
+ break;
+ }
+ case BACKEND_TYPE_PROMETHEUS_REMOTE_WRITE: {
+#if ENABLE_PROMETHEUS_REMOTE_WRITE
+ do_prometheus_remote_write = 1;
+
+ http_request_header = buffer_create(1);
+ backends_init_write_request();
+#else
+ error("BACKEND: Prometheus remote write support isn't compiled");
+#endif // ENABLE_PROMETHEUS_REMOTE_WRITE
+ backend_set_prometheus_variables(&default_port,&backend_response_checker,&backend_request_formatter);
+ break;
+ }
+ case BACKEND_TYPE_KINESIS: {
+#if HAVE_KINESIS
+ do_kinesis = 1;
+
+ if(unlikely(read_kinesis_conf(netdata_configured_user_config_dir, &kinesis_auth_key_id, &kinesis_secure_key, &kinesis_stream_name))) {
+ error("BACKEND: kinesis backend type is set but cannot read its configuration from %s/aws_kinesis.conf", netdata_configured_user_config_dir);
+ goto cleanup;
+ }
+
+ backends_kinesis_init(destination, kinesis_auth_key_id, kinesis_secure_key, timeout.tv_sec * 1000 + timeout.tv_usec / 1000);
+#else
+ error("BACKEND: AWS Kinesis support isn't compiled");
+#endif // HAVE_KINESIS
+ backend_set_kinesis_variables(&default_port,&backend_response_checker,&backend_request_formatter);
+ break;
+ }
+ case BACKEND_TYPE_MONGODB: {
+#if HAVE_MONGOC
+ if(unlikely(read_mongodb_conf(netdata_configured_user_config_dir,
+ &mongodb_uri,
+ &mongodb_database,
+ &mongodb_collection))) {
+ error("BACKEND: mongodb backend type is set but cannot read its configuration from %s/mongodb.conf",
+ netdata_configured_user_config_dir);
+ goto cleanup;
+ }
+
+ if(likely(!backends_mongodb_init(mongodb_uri, mongodb_database, mongodb_collection, mongodb_default_socket_timeout))) {
+ backend_set_mongodb_variables(&default_port, &backend_response_checker, &backend_request_formatter);
+ do_mongodb = 1;
+ }
+ else {
+ error("BACKEND: cannot initialize MongoDB backend");
+ goto cleanup;
+ }
+#else
+ error("BACKEND: MongoDB support isn't compiled");
+#endif // HAVE_MONGOC
+ break;
+ }
+ case BACKEND_TYPE_GRAPHITE: {
+ backend_set_graphite_variables(&default_port,&backend_response_checker,&backend_request_formatter);
+ break;
+ }
+ case BACKEND_TYPE_OPENTSDB_USING_TELNET: {
+ backend_set_opentsdb_telnet_variables(&default_port,&backend_response_checker,&backend_request_formatter);
+ break;
+ }
+ case BACKEND_TYPE_JSON: {
+ backend_set_json_variables(&default_port,&backend_response_checker,&backend_request_formatter);
+ break;
+ }
+ case BACKEND_TYPE_UNKNOWN: {
+ break;
+ }
+ default: {
+ break;
+ }
+ }
+
+#if ENABLE_PROMETHEUS_REMOTE_WRITE
+ if((backend_request_formatter == NULL && !do_prometheus_remote_write) || backend_response_checker == NULL) {
+#else
+ if(backend_request_formatter == NULL || backend_response_checker == NULL) {
+#endif
+ error("BACKEND: backend is misconfigured - disabling it.");
+ goto cleanup;
+ }
+
+
+// ------------------------------------------------------------------------
+// prepare the charts for monitoring the backend operation
+
+ struct rusage thread;
+
+ collected_number
+ chart_buffered_metrics = 0,
+ chart_lost_metrics = 0,
+ chart_sent_metrics = 0,
+ chart_buffered_bytes = 0,
+ chart_received_bytes = 0,
+ chart_sent_bytes = 0,
+ chart_receptions = 0,
+ chart_transmission_successes = 0,
+ chart_transmission_failures = 0,
+ chart_data_lost_events = 0,
+ chart_lost_bytes = 0,
+ chart_backend_reconnects = 0;
+ // chart_backend_latency = 0;
+
+ RRDSET *chart_metrics = rrdset_create_localhost("netdata", "backend_metrics", NULL, "backend", NULL, "Netdata Buffered Metrics", "metrics", "backends", NULL, 130600, global_backend_update_every, RRDSET_TYPE_LINE);
+ rrddim_add(chart_metrics, "buffered", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ rrddim_add(chart_metrics, "lost", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ rrddim_add(chart_metrics, "sent", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+
+ RRDSET *chart_bytes = rrdset_create_localhost("netdata", "backend_bytes", NULL, "backend", NULL, "Netdata Backend Data Size", "KiB", "backends", NULL, 130610, global_backend_update_every, RRDSET_TYPE_AREA);
+ rrddim_add(chart_bytes, "buffered", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
+ rrddim_add(chart_bytes, "lost", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
+ rrddim_add(chart_bytes, "sent", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
+ rrddim_add(chart_bytes, "received", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
+
+ RRDSET *chart_ops = rrdset_create_localhost("netdata", "backend_ops", NULL, "backend", NULL, "Netdata Backend Operations", "operations", "backends", NULL, 130630, global_backend_update_every, RRDSET_TYPE_LINE);
+ rrddim_add(chart_ops, "write", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ rrddim_add(chart_ops, "discard", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ rrddim_add(chart_ops, "reconnect", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ rrddim_add(chart_ops, "failure", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ rrddim_add(chart_ops, "read", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+
+ /*
+ * this is misleading - we can only measure the time we need to send data
+ * this time is not related to the time required for the data to travel to
+ * the backend database and the time that server needed to process them
+ *
+ * issue #1432 and https://www.softlab.ntua.gr/facilities/documentation/unix/unix-socket-faq/unix-socket-faq-2.html
+ *
+ RRDSET *chart_latency = rrdset_create_localhost("netdata", "backend_latency", NULL, "backend", NULL, "Netdata Backend Latency", "ms", "backends", NULL, 130620, global_backend_update_every, RRDSET_TYPE_AREA);
+ rrddim_add(chart_latency, "latency", NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE);
+ */
+
+ RRDSET *chart_rusage = rrdset_create_localhost("netdata", "backend_thread_cpu", NULL, "backend", NULL, "Netdata Backend Thread CPU usage", "milliseconds/s", "backends", NULL, 130630, global_backend_update_every, RRDSET_TYPE_STACKED);
+ rrddim_add(chart_rusage, "user", NULL, 1, 1000, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(chart_rusage, "system", NULL, 1, 1000, RRD_ALGORITHM_INCREMENTAL);
+
+
+ // ------------------------------------------------------------------------
+ // prepare the backend main loop
+
+ info("BACKEND: configured ('%s' on '%s' sending '%s' data, every %d seconds, as host '%s', with prefix '%s')", type, destination, source, global_backend_update_every, hostname, global_backend_prefix);
+ send_statistics("BACKEND_START", "OK", type);
+
+ usec_t step_ut = global_backend_update_every * USEC_PER_SEC;
+ time_t after = now_realtime_sec();
+ int failures = 0;
+ heartbeat_t hb;
+ heartbeat_init(&hb);
+
+ while(!netdata_exit) {
+
+ // ------------------------------------------------------------------------
+ // Wait for the next iteration point.
+
+ heartbeat_next(&hb, step_ut);
+ time_t before = now_realtime_sec();
+ debug(D_BACKEND, "BACKEND: preparing buffer for timeframe %lu to %lu", (unsigned long)after, (unsigned long)before);
+
+ // ------------------------------------------------------------------------
+ // add to the buffer the data we need to send to the backend
+
+ netdata_thread_disable_cancelability();
+
+ size_t count_hosts = 0;
+ size_t count_charts_total = 0;
+ size_t count_dims_total = 0;
+
+#if ENABLE_PROMETHEUS_REMOTE_WRITE
+ if(do_prometheus_remote_write)
+ backends_clear_write_request();
+#endif
+ rrd_rdlock();
+ RRDHOST *host;
+ rrdhost_foreach_read(host) {
+ if(unlikely(!rrdhost_flag_check(host, RRDHOST_FLAG_BACKEND_SEND|RRDHOST_FLAG_BACKEND_DONT_SEND))) {
+ char *name = (host == localhost)?"localhost":host->hostname;
+ if (!hosts_pattern || simple_pattern_matches(hosts_pattern, name)) {
+ rrdhost_flag_set(host, RRDHOST_FLAG_BACKEND_SEND);
+ info("enabled backend for host '%s'", name);
+ }
+ else {
+ rrdhost_flag_set(host, RRDHOST_FLAG_BACKEND_DONT_SEND);
+ info("disabled backend for host '%s'", name);
+ }
+ }
+
+ if(unlikely(!rrdhost_flag_check(host, RRDHOST_FLAG_BACKEND_SEND)))
+ continue;
+
+ rrdhost_rdlock(host);
+
+ count_hosts++;
+ size_t count_charts = 0;
+ size_t count_dims = 0;
+ size_t count_dims_skipped = 0;
+
+ const char *__hostname = (host == localhost)?hostname:host->hostname;
+
+#if ENABLE_PROMETHEUS_REMOTE_WRITE
+ if(do_prometheus_remote_write) {
+ backends_rrd_stats_remote_write_allmetrics_prometheus(
+ host
+ , __hostname
+ , global_backend_prefix
+ , global_backend_options
+ , after
+ , before
+ , &count_charts
+ , &count_dims
+ , &count_dims_skipped
+ );
+ chart_buffered_metrics += count_dims;
+ }
+ else
+#endif
+ {
+ RRDSET *st;
+ rrdset_foreach_read(st, host) {
+ if(likely(backends_can_send_rrdset(global_backend_options, st))) {
+ rrdset_rdlock(st);
+
+ count_charts++;
+
+ RRDDIM *rd;
+ rrddim_foreach_read(rd, st) {
+ if (likely(rd->last_collected_time.tv_sec >= after)) {
+ chart_buffered_metrics += backend_request_formatter(b, global_backend_prefix, host, __hostname, st, rd, after, before, global_backend_options);
+ count_dims++;
+ }
+ else {
+ debug(D_BACKEND, "BACKEND: not sending dimension '%s' of chart '%s' from host '%s', its last data collection (%lu) is not within our timeframe (%lu to %lu)", rd->id, st->id, __hostname, (unsigned long)rd->last_collected_time.tv_sec, (unsigned long)after, (unsigned long)before);
+ count_dims_skipped++;
+ }
+ }
+
+ rrdset_unlock(st);
+ }
+ }
+ }
+
+ debug(D_BACKEND, "BACKEND: sending host '%s', metrics of %zu dimensions, of %zu charts. Skipped %zu dimensions.", __hostname, count_dims, count_charts, count_dims_skipped);
+ count_charts_total += count_charts;
+ count_dims_total += count_dims;
+
+ rrdhost_unlock(host);
+ }
+ rrd_unlock();
+
+ netdata_thread_enable_cancelability();
+
+ debug(D_BACKEND, "BACKEND: buffer has %zu bytes, added metrics for %zu dimensions, of %zu charts, from %zu hosts", buffer_strlen(b), count_dims_total, count_charts_total, count_hosts);
+
+ // ------------------------------------------------------------------------
+
+ chart_buffered_bytes = (collected_number)buffer_strlen(b);
+
+ // reset the monitoring chart counters
+ chart_received_bytes =
+ chart_sent_bytes =
+ chart_sent_metrics =
+ chart_lost_metrics =
+ chart_receptions =
+ chart_transmission_successes =
+ chart_transmission_failures =
+ chart_data_lost_events =
+ chart_lost_bytes =
+ chart_backend_reconnects = 0;
+ // chart_backend_latency = 0;
+
+ if(unlikely(netdata_exit)) break;
+
+ //fprintf(stderr, "\nBACKEND BEGIN:\n%s\nBACKEND END\n", buffer_tostring(b));
+ //fprintf(stderr, "after = %lu, before = %lu\n", after, before);
+
+ // prepare for the next iteration
+ // to add incrementally data to buffer
+ after = before;
+
+#if HAVE_KINESIS
+ if(do_kinesis) {
+ unsigned long long partition_key_seq = 0;
+
+ size_t buffer_len = buffer_strlen(b);
+ size_t sent = 0;
+
+ while(sent < buffer_len) {
+ char partition_key[KINESIS_PARTITION_KEY_MAX + 1];
+ snprintf(partition_key, KINESIS_PARTITION_KEY_MAX, "netdata_%llu", partition_key_seq++);
+ size_t partition_key_len = strnlen(partition_key, KINESIS_PARTITION_KEY_MAX);
+
+ const char *first_char = buffer_tostring(b) + sent;
+
+ size_t record_len = 0;
+
+ // split buffer into chunks of maximum allowed size
+ if(buffer_len - sent < KINESIS_RECORD_MAX - partition_key_len) {
+ record_len = buffer_len - sent;
+ }
+ else {
+ record_len = KINESIS_RECORD_MAX - partition_key_len;
+ while(*(first_char + record_len) != '\n' && record_len) record_len--;
+ }
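+
+ // e.g. assuming KINESIS_RECORD_MAX is the 1 MiB Kinesis record limit and
+ // the partition key is "netdata_0" (9 bytes), each record carries at most
+ // 1048567 bytes; when more data remains, record_len walks back to the
+ // previous '\n' so no metric line is split across two records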
+
+ char error_message[ERROR_LINE_MAX + 1] = "";
+
+ debug(D_BACKEND, "BACKEND: backends_kinesis_put_record(): dest = %s, id = %s, key = %s, stream = %s, partition_key = %s, \
+ buffer = %zu, record = %zu", destination, kinesis_auth_key_id, kinesis_secure_key, kinesis_stream_name,
+ partition_key, buffer_len, record_len);
+
+ backends_kinesis_put_record(kinesis_stream_name, partition_key, first_char, record_len);
+
+ sent += record_len;
+ chart_transmission_successes++;
+
+ size_t sent_bytes = 0, lost_bytes = 0;
+
+ if(unlikely(backends_kinesis_get_result(error_message, &sent_bytes, &lost_bytes))) {
+ // oops! we couldn't send (all or some of the) data
+ error("BACKEND: %s", error_message);
+ error("BACKEND: failed to write data to database backend '%s'. Willing to write %zu bytes, wrote %zu bytes.",
+ destination, sent_bytes, sent_bytes - lost_bytes);
+
+ chart_transmission_failures++;
+ chart_data_lost_events++;
+ chart_lost_bytes += lost_bytes;
+
+ // estimate the number of lost metrics, proportional to the lost bytes
+ chart_lost_metrics += (collected_number)(chart_buffered_metrics
+ * (buffer_len && (lost_bytes < buffer_len) ? (double)lost_bytes / buffer_len : 1));
+
+ break;
+ }
+ else {
+ chart_receptions++;
+ }
+
+ if(unlikely(netdata_exit)) break;
+ }
+
+ chart_sent_bytes += sent;
+ if(likely(sent == buffer_len))
+ chart_sent_metrics = chart_buffered_metrics;
+
+ buffer_flush(b);
+ } else
+#endif /* HAVE_KINESIS */
+
+#if HAVE_MONGOC
+ if(do_mongodb) {
+ size_t buffer_len = buffer_strlen(b);
+ size_t sent = 0;
+
+ while(sent < buffer_len) {
+ const char *first_char = buffer_tostring(b);
+
+ debug(D_BACKEND, "BACKEND: backends_mongodb_insert(): uri = %s, database = %s, collection = %s, \
+ buffer = %zu", mongodb_uri, mongodb_database, mongodb_collection, buffer_len);
+
+ if(likely(!backends_mongodb_insert((char *)first_char, (size_t)chart_buffered_metrics))) {
+ sent += buffer_len;
+ chart_transmission_successes++;
+ chart_receptions++;
+ }
+ else {
+ // oops! we couldn't send (all or some of the) data
+ error("BACKEND: failed to write data to database backend '%s'. Willing to write %zu bytes, wrote %zu bytes.",
+ mongodb_uri, buffer_len, 0UL);
+
+ chart_transmission_failures++;
+ chart_data_lost_events++;
+ chart_lost_bytes += buffer_len;
+
+ // estimate the number of lost metrics
+ chart_lost_metrics += (collected_number)chart_buffered_metrics;
+
+ break;
+ }
+
+ if(unlikely(netdata_exit)) break;
+ }
+
+ chart_sent_bytes += sent;
+ if(likely(sent == buffer_len))
+ chart_sent_metrics = chart_buffered_metrics;
+
+ buffer_flush(b);
+ } else
+#endif /* HAVE_MONGOC */
+
+ {
+
+ // ------------------------------------------------------------------------
+ // if we are connected, receive a response, without blocking
+
+ if(likely(sock != -1)) {
+ errno = 0;
+
+ // loop through to collect all data
+ while(sock != -1 && errno != EWOULDBLOCK) {
+ buffer_need_bytes(response, 4096);
+
+ ssize_t r;
+#ifdef ENABLE_HTTPS
+ if(opentsdb_ssl.conn && !opentsdb_ssl.flags) {
+ r = SSL_read(opentsdb_ssl.conn, &response->buffer[response->len], response->size - response->len);
+ } else {
+ r = recv(sock, &response->buffer[response->len], response->size - response->len, MSG_DONTWAIT);
+ }
+#else
+ r = recv(sock, &response->buffer[response->len], response->size - response->len, MSG_DONTWAIT);
+#endif
+ if(likely(r > 0)) {
+ // we received some data
+ response->len += r;
+ chart_received_bytes += r;
+ chart_receptions++;
+ }
+ else if(r == 0) {
+ error("BACKEND: '%s' closed the socket", destination);
+ close(sock);
+ sock = -1;
+ }
+ else {
+ // failed to receive data
+ if(errno != EAGAIN && errno != EWOULDBLOCK) {
+ error("BACKEND: cannot receive data from backend '%s'.", destination);
+ }
+ }
+ }
+
+ // if we received data, process them
+ if(buffer_strlen(response))
+ backend_response_checker(response);
+ }
+
+ // ------------------------------------------------------------------------
+ // if we are not connected, connect to a backend server
+
+ if(unlikely(sock == -1)) {
+ // usec_t start_ut = now_monotonic_usec();
+ size_t reconnects = 0;
+
+ sock = connect_to_one_of(destination, default_port, &timeout, &reconnects, NULL, 0);
+#ifdef ENABLE_HTTPS
+ if(sock != -1) {
+ if(netdata_exporting_ctx) {
+ if(!opentsdb_ssl.conn) {
+ opentsdb_ssl.conn = SSL_new(netdata_exporting_ctx);
+ if(!opentsdb_ssl.conn) {
+ error("Failed to allocate SSL structure %d.", sock);
+ opentsdb_ssl.flags = NETDATA_SSL_NO_HANDSHAKE;
+ }
+ } else {
+ SSL_clear(opentsdb_ssl.conn);
+ }
+ }
+
+ if(opentsdb_ssl.conn) {
+ if(SSL_set_fd(opentsdb_ssl.conn, sock) != 1) {
+ error("Failed to set the socket to the SSL on socket fd %d.", host->rrdpush_sender_socket);
+ opentsdb_ssl.flags = NETDATA_SSL_NO_HANDSHAKE;
+ } else {
+ opentsdb_ssl.flags = NETDATA_SSL_HANDSHAKE_COMPLETE;
+ SSL_set_connect_state(opentsdb_ssl.conn);
+ int err = SSL_connect(opentsdb_ssl.conn);
+ if (err != 1) {
+ err = SSL_get_error(opentsdb_ssl.conn, err);
+ error("SSL cannot connect with the server: %s ", ERR_error_string((long)SSL_get_error(opentsdb_ssl.conn, err), NULL));
+ opentsdb_ssl.flags = NETDATA_SSL_NO_HANDSHAKE;
+ } //TODO: check certificate here
+ }
+ }
+ }
+#endif
+ chart_backend_reconnects += reconnects;
+ // chart_backend_latency += now_monotonic_usec() - start_ut;
+ }
+
+ if(unlikely(netdata_exit)) break;
+
+ // ------------------------------------------------------------------------
+ // if we are connected, send our buffer to the backend server
+
+ if(likely(sock != -1)) {
+ size_t len = buffer_strlen(b);
+ // usec_t start_ut = now_monotonic_usec();
+ int flags = 0;
+ #ifdef MSG_NOSIGNAL
+ flags += MSG_NOSIGNAL;
+ #endif
+
+#if ENABLE_PROMETHEUS_REMOTE_WRITE
+ if(do_prometheus_remote_write) {
+ size_t data_size = backends_get_write_request_size();
+
+ if(unlikely(!data_size)) {
+ error("BACKEND: write request size is out of range");
+ continue;
+ }
+
+ buffer_flush(b);
+ buffer_need_bytes(b, data_size);
+ if(unlikely(backends_pack_write_request(b->buffer, &data_size))) {
+ error("BACKEND: cannot pack write request");
+ continue;
+ }
+ b->len = data_size;
+ chart_buffered_bytes = (collected_number)buffer_strlen(b);
+
+ buffer_flush(http_request_header);
+ buffer_sprintf(http_request_header,
+ "POST %s HTTP/1.1\r\n"
+ "Host: %s\r\n"
+ "Accept: */*\r\n"
+ "Content-Length: %zu\r\n"
+ "Content-Type: application/x-www-form-urlencoded\r\n\r\n",
+ remote_write_path,
+ destination,
+ data_size
+ );
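+
+ // e.g. with the default remote write path "/receive" and a destination
+ // of "prometheus:9090" (a placeholder), the header sent ahead of the
+ // packed write request would be:
+ //
+ //     POST /receive HTTP/1.1
+ //     Host: prometheus:9090
+ //     Accept: */*
+ //     Content-Length: <data_size>
+ //     Content-Type: application/x-www-form-urlencoded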
+
+ len = buffer_strlen(http_request_header);
+ send(sock, buffer_tostring(http_request_header), len, flags);
+
+ len = data_size;
+ }
+#endif
+
+ ssize_t written;
+#ifdef ENABLE_HTTPS
+ if(opentsdb_ssl.conn && !opentsdb_ssl.flags) {
+ written = SSL_write(opentsdb_ssl.conn, buffer_tostring(b), len);
+ } else {
+ written = send(sock, buffer_tostring(b), len, flags);
+ }
+#else
+ written = send(sock, buffer_tostring(b), len, flags);
+#endif
+
+ // chart_backend_latency += now_monotonic_usec() - start_ut;
+ if(written != -1 && (size_t)written == len) {
+ // we sent the data successfully
+ chart_transmission_successes++;
+ chart_sent_bytes += written;
+ chart_sent_metrics = chart_buffered_metrics;
+
+ // reset the failures count
+ failures = 0;
+
+ // empty the buffer
+ buffer_flush(b);
+ }
+ else {
+ // oops! we couldn't send (all or some of the) data
+ error("BACKEND: failed to write data to database backend '%s'. Willing to write %zu bytes, wrote %zd bytes. Will re-connect.", destination, len, written);
+ chart_transmission_failures++;
+
+ if(written != -1)
+ chart_sent_bytes += written;
+
+ // increment the counter we check for data loss
+ failures++;
+
+ // close the socket - we will re-open it next time
+ close(sock);
+ sock = -1;
+ }
+ }
+ else {
+ error("BACKEND: failed to update database backend '%s'", destination);
+ chart_transmission_failures++;
+
+ // increment the counter we check for data loss
+ failures++;
+ }
+ }
+
+
+#if ENABLE_PROMETHEUS_REMOTE_WRITE
+ if(do_prometheus_remote_write && failures) {
+ (void) buffer_on_failures;
+ failures = 0;
+ chart_lost_bytes = chart_buffered_bytes = backends_get_write_request_size(); // estimated write request size
+ chart_data_lost_events++;
+ chart_lost_metrics = chart_buffered_metrics;
+ } else
+#endif
+ if(failures > buffer_on_failures) {
+ // too bad! we are going to lose data
+ chart_lost_bytes += buffer_strlen(b);
+ error("BACKEND: reached %d backend failures. Flushing buffers to protect this host - this results in data loss on back-end server '%s'", failures, destination);
+ buffer_flush(b);
+ failures = 0;
+ chart_data_lost_events++;
+ chart_lost_metrics = chart_buffered_metrics;
+ }
+
+ if(unlikely(netdata_exit)) break;
+
+ // ------------------------------------------------------------------------
+ // update the monitoring charts
+
+ if(likely(chart_ops->counter_done)) rrdset_next(chart_ops);
+ rrddim_set(chart_ops, "read", chart_receptions);
+ rrddim_set(chart_ops, "write", chart_transmission_successes);
+ rrddim_set(chart_ops, "discard", chart_data_lost_events);
+ rrddim_set(chart_ops, "failure", chart_transmission_failures);
+ rrddim_set(chart_ops, "reconnect", chart_backend_reconnects);
+ rrdset_done(chart_ops);
+
+ if(likely(chart_metrics->counter_done)) rrdset_next(chart_metrics);
+ rrddim_set(chart_metrics, "buffered", chart_buffered_metrics);
+ rrddim_set(chart_metrics, "lost", chart_lost_metrics);
+ rrddim_set(chart_metrics, "sent", chart_sent_metrics);
+ rrdset_done(chart_metrics);
+
+ if(likely(chart_bytes->counter_done)) rrdset_next(chart_bytes);
+ rrddim_set(chart_bytes, "buffered", chart_buffered_bytes);
+ rrddim_set(chart_bytes, "lost", chart_lost_bytes);
+ rrddim_set(chart_bytes, "sent", chart_sent_bytes);
+ rrddim_set(chart_bytes, "received", chart_received_bytes);
+ rrdset_done(chart_bytes);
+
+ /*
+ if(likely(chart_latency->counter_done)) rrdset_next(chart_latency);
+ rrddim_set(chart_latency, "latency", chart_backend_latency);
+ rrdset_done(chart_latency);
+ */
+
+ getrusage(RUSAGE_THREAD, &thread);
+ if(likely(chart_rusage->counter_done)) rrdset_next(chart_rusage);
+ rrddim_set(chart_rusage, "user", thread.ru_utime.tv_sec * 1000000ULL + thread.ru_utime.tv_usec);
+ rrddim_set(chart_rusage, "system", thread.ru_stime.tv_sec * 1000000ULL + thread.ru_stime.tv_usec);
+ rrdset_done(chart_rusage);
+
+ if(likely(buffer_strlen(b) == 0))
+ chart_buffered_metrics = 0;
+
+ if(unlikely(netdata_exit)) break;
+ }
+
+cleanup:
+#if HAVE_KINESIS
+ if(do_kinesis) {
+ backends_kinesis_shutdown();
+ freez(kinesis_auth_key_id);
+ freez(kinesis_secure_key);
+ freez(kinesis_stream_name);
+ }
+#endif
+
+#if ENABLE_PROMETHEUS_REMOTE_WRITE
+ buffer_free(http_request_header);
+ if(do_prometheus_remote_write)
+ backends_protocol_buffers_shutdown();
+#endif
+
+#if HAVE_MONGOC
+ if(do_mongodb) {
+ backends_mongodb_cleanup();
+ freez(mongodb_uri);
+ freez(mongodb_database);
+ freez(mongodb_collection);
+ }
+#endif
+
+ if(sock != -1)
+ close(sock);
+
+ buffer_free(b);
+ buffer_free(response);
+
+#ifdef ENABLE_HTTPS
+ if(netdata_exporting_ctx) {
+ if(opentsdb_ssl.conn) {
+ SSL_free(opentsdb_ssl.conn);
+ }
+ }
+#endif
+
+ netdata_thread_cleanup_pop(1);
+ return NULL;
+}
diff --git a/backends/backends.h b/backends/backends.h
new file mode 100644
index 0000000..2f4efd9
--- /dev/null
+++ b/backends/backends.h
@@ -0,0 +1,97 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_BACKENDS_H
+#define NETDATA_BACKENDS_H 1
+
+#include "daemon/common.h"
+
+typedef enum backend_options {
+ BACKEND_OPTION_NONE = 0,
+
+ BACKEND_SOURCE_DATA_AS_COLLECTED = (1 << 0),
+ BACKEND_SOURCE_DATA_AVERAGE = (1 << 1),
+ BACKEND_SOURCE_DATA_SUM = (1 << 2),
+
+ BACKEND_OPTION_SEND_NAMES = (1 << 16)
+} BACKEND_OPTIONS;
+
+typedef enum backend_types {
+ BACKEND_TYPE_UNKNOWN, // Invalid type
+ BACKEND_TYPE_GRAPHITE, // Send plain text to Graphite
+ BACKEND_TYPE_OPENTSDB_USING_TELNET, // Send data to OpenTSDB using telnet API
+ BACKEND_TYPE_OPENTSDB_USING_HTTP, // Send data to OpenTSDB using HTTP API
+ BACKEND_TYPE_JSON, // Stores the data using JSON.
+ BACKEND_TYPE_PROMETHEUS_REMOTE_WRITE, // Send data using the Prometheus remote write protocol
+ BACKEND_TYPE_KINESIS, // Send message to AWS Kinesis
+ BACKEND_TYPE_MONGODB, // Send data to MongoDB collection
+ BACKEND_TYPE_NUM // Number of backend types
+} BACKEND_TYPE;
+
+typedef int (**backend_response_checker_t)(BUFFER *);
+typedef int (**backend_request_formatter_t)(BUFFER *, const char *, RRDHOST *, const char *, RRDSET *, RRDDIM *, time_t, time_t, BACKEND_OPTIONS);
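+
+// Note: these are pointers to function pointers, so backends_main() can pass
+// the addresses of its local callback pointers and let the
+// backend_set_*_variables() helpers fill them in, as in this sketch:
+//
+//     int (*checker)(BUFFER *) = NULL;
+//     int (*formatter)(BUFFER *, const char *, RRDHOST *, const char *,
+//                      RRDSET *, RRDDIM *, time_t, time_t, BACKEND_OPTIONS) = NULL;
+//     int default_port = 0;
+//     backend_set_json_variables(&default_port, &checker, &formatter);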
+
+#define BACKEND_OPTIONS_SOURCE_BITS (BACKEND_SOURCE_DATA_AS_COLLECTED|BACKEND_SOURCE_DATA_AVERAGE|BACKEND_SOURCE_DATA_SUM)
+#define BACKEND_OPTIONS_DATA_SOURCE(backend_options) (backend_options & BACKEND_OPTIONS_SOURCE_BITS)
+
+extern int global_backend_update_every;
+extern BACKEND_OPTIONS global_backend_options;
+extern const char *global_backend_source;
+extern const char *global_backend_prefix;
+
+extern void *backends_main(void *ptr);
+BACKEND_TYPE backend_select_type(const char *type);
+
+extern BACKEND_OPTIONS backend_parse_data_source(const char *source, BACKEND_OPTIONS backend_options);
+
+#ifdef BACKENDS_INTERNALS
+
+extern int backends_can_send_rrdset(BACKEND_OPTIONS backend_options, RRDSET *st);
+extern calculated_number backend_calculate_value_from_stored_data(
+ RRDSET *st // the chart
+ , RRDDIM *rd // the dimension
+ , time_t after // the start timestamp
+ , time_t before // the end timestamp
+ , BACKEND_OPTIONS backend_options // BACKEND_SOURCE_* bitmap
+ , time_t *first_timestamp // the timestamp of the first point used in this response
+ , time_t *last_timestamp // the timestamp that should be reported to backend
+);
+
+extern size_t backend_name_copy(char *d, const char *s, size_t usable);
+extern int discard_response(BUFFER *b, const char *backend);
+
+static inline char *strip_quotes(char *str) {
+ if(*str == '"' || *str == '\'') {
+ char *s;
+
+ str++;
+
+ s = str;
+ while(*s) s++;
+ if(s != str) s--;
+
+ if(*s == '"' || *s == '\'') *s = '\0';
+ }
+
+ return str;
+}
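+
+// Example: strip_quotes() removes one pair of surrounding quotes in place, so
+// a configured value of '"mongodb://localhost"' becomes mongodb://localhost;
+// quotes inside the string are left alone.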
+
+#endif // BACKENDS_INTERNALS
+
+#include "backends/prometheus/backend_prometheus.h"
+#include "backends/graphite/graphite.h"
+#include "backends/json/json.h"
+#include "backends/opentsdb/opentsdb.h"
+
+#if HAVE_KINESIS
+#include "backends/aws_kinesis/aws_kinesis.h"
+#endif
+
+#if ENABLE_PROMETHEUS_REMOTE_WRITE
+#include "backends/prometheus/remote_write/remote_write.h"
+#endif
+
+#if HAVE_MONGOC
+#include "backends/mongodb/mongodb.h"
+#endif
+
+#endif /* NETDATA_BACKENDS_H */
diff --git a/backends/graphite/Makefile.am b/backends/graphite/Makefile.am
new file mode 100644
index 0000000..babdcf0
--- /dev/null
+++ b/backends/graphite/Makefile.am
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
diff --git a/backends/graphite/graphite.c b/backends/graphite/graphite.c
new file mode 100644
index 0000000..f75a93a
--- /dev/null
+++ b/backends/graphite/graphite.c
@@ -0,0 +1,90 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#define BACKENDS_INTERNALS
+#include "graphite.h"
+
+// ----------------------------------------------------------------------------
+// graphite backend
+
+int backends_format_dimension_collected_graphite_plaintext(
+ BUFFER *b // the buffer to write data to
+ , const char *prefix // the prefix to use
+ , RRDHOST *host // the host this chart comes from
+ , const char *hostname // the hostname (to override host->hostname)
+ , RRDSET *st // the chart
+ , RRDDIM *rd // the dimension
+ , time_t after // the start timestamp
+ , time_t before // the end timestamp
+ , BACKEND_OPTIONS backend_options // BACKEND_SOURCE_* bitmap
+) {
+ (void)host;
+ (void)after;
+ (void)before;
+
+ char chart_name[RRD_ID_LENGTH_MAX + 1];
+ char dimension_name[RRD_ID_LENGTH_MAX + 1];
+ backend_name_copy(chart_name, (backend_options & BACKEND_OPTION_SEND_NAMES && st->name)?st->name:st->id, RRD_ID_LENGTH_MAX);
+ backend_name_copy(dimension_name, (backend_options & BACKEND_OPTION_SEND_NAMES && rd->name)?rd->name:rd->id, RRD_ID_LENGTH_MAX);
+
+ buffer_sprintf(
+ b
+ , "%s.%s.%s.%s%s%s " COLLECTED_NUMBER_FORMAT " %llu\n"
+ , prefix
+ , hostname
+ , chart_name
+ , dimension_name
+ , (host->tags)?";":""
+ , (host->tags)?host->tags:""
+ , rd->last_collected_value
+ , (unsigned long long)rd->last_collected_time.tv_sec
+ );
+
+ return 1;
+}
+
+int backends_format_dimension_stored_graphite_plaintext(
+ BUFFER *b // the buffer to write data to
+ , const char *prefix // the prefix to use
+ , RRDHOST *host // the host this chart comes from
+ , const char *hostname // the hostname (to override host->hostname)
+ , RRDSET *st // the chart
+ , RRDDIM *rd // the dimension
+ , time_t after // the start timestamp
+ , time_t before // the end timestamp
+ , BACKEND_OPTIONS backend_options // BACKEND_SOURCE_* bitmap
+) {
+ (void)host;
+
+ char chart_name[RRD_ID_LENGTH_MAX + 1];
+ char dimension_name[RRD_ID_LENGTH_MAX + 1];
+ backend_name_copy(chart_name, (backend_options & BACKEND_OPTION_SEND_NAMES && st->name)?st->name:st->id, RRD_ID_LENGTH_MAX);
+ backend_name_copy(dimension_name, (backend_options & BACKEND_OPTION_SEND_NAMES && rd->name)?rd->name:rd->id, RRD_ID_LENGTH_MAX);
+
+ time_t first_t = after, last_t = before;
+ calculated_number value = backend_calculate_value_from_stored_data(st, rd, after, before, backend_options, &first_t, &last_t);
+
+ if(!isnan(value)) {
+
+ buffer_sprintf(
+ b
+ , "%s.%s.%s.%s%s%s " CALCULATED_NUMBER_FORMAT " %llu\n"
+ , prefix
+ , hostname
+ , chart_name
+ , dimension_name
+ , (host->tags)?";":""
+ , (host->tags)?host->tags:""
+ , value
+ , (unsigned long long) last_t
+ );
+
+ return 1;
+ }
+ return 0;
+}
+
+int process_graphite_response(BUFFER *b) {
+ return discard_response(b, "graphite");
+}
+
+
diff --git a/backends/graphite/graphite.h b/backends/graphite/graphite.h
new file mode 100644
index 0000000..498a7fc
--- /dev/null
+++ b/backends/graphite/graphite.h
@@ -0,0 +1,35 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+
+#ifndef NETDATA_BACKEND_GRAPHITE_H
+#define NETDATA_BACKEND_GRAPHITE_H
+
+#include "backends/backends.h"
+
+extern int backends_format_dimension_collected_graphite_plaintext(
+ BUFFER *b // the buffer to write data to
+ , const char *prefix // the prefix to use
+ , RRDHOST *host // the host this chart comes from
+ , const char *hostname // the hostname (to override host->hostname)
+ , RRDSET *st // the chart
+ , RRDDIM *rd // the dimension
+ , time_t after // the start timestamp
+ , time_t before // the end timestamp
+ , BACKEND_OPTIONS backend_options // BACKEND_SOURCE_* bitmap
+);
+
+extern int backends_format_dimension_stored_graphite_plaintext(
+ BUFFER *b // the buffer to write data to
+ , const char *prefix // the prefix to use
+ , RRDHOST *host // the host this chart comes from
+ , const char *hostname // the hostname (to override host->hostname)
+ , RRDSET *st // the chart
+ , RRDDIM *rd // the dimension
+ , time_t after // the start timestamp
+ , time_t before // the end timestamp
+ , BACKEND_OPTIONS backend_options // BACKEND_SOURCE_* bitmap
+);
+
+extern int process_graphite_response(BUFFER *b);
+
+#endif //NETDATA_BACKEND_GRAPHITE_H
diff --git a/backends/json/Makefile.am b/backends/json/Makefile.am
new file mode 100644
index 0000000..babdcf0
--- /dev/null
+++ b/backends/json/Makefile.am
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
diff --git a/backends/json/json.c b/backends/json/json.c
new file mode 100644
index 0000000..0c7cc73
--- /dev/null
+++ b/backends/json/json.c
@@ -0,0 +1,152 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#define BACKENDS_INTERNALS
+#include "json.h"
+
+// ----------------------------------------------------------------------------
+// json backend
+
+int backends_format_dimension_collected_json_plaintext(
+ BUFFER *b // the buffer to write data to
+ , const char *prefix // the prefix to use
+ , RRDHOST *host // the host this chart comes from
+ , const char *hostname // the hostname (to override host->hostname)
+ , RRDSET *st // the chart
+ , RRDDIM *rd // the dimension
+ , time_t after // the start timestamp
+ , time_t before // the end timestamp
+ , BACKEND_OPTIONS backend_options // BACKEND_SOURCE_* bitmap
+) {
+ (void)host;
+ (void)after;
+ (void)before;
+ (void)backend_options;
+
+ const char *tags_pre = "", *tags_post = "", *tags = host->tags;
+ if(!tags) tags = "";
+
+ if(*tags) {
+ if(*tags == '{' || *tags == '[' || *tags == '"') {
+ tags_pre = "\"host_tags\":";
+ tags_post = ",";
+ }
+ else {
+ tags_pre = "\"host_tags\":\"";
+ tags_post = "\",";
+ }
+ }
+
+ buffer_sprintf(b, "{"
+ "\"prefix\":\"%s\","
+ "\"hostname\":\"%s\","
+ "%s%s%s"
+
+ "\"chart_id\":\"%s\","
+ "\"chart_name\":\"%s\","
+ "\"chart_family\":\"%s\","
+ "\"chart_context\": \"%s\","
+ "\"chart_type\":\"%s\","
+ "\"units\": \"%s\","
+
+ "\"id\":\"%s\","
+ "\"name\":\"%s\","
+ "\"value\":" COLLECTED_NUMBER_FORMAT ","
+
+ "\"timestamp\": %llu}\n",
+ prefix,
+ hostname,
+ tags_pre, tags, tags_post,
+
+ st->id,
+ st->name,
+ st->family,
+ st->context,
+ st->type,
+ st->units,
+
+ rd->id,
+ rd->name,
+ rd->last_collected_value,
+
+ (unsigned long long) rd->last_collected_time.tv_sec
+ );
+
+ return 1;
+}
+
+int backends_format_dimension_stored_json_plaintext(
+ BUFFER *b // the buffer to write data to
+ , const char *prefix // the prefix to use
+ , RRDHOST *host // the host this chart comes from
+ , const char *hostname // the hostname (to override host->hostname)
+ , RRDSET *st // the chart
+ , RRDDIM *rd // the dimension
+ , time_t after // the start timestamp
+ , time_t before // the end timestamp
+ , BACKEND_OPTIONS backend_options // BACKEND_SOURCE_* bitmap
+) {
+ (void)host;
+
+ time_t first_t = after, last_t = before;
+ calculated_number value = backend_calculate_value_from_stored_data(st, rd, after, before, backend_options, &first_t, &last_t);
+
+ if(!isnan(value)) {
+ const char *tags_pre = "", *tags_post = "", *tags = host->tags;
+ if(!tags) tags = "";
+
+ if(*tags) {
+ if(*tags == '{' || *tags == '[' || *tags == '"') {
+ tags_pre = "\"host_tags\":";
+ tags_post = ",";
+ }
+ else {
+ tags_pre = "\"host_tags\":\"";
+ tags_post = "\",";
+ }
+ }
+
+ buffer_sprintf(b, "{"
+ "\"prefix\":\"%s\","
+ "\"hostname\":\"%s\","
+ "%s%s%s"
+
+ "\"chart_id\":\"%s\","
+ "\"chart_name\":\"%s\","
+ "\"chart_family\":\"%s\","
+ "\"chart_context\": \"%s\","
+ "\"chart_type\":\"%s\","
+ "\"units\": \"%s\","
+
+ "\"id\":\"%s\","
+ "\"name\":\"%s\","
+ "\"value\":" CALCULATED_NUMBER_FORMAT ","
+
+ "\"timestamp\": %llu}\n",
+ prefix,
+ hostname,
+ tags_pre, tags, tags_post,
+
+ st->id,
+ st->name,
+ st->family,
+ st->context,
+ st->type,
+ st->units,
+
+ rd->id,
+ rd->name,
+ value,
+
+ (unsigned long long) last_t
+ );
+
+ return 1;
+ }
+ return 0;
+}
+
+int process_json_response(BUFFER *b) {
+ return discard_response(b, "json");
+}
+
+
diff --git a/backends/json/json.h b/backends/json/json.h
new file mode 100644
index 0000000..78ac376
--- /dev/null
+++ b/backends/json/json.h
@@ -0,0 +1,34 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_BACKEND_JSON_H
+#define NETDATA_BACKEND_JSON_H
+
+#include "backends/backends.h"
+
+extern int backends_format_dimension_collected_json_plaintext(
+ BUFFER *b // the buffer to write data to
+ , const char *prefix // the prefix to use
+ , RRDHOST *host // the host this chart comes from
+ , const char *hostname // the hostname (to override host->hostname)
+ , RRDSET *st // the chart
+ , RRDDIM *rd // the dimension
+ , time_t after // the start timestamp
+ , time_t before // the end timestamp
+ , BACKEND_OPTIONS backend_options // BACKEND_SOURCE_* bitmap
+);
+
+extern int backends_format_dimension_stored_json_plaintext(
+ BUFFER *b // the buffer to write data to
+ , const char *prefix // the prefix to use
+ , RRDHOST *host // the host this chart comes from
+ , const char *hostname // the hostname (to override host->hostname)
+ , RRDSET *st // the chart
+ , RRDDIM *rd // the dimension
+ , time_t after // the start timestamp
+ , time_t before // the end timestamp
+ , BACKEND_OPTIONS backend_options // BACKEND_SOURCE_* bitmap
+);
+
+extern int process_json_response(BUFFER *b);
+
+#endif //NETDATA_BACKEND_JSON_H
diff --git a/backends/mongodb/Makefile.am b/backends/mongodb/Makefile.am
new file mode 100644
index 0000000..161784b
--- /dev/null
+++ b/backends/mongodb/Makefile.am
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/backends/mongodb/README.md b/backends/mongodb/README.md
new file mode 100644
index 0000000..7c7996e
--- /dev/null
+++ b/backends/mongodb/README.md
@@ -0,0 +1,41 @@
+<!--
+title: "MongoDB backend"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/backends/mongodb/README.md
+-->
+
+# MongoDB backend
+
+## Prerequisites
+
+To use MongoDB as a backend, `libmongoc` 1.7.0 or higher should be
+[installed](http://mongoc.org/libmongoc/current/installing.html) first. Next, Netdata should be re-installed from
+source. The installer will detect that the required libraries are now available.
+
+## Configuration
+
+To enable sending data to the MongoDB backend, set the following options in `netdata.conf`:
+
+```conf
+[backend]
+ enabled = yes
+ type = mongodb
+```
+
+In the Netdata configuration directory run `./edit-config mongodb.conf` and set [MongoDB
+URI](https://docs.mongodb.com/manual/reference/connection-string/), database name, and collection name:
+
+```conf
+# URI
+uri = mongodb://<hostname>
+
+# database name
+database = your_database_name
+
+# collection name
+collection = your_collection_name
+```
+
+The default socket timeout depends on the backend update interval. The timeout is 500 ms shorter than the interval (but
+not less than 1000 ms). You can alter the timeout using the `sockettimeoutms` MongoDB URI option.
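+
+For example, a hypothetical URI that sets an explicit five-second socket timeout
+(the hostname is a placeholder):
+
+```conf
+uri = mongodb://<hostname>/?sockettimeoutms=5000
+```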
+
diff --git a/backends/mongodb/mongodb.c b/backends/mongodb/mongodb.c
new file mode 100644
index 0000000..d0527a7
--- /dev/null
+++ b/backends/mongodb/mongodb.c
@@ -0,0 +1,189 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#define BACKENDS_INTERNALS
+#include "mongodb.h"
+#include <mongoc.h>
+
+#define CONFIG_FILE_LINE_MAX ((CONFIG_MAX_NAME + CONFIG_MAX_VALUE + 1024) * 2)
+
+static mongoc_client_t *mongodb_client;
+static mongoc_collection_t *mongodb_collection;
+
+int backends_mongodb_init(const char *uri_string,
+ const char *database_string,
+ const char *collection_string,
+ int32_t default_socket_timeout) {
+ mongoc_uri_t *uri;
+ bson_error_t error;
+
+ mongoc_init();
+
+ uri = mongoc_uri_new_with_error(uri_string, &error);
+ if(unlikely(!uri)) {
+ error("BACKEND: failed to parse URI: %s. Error message: %s", uri_string, error.message);
+ return 1;
+ }
+
+ int32_t socket_timeout = mongoc_uri_get_option_as_int32(uri, MONGOC_URI_SOCKETTIMEOUTMS, default_socket_timeout);
+ if(!mongoc_uri_set_option_as_int32(uri, MONGOC_URI_SOCKETTIMEOUTMS, socket_timeout)) {
+ error("BACKEND: failed to set %s to the value %d", MONGOC_URI_SOCKETTIMEOUTMS, socket_timeout);
+ mongoc_uri_destroy(uri);
+ return 1;
+ }
+
+ mongodb_client = mongoc_client_new_from_uri(uri);
+ if(unlikely(!mongodb_client)) {
+ error("BACKEND: failed to create a new client");
+ mongoc_uri_destroy(uri);
+ return 1;
+ }
+
+ if(!mongoc_client_set_appname(mongodb_client, "netdata")) {
+ error("BACKEND: failed to set client appname");
+ }
+
+ mongodb_collection = mongoc_client_get_collection(mongodb_client, database_string, collection_string);
+
+ mongoc_uri_destroy(uri);
+
+ return 0;
+}
+
+void backends_free_bson(bson_t **insert, size_t n_documents) {
+ size_t i;
+
+ for(i = 0; i < n_documents; i++)
+ bson_destroy(insert[i]);
+
+ free(insert);
+}
+
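+// parse a buffer of newline-separated JSON documents (at most n_metrics of
+// them), convert each line to BSON, and insert them all with a single
+// insert_many call; returns 0 on success, 1 on any parse or insert error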
+int backends_mongodb_insert(char *data, size_t n_metrics) {
+ bson_t **insert = calloc(n_metrics, sizeof(bson_t *));
+ bson_error_t error;
+ char *start = data, *end = data;
+ size_t n_documents = 0;
+
+    while(*end && n_documents < n_metrics) { // '<' (not '<=') so we never write past the end of insert[]
+ while(*end && *end != '\n') end++;
+
+ if(likely(*end)) {
+ *end = '\0';
+ end++;
+ }
+ else {
+ break;
+ }
+
+ insert[n_documents] = bson_new_from_json((const uint8_t *)start, -1, &error);
+
+ if(unlikely(!insert[n_documents])) {
+ error("BACKEND: %s", error.message);
+ backends_free_bson(insert, n_documents);
+ return 1;
+ }
+
+ start = end;
+
+ n_documents++;
+ }
+
+ if(unlikely(!mongoc_collection_insert_many(mongodb_collection, (const bson_t **)insert, n_documents, NULL, NULL, &error))) {
+ error("BACKEND: %s", error.message);
+ backends_free_bson(insert, n_documents);
+ return 1;
+ }
+
+ backends_free_bson(insert, n_documents);
+
+ return 0;
+}
+
+void backends_mongodb_cleanup() {
+ mongoc_collection_destroy(mongodb_collection);
+ mongoc_client_destroy(mongodb_client);
+ mongoc_cleanup();
+
+ return;
+}
+
+int read_mongodb_conf(const char *path, char **uri_p, char **database_p, char **collection_p) {
+ char *uri = *uri_p;
+ char *database = *database_p;
+ char *collection = *collection_p;
+
+ if(unlikely(uri)) freez(uri);
+ if(unlikely(database)) freez(database);
+ if(unlikely(collection)) freez(collection);
+ uri = NULL;
+ database = NULL;
+ collection = NULL;
+
+ int line = 0;
+
+ char filename[FILENAME_MAX + 1];
+ snprintfz(filename, FILENAME_MAX, "%s/mongodb.conf", path);
+
+ char buffer[CONFIG_FILE_LINE_MAX + 1], *s;
+
+ debug(D_BACKEND, "BACKEND: opening config file '%s'", filename);
+
+ FILE *fp = fopen(filename, "r");
+ if(!fp) {
+ return 1;
+ }
+
+ while(fgets(buffer, CONFIG_FILE_LINE_MAX, fp) != NULL) {
+ buffer[CONFIG_FILE_LINE_MAX] = '\0';
+ line++;
+
+ s = trim(buffer);
+ if(!s || *s == '#') {
+            debug(D_BACKEND, "BACKEND: ignoring line %d of file '%s', it is empty or a comment.", line, filename);
+ continue;
+ }
+
+ char *name = s;
+ char *value = strchr(s, '=');
+ if(unlikely(!value)) {
+ error("BACKEND: ignoring line %d ('%s') of file '%s', there is no = in it.", line, s, filename);
+ continue;
+ }
+ *value = '\0';
+ value++;
+
+ name = trim(name);
+ value = trim(value);
+
+ if(unlikely(!name || *name == '#')) {
+ error("BACKEND: ignoring line %d of file '%s', name is empty.", line, filename);
+ continue;
+ }
+
+ if(!value)
+ value = "";
+ else
+ value = strip_quotes(value);
+
+ if(name[0] == 'u' && !strcmp(name, "uri")) {
+ uri = strdupz(value);
+ }
+ else if(name[0] == 'd' && !strcmp(name, "database")) {
+ database = strdupz(value);
+ }
+ else if(name[0] == 'c' && !strcmp(name, "collection")) {
+ collection = strdupz(value);
+ }
+ }
+
+ fclose(fp);
+
+ if(unlikely(!collection || !*collection)) {
+ error("BACKEND: collection name is a mandatory MongoDB parameter, but it is not configured");
+ return 1;
+ }
+
+ *uri_p = uri;
+ *database_p = database;
+ *collection_p = collection;
+
+ return 0;
+}
diff --git a/backends/mongodb/mongodb.conf b/backends/mongodb/mongodb.conf
new file mode 100644
index 0000000..11ea6ef
--- /dev/null
+++ b/backends/mongodb/mongodb.conf
@@ -0,0 +1,12 @@
+# MongoDB backend configuration
+#
+# All options in this file are mandatory
+
+# URI
+uri =
+
+# database name
+database =
+
+# collection name
+collection =
diff --git a/backends/mongodb/mongodb.h b/backends/mongodb/mongodb.h
new file mode 100644
index 0000000..cae9b09
--- /dev/null
+++ b/backends/mongodb/mongodb.h
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_BACKEND_MONGODB_H
+#define NETDATA_BACKEND_MONGODB_H
+
+#include "backends/backends.h"
+
+extern int backends_mongodb_init(const char *uri_string, const char *database_string, const char *collection_string, const int32_t socket_timeout);
+
+extern int backends_mongodb_insert(char *data, size_t n_metrics);
+
+extern void backends_mongodb_cleanup();
+
+extern int read_mongodb_conf(const char *path, char **uri_p, char **database_p, char **collection_p);
+
+#endif //NETDATA_BACKEND_MONGODB_H
diff --git a/backends/nc-backend.sh b/backends/nc-backend.sh
new file mode 100755
index 0000000..7280f86
--- /dev/null
+++ b/backends/nc-backend.sh
@@ -0,0 +1,158 @@
+#!/usr/bin/env bash
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# This is a simple backend database proxy, written in BASH, using the nc command.
+# Run the script without any parameters for help.
+
+MODE="${1}"
+MY_PORT="${2}"
+BACKEND_HOST="${3}"
+BACKEND_PORT="${4}"
+FILE="${NETDATA_NC_BACKEND_DIR-/tmp}/netdata-nc-backend-${MY_PORT}"
+
+log() {
+ logger --stderr --id=$$ --tag "netdata-nc-backend" "${*}"
+}
+
+mync() {
+ local ret
+
+ log "Running: nc ${*}"
+ nc "${@}"
+ ret=$?
+
+ log "nc stopped with return code ${ret}."
+
+ return ${ret}
+}
+
+listen_save_replay_forever() {
+ local file="${1}" port="${2}" real_backend_host="${3}" real_backend_port="${4}" ret delay=1 started ended
+
+ while true
+ do
+ log "Starting nc to listen on port ${port} and save metrics to ${file}"
+
+ started=$(date +%s)
+ mync -l -p "${port}" | tee -a -p --output-error=exit "${file}"
+ ended=$(date +%s)
+
+ if [ -s "${file}" ]
+ then
+ if [ ! -z "${real_backend_host}" ] && [ ! -z "${real_backend_port}" ]
+ then
+ log "Attempting to send the metrics to the real backend at ${real_backend_host}:${real_backend_port}"
+
+ mync "${real_backend_host}" "${real_backend_port}" <"${file}"
+ ret=$?
+
+ if [ ${ret} -eq 0 ]
+ then
+ log "Successfuly sent the metrics to ${real_backend_host}:${real_backend_port}"
+ mv "${file}" "${file}.old"
+ touch "${file}"
+ else
+ log "Failed to send the metrics to ${real_backend_host}:${real_backend_port} (nc returned ${ret}) - appending more data to ${file}"
+ fi
+ else
+ log "No backend configured - appending more data to ${file}"
+ fi
+ fi
+
+ # prevent a CPU hungry infinite loop
+ # if nc cannot listen to port
+ if [ $((ended - started)) -lt 5 ]
+ then
+ log "nc has been stopped too fast."
+ delay=30
+ else
+ delay=1
+ fi
+
+ log "Waiting ${delay} seconds before listening again for data."
+ sleep ${delay}
+ done
+}
+
+if [ "${MODE}" = "start" ]
+ then
+
+ # start the listener, in exclusive mode
+ # only one can use the same file/port at a time
+ {
+ flock -n 9
+ # shellcheck disable=SC2181
+ if [ $? -ne 0 ]
+ then
+ log "Cannot get exclusive lock on file ${FILE}.lock - Am I running multiple times?"
+ exit 2
+ fi
+
+ # save our PID to the lock file
+ echo "$$" >"${FILE}.lock"
+
+ listen_save_replay_forever "${FILE}" "${MY_PORT}" "${BACKEND_HOST}" "${BACKEND_PORT}"
+ ret=$?
+
+ log "listener exited."
+ exit ${ret}
+
+ } 9>>"${FILE}.lock"
+
+ # we can only get here if ${FILE}.lock cannot be created
+ log "Cannot create file ${FILE}."
+ exit 3
+
+elif [ "${MODE}" = "stop" ]
+ then
+
+ {
+ flock -n 9
+ # shellcheck disable=SC2181
+ if [ $? -ne 0 ]
+ then
+ pid=$(<"${FILE}".lock)
+ log "Killing process ${pid}..."
+ kill -TERM "-${pid}"
+ exit 0
+ fi
+
+ log "File ${FILE}.lock has been locked by me but it shouldn't. Is a collector running?"
+ exit 4
+
+ } 9<"${FILE}.lock"
+
+ log "File ${FILE}.lock does not exist. Is a collector running?"
+ exit 5
+
+else
+
+ cat <<EOF
+Usage:
+
+ "${0}" start|stop PORT [BACKEND_HOST BACKEND_PORT]
+
+    PORT         The port this script will listen on
+ (configure netdata to use this as a second backend)
+
+ BACKEND_HOST The real backend host
+ BACKEND_PORT The real backend port
+
+    This script can act as a fallback backend for netdata.
+    It will receive metrics from netdata, save them to
+    ${FILE}
+    and once netdata reconnects to the real backend, this script
+    will push all collected metrics to the real backend too and
+    wait for a failure to happen again.
+
+    Only one netdata can connect to this script at a time.
+    If you need a fallback for multiple netdata agents, run this
+    script multiple times on different ports.
+
+ You can run me in the background with this:
+
+ screen -d -m "${0}" start PORT [BACKEND_HOST BACKEND_PORT]
+EOF
+ exit 1
+fi
diff --git a/backends/opentsdb/Makefile.am b/backends/opentsdb/Makefile.am
new file mode 100644
index 0000000..babdcf0
--- /dev/null
+++ b/backends/opentsdb/Makefile.am
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
diff --git a/backends/opentsdb/README.md b/backends/opentsdb/README.md
new file mode 100644
index 0000000..5ba7b12
--- /dev/null
+++ b/backends/opentsdb/README.md
@@ -0,0 +1,38 @@
+<!--
+title: "OpenTSDB with HTTP"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/backends/opentsdb/README.md
+-->
+
+# OpenTSDB with HTTP
+
+Netdata can easily communicate with OpenTSDB using its HTTP API. To enable this channel, set the following options in
+your `netdata.conf`:
+
+```conf
+[backend]
+ type = opentsdb:http
+ destination = localhost:4242
+```
+
+In this example, OpenTSDB is running with its default port, which is `4242`. If you run OpenTSDB on a different port,
+change the `destination = localhost:4242` line accordingly.
+
+## HTTPS
+
+As of [v1.16.0](https://github.com/netdata/netdata/releases/tag/v1.16.0), Netdata can send metrics to OpenTSDB using
+TLS/SSL. Unfortunately, OpenTSDB does not support encrypted connections itself, so you will have to configure a
+reverse proxy to enable HTTPS communication between Netdata and OpenTSDB. You can set up a reverse proxy with
+[Nginx](/docs/Running-behind-nginx.md).
+
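+A minimal Nginx sketch for TLS termination in front of OpenTSDB (the certificate paths and ports are placeholders,
+and your setup may need more directives):
+
+```conf
+server {
+    listen 8082 ssl;
+    ssl_certificate     /etc/ssl/opentsdb.crt;
+    ssl_certificate_key /etc/ssl/opentsdb.key;
+
+    location / {
+        # forward the decrypted traffic to OpenTSDB on its default port
+        proxy_pass http://127.0.0.1:4242;
+    }
+}
+```
+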
+After your proxy is configured, make the following changes to `netdata.conf`:
+
+```conf
+[backend]
+ type = opentsdb:https
+ destination = localhost:8082
+```
+
+In this example, we used port `8082` for our reverse proxy. If your reverse proxy listens on a different port, change
+the `destination = localhost:8082` line accordingly.
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fbackends%2Fopentsdb%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/backends/opentsdb/opentsdb.c b/backends/opentsdb/opentsdb.c
new file mode 100644
index 0000000..965b4c0
--- /dev/null
+++ b/backends/opentsdb/opentsdb.c
@@ -0,0 +1,205 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#define BACKENDS_INTERNALS
+#include "opentsdb.h"
+
+// ----------------------------------------------------------------------------
+// opentsdb backend
+
+int backends_format_dimension_collected_opentsdb_telnet(
+ BUFFER *b // the buffer to write data to
+ , const char *prefix // the prefix to use
+ , RRDHOST *host // the host this chart comes from
+ , const char *hostname // the hostname (to override host->hostname)
+ , RRDSET *st // the chart
+ , RRDDIM *rd // the dimension
+ , time_t after // the start timestamp
+ , time_t before // the end timestamp
+ , BACKEND_OPTIONS backend_options // BACKEND_SOURCE_* bitmap
+) {
+ (void)host;
+ (void)after;
+ (void)before;
+
+ char chart_name[RRD_ID_LENGTH_MAX + 1];
+ char dimension_name[RRD_ID_LENGTH_MAX + 1];
+ backend_name_copy(chart_name, (backend_options & BACKEND_OPTION_SEND_NAMES && st->name)?st->name:st->id, RRD_ID_LENGTH_MAX);
+ backend_name_copy(dimension_name, (backend_options & BACKEND_OPTION_SEND_NAMES && rd->name)?rd->name:rd->id, RRD_ID_LENGTH_MAX);
+
+ buffer_sprintf(
+ b
+ , "put %s.%s.%s %llu " COLLECTED_NUMBER_FORMAT " host=%s%s%s\n"
+ , prefix
+ , chart_name
+ , dimension_name
+ , (unsigned long long)rd->last_collected_time.tv_sec
+ , rd->last_collected_value
+ , hostname
+ , (host->tags)?" ":""
+ , (host->tags)?host->tags:""
+ );
+
+ return 1;
+}
+
+int backends_format_dimension_stored_opentsdb_telnet(
+ BUFFER *b // the buffer to write data to
+ , const char *prefix // the prefix to use
+ , RRDHOST *host // the host this chart comes from
+ , const char *hostname // the hostname (to override host->hostname)
+ , RRDSET *st // the chart
+ , RRDDIM *rd // the dimension
+ , time_t after // the start timestamp
+ , time_t before // the end timestamp
+ , BACKEND_OPTIONS backend_options // BACKEND_SOURCE_* bitmap
+) {
+ (void)host;
+
+ time_t first_t = after, last_t = before;
+ calculated_number value = backend_calculate_value_from_stored_data(st, rd, after, before, backend_options, &first_t, &last_t);
+
+ char chart_name[RRD_ID_LENGTH_MAX + 1];
+ char dimension_name[RRD_ID_LENGTH_MAX + 1];
+ backend_name_copy(chart_name, (backend_options & BACKEND_OPTION_SEND_NAMES && st->name)?st->name:st->id, RRD_ID_LENGTH_MAX);
+ backend_name_copy(dimension_name, (backend_options & BACKEND_OPTION_SEND_NAMES && rd->name)?rd->name:rd->id, RRD_ID_LENGTH_MAX);
+
+ if(!isnan(value)) {
+
+ buffer_sprintf(
+ b
+ , "put %s.%s.%s %llu " CALCULATED_NUMBER_FORMAT " host=%s%s%s\n"
+ , prefix
+ , chart_name
+ , dimension_name
+ , (unsigned long long) last_t
+ , value
+ , hostname
+ , (host->tags)?" ":""
+ , (host->tags)?host->tags:""
+ );
+
+ return 1;
+ }
+
+ return 0;
+}
+
+int process_opentsdb_response(BUFFER *b) {
+ return discard_response(b, "opentsdb");
+}
+
+static inline void opentsdb_build_message(BUFFER *b, char *message, const char *hostname, int length) {
+ buffer_sprintf(
+ b
+ , "POST /api/put HTTP/1.1\r\n"
+ "Host: %s\r\n"
+ "Content-Type: application/json\r\n"
+ "Content-Length: %d\r\n"
+ "\r\n"
+ "%s"
+ , hostname
+ , length
+ , message
+ );
+}
+
+int backends_format_dimension_collected_opentsdb_http(
+ BUFFER *b // the buffer to write data to
+ , const char *prefix // the prefix to use
+ , RRDHOST *host // the host this chart comes from
+ , const char *hostname // the hostname (to override host->hostname)
+ , RRDSET *st // the chart
+ , RRDDIM *rd // the dimension
+ , time_t after // the start timestamp
+ , time_t before // the end timestamp
+ , BACKEND_OPTIONS backend_options // BACKEND_SOURCE_* bitmap
+) {
+ (void)host;
+ (void)after;
+ (void)before;
+
+ char message[1024];
+ char chart_name[RRD_ID_LENGTH_MAX + 1];
+ char dimension_name[RRD_ID_LENGTH_MAX + 1];
+ backend_name_copy(chart_name, (backend_options & BACKEND_OPTION_SEND_NAMES && st->name)?st->name:st->id, RRD_ID_LENGTH_MAX);
+ backend_name_copy(dimension_name, (backend_options & BACKEND_OPTION_SEND_NAMES && rd->name)?rd->name:rd->id, RRD_ID_LENGTH_MAX);
+
+ int length = snprintfz(message
+ , sizeof(message)
+ , "{"
+ " \"metric\": \"%s.%s.%s\","
+ " \"timestamp\": %llu,"
+ " \"value\": "COLLECTED_NUMBER_FORMAT ","
+ " \"tags\": {"
+ " \"host\": \"%s%s%s\""
+ " }"
+ "}"
+ , prefix
+ , chart_name
+ , dimension_name
+ , (unsigned long long)rd->last_collected_time.tv_sec
+ , rd->last_collected_value
+ , hostname
+ , (host->tags)?" ":""
+ , (host->tags)?host->tags:""
+ );
+
+ if(length > 0) {
+ opentsdb_build_message(b, message, hostname, length);
+ }
+
+ return 1;
+}
+
+int backends_format_dimension_stored_opentsdb_http(
+ BUFFER *b // the buffer to write data to
+ , const char *prefix // the prefix to use
+ , RRDHOST *host // the host this chart comes from
+ , const char *hostname // the hostname (to override host->hostname)
+ , RRDSET *st // the chart
+ , RRDDIM *rd // the dimension
+ , time_t after // the start timestamp
+ , time_t before // the end timestamp
+ , BACKEND_OPTIONS backend_options // BACKEND_SOURCE_* bitmap
+) {
+ (void)host;
+
+ time_t first_t = after, last_t = before;
+ calculated_number value = backend_calculate_value_from_stored_data(st, rd, after, before, backend_options, &first_t, &last_t);
+
+ if(!isnan(value)) {
+ char chart_name[RRD_ID_LENGTH_MAX + 1];
+ char dimension_name[RRD_ID_LENGTH_MAX + 1];
+ backend_name_copy(chart_name, (backend_options & BACKEND_OPTION_SEND_NAMES && st->name)?st->name:st->id, RRD_ID_LENGTH_MAX);
+ backend_name_copy(dimension_name, (backend_options & BACKEND_OPTION_SEND_NAMES && rd->name)?rd->name:rd->id, RRD_ID_LENGTH_MAX);
+
+ char message[1024];
+ int length = snprintfz(message
+ , sizeof(message)
+ , "{"
+ " \"metric\": \"%s.%s.%s\","
+ " \"timestamp\": %llu,"
+ " \"value\": "CALCULATED_NUMBER_FORMAT ","
+ " \"tags\": {"
+ " \"host\": \"%s%s%s\""
+ " }"
+ "}"
+ , prefix
+ , chart_name
+ , dimension_name
+ , (unsigned long long)last_t
+ , value
+ , hostname
+ , (host->tags)?" ":""
+ , (host->tags)?host->tags:""
+ );
+
+ if(length > 0) {
+ opentsdb_build_message(b, message, hostname, length);
+ }
+
+ return 1;
+ }
+
+ return 0;
+}
diff --git a/backends/opentsdb/opentsdb.h b/backends/opentsdb/opentsdb.h
new file mode 100644
index 0000000..87d9c5c
--- /dev/null
+++ b/backends/opentsdb/opentsdb.h
@@ -0,0 +1,58 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_BACKEND_OPENTSDB_H
+#define NETDATA_BACKEND_OPENTSDB_H
+
+#include "backends/backends.h"
+
+extern int backends_format_dimension_collected_opentsdb_telnet(
+ BUFFER *b // the buffer to write data to
+ , const char *prefix // the prefix to use
+ , RRDHOST *host // the host this chart comes from
+ , const char *hostname // the hostname (to override host->hostname)
+ , RRDSET *st // the chart
+ , RRDDIM *rd // the dimension
+ , time_t after // the start timestamp
+ , time_t before // the end timestamp
+ , BACKEND_OPTIONS backend_options // BACKEND_SOURCE_* bitmap
+);
+
+extern int backends_format_dimension_stored_opentsdb_telnet(
+ BUFFER *b // the buffer to write data to
+ , const char *prefix // the prefix to use
+ , RRDHOST *host // the host this chart comes from
+ , const char *hostname // the hostname (to override host->hostname)
+ , RRDSET *st // the chart
+ , RRDDIM *rd // the dimension
+ , time_t after // the start timestamp
+ , time_t before // the end timestamp
+ , BACKEND_OPTIONS backend_options // BACKEND_SOURCE_* bitmap
+);
+
+extern int process_opentsdb_response(BUFFER *b);
+
+int backends_format_dimension_collected_opentsdb_http(
+ BUFFER *b // the buffer to write data to
+ , const char *prefix // the prefix to use
+ , RRDHOST *host // the host this chart comes from
+ , const char *hostname // the hostname (to override host->hostname)
+ , RRDSET *st // the chart
+ , RRDDIM *rd // the dimension
+ , time_t after // the start timestamp
+ , time_t before // the end timestamp
+ , BACKEND_OPTIONS backend_options // BACKEND_SOURCE_* bitmap
+);
+
+int backends_format_dimension_stored_opentsdb_http(
+ BUFFER *b // the buffer to write data to
+ , const char *prefix // the prefix to use
+ , RRDHOST *host // the host this chart comes from
+ , const char *hostname // the hostname (to override host->hostname)
+ , RRDSET *st // the chart
+ , RRDDIM *rd // the dimension
+ , time_t after // the start timestamp
+ , time_t before // the end timestamp
+ , BACKEND_OPTIONS backend_options // BACKEND_SOURCE_* bitmap
+);
+
+#endif //NETDATA_BACKEND_OPENTSDB_H
diff --git a/backends/prometheus/Makefile.am b/backends/prometheus/Makefile.am
new file mode 100644
index 0000000..334fca8
--- /dev/null
+++ b/backends/prometheus/Makefile.am
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+SUBDIRS = \
+ remote_write \
+ $(NULL)
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/backends/prometheus/README.md b/backends/prometheus/README.md
new file mode 100644
index 0000000..10275fa
--- /dev/null
+++ b/backends/prometheus/README.md
@@ -0,0 +1,457 @@
+<!--
+title: "Using Netdata with Prometheus"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/backends/prometheus/README.md
+-->
+
+# Using Netdata with Prometheus
+
+> IMPORTANT: the format in which Netdata sends metrics to Prometheus has changed since Netdata v1.7. The new
+> Prometheus backend for Netdata supports many more features and is aligned with the development of the rest of the
+> Netdata backends.
+
+Prometheus is a distributed monitoring system which offers a very simple setup along with a robust data model.
+Recently Netdata added support for Prometheus. I'm going to quickly show you how to install both Netdata and
+Prometheus on the same server. We can then use Grafana, pointed at Prometheus, to obtain the long-term metrics Netdata
+offers. I'm assuming we are starting from a fresh Ubuntu shell (whether you'd like to follow along in a VM or a cloud
+instance is up to you).
+
+## Installing Netdata and Prometheus
+
+### Installing Netdata
+
+There are a number of ways to install Netdata according to [Installation](/packaging/installer/README.md). The
+suggested way is to install the latest Netdata and keep it upgraded automatically, using the one-line installation:
+
+```sh
+bash <(curl -Ss https://my-netdata.io/kickstart.sh)
+```
+
+At this point we should have Netdata listening on port 19999. Point your browser here:
+
+```sh
+http://your.netdata.ip:19999
+```
+
+_(replace `your.netdata.ip` with the IP or hostname of the server running Netdata)_
+
+### Installing Prometheus
+
+To install Prometheus, we are going to introduce our own systemd startup script along with an example `prometheus.yml`
+configuration. Prometheus needs to be pointed at your server's specific target URL so it can scrape Netdata's API.
+Prometheus uses a pull model, meaning Netdata is the passive client within this architecture; Prometheus always
+initiates the connection with Netdata.
+
+#### Download Prometheus
+
+```sh
+cd /tmp && curl -s https://api.github.com/repos/prometheus/prometheus/releases/latest \
+| grep "browser_download_url.*linux-amd64.tar.gz" \
+| cut -d '"' -f 4 \
+| wget -qi -
+```
+
+#### Create prometheus system user
+
+```sh
+sudo useradd -r prometheus
+```
+
+#### Create prometheus directory
+
+```sh
+sudo mkdir /opt/prometheus
+sudo chown prometheus:prometheus /opt/prometheus
+```
+
+#### Untar prometheus directory
+
+```sh
+sudo tar -xvf /tmp/prometheus-*linux-amd64.tar.gz -C /opt/prometheus --strip=1
+```
+
+#### Install prometheus.yml
+
+We will use the following `prometheus.yml` file. Save it at `/opt/prometheus/prometheus.yml`.
+
+Make sure to replace `your.netdata.ip` with the IP or hostname of the host running Netdata.
+
+```yaml
+# my global config
+global:
+ scrape_interval: 5s # Set the scrape interval to every 5 seconds. Default is every 1 minute.
+ evaluation_interval: 5s # Evaluate rules every 5 seconds. The default is every 1 minute.
+ # scrape_timeout is set to the global default (10s).
+
+ # Attach these labels to any time series or alerts when communicating with
+ # external systems (federation, remote storage, Alertmanager).
+ external_labels:
+ monitor: 'codelab-monitor'
+
+# Load rules once and periodically evaluate them according to the global 'evaluation_interval'.
+rule_files:
+ # - "first.rules"
+ # - "second.rules"
+
+# A scrape configuration containing exactly one endpoint to scrape:
+# Here it's Prometheus itself.
+scrape_configs:
+ # The job name is added as a label `job=<job_name>` to any timeseries scraped from this config.
+ - job_name: 'prometheus'
+
+ # metrics_path defaults to '/metrics'
+ # scheme defaults to 'http'.
+
+ static_configs:
+ - targets: ['0.0.0.0:9090']
+
+ - job_name: 'netdata-scrape'
+
+ metrics_path: '/api/v1/allmetrics'
+ params:
+ # format: prometheus | prometheus_all_hosts
+ # You can use `prometheus_all_hosts` if you want Prometheus to set the `instance` to your hostname instead of IP
+ format: [prometheus]
+ #
+ # sources: as-collected | raw | average | sum | volume
+ # default is: average
+ #source: [as-collected]
+ #
+ # server name for this prometheus - the default is the client IP
+ # for Netdata to uniquely identify it
+ #server: ['prometheus1']
+ honor_labels: true
+
+ static_configs:
+ - targets: ['{your.netdata.ip}:19999']
+```
+
+#### Install nodes.yml
+
+The following is completely optional; it enables Prometheus to generate alerts from some Netdata sources. Tweak the
+values to your own needs. We will use the `nodes.yml` file below. Save it at `/opt/prometheus/nodes.yml`, and add a
+_- "nodes.yml"_ entry under the _rule_files:_ section in the example prometheus.yml file above.
+
+```yaml
+groups:
+- name: nodes
+
+ rules:
+ - alert: node_high_cpu_usage_70
+ expr: avg(rate(netdata_cpu_cpu_percentage_average{dimension="idle"}[1m])) by (job) > 70
+ for: 1m
+ annotations:
+ description: '{{ $labels.job }} on ''{{ $labels.job }}'' CPU usage is at {{ humanize $value }}%.'
+ summary: CPU alert for container node '{{ $labels.job }}'
+
+ - alert: node_high_memory_usage_70
+ expr: 100 / sum(netdata_system_ram_MB_average) by (job)
+ * sum(netdata_system_ram_MB_average{dimension=~"free|cached"}) by (job) < 30
+ for: 1m
+ annotations:
+ description: '{{ $labels.job }} memory usage is {{ humanize $value}}%.'
+ summary: Memory alert for container node '{{ $labels.job }}'
+
+ - alert: node_low_root_filesystem_space_20
+ expr: 100 / sum(netdata_disk_space_GB_average{family="/"}) by (job)
+ * sum(netdata_disk_space_GB_average{family="/",dimension=~"avail|cached"}) by (job) < 20
+ for: 1m
+ annotations:
+ description: '{{ $labels.job }} root filesystem space is {{ humanize $value}}%.'
+ summary: Root filesystem alert for container node '{{ $labels.job }}'
+
+ - alert: node_root_filesystem_fill_rate_6h
+ expr: predict_linear(netdata_disk_space_GB_average{family="/",dimension=~"avail|cached"}[1h], 6 * 3600) < 0
+ for: 1h
+ labels:
+ severity: critical
+ annotations:
+ description: Container node {{ $labels.job }} root filesystem is going to fill up in 6h.
+ summary: Disk fill alert for Swarm node '{{ $labels.job }}'
+```
+
+#### Install prometheus.service
+
+Save this service file as `/etc/systemd/system/prometheus.service`:
+
+```sh
+[Unit]
+Description=Prometheus Server
+AssertPathExists=/opt/prometheus
+
+[Service]
+Type=simple
+WorkingDirectory=/opt/prometheus
+User=prometheus
+Group=prometheus
+ExecStart=/opt/prometheus/prometheus --config.file=/opt/prometheus/prometheus.yml --log.level=info
+ExecReload=/bin/kill -SIGHUP $MAINPID
+ExecStop=/bin/kill -SIGINT $MAINPID
+
+[Install]
+WantedBy=multi-user.target
+```
+
+##### Start Prometheus
+
+```sh
+sudo systemctl start prometheus
+sudo systemctl enable prometheus
+```
+
+Prometheus should now start and listen on port 9090. Point your browser there to confirm.
+
+If everything is working correctly, fetching `http://your.prometheus.ip:9090` will show a 'Status' tab. Click it,
+then click 'Targets'; you should see the Netdata host as a scraped target.
+
+---
+
+## Netdata support for Prometheus
+
+> IMPORTANT: the format in which Netdata sends metrics to Prometheus has changed since Netdata v1.6. The new format
+> allows easier queries for metrics and supports both `as collected` and normalized metrics.
+
+Before explaining the changes, we have to understand the key differences between Netdata and Prometheus.
+
+### Understanding Netdata metrics
+
+#### Charts
+
+Each chart in Netdata has several properties (common to all its metrics):
+
+- `chart_id` - uniquely identifies a chart.
+
+- `chart_name` - a more human-friendly name for `chart_id`, also unique.
+
+- `context` - this is the template of the chart. All disk I/O charts have the same context, all MySQL request charts
+  have the same context, etc. This is used by alarm templates to match all the charts they should be attached to.
+
+- `family` groups a set of charts together. It is used as the submenu of the dashboard.
+
+- `units` is the units for all the metrics attached to the chart.
+
+#### Dimensions
+
+Each Netdata chart then contains metrics called `dimensions`. All the dimensions of a chart have the same units of
+measurement, and are contextually in the same category (i.e., the metrics for disk bandwidth are `read` and `write`,
+and they are both in the same chart).
+
+### Netdata data source
+
+Netdata can send metrics to Prometheus from 3 data sources:
+
+- `as collected` or `raw` - this data source sends the metrics to Prometheus as they are collected. No conversion is
+  done by Netdata. The latest value for each metric is simply given to Prometheus. This is the method Prometheus
+  prefers, but it is also the hardest to work with. To work with this data source, you will need to understand how
+  to get meaningful values out of the raw counters.
+
+ The format of the metrics is: `CONTEXT{chart="CHART",family="FAMILY",dimension="DIMENSION"}`.
+
+  If the metric is a counter (`incremental` in Netdata lingo), `_total` is appended to the context.
+
+  Unlike Prometheus, Netdata allows each dimension of a chart to have a different algorithm and conversion constants
+  (`multiplier` and `divisor`). In the case that the dimensions of a chart are heterogeneous, Netdata uses this
+  format: `CONTEXT_DIMENSION{chart="CHART",family="FAMILY"}`
+
+- `average` - this data source uses the Netdata database to send the metrics to Prometheus as they are presented on
+  the Netdata dashboard. So, all the metrics are sent as gauges, in the units they are presented in on the Netdata
+  dashboard charts. This is the easiest source to work with.
+
+ The format of the metrics is: `CONTEXT_UNITS_average{chart="CHART",family="FAMILY",dimension="DIMENSION"}`.
+
+  When this source is used, Netdata keeps track of the last access time of each Prometheus server fetching the
+  metrics. This last access time is used on subsequent queries by the same Prometheus server to identify the
+  time frame over which the `average` is calculated.
+
+ So, no matter how frequently prometheus scrapes Netdata, it will get all the database data.
+ To identify each prometheus server, Netdata uses by default the IP of the client fetching the metrics.
+
+ If there are multiple prometheus servers fetching data from the same Netdata, using the same IP, each prometheus
+ server can append `server=NAME` to the URL. Netdata will use this `NAME` to uniquely identify the prometheus server.
+
+- `sum` or `volume` is like `average`, but instead of averaging the values, it sums them.
+
+  The format of the metrics is: `CONTEXT_UNITS_sum{chart="CHART",family="FAMILY",dimension="DIMENSION"}`. All the
+  other operations are the same as with `average`.
+
+  To change the data source to `sum` or `as-collected`, provide the `source` parameter in the request URL,
+  e.g.: `http://your.netdata.ip:19999/api/v1/allmetrics?format=prometheus&help=yes&source=as-collected`
+  (see the quick comparison after this list).
+
+ Keep in mind that early versions of Netdata were sending the metrics as: `CHART_DIMENSION{}`.
+
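+A quick way to compare the three data sources from a shell, assuming `curl` is available (replace `your.netdata.ip`
+as before; this is just a sketch):
+
+```sh
+curl -s 'http://your.netdata.ip:19999/api/v1/allmetrics?format=prometheus&source=average'      | head
+curl -s 'http://your.netdata.ip:19999/api/v1/allmetrics?format=prometheus&source=sum'          | head
+curl -s 'http://your.netdata.ip:19999/api/v1/allmetrics?format=prometheus&source=as-collected' | head
+```
+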
+### Querying Metrics
+
+Fetch this URL with your web browser:
+
+`http://your.netdata.ip:19999/api/v1/allmetrics?format=prometheus&help=yes`
+
+_(replace `your.netdata.ip` with the IP or hostname of your Netdata server)_
+
+Netdata will respond with all the metrics it sends to prometheus.
+
+If you search that page for `"system.cpu"` you will find all the metrics Netdata is exporting to Prometheus for this
+chart. `system.cpu` is the chart name on the Netdata dashboard (on the Netdata dashboard all charts have a text
+heading such as `Total CPU utilization (system.cpu)`; what we are interested in here is the chart name: `system.cpu`).
+
+Searching for `"system.cpu"` reveals:
+
+```sh
+# COMMENT homogeneous chart "system.cpu", context "system.cpu", family "cpu", units "percentage"
+# COMMENT netdata_system_cpu_percentage_average: dimension "guest_nice", value is percentage, gauge, dt 1500066653 to 1500066662 inclusive
+netdata_system_cpu_percentage_average{chart="system.cpu",family="cpu",dimension="guest_nice"} 0.0000000 1500066662000
+# COMMENT netdata_system_cpu_percentage_average: dimension "guest", value is percentage, gauge, dt 1500066653 to 1500066662 inclusive
+netdata_system_cpu_percentage_average{chart="system.cpu",family="cpu",dimension="guest"} 1.7837326 1500066662000
+# COMMENT netdata_system_cpu_percentage_average: dimension "steal", value is percentage, gauge, dt 1500066653 to 1500066662 inclusive
+netdata_system_cpu_percentage_average{chart="system.cpu",family="cpu",dimension="steal"} 0.0000000 1500066662000
+# COMMENT netdata_system_cpu_percentage_average: dimension "softirq", value is percentage, gauge, dt 1500066653 to 1500066662 inclusive
+netdata_system_cpu_percentage_average{chart="system.cpu",family="cpu",dimension="softirq"} 0.5275442 1500066662000
+# COMMENT netdata_system_cpu_percentage_average: dimension "irq", value is percentage, gauge, dt 1500066653 to 1500066662 inclusive
+netdata_system_cpu_percentage_average{chart="system.cpu",family="cpu",dimension="irq"} 0.2260836 1500066662000
+# COMMENT netdata_system_cpu_percentage_average: dimension "user", value is percentage, gauge, dt 1500066653 to 1500066662 inclusive
+netdata_system_cpu_percentage_average{chart="system.cpu",family="cpu",dimension="user"} 2.3362762 1500066662000
+# COMMENT netdata_system_cpu_percentage_average: dimension "system", value is percentage, gauge, dt 1500066653 to 1500066662 inclusive
+netdata_system_cpu_percentage_average{chart="system.cpu",family="cpu",dimension="system"} 1.7961062 1500066662000
+# COMMENT netdata_system_cpu_percentage_average: dimension "nice", value is percentage, gauge, dt 1500066653 to 1500066662 inclusive
+netdata_system_cpu_percentage_average{chart="system.cpu",family="cpu",dimension="nice"} 0.0000000 1500066662000
+# COMMENT netdata_system_cpu_percentage_average: dimension "iowait", value is percentage, gauge, dt 1500066653 to 1500066662 inclusive
+netdata_system_cpu_percentage_average{chart="system.cpu",family="cpu",dimension="iowait"} 0.9671802 1500066662000
+# COMMENT netdata_system_cpu_percentage_average: dimension "idle", value is percentage, gauge, dt 1500066653 to 1500066662 inclusive
+netdata_system_cpu_percentage_average{chart="system.cpu",family="cpu",dimension="idle"} 92.3630770 1500066662000
+```
+
+_(Netdata response for `system.cpu` with source=`average`)_
+
+In `average` or `sum` data sources, all values are normalized and are reported to Prometheus as gauges. Now, use the
+'expression' text field in Prometheus and begin typing the metric we are looking for: `netdata_system_cpu`. You should
+see the field auto-complete, as Prometheus knows about this metric.
+
+If the data source was `as collected`, the response would be:
+
+```sh
+# COMMENT homogeneous chart "system.cpu", context "system.cpu", family "cpu", units "percentage"
+# COMMENT netdata_system_cpu_total: chart "system.cpu", context "system.cpu", family "cpu", dimension "guest_nice", value * 1 / 1 delta gives percentage (counter)
+netdata_system_cpu_total{chart="system.cpu",family="cpu",dimension="guest_nice"} 0 1500066716438
+# COMMENT netdata_system_cpu_total: chart "system.cpu", context "system.cpu", family "cpu", dimension "guest", value * 1 / 1 delta gives percentage (counter)
+netdata_system_cpu_total{chart="system.cpu",family="cpu",dimension="guest"} 63945 1500066716438
+# COMMENT netdata_system_cpu_total: chart "system.cpu", context "system.cpu", family "cpu", dimension "steal", value * 1 / 1 delta gives percentage (counter)
+netdata_system_cpu_total{chart="system.cpu",family="cpu",dimension="steal"} 0 1500066716438
+# COMMENT netdata_system_cpu_total: chart "system.cpu", context "system.cpu", family "cpu", dimension "softirq", value * 1 / 1 delta gives percentage (counter)
+netdata_system_cpu_total{chart="system.cpu",family="cpu",dimension="softirq"} 8295 1500066716438
+# COMMENT netdata_system_cpu_total: chart "system.cpu", context "system.cpu", family "cpu", dimension "irq", value * 1 / 1 delta gives percentage (counter)
+netdata_system_cpu_total{chart="system.cpu",family="cpu",dimension="irq"} 4079 1500066716438
+# COMMENT netdata_system_cpu_total: chart "system.cpu", context "system.cpu", family "cpu", dimension "user", value * 1 / 1 delta gives percentage (counter)
+netdata_system_cpu_total{chart="system.cpu",family="cpu",dimension="user"} 116488 1500066716438
+# COMMENT netdata_system_cpu_total: chart "system.cpu", context "system.cpu", family "cpu", dimension "system", value * 1 / 1 delta gives percentage (counter)
+netdata_system_cpu_total{chart="system.cpu",family="cpu",dimension="system"} 35084 1500066716438
+# COMMENT netdata_system_cpu_total: chart "system.cpu", context "system.cpu", family "cpu", dimension "nice", value * 1 / 1 delta gives percentage (counter)
+netdata_system_cpu_total{chart="system.cpu",family="cpu",dimension="nice"} 505 1500066716438
+# COMMENT netdata_system_cpu_total: chart "system.cpu", context "system.cpu", family "cpu", dimension "iowait", value * 1 / 1 delta gives percentage (counter)
+netdata_system_cpu_total{chart="system.cpu",family="cpu",dimension="iowait"} 23314 1500066716438
+# COMMENT netdata_system_cpu_total: chart "system.cpu", context "system.cpu", family "cpu", dimension "idle", value * 1 / 1 delta gives percentage (counter)
+netdata_system_cpu_total{chart="system.cpu",family="cpu",dimension="idle"} 918470 1500066716438
+```
+
+_(Netdata response for `system.cpu` with source=`as-collected`)_
+
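+To derive a per-second rate from such counters, a PromQL expression along these lines can be used (a sketch: the
+label values come from the sample output above, and scaling the result to a percentage depends on the units the
+collector reports):
+
+```
+rate(netdata_system_cpu_total{chart="system.cpu",dimension="user"}[1m])
+```
+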
+For more information, check the Prometheus documentation.
+
+### Streaming data from upstream hosts
+
+The `format=prometheus` parameter only exports the host's own Netdata metrics. If you are using the parent-child
+functionality of Netdata, this ignores any upstream hosts, so you should consider using the configuration below in
+your **prometheus.yml**:
+
+```yaml
+ metrics_path: '/api/v1/allmetrics'
+ params:
+ format: [prometheus_all_hosts]
+ honor_labels: true
+```
+
+This will report all upstream host data, and `honor_labels` will make Prometheus take note of the instance names
+provided.
+
+### Timestamps
+
+To pass the metrics through the Prometheus Pushgateway, Netdata supports the option `&timestamps=no`, which sends the
+metrics without timestamps.
+
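+A hedged example that relays a single scrape to a local Pushgateway (the Pushgateway address and the `netdata` job
+name are placeholders):
+
+```sh
+curl -s 'http://your.netdata.ip:19999/api/v1/allmetrics?format=prometheus&timestamps=no' \
+  | curl --data-binary @- http://localhost:9091/metrics/job/netdata
+```
+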
+## Netdata host variables
+
+Netdata collects various system configuration metrics, like the max number of TCP sockets supported, the max number of
+files allowed system-wide, various IPC sizes, etc. These metrics are not exposed to prometheus by default.
+
+To expose them, append `variables=yes` to the Netdata URL.
+
+### TYPE and HELP
+
+To save bandwidth, and because Prometheus does not use them anyway, `# TYPE` and `# HELP` lines are suppressed. If
+desired, they can be re-enabled via `types=yes` and `help=yes`, e.g.
+`/api/v1/allmetrics?format=prometheus&types=yes&help=yes`
+
+Note that if enabled, the `# TYPE` and `# HELP` lines are repeated for every occurrence of a metric, which goes against the Prometheus documentation's [specification for these lines](https://github.com/prometheus/docs/blob/master/content/docs/instrumenting/exposition_formats.md#comments-help-text-and-type-information).
+
+### Names and IDs
+
+Netdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers, as read by the system,
+and names are human-friendly labels (also unique).
+
+Most charts and metrics have the same ID and name, but in several cases they are different: disks with device-mapper,
+interrupts, QoS classes, statsd synthetic charts, etc.
+
+The default is controlled in `netdata.conf`:
+
+```conf
+[backend]
+ send names instead of ids = yes | no
+```
+
+You can override it from Prometheus by appending to the URL:
+
+- `&names=no` to get IDs (the old behaviour)
+- `&names=yes` to get names
+
+### Filtering metrics sent to prometheus
+
+Netdata can filter the metrics it sends to prometheus with this setting:
+
+```conf
+[backend]
+ send charts matching = *
+```
+
+This setting accepts a space-separated list of patterns to match the **charts** to be sent to Prometheus. Each pattern
+can use `*` as a wildcard, any number of times (e.g. `*a*b*c*` is valid). Patterns starting with `!` give a negative
+match (e.g. `!*.bad users.* groups.*` will send all the users and groups except the `bad` user and `bad` group). The
+order is important: the first match (positive or negative), left to right, is used.
+
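+For example, a pattern that sends only CPU and disk charts while excluding any chart whose name contains `loop` (the
+chart names here are illustrative):
+
+```conf
+[backend]
+    send charts matching = !*loop* system.cpu disk.*
+```
+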
+### Changing the prefix of Netdata metrics
+
+Netdata sends all metrics prefixed with `netdata_`. You can change this in `netdata.conf`, like this:
+
+```conf
+[backend]
+ prefix = netdata
+```
+
+It can also be changed from the URL, by appending `&prefix=netdata`.
+
+### Metric Units
+
+The default source `average` adds the unit of measurement to the name of each metric (e.g. `_KiB_persec`). To hide the
+units and get the same metric names as with the other sources, append to the URL `&hideunits=yes`.
+
+The units were standardized in v1.12, with the effect of changing the metric names. To get the metric names as they were
+before v1.12, append to the URL `&oldunits=yes`
+
+### Accuracy of `average` and `sum` data sources
+
+When the data source is set to `average` or `sum`, Netdata remembers the last access of each client accessing prometheus
+metrics and uses this last access time to respond with the `average` or `sum` of all the entries in the database since
+that. This means that prometheus servers are not losing data when they access Netdata with data source = `average` or
+`sum`.
+
+To uniquely identify each prometheus server, Netdata uses the IP of the client accessing the metrics. If however the IP
+is not good enough for identifying a single prometheus server (e.g. when prometheus servers are accessing Netdata
+through a web proxy, or when multiple prometheus servers are NATed to a single IP), each prometheus may append
+`&server=NAME` to the URL. This `NAME` is used by Netdata to uniquely identify each prometheus server and keep track of
+its last access time.
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fbackends%2Fprometheus%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/backends/prometheus/backend_prometheus.c b/backends/prometheus/backend_prometheus.c
new file mode 100644
index 0000000..a3ecf16
--- /dev/null
+++ b/backends/prometheus/backend_prometheus.c
@@ -0,0 +1,797 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#define BACKENDS_INTERNALS
+#include "backend_prometheus.h"
+
+// ----------------------------------------------------------------------------
+// PROMETHEUS
+// /api/v1/allmetrics?format=prometheus and /api/v1/allmetrics?format=prometheus_all_hosts
+
+static struct prometheus_server {
+ const char *server;
+ uint32_t hash;
+ RRDHOST *host;
+ time_t last_access;
+ struct prometheus_server *next;
+} *prometheus_server_root = NULL;
+
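+// find (or create) the tracking entry for the given prometheus server and
+// host, and return the time of its previous access (0 if this is the first access)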
+static inline time_t prometheus_server_last_access(const char *server, RRDHOST *host, time_t now) {
+ static netdata_mutex_t prometheus_server_root_mutex = NETDATA_MUTEX_INITIALIZER;
+
+ uint32_t hash = simple_hash(server);
+
+ netdata_mutex_lock(&prometheus_server_root_mutex);
+
+ struct prometheus_server *ps;
+ for(ps = prometheus_server_root; ps ;ps = ps->next) {
+ if (host == ps->host && hash == ps->hash && !strcmp(server, ps->server)) {
+ time_t last = ps->last_access;
+ ps->last_access = now;
+ netdata_mutex_unlock(&prometheus_server_root_mutex);
+ return last;
+ }
+ }
+
+ ps = callocz(1, sizeof(struct prometheus_server));
+ ps->server = strdupz(server);
+ ps->hash = hash;
+ ps->host = host;
+ ps->last_access = now;
+ ps->next = prometheus_server_root;
+ prometheus_server_root = ps;
+
+ netdata_mutex_unlock(&prometheus_server_root_mutex);
+ return 0;
+}
+
+static inline size_t backends_prometheus_name_copy(char *d, const char *s, size_t usable) {
+ size_t n;
+
+ for(n = 0; *s && n < usable ; d++, s++, n++) {
+ register char c = *s;
+
+ if(!isalnum(c)) *d = '_';
+ else *d = c;
+ }
+ *d = '\0';
+
+ return n;
+}
+
+static inline size_t backends_prometheus_label_copy(char *d, const char *s, size_t usable) {
+ size_t n;
+
+ // make sure we can escape one character without overflowing the buffer
+ usable--;
+
+ for(n = 0; *s && n < usable ; d++, s++, n++) {
+ register char c = *s;
+
+ if(unlikely(c == '"' || c == '\\' || c == '\n')) {
+ *d++ = '\\';
+ n++;
+ }
+ *d = c;
+ }
+ *d = '\0';
+
+ return n;
+}
+
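+// copy a units string prefixed with '_', replacing non-alphanumeric characters
+// with '_'; '%' becomes '_percent' and a trailing '/s' becomes '_persec';
+// optionally map post-v1.12 unit names back to their old spellings (issue 5227)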
+static inline char *backends_prometheus_units_copy(char *d, const char *s, size_t usable, int showoldunits) {
+ const char *sorig = s;
+ char *ret = d;
+ size_t n;
+
+ // Fix for issue 5227
+ if (unlikely(showoldunits)) {
+ static struct {
+ const char *newunit;
+ uint32_t hash;
+ const char *oldunit;
+ } units[] = {
+ {"KiB/s", 0, "kilobytes/s"}
+ , {"MiB/s", 0, "MB/s"}
+ , {"GiB/s", 0, "GB/s"}
+ , {"KiB" , 0, "KB"}
+ , {"MiB" , 0, "MB"}
+ , {"GiB" , 0, "GB"}
+ , {"inodes" , 0, "Inodes"}
+ , {"percentage" , 0, "percent"}
+ , {"faults/s" , 0, "page faults/s"}
+ , {"KiB/operation", 0, "kilobytes per operation"}
+ , {"milliseconds/operation", 0, "ms per operation"}
+ , {NULL, 0, NULL}
+ };
+ static int initialized = 0;
+ int i;
+
+ if(unlikely(!initialized)) {
+ for (i = 0; units[i].newunit; i++)
+ units[i].hash = simple_hash(units[i].newunit);
+ initialized = 1;
+ }
+
+ uint32_t hash = simple_hash(s);
+ for(i = 0; units[i].newunit ; i++) {
+ if(unlikely(hash == units[i].hash && !strcmp(s, units[i].newunit))) {
+ // info("matched extension for filename '%s': '%s'", filename, last_dot);
+ s=units[i].oldunit;
+ sorig = s;
+ break;
+ }
+ }
+ }
+ *d++ = '_';
+ for(n = 1; *s && n < usable ; d++, s++, n++) {
+ register char c = *s;
+
+ if(!isalnum(c)) *d = '_';
+ else *d = c;
+ }
+
+ if(n == 2 && sorig[0] == '%') {
+ n = 0;
+ d = ret;
+ s = "_percent";
+ for( ; *s && n < usable ; n++) *d++ = *s++;
+ }
+ else if(n > 3 && sorig[n-3] == '/' && sorig[n-2] == 's') {
+ n = n - 2;
+ d -= 2;
+ s = "_persec";
+ for( ; *s && n < usable ; n++) *d++ = *s++;
+ }
+
+ *d = '\0';
+
+ return ret;
+}
+
+
+#define PROMETHEUS_ELEMENT_MAX 256
+#define PROMETHEUS_LABELS_MAX 1024
+#define PROMETHEUS_VARIABLE_MAX 256
+
+#define PROMETHEUS_LABELS_MAX_NUMBER 128
+
+struct host_variables_callback_options {
+ RRDHOST *host;
+ BUFFER *wb;
+ BACKEND_OPTIONS backend_options;
+ BACKENDS_PROMETHEUS_OUTPUT_OPTIONS output_options;
+ const char *prefix;
+ const char *labels;
+ time_t now;
+ int host_header_printed;
+ char name[PROMETHEUS_VARIABLE_MAX+1];
+};
+
+static int print_host_variables(RRDVAR *rv, void *data) {
+ struct host_variables_callback_options *opts = data;
+
+ if(rv->options & (RRDVAR_OPTION_CUSTOM_HOST_VAR|RRDVAR_OPTION_CUSTOM_CHART_VAR)) {
+ if(!opts->host_header_printed) {
+ opts->host_header_printed = 1;
+
+ if(opts->output_options & BACKENDS_PROMETHEUS_OUTPUT_HELP) {
+ buffer_sprintf(opts->wb, "\n# COMMENT global host and chart variables\n");
+ }
+ }
+
+ calculated_number value = rrdvar2number(rv);
+ if(isnan(value) || isinf(value)) {
+ if(opts->output_options & BACKENDS_PROMETHEUS_OUTPUT_HELP)
+ buffer_sprintf(opts->wb, "# COMMENT variable \"%s\" is %s. Skipped.\n", rv->name, (isnan(value))?"NAN":"INF");
+
+ return 0;
+ }
+
+ char *label_pre = "";
+ char *label_post = "";
+ if(opts->labels && *opts->labels) {
+ label_pre = "{";
+ label_post = "}";
+ }
+
+        backends_prometheus_name_copy(opts->name, rv->name, sizeof(opts->name) - 1); // leave room for the terminating NUL
+
+ if(opts->output_options & BACKENDS_PROMETHEUS_OUTPUT_TIMESTAMPS)
+ buffer_sprintf(opts->wb
+ , "%s_%s%s%s%s " CALCULATED_NUMBER_FORMAT " %llu\n"
+ , opts->prefix
+ , opts->name
+ , label_pre
+ , opts->labels
+ , label_post
+ , value
+ , opts->now * 1000ULL
+ );
+ else
+ buffer_sprintf(opts->wb, "%s_%s%s%s%s " CALCULATED_NUMBER_FORMAT "\n"
+ , opts->prefix
+ , opts->name
+ , label_pre
+ , opts->labels
+ , label_post
+ , value
+ );
+
+ return 1;
+ }
+
+ return 0;
+}
+
+static void rrd_stats_api_v1_charts_allmetrics_prometheus(RRDHOST *host, BUFFER *wb, const char *prefix, BACKEND_OPTIONS backend_options, time_t after, time_t before, int allhosts, BACKENDS_PROMETHEUS_OUTPUT_OPTIONS output_options) {
+ rrdhost_rdlock(host);
+
+ char hostname[PROMETHEUS_ELEMENT_MAX + 1];
+ backends_prometheus_label_copy(hostname, host->hostname, PROMETHEUS_ELEMENT_MAX);
+
+ char labels[PROMETHEUS_LABELS_MAX + 1] = "";
+ if(allhosts) {
+ if(output_options & BACKENDS_PROMETHEUS_OUTPUT_TIMESTAMPS)
+ buffer_sprintf(wb, "netdata_info{instance=\"%s\",application=\"%s\",version=\"%s\"} 1 %llu\n", hostname, host->program_name, host->program_version, now_realtime_usec() / USEC_PER_MS);
+ else
+ buffer_sprintf(wb, "netdata_info{instance=\"%s\",application=\"%s\",version=\"%s\"} 1\n", hostname, host->program_name, host->program_version);
+
+ if(host->tags && *(host->tags)) {
+ if(output_options & BACKENDS_PROMETHEUS_OUTPUT_TIMESTAMPS) {
+ buffer_sprintf(wb, "netdata_host_tags_info{instance=\"%s\",%s} 1 %llu\n", hostname, host->tags, now_realtime_usec() / USEC_PER_MS);
+
+ // deprecated, exists only for compatibility with older queries
+ buffer_sprintf(wb, "netdata_host_tags{instance=\"%s\",%s} 1 %llu\n", hostname, host->tags, now_realtime_usec() / USEC_PER_MS);
+ }
+ else {
+ buffer_sprintf(wb, "netdata_host_tags_info{instance=\"%s\",%s} 1\n", hostname, host->tags);
+
+ // deprecated, exists only for compatibility with older queries
+ buffer_sprintf(wb, "netdata_host_tags{instance=\"%s\",%s} 1\n", hostname, host->tags);
+ }
+
+ }
+
+ snprintfz(labels, PROMETHEUS_LABELS_MAX, ",instance=\"%s\"", hostname);
+ }
+ else {
+ if(output_options & BACKENDS_PROMETHEUS_OUTPUT_TIMESTAMPS)
+ buffer_sprintf(wb, "netdata_info{instance=\"%s\",application=\"%s\",version=\"%s\"} 1 %llu\n", hostname, host->program_name, host->program_version, now_realtime_usec() / USEC_PER_MS);
+ else
+ buffer_sprintf(wb, "netdata_info{instance=\"%s\",application=\"%s\",version=\"%s\"} 1\n", hostname, host->program_name, host->program_version);
+
+ if(host->tags && *(host->tags)) {
+ if(output_options & BACKENDS_PROMETHEUS_OUTPUT_TIMESTAMPS) {
+ buffer_sprintf(wb, "netdata_host_tags_info{%s} 1 %llu\n", host->tags, now_realtime_usec() / USEC_PER_MS);
+
+ // deprecated, exists only for compatibility with older queries
+ buffer_sprintf(wb, "netdata_host_tags{%s} 1 %llu\n", host->tags, now_realtime_usec() / USEC_PER_MS);
+ }
+ else {
+ buffer_sprintf(wb, "netdata_host_tags_info{%s} 1\n", host->tags);
+
+ // deprecated, exists only for compatibility with older queries
+ buffer_sprintf(wb, "netdata_host_tags{%s} 1\n", host->tags);
+ }
+ }
+ }
+
+ // send custom variables set for the host
+ if(output_options & BACKENDS_PROMETHEUS_OUTPUT_VARIABLES){
+ struct host_variables_callback_options opts = {
+ .host = host,
+ .wb = wb,
+ .labels = (labels[0] == ',')?&labels[1]:labels,
+ .backend_options = backend_options,
+ .output_options = output_options,
+ .prefix = prefix,
+ .now = now_realtime_sec(),
+ .host_header_printed = 0
+ };
+ foreach_host_variable_callback(host, print_host_variables, &opts);
+ }
+
+ // for each chart
+ RRDSET *st;
+ rrdset_foreach_read(st, host) {
+ char chart[PROMETHEUS_ELEMENT_MAX + 1];
+ char context[PROMETHEUS_ELEMENT_MAX + 1];
+ char family[PROMETHEUS_ELEMENT_MAX + 1];
+ char units[PROMETHEUS_ELEMENT_MAX + 1] = "";
+
+ backends_prometheus_label_copy(chart, (output_options & BACKENDS_PROMETHEUS_OUTPUT_NAMES && st->name)?st->name:st->id, PROMETHEUS_ELEMENT_MAX);
+ backends_prometheus_label_copy(family, st->family, PROMETHEUS_ELEMENT_MAX);
+ backends_prometheus_name_copy(context, st->context, PROMETHEUS_ELEMENT_MAX);
+
+ if(likely(backends_can_send_rrdset(backend_options, st))) {
+ rrdset_rdlock(st);
+
+ int as_collected = (BACKEND_OPTIONS_DATA_SOURCE(backend_options) == BACKEND_SOURCE_DATA_AS_COLLECTED);
+ int homogeneous = 1;
+ if(as_collected) {
+ if(rrdset_flag_check(st, RRDSET_FLAG_HOMOGENEOUS_CHECK))
+ rrdset_update_heterogeneous_flag(st);
+
+ if(rrdset_flag_check(st, RRDSET_FLAG_HETEROGENEOUS))
+ homogeneous = 0;
+ }
+ else {
+ if(BACKEND_OPTIONS_DATA_SOURCE(backend_options) == BACKEND_SOURCE_DATA_AVERAGE && !(output_options & BACKENDS_PROMETHEUS_OUTPUT_HIDEUNITS))
+ backends_prometheus_units_copy(units, st->units, PROMETHEUS_ELEMENT_MAX, output_options & BACKENDS_PROMETHEUS_OUTPUT_OLDUNITS);
+ }
+
+ if(unlikely(output_options & BACKENDS_PROMETHEUS_OUTPUT_HELP))
+ buffer_sprintf(wb, "\n# COMMENT %s chart \"%s\", context \"%s\", family \"%s\", units \"%s\"\n"
+ , (homogeneous)?"homogeneous":"heterogeneous"
+ , (output_options & BACKENDS_PROMETHEUS_OUTPUT_NAMES && st->name) ? st->name : st->id
+ , st->context
+ , st->family
+ , st->units
+ );
+
+ // for each dimension
+ RRDDIM *rd;
+ rrddim_foreach_read(rd, st) {
+ if(rd->collections_counter && !rrddim_flag_check(rd, RRDDIM_FLAG_OBSOLETE)) {
+ char dimension[PROMETHEUS_ELEMENT_MAX + 1];
+ char *suffix = "";
+
+ if (as_collected) {
+ // we need as-collected / raw data
+
+ if(unlikely(rd->last_collected_time.tv_sec < after))
+ continue;
+
+ const char *t = "gauge", *h = "gives";
+ if(rd->algorithm == RRD_ALGORITHM_INCREMENTAL ||
+ rd->algorithm == RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL) {
+ t = "counter";
+ h = "delta gives";
+ suffix = "_total";
+ }
+
+ if(homogeneous) {
+                    // all the dimensions of the chart have the same algorithm, multiplier and divisor
+ // we add all dimensions as labels
+
+ backends_prometheus_label_copy(dimension, (output_options & BACKENDS_PROMETHEUS_OUTPUT_NAMES && rd->name) ? rd->name : rd->id, PROMETHEUS_ELEMENT_MAX);
+
+ if(unlikely(output_options & BACKENDS_PROMETHEUS_OUTPUT_HELP))
+ buffer_sprintf(wb
+ , "# COMMENT %s_%s%s: chart \"%s\", context \"%s\", family \"%s\", dimension \"%s\", value * " COLLECTED_NUMBER_FORMAT " / " COLLECTED_NUMBER_FORMAT " %s %s (%s)\n"
+ , prefix
+ , context
+ , suffix
+ , (output_options & BACKENDS_PROMETHEUS_OUTPUT_NAMES && st->name) ? st->name : st->id
+ , st->context
+ , st->family
+ , (output_options & BACKENDS_PROMETHEUS_OUTPUT_NAMES && rd->name) ? rd->name : rd->id
+ , rd->multiplier
+ , rd->divisor
+ , h
+ , st->units
+ , t
+ );
+
+ if(unlikely(output_options & BACKENDS_PROMETHEUS_OUTPUT_TYPES))
+ buffer_sprintf(wb, "# TYPE %s_%s%s %s\n"
+ , prefix
+ , context
+ , suffix
+ , t
+ );
+
+ if(output_options & BACKENDS_PROMETHEUS_OUTPUT_TIMESTAMPS)
+ buffer_sprintf(wb
+ , "%s_%s%s{chart=\"%s\",family=\"%s\",dimension=\"%s\"%s} " COLLECTED_NUMBER_FORMAT " %llu\n"
+ , prefix
+ , context
+ , suffix
+ , chart
+ , family
+ , dimension
+ , labels
+ , rd->last_collected_value
+ , timeval_msec(&rd->last_collected_time)
+ );
+ else
+ buffer_sprintf(wb
+ , "%s_%s%s{chart=\"%s\",family=\"%s\",dimension=\"%s\"%s} " COLLECTED_NUMBER_FORMAT "\n"
+ , prefix
+ , context
+ , suffix
+ , chart
+ , family
+ , dimension
+ , labels
+ , rd->last_collected_value
+ );
+ }
+ else {
+                    // the dimensions of the chart do not have the same algorithm, multiplier or divisor
+ // we create a metric per dimension
+
+ backends_prometheus_name_copy(dimension, (output_options & BACKENDS_PROMETHEUS_OUTPUT_NAMES && rd->name) ? rd->name : rd->id, PROMETHEUS_ELEMENT_MAX);
+
+ if(unlikely(output_options & BACKENDS_PROMETHEUS_OUTPUT_HELP))
+ buffer_sprintf(wb
+ , "# COMMENT %s_%s_%s%s: chart \"%s\", context \"%s\", family \"%s\", dimension \"%s\", value * " COLLECTED_NUMBER_FORMAT " / " COLLECTED_NUMBER_FORMAT " %s %s (%s)\n"
+ , prefix
+ , context
+ , dimension
+ , suffix
+ , (output_options & BACKENDS_PROMETHEUS_OUTPUT_NAMES && st->name) ? st->name : st->id
+ , st->context
+ , st->family
+ , (output_options & BACKENDS_PROMETHEUS_OUTPUT_NAMES && rd->name) ? rd->name : rd->id
+ , rd->multiplier
+ , rd->divisor
+ , h
+ , st->units
+ , t
+ );
+
+ if(unlikely(output_options & BACKENDS_PROMETHEUS_OUTPUT_TYPES))
+ buffer_sprintf(wb, "# TYPE %s_%s_%s%s %s\n"
+ , prefix
+ , context
+ , dimension
+ , suffix
+ , t
+ );
+
+ if(output_options & BACKENDS_PROMETHEUS_OUTPUT_TIMESTAMPS)
+ buffer_sprintf(wb
+ , "%s_%s_%s%s{chart=\"%s\",family=\"%s\"%s} " COLLECTED_NUMBER_FORMAT " %llu\n"
+ , prefix
+ , context
+ , dimension
+ , suffix
+ , chart
+ , family
+ , labels
+ , rd->last_collected_value
+ , timeval_msec(&rd->last_collected_time)
+ );
+ else
+ buffer_sprintf(wb
+ , "%s_%s_%s%s{chart=\"%s\",family=\"%s\"%s} " COLLECTED_NUMBER_FORMAT "\n"
+ , prefix
+ , context
+ , dimension
+ , suffix
+ , chart
+ , family
+ , labels
+ , rd->last_collected_value
+ );
+ }
+ }
+ else {
+ // we need average or sum of the data
+
+ time_t first_t = after, last_t = before;
+ calculated_number value = backend_calculate_value_from_stored_data(st, rd, after, before, backend_options, &first_t, &last_t);
+
+ if(!isnan(value) && !isinf(value)) {
+
+ if(BACKEND_OPTIONS_DATA_SOURCE(backend_options) == BACKEND_SOURCE_DATA_AVERAGE)
+ suffix = "_average";
+ else if(BACKEND_OPTIONS_DATA_SOURCE(backend_options) == BACKEND_SOURCE_DATA_SUM)
+ suffix = "_sum";
+
+ backends_prometheus_label_copy(dimension, (output_options & BACKENDS_PROMETHEUS_OUTPUT_NAMES && rd->name) ? rd->name : rd->id, PROMETHEUS_ELEMENT_MAX);
+
+ if (unlikely(output_options & BACKENDS_PROMETHEUS_OUTPUT_HELP))
+ buffer_sprintf(wb, "# COMMENT %s_%s%s%s: dimension \"%s\", value is %s, gauge, dt %llu to %llu inclusive\n"
+ , prefix
+ , context
+ , units
+ , suffix
+ , (output_options & BACKENDS_PROMETHEUS_OUTPUT_NAMES && rd->name) ? rd->name : rd->id
+ , st->units
+ , (unsigned long long)first_t
+ , (unsigned long long)last_t
+ );
+
+ if (unlikely(output_options & BACKENDS_PROMETHEUS_OUTPUT_TYPES))
+ buffer_sprintf(wb, "# TYPE %s_%s%s%s gauge\n"
+ , prefix
+ , context
+ , units
+ , suffix
+ );
+
+ if(output_options & BACKENDS_PROMETHEUS_OUTPUT_TIMESTAMPS)
+ buffer_sprintf(wb, "%s_%s%s%s{chart=\"%s\",family=\"%s\",dimension=\"%s\"%s} " CALCULATED_NUMBER_FORMAT " %llu\n"
+ , prefix
+ , context
+ , units
+ , suffix
+ , chart
+ , family
+ , dimension
+ , labels
+ , value
+ , last_t * MSEC_PER_SEC
+ );
+ else
+ buffer_sprintf(wb, "%s_%s%s%s{chart=\"%s\",family=\"%s\",dimension=\"%s\"%s} " CALCULATED_NUMBER_FORMAT "\n"
+ , prefix
+ , context
+ , units
+ , suffix
+ , chart
+ , family
+ , dimension
+ , labels
+ , value
+ );
+ }
+ }
+ }
+ }
+
+ rrdset_unlock(st);
+ }
+ }
+
+ rrdhost_unlock(host);
+}
+
+#if ENABLE_PROMETHEUS_REMOTE_WRITE
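+// Splits a host tags string into alternating name/value pointers in words[],
+// cutting the string in place and stripping surrounding quotes from values.
+// Illustrative example: the input
+//   dc="us-east",rack=r1
+// yields words[0]="dc", words[1]="us-east", words[2]="rack", words[3]="r1".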
+inline static void remote_write_split_words(char *str, char **words, int max_words) {
+ char *s = str;
+ int i = 0;
+
+ while(*s && i < max_words - 1) {
+ while(*s && isspace(*s)) s++; // skip spaces to the beginning of a tag name
+
+ if(*s)
+ words[i] = s;
+
+ while(*s && !isspace(*s) && *s != '=') s++; // find the end of the tag name
+
+ if(*s != '=') {
+ words[i] = NULL;
+ break;
+ }
+ *s = '\0';
+ s++;
+ i++;
+
+ while(*s && isspace(*s)) s++; // skip spaces to the beginning of a tag value
+
+ if(*s && *s == '"') s++; // strip an opening quote
+ if(*s)
+ words[i] = s;
+
+ while(*s && !isspace(*s) && *s != ',') s++; // find the end of the tag value
+
+ if(*s && *s != ',') {
+ words[i] = NULL;
+ break;
+ }
+ if(s != words[i] && *(s - 1) == '"') *(s - 1) = '\0'; // strip a closing quote
+ if(*s != '\0') {
+ *s = '\0';
+ s++;
+ i++;
+ }
+ }
+}
+
+void backends_rrd_stats_remote_write_allmetrics_prometheus(
+ RRDHOST *host
+ , const char *__hostname
+ , const char *prefix
+ , BACKEND_OPTIONS backend_options
+ , time_t after
+ , time_t before
+ , size_t *count_charts
+ , size_t *count_dims
+ , size_t *count_dims_skipped
+) {
+ char hostname[PROMETHEUS_ELEMENT_MAX + 1];
+ backends_prometheus_label_copy(hostname, __hostname, PROMETHEUS_ELEMENT_MAX);
+
+ backends_add_host_info("netdata_info", hostname, host->program_name, host->program_version, now_realtime_usec() / USEC_PER_MS);
+
+ if(host->tags && *(host->tags)) {
+ char tags[PROMETHEUS_LABELS_MAX + 1];
+ strncpy(tags, host->tags, PROMETHEUS_LABELS_MAX);
+ char *words[PROMETHEUS_LABELS_MAX_NUMBER] = {NULL};
+ int i;
+
+ remote_write_split_words(tags, words, PROMETHEUS_LABELS_MAX_NUMBER);
+
+ backends_add_host_info("netdata_host_tags_info", hostname, NULL, NULL, now_realtime_usec() / USEC_PER_MS);
+
+ for(i = 0; words[i] != NULL && words[i + 1] != NULL && (i + 1) < PROMETHEUS_LABELS_MAX_NUMBER; i += 2) {
+ backends_add_tag(words[i], words[i + 1]);
+ }
+ }
+
+ // for each chart
+ RRDSET *st;
+ rrdset_foreach_read(st, host) {
+ char chart[PROMETHEUS_ELEMENT_MAX + 1];
+ char context[PROMETHEUS_ELEMENT_MAX + 1];
+ char family[PROMETHEUS_ELEMENT_MAX + 1];
+ char units[PROMETHEUS_ELEMENT_MAX + 1] = "";
+
+ backends_prometheus_label_copy(chart, (backend_options & BACKEND_OPTION_SEND_NAMES && st->name)?st->name:st->id, PROMETHEUS_ELEMENT_MAX);
+ backends_prometheus_label_copy(family, st->family, PROMETHEUS_ELEMENT_MAX);
+ backends_prometheus_name_copy(context, st->context, PROMETHEUS_ELEMENT_MAX);
+
+ if(likely(backends_can_send_rrdset(backend_options, st))) {
+ rrdset_rdlock(st);
+
+ (*count_charts)++;
+
+ int as_collected = (BACKEND_OPTIONS_DATA_SOURCE(backend_options) == BACKEND_SOURCE_DATA_AS_COLLECTED);
+ int homogeneous = 1;
+ if(as_collected) {
+ if(rrdset_flag_check(st, RRDSET_FLAG_HOMOGENEOUS_CHECK))
+ rrdset_update_heterogeneous_flag(st);
+
+ if(rrdset_flag_check(st, RRDSET_FLAG_HETEROGENEOUS))
+ homogeneous = 0;
+ }
+ else {
+ if(BACKEND_OPTIONS_DATA_SOURCE(backend_options) == BACKEND_SOURCE_DATA_AVERAGE)
+ backends_prometheus_units_copy(units, st->units, PROMETHEUS_ELEMENT_MAX, 0);
+ }
+
+ // for each dimension
+ RRDDIM *rd;
+ rrddim_foreach_read(rd, st) {
+ if(rd->collections_counter && !rrddim_flag_check(rd, RRDDIM_FLAG_OBSOLETE)) {
+ char name[PROMETHEUS_LABELS_MAX + 1];
+ char dimension[PROMETHEUS_ELEMENT_MAX + 1];
+ char *suffix = "";
+
+ if (as_collected) {
+ // we need as-collected / raw data
+
+ if(unlikely(rd->last_collected_time.tv_sec < after)) {
+ debug(D_BACKEND, "BACKEND: not sending dimension '%s' of chart '%s' from host '%s', its last data collection (%lu) is not within our timeframe (%lu to %lu)", rd->id, st->id, __hostname, (unsigned long)rd->last_collected_time.tv_sec, (unsigned long)after, (unsigned long)before);
+ (*count_dims_skipped)++;
+ continue;
+ }
+
+ if(homogeneous) {
+ // all the dimensions of the chart have the same algorithm, multiplier and divisor,
+ // so we add all dimensions as labels
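+ // e.g. (illustrative) netdata_system_cpu{chart="system.cpu",family="cpu",dimension="user",instance="myhost"}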
+
+ backends_prometheus_label_copy(dimension, (backend_options & BACKEND_OPTION_SEND_NAMES && rd->name) ? rd->name : rd->id, PROMETHEUS_ELEMENT_MAX);
+ snprintf(name, PROMETHEUS_LABELS_MAX, "%s_%s%s", prefix, context, suffix);
+
+ backends_add_metric(name, chart, family, dimension, hostname, rd->last_collected_value, timeval_msec(&rd->last_collected_time));
+ (*count_dims)++;
+ }
+ else {
+ // the dimensions of the chart do not have the same algorithm, multiplier or divisor,
+ // so we create a metric per dimension
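+ // e.g. (illustrative) netdata_system_cpu_user{chart="system.cpu",family="cpu",instance="myhost"}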
+
+ backends_prometheus_name_copy(dimension, (backend_options & BACKEND_OPTION_SEND_NAMES && rd->name) ? rd->name : rd->id, PROMETHEUS_ELEMENT_MAX);
+ snprintf(name, PROMETHEUS_LABELS_MAX, "%s_%s_%s%s", prefix, context, dimension, suffix);
+
+ backends_add_metric(name, chart, family, NULL, hostname, rd->last_collected_value, timeval_msec(&rd->last_collected_time));
+ (*count_dims)++;
+ }
+ }
+ else {
+ // we need the average or the sum of the data
+
+ time_t first_t = after, last_t = before;
+ calculated_number value = backend_calculate_value_from_stored_data(st, rd, after, before, backend_options, &first_t, &last_t);
+
+ if(!isnan(value) && !isinf(value)) {
+
+ if(BACKEND_OPTIONS_DATA_SOURCE(backend_options) == BACKEND_SOURCE_DATA_AVERAGE)
+ suffix = "_average";
+ else if(BACKEND_OPTIONS_DATA_SOURCE(backend_options) == BACKEND_SOURCE_DATA_SUM)
+ suffix = "_sum";
+
+ backends_prometheus_label_copy(dimension, (backend_options & BACKEND_OPTION_SEND_NAMES && rd->name) ? rd->name : rd->id, PROMETHEUS_ELEMENT_MAX);
+ snprintf(name, PROMETHEUS_LABELS_MAX, "%s_%s%s%s", prefix, context, units, suffix);
+
+ backends_add_metric(name, chart, family, dimension, hostname, value, last_t * MSEC_PER_SEC);
+ (*count_dims)++;
+ }
+ }
+ }
+ }
+
+ rrdset_unlock(st);
+ }
+ }
+}
+#endif /* ENABLE_PROMETHEUS_REMOTE_WRITE */
+
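+// Returns the start of the export window: the point where the given prometheus
+// server was last served, so each scrape resumes where the previous one stopped.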
+static inline time_t prometheus_preparation(RRDHOST *host, BUFFER *wb, BACKEND_OPTIONS backend_options, const char *server, time_t now, BACKENDS_PROMETHEUS_OUTPUT_OPTIONS output_options) {
+ if(!server || !*server) server = "default";
+
+ time_t after = prometheus_server_last_access(server, host, now);
+
+ int first_seen = 0;
+ if(!after) {
+ after = now - global_backend_update_every;
+ first_seen = 1;
+ }
+
+ if(after > now) {
+ // oops! this should never happen
+ after = now - global_backend_update_every;
+ }
+
+ if(output_options & BACKENDS_PROMETHEUS_OUTPUT_HELP) {
+ char *mode;
+ if(BACKEND_OPTIONS_DATA_SOURCE(backend_options) == BACKEND_SOURCE_DATA_AS_COLLECTED)
+ mode = "as collected";
+ else if(BACKEND_OPTIONS_DATA_SOURCE(backend_options) == BACKEND_SOURCE_DATA_AVERAGE)
+ mode = "average";
+ else if(BACKEND_OPTIONS_DATA_SOURCE(backend_options) == BACKEND_SOURCE_DATA_SUM)
+ mode = "sum";
+ else
+ mode = "unknown";
+
+ buffer_sprintf(wb, "# COMMENT netdata \"%s\" to %sprometheus \"%s\", source \"%s\", last seen %lu %s, time range %lu to %lu\n\n"
+ , host->hostname
+ , (first_seen)?"FIRST SEEN ":""
+ , server
+ , mode
+ , (unsigned long)((first_seen)?0:(now - after))
+ , (first_seen)?"never":"seconds ago"
+ , (unsigned long)after, (unsigned long)now
+ );
+ }
+
+ return after;
+}
+
+void backends_rrd_stats_api_v1_charts_allmetrics_prometheus_single_host(RRDHOST *host, BUFFER *wb, const char *server, const char *prefix, BACKEND_OPTIONS backend_options, BACKENDS_PROMETHEUS_OUTPUT_OPTIONS output_options) {
+ time_t before = now_realtime_sec();
+
+ // we start at the point we had stopped before
+ time_t after = prometheus_preparation(host, wb, backend_options, server, before, output_options);
+
+ rrd_stats_api_v1_charts_allmetrics_prometheus(host, wb, prefix, backend_options, after, before, 0, output_options);
+}
+
+void backends_rrd_stats_api_v1_charts_allmetrics_prometheus_all_hosts(RRDHOST *host, BUFFER *wb, const char *server, const char *prefix, BACKEND_OPTIONS backend_options, BACKENDS_PROMETHEUS_OUTPUT_OPTIONS output_options) {
+ time_t before = now_realtime_sec();
+
+ // we start at the point we had stopped before
+ time_t after = prometheus_preparation(host, wb, backend_options, server, before, output_options);
+
+ rrd_rdlock();
+ rrdhost_foreach_read(host) {
+ rrd_stats_api_v1_charts_allmetrics_prometheus(host, wb, prefix, backend_options, after, before, 1, output_options);
+ }
+ rrd_unlock();
+}
+
+#if ENABLE_PROMETHEUS_REMOTE_WRITE
+int backends_process_prometheus_remote_write_response(BUFFER *b) {
+ if(unlikely(!b)) return 1;
+
+ const char *s = buffer_tostring(b);
+ int len = buffer_strlen(b);
+
+ // do nothing with HTTP responses 200 or 204
+
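+ // skip the leading HTTP version token (e.g. "HTTP/1.1") to reach the status code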
+ while(!isspace(*s) && len) {
+ s++;
+ len--;
+ }
+ s++;
+ len--;
+
+ if(likely(len > 4 && (!strncmp(s, "200 ", 4) || !strncmp(s, "204 ", 4))))
+ return 0;
+ else
+ return discard_response(b, "prometheus remote write");
+}
+#endif
diff --git a/backends/prometheus/backend_prometheus.h b/backends/prometheus/backend_prometheus.h
new file mode 100644
index 0000000..8c14ddc
--- /dev/null
+++ b/backends/prometheus/backend_prometheus.h
@@ -0,0 +1,37 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_BACKEND_PROMETHEUS_H
+#define NETDATA_BACKEND_PROMETHEUS_H 1
+
+#include "backends/backends.h"
+
+typedef enum backends_prometheus_output_flags {
+ BACKENDS_PROMETHEUS_OUTPUT_NONE = 0,
+ BACKENDS_PROMETHEUS_OUTPUT_HELP = (1 << 0),
+ BACKENDS_PROMETHEUS_OUTPUT_TYPES = (1 << 1),
+ BACKENDS_PROMETHEUS_OUTPUT_NAMES = (1 << 2),
+ BACKENDS_PROMETHEUS_OUTPUT_TIMESTAMPS = (1 << 3),
+ BACKENDS_PROMETHEUS_OUTPUT_VARIABLES = (1 << 4),
+ BACKENDS_PROMETHEUS_OUTPUT_OLDUNITS = (1 << 5),
+ BACKENDS_PROMETHEUS_OUTPUT_HIDEUNITS = (1 << 6)
+} BACKENDS_PROMETHEUS_OUTPUT_OPTIONS;
+
+extern void backends_rrd_stats_api_v1_charts_allmetrics_prometheus_single_host(RRDHOST *host, BUFFER *wb, const char *server, const char *prefix, BACKEND_OPTIONS backend_options, BACKENDS_PROMETHEUS_OUTPUT_OPTIONS output_options);
+extern void backends_rrd_stats_api_v1_charts_allmetrics_prometheus_all_hosts(RRDHOST *host, BUFFER *wb, const char *server, const char *prefix, BACKEND_OPTIONS backend_options, BACKENDS_PROMETHEUS_OUTPUT_OPTIONS output_options);
+
+#if ENABLE_PROMETHEUS_REMOTE_WRITE
+extern void backends_rrd_stats_remote_write_allmetrics_prometheus(
+ RRDHOST *host
+ , const char *__hostname
+ , const char *prefix
+ , BACKEND_OPTIONS backend_options
+ , time_t after
+ , time_t before
+ , size_t *count_charts
+ , size_t *count_dims
+ , size_t *count_dims_skipped
+);
+extern int backends_process_prometheus_remote_write_response(BUFFER *b);
+#endif
+
+#endif //NETDATA_BACKEND_PROMETHEUS_H
diff --git a/backends/prometheus/remote_write/Makefile.am b/backends/prometheus/remote_write/Makefile.am
new file mode 100644
index 0000000..d049ef4
--- /dev/null
+++ b/backends/prometheus/remote_write/Makefile.am
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+CLEANFILES = \
+ remote_write.pb.cc \
+ remote_write.pb.h \
+ $(NULL)
+
+dist_noinst_DATA = \
+ remote_write.proto \
+ README.md \
+ $(NULL)
diff --git a/backends/prometheus/remote_write/README.md b/backends/prometheus/remote_write/README.md
new file mode 100644
index 0000000..b83575e
--- /dev/null
+++ b/backends/prometheus/remote_write/README.md
@@ -0,0 +1,41 @@
+<!--
+title: "Prometheus remote write backend"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/backends/prometheus/remote_write/README.md
+-->
+
+# Prometheus remote write backend
+
+## Prerequisites
+
+To use the prometheus remote write API with [storage
+providers](https://prometheus.io/docs/operating/integrations/#remote-endpoints-and-storage), the
+[protobuf](https://developers.google.com/protocol-buffers/) and [snappy](https://github.com/google/snappy) libraries
+must be installed first. Next, Netdata should be re-installed from source. The installer will detect that the
+required libraries and utilities are now available.
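+
+On Debian-based systems, for example, the required packages can typically be installed as follows (an illustrative
+sketch; package names vary by distribution):
+
+```sh
+sudo apt-get install libprotobuf-dev libsnappy-dev protobuf-compiler
+```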
+
+## Configuration
+
+An additional option in the backend configuration section is available for the remote write backend:
+
+```conf
+[backend]
+ remote write URL path = /receive
+```
+
+`remote write URL path` sets the endpoint path for the remote write protocol; the default value is `/receive`.
+For example, if your endpoint is `http://example.domain:example_port/storage/read`, you should set:
+
+```conf
+[backend]
+ destination = example.domain:example_port
+ remote write URL path = /storage/read
+```
+
+The `buffered` and `lost` dimensions in the Netdata Backend Data Size operation monitoring chart estimate the
+uncompressed buffer size on failures.
+
+## Notes
+
+The remote write backend does not support `buffer on failures`.
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fbackends%2Fprometheus%2Fremote_write%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/backends/prometheus/remote_write/remote_write.cc b/backends/prometheus/remote_write/remote_write.cc
new file mode 100644
index 0000000..9448595
--- /dev/null
+++ b/backends/prometheus/remote_write/remote_write.cc
@@ -0,0 +1,120 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include <snappy.h>
+#include "remote_write.pb.h"
+#include "remote_write.h"
+
+using namespace prometheus;
+
+static google::protobuf::Arena arena;
+static WriteRequest *write_request;
+
+void backends_init_write_request() {
+ GOOGLE_PROTOBUF_VERIFY_VERSION;
+ write_request = google::protobuf::Arena::CreateMessage<WriteRequest>(&arena);
+}
+
+void backends_clear_write_request() {
+ write_request->clear_timeseries();
+}
+
+void backends_add_host_info(const char *name, const char *instance, const char *application, const char *version, const int64_t timestamp) {
+ TimeSeries *timeseries;
+ Sample *sample;
+ Label *label;
+
+ timeseries = write_request->add_timeseries();
+
+ label = timeseries->add_labels();
+ label->set_name("__name__");
+ label->set_value(name);
+
+ label = timeseries->add_labels();
+ label->set_name("instance");
+ label->set_value(instance);
+
+ if(application) {
+ label = timeseries->add_labels();
+ label->set_name("application");
+ label->set_value(application);
+ }
+
+ if(version) {
+ label = timeseries->add_labels();
+ label->set_name("version");
+ label->set_value(version);
+ }
+
+ sample = timeseries->add_samples();
+ sample->set_value(1);
+ sample->set_timestamp(timestamp);
+}
+
+// adds tag to the last created timeseries
+void backends_add_tag(char *tag, char *value) {
+ TimeSeries *timeseries;
+ Label *label;
+
+ timeseries = write_request->mutable_timeseries(write_request->timeseries_size() - 1);
+
+ label = timeseries->add_labels();
+ label->set_name(tag);
+ label->set_value(value);
+}
+
+void backends_add_metric(const char *name, const char *chart, const char *family, const char *dimension, const char *instance, const double value, const int64_t timestamp) {
+ TimeSeries *timeseries;
+ Sample *sample;
+ Label *label;
+
+ timeseries = write_request->add_timeseries();
+
+ label = timeseries->add_labels();
+ label->set_name("__name__");
+ label->set_value(name);
+
+ label = timeseries->add_labels();
+ label->set_name("chart");
+ label->set_value(chart);
+
+ label = timeseries->add_labels();
+ label->set_name("family");
+ label->set_value(family);
+
+ if(dimension) {
+ label = timeseries->add_labels();
+ label->set_name("dimension");
+ label->set_value(dimension);
+ }
+
+ label = timeseries->add_labels();
+ label->set_name("instance");
+ label->set_value(instance);
+
+ sample = timeseries->add_samples();
+ sample->set_value(value);
+ sample->set_timestamp(timestamp);
+}
+
+size_t backends_get_write_request_size(){
+#if GOOGLE_PROTOBUF_VERSION < 3001000
+ size_t size = (size_t)snappy::MaxCompressedLength(write_request->ByteSize());
+#else
+ size_t size = (size_t)snappy::MaxCompressedLength(write_request->ByteSizeLong());
+#endif
+
+ return (size < INT_MAX)?size:0;
+}
+
+int backends_pack_write_request(char *buffer, size_t *size) {
+ std::string uncompressed_write_request;
+ if(write_request->SerializeToString(&uncompressed_write_request) == false) return 1;
+
+ snappy::RawCompress(uncompressed_write_request.data(), uncompressed_write_request.size(), buffer, size);
+
+ return 0;
+}
+
+void backends_protocol_buffers_shutdown() {
+ google::protobuf::ShutdownProtobufLibrary();
+}
diff --git a/backends/prometheus/remote_write/remote_write.h b/backends/prometheus/remote_write/remote_write.h
new file mode 100644
index 0000000..1307d72
--- /dev/null
+++ b/backends/prometheus/remote_write/remote_write.h
@@ -0,0 +1,30 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_BACKEND_PROMETHEUS_REMOTE_WRITE_H
+#define NETDATA_BACKEND_PROMETHEUS_REMOTE_WRITE_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+void backends_init_write_request();
+
+void backends_clear_write_request();
+
+void backends_add_host_info(const char *name, const char *instance, const char *application, const char *version, const int64_t timestamp);
+
+void backends_add_tag(char *tag, char *value);
+
+void backends_add_metric(const char *name, const char *chart, const char *family, const char *dimension, const char *instance, const double value, const int64_t timestamp);
+
+size_t backends_get_write_request_size();
+
+int backends_pack_write_request(char *buffer, size_t *size);
+
+void backends_protocol_buffers_shutdown();
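+
+// Illustrative call sequence (a sketch; hostnames, versions and send_payload()
+// are placeholders, not part of this API):
+//
+//   backends_init_write_request();
+//   backends_add_host_info("netdata_info", "myhost", "netdata", "v1.29.3", now_ms);
+//   backends_add_metric("netdata_system_cpu", "system.cpu", "cpu", "user", "myhost", 1.0, now_ms);
+//   size_t len = backends_get_write_request_size(); // upper bound for the compressed payload
+//   char *buf = malloc(len);
+//   if(buf && !backends_pack_write_request(buf, &len))
+//       send_payload(buf, len); // hypothetical sender: POST len snappy-compressed bytes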
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif //NETDATA_BACKEND_PROMETHEUS_REMOTE_WRITE_H
diff --git a/backends/prometheus/remote_write/remote_write.proto b/backends/prometheus/remote_write/remote_write.proto
new file mode 100644
index 0000000..dfde254
--- /dev/null
+++ b/backends/prometheus/remote_write/remote_write.proto
@@ -0,0 +1,29 @@
+syntax = "proto3";
+package prometheus;
+
+option cc_enable_arenas = true;
+
+import "google/protobuf/descriptor.proto";
+
+message WriteRequest {
+ repeated TimeSeries timeseries = 1 [(nullable) = false];
+}
+
+message TimeSeries {
+ repeated Label labels = 1 [(nullable) = false];
+ repeated Sample samples = 2 [(nullable) = false];
+}
+
+message Label {
+ string name = 1;
+ string value = 2;
+}
+
+message Sample {
+ double value = 1;
+ int64 timestamp = 2;
+}
+
+extend google.protobuf.FieldOptions {
+ bool nullable = 65001;
+}
diff --git a/build-artifacts.sh b/build-artifacts.sh
new file mode 100755
index 0000000..a759efd
--- /dev/null
+++ b/build-artifacts.sh
@@ -0,0 +1,27 @@
+#!/bin/sh
+
+BASENAME="netdata-$(git describe)"
+
+mkdir -p artifacts
+
+autoreconf -ivf
+./configure \
+ --prefix=/usr \
+ --sysconfdir=/etc \
+ --localstatedir=/var \
+ --libexecdir=/usr/libexec \
+ --with-zlib \
+ --with-math \
+ --with-user=netdata \
+ CFLAGS=-O2
+make dist
+mv "${BASENAME}.tar.gz" artifacts/
+
+USER="" ./packaging/makeself/build-x86_64-static.sh
+
+cp packaging/version artifacts/latest-version.txt
+
+cd artifacts || exit 1
+ln -s "${BASENAME}.tar.gz" netdata-latest.tar.gz
+ln -s "${BASENAME}.gz.run" netdata-latest.gz.run
+sha256sum -b ./* > "sha256sums.txt"
diff --git a/build/m4/ax_c___atomic.m4 b/build/m4/ax_c___atomic.m4
new file mode 100644
index 0000000..dd5ee3d
--- /dev/null
+++ b/build/m4/ax_c___atomic.m4
@@ -0,0 +1,36 @@
+# AC_C___ATOMIC
+# -------------
+# Define HAVE_C___ATOMIC if __atomic works.
+AN_IDENTIFIER([__atomic], [AC_C___ATOMIC])
+AC_DEFUN([AC_C___ATOMIC],
+[AC_CACHE_CHECK([for __atomic], ac_cv_c___atomic,
+[AC_LINK_IFELSE(
+ [AC_LANG_SOURCE(
+ [[int
+ main (int argc, char **argv)
+ {
+ volatile unsigned long ul1 = 1, ul2 = 0, ul3 = 2;
+ __atomic_load_n(&ul1, __ATOMIC_SEQ_CST);
+ __atomic_compare_exchange(&ul1, &ul2, &ul3, 1, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
+ __atomic_fetch_add(&ul1, 1, __ATOMIC_SEQ_CST);
+ __atomic_fetch_sub(&ul3, 1, __ATOMIC_SEQ_CST);
+ __atomic_or_fetch(&ul1, ul2, __ATOMIC_SEQ_CST);
+ __atomic_and_fetch(&ul1, ul2, __ATOMIC_SEQ_CST);
+ volatile unsigned long long ull1 = 1, ull2 = 0, ull3 = 2;
+ __atomic_load_n(&ull1, __ATOMIC_SEQ_CST);
+ __atomic_compare_exchange(&ull1, &ull2, &ull3, 1, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
+ __atomic_fetch_add(&ull1, 1, __ATOMIC_SEQ_CST);
+ __atomic_fetch_sub(&ull3, 1, __ATOMIC_SEQ_CST);
+ __atomic_or_fetch(&ull1, ull2, __ATOMIC_SEQ_CST);
+ __atomic_and_fetch(&ull1, ull2, __ATOMIC_SEQ_CST);
+ return 0;
+ }
+ ]])],
+ [ac_cv_c___atomic=yes],
+ [ac_cv_c___atomic=no])])
+if test $ac_cv_c___atomic = yes; then
+ AC_DEFINE([HAVE_C___ATOMIC], 1,
+ [Define to 1 if __atomic operations work.])
+fi
+])# AC_C___ATOMIC
+
diff --git a/build/m4/ax_c__generic.m4 b/build/m4/ax_c__generic.m4
new file mode 100644
index 0000000..0c4dd52
--- /dev/null
+++ b/build/m4/ax_c__generic.m4
@@ -0,0 +1,28 @@
+# https://lists.gnu.org/archive/html/autoconf-commit/2012-12/msg00004.html
+# AC_C__GENERIC
+# -------------
+# Define HAVE_C__GENERIC if _Generic works, a la C11.
+AN_IDENTIFIER([_Generic], [AC_C__GENERIC])
+AC_DEFUN([AC_C__GENERIC],
+[AC_CACHE_CHECK([for _Generic], ac_cv_c__Generic,
+[AC_COMPILE_IFELSE(
+ [AC_LANG_SOURCE(
+ [[int
+ main (int argc, char **argv)
+ {
+ int a = _Generic (argc, int: argc = 1);
+ int *b = &_Generic (argc, default: argc);
+ char ***c = _Generic (argv, int: argc, default: argv ? &argv : 0);
+ _Generic (1 ? 0 : b, int: a, default: b) = &argc;
+ _Generic (a = 1, default: a) = 3;
+ return a + !b + !c;
+ }
+ ]])],
+ [ac_cv_c__Generic=yes],
+ [ac_cv_c__Generic=no])])
+if test $ac_cv_c__Generic = yes; then
+ AC_DEFINE([HAVE_C__GENERIC], 1,
+ [Define to 1 if C11-style _Generic works.])
+fi
+])# AC_C__GENERIC
+
diff --git a/build/m4/ax_c_lto.m4 b/build/m4/ax_c_lto.m4
new file mode 100644
index 0000000..7e6bc01
--- /dev/null
+++ b/build/m4/ax_c_lto.m4
@@ -0,0 +1,21 @@
+# AC_C_LTO
+# -------------
+# Define HAVE_LTO if -flto works.
+AN_IDENTIFIER([lto], [AC_C_LTO])
+AC_DEFUN([AC_C_LTO],
+[AC_CACHE_CHECK([if -flto builds executables], ac_cv_c_lto,
+[AC_RUN_IFELSE(
+ [AC_LANG_SOURCE(
+ [[#include <stdio.h>
+ int main(int argc, char **argv) {
+ return 0;
+ }
+ ]])],
+ [ac_cv_c_lto=yes],
+ [ac_cv_c_lto=no],
+ [ac_cv_c_lto=${ac_cv_c_lto_cross_compile}])])
+if test "${ac_cv_c_lto}" = "yes"; then
+ AC_DEFINE([HAVE_LTO], 1,
+ [Define to 1 if -flto works.])
+fi
+])# AC_C_LTO
diff --git a/build/m4/ax_c_mallinfo.m4 b/build/m4/ax_c_mallinfo.m4
new file mode 100644
index 0000000..af8d048
--- /dev/null
+++ b/build/m4/ax_c_mallinfo.m4
@@ -0,0 +1,24 @@
+# AC_C_MALLINFO
+# -------------
+# Define HAVE_C_MALLINFO if mallinfo() works.
+AN_IDENTIFIER([mallinfo], [AC_C_MALLINFO])
+AC_DEFUN([AC_C_MALLINFO],
+[AC_CACHE_CHECK([for mallinfo], ac_cv_c_mallinfo,
+[AC_LINK_IFELSE(
+ [AC_LANG_PROGRAM(
+ [[#include <malloc.h>]],
+ [[
+ struct mallinfo mi = mallinfo();
+ /* make sure that fields exists */
+ mi.uordblks = 0;
+ mi.hblkhd = 0;
+ mi.arena = 0;
+ ]]
+ )],
+ [ac_cv_c_mallinfo=yes],
+ [ac_cv_c_mallinfo=no])])
+if test $ac_cv_c_mallinfo = yes; then
+ AC_DEFINE([HAVE_C_MALLINFO], 1,
+ [Define to 1 if glibc mallinfo exists.])
+fi
+])# AC_C_MALLINFO
diff --git a/build/m4/ax_c_mallopt.m4 b/build/m4/ax_c_mallopt.m4
new file mode 100644
index 0000000..31c4fdc
--- /dev/null
+++ b/build/m4/ax_c_mallopt.m4
@@ -0,0 +1,20 @@
+# AC_C_MALLOPT
+# -------------
+# Define HAVE_C_MALLOPT if mallopt() works.
+AN_IDENTIFIER([mallopt], [AC_C_MALLOPT])
+AC_DEFUN([AC_C_MALLOPT],
+[AC_CACHE_CHECK([for mallopt], ac_cv_c_mallopt,
+[AC_LINK_IFELSE(
+ [AC_LANG_SOURCE(
+ [[#include <malloc.h>
+ int main(int argc, char **argv) {
+ mallopt(M_ARENA_MAX, 1);
+ }
+ ]])],
+ [ac_cv_c_mallopt=yes],
+ [ac_cv_c_mallopt=no])])
+if test $ac_cv_c_mallopt = yes; then
+ AC_DEFINE([HAVE_C_MALLOPT], 1,
+ [Define to 1 if glibc mallopt exists.])
+fi
+])# AC_C_MALLOPT
diff --git a/build/m4/ax_c_statement_expressions.m4 b/build/m4/ax_c_statement_expressions.m4
new file mode 100644
index 0000000..fb259e7
--- /dev/null
+++ b/build/m4/ax_c_statement_expressions.m4
@@ -0,0 +1,23 @@
+# AC_C_STMT_EXPR
+# -------------
+# Define HAVE_STMT_EXPR if compiler has statement expressions.
+AN_IDENTIFIER([_Generic], [AC_C_STMT_EXPR])
+AC_DEFUN([AC_C_STMT_EXPR],
+[AC_CACHE_CHECK([for statement expressions], ac_cv_c_stmt_expr,
+[AC_COMPILE_IFELSE(
+ [AC_LANG_SOURCE(
+ [[int
+ main (int argc, char **argv)
+ {
+ int x = ({ int y = 1; y; });
+ return x;
+ }
+ ]])],
+ [ac_cv_c_stmt_expr=yes],
+ [ac_cv_c_stmt_expr=no])])
+if test $ac_cv_c_stmt_expr = yes; then
+ AC_DEFINE([HAVE_STMT_EXPR], 1,
+ [Define to 1 if compiler supports statement expressions.])
+fi
+])# AC_C_STMT_EXPR
+
diff --git a/build/m4/ax_check_compile_flag.m4 b/build/m4/ax_check_compile_flag.m4
new file mode 100644
index 0000000..c515602
--- /dev/null
+++ b/build/m4/ax_check_compile_flag.m4
@@ -0,0 +1,50 @@
+# ===========================================================================
+# http://www.gnu.org/software/autoconf-archive/ax_check_compile_flag.html
+# ===========================================================================
+#
+# SYNOPSIS
+#
+# AX_CHECK_COMPILE_FLAG(FLAG, [ACTION-SUCCESS], [ACTION-FAILURE], [EXTRA-FLAGS], [INPUT])
+#
+# DESCRIPTION
+#
+# Check whether the given FLAG works with the current language's compiler
+# or gives an error. (Warnings, however, are ignored)
+#
+# ACTION-SUCCESS/ACTION-FAILURE are shell commands to execute on
+# success/failure.
+#
+# If EXTRA-FLAGS is defined, it is added to the current language's default
+# flags (e.g. CFLAGS) when the check is done. The check is thus made with
+# the flags: "CFLAGS EXTRA-FLAGS FLAG". This can for example be used to
+# force the compiler to issue an error when a bad flag is given.
+#
+# INPUT gives an alternative input source to AC_COMPILE_IFELSE.
+#
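+# A typical (illustrative) invocation:
+#
+#   AX_CHECK_COMPILE_FLAG([-Wall],
+#     [CFLAGS="$CFLAGS -Wall"],
+#     [AC_MSG_WARN([compiler does not accept -Wall])])
+#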
+# NOTE: Implementation based on AX_CFLAGS_GCC_OPTION. Please keep this
+# macro in sync with AX_CHECK_{PREPROC,LINK}_FLAG.
+#
+# LICENSE
+#
+# Copyright (c) 2008 Guido U. Draheim <guidod@gmx.de>
+# Copyright (c) 2011 Maarten Bosmans <mkbosmans@gmail.com>
+#
+# SPDX-License-Identifier: GPL-3.0
+
+#serial 3
+
+AC_DEFUN([AX_CHECK_COMPILE_FLAG],
+[AC_PREREQ(2.59)dnl for _AC_LANG_PREFIX
+AS_VAR_PUSHDEF([CACHEVAR],[ax_cv_check_[]_AC_LANG_ABBREV[]flags_$4_$1])dnl
+AC_CACHE_CHECK([whether _AC_LANG compiler accepts $1], CACHEVAR, [
+ ax_check_save_flags=$[]_AC_LANG_PREFIX[]FLAGS
+ _AC_LANG_PREFIX[]FLAGS="$[]_AC_LANG_PREFIX[]FLAGS $4 $1"
+ AC_COMPILE_IFELSE([m4_default([$5],[AC_LANG_PROGRAM()])],
+ [AS_VAR_SET(CACHEVAR,[yes])],
+ [AS_VAR_SET(CACHEVAR,[no])])
+ _AC_LANG_PREFIX[]FLAGS=$ax_check_save_flags])
+AS_IF([test x"AS_VAR_GET(CACHEVAR)" = xyes],
+ [m4_default([$2], :)],
+ [m4_default([$3], :)])
+AS_VAR_POPDEF([CACHEVAR])dnl
+])dnl AX_CHECK_COMPILE_FLAGS
diff --git a/build/m4/ax_check_enable_debug.m4 b/build/m4/ax_check_enable_debug.m4
new file mode 100644
index 0000000..db5bab2
--- /dev/null
+++ b/build/m4/ax_check_enable_debug.m4
@@ -0,0 +1,122 @@
+# ===========================================================================
+# http://www.gnu.org/software/autoconf-archive/ax_check_enable_debug.html
+# ===========================================================================
+#
+# SYNOPSIS
+#
+# AX_CHECK_ENABLE_DEBUG([enable by default=yes/info/profile/no], [ENABLE DEBUG VARIABLES ...], [DISABLE DEBUG VARIABLES NDEBUG ...], [IS-RELEASE])
+#
+# DESCRIPTION
+#
+# Check for the presence of an --enable-debug option to configure, with
+# the specified default value used when the option is not present. Return
+# the value in the variable $ax_enable_debug.
+#
+# Specifying 'yes' adds '-g -O0' to the compilation flags for all
+# languages. Specifying 'info' adds '-g' to the compilation flags.
+# Specifying 'profile' adds '-g -pg' to the compilation flags and '-pg' to
+# the linking flags. Otherwise, nothing is added.
+#
+# Defines the variables listed in the second argument if debug is enabled,
+# defaulting to no variables. Defines the variables listed in the third
+# argument if debug is disabled, defaulting to NDEBUG. All lists of
+# variables should be space-separated.
+#
+# If debug is not enabled, ensure AC_PROG_* will not add debugging flags.
+# Should be invoked prior to any AC_PROG_* compiler checks.
+#
+# IS-RELEASE can be used to change the default to 'no' when making a
+# release. Set IS-RELEASE to 'yes' or 'no' as appropriate. By default, it
+# uses the value of $ax_is_release, so if you are using the AX_IS_RELEASE
+# macro, there is no need to pass this parameter.
+#
+# AX_IS_RELEASE([git-directory])
+# AX_CHECK_ENABLE_DEBUG()
+#
+# LICENSE
+#
+# Copyright (c) 2011 Rhys Ulerich <rhys.ulerich@gmail.com>
+# Copyright (c) 2014, 2015 Philip Withnall <philip@tecnocode.co.uk>
+#
+# SPDX-License-Identifier: FSFAP
+
+#serial 5
+
+AC_DEFUN([AX_CHECK_ENABLE_DEBUG],[
+ AC_BEFORE([$0],[AC_PROG_CC])dnl
+ AC_BEFORE([$0],[AC_PROG_CXX])dnl
+ AC_BEFORE([$0],[AC_PROG_F77])dnl
+ AC_BEFORE([$0],[AC_PROG_FC])dnl
+
+ AC_MSG_CHECKING(whether to enable debugging)
+
+ ax_enable_debug_default=m4_tolower(m4_normalize(ifelse([$1],,[no],[$1])))
+ ax_enable_debug_is_release=m4_tolower(m4_normalize(ifelse([$4],,
+ [$ax_is_release],
+ [$4])))
+
+ # If this is a release, override the default.
+ AS_IF([test "$ax_enable_debug_is_release" = "yes"],
+ [ax_enable_debug_default="no"])
+
+ m4_define(ax_enable_debug_vars,[m4_normalize(ifelse([$2],,,[$2]))])
+ m4_define(ax_disable_debug_vars,[m4_normalize(ifelse([$3],,[NDEBUG],[$3]))])
+
+ AC_ARG_ENABLE(debug,
+ [AS_HELP_STRING([--enable-debug=]@<:@yes/info/profile/no@:>@,[compile with debugging])],
+ [],enable_debug=$ax_enable_debug_default)
+
+ # an empty value means debug is enabled
+ AS_IF([test "x$enable_debug" = "x"],
+ [enable_debug="yes"])
+
+ # act on the selected debug mode
+ AS_CASE([$enable_debug],
+ [yes],[
+ AC_MSG_RESULT(yes)
+ CFLAGS="${CFLAGS} -g -O0"
+ CXXFLAGS="${CXXFLAGS} -g -O0"
+ FFLAGS="${FFLAGS} -g -O0"
+ FCFLAGS="${FCFLAGS} -g -O0"
+ OBJCFLAGS="${OBJCFLAGS} -g -O0"
+ ],
+ [info],[
+ AC_MSG_RESULT(info)
+ CFLAGS="${CFLAGS} -g"
+ CXXFLAGS="${CXXFLAGS} -g"
+ FFLAGS="${FFLAGS} -g"
+ FCFLAGS="${FCFLAGS} -g"
+ OBJCFLAGS="${OBJCFLAGS} -g"
+ ],
+ [profile],[
+ AC_MSG_RESULT(profile)
+ CFLAGS="${CFLAGS} -g -pg"
+ CXXFLAGS="${CXXFLAGS} -g -pg"
+ FFLAGS="${FFLAGS} -g -pg"
+ FCFLAGS="${FCFLAGS} -g -pg"
+ OBJCFLAGS="${OBJCFLAGS} -g -pg"
+ LDFLAGS="${LDFLAGS} -pg"
+ ],
+ [
+ AC_MSG_RESULT(no)
+ dnl Ensure AC_PROG_CC/CXX/F77/FC/OBJC will not enable debug flags
+ dnl by setting any unset environment flag variables
+ AS_IF([test "x${CFLAGS+set}" != "xset"],
+ [CFLAGS=""])
+ AS_IF([test "x${CXXFLAGS+set}" != "xset"],
+ [CXXFLAGS=""])
+ AS_IF([test "x${FFLAGS+set}" != "xset"],
+ [FFLAGS=""])
+ AS_IF([test "x${FCFLAGS+set}" != "xset"],
+ [FCFLAGS=""])
+ AS_IF([test "x${OBJCFLAGS+set}" != "xset"],
+ [OBJCFLAGS=""])
+ ])
+
+ dnl Define various variables if debugging is disabled.
+ dnl assert.h is a NOP if NDEBUG is defined, so define it by default.
+ AS_IF([test "x$enable_debug" = "xyes"],
+ [m4_map_args_w(ax_enable_debug_vars, [AC_DEFINE(], [,,[Define if debugging is enabled])])],
+ [m4_map_args_w(ax_disable_debug_vars, [AC_DEFINE(], [,,[Define if debugging is disabled])])])
+ ax_enable_debug=$enable_debug
+])
diff --git a/build/m4/ax_gcc_func_attribute.m4 b/build/m4/ax_gcc_func_attribute.m4
new file mode 100644
index 0000000..6f1e1b0
--- /dev/null
+++ b/build/m4/ax_gcc_func_attribute.m4
@@ -0,0 +1,223 @@
+# ===========================================================================
+# http://www.gnu.org/software/autoconf-archive/ax_gcc_func_attribute.html
+# ===========================================================================
+#
+# SYNOPSIS
+#
+# AX_GCC_FUNC_ATTRIBUTE(ATTRIBUTE)
+#
+# DESCRIPTION
+#
+# This macro checks if the compiler supports one of GCC's function
+# attributes; many other compilers also provide function attributes with
+# the same syntax. Compiler warnings are used to detect supported
+# attributes, as unsupported ones are ignored by default, so quieting
+# warnings when using this macro will yield false positives.
+#
+# The ATTRIBUTE parameter holds the name of the attribute to be checked.
+#
+# If ATTRIBUTE is supported define HAVE_FUNC_ATTRIBUTE_<ATTRIBUTE>.
+#
+# The macro caches its result in the ax_cv_have_func_attribute_<attribute>
+# variable.
+#
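+# For example (illustrative), AX_GCC_FUNC_ATTRIBUTE([noreturn]) defines
+# HAVE_FUNC_ATTRIBUTE_NORETURN to 1 when the compiler accepts
+# __attribute__((noreturn)).
+#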
+# The macro currently supports the following function attributes:
+#
+# alias
+# aligned
+# alloc_size
+# always_inline
+# artificial
+# cold
+# const
+# constructor
+# constructor_priority for constructor attribute with priority
+# deprecated
+# destructor
+# dllexport
+# dllimport
+# error
+# externally_visible
+# flatten
+# format
+# format_arg
+# gnu_inline
+# hot
+# ifunc
+# leaf
+# malloc
+# noclone
+# noinline
+# nonnull
+# noreturn
+# nothrow
+# optimize
+# pure
+# unused
+# used
+# visibility
+# warning
+# warn_unused_result
+# weak
+# weakref
+#
+# Unsupported function attributes will be tested with a prototype returning
+# an int and not accepting any arguments, and the result of the check might
+# be wrong or meaningless, so use with care.
+#
+# LICENSE
+#
+# Copyright (c) 2013 Gabriele Svelto <gabriele.svelto@gmail.com>
+#
+# SPDX-License-Identifier: FSFAP
+
+#serial 4
+
+AC_DEFUN([AX_GCC_FUNC_ATTRIBUTE], [
+ AS_VAR_PUSHDEF([ac_var], [ax_cv_have_func_attribute_$1])
+
+ AC_CACHE_CHECK([for __attribute__(($1))], [ac_var], [
+ AC_LINK_IFELSE([AC_LANG_PROGRAM([
+ m4_case([$1],
+ [alias], [
+ int foo( void ) { return 0; }
+ int bar( void ) __attribute__(($1("foo")));
+ ],
+ [aligned], [
+ int foo( void ) __attribute__(($1(32)));
+ ],
+ [alloc_size], [
+ void *foo(int a) __attribute__(($1(1)));
+ ],
+ [always_inline], [
+ inline __attribute__(($1)) int foo( void ) { return 0; }
+ ],
+ [artificial], [
+ inline __attribute__(($1)) int foo( void ) { return 0; }
+ ],
+ [cold], [
+ int foo( void ) __attribute__(($1));
+ ],
+ [const], [
+ int foo( void ) __attribute__(($1));
+ ],
+ [constructor_priority], [
+ int foo( void ) __attribute__((__constructor__(65535/2)));
+ ],
+ [constructor], [
+ int foo( void ) __attribute__(($1));
+ ],
+ [deprecated], [
+ int foo( void ) __attribute__(($1("")));
+ ],
+ [destructor], [
+ int foo( void ) __attribute__(($1));
+ ],
+ [dllexport], [
+ __attribute__(($1)) int foo( void ) { return 0; }
+ ],
+ [dllimport], [
+ int foo( void ) __attribute__(($1));
+ ],
+ [error], [
+ int foo( void ) __attribute__(($1("")));
+ ],
+ [externally_visible], [
+ int foo( void ) __attribute__(($1));
+ ],
+ [flatten], [
+ int foo( void ) __attribute__(($1));
+ ],
+ [format], [
+ int foo(const char *p, ...) __attribute__(($1(printf, 1, 2)));
+ ],
+ [format_arg], [
+ char *foo(const char *p) __attribute__(($1(1)));
+ ],
+ [gnu_inline], [
+ inline __attribute__(($1)) int foo( void ) { return 0; }
+ ],
+ [hot], [
+ int foo( void ) __attribute__(($1));
+ ],
+ [ifunc], [
+ int my_foo( void ) { return 0; }
+ static int (*resolve_foo(void))(void) { return my_foo; }
+ int foo( void ) __attribute__(($1("resolve_foo")));
+ ],
+ [leaf], [
+ __attribute__(($1)) int foo( void ) { return 0; }
+ ],
+ [malloc], [
+ void *foo( void ) __attribute__(($1));
+ ],
+ [noclone], [
+ int foo( void ) __attribute__(($1));
+ ],
+ [noinline], [
+ __attribute__(($1)) int foo( void ) { return 0; }
+ ],
+ [nonnull], [
+ int foo(char *p) __attribute__(($1(1)));
+ ],
+ [noreturn], [
+ void foo( void ) __attribute__(($1));
+ ],
+ [nothrow], [
+ int foo( void ) __attribute__(($1));
+ ],
+ [optimize], [
+ __attribute__(($1(3))) int foo( void ) { return 0; }
+ ],
+ [pure], [
+ int foo( void ) __attribute__(($1));
+ ],
+ [returns_nonnull], [
+ void *foo( void ) __attribute__(($1));
+ ],
+ [unused], [
+ int foo( void ) __attribute__(($1));
+ ],
+ [used], [
+ int foo( void ) __attribute__(($1));
+ ],
+ [visibility], [
+ int foo_def( void ) __attribute__(($1("default")));
+ int foo_hid( void ) __attribute__(($1("hidden")));
+ int foo_int( void ) __attribute__(($1("internal")));
+ int foo_pro( void ) __attribute__(($1("protected")));
+ ],
+ [warning], [
+ int foo( void ) __attribute__(($1("")));
+ ],
+ [warn_unused_result], [
+ int foo( void ) __attribute__(($1));
+ ],
+ [weak], [
+ int foo( void ) __attribute__(($1));
+ ],
+ [weakref], [
+ static int foo( void ) { return 0; }
+ static int bar( void ) __attribute__(($1("foo")));
+ ],
+ [
+ m4_warn([syntax], [Unsupported attribute $1, the test may fail])
+ int foo( void ) __attribute__(($1));
+ ]
+ )], [])
+ ],
+ dnl GCC doesn't exit with an error if an unknown attribute is
+ dnl provided but only outputs a warning, so accept the attribute
+ dnl only if no warnings were issued.
+ [AS_IF([test -s conftest.err],
+ [AS_VAR_SET([ac_var], [no])],
+ [AS_VAR_SET([ac_var], [yes])])],
+ [AS_VAR_SET([ac_var], [no])])
+ ])
+
+ AS_IF([test yes = AS_VAR_GET([ac_var])],
+ [AC_DEFINE_UNQUOTED(AS_TR_CPP(HAVE_FUNC_ATTRIBUTE_$1), 1,
+ [Define to 1 if the system has the `$1' function attribute])], [])
+
+ AS_VAR_POPDEF([ac_var])
+])
diff --git a/build/m4/ax_pthread.m4 b/build/m4/ax_pthread.m4
new file mode 100644
index 0000000..ba9ac28
--- /dev/null
+++ b/build/m4/ax_pthread.m4
@@ -0,0 +1,308 @@
+# ===========================================================================
+# http://www.gnu.org/software/autoconf-archive/ax_pthread.html
+# ===========================================================================
+#
+# SYNOPSIS
+#
+# AX_PTHREAD([ACTION-IF-FOUND[, ACTION-IF-NOT-FOUND]])
+#
+# DESCRIPTION
+#
+# This macro figures out how to build C programs using POSIX threads. It
+# sets the PTHREAD_LIBS output variable to the threads library and linker
+# flags, and the PTHREAD_CFLAGS output variable to any special C compiler
+# flags that are needed. (The user can also force certain compiler
+# flags/libs to be tested by setting these environment variables.)
+#
+# Also sets PTHREAD_CC to any special C compiler that is needed for
+# multi-threaded programs (defaults to the value of CC otherwise). (This
+# is necessary on AIX to use the special cc_r compiler alias.)
+#
+# NOTE: You are assumed to not only compile your program with these flags,
+# but also link it with them as well. e.g. you should link with
+# $PTHREAD_CC $CFLAGS $PTHREAD_CFLAGS $LDFLAGS ... $PTHREAD_LIBS $LIBS
+#
+# If you are only building threads programs, you may wish to use these
+# variables in your default LIBS, CFLAGS, and CC:
+#
+# LIBS="$PTHREAD_LIBS $LIBS"
+# CFLAGS="$CFLAGS $PTHREAD_CFLAGS"
+# CC="$PTHREAD_CC"
+#
+# In addition, if the PTHREAD_CREATE_JOINABLE thread-attribute constant
+# has a nonstandard name, defines PTHREAD_CREATE_JOINABLE to that name
+# (e.g. PTHREAD_CREATE_UNDETACHED on AIX).
+#
+# Also HAVE_PTHREAD_PRIO_INHERIT is defined if pthread is found and the
+# PTHREAD_PRIO_INHERIT symbol is defined when compiling with
+# PTHREAD_CFLAGS.
+#
+# ACTION-IF-FOUND is a list of shell commands to run if a threads library
+# is found, and ACTION-IF-NOT-FOUND is a list of commands to run it if it
+# is not found. If ACTION-IF-FOUND is not specified, the default action
+# will define HAVE_PTHREAD.
+#
+# Please let the authors know if this macro fails on any platform, or if
+# you have any other suggestions or comments. This macro was based on work
+# by SGJ on autoconf scripts for FFTW (http://www.fftw.org/) (with help
+# from M. Frigo), as well as ac_pthread and hb_pthread macros posted by
+# Alejandro Forero Cuervo to the autoconf macro repository. We are also
+# grateful for the helpful feedback of numerous users.
+#
+# Updated for Autoconf 2.68 by Daniel Richard G.
+#
+# LICENSE
+#
+# Copyright (c) 2008 Steven G. Johnson <stevenj@alum.mit.edu>
+# Copyright (c) 2011 Daniel Richard G. <skunk@iSKUNK.ORG>
+#
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+#serial 21
+
+AU_ALIAS([ACX_PTHREAD], [AX_PTHREAD])
+AC_DEFUN([AX_PTHREAD], [
+AC_REQUIRE([AC_CANONICAL_HOST])
+AC_LANG_PUSH([C])
+ax_pthread_ok=no
+
+# We used to check for pthread.h first, but this fails if pthread.h
+# requires special compiler flags (e.g. on Tru64 or Sequent).
+# It gets checked for in the link test anyway.
+
+# First of all, check if the user has set any of the PTHREAD_LIBS,
+# etcetera environment variables, and if threads linking works using
+# them:
+if test x"$PTHREAD_LIBS$PTHREAD_CFLAGS" != x; then
+ save_CFLAGS="$CFLAGS"
+ CFLAGS="$CFLAGS $PTHREAD_CFLAGS"
+ save_LIBS="$LIBS"
+ LIBS="$PTHREAD_LIBS $LIBS"
+ AC_MSG_CHECKING([for pthread_join in LIBS=$PTHREAD_LIBS with CFLAGS=$PTHREAD_CFLAGS])
+ AC_TRY_LINK_FUNC([pthread_join], [ax_pthread_ok=yes])
+ AC_MSG_RESULT([$ax_pthread_ok])
+ if test x"$ax_pthread_ok" = xno; then
+ PTHREAD_LIBS=""
+ PTHREAD_CFLAGS=""
+ fi
+ LIBS="$save_LIBS"
+ CFLAGS="$save_CFLAGS"
+fi
+
+# We must check for the threads library under a number of different
+# names; the ordering is very important because some systems
+# (e.g. DEC) have both -lpthread and -lpthreads, where one of the
+# libraries is broken (non-POSIX).
+
+# Create a list of thread flags to try. Items starting with a "-" are
+# C compiler flags, and other items are library names, except for "none"
+# which indicates that we try without any flags at all, and "pthread-config"
+# which is a program returning the flags for the Pth emulation library.
+
+ax_pthread_flags="pthreads none -Kthread -kthread lthread -pthread -pthreads -mthreads pthread --thread-safe -mt pthread-config"
+
+# The ordering *is* (sometimes) important. Some notes on the
+# individual items follow:
+
+# pthreads: AIX (must check this before -lpthread)
+# none: in case threads are in libc; should be tried before -Kthread and
+# other compiler flags to prevent continual compiler warnings
+# -Kthread: Sequent (threads in libc, but -Kthread needed for pthread.h)
+# -kthread: FreeBSD kernel threads (preferred to -pthread since SMP-able)
+# lthread: LinuxThreads port on FreeBSD (also preferred to -pthread)
+# -pthread: Linux/gcc (kernel threads), BSD/gcc (userland threads)
+# -pthreads: Solaris/gcc
+# -mthreads: Mingw32/gcc, Lynx/gcc
+# -mt: Sun Workshop C (may only link SunOS threads [-lthread], but it
+# doesn't hurt to check since this sometimes defines pthreads too;
+# also defines -D_REENTRANT)
+# ... -mt is also the pthreads flag for HP/aCC
+# pthread: Linux, etcetera
+# --thread-safe: KAI C++
+# pthread-config: use pthread-config program (for GNU Pth library)
+
+case ${host_os} in
+ solaris*)
+
+ # On Solaris (at least, for some versions), libc contains stubbed
+ # (non-functional) versions of the pthreads routines, so link-based
+ # tests will erroneously succeed. (We need to link with -pthreads/-mt/
+ # -lpthread.) (The stubs are missing pthread_cleanup_push, or rather
+ # a function called by this macro, so we could check for that, but
+ # who knows whether they'll stub that too in a future libc.) So,
+ # we'll just look for -pthreads and -lpthread first:
+
+ ax_pthread_flags="-pthreads pthread -mt -pthread $ax_pthread_flags"
+ ;;
+
+ darwin*)
+ ax_pthread_flags="-pthread $ax_pthread_flags"
+ ;;
+esac
+
+# Clang doesn't consider unrecognized options an error unless we specify
+# -Werror. We throw in some extra Clang-specific options to ensure that
+# this doesn't happen for GCC, which also accepts -Werror.
+
+AC_MSG_CHECKING([if compiler needs -Werror to reject unknown flags])
+save_CFLAGS="$CFLAGS"
+ax_pthread_extra_flags="-Werror"
+CFLAGS="$CFLAGS $ax_pthread_extra_flags -Wunknown-warning-option -Wsizeof-array-argument"
+AC_COMPILE_IFELSE([AC_LANG_PROGRAM([int foo(void);],[foo()])],
+ [AC_MSG_RESULT([yes])],
+ [ax_pthread_extra_flags=
+ AC_MSG_RESULT([no])])
+CFLAGS="$save_CFLAGS"
+
+if test x"$ax_pthread_ok" = xno; then
+for flag in $ax_pthread_flags; do
+
+ case $flag in
+ none)
+ AC_MSG_CHECKING([whether pthreads work without any flags])
+ ;;
+
+ -*)
+ AC_MSG_CHECKING([whether pthreads work with $flag])
+ PTHREAD_CFLAGS="$flag"
+ ;;
+
+ pthread-config)
+ AC_CHECK_PROG([ax_pthread_config], [pthread-config], [yes], [no])
+ if test x"$ax_pthread_config" = xno; then continue; fi
+ PTHREAD_CFLAGS="`pthread-config --cflags`"
+ PTHREAD_LIBS="`pthread-config --ldflags` `pthread-config --libs`"
+ ;;
+
+ *)
+ AC_MSG_CHECKING([for the pthreads library -l$flag])
+ PTHREAD_LIBS="-l$flag"
+ ;;
+ esac
+
+ save_LIBS="$LIBS"
+ save_CFLAGS="$CFLAGS"
+ LIBS="$PTHREAD_LIBS $LIBS"
+ CFLAGS="$CFLAGS $PTHREAD_CFLAGS $ax_pthread_extra_flags"
+
+ # Check for various functions. We must include pthread.h,
+ # since some functions may be macros. (On the Sequent, we
+ # need a special flag -Kthread to make this header compile.)
+ # We check for pthread_join because it is in -lpthread on IRIX
+ # while pthread_create is in libc. We check for pthread_attr_init
+ # due to DEC craziness with -lpthreads. We check for
+ # pthread_cleanup_push because it is one of the few pthread
+ # functions on Solaris that doesn't have a non-functional libc stub.
+ # We try pthread_create on general principles.
+ AC_LINK_IFELSE([AC_LANG_PROGRAM([#include <pthread.h>
+ static void routine(void *a) { a = 0; }
+ static void *start_routine(void *a) { return a; }],
+ [pthread_t th; pthread_attr_t attr;
+ pthread_create(&th, 0, start_routine, 0);
+ pthread_join(th, 0);
+ pthread_attr_init(&attr);
+ pthread_cleanup_push(routine, 0);
+ pthread_cleanup_pop(0) /* ; */])],
+ [ax_pthread_ok=yes],
+ [])
+
+ LIBS="$save_LIBS"
+ CFLAGS="$save_CFLAGS"
+
+ AC_MSG_RESULT([$ax_pthread_ok])
+ if test "x$ax_pthread_ok" = xyes; then
+ break;
+ fi
+
+ PTHREAD_LIBS=""
+ PTHREAD_CFLAGS=""
+done
+fi
+
+# Various other checks:
+if test "x$ax_pthread_ok" = xyes; then
+ save_LIBS="$LIBS"
+ LIBS="$PTHREAD_LIBS $LIBS"
+ save_CFLAGS="$CFLAGS"
+ CFLAGS="$CFLAGS $PTHREAD_CFLAGS"
+
+ # Detect AIX lossage: JOINABLE attribute is called UNDETACHED.
+ AC_MSG_CHECKING([for joinable pthread attribute])
+ attr_name=unknown
+ for attr in PTHREAD_CREATE_JOINABLE PTHREAD_CREATE_UNDETACHED; do
+ AC_LINK_IFELSE([AC_LANG_PROGRAM([#include <pthread.h>],
+ [int attr = $attr; return attr /* ; */])],
+ [attr_name=$attr; break],
+ [])
+ done
+ AC_MSG_RESULT([$attr_name])
+ if test "$attr_name" != PTHREAD_CREATE_JOINABLE; then
+ AC_DEFINE_UNQUOTED([PTHREAD_CREATE_JOINABLE], [$attr_name],
+ [Define to necessary symbol if this constant
+ uses a non-standard name on your system.])
+ fi
+
+ AC_MSG_CHECKING([if more special flags are required for pthreads])
+ flag=no
+ case ${host_os} in
+ aix* | freebsd* | darwin*) flag="-D_THREAD_SAFE";;
+ osf* | hpux*) flag="-D_REENTRANT";;
+ solaris*)
+ if test "$GCC" = "yes"; then
+ flag="-D_REENTRANT"
+ else
+ # TODO: What about Clang on Solaris?
+ flag="-mt -D_REENTRANT"
+ fi
+ ;;
+ esac
+ AC_MSG_RESULT([$flag])
+ if test "x$flag" != xno; then
+ PTHREAD_CFLAGS="$flag $PTHREAD_CFLAGS"
+ fi
+
+ AC_CACHE_CHECK([for PTHREAD_PRIO_INHERIT],
+ [ax_cv_PTHREAD_PRIO_INHERIT], [
+ AC_LINK_IFELSE([AC_LANG_PROGRAM([[#include <pthread.h>]],
+ [[int i = PTHREAD_PRIO_INHERIT;]])],
+ [ax_cv_PTHREAD_PRIO_INHERIT=yes],
+ [ax_cv_PTHREAD_PRIO_INHERIT=no])
+ ])
+ AS_IF([test "x$ax_cv_PTHREAD_PRIO_INHERIT" = "xyes"],
+ [AC_DEFINE([HAVE_PTHREAD_PRIO_INHERIT], [1], [Have PTHREAD_PRIO_INHERIT.])])
+
+ LIBS="$save_LIBS"
+ CFLAGS="$save_CFLAGS"
+
+ # More AIX lossage: compile with *_r variant
+ if test "x$GCC" != xyes; then
+ case $host_os in
+ aix*)
+ AS_CASE(["x/$CC"],
+ [x*/c89|x*/c89_128|x*/c99|x*/c99_128|x*/cc|x*/cc128|x*/xlc|x*/xlc_v6|x*/xlc128|x*/xlc128_v6],
+ [#handle absolute path differently from PATH based program lookup
+ AS_CASE(["x$CC"],
+ [x/*],
+ [AS_IF([AS_EXECUTABLE_P([${CC}_r])],[PTHREAD_CC="${CC}_r"])],
+ [AC_CHECK_PROGS([PTHREAD_CC],[${CC}_r],[$CC])])])
+ ;;
+ esac
+ fi
+fi
+
+test -n "$PTHREAD_CC" || PTHREAD_CC="$CC"
+
+AC_SUBST([PTHREAD_LIBS])
+AC_SUBST([PTHREAD_CFLAGS])
+AC_SUBST([PTHREAD_CC])
+
+# Finally, execute ACTION-IF-FOUND/ACTION-IF-NOT-FOUND:
+if test x"$ax_pthread_ok" = xyes; then
+ ifelse([$1],,[AC_DEFINE([HAVE_PTHREAD],[1],[Define if you have POSIX threads libraries and header files.])],[$1])
+ :
+else
+ ax_pthread_ok=no
+ $2
+fi
+AC_LANG_POP
+])dnl AX_PTHREAD
diff --git a/build/m4/jemalloc.m4 b/build/m4/jemalloc.m4
new file mode 100644
index 0000000..c2008a8
--- /dev/null
+++ b/build/m4/jemalloc.m4
@@ -0,0 +1,75 @@
+dnl -------------------------------------------------------- -*- autoconf -*-
+dnl SPDX-License-Identifier: Apache-2.0
+
+dnl
+dnl jemalloc.m4: Trafficserver's jemalloc autoconf macros
+dnl modified to skip other TS_ helpers
+dnl
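+dnl Illustrative usage sketch: call TS_CHECK_JEMALLOC from configure.ac, then
+dnl build with, e.g., ./configure --with-jemalloc=/usr/local
+dnl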
+
+AC_DEFUN([TS_CHECK_JEMALLOC], [
+AC_ARG_WITH([jemalloc-prefix],
+ [AS_HELP_STRING([--with-jemalloc-prefix=PREFIX],[Specify the jemalloc prefix [default=""]])],
+ [
+ jemalloc_prefix="$withval"
+ ],[
+ if test "`uname -s`" = "Darwin"; then
+ jemalloc_prefix="je_"
+ else
+ jemalloc_prefix=""
+ fi
+ ]
+)
+AC_DEFINE_UNQUOTED([prefix_jemalloc], [${jemalloc_prefix}], [jemalloc prefix])
+
+enable_jemalloc=no
+AC_ARG_WITH([jemalloc], [AS_HELP_STRING([--with-jemalloc=DIR], [use a specific jemalloc library])],
+[
+ if test "$withval" != "no"; then
+ if test "x${enable_tcmalloc}" = "xyes"; then
+ AC_MSG_ERROR([Cannot compile with both jemalloc and tcmalloc])
+ fi
+ enable_jemalloc=yes
+ jemalloc_base_dir="$withval"
+ case "$withval" in
+ yes)
+ jemalloc_base_dir="/usr"
+ AC_MSG_CHECKING(for jemalloc includes in standard directories)
+ ;;
+ *":"*)
+ jemalloc_include="`echo $withval |sed -e 's/:.*$//'`"
+ jemalloc_ldflags="`echo $withval |sed -e 's/^.*://'`"
+ AC_MSG_CHECKING(for jemalloc includes in $jemalloc_include and libs in $jemalloc_ldflags)
+ ;;
+ *)
+ jemalloc_include="$withval/include"
+ jemalloc_ldflags="$withval/lib"
+ AC_MSG_CHECKING(for jemalloc includes in $withval)
+ ;;
+ esac
+ fi
+])
+
+has_jemalloc=0
+if test "$enable_jemalloc" != "no"; then
+ jemalloc_have_headers=0
+ jemalloc_have_libs=0
+ if test "$jemalloc_base_dir" != "/usr"; then
+ CFLAGS="${CFLAGS} -I${jemalloc_include}"
+ LDFLAGS="${LDFLAGS} -L${jemalloc_ldflags}"
+ LIBTOOL_LINK_FLAGS="${LIBTOOL_LINK_FLAGS} -R${jemalloc_ldflags}"
+ fi
+ func="${jemalloc_prefix}malloc_stats_print"
+ AC_CHECK_LIB(jemalloc, ${func}, [jemalloc_have_libs=1])
+ if test "$jemalloc_have_libs" != "0"; then
+ AC_CHECK_HEADERS([jemalloc/jemalloc.h], [jemalloc_have_headers=1])
+ fi
+ if test "$jemalloc_have_headers" != "0"; then
+ has_jemalloc=1
+ LIBS="${LIBS} -ljemalloc"
+ AC_DEFINE(has_jemalloc, [1], [Link/compile against jemalloc])
+ else
+ AC_MSG_ERROR([Couldn't find a jemalloc installation])
+ fi
+fi
+AC_SUBST(has_jemalloc)
+])
diff --git a/build/m4/tcmalloc.m4 b/build/m4/tcmalloc.m4
new file mode 100644
index 0000000..765d2ac
--- /dev/null
+++ b/build/m4/tcmalloc.m4
@@ -0,0 +1,45 @@
+dnl -------------------------------------------------------- -*- autoconf -*-
+dnl SPDX-License-Identifier: Apache-2.0
+
+dnl
+dnl tcmalloc.m4: Trafficserver's tcmalloc autoconf macros
+dnl modified to skip other TS_ helpers
+dnl
+
+dnl This is a bit ugly, but we need a way to both specify a directory and which
+dnl of the many tcmalloc libraries to use ...
+AC_DEFUN([TS_CHECK_TCMALLOC], [
+AC_ARG_WITH([tcmalloc-lib],
+ [AS_HELP_STRING([--with-tcmalloc-lib],[specify the tcmalloc library to use [default=tcmalloc]])],
+ [
+ with_tcmalloc_lib="$withval"
+ ],[
+ with_tcmalloc_lib="tcmalloc"
+ ]
+)
+
+has_tcmalloc=0
+AC_ARG_WITH([tcmalloc], [AS_HELP_STRING([--with-tcmalloc=DIR], [use the tcmalloc library])],
+[
+ if test "$withval" != "no"; then
+ if test "x${enable_jemalloc}" = "xyes"; then
+ AC_MSG_ERROR([Cannot compile with both tcmalloc and jemalloc])
+ fi
+ tcmalloc_have_lib=0
+ if test "x$withval" != "xyes" && test "x$withval" != "x"; then
+ tcmalloc_ldflags="$withval/lib"
+ LDFLAGS="${LDFLAGS} -L${tcmalloc_ldflags}"
+ LIBTOOL_LINK_FLAGS="${LIBTOOL_LINK_FLAGS} -rpath ${tcmalloc_ldflags}"
+ fi
+ AC_CHECK_LIB(${with_tcmalloc_lib}, tc_cfree, [tcmalloc_have_lib=1])
+ if test "$tcmalloc_have_lib" != "0"; then
+ LIBS="${LIBS} -l${with_tcmalloc_lib}"
+ has_tcmalloc=1
+ AC_DEFINE(has_tcmalloc, [1], [Link/compile against tcmalloc])
+ else
+ AC_MSG_ERROR([Couldn't find a tcmalloc installation])
+ fi
+ fi
+])
+AC_SUBST(has_tcmalloc)
+])
diff --git a/build/subst.inc b/build/subst.inc
new file mode 100644
index 0000000..af97623
--- /dev/null
+++ b/build/subst.inc
@@ -0,0 +1,20 @@
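+# This single-suffix rule rebuilds any "foo" from its "foo.in" template by
+# substituting the @..._POST@ placeholders with the configured installation
+# paths.
+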
+.in:
+ if sed \
+ -e 's#[@]localstatedir_POST@#$(localstatedir)#g' \
+ -e 's#[@]sbindir_POST@#$(sbindir)#g' \
+ -e 's#[@]pluginsdir_POST@#$(pluginsdir)#g' \
+ -e 's#[@]configdir_POST@#$(configdir)#g' \
+ -e 's#[@]libconfigdir_POST@#$(libconfigdir)#g' \
+ -e 's#[@]pkglibexecdir_POST@#$(pkglibexecdir)#g' \
+ -e 's#[@]cachedir_POST@#$(cachedir)#g' \
+ -e 's#[@]registrydir_POST@#$(registrydir)#g' \
+ -e 's#[@]varlibdir_POST@#$(varlibdir)#g' \
+ -e 's#[@]webdir_POST@#$(webdir)#g' \
+ -e 's#[@]can_enable_aclk_POST@#$(can_enable_aclk)#g' \
+ -e 's#[@]enable_cloud_POST@#$(enable_cloud)#g' \
+ $< > $@.tmp; then \
+ mv "$@.tmp" "$@"; \
+ else \
+ rm -f "$@.tmp"; \
+ false; \
+ fi
diff --git a/build_external/README.md b/build_external/README.md
new file mode 100644
index 0000000..6a1e30a
--- /dev/null
+++ b/build_external/README.md
@@ -0,0 +1,128 @@
+<!--
+title: "External build-system"
+date: 2020-03-31
+custom_edit_url: https://github.com/netdata/netdata/edit/master/build_external/README.md
+-->
+
+# External build-system
+
+This wraps the build-system in Docker so that the host system and the target system are
+decoupled. This allows:
+
+- Cross-compilation (e.g. linux development from macOS)
+- Cross-distro (e.g. using CentOS user-land while developing on Debian)
+- Multi-host scenarios (e.g. parent-child configurations)
+- Bleeding-edge scenarios (e.g. using the ACLK (**currently for internal-use only**))
+
+The advantage of these scenarios is that they allow **reproducible** builds and testing
+for developers. This is the first iteration of the build-system to allow the team to use
+it and get used to it.
+
+For configurations that involve building and running the agent alone, we still use
+`docker-compose` for consistency with more complex configurations. The more complex
+configurations allow the agent to be run in conjunction with parts of the cloud
+infrastructure (these parts of the code are not public), or with external brokers
+(such as VerneMQ for MQTT), or with other external tools (such as TSDB to allow the agent to
+export metrics). Note: no external TSDB scenarios are available in the first iteration;
+they will be added in subsequent iterations.
+
+This differs from the packaging dockerfiles as it is designed to be used for local development.
+The main difference is that these files are designed to support incremental compilation in
+the following way:
+
+1. The initial build should be performed using `bin/clean-install.sh` to create a docker
+ image with the agent built from the source tree and installed into standard system paths
+ using `netdata-installer.sh`. In addition to the steps performed by the standard packaging
+ builds, a manifest is created to allow subsequent builds to be made incrementally using
+ `make` inside the container. Any libraries that are required for 'bleeding-edge' development
+ are added on top of the standard install.
+2. When the `bin/make-install.sh` script is used the docker container will be updated with
+ a sanitized version of the current build-tree. The manifest will be used to line up the
+ state of the incoming docker cache with `make`'s view of the file-system. This means
+ the `make install` inside the container will only rebuild changes
+ since the last time the disk image was created.
+
+The exact improvement on the compile-cycle depends on the speed of the network connection
+to pull the netdata dependencies, but should shrink the time considerably. For example,
+on a MacBook Pro the initial install takes about 1min + network delay [Note: the standard
+installer misbehaves at the end of the container build when it tries to kill the running
+agent, which is very slow] and the incremental step only takes 15s. On a Debian host with
+a fast network this reduces 1m30 -> 13s.
+
+## Examples
+
+1. Simple cross-compilation / cross-distro builds.
+
+```bash
+build_external/bin/clean-install.sh arch current
+docker run -it --rm arch_current_dev
+echo >>daemon/main.c # Simulate edit by touching file
+build_external/bin/make-install.sh arch current
+docker run -it --rm arch_current_dev
+```
+
+Currently there is no detection of when the installer needs to be rerun (really, of when
+the `autoreconf` / `configure` step must be rerun). Netdata was not written with
+multi-stage builds in mind, and we need to work out how to do this in the future. For now
+it is up to you to know when to rerun the clean build step.
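+As a rough heuristic (our assumption, not something the scripts check), any change to the
+autotools inputs means the clean step must be redone:
+
+```bash
+# Hypothetical check: rerun clean-install when build-system files have changed.
+if git diff --name-only | grep -qE '(^|/)(configure\.ac|Makefile\.am)$'; then
+    build_external/bin/clean-install.sh arch current
+fi
+```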
+
+```bash
+build_external/bin/clean-install.sh arch current
+build_external/bin/clean-install.sh ubuntu 19.10
+docker run -it --rm arch_current_dev
+echo >>daemon/main.c # Simulate an edit by appending to the file
+build_external/bin/make-install.sh arch current
+docker run -it --rm arch_current_dev
+echo >>daemon/daemon.c # Simulate second edit step
+build_external/bin/make-install.sh arch current # Observe a single file is rebuilt
+build_external/bin/make-install.sh ubuntu 19.10 # Observe both files are rebuilt
+```
+
+The state of the build in the two containers is independent.
+
+2. Single agent config in docker-compose
+
+This functions the same as the previous example but is wrapped in docker-compose to
+allow injection into more complex test configurations.
+
+```bash
+Distro=debian Version=10 docker-compose -f projects/only-agent/docker-compose.yml up
+```
+
+Note: it is possible to run multiple copies of the agent using the `--scale` option for
+`docker-compose up`.
+
+```bash
+Distro=debian Version=10 docker-compose -f projects/only-agent/docker-compose.yml up --scale agent=3
+```
+
+3. A simple parent-child scenario
+
+```bash
+# Need to call clean-install on the configs used in the parent-child containers
+docker-compose -f parent-child/docker-compose.yml up --scale agent_child1=2
+```
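+
+As the comment above says, `clean-install.sh` must already have been run for each
+distro/version pair that the compose builds from. Assuming both the parent and child
+images use arch/current (check the compose file for the actual pair), that would be:
+
+```bash
+build_external/bin/clean-install.sh arch current
+```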
+
+Note: this is not production-ready yet, but it is left in so that we can see how it behaves
+and improve it. Currently it has the following problems:
+ * Only the base configuration in the compose, without scaling, works.
+ * The containers are hard-coded in the compose.
+ * There is no way to separate the agent configurations, so running multiple agent child nodes
+ with the same GUID kills the parent, which exits with a fatal condition.
+
+4. The ACLK
+
+This is for internal use only as it requires access to a private repo. Clone the vernemq-docker
+repo and follow the instructions within to build an image called `vernemq`.
+
+```bash
+build_external/bin/clean-install.sh arch current # Only needed first time
+docker-compose -f build_external/scenarios/aclk-testing/vernemq-compose.yml -f build_external/scenarios/aclk-testing/agent-compose.yml up --build
+```
+
+Notes:
+* We are currently limited to arch because of restrictions on libwebsockets.
+* There is not yet a good way to configure the target agent container from the docker-compose command line.
+* Several other containers should be in this compose (a paho client, tshark, etc.).
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fbuild_external%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/build_external/bin/clean-install.sh b/build_external/bin/clean-install.sh
new file mode 100755
index 0000000..78d1f32
--- /dev/null
+++ b/build_external/bin/clean-install.sh
@@ -0,0 +1,54 @@
+#!/usr/bin/env bash
+
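+# Usage: clean-install.sh <distro> <version>   e.g. clean-install.sh arch current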
+DISTRO="$1"
+VERSION="$2"
+BuildBase="$(cd "$(dirname "$0")" && cd .. && pwd)"
+
+# This is temporary - not all of the package-builder images from the helper-images repo
+# are available on Docker Hub. When everything falls under the "happy case" below this
+# can be deleted in a future iteration. This is written in a weird way for portability,
+# can't rely on bash 4.0+ to allow case fall-through with ;&
+
+if cat <<HAPPY_CASE | grep "$DISTRO-$VERSION"
+ opensuse-15.1
+ fedora-29
+ debian-9
+ debian-8
+ fedora-30
+ opensuse-15.0
+ ubuntu-19.04
+ centos-7
+ fedora-31
+ ubuntu-16.04
+ ubuntu-18.04
+ ubuntu-19.10
+ debian-10
+ centos-8
+ ubuntu-1804
+ ubuntu-1904
+ ubuntu-1910
+ debian-stretch
+ debian-jessie
+ debian-buster
+HAPPY_CASE
+then
+ docker build -f "$BuildBase/clean-install.Dockerfile" -t "${DISTRO}_${VERSION}_dev" "$BuildBase/.." \
+ --build-arg "DISTRO=$DISTRO" --build-arg "VERSION=$VERSION" \
+ --build-arg EXTRA_CFLAGS="-DACLK_SSL_ALLOW_SELF_SIGNED"
+else
+ case "$DISTRO-$VERSION" in
+ arch-current)
+ docker build -f "$BuildBase/clean-install-arch.Dockerfile" -t "${DISTRO}_${VERSION}_dev" "$BuildBase/.." \
+ --build-arg "DISTRO=$DISTRO" --build-arg "VERSION=$VERSION" \
+ --build-arg EXTRA_CFLAGS="-DACLK_SSL_ALLOW_SELF_SIGNED" # --no-cache
+ ;;
+ arch-extras) # Add valgrind to the container
+ docker build -f "$BuildBase/clean-install-arch-extras.Dockerfile" -t "${DISTRO}_${VERSION}_dev" "$BuildBase/.." \
+ --build-arg "DISTRO=$DISTRO" --build-arg "VERSION=$VERSION" \
+ --build-arg EXTRA_CFLAGS="-DACLK_SSL_ALLOW_SELF_SIGNED" # --no-cache
+ ;;
+ *)
+ echo "Unknown $DISTRO-$VERSION"
+ ;;
+ esac
+fi
diff --git a/build_external/bin/make-install.sh b/build_external/bin/make-install.sh
new file mode 100755
index 0000000..fe4f7c9
--- /dev/null
+++ b/build_external/bin/make-install.sh
@@ -0,0 +1,8 @@
+#!/usr/bin/env bash
+
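+# Usage: make-install.sh <distro> <version> - incrementally update an existing <distro>_<version>_dev image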
+DISTRO="$1"
+VERSION="$2"
+BuildBase="$(cd "$(dirname "$0")" && cd .. && pwd)"
+
+docker build -f "$BuildBase/make-install.Dockerfile" -t "${DISTRO}_${VERSION}_dev:latest" "$BuildBase/.." \
+ --build-arg "DISTRO=${DISTRO}" --build-arg "VERSION=${VERSION}"
diff --git a/build_external/clean-install-arch-debug.Dockerfile b/build_external/clean-install-arch-debug.Dockerfile
new file mode 100644
index 0000000..44d5b5e
--- /dev/null
+++ b/build_external/clean-install-arch-debug.Dockerfile
@@ -0,0 +1,62 @@
+FROM archlinux/base:latest
+
+# There is some redundancy between this file and the archlinux Dockerfile in the helper-images
+# repo, and also with clean-install.Dockerfile. Once the helper image is available on Docker
+# Hub this file can be deleted.
+# Arbitrary command: change the echoed string to invalidate the Docker layer cache from here on.
+RUN echo sdlsjdkls
+RUN pacman -Syyu --noconfirm
+RUN pacman --noconfirm --needed -S autoconf \
+ autoconf-archive \
+ autogen \
+ automake \
+ gcc \
+ make \
+ git \
+ libuv \
+ lz4 \
+ netcat \
+ openssl \
+ pkgconfig \
+ python \
+ libvirt \
+ cmake \
+ valgrind \
+ gdb
+
+ARG EXTRA_CFLAGS
+COPY . /opt/netdata/source
+WORKDIR /opt/netdata/source
+
+RUN git config --global user.email "root@container"
+RUN git config --global user.name "Fake root"
+
+# RUN make distclean -> not safe if tree state changed on host since last config
+# Remove everything that git does not track (including ignored files) while preserving any
+# fresh changes, i.e. untracked files will be deleted but local changes to tracked files are kept.
+RUN if git status --porcelain | grep '^[MADRC]'; then \
+ git stash && git clean -dxf && (git stash apply || true) ; \
+ else \
+ git clean -dxf ; \
+ fi
+
+# Not everybody is updating distclean properly - fix.
+RUN find . -name '*.Po' -exec rm \{\} \;
+RUN rm -rf autom4te.cache
+RUN rm -rf .git/
+RUN find . -type f >/opt/netdata/manifest
+
+RUN CFLAGS="-Og -g -ggdb -Wall -Wextra -Wformat-signedness -fstack-protector-all -DNETDATA_INTERNAL_CHECKS=1\
+ -D_FORTIFY_SOURCE=2 -DNETDATA_VERIFY_LOCKS=1 ${EXTRA_CFLAGS}" ./netdata-installer.sh --require-cloud --disable-lto
+
+RUN ln -sf /dev/stdout /var/log/netdata/access.log
+RUN ln -sf /dev/stdout /var/log/netdata/debug.log
+RUN ln -sf /dev/stderr /var/log/netdata/error.log
+
+RUN printf >/opt/netdata/source/gdb_batch '\
+set args -D \n\
+handle SIG32 nostop \n\
+run \n\
+bt'
+
+#CMD ["/usr/sbin/valgrind", "--leak-check=full", "/usr/sbin/netdata", "-D"]
+CMD ["/usr/bin/gdb", "-x", "/opt/netdata/source/gdb_batch", "/usr/sbin/netdata"]
diff --git a/build_external/clean-install-arch-extras.Dockerfile b/build_external/clean-install-arch-extras.Dockerfile
new file mode 100644
index 0000000..b155c13
--- /dev/null
+++ b/build_external/clean-install-arch-extras.Dockerfile
@@ -0,0 +1,58 @@
+FROM archlinux/base:latest
+
+# There is some redundancy between this file and the archlinux Dockerfile in the helper-images
+# repo, and also with clean-install.Dockerfile. Once the helper image is available on Docker
+# Hub this file can be deleted.
+# Arbitrary command: change the echoed string to invalidate the Docker layer cache from here on.
+RUN echo sdlsjdkls
+RUN pacman -Syyu --noconfirm
+RUN pacman --noconfirm --needed -S autoconf \
+ autoconf-archive \
+ autogen \
+ automake \
+ gcc \
+ make \
+ git \
+ libuv \
+ lz4 \
+ netcat \
+ openssl \
+ pkgconfig \
+ python \
+ libvirt \
+ cmake \
+ valgrind \
+ gdb
+
+ARG EXTRA_CFLAGS
+COPY . /opt/netdata/source
+WORKDIR /opt/netdata/source
+
+RUN git config --global user.email "root@container"
+RUN git config --global user.name "Fake root"
+
+# RUN make distclean -> not safe if tree state changed on host since last config
+# Remove everything that git does not track (including ignored files) while preserving any
+# fresh changes, i.e. untracked files will be deleted but local changes to tracked files are kept.
+RUN if git status --porcelain | grep '^[MADRC]'; then \
+ git stash && git clean -dxf && (git stash apply || true) ; \
+ else \
+ git clean -dxf ; \
+ fi
+
+# Not everybody is updating distclean properly - fix.
+RUN find . -name '*.Po' -exec rm \{\} \;
+RUN rm -rf autom4te.cache
+RUN rm -rf .git/
+RUN find . -type f >/opt/netdata/manifest
+
+RUN CFLAGS="-Og -g -ggdb -Wall -Wextra -Wformat-signedness -fstack-protector-all -DNETDATA_INTERNAL_CHECKS=1\
+ -D_FORTIFY_SOURCE=2 -DNETDATA_VERIFY_LOCKS=1 ${EXTRA_CFLAGS}" ./netdata-installer.sh --require-cloud --disable-lto
+
+RUN ln -sf /dev/stdout /var/log/netdata/access.log
+RUN ln -sf /dev/stdout /var/log/netdata/debug.log
+RUN ln -sf /dev/stderr /var/log/netdata/error.log
+
+RUN rm /var/lib/netdata/registry/netdata.public.unique.id
+
+CMD ["/usr/sbin/valgrind", "--leak-check=full", "/usr/sbin/netdata", "-D"]
+
diff --git a/build_external/clean-install-arch.Dockerfile b/build_external/clean-install-arch.Dockerfile
new file mode 100644
index 0000000..bb8274f
--- /dev/null
+++ b/build_external/clean-install-arch.Dockerfile
@@ -0,0 +1,54 @@
+FROM archlinux/base:latest
+
+# There is some redundancy between this file and the archlinux Dockerfile in the helper-images
+# repo, and also with clean-install.Dockerfile. Once the helper image is available on Docker
+# Hub this file can be deleted.
+
+RUN pacman -Sy
+RUN pacman --noconfirm --needed -S autoconf \
+ autoconf-archive \
+ autogen \
+ automake \
+ gcc \
+ make \
+ git \
+ libuv \
+ lz4 \
+ netcat \
+ openssl \
+ pkgconfig \
+ python \
+ libvirt \
+ cmake
+
+ARG ACLK=no
+ARG EXTRA_CFLAGS
+COPY . /opt/netdata/source
+WORKDIR /opt/netdata/source
+
+RUN git config --global user.email "root@container"
+RUN git config --global user.name "Fake root"
+
+# RUN make distclean -> not safe if tree state changed on host since last config
+# Remove everything that git does not track (including ignored files) while preserving any
+# fresh changes, i.e. untracked files will be deleted but local changes to tracked files are kept.
+RUN if git status --porcelain | grep '^[MADRC]'; then \
+ git stash && git clean -dxf && (git stash apply || true) ; \
+ else \
+ git clean -dxf ; \
+ fi
+
+# Not everybody is updating distclean properly - fix.
+RUN find . -name '*.Po' -exec rm \{\} \;
+RUN rm -rf autom4te.cache
+RUN rm -rf .git/
+RUN find . -type f >/opt/netdata/manifest
+
+RUN CFLAGS="-O1 -ggdb -Wall -Wextra -Wformat-signedness -fstack-protector-all -DNETDATA_INTERNAL_CHECKS=1\
+ -D_FORTIFY_SOURCE=2 -DNETDATA_VERIFY_LOCKS=1 ${EXTRA_CFLAGS}" ./netdata-installer.sh --disable-lto
+
+RUN ln -sf /dev/stdout /var/log/netdata/access.log
+RUN ln -sf /dev/stdout /var/log/netdata/debug.log
+RUN ln -sf /dev/stderr /var/log/netdata/error.log
+
+CMD ["/usr/sbin/netdata", "-D"]
diff --git a/build_external/clean-install.Dockerfile b/build_external/clean-install.Dockerfile
new file mode 100644
index 0000000..18586e8
--- /dev/null
+++ b/build_external/clean-install.Dockerfile
@@ -0,0 +1,39 @@
+ARG DISTRO=arch
+ARG VERSION=current
+FROM netdata/package-builders:${DISTRO}${VERSION}
+
+ARG ACLK=no
+ARG EXTRA_CFLAGS
+
+COPY . /opt/netdata/source
+WORKDIR /opt/netdata/source
+
+RUN git config --global user.email "root@container"
+RUN git config --global user.name "Fake root"
+
+# RUN make distclean -> not safe if tree state changed on host since last config
+# Remove everything that git does not track (including ignored files) while preserving any
+# fresh changes, i.e. untracked files will be deleted but local changes to tracked files are kept.
+RUN if git status --porcelain | grep '^[MADRC]'; then \
+ git stash && git clean -dxf && (git stash apply || true) ; \
+ else \
+ git clean -dxf ; \
+ fi
+
+# Not everybody is updating distclean properly - fix.
+RUN find . -name '*.Po' -exec rm \{\} \;
+RUN rm -rf autom4te.cache
+RUN rm -rf .git/
+RUN find . -type f >/opt/netdata/manifest
+
+RUN CFLAGS="-O1 -ggdb -Wall -Wextra -Wformat-signedness -fstack-protector-all -DNETDATA_INTERNAL_CHECKS=1\
+ -D_FORTIFY_SOURCE=2 -DNETDATA_VERIFY_LOCKS=1 ${EXTRA_CFLAGS}" ./netdata-installer.sh --disable-lto
+
+RUN ln -sf /dev/stdout /var/log/netdata/access.log
+RUN ln -sf /dev/stdout /var/log/netdata/debug.log
+RUN ln -sf /dev/stderr /var/log/netdata/error.log
+
+RUN rm /var/lib/netdata/registry/netdata.public.unique.id
+
+CMD ["/usr/sbin/netdata","-D"]
+ENTRYPOINT []
diff --git a/build_external/make-install.Dockerfile b/build_external/make-install.Dockerfile
new file mode 100644
index 0000000..1341b58
--- /dev/null
+++ b/build_external/make-install.Dockerfile
@@ -0,0 +1,11 @@
+ARG DISTRO=arch
+ARG VERSION=current
+
+FROM ${DISTRO}_${VERSION}_dev:latest
+
+# Sanitize the new source tree: only the files recorded in the manifest (taken before the
+# configure step) are copied over, so config-time state in the incoming tree is dropped.
+COPY . /opt/netdata/latest
+WORKDIR /opt/netdata/latest
+# cp -p preserves timestamps, so the subsequent `make install` only rebuilds what changed.
+RUN while read -r f; do cp -p "$f" "../source/$f"; done <../manifest
+WORKDIR /opt/netdata/source
+RUN make install
diff --git a/build_external/scenarios/aclk-testing/agent-compose.yml b/build_external/scenarios/aclk-testing/agent-compose.yml
new file mode 100644
index 0000000..c05c97c
--- /dev/null
+++ b/build_external/scenarios/aclk-testing/agent-compose.yml
@@ -0,0 +1,19 @@
+version: '3.3'
+services:
+ agent_parent:
+ build:
+ context: ../../..
+ dockerfile: build_external/make-install.Dockerfile
+ args:
+ - DISTRO=arch
+ - VERSION=current
+ image: arch_current_dev:latest
+ command: >
+ sh -c "echo -n 00000000-0000-0000-0000-000000000000 >/var/lib/netdata/cloud.d/claimed_id &&
+ echo '[agent_cloud_link]' >>/etc/netdata/netdata.conf &&
+ echo ' agent cloud link hostname = vernemq' >>/etc/netdata/netdata.conf &&
+ echo ' agent cloud link port = 9002' >>/etc/netdata/netdata.conf &&
+ /usr/sbin/netdata -D"
+ ports:
+ - 20000:19999
+
diff --git a/build_external/scenarios/aclk-testing/agent-valgrind-compose.yml b/build_external/scenarios/aclk-testing/agent-valgrind-compose.yml
new file mode 100644
index 0000000..d404ed4
--- /dev/null
+++ b/build_external/scenarios/aclk-testing/agent-valgrind-compose.yml
@@ -0,0 +1,19 @@
+version: '3.3'
+services:
+ agent_parent:
+ build:
+ context: ../../..
+ dockerfile: build_external/make-install.Dockerfile
+ args:
+ - DISTRO=arch
+ - VERSION=extras
+ image: arch_extras_dev:latest
+ command: >
+ sh -c "echo -n 00000000-0000-0000-0000-000000000000 >/var/lib/netdata/cloud.d/claimed_id &&
+ echo '[agent_cloud_link]' >>/etc/netdata/netdata.conf &&
+ echo ' agent cloud link hostname = vernemq' >>/etc/netdata/netdata.conf &&
+ echo ' agent cloud link port = 9002' >>/etc/netdata/netdata.conf &&
+ /usr/sbin/valgrind --leak-check=full /usr/sbin/netdata -D -W debug_flags=0x200000000"
+ ports:
+ - 20000:19999
+
diff --git a/build_external/scenarios/aclk-testing/agent_netdata.conf b/build_external/scenarios/aclk-testing/agent_netdata.conf
new file mode 100644
index 0000000..d13e514
--- /dev/null
+++ b/build_external/scenarios/aclk-testing/agent_netdata.conf
@@ -0,0 +1,7158 @@
+# netdata configuration
+#
+# You can download the latest version of this file, using:
+#
+# wget -O /etc/netdata/netdata.conf http://localhost:19999/netdata.conf
+# or
+# curl -o /etc/netdata/netdata.conf http://localhost:19999/netdata.conf
+#
+# You can uncomment and change any of the options below.
+# The value shown in the commented settings, is the default value.
+#
+
+# global netdata configuration
+
+[global]
+ # glibc malloc arena max for plugins = 1
+ # glibc malloc arena max for netdata = 1
+ # hostname = b073e16793c4
+ # history = 3996
+ # update every = 1
+ # config directory = /etc/netdata
+ # stock config directory = /usr/lib/netdata/conf.d
+ # log directory = /var/log/netdata
+ # web files directory = /usr/share/netdata/web
+ # cache directory = /var/cache/netdata
+ # lib directory = /var/lib/netdata
+ # home directory = /var/cache/netdata
+ # plugins directory = "/usr/libexec/netdata/plugins.d" "/etc/netdata/custom-plugins.d"
+ # memory mode = dbengine
+ # page cache size = 32
+ # dbengine disk space = 256
+ # host access prefix =
+ # memory deduplication (ksm) = yes
+ # TZ environment variable = :/etc/localtime
+ # timezone = Etc/UTC
+ # debug flags = 0x0000000000000000
+ # debug log = /var/log/netdata/debug.log
+ # error log = /var/log/netdata/error.log
+ # access log = /var/log/netdata/access.log
+ # facility log = daemon
+ # errors flood protection period = 1200
+ # errors to trigger flood protection = 200
+ # run as user = netdata
+ # OOM score = 1000
+ # process scheduling policy = idle
+ # pthread stack size = 8388608
+ # cleanup obsolete charts after seconds = 3600
+ # gap when lost iterations above = 1
+ # cleanup orphan hosts after seconds = 3600
+ # delete obsolete charts files = yes
+ # delete orphan hosts files = yes
+ # enable zero metrics = no
+
+[web]
+ # ssl key = /etc/netdata/ssl/key.pem
+ # ssl certificate = /etc/netdata/ssl/cert.pem
+ # ses max window = 15
+ # des max window = 15
+ # mode = static-threaded
+ # listen backlog = 4096
+ # default port = 19999
+ # bind to = *
+ # web files owner = netdata
+ # web files group = netdata
+ # disconnect idle clients after seconds = 60
+ # timeout for first request = 60
+ # accept a streaming request every seconds = 0
+ # respect do not track policy = no
+ # x-frame-options response header =
+ # allow connections from = localhost *
+ # allow connections by dns = heuristic
+ # allow dashboard from = localhost *
+ # allow dashboard by dns = heuristic
+ # allow badges from = *
+ # allow badges by dns = heuristic
+ # allow streaming from = *
+ # allow streaming by dns = heuristic
+ # allow netdata.conf from = localhost fd* 10.* 192.168.* 172.16.* 172.17.* 172.18.* 172.19.* 172.20.* 172.21.* 172.22.* 172.23.* 172.24.* 172.25.* 172.26.* 172.27.* 172.28.* 172.29.* 172.30.* 172.31.*
+ # allow netdata.conf by dns = no
+ # allow management from = localhost
+ # allow management by dns = heuristic
+ # enable gzip compression = yes
+ # gzip compression strategy = default
+ # gzip compression level = 3
+ # web server threads = 6
+ # web server max sockets = 262144
+
+[plugins]
+ # PATH environment variable = /usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/sbin:/usr/sbin:/usr/local/bin:/usr/local/sbin
+ # PYTHONPATH environment variable =
+ # checks = no
+ # proc = yes
+ # diskspace = yes
+ # cgroups = yes
+ # tc = yes
+ # idlejitter = yes
+ # enable running new plugins = yes
+ # check for new plugins every = 60
+ # slabinfo = no
+ # go.d = yes
+ # node.d = yes
+ # apps = yes
+ # charts.d = yes
+ # fping = yes
+ # python.d = yes
+ # perf = yes
+ # ioping = yes
+
+[health]
+ # silencers file = /var/lib/netdata/health.silencers.json
+ # enabled = yes
+ # default repeat warning = never
+ # default repeat critical = never
+ # in memory max health log entries = 1000
+ # script to execute on alarm = /usr/libexec/netdata/plugins.d/alarm-notify.sh
+ # stock health configuration directory = /usr/lib/netdata/conf.d/health.d
+ # health configuration directory = /etc/netdata/health.d
+ # rotate log every lines = 2000
+ # run at least every seconds = 10
+ # postpone alarms during hibernation for seconds = 60
+
+[registry]
+ # enabled = no
+ # registry db directory = /var/lib/netdata/registry
+ # netdata unique id file = /var/lib/netdata/registry/netdata.public.unique.id
+ # registry db file = /var/lib/netdata/registry/registry.db
+ # registry log file = /var/lib/netdata/registry/registry-log.db
+ # registry save db every new entries = 1000000
+ # registry expire idle persons days = 365
+ # registry domain =
+ # registry to announce = https://registry.my-netdata.io
+ # registry hostname = b073e16793c4
+ # verify browser cookies support = yes
+ # max URL length = 1024
+ # max URL name length = 50
+ # netdata management api key file = /var/lib/netdata/netdata.api.key
+ # allow from = *
+ # allow by dns = heuristic
+
+[cloud]
+ # cloud base url = https://netdata.cloud
+
+[backend]
+ # host tags =
+ # enabled = no
+ # data source = average
+ # type = graphite
+ # destination = localhost
+ # prefix = netdata
+ # hostname = b073e16793c4
+ # update every = 10
+ # buffer on failures = 10
+ # timeout ms = 20000
+ # send names instead of ids = yes
+ # send charts matching = *
+ # send hosts matching = localhost *
+
+[statsd]
+ # enabled = yes
+ # update every (flushInterval) = 1
+ # udp messages to process at once = 10
+ # create private charts for metrics matching = *
+ # max private charts allowed = 200
+ # max private charts hard limit = 1000
+ # private charts memory mode = dbengine
+ # private charts history = 3996
+ # decimal detail = 1000
+ # disconnect idle tcp clients after seconds = 600
+ # private charts hidden = no
+ # histograms and timers percentile (percentThreshold) = 95.00000
+ # add dimension for number of events received = yes
+ # gaps on gauges (deleteGauges) = no
+ # gaps on counters (deleteCounters) = no
+ # gaps on meters (deleteMeters) = no
+ # gaps on sets (deleteSets) = no
+ # gaps on histograms (deleteHistograms) = no
+ # gaps on timers (deleteTimers) = no
+ # statsd server max TCP sockets = 262144
+ # listen backlog = 4096
+ # default port = 8125
+ # bind to = udp:localhost tcp:localhost
+
+
+# per plugin configuration
+
+[plugin:cgroups]
+ # cgroups plugin resource charts = yes
+ # update every = 1
+ # check for new cgroups every = 10
+ # use unified cgroups = no
+ # containers priority = 40000
+ # enable cpuacct stat (total CPU) = auto
+ # enable cpuacct usage (per core CPU) = auto
+ # enable memory (used mem including cache) = auto
+ # enable detailed memory = auto
+ # enable memory limits fail count = auto
+ # enable swap memory = auto
+ # enable blkio bandwidth = auto
+ # enable blkio operations = auto
+ # enable blkio throttle bandwidth = auto
+ # enable blkio throttle operations = auto
+ # enable blkio queued operations = auto
+ # enable blkio merged operations = auto
+ # enable cpu pressure = auto
+ # enable io some pressure = auto
+ # enable io full pressure = auto
+ # enable memory some pressure = auto
+ # enable memory full pressure = auto
+ # recheck zero blkio every iterations = 10
+ # recheck zero memory failcnt every iterations = 10
+ # recheck zero detailed memory every iterations = 10
+ # enable systemd services = yes
+ # enable systemd services detailed memory = no
+ # report used memory without cache = yes
+ # path to /sys/fs/cgroup/cpuacct = /sys/fs/cgroup/cpu,cpuacct
+ # path to /sys/fs/cgroup/cpuset = /sys/fs/cgroup/cpuset
+ # path to /sys/fs/cgroup/blkio = /sys/fs/cgroup/blkio
+ # path to /sys/fs/cgroup/memory = /sys/fs/cgroup/memory
+ # path to /sys/fs/cgroup/devices = /sys/fs/cgroup/devices
+ # max cgroups to allow = 1000
+ # max cgroups depth to monitor = 0
+ # enable new cgroups detected at run time = yes
+ # enable by default cgroups matching = !*/init.scope !/system.slice/run-*.scope *.scope /machine.slice/*.service !*/vcpu* !*/emulator !*.mount !*.partition !*.service !*.socket !*.slice !*.swap !*.user !/ !/docker !/libvirt !/lxc !/lxc/*/* !/lxc.monitor !/lxc.pivot !/lxc.payload !/machine !/qemu !/system !/systemd !/user *
+ # search for cgroups in subpaths matching = !*/init.scope !*-qemu !*.libvirt-qemu !/init.scope !/system !/systemd !/user !/user.slice !/lxc/*/* !/lxc.monitor !/lxc.payload/*/* *
+ # script to get cgroup names = /usr/libexec/netdata/plugins.d/cgroup-name.sh
+ # script to get cgroup network interfaces = /usr/libexec/netdata/plugins.d/cgroup-network
+ # run script to rename cgroups matching = !/ !*.mount !*.socket !*.partition /machine.slice/*.service !*.service !*.slice !*.swap !*.user !init.scope !*.scope/vcpu* !*.scope/emulator *.scope *docker* *lxc* *qemu* *kubepods* *.libvirt-qemu *
+ # cgroups to match as systemd services = !/system.slice/*/*.service /system.slice/*.service
+
+[plugin:proc]
+ # netdata server resources = yes
+ # /proc/pagetypeinfo = no
+ # /proc/stat = yes
+ # /proc/uptime = yes
+ # /proc/loadavg = yes
+ # /proc/sys/kernel/random/entropy_avail = yes
+ # /proc/pressure = yes
+ # /proc/interrupts = yes
+ # /proc/softirqs = yes
+ # /proc/vmstat = yes
+ # /proc/meminfo = yes
+ # /sys/kernel/mm/ksm = yes
+ # /sys/block/zram = yes
+ # /sys/devices/system/edac/mc = yes
+ # /sys/devices/system/node = yes
+ # /proc/net/dev = yes
+ # /proc/net/sockstat = yes
+ # /proc/net/sockstat6 = yes
+ # /proc/net/netstat = yes
+ # /proc/net/snmp = yes
+ # /proc/net/snmp6 = yes
+ # /proc/net/sctp/snmp = yes
+ # /proc/net/softnet_stat = yes
+ # /proc/net/ip_vs/stats = yes
+ # /proc/net/stat/conntrack = yes
+ # /proc/net/stat/synproxy = yes
+ # /proc/diskstats = yes
+ # /proc/mdstat = yes
+ # /proc/net/rpc/nfsd = yes
+ # /proc/net/rpc/nfs = yes
+ # /proc/spl/kstat/zfs/arcstats = yes
+ # /sys/fs/btrfs = yes
+ # ipc = yes
+ # /sys/class/power_supply = yes
+
+[plugin:proc:diskspace]
+ # remove charts of unmounted disks = yes
+ # update every = 1
+ # check for new mount points every = 15
+ # exclude space metrics on paths = /proc/* /sys/* /var/run/user/* /run/user/* /snap/* /var/lib/docker/*
+ # exclude space metrics on filesystems = *gvfs *gluster* *s3fs *ipfs *davfs2 *httpfs *sshfs *gdfs *moosefs fusectl autofs
+ # space usage for all disks = auto
+ # inodes usage for all disks = auto
+
+[plugin:tc]
+ # script to run to get tc values = /usr/libexec/netdata/plugins.d/tc-qos-helper.sh
+
+[plugin:idlejitter]
+ # loop time in ms = 20
+
+[plugin:go.d]
+ # update every = 1
+ # command options =
+
+[plugin:node.d]
+ # update every = 1
+ # command options =
+
+[plugin:apps]
+ # update every = 1
+ # command options =
+
+[plugin:charts.d]
+ # update every = 1
+ # command options =
+
+[plugin:fping]
+ # update every = 1
+ # command options =
+
+[plugin:python.d]
+ # update every = 1
+ # command options =
+
+[plugin:perf]
+ # update every = 1
+ # command options =
+
+[plugin:ioping]
+ # update every = 1
+ # command options =
+
+[plugin:proc:/proc/stat]
+ # cpu utilization = yes
+ # per cpu core utilization = yes
+ # cpu interrupts = yes
+ # context switches = yes
+ # processes started = yes
+ # processes running = yes
+ # keep per core files open = yes
+ # keep cpuidle files open = yes
+ # core_throttle_count = auto
+ # package_throttle_count = no
+ # cpu frequency = yes
+ # cpu idle states = yes
+ # core_throttle_count filename to monitor = /sys/devices/system/cpu/%s/thermal_throttle/core_throttle_count
+ # package_throttle_count filename to monitor = /sys/devices/system/cpu/%s/thermal_throttle/package_throttle_count
+ # scaling_cur_freq filename to monitor = /sys/devices/system/cpu/%s/cpufreq/scaling_cur_freq
+ # time_in_state filename to monitor = /sys/devices/system/cpu/%s/cpufreq/stats/time_in_state
+ # schedstat filename to monitor = /proc/schedstat
+ # cpuidle name filename to monitor = /sys/devices/system/cpu/cpu%zu/cpuidle/state%zu/name
+ # cpuidle time filename to monitor = /sys/devices/system/cpu/cpu%zu/cpuidle/state%zu/time
+ # filename to monitor = /proc/stat
+
+[plugin:proc:diskspace:/]
+ # space usage = auto
+ # inodes usage = auto
+
+[plugin:proc:diskspace:/dev]
+ # space usage = auto
+ # inodes usage = auto
+
+[plugin:proc:diskspace:/sys/fs/cgroup]
+ # space usage = no
+ # inodes usage = no
+
+[plugin:proc:diskspace:/sys/fs/cgroup/systemd]
+ # space usage = no
+ # inodes usage = no
+
+[plugin:proc:diskspace:/sys/fs/cgroup/cpu,cpuacct]
+ # space usage = no
+ # inodes usage = no
+
+[plugin:proc:diskspace:/sys/fs/cgroup/devices]
+ # space usage = no
+ # inodes usage = no
+
+[plugin:proc:diskspace:/sys/fs/cgroup/blkio]
+ # space usage = no
+ # inodes usage = no
+
+[plugin:proc:diskspace:/sys/fs/cgroup/perf_event]
+ # space usage = no
+ # inodes usage = no
+
+[plugin:proc:diskspace:/sys/fs/cgroup/rdma]
+ # space usage = no
+ # inodes usage = no
+
+[plugin:proc:diskspace:/sys/fs/cgroup/net_cls,net_prio]
+ # space usage = no
+ # inodes usage = no
+
+[plugin:proc:diskspace:/sys/fs/cgroup/pids]
+ # space usage = no
+ # inodes usage = no
+
+[plugin:proc:diskspace:/sys/fs/cgroup/cpuset]
+ # space usage = no
+ # inodes usage = no
+
+[plugin:proc:diskspace:/sys/fs/cgroup/memory]
+ # space usage = no
+ # inodes usage = no
+
+[plugin:proc:diskspace:/sys/fs/cgroup/freezer]
+ # space usage = no
+ # inodes usage = no
+
+[plugin:proc:diskspace:/dev/shm]
+ # space usage = auto
+ # inodes usage = auto
+
+[plugin:proc:diskspace:/etc/resolv.conf]
+ # space usage = no
+ # inodes usage = no
+
+[plugin:proc:diskspace:/etc/hostname]
+ # space usage = no
+ # inodes usage = no
+
+[plugin:proc:diskspace:/etc/hosts]
+ # space usage = no
+ # inodes usage = no
+
+[plugin:proc:diskspace:/proc/asound]
+ # space usage = no
+ # inodes usage = no
+
+[plugin:proc:diskspace:/proc/acpi]
+ # space usage = no
+ # inodes usage = no
+
+[plugin:proc:diskspace:/proc/kcore]
+ # space usage = no
+ # inodes usage = no
+
+[plugin:proc:diskspace:/proc/keys]
+ # space usage = no
+ # inodes usage = no
+
+[plugin:proc:diskspace:/proc/timer_list]
+ # space usage = no
+ # inodes usage = no
+
+[plugin:proc:diskspace:/proc/sched_debug]
+ # space usage = no
+ # inodes usage = no
+
+[plugin:proc:diskspace:/sys/firmware]
+ # space usage = no
+ # inodes usage = no
+
+[plugin:proc:/proc/uptime]
+ # filename to monitor = /proc/uptime
+
+[plugin:proc:/proc/loadavg]
+ # filename to monitor = /proc/loadavg
+ # enable load average = yes
+ # enable total processes = yes
+
+[plugin:proc:/proc/sys/kernel/random/entropy_avail]
+ # filename to monitor = /proc/sys/kernel/random/entropy_avail
+
+[plugin:proc:/proc/pressure]
+ # base path of pressure metrics = /proc/pressure
+ # enable cpu some pressure = yes
+ # enable memory some pressure = yes
+ # enable memory full pressure = yes
+ # enable io some pressure = yes
+ # enable io full pressure = yes
+
+[plugin:proc:/proc/interrupts]
+ # interrupts per core = auto
+ # filename to monitor = /proc/interrupts
+
+[plugin:proc:/proc/softirqs]
+ # interrupts per core = auto
+ # filename to monitor = /proc/softirqs
+
+[plugin:proc:/proc/vmstat]
+ # swap i/o = auto
+ # disk i/o = yes
+ # memory page faults = yes
+ # system-wide numa metric summary = auto
+ # filename to monitor = /proc/vmstat
+
+[plugin:proc:/sys/devices/system/node]
+ # directory to monitor = /sys/devices/system/node
+ # enable per-node numa metrics = auto
+
+[plugin:proc:/proc/meminfo]
+ # system ram = yes
+ # system swap = auto
+ # hardware corrupted ECC = auto
+ # committed memory = yes
+ # writeback memory = yes
+ # kernel memory = yes
+ # slab memory = yes
+ # hugepages = auto
+ # transparent hugepages = auto
+ # filename to monitor = /proc/meminfo
+
+[plugin:proc:/sys/kernel/mm/ksm]
+ # /sys/kernel/mm/ksm/pages_shared = /sys/kernel/mm/ksm/pages_shared
+ # /sys/kernel/mm/ksm/pages_sharing = /sys/kernel/mm/ksm/pages_sharing
+ # /sys/kernel/mm/ksm/pages_unshared = /sys/kernel/mm/ksm/pages_unshared
+ # /sys/kernel/mm/ksm/pages_volatile = /sys/kernel/mm/ksm/pages_volatile
+
+[plugin:proc:/sys/devices/system/edac/mc]
+ # directory to monitor = /sys/devices/system/edac/mc
+
+[plugin:proc:/proc/net/dev]
+ # filename to monitor = /proc/net/dev
+ # path to get virtual interfaces = /sys/devices/virtual/net/%s
+ # path to get net device speed = /sys/class/net/%s/speed
+ # path to get net device duplex = /sys/class/net/%s/duplex
+ # path to get net device operstate = /sys/class/net/%s/operstate
+ # enable new interfaces detected at runtime = auto
+ # bandwidth for all interfaces = auto
+ # packets for all interfaces = auto
+ # errors for all interfaces = auto
+ # drops for all interfaces = auto
+ # fifo for all interfaces = auto
+ # compressed packets for all interfaces = auto
+ # frames, collisions, carrier counters for all interfaces = auto
+ # disable by default interfaces matching = lo fireqos* *-ifb
+ # refresh interface speed every seconds = 10
+ # refresh interface duplex every seconds = 10
+ # refresh interface operstate every seconds = 10
+
+[plugin:proc:/proc/net/dev:lo]
+ # enabled = no
+ # virtual = yes
+
+[plugin:proc:/proc/net/dev:eth0]
+ # enabled = yes
+ # virtual = yes
+ # bandwidth = auto
+ # packets = auto
+ # errors = auto
+ # drops = auto
+ # fifo = auto
+ # compressed = auto
+ # events = auto
+
+[plugin:proc:/proc/net/sockstat]
+ # ipv4 sockets = auto
+ # ipv4 TCP sockets = auto
+ # ipv4 TCP memory = auto
+ # ipv4 UDP sockets = auto
+ # ipv4 UDP memory = auto
+ # ipv4 UDPLITE sockets = auto
+ # ipv4 RAW sockets = auto
+ # ipv4 FRAG sockets = auto
+ # ipv4 FRAG memory = auto
+ # update constants every = 60
+ # filename to monitor = /proc/net/sockstat
+
+[plugin:proc:/proc/net/sockstat6]
+ # ipv6 TCP sockets = auto
+ # ipv6 UDP sockets = auto
+ # ipv6 UDPLITE sockets = auto
+ # ipv6 RAW sockets = auto
+ # ipv6 FRAG sockets = auto
+ # filename to monitor = /proc/net/sockstat6
+
+[plugin:proc:/proc/net/netstat]
+ # bandwidth = auto
+ # input errors = auto
+ # multicast bandwidth = auto
+ # broadcast bandwidth = auto
+ # multicast packets = auto
+ # broadcast packets = auto
+ # ECN packets = auto
+ # TCP reorders = auto
+ # TCP SYN cookies = auto
+ # TCP out-of-order queue = auto
+ # TCP connection aborts = auto
+ # TCP memory pressures = auto
+ # TCP SYN queue = auto
+ # TCP accept queue = auto
+ # filename to monitor = /proc/net/netstat
+
+[plugin:proc:/proc/net/snmp]
+ # ipv4 packets = auto
+ # ipv4 fragments sent = auto
+ # ipv4 fragments assembly = auto
+ # ipv4 errors = auto
+ # ipv4 TCP connections = auto
+ # ipv4 TCP packets = auto
+ # ipv4 TCP errors = auto
+ # ipv4 TCP opens = auto
+ # ipv4 TCP handshake issues = auto
+ # ipv4 UDP packets = auto
+ # ipv4 UDP errors = auto
+ # ipv4 ICMP packets = auto
+ # ipv4 ICMP messages = auto
+ # ipv4 UDPLite packets = auto
+ # filename to monitor = /proc/net/snmp
+
+[plugin:proc:/proc/net/snmp6]
+ # ipv6 packets = auto
+ # ipv6 fragments sent = auto
+ # ipv6 fragments assembly = auto
+ # ipv6 errors = auto
+ # ipv6 UDP packets = auto
+ # ipv6 UDP errors = auto
+ # ipv6 UDPlite packets = auto
+ # ipv6 UDPlite errors = auto
+ # bandwidth = auto
+ # multicast bandwidth = auto
+ # broadcast bandwidth = auto
+ # multicast packets = auto
+ # icmp = auto
+ # icmp redirects = auto
+ # icmp errors = auto
+ # icmp echos = auto
+ # icmp group membership = auto
+ # icmp router = auto
+ # icmp neighbor = auto
+ # icmp mldv2 = auto
+ # icmp types = auto
+ # ect = auto
+ # filename to monitor = /proc/net/snmp6
+
+[plugin:proc:/proc/net/sctp/snmp]
+ # established associations = auto
+ # association transitions = auto
+ # fragmentation = auto
+ # packets = auto
+ # packet errors = auto
+ # chunk types = auto
+ # filename to monitor = /proc/net/sctp/snmp
+
+[plugin:proc:/proc/net/softnet_stat]
+ # softnet_stat per core = yes
+ # filename to monitor = /proc/net/softnet_stat
+
+[plugin:proc:/proc/net/ip_vs_stats]
+ # IPVS bandwidth = yes
+ # IPVS connections = yes
+ # IPVS packets = yes
+ # filename to monitor = /proc/net/ip_vs_stats
+
+[plugin:proc:/proc/net/stat/nf_conntrack]
+ # filename to monitor = /proc/net/stat/nf_conntrack
+ # netfilter new connections = yes
+ # netfilter connection changes = yes
+ # netfilter connection expectations = yes
+ # netfilter connection searches = yes
+ # netfilter errors = yes
+ # netfilter connections = yes
+
+[plugin:proc:/proc/sys/net/netfilter/nf_conntrack_max]
+ # filename to monitor = /proc/sys/net/netfilter/nf_conntrack_max
+ # read every seconds = 10
+
+[plugin:proc:/proc/net/stat/synproxy]
+ # SYNPROXY entries = auto
+ # SYNPROXY cookies = auto
+ # SYNPROXY SYN received = auto
+ # SYNPROXY connections reopened = auto
+ # filename to monitor = /proc/net/stat/synproxy
+
+[plugin:proc:/proc/diskstats]
+ # enable new disks detected at runtime = yes
+ # performance metrics for physical disks = auto
+ # performance metrics for virtual disks = auto
+ # performance metrics for partitions = no
+ # bandwidth for all disks = auto
+ # operations for all disks = auto
+ # merged operations for all disks = auto
+ # i/o time for all disks = auto
+ # queued operations for all disks = auto
+ # utilization percentage for all disks = auto
+ # backlog for all disks = auto
+ # bcache for all disks = auto
+ # bcache priority stats update every = 0
+ # remove charts of removed disks = yes
+ # path to get block device = /sys/block/%s
+ # path to get block device bcache = /sys/block/%s/bcache
+ # path to get virtual block device = /sys/devices/virtual/block/%s
+ # path to get block device infos = /sys/dev/block/%lu:%lu/%s
+ # path to device mapper = /dev/mapper
+ # path to /dev/disk/by-label = /dev/disk/by-label
+ # path to /dev/disk/by-id = /dev/disk/by-id
+ # path to /dev/vx/dsk = /dev/vx/dsk
+ # name disks by id = no
+ # preferred disk ids = *
+ # exclude disks = loop* ram*
+ # filename to monitor = /proc/diskstats
+ # performance metrics for disks with major 8 = yes
+
+[plugin:proc:/proc/diskstats:sda]
+ # enable = yes
+ # enable performance metrics = yes
+ # bandwidth = auto
+ # operations = auto
+ # merged operations = auto
+ # i/o time = auto
+ # queued operations = auto
+ # utilization percentage = auto
+ # backlog = auto
+
+[plugin:proc:/proc/diskstats:sda1]
+ # enable = yes
+ # enable performance metrics = no
+ # bandwidth = no
+ # operations = no
+ # merged operations = no
+ # i/o time = no
+ # queued operations = no
+ # utilization percentage = no
+ # backlog = no
+
+[plugin:proc:/proc/diskstats:sda2]
+ # enable = yes
+ # enable performance metrics = no
+ # bandwidth = no
+ # operations = no
+ # merged operations = no
+ # i/o time = no
+ # queued operations = no
+ # utilization percentage = no
+ # backlog = no
+
+[plugin:proc:/proc/diskstats:sda3]
+ # enable = yes
+ # enable performance metrics = no
+ # bandwidth = no
+ # operations = no
+ # merged operations = no
+ # i/o time = no
+ # queued operations = no
+ # utilization percentage = no
+ # backlog = no
+
+[plugin:proc:/proc/diskstats:sda4]
+ # enable = yes
+ # enable performance metrics = no
+ # bandwidth = no
+ # operations = no
+ # merged operations = no
+ # i/o time = no
+ # queued operations = no
+ # utilization percentage = no
+ # backlog = no
+
+[plugin:proc:/proc/diskstats:sda5]
+ # enable = yes
+ # enable performance metrics = no
+ # bandwidth = no
+ # operations = no
+ # merged operations = no
+ # i/o time = no
+ # queued operations = no
+ # utilization percentage = no
+ # backlog = no
+
+[plugin:proc:/proc/diskstats:sdb]
+ # enable = yes
+ # enable performance metrics = yes
+ # bandwidth = auto
+ # operations = auto
+ # merged operations = auto
+ # i/o time = auto
+ # queued operations = auto
+ # utilization percentage = auto
+ # backlog = auto
+
+[plugin:proc:/proc/diskstats:sdb1]
+ # enable = yes
+ # enable performance metrics = no
+ # bandwidth = no
+ # operations = no
+ # merged operations = no
+ # i/o time = no
+ # queued operations = no
+ # utilization percentage = no
+ # backlog = no
+
+[plugin:proc:/proc/diskstats:sdb2]
+ # enable = yes
+ # enable performance metrics = no
+ # bandwidth = no
+ # operations = no
+ # merged operations = no
+ # i/o time = no
+ # queued operations = no
+ # utilization percentage = no
+ # backlog = no
+
+[plugin:proc:/proc/diskstats:sdb3]
+ # enable = yes
+ # enable performance metrics = no
+ # bandwidth = no
+ # operations = no
+ # merged operations = no
+ # i/o time = no
+ # queued operations = no
+ # utilization percentage = no
+ # backlog = no
+
+[plugin:proc:/proc/diskstats:sdb4]
+ # enable = yes
+ # enable performance metrics = no
+ # bandwidth = no
+ # operations = no
+ # merged operations = no
+ # i/o time = no
+ # queued operations = no
+ # utilization percentage = no
+ # backlog = no
+
+[plugin:proc:/proc/diskstats:sdb5]
+ # enable = yes
+ # enable performance metrics = no
+ # bandwidth = no
+ # operations = no
+ # merged operations = no
+ # i/o time = no
+ # queued operations = no
+ # utilization percentage = no
+ # backlog = no
+
+[plugin:proc:/proc/mdstat]
+ # faulty devices = yes
+ # nonredundant arrays availability = yes
+ # mismatch count = auto
+ # disk stats = yes
+ # operation status = yes
+ # make charts obsolete = yes
+ # filename to monitor = /proc/mdstat
+ # mismatch_cnt filename to monitor = /sys/block/%s/md/mismatch_cnt
+
+[plugin:proc:/proc/net/rpc/nfsd]
+ # filename to monitor = /proc/net/rpc/nfsd
+
+[plugin:proc:/proc/net/rpc/nfs]
+ # filename to monitor = /proc/net/rpc/nfs
+
+[plugin:proc:/proc/spl/kstat/zfs/arcstats]
+ # filename to monitor = /proc/spl/kstat/zfs/arcstats
+
+[plugin:proc:/sys/fs/btrfs]
+ # path to monitor = /sys/fs/btrfs
+ # check for btrfs changes every = 60
+ # physical disks allocation = auto
+ # data allocation = auto
+ # metadata allocation = auto
+ # system allocation = auto
+
+[plugin:proc:ipc]
+ # message queues = yes
+ # semaphore totals = yes
+ # shared memory totals = yes
+ # msg filename to monitor = /proc/sysvipc/msg
+ # shm filename to monitor = /proc/sysvipc/shm
+ # max dimensions in memory allowed = 50
+
+[plugin:proc:/sys/class/power_supply]
+ # battery capacity = yes
+ # battery charge = no
+ # battery energy = no
+ # power supply voltage = no
+ # keep files open = auto
+ # directory to monitor = /sys/class/power_supply
+
+
+# per chart configuration
+
+[system.idlejitter]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/system.idlejitter
+ # chart type = area
+ # type = system
+ # family = idlejitter
+ # units = microseconds lost/s
+ # context = system.idlejitter
+ # priority = 800
+ # name = system.idlejitter
+ # title = CPU Idle Jitter
+ # dim min name = min
+ # dim min algorithm = absolute
+ # dim min multiplier = 1
+ # dim min divisor = 1
+ # dim max name = max
+ # dim max algorithm = absolute
+ # dim max multiplier = 1
+ # dim max divisor = 1
+ # dim average name = average
+ # dim average algorithm = absolute
+ # dim average multiplier = 1
+ # dim average divisor = 1
+
+[netdata.statsd_metrics]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/netdata.statsd_metrics
+ # chart type = stacked
+ # type = netdata
+ # family = statsd
+ # units = metrics
+ # context = netdata.statsd_metrics
+ # priority = 132010
+ # name = netdata.statsd_metrics
+ # title = Metrics in the netdata statsd database
+ # dim gauges name = gauges
+ # dim gauges algorithm = absolute
+ # dim gauges multiplier = 1
+ # dim gauges divisor = 1
+ # dim counters name = counters
+ # dim counters algorithm = absolute
+ # dim counters multiplier = 1
+ # dim counters divisor = 1
+ # dim timers name = timers
+ # dim timers algorithm = absolute
+ # dim timers multiplier = 1
+ # dim timers divisor = 1
+ # dim meters name = meters
+ # dim meters algorithm = absolute
+ # dim meters multiplier = 1
+ # dim meters divisor = 1
+ # dim histograms name = histograms
+ # dim histograms algorithm = absolute
+ # dim histograms multiplier = 1
+ # dim histograms divisor = 1
+ # dim sets name = sets
+ # dim sets algorithm = absolute
+ # dim sets multiplier = 1
+ # dim sets divisor = 1
+
+[netdata.statsd_useful_metrics]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/netdata.statsd_useful_metrics
+ # chart type = stacked
+ # type = netdata
+ # family = statsd
+ # units = metrics
+ # context = netdata.statsd_useful_metrics
+ # priority = 132010
+ # name = netdata.statsd_useful_metrics
+ # title = Useful metrics in the netdata statsd database
+ # dim gauges name = gauges
+ # dim gauges algorithm = absolute
+ # dim gauges multiplier = 1
+ # dim gauges divisor = 1
+ # dim counters name = counters
+ # dim counters algorithm = absolute
+ # dim counters multiplier = 1
+ # dim counters divisor = 1
+ # dim timers name = timers
+ # dim timers algorithm = absolute
+ # dim timers multiplier = 1
+ # dim timers divisor = 1
+ # dim meters name = meters
+ # dim meters algorithm = absolute
+ # dim meters multiplier = 1
+ # dim meters divisor = 1
+ # dim histograms name = histograms
+ # dim histograms algorithm = absolute
+ # dim histograms multiplier = 1
+ # dim histograms divisor = 1
+ # dim sets name = sets
+ # dim sets algorithm = absolute
+ # dim sets multiplier = 1
+ # dim sets divisor = 1
+
+[netdata.statsd_events]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/netdata.statsd_events
+ # chart type = stacked
+ # type = netdata
+ # family = statsd
+ # units = events/s
+ # context = netdata.statsd_events
+ # priority = 132011
+ # name = netdata.statsd_events
+ # title = Events processed by the netdata statsd server
+ # dim gauges name = gauges
+ # dim gauges algorithm = incremental
+ # dim gauges multiplier = 1
+ # dim gauges divisor = 1
+ # dim counters name = counters
+ # dim counters algorithm = incremental
+ # dim counters multiplier = 1
+ # dim counters divisor = 1
+ # dim timers name = timers
+ # dim timers algorithm = incremental
+ # dim timers multiplier = 1
+ # dim timers divisor = 1
+ # dim meters name = meters
+ # dim meters algorithm = incremental
+ # dim meters multiplier = 1
+ # dim meters divisor = 1
+ # dim histograms name = histograms
+ # dim histograms algorithm = incremental
+ # dim histograms multiplier = 1
+ # dim histograms divisor = 1
+ # dim sets name = sets
+ # dim sets algorithm = incremental
+ # dim sets multiplier = 1
+ # dim sets divisor = 1
+ # dim unknown name = unknown
+ # dim unknown algorithm = incremental
+ # dim unknown multiplier = 1
+ # dim unknown divisor = 1
+ # dim errors name = errors
+ # dim errors algorithm = incremental
+ # dim errors multiplier = 1
+ # dim errors divisor = 1
+
+[netdata.statsd_reads]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/netdata.statsd_reads
+ # chart type = stacked
+ # type = netdata
+ # family = statsd
+ # units = reads/s
+ # context = netdata.statsd_reads
+ # priority = 132012
+ # name = netdata.statsd_reads
+ # title = Read operations made by the netdata statsd server
+ # dim tcp name = tcp
+ # dim tcp algorithm = incremental
+ # dim tcp multiplier = 1
+ # dim tcp divisor = 1
+ # dim udp name = udp
+ # dim udp algorithm = incremental
+ # dim udp multiplier = 1
+ # dim udp divisor = 1
+
+[netdata.statsd_bytes]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/netdata.statsd_bytes
+ # chart type = stacked
+ # type = netdata
+ # family = statsd
+ # units = kilobits/s
+ # context = netdata.statsd_bytes
+ # priority = 132013
+ # name = netdata.statsd_bytes
+ # title = Bytes read by the netdata statsd server
+ # dim tcp name = tcp
+ # dim tcp algorithm = incremental
+ # dim tcp multiplier = 8
+ # dim tcp divisor = 1000
+ # dim udp name = udp
+ # dim udp algorithm = incremental
+ # dim udp multiplier = 8
+ # dim udp divisor = 1000
+
+[netdata.statsd_packets]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/netdata.statsd_packets
+ # chart type = stacked
+ # type = netdata
+ # family = statsd
+ # units = packets/s
+ # context = netdata.statsd_packets
+ # priority = 132014
+ # name = netdata.statsd_packets
+ # title = Network packets processed by the netdata statsd server
+ # dim tcp name = tcp
+ # dim tcp algorithm = incremental
+ # dim tcp multiplier = 1
+ # dim tcp divisor = 1
+ # dim udp name = udp
+ # dim udp algorithm = incremental
+ # dim udp multiplier = 1
+ # dim udp divisor = 1
+
+[netdata.tcp_connects]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/netdata.tcp_connects
+ # chart type = line
+ # type = netdata
+ # family = statsd
+ # units = events
+ # context = netdata.tcp_connects
+ # priority = 132015
+ # name = netdata.tcp_connects
+ # title = statsd server TCP connects and disconnects
+ # dim connects name = connects
+ # dim connects algorithm = incremental
+ # dim connects multiplier = 1
+ # dim connects divisor = 1
+ # dim disconnects name = disconnects
+ # dim disconnects algorithm = incremental
+ # dim disconnects multiplier = -1
+ # dim disconnects divisor = 1
+
+[netdata.tcp_connected]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/netdata.tcp_connected
+ # chart type = line
+ # type = netdata
+ # family = statsd
+ # units = sockets
+ # context = netdata.tcp_connected
+ # priority = 132016
+ # name = netdata.tcp_connected
+ # title = statsd server TCP connected sockets
+ # dim connected name = connected
+ # dim connected algorithm = absolute
+ # dim connected multiplier = 1
+ # dim connected divisor = 1
+
+[netdata.private_charts]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/netdata.private_charts
+ # chart type = area
+ # type = netdata
+ # family = statsd
+ # units = charts
+ # context = netdata.private_charts
+ # priority = 132020
+ # name = netdata.private_charts
+ # title = Private metric charts created by the netdata statsd server
+ # dim charts name = charts
+ # dim charts algorithm = absolute
+ # dim charts multiplier = 1
+ # dim charts divisor = 1
+
+[netdata.plugin_statsd_charting_cpu]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/netdata.plugin_statsd_charting_cpu
+ # chart type = stacked
+ # type = netdata
+ # family = statsd
+ # units = milliseconds/s
+ # context = netdata.statsd_cpu
+ # priority = 132001
+ # name = netdata.plugin_statsd_charting_cpu
+ # title = NetData statsd charting thread CPU usage
+ # dim user name = user
+ # dim user algorithm = incremental
+ # dim user multiplier = 1
+ # dim user divisor = 1000
+ # dim system name = system
+ # dim system algorithm = incremental
+ # dim system multiplier = 1
+ # dim system divisor = 1000
+
+[netdata.plugin_statsd_collector1_cpu]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/netdata.plugin_statsd_collector1_cpu
+ # chart type = stacked
+ # type = netdata
+ # family = statsd
+ # units = milliseconds/s
+ # context = netdata.statsd_cpu
+ # priority = 132002
+ # name = netdata.plugin_statsd_collector1_cpu
+ # title = NetData statsd collector thread No 1 CPU usage
+ # dim user name = user
+ # dim user algorithm = incremental
+ # dim user multiplier = 1
+ # dim user divisor = 1000
+ # dim system name = system
+ # dim system algorithm = incremental
+ # dim system multiplier = 1
+ # dim system divisor = 1000
+
+[system.cpu]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/system.cpu
+ # chart type = stacked
+ # type = system
+ # family = cpu
+ # units = percentage
+ # context = system.cpu
+ # priority = 100
+ # name = system.cpu
+ # title = Total CPU utilization
+ # dim guest_nice name = guest_nice
+ # dim guest_nice algorithm = percentage-of-incremental-row
+ # dim guest_nice multiplier = 1
+ # dim guest_nice divisor = 1
+ # dim guest name = guest
+ # dim guest algorithm = percentage-of-incremental-row
+ # dim guest multiplier = 1
+ # dim guest divisor = 1
+ # dim steal name = steal
+ # dim steal algorithm = percentage-of-incremental-row
+ # dim steal multiplier = 1
+ # dim steal divisor = 1
+ # dim softirq name = softirq
+ # dim softirq algorithm = percentage-of-incremental-row
+ # dim softirq multiplier = 1
+ # dim softirq divisor = 1
+ # dim irq name = irq
+ # dim irq algorithm = percentage-of-incremental-row
+ # dim irq multiplier = 1
+ # dim irq divisor = 1
+ # dim user name = user
+ # dim user algorithm = percentage-of-incremental-row
+ # dim user multiplier = 1
+ # dim user divisor = 1
+ # dim system name = system
+ # dim system algorithm = percentage-of-incremental-row
+ # dim system multiplier = 1
+ # dim system divisor = 1
+ # dim nice name = nice
+ # dim nice algorithm = percentage-of-incremental-row
+ # dim nice multiplier = 1
+ # dim nice divisor = 1
+ # dim iowait name = iowait
+ # dim iowait algorithm = percentage-of-incremental-row
+ # dim iowait multiplier = 1
+ # dim iowait divisor = 1
+ # dim idle name = idle
+ # dim idle algorithm = percentage-of-incremental-row
+ # dim idle multiplier = 1
+ # dim idle divisor = 1
+
+[cpu.cpu0]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/cpu.cpu0
+ # chart type = stacked
+ # type = cpu
+ # family = utilization
+ # units = percentage
+ # context = cpu.cpu
+ # priority = 1001
+ # name = cpu.cpu0
+ # title = Core utilization
+ # dim guest_nice name = guest_nice
+ # dim guest_nice algorithm = percentage-of-incremental-row
+ # dim guest_nice multiplier = 1
+ # dim guest_nice divisor = 1
+ # dim guest name = guest
+ # dim guest algorithm = percentage-of-incremental-row
+ # dim guest multiplier = 1
+ # dim guest divisor = 1
+ # dim steal name = steal
+ # dim steal algorithm = percentage-of-incremental-row
+ # dim steal multiplier = 1
+ # dim steal divisor = 1
+ # dim softirq name = softirq
+ # dim softirq algorithm = percentage-of-incremental-row
+ # dim softirq multiplier = 1
+ # dim softirq divisor = 1
+ # dim irq name = irq
+ # dim irq algorithm = percentage-of-incremental-row
+ # dim irq multiplier = 1
+ # dim irq divisor = 1
+ # dim user name = user
+ # dim user algorithm = percentage-of-incremental-row
+ # dim user multiplier = 1
+ # dim user divisor = 1
+ # dim system name = system
+ # dim system algorithm = percentage-of-incremental-row
+ # dim system multiplier = 1
+ # dim system divisor = 1
+ # dim nice name = nice
+ # dim nice algorithm = percentage-of-incremental-row
+ # dim nice multiplier = 1
+ # dim nice divisor = 1
+ # dim iowait name = iowait
+ # dim iowait algorithm = percentage-of-incremental-row
+ # dim iowait multiplier = 1
+ # dim iowait divisor = 1
+ # dim idle name = idle
+ # dim idle algorithm = percentage-of-incremental-row
+ # dim idle multiplier = 1
+ # dim idle divisor = 1
+
+[cpu.cpu1]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/cpu.cpu1
+ # chart type = stacked
+ # type = cpu
+ # family = utilization
+ # units = percentage
+ # context = cpu.cpu
+ # priority = 1002
+ # name = cpu.cpu1
+ # title = Core utilization
+ # dim guest_nice name = guest_nice
+ # dim guest_nice algorithm = percentage-of-incremental-row
+ # dim guest_nice multiplier = 1
+ # dim guest_nice divisor = 1
+ # dim guest name = guest
+ # dim guest algorithm = percentage-of-incremental-row
+ # dim guest multiplier = 1
+ # dim guest divisor = 1
+ # dim steal name = steal
+ # dim steal algorithm = percentage-of-incremental-row
+ # dim steal multiplier = 1
+ # dim steal divisor = 1
+ # dim softirq name = softirq
+ # dim softirq algorithm = percentage-of-incremental-row
+ # dim softirq multiplier = 1
+ # dim softirq divisor = 1
+ # dim irq name = irq
+ # dim irq algorithm = percentage-of-incremental-row
+ # dim irq multiplier = 1
+ # dim irq divisor = 1
+ # dim user name = user
+ # dim user algorithm = percentage-of-incremental-row
+ # dim user multiplier = 1
+ # dim user divisor = 1
+ # dim system name = system
+ # dim system algorithm = percentage-of-incremental-row
+ # dim system multiplier = 1
+ # dim system divisor = 1
+ # dim nice name = nice
+ # dim nice algorithm = percentage-of-incremental-row
+ # dim nice multiplier = 1
+ # dim nice divisor = 1
+ # dim iowait name = iowait
+ # dim iowait algorithm = percentage-of-incremental-row
+ # dim iowait multiplier = 1
+ # dim iowait divisor = 1
+ # dim idle name = idle
+ # dim idle algorithm = percentage-of-incremental-row
+ # dim idle multiplier = 1
+ # dim idle divisor = 1
+
+[disk_space._]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/disk_space._
+ # chart type = stacked
+ # type = disk_space
+ # family = /
+ # units = GiB
+ # context = disk.space
+ # priority = 2023
+ # name = disk_space._
+ # title = Disk Space Usage for / [overlay]
+ # dim avail name = avail
+ # dim avail algorithm = absolute
+ # dim avail multiplier = 4096
+ # dim avail divisor = 1073741824
+ # dim used name = used
+ # dim used algorithm = absolute
+ # dim used multiplier = 4096
+ # dim used divisor = 1073741824
+ # dim reserved_for_root name = reserved for root
+ # dim reserved_for_root algorithm = absolute
+ # dim reserved_for_root multiplier = 4096
+ # dim reserved_for_root divisor = 1073741824
+
+[disk_inodes._]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/disk_inodes._
+ # chart type = stacked
+ # type = disk_inodes
+ # family = /
+ # units = inodes
+ # context = disk.inodes
+ # priority = 2024
+ # name = disk_inodes._
+ # title = Disk Files (inodes) Usage for / [overlay]
+ # dim avail name = avail
+ # dim avail algorithm = absolute
+ # dim avail multiplier = 1
+ # dim avail divisor = 1
+ # dim used name = used
+ # dim used algorithm = absolute
+ # dim used multiplier = 1
+ # dim used divisor = 1
+ # dim reserved_for_root name = reserved for root
+ # dim reserved_for_root algorithm = absolute
+ # dim reserved_for_root multiplier = 1
+ # dim reserved_for_root divisor = 1
+
+[disk_space._dev]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/disk_space._dev
+ # chart type = stacked
+ # type = disk_space
+ # family = /dev
+ # units = GiB
+ # context = disk.space
+ # priority = 2023
+ # name = disk_space._dev
+ # title = Disk Space Usage for /dev [tmpfs]
+ # dim avail name = avail
+ # dim avail algorithm = absolute
+ # dim avail multiplier = 4096
+ # dim avail divisor = 1073741824
+ # dim used name = used
+ # dim used algorithm = absolute
+ # dim used multiplier = 4096
+ # dim used divisor = 1073741824
+ # dim reserved_for_root name = reserved for root
+ # dim reserved_for_root algorithm = absolute
+ # dim reserved_for_root multiplier = 4096
+ # dim reserved_for_root divisor = 1073741824
+
+[disk_inodes._dev]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/disk_inodes._dev
+ # chart type = stacked
+ # type = disk_inodes
+ # family = /dev
+ # units = inodes
+ # context = disk.inodes
+ # priority = 2024
+ # name = disk_inodes._dev
+ # title = Disk Files (inodes) Usage for /dev [tmpfs]
+ # dim avail name = avail
+ # dim avail algorithm = absolute
+ # dim avail multiplier = 1
+ # dim avail divisor = 1
+ # dim used name = used
+ # dim used algorithm = absolute
+ # dim used multiplier = 1
+ # dim used divisor = 1
+ # dim reserved_for_root name = reserved for root
+ # dim reserved_for_root algorithm = absolute
+ # dim reserved_for_root multiplier = 1
+ # dim reserved_for_root divisor = 1
+
+[disk_space._dev_shm]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/disk_space._dev_shm
+ # chart type = stacked
+ # type = disk_space
+ # family = /dev/shm
+ # units = GiB
+ # context = disk.space
+ # priority = 2023
+ # name = disk_space._dev_shm
+ # title = Disk Space Usage for /dev/shm [shm]
+ # dim avail name = avail
+ # dim avail algorithm = absolute
+ # dim avail multiplier = 4096
+ # dim avail divisor = 1073741824
+ # dim used name = used
+ # dim used algorithm = absolute
+ # dim used multiplier = 4096
+ # dim used divisor = 1073741824
+ # dim reserved_for_root name = reserved for root
+ # dim reserved_for_root algorithm = absolute
+ # dim reserved_for_root multiplier = 4096
+ # dim reserved_for_root divisor = 1073741824
+
+[disk_inodes._dev_shm]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/disk_inodes._dev_shm
+ # chart type = stacked
+ # type = disk_inodes
+ # family = /dev/shm
+ # units = inodes
+ # context = disk.inodes
+ # priority = 2024
+ # name = disk_inodes._dev_shm
+ # title = Disk Files (inodes) Usage for /dev/shm [shm]
+ # dim avail name = avail
+ # dim avail algorithm = absolute
+ # dim avail multiplier = 1
+ # dim avail divisor = 1
+ # dim used name = used
+ # dim used algorithm = absolute
+ # dim used multiplier = 1
+ # dim used divisor = 1
+ # dim reserved_for_root name = reserved for root
+ # dim reserved_for_root algorithm = absolute
+ # dim reserved_for_root multiplier = 1
+ # dim reserved_for_root divisor = 1
+
+[cpu.cpu2]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/cpu.cpu2
+ # chart type = stacked
+ # type = cpu
+ # family = utilization
+ # units = percentage
+ # context = cpu.cpu
+ # priority = 1003
+ # name = cpu.cpu2
+ # title = Core utilization
+ # dim guest_nice name = guest_nice
+ # dim guest_nice algorithm = percentage-of-incremental-row
+ # dim guest_nice multiplier = 1
+ # dim guest_nice divisor = 1
+ # dim guest name = guest
+ # dim guest algorithm = percentage-of-incremental-row
+ # dim guest multiplier = 1
+ # dim guest divisor = 1
+ # dim steal name = steal
+ # dim steal algorithm = percentage-of-incremental-row
+ # dim steal multiplier = 1
+ # dim steal divisor = 1
+ # dim softirq name = softirq
+ # dim softirq algorithm = percentage-of-incremental-row
+ # dim softirq multiplier = 1
+ # dim softirq divisor = 1
+ # dim irq name = irq
+ # dim irq algorithm = percentage-of-incremental-row
+ # dim irq multiplier = 1
+ # dim irq divisor = 1
+ # dim user name = user
+ # dim user algorithm = percentage-of-incremental-row
+ # dim user multiplier = 1
+ # dim user divisor = 1
+ # dim system name = system
+ # dim system algorithm = percentage-of-incremental-row
+ # dim system multiplier = 1
+ # dim system divisor = 1
+ # dim nice name = nice
+ # dim nice algorithm = percentage-of-incremental-row
+ # dim nice multiplier = 1
+ # dim nice divisor = 1
+ # dim iowait name = iowait
+ # dim iowait algorithm = percentage-of-incremental-row
+ # dim iowait multiplier = 1
+ # dim iowait divisor = 1
+ # dim idle name = idle
+ # dim idle algorithm = percentage-of-incremental-row
+ # dim idle multiplier = 1
+ # dim idle divisor = 1
+
+[cpu.cpu3]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/cpu.cpu3
+ # chart type = stacked
+ # type = cpu
+ # family = utilization
+ # units = percentage
+ # context = cpu.cpu
+ # priority = 1004
+ # name = cpu.cpu3
+ # title = Core utilization
+ # dim guest_nice name = guest_nice
+ # dim guest_nice algorithm = percentage-of-incremental-row
+ # dim guest_nice multiplier = 1
+ # dim guest_nice divisor = 1
+ # dim guest name = guest
+ # dim guest algorithm = percentage-of-incremental-row
+ # dim guest multiplier = 1
+ # dim guest divisor = 1
+ # dim steal name = steal
+ # dim steal algorithm = percentage-of-incremental-row
+ # dim steal multiplier = 1
+ # dim steal divisor = 1
+ # dim softirq name = softirq
+ # dim softirq algorithm = percentage-of-incremental-row
+ # dim softirq multiplier = 1
+ # dim softirq divisor = 1
+ # dim irq name = irq
+ # dim irq algorithm = percentage-of-incremental-row
+ # dim irq multiplier = 1
+ # dim irq divisor = 1
+ # dim user name = user
+ # dim user algorithm = percentage-of-incremental-row
+ # dim user multiplier = 1
+ # dim user divisor = 1
+ # dim system name = system
+ # dim system algorithm = percentage-of-incremental-row
+ # dim system multiplier = 1
+ # dim system divisor = 1
+ # dim nice name = nice
+ # dim nice algorithm = percentage-of-incremental-row
+ # dim nice multiplier = 1
+ # dim nice divisor = 1
+ # dim iowait name = iowait
+ # dim iowait algorithm = percentage-of-incremental-row
+ # dim iowait multiplier = 1
+ # dim iowait divisor = 1
+ # dim idle name = idle
+ # dim idle algorithm = percentage-of-incremental-row
+ # dim idle multiplier = 1
+ # dim idle divisor = 1
+
+[cpu.cpu4]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/cpu.cpu4
+ # chart type = stacked
+ # type = cpu
+ # family = utilization
+ # units = percentage
+ # context = cpu.cpu
+ # priority = 1005
+ # name = cpu.cpu4
+ # title = Core utilization
+ # dim guest_nice name = guest_nice
+ # dim guest_nice algorithm = percentage-of-incremental-row
+ # dim guest_nice multiplier = 1
+ # dim guest_nice divisor = 1
+ # dim guest name = guest
+ # dim guest algorithm = percentage-of-incremental-row
+ # dim guest multiplier = 1
+ # dim guest divisor = 1
+ # dim steal name = steal
+ # dim steal algorithm = percentage-of-incremental-row
+ # dim steal multiplier = 1
+ # dim steal divisor = 1
+ # dim softirq name = softirq
+ # dim softirq algorithm = percentage-of-incremental-row
+ # dim softirq multiplier = 1
+ # dim softirq divisor = 1
+ # dim irq name = irq
+ # dim irq algorithm = percentage-of-incremental-row
+ # dim irq multiplier = 1
+ # dim irq divisor = 1
+ # dim user name = user
+ # dim user algorithm = percentage-of-incremental-row
+ # dim user multiplier = 1
+ # dim user divisor = 1
+ # dim system name = system
+ # dim system algorithm = percentage-of-incremental-row
+ # dim system multiplier = 1
+ # dim system divisor = 1
+ # dim nice name = nice
+ # dim nice algorithm = percentage-of-incremental-row
+ # dim nice multiplier = 1
+ # dim nice divisor = 1
+ # dim iowait name = iowait
+ # dim iowait algorithm = percentage-of-incremental-row
+ # dim iowait multiplier = 1
+ # dim iowait divisor = 1
+ # dim idle name = idle
+ # dim idle algorithm = percentage-of-incremental-row
+ # dim idle multiplier = 1
+ # dim idle divisor = 1
+
+[cpu.cpu5]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/cpu.cpu5
+ # chart type = stacked
+ # type = cpu
+ # family = utilization
+ # units = percentage
+ # context = cpu.cpu
+ # priority = 1006
+ # name = cpu.cpu5
+ # title = Core utilization
+ # dim guest_nice name = guest_nice
+ # dim guest_nice algorithm = percentage-of-incremental-row
+ # dim guest_nice multiplier = 1
+ # dim guest_nice divisor = 1
+ # dim guest name = guest
+ # dim guest algorithm = percentage-of-incremental-row
+ # dim guest multiplier = 1
+ # dim guest divisor = 1
+ # dim steal name = steal
+ # dim steal algorithm = percentage-of-incremental-row
+ # dim steal multiplier = 1
+ # dim steal divisor = 1
+ # dim softirq name = softirq
+ # dim softirq algorithm = percentage-of-incremental-row
+ # dim softirq multiplier = 1
+ # dim softirq divisor = 1
+ # dim irq name = irq
+ # dim irq algorithm = percentage-of-incremental-row
+ # dim irq multiplier = 1
+ # dim irq divisor = 1
+ # dim user name = user
+ # dim user algorithm = percentage-of-incremental-row
+ # dim user multiplier = 1
+ # dim user divisor = 1
+ # dim system name = system
+ # dim system algorithm = percentage-of-incremental-row
+ # dim system multiplier = 1
+ # dim system divisor = 1
+ # dim nice name = nice
+ # dim nice algorithm = percentage-of-incremental-row
+ # dim nice multiplier = 1
+ # dim nice divisor = 1
+ # dim iowait name = iowait
+ # dim iowait algorithm = percentage-of-incremental-row
+ # dim iowait multiplier = 1
+ # dim iowait divisor = 1
+ # dim idle name = idle
+ # dim idle algorithm = percentage-of-incremental-row
+ # dim idle multiplier = 1
+ # dim idle divisor = 1
+
+[cpu.cpu6]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/cpu.cpu6
+ # chart type = stacked
+ # type = cpu
+ # family = utilization
+ # units = percentage
+ # context = cpu.cpu
+ # priority = 1007
+ # name = cpu.cpu6
+ # title = Core utilization
+ # dim guest_nice name = guest_nice
+ # dim guest_nice algorithm = percentage-of-incremental-row
+ # dim guest_nice multiplier = 1
+ # dim guest_nice divisor = 1
+ # dim guest name = guest
+ # dim guest algorithm = percentage-of-incremental-row
+ # dim guest multiplier = 1
+ # dim guest divisor = 1
+ # dim steal name = steal
+ # dim steal algorithm = percentage-of-incremental-row
+ # dim steal multiplier = 1
+ # dim steal divisor = 1
+ # dim softirq name = softirq
+ # dim softirq algorithm = percentage-of-incremental-row
+ # dim softirq multiplier = 1
+ # dim softirq divisor = 1
+ # dim irq name = irq
+ # dim irq algorithm = percentage-of-incremental-row
+ # dim irq multiplier = 1
+ # dim irq divisor = 1
+ # dim user name = user
+ # dim user algorithm = percentage-of-incremental-row
+ # dim user multiplier = 1
+ # dim user divisor = 1
+ # dim system name = system
+ # dim system algorithm = percentage-of-incremental-row
+ # dim system multiplier = 1
+ # dim system divisor = 1
+ # dim nice name = nice
+ # dim nice algorithm = percentage-of-incremental-row
+ # dim nice multiplier = 1
+ # dim nice divisor = 1
+ # dim iowait name = iowait
+ # dim iowait algorithm = percentage-of-incremental-row
+ # dim iowait multiplier = 1
+ # dim iowait divisor = 1
+ # dim idle name = idle
+ # dim idle algorithm = percentage-of-incremental-row
+ # dim idle multiplier = 1
+ # dim idle divisor = 1
+
+[cpu.cpu7]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/cpu.cpu7
+ # chart type = stacked
+ # type = cpu
+ # family = utilization
+ # units = percentage
+ # context = cpu.cpu
+ # priority = 1008
+ # name = cpu.cpu7
+ # title = Core utilization
+ # dim guest_nice name = guest_nice
+ # dim guest_nice algorithm = percentage-of-incremental-row
+ # dim guest_nice multiplier = 1
+ # dim guest_nice divisor = 1
+ # dim guest name = guest
+ # dim guest algorithm = percentage-of-incremental-row
+ # dim guest multiplier = 1
+ # dim guest divisor = 1
+ # dim steal name = steal
+ # dim steal algorithm = percentage-of-incremental-row
+ # dim steal multiplier = 1
+ # dim steal divisor = 1
+ # dim softirq name = softirq
+ # dim softirq algorithm = percentage-of-incremental-row
+ # dim softirq multiplier = 1
+ # dim softirq divisor = 1
+ # dim irq name = irq
+ # dim irq algorithm = percentage-of-incremental-row
+ # dim irq multiplier = 1
+ # dim irq divisor = 1
+ # dim user name = user
+ # dim user algorithm = percentage-of-incremental-row
+ # dim user multiplier = 1
+ # dim user divisor = 1
+ # dim system name = system
+ # dim system algorithm = percentage-of-incremental-row
+ # dim system multiplier = 1
+ # dim system divisor = 1
+ # dim nice name = nice
+ # dim nice algorithm = percentage-of-incremental-row
+ # dim nice multiplier = 1
+ # dim nice divisor = 1
+ # dim iowait name = iowait
+ # dim iowait algorithm = percentage-of-incremental-row
+ # dim iowait multiplier = 1
+ # dim iowait divisor = 1
+ # dim idle name = idle
+ # dim idle algorithm = percentage-of-incremental-row
+ # dim idle multiplier = 1
+ # dim idle divisor = 1
+
+[system.intr]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/system.intr
+ # chart type = line
+ # type = system
+ # family = interrupts
+ # units = interrupts/s
+ # context = system.intr
+ # priority = 900
+ # name = system.intr
+ # title = CPU Interrupts
+ # dim interrupts name = interrupts
+ # dim interrupts algorithm = incremental
+ # dim interrupts multiplier = 1
+ # dim interrupts divisor = 1
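+	# Note (illustrative, not part of netdata's output): "incremental" dimensions chart
+	# the per-second rate of a monotonically increasing counter, i.e.
+	# (now - previous) / seconds elapsed, then scaled by multiplier/divisor (here 1/1).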
+
+[system.ctxt]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/system.ctxt
+ # chart type = line
+ # type = system
+ # family = processes
+ # units = context switches/s
+ # context = system.ctxt
+ # priority = 800
+ # name = system.ctxt
+ # title = CPU Context Switches
+ # dim switches name = switches
+ # dim switches algorithm = incremental
+ # dim switches multiplier = 1
+ # dim switches divisor = 1
+
+[system.forks]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/system.forks
+ # chart type = line
+ # type = system
+ # family = processes
+ # units = processes/s
+ # context = system.forks
+ # priority = 700
+ # name = system.forks
+ # title = Started Processes
+ # dim started name = started
+ # dim started algorithm = incremental
+ # dim started multiplier = 1
+ # dim started divisor = 1
+
+[system.processes]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/system.processes
+ # chart type = line
+ # type = system
+ # family = processes
+ # units = processes
+ # context = system.processes
+ # priority = 600
+ # name = system.processes
+ # title = System Processes
+ # dim running name = running
+ # dim running algorithm = absolute
+ # dim running multiplier = 1
+ # dim running divisor = 1
+ # dim blocked name = blocked
+ # dim blocked algorithm = absolute
+ # dim blocked multiplier = -1
+ # dim blocked divisor = 1
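+	# Note (illustrative, not part of netdata's output): the -1 multiplier flips the
+	# sign, so "blocked" is drawn below the zero axis while "running" stacks above it;
+	# the same convention marks "out" in system.pgpgio and "major" in mem.pgfaults below.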
+
+[cpu.core_throttling]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/cpu.core_throttling
+ # chart type = line
+ # type = cpu
+ # family = throttling
+ # units = events/s
+ # context = cpu.core_throttling
+ # priority = 5001
+ # name = cpu.core_throttling
+ # title = Core Thermal Throttling Events
+ # dim cpu0 name = cpu0
+ # dim cpu0 algorithm = incremental
+ # dim cpu0 multiplier = 1
+ # dim cpu0 divisor = 1
+ # dim cpu1 name = cpu1
+ # dim cpu1 algorithm = incremental
+ # dim cpu1 multiplier = 1
+ # dim cpu1 divisor = 1
+ # dim cpu2 name = cpu2
+ # dim cpu2 algorithm = incremental
+ # dim cpu2 multiplier = 1
+ # dim cpu2 divisor = 1
+ # dim cpu3 name = cpu3
+ # dim cpu3 algorithm = incremental
+ # dim cpu3 multiplier = 1
+ # dim cpu3 divisor = 1
+ # dim cpu4 name = cpu4
+ # dim cpu4 algorithm = incremental
+ # dim cpu4 multiplier = 1
+ # dim cpu4 divisor = 1
+ # dim cpu5 name = cpu5
+ # dim cpu5 algorithm = incremental
+ # dim cpu5 multiplier = 1
+ # dim cpu5 divisor = 1
+ # dim cpu6 name = cpu6
+ # dim cpu6 algorithm = incremental
+ # dim cpu6 multiplier = 1
+ # dim cpu6 divisor = 1
+ # dim cpu7 name = cpu7
+ # dim cpu7 algorithm = incremental
+ # dim cpu7 multiplier = 1
+ # dim cpu7 divisor = 1
+
+[cpu.cpufreq]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/cpu.cpufreq
+ # chart type = line
+ # type = cpu
+ # family = cpufreq
+ # units = MHz
+ # context = cpufreq.cpufreq
+ # priority = 5003
+ # name = cpu.cpufreq
+ # title = Current CPU Frequency
+ # dim cpu0 name = cpu0
+ # dim cpu0 algorithm = absolute
+ # dim cpu0 multiplier = 1
+ # dim cpu0 divisor = 1000
+ # dim cpu1 name = cpu1
+ # dim cpu1 algorithm = absolute
+ # dim cpu1 multiplier = 1
+ # dim cpu1 divisor = 1000
+ # dim cpu2 name = cpu2
+ # dim cpu2 algorithm = absolute
+ # dim cpu2 multiplier = 1
+ # dim cpu2 divisor = 1000
+ # dim cpu3 name = cpu3
+ # dim cpu3 algorithm = absolute
+ # dim cpu3 multiplier = 1
+ # dim cpu3 divisor = 1000
+ # dim cpu4 name = cpu4
+ # dim cpu4 algorithm = absolute
+ # dim cpu4 multiplier = 1
+ # dim cpu4 divisor = 1000
+ # dim cpu5 name = cpu5
+ # dim cpu5 algorithm = absolute
+ # dim cpu5 multiplier = 1
+ # dim cpu5 divisor = 1000
+ # dim cpu6 name = cpu6
+ # dim cpu6 algorithm = absolute
+ # dim cpu6 multiplier = 1
+ # dim cpu6 divisor = 1000
+ # dim cpu7 name = cpu7
+ # dim cpu7 algorithm = absolute
+ # dim cpu7 multiplier = 1
+ # dim cpu7 divisor = 1000
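+	# Note (illustrative, not part of netdata's output): the kernel exposes cpufreq
+	# in kHz, so the divisor of 1000 renders MHz, e.g. a raw 2400000 charts as 2400 MHz.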
+
+[netdata.plugin_cgroups_cpu]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/netdata.plugin_cgroups_cpu
+ # chart type = stacked
+ # type = netdata
+ # family = cgroups
+ # units = milliseconds/s
+ # context = netdata.plugin_cgroups_cpu
+ # priority = 132000
+ # name = netdata.plugin_cgroups_cpu
+ # title = NetData CGroups Plugin CPU usage
+ # dim user name = user
+ # dim user algorithm = incremental
+ # dim user multiplier = 1
+ # dim user divisor = 1000
+ # dim system name = system
+ # dim system algorithm = incremental
+ # dim system multiplier = 1
+ # dim system divisor = 1000
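+	# Note (illustrative, unverified assumption): the plugin's CPU time is evidently
+	# accounted in microseconds, so the incremental rate divided by 1000 yields the
+	# charted milliseconds of CPU consumed per second.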
+
+[netdata.plugin_diskspace]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/netdata.plugin_diskspace
+ # chart type = stacked
+ # type = netdata
+ # family = diskspace
+ # units = milliseconds/s
+ # context = netdata.plugin_diskspace
+ # priority = 132020
+ # name = netdata.plugin_diskspace
+ # title = NetData Disk Space Plugin CPU usage
+ # dim user name = user
+ # dim user algorithm = incremental
+ # dim user multiplier = 1
+ # dim user divisor = 1000
+ # dim system name = system
+ # dim system algorithm = incremental
+ # dim system multiplier = 1
+ # dim system divisor = 1000
+
+[netdata.plugin_diskspace_dt]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/netdata.plugin_diskspace_dt
+ # chart type = area
+ # type = netdata
+ # family = diskspace
+ # units = milliseconds/run
+ # context = netdata.plugin_diskspace_dt
+ # priority = 132021
+ # name = netdata.plugin_diskspace_dt
+ # title = NetData Disk Space Plugin Duration
+ # dim duration name = duration
+ # dim duration algorithm = absolute
+ # dim duration multiplier = 1
+ # dim duration divisor = 1000
+
+[cpu.cpu0_cpuidle]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/cpu.cpu0_cpuidle
+ # chart type = stacked
+ # type = cpu
+ # family = cpuidle
+ # units = percentage
+ # context = cpuidle.cpuidle
+ # priority = 6000
+ # name = cpu.cpu0_cpuidle
+ # title = C-state residency time
+ # dim cpu0_active_time name = C0 (active)
+ # dim cpu0_active_time algorithm = percentage-of-incremental-row
+ # dim cpu0_active_time multiplier = 1
+ # dim cpu0_active_time divisor = 1
+ # dim cpu0_cpuidle_state0_time name = POLL
+ # dim cpu0_cpuidle_state0_time algorithm = percentage-of-incremental-row
+ # dim cpu0_cpuidle_state0_time multiplier = 1
+ # dim cpu0_cpuidle_state0_time divisor = 1
+ # dim cpu0_cpuidle_state1_time name = C1
+ # dim cpu0_cpuidle_state1_time algorithm = percentage-of-incremental-row
+ # dim cpu0_cpuidle_state1_time multiplier = 1
+ # dim cpu0_cpuidle_state1_time divisor = 1
+ # dim cpu0_cpuidle_state2_time name = C1E
+ # dim cpu0_cpuidle_state2_time algorithm = percentage-of-incremental-row
+ # dim cpu0_cpuidle_state2_time multiplier = 1
+ # dim cpu0_cpuidle_state2_time divisor = 1
+ # dim cpu0_cpuidle_state3_time name = C3
+ # dim cpu0_cpuidle_state3_time algorithm = percentage-of-incremental-row
+ # dim cpu0_cpuidle_state3_time multiplier = 1
+ # dim cpu0_cpuidle_state3_time divisor = 1
+ # dim cpu0_cpuidle_state4_time name = C6
+ # dim cpu0_cpuidle_state4_time algorithm = percentage-of-incremental-row
+ # dim cpu0_cpuidle_state4_time multiplier = 1
+ # dim cpu0_cpuidle_state4_time divisor = 1
+ # dim cpu0_cpuidle_state5_time name = C7s
+ # dim cpu0_cpuidle_state5_time algorithm = percentage-of-incremental-row
+ # dim cpu0_cpuidle_state5_time multiplier = 1
+ # dim cpu0_cpuidle_state5_time divisor = 1
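+	# Note (illustrative, not part of netdata's output): the state names (POLL, C1,
+	# C1E, C3, C6, C7s) come from /sys/devices/system/cpu/cpu*/cpuidle/state*/name;
+	# C0 (active) is inferred as the time not spent in any reported idle state.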
+
+[cpu.cpu1_cpuidle]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/cpu.cpu1_cpuidle
+ # chart type = stacked
+ # type = cpu
+ # family = cpuidle
+ # units = percentage
+ # context = cpuidle.cpuidle
+ # priority = 6001
+ # name = cpu.cpu1_cpuidle
+ # title = C-state residency time
+ # dim cpu1_active_time name = C0 (active)
+ # dim cpu1_active_time algorithm = percentage-of-incremental-row
+ # dim cpu1_active_time multiplier = 1
+ # dim cpu1_active_time divisor = 1
+ # dim cpu1_cpuidle_state0_time name = POLL
+ # dim cpu1_cpuidle_state0_time algorithm = percentage-of-incremental-row
+ # dim cpu1_cpuidle_state0_time multiplier = 1
+ # dim cpu1_cpuidle_state0_time divisor = 1
+ # dim cpu1_cpuidle_state1_time name = C1
+ # dim cpu1_cpuidle_state1_time algorithm = percentage-of-incremental-row
+ # dim cpu1_cpuidle_state1_time multiplier = 1
+ # dim cpu1_cpuidle_state1_time divisor = 1
+ # dim cpu1_cpuidle_state2_time name = C1E
+ # dim cpu1_cpuidle_state2_time algorithm = percentage-of-incremental-row
+ # dim cpu1_cpuidle_state2_time multiplier = 1
+ # dim cpu1_cpuidle_state2_time divisor = 1
+ # dim cpu1_cpuidle_state3_time name = C3
+ # dim cpu1_cpuidle_state3_time algorithm = percentage-of-incremental-row
+ # dim cpu1_cpuidle_state3_time multiplier = 1
+ # dim cpu1_cpuidle_state3_time divisor = 1
+ # dim cpu1_cpuidle_state4_time name = C6
+ # dim cpu1_cpuidle_state4_time algorithm = percentage-of-incremental-row
+ # dim cpu1_cpuidle_state4_time multiplier = 1
+ # dim cpu1_cpuidle_state4_time divisor = 1
+ # dim cpu1_cpuidle_state5_time name = C7s
+ # dim cpu1_cpuidle_state5_time algorithm = percentage-of-incremental-row
+ # dim cpu1_cpuidle_state5_time multiplier = 1
+ # dim cpu1_cpuidle_state5_time divisor = 1
+
+[cpu.cpu2_cpuidle]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/cpu.cpu2_cpuidle
+ # chart type = stacked
+ # type = cpu
+ # family = cpuidle
+ # units = percentage
+ # context = cpuidle.cpuidle
+ # priority = 6002
+ # name = cpu.cpu2_cpuidle
+ # title = C-state residency time
+ # dim cpu2_active_time name = C0 (active)
+ # dim cpu2_active_time algorithm = percentage-of-incremental-row
+ # dim cpu2_active_time multiplier = 1
+ # dim cpu2_active_time divisor = 1
+ # dim cpu2_cpuidle_state0_time name = POLL
+ # dim cpu2_cpuidle_state0_time algorithm = percentage-of-incremental-row
+ # dim cpu2_cpuidle_state0_time multiplier = 1
+ # dim cpu2_cpuidle_state0_time divisor = 1
+ # dim cpu2_cpuidle_state1_time name = C1
+ # dim cpu2_cpuidle_state1_time algorithm = percentage-of-incremental-row
+ # dim cpu2_cpuidle_state1_time multiplier = 1
+ # dim cpu2_cpuidle_state1_time divisor = 1
+ # dim cpu2_cpuidle_state2_time name = C1E
+ # dim cpu2_cpuidle_state2_time algorithm = percentage-of-incremental-row
+ # dim cpu2_cpuidle_state2_time multiplier = 1
+ # dim cpu2_cpuidle_state2_time divisor = 1
+ # dim cpu2_cpuidle_state3_time name = C3
+ # dim cpu2_cpuidle_state3_time algorithm = percentage-of-incremental-row
+ # dim cpu2_cpuidle_state3_time multiplier = 1
+ # dim cpu2_cpuidle_state3_time divisor = 1
+ # dim cpu2_cpuidle_state4_time name = C6
+ # dim cpu2_cpuidle_state4_time algorithm = percentage-of-incremental-row
+ # dim cpu2_cpuidle_state4_time multiplier = 1
+ # dim cpu2_cpuidle_state4_time divisor = 1
+ # dim cpu2_cpuidle_state5_time name = C7s
+ # dim cpu2_cpuidle_state5_time algorithm = percentage-of-incremental-row
+ # dim cpu2_cpuidle_state5_time multiplier = 1
+ # dim cpu2_cpuidle_state5_time divisor = 1
+
+[cpu.cpu3_cpuidle]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/cpu.cpu3_cpuidle
+ # chart type = stacked
+ # type = cpu
+ # family = cpuidle
+ # units = percentage
+ # context = cpuidle.cpuidle
+ # priority = 6003
+ # name = cpu.cpu3_cpuidle
+ # title = C-state residency time
+ # dim cpu3_active_time name = C0 (active)
+ # dim cpu3_active_time algorithm = percentage-of-incremental-row
+ # dim cpu3_active_time multiplier = 1
+ # dim cpu3_active_time divisor = 1
+ # dim cpu3_cpuidle_state0_time name = POLL
+ # dim cpu3_cpuidle_state0_time algorithm = percentage-of-incremental-row
+ # dim cpu3_cpuidle_state0_time multiplier = 1
+ # dim cpu3_cpuidle_state0_time divisor = 1
+ # dim cpu3_cpuidle_state1_time name = C1
+ # dim cpu3_cpuidle_state1_time algorithm = percentage-of-incremental-row
+ # dim cpu3_cpuidle_state1_time multiplier = 1
+ # dim cpu3_cpuidle_state1_time divisor = 1
+ # dim cpu3_cpuidle_state2_time name = C1E
+ # dim cpu3_cpuidle_state2_time algorithm = percentage-of-incremental-row
+ # dim cpu3_cpuidle_state2_time multiplier = 1
+ # dim cpu3_cpuidle_state2_time divisor = 1
+ # dim cpu3_cpuidle_state3_time name = C3
+ # dim cpu3_cpuidle_state3_time algorithm = percentage-of-incremental-row
+ # dim cpu3_cpuidle_state3_time multiplier = 1
+ # dim cpu3_cpuidle_state3_time divisor = 1
+ # dim cpu3_cpuidle_state4_time name = C6
+ # dim cpu3_cpuidle_state4_time algorithm = percentage-of-incremental-row
+ # dim cpu3_cpuidle_state4_time multiplier = 1
+ # dim cpu3_cpuidle_state4_time divisor = 1
+ # dim cpu3_cpuidle_state5_time name = C7s
+ # dim cpu3_cpuidle_state5_time algorithm = percentage-of-incremental-row
+ # dim cpu3_cpuidle_state5_time multiplier = 1
+ # dim cpu3_cpuidle_state5_time divisor = 1
+
+[cpu.cpu4_cpuidle]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/cpu.cpu4_cpuidle
+ # chart type = stacked
+ # type = cpu
+ # family = cpuidle
+ # units = percentage
+ # context = cpuidle.cpuidle
+ # priority = 6004
+ # name = cpu.cpu4_cpuidle
+ # title = C-state residency time
+ # dim cpu4_active_time name = C0 (active)
+ # dim cpu4_active_time algorithm = percentage-of-incremental-row
+ # dim cpu4_active_time multiplier = 1
+ # dim cpu4_active_time divisor = 1
+ # dim cpu4_cpuidle_state0_time name = POLL
+ # dim cpu4_cpuidle_state0_time algorithm = percentage-of-incremental-row
+ # dim cpu4_cpuidle_state0_time multiplier = 1
+ # dim cpu4_cpuidle_state0_time divisor = 1
+ # dim cpu4_cpuidle_state1_time name = C1
+ # dim cpu4_cpuidle_state1_time algorithm = percentage-of-incremental-row
+ # dim cpu4_cpuidle_state1_time multiplier = 1
+ # dim cpu4_cpuidle_state1_time divisor = 1
+ # dim cpu4_cpuidle_state2_time name = C1E
+ # dim cpu4_cpuidle_state2_time algorithm = percentage-of-incremental-row
+ # dim cpu4_cpuidle_state2_time multiplier = 1
+ # dim cpu4_cpuidle_state2_time divisor = 1
+ # dim cpu4_cpuidle_state3_time name = C3
+ # dim cpu4_cpuidle_state3_time algorithm = percentage-of-incremental-row
+ # dim cpu4_cpuidle_state3_time multiplier = 1
+ # dim cpu4_cpuidle_state3_time divisor = 1
+ # dim cpu4_cpuidle_state4_time name = C6
+ # dim cpu4_cpuidle_state4_time algorithm = percentage-of-incremental-row
+ # dim cpu4_cpuidle_state4_time multiplier = 1
+ # dim cpu4_cpuidle_state4_time divisor = 1
+ # dim cpu4_cpuidle_state5_time name = C7s
+ # dim cpu4_cpuidle_state5_time algorithm = percentage-of-incremental-row
+ # dim cpu4_cpuidle_state5_time multiplier = 1
+ # dim cpu4_cpuidle_state5_time divisor = 1
+
+[cpu.cpu5_cpuidle]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/cpu.cpu5_cpuidle
+ # chart type = stacked
+ # type = cpu
+ # family = cpuidle
+ # units = percentage
+ # context = cpuidle.cpuidle
+ # priority = 6005
+ # name = cpu.cpu5_cpuidle
+ # title = C-state residency time
+ # dim cpu5_active_time name = C0 (active)
+ # dim cpu5_active_time algorithm = percentage-of-incremental-row
+ # dim cpu5_active_time multiplier = 1
+ # dim cpu5_active_time divisor = 1
+ # dim cpu5_cpuidle_state0_time name = POLL
+ # dim cpu5_cpuidle_state0_time algorithm = percentage-of-incremental-row
+ # dim cpu5_cpuidle_state0_time multiplier = 1
+ # dim cpu5_cpuidle_state0_time divisor = 1
+ # dim cpu5_cpuidle_state1_time name = C1
+ # dim cpu5_cpuidle_state1_time algorithm = percentage-of-incremental-row
+ # dim cpu5_cpuidle_state1_time multiplier = 1
+ # dim cpu5_cpuidle_state1_time divisor = 1
+ # dim cpu5_cpuidle_state2_time name = C1E
+ # dim cpu5_cpuidle_state2_time algorithm = percentage-of-incremental-row
+ # dim cpu5_cpuidle_state2_time multiplier = 1
+ # dim cpu5_cpuidle_state2_time divisor = 1
+ # dim cpu5_cpuidle_state3_time name = C3
+ # dim cpu5_cpuidle_state3_time algorithm = percentage-of-incremental-row
+ # dim cpu5_cpuidle_state3_time multiplier = 1
+ # dim cpu5_cpuidle_state3_time divisor = 1
+ # dim cpu5_cpuidle_state4_time name = C6
+ # dim cpu5_cpuidle_state4_time algorithm = percentage-of-incremental-row
+ # dim cpu5_cpuidle_state4_time multiplier = 1
+ # dim cpu5_cpuidle_state4_time divisor = 1
+ # dim cpu5_cpuidle_state5_time name = C7s
+ # dim cpu5_cpuidle_state5_time algorithm = percentage-of-incremental-row
+ # dim cpu5_cpuidle_state5_time multiplier = 1
+ # dim cpu5_cpuidle_state5_time divisor = 1
+
+[cpu.cpu6_cpuidle]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/cpu.cpu6_cpuidle
+ # chart type = stacked
+ # type = cpu
+ # family = cpuidle
+ # units = percentage
+ # context = cpuidle.cpuidle
+ # priority = 6006
+ # name = cpu.cpu6_cpuidle
+ # title = C-state residency time
+ # dim cpu6_active_time name = C0 (active)
+ # dim cpu6_active_time algorithm = percentage-of-incremental-row
+ # dim cpu6_active_time multiplier = 1
+ # dim cpu6_active_time divisor = 1
+ # dim cpu6_cpuidle_state0_time name = POLL
+ # dim cpu6_cpuidle_state0_time algorithm = percentage-of-incremental-row
+ # dim cpu6_cpuidle_state0_time multiplier = 1
+ # dim cpu6_cpuidle_state0_time divisor = 1
+ # dim cpu6_cpuidle_state1_time name = C1
+ # dim cpu6_cpuidle_state1_time algorithm = percentage-of-incremental-row
+ # dim cpu6_cpuidle_state1_time multiplier = 1
+ # dim cpu6_cpuidle_state1_time divisor = 1
+ # dim cpu6_cpuidle_state2_time name = C1E
+ # dim cpu6_cpuidle_state2_time algorithm = percentage-of-incremental-row
+ # dim cpu6_cpuidle_state2_time multiplier = 1
+ # dim cpu6_cpuidle_state2_time divisor = 1
+ # dim cpu6_cpuidle_state3_time name = C3
+ # dim cpu6_cpuidle_state3_time algorithm = percentage-of-incremental-row
+ # dim cpu6_cpuidle_state3_time multiplier = 1
+ # dim cpu6_cpuidle_state3_time divisor = 1
+ # dim cpu6_cpuidle_state4_time name = C6
+ # dim cpu6_cpuidle_state4_time algorithm = percentage-of-incremental-row
+ # dim cpu6_cpuidle_state4_time multiplier = 1
+ # dim cpu6_cpuidle_state4_time divisor = 1
+ # dim cpu6_cpuidle_state5_time name = C7s
+ # dim cpu6_cpuidle_state5_time algorithm = percentage-of-incremental-row
+ # dim cpu6_cpuidle_state5_time multiplier = 1
+ # dim cpu6_cpuidle_state5_time divisor = 1
+
+[cpu.cpu7_cpuidle]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/cpu.cpu7_cpuidle
+ # chart type = stacked
+ # type = cpu
+ # family = cpuidle
+ # units = percentage
+ # context = cpuidle.cpuidle
+ # priority = 6007
+ # name = cpu.cpu7_cpuidle
+ # title = C-state residency time
+ # dim cpu7_active_time name = C0 (active)
+ # dim cpu7_active_time algorithm = percentage-of-incremental-row
+ # dim cpu7_active_time multiplier = 1
+ # dim cpu7_active_time divisor = 1
+ # dim cpu7_cpuidle_state0_time name = POLL
+ # dim cpu7_cpuidle_state0_time algorithm = percentage-of-incremental-row
+ # dim cpu7_cpuidle_state0_time multiplier = 1
+ # dim cpu7_cpuidle_state0_time divisor = 1
+ # dim cpu7_cpuidle_state1_time name = C1
+ # dim cpu7_cpuidle_state1_time algorithm = percentage-of-incremental-row
+ # dim cpu7_cpuidle_state1_time multiplier = 1
+ # dim cpu7_cpuidle_state1_time divisor = 1
+ # dim cpu7_cpuidle_state2_time name = C1E
+ # dim cpu7_cpuidle_state2_time algorithm = percentage-of-incremental-row
+ # dim cpu7_cpuidle_state2_time multiplier = 1
+ # dim cpu7_cpuidle_state2_time divisor = 1
+ # dim cpu7_cpuidle_state3_time name = C3
+ # dim cpu7_cpuidle_state3_time algorithm = percentage-of-incremental-row
+ # dim cpu7_cpuidle_state3_time multiplier = 1
+ # dim cpu7_cpuidle_state3_time divisor = 1
+ # dim cpu7_cpuidle_state4_time name = C6
+ # dim cpu7_cpuidle_state4_time algorithm = percentage-of-incremental-row
+ # dim cpu7_cpuidle_state4_time multiplier = 1
+ # dim cpu7_cpuidle_state4_time divisor = 1
+ # dim cpu7_cpuidle_state5_time name = C7s
+ # dim cpu7_cpuidle_state5_time algorithm = percentage-of-incremental-row
+ # dim cpu7_cpuidle_state5_time multiplier = 1
+ # dim cpu7_cpuidle_state5_time divisor = 1
+
+[system.uptime]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/system.uptime
+ # chart type = line
+ # type = system
+ # family = uptime
+ # units = seconds
+ # context = system.uptime
+ # priority = 1000
+ # name = system.uptime
+ # title = System Uptime
+ # dim uptime name = uptime
+ # dim uptime algorithm = absolute
+ # dim uptime multiplier = 1
+ # dim uptime divisor = 1000
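+	# Note (illustrative, not part of netdata's output): the stored value is in
+	# milliseconds (hence the 1000 divisor against "seconds" units), e.g. a raw
+	# 86400000 charts as 86400 seconds, one day of uptime.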
+
+[system.load]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/system.load
+ # chart type = line
+ # type = system
+ # family = load
+ # units = load
+ # context = system.load
+ # priority = 100
+ # name = system.load
+ # title = System Load Average
+ # dim load1 name = load1
+ # dim load1 algorithm = absolute
+ # dim load1 multiplier = 1
+ # dim load1 divisor = 1000
+ # dim load5 name = load5
+ # dim load5 algorithm = absolute
+ # dim load5 multiplier = 1
+ # dim load5 divisor = 1000
+ # dim load15 name = load15
+ # dim load15 algorithm = absolute
+ # dim load15 multiplier = 1
+ # dim load15 divisor = 1000
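+	# Note (illustrative, not part of netdata's output): load averages are stored as
+	# integer thousandths; dividing by 1000 restores the fractional value, e.g. a
+	# stored 1540 charts as a load of 1.54.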
+
+[system.active_processes]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/system.active_processes
+ # chart type = line
+ # type = system
+ # family = processes
+ # units = processes
+ # context = system.active_processes
+ # priority = 750
+ # name = system.active_processes
+ # title = System Active Processes
+ # dim active name = active
+ # dim active algorithm = absolute
+ # dim active multiplier = 1
+ # dim active divisor = 1
+
+[system.entropy]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/system.entropy
+ # chart type = line
+ # type = system
+ # family = entropy
+ # units = entropy
+ # context = system.entropy
+ # priority = 1000
+ # name = system.entropy
+ # title = Available Entropy
+ # dim entropy name = entropy
+ # dim entropy algorithm = absolute
+ # dim entropy multiplier = 1
+ # dim entropy divisor = 1
+
+[system.interrupts]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/system.interrupts
+ # chart type = stacked
+ # type = system
+ # family = interrupts
+ # units = interrupts/s
+ # context = system.interrupts
+ # priority = 1000
+ # name = system.interrupts
+ # title = System interrupts
+ # dim 0 name = timer_0
+ # dim 0 algorithm = incremental
+ # dim 0 multiplier = 1
+ # dim 0 divisor = 1
+ # dim 8 name = rtc0_8
+ # dim 8 algorithm = incremental
+ # dim 8 multiplier = 1
+ # dim 8 divisor = 1
+ # dim 16 name = ehci_hcd:usb1_16
+ # dim 16 algorithm = incremental
+ # dim 16 multiplier = 1
+ # dim 16 divisor = 1
+ # dim 18 name = snd_hda_intel:card1_18
+ # dim 18 algorithm = incremental
+ # dim 18 multiplier = 1
+ # dim 18 divisor = 1
+ # dim 23 name = ehci_hcd:usb3_23
+ # dim 23 algorithm = incremental
+ # dim 23 multiplier = 1
+ # dim 23 divisor = 1
+ # dim 25 name = ahci[0000:00:1f.2]_25
+ # dim 25 algorithm = incremental
+ # dim 25 multiplier = 1
+ # dim 25 divisor = 1
+ # dim 26 name = xhci_hcd_26
+ # dim 26 algorithm = incremental
+ # dim 26 multiplier = 1
+ # dim 26 divisor = 1
+ # dim 27 name = mei_me_27
+ # dim 27 algorithm = incremental
+ # dim 27 multiplier = 1
+ # dim 27 divisor = 1
+ # dim 28 name = snd_hda_intel:card0_28
+ # dim 28 algorithm = incremental
+ # dim 28 multiplier = 1
+ # dim 28 divisor = 1
+ # dim 29 name = enp4s0_29
+ # dim 29 algorithm = incremental
+ # dim 29 multiplier = 1
+ # dim 29 divisor = 1
+ # dim 30 name = enp4s0-TxRx-0_30
+ # dim 30 algorithm = incremental
+ # dim 30 multiplier = 1
+ # dim 30 divisor = 1
+ # dim 31 name = enp4s0-tx-1_31
+ # dim 31 algorithm = incremental
+ # dim 31 multiplier = 1
+ # dim 31 divisor = 1
+ # dim 32 name = enp4s0-tx-2_32
+ # dim 32 algorithm = incremental
+ # dim 32 multiplier = 1
+ # dim 32 divisor = 1
+ # dim 33 name = enp4s0-tx-3_33
+ # dim 33 algorithm = incremental
+ # dim 33 multiplier = 1
+ # dim 33 divisor = 1
+ # dim 34 name = nvidia_34
+ # dim 34 algorithm = incremental
+ # dim 34 multiplier = 1
+ # dim 34 divisor = 1
+ # dim NMI name = NMI
+ # dim NMI algorithm = incremental
+ # dim NMI multiplier = 1
+ # dim NMI divisor = 1
+ # dim LOC name = LOC
+ # dim LOC algorithm = incremental
+ # dim LOC multiplier = 1
+ # dim LOC divisor = 1
+ # dim PMI name = PMI
+ # dim PMI algorithm = incremental
+ # dim PMI multiplier = 1
+ # dim PMI divisor = 1
+ # dim IWI name = IWI
+ # dim IWI algorithm = incremental
+ # dim IWI multiplier = 1
+ # dim IWI divisor = 1
+ # dim RES name = RES
+ # dim RES algorithm = incremental
+ # dim RES multiplier = 1
+ # dim RES divisor = 1
+ # dim CAL name = CAL
+ # dim CAL algorithm = incremental
+ # dim CAL multiplier = 1
+ # dim CAL divisor = 1
+ # dim TLB name = TLB
+ # dim TLB algorithm = incremental
+ # dim TLB multiplier = 1
+ # dim TLB divisor = 1
+ # dim MCP name = MCP
+ # dim MCP algorithm = incremental
+ # dim MCP multiplier = 1
+ # dim MCP divisor = 1
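+	# Note (illustrative, not part of netdata's output): numeric dims are IRQ numbers;
+	# each display name appends the IRQ to its handler as listed in /proc/interrupts,
+	# e.g. IRQ 29 (the enp4s0 NIC) is charted as "enp4s0_29".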
+
+[cpu.cpu0_interrupts]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/cpu.cpu0_interrupts
+ # chart type = stacked
+ # type = cpu
+ # family = interrupts
+ # units = interrupts/s
+ # context = cpu.interrupts
+ # priority = 1100
+ # name = cpu.cpu0_interrupts
+ # title = CPU0 Interrupts
+ # dim 0 name = timer_0
+ # dim 0 algorithm = incremental
+ # dim 0 multiplier = 1
+ # dim 0 divisor = 1
+ # dim 34 name = nvidia_34
+ # dim 34 algorithm = incremental
+ # dim 34 multiplier = 1
+ # dim 34 divisor = 1
+ # dim NMI name = NMI
+ # dim NMI algorithm = incremental
+ # dim NMI multiplier = 1
+ # dim NMI divisor = 1
+ # dim LOC name = LOC
+ # dim LOC algorithm = incremental
+ # dim LOC multiplier = 1
+ # dim LOC divisor = 1
+ # dim PMI name = PMI
+ # dim PMI algorithm = incremental
+ # dim PMI multiplier = 1
+ # dim PMI divisor = 1
+ # dim IWI name = IWI
+ # dim IWI algorithm = incremental
+ # dim IWI multiplier = 1
+ # dim IWI divisor = 1
+ # dim RES name = RES
+ # dim RES algorithm = incremental
+ # dim RES multiplier = 1
+ # dim RES divisor = 1
+ # dim CAL name = CAL
+ # dim CAL algorithm = incremental
+ # dim CAL multiplier = 1
+ # dim CAL divisor = 1
+ # dim TLB name = TLB
+ # dim TLB algorithm = incremental
+ # dim TLB multiplier = 1
+ # dim TLB divisor = 1
+ # dim MCP name = MCP
+ # dim MCP algorithm = incremental
+ # dim MCP multiplier = 1
+ # dim MCP divisor = 1
+
+[cpu.cpu1_interrupts]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/cpu.cpu1_interrupts
+ # chart type = stacked
+ # type = cpu
+ # family = interrupts
+ # units = interrupts/s
+ # context = cpu.interrupts
+ # priority = 1101
+ # name = cpu.cpu1_interrupts
+ # title = CPU1 Interrupts
+ # dim 27 name = mei_me_27
+ # dim 27 algorithm = incremental
+ # dim 27 multiplier = 1
+ # dim 27 divisor = 1
+ # dim NMI name = NMI
+ # dim NMI algorithm = incremental
+ # dim NMI multiplier = 1
+ # dim NMI divisor = 1
+ # dim LOC name = LOC
+ # dim LOC algorithm = incremental
+ # dim LOC multiplier = 1
+ # dim LOC divisor = 1
+ # dim PMI name = PMI
+ # dim PMI algorithm = incremental
+ # dim PMI multiplier = 1
+ # dim PMI divisor = 1
+ # dim RES name = RES
+ # dim RES algorithm = incremental
+ # dim RES multiplier = 1
+ # dim RES divisor = 1
+ # dim CAL name = CAL
+ # dim CAL algorithm = incremental
+ # dim CAL multiplier = 1
+ # dim CAL divisor = 1
+ # dim TLB name = TLB
+ # dim TLB algorithm = incremental
+ # dim TLB multiplier = 1
+ # dim TLB divisor = 1
+ # dim MCP name = MCP
+ # dim MCP algorithm = incremental
+ # dim MCP multiplier = 1
+ # dim MCP divisor = 1
+
+[cpu.cpu2_interrupts]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/cpu.cpu2_interrupts
+ # chart type = stacked
+ # type = cpu
+ # family = interrupts
+ # units = interrupts/s
+ # context = cpu.interrupts
+ # priority = 1102
+ # name = cpu.cpu2_interrupts
+ # title = CPU2 Interrupts
+ # dim 8 name = rtc0_8
+ # dim 8 algorithm = incremental
+ # dim 8 multiplier = 1
+ # dim 8 divisor = 1
+ # dim 28 name = snd_hda_intel:card0_28
+ # dim 28 algorithm = incremental
+ # dim 28 multiplier = 1
+ # dim 28 divisor = 1
+ # dim NMI name = NMI
+ # dim NMI algorithm = incremental
+ # dim NMI multiplier = 1
+ # dim NMI divisor = 1
+ # dim LOC name = LOC
+ # dim LOC algorithm = incremental
+ # dim LOC multiplier = 1
+ # dim LOC divisor = 1
+ # dim PMI name = PMI
+ # dim PMI algorithm = incremental
+ # dim PMI multiplier = 1
+ # dim PMI divisor = 1
+ # dim RES name = RES
+ # dim RES algorithm = incremental
+ # dim RES multiplier = 1
+ # dim RES divisor = 1
+ # dim CAL name = CAL
+ # dim CAL algorithm = incremental
+ # dim CAL multiplier = 1
+ # dim CAL divisor = 1
+ # dim TLB name = TLB
+ # dim TLB algorithm = incremental
+ # dim TLB multiplier = 1
+ # dim TLB divisor = 1
+ # dim MCP name = MCP
+ # dim MCP algorithm = incremental
+ # dim MCP multiplier = 1
+ # dim MCP divisor = 1
+
+[cpu.cpu3_interrupts]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/cpu.cpu3_interrupts
+ # chart type = stacked
+ # type = cpu
+ # family = interrupts
+ # units = interrupts/s
+ # context = cpu.interrupts
+ # priority = 1103
+ # name = cpu.cpu3_interrupts
+ # title = CPU3 Interrupts
+ # dim 18 name = snd_hda_intel:card1_18
+ # dim 18 algorithm = incremental
+ # dim 18 multiplier = 1
+ # dim 18 divisor = 1
+ # dim 29 name = enp4s0_29
+ # dim 29 algorithm = incremental
+ # dim 29 multiplier = 1
+ # dim 29 divisor = 1
+ # dim NMI name = NMI
+ # dim NMI algorithm = incremental
+ # dim NMI multiplier = 1
+ # dim NMI divisor = 1
+ # dim LOC name = LOC
+ # dim LOC algorithm = incremental
+ # dim LOC multiplier = 1
+ # dim LOC divisor = 1
+ # dim PMI name = PMI
+ # dim PMI algorithm = incremental
+ # dim PMI multiplier = 1
+ # dim PMI divisor = 1
+ # dim IWI name = IWI
+ # dim IWI algorithm = incremental
+ # dim IWI multiplier = 1
+ # dim IWI divisor = 1
+ # dim RES name = RES
+ # dim RES algorithm = incremental
+ # dim RES multiplier = 1
+ # dim RES divisor = 1
+ # dim CAL name = CAL
+ # dim CAL algorithm = incremental
+ # dim CAL multiplier = 1
+ # dim CAL divisor = 1
+ # dim TLB name = TLB
+ # dim TLB algorithm = incremental
+ # dim TLB multiplier = 1
+ # dim TLB divisor = 1
+ # dim MCP name = MCP
+ # dim MCP algorithm = incremental
+ # dim MCP multiplier = 1
+ # dim MCP divisor = 1
+
+[cpu.cpu4_interrupts]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/cpu.cpu4_interrupts
+ # chart type = stacked
+ # type = cpu
+ # family = interrupts
+ # units = interrupts/s
+ # context = cpu.interrupts
+ # priority = 1104
+ # name = cpu.cpu4_interrupts
+ # title = CPU4 Interrupts
+ # dim 16 name = ehci_hcd:usb1_16
+ # dim 16 algorithm = incremental
+ # dim 16 multiplier = 1
+ # dim 16 divisor = 1
+ # dim 30 name = enp4s0-TxRx-0_30
+ # dim 30 algorithm = incremental
+ # dim 30 multiplier = 1
+ # dim 30 divisor = 1
+ # dim NMI name = NMI
+ # dim NMI algorithm = incremental
+ # dim NMI multiplier = 1
+ # dim NMI divisor = 1
+ # dim LOC name = LOC
+ # dim LOC algorithm = incremental
+ # dim LOC multiplier = 1
+ # dim LOC divisor = 1
+ # dim PMI name = PMI
+ # dim PMI algorithm = incremental
+ # dim PMI multiplier = 1
+ # dim PMI divisor = 1
+ # dim RES name = RES
+ # dim RES algorithm = incremental
+ # dim RES multiplier = 1
+ # dim RES divisor = 1
+ # dim CAL name = CAL
+ # dim CAL algorithm = incremental
+ # dim CAL multiplier = 1
+ # dim CAL divisor = 1
+ # dim TLB name = TLB
+ # dim TLB algorithm = incremental
+ # dim TLB multiplier = 1
+ # dim TLB divisor = 1
+ # dim MCP name = MCP
+ # dim MCP algorithm = incremental
+ # dim MCP multiplier = 1
+ # dim MCP divisor = 1
+
+[cpu.cpu5_interrupts]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/cpu.cpu5_interrupts
+ # chart type = stacked
+ # type = cpu
+ # family = interrupts
+ # units = interrupts/s
+ # context = cpu.interrupts
+ # priority = 1105
+ # name = cpu.cpu5_interrupts
+ # title = CPU5 Interrupts
+ # dim 25 name = ahci[0000:00:1f.2]_25
+ # dim 25 algorithm = incremental
+ # dim 25 multiplier = 1
+ # dim 25 divisor = 1
+ # dim 31 name = enp4s0-tx-1_31
+ # dim 31 algorithm = incremental
+ # dim 31 multiplier = 1
+ # dim 31 divisor = 1
+ # dim NMI name = NMI
+ # dim NMI algorithm = incremental
+ # dim NMI multiplier = 1
+ # dim NMI divisor = 1
+ # dim LOC name = LOC
+ # dim LOC algorithm = incremental
+ # dim LOC multiplier = 1
+ # dim LOC divisor = 1
+ # dim PMI name = PMI
+ # dim PMI algorithm = incremental
+ # dim PMI multiplier = 1
+ # dim PMI divisor = 1
+ # dim RES name = RES
+ # dim RES algorithm = incremental
+ # dim RES multiplier = 1
+ # dim RES divisor = 1
+ # dim CAL name = CAL
+ # dim CAL algorithm = incremental
+ # dim CAL multiplier = 1
+ # dim CAL divisor = 1
+ # dim TLB name = TLB
+ # dim TLB algorithm = incremental
+ # dim TLB multiplier = 1
+ # dim TLB divisor = 1
+ # dim MCP name = MCP
+ # dim MCP algorithm = incremental
+ # dim MCP multiplier = 1
+ # dim MCP divisor = 1
+
+[cpu.cpu6_interrupts]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/cpu.cpu6_interrupts
+ # chart type = stacked
+ # type = cpu
+ # family = interrupts
+ # units = interrupts/s
+ # context = cpu.interrupts
+ # priority = 1106
+ # name = cpu.cpu6_interrupts
+ # title = CPU6 Interrupts
+ # dim 26 name = xhci_hcd_26
+ # dim 26 algorithm = incremental
+ # dim 26 multiplier = 1
+ # dim 26 divisor = 1
+ # dim 32 name = enp4s0-tx-2_32
+ # dim 32 algorithm = incremental
+ # dim 32 multiplier = 1
+ # dim 32 divisor = 1
+ # dim NMI name = NMI
+ # dim NMI algorithm = incremental
+ # dim NMI multiplier = 1
+ # dim NMI divisor = 1
+ # dim LOC name = LOC
+ # dim LOC algorithm = incremental
+ # dim LOC multiplier = 1
+ # dim LOC divisor = 1
+ # dim PMI name = PMI
+ # dim PMI algorithm = incremental
+ # dim PMI multiplier = 1
+ # dim PMI divisor = 1
+ # dim IWI name = IWI
+ # dim IWI algorithm = incremental
+ # dim IWI multiplier = 1
+ # dim IWI divisor = 1
+ # dim RES name = RES
+ # dim RES algorithm = incremental
+ # dim RES multiplier = 1
+ # dim RES divisor = 1
+ # dim CAL name = CAL
+ # dim CAL algorithm = incremental
+ # dim CAL multiplier = 1
+ # dim CAL divisor = 1
+ # dim TLB name = TLB
+ # dim TLB algorithm = incremental
+ # dim TLB multiplier = 1
+ # dim TLB divisor = 1
+ # dim MCP name = MCP
+ # dim MCP algorithm = incremental
+ # dim MCP multiplier = 1
+ # dim MCP divisor = 1
+
+[cpu.cpu7_interrupts]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/cpu.cpu7_interrupts
+ # chart type = stacked
+ # type = cpu
+ # family = interrupts
+ # units = interrupts/s
+ # context = cpu.interrupts
+ # priority = 1107
+ # name = cpu.cpu7_interrupts
+ # title = CPU7 Interrupts
+ # dim 23 name = ehci_hcd:usb3_23
+ # dim 23 algorithm = incremental
+ # dim 23 multiplier = 1
+ # dim 23 divisor = 1
+ # dim 33 name = enp4s0-tx-3_33
+ # dim 33 algorithm = incremental
+ # dim 33 multiplier = 1
+ # dim 33 divisor = 1
+ # dim NMI name = NMI
+ # dim NMI algorithm = incremental
+ # dim NMI multiplier = 1
+ # dim NMI divisor = 1
+ # dim LOC name = LOC
+ # dim LOC algorithm = incremental
+ # dim LOC multiplier = 1
+ # dim LOC divisor = 1
+ # dim PMI name = PMI
+ # dim PMI algorithm = incremental
+ # dim PMI multiplier = 1
+ # dim PMI divisor = 1
+ # dim RES name = RES
+ # dim RES algorithm = incremental
+ # dim RES multiplier = 1
+ # dim RES divisor = 1
+ # dim CAL name = CAL
+ # dim CAL algorithm = incremental
+ # dim CAL multiplier = 1
+ # dim CAL divisor = 1
+ # dim TLB name = TLB
+ # dim TLB algorithm = incremental
+ # dim TLB multiplier = 1
+ # dim TLB divisor = 1
+ # dim MCP name = MCP
+ # dim MCP algorithm = incremental
+ # dim MCP multiplier = 1
+ # dim MCP divisor = 1
+
+[system.softirqs]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/system.softirqs
+ # chart type = stacked
+ # type = system
+ # family = softirqs
+ # units = softirqs/s
+ # context = system.softirqs
+ # priority = 950
+ # name = system.softirqs
+ # title = System softirqs
+ # dim HI name = HI
+ # dim HI algorithm = incremental
+ # dim HI multiplier = 1
+ # dim HI divisor = 1
+ # dim TIMER name = TIMER
+ # dim TIMER algorithm = incremental
+ # dim TIMER multiplier = 1
+ # dim TIMER divisor = 1
+ # dim NET_TX name = NET_TX
+ # dim NET_TX algorithm = incremental
+ # dim NET_TX multiplier = 1
+ # dim NET_TX divisor = 1
+ # dim NET_RX name = NET_RX
+ # dim NET_RX algorithm = incremental
+ # dim NET_RX multiplier = 1
+ # dim NET_RX divisor = 1
+ # dim TASKLET name = TASKLET
+ # dim TASKLET algorithm = incremental
+ # dim TASKLET multiplier = 1
+ # dim TASKLET divisor = 1
+ # dim SCHED name = SCHED
+ # dim SCHED algorithm = incremental
+ # dim SCHED multiplier = 1
+ # dim SCHED divisor = 1
+ # dim HRTIMER name = HRTIMER
+ # dim HRTIMER algorithm = incremental
+ # dim HRTIMER multiplier = 1
+ # dim HRTIMER divisor = 1
+ # dim RCU name = RCU
+ # dim RCU algorithm = incremental
+ # dim RCU multiplier = 1
+ # dim RCU divisor = 1
+
+[cpu.cpu0_softirqs]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/cpu.cpu0_softirqs
+ # chart type = stacked
+ # type = cpu
+ # family = softirqs
+ # units = softirqs/s
+ # context = cpu.softirqs
+ # priority = 3000
+ # name = cpu.cpu0_softirqs
+ # title = CPU0 softirqs
+ # dim HI name = HI
+ # dim HI algorithm = incremental
+ # dim HI multiplier = 1
+ # dim HI divisor = 1
+ # dim TIMER name = TIMER
+ # dim TIMER algorithm = incremental
+ # dim TIMER multiplier = 1
+ # dim TIMER divisor = 1
+ # dim NET_TX name = NET_TX
+ # dim NET_TX algorithm = incremental
+ # dim NET_TX multiplier = 1
+ # dim NET_TX divisor = 1
+ # dim NET_RX name = NET_RX
+ # dim NET_RX algorithm = incremental
+ # dim NET_RX multiplier = 1
+ # dim NET_RX divisor = 1
+ # dim TASKLET name = TASKLET
+ # dim TASKLET algorithm = incremental
+ # dim TASKLET multiplier = 1
+ # dim TASKLET divisor = 1
+ # dim SCHED name = SCHED
+ # dim SCHED algorithm = incremental
+ # dim SCHED multiplier = 1
+ # dim SCHED divisor = 1
+ # dim RCU name = RCU
+ # dim RCU algorithm = incremental
+ # dim RCU multiplier = 1
+ # dim RCU divisor = 1
+
+[cpu.cpu1_softirqs]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/cpu.cpu1_softirqs
+ # chart type = stacked
+ # type = cpu
+ # family = softirqs
+ # units = softirqs/s
+ # context = cpu.softirqs
+ # priority = 3001
+ # name = cpu.cpu1_softirqs
+ # title = CPU1 softirqs
+ # dim TIMER name = TIMER
+ # dim TIMER algorithm = incremental
+ # dim TIMER multiplier = 1
+ # dim TIMER divisor = 1
+ # dim NET_TX name = NET_TX
+ # dim NET_TX algorithm = incremental
+ # dim NET_TX multiplier = 1
+ # dim NET_TX divisor = 1
+ # dim NET_RX name = NET_RX
+ # dim NET_RX algorithm = incremental
+ # dim NET_RX multiplier = 1
+ # dim NET_RX divisor = 1
+ # dim TASKLET name = TASKLET
+ # dim TASKLET algorithm = incremental
+ # dim TASKLET multiplier = 1
+ # dim TASKLET divisor = 1
+ # dim SCHED name = SCHED
+ # dim SCHED algorithm = incremental
+ # dim SCHED multiplier = 1
+ # dim SCHED divisor = 1
+ # dim RCU name = RCU
+ # dim RCU algorithm = incremental
+ # dim RCU multiplier = 1
+ # dim RCU divisor = 1
+
+[cpu.cpu2_softirqs]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/cpu.cpu2_softirqs
+ # chart type = stacked
+ # type = cpu
+ # family = softirqs
+ # units = softirqs/s
+ # context = cpu.softirqs
+ # priority = 3002
+ # name = cpu.cpu2_softirqs
+ # title = CPU2 softirqs
+ # dim HI name = HI
+ # dim HI algorithm = incremental
+ # dim HI multiplier = 1
+ # dim HI divisor = 1
+ # dim TIMER name = TIMER
+ # dim TIMER algorithm = incremental
+ # dim TIMER multiplier = 1
+ # dim TIMER divisor = 1
+ # dim NET_TX name = NET_TX
+ # dim NET_TX algorithm = incremental
+ # dim NET_TX multiplier = 1
+ # dim NET_TX divisor = 1
+ # dim NET_RX name = NET_RX
+ # dim NET_RX algorithm = incremental
+ # dim NET_RX multiplier = 1
+ # dim NET_RX divisor = 1
+ # dim TASKLET name = TASKLET
+ # dim TASKLET algorithm = incremental
+ # dim TASKLET multiplier = 1
+ # dim TASKLET divisor = 1
+ # dim SCHED name = SCHED
+ # dim SCHED algorithm = incremental
+ # dim SCHED multiplier = 1
+ # dim SCHED divisor = 1
+ # dim RCU name = RCU
+ # dim RCU algorithm = incremental
+ # dim RCU multiplier = 1
+ # dim RCU divisor = 1
+
+[cpu.cpu3_softirqs]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/cpu.cpu3_softirqs
+ # chart type = stacked
+ # type = cpu
+ # family = softirqs
+ # units = softirqs/s
+ # context = cpu.softirqs
+ # priority = 3003
+ # name = cpu.cpu3_softirqs
+ # title = CPU3 softirqs
+ # dim TIMER name = TIMER
+ # dim TIMER algorithm = incremental
+ # dim TIMER multiplier = 1
+ # dim TIMER divisor = 1
+ # dim NET_TX name = NET_TX
+ # dim NET_TX algorithm = incremental
+ # dim NET_TX multiplier = 1
+ # dim NET_TX divisor = 1
+ # dim NET_RX name = NET_RX
+ # dim NET_RX algorithm = incremental
+ # dim NET_RX multiplier = 1
+ # dim NET_RX divisor = 1
+ # dim TASKLET name = TASKLET
+ # dim TASKLET algorithm = incremental
+ # dim TASKLET multiplier = 1
+ # dim TASKLET divisor = 1
+ # dim SCHED name = SCHED
+ # dim SCHED algorithm = incremental
+ # dim SCHED multiplier = 1
+ # dim SCHED divisor = 1
+ # dim RCU name = RCU
+ # dim RCU algorithm = incremental
+ # dim RCU multiplier = 1
+ # dim RCU divisor = 1
+
+[cpu.cpu4_softirqs]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/cpu.cpu4_softirqs
+ # chart type = stacked
+ # type = cpu
+ # family = softirqs
+ # units = softirqs/s
+ # context = cpu.softirqs
+ # priority = 3004
+ # name = cpu.cpu4_softirqs
+ # title = CPU4 softirqs
+ # dim HI name = HI
+ # dim HI algorithm = incremental
+ # dim HI multiplier = 1
+ # dim HI divisor = 1
+ # dim TIMER name = TIMER
+ # dim TIMER algorithm = incremental
+ # dim TIMER multiplier = 1
+ # dim TIMER divisor = 1
+ # dim NET_TX name = NET_TX
+ # dim NET_TX algorithm = incremental
+ # dim NET_TX multiplier = 1
+ # dim NET_TX divisor = 1
+ # dim NET_RX name = NET_RX
+ # dim NET_RX algorithm = incremental
+ # dim NET_RX multiplier = 1
+ # dim NET_RX divisor = 1
+ # dim TASKLET name = TASKLET
+ # dim TASKLET algorithm = incremental
+ # dim TASKLET multiplier = 1
+ # dim TASKLET divisor = 1
+ # dim SCHED name = SCHED
+ # dim SCHED algorithm = incremental
+ # dim SCHED multiplier = 1
+ # dim SCHED divisor = 1
+ # dim HRTIMER name = HRTIMER
+ # dim HRTIMER algorithm = incremental
+ # dim HRTIMER multiplier = 1
+ # dim HRTIMER divisor = 1
+ # dim RCU name = RCU
+ # dim RCU algorithm = incremental
+ # dim RCU multiplier = 1
+ # dim RCU divisor = 1
+
+[cpu.cpu5_softirqs]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/cpu.cpu5_softirqs
+ # chart type = stacked
+ # type = cpu
+ # family = softirqs
+ # units = softirqs/s
+ # context = cpu.softirqs
+ # priority = 3005
+ # name = cpu.cpu5_softirqs
+ # title = CPU5 softirqs
+ # dim HI name = HI
+ # dim HI algorithm = incremental
+ # dim HI multiplier = 1
+ # dim HI divisor = 1
+ # dim TIMER name = TIMER
+ # dim TIMER algorithm = incremental
+ # dim TIMER multiplier = 1
+ # dim TIMER divisor = 1
+ # dim NET_TX name = NET_TX
+ # dim NET_TX algorithm = incremental
+ # dim NET_TX multiplier = 1
+ # dim NET_TX divisor = 1
+ # dim NET_RX name = NET_RX
+ # dim NET_RX algorithm = incremental
+ # dim NET_RX multiplier = 1
+ # dim NET_RX divisor = 1
+ # dim TASKLET name = TASKLET
+ # dim TASKLET algorithm = incremental
+ # dim TASKLET multiplier = 1
+ # dim TASKLET divisor = 1
+ # dim SCHED name = SCHED
+ # dim SCHED algorithm = incremental
+ # dim SCHED multiplier = 1
+ # dim SCHED divisor = 1
+ # dim RCU name = RCU
+ # dim RCU algorithm = incremental
+ # dim RCU multiplier = 1
+ # dim RCU divisor = 1
+
+[cpu.cpu6_softirqs]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/cpu.cpu6_softirqs
+ # chart type = stacked
+ # type = cpu
+ # family = softirqs
+ # units = softirqs/s
+ # context = cpu.softirqs
+ # priority = 3006
+ # name = cpu.cpu6_softirqs
+ # title = CPU6 softirqs
+ # dim HI name = HI
+ # dim HI algorithm = incremental
+ # dim HI multiplier = 1
+ # dim HI divisor = 1
+ # dim TIMER name = TIMER
+ # dim TIMER algorithm = incremental
+ # dim TIMER multiplier = 1
+ # dim TIMER divisor = 1
+ # dim NET_TX name = NET_TX
+ # dim NET_TX algorithm = incremental
+ # dim NET_TX multiplier = 1
+ # dim NET_TX divisor = 1
+ # dim NET_RX name = NET_RX
+ # dim NET_RX algorithm = incremental
+ # dim NET_RX multiplier = 1
+ # dim NET_RX divisor = 1
+ # dim TASKLET name = TASKLET
+ # dim TASKLET algorithm = incremental
+ # dim TASKLET multiplier = 1
+ # dim TASKLET divisor = 1
+ # dim SCHED name = SCHED
+ # dim SCHED algorithm = incremental
+ # dim SCHED multiplier = 1
+ # dim SCHED divisor = 1
+ # dim RCU name = RCU
+ # dim RCU algorithm = incremental
+ # dim RCU multiplier = 1
+ # dim RCU divisor = 1
+
+[cpu.cpu7_softirqs]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/cpu.cpu7_softirqs
+ # chart type = stacked
+ # type = cpu
+ # family = softirqs
+ # units = softirqs/s
+ # context = cpu.softirqs
+ # priority = 3007
+ # name = cpu.cpu7_softirqs
+ # title = CPU7 softirqs
+ # dim TIMER name = TIMER
+ # dim TIMER algorithm = incremental
+ # dim TIMER multiplier = 1
+ # dim TIMER divisor = 1
+ # dim NET_TX name = NET_TX
+ # dim NET_TX algorithm = incremental
+ # dim NET_TX multiplier = 1
+ # dim NET_TX divisor = 1
+ # dim NET_RX name = NET_RX
+ # dim NET_RX algorithm = incremental
+ # dim NET_RX multiplier = 1
+ # dim NET_RX divisor = 1
+ # dim TASKLET name = TASKLET
+ # dim TASKLET algorithm = incremental
+ # dim TASKLET multiplier = 1
+ # dim TASKLET divisor = 1
+ # dim SCHED name = SCHED
+ # dim SCHED algorithm = incremental
+ # dim SCHED multiplier = 1
+ # dim SCHED divisor = 1
+ # dim HRTIMER name = HRTIMER
+ # dim HRTIMER algorithm = incremental
+ # dim HRTIMER multiplier = 1
+ # dim HRTIMER divisor = 1
+ # dim RCU name = RCU
+ # dim RCU algorithm = incremental
+ # dim RCU multiplier = 1
+ # dim RCU divisor = 1
+
+[system.pgpgio]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/system.pgpgio
+ # chart type = area
+ # type = system
+ # family = disk
+ # units = KiB/s
+ # context = system.pgpgio
+ # priority = 151
+ # name = system.pgpgio
+ # title = Memory Paged from/to disk
+ # dim in name = in
+ # dim in algorithm = incremental
+ # dim in multiplier = 1
+ # dim in divisor = 1
+ # dim out name = out
+ # dim out algorithm = incremental
+ # dim out multiplier = -1
+ # dim out divisor = 1
+
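The in/out pair above shows the sign convention used throughout this file: dimensions carrying outbound or write traffic get a negative multiplier (here out has multiplier -1), so the chart renders as a mirrored area with inbound above zero and outbound below. With the incremental algorithm the plotted value is the per-second rate of the raw counter, scaled by multiplier/divisor: if the kernel's page-out counter advances by 2048 KiB over one second, the out dimension plots 2048 * (-1) / 1 = -2048 KiB/s.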
+[mem.pgfaults]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/mem.pgfaults
+ # chart type = line
+ # type = mem
+ # family = system
+ # units = faults/s
+ # context = mem.pgfaults
+ # priority = 1030
+ # name = mem.pgfaults
+ # title = Memory Page Faults
+ # dim minor name = minor
+ # dim minor algorithm = incremental
+ # dim minor multiplier = 1
+ # dim minor divisor = 1
+ # dim major name = major
+ # dim major algorithm = incremental
+ # dim major multiplier = -1
+ # dim major divisor = 1
+
+[system.ram]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/system.ram
+ # chart type = stacked
+ # type = system
+ # family = ram
+ # units = MiB
+ # context = system.ram
+ # priority = 200
+ # name = system.ram
+ # title = System RAM
+ # dim free name = free
+ # dim free algorithm = absolute
+ # dim free multiplier = 1
+ # dim free divisor = 1024
+ # dim used name = used
+ # dim used algorithm = absolute
+ # dim used multiplier = 1
+ # dim used divisor = 1024
+ # dim cached name = cached
+ # dim cached algorithm = absolute
+ # dim cached multiplier = 1
+ # dim cached divisor = 1024
+ # dim buffers name = buffers
+ # dim buffers algorithm = absolute
+ # dim buffers multiplier = 1
+ # dim buffers divisor = 1024
+
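Unlike the counters above, the RAM dimensions use the absolute algorithm: each collected value is plotted directly rather than differentiated. /proc/meminfo reports KiB, so the divisor of 1024 converts to the chart's MiB units; a MemFree reading of 8388608 KiB plots as 8388608 / 1024 = 8192 MiB.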
+[mem.available]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/mem.available
+ # chart type = area
+ # type = mem
+ # family = system
+ # units = MiB
+ # context = mem.available
+ # priority = 1010
+ # name = mem.available
+ # title = Available RAM for applications
+ # dim MemAvailable name = avail
+ # dim MemAvailable algorithm = absolute
+ # dim MemAvailable multiplier = 1
+ # dim MemAvailable divisor = 1024
+
+[system.swap]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/system.swap
+ # chart type = stacked
+ # type = system
+ # family = swap
+ # units = MiB
+ # context = system.swap
+ # priority = 201
+ # name = system.swap
+ # title = System Swap
+ # dim free name = free
+ # dim free algorithm = absolute
+ # dim free multiplier = 1
+ # dim free divisor = 1024
+ # dim used name = used
+ # dim used algorithm = absolute
+ # dim used multiplier = 1
+ # dim used divisor = 1024
+
+[mem.committed]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/mem.committed
+ # chart type = area
+ # type = mem
+ # family = system
+ # units = MiB
+ # context = mem.committed
+ # priority = 1020
+ # name = mem.committed
+ # title = Committed (Allocated) Memory
+ # dim Committed_AS name = Committed_AS
+ # dim Committed_AS algorithm = absolute
+ # dim Committed_AS multiplier = 1
+ # dim Committed_AS divisor = 1024
+
+[mem.writeback]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/mem.writeback
+ # chart type = line
+ # type = mem
+ # family = kernel
+ # units = MiB
+ # context = mem.writeback
+ # priority = 1100
+ # name = mem.writeback
+ # title = Writeback Memory
+ # dim Dirty name = Dirty
+ # dim Dirty algorithm = absolute
+ # dim Dirty multiplier = 1
+ # dim Dirty divisor = 1024
+ # dim Writeback name = Writeback
+ # dim Writeback algorithm = absolute
+ # dim Writeback multiplier = 1
+ # dim Writeback divisor = 1024
+ # dim FuseWriteback name = FuseWriteback
+ # dim FuseWriteback algorithm = absolute
+ # dim FuseWriteback multiplier = 1
+ # dim FuseWriteback divisor = 1024
+ # dim NfsWriteback name = NfsWriteback
+ # dim NfsWriteback algorithm = absolute
+ # dim NfsWriteback multiplier = 1
+ # dim NfsWriteback divisor = 1024
+ # dim Bounce name = Bounce
+ # dim Bounce algorithm = absolute
+ # dim Bounce multiplier = 1
+ # dim Bounce divisor = 1024
+
+[mem.kernel]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/mem.kernel
+ # chart type = stacked
+ # type = mem
+ # family = kernel
+ # units = MiB
+ # context = mem.kernel
+ # priority = 1101
+ # name = mem.kernel
+ # title = Memory Used by Kernel
+ # dim Slab name = Slab
+ # dim Slab algorithm = absolute
+ # dim Slab multiplier = 1
+ # dim Slab divisor = 1024
+ # dim KernelStack name = KernelStack
+ # dim KernelStack algorithm = absolute
+ # dim KernelStack multiplier = 1
+ # dim KernelStack divisor = 1024
+ # dim PageTables name = PageTables
+ # dim PageTables algorithm = absolute
+ # dim PageTables multiplier = 1
+ # dim PageTables divisor = 1024
+ # dim VmallocUsed name = VmallocUsed
+ # dim VmallocUsed algorithm = absolute
+ # dim VmallocUsed multiplier = 1
+ # dim VmallocUsed divisor = 1024
+
+[mem.slab]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/mem.slab
+ # chart type = stacked
+ # type = mem
+ # family = slab
+ # units = MiB
+ # context = mem.slab
+ # priority = 1200
+ # name = mem.slab
+ # title = Reclaimable Kernel Memory
+ # dim reclaimable name = reclaimable
+ # dim reclaimable algorithm = absolute
+ # dim reclaimable multiplier = 1
+ # dim reclaimable divisor = 1024
+ # dim unreclaimable name = unreclaimable
+ # dim unreclaimable algorithm = absolute
+ # dim unreclaimable multiplier = 1
+ # dim unreclaimable divisor = 1024
+
+[mem.transparent_hugepages]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/mem.transparent_hugepages
+ # chart type = stacked
+ # type = mem
+ # family = hugepages
+ # units = MiB
+ # context = mem.transparent_hugepages
+ # priority = 1250
+ # name = mem.transparent_hugepages
+ # title = Transparent HugePages Memory
+ # dim anonymous name = anonymous
+ # dim anonymous algorithm = absolute
+ # dim anonymous multiplier = 1
+ # dim anonymous divisor = 1024
+ # dim shmem name = shmem
+ # dim shmem algorithm = absolute
+ # dim shmem multiplier = 1
+ # dim shmem divisor = 1024
+
+[net.eth0]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/net.eth0
+ # chart type = area
+ # type = net
+ # family = eth0
+ # units = kilobits/s
+ # context = net.net
+ # priority = 7000
+ # name = net.eth0
+ # title = Bandwidth
+ # dim received name = received
+ # dim received algorithm = incremental
+ # dim received multiplier = 8
+ # dim received divisor = 1000
+ # dim sent name = sent
+ # dim sent algorithm = incremental
+ # dim sent multiplier = -8
+ # dim sent divisor = 1000
+
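net.eth0 combines both scalings seen so far: the interface byte counters are incremental, and multiplier 8 with divisor 1000 turns bytes/s into kilobits/s, with sent again negated for the mirrored area. A minimal sketch of that computation, using made-up sample counters (all names here are illustrative, not netdata API):

    # Minimal sketch of the incremental algorithm with multiplier/divisor
    # scaling, as configured for net.eth0 above. Sample values are invented.
    def plotted_value(counter_now, counter_prev, dt_seconds, multiplier, divisor):
        rate = (counter_now - counter_prev) / dt_seconds  # raw units per second
        return rate * multiplier / divisor

    # 125000 bytes received in one second: 125000 * 8 / 1000 = 1000.0 kilobits/s
    print(plotted_value(1_125_000, 1_000_000, 1.0, 8, 1000))
    # the same volume sent plots below zero (multiplier -8): -1000.0
    print(plotted_value(1_125_000, 1_000_000, 1.0, -8, 1000))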
+[net_packets.eth0]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/net_packets.eth0
+ # chart type = line
+ # type = net_packets
+ # family = eth0
+ # units = packets/s
+ # context = net.packets
+ # priority = 7001
+ # name = net_packets.eth0
+ # title = Packets
+ # dim received name = received
+ # dim received algorithm = incremental
+ # dim received multiplier = 1
+ # dim received divisor = 1
+ # dim sent name = sent
+ # dim sent algorithm = incremental
+ # dim sent multiplier = -1
+ # dim sent divisor = 1
+ # dim multicast name = multicast
+ # dim multicast algorithm = incremental
+ # dim multicast multiplier = 1
+ # dim multicast divisor = 1
+
+[ipv4.sockstat_sockets]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/ipv4.sockstat_sockets
+ # chart type = line
+ # type = ipv4
+ # family = sockets
+ # units = sockets
+ # context = ipv4.sockstat_sockets
+ # priority = 5100
+ # name = ipv4.sockstat_sockets
+ # title = IPv4 Sockets Used
+ # dim used name = used
+ # dim used algorithm = absolute
+ # dim used multiplier = 1
+ # dim used divisor = 1
+
+[ipv4.sockstat_tcp_sockets]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/ipv4.sockstat_tcp_sockets
+ # chart type = line
+ # type = ipv4
+ # family = tcp
+ # units = sockets
+ # context = ipv4.sockstat_tcp_sockets
+ # priority = 5201
+ # name = ipv4.sockstat_tcp_sockets
+ # title = IPv4 TCP Sockets
+ # dim alloc name = alloc
+ # dim alloc algorithm = absolute
+ # dim alloc multiplier = 1
+ # dim alloc divisor = 1
+ # dim orphan name = orphan
+ # dim orphan algorithm = absolute
+ # dim orphan multiplier = 1
+ # dim orphan divisor = 1
+ # dim inuse name = inuse
+ # dim inuse algorithm = absolute
+ # dim inuse multiplier = 1
+ # dim inuse divisor = 1
+ # dim timewait name = timewait
+ # dim timewait algorithm = absolute
+ # dim timewait multiplier = 1
+ # dim timewait divisor = 1
+
+[ipv4.sockstat_tcp_mem]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/ipv4.sockstat_tcp_mem
+ # chart type = area
+ # type = ipv4
+ # family = tcp
+ # units = KiB
+ # context = ipv4.sockstat_tcp_mem
+ # priority = 5290
+ # name = ipv4.sockstat_tcp_mem
+ # title = IPv4 TCP Sockets Memory
+ # dim mem name = mem
+ # dim mem algorithm = absolute
+ # dim mem multiplier = 4096
+ # dim mem divisor = 1024
+
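Here the multiplier/divisor pair encodes a unit chain rather than a sign: /proc/net/sockstat reports TCP buffer memory in pages, the multiplier 4096 reflects this host's 4 KiB page size, and the divisor 1024 brings bytes down to the chart's KiB. 100 pages therefore plot as 100 * 4096 / 1024 = 400 KiB.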
+[ipv4.sockstat_udp_sockets]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/ipv4.sockstat_udp_sockets
+ # chart type = line
+ # type = ipv4
+ # family = udp
+ # units = sockets
+ # context = ipv4.sockstat_udp_sockets
+ # priority = 5300
+ # name = ipv4.sockstat_udp_sockets
+ # title = IPv4 UDP Sockets
+ # dim inuse name = inuse
+ # dim inuse algorithm = absolute
+ # dim inuse multiplier = 1
+ # dim inuse divisor = 1
+
+[ipv4.sockstat_udp_mem]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/ipv4.sockstat_udp_mem
+ # chart type = area
+ # type = ipv4
+ # family = udp
+ # units = KiB
+ # context = ipv4.sockstat_udp_mem
+ # priority = 5390
+ # name = ipv4.sockstat_udp_mem
+ # title = IPv4 UDP Sockets Memory
+ # dim mem name = mem
+ # dim mem algorithm = absolute
+ # dim mem multiplier = 4096
+ # dim mem divisor = 1024
+
+[ipv6.sockstat6_tcp_sockets]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/ipv6.sockstat6_tcp_sockets
+ # chart type = line
+ # type = ipv6
+ # family = tcp6
+ # units = sockets
+ # context = ipv6.sockstat6_tcp_sockets
+ # priority = 6500
+ # name = ipv6.sockstat6_tcp_sockets
+ # title = IPv6 TCP Sockets
+ # dim inuse name = inuse
+ # dim inuse algorithm = absolute
+ # dim inuse multiplier = 1
+ # dim inuse divisor = 1
+
+[ip.tcpconnaborts]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/ip.tcpconnaborts
+ # chart type = line
+ # type = ip
+ # family = tcp
+ # units = connections/s
+ # context = ip.tcpconnaborts
+ # priority = 4210
+ # name = ip.tcpconnaborts
+ # title = TCP Connection Aborts
+ # dim TCPAbortOnData name = baddata
+ # dim TCPAbortOnData algorithm = incremental
+ # dim TCPAbortOnData multiplier = 1
+ # dim TCPAbortOnData divisor = 1
+ # dim TCPAbortOnClose name = userclosed
+ # dim TCPAbortOnClose algorithm = incremental
+ # dim TCPAbortOnClose multiplier = 1
+ # dim TCPAbortOnClose divisor = 1
+ # dim TCPAbortOnMemory name = nomemory
+ # dim TCPAbortOnMemory algorithm = incremental
+ # dim TCPAbortOnMemory multiplier = 1
+ # dim TCPAbortOnMemory divisor = 1
+ # dim TCPAbortOnTimeout name = timeout
+ # dim TCPAbortOnTimeout algorithm = incremental
+ # dim TCPAbortOnTimeout multiplier = 1
+ # dim TCPAbortOnTimeout divisor = 1
+ # dim TCPAbortOnLinger name = linger
+ # dim TCPAbortOnLinger algorithm = incremental
+ # dim TCPAbortOnLinger multiplier = 1
+ # dim TCPAbortOnLinger divisor = 1
+ # dim TCPAbortFailed name = failed
+ # dim TCPAbortFailed algorithm = incremental
+ # dim TCPAbortFailed multiplier = -1
+ # dim TCPAbortFailed divisor = 1
+
+[ip.tcpofo]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/ip.tcpofo
+ # chart type = line
+ # type = ip
+ # family = tcp
+ # units = packets/s
+ # context = ip.tcpofo
+ # priority = 4250
+ # name = ip.tcpofo
+ # title = TCP Out-Of-Order Queue
+ # dim TCPOFOQueue name = inqueue
+ # dim TCPOFOQueue algorithm = incremental
+ # dim TCPOFOQueue multiplier = 1
+ # dim TCPOFOQueue divisor = 1
+ # dim TCPOFODrop name = dropped
+ # dim TCPOFODrop algorithm = incremental
+ # dim TCPOFODrop multiplier = -1
+ # dim TCPOFODrop divisor = 1
+ # dim TCPOFOMerge name = merged
+ # dim TCPOFOMerge algorithm = incremental
+ # dim TCPOFOMerge multiplier = 1
+ # dim TCPOFOMerge divisor = 1
+ # dim OfoPruned name = pruned
+ # dim OfoPruned algorithm = incremental
+ # dim OfoPruned multiplier = -1
+ # dim OfoPruned divisor = 1
+
+[system.ip]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/system.ip
+ # chart type = area
+ # type = system
+ # family = network
+ # units = kilobits/s
+ # context = system.ip
+ # priority = 501
+ # name = system.ip
+ # title = IP Bandwidth
+ # dim InOctets name = received
+ # dim InOctets algorithm = incremental
+ # dim InOctets multiplier = 8
+ # dim InOctets divisor = 1000
+ # dim OutOctets name = sent
+ # dim OutOctets algorithm = incremental
+ # dim OutOctets multiplier = -8
+ # dim OutOctets divisor = 1000
+
+[ip.bcast]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/ip.bcast
+ # chart type = area
+ # type = ip
+ # family = broadcast
+ # units = kilobits/s
+ # context = ip.bcast
+ # priority = 4500
+ # name = ip.bcast
+ # title = IP Broadcast Bandwidth
+ # dim InBcastOctets name = received
+ # dim InBcastOctets algorithm = incremental
+ # dim InBcastOctets multiplier = 8
+ # dim InBcastOctets divisor = 1000
+ # dim OutBcastOctets name = sent
+ # dim OutBcastOctets algorithm = incremental
+ # dim OutBcastOctets multiplier = -8
+ # dim OutBcastOctets divisor = 1000
+
+[ip.bcastpkts]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/ip.bcastpkts
+ # chart type = line
+ # type = ip
+ # family = broadcast
+ # units = packets/s
+ # context = ip.bcastpkts
+ # priority = 4510
+ # name = ip.bcastpkts
+ # title = IP Broadcast Packets
+ # dim InBcastPkts name = received
+ # dim InBcastPkts algorithm = incremental
+ # dim InBcastPkts multiplier = 1
+ # dim InBcastPkts divisor = 1
+ # dim OutBcastPkts name = sent
+ # dim OutBcastPkts algorithm = incremental
+ # dim OutBcastPkts multiplier = -1
+ # dim OutBcastPkts divisor = 1
+
+[ip.ecnpkts]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/ip.ecnpkts
+ # chart type = line
+ # type = ip
+ # family = ecn
+ # units = packets/s
+ # context = ip.ecnpkts
+ # priority = 4700
+ # name = ip.ecnpkts
+ # title = IP ECN Statistics
+ # dim InCEPkts name = CEP
+ # dim InCEPkts algorithm = incremental
+ # dim InCEPkts multiplier = 1
+ # dim InCEPkts divisor = 1
+ # dim InNoECTPkts name = NoECTP
+ # dim InNoECTPkts algorithm = incremental
+ # dim InNoECTPkts multiplier = -1
+ # dim InNoECTPkts divisor = 1
+ # dim InECT0Pkts name = ECTP0
+ # dim InECT0Pkts algorithm = incremental
+ # dim InECT0Pkts multiplier = 1
+ # dim InECT0Pkts divisor = 1
+ # dim InECT1Pkts name = ECTP1
+ # dim InECT1Pkts algorithm = incremental
+ # dim InECT1Pkts multiplier = 1
+ # dim InECT1Pkts divisor = 1
+
+[ipv4.packets]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/ipv4.packets
+ # chart type = line
+ # type = ipv4
+ # family = packets
+ # units = packets/s
+ # context = ipv4.packets
+ # priority = 5130
+ # name = ipv4.packets
+ # title = IPv4 Packets
+ # dim InReceives name = received
+ # dim InReceives algorithm = incremental
+ # dim InReceives multiplier = 1
+ # dim InReceives divisor = 1
+ # dim OutRequests name = sent
+ # dim OutRequests algorithm = incremental
+ # dim OutRequests multiplier = -1
+ # dim OutRequests divisor = 1
+ # dim ForwDatagrams name = forwarded
+ # dim ForwDatagrams algorithm = incremental
+ # dim ForwDatagrams multiplier = 1
+ # dim ForwDatagrams divisor = 1
+ # dim InDelivers name = delivered
+ # dim InDelivers algorithm = incremental
+ # dim InDelivers multiplier = 1
+ # dim InDelivers divisor = 1
+
+[ipv4.tcppackets]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/ipv4.tcppackets
+ # chart type = line
+ # type = ipv4
+ # family = tcp
+ # units = packets/s
+ # context = ipv4.tcppackets
+ # priority = 5204
+ # name = ipv4.tcppackets
+ # title = IPv4 TCP Packets
+ # dim InSegs name = received
+ # dim InSegs algorithm = incremental
+ # dim InSegs multiplier = 1
+ # dim InSegs divisor = 1
+ # dim OutSegs name = sent
+ # dim OutSegs algorithm = incremental
+ # dim OutSegs multiplier = -1
+ # dim OutSegs divisor = 1
+
+[ipv4.tcpopens]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/ipv4.tcpopens
+ # chart type = line
+ # type = ipv4
+ # family = tcp
+ # units = connections/s
+ # context = ipv4.tcpopens
+ # priority = 5205
+ # name = ipv4.tcpopens
+ # title = IPv4 TCP Opens
+ # dim ActiveOpens name = active
+ # dim ActiveOpens algorithm = incremental
+ # dim ActiveOpens multiplier = 1
+ # dim ActiveOpens divisor = 1
+ # dim PassiveOpens name = passive
+ # dim PassiveOpens algorithm = incremental
+ # dim PassiveOpens multiplier = 1
+ # dim PassiveOpens divisor = 1
+
+[ipv4.tcphandshake]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/ipv4.tcphandshake
+ # chart type = line
+ # type = ipv4
+ # family = tcp
+ # units = events/s
+ # context = ipv4.tcphandshake
+ # priority = 5230
+ # name = ipv4.tcphandshake
+ # title = IPv4 TCP Handshake Issues
+ # dim EstabResets name = EstabResets
+ # dim EstabResets algorithm = incremental
+ # dim EstabResets multiplier = 1
+ # dim EstabResets divisor = 1
+ # dim OutRsts name = OutRsts
+ # dim OutRsts algorithm = incremental
+ # dim OutRsts multiplier = 1
+ # dim OutRsts divisor = 1
+ # dim AttemptFails name = AttemptFails
+ # dim AttemptFails algorithm = incremental
+ # dim AttemptFails multiplier = 1
+ # dim AttemptFails divisor = 1
+ # dim TCPSynRetrans name = SynRetrans
+ # dim TCPSynRetrans algorithm = incremental
+ # dim TCPSynRetrans multiplier = 1
+ # dim TCPSynRetrans divisor = 1
+
+[ipv4.udppackets]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/ipv4.udppackets
+ # chart type = line
+ # type = ipv4
+ # family = udp
+ # units = packets/s
+ # context = ipv4.udppackets
+ # priority = 5300
+ # name = ipv4.udppackets
+ # title = IPv4 UDP Packets
+ # dim InDatagrams name = received
+ # dim InDatagrams algorithm = incremental
+ # dim InDatagrams multiplier = 1
+ # dim InDatagrams divisor = 1
+ # dim OutDatagrams name = sent
+ # dim OutDatagrams algorithm = incremental
+ # dim OutDatagrams multiplier = -1
+ # dim OutDatagrams divisor = 1
+
+[ipv4.udperrors]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/ipv4.udperrors
+ # chart type = line
+ # type = ipv4
+ # family = udp
+ # units = events/s
+ # context = ipv4.udperrors
+ # priority = 5310
+ # name = ipv4.udperrors
+ # title = IPv4 UDP Errors
+ # dim RcvbufErrors name = RcvbufErrors
+ # dim RcvbufErrors algorithm = incremental
+ # dim RcvbufErrors multiplier = 1
+ # dim RcvbufErrors divisor = 1
+ # dim SndbufErrors name = SndbufErrors
+ # dim SndbufErrors algorithm = incremental
+ # dim SndbufErrors multiplier = -1
+ # dim SndbufErrors divisor = 1
+ # dim InErrors name = InErrors
+ # dim InErrors algorithm = incremental
+ # dim InErrors multiplier = 1
+ # dim InErrors divisor = 1
+ # dim NoPorts name = NoPorts
+ # dim NoPorts algorithm = incremental
+ # dim NoPorts multiplier = 1
+ # dim NoPorts divisor = 1
+ # dim InCsumErrors name = InCsumErrors
+ # dim InCsumErrors algorithm = incremental
+ # dim InCsumErrors multiplier = 1
+ # dim InCsumErrors divisor = 1
+ # dim IgnoredMulti name = IgnoredMulti
+ # dim IgnoredMulti algorithm = incremental
+ # dim IgnoredMulti multiplier = 1
+ # dim IgnoredMulti divisor = 1
+
+[system.ipv6]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/system.ipv6
+ # chart type = area
+ # type = system
+ # family = network
+ # units = kilobits/s
+ # context = system.ipv6
+ # priority = 502
+ # name = system.ipv6
+ # title = IPv6 Bandwidth
+ # dim InOctets name = received
+ # dim InOctets algorithm = incremental
+ # dim InOctets multiplier = 8
+ # dim InOctets divisor = 1000
+ # dim OutOctets name = sent
+ # dim OutOctets algorithm = incremental
+ # dim OutOctets multiplier = -8
+ # dim OutOctets divisor = 1000
+
+[ipv6.packets]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/ipv6.packets
+ # chart type = line
+ # type = ipv6
+ # family = packets
+ # units = packets/s
+ # context = ipv6.packets
+ # priority = 6200
+ # name = ipv6.packets
+ # title = IPv6 Packets
+ # dim InReceives name = received
+ # dim InReceives algorithm = incremental
+ # dim InReceives multiplier = 1
+ # dim InReceives divisor = 1
+ # dim OutRequests name = sent
+ # dim OutRequests algorithm = incremental
+ # dim OutRequests multiplier = -1
+ # dim OutRequests divisor = 1
+ # dim OutForwDatagrams name = forwarded
+ # dim OutForwDatagrams algorithm = incremental
+ # dim OutForwDatagrams multiplier = -1
+ # dim OutForwDatagrams divisor = 1
+ # dim InDelivers name = delivers
+ # dim InDelivers algorithm = incremental
+ # dim InDelivers multiplier = 1
+ # dim InDelivers divisor = 1
+
+[ipv6.errors]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/ipv6.errors
+ # chart type = line
+ # type = ipv6
+ # family = errors
+ # units = packets/s
+ # context = ipv6.errors
+ # priority = 6300
+ # name = ipv6.errors
+ # title = IPv6 Errors
+ # dim InDiscards name = InDiscards
+ # dim InDiscards algorithm = incremental
+ # dim InDiscards multiplier = 1
+ # dim InDiscards divisor = 1
+ # dim OutDiscards name = OutDiscards
+ # dim OutDiscards algorithm = incremental
+ # dim OutDiscards multiplier = -1
+ # dim OutDiscards divisor = 1
+ # dim InHdrErrors name = InHdrErrors
+ # dim InHdrErrors algorithm = incremental
+ # dim InHdrErrors multiplier = 1
+ # dim InHdrErrors divisor = 1
+ # dim InAddrErrors name = InAddrErrors
+ # dim InAddrErrors algorithm = incremental
+ # dim InAddrErrors multiplier = 1
+ # dim InAddrErrors divisor = 1
+ # dim InUnknownProtos name = InUnknownProtos
+ # dim InUnknownProtos algorithm = incremental
+ # dim InUnknownProtos multiplier = 1
+ # dim InUnknownProtos divisor = 1
+ # dim InTooBigErrors name = InTooBigErrors
+ # dim InTooBigErrors algorithm = incremental
+ # dim InTooBigErrors multiplier = 1
+ # dim InTooBigErrors divisor = 1
+ # dim InTruncatedPkts name = InTruncatedPkts
+ # dim InTruncatedPkts algorithm = incremental
+ # dim InTruncatedPkts multiplier = 1
+ # dim InTruncatedPkts divisor = 1
+ # dim InNoRoutes name = InNoRoutes
+ # dim InNoRoutes algorithm = incremental
+ # dim InNoRoutes multiplier = 1
+ # dim InNoRoutes divisor = 1
+ # dim OutNoRoutes name = OutNoRoutes
+ # dim OutNoRoutes algorithm = incremental
+ # dim OutNoRoutes multiplier = -1
+ # dim OutNoRoutes divisor = 1
+
+[system.softnet_stat]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/system.softnet_stat
+ # chart type = line
+ # type = system
+ # family = softnet_stat
+ # units = events/s
+ # context = system.softnet_stat
+ # priority = 955
+ # name = system.softnet_stat
+ # title = System softnet_stat
+ # dim processed name = processed
+ # dim processed algorithm = incremental
+ # dim processed multiplier = 1
+ # dim processed divisor = 1
+ # dim dropped name = dropped
+ # dim dropped algorithm = incremental
+ # dim dropped multiplier = 1
+ # dim dropped divisor = 1
+ # dim squeezed name = squeezed
+ # dim squeezed algorithm = incremental
+ # dim squeezed multiplier = 1
+ # dim squeezed divisor = 1
+ # dim received_rps name = received_rps
+ # dim received_rps algorithm = incremental
+ # dim received_rps multiplier = 1
+ # dim received_rps divisor = 1
+ # dim flow_limit_count name = flow_limit_count
+ # dim flow_limit_count algorithm = incremental
+ # dim flow_limit_count multiplier = 1
+ # dim flow_limit_count divisor = 1
+
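system.softnet_stat and the per-CPU charts that follow all read /proc/net/softnet_stat, one hex-encoded row per CPU; squeezed counts the times net_rx_action stopped with work left because its budget or time slice ran out. A rough parsing sketch follows; the column layout varies across kernel versions, so the indices below are assumptions matching the five fields charted here:

    # Rough sketch: read /proc/net/softnet_stat into the five fields charted
    # above. Values are hexadecimal; column indices are assumed and may differ
    # between kernel versions.
    FIELDS = {"processed": 0, "dropped": 1, "squeezed": 2,
              "received_rps": 9, "flow_limit_count": 10}

    with open("/proc/net/softnet_stat") as fh:
        for cpu, line in enumerate(fh):
            cols = line.split()
            stats = {name: int(cols[i], 16)
                     for name, i in FIELDS.items() if i < len(cols)}
            print(f"cpu{cpu}", stats)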
+[cpu.cpu0_softnet_stat]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/cpu.cpu0_softnet_stat
+ # chart type = line
+ # type = cpu
+ # family = softnet_stat
+ # units = events/s
+ # context = cpu.softnet_stat
+ # priority = 4101
+ # name = cpu.cpu0_softnet_stat
+ # title = CPU0 softnet_stat
+ # dim processed name = processed
+ # dim processed algorithm = incremental
+ # dim processed multiplier = 1
+ # dim processed divisor = 1
+ # dim dropped name = dropped
+ # dim dropped algorithm = incremental
+ # dim dropped multiplier = 1
+ # dim dropped divisor = 1
+ # dim squeezed name = squeezed
+ # dim squeezed algorithm = incremental
+ # dim squeezed multiplier = 1
+ # dim squeezed divisor = 1
+ # dim received_rps name = received_rps
+ # dim received_rps algorithm = incremental
+ # dim received_rps multiplier = 1
+ # dim received_rps divisor = 1
+ # dim flow_limit_count name = flow_limit_count
+ # dim flow_limit_count algorithm = incremental
+ # dim flow_limit_count multiplier = 1
+ # dim flow_limit_count divisor = 1
+
+[cpu.cpu1_softnet_stat]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/cpu.cpu1_softnet_stat
+ # chart type = line
+ # type = cpu
+ # family = softnet_stat
+ # units = events/s
+ # context = cpu.softnet_stat
+ # priority = 4102
+ # name = cpu.cpu1_softnet_stat
+ # title = CPU1 softnet_stat
+ # dim processed name = processed
+ # dim processed algorithm = incremental
+ # dim processed multiplier = 1
+ # dim processed divisor = 1
+ # dim dropped name = dropped
+ # dim dropped algorithm = incremental
+ # dim dropped multiplier = 1
+ # dim dropped divisor = 1
+ # dim squeezed name = squeezed
+ # dim squeezed algorithm = incremental
+ # dim squeezed multiplier = 1
+ # dim squeezed divisor = 1
+ # dim received_rps name = received_rps
+ # dim received_rps algorithm = incremental
+ # dim received_rps multiplier = 1
+ # dim received_rps divisor = 1
+ # dim flow_limit_count name = flow_limit_count
+ # dim flow_limit_count algorithm = incremental
+ # dim flow_limit_count multiplier = 1
+ # dim flow_limit_count divisor = 1
+
+[cpu.cpu2_softnet_stat]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/cpu.cpu2_softnet_stat
+ # chart type = line
+ # type = cpu
+ # family = softnet_stat
+ # units = events/s
+ # context = cpu.softnet_stat
+ # priority = 4103
+ # name = cpu.cpu2_softnet_stat
+ # title = CPU2 softnet_stat
+ # dim processed name = processed
+ # dim processed algorithm = incremental
+ # dim processed multiplier = 1
+ # dim processed divisor = 1
+ # dim dropped name = dropped
+ # dim dropped algorithm = incremental
+ # dim dropped multiplier = 1
+ # dim dropped divisor = 1
+ # dim squeezed name = squeezed
+ # dim squeezed algorithm = incremental
+ # dim squeezed multiplier = 1
+ # dim squeezed divisor = 1
+ # dim received_rps name = received_rps
+ # dim received_rps algorithm = incremental
+ # dim received_rps multiplier = 1
+ # dim received_rps divisor = 1
+ # dim flow_limit_count name = flow_limit_count
+ # dim flow_limit_count algorithm = incremental
+ # dim flow_limit_count multiplier = 1
+ # dim flow_limit_count divisor = 1
+
+[cpu.cpu3_softnet_stat]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/cpu.cpu3_softnet_stat
+ # chart type = line
+ # type = cpu
+ # family = softnet_stat
+ # units = events/s
+ # context = cpu.softnet_stat
+ # priority = 4104
+ # name = cpu.cpu3_softnet_stat
+ # title = CPU3 softnet_stat
+ # dim processed name = processed
+ # dim processed algorithm = incremental
+ # dim processed multiplier = 1
+ # dim processed divisor = 1
+ # dim dropped name = dropped
+ # dim dropped algorithm = incremental
+ # dim dropped multiplier = 1
+ # dim dropped divisor = 1
+ # dim squeezed name = squeezed
+ # dim squeezed algorithm = incremental
+ # dim squeezed multiplier = 1
+ # dim squeezed divisor = 1
+ # dim received_rps name = received_rps
+ # dim received_rps algorithm = incremental
+ # dim received_rps multiplier = 1
+ # dim received_rps divisor = 1
+ # dim flow_limit_count name = flow_limit_count
+ # dim flow_limit_count algorithm = incremental
+ # dim flow_limit_count multiplier = 1
+ # dim flow_limit_count divisor = 1
+
+[cpu.cpu4_softnet_stat]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/cpu.cpu4_softnet_stat
+ # chart type = line
+ # type = cpu
+ # family = softnet_stat
+ # units = events/s
+ # context = cpu.softnet_stat
+ # priority = 4105
+ # name = cpu.cpu4_softnet_stat
+ # title = CPU4 softnet_stat
+ # dim processed name = processed
+ # dim processed algorithm = incremental
+ # dim processed multiplier = 1
+ # dim processed divisor = 1
+ # dim dropped name = dropped
+ # dim dropped algorithm = incremental
+ # dim dropped multiplier = 1
+ # dim dropped divisor = 1
+ # dim squeezed name = squeezed
+ # dim squeezed algorithm = incremental
+ # dim squeezed multiplier = 1
+ # dim squeezed divisor = 1
+ # dim received_rps name = received_rps
+ # dim received_rps algorithm = incremental
+ # dim received_rps multiplier = 1
+ # dim received_rps divisor = 1
+ # dim flow_limit_count name = flow_limit_count
+ # dim flow_limit_count algorithm = incremental
+ # dim flow_limit_count multiplier = 1
+ # dim flow_limit_count divisor = 1
+
+[cpu.cpu5_softnet_stat]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/cpu.cpu5_softnet_stat
+ # chart type = line
+ # type = cpu
+ # family = softnet_stat
+ # units = events/s
+ # context = cpu.softnet_stat
+ # priority = 4106
+ # name = cpu.cpu5_softnet_stat
+ # title = CPU5 softnet_stat
+ # dim processed name = processed
+ # dim processed algorithm = incremental
+ # dim processed multiplier = 1
+ # dim processed divisor = 1
+ # dim dropped name = dropped
+ # dim dropped algorithm = incremental
+ # dim dropped multiplier = 1
+ # dim dropped divisor = 1
+ # dim squeezed name = squeezed
+ # dim squeezed algorithm = incremental
+ # dim squeezed multiplier = 1
+ # dim squeezed divisor = 1
+ # dim received_rps name = received_rps
+ # dim received_rps algorithm = incremental
+ # dim received_rps multiplier = 1
+ # dim received_rps divisor = 1
+ # dim flow_limit_count name = flow_limit_count
+ # dim flow_limit_count algorithm = incremental
+ # dim flow_limit_count multiplier = 1
+ # dim flow_limit_count divisor = 1
+
+[cpu.cpu6_softnet_stat]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/cpu.cpu6_softnet_stat
+ # chart type = line
+ # type = cpu
+ # family = softnet_stat
+ # units = events/s
+ # context = cpu.softnet_stat
+ # priority = 4107
+ # name = cpu.cpu6_softnet_stat
+ # title = CPU6 softnet_stat
+ # dim processed name = processed
+ # dim processed algorithm = incremental
+ # dim processed multiplier = 1
+ # dim processed divisor = 1
+ # dim dropped name = dropped
+ # dim dropped algorithm = incremental
+ # dim dropped multiplier = 1
+ # dim dropped divisor = 1
+ # dim squeezed name = squeezed
+ # dim squeezed algorithm = incremental
+ # dim squeezed multiplier = 1
+ # dim squeezed divisor = 1
+ # dim received_rps name = received_rps
+ # dim received_rps algorithm = incremental
+ # dim received_rps multiplier = 1
+ # dim received_rps divisor = 1
+ # dim flow_limit_count name = flow_limit_count
+ # dim flow_limit_count algorithm = incremental
+ # dim flow_limit_count multiplier = 1
+ # dim flow_limit_count divisor = 1
+
+[cpu.cpu7_softnet_stat]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/cpu.cpu7_softnet_stat
+ # chart type = line
+ # type = cpu
+ # family = softnet_stat
+ # units = events/s
+ # context = cpu.softnet_stat
+ # priority = 4108
+ # name = cpu.cpu7_softnet_stat
+ # title = CPU7 softnet_stat
+ # dim processed name = processed
+ # dim processed algorithm = incremental
+ # dim processed multiplier = 1
+ # dim processed divisor = 1
+ # dim dropped name = dropped
+ # dim dropped algorithm = incremental
+ # dim dropped multiplier = 1
+ # dim dropped divisor = 1
+ # dim squeezed name = squeezed
+ # dim squeezed algorithm = incremental
+ # dim squeezed multiplier = 1
+ # dim squeezed divisor = 1
+ # dim received_rps name = received_rps
+ # dim received_rps algorithm = incremental
+ # dim received_rps multiplier = 1
+ # dim received_rps divisor = 1
+ # dim flow_limit_count name = flow_limit_count
+ # dim flow_limit_count algorithm = incremental
+ # dim flow_limit_count multiplier = 1
+ # dim flow_limit_count divisor = 1
+
+[netfilter.conntrack_sockets]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/netfilter.conntrack_sockets
+ # chart type = line
+ # type = netfilter
+ # family = conntrack
+ # units = active connections
+ # context = netfilter.conntrack_sockets
+ # priority = 8700
+ # name = netfilter.conntrack_sockets
+ # title = Connection Tracker Connections
+ # dim connections name = connections
+ # dim connections algorithm = absolute
+ # dim connections multiplier = 1
+ # dim connections divisor = 1
+
+[netfilter.conntrack_new]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/netfilter.conntrack_new
+ # chart type = line
+ # type = netfilter
+ # family = conntrack
+ # units = connections/s
+ # context = netfilter.conntrack_new
+ # priority = 8701
+ # name = netfilter.conntrack_new
+ # title = Connection Tracker New Connections
+ # dim new name = new
+ # dim new algorithm = incremental
+ # dim new multiplier = 1
+ # dim new divisor = 1
+ # dim ignore name = ignore
+ # dim ignore algorithm = incremental
+ # dim ignore multiplier = -1
+ # dim ignore divisor = 1
+ # dim invalid name = invalid
+ # dim invalid algorithm = incremental
+ # dim invalid multiplier = -1
+ # dim invalid divisor = 1
+
+[netfilter.conntrack_changes]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/netfilter.conntrack_changes
+ # chart type = line
+ # type = netfilter
+ # family = conntrack
+ # units = changes/s
+ # context = netfilter.conntrack_changes
+ # priority = 8702
+ # name = netfilter.conntrack_changes
+ # title = Connection Tracker Changes
+ # dim inserted name = inserted
+ # dim inserted algorithm = incremental
+ # dim inserted multiplier = 1
+ # dim inserted divisor = 1
+ # dim deleted name = deleted
+ # dim deleted algorithm = incremental
+ # dim deleted multiplier = -1
+ # dim deleted divisor = 1
+ # dim delete_list name = delete_list
+ # dim delete_list algorithm = incremental
+ # dim delete_list multiplier = -1
+ # dim delete_list divisor = 1
+
+[netfilter.conntrack_expect]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/netfilter.conntrack_expect
+ # chart type = line
+ # type = netfilter
+ # family = conntrack
+ # units = expectations/s
+ # context = netfilter.conntrack_expect
+ # priority = 8703
+ # name = netfilter.conntrack_expect
+ # title = Connection Tracker Expectations
+ # dim created name = created
+ # dim created algorithm = incremental
+ # dim created multiplier = 1
+ # dim created divisor = 1
+ # dim deleted name = deleted
+ # dim deleted algorithm = incremental
+ # dim deleted multiplier = -1
+ # dim deleted divisor = 1
+ # dim new name = new
+ # dim new algorithm = incremental
+ # dim new multiplier = 1
+ # dim new divisor = 1
+
+[netfilter.conntrack_search]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/netfilter.conntrack_search
+ # chart type = line
+ # type = netfilter
+ # family = conntrack
+ # units = searches/s
+ # context = netfilter.conntrack_search
+ # priority = 8710
+ # name = netfilter.conntrack_search
+ # title = Connection Tracker Searches
+ # dim searched name = searched
+ # dim searched algorithm = incremental
+ # dim searched multiplier = 1
+ # dim searched divisor = 1
+ # dim restarted name = restarted
+ # dim restarted algorithm = incremental
+ # dim restarted multiplier = -1
+ # dim restarted divisor = 1
+ # dim found name = found
+ # dim found algorithm = incremental
+ # dim found multiplier = 1
+ # dim found divisor = 1
+
+[netfilter.conntrack_errors]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/netfilter.conntrack_errors
+ # chart type = line
+ # type = netfilter
+ # family = conntrack
+ # units = events/s
+ # context = netfilter.conntrack_errors
+ # priority = 8705
+ # name = netfilter.conntrack_errors
+ # title = Connection Tracker Errors
+ # dim icmp_error name = icmp_error
+ # dim icmp_error algorithm = incremental
+ # dim icmp_error multiplier = 1
+ # dim icmp_error divisor = 1
+ # dim insert_failed name = insert_failed
+ # dim insert_failed algorithm = incremental
+ # dim insert_failed multiplier = -1
+ # dim insert_failed divisor = 1
+ # dim drop name = drop
+ # dim drop algorithm = incremental
+ # dim drop multiplier = -1
+ # dim drop divisor = 1
+ # dim early_drop name = early_drop
+ # dim early_drop algorithm = incremental
+ # dim early_drop multiplier = -1
+ # dim early_drop divisor = 1
+
+[disk.sda]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/disk.sda
+ # chart type = area
+ # type = disk
+ # family = sda
+ # units = KiB/s
+ # context = disk.io
+ # priority = 2000
+ # name = disk.sda
+ # title = Disk I/O Bandwidth
+ # dim reads name = reads
+ # dim reads algorithm = incremental
+ # dim reads multiplier = 512
+ # dim reads divisor = 1024
+ # dim writes name = writes
+ # dim writes algorithm = incremental
+ # dim writes multiplier = -512
+ # dim writes divisor = 1024
+
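Disk bandwidth repeats the pattern with sector units: /proc/diskstats counts 512-byte sectors, so multiplier 512 and divisor 1024 yield KiB/s. A delta of 2000 sectors read in one second plots as 2000 * 512 / 1024 = 1000 KiB/s, with writes mirrored below zero.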
+[disk_ops.sda]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/disk_ops.sda
+ # chart type = line
+ # type = disk_ops
+ # family = sda
+ # units = operations/s
+ # context = disk.ops
+ # priority = 2001
+ # name = disk_ops.sda
+ # title = Disk Completed I/O Operations
+ # dim reads name = reads
+ # dim reads algorithm = incremental
+ # dim reads multiplier = 1
+ # dim reads divisor = 1
+ # dim writes name = writes
+ # dim writes algorithm = incremental
+ # dim writes multiplier = -1
+ # dim writes divisor = 1
+
+[disk_backlog.sda]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/disk_backlog.sda
+ # chart type = area
+ # type = disk_backlog
+ # family = sda
+ # units = milliseconds
+ # context = disk.backlog
+ # priority = 2003
+ # name = disk_backlog.sda
+ # title = Disk Backlog
+ # dim backlog name = backlog
+ # dim backlog algorithm = incremental
+ # dim backlog multiplier = 1
+ # dim backlog divisor = 10
+
+[disk_util.sda]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/disk_util.sda
+ # chart type = area
+ # type = disk_util
+ # family = sda
+ # units = % of time working
+ # context = disk.util
+ # priority = 2004
+ # name = disk_util.sda
+ # title = Disk Utilization Time
+ # dim utilization name = utilization
+ # dim utilization algorithm = incremental
+ # dim utilization multiplier = 1
+ # dim utilization divisor = 10
+
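The utilization divisor of 10 turns milliseconds into percent: the io_ticks counter in /proc/diskstats accumulates milliseconds the device was busy, so an incremental rate of 350 ms of busy time per second of wall clock plots as 350 / 10 = 35 "% of time working".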
+[disk_iotime.sda]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/disk_iotime.sda
+ # chart type = line
+ # type = disk_iotime
+ # family = sda
+ # units = milliseconds/s
+ # context = disk.iotime
+ # priority = 2022
+ # name = disk_iotime.sda
+ # title = Disk Total I/O Time
+ # dim reads name = reads
+ # dim reads algorithm = incremental
+ # dim reads multiplier = 1
+ # dim reads divisor = 1
+ # dim writes name = writes
+ # dim writes algorithm = incremental
+ # dim writes multiplier = -1
+ # dim writes divisor = 1
+
+[disk.sdb]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/disk.sdb
+ # chart type = area
+ # type = disk
+ # family = sdb
+ # units = KiB/s
+ # context = disk.io
+ # priority = 2000
+ # name = disk.sdb
+ # title = Disk I/O Bandwidth
+ # dim reads name = reads
+ # dim reads algorithm = incremental
+ # dim reads multiplier = 512
+ # dim reads divisor = 1024
+ # dim writes name = writes
+ # dim writes algorithm = incremental
+ # dim writes multiplier = -512
+ # dim writes divisor = 1024
+
+[disk_ops.sdb]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/disk_ops.sdb
+ # chart type = line
+ # type = disk_ops
+ # family = sdb
+ # units = operations/s
+ # context = disk.ops
+ # priority = 2001
+ # name = disk_ops.sdb
+ # title = Disk Completed I/O Operations
+ # dim reads name = reads
+ # dim reads algorithm = incremental
+ # dim reads multiplier = 1
+ # dim reads divisor = 1
+ # dim writes name = writes
+ # dim writes algorithm = incremental
+ # dim writes multiplier = -1
+ # dim writes divisor = 1
+
+[disk_backlog.sdb]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/disk_backlog.sdb
+ # chart type = area
+ # type = disk_backlog
+ # family = sdb
+ # units = milliseconds
+ # context = disk.backlog
+ # priority = 2003
+ # name = disk_backlog.sdb
+ # title = Disk Backlog
+ # dim backlog name = backlog
+ # dim backlog algorithm = incremental
+ # dim backlog multiplier = 1
+ # dim backlog divisor = 10
+
+[disk_util.sdb]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/disk_util.sdb
+ # chart type = area
+ # type = disk_util
+ # family = sdb
+ # units = % of time working
+ # context = disk.util
+ # priority = 2004
+ # name = disk_util.sdb
+ # title = Disk Utilization Time
+ # dim utilization name = utilization
+ # dim utilization algorithm = incremental
+ # dim utilization multiplier = 1
+ # dim utilization divisor = 10
+
+[disk_mops.sdb]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/disk_mops.sdb
+ # chart type = line
+ # type = disk_mops
+ # family = sdb
+ # units = merged operations/s
+ # context = disk.mops
+ # priority = 2021
+ # name = disk_mops.sdb
+ # title = Disk Merged Operations
+ # dim reads name = reads
+ # dim reads algorithm = incremental
+ # dim reads multiplier = 1
+ # dim reads divisor = 1
+ # dim writes name = writes
+ # dim writes algorithm = incremental
+ # dim writes multiplier = -1
+ # dim writes divisor = 1
+
+[disk_iotime.sdb]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/disk_iotime.sdb
+ # chart type = line
+ # type = disk_iotime
+ # family = sdb
+ # units = milliseconds/s
+ # context = disk.iotime
+ # priority = 2022
+ # name = disk_iotime.sdb
+ # title = Disk Total I/O Time
+ # dim reads name = reads
+ # dim reads algorithm = incremental
+ # dim reads multiplier = 1
+ # dim reads divisor = 1
+ # dim writes name = writes
+ # dim writes algorithm = incremental
+ # dim writes multiplier = -1
+ # dim writes divisor = 1
+
+[system.io]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/system.io
+ # chart type = area
+ # type = system
+ # family = disk
+ # units = KiB/s
+ # context = system.io
+ # priority = 150
+ # name = system.io
+ # title = Disk I/O
+ # dim in name = in
+ # dim in algorithm = incremental
+ # dim in multiplier = 1
+ # dim in divisor = 1
+ # dim out name = out
+ # dim out algorithm = incremental
+ # dim out multiplier = -1
+ # dim out divisor = 1
+
+[system.ipc_semaphores]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/system.ipc_semaphores
+ # chart type = area
+ # type = system
+ # family = ipc semaphores
+ # units = semaphores
+ # context = system.ipc_semaphores
+ # priority = 1203
+ # name = system.ipc_semaphores
+ # title = IPC Semaphores
+ # dim semaphores name = semaphores
+ # dim semaphores algorithm = absolute
+ # dim semaphores multiplier = 1
+ # dim semaphores divisor = 1
+
+[system.ipc_semaphore_arrays]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/system.ipc_semaphore_arrays
+ # chart type = area
+ # type = system
+ # family = ipc semaphores
+ # units = arrays
+ # context = system.ipc_semaphore_arrays
+ # priority = 1204
+ # name = system.ipc_semaphore_arrays
+ # title = IPC Semaphore Arrays
+ # dim arrays name = arrays
+ # dim arrays algorithm = absolute
+ # dim arrays multiplier = 1
+ # dim arrays divisor = 1
+
+[system.shared_memory_segments]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/system.shared_memory_segments
+ # chart type = stacked
+ # type = system
+ # family = ipc shared memory
+ # units = segments
+ # context = system.shared_memory_segments
+ # priority = 1205
+ # name = system.shared_memory_segments
+ # title = IPC Shared Memory Number of Segments
+ # dim segments name = segments
+ # dim segments algorithm = absolute
+ # dim segments multiplier = 1
+ # dim segments divisor = 1
+
+[system.shared_memory_bytes]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/system.shared_memory_bytes
+ # chart type = stacked
+ # type = system
+ # family = ipc shared memory
+ # units = bytes
+ # context = system.shared_memory_bytes
+ # priority = 1206
+ # name = system.shared_memory_bytes
+ # title = IPC Shared Memory Used Bytes
+ # dim bytes name = bytes
+ # dim bytes algorithm = absolute
+ # dim bytes multiplier = 1
+ # dim bytes divisor = 1
+
+[netdata.plugin_proc_modules]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/netdata.plugin_proc_modules
+ # chart type = stacked
+ # type = netdata
+ # family = proc
+ # units = milliseconds/run
+ # context = netdata.plugin_proc_modules
+ # priority = 132001
+ # name = netdata.plugin_proc_modules
+ # title = NetData Proc Plugin Modules Durations
+ # dim stat name = stat
+ # dim stat algorithm = absolute
+ # dim stat multiplier = 1
+ # dim stat divisor = 1000
+ # dim uptime name = uptime
+ # dim uptime algorithm = absolute
+ # dim uptime multiplier = 1
+ # dim uptime divisor = 1000
+ # dim loadavg name = loadavg
+ # dim loadavg algorithm = absolute
+ # dim loadavg multiplier = 1
+ # dim loadavg divisor = 1000
+ # dim entropy name = entropy
+ # dim entropy algorithm = absolute
+ # dim entropy multiplier = 1
+ # dim entropy divisor = 1000
+ # dim interrupts name = interrupts
+ # dim interrupts algorithm = absolute
+ # dim interrupts multiplier = 1
+ # dim interrupts divisor = 1000
+ # dim softirqs name = softirqs
+ # dim softirqs algorithm = absolute
+ # dim softirqs multiplier = 1
+ # dim softirqs divisor = 1000
+ # dim vmstat name = vmstat
+ # dim vmstat algorithm = absolute
+ # dim vmstat multiplier = 1
+ # dim vmstat divisor = 1000
+ # dim meminfo name = meminfo
+ # dim meminfo algorithm = absolute
+ # dim meminfo multiplier = 1
+ # dim meminfo divisor = 1000
+ # dim ksm name = ksm
+ # dim ksm algorithm = absolute
+ # dim ksm multiplier = 1
+ # dim ksm divisor = 1000
+ # dim numa name = numa
+ # dim numa algorithm = absolute
+ # dim numa multiplier = 1
+ # dim numa divisor = 1000
+ # dim netdev name = netdev
+ # dim netdev algorithm = absolute
+ # dim netdev multiplier = 1
+ # dim netdev divisor = 1000
+ # dim sockstat name = sockstat
+ # dim sockstat algorithm = absolute
+ # dim sockstat multiplier = 1
+ # dim sockstat divisor = 1000
+ # dim sockstat6 name = sockstat6
+ # dim sockstat6 algorithm = absolute
+ # dim sockstat6 multiplier = 1
+ # dim sockstat6 divisor = 1000
+ # dim netstat name = netstat
+ # dim netstat algorithm = absolute
+ # dim netstat multiplier = 1
+ # dim netstat divisor = 1000
+ # dim snmp name = snmp
+ # dim snmp algorithm = absolute
+ # dim snmp multiplier = 1
+ # dim snmp divisor = 1000
+ # dim snmp6 name = snmp6
+ # dim snmp6 algorithm = absolute
+ # dim snmp6 multiplier = 1
+ # dim snmp6 divisor = 1000
+ # dim softnet name = softnet
+ # dim softnet algorithm = absolute
+ # dim softnet multiplier = 1
+ # dim softnet divisor = 1000
+ # dim conntrack name = conntrack
+ # dim conntrack algorithm = absolute
+ # dim conntrack multiplier = 1
+ # dim conntrack divisor = 1000
+ # dim diskstats name = diskstats
+ # dim diskstats algorithm = absolute
+ # dim diskstats multiplier = 1
+ # dim diskstats divisor = 1000
+ # dim mdstat name = mdstat
+ # dim mdstat algorithm = absolute
+ # dim mdstat multiplier = 1
+ # dim mdstat divisor = 1000
+ # dim btrfs name = btrfs
+ # dim btrfs algorithm = absolute
+ # dim btrfs multiplier = 1
+ # dim btrfs divisor = 1000
+ # dim ipc name = ipc
+ # dim ipc algorithm = absolute
+ # dim ipc multiplier = 1
+ # dim ipc divisor = 1000
+ # dim power_supply name = power_supply
+ # dim power_supply algorithm = absolute
+ # dim power_supply multiplier = 1
+ # dim power_supply divisor = 1000
+
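For these internal timing charts the divisor of 1000 converts microseconds to milliseconds: each dimension holds the absolute duration of one proc-plugin module's latest run, presumably collected in microseconds given the divisor, so a stat pass of 742 us plots as 742 / 1000 = 0.742 milliseconds/run.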
+[netdata.plugin_proc_cpu]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/netdata.plugin_proc_cpu
+ # chart type = stacked
+ # type = netdata
+ # family = proc
+ # units = milliseconds/s
+ # context = netdata.plugin_proc_cpu
+ # priority = 132000
+ # name = netdata.plugin_proc_cpu
+ # title = NetData Proc Plugin CPU usage
+ # dim user name = user
+ # dim user algorithm = incremental
+ # dim user multiplier = 1
+ # dim user divisor = 1000
+ # dim system name = system
+ # dim system algorithm = incremental
+ # dim system multiplier = 1
+ # dim system divisor = 1000
+
+[netdata.server_cpu]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/netdata.server_cpu
+ # chart type = stacked
+ # type = netdata
+ # family = netdata
+ # units = milliseconds/s
+ # context = netdata.server_cpu
+ # priority = 130000
+ # name = netdata.server_cpu
+ # title = NetData CPU usage
+ # dim user name = user
+ # dim user algorithm = incremental
+ # dim user multiplier = 1
+ # dim user divisor = 1000
+ # dim system name = system
+ # dim system algorithm = incremental
+ # dim system multiplier = 1
+ # dim system divisor = 1000
+
+[netdata.clients]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/netdata.clients
+ # chart type = line
+ # type = netdata
+ # family = netdata
+ # units = connected clients
+ # context = netdata.clients
+ # priority = 130200
+ # name = netdata.clients
+ # title = NetData Web Clients
+ # dim clients name = clients
+ # dim clients algorithm = absolute
+ # dim clients multiplier = 1
+ # dim clients divisor = 1
+
+[netdata.requests]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/netdata.requests
+ # chart type = line
+ # type = netdata
+ # family = netdata
+ # units = requests/s
+ # context = netdata.requests
+ # priority = 130300
+ # name = netdata.requests
+ # title = NetData Web Requests
+ # dim requests name = requests
+ # dim requests algorithm = incremental
+ # dim requests multiplier = 1
+ # dim requests divisor = 1
+
+[netdata.net]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/netdata.net
+ # chart type = area
+ # type = netdata
+ # family = netdata
+ # units = kilobits/s
+ # context = netdata.net
+ # priority = 130000
+ # name = netdata.net
+ # title = NetData Network Traffic
+ # dim in name = in
+ # dim in algorithm = incremental
+ # dim in multiplier = 8
+ # dim in divisor = 1000
+ # dim out name = out
+ # dim out algorithm = incremental
+ # dim out multiplier = -8
+ # dim out divisor = 1000
+
+[netdata.response_time]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/netdata.response_time
+ # chart type = line
+ # type = netdata
+ # family = netdata
+ # units = milliseconds/request
+ # context = netdata.response_time
+ # priority = 130400
+ # name = netdata.response_time
+ # title = NetData API Response Time
+ # dim average name = average
+ # dim average algorithm = absolute
+ # dim average multiplier = 1
+ # dim average divisor = 1000
+ # dim max name = max
+ # dim max algorithm = absolute
+ # dim max multiplier = 1
+ # dim max divisor = 1000
+
+[netdata.compression_ratio]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/netdata.compression_ratio
+ # chart type = line
+ # type = netdata
+ # family = netdata
+ # units = percentage
+ # context = netdata.compression_ratio
+ # priority = 130500
+ # name = netdata.compression_ratio
+ # title = NetData API Responses Compression Savings Ratio
+ # dim savings name = savings
+ # dim savings algorithm = absolute
+ # dim savings multiplier = 1
+ # dim savings divisor = 1000
+
+[netdata.dbengine_compression_ratio]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/netdata.dbengine_compression_ratio
+ # chart type = line
+ # type = netdata
+ # family = dbengine
+ # units = percentage
+ # context = netdata.dbengine_compression_ratio
+ # priority = 130502
+ # name = netdata.dbengine_compression_ratio
+ # title = NetData DB engine data extents' compression savings ratio
+ # dim savings name = savings
+ # dim savings algorithm = absolute
+ # dim savings multiplier = 1
+ # dim savings divisor = 1000
+
+[netdata.page_cache_hit_ratio]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/netdata.page_cache_hit_ratio
+ # chart type = line
+ # type = netdata
+ # family = dbengine
+ # units = percentage
+ # context = netdata.page_cache_hit_ratio
+ # priority = 130503
+ # name = netdata.page_cache_hit_ratio
+ # title = NetData DB engine page cache hit ratio
+ # dim ratio name = ratio
+ # dim ratio algorithm = absolute
+ # dim ratio multiplier = 1
+ # dim ratio divisor = 1000
+
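The ratio charts in this family (compression savings, page cache hit ratio) likewise pair absolute values with a divisor of 1000, which suggests the collector stores integer thousandths of a percent to preserve three decimal places; a stored 87425 would plot as 87425 / 1000 = 87.425 percent.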
+[netdata.page_cache_stats]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/netdata.page_cache_stats
+ # chart type = line
+ # type = netdata
+ # family = dbengine
+ # units = pages
+ # context = netdata.page_cache_stats
+ # priority = 130504
+ # name = netdata.page_cache_stats
+ # title = NetData dbengine page cache statistics
+ # dim descriptors name = descriptors
+ # dim descriptors algorithm = absolute
+ # dim descriptors multiplier = 1
+ # dim descriptors divisor = 1
+ # dim populated name = populated
+ # dim populated algorithm = absolute
+ # dim populated multiplier = 1
+ # dim populated divisor = 1
+ # dim dirty name = dirty
+ # dim dirty algorithm = absolute
+ # dim dirty multiplier = 1
+ # dim dirty divisor = 1
+ # dim backfills name = backfills
+ # dim backfills algorithm = incremental
+ # dim backfills multiplier = 1
+ # dim backfills divisor = 1
+ # dim evictions name = evictions
+ # dim evictions algorithm = incremental
+ # dim evictions multiplier = -1
+ # dim evictions divisor = 1
+ # dim used_by_collectors name = used_by_collectors
+ # dim used_by_collectors algorithm = absolute
+ # dim used_by_collectors multiplier = 1
+ # dim used_by_collectors divisor = 1
+
+[netdata.dbengine_long_term_page_stats]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/netdata.dbengine_long_term_page_stats
+ # chart type = line
+ # type = netdata
+ # family = dbengine
+ # units = pages
+ # context = netdata.dbengine_long_term_page_stats
+ # priority = 130505
+ # name = netdata.dbengine_long_term_page_stats
+ # title = NetData dbengine long-term page statistics
+ # dim total name = total
+ # dim total algorithm = absolute
+ # dim total multiplier = 1
+ # dim total divisor = 1
+ # dim insertions name = insertions
+ # dim insertions algorithm = incremental
+ # dim insertions multiplier = 1
+ # dim insertions divisor = 1
+ # dim deletions name = deletions
+ # dim deletions algorithm = incremental
+ # dim deletions multiplier = -1
+ # dim deletions divisor = 1
+ # dim flushing_pressure_deletions name = flushing_pressure_deletions
+ # dim flushing_pressure_deletions algorithm = incremental
+ # dim flushing_pressure_deletions multiplier = -1
+ # dim flushing_pressure_deletions divisor = 1
+
+[netdata.dbengine_io_throughput]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/netdata.dbengine_io_throughput
+ # chart type = line
+ # type = netdata
+ # family = dbengine
+ # units = MiB/s
+ # context = netdata.dbengine_io_throughput
+ # priority = 130506
+ # name = netdata.dbengine_io_throughput
+ # title = NetData DB engine I/O throughput
+ # dim reads name = reads
+ # dim reads algorithm = incremental
+ # dim reads multiplier = 1
+ # dim reads divisor = 1048576
+ # dim writes name = writes
+ # dim writes algorithm = incremental
+ # dim writes multiplier = -1
+ # dim writes divisor = 1048576
+
+[netdata.dbengine_io_operations]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/netdata.dbengine_io_operations
+ # chart type = line
+ # type = netdata
+ # family = dbengine
+ # units = operations/s
+ # context = netdata.dbengine_io_operations
+ # priority = 130507
+ # name = netdata.dbengine_io_operations
+ # title = NetData DB engine I/O operations
+ # dim reads name = reads
+ # dim reads algorithm = incremental
+ # dim reads multiplier = 1
+ # dim reads divisor = 1
+ # dim writes name = writes
+ # dim writes algorithm = incremental
+ # dim writes multiplier = -1
+ # dim writes divisor = 1
+
+[netdata.dbengine_global_errors]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/netdata.dbengine_global_errors
+ # chart type = line
+ # type = netdata
+ # family = dbengine
+ # units = errors/s
+ # context = netdata.dbengine_global_errors
+ # priority = 130508
+ # name = netdata.dbengine_global_errors
+ # title = NetData DB engine errors
+ # dim io_errors name = io_errors
+ # dim io_errors algorithm = incremental
+ # dim io_errors multiplier = 1
+ # dim io_errors divisor = 1
+ # dim fs_errors name = fs_errors
+ # dim fs_errors algorithm = incremental
+ # dim fs_errors multiplier = 1
+ # dim fs_errors divisor = 1
+ # dim pg_cache_over_half_dirty_events name = pg_cache_over_half_dirty_events
+ # dim pg_cache_over_half_dirty_events algorithm = incremental
+ # dim pg_cache_over_half_dirty_events multiplier = 1
+ # dim pg_cache_over_half_dirty_events divisor = 1
+
+[netdata.dbengine_global_file_descriptors]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/netdata.dbengine_global_file_descriptors
+ # chart type = line
+ # type = netdata
+ # family = dbengine
+ # units = descriptors
+ # context = netdata.dbengine_global_file_descriptors
+ # priority = 130509
+ # name = netdata.dbengine_global_file_descriptors
+ # title = NetData DB engine File Descriptors
+ # dim current name = current
+ # dim current algorithm = absolute
+ # dim current multiplier = 1
+ # dim current divisor = 1
+ # dim max name = max
+ # dim max algorithm = absolute
+ # dim max multiplier = 1
+ # dim max divisor = 1
+
+[netdata.dbengine_ram]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/netdata.dbengine_ram
+ # chart type = stacked
+ # type = netdata
+ # family = dbengine
+ # units = MiB
+ # context = netdata.dbengine_ram
+ # priority = 130510
+ # name = netdata.dbengine_ram
+ # title = NetData DB engine RAM usage
+ # dim cache name = cache
+ # dim cache algorithm = absolute
+ # dim cache multiplier = 1
+ # dim cache divisor = 256
+ # dim collectors name = collectors
+ # dim collectors algorithm = absolute
+ # dim collectors multiplier = 1
+ # dim collectors divisor = 256
+ # dim metadata name = metadata
+ # dim metadata algorithm = absolute
+ # dim metadata multiplier = 1
+ # dim metadata divisor = 1048576
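+# Note: a divisor of 256 converts page counts to MiB assuming 4 KiB
+# pages (1 MiB / 4 KiB = 256); 1048576 converts bytes to MiB.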
+
+[netdata.web_thread4_cpu]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/netdata.web_thread4_cpu
+ # chart type = stacked
+ # type = netdata
+ # family = web
+ # units = milliseconds/s
+ # context = netdata.web_cpu
+ # priority = 132003
+ # name = netdata.web_thread4_cpu
+ # title = NetData web server thread No 4 CPU usage
+ # dim user name = user
+ # dim user algorithm = incremental
+ # dim user multiplier = 1
+ # dim user divisor = 1000
+ # dim system name = system
+ # dim system algorithm = incremental
+ # dim system multiplier = 1
+ # dim system divisor = 1000
+
+[netdata.web_thread1_cpu]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/netdata.web_thread1_cpu
+ # chart type = stacked
+ # type = netdata
+ # family = web
+ # units = milliseconds/s
+ # context = netdata.web_cpu
+ # priority = 132000
+ # name = netdata.web_thread1_cpu
+ # title = NetData web server thread No 1 CPU usage
+ # dim user name = user
+ # dim user algorithm = incremental
+ # dim user multiplier = 1
+ # dim user divisor = 1000
+ # dim system name = system
+ # dim system algorithm = incremental
+ # dim system multiplier = 1
+ # dim system divisor = 1000
+
+[netdata.web_thread6_cpu]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/netdata.web_thread6_cpu
+ # chart type = stacked
+ # type = netdata
+ # family = web
+ # units = milliseconds/s
+ # context = netdata.web_cpu
+ # priority = 132005
+ # name = netdata.web_thread6_cpu
+ # title = NetData web server thread No 6 CPU usage
+ # dim user name = user
+ # dim user algorithm = incremental
+ # dim user multiplier = 1
+ # dim user divisor = 1000
+ # dim system name = system
+ # dim system algorithm = incremental
+ # dim system multiplier = 1
+ # dim system divisor = 1000
+
+[netdata.web_thread3_cpu]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/netdata.web_thread3_cpu
+ # chart type = stacked
+ # type = netdata
+ # family = web
+ # units = milliseconds/s
+ # context = netdata.web_cpu
+ # priority = 132002
+ # name = netdata.web_thread3_cpu
+ # title = NetData web server thread No 3 CPU usage
+ # dim user name = user
+ # dim user algorithm = incremental
+ # dim user multiplier = 1
+ # dim user divisor = 1000
+ # dim system name = system
+ # dim system algorithm = incremental
+ # dim system multiplier = 1
+ # dim system divisor = 1000
+
+[netdata.web_thread2_cpu]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/netdata.web_thread2_cpu
+ # chart type = stacked
+ # type = netdata
+ # family = web
+ # units = milliseconds/s
+ # context = netdata.web_cpu
+ # priority = 132001
+ # name = netdata.web_thread2_cpu
+ # title = NetData web server thread No 2 CPU usage
+ # dim user name = user
+ # dim user algorithm = incremental
+ # dim user multiplier = 1
+ # dim user divisor = 1000
+ # dim system name = system
+ # dim system algorithm = incremental
+ # dim system multiplier = 1
+ # dim system divisor = 1000
+
+[disk_await.sda]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/disk_await.sda
+ # chart type = line
+ # type = disk_await
+ # family = sda
+ # units = milliseconds/operation
+ # context = disk.await
+ # priority = 2005
+ # name = disk_await.sda
+ # title = Average Completed I/O Operation Time
+ # dim reads name = reads
+ # dim reads algorithm = absolute
+ # dim reads multiplier = 1
+ # dim reads divisor = 1
+ # dim writes name = writes
+ # dim writes algorithm = absolute
+ # dim writes multiplier = -1
+ # dim writes divisor = 1
+
+[disk_avgsz.sda]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/disk_avgsz.sda
+ # chart type = area
+ # type = disk_avgsz
+ # family = sda
+ # units = KiB/operation
+ # context = disk.avgsz
+ # priority = 2006
+ # name = disk_avgsz.sda
+ # title = Average Completed I/O Operation Bandwidth
+ # dim reads name = reads
+ # dim reads algorithm = absolute
+ # dim reads multiplier = 512
+ # dim reads divisor = 1024
+ # dim writes name = writes
+ # dim writes algorithm = absolute
+ # dim writes multiplier = -512
+ # dim writes divisor = 1024
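+# Note: multiplier 512 with divisor 1024 converts 512-byte sectors to
+# KiB; the same scaling is used for disk_avgsz.sdb below.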
+
+[disk_svctm.sda]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/disk_svctm.sda
+ # chart type = line
+ # type = disk_svctm
+ # family = sda
+ # units = milliseconds/operation
+ # context = disk.svctm
+ # priority = 2007
+ # name = disk_svctm.sda
+ # title = Average Service Time
+ # dim svctm name = svctm
+ # dim svctm algorithm = absolute
+ # dim svctm multiplier = 1
+ # dim svctm divisor = 1
+
+[disk_await.sdb]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/disk_await.sdb
+ # chart type = line
+ # type = disk_await
+ # family = sdb
+ # units = milliseconds/operation
+ # context = disk.await
+ # priority = 2005
+ # name = disk_await.sdb
+ # title = Average Completed I/O Operation Time
+ # dim reads name = reads
+ # dim reads algorithm = absolute
+ # dim reads multiplier = 1
+ # dim reads divisor = 1
+ # dim writes name = writes
+ # dim writes algorithm = absolute
+ # dim writes multiplier = -1
+ # dim writes divisor = 1
+
+[disk_avgsz.sdb]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/disk_avgsz.sdb
+ # chart type = area
+ # type = disk_avgsz
+ # family = sdb
+ # units = KiB/operation
+ # context = disk.avgsz
+ # priority = 2006
+ # name = disk_avgsz.sdb
+ # title = Average Completed I/O Operation Bandwidth
+ # dim reads name = reads
+ # dim reads algorithm = absolute
+ # dim reads multiplier = 512
+ # dim reads divisor = 1024
+ # dim writes name = writes
+ # dim writes algorithm = absolute
+ # dim writes multiplier = -512
+ # dim writes divisor = 1024
+
+[disk_svctm.sdb]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/disk_svctm.sdb
+ # chart type = line
+ # type = disk_svctm
+ # family = sdb
+ # units = milliseconds/operation
+ # context = disk.svctm
+ # priority = 2007
+ # name = disk_svctm.sdb
+ # title = Average Service Time
+ # dim svctm name = svctm
+ # dim svctm algorithm = absolute
+ # dim svctm multiplier = 1
+ # dim svctm divisor = 1
+
+[netdata.web_thread5_cpu]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/netdata.web_thread5_cpu
+ # chart type = stacked
+ # type = netdata
+ # family = web
+ # units = milliseconds/s
+ # context = netdata.web_cpu
+ # priority = 132004
+ # name = netdata.web_thread5_cpu
+ # title = NetData web server thread No 5 CPU usage
+ # dim user name = user
+ # dim user algorithm = incremental
+ # dim user multiplier = 1
+ # dim user divisor = 1000
+ # dim system name = system
+ # dim system algorithm = incremental
+ # dim system multiplier = 1
+ # dim system divisor = 1000
+
+[netdata.apps_cpu]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/netdata.apps_cpu
+ # chart type = stacked
+ # type = netdata
+ # family = apps.plugin
+ # units = milliseconds/s
+ # context = netdata.apps_cpu
+ # priority = 140000
+ # name = netdata.apps_cpu
+ # title = Apps Plugin CPU
+ # dim user name = user
+ # dim user algorithm = incremental
+ # dim user multiplier = 1
+ # dim user divisor = 1000
+ # dim system name = system
+ # dim system algorithm = incremental
+ # dim system multiplier = 1
+ # dim system divisor = 1000
+
+[netdata.apps_sizes]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/netdata.apps_sizes
+ # chart type = line
+ # type = netdata
+ # family = apps.plugin
+ # units = files/s
+ # context = netdata.apps_sizes
+ # priority = 140001
+ # name = netdata.apps_sizes
+ # title = Apps Plugin Files
+ # dim calls name = calls
+ # dim calls algorithm = incremental
+ # dim calls multiplier = 1
+ # dim calls divisor = 1
+ # dim files name = files
+ # dim files algorithm = incremental
+ # dim files multiplier = 1
+ # dim files divisor = 1
+ # dim filenames name = filenames
+ # dim filenames algorithm = incremental
+ # dim filenames multiplier = 1
+ # dim filenames divisor = 1
+ # dim inode_changes name = inode_changes
+ # dim inode_changes algorithm = incremental
+ # dim inode_changes multiplier = 1
+ # dim inode_changes divisor = 1
+ # dim link_changes name = link_changes
+ # dim link_changes algorithm = incremental
+ # dim link_changes multiplier = 1
+ # dim link_changes divisor = 1
+ # dim pids name = pids
+ # dim pids algorithm = absolute
+ # dim pids multiplier = 1
+ # dim pids divisor = 1
+ # dim fds name = fds
+ # dim fds algorithm = absolute
+ # dim fds multiplier = 1
+ # dim fds divisor = 1
+ # dim targets name = targets
+ # dim targets algorithm = absolute
+ # dim targets multiplier = 1
+ # dim targets divisor = 1
+ # dim new_pids name = new pids
+ # dim new_pids algorithm = incremental
+ # dim new_pids multiplier = 1
+ # dim new_pids divisor = 1
+
+[netdata.apps_fix]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/netdata.apps_fix
+ # chart type = line
+ # type = netdata
+ # family = apps.plugin
+ # units = percentage
+ # context = netdata.apps_fix
+ # priority = 140002
+ # name = netdata.apps_fix
+ # title = Apps Plugin Normalization Ratios
+ # dim utime name = utime
+ # dim utime algorithm = absolute
+ # dim utime multiplier = 1
+ # dim utime divisor = 10000
+ # dim stime name = stime
+ # dim stime algorithm = absolute
+ # dim stime multiplier = 1
+ # dim stime divisor = 10000
+ # dim gtime name = gtime
+ # dim gtime algorithm = absolute
+ # dim gtime multiplier = 1
+ # dim gtime divisor = 10000
+ # dim minflt name = minflt
+ # dim minflt algorithm = absolute
+ # dim minflt multiplier = 1
+ # dim minflt divisor = 10000
+ # dim majflt name = majflt
+ # dim majflt algorithm = absolute
+ # dim majflt multiplier = 1
+ # dim majflt divisor = 10000
+
+[netdata.apps_children_fix]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/netdata.apps_children_fix
+ # chart type = line
+ # type = netdata
+ # family = apps.plugin
+ # units = percentage
+ # context = netdata.apps_children_fix
+ # priority = 140003
+ # name = netdata.apps_children_fix
+ # title = Apps Plugin Exited Children Normalization Ratios
+ # dim cutime name = cutime
+ # dim cutime algorithm = absolute
+ # dim cutime multiplier = 1
+ # dim cutime divisor = 10000
+ # dim cstime name = cstime
+ # dim cstime algorithm = absolute
+ # dim cstime multiplier = 1
+ # dim cstime divisor = 10000
+ # dim cgtime name = cgtime
+ # dim cgtime algorithm = absolute
+ # dim cgtime multiplier = 1
+ # dim cgtime divisor = 10000
+ # dim cminflt name = cminflt
+ # dim cminflt algorithm = absolute
+ # dim cminflt multiplier = 1
+ # dim cminflt divisor = 10000
+ # dim cmajflt name = cmajflt
+ # dim cmajflt algorithm = absolute
+ # dim cmajflt multiplier = 1
+ # dim cmajflt divisor = 10000
+
+[apps.cpu]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/apps.cpu
+ # chart type = stacked
+ # type = apps
+ # family = cpu
+ # units = percentage
+ # context = apps.cpu
+ # priority = 20001
+ # name = apps.cpu
+ # title = Apps CPU Time (800% = 8 cores)
+ # dim netdata name = netdata
+ # dim netdata algorithm = absolute
+ # dim netdata multiplier = 1
+ # dim netdata divisor = 10000
+ # dim apps.plugin name = apps.plugin
+ # dim apps.plugin algorithm = absolute
+ # dim apps.plugin multiplier = 1
+ # dim apps.plugin divisor = 10000
+ # dim go.d.plugin name = go.d.plugin
+ # dim go.d.plugin algorithm = absolute
+ # dim go.d.plugin multiplier = 1
+ # dim go.d.plugin divisor = 10000
+ # dim other name = other
+ # dim other algorithm = absolute
+ # dim other multiplier = 1
+ # dim other divisor = 10000
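+# Note: the divisor of 10000 on the apps.* percentage charts appears to
+# match the fixed-point precision apps.plugin applies to the values it
+# sends (an assumption based on the plugin's internals, not this file).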
+
+[apps.mem]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/apps.mem
+ # chart type = stacked
+ # type = apps
+ # family = mem
+ # units = MiB
+ # context = apps.mem
+ # priority = 20003
+ # name = apps.mem
+ # title = Apps Real Memory (w/o shared)
+ # dim netdata name = netdata
+ # dim netdata algorithm = absolute
+ # dim netdata multiplier = 1
+ # dim netdata divisor = 1024
+ # dim apps.plugin name = apps.plugin
+ # dim apps.plugin algorithm = absolute
+ # dim apps.plugin multiplier = 1
+ # dim apps.plugin divisor = 1024
+ # dim go.d.plugin name = go.d.plugin
+ # dim go.d.plugin algorithm = absolute
+ # dim go.d.plugin multiplier = 1
+ # dim go.d.plugin divisor = 1024
+ # dim other name = other
+ # dim other algorithm = absolute
+ # dim other multiplier = 1
+ # dim other divisor = 1024
+
+[apps.vmem]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/apps.vmem
+ # chart type = stacked
+ # type = apps
+ # family = mem
+ # units = MiB
+ # context = apps.vmem
+ # priority = 20005
+ # name = apps.vmem
+ # title = Apps Virtual Memory Size
+ # dim netdata name = netdata
+ # dim netdata algorithm = absolute
+ # dim netdata multiplier = 1
+ # dim netdata divisor = 1024
+ # dim apps.plugin name = apps.plugin
+ # dim apps.plugin algorithm = absolute
+ # dim apps.plugin multiplier = 1
+ # dim apps.plugin divisor = 1024
+ # dim go.d.plugin name = go.d.plugin
+ # dim go.d.plugin algorithm = absolute
+ # dim go.d.plugin multiplier = 1
+ # dim go.d.plugin divisor = 1024
+ # dim other name = other
+ # dim other algorithm = absolute
+ # dim other multiplier = 1
+ # dim other divisor = 1024
+
+[apps.threads]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/apps.threads
+ # chart type = stacked
+ # type = apps
+ # family = processes
+ # units = threads
+ # context = apps.threads
+ # priority = 20006
+ # name = apps.threads
+ # title = Apps Threads
+ # dim netdata name = netdata
+ # dim netdata algorithm = absolute
+ # dim netdata multiplier = 1
+ # dim netdata divisor = 1
+ # dim apps.plugin name = apps.plugin
+ # dim apps.plugin algorithm = absolute
+ # dim apps.plugin multiplier = 1
+ # dim apps.plugin divisor = 1
+ # dim go.d.plugin name = go.d.plugin
+ # dim go.d.plugin algorithm = absolute
+ # dim go.d.plugin multiplier = 1
+ # dim go.d.plugin divisor = 1
+ # dim other name = other
+ # dim other algorithm = absolute
+ # dim other multiplier = 1
+ # dim other divisor = 1
+
+[apps.processes]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/apps.processes
+ # chart type = stacked
+ # type = apps
+ # family = processes
+ # units = processes
+ # context = apps.processes
+ # priority = 20007
+ # name = apps.processes
+ # title = Apps Processes
+ # dim netdata name = netdata
+ # dim netdata algorithm = absolute
+ # dim netdata multiplier = 1
+ # dim netdata divisor = 1
+ # dim apps.plugin name = apps.plugin
+ # dim apps.plugin algorithm = absolute
+ # dim apps.plugin multiplier = 1
+ # dim apps.plugin divisor = 1
+ # dim go.d.plugin name = go.d.plugin
+ # dim go.d.plugin algorithm = absolute
+ # dim go.d.plugin multiplier = 1
+ # dim go.d.plugin divisor = 1
+ # dim other name = other
+ # dim other algorithm = absolute
+ # dim other multiplier = 1
+ # dim other divisor = 1
+
+[apps.uptime]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/apps.uptime
+ # chart type = line
+ # type = apps
+ # family = processes
+ # units = seconds
+ # context = apps.uptime
+ # priority = 20008
+ # name = apps.uptime
+ # title = Apps Carried Over Uptime
+ # dim netdata name = netdata
+ # dim netdata algorithm = absolute
+ # dim netdata multiplier = 1
+ # dim netdata divisor = 1
+ # dim apps.plugin name = apps.plugin
+ # dim apps.plugin algorithm = absolute
+ # dim apps.plugin multiplier = 1
+ # dim apps.plugin divisor = 1
+ # dim go.d.plugin name = go.d.plugin
+ # dim go.d.plugin algorithm = absolute
+ # dim go.d.plugin multiplier = 1
+ # dim go.d.plugin divisor = 1
+ # dim other name = other
+ # dim other algorithm = absolute
+ # dim other multiplier = 1
+ # dim other divisor = 1
+
+[apps.uptime_min]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/apps.uptime_min
+ # chart type = line
+ # type = apps
+ # family = processes
+ # units = seconds
+ # context = apps.uptime_min
+ # priority = 20009
+ # name = apps.uptime_min
+ # title = Apps Minimum Uptime
+ # dim netdata name = netdata
+ # dim netdata algorithm = absolute
+ # dim netdata multiplier = 1
+ # dim netdata divisor = 1
+ # dim apps.plugin name = apps.plugin
+ # dim apps.plugin algorithm = absolute
+ # dim apps.plugin multiplier = 1
+ # dim apps.plugin divisor = 1
+ # dim go.d.plugin name = go.d.plugin
+ # dim go.d.plugin algorithm = absolute
+ # dim go.d.plugin multiplier = 1
+ # dim go.d.plugin divisor = 1
+ # dim other name = other
+ # dim other algorithm = absolute
+ # dim other multiplier = 1
+ # dim other divisor = 1
+
+[apps.uptime_avg]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/apps.uptime_avg
+ # chart type = line
+ # type = apps
+ # family = processes
+ # units = seconds
+ # context = apps.uptime_avg
+ # priority = 20010
+ # name = apps.uptime_avg
+ # title = Apps Average Uptime
+ # dim netdata name = netdata
+ # dim netdata algorithm = absolute
+ # dim netdata multiplier = 1
+ # dim netdata divisor = 1
+ # dim apps.plugin name = apps.plugin
+ # dim apps.plugin algorithm = absolute
+ # dim apps.plugin multiplier = 1
+ # dim apps.plugin divisor = 1
+ # dim go.d.plugin name = go.d.plugin
+ # dim go.d.plugin algorithm = absolute
+ # dim go.d.plugin multiplier = 1
+ # dim go.d.plugin divisor = 1
+ # dim other name = other
+ # dim other algorithm = absolute
+ # dim other multiplier = 1
+ # dim other divisor = 1
+
+[apps.uptime_max]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/apps.uptime_max
+ # chart type = line
+ # type = apps
+ # family = processes
+ # units = seconds
+ # context = apps.uptime_max
+ # priority = 20011
+ # name = apps.uptime_max
+ # title = Apps Maximum Uptime
+ # dim netdata name = netdata
+ # dim netdata algorithm = absolute
+ # dim netdata multiplier = 1
+ # dim netdata divisor = 1
+ # dim apps.plugin name = apps.plugin
+ # dim apps.plugin algorithm = absolute
+ # dim apps.plugin multiplier = 1
+ # dim apps.plugin divisor = 1
+ # dim go.d.plugin name = go.d.plugin
+ # dim go.d.plugin algorithm = absolute
+ # dim go.d.plugin multiplier = 1
+ # dim go.d.plugin divisor = 1
+ # dim other name = other
+ # dim other algorithm = absolute
+ # dim other multiplier = 1
+ # dim other divisor = 1
+
+[apps.cpu_user]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/apps.cpu_user
+ # chart type = stacked
+ # type = apps
+ # family = cpu
+ # units = percentage
+ # context = apps.cpu_user
+ # priority = 20020
+ # name = apps.cpu_user
+ # title = Apps CPU User Time (800% = 8 cores)
+ # dim netdata name = netdata
+ # dim netdata algorithm = absolute
+ # dim netdata multiplier = 1
+ # dim netdata divisor = 10000
+ # dim apps.plugin name = apps.plugin
+ # dim apps.plugin algorithm = absolute
+ # dim apps.plugin multiplier = 1
+ # dim apps.plugin divisor = 10000
+ # dim go.d.plugin name = go.d.plugin
+ # dim go.d.plugin algorithm = absolute
+ # dim go.d.plugin multiplier = 1
+ # dim go.d.plugin divisor = 10000
+ # dim other name = other
+ # dim other algorithm = absolute
+ # dim other multiplier = 1
+ # dim other divisor = 10000
+
+[apps.cpu_system]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/apps.cpu_system
+ # chart type = stacked
+ # type = apps
+ # family = cpu
+ # units = percentage
+ # context = apps.cpu_system
+ # priority = 20021
+ # name = apps.cpu_system
+ # title = Apps CPU System Time (800% = 8 cores)
+ # dim netdata name = netdata
+ # dim netdata algorithm = absolute
+ # dim netdata multiplier = 1
+ # dim netdata divisor = 10000
+ # dim apps.plugin name = apps.plugin
+ # dim apps.plugin algorithm = absolute
+ # dim apps.plugin multiplier = 1
+ # dim apps.plugin divisor = 10000
+ # dim go.d.plugin name = go.d.plugin
+ # dim go.d.plugin algorithm = absolute
+ # dim go.d.plugin multiplier = 1
+ # dim go.d.plugin divisor = 10000
+ # dim other name = other
+ # dim other algorithm = absolute
+ # dim other multiplier = 1
+ # dim other divisor = 10000
+
+[apps.swap]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/apps.swap
+ # chart type = stacked
+ # type = apps
+ # family = swap
+ # units = MiB
+ # context = apps.swap
+ # priority = 20011
+ # name = apps.swap
+ # title = Apps Swap Memory
+ # dim netdata name = netdata
+ # dim netdata algorithm = absolute
+ # dim netdata multiplier = 1
+ # dim netdata divisor = 1024
+ # dim apps.plugin name = apps.plugin
+ # dim apps.plugin algorithm = absolute
+ # dim apps.plugin multiplier = 1
+ # dim apps.plugin divisor = 1024
+ # dim go.d.plugin name = go.d.plugin
+ # dim go.d.plugin algorithm = absolute
+ # dim go.d.plugin multiplier = 1
+ # dim go.d.plugin divisor = 1024
+ # dim other name = other
+ # dim other algorithm = absolute
+ # dim other multiplier = 1
+ # dim other divisor = 1024
+
+[apps.major_faults]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/apps.major_faults
+ # chart type = stacked
+ # type = apps
+ # family = swap
+ # units = page faults/s
+ # context = apps.major_faults
+ # priority = 20012
+ # name = apps.major_faults
+ # title = Apps Major Page Faults (swap read)
+ # dim netdata name = netdata
+ # dim netdata algorithm = absolute
+ # dim netdata multiplier = 1
+ # dim netdata divisor = 10000
+ # dim apps.plugin name = apps.plugin
+ # dim apps.plugin algorithm = absolute
+ # dim apps.plugin multiplier = 1
+ # dim apps.plugin divisor = 10000
+ # dim go.d.plugin name = go.d.plugin
+ # dim go.d.plugin algorithm = absolute
+ # dim go.d.plugin multiplier = 1
+ # dim go.d.plugin divisor = 10000
+ # dim other name = other
+ # dim other algorithm = absolute
+ # dim other multiplier = 1
+ # dim other divisor = 10000
+
+[apps.minor_faults]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/apps.minor_faults
+ # chart type = stacked
+ # type = apps
+ # family = mem
+ # units = page faults/s
+ # context = apps.minor_faults
+ # priority = 20011
+ # name = apps.minor_faults
+ # title = Apps Minor Page Faults
+ # dim netdata name = netdata
+ # dim netdata algorithm = absolute
+ # dim netdata multiplier = 1
+ # dim netdata divisor = 10000
+ # dim apps.plugin name = apps.plugin
+ # dim apps.plugin algorithm = absolute
+ # dim apps.plugin multiplier = 1
+ # dim apps.plugin divisor = 10000
+ # dim go.d.plugin name = go.d.plugin
+ # dim go.d.plugin algorithm = absolute
+ # dim go.d.plugin multiplier = 1
+ # dim go.d.plugin divisor = 10000
+ # dim other name = other
+ # dim other algorithm = absolute
+ # dim other multiplier = 1
+ # dim other divisor = 10000
+
+[apps.preads]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/apps.preads
+ # chart type = stacked
+ # type = apps
+ # family = disk
+ # units = KiB/s
+ # context = apps.preads
+ # priority = 20002
+ # name = apps.preads
+ # title = Apps Disk Reads
+ # dim netdata name = netdata
+ # dim netdata algorithm = absolute
+ # dim netdata multiplier = 1
+ # dim netdata divisor = 10240000
+ # dim apps.plugin name = apps.plugin
+ # dim apps.plugin algorithm = absolute
+ # dim apps.plugin multiplier = 1
+ # dim apps.plugin divisor = 10240000
+ # dim go.d.plugin name = go.d.plugin
+ # dim go.d.plugin algorithm = absolute
+ # dim go.d.plugin multiplier = 1
+ # dim go.d.plugin divisor = 10240000
+ # dim other name = other
+ # dim other algorithm = absolute
+ # dim other multiplier = 1
+ # dim other divisor = 10240000
+
+[apps.pwrites]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/apps.pwrites
+ # chart type = stacked
+ # type = apps
+ # family = disk
+ # units = KiB/s
+ # context = apps.pwrites
+ # priority = 20002
+ # name = apps.pwrites
+ # title = Apps Disk Writes
+ # dim netdata name = netdata
+ # dim netdata algorithm = absolute
+ # dim netdata multiplier = 1
+ # dim netdata divisor = 10240000
+ # dim apps.plugin name = apps.plugin
+ # dim apps.plugin algorithm = absolute
+ # dim apps.plugin multiplier = 1
+ # dim apps.plugin divisor = 10240000
+ # dim go.d.plugin name = go.d.plugin
+ # dim go.d.plugin algorithm = absolute
+ # dim go.d.plugin multiplier = 1
+ # dim go.d.plugin divisor = 10240000
+ # dim other name = other
+ # dim other algorithm = absolute
+ # dim other multiplier = 1
+ # dim other divisor = 10240000
+
+[apps.lreads]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/apps.lreads
+ # chart type = stacked
+ # type = apps
+ # family = disk
+ # units = KiB/s
+ # context = apps.lreads
+ # priority = 20042
+ # name = apps.lreads
+ # title = Apps Disk Logical Reads
+ # dim netdata name = netdata
+ # dim netdata algorithm = absolute
+ # dim netdata multiplier = 1
+ # dim netdata divisor = 10240000
+ # dim apps.plugin name = apps.plugin
+ # dim apps.plugin algorithm = absolute
+ # dim apps.plugin multiplier = 1
+ # dim apps.plugin divisor = 10240000
+ # dim go.d.plugin name = go.d.plugin
+ # dim go.d.plugin algorithm = absolute
+ # dim go.d.plugin multiplier = 1
+ # dim go.d.plugin divisor = 10240000
+ # dim other name = other
+ # dim other algorithm = absolute
+ # dim other multiplier = 1
+ # dim other divisor = 10240000
+
+[apps.lwrites]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/apps.lwrites
+ # chart type = stacked
+ # type = apps
+ # family = disk
+ # units = KiB/s
+ # context = apps.lwrites
+ # priority = 20042
+ # name = apps.lwrites
+ # title = Apps I/O Logical Writes
+ # dim netdata name = netdata
+ # dim netdata algorithm = absolute
+ # dim netdata multiplier = 1
+ # dim netdata divisor = 10240000
+ # dim apps.plugin name = apps.plugin
+ # dim apps.plugin algorithm = absolute
+ # dim apps.plugin multiplier = 1
+ # dim apps.plugin divisor = 10240000
+ # dim go.d.plugin name = go.d.plugin
+ # dim go.d.plugin algorithm = absolute
+ # dim go.d.plugin multiplier = 1
+ # dim go.d.plugin divisor = 10240000
+ # dim other name = other
+ # dim other algorithm = absolute
+ # dim other multiplier = 1
+ # dim other divisor = 10240000
+
+[apps.files]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/apps.files
+ # chart type = stacked
+ # type = apps
+ # family = disk
+ # units = open files
+ # context = apps.files
+ # priority = 20050
+ # name = apps.files
+ # title = Apps Open Files
+ # dim netdata name = netdata
+ # dim netdata algorithm = absolute
+ # dim netdata multiplier = 1
+ # dim netdata divisor = 1
+ # dim apps.plugin name = apps.plugin
+ # dim apps.plugin algorithm = absolute
+ # dim apps.plugin multiplier = 1
+ # dim apps.plugin divisor = 1
+ # dim go.d.plugin name = go.d.plugin
+ # dim go.d.plugin algorithm = absolute
+ # dim go.d.plugin multiplier = 1
+ # dim go.d.plugin divisor = 1
+ # dim other name = other
+ # dim other algorithm = absolute
+ # dim other multiplier = 1
+ # dim other divisor = 1
+
+[apps.sockets]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/apps.sockets
+ # chart type = stacked
+ # type = apps
+ # family = net
+ # units = open sockets
+ # context = apps.sockets
+ # priority = 20051
+ # name = apps.sockets
+ # title = Apps Open Sockets
+ # dim netdata name = netdata
+ # dim netdata algorithm = absolute
+ # dim netdata multiplier = 1
+ # dim netdata divisor = 1
+ # dim apps.plugin name = apps.plugin
+ # dim apps.plugin algorithm = absolute
+ # dim apps.plugin multiplier = 1
+ # dim apps.plugin divisor = 1
+ # dim go.d.plugin name = go.d.plugin
+ # dim go.d.plugin algorithm = absolute
+ # dim go.d.plugin multiplier = 1
+ # dim go.d.plugin divisor = 1
+ # dim other name = other
+ # dim other algorithm = absolute
+ # dim other multiplier = 1
+ # dim other divisor = 1
+
+[apps.pipes]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/apps.pipes
+ # chart type = stacked
+ # type = apps
+ # family = processes
+ # units = open pipes
+ # context = apps.pipes
+ # priority = 20053
+ # name = apps.pipes
+ # title = Apps Pipes
+ # dim netdata name = netdata
+ # dim netdata algorithm = absolute
+ # dim netdata multiplier = 1
+ # dim netdata divisor = 1
+ # dim apps.plugin name = apps.plugin
+ # dim apps.plugin algorithm = absolute
+ # dim apps.plugin multiplier = 1
+ # dim apps.plugin divisor = 1
+ # dim go.d.plugin name = go.d.plugin
+ # dim go.d.plugin algorithm = absolute
+ # dim go.d.plugin multiplier = 1
+ # dim go.d.plugin divisor = 1
+ # dim other name = other
+ # dim other algorithm = absolute
+ # dim other multiplier = 1
+ # dim other divisor = 1
+
+[users.cpu]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/users.cpu
+ # chart type = stacked
+ # type = users
+ # family = cpu
+ # units = percentage
+ # context = users.cpu
+ # priority = 20001
+ # name = users.cpu
+ # title = Users CPU Time (800% = 8 cores)
+ # dim netdata name = netdata
+ # dim netdata algorithm = absolute
+ # dim netdata multiplier = 1
+ # dim netdata divisor = 10000
+ # dim root name = root
+ # dim root algorithm = absolute
+ # dim root multiplier = 1
+ # dim root divisor = 10000
+
+[users.mem]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/users.mem
+ # chart type = stacked
+ # type = users
+ # family = mem
+ # units = MiB
+ # context = users.mem
+ # priority = 20003
+ # name = users.mem
+ # title = Users Real Memory (w/o shared)
+ # dim netdata name = netdata
+ # dim netdata algorithm = absolute
+ # dim netdata multiplier = 1
+ # dim netdata divisor = 1024
+ # dim root name = root
+ # dim root algorithm = absolute
+ # dim root multiplier = 1
+ # dim root divisor = 1024
+
+[users.vmem]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/users.vmem
+ # chart type = stacked
+ # type = users
+ # family = mem
+ # units = MiB
+ # context = users.vmem
+ # priority = 20005
+ # name = users.vmem
+ # title = Users Virtual Memory Size
+ # dim netdata name = netdata
+ # dim netdata algorithm = absolute
+ # dim netdata multiplier = 1
+ # dim netdata divisor = 1024
+ # dim root name = root
+ # dim root algorithm = absolute
+ # dim root multiplier = 1
+ # dim root divisor = 1024
+
+[users.threads]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/users.threads
+ # chart type = stacked
+ # type = users
+ # family = processes
+ # units = threads
+ # context = users.threads
+ # priority = 20006
+ # name = users.threads
+ # title = Users Threads
+ # dim netdata name = netdata
+ # dim netdata algorithm = absolute
+ # dim netdata multiplier = 1
+ # dim netdata divisor = 1
+ # dim root name = root
+ # dim root algorithm = absolute
+ # dim root multiplier = 1
+ # dim root divisor = 1
+
+[users.processes]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/users.processes
+ # chart type = stacked
+ # type = users
+ # family = processes
+ # units = processes
+ # context = users.processes
+ # priority = 20007
+ # name = users.processes
+ # title = Users Processes
+ # dim netdata name = netdata
+ # dim netdata algorithm = absolute
+ # dim netdata multiplier = 1
+ # dim netdata divisor = 1
+ # dim root name = root
+ # dim root algorithm = absolute
+ # dim root multiplier = 1
+ # dim root divisor = 1
+
+[users.uptime]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/users.uptime
+ # chart type = line
+ # type = users
+ # family = processes
+ # units = seconds
+ # context = users.uptime
+ # priority = 20008
+ # name = users.uptime
+ # title = Users Carried Over Uptime
+ # dim netdata name = netdata
+ # dim netdata algorithm = absolute
+ # dim netdata multiplier = 1
+ # dim netdata divisor = 1
+ # dim root name = root
+ # dim root algorithm = absolute
+ # dim root multiplier = 1
+ # dim root divisor = 1
+
+[users.uptime_min]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/users.uptime_min
+ # chart type = line
+ # type = users
+ # family = processes
+ # units = seconds
+ # context = users.uptime_min
+ # priority = 20009
+ # name = users.uptime_min
+ # title = Users Minimum Uptime
+ # dim netdata name = netdata
+ # dim netdata algorithm = absolute
+ # dim netdata multiplier = 1
+ # dim netdata divisor = 1
+ # dim root name = root
+ # dim root algorithm = absolute
+ # dim root multiplier = 1
+ # dim root divisor = 1
+
+[users.uptime_avg]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/users.uptime_avg
+ # chart type = line
+ # type = users
+ # family = processes
+ # units = seconds
+ # context = users.uptime_avg
+ # priority = 20010
+ # name = users.uptime_avg
+ # title = Users Average Uptime
+ # dim netdata name = netdata
+ # dim netdata algorithm = absolute
+ # dim netdata multiplier = 1
+ # dim netdata divisor = 1
+ # dim root name = root
+ # dim root algorithm = absolute
+ # dim root multiplier = 1
+ # dim root divisor = 1
+
+[users.uptime_max]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/users.uptime_max
+ # chart type = line
+ # type = users
+ # family = processes
+ # units = seconds
+ # context = users.uptime_max
+ # priority = 20011
+ # name = users.uptime_max
+ # title = Users Maximum Uptime
+ # dim netdata name = netdata
+ # dim netdata algorithm = absolute
+ # dim netdata multiplier = 1
+ # dim netdata divisor = 1
+ # dim root name = root
+ # dim root algorithm = absolute
+ # dim root multiplier = 1
+ # dim root divisor = 1
+
+[users.cpu_user]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/users.cpu_user
+ # chart type = stacked
+ # type = users
+ # family = cpu
+ # units = percentage
+ # context = users.cpu_user
+ # priority = 20020
+ # name = users.cpu_user
+ # title = Users CPU User Time (800% = 8 cores)
+ # dim netdata name = netdata
+ # dim netdata algorithm = absolute
+ # dim netdata multiplier = 1
+ # dim netdata divisor = 10000
+ # dim root name = root
+ # dim root algorithm = absolute
+ # dim root multiplier = 1
+ # dim root divisor = 10000
+
+[users.cpu_system]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/users.cpu_system
+ # chart type = stacked
+ # type = users
+ # family = cpu
+ # units = percentage
+ # context = users.cpu_system
+ # priority = 20021
+ # name = users.cpu_system
+ # title = Users CPU System Time (800% = 8 cores)
+ # dim netdata name = netdata
+ # dim netdata algorithm = absolute
+ # dim netdata multiplier = 1
+ # dim netdata divisor = 10000
+ # dim root name = root
+ # dim root algorithm = absolute
+ # dim root multiplier = 1
+ # dim root divisor = 10000
+
+[users.swap]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/users.swap
+ # chart type = stacked
+ # type = users
+ # family = swap
+ # units = MiB
+ # context = users.swap
+ # priority = 20011
+ # name = users.swap
+ # title = Users Swap Memory
+ # dim netdata name = netdata
+ # dim netdata algorithm = absolute
+ # dim netdata multiplier = 1
+ # dim netdata divisor = 1024
+ # dim root name = root
+ # dim root algorithm = absolute
+ # dim root multiplier = 1
+ # dim root divisor = 1024
+
+[users.major_faults]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/users.major_faults
+ # chart type = stacked
+ # type = users
+ # family = swap
+ # units = page faults/s
+ # context = users.major_faults
+ # priority = 20012
+ # name = users.major_faults
+ # title = Users Major Page Faults (swap read)
+ # dim netdata name = netdata
+ # dim netdata algorithm = absolute
+ # dim netdata multiplier = 1
+ # dim netdata divisor = 10000
+ # dim root name = root
+ # dim root algorithm = absolute
+ # dim root multiplier = 1
+ # dim root divisor = 10000
+
+[users.minor_faults]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/users.minor_faults
+ # chart type = stacked
+ # type = users
+ # family = mem
+ # units = page faults/s
+ # context = users.minor_faults
+ # priority = 20011
+ # name = users.minor_faults
+ # title = Users Minor Page Faults
+ # dim netdata name = netdata
+ # dim netdata algorithm = absolute
+ # dim netdata multiplier = 1
+ # dim netdata divisor = 10000
+ # dim root name = root
+ # dim root algorithm = absolute
+ # dim root multiplier = 1
+ # dim root divisor = 10000
+
+[users.preads]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/users.preads
+ # chart type = stacked
+ # type = users
+ # family = disk
+ # units = KiB/s
+ # context = users.preads
+ # priority = 20002
+ # name = users.preads
+ # title = Users Disk Reads
+ # dim netdata name = netdata
+ # dim netdata algorithm = absolute
+ # dim netdata multiplier = 1
+ # dim netdata divisor = 10240000
+ # dim root name = root
+ # dim root algorithm = absolute
+ # dim root multiplier = 1
+ # dim root divisor = 10240000
+
+[users.pwrites]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/users.pwrites
+ # chart type = stacked
+ # type = users
+ # family = disk
+ # units = KiB/s
+ # context = users.pwrites
+ # priority = 20002
+ # name = users.pwrites
+ # title = Users Disk Writes
+ # dim netdata name = netdata
+ # dim netdata algorithm = absolute
+ # dim netdata multiplier = 1
+ # dim netdata divisor = 10240000
+ # dim root name = root
+ # dim root algorithm = absolute
+ # dim root multiplier = 1
+ # dim root divisor = 10240000
+
+[users.lreads]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/users.lreads
+ # chart type = stacked
+ # type = users
+ # family = disk
+ # units = KiB/s
+ # context = users.lreads
+ # priority = 20042
+ # name = users.lreads
+ # title = Users Disk Logical Reads
+ # dim netdata name = netdata
+ # dim netdata algorithm = absolute
+ # dim netdata multiplier = 1
+ # dim netdata divisor = 10240000
+ # dim root name = root
+ # dim root algorithm = absolute
+ # dim root multiplier = 1
+ # dim root divisor = 10240000
+
+[users.lwrites]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/users.lwrites
+ # chart type = stacked
+ # type = users
+ # family = disk
+ # units = KiB/s
+ # context = users.lwrites
+ # priority = 20042
+ # name = users.lwrites
+ # title = Users I/O Logical Writes
+ # dim netdata name = netdata
+ # dim netdata algorithm = absolute
+ # dim netdata multiplier = 1
+ # dim netdata divisor = 10240000
+ # dim root name = root
+ # dim root algorithm = absolute
+ # dim root multiplier = 1
+ # dim root divisor = 10240000
+
+[users.files]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/users.files
+ # chart type = stacked
+ # type = users
+ # family = disk
+ # units = open files
+ # context = users.files
+ # priority = 20050
+ # name = users.files
+ # title = Users Open Files
+ # dim netdata name = netdata
+ # dim netdata algorithm = absolute
+ # dim netdata multiplier = 1
+ # dim netdata divisor = 1
+ # dim root name = root
+ # dim root algorithm = absolute
+ # dim root multiplier = 1
+ # dim root divisor = 1
+
+[users.sockets]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/users.sockets
+ # chart type = stacked
+ # type = users
+ # family = net
+ # units = open sockets
+ # context = users.sockets
+ # priority = 20051
+ # name = users.sockets
+ # title = Users Open Sockets
+ # dim netdata name = netdata
+ # dim netdata algorithm = absolute
+ # dim netdata multiplier = 1
+ # dim netdata divisor = 1
+ # dim root name = root
+ # dim root algorithm = absolute
+ # dim root multiplier = 1
+ # dim root divisor = 1
+
+[users.pipes]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/users.pipes
+ # chart type = stacked
+ # type = users
+ # family = processes
+ # units = open pipes
+ # context = users.pipes
+ # priority = 20053
+ # name = users.pipes
+ # title = Users Pipes
+ # dim netdata name = netdata
+ # dim netdata algorithm = absolute
+ # dim netdata multiplier = 1
+ # dim netdata divisor = 1
+ # dim root name = root
+ # dim root algorithm = absolute
+ # dim root multiplier = 1
+ # dim root divisor = 1
+
+[groups.cpu]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/groups.cpu
+ # chart type = stacked
+ # type = groups
+ # family = cpu
+ # units = percentage
+ # context = groups.cpu
+ # priority = 20001
+ # name = groups.cpu
+ # title = User Groups CPU Time (800% = 8 cores)
+ # dim root name = root
+ # dim root algorithm = absolute
+ # dim root multiplier = 1
+ # dim root divisor = 10000
+ # dim netdata name = netdata
+ # dim netdata algorithm = absolute
+ # dim netdata multiplier = 1
+ # dim netdata divisor = 10000
+
+[groups.mem]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/groups.mem
+ # chart type = stacked
+ # type = groups
+ # family = mem
+ # units = MiB
+ # context = groups.mem
+ # priority = 20003
+ # name = groups.mem
+ # title = User Groups Real Memory (w/o shared)
+ # dim root name = root
+ # dim root algorithm = absolute
+ # dim root multiplier = 1
+ # dim root divisor = 1024
+ # dim netdata name = netdata
+ # dim netdata algorithm = absolute
+ # dim netdata multiplier = 1
+ # dim netdata divisor = 1024
+
+[groups.vmem]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/groups.vmem
+ # chart type = stacked
+ # type = groups
+ # family = mem
+ # units = MiB
+ # context = groups.vmem
+ # priority = 20005
+ # name = groups.vmem
+ # title = User Groups Virtual Memory Size
+ # dim root name = root
+ # dim root algorithm = absolute
+ # dim root multiplier = 1
+ # dim root divisor = 1024
+ # dim netdata name = netdata
+ # dim netdata algorithm = absolute
+ # dim netdata multiplier = 1
+ # dim netdata divisor = 1024
+
+[groups.threads]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/groups.threads
+ # chart type = stacked
+ # type = groups
+ # family = processes
+ # units = threads
+ # context = groups.threads
+ # priority = 20006
+ # name = groups.threads
+ # title = User Groups Threads
+ # dim root name = root
+ # dim root algorithm = absolute
+ # dim root multiplier = 1
+ # dim root divisor = 1
+ # dim netdata name = netdata
+ # dim netdata algorithm = absolute
+ # dim netdata multiplier = 1
+ # dim netdata divisor = 1
+
+[groups.processes]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/groups.processes
+ # chart type = stacked
+ # type = groups
+ # family = processes
+ # units = processes
+ # context = groups.processes
+ # priority = 20007
+ # name = groups.processes
+ # title = User Groups Processes
+ # dim root name = root
+ # dim root algorithm = absolute
+ # dim root multiplier = 1
+ # dim root divisor = 1
+ # dim netdata name = netdata
+ # dim netdata algorithm = absolute
+ # dim netdata multiplier = 1
+ # dim netdata divisor = 1
+
+[groups.uptime]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/groups.uptime
+ # chart type = line
+ # type = groups
+ # family = processes
+ # units = seconds
+ # context = groups.uptime
+ # priority = 20008
+ # name = groups.uptime
+ # title = User Groups Carried Over Uptime
+ # dim root name = root
+ # dim root algorithm = absolute
+ # dim root multiplier = 1
+ # dim root divisor = 1
+ # dim netdata name = netdata
+ # dim netdata algorithm = absolute
+ # dim netdata multiplier = 1
+ # dim netdata divisor = 1
+
+[groups.uptime_min]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/groups.uptime_min
+ # chart type = line
+ # type = groups
+ # family = processes
+ # units = seconds
+ # context = groups.uptime_min
+ # priority = 20009
+ # name = groups.uptime_min
+ # title = User Groups Minimum Uptime
+ # dim root name = root
+ # dim root algorithm = absolute
+ # dim root multiplier = 1
+ # dim root divisor = 1
+ # dim netdata name = netdata
+ # dim netdata algorithm = absolute
+ # dim netdata multiplier = 1
+ # dim netdata divisor = 1
+
+[groups.uptime_avg]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/groups.uptime_avg
+ # chart type = line
+ # type = groups
+ # family = processes
+ # units = seconds
+ # context = groups.uptime_avg
+ # priority = 20010
+ # name = groups.uptime_avg
+ # title = User Groups Average Uptime
+ # dim root name = root
+ # dim root algorithm = absolute
+ # dim root multiplier = 1
+ # dim root divisor = 1
+ # dim netdata name = netdata
+ # dim netdata algorithm = absolute
+ # dim netdata multiplier = 1
+ # dim netdata divisor = 1
+
+[groups.uptime_max]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/groups.uptime_max
+ # chart type = line
+ # type = groups
+ # family = processes
+ # units = seconds
+ # context = groups.uptime_max
+ # priority = 20011
+ # name = groups.uptime_max
+ # title = User Groups Maximum Uptime
+ # dim root name = root
+ # dim root algorithm = absolute
+ # dim root multiplier = 1
+ # dim root divisor = 1
+ # dim netdata name = netdata
+ # dim netdata algorithm = absolute
+ # dim netdata multiplier = 1
+ # dim netdata divisor = 1
+
+[groups.cpu_user]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/groups.cpu_user
+ # chart type = stacked
+ # type = groups
+ # family = cpu
+ # units = percentage
+ # context = groups.cpu_user
+ # priority = 20020
+ # name = groups.cpu_user
+ # title = User Groups CPU User Time (800% = 8 cores)
+ # dim root name = root
+ # dim root algorithm = absolute
+ # dim root multiplier = 1
+ # dim root divisor = 10000
+ # dim netdata name = netdata
+ # dim netdata algorithm = absolute
+ # dim netdata multiplier = 1
+ # dim netdata divisor = 10000
+
+[groups.cpu_system]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/groups.cpu_system
+ # chart type = stacked
+ # type = groups
+ # family = cpu
+ # units = percentage
+ # context = groups.cpu_system
+ # priority = 20021
+ # name = groups.cpu_system
+ # title = User Groups CPU System Time (800% = 8 cores)
+ # dim root name = root
+ # dim root algorithm = absolute
+ # dim root multiplier = 1
+ # dim root divisor = 10000
+ # dim netdata name = netdata
+ # dim netdata algorithm = absolute
+ # dim netdata multiplier = 1
+ # dim netdata divisor = 10000
+
+[groups.swap]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/groups.swap
+ # chart type = stacked
+ # type = groups
+ # family = swap
+ # units = MiB
+ # context = groups.swap
+ # priority = 20011
+ # name = groups.swap
+ # title = User Groups Swap Memory
+ # dim root name = root
+ # dim root algorithm = absolute
+ # dim root multiplier = 1
+ # dim root divisor = 1024
+ # dim netdata name = netdata
+ # dim netdata algorithm = absolute
+ # dim netdata multiplier = 1
+ # dim netdata divisor = 1024
+
+[groups.major_faults]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/groups.major_faults
+ # chart type = stacked
+ # type = groups
+ # family = swap
+ # units = page faults/s
+ # context = groups.major_faults
+ # priority = 20012
+ # name = groups.major_faults
+ # title = User Groups Major Page Faults (swap read)
+ # dim root name = root
+ # dim root algorithm = absolute
+ # dim root multiplier = 1
+ # dim root divisor = 10000
+ # dim netdata name = netdata
+ # dim netdata algorithm = absolute
+ # dim netdata multiplier = 1
+ # dim netdata divisor = 10000
+
+[groups.minor_faults]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/groups.minor_faults
+ # chart type = stacked
+ # type = groups
+ # family = mem
+ # units = page faults/s
+ # context = groups.minor_faults
+ # priority = 20011
+ # name = groups.minor_faults
+ # title = User Groups Minor Page Faults
+ # dim root name = root
+ # dim root algorithm = absolute
+ # dim root multiplier = 1
+ # dim root divisor = 10000
+ # dim netdata name = netdata
+ # dim netdata algorithm = absolute
+ # dim netdata multiplier = 1
+ # dim netdata divisor = 10000
+
+[groups.preads]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/groups.preads
+ # chart type = stacked
+ # type = groups
+ # family = disk
+ # units = KiB/s
+ # context = groups.preads
+ # priority = 20002
+ # name = groups.preads
+ # title = User Groups Disk Reads
+ # dim root name = root
+ # dim root algorithm = absolute
+ # dim root multiplier = 1
+ # dim root divisor = 10240000
+ # dim netdata name = netdata
+ # dim netdata algorithm = absolute
+ # dim netdata multiplier = 1
+ # dim netdata divisor = 10240000
+
+[groups.pwrites]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/groups.pwrites
+ # chart type = stacked
+ # type = groups
+ # family = disk
+ # units = KiB/s
+ # context = groups.pwrites
+ # priority = 20002
+ # name = groups.pwrites
+ # title = User Groups Disk Writes
+ # dim root name = root
+ # dim root algorithm = absolute
+ # dim root multiplier = 1
+ # dim root divisor = 10240000
+ # dim netdata name = netdata
+ # dim netdata algorithm = absolute
+ # dim netdata multiplier = 1
+ # dim netdata divisor = 10240000
+
+[groups.lreads]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/groups.lreads
+ # chart type = stacked
+ # type = groups
+ # family = disk
+ # units = KiB/s
+ # context = groups.lreads
+ # priority = 20042
+ # name = groups.lreads
+ # title = User Groups Disk Logical Reads
+ # dim root name = root
+ # dim root algorithm = absolute
+ # dim root multiplier = 1
+ # dim root divisor = 10240000
+ # dim netdata name = netdata
+ # dim netdata algorithm = absolute
+ # dim netdata multiplier = 1
+ # dim netdata divisor = 10240000
+
+[groups.lwrites]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/groups.lwrites
+ # chart type = stacked
+ # type = groups
+ # family = disk
+ # units = KiB/s
+ # context = groups.lwrites
+ # priority = 20042
+ # name = groups.lwrites
+ # title = User Groups I/O Logical Writes
+ # dim root name = root
+ # dim root algorithm = absolute
+ # dim root multiplier = 1
+ # dim root divisor = 10240000
+ # dim netdata name = netdata
+ # dim netdata algorithm = absolute
+ # dim netdata multiplier = 1
+ # dim netdata divisor = 10240000
+
+[groups.files]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/groups.files
+ # chart type = stacked
+ # type = groups
+ # family = disk
+ # units = open files
+ # context = groups.files
+ # priority = 20050
+ # name = groups.files
+ # title = User Groups Open Files
+ # dim root name = root
+ # dim root algorithm = absolute
+ # dim root multiplier = 1
+ # dim root divisor = 1
+ # dim netdata name = netdata
+ # dim netdata algorithm = absolute
+ # dim netdata multiplier = 1
+ # dim netdata divisor = 1
+
+[groups.sockets]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/groups.sockets
+ # chart type = stacked
+ # type = groups
+ # family = net
+ # units = open sockets
+ # context = groups.sockets
+ # priority = 20051
+ # name = groups.sockets
+ # title = User Groups Open Sockets
+ # dim root name = root
+ # dim root algorithm = absolute
+ # dim root multiplier = 1
+ # dim root divisor = 1
+ # dim netdata name = netdata
+ # dim netdata algorithm = absolute
+ # dim netdata multiplier = 1
+ # dim netdata divisor = 1
+
+[groups.pipes]
+ history = 5
+ # enabled = yes
+ # cache directory = /var/cache/netdata/groups.pipes
+ # chart type = stacked
+ # type = groups
+ # family = processes
+ # units = open pipes
+ # context = groups.pipes
+ # priority = 20053
+ # name = groups.pipes
+ # title = User Groups Pipes
+ # dim root name = root
+ # dim root algorithm = absolute
+ # dim root multiplier = 1
+ # dim root divisor = 1
+ # dim netdata name = netdata
+ # dim netdata algorithm = absolute
+ # dim netdata multiplier = 1
+ # dim netdata divisor = 1
diff --git a/build_external/scenarios/aclk-testing/configureVerneMQ.Dockerfile b/build_external/scenarios/aclk-testing/configureVerneMQ.Dockerfile
new file mode 100644
index 0000000..228548c
--- /dev/null
+++ b/build_external/scenarios/aclk-testing/configureVerneMQ.Dockerfile
@@ -0,0 +1,8 @@
+FROM vernemq:latest
+EXPOSE 9002
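+# 9002 is the secure-websocket (wss) MQTT listener defined in vernemq.conf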
+COPY vernemq.conf /vernemq/etc/vernemq.conf
+WORKDIR /vernemq
+#RUN openssl req -newkey rsa:2048 -nodes -keyout server.key -x509 -days 365 -out server.crt -subj '/CN=vernemq'
+RUN openssl req -newkey rsa:4096 -x509 -sha256 -days 3650 -nodes -out server.crt -keyout server.key -subj "/C=SK/ST=XX/L=XX/O=NetdataIsAwesome/OU=NotSupremeLeader/CN=netdata.cloud"
+RUN chown vernemq:vernemq /vernemq/server.key /vernemq/server.crt
+RUN cat /vernemq/etc/vernemq.conf
diff --git a/build_external/scenarios/aclk-testing/paho-compose.yml b/build_external/scenarios/aclk-testing/paho-compose.yml
new file mode 100644
index 0000000..4fc6ce2
--- /dev/null
+++ b/build_external/scenarios/aclk-testing/paho-compose.yml
@@ -0,0 +1,6 @@
+version: '3.3'
+services:
+ paho_inspect:
+ build:
+ context: .
+      dockerfile: paho.Dockerfile
\ No newline at end of file
diff --git a/build_external/scenarios/aclk-testing/paho-inspection.py b/build_external/scenarios/aclk-testing/paho-inspection.py
new file mode 100644
index 0000000..ec1e167
--- /dev/null
+++ b/build_external/scenarios/aclk-testing/paho-inspection.py
@@ -0,0 +1,33 @@
+import ssl
+import paho.mqtt.client as mqtt
+
+def on_connect(mqttc, obj, flags, rc):
+ print("connected rc: "+str(rc), flush=True)
+ mqttc.subscribe("/agent/#",0)
+def on_disconnect(mqttc, obj, rc):
+    print("disconnected rc: "+str(rc), flush=True)
+def on_message(mqttc, obj, msg):
+ print(msg.topic+" "+str(msg.qos)+" "+str(msg.payload), flush=True)
+def on_publish(mqttc, obj, mid):
+ print("mid: "+str(mid), flush=True)
+def on_subscribe(mqttc, obj, mid, granted_qos):
+ print("Subscribed: "+str(mid)+" "+str(granted_qos), flush=True)
+def on_log(mqttc, obj, level, string):
+    print(string, flush=True)
+print("Starting paho-inspection", flush=True)
+mqttc = mqtt.Client(transport='websockets')
+#mqttc.tls_set(certfile="server.crt", keyfile="server.key", cert_reqs=ssl.CERT_REQUIRED, tls_version=ssl.PROTOCOL_TLS, ciphers=None)
+#mqttc.tls_set(ca_certs="server.crt", cert_reqs=ssl.CERT_REQUIRED, tls_version=ssl.PROTOCOL_TLS, ciphers=None)
+mqttc.tls_set(cert_reqs=ssl.CERT_NONE, tls_version=ssl.PROTOCOL_TLS, ciphers=None)
+mqttc.tls_insecure_set(True)
+mqttc.on_message = on_message
+mqttc.on_connect = on_connect
+mqttc.on_disconnect = on_disconnect
+mqttc.on_publish = on_publish
+mqttc.on_subscribe = on_subscribe
+mqttc.connect("vernemq", 9002, 60)
+
+#mqttc.publish("/agent/mine","Test1")
+#mqttc.subscribe("$SYS/#", 0)
+print("Connected successfully, monitoring /agent/#", flush=True)
+mqttc.loop_forever()
diff --git a/build_external/scenarios/aclk-testing/paho.Dockerfile b/build_external/scenarios/aclk-testing/paho.Dockerfile
new file mode 100644
index 0000000..77a49e7
--- /dev/null
+++ b/build_external/scenarios/aclk-testing/paho.Dockerfile
@@ -0,0 +1,12 @@
+FROM archlinux/base:latest
+
+RUN pacman -Syyu --noconfirm
+RUN pacman --noconfirm --needed -S python-pip
+
+RUN pip install paho-mqtt
+
+RUN mkdir -p /opt/paho
+COPY paho-inspection.py /opt/paho/
+
+WORKDIR /opt/paho
+CMD ["/usr/sbin/python", "paho-inspection.py"] \ No newline at end of file
diff --git a/build_external/scenarios/aclk-testing/vernemq-compose.yml b/build_external/scenarios/aclk-testing/vernemq-compose.yml
new file mode 100644
index 0000000..a9f07a5
--- /dev/null
+++ b/build_external/scenarios/aclk-testing/vernemq-compose.yml
@@ -0,0 +1,7 @@
+version: '3.3'
+services:
+ vernemq:
+ build:
+ dockerfile: configureVerneMQ.Dockerfile
+ context: .
+
diff --git a/build_external/scenarios/aclk-testing/vernemq.conf b/build_external/scenarios/aclk-testing/vernemq.conf
new file mode 100644
index 0000000..18e8432
--- /dev/null
+++ b/build_external/scenarios/aclk-testing/vernemq.conf
@@ -0,0 +1,68 @@
+allow_anonymous = on
+allow_register_during_netsplit = off
+allow_publish_during_netsplit = off
+allow_subscribe_during_netsplit = off
+allow_unsubscribe_during_netsplit = off
+allow_multiple_sessions = off
+coordinate_registrations = on
+max_inflight_messages = 20
+max_online_messages = 1000
+max_offline_messages = 1000
+max_message_size = 0
+upgrade_outgoing_qos = off
+listener.max_connections = 10000
+listener.nr_of_acceptors = 10
+listener.tcp.default = 127.0.0.1:1883
+listener.wss.keyfile = /vernemq/server.key
+listener.wss.certfile = /vernemq/server.crt
+listener.wss.default = 0.0.0.0:9002
+listener.vmq.clustering = 0.0.0.0:44053
+listener.http.default = 127.0.0.1:8888
+listener.ssl.require_certificate = off
+listener.wss.require_certificate = off
+systree_enabled = on
+systree_interval = 20000
+graphite_enabled = off
+graphite_host = localhost
+graphite_port = 2003
+graphite_interval = 20000
+shared_subscription_policy = prefer_local
+plugins.vmq_passwd = on
+plugins.vmq_acl = on
+plugins.vmq_diversity = off
+plugins.vmq_webhooks = off
+plugins.vmq_bridge = off
+metadata_plugin = vmq_plumtree
+vmq_acl.acl_file = ./etc/vmq.acl
+vmq_acl.acl_reload_interval = 10
+vmq_passwd.password_file = ./etc/vmq.passwd
+vmq_passwd.password_reload_interval = 10
+vmq_diversity.script_dir = ./share/lua
+vmq_diversity.auth_postgres.enabled = off
+vmq_diversity.postgres.ssl = off
+vmq_diversity.postgres.password_hash_method = crypt
+vmq_diversity.auth_cockroachdb.enabled = off
+vmq_diversity.cockroachdb.ssl = on
+vmq_diversity.cockroachdb.password_hash_method = bcrypt
+vmq_diversity.auth_mysql.enabled = off
+vmq_diversity.mysql.password_hash_method = password
+vmq_diversity.auth_mongodb.enabled = off
+vmq_diversity.mongodb.ssl = off
+vmq_diversity.auth_redis.enabled = off
+vmq_bcrypt.pool_size = 1
+log.console = file
+log.console.level = info
+log.console.file = ./log/console.log
+log.error.file = ./log/error.log
+log.syslog = off
+log.crash = on
+log.crash.file = ./log/crash.log
+log.crash.maximum_message_size = 64KB
+log.crash.size = 10MB
+log.crash.rotation = $D0
+log.crash.rotation.keep = 5
+nodename = VerneMQ@127.0.0.1
+distributed_cookie = vmq
+erlang.async_threads = 64
+erlang.max_ports = 262144
+leveldb.maximum_memory.percent = 70
diff --git a/build_external/scenarios/gaps_hi/child-compose.yml b/build_external/scenarios/gaps_hi/child-compose.yml
new file mode 100644
index 0000000..e507360
--- /dev/null
+++ b/build_external/scenarios/gaps_hi/child-compose.yml
@@ -0,0 +1,14 @@
+version: '3.3'
+services:
+ agent_child:
+ image: debian_10_dev
+ command: /usr/sbin/netdata -D
+ #ports:
+ #- 21002+:19999
+ volumes:
+ - ./child_stream.conf:/etc/netdata/stream.conf:ro
+ #- ./child_guid:/var/lib/netdata/registry/netdata.public.unique.id:ro
+ - ./min.conf:/etc/netdata/netdata.conf:ro
+ cap_add:
+ - SYS_PTRACE
+
diff --git a/build_external/scenarios/gaps_hi/child_guid b/build_external/scenarios/gaps_hi/child_guid
new file mode 100644
index 0000000..670f7c2
--- /dev/null
+++ b/build_external/scenarios/gaps_hi/child_guid
@@ -0,0 +1 @@
+22222222-2222-2222-2222-222222222222 \ No newline at end of file
diff --git a/build_external/scenarios/gaps_hi/child_stream.conf b/build_external/scenarios/gaps_hi/child_stream.conf
new file mode 100644
index 0000000..2218c68
--- /dev/null
+++ b/build_external/scenarios/gaps_hi/child_stream.conf
@@ -0,0 +1,11 @@
+[stream]
+ enabled = yes
+# destination = tcp:agent_middle
+ destination = tcp:192.168.1.2
+ api key = 00000000-0000-0000-0000-000000000000
+ timeout seconds = 60
+ default port = 19999
+ send charts matching = *
+ buffer size bytes = 10485760
+ reconnect delay seconds = 5
+ initial clock resync iterations = 60
diff --git a/build_external/scenarios/gaps_hi/middle-compose.yml b/build_external/scenarios/gaps_hi/middle-compose.yml
new file mode 100644
index 0000000..cb4a045
--- /dev/null
+++ b/build_external/scenarios/gaps_hi/middle-compose.yml
@@ -0,0 +1,13 @@
+version: '3.3'
+services:
+ agent_middle:
+ image: debian_10_dev
+ command: /usr/sbin/netdata -D
+ ports:
+ - 21001:19999
+ volumes:
+ - ./middle_stream.conf:/etc/netdata/stream.conf:ro
+ - ./middle_guid:/var/lib/netdata/registry/netdata.public.unique.id:ro
+ - ./min.conf:/etc/netdata/netdata.conf:ro
+ cap_add:
+ - SYS_PTRACE
diff --git a/build_external/scenarios/gaps_hi/middle_guid b/build_external/scenarios/gaps_hi/middle_guid
new file mode 100644
index 0000000..f8a43c2
--- /dev/null
+++ b/build_external/scenarios/gaps_hi/middle_guid
@@ -0,0 +1 @@
+11111111-1111-1111-1111-111111111111 \ No newline at end of file
diff --git a/build_external/scenarios/gaps_hi/middle_stream.conf b/build_external/scenarios/gaps_hi/middle_stream.conf
new file mode 100644
index 0000000..132eaa1
--- /dev/null
+++ b/build_external/scenarios/gaps_hi/middle_stream.conf
@@ -0,0 +1,23 @@
+[stream]
+ enabled = yes
+ destination = tcp:agent_parent
+ api key = 00000000-0000-0000-0000-000000000000
+ timeout seconds = 60
+ default port = 19999
+
+ send charts matching = *
+ buffer size bytes = 1048576
+ reconnect delay seconds = 5
+ initial clock resync iterations = 60
+
+[00000000-0000-0000-0000-000000000000]
+ enabled = yes
+ allow from = *
+ default history = 3600
+ # default memory mode = ram
+
+ health enabled by default = auto
+
+ # postpone alarms for a short period after the sender is connected
+ default postpone alarms on connect seconds = 60
+ multiple connections = allow
diff --git a/build_external/scenarios/gaps_hi/min.conf b/build_external/scenarios/gaps_hi/min.conf
new file mode 100644
index 0000000..83fa23e
--- /dev/null
+++ b/build_external/scenarios/gaps_hi/min.conf
@@ -0,0 +1,6 @@
+[global]
+ debug flags = 0x0000000040000000
+ errors flood protection period = 0
+[web]
+ ssl key = /etc/netdata/ssl/key.pem
+ ssl certificate = /etc/netdata/ssl/cert.pem
diff --git a/build_external/scenarios/gaps_hi/parent-compose.yml b/build_external/scenarios/gaps_hi/parent-compose.yml
new file mode 100644
index 0000000..2944bbc
--- /dev/null
+++ b/build_external/scenarios/gaps_hi/parent-compose.yml
@@ -0,0 +1,13 @@
+version: '3.3'
+services:
+ agent_parent:
+ image: debian_10_dev
+ command: /usr/sbin/netdata -D
+ ports:
+ - 21000:19999
+ volumes:
+ - ./parent_stream.conf:/etc/netdata/stream.conf:ro
+ - ./parent_guid:/var/lib/netdata/registry/netdata.public.unique.id:ro
+ - ./min.conf:/etc/netdata/netdata.conf:ro
+ cap_add:
+ - SYS_PTRACE
diff --git a/build_external/scenarios/gaps_hi/parent_guid b/build_external/scenarios/gaps_hi/parent_guid
new file mode 100644
index 0000000..fee6f32
--- /dev/null
+++ b/build_external/scenarios/gaps_hi/parent_guid
@@ -0,0 +1 @@
+00000000-0000-0000-0000-000000000000 \ No newline at end of file
diff --git a/build_external/scenarios/gaps_hi/parent_stream.conf b/build_external/scenarios/gaps_hi/parent_stream.conf
new file mode 100644
index 0000000..600a9fa
--- /dev/null
+++ b/build_external/scenarios/gaps_hi/parent_stream.conf
@@ -0,0 +1,11 @@
+[00000000-0000-0000-0000-000000000000]
+ enabled = yes
+ allow from = *
+ default history = 3600
+ default memory mode = dbengine
+ health enabled by default = no
+
+ # postpone alarms for a short period after the sender is connected
+ default postpone alarms on connect seconds = 60
+ multiple connections = allow
+
diff --git a/build_external/scenarios/gaps_lo/child-compose.yml b/build_external/scenarios/gaps_lo/child-compose.yml
new file mode 100644
index 0000000..dca900c
--- /dev/null
+++ b/build_external/scenarios/gaps_lo/child-compose.yml
@@ -0,0 +1,15 @@
+version: '3.3'
+services:
+ agent_child:
+ image: arch_extras_dev
+ #command: /usr/sbin/valgrind --leak-check=full /usr/sbin/netdata -D
+ command: /usr/sbin/netdata -D # gdb does not like valgrind !
+ #ports:
+ #- 21002:19999
+ volumes:
+ - ./child_stream.conf:/etc/netdata/stream.conf:ro
+ # - ./child_guid:/var/lib/netdata/registry/netdata.public.unique.id:ro
+ - ./mostly_off.conf:/etc/netdata/netdata.conf:ro
+ cap_add:
+ - SYS_PTRACE
+
diff --git a/build_external/scenarios/gaps_lo/child_guid b/build_external/scenarios/gaps_lo/child_guid
new file mode 100644
index 0000000..670f7c2
--- /dev/null
+++ b/build_external/scenarios/gaps_lo/child_guid
@@ -0,0 +1 @@
+22222222-2222-2222-2222-222222222222 \ No newline at end of file
diff --git a/build_external/scenarios/gaps_lo/child_stream.conf b/build_external/scenarios/gaps_lo/child_stream.conf
new file mode 100644
index 0000000..00bcdfa
--- /dev/null
+++ b/build_external/scenarios/gaps_lo/child_stream.conf
@@ -0,0 +1,11 @@
+[stream]
+ # Enable this on child nodes, to have them send metrics.
+ enabled = yes
+ destination = tcp:192.168.1.2
+ api key = 00000000-0000-0000-0000-000000000000
+ timeout seconds = 60
+ default port = 19999
+ send charts matching = *
+ buffer size bytes = 1048576
+ reconnect delay seconds = 5
+ initial clock resync iterations = 60
diff --git a/build_external/scenarios/gaps_lo/middle-compose.yml b/build_external/scenarios/gaps_lo/middle-compose.yml
new file mode 100644
index 0000000..f94e1fe
--- /dev/null
+++ b/build_external/scenarios/gaps_lo/middle-compose.yml
@@ -0,0 +1,14 @@
+version: '3.3'
+services:
+ agent_middle:
+ image: arch_extras_dev
+ #command: /usr/sbin/valgrind --leak-check=full /usr/sbin/netdata -D
+ command: /usr/sbin/netdata -D
+ ports:
+ - 21001:19999
+ volumes:
+ - ./middle_stream.conf:/etc/netdata/stream.conf:ro
+ - ./middle_guid:/var/lib/netdata/registry/netdata.public.unique.id:ro
+ - ./mostly_off.conf:/etc/netdata/netdata.conf:ro
+ cap_add:
+ - SYS_PTRACE
diff --git a/build_external/scenarios/gaps_lo/middle_guid b/build_external/scenarios/gaps_lo/middle_guid
new file mode 100644
index 0000000..f8a43c2
--- /dev/null
+++ b/build_external/scenarios/gaps_lo/middle_guid
@@ -0,0 +1 @@
+11111111-1111-1111-1111-111111111111 \ No newline at end of file
diff --git a/build_external/scenarios/gaps_lo/middle_stream.conf b/build_external/scenarios/gaps_lo/middle_stream.conf
new file mode 100644
index 0000000..3e52e83
--- /dev/null
+++ b/build_external/scenarios/gaps_lo/middle_stream.conf
@@ -0,0 +1,20 @@
+[stream]
+ enabled = yes
+ destination = tcp:agent_parent
+ api key = 00000000-0000-0000-0000-000000000000
+ timeout seconds = 60
+ default port = 19999
+ send charts matching = *
+ buffer size bytes = 1048576
+ reconnect delay seconds = 5
+ initial clock resync iterations = 60
+
+[00000000-0000-0000-0000-000000000000]
+ enabled = yes
+ allow from = *
+ default history = 3600
+ # default memory mode = ram
+ health enabled by default = auto
+ # postpone alarms for a short period after the sender is connected
+ default postpone alarms on connect seconds = 60
+ multiple connections = allow
diff --git a/build_external/scenarios/gaps_lo/mostly_off.conf b/build_external/scenarios/gaps_lo/mostly_off.conf
new file mode 100644
index 0000000..2ac395a
--- /dev/null
+++ b/build_external/scenarios/gaps_lo/mostly_off.conf
@@ -0,0 +1,965 @@
+# netdata configuration
+#
+# You can download the latest version of this file, using:
+#
+# wget -O /etc/netdata/netdata.conf http://localhost:19999/netdata.conf
+# or
+# curl -o /etc/netdata/netdata.conf http://localhost:19999/netdata.conf
+#
+# You can uncomment and change any of the options below.
+# The value shown in the commented settings is the default value.
+#
+
+# global netdata configuration
+
+[global]
+debug flags = 0x0000000040000000
+errors flood protection period = 0
+
+[plugins]
+ diskspace = no
+ cgroups = no
+ tc = no
+ idlejitter = no
+ ioping = no
+ apps = no
+ go.d = no
+ perf = no
+ fping = no
+ python.d = no
+ charts.d = no
+ node.d = no
+ nfacct = no
+ cups = no
+ freeipmi = no
+
+[health]
+enabled = no
+
+[statsd]
+enabled = no
+
+[plugin:proc]
+ /proc/uptime = yes
+ /proc/loadavg = no
+ /proc/sys/kernel/random/entropy_avail = no
+ /proc/pressure = no
+ /proc/interrupts = no
+ /proc/softirqs = no
+ /proc/vmstat = no
+ /proc/meminfo = no
+ /sys/kernel/mm/ksm = no
+ /sys/block/zram = no
+ /sys/devices/system/edac/mc = no
+ /sys/devices/system/node = no
+ /proc/net/dev = no
+ /proc/net/sockstat = no
+ /proc/net/sockstat6 = no
+ /proc/net/netstat = no
+ /proc/net/snmp = no
+ /proc/net/snmp6 = no
+ /proc/net/sctp/snmp = no
+ /proc/net/softnet_stat = no
+ /proc/net/ip_vs/stats = no
+ /proc/net/stat/conntrack = no
+ /proc/net/stat/synproxy = no
+ /proc/diskstats = no
+ /proc/mdstat = no
+ /proc/net/rpc/nfsd = no
+ /proc/net/rpc/nfs = no
+ /proc/spl/kstat/zfs/arcstats = no
+ /sys/fs/btrfs = no
+ ipc = no
+ /sys/class/power_supply = no
+
+
+[plugin:proc:/proc/net/dev:docker0]
+enabled = no
+
+[plugin:proc:/proc/net/dev:br-b87e56f878f1]
+enabled = no
+
+[plugin:proc:/proc/net/dev:enp4s0]
+enabled = no
+
+[plugin:proc:/proc/net/stat/nf_conntrack]
+filename to monitor = /proc/net/stat/nf_conntrack
+netfilter new connections = no
+netfilter connection changes = no
+netfilter connection expectations = no
+netfilter connection searches = no
+netfilter errors = no
+netfilter connections = no
+
+[system.idlejitter]
+enabled = no
+
+[netdata.statsd_metrics]
+enabled = no
+
+[netdata.statsd_useful_metrics]
+enabled = no
+
+[netdata.statsd_events]
+enabled = no
+
+[netdata.statsd_reads]
+enabled = no
+
+[netdata.statsd_bytes]
+enabled = no
+
+[netdata.statsd_packets]
+enabled = no
+
+[netdata.tcp_connects]
+enabled = no
+
+[netdata.tcp_connected]
+enabled = no
+
+[netdata.private_charts]
+enabled = no
+
+[netdata.plugin_statsd_charting_cpu]
+enabled = no
+
+[netdata.plugin_statsd_collector1_cpu]
+enabled = no
+
+[netdata.plugin_tc_cpu]
+enabled = no
+
+[netdata.plugin_tc_time]
+enabled = no
+
+[netdata.runtime_sensors]
+enabled = no
+
+[sensors.coretemp-isa-0000_temperature]
+enabled = no
+
+[sensors.acpitz-acpi-0_temperature]
+enabled = no
+
+[system.cpu]
+enabled = yes
+
+[disk_space._dev]
+enabled = no
+
+[netdata.plugin_cgroups_cpu]
+enabled = no
+
+[netdata.apps_cpu]
+enabled = no
+
+[netdata.apps_sizes]
+enabled = no
+
+[netdata.apps_fix]
+enabled = no
+
+[netdata.apps_children_fix]
+enabled = no
+
+[apps.cpu]
+enabled = no
+
+[apps.mem]
+enabled = no
+
+[apps.vmem]
+enabled = no
+
+[apps.threads]
+enabled = no
+
+[apps.processes]
+enabled = no
+
+[apps.uptime]
+enabled = no
+
+[apps.uptime_min]
+enabled = no
+
+[apps.uptime_avg]
+enabled = no
+
+[apps.uptime_max]
+enabled = no
+
+[apps.cpu_user]
+enabled = no
+
+[apps.cpu_system]
+enabled = no
+
+[apps.swap]
+enabled = no
+
+[apps.major_faults]
+enabled = no
+
+[apps.minor_faults]
+enabled = no
+
+[apps.preads]
+enabled = no
+
+[apps.pwrites]
+enabled = no
+
+[apps.lreads]
+enabled = no
+
+[apps.lwrites]
+enabled = no
+
+[apps.files]
+enabled = no
+
+[apps.sockets]
+enabled = no
+
+[apps.pipes]
+enabled = no
+
+[users.cpu]
+enabled = no
+
+[users.mem]
+enabled = no
+
+[users.vmem]
+enabled = no
+
+[users.threads]
+enabled = no
+
+[users.processes]
+enabled = no
+
+[users.uptime]
+enabled = no
+
+[users.uptime_min]
+enabled = no
+
+[users.uptime_avg]
+enabled = no
+
+[users.uptime_max]
+enabled = no
+
+[users.cpu_user]
+enabled = no
+
+[users.cpu_system]
+enabled = no
+
+[users.swap]
+enabled = no
+
+[users.major_faults]
+enabled = no
+
+[users.minor_faults]
+enabled = no
+
+[users.preads]
+enabled = no
+
+[users.pwrites]
+enabled = no
+
+[users.lreads]
+enabled = no
+
+[users.lwrites]
+enabled = no
+
+[users.files]
+enabled = no
+
+[users.sockets]
+enabled = no
+
+[users.pipes]
+enabled = no
+
+[groups.cpu]
+enabled = no
+
+[groups.mem]
+enabled = no
+
+[groups.vmem]
+enabled = no
+
+[groups.threads]
+enabled = no
+
+[groups.processes]
+enabled = no
+
+[groups.uptime]
+enabled = no
+
+[groups.uptime_min]
+enabled = no
+
+[groups.uptime_avg]
+enabled = no
+
+[groups.uptime_max]
+enabled = no
+
+[groups.cpu_user]
+enabled = no
+
+[groups.cpu_system]
+enabled = no
+
+[groups.swap]
+enabled = no
+
+[groups.major_faults]
+enabled = no
+
+[groups.minor_faults]
+enabled = no
+
+[groups.preads]
+enabled = no
+
+[groups.pwrites]
+enabled = no
+
+[groups.lreads]
+enabled = no
+
+[groups.lwrites]
+enabled = no
+
+[groups.files]
+enabled = no
+
+[groups.sockets]
+enabled = no
+
+[groups.pipes]
+enabled = no
+
+[netdata.web_thread4_cpu]
+enabled = no
+
+[netdata.web_thread2_cpu]
+enabled = no
+
+[netdata.web_thread5_cpu]
+enabled = no
+
+[netdata.web_thread3_cpu]
+enabled = no
+
+[netdata.web_thread6_cpu]
+enabled = no
+
+[netdata.web_thread1_cpu]
+enabled = no
+
+[disk_inodes._dev]
+enabled = no
+
+[disk_space._run]
+enabled = no
+
+[disk_inodes._run]
+enabled = no
+
+[disk_space._]
+enabled = no
+
+[disk_inodes._]
+enabled = no
+
+[disk_space._dev_shm]
+enabled = no
+
+[disk_inodes._dev_shm]
+enabled = no
+
+[disk_space._run_lock]
+enabled = no
+
+[disk_inodes._run_lock]
+enabled = no
+
+[disk_space._home]
+enabled = no
+
+[disk_inodes._home]
+enabled = no
+
+[disk_space._boot_efi]
+enabled = no
+
+[disk_space._media_amoss_deb10]
+enabled = no
+
+[netdata.plugin_diskspace]
+enabled = no
+
+[netdata.plugin_diskspace_dt]
+enabled = no
+
+[cpu.cpu0]
+enabled = no
+
+[cpu.cpu1]
+enabled = no
+
+[cpu.cpu2]
+enabled = no
+
+[cpu.cpu3]
+enabled = no
+
+[cpu.cpu4]
+enabled = no
+
+[cpu.cpu5]
+enabled = no
+
+[cpu.cpu6]
+enabled = no
+
+[cpu.cpu7]
+enabled = no
+
+[system.intr]
+enabled = no
+
+[system.ctxt]
+enabled = no
+
+[system.forks]
+enabled = no
+
+[system.processes]
+enabled = no
+
+[cpu.core_throttling]
+enabled = no
+
+[cpu.cpufreq]
+enabled = no
+
+[cpu.cpu0_cpuidle]
+enabled = no
+
+[cpu.cpu1_cpuidle]
+enabled = no
+
+[cpu.cpu2_cpuidle]
+enabled = no
+
+[cpu.cpu3_cpuidle]
+enabled = no
+
+[cpu.cpu4_cpuidle]
+enabled = no
+
+[cpu.cpu5_cpuidle]
+enabled = no
+
+[cpu.cpu6_cpuidle]
+enabled = no
+
+[cpu.cpu7_cpuidle]
+enabled = no
+
+[system.uptime]
+enabled = yes
+
+[system.load]
+enabled = no
+
+[system.active_processes]
+enabled = no
+
+[system.entropy]
+enabled = no
+
+[system.interrupts]
+enabled = no
+
+[cpu.cpu0_interrupts]
+enabled = no
+
+[cpu.cpu1_interrupts]
+enabled = no
+
+[cpu.cpu2_interrupts]
+enabled = no
+
+[cpu.cpu3_interrupts]
+enabled = no
+
+[cpu.cpu4_interrupts]
+enabled = no
+
+[cpu.cpu5_interrupts]
+enabled = no
+
+[cpu.cpu6_interrupts]
+enabled = no
+
+[cpu.cpu7_interrupts]
+enabled = no
+
+[system.softirqs]
+enabled = no
+
+[cpu.cpu0_softirqs]
+enabled = no
+
+[cpu.cpu1_softirqs]
+enabled = no
+
+[cpu.cpu2_softirqs]
+enabled = no
+
+[cpu.cpu3_softirqs]
+enabled = no
+
+[cpu.cpu4_softirqs]
+enabled = no
+
+[cpu.cpu5_softirqs]
+enabled = no
+
+[cpu.cpu6_softirqs]
+enabled = no
+
+[cpu.cpu7_softirqs]
+enabled = no
+
+[system.swapio]
+enabled = no
+
+[system.pgpgio]
+enabled = no
+
+[mem.pgfaults]
+enabled = no
+
+[system.ram]
+enabled = no
+
+[mem.available]
+enabled = no
+
+[system.swap]
+enabled = no
+
+[mem.committed]
+enabled = no
+
+[mem.writeback]
+enabled = no
+
+[mem.kernel]
+enabled = no
+
+[mem.slab]
+enabled = no
+
+[mem.transparent_hugepages]
+enabled = no
+
+[net.docker0]
+enabled = no
+
+[net_packets.docker0]
+enabled = no
+
+[net.br-b87e56f878f1]
+enabled = no
+
+[net_packets.br-b87e56f878f1]
+enabled = no
+
+[net.enp4s0]
+enabled = no
+
+[net_packets.enp4s0]
+enabled = no
+
+[system.net]
+enabled = no
+
+[ipv4.sockstat_sockets]
+enabled = no
+
+[ipv4.sockstat_tcp_sockets]
+enabled = no
+
+[ipv4.sockstat_tcp_mem]
+enabled = no
+
+[ipv4.sockstat_udp_sockets]
+enabled = no
+
+[ipv4.sockstat_udp_mem]
+enabled = no
+
+[ipv4.sockstat_raw_sockets]
+enabled = no
+
+[ipv6.sockstat6_tcp_sockets]
+enabled = no
+
+[ipv6.sockstat6_udp_sockets]
+enabled = no
+
+[ip.tcpconnaborts]
+enabled = no
+
+[ip.tcpreorders]
+enabled = no
+
+[ip.tcpofo]
+enabled = no
+
+[system.ip]
+enabled = no
+
+[ip.inerrors]
+enabled = no
+
+[ip.mcast]
+enabled = no
+
+[ip.bcast]
+enabled = no
+
+[ip.mcastpkts]
+enabled = no
+
+[ip.bcastpkts]
+enabled = no
+
+[ip.ecnpkts]
+enabled = no
+
+[ipv4.packets]
+enabled = no
+
+[ipv4.fragsin]
+enabled = no
+
+[ipv4.errors]
+enabled = no
+
+[ipv4.icmp]
+enabled = no
+
+[ipv4.icmp_errors]
+enabled = no
+
+[ipv4.icmpmsg]
+enabled = no
+
+[ipv4.tcpsock]
+enabled = no
+
+[ipv4.tcppackets]
+enabled = no
+
+[ipv4.tcperrors]
+enabled = no
+
+[ipv4.tcpopens]
+enabled = no
+
+[ipv4.tcphandshake]
+enabled = no
+
+[ipv4.udppackets]
+enabled = no
+
+[ipv4.udperrors]
+enabled = no
+
+[system.ipv6]
+enabled = no
+
+[ipv6.packets]
+enabled = no
+
+[ipv6.errors]
+enabled = no
+
+[ipv6.udppackets]
+enabled = no
+
+[ipv6.udperrors]
+enabled = no
+
+[ipv6.mcast]
+enabled = no
+
+[ipv6.mcastpkts]
+enabled = no
+
+[ipv6.icmp]
+enabled = no
+
+[ipv6.icmperrors]
+enabled = no
+
+[ipv6.icmprouter]
+enabled = no
+
+[ipv6.icmpneighbor]
+enabled = no
+
+[ipv6.icmpmldv2]
+enabled = no
+
+[ipv6.icmptypes]
+enabled = no
+
+[ipv6.ect]
+enabled = no
+
+[system.softnet_stat]
+enabled = no
+
+[cpu.cpu0_softnet_stat]
+enabled = no
+
+[cpu.cpu1_softnet_stat]
+enabled = no
+
+[cpu.cpu2_softnet_stat]
+enabled = no
+
+[cpu.cpu3_softnet_stat]
+enabled = no
+
+[cpu.cpu4_softnet_stat]
+enabled = no
+
+[cpu.cpu5_softnet_stat]
+enabled = no
+
+[cpu.cpu6_softnet_stat]
+enabled = no
+
+[cpu.cpu7_softnet_stat]
+enabled = no
+
+[netfilter.conntrack_sockets]
+enabled = no
+
+[netfilter.conntrack_new]
+enabled = no
+
+[netfilter.conntrack_changes]
+enabled = no
+
+[netfilter.conntrack_expect]
+enabled = no
+
+[netfilter.conntrack_search]
+enabled = no
+
+[netfilter.conntrack_errors]
+enabled = no
+
+[disk.sda]
+enabled = no
+
+[disk_ops.sda]
+enabled = no
+
+[disk_backlog.sda]
+enabled = no
+
+[disk_util.sda]
+enabled = no
+
+[disk_iotime.sda]
+enabled = no
+
+[disk.sdb]
+enabled = no
+
+[disk_ops.sdb]
+enabled = no
+
+[disk_backlog.sdb]
+enabled = no
+
+[disk_util.sdb]
+enabled = no
+
+[disk_mops.sdb]
+enabled = no
+
+[disk_iotime.sdb]
+enabled = no
+
+[disk.sdc]
+enabled = no
+
+[disk_ops.sdc]
+enabled = no
+
+[disk_backlog.sdc]
+enabled = no
+
+[disk_util.sdc]
+enabled = no
+
+[disk_mops.sdc]
+enabled = no
+
+[disk_iotime.sdc]
+enabled = no
+
+[system.io]
+enabled = no
+
+[system.ipc_semaphores]
+enabled = no
+
+[system.ipc_semaphore_arrays]
+enabled = no
+
+[system.shared_memory_segments]
+enabled = no
+
+[system.shared_memory_bytes]
+enabled = no
+
+[netdata.plugin_proc_modules]
+enabled = no
+
+[netdata.plugin_proc_cpu]
+enabled = no
+
+[netdata.server_cpu]
+enabled = no
+
+[netdata.clients]
+enabled = no
+
+[netdata.requests]
+enabled = no
+
+[netdata.net]
+enabled = no
+
+[netdata.response_time]
+enabled = no
+
+[netdata.compression_ratio]
+enabled = no
+
+[netdata.dbengine_compression_ratio]
+enabled = no
+
+[netdata.page_cache_hit_ratio]
+enabled = no
+
+[netdata.page_cache_stats]
+enabled = no
+
+[netdata.dbengine_long_term_page_stats]
+enabled = no
+
+[netdata.dbengine_io_throughput]
+enabled = no
+
+[netdata.dbengine_io_operations]
+enabled = no
+
+[netdata.dbengine_global_errors]
+enabled = no
+
+[netdata.dbengine_global_file_descriptors]
+enabled = no
+
+[netdata.dbengine_ram]
+enabled = no
+
+[disk_await.sda]
+enabled = no
+
+[disk_avgsz.sda]
+enabled = no
+
+[disk_svctm.sda]
+enabled = no
+
+[disk_await.sdb]
+enabled = no
+
+[disk_avgsz.sdb]
+enabled = no
+
+[disk_svctm.sdb]
+enabled = no
+
+[disk_await.sdc]
+enabled = no
+
+[disk_avgsz.sdc]
+enabled = no
+
+[disk_svctm.sdc]
+enabled = no
+
+[netdata.queries]
+enabled = no
+
+[netdata.db_points]
+enabled = no
+
+[services.cpu]
+enabled = no
+
+[services.mem_usage]
+enabled = no
+
+[services.throttle_io_read]
+enabled = no
+
+[services.throttle_io_write]
+enabled = no
+
+[services.throttle_io_ops_read]
+enabled = no
+
+[services.throttle_io_ops_write]
+enabled = no
+
+[netfilter.netlink_new]
+enabled = no
+
+[netfilter.netlink_changes]
+enabled = no
+
+[netfilter.netlink_search]
+enabled = no
+
+[netfilter.netlink_errors]
+enabled = no
+
+[netfilter.netlink_expect]
+enabled = no
diff --git a/build_external/scenarios/gaps_lo/parent-compose.yml b/build_external/scenarios/gaps_lo/parent-compose.yml
new file mode 100644
index 0000000..e7baad2
--- /dev/null
+++ b/build_external/scenarios/gaps_lo/parent-compose.yml
@@ -0,0 +1,13 @@
+version: '3.3'
+services:
+ agent_parent:
+ image: debian_10_dev
+ command: /usr/sbin/netdata -D
+ ports:
+ - 21000:19999
+ volumes:
+ - ./parent_stream.conf:/etc/netdata/stream.conf:ro
+ - ./parent_guid:/var/lib/netdata/registry/netdata.public.unique.id:ro
+ - ./mostly_off.conf:/etc/netdata/netdata.conf:ro
+ cap_add:
+ - SYS_PTRACE
diff --git a/build_external/scenarios/gaps_lo/parent_guid b/build_external/scenarios/gaps_lo/parent_guid
new file mode 100644
index 0000000..fee6f32
--- /dev/null
+++ b/build_external/scenarios/gaps_lo/parent_guid
@@ -0,0 +1 @@
+00000000-0000-0000-0000-000000000000 \ No newline at end of file
diff --git a/build_external/scenarios/gaps_lo/parent_stream.conf b/build_external/scenarios/gaps_lo/parent_stream.conf
new file mode 100644
index 0000000..99611cc
--- /dev/null
+++ b/build_external/scenarios/gaps_lo/parent_stream.conf
@@ -0,0 +1,12 @@
+[00000000-0000-0000-0000-000000000000]
+ enabled = yes
+ allow from = *
+ default history = 3600
+ # default memory mode = ram
+
+ health enabled by default = auto
+
+ # postpone alarms for a short period after the sender is connected
+ default postpone alarms on connect seconds = 60
+ multiple connections = allow
+
diff --git a/build_external/scenarios/only-agent/docker-compose.yml b/build_external/scenarios/only-agent/docker-compose.yml
new file mode 100644
index 0000000..74bdef8
--- /dev/null
+++ b/build_external/scenarios/only-agent/docker-compose.yml
@@ -0,0 +1,8 @@
+version: '3'
+services:
+ agent:
+ image: ${Distro}_${Version}_dev
+ command: /usr/sbin/netdata -D
+ ports:
+ - 80
+ - 443
diff --git a/build_external/scenarios/parent-child/child_stream.conf b/build_external/scenarios/parent-child/child_stream.conf
new file mode 100644
index 0000000..7348f8c
--- /dev/null
+++ b/build_external/scenarios/parent-child/child_stream.conf
@@ -0,0 +1,10 @@
+[stream]
+ enabled = yes
+ destination = tcp:agent_parent
+ api key = 00000000-0000-0000-0000-000000000000
+ timeout seconds = 60
+ default port = 19999
+ send charts matching = *
+ buffer size bytes = 1048576
+ reconnect delay seconds = 5
+ initial clock resync iterations = 60
diff --git a/build_external/scenarios/parent-child/docker-compose.yml b/build_external/scenarios/parent-child/docker-compose.yml
new file mode 100644
index 0000000..ed6df15
--- /dev/null
+++ b/build_external/scenarios/parent-child/docker-compose.yml
@@ -0,0 +1,23 @@
+version: '3.3'
+services:
+ agent_parent:
+ image: debian_10_dev
+ command: /usr/sbin/netdata -D
+ ports:
+ - 20000:19999
+ volumes:
+ - ./parent_stream.conf:/etc/netdata/stream.conf:ro
+ agent_child1:
+ image: debian_9_dev
+ command: /usr/sbin/netdata -D
+ #ports: Removed to allow scaling
+ #- 20001:19999
+ volumes:
+ - ./child_stream.conf:/etc/netdata/stream.conf:ro
+ agent_child2:
+ image: fedora_30_dev
+ command: /usr/sbin/netdata -D
+ #ports: Removed to allow scaling
+ #- 20002:19999
+ volumes:
+ - ./child_stream.conf:/etc/netdata/stream.conf:ro
diff --git a/build_external/scenarios/parent-child/parent_stream.conf b/build_external/scenarios/parent-child/parent_stream.conf
new file mode 100644
index 0000000..bf85ae2
--- /dev/null
+++ b/build_external/scenarios/parent-child/parent_stream.conf
@@ -0,0 +1,7 @@
+[00000000-0000-0000-0000-000000000000]
+ enabled = yes
+ allow from = *
+ default history = 3600
+ health enabled by default = auto
+ default postpone alarms on connect seconds = 60
+ multiple connections = allow
diff --git a/claim/Makefile.am b/claim/Makefile.am
new file mode 100644
index 0000000..c838db9
--- /dev/null
+++ b/claim/Makefile.am
@@ -0,0 +1,21 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+CLEANFILES = \
+ netdata-claim.sh \
+ $(NULL)
+
+include $(top_srcdir)/build/subst.inc
+SUFFIXES = .in
+
+sbin_SCRIPTS = \
+ netdata-claim.sh \
+ $(NULL)
+
+dist_noinst_DATA = \
+ netdata-claim.sh.in \
+ README.md \
+ $(NULL)
+
diff --git a/claim/README.md b/claim/README.md
new file mode 100644
index 0000000..1d0d6ee
--- /dev/null
+++ b/claim/README.md
@@ -0,0 +1,403 @@
+<!--
+title: "Agent claiming"
+description: "Agent claiming allows a Netdata Agent, running on a distributed node, to securely connect to Netdata Cloud via the encrypted Agent-Cloud link (ACLK)."
+custom_edit_url: https://github.com/netdata/netdata/edit/master/claim/README.md
+-->
+
+# Agent claiming
+
+Agent claiming allows a Netdata Agent, running on a distributed node, to securely connect to Netdata Cloud. A Space's
+administrator creates a **claiming token**, which is used to add an Agent to their Space via the [Agent-Cloud link
+(ACLK)](/aclk/README.md).
+
+Are you just starting out with Netdata Cloud? See our [get started with
+Cloud](https://learn.netdata.cloud/docs/cloud/get-started) guide for a walkthrough of the process and simplified
+instructions.
+
+Claiming nodes is a security feature in Netdata Cloud. Through the process of claiming, you demonstrate in a few ways
+that you have administrative access to that node and the configuration settings for its Agent. By logging into the node,
+you prove you have access, and by using the claiming script or the Netdata command line, you prove you have write access
+and administrative privileges.
+
+Only the administrators of a Space in Netdata Cloud can view the claiming token and accompanying script generated by
+Netdata Cloud.
+
+> The claiming process ensures no third party can add your node, and then view your node's metrics, in a Cloud account,
+> Space, or War Room that you did not authorize.
+
+By claiming a node, you opt-in to sending data from your Agent to Netdata Cloud via the [ACLK](/aclk/README.md). This
+data is encrypted by TLS while it is in transit. We use the RSA keypair created during claiming to authenticate the
+identity of the Agent when it connects to the Cloud. While the data does flow through Netdata Cloud servers on its way
+from Agents to the browser, we do not store or log it.
+
+You can claim a node during the Cloud onboarding process, or after you've created a Space, by clicking on the **USER's
+Space** dropdown, then **Manage claimed nodes**.
+
+There are two important notes regarding claiming:
+
+- _You can only claim any given node in a single Space_. You can, however, add that claimed node to multiple War Rooms
+ within that one Space.
+- You must repeat the claiming process on every node you want to add to Netdata Cloud.
+
+## How to claim a node
+
+To claim a node, select which War Rooms you want to add this node to with the dropdown, then copy and paste the script
+given by Cloud into your node's terminal. Hit **Enter**.
+
+```bash
+sudo netdata-claim.sh -token=TOKEN -rooms=ROOM1,ROOM2 -url=https://app.netdata.cloud
+```
+
+The script should return `Agent was successfully claimed.`. If the claiming script returns errors, or if you don't see
+the node in your Space after 60 seconds, see the [troubleshooting information](#troubleshooting). If you prefer not to
+use root privileges via `sudo` to run the claiming script, see the next section.
+
+Repeat this process with every node you want to add to Cloud during onboarding. You can also add more nodes once you've
+finished onboarding.
+
+### Claim an Agent without root privileges
+
+If you don't want to run the claiming script with root privileges, you can discover which user is running the Agent,
+switch to that user, and run the claiming script.
+
+Use `grep` to search your `netdata.conf` file, which is typically located at `/etc/netdata/netdata.conf`, for the `run
+as user` setting. For example:
+
+```bash
+grep "run as user" /etc/netdata/netdata.conf
+ # run as user = netdata
+```
+
+The default user is `netdata`. Yours may be different, so pay attention to the output from `grep`. Switch to that user
+and run the claiming script.
+
+```bash
+netdata-claim.sh -token=TOKEN -rooms=ROOM1,ROOM2 -url=https://app.netdata.cloud
+```
+
+Hit **Enter**. The script should return `Agent was successfully claimed.`. If the claiming script returns errors, or if
+you don't see the node in your Space after 60 seconds, see the [troubleshooting information](#troubleshooting).
+
+### Claim an Agent running in Docker
+
+To claim an instance of the Netdata Agent running inside of a Docker container, either set claiming environment
+variables in the container to have it automatically claimed on startup or restart, or use `docker exec` to manually
+claim an already running container.
+
+For claiming to work, the contents of `/var/lib/netdata` _must_ be preserved across container
+restarts using a persistent volume. See our [recommended `docker run` and Docker Compose
+examples](/packaging/docker/README.md#create-a-new-netdata-agent-container) for details.
+
+#### Using environment variables
+
+The Netdata Docker container looks for the following environment variables on startup:
+
+- `NETDATA_CLAIM_TOKEN`
+- `NETDATA_CLAIM_URL`
+- `NETDATA_CLAIM_ROOMS`
+- `NETDATA_CLAIM_PROXY`
+
+If the token and URL are specified in their corresponding variables _and_ the container is not already claimed,
+it will use these values to attempt to claim the container, automatically adding the node to the specified War
+Rooms. If a proxy is specified, it will be used for the claiming process and for connecting to Netdata Cloud.
+
+These variables can be specified using any mechanism supported by your container tooling for setting environment
+variables inside containers. For example, when creating a new Netdata container using `docker run`, you can use the
+following modified version of the command to set the variables:
+
+```bash
+docker run -d --name=netdata \
+ -p 19999:19999 \
+ -v netdatalib:/var/lib/netdata \
+ -v netdatacache:/var/cache/netdata \
+ -v /etc/passwd:/host/etc/passwd:ro \
+ -v /etc/group:/host/etc/group:ro \
+ -v /proc:/host/proc:ro \
+ -v /sys:/host/sys:ro \
+ -v /etc/os-release:/host/etc/os-release:ro \
+ -e NETDATA_CLAIM_TOKEN=TOKEN \
+ -e NETDATA_CLAIM_URL="https://app.netdata.cloud" \
+ -e NETDATA_CLAIM_ROOMS=ROOM1,ROOM2 \
+ --restart unless-stopped \
+ --cap-add SYS_PTRACE \
+ --security-opt apparmor=unconfined \
+ netdata/netdata
+```
+
+Any output that the claiming script would normally print is available in the container logs.
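+
+For example, assuming the container is named `netdata` as in the command above, you could search the logs for the
+claiming result:
+
+```bash
+docker logs netdata 2>&1 | grep -i claim
+```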
+
+Using the environment variables like this to handle claiming is the preferred method of claiming Docker containers
+as it works in the widest variety of situations and simplifies configuration management.
+
+#### Using docker exec
+
+Claim a _running Netdata Agent container_ by appending the script offered by Cloud to a `docker exec ...` command, replacing
+`netdata` with the name of your running container:
+
+```bash
+docker exec -it netdata netdata-claim.sh -token=TOKEN -rooms=ROOM1,ROOM2 -url=https://app.netdata.cloud
+```
+
+The script should return `Agent was successfully claimed.`. If the claiming script returns errors, or if
+you don't see the node in your Space after 60 seconds, see the [troubleshooting information](#troubleshooting).
+
+### Claim a Kubernetes cluster's parent Netdata pod
+
+Read our [Kubernetes installation](/packaging/installer/methods/kubernetes.md#claim-a-kubernetes-clusters-parent-pod)
+for details on claiming a parent Netdata pod.
+
+### Claim through a proxy
+
+A Space's administrator can claim a node through a SOCKS5 or HTTP(S) proxy.
+
+You should first configure the proxy in the `[cloud]` section of `netdata.conf`. The proxy settings you specify here
+will also be used to tunnel the ACLK. The default `proxy` setting is `none`.
+
+```conf
+[cloud]
+ proxy = none
+```
+
+The `proxy` setting can take one of the following values:
+
+- `none`: Do not use a proxy, even if the system is configured otherwise.
+- `env`: Try to read proxy settings from the environment variables `http_proxy`/`socks_proxy`.
+- `socks5[h]://[user:pass@]host:ip`: The ACLK and claiming will use the specified SOCKS5 proxy.
+- `http://[user:pass@]host:ip`: The ACLK and claiming will use the specified HTTP(S) proxy.
+
+For example, a SOCKS5 proxy setting may look like the following:
+
+```conf
+[cloud]
+ proxy = socks5h://203.0.113.0:1080 # With an IP address
+ proxy = socks5h://proxy.example.com:1080 # With a URL
+```
+
+You can now move on to claiming. When you claim with the `netdata-claim.sh` script, add the `-proxy=` parameter and
+append the same proxy setting you added to `netdata.conf`.
+
+```bash
+sudo netdata-claim.sh -token=MYTOKEN1234567 -rooms=room1,room2 -url=https://app.netdata.cloud -proxy=socks5h://203.0.113.0:1080
+```
+
+Hit **Enter**. The script should return `Agent was successfully claimed.`. If the claiming script returns errors, or if
+you don't see the node in your Space after 60 seconds, see the [troubleshooting information](#troubleshooting).
+
+### Troubleshooting
+
+If you're having trouble claiming a node, this may be because the [ACLK](/aclk/README.md) cannot connect to Cloud.
+
+With the Netdata Agent running, visit `http://NODE:19999/api/v1/info` in your browser, replacing `NODE` with the IP
+address or hostname of your Agent. The returned JSON contains four keys that will be helpful to diagnose any issues you
+might be having with the ACLK or claiming process.
+
+```json
+ "cloud-enabled"
+ "cloud-available"
+ "agent-claimed"
+ "aclk-available"
+```
+
+Use these keys and the information below to troubleshoot the ACLK.
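+
+For example, a quick sketch to extract just these keys, assuming `curl` and `jq` are available and replacing `NODE` as
+above:
+
+```bash
+curl -s http://NODE:19999/api/v1/info | \
+  jq '."cloud-enabled", ."cloud-available", ."agent-claimed", ."aclk-available"'
+```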
+
+#### bash: netdata-claim.sh: command not found
+
+If you run the claiming script and see a `command not found` error, you either installed Netdata in a non-standard
+location or are using an unsupported package. If you installed Netdata in a non-standard path using the `--install`
+option, you need to update your `$PATH` or run `netdata-claim.sh` using the full path. For example, if you installed
+Netdata to `/opt/netdata`, use `/opt/netdata/bin/netdata-claim.sh` to run the claiming script.
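+
+As a sketch, assuming an install prefix of `/opt/netdata`, you could either call the script via its full path or
+extend `$PATH` for the current shell:
+
+```bash
+# assuming Netdata was installed under /opt/netdata
+export PATH="$PATH:/opt/netdata/bin"
+netdata-claim.sh -token=TOKEN -url=https://app.netdata.cloud
+```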
+
+If you are using an unsupported package, such as a third-party `.deb`/`.rpm` package provided by your distribution,
+please remove that package and reinstall using our [recommended kickstart
+script](/docs/get/README.md#install-the-netdata-agent).
+
+#### Claiming on older distributions (Ubuntu 14.04, Debian 8, CentOS 6)
+
+If you're running an older Linux distribution or one that has reached EOL, such as Ubuntu 14.04 LTS, Debian 8, or CentOS
+6, your Agent may not be able to securely connect to Netdata Cloud due to an outdated version of OpenSSL. These old
+versions of OpenSSL cannot perform [hostname validation](https://wiki.openssl.org/index.php/Hostname_validation), which
+is needed to verify the identity of the remote server when establishing an SSL connection.
+
+We recommend you reinstall Netdata with a [static build](/packaging/installer/methods/kickstart-64.md), which uses an
+up-to-date version of OpenSSL with hostname validation enabled.
+
+If you choose to continue using the outdated version of OpenSSL, your node will still connect to Netdata Cloud, albeit
+with hostname verification disabled. Without verification, your Netdata Cloud connection could be vulnerable to
+man-in-the-middle attacks.
+
+#### cloud-enabled is false
+
+If `cloud-enabled` is `false`, you probably ran the installer with the `--disable-cloud` option.
+
+Additionally, check that the `enabled` setting in `/var/lib/netdata/cloud.d/cloud.conf` is set to `true`:
+
+```conf
+[global]
+ enabled = true
+```
+
+To fix this issue, reinstall Netdata using your [preferred method](/packaging/installer/README.md) and do not add the
+`--disable-cloud` option.
+
+#### cloud-available is false
+
+If `cloud-available` is `false` after you verified Cloud is enabled in the previous step, the most likely issue is that
+Cloud features failed to build during installation.
+
+If Cloud features fail to build, the installer continues and finishes the process without Cloud functionality as opposed
+to failing the installation altogether. We do this to ensure the Agent will always finish installing.
+
+If you can't see an explicit error in the installer's output, you can run the installer with the `--require-cloud`
+option. This option causes the installation to fail if Cloud functionality can't be built and enabled, and the
+installer's output should give you more error details.
+
+You may see one of the following error messages during installation:
+
+- Failed to build libmosquitto. The install process will continue, but you will not be able to connect this node to
+ Netdata Cloud.
+- Unable to fetch sources for libmosquitto. The install process will continue, but you will not be able to connect
+ this node to Netdata Cloud.
+- Failed to build libwebsockets. The install process will continue, but you may not be able to connect this node to
+ Netdata Cloud.
+- Unable to fetch sources for libwebsockets. The install process will continue, but you may not be able to connect
+ this node to Netdata Cloud.
+- Could not find cmake, which is required to build libwebsockets. The install process will continue, but you may not
+ be able to connect this node to Netdata Cloud.
+- Could not find cmake, which is required to build JSON-C. The install process will continue, but Netdata Cloud
+ support will be disabled.
+- Failed to build JSON-C. Netdata Cloud support will be disabled.
+- Unable to fetch sources for JSON-C. Netdata Cloud support will be disabled.
+
+One common cause of the installer failing to build Cloud features is a missing dependency: both `cmake` and OpenSSL,
+including the corresponding `devel` package, must be installed on your system.
+
+You can also look for error messages in `/var/log/netdata/error.log`. Try one of the following two commands to search
+for ACLK-related errors.
+
+```bash
+less /var/log/netdata/error.log
+grep -i ACLK /var/log/netdata/error.log
+```
+
+If the installer's output does not help you enable Cloud features, contact us by [creating an issue on
+GitHub](https://github.com/netdata/netdata/issues/new?labels=bug%2C+needs+triage%2C+ACLK&template=bug_report.md&title=The+installer+failed+to+prepare+the+required+dependencies+for+Netdata+Cloud+functionality)
+with details about your system and relevant output from `error.log`.
+
+#### agent-claimed is false
+
+You must [claim your node](#how-to-claim-a-node).
+
+#### aclk-available is false
+
+If `aclk-available` is `false` and all other keys are `true`, your Agent is having trouble connecting to the Cloud
+through the ACLK. Please check your system's firewall.
+
+If your Agent needs to use a proxy to access the internet, you must [set up a proxy for
+claiming](#claim-through-a-proxy).
+
+If you are certain firewall and proxy settings are not the issue, you should consult the Agent's `error.log` at
+`/var/log/netdata/error.log` and contact us by [creating an issue on
+GitHub](https://github.com/netdata/netdata/issues/new?labels=bug%2C+needs+triage%2C+ACLK&template=bug_report.md&title=ACLK-available-is-false)
+with details about your system and relevant output from `error.log`.
+
+### Remove and reclaim a node
+
+To remove a node from your Space in Netdata Cloud, delete the `cloud.d/` directory in your Netdata library directory.
+
+```bash
+cd /var/lib/netdata # Replace with your Netdata library directory, if not /var/lib/netdata/
+sudo rm -rf cloud.d/
+```
+
+This node no longer has access to the credentials it was claimed with and cannot connect to Netdata Cloud via the ACLK.
+You will still be able to see this node in your War Rooms in an **unreachable** state.
+
+If you want to reclaim this node into a different Space, you need to create a new identity by adding `-id=$(uuidgen)` to
+the claiming script parameters. Make sure the `uuidgen-runtime` package is installed, as it provides the `uuidgen`
+command. For example, using the default claiming script:
+
+```bash
+sudo netdata-claim.sh -token=TOKEN -rooms=ROOM1,ROOM2 -url=https://app.netdata.cloud -id=$(uuidgen)
+```
+
+The agent _must be restarted_ after this change.
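+
+How you restart the Agent depends on your init system; on a systemd-based host, for example:
+
+```bash
+sudo systemctl restart netdata
+```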
+
+## Claiming reference
+
+In the sections below, you can find reference material for the claiming script, claiming via the Agent's command line
+tool, and details about the files found in `cloud.d`.
+
+### The `cloud.conf` file
+
+This section defines how and whether your Agent connects to [Netdata Cloud](https://learn.netdata.cloud/docs/cloud/)
+using the [ACLK](/aclk/README.md).
+
+| setting | default | info |
+|:-------------- |:------------------------- |:-------------------------------------------------------------------------------------------------------------------------------------- |
+| cloud base url | https://app.netdata.cloud | The URL for the Netdata Cloud web application. You should not change this. If you want to disable Cloud, change the `enabled` setting. |
+| enabled | yes | The runtime option to disable the [Agent-Cloud link](/aclk/README.md) and prevent your Agent from connecting to Netdata Cloud. |
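+
+For example, a minimal `cloud.conf` that keeps the default URL but disables the Cloud connection might look like this
+(a sketch; the file lives in the claiming directory described below):
+
+```conf
+[global]
+    enabled = no
+    cloud base url = https://app.netdata.cloud
+```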
+
+### Claiming script
+
+A Space's administrator can claim an Agent by directly calling the `netdata-claim.sh` script either with root privileges
+using `sudo`, or as the user running the Agent (typically `netdata`), and passing the following arguments:
+
+```sh
+-token=TOKEN
+ where TOKEN is the Space's claiming token.
+-rooms=ROOM1,ROOM2,...
+ where ROOMX is the War Room this node should be added to. This list is optional.
+-url=URL_BASE
+ where URL_BASE is the Netdata Cloud endpoint base URL. By default, this is https://app.netdata.cloud.
+-id=AGENT_ID
+ where AGENT_ID is the unique identifier of the Agent. This is the Agent's MACHINE_GUID by default.
+-hostname=HOSTNAME
+ where HOSTNAME is the result of the hostname command by default.
+-proxy=PROXY_URL
+ where PROXY_URL is the endpoint of a SOCKS5 proxy.
+```
+
+For example, the following command claims an Agent and adds it to rooms `room1` and `room2`:
+
+```sh
+netdata-claim.sh -token=MYTOKEN1234567 -rooms=room1,room2
+```
+
+You should then notify the `netdata` service of the result using `netdatacli`:
+
+```sh
+netdatacli reload-claiming-state
+```
+
+This reloads the Agent claiming state from disk.
+
+### Netdata Agent command line
+
+If a Netdata Agent is running, the Space's administrator can claim a node using the `netdata` service binary with
+additional command line parameters:
+
+```sh
+-W "claim -token=TOKEN -rooms=ROOM1,ROOM2"
+```
+
+For example:
+
+```sh
+/usr/sbin/netdata -D -W "claim -token=MYTOKEN1234567 -rooms=room1,room2"
+```
+
+If need be, the user can override the Agent's defaults by providing additional arguments like those described
+[here](#claiming-script).
+
+### Claiming directory
+
+Netdata stores the Agent's claiming-related state in the Netdata library directory under `cloud.d`. For a default
+installation, this directory exists at `/var/lib/netdata/cloud.d`. The directory and its files should be owned by the
+user that runs the Agent, which is typically the `netdata` user.
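+
+For example, on a default install you could verify the ownership, and restore it if needed, with something like:
+
+```bash
+ls -l /var/lib/netdata/cloud.d
+sudo chown -R netdata:netdata /var/lib/netdata/cloud.d
+```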
+
+The `cloud.d/token` file should contain the claiming token, and the `cloud.d/rooms` file should contain the list of War
+Rooms you added that node to.
+
+The user can also put the Cloud endpoint's full certificate chain in `cloud.d/cloud_fullchain.pem` so that the Agent
+can trust the endpoint if necessary.
+
diff --git a/claim/claim.c b/claim/claim.c
new file mode 100644
index 0000000..bb93160
--- /dev/null
+++ b/claim/claim.c
@@ -0,0 +1,194 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "claim.h"
+#include "../registry/registry_internals.h"
+#include "../aclk/legacy/aclk_common.h"
+
+char *claiming_pending_arguments = NULL;
+
+static char *claiming_errors[] = {
+ "Agent claimed successfully", // 0
+ "Unknown argument", // 1
+ "Problems with claiming working directory", // 2
+ "Missing dependencies", // 3
+ "Failure to connect to endpoint", // 4
+ "The CLI didn't work", // 5
+ "Wrong user", // 6
+ "Unknown HTTP error message", // 7
+ "invalid node id", // 8
+ "invalid node name", // 9
+ "invalid room id", // 10
+ "invalid public key", // 11
+ "token expired/token not found/invalid token", // 12
+ "already claimed", // 13
+ "processing claiming", // 14
+ "Internal Server Error", // 15
+ "Gateway Timeout", // 16
+ "Service Unavailable", // 17
+ "Agent Unique Id Not Readable" // 18
+};
+
+/* Retrieve the claim id for the agent.
+ * Caller owns the returned string.
+ */
+char *is_agent_claimed(void)
+{
+ char *result;
+ rrdhost_aclk_state_lock(localhost);
+ result = (localhost->aclk_state.claimed_id == NULL) ? NULL : strdupz(localhost->aclk_state.claimed_id);
+ rrdhost_aclk_state_unlock(localhost);
+ return result;
+}
+
+#define CLAIMING_COMMAND_LENGTH 16384
+#define CLAIMING_PROXY_LENGTH CLAIMING_COMMAND_LENGTH/4
+
+extern struct registry registry;
+
+/* rrd_init() and post_conf_load() must have been called before this function */
+void claim_agent(char *claiming_arguments)
+{
+ if (!netdata_cloud_setting) {
+ error("Refusing to claim agent -> cloud functionality has been disabled");
+ return;
+ }
+
+#ifndef DISABLE_CLOUD
+ int exit_code;
+ pid_t command_pid;
+ char command_buffer[CLAIMING_COMMAND_LENGTH + 1];
+ FILE *fp;
+
+ // This is guaranteed to be set early in main via post_conf_load()
+ char *cloud_base_url = appconfig_get(&cloud_config, CONFIG_SECTION_GLOBAL, "cloud base url", NULL);
+ if (cloud_base_url == NULL)
+ fatal("Do not move the cloud base url out of post_conf_load!!");
+ const char *proxy_str;
+ ACLK_PROXY_TYPE proxy_type;
+ char proxy_flag[CLAIMING_PROXY_LENGTH] = "-noproxy";
+
+ proxy_str = aclk_get_proxy(&proxy_type);
+
+ if (proxy_type == PROXY_TYPE_SOCKS5 || proxy_type == PROXY_TYPE_HTTP)
+ snprintf(proxy_flag, CLAIMING_PROXY_LENGTH, "-proxy=\"%s\"", proxy_str);
+
+ snprintfz(command_buffer,
+ CLAIMING_COMMAND_LENGTH,
+              "exec netdata-claim.sh %s -hostname=%s -id=%s -url=%s -noreload %s",
+              proxy_flag,
+ netdata_configured_hostname,
+ localhost->machine_guid,
+ cloud_base_url,
+ claiming_arguments);
+
+ info("Executing agent claiming command 'netdata-claim.sh'");
+ fp = mypopen(command_buffer, &command_pid);
+ if(!fp) {
+ error("Cannot popen(\"%s\").", command_buffer);
+ return;
+ }
+ info("Waiting for claiming command to finish.");
+ while (fgets(command_buffer, CLAIMING_COMMAND_LENGTH, fp) != NULL) {;}
+ exit_code = mypclose(fp, command_pid);
+ info("Agent claiming command returned with code %d", exit_code);
+ if (0 == exit_code) {
+ load_claiming_state();
+ return;
+ }
+ if (exit_code < 0) {
+ error("Agent claiming command failed to complete its run.");
+ return;
+ }
+ errno = 0;
+ unsigned maximum_known_exit_code = sizeof(claiming_errors) / sizeof(claiming_errors[0]) - 1;
+
+ if ((unsigned)exit_code > maximum_known_exit_code) {
+ error("Agent failed to be claimed with an unknown error.");
+ return;
+ }
+ error("Agent failed to be claimed with the following error message:");
+ error("\"%s\"", claiming_errors[exit_code]);
+#else
+ UNUSED(claiming_arguments);
+ UNUSED(claiming_errors);
+#endif
+}
+
+#ifdef ENABLE_ACLK
+extern int aclk_connected, aclk_kill_link, aclk_disable_runtime;
+#endif
+
+/* Change the claimed state of the agent.
+ *
+ * This only happens when the user has explicitly requested it:
+ * - via the cli tool by reloading the claiming state
+ * - after spawning the claim because of a command-line argument
+ * If this happens with the ACLK active under an old claim then we MUST KILL THE LINK
+ */
+void load_claiming_state(void)
+{
+ // --------------------------------------------------------------------
+ // Check if the cloud is enabled
+#if defined( DISABLE_CLOUD ) || !defined( ENABLE_ACLK )
+ netdata_cloud_setting = 0;
+#else
+ uuid_t uuid;
+ rrdhost_aclk_state_lock(localhost);
+ if (localhost->aclk_state.claimed_id) {
+ freez(localhost->aclk_state.claimed_id);
+ localhost->aclk_state.claimed_id = NULL;
+ }
+ if (aclk_connected)
+ {
+ info("Agent was already connected to Cloud - forcing reconnection under new credentials");
+ aclk_kill_link = 1;
+ }
+ aclk_disable_runtime = 0;
+
+ // Propagate into aclk and registry. Be kind of atomic...
+ appconfig_get(&cloud_config, CONFIG_SECTION_GLOBAL, "cloud base url", DEFAULT_CLOUD_BASE_URL);
+
+ char filename[FILENAME_MAX + 1];
+ snprintfz(filename, FILENAME_MAX, "%s/cloud.d/claimed_id", netdata_configured_varlib_dir);
+
+ long bytes_read;
+ char *claimed_id = read_by_filename(filename, &bytes_read);
+ if(claimed_id && uuid_parse(claimed_id, uuid)) {
+ error("claimed_id \"%s\" doesn't look like valid UUID", claimed_id);
+ freez(claimed_id);
+ claimed_id = NULL;
+ }
+ localhost->aclk_state.claimed_id = claimed_id;
+ rrdhost_aclk_state_unlock(localhost);
+ if (!claimed_id) {
+ info("Unable to load '%s', setting state to AGENT_UNCLAIMED", filename);
+ return;
+ }
+
+ info("File '%s' was found. Setting state to AGENT_CLAIMED.", filename);
+ netdata_cloud_setting = appconfig_get_boolean(&cloud_config, CONFIG_SECTION_GLOBAL, "enabled", 1);
+#endif
+}
+
+struct config cloud_config = { .first_section = NULL,
+ .last_section = NULL,
+ .mutex = NETDATA_MUTEX_INITIALIZER,
+ .index = { .avl_tree = { .root = NULL, .compar = appconfig_section_compare },
+ .rwlock = AVL_LOCK_INITIALIZER } };
+
+void load_cloud_conf(int silent)
+{
+ char *filename;
+ errno = 0;
+
+ int ret = 0;
+
+ filename = strdupz_path_subpath(netdata_configured_varlib_dir, "cloud.d/cloud.conf");
+
+ ret = appconfig_load(&cloud_config, filename, 1, NULL);
+ if(!ret && !silent) {
+ info("CONFIG: cannot load cloud config '%s'. Running with internal defaults.", filename);
+ }
+ freez(filename);
+}
diff --git a/claim/claim.h b/claim/claim.h
new file mode 100644
index 0000000..2fd8d3e
--- /dev/null
+++ b/claim/claim.h
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_CLAIM_H
+#define NETDATA_CLAIM_H 1
+
+#include "../daemon/common.h"
+
+extern char *claiming_pending_arguments;
+extern struct config cloud_config;
+
+void claim_agent(char *claiming_arguments);
+char *is_agent_claimed(void);
+void load_claiming_state(void);
+void load_cloud_conf(int silent);
+
+#endif //NETDATA_CLAIM_H
diff --git a/claim/netdata-claim.sh.in b/claim/netdata-claim.sh.in
new file mode 100755
index 0000000..7ac91d7
--- /dev/null
+++ b/claim/netdata-claim.sh.in
@@ -0,0 +1,401 @@
+#!/usr/bin/env bash
+# netdata
+# real-time performance and health monitoring, done right!
+# (C) 2017 Costa Tsaousis <costa@tsaousis.gr>
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# Exit code: 0 - Success
+# Exit code: 1 - Unknown argument
+# Exit code: 2 - Problems with claiming working directory
+# Exit code: 3 - Missing dependencies
+# Exit code: 4 - Failure to connect to endpoint
+# Exit code: 5 - The CLI didn't work
+# Exit code: 6 - Wrong user
+# Exit code: 7 - Unknown HTTP error message
+#
+# OK: Agent claimed successfully
+# HTTP Status code: 204
+# Exit code: 0
+#
+# Unknown HTTP error message
+# HTTP Status code: 422
+# Exit code: 7
+ERROR_KEYS[7]="None"
+ERROR_MESSAGES[7]="Unknown HTTP error message"
+
+# Error: The agent id is invalid; it does not fulfill the constraints
+# HTTP Status code: 422
+# Exit code: 8
+ERROR_KEYS[8]="ErrInvalidNodeID"
+ERROR_MESSAGES[8]="invalid node id"
+
+# Error: The agent hostname is invalid; it does not fulfill the constraints
+# HTTP Status code: 422
+# Exit code: 9
+ERROR_KEYS[9]="ErrInvalidNodeName"
+ERROR_MESSAGES[9]="invalid node name"
+
+# Error: At least one of the given rooms ids is invalid; it does not fulfill the constraints
+# HTTP Status code: 422
+# Exit code: 10
+ERROR_KEYS[10]="ErrInvalidRoomID"
+ERROR_MESSAGES[10]="invalid room id"
+
+# Error: Invalid public key; the public key is empty or not present
+# HTTP Status code: 422
+# Exit code: 11
+ERROR_KEYS[11]="ErrInvalidPublicKey"
+ERROR_MESSAGES[11]="invalid public key"
+#
+# Error: Expired, missing or invalid token
+# HTTP Status code: 403
+# Exit code: 12
+ERROR_KEYS[12]="ErrForbidden"
+ERROR_MESSAGES[12]="token expired/token not found/invalid token"
+
+# Error: Duplicate agent id; an agent with the same id is already registered in the cloud
+# HTTP Status code: 409
+# Exit code: 13
+ERROR_KEYS[13]="ErrAlreadyClaimed"
+ERROR_MESSAGES[13]="already claimed"
+
+# Error: The node claiming process is still in progress.
+# HTTP Status code: 102
+# Exit code: 14
+ERROR_KEYS[14]="ErrProcessingClaim"
+ERROR_MESSAGES[14]="processing claiming"
+
+# Error: Internal server error. Any other unexpected error (DB problems, etc.)
+# HTTP Status code: 500
+# Exit code: 15
+ERROR_KEYS[15]="ErrInternalServerError"
+ERROR_MESSAGES[15]="Internal Server Error"
+
+# Error: There was a timeout processing the claim.
+# HTTP Status code: 504
+# Exit code: 16
+ERROR_KEYS[16]="ErrGatewayTimeout"
+ERROR_MESSAGES[16]="Gateway Timeout"
+
+# Error: The service cannot handle the claiming request at this time.
+# HTTP Status code: 503
+# Exit code: 17
+ERROR_KEYS[17]="ErrServiceUnavailable"
+ERROR_MESSAGES[17]="Service Unavailable"
+
+# Exit code: 18 - Agent unique id is not generated yet.
+
+get_config_value() {
+ conf_file="${1}"
+ section="${2}"
+ key_name="${3}"
+ config_result=$(@sbindir_POST@/netdatacli 2>/dev/null read-config "$conf_file|$section|$key_name"; exit $?)
+ # shellcheck disable=SC2181
+ if [ "$?" != "0" ]; then
+ echo >&2 "cli failed, assume netdata is not running and query the on-disk config"
+ config_result=$(@sbindir_POST@/netdata 2>/dev/null -W get2 "$conf_file" "$section" "$key_name" unknown_default)
+ fi
+ echo "$config_result"
+}
+if command -v curl >/dev/null 2>&1 ; then
+ URLTOOL="curl"
+elif command -v wget >/dev/null 2>&1 ; then
+ URLTOOL="wget"
+else
+ echo >&2 "I need curl or wget to proceed, but neither is available on this system."
+ exit 3
+fi
+if ! command -v openssl >/dev/null 2>&1 ; then
+ echo >&2 "I need openssl to proceed, but it is not available on this system."
+ exit 3
+fi
+
+# shellcheck disable=SC2050
+if [ "@enable_cloud_POST@" = "no" ]; then
+ echo >&2 "This agent was built with --disable-cloud and cannot be claimed"
+ exit 3
+fi
+# shellcheck disable=SC2050
+if [ "@can_enable_aclk_POST@" != "yes" ]; then
+ echo >&2 "This agent was built without the dependencies for Cloud and cannot be claimed"
+ exit 3
+fi
+
+# -----------------------------------------------------------------------------
+# defaults to allow running this script by hand
+
+[ -z "${NETDATA_VARLIB_DIR}" ] && NETDATA_VARLIB_DIR="@varlibdir_POST@"
+MACHINE_GUID_FILE="@registrydir_POST@/netdata.public.unique.id"
+CLAIMING_DIR="${NETDATA_VARLIB_DIR}/cloud.d"
+TOKEN="unknown"
+URL_BASE=$(get_config_value cloud global "cloud base url")
+[ -z "$URL_BASE" ] && URL_BASE="https://app.netdata.cloud" # Cover post-install with --dont-start
+ID="unknown"
+ROOMS=""
+[ -z "$HOSTNAME" ] && HOSTNAME=$(hostname)
+CLOUD_CERTIFICATE_FILE="${CLAIMING_DIR}/cloud_fullchain.pem"
+VERBOSE=0
+INSECURE=0
+RELOAD=1
+NETDATA_USER=$(get_config_value netdata global "run as user")
+[ -z "$EUID" ] && EUID="$(id -u)"
+
+
+# get the MACHINE_GUID by default
+if [ -r "${MACHINE_GUID_FILE}" ]; then
+ ID="$(cat "${MACHINE_GUID_FILE}")"
+ MGUID=$ID
+else
+ echo >&2 "netdata.public.unique.id is not generated yet or not readable. Please run agent at least once before attempting to claim. Agent generates this file on first startup. If the ID is generated already make sure you have rights to read it (Filename: ${MACHINE_GUID_FILE})."
+ exit 18
+fi
+
+# get token from file
+if [ -r "${CLAIMING_DIR}/token" ]; then
+ TOKEN="$(cat "${CLAIMING_DIR}/token")"
+fi
+
+# get rooms from file
+if [ -r "${CLAIMING_DIR}/rooms" ]; then
+ ROOMS="$(cat "${CLAIMING_DIR}/rooms")"
+fi
+
+for arg in "$@"
+do
+ case $arg in
+ -token=*) TOKEN=${arg:7} ;;
+ -url=*) URL_BASE=${arg:5} ;;
+ -id=*) ID=${arg:4} ;;
+ -rooms=*) ROOMS=${arg:7} ;;
+ -hostname=*) HOSTNAME=${arg:10} ;;
+ -verbose) VERBOSE=1 ;;
+ -insecure) INSECURE=1 ;;
+ -proxy=*) PROXY=${arg:7} ;;
+ -noproxy) NOPROXY=yes ;;
+ -noreload) RELOAD=0 ;;
+ -user=*) NETDATA_USER=${arg:6} ;;
+ *) echo >&2 "Unknown argument ${arg}"
+ exit 1 ;;
+ esac
+ shift 1
+done
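+# Example invocations (the token, room ids and proxy below are placeholders):
+#   netdata-claim.sh -token=TOKEN -rooms=ROOM1,ROOM2
+#   netdata-claim.sh -token=TOKEN -url=https://app.netdata.cloud -proxy=http://10.0.0.1:3128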
+
+if [ "$EUID" != "0" ] && [ "$(whoami)" != "$NETDATA_USER" ]; then
+ echo >&2 "This script must be run by the $NETDATA_USER user account"
+ exit 6
+fi
+
+# wget does not support SOCKS proxies; curl is required when a SOCKS proxy is configured
+if [[ "${URLTOOL}" != "curl" && "${PROXY:0:5}" = socks ]] ; then
+ echo >&2 "wget doesn't support SOCKS. Please install curl or disable SOCKS proxy."
+ exit 1
+fi
+
+echo >&2 "Token: ****************"
+echo >&2 "Base URL: $URL_BASE"
+echo >&2 "Id: $ID"
+echo >&2 "Rooms: $ROOMS"
+echo >&2 "Hostname: $HOSTNAME"
+echo >&2 "Proxy: $PROXY"
+echo >&2 "Netdata user: $NETDATA_USER"
+
+# create the claiming directory for this user
+if [ ! -d "${CLAIMING_DIR}" ] ; then
+ mkdir -p "${CLAIMING_DIR}" && chmod 0770 "${CLAIMING_DIR}"
+# shellcheck disable=SC2181
+ if [ $? -ne 0 ] ; then
+ echo >&2 "Failed to create claiming working directory ${CLAIMING_DIR}"
+ exit 2
+ fi
+fi
+if [ ! -w "${CLAIMING_DIR}" ] ; then
+ echo >&2 "No write permission in claiming working directory ${CLAIMING_DIR}"
+ exit 2
+fi
+
+if [ ! -f "${CLAIMING_DIR}/private.pem" ] ; then
+ echo >&2 "Generating private/public key for the first time."
+ if ! openssl genrsa -out "${CLAIMING_DIR}/private.pem" 2048 ; then
+ echo >&2 "Failed to generate private/public key pair."
+ exit 2
+ fi
+fi
+if [ ! -f "${CLAIMING_DIR}/public.pem" ] ; then
+ echo >&2 "Extracting public key from private key."
+ if ! openssl rsa -in "${CLAIMING_DIR}/private.pem" -outform PEM -pubout -out "${CLAIMING_DIR}/public.pem" ; then
+ echo >&2 "Failed to extract public key."
+ exit 2
+ fi
+fi
+
+TARGET_URL="${URL_BASE%/}/api/v1/spaces/nodes/${ID}"
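+# Embed the public key in the JSON payload below: replace real newlines with literal "\n" escapes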
+# shellcheck disable=SC2002
+KEY=$(cat "${CLAIMING_DIR}/public.pem" | tr '\n' '!' | sed -e 's/!/\\n/g')
+# shellcheck disable=SC2001
+[ -n "$ROOMS" ] && ROOMS=\"$(echo "$ROOMS" | sed s'/,/", "/g')\"
+
+cat > "${CLAIMING_DIR}/tmpin.txt" <<EMBED_JSON
+{
+ "node": {
+ "id": "$ID",
+ "hostname": "$HOSTNAME"
+ },
+ "token": "$TOKEN",
+ "rooms" : [ $ROOMS ],
+ "publicKey" : "$KEY",
+ "mGUID" : "$MGUID"
+}
+EMBED_JSON
+
+if [ "${VERBOSE}" == 1 ] ; then
+ echo "Request to server:"
+ cat "${CLAIMING_DIR}/tmpin.txt"
+fi
+
+
+if [ "${URLTOOL}" = "curl" ] ; then
+ URLCOMMAND="curl --connect-timeout 5 --retry 0 -s -i -X PUT -d \"@${CLAIMING_DIR}/tmpin.txt\""
+ if [ "${NOPROXY}" = "yes" ] ; then
+ URLCOMMAND="${URLCOMMAND} -x \"\""
+ elif [ -n "${PROXY}" ] ; then
+ URLCOMMAND="${URLCOMMAND} -x \"${PROXY}\""
+ fi
+else
+ URLCOMMAND="wget -T 15 -O - -q --save-headers --content-on-error=on --method=PUT \
+ --body-file=\"${CLAIMING_DIR}/tmpin.txt\""
+ if [ "${NOPROXY}" = "yes" ] ; then
+ URLCOMMAND="${URLCOMMAND} --no-proxy"
+ elif [ "${PROXY:0:4}" = http ] ; then
+ URLCOMMAND="export http_proxy=${PROXY}; ${URLCOMMAND}"
+ fi
+fi
+
+if [ "${INSECURE}" == 1 ] ; then
+ if [ "${URLTOOL}" = "curl" ] ; then
+ URLCOMMAND="${URLCOMMAND} --insecure"
+ else
+ URLCOMMAND="${URLCOMMAND} --no-check-certificate"
+ fi
+fi
+
+if [ -r "${CLOUD_CERTIFICATE_FILE}" ] ; then
+ if [ "${URLTOOL}" = "curl" ] ; then
+ URLCOMMAND="${URLCOMMAND} --cacert \"${CLOUD_CERTIFICATE_FILE}\""
+ else
+ URLCOMMAND="${URLCOMMAND} --ca-certificate \"${CLOUD_CERTIFICATE_FILE}\""
+ fi
+fi
+
+if [ "${VERBOSE}" == 1 ]; then
+ echo "${URLCOMMAND} \"${TARGET_URL}\""
+fi
+
+attempt_contact () {
+ eval "${URLCOMMAND} \"${TARGET_URL}\"" >"${CLAIMING_DIR}/tmpout.txt"
+ URLCOMMAND_EXIT_CODE=$?
+ if [ "${URLTOOL}" = "wget" ] && [ "${URLCOMMAND_EXIT_CODE}" -eq 8 ] ; then
+ # We consider the server issuing an error response a successful attempt at communicating
+ URLCOMMAND_EXIT_CODE=0
+ fi
+
+ # Check if URLCOMMAND connected and received reply
+ if [ "${URLCOMMAND_EXIT_CODE}" -ne 0 ] ; then
+ echo >&2 "Failed to connect to ${URL_BASE}, return code ${URLCOMMAND_EXIT_CODE}"
+ rm -f "${CLAIMING_DIR}/tmpout.txt"
+ return 4
+ fi
+
+ if [ "${VERBOSE}" == 1 ] ; then
+ echo "Response from server:"
+ cat "${CLAIMING_DIR}/tmpout.txt"
+ fi
+
+ return 0
+}
+
+for i in {1..5}
+do
+ if attempt_contact ; then
+ echo "Connection attempt $i successful"
+ break
+ fi
+ echo "Connection attempt $i failed. Retry in ${i}s."
+ if [ "$i" -eq 5 ] ; then
+ rm -f "${CLAIMING_DIR}/tmpin.txt"
+ exit 4
+ fi
+ sleep "$i"
+done
+
+rm -f "${CLAIMING_DIR}/tmpin.txt"
+
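+# Extract the value of "errorMsgKey" from the JSON body of the response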
+ERROR_KEY=$(grep "\"errorMsgKey\":" "${CLAIMING_DIR}/tmpout.txt" | awk -F "errorMsgKey\":\"" '{print $2}' | awk -F "\"" '{print $1}')
+case ${ERROR_KEY} in
+ "ErrInvalidNodeID") EXIT_CODE=8 ;;
+ "ErrInvalidNodeName") EXIT_CODE=9 ;;
+ "ErrInvalidRoomID") EXIT_CODE=10 ;;
+ "ErrInvalidPublicKey") EXIT_CODE=11 ;;
+ "ErrForbidden") EXIT_CODE=12 ;;
+ "ErrAlreadyClaimed") EXIT_CODE=13 ;;
+ "ErrProcessingClaim") EXIT_CODE=14 ;;
+ "ErrInternalServerError") EXIT_CODE=15 ;;
+ "ErrGatewayTimeout") EXIT_CODE=16 ;;
+ "ErrServiceUnavailable") EXIT_CODE=17 ;;
+ *) EXIT_CODE=7 ;;
+esac
+
+HTTP_STATUS_CODE=$(grep "HTTP" "${CLAIMING_DIR}/tmpout.txt" | awk -F " " '{print $2}')
+if [ "${HTTP_STATUS_CODE}" = "204" ] ; then
+ EXIT_CODE=0
+fi
+
+if [ "${HTTP_STATUS_CODE}" = "204" ] || [ "${ERROR_KEY}" = "ErrAlreadyClaimed" ] ; then
+ rm -f "${CLAIMING_DIR}/tmpout.txt"
+ echo -n "${ID}" >"${CLAIMING_DIR}/claimed_id" || (echo >&2 "Claiming failed"; set -e; exit 2)
+ rm -f "${CLAIMING_DIR}/token" || (echo >&2 "Claiming failed"; set -e; exit 2)
+
+ # Rewrite the cloud.conf on the disk
+ cat > "$CLAIMING_DIR/cloud.conf" <<HERE_DOC
+[global]
+ enabled = yes
+ cloud base url = $URL_BASE
+HERE_DOC
+ if [ "$EUID" == "0" ]; then
+    chown -R "${NETDATA_USER}:${NETDATA_USER}" "${CLAIMING_DIR}" || { echo >&2 "Claiming failed"; exit 2; }
+ fi
+ if [ "${RELOAD}" == "0" ] ; then
+ exit $EXIT_CODE
+ fi
+
+ if [ -z "${PROXY}" ]; then
+ PROXYMSG=""
+ else
+ PROXYMSG="You have attempted to claim this node through a proxy - please update your the proxy setting in your netdata.conf to ${PROXY}. "
+ fi
+ # Update cloud.conf in the agent memory
+ @sbindir_POST@/netdatacli write-config 'cloud|global|enabled|yes' && \
+ @sbindir_POST@/netdatacli write-config "cloud|global|cloud base url|$URL_BASE" && \
+ @sbindir_POST@/netdatacli reload-claiming-state && \
+ if [ "${HTTP_STATUS_CODE}" = "204" ] ; then
+ echo >&2 "${PROXYMSG}Node was successfully claimed."
+ else
+ echo >&2 "The agent cloud base url is set to the url provided."
+ echo >&2 "The cloud may have different credentials already registered for this agent ID and it cannot be reclaimed under different credentials for security reasons. If you are unable to connect use -id=\$(uuidgen) to overwrite this agent ID with a fresh value if the original credentials cannot be restored."
+ echo >&2 "${PROXYMSG}Failed to claim node with the following error message:\"${ERROR_MESSAGES[$EXIT_CODE]}\""
+ fi && exit $EXIT_CODE
+
+ if [ "${ERROR_KEY}" = "ErrAlreadyClaimed" ] ; then
+ echo >&2 "The cloud may have different credentials already registered for this agent ID and it cannot be reclaimed under different credentials for security reasons. If you are unable to connect use -id=\$(uuidgen) to overwrite this agent ID with a fresh value if the original credentials cannot be restored."
+ echo >&2 "${PROXYMSG}Failed to claim node with the following error message:\"${ERROR_MESSAGES[$EXIT_CODE]}\""
+ exit $EXIT_CODE
+ fi
+ echo >&2 "${PROXYMSG}The claim was successful but the agent could not be notified ($?)- it requires a restart to connect to the cloud."
+ exit 5
+fi
+
+echo >&2 "Failed to claim node with the following error message:\"${ERROR_MESSAGES[$EXIT_CODE]}\""
+if [ "${VERBOSE}" == 1 ]; then
+ echo >&2 "Error key was:\"${ERROR_KEYS[$EXIT_CODE]}\""
+fi
+rm -f "${CLAIMING_DIR}/tmpout.txt"
+exit $EXIT_CODE
diff --git a/cli/Makefile.am b/cli/Makefile.am
new file mode 100644
index 0000000..161784b
--- /dev/null
+++ b/cli/Makefile.am
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/cli/README.md b/cli/README.md
new file mode 100644
index 0000000..9381237
--- /dev/null
+++ b/cli/README.md
@@ -0,0 +1,34 @@
+<!--
+title: "Netdata CLI"
+description: "The Netdata Agent includes a command-line experience for reloading health configuration, reopening log files, halting the daemon, and more."
+custom_edit_url: https://github.com/netdata/netdata/edit/master/cli/README.md
+-->
+
+# Netdata CLI
+
+You can see the commands `netdatacli` supports by running it and entering `help` on standard
+input. Commands can be passed as command-line arguments or given on standard input.
+
+A running netdata agent accepts the following commands:
+
+```sh
+The commands are (arguments are in brackets):
+help
+ Show this help menu.
+reload-health
+ Reload health configuration.
+save-database
+    Save internal DB to disk for memory mode save.
+reopen-logs
+ Close and reopen log files.
+shutdown-agent
+ Cleanup and exit the netdata agent.
+fatal-agent
+ Log the state and halt the netdata agent.
+reload-claiming-state
+ Reload agent claiming state from disk.
+```
+
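+For example, assuming the agent is running and `netdatacli` is in your `PATH`, either of the
+following invocations reloads the health configuration:
+
+```sh
+# pass the command as an argument ...
+netdatacli reload-health
+
+# ... or send it on standard input
+echo "reload-health" | netdatacli
+```
+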
+These commands are the same ones that can be sent to netdata via [signals](/daemon/README.md#command-line-options).
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcli%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/cli/cli.c b/cli/cli.c
new file mode 100644
index 0000000..4df0201
--- /dev/null
+++ b/cli/cli.c
@@ -0,0 +1,201 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "cli.h"
+#include "../libnetdata/required_dummies.h"
+
+static uv_pipe_t client_pipe;
+static uv_write_t write_req;
+static uv_shutdown_t shutdown_req;
+
+static char command_string[MAX_COMMAND_LENGTH];
+static unsigned command_string_size;
+
+static char response_string[MAX_COMMAND_LENGTH];
+static unsigned response_string_size;
+
+static int exit_status;
+
+struct command_context {
+ uv_work_t work;
+ uv_stream_t *client;
+ cmd_t idx;
+ char *args;
+ char *message;
+ cmd_status_t status;
+};
+
+static void parse_command_reply(void)
+{
+ FILE *stream = NULL;
+ char *pos;
+ int syntax_error = 0;
+
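+    /* The server reply is a sequence of records, each introduced by a one-byte
+     * prefix: CMD_PREFIX_EXIT_CODE carries the numeric exit status, while
+     * CMD_PREFIX_INFO and CMD_PREFIX_ERROR carry text for stdout and stderr. */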
+ for (pos = response_string ;
+ pos < response_string + response_string_size && !syntax_error ;
+ ++pos) {
+ /* Skip white-space characters */
+ for ( ; isspace(*pos) && ('\0' != *pos); ++pos) {;}
+
+ if ('\0' == *pos)
+ continue;
+
+ switch (*pos) {
+ case CMD_PREFIX_EXIT_CODE:
+ exit_status = atoi(++pos);
+ break;
+ case CMD_PREFIX_INFO:
+ stream = stdout;
+ break;
+ case CMD_PREFIX_ERROR:
+ stream = stderr;
+ break;
+ default:
+ syntax_error = 1;
+ fprintf(stderr, "Syntax error, failed to parse command response.\n");
+ break;
+ }
+ if (stream) {
+ fprintf(stream, "%s\n", ++pos);
+ pos += strlen(pos);
+ stream = NULL;
+ }
+ }
+}
+
+static void pipe_read_cb(uv_stream_t *client, ssize_t nread, const uv_buf_t *buf)
+{
+ if (0 == nread) {
+ fprintf(stderr, "%s: Zero bytes read by command pipe.\n", __func__);
+ } else if (UV_EOF == nread) {
+// fprintf(stderr, "EOF found in command pipe.\n");
+ parse_command_reply();
+ } else if (nread < 0) {
+ fprintf(stderr, "%s: %s\n", __func__, uv_strerror(nread));
+ }
+
+ if (nread < 0) { /* stop stream due to EOF or error */
+ (void)uv_read_stop((uv_stream_t *)client);
+ } else if (nread) {
+ size_t to_copy;
+
+ to_copy = MIN(nread, MAX_COMMAND_LENGTH - 1 - response_string_size);
+ memcpy(response_string + response_string_size, buf->base, to_copy);
+ response_string_size += to_copy;
+ response_string[response_string_size] = '\0';
+ }
+ if (buf && buf->len) {
+ free(buf->base);
+ }
+}
+
+static void alloc_cb(uv_handle_t *handle, size_t suggested_size, uv_buf_t *buf)
+{
+ (void)handle;
+
+ buf->base = malloc(suggested_size);
+ buf->len = suggested_size;
+}
+
+static void shutdown_cb(uv_shutdown_t* req, int status)
+{
+ int ret;
+
+ (void)req;
+ (void)status;
+
+ /* receive reply */
+ response_string_size = 0;
+ response_string[0] = '\0';
+
+ ret = uv_read_start((uv_stream_t *)&client_pipe, alloc_cb, pipe_read_cb);
+ if (ret) {
+ fprintf(stderr, "uv_read_start(): %s\n", uv_strerror(ret));
+ uv_close((uv_handle_t *)&client_pipe, NULL);
+ return;
+ }
+
+}
+
+static void pipe_write_cb(uv_write_t* req, int status)
+{
+ int ret;
+
+ (void)req;
+ (void)status;
+
+ ret = uv_shutdown(&shutdown_req, (uv_stream_t *)&client_pipe, shutdown_cb);
+ if (ret) {
+ fprintf(stderr, "uv_shutdown(): %s\n", uv_strerror(ret));
+ uv_close((uv_handle_t *)&client_pipe, NULL);
+ return;
+ }
+}
+
+static void connect_cb(uv_connect_t* req, int status)
+{
+ int ret;
+ uv_buf_t write_buf;
+ char *s;
+
+ (void)req;
+ if (status) {
+ fprintf(stderr, "uv_pipe_connect(): %s\n", uv_strerror(status));
+ fprintf(stderr, "Make sure the netdata service is running.\n");
+ exit(-1);
+ }
+    if (0 == command_string_size) {
+        s = fgets(command_string, MAX_COMMAND_LENGTH, stdin);
+        (void)s; /* ignore the return value; command_string is used directly below */
+    }
+ command_string_size = strlen(command_string);
+
+ write_req.data = &client_pipe;
+ write_buf.base = command_string;
+ write_buf.len = command_string_size;
+ ret = uv_write(&write_req, (uv_stream_t *)&client_pipe, &write_buf, 1, pipe_write_cb);
+ if (ret) {
+ fprintf(stderr, "uv_write(): %s\n", uv_strerror(ret));
+ }
+// fprintf(stderr, "COMMAND: Sending command: \"%s\"\n", command_string);
+}
+
+int main(int argc, char **argv)
+{
+ int ret, i;
+ static uv_loop_t* loop;
+ uv_connect_t req;
+
+ exit_status = -1; /* default status for when there is no command response from server */
+
+ loop = uv_default_loop();
+
+ ret = uv_pipe_init(loop, &client_pipe, 1);
+ if (ret) {
+ fprintf(stderr, "uv_pipe_init(): %s\n", uv_strerror(ret));
+ return exit_status;
+ }
+
+ command_string_size = 0;
+ command_string[0] = '\0';
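+    /* Join the command-line arguments into one space-separated command string;
+     * if none were given, connect_cb() reads the command from stdin instead. */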
+ for (i = 1 ; i < argc ; ++i) {
+ size_t to_copy;
+
+ to_copy = MIN(strlen(argv[i]), MAX_COMMAND_LENGTH - 1 - command_string_size);
+ strncpyz(command_string + command_string_size, argv[i], to_copy);
+ command_string_size += to_copy;
+
+ if (command_string_size < MAX_COMMAND_LENGTH - 1) {
+ command_string[command_string_size++] = ' ';
+ } else {
+ break;
+ }
+ }
+
+ uv_pipe_connect(&req, &client_pipe, PIPENAME, connect_cb);
+
+ uv_run(loop, UV_RUN_DEFAULT);
+
+ uv_close((uv_handle_t *)&client_pipe, NULL);
+
+ return exit_status;
+}
\ No newline at end of file
diff --git a/cli/cli.h b/cli/cli.h
new file mode 100644
index 0000000..9e730a3
--- /dev/null
+++ b/cli/cli.h
@@ -0,0 +1,8 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_CLI_H
+#define NETDATA_CLI_H 1
+
+#include "../daemon/common.h"
+
+#endif //NETDATA_CLI_H
diff --git a/collectors/COLLECTORS.md b/collectors/COLLECTORS.md
new file mode 100644
index 0000000..a795de1
--- /dev/null
+++ b/collectors/COLLECTORS.md
@@ -0,0 +1,538 @@
+<!--
+title: "Supported collectors list"
+description: "Netdata gathers real-time metrics from hundreds of data sources using collectors. Most require zero configuration and are pre-configured out of the box."
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/COLLECTORS.md
+-->
+
+# Supported collectors list
+
+Netdata uses collectors to help you gather metrics from your favorite applications and services and view them in
+real-time, interactive charts. The following list includes collectors for both external services/applications and
+internal system metrics.
+
+Learn more about [how collectors work](/docs/collect/how-collectors-work.md), and then learn how to [enable or
+configure](/docs/collect/enable-configure.md) any of the below collectors using the same process.
+
+Some collectors have both Go and Python versions as we continue our effort to migrate all collectors to Go. In these
+cases, _Netdata always prioritizes the Go version_, and we highly recommend you use the Go versions for the best
+experience.
+
+If you want to use a Python version of a collector, you need to explicitly [disable the Go
+version](/docs/collect/enable-configure.md), and enable the Python version. Netdata then skips the Go version and
+attempts to load the Python version and its accompanying configuration file.
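+
+A minimal sketch of that switch, assuming a default `/etc/netdata` install with the stock
+`edit-config` helper (the `nginx` module is only an example):
+
+```sh
+cd /etc/netdata
+sudo ./edit-config go.d.conf      # under "modules:", set the module to no (e.g. "nginx: no")
+sudo ./edit-config python.d.conf  # set the same module to yes (e.g. "nginx: yes")
+sudo systemctl restart netdata    # or your system's service manager equivalent
+```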
+
+If you don't see the app/service you'd like to monitor in this list, check out our [GitHub
+issues](https://github.com/netdata/netdata/issues). Use the search bar to look for previous discussions about that
+collector—we may be looking for contributions from users such as yourself! If you don't see the collector there, make a
+[feature request](https://community.netdata.cloud/c/feature-requests/7/none) on our community forums.
+
+- [Service and application collectors](#service-and-application-collectors)
+ - [APM (application performance monitoring)](#apm-application-performance-monitoring)
+ - [Containers and VMs](#containers-and-vms)
+ - [Data stores](#data-stores)
+ - [Distributed computing](#distributed-computing)
+ - [Email](#email)
+ - [Kubernetes](#kubernetes)
+ - [Logs](#logs)
+ - [Messaging](#messaging)
+ - [Network](#network)
+ - [Provisioning](#provisioning)
+ - [Remote devices](#remote-devices)
+ - [Search](#search)
+ - [Storage](#storage)
+ - [Web](#web)
+- [System collectors](#system-collectors)
+ - [Applications](#applications)
+ - [Disks and filesystems](#disks-and-filesystems)
+ - [eBPF (extended Berkeley Packet Filter)](#ebpf)
+ - [Hardware](#hardware)
+ - [Memory](#memory)
+ - [Networks](#networks)
+ - [Processes](#processes)
+ - [Resources](#resources)
+ - [Users](#users)
+- [Netdata collectors](#netdata-collectors)
+- [Orchestrators](#orchestrators)
+- [Third-party collectors](#third-party-collectors)
+- [Etc](#etc)
+
+## Service and application collectors
+
+The Netdata Agent auto-detects and collects metrics from all of the services and applications below. You can also
+configure any of these collectors according to your setup and infrastructure.
+
+### Generic
+
+- [Prometheus endpoints](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/prometheus): Gathers
+ metrics from any number of Prometheus endpoints, with support to autodetect more than 600 services and applications.
+
+### APM (application performance monitoring)
+
+- [Go applications](/collectors/python.d.plugin/go_expvar/README.md): Monitor any Go application that exposes its
+ metrics with the `expvar` package from the Go standard library.
+- [Java Spring Boot 2
+ applications](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/springboot2/) (Go version):
+ Monitor running Java Spring Boot 2 applications that expose their metrics with the use of the Spring Boot Actuator.
+- [Java Spring Boot 2 applications](/collectors/python.d.plugin/springboot/README.md) (Python version): Monitor
+ running Java Spring Boot applications that expose their metrics with the use of the Spring Boot Actuator.
+- [statsd](/collectors/statsd.plugin/README.md): Implement a high performance `statsd` server for Netdata.
+- [phpDaemon](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/phpdaemon/): Collect worker
+ statistics (total, active, idle), and uptime for web and network applications.
+- [uWSGI](/collectors/python.d.plugin/uwsgi/README.md): Monitor performance metrics exposed by the uWSGI Stats
+ Server.
+
+### Containers and VMs
+
+- [Docker containers](/collectors/cgroups.plugin/README.md): Monitor the health and performance of individual Docker
+ containers using the cgroups collector plugin.
+- [DockerD](/collectors/python.d.plugin/dockerd/README.md): Collect container health statistics.
+- [Docker Engine](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/docker_engine/): Collect
+ runtime statistics from the `docker` daemon using the `metrics-address` feature.
+- [Docker Hub](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/dockerhub/): Collect statistics
+ about Docker repositories, such as pulls, starts, status, time since last update, and more.
+- [Libvirt](/collectors/cgroups.plugin/README.md): Monitor the health and performance of individual Libvirt containers
+ using the cgroups collector plugin.
+- [LXC](/collectors/cgroups.plugin/README.md): Monitor the health and performance of individual LXC containers using
+ the cgroups collector plugin.
+- [LXD](/collectors/cgroups.plugin/README.md): Monitor the health and performance of individual LXD containers using
+ the cgroups collector plugin.
+- [systemd-nspawn](/collectors/cgroups.plugin/README.md): Monitor the health and performance of individual
+ systemd-nspawn containers using the cgroups collector plugin.
+- [vCenter Server Appliance](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/vcsa/): Monitor
+ appliance system, components, and software update health statuses via the Health API.
+- [vSphere](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/vsphere/): Collect host and virtual
+ machine performance metrics.
+- [Xen/XCP-ng](/collectors/xenstat.plugin/README.md): Collect XenServer and XCP-ng metrics using `libxenstat`.
+
+### Data stores
+
+- [CockroachDB](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/cockroachdb/): Monitor various
+ database components using `_status/vars` endpoint.
+- [Consul](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/consul/): Capture service and unbound
+ checks status (passing, warning, critical, maintenance).
+- [Couchbase](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/couchbase/): Gather per-bucket
+ metrics from any number of instances of the distributed JSON document database.
+- [CouchDB](/collectors/python.d.plugin/couchdb/README.md): Monitor database health and performance metrics
+ (reads/writes, HTTP traffic, replication status, etc).
+- [MongoDB](/collectors/python.d.plugin/mongodb/README.md): Collect database performance metrics by reading the
+  server's response to the `stats` command (stats interface).
+- [MySQL](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/mysql/): Collect database global,
+ replication and per user statistics.
+- [OracleDB](/collectors/python.d.plugin/oracledb/README.md): Monitor database performance and health metrics.
+- [Pika](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/pika/): Gather metrics, such as clients,
+  memory usage, queries, and more, from the Redis interface-compatible database.
+- [Postgres](/collectors/python.d.plugin/postgres/README.md): Collect database health and performance metrics.
+- [ProxySQL](/collectors/python.d.plugin/proxysql/README.md): Monitor database backend and frontend performance
+ metrics.
+- [Redis (Go)](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/redis/): Monitor status from any
+ number of database instances by reading the server's response to the `INFO ALL` command.
+- [Redis (Python)](/collectors/python.d.plugin/redis/): Monitor database status by reading the server's response to
+ the `INFO` command.
+- [RethinkDB](/collectors/python.d.plugin/rethinkdbs/README.md): Collect database server and cluster statistics.
+- [Riak KV](/collectors/python.d.plugin/riakkv/README.md): Collect database stats from the `/stats` endpoint.
+- [Zookeeper](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/zookeeper/): Monitor application
+ health metrics reading the server's response to the `mntr` command.
+
+### Distributed computing
+
+- [BOINC](/collectors/python.d.plugin/boinc/README.md): Monitor the total number of tasks, open tasks, and task
+ states for the distributed computing client.
+- [Gearman](/collectors/python.d.plugin/gearman/README.md): Collect application summary (queued, running) and per-job
+ worker statistics (queued, idle, running).
+
+### Email
+
+- [Dovecot](/collectors/python.d.plugin/dovecot/README.md): Collect email server performance metrics by reading the
+ server's response to the `EXPORT global` command.
+- [EXIM](/collectors/python.d.plugin/exim/README.md): Uses the `exim` tool to monitor the queue length of a
+ mail/message transfer agent (MTA).
+- [Postfix](/collectors/python.d.plugin/postfix/README.md): Uses the `postqueue` tool to monitor the queue length of a
+ mail/message transfer agent (MTA).
+
+### Kubernetes
+
+- [Kubelet](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/k8s_kubelet/): Monitor one or more
+ instances of the Kubelet agent and collects metrics on number of pods/containers running, volume of Docker
+ operations, and more.
+- [kube-proxy](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/k8s_kubeproxy/): Collect
+ metrics, such as syncing proxy rules and REST client requests, from one or more instances of `kube-proxy`.
+- [Service discovery](https://github.com/netdata/agent-service-discovery/): Find what services are running on a
+  cluster's pods, convert that into configuration files, and export them so they can be monitored by Netdata.
+
+### Logs
+
+- [Fluentd](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/fluentd/): Gather application
+ plugins metrics from an endpoint provided by `in_monitor plugin`.
+- [Logstash](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/logstash/): Monitor JVM threads,
+ memory usage, garbage collection statistics, and more.
+- [OpenVPN status logs](/collectors/python.d.plugin/ovpn_status_log/): Parse server log files and provide summary
+ (client, traffic) metrics.
+- [Squid web server logs](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/squidlog/): Tail Squid
+ access logs to return the volume of requests, types of requests, bandwidth, and much more.
+- [Web server logs (Go version for Apache,
+ NGINX)](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/weblog/): Tail access logs and provide
+ very detailed web server performance statistics. This module is able to parse 200k+ rows in less than half a second.
+- [Web server logs (Python version for Apache, NGINX, Squid)](/collectors/python.d.plugin/web_log/): Tail access log
+ file and collect web server/caching proxy metrics.
+
+### Messaging
+
+- [ActiveMQ](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/activemq/): Collect message broker
+ queues and topics statistics using the ActiveMQ Console API.
+- [Beanstalk](/collectors/python.d.plugin/beanstalk/README.md): Collect server and tube-level statistics, such as CPU
+ usage, jobs rates, commands, and more.
+- [Pulsar](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/pulsar/): Collect summary,
+ namespaces, and topics performance statistics.
+- [RabbitMQ (Go)](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/rabbitmq/): Collect message
+ broker overview, system and per virtual host metrics.
+- [RabbitMQ (Python)](/collectors/python.d.plugin/rabbitmq/README.md): Collect message broker global and per virtual
+ host metrics.
+- [VerneMQ](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/vernemq/): Monitor MQTT broker
+  health and performance metrics. It collects all available info for both MQTTv3 and v5 communication.
+
+### Network
+
+- [Bind 9](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/bind/): Collect nameserver summary
+ performance statistics via a web interface (`statistics-channels` feature).
+- [Chrony](/collectors/python.d.plugin/chrony/README.md): Monitor the precision and statistics of a local `chronyd`
+ server.
+- [CoreDNS](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/coredns/): Measure DNS query round
+ trip time.
+- [Dnsmasq](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/dnsmasq_dhcp/): Automatically
+  detects all configured `Dnsmasq` DHCP ranges and monitors their utilization.
+- [DNSdist (Go)](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/dnsdist/): Collect
+ load-balancer performance and health metrics.
+- [DNSdist (Python)](/collectors/python.d.plugin/dnsdist/README.md): Collect load-balancer performance and health
+ metrics.
+- [Dnsmasq DNS Forwarder](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/dnsmasq/): Gather
+ queries, entries, operations, and events for the lightweight DNS forwarder.
+- [dns_query](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/dnsquery/): Monitor the round
+ trip time for DNS queries in milliseconds.
+- [DNS Query Time](/collectors/python.d.plugin/dns_query_time/README.md): Measure DNS query round trip time.
+- [Freeradius (Go)](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/freeradius/): Collect
+ server authentication and accounting statistics from the `status server`.
+- [Freeradius (Python)](/collectors/python.d.plugin/freeradius/README.md): Collect server authentication and
+ accounting statistics from the `status server` using the `radclient` tool.
+- [Libreswan](/collectors/charts.d.plugin/libreswan/): Collect bytes-in, bytes-out, and uptime metrics.
+- [Icecast](/collectors/python.d.plugin/icecast/README.md): Monitor the number of listeners for active sources.
+- [ISC BIND](/collectors/node.d.plugin/named/README.md): Collect nameserver summary performance statistics via a web
+ interface (`statistics-channels` feature).
+- [ISC Bind (RDNC)](/collectors/python.d.plugin/bind_rndc/README.md): Collect nameserver summary performance
+ statistics using the `rndc` tool.
+- [ISC DHCP (Go)](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/isc_dhcpd): Reads a
+ `dhcpd.leases` file and collects metrics on total active leases, pool active leases, and pool utilization.
+- [ISC DHCP (Python)](/collectors/python.d.plugin/isc_dhcpd/README.md): Reads `dhcpd.leases` file and reports DHCP
+ pools utilization and leases statistics (total number, leases per pool).
+- [OpenLDAP](/collectors/python.d.plugin/openldap/README.md): Provides statistics information from the OpenLDAP
+ (`slapd`) server.
+- [NSD](/collectors/python.d.plugin/nsd/README.md): Monitor nameserver performance metrics using the `nsd-control`
+ tool.
+- [NTP daemon](/collectors/python.d.plugin/ntpd/README.md): Monitor the system variables of the local `ntpd` daemon
+ (optionally including variables of the polled peers) using the NTP Control Message Protocol via a UDP socket.
+- [OpenSIPS](/collectors/charts.d.plugin/opensips/README.md): Collect server health and performance metrics using the
+ `opensipsctl` tool.
+- [OpenVPN](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/openvpn/): Gather server summary
+ (client, traffic) and per user metrics (traffic, connection time) stats using `management-interface`.
+- [Pi-hole](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/pihole/): Monitor basic (DNS
+ queries, clients, blocklist) and extended (top clients, top permitted, and blocked domains) statistics using the PHP
+ API.
+- [PowerDNS Authoritative Server
+ (Go)](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/powerdns): Monitor one or more instances
+ of the nameserver software to collect questions, events, and latency metrics.
+- [PowerDNS Recursor (Go)](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/powerdns_recursor):
+ Gather incoming/outgoing questions, drops, timeouts, and cache usage from any number of DNS recursor instances.
+- [PowerDNS (Python)](/collectors/python.d.plugin/powerdns/README.md): Monitor authoritative server and recursor
+ statistics.
+- [RetroShare](/collectors/python.d.plugin/retroshare/README.md): Monitor application bandwidth, peers, and DHT
+ metrics.
+- [Tor](/collectors/python.d.plugin/tor/README.md): Capture traffic usage statistics using the Tor control port.
+- [Unbound](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/unbound/): Collect DNS resolver
+ summary and extended system and per thread metrics via the `remote-control` interface.
+
+### Provisioning
+
+- [Puppet](/collectors/python.d.plugin/puppet/README.md): Monitor the status of Puppet Server and Puppet DB.
+
+### Remote devices
+
+- [AM2320](/collectors/python.d.plugin/am2320/README.md): Monitor sensor temperature and humidity.
+- [Access point](/collectors/charts.d.plugin/ap/README.md): Monitor client, traffic and signal metrics using the `iw`
+ tool.
+- [APC UPS](/collectors/charts.d.plugin/apcupsd/README.md): Capture status information using the `apcaccess` tool.
+- [Energi Core (Go)](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/energid): Monitor
+ blockchain indexes, memory usage, network usage, and transactions of wallet instances.
+- [Energi Core (Python)](/collectors/python.d.plugin/energid/README.md): Monitor blockchain, memory, network, and
+ unspent transactions statistics.
+- [Fronius Symo](/collectors/node.d.plugin/fronius/): Collect power, consumption, autonomy, energy, and inverter
+ statistics.
+- [UPS/PDU](/collectors/charts.d.plugin/nut/README.md): Read the status of UPS/PDU devices using the `upsc` tool.
+- [SMA Sunny WebBox](/collectors/node.d.plugin/sma_webbox/README.md): Collect power statistics.
+- [SNMP devices](/collectors/node.d.plugin/snmp/README.md): Gather data using the SNMP protocol.
+- [Stiebel Eltron ISG](/collectors/node.d.plugin/stiebeleltron/README.md): Collect metrics from heat pump and hot
+ water installations.
+- [1-Wire sensors](/collectors/python.d.plugin/w1sensor/README.md): Monitor sensor temperature.
+
+### Search
+
+- [Elasticsearch (Go)](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/elasticsearch): Collect
+ dozens of metrics on search engine performance from local nodes and local indices. Includes cluster health and
+ statistics.
+- [Elasticsearch (Python)](/collectors/python.d.plugin/elasticsearch/README.md): Collect search engine performance and
+ health statistics. Optionally collects per-index metrics.
+- [Solr](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/solr/): Collect application search
+ requests, search errors, update requests, and update errors statistics.
+
+### Storage
+
+- [Ceph](/collectors/python.d.plugin/ceph/README.md): Monitor the Ceph cluster usage and server data consumption.
+- [HDFS](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/hdfs/): Monitor health and performance
+ metrics for filesystem datanodes and namenodes.
+- [IPFS](/collectors/python.d.plugin/ipfs/README.md): Collect file system bandwidth, peers, and repo metrics.
+- [Scaleio](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/scaleio/): Monitor storage system,
+ storage pools, and SDCS health and performance metrics via VxFlex OS Gateway API.
+- [Samba](/collectors/python.d.plugin/samba/README.md): Collect file sharing metrics using the `smbstatus` tool.
+
+### Web
+
+- [Apache (Go)](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/apache/): Collect Apache web
+ server performance metrics via the `server-status?auto` endpoint.
+- [Apache (Python)](/collectors/python.d.plugin/apache/README.md): Collect Apache web server performance metrics via
+ the `server-status?auto` endpoint.
+- [HAProxy](/collectors/python.d.plugin/haproxy/README.md): Collect frontend, backend, and health metrics.
+- [HTTP endpoints (Go)](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/httpcheck/): Monitor
+ any HTTP endpoint's availability and response time.
+- [HTTP endpoints (Python)](/collectors/python.d.plugin/httpcheck/README.md): Monitor any HTTP endpoint's
+ availability and response time.
+- [Lighttpd](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/lighttpd/): Collect web server
+ performance metrics using the `server-status?auto` endpoint.
+- [Lighttpd2](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/lighttpd2/): Collect web server
+ performance metrics using the `server-status?format=plain` endpoint.
+- [Litespeed](/collectors/python.d.plugin/litespeed/README.md): Collect web server data (network, connection,
+ requests, cache) by reading `.rtreport*` files.
+- [Nginx (Go)](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/nginx/): Monitor web server
+ status information by gathering metrics via `ngx_http_stub_status_module`.
+- [Nginx (Python)](/collectors/python.d.plugin/nginx/README.md): Monitor web server status information by gathering
+ metrics via `ngx_http_stub_status_module`.
+- [Nginx Plus](/collectors/python.d.plugin/nginx_plus/README.md): Collect global and per-server zone, upstream, and
+ cache metrics.
+- [Nginx VTS](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/nginxvts/): Gathers metrics from
+ any Nginx deployment with the _virtual host traffic status module_ enabled, including metrics on uptime, memory
+  usage, cache, and more.
+- [PHP-FPM (Go)](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/phpfpm/): Collect application
+ summary and processes health metrics by scraping the status page (`/status?full`).
+- [PHP-FPM (Python)](/collectors/python.d.plugin/phpfpm/README.md): Collect application summary and processes health
+ metrics by scraping the status page (`/status?full`).
+- [TCP endpoints (Go)](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/portcheck/): Monitor any
+ TCP endpoint's availability and response time.
+- [TCP endpoints (Python)](/collectors/python.d.plugin/portcheck/README.md): Monitor any TCP endpoint's availability
+ and response time.
+- [Spigot Minecraft servers](/collectors/python.d.plugin/spigotmc/README.md): Monitor average tick rate and number
+ of users.
+- [Squid](/collectors/python.d.plugin/squid/README.md): Monitor client and server bandwidth/requests by gathering
+ data from the Cache Manager component.
+- [Tengine](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/tengine/): Monitor web server
+ statistics using information provided by `ngx_http_reqstat_module`.
+- [Tomcat](/collectors/python.d.plugin/tomcat/README.md): Collect web server performance metrics from the Manager App
+ (`/manager/status?XML=true`).
+- [Traefik](/collectors/python.d.plugin/traefik/README.md): Uses Traefik's Health API to provide statistics.
+- [Varnish](/collectors/python.d.plugin/varnish/README.md): Provides HTTP accelerator global, backends (VBE), and
+ disks (SMF) statistics using the `varnishstat` tool.
+- [x509 check](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/x509check/): Monitor certificate
+ expiration time.
+- [Whois domain expiry](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/whoisquery/): Check the
+  remaining time until a given domain expires.
+
+## System collectors
+
+The Netdata Agent can collect these system- and hardware-level metrics using a variety of collectors, some of which
+(such as `proc.plugin`) collect multiple types of metrics simultaneously.
+
+### Applications
+
+- [Fail2ban](/collectors/python.d.plugin/fail2ban/README.md): Parses configuration files to detect all jails, then
+ uses log files to report ban rates and volume of banned IPs.
+- [Monit](/collectors/python.d.plugin/monit/README.md): Monitor statuses of targets (service-checks) using the XML
+ stats interface.
+- [WMI (Windows Management Instrumentation)
+ exporter](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/wmi/): Collect CPU, memory,
+  network, disk, OS, system, and log-in metrics by scraping `wmi_exporter`.
+
+### Disks and filesystems
+
+- [BCACHE](/collectors/proc.plugin/README.md): Monitor BCACHE statistics with the `proc.plugin` collector.
+- [Block devices](/collectors/proc.plugin/README.md): Gather metrics about the health and performance of block
+  devices using the `proc.plugin` collector.
+- [Btrfs](/collectors/proc.plugin/README.md): Monitor Btrfs filesystems with the `proc.plugin` collector.
+- [Device mapper](/collectors/proc.plugin/README.md): Gather metrics about the Linux device mapper with the proc
+ collector.
+- [Disk space](/collectors/diskspace.plugin/README.md): Collect disk space usage metrics on Linux mount points.
+- [Files and directories](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/filecheck): Gather
+ metrics about the existence, modification time, and size of files or directories.
+- [ioping.plugin](/collectors/ioping.plugin/README.md): Measure disk read/write latency.
+- [NFS file servers and clients](/collectors/proc.plugin/README.md): Gather operations, utilization, and space usage
+  using the `proc.plugin` collector.
+- [RAID arrays](/collectors/proc.plugin/README.md): Collect health, disk status, operation status, and more with the
+  `proc.plugin` collector.
+- [Veritas Volume Manager](/collectors/proc.plugin/README.md): Gather metrics about the Veritas Volume Manager (VVM).
+- [ZFS](/collectors/proc.plugin/README.md): Monitor bandwidth and utilization of ZFS disks/partitions using the proc
+ collector.
+
+### eBPF
+
+- [Files](/collectors/ebpf.plugin/README.md): Provides information about how often a system calls kernel
+ functions related to file descriptors using the eBPF collector.
+- [Virtual file system (VFS)](/collectors/ebpf.plugin/README.md): Monitor IO, errors, deleted objects, and
+ more for kernel virtual file systems (VFS) using the eBPF collector.
+- [Processes](/collectors/ebpf.plugin/README.md): Monitor threads, task exits, and errors using the eBPF collector.
+
+### Hardware
+
+- [Adaptec RAID](/collectors/python.d.plugin/adaptec_raid/README.md): Monitor logical and physical devices health
+ metrics using the `arcconf` tool.
+- [CUPS](/collectors/cups.plugin/README.md): Monitor CUPS.
+- [FreeIPMI](/collectors/freeipmi.plugin/README.md): Uses `libipmimonitoring-dev` or `libipmimonitoring-devel` to
+ monitor the number of sensors, temperatures, voltages, currents, and more.
+- [Hard drive temperature](/collectors/python.d.plugin/hddtemp/README.md): Monitor the temperature of storage
+ devices.
+- [HP Smart Storage Arrays](/collectors/python.d.plugin/hpssa/README.md): Monitor controller, cache module, logical
+ and physical drive state, and temperature using the `ssacli` tool.
+- [MegaRAID controllers](/collectors/python.d.plugin/megacli/README.md): Collect adapter, physical drives, and
+ battery stats using the `megacli` tool.
+- [NVIDIA GPU](/collectors/python.d.plugin/nvidia_smi/README.md): Monitor performance metrics (memory usage, fan
+ speed, pcie bandwidth utilization, temperature, and more) using the `nvidia-smi` tool.
+- [Sensors](/collectors/python.d.plugin/sensors/README.md): Reads system sensors information (temperature, voltage,
+ electric current, power, and more) from `/sys/devices/`.
+- [S.M.A.R.T](/collectors/python.d.plugin/smartd_log/README.md): Reads SMART Disk Monitoring daemon logs.
+
+### Memory
+
+- [Available memory](/collectors/proc.plugin/README.md): Track changes in available RAM using the `proc.plugin`
+ collector.
+- [Committed memory](/collectors/proc.plugin/README.md): Monitor committed memory using the `proc.plugin` collector.
+- [Huge pages](/collectors/proc.plugin/README.md): Gather metrics about huge pages in Linux and FreeBSD with the
+ `proc.plugin` collector.
+- [KSM](/collectors/proc.plugin/README.md): Measure the amount of merging, savings, and effectiveness using the
+ `proc.plugin` collector.
+- [Memcached](/collectors/python.d.plugin/memcached/README.md): Collect memory-caching system performance metrics.
+- [Numa](/collectors/proc.plugin/README.md): Gather metrics on the number of non-uniform memory access (NUMA) events
+ every second using the `proc.plugin` collector.
+- [Page faults](/collectors/proc.plugin/README.md): Collect the number of memory page faults per second using the
+ `proc.plugin` collector.
+- [RAM](/collectors/proc.plugin/README.md): Collect metrics on system RAM, available RAM, and more using the
+ `proc.plugin` collector.
+- [SLAB](/collectors/slabinfo.plugin/README.md): Collect kernel SLAB details on Linux systems.
+- [swap](/collectors/proc.plugin/README.md): Monitor the amount of free and used swap at every second using the
+ `proc.plugin` collector.
+- [Writeback memory](/collectors/proc.plugin/README.md): Collect how much memory is actively being written to disk at
+ every second using the `proc.plugin` collector.
+
+### Networks
+
+- [Access points](/collectors/charts.d.plugin/ap/README.md): Visualize data related to access points.
+- [fping.plugin](fping.plugin/README.md): Measure network latency, jitter and packet loss between the monitored node
+  and any number of remote network endpoints.
+- [Netfilter](/collectors/nfacct.plugin/README.md): Collect netfilter firewall, connection tracker, and accounting
+ metrics using `libmnl` and `libnetfilter_acct`.
+- [Network stack](/collectors/proc.plugin/README.md): Monitor the networking stack for errors, TCP connection aborts,
+ bandwidth, and more.
+- [Network QoS](/collectors/tc.plugin/README.md): Collect traffic QoS metrics (`tc`) of Linux network interfaces.
+- [SYNPROXY](/collectors/proc.plugin/README.md): Monitor entries used, SYN packets received, TCP cookies, and more.
+
+### Operating systems
+
+- [freebsd.plugin](freebsd.plugin/README.md): Collect resource usage and performance data on FreeBSD systems.
+- [macOS](/collectors/macos.plugin/README.md): Collect resource usage and performance data on macOS systems.
+
+### Processes
+
+- [Applications](/collectors/apps.plugin/README.md): Gather CPU, disk, memory, network, eBPF, and other metrics per
+ application using the `apps.plugin` collector.
+- [systemd](/collectors/cgroups.plugin/README.md): Monitor the CPU and memory usage of systemd services using the
+ `cgroups.plugin` collector.
+- [systemd unit states](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/systemdunits): See the
+ state (active, inactive, activating, deactivating, failed) of various systemd unit types.
+- [System processes](/collectors/proc.plugin/README.md): Collect metrics on system load and total processes running
+ using `/proc/loadavg` and the `proc.plugin` collector.
+- [Uptime](/collectors/proc.plugin/README.md): Monitor the uptime of a system using the `proc.plugin` collector.
+
+### Resources
+
+- [CPU frequency](/collectors/proc.plugin/README.md): Monitor CPU frequency, as set by the `cpufreq` kernel module,
+ using the `proc.plugin` collector.
+- [CPU idle](/collectors/proc.plugin/README.md): Measure CPU idle every second using the `proc.plugin` collector.
+- [CPU performance](/collectors/perf.plugin/README.md): Collect CPU performance metrics using performance monitoring
+ units (PMU).
+- [CPU throttling](/collectors/proc.plugin/README.md): Gather metrics about thermal throttling using the `/proc/stat`
+ module and the `proc.plugin` collector.
+- [CPU utilization](/collectors/proc.plugin/README.md): Capture CPU utilization, both system-wide and per-core, using
+ the `/proc/stat` module and the `proc.plugin` collector.
+- [Entropy](/collectors/proc.plugin/README.md): Monitor the available entropy on a system using the `proc.plugin`
+ collector.
+- [Interprocess Communication (IPC)](/collectors/proc.plugin/README.md): Monitor IPC semaphores and shared memory
+ using the `proc.plugin` collector.
+- [Interrupts](/collectors/proc.plugin/README.md): Monitor interrupts per second using the `proc.plugin` collector.
+- [IdleJitter](/collectors/idlejitter.plugin/README.md): Measure CPU latency and jitter on all operating systems.
+- [SoftIRQs](/collectors/proc.plugin/README.md): Collect metrics on SoftIRQs, both system-wide and per-core, using the
+ `proc.plugin` collector.
+- [SoftNet](/collectors/proc.plugin/README.md): Capture SoftNet events per second, both system-wide and per-core,
+ using the `proc.plugin` collector.
+
+### Users
+
+- [systemd-logind](/collectors/python.d.plugin/logind/README.md): Monitor active sessions, users, and seats tracked
+ by `systemd-logind` or `elogind`.
+- [User/group usage](/collectors/apps.plugin/README.md): Gather CPU, disk, memory, network, and other metrics per user
+ and user group using the `apps.plugin` collector.
+
+## Netdata collectors
+
+These collectors are recursive in nature, in that they monitor some function of the Netdata Agent itself. Some
+collectors are described only in code and associated charts in Netdata dashboards.
+
+- [ACLK (code only)](https://github.com/netdata/netdata/blob/master/aclk/legacy/aclk_stats.c): View whether a Netdata
+ Agent is connected to Netdata Cloud via the [ACLK](/aclk/README.md), the volume of queries, process times, and more.
+- [Alarms](https://learn.netdata.cloud/docs/agent/collectors/python.d.plugin/alarms): This collector creates an
+ <strong>Alarms</strong> menu with one line plot showing the alarm states of a Netdata Agent over time.
+- [Anomalies](https://learn.netdata.cloud/docs/agent/collectors/python.d.plugin/anomalies): This collector uses the
+ Python PyOD library to perform unsupervised anomaly detection on your Netdata charts and/or dimensions.
+- [Exporting (code only)](https://github.com/netdata/netdata/blob/master/exporting/send_internal_metrics.c): Gather
+ metrics on CPU utilization for the [exporting engine](/exporting/README.md), and specific metrics for each enabled
+ exporting connector.
+- [Global statistics (code only)](https://github.com/netdata/netdata/blob/master/daemon/global_statistics.c): See
+ metrics on the CPU utilization, network traffic, volume of web clients, API responses, database engine usage, and
+ more.
+
+## Orchestrators
+
+Plugin orchestrators organize and run many of the above collectors.
+
+If you're interested in developing a new collector that you'd like to contribute to Netdata, we highly recommend using
+the `go.d.plugin`.
+
+- [go.d.plugin](https://github.com/netdata/go.d.plugin): An orchestrator for data collection modules written in `go`.
+- [python.d.plugin](python.d.plugin/README.md): An orchestrator for data collection modules written in `python` v2/v3.
+- [charts.d.plugin](charts.d.plugin/README.md): An orchestrator for data collection modules written in `bash` v4+.
+- [node.d.plugin](node.d.plugin/README.md): An orchestrator for data collection modules written in `node.js`.
+
+## Third-party collectors
+
+These collectors are developed and maintained by third parties and, unlike the other collectors, are not installed by
+default. To use a third-party collector, visit their GitHub/documentation page and follow their installation procedures.
+
+- [CyberPower UPS](https://github.com/HawtDogFlvrWtr/netdata_cyberpwrups_plugin): Polls CyberPower UPS data using
+ PowerPanel® Personal Linux.
+- [Logged-in users](https://github.com/veksh/netdata-numsessions): Collect the number of currently logged-on users.
+- [nim-netdata-plugin](https://github.com/FedericoCeratto/nim-netdata-plugin): A helper to create native Netdata
+ plugins using Nim.
+- [Nvidia GPUs](https://github.com/coraxx/netdata_nv_plugin): Monitor Nvidia GPUs.
+- [Teamspeak 3](https://github.com/coraxx/netdata_ts3_plugin): Polls active users and bandwidth from TeamSpeak 3
+ servers.
+- [SSH](https://github.com/Yaser-Amiri/netdata-ssh-module): Monitor failed authentication requests of an SSH server.
+
+## Etc
+
+- [checks.plugin](checks.plugin/README.md): A debugging collector, disabled by default.
+- [charts.d example](charts.d.plugin/example/README.md): An example `charts.d` collector.
+- [python.d example](python.d.plugin/example/README.md): An example `python.d` collector.
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2FCOLLECTORS&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/Makefile.am b/collectors/Makefile.am
new file mode 100644
index 0000000..460612c
--- /dev/null
+++ b/collectors/Makefile.am
@@ -0,0 +1,41 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+SUBDIRS = \
+ plugins.d \
+ apps.plugin \
+ cgroups.plugin \
+ charts.d.plugin \
+ checks.plugin \
+ cups.plugin \
+ diskspace.plugin \
+ fping.plugin \
+ ioping.plugin \
+ freebsd.plugin \
+ freeipmi.plugin \
+ idlejitter.plugin \
+ macos.plugin \
+ nfacct.plugin \
+ xenstat.plugin \
+ perf.plugin \
+ node.d.plugin \
+ proc.plugin \
+ python.d.plugin \
+ slabinfo.plugin \
+ statsd.plugin \
+ ebpf.plugin \
+ tc.plugin \
+ $(NULL)
+
+usercustompluginsconfigdir=$(configdir)/custom-plugins.d
+usergoconfigdir=$(configdir)/go.d
+
+# Explicitly install directories to avoid permission issues due to umask
+install-exec-local:
+ $(INSTALL) -d $(DESTDIR)$(usercustompluginsconfigdir)
+ $(INSTALL) -d $(DESTDIR)$(usergoconfigdir)
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/collectors/QUICKSTART.md b/collectors/QUICKSTART.md
new file mode 100644
index 0000000..a691ffc
--- /dev/null
+++ b/collectors/QUICKSTART.md
@@ -0,0 +1,125 @@
+<!--
+title: "Collectors quickstart"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/QUICKSTART.md
+-->
+
+# Collectors quickstart
+
+In this quickstart guide, you'll learn how to enable collectors so you can get metrics from your favorite applications
+and services.
+
+This guide will not cover advanced collector features, such as enabling/disabling entire plugins.
+
+## What's in this quickstart guide
+
+- [Find the collector for your application or service](#find-the-collector-for-your-application-or-service)
+- [Configure your application or service for monitoring](#configure-your-application-or-service-for-monitoring)
+- [Edit the collector's configuration file](#edit-the-collectors-configuration-file)
+- [Enable the collector](#enable-the-collector)
+
+## Find the collector for your application or service
+
+Netdata has _pre-installed_ collectors for hundreds of popular applications and services. You don't need to install
+anything to collect metrics from many popular services, like Nginx web servers, MySQL/MariaDB databases, and much more.
+
+To find whether Netdata has a pre-installed collector for your favorite app/service, check out our [collector support
+list](COLLECTORS.md). The only exceptions are the [third-party collectors](COLLECTORS.md#third-party-plugins), which
+you do need to install yourself. However, this quickstart guide will focus on pre-installed collectors.
+
+When you find a collector you're interested in, take note of its orchestrator. These are in the headings above each
+table, and there are four: Bash, Go, Node, and Python. They go by their respective names: `charts.d`, `go.d`, `node.d`,
+and `python.d`.
+
+> If there is a collector written in both Go and Python, it's better to choose the Go-based version, as we will
+> eventually deprecate most Python-based collectors.
+
+From here on out, this quickstart guide will use the [Nginx
+collector](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/nginx) as an example to showcase the
+process of configuring and enabling one of Netdata's pre-installed collectors.
+
+## Configure your application or service for monitoring
+
+Every collector's documentation comes with instructions on how to configure your app/service to make it available to
+Netdata's collector. Our [collector support list](COLLECTORS.md) contains links to each collector's documentation page
+so you can learn more.
+
+For example, the [Nginx collector
+documentation](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/nginx) states that your Nginx
+installation must have the `stub_status` module configured correctly, in addition to an active `stub_status` page, for
+Netdata to monitor it. You can confirm whether you have the module enabled with the following command:
+
+```bash
+nginx -V 2>&1 | grep -o with-http_stub_status_module
+```
+
+If this command returns nothing, you'll need to [enable the `stub_status`
+module](https://www.nginx.com/blog/monitoring-nginx/).
+
+Next, edit your `/etc/nginx/sites-enabled/default` file to include a `location` block with the following, which enables
+the `stub_status` page:
+
+```conf
+server {
+ ...
+
+ location /nginx_status {
+ stub_status;
+ }
+}
+```
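+
+To verify that the page responds before wiring up Netdata, you can query it directly. This assumes Nginx listens on
+`localhost:80` and uses the `location` block shown above:
+
+```bash
+curl http://localhost/nginx_status
+```
+
+If it works, you should see a short plain-text report that begins with `Active connections:`.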
+
+At this point, your Nginx installation is fully configured and ready for Netdata to monitor it. Next, you'll configure
+your collector.
+
+## Edit the collector's configuration file
+
+This step may not be required based on how you configured your app/service, as each collector comes with a few
+pre-configured jobs that look for the app/service in common and expected locations. For example, the Nginx collector
+looks for a `stub_status` page at `http://localhost/stub_status` and `http://127.0.0.1/stub_status`, which allows it to
+auto-detect almost all local Nginx web servers.
+
+Despite Netdata's auto-detection capabilities, it's important to know how to edit collector configuration files.
+
+You should always edit configuration files with the `edit-config` script that comes with every installation of Netdata.
+To edit a collector configuration file, navigate to your [Netdata configuration directory](/docs/configure/nodes.md).
+Launch `edit-config` with the path to the collector's configuration file.
+
+How do you find that path to the collector's configuration file? Look under the **Configuration** heading in the
+collector's documentation. Each file contains a short code block with the relevant command.
+
+For example, the [Nginx collector](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/nginx) has its
+configuration file at `go.d/nginx.conf`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory
+sudo ./edit-config go.d/nginx.conf
+```
+
+This file contains all of the possible job parameters to help you monitor Nginx in all sorts of complex deployments. At
+the bottom of the file is a `[JOB]` section, which contains the two default jobs. Adjust these jobs as needed, using
+those parameters as a reference.
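+
+For the Go orchestrator, jobs are defined in YAML. A minimal sketch of an Nginx job, assuming the default
+`stub_status` URL, might look like this (see the file itself for the full list of parameters):
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1/stub_status
+```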
+
+## Enable the collector
+
+Most collectors are enabled and will auto-detect their app/service without manual configuration. However, you need to
+restart Netdata to trigger the auto-detection process.
+
+To restart Netdata on most systems, use `service netdata restart`. For other systems, see the [other restart
+methods](/docs/getting-started.md#start-stop-and-restart-netdata).
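+
+For example, on a typical `systemd`-based distribution:
+
+```bash
+sudo systemctl restart netdata
+```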
+
+Open Netdata's dashboard in your browser, or refresh the page if you already have it open. You should now see a new
+entry in the menu and new interactive charts!
+
+## What's next?
+
+Collector not working? Learn about collector troubleshooting in our [collector
+reference](REFERENCE.md#troubleshoot-a-collector).
+
+View our [collectors guides](/collectors/README.md#guides) to get specific instructions on enabling new and
+popular collectors.
+
+Finally, learn more advanced collector features, such as disabling plugins or developing a custom collector, in our
+[internal plugin API](/collectors/REFERENCE.md#internal-plugins-api) or our [external plugin
+docs](/collectors/plugins.d/README.md).
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2FQUICKSTART&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/README.md b/collectors/README.md
new file mode 100644
index 0000000..a37a7e8
--- /dev/null
+++ b/collectors/README.md
@@ -0,0 +1,52 @@
+<!--
+title: "Collecting metrics"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/README.md
+-->
+
+# Collecting metrics
+
+Netdata can collect metrics from hundreds of different sources, be they internal data created by the system itself, or
+external data created by services or applications. To see _all_ of the sources Netdata collects from, view our [list of
+supported collectors](/collectors/COLLECTORS.md), and then view our [quickstart guide](/collectors/QUICKSTART.md) to get
+up-and-running.
+
+There are two essential points to understand about how collecting metrics works in Netdata:
+
+- All collectors are **installed by default** with every installation of Netdata. You do not need to install
+ collectors manually to collect metrics from new sources.
+- Upon startup, Netdata will **auto-detect** any application or service that has a
+ [collector](/collectors/COLLECTORS.md), as long as both the collector and the app/service are configured correctly.
+
+Most users will want to enable a new Netdata collector for their app/service. For those details, see our [quickstart
+guide](/collectors/QUICKSTART.md).
+
+## Take your next steps with collectors
+
+[Collectors quickstart](/collectors/QUICKSTART.md)
+
+[Supported collectors list](/collectors/COLLECTORS.md)
+
+[Collectors configuration reference](/collectors/REFERENCE.md)
+
+## Guides
+
+[Monitor Nginx or Apache web server log files with Netdata](/docs/guides/collect-apache-nginx-web-logs.md)
+
+[Monitor CockroachDB metrics with Netdata](/docs/guides/monitor-cockroachdb.md)
+
+[Monitor Unbound DNS servers with Netdata](/docs/guides/collect-unbound-metrics.md)
+
+[Monitor a Hadoop cluster with Netdata](/docs/guides/monitor-hadoop-cluster.md)
+
+## Related features
+
+**[Dashboards](/web/README.md)**: Visualize your newly-collected metrics in real-time using Netdata's [built-in
+dashboard](/web/gui/README.md).
+
+**[Backends](/backends/README.md)**: Extend our built-in [database engine](/database/engine/README.md), which supports
+long-term metrics storage, by archiving metrics to external databases like Graphite, Prometheus, MongoDB,
+TimescaleDB, and more.
+
+**[Exporting](/exporting/README.md)**: An experimental refactoring of our backends system with a modular system and
+support for exporting metrics to multiple systems simultaneously.
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/REFERENCE.md b/collectors/REFERENCE.md
new file mode 100644
index 0000000..9c6f0a6
--- /dev/null
+++ b/collectors/REFERENCE.md
@@ -0,0 +1,186 @@
+<!--
+title: "Collectors configuration reference"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/REFERENCE.md
+-->
+
+# Collectors configuration reference
+
+Welcome to the collector configuration reference guide.
+
+This guide contains detailed information about enabling/disabling plugins or modules, in addition to a quick reference to
+the internal plugins API.
+
+To learn the basics of collecting metrics from other applications and services, see the [collector
+quickstart](QUICKSTART.md).
+
+## Netdata's collector architecture
+
+Netdata has an intricate system for organizing and managing its collectors. **Collectors** are the processes/programs
+that actually gather metrics from various sources. Collectors are organized by **plugins**, which help manage all the
+independent processes in a variety of programming languages based on their purpose and performance requirements.
+**Modules** are a type of collector, used primarily to connect to external applications, such as an Nginx web server or
+MySQL database, among many others.
+
+For most users, enabling individual collectors for the application/service you're interested in is far more important
+than knowing which plugin it uses. Check our [collectors list](/collectors/COLLECTORS.md) to see whether your favorite app/service has
+a collector, and then read the [collectors quickstart](/collectors/QUICKSTART.md) and the documentation for that specific collector
+to figure out how to enable it.
+
+There are three types of plugins:
+
+- **Internal** plugins organize collectors that gather metrics from `/proc`, `/sys` and other Linux kernel sources.
+ They are written in `C`, and run as threads within the Netdata daemon.
+- **External** plugins organize collectors that gather metrics from external processes, such as a MySQL database or
+ Nginx web server. They can be written in any language, and the `netdata` daemon spawns them as long-running
+ independent processes. They communicate with the daemon via pipes.
+- **Plugin orchestrators**, which are external plugins that instead support a number of **modules**. Modules are a
+ type of collector. We have a few plugin orchestrators available for those who want to develop their own collectors,
+ but focus most of our efforts on the [Go plugin](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/).
+
+## Enable, configure, and disable modules
+
+Most collector modules come with **auto-detection**, configured to work out-of-the-box on popular operating systems with
+the default settings.
+
+However, there are cases where auto-detection fails. Usually, the reason is that the applications to be monitored do
+not allow Netdata to connect. In most cases, allowing the user `netdata` from `localhost` to connect and collect
+metrics will automatically enable data collection for the application in question (a Netdata restart is required).
+
+View our [collectors quickstart](/collectors/QUICKSTART.md) for explicit details on enabling and configuring collector modules.
+
+## Troubleshoot a collector
+
+First, navigate to your plugins directory, which is usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case
+on your system, open `netdata.conf` and look for the setting `plugins directory`. Once you're in the plugins directory,
+switch to the `netdata` user.
+
+```bash
+cd /usr/libexec/netdata/plugins.d/
+sudo su -s /bin/bash netdata
+```
+
+The next step depends on the collector's orchestrator. You can figure out which orchestrator the collector uses
+by viewing the [collectors list](COLLECTORS.md) and referencing the _configuration file_ field. For example, if that
+field contains `go.d`, that collector uses the Go orchestrator.
+
+```bash
+# Go orchestrator (go.d.plugin)
+./go.d.plugin -d -m <MODULE_NAME>
+
+# Python orchestrator (python.d.plugin)
+./python.d.plugin <MODULE_NAME> debug trace
+
+# Node orchestrator (node.d.plugin)
+./node.d.plugin debug 1 <MODULE_NAME>
+
+# Bash orchestrator (charts.d.plugin)
+./charts.d.plugin debug 1 <MODULE_NAME>
+```
+
+The output from the relevant command will provide valuable troubleshooting information. If you can't figure out how to
+enable the collector using the details from this output, feel free to [create an issue on our
+GitHub](https://github.com/netdata/netdata/issues/new?labels=bug%2C+needs+triage&template=bug_report.md) to get some
+help from our collectors experts.
+
+## Enable and disable plugins
+
+You can enable or disable individual plugins by opening `netdata.conf` and scrolling down to the `[plugins]` section.
+This section features a list of Netdata's plugins, with a boolean setting to enable or disable them. The exception is
+`statsd.plugin`, which has its own `[statsd]` section. Your `[plugins]` section should look similar to this:
+
+```conf
+[plugins]
+ # PATH environment variable = /usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/var/lib/snapd/snap/bin:/sbin:/usr/sbin:/usr/local/bin:/usr/local/sbin
+ # PYTHONPATH environment variable =
+ # proc = yes
+ # diskspace = yes
+ # cgroups = yes
+ # tc = yes
+ # idlejitter = yes
+ # enable running new plugins = yes
+ # check for new plugins every = 60
+ # slabinfo = no
+ # fping = yes
+ # ioping = yes
+ # node.d = yes
+ # python.d = yes
+ # go.d = yes
+ # apps = yes
+ # perf = yes
+ # charts.d = yes
+```
+
+By default, most plugins are enabled, so you don't need to enable them explicitly to use their collectors. To enable or
+disable any specific plugin, remove the comment (`#`) and change the boolean setting to `yes` or `no`.
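+
+For example, to disable the `charts.d` plugin, the relevant lines would become:
+
+```conf
+[plugins]
+    charts.d = no
+```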
+
+All **external plugins** are managed by [plugins.d](plugins.d/), which provides additional management options.
+
+## Internal plugins
+
+Each of the internal plugins runs as a thread inside the `netdata` daemon. Once this thread has started, the plugin may
+spawn additional threads according to its design.
+
+### Internal plugins API
+
+The internal data collection API consists of the following calls:
+
+```c
+collect_data() {
+ // collect data here (one iteration)
+
+ collected_number collected_value = collect_a_value();
+
+ // give the metrics to Netdata
+
+ static RRDSET *st = NULL; // the chart
+ static RRDDIM *rd = NULL; // a dimension attached to this chart
+
+ if(unlikely(!st)) {
+ // we haven't created this chart before
+ // create it now
+ st = rrdset_create_localhost(
+ "type"
+ , "id"
+ , "name"
+ , "family"
+ , "context"
+ , "Chart Title"
+ , "units"
+ , "plugin-name"
+ , "module-name"
+ , priority
+ , update_every
+ , chart_type
+ );
+
+ // attach a metric to it
+ rd = rrddim_add(st, "id", "name", multiplier, divider, algorithm);
+ }
+ else {
+ // this chart is already created
+ // let Netdata know we start a new iteration on it
+ rrdset_next(st);
+ }
+
+ // give the collected value(s) to the chart
+ rrddim_set_by_pointer(st, rd, collected_value);
+
+ // signal Netdata we are done with this iteration
+ rrdset_done(st);
+}
+```
+
+Of course, Netdata has a lot of libraries to help you collect metrics. The best way to find your way through them is
+to examine what other, similar plugins do.
+
+## External Plugins
+
+**External plugins** use the API and are managed by [plugins.d](plugins.d/).
+
+## Write a custom collector
+
+You can add custom collectors by following the [external plugins documentation](../collectors/plugins.d/).
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2FREFERENCE&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/all.h b/collectors/all.h
new file mode 100644
index 0000000..153fce9
--- /dev/null
+++ b/collectors/all.h
@@ -0,0 +1,344 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_ALL_H
+#define NETDATA_ALL_H 1
+
+#include "../daemon/common.h"
+
+// netdata internal data collection plugins
+
+#include "checks.plugin/plugin_checks.h"
+#include "freebsd.plugin/plugin_freebsd.h"
+#include "idlejitter.plugin/plugin_idlejitter.h"
+#include "cgroups.plugin/sys_fs_cgroup.h"
+#include "diskspace.plugin/plugin_diskspace.h"
+#include "proc.plugin/plugin_proc.h"
+#include "tc.plugin/plugin_tc.h"
+#include "macos.plugin/plugin_macos.h"
+#include "statsd.plugin/statsd.h"
+
+#include "plugins.d/plugins_d.h"
+
+
+// ----------------------------------------------------------------------------
+// netdata chart priorities
+
+// This is a work in progress - the scope is to collect here all chart priorities.
+// These should be based on the CONTEXT of the charts + the chart id when needed
+// - for each SECTION +1000 (or +X000 for big sections)
+// - for each FAMILY +100
+// - for each CHART +10
+
+#define NETDATA_CHART_PRIO_SYSTEM_CPU 100
+#define NETDATA_CHART_PRIO_SYSTEM_LOAD 100
+#define NETDATA_CHART_PRIO_SYSTEM_IO 150
+#define NETDATA_CHART_PRIO_SYSTEM_PGPGIO 151
+#define NETDATA_CHART_PRIO_SYSTEM_RAM 200
+#define NETDATA_CHART_PRIO_SYSTEM_SWAP 201
+#define NETDATA_CHART_PRIO_SYSTEM_SWAPIO 250
+#define NETDATA_CHART_PRIO_SYSTEM_NET 500
+#define NETDATA_CHART_PRIO_SYSTEM_IPV4 500 // freebsd only
+#define NETDATA_CHART_PRIO_SYSTEM_IP 501
+#define NETDATA_CHART_PRIO_SYSTEM_IPV6 502
+#define NETDATA_CHART_PRIO_SYSTEM_PROCESSES 600
+#define NETDATA_CHART_PRIO_SYSTEM_FORKS 700
+#define NETDATA_CHART_PRIO_SYSTEM_ACTIVE_PROCESSES 750
+#define NETDATA_CHART_PRIO_SYSTEM_CTXT 800
+#define NETDATA_CHART_PRIO_SYSTEM_IDLEJITTER 800
+#define NETDATA_CHART_PRIO_SYSTEM_INTR 900
+#define NETDATA_CHART_PRIO_SYSTEM_SOFTIRQS 950
+#define NETDATA_CHART_PRIO_SYSTEM_SOFTNET_STAT 955
+#define NETDATA_CHART_PRIO_SYSTEM_INTERRUPTS 1000
+#define NETDATA_CHART_PRIO_SYSTEM_DEV_INTR 1000 // freebsd only
+#define NETDATA_CHART_PRIO_SYSTEM_SOFT_INTR 1100 // freebsd only
+#define NETDATA_CHART_PRIO_SYSTEM_ENTROPY 1000
+#define NETDATA_CHART_PRIO_SYSTEM_UPTIME 1000
+#define NETDATA_CHART_PRIO_SYSTEM_IPC_MSQ_QUEUES 1200 // freebsd only
+#define NETDATA_CHART_PRIO_SYSTEM_IPC_MSQ_MESSAGES 1201
+#define NETDATA_CHART_PRIO_SYSTEM_IPC_MSQ_SIZE 1202
+#define NETDATA_CHART_PRIO_SYSTEM_IPC_SEMAPHORES 1203
+#define NETDATA_CHART_PRIO_SYSTEM_IPC_SEM_ARRAYS 1204
+#define NETDATA_CHART_PRIO_SYSTEM_IPC_SHARED_MEM_SEGS 1205
+#define NETDATA_CHART_PRIO_SYSTEM_IPC_SHARED_MEM_SIZE 1206
+#define NETDATA_CHART_PRIO_SYSTEM_PACKETS 7001 // freebsd only
+
+
+// CPU per core
+
+#define NETDATA_CHART_PRIO_CPU_PER_CORE 1000 // +1 per core
+#define NETDATA_CHART_PRIO_CPU_TEMPERATURE 1050 // freebsd only
+#define NETDATA_CHART_PRIO_CPUFREQ_SCALING_CUR_FREQ 5003 // freebsd only
+#define NETDATA_CHART_PRIO_CPUIDLE 6000
+
+#define NETDATA_CHART_PRIO_CORE_THROTTLING 5001
+#define NETDATA_CHART_PRIO_PACKAGE_THROTTLING 5002
+
+// Interrupts per core
+
+#define NETDATA_CHART_PRIO_INTERRUPTS_PER_CORE 1100 // +1 per core
+
+// Memory Section - 1xxx
+
+#define NETDATA_CHART_PRIO_MEM_SYSTEM_AVAILABLE 1010
+#define NETDATA_CHART_PRIO_MEM_SYSTEM_COMMITTED 1020
+#define NETDATA_CHART_PRIO_MEM_SYSTEM_PGFAULTS 1030
+#define NETDATA_CHART_PRIO_MEM_KERNEL 1100
+#define NETDATA_CHART_PRIO_MEM_SLAB 1200
+#define NETDATA_CHART_PRIO_MEM_HUGEPAGES 1250
+#define NETDATA_CHART_PRIO_MEM_KSM 1300
+#define NETDATA_CHART_PRIO_MEM_KSM_SAVINGS 1301
+#define NETDATA_CHART_PRIO_MEM_KSM_RATIOS 1302
+#define NETDATA_CHART_PRIO_MEM_NUMA 1400
+#define NETDATA_CHART_PRIO_MEM_NUMA_NODES 1410
+#define NETDATA_CHART_PRIO_MEM_PAGEFRAG 1450
+#define NETDATA_CHART_PRIO_MEM_HW 1500
+#define NETDATA_CHART_PRIO_MEM_HW_ECC_CE 1550
+#define NETDATA_CHART_PRIO_MEM_HW_ECC_UE 1560
+#define NETDATA_CHART_PRIO_MEM_ZRAM 1600
+#define NETDATA_CHART_PRIO_MEM_ZRAM_SAVINGS 1601
+#define NETDATA_CHART_PRIO_MEM_ZRAM_RATIO 1602
+#define NETDATA_CHART_PRIO_MEM_ZRAM_EFFICIENCY 1603
+
+// Disks
+
+#define NETDATA_CHART_PRIO_DISK_IO 2000
+#define NETDATA_CHART_PRIO_DISK_OPS 2001
+#define NETDATA_CHART_PRIO_DISK_QOPS 2002
+#define NETDATA_CHART_PRIO_DISK_BACKLOG 2003
+#define NETDATA_CHART_PRIO_DISK_UTIL 2004
+#define NETDATA_CHART_PRIO_DISK_AWAIT 2005
+#define NETDATA_CHART_PRIO_DISK_AVGSZ 2006
+#define NETDATA_CHART_PRIO_DISK_SVCTM 2007
+#define NETDATA_CHART_PRIO_DISK_MOPS 2021
+#define NETDATA_CHART_PRIO_DISK_IOTIME 2022
+#define NETDATA_CHART_PRIO_BCACHE_CACHE_ALLOC 2120
+#define NETDATA_CHART_PRIO_BCACHE_HIT_RATIO 2120
+#define NETDATA_CHART_PRIO_BCACHE_RATES 2121
+#define NETDATA_CHART_PRIO_BCACHE_SIZE 2122
+#define NETDATA_CHART_PRIO_BCACHE_USAGE 2123
+#define NETDATA_CHART_PRIO_BCACHE_OPS 2124
+#define NETDATA_CHART_PRIO_BCACHE_BYPASS 2125
+#define NETDATA_CHART_PRIO_BCACHE_CACHE_READ_RACES 2126
+
+#define NETDATA_CHART_PRIO_DISKSPACE_SPACE 2023
+#define NETDATA_CHART_PRIO_DISKSPACE_INODES 2024
+
+// NFS (server)
+
+#define NETDATA_CHART_PRIO_NFSD_READCACHE 2100
+#define NETDATA_CHART_PRIO_NFSD_FILEHANDLES 2101
+#define NETDATA_CHART_PRIO_NFSD_IO 2102
+#define NETDATA_CHART_PRIO_NFSD_THREADS 2103
+#define NETDATA_CHART_PRIO_NFSD_THREADS_FULLCNT 2104
+#define NETDATA_CHART_PRIO_NFSD_THREADS_HISTOGRAM 2105
+#define NETDATA_CHART_PRIO_NFSD_READAHEAD 2105
+#define NETDATA_CHART_PRIO_NFSD_NET 2107
+#define NETDATA_CHART_PRIO_NFSD_RPC 2108
+#define NETDATA_CHART_PRIO_NFSD_PROC2 2109
+#define NETDATA_CHART_PRIO_NFSD_PROC3 2110
+#define NETDATA_CHART_PRIO_NFSD_PROC4 2111
+#define NETDATA_CHART_PRIO_NFSD_PROC4OPS 2112
+
+// NFS (client)
+
+#define NETDATA_CHART_PRIO_NFS_NET 2207
+#define NETDATA_CHART_PRIO_NFS_RPC 2208
+#define NETDATA_CHART_PRIO_NFS_PROC2 2209
+#define NETDATA_CHART_PRIO_NFS_PROC3 2210
+#define NETDATA_CHART_PRIO_NFS_PROC4 2211
+
+// BTRFS
+
+#define NETDATA_CHART_PRIO_BTRFS_DISK 2300
+#define NETDATA_CHART_PRIO_BTRFS_DATA 2301
+#define NETDATA_CHART_PRIO_BTRFS_METADATA 2302
+#define NETDATA_CHART_PRIO_BTRFS_SYSTEM 2303
+
+// ZFS
+
+#define NETDATA_CHART_PRIO_ZFS_ARC_SIZE 2500
+#define NETDATA_CHART_PRIO_ZFS_L2_SIZE 2500
+#define NETDATA_CHART_PRIO_ZFS_READS 2510
+#define NETDATA_CHART_PRIO_ZFS_ACTUAL_HITS 2519
+#define NETDATA_CHART_PRIO_ZFS_ARC_SIZE_BREAKDOWN 2520
+#define NETDATA_CHART_PRIO_ZFS_IMPORTANT_OPS 2522
+#define NETDATA_CHART_PRIO_ZFS_MEMORY_OPS 2523
+#define NETDATA_CHART_PRIO_ZFS_IO 2700
+#define NETDATA_CHART_PRIO_ZFS_HITS 2520
+#define NETDATA_CHART_PRIO_ZFS_DHITS 2530
+#define NETDATA_CHART_PRIO_ZFS_DEMAND_DATA_HITS 2531
+#define NETDATA_CHART_PRIO_ZFS_PREFETCH_DATA_HITS 2532
+#define NETDATA_CHART_PRIO_ZFS_PHITS 2540
+#define NETDATA_CHART_PRIO_ZFS_MHITS 2550
+#define NETDATA_CHART_PRIO_ZFS_L2HITS 2560
+#define NETDATA_CHART_PRIO_ZFS_LIST_HITS 2600
+#define NETDATA_CHART_PRIO_ZFS_HASH_ELEMENTS 2800
+#define NETDATA_CHART_PRIO_ZFS_HASH_CHAINS 2810
+
+
+// SOFTIRQs
+
+#define NETDATA_CHART_PRIO_SOFTIRQS_PER_CORE 3000 // +1 per core
+
+// IPFW (freebsd)
+
+#define NETDATA_CHART_PRIO_IPFW_PACKETS 3001
+#define NETDATA_CHART_PRIO_IPFW_BYTES 3002
+#define NETDATA_CHART_PRIO_IPFW_ACTIVE 3003
+#define NETDATA_CHART_PRIO_IPFW_EXPIRED 3004
+#define NETDATA_CHART_PRIO_IPFW_MEM 3005
+
+
+// IPVS
+
+#define NETDATA_CHART_PRIO_IPVS_NET 3100
+#define NETDATA_CHART_PRIO_IPVS_SOCKETS 3101
+#define NETDATA_CHART_PRIO_IPVS_PACKETS 3102
+
+// Softnet
+
+#define NETDATA_CHART_PRIO_SOFTNET_PER_CORE 4101 // +1 per core
+
+// IP STACK
+
+#define NETDATA_CHART_PRIO_IP_ERRORS 4100
+#define NETDATA_CHART_PRIO_IP_TCP_CONNABORTS 4210
+#define NETDATA_CHART_PRIO_IP_TCP_SYN_QUEUE 4215
+#define NETDATA_CHART_PRIO_IP_TCP_ACCEPT_QUEUE 4216
+#define NETDATA_CHART_PRIO_IP_TCP_REORDERS 4220
+#define NETDATA_CHART_PRIO_IP_TCP_OFO 4250
+#define NETDATA_CHART_PRIO_IP_TCP_SYNCOOKIES 4260
+#define NETDATA_CHART_PRIO_IP_TCP_MEM 4290
+#define NETDATA_CHART_PRIO_IP_BCAST 4500
+#define NETDATA_CHART_PRIO_IP_BCAST_PACKETS 4510
+#define NETDATA_CHART_PRIO_IP_MCAST 4600
+#define NETDATA_CHART_PRIO_IP_MCAST_PACKETS 4610
+#define NETDATA_CHART_PRIO_IP_ECN 4700
+
+// IPv4
+
+#define NETDATA_CHART_PRIO_IPV4_SOCKETS 5100
+#define NETDATA_CHART_PRIO_IPV4_PACKETS 5130
+#define NETDATA_CHART_PRIO_IPV4_ERRORS 5150
+#define NETDATA_CHART_PRIO_IPV4_ICMP 5170
+#define NETDATA_CHART_PRIO_IPV4_TCP 5200
+#define NETDATA_CHART_PRIO_IPV4_TCP_SOCKETS 5201
+#define NETDATA_CHART_PRIO_IPV4_TCP_MEM 5290
+#define NETDATA_CHART_PRIO_IPV4_UDP 5300
+#define NETDATA_CHART_PRIO_IPV4_UDP_MEM 5390
+#define NETDATA_CHART_PRIO_IPV4_UDPLITE 5400
+#define NETDATA_CHART_PRIO_IPV4_RAW 5450
+#define NETDATA_CHART_PRIO_IPV4_FRAGMENTS 5460
+#define NETDATA_CHART_PRIO_IPV4_FRAGMENTS_MEM 5470
+
+// IPv6
+
+#define NETDATA_CHART_PRIO_IPV6_PACKETS 6200
+#define NETDATA_CHART_PRIO_IPV6_ECT 6210
+#define NETDATA_CHART_PRIO_IPV6_ERRORS 6300
+#define NETDATA_CHART_PRIO_IPV6_FRAGMENTS 6400
+#define NETDATA_CHART_PRIO_IPV6_FRAGSOUT 6401
+#define NETDATA_CHART_PRIO_IPV6_FRAGSIN 6402
+#define NETDATA_CHART_PRIO_IPV6_TCP 6500
+#define NETDATA_CHART_PRIO_IPV6_UDP 6600
+#define NETDATA_CHART_PRIO_IPV6_UDP_PACKETS 6601
+#define NETDATA_CHART_PRIO_IPV6_UDP_ERRORS 6610
+#define NETDATA_CHART_PRIO_IPV6_UDPLITE 6700
+#define NETDATA_CHART_PRIO_IPV6_UDPLITE_PACKETS 6701
+#define NETDATA_CHART_PRIO_IPV6_UDPLITE_ERRORS 6710
+#define NETDATA_CHART_PRIO_IPV6_RAW 6800
+#define NETDATA_CHART_PRIO_IPV6_BCAST 6840
+#define NETDATA_CHART_PRIO_IPV6_MCAST 6850
+#define NETDATA_CHART_PRIO_IPV6_MCAST_PACKETS 6851
+#define NETDATA_CHART_PRIO_IPV6_ICMP 6900
+#define NETDATA_CHART_PRIO_IPV6_ICMP_REDIR 6910
+#define NETDATA_CHART_PRIO_IPV6_ICMP_ERRORS 6920
+#define NETDATA_CHART_PRIO_IPV6_ICMP_ECHOS 6930
+#define NETDATA_CHART_PRIO_IPV6_ICMP_GROUPMEMB 6940
+#define NETDATA_CHART_PRIO_IPV6_ICMP_ROUTER 6950
+#define NETDATA_CHART_PRIO_IPV6_ICMP_NEIGHBOR 6960
+#define NETDATA_CHART_PRIO_IPV6_ICMP_LDV2 6970
+#define NETDATA_CHART_PRIO_IPV6_ICMP_TYPES 6980
+
+
+// Network interfaces
+
+#define NETDATA_CHART_PRIO_FIRST_NET_IFACE 7000 // 6 charts per interface
+#define NETDATA_CHART_PRIO_FIRST_NET_PACKETS 7001
+#define NETDATA_CHART_PRIO_FIRST_NET_ERRORS 7002
+#define NETDATA_CHART_PRIO_FIRST_NET_DROPS 7003
+#define NETDATA_CHART_PRIO_FIRST_NET_EVENTS 7006
+#define NETDATA_CHART_PRIO_CGROUP_NET_IFACE 43000
+
+// SCTP
+
+#define NETDATA_CHART_PRIO_SCTP 7000
+
+// QoS
+
+#define NETDATA_CHART_PRIO_TC_QOS 7000
+#define NETDATA_CHART_PRIO_TC_QOS_PACKETS 7010
+#define NETDATA_CHART_PRIO_TC_QOS_DROPPED 7020
+#define NETDATA_CHART_PRIO_TC_QOS_TOCKENS 7030
+#define NETDATA_CHART_PRIO_TC_QOS_CTOCKENS 7040
+
+// Infiniband
+#define NETDATA_CHART_PRIO_INFINIBAND 7100
+
+// Netfilter
+
+#define NETDATA_CHART_PRIO_NETFILTER_SOCKETS 8700
+#define NETDATA_CHART_PRIO_NETFILTER_NEW 8701
+#define NETDATA_CHART_PRIO_NETFILTER_CHANGES 8702
+#define NETDATA_CHART_PRIO_NETFILTER_EXPECT 8703
+#define NETDATA_CHART_PRIO_NETFILTER_ERRORS 8705
+#define NETDATA_CHART_PRIO_NETFILTER_SEARCH 8710
+
+// SYNPROXY
+
+#define NETDATA_CHART_PRIO_SYNPROXY_SYN_RECEIVED 8751
+#define NETDATA_CHART_PRIO_SYNPROXY_COOKIES 8752
+#define NETDATA_CHART_PRIO_SYNPROXY_CONN_OPEN 8753
+#define NETDATA_CHART_PRIO_SYNPROXY_ENTRIES 8754
+
+// MDSTAT
+
+#define NETDATA_CHART_PRIO_MDSTAT_HEALTH 9000
+#define NETDATA_CHART_PRIO_MDSTAT_NONREDUNDANT 9001
+#define NETDATA_CHART_PRIO_MDSTAT_DISKS 9002 // 5 charts per raid
+#define NETDATA_CHART_PRIO_MDSTAT_MISMATCH 9003
+#define NETDATA_CHART_PRIO_MDSTAT_OPERATION 9004
+#define NETDATA_CHART_PRIO_MDSTAT_FINISH 9005
+#define NETDATA_CHART_PRIO_MDSTAT_SPEED 9006
+
+// Linux Power Supply
+
+#define NETDATA_CHART_PRIO_POWER_SUPPLY_CAPACITY 9500 // 4 charts per power supply
+#define NETDATA_CHART_PRIO_POWER_SUPPLY_CHARGE 9501
+#define NETDATA_CHART_PRIO_POWER_SUPPLY_ENERGY 9502
+#define NETDATA_CHART_PRIO_POWER_SUPPLY_VOLTAGE 9503
+
+
+// Wireless
+
+#define NETDATA_CHART_PRIO_WIRELESS_IFACE 7110
+
+// CGROUPS
+
+#define NETDATA_CHART_PRIO_CGROUPS_SYSTEMD 19000 // many charts
+#define NETDATA_CHART_PRIO_CGROUPS_CONTAINERS 40000 // many charts
+
+// STATSD
+
+#define NETDATA_CHART_PRIO_STATSD_PRIVATE 90000 // many charts
+
+// INTERNAL NETDATA INFO
+
+#define NETDATA_CHART_PRIO_CHECKS 99999
+
+#define NETDATA_CHART_PRIO_NETDATA_DISKSPACE 132020
+#define NETDATA_CHART_PRIO_NETDATA_TC_CPU 135000
+#define NETDATA_CHART_PRIO_NETDATA_TC_TIME 135001
+
+
+#endif //NETDATA_ALL_H
diff --git a/collectors/apps.plugin/Makefile.am b/collectors/apps.plugin/Makefile.am
new file mode 100644
index 0000000..533b14d
--- /dev/null
+++ b/collectors/apps.plugin/Makefile.am
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
+
+dist_libconfig_DATA = \
+ apps_groups.conf \
+ $(NULL)
diff --git a/collectors/apps.plugin/README.md b/collectors/apps.plugin/README.md
new file mode 100644
index 0000000..d10af1c
--- /dev/null
+++ b/collectors/apps.plugin/README.md
@@ -0,0 +1,399 @@
+<!--
+title: "apps.plugin"
+sidebar_label: "Application monitoring (apps.plugin)"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/apps.plugin/README.md
+-->
+
+# apps.plugin
+
+`apps.plugin` breaks down system resource usage to **processes**, **users** and **user groups**.
+
+To achieve this task, it iterates through the whole process tree, collecting resource usage information
+for every process found running.
+
+Since Netdata needs to present this information in charts and track them through time,
+instead of presenting a `top` like list, `apps.plugin` uses a pre-defined list of **process groups**
+to which it assigns all running processes. This list is customizable via `apps_groups.conf`, and Netdata
+ships with a good default for most cases (to edit it on your system run `/etc/netdata/edit-config apps_groups.conf`).
+
+So, `apps.plugin` builds a process tree (much like `ps fax` does in Linux), and groups
+processes together (evaluating both child and parent processes) so that the result is always a list with
+a predefined set of members (of course, only process groups found running are reported).
+
+> If you find that `apps.plugin` categorizes standard applications as `other`, we would be
+> glad to accept pull requests improving the defaults shipped with Netdata in `apps_groups.conf`.
+
+Unlike traditional process monitoring tools (like `top`), `apps.plugin` is able to account for the resource
+utilization of exited processes. Their utilization is attributed to their currently running parents.
+So, `apps.plugin` is perfectly able to measure the resources used by shell scripts and other processes
+that fork/spawn other short lived processes hundreds of times per second.
+
+## Charts
+
+`apps.plugin` provides charts for 3 sections:
+
+1. Per application charts as **Applications** at Netdata dashboards
+2. Per user charts as **Users** at Netdata dashboards
+3. Per user group charts as **User Groups** at Netdata dashboards
+
+Each of these sections provides the same number of charts:
+
+- CPU utilization (`apps.cpu`)
+ - Total CPU usage
+ - User/system CPU usage (`apps.cpu_user`/`apps.cpu_system`)
+- Disk I/O
+ - Physical reads/writes (`apps.preads`/`apps.pwrites`)
+ - Logical reads/writes (`apps.lreads`/`apps.lwrites`)
+ - Open unique files (if a file is found open multiple times, it is counted just once, `apps.files`)
+- Memory
+ - Real Memory Used (non-shared, `apps.mem`)
+ - Virtual Memory Allocated (`apps.vmem`)
+ - Minor page faults (i.e. memory activity, `apps.minor_faults`)
+- Processes
+ - Threads running (`apps.threads`)
+ - Processes running (`apps.processes`)
+ - Carried over uptime (since the last Netdata Agent restart, `apps.uptime`)
+ - Minimum uptime (`apps.uptime_min`)
+ - Average uptime (`apps.uptime_average`)
+ - Maximum uptime (`apps.uptime_max`)
+ - Pipes open (`apps.pipes`)
+- Swap memory
+ - Swap memory used (`apps.swap`)
+ - Major page faults (i.e. swap activity, `apps.major_faults`)
+- Network
+ - Sockets open (`apps.sockets`)
+
+In addition, if the [eBPF collector](/collectors/ebpf.plugin/README.md) is running, your dashboard will also show an
+additional [list of charts](/collectors/ebpf.plugin/README.md#integration-with-appsplugin) using low-level Linux
+metrics.
+
+The above are reported:
+
+- For **Applications** per target configured.
+- For **Users** per username or UID (when the username is not available).
+- For **User Groups** per groupname or GID (when groupname is not available).
+
+## Performance
+
+`apps.plugin` is a complex piece of software and has a lot of work to do.
+We are proud that `apps.plugin` is a lot faster than any other similar tool,
+while collecting a lot more information about the processes; however, the fact is that
+this plugin requires more CPU resources than the `netdata` daemon itself.
+
+Under Linux, for each process running, `apps.plugin` reads several `/proc` files
+per process. Doing this work every second, especially on hosts with several thousand
+processes, may increase the CPU resources consumed by the plugin.
+
+In such cases, you may need to lower its data collection frequency.
+
+To do this, edit `/etc/netdata/netdata.conf` and find this section:
+
+```
+[plugin:apps]
+ # update every = 1
+ # command options =
+```
+
+Uncomment the line `update every` and set it to a higher number. If you just set it to `2`,
+its CPU resources will be cut in half, and data collection will be once every 2 seconds.
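+
+For example, the same section with data collection slowed to once every 2 seconds:
+
+```
+[plugin:apps]
+    update every = 2
+```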
+
+## Configuration
+
+The configuration file is `/etc/netdata/apps_groups.conf`. To edit it on your system, run `/etc/netdata/edit-config apps_groups.conf`.
+
+The configuration file accepts multiple lines, each having this format:
+
+```txt
+group: process1 process2 ...
+```
+
+Each group can be given multiple times, to add more processes to it.
+
+For the **Applications** section, only groups configured in this file are reported.
+All other processes will be reported as `other`.
+
+For each process given, its whole process tree will be grouped, not just the process matched.
+The plugin will include both parents and children. If including the parents into the group is
+undesirable, the line `other: *` should be appended to the `apps_groups.conf`.
+
+The process names are the ones returned by:
+
+- `ps -e` or `cat /proc/PID/stat`
+- in case of substring mode (see below): `/proc/PID/cmdline`
+
+To add process names with spaces, enclose them in quotes (single or double)
+example: `'Plex Media Serv'` or `"my other process"`.
+
+You can add an asterisk `*` at the beginning and/or the end of a process:
+
+- `*name` _suffix_ mode: will search for processes ending with `name` (at `/proc/PID/stat`)
+- `name*` _prefix_ mode: will search for processes beginning with `name` (at `/proc/PID/stat`)
+- `*name*` _substring_ mode: will search for `name` in the whole command line (at `/proc/PID/cmdline`)
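+
+As a quick illustration, here is a hypothetical fragment using each mode (all group and process names are made up):
+
+```txt
+exactmatch: myapp
+prefixed: worker_*
+suffixed: *d
+substring: *elasticsearch*
+```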
+
+If you enter even just one _name_ (substring), `apps.plugin` will process
+`/proc/PID/cmdline` for all processes (of course only once per process: when they are first seen).
+
+To add processes with single quotes, enclose them in double quotes: `"process with this ' single quote"`
+
+To add processes with double quotes, enclose them in single quotes: `'process with this " double quote'`
+
+If a group or process name starts with a `-`, the dimension will be hidden from the chart (cpu chart only).
+
+If a process starts with a `+`, debugging will be enabled for it (debugging produces a lot of output - do not enable it in production systems).
+
+You can add any number of groups. Only the ones found running will affect the charts generated.
+However, producing charts with hundreds of dimensions may slow down your web browser.
+
+The order of the entries in this list is important: the first that matches a process is used, so put important
+ones at the top. Processes not matched by any row will inherit the group from their parents or children.
+
+The order also controls the order of the dimensions on the generated charts (although applications started
+after `apps.plugin` is started will be appended to the existing list of dimensions the `netdata` daemon maintains).
+
+There are a few command line options you can pass to `apps.plugin`. The list of available options can be acquired
+with the `--help` flag. The options can be set in the `netdata.conf` file. For example, to disable user and user
+group charts you should set:
+
+```
+[plugin:apps]
+ command options = without-users without-groups
+```
+
+### Integration with eBPF
+
+If you don't see charts under the **eBPF syscall** or **eBPF net** sections, you should edit your
+[`ebpf.conf`](/collectors/ebpf.plugin/README.md#ebpf-programs) file to ensure the eBPF program is enabled.
+
+Also see our [guide on troubleshooting apps with eBPF
+metrics](/docs/guides/troubleshoot/monitor-debug-applications-ebpf.md) for ideas on how to interpret these charts in a
+few scenarios.
+
+## Permissions
+
+`apps.plugin` requires additional privileges to collect all the information it needs.
+The problem is described in issue #157.
+
+When Netdata is installed, `apps.plugin` is given the capabilities `cap_dac_read_search,cap_sys_ptrace+ep`.
+If this fails (i.e. `setcap` fails), `apps.plugin` is setuid to `root`.
+
+### linux capabilities in containers
+
+There are a few cases, like `docker` and `virtuozzo` containers, where `setcap` succeeds, but the capabilities
+are silently ignored (in `lxc` containers `setcap` fails).
+
+In these cases (`setcap` succeeds but the capabilities do not work), you will have to make
+`apps.plugin` setuid to `root` by running these commands:
+
+```sh
+chown root:netdata /usr/libexec/netdata/plugins.d/apps.plugin
+chmod 4750 /usr/libexec/netdata/plugins.d/apps.plugin
+```
+
+You will have to run these commands every time you update Netdata.
+
+## Security
+
+`apps.plugin` performs a hard-coded function of building the process tree in memory,
+iterating forever, collecting metrics for each running process and sending them to Netdata.
+This is a one-way communication, from `apps.plugin` to Netdata.
+
+So, since `apps.plugin` cannot be instructed by Netdata for the actions it performs,
+we think it is pretty safe to allow it to have these increased privileges.
+
+Keep in mind that `apps.plugin` will still run without escalated permissions,
+but it will not be able to collect all the information.
+
+## Application Badges
+
+You can create badges that you can embed anywhere you like, with URLs like this:
+
+```
+https://your.netdata.ip:19999/api/v1/badge.svg?chart=apps.processes&dimensions=myapp&value_color=green%3E0%7Cred
+```
+
+The color expression unescaped is this: `value_color=green>0|red`.
+
+Here is an example for the process group `sql` at `https://registry.my-netdata.io`:
+
+![image](https://registry.my-netdata.io/api/v1/badge.svg?chart=apps.processes&dimensions=sql&value_color=green%3E0%7Cred)
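+
+To embed such a badge in a markdown document, standard image syntax works (hypothetical host and dimension):
+
+```
+![myapp processes](https://your.netdata.ip:19999/api/v1/badge.svg?chart=apps.processes&dimensions=myapp)
+```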
+
+Netdata can give you a lot more badges for your app.
+Examples for the process group `sql` are below:
+
+- CPU usage: ![image](https://registry.my-netdata.io/api/v1/badge.svg?chart=apps.cpu&dimensions=sql&value_color=green=0%7Corange%3C50%7Cred)
+- Disk Physical Reads ![image](https://registry.my-netdata.io/api/v1/badge.svg?chart=apps.preads&dimensions=sql&value_color=green%3C100%7Corange%3C1000%7Cred)
+- Disk Physical Writes ![image](https://registry.my-netdata.io/api/v1/badge.svg?chart=apps.pwrites&dimensions=sql&value_color=green%3C100%7Corange%3C1000%7Cred)
+- Disk Logical Reads ![image](https://registry.my-netdata.io/api/v1/badge.svg?chart=apps.lreads&dimensions=sql&value_color=green%3C100%7Corange%3C1000%7Cred)
+- Disk Logical Writes ![image](https://registry.my-netdata.io/api/v1/badge.svg?chart=apps.lwrites&dimensions=sql&value_color=green%3C100%7Corange%3C1000%7Cred)
+- Open Files ![image](https://registry.my-netdata.io/api/v1/badge.svg?chart=apps.files&dimensions=sql&value_color=green%3E30%7Cred)
+- Real Memory ![image](https://registry.my-netdata.io/api/v1/badge.svg?chart=apps.mem&dimensions=sql&value_color=green%3C100%7Corange%3C200%7Cred)
+- Virtual Memory ![image](https://registry.my-netdata.io/api/v1/badge.svg?chart=apps.vmem&dimensions=sql&value_color=green%3C100%7Corange%3C1000%7Cred)
+- Swap Memory ![image](https://registry.my-netdata.io/api/v1/badge.svg?chart=apps.swap&dimensions=sql&value_color=green=0%7Cred)
+- Minor Page Faults ![image](https://registry.my-netdata.io/api/v1/badge.svg?chart=apps.minor_faults&dimensions=sql&value_color=green%3C100%7Corange%3C1000%7Cred)
+- Processes ![image](https://registry.my-netdata.io/api/v1/badge.svg?chart=apps.processes&dimensions=sql&value_color=green%3E0%7Cred)
+- Threads ![image](https://registry.my-netdata.io/api/v1/badge.svg?chart=apps.threads&dimensions=sql&value_color=green%3E=28%7Cred)
+- Major Faults (swap activity) ![image](https://registry.my-netdata.io/api/v1/badge.svg?chart=apps.major_faults&dimensions=sql&value_color=green=0%7Cred)
+- Open Pipes ![image](https://registry.my-netdata.io/api/v1/badge.svg?chart=apps.pipes&dimensions=sql&value_color=green=0%7Cred)
+- Open Sockets ![image](https://registry.my-netdata.io/api/v1/badge.svg?chart=apps.sockets&dimensions=sql&value_color=green%3E=3%7Cred)
+
+For more information about badges check [Generating Badges](/web/api/badges/README.md)
+
+## Comparison with console tools
+
+SSH to a server running Netdata and execute this:
+
+```sh
+while true; do ls -l /var/run >/dev/null; done
+```
+
+In most systems `/var/run` is a `tmpfs` device, so there is nothing that can stop this command
+from entirely consuming one of the CPU cores of the machine.
+
+As we will see below, **none** of the console performance monitoring tools can report that this
+command is using 100% CPU. They do report of course that the CPU is busy, but **they fail to
+identify the process that consumes so much CPU**.
+
+Here is what common Linux console monitoring tools report:
+
+### top
+
+`top` reports that `bash` is using just 14%.
+
+If you check the total system CPU utilization, it says there is no idle CPU at all, but `top`
+fails to provide a breakdown of the CPU consumption in the system. The sum of the CPU utilization
+of all processes reported by `top` is 15.6%.
+
+```
+top - 18:46:28 up 3 days, 20:14, 2 users, load average: 0.22, 0.05, 0.02
+Tasks: 76 total, 2 running, 74 sleeping, 0 stopped, 0 zombie
+%Cpu(s): 32.8 us, 65.6 sy, 0.0 ni, 0.0 id, 0.0 wa, 1.3 hi, 0.3 si, 0.0 st
+KiB Mem : 1016576 total, 244112 free, 52012 used, 720452 buff/cache
+KiB Swap: 0 total, 0 free, 0 used. 753712 avail Mem
+
+ PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND
+12789 root 20 0 14980 4180 3020 S 14.0 0.4 0:02.82 bash
+ 9 root 20 0 0 0 0 S 1.0 0.0 0:22.36 rcuos/0
+ 642 netdata 20 0 132024 20112 2660 S 0.3 2.0 14:26.29 netdata
+12522 netdata 20 0 9508 2476 1828 S 0.3 0.2 0:02.26 apps.plugin
+ 1 root 20 0 67196 10216 7500 S 0.0 1.0 0:04.83 systemd
+ 2 root 20 0 0 0 0 S 0.0 0.0 0:00.00 kthreadd
+```
+
+### htop
+
+Exactly like `top`, `htop` provides an incomplete breakdown of the system CPU utilization.
+
+```
+ CPU[||||||||||||||||||||||||100.0%] Tasks: 27, 11 thr; 2 running
+ Mem[||||||||||||||||||||85.4M/993M] Load average: 1.16 0.88 0.90
+ Swp[ 0K/0K] Uptime: 3 days, 21:37:03
+
+ PID USER PRI NI VIRT RES SHR S CPU% MEM% TIME+ Command
+12789 root 20 0 15104 4484 3208 S 14.0 0.4 10:57.15 -bash
+ 7024 netdata 20 0 9544 2480 1744 S 0.7 0.2 0:00.88 /usr/libexec/netd
+ 7009 netdata 20 0 138M 21016 2712 S 0.7 2.1 0:00.89 /usr/sbin/netdata
+ 7012 netdata 20 0 138M 21016 2712 S 0.0 2.1 0:00.31 /usr/sbin/netdata
+ 563 root 20 0 308M 202M 202M S 0.0 20.4 1:00.81 /usr/lib/systemd/
+ 7019 netdata 20 0 138M 21016 2712 S 0.0 2.1 0:00.14 /usr/sbin/netdata
+```
+
+### atop
+
+`atop` also fails to break down CPU usage.
+
+```
+ATOP - localhost 2016/12/10 20:11:27 ----------- 10s elapsed
+PRC | sys 1.13s | user 0.43s | #proc 75 | #zombie 0 | #exit 5383 |
+CPU | sys 67% | user 31% | irq 2% | idle 0% | wait 0% |
+CPL | avg1 1.34 | avg5 1.05 | avg15 0.96 | csw 51346 | intr 10508 |
+MEM | tot 992.8M | free 211.5M | cache 470.0M | buff 87.2M | slab 164.7M |
+SWP | tot 0.0M | free 0.0M | | vmcom 207.6M | vmlim 496.4M |
+DSK | vda | busy 0% | read 0 | write 4 | avio 1.50 ms |
+NET | transport | tcpi 16 | tcpo 15 | udpi 0 | udpo 0 |
+NET | network | ipi 16 | ipo 15 | ipfrw 0 | deliv 16 |
+NET | eth0 ---- | pcki 16 | pcko 15 | si 1 Kbps | so 4 Kbps |
+
+ PID SYSCPU USRCPU VGROW RGROW RDDSK WRDSK ST EXC S CPU CMD 1/600
+12789 0.98s 0.40s 0K 0K 0K 336K -- - S 14% bash
+ 9 0.08s 0.00s 0K 0K 0K 0K -- - S 1% rcuos/0
+ 7024 0.03s 0.00s 0K 0K 0K 0K -- - S 0% apps.plugin
+ 7009 0.01s 0.01s 0K 0K 0K 4K -- - S 0% netdata
+```
+
+### glances
+
+And the same is true for `glances`. The system runs at 100%, but `glances` reports a total of only 17%
+per-process utilization.
+
+Note also that, being a `python` program, `glances` itself uses 1.6% CPU while it runs.
+
+```
+localhost Uptime: 3 days, 21:42:00
+
+CPU [100.0%] CPU 100.0% MEM 23.7% SWAP 0.0% LOAD 1-core
+MEM [ 23.7%] user: 30.9% total: 993M total: 0 1 min: 1.18
+SWAP [ 0.0%] system: 67.8% used: 236M used: 0 5 min: 1.08
+ idle: 0.0% free: 757M free: 0 15 min: 1.00
+
+NETWORK Rx/s Tx/s TASKS 75 (90 thr), 1 run, 74 slp, 0 oth
+eth0 168b 2Kb
+eth1 0b 0b CPU% MEM% PID USER NI S Command
+lo 0b 0b 13.5 0.4 12789 root 0 S -bash
+ 1.6 2.2 7025 root 0 R /usr/bin/python /u
+DISK I/O R/s W/s 1.0 0.0 9 root 0 S rcuos/0
+vda1 0 4K 0.3 0.2 7024 netdata 0 S /usr/libexec/netda
+ 0.3 0.0 7 root 0 S rcu_sched
+FILE SYS Used Total 0.3 2.1 7009 netdata 0 S /usr/sbin/netdata
+/ (vda1) 1.56G 29.5G 0.0 0.0 17 root 0 S oom_reaper
+```
+
+### Why does this happen?
+
+All the console tools report usage based on the processes found running *at the moment they
+examine the process tree*. So, they see just one `ls` command, which is actually very quick
+with minor CPU utilization. But the shell is spawning hundreds of them, one after another
+(much like shell scripts do).
+
+### What does Netdata report?
+
+The total CPU utilization of the system:
+
+![image](https://cloud.githubusercontent.com/assets/2662304/21076212/9198e5a6-bf2e-11e6-9bc0-6bdea25befb2.png)
+<br/>***Figure 1**: The system overview section at Netdata, just a few seconds after the command was run*
+
+And at the applications `apps.plugin` breaks down CPU usage per application:
+
+![image](https://cloud.githubusercontent.com/assets/2662304/21076220/c9687848-bf2e-11e6-8d81-348592c5aca2.png)
+<br/>***Figure 2**: The Applications section at Netdata, just a few seconds after the command was run*
+
+So, the `ssh` session is using 95% CPU time.
+
+Why `ssh`?
+
+`apps.plugin` groups all processes based on its configuration file.
+The default configuration has nothing for `bash`, but it has for `sshd`, so Netdata accumulates
+all ssh sessions to a dimension on the charts, called `ssh`. This includes all the processes in
+the process tree of `sshd`, **including the exited children**.
+
+> Distributions based on `systemd` provide another way to get CPU utilization per user session
+> or running service: control groups (cgroups), commonly used as part of containers.
+> `apps.plugin` does not use these mechanisms. The process grouping made by `apps.plugin` works
+> on any Linux, `systemd`-based or not.
+
+#### A more technical description of how Netdata works
+
+Netdata reads `/proc/<pid>/stat` for all processes, once per second, and extracts `utime` and
+`stime` (user and system CPU utilization), much like all the console tools do.
+
+But it also extracts `cutime` and `cstime`, which account for the user and system time of the exited children of each process.
+By keeping a map in memory of the whole process tree, it is capable of assigning the right time to every process, taking
+into account all its exited children.
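+
+As a rough sketch of the idea (not the actual `apps.plugin` source), reading these four fields for one process could
+look like this in C:
+
+```c
+#include <stdio.h>
+
+/*
+ * Sketch only: read utime, stime, cutime and cstime (fields 14-17 of
+ * /proc/<pid>/stat, in clock ticks). For simplicity it ignores the corner
+ * case of a process name (field 2) that contains spaces.
+ */
+static int read_cpu_times(int pid,
+                          unsigned long long *utime, unsigned long long *stime,
+                          long long *cutime, long long *cstime) {
+    char path[64];
+    snprintf(path, sizeof(path), "/proc/%d/stat", pid);
+
+    FILE *fp = fopen(path, "r");
+    if (!fp) return -1;
+
+    // skip pid, comm, state and fields 4-13, then read fields 14-17
+    int matched = fscanf(fp,
+        "%*d %*s %*c %*d %*d %*d %*d %*d %*u %*u %*u %*u %*u "
+        "%llu %llu %lld %lld",
+        utime, stime, cutime, cstime);
+
+    fclose(fp);
+    return (matched == 4) ? 0 : -1;
+}
+
+int main(void) {
+    unsigned long long ut, st;
+    long long cut, cst;
+    if (read_cpu_times(1, &ut, &st, &cut, &cst) == 0)
+        printf("utime=%llu stime=%llu cutime=%lld cstime=%lld\n", ut, st, cut, cst);
+    return 0;
+}
+```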
+
+It is tricky, since a process may be running for 1 hour and once it exits, its parent should not
+receive the whole 1 hour of cpu time in just 1 second - you have to subtract the cpu time that has
+been reported for it prior to this iteration.
+
+It is even trickier, because walking through the entire process tree takes some time itself. So,
+if you sum the CPU utilization of all processes, you might have more CPU time than the reported
+total CPU time of the system. Netdata solves this by adapting the per-process CPU utilization to
+the total of the system. [Netdata adds charts that document this normalization](https://london.my-netdata.io/default.html#menu_netdata_submenu_apps_plugin).
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fapps.plugin%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/apps.plugin/apps_groups.conf b/collectors/apps.plugin/apps_groups.conf
new file mode 100644
index 0000000..7242ed3
--- /dev/null
+++ b/collectors/apps.plugin/apps_groups.conf
@@ -0,0 +1,316 @@
+#
+# apps.plugin process grouping
+#
+# The apps.plugin displays charts with information about the processes running.
+# This config allows grouping processes together, so that several processes
+# will be reported as one.
+#
+# Only groups in this file are reported. All other processes will be reported
+# as 'other'.
+#
+# For each process given, its whole process tree will be grouped, not just
+# the process matched. The plugin will include both parents and children.
+#
+# The format is:
+#
+# group: process1 process2 process3 ...
+#
+# Each group can be given multiple times, to add more processes to it.
+#
+# The process names are the ones returned by:
+#
+# - ps -e or /proc/PID/stat
+# - in case of substring mode (see below): /proc/PID/cmdline
+#
+# To add process names with spaces, enclose them in quotes (single or double)
+# example: 'Plex Media Serv' "my other process".
+#
+# Wildcard support:
+# You can add an asterisk (*) at the beginning and/or the end of a process:
+#
+# *name suffix mode: will search for processes ending with 'name'
+# (/proc/PID/stat)
+#
+# name* prefix mode: will search for processes beginning with 'name'
+# (/proc/PID/stat)
+#
+# *name* substring mode: will search for 'name' in the whole command line
+# (/proc/PID/cmdline)
+#
+# If you enter even just one *name* (substring), apps.plugin will process
+# /proc/PID/cmdline for all processes, just once (when they are first seen).
+#
+# To add processes with single quotes, enclose them in double quotes
+# example: "process with this ' single quote"
+#
+# To add processes with double quotes, enclose them in single quotes:
+# example: 'process with this " double quote'
+#
+# If a group or process name starts with a -, the dimension will be hidden
+# (cpu chart only).
+#
+# If a process starts with a +, debugging will be enabled for it
+# (debugging produces a lot of output - do not enable it in production systems)
+#
+# You can add any number of groups you like. Only the ones found running will
+# affect the charts generated. However, producing charts with hundreds of
+# dimensions may slow down your web browser.
+#
+# The order of the entries in this list is important: the first that matches
+# a process is used, so put important ones at the top. Processes not matched
+# by any row, will inherit it from their parents or children.
+#
+# The order also controls the order of the dimensions on the generated charts
+# (although applications started after apps.plugin is started, will be appended
+# to the existing list of dimensions the netdata daemon maintains).
+
+# -----------------------------------------------------------------------------
+# NETDATA processes accounting
+
+# netdata main process
+netdata: netdata
+
+# netdata known plugins
+# plugins not defined here will be accumulated in netdata, above
+apps.plugin: apps.plugin
+freeipmi.plugin: freeipmi.plugin
+nfacct.plugin: nfacct.plugin
+cups.plugin: cups.plugin
+xenstat.plugin: xenstat.plugin
+perf.plugin: perf.plugin
+charts.d.plugin: *charts.d.plugin*
+node.d.plugin: *node.d.plugin*
+python.d.plugin: *python.d.plugin*
+tc-qos-helper: *tc-qos-helper.sh*
+fping: fping
+ioping: ioping
+go.d.plugin: *go.d.plugin*
+slabinfo.plugin: slabinfo.plugin
+ebpf.plugin: *ebpf.plugin*
+
+# agent-service-discovery
+agent_sd: agent_sd
+
+# -----------------------------------------------------------------------------
+# authentication/authorization related servers
+
+auth: radius* openldap* ldap* slapd authelia
+fail2ban: fail2ban*
+
+# -----------------------------------------------------------------------------
+# web/ftp servers
+
+httpd: apache* httpd nginx* lighttpd hiawatha
+proxy: squid* c-icap squidGuard varnish*
+php: php* lsphp*
+ftpd: proftpd in.tftpd vsftpd
+uwsgi: uwsgi
+unicorn: *unicorn*
+puma: *puma*
+
+# -----------------------------------------------------------------------------
+# database servers
+
+sql: mysqld* mariad* postgres* postmaster* oracle_* ora_* sqlservr
+nosql: mongod redis* memcached *couchdb*
+timedb: prometheus *carbon-cache.py* *carbon-aggregator.py* *graphite/manage.py* *net.opentsdb.tools.TSDMain* influxd*
+columndb: clickhouse-server*
+
+# -----------------------------------------------------------------------------
+# email servers
+
+email: dovecot imapd pop3d amavis* master zmstat* zmmailboxdmgr qmgr oqmgr saslauthd opendkim clamd freshclam tlsmgr postfwd2 postscreen postfix smtp* lmtp* sendmail
+
+# -----------------------------------------------------------------------------
+# network, routing, VPN
+
+ppp: ppp*
+vpn: openvpn pptp* cjdroute gvpe tincd
+wifi: hostapd wpa_supplicant NetworkManager
+routing: ospfd* ospf6d* bgpd bfdd fabricd isisd eigrpd sharpd staticd ripd ripngd pimd pbrd nhrpd ldpd zebra vrrpd vtysh bird*
+modem: ModemManager
+tor: tor
+
+# -----------------------------------------------------------------------------
+# high availability and balancers
+
+camo: *camo*
+balancer: ipvs_* haproxy
+ha: corosync hs_logd ha_logd stonithd pacemakerd lrmd crmd
+
+# -----------------------------------------------------------------------------
+# telephony
+
+pbx: asterisk safe_asterisk *vicidial*
+sip: opensips* stund
+
+# -----------------------------------------------------------------------------
+# chat
+
+chat: irssi *vines* *prosody* murmurd
+
+# -----------------------------------------------------------------------------
+# monitoring
+
+logs: ulogd* syslog* rsyslog* logrotate systemd-journald rotatelogs
+nms: snmpd vnstatd smokeping zabbix* monit munin* mon openhpid watchdog tailon nrpe
+splunk: splunkd
+azure: mdsd *waagent* *omiserver* *omiagent* hv_kvp_daemon hv_vss_daemon *auoms* *omsagent*
+
+# -----------------------------------------------------------------------------
+# storage, file systems and file servers
+
+ceph: ceph-* ceph_* radosgw* rbd-* cephfs-* osdmaptool crushtool
+samba: smbd nmbd winbindd ctdbd ctdb-* ctdb_*
+nfs: rpcbind rpc.* nfs*
+zfs: spl_* z_* txg_* zil_* arc_* l2arc*
+btrfs: btrfs*
+iscsi: iscsid iscsi_eh
+
+# -----------------------------------------------------------------------------
+# kubernetes
+
+kubelet: kubelet
+kube-dns: kube-dns
+kube-proxy: kube-proxy
+metrics-server: metrics-server
+heapster: heapster
+
+# -----------------------------------------------------------------------------
+# containers & virtual machines
+
+containers: lxc* docker* balena*
+VMs: vbox* VBox* qemu*
+
+# -----------------------------------------------------------------------------
+# ssh servers and clients
+
+ssh: ssh* scp dropbear
+
+# -----------------------------------------------------------------------------
+# print servers and clients
+
+print: cups* lpd lpq
+
+# -----------------------------------------------------------------------------
+# time servers and clients
+
+time: ntp* systemd-timesyn* chronyd
+
+# -----------------------------------------------------------------------------
+# dhcp servers and clients
+
+dhcp: *dhcp*
+
+# -----------------------------------------------------------------------------
+# name servers and clients
+
+dns: named unbound nsd pdns_server knotd gdnsd yadifad dnsmasq systemd-resolve* pihole*
+dnsdist: dnsdist
+
+# -----------------------------------------------------------------------------
+# installation / compilation / debugging
+
+build: cc1 cc1plus as gcc* cppcheck ld make cmake automake autoconf autoreconf
+build: git gdb valgrind*
+
+# -----------------------------------------------------------------------------
+# antivirus
+
+antivirus: clam* *clam imunify360*
+
+# -----------------------------------------------------------------------------
+# torrent clients
+
+torrents: *deluge* transmission* *SickBeard* *CouchPotato* *rtorrent*
+
+# -----------------------------------------------------------------------------
+# backup servers and clients
+
+backup: rsync lsyncd bacula* borg rclone
+
+# -----------------------------------------------------------------------------
+# cron
+
+cron: cron* atd anacron systemd-cron*
+
+# -----------------------------------------------------------------------------
+# UPS
+
+ups: upsmon upsd */nut/*
+
+# -----------------------------------------------------------------------------
+# media players, servers, clients
+
+media: mplayer vlc xine mediatomb omxplayer* kodi* xbmc* mediacenter eventlircd
+media: mpd minidlnad mt-daapd avahi* Plex* jellyfin squeeze* jackett Ombi
+
+# -----------------------------------------------------------------------------
+# java applications
+
+hdfsdatanode: *org.apache.hadoop.hdfs.server.datanode.DataNode*
+hdfsnamenode: *org.apache.hadoop.hdfs.server.namenode.NameNode*
+hdfsjournalnode: *org.apache.hadoop.hdfs.qjournal.server.JournalNode*
+hdfszkfc: *org.apache.hadoop.hdfs.tools.DFSZKFailoverController*
+
+yarnnode: *org.apache.hadoop.yarn.server.nodemanager.NodeManager*
+yarnmgr: *org.apache.hadoop.yarn.server.resourcemanager.ResourceManager*
+yarnproxy: *org.apache.hadoop.yarn.server.webproxy.WebAppProxyServer*
+
+sparkworker: *org.apache.spark.deploy.worker.Worker*
+sparkmaster: *org.apache.spark.deploy.master.Master*
+
+hbaseregion: *org.apache.hadoop.hbase.regionserver.HRegionServer*
+hbaserest: *org.apache.hadoop.hbase.rest.RESTServer*
+hbasethrift: *org.apache.hadoop.hbase.thrift.ThriftServer*
+hbasemaster: *org.apache.hadoop.hbase.master.HMaster*
+
+zookeeper: *org.apache.zookeeper.server.quorum.QuorumPeerMain*
+
+hive2: *org.apache.hive.service.server.HiveServer2*
+hivemetastore: *org.apache.hadoop.hive.metastore.HiveMetaStore*
+
+solr: *solr.install.dir*
+
+airflow: *airflow*
+
+# -----------------------------------------------------------------------------
+# X
+
+X: X Xorg xinit lightdm xdm pulseaudio gkrellm xfwm4 xfdesktop xfce* Thunar
+X: xfsettingsd xfconfd gnome-* gdm gconf* dconf* xfconf* *gvfs gvfs* slim
+X: kdeinit* kdm plasmashell
+X: evolution-* firefox chromium opera vivaldi-bin epiphany WebKit*
+X: '*systemd --user*' chrome *chrome-sandbox* *google-chrome* *chromium* *firefox*
+
+# -----------------------------------------------------------------------------
+# Kernel / System
+
+ksmd: ksmd
+
+system: systemd-* udisks* udevd* *udevd connmand ipv6_addrconf dbus-* rtkit*
+system: inetd xinetd mdadm polkitd acpid uuidd packagekitd upowerd colord
+system: accounts-daemon rngd haveged
+
+kernel: kthreadd kauditd lockd khelper kdevtmpfs khungtaskd rpciod
+kernel: fsnotify_mark kthrotld deferwq scsi_*
+
+# -----------------------------------------------------------------------------
+# other application servers
+
+kafka: *kafka.Kafka*
+
+rabbitmq: *rabbitmq*
+
+sidekiq: *sidekiq*
+java: java
+ipfs: ipfs
+
+node: node*
+factorio: factorio
+
+p4: p4*
+
+git-services: gitea gitlab-runner
+
+freeswitch: freeswitch*
diff --git a/collectors/apps.plugin/apps_plugin.c b/collectors/apps.plugin/apps_plugin.c
new file mode 100644
index 0000000..7cbbb07
--- /dev/null
+++ b/collectors/apps.plugin/apps_plugin.c
@@ -0,0 +1,4226 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+/*
+ * netdata apps.plugin
+ * (C) Copyright 2016-2017 Costa Tsaousis <costa@tsaousis.gr>
+ * Released under GPL v3+
+ */
+
+#include "../../libnetdata/libnetdata.h"
+
+// ----------------------------------------------------------------------------
+
+// callback required by fatal()
+void netdata_cleanup_and_exit(int ret) {
+ exit(ret);
+}
+
+void send_statistics( const char *action, const char *action_result, const char *action_data) {
+ (void) action;
+ (void) action_result;
+ (void) action_data;
+ return;
+}
+// callbacks required by popen()
+void signals_block(void) {};
+void signals_unblock(void) {};
+void signals_reset(void) {};
+
+// callback required by eval()
+int health_variable_lookup(const char *variable, uint32_t hash, struct rrdcalc *rc, calculated_number *result) {
+ (void)variable;
+ (void)hash;
+ (void)rc;
+ (void)result;
+ return 0;
+};
+
+// required by get_system_cpus()
+char *netdata_configured_host_prefix = "";
+
+
+// ----------------------------------------------------------------------------
+// debugging
+
+static int debug_enabled = 0;
+static inline void debug_log_int(const char *fmt, ... ) {
+ va_list args;
+
+ fprintf( stderr, "apps.plugin: ");
+ va_start( args, fmt );
+ vfprintf( stderr, fmt, args );
+ va_end( args );
+
+ fputc('\n', stderr);
+}
+
+#ifdef NETDATA_INTERNAL_CHECKS
+
+#define debug_log(fmt, args...) do { if(unlikely(debug_enabled)) debug_log_int(fmt, ##args); } while(0)
+
+#else
+
+static inline void debug_log_dummy(void) {}
+#define debug_log(fmt, args...) debug_log_dummy()
+
+#endif
+
+
+// ----------------------------------------------------------------------------
+
+#ifdef __FreeBSD__
+#include <sys/user.h>
+#endif
+
+// ----------------------------------------------------------------------------
+// per O/S configuration
+
+// the minimum PID of the system
+// this is also the pid of the init process
+#define INIT_PID 1
+
+// if apps.plugin can read the entire process list, including the resource
+// utilization of each process, instantly, set this to 1.
+// when set to 0, apps.plugin builds a sorted list of processes, in order
+// to process child processes before their parents.
+#ifdef __FreeBSD__
+#define ALL_PIDS_ARE_READ_INSTANTLY 1
+#else
+#define ALL_PIDS_ARE_READ_INSTANTLY 0
+#endif
+
+// ----------------------------------------------------------------------------
+// string lengths
+
+#define MAX_COMPARE_NAME 100
+#define MAX_NAME 100
+#define MAX_CMDLINE 16384
+
+// ----------------------------------------------------------------------------
+// the rates we send to netdata carry this level of detail:
+// - 1 will send just integer parts to netdata
+// - 100 will send 2 decimal points
+// - 1000 will send 3 decimal points
+// etc.
+#define RATES_DETAIL 10000ULL
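+// For example (illustrative arithmetic): with RATES_DETAIL at 10000, a rate
+// of 1.2345 units per second is transmitted as 12345; the charting side is
+// expected to divide by the same detail to display 1.2345 again.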
+
+// ----------------------------------------------------------------------------
+// factor for calculating correct CPU time values depending on units of raw data
+static unsigned int time_factor = 0;
+
+// ----------------------------------------------------------------------------
+// to avoid reallocating too frequently, we can increase the number of spare
+// file descriptor slots kept for each process.
+// IMPORTANT:
+// keeping a lot of spares increases the CPU utilization of the plugin.
+#define MAX_SPARE_FDS 1
+
+// ----------------------------------------------------------------------------
+// command line options
+
+static int
+ update_every = 1,
+ enable_guest_charts = 0,
+#ifdef __FreeBSD__
+ enable_file_charts = 0,
+#else
+ enable_file_charts = 1,
+ max_fds_cache_seconds = 60,
+#endif
+ enable_users_charts = 1,
+ enable_groups_charts = 1,
+ include_exited_childs = 1;
+
+// will be changed to getenv(NETDATA_USER_CONFIG_DIR) if it exists
+static char *user_config_dir = CONFIG_DIR;
+static char *stock_config_dir = LIBCONFIG_DIR;
+
+// ----------------------------------------------------------------------------
+// internal flags
+// handled in code (automatically set)
+
+static int
+        show_guest_time = 0,            // 1 when guest values are collected
+        show_guest_time_old = 0,
+        proc_pid_cmdline_is_needed = 0; // 1 when we need to read /proc/<pid>/cmdline
+
+
+// ----------------------------------------------------------------------------
+// internal counters
+
+static size_t
+ global_iterations_counter = 1,
+ calls_counter = 0,
+ file_counter = 0,
+ filenames_allocated_counter = 0,
+ inodes_changed_counter = 0,
+ links_changed_counter = 0,
+ targets_assignment_counter = 0;
+
+
+// ----------------------------------------------------------------------------
+// Normalization
+//
+// With normalization we lower the collected metrics by a factor, to make them
+// match the total utilization of the system.
+// The discrepancy exists because apps.plugin needs some time to collect all
+// the metrics, so the per-process values can add up to more than the total
+// utilization of the system.
+//
+// With normalization we align the per-process utilization to the total of
+// the system. We first consume the utilization of exited children and, if the
+// collected values are still above the total, we proportionally scale each
+// reported metric.
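+//
+// A worked example (hypothetical numbers): if the collected per-process user
+// time sums to 110 units while /proc/stat reports 100, utime_fix_ratio
+// becomes 100 / 110 = 0.909 and every reported utime is multiplied by it.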
+
+// the total system time, as reported by /proc/stat
+static kernel_uint_t
+ global_utime = 0,
+ global_stime = 0,
+ global_gtime = 0;
+
+// the normalization ratios, as calculated by normalize_utilization()
+double utime_fix_ratio = 1.0,
+ stime_fix_ratio = 1.0,
+ gtime_fix_ratio = 1.0,
+ minflt_fix_ratio = 1.0,
+ majflt_fix_ratio = 1.0,
+ cutime_fix_ratio = 1.0,
+ cstime_fix_ratio = 1.0,
+ cgtime_fix_ratio = 1.0,
+ cminflt_fix_ratio = 1.0,
+ cmajflt_fix_ratio = 1.0;
+
+
+struct pid_on_target {
+ int32_t pid;
+ struct pid_on_target *next;
+};
+
+// ----------------------------------------------------------------------------
+// target
+//
+// a target is the structure into which processes are aggregated, in order to
+// be reported to netdata.
+//
+// - Each entry in /etc/apps_groups.conf creates a target.
+// - Each user and group used by a process in the system creates a target.
+
+struct target {
+ char compare[MAX_COMPARE_NAME + 1];
+ uint32_t comparehash;
+ size_t comparelen;
+
+ char id[MAX_NAME + 1];
+ uint32_t idhash;
+
+ char name[MAX_NAME + 1];
+
+ uid_t uid;
+ gid_t gid;
+
+ kernel_uint_t minflt;
+ kernel_uint_t cminflt;
+ kernel_uint_t majflt;
+ kernel_uint_t cmajflt;
+ kernel_uint_t utime;
+ kernel_uint_t stime;
+ kernel_uint_t gtime;
+ kernel_uint_t cutime;
+ kernel_uint_t cstime;
+ kernel_uint_t cgtime;
+ kernel_uint_t num_threads;
+ // kernel_uint_t rss;
+
+ kernel_uint_t status_vmsize;
+ kernel_uint_t status_vmrss;
+ kernel_uint_t status_vmshared;
+ kernel_uint_t status_rssfile;
+ kernel_uint_t status_rssshmem;
+ kernel_uint_t status_vmswap;
+
+ kernel_uint_t io_logical_bytes_read;
+ kernel_uint_t io_logical_bytes_written;
+ // kernel_uint_t io_read_calls;
+ // kernel_uint_t io_write_calls;
+ kernel_uint_t io_storage_bytes_read;
+ kernel_uint_t io_storage_bytes_written;
+ // kernel_uint_t io_cancelled_write_bytes;
+
+ int *target_fds;
+ int target_fds_size;
+
+ kernel_uint_t openfiles;
+ kernel_uint_t openpipes;
+ kernel_uint_t opensockets;
+ kernel_uint_t openinotifies;
+ kernel_uint_t openeventfds;
+ kernel_uint_t opentimerfds;
+ kernel_uint_t opensignalfds;
+ kernel_uint_t openeventpolls;
+ kernel_uint_t openother;
+
+ kernel_uint_t starttime;
+ kernel_uint_t collected_starttime;
+ kernel_uint_t uptime_min;
+ kernel_uint_t uptime_sum;
+ kernel_uint_t uptime_max;
+
+ unsigned int processes; // how many processes have been merged to this
+ int exposed; // if set, we have sent this to netdata
+ int hidden; // if set, we set the hidden flag on the dimension
+ int debug_enabled;
+ int ends_with;
+ int starts_with; // if set, the compare string matches only the
+ // beginning of the command
+
+ struct pid_on_target *root_pid; // list of aggregated pids for target debugging
+
+ struct target *target; // the one that will be reported to netdata
+ struct target *next;
+};
+
+struct target
+ *apps_groups_default_target = NULL, // the default target
+ *apps_groups_root_target = NULL, // apps_groups.conf defined
+ *users_root_target = NULL, // users
+ *groups_root_target = NULL; // user groups
+
+size_t
+ apps_groups_targets_count = 0; // # of apps_groups.conf targets
+
+
+// ----------------------------------------------------------------------------
+// pid_stat
+//
+// structure to store data for each process running
+// see: man proc for the description of the fields
+
+struct pid_fd {
+ int fd;
+
+#ifndef __FreeBSD__
+ ino_t inode;
+ char *filename;
+ uint32_t link_hash;
+ size_t cache_iterations_counter;
+ size_t cache_iterations_reset;
+#endif
+};
+
+struct pid_stat {
+ int32_t pid;
+ char comm[MAX_COMPARE_NAME + 1];
+ char *cmdline;
+
+ uint32_t log_thrown;
+
+ // char state;
+ int32_t ppid;
+ // int32_t pgrp;
+ // int32_t session;
+ // int32_t tty_nr;
+ // int32_t tpgid;
+ // uint64_t flags;
+
+ // these are raw values collected
+ kernel_uint_t minflt_raw;
+ kernel_uint_t cminflt_raw;
+ kernel_uint_t majflt_raw;
+ kernel_uint_t cmajflt_raw;
+ kernel_uint_t utime_raw;
+ kernel_uint_t stime_raw;
+ kernel_uint_t gtime_raw; // guest_time
+ kernel_uint_t cutime_raw;
+ kernel_uint_t cstime_raw;
+ kernel_uint_t cgtime_raw; // cguest_time
+
+ // these are rates
+ kernel_uint_t minflt;
+ kernel_uint_t cminflt;
+ kernel_uint_t majflt;
+ kernel_uint_t cmajflt;
+ kernel_uint_t utime;
+ kernel_uint_t stime;
+ kernel_uint_t gtime;
+ kernel_uint_t cutime;
+ kernel_uint_t cstime;
+ kernel_uint_t cgtime;
+
+ // int64_t priority;
+ // int64_t nice;
+ int32_t num_threads;
+ // int64_t itrealvalue;
+ kernel_uint_t collected_starttime;
+ // kernel_uint_t vsize;
+ // kernel_uint_t rss;
+ // kernel_uint_t rsslim;
+ // kernel_uint_t starcode;
+ // kernel_uint_t endcode;
+ // kernel_uint_t startstack;
+ // kernel_uint_t kstkesp;
+ // kernel_uint_t kstkeip;
+ // uint64_t signal;
+ // uint64_t blocked;
+ // uint64_t sigignore;
+ // uint64_t sigcatch;
+ // uint64_t wchan;
+ // uint64_t nswap;
+ // uint64_t cnswap;
+ // int32_t exit_signal;
+ // int32_t processor;
+ // uint32_t rt_priority;
+ // uint32_t policy;
+ // kernel_uint_t delayacct_blkio_ticks;
+
+ uid_t uid;
+ gid_t gid;
+
+ kernel_uint_t status_vmsize;
+ kernel_uint_t status_vmrss;
+ kernel_uint_t status_vmshared;
+ kernel_uint_t status_rssfile;
+ kernel_uint_t status_rssshmem;
+ kernel_uint_t status_vmswap;
+#ifndef __FreeBSD__
+ ARL_BASE *status_arl;
+#endif
+
+ kernel_uint_t io_logical_bytes_read_raw;
+ kernel_uint_t io_logical_bytes_written_raw;
+ // kernel_uint_t io_read_calls_raw;
+ // kernel_uint_t io_write_calls_raw;
+ kernel_uint_t io_storage_bytes_read_raw;
+ kernel_uint_t io_storage_bytes_written_raw;
+ // kernel_uint_t io_cancelled_write_bytes_raw;
+
+ kernel_uint_t io_logical_bytes_read;
+ kernel_uint_t io_logical_bytes_written;
+ // kernel_uint_t io_read_calls;
+ // kernel_uint_t io_write_calls;
+ kernel_uint_t io_storage_bytes_read;
+ kernel_uint_t io_storage_bytes_written;
+ // kernel_uint_t io_cancelled_write_bytes;
+
+ struct pid_fd *fds; // array of fds it uses
+ size_t fds_size; // the size of the fds array
+
+ int children_count; // number of processes directly referencing this
+ unsigned char keep:1; // 1 when we need to keep this process in memory even after it exited
+    int keeploops;              // increased by 1 every time keep is 1 and updated is 0
+ unsigned char updated:1; // 1 when the process is currently running
+ unsigned char merged:1; // 1 when it has been merged to its parent
+ unsigned char read:1; // 1 when we have already read this process for this iteration
+
+ int sortlist; // higher numbers = top on the process tree
+ // each process gets a unique number
+
+    struct target *target;        // apps_groups.conf targets
+ struct target *user_target; // uid based targets
+ struct target *group_target; // gid based targets
+
+ usec_t stat_collected_usec;
+ usec_t last_stat_collected_usec;
+
+ usec_t io_collected_usec;
+ usec_t last_io_collected_usec;
+
+ kernel_uint_t uptime;
+
+ char *fds_dirname; // the full directory name in /proc/PID/fd
+
+ char *stat_filename;
+ char *status_filename;
+ char *io_filename;
+ char *cmdline_filename;
+
+ struct pid_stat *parent;
+ struct pid_stat *prev;
+ struct pid_stat *next;
+};
+
+size_t pagesize;
+
+kernel_uint_t global_uptime;
+
+// log each problem once per process
+// log flood protection flags (log_thrown)
+#define PID_LOG_IO 0x00000001
+#define PID_LOG_STATUS 0x00000002
+#define PID_LOG_CMDLINE 0x00000004
+#define PID_LOG_FDS 0x00000008
+#define PID_LOG_STAT 0x00000010
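+
+// These flags are paired with managed_log(); a sketch of the calling pattern
+// used throughout this file:
+//
+//   managed_log(p, PID_LOG_CMDLINE, read_proc_pid_cmdline(p));
+//
+// so each failure is logged once per process, until the call succeeds again.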
+
+static struct pid_stat
+        *root_of_pids = NULL, // global list of all processes running
+        **all_pids = NULL;    // to avoid allocations, we pre-allocate
+                              // the entire pid space.
+
+static size_t
+ all_pids_count = 0; // the number of processes running
+
+#if (ALL_PIDS_ARE_READ_INSTANTLY == 0)
+// Another pre-allocated list of all possible pids.
+// We need it to sort pids and assign them a unique sortlist id, so that we
+// read parents before children. This is needed to prevent a situation where
+// a child is found running but, by the time we read its parent, the child has
+// exited and its parent has accumulated its resources.
+static pid_t
+ *all_pids_sortlist = NULL;
+#endif
+
+// ----------------------------------------------------------------------------
+// file descriptor
+//
+// this is used to keep a global list of all open files of the system.
+// it is needed in order to calculate the unique files processes have open.
+
+#define FILE_DESCRIPTORS_INCREASE_STEP 100
+
+// types for struct file_descriptor->type
+typedef enum fd_filetype {
+ FILETYPE_OTHER,
+ FILETYPE_FILE,
+ FILETYPE_PIPE,
+ FILETYPE_SOCKET,
+ FILETYPE_INOTIFY,
+ FILETYPE_EVENTFD,
+ FILETYPE_EVENTPOLL,
+ FILETYPE_TIMERFD,
+ FILETYPE_SIGNALFD
+} FD_FILETYPE;
+
+struct file_descriptor {
+ avl avl;
+
+#ifdef NETDATA_INTERNAL_CHECKS
+ uint32_t magic;
+#endif /* NETDATA_INTERNAL_CHECKS */
+
+ const char *name;
+ uint32_t hash;
+
+ FD_FILETYPE type;
+ int count;
+ int pos;
+} *all_files = NULL;
+
+static int
+        all_files_len = 0,
+        all_files_size = 0;
+
+long double currentmaxfds = 0;
+
+// ----------------------------------------------------------------------------
+// read users and groups from files
+
+struct user_or_group_id {
+ avl avl;
+
+ union {
+ uid_t uid;
+ gid_t gid;
+ } id;
+
+ char *name;
+
+ int updated;
+
+ struct user_or_group_id * next;
+};
+
+enum user_or_group_id_type {
+ USER_ID,
+ GROUP_ID
+};
+
+struct user_or_group_ids{
+ enum user_or_group_id_type type;
+
+ avl_tree_type index;
+ struct user_or_group_id *root;
+
+ char filename[FILENAME_MAX + 1];
+};
+
+int user_id_compare(void* a, void* b) {
+ if(((struct user_or_group_id *)a)->id.uid < ((struct user_or_group_id *)b)->id.uid)
+ return -1;
+
+ else if(((struct user_or_group_id *)a)->id.uid > ((struct user_or_group_id *)b)->id.uid)
+ return 1;
+
+ else
+ return 0;
+}
+
+struct user_or_group_ids all_user_ids = {
+ .type = USER_ID,
+
+ .index = {
+ NULL,
+ user_id_compare
+ },
+
+ .root = NULL,
+
+ .filename = "",
+};
+
+int group_id_compare(void* a, void* b) {
+ if(((struct user_or_group_id *)a)->id.gid < ((struct user_or_group_id *)b)->id.gid)
+ return -1;
+
+ else if(((struct user_or_group_id *)a)->id.gid > ((struct user_or_group_id *)b)->id.gid)
+ return 1;
+
+ else
+ return 0;
+}
+
+struct user_or_group_ids all_group_ids = {
+ .type = GROUP_ID,
+
+ .index = {
+ NULL,
+ group_id_compare
+ },
+
+ .root = NULL,
+
+ .filename = "",
+};
+
+int file_changed(const struct stat *statbuf, struct timespec *last_modification_time) {
+ if(likely(statbuf->st_mtim.tv_sec == last_modification_time->tv_sec &&
+ statbuf->st_mtim.tv_nsec == last_modification_time->tv_nsec)) return 0;
+
+ last_modification_time->tv_sec = statbuf->st_mtim.tv_sec;
+ last_modification_time->tv_nsec = statbuf->st_mtim.tv_nsec;
+
+ return 1;
+}
+
+int read_user_or_group_ids(struct user_or_group_ids *ids, struct timespec *last_modification_time) {
+ struct stat statbuf;
+ if(unlikely(stat(ids->filename, &statbuf)))
+ return 1;
+ else
+ if(likely(!file_changed(&statbuf, last_modification_time))) return 0;
+
+ procfile *ff = procfile_open(ids->filename, " :\t", PROCFILE_FLAG_DEFAULT);
+ if(unlikely(!ff)) return 1;
+
+ ff = procfile_readall(ff);
+ if(unlikely(!ff)) return 1;
+
+ size_t line, lines = procfile_lines(ff);
+
+ for(line = 0; line < lines ;line++) {
+ size_t words = procfile_linewords(ff, line);
+ if(unlikely(words < 3)) continue;
+
+ char *name = procfile_lineword(ff, line, 0);
+ if(unlikely(!name || !*name)) continue;
+
+ char *id_string = procfile_lineword(ff, line, 2);
+ if(unlikely(!id_string || !*id_string)) continue;
+
+
+ struct user_or_group_id *user_or_group_id = callocz(1, sizeof(struct user_or_group_id));
+
+        if(ids->type == USER_ID)
+            user_or_group_id->id.uid = (uid_t)str2ull(id_string);
+        else
+            user_or_group_id->id.gid = (gid_t)str2ull(id_string);
+
+ user_or_group_id->name = strdupz(name);
+ user_or_group_id->updated = 1;
+
+ struct user_or_group_id *existing_user_id = NULL;
+
+ if(likely(ids->root))
+ existing_user_id = (struct user_or_group_id *)avl_search(&ids->index, (avl *) user_or_group_id);
+
+ if(unlikely(existing_user_id)) {
+ freez(existing_user_id->name);
+ existing_user_id->name = user_or_group_id->name;
+ existing_user_id->updated = 1;
+ freez(user_or_group_id);
+ }
+ else {
+ if(unlikely(avl_insert(&ids->index, (avl *) user_or_group_id) != (void *) user_or_group_id)) {
+ error("INTERNAL ERROR: duplicate indexing of id during realloc");
+ };
+
+ user_or_group_id->next = ids->root;
+ ids->root = user_or_group_id;
+ }
+ }
+
+ procfile_close(ff);
+
+ // remove unused ids
+ struct user_or_group_id *user_or_group_id = ids->root, *prev_user_id = NULL;
+
+ while(user_or_group_id) {
+ if(unlikely(!user_or_group_id->updated)) {
+ if(unlikely((struct user_or_group_id *)avl_remove(&ids->index, (avl *) user_or_group_id) != user_or_group_id))
+ error("INTERNAL ERROR: removal of unused id from index, removed a different id");
+
+ if(prev_user_id)
+ prev_user_id->next = user_or_group_id->next;
+ else
+ ids->root = user_or_group_id->next;
+
+ freez(user_or_group_id->name);
+ freez(user_or_group_id);
+
+ if(prev_user_id)
+ user_or_group_id = prev_user_id->next;
+ else
+ user_or_group_id = ids->root;
+ }
+ else {
+ user_or_group_id->updated = 0;
+
+ prev_user_id = user_or_group_id;
+ user_or_group_id = user_or_group_id->next;
+ }
+ }
+
+ return 0;
+}
+
+// ----------------------------------------------------------------------------
+// apps_groups.conf
+// aggregate all processes in groups, to have a limited number of dimensions
+
+static struct target *get_users_target(uid_t uid) {
+ struct target *w;
+ for(w = users_root_target ; w ; w = w->next)
+ if(w->uid == uid) return w;
+
+ w = callocz(sizeof(struct target), 1);
+ snprintfz(w->compare, MAX_COMPARE_NAME, "%u", uid);
+ w->comparehash = simple_hash(w->compare);
+ w->comparelen = strlen(w->compare);
+
+ snprintfz(w->id, MAX_NAME, "%u", uid);
+ w->idhash = simple_hash(w->id);
+
+ struct user_or_group_id user_id_to_find, *user_or_group_id = NULL;
+ user_id_to_find.id.uid = uid;
+
+ if(*netdata_configured_host_prefix) {
+ static struct timespec last_passwd_modification_time;
+ int ret = read_user_or_group_ids(&all_user_ids, &last_passwd_modification_time);
+
+ if(likely(!ret && all_user_ids.index.root))
+ user_or_group_id = (struct user_or_group_id *)avl_search(&all_user_ids.index, (avl *) &user_id_to_find);
+ }
+
+ if(user_or_group_id && user_or_group_id->name && *user_or_group_id->name) {
+ snprintfz(w->name, MAX_NAME, "%s", user_or_group_id->name);
+ }
+ else {
+ struct passwd *pw = getpwuid(uid);
+ if(!pw || !pw->pw_name || !*pw->pw_name)
+ snprintfz(w->name, MAX_NAME, "%u", uid);
+ else
+ snprintfz(w->name, MAX_NAME, "%s", pw->pw_name);
+ }
+
+ netdata_fix_chart_name(w->name);
+
+ w->uid = uid;
+
+ w->next = users_root_target;
+ users_root_target = w;
+
+ debug_log("added uid %u ('%s') target", w->uid, w->name);
+
+ return w;
+}
+
+struct target *get_groups_target(gid_t gid)
+{
+ struct target *w;
+ for(w = groups_root_target ; w ; w = w->next)
+ if(w->gid == gid) return w;
+
+ w = callocz(sizeof(struct target), 1);
+ snprintfz(w->compare, MAX_COMPARE_NAME, "%u", gid);
+ w->comparehash = simple_hash(w->compare);
+ w->comparelen = strlen(w->compare);
+
+ snprintfz(w->id, MAX_NAME, "%u", gid);
+ w->idhash = simple_hash(w->id);
+
+ struct user_or_group_id group_id_to_find, *group_id = NULL;
+ group_id_to_find.id.gid = gid;
+
+ if(*netdata_configured_host_prefix) {
+ static struct timespec last_group_modification_time;
+ int ret = read_user_or_group_ids(&all_group_ids, &last_group_modification_time);
+
+ if(likely(!ret && all_group_ids.index.root))
+ group_id = (struct user_or_group_id *)avl_search(&all_group_ids.index, (avl *) &group_id_to_find);
+ }
+
+ if(group_id && group_id->name && *group_id->name) {
+ snprintfz(w->name, MAX_NAME, "%s", group_id->name);
+ }
+ else {
+ struct group *gr = getgrgid(gid);
+ if(!gr || !gr->gr_name || !*gr->gr_name)
+ snprintfz(w->name, MAX_NAME, "%u", gid);
+ else
+ snprintfz(w->name, MAX_NAME, "%s", gr->gr_name);
+ }
+
+ netdata_fix_chart_name(w->name);
+
+ w->gid = gid;
+
+ w->next = groups_root_target;
+ groups_root_target = w;
+
+ debug_log("added gid %u ('%s') target", w->gid, w->name);
+
+ return w;
+}
+
+// find or create a new target
+// some targets are just aggregated into another target (the second argument)
+static struct target *get_apps_groups_target(const char *id, struct target *target, const char *name) {
+ int tdebug = 0, thidden = target?target->hidden:0, ends_with = 0;
+ const char *nid = id;
+
+ // extract the options
+ while(nid[0] == '-' || nid[0] == '+' || nid[0] == '*') {
+ if(nid[0] == '-') thidden = 1;
+ if(nid[0] == '+') tdebug = 1;
+ if(nid[0] == '*') ends_with = 1;
+ nid++;
+ }
+ uint32_t hash = simple_hash(id);
+
+ // find if it already exists
+ struct target *w, *last = apps_groups_root_target;
+ for(w = apps_groups_root_target ; w ; w = w->next) {
+ if(w->idhash == hash && strncmp(nid, w->id, MAX_NAME) == 0)
+ return w;
+
+ last = w;
+ }
+
+ // find an existing target
+ if(unlikely(!target)) {
+ while(*name == '-') {
+ if(*name == '-') thidden = 1;
+ name++;
+ }
+
+ for(target = apps_groups_root_target ; target != NULL ; target = target->next) {
+ if(!target->target && strcmp(name, target->name) == 0)
+ break;
+ }
+
+ if(unlikely(debug_enabled)) {
+ if(unlikely(target))
+ debug_log("REUSING TARGET NAME '%s' on ID '%s'", target->name, target->id);
+ else
+ debug_log("NEW TARGET NAME '%s' on ID '%s'", name, id);
+ }
+ }
+
+ if(target && target->target)
+ fatal("Internal Error: request to link process '%s' to target '%s' which is linked to target '%s'", id, target->id, target->target->id);
+
+ w = callocz(sizeof(struct target), 1);
+ strncpyz(w->id, nid, MAX_NAME);
+ w->idhash = simple_hash(w->id);
+
+ if(unlikely(!target))
+ // copy the name
+ strncpyz(w->name, name, MAX_NAME);
+ else
+ // copy the id
+ strncpyz(w->name, nid, MAX_NAME);
+
+ strncpyz(w->compare, nid, MAX_COMPARE_NAME);
+ size_t len = strlen(w->compare);
+ if(w->compare[len - 1] == '*') {
+ w->compare[len - 1] = '\0';
+ w->starts_with = 1;
+ }
+ w->ends_with = ends_with;
+
+ if(w->starts_with && w->ends_with)
+ proc_pid_cmdline_is_needed = 1;
+
+ w->comparehash = simple_hash(w->compare);
+ w->comparelen = strlen(w->compare);
+
+ w->hidden = thidden;
+#ifdef NETDATA_INTERNAL_CHECKS
+ w->debug_enabled = tdebug;
+#else
+ if(tdebug)
+ fprintf(stderr, "apps.plugin has been compiled without debugging\n");
+#endif
+ w->target = target;
+
+ // append it, to maintain the order in apps_groups.conf
+ if(last) last->next = w;
+ else apps_groups_root_target = w;
+
+ debug_log("ADDING TARGET ID '%s', process name '%s' (%s), aggregated on target '%s', options: %s %s"
+ , w->id
+ , w->compare, (w->starts_with && w->ends_with)?"substring":((w->starts_with)?"prefix":((w->ends_with)?"suffix":"exact"))
+ , w->target?w->target->name:w->name
+ , (w->hidden)?"hidden":"-"
+ , (w->debug_enabled)?"debug":"-"
+ );
+
+ return w;
+}
+
+// read the apps_groups.conf file
+static int read_apps_groups_conf(const char *path, const char *file)
+{
+ char filename[FILENAME_MAX + 1];
+
+ snprintfz(filename, FILENAME_MAX, "%s/apps_%s.conf", path, file);
+
+ debug_log("process groups file: '%s'", filename);
+
+ // ----------------------------------------
+
+ procfile *ff = procfile_open(filename, " :\t", PROCFILE_FLAG_DEFAULT);
+ if(!ff) return 1;
+
+ procfile_set_quotes(ff, "'\"");
+
+ ff = procfile_readall(ff);
+ if(!ff)
+ return 1;
+
+ size_t line, lines = procfile_lines(ff);
+
+ for(line = 0; line < lines ;line++) {
+ size_t word, words = procfile_linewords(ff, line);
+ if(!words) continue;
+
+ char *name = procfile_lineword(ff, line, 0);
+ if(!name || !*name) continue;
+
+ // find a possibly existing target
+ struct target *w = NULL;
+
+ // loop through all words, skipping the first one (the name)
+ for(word = 0; word < words ;word++) {
+ char *s = procfile_lineword(ff, line, word);
+ if(!s || !*s) continue;
+ if(*s == '#') break;
+
+ // is this the first word? skip it
+ if(s == name) continue;
+
+ // add this target
+ struct target *n = get_apps_groups_target(s, w, name);
+ if(!n) {
+ error("Cannot create target '%s' (line %zu, word %zu)", s, line, word);
+ continue;
+ }
+
+ // just some optimization
+ // to avoid searching for a target for each process
+ if(!w) w = n->target?n->target:n;
+ }
+ }
+
+ procfile_close(ff);
+
+ apps_groups_default_target = get_apps_groups_target("p+!o@w#e$i^r&7*5(-i)l-o_", NULL, "other"); // match nothing
+ if(!apps_groups_default_target)
+ fatal("Cannot create default target");
+
+ // allow the user to override group 'other'
+ if(apps_groups_default_target->target)
+ apps_groups_default_target = apps_groups_default_target->target;
+
+ return 0;
+}
+
+
+// ----------------------------------------------------------------------------
+// struct pid_stat management
+static inline void init_pid_fds(struct pid_stat *p, size_t first, size_t size);
+
+static inline struct pid_stat *get_pid_entry(pid_t pid) {
+ if(unlikely(all_pids[pid]))
+ return all_pids[pid];
+
+ struct pid_stat *p = callocz(sizeof(struct pid_stat), 1);
+ p->fds = mallocz(sizeof(struct pid_fd) * MAX_SPARE_FDS);
+ p->fds_size = MAX_SPARE_FDS;
+ init_pid_fds(p, 0, p->fds_size);
+
+ if(likely(root_of_pids))
+ root_of_pids->prev = p;
+
+ p->next = root_of_pids;
+ root_of_pids = p;
+
+ p->pid = pid;
+
+ all_pids[pid] = p;
+ all_pids_count++;
+
+ return p;
+}
+
+static inline void del_pid_entry(pid_t pid) {
+ struct pid_stat *p = all_pids[pid];
+
+ if(unlikely(!p)) {
+ error("attempted to free pid %d that is not allocated.", pid);
+ return;
+ }
+
+ debug_log("process %d %s exited, deleting it.", pid, p->comm);
+
+ if(root_of_pids == p)
+ root_of_pids = p->next;
+
+ if(p->next) p->next->prev = p->prev;
+ if(p->prev) p->prev->next = p->next;
+
+    // free the fd filenames
+#ifndef __FreeBSD__
+ {
+ size_t i;
+ for(i = 0; i < p->fds_size; i++)
+ if(p->fds[i].filename)
+ freez(p->fds[i].filename);
+ }
+#endif
+ freez(p->fds);
+
+ freez(p->fds_dirname);
+ freez(p->stat_filename);
+ freez(p->status_filename);
+#ifndef __FreeBSD__
+ arl_free(p->status_arl);
+#endif
+ freez(p->io_filename);
+ freez(p->cmdline_filename);
+ freez(p->cmdline);
+ freez(p);
+
+ all_pids[pid] = NULL;
+ all_pids_count--;
+}
+
+// ----------------------------------------------------------------------------
+
+static inline int managed_log(struct pid_stat *p, uint32_t log, int status) {
+ if(unlikely(!status)) {
+ // error("command failed log %u, errno %d", log, errno);
+
+ if(unlikely(debug_enabled || errno != ENOENT)) {
+ if(unlikely(debug_enabled || !(p->log_thrown & log))) {
+ p->log_thrown |= log;
+ switch(log) {
+ case PID_LOG_IO:
+ #ifdef __FreeBSD__
+ error("Cannot fetch process %d I/O info (command '%s')", p->pid, p->comm);
+ #else
+ error("Cannot process %s/proc/%d/io (command '%s')", netdata_configured_host_prefix, p->pid, p->comm);
+ #endif
+ break;
+
+ case PID_LOG_STATUS:
+ #ifdef __FreeBSD__
+ error("Cannot fetch process %d status info (command '%s')", p->pid, p->comm);
+ #else
+ error("Cannot process %s/proc/%d/status (command '%s')", netdata_configured_host_prefix, p->pid, p->comm);
+ #endif
+ break;
+
+ case PID_LOG_CMDLINE:
+ #ifdef __FreeBSD__
+ error("Cannot fetch process %d command line (command '%s')", p->pid, p->comm);
+ #else
+ error("Cannot process %s/proc/%d/cmdline (command '%s')", netdata_configured_host_prefix, p->pid, p->comm);
+ #endif
+ break;
+
+ case PID_LOG_FDS:
+ #ifdef __FreeBSD__
+ error("Cannot fetch process %d files (command '%s')", p->pid, p->comm);
+ #else
+ error("Cannot process entries in %s/proc/%d/fd (command '%s')", netdata_configured_host_prefix, p->pid, p->comm);
+ #endif
+ break;
+
+ case PID_LOG_STAT:
+ break;
+
+ default:
+ error("unhandled error for pid %d, command '%s'", p->pid, p->comm);
+ break;
+ }
+ }
+ }
+ errno = 0;
+ }
+ else if(unlikely(p->log_thrown & log)) {
+ // error("unsetting log %u on pid %d", log, p->pid);
+ p->log_thrown &= ~log;
+ }
+
+ return status;
+}
+
+static inline void assign_target_to_pid(struct pid_stat *p) {
+ targets_assignment_counter++;
+
+ uint32_t hash = simple_hash(p->comm);
+ size_t pclen = strlen(p->comm);
+
+ struct target *w;
+ for(w = apps_groups_root_target; w ; w = w->next) {
+ // if(debug_enabled || (p->target && p->target->debug_enabled)) debug_log_int("\t\tcomparing '%s' with '%s'", w->compare, p->comm);
+
+ // find it - 4 cases:
+ // 1. the target is not a pattern
+ // 2. the target has the prefix
+ // 3. the target has the suffix
+ // 4. the target is something inside cmdline
+
+ if(unlikely(( (!w->starts_with && !w->ends_with && w->comparehash == hash && !strcmp(w->compare, p->comm))
+ || (w->starts_with && !w->ends_with && !strncmp(w->compare, p->comm, w->comparelen))
+ || (!w->starts_with && w->ends_with && pclen >= w->comparelen && !strcmp(w->compare, &p->comm[pclen - w->comparelen]))
+ || (proc_pid_cmdline_is_needed && w->starts_with && w->ends_with && p->cmdline && strstr(p->cmdline, w->compare))
+ ))) {
+
+ if(w->target) p->target = w->target;
+ else p->target = w;
+
+ if(debug_enabled || (p->target && p->target->debug_enabled))
+ debug_log_int("%s linked to target %s", p->comm, p->target->name);
+
+ break;
+ }
+ }
+}
+
+
+// ----------------------------------------------------------------------------
+// update pids from proc
+
+static inline int read_proc_pid_cmdline(struct pid_stat *p) {
+ static char cmdline[MAX_CMDLINE + 1];
+
+#ifdef __FreeBSD__
+ size_t i, bytes = MAX_CMDLINE;
+ int mib[4];
+
+ mib[0] = CTL_KERN;
+ mib[1] = KERN_PROC;
+ mib[2] = KERN_PROC_ARGS;
+ mib[3] = p->pid;
+ if (unlikely(sysctl(mib, 4, cmdline, &bytes, NULL, 0)))
+ goto cleanup;
+#else
+ if(unlikely(!p->cmdline_filename)) {
+ char filename[FILENAME_MAX + 1];
+ snprintfz(filename, FILENAME_MAX, "%s/proc/%d/cmdline", netdata_configured_host_prefix, p->pid);
+ p->cmdline_filename = strdupz(filename);
+ }
+
+ int fd = open(p->cmdline_filename, procfile_open_flags, 0666);
+ if(unlikely(fd == -1)) goto cleanup;
+
+ ssize_t i, bytes = read(fd, cmdline, MAX_CMDLINE);
+ close(fd);
+
+ if(unlikely(bytes < 0)) goto cleanup;
+#endif
+
+ cmdline[bytes] = '\0';
+ for(i = 0; i < bytes ; i++) {
+ if(unlikely(!cmdline[i])) cmdline[i] = ' ';
+ }
+
+ if(p->cmdline) freez(p->cmdline);
+ p->cmdline = strdupz(cmdline);
+
+ debug_log("Read file '%s' contents: %s", p->cmdline_filename, p->cmdline);
+
+ return 1;
+
+cleanup:
+ // copy the command to the command line
+ if(p->cmdline) freez(p->cmdline);
+ p->cmdline = strdupz(p->comm);
+ return 0;
+}
+
+// ----------------------------------------------------------------------------
+// macro to calculate the incremental rate of a value
+// each parameter is accessed only ONCE - so it is safe to pass function calls
+// or other macros as parameters
+
+#define incremental_rate(rate_variable, last_kernel_variable, new_kernel_value, collected_usec, last_collected_usec) { \
+ kernel_uint_t _new_tmp = new_kernel_value; \
+ (rate_variable) = (_new_tmp - (last_kernel_variable)) * (USEC_PER_SEC * RATES_DETAIL) / ((collected_usec) - (last_collected_usec)); \
+ (last_kernel_variable) = _new_tmp; \
+ }
+
+// the same macro for struct pid members
+#define pid_incremental_rate(type, var, value) \
+ incremental_rate(var, var##_raw, value, p->type##_collected_usec, p->last_##type##_collected_usec)
+
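+// An illustrative expansion: pid_incremental_rate(stat, p->utime, value)
+// becomes, via token pasting:
+//
+//   incremental_rate(p->utime, p->utime_raw, value,
+//                    p->stat_collected_usec, p->last_stat_collected_usec);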
+
+// ----------------------------------------------------------------------------
+
+#ifndef __FreeBSD__
+struct arl_callback_ptr {
+ struct pid_stat *p;
+ procfile *ff;
+ size_t line;
+};
+
+void arl_callback_status_uid(const char *name, uint32_t hash, const char *value, void *dst) {
+ (void)name; (void)hash; (void)value;
+ struct arl_callback_ptr *aptr = (struct arl_callback_ptr *)dst;
+ if(unlikely(procfile_linewords(aptr->ff, aptr->line) < 5)) return;
+
+ //const char *real_uid = procfile_lineword(aptr->ff, aptr->line, 1);
+ const char *effective_uid = procfile_lineword(aptr->ff, aptr->line, 2);
+ //const char *saved_uid = procfile_lineword(aptr->ff, aptr->line, 3);
+ //const char *filesystem_uid = procfile_lineword(aptr->ff, aptr->line, 4);
+
+ if(likely(effective_uid && *effective_uid))
+ aptr->p->uid = (uid_t)str2l(effective_uid);
+}
+
+void arl_callback_status_gid(const char *name, uint32_t hash, const char *value, void *dst) {
+ (void)name; (void)hash; (void)value;
+ struct arl_callback_ptr *aptr = (struct arl_callback_ptr *)dst;
+ if(unlikely(procfile_linewords(aptr->ff, aptr->line) < 5)) return;
+
+ //const char *real_gid = procfile_lineword(aptr->ff, aptr->line, 1);
+ const char *effective_gid = procfile_lineword(aptr->ff, aptr->line, 2);
+ //const char *saved_gid = procfile_lineword(aptr->ff, aptr->line, 3);
+ //const char *filesystem_gid = procfile_lineword(aptr->ff, aptr->line, 4);
+
+    if(likely(effective_gid && *effective_gid))
+        aptr->p->gid = (gid_t)str2l(effective_gid);
+}
+
+void arl_callback_status_vmsize(const char *name, uint32_t hash, const char *value, void *dst) {
+ (void)name; (void)hash; (void)value;
+ struct arl_callback_ptr *aptr = (struct arl_callback_ptr *)dst;
+ if(unlikely(procfile_linewords(aptr->ff, aptr->line) < 3)) return;
+
+ aptr->p->status_vmsize = str2kernel_uint_t(procfile_lineword(aptr->ff, aptr->line, 1));
+}
+
+void arl_callback_status_vmswap(const char *name, uint32_t hash, const char *value, void *dst) {
+ (void)name; (void)hash; (void)value;
+ struct arl_callback_ptr *aptr = (struct arl_callback_ptr *)dst;
+ if(unlikely(procfile_linewords(aptr->ff, aptr->line) < 3)) return;
+
+ aptr->p->status_vmswap = str2kernel_uint_t(procfile_lineword(aptr->ff, aptr->line, 1));
+}
+
+void arl_callback_status_vmrss(const char *name, uint32_t hash, const char *value, void *dst) {
+ (void)name; (void)hash; (void)value;
+ struct arl_callback_ptr *aptr = (struct arl_callback_ptr *)dst;
+ if(unlikely(procfile_linewords(aptr->ff, aptr->line) < 3)) return;
+
+ aptr->p->status_vmrss = str2kernel_uint_t(procfile_lineword(aptr->ff, aptr->line, 1));
+}
+
+void arl_callback_status_rssfile(const char *name, uint32_t hash, const char *value, void *dst) {
+ (void)name; (void)hash; (void)value;
+ struct arl_callback_ptr *aptr = (struct arl_callback_ptr *)dst;
+ if(unlikely(procfile_linewords(aptr->ff, aptr->line) < 3)) return;
+
+ aptr->p->status_rssfile = str2kernel_uint_t(procfile_lineword(aptr->ff, aptr->line, 1));
+}
+
+void arl_callback_status_rssshmem(const char *name, uint32_t hash, const char *value, void *dst) {
+ (void)name; (void)hash; (void)value;
+ struct arl_callback_ptr *aptr = (struct arl_callback_ptr *)dst;
+ if(unlikely(procfile_linewords(aptr->ff, aptr->line) < 3)) return;
+
+ aptr->p->status_rssshmem = str2kernel_uint_t(procfile_lineword(aptr->ff, aptr->line, 1));
+}
+#endif // !__FreeBSD__
+
+static inline int read_proc_pid_status(struct pid_stat *p, void *ptr) {
+ p->status_vmsize = 0;
+ p->status_vmrss = 0;
+ p->status_vmshared = 0;
+ p->status_rssfile = 0;
+ p->status_rssshmem = 0;
+ p->status_vmswap = 0;
+
+#ifdef __FreeBSD__
+ struct kinfo_proc *proc_info = (struct kinfo_proc *)ptr;
+
+ p->uid = proc_info->ki_uid;
+ p->gid = proc_info->ki_groups[0];
+ p->status_vmsize = proc_info->ki_size / 1024; // in KiB
+ p->status_vmrss = proc_info->ki_rssize * pagesize / 1024; // in KiB
+ // TODO: what about shared and swap memory on FreeBSD?
+ return 1;
+#else
+ (void)ptr;
+
+ static struct arl_callback_ptr arl_ptr;
+ static procfile *ff = NULL;
+
+ if(unlikely(!p->status_arl)) {
+ p->status_arl = arl_create("/proc/pid/status", NULL, 60);
+ arl_expect_custom(p->status_arl, "Uid", arl_callback_status_uid, &arl_ptr);
+ arl_expect_custom(p->status_arl, "Gid", arl_callback_status_gid, &arl_ptr);
+ arl_expect_custom(p->status_arl, "VmSize", arl_callback_status_vmsize, &arl_ptr);
+ arl_expect_custom(p->status_arl, "VmRSS", arl_callback_status_vmrss, &arl_ptr);
+ arl_expect_custom(p->status_arl, "RssFile", arl_callback_status_rssfile, &arl_ptr);
+ arl_expect_custom(p->status_arl, "RssShmem", arl_callback_status_rssshmem, &arl_ptr);
+ arl_expect_custom(p->status_arl, "VmSwap", arl_callback_status_vmswap, &arl_ptr);
+ }
+
+ if(unlikely(!p->status_filename)) {
+ char filename[FILENAME_MAX + 1];
+ snprintfz(filename, FILENAME_MAX, "%s/proc/%d/status", netdata_configured_host_prefix, p->pid);
+ p->status_filename = strdupz(filename);
+ }
+
+ ff = procfile_reopen(ff, p->status_filename, (!ff)?" \t:,-()/":NULL, PROCFILE_FLAG_NO_ERROR_ON_FILE_IO);
+ if(unlikely(!ff)) return 0;
+
+ ff = procfile_readall(ff);
+ if(unlikely(!ff)) return 0;
+
+ calls_counter++;
+
+ // let ARL use this pid
+ arl_ptr.p = p;
+ arl_ptr.ff = ff;
+
+ size_t lines = procfile_lines(ff), l;
+ arl_begin(p->status_arl);
+
+ for(l = 0; l < lines ;l++) {
+ // debug_log("CHECK: line %zu of %zu, key '%s' = '%s'", l, lines, procfile_lineword(ff, l, 0), procfile_lineword(ff, l, 1));
+ arl_ptr.line = l;
+ if(unlikely(arl_check(p->status_arl,
+ procfile_lineword(ff, l, 0),
+ procfile_lineword(ff, l, 1)))) break;
+ }
+
+ p->status_vmshared = p->status_rssfile + p->status_rssshmem;
+
+ // debug_log("%s uid %d, gid %d, VmSize %zu, VmRSS %zu, RssFile %zu, RssShmem %zu, shared %zu", p->comm, (int)p->uid, (int)p->gid, p->status_vmsize, p->status_vmrss, p->status_rssfile, p->status_rssshmem, p->status_vmshared);
+
+ return 1;
+#endif
+}
+
+
+// ----------------------------------------------------------------------------
+
+static inline int read_proc_pid_stat(struct pid_stat *p, void *ptr) {
+ (void)ptr;
+
+#ifdef __FreeBSD__
+ struct kinfo_proc *proc_info = (struct kinfo_proc *)ptr;
+
+ if (unlikely(proc_info->ki_tdflags & TDF_IDLETD))
+ goto cleanup;
+#else
+ static procfile *ff = NULL;
+
+ if(unlikely(!p->stat_filename)) {
+ char filename[FILENAME_MAX + 1];
+ snprintfz(filename, FILENAME_MAX, "%s/proc/%d/stat", netdata_configured_host_prefix, p->pid);
+ p->stat_filename = strdupz(filename);
+ }
+
+ int set_quotes = (!ff)?1:0;
+
+ ff = procfile_reopen(ff, p->stat_filename, NULL, PROCFILE_FLAG_NO_ERROR_ON_FILE_IO);
+ if(unlikely(!ff)) goto cleanup;
+
+ // if(set_quotes) procfile_set_quotes(ff, "()");
+ if(unlikely(set_quotes))
+ procfile_set_open_close(ff, "(", ")");
+
+ ff = procfile_readall(ff);
+ if(unlikely(!ff)) goto cleanup;
+#endif
+
+ p->last_stat_collected_usec = p->stat_collected_usec;
+ p->stat_collected_usec = now_monotonic_usec();
+ calls_counter++;
+
+#ifdef __FreeBSD__
+ char *comm = proc_info->ki_comm;
+ p->ppid = proc_info->ki_ppid;
+#else
+ // p->pid = str2pid_t(procfile_lineword(ff, 0, 0));
+ char *comm = procfile_lineword(ff, 0, 1);
+ // p->state = *(procfile_lineword(ff, 0, 2));
+ p->ppid = (int32_t)str2pid_t(procfile_lineword(ff, 0, 3));
+ // p->pgrp = (int32_t)str2pid_t(procfile_lineword(ff, 0, 4));
+ // p->session = (int32_t)str2pid_t(procfile_lineword(ff, 0, 5));
+ // p->tty_nr = (int32_t)str2pid_t(procfile_lineword(ff, 0, 6));
+ // p->tpgid = (int32_t)str2pid_t(procfile_lineword(ff, 0, 7));
+ // p->flags = str2uint64_t(procfile_lineword(ff, 0, 8));
+#endif
+
+ if(strcmp(p->comm, comm) != 0) {
+ if(unlikely(debug_enabled)) {
+ if(p->comm[0])
+ debug_log("\tpid %d (%s) changed name to '%s'", p->pid, p->comm, comm);
+ else
+ debug_log("\tJust added %d (%s)", p->pid, comm);
+ }
+
+ strncpyz(p->comm, comm, MAX_COMPARE_NAME);
+
+ // /proc/<pid>/cmdline
+ if(likely(proc_pid_cmdline_is_needed))
+ managed_log(p, PID_LOG_CMDLINE, read_proc_pid_cmdline(p));
+
+ assign_target_to_pid(p);
+ }
+
+#ifdef __FreeBSD__
+ pid_incremental_rate(stat, p->minflt, (kernel_uint_t)proc_info->ki_rusage.ru_minflt);
+ pid_incremental_rate(stat, p->cminflt, (kernel_uint_t)proc_info->ki_rusage_ch.ru_minflt);
+ pid_incremental_rate(stat, p->majflt, (kernel_uint_t)proc_info->ki_rusage.ru_majflt);
+ pid_incremental_rate(stat, p->cmajflt, (kernel_uint_t)proc_info->ki_rusage_ch.ru_majflt);
+ pid_incremental_rate(stat, p->utime, (kernel_uint_t)proc_info->ki_rusage.ru_utime.tv_sec * 100 + proc_info->ki_rusage.ru_utime.tv_usec / 10000);
+ pid_incremental_rate(stat, p->stime, (kernel_uint_t)proc_info->ki_rusage.ru_stime.tv_sec * 100 + proc_info->ki_rusage.ru_stime.tv_usec / 10000);
+ pid_incremental_rate(stat, p->cutime, (kernel_uint_t)proc_info->ki_rusage_ch.ru_utime.tv_sec * 100 + proc_info->ki_rusage_ch.ru_utime.tv_usec / 10000);
+ pid_incremental_rate(stat, p->cstime, (kernel_uint_t)proc_info->ki_rusage_ch.ru_stime.tv_sec * 100 + proc_info->ki_rusage_ch.ru_stime.tv_usec / 10000);
+
+ p->num_threads = proc_info->ki_numthreads;
+
+ if(enable_guest_charts) {
+ enable_guest_charts = 0;
+ info("Guest charts aren't supported by FreeBSD");
+ }
+#else
+ pid_incremental_rate(stat, p->minflt, str2kernel_uint_t(procfile_lineword(ff, 0, 9)));
+ pid_incremental_rate(stat, p->cminflt, str2kernel_uint_t(procfile_lineword(ff, 0, 10)));
+ pid_incremental_rate(stat, p->majflt, str2kernel_uint_t(procfile_lineword(ff, 0, 11)));
+ pid_incremental_rate(stat, p->cmajflt, str2kernel_uint_t(procfile_lineword(ff, 0, 12)));
+ pid_incremental_rate(stat, p->utime, str2kernel_uint_t(procfile_lineword(ff, 0, 13)));
+ pid_incremental_rate(stat, p->stime, str2kernel_uint_t(procfile_lineword(ff, 0, 14)));
+ pid_incremental_rate(stat, p->cutime, str2kernel_uint_t(procfile_lineword(ff, 0, 15)));
+ pid_incremental_rate(stat, p->cstime, str2kernel_uint_t(procfile_lineword(ff, 0, 16)));
+ // p->priority = str2kernel_uint_t(procfile_lineword(ff, 0, 17));
+ // p->nice = str2kernel_uint_t(procfile_lineword(ff, 0, 18));
+ p->num_threads = (int32_t)str2uint32_t(procfile_lineword(ff, 0, 19));
+ // p->itrealvalue = str2kernel_uint_t(procfile_lineword(ff, 0, 20));
+ p->collected_starttime = str2kernel_uint_t(procfile_lineword(ff, 0, 21)) / system_hz;
+ p->uptime = (global_uptime > p->collected_starttime)?(global_uptime - p->collected_starttime):0;
+ // p->vsize = str2kernel_uint_t(procfile_lineword(ff, 0, 22));
+ // p->rss = str2kernel_uint_t(procfile_lineword(ff, 0, 23));
+ // p->rsslim = str2kernel_uint_t(procfile_lineword(ff, 0, 24));
+ // p->starcode = str2kernel_uint_t(procfile_lineword(ff, 0, 25));
+ // p->endcode = str2kernel_uint_t(procfile_lineword(ff, 0, 26));
+ // p->startstack = str2kernel_uint_t(procfile_lineword(ff, 0, 27));
+ // p->kstkesp = str2kernel_uint_t(procfile_lineword(ff, 0, 28));
+ // p->kstkeip = str2kernel_uint_t(procfile_lineword(ff, 0, 29));
+ // p->signal = str2kernel_uint_t(procfile_lineword(ff, 0, 30));
+ // p->blocked = str2kernel_uint_t(procfile_lineword(ff, 0, 31));
+ // p->sigignore = str2kernel_uint_t(procfile_lineword(ff, 0, 32));
+ // p->sigcatch = str2kernel_uint_t(procfile_lineword(ff, 0, 33));
+ // p->wchan = str2kernel_uint_t(procfile_lineword(ff, 0, 34));
+ // p->nswap = str2kernel_uint_t(procfile_lineword(ff, 0, 35));
+ // p->cnswap = str2kernel_uint_t(procfile_lineword(ff, 0, 36));
+ // p->exit_signal = str2kernel_uint_t(procfile_lineword(ff, 0, 37));
+ // p->processor = str2kernel_uint_t(procfile_lineword(ff, 0, 38));
+ // p->rt_priority = str2kernel_uint_t(procfile_lineword(ff, 0, 39));
+ // p->policy = str2kernel_uint_t(procfile_lineword(ff, 0, 40));
+ // p->delayacct_blkio_ticks = str2kernel_uint_t(procfile_lineword(ff, 0, 41));
+
+ if(enable_guest_charts) {
+
+ pid_incremental_rate(stat, p->gtime, str2kernel_uint_t(procfile_lineword(ff, 0, 42)));
+ pid_incremental_rate(stat, p->cgtime, str2kernel_uint_t(procfile_lineword(ff, 0, 43)));
+
+ if (show_guest_time || p->gtime || p->cgtime) {
+ p->utime -= (p->utime >= p->gtime) ? p->gtime : p->utime;
+ p->cutime -= (p->cutime >= p->cgtime) ? p->cgtime : p->cutime;
+ show_guest_time = 1;
+ }
+ }
+#endif
+
+ if(unlikely(debug_enabled || (p->target && p->target->debug_enabled)))
+ debug_log_int("READ PROC/PID/STAT: %s/proc/%d/stat, process: '%s' on target '%s' (dt=%llu) VALUES: utime=" KERNEL_UINT_FORMAT ", stime=" KERNEL_UINT_FORMAT ", cutime=" KERNEL_UINT_FORMAT ", cstime=" KERNEL_UINT_FORMAT ", minflt=" KERNEL_UINT_FORMAT ", majflt=" KERNEL_UINT_FORMAT ", cminflt=" KERNEL_UINT_FORMAT ", cmajflt=" KERNEL_UINT_FORMAT ", threads=%d", netdata_configured_host_prefix, p->pid, p->comm, (p->target)?p->target->name:"UNSET", p->stat_collected_usec - p->last_stat_collected_usec, p->utime, p->stime, p->cutime, p->cstime, p->minflt, p->majflt, p->cminflt, p->cmajflt, p->num_threads);
+
+ if(unlikely(global_iterations_counter == 1)) {
+ p->minflt = 0;
+ p->cminflt = 0;
+ p->majflt = 0;
+ p->cmajflt = 0;
+ p->utime = 0;
+ p->stime = 0;
+ p->gtime = 0;
+ p->cutime = 0;
+ p->cstime = 0;
+ p->cgtime = 0;
+ }
+
+ return 1;
+
+cleanup:
+ p->minflt = 0;
+ p->cminflt = 0;
+ p->majflt = 0;
+ p->cmajflt = 0;
+ p->utime = 0;
+ p->stime = 0;
+ p->gtime = 0;
+ p->cutime = 0;
+ p->cstime = 0;
+ p->cgtime = 0;
+ p->num_threads = 0;
+ // p->rss = 0;
+ return 0;
+}
+
+// ----------------------------------------------------------------------------
+
+static inline int read_proc_pid_io(struct pid_stat *p, void *ptr) {
+ (void)ptr;
+#ifdef __FreeBSD__
+ struct kinfo_proc *proc_info = (struct kinfo_proc *)ptr;
+#else
+ static procfile *ff = NULL;
+
+ if(unlikely(!p->io_filename)) {
+ char filename[FILENAME_MAX + 1];
+ snprintfz(filename, FILENAME_MAX, "%s/proc/%d/io", netdata_configured_host_prefix, p->pid);
+ p->io_filename = strdupz(filename);
+ }
+
+ // open the file
+ ff = procfile_reopen(ff, p->io_filename, NULL, PROCFILE_FLAG_NO_ERROR_ON_FILE_IO);
+ if(unlikely(!ff)) goto cleanup;
+
+ ff = procfile_readall(ff);
+ if(unlikely(!ff)) goto cleanup;
+#endif
+
+ calls_counter++;
+
+ p->last_io_collected_usec = p->io_collected_usec;
+ p->io_collected_usec = now_monotonic_usec();
+
+#ifdef __FreeBSD__
+ pid_incremental_rate(io, p->io_storage_bytes_read, proc_info->ki_rusage.ru_inblock);
+ pid_incremental_rate(io, p->io_storage_bytes_written, proc_info->ki_rusage.ru_oublock);
+#else
+ pid_incremental_rate(io, p->io_logical_bytes_read, str2kernel_uint_t(procfile_lineword(ff, 0, 1)));
+ pid_incremental_rate(io, p->io_logical_bytes_written, str2kernel_uint_t(procfile_lineword(ff, 1, 1)));
+ // pid_incremental_rate(io, p->io_read_calls, str2kernel_uint_t(procfile_lineword(ff, 2, 1)));
+ // pid_incremental_rate(io, p->io_write_calls, str2kernel_uint_t(procfile_lineword(ff, 3, 1)));
+ pid_incremental_rate(io, p->io_storage_bytes_read, str2kernel_uint_t(procfile_lineword(ff, 4, 1)));
+ pid_incremental_rate(io, p->io_storage_bytes_written, str2kernel_uint_t(procfile_lineword(ff, 5, 1)));
+ // pid_incremental_rate(io, p->io_cancelled_write_bytes, str2kernel_uint_t(procfile_lineword(ff, 6, 1)));
+#endif
+
+ if(unlikely(global_iterations_counter == 1)) {
+ p->io_logical_bytes_read = 0;
+ p->io_logical_bytes_written = 0;
+ // p->io_read_calls = 0;
+ // p->io_write_calls = 0;
+ p->io_storage_bytes_read = 0;
+ p->io_storage_bytes_written = 0;
+ // p->io_cancelled_write_bytes = 0;
+ }
+
+ return 1;
+
+#ifndef __FreeBSD__
+cleanup:
+ p->io_logical_bytes_read = 0;
+ p->io_logical_bytes_written = 0;
+ // p->io_read_calls = 0;
+ // p->io_write_calls = 0;
+ p->io_storage_bytes_read = 0;
+ p->io_storage_bytes_written = 0;
+ // p->io_cancelled_write_bytes = 0;
+ return 0;
+#endif
+}
+
+#ifndef __FreeBSD__
+static inline int read_global_time() {
+ static char filename[FILENAME_MAX + 1] = "";
+ static procfile *ff = NULL;
+ static kernel_uint_t utime_raw = 0, stime_raw = 0, gtime_raw = 0, gntime_raw = 0, ntime_raw = 0;
+ static usec_t collected_usec = 0, last_collected_usec = 0;
+
+ if(unlikely(!ff)) {
+ snprintfz(filename, FILENAME_MAX, "%s/proc/stat", netdata_configured_host_prefix);
+ ff = procfile_open(filename, " \t:", PROCFILE_FLAG_DEFAULT);
+ if(unlikely(!ff)) goto cleanup;
+ }
+
+ ff = procfile_readall(ff);
+ if(unlikely(!ff)) goto cleanup;
+
+ last_collected_usec = collected_usec;
+ collected_usec = now_monotonic_usec();
+
+ calls_counter++;
+
+    // temporary - global_ntime is added to global_utime below
+ kernel_uint_t global_ntime = 0;
+
+ incremental_rate(global_utime, utime_raw, str2kernel_uint_t(procfile_lineword(ff, 0, 1)), collected_usec, last_collected_usec);
+ incremental_rate(global_ntime, ntime_raw, str2kernel_uint_t(procfile_lineword(ff, 0, 2)), collected_usec, last_collected_usec);
+ incremental_rate(global_stime, stime_raw, str2kernel_uint_t(procfile_lineword(ff, 0, 3)), collected_usec, last_collected_usec);
+ incremental_rate(global_gtime, gtime_raw, str2kernel_uint_t(procfile_lineword(ff, 0, 10)), collected_usec, last_collected_usec);
+
+ global_utime += global_ntime;
+
+ if(enable_guest_charts) {
+        // temporary - global_gntime is added to global_gtime below
+ kernel_uint_t global_gntime = 0;
+
+        // guest nice time (accounted within guest time)
+ incremental_rate(global_gntime, gntime_raw, str2kernel_uint_t(procfile_lineword(ff, 0, 11)), collected_usec, last_collected_usec);
+
+ global_gtime += global_gntime;
+
+ // remove guest time from user time
+ global_utime -= (global_utime > global_gtime) ? global_gtime : global_utime;
+ }
+
+ if(unlikely(global_iterations_counter == 1)) {
+ global_utime = 0;
+ global_stime = 0;
+ global_gtime = 0;
+ }
+
+ return 1;
+
+cleanup:
+ global_utime = 0;
+ global_stime = 0;
+ global_gtime = 0;
+ return 0;
+}
+#else
+static inline int read_global_time() {
+ static kernel_uint_t utime_raw = 0, stime_raw = 0, gtime_raw = 0, ntime_raw = 0;
+ static usec_t collected_usec = 0, last_collected_usec = 0;
+ long cp_time[CPUSTATES];
+
+ if (unlikely(CPUSTATES != 5)) {
+ goto cleanup;
+ } else {
+ static int mib[2] = {0, 0};
+
+ if (unlikely(GETSYSCTL_SIMPLE("kern.cp_time", mib, cp_time))) {
+ goto cleanup;
+ }
+ }
+
+ last_collected_usec = collected_usec;
+ collected_usec = now_monotonic_usec();
+
+ calls_counter++;
+
+    // temporary - global_ntime is added to global_utime below
+ kernel_uint_t global_ntime = 0;
+
+ incremental_rate(global_utime, utime_raw, cp_time[0] * 100LLU / system_hz, collected_usec, last_collected_usec);
+ incremental_rate(global_ntime, ntime_raw, cp_time[1] * 100LLU / system_hz, collected_usec, last_collected_usec);
+ incremental_rate(global_stime, stime_raw, cp_time[2] * 100LLU / system_hz, collected_usec, last_collected_usec);
+
+ global_utime += global_ntime;
+
+ if(unlikely(global_iterations_counter == 1)) {
+ global_utime = 0;
+ global_stime = 0;
+ global_gtime = 0;
+ }
+
+ return 1;
+
+cleanup:
+ global_utime = 0;
+ global_stime = 0;
+ global_gtime = 0;
+ return 0;
+}
+#endif /* !__FreeBSD__ */
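+
+// A minimal sketch of what the incremental_rate() macro used above is
+// assumed to do (the actual macro is defined elsewhere in the plugin):
+// turn a monotonically increasing raw kernel counter into a rate
+// normalized to USEC_PER_SEC * RATES_DETAIL units, e.g.:
+//
+//   rate = (new_raw - old_raw) * (USEC_PER_SEC * RATES_DETAIL)
+//          / (collected_usec - last_collected_usec);
+//   old_raw = new_raw;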
+
+// ----------------------------------------------------------------------------
+
+int file_descriptor_compare(void* a, void* b) {
+#ifdef NETDATA_INTERNAL_CHECKS
+ if(((struct file_descriptor *)a)->magic != 0x0BADCAFE || ((struct file_descriptor *)b)->magic != 0x0BADCAFE)
+ error("Corrupted index data detected. Please report this.");
+#endif /* NETDATA_INTERNAL_CHECKS */
+
+ if(((struct file_descriptor *)a)->hash < ((struct file_descriptor *)b)->hash)
+ return -1;
+
+ else if(((struct file_descriptor *)a)->hash > ((struct file_descriptor *)b)->hash)
+ return 1;
+
+ else
+ return strcmp(((struct file_descriptor *)a)->name, ((struct file_descriptor *)b)->name);
+}
+
+// int file_descriptor_iterator(avl *a) { if(a) {}; return 0; }
+
+avl_tree_type all_files_index = {
+ NULL,
+ file_descriptor_compare
+};
+
+static struct file_descriptor *file_descriptor_find(const char *name, uint32_t hash) {
+ struct file_descriptor tmp;
+ tmp.hash = (hash)?hash:simple_hash(name);
+ tmp.name = name;
+ tmp.count = 0;
+ tmp.pos = 0;
+#ifdef NETDATA_INTERNAL_CHECKS
+ tmp.magic = 0x0BADCAFE;
+#endif /* NETDATA_INTERNAL_CHECKS */
+
+ return (struct file_descriptor *)avl_search(&all_files_index, (avl *) &tmp);
+}
+
+#define file_descriptor_add(fd) avl_insert(&all_files_index, (avl *)(fd))
+#define file_descriptor_remove(fd) avl_remove(&all_files_index, (avl *)(fd))
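+
+// Illustrative life cycle of an interned fd name (a sketch with a
+// hypothetical path, not code that exists elsewhere in this file): names
+// are deduplicated in the AVL index above and reference counted through
+// their 'count' member.
+//
+//   int pos = file_descriptor_find_or_add("/var/log/syslog", 0);   // count = 1
+//   int again = file_descriptor_find_or_add("/var/log/syslog", 0); // same slot, count = 2
+//   file_descriptor_not_used(pos);   // count = 1
+//   file_descriptor_not_used(again); // count = 0, slot removed from the index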
+
+// ----------------------------------------------------------------------------
+
+static inline void file_descriptor_not_used(int id)
+{
+ if(id > 0 && id < all_files_size) {
+
+#ifdef NETDATA_INTERNAL_CHECKS
+ if(all_files[id].magic != 0x0BADCAFE) {
+ error("Ignoring request to remove empty file id %d.", id);
+ return;
+ }
+#endif /* NETDATA_INTERNAL_CHECKS */
+
+ debug_log("decreasing slot %d (count = %d).", id, all_files[id].count);
+
+ if(all_files[id].count > 0) {
+ all_files[id].count--;
+
+ if(!all_files[id].count) {
+ debug_log(" >> slot %d is empty.", id);
+
+ if(unlikely(file_descriptor_remove(&all_files[id]) != (void *)&all_files[id]))
+ error("INTERNAL ERROR: removal of unused fd from index, removed a different fd");
+
+#ifdef NETDATA_INTERNAL_CHECKS
+ all_files[id].magic = 0x00000000;
+#endif /* NETDATA_INTERNAL_CHECKS */
+ all_files_len--;
+ }
+ }
+ else
+ error("Request to decrease counter of fd %d (%s), while the use counter is 0", id, all_files[id].name);
+ }
+ else error("Request to decrease counter of fd %d, which is outside the array size (1 to %d)", id, all_files_size);
+}
+
+static inline void all_files_grow() {
+ void *old = all_files;
+ int i;
+
+ // there is no empty slot
+ debug_log("extending fd array to %d entries", all_files_size + FILE_DESCRIPTORS_INCREASE_STEP);
+
+ all_files = reallocz(all_files, (all_files_size + FILE_DESCRIPTORS_INCREASE_STEP) * sizeof(struct file_descriptor));
+
+ // if the address changed, we have to rebuild the index
+ // since all pointers are now invalid
+
+ if(unlikely(old && old != (void *)all_files)) {
+ debug_log(" >> re-indexing.");
+
+ all_files_index.root = NULL;
+ for(i = 0; i < all_files_size; i++) {
+ if(!all_files[i].count) continue;
+ if(unlikely(file_descriptor_add(&all_files[i]) != (void *)&all_files[i]))
+ error("INTERNAL ERROR: duplicate indexing of fd during realloc.");
+ }
+
+ debug_log(" >> re-indexing done.");
+ }
+
+ // initialize the newly added entries
+
+ for(i = all_files_size; i < (all_files_size + FILE_DESCRIPTORS_INCREASE_STEP); i++) {
+ all_files[i].count = 0;
+ all_files[i].name = NULL;
+#ifdef NETDATA_INTERNAL_CHECKS
+ all_files[i].magic = 0x00000000;
+#endif /* NETDATA_INTERNAL_CHECKS */
+ all_files[i].pos = i;
+ }
+
+ if(unlikely(!all_files_size)) all_files_len = 1;
+ all_files_size += FILE_DESCRIPTORS_INCREASE_STEP;
+}
+
+static inline int file_descriptor_set_on_empty_slot(const char *name, uint32_t hash, FD_FILETYPE type) {
+ // check we have enough memory to add it
+ if(!all_files || all_files_len == all_files_size)
+ all_files_grow();
+
+ debug_log(" >> searching for empty slot.");
+
+ // search for an empty slot
+
+ static int last_pos = 0;
+ int i, c;
+ for(i = 0, c = last_pos ; i < all_files_size ; i++, c++) {
+ if(c >= all_files_size) c = 0;
+ if(c == 0) continue;
+
+ if(!all_files[c].count) {
+ debug_log(" >> Examining slot %d.", c);
+
+#ifdef NETDATA_INTERNAL_CHECKS
+ if(all_files[c].magic == 0x0BADCAFE && all_files[c].name && file_descriptor_find(all_files[c].name, all_files[c].hash))
+ error("fd on position %d is not cleared properly. It still has %s in it.", c, all_files[c].name);
+#endif /* NETDATA_INTERNAL_CHECKS */
+
+ debug_log(" >> %s fd position %d for %s (last name: %s)", all_files[c].name?"re-using":"using", c, name, all_files[c].name);
+
+ freez((void *)all_files[c].name);
+ all_files[c].name = NULL;
+ last_pos = c;
+ break;
+ }
+ }
+
+ all_files_len++;
+
+ if(i == all_files_size) {
+ fatal("We should find an empty slot, but there isn't any");
+ exit(1);
+ }
+ // else we have an empty slot in 'c'
+
+ debug_log(" >> updating slot %d.", c);
+
+ all_files[c].name = strdupz(name);
+ all_files[c].hash = hash;
+ all_files[c].type = type;
+ all_files[c].pos = c;
+ all_files[c].count = 1;
+#ifdef NETDATA_INTERNAL_CHECKS
+ all_files[c].magic = 0x0BADCAFE;
+#endif /* NETDATA_INTERNAL_CHECKS */
+ if(unlikely(file_descriptor_add(&all_files[c]) != (void *)&all_files[c]))
+ error("INTERNAL ERROR: duplicate indexing of fd.");
+
+ debug_log("using fd position %d (name: %s)", c, all_files[c].name);
+
+ return c;
+}
+
+static inline int file_descriptor_find_or_add(const char *name, uint32_t hash) {
+ if(unlikely(!hash))
+ hash = simple_hash(name);
+
+ debug_log("adding or finding name '%s' with hash %u", name, hash);
+
+ struct file_descriptor *fd = file_descriptor_find(name, hash);
+ if(fd) {
+ // found
+ debug_log(" >> found on slot %d", fd->pos);
+
+ fd->count++;
+ return fd->pos;
+ }
+ // not found
+
+ FD_FILETYPE type;
+ if(likely(name[0] == '/')) type = FILETYPE_FILE;
+ else if(likely(strncmp(name, "pipe:", 5) == 0)) type = FILETYPE_PIPE;
+ else if(likely(strncmp(name, "socket:", 7) == 0)) type = FILETYPE_SOCKET;
+ else if(likely(strncmp(name, "anon_inode:", 11) == 0)) {
+ const char *t = &name[11];
+
+ if(strcmp(t, "inotify") == 0) type = FILETYPE_INOTIFY;
+ else if(strcmp(t, "[eventfd]") == 0) type = FILETYPE_EVENTFD;
+ else if(strcmp(t, "[eventpoll]") == 0) type = FILETYPE_EVENTPOLL;
+ else if(strcmp(t, "[timerfd]") == 0) type = FILETYPE_TIMERFD;
+ else if(strcmp(t, "[signalfd]") == 0) type = FILETYPE_SIGNALFD;
+ else {
+ debug_log("UNKNOWN anonymous inode: %s", name);
+ type = FILETYPE_OTHER;
+ }
+ }
+ else if(likely(strcmp(name, "inotify") == 0)) type = FILETYPE_INOTIFY;
+ else {
+ debug_log("UNKNOWN linkname: %s", name);
+ type = FILETYPE_OTHER;
+ }
+
+ return file_descriptor_set_on_empty_slot(name, hash, type);
+}
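+
+// Example link names as they appear under /proc/<pid>/fd and how the
+// classification above maps them (illustrative values):
+//   "/var/log/syslog"        -> FILETYPE_FILE
+//   "pipe:[1870]"            -> FILETYPE_PIPE
+//   "socket:[200645]"        -> FILETYPE_SOCKET
+//   "anon_inode:[eventpoll]" -> FILETYPE_EVENTPOLL
+//   "anon_inode:[timerfd]"   -> FILETYPE_TIMERFD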
+
+static inline void clear_pid_fd(struct pid_fd *pfd) {
+ pfd->fd = 0;
+
+#ifndef __FreeBSD__
+ pfd->link_hash = 0;
+ pfd->inode = 0;
+ pfd->cache_iterations_counter = 0;
+ pfd->cache_iterations_reset = 0;
+#endif
+}
+
+static inline void make_all_pid_fds_negative(struct pid_stat *p) {
+ struct pid_fd *pfd = p->fds, *pfdend = &p->fds[p->fds_size];
+ while(pfd < pfdend) {
+ pfd->fd = -(pfd->fd);
+ pfd++;
+ }
+}
+
+static inline void cleanup_negative_pid_fds(struct pid_stat *p) {
+ struct pid_fd *pfd = p->fds, *pfdend = &p->fds[p->fds_size];
+
+ while(pfd < pfdend) {
+ int fd = pfd->fd;
+
+ if(unlikely(fd < 0)) {
+ file_descriptor_not_used(-(fd));
+ clear_pid_fd(pfd);
+ }
+
+ pfd++;
+ }
+}
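+
+// Taken together, the helpers above implement a mark-and-sweep pass over a
+// pid's fd table (illustrative summary):
+//   make_all_pid_fds_negative(p);  // mark: negate every known fd id
+//   /* ... re-read /proc/<pid>/fd, flipping survivors back to positive ... */
+//   cleanup_negative_pid_fds(p);   // sweep: release ids that stayed negative
+// An fd id of 0 means the slot is unused; a positive id means the fd was
+// confirmed during the current iteration.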
+
+static inline void init_pid_fds(struct pid_stat *p, size_t first, size_t size) {
+ struct pid_fd *pfd = &p->fds[first], *pfdend = &p->fds[first + size];
+ size_t i = first;
+
+ while(pfd < pfdend) {
+#ifndef __FreeBSD__
+ pfd->filename = NULL;
+#endif
+ clear_pid_fd(pfd);
+ pfd++;
+ i++;
+ }
+}
+
+static inline int read_pid_file_descriptors(struct pid_stat *p, void *ptr) {
+ (void)ptr;
+#ifdef __FreeBSD__
+ int mib[4];
+ size_t size;
+ struct kinfo_file *fds;
+ static char *fdsbuf;
+ char *bfdsbuf, *efdsbuf;
+ char fdsname[FILENAME_MAX + 1];
+
+ // we make all pid fds negative, so that
+ // we can detect unused file descriptors
+ // at the end, to free them
+ make_all_pid_fds_negative(p);
+
+ mib[0] = CTL_KERN;
+ mib[1] = KERN_PROC;
+ mib[2] = KERN_PROC_FILEDESC;
+ mib[3] = p->pid;
+
+ if (unlikely(sysctl(mib, 4, NULL, &size, NULL, 0))) {
+ error("sysctl error: Can't get file descriptors data size for pid %d", p->pid);
+ return 0;
+ }
+ if (likely(size > 0))
+ fdsbuf = reallocz(fdsbuf, size);
+ if (unlikely(sysctl(mib, 4, fdsbuf, &size, NULL, 0))) {
+ error("sysctl error: Can't get file descriptors data for pid %d", p->pid);
+ return 0;
+ }
+
+ bfdsbuf = fdsbuf;
+ efdsbuf = fdsbuf + size;
+ while (bfdsbuf < efdsbuf) {
+ fds = (struct kinfo_file *)(uintptr_t)bfdsbuf;
+ if (unlikely(fds->kf_structsize == 0))
+ break;
+
+ // do not process file descriptors for current working directory, root directory,
+ // jail directory, ktrace vnode, text vnode and controlling terminal
+ if (unlikely(fds->kf_fd < 0)) {
+ bfdsbuf += fds->kf_structsize;
+ continue;
+ }
+
+ // get file descriptors array index
+ int fdid = fds->kf_fd;
+
+ // check if the fds array is small
+ if (unlikely(fdid >= p->fds_size)) {
+ // it is small, extend it
+
+ debug_log("extending fd memory slots for %s from %d to %d", p->comm, p->fds_size, fdid + MAX_SPARE_FDS);
+
+ p->fds = reallocz(p->fds, (fdid + MAX_SPARE_FDS) * sizeof(struct pid_fd));
+
+ // and initialize it
+ init_pid_fds(p, p->fds_size, (fdid + MAX_SPARE_FDS) - p->fds_size);
+ p->fds_size = fdid + MAX_SPARE_FDS;
+ }
+
+ if (unlikely(p->fds[fdid].fd == 0)) {
+ // we don't know this fd, get it
+
+ switch (fds->kf_type) {
+ case KF_TYPE_FIFO:
+ case KF_TYPE_VNODE:
+ if (unlikely(!fds->kf_path[0])) {
+ sprintf(fdsname, "other: inode: %lu", fds->kf_un.kf_file.kf_file_fileid);
+ break;
+ }
+ sprintf(fdsname, "%s", fds->kf_path);
+ break;
+ case KF_TYPE_SOCKET:
+ switch (fds->kf_sock_domain) {
+ case AF_INET:
+ case AF_INET6:
+ if (fds->kf_sock_protocol == IPPROTO_TCP)
+ sprintf(fdsname, "socket: %d %lx", fds->kf_sock_protocol, fds->kf_un.kf_sock.kf_sock_inpcb);
+ else
+ sprintf(fdsname, "socket: %d %lx", fds->kf_sock_protocol, fds->kf_un.kf_sock.kf_sock_pcb);
+ break;
+ case AF_UNIX:
+ /* print address of pcb and connected pcb */
+ sprintf(fdsname, "socket: %lx %lx", fds->kf_un.kf_sock.kf_sock_pcb, fds->kf_un.kf_sock.kf_sock_unpconn);
+ break;
+ default:
+ /* print protocol number and socket address */
+#if __FreeBSD_version < 1200031
+ sprintf(fdsname, "socket: other: %d %s %s", fds->kf_sock_protocol, fds->kf_sa_local.__ss_pad1, fds->kf_sa_local.__ss_pad2);
+#else
+ sprintf(fdsname, "socket: other: %d %s %s", fds->kf_sock_protocol, fds->kf_un.kf_sock.kf_sa_local.__ss_pad1, fds->kf_un.kf_sock.kf_sa_local.__ss_pad2);
+#endif
+ }
+ break;
+ case KF_TYPE_PIPE:
+ sprintf(fdsname, "pipe: %lu %lu", fds->kf_un.kf_pipe.kf_pipe_addr, fds->kf_un.kf_pipe.kf_pipe_peer);
+ break;
+ case KF_TYPE_PTS:
+#if __FreeBSD_version < 1200031
+ sprintf(fdsname, "other: pts: %u", fds->kf_un.kf_pts.kf_pts_dev);
+#else
+ sprintf(fdsname, "other: pts: %lu", fds->kf_un.kf_pts.kf_pts_dev);
+#endif
+ break;
+ case KF_TYPE_SHM:
+ sprintf(fdsname, "other: shm: %s size: %lu", fds->kf_path, fds->kf_un.kf_file.kf_file_size);
+ break;
+ case KF_TYPE_SEM:
+ sprintf(fdsname, "other: sem: %u", fds->kf_un.kf_sem.kf_sem_value);
+ break;
+ default:
+ sprintf(fdsname, "other: pid: %d fd: %d", fds->kf_un.kf_proc.kf_pid, fds->kf_fd);
+ }
+
+ // if another process already has this, we will get
+ // the same id
+ p->fds[fdid].fd = file_descriptor_find_or_add(fdsname, 0);
+ }
+
+ // else make it positive again, we need it
+ // of course, the actual file may have changed
+
+ else
+ p->fds[fdid].fd = -p->fds[fdid].fd;
+
+ bfdsbuf += fds->kf_structsize;
+ }
+#else
+ if(unlikely(!p->fds_dirname)) {
+ char dirname[FILENAME_MAX+1];
+ snprintfz(dirname, FILENAME_MAX, "%s/proc/%d/fd", netdata_configured_host_prefix, p->pid);
+ p->fds_dirname = strdupz(dirname);
+ }
+
+ DIR *fds = opendir(p->fds_dirname);
+ if(unlikely(!fds)) return 0;
+
+ struct dirent *de;
+ char linkname[FILENAME_MAX + 1];
+
+ // we make all pid fds negative, so that
+ // we can detect unused file descriptors
+ // at the end, to free them
+ make_all_pid_fds_negative(p);
+
+ while((de = readdir(fds))) {
+ // we need only files with numeric names
+
+ if(unlikely(de->d_name[0] < '0' || de->d_name[0] > '9'))
+ continue;
+
+ // get its number
+ int fdid = (int) str2l(de->d_name);
+ if(unlikely(fdid < 0)) continue;
+
+ // check if the fds array is small
+ if(unlikely((size_t)fdid >= p->fds_size)) {
+ // it is small, extend it
+
+ debug_log("extending fd memory slots for %s from %d to %d"
+ , p->comm
+ , p->fds_size
+ , fdid + MAX_SPARE_FDS
+ );
+
+ p->fds = reallocz(p->fds, (fdid + MAX_SPARE_FDS) * sizeof(struct pid_fd));
+
+ // and initialize it
+ init_pid_fds(p, p->fds_size, (fdid + MAX_SPARE_FDS) - p->fds_size);
+ p->fds_size = (size_t)fdid + MAX_SPARE_FDS;
+ }
+
+ if(unlikely(p->fds[fdid].fd < 0 && de->d_ino != p->fds[fdid].inode)) {
+ // inodes do not match, clear the previous entry
+ inodes_changed_counter++;
+ file_descriptor_not_used(-p->fds[fdid].fd);
+ clear_pid_fd(&p->fds[fdid]);
+ }
+
+ if(p->fds[fdid].fd < 0 && p->fds[fdid].cache_iterations_counter > 0) {
+ p->fds[fdid].fd = -p->fds[fdid].fd;
+ p->fds[fdid].cache_iterations_counter--;
+ continue;
+ }
+
+ if(unlikely(!p->fds[fdid].filename)) {
+ filenames_allocated_counter++;
+ char fdname[FILENAME_MAX + 1];
+ snprintfz(fdname, FILENAME_MAX, "%s/proc/%d/fd/%s", netdata_configured_host_prefix, p->pid, de->d_name);
+ p->fds[fdid].filename = strdupz(fdname);
+ }
+
+ file_counter++;
+ ssize_t l = readlink(p->fds[fdid].filename, linkname, FILENAME_MAX);
+ if(unlikely(l == -1)) {
+ // cannot read the link
+
+ if(debug_enabled || (p->target && p->target->debug_enabled))
+ error("Cannot read link %s", p->fds[fdid].filename);
+
+ if(unlikely(p->fds[fdid].fd < 0)) {
+ file_descriptor_not_used(-p->fds[fdid].fd);
+ clear_pid_fd(&p->fds[fdid]);
+ }
+
+ continue;
+ }
+ else
+ linkname[l] = '\0';
+
+ uint32_t link_hash = simple_hash(linkname);
+
+ if(unlikely(p->fds[fdid].fd < 0 && p->fds[fdid].link_hash != link_hash)) {
+ // the link changed
+ links_changed_counter++;
+ file_descriptor_not_used(-p->fds[fdid].fd);
+ clear_pid_fd(&p->fds[fdid]);
+ }
+
+ if(unlikely(p->fds[fdid].fd == 0)) {
+ // we don't know this fd, get it
+
+ // if another process already has this, we will get
+ // the same id
+ p->fds[fdid].fd = file_descriptor_find_or_add(linkname, link_hash);
+ p->fds[fdid].inode = de->d_ino;
+ p->fds[fdid].link_hash = link_hash;
+ }
+ else {
+ // else make it positive again, we need it
+ p->fds[fdid].fd = -p->fds[fdid].fd;
+ }
+
+ // caching control
+ // without this we read all the files on every iteration
+ if(max_fds_cache_seconds > 0) {
+ size_t spread = ((size_t)max_fds_cache_seconds > 10) ? 10 : (size_t)max_fds_cache_seconds;
+
+ // cache it for a few iterations
+ size_t max = ((size_t) max_fds_cache_seconds + (fdid % spread)) / (size_t) update_every;
+ p->fds[fdid].cache_iterations_reset++;
+
+ if(unlikely(p->fds[fdid].cache_iterations_reset % spread == (size_t) fdid % spread))
+ p->fds[fdid].cache_iterations_reset++;
+
+ if(unlikely((fdid <= 2 && p->fds[fdid].cache_iterations_reset > 5) ||
+ p->fds[fdid].cache_iterations_reset > max)) {
+ // for stdin, stdout, stderr (fdid <= 2) after a few checks, or when the reset value exceeds max, clamp it to max
+ p->fds[fdid].cache_iterations_reset = max;
+ }
+
+ p->fds[fdid].cache_iterations_counter = p->fds[fdid].cache_iterations_reset;
+ }
+ }
+
+ closedir(fds);
+#endif
+ cleanup_negative_pid_fds(p);
+
+ return 1;
+}
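+
+// Worked example for the fd caching logic above (illustrative numbers,
+// assuming max_fds_cache_seconds = 60 and update_every = 1): spread is
+// capped at 10, so for fdid = 7, max = (60 + 7 % 10) / 1 = 67 iterations.
+// The fdid % spread term staggers re-reads, so that not all descriptors
+// of a process hit readlink() on the same iteration.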
+
+// ----------------------------------------------------------------------------
+
+static inline int debug_print_process_and_parents(struct pid_stat *p, usec_t time) {
+ char *prefix = "\\_ ";
+ int indent = 0;
+
+ if(p->parent)
+ indent = debug_print_process_and_parents(p->parent, p->stat_collected_usec);
+ else
+ prefix = " > ";
+
+ char buffer[indent + 1];
+ int i;
+
+ for(i = 0; i < indent ;i++) buffer[i] = ' ';
+ buffer[i] = '\0';
+
+ fprintf(stderr, " %s %s%s (%d %s %llu"
+ , buffer
+ , prefix
+ , p->comm
+ , p->pid
+ , p->updated?"running":"exited"
+ , p->stat_collected_usec - time
+ );
+
+ if(p->utime) fprintf(stderr, " utime=" KERNEL_UINT_FORMAT, p->utime);
+ if(p->stime) fprintf(stderr, " stime=" KERNEL_UINT_FORMAT, p->stime);
+ if(p->gtime) fprintf(stderr, " gtime=" KERNEL_UINT_FORMAT, p->gtime);
+ if(p->cutime) fprintf(stderr, " cutime=" KERNEL_UINT_FORMAT, p->cutime);
+ if(p->cstime) fprintf(stderr, " cstime=" KERNEL_UINT_FORMAT, p->cstime);
+ if(p->cgtime) fprintf(stderr, " cgtime=" KERNEL_UINT_FORMAT, p->cgtime);
+ if(p->minflt) fprintf(stderr, " minflt=" KERNEL_UINT_FORMAT, p->minflt);
+ if(p->cminflt) fprintf(stderr, " cminflt=" KERNEL_UINT_FORMAT, p->cminflt);
+ if(p->majflt) fprintf(stderr, " majflt=" KERNEL_UINT_FORMAT, p->majflt);
+ if(p->cmajflt) fprintf(stderr, " cmajflt=" KERNEL_UINT_FORMAT, p->cmajflt);
+ fprintf(stderr, ")\n");
+
+ return indent + 1;
+}
+
+static inline void debug_print_process_tree(struct pid_stat *p, char *msg __maybe_unused) {
+ debug_log("%s: process %s (%d, %s) with parents:", msg, p->comm, p->pid, p->updated?"running":"exited");
+ debug_print_process_and_parents(p, p->stat_collected_usec);
+}
+
+static inline void debug_find_lost_child(struct pid_stat *pe, kernel_uint_t lost, int type) {
+ int found = 0;
+ struct pid_stat *p = NULL;
+
+ for(p = root_of_pids; p ; p = p->next) {
+ if(p == pe) continue;
+
+ switch(type) {
+ case 1:
+ if(p->cminflt > lost) {
+ fprintf(stderr, " > process %d (%s) could use the lost exited child minflt " KERNEL_UINT_FORMAT " of process %d (%s)\n", p->pid, p->comm, lost, pe->pid, pe->comm);
+ found++;
+ }
+ break;
+
+ case 2:
+ if(p->cmajflt > lost) {
+ fprintf(stderr, " > process %d (%s) could use the lost exited child majflt " KERNEL_UINT_FORMAT " of process %d (%s)\n", p->pid, p->comm, lost, pe->pid, pe->comm);
+ found++;
+ }
+ break;
+
+ case 3:
+ if(p->cutime > lost) {
+ fprintf(stderr, " > process %d (%s) could use the lost exited child utime " KERNEL_UINT_FORMAT " of process %d (%s)\n", p->pid, p->comm, lost, pe->pid, pe->comm);
+ found++;
+ }
+ break;
+
+ case 4:
+ if(p->cstime > lost) {
+ fprintf(stderr, " > process %d (%s) could use the lost exited child stime " KERNEL_UINT_FORMAT " of process %d (%s)\n", p->pid, p->comm, lost, pe->pid, pe->comm);
+ found++;
+ }
+ break;
+
+ case 5:
+ if(p->cgtime > lost) {
+ fprintf(stderr, " > process %d (%s) could use the lost exited child gtime " KERNEL_UINT_FORMAT " of process %d (%s)\n", p->pid, p->comm, lost, pe->pid, pe->comm);
+ found++;
+ }
+ break;
+ }
+ }
+
+ if(!found) {
+ switch(type) {
+ case 1:
+ fprintf(stderr, " > cannot find any process to use the lost exited child minflt " KERNEL_UINT_FORMAT " of process %d (%s)\n", lost, pe->pid, pe->comm);
+ break;
+
+ case 2:
+ fprintf(stderr, " > cannot find any process to use the lost exited child majflt " KERNEL_UINT_FORMAT " of process %d (%s)\n", lost, pe->pid, pe->comm);
+ break;
+
+ case 3:
+ fprintf(stderr, " > cannot find any process to use the lost exited child utime " KERNEL_UINT_FORMAT " of process %d (%s)\n", lost, pe->pid, pe->comm);
+ break;
+
+ case 4:
+ fprintf(stderr, " > cannot find any process to use the lost exited child stime " KERNEL_UINT_FORMAT " of process %d (%s)\n", lost, pe->pid, pe->comm);
+ break;
+
+ case 5:
+ fprintf(stderr, " > cannot find any process to use the lost exited child gtime " KERNEL_UINT_FORMAT " of process %d (%s)\n", lost, pe->pid, pe->comm);
+ break;
+ }
+ }
+}
+
+static inline kernel_uint_t remove_exited_child_from_parent(kernel_uint_t *field, kernel_uint_t *pfield) {
+ kernel_uint_t absorbed = 0;
+
+ if(*field > *pfield) {
+ absorbed += *pfield;
+ *field -= *pfield;
+ *pfield = 0;
+ }
+ else {
+ absorbed += *field;
+ *pfield -= *field;
+ *field = 0;
+ }
+
+ return absorbed;
+}
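+
+// Illustrative example: with *field = 30 (resources still to attribute)
+// and *pfield = 100 (the parent's accumulated child resources), the else
+// branch fires: absorbed = 30, *pfield = 70, *field = 0. If the parent had
+// only 20, absorbed = 20 and 10 would remain in *field for an older
+// ancestor, or for another loop via p->keep.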
+
+static inline void process_exited_processes() {
+ struct pid_stat *p;
+
+ for(p = root_of_pids; p ; p = p->next) {
+ if(p->updated || !p->stat_collected_usec)
+ continue;
+
+ kernel_uint_t utime = (p->utime_raw + p->cutime_raw) * (USEC_PER_SEC * RATES_DETAIL) / (p->stat_collected_usec - p->last_stat_collected_usec);
+ kernel_uint_t stime = (p->stime_raw + p->cstime_raw) * (USEC_PER_SEC * RATES_DETAIL) / (p->stat_collected_usec - p->last_stat_collected_usec);
+ kernel_uint_t gtime = (p->gtime_raw + p->cgtime_raw) * (USEC_PER_SEC * RATES_DETAIL) / (p->stat_collected_usec - p->last_stat_collected_usec);
+ kernel_uint_t minflt = (p->minflt_raw + p->cminflt_raw) * (USEC_PER_SEC * RATES_DETAIL) / (p->stat_collected_usec - p->last_stat_collected_usec);
+ kernel_uint_t majflt = (p->majflt_raw + p->cmajflt_raw) * (USEC_PER_SEC * RATES_DETAIL) / (p->stat_collected_usec - p->last_stat_collected_usec);
+
+ if(utime + stime + gtime + minflt + majflt == 0)
+ continue;
+
+ if(unlikely(debug_enabled)) {
+ debug_log("Absorb %s (%d %s total resources: utime=" KERNEL_UINT_FORMAT " stime=" KERNEL_UINT_FORMAT " gtime=" KERNEL_UINT_FORMAT " minflt=" KERNEL_UINT_FORMAT " majflt=" KERNEL_UINT_FORMAT ")"
+ , p->comm
+ , p->pid
+ , p->updated?"running":"exited"
+ , utime
+ , stime
+ , gtime
+ , minflt
+ , majflt
+ );
+ debug_print_process_tree(p, "Searching parents");
+ }
+
+ struct pid_stat *pp;
+ for(pp = p->parent; pp ; pp = pp->parent) {
+ if(!pp->updated) continue;
+
+ kernel_uint_t absorbed;
+ absorbed = remove_exited_child_from_parent(&utime, &pp->cutime);
+ if(unlikely(debug_enabled && absorbed))
+ debug_log(" > process %s (%d %s) absorbed " KERNEL_UINT_FORMAT " utime (remaining: " KERNEL_UINT_FORMAT ")", pp->comm, pp->pid, pp->updated?"running":"exited", absorbed, utime);
+
+ absorbed = remove_exited_child_from_parent(&stime, &pp->cstime);
+ if(unlikely(debug_enabled && absorbed))
+ debug_log(" > process %s (%d %s) absorbed " KERNEL_UINT_FORMAT " stime (remaining: " KERNEL_UINT_FORMAT ")", pp->comm, pp->pid, pp->updated?"running":"exited", absorbed, stime);
+
+ absorbed = remove_exited_child_from_parent(&gtime, &pp->cgtime);
+ if(unlikely(debug_enabled && absorbed))
+ debug_log(" > process %s (%d %s) absorbed " KERNEL_UINT_FORMAT " gtime (remaining: " KERNEL_UINT_FORMAT ")", pp->comm, pp->pid, pp->updated?"running":"exited", absorbed, gtime);
+
+ absorbed = remove_exited_child_from_parent(&minflt, &pp->cminflt);
+ if(unlikely(debug_enabled && absorbed))
+ debug_log(" > process %s (%d %s) absorbed " KERNEL_UINT_FORMAT " minflt (remaining: " KERNEL_UINT_FORMAT ")", pp->comm, pp->pid, pp->updated?"running":"exited", absorbed, minflt);
+
+ absorbed = remove_exited_child_from_parent(&majflt, &pp->cmajflt);
+ if(unlikely(debug_enabled && absorbed))
+ debug_log(" > process %s (%d %s) absorbed " KERNEL_UINT_FORMAT " majflt (remaining: " KERNEL_UINT_FORMAT ")", pp->comm, pp->pid, pp->updated?"running":"exited", absorbed, majflt);
+ }
+
+ if(unlikely(utime + stime + gtime + minflt + majflt > 0)) {
+ if(unlikely(debug_enabled)) {
+ if(utime) debug_find_lost_child(p, utime, 3);
+ if(stime) debug_find_lost_child(p, stime, 4);
+ if(gtime) debug_find_lost_child(p, gtime, 5);
+ if(minflt) debug_find_lost_child(p, minflt, 1);
+ if(majflt) debug_find_lost_child(p, majflt, 2);
+ }
+
+ p->keep = 1;
+
+ debug_log(" > remaining resources - KEEP - for another loop: %s (%d %s total resources: utime=" KERNEL_UINT_FORMAT " stime=" KERNEL_UINT_FORMAT " gtime=" KERNEL_UINT_FORMAT " minflt=" KERNEL_UINT_FORMAT " majflt=" KERNEL_UINT_FORMAT ")"
+ , p->comm
+ , p->pid
+ , p->updated?"running":"exited"
+ , utime
+ , stime
+ , gtime
+ , minflt
+ , majflt
+ );
+
+ for(pp = p->parent; pp ; pp = pp->parent) {
+ if(pp->updated) break;
+ pp->keep = 1;
+
+ debug_log(" > - KEEP - parent for another loop: %s (%d %s)"
+ , pp->comm
+ , pp->pid
+ , pp->updated?"running":"exited"
+ );
+ }
+
+ p->utime_raw = utime * (p->stat_collected_usec - p->last_stat_collected_usec) / (USEC_PER_SEC * RATES_DETAIL);
+ p->stime_raw = stime * (p->stat_collected_usec - p->last_stat_collected_usec) / (USEC_PER_SEC * RATES_DETAIL);
+ p->gtime_raw = gtime * (p->stat_collected_usec - p->last_stat_collected_usec) / (USEC_PER_SEC * RATES_DETAIL);
+ p->minflt_raw = minflt * (p->stat_collected_usec - p->last_stat_collected_usec) / (USEC_PER_SEC * RATES_DETAIL);
+ p->majflt_raw = majflt * (p->stat_collected_usec - p->last_stat_collected_usec) / (USEC_PER_SEC * RATES_DETAIL);
+ p->cutime_raw = p->cstime_raw = p->cgtime_raw = p->cminflt_raw = p->cmajflt_raw = 0;
+
+ debug_log(" ");
+ }
+ else
+ debug_log(" > totally absorbed - DONE - %s (%d %s)"
+ , p->comm
+ , p->pid
+ , p->updated?"running":"exited"
+ );
+ }
+}
+
+static inline void link_all_processes_to_their_parents(void) {
+ struct pid_stat *p, *pp;
+
+ // link all children to their parents
+ // and update children count on parents
+ for(p = root_of_pids; p ; p = p->next) {
+ // for each process found
+
+ p->sortlist = 0;
+ p->parent = NULL;
+
+ if(unlikely(!p->ppid)) {
+ //unnecessary code from apps_plugin.c
+ //p->parent = NULL;
+ continue;
+ }
+
+ pp = all_pids[p->ppid];
+ if(likely(pp)) {
+ p->parent = pp;
+ pp->children_count++;
+
+ if(unlikely(debug_enabled || (p->target && p->target->debug_enabled)))
+ debug_log_int("child %d (%s, %s) on target '%s' has parent %d (%s, %s). Parent: utime=" KERNEL_UINT_FORMAT ", stime=" KERNEL_UINT_FORMAT ", gtime=" KERNEL_UINT_FORMAT ", minflt=" KERNEL_UINT_FORMAT ", majflt=" KERNEL_UINT_FORMAT ", cutime=" KERNEL_UINT_FORMAT ", cstime=" KERNEL_UINT_FORMAT ", cgtime=" KERNEL_UINT_FORMAT ", cminflt=" KERNEL_UINT_FORMAT ", cmajflt=" KERNEL_UINT_FORMAT "", p->pid, p->comm, p->updated?"running":"exited", (p->target)?p->target->name:"UNSET", pp->pid, pp->comm, pp->updated?"running":"exited", pp->utime, pp->stime, pp->gtime, pp->minflt, pp->majflt, pp->cutime, pp->cstime, pp->cgtime, pp->cminflt, pp->cmajflt);
+ }
+ else {
+ p->parent = NULL;
+ error("pid %d %s states parent %d, but the later does not exist.", p->pid, p->comm, p->ppid);
+ }
+ }
+}
+
+// ----------------------------------------------------------------------------
+
+// 1. read all files in /proc
+// 2. for each numeric directory:
+// i. read /proc/pid/stat
+// ii. read /proc/pid/status
+// iii. read /proc/pid/io (requires root access)
+// iv. read the entries in directory /proc/pid/fd (requires root access)
+// for each entry:
+// a. find or create a struct file_descriptor
+// b. cleanup any old/unused file_descriptors
+
+// after all these, some pids may be linked to targets, while others may not
+
+// in case of errors, only 1 in every 1000 errors is printed
+// to avoid filling up all disk space
+// if debug is enabled, all errors are printed
+
+#if (ALL_PIDS_ARE_READ_INSTANTLY == 0)
+static int compar_pid(const void *pid1, const void *pid2) {
+
+ struct pid_stat *p1 = all_pids[*((pid_t *)pid1)];
+ struct pid_stat *p2 = all_pids[*((pid_t *)pid2)];
+
+ if(p1->sortlist > p2->sortlist)
+ return -1;
+ else
+ return 1;
+}
+#endif
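+
+// Note: compar_pid() orders by descending sortlist. Since parents are
+// assigned higher sortlist values than their children (see
+// apply_apps_groups_targets_inheritance() below), the qsort() in
+// collect_data_for_all_processes() produces the parents-first read order
+// that include_exited_childs requires.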
+
+static inline int collect_data_for_pid(pid_t pid, void *ptr) {
+ if(unlikely(pid < 0 || pid > pid_max)) {
+ error("Invalid pid %d read (expected %d to %d). Ignoring process.", pid, 0, pid_max);
+ return 0;
+ }
+
+ struct pid_stat *p = get_pid_entry(pid);
+ if(unlikely(!p || p->read)) return 0;
+ p->read = 1;
+
+ // debug_log("Reading process %d (%s), sortlist %d", p->pid, p->comm, p->sortlist);
+
+ // --------------------------------------------------------------------
+ // /proc/<pid>/stat
+
+ if(unlikely(!managed_log(p, PID_LOG_STAT, read_proc_pid_stat(p, ptr))))
+ // there is no reason to proceed if we cannot get its status
+ return 0;
+
+ // check its parent pid
+ if(unlikely(p->ppid < 0 || p->ppid > pid_max)) {
+ error("Pid %d (command '%s') states invalid parent pid %d. Using 0.", pid, p->comm, p->ppid);
+ p->ppid = 0;
+ }
+
+ // --------------------------------------------------------------------
+ // /proc/<pid>/io
+
+ managed_log(p, PID_LOG_IO, read_proc_pid_io(p, ptr));
+
+ // --------------------------------------------------------------------
+ // /proc/<pid>/status
+
+ if(unlikely(!managed_log(p, PID_LOG_STATUS, read_proc_pid_status(p, ptr))))
+ // there is no reason to proceed if we cannot get its status
+ return 0;
+
+ // --------------------------------------------------------------------
+ // /proc/<pid>/fd
+
+ if(enable_file_charts)
+ managed_log(p, PID_LOG_FDS, read_pid_file_descriptors(p, ptr));
+
+ // --------------------------------------------------------------------
+ // done!
+
+ if(unlikely(debug_enabled && include_exited_childs && all_pids_count && p->ppid && all_pids[p->ppid] && !all_pids[p->ppid]->read))
+ debug_log("Read process %d (%s) sortlisted %d, but its parent %d (%s) sortlisted %d, is not read", p->pid, p->comm, p->sortlist, all_pids[p->ppid]->pid, all_pids[p->ppid]->comm, all_pids[p->ppid]->sortlist);
+
+ // mark it as updated
+ p->updated = 1;
+ p->keep = 0;
+ p->keeploops = 0;
+
+ return 1;
+}
+
+static int collect_data_for_all_processes(void) {
+ struct pid_stat *p = NULL;
+
+#ifdef __FreeBSD__
+ int i, procnum;
+
+ static size_t procbase_size = 0;
+ static struct kinfo_proc *procbase = NULL;
+
+ size_t new_procbase_size;
+
+ int mib[3] = { CTL_KERN, KERN_PROC, KERN_PROC_PROC };
+ if (unlikely(sysctl(mib, 3, NULL, &new_procbase_size, NULL, 0))) {
+ error("sysctl error: Can't get processes data size");
+ return 0;
+ }
+
+ // leave some extra room for processes that may be started
+ // while this collection is running.
+ new_procbase_size += 100 * sizeof(struct kinfo_proc);
+
+ // increase the buffer if needed
+ if(new_procbase_size > procbase_size) {
+ procbase_size = new_procbase_size;
+ procbase = reallocz(procbase, procbase_size);
+ }
+
+ // sysctl() reads the available buffer size from new_procbase_size
+ // and writes back to it the amount of data it filled in
+ new_procbase_size = procbase_size;
+
+ // get the processes from the system
+ if (unlikely(sysctl(mib, 3, procbase, &new_procbase_size, NULL, 0))) {
+ error("sysctl error: Can't get processes data");
+ return 0;
+ }
+
+ // based on the amount of data filled in
+ // calculate the number of processes we got
+ procnum = new_procbase_size / sizeof(struct kinfo_proc);
+
+#endif
+
+ if(all_pids_count) {
+#if (ALL_PIDS_ARE_READ_INSTANTLY == 0)
+ size_t slc = 0;
+#endif
+ for(p = root_of_pids; p ; p = p->next) {
+ p->read = 0; // mark it as not read, so that collect_data_for_pid() will read it
+ p->updated = 0;
+ p->merged = 0;
+ p->children_count = 0;
+ p->parent = NULL;
+
+#if (ALL_PIDS_ARE_READ_INSTANTLY == 0)
+ all_pids_sortlist[slc++] = p->pid;
+#endif
+ }
+
+#if (ALL_PIDS_ARE_READ_INSTANTLY == 0)
+ if(unlikely(slc != all_pids_count)) {
+ error("Internal error: I was thinking I had %zu processes in my arrays, but it seems there are %zu.", all_pids_count, slc);
+ all_pids_count = slc;
+ }
+
+ if(include_exited_childs) {
+ // Read parents before children.
+ // This is needed to prevent a situation where
+ // a child is found running, but by the time we read
+ // its parent, the child has exited and the parent
+ // has accumulated its resources.
+
+ qsort((void *)all_pids_sortlist, (size_t)all_pids_count, sizeof(pid_t), compar_pid);
+
+ // we read all running processes up front;
+ // collect_data_for_pid() is smart enough
+ // not to read the same pid twice per iteration
+ for(slc = 0; slc < all_pids_count; slc++)
+ collect_data_for_pid(all_pids_sortlist[slc], NULL);
+ }
+#endif
+ }
+
+#ifdef __FreeBSD__
+ for (i = 0 ; i < procnum ; ++i) {
+ pid_t pid = procbase[i].ki_pid;
+ collect_data_for_pid(pid, &procbase[i]);
+ }
+#else
+ static char uptime_filename[FILENAME_MAX + 1] = "";
+ if(*uptime_filename == '\0')
+ snprintfz(uptime_filename, FILENAME_MAX, "%s/proc/uptime", netdata_configured_host_prefix);
+
+ global_uptime = (kernel_uint_t)(uptime_msec(uptime_filename) / MSEC_PER_SEC);
+
+ char dirname[FILENAME_MAX + 1];
+
+ snprintfz(dirname, FILENAME_MAX, "%s/proc", netdata_configured_host_prefix);
+ DIR *dir = opendir(dirname);
+ if(!dir) return 0;
+
+ struct dirent *de = NULL;
+
+ while((de = readdir(dir))) {
+ char *endptr = de->d_name;
+
+ if(unlikely(de->d_type != DT_DIR || de->d_name[0] < '0' || de->d_name[0] > '9'))
+ continue;
+
+ pid_t pid = (pid_t) strtoul(de->d_name, &endptr, 10);
+
+ // make sure we read a valid number
+ if(unlikely(endptr == de->d_name || *endptr != '\0'))
+ continue;
+
+ collect_data_for_pid(pid, NULL);
+ }
+ closedir(dir);
+#endif
+
+ if(!all_pids_count)
+ return 0;
+
+ // we need /proc/stat to normalize the cpu consumption of the exited children
+ read_global_time();
+
+ // build the process tree
+ link_all_processes_to_their_parents();
+
+ // normally we are done at this point;
+ // however, some processes may have exited while we were collecting values,
+ // so let's find the exited ones
+ // and let their still-running parents
+ // absorb the resources they accumulated
+ process_exited_processes();
+
+ return 1;
+}
+
+// ----------------------------------------------------------------------------
+// update statistics on the targets
+
+// 1. link all children to their parents
+// 2. go from bottom to top, marking as merged all children into their parents
+// this step links all parents without a target to the child target, if any
+// 3. link all top level processes (the ones not merged) to the default target
+// 4. go from top to bottom, linking all children without a target to their parent target
+// after this step, all processes have a target
+// [5. for each killed pid (updated = 0), remove its usage from its target]
+// 6. zero all apps_groups_targets
+// 7. concentrate all values on the apps_groups_targets
+// 8. remove all killed processes
+// 9. find the unique file count for each target
+// check: update_apps_groups_statistics()
+
+static void cleanup_exited_pids(void) {
+ size_t c;
+ struct pid_stat *p = NULL;
+
+ for(p = root_of_pids; p ;) {
+ if(!p->updated && (!p->keep || p->keeploops > 0)) {
+ if(unlikely(debug_enabled && (p->keep || p->keeploops)))
+ debug_log(" > CLEANUP cannot keep exited process %d (%s) anymore - removing it.", p->pid, p->comm);
+
+ for(c = 0; c < p->fds_size; c++)
+ if(p->fds[c].fd > 0) {
+ file_descriptor_not_used(p->fds[c].fd);
+ clear_pid_fd(&p->fds[c]);
+ }
+
+ pid_t r = p->pid;
+ p = p->next;
+ del_pid_entry(r);
+ }
+ else {
+ if(unlikely(p->keep)) p->keeploops++;
+ p->keep = 0;
+ p = p->next;
+ }
+ }
+}
+
+static void apply_apps_groups_targets_inheritance(void) {
+ struct pid_stat *p = NULL;
+
+ // children that do not have a target
+ // inherit their target from their parent
+ int found = 1, loops = 0;
+ while(found) {
+ if(unlikely(debug_enabled)) loops++;
+ found = 0;
+ for(p = root_of_pids; p ; p = p->next) {
+ // if this process does not have a target
+ // and it has a parent
+ // and its parent has a target
+ // then, set the parent's target to this process
+ if(unlikely(!p->target && p->parent && p->parent->target)) {
+ p->target = p->parent->target;
+ found++;
+
+ if(debug_enabled || (p->target && p->target->debug_enabled))
+ debug_log_int("TARGET INHERITANCE: %s is inherited by %d (%s) from its parent %d (%s).", p->target->name, p->pid, p->comm, p->parent->pid, p->parent->comm);
+ }
+ }
+ }
+
+ // find all the procs with 0 children and merge them into their parents
+ // repeat, until nothing more can be done.
+ int sortlist = 1;
+ found = 1;
+ while(found) {
+ if(unlikely(debug_enabled)) loops++;
+ found = 0;
+
+ for(p = root_of_pids; p ; p = p->next) {
+ if(unlikely(!p->sortlist && !p->children_count))
+ p->sortlist = sortlist++;
+
+ if(unlikely(
+ !p->children_count // if this process does not have any children
+ && !p->merged // and is not already merged
+ && p->parent // and has a parent
+ && p->parent->children_count // and its parent has children
+ // and the target of this process and its parent is the same,
+ // or the parent does not have a target
+ && (p->target == p->parent->target || !p->parent->target)
+ && p->ppid != INIT_PID // and its parent is not init
+ )) {
+ // mark it as merged
+ p->parent->children_count--;
+ p->merged = 1;
+
+ // the parent inherits the child's target, if it does not have a target itself
+ if(unlikely(p->target && !p->parent->target)) {
+ p->parent->target = p->target;
+
+ if(debug_enabled || (p->target && p->target->debug_enabled))
+ debug_log_int("TARGET INHERITANCE: %s is inherited by %d (%s) from its child %d (%s).", p->target->name, p->parent->pid, p->parent->comm, p->pid, p->comm);
+ }
+
+ found++;
+ }
+ }
+
+ debug_log("TARGET INHERITANCE: merged %d processes", found);
+ }
+
+ // init always goes to the default target
+ if(all_pids[INIT_PID])
+ all_pids[INIT_PID]->target = apps_groups_default_target;
+
+ // pid 0 always goes to the default target
+ if(all_pids[0])
+ all_pids[0]->target = apps_groups_default_target;
+
+ // give a default target to all top level processes
+ if(unlikely(debug_enabled)) loops++;
+ for(p = root_of_pids; p ; p = p->next) {
+ // if the process is not merged itself
+ // then it is a top level process
+ if(unlikely(!p->merged && !p->target))
+ p->target = apps_groups_default_target;
+
+ // make sure all processes have a sortlist
+ if(unlikely(!p->sortlist))
+ p->sortlist = sortlist++;
+ }
+
+ if(all_pids[1])
+ all_pids[1]->sortlist = sortlist++;
+
+ // give a target to all merged child processes
+ found = 1;
+ while(found) {
+ if(unlikely(debug_enabled)) loops++;
+ found = 0;
+ for(p = root_of_pids; p ; p = p->next) {
+ if(unlikely(!p->target && p->merged && p->parent && p->parent->target)) {
+ p->target = p->parent->target;
+ found++;
+
+ if(debug_enabled || (p->target && p->target->debug_enabled))
+ debug_log_int("TARGET INHERITANCE: %s is inherited by %d (%s) from its parent %d (%s) at phase 2.", p->target->name, p->pid, p->comm, p->parent->pid, p->parent->comm);
+ }
+ }
+ }
+
+ debug_log("apply_apps_groups_targets_inheritance() made %d loops on the process tree", loops);
+}
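+
+// Illustrative walk-through of the inheritance above, for a tree like
+//   init(1) -> sshd(900, target "ssh") -> bash(1200) -> vim(1300)
+// (hypothetical pids/targets): phase 1 pushes "ssh" down to bash and vim;
+// the merge loop then folds the childless vim into bash and bash into sshd
+// (same target); anything left unmerged and untargeted at the top level
+// falls back to apps_groups_default_target.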
+
+static size_t zero_all_targets(struct target *root) {
+ struct target *w;
+ size_t count = 0;
+
+ for (w = root; w ; w = w->next) {
+ count++;
+
+ w->minflt = 0;
+ w->majflt = 0;
+ w->utime = 0;
+ w->stime = 0;
+ w->gtime = 0;
+ w->cminflt = 0;
+ w->cmajflt = 0;
+ w->cutime = 0;
+ w->cstime = 0;
+ w->cgtime = 0;
+ w->num_threads = 0;
+ // w->rss = 0;
+ w->processes = 0;
+
+ w->status_vmsize = 0;
+ w->status_vmrss = 0;
+ w->status_vmshared = 0;
+ w->status_rssfile = 0;
+ w->status_rssshmem = 0;
+ w->status_vmswap = 0;
+
+ w->io_logical_bytes_read = 0;
+ w->io_logical_bytes_written = 0;
+ // w->io_read_calls = 0;
+ // w->io_write_calls = 0;
+ w->io_storage_bytes_read = 0;
+ w->io_storage_bytes_written = 0;
+ // w->io_cancelled_write_bytes = 0;
+
+ // zero file counters
+ if(w->target_fds) {
+ memset(w->target_fds, 0, sizeof(int) * w->target_fds_size);
+ w->openfiles = 0;
+ w->openpipes = 0;
+ w->opensockets = 0;
+ w->openinotifies = 0;
+ w->openeventfds = 0;
+ w->opentimerfds = 0;
+ w->opensignalfds = 0;
+ w->openeventpolls = 0;
+ w->openother = 0;
+ }
+
+ w->collected_starttime = 0;
+ w->uptime_min = 0;
+ w->uptime_sum = 0;
+ w->uptime_max = 0;
+
+ if(unlikely(w->root_pid)) {
+ struct pid_on_target *pid_on_target_to_free, *pid_on_target = w->root_pid;
+
+ while(pid_on_target) {
+ pid_on_target_to_free = pid_on_target;
+ pid_on_target = pid_on_target->next;
+ free(pid_on_target_to_free);
+ }
+
+ w->root_pid = NULL;
+ }
+ }
+
+ return count;
+}
+
+static inline void reallocate_target_fds(struct target *w) {
+ if(unlikely(!w))
+ return;
+
+ if(unlikely(!w->target_fds || w->target_fds_size < all_files_size)) {
+ w->target_fds = reallocz(w->target_fds, sizeof(int) * all_files_size);
+ memset(&w->target_fds[w->target_fds_size], 0, sizeof(int) * (all_files_size - w->target_fds_size));
+ w->target_fds_size = all_files_size;
+ }
+}
+
+static inline void aggregate_fd_on_target(int fd, struct target *w) {
+ if(unlikely(!w))
+ return;
+
+ if(unlikely(w->target_fds[fd])) {
+ // it is already aggregated
+ // just increase its usage counter
+ w->target_fds[fd]++;
+ return;
+ }
+
+ // increase its usage counter
+ // so that we will not add it again
+ w->target_fds[fd]++;
+
+ switch(all_files[fd].type) {
+ case FILETYPE_FILE:
+ w->openfiles++;
+ break;
+
+ case FILETYPE_PIPE:
+ w->openpipes++;
+ break;
+
+ case FILETYPE_SOCKET:
+ w->opensockets++;
+ break;
+
+ case FILETYPE_INOTIFY:
+ w->openinotifies++;
+ break;
+
+ case FILETYPE_EVENTFD:
+ w->openeventfds++;
+ break;
+
+ case FILETYPE_TIMERFD:
+ w->opentimerfds++;
+ break;
+
+ case FILETYPE_SIGNALFD:
+ w->opensignalfds++;
+ break;
+
+ case FILETYPE_EVENTPOLL:
+ w->openeventpolls++;
+ break;
+
+ case FILETYPE_OTHER:
+ w->openother++;
+ break;
+ }
+}
+
+static inline void aggregate_pid_fds_on_targets(struct pid_stat *p) {
+
+ if(unlikely(!p->updated)) {
+ // the process is not running
+ return;
+ }
+
+ struct target *w = p->target, *u = p->user_target, *g = p->group_target;
+
+ reallocate_target_fds(w);
+ reallocate_target_fds(u);
+ reallocate_target_fds(g);
+
+ long double currentfds = 0;
+ size_t c, size = p->fds_size;
+ struct pid_fd *fds = p->fds;
+ for(c = 0; c < size ;c++) {
+ int fd = fds[c].fd;
+
+ if(likely(fd <= 0 || fd >= all_files_size))
+ continue;
+
+ currentfds++;
+
+ aggregate_fd_on_target(fd, w);
+ aggregate_fd_on_target(fd, u);
+ aggregate_fd_on_target(fd, g);
+ }
+
+ if (currentfds >= currentmaxfds)
+ currentmaxfds = currentfds;
+}
+
+static inline void aggregate_pid_on_target(struct target *w, struct pid_stat *p, struct target *o) {
+ (void)o;
+
+ if(unlikely(!p->updated)) {
+ // the process is not running
+ return;
+ }
+
+ if(unlikely(!w)) {
+ error("pid %d %s was left without a target!", p->pid, p->comm);
+ return;
+ }
+
+ w->cutime += p->cutime;
+ w->cstime += p->cstime;
+ w->cgtime += p->cgtime;
+ w->cminflt += p->cminflt;
+ w->cmajflt += p->cmajflt;
+
+ w->utime += p->utime;
+ w->stime += p->stime;
+ w->gtime += p->gtime;
+ w->minflt += p->minflt;
+ w->majflt += p->majflt;
+
+ // w->rss += p->rss;
+
+ w->status_vmsize += p->status_vmsize;
+ w->status_vmrss += p->status_vmrss;
+ w->status_vmshared += p->status_vmshared;
+ w->status_rssfile += p->status_rssfile;
+ w->status_rssshmem += p->status_rssshmem;
+ w->status_vmswap += p->status_vmswap;
+
+ w->io_logical_bytes_read += p->io_logical_bytes_read;
+ w->io_logical_bytes_written += p->io_logical_bytes_written;
+ // w->io_read_calls += p->io_read_calls;
+ // w->io_write_calls += p->io_write_calls;
+ w->io_storage_bytes_read += p->io_storage_bytes_read;
+ w->io_storage_bytes_written += p->io_storage_bytes_written;
+ // w->io_cancelled_write_bytes += p->io_cancelled_write_bytes;
+
+ w->processes++;
+ w->num_threads += p->num_threads;
+
+ if(!w->collected_starttime || p->collected_starttime < w->collected_starttime) w->collected_starttime = p->collected_starttime;
+ if(!w->uptime_min || p->uptime < w->uptime_min) w->uptime_min = p->uptime;
+ w->uptime_sum += p->uptime;
+ if(!w->uptime_max || w->uptime_max < p->uptime) w->uptime_max = p->uptime;
+
+ if(unlikely(debug_enabled || w->debug_enabled)) {
+ debug_log_int("aggregating '%s' pid %d on target '%s' utime=" KERNEL_UINT_FORMAT ", stime=" KERNEL_UINT_FORMAT ", gtime=" KERNEL_UINT_FORMAT ", cutime=" KERNEL_UINT_FORMAT ", cstime=" KERNEL_UINT_FORMAT ", cgtime=" KERNEL_UINT_FORMAT ", minflt=" KERNEL_UINT_FORMAT ", majflt=" KERNEL_UINT_FORMAT ", cminflt=" KERNEL_UINT_FORMAT ", cmajflt=" KERNEL_UINT_FORMAT "", p->comm, p->pid, w->name, p->utime, p->stime, p->gtime, p->cutime, p->cstime, p->cgtime, p->minflt, p->majflt, p->cminflt, p->cmajflt);
+
+ struct pid_on_target *pid_on_target = mallocz(sizeof(struct pid_on_target));
+ pid_on_target->pid = p->pid;
+ pid_on_target->next = w->root_pid;
+ w->root_pid = pid_on_target;
+ }
+}
+
+static inline void post_aggregate_targets(struct target *root) {
+ struct target *w;
+ for (w = root; w ; w = w->next) {
+ if(w->collected_starttime) {
+ if (!w->starttime || w->collected_starttime < w->starttime) {
+ w->starttime = w->collected_starttime;
+ }
+ } else {
+ w->starttime = 0;
+ }
+ }
+}
+
+static void calculate_netdata_statistics(void) {
+
+ apply_apps_groups_targets_inheritance();
+
+ zero_all_targets(users_root_target);
+ zero_all_targets(groups_root_target);
+ apps_groups_targets_count = zero_all_targets(apps_groups_root_target);
+
+ // this has to be done before the cleanup
+ struct pid_stat *p = NULL;
+ struct target *w = NULL, *o = NULL;
+
+ // concentrate everything on the targets
+ for(p = root_of_pids; p ; p = p->next) {
+
+ // --------------------------------------------------------------------
+ // apps_groups target
+
+ aggregate_pid_on_target(p->target, p, NULL);
+
+
+ // --------------------------------------------------------------------
+ // user target
+
+ o = p->user_target;
+ if(likely(p->user_target && p->user_target->uid == p->uid))
+ w = p->user_target;
+ else {
+ if(unlikely(debug_enabled && p->user_target))
+ debug_log("pid %d (%s) switched user from %u (%s) to %u.", p->pid, p->comm, p->user_target->uid, p->user_target->name, p->uid);
+
+ w = p->user_target = get_users_target(p->uid);
+ }
+
+ aggregate_pid_on_target(w, p, o);
+
+
+ // --------------------------------------------------------------------
+ // user group target
+
+ o = p->group_target;
+ if(likely(p->group_target && p->group_target->gid == p->gid))
+ w = p->group_target;
+ else {
+ if(unlikely(debug_enabled && p->group_target))
+ debug_log("pid %d (%s) switched group from %u (%s) to %u.", p->pid, p->comm, p->group_target->gid, p->group_target->name, p->gid);
+
+ w = p->group_target = get_groups_target(p->gid);
+ }
+
+ aggregate_pid_on_target(w, p, o);
+
+
+ // --------------------------------------------------------------------
+ // aggregate all file descriptors
+
+ if(enable_file_charts)
+ aggregate_pid_fds_on_targets(p);
+ }
+
+ post_aggregate_targets(apps_groups_root_target);
+ post_aggregate_targets(users_root_target);
+ post_aggregate_targets(groups_root_target);
+
+ cleanup_exited_pids();
+}
+
+// ----------------------------------------------------------------------------
+// update chart dimensions
+
+static inline void send_BEGIN(const char *type, const char *id, usec_t usec) {
+ fprintf(stdout, "BEGIN %s.%s %llu\n", type, id, usec);
+}
+
+static inline void send_SET(const char *name, kernel_uint_t value) {
+ fprintf(stdout, "SET %s = " KERNEL_UINT_FORMAT "\n", name, value);
+}
+
+static inline void send_END(void) {
+ fprintf(stdout, "END\n");
+}
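+
+// The three helpers above emit netdata's external plugin text protocol on
+// stdout; a single chart update looks like this (illustrative chart id and
+// values):
+//
+//   BEGIN apps.cpu 1000000
+//   SET ssh = 1234
+//   END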
+
+void send_resource_usage_to_netdata(usec_t dt) {
+ static struct timeval last = { 0, 0 };
+ static struct rusage me_last;
+
+ struct timeval now;
+ struct rusage me;
+
+ usec_t cpuuser;
+ usec_t cpusyst;
+
+ if(!last.tv_sec) {
+ now_monotonic_timeval(&last);
+ getrusage(RUSAGE_SELF, &me_last);
+
+ cpuuser = 0;
+ cpusyst = 0;
+ }
+ else {
+ now_monotonic_timeval(&now);
+ getrusage(RUSAGE_SELF, &me);
+
+ cpuuser = me.ru_utime.tv_sec * USEC_PER_SEC + me.ru_utime.tv_usec;
+ cpusyst = me.ru_stime.tv_sec * USEC_PER_SEC + me.ru_stime.tv_usec;
+
+ memmove(&last, &now, sizeof(struct timeval));
+ memmove(&me_last, &me, sizeof(struct rusage));
+ }
+
+ static char created_charts = 0;
+ if(unlikely(!created_charts)) {
+ created_charts = 1;
+
+ fprintf(stdout,
+ "CHART netdata.apps_cpu '' 'Apps Plugin CPU' 'milliseconds/s' apps.plugin netdata.apps_cpu stacked 140000 %1$d\n"
+ "DIMENSION user '' incremental 1 1000\n"
+ "DIMENSION system '' incremental 1 1000\n"
+ "CHART netdata.apps_sizes '' 'Apps Plugin Files' 'files/s' apps.plugin netdata.apps_sizes line 140001 %1$d\n"
+ "DIMENSION calls '' incremental 1 1\n"
+ "DIMENSION files '' incremental 1 1\n"
+ "DIMENSION filenames '' incremental 1 1\n"
+ "DIMENSION inode_changes '' incremental 1 1\n"
+ "DIMENSION link_changes '' incremental 1 1\n"
+ "DIMENSION pids '' absolute 1 1\n"
+ "DIMENSION fds '' absolute 1 1\n"
+ "DIMENSION targets '' absolute 1 1\n"
+ "DIMENSION new_pids 'new pids' incremental 1 1\n"
+ , update_every
+ );
+
+ fprintf(stdout,
+ "CHART netdata.apps_fix '' 'Apps Plugin Normalization Ratios' 'percentage' apps.plugin netdata.apps_fix line 140002 %1$d\n"
+ "DIMENSION utime '' absolute 1 %2$llu\n"
+ "DIMENSION stime '' absolute 1 %2$llu\n"
+ "DIMENSION gtime '' absolute 1 %2$llu\n"
+ "DIMENSION minflt '' absolute 1 %2$llu\n"
+ "DIMENSION majflt '' absolute 1 %2$llu\n"
+ , update_every
+ , RATES_DETAIL
+ );
+
+ if(include_exited_childs)
+ fprintf(stdout,
+ "CHART netdata.apps_children_fix '' 'Apps Plugin Exited Children Normalization Ratios' 'percentage' apps.plugin netdata.apps_children_fix line 140003 %1$d\n"
+ "DIMENSION cutime '' absolute 1 %2$llu\n"
+ "DIMENSION cstime '' absolute 1 %2$llu\n"
+ "DIMENSION cgtime '' absolute 1 %2$llu\n"
+ "DIMENSION cminflt '' absolute 1 %2$llu\n"
+ "DIMENSION cmajflt '' absolute 1 %2$llu\n"
+ , update_every
+ , RATES_DETAIL
+ );
+
+ }
+
+ fprintf(stdout,
+ "BEGIN netdata.apps_cpu %llu\n"
+ "SET user = %llu\n"
+ "SET system = %llu\n"
+ "END\n"
+ "BEGIN netdata.apps_sizes %llu\n"
+ "SET calls = %zu\n"
+ "SET files = %zu\n"
+ "SET filenames = %zu\n"
+ "SET inode_changes = %zu\n"
+ "SET link_changes = %zu\n"
+ "SET pids = %zu\n"
+ "SET fds = %d\n"
+ "SET targets = %zu\n"
+ "SET new_pids = %zu\n"
+ "END\n"
+ , dt
+ , cpuuser
+ , cpusyst
+ , dt
+ , calls_counter
+ , file_counter
+ , filenames_allocated_counter
+ , inodes_changed_counter
+ , links_changed_counter
+ , all_pids_count
+ , all_files_len
+ , apps_groups_targets_count
+ , targets_assignment_counter
+ );
+
+ fprintf(stdout,
+ "BEGIN netdata.apps_fix %llu\n"
+ "SET utime = %u\n"
+ "SET stime = %u\n"
+ "SET gtime = %u\n"
+ "SET minflt = %u\n"
+ "SET majflt = %u\n"
+ "END\n"
+ , dt
+ , (unsigned int)(utime_fix_ratio * 100 * RATES_DETAIL)
+ , (unsigned int)(stime_fix_ratio * 100 * RATES_DETAIL)
+ , (unsigned int)(gtime_fix_ratio * 100 * RATES_DETAIL)
+ , (unsigned int)(minflt_fix_ratio * 100 * RATES_DETAIL)
+ , (unsigned int)(majflt_fix_ratio * 100 * RATES_DETAIL)
+ );
+
+ if(include_exited_childs)
+ fprintf(stdout,
+ "BEGIN netdata.apps_children_fix %llu\n"
+ "SET cutime = %u\n"
+ "SET cstime = %u\n"
+ "SET cgtime = %u\n"
+ "SET cminflt = %u\n"
+ "SET cmajflt = %u\n"
+ "END\n"
+ , dt
+ , (unsigned int)(cutime_fix_ratio * 100 * RATES_DETAIL)
+ , (unsigned int)(cstime_fix_ratio * 100 * RATES_DETAIL)
+ , (unsigned int)(cgtime_fix_ratio * 100 * RATES_DETAIL)
+ , (unsigned int)(cminflt_fix_ratio * 100 * RATES_DETAIL)
+ , (unsigned int)(cmajflt_fix_ratio * 100 * RATES_DETAIL)
+ );
+}
+
+static void normalize_utilization(struct target *root) {
+ struct target *w;
+
+ // children processing introduces spikes;
+ // here we try to eliminate them by disabling children processing, either for specific dimensions
+ // or entirely. Either way, we disable it for just a single iteration.
+
+ kernel_uint_t max_time = processors * time_factor * RATES_DETAIL;
+ kernel_uint_t utime = 0, cutime = 0, stime = 0, cstime = 0, gtime = 0, cgtime = 0, minflt = 0, cminflt = 0, majflt = 0, cmajflt = 0;
+
+ if(global_utime > max_time) global_utime = max_time;
+ if(global_stime > max_time) global_stime = max_time;
+ if(global_gtime > max_time) global_gtime = max_time;
+
+ for(w = root; w ; w = w->next) {
+ if(w->target || (!w->processes && !w->exposed)) continue;
+
+ utime += w->utime;
+ stime += w->stime;
+ gtime += w->gtime;
+ cutime += w->cutime;
+ cstime += w->cstime;
+ cgtime += w->cgtime;
+
+ minflt += w->minflt;
+ majflt += w->majflt;
+ cminflt += w->cminflt;
+ cmajflt += w->cmajflt;
+ }
+
+ if(global_utime || global_stime || global_gtime) {
+ if(global_utime + global_stime + global_gtime > utime + cutime + stime + cstime + gtime + cgtime) {
+ // everything we collected fits
+ utime_fix_ratio =
+ stime_fix_ratio =
+ gtime_fix_ratio =
+ cutime_fix_ratio =
+ cstime_fix_ratio =
+ cgtime_fix_ratio = 1.0; //(double)(global_utime + global_stime) / (double)(utime + cutime + stime + cstime);
+ }
+ else if((global_utime + global_stime > utime + stime) && (cutime || cstime)) {
+ // the children's resources are too high;
+ // lower only the children's resources
+ utime_fix_ratio =
+ stime_fix_ratio =
+ gtime_fix_ratio = 1.0;
+ cutime_fix_ratio =
+ cstime_fix_ratio =
+ cgtime_fix_ratio = (double)((global_utime + global_stime) - (utime + stime)) / (double)(cutime + cstime);
+ }
+ else if(utime || stime) {
+ // even the running processes' resources are unrealistic;
+ // zero the children's resources and
+ // lower the running processes' resources
+ utime_fix_ratio =
+ stime_fix_ratio =
+ gtime_fix_ratio = (double)(global_utime + global_stime) / (double)(utime + stime);
+ cutime_fix_ratio =
+ cstime_fix_ratio =
+ cgtime_fix_ratio = 0.0;
+ }
+ else {
+ utime_fix_ratio =
+ stime_fix_ratio =
+ gtime_fix_ratio =
+ cutime_fix_ratio =
+ cstime_fix_ratio =
+ cgtime_fix_ratio = 0.0;
+ }
+ }
+ else {
+ utime_fix_ratio =
+ stime_fix_ratio =
+ gtime_fix_ratio =
+ cutime_fix_ratio =
+ cstime_fix_ratio =
+ cgtime_fix_ratio = 0.0;
+ }
+
+ if(utime_fix_ratio > 1.0) utime_fix_ratio = 1.0;
+ if(cutime_fix_ratio > 1.0) cutime_fix_ratio = 1.0;
+ if(stime_fix_ratio > 1.0) stime_fix_ratio = 1.0;
+ if(cstime_fix_ratio > 1.0) cstime_fix_ratio = 1.0;
+ if(gtime_fix_ratio > 1.0) gtime_fix_ratio = 1.0;
+ if(cgtime_fix_ratio > 1.0) cgtime_fix_ratio = 1.0;
+
+ // if(utime_fix_ratio < 0.0) utime_fix_ratio = 0.0;
+ // if(cutime_fix_ratio < 0.0) cutime_fix_ratio = 0.0;
+ // if(stime_fix_ratio < 0.0) stime_fix_ratio = 0.0;
+ // if(cstime_fix_ratio < 0.0) cstime_fix_ratio = 0.0;
+ // if(gtime_fix_ratio < 0.0) gtime_fix_ratio = 0.0;
+ // if(cgtime_fix_ratio < 0.0) cgtime_fix_ratio = 0.0;
+
+ // TODO
+ // we use cpu time to normalize page faults
+ // the problem is that to find the proper max values
+ // for page faults we have to parse /proc/vmstat
+ // which is quite big to do it again (netdata does it already)
+ //
+ // a better solution could be to somehow have netdata
+ // do this normalization for us
+
+ if(utime || stime || gtime)
+ majflt_fix_ratio =
+ minflt_fix_ratio = (double)(utime * utime_fix_ratio + stime * stime_fix_ratio + gtime * gtime_fix_ratio) / (double)(utime + stime + gtime);
+ else
+ minflt_fix_ratio =
+ majflt_fix_ratio = 1.0;
+
+ if(cutime || cstime || cgtime)
+ cmajflt_fix_ratio =
+ cminflt_fix_ratio = (double)(cutime * cutime_fix_ratio + cstime * cstime_fix_ratio + cgtime * cgtime_fix_ratio) / (double)(cutime + cstime + cgtime);
+ else
+ cminflt_fix_ratio =
+ cmajflt_fix_ratio = 1.0;
+
+ // the report
+
+ debug_log(
+ "SYSTEM: u=" KERNEL_UINT_FORMAT " s=" KERNEL_UINT_FORMAT " g=" KERNEL_UINT_FORMAT " "
+ "COLLECTED: u=" KERNEL_UINT_FORMAT " s=" KERNEL_UINT_FORMAT " g=" KERNEL_UINT_FORMAT " cu=" KERNEL_UINT_FORMAT " cs=" KERNEL_UINT_FORMAT " cg=" KERNEL_UINT_FORMAT " "
+ "DELTA: u=" KERNEL_UINT_FORMAT " s=" KERNEL_UINT_FORMAT " g=" KERNEL_UINT_FORMAT " "
+ "FIX: u=%0.2f s=%0.2f g=%0.2f cu=%0.2f cs=%0.2f cg=%0.2f "
+ "FINALLY: u=" KERNEL_UINT_FORMAT " s=" KERNEL_UINT_FORMAT " g=" KERNEL_UINT_FORMAT " cu=" KERNEL_UINT_FORMAT " cs=" KERNEL_UINT_FORMAT " cg=" KERNEL_UINT_FORMAT " "
+ , global_utime
+ , global_stime
+ , global_gtime
+ , utime
+ , stime
+ , gtime
+ , cutime
+ , cstime
+ , cgtime
+ , utime + cutime - global_utime
+ , stime + cstime - global_stime
+ , gtime + cgtime - global_gtime
+ , utime_fix_ratio
+ , stime_fix_ratio
+ , gtime_fix_ratio
+ , cutime_fix_ratio
+ , cstime_fix_ratio
+ , cgtime_fix_ratio
+ , (kernel_uint_t)(utime * utime_fix_ratio)
+ , (kernel_uint_t)(stime * stime_fix_ratio)
+ , (kernel_uint_t)(gtime * gtime_fix_ratio)
+ , (kernel_uint_t)(cutime * cutime_fix_ratio)
+ , (kernel_uint_t)(cstime * cstime_fix_ratio)
+ , (kernel_uint_t)(cgtime * cgtime_fix_ratio)
+ );
+}
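+
+// Worked example for the normalization above (illustrative numbers): say
+// global_utime + global_stime = 1000, collected utime + stime = 800 and
+// cutime + cstime = 400. The first branch fails (1000 < 1200), the second
+// fires: running processes keep a ratio of 1.0 while the children's ratios
+// become (1000 - 800) / 400 = 0.5, trimming the spike that double-counted
+// exited children.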
+
+static void send_collected_data_to_netdata(struct target *root, const char *type, usec_t dt) {
+ struct target *w;
+
+ send_BEGIN(type, "cpu", dt);
+ for (w = root; w ; w = w->next) {
+ if(unlikely(w->exposed && w->processes))
+ send_SET(w->name, (kernel_uint_t)(w->utime * utime_fix_ratio) + (kernel_uint_t)(w->stime * stime_fix_ratio) + (kernel_uint_t)(w->gtime * gtime_fix_ratio) + (include_exited_childs?((kernel_uint_t)(w->cutime * cutime_fix_ratio) + (kernel_uint_t)(w->cstime * cstime_fix_ratio) + (kernel_uint_t)(w->cgtime * cgtime_fix_ratio)):0ULL));
+ }
+ send_END();
+
+ send_BEGIN(type, "cpu_user", dt);
+ for (w = root; w ; w = w->next) {
+ if(unlikely(w->exposed && w->processes))
+ send_SET(w->name, (kernel_uint_t)(w->utime * utime_fix_ratio) + (include_exited_childs?((kernel_uint_t)(w->cutime * cutime_fix_ratio)):0ULL));
+ }
+ send_END();
+
+ send_BEGIN(type, "cpu_system", dt);
+ for (w = root; w ; w = w->next) {
+ if(unlikely(w->exposed && w->processes))
+ send_SET(w->name, (kernel_uint_t)(w->stime * stime_fix_ratio) + (include_exited_childs?((kernel_uint_t)(w->cstime * cstime_fix_ratio)):0ULL));
+ }
+ send_END();
+
+ if(show_guest_time) {
+ send_BEGIN(type, "cpu_guest", dt);
+ for (w = root; w ; w = w->next) {
+ if(unlikely(w->exposed && w->processes))
+ send_SET(w->name, (kernel_uint_t)(w->gtime * gtime_fix_ratio) + (include_exited_childs?((kernel_uint_t)(w->cgtime * cgtime_fix_ratio)):0ULL));
+ }
+ send_END();
+ }
+
+ send_BEGIN(type, "threads", dt);
+ for (w = root; w ; w = w->next) {
+ if(unlikely(w->exposed && w->processes))
+ send_SET(w->name, w->num_threads);
+ }
+ send_END();
+
+ send_BEGIN(type, "processes", dt);
+ for (w = root; w ; w = w->next) {
+ if(unlikely(w->exposed && w->processes))
+ send_SET(w->name, w->processes);
+ }
+ send_END();
+
+#ifndef __FreeBSD__
+ send_BEGIN(type, "uptime", dt);
+ for (w = root; w ; w = w->next) {
+ if(unlikely(w->exposed && w->processes))
+ send_SET(w->name, (global_uptime > w->starttime)?(global_uptime - w->starttime):0);
+ }
+ send_END();
+
+ send_BEGIN(type, "uptime_min", dt);
+ for (w = root; w ; w = w->next) {
+ if(unlikely(w->exposed && w->processes))
+ send_SET(w->name, w->uptime_min);
+ }
+ send_END();
+
+ send_BEGIN(type, "uptime_avg", dt);
+ for (w = root; w ; w = w->next) {
+ if(unlikely(w->exposed && w->processes))
+ send_SET(w->name, w->uptime_sum / w->processes);
+ }
+ send_END();
+
+ send_BEGIN(type, "uptime_max", dt);
+ for (w = root; w ; w = w->next) {
+ if(unlikely(w->exposed && w->processes))
+ send_SET(w->name, w->uptime_max);
+ }
+ send_END();
+#endif
+
+ send_BEGIN(type, "mem", dt);
+ for (w = root; w ; w = w->next) {
+ if(unlikely(w->exposed && w->processes))
+ send_SET(w->name, (w->status_vmrss > w->status_vmshared)?(w->status_vmrss - w->status_vmshared):0ULL);
+ }
+ send_END();
+
+ send_BEGIN(type, "vmem", dt);
+ for (w = root; w ; w = w->next) {
+ if(unlikely(w->exposed && w->processes))
+ send_SET(w->name, w->status_vmsize);
+ }
+ send_END();
+
+#ifndef __FreeBSD__
+ send_BEGIN(type, "swap", dt);
+ for (w = root; w ; w = w->next) {
+ if(unlikely(w->exposed && w->processes))
+ send_SET(w->name, w->status_vmswap);
+ }
+ send_END();
+#endif
+
+ send_BEGIN(type, "minor_faults", dt);
+ for (w = root; w ; w = w->next) {
+ if(unlikely(w->exposed && w->processes))
+ send_SET(w->name, (kernel_uint_t)(w->minflt * minflt_fix_ratio) + (include_exited_childs?((kernel_uint_t)(w->cminflt * cminflt_fix_ratio)):0ULL));
+ }
+ send_END();
+
+ send_BEGIN(type, "major_faults", dt);
+ for (w = root; w ; w = w->next) {
+ if(unlikely(w->exposed && w->processes))
+ send_SET(w->name, (kernel_uint_t)(w->majflt * majflt_fix_ratio) + (include_exited_childs?((kernel_uint_t)(w->cmajflt * cmajflt_fix_ratio)):0ULL));
+ }
+ send_END();
+
+#ifndef __FreeBSD__
+ send_BEGIN(type, "lreads", dt);
+ for (w = root; w ; w = w->next) {
+ if(unlikely(w->exposed && w->processes))
+ send_SET(w->name, w->io_logical_bytes_read);
+ }
+ send_END();
+
+ send_BEGIN(type, "lwrites", dt);
+ for (w = root; w ; w = w->next) {
+ if(unlikely(w->exposed && w->processes))
+ send_SET(w->name, w->io_logical_bytes_written);
+ }
+ send_END();
+#endif
+
+ send_BEGIN(type, "preads", dt);
+ for (w = root; w ; w = w->next) {
+ if(unlikely(w->exposed && w->processes))
+ send_SET(w->name, w->io_storage_bytes_read);
+ }
+ send_END();
+
+ send_BEGIN(type, "pwrites", dt);
+ for (w = root; w ; w = w->next) {
+ if(unlikely(w->exposed && w->processes))
+ send_SET(w->name, w->io_storage_bytes_written);
+ }
+ send_END();
+
+ if(enable_file_charts) {
+ send_BEGIN(type, "files", dt);
+ for (w = root; w; w = w->next) {
+ if (unlikely(w->exposed && w->processes))
+ send_SET(w->name, w->openfiles);
+ }
+ if (!strcmp("apps", type)){
+ kernel_uint_t usedfdpercentage = (kernel_uint_t) ((currentmaxfds * 100) / sysconf(_SC_OPEN_MAX));
+ fprintf(stdout, "VARIABLE fdperc = " KERNEL_UINT_FORMAT "\n", usedfdpercentage);
+ }
+ send_END();
+
+ send_BEGIN(type, "sockets", dt);
+ for (w = root; w; w = w->next) {
+ if (unlikely(w->exposed && w->processes))
+ send_SET(w->name, w->opensockets);
+ }
+ send_END();
+
+ send_BEGIN(type, "pipes", dt);
+ for (w = root; w; w = w->next) {
+ if (unlikely(w->exposed && w->processes))
+ send_SET(w->name, w->openpipes);
+ }
+ send_END();
+ }
+}
+
+
+// ----------------------------------------------------------------------------
+// generate the charts
+
+static void send_charts_updates_to_netdata(struct target *root, const char *type, const char *title)
+{
+ struct target *w;
+ int newly_added = 0;
+
+ for(w = root ; w ; w = w->next) {
+ if (w->target) continue;
+
+ if(unlikely(w->processes && (debug_enabled || w->debug_enabled))) {
+ struct pid_on_target *pid_on_target;
+
+ fprintf(stderr, "apps.plugin: target '%s' has aggregated %u process%s:", w->name, w->processes, (w->processes == 1)?"":"es");
+
+ for(pid_on_target = w->root_pid; pid_on_target; pid_on_target = pid_on_target->next) {
+ fprintf(stderr, " %d", pid_on_target->pid);
+ }
+
+ fputc('\n', stderr);
+ }
+
+ if (!w->exposed && w->processes) {
+ newly_added++;
+ w->exposed = 1;
+ if (debug_enabled || w->debug_enabled)
+ debug_log_int("%s just added - regenerating charts.", w->name);
+ }
+ }
+
+ // nothing more to show
+ if(!newly_added && show_guest_time == show_guest_time_old) return;
+
+ // we have something new to show
+ // update the charts
+ fprintf(stdout, "CHART %s.cpu '' '%s CPU Time (100%% = 1 core)' 'percentage' cpu %s.cpu stacked 20001 %d\n", type, title, type, update_every);
+ for (w = root; w ; w = w->next) {
+ if(unlikely(w->exposed))
+ fprintf(stdout, "DIMENSION %s '' absolute 1 %llu %s\n", w->name, time_factor * RATES_DETAIL / 100, w->hidden ? "hidden" : "");
+ }
+
+ fprintf(stdout, "CHART %s.mem '' '%s Real Memory (w/o shared)' 'MiB' mem %s.mem stacked 20003 %d\n", type, title, type, update_every);
+ for (w = root; w ; w = w->next) {
+ if(unlikely(w->exposed))
+ fprintf(stdout, "DIMENSION %s '' absolute %ld %ld\n", w->name, 1L, 1024L);
+ }
+
+ fprintf(stdout, "CHART %s.vmem '' '%s Virtual Memory Size' 'MiB' mem %s.vmem stacked 20005 %d\n", type, title, type, update_every);
+ for (w = root; w ; w = w->next) {
+ if(unlikely(w->exposed))
+ fprintf(stdout, "DIMENSION %s '' absolute %ld %ld\n", w->name, 1L, 1024L);
+ }
+
+ fprintf(stdout, "CHART %s.threads '' '%s Threads' 'threads' processes %s.threads stacked 20006 %d\n", type, title, type, update_every);
+ for (w = root; w ; w = w->next) {
+ if(unlikely(w->exposed))
+ fprintf(stdout, "DIMENSION %s '' absolute 1 1\n", w->name);
+ }
+
+ fprintf(stdout, "CHART %s.processes '' '%s Processes' 'processes' processes %s.processes stacked 20007 %d\n", type, title, type, update_every);
+ for (w = root; w ; w = w->next) {
+ if(unlikely(w->exposed))
+ fprintf(stdout, "DIMENSION %s '' absolute 1 1\n", w->name);
+ }
+
+#ifndef __FreeBSD__
+ fprintf(stdout, "CHART %s.uptime '' '%s Carried Over Uptime' 'seconds' processes %s.uptime line 20008 %d\n", type, title, type, update_every);
+ for (w = root; w ; w = w->next) {
+ if(unlikely(w->exposed))
+ fprintf(stdout, "DIMENSION %s '' absolute 1 1\n", w->name);
+ }
+
+ fprintf(stdout, "CHART %s.uptime_min '' '%s Minimum Uptime' 'seconds' processes %s.uptime_min line 20009 %d\n", type, title, type, update_every);
+ for (w = root; w ; w = w->next) {
+ if(unlikely(w->exposed))
+ fprintf(stdout, "DIMENSION %s '' absolute 1 1\n", w->name);
+ }
+
+ fprintf(stdout, "CHART %s.uptime_avg '' '%s Average Uptime' 'seconds' processes %s.uptime_avg line 20010 %d\n", type, title, type, update_every);
+ for (w = root; w ; w = w->next) {
+ if(unlikely(w->exposed))
+ fprintf(stdout, "DIMENSION %s '' absolute 1 1\n", w->name);
+ }
+
+ fprintf(stdout, "CHART %s.uptime_max '' '%s Maximum Uptime' 'seconds' processes %s.uptime_max line 20011 %d\n", type, title, type, update_every);
+ for (w = root; w ; w = w->next) {
+ if(unlikely(w->exposed))
+ fprintf(stdout, "DIMENSION %s '' absolute 1 1\n", w->name);
+ }
+#endif
+
+ fprintf(stdout, "CHART %s.cpu_user '' '%s CPU User Time (100%% = 1 core)' 'percentage' cpu %s.cpu_user stacked 20020 %d\n", type, title, type, update_every);
+ for (w = root; w ; w = w->next) {
+ if(unlikely(w->exposed))
+ fprintf(stdout, "DIMENSION %s '' absolute 1 %llu\n", w->name, time_factor * RATES_DETAIL / 100LLU);
+ }
+
+ fprintf(stdout, "CHART %s.cpu_system '' '%s CPU System Time (100%% = 1 core)' 'percentage' cpu %s.cpu_system stacked 20021 %d\n", type, title, type, update_every);
+ for (w = root; w ; w = w->next) {
+ if(unlikely(w->exposed))
+ fprintf(stdout, "DIMENSION %s '' absolute 1 %llu\n", w->name, time_factor * RATES_DETAIL / 100LLU);
+ }
+
+ if(show_guest_time) {
+ fprintf(stdout, "CHART %s.cpu_guest '' '%s CPU Guest Time (100%% = 1 core)' 'percentage' cpu %s.cpu_system stacked 20022 %d\n", type, title, type, update_every);
+ for (w = root; w; w = w->next) {
+ if(unlikely(w->exposed))
+ fprintf(stdout, "DIMENSION %s '' absolute 1 %llu\n", w->name, time_factor * RATES_DETAIL / 100LLU);
+ }
+ }
+
+#ifndef __FreeBSD__
+ fprintf(stdout, "CHART %s.swap '' '%s Swap Memory' 'MiB' swap %s.swap stacked 20011 %d\n", type, title, type, update_every);
+ for (w = root; w ; w = w->next) {
+ if(unlikely(w->exposed))
+ fprintf(stdout, "DIMENSION %s '' absolute %ld %ld\n", w->name, 1L, 1024L);
+ }
+#endif
+
+ fprintf(stdout, "CHART %s.major_faults '' '%s Major Page Faults (swap read)' 'page faults/s' swap %s.major_faults stacked 20012 %d\n", type, title, type, update_every);
+ for (w = root; w ; w = w->next) {
+ if(unlikely(w->exposed))
+ fprintf(stdout, "DIMENSION %s '' absolute 1 %llu\n", w->name, RATES_DETAIL);
+ }
+
+ fprintf(stdout, "CHART %s.minor_faults '' '%s Minor Page Faults' 'page faults/s' mem %s.minor_faults stacked 20011 %d\n", type, title, type, update_every);
+ for (w = root; w ; w = w->next) {
+ if(unlikely(w->exposed))
+ fprintf(stdout, "DIMENSION %s '' absolute 1 %llu\n", w->name, RATES_DETAIL);
+ }
+
+#ifdef __FreeBSD__
+ fprintf(stdout, "CHART %s.preads '' '%s Disk Reads' 'blocks/s' disk %s.preads stacked 20002 %d\n", type, title, type, update_every);
+ for (w = root; w ; w = w->next) {
+ if(unlikely(w->exposed))
+ fprintf(stdout, "DIMENSION %s '' absolute 1 %llu\n", w->name, RATES_DETAIL);
+ }
+
+ fprintf(stdout, "CHART %s.pwrites '' '%s Disk Writes' 'blocks/s' disk %s.pwrites stacked 20002 %d\n", type, title, type, update_every);
+ for (w = root; w ; w = w->next) {
+ if(unlikely(w->exposed))
+ fprintf(stdout, "DIMENSION %s '' absolute 1 %llu\n", w->name, RATES_DETAIL);
+ }
+#else
+ fprintf(stdout, "CHART %s.preads '' '%s Disk Reads' 'KiB/s' disk %s.preads stacked 20002 %d\n", type, title, type, update_every);
+ for (w = root; w ; w = w->next) {
+ if(unlikely(w->exposed))
+ fprintf(stdout, "DIMENSION %s '' absolute 1 %llu\n", w->name, 1024LLU * RATES_DETAIL);
+ }
+
+ fprintf(stdout, "CHART %s.pwrites '' '%s Disk Writes' 'KiB/s' disk %s.pwrites stacked 20002 %d\n", type, title, type, update_every);
+ for (w = root; w ; w = w->next) {
+ if(unlikely(w->exposed))
+ fprintf(stdout, "DIMENSION %s '' absolute 1 %llu\n", w->name, 1024LLU * RATES_DETAIL);
+ }
+
+ fprintf(stdout, "CHART %s.lreads '' '%s Disk Logical Reads' 'KiB/s' disk %s.lreads stacked 20042 %d\n", type, title, type, update_every);
+ for (w = root; w ; w = w->next) {
+ if(unlikely(w->exposed))
+ fprintf(stdout, "DIMENSION %s '' absolute 1 %llu\n", w->name, 1024LLU * RATES_DETAIL);
+ }
+
+ fprintf(stdout, "CHART %s.lwrites '' '%s I/O Logical Writes' 'KiB/s' disk %s.lwrites stacked 20042 %d\n", type, title, type, update_every);
+ for (w = root; w ; w = w->next) {
+ if(unlikely(w->exposed))
+ fprintf(stdout, "DIMENSION %s '' absolute 1 %llu\n", w->name, 1024LLU * RATES_DETAIL);
+ }
+#endif
+
+ if(enable_file_charts) {
+ fprintf(stdout, "CHART %s.files '' '%s Open Files' 'open files' disk %s.files stacked 20050 %d\n", type,
+ title, type, update_every);
+ for (w = root; w; w = w->next) {
+ if (unlikely(w->exposed))
+ fprintf(stdout, "DIMENSION %s '' absolute 1 1\n", w->name);
+ }
+
+ fprintf(stdout, "CHART %s.sockets '' '%s Open Sockets' 'open sockets' net %s.sockets stacked 20051 %d\n",
+ type, title, type, update_every);
+ for (w = root; w; w = w->next) {
+ if (unlikely(w->exposed))
+ fprintf(stdout, "DIMENSION %s '' absolute 1 1\n", w->name);
+ }
+
+ fprintf(stdout, "CHART %s.pipes '' '%s Pipes' 'open pipes' processes %s.pipes stacked 20053 %d\n", type,
+ title, type, update_every);
+ for (w = root; w; w = w->next) {
+ if (unlikely(w->exposed))
+ fprintf(stdout, "DIMENSION %s '' absolute 1 1\n", w->name);
+ }
+ }
+}
+
+
+// ----------------------------------------------------------------------------
+// parse command line arguments
+
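+// returns 1 when /proc/1/io can be opened and read, i.e. when the plugin
+// has enough privileges (root or CAP_DAC_READ_SEARCH) to inspect other processes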
+int check_proc_1_io() {
+ int ret = 0;
+
+ procfile *ff = procfile_open("/proc/1/io", NULL, PROCFILE_FLAG_NO_ERROR_ON_FILE_IO);
+ if(!ff) goto cleanup;
+
+ ff = procfile_readall(ff);
+ if(!ff) goto cleanup;
+
+ ret = 1;
+
+cleanup:
+ procfile_close(ff);
+ return ret;
+}
+
+static void parse_args(int argc, char **argv)
+{
+ int i, freq = 0;
+
+ for(i = 1; i < argc; i++) {
+ if(!freq) {
+ int n = (int)str2l(argv[i]);
+ if(n > 0) {
+ freq = n;
+ continue;
+ }
+ }
+
+ if(strcmp("version", argv[i]) == 0 || strcmp("-version", argv[i]) == 0 || strcmp("--version", argv[i]) == 0 || strcmp("-v", argv[i]) == 0 || strcmp("-V", argv[i]) == 0) {
+ printf("apps.plugin %s\n", VERSION);
+ exit(0);
+ }
+
+ if(strcmp("test-permissions", argv[i]) == 0 || strcmp("-t", argv[i]) == 0) {
+ if(!check_proc_1_io()) {
+ perror("Tried to read /proc/1/io and it failed");
+ exit(1);
+ }
+ printf("OK\n");
+ exit(0);
+ }
+
+ if(strcmp("debug", argv[i]) == 0) {
+ debug_enabled = 1;
+#ifndef NETDATA_INTERNAL_CHECKS
+ fprintf(stderr, "apps.plugin has been compiled without debugging\n");
+#endif
+ continue;
+ }
+
+#ifndef __FreeBSD__
+ if(strcmp("fds-cache-secs", argv[i]) == 0) {
+ if(argc <= i + 1) {
+ fprintf(stderr, "Parameter 'fds-cache-secs' requires a number as argument.\n");
+ exit(1);
+ }
+ i++;
+ max_fds_cache_seconds = str2i(argv[i]);
+ if(max_fds_cache_seconds < 0) max_fds_cache_seconds = 0;
+ continue;
+ }
+#endif
+
+ if(strcmp("no-childs", argv[i]) == 0 || strcmp("without-childs", argv[i]) == 0) {
+ include_exited_childs = 0;
+ continue;
+ }
+
+ if(strcmp("with-childs", argv[i]) == 0) {
+ include_exited_childs = 1;
+ continue;
+ }
+
+ if(strcmp("with-guest", argv[i]) == 0) {
+ enable_guest_charts = 1;
+ continue;
+ }
+
+ if(strcmp("no-guest", argv[i]) == 0 || strcmp("without-guest", argv[i]) == 0) {
+ enable_guest_charts = 0;
+ continue;
+ }
+
+ if(strcmp("with-files", argv[i]) == 0) {
+ enable_file_charts = 1;
+ continue;
+ }
+
+ if(strcmp("no-files", argv[i]) == 0 || strcmp("without-files", argv[i]) == 0) {
+ enable_file_charts = 0;
+ continue;
+ }
+
+ if(strcmp("no-users", argv[i]) == 0 || strcmp("without-users", argv[i]) == 0) {
+ enable_users_charts = 0;
+ continue;
+ }
+
+ if(strcmp("no-groups", argv[i]) == 0 || strcmp("without-groups", argv[i]) == 0) {
+ enable_groups_charts = 0;
+ continue;
+ }
+
+ if(strcmp("-h", argv[i]) == 0 || strcmp("--help", argv[i]) == 0) {
+ fprintf(stderr,
+ "\n"
+ " netdata apps.plugin %s\n"
+ " Copyright (C) 2016-2017 Costa Tsaousis <costa@tsaousis.gr>\n"
+ " Released under GNU General Public License v3 or later.\n"
+ " All rights reserved.\n"
+ "\n"
+ " This program is a data collector plugin for netdata.\n"
+ "\n"
+ " Available command line options:\n"
+ "\n"
+ " SECONDS set the data collection frequency\n"
+ "\n"
+ " debug enable debugging (lot of output)\n"
+ "\n"
+ " with-childs\n"
+ " without-childs enable / disable aggregating exited\n"
+ " children resources into parents\n"
+ " (default is enabled)\n"
+ "\n"
+ " with-guest\n"
+ " without-guest enable / disable reporting guest charts\n"
+ " (default is disabled)\n"
+ "\n"
+ " with-files\n"
+ " without-files enable / disable reporting files, sockets, pipes\n"
+ " (default is enabled)\n"
+ "\n"
+ " without-users disable reporting per user charts\n"
+ "\n"
+ " without-groups disable reporting per user group charts\n"
+ "\n"
+#ifndef __FreeBSD__
+ " fds-cache-secs N cache the files of processed for N seconds\n"
+ " caching is adaptive per file (when a file\n"
+ " is found, it starts at 0 and while the file\n"
+ " remains open, it is incremented up to the\n"
+ " max given)\n"
+ " (default is %d seconds)\n"
+ "\n"
+#endif
+ " version or -v or -V print program version and exit\n"
+ "\n"
+ , VERSION
+#ifndef __FreeBSD__
+ , max_fds_cache_seconds
+#endif
+ );
+ exit(1);
+ }
+
+ error("Cannot understand option %s", argv[i]);
+ exit(1);
+ }
+
+ if(freq > 0) update_every = freq;
+
+ if(read_apps_groups_conf(user_config_dir, "groups")) {
+ info("Cannot read process groups configuration file '%s/apps_groups.conf'. Will try '%s/apps_groups.conf'", user_config_dir, stock_config_dir);
+
+ if(read_apps_groups_conf(stock_config_dir, "groups")) {
+ error("Cannot read process groups '%s/apps_groups.conf'. There are no internal defaults. Failing.", stock_config_dir);
+ exit(1);
+ }
+ else
+ info("Loaded config file '%s/apps_groups.conf'", stock_config_dir);
+ }
+ else
+ info("Loaded config file '%s/apps_groups.conf'", user_config_dir);
+}
+
+static int am_i_running_as_root() {
+ uid_t uid = getuid(), euid = geteuid();
+
+ if(uid == 0 || euid == 0) {
+ if(debug_enabled) info("I am running with escalated privileges, uid = %u, euid = %u.", uid, euid);
+ return 1;
+ }
+
+ if(debug_enabled) info("I am not running with escalated privileges, uid = %u, euid = %u.", uid, euid);
+ return 0;
+}
+
+#ifdef HAVE_CAPABILITY
+static int check_capabilities() {
+ cap_t caps = cap_get_proc();
+ if(!caps) {
+ error("Cannot get current capabilities.");
+ return 0;
+ }
+ else if(debug_enabled)
+ info("Received my capabilities from the system.");
+
+ int ret = 1;
+
+ cap_flag_value_t cfv = CAP_CLEAR;
+ if(cap_get_flag(caps, CAP_DAC_READ_SEARCH, CAP_EFFECTIVE, &cfv) == -1) {
+ error("Cannot find if CAP_DAC_READ_SEARCH is effective.");
+ ret = 0;
+ }
+ else {
+ if(cfv != CAP_SET) {
+ error("apps.plugin should run with CAP_DAC_READ_SEARCH.");
+ ret = 0;
+ }
+ else if(debug_enabled)
+ info("apps.plugin runs with CAP_DAC_READ_SEARCH.");
+ }
+
+ cfv = CAP_CLEAR;
+ if(cap_get_flag(caps, CAP_SYS_PTRACE, CAP_EFFECTIVE, &cfv) == -1) {
+ error("Cannot find if CAP_SYS_PTRACE is effective.");
+ ret = 0;
+ }
+ else {
+ if(cfv != CAP_SET) {
+ error("apps.plugin should run with CAP_SYS_PTRACE.");
+ ret = 0;
+ }
+ else if(debug_enabled)
+ info("apps.plugin runs with CAP_SYS_PTRACE.");
+ }
+
+ cap_free(caps);
+
+ return ret;
+}
+#else
+static int check_capabilities() {
+ return 0;
+}
+#endif
+
+int main(int argc, char **argv) {
+ // debug_flags = D_PROCFILE;
+
+ pagesize = (size_t)sysconf(_SC_PAGESIZE);
+
+ // set the name for logging
+ program_name = "apps.plugin";
+
+ // disable syslog for apps.plugin
+ error_log_syslog = 0;
+
+ // set errors flood protection to 100 logs per hour
+ error_log_errors_per_period = 100;
+ error_log_throttle_period = 3600;
+
+ // since apps.plugin runs as root, prevent it from opening symbolic links
+ procfile_open_flags = O_RDONLY|O_NOFOLLOW;
+
+ netdata_configured_host_prefix = getenv("NETDATA_HOST_PREFIX");
+ if(verify_netdata_host_prefix() == -1) exit(1);
+
+ user_config_dir = getenv("NETDATA_USER_CONFIG_DIR");
+ if(user_config_dir == NULL) {
+ // info("NETDATA_CONFIG_DIR is not passed from netdata");
+ user_config_dir = CONFIG_DIR;
+ }
+ // else info("Found NETDATA_USER_CONFIG_DIR='%s'", user_config_dir);
+
+ stock_config_dir = getenv("NETDATA_STOCK_CONFIG_DIR");
+ if(stock_config_dir == NULL) {
+ // info("NETDATA_CONFIG_DIR is not passed from netdata");
+ stock_config_dir = LIBCONFIG_DIR;
+ }
+ // else info("Found NETDATA_USER_CONFIG_DIR='%s'", user_config_dir);
+
+#ifdef NETDATA_INTERNAL_CHECKS
+ if(debug_flags != 0) {
+ struct rlimit rl = { RLIM_INFINITY, RLIM_INFINITY };
+ if(setrlimit(RLIMIT_CORE, &rl) != 0)
+ info("Cannot request unlimited core dumps for debugging... Proceeding anyway...");
+#ifdef HAVE_SYS_PRCTL_H
+ prctl(PR_SET_DUMPABLE, 1, 0, 0, 0);
+#endif
+ }
+#endif /* NETDATA_INTERNAL_CHECKS */
+
+ procfile_adaptive_initial_allocation = 1;
+
+ get_system_HZ();
+#ifdef __FreeBSD__
+ time_factor = 1000000ULL / RATES_DETAIL; // FreeBSD uses usecs
+#else
+ time_factor = system_hz; // Linux uses clock ticks
+#endif
+
+ get_system_pid_max();
+ get_system_cpus();
+
+ parse_args(argc, argv);
+
+ if(!check_capabilities() && !am_i_running_as_root() && !check_proc_1_io()) {
+ uid_t uid = getuid(), euid = geteuid();
+#ifdef HAVE_CAPABILITY
+ error("apps.plugin should either run as root (now running with uid %u, euid %u) or have special capabilities. "
+ "Without these, apps.plugin cannot report disk I/O utilization of other processes. "
+ "To enable capabilities run: sudo setcap cap_dac_read_search,cap_sys_ptrace+ep %s; "
+ "To enable setuid to root run: sudo chown root:netdata %s; sudo chmod 4750 %s; "
+ , uid, euid, argv[0], argv[0], argv[0]
+ );
+#else
+ error("apps.plugin should either run as root (now running with uid %u, euid %u) or have special capabilities. "
+ "Without these, apps.plugin cannot report disk I/O utilization of other processes. "
+ "Your system does not support capabilities. "
+ "To enable setuid to root run: sudo chown root:netdata %s; sudo chmod 4750 %s; "
+ , uid, euid, argv[0], argv[0]
+ );
+#endif
+ }
+
+ info("started on pid %d", getpid());
+
+ snprintfz(all_user_ids.filename, FILENAME_MAX, "%s/etc/passwd", netdata_configured_host_prefix);
+ debug_log("passwd file: '%s'", all_user_ids.filename);
+
+ snprintfz(all_group_ids.filename, FILENAME_MAX, "%s/etc/group", netdata_configured_host_prefix);
+ debug_log("group file: '%s'", all_group_ids.filename);
+
+#if (ALL_PIDS_ARE_READ_INSTANTLY == 0)
+ all_pids_sortlist = callocz(sizeof(pid_t), (size_t)pid_max);
+#endif
+
+ all_pids = callocz(sizeof(struct pid_stat *), (size_t) pid_max);
+
+ usec_t step = update_every * USEC_PER_SEC;
+ global_iterations_counter = 1;
+ heartbeat_t hb;
+ heartbeat_init(&hb);
+ for(;1; global_iterations_counter++) {
+
+#ifdef NETDATA_PROFILING
+#warning "compiling for profiling"
+ static int profiling_count=0;
+ profiling_count++;
+ if(unlikely(profiling_count > 2000)) exit(0);
+ usec_t dt = update_every * USEC_PER_SEC;
+#else
+ usec_t dt = heartbeat_next(&hb, step);
+#endif
+
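+ // make sure our stdout pipe to netdata is still writable;
+ // if netdata has exited, poll() reports POLLERR and we exit too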
+ struct pollfd pollfd = { .fd = fileno(stdout), .events = POLLERR };
+ if (unlikely(poll(&pollfd, 1, 0) < 0))
+ fatal("Cannot check if a pipe is available");
+ if (unlikely(pollfd.revents & POLLERR))
+ fatal("Cannot write to a pipe");
+
+ if(!collect_data_for_all_processes()) {
+ error("Cannot collect /proc data for running processes. Disabling apps.plugin...");
+ printf("DISABLE\n");
+ exit(1);
+ }
+
+ currentmaxfds = 0;
+ calculate_netdata_statistics();
+ normalize_utilization(apps_groups_root_target);
+
+ send_resource_usage_to_netdata(dt);
+
+ // this is smart enough to show only newly added apps, when needed
+ send_charts_updates_to_netdata(apps_groups_root_target, "apps", "Apps");
+
+ if(likely(enable_users_charts))
+ send_charts_updates_to_netdata(users_root_target, "users", "Users");
+
+ if(likely(enable_groups_charts))
+ send_charts_updates_to_netdata(groups_root_target, "groups", "User Groups");
+
+ send_collected_data_to_netdata(apps_groups_root_target, "apps", dt);
+
+ if(likely(enable_users_charts))
+ send_collected_data_to_netdata(users_root_target, "users", dt);
+
+ if(likely(enable_groups_charts))
+ send_collected_data_to_netdata(groups_root_target, "groups", dt);
+
+ fflush(stdout);
+
+ show_guest_time_old = show_guest_time;
+
+ debug_log("done Loop No %zu", global_iterations_counter);
+ }
+}
diff --git a/collectors/cgroups.plugin/Makefile.am b/collectors/cgroups.plugin/Makefile.am
new file mode 100644
index 0000000..9e924ab
--- /dev/null
+++ b/collectors/cgroups.plugin/Makefile.am
@@ -0,0 +1,21 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+CLEANFILES = \
+ cgroup-name.sh \
+ $(NULL)
+
+include $(top_srcdir)/build/subst.inc
+SUFFIXES = .in
+
+dist_plugins_SCRIPTS = \
+ cgroup-name.sh \
+ cgroup-network-helper.sh \
+ $(NULL)
+
+dist_noinst_DATA = \
+ cgroup-name.sh.in \
+ README.md \
+ $(NULL)
diff --git a/collectors/cgroups.plugin/README.md b/collectors/cgroups.plugin/README.md
new file mode 100644
index 0000000..21dbcae
--- /dev/null
+++ b/collectors/cgroups.plugin/README.md
@@ -0,0 +1,233 @@
+<!--
+title: "cgroups.plugin"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/cgroups.plugin/README.md
+-->
+
+# cgroups.plugin
+
+You can monitor containers and virtual machines using **cgroups**.
+
+cgroups (or control groups) are a Linux kernel feature that provides accounting and resource usage limiting for processes. When cgroups are bundled with namespaces (i.e. isolation), they form what we usually call **containers**.
+
+cgroups are hierarchical: a cgroup can contain child cgroups, which can contain more cgroups, and so on. All accounting is reported (and resource usage limits are applied) hierarchically as well.
+
+To visualize cgroup metrics, Netdata provides configuration for cherry-picking the cgroups of interest. By default (without any configuration) Netdata should pick **systemd services**, all kinds of **containers** (lxc, docker, etc) and **virtual machines** spawned by managers that register them with cgroups (qemu, libvirt, etc).
+
+## configuring Netdata for cgroups
+
+For each cgroup available in the system, Netdata provides this configuration:
+
+```
+[plugin:cgroups]
+ enable cgroup XXX = yes | no
+```
+
+It also provides a few pattern lists to set a sane default (`yes` or `no`).
+
+Below we explain how this works.
+
+### how Netdata finds the available cgroups
+
+Linux exposes resource usage reporting and provides dynamic configuration for cgroups using virtual files, usually under `/sys/fs/cgroup`. Netdata reads `/proc/self/mountinfo` to detect the exact mount point of cgroups. Netdata also allows manual configuration of this mount point, using these settings:
+
+```
+[plugin:cgroups]
+ check for new cgroups every = 10
+ path to /sys/fs/cgroup/cpuacct = /sys/fs/cgroup/cpuacct
+ path to /sys/fs/cgroup/blkio = /sys/fs/cgroup/blkio
+ path to /sys/fs/cgroup/memory = /sys/fs/cgroup/memory
+ path to /sys/fs/cgroup/devices = /sys/fs/cgroup/devices
+```
+
+Netdata rescans these directories for added or removed cgroups every `check for new cgroups every` seconds.
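+
+If you want to see what Netdata will detect on your system, you can inspect the cgroup mount points yourself (a quick sanity check only, not something Netdata requires):
+
+```sh
+# list the cgroup filesystems currently mounted
+grep cgroup /proc/self/mountinfo
+```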
+
+### hierarchical search for cgroups
+
+Since cgroups are hierarchical, for each of the directories shown above, Netdata walks through the subdirectories recursively searching for cgroups (each subdirectory is another cgroup).
+
+For each of the directories found, Netdata provides a configuration variable:
+
+```
+[plugin:cgroups]
+ search for cgroups under PATH = yes | no
+```
+
+To provide a sane default for this setting, Netdata uses the following pattern list (patterns starting with `!` give a negative match and their order is important: the first matching a path will be used):
+
+```
+[plugin:cgroups]
+ search for cgroups in subpaths matching = !*/init.scope !*-qemu !/init.scope !/system !/systemd !/user !/user.slice *
+```
+
+So, we disable checking for **child cgroups** in systemd internal cgroups ([systemd services are monitored by Netdata](#monitoring-systemd-services)), user cgroups (normally used for desktop and remote user sessions), qemu virtual machines (child cgroups of virtual machines) and `init.scope`. All others are enabled.
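+
+As an illustration of the first-match-wins semantics, here is a minimal shell sketch. It only mimics the behavior with shell globs - netdata's real matcher is implemented in C:
+
+```sh
+# illustrative sketch of first-match-wins pattern evaluation
+matches() {
+    local path="$1"; shift
+    local pattern
+    for pattern in "$@"; do
+        if [ "${pattern:0:1}" = "!" ]; then
+            # negative pattern: if it matches, the answer is 'no'
+            [[ $path == ${pattern:1} ]] && { echo "no ($pattern)"; return 1; }
+        else
+            [[ $path == $pattern ]] && { echo "yes ($pattern)"; return 0; }
+        fi
+    done
+    echo "no (nothing matched)"; return 1
+}
+
+matches "/system"     '!/system' '*'   # -> no (!/system)
+matches "/docker/abc" '!/system' '*'   # -> yes (*)
+```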
+
+### unified cgroups (cgroups v2) support
+
+Basic unified cgroups metrics are supported. To use them instead of v1 cgroups, add:
+
+```
+[plugin:cgroups]
+ use unified cgroups = yes
+ path to unified cgroups = /sys/fs/cgroup
+```
+
+Unified cgroups use the same name pattern matching as v1 cgroups. `cgroup_enable_systemd_services_detailed_memory` is currently unsupported when using unified cgroups.
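+
+One quick way to check which hierarchy your system uses (illustrative; not something Netdata needs):
+
+```sh
+# prints 'cgroup2fs' on a unified (v2-only) hierarchy, 'tmpfs' on a v1 setup
+stat -fc %T /sys/fs/cgroup
+```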
+
+### enabled cgroups
+
+To check if the cgroup is enabled, Netdata uses this setting:
+
+```
+[plugin:cgroups]
+ enable cgroup NAME = yes | no
+```
+
+To provide a sane default, Netdata uses the following pattern list (it checks the pattern against the path of the cgroup):
+
+```
+[plugin:cgroups]
+ enable by default cgroups matching = !*/init.scope *.scope !*/vcpu* !*/emulator !*.mount !*.partition !*.service !*.slice !*.swap !*.user !/ !/docker !/libvirt !/lxc !/lxc/*/ns !/lxc/*/ns/* !/machine !/qemu !/system !/systemd !/user *
+```
+
+The above provides the default `yes` or `no` setting for the cgroup. However, there is an additional step. In many cases the cgroups found in the `/sys/fs/cgroup` hierarchy are just random numbers, and these numbers are often ephemeral: they change across reboots or sessions.
+
+So, we need to somehow map the paths of the cgroups to names, to provide consistent Netdata configuration (i.e. there is no point in saying `enable cgroup 1234 = yes | no` if `1234` is a random number that changes over time - we need a name for the cgroup first, so that `enable cgroup NAME = yes | no` will be consistent).
+
+For this mapping, Netdata provides two configuration options:
+
+```
+[plugin:cgroups]
+ run script to rename cgroups matching = *.scope *docker* *lxc* *qemu* !/ !*.mount !*.partition !*.service !*.slice !*.swap !*.user *
+ script to get cgroup names = /usr/libexec/netdata/plugins.d/cgroup-name.sh
+```
+
+The whole point of the additional pattern list is to limit the number of times the script will be called. Without this pattern list, the script might be called thousands of times, depending on the number of cgroups available in the system.
+
+The above pattern list is matched against the path of the cgroup. For matched cgroups, Netdata calls the script [cgroup-name.sh](https://raw.githubusercontent.com/netdata/netdata/master/collectors/cgroups.plugin/cgroup-name.sh.in) to get its name. This script queries `docker`, `kubectl`, `podman`, or applies heuristics to give the cgroup a name.
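+
+You can also test name resolution manually by running the script yourself. A hypothetical invocation (the plugins directory varies by installation method, and the argument is the cgroup path as Netdata sees it):
+
+```sh
+sudo /usr/libexec/netdata/plugins.d/cgroup-name.sh "docker_0123456789ab"
+# prints the resolved name; exits non-zero when the name could not be resolved
+```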
+
+#### Note on Podman container names
+
+Podman's security model is a lot more restrictive than Docker's, so Netdata will not be able to detect container names out of the box unless they were started by the same user as Netdata itself.
+
+If Podman is used in "rootful" mode, it's also possible to use `podman system service` to grant Netdata access to container names. To do this, ensure `podman system service` is running and Netdata has access to `/run/podman/podman.sock` (the default permissions as specified by upstream are `0600`, with owner `root`, so you will have to adjust the configuration).
+
+[docker-socket-proxy](https://github.com/Tecnativa/docker-socket-proxy) can also be used to give Netdata restricted access to the socket. Note that `PODMAN_HOST` in Netdata's environment should be set to the proxy's URL in this case.
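+
+As a sketch of what granting access could look like on a systemd-based host (unit names, socket paths and the need for ACLs vary per distribution, and the ACL may be lost when the socket is recreated):
+
+```sh
+sudo systemctl enable --now podman.socket              # start the rootful Podman API socket
+sudo setfacl -m u:netdata:rw /run/podman/podman.sock   # let the netdata user reach it
+```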
+
+### charts with zero metrics
+
+By default, Netdata enables monitoring for a metric only when it is not zero; metrics that are constantly zero are ignored. Metrics that start having values after Netdata is started will be detected, and their charts will be added to the dashboard automatically (a refresh of the dashboard is needed for them to appear, though). Set `yes` for a chart instead of `auto` to enable it permanently. For example:
+
+```
+[plugin:cgroups]
+ enable memory (used mem including cache) = yes
+```
+
+You can also set the `enable zero metrics` option to `yes` in the `[global]` section, which enables charts with zero metrics for all internal Netdata plugins.
+
+### alarms
+
+CPU and memory limits are watched and used to raise alarms. Memory usage for every cgroup is checked against the `ram` and `ram+swap` limits. CPU usage for every cgroup is checked against `cpuset.cpus` and the `cpu.cfs_period_us` + `cpu.cfs_quota_us` pair assigned for the cgroup. Configuration for the alarms is available in the `health.d/cgroups.conf` file.
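+
+To see the raw values these checks are based on, you can read the limit files directly. An example for the v1 hierarchy, assuming a docker-managed cgroup named `CGROUP`:
+
+```sh
+cat "/sys/fs/cgroup/cpu/docker/${CGROUP}/cpu.cfs_quota_us"         # -1 means no CPU quota
+cat "/sys/fs/cgroup/cpu/docker/${CGROUP}/cpu.cfs_period_us"
+cat "/sys/fs/cgroup/memory/docker/${CGROUP}/memory.limit_in_bytes" # RAM limit in bytes
+```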
+
+## Monitoring systemd services
+
+Netdata monitors **systemd services**. Example:
+
+![image](https://cloud.githubusercontent.com/assets/2662304/21964372/20cd7b84-db53-11e6-98a2-b9c986b082c0.png)
+
+Support per distribution:
+
+|system|systemd services<br/>charts shown|`tree`<br/>`/sys/fs/cgroup`|comments|
+|:----:|:-------------------------------:|:-------------------------:|:-------|
+|Arch Linux|YES|||
+|Gentoo|NO||can be enabled, see below|
+|Ubuntu 16.04 LTS|YES|||
+|Ubuntu 16.10|YES|[here](http://pastebin.com/PiWbQEXy)||
+|Fedora 25|YES|[here](http://pastebin.com/ax0373wF)||
+|Debian 8|NO||can be enabled, see below|
+|AMI|NO|[here](http://pastebin.com/FrxmptjL)|not a systemd system|
+|CentOS 7.3.1611|NO|[here](http://pastebin.com/SpzgezAg)|can be enabled, see below|
+
+### how to enable cgroup accounting on systemd systems where it is disabled by default
+
+You can verify that accounting is not enabled by running `systemd-cgtop`: the program will show resources only for the top-level cgroup `/`, while all services will show nothing.
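+
+You can also query the manager's current defaults directly:
+
+```sh
+systemctl show -p DefaultCPUAccounting -p DefaultMemoryAccounting -p DefaultTasksAccounting
+```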
+
+To enable cgroup accounting, execute this:
+
+```sh
+sed -e 's|^#Default\(.*\)Accounting=.*$|Default\1Accounting=yes|g' /etc/systemd/system.conf >/tmp/system.conf
+```
+
+To see the changes it made, run this:
+
+```
+# diff /etc/systemd/system.conf /tmp/system.conf
+40,44c40,44
+< #DefaultCPUAccounting=no
+< #DefaultIOAccounting=no
+< #DefaultBlockIOAccounting=no
+< #DefaultMemoryAccounting=no
+< #DefaultTasksAccounting=yes
+---
+> DefaultCPUAccounting=yes
+> DefaultIOAccounting=yes
+> DefaultBlockIOAccounting=yes
+> DefaultMemoryAccounting=yes
+> DefaultTasksAccounting=yes
+```
+
+If you are happy with the changes, run:
+
+```sh
+# copy the file to the right location
+sudo cp /tmp/system.conf /etc/systemd/system.conf
+
+# restart systemd to take it into account
+sudo systemctl daemon-reexec
+```
+
+(`systemctl daemon-reload` does not reload the configuration of the manager itself, so you have to execute `systemctl daemon-reexec`).
+
+Now, when you run `systemd-cgtop`, services will start reporting usage (if they do not, restart a service - any service - to wake them up). Refresh your Netdata dashboard, and you will have the charts too.
+
+In case memory accounting is missing, you will need to enable it in your kernel by appending the following kernel boot options and rebooting:
+
+```
+cgroup_enable=memory swapaccount=1
+```
+
+You can add the above directly to the `linux` line in your `/boot/grub/grub.cfg`, or append them to `GRUB_CMDLINE_LINUX` in `/etc/default/grub` (in which case you will have to run `update-grub` before rebooting). On DigitalOcean Debian images you may have to set it in `/etc/default/grub.d/50-cloudimg-settings.cfg`.
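+
+For example, on a GRUB-based system the edit could look like this (a sketch only - back up the file first, and note that some distributions use `grub2-mkconfig` instead of `update-grub`):
+
+```sh
+sudo sed -i.bak 's|^GRUB_CMDLINE_LINUX="\(.*\)"|GRUB_CMDLINE_LINUX="\1 cgroup_enable=memory swapaccount=1"|' /etc/default/grub
+sudo update-grub
+sudo reboot
+```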
+
+Which systemd services are monitored by Netdata is determined by the following pattern list:
+
+```
+[plugin:cgroups]
+ cgroups to match as systemd services = !/system.slice/*/*.service /system.slice/*.service
+```
+
+- - -
+
+## Monitoring ephemeral containers
+
+Netdata monitors containers automatically when it is installed on the host, or when it is installed in a container that has access to the `/proc` and `/sys` filesystems of the host.
+
+Netdata prior to v1.6 had two issues when such containers were monitored:
+
+1. network interface alarms were triggering when containers were stopped
+
+2. charts were never cleaned up, so after some time dozens of containers were showing up on the dashboard, occupying memory.
+
+### the current Netdata
+
+Network interfaces and cgroups (containers) are now self-cleaned.
+
+So, when a network interface or container stops, Netdata might log a few errors in error.log complaining about files it cannot find, but immediately:
+
+1. it will detect this is a removed container or network interface
+2. it will freeze/pause all alarms for them
+3. it will mark their charts as obsolete
+4. obsolete charts will not be offered on new dashboard sessions (so hit F5 and the charts are gone)
+5. existing dashboard sessions will continue to see them, but of course they will not refresh
+6. obsolete charts will be removed from memory, 1 hour after the last user viewed them (configurable with `[global].cleanup obsolete charts after seconds = 3600` in `netdata.conf`)
+7. when obsolete charts are removed from memory, they are also deleted from disk (configurable with `[global].delete obsolete charts files = yes`)
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fcgroups.plugin%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/cgroups.plugin/cgroup-name.sh.in b/collectors/cgroups.plugin/cgroup-name.sh.in
new file mode 100755
index 0000000..19fbf39
--- /dev/null
+++ b/collectors/cgroups.plugin/cgroup-name.sh.in
@@ -0,0 +1,470 @@
+#!/usr/bin/env bash
+#shellcheck disable=SC2001
+
+# netdata
+# real-time performance and health monitoring, done right!
+# (C) 2016 Costa Tsaousis <costa@tsaousis.gr>
+# SPDX-License-Identifier: GPL-3.0-or-later
+#
+# Script to find a better name for cgroups
+#
+
+export PATH="${PATH}:/sbin:/usr/sbin:/usr/local/sbin"
+export LC_ALL=C
+
+# -----------------------------------------------------------------------------
+
+PROGRAM_NAME="$(basename "${0}")"
+
+logdate() {
+ date "+%Y-%m-%d %H:%M:%S"
+}
+
+log() {
+ local status="${1}"
+ shift
+
+ echo >&2 "$(logdate): ${PROGRAM_NAME}: ${status}: ${*}"
+
+}
+
+warning() {
+ log WARNING "${@}"
+}
+
+error() {
+ log ERROR "${@}"
+}
+
+info() {
+ log INFO "${@}"
+}
+
+fatal() {
+ log FATAL "${@}"
+ exit 1
+}
+
+function docker_like_get_name_command() {
+ local command="${1}"
+ local id="${2}"
+ info "Running command: ${command} ps --filter=id=\"${id}\" --format=\"{{.Names}}\""
+ NAME="$(${command} ps --filter=id="${id}" --format="{{.Names}}")"
+ return 0
+}
+
+function docker_like_get_name_api() {
+ local host_var="${1}"
+ local host="${!host_var}"
+ local path="/containers/${2}/json"
+ if [ -z "${host}" ]; then
+ warning "No ${host_var} is set"
+ return 1
+ fi
+ if ! command -v jq > /dev/null 2>&1; then
+ warning "Can't find jq command line tool. jq is required for netdata to retrieve container name using ${host} API, falling back to docker ps"
+ return 1
+ fi
+ if [ -S "${host}" ]; then
+ info "Running API command: curl --unix-socket \"${host}\" http://localhost${path}"
+ JSON=$(curl -sS --unix-socket "${host}" "http://localhost${path}")
+ else
+ info "Running API command: curl \"${host}${path}\""
+ JSON=$(curl -sS "${host}${path}")
+ fi
+ NAME=$(echo "${JSON}" | jq -r .Name,.Config.Hostname | grep -v null | head -n1 | sed 's|^/||')
+ return 0
+}
+
+# get_lbl_val returns the value for the label with the given name.
+# Returns "null" string if the label doesn't exist.
+# Expected labels format: 'name="value",...'.
+function get_lbl_val() {
+ local labels want_name
+ labels="${1}"
+ want_name="${2}"
+
+ IFS=, read -ra labels <<< "$labels"
+
+ local lname lval
+ for l in "${labels[@]}"; do
+ IFS="=" read -r lname lval <<< "$l"
+ if [ "$want_name" = "$lname" ] && [ -n "$lval" ]; then
+ echo "${lval:1:-1}" # trim "
+ return 0
+ fi
+ done
+
+ echo "null"
+ return 1
+}
+
+function add_lbl_prefix() {
+ local orig_labels prefix
+ orig_labels="${1}"
+ prefix="${2}"
+
+ IFS=, read -ra labels <<< "$orig_labels"
+
+ local new_labels
+ for l in "${labels[@]}"; do
+ new_labels+="${prefix}${l},"
+ done
+
+ echo "${new_labels:0:-1}" # trim last ','
+}
+
+# k8s_get_kubepod_name resolves */kubepods/* cgroup name.
+# pod level cgroup name format: 'pod_<namespace>_<pod_name>'
+# container level cgroup name format: 'cntr_<namespace>_<pod_name>_<container_name>'
+function k8s_get_kubepod_name() {
+ # GKE /sys/fs/cgroup/*/ tree:
+ # |-- kubepods
+ # | |-- burstable
+ # | | |-- pod98cee708-023b-11eb-933d-42010a800193
+ # | | | |-- 922161c98e6ea450bf665226cdc64ca2aa3e889934c2cff0aec4325f8f78ac03
+ # | | `-- a5d223eec35e00f5a1c6fa3e3a5faac6148cdc1f03a2e762e873b7efede012d7
+ # | `-- pode314bbac-d577-11ea-a171-42010a80013b
+ # | |-- 7d505356b04507de7b710016d540b2759483ed5f9136bb01a80872b08f771930
+ # | `-- 88ab4683b99cfa7cc8c5f503adf7987dd93a3faa7c4ce0d17d419962b3220d50
+ #
+ # Minikube (v1.8.2) /sys/fs/cgroup/*/ tree:
+ # |-- kubepods.slice
+ # | |-- kubepods-besteffort.slice
+ # | | |-- kubepods-besteffort-pod10fb5647_c724_400c_b9cc_0e6eae3110e7.slice
+ # | | | |-- docker-36e5eb5056dfdf6dbb75c0c44a1ecf23217fe2c50d606209d8130fcbb19fb5a7.scope
+ # | | | `-- docker-87e18c2323621cf0f635c53c798b926e33e9665c348c60d489eef31ee1bd38d7.scope
+ #
+ # NOTE: cgroups plugin uses '_' to join dir names, so it is <parent>_<child>_<child>_...
+
+ local fn="${FUNCNAME[0]}"
+ local id="${1}"
+
+ if [[ ! $id =~ ^kubepods ]]; then
+ warning "${fn}: '${id}' is not kubepod cgroup."
+ return 1
+ fi
+
+ local clean_id="$id"
+ clean_id=${clean_id//.slice/}
+ clean_id=${clean_id//.scope/}
+
+ local name pod_uid cntr_id
+ if [[ $clean_id == "kubepods" ]]; then
+ name="$clean_id"
+ elif [[ $clean_id =~ .+(besteffort|burstable|guaranteed)$ ]]; then
+ # kubepods_<QOS_CLASS>
+ # kubepods_kubepods-<QOS_CLASS>
+ name=${clean_id//-/_}
+ name=${name/#kubepods_kubepods/kubepods}
+ elif [[ $clean_id =~ .+pod[a-f0-9_-]+_docker-([a-f0-9]+)$ ]]; then
+ # ...pod<POD_UID>_docker-<CONTAINER_ID> (POD_UID w/ "_")
+ cntr_id=${BASH_REMATCH[1]}
+ elif [[ $clean_id =~ .+pod[a-f0-9-]+_([a-f0-9]+)$ ]]; then
+ # ...pod<POD_UID>_<CONTAINER_ID>
+ cntr_id=${BASH_REMATCH[1]}
+ elif [[ $clean_id =~ .+pod([a-f0-9_-]+)$ ]]; then
+ # ...pod<POD_UID> (POD_UID w/ and w/o "_")
+ pod_uid=${BASH_REMATCH[1]}
+ pod_uid=${pod_uid//_/-}
+ fi
+
+ if [ -n "$name" ]; then
+ echo "$name"
+ return 0
+ fi
+
+ if [ -z "$pod_uid" ] && [ -z "$cntr_id" ]; then
+ warning "${fn}: can't extract pod_uid or container_id from the cgroup '$id'."
+ return 1
+ fi
+
+ [ -n "$pod_uid" ] && info "${fn}: cgroup '$id' is a pod(uid:$pod_uid)"
+ [ -n "$cntr_id" ] && info "${fn}: cgroup '$id' is a container(id:$cntr_id)"
+
+ if ! command -v jq > /dev/null 2>&1; then
+ warning "${fn}: 'jq' command not available."
+ return 1
+ fi
+
+ local kube_system_ns
+ local tmp_kube_system_ns_file="${TMPDIR:-"/tmp/"}netdata-cgroups-kube-system-ns"
+ [ -f "$tmp_kube_system_ns_file" ] && kube_system_ns=$(cat "$tmp_kube_system_ns_file" 2> /dev/null)
+
+ local pods
+ if [ -n "${KUBERNETES_SERVICE_HOST}" ] && [ -n "${KUBERNETES_PORT_443_TCP_PORT}" ]; then
+ local token header host url
+ token="$(< /var/run/secrets/kubernetes.io/serviceaccount/token)"
+ header="Authorization: Bearer $token"
+ host="$KUBERNETES_SERVICE_HOST:$KUBERNETES_PORT_443_TCP_PORT"
+
+ if [ -z "$kube_system_ns" ]; then
+ url="https://$host/api/v1/namespaces/kube-system"
+ # FIX: check HTTP response code
+ if ! kube_system_ns=$(curl -sSk -H "$header" "$url" 2>&1); then
+ warning "${fn}: error on curl '${url}': ${kube_system_ns}."
+ else
+ echo "$kube_system_ns" > "$tmp_kube_system_ns_file" 2> /dev/null
+ fi
+ fi
+
+ url="https://$host/api/v1/pods"
+ [ -n "$MY_NODE_NAME" ] && url+="?fieldSelector=spec.nodeName==$MY_NODE_NAME"
+ # FIX: check HTTP response code
+ if ! pods=$(curl -sSk -H "$header" "$url" 2>&1); then
+ warning "${fn}: error on curl '${url}': ${pods}."
+ return 1
+ fi
+ elif ps -C kubelet > /dev/null 2>&1 && command -v kubectl > /dev/null 2>&1; then
+ if [ -z "$kube_system_ns" ]; then
+ if ! kube_system_ns=$(kubectl get namespaces kube-system -o json 2>&1); then
+ warning "${fn}: error on 'kubectl': ${kube_system_ns}."
+ else
+ echo "$kube_system_ns" > "$tmp_kube_system_ns_file" 2> /dev/null
+ fi
+ fi
+
+ [[ -z ${KUBE_CONFIG+x} ]] && KUBE_CONFIG="/etc/kubernetes/admin.conf"
+ if ! pods=$(kubectl --kubeconfig="$KUBE_CONFIG" get pods --all-namespaces -o json 2>&1); then
+ warning "${fn}: error on 'kubectl': ${pods}."
+ return 1
+ fi
+ else
+ warning "${fn}: not inside the k8s cluster and 'kubectl' command not available."
+ return 1
+ fi
+
+ local kube_system_uid
+ if [ -n "$kube_system_ns" ] && ! kube_system_uid=$(jq -r '.metadata.uid' <<< "$kube_system_ns" 2>&1); then
+ warning "${fn}: error on 'jq' parse kube_system_ns: ${kube_system_uid}."
+ fi
+
+ local jq_filter
+ jq_filter+='.items[] | "'
+ jq_filter+='namespace=\"\(.metadata.namespace)\",'
+ jq_filter+='pod_name=\"\(.metadata.name)\",'
+ jq_filter+='pod_uid=\"\(.metadata.uid)\",'
+ #jq_filter+='\(.metadata.labels | to_entries | map("pod_label_"+.key+"=\""+.value+"\"") | join(",") | if length > 0 then .+"," else . end)'
+ jq_filter+='\((.metadata.ownerReferences[]? | select(.controller==true) | "controller_kind=\""+.kind+"\",controller_name=\""+.name+"\",") // "")'
+ jq_filter+='node_name=\"\(.spec.nodeName)\",'
+ jq_filter+='" + '
+ jq_filter+='(.status.containerStatuses[]? | "'
+ jq_filter+='container_name=\"\(.name)\",'
+ jq_filter+='container_id=\"\(.containerID)\"'
+ jq_filter+='") | '
+ jq_filter+='sub("docker://";"")' # containerID: docker://a346da9bc0e3eaba6b295f64ac16e02f2190db2cef570835706a9e7a36e2c722
+
+ local containers
+ if ! containers=$(jq -r "${jq_filter}" <<< "$pods" 2>&1); then
+ warning "${fn}: error on 'jq' parse pods: ${containers}."
+ return 1
+ fi
+
+ # available labels:
+ # namespace, pod_name, pod_uid, container_name, container_id, node_name
+ local labels
+ if [ -n "$cntr_id" ]; then
+ if labels=$(grep "$cntr_id" <<< "$containers" 2> /dev/null); then
+ labels+=',kind="container"'
+ [ -n "$kube_system_uid" ] && [ "$kube_system_uid" != "null" ] && labels+=",cluster_id=\"$kube_system_uid\""
+ name="cntr"
+ name+="_$(get_lbl_val "$labels" namespace)"
+ name+="_$(get_lbl_val "$labels" pod_name)"
+ name+="_$(get_lbl_val "$labels" container_name)"
+ labels=$(add_lbl_prefix "$labels" "k8s_")
+ name+=" $labels"
+ fi
+ elif [ -n "$pod_uid" ]; then
+ if labels=$(grep "$pod_uid" -m 1 <<< "$containers" 2> /dev/null); then
+ labels="${labels%%,container_*}"
+ labels+=',kind="pod"'
+ [ -n "$kube_system_uid" ] && [ "$kube_system_uid" != "null" ] && labels+=",cluster_id=\"$kube_system_uid\""
+ name="pod"
+ name+="_$(get_lbl_val "$labels" namespace)"
+ name+="_$(get_lbl_val "$labels" pod_name)"
+ labels=$(add_lbl_prefix "$labels" "k8s_")
+ name+=" $labels"
+ fi
+ fi
+
+ # jq prints 'null' for nonexistent fields and nonexistent label values
+ if [[ $name =~ _null(_|$) ]]; then
+ warning "${fn}: invalid name: $name (cgroup '$id')"
+ name=""
+ fi
+
+ echo "$name"
+ [ -n "$name" ]
+ return
+}
+
+function k8s_get_name() {
+ local fn="${FUNCNAME[0]}"
+ local id="${1}"
+
+ NAME=$(k8s_get_kubepod_name "$id")
+
+ if [ -z "${NAME}" ]; then
+ warning "${fn}: cannot find the name of cgroup with id '${id}'. Setting name to ${id} and disabling it."
+ NAME="${id}"
+ NAME_NOT_FOUND=3
+ else
+ NAME="k8s_${NAME}"
+
+ local name labels
+ name=${NAME%% *}
+ labels=${NAME#* }
+ if [ "$name" != "$labels" ]; then
+ info "${fn}: cgroup '${id}' has chart name '${name}', labels '${labels}"
+ else
+ info "${fn}: cgroup '${id}' has chart name '${NAME}'"
+ fi
+ fi
+}
+
+function docker_get_name() {
+ local id="${1}"
+ if hash docker 2> /dev/null; then
+ docker_like_get_name_command docker "${id}"
+ else
+ docker_like_get_name_api DOCKER_HOST "${id}" || docker_like_get_name_command podman "${id}"
+ fi
+ if [ -z "${NAME}" ]; then
+ warning "cannot find the name of docker container '${id}'"
+ NAME_NOT_FOUND=2
+ NAME="${id:0:12}"
+ else
+ info "docker container '${id}' is named '${NAME}'"
+ fi
+}
+
+function docker_validate_id() {
+ local id="${1}"
+ if [ -n "${id}" ] && { [ ${#id} -eq 64 ] || [ ${#id} -eq 12 ]; }; then
+ docker_get_name "${id}"
+ else
+ error "a docker id cannot be extracted from docker cgroup '${CGROUP}'."
+ fi
+}
+
+function podman_get_name() {
+ local id="${1}"
+
+ # for Podman, prefer using the API if we can, as netdata will not normally have access
+ # to other users' containers, so they will not be visible when running `podman ps`
+ docker_like_get_name_api PODMAN_HOST "${id}" || docker_like_get_name_command podman "${id}"
+
+ if [ -z "${NAME}" ]; then
+ warning "cannot find the name of podman container '${id}'"
+ NAME_NOT_FOUND=2
+ NAME="${id:0:12}"
+ else
+ info "podman container '${id}' is named '${NAME}'"
+ fi
+}
+
+function podman_validate_id() {
+ local id="${1}"
+ if [ -n "${id}" ] && [ ${#id} -eq 64 ]; then
+ podman_get_name "${id}"
+ else
+ error "a podman id cannot be extracted from docker cgroup '${CGROUP}'."
+ fi
+}
+
+# -----------------------------------------------------------------------------
+
+[ -z "${NETDATA_USER_CONFIG_DIR}" ] && NETDATA_USER_CONFIG_DIR="@configdir_POST@"
+[ -z "${NETDATA_STOCK_CONFIG_DIR}" ] && NETDATA_STOCK_CONFIG_DIR="@libconfigdir_POST@"
+
+DOCKER_HOST="${DOCKER_HOST:=/var/run/docker.sock}"
+PODMAN_HOST="${PODMAN_HOST:=/run/podman/podman.sock}"
+CGROUP="${1}"
+NAME_NOT_FOUND=0
+NAME=
+
+# -----------------------------------------------------------------------------
+
+if [ -z "${CGROUP}" ]; then
+ fatal "called without a cgroup name. Nothing to do."
+fi
+
+for CONFIG in "${NETDATA_USER_CONFIG_DIR}/cgroups-names.conf" "${NETDATA_STOCK_CONFIG_DIR}/cgroups-names.conf"; do
+ if [ -f "${CONFIG}" ]; then
+ NAME="$(grep "^${CGROUP} " "${CONFIG}" | sed 's/[[:space:]]\+/ /g' | cut -d ' ' -f 2)"
+ if [ -z "${NAME}" ]; then
+ info "cannot find cgroup '${CGROUP}' in '${CONFIG}'."
+ else
+ break
+ fi
+ #else
+ # info "configuration file '${CONFIG}' is not available."
+ fi
+done
+
+if [ -z "${NAME}" ]; then
+ if [[ ${CGROUP} =~ ^.*kubepods.* ]]; then
+ k8s_get_name "${CGROUP}"
+ fi
+fi
+
+if [ -z "${NAME}" ]; then
+ if [[ ${CGROUP} =~ ^.*docker[-_/\.][a-fA-F0-9]+[-_\.]?.*$ ]]; then
+ # docker containers
+ #shellcheck disable=SC1117
+ DOCKERID="$(echo "${CGROUP}" | sed "s|^.*docker[-_/]\([a-fA-F0-9]\+\)[-_\.]\?.*$|\1|")"
+ docker_validate_id "${DOCKERID}"
+ elif [[ ${CGROUP} =~ ^.*ecs[-_/\.][a-fA-F0-9]+[-_\.]?.*$ ]]; then
+ # ECS
+ #shellcheck disable=SC1117
+ DOCKERID="$(echo "${CGROUP}" | sed "s|^.*ecs[-_/].*[-_/]\([a-fA-F0-9]\+\)[-_\.]\?.*$|\1|")"
+ docker_validate_id "${DOCKERID}"
+ elif [[ ${CGROUP} =~ ^.*libpod-[a-fA-F0-9]+.*$ ]]; then
+ # Podman
+ PODMANID="$(echo "${CGROUP}" | sed "s|^.*libpod-\([a-fA-F0-9]\+\).*$|\1|")"
+ podman_validate_id "${PODMANID}"
+
+ elif [[ ${CGROUP} =~ machine.slice[_/].*\.service ]]; then
+ # systemd-nspawn
+ NAME="$(echo "${CGROUP}" | sed 's/.*machine.slice[_\/]\(.*\)\.service/\1/g')"
+
+ elif [[ ${CGROUP} =~ machine.slice_machine.*-qemu ]]; then
+ # libvirtd / qemu virtual machines
+ # NAME="$(echo ${CGROUP} | sed 's/machine.slice_machine.*-qemu//; s/\/x2d//; s/\/x2d/\-/g; s/\.scope//g')"
+ NAME="qemu_$(echo "${CGROUP}" | sed 's/machine.slice_machine.*-qemu//; s/\/x2d[[:digit:]]*//; s/\/x2d//g; s/\.scope//g')"
+
+ elif [[ ${CGROUP} =~ machine_.*\.libvirt-qemu ]]; then
+ # libvirtd / qemu virtual machines
+ NAME="qemu_$(echo "${CGROUP}" | sed 's/^machine_//; s/\.libvirt-qemu$//; s/-/_/;')"
+
+ elif [[ ${CGROUP} =~ qemu.slice_([0-9]+).scope && -d /etc/pve ]]; then
+ # Proxmox VMs
+
+ FILENAME="/etc/pve/qemu-server/${BASH_REMATCH[1]}.conf"
+ if [[ -f $FILENAME && -r $FILENAME ]]; then
+ NAME="qemu_$(grep -e '^name: ' "/etc/pve/qemu-server/${BASH_REMATCH[1]}.conf" | head -1 | sed -rn 's|\s*name\s*:\s*(.*)?$|\1|p')"
+ else
+ error "proxmox config file missing ${FILENAME} or netdata does not have read access. Please ensure netdata is a member of www-data group."
+ fi
+ elif [[ ${CGROUP} =~ lxc_([0-9]+) && -d /etc/pve ]]; then
+ # Proxmox Containers (LXC)
+
+ FILENAME="/etc/pve/lxc/${BASH_REMATCH[1]}.conf"
+ if [[ -f ${FILENAME} && -r ${FILENAME} ]]; then
+ NAME=$(grep -e '^hostname: ' "/etc/pve/lxc/${BASH_REMATCH[1]}.conf" | head -1 | sed -rn 's|\s*hostname\s*:\s*(.*)?$|\1|p')
+ else
+ error "proxmox config file missing ${FILENAME} or netdata does not have read access. Please ensure netdata is a member of www-data group."
+ fi
+ elif [[ ${CGROUP} =~ lxc.payload.* ]]; then
+ # LXC 4.0
+ NAME="$(echo "${CGROUP}" | sed 's/lxc\.payload\.\(.*\)/\1/g')"
+ fi
+
+ [ -z "${NAME}" ] && NAME="${CGROUP}"
+ [ ${#NAME} -gt 100 ] && NAME="${NAME:0:100}"
+fi
+
+info "cgroup '${CGROUP}' is called '${NAME}'"
+echo "${NAME}"
+
+exit ${NAME_NOT_FOUND}
diff --git a/collectors/cgroups.plugin/cgroup-network-helper.sh b/collectors/cgroups.plugin/cgroup-network-helper.sh
new file mode 100755
index 0000000..eb839ef
--- /dev/null
+++ b/collectors/cgroups.plugin/cgroup-network-helper.sh
@@ -0,0 +1,297 @@
+#!/usr/bin/env bash
+# shellcheck disable=SC1117
+
+# cgroup-network-helper.sh
+# detect container and virtual machine interfaces
+#
+# (C) 2017 Costa Tsaousis
+# SPDX-License-Identifier: GPL-3.0-or-later
+#
+# This script is called as root (by cgroup-network), with either a pid or a cgroup path.
+# It tries to find all the network interfaces that belong to the same cgroup.
+#
+# It supports several methods for this detection:
+#
+# 1. cgroup-network (the compiled binary that calls this script) detects veth network interfaces,
+#    by examining iflink and ifindex IDs and switching namespaces
+#    (it also detects the interface name as it is used by the container).
+#
+# 2. this script uses /proc/PID/fdinfo to find tun/tap network interfaces.
+#
+# 3. this script calls virsh to find libvirt network interfaces.
+#
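+# usage:
+#   cgroup-network-helper.sh --pid <pid>
+#   cgroup-network-helper.sh --cgroup <cgroup-path>
+# (add --debug for verbose logging)
+#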
+
+# -----------------------------------------------------------------------------
+
+# the system path is cleared by cgroup-network
+# shellcheck source=/dev/null
+[ -f /etc/profile ] && source /etc/profile
+
+export LC_ALL=C
+
+PROGRAM_NAME="$(basename "${0}")"
+
+logdate() {
+ date "+%Y-%m-%d %H:%M:%S"
+}
+
+log() {
+ local status="${1}"
+ shift
+
+ echo >&2 "$(logdate): ${PROGRAM_NAME}: ${status}: ${*}"
+
+}
+
+warning() {
+ log WARNING "${@}"
+}
+
+error() {
+ log ERROR "${@}"
+}
+
+info() {
+ log INFO "${@}"
+}
+
+fatal() {
+ log FATAL "${@}"
+ exit 1
+}
+
+debug=${NETDATA_CGROUP_NETWORK_HELPER_DEBUG=0}
+debug() {
+ [ "${debug}" = "1" ] && log DEBUG "${@}"
+}
+
+# -----------------------------------------------------------------------------
+# check for BASH v4+ (required for associative arrays)
+
+[ $(( BASH_VERSINFO[0] )) -lt 4 ] && \
+ fatal "BASH version 4 or later is required (this is ${BASH_VERSION})."
+
+# -----------------------------------------------------------------------------
+# parse the arguments
+
+pid=
+cgroup=
+while [ ! -z "${1}" ]
+do
+ case "${1}" in
+ --cgroup) cgroup="${2}"; shift 1;;
+ --pid|-p) pid="${2}"; shift 1;;
+ --debug|debug) debug=1;;
+ *) fatal "Cannot understand argument '${1}'";;
+ esac
+
+ shift
+done
+
+if [ -z "${pid}" ] && [ -z "${cgroup}" ]
+then
+ fatal "Either --pid or --cgroup is required"
+fi
+
+# -----------------------------------------------------------------------------
+
+set_source() {
+ [ ${debug} -eq 1 ] && echo "SRC ${*}"
+}
+
+
+# -----------------------------------------------------------------------------
+# veth interfaces via cgroup
+
+# cgroup-network can detect veth interfaces by itself (written in C).
+# If you seek for a shell version of what it does, check this:
+# https://github.com/netdata/netdata/issues/474#issuecomment-317866709
+
+
+# -----------------------------------------------------------------------------
+# tun/tap interfaces via /proc/PID/fdinfo
+
+# find any tun/tap devices linked to a pid
+proc_pid_fdinfo_iff() {
+ local p="${1}" # the pid
+
+ debug "Searching for tun/tap interfaces for pid ${p}..."
+ set_source "fdinfo"
+ grep "^iff:.*" "${NETDATA_HOST_PREFIX}/proc/${p}/fdinfo"/* 2>/dev/null | cut -f 2
+}
+
+find_tun_tap_interfaces_for_cgroup() {
+ local c="${1}" # the cgroup path
+ [ -d "${c}/emulator" ] && c="${c}/emulator" # check for 'emulator' subdirectory
+ c="${c}/cgroup.procs" # make full path
+
+ # for each pid of the cgroup
+ # find any tun/tap devices linked to the pid
+ if [ -f "${c}" ]
+ then
+ local p
+ for p in $(< "${c}" )
+ do
+ proc_pid_fdinfo_iff "${p}"
+ done
+ else
+ debug "Cannot find file '${c}', not searching for tun/tap interfaces."
+ fi
+}
+
+
+# -----------------------------------------------------------------------------
+# virsh domain network interfaces
+
+virsh_cgroup_to_domain_name() {
+ local c="${1}" # the cgroup path
+
+ debug "extracting a possible virsh domain from cgroup ${c}..."
+
+ # extract from the cgroup path
+ sed -n -e "s|.*/machine-qemu\\\\x2d[0-9]\+\\\\x2d\(.*\)\.scope$|\1|p" \
+ -e "s|.*/machine/\(.*\)\.libvirt-qemu$|\1|p" \
+ <<EOF
+${c}
+EOF
+}
+
+virsh_find_all_interfaces_for_cgroup() {
+ local c="${1}" # the cgroup path
+
+ # the virsh command
+ local virsh
+ # shellcheck disable=SC2230
+ virsh="$(which virsh 2>/dev/null || command -v virsh 2>/dev/null)"
+
+ if [ ! -z "${virsh}" ]
+ then
+ local d
+ d="$(virsh_cgroup_to_domain_name "${c}")"
+
+ if [ ! -z "${d}" ]
+ then
+ debug "running: virsh domiflist ${d}; to find the network interfaces"
+
+ # match only 'network' interfaces from virsh output
+
+ set_source "virsh"
+ "${virsh}" -r domiflist "${d}" |\
+ sed -n \
+ -e "s|^\([^[:space:]]\+\)[[:space:]]\+network[[:space:]]\+\([^[:space:]]\+\)[[:space:]]\+[^[:space:]]\+[[:space:]]\+[^[:space:]]\+$|\1 \1_\2|p" \
+ -e "s|^\([^[:space:]]\+\)[[:space:]]\+bridge[[:space:]]\+\([^[:space:]]\+\)[[:space:]]\+[^[:space:]]\+[[:space:]]\+[^[:space:]]\+$|\1 \1_\2|p"
+ else
+ debug "no virsh domain extracted from cgroup ${c}"
+ fi
+ else
+ debug "virsh command is not available"
+ fi
+}
+
+# -----------------------------------------------------------------------------
+# netnsid detected interfaces
+
+netnsid_find_all_interfaces_for_pid() {
+ local pid="${1}"
+ [ -z "${pid}" ] && return 1
+
+ local nsid
+ nsid="$(lsns -t net -p "${pid}" -o NETNSID -nr)"
+ [ -z "${nsid}" ] && return 1
+ [ "${nsid}" = "unassigned" ] && return 1
+
+ set_source "netnsid"
+ ip link show |\
+ grep -B 1 -E " link-netnsid ${nsid}($| )" |\
+ sed -n -e "s|^[[:space:]]*[0-9]\+:[[:space:]]\+\([A-Za-z0-9_]\+\)\(@[A-Za-z0-9_]\+\)*:[[:space:]].*$|\1|p"
+}
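+
+# For context, 'ip link show' lists a veth peer of netnsid 0 like this
+# (illustrative output):
+#   25: veth0a2b@if24: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 ...
+#       link/ether 02:42:ac:11:00:02 brd ff:ff:ff:ff:ff:ff link-netnsid 0
+# grep -B 1 keeps the numbered line, and the sed extracts 'veth0a2b' from it.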
+
+netnsid_find_all_interfaces_for_cgroup() {
+ local c="${1}" # the cgroup path
+
+ # for each pid of the cgroup
+ # find the interfaces in its network namespace, via its netnsid
+ if [ -f "${c}/cgroup.procs" ]
+ then
+ local p
+ for p in $(< "${c}/cgroup.procs" )
+ do
+ netnsid_find_all_interfaces_for_pid "${p}"
+ done
+ else
+ debug "Cannot find file '${c}/cgroup.procs', not searching for netnsid interfaces."
+ fi
+}
+
+# -----------------------------------------------------------------------------
+
+find_all_interfaces_of_pid_or_cgroup() {
+ local p="${1}" c="${2}" # the pid and the cgroup path
+
+ if [ ! -z "${p}" ]
+ then
+ # we have been called with a pid
+
+ proc_pid_fdinfo_iff "${p}"
+ netnsid_find_all_interfaces_for_pid "${p}"
+
+ elif [ ! -z "${c}" ]
+ then
+ # we have been called with a cgroup
+
+ info "searching for network interfaces of cgroup '${c}'"
+
+ find_tun_tap_interfaces_for_cgroup "${c}"
+ virsh_find_all_interfaces_for_cgroup "${c}"
+ netnsid_find_all_interfaces_for_cgroup "${c}"
+
+ else
+
+ error "Either a pid or a cgroup path is needed"
+ return 1
+
+ fi
+
+ return 0
+}
+
+# -----------------------------------------------------------------------------
+
+# an associative array to store the interfaces
+# the index is the interface name as seen by the host
+# the value is the interface name as seen by the guest / container
+declare -A devs=()
+
+# store all interfaces found in the associative array
+# this will also give the unique devices, as seen by the host
+last_src=
+# shellcheck disable=SC2162
+while read host_device guest_device
+do
+ [ -z "${host_device}" ] && continue
+
+ [ "${host_device}" = "SRC" ] && last_src="${guest_device}" && continue
+
+ # the default guest_device is the host_device
+ [ -z "${guest_device}" ] && guest_device="${host_device}"
+
+ # when we run in debug, show the source
+ debug "Found host device '${host_device}', guest device '${guest_device}', detected via '${last_src}'"
+
+ if [ -z "${devs[${host_device}]}" ] || [ "${devs[${host_device}]}" = "${host_device}" ]; then
+ devs[${host_device}]="${guest_device}"
+ fi
+
+done < <( find_all_interfaces_of_pid_or_cgroup "${pid}" "${cgroup}" )
+
+# print the interfaces found, in the format netdata expects them
+found=0
+for x in "${!devs[@]}"
+do
+ found=$((found + 1))
+ echo "${x} ${devs[${x}]}"
+done
+
+debug "found ${found} network interfaces for pid '${pid}', cgroup '${cgroup}', run as ${USER}, ${UID}"
+
+# let netdata know if we found any
+[ ${found} -eq 0 ] && exit 1
+exit 0
diff --git a/collectors/cgroups.plugin/cgroup-network.c b/collectors/cgroups.plugin/cgroup-network.c
new file mode 100644
index 0000000..921b14d
--- /dev/null
+++ b/collectors/cgroups.plugin/cgroup-network.c
@@ -0,0 +1,714 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "libnetdata/libnetdata.h"
+
+#ifdef HAVE_SETNS
+#ifndef _GNU_SOURCE
+#define _GNU_SOURCE /* See feature_test_macros(7) */
+#endif
+#include <sched.h>
+#endif
+
+char environment_variable2[FILENAME_MAX + 50] = "";
+char *environment[] = {
+ "PATH=/bin:/sbin:/usr/bin:/usr/sbin:/usr/local/bin:/usr/local/sbin",
+ environment_variable2,
+ NULL
+};
+
+
+// ----------------------------------------------------------------------------
+
+// callback required by fatal()
+void netdata_cleanup_and_exit(int ret) {
+ exit(ret);
+}
+
+void send_statistics( const char *action, const char *action_result, const char *action_data) {
+ (void) action;
+ (void) action_result;
+ (void) action_data;
+ return;
+}
+
+// callbacks required by popen()
+void signals_block(void) {};
+void signals_unblock(void) {};
+void signals_reset(void) {};
+
+// callback required by eval()
+int health_variable_lookup(const char *variable, uint32_t hash, struct rrdcalc *rc, calculated_number *result) {
+ (void)variable;
+ (void)hash;
+ (void)rc;
+ (void)result;
+ return 0;
+};
+
+// required by get_system_cpus()
+char *netdata_configured_host_prefix = "";
+
+// ----------------------------------------------------------------------------
+
+struct iface {
+ const char *device;
+ uint32_t hash;
+
+ unsigned int ifindex;
+ unsigned int iflink;
+
+ struct iface *next;
+};
+
+unsigned int read_iface_iflink(const char *prefix, const char *iface) {
+ if(!prefix) prefix = "";
+
+ char filename[FILENAME_MAX + 1];
+ snprintfz(filename, FILENAME_MAX, "%s/sys/class/net/%s/iflink", prefix, iface);
+
+ unsigned long long iflink = 0;
+ int ret = read_single_number_file(filename, &iflink);
+ if(ret) error("Cannot read '%s'.", filename);
+
+ return (unsigned int)iflink;
+}
+
+unsigned int read_iface_ifindex(const char *prefix, const char *iface) {
+ if(!prefix) prefix = "";
+
+ char filename[FILENAME_MAX + 1];
+ snprintfz(filename, FILENAME_MAX, "%s/sys/class/net/%s/ifindex", prefix, iface);
+
+ unsigned long long ifindex = 0;
+ int ret = read_single_number_file(filename, &ifindex);
+ if(ret) error("Cannot read '%s'.", filename);
+
+ return (unsigned int)ifindex;
+}
+
+struct iface *read_proc_net_dev(const char *scope __maybe_unused, const char *prefix) {
+ if(!prefix) prefix = "";
+
+ procfile *ff = NULL;
+ char filename[FILENAME_MAX + 1];
+
+ snprintfz(filename, FILENAME_MAX, "%s%s", prefix, (*prefix)?"/proc/1/net/dev":"/proc/net/dev");
+
+#ifdef NETDATA_INTERNAL_CHECKS
+ info("parsing '%s'", filename);
+#endif
+
+ ff = procfile_open(filename, " \t,:|", PROCFILE_FLAG_DEFAULT);
+ if(unlikely(!ff)) {
+ error("Cannot open file '%s'", filename);
+ return NULL;
+ }
+
+ ff = procfile_readall(ff);
+ if(unlikely(!ff)) {
+ error("Cannot read file '%s'", filename);
+ return NULL;
+ }
+
+ size_t lines = procfile_lines(ff), l;
+ struct iface *root = NULL;
+ for(l = 2; l < lines ;l++) {
+ if (unlikely(procfile_linewords(ff, l) < 1)) continue;
+
+ struct iface *t = callocz(1, sizeof(struct iface));
+ t->device = strdupz(procfile_lineword(ff, l, 0));
+ t->hash = simple_hash(t->device);
+ t->ifindex = read_iface_ifindex(prefix, t->device);
+ t->iflink = read_iface_iflink(prefix, t->device);
+ t->next = root;
+ root = t;
+
+#ifdef NETDATA_INTERNAL_CHECKS
+ info("added %s interface '%s', ifindex %u, iflink %u", scope, t->device, t->ifindex, t->iflink);
+#endif
+ }
+
+ procfile_close(ff);
+
+ return root;
+}
+
+void free_iface(struct iface *iface) {
+ freez((void *)iface->device);
+ freez(iface);
+}
+
+void free_host_ifaces(struct iface *iface) {
+ while(iface) {
+ struct iface *t = iface->next;
+ free_iface(iface);
+ iface = t;
+ }
+}
+
+int iface_is_eligible(struct iface *iface) {
+ if(iface->iflink != iface->ifindex)
+ return 1;
+
+ return 0;
+}
+
+int eligible_ifaces(struct iface *root) {
+ int eligible = 0;
+
+ struct iface *t;
+ for(t = root; t ; t = t->next)
+ if(iface_is_eligible(t))
+ eligible++;
+
+ return eligible;
+}
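+
+// For context: a plain interface reports iflink == ifindex, while a veth
+// endpoint reports its peer's ifindex in iflink, so iflink != ifindex marks
+// an interface as potentially one side of a veth pair.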
+
+static void continue_as_child(void) {
+ pid_t child = fork();
+ int status;
+ pid_t ret;
+
+ if (child < 0)
+ error("fork() failed");
+
+ /* Only the child returns */
+ if (child == 0)
+ return;
+
+ for (;;) {
+ ret = waitpid(child, &status, WUNTRACED);
+ if ((ret == child) && (WIFSTOPPED(status))) {
+ /* The child suspended so suspend us as well */
+ kill(getpid(), SIGSTOP);
+ kill(child, SIGCONT);
+ } else {
+ break;
+ }
+ }
+
+ /* Return the child's exit code if possible */
+ if (WIFEXITED(status)) {
+ exit(WEXITSTATUS(status));
+ } else if (WIFSIGNALED(status)) {
+ kill(getpid(), WTERMSIG(status));
+ }
+
+ exit(EXIT_FAILURE);
+}
+
+int proc_pid_fd(const char *prefix, const char *ns, pid_t pid) {
+ if(!prefix) prefix = "";
+
+ char filename[FILENAME_MAX + 1];
+ snprintfz(filename, FILENAME_MAX, "%s/proc/%d/%s", prefix, (int)pid, ns);
+ int fd = open(filename, O_RDONLY);
+
+ if(fd == -1)
+ error("Cannot open proc_pid_fd() file '%s'", filename);
+
+ return fd;
+}
+
+static struct ns {
+ int nstype;
+ int fd;
+ int status;
+ const char *name;
+ const char *path;
+} all_ns[] = {
+ // { .nstype = CLONE_NEWUSER, .fd = -1, .status = -1, .name = "user", .path = "ns/user" },
+ // { .nstype = CLONE_NEWCGROUP, .fd = -1, .status = -1, .name = "cgroup", .path = "ns/cgroup" },
+ // { .nstype = CLONE_NEWIPC, .fd = -1, .status = -1, .name = "ipc", .path = "ns/ipc" },
+ // { .nstype = CLONE_NEWUTS, .fd = -1, .status = -1, .name = "uts", .path = "ns/uts" },
+ { .nstype = CLONE_NEWNET, .fd = -1, .status = -1, .name = "network", .path = "ns/net" },
+ { .nstype = CLONE_NEWPID, .fd = -1, .status = -1, .name = "pid", .path = "ns/pid" },
+ { .nstype = CLONE_NEWNS, .fd = -1, .status = -1, .name = "mount", .path = "ns/mnt" },
+
+ // terminator
+ { .nstype = 0, .fd = -1, .status = -1, .name = NULL, .path = NULL }
+};
+
+int switch_namespace(const char *prefix, pid_t pid) {
+
+#ifdef HAVE_SETNS
+
+ int i;
+ for(i = 0; all_ns[i].name ; i++)
+ all_ns[i].fd = proc_pid_fd(prefix, all_ns[i].path, pid);
+
+ int root_fd = proc_pid_fd(prefix, "root", pid);
+ int cwd_fd = proc_pid_fd(prefix, "cwd", pid);
+
+ setgroups(0, NULL);
+
+ // 2 passes - as found in the nsenter source code;
+ // this is related to the CLONE_NEWUSER functionality.
+
+ // This code cannot switch the user namespace (it can switch all the others).
+ // Fortunately, we don't need to switch user namespaces.
+
+ int pass;
+ for(pass = 0; pass < 2 ;pass++) {
+ for(i = 0; all_ns[i].name ; i++) {
+ if (all_ns[i].fd != -1 && all_ns[i].status == -1) {
+ if(setns(all_ns[i].fd, all_ns[i].nstype) == -1) {
+ if(pass == 1) {
+ all_ns[i].status = 0;
+ error("Cannot switch to %s namespace of pid %d", all_ns[i].name, (int) pid);
+ }
+ }
+ else
+ all_ns[i].status = 1;
+ }
+ }
+ }
+
+ setgroups(0, NULL);
+
+ if(root_fd != -1) {
+ if(fchdir(root_fd) < 0)
+ error("Cannot fchdir() to pid %d root directory", (int)pid);
+
+ if(chroot(".") < 0)
+ error("Cannot chroot() to pid %d root directory", (int)pid);
+
+ close(root_fd);
+ }
+
+ if(cwd_fd != -1) {
+ if(fchdir(cwd_fd) < 0)
+ error("Cannot fchdir() to pid %d current working directory", (int)pid);
+
+ close(cwd_fd);
+ }
+
+ int do_fork = 0;
+ for(i = 0; all_ns[i].name ; i++)
+ if(all_ns[i].fd != -1) {
+
+ // CLONE_NEWPID requires a fork() to become effective
+ if(all_ns[i].nstype == CLONE_NEWPID && all_ns[i].status)
+ do_fork = 1;
+
+ close(all_ns[i].fd);
+ }
+
+ if(do_fork)
+ continue_as_child();
+
+ return 0;
+
+#else
+
+ errno = ENOSYS;
+ error("setns() is missing on this system.");
+ return 1;
+
+#endif
+}
+
+pid_t read_pid_from_cgroup_file(const char *filename) {
+ int fd = open(filename, procfile_open_flags);
+ if(fd == -1) {
+ error("Cannot open pid_from_cgroup() file '%s'.", filename);
+ return 0;
+ }
+
+ FILE *fp = fdopen(fd, "r");
+ if(!fp) {
+ error("Cannot upgrade fd to fp for file '%s'.", filename);
+ return 0;
+ }
+
+ char buffer[100 + 1];
+ pid_t pid = 0;
+ char *s;
+ while((s = fgets(buffer, 100, fp))) {
+ buffer[100] = '\0';
+ pid = atoi(s);
+ if(pid > 0) break;
+ }
+
+ fclose(fp);
+
+#ifdef NETDATA_INTERNAL_CHECKS
+ if(pid > 0) info("found pid %d on file '%s'", pid, filename);
+#endif
+
+ return pid;
+}
+
+pid_t read_pid_from_cgroup_files(const char *path) {
+ char filename[FILENAME_MAX + 1];
+
+ snprintfz(filename, FILENAME_MAX, "%s/cgroup.procs", path);
+ pid_t pid = read_pid_from_cgroup_file(filename);
+ if(pid > 0) return pid;
+
+ snprintfz(filename, FILENAME_MAX, "%s/tasks", path);
+ return read_pid_from_cgroup_file(filename);
+}
+
+pid_t read_pid_from_cgroup(const char *path) {
+ pid_t pid = read_pid_from_cgroup_files(path);
+ if (pid > 0) return pid;
+
+ DIR *dir = opendir(path);
+ if (!dir) {
+ error("cannot read directory '%s'", path);
+ return 0;
+ }
+
+ struct dirent *de = NULL;
+ while ((de = readdir(dir))) {
+ if (de->d_type == DT_DIR
+ && (
+ (de->d_name[0] == '.' && de->d_name[1] == '\0')
+ || (de->d_name[0] == '.' && de->d_name[1] == '.' && de->d_name[2] == '\0')
+ ))
+ continue;
+
+ if (de->d_type == DT_DIR) {
+ char filename[FILENAME_MAX + 1];
+ snprintfz(filename, FILENAME_MAX, "%s/%s", path, de->d_name);
+ pid = read_pid_from_cgroup(filename);
+ if(pid > 0) break;
+ }
+ }
+ closedir(dir);
+ return pid;
+}
+
+// ----------------------------------------------------------------------------
+// send the result to netdata
+
+struct found_device {
+ const char *host_device;
+ const char *guest_device;
+
+ uint32_t host_device_hash;
+
+ struct found_device *next;
+} *detected_devices = NULL;
+
+void add_device(const char *host, const char *guest) {
+#ifdef NETDATA_INTERNAL_CHECKS
+ info("adding device with host '%s', guest '%s'", host, guest);
+#endif
+
+ uint32_t hash = simple_hash(host);
+
+ if(guest && (!*guest || strcmp(host, guest) == 0))
+ guest = NULL;
+
+ struct found_device *f;
+ for(f = detected_devices; f ; f = f->next) {
+ if(f->host_device_hash == hash && !strcmp(host, f->host_device)) {
+
+ if(guest && (!f->guest_device || !strcmp(f->host_device, f->guest_device))) {
+ if(f->guest_device) freez((void *)f->guest_device);
+ f->guest_device = strdupz(guest);
+ }
+
+ return;
+ }
+ }
+
+ f = mallocz(sizeof(struct found_device));
+ f->host_device = strdupz(host);
+ f->host_device_hash = hash;
+ f->guest_device = (guest)?strdupz(guest):NULL;
+ f->next = detected_devices;
+ detected_devices = f;
+}
+
+int send_devices(void) {
+ int found = 0;
+
+ struct found_device *f;
+ for(f = detected_devices; f ; f = f->next) {
+ found++;
+ printf("%s %s\n", f->host_device, (f->guest_device)?f->guest_device:f->host_device);
+ }
+
+ return found;
+}
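+
+// Illustrative output, as consumed by cgroups.plugin (hypothetical names):
+//   veth0a2b eth0
+//   tap1f2e tap1f2e
+// i.e. one "host_device guest_device" pair per line.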
+
+// ----------------------------------------------------------------------------
+// this function should be called only **ONCE**;
+// it also has to be the **LAST** one called,
+// since it switches namespaces - after this call, everything is different!
+
+void detect_veth_interfaces(pid_t pid) {
+ struct iface *cgroup = NULL;
+ struct iface *host, *h, *c;
+
+ host = read_proc_net_dev("host", netdata_configured_host_prefix);
+ if(!host) {
+ errno = 0;
+ error("cannot read host interface list.");
+ goto cleanup;
+ }
+
+ if(!eligible_ifaces(host)) {
+ errno = 0;
+ error("there are no double-linked host interfaces available.");
+ goto cleanup;
+ }
+
+ if(switch_namespace(netdata_configured_host_prefix, pid)) {
+ errno = 0;
+ error("cannot switch to the namespace of pid %u", (unsigned int) pid);
+ goto cleanup;
+ }
+
+#ifdef NETDATA_INTERNAL_CHECKS
+ info("switched to namespaces of pid %d", pid);
+#endif
+
+ cgroup = read_proc_net_dev("cgroup", NULL);
+ if(!cgroup) {
+ errno = 0;
+ error("cannot read cgroup interface list.");
+ goto cleanup;
+ }
+
+ if(!eligible_ifaces(cgroup)) {
+ errno = 0;
+ error("there are no double-linked cgroup interfaces available.");
+ goto cleanup;
+ }
+
+ for(h = host; h ; h = h->next) {
+ if(iface_is_eligible(h)) {
+ for (c = cgroup; c; c = c->next) {
+ if(iface_is_eligible(c) && h->ifindex == c->iflink && h->iflink == c->ifindex) {
+ add_device(h->device, c->device);
+ }
+ }
+ }
+ }
+
+cleanup:
+ free_host_ifaces(cgroup);
+ free_host_ifaces(host);
+}
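+
+// Illustrative match (hypothetical numbers): a host veth with ifindex=7,
+// iflink=3 pairs with a container eth0 having ifindex=3, iflink=7, so
+// add_device() reports the pair as "veth... eth0".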
+
+// ----------------------------------------------------------------------------
+// call the external helper
+
+#define CGROUP_NETWORK_INTERFACE_MAX_LINE 2048
+void call_the_helper(pid_t pid, const char *cgroup) {
+ if(setresuid(0, 0, 0) == -1)
+ error("setresuid(0, 0, 0) failed.");
+
+ char command[CGROUP_NETWORK_INTERFACE_MAX_LINE + 1];
+ if(cgroup)
+ snprintfz(command, CGROUP_NETWORK_INTERFACE_MAX_LINE, "exec " PLUGINS_DIR "/cgroup-network-helper.sh --cgroup '%s'", cgroup);
+ else
+ snprintfz(command, CGROUP_NETWORK_INTERFACE_MAX_LINE, "exec " PLUGINS_DIR "/cgroup-network-helper.sh --pid %d", pid);
+
+ info("running: %s", command);
+
+ pid_t cgroup_pid;
+ FILE *fp = mypopene(command, &cgroup_pid, environment);
+ if(fp) {
+ char buffer[CGROUP_NETWORK_INTERFACE_MAX_LINE + 1];
+ char *s;
+ while((s = fgets(buffer, CGROUP_NETWORK_INTERFACE_MAX_LINE, fp))) {
+ trim(s);
+
+ if(*s && *s != '\n') {
+ char *t = s;
+ while(*t && *t != ' ') t++;
+ if(*t == ' ') {
+ *t = '\0';
+ t++;
+ }
+
+ if(!*s || !*t) continue;
+ add_device(s, t);
+ }
+ }
+
+ mypclose(fp, cgroup_pid);
+ }
+ else
+ error("cannot execute cgroup-network helper script: %s", command);
+}
+
+int is_valid_path_symbol(char c) {
+ switch(c) {
+ case '/': // path separators
+ case '\\': // needed for virsh domains \x2d1\x2dname
+ case ' ': // space
+ case '-': // hyphen
+ case '_': // underscore
+ case '.': // dot
+ case ',': // comma
+ return 1;
+
+ default:
+ return 0;
+ }
+}
+
+// we will pass this path to a shell script running as root,
+// so we need to make sure the path is valid and does not
+// include anything that could allow the caller to use shell
+// expansion for gaining escalated privileges.
+int verify_path(const char *path) {
+ struct stat sb;
+
+ char c;
+ const char *s = path;
+ while((c = *s++)) {
+ if(!( isalnum(c) || is_valid_path_symbol(c) )) {
+ error("invalid character in path '%s'", path);
+ return -1;
+ }
+ }
+
+ // return -1, like the other checks, so that callers testing == -1 reject the path
+ if(strstr(path, "\\") && !strstr(path, "\\x")) {
+ error("invalid escape sequence in path '%s'", path);
+ return -1;
+ }
+
+ if(strstr(path, "/../")) {
+ error("invalid parent path sequence detected in '%s'", path);
+ return -1;
+ }
+
+ if(path[0] != '/') {
+ error("only absolute path names are supported - invalid path '%s'", path);
+ return -1;
+ }
+
+ if (stat(path, &sb) == -1) {
+ error("cannot stat() path '%s'", path);
+ return -1;
+ }
+
+ if((sb.st_mode & S_IFMT) != S_IFDIR) {
+ error("path '%s' is not a directory", path);
+ return -1;
+ }
+
+ return 0;
+}
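+
+// Illustrative checks (hypothetical paths): "/sys/fs/cgroup/memory/docker/abc"
+// passes, while "relative/path", "/a/../b" and any path containing '$', ';'
+// or backticks are rejected before being handed to the root shell script.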
+
+/*
+char *fix_path_variable(void) {
+ const char *path = getenv("PATH");
+ if(!path || !*path) return 0;
+
+ char *p = strdupz(path);
+ char *safe_path = callocz(1, strlen(p) + strlen("PATH=") + 1);
+ strcpy(safe_path, "PATH=");
+
+ int added = 0;
+ char *ptr = p;
+ while(ptr && *ptr) {
+ char *s = strsep(&ptr, ":");
+ if(s && *s) {
+ if(verify_path(s) == -1) {
+ error("the PATH variable includes an invalid path '%s' - removed it.", s);
+ }
+ else {
+ info("the PATH variable includes a valid path '%s'.", s);
+ if(added) strcat(safe_path, ":");
+ strcat(safe_path, s);
+ added++;
+ }
+ }
+ }
+
+ info("unsafe PATH: '%s'.", path);
+ info(" safe PATH: '%s'.", safe_path);
+
+ freez(p);
+ return safe_path;
+}
+*/
+
+// ----------------------------------------------------------------------------
+// main
+
+void usage(void) {
+ fprintf(stderr, "%s [ -p PID | --pid PID | --cgroup /path/to/cgroup ]\n", program_name);
+ exit(1);
+}
+
+int main(int argc, char **argv) {
+ pid_t pid = 0;
+
+ program_name = argv[0];
+ program_version = VERSION;
+ error_log_syslog = 0;
+
+ // since cgroup-network runs as root, prevent it from opening symbolic links
+ procfile_open_flags = O_RDONLY|O_NOFOLLOW;
+
+ // ------------------------------------------------------------------------
+ // make sure NETDATA_HOST_PREFIX is safe
+
+ netdata_configured_host_prefix = getenv("NETDATA_HOST_PREFIX");
+ if(verify_netdata_host_prefix() == -1) exit(1);
+
+ if(netdata_configured_host_prefix[0] != '\0' && verify_path(netdata_configured_host_prefix) == -1)
+ fatal("invalid NETDATA_HOST_PREFIX '%s'", netdata_configured_host_prefix);
+
+ // ------------------------------------------------------------------------
+ // build a safe environment for our script
+
+ // environment[0] is a fixed PATH=; here we set the second variable, NETDATA_HOST_PREFIX
+ snprintfz(environment_variable2, sizeof(environment_variable2) - 1, "NETDATA_HOST_PREFIX=%s", netdata_configured_host_prefix);
+
+ // ------------------------------------------------------------------------
+
+ if(argc == 2 && (!strcmp(argv[1], "version") || !strcmp(argv[1], "-version") || !strcmp(argv[1], "--version") || !strcmp(argv[1], "-v") || !strcmp(argv[1], "-V"))) {
+ fprintf(stderr, "cgroup-network %s\n", VERSION);
+ exit(0);
+ }
+
+ if(argc != 3)
+ usage();
+
+ if(!strcmp(argv[1], "-p") || !strcmp(argv[1], "--pid")) {
+ pid = atoi(argv[2]);
+
+ if(pid <= 0) {
+ errno = 0;
+ error("Invalid pid %d given", (int) pid);
+ return 2;
+ }
+
+ call_the_helper(pid, NULL);
+ }
+ else if(!strcmp(argv[1], "--cgroup")) {
+ char *cgroup = argv[2];
+ if(verify_path(cgroup) == -1) {
+ error("cgroup '%s' does not exist or is not valid.", cgroup);
+ return 1;
+ }
+
+ pid = read_pid_from_cgroup(cgroup);
+ call_the_helper(pid, cgroup);
+
+ if(pid <= 0 && !detected_devices) {
+ errno = 0;
+ error("Cannot find a cgroup PID from cgroup '%s'", cgroup);
+ }
+ }
+ else
+ usage();
+
+ if(pid > 0)
+ detect_veth_interfaces(pid);
+
+ int found = send_devices();
+ if(found <= 0) return 1;
+ return 0;
+}
diff --git a/collectors/cgroups.plugin/sys_fs_cgroup.c b/collectors/cgroups.plugin/sys_fs_cgroup.c
new file mode 100644
index 0000000..df1f5f2
--- /dev/null
+++ b/collectors/cgroups.plugin/sys_fs_cgroup.c
@@ -0,0 +1,4051 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "sys_fs_cgroup.h"
+
+#define PLUGIN_CGROUPS_NAME "cgroups.plugin"
+#define PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME "systemd"
+#define PLUGIN_CGROUPS_MODULE_CGROUPS_NAME "/sys/fs/cgroup"
+
+// ----------------------------------------------------------------------------
+// cgroup globals
+
+static long system_page_size = 4096; // the real value is queried via sysconf() in read_cgroup_plugin_configuration()
+
+static int cgroup_enable_cpuacct_stat = CONFIG_BOOLEAN_AUTO;
+static int cgroup_enable_cpuacct_usage = CONFIG_BOOLEAN_AUTO;
+static int cgroup_enable_memory = CONFIG_BOOLEAN_AUTO;
+static int cgroup_enable_detailed_memory = CONFIG_BOOLEAN_AUTO;
+static int cgroup_enable_memory_failcnt = CONFIG_BOOLEAN_AUTO;
+static int cgroup_enable_swap = CONFIG_BOOLEAN_AUTO;
+static int cgroup_enable_blkio_io = CONFIG_BOOLEAN_AUTO;
+static int cgroup_enable_blkio_ops = CONFIG_BOOLEAN_AUTO;
+static int cgroup_enable_blkio_throttle_io = CONFIG_BOOLEAN_AUTO;
+static int cgroup_enable_blkio_throttle_ops = CONFIG_BOOLEAN_AUTO;
+static int cgroup_enable_blkio_merged_ops = CONFIG_BOOLEAN_AUTO;
+static int cgroup_enable_blkio_queued_ops = CONFIG_BOOLEAN_AUTO;
+static int cgroup_enable_pressure_cpu = CONFIG_BOOLEAN_AUTO;
+static int cgroup_enable_pressure_io_some = CONFIG_BOOLEAN_AUTO;
+static int cgroup_enable_pressure_io_full = CONFIG_BOOLEAN_AUTO;
+static int cgroup_enable_pressure_memory_some = CONFIG_BOOLEAN_AUTO;
+static int cgroup_enable_pressure_memory_full = CONFIG_BOOLEAN_AUTO;
+
+static int cgroup_enable_systemd_services = CONFIG_BOOLEAN_YES;
+static int cgroup_enable_systemd_services_detailed_memory = CONFIG_BOOLEAN_NO;
+static int cgroup_used_memory_without_cache = CONFIG_BOOLEAN_YES;
+
+static int cgroup_use_unified_cgroups = CONFIG_BOOLEAN_NO;
+static int cgroup_unified_exist = CONFIG_BOOLEAN_AUTO;
+
+static int cgroup_search_in_devices = 1;
+
+static int cgroup_enable_new_cgroups_detected_at_runtime = 1;
+static int cgroup_check_for_new_every = 10;
+static int cgroup_update_every = 1;
+static int cgroup_containers_chart_priority = NETDATA_CHART_PRIO_CGROUPS_CONTAINERS;
+
+static int cgroup_recheck_zero_blkio_every_iterations = 10;
+static int cgroup_recheck_zero_mem_failcnt_every_iterations = 10;
+static int cgroup_recheck_zero_mem_detailed_every_iterations = 10;
+
+static char *cgroup_cpuacct_base = NULL;
+static char *cgroup_cpuset_base = NULL;
+static char *cgroup_blkio_base = NULL;
+static char *cgroup_memory_base = NULL;
+static char *cgroup_devices_base = NULL;
+static char *cgroup_unified_base = NULL;
+
+static int cgroup_root_count = 0;
+static int cgroup_root_max = 1000;
+static int cgroup_max_depth = 0;
+
+static SIMPLE_PATTERN *enabled_cgroup_patterns = NULL;
+static SIMPLE_PATTERN *enabled_cgroup_paths = NULL;
+static SIMPLE_PATTERN *enabled_cgroup_renames = NULL;
+static SIMPLE_PATTERN *systemd_services_cgroups = NULL;
+
+static char *cgroups_rename_script = NULL;
+static char *cgroups_network_interface_script = NULL;
+
+static int cgroups_check = 0;
+
+static uint32_t Read_hash = 0;
+static uint32_t Write_hash = 0;
+static uint32_t user_hash = 0;
+static uint32_t system_hash = 0;
+
+enum cgroups_type { CGROUPS_AUTODETECT_FAIL, CGROUPS_V1, CGROUPS_V2 };
+
+enum cgroups_systemd_setting {
+ SYSTEMD_CGROUP_ERR,
+ SYSTEMD_CGROUP_LEGACY,
+ SYSTEMD_CGROUP_HYBRID,
+ SYSTEMD_CGROUP_UNIFIED
+};
+
+struct cgroups_systemd_config_setting {
+ char *name;
+ enum cgroups_systemd_setting setting;
+};
+
+static struct cgroups_systemd_config_setting cgroups_systemd_options[] = {
+ { .name = "legacy", .setting = SYSTEMD_CGROUP_LEGACY },
+ { .name = "hybrid", .setting = SYSTEMD_CGROUP_HYBRID },
+ { .name = "unified", .setting = SYSTEMD_CGROUP_UNIFIED },
+ { .name = NULL, .setting = SYSTEMD_CGROUP_ERR },
+};
+
+/* on Fedora, systemd is not in PATH for some reason */
+#define SYSTEMD_CMD_RHEL "/usr/lib/systemd/systemd --version"
+#define SYSTEMD_HIERARCHY_STRING "default-hierarchy="
+
+#define MAXSIZE_PROC_CMDLINE 4096
+static enum cgroups_systemd_setting cgroups_detect_systemd(const char *exec)
+{
+ pid_t command_pid;
+ enum cgroups_systemd_setting retval = SYSTEMD_CGROUP_ERR;
+ char buf[MAXSIZE_PROC_CMDLINE];
+ char *begin, *end;
+
+ FILE *f = mypopen(exec, &command_pid);
+
+ if (!f)
+ return retval;
+
+ while (fgets(buf, MAXSIZE_PROC_CMDLINE, f) != NULL) {
+ if ((begin = strstr(buf, SYSTEMD_HIERARCHY_STRING))) {
+ end = begin = begin + strlen(SYSTEMD_HIERARCHY_STRING);
+ if (!*begin)
+ break;
+ while (isalpha(*end))
+ end++;
+ *end = 0;
+ for (int i = 0; cgroups_systemd_options[i].name; i++) {
+ if (!strcmp(begin, cgroups_systemd_options[i].name)) {
+ retval = cgroups_systemd_options[i].setting;
+ break;
+ }
+ }
+ break;
+ }
+ }
+
+ if (mypclose(f, command_pid))
+ return SYSTEMD_CGROUP_ERR;
+
+ return retval;
+}
+
+static enum cgroups_type cgroups_try_detect_version()
+{
+ pid_t command_pid;
+ char buf[MAXSIZE_PROC_CMDLINE];
+ enum cgroups_systemd_setting systemd_setting;
+ int cgroups2_available = 0;
+
+ // 1. check if cgroups2 is available on the system at all
+ FILE *f = mypopen("grep cgroup /proc/filesystems", &command_pid);
+ if (!f) {
+ error("popen failed");
+ return CGROUPS_AUTODETECT_FAIL;
+ }
+ while (fgets(buf, MAXSIZE_PROC_CMDLINE, f) != NULL) {
+ if (strstr(buf, "cgroup2")) {
+ cgroups2_available = 1;
+ break;
+ }
+ }
+ if(mypclose(f, command_pid))
+ return CGROUPS_AUTODETECT_FAIL;
+
+ if(!cgroups2_available)
+ return CGROUPS_V1;
+
+ // 2. check the systemd compile-time setting
+ if ((systemd_setting = cgroups_detect_systemd("systemd --version")) == SYSTEMD_CGROUP_ERR)
+ systemd_setting = cgroups_detect_systemd(SYSTEMD_CMD_RHEL);
+
+ if(systemd_setting == SYSTEMD_CGROUP_ERR)
+ return CGROUPS_AUTODETECT_FAIL;
+
+ if(systemd_setting == SYSTEMD_CGROUP_LEGACY || systemd_setting == SYSTEMD_CGROUP_HYBRID) {
+ // currently we prefer V1 if HYBRID is set, as it seems to be more feature-complete;
+ // in the future we might want to continue here if SYSTEMD_CGROUP_HYBRID
+ // and go ahead with V2
+ return CGROUPS_V1;
+ }
+
+ // 3. if we are unified, as on Fedora (default cgroups2-only mode),
+ // check the kernel command line flag that can override that setting
+ f = fopen("/proc/cmdline", "r");
+ if (!f) {
+ error("Error reading kernel boot commandline parameters");
+ return CGROUPS_AUTODETECT_FAIL;
+ }
+
+ if (!fgets(buf, MAXSIZE_PROC_CMDLINE, f)) {
+ error("couldn't read all cmdline params into buffer");
+ fclose(f);
+ return CGROUPS_AUTODETECT_FAIL;
+ }
+
+ fclose(f);
+
+ if (strstr(buf, "systemd.unified_cgroup_hierarchy=0")) {
+ info("cgroups v2 (unified cgroups) are available but disabled on this system.");
+ return CGROUPS_V1;
+ }
+ return CGROUPS_V2;
+}
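+
+// In short, the detection above resolves as follows: no 'cgroup2' in
+// /proc/filesystems -> V1; systemd built with default-hierarchy=legacy or
+// hybrid -> V1; systemd.unified_cgroup_hierarchy=0 on the kernel command
+// line -> V1; otherwise -> V2.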
+
+void read_cgroup_plugin_configuration() {
+ system_page_size = sysconf(_SC_PAGESIZE);
+
+ Read_hash = simple_hash("Read");
+ Write_hash = simple_hash("Write");
+ user_hash = simple_hash("user");
+ system_hash = simple_hash("system");
+
+ cgroup_update_every = (int)config_get_number("plugin:cgroups", "update every", localhost->rrd_update_every);
+ if(cgroup_update_every < localhost->rrd_update_every)
+ cgroup_update_every = localhost->rrd_update_every;
+
+ cgroup_check_for_new_every = (int)config_get_number("plugin:cgroups", "check for new cgroups every", (long long)cgroup_check_for_new_every * (long long)cgroup_update_every);
+ if(cgroup_check_for_new_every < cgroup_update_every)
+ cgroup_check_for_new_every = cgroup_update_every;
+
+ cgroup_use_unified_cgroups = config_get_boolean_ondemand("plugin:cgroups", "use unified cgroups", CONFIG_BOOLEAN_AUTO);
+ if(cgroup_use_unified_cgroups == CONFIG_BOOLEAN_AUTO)
+ cgroup_use_unified_cgroups = (cgroups_try_detect_version() == CGROUPS_V2);
+
+ info("use unified cgroups %s", cgroup_use_unified_cgroups ? "true" : "false");
+
+ cgroup_containers_chart_priority = (int)config_get_number("plugin:cgroups", "containers priority", cgroup_containers_chart_priority);
+ if(cgroup_containers_chart_priority < 1)
+ cgroup_containers_chart_priority = NETDATA_CHART_PRIO_CGROUPS_CONTAINERS;
+
+ cgroup_enable_cpuacct_stat = config_get_boolean_ondemand("plugin:cgroups", "enable cpuacct stat (total CPU)", cgroup_enable_cpuacct_stat);
+ cgroup_enable_cpuacct_usage = config_get_boolean_ondemand("plugin:cgroups", "enable cpuacct usage (per core CPU)", cgroup_enable_cpuacct_usage);
+
+ cgroup_enable_memory = config_get_boolean_ondemand("plugin:cgroups", "enable memory (used mem including cache)", cgroup_enable_memory);
+ cgroup_enable_detailed_memory = config_get_boolean_ondemand("plugin:cgroups", "enable detailed memory", cgroup_enable_detailed_memory);
+ cgroup_enable_memory_failcnt = config_get_boolean_ondemand("plugin:cgroups", "enable memory limits fail count", cgroup_enable_memory_failcnt);
+ cgroup_enable_swap = config_get_boolean_ondemand("plugin:cgroups", "enable swap memory", cgroup_enable_swap);
+
+ cgroup_enable_blkio_io = config_get_boolean_ondemand("plugin:cgroups", "enable blkio bandwidth", cgroup_enable_blkio_io);
+ cgroup_enable_blkio_ops = config_get_boolean_ondemand("plugin:cgroups", "enable blkio operations", cgroup_enable_blkio_ops);
+ cgroup_enable_blkio_throttle_io = config_get_boolean_ondemand("plugin:cgroups", "enable blkio throttle bandwidth", cgroup_enable_blkio_throttle_io);
+ cgroup_enable_blkio_throttle_ops = config_get_boolean_ondemand("plugin:cgroups", "enable blkio throttle operations", cgroup_enable_blkio_throttle_ops);
+ cgroup_enable_blkio_queued_ops = config_get_boolean_ondemand("plugin:cgroups", "enable blkio queued operations", cgroup_enable_blkio_queued_ops);
+ cgroup_enable_blkio_merged_ops = config_get_boolean_ondemand("plugin:cgroups", "enable blkio merged operations", cgroup_enable_blkio_merged_ops);
+
+ cgroup_enable_pressure_cpu = config_get_boolean_ondemand("plugin:cgroups", "enable cpu pressure", cgroup_enable_pressure_cpu);
+ cgroup_enable_pressure_io_some = config_get_boolean_ondemand("plugin:cgroups", "enable io some pressure", cgroup_enable_pressure_io_some);
+ cgroup_enable_pressure_io_full = config_get_boolean_ondemand("plugin:cgroups", "enable io full pressure", cgroup_enable_pressure_io_full);
+ cgroup_enable_pressure_memory_some = config_get_boolean_ondemand("plugin:cgroups", "enable memory some pressure", cgroup_enable_pressure_memory_some);
+ cgroup_enable_pressure_memory_full = config_get_boolean_ondemand("plugin:cgroups", "enable memory full pressure", cgroup_enable_pressure_memory_full);
+
+ cgroup_recheck_zero_blkio_every_iterations = (int)config_get_number("plugin:cgroups", "recheck zero blkio every iterations", cgroup_recheck_zero_blkio_every_iterations);
+ cgroup_recheck_zero_mem_failcnt_every_iterations = (int)config_get_number("plugin:cgroups", "recheck zero memory failcnt every iterations", cgroup_recheck_zero_mem_failcnt_every_iterations);
+ cgroup_recheck_zero_mem_detailed_every_iterations = (int)config_get_number("plugin:cgroups", "recheck zero detailed memory every iterations", cgroup_recheck_zero_mem_detailed_every_iterations);
+
+ cgroup_enable_systemd_services = config_get_boolean("plugin:cgroups", "enable systemd services", cgroup_enable_systemd_services);
+ cgroup_enable_systemd_services_detailed_memory = config_get_boolean("plugin:cgroups", "enable systemd services detailed memory", cgroup_enable_systemd_services_detailed_memory);
+ cgroup_used_memory_without_cache = config_get_boolean("plugin:cgroups", "report used memory without cache", cgroup_used_memory_without_cache);
+
+ char filename[FILENAME_MAX + 1], *s;
+ struct mountinfo *mi, *root = mountinfo_read(0);
+ if(!cgroup_use_unified_cgroups) {
+ // cgroup v1 does not have pressure metrics
+ cgroup_enable_pressure_cpu =
+ cgroup_enable_pressure_io_some =
+ cgroup_enable_pressure_io_full =
+ cgroup_enable_pressure_memory_some =
+ cgroup_enable_pressure_memory_full = CONFIG_BOOLEAN_NO;
+
+ mi = mountinfo_find_by_filesystem_super_option(root, "cgroup", "cpuacct");
+ if(!mi) mi = mountinfo_find_by_filesystem_mount_source(root, "cgroup", "cpuacct");
+ if(!mi) {
+ error("CGROUP: cannot find cpuacct mountinfo. Assuming default: /sys/fs/cgroup/cpuacct");
+ s = "/sys/fs/cgroup/cpuacct";
+ }
+ else s = mi->mount_point;
+ snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, s);
+ cgroup_cpuacct_base = config_get("plugin:cgroups", "path to /sys/fs/cgroup/cpuacct", filename);
+
+ mi = mountinfo_find_by_filesystem_super_option(root, "cgroup", "cpuset");
+ if(!mi) mi = mountinfo_find_by_filesystem_mount_source(root, "cgroup", "cpuset");
+ if(!mi) {
+ error("CGROUP: cannot find cpuset mountinfo. Assuming default: /sys/fs/cgroup/cpuset");
+ s = "/sys/fs/cgroup/cpuset";
+ }
+ else s = mi->mount_point;
+ snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, s);
+ cgroup_cpuset_base = config_get("plugin:cgroups", "path to /sys/fs/cgroup/cpuset", filename);
+
+ mi = mountinfo_find_by_filesystem_super_option(root, "cgroup", "blkio");
+ if(!mi) mi = mountinfo_find_by_filesystem_mount_source(root, "cgroup", "blkio");
+ if(!mi) {
+ error("CGROUP: cannot find blkio mountinfo. Assuming default: /sys/fs/cgroup/blkio");
+ s = "/sys/fs/cgroup/blkio";
+ }
+ else s = mi->mount_point;
+ snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, s);
+ cgroup_blkio_base = config_get("plugin:cgroups", "path to /sys/fs/cgroup/blkio", filename);
+
+ mi = mountinfo_find_by_filesystem_super_option(root, "cgroup", "memory");
+ if(!mi) mi = mountinfo_find_by_filesystem_mount_source(root, "cgroup", "memory");
+ if(!mi) {
+ error("CGROUP: cannot find memory mountinfo. Assuming default: /sys/fs/cgroup/memory");
+ s = "/sys/fs/cgroup/memory";
+ }
+ else s = mi->mount_point;
+ snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, s);
+ cgroup_memory_base = config_get("plugin:cgroups", "path to /sys/fs/cgroup/memory", filename);
+
+ mi = mountinfo_find_by_filesystem_super_option(root, "cgroup", "devices");
+ if(!mi) mi = mountinfo_find_by_filesystem_mount_source(root, "cgroup", "devices");
+ if(!mi) {
+ error("CGROUP: cannot find devices mountinfo. Assuming default: /sys/fs/cgroup/devices");
+ s = "/sys/fs/cgroup/devices";
+ }
+ else s = mi->mount_point;
+ snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, s);
+ cgroup_devices_base = config_get("plugin:cgroups", "path to /sys/fs/cgroup/devices", filename);
+ }
+ else {
+ //cgroup_enable_cpuacct_stat =
+ cgroup_enable_cpuacct_usage =
+ //cgroup_enable_memory =
+ //cgroup_enable_detailed_memory =
+ cgroup_enable_memory_failcnt =
+ //cgroup_enable_swap =
+ //cgroup_enable_blkio_io =
+ //cgroup_enable_blkio_ops =
+ cgroup_enable_blkio_throttle_io =
+ cgroup_enable_blkio_throttle_ops =
+ cgroup_enable_blkio_merged_ops =
+ cgroup_enable_blkio_queued_ops = CONFIG_BOOLEAN_NO;
+ cgroup_search_in_devices = 0;
+ cgroup_enable_systemd_services_detailed_memory = CONFIG_BOOLEAN_NO;
+ cgroup_used_memory_without_cache = CONFIG_BOOLEAN_NO; //unified cgroups use different values
+
+ // TODO: can there be more than 1 cgroup2 mount point?
+ mi = mountinfo_find_by_filesystem_super_option(root, "cgroup2", "rw"); // there is no cgroup2-specific super option - for now use the 'rw' option
+ if(mi) debug(D_CGROUP, "found unified cgroup root using super options, with path: '%s'", mi->mount_point);
+ if(!mi) {
+ mi = mountinfo_find_by_filesystem_mount_source(root, "cgroup2", "cgroup");
+ if(mi) debug(D_CGROUP, "found unified cgroup root using mountsource info, with path: '%s'", mi->mount_point);
+ }
+ if(!mi) {
+ error("CGROUP: cannot find cgroup2 mountinfo. Assuming default: /sys/fs/cgroup");
+ s = "/sys/fs/cgroup";
+ }
+ else s = mi->mount_point;
+ snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, s);
+ cgroup_unified_base = config_get("plugin:cgroups", "path to unified cgroups", filename);
+ debug(D_CGROUP, "using cgroup root: '%s'", cgroup_unified_base);
+ }
+
+ cgroup_root_max = (int)config_get_number("plugin:cgroups", "max cgroups to allow", cgroup_root_max);
+ cgroup_max_depth = (int)config_get_number("plugin:cgroups", "max cgroups depth to monitor", cgroup_max_depth);
+
+ cgroup_enable_new_cgroups_detected_at_runtime = config_get_boolean("plugin:cgroups", "enable new cgroups detected at run time", cgroup_enable_new_cgroups_detected_at_runtime);
+
+ enabled_cgroup_patterns = simple_pattern_create(
+ config_get("plugin:cgroups", "enable by default cgroups matching",
+ // ----------------------------------------------------------------
+
+ " !*/init.scope " // ignore init.scope
+ " !/system.slice/run-*.scope " // ignore system.slice/run-XXXX.scope
+ " *.scope " // we need all other *.scope for sure
+
+ // ----------------------------------------------------------------
+
+ " /machine.slice/*.service " // #3367 systemd-nspawn
+ " /kubepods/pod*/* " // k8s containers
+ " /kubepods/*/pod*/* " // k8s containers
+
+ // ----------------------------------------------------------------
+
+ " !/kubepods* " // all other k8s cgroups
+ " !*/vcpu* " // libvirtd adds these sub-cgroups
+ " !*/emulator " // libvirtd adds these sub-cgroups
+ " !*.mount "
+ " !*.partition "
+ " !*.service "
+ " !*.socket "
+ " !*.slice "
+ " !*.swap "
+ " !*.user "
+ " !/ "
+ " !/docker "
+ " !/libvirt "
+ " !/lxc "
+ " !/lxc/*/* " // #1397 #2649
+ " !/lxc.monitor* "
+ " !/lxc.pivot "
+ " !/lxc.payload "
+ " !/machine "
+ " !/qemu "
+ " !/system "
+ " !/systemd "
+ " !/user "
+ " * " // enable anything else
+ ), NULL, SIMPLE_PATTERN_EXACT);
+
+ enabled_cgroup_paths = simple_pattern_create(
+ config_get("plugin:cgroups", "search for cgroups in subpaths matching",
+ " !*/init.scope " // ignore init.scope
+ " !*-qemu " // #345
+ " !*.libvirt-qemu " // #3010
+ " !/init.scope "
+ " !/system "
+ " !/systemd "
+ " !/user "
+ " !/user.slice "
+ " !/lxc/*/* " // #2161 #2649
+ " !/lxc.monitor "
+ " !/lxc.payload/*/* "
+ " !/lxc.payload.* "
+ " * "
+ ), NULL, SIMPLE_PATTERN_EXACT);
+
+ snprintfz(filename, FILENAME_MAX, "%s/cgroup-name.sh", netdata_configured_primary_plugins_dir);
+ cgroups_rename_script = config_get("plugin:cgroups", "script to get cgroup names", filename);
+
+ snprintfz(filename, FILENAME_MAX, "%s/cgroup-network", netdata_configured_primary_plugins_dir);
+ cgroups_network_interface_script = config_get("plugin:cgroups", "script to get cgroup network interfaces", filename);
+
+ enabled_cgroup_renames = simple_pattern_create(
+ config_get("plugin:cgroups", "run script to rename cgroups matching",
+ " !/ "
+ " !*.mount "
+ " !*.socket "
+ " !*.partition "
+ " /machine.slice/*.service " // #3367 systemd-nspawn
+ " !*.service "
+ " !*.slice "
+ " !*.swap "
+ " !*.user "
+ " !init.scope "
+ " !*.scope/vcpu* " // libvirtd adds these sub-cgroups
+ " !*.scope/emulator " // libvirtd adds these sub-cgroups
+ " *.scope "
+ " *docker* "
+ " *lxc* "
+ " *qemu* "
+ " *kubepods* " // #3396 kubernetes
+ " *.libvirt-qemu " // #3010
+ " * "
+ ), NULL, SIMPLE_PATTERN_EXACT);
+
+ if(cgroup_enable_systemd_services) {
+ systemd_services_cgroups = simple_pattern_create(
+ config_get("plugin:cgroups", "cgroups to match as systemd services",
+ " !/system.slice/*/*.service "
+ " /system.slice/*.service "
+ ), NULL, SIMPLE_PATTERN_EXACT);
+ }
+
+ mountinfo_free_all(root);
+}
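+
+// For reference, a minimal sketch of how a few of the options read above may
+// appear in netdata.conf (illustrative values, not a complete list of keys):
+//
+//   [plugin:cgroups]
+//       update every = 1
+//       check for new cgroups every = 10
+//       use unified cgroups = auto
+//       enable memory (used mem including cache) = auto
+//       path to /sys/fs/cgroup/cpuacct = /sys/fs/cgroup/cpuacct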
+
+// ----------------------------------------------------------------------------
+// cgroup objects
+
+struct blkio {
+ int updated;
+ int enabled; // CONFIG_BOOLEAN_YES or CONFIG_BOOLEAN_AUTO
+ int delay_counter;
+
+ char *filename;
+
+ unsigned long long Read;
+ unsigned long long Write;
+/*
+ unsigned long long Sync;
+ unsigned long long Async;
+ unsigned long long Total;
+*/
+};
+
+// https://www.kernel.org/doc/Documentation/cgroup-v1/memory.txt
+struct memory {
+ ARL_BASE *arl_base;
+ ARL_ENTRY *arl_dirty;
+ ARL_ENTRY *arl_swap;
+
+ int updated_detailed;
+ int updated_usage_in_bytes;
+ int updated_msw_usage_in_bytes;
+ int updated_failcnt;
+
+ int enabled_detailed; // CONFIG_BOOLEAN_YES or CONFIG_BOOLEAN_AUTO
+ int enabled_usage_in_bytes; // CONFIG_BOOLEAN_YES or CONFIG_BOOLEAN_AUTO
+ int enabled_msw_usage_in_bytes; // CONFIG_BOOLEAN_YES or CONFIG_BOOLEAN_AUTO
+ int enabled_failcnt; // CONFIG_BOOLEAN_YES or CONFIG_BOOLEAN_AUTO
+
+ int delay_counter_detailed;
+ int delay_counter_failcnt;
+
+ char *filename_detailed;
+ char *filename_usage_in_bytes;
+ char *filename_msw_usage_in_bytes;
+ char *filename_failcnt;
+
+ int detailed_has_dirty;
+ int detailed_has_swap;
+
+ // detailed metrics
+/*
+ unsigned long long cache;
+ unsigned long long rss;
+ unsigned long long rss_huge;
+ unsigned long long mapped_file;
+ unsigned long long writeback;
+ unsigned long long dirty;
+ unsigned long long swap;
+ unsigned long long pgpgin;
+ unsigned long long pgpgout;
+ unsigned long long pgfault;
+ unsigned long long pgmajfault;
+ unsigned long long inactive_anon;
+ unsigned long long active_anon;
+ unsigned long long inactive_file;
+ unsigned long long active_file;
+ unsigned long long unevictable;
+ unsigned long long hierarchical_memory_limit;
+*/
+ //unified cgroups metrics
+ unsigned long long anon;
+ unsigned long long kernel_stack;
+ unsigned long long slab;
+ unsigned long long sock;
+ unsigned long long shmem;
+ unsigned long long anon_thp;
+ //unsigned long long file_writeback;
+ //unsigned long long file_dirty;
+ //unsigned long long file;
+
+ unsigned long long total_cache;
+ unsigned long long total_rss;
+ unsigned long long total_rss_huge;
+ unsigned long long total_mapped_file;
+ unsigned long long total_writeback;
+ unsigned long long total_dirty;
+ unsigned long long total_swap;
+ unsigned long long total_pgpgin;
+ unsigned long long total_pgpgout;
+ unsigned long long total_pgfault;
+ unsigned long long total_pgmajfault;
+/*
+ unsigned long long total_inactive_anon;
+ unsigned long long total_active_anon;
+ unsigned long long total_inactive_file;
+ unsigned long long total_active_file;
+ unsigned long long total_unevictable;
+*/
+
+ // single file metrics
+ unsigned long long usage_in_bytes;
+ unsigned long long msw_usage_in_bytes;
+ unsigned long long failcnt;
+};
+
+// https://www.kernel.org/doc/Documentation/cgroup-v1/cpuacct.txt
+struct cpuacct_stat {
+ int updated;
+ int enabled; // CONFIG_BOOLEAN_YES or CONFIG_BOOLEAN_AUTO
+
+ char *filename;
+
+ unsigned long long user;
+ unsigned long long system;
+};
+
+// https://www.kernel.org/doc/Documentation/cgroup-v1/cpuacct.txt
+struct cpuacct_usage {
+ int updated;
+ int enabled; // CONFIG_BOOLEAN_YES or CONFIG_BOOLEAN_AUTO
+
+ char *filename;
+
+ unsigned int cpus;
+ unsigned long long *cpu_percpu;
+};
+
+struct cgroup_network_interface {
+ const char *host_device;
+ const char *container_device;
+ struct cgroup_network_interface *next;
+};
+
+#define CGROUP_OPTIONS_DISABLED_DUPLICATE 0x00000001
+#define CGROUP_OPTIONS_SYSTEM_SLICE_SERVICE 0x00000002
+#define CGROUP_OPTIONS_IS_UNIFIED 0x00000004
+
+// *** WARNING *** The fields are not thread safe. Take care to access them safely.
+struct cgroup {
+ uint32_t options;
+
+ char available; // found in the filesystem
+ char enabled; // enabled in the config
+
+ char pending_renames;
+
+ char *id;
+ uint32_t hash;
+
+ char *chart_id;
+ uint32_t hash_chart;
+
+ char *chart_title;
+
+ struct label *chart_labels;
+
+ struct cpuacct_stat cpuacct_stat;
+ struct cpuacct_usage cpuacct_usage;
+
+ struct memory memory;
+
+ struct blkio io_service_bytes; // bytes
+ struct blkio io_serviced; // operations
+
+ struct blkio throttle_io_service_bytes; // bytes
+ struct blkio throttle_io_serviced; // operations
+
+ struct blkio io_merged; // operations
+ struct blkio io_queued; // operations
+
+ struct cgroup_network_interface *interfaces;
+
+ struct pressure cpu_pressure;
+ struct pressure io_pressure;
+ struct pressure memory_pressure;
+
+ // per cgroup charts
+ RRDSET *st_cpu;
+ RRDSET *st_cpu_limit;
+ RRDSET *st_cpu_per_core;
+ RRDSET *st_mem;
+ RRDSET *st_writeback;
+ RRDSET *st_mem_activity;
+ RRDSET *st_pgfaults;
+ RRDSET *st_mem_usage;
+ RRDSET *st_mem_usage_limit;
+ RRDSET *st_mem_failcnt;
+ RRDSET *st_io;
+ RRDSET *st_serviced_ops;
+ RRDSET *st_throttle_io;
+ RRDSET *st_throttle_serviced_ops;
+ RRDSET *st_queued_ops;
+ RRDSET *st_merged_ops;
+
+ // per cgroup chart variables
+ char *filename_cpuset_cpus;
+ unsigned long long cpuset_cpus;
+
+ char *filename_cpu_cfs_period;
+ unsigned long long cpu_cfs_period;
+
+ char *filename_cpu_cfs_quota;
+ unsigned long long cpu_cfs_quota;
+
+ RRDSETVAR *chart_var_cpu_limit;
+ calculated_number prev_cpu_usage;
+
+ char *filename_memory_limit;
+ unsigned long long memory_limit;
+ RRDSETVAR *chart_var_memory_limit;
+
+ char *filename_memoryswap_limit;
+ unsigned long long memoryswap_limit;
+ RRDSETVAR *chart_var_memoryswap_limit;
+
+ // services
+ RRDDIM *rd_cpu;
+ RRDDIM *rd_mem_usage;
+ RRDDIM *rd_mem_failcnt;
+ RRDDIM *rd_swap_usage;
+
+ RRDDIM *rd_mem_detailed_cache;
+ RRDDIM *rd_mem_detailed_rss;
+ RRDDIM *rd_mem_detailed_mapped;
+ RRDDIM *rd_mem_detailed_writeback;
+ RRDDIM *rd_mem_detailed_pgpgin;
+ RRDDIM *rd_mem_detailed_pgpgout;
+ RRDDIM *rd_mem_detailed_pgfault;
+ RRDDIM *rd_mem_detailed_pgmajfault;
+
+ RRDDIM *rd_io_service_bytes_read;
+ RRDDIM *rd_io_serviced_read;
+ RRDDIM *rd_throttle_io_read;
+ RRDDIM *rd_throttle_io_serviced_read;
+ RRDDIM *rd_io_queued_read;
+ RRDDIM *rd_io_merged_read;
+
+ RRDDIM *rd_io_service_bytes_write;
+ RRDDIM *rd_io_serviced_write;
+ RRDDIM *rd_throttle_io_write;
+ RRDDIM *rd_throttle_io_serviced_write;
+ RRDDIM *rd_io_queued_write;
+ RRDDIM *rd_io_merged_write;
+
+ struct cgroup *next;
+ struct cgroup *discovered_next;
+
+} *cgroup_root = NULL;
+
+uv_mutex_t cgroup_root_mutex;
+
+struct cgroup *discovered_cgroup_root = NULL;
+
+struct discovery_thread {
+ uv_thread_t thread;
+ uv_mutex_t mutex;
+ uv_cond_t cond_var;
+ int start_discovery;
+ int exited;
+} discovery_thread;
+
+// ----------------------------------------------------------------------------
+// read values from /sys
+
+static inline void cgroup_read_cpuacct_stat(struct cpuacct_stat *cp) {
+ static procfile *ff = NULL;
+
+ if(likely(cp->filename)) {
+ ff = procfile_reopen(ff, cp->filename, NULL, PROCFILE_FLAG_DEFAULT);
+ if(unlikely(!ff)) {
+ cp->updated = 0;
+ cgroups_check = 1;
+ return;
+ }
+
+ ff = procfile_readall(ff);
+ if(unlikely(!ff)) {
+ cp->updated = 0;
+ cgroups_check = 1;
+ return;
+ }
+
+ unsigned long i, lines = procfile_lines(ff);
+
+ if(unlikely(lines < 1)) {
+ error("CGROUP: file '%s' should have 1+ lines.", cp->filename);
+ cp->updated = 0;
+ return;
+ }
+
+ for(i = 0; i < lines ; i++) {
+ char *s = procfile_lineword(ff, i, 0);
+ uint32_t hash = simple_hash(s);
+
+ if(unlikely(hash == user_hash && !strcmp(s, "user")))
+ cp->user = str2ull(procfile_lineword(ff, i, 1));
+
+ else if(unlikely(hash == system_hash && !strcmp(s, "system")))
+ cp->system = str2ull(procfile_lineword(ff, i, 1));
+ }
+
+ cp->updated = 1;
+
+ if(unlikely(cp->enabled == CONFIG_BOOLEAN_AUTO &&
+ (cp->user || cp->system || netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES)))
+ cp->enabled = CONFIG_BOOLEAN_YES;
+ }
+}
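+
+// The v1 cpuacct.stat file parsed above looks like this (illustrative values,
+// both counters in USER_HZ ticks):
+//   user 12345
+//   system 6789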
+
+static inline void cgroup2_read_cpuacct_stat(struct cpuacct_stat *cp) {
+ static procfile *ff = NULL;
+
+ if(likely(cp->filename)) {
+ ff = procfile_reopen(ff, cp->filename, NULL, PROCFILE_FLAG_DEFAULT);
+ if(unlikely(!ff)) {
+ cp->updated = 0;
+ cgroups_check = 1;
+ return;
+ }
+
+ ff = procfile_readall(ff);
+ if(unlikely(!ff)) {
+ cp->updated = 0;
+ cgroups_check = 1;
+ return;
+ }
+
+ unsigned long lines = procfile_lines(ff);
+
+ if(unlikely(lines < 3)) {
+ error("CGROUP: file '%s' should have 3+ lines.", cp->filename);
+ cp->updated = 0;
+ return;
+ }
+
+ cp->user = str2ull(procfile_lineword(ff, 1, 1));
+ cp->system = str2ull(procfile_lineword(ff, 2, 1));
+
+ cp->updated = 1;
+
+ if(unlikely(cp->enabled == CONFIG_BOOLEAN_AUTO &&
+ (cp->user || cp->system || netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES)))
+ cp->enabled = CONFIG_BOOLEAN_YES;
+ }
+}
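+
+// The v2 cpu.stat file parsed above looks like this (illustrative values):
+//   usage_usec 10000
+//   user_usec 7000      <- line 1, word 1
+//   system_usec 3000    <- line 2, word 1
+// which is why at least 3 lines are required.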
+
+static inline void cgroup_read_cpuacct_usage(struct cpuacct_usage *ca) {
+ static procfile *ff = NULL;
+
+ if(likely(ca->filename)) {
+ ff = procfile_reopen(ff, ca->filename, NULL, PROCFILE_FLAG_DEFAULT);
+ if(unlikely(!ff)) {
+ ca->updated = 0;
+ cgroups_check = 1;
+ return;
+ }
+
+ ff = procfile_readall(ff);
+ if(unlikely(!ff)) {
+ ca->updated = 0;
+ cgroups_check = 1;
+ return;
+ }
+
+ if(unlikely(procfile_lines(ff) < 1)) {
+ error("CGROUP: file '%s' should have 1+ lines but has %zu.", ca->filename, procfile_lines(ff));
+ ca->updated = 0;
+ return;
+ }
+
+ unsigned long i = procfile_linewords(ff, 0);
+ if(unlikely(i == 0)) {
+ ca->updated = 0;
+ return;
+ }
+
+ // the line may end in one or more empty words - trim them to count the real CPUs
+ while(i > 0) {
+ char *s = procfile_lineword(ff, 0, i - 1);
+ if(!*s) i--;
+ else break;
+ }
+
+ if(unlikely(i != ca->cpus)) {
+ freez(ca->cpu_percpu);
+ ca->cpu_percpu = mallocz(sizeof(unsigned long long) * i);
+ ca->cpus = (unsigned int)i;
+ }
+
+ unsigned long long total = 0;
+ for(i = 0; i < ca->cpus ;i++) {
+ unsigned long long n = str2ull(procfile_lineword(ff, 0, i));
+ ca->cpu_percpu[i] = n;
+ total += n;
+ }
+
+ ca->updated = 1;
+
+ if(unlikely(ca->enabled == CONFIG_BOOLEAN_AUTO &&
+ (total || netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES)))
+ ca->enabled = CONFIG_BOOLEAN_YES;
+ }
+}
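+
+// cpuacct.usage_percpu is a single line of cumulative per-core counters in
+// nanoseconds, e.g. (illustrative values):
+//   277992382 2588686465 0 0
+// the trailing empty words trimmed above would otherwise inflate the CPU count.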
+
+static inline void cgroup_read_blkio(struct blkio *io) {
+ if(unlikely(io->enabled == CONFIG_BOOLEAN_AUTO && io->delay_counter > 0)) {
+ io->delay_counter--;
+ return;
+ }
+
+ if(likely(io->filename)) {
+ static procfile *ff = NULL;
+
+ ff = procfile_reopen(ff, io->filename, NULL, PROCFILE_FLAG_DEFAULT);
+ if(unlikely(!ff)) {
+ io->updated = 0;
+ cgroups_check = 1;
+ return;
+ }
+
+ ff = procfile_readall(ff);
+ if(unlikely(!ff)) {
+ io->updated = 0;
+ cgroups_check = 1;
+ return;
+ }
+
+ unsigned long i, lines = procfile_lines(ff);
+
+ if(unlikely(lines < 1)) {
+ error("CGROUP: file '%s' should have 1+ lines.", io->filename);
+ io->updated = 0;
+ return;
+ }
+
+ io->Read = 0;
+ io->Write = 0;
+/*
+ io->Sync = 0;
+ io->Async = 0;
+ io->Total = 0;
+*/
+
+ for(i = 0; i < lines ; i++) {
+ char *s = procfile_lineword(ff, i, 1);
+ uint32_t hash = simple_hash(s);
+
+ if(unlikely(hash == Read_hash && !strcmp(s, "Read")))
+ io->Read += str2ull(procfile_lineword(ff, i, 2));
+
+ else if(unlikely(hash == Write_hash && !strcmp(s, "Write")))
+ io->Write += str2ull(procfile_lineword(ff, i, 2));
+
+/*
+ else if(unlikely(hash == Sync_hash && !strcmp(s, "Sync")))
+ io->Sync += str2ull(procfile_lineword(ff, i, 2));
+
+ else if(unlikely(hash == Async_hash && !strcmp(s, "Async")))
+ io->Async += str2ull(procfile_lineword(ff, i, 2));
+
+ else if(unlikely(hash == Total_hash && !strcmp(s, "Total")))
+ io->Total += str2ull(procfile_lineword(ff, i, 2));
+*/
+ }
+
+ io->updated = 1;
+
+ if(unlikely(io->enabled == CONFIG_BOOLEAN_AUTO)) {
+ if(unlikely(io->Read || io->Write || netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))
+ io->enabled = CONFIG_BOOLEAN_YES;
+ else
+ io->delay_counter = cgroup_recheck_zero_blkio_every_iterations;
+ }
+ }
+}
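+
+// A v1 blkio file such as blkio.io_service_bytes looks like this (illustrative):
+//   8:0 Read 1234
+//   8:0 Write 5678
+//   8:0 Sync 6912
+//   Total 6912
+// word 1 selects the operation and word 2 is summed across devices.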
+
+static inline void cgroup2_read_blkio(struct blkio *io, unsigned int word_offset) {
+ if(unlikely(io->enabled == CONFIG_BOOLEAN_AUTO && io->delay_counter > 0)) {
+ io->delay_counter--;
+ return;
+ }
+
+ if(likely(io->filename)) {
+ static procfile *ff = NULL;
+
+ ff = procfile_reopen(ff, io->filename, NULL, PROCFILE_FLAG_DEFAULT);
+ if(unlikely(!ff)) {
+ io->updated = 0;
+ cgroups_check = 1;
+ return;
+ }
+
+ ff = procfile_readall(ff);
+ if(unlikely(!ff)) {
+ io->updated = 0;
+ cgroups_check = 1;
+ return;
+ }
+
+ unsigned long i, lines = procfile_lines(ff);
+
+ if (unlikely(lines < 1)) {
+ error("CGROUP: file '%s' should have 1+ lines.", io->filename);
+ io->updated = 0;
+ return;
+ }
+
+ io->Read = 0;
+ io->Write = 0;
+
+ for (i = 0; i < lines; i++) {
+ io->Read += str2ull(procfile_lineword(ff, i, 2 + word_offset));
+ io->Write += str2ull(procfile_lineword(ff, i, 4 + word_offset));
+ }
+
+ io->updated = 1;
+
+ if(unlikely(io->enabled == CONFIG_BOOLEAN_AUTO)) {
+ if(unlikely(io->Read || io->Write || netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))
+ io->enabled = CONFIG_BOOLEAN_YES;
+ else
+ io->delay_counter = cgroup_recheck_zero_blkio_every_iterations;
+ }
+ }
+}
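+
+// A v2 io.stat line looks like this (illustrative):
+//   8:0 rbytes=1234 wbytes=5678 rios=12 wios=34 dbytes=0 dios=0
+// assuming procfile's default separators also split on '=', the rbytes value
+// lands at word 2 and the wbytes value at word 4 (shifted by word_offset).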
+
+static inline void cgroup2_read_pressure(struct pressure *res) {
+ static procfile *ff = NULL;
+
+ if (likely(res->filename)) {
+ ff = procfile_reopen(ff, res->filename, " =", PROCFILE_FLAG_DEFAULT);
+ if (unlikely(!ff)) {
+ res->updated = 0;
+ cgroups_check = 1;
+ return;
+ }
+
+ ff = procfile_readall(ff);
+ if (unlikely(!ff)) {
+ res->updated = 0;
+ cgroups_check = 1;
+ return;
+ }
+
+ size_t lines = procfile_lines(ff);
+ if (lines < 1) {
+ error("CGROUP: file '%s' should have 1+ lines.", res->filename);
+ res->updated = 0;
+ return;
+ }
+
+ res->some.value10 = strtod(procfile_lineword(ff, 0, 2), NULL);
+ res->some.value60 = strtod(procfile_lineword(ff, 0, 4), NULL);
+ res->some.value300 = strtod(procfile_lineword(ff, 0, 6), NULL);
+
+ if (lines > 2) {
+ res->full.value10 = strtod(procfile_lineword(ff, 1, 2), NULL);
+ res->full.value60 = strtod(procfile_lineword(ff, 1, 4), NULL);
+ res->full.value300 = strtod(procfile_lineword(ff, 1, 6), NULL);
+ }
+
+ res->updated = 1;
+
+ if (unlikely(res->some.enabled == CONFIG_BOOLEAN_AUTO)) {
+ res->some.enabled = CONFIG_BOOLEAN_YES;
+ if (lines > 2) {
+ res->full.enabled = CONFIG_BOOLEAN_YES;
+ } else {
+ res->full.enabled = CONFIG_BOOLEAN_NO;
+ }
+ }
+ }
+}
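+
+// A pressure file (e.g. cpu.pressure) looks like this (illustrative values):
+//   some avg10=0.12 avg60=0.34 avg300=0.56 total=123456
+//   full avg10=0.00 avg60=0.00 avg300=0.00 total=0
+// with ' =' as separators the avg values land at words 2, 4 and 6; the
+// 'full' line exists only for some resources, hence the line-count check.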
+
+static inline void cgroup_read_memory(struct memory *mem, char parent_cg_is_unified) {
+ static procfile *ff = NULL;
+
+ // read detailed ram usage
+ if(likely(mem->filename_detailed)) {
+ if(unlikely(mem->enabled_detailed == CONFIG_BOOLEAN_AUTO && mem->delay_counter_detailed > 0)) {
+ mem->delay_counter_detailed--;
+ goto memory_next;
+ }
+
+ ff = procfile_reopen(ff, mem->filename_detailed, NULL, PROCFILE_FLAG_DEFAULT);
+ if(unlikely(!ff)) {
+ mem->updated_detailed = 0;
+ cgroups_check = 1;
+ goto memory_next;
+ }
+
+ ff = procfile_readall(ff);
+ if(unlikely(!ff)) {
+ mem->updated_detailed = 0;
+ cgroups_check = 1;
+ goto memory_next;
+ }
+
+ unsigned long i, lines = procfile_lines(ff);
+
+ if(unlikely(lines < 1)) {
+ error("CGROUP: file '%s' should have 1+ lines.", mem->filename_detailed);
+ mem->updated_detailed = 0;
+ goto memory_next;
+ }
+
+
+ if(unlikely(!mem->arl_base)) {
+ if(parent_cg_is_unified == 0){
+ mem->arl_base = arl_create("cgroup/memory", NULL, 60);
+
+ arl_expect(mem->arl_base, "total_cache", &mem->total_cache);
+ arl_expect(mem->arl_base, "total_rss", &mem->total_rss);
+ arl_expect(mem->arl_base, "total_rss_huge", &mem->total_rss_huge);
+ arl_expect(mem->arl_base, "total_mapped_file", &mem->total_mapped_file);
+ arl_expect(mem->arl_base, "total_writeback", &mem->total_writeback);
+ mem->arl_dirty = arl_expect(mem->arl_base, "total_dirty", &mem->total_dirty);
+ mem->arl_swap = arl_expect(mem->arl_base, "total_swap", &mem->total_swap);
+ arl_expect(mem->arl_base, "total_pgpgin", &mem->total_pgpgin);
+ arl_expect(mem->arl_base, "total_pgpgout", &mem->total_pgpgout);
+ arl_expect(mem->arl_base, "total_pgfault", &mem->total_pgfault);
+ arl_expect(mem->arl_base, "total_pgmajfault", &mem->total_pgmajfault);
+ } else {
+ mem->arl_base = arl_create("cgroup/memory", NULL, 60);
+
+ arl_expect(mem->arl_base, "anon", &mem->anon);
+ arl_expect(mem->arl_base, "kernel_stack", &mem->kernel_stack);
+ arl_expect(mem->arl_base, "slab", &mem->slab);
+ arl_expect(mem->arl_base, "sock", &mem->sock);
+ arl_expect(mem->arl_base, "anon_thp", &mem->anon_thp);
+ arl_expect(mem->arl_base, "file", &mem->total_mapped_file);
+ arl_expect(mem->arl_base, "file_writeback", &mem->total_writeback);
+ mem->arl_dirty = arl_expect(mem->arl_base, "file_dirty", &mem->total_dirty);
+ arl_expect(mem->arl_base, "pgfault", &mem->total_pgfault);
+ arl_expect(mem->arl_base, "pgmajfault", &mem->total_pgmajfault);
+ }
+ }
+
+ arl_begin(mem->arl_base);
+
+ for(i = 0; i < lines ; i++) {
+ if(arl_check(mem->arl_base,
+ procfile_lineword(ff, i, 0),
+ procfile_lineword(ff, i, 1))) break;
+ }
+
+ if(unlikely(mem->arl_dirty->flags & ARL_ENTRY_FLAG_FOUND))
+ mem->detailed_has_dirty = 1;
+
+ if(unlikely(parent_cg_is_unified == 0 && mem->arl_swap->flags & ARL_ENTRY_FLAG_FOUND))
+ mem->detailed_has_swap = 1;
+
+ // fprintf(stderr, "READ: '%s', cache: %llu, rss: %llu, rss_huge: %llu, mapped_file: %llu, writeback: %llu, dirty: %llu, swap: %llu, pgpgin: %llu, pgpgout: %llu, pgfault: %llu, pgmajfault: %llu, inactive_anon: %llu, active_anon: %llu, inactive_file: %llu, active_file: %llu, unevictable: %llu, hierarchical_memory_limit: %llu, total_cache: %llu, total_rss: %llu, total_rss_huge: %llu, total_mapped_file: %llu, total_writeback: %llu, total_dirty: %llu, total_swap: %llu, total_pgpgin: %llu, total_pgpgout: %llu, total_pgfault: %llu, total_pgmajfault: %llu, total_inactive_anon: %llu, total_active_anon: %llu, total_inactive_file: %llu, total_active_file: %llu, total_unevictable: %llu\n", mem->filename, mem->cache, mem->rss, mem->rss_huge, mem->mapped_file, mem->writeback, mem->dirty, mem->swap, mem->pgpgin, mem->pgpgout, mem->pgfault, mem->pgmajfault, mem->inactive_anon, mem->active_anon, mem->inactive_file, mem->active_file, mem->unevictable, mem->hierarchical_memory_limit, mem->total_cache, mem->total_rss, mem->total_rss_huge, mem->total_mapped_file, mem->total_writeback, mem->total_dirty, mem->total_swap, mem->total_pgpgin, mem->total_pgpgout, mem->total_pgfault, mem->total_pgmajfault, mem->total_inactive_anon, mem->total_active_anon, mem->total_inactive_file, mem->total_active_file, mem->total_unevictable);
+
+ mem->updated_detailed = 1;
+
+ if(unlikely(mem->enabled_detailed == CONFIG_BOOLEAN_AUTO)) {
+ if(( (!parent_cg_is_unified) && ( mem->total_cache || mem->total_dirty || mem->total_rss || mem->total_rss_huge || mem->total_mapped_file || mem->total_writeback
+ || mem->total_swap || mem->total_pgpgin || mem->total_pgpgout || mem->total_pgfault || mem->total_pgmajfault))
+ || (parent_cg_is_unified && ( mem->anon || mem->total_dirty || mem->kernel_stack || mem->slab || mem->sock || mem->total_writeback
+ || mem->anon_thp || mem->total_pgfault || mem->total_pgmajfault))
+ || netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES)
+ mem->enabled_detailed = CONFIG_BOOLEAN_YES;
+ else
+ mem->delay_counter_detailed = cgroup_recheck_zero_mem_detailed_every_iterations;
+ }
+ }
+
+memory_next:
+
+ // read usage_in_bytes
+ if(likely(mem->filename_usage_in_bytes)) {
+ mem->updated_usage_in_bytes = !read_single_number_file(mem->filename_usage_in_bytes, &mem->usage_in_bytes);
+ if(unlikely(mem->updated_usage_in_bytes && mem->enabled_usage_in_bytes == CONFIG_BOOLEAN_AUTO &&
+ (mem->usage_in_bytes || netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES)))
+ mem->enabled_usage_in_bytes = CONFIG_BOOLEAN_YES;
+ }
+
+ // read msw_usage_in_bytes
+ if(likely(mem->filename_msw_usage_in_bytes)) {
+ mem->updated_msw_usage_in_bytes = !read_single_number_file(mem->filename_msw_usage_in_bytes, &mem->msw_usage_in_bytes);
+ if(unlikely(mem->updated_msw_usage_in_bytes && mem->enabled_msw_usage_in_bytes == CONFIG_BOOLEAN_AUTO &&
+ (mem->msw_usage_in_bytes || netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES)))
+ mem->enabled_msw_usage_in_bytes = CONFIG_BOOLEAN_YES;
+ }
+
+ // read failcnt
+ if(likely(mem->filename_failcnt)) {
+ if(unlikely(mem->enabled_failcnt == CONFIG_BOOLEAN_AUTO && mem->delay_counter_failcnt > 0)) {
+ mem->updated_failcnt = 0;
+ mem->delay_counter_failcnt--;
+ }
+ else {
+ mem->updated_failcnt = !read_single_number_file(mem->filename_failcnt, &mem->failcnt);
+ if(unlikely(mem->updated_failcnt && mem->enabled_failcnt == CONFIG_BOOLEAN_AUTO)) {
+ if(unlikely(mem->failcnt || netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))
+ mem->enabled_failcnt = CONFIG_BOOLEAN_YES;
+ else
+ mem->delay_counter_failcnt = cgroup_recheck_zero_mem_failcnt_every_iterations;
+ }
+ }
+ }
+}
+
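+// Read all enabled metrics of a single cgroup, using the v1 readers
+// (cpuacct, memory, blkio) or the v2 (unified) readers as appropriate.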
+static inline void cgroup_read(struct cgroup *cg) {
+    debug(D_CGROUP, "reading metrics for cgroup '%s'", cg->id);
+ if(!(cg->options & CGROUP_OPTIONS_IS_UNIFIED)) {
+ cgroup_read_cpuacct_stat(&cg->cpuacct_stat);
+ cgroup_read_cpuacct_usage(&cg->cpuacct_usage);
+ cgroup_read_memory(&cg->memory, 0);
+ cgroup_read_blkio(&cg->io_service_bytes);
+ cgroup_read_blkio(&cg->io_serviced);
+ cgroup_read_blkio(&cg->throttle_io_service_bytes);
+ cgroup_read_blkio(&cg->throttle_io_serviced);
+ cgroup_read_blkio(&cg->io_merged);
+ cgroup_read_blkio(&cg->io_queued);
+ }
+ else {
+        //TODO: io_service_bytes and io_serviced use the same file; merge them into one function
+ cgroup2_read_blkio(&cg->io_service_bytes, 0);
+ cgroup2_read_blkio(&cg->io_serviced, 4);
+ cgroup2_read_cpuacct_stat(&cg->cpuacct_stat);
+ cgroup2_read_pressure(&cg->cpu_pressure);
+ cgroup2_read_pressure(&cg->io_pressure);
+ cgroup2_read_pressure(&cg->memory_pressure);
+ cgroup_read_memory(&cg->memory, 1);
+ }
+}
+
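+// Read the metrics of every enabled cgroup that has no pending renames.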
+static inline void read_all_cgroups(struct cgroup *root) {
+ debug(D_CGROUP, "reading metrics for all cgroups");
+
+ struct cgroup *cg;
+
+ for(cg = root; cg ; cg = cg->next)
+ if(cg->enabled && !cg->pending_renames)
+ cgroup_read(cg);
+}
+
+// ----------------------------------------------------------------------------
+// cgroup network interfaces
+
+#define CGROUP_NETWORK_INTERFACE_MAX_LINE 2048
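+// Run the external network interfaces script for this cgroup and register
+// every 'host guest' interface pair it prints, so that proc_net_dev.c can
+// rename the host-side device to its container-side name.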
+static inline void read_cgroup_network_interfaces(struct cgroup *cg) {
+ debug(D_CGROUP, "looking for the network interfaces of cgroup '%s' with chart id '%s' and title '%s'", cg->id, cg->chart_id, cg->chart_title);
+
+ pid_t cgroup_pid;
+ char command[CGROUP_NETWORK_INTERFACE_MAX_LINE + 1];
+
+ if(!(cg->options & CGROUP_OPTIONS_IS_UNIFIED)) {
+ snprintfz(command, CGROUP_NETWORK_INTERFACE_MAX_LINE, "exec %s --cgroup '%s%s'", cgroups_network_interface_script, cgroup_cpuacct_base, cg->id);
+ }
+ else {
+ snprintfz(command, CGROUP_NETWORK_INTERFACE_MAX_LINE, "exec %s --cgroup '%s%s'", cgroups_network_interface_script, cgroup_unified_base, cg->id);
+ }
+
+ debug(D_CGROUP, "executing command '%s' for cgroup '%s'", command, cg->id);
+ FILE *fp = mypopen(command, &cgroup_pid);
+ if(!fp) {
+ error("CGROUP: cannot popen(\"%s\", \"r\").", command);
+ return;
+ }
+
+ char *s;
+ char buffer[CGROUP_NETWORK_INTERFACE_MAX_LINE + 1];
+ while((s = fgets(buffer, CGROUP_NETWORK_INTERFACE_MAX_LINE, fp))) {
+ trim(s);
+
+ if(*s && *s != '\n') {
+ char *t = s;
+ while(*t && *t != ' ') t++;
+ if(*t == ' ') {
+ *t = '\0';
+ t++;
+ }
+
+ if(!*s) {
+ error("CGROUP: empty host interface returned by script");
+ continue;
+ }
+
+ if(!*t) {
+ error("CGROUP: empty guest interface returned by script");
+ continue;
+ }
+
+ struct cgroup_network_interface *i = callocz(1, sizeof(struct cgroup_network_interface));
+ i->host_device = strdupz(s);
+ i->container_device = strdupz(t);
+ i->next = cg->interfaces;
+ cg->interfaces = i;
+
+ info("CGROUP: cgroup '%s' has network interface '%s' as '%s'", cg->id, i->host_device, i->container_device);
+
+ // register a device rename to proc_net_dev.c
+ netdev_rename_device_add(i->host_device, i->container_device, cg->chart_id, cg->chart_labels);
+ }
+ }
+
+ mypclose(fp, cgroup_pid);
+ // debug(D_CGROUP, "closed command for cgroup '%s'", cg->id);
+}
+
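+// Unregister the device renames and free all network interfaces of this cgroup.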
+static inline void free_cgroup_network_interfaces(struct cgroup *cg) {
+ while(cg->interfaces) {
+ struct cgroup_network_interface *i = cg->interfaces;
+ cg->interfaces = i->next;
+
+ // delete the registration of proc_net_dev rename
+ netdev_rename_device_del(i->host_device);
+
+ freez((void *)i->host_device);
+ freez((void *)i->container_device);
+ freez((void *)i);
+ }
+}
+
+// ----------------------------------------------------------------------------
+// add/remove/find cgroup objects
+
+#define CGROUP_CHARTID_LINE_MAX 1024
+
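+// Turn a cgroup path into a chart title: strip the leading slash and
+// sanitize the result with netdata_fix_chart_name().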
+static inline char *cgroup_title_strdupz(const char *s) {
+ if(!s || !*s) s = "/";
+
+ if(*s == '/' && s[1] != '\0') s++;
+
+ char *r = strdupz(s);
+ netdata_fix_chart_name(r);
+
+ return r;
+}
+
+static inline char *cgroup_chart_id_strdupz(const char *s) {
+ if(!s || !*s) s = "/";
+
+ if(*s == '/' && s[1] != '\0') s++;
+
+ char *r = strdupz(s);
+ netdata_fix_chart_id(r);
+
+ return r;
+}
+
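+// Parse the kubernetes metadata emitted by the rename script. The input is
+// expected as: name key1="value1",key2="value2"; the key/value pairs are
+// added to the chart labels and the bare name is returned.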
+char *parse_k8s_data(struct label **labels, char *data)
+{
+ char *name = mystrsep(&data, " ");
+
+ if (!data) {
+ return name;
+ }
+
+ while (data) {
+ char *key = mystrsep(&data, "=");
+
+ char *value;
+ if (data && *data == ',') {
+ value = "";
+ *data++ = '\0';
+ } else {
+ value = mystrsep(&data, ",");
+ }
+ value = strip_double_quotes(value, 1);
+
+ if (!key || *key == '\0' || !value || *value == '\0')
+ continue;
+
+ *labels = add_label_to_list(*labels, key, value, LABEL_SOURCE_KUBERNETES);
+ }
+
+ return name;
+}
+
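+// Ask the external rename script for a better name for this cgroup.
+// Exit code 0 completes the rename, exit code 3 disables the cgroup;
+// k8s_ prefixed replies also carry chart labels (see parse_k8s_data()).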
+static inline void cgroup_get_chart_name(struct cgroup *cg) {
+ debug(D_CGROUP, "looking for the name of cgroup '%s' with chart id '%s' and title '%s'", cg->id, cg->chart_id, cg->chart_title);
+
+ pid_t cgroup_pid;
+ char command[CGROUP_CHARTID_LINE_MAX + 1];
+
+ snprintfz(command, CGROUP_CHARTID_LINE_MAX, "exec %s '%s'", cgroups_rename_script, cg->chart_id);
+
+ debug(D_CGROUP, "executing command \"%s\" for cgroup '%s'", command, cg->chart_id);
+ FILE *fp = mypopen(command, &cgroup_pid);
+ if(fp) {
+ // debug(D_CGROUP, "reading from command '%s' for cgroup '%s'", command, cg->id);
+ char buffer[CGROUP_CHARTID_LINE_MAX + 1];
+ char *s = fgets(buffer, CGROUP_CHARTID_LINE_MAX, fp);
+ // debug(D_CGROUP, "closing command for cgroup '%s'", cg->id);
+ int name_error = mypclose(fp, cgroup_pid);
+ // debug(D_CGROUP, "closed command for cgroup '%s'", cg->id);
+
+ if(s && *s && *s != '\n') {
+ debug(D_CGROUP, "cgroup '%s' should be renamed to '%s'", cg->chart_id, s);
+
+ s = trim(s);
+ if (s) {
+ if(likely(name_error==0))
+ cg->pending_renames = 0;
+ else if (unlikely(name_error==3)) {
+                    debug(D_CGROUP, "cgroup '%s' disabled due to rename command output", cg->chart_id);
+ cg->enabled = 0;
+ }
+
+ if (likely(cg->pending_renames < 2)) {
+ char *name = s;
+
+ if (!strncmp(s, "k8s_", 4)) {
+ free_label_list(cg->chart_labels);
+ name = parse_k8s_data(&cg->chart_labels, s);
+ }
+
+ freez(cg->chart_title);
+ cg->chart_title = cgroup_title_strdupz(name);
+
+ freez(cg->chart_id);
+ cg->chart_id = cgroup_chart_id_strdupz(name);
+ cg->hash_chart = simple_hash(cg->chart_id);
+ }
+ }
+ }
+ }
+ else
+ error("CGROUP: cannot popen(\"%s\", \"r\").", command);
+}
+
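+// Allocate and register a new cgroup. This applies the rename script, the
+// systemd services detection and the user configuration, and disables the
+// cgroup if another enabled cgroup already uses the same chart id.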
+static inline struct cgroup *cgroup_add(const char *id) {
+ if(!id || !*id) id = "/";
+ debug(D_CGROUP, "adding to list, cgroup with id '%s'", id);
+
+ if(cgroup_root_count >= cgroup_root_max) {
+ info("CGROUP: maximum number of cgroups reached (%d). Not adding cgroup '%s'", cgroup_root_count, id);
+ return NULL;
+ }
+
+ int def = simple_pattern_matches(enabled_cgroup_patterns, id)?cgroup_enable_new_cgroups_detected_at_runtime:0;
+ struct cgroup *cg = callocz(1, sizeof(struct cgroup));
+
+ cg->id = strdupz(id);
+ cg->hash = simple_hash(cg->id);
+
+ cg->chart_title = cgroup_title_strdupz(id);
+
+ cg->chart_id = cgroup_chart_id_strdupz(id);
+ cg->hash_chart = simple_hash(cg->chart_id);
+
+ if(cgroup_use_unified_cgroups) cg->options |= CGROUP_OPTIONS_IS_UNIFIED;
+
+ if(!discovered_cgroup_root)
+ discovered_cgroup_root = cg;
+ else {
+ // append it
+ struct cgroup *e;
+ for(e = discovered_cgroup_root; e->discovered_next ;e = e->discovered_next) ;
+ e->discovered_next = cg;
+ }
+
+ cgroup_root_count++;
+
+ // fix the chart_id and title by calling the external script
+ if(simple_pattern_matches(enabled_cgroup_renames, cg->id)) {
+
+ cg->pending_renames = 2;
+ cgroup_get_chart_name(cg);
+
+ debug(D_CGROUP, "cgroup '%s' renamed to '%s' (title: '%s')", cg->id, cg->chart_id, cg->chart_title);
+ }
+ else
+ debug(D_CGROUP, "cgroup '%s' will not be renamed - it matches the list of disabled cgroup renames (will be shown as '%s')", cg->id, cg->chart_id);
+
+ int user_configurable = 1;
+
+ // check if this cgroup should be a systemd service
+ if(cgroup_enable_systemd_services) {
+ if(simple_pattern_matches(systemd_services_cgroups, cg->id) ||
+ simple_pattern_matches(systemd_services_cgroups, cg->chart_id)) {
+ debug(D_CGROUP, "cgroup '%s' with chart id '%s' (title: '%s') matches systemd services cgroups", cg->id, cg->chart_id, cg->chart_title);
+
+ char buffer[CGROUP_CHARTID_LINE_MAX + 1];
+ cg->options |= CGROUP_OPTIONS_SYSTEM_SLICE_SERVICE;
+
+ strncpy(buffer, cg->id, CGROUP_CHARTID_LINE_MAX);
+ char *s = buffer;
+
+ //freez(cg->chart_id);
+ //cg->chart_id = cgroup_chart_id_strdupz(s);
+ //cg->hash_chart = simple_hash(cg->chart_id);
+
+ // skip to the last slash
+ size_t len = strlen(s);
+ while(len--) if(unlikely(s[len] == '/')) break;
+ if(len) s = &s[len + 1];
+
+ // remove extension
+ len = strlen(s);
+ while(len--) if(unlikely(s[len] == '.')) break;
+ if(len) s[len] = '\0';
+
+ freez(cg->chart_title);
+ cg->chart_title = cgroup_title_strdupz(s);
+
+ cg->enabled = 1;
+ user_configurable = 0;
+
+ debug(D_CGROUP, "cgroup '%s' renamed to '%s' (title: '%s')", cg->id, cg->chart_id, cg->chart_title);
+ }
+ else
+ debug(D_CGROUP, "cgroup '%s' with chart id '%s' (title: '%s') does not match systemd services groups", cg->id, cg->chart_id, cg->chart_title);
+ }
+
+ if(user_configurable) {
+        // allow the user to enable/disable this individually
+ char option[FILENAME_MAX + 1];
+ snprintfz(option, FILENAME_MAX, "enable cgroup %s", cg->chart_title);
+ cg->enabled = (char) config_get_boolean("plugin:cgroups", option, def);
+ }
+
+ // detect duplicate cgroups
+ if(cg->enabled) {
+ struct cgroup *t;
+ for (t = discovered_cgroup_root; t; t = t->discovered_next) {
+ if (t != cg && t->enabled && t->hash_chart == cg->hash_chart && !strcmp(t->chart_id, cg->chart_id)) {
+ // TODO: use it after refactoring if system.slice might be scanned before init.scope/system.slice
+ //
+ // if (!strncmp(t->id, "/system.slice/", 14) && !strncmp(cg->id, "/init.scope/system.slice/", 25)) {
+ // error("CGROUP: chart id '%s' already exists with id '%s' and is enabled. Swapping them by enabling cgroup with id '%s' and disabling cgroup with id '%s'.",
+ // cg->chart_id, t->id, cg->id, t->id);
+ // t->enabled = 0;
+ // t->options |= CGROUP_OPTIONS_DISABLED_DUPLICATE;
+ // }
+ // else {}
+ //
+ // https://github.com/netdata/netdata/issues/797#issuecomment-241248884
+ error("CGROUP: chart id '%s' already exists with id '%s' and is enabled and available. Disabling cgroup with id '%s'.",
+ cg->chart_id, t->id, cg->id);
+ cg->enabled = 0;
+ cg->options |= CGROUP_OPTIONS_DISABLED_DUPLICATE;
+
+ break;
+ }
+ }
+ }
+
+ if(cg->enabled && !cg->pending_renames && !(cg->options & CGROUP_OPTIONS_SYSTEM_SLICE_SERVICE))
+ read_cgroup_network_interfaces(cg);
+
+ debug(D_CGROUP, "ADDED CGROUP: '%s' with chart id '%s' and title '%s' as %s (default was %s)", cg->id, cg->chart_id, cg->chart_title, (cg->enabled)?"enabled":"disabled", (def)?"enabled":"disabled");
+
+ return cg;
+}
+
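+// Mark the pressure charts as obsolete and release the pressure filename.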
+static inline void free_pressure(struct pressure *res) {
+ if (res->some.st) rrdset_is_obsolete(res->some.st);
+ if (res->full.st) rrdset_is_obsolete(res->full.st);
+ freez(res->filename);
+}
+
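+// Mark all the charts of this cgroup as obsolete and free everything it owns.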
+static inline void cgroup_free(struct cgroup *cg) {
+ debug(D_CGROUP, "Removing cgroup '%s' with chart id '%s' (was %s and %s)", cg->id, cg->chart_id, (cg->enabled)?"enabled":"disabled", (cg->available)?"available":"not available");
+
+ if(cg->st_cpu) rrdset_is_obsolete(cg->st_cpu);
+ if(cg->st_cpu_limit) rrdset_is_obsolete(cg->st_cpu_limit);
+ if(cg->st_cpu_per_core) rrdset_is_obsolete(cg->st_cpu_per_core);
+ if(cg->st_mem) rrdset_is_obsolete(cg->st_mem);
+ if(cg->st_writeback) rrdset_is_obsolete(cg->st_writeback);
+ if(cg->st_mem_activity) rrdset_is_obsolete(cg->st_mem_activity);
+ if(cg->st_pgfaults) rrdset_is_obsolete(cg->st_pgfaults);
+ if(cg->st_mem_usage) rrdset_is_obsolete(cg->st_mem_usage);
+ if(cg->st_mem_usage_limit) rrdset_is_obsolete(cg->st_mem_usage_limit);
+ if(cg->st_mem_failcnt) rrdset_is_obsolete(cg->st_mem_failcnt);
+ if(cg->st_io) rrdset_is_obsolete(cg->st_io);
+ if(cg->st_serviced_ops) rrdset_is_obsolete(cg->st_serviced_ops);
+ if(cg->st_throttle_io) rrdset_is_obsolete(cg->st_throttle_io);
+ if(cg->st_throttle_serviced_ops) rrdset_is_obsolete(cg->st_throttle_serviced_ops);
+ if(cg->st_queued_ops) rrdset_is_obsolete(cg->st_queued_ops);
+ if(cg->st_merged_ops) rrdset_is_obsolete(cg->st_merged_ops);
+
+ freez(cg->filename_cpuset_cpus);
+ freez(cg->filename_cpu_cfs_period);
+ freez(cg->filename_cpu_cfs_quota);
+ freez(cg->filename_memory_limit);
+ freez(cg->filename_memoryswap_limit);
+
+ free_cgroup_network_interfaces(cg);
+
+ freez(cg->cpuacct_usage.cpu_percpu);
+
+ freez(cg->cpuacct_stat.filename);
+ freez(cg->cpuacct_usage.filename);
+
+ arl_free(cg->memory.arl_base);
+ freez(cg->memory.filename_detailed);
+ freez(cg->memory.filename_failcnt);
+ freez(cg->memory.filename_usage_in_bytes);
+ freez(cg->memory.filename_msw_usage_in_bytes);
+
+ freez(cg->io_service_bytes.filename);
+ freez(cg->io_serviced.filename);
+
+ freez(cg->throttle_io_service_bytes.filename);
+ freez(cg->throttle_io_serviced.filename);
+
+ freez(cg->io_merged.filename);
+ freez(cg->io_queued.filename);
+
+ free_pressure(&cg->cpu_pressure);
+ free_pressure(&cg->io_pressure);
+ free_pressure(&cg->memory_pressure);
+
+ freez(cg->id);
+ freez(cg->chart_id);
+ freez(cg->chart_title);
+
+ free_label_list(cg->chart_labels);
+
+ freez(cg);
+
+ cgroup_root_count--;
+}
+
+// find if a given cgroup exists
+static inline struct cgroup *cgroup_find(const char *id) {
+ debug(D_CGROUP, "searching for cgroup '%s'", id);
+
+ uint32_t hash = simple_hash(id);
+
+ struct cgroup *cg;
+ for(cg = discovered_cgroup_root; cg ; cg = cg->discovered_next) {
+ if(hash == cg->hash && strcmp(id, cg->id) == 0)
+ break;
+ }
+
+ debug(D_CGROUP, "cgroup '%s' %s in memory", id, (cg)?"found":"not found");
+ return cg;
+}
+
+// ----------------------------------------------------------------------------
+// detect running cgroups
+
+// callback for find_dir_in_subdirs()
+static inline void found_subdir_in_dir(const char *dir) {
+ debug(D_CGROUP, "examining cgroup dir '%s'", dir);
+
+ struct cgroup *cg = cgroup_find(dir);
+ if(!cg) {
+ if(*dir && cgroup_max_depth > 0) {
+ int depth = 0;
+ const char *s;
+
+ for(s = dir; *s ;s++)
+ if(unlikely(*s == '/'))
+ depth++;
+
+ if(depth > cgroup_max_depth) {
+ info("CGROUP: '%s' is too deep (%d, while max is %d)", dir, depth, cgroup_max_depth);
+ return;
+ }
+ }
+ // debug(D_CGROUP, "will add dir '%s' as cgroup", dir);
+ cg = cgroup_add(dir);
+ }
+
+ if(cg) {
+        // delay the renaming of the cgroup, and the search for its network interfaces, to cope with the lag of docker while a container is starting
+ if(unlikely(cg->pending_renames == 1)) {
+ // fix the chart_id and title by calling the external script
+ if(simple_pattern_matches(enabled_cgroup_renames, cg->id)) {
+
+ cgroup_get_chart_name(cg);
+ cg->pending_renames = 0;
+
+ if(cg->enabled && !(cg->options & CGROUP_OPTIONS_SYSTEM_SLICE_SERVICE))
+ read_cgroup_network_interfaces(cg);
+
+ debug(D_CGROUP, "cgroup '%s' renamed to '%s' (title: '%s')", cg->id, cg->chart_id, cg->chart_title);
+ }
+ else
+ debug(D_CGROUP, "cgroup '%s' will not be renamed - it matches the list of disabled cgroup renames (will be shown as '%s')", cg->id, cg->chart_id);
+ }
+
+ cg->available = 1;
+ }
+}
+
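+// Recursively walk 'base', calling the callback with the path of every
+// sub-directory relative to 'base', honoring the 'search for cgroups
+// under ...' configuration. Returns the number of directories visited,
+// or -1 when 'this' cannot be opened.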
+static inline int find_dir_in_subdirs(const char *base, const char *this, void (*callback)(const char *)) {
+ if(!this) this = base;
+    debug(D_CGROUP, "searching for directories in '%s' (base '%s')", this, base);
+
+ size_t dirlen = strlen(this), baselen = strlen(base);
+
+ int ret = -1;
+ int enabled = -1;
+
+ const char *relative_path = &this[baselen];
+ if(!*relative_path) relative_path = "/";
+
+ DIR *dir = opendir(this);
+ if(!dir) {
+        error("CGROUP: cannot read directory '%s'", this);
+ return ret;
+ }
+ ret = 1;
+
+ callback(relative_path);
+
+ struct dirent *de = NULL;
+ while((de = readdir(dir))) {
+ if(de->d_type == DT_DIR
+ && (
+ (de->d_name[0] == '.' && de->d_name[1] == '\0')
+ || (de->d_name[0] == '.' && de->d_name[1] == '.' && de->d_name[2] == '\0')
+ ))
+ continue;
+
+ if(de->d_type == DT_DIR) {
+ if(enabled == -1) {
+ const char *r = relative_path;
+ if(*r == '\0') r = "/";
+
+                // do not descend into directories we are not interested in
+ int def = simple_pattern_matches(enabled_cgroup_paths, r);
+
+ // we check for this option here
+ // so that the config will not have settings
+ // for leaf directories
+ char option[FILENAME_MAX + 1];
+ snprintfz(option, FILENAME_MAX, "search for cgroups under %s", r);
+ option[FILENAME_MAX] = '\0';
+ enabled = config_get_boolean("plugin:cgroups", option, def);
+ }
+
+ if(enabled) {
+ char *s = mallocz(dirlen + strlen(de->d_name) + 2);
+ strcpy(s, this);
+ strcat(s, "/");
+ strcat(s, de->d_name);
+ int ret2 = find_dir_in_subdirs(base, s, callback);
+ if(ret2 > 0) ret += ret2;
+ freez(s);
+ }
+ }
+ }
+
+ closedir(dir);
+ return ret;
+}
+
+static inline void mark_all_cgroups_as_not_available() {
+ debug(D_CGROUP, "marking all cgroups as not available");
+
+ struct cgroup *cg;
+
+ // mark all as not available
+ for(cg = discovered_cgroup_root; cg ; cg = cg->discovered_next) {
+ cg->available = 0;
+ }
+}
+
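+// For every available cgroup, check which stat files exist under the v1
+// controller hierarchies (or the unified hierarchy) and remember their
+// filenames, enabling the collection of the corresponding metrics.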
+static inline void update_filenames()
+{
+ struct cgroup *cg;
+ struct stat buf;
+ for(cg = discovered_cgroup_root; cg ; cg = cg->discovered_next) {
+ // fprintf(stderr, " >>> CGROUP '%s' (%u - %s) with name '%s'\n", cg->id, cg->hash, cg->available?"available":"stopped", cg->name);
+
+ if(unlikely(cg->pending_renames))
+ cg->pending_renames--;
+
+ if(unlikely(!cg->available || cg->pending_renames))
+ continue;
+
+ debug(D_CGROUP, "checking paths for cgroup '%s'", cg->id);
+
+ // check for newly added cgroups
+ // and update the filenames they read
+ char filename[FILENAME_MAX + 1];
+ if(!cgroup_use_unified_cgroups) {
+ if(unlikely(cgroup_enable_cpuacct_stat && !cg->cpuacct_stat.filename)) {
+ snprintfz(filename, FILENAME_MAX, "%s%s/cpuacct.stat", cgroup_cpuacct_base, cg->id);
+ if(likely(stat(filename, &buf) != -1)) {
+ cg->cpuacct_stat.filename = strdupz(filename);
+ cg->cpuacct_stat.enabled = cgroup_enable_cpuacct_stat;
+ snprintfz(filename, FILENAME_MAX, "%s%s/cpuset.cpus", cgroup_cpuset_base, cg->id);
+ cg->filename_cpuset_cpus = strdupz(filename);
+ snprintfz(filename, FILENAME_MAX, "%s%s/cpu.cfs_period_us", cgroup_cpuacct_base, cg->id);
+ cg->filename_cpu_cfs_period = strdupz(filename);
+ snprintfz(filename, FILENAME_MAX, "%s%s/cpu.cfs_quota_us", cgroup_cpuacct_base, cg->id);
+ cg->filename_cpu_cfs_quota = strdupz(filename);
+ debug(D_CGROUP, "cpuacct.stat filename for cgroup '%s': '%s'", cg->id, cg->cpuacct_stat.filename);
+ }
+ else
+ debug(D_CGROUP, "cpuacct.stat file for cgroup '%s': '%s' does not exist.", cg->id, filename);
+ }
+
+ if(unlikely(cgroup_enable_cpuacct_usage && !cg->cpuacct_usage.filename && !(cg->options & CGROUP_OPTIONS_SYSTEM_SLICE_SERVICE))) {
+ snprintfz(filename, FILENAME_MAX, "%s%s/cpuacct.usage_percpu", cgroup_cpuacct_base, cg->id);
+ if(likely(stat(filename, &buf) != -1)) {
+ cg->cpuacct_usage.filename = strdupz(filename);
+ cg->cpuacct_usage.enabled = cgroup_enable_cpuacct_usage;
+ debug(D_CGROUP, "cpuacct.usage_percpu filename for cgroup '%s': '%s'", cg->id, cg->cpuacct_usage.filename);
+ }
+ else
+ debug(D_CGROUP, "cpuacct.usage_percpu file for cgroup '%s': '%s' does not exist.", cg->id, filename);
+ }
+
+ if(unlikely((cgroup_enable_detailed_memory || cgroup_used_memory_without_cache) && !cg->memory.filename_detailed && (cgroup_used_memory_without_cache || cgroup_enable_systemd_services_detailed_memory || !(cg->options & CGROUP_OPTIONS_SYSTEM_SLICE_SERVICE)))) {
+ snprintfz(filename, FILENAME_MAX, "%s%s/memory.stat", cgroup_memory_base, cg->id);
+ if(likely(stat(filename, &buf) != -1)) {
+ cg->memory.filename_detailed = strdupz(filename);
+ cg->memory.enabled_detailed = (cgroup_enable_detailed_memory == CONFIG_BOOLEAN_YES)?CONFIG_BOOLEAN_YES:CONFIG_BOOLEAN_AUTO;
+ debug(D_CGROUP, "memory.stat filename for cgroup '%s': '%s'", cg->id, cg->memory.filename_detailed);
+ }
+ else
+ debug(D_CGROUP, "memory.stat file for cgroup '%s': '%s' does not exist.", cg->id, filename);
+ }
+
+ if(unlikely(cgroup_enable_memory && !cg->memory.filename_usage_in_bytes)) {
+ snprintfz(filename, FILENAME_MAX, "%s%s/memory.usage_in_bytes", cgroup_memory_base, cg->id);
+ if(likely(stat(filename, &buf) != -1)) {
+ cg->memory.filename_usage_in_bytes = strdupz(filename);
+ cg->memory.enabled_usage_in_bytes = cgroup_enable_memory;
+ debug(D_CGROUP, "memory.usage_in_bytes filename for cgroup '%s': '%s'", cg->id, cg->memory.filename_usage_in_bytes);
+ snprintfz(filename, FILENAME_MAX, "%s%s/memory.limit_in_bytes", cgroup_memory_base, cg->id);
+ cg->filename_memory_limit = strdupz(filename);
+ }
+ else
+ debug(D_CGROUP, "memory.usage_in_bytes file for cgroup '%s': '%s' does not exist.", cg->id, filename);
+ }
+
+ if(unlikely(cgroup_enable_swap && !cg->memory.filename_msw_usage_in_bytes)) {
+ snprintfz(filename, FILENAME_MAX, "%s%s/memory.memsw.usage_in_bytes", cgroup_memory_base, cg->id);
+ if(likely(stat(filename, &buf) != -1)) {
+ cg->memory.filename_msw_usage_in_bytes = strdupz(filename);
+ cg->memory.enabled_msw_usage_in_bytes = cgroup_enable_swap;
+ snprintfz(filename, FILENAME_MAX, "%s%s/memory.memsw.limit_in_bytes", cgroup_memory_base, cg->id);
+ cg->filename_memoryswap_limit = strdupz(filename);
+ debug(D_CGROUP, "memory.msw_usage_in_bytes filename for cgroup '%s': '%s'", cg->id, cg->memory.filename_msw_usage_in_bytes);
+ }
+ else
+ debug(D_CGROUP, "memory.msw_usage_in_bytes file for cgroup '%s': '%s' does not exist.", cg->id, filename);
+ }
+
+ if(unlikely(cgroup_enable_memory_failcnt && !cg->memory.filename_failcnt)) {
+ snprintfz(filename, FILENAME_MAX, "%s%s/memory.failcnt", cgroup_memory_base, cg->id);
+ if(likely(stat(filename, &buf) != -1)) {
+ cg->memory.filename_failcnt = strdupz(filename);
+ cg->memory.enabled_failcnt = cgroup_enable_memory_failcnt;
+ debug(D_CGROUP, "memory.failcnt filename for cgroup '%s': '%s'", cg->id, cg->memory.filename_failcnt);
+ }
+ else
+ debug(D_CGROUP, "memory.failcnt file for cgroup '%s': '%s' does not exist.", cg->id, filename);
+ }
+
+ if(unlikely(cgroup_enable_blkio_io && !cg->io_service_bytes.filename)) {
+ snprintfz(filename, FILENAME_MAX, "%s%s/blkio.io_service_bytes", cgroup_blkio_base, cg->id);
+ if(likely(stat(filename, &buf) != -1)) {
+ cg->io_service_bytes.filename = strdupz(filename);
+ cg->io_service_bytes.enabled = cgroup_enable_blkio_io;
+ debug(D_CGROUP, "io_service_bytes filename for cgroup '%s': '%s'", cg->id, cg->io_service_bytes.filename);
+ }
+ else
+ debug(D_CGROUP, "io_service_bytes file for cgroup '%s': '%s' does not exist.", cg->id, filename);
+ }
+
+ if(unlikely(cgroup_enable_blkio_ops && !cg->io_serviced.filename)) {
+ snprintfz(filename, FILENAME_MAX, "%s%s/blkio.io_serviced", cgroup_blkio_base, cg->id);
+ if(likely(stat(filename, &buf) != -1)) {
+ cg->io_serviced.filename = strdupz(filename);
+ cg->io_serviced.enabled = cgroup_enable_blkio_ops;
+ debug(D_CGROUP, "io_serviced filename for cgroup '%s': '%s'", cg->id, cg->io_serviced.filename);
+ }
+ else
+ debug(D_CGROUP, "io_serviced file for cgroup '%s': '%s' does not exist.", cg->id, filename);
+ }
+
+ if(unlikely(cgroup_enable_blkio_throttle_io && !cg->throttle_io_service_bytes.filename)) {
+ snprintfz(filename, FILENAME_MAX, "%s%s/blkio.throttle.io_service_bytes", cgroup_blkio_base, cg->id);
+ if(likely(stat(filename, &buf) != -1)) {
+ cg->throttle_io_service_bytes.filename = strdupz(filename);
+ cg->throttle_io_service_bytes.enabled = cgroup_enable_blkio_throttle_io;
+ debug(D_CGROUP, "throttle_io_service_bytes filename for cgroup '%s': '%s'", cg->id, cg->throttle_io_service_bytes.filename);
+ }
+ else
+ debug(D_CGROUP, "throttle_io_service_bytes file for cgroup '%s': '%s' does not exist.", cg->id, filename);
+ }
+
+ if(unlikely(cgroup_enable_blkio_throttle_ops && !cg->throttle_io_serviced.filename)) {
+ snprintfz(filename, FILENAME_MAX, "%s%s/blkio.throttle.io_serviced", cgroup_blkio_base, cg->id);
+ if(likely(stat(filename, &buf) != -1)) {
+ cg->throttle_io_serviced.filename = strdupz(filename);
+ cg->throttle_io_serviced.enabled = cgroup_enable_blkio_throttle_ops;
+ debug(D_CGROUP, "throttle_io_serviced filename for cgroup '%s': '%s'", cg->id, cg->throttle_io_serviced.filename);
+ }
+ else
+ debug(D_CGROUP, "throttle_io_serviced file for cgroup '%s': '%s' does not exist.", cg->id, filename);
+ }
+
+ if(unlikely(cgroup_enable_blkio_merged_ops && !cg->io_merged.filename)) {
+ snprintfz(filename, FILENAME_MAX, "%s%s/blkio.io_merged", cgroup_blkio_base, cg->id);
+ if(likely(stat(filename, &buf) != -1)) {
+ cg->io_merged.filename = strdupz(filename);
+ cg->io_merged.enabled = cgroup_enable_blkio_merged_ops;
+ debug(D_CGROUP, "io_merged filename for cgroup '%s': '%s'", cg->id, cg->io_merged.filename);
+ }
+ else
+ debug(D_CGROUP, "io_merged file for cgroup '%s': '%s' does not exist.", cg->id, filename);
+ }
+
+ if(unlikely(cgroup_enable_blkio_queued_ops && !cg->io_queued.filename)) {
+ snprintfz(filename, FILENAME_MAX, "%s%s/blkio.io_queued", cgroup_blkio_base, cg->id);
+ if(likely(stat(filename, &buf) != -1)) {
+ cg->io_queued.filename = strdupz(filename);
+ cg->io_queued.enabled = cgroup_enable_blkio_queued_ops;
+ debug(D_CGROUP, "io_queued filename for cgroup '%s': '%s'", cg->id, cg->io_queued.filename);
+ }
+ else
+ debug(D_CGROUP, "io_queued file for cgroup '%s': '%s' does not exist.", cg->id, filename);
+ }
+ }
+ else if(likely(cgroup_unified_exist)) {
+ if(unlikely(cgroup_enable_blkio_io && !cg->io_service_bytes.filename)) {
+ snprintfz(filename, FILENAME_MAX, "%s%s/io.stat", cgroup_unified_base, cg->id);
+ if(likely(stat(filename, &buf) != -1)) {
+ cg->io_service_bytes.filename = strdupz(filename);
+ cg->io_service_bytes.enabled = cgroup_enable_blkio_io;
+ debug(D_CGROUP, "io.stat filename for unified cgroup '%s': '%s'", cg->id, cg->io_service_bytes.filename);
+ } else
+ debug(D_CGROUP, "io.stat file for unified cgroup '%s': '%s' does not exist.", cg->id, filename);
+ }
+ if (unlikely(cgroup_enable_blkio_ops && !cg->io_serviced.filename)) {
+ snprintfz(filename, FILENAME_MAX, "%s%s/io.stat", cgroup_unified_base, cg->id);
+ if (likely(stat(filename, &buf) != -1)) {
+ cg->io_serviced.filename = strdupz(filename);
+ cg->io_serviced.enabled = cgroup_enable_blkio_ops;
+                    debug(D_CGROUP, "io.stat filename for unified cgroup '%s': '%s'", cg->id, cg->io_serviced.filename);
+ } else
+ debug(D_CGROUP, "io.stat file for unified cgroup '%s': '%s' does not exist.", cg->id, filename);
+ }
+ if(unlikely(cgroup_enable_cpuacct_stat && !cg->cpuacct_stat.filename)) {
+ snprintfz(filename, FILENAME_MAX, "%s%s/cpu.stat", cgroup_unified_base, cg->id);
+ if(likely(stat(filename, &buf) != -1)) {
+ cg->cpuacct_stat.filename = strdupz(filename);
+ cg->cpuacct_stat.enabled = cgroup_enable_cpuacct_stat;
+ cg->filename_cpuset_cpus = NULL;
+ cg->filename_cpu_cfs_period = NULL;
+ snprintfz(filename, FILENAME_MAX, "%s%s/cpu.max", cgroup_unified_base, cg->id);
+ cg->filename_cpu_cfs_quota = strdupz(filename);
+ debug(D_CGROUP, "cpu.stat filename for unified cgroup '%s': '%s'", cg->id, cg->cpuacct_stat.filename);
+ }
+ else
+ debug(D_CGROUP, "cpu.stat file for unified cgroup '%s': '%s' does not exist.", cg->id, filename);
+ }
+ if(unlikely((cgroup_enable_detailed_memory || cgroup_used_memory_without_cache) && !cg->memory.filename_detailed && (cgroup_used_memory_without_cache || cgroup_enable_systemd_services_detailed_memory || !(cg->options & CGROUP_OPTIONS_SYSTEM_SLICE_SERVICE)))) {
+ snprintfz(filename, FILENAME_MAX, "%s%s/memory.stat", cgroup_unified_base, cg->id);
+ if(likely(stat(filename, &buf) != -1)) {
+ cg->memory.filename_detailed = strdupz(filename);
+ cg->memory.enabled_detailed = (cgroup_enable_detailed_memory == CONFIG_BOOLEAN_YES)?CONFIG_BOOLEAN_YES:CONFIG_BOOLEAN_AUTO;
+ debug(D_CGROUP, "memory.stat filename for cgroup '%s': '%s'", cg->id, cg->memory.filename_detailed);
+ }
+ else
+ debug(D_CGROUP, "memory.stat file for cgroup '%s': '%s' does not exist.", cg->id, filename);
+ }
+
+ if(unlikely(cgroup_enable_memory && !cg->memory.filename_usage_in_bytes)) {
+ snprintfz(filename, FILENAME_MAX, "%s%s/memory.current", cgroup_unified_base, cg->id);
+ if(likely(stat(filename, &buf) != -1)) {
+ cg->memory.filename_usage_in_bytes = strdupz(filename);
+ cg->memory.enabled_usage_in_bytes = cgroup_enable_memory;
+ debug(D_CGROUP, "memory.current filename for cgroup '%s': '%s'", cg->id, cg->memory.filename_usage_in_bytes);
+ snprintfz(filename, FILENAME_MAX, "%s%s/memory.max", cgroup_unified_base, cg->id);
+ cg->filename_memory_limit = strdupz(filename);
+ }
+ else
+ debug(D_CGROUP, "memory.current file for cgroup '%s': '%s' does not exist.", cg->id, filename);
+ }
+
+ if(unlikely(cgroup_enable_swap && !cg->memory.filename_msw_usage_in_bytes)) {
+ snprintfz(filename, FILENAME_MAX, "%s%s/memory.swap.current", cgroup_unified_base, cg->id);
+ if(likely(stat(filename, &buf) != -1)) {
+ cg->memory.filename_msw_usage_in_bytes = strdupz(filename);
+ cg->memory.enabled_msw_usage_in_bytes = cgroup_enable_swap;
+ snprintfz(filename, FILENAME_MAX, "%s%s/memory.swap.max", cgroup_unified_base, cg->id);
+ cg->filename_memoryswap_limit = strdupz(filename);
+ debug(D_CGROUP, "memory.swap.current filename for cgroup '%s': '%s'", cg->id, cg->memory.filename_msw_usage_in_bytes);
+ }
+ else
+ debug(D_CGROUP, "memory.swap file for cgroup '%s': '%s' does not exist.", cg->id, filename);
+ }
+
+ if (unlikely(cgroup_enable_pressure_cpu && !cg->cpu_pressure.filename)) {
+ snprintfz(filename, FILENAME_MAX, "%s%s/cpu.pressure", cgroup_unified_base, cg->id);
+ if (likely(stat(filename, &buf) != -1)) {
+ cg->cpu_pressure.filename = strdupz(filename);
+ cg->cpu_pressure.some.enabled = cgroup_enable_pressure_cpu;
+ cg->cpu_pressure.full.enabled = CONFIG_BOOLEAN_NO;
+ debug(D_CGROUP, "cpu.pressure filename for cgroup '%s': '%s'", cg->id, cg->cpu_pressure.filename);
+ } else {
+ debug(D_CGROUP, "cpu.pressure file for cgroup '%s': '%s' does not exist", cg->id, filename);
+ }
+ }
+
+ if (unlikely((cgroup_enable_pressure_io_some || cgroup_enable_pressure_io_full) && !cg->io_pressure.filename)) {
+ snprintfz(filename, FILENAME_MAX, "%s%s/io.pressure", cgroup_unified_base, cg->id);
+ if (likely(stat(filename, &buf) != -1)) {
+ cg->io_pressure.filename = strdupz(filename);
+ cg->io_pressure.some.enabled = cgroup_enable_pressure_io_some;
+ cg->io_pressure.full.enabled = cgroup_enable_pressure_io_full;
+ debug(D_CGROUP, "io.pressure filename for cgroup '%s': '%s'", cg->id, cg->io_pressure.filename);
+ } else {
+ debug(D_CGROUP, "io.pressure file for cgroup '%s': '%s' does not exist", cg->id, filename);
+ }
+ }
+
+ if (unlikely((cgroup_enable_pressure_memory_some || cgroup_enable_pressure_memory_full) && !cg->memory_pressure.filename)) {
+ snprintfz(filename, FILENAME_MAX, "%s%s/memory.pressure", cgroup_unified_base, cg->id);
+ if (likely(stat(filename, &buf) != -1)) {
+ cg->memory_pressure.filename = strdupz(filename);
+ cg->memory_pressure.some.enabled = cgroup_enable_pressure_memory_some;
+ cg->memory_pressure.full.enabled = cgroup_enable_pressure_memory_full;
+ debug(D_CGROUP, "memory.pressure filename for cgroup '%s': '%s'", cg->id, cg->memory_pressure.filename);
+ } else {
+ debug(D_CGROUP, "memory.pressure file for cgroup '%s': '%s' does not exist", cg->id, filename);
+ }
+ }
+ }
+ }
+}
+
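+// Remove the cgroups that are no longer available; when a removed cgroup
+// had a disabled duplicate (same chart id), re-enable that duplicate.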
+static inline void cleanup_all_cgroups() {
+ struct cgroup *cg = discovered_cgroup_root, *last = NULL;
+
+ for(; cg ;) {
+ if(!cg->available) {
+ // enable the first duplicate cgroup
+ {
+ struct cgroup *t;
+ for(t = discovered_cgroup_root; t ; t = t->discovered_next) {
+ if(t != cg && t->available && !t->enabled && t->options & CGROUP_OPTIONS_DISABLED_DUPLICATE && t->hash_chart == cg->hash_chart && !strcmp(t->chart_id, cg->chart_id)) {
+ debug(D_CGROUP, "Enabling duplicate of cgroup '%s' with id '%s', because the original with id '%s' stopped.", t->chart_id, t->id, cg->id);
+ t->enabled = 1;
+ t->options &= ~CGROUP_OPTIONS_DISABLED_DUPLICATE;
+ break;
+ }
+ }
+ }
+
+ if(!last)
+ discovered_cgroup_root = cg->discovered_next;
+ else
+ last->discovered_next = cg->discovered_next;
+
+ cgroup_free(cg);
+
+ if(!last)
+ cg = discovered_cgroup_root;
+ else
+ cg = last->discovered_next;
+ }
+ else {
+ last = cg;
+ cg = cg->discovered_next;
+ }
+ }
+}
+
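+// Publish the discovered cgroups list as the main list used by the chart
+// updating code.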
+static inline void copy_discovered_cgroups()
+{
+ debug(D_CGROUP, "copy discovered cgroups to the main group list");
+
+ struct cgroup *cg;
+
+ for(cg = discovered_cgroup_root; cg ; cg = cg->discovered_next) {
+ cg->next = cg->discovered_next;
+ }
+
+ cgroup_root = discovered_cgroup_root;
+}
+
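+// Discover the currently running cgroups, refresh their stat filenames and
+// swap the result into the main list while holding the root mutex.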
+static inline void find_all_cgroups() {
+ debug(D_CGROUP, "searching for cgroups");
+
+ mark_all_cgroups_as_not_available();
+ if(!cgroup_use_unified_cgroups) {
+ if(cgroup_enable_cpuacct_stat || cgroup_enable_cpuacct_usage) {
+ if(find_dir_in_subdirs(cgroup_cpuacct_base, NULL, found_subdir_in_dir) == -1) {
+ cgroup_enable_cpuacct_stat =
+ cgroup_enable_cpuacct_usage = CONFIG_BOOLEAN_NO;
+ error("CGROUP: disabled cpu statistics.");
+ }
+ }
+
+ if(cgroup_enable_blkio_io || cgroup_enable_blkio_ops || cgroup_enable_blkio_throttle_io || cgroup_enable_blkio_throttle_ops || cgroup_enable_blkio_merged_ops || cgroup_enable_blkio_queued_ops) {
+ if(find_dir_in_subdirs(cgroup_blkio_base, NULL, found_subdir_in_dir) == -1) {
+ cgroup_enable_blkio_io =
+ cgroup_enable_blkio_ops =
+ cgroup_enable_blkio_throttle_io =
+ cgroup_enable_blkio_throttle_ops =
+ cgroup_enable_blkio_merged_ops =
+ cgroup_enable_blkio_queued_ops = CONFIG_BOOLEAN_NO;
+ error("CGROUP: disabled blkio statistics.");
+ }
+ }
+
+ if(cgroup_enable_memory || cgroup_enable_detailed_memory || cgroup_enable_swap || cgroup_enable_memory_failcnt) {
+ if(find_dir_in_subdirs(cgroup_memory_base, NULL, found_subdir_in_dir) == -1) {
+ cgroup_enable_memory =
+ cgroup_enable_detailed_memory =
+ cgroup_enable_swap =
+ cgroup_enable_memory_failcnt = CONFIG_BOOLEAN_NO;
+ error("CGROUP: disabled memory statistics.");
+ }
+ }
+
+ if(cgroup_search_in_devices) {
+ if(find_dir_in_subdirs(cgroup_devices_base, NULL, found_subdir_in_dir) == -1) {
+ cgroup_search_in_devices = 0;
+ error("CGROUP: disabled devices statistics.");
+ }
+ }
+ }
+ else {
+ if (find_dir_in_subdirs(cgroup_unified_base, NULL, found_subdir_in_dir) == -1) {
+ cgroup_unified_exist = CONFIG_BOOLEAN_NO;
+ error("CGROUP: disabled unified cgroups statistics.");
+ }
+ }
+
+ update_filenames();
+
+ uv_mutex_lock(&cgroup_root_mutex);
+ cleanup_all_cgroups();
+ copy_discovered_cgroups();
+ uv_mutex_unlock(&cgroup_root_mutex);
+
+ debug(D_CGROUP, "done searching for cgroups");
+}
+
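+// Discovery thread: sleep until another thread signals that a discovery
+// cycle should run, then rescan the cgroup hierarchies.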
+void cgroup_discovery_worker(void *ptr)
+{
+ UNUSED(ptr);
+
+ while (!netdata_exit) {
+ uv_mutex_lock(&discovery_thread.mutex);
+ while (!discovery_thread.start_discovery)
+ uv_cond_wait(&discovery_thread.cond_var, &discovery_thread.mutex);
+ discovery_thread.start_discovery = 0;
+ uv_mutex_unlock(&discovery_thread.mutex);
+
+ if (unlikely(netdata_exit))
+ break;
+
+ find_all_cgroups();
+ }
+
+ discovery_thread.exited = 1;
+}
+
+// ----------------------------------------------------------------------------
+// generate charts
+
+#define CHART_TITLE_MAX 300
+
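+// Render the aggregated systemd services charts: one chart per metric,
+// with one dimension per systemd service cgroup.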
+void update_systemd_services_charts(
+ int update_every
+ , int do_cpu
+ , int do_mem_usage
+ , int do_mem_detailed
+ , int do_mem_failcnt
+ , int do_swap_usage
+ , int do_io
+ , int do_io_ops
+ , int do_throttle_io
+ , int do_throttle_ops
+ , int do_queued_ops
+ , int do_merged_ops
+) {
+ static RRDSET
+ *st_cpu = NULL,
+ *st_mem_usage = NULL,
+ *st_mem_failcnt = NULL,
+ *st_swap_usage = NULL,
+
+ *st_mem_detailed_cache = NULL,
+ *st_mem_detailed_rss = NULL,
+ *st_mem_detailed_mapped = NULL,
+ *st_mem_detailed_writeback = NULL,
+ *st_mem_detailed_pgfault = NULL,
+ *st_mem_detailed_pgmajfault = NULL,
+ *st_mem_detailed_pgpgin = NULL,
+ *st_mem_detailed_pgpgout = NULL,
+
+ *st_io_read = NULL,
+ *st_io_serviced_read = NULL,
+ *st_throttle_io_read = NULL,
+ *st_throttle_ops_read = NULL,
+ *st_queued_ops_read = NULL,
+ *st_merged_ops_read = NULL,
+
+ *st_io_write = NULL,
+ *st_io_serviced_write = NULL,
+ *st_throttle_io_write = NULL,
+ *st_throttle_ops_write = NULL,
+ *st_queued_ops_write = NULL,
+ *st_merged_ops_write = NULL;
+
+ // create the charts
+
+ if(likely(do_cpu)) {
+ if(unlikely(!st_cpu)) {
+ char title[CHART_TITLE_MAX + 1];
+ snprintfz(title, CHART_TITLE_MAX, "Systemd Services CPU utilization (100%% = 1 core)");
+
+ st_cpu = rrdset_create_localhost(
+ "services"
+ , "cpu"
+ , NULL
+ , "cpu"
+ , "services.cpu"
+ , title
+ , "percentage"
+ , PLUGIN_CGROUPS_NAME
+ , PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME
+ , NETDATA_CHART_PRIO_CGROUPS_SYSTEMD
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+
+ }
+ else
+ rrdset_next(st_cpu);
+ }
+
+ if(likely(do_mem_usage)) {
+ if(unlikely(!st_mem_usage)) {
+
+ st_mem_usage = rrdset_create_localhost(
+ "services"
+ , "mem_usage"
+ , NULL
+ , "mem"
+ , "services.mem_usage"
+ , (cgroup_used_memory_without_cache) ? "Systemd Services Used Memory without Cache"
+ : "Systemd Services Used Memory"
+ , "MiB"
+ , PLUGIN_CGROUPS_NAME
+ , PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME
+ , NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 10
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+
+ }
+ else
+ rrdset_next(st_mem_usage);
+ }
+
+ if(likely(do_mem_detailed)) {
+ if(unlikely(!st_mem_detailed_rss)) {
+
+ st_mem_detailed_rss = rrdset_create_localhost(
+ "services"
+ , "mem_rss"
+ , NULL
+ , "mem"
+ , "services.mem_rss"
+ , "Systemd Services RSS Memory"
+ , "MiB"
+ , PLUGIN_CGROUPS_NAME
+ , PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME
+ , NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 20
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+
+ }
+ else
+ rrdset_next(st_mem_detailed_rss);
+
+ if(unlikely(!st_mem_detailed_mapped)) {
+
+ st_mem_detailed_mapped = rrdset_create_localhost(
+ "services"
+ , "mem_mapped"
+ , NULL
+ , "mem"
+ , "services.mem_mapped"
+ , "Systemd Services Mapped Memory"
+ , "MiB"
+ , PLUGIN_CGROUPS_NAME
+ , PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME
+ , NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 30
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+
+ }
+ else
+ rrdset_next(st_mem_detailed_mapped);
+
+ if(unlikely(!st_mem_detailed_cache)) {
+
+ st_mem_detailed_cache = rrdset_create_localhost(
+ "services"
+ , "mem_cache"
+ , NULL
+ , "mem"
+ , "services.mem_cache"
+ , "Systemd Services Cache Memory"
+ , "MiB"
+ , PLUGIN_CGROUPS_NAME
+ , PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME
+ , NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 40
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+
+ }
+ else
+ rrdset_next(st_mem_detailed_cache);
+
+ if(unlikely(!st_mem_detailed_writeback)) {
+
+ st_mem_detailed_writeback = rrdset_create_localhost(
+ "services"
+ , "mem_writeback"
+ , NULL
+ , "mem"
+ , "services.mem_writeback"
+ , "Systemd Services Writeback Memory"
+ , "MiB"
+ , PLUGIN_CGROUPS_NAME
+ , PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME
+ , NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 50
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+
+ }
+ else
+ rrdset_next(st_mem_detailed_writeback);
+
+ if(unlikely(!st_mem_detailed_pgfault)) {
+
+ st_mem_detailed_pgfault = rrdset_create_localhost(
+ "services"
+ , "mem_pgfault"
+ , NULL
+ , "mem"
+ , "services.mem_pgfault"
+ , "Systemd Services Memory Minor Page Faults"
+ , "MiB/s"
+ , PLUGIN_CGROUPS_NAME
+ , PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME
+ , NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 60
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+ }
+ else
+ rrdset_next(st_mem_detailed_pgfault);
+
+ if(unlikely(!st_mem_detailed_pgmajfault)) {
+
+ st_mem_detailed_pgmajfault = rrdset_create_localhost(
+ "services"
+ , "mem_pgmajfault"
+ , NULL
+ , "mem"
+ , "services.mem_pgmajfault"
+ , "Systemd Services Memory Major Page Faults"
+ , "MiB/s"
+ , PLUGIN_CGROUPS_NAME
+ , PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME
+ , NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 70
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+
+ }
+ else
+ rrdset_next(st_mem_detailed_pgmajfault);
+
+ if(unlikely(!st_mem_detailed_pgpgin)) {
+
+ st_mem_detailed_pgpgin = rrdset_create_localhost(
+ "services"
+ , "mem_pgpgin"
+ , NULL
+ , "mem"
+ , "services.mem_pgpgin"
+ , "Systemd Services Memory Charging Activity"
+ , "MiB/s"
+ , PLUGIN_CGROUPS_NAME
+ , PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME
+ , NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 80
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+
+ }
+ else
+ rrdset_next(st_mem_detailed_pgpgin);
+
+ if(unlikely(!st_mem_detailed_pgpgout)) {
+
+ st_mem_detailed_pgpgout = rrdset_create_localhost(
+ "services"
+ , "mem_pgpgout"
+ , NULL
+ , "mem"
+ , "services.mem_pgpgout"
+ , "Systemd Services Memory Uncharging Activity"
+ , "MiB/s"
+ , PLUGIN_CGROUPS_NAME
+ , PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME
+ , NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 90
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+
+ }
+ else
+ rrdset_next(st_mem_detailed_pgpgout);
+ }
+
+ if(likely(do_mem_failcnt)) {
+ if(unlikely(!st_mem_failcnt)) {
+
+ st_mem_failcnt = rrdset_create_localhost(
+ "services"
+ , "mem_failcnt"
+ , NULL
+ , "mem"
+ , "services.mem_failcnt"
+ , "Systemd Services Memory Limit Failures"
+ , "failures"
+ , PLUGIN_CGROUPS_NAME
+ , PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME
+ , NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 110
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+
+ }
+ else
+ rrdset_next(st_mem_failcnt);
+ }
+
+ if(likely(do_swap_usage)) {
+ if(unlikely(!st_swap_usage)) {
+
+ st_swap_usage = rrdset_create_localhost(
+ "services"
+ , "swap_usage"
+ , NULL
+ , "swap"
+ , "services.swap_usage"
+ , "Systemd Services Swap Memory Used"
+ , "MiB"
+ , PLUGIN_CGROUPS_NAME
+ , PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME
+ , NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 100
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+
+ }
+ else
+ rrdset_next(st_swap_usage);
+ }
+
+ if(likely(do_io)) {
+ if(unlikely(!st_io_read)) {
+
+ st_io_read = rrdset_create_localhost(
+ "services"
+ , "io_read"
+ , NULL
+ , "disk"
+ , "services.io_read"
+ , "Systemd Services Disk Read Bandwidth"
+ , "KiB/s"
+ , PLUGIN_CGROUPS_NAME
+ , PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME
+ , NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 120
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+
+ }
+ else
+ rrdset_next(st_io_read);
+
+ if(unlikely(!st_io_write)) {
+
+ st_io_write = rrdset_create_localhost(
+ "services"
+ , "io_write"
+ , NULL
+ , "disk"
+ , "services.io_write"
+ , "Systemd Services Disk Write Bandwidth"
+ , "KiB/s"
+ , PLUGIN_CGROUPS_NAME
+ , PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME
+ , NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 130
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+
+ }
+ else
+ rrdset_next(st_io_write);
+ }
+
+ if(likely(do_io_ops)) {
+ if(unlikely(!st_io_serviced_read)) {
+
+ st_io_serviced_read = rrdset_create_localhost(
+ "services"
+ , "io_ops_read"
+ , NULL
+ , "disk"
+ , "services.io_ops_read"
+ , "Systemd Services Disk Read Operations"
+ , "operations/s"
+ , PLUGIN_CGROUPS_NAME
+ , PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME
+ , NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 140
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+
+ }
+ else
+ rrdset_next(st_io_serviced_read);
+
+ if(unlikely(!st_io_serviced_write)) {
+
+ st_io_serviced_write = rrdset_create_localhost(
+ "services"
+ , "io_ops_write"
+ , NULL
+ , "disk"
+ , "services.io_ops_write"
+ , "Systemd Services Disk Write Operations"
+ , "operations/s"
+ , PLUGIN_CGROUPS_NAME
+ , PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME
+ , NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 150
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+
+ }
+ else
+ rrdset_next(st_io_serviced_write);
+ }
+
+ if(likely(do_throttle_io)) {
+ if(unlikely(!st_throttle_io_read)) {
+
+ st_throttle_io_read = rrdset_create_localhost(
+ "services"
+ , "throttle_io_read"
+ , NULL
+ , "disk"
+ , "services.throttle_io_read"
+ , "Systemd Services Throttle Disk Read Bandwidth"
+ , "KiB/s"
+ , PLUGIN_CGROUPS_NAME
+ , PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME
+ , NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 160
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+
+ }
+ else
+ rrdset_next(st_throttle_io_read);
+
+ if(unlikely(!st_throttle_io_write)) {
+
+ st_throttle_io_write = rrdset_create_localhost(
+ "services"
+ , "throttle_io_write"
+ , NULL
+ , "disk"
+ , "services.throttle_io_write"
+ , "Systemd Services Throttle Disk Write Bandwidth"
+ , "KiB/s"
+ , PLUGIN_CGROUPS_NAME
+ , PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME
+ , NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 170
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+
+ }
+ else
+ rrdset_next(st_throttle_io_write);
+ }
+
+ if(likely(do_throttle_ops)) {
+ if(unlikely(!st_throttle_ops_read)) {
+
+ st_throttle_ops_read = rrdset_create_localhost(
+ "services"
+ , "throttle_io_ops_read"
+ , NULL
+ , "disk"
+ , "services.throttle_io_ops_read"
+ , "Systemd Services Throttle Disk Read Operations"
+ , "operations/s"
+ , PLUGIN_CGROUPS_NAME
+ , PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME
+ , NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 180
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+
+ }
+ else
+ rrdset_next(st_throttle_ops_read);
+
+ if(unlikely(!st_throttle_ops_write)) {
+
+ st_throttle_ops_write = rrdset_create_localhost(
+ "services"
+ , "throttle_io_ops_write"
+ , NULL
+ , "disk"
+ , "services.throttle_io_ops_write"
+ , "Systemd Services Throttle Disk Write Operations"
+ , "operations/s"
+ , PLUGIN_CGROUPS_NAME
+ , PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME
+ , NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 190
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+
+ }
+ else
+ rrdset_next(st_throttle_ops_write);
+ }
+
+ if(likely(do_queued_ops)) {
+ if(unlikely(!st_queued_ops_read)) {
+
+ st_queued_ops_read = rrdset_create_localhost(
+ "services"
+ , "queued_io_ops_read"
+ , NULL
+ , "disk"
+ , "services.queued_io_ops_read"
+ , "Systemd Services Queued Disk Read Operations"
+ , "operations/s"
+ , PLUGIN_CGROUPS_NAME
+ , PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME
+ , NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 200
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+
+ }
+ else
+ rrdset_next(st_queued_ops_read);
+
+ if(unlikely(!st_queued_ops_write)) {
+
+ st_queued_ops_write = rrdset_create_localhost(
+ "services"
+ , "queued_io_ops_write"
+ , NULL
+ , "disk"
+ , "services.queued_io_ops_write"
+ , "Systemd Services Queued Disk Write Operations"
+ , "operations/s"
+ , PLUGIN_CGROUPS_NAME
+ , PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME
+ , NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 210
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+
+ }
+ else
+ rrdset_next(st_queued_ops_write);
+ }
+
+ if(likely(do_merged_ops)) {
+ if(unlikely(!st_merged_ops_read)) {
+
+ st_merged_ops_read = rrdset_create_localhost(
+ "services"
+ , "merged_io_ops_read"
+ , NULL
+ , "disk"
+ , "services.merged_io_ops_read"
+ , "Systemd Services Merged Disk Read Operations"
+ , "operations/s"
+ , PLUGIN_CGROUPS_NAME
+ , PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME
+ , NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 220
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+
+ }
+ else
+ rrdset_next(st_merged_ops_read);
+
+ if(unlikely(!st_merged_ops_write)) {
+
+ st_merged_ops_write = rrdset_create_localhost(
+ "services"
+ , "merged_io_ops_write"
+ , NULL
+ , "disk"
+ , "services.merged_io_ops_write"
+ , "Systemd Services Merged Disk Write Operations"
+ , "operations/s"
+ , PLUGIN_CGROUPS_NAME
+ , PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME
+ , NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 230
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+
+ }
+ else
+ rrdset_next(st_merged_ops_write);
+ }
+
+ // update the values
+ struct cgroup *cg;
+ for(cg = cgroup_root; cg ; cg = cg->next) {
+ if(unlikely(!cg->enabled || cg->pending_renames || !(cg->options & CGROUP_OPTIONS_SYSTEM_SLICE_SERVICE)))
+ continue;
+
+ if(likely(do_cpu && cg->cpuacct_stat.updated)) {
+            if(unlikely(!cg->rd_cpu)) {
+ if (!(cg->options & CGROUP_OPTIONS_IS_UNIFIED)) {
+ cg->rd_cpu = rrddim_add(st_cpu, cg->chart_id, cg->chart_title, 100, system_hz, RRD_ALGORITHM_INCREMENTAL);
+ } else {
+ cg->rd_cpu = rrddim_add(st_cpu, cg->chart_id, cg->chart_title, 100, 1000000, RRD_ALGORITHM_INCREMENTAL);
+ }
+ }
+
+ rrddim_set_by_pointer(st_cpu, cg->rd_cpu, cg->cpuacct_stat.user + cg->cpuacct_stat.system);
+ }
+
+ if(likely(do_mem_usage && cg->memory.updated_usage_in_bytes)) {
+ if(unlikely(!cg->rd_mem_usage))
+ cg->rd_mem_usage = rrddim_add(st_mem_usage, cg->chart_id, cg->chart_title, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+
+ rrddim_set_by_pointer(st_mem_usage, cg->rd_mem_usage, cg->memory.usage_in_bytes - ((cgroup_used_memory_without_cache)?cg->memory.total_cache:0));
+ }
+
+ if(likely(do_mem_detailed && cg->memory.updated_detailed)) {
+ if(unlikely(!cg->rd_mem_detailed_rss))
+ cg->rd_mem_detailed_rss = rrddim_add(st_mem_detailed_rss, cg->chart_id, cg->chart_title, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+
+ rrddim_set_by_pointer(st_mem_detailed_rss, cg->rd_mem_detailed_rss, cg->memory.total_rss + cg->memory.total_rss_huge);
+
+ if(unlikely(!cg->rd_mem_detailed_mapped))
+ cg->rd_mem_detailed_mapped = rrddim_add(st_mem_detailed_mapped, cg->chart_id, cg->chart_title, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+
+ rrddim_set_by_pointer(st_mem_detailed_mapped, cg->rd_mem_detailed_mapped, cg->memory.total_mapped_file);
+
+ if(unlikely(!cg->rd_mem_detailed_cache))
+ cg->rd_mem_detailed_cache = rrddim_add(st_mem_detailed_cache, cg->chart_id, cg->chart_title, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+
+ rrddim_set_by_pointer(st_mem_detailed_cache, cg->rd_mem_detailed_cache, cg->memory.total_cache);
+
+ if(unlikely(!cg->rd_mem_detailed_writeback))
+ cg->rd_mem_detailed_writeback = rrddim_add(st_mem_detailed_writeback, cg->chart_id, cg->chart_title, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+
+ rrddim_set_by_pointer(st_mem_detailed_writeback, cg->rd_mem_detailed_writeback, cg->memory.total_writeback);
+
+ if(unlikely(!cg->rd_mem_detailed_pgfault))
+ cg->rd_mem_detailed_pgfault = rrddim_add(st_mem_detailed_pgfault, cg->chart_id, cg->chart_title, system_page_size, 1024 * 1024, RRD_ALGORITHM_INCREMENTAL);
+
+ rrddim_set_by_pointer(st_mem_detailed_pgfault, cg->rd_mem_detailed_pgfault, cg->memory.total_pgfault);
+
+ if(unlikely(!cg->rd_mem_detailed_pgmajfault))
+ cg->rd_mem_detailed_pgmajfault = rrddim_add(st_mem_detailed_pgmajfault, cg->chart_id, cg->chart_title, system_page_size, 1024 * 1024, RRD_ALGORITHM_INCREMENTAL);
+
+ rrddim_set_by_pointer(st_mem_detailed_pgmajfault, cg->rd_mem_detailed_pgmajfault, cg->memory.total_pgmajfault);
+
+ if(unlikely(!cg->rd_mem_detailed_pgpgin))
+ cg->rd_mem_detailed_pgpgin = rrddim_add(st_mem_detailed_pgpgin, cg->chart_id, cg->chart_title, system_page_size, 1024 * 1024, RRD_ALGORITHM_INCREMENTAL);
+
+ rrddim_set_by_pointer(st_mem_detailed_pgpgin, cg->rd_mem_detailed_pgpgin, cg->memory.total_pgpgin);
+
+ if(unlikely(!cg->rd_mem_detailed_pgpgout))
+ cg->rd_mem_detailed_pgpgout = rrddim_add(st_mem_detailed_pgpgout, cg->chart_id, cg->chart_title, system_page_size, 1024 * 1024, RRD_ALGORITHM_INCREMENTAL);
+
+ rrddim_set_by_pointer(st_mem_detailed_pgpgout, cg->rd_mem_detailed_pgpgout, cg->memory.total_pgpgout);
+ }
+
+ if(likely(do_mem_failcnt && cg->memory.updated_failcnt)) {
+ if(unlikely(!cg->rd_mem_failcnt))
+ cg->rd_mem_failcnt = rrddim_add(st_mem_failcnt, cg->chart_id, cg->chart_title, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+
+ rrddim_set_by_pointer(st_mem_failcnt, cg->rd_mem_failcnt, cg->memory.failcnt);
+ }
+
+ if(likely(do_swap_usage && cg->memory.updated_msw_usage_in_bytes)) {
+ if(unlikely(!cg->rd_swap_usage))
+ cg->rd_swap_usage = rrddim_add(st_swap_usage, cg->chart_id, cg->chart_title, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+
+ rrddim_set_by_pointer(st_swap_usage, cg->rd_swap_usage, cg->memory.msw_usage_in_bytes);
+ }
+
+ if(likely(do_io && cg->io_service_bytes.updated)) {
+ if(unlikely(!cg->rd_io_service_bytes_read))
+ cg->rd_io_service_bytes_read = rrddim_add(st_io_read, cg->chart_id, cg->chart_title, 1, 1024, RRD_ALGORITHM_INCREMENTAL);
+
+ rrddim_set_by_pointer(st_io_read, cg->rd_io_service_bytes_read, cg->io_service_bytes.Read);
+
+ if(unlikely(!cg->rd_io_service_bytes_write))
+ cg->rd_io_service_bytes_write = rrddim_add(st_io_write, cg->chart_id, cg->chart_title, 1, 1024, RRD_ALGORITHM_INCREMENTAL);
+
+ rrddim_set_by_pointer(st_io_write, cg->rd_io_service_bytes_write, cg->io_service_bytes.Write);
+ }
+
+ if(likely(do_io_ops && cg->io_serviced.updated)) {
+ if(unlikely(!cg->rd_io_serviced_read))
+ cg->rd_io_serviced_read = rrddim_add(st_io_serviced_read, cg->chart_id, cg->chart_title, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+
+ rrddim_set_by_pointer(st_io_serviced_read, cg->rd_io_serviced_read, cg->io_serviced.Read);
+
+ if(unlikely(!cg->rd_io_serviced_write))
+ cg->rd_io_serviced_write = rrddim_add(st_io_serviced_write, cg->chart_id, cg->chart_title, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+
+ rrddim_set_by_pointer(st_io_serviced_write, cg->rd_io_serviced_write, cg->io_serviced.Write);
+ }
+
+ if(likely(do_throttle_io && cg->throttle_io_service_bytes.updated)) {
+ if(unlikely(!cg->rd_throttle_io_read))
+ cg->rd_throttle_io_read = rrddim_add(st_throttle_io_read, cg->chart_id, cg->chart_title, 1, 1024, RRD_ALGORITHM_INCREMENTAL);
+
+ rrddim_set_by_pointer(st_throttle_io_read, cg->rd_throttle_io_read, cg->throttle_io_service_bytes.Read);
+
+ if(unlikely(!cg->rd_throttle_io_write))
+ cg->rd_throttle_io_write = rrddim_add(st_throttle_io_write, cg->chart_id, cg->chart_title, 1, 1024, RRD_ALGORITHM_INCREMENTAL);
+
+ rrddim_set_by_pointer(st_throttle_io_write, cg->rd_throttle_io_write, cg->throttle_io_service_bytes.Write);
+ }
+
+ if(likely(do_throttle_ops && cg->throttle_io_serviced.updated)) {
+ if(unlikely(!cg->rd_throttle_io_serviced_read))
+ cg->rd_throttle_io_serviced_read = rrddim_add(st_throttle_ops_read, cg->chart_id, cg->chart_title, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+
+ rrddim_set_by_pointer(st_throttle_ops_read, cg->rd_throttle_io_serviced_read, cg->throttle_io_serviced.Read);
+
+ if(unlikely(!cg->rd_throttle_io_serviced_write))
+ cg->rd_throttle_io_serviced_write = rrddim_add(st_throttle_ops_write, cg->chart_id, cg->chart_title, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+
+ rrddim_set_by_pointer(st_throttle_ops_write, cg->rd_throttle_io_serviced_write, cg->throttle_io_serviced.Write);
+ }
+
+ if(likely(do_queued_ops && cg->io_queued.updated)) {
+ if(unlikely(!cg->rd_io_queued_read))
+ cg->rd_io_queued_read = rrddim_add(st_queued_ops_read, cg->chart_id, cg->chart_title, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+
+ rrddim_set_by_pointer(st_queued_ops_read, cg->rd_io_queued_read, cg->io_queued.Read);
+
+ if(unlikely(!cg->rd_io_queued_write))
+ cg->rd_io_queued_write = rrddim_add(st_queued_ops_write, cg->chart_id, cg->chart_title, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+
+ rrddim_set_by_pointer(st_queued_ops_write, cg->rd_io_queued_write, cg->io_queued.Write);
+ }
+
+ if(likely(do_merged_ops && cg->io_merged.updated)) {
+ if(unlikely(!cg->rd_io_merged_read))
+ cg->rd_io_merged_read = rrddim_add(st_merged_ops_read, cg->chart_id, cg->chart_title, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+
+ rrddim_set_by_pointer(st_merged_ops_read, cg->rd_io_merged_read, cg->io_merged.Read);
+
+ if(unlikely(!cg->rd_io_merged_write))
+ cg->rd_io_merged_write = rrddim_add(st_merged_ops_write, cg->chart_id, cg->chart_title, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+
+ rrddim_set_by_pointer(st_merged_ops_write, cg->rd_io_merged_write, cg->io_merged.Write);
+ }
+ }
+
+ // complete the iteration
+ if(likely(do_cpu))
+ rrdset_done(st_cpu);
+
+ if(likely(do_mem_usage))
+ rrdset_done(st_mem_usage);
+
+ if(unlikely(do_mem_detailed)) {
+ rrdset_done(st_mem_detailed_cache);
+ rrdset_done(st_mem_detailed_rss);
+ rrdset_done(st_mem_detailed_mapped);
+ rrdset_done(st_mem_detailed_writeback);
+ rrdset_done(st_mem_detailed_pgfault);
+ rrdset_done(st_mem_detailed_pgmajfault);
+ rrdset_done(st_mem_detailed_pgpgin);
+ rrdset_done(st_mem_detailed_pgpgout);
+ }
+
+ if(likely(do_mem_failcnt))
+ rrdset_done(st_mem_failcnt);
+
+ if(likely(do_swap_usage))
+ rrdset_done(st_swap_usage);
+
+ if(likely(do_io)) {
+ rrdset_done(st_io_read);
+ rrdset_done(st_io_write);
+ }
+
+ if(likely(do_io_ops)) {
+ rrdset_done(st_io_serviced_read);
+ rrdset_done(st_io_serviced_write);
+ }
+
+ if(likely(do_throttle_io)) {
+ rrdset_done(st_throttle_io_read);
+ rrdset_done(st_throttle_io_write);
+ }
+
+ if(likely(do_throttle_ops)) {
+ rrdset_done(st_throttle_ops_read);
+ rrdset_done(st_throttle_ops_write);
+ }
+
+ if(likely(do_queued_ops)) {
+ rrdset_done(st_queued_ops_read);
+ rrdset_done(st_queued_ops_write);
+ }
+
+ if(likely(do_merged_ops)) {
+ rrdset_done(st_merged_ops_read);
+ rrdset_done(st_merged_ops_write);
+ }
+}
+
+static inline char *cgroup_chart_type(char *buffer, const char *id, size_t len) {
+ if(buffer[0]) return buffer;
+
+ if(id[0] == '\0' || (id[0] == '/' && id[1] == '\0'))
+ strncpy(buffer, "cgroup_root", len);
+ else
+ snprintfz(buffer, len, "cgroup_%s", id);
+
+ netdata_fix_chart_id(buffer);
+ return buffer;
+}
+
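+// parse the decimal number at *s, advancing the pointer past the digits it consumes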
+static inline unsigned long long cpuset_str2ull(char **s) {
+ unsigned long long n = 0;
+ char c;
+ for(c = **s; c >= '0' && c <= '9' ; c = *(++*s)) {
+ n *= 10;
+ n += c - '0';
+ }
+ return n;
+}
+
+static inline void update_cpu_limits(char **filename, unsigned long long *value, struct cgroup *cg) {
+ if(*filename) {
+ int ret = -1;
+
+ if(value == &cg->cpuset_cpus) {
+ static char *buf = NULL;
+ static size_t buf_size = 0;
+
+ if(!buf) {
+ buf_size = 100U + 6 * get_system_cpus(); // taken from kernel/cgroup/cpuset.c
+ buf = mallocz(buf_size + 1);
+ }
+
+ ret = read_file(*filename, buf, buf_size);
+
+ if(!ret) {
+ char *s = buf;
+ unsigned long long ncpus = 0;
+
+ // parse the cpuset string and calculate the number of cpus the cgroup is allowed to use
+ while(*s) {
+ if(*s < '0' || *s > '9') {
+ s++; // skip separators and the trailing newline
+ continue;
+ }
+ unsigned long long n = cpuset_str2ull(&s);
+ ncpus++; // count the first (or only) cpu of this entry
+ if(*s == '-') {
+ s++;
+ unsigned long long m = cpuset_str2ull(&s);
+ ncpus += m - n; // count the remaining cpus of the region
+ }
+ }
+
+ if(likely(ncpus)) *value = ncpus;
+ }
+ }
+ else if(value == &cg->cpu_cfs_period) {
+ ret = read_single_number_file(*filename, value);
+ }
+ else if(value == &cg->cpu_cfs_quota) {
+ ret = read_single_number_file(*filename, value);
+ }
+ else ret = -1;
+
+ if(ret) {
+ error("Cannot refresh cgroup %s cpu limit by reading '%s'. Will not update its limit anymore.", cg->id, *filename);
+ freez(*filename);
+ *filename = NULL;
+ }
+ }
+}
+
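+// cgroup v2: cpu.max contains "quota period" on a single line, where quota may be the literal string "max"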
+static inline void update_cpu_limits2(struct cgroup *cg) {
+ if(cg->filename_cpu_cfs_quota){
+ static procfile *ff = NULL;
+
+ ff = procfile_reopen(ff, cg->filename_cpu_cfs_quota, NULL, PROCFILE_FLAG_DEFAULT);
+ if(unlikely(!ff)) {
+ goto cpu_limits2_err;
+ }
+
+ ff = procfile_readall(ff);
+ if(unlikely(!ff)) {
+ goto cpu_limits2_err;
+ }
+
+ unsigned long lines = procfile_lines(ff);
+
+ if (unlikely(lines < 1)) {
+ error("CGROUP: file '%s' should have 1 lines.", cg->filename_cpu_cfs_quota);
+ return;
+ }
+
+ cg->cpu_cfs_period = str2ull(procfile_lineword(ff, 0, 1));
+ cg->cpuset_cpus = get_system_cpus();
+
+ char *s = "max\n\0";
+ if(strsame(s, procfile_lineword(ff, 0, 0)) == 0){
+ cg->cpu_cfs_quota = cg->cpu_cfs_period * cg->cpuset_cpus;
+ } else {
+ cg->cpu_cfs_quota = str2ull(procfile_lineword(ff, 0, 0));
+ }
+ debug(D_CGROUP, "CPU limits values: %llu %llu %llu", cg->cpu_cfs_period, cg->cpuset_cpus, cg->cpu_cfs_quota);
+ return;
+
+cpu_limits2_err:
+ error("Cannot refresh cgroup %s cpu limit by reading '%s'. Will not update its limit anymore.", cg->id, cg->filename_cpu_cfs_quota);
+ freez(cg->filename_cpu_cfs_quota);
+ cg->filename_cpu_cfs_quota = NULL;
+
+ }
+}
+
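+// refresh a memory limit and expose it as a chart variable (in MiB); cgroup v1 limit files contain a single
+// number, while cgroup v2 files may contain the literal string "max" instead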
+static inline int update_memory_limits(char **filename, RRDSETVAR **chart_var, unsigned long long *value, const char *chart_var_name, struct cgroup *cg) {
+ if(*filename) {
+ if(unlikely(!*chart_var)) {
+ *chart_var = rrdsetvar_custom_chart_variable_create(cg->st_mem_usage, chart_var_name);
+ if(!*chart_var) {
+ error("Cannot create cgroup %s chart variable '%s'. Will not update its limit anymore.", cg->id, chart_var_name);
+ freez(*filename);
+ *filename = NULL;
+ }
+ }
+
+ if(*filename && *chart_var) {
+ if(!(cg->options & CGROUP_OPTIONS_IS_UNIFIED)) {
+ if(read_single_number_file(*filename, value)) {
+ error("Cannot refresh cgroup %s memory limit by reading '%s'. Will not update its limit anymore.", cg->id, *filename);
+ freez(*filename);
+ *filename = NULL;
+ }
+ else {
+ rrdsetvar_custom_chart_variable_set(*chart_var, (calculated_number)(*value / (1024 * 1024)));
+ return 1;
+ }
+ } else {
+ char buffer[30 + 1];
+ int ret = read_file(*filename, buffer, 30);
+ if(ret) {
+ error("Cannot refresh cgroup %s memory limit by reading '%s'. Will not update its limit anymore.", cg->id, *filename);
+ freez(*filename);
+ *filename = NULL;
+ return 0;
+ }
+ char *s = "max\n\0";
+ if(strsame(s, buffer) == 0){
+ *value = UINT64_MAX;
+ rrdsetvar_custom_chart_variable_set(*chart_var, (calculated_number)(*value / (1024 * 1024)));
+ return 1;
+ }
+ *value = str2ull(buffer);
+ rrdsetvar_custom_chart_variable_set(*chart_var, (calculated_number)(*value / (1024 * 1024)));
+ return 1;
+ }
+ }
+ }
+ return 0;
+}
+
+void update_cgroup_charts(int update_every) {
+ debug(D_CGROUP, "updating cgroups charts");
+
+ char type[RRD_ID_LENGTH_MAX + 1];
+ char title[CHART_TITLE_MAX + 1];
+
+ int services_do_cpu = 0,
+ services_do_mem_usage = 0,
+ services_do_mem_detailed = 0,
+ services_do_mem_failcnt = 0,
+ services_do_swap_usage = 0,
+ services_do_io = 0,
+ services_do_io_ops = 0,
+ services_do_throttle_io = 0,
+ services_do_throttle_ops = 0,
+ services_do_queued_ops = 0,
+ services_do_merged_ops = 0;
+
+ struct cgroup *cg;
+ for(cg = cgroup_root; cg ; cg = cg->next) {
+ if(unlikely(!cg->enabled || cg->pending_renames))
+ continue;
+
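+ // for systemd services, only count which metrics are enabled and updated here;
+ // the aggregated charts are rendered by update_systemd_services_charts() at the end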
+ if(likely(cgroup_enable_systemd_services && cg->options & CGROUP_OPTIONS_SYSTEM_SLICE_SERVICE)) {
+ if(cg->cpuacct_stat.updated && cg->cpuacct_stat.enabled == CONFIG_BOOLEAN_YES) services_do_cpu++;
+
+ if(cgroup_enable_systemd_services_detailed_memory && cg->memory.updated_detailed && cg->memory.enabled_detailed) services_do_mem_detailed++;
+ if(cg->memory.updated_usage_in_bytes && cg->memory.enabled_usage_in_bytes == CONFIG_BOOLEAN_YES) services_do_mem_usage++;
+ if(cg->memory.updated_failcnt && cg->memory.enabled_failcnt == CONFIG_BOOLEAN_YES) services_do_mem_failcnt++;
+ if(cg->memory.updated_msw_usage_in_bytes && cg->memory.enabled_msw_usage_in_bytes == CONFIG_BOOLEAN_YES) services_do_swap_usage++;
+
+ if(cg->io_service_bytes.updated && cg->io_service_bytes.enabled == CONFIG_BOOLEAN_YES) services_do_io++;
+ if(cg->io_serviced.updated && cg->io_serviced.enabled == CONFIG_BOOLEAN_YES) services_do_io_ops++;
+ if(cg->throttle_io_service_bytes.updated && cg->throttle_io_service_bytes.enabled == CONFIG_BOOLEAN_YES) services_do_throttle_io++;
+ if(cg->throttle_io_serviced.updated && cg->throttle_io_serviced.enabled == CONFIG_BOOLEAN_YES) services_do_throttle_ops++;
+ if(cg->io_queued.updated && cg->io_queued.enabled == CONFIG_BOOLEAN_YES) services_do_queued_ops++;
+ if(cg->io_merged.updated && cg->io_merged.enabled == CONFIG_BOOLEAN_YES) services_do_merged_ops++;
+ continue;
+ }
+
+ type[0] = '\0';
+
+ if(likely(cg->cpuacct_stat.updated && cg->cpuacct_stat.enabled == CONFIG_BOOLEAN_YES)) {
+ if(unlikely(!cg->st_cpu)) {
+ snprintfz(title, CHART_TITLE_MAX, "CPU Usage (100%% = 1 core)");
+
+ cg->st_cpu = rrdset_create_localhost(
+ cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX)
+ , "cpu"
+ , NULL
+ , "cpu"
+ , "cgroup.cpu"
+ , title
+ , "percentage"
+ , PLUGIN_CGROUPS_NAME
+ , PLUGIN_CGROUPS_MODULE_CGROUPS_NAME
+ , cgroup_containers_chart_priority
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+
+ rrdset_update_labels(cg->st_cpu, cg->chart_labels);
+
+ if(!(cg->options & CGROUP_OPTIONS_IS_UNIFIED)) {
+ rrddim_add(cg->st_cpu, "user", NULL, 100, system_hz, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(cg->st_cpu, "system", NULL, 100, system_hz, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else {
+ rrddim_add(cg->st_cpu, "user", NULL, 100, 1000000, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(cg->st_cpu, "system", NULL, 100, 1000000, RRD_ALGORITHM_INCREMENTAL);
+ }
+ }
+ else
+ rrdset_next(cg->st_cpu);
+
+ rrddim_set(cg->st_cpu, "user", cg->cpuacct_stat.user);
+ rrddim_set(cg->st_cpu, "system", cg->cpuacct_stat.system);
+ rrdset_done(cg->st_cpu);
+
+ if(likely(cg->filename_cpuset_cpus || cg->filename_cpu_cfs_period || cg->filename_cpu_cfs_quota)) {
+ if(!(cg->options & CGROUP_OPTIONS_IS_UNIFIED)) {
+ update_cpu_limits(&cg->filename_cpuset_cpus, &cg->cpuset_cpus, cg);
+ update_cpu_limits(&cg->filename_cpu_cfs_period, &cg->cpu_cfs_period, cg);
+ update_cpu_limits(&cg->filename_cpu_cfs_quota, &cg->cpu_cfs_quota, cg);
+ } else {
+ update_cpu_limits2(cg);
+ }
+
+ if(unlikely(!cg->chart_var_cpu_limit)) {
+ cg->chart_var_cpu_limit = rrdsetvar_custom_chart_variable_create(cg->st_cpu, "cpu_limit");
+ if(!cg->chart_var_cpu_limit) {
+ error("Cannot create cgroup %s chart variable 'cpu_limit'. Will not update its limit anymore.", cg->id);
+ if(cg->filename_cpuset_cpus) freez(cg->filename_cpuset_cpus);
+ cg->filename_cpuset_cpus = NULL;
+ if(cg->filename_cpu_cfs_period) freez(cg->filename_cpu_cfs_period);
+ cg->filename_cpu_cfs_period = NULL;
+ if(cg->filename_cpu_cfs_quota) freez(cg->filename_cpu_cfs_quota);
+ cg->filename_cpu_cfs_quota = NULL;
+ }
+ }
+ else {
+ calculated_number value = 0, quota = 0;
+
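+ // the effective limit is min(quota / period, allowed cpus), expressed as a percentage of one core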
+ if(likely( ((!(cg->options & CGROUP_OPTIONS_IS_UNIFIED)) && (cg->filename_cpuset_cpus || (cg->filename_cpu_cfs_period && cg->filename_cpu_cfs_quota)))
+ || ((cg->options & CGROUP_OPTIONS_IS_UNIFIED) && cg->filename_cpu_cfs_quota))) {
+ if(unlikely(cg->cpu_cfs_quota > 0))
+ quota = (calculated_number)cg->cpu_cfs_quota / (calculated_number)cg->cpu_cfs_period;
+
+ if(unlikely(quota > 0 && quota < cg->cpuset_cpus))
+ value = quota * 100;
+ else
+ value = (calculated_number)cg->cpuset_cpus * 100;
+ }
+ if(likely(value)) {
+ rrdsetvar_custom_chart_variable_set(cg->chart_var_cpu_limit, value);
+
+ if(unlikely(!cg->st_cpu_limit)) {
+ snprintfz(title, CHART_TITLE_MAX, "CPU Usage within the limits");
+
+ cg->st_cpu_limit = rrdset_create_localhost(
+ cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX)
+ , "cpu_limit"
+ , NULL
+ , "cpu"
+ , "cgroup.cpu_limit"
+ , title
+ , "percentage"
+ , PLUGIN_CGROUPS_NAME
+ , PLUGIN_CGROUPS_MODULE_CGROUPS_NAME
+ , cgroup_containers_chart_priority - 1
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rrdset_update_labels(cg->st_cpu_limit, cg->chart_labels);
+
+ if(!(cg->options & CGROUP_OPTIONS_IS_UNIFIED))
+ rrddim_add(cg->st_cpu_limit, "used", NULL, 1, system_hz, RRD_ALGORITHM_ABSOLUTE);
+ else
+ rrddim_add(cg->st_cpu_limit, "used", NULL, 1, 1000000, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else
+ rrdset_next(cg->st_cpu_limit);
+
+ calculated_number cpu_usage = 0;
+ cpu_usage = (calculated_number)(cg->cpuacct_stat.user + cg->cpuacct_stat.system) * 100;
+ calculated_number cpu_used = 100 * (cpu_usage - cg->prev_cpu_usage) / (value * update_every);
+
+ rrdset_isnot_obsolete(cg->st_cpu_limit);
+
+ rrddim_set(cg->st_cpu_limit, "used", (cpu_used > 0)?cpu_used:0);
+
+ cg->prev_cpu_usage = cpu_usage;
+
+ rrdset_done(cg->st_cpu_limit);
+ }
+ else {
+ rrdsetvar_custom_chart_variable_set(cg->chart_var_cpu_limit, NAN);
+ if(unlikely(cg->st_cpu_limit)) {
+ rrdset_is_obsolete(cg->st_cpu_limit);
+ cg->st_cpu_limit = NULL;
+ }
+ }
+ }
+ }
+ }
+
+ if(likely(cg->cpuacct_usage.updated && cg->cpuacct_usage.enabled == CONFIG_BOOLEAN_YES)) {
+ char id[RRD_ID_LENGTH_MAX + 1];
+ unsigned int i;
+
+ if(unlikely(!cg->st_cpu_per_core)) {
+ snprintfz(title, CHART_TITLE_MAX, "CPU Usage (100%% = 1 core) Per Core");
+
+ cg->st_cpu_per_core = rrdset_create_localhost(
+ cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX)
+ , "cpu_per_core"
+ , NULL
+ , "cpu"
+ , "cgroup.cpu_per_core"
+ , title
+ , "percentage"
+ , PLUGIN_CGROUPS_NAME
+ , PLUGIN_CGROUPS_MODULE_CGROUPS_NAME
+ , cgroup_containers_chart_priority + 100
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+
+ rrdset_update_labels(cg->st_cpu_per_core, cg->chart_labels);
+
+ for(i = 0; i < cg->cpuacct_usage.cpus; i++) {
+ snprintfz(id, RRD_ID_LENGTH_MAX, "cpu%u", i);
+ rrddim_add(cg->st_cpu_per_core, id, NULL, 100, 1000000000, RRD_ALGORITHM_INCREMENTAL);
+ }
+ }
+ else
+ rrdset_next(cg->st_cpu_per_core);
+
+ for(i = 0; i < cg->cpuacct_usage.cpus ;i++) {
+ snprintfz(id, RRD_ID_LENGTH_MAX, "cpu%u", i);
+ rrddim_set(cg->st_cpu_per_core, id, cg->cpuacct_usage.cpu_percpu[i]);
+ }
+ rrdset_done(cg->st_cpu_per_core);
+ }
+
+ if(likely(cg->memory.updated_detailed && cg->memory.enabled_detailed == CONFIG_BOOLEAN_YES)) {
+ if(unlikely(!cg->st_mem)) {
+ snprintfz(title, CHART_TITLE_MAX, "Memory Usage");
+
+ cg->st_mem = rrdset_create_localhost(
+ cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX)
+ , "mem"
+ , NULL
+ , "mem"
+ , "cgroup.mem"
+ , title
+ , "MiB"
+ , PLUGIN_CGROUPS_NAME
+ , PLUGIN_CGROUPS_MODULE_CGROUPS_NAME
+ , cgroup_containers_chart_priority + 210
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+
+ rrdset_update_labels(cg->st_mem, cg->chart_labels);
+
+ if(!(cg->options & CGROUP_OPTIONS_IS_UNIFIED)) {
+ rrddim_add(cg->st_mem, "cache", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+ rrddim_add(cg->st_mem, "rss", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+
+ if(cg->memory.detailed_has_swap)
+ rrddim_add(cg->st_mem, "swap", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+
+ rrddim_add(cg->st_mem, "rss_huge", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+ rrddim_add(cg->st_mem, "mapped_file", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+ } else {
+ rrddim_add(cg->st_mem, "anon", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+ rrddim_add(cg->st_mem, "kernel_stack", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+ rrddim_add(cg->st_mem, "slab", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+ rrddim_add(cg->st_mem, "sock", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+ rrddim_add(cg->st_mem, "anon_thp", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+ rrddim_add(cg->st_mem, "file", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+ }
+ }
+ else
+ rrdset_next(cg->st_mem);
+
+ if(!(cg->options & CGROUP_OPTIONS_IS_UNIFIED)) {
+ rrddim_set(cg->st_mem, "cache", cg->memory.total_cache);
+ rrddim_set(cg->st_mem, "rss", (cg->memory.total_rss > cg->memory.total_rss_huge)?(cg->memory.total_rss - cg->memory.total_rss_huge):0);
+
+ if(cg->memory.detailed_has_swap)
+ rrddim_set(cg->st_mem, "swap", cg->memory.total_swap);
+
+ rrddim_set(cg->st_mem, "rss_huge", cg->memory.total_rss_huge);
+ rrddim_set(cg->st_mem, "mapped_file", cg->memory.total_mapped_file);
+ } else {
+ rrddim_set(cg->st_mem, "anon", cg->memory.anon);
+ rrddim_set(cg->st_mem, "kernel_stack", cg->memory.kernel_stack);
+ rrddim_set(cg->st_mem, "slab", cg->memory.slab);
+ rrddim_set(cg->st_mem, "sock", cg->memory.sock);
+ rrddim_set(cg->st_mem, "anon_thp", cg->memory.anon_thp);
+ rrddim_set(cg->st_mem, "file", cg->memory.total_mapped_file);
+ }
+ rrdset_done(cg->st_mem);
+
+ if(unlikely(!cg->st_writeback)) {
+ snprintfz(title, CHART_TITLE_MAX, "Writeback Memory");
+
+ cg->st_writeback = rrdset_create_localhost(
+ cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX)
+ , "writeback"
+ , NULL
+ , "mem"
+ , "cgroup.writeback"
+ , title
+ , "MiB"
+ , PLUGIN_CGROUPS_NAME
+ , PLUGIN_CGROUPS_MODULE_CGROUPS_NAME
+ , cgroup_containers_chart_priority + 300
+ , update_every
+ , RRDSET_TYPE_AREA
+ );
+
+ rrdset_update_labels(cg->st_writeback, cg->chart_labels);
+
+ if(cg->memory.detailed_has_dirty)
+ rrddim_add(cg->st_writeback, "dirty", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+
+ rrddim_add(cg->st_writeback, "writeback", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else
+ rrdset_next(cg->st_writeback);
+
+ if(cg->memory.detailed_has_dirty)
+ rrddim_set(cg->st_writeback, "dirty", cg->memory.total_dirty);
+
+ rrddim_set(cg->st_writeback, "writeback", cg->memory.total_writeback);
+ rrdset_done(cg->st_writeback);
+
+ if(!(cg->options & CGROUP_OPTIONS_IS_UNIFIED)) {
+ if(unlikely(!cg->st_mem_activity)) {
+ snprintfz(title, CHART_TITLE_MAX, "Memory Activity");
+
+ cg->st_mem_activity = rrdset_create_localhost(
+ cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX)
+ , "mem_activity"
+ , NULL
+ , "mem"
+ , "cgroup.mem_activity"
+ , title
+ , "MiB/s"
+ , PLUGIN_CGROUPS_NAME
+ , PLUGIN_CGROUPS_MODULE_CGROUPS_NAME
+ , cgroup_containers_chart_priority + 400
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rrdset_update_labels(cg->st_mem_activity, cg->chart_labels);
+
+ rrddim_add(cg->st_mem_activity, "pgpgin", "in", system_page_size, 1024 * 1024, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(cg->st_mem_activity, "pgpgout", "out", -system_page_size, 1024 * 1024, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else
+ rrdset_next(cg->st_mem_activity);
+
+ rrddim_set(cg->st_mem_activity, "pgpgin", cg->memory.total_pgpgin);
+ rrddim_set(cg->st_mem_activity, "pgpgout", cg->memory.total_pgpgout);
+ rrdset_done(cg->st_mem_activity);
+ }
+
+ if(unlikely(!cg->st_pgfaults)) {
+ snprintfz(title, CHART_TITLE_MAX, "Memory Page Faults");
+
+ cg->st_pgfaults = rrdset_create_localhost(
+ cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX)
+ , "pgfaults"
+ , NULL
+ , "mem"
+ , "cgroup.pgfaults"
+ , title
+ , "MiB/s"
+ , PLUGIN_CGROUPS_NAME
+ , PLUGIN_CGROUPS_MODULE_CGROUPS_NAME
+ , cgroup_containers_chart_priority + 500
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rrdset_update_labels(cg->st_pgfaults, cg->chart_labels);
+
+ rrddim_add(cg->st_pgfaults, "pgfault", NULL, system_page_size, 1024 * 1024, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(cg->st_pgfaults, "pgmajfault", "swap", -system_page_size, 1024 * 1024, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else
+ rrdset_next(cg->st_pgfaults);
+
+ rrddim_set(cg->st_pgfaults, "pgfault", cg->memory.total_pgfault);
+ rrddim_set(cg->st_pgfaults, "pgmajfault", cg->memory.total_pgmajfault);
+ rrdset_done(cg->st_pgfaults);
+ }
+
+ if(likely(cg->memory.updated_usage_in_bytes && cg->memory.enabled_usage_in_bytes == CONFIG_BOOLEAN_YES)) {
+ if(unlikely(!cg->st_mem_usage)) {
+ snprintfz(title, CHART_TITLE_MAX, "Used Memory %s", (cgroup_used_memory_without_cache && cg->memory.updated_detailed)?"without Cache ":"");
+
+ cg->st_mem_usage = rrdset_create_localhost(
+ cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX)
+ , "mem_usage"
+ , NULL
+ , "mem"
+ , "cgroup.mem_usage"
+ , title
+ , "MiB"
+ , PLUGIN_CGROUPS_NAME
+ , PLUGIN_CGROUPS_MODULE_CGROUPS_NAME
+ , cgroup_containers_chart_priority + 200
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+
+ rrdset_update_labels(cg->st_mem_usage, cg->chart_labels);
+
+ rrddim_add(cg->st_mem_usage, "ram", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+ rrddim_add(cg->st_mem_usage, "swap", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else
+ rrdset_next(cg->st_mem_usage);
+
+ rrddim_set(cg->st_mem_usage, "ram", cg->memory.usage_in_bytes - ((cgroup_used_memory_without_cache)?cg->memory.total_cache:0));
+ if(!(cg->options & CGROUP_OPTIONS_IS_UNIFIED)) {
+ rrddim_set(cg->st_mem_usage, "swap", (cg->memory.msw_usage_in_bytes > cg->memory.usage_in_bytes)?cg->memory.msw_usage_in_bytes - cg->memory.usage_in_bytes:0);
+ } else {
+ rrddim_set(cg->st_mem_usage, "swap", cg->memory.msw_usage_in_bytes);
+ }
+ rrdset_done(cg->st_mem_usage);
+
+ if (likely(update_memory_limits(&cg->filename_memory_limit, &cg->chart_var_memory_limit, &cg->memory_limit, "memory_limit", cg))) {
+ static unsigned long long ram_total = 0;
+
+ if(unlikely(!ram_total)) {
+ procfile *ff = NULL;
+
+ char filename[FILENAME_MAX + 1];
+ snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/proc/meminfo");
+ ff = procfile_open(config_get("plugin:cgroups", "meminfo filename to monitor", filename), " \t:", PROCFILE_FLAG_DEFAULT);
+
+ if(likely(ff))
+ ff = procfile_readall(ff);
+ if(likely(ff && procfile_lines(ff) && !strncmp(procfile_word(ff, 0), "MemTotal", 8)))
+ ram_total = str2ull(procfile_word(ff, 1)) * 1024;
+ else {
+ error("Cannot read file %s. Will not update cgroup %s RAM limit anymore.", filename, cg->id);
+ freez(cg->filename_memory_limit);
+ cg->filename_memory_limit = NULL;
+ }
+
+ procfile_close(ff);
+ }
+
+ if(likely(ram_total)) {
+ unsigned long long memory_limit = ram_total;
+
+ if(unlikely(cg->memory_limit < ram_total))
+ memory_limit = cg->memory_limit;
+
+ if(unlikely(!cg->st_mem_usage_limit)) {
+ snprintfz(title, CHART_TITLE_MAX, "Used RAM without Cache within the limits");
+
+ cg->st_mem_usage_limit = rrdset_create_localhost(
+ cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX)
+ , "mem_usage_limit"
+ , NULL
+ , "mem"
+ , "cgroup.mem_usage_limit"
+ , title
+ , "MiB"
+ , PLUGIN_CGROUPS_NAME
+ , PLUGIN_CGROUPS_MODULE_CGROUPS_NAME
+ , cgroup_containers_chart_priority + 199
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+
+ rrdset_update_labels(cg->st_mem_usage_limit, cg->chart_labels);
+
+ rrddim_add(cg->st_mem_usage_limit, "available", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+ rrddim_add(cg->st_mem_usage_limit, "used", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else
+ rrdset_next(cg->st_mem_usage_limit);
+
+ rrdset_isnot_obsolete(cg->st_mem_usage_limit);
+
+ rrddim_set(cg->st_mem_usage_limit, "available", memory_limit - (cg->memory.usage_in_bytes - ((cgroup_used_memory_without_cache)?cg->memory.total_cache:0)));
+ rrddim_set(cg->st_mem_usage_limit, "used", cg->memory.usage_in_bytes - ((cgroup_used_memory_without_cache)?cg->memory.total_cache:0));
+ rrdset_done(cg->st_mem_usage_limit);
+ }
+ }
+ else {
+ if(unlikely(cg->st_mem_usage_limit)) {
+ rrdset_is_obsolete(cg->st_mem_usage_limit);
+ cg->st_mem_usage_limit = NULL;
+ }
+ }
+
+ update_memory_limits(&cg->filename_memoryswap_limit, &cg->chart_var_memoryswap_limit, &cg->memoryswap_limit, "memory_and_swap_limit", cg);
+ }
+
+ if(likely(cg->memory.updated_failcnt && cg->memory.enabled_failcnt == CONFIG_BOOLEAN_YES)) {
+ if(unlikely(!cg->st_mem_failcnt)) {
+ snprintfz(title, CHART_TITLE_MAX, "Memory Limit Failures");
+
+ cg->st_mem_failcnt = rrdset_create_localhost(
+ cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX)
+ , "mem_failcnt"
+ , NULL
+ , "mem"
+ , "cgroup.mem_failcnt"
+ , title
+ , "count"
+ , PLUGIN_CGROUPS_NAME
+ , PLUGIN_CGROUPS_MODULE_CGROUPS_NAME
+ , cgroup_containers_chart_priority + 250
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rrdset_update_labels(cg->st_mem_failcnt, cg->chart_labels);
+
+ rrddim_add(cg->st_mem_failcnt, "failures", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else
+ rrdset_next(cg->st_mem_failcnt);
+
+ rrddim_set(cg->st_mem_failcnt, "failures", cg->memory.failcnt);
+ rrdset_done(cg->st_mem_failcnt);
+ }
+
+ if(likely(cg->io_service_bytes.updated && cg->io_service_bytes.enabled == CONFIG_BOOLEAN_YES)) {
+ if(unlikely(!cg->st_io)) {
+ snprintfz(title, CHART_TITLE_MAX, "I/O Bandwidth (all disks)");
+
+ cg->st_io = rrdset_create_localhost(
+ cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX)
+ , "io"
+ , NULL
+ , "disk"
+ , "cgroup.io"
+ , title
+ , "KiB/s"
+ , PLUGIN_CGROUPS_NAME
+ , PLUGIN_CGROUPS_MODULE_CGROUPS_NAME
+ , cgroup_containers_chart_priority + 1200
+ , update_every
+ , RRDSET_TYPE_AREA
+ );
+
+ rrdset_update_labels(cg->st_io, cg->chart_labels);
+
+ rrddim_add(cg->st_io, "read", NULL, 1, 1024, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(cg->st_io, "write", NULL, -1, 1024, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else
+ rrdset_next(cg->st_io);
+
+ rrddim_set(cg->st_io, "read", cg->io_service_bytes.Read);
+ rrddim_set(cg->st_io, "write", cg->io_service_bytes.Write);
+ rrdset_done(cg->st_io);
+ }
+
+ if(likely(cg->io_serviced.updated && cg->io_serviced.enabled == CONFIG_BOOLEAN_YES)) {
+ if(unlikely(!cg->st_serviced_ops)) {
+ snprintfz(title, CHART_TITLE_MAX, "Serviced I/O Operations (all disks)");
+
+ cg->st_serviced_ops = rrdset_create_localhost(
+ cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX)
+ , "serviced_ops"
+ , NULL
+ , "disk"
+ , "cgroup.serviced_ops"
+ , title
+ , "operations/s"
+ , PLUGIN_CGROUPS_NAME
+ , PLUGIN_CGROUPS_MODULE_CGROUPS_NAME
+ , cgroup_containers_chart_priority + 1200
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rrdset_update_labels(cg->st_serviced_ops, cg->chart_labels);
+
+ rrddim_add(cg->st_serviced_ops, "read", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(cg->st_serviced_ops, "write", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else
+ rrdset_next(cg->st_serviced_ops);
+
+ rrddim_set(cg->st_serviced_ops, "read", cg->io_serviced.Read);
+ rrddim_set(cg->st_serviced_ops, "write", cg->io_serviced.Write);
+ rrdset_done(cg->st_serviced_ops);
+ }
+
+ if(likely(cg->throttle_io_service_bytes.updated && cg->throttle_io_service_bytes.enabled == CONFIG_BOOLEAN_YES)) {
+ if(unlikely(!cg->st_throttle_io)) {
+ snprintfz(title, CHART_TITLE_MAX, "Throttle I/O Bandwidth (all disks)");
+
+ cg->st_throttle_io = rrdset_create_localhost(
+ cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX)
+ , "throttle_io"
+ , NULL
+ , "disk"
+ , "cgroup.throttle_io"
+ , title
+ , "KiB/s"
+ , PLUGIN_CGROUPS_NAME
+ , PLUGIN_CGROUPS_MODULE_CGROUPS_NAME
+ , cgroup_containers_chart_priority + 1200
+ , update_every
+ , RRDSET_TYPE_AREA
+ );
+
+ rrdset_update_labels(cg->st_throttle_io, cg->chart_labels);
+
+ rrddim_add(cg->st_throttle_io, "read", NULL, 1, 1024, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(cg->st_throttle_io, "write", NULL, -1, 1024, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else
+ rrdset_next(cg->st_throttle_io);
+
+ rrddim_set(cg->st_throttle_io, "read", cg->throttle_io_service_bytes.Read);
+ rrddim_set(cg->st_throttle_io, "write", cg->throttle_io_service_bytes.Write);
+ rrdset_done(cg->st_throttle_io);
+ }
+
+ if(likely(cg->throttle_io_serviced.updated && cg->throttle_io_serviced.enabled == CONFIG_BOOLEAN_YES)) {
+ if(unlikely(!cg->st_throttle_serviced_ops)) {
+ snprintfz(title, CHART_TITLE_MAX, "Throttle Serviced I/O Operations (all disks)");
+
+ cg->st_throttle_serviced_ops = rrdset_create_localhost(
+ cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX)
+ , "throttle_serviced_ops"
+ , NULL
+ , "disk"
+ , "cgroup.throttle_serviced_ops"
+ , title
+ , "operations/s"
+ , PLUGIN_CGROUPS_NAME
+ , PLUGIN_CGROUPS_MODULE_CGROUPS_NAME
+ , cgroup_containers_chart_priority + 1200
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rrdset_update_labels(cg->st_throttle_serviced_ops, cg->chart_labels);
+
+ rrddim_add(cg->st_throttle_serviced_ops, "read", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(cg->st_throttle_serviced_ops, "write", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else
+ rrdset_next(cg->st_throttle_serviced_ops);
+
+ rrddim_set(cg->st_throttle_serviced_ops, "read", cg->throttle_io_serviced.Read);
+ rrddim_set(cg->st_throttle_serviced_ops, "write", cg->throttle_io_serviced.Write);
+ rrdset_done(cg->st_throttle_serviced_ops);
+ }
+
+ if(likely(cg->io_queued.updated && cg->io_queued.enabled == CONFIG_BOOLEAN_YES)) {
+ if(unlikely(!cg->st_queued_ops)) {
+ snprintfz(title, CHART_TITLE_MAX, "Queued I/O Operations (all disks)");
+
+ cg->st_queued_ops = rrdset_create_localhost(
+ cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX)
+ , "queued_ops"
+ , NULL
+ , "disk"
+ , "cgroup.queued_ops"
+ , title
+ , "operations"
+ , PLUGIN_CGROUPS_NAME
+ , PLUGIN_CGROUPS_MODULE_CGROUPS_NAME
+ , cgroup_containers_chart_priority + 2000
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rrdset_update_labels(cg->st_queued_ops, cg->chart_labels);
+
+ rrddim_add(cg->st_queued_ops, "read", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ rrddim_add(cg->st_queued_ops, "write", NULL, -1, 1, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else
+ rrdset_next(cg->st_queued_ops);
+
+ rrddim_set(cg->st_queued_ops, "read", cg->io_queued.Read);
+ rrddim_set(cg->st_queued_ops, "write", cg->io_queued.Write);
+ rrdset_done(cg->st_queued_ops);
+ }
+
+ if(likely(cg->io_merged.updated && cg->io_merged.enabled == CONFIG_BOOLEAN_YES)) {
+ if(unlikely(!cg->st_merged_ops)) {
+ snprintfz(title, CHART_TITLE_MAX, "Merged I/O Operations (all disks)");
+
+ cg->st_merged_ops = rrdset_create_localhost(
+ cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX)
+ , "merged_ops"
+ , NULL
+ , "disk"
+ , "cgroup.merged_ops"
+ , title
+ , "operations/s"
+ , PLUGIN_CGROUPS_NAME
+ , PLUGIN_CGROUPS_MODULE_CGROUPS_NAME
+ , cgroup_containers_chart_priority + 2100
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rrdset_update_labels(cg->st_merged_ops, cg->chart_labels);
+
+ rrddim_add(cg->st_merged_ops, "read", NULL, 1, 1024, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(cg->st_merged_ops, "write", NULL, -1, 1024, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else
+ rrdset_next(cg->st_merged_ops);
+
+ rrddim_set(cg->st_merged_ops, "read", cg->io_merged.Read);
+ rrddim_set(cg->st_merged_ops, "write", cg->io_merged.Write);
+ rrdset_done(cg->st_merged_ops);
+ }
+
+ if (cg->options & CGROUP_OPTIONS_IS_UNIFIED) {
+ struct pressure *res = &cg->cpu_pressure;
+ if (likely(res->updated && res->some.enabled)) {
+ if (unlikely(!res->some.st)) {
+ RRDSET *chart;
+ snprintfz(title, CHART_TITLE_MAX, "CPU pressure");
+
+ chart = res->some.st = rrdset_create_localhost(
+ cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX)
+ , "cpu_pressure"
+ , NULL
+ , "cpu"
+ , "cgroup.cpu_pressure"
+ , title
+ , "percentage"
+ , PLUGIN_CGROUPS_NAME
+ , PLUGIN_CGROUPS_MODULE_CGROUPS_NAME
+ , cgroup_containers_chart_priority + 2200
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rrdset_update_labels(chart, cg->chart_labels);
+
+ res->some.rd10 = rrddim_add(chart, "some 10", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE);
+ res->some.rd60 = rrddim_add(chart, "some 60", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE);
+ res->some.rd300 = rrddim_add(chart, "some 300", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE);
+ } else {
+ rrdset_next(res->some.st);
+ }
+
+ update_pressure_chart(&res->some);
+ }
+
+ res = &cg->memory_pressure;
+ if (likely(res->updated && res->some.enabled)) {
+ if (unlikely(!res->some.st)) {
+ RRDSET *chart;
+ snprintfz(title, CHART_TITLE_MAX, "Memory pressure");
+
+ chart = res->some.st = rrdset_create_localhost(
+ cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX)
+ , "mem_pressure"
+ , NULL
+ , "mem"
+ , "cgroup.memory_pressure"
+ , title
+ , "percentage"
+ , PLUGIN_CGROUPS_NAME
+ , PLUGIN_CGROUPS_MODULE_CGROUPS_NAME
+ , cgroup_containers_chart_priority + 2300
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rrdset_update_labels(chart, cg->chart_labels);
+
+ res->some.rd10 = rrddim_add(chart, "some 10", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE);
+ res->some.rd60 = rrddim_add(chart, "some 60", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE);
+ res->some.rd300 = rrddim_add(chart, "some 300", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE);
+ } else {
+ rrdset_next(res->some.st);
+ }
+
+ update_pressure_chart(&res->some);
+ }
+
+ if (likely(res->updated && res->full.enabled)) {
+ if (unlikely(!res->full.st)) {
+ RRDSET *chart;
+ snprintfz(title, CHART_TITLE_MAX, "Memory full pressure");
+
+ chart = res->full.st = rrdset_create_localhost(
+ cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX)
+ , "mem_full_pressure"
+ , NULL
+ , "mem"
+ , "cgroup.memory_full_pressure"
+ , title
+ , "percentage"
+ , PLUGIN_CGROUPS_NAME
+ , PLUGIN_CGROUPS_MODULE_CGROUPS_NAME
+ , cgroup_containers_chart_priority + 2350
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rrdset_update_labels(chart, cg->chart_labels);
+
+ res->full.rd10 = rrddim_add(chart, "full 10", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE);
+ res->full.rd60 = rrddim_add(chart, "full 60", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE);
+ res->full.rd300 = rrddim_add(chart, "full 300", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE);
+ } else {
+ rrdset_next(res->full.st);
+ }
+
+ update_pressure_chart(&res->full);
+ }
+
+ res = &cg->io_pressure;
+ if (likely(res->updated && res->some.enabled)) {
+ if (unlikely(!res->some.st)) {
+ RRDSET *chart;
+ snprintfz(title, CHART_TITLE_MAX, "I/O pressure");
+
+ chart = res->some.st = rrdset_create_localhost(
+ cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX)
+ , "io_pressure"
+ , NULL
+ , "disk"
+ , "cgroup.io_pressure"
+ , title
+ , "percentage"
+ , PLUGIN_CGROUPS_NAME
+ , PLUGIN_CGROUPS_MODULE_CGROUPS_NAME
+ , cgroup_containers_chart_priority + 2400
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rrdset_update_labels(chart, cg->chart_labels);
+
+ res->some.rd10 = rrddim_add(chart, "some 10", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE);
+ res->some.rd60 = rrddim_add(chart, "some 60", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE);
+ res->some.rd300 = rrddim_add(chart, "some 300", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE);
+ } else {
+ rrdset_next(res->some.st);
+ }
+
+ update_pressure_chart(&res->some);
+ }
+
+ if (likely(res->updated && res->full.enabled)) {
+ if (unlikely(!res->full.st)) {
+ RRDSET *chart;
+ snprintfz(title, CHART_TITLE_MAX, "I/O full pressure");
+
+ chart = res->full.st = rrdset_create_localhost(
+ cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX)
+ , "io_full_pressure"
+ , NULL
+ , "disk"
+ , "cgroup.io_full_pressure"
+ , title
+ , "percentage"
+ , PLUGIN_CGROUPS_NAME
+ , PLUGIN_CGROUPS_MODULE_CGROUPS_NAME
+ , cgroup_containers_chart_priority + 2450
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rrdset_update_labels(chart, cg->chart_labels);
+
+ res->full.rd10 = rrddim_add(chart, "full 10", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE);
+ res->full.rd60 = rrddim_add(chart, "full 60", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE);
+ res->full.rd300 = rrddim_add(chart, "full 300", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE);
+ } else {
+ rrdset_next(res->full.st);
+ }
+
+ update_pressure_chart(&res->full);
+ }
+ }
+ }
+
+ if(likely(cgroup_enable_systemd_services))
+ update_systemd_services_charts(update_every, services_do_cpu, services_do_mem_usage, services_do_mem_detailed
+ , services_do_mem_failcnt, services_do_swap_usage, services_do_io
+ , services_do_io_ops, services_do_throttle_io, services_do_throttle_ops
+ , services_do_queued_ops, services_do_merged_ops
+ );
+
+ debug(D_CGROUP, "done updating cgroups charts");
+}
+
+// ----------------------------------------------------------------------------
+// cgroups main
+
+static void cgroup_main_cleanup(void *ptr) {
+ struct netdata_static_thread *static_thread = (struct netdata_static_thread *)ptr;
+ static_thread->enabled = NETDATA_MAIN_THREAD_EXITING;
+
+ info("cleaning up...");
+
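+ // wait up to 2 seconds, in 50ms steps, for the discovery worker to exit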
+ usec_t max = 2 * USEC_PER_SEC, step = 50000;
+
+ if (!discovery_thread.exited) {
+ info("stopping discovery thread worker");
+ uv_mutex_lock(&discovery_thread.mutex);
+ discovery_thread.start_discovery = 1;
+ uv_cond_signal(&discovery_thread.cond_var);
+ uv_mutex_unlock(&discovery_thread.mutex);
+ }
+
+ while (!discovery_thread.exited && max > 0) {
+ max -= step;
+ info("waiting for discovery thread to finish...");
+ sleep_usec(step);
+ }
+
+ static_thread->enabled = NETDATA_MAIN_THREAD_EXITED;
+}
+
+void *cgroups_main(void *ptr) {
+ netdata_thread_cleanup_push(cgroup_main_cleanup, ptr);
+
+ struct rusage thread;
+
+ // when non-zero, chart the resource usage of the plugin itself
+ int vdo_cpu_netdata = config_get_boolean("plugin:cgroups", "cgroups plugin resource charts", 1);
+
+ read_cgroup_plugin_configuration();
+
+ RRDSET *stcpu_thread = NULL;
+
+ if (uv_mutex_init(&cgroup_root_mutex)) {
+ error("CGROUP: cannot initialize mutex for the main cgroup list");
+ goto exit;
+ }
+
+ // dispatch a discovery worker thread
+ discovery_thread.start_discovery = 0;
+ discovery_thread.exited = 0;
+
+ if (uv_mutex_init(&discovery_thread.mutex)) {
+ error("CGROUP: cannot initialize mutex for discovery thread");
+ goto exit;
+ }
+ if (uv_cond_init(&discovery_thread.cond_var)) {
+ error("CGROUP: cannot initialize conditional variable for discovery thread");
+ goto exit;
+ }
+
+ int error = uv_thread_create(&discovery_thread.thread, cgroup_discovery_worker, NULL);
+ if (error) {
+ error("CGROUP: cannot create tread worker. uv_thread_create(): %s", uv_strerror(error));
+ goto exit;
+ }
+ uv_thread_set_name_np(discovery_thread.thread, "PLUGIN[cgroups]");
+
+ heartbeat_t hb;
+ heartbeat_init(&hb);
+ usec_t step = cgroup_update_every * USEC_PER_SEC;
+ usec_t find_every = cgroup_check_for_new_every * USEC_PER_SEC, find_dt = 0;
+
+ while(!netdata_exit) {
+ usec_t hb_dt = heartbeat_next(&hb, step);
+ if(unlikely(netdata_exit)) break;
+
+ find_dt += hb_dt;
+ if(unlikely(find_dt >= find_every || cgroups_check)) {
+ uv_cond_signal(&discovery_thread.cond_var);
+ discovery_thread.start_discovery = 1;
+ find_dt = 0;
+ cgroups_check = 0;
+ }
+
+ uv_mutex_lock(&cgroup_root_mutex);
+ read_all_cgroups(cgroup_root);
+ update_cgroup_charts(cgroup_update_every);
+ uv_mutex_unlock(&cgroup_root_mutex);
+
+ // --------------------------------------------------------------------
+
+ if(vdo_cpu_netdata) {
+ getrusage(RUSAGE_THREAD, &thread);
+
+ if(unlikely(!stcpu_thread)) {
+
+ stcpu_thread = rrdset_create_localhost(
+ "netdata"
+ , "plugin_cgroups_cpu"
+ , NULL
+ , "cgroups"
+ , NULL
+ , "NetData CGroups Plugin CPU usage"
+ , "milliseconds/s"
+ , PLUGIN_CGROUPS_NAME
+ , "stats"
+ , 132000
+ , cgroup_update_every
+ , RRDSET_TYPE_STACKED
+ );
+
+ rrddim_add(stcpu_thread, "user", NULL, 1, 1000, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(stcpu_thread, "system", NULL, 1, 1000, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else
+ rrdset_next(stcpu_thread);
+
+ rrddim_set(stcpu_thread, "user" , thread.ru_utime.tv_sec * 1000000ULL + thread.ru_utime.tv_usec);
+ rrddim_set(stcpu_thread, "system", thread.ru_stime.tv_sec * 1000000ULL + thread.ru_stime.tv_usec);
+ rrdset_done(stcpu_thread);
+ }
+ }
+
+exit:
+ netdata_thread_cleanup_pop(1);
+ return NULL;
+}
diff --git a/collectors/cgroups.plugin/sys_fs_cgroup.h b/collectors/cgroups.plugin/sys_fs_cgroup.h
new file mode 100644
index 0000000..155330f
--- /dev/null
+++ b/collectors/cgroups.plugin/sys_fs_cgroup.h
@@ -0,0 +1,33 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_SYS_FS_CGROUP_H
+#define NETDATA_SYS_FS_CGROUP_H 1
+
+#include "../../daemon/common.h"
+
+#if (TARGET_OS == OS_LINUX)
+
+#define NETDATA_PLUGIN_HOOK_LINUX_CGROUPS \
+ { \
+ .name = "PLUGIN[cgroups]", \
+ .config_section = CONFIG_SECTION_PLUGINS, \
+ .config_name = "cgroups", \
+ .enabled = 1, \
+ .thread = NULL, \
+ .init_routine = NULL, \
+ .start_routine = cgroups_main \
+ },
+
+extern void *cgroups_main(void *ptr);
+
+#include "../proc.plugin/plugin_proc.h"
+
+#else // (TARGET_OS == OS_LINUX)
+
+#define NETDATA_PLUGIN_HOOK_LINUX_CGROUPS
+
+#endif // (TARGET_OS == OS_LINUX)
+
+extern char *parse_k8s_data(struct label **labels, char *data);
+
+#endif //NETDATA_SYS_FS_CGROUP_H
diff --git a/collectors/cgroups.plugin/tests/test_cgroups_plugin.c b/collectors/cgroups.plugin/tests/test_cgroups_plugin.c
new file mode 100644
index 0000000..be8ea2c
--- /dev/null
+++ b/collectors/cgroups.plugin/tests/test_cgroups_plugin.c
@@ -0,0 +1,110 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "test_cgroups_plugin.h"
+#include "libnetdata/required_dummies.h"
+
+RRDHOST *localhost;
+int netdata_zero_metrics_enabled = 1;
+struct config netdata_config;
+char *netdata_configured_primary_plugins_dir = NULL;
+
+static void test_parse_k8s_data(void **state)
+{
+ UNUSED(state);
+
+ struct label *labels = (struct label *)0xff;
+
+ struct k8s_test_data {
+ char *data;
+ char *name;
+ char *key[3];
+ char *value[3];
+ };
+
+ struct k8s_test_data test_data[] = {
+ // One label
+ { .data = "name label1=\"value1\"",
+ .name = "name",
+ .key[0] = "label1", .value[0] = "value1" },
+
+ // Three labels
+ { .data = "name label1=\"value1\",label2=\"value2\",label3=\"value3\"",
+ .name = "name",
+ .key[0] = "label1", .value[0] = "value1",
+ .key[1] = "label2", .value[1] = "value2",
+ .key[2] = "label3", .value[2] = "value3" },
+
+ // Comma at the end of the data string
+ { .data = "name label1=\"value1\",",
+ .name = "name",
+ .key[0] = "label1", .value[0] = "value1" },
+
+ // Equals sign in the value
+ { .data = "name label1=\"value=1\"",
+ .name = "name",
+ .key[0] = "label1", .value[0] = "value=1" },
+
+ // Double quotation mark in the value
+ { .data = "name label1=\"value\"1\"",
+ .name = "name",
+ .key[0] = "label1", .value[0] = "value" },
+
+ // Escaped double quotation mark in the value
+ { .data = "name label1=\"value\\\"1\"",
+ .name = "name",
+ .key[0] = "label1", .value[0] = "value\\\"1" },
+
+ // Equals sign in the key
+ { .data = "name label=1=\"value1\"",
+ .name = "name",
+ .key[0] = "label", .value[0] = "1=\"value1\"" },
+
+ // Skipped value
+ { .data = "name label1=,label2=\"value2\"",
+ .name = "name",
+ .key[0] = "label2", .value[0] = "value2" },
+
+ // A pair of equals signs
+ { .data = "name= =",
+ .name = "name=" },
+
+ // A pair of commas
+ { .data = "name, ,",
+ .name = "name," },
+
+ { .data = NULL }
+ };
+
+ for (int i = 0; test_data[i].data != NULL; i++) {
+ char *data = strdup(test_data[i].data);
+
+ for (int l = 0; l < 3 && test_data[i].key[l] != NULL; l++) {
+ char *key = test_data[i].key[l];
+ char *value = test_data[i].value[l];
+
+ expect_function_call(__wrap_add_label_to_list);
+ expect_value(__wrap_add_label_to_list, l, 0xff);
+ expect_string(__wrap_add_label_to_list, key, key);
+ expect_string(__wrap_add_label_to_list, value, value);
+ expect_value(__wrap_add_label_to_list, label_source, LABEL_SOURCE_KUBERNETES);
+ }
+
+ char *name = parse_k8s_data(&labels, data);
+
+ assert_string_equal(name, test_data[i].name);
+ assert_ptr_equal(labels, 0xff);
+
+ free(data);
+ }
+}
+
+int main(void)
+{
+ const struct CMUnitTest tests[] = {
+ cmocka_unit_test(test_parse_k8s_data),
+ };
+
+ int test_res = cmocka_run_group_tests_name("test_parse_k8s_data", tests, NULL, NULL);
+
+ return test_res;
+}
diff --git a/collectors/cgroups.plugin/tests/test_cgroups_plugin.h b/collectors/cgroups.plugin/tests/test_cgroups_plugin.h
new file mode 100644
index 0000000..3d68e92
--- /dev/null
+++ b/collectors/cgroups.plugin/tests/test_cgroups_plugin.h
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef TEST_CGROUPS_PLUGIN_H
+#define TEST_CGROUPS_PLUGIN_H 1
+
+#include "libnetdata/libnetdata.h"
+
+#include "../sys_fs_cgroup.h"
+
+#include <stdarg.h>
+#include <stddef.h>
+#include <setjmp.h>
+#include <stdint.h>
+#include <cmocka.h>
+
+#endif /* TEST_CGROUPS_PLUGIN_H */
diff --git a/collectors/cgroups.plugin/tests/test_doubles.c b/collectors/cgroups.plugin/tests/test_doubles.c
new file mode 100644
index 0000000..5572fb2
--- /dev/null
+++ b/collectors/cgroups.plugin/tests/test_doubles.c
@@ -0,0 +1,162 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "test_cgroups_plugin.h"
+
+void rrdset_is_obsolete(RRDSET *st)
+{
+ UNUSED(st);
+}
+
+void rrdset_isnot_obsolete(RRDSET *st)
+{
+ UNUSED(st);
+}
+
+struct mountinfo *mountinfo_read(int do_statvfs)
+{
+ UNUSED(do_statvfs);
+
+ return NULL;
+}
+
+struct mountinfo *
+mountinfo_find_by_filesystem_mount_source(struct mountinfo *root, const char *filesystem, const char *mount_source)
+{
+ UNUSED(root);
+ UNUSED(filesystem);
+ UNUSED(mount_source);
+
+ return NULL;
+}
+
+struct mountinfo *
+mountinfo_find_by_filesystem_super_option(struct mountinfo *root, const char *filesystem, const char *super_options)
+{
+ UNUSED(root);
+ UNUSED(filesystem);
+ UNUSED(super_options);
+
+ return NULL;
+}
+
+void mountinfo_free_all(struct mountinfo *mi)
+{
+ UNUSED(mi);
+}
+
+struct label *__wrap_add_label_to_list(struct label *l, char *key, char *value, LABEL_SOURCE label_source)
+{
+ function_called();
+ check_expected_ptr(l);
+ check_expected_ptr(key);
+ check_expected_ptr(value);
+ check_expected(label_source);
+ return l;
+}
+
+void rrdset_update_labels(RRDSET *st, struct label *labels)
+{
+ UNUSED(st);
+ UNUSED(labels);
+}
+
+RRDSET *rrdset_create_custom(
+ RRDHOST *host, const char *type, const char *id, const char *name, const char *family, const char *context,
+ const char *title, const char *units, const char *plugin, const char *module, long priority, int update_every,
+ RRDSET_TYPE chart_type, RRD_MEMORY_MODE memory_mode, long history_entries)
+{
+ UNUSED(host);
+ UNUSED(type);
+ UNUSED(id);
+ UNUSED(name);
+ UNUSED(family);
+ UNUSED(context);
+ UNUSED(title);
+ UNUSED(units);
+ UNUSED(plugin);
+ UNUSED(module);
+ UNUSED(priority);
+ UNUSED(update_every);
+ UNUSED(chart_type);
+ UNUSED(memory_mode);
+ UNUSED(history_entries);
+
+ return NULL;
+}
+
+RRDDIM *rrddim_add_custom(
+ RRDSET *st, const char *id, const char *name, collected_number multiplier, collected_number divisor,
+ RRD_ALGORITHM algorithm, RRD_MEMORY_MODE memory_mode)
+{
+ UNUSED(st);
+ UNUSED(id);
+ UNUSED(name);
+ UNUSED(multiplier);
+ UNUSED(divisor);
+ UNUSED(algorithm);
+ UNUSED(memory_mode);
+
+ return NULL;
+}
+
+collected_number rrddim_set(RRDSET *st, const char *id, collected_number value)
+{
+ UNUSED(st);
+ UNUSED(id);
+ UNUSED(value);
+
+ return 0;
+}
+
+collected_number rrddim_set_by_pointer(RRDSET *st, RRDDIM *rd, collected_number value)
+{
+ UNUSED(st);
+ UNUSED(rd);
+ UNUSED(value);
+
+ return 0;
+}
+
+RRDSETVAR *rrdsetvar_custom_chart_variable_create(RRDSET *st, const char *name)
+{
+ UNUSED(st);
+ UNUSED(name);
+
+ return NULL;
+}
+
+void rrdsetvar_custom_chart_variable_set(RRDSETVAR *rs, calculated_number value)
+{
+ UNUSED(rs);
+ UNUSED(value);
+}
+
+void rrdset_next_usec(RRDSET *st, usec_t microseconds)
+{
+ UNUSED(st);
+ UNUSED(microseconds);
+}
+
+void rrdset_done(RRDSET *st)
+{
+ UNUSED(st);
+}
+
+void update_pressure_chart(struct pressure_chart *chart)
+{
+ UNUSED(chart);
+}
+
+void netdev_rename_device_add(
+ const char *host_device, const char *container_device, const char *container_name, struct label *labels)
+{
+ UNUSED(host_device);
+ UNUSED(container_device);
+ UNUSED(container_name);
+ UNUSED(labels);
+}
+
+void netdev_rename_device_del(const char *host_device)
+{
+ UNUSED(host_device);
+}
diff --git a/collectors/charts.d.plugin/Makefile.am b/collectors/charts.d.plugin/Makefile.am
new file mode 100644
index 0000000..03c7f0a
--- /dev/null
+++ b/collectors/charts.d.plugin/Makefile.am
@@ -0,0 +1,50 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+CLEANFILES = \
+ charts.d.plugin \
+ $(NULL)
+
+include $(top_srcdir)/build/subst.inc
+SUFFIXES = .in
+
+dist_libconfig_DATA = \
+ charts.d.conf \
+ $(NULL)
+
+dist_plugins_SCRIPTS = \
+ charts.d.dryrun-helper.sh \
+ charts.d.plugin \
+ loopsleepms.sh.inc \
+ $(NULL)
+
+dist_noinst_DATA = \
+ charts.d.plugin.in \
+ README.md \
+ $(NULL)
+
+dist_charts_SCRIPTS = \
+ $(NULL)
+
+dist_charts_DATA = \
+ $(NULL)
+
+userchartsconfigdir=$(configdir)/charts.d
+dist_userchartsconfig_DATA = \
+ $(NULL)
+
+# Explicitly install directories to avoid permission issues due to umask
+install-exec-local:
+ $(INSTALL) -d $(DESTDIR)$(userchartsconfigdir)
+
+chartsconfigdir=$(libconfigdir)/charts.d
+dist_chartsconfig_DATA = \
+ $(NULL)
+
+include ap/Makefile.inc
+include apcupsd/Makefile.inc
+include example/Makefile.inc
+include libreswan/Makefile.inc
+include nut/Makefile.inc
+include opensips/Makefile.inc
+include sensors/Makefile.inc
diff --git a/collectors/charts.d.plugin/README.md b/collectors/charts.d.plugin/README.md
new file mode 100644
index 0000000..4a7911a
--- /dev/null
+++ b/collectors/charts.d.plugin/README.md
@@ -0,0 +1,198 @@
+<!--
+title: "charts.d.plugin"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/charts.d.plugin/README.md
+-->
+
+# charts.d.plugin
+
+`charts.d.plugin` is a Netdata external plugin. It is an **orchestrator** for data collection modules written in `BASH` v4+.
+
+1. It runs as an independent process (`ps fax` shows it)
+2. It is started and stopped automatically by Netdata
+3. It communicates with Netdata via a unidirectional pipe (sending data to the `netdata` daemon)
+4. It supports any number of data collection **modules**
+
+`charts.d.plugin` has been designed so that the actual script that does the data collection stays permanently in
+memory, collecting data with as little overhead as possible
+(i.e. initialize once, then repeatedly collect values).
+
+`charts.d.plugin` looks for scripts in `/usr/lib/netdata/charts.d`.
+The scripts should have the filename suffix: `.chart.sh`.
+
+## Configuration
+
+`charts.d.plugin` itself can be configured using the configuration file `/etc/netdata/charts.d.conf`
+(to edit it on your system run `/etc/netdata/edit-config charts.d.conf`). This file is also a BASH script.
+
+In this file, you can place statements like this:
+
+```
+enable_all_charts="yes"
+X="yes"
+Y="no"
+```
+
+where `X` and `Y` are the names of individual charts.d collector scripts.
+When set to `yes`, charts.d will evaluate the collector script (see below).
+When set to `no`, charts.d will ignore the collector script.
+
+The variable `enable_all_charts` sets the default enable/disable state for all charts.
+
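+For example, a configuration that disables everything except the `apcupsd` module (chosen here just for
+illustration) could look like this:
+
+```
+enable_all_charts="no"
+apcupsd="yes"
+```
+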
+## A charts.d module
+
+A `charts.d.plugin` module is a BASH script defining a few functions.
+
+For a module called `X`, the following criteria must be met:
+
+1. The module script must be called `X.chart.sh` and placed in `/usr/libexec/netdata/charts.d`.
+
+2. If the module needs a configuration, it should be called `X.conf` and placed in `/etc/netdata/charts.d`.
+ The configuration file `X.conf` is also a BASH script itself.
+ To edit the default files supplied by Netdata, run `/etc/netdata/edit-config charts.d/X.conf`,
+ where `X` is the name of the module.
+
+3. All functions and global variables defined in the script and its configuration must begin with `X_`.
+
+4. The following functions must be defined:
+
+ - `X_check()` - returns 0 or 1 depending on whether the module is able to run or not
+ (following the standard Linux command line return codes: 0 = OK, the collector can operate and 1 = FAILED,
+ the collector cannot be used).
+
+ - `X_create()` - creates the Netdata charts, following the standard Netdata plugin guides as described in
+ **[External Plugins](/collectors/plugins.d/README.md)** (commands `CHART` and `DIMENSION`).
+ The return value does matter: 0 = OK, 1 = FAILED.
+
+ - `X_update()` - collects the values for the defined charts, following the standard Netdata plugin guides
+ as described in **[External Plugins](/collectors/plugins.d/README.md)** (commands `BEGIN`, `SET`, `END`).
+ The return value also matters: 0 = OK, 1 = FAILED.
+
+5. The following global variables are available to be set:
+ - `X_update_every` - the data collection frequency for the module script, in seconds.
+
+The module script may use more functions or variables. But all of them must begin with `X_`.
+
+The standard Netdata plugin variables are also available (check **[External Plugins](/collectors/plugins.d/README.md)**).
+
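+As a quick orientation, here is a minimal sketch of such a module, named `example2` (a hypothetical name; the
+chart, dimension and values are illustrative, not a shipped module):
+
+```sh
+# file: /usr/libexec/netdata/charts.d/example2.chart.sh
+# hypothetical module: names and values are illustrative
+
+example2_update_every=1
+
+example2_check() {
+    # this module can always collect
+    return 0
+}
+
+example2_create() {
+    # create one chart with a single dimension, using the CHART and DIMENSION commands
+    cat << EOF
+CHART example2.random '' "A random number" "value" example2 '' line
+DIMENSION random '' absolute 1 1
+EOF
+    return 0
+}
+
+example2_update() {
+    # $1 is the microseconds since the last run; it is appended to BEGIN
+    cat << EOF
+BEGIN example2.random $1
+SET random = $RANDOM
+END
+EOF
+    return 0
+}
+```
+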
+### X_check()
+
+The purpose of the BASH function `X_check()` is to check if the module can collect data (or check its config).
+
+For example, if the module is about monitoring a local mysql database, the `X_check()` function may attempt to
+connect to a local mysql database to find out if it can read the values it needs.
+
+`X_check()` is run only once for the lifetime of the module.
+
+### X_create()
+
+The purpose of the BASH function `X_create()` is to create the charts and dimensions using the standard Netdata
+plugin guides (**[External Plugins](/collectors/plugins.d/README.md)**).
+
+`X_create()` will be called just once and only after `X_check()` was successful.
+You can, however, call it yourself when there is need for it (for example, to add a new dimension to an existing chart).
+
+A non-zero return value will disable the collector.
+
+### X_update()
+
+`X_update()` will be called repeatedly every `X_update_every` seconds, to collect new values and send them to Netdata,
+following the Netdata plugin guides (**[External Plugins](/collectors/plugins.d/README.md)**).
+
+The function will be called with one parameter: microseconds since the last time it was run. This value should be
+appended to the `BEGIN` statement of every chart updated by the collector script.
+
+A non-zero return value will disable the collector.
+
+### Useful functions charts.d provides
+
+Module scripts can use the following charts.d functions:
+
+#### require_cmd command
+
+`require_cmd()` will check if a command is available in the running system.
+
+For example, your `X_check()` function may use it like this:
+
+```sh
+mysql_check() {
+ require_cmd mysql || return 1
+ return 0
+}
+```
+
+Using the above, if the command `mysql` is not available in the system, the `mysql` module will be disabled.
+
+#### fixid "string"
+
+`fixid()` will get a string and return a properly formatted id for a chart or dimension.
+
+This is an expensive function that should not be used in `X_update()`.
+You can keep the generated id in a BASH associative array to have the values available in `X_update()`, like this:
+
+```sh
+declare -A X_ids=()
+X_create() {
+ local name="a very bad name for id"
+
+ X_ids[$name]="$(fixid "$name")"
+}
+
+X_update() {
+ local microseconds="$1"
+
+ ...
+ local name="a very bad name for id"
+ ...
+
+ echo "BEGIN ${X_ids[$name]} $microseconds"
+ ...
+}
+```
+
+### Debugging your collectors
+
+You can run `charts.d.plugin` by hand with something like this:
+
+```sh
+# become user netdata
+sudo su -s /bin/sh netdata
+
+# run the plugin in debug mode
+/usr/libexec/netdata/plugins.d/charts.d.plugin debug 1 X Y Z
+```
+
+Charts.d will run in `debug` mode, with an update frequency of `1`, evaluating only the collector scripts
+`X`, `Y` and `Z`. You can define zero or more module scripts. If none is defined, charts.d will evaluate all
+module scripts available.
+
+Keep in mind that if your configs are not in `/etc/netdata`, you should do the following before running
+`charts.d.plugin`:
+
+```sh
+export NETDATA_USER_CONFIG_DIR="/path/to/etc/netdata"
+```
+
+Also, remember that Netdata runs `charts.d.plugin` as user `netdata` (or any other user the `netdata` process is configured to run as).
+
+## Running multiple instances of charts.d.plugin
+
+`charts.d.plugin` calls the modules' `X_update()` functions one after another. This means that a delay in collector `X`
+will also delay the collection of `Y` and `Z`.
+
+You can have multiple `charts.d.plugin` running to overcome this problem.
+
+This is what you need to do:
+
+1. Decide a new name for the new charts.d instance: example `charts2.d`.
+
+2. Create/edit the files `/etc/netdata/charts.d.conf` and `/etc/netdata/charts2.d.conf` and enable/disable the
+ modules you want each instance to run. Remember to set `enable_all_charts="no"` in both of them, and enable the
+ individual modules for each.
+
+3. Link `/usr/libexec/netdata/plugins.d/charts.d.plugin` to `/usr/libexec/netdata/plugins.d/charts2.d.plugin`.
+ Netdata will spawn a new charts.d process, as shown in the sketch after this list.
+
+Execute the above in this order, since Netdata will (by default) attempt to start new plugins soon after they are
+created in `/usr/libexec/netdata/plugins.d/`.
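+
+For illustration, a sketch of steps 2 and 3 (paths may differ on your installation):
+
+```sh
+# step 2: one config file per instance, each enabling different modules
+# (both must set enable_all_charts="no")
+sudo touch /etc/netdata/charts2.d.conf
+
+# step 3: create the second plugin as a link to the first;
+# netdata will start it automatically
+sudo ln -s /usr/libexec/netdata/plugins.d/charts.d.plugin \
+ /usr/libexec/netdata/plugins.d/charts2.d.plugin
+```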
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fcharts.d.plugin%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/charts.d.plugin/ap/Makefile.inc b/collectors/charts.d.plugin/ap/Makefile.inc
new file mode 100644
index 0000000..a2dd375
--- /dev/null
+++ b/collectors/charts.d.plugin/ap/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_charts_DATA += ap/ap.chart.sh
+dist_chartsconfig_DATA += ap/ap.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += ap/README.md ap/Makefile.inc
+
diff --git a/collectors/charts.d.plugin/ap/README.md b/collectors/charts.d.plugin/ap/README.md
new file mode 100644
index 0000000..35a00d6
--- /dev/null
+++ b/collectors/charts.d.plugin/ap/README.md
@@ -0,0 +1,99 @@
+<!--
+title: "Access point monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/charts.d.plugin/ap/README.md
+sidebar_label: "Access points"
+-->
+
+# Access point monitoring with Netdata
+
+The `ap` collector visualizes data related to access points.
+
+## Example Netdata charts
+
+![image](https://cloud.githubusercontent.com/assets/2662304/12377654/9f566e88-bd2d-11e5-855a-e0ba96b8fd98.png)
+
+## How it works
+
+It does the following:
+
+1. Runs `iw dev` searching for interfaces that have `type AP`.
+
+ From the same output it collects the SSIDs each AP supports by looking for lines `ssid NAME`.
+
+ Example:
+
+```sh
+# iw dev
+phy#0
+ Interface wlan0
+ ifindex 3
+ wdev 0x1
+ addr 7c:dd:90:77:34:2a
+ ssid TSAOUSIS
+ type AP
+ channel 7 (2442 MHz), width: 20 MHz, center1: 2442 MHz
+```
+
+2. For each interface found, it runs `iw INTERFACE station dump`.
+
+ From the output it collects:
+
+ - rx/tx bytes
+ - rx/tx packets
+ - tx retries
+ - tx failed
+ - signal strength
+ - rx/tx bitrate
+ - expected throughput
+
+ Example:
+
+```sh
+# iw wlan0 station dump
+Station 40:b8:37:5a:ed:5e (on wlan0)
+ inactive time: 910 ms
+ rx bytes: 15588897
+ rx packets: 127772
+ tx bytes: 52257763
+ tx packets: 95802
+ tx retries: 2162
+ tx failed: 28
+ signal: -43 dBm
+ signal avg: -43 dBm
+ tx bitrate: 65.0 MBit/s MCS 7
+ rx bitrate: 1.0 MBit/s
+ expected throughput: 32.125Mbps
+ authorized: yes
+ authenticated: yes
+ preamble: long
+ WMM/WME: yes
+ MFP: no
+ TDLS peer: no
+```
+
+3. For each interface found, it creates 6 charts:
+
+ - Number of Connected clients
+ - Bandwidth for all clients
+ - Packets for all clients
+ - Transmit Issues for all clients
+ - Average Signal among all clients
+ - Average Bitrate (including average expected throughput) among all clients
+
+## Configuration
+
+Edit the `charts.d/ap.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config charts.d/ap.conf
+```
+
+You can only set `ap_update_every=NUMBER` to change the data collection frequency.
+
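+For example, to collect every 10 seconds:
+
+```sh
+# charts.d/ap.conf
+ap_update_every=10
+```
+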
+## Auto-detection
+
+The plugin auto-detects access points running on your Linux box.
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fcharts.d.plugin%2Fap%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/charts.d.plugin/ap/ap.chart.sh b/collectors/charts.d.plugin/ap/ap.chart.sh
new file mode 100644
index 0000000..5dd7878
--- /dev/null
+++ b/collectors/charts.d.plugin/ap/ap.chart.sh
@@ -0,0 +1,179 @@
+# shellcheck shell=bash
+# no need for shebang - this file is loaded from charts.d.plugin
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# netdata
+# real-time performance and health monitoring, done right!
+# (C) 2016 Costa Tsaousis <costa@tsaousis.gr>
+#
+
+# _update_every is a special variable - it holds the number of seconds
+# between the calls of the _update() function
+ap_update_every=
+ap_priority=6900
+
+declare -A ap_devs=()
+
+# _check is called once, to find out if this chart should be enabled or not
+ap_check() {
+ require_cmd iw || return 1
+ local ev
+ ev=$(run iw dev | awk '
+ BEGIN {
+ i = "";
+ ssid = "";
+ ap = 0;
+ }
+ /^[ \t]+Interface / {
+ if( ap == 1 ) {
+ print "ap_devs[" i "]=\"" ssid "\""
+ }
+
+ i = $2;
+ ssid = "";
+ ap = 0;
+ }
+ /^[ \t]+ssid / { ssid = $2; }
+ /^[ \t]+type AP$/ { ap = 1; }
+ END {
+ if( ap == 1 ) {
+ print "ap_devs[" i "]=\"" ssid "\""
+ }
+ }
+ ')
+ eval "${ev}"
+
+ # this should return:
+ # - 0 to enable the chart
+ # - 1 to disable the chart
+
+ [ ${#ap_devs[@]} -gt 0 ] && return 0
+ error "no devices found in AP mode, with 'iw dev'"
+ return 1
+}
+
+# _create is called once, to create the charts
+ap_create() {
+ local ssid dev
+
+ for dev in "${!ap_devs[@]}"; do
+ ssid="${ap_devs[${dev}]}"
+
+ # create the chart with 3 dimensions
+ cat << EOF
+CHART ap_clients.${dev} '' "Connected clients to ${ssid} on ${dev}" "clients" ${dev} ap.clients line $((ap_priority + 1)) $ap_update_every
+DIMENSION clients '' absolute 1 1
+
+CHART ap_bandwidth.${dev} '' "Bandwidth for ${ssid} on ${dev}" "kilobits/s" ${dev} ap.net area $((ap_priority + 2)) $ap_update_every
+DIMENSION received '' incremental 8 1024
+DIMENSION sent '' incremental -8 1024
+
+CHART ap_packets.${dev} '' "Packets for ${ssid} on ${dev}" "packets/s" ${dev} ap.packets line $((ap_priority + 3)) $ap_update_every
+DIMENSION received '' incremental 1 1
+DIMENSION sent '' incremental -1 1
+
+CHART ap_issues.${dev} '' "Transmit Issues for ${ssid} on ${dev}" "issues/s" ${dev} ap.issues line $((ap_priority + 4)) $ap_update_every
+DIMENSION retries 'tx retries' incremental 1 1
+DIMENSION failures 'tx failures' incremental -1 1
+
+CHART ap_signal.${dev} '' "Average Signal for ${ssid} on ${dev}" "dBm" ${dev} ap.signal line $((ap_priority + 5)) $ap_update_every
+DIMENSION signal 'average signal' absolute 1 1000
+
+CHART ap_bitrate.${dev} '' "Bitrate for ${ssid} on ${dev}" "Mbps" ${dev} ap.bitrate line $((ap_priority + 6)) $ap_update_every
+DIMENSION receive '' absolute 1 1000
+DIMENSION transmit '' absolute -1 1000
+DIMENSION expected 'expected throughput' absolute 1 1000
+EOF
+ done
+
+ return 0
+}
+
+# _update is called continuously, to collect the values
+ap_update() {
+ # the first argument to this function is the microseconds since last update
+ # pass this parameter to the BEGIN statement (see below).
+
+ # do all the work to collect / calculate the values
+ # for each dimension
+ # remember: KEEP IT SIMPLE AND SHORT
+
+ for dev in "${!ap_devs[@]}"; do
+ echo
+ echo "DEVICE ${dev}"
+ iw "${dev}" station dump
+ done | awk '
+ function zero_data() {
+ dev = "";
+ c = 0;
+ rb = 0;
+ tb = 0;
+ rp = 0;
+ tp = 0;
+ tr = 0;
+ tf = 0;
+ tt = 0;
+ rt = 0;
+ s = 0;
+ g = 0;
+ e = 0;
+ }
+ function print_device() {
+ if(dev != "" && length(dev) > 0) {
+ print "BEGIN ap_clients." dev;
+ print "SET clients = " c;
+ print "END";
+ print "BEGIN ap_bandwidth." dev;
+ print "SET received = " rb;
+ print "SET sent = " tb;
+ print "END";
+ print "BEGIN ap_packets." dev;
+ print "SET received = " rp;
+ print "SET sent = " tp;
+ print "END";
+ print "BEGIN ap_issues." dev;
+ print "SET retries = " tr;
+ print "SET failures = " tf;
+ print "END";
+
+ if( c == 0 ) c = 1;
+ print "BEGIN ap_signal." dev;
+ print "SET signal = " int(s / c);
+ print "END";
+ print "BEGIN ap_bitrate." dev;
+ print "SET receive = " int(rt / c);
+ print "SET transmit = " int(tt / c);
+ print "SET expected = " int(e / c);
+ print "END";
+ }
+ zero_data();
+ }
+ BEGIN {
+ zero_data();
+ }
+ /^DEVICE / {
+ print_device();
+ dev = $2;
+ }
+ /^Station/ { c++; }
+ /^[ \t]+rx bytes:/ { rb += $3; }
+ /^[ \t]+tx bytes:/ { tb += $3; }
+ /^[ \t]+rx packets:/ { rp += $3; }
+ /^[ \t]+tx packets:/ { tp += $3; }
+ /^[ \t]+tx retries:/ { tr += $3; }
+ /^[ \t]+tx failed:/ { tf += $3; }
+ /^[ \t]+signal:/ { x = $2; s += x * 1000; }
+ /^[ \t]+rx bitrate:/ { x = $3; rt += x * 1000; }
+ /^[ \t]+tx bitrate:/ { x = $3; tt += x * 1000; }
+ /^[ \t]+expected throughput:(.*)Mbps/ {
+ x=$3;
+ sub(/Mbps/, "", x);
+ e += x * 1000;
+ }
+ END {
+ print_device();
+ }
+ '
+
+ return 0
+}
diff --git a/collectors/charts.d.plugin/ap/ap.conf b/collectors/charts.d.plugin/ap/ap.conf
new file mode 100644
index 0000000..38fc157
--- /dev/null
+++ b/collectors/charts.d.plugin/ap/ap.conf
@@ -0,0 +1,23 @@
+# no need for shebang - this file is loaded from charts.d.plugin
+
+# netdata
+# real-time performance and health monitoring, done right!
+# (C) 2018 Costa Tsaousis <costa@tsaousis.gr>
+# GPL v3+
+
+# nothing fancy to configure.
+# this module will run
+# iw dev - to find wireless devices in AP mode
+# iw ${dev} station dump - to get connected clients
+# based on the above, it generates several charts
+
+# the data collection frequency
+# if unset, will inherit the netdata update frequency
+#ap_update_every=
+
+# the charts priority on the dashboard
+#ap_priority=6900
+
+# the number of retries to do in case of failure
+# before disabling the module
+#ap_retries=10
diff --git a/collectors/charts.d.plugin/apcupsd/Makefile.inc b/collectors/charts.d.plugin/apcupsd/Makefile.inc
new file mode 100644
index 0000000..19cb9ca
--- /dev/null
+++ b/collectors/charts.d.plugin/apcupsd/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_charts_DATA += apcupsd/apcupsd.chart.sh
+dist_chartsconfig_DATA += apcupsd/apcupsd.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += apcupsd/README.md apcupsd/Makefile.inc
+
diff --git a/collectors/charts.d.plugin/apcupsd/README.md b/collectors/charts.d.plugin/apcupsd/README.md
new file mode 100644
index 0000000..b5b41e8
--- /dev/null
+++ b/collectors/charts.d.plugin/apcupsd/README.md
@@ -0,0 +1,21 @@
+<!--
+title: "APC UPS monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/charts.d.plugin/apcupsd/README.md
+sidebar_label: "APC UPS"
+-->
+
+# APC UPS monitoring with Netdata
+
+Monitors different APC UPS models and retrieves status information using the `apcaccess` tool.
+
+## Configuration
+
+Edit the `charts.d/apcupsd.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config charts.d/apcupsd.conf
+```
+
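+The main setting is the `apcupsd_sources` associative array, which maps a name to each `IP:PORT` an apcupsd
+Network Information Server listens on. By default only `local` (`127.0.0.1:3551`) is defined; the `remote`
+entry below is illustrative:
+
+```sh
+declare -A apcupsd_sources=(
+ ["local"]="127.0.0.1:3551"
+ ["remote"]="10.0.0.2:3551"
+)
+```
+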
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fcharts.d.plugin%2Fapcupsd%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/charts.d.plugin/apcupsd/apcupsd.chart.sh b/collectors/charts.d.plugin/apcupsd/apcupsd.chart.sh
new file mode 100644
index 0000000..014a9c1
--- /dev/null
+++ b/collectors/charts.d.plugin/apcupsd/apcupsd.chart.sh
@@ -0,0 +1,213 @@
+# shellcheck shell=bash
+# no need for shebang - this file is loaded from charts.d.plugin
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# netdata
+# real-time performance and health monitoring, done right!
+# (C) 2016 Costa Tsaousis <costa@tsaousis.gr>
+#
+
+apcupsd_ip=
+apcupsd_port=
+
+declare -A apcupsd_sources=(
+ ["local"]="127.0.0.1:3551"
+)
+
+# how frequently to collect UPS data
+apcupsd_update_every=10
+
+apcupsd_timeout=3
+
+# the priority of apcupsd related to other charts
+apcupsd_priority=90000
+
+apcupsd_get() {
+ run -t $apcupsd_timeout apcaccess status "$1"
+}
+
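+# consider a UPS dead when apcaccess reports no STATUS line,
+# or a status of COMMLOST or SHUTTING DOWN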
+is_ups_alive() {
+ local status
+ status="$(apcupsd_get "$1" | sed -e 's/STATUS.*: //' -e 't' -e 'd')"
+ case "$status" in
+ "" | "COMMLOST" | "SHUTTING DOWN") return 1 ;;
+ *) return 0 ;;
+ esac
+}
+
+apcupsd_check() {
+
+ # this should return:
+ # - 0 to enable the chart
+ # - 1 to disable the chart
+
+ require_cmd apcaccess || return 1
+
+ # backwards compatibility
+ if [ "${apcupsd_ip}:${apcupsd_port}" != ":" ]; then
+ apcupsd_sources["local"]="${apcupsd_ip}:${apcupsd_port}"
+ fi
+
+ local host working=0 failed=0
+ for host in "${!apcupsd_sources[@]}"; do
+ run apcupsd_get "${apcupsd_sources[${host}]}" > /dev/null
+ # shellcheck disable=2181
+ if [ $? -ne 0 ]; then
+ error "cannot get information for apcupsd server ${host} on ${apcupsd_sources[${host}]}."
+ failed=$((failed + 1))
+ else
+ if ! is_ups_alive ${apcupsd_sources[${host}]}; then
+ error "APC UPS ${host} on ${apcupsd_sources[${host}]} is not online."
+ failed=$((failed + 1))
+ else
+ working=$((working + 1))
+ fi
+ fi
+ done
+
+ if [ ${working} -eq 0 ]; then
+ error "No APC UPSes found available."
+ return 1
+ fi
+
+ return 0
+}
+
+apcupsd_create() {
+ local host src
+ for host in "${!apcupsd_sources[@]}"; do
+ src=${apcupsd_sources[${host}]}
+
+ # create the charts
+ cat << EOF
+CHART apcupsd_${host}.charge '' "UPS Charge for ${host} on ${src}" "percentage" ups apcupsd.charge area $((apcupsd_priority + 1)) $apcupsd_update_every
+DIMENSION battery_charge charge absolute 1 100
+
+CHART apcupsd_${host}.battery_voltage '' "UPS Battery Voltage for ${host} on ${src}" "Volts" ups apcupsd.battery.voltage line $((apcupsd_priority + 3)) $apcupsd_update_every
+DIMENSION battery_voltage voltage absolute 1 100
+DIMENSION battery_voltage_nominal nominal absolute 1 100
+
+CHART apcupsd_${host}.input_voltage '' "UPS Input Voltage for ${host} on ${src}" "Volts" input apcupsd.input.voltage line $((apcupsd_priority + 4)) $apcupsd_update_every
+DIMENSION input_voltage voltage absolute 1 100
+DIMENSION input_voltage_min min absolute 1 100
+DIMENSION input_voltage_max max absolute 1 100
+
+CHART apcupsd_${host}.input_frequency '' "UPS Input Frequency for ${host} on ${src}" "Hz" input apcupsd.input.frequency line $((apcupsd_priority + 5)) $apcupsd_update_every
+DIMENSION input_frequency frequency absolute 1 100
+
+CHART apcupsd_${host}.output_voltage '' "UPS Output Voltage for ${host} on ${src}" "Volts" output apcupsd.output.voltage line $((apcupsd_priority + 6)) $apcupsd_update_every
+DIMENSION output_voltage voltage absolute 1 100
+DIMENSION output_voltage_nominal nominal absolute 1 100
+
+CHART apcupsd_${host}.load '' "UPS Load for ${host} on ${src}" "percentage" ups apcupsd.load area $((apcupsd_priority)) $apcupsd_update_every
+DIMENSION load load absolute 1 100
+
+CHART apcupsd_${host}.temp '' "UPS Temperature for ${host} on ${src}" "Celsius" ups apcupsd.temperature line $((apcupsd_priority + 7)) $apcupsd_update_every
+DIMENSION temp temp absolute 1 100
+
+CHART apcupsd_${host}.time '' "UPS Time Remaining for ${host} on ${src}" "Minutes" ups apcupsd.time area $((apcupsd_priority + 2)) $apcupsd_update_every
+DIMENSION time time absolute 1 100
+
+CHART apcupsd_${host}.online '' "UPS ONLINE flag for ${host} on ${src}" "boolean" ups apcupsd.online line $((apcupsd_priority + 8)) $apcupsd_update_every
+DIMENSION online online absolute 0 1
+
+EOF
+ done
+ return 0
+}
+
+apcupsd_update() {
+ # the first argument to this function is the microseconds since last update
+ # pass this parameter to the BEGIN statement (see below).
+
+ # do all the work to collect / calculate the values
+ # for each dimension
+ # remember: KEEP IT SIMPLE AND SHORT
+
+ local host working=0 failed=0
+ for host in "${!apcupsd_sources[@]}"; do
+ apcupsd_get "${apcupsd_sources[${host}]}" | awk "
+
+BEGIN {
+ battery_charge = 0;
+ battery_voltage = 0;
+ battery_voltage_nominal = 0;
+ input_voltage = 0;
+ input_voltage_min = 0;
+ input_voltage_max = 0;
+ input_frequency = 0;
+ output_voltage = 0;
+ output_voltage_nominal = 0;
+ load = 0;
+ temp = 0;
+ time = 0;
+}
+/^BCHARGE.*/ { battery_charge = \$3 * 100 };
+/^BATTV.*/ { battery_voltage = \$3 * 100 };
+/^NOMBATTV.*/ { battery_voltage_nominal = \$3 * 100 };
+/^LINEV.*/ { input_voltage = \$3 * 100 };
+/^MINLINEV.*/ { input_voltage_min = \$3 * 100 };
+/^MAXLINEV.*/ { input_voltage_max = \$3 * 100 };
+/^LINEFREQ.*/ { input_frequency = \$3 * 100 };
+/^OUTPUTV.*/ { output_voltage = \$3 * 100 };
+/^NOMOUTV.*/ { output_voltage_nominal = \$3 * 100 };
+/^LOADPCT.*/ { load = \$3 * 100 };
+/^ITEMP.*/ { temp = \$3 * 100 };
+/^TIMELEFT.*/ { time = \$3 * 100 };
+/^STATUS.*/ { online=(\$3 == \"ONLINE\" || \$3 == \"ONBATT\")?1:0 };
+END {
+ print \"BEGIN apcupsd_${host}.online $1\";
+ print \"SET online = \" online;
+ print \"END\"
+
+ if (online == 1) {
+ print \"BEGIN apcupsd_${host}.charge $1\";
+ print \"SET battery_charge = \" battery_charge;
+ print \"END\"
+
+ print \"BEGIN apcupsd_${host}.battery_voltage $1\";
+ print \"SET battery_voltage = \" battery_voltage;
+ print \"SET battery_voltage_nominal = \" battery_voltage_nominal;
+ print \"END\"
+
+ print \"BEGIN apcupsd_${host}.input_voltage $1\";
+ print \"SET input_voltage = \" input_voltage;
+ print \"SET input_voltage_min = \" input_voltage_min;
+ print \"SET input_voltage_max = \" input_voltage_max;
+ print \"END\"
+
+ print \"BEGIN apcupsd_${host}.input_frequency $1\";
+ print \"SET input_frequency = \" input_frequency;
+ print \"END\"
+
+ print \"BEGIN apcupsd_${host}.output_voltage $1\";
+ print \"SET output_voltage = \" output_voltage;
+ print \"SET output_voltage_nominal = \" output_voltage_nominal;
+ print \"END\"
+
+ print \"BEGIN apcupsd_${host}.load $1\";
+ print \"SET load = \" load;
+ print \"END\"
+
+ print \"BEGIN apcupsd_${host}.temp $1\";
+ print \"SET temp = \" temp;
+ print \"END\"
+
+ print \"BEGIN apcupsd_${host}.time $1\";
+ print \"SET time = \" time;
+ print \"END\"
+ }
+}"
+ # shellcheck disable=SC2181
+ if [ $? -ne 0 ]; then
+ failed=$((failed + 1))
+ error "failed to get values for APC UPS ${host} on ${apcupsd_sources[${host}]}" && return 1
+ else
+ working=$((working + 1))
+ fi
+ done
+
+ [ $working -eq 0 ] && error "failed to get values from all APC UPSes" && return 1
+
+ return 0
+}
diff --git a/collectors/charts.d.plugin/apcupsd/apcupsd.conf b/collectors/charts.d.plugin/apcupsd/apcupsd.conf
new file mode 100644
index 0000000..679c0d6
--- /dev/null
+++ b/collectors/charts.d.plugin/apcupsd/apcupsd.conf
@@ -0,0 +1,25 @@
+# no need for shebang - this file is loaded from charts.d.plugin
+
+# netdata
+# real-time performance and health monitoring, done right!
+# (C) 2018 Costa Tsaousis <costa@tsaousis.gr>
+# GPL v3+
+
+# add all your APC UPSes in this array - uncomment it too
+#declare -A apcupsd_sources=(
+# ["local"]="127.0.0.1:3551"
+#)
+
+# how long to wait for apcupsd to respond
+#apcupsd_timeout=3
+
+# the data collection frequency
+# if unset, will inherit the netdata update frequency
+#apcupsd_update_every=10
+
+# the charts priority on the dashboard
+#apcupsd_priority=90000
+
+# the number of retries to do in case of failure
+# before disabling the module
+#apcupsd_retries=10
diff --git a/collectors/charts.d.plugin/charts.d.conf b/collectors/charts.d.plugin/charts.d.conf
new file mode 100644
index 0000000..d6add5e
--- /dev/null
+++ b/collectors/charts.d.plugin/charts.d.conf
@@ -0,0 +1,47 @@
+# This is the configuration for charts.d.plugin
+
+# Each of its collectors can read configuration either from this file
+# or a NAME.conf file (where NAME is the collector name).
+# The collector specific file has higher precedence.
+
+# This file is a shell script too.
+
+# -----------------------------------------------------------------------------
+
+# number of seconds to run without restart
+# after this time, charts.d.plugin will exit
+# netdata will restart it, but a small gap
+# will appear in the charts.d.plugin charts.
+#restart_timeout=$[3600 * 4]
+
+# when making iterations, charts.d can loop more frequently
+# to prevent plugins missing iterations.
+# this is a percentage relative to update_every to align its
+# iterations.
+# The minimum is 10%, the maximum 100%.
+# So, if update_every is 1 second and time_divisor is 50,
+# charts.d will iterate every 500ms.
+# Charts will be called to collect data only if the time
+# passed since the last time they collected data is equal
+# to or above their update_every.
+#time_divisor=50
+
+# -----------------------------------------------------------------------------
+
+# the default enable/disable for all charts.d collectors
+# the default is "yes"
+# enable_all_charts="yes"
+
+# BY DEFAULT ENABLED MODULES
+# ap=yes
+# apcupsd=yes
+# libreswan=yes
+# nut=yes
+# opensips=yes
+
+# -----------------------------------------------------------------------------
+# THESE NEED TO BE SET TO "force" TO BE ENABLED
+
+# Nothing useful.
+# Just an example charts.d plugin you can use as a template.
+# example=force
diff --git a/collectors/charts.d.plugin/charts.d.dryrun-helper.sh b/collectors/charts.d.plugin/charts.d.dryrun-helper.sh
new file mode 100755
index 0000000..91af2c5
--- /dev/null
+++ b/collectors/charts.d.plugin/charts.d.dryrun-helper.sh
@@ -0,0 +1,72 @@
+#!/usr/bin/env bash
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# shellcheck disable=SC2181
+
+# will stop the script for any error
+set -e
+
+me="$0"
+name="$1"
+chart="$2"
+conf="$3"
+
+can_diff=1
+
+tmp1="$(mktemp)"
+tmp2="$(mktemp)"
+
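+# dump all shell variables, filtering out the ones that
+# change between invocations by themselves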
+myset() {
+ set | grep -v "^_=" | grep -v "^PIPESTATUS=" | grep -v "^BASH_LINENO="
+}
+
+# save 2 'set'
+myset >"$tmp1"
+myset >"$tmp2"
+
+# make sure they don't differ
+diff "$tmp1" "$tmp2" >/dev/null 2>&1
+if [ $? -ne 0 ]; then
+ # they differ, we cannot do the check
+ echo >&2 "$me: cannot check with diff."
+ can_diff=0
+fi
+
+# do it again, now including the script
+myset >"$tmp1"
+
+# include the plugin and its config
+if [ -f "$conf" ]; then
+ # shellcheck source=/dev/null
+ . "$conf"
+ if [ $? -ne 0 ]; then
+ echo >&2 "$me: cannot load config file $conf"
+ rm "$tmp1" "$tmp2"
+ exit 1
+ fi
+fi
+
+# shellcheck source=/dev/null
+. "$chart"
+if [ $? -ne 0 ]; then
+ echo >&2 "$me: cannot load chart file $chart"
+ rm "$tmp1" "$tmp2"
+ exit 1
+fi
+
+# remove all variables starting with the plugin name
+myset | grep -v "^$name" >"$tmp2"
+
+if [ $can_diff -eq 1 ]; then
+ # check if they are different
+ # make sure they don't differ
+ diff "$tmp1" "$tmp2" >&2
+ if [ $? -ne 0 ]; then
+ # they differ
+ rm "$tmp1" "$tmp2"
+ exit 1
+ fi
+fi
+
+rm "$tmp1" "$tmp2"
+exit 0
diff --git a/collectors/charts.d.plugin/charts.d.plugin.in b/collectors/charts.d.plugin/charts.d.plugin.in
new file mode 100755
index 0000000..62363f3
--- /dev/null
+++ b/collectors/charts.d.plugin/charts.d.plugin.in
@@ -0,0 +1,732 @@
+#!/usr/bin/env bash
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# netdata
+# real-time performance and health monitoring, done right!
+# (C) 2017 Costa Tsaousis <costa@tsaousis.gr>
+# GPL v3+
+#
+# charts.d.plugin allows easy development of BASH plugins
+#
+# if you need to run parallel charts.d processes, link this file to a different name
+# in the same directory, with a .plugin suffix and netdata will start both of them,
+# each will have a different config file and modules configuration directory.
+#
+
+export PATH="${PATH}:/sbin:/usr/sbin:/usr/local/bin:/usr/local/sbin"
+
+PROGRAM_FILE="$0"
+PROGRAM_NAME="$(basename $0)"
+PROGRAM_NAME="${PROGRAM_NAME/.plugin/}"
+MODULE_NAME="main"
+
+# -----------------------------------------------------------------------------
+# create temp dir
+
+debug=0
+TMP_DIR=
+chartsd_cleanup() {
+ trap '' EXIT QUIT HUP INT TERM
+
+ if [ ! -z "$TMP_DIR" -a -d "$TMP_DIR" ]; then
+ [ $debug -eq 1 ] && echo >&2 "$PROGRAM_NAME: cleaning up temporary directory $TMP_DIR ..."
+ rm -rf "$TMP_DIR"
+ fi
+ exit 0
+}
+trap chartsd_cleanup EXIT QUIT HUP INT TERM
+
+if [ $UID = "0" ]; then
+ TMP_DIR="$(mktemp -d /var/run/netdata-${PROGRAM_NAME}-XXXXXXXXXX)"
+else
+ TMP_DIR="$(mktemp -d /tmp/.netdata-${PROGRAM_NAME}-XXXXXXXXXX)"
+fi
+
+logdate() {
+ date "+%Y-%m-%d %H:%M:%S"
+}
+
+log() {
+ local status="${1}"
+ shift
+
+ echo >&2 "$(logdate): ${PROGRAM_NAME}: ${status}: ${MODULE_NAME}: ${*}"
+
+}
+
+warning() {
+ log WARNING "${@}"
+}
+
+error() {
+ log ERROR "${@}"
+}
+
+info() {
+ log INFO "${@}"
+}
+
+fatal() {
+ log FATAL "${@}"
+ echo "DISABLE"
+ exit 1
+}
+
+debug() {
+ [ $debug -eq 1 ] && log DEBUG "${@}"
+}
+
+# -----------------------------------------------------------------------------
+# check a few commands
+
+require_cmd() {
+ local x=$(which "${1}" 2>/dev/null || command -v "${1}" 2>/dev/null)
+ if [ -z "${x}" -o ! -x "${x}" ]; then
+ warning "command '${1}' is not found in ${PATH}."
+ eval "${1^^}_CMD=\"\""
+ return 1
+ fi
+
+ eval "${1^^}_CMD=\"${x}\""
+ return 0
+}
+
+require_cmd date || exit 1
+require_cmd sed || exit 1
+require_cmd basename || exit 1
+require_cmd dirname || exit 1
+require_cmd cat || exit 1
+require_cmd grep || exit 1
+require_cmd egrep || exit 1
+require_cmd mktemp || exit 1
+require_cmd awk || exit 1
+require_cmd timeout || exit 1
+require_cmd curl || exit 1
+
+# -----------------------------------------------------------------------------
+
+[ $((BASH_VERSINFO[0])) -lt 4 ] && fatal "BASH version 4 or later is required, but found version: ${BASH_VERSION}. Please upgrade."
+
+info "started from '$PROGRAM_FILE' with options: $*"
+
+# -----------------------------------------------------------------------------
+# internal defaults
+# netdata exposes a few environment variables for us
+
+[ -z "${NETDATA_PLUGINS_DIR}" ] && NETDATA_PLUGINS_DIR="$(dirname "${0}")"
+[ -z "${NETDATA_USER_CONFIG_DIR}" ] && NETDATA_USER_CONFIG_DIR="@configdir_POST@"
+[ -z "${NETDATA_STOCK_CONFIG_DIR}" ] && NETDATA_STOCK_CONFIG_DIR="@libconfigdir_POST@"
+
+pluginsd="${NETDATA_PLUGINS_DIR}"
+stockconfd="${NETDATA_STOCK_CONFIG_DIR}/${PROGRAM_NAME}"
+userconfd="${NETDATA_USER_CONFIG_DIR}/${PROGRAM_NAME}"
+olduserconfd="${NETDATA_USER_CONFIG_DIR}"
+chartsd="$pluginsd/../charts.d"
+
+minimum_update_frequency="${NETDATA_UPDATE_EVERY-1}"
+update_every=${minimum_update_frequency} # this will be overwritten by the command line
+
+# work around for non BASH shells
+charts_create="_create"
+charts_update="_update"
+charts_check="_check"
+charts_undescore="_"
+
+# when making iterations, charts.d can loop more frequently
+# to prevent plugins missing iterations.
+# this is a percentage relative to update_every to align its
+# iterations.
+# The minimum is 10%, the maximum 100%.
+# So, if update_every is 1 second and time_divisor is 50,
+# charts.d will iterate every 500ms.
+# Charts will be called to collect data only if the time
+# passed since the last time they collected data is equal
+# to or above their update_every.
+time_divisor=50
+
+# number of seconds to run without restart
+# after this time, charts.d.plugin will exit
+# netdata will restart it
+restart_timeout=$((3600 * 4))
+
+# check if the charts.d plugins are using global variables
+# they should not.
+# It does not currently support BASH v4 arrays, so it is
+# disabled
+dryrunner=0
+
+# check for timeout command
+check_for_timeout=1
+
+# the default enable/disable value for all charts
+enable_all_charts="yes"
+
+# -----------------------------------------------------------------------------
+# parse parameters
+
+check=0
+chart_only=
+while [ ! -z "$1" ]; do
+ if [ "$1" = "check" ]; then
+ check=1
+ shift
+ continue
+ fi
+
+ if [ "$1" = "debug" -o "$1" = "all" ]; then
+ debug=1
+ shift
+ continue
+ fi
+
+ if [ -f "$chartsd/$1.chart.sh" ]; then
+ debug=1
+ chart_only="$(echo $1.chart.sh | sed "s/\.chart\.sh$//g")"
+ shift
+ continue
+ fi
+
+ if [ -f "$chartsd/$1" ]; then
+ debug=1
+ chart_only="$(echo $1 | sed "s/\.chart\.sh$//g")"
+ shift
+ continue
+ fi
+
+ # number check
+ n="$1"
+ x=$((n))
+ if [ "$x" = "$n" ]; then
+ shift
+ update_every=$x
+ [ $update_every -lt $minimum_update_frequency ] && update_every=$minimum_update_frequency
+ continue
+ fi
+
+ fatal "Cannot understand parameter $1. Aborting."
+done
+
+# -----------------------------------------------------------------------------
+# loop control
+
+# default sleep function
+LOOPSLEEPMS_HIGHRES=0
+now_ms=
+current_time_ms_default() {
+ now_ms="$(date +'%s')000"
+}
+current_time_ms="current_time_ms_default"
+current_time_ms_accuracy=1
+mysleep="sleep"
+
+# if found and included, this file overwrites loopsleepms()
+# and current_time_ms() with a high resolution timer function
+# for precise looping.
+source "$pluginsd/loopsleepms.sh.inc"
+[ $? -ne 0 ] && error "Failed to load '$pluginsd/loopsleepms.sh.inc'."
+
+# -----------------------------------------------------------------------------
+# load my configuration
+
+for myconfig in "${NETDATA_STOCK_CONFIG_DIR}/${PROGRAM_NAME}.conf" "${NETDATA_USER_CONFIG_DIR}/${PROGRAM_NAME}.conf"; do
+ if [ -f "$myconfig" ]; then
+ source "$myconfig"
+ if [ $? -ne 0 ]; then
+ error "Config file '$myconfig' loaded with errors."
+ else
+ info "Configuration file '$myconfig' loaded."
+ fi
+ else
+ warning "Configuration file '$myconfig' not found."
+ fi
+done
+
+# make sure time_divisor is right
+time_divisor=$((time_divisor))
+[ $time_divisor -lt 10 ] && time_divisor=10
+[ $time_divisor -gt 100 ] && time_divisor=100
+
+# we check for the timeout command, after we load our
+# configuration, so that the user may overwrite the
+# timeout command we use, providing a function that
+# can emulate the timeout command we need:
+# > timeout SECONDS command ...
+if [ $check_for_timeout -eq 1 ]; then
+ require_cmd timeout || exit 1
+fi
+
+# -----------------------------------------------------------------------------
+# internal checks
+
+# netdata passes the requested update frequency as the first argument
+update_every=$((update_every + 1 - 1)) # makes sure it is a number
+test $update_every -eq 0 && update_every=1 # if it is zero, make it 1
+
+# check the charts.d directory
+[ ! -d "$chartsd" ] && fatal "cannot find charts directory '$chartsd'"
+
+# -----------------------------------------------------------------------------
+# library functions
+
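+# convert any string to a safe chart/dimension id:
+# non-alphanumeric characters become underscores, leading/trailing
+# underscores are stripped, repeats are collapsed, and the result is lowercased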
+fixid() {
+ echo "$*" |
+ tr -c "[A-Z][a-z][0-9]" "_" |
+ sed -e "s|^_\+||g" -e "s|_\+$||g" -e "s|_\+|_|g" |
+ tr "[A-Z]" "[a-z]"
+}
+
+isvarset() {
+ [ -n "$1" ] && [ "$1" != "unknown" ] && [ "$1" != "none" ]
+ return $?
+}
+
+getosid() {
+ if isvarset "${NETDATA_CONTAINER_OS_ID}"; then
+ echo "${NETDATA_CONTAINER_OS_ID}"
+ else
+ echo "${NETDATA_SYSTEM_OS_ID}"
+ fi
+}
+
+run() {
+ local ret pid="${BASHPID}" t
+
+ if [ "z${1}" = "z-t" -a "${2}" != "0" ]; then
+ t="${2}"
+ shift 2
+ timeout "${t}" "${@}" 2>"${TMP_DIR}/run.${pid}"
+ ret=$?
+ else
+ "${@}" 2>"${TMP_DIR}/run.${pid}"
+ ret=$?
+ fi
+
+ if [ ${ret} -ne 0 ]; then
+ {
+ printf "$(logdate): ${PROGRAM_NAME}: ${status}: ${MODULE_NAME}: command '"
+ printf "%q " "${@}"
+ printf "' failed with code ${ret}:\n --- BEGIN TRACE ---\n"
+ cat "${TMP_DIR}/run.${pid}"
+ printf " --- END TRACE ---\n"
+ } >&2
+ fi
+ rm -f "${TMP_DIR}/run.${pid}"
+
+ return ${ret}
+}
+
+# convert any floating point number
+# to integer, give a multiplier
+# the result is stored in ${FLOAT2INT_RESULT}
+# so that no fork is necessary
+# the multiplier must be a power of 10
+float2int() {
+ local f m="$2" a b l v=($1)
+ f=${v[0]}
+
+ # the length of the multiplier - 1
+ l=$((${#m} - 1))
+
+ # check if the number is in scientific notation
+ if [[ ${f} =~ ^[[:space:]]*(-)?[0-9.]+(e|E)(\+|-)[0-9]+ ]]; then
+ # convert it to decimal
+ # unfortunately, this fork cannot be avoided
+ # if you know of a way to avoid it, please let me know
+ f=$(printf "%0.${l}f" ${f})
+ fi
+
+ # split the floating point number
+ # in integer (a) and decimal (b)
+ a=${f/.*/}
+ b=${f/*./}
+
+ # if the integer part is missing
+ # set it to zero
+ [ -z "${a}" ] && a="0"
+
+ # strip leading zeros from the integer part
+ # base 10 conversion
+ a=$((10#$a))
+
+ # check the length of the decimal part
+ # against the length of the multiplier
+ if [ ${#b} -gt ${l} ]; then
+ # too many digits - take the most significant
+ b=${b:0:l}
+
+ elif [ ${#b} -lt ${l} ]; then
+ # too few digits - pad with zero on the right
+ local z="00000000000000000000000" r=$((l - ${#b}))
+ b="${b}${z:0:r}"
+ fi
+
+ # strip leading zeros from the decimal part
+ # base 10 conversion
+ b=$((10#$b))
+
+ # store the result
+ FLOAT2INT_RESULT=$(((a * m) + b))
+}
+
+# -----------------------------------------------------------------------------
+# charts check functions
+
+all_charts() {
+ cd "$chartsd"
+ [ $? -ne 0 ] && error "cannot cd to $chartsd" && return 1
+
+ ls *.chart.sh | sed "s/\.chart\.sh$//g"
+}
+
+declare -A charts_enable_keyword=(
+ ['apache']="force"
+ ['cpu_apps']="force"
+ ['cpufreq']="force"
+ ['example']="force"
+ ['exim']="force"
+ ['hddtemp']="force"
+ ['load_average']="force"
+ ['mem_apps']="force"
+ ['mysql']="force"
+ ['nginx']="force"
+ ['phpfpm']="force"
+ ['postfix']="force"
+ ['sensors']="force"
+ ['squid']="force"
+ ['tomcat']="force"
+)
+
+declare -A obsolete_charts=(
+ ['apache']="python.d.plugin module"
+ ['cpu_apps']="apps.plugin"
+ ['cpufreq']="proc plugin"
+ ['exim']="python.d.plugin module"
+ ['hddtemp']="python.d.plugin module"
+ ['load_average']="proc plugin"
+ ['mem_apps']="proc plugin"
+ ['mysql']="python.d.plugin module"
+ ['nginx']="python.d.plugin module"
+ ['phpfpm']="python.d.plugin module"
+ ['postfix']="python.d.plugin module"
+ ['squid']="python.d.plugin module"
+ ['tomcat']="python.d.plugin module"
+)
+
+all_enabled_charts() {
+ local charts enabled required
+
+ # find all enabled charts
+ for chart in $(all_charts); do
+ MODULE_NAME="${chart}"
+
+ if [ -n "${obsolete_charts["$MODULE_NAME"]}" ]; then
+ debug "is replaced by ${obsolete_charts["$MODULE_NAME"]}, skipping it."
+ continue
+ fi
+
+ eval "enabled=\$$chart"
+ if [ -z "${enabled}" ]; then
+ enabled="${enable_all_charts}"
+ fi
+
+ required="${charts_enable_keyword[${chart}]}"
+ [ -z "${required}" ] && required="yes"
+
+ if [ ! "${enabled}" = "${required}" ]; then
+ info "is disabled. Add a line with $chart=$required in '${NETDATA_USER_CONFIG_DIR}/${PROGRAM_NAME}.conf' to enable it (or remove the line that disables it)."
+ else
+ debug "is enabled for auto-detection."
+ local charts="$charts $chart"
+ fi
+ done
+ MODULE_NAME="main"
+
+ local charts2=
+ for chart in $charts; do
+ MODULE_NAME="${chart}"
+
+ # check the enabled charts
+ local check="$(cat "$chartsd/$chart.chart.sh" | sed "s/^ \+//g" | grep "^$chart$charts_check()")"
+ if [ -z "$check" ]; then
+ error "module '$chart' does not seem to have a $chart$charts_check() function. Disabling it."
+ continue
+ fi
+
+ local create="$(cat "$chartsd/$chart.chart.sh" | sed "s/^ \+//g" | grep "^$chart$charts_create()")"
+ if [ -z "$create" ]; then
+ error "module '$chart' does not seem to have a $chart$charts_create() function. Disabling it."
+ continue
+ fi
+
+ local update="$(cat "$chartsd/$chart.chart.sh" | sed "s/^ \+//g" | grep "^$chart$charts_update()")"
+ if [ -z "$update" ]; then
+ error "module '$chart' does not seem to have a $chart$charts_update() function. Disabling it."
+ continue
+ fi
+
+ # check its config
+ #if [ -f "$userconfd/$chart.conf" ]
+ #then
+ # if [ ! -z "$( cat "$userconfd/$chart.conf" | sed "s/^ \+//g" | grep -v "^$" | grep -v "^#" | grep -v "^$chart$charts_undescore" )" ]
+ # then
+ # error "module's $chart config $userconfd/$chart.conf should only have lines starting with $chart$charts_undescore . Disabling it."
+ # continue
+ # fi
+ #fi
+
+ #if [ $dryrunner -eq 1 ]
+ # then
+ # "$pluginsd/charts.d.dryrun-helper.sh" "$chart" "$chartsd/$chart.chart.sh" "$userconfd/$chart.conf" >/dev/null
+ # if [ $? -ne 0 ]
+ # then
+ # error "module's $chart did not pass the dry run check. This means it uses global variables not starting with $chart. Disabling it."
+ # continue
+ # fi
+ #fi
+
+ local charts2="$charts2 $chart"
+ done
+ MODULE_NAME="main"
+
+ echo $charts2
+ debug "enabled charts: $charts2"
+}
+
+# -----------------------------------------------------------------------------
+# load the charts
+
+suffix_retries="_retries"
+suffix_update_every="_update_every"
+active_charts=
+for chart in $(all_enabled_charts); do
+ MODULE_NAME="${chart}"
+
+ debug "loading module: '$chartsd/$chart.chart.sh'"
+
+ source "$chartsd/$chart.chart.sh"
+ [ $? -ne 0 ] && warning "Module '$chartsd/$chart.chart.sh' loaded with errors."
+
+ # first load the stock config
+ if [ -f "$stockconfd/$chart.conf" ]; then
+ debug "loading module configuration: '$stockconfd/$chart.conf'"
+ source "$stockconfd/$chart.conf"
+ [ $? -ne 0 ] && warning "Config file '$stockconfd/$chart.conf' loaded with errors."
+ else
+ debug "not found module configuration: '$stockconfd/$chart.conf'"
+ fi
+
+ # then load the user config (it overwrites the stock)
+ if [ -f "$userconfd/$chart.conf" ]; then
+ debug "loading module configuration: '$userconfd/$chart.conf'"
+ source "$userconfd/$chart.conf"
+ [ $? -ne 0 ] && warning "Config file '$userconfd/$chart.conf' loaded with errors."
+ else
+ debug "not found module configuration: '$userconfd/$chart.conf'"
+
+ if [ -f "$olduserconfd/$chart.conf" ]; then
+ # support for very old netdata that had the charts.d module configs in /etc/netdata
+ info "loading module configuration from obsolete location: '$olduserconfd/$chart.conf'"
+ source "$olduserconfd/$chart.conf"
+ [ $? -ne 0 ] && warning "Config file '$olduserconfd/$chart.conf' loaded with errors."
+ fi
+ fi
+
+ eval "dt=\$$chart$suffix_update_every"
+ dt=$((dt + 1 - 1)) # make sure it is a number
+ if [ $dt -lt $update_every ]; then
+ eval "$chart$suffix_update_every=$update_every"
+ fi
+
+ $chart$charts_check
+ if [ $? -eq 0 ]; then
+ debug "module '$chart' activated"
+ active_charts="$active_charts $chart"
+ else
+ error "module's '$chart' check() function reports failure."
+ fi
+done
+MODULE_NAME="main"
+debug "activated modules: $active_charts"
+
+# -----------------------------------------------------------------------------
+# check overwrites
+
+# enable work time reporting
+debug_time=
+test $debug -eq 1 && debug_time=tellwork
+
+# if we only need a specific chart, remove all the others
+if [ ! -z "${chart_only}" ]; then
+ debug "requested to run only for: '${chart_only}'"
+ check_charts=
+ for chart in $active_charts; do
+ if [ "$chart" = "$chart_only" ]; then
+ check_charts="$chart"
+ break
+ fi
+ done
+ active_charts="$check_charts"
+fi
+debug "activated charts: $active_charts"
+
+# stop if we just need a pre-check
+if [ $check -eq 1 ]; then
+ info "CHECK RESULT"
+ info "Will run the charts: $active_charts"
+ exit 0
+fi
+
+# -----------------------------------------------------------------------------
+
+cd "${TMP_DIR}" || exit 1
+
+# -----------------------------------------------------------------------------
+# create charts
+
+run_charts=
+for chart in $active_charts; do
+ MODULE_NAME="${chart}"
+
+ debug "calling '$chart$charts_create()'..."
+ $chart$charts_create
+ if [ $? -eq 0 ]; then
+ run_charts="$run_charts $chart"
+ debug "'$chart' initialized."
+ else
+ error "module's '$chart' function '$chart$charts_create()' reports failure."
+ fi
+done
+MODULE_NAME="main"
+debug "run_charts='$run_charts'"
+
+# -----------------------------------------------------------------------------
+# update dimensions
+
+[ -z "$run_charts" ] && fatal "No charts to collect data from."
+
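+# when netdata is gone, our stdout pipe is closed and writing to it fails;
+# detect that and clean up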
+keepalive() {
+ if [ ! -t 1 ] && ! printf "\n"; then
+ chartsd_cleanup
+ fi
+}
+
+declare -A charts_last_update=() charts_update_every=() charts_retries=() charts_next_update=() charts_run_counter=() charts_serial_failures=()
+global_update() {
+ local exit_at \
+ c=0 dt ret last_ms exec_start_ms exec_end_ms \
+ chart now_charts=() next_charts=($run_charts) \
+ next_ms x seconds millis
+
+ # return the current time in ms in $now_ms
+ ${current_time_ms}
+
+ exit_at=$((now_ms + (restart_timeout * 1000)))
+
+ for chart in $run_charts; do
+ eval "charts_update_every[$chart]=\$$chart$suffix_update_every"
+ test -z "${charts_update_every[$chart]}" && charts_update_every[$chart]=$update_every
+
+ eval "charts_retries[$chart]=\$$chart$suffix_retries"
+ test -z "${charts_retries[$chart]}" && charts_retries[$chart]=10
+
+ charts_last_update[$chart]=$((now_ms - (now_ms % (charts_update_every[$chart] * 1000))))
+ charts_next_update[$chart]=$((charts_last_update[$chart] + (charts_update_every[$chart] * 1000)))
+ charts_run_counter[$chart]=0
+ charts_serial_failures[$chart]=0
+
+ echo "CHART netdata.plugin_chartsd_$chart '' 'Execution time for $chart plugin' 'milliseconds / run' charts.d netdata.plugin_charts area 145000 ${charts_update_every[$chart]}"
+ echo "DIMENSION run_time 'run time' absolute 1 1"
+ done
+
+ # the main loop
+ while [ "${#next_charts[@]}" -gt 0 ]; do
+ keepalive
+
+ c=$((c + 1))
+ now_charts=("${next_charts[@]}")
+ next_charts=()
+
+ # return the current time in ms in $now_ms
+ ${current_time_ms}
+
+ for chart in "${now_charts[@]}"; do
+ MODULE_NAME="${chart}"
+
+ if [ ${now_ms} -ge ${charts_next_update[$chart]} ]; then
+ last_ms=${charts_last_update[$chart]}
+ dt=$((now_ms - last_ms))
+
+ charts_last_update[$chart]=${now_ms}
+
+ while [ ${charts_next_update[$chart]} -lt ${now_ms} ]; do
+ charts_next_update[$chart]=$((charts_next_update[$chart] + (charts_update_every[$chart] * 1000)))
+ done
+
+ # the first call should not give a duration
+ # so that netdata calibrates to current time
+ dt=$((dt * 1000))
+ charts_run_counter[$chart]=$((charts_run_counter[$chart] + 1))
+ if [ ${charts_run_counter[$chart]} -eq 1 ]; then
+ dt=
+ fi
+
+ exec_start_ms=$now_ms
+ $chart$charts_update $dt
+ ret=$?
+
+ # return the current time in ms in $now_ms
+ ${current_time_ms}
+ exec_end_ms=$now_ms
+
+ echo "BEGIN netdata.plugin_chartsd_$chart $dt"
+ echo "SET run_time = $((exec_end_ms - exec_start_ms))"
+ echo "END"
+
+ if [ $ret -eq 0 ]; then
+ charts_serial_failures[$chart]=0
+ next_charts+=($chart)
+ else
+ charts_serial_failures[$chart]=$((charts_serial_failures[$chart] + 1))
+
+ if [ ${charts_serial_failures[$chart]} -gt ${charts_retries[$chart]} ]; then
+ error "module's '$chart' update() function reported failure ${charts_serial_failures[$chart]} times. Disabling it."
+ else
+ error "module's '$chart' update() function reports failure. Will keep trying for a while."
+ next_charts+=($chart)
+ fi
+ fi
+ else
+ next_charts+=($chart)
+ fi
+ done
+ MODULE_NAME="${chart}"
+
+ # wait the time you are required to
+ next_ms=$((now_ms + (update_every * 1000 * 100)))
+ for x in "${charts_next_update[@]}"; do [ ${x} -lt ${next_ms} ] && next_ms=${x}; done
+ next_ms=$((next_ms - now_ms))
+
+ if [ ${LOOPSLEEPMS_HIGHRES} -eq 1 -a ${next_ms} -gt 0 ]; then
+ next_ms=$((next_ms + current_time_ms_accuracy))
+ seconds=$((next_ms / 1000))
+ millis=$((next_ms % 1000))
+ if [ ${millis} -lt 10 ]; then
+ millis="00${millis}"
+ elif [ ${millis} -lt 100 ]; then
+ millis="0${millis}"
+ fi
+
+ debug "sleeping for ${seconds}.${millis} seconds."
+ ${mysleep} ${seconds}.${millis}
+ else
+ debug "sleeping for ${update_every} seconds."
+ ${mysleep} $update_every
+ fi
+
+ test ${now_ms} -ge ${exit_at} && exit 0
+ done
+
+ fatal "nothing left to do, exiting..."
+}
+
+global_update
diff --git a/collectors/charts.d.plugin/example/Makefile.inc b/collectors/charts.d.plugin/example/Makefile.inc
new file mode 100644
index 0000000..e6838fb
--- /dev/null
+++ b/collectors/charts.d.plugin/example/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_charts_DATA += example/example.chart.sh
+dist_chartsconfig_DATA += example/example.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += example/README.md example/Makefile.inc
+
diff --git a/collectors/charts.d.plugin/example/README.md b/collectors/charts.d.plugin/example/README.md
new file mode 100644
index 0000000..de21f6a
--- /dev/null
+++ b/collectors/charts.d.plugin/example/README.md
@@ -0,0 +1,10 @@
+<!--
+title: "Example"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/charts.d.plugin/example/README.md
+-->
+
+# Example
+
+This is just an example charts.d data collector.
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fcharts.d.plugin%2Fexample%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/charts.d.plugin/example/example.chart.sh b/collectors/charts.d.plugin/example/example.chart.sh
new file mode 100644
index 0000000..5ff51a5
--- /dev/null
+++ b/collectors/charts.d.plugin/example/example.chart.sh
@@ -0,0 +1,123 @@
+# shellcheck shell=bash
+# no need for shebang - this file is loaded from charts.d.plugin
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# netdata
+# real-time performance and health monitoring, done right!
+# (C) 2016 Costa Tsaousis <costa@tsaousis.gr>
+#
+
+# if this chart is called X.chart.sh, then all functions and global variables
+# must start with X_
+
+# _update_every is a special variable - it holds the number of seconds
+# between the calls of the _update() function
+example_update_every=
+
+# the priority is used to sort the charts on the dashboard
+# 1 = the first chart
+example_priority=150000
+
+# to enable this chart, you have to set this to 12345
+# (just a demonstration for something that needs to be checked)
+example_magic_number=
+
+# global variables to store our collected data
+# remember: they need to start with the module name example_
+example_value1=
+example_value2=
+example_value3=
+example_value4=
+example_last=0
+example_count=0
+
+example_get() {
+ # do all the work to collect / calculate the values
+ # for each dimension
+ #
+ # Remember:
+ # 1. KEEP IT SIMPLE AND SHORT
+ # 2. AVOID FORKS (avoid piping commands)
+ # 3. AVOID CALLING TOO MANY EXTERNAL PROGRAMS
+ # 4. USE LOCAL VARIABLES (global variables may overlap with other modules)
+
+ example_value1=$RANDOM
+ example_value2=$RANDOM
+ example_value3=$RANDOM
+ example_value4=$((8192 + (RANDOM * 16383 / 32767)))
+
+ if [ $example_count -gt 0 ]; then
+ example_count=$((example_count - 1))
+
+ [ $example_last -gt 16383 ] && example_value4=$((example_last + (RANDOM * ((32767 - example_last) / 2) / 32767)))
+ [ $example_last -le 16383 ] && example_value4=$((example_last - (RANDOM * (example_last / 2) / 32767)))
+ else
+ example_count=$((1 + (RANDOM * 5 / 32767)))
+
+ if [ $example_last -gt 16383 ] && [ $example_value4 -gt 16383 ]; then
+ example_value4=$((example_value4 - 16383))
+ fi
+ if [ $example_last -le 16383 ] && [ $example_value4 -lt 16383 ]; then
+ example_value4=$((example_value4 + 16383))
+ fi
+ fi
+ example_last=$example_value4
+
+ # this should return:
+ # - 0 to send the data to netdata
+ # - 1 to report a failure to collect the data
+
+ return 0
+}
+
+# _check is called once, to find out if this chart should be enabled or not
+example_check() {
+ # this should return:
+ # - 0 to enable the chart
+ # - 1 to disable the chart
+
+ # check something
+ [ "${example_magic_number}" != "12345" ] && error "manual configuration required: you have to set example_magic_number=$example_magic_number in example.conf to start example chart." && return 1
+
+ # check that we can collect data
+ example_get || return 1
+
+ return 0
+}
+
+# _create is called once, to create the charts
+example_create() {
+ # create the chart with 3 dimensions
+ cat << EOF
+CHART example.random '' "Random Numbers Stacked Chart" "% of random numbers" random random stacked $((example_priority)) $example_update_every
+DIMENSION random1 '' percentage-of-absolute-row 1 1
+DIMENSION random2 '' percentage-of-absolute-row 1 1
+DIMENSION random3 '' percentage-of-absolute-row 1 1
+CHART example.random2 '' "A random number" "random number" random random area $((example_priority + 1)) $example_update_every
+DIMENSION random '' absolute 1 1
+EOF
+
+ return 0
+}
+
+# _update is called continuously, to collect the values
+example_update() {
+ # the first argument to this function is the microseconds since last update
+ # pass this parameter to the BEGIN statement (see below).
+
+ example_get || return 1
+
+ # write the result of the work.
+ cat << VALUESEOF
+BEGIN example.random $1
+SET random1 = $example_value1
+SET random2 = $example_value2
+SET random3 = $example_value3
+END
+BEGIN example.random2 $1
+SET random = $example_value4
+END
+VALUESEOF
+
+ return 0
+}
diff --git a/collectors/charts.d.plugin/example/example.conf b/collectors/charts.d.plugin/example/example.conf
new file mode 100644
index 0000000..6232ca5
--- /dev/null
+++ b/collectors/charts.d.plugin/example/example.conf
@@ -0,0 +1,21 @@
+# no need for shebang - this file is loaded from charts.d.plugin
+
+# netdata
+# real-time performance and health monitoring, done right!
+# (C) 2018 Costa Tsaousis <costa@tsaousis.gr>
+# GPL v3+
+
+# to enable this chart, you have to set this to 12345
+# (just a demonstration for something that needs to be checked)
+#example_magic_number=12345
+
+# the data collection frequency
+# if unset, will inherit the netdata update frequency
+#example_update_every=
+
+# the charts priority on the dashboard
+#example_priority=150000
+
+# the number of retries to do in case of failure
+# before disabling the module
+#example_retries=10
diff --git a/collectors/charts.d.plugin/libreswan/Makefile.inc b/collectors/charts.d.plugin/libreswan/Makefile.inc
new file mode 100644
index 0000000..af767d0
--- /dev/null
+++ b/collectors/charts.d.plugin/libreswan/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_charts_DATA += libreswan/libreswan.chart.sh
+dist_chartsconfig_DATA += libreswan/libreswan.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += libreswan/README.md libreswan/Makefile.inc
+
diff --git a/collectors/charts.d.plugin/libreswan/README.md b/collectors/charts.d.plugin/libreswan/README.md
new file mode 100644
index 0000000..b1c1f05
--- /dev/null
+++ b/collectors/charts.d.plugin/libreswan/README.md
@@ -0,0 +1,56 @@
+<!--
+title: "Libreswan IPSec tunnel monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/charts.d.plugin/libreswan/README.md
+sidebar_label: "Libreswan IPSec tunnels"
+-->
+
+# Libreswan IPSec tunnel monitoring with Netdata
+
+Collects bytes-in, bytes-out and uptime for all established Libreswan IPsec tunnels.
+
+The following charts are created, **per tunnel**:
+
+1. **Uptime**
+
+- the uptime of the tunnel
+
+2. **Traffic**
+
+- bytes in
+- bytes out
+
+## Configuration
+
+Edit the `charts.d/libreswan.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config charts.d/libreswan.conf
+```
+
+The plugin executes 2 commands to collect all the information it needs:
+
+```sh
+ipsec whack --status
+ipsec whack --trafficstatus
+```
+
+The first command extracts the currently established tunnels, their IDs and their names.
+The second command extracts the current uptime and traffic.
+
+Most probably, user `netdata` will not be able to query Libreswan directly, so the `ipsec` commands will be denied.
+The plugin therefore attempts to run `ipsec` as `sudo ipsec ...`, to get access to the Libreswan statistics.
+
+To allow user `netdata` to execute `sudo ipsec ...`, create the file `/etc/sudoers.d/netdata` with this content:
+
+```
+netdata ALL = (root) NOPASSWD: /sbin/ipsec whack --status
+netdata ALL = (root) NOPASSWD: /sbin/ipsec whack --trafficstatus
+```
+
+Make sure the path `/sbin/ipsec` matches your setup (execute `which ipsec` to find the right path).
+
+---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fcharts.d.plugin%2Flibreswan%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/charts.d.plugin/libreswan/libreswan.chart.sh b/collectors/charts.d.plugin/libreswan/libreswan.chart.sh
new file mode 100644
index 0000000..bfa2b9e
--- /dev/null
+++ b/collectors/charts.d.plugin/libreswan/libreswan.chart.sh
@@ -0,0 +1,187 @@
+# shellcheck shell=bash disable=SC1117
+# no need for shebang - this file is loaded from charts.d.plugin
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# netdata
+# real-time performance and health monitoring, done right!
+# (C) 2018 Costa Tsaousis <costa@tsaousis.gr>
+#
+
+# _update_every is a special variable - it holds the number of seconds
+# between the calls of the _update() function
+libreswan_update_every=1
+
+# the priority is used to sort the charts on the dashboard
+# 1 = the first chart
+libreswan_priority=90000
+
+# set to 1, to run ipsec with sudo
+libreswan_sudo=1
+
+# global variables to store our collected data
+
+# [TUNNELID] = TUNNELNAME
+# here we track the *latest* established tunnels
+# as detected by: ipsec whack --status
+declare -A libreswan_connected_tunnels=()
+
+# [TUNNELID] = VALUE
+# here we track values of all established tunnels (not only the latest)
+# as detected by: ipsec whack --trafficstatus
+declare -A libreswan_traffic_in=()
+declare -A libreswan_traffic_out=()
+declare -A libreswan_established_add_time=()
+
+# [TUNNELNAME] = CHARTID
+# here we remember CHARTIDs of all tunnels
+# we need this to avoid converting tunnel names to chart IDs on every iteration
+declare -A libreswan_tunnel_charts=()
+
+is_able_sudo_ipsec() {
+ if ! sudo -n -l "${IPSEC_CMD}" whack --status > /dev/null 2>&1; then
+ return 1
+ fi
+ if ! sudo -n -l "${IPSEC_CMD}" whack --trafficstatus > /dev/null 2>&1; then
+ return 1
+ fi
+ return 0
+}
+
+# run the ipsec command
+libreswan_ipsec() {
+ if [ ${libreswan_sudo} -ne 0 ]; then
+ sudo -n "${IPSEC_CMD}" "${@}"
+ return $?
+ else
+ "${IPSEC_CMD}" "${@}"
+ return $?
+ fi
+}
+
+# fetch latest values - fill the arrays
+libreswan_get() {
+ # do all the work to collect / calculate the values
+ # for each dimension
+
+ # empty the variables
+ libreswan_traffic_in=()
+ libreswan_traffic_out=()
+ libreswan_established_add_time=()
+ libreswan_connected_tunnels=()
+
+ # convert the ipsec command output to a shell script
+ # and source it to get the values
+ # shellcheck disable=SC1090
+ source <(
+ {
+ libreswan_ipsec whack --status
+ libreswan_ipsec whack --trafficstatus
+ } | sed -n \
+ -e "s|[0-9]\+ #\([0-9]\+\): \"\(.*\)\".*IPsec SA established.*newest IPSEC.*|libreswan_connected_tunnels[\"\1\"]=\"\2\"|p" \
+ -e "s|[0-9]\+ #\([0-9]\+\): \"\(.*\)\",\{0,1\}.* add_time=\([0-9]\+\),.* inBytes=\([0-9]\+\),.* outBytes=\([0-9]\+\).*|libreswan_traffic_in[\"\1\"]=\"\4\"; libreswan_traffic_out[\"\1\"]=\"\5\"; libreswan_established_add_time[\"\1\"]=\"\3\";|p"
+ ) || return 1
+
+ # check we got some data
+ [ ${#libreswan_connected_tunnels[@]} -eq 0 ] && return 1
+
+ return 0
+}
+
+# _check is called once, to find out if this chart should be enabled or not
+libreswan_check() {
+ # this should return:
+ # - 0 to enable the chart
+ # - 1 to disable the chart
+
+ require_cmd ipsec || return 1
+
+ # make sure it is libreswan
+ # shellcheck disable=SC2143
+ if [ -z "$(ipsec --version | grep -i libreswan)" ]; then
+ error "ipsec command is not Libreswan. Disabling Libreswan plugin."
+ return 1
+ fi
+
+ if [ ${libreswan_sudo} -ne 0 ] && ! is_able_sudo_ipsec; then
+ error "not enough permissions to execute ipsec with sudo. Disabling Libreswan plugin."
+ return 1
+ fi
+
+ # check that we can collect data
+ libreswan_get || return 1
+
+ return 0
+}
+
+# create the charts for an ipsec tunnel
+libreswan_create_one() {
+ local n="${1}" name
+
+ name="${libreswan_connected_tunnels[${n}]}"
+
+ [ -n "${libreswan_tunnel_charts[${name}]}" ] && return 0
+
+ libreswan_tunnel_charts[${name}]="$(fixid "${name}")"
+
+ cat << EOF
+CHART libreswan.${libreswan_tunnel_charts[${name}]}_net '${name}_net' "LibreSWAN Tunnel ${name} Traffic" "kilobits/s" "${name}" libreswan.net area $((libreswan_priority)) $libreswan_update_every
+DIMENSION in '' incremental 8 1000
+DIMENSION out '' incremental -8 1000
+CHART libreswan.${libreswan_tunnel_charts[${name}]}_uptime '${name}_uptime' "LibreSWAN Tunnel ${name} Uptime" "seconds" "${name}" libreswan.uptime line $((libreswan_priority + 1)) $libreswan_update_every
+DIMENSION uptime '' absolute 1 1
+EOF
+
+ return 0
+
+}
+
+# _create is called once, to create the charts
+libreswan_create() {
+ local n
+ for n in "${!libreswan_connected_tunnels[@]}"; do
+ libreswan_create_one "${n}"
+ done
+ return 0
+}
+
+libreswan_now=$(date +%s)
+
+# send the values to netdata for an ipsec tunnel
+libreswan_update_one() {
+ local n="${1}" microseconds="${2}" name id uptime
+
+ name="${libreswan_connected_tunnels[${n}]}"
+ id="${libreswan_tunnel_charts[${name}]}"
+
+ # create the chart, if this is a tunnel we have not seen during _create
+ [ -z "${id}" ] && libreswan_create_one "${n}" && id="${libreswan_tunnel_charts[${name}]}"
+
+ uptime=$((libreswan_now - libreswan_established_add_time[${n}]))
+ [ ${uptime} -lt 0 ] && uptime=0
+
+ # write the result of the work.
+ cat << VALUESEOF
+BEGIN libreswan.${id}_net ${microseconds}
+SET in = ${libreswan_traffic_in[${n}]}
+SET out = ${libreswan_traffic_out[${n}]}
+END
+BEGIN libreswan.${id}_uptime ${microseconds}
+SET uptime = ${uptime}
+END
+VALUESEOF
+}
+
+# _update is called continuously, to collect the values
+libreswan_update() {
+ # the first argument to this function is the microseconds since last update
+ # pass this parameter to the BEGIN statement (see below).
+
+ libreswan_get || return 1
+ libreswan_now=$(date +%s)
+
+ local n
+ for n in "${!libreswan_connected_tunnels[@]}"; do
+ libreswan_update_one "${n}" "${@}"
+ done
+
+ return 0
+}
diff --git a/collectors/charts.d.plugin/libreswan/libreswan.conf b/collectors/charts.d.plugin/libreswan/libreswan.conf
new file mode 100644
index 0000000..9b3ee77
--- /dev/null
+++ b/collectors/charts.d.plugin/libreswan/libreswan.conf
@@ -0,0 +1,29 @@
+# no need for shebang - this file is loaded from charts.d.plugin
+
+# netdata
+# real-time performance and health monitoring, done right!
+# (C) 2018 Costa Tsaousis <costa@tsaousis.gr>
+# GPL v3+
+#
+
+# the data collection frequency
+# if unset, will inherit the netdata update frequency
+#libreswan_update_every=1
+
+# the charts priority on the dashboard
+#libreswan_priority=90000
+
+# the number of retries to do in case of failure
+# before disabling the module
+#libreswan_retries=10
+
+# set to 1, to run ipsec with sudo (the default)
+# set to 0, to run ipsec without sudo
+#libreswan_sudo=1
+
+# TO ALLOW NETDATA RUN ipsec AS ROOT
+# CREATE THE FILE: /etc/sudoers.d/netdata
+# WITH THESE 2 LINES (uncommented of course):
+#
+# netdata ALL = (root) NOPASSWD: /sbin/ipsec whack --status
+# netdata ALL = (root) NOPASSWD: /sbin/ipsec whack --trafficstatus
diff --git a/collectors/charts.d.plugin/loopsleepms.sh.inc b/collectors/charts.d.plugin/loopsleepms.sh.inc
new file mode 100644
index 0000000..c386083
--- /dev/null
+++ b/collectors/charts.d.plugin/loopsleepms.sh.inc
@@ -0,0 +1,227 @@
+# no need for shebang - this file is included from other scripts
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+LOOPSLEEP_DATE="$(which date 2>/dev/null || command -v date 2>/dev/null)"
+if [ -z "$LOOPSLEEP_DATE" ]; then
+ echo >&2 "$0: ERROR: Cannot find the command 'date' in the system path."
+ exit 1
+fi
+
+# -----------------------------------------------------------------------------
+# use the date command as a high resolution timer
+
+# macOS 'date' doesn't support '%N' precision
+# there, $(/bin/date +"%N") prints the literal string "N"
+if [ "$($LOOPSLEEP_DATE +"%N")" = "N" ]; then
+ LOOPSLEEP_DATE_FORMAT="%s * 1000"
+else
+ LOOPSLEEP_DATE_FORMAT="%s * 1000 + 10#%-N / 1000000"
+fi
+
+now_ms=
+LOOPSLEEPMS_HIGHRES=1
+test "$($LOOPSLEEP_DATE +%N)" = "%N" && LOOPSLEEPMS_HIGHRES=0
+test -z "$($LOOPSLEEP_DATE +%N)" && LOOPSLEEPMS_HIGHRES=0
+current_time_ms_from_date() {
+ if [ $LOOPSLEEPMS_HIGHRES -eq 0 ]; then
+ now_ms="$($LOOPSLEEP_DATE +'%s')000"
+ else
+ now_ms="$(($($LOOPSLEEP_DATE +"$LOOPSLEEP_DATE_FORMAT")))"
+ fi
+}
+
+# -----------------------------------------------------------------------------
+# use /proc/uptime as a high resolution timer
+
+current_time_ms_from_date
+current_time_ms_from_uptime_started="${now_ms}"
+current_time_ms_from_uptime_last="${now_ms}"
+current_time_ms_from_uptime_first=0
+current_time_ms_from_uptime() {
+ local up rest arr=() n
+
+ read up rest </proc/uptime
+ if [ $? -ne 0 ]; then
+ echo >&2 "$0: Cannot read /proc/uptime - falling back to current_time_ms_from_date()."
+ current_time_ms="current_time_ms_from_date"
+ current_time_ms_from_date
+ current_time_ms_accuracy=1
+ return
+ fi
+
+ arr=(${up//./ })
+
+ if [ ${#arr[1]} -lt 1 ]; then
+ n="${arr[0]}000"
+ elif [ ${#arr[1]} -lt 2 ]; then
+ n="${arr[0]}${arr[1]}00"
+ elif [ ${#arr[1]} -lt 3 ]; then
+ n="${arr[0]}${arr[1]}0"
+ else
+ n="${arr[0]}${arr[1]}"
+ fi
+
+ now_ms=$((current_time_ms_from_uptime_started - current_time_ms_from_uptime_first + n))
+
+ if [ "${now_ms}" -lt "${current_time_ms_from_uptime_last}" ]; then
+ echo >&2 "$0: Cannot use current_time_ms_from_uptime() - new time ${now_ms} is older than the last ${current_time_ms_from_uptime_last} - falling back to current_time_ms_from_date()."
+ current_time_ms="current_time_ms_from_date"
+ current_time_ms_from_date
+ current_time_ms_accuracy=1
+ fi
+
+ current_time_ms_from_uptime_last="${now_ms}"
+}
+current_time_ms_from_uptime
+current_time_ms_from_uptime_first="$((now_ms - current_time_ms_from_uptime_started))"
+current_time_ms_from_uptime_last="${current_time_ms_from_uptime_first}"
+current_time_ms="current_time_ms_from_uptime"
+current_time_ms_accuracy=10
+if [ "${current_time_ms_from_uptime_first}" -eq 0 ]; then
+ echo >&2 "$0: Invalid setup for current_time_ms_from_uptime() - falling back to current_time_ms_from_date()."
+ current_time_ms="current_time_ms_from_date"
+ current_time_ms_accuracy=1
+fi
+
+# -----------------------------------------------------------------------------
+# use read with timeout for sleep
+
+mysleep=""
+
+mysleep_fifo="${NETDATA_CACHE_DIR-/tmp}/.netdata_bash_sleep_timer_fifo"
+[ -f "${mysleep_fifo}" ] && rm "${mysleep_fifo}"
+[ ! -p "${mysleep_fifo}" ] && mkfifo "${mysleep_fifo}"
+[ -p "${mysleep_fifo}" ] && mysleep="mysleep_read"
+
+mysleep_read() {
+ read -t "${1}" <>"${mysleep_fifo}"
+ ret=$?
+ if [ $ret -le 128 ]; then
+ echo >&2 "$0: Cannot use read for sleeping (return code ${ret})."
+ mysleep="sleep"
+ ${mysleep} "${1}"
+ fi
+}
+
+# -----------------------------------------------------------------------------
+# use bash loadable module for sleep
+
+mysleep_builtin() {
+ builtin sleep "${1}"
+ ret=$?
+ if [ $ret -ne 0 ]; then
+ echo >&2 "$0: Cannot use builtin sleep for sleeping (return code ${ret})."
+ mysleep="sleep"
+ ${mysleep} "${1}"
+ fi
+}
+
+if [ -z "${mysleep}" -a "$((BASH_VERSINFO[0] + 0))" -ge 3 -a "${NETDATA_BASH_LOADABLES}" != "DISABLE" ]; then
+ # enable modules only for bash version 3+
+
+ for bash_modules_path in ${BASH_LOADABLES_PATH//:/ } "$(pkg-config bash --variable=loadablesdir 2>/dev/null)" "/usr/lib/bash" "/lib/bash" "/lib64/bash" "/usr/local/lib/bash" "/usr/local/lib64/bash"; do
+ [ -z "${bash_modules_path}" -o ! -d "${bash_modules_path}" ] && continue
+
+ # check for sleep
+ for bash_module_sleep in "sleep" "sleep.so"; do
+ if [ -f "${bash_modules_path}/${bash_module_sleep}" ]; then
+ if enable -f "${bash_modules_path}/${bash_module_sleep}" sleep 2>/dev/null; then
+ mysleep="mysleep_builtin"
+ # echo >&2 "$0: Using bash loadable ${bash_modules_path}/${bash_module_sleep} for sleep"
+ break
+ fi
+ fi
+
+ done
+
+ [ ! -z "${mysleep}" ] && break
+ done
+fi
+
+# -----------------------------------------------------------------------------
+# fallback to external sleep
+
+[ -z "${mysleep}" ] && mysleep="sleep"
+
+# -----------------------------------------------------------------------------
+# this function is used to sleep a fraction of a second
+# it calculates the time elapsed between its calls
+# and adjusts the sleep time, to give you loop iterations
+# of exactly the period you requested.
+
+LOOPSLEEPMS_LASTRUN=0
+LOOPSLEEPMS_NEXTRUN=0
+LOOPSLEEPMS_LASTSLEEP=0
+LOOPSLEEPMS_LASTWORK=0
+
+loopsleepms() {
+ local tellwork=0 t="${1}" div s m now mstosleep
+
+ if [ "${t}" = "tellwork" ]; then
+ tellwork=1
+ shift
+ t="${1}"
+ fi
+
+ # $t = the time in seconds to wait
+
+ # if high resolution is not supported
+ # just sleep the time requested, in seconds
+ if [ ${LOOPSLEEPMS_HIGHRES} -eq 0 ]; then
+ sleep ${t}
+ return
+ fi
+
+ # get the current time, in ms in ${now_ms}
+ ${current_time_ms}
+
+ # calculate ms since last run
+ [ ${LOOPSLEEPMS_LASTRUN} -gt 0 ] &&
+ LOOPSLEEPMS_LASTWORK=$((now_ms - LOOPSLEEPMS_LASTRUN - LOOPSLEEPMS_LASTSLEEP + current_time_ms_accuracy))
+ # echo "# last loop's work took $LOOPSLEEPMS_LASTWORK ms"
+
+ # remember this run
+ LOOPSLEEPMS_LASTRUN=${now_ms}
+
+ # calculate the next run
+ LOOPSLEEPMS_NEXTRUN=$(((now_ms - (now_ms % (t * 1000))) + (t * 1000)))
+
+ # calculate ms to sleep
+ mstosleep=$((LOOPSLEEPMS_NEXTRUN - now_ms + current_time_ms_accuracy))
+ # echo "# mstosleep is $mstosleep ms"
+
+ # if we are too slow, sleep some time
+ test ${mstosleep} -lt 200 && mstosleep=200
+
+ s=$((mstosleep / 1000))
+ m=$((mstosleep - (s * 1000)))
+ [ "${m}" -lt 100 ] && m="0${m}"
+ [ "${m}" -lt 10 ] && m="0${m}"
+
+ test $tellwork -eq 1 && echo >&2 " >>> PERFORMANCE >>> WORK TOOK ${LOOPSLEEPMS_LASTWORK} ms ( $((LOOPSLEEPMS_LASTWORK * 100 / 1000)).$((LOOPSLEEPMS_LASTWORK % 10))% cpu ) >>> SLEEPING ${mstosleep} ms"
+
+ # echo "# sleeping ${s}.${m}"
+ # echo
+ ${mysleep} ${s}.${m}
+
+ # keep the values we need
+ # for our next run
+ LOOPSLEEPMS_LASTSLEEP=$mstosleep
+}
+
+# test it
+#while [ 1 ]
+#do
+# r=$(( (RANDOM * 2000 / 32767) ))
+# s=$((r / 1000))
+# m=$((r - (s * 1000)))
+# [ "${m}" -lt 100 ] && m="0${m}"
+# [ "${m}" -lt 10 ] && m="0${m}"
+# echo "${r} = ${s}.${m}"
+#
+# # the work
+# ${mysleep} ${s}.${m}
+#
+# # the alignment loop
+# loopsleepms tellwork 1
+#done
diff --git a/collectors/charts.d.plugin/nut/Makefile.inc b/collectors/charts.d.plugin/nut/Makefile.inc
new file mode 100644
index 0000000..4fb4714
--- /dev/null
+++ b/collectors/charts.d.plugin/nut/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_charts_DATA += nut/nut.chart.sh
+dist_chartsconfig_DATA += nut/nut.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += nut/README.md nut/Makefile.inc
+
diff --git a/collectors/charts.d.plugin/nut/README.md b/collectors/charts.d.plugin/nut/README.md
new file mode 100644
index 0000000..3f9c5f0
--- /dev/null
+++ b/collectors/charts.d.plugin/nut/README.md
@@ -0,0 +1,74 @@
+<!--
+title: "UPS/PDU monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/charts.d.plugin/nut/README.md
+sidebar_label: "UPS/PDU"
+-->
+
+# UPS/PDU monitoring with Netdata
+
+Collects UPS data for all power devices configured in the system.
+
+The following charts will be created:
+
+1. **UPS Charge**
+
+- charge percentage
+
+2. **UPS Battery Voltage**
+
+- current voltage
+- high voltage
+- low voltage
+- nominal voltage
+
+3. **UPS Input Voltage**
+
+- current voltage
+- fault voltage
+- nominal voltage
+
+4. **UPS Input Current**
+
+- nominal current
+
+5. **UPS Input Frequency**
+
+- current frequency
+- nominal frequency
+
+6. **UPS Output Voltage**
+
+- current voltage
+
+7. **UPS Load**
+
+- current load
+
+8. **UPS Temperature**
+
+- current temperature
+
+## Configuration
+
+Edit the `charts.d/nut.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config charts.d/nut.conf
+```
+
+This is the internal default for `charts.d/nut.conf`:
+
+```sh
+# a space separated list of UPS names
+# if empty, the list returned by 'upsc -l' will be used
+nut_ups=
+
+# how frequently to collect UPS data
+nut_update_every=2
+```
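+
+For example, to monitor only specific UPSes and rename one of them on the dashboard, you could combine `nut_ups` with the `nut_names` array; the names `apc1`, `apc2`, and `rack-ups` below are hypothetical:
+
+```sh
+# a space separated list of UPS names to monitor
+nut_ups="apc1 apc2"
+
+# chart the UPS 'apc1' under the alias 'rack-ups'
+nut_names["apc1"]="rack-ups"
+```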
+
+---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fcharts.d.plugin%2Fnut%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/charts.d.plugin/nut/nut.chart.sh b/collectors/charts.d.plugin/nut/nut.chart.sh
new file mode 100644
index 0000000..6023336
--- /dev/null
+++ b/collectors/charts.d.plugin/nut/nut.chart.sh
@@ -0,0 +1,232 @@
+# shellcheck shell=bash
+# no need for shebang - this file is loaded from charts.d.plugin
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# netdata
+# real-time performance and health monitoring, done right!
+# (C) 2016-2017 Costa Tsaousis <costa@tsaousis.gr>
+#
+
+# a space separated list of UPS names
+# if empty, the list returned by 'upsc -l' will be used
+nut_ups=
+
+# how frequently to collect UPS data
+nut_update_every=2
+
+# how much time in seconds, to wait for nut to respond
+nut_timeout=2
+
+# set this to 1, to enable another chart showing the number
+# of UPS clients connected to upsd
+nut_clients_chart=0
+
+# the priority of nut related to other charts
+nut_priority=90000
+
+declare -A nut_ids=()
+declare -A nut_names=()
+
+nut_get_all() {
+ run -t $nut_timeout upsc -l
+}
+
+nut_get() {
+ run -t $nut_timeout upsc "$1"
+
+ if [ "${nut_clients_chart}" -eq "1" ]; then
+ printf "ups.connected_clients: "
+ run -t $nut_timeout upsc -c "$1" | wc -l
+ fi
+}
+
+nut_check() {
+
+ # this should return:
+ # - 0 to enable the chart
+ # - 1 to disable the chart
+
+ local x
+
+ require_cmd upsc || return 1
+
+ [ -z "$nut_ups" ] && nut_ups="$(nut_get_all)"
+
+ for x in $nut_ups; do
+ nut_get "$x" > /dev/null
+ # shellcheck disable=SC2181
+ if [ $? -eq 0 ]; then
+ if [ -n "${nut_names[${x}]}" ]; then
+ nut_ids[$x]="$(fixid "${nut_names[${x}]}")"
+ else
+ nut_ids[$x]="$(fixid "$x")"
+ fi
+ continue
+ fi
+ error "cannot get information for NUT UPS '$x'."
+ done
+
+ if [ ${#nut_ids[@]} -eq 0 ]; then
+ # shellcheck disable=SC2154
+ error "Cannot find UPSes - please set nut_ups='ups_name' in $confd/nut.conf"
+ return 1
+ fi
+
+ return 0
+}
+
+nut_create() {
+ # create the charts
+ local x
+
+ for x in "${nut_ids[@]}"; do
+ cat << EOF
+CHART nut_$x.charge '' "UPS Charge" "percentage" ups nut.charge area $((nut_priority + 1)) $nut_update_every
+DIMENSION battery_charge charge absolute 1 100
+
+CHART nut_$x.runtime '' "UPS Runtime" "seconds" ups nut.runtime area $((nut_priority + 2)) $nut_update_every
+DIMENSION battery_runtime runtime absolute 1 100
+
+CHART nut_$x.battery_voltage '' "UPS Battery Voltage" "Volts" ups nut.battery.voltage line $((nut_priority + 3)) $nut_update_every
+DIMENSION battery_voltage voltage absolute 1 100
+DIMENSION battery_voltage_high high absolute 1 100
+DIMENSION battery_voltage_low low absolute 1 100
+DIMENSION battery_voltage_nominal nominal absolute 1 100
+
+CHART nut_$x.input_voltage '' "UPS Input Voltage" "Volts" input nut.input.voltage line $((nut_priority + 4)) $nut_update_every
+DIMENSION input_voltage voltage absolute 1 100
+DIMENSION input_voltage_fault fault absolute 1 100
+DIMENSION input_voltage_nominal nominal absolute 1 100
+
+CHART nut_$x.input_current '' "UPS Input Current" "Ampere" input nut.input.current line $((nut_priority + 5)) $nut_update_every
+DIMENSION input_current_nominal nominal absolute 1 100
+
+CHART nut_$x.input_frequency '' "UPS Input Frequency" "Hz" input nut.input.frequency line $((nut_priority + 6)) $nut_update_every
+DIMENSION input_frequency frequency absolute 1 100
+DIMENSION input_frequency_nominal nominal absolute 1 100
+
+CHART nut_$x.output_voltage '' "UPS Output Voltage" "Volts" output nut.output.voltage line $((nut_priority + 7)) $nut_update_every
+DIMENSION output_voltage voltage absolute 1 100
+
+CHART nut_$x.load '' "UPS Load" "percentage" ups nut.load area $((nut_priority)) $nut_update_every
+DIMENSION load load absolute 1 100
+
+CHART nut_$x.temp '' "UPS Temperature" "temperature" ups nut.temperature line $((nut_priority + 8)) $nut_update_every
+DIMENSION temp temp absolute 1 100
+EOF
+
+ if [ "${nut_clients_chart}" = "1" ]; then
+ cat << EOF2
+CHART nut_$x.clients '' "UPS Connected Clients" "clients" ups nut.clients area $((nut_priority + 9)) $nut_update_every
+DIMENSION clients '' absolute 1 1
+EOF2
+ fi
+
+ done
+
+ return 0
+}
+
+nut_update() {
+ # the first argument to this function is the microseconds since last update
+ # pass this parameter to the BEGIN statement (see below).
+
+ # do all the work to collect / calculate the values
+ # for each dimension
+ # remember: KEEP IT SIMPLE AND SHORT
+
+ local i x
+ for i in "${!nut_ids[@]}"; do
+ x="${nut_ids[$i]}"
+ nut_get "$i" | awk "
+BEGIN {
+ battery_charge = 0;
+ battery_runtime = 0;
+ battery_voltage = 0;
+ battery_voltage_high = 0;
+ battery_voltage_low = 0;
+ battery_voltage_nominal = 0;
+ input_voltage = 0;
+ input_voltage_fault = 0;
+ input_voltage_nominal = 0;
+ input_current_nominal = 0;
+ input_frequency = 0;
+ input_frequency_nominal = 0;
+ output_voltage = 0;
+ load = 0;
+ temp = 0;
+ clients = 0;
+ do_clients = ${nut_clients_chart};
+}
+/^battery.charge: .*/ { battery_charge = \$2 * 100 };
+/^battery.runtime: .*/ { battery_runtime = \$2 * 100 };
+/^battery.voltage: .*/ { battery_voltage = \$2 * 100 };
+/^battery.voltage.high: .*/ { battery_voltage_high = \$2 * 100 };
+/^battery.voltage.low: .*/ { battery_voltage_low = \$2 * 100 };
+/^battery.voltage.nominal: .*/ { battery_voltage_nominal = \$2 * 100 };
+/^input.voltage: .*/ { input_voltage = \$2 * 100 };
+/^input.voltage.fault: .*/ { input_voltage_fault = \$2 * 100 };
+/^input.voltage.nominal: .*/ { input_voltage_nominal = \$2 * 100 };
+/^input.current.nominal: .*/ { input_current_nominal = \$2 * 100 };
+/^input.frequency: .*/ { input_frequency = \$2 * 100 };
+/^input.frequency.nominal: .*/ { input_frequency_nominal = \$2 * 100 };
+/^output.voltage: .*/ { output_voltage = \$2 * 100 };
+/^ups.load: .*/ { load = \$2 * 100 };
+/^ups.temperature: .*/ { temp = \$2 * 100 };
+/^ups.connected_clients: .*/ { clients = \$2 };
+END {
+ print \"BEGIN nut_$x.charge $1\";
+ print \"SET battery_charge = \" battery_charge;
+ print \"END\"
+
+ print \"BEGIN nut_$x.runtime $1\";
+ print \"SET battery_runtime = \" battery_runtime;
+ print \"END\"
+
+ print \"BEGIN nut_$x.battery_voltage $1\";
+ print \"SET battery_voltage = \" battery_voltage;
+ print \"SET battery_voltage_high = \" battery_voltage_high;
+ print \"SET battery_voltage_low = \" battery_voltage_low;
+ print \"SET battery_voltage_nominal = \" battery_voltage_nominal;
+ print \"END\"
+
+ print \"BEGIN nut_$x.input_voltage $1\";
+ print \"SET input_voltage = \" input_voltage;
+ print \"SET input_voltage_fault = \" input_voltage_fault;
+ print \"SET input_voltage_nominal = \" input_voltage_nominal;
+ print \"END\"
+
+ print \"BEGIN nut_$x.input_current $1\";
+ print \"SET input_current_nominal = \" input_current_nominal;
+ print \"END\"
+
+ print \"BEGIN nut_$x.input_frequency $1\";
+ print \"SET input_frequency = \" input_frequency;
+ print \"SET input_frequency_nominal = \" input_frequency_nominal;
+ print \"END\"
+
+ print \"BEGIN nut_$x.output_voltage $1\";
+ print \"SET output_voltage = \" output_voltage;
+ print \"END\"
+
+ print \"BEGIN nut_$x.load $1\";
+ print \"SET load = \" load;
+ print \"END\"
+
+ print \"BEGIN nut_$x.temp $1\";
+ print \"SET temp = \" temp;
+ print \"END\"
+
+ if(do_clients) {
+ print \"BEGIN nut_$x.clients $1\";
+ print \"SET clients = \" clients;
+ print \"END\"
+ }
+}"
+ # shellcheck disable=2181
+ [ $? -ne 0 ] && unset "nut_ids[$i]" && error "failed to get values for '$i', disabling it."
+ done
+
+ [ ${#nut_ids[@]} -eq 0 ] && error "no UPSes left active." && return 1
+ return 0
+}
diff --git a/collectors/charts.d.plugin/nut/nut.conf b/collectors/charts.d.plugin/nut/nut.conf
new file mode 100644
index 0000000..b95ad90
--- /dev/null
+++ b/collectors/charts.d.plugin/nut/nut.conf
@@ -0,0 +1,33 @@
+# no need for shebang - this file is loaded from charts.d.plugin
+
+# netdata
+# real-time performance and health monitoring, done right!
+# (C) 2018 Costa Tsaousis <costa@tsaousis.gr>
+# GPL v3+
+
+# a space separated list of UPS names
+# if empty, the list returned by 'upsc -l' will be used
+#nut_ups=
+
+# each line represents an alias for one UPS
+# if empty, the FQDN will be used
+#nut_names["FQDN1"]="alias"
+#nut_names["FQDN2"]="alias"
+
+# how much time in seconds, to wait for nut to respond
+#nut_timeout=2
+
+# set this to 1, to enable another chart showing the number
+# of UPS clients connected to upsd
+#nut_clients_chart=1
+
+# the data collection frequency
+# if unset, will inherit the netdata update frequency
+#nut_update_every=2
+
+# the charts priority on the dashboard
+#nut_priority=90000
+
+# the number of retries to do in case of failure
+# before disabling the module
+#nut_retries=10
diff --git a/collectors/charts.d.plugin/opensips/Makefile.inc b/collectors/charts.d.plugin/opensips/Makefile.inc
new file mode 100644
index 0000000..a7b5d3a
--- /dev/null
+++ b/collectors/charts.d.plugin/opensips/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_charts_DATA += opensips/opensips.chart.sh
+dist_chartsconfig_DATA += opensips/opensips.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += opensips/README.md opensips/Makefile.inc
+
diff --git a/collectors/charts.d.plugin/opensips/README.md b/collectors/charts.d.plugin/opensips/README.md
new file mode 100644
index 0000000..7575a1d
--- /dev/null
+++ b/collectors/charts.d.plugin/opensips/README.md
@@ -0,0 +1,19 @@
+<!--
+title: "OpenSIPS monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/charts.d.plugin/opensips/README.md
+sidebar_label: "OpenSIPS"
+-->
+
+# OpenSIPS monitoring with Netdata
+
+## Configuration
+
+Edit the `charts.d/opensips.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config charts.d/opensips.conf
+```
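+
+If `opensipsctl` is not in the `PATH` of the Netdata user, set `opensips_cmd` to its full path. A minimal sketch (the install path below is an assumption, adjust it to your system):
+
+```sh
+# the full path to opensipsctl
+opensips_cmd="/usr/local/opensips/sbin/opensipsctl"
+
+# the arguments passed to it to fetch statistics (this is the default)
+opensips_opts="fifo get_statistics all"
+```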
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fcharts.d.plugin%2Fopensips%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/charts.d.plugin/opensips/opensips.chart.sh b/collectors/charts.d.plugin/opensips/opensips.chart.sh
new file mode 100644
index 0000000..8ff3e32
--- /dev/null
+++ b/collectors/charts.d.plugin/opensips/opensips.chart.sh
@@ -0,0 +1,324 @@
+# shellcheck shell=bash disable=SC1117,SC2154,SC2086
+# no need for shebang - this file is loaded from charts.d.plugin
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# netdata
+# real-time performance and health monitoring, done right!
+# (C) 2016 Costa Tsaousis <costa@tsaousis.gr>
+#
+
+opensips_opts="fifo get_statistics all"
+opensips_cmd=
+opensips_update_every=5
+opensips_timeout=2
+opensips_priority=80000
+
+opensips_get_stats() {
+ run -t $opensips_timeout "$opensips_cmd" $opensips_opts |
+ grep "^\(core\|dialog\|net\|registrar\|shmem\|siptrace\|sl\|tm\|uri\|usrloc\):[a-zA-Z0-9_-]\+[[:space:]]*[=:]\+[[:space:]]*[0-9]\+[[:space:]]*$" |
+ sed \
+ -e "s|[[:space:]]*[=:]\+[[:space:]]*\([0-9]\+\)[[:space:]]*$|=\1|g" \
+ -e "s|[[:space:]:-]\+|_|g" \
+ -e "s|^|opensips_|g"
+
+ local ret=$?
+ [ $ret -ne 0 ] && echo "opensips_command_failed=1"
+ return $ret
+}
+
+opensips_check() {
+ # if the user did not provide an opensips_cmd
+ # try to find it in the system
+ if [ -z "$opensips_cmd" ]; then
+ require_cmd opensipsctl || return 1
+ fi
+
+ # check once if the command works
+ local x
+ x="$(opensips_get_stats | grep "^opensips_core_")"
+ # shellcheck disable=SC2181
+ if [ ! $? -eq 0 ] || [ -z "$x" ]; then
+ error "cannot get global status. Please set opensips_opts='options' whatever needed to get connected to opensips server, in $confd/opensips.conf"
+ return 1
+ fi
+
+ return 0
+}
+
+opensips_create() {
+ # create the charts
+ cat << EOF
+CHART opensips.dialogs_active '' "OpenSIPS Active Dialogs" "dialogs" dialogs '' area $((opensips_priority + 1)) $opensips_update_every
+DIMENSION dialog_active_dialogs active absolute 1 1
+DIMENSION dialog_early_dialogs early absolute -1 1
+
+CHART opensips.users '' "OpenSIPS Users" "users" users '' line $((opensips_priority + 2)) $opensips_update_every
+DIMENSION usrloc_registered_users registered absolute 1 1
+DIMENSION usrloc_location_users location absolute 1 1
+DIMENSION usrloc_location_contacts contacts absolute 1 1
+DIMENSION usrloc_location_expires expires incremental -1 1
+
+CHART opensips.registrar '' "OpenSIPS Registrar" "registrations/s" registrar '' line $((opensips_priority + 3)) $opensips_update_every
+DIMENSION registrar_accepted_regs accepted incremental 1 1
+DIMENSION registrar_rejected_regs rejected incremental -1 1
+
+CHART opensips.transactions '' "OpenSIPS Transactions" "transactions/s" transactions '' line $((opensips_priority + 4)) $opensips_update_every
+DIMENSION tm_UAS_transactions UAS incremental 1 1
+DIMENSION tm_UAC_transactions UAC incremental -1 1
+
+CHART opensips.core_rcv '' "OpenSIPS Core Receives" "queries/s" core '' line $((opensips_priority + 5)) $opensips_update_every
+DIMENSION core_rcv_requests requests incremental 1 1
+DIMENSION core_rcv_replies replies incremental -1 1
+
+CHART opensips.core_fwd '' "OpenSIPS Core Forwards" "queries/s" core '' line $((opensips_priority + 6)) $opensips_update_every
+DIMENSION core_fwd_requests requests incremental 1 1
+DIMENSION core_fwd_replies replies incremental -1 1
+
+CHART opensips.core_drop '' "OpenSIPS Core Drops" "queries/s" core '' line $((opensips_priority + 7)) $opensips_update_every
+DIMENSION core_drop_requests requests incremental 1 1
+DIMENSION core_drop_replies replies incremental -1 1
+
+CHART opensips.core_err '' "OpenSIPS Core Errors" "queries/s" core '' line $((opensips_priority + 8)) $opensips_update_every
+DIMENSION core_err_requests requests incremental 1 1
+DIMENSION core_err_replies replies incremental -1 1
+
+CHART opensips.core_bad '' "OpenSIPS Core Bad" "queries/s" core '' line $((opensips_priority + 9)) $opensips_update_every
+DIMENSION core_bad_URIs_rcvd bad_URIs_rcvd incremental 1 1
+DIMENSION core_unsupported_methods unsupported_methods incremental 1 1
+DIMENSION core_bad_msg_hdr bad_msg_hdr incremental 1 1
+
+CHART opensips.tm_replies '' "OpenSIPS TM Replies" "replies/s" transactions '' line $((opensips_priority + 10)) $opensips_update_every
+DIMENSION tm_received_replies received incremental 1 1
+DIMENSION tm_relayed_replies relayed incremental 1 1
+DIMENSION tm_local_replies local incremental 1 1
+
+CHART opensips.transactions_status '' "OpenSIPS Transactions Status" "transactions/s" transactions '' line $((opensips_priority + 11)) $opensips_update_every
+DIMENSION tm_2xx_transactions 2xx incremental 1 1
+DIMENSION tm_3xx_transactions 3xx incremental 1 1
+DIMENSION tm_4xx_transactions 4xx incremental 1 1
+DIMENSION tm_5xx_transactions 5xx incremental 1 1
+DIMENSION tm_6xx_transactions 6xx incremental 1 1
+
+CHART opensips.transactions_inuse '' "OpenSIPS InUse Transactions" "transactions" transactions '' line $((opensips_priority + 12)) $opensips_update_every
+DIMENSION tm_inuse_transactions inuse absolute 1 1
+
+CHART opensips.sl_replies '' "OpenSIPS SL Replies" "replies/s" core '' line $((opensips_priority + 13)) $opensips_update_every
+DIMENSION sl_1xx_replies 1xx incremental 1 1
+DIMENSION sl_2xx_replies 2xx incremental 1 1
+DIMENSION sl_3xx_replies 3xx incremental 1 1
+DIMENSION sl_4xx_replies 4xx incremental 1 1
+DIMENSION sl_5xx_replies 5xx incremental 1 1
+DIMENSION sl_6xx_replies 6xx incremental 1 1
+DIMENSION sl_sent_replies sent incremental 1 1
+DIMENSION sl_sent_err_replies error incremental 1 1
+DIMENSION sl_received_ACKs ACKed incremental 1 1
+
+CHART opensips.dialogs '' "OpenSIPS Dialogs" "dialogs/s" dialogs '' line $((opensips_priority + 14)) $opensips_update_every
+DIMENSION dialog_processed_dialogs processed incremental 1 1
+DIMENSION dialog_expired_dialogs expired incremental 1 1
+DIMENSION dialog_failed_dialogs failed incremental -1 1
+
+CHART opensips.net_waiting '' "OpenSIPS Network Waiting" "kilobytes" net '' line $((opensips_priority + 15)) $opensips_update_every
+DIMENSION net_waiting_udp UDP absolute 1 1024
+DIMENSION net_waiting_tcp TCP absolute 1 1024
+
+CHART opensips.uri_checks '' "OpenSIPS URI Checks" "checks / sec" uri '' line $((opensips_priority + 16)) $opensips_update_every
+DIMENSION uri_positive_checks positive incremental 1 1
+DIMENSION uri_negative_checks negative incremental -1 1
+
+CHART opensips.traces '' "OpenSIPS Traces" "traces / sec" traces '' line $((opensips_priority + 17)) $opensips_update_every
+DIMENSION siptrace_traced_requests requests incremental 1 1
+DIMENSION siptrace_traced_replies replies incremental -1 1
+
+CHART opensips.shmem '' "OpenSIPS Shared Memory" "kilobytes" mem '' line $((opensips_priority + 18)) $opensips_update_every
+DIMENSION shmem_total_size total absolute 1 1024
+DIMENSION shmem_used_size used absolute 1 1024
+DIMENSION shmem_real_used_size real_used absolute 1 1024
+DIMENSION shmem_max_used_size max_used absolute 1 1024
+DIMENSION shmem_free_size free absolute 1 1024
+
+CHART opensips.shmem_fragments '' "OpenSIPS Shared Memory Fragmentation" "fragments" mem '' line $((opensips_priority + 19)) $opensips_update_every
+DIMENSION shmem_fragments fragments absolute 1 1
+EOF
+
+ return 0
+}
+
+opensips_update() {
+ # the first argument to this function is the microseconds since last update
+ # pass this parameter to the BEGIN statement (see below).
+
+ # do all the work to collect / calculate the values
+ # for each dimension
+
+ # 1. get the statistics from opensips
+ # 2. grep only the lines of the statistics groups we know
+ # (core, dialog, net, registrar, shmem, siptrace, sl, tm, uri, usrloc)
+ # 3. sed to normalize each line to: opensips_<group>_<name>=<value>
+ # 4. then execute the result as a script, with eval
+ # be very careful with eval:
+ # prepare the script and always grep at the end the lines that are useful, so that
+ # even if something goes wrong, no other code can be executed
+
+ unset \
+ opensips_dialog_active_dialogs \
+ opensips_dialog_early_dialogs \
+ opensips_usrloc_registered_users \
+ opensips_usrloc_location_users \
+ opensips_usrloc_location_contacts \
+ opensips_usrloc_location_expires \
+ opensips_registrar_accepted_regs \
+ opensips_registrar_rejected_regs \
+ opensips_tm_UAS_transactions \
+ opensips_tm_UAC_transactions \
+ opensips_core_rcv_requests \
+ opensips_core_rcv_replies \
+ opensips_core_fwd_requests \
+ opensips_core_fwd_replies \
+ opensips_core_drop_requests \
+ opensips_core_drop_replies \
+ opensips_core_err_requests \
+ opensips_core_err_replies \
+ opensips_core_bad_URIs_rcvd \
+ opensips_core_unsupported_methods \
+ opensips_core_bad_msg_hdr \
+ opensips_tm_received_replies \
+ opensips_tm_relayed_replies \
+ opensips_tm_local_replies \
+ opensips_tm_2xx_transactions \
+ opensips_tm_3xx_transactions \
+ opensips_tm_4xx_transactions \
+ opensips_tm_5xx_transactions \
+ opensips_tm_6xx_transactions \
+ opensips_tm_inuse_transactions \
+ opensips_sl_1xx_replies \
+ opensips_sl_2xx_replies \
+ opensips_sl_3xx_replies \
+ opensips_sl_4xx_replies \
+ opensips_sl_5xx_replies \
+ opensips_sl_6xx_replies \
+ opensips_sl_sent_replies \
+ opensips_sl_sent_err_replies \
+ opensips_sl_received_ACKs \
+ opensips_dialog_processed_dialogs \
+ opensips_dialog_expired_dialogs \
+ opensips_dialog_failed_dialogs \
+ opensips_net_waiting_udp \
+ opensips_net_waiting_tcp \
+ opensips_uri_positive_checks \
+ opensips_uri_negative_checks \
+ opensips_siptrace_traced_requests \
+ opensips_siptrace_traced_replies \
+ opensips_shmem_total_size \
+ opensips_shmem_used_size \
+ opensips_shmem_real_used_size \
+ opensips_shmem_max_used_size \
+ opensips_shmem_free_size \
+ opensips_shmem_fragments
+
+ opensips_command_failed=0
+ eval "local $(opensips_get_stats)"
+ # shellcheck disable=SC2181
+ [ $? -ne 0 ] && return 1
+
+ [ $opensips_command_failed -eq 1 ] && error "failed to get values, disabling." && return 1
+
+ # write the result of the work.
+ cat << VALUESEOF
+BEGIN opensips.dialogs_active $1
+SET dialog_active_dialogs = $opensips_dialog_active_dialogs
+SET dialog_early_dialogs = $opensips_dialog_early_dialogs
+END
+BEGIN opensips.users $1
+SET usrloc_registered_users = $opensips_usrloc_registered_users
+SET usrloc_location_users = $opensips_usrloc_location_users
+SET usrloc_location_contacts = $opensips_usrloc_location_contacts
+SET usrloc_location_expires = $opensips_usrloc_location_expires
+END
+BEGIN opensips.registrar $1
+SET registrar_accepted_regs = $opensips_registrar_accepted_regs
+SET registrar_rejected_regs = $opensips_registrar_rejected_regs
+END
+BEGIN opensips.transactions $1
+SET tm_UAS_transactions = $opensips_tm_UAS_transactions
+SET tm_UAC_transactions = $opensips_tm_UAC_transactions
+END
+BEGIN opensips.core_rcv $1
+SET core_rcv_requests = $opensips_core_rcv_requests
+SET core_rcv_replies = $opensips_core_rcv_replies
+END
+BEGIN opensips.core_fwd $1
+SET core_fwd_requests = $opensips_core_fwd_requests
+SET core_fwd_replies = $opensips_core_fwd_replies
+END
+BEGIN opensips.core_drop $1
+SET core_drop_requests = $opensips_core_drop_requests
+SET core_drop_replies = $opensips_core_drop_replies
+END
+BEGIN opensips.core_err $1
+SET core_err_requests = $opensips_core_err_requests
+SET core_err_replies = $opensips_core_err_replies
+END
+BEGIN opensips.core_bad $1
+SET core_bad_URIs_rcvd = $opensips_core_bad_URIs_rcvd
+SET core_unsupported_methods = $opensips_core_unsupported_methods
+SET core_bad_msg_hdr = $opensips_core_bad_msg_hdr
+END
+BEGIN opensips.tm_replies $1
+SET tm_received_replies = $opensips_tm_received_replies
+SET tm_relayed_replies = $opensips_tm_relayed_replies
+SET tm_local_replies = $opensips_tm_local_replies
+END
+BEGIN opensips.transactions_status $1
+SET tm_2xx_transactions = $opensips_tm_2xx_transactions
+SET tm_3xx_transactions = $opensips_tm_3xx_transactions
+SET tm_4xx_transactions = $opensips_tm_4xx_transactions
+SET tm_5xx_transactions = $opensips_tm_5xx_transactions
+SET tm_6xx_transactions = $opensips_tm_6xx_transactions
+END
+BEGIN opensips.transactions_inuse $1
+SET tm_inuse_transactions = $opensips_tm_inuse_transactions
+END
+BEGIN opensips.sl_replies $1
+SET sl_1xx_replies = $opensips_sl_1xx_replies
+SET sl_2xx_replies = $opensips_sl_2xx_replies
+SET sl_3xx_replies = $opensips_sl_3xx_replies
+SET sl_4xx_replies = $opensips_sl_4xx_replies
+SET sl_5xx_replies = $opensips_sl_5xx_replies
+SET sl_6xx_replies = $opensips_sl_6xx_replies
+SET sl_sent_replies = $opensips_sl_sent_replies
+SET sl_sent_err_replies = $opensips_sl_sent_err_replies
+SET sl_received_ACKs = $opensips_sl_received_ACKs
+END
+BEGIN opensips.dialogs $1
+SET dialog_processed_dialogs = $opensips_dialog_processed_dialogs
+SET dialog_expired_dialogs = $opensips_dialog_expired_dialogs
+SET dialog_failed_dialogs = $opensips_dialog_failed_dialogs
+END
+BEGIN opensips.net_waiting $1
+SET net_waiting_udp = $opensips_net_waiting_udp
+SET net_waiting_tcp = $opensips_net_waiting_tcp
+END
+BEGIN opensips.uri_checks $1
+SET uri_positive_checks = $opensips_uri_positive_checks
+SET uri_negative_checks = $opensips_uri_negative_checks
+END
+BEGIN opensips.traces $1
+SET siptrace_traced_requests = $opensips_siptrace_traced_requests
+SET siptrace_traced_replies = $opensips_siptrace_traced_replies
+END
+BEGIN opensips.shmem $1
+SET shmem_total_size = $opensips_shmem_total_size
+SET shmem_used_size = $opensips_shmem_used_size
+SET shmem_real_used_size = $opensips_shmem_real_used_size
+SET shmem_max_used_size = $opensips_shmem_max_used_size
+SET shmem_free_size = $opensips_shmem_free_size
+END
+BEGIN opensips.shmem_fragments $1
+SET shmem_fragments = $opensips_shmem_fragments
+END
+VALUESEOF
+
+ return 0
+}
diff --git a/collectors/charts.d.plugin/opensips/opensips.conf b/collectors/charts.d.plugin/opensips/opensips.conf
new file mode 100644
index 0000000..e25111d
--- /dev/null
+++ b/collectors/charts.d.plugin/opensips/opensips.conf
@@ -0,0 +1,21 @@
+# no need for shebang - this file is loaded from charts.d.plugin
+
+# netdata
+# real-time performance and health monitoring, done right!
+# (C) 2018 Costa Tsaousis <costa@tsaousis.gr>
+# GPL v3+
+
+#opensips_opts="fifo get_statistics all"
+#opensips_cmd=
+#opensips_timeout=2
+
+# the data collection frequency
+# if unset, will inherit the netdata update frequency
+#opensips_update_every=5
+
+# the charts priority on the dashboard
+#opensips_priority=80000
+
+# the number of retries to do in case of failure
+# before disabling the module
+#opensips_retries=10
diff --git a/collectors/charts.d.plugin/sensors/Makefile.inc b/collectors/charts.d.plugin/sensors/Makefile.inc
new file mode 100644
index 0000000..f466a1b
--- /dev/null
+++ b/collectors/charts.d.plugin/sensors/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_charts_DATA += sensors/sensors.chart.sh
+dist_chartsconfig_DATA += sensors/sensors.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += sensors/README.md sensors/Makefile.inc
+
diff --git a/collectors/charts.d.plugin/sensors/README.md b/collectors/charts.d.plugin/sensors/README.md
new file mode 100644
index 0000000..cee3f60
--- /dev/null
+++ b/collectors/charts.d.plugin/sensors/README.md
@@ -0,0 +1,67 @@
+<!--
+title: "Linux machine sensors monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/charts.d.plugin/sensors/README.md
+-->
+
+# Linux machine sensors monitoring with Netdata
+
+> THIS MODULE IS OBSOLETE.
+> USE [THE PYTHON ONE](/collectors/python.d.plugin/sensors) - IT SUPPORTS MULTIPLE JOBS AND IT IS MORE EFFICIENT
+>
+> Unlike the python one, this module can collect temperature on RPi.
+
+The plugin will provide charts for all configured system sensors.
+
+> This plugin reads sensors directly from the kernel.
+> The `lm-sensors` package can apply calculations to the
+> kernel-provided values, but this plugin does not.
+> So, the values graphed are the raw hardware values of the sensors.
+
+The plugin will create Netdata charts for:
+
+1. **Temperature**
+2. **Voltage**
+3. **Current**
+4. **Power**
+5. **Fans Speed**
+6. **Energy**
+7. **Humidity**
+
+One chart will be created for each of the above, for every sensor chip found.
+
+## Configuration
+
+Edit the `charts.d/sensors.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config charts.d/sensors.conf
+```
+
+This is the internal default for `charts.d/sensors.conf`:
+
+```sh
+# the directory the kernel keeps sensor data
+sensors_sys_dir="${NETDATA_HOST_PREFIX}/sys/devices"
+
+# how deep in the tree to check for sensor data
+sensors_sys_depth=10
+
+# if set to 1, the script will overwrite internal
+# script functions with code generated ones
+# leave it to 1, it is faster
+sensors_source_update=1
+
+# how frequently to collect sensor data
+# the default is to collect it at every iteration of charts.d
+sensors_update_every=
+
+# array of sensors which are excluded
+# the default is to include all
+sensors_excluded=()
+```
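+
+For example, to skip specific sensors, add their input files to `sensors_excluded`; each entry is matched as a suffix of the full file path. The entries below are hypothetical:
+
+```sh
+# do not collect these sensor inputs
+sensors_excluded[0]="temp3_input"
+sensors_excluded[1]="in0_input"
+```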
+
+---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fcharts.d.plugin%2Fsensors%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/charts.d.plugin/sensors/sensors.chart.sh b/collectors/charts.d.plugin/sensors/sensors.chart.sh
new file mode 100644
index 0000000..b921877
--- /dev/null
+++ b/collectors/charts.d.plugin/sensors/sensors.chart.sh
@@ -0,0 +1,250 @@
+# shellcheck shell=bash
+# no need for shebang - this file is loaded from charts.d.plugin
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# netdata
+# real-time performance and health monitoring, done right!
+# (C) 2016 Costa Tsaousis <costa@tsaousis.gr>
+#
+
+# sensors docs
+# https://www.kernel.org/doc/Documentation/hwmon/sysfs-interface
+
+# if this chart is called X.chart.sh, then all functions and global variables
+# must start with X_
+
+# the directory the kernel keeps sensor data
+sensors_sys_dir="${NETDATA_HOST_PREFIX}/sys/devices"
+
+# how deep in the tree to check for sensor data
+sensors_sys_depth=10
+
+# if set to 1, the script will overwrite internal
+# script functions with code generated ones
+# leave it to 1, it is faster
+sensors_source_update=1
+
+# how frequently to collect sensor data
+# the default is to collect it at every iteration of charts.d
+sensors_update_every=
+
+sensors_priority=90000
+
+declare -A sensors_excluded=()
+
+sensors_find_all_files() {
+ find "$1" -maxdepth $sensors_sys_depth -name \*_input -o -name temp 2>/dev/null
+}
+
+sensors_find_all_dirs() {
+ # shellcheck disable=SC2162
+ sensors_find_all_files "$1" | while read; do
+ dirname "$REPLY"
+ done | sort -u
+}
+
+# _check is called once, to find out if this chart should be enabled or not
+sensors_check() {
+
+ # this should return:
+ # - 0 to enable the chart
+ # - 1 to disable the chart
+
+ [ -z "$(sensors_find_all_files "$sensors_sys_dir")" ] && error "no sensors found in '$sensors_sys_dir'." && return 1
+ return 0
+}
+
+sensors_check_files() {
+ # we only need sensors that report a non-zero value
+ # also remove not needed sensors
+
+ local f v excluded
+ for f in "$@"; do
+ [ ! -f "$f" ] && continue
+ for ex in "${sensors_excluded[@]}"; do
+ [[ $f =~ .*$ex$ ]] && excluded='1' && break
+ done
+
+ [ "$excluded" != "1" ] && v="$(cat "$f")" || v=0
+ v=$((v + 1 - 1))
+ [ $v -ne 0 ] && echo "$f" && continue
+ excluded=
+
+ error "$f gives zero values"
+ done
+}
+
+sensors_check_temp_type() {
+ # valid temp types are 1 to 6
+ # disabled sensors have the value 0
+
+ local f t v
+ for f in "$@"; do
+ # shellcheck disable=SC2001
+ t=$(echo "$f" | sed "s|_input$|_type|g")
+ [ "$f" = "$t" ] && echo "$f" && continue
+ [ ! -f "$t" ] && echo "$f" && continue
+
+ v="$(cat "$t")"
+ v=$((v + 1 - 1))
+ [ $v -ne 0 ] && echo "$f" && continue
+
+ error "$f is disabled"
+ done
+}
+
+# _create is called once, to create the charts
+sensors_create() {
+ local path dir name x file lfile labelname device subsystem id type mode files multiplier divisor
+
+ # we create a script with the source of the
+ # sensors_update() function
+ # - the highest speed we can achieve -
+ [ $sensors_source_update -eq 1 ] && echo >"$TMP_DIR/sensors.sh" "sensors_update() {"
+
+ for path in $(sensors_find_all_dirs "$sensors_sys_dir" | sort -u); do
+ dir=$(basename "$path")
+ device=
+ subsystem=
+ id=
+ type=
+ name=
+
+ [ -h "$path/device" ] && device=$(readlink -f "$path/device")
+ [ ! -z "$device" ] && device=$(basename "$device")
+ [ -z "$device" ] && device="$dir"
+
+ [ -h "$path/subsystem" ] && subsystem=$(readlink -f "$path/subsystem")
+ [ ! -z "$subsystem" ] && subsystem=$(basename "$subsystem")
+ [ -z "$subsystem" ] && subsystem="$dir"
+
+ [ -f "$path/name" ] && name=$(cat "$path/name")
+ [ -z "$name" ] && name="$dir"
+
+ [ -f "$path/type" ] && type=$(cat "$path/type")
+ [ -z "$type" ] && type="$dir"
+
+ id="$(fixid "$device.$subsystem.$dir")"
+
+ debug "path='$path', dir='$dir', device='$device', subsystem='$subsystem', id='$id', name='$name'"
+
+ for mode in temperature voltage fans power current energy humidity; do
+ files=
+ multiplier=1
+ divisor=1
+ algorithm="absolute"
+
+ case $mode in
+ temperature)
+ files="$(
+ ls "$path"/temp*_input 2>/dev/null
+ ls "$path/temp" 2>/dev/null
+ )"
+ files="$(sensors_check_files "$files")"
+ files="$(sensors_check_temp_type "$files")"
+ [ -z "$files" ] && continue
+ echo "CHART sensors.temp_$id '' '$name Temperature' 'Celsius' 'temperature' 'sensors.temp' line $((sensors_priority + 1)) $sensors_update_every"
+ echo >>"$TMP_DIR/sensors.sh" "echo \"BEGIN sensors.temp_$id \$1\""
+ divisor=1000
+ ;;
+
+ voltage)
+ files="$(ls "$path"/in*_input 2>/dev/null)"
+ files="$(sensors_check_files "$files")"
+ [ -z "$files" ] && continue
+ echo "CHART sensors.volt_$id '' '$name Voltage' 'Volts' 'voltage' 'sensors.volt' line $((sensors_priority + 2)) $sensors_update_every"
+ echo >>"$TMP_DIR/sensors.sh" "echo \"BEGIN sensors.volt_$id \$1\""
+ divisor=1000
+ ;;
+
+ current)
+ files="$(ls "$path"/curr*_input 2>/dev/null)"
+ files="$(sensors_check_files "$files")"
+ [ -z "$files" ] && continue
+ echo "CHART sensors.curr_$id '' '$name Current' 'Ampere' 'current' 'sensors.curr' line $((sensors_priority + 3)) $sensors_update_every"
+ echo >>"$TMP_DIR/sensors.sh" "echo \"BEGIN sensors.curr_$id \$1\""
+ divisor=1000
+ ;;
+
+ power)
+ files="$(ls "$path"/power*_input 2>/dev/null)"
+ files="$(sensors_check_files "$files")"
+ [ -z "$files" ] && continue
+ echo "CHART sensors.power_$id '' '$name Power' 'Watt' 'power' 'sensors.power' line $((sensors_priority + 4)) $sensors_update_every"
+ echo >>"$TMP_DIR/sensors.sh" "echo \"BEGIN sensors.power_$id \$1\""
+ divisor=1000000
+ ;;
+
+ fans)
+ files="$(ls "$path"/fan*_input 2>/dev/null)"
+ files="$(sensors_check_files "$files")"
+ [ -z "$files" ] && continue
+ echo "CHART sensors.fan_$id '' '$name Fans Speed' 'Rotations / Minute' 'fans' 'sensors.fans' line $((sensors_priority + 5)) $sensors_update_every"
+ echo >>"$TMP_DIR/sensors.sh" "echo \"BEGIN sensors.fan_$id \$1\""
+ ;;
+
+ energy)
+ files="$(ls "$path"/energy*_input 2>/dev/null)"
+ files="$(sensors_check_files "$files")"
+ [ -z "$files" ] && continue
+ echo "CHART sensors.energy_$id '' '$name Energy' 'Joule' 'energy' 'sensors.energy' areastack $((sensors_priority + 6)) $sensors_update_every"
+ echo >>"$TMP_DIR/sensors.sh" "echo \"BEGIN sensors.energy_$id \$1\""
+ algorithm="incremental"
+ divisor=1000000
+ ;;
+
+ humidity)
+ files="$(ls "$path"/humidity*_input 2>/dev/null)"
+ files="$(sensors_check_files "$files")"
+ [ -z "$files" ] && continue
+ echo "CHART sensors.humidity_$id '' '$name Humidity' 'Percent' 'humidity' 'sensors.humidity' line $((sensors_priority + 7)) $sensors_update_every"
+ echo >>"$TMP_DIR/sensors.sh" "echo \"BEGIN sensors.humidity_$id \$1\""
+ divisor=1000
+ ;;
+
+ *)
+ continue
+ ;;
+ esac
+
+ for x in $files; do
+ file="$x"
+ fid="$(fixid "$file")"
+ lfile="$(basename "$file" | sed "s|_input$|_label|g")"
+ labelname="$(basename "$file" | sed "s|_input$||g")"
+
+ if [ ! "$path/$lfile" = "$file" ] && [ -f "$path/$lfile" ]; then
+ labelname="$(cat "$path/$lfile")"
+ fi
+
+ echo "DIMENSION $fid '$labelname' $algorithm $multiplier $divisor"
+ echo >>"$TMP_DIR/sensors.sh" "echo \"SET $fid = \"\$(< $file )"
+ done
+
+ echo >>"$TMP_DIR/sensors.sh" "echo END"
+ done
+ done
+
+ [ $sensors_source_update -eq 1 ] && echo >>"$TMP_DIR/sensors.sh" "}"
+
+ # ok, load the function sensors_update() we created
+ # shellcheck source=/dev/null
+ [ $sensors_source_update -eq 1 ] && . "$TMP_DIR/sensors.sh"
+
+ return 0
+}
+
+# _update is called continuously, to collect the values
+sensors_update() {
+ # the first argument to this function is the microseconds since last update
+ # pass this parameter to the BEGIN statement (see below).
+
+ # do all the work to collect / calculate the values
+ # for each dimension
+ # remember: KEEP IT SIMPLE AND SHORT
+
+ # shellcheck source=/dev/null
+ [ $sensors_source_update -eq 0 ] && . "$TMP_DIR/sensors.sh" "$1"
+
+ return 0
+}
diff --git a/collectors/charts.d.plugin/sensors/sensors.conf b/collectors/charts.d.plugin/sensors/sensors.conf
new file mode 100644
index 0000000..bcb2880
--- /dev/null
+++ b/collectors/charts.d.plugin/sensors/sensors.conf
@@ -0,0 +1,32 @@
+# no need for shebang - this file is loaded from charts.d.plugin
+
+# netdata
+# real-time performance and health monitoring, done right!
+# (C) 2018 Costa Tsaousis <costa@tsaousis.gr>
+# GPL v3+
+
+# THIS PLUGIN IS DEPRECATED
+# USE THE PYTHON.D ONE
+
+# the directory the kernel keeps sensor data
+#sensors_sys_dir="/sys/devices"
+
+# how deep in the tree to check for sensor data
+#sensors_sys_depth=10
+
+# if set to 1, the script will overwrite internal
+# script functions with code generated ones
+# leave it to 1, it is faster
+#sensors_source_update=1
+
+# the data collection frequency
+# if unset, will inherit the netdata update frequency
+#sensors_update_every=
+
+# the charts priority on the dashboard
+#sensors_priority=90000
+
+# the number of retries to do in case of failure
+# before disabling the module
+#sensors_retries=10
+
diff --git a/collectors/checks.plugin/Makefile.am b/collectors/checks.plugin/Makefile.am
new file mode 100644
index 0000000..161784b
--- /dev/null
+++ b/collectors/checks.plugin/Makefile.am
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/collectors/checks.plugin/README.md b/collectors/checks.plugin/README.md
new file mode 100644
index 0000000..5f1a6b9
--- /dev/null
+++ b/collectors/checks.plugin/README.md
@@ -0,0 +1,10 @@
+<!--
+title: "checks.plugin"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/checks.plugin/README.md
+-->
+
+# checks.plugin
+
+A debugging plugin, disabled by default. It is only compiled into Netdata when internal checks (`NETDATA_INTERNAL_CHECKS`) are enabled at build time.
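+
+The plugin registers itself under the `[plugins]` section of `netdata.conf` with the config name `checks` (see `plugin_checks.h`), so enabling it should presumably look like this:
+
+```
+[plugins]
+    checks = yes
+```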
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fchecks.plugin%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/checks.plugin/plugin_checks.c b/collectors/checks.plugin/plugin_checks.c
new file mode 100644
index 0000000..f8a2008
--- /dev/null
+++ b/collectors/checks.plugin/plugin_checks.c
@@ -0,0 +1,129 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "plugin_checks.h"
+
+#ifdef NETDATA_INTERNAL_CHECKS
+
+static void checks_main_cleanup(void *ptr) {
+ struct netdata_static_thread *static_thread = (struct netdata_static_thread *)ptr;
+ static_thread->enabled = NETDATA_MAIN_THREAD_EXITING;
+
+ info("cleaning up...");
+
+ static_thread->enabled = NETDATA_MAIN_THREAD_EXITED;
+}
+
+void *checks_main(void *ptr) {
+ netdata_thread_cleanup_push(checks_main_cleanup, ptr);
+
+ usec_t usec = 0, susec = localhost->rrd_update_every * USEC_PER_SEC, loop_usec = 0, total_susec = 0;
+ struct timeval now, last, loop;
+
+ RRDSET *check1, *check2, *check3, *apps_cpu = NULL;
+
+ check1 = rrdset_create_localhost(
+ "netdata"
+ , "check1"
+ , NULL
+ , "netdata"
+ , NULL
+ , "Caller gives microseconds"
+ , "a million !"
+ , "checks.plugin"
+ , ""
+ , NETDATA_CHART_PRIO_CHECKS
+ , localhost->rrd_update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rrddim_add(check1, "absolute", NULL, -1, 1, RRD_ALGORITHM_ABSOLUTE);
+ rrddim_add(check1, "incremental", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+
+ check2 = rrdset_create_localhost(
+ "netdata"
+ , "check2"
+ , NULL
+ , "netdata"
+ , NULL
+ , "Netdata calcs microseconds"
+ , "a million !"
+ , "checks.plugin"
+ , ""
+ , NETDATA_CHART_PRIO_CHECKS
+ , localhost->rrd_update_every
+ , RRDSET_TYPE_LINE
+ );
+ rrddim_add(check2, "absolute", NULL, -1, 1, RRD_ALGORITHM_ABSOLUTE);
+ rrddim_add(check2, "incremental", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+
+ check3 = rrdset_create_localhost(
+ "netdata"
+ , "checkdt"
+ , NULL
+ , "netdata"
+ , NULL
+ , "Clock difference"
+ , "microseconds diff"
+ , "checks.plugin"
+ , ""
+ , NETDATA_CHART_PRIO_CHECKS
+ , localhost->rrd_update_every
+ , RRDSET_TYPE_LINE
+ );
+ rrddim_add(check3, "caller", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ rrddim_add(check3, "netdata", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ rrddim_add(check3, "apps.plugin", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+
+ now_realtime_timeval(&last);
+ while(!netdata_exit) {
+ usleep(susec);
+
+ // find the time to sleep in order to wait exactly update_every seconds
+ now_realtime_timeval(&now);
+ loop_usec = dt_usec(&now, &last);
+ usec = loop_usec - susec;
+ debug(D_PROCNETDEV_LOOP, "CHECK: last loop took %llu usec (worked for %llu, slept for %llu).", loop_usec, usec, susec);
+
+ if(usec < (localhost->rrd_update_every * USEC_PER_SEC / 2ULL)) susec = (localhost->rrd_update_every * USEC_PER_SEC) - usec;
+ else susec = localhost->rrd_update_every * USEC_PER_SEC / 2ULL;
+
+ // --------------------------------------------------------------------
+ // Calculate loop time
+
+ last.tv_sec = now.tv_sec;
+ last.tv_usec = now.tv_usec;
+ total_susec += loop_usec;
+
+ // --------------------------------------------------------------------
+ // check chart 1
+
+ if(check1->counter_done) rrdset_next_usec(check1, loop_usec);
+ rrddim_set(check1, "absolute", 1000000);
+ rrddim_set(check1, "incremental", total_susec);
+ rrdset_done(check1);
+
+ // --------------------------------------------------------------------
+ // check chart 2
+
+ if(check2->counter_done) rrdset_next(check2);
+ rrddim_set(check2, "absolute", 1000000);
+ rrddim_set(check2, "incremental", total_susec);
+ rrdset_done(check2);
+
+ // --------------------------------------------------------------------
+ // check chart 3
+
+ if(!apps_cpu) apps_cpu = rrdset_find_localhost("apps.cpu");
+ if(check3->counter_done) rrdset_next_usec(check3, loop_usec);
+ now_realtime_timeval(&loop);
+ rrddim_set(check3, "caller", (long long) dt_usec(&loop, &check1->last_collected_time));
+ rrddim_set(check3, "netdata", (long long) dt_usec(&loop, &check2->last_collected_time));
+ if(apps_cpu) rrddim_set(check3, "apps.plugin", (long long) dt_usec(&loop, &apps_cpu->last_collected_time));
+ rrdset_done(check3);
+ }
+
+ netdata_thread_cleanup_pop(1);
+ return NULL;
+}
+
+#endif // NETDATA_INTERNAL_CHECKS
diff --git a/collectors/checks.plugin/plugin_checks.h b/collectors/checks.plugin/plugin_checks.h
new file mode 100644
index 0000000..9349476
--- /dev/null
+++ b/collectors/checks.plugin/plugin_checks.h
@@ -0,0 +1,29 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_PLUGIN_CHECKS_H
+#define NETDATA_PLUGIN_CHECKS_H 1
+
+#include "../../daemon/common.h"
+
+#ifdef NETDATA_INTERNAL_CHECKS
+
+#define NETDATA_PLUGIN_HOOK_CHECKS \
+ { \
+ .name = "PLUGIN[check]", \
+ .config_section = CONFIG_SECTION_PLUGINS, \
+ .config_name = "checks", \
+ .enabled = 0, \
+ .thread = NULL, \
+ .init_routine = NULL, \
+ .start_routine = checks_main \
+ },
+
+extern void *checks_main(void *ptr);
+
+#else // !NETDATA_INTERNAL_CHECKS
+
+#define NETDATA_PLUGIN_HOOK_CHECKS
+
+#endif // NETDATA_INTERNAL_CHECKS
+
+#endif // NETDATA_PLUGIN_CHECKS_H
diff --git a/collectors/cups.plugin/Makefile.am b/collectors/cups.plugin/Makefile.am
new file mode 100644
index 0000000..161784b
--- /dev/null
+++ b/collectors/cups.plugin/Makefile.am
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/collectors/cups.plugin/README.md b/collectors/cups.plugin/README.md
new file mode 100644
index 0000000..373602d
--- /dev/null
+++ b/collectors/cups.plugin/README.md
@@ -0,0 +1,62 @@
+<!--
+title: "cups.plugin"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/cups.plugin/README.md
+-->
+
+# cups.plugin
+
+`cups.plugin` collects Common Unix Printing System (CUPS) metrics.
+
+## Prerequisites
+
+This plugin needs a running local CUPS daemon (`cupsd`) and does not require any configuration. It supports CUPS version 1.7 and later.
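+
+You can verify that the daemon is running with, for example, the standard CUPS `lpstat` tool:
+
+```bash
+lpstat -r
+# typically prints "scheduler is running" when cupsd is up
+```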
+
+## Charts
+
+`cups.plugin` provides one common section `destinations` and one section per destination.
+
+> Destinations in CUPS represent individual printers or classes (collections or pools) of printers (<https://www.cups.org/doc/cupspm.html#working-with-destinations>)
+
+The common `destinations` section provides these charts:
+
+1. **destinations by state**
+
+ - idle
+ - printing
+ - stopped
+
+2. **destinations by options**
+
+ - total
+ - accepting jobs
+ - shared
+
+3. **total job number by status**
+
+ - pending
+ - processing
+ - held
+
+4. **total job size by status**
+
+ - pending
+ - processing
+ - held
+
+For each destination the plugin provides these charts:
+
+1. **job number by status**
+
+ - pending
+ - held
+ - processing
+
+2. **job size by status**
+
+ - pending
+ - held
+ - processing
+
+At the moment, only the job statuses `pending`, `processing`, and `held` are reported, because we do not have a scalable method to collect `stopped`, `canceled`, `aborted`, and `completed` jobs.
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fcups.plugin%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/cups.plugin/cups_plugin.c b/collectors/cups.plugin/cups_plugin.c
new file mode 100644
index 0000000..dc86437
--- /dev/null
+++ b/collectors/cups.plugin/cups_plugin.c
@@ -0,0 +1,479 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+/*
+ * netdata cups.plugin
+ * (C) Copyright 2017-2018 Simon Nagl <simon.nagl@gmx.de>
+ * Released under GPL v3+
+ */
+
+#include "../../libnetdata/libnetdata.h"
+#include <limits.h>
+
+// callback required by fatal()
+void netdata_cleanup_and_exit(int ret) {
+ exit(ret);
+}
+
+void send_statistics( const char *action, const char *action_result, const char *action_data) {
+ (void) action;
+ (void) action_result;
+ (void) action_data;
+ return;
+}
+
+// callbacks required by popen()
+void signals_block(void) {};
+void signals_unblock(void) {};
+void signals_reset(void) {};
+
+// callback required by eval()
+int health_variable_lookup(const char *variable, uint32_t hash, struct rrdcalc *rc, calculated_number *result) {
+ (void)variable;
+ (void)hash;
+ (void)rc;
+ (void)result;
+ return 0;
+};
+
+// required by get_system_cpus()
+char *netdata_configured_host_prefix = "";
+
+// Variables
+
+static int debug = 0;
+
+static int netdata_update_every = 1;
+static int netdata_priority = 100004;
+
+
+#ifdef HAVE_CUPS
+#include <cups/cups.h>
+
+http_t *http; // connection to the cups daemon
+
+/*
+ * Used to aggregate job metrics for a destination (and all destinations).
+ */
+struct job_metrics {
+ int is_collected; // flag if this was collected in the current cycle
+
+ int num_pending;
+ int num_processing;
+ int num_held;
+
+ int size_pending; // in kilobyte
+ int size_processing; // in kilobyte
+ int size_held; // in kilobyte
+};
+DICTIONARY *dict_dest_job_metrics = NULL;
+struct job_metrics global_job_metrics;
+
+int num_dest_total;
+int num_dest_accepting_jobs;
+int num_dest_shared;
+
+int num_dest_idle;
+int num_dest_printing;
+int num_dest_stopped;
+
+void print_help() {
+ fprintf(stderr,
+ "\n"
+ "netdata cups.plugin %s\n"
+ "\n"
+ "Copyright (C) 2017-2018 Simon Nagl <simon.nagl@gmx.de>\n"
+ "Released under GNU General Public License v3+.\n"
+ "All rights reserved.\n"
+ "\n"
+ "This program is a data collector plugin for netdata.\n"
+ "\n"
+ "SYNOPSIS: cups.plugin [-d][-h][-v] COLLECTION_FREQUENCY\n"
+ "\n"
+ "Options:"
+ "\n"
+ " COLLECTION_FREQUENCY data collection frequency in seconds\n"
+ "\n"
+ " -d enable verbose output\n"
+ " default: disabled\n"
+ "\n"
+ " -v print version and exit\n"
+ "\n"
+ " -h print this message and exit\n"
+ "\n",
+ VERSION);
+}
+
+void parse_command_line(int argc, char **argv) {
+ int i;
+ int freq = 0;
+ int update_every_found = 0;
+ for (i = 1; i < argc; i++) {
+        if (isdigit(*argv[i]) && !update_every_found) {
+            int n = str2i(argv[i]);
+            if (n > 0 && n < 86400) {
+                freq = n;
+                update_every_found = 1;
+                continue;
+            }
+ } else if (strcmp("-v", argv[i]) == 0) {
+ printf("cups.plugin %s\n", VERSION);
+ exit(0);
+ } else if (strcmp("-d", argv[i]) == 0) {
+ debug = 1;
+ continue;
+ } else if (strcmp("-h", argv[i]) == 0) {
+ print_help();
+ exit(0);
+ }
+
+ print_help();
+ exit(1);
+ }
+
+ if (freq >= netdata_update_every) {
+ netdata_update_every = freq;
+ } else if (freq) {
+ error("update frequency %d seconds is too small for CUPS. Using %d.", freq, netdata_update_every);
+ }
+}
+
+/*
+ * 'cupsGetIntegerOption()' - Get an integer option value.
+ *
+ * INT_MIN is returned when the option does not exist, is not an integer, or
+ * exceeds the range of values for the "int" type.
+ *
+ * @since CUPS 2.2.4/macOS 10.13@
+ */
+
+int /* O - Option value or @code INT_MIN@ */
+getIntegerOption(
+ const char *name, /* I - Name of option */
+ int num_options, /* I - Number of options */
+ cups_option_t *options) /* I - Options */
+{
+ const char *value = cupsGetOption(name, num_options, options);
+ /* String value of option */
+ char *ptr; /* Pointer into string value */
+ long intvalue; /* Integer value */
+
+
+ if (!value || !*value)
+ return (INT_MIN);
+
+ intvalue = strtol(value, &ptr, 10);
+ if (intvalue < INT_MIN || intvalue > INT_MAX || *ptr)
+ return (INT_MIN);
+
+ return ((int)intvalue);
+}
+
+int reset_job_metrics(void *entry, void *data) {
+ (void)data;
+
+ struct job_metrics *jm = (struct job_metrics *)entry;
+
+ jm->is_collected = 0;
+ jm->num_held = 0;
+ jm->num_pending = 0;
+ jm->num_processing = 0;
+ jm->size_held = 0;
+ jm->size_pending = 0;
+ jm->size_processing = 0;
+
+ return 0;
+}
+
+struct job_metrics *get_job_metrics(char *dest) {
+ struct job_metrics *jm = dictionary_get(dict_dest_job_metrics, dest);
+
+ if (unlikely(!jm)) {
+ struct job_metrics new_job_metrics;
+ reset_job_metrics(&new_job_metrics, NULL);
+ jm = dictionary_set(dict_dest_job_metrics, dest, &new_job_metrics, sizeof(struct job_metrics));
+
+ printf("CHART cups.job_num_%s '' 'Active job number of destination %s' jobs '%s' job_num stacked %i %i\n", dest, dest, dest, netdata_priority++, netdata_update_every);
+ printf("DIMENSION pending '' absolute 1 1\n");
+ printf("DIMENSION held '' absolute 1 1\n");
+ printf("DIMENSION processing '' absolute 1 1\n");
+
+ printf("CHART cups.job_size_%s '' 'Active job size of destination %s' KB '%s' job_size stacked %i %i\n", dest, dest, dest, netdata_priority++, netdata_update_every);
+ printf("DIMENSION pending '' absolute 1 1\n");
+ printf("DIMENSION held '' absolute 1 1\n");
+ printf("DIMENSION processing '' absolute 1 1\n");
+ };
+ return jm;
+}
+
+int collect_job_metrics(char *name, void *entry, void *data) {
+ (void)data;
+
+ struct job_metrics *jm = (struct job_metrics *)entry;
+
+ if (jm->is_collected) {
+ printf(
+ "BEGIN cups.job_num_%s\n"
+ "SET pending = %d\n"
+ "SET held = %d\n"
+ "SET processing = %d\n"
+ "END\n",
+ name, jm->num_pending, jm->num_held, jm->num_processing);
+ printf(
+ "BEGIN cups.job_size_%s\n"
+ "SET pending = %d\n"
+ "SET held = %d\n"
+ "SET processing = %d\n"
+ "END\n",
+ name, jm->size_pending, jm->size_held, jm->size_processing);
+ } else {
+ printf("CHART cups.job_num_%s '' 'Active job number of destination %s' jobs '%s' job_num stacked 1 %i 'obsolete'\n", name, name, name, netdata_update_every);
+ printf("DIMENSION pending '' absolute 1 1\n");
+ printf("DIMENSION held '' absolute 1 1\n");
+ printf("DIMENSION processing '' absolute 1 1\n");
+
+ printf("CHART cups.job_size_%s '' 'Active job size of destination %s' KB '%s' job_size stacked 1 %i 'obsolete'\n", name, name, name, netdata_update_every);
+ printf("DIMENSION pending '' absolute 1 1\n");
+ printf("DIMENSION held '' absolute 1 1\n");
+ printf("DIMENSION processing '' absolute 1 1\n");
+ dictionary_del(dict_dest_job_metrics, name);
+ }
+
+ return 0;
+}
+
+void reset_metrics() {
+ num_dest_total = 0;
+ num_dest_accepting_jobs = 0;
+ num_dest_shared = 0;
+
+ num_dest_idle = 0;
+ num_dest_printing = 0;
+ num_dest_stopped = 0;
+
+ reset_job_metrics(&global_job_metrics, NULL);
+ dictionary_get_all(dict_dest_job_metrics, reset_job_metrics, NULL);
+}
+
+int main(int argc, char **argv) {
+
+ // ------------------------------------------------------------------------
+ // initialization of netdata plugin
+
+ program_name = "cups.plugin";
+
+ // disable syslog
+ error_log_syslog = 0;
+
+ // set errors flood protection to 100 logs per hour
+ error_log_errors_per_period = 100;
+ error_log_throttle_period = 3600;
+
+ parse_command_line(argc, argv);
+
+ errno = 0;
+
+ dict_dest_job_metrics = dictionary_create(DICTIONARY_FLAG_SINGLE_THREADED);
+
+ // ------------------------------------------------------------------------
+ // the main loop
+
+ if (debug)
+ fprintf(stderr, "starting data collection\n");
+
+ time_t started_t = now_monotonic_sec();
+ size_t iteration = 0;
+ usec_t step = netdata_update_every * USEC_PER_SEC;
+
+ heartbeat_t hb;
+ heartbeat_init(&hb);
+ for (iteration = 0; 1; iteration++)
+ {
+ heartbeat_next(&hb, step);
+
+ if (unlikely(netdata_exit))
+ {
+ break;
+ }
+
+ reset_metrics();
+
+ cups_dest_t *dests;
+ num_dest_total = cupsGetDests2(http, &dests);
+
+ if(unlikely(num_dest_total == 0)) {
+ // reconnect to cups to check if the server is down.
+ httpClose(http);
+ http = httpConnect2(cupsServer(), ippPort(), NULL, AF_UNSPEC, cupsEncryption(), 0, netdata_update_every * 1000, NULL);
+ if(http == NULL) {
+ error("cups daemon is not running. Exiting!");
+ exit(1);
+ }
+ }
+
+ cups_dest_t *curr_dest = dests;
+ int counter = 0;
+ while (counter < num_dest_total) {
+ if (counter != 0) {
+ curr_dest++;
+ }
+ counter++;
+
+ const char *printer_uri_supported = cupsGetOption("printer-uri-supported", curr_dest->num_options, curr_dest->options);
+ if (!printer_uri_supported) {
+ if(debug)
+                    fprintf(stderr, "destination %s discovered, but not yet set up as a local printer\n", curr_dest->name);
+ continue;
+ }
+
+ const char *printer_is_accepting_jobs = cupsGetOption("printer-is-accepting-jobs", curr_dest->num_options, curr_dest->options);
+ if (printer_is_accepting_jobs && !strcmp(printer_is_accepting_jobs, "true")) {
+ num_dest_accepting_jobs++;
+ }
+
+ const char *printer_is_shared = cupsGetOption("printer-is-shared", curr_dest->num_options, curr_dest->options);
+ if (printer_is_shared && !strcmp(printer_is_shared, "true")) {
+ num_dest_shared++;
+ }
+
+ int printer_state = getIntegerOption("printer-state", curr_dest->num_options, curr_dest->options);
+ switch (printer_state) {
+ case 3:
+ num_dest_idle++;
+ break;
+ case 4:
+ num_dest_printing++;
+ break;
+ case 5:
+ num_dest_stopped++;
+ break;
+ case INT_MIN:
+ if(debug)
+                        fprintf(stderr, "printer state is missing for destination %s\n", curr_dest->name);
+ break;
+ default:
+ error("Unknown printer state (%d) found.", printer_state);
+ break;
+ }
+
+ /*
+ * flag job metrics to print values.
+ * This is needed to report also destinations with zero active jobs.
+ */
+ struct job_metrics *jm = get_job_metrics(curr_dest->name);
+ jm->is_collected = 1;
+ }
+ cupsFreeDests(num_dest_total, dests);
+
+ if (unlikely(netdata_exit))
+ break;
+
+ cups_job_t *jobs, *curr_job;
+ int num_jobs = cupsGetJobs2(http, &jobs, NULL, 0, CUPS_WHICHJOBS_ACTIVE);
+ int i;
+ for (i = num_jobs, curr_job = jobs; i > 0; i--, curr_job++) {
+ struct job_metrics *jm = get_job_metrics(curr_job->dest);
+ jm->is_collected = 1;
+
+ switch (curr_job->state) {
+ case IPP_JOB_PENDING:
+ jm->num_pending++;
+ jm->size_pending += curr_job->size;
+ global_job_metrics.num_pending++;
+ global_job_metrics.size_pending += curr_job->size;
+ break;
+ case IPP_JOB_HELD:
+ jm->num_held++;
+ jm->size_held += curr_job->size;
+ global_job_metrics.num_held++;
+ global_job_metrics.size_held += curr_job->size;
+ break;
+ case IPP_JOB_PROCESSING:
+ jm->num_processing++;
+ jm->size_processing += curr_job->size;
+ global_job_metrics.num_processing++;
+ global_job_metrics.size_processing += curr_job->size;
+ break;
+ default:
+ error("Unsupported job state (%u) found.", curr_job->state);
+ break;
+ }
+ }
+ cupsFreeJobs(num_jobs, jobs);
+
+ dictionary_get_all_name_value(dict_dest_job_metrics, collect_job_metrics, NULL);
+
+ static int cups_printer_by_option_created = 0;
+ if (unlikely(!cups_printer_by_option_created))
+ {
+ cups_printer_by_option_created = 1;
+ printf("CHART cups.dest_state '' 'Destinations by state' dests overview dests stacked 100000 %i\n", netdata_update_every);
+ printf("DIMENSION idle '' absolute 1 1\n");
+ printf("DIMENSION printing '' absolute 1 1\n");
+ printf("DIMENSION stopped '' absolute 1 1\n");
+
+ printf("CHART cups.dest_option '' 'Destinations by option' dests overview dests line 100001 %i\n", netdata_update_every);
+ printf("DIMENSION total '' absolute 1 1\n");
+ printf("DIMENSION acceptingjobs '' absolute 1 1\n");
+ printf("DIMENSION shared '' absolute 1 1\n");
+
+ printf("CHART cups.job_num '' 'Total active job number' jobs overview job_num stacked 100002 %i\n", netdata_update_every);
+ printf("DIMENSION pending '' absolute 1 1\n");
+ printf("DIMENSION held '' absolute 1 1\n");
+ printf("DIMENSION processing '' absolute 1 1\n");
+
+ printf("CHART cups.job_size '' 'Total active job size' KB overview job_size stacked 100003 %i\n", netdata_update_every);
+ printf("DIMENSION pending '' absolute 1 1\n");
+ printf("DIMENSION held '' absolute 1 1\n");
+ printf("DIMENSION processing '' absolute 1 1\n");
+ }
+
+ printf(
+ "BEGIN cups.dest_state\n"
+ "SET idle = %d\n"
+ "SET printing = %d\n"
+ "SET stopped = %d\n"
+ "END\n",
+ num_dest_idle, num_dest_printing, num_dest_stopped);
+ printf(
+ "BEGIN cups.dest_option\n"
+ "SET total = %d\n"
+ "SET acceptingjobs = %d\n"
+ "SET shared = %d\n"
+ "END\n",
+ num_dest_total, num_dest_accepting_jobs, num_dest_shared);
+ printf(
+ "BEGIN cups.job_num\n"
+ "SET pending = %d\n"
+ "SET held = %d\n"
+ "SET processing = %d\n"
+ "END\n",
+ global_job_metrics.num_pending, global_job_metrics.num_held, global_job_metrics.num_processing);
+ printf(
+ "BEGIN cups.job_size\n"
+ "SET pending = %d\n"
+ "SET held = %d\n"
+ "SET processing = %d\n"
+ "END\n",
+ global_job_metrics.size_pending, global_job_metrics.size_held, global_job_metrics.size_processing);
+
+ fflush(stdout);
+
+ if (unlikely(netdata_exit))
+ break;
+
+ // restart check (14400 seconds)
+        if (now_monotonic_sec() - started_t > 14400)
+ break;
+ }
+
+ httpClose(http);
+ info("CUPS process exiting");
+}
+
+#else // !HAVE_CUPS
+
+int main(int argc, char **argv)
+{
+    (void)argc;
+    (void)argv;
+
+    fatal("cups.plugin is not compiled.");
+}
+
+#endif // !HAVE_CUPS
diff --git a/collectors/diskspace.plugin/Makefile.am b/collectors/diskspace.plugin/Makefile.am
new file mode 100644
index 0000000..161784b
--- /dev/null
+++ b/collectors/diskspace.plugin/Makefile.am
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/collectors/diskspace.plugin/README.md b/collectors/diskspace.plugin/README.md
new file mode 100644
index 0000000..a8b41c8
--- /dev/null
+++ b/collectors/diskspace.plugin/README.md
@@ -0,0 +1,43 @@
+<!--
+title: "diskspace.plugin"
+description: "Monitor the disk usage space of mounted disks in real-time with the Netdata Agent, plus preconfigured alarms for disks at risk of filling up."
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/diskspace.plugin/README.md
+-->
+
+# diskspace.plugin
+
+This plugin monitors the disk space usage of mounted disks on Linux. The plugin requires Netdata to have execute/search permission on the mount point itself, as well as on each component of the absolute path to the mount point.
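+
+For example, to verify that the `netdata` user can reach a given mount point (`/mnt/data` is a hypothetical path here):
+
+```
+sudo -u netdata stat /mnt/data
+```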
+
+Two charts are available for every mount:
+
+- Disk Space Usage
+- Disk Files (inodes) Usage
+
+## Configuration
+
+Simple patterns can be used to exclude mounts from the shown statistics based on path or filesystem. By default, read-only mounts are not displayed. To display them, set `yes` for a chart instead of `auto`.
+
+By default, Netdata enables monitoring of metrics only when they are not zero; metrics that are constantly zero are ignored. Metrics that start having values after Netdata is started are detected automatically, and their charts are added to the dashboard (though a refresh of the dashboard is needed for them to appear). Set `yes` for a chart instead of `auto` to enable it permanently. You can also set the `enable zero metrics` option to `yes` in the `[global]` section, which enables charts with zero metrics for all internal Netdata plugins.
+
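+For example, to enable charts with zero metrics for all internal Netdata plugins, you could set (an illustrative snippet based on the option named above):
+
+```
+[global]
+    enable zero metrics = yes
+```
+
+The `diskspace` plugin itself is configured in the `[plugin:proc:diskspace]` section:
+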
+```
+[plugin:proc:diskspace]
+ # remove charts of unmounted disks = yes
+ # update every = 1
+ # check for new mount points every = 15
+ # exclude space metrics on paths = /proc/* /sys/* /var/run/user/* /run/user/* /snap/* /var/lib/docker/*
+ # exclude space metrics on filesystems = *gvfs *gluster* *s3fs *ipfs *davfs2 *httpfs *sshfs *gdfs *moosefs fusectl autofs
+ # space usage for all disks = auto
+ # inodes usage for all disks = auto
+```
+
+Charts can be enabled/disabled for every mount separately:
+
+```
+[plugin:proc:diskspace:/]
+ # space usage = auto
+ # inodes usage = auto
+```
+
+> For disk performance monitoring, see the `proc` plugin, [here](/collectors/proc.plugin/README.md#monitoring-disks)
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fdiskspace.plugin%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/diskspace.plugin/plugin_diskspace.c b/collectors/diskspace.plugin/plugin_diskspace.c
new file mode 100644
index 0000000..4010e57
--- /dev/null
+++ b/collectors/diskspace.plugin/plugin_diskspace.c
@@ -0,0 +1,469 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "plugin_diskspace.h"
+
+#define PLUGIN_DISKSPACE_NAME "diskspace.plugin"
+
+#define DEFAULT_EXCLUDED_PATHS "/proc/* /sys/* /var/run/user/* /run/user/* /snap/* /var/lib/docker/*"
+#define DEFAULT_EXCLUDED_FILESYSTEMS "*gvfs *gluster* *s3fs *ipfs *davfs2 *httpfs *sshfs *gdfs *moosefs fusectl autofs"
+#define CONFIG_SECTION_DISKSPACE "plugin:proc:diskspace"
+
+static struct mountinfo *disk_mountinfo_root = NULL;
+static int check_for_new_mountpoints_every = 15;
+static int cleanup_mount_points = 1;
+
+static inline void mountinfo_reload(int force) {
+ static time_t last_loaded = 0;
+ time_t now = now_realtime_sec();
+
+ if(force || now - last_loaded >= check_for_new_mountpoints_every) {
+ // mountinfo_free_all() can be called with NULL disk_mountinfo_root
+ mountinfo_free_all(disk_mountinfo_root);
+
+ // re-read mountinfo in case something changed
+ disk_mountinfo_root = mountinfo_read(0);
+
+ last_loaded = now;
+ }
+}
+
+// Data to be stored in DICTIONARY dict_mountpoints used by do_disk_space_stats().
+// This DICTIONARY is used to lookup the settings of the mount point on each iteration.
+struct mount_point_metadata {
+ int do_space;
+ int do_inodes;
+ int shown_error;
+ int updated;
+
+ size_t collected; // the number of times this has been collected
+
+ RRDSET *st_space;
+ RRDDIM *rd_space_used;
+ RRDDIM *rd_space_avail;
+ RRDDIM *rd_space_reserved;
+
+ RRDSET *st_inodes;
+ RRDDIM *rd_inodes_used;
+ RRDDIM *rd_inodes_avail;
+ RRDDIM *rd_inodes_reserved;
+};
+
+static DICTIONARY *dict_mountpoints = NULL;
+
+#define rrdset_obsolete_and_pointer_null(st) do { if(st) { rrdset_is_obsolete(st); (st) = NULL; } } while(st)
+
+int mount_point_cleanup(void *entry, void *data) {
+ (void)data;
+
+ struct mount_point_metadata *mp = (struct mount_point_metadata *)entry;
+ if(!mp) return 0;
+
+ if(likely(mp->updated)) {
+ mp->updated = 0;
+ return 0;
+ }
+
+ if(likely(cleanup_mount_points && mp->collected)) {
+ mp->collected = 0;
+ mp->updated = 0;
+ mp->shown_error = 0;
+
+ mp->rd_space_avail = NULL;
+ mp->rd_space_used = NULL;
+ mp->rd_space_reserved = NULL;
+
+ mp->rd_inodes_avail = NULL;
+ mp->rd_inodes_used = NULL;
+ mp->rd_inodes_reserved = NULL;
+
+ rrdset_obsolete_and_pointer_null(mp->st_space);
+ rrdset_obsolete_and_pointer_null(mp->st_inodes);
+ }
+
+ return 0;
+}
+
+static inline void do_disk_space_stats(struct mountinfo *mi, int update_every) {
+ const char *family = mi->mount_point;
+ const char *disk = mi->persistent_id;
+
+ static SIMPLE_PATTERN *excluded_mountpoints = NULL;
+ static SIMPLE_PATTERN *excluded_filesystems = NULL;
+ int do_space, do_inodes;
+
+ if(unlikely(!dict_mountpoints)) {
+ SIMPLE_PREFIX_MODE mode = SIMPLE_PATTERN_EXACT;
+
+ if(config_move("plugin:proc:/proc/diskstats", "exclude space metrics on paths", CONFIG_SECTION_DISKSPACE, "exclude space metrics on paths") != -1) {
+ // old configuration, enable backwards compatibility
+ mode = SIMPLE_PATTERN_PREFIX;
+ }
+
+ excluded_mountpoints = simple_pattern_create(
+            config_get(CONFIG_SECTION_DISKSPACE, "exclude space metrics on paths", DEFAULT_EXCLUDED_PATHS)
+ , NULL
+ , mode
+ );
+
+ excluded_filesystems = simple_pattern_create(
+ config_get(CONFIG_SECTION_DISKSPACE, "exclude space metrics on filesystems", DEFAULT_EXCLUDED_FILESYSTEMS)
+ , NULL
+ , SIMPLE_PATTERN_EXACT
+ );
+
+ dict_mountpoints = dictionary_create(DICTIONARY_FLAG_SINGLE_THREADED);
+ }
+
+ struct mount_point_metadata *m = dictionary_get(dict_mountpoints, mi->mount_point);
+ if(unlikely(!m)) {
+ char var_name[4096 + 1];
+ snprintfz(var_name, 4096, "plugin:proc:diskspace:%s", mi->mount_point);
+
+ int def_space = config_get_boolean_ondemand(CONFIG_SECTION_DISKSPACE, "space usage for all disks", CONFIG_BOOLEAN_AUTO);
+ int def_inodes = config_get_boolean_ondemand(CONFIG_SECTION_DISKSPACE, "inodes usage for all disks", CONFIG_BOOLEAN_AUTO);
+
+ if(unlikely(simple_pattern_matches(excluded_mountpoints, mi->mount_point))) {
+ def_space = CONFIG_BOOLEAN_NO;
+ def_inodes = CONFIG_BOOLEAN_NO;
+ }
+
+ if(unlikely(simple_pattern_matches(excluded_filesystems, mi->filesystem))) {
+ def_space = CONFIG_BOOLEAN_NO;
+ def_inodes = CONFIG_BOOLEAN_NO;
+ }
+
+ // check if the mount point is a directory #2407
+ // but only when it is enabled by default #4491
+ if(def_space != CONFIG_BOOLEAN_NO || def_inodes != CONFIG_BOOLEAN_NO) {
+ struct stat bs;
+ if(stat(mi->mount_point, &bs) == -1) {
+ error("DISKSPACE: Cannot stat() mount point '%s' (disk '%s', filesystem '%s', root '%s')."
+ , mi->mount_point
+ , disk
+ , mi->filesystem?mi->filesystem:""
+ , mi->root?mi->root:""
+ );
+ def_space = CONFIG_BOOLEAN_NO;
+ def_inodes = CONFIG_BOOLEAN_NO;
+ }
+ else {
+ if((bs.st_mode & S_IFMT) != S_IFDIR) {
+ error("DISKSPACE: Mount point '%s' (disk '%s', filesystem '%s', root '%s') is not a directory."
+ , mi->mount_point
+ , disk
+ , mi->filesystem?mi->filesystem:""
+ , mi->root?mi->root:""
+ );
+ def_space = CONFIG_BOOLEAN_NO;
+ def_inodes = CONFIG_BOOLEAN_NO;
+ }
+ }
+ }
+
+ do_space = config_get_boolean_ondemand(var_name, "space usage", def_space);
+ do_inodes = config_get_boolean_ondemand(var_name, "inodes usage", def_inodes);
+
+ struct mount_point_metadata mp = {
+ .do_space = do_space,
+ .do_inodes = do_inodes,
+ .shown_error = 0,
+ .updated = 0,
+
+ .collected = 0,
+
+ .st_space = NULL,
+ .rd_space_avail = NULL,
+ .rd_space_used = NULL,
+ .rd_space_reserved = NULL,
+
+ .st_inodes = NULL,
+ .rd_inodes_avail = NULL,
+ .rd_inodes_used = NULL,
+ .rd_inodes_reserved = NULL
+ };
+
+ m = dictionary_set(dict_mountpoints, mi->mount_point, &mp, sizeof(struct mount_point_metadata));
+ }
+
+ m->updated = 1;
+
+ if(unlikely(m->do_space == CONFIG_BOOLEAN_NO && m->do_inodes == CONFIG_BOOLEAN_NO))
+ return;
+
+ if(unlikely(mi->flags & MOUNTINFO_READONLY && !m->collected && m->do_space != CONFIG_BOOLEAN_YES && m->do_inodes != CONFIG_BOOLEAN_YES))
+ return;
+
+ struct statvfs buff_statvfs;
+ if (statvfs(mi->mount_point, &buff_statvfs) < 0) {
+ if(!m->shown_error) {
+ error("DISKSPACE: failed to statvfs() mount point '%s' (disk '%s', filesystem '%s', root '%s')"
+ , mi->mount_point
+ , disk
+ , mi->filesystem?mi->filesystem:""
+ , mi->root?mi->root:""
+ );
+ m->shown_error = 1;
+ }
+ return;
+ }
+ m->shown_error = 0;
+
+ // logic found at get_fs_usage() in coreutils
+ unsigned long bsize = (buff_statvfs.f_frsize) ? buff_statvfs.f_frsize : buff_statvfs.f_bsize;
+
+ fsblkcnt_t bavail = buff_statvfs.f_bavail;
+ fsblkcnt_t btotal = buff_statvfs.f_blocks;
+ fsblkcnt_t bavail_root = buff_statvfs.f_bfree;
+ fsblkcnt_t breserved_root = bavail_root - bavail;
+ fsblkcnt_t bused;
+ if(likely(btotal >= bavail_root))
+ bused = btotal - bavail_root;
+ else
+ bused = bavail_root - btotal;
+
+#ifdef NETDATA_INTERNAL_CHECKS
+ if(unlikely(btotal != bavail + breserved_root + bused))
+ error("DISKSPACE: disk block statistics for '%s' (disk '%s') do not sum up: total = %llu, available = %llu, reserved = %llu, used = %llu", mi->mount_point, disk, (unsigned long long)btotal, (unsigned long long)bavail, (unsigned long long)breserved_root, (unsigned long long)bused);
+#endif
+
+ // --------------------------------------------------------------------------
+
+ fsfilcnt_t favail = buff_statvfs.f_favail;
+ fsfilcnt_t ftotal = buff_statvfs.f_files;
+ fsfilcnt_t favail_root = buff_statvfs.f_ffree;
+ fsfilcnt_t freserved_root = favail_root - favail;
+ fsfilcnt_t fused = ftotal - favail_root;
+
+ if(m->do_inodes == CONFIG_BOOLEAN_AUTO && favail == (fsfilcnt_t)-1) {
+ // this file system does not support inodes reporting
+ // eg. cephfs
+ m->do_inodes = CONFIG_BOOLEAN_NO;
+ }
+
+#ifdef NETDATA_INTERNAL_CHECKS
+    if(unlikely(ftotal != favail + freserved_root + fused))
+ error("DISKSPACE: disk inode statistics for '%s' (disk '%s') do not sum up: total = %llu, available = %llu, reserved = %llu, used = %llu", mi->mount_point, disk, (unsigned long long)ftotal, (unsigned long long)favail, (unsigned long long)freserved_root, (unsigned long long)fused);
+#endif
+
+ // --------------------------------------------------------------------------
+
+ int rendered = 0;
+
+ if(m->do_space == CONFIG_BOOLEAN_YES || (m->do_space == CONFIG_BOOLEAN_AUTO &&
+ (bavail || breserved_root || bused ||
+ netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
+ if(unlikely(!m->st_space)) {
+ m->do_space = CONFIG_BOOLEAN_YES;
+ m->st_space = rrdset_find_active_bytype_localhost("disk_space", disk);
+ if(unlikely(!m->st_space)) {
+ char title[4096 + 1];
+ snprintfz(title, 4096, "Disk Space Usage for %s [%s]", family, mi->mount_source);
+ m->st_space = rrdset_create_localhost(
+ "disk_space"
+ , disk
+ , NULL
+ , family
+ , "disk.space"
+ , title
+ , "GiB"
+ , PLUGIN_DISKSPACE_NAME
+ , NULL
+ , NETDATA_CHART_PRIO_DISKSPACE_SPACE
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+ }
+
+ m->rd_space_avail = rrddim_add(m->st_space, "avail", NULL, (collected_number)bsize, 1024 * 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+ m->rd_space_used = rrddim_add(m->st_space, "used", NULL, (collected_number)bsize, 1024 * 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+ m->rd_space_reserved = rrddim_add(m->st_space, "reserved_for_root", "reserved for root", (collected_number)bsize, 1024 * 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else
+ rrdset_next(m->st_space);
+
+ rrddim_set_by_pointer(m->st_space, m->rd_space_avail, (collected_number)bavail);
+ rrddim_set_by_pointer(m->st_space, m->rd_space_used, (collected_number)bused);
+ rrddim_set_by_pointer(m->st_space, m->rd_space_reserved, (collected_number)breserved_root);
+ rrdset_done(m->st_space);
+
+ rendered++;
+ }
+
+ // --------------------------------------------------------------------------
+
+ if(m->do_inodes == CONFIG_BOOLEAN_YES || (m->do_inodes == CONFIG_BOOLEAN_AUTO &&
+ (favail || freserved_root || fused ||
+ netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
+ if(unlikely(!m->st_inodes)) {
+ m->do_inodes = CONFIG_BOOLEAN_YES;
+ m->st_inodes = rrdset_find_active_bytype_localhost("disk_inodes", disk);
+ if(unlikely(!m->st_inodes)) {
+ char title[4096 + 1];
+ snprintfz(title, 4096, "Disk Files (inodes) Usage for %s [%s]", family, mi->mount_source);
+ m->st_inodes = rrdset_create_localhost(
+ "disk_inodes"
+ , disk
+ , NULL
+ , family
+ , "disk.inodes"
+ , title
+ , "inodes"
+ , PLUGIN_DISKSPACE_NAME
+ , NULL
+ , NETDATA_CHART_PRIO_DISKSPACE_INODES
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+ }
+
+ m->rd_inodes_avail = rrddim_add(m->st_inodes, "avail", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ m->rd_inodes_used = rrddim_add(m->st_inodes, "used", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ m->rd_inodes_reserved = rrddim_add(m->st_inodes, "reserved_for_root", "reserved for root", 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else
+ rrdset_next(m->st_inodes);
+
+ rrddim_set_by_pointer(m->st_inodes, m->rd_inodes_avail, (collected_number)favail);
+ rrddim_set_by_pointer(m->st_inodes, m->rd_inodes_used, (collected_number)fused);
+ rrddim_set_by_pointer(m->st_inodes, m->rd_inodes_reserved, (collected_number)freserved_root);
+ rrdset_done(m->st_inodes);
+
+ rendered++;
+ }
+
+ // --------------------------------------------------------------------------
+
+ if(likely(rendered))
+ m->collected++;
+}
+
+static void diskspace_main_cleanup(void *ptr) {
+ struct netdata_static_thread *static_thread = (struct netdata_static_thread *)ptr;
+ static_thread->enabled = NETDATA_MAIN_THREAD_EXITING;
+
+ info("cleaning up...");
+
+ static_thread->enabled = NETDATA_MAIN_THREAD_EXITED;
+}
+
+void *diskspace_main(void *ptr) {
+ netdata_thread_cleanup_push(diskspace_main_cleanup, ptr);
+
+ int vdo_cpu_netdata = config_get_boolean("plugin:proc", "netdata server resources", 1);
+
+ cleanup_mount_points = config_get_boolean(CONFIG_SECTION_DISKSPACE, "remove charts of unmounted disks" , cleanup_mount_points);
+
+ int update_every = (int)config_get_number(CONFIG_SECTION_DISKSPACE, "update every", localhost->rrd_update_every);
+ if(update_every < localhost->rrd_update_every)
+ update_every = localhost->rrd_update_every;
+
+ check_for_new_mountpoints_every = (int)config_get_number(CONFIG_SECTION_DISKSPACE, "check for new mount points every", check_for_new_mountpoints_every);
+ if(check_for_new_mountpoints_every < update_every)
+ check_for_new_mountpoints_every = update_every;
+
+ struct rusage thread;
+
+ usec_t duration = 0;
+ usec_t step = update_every * USEC_PER_SEC;
+ heartbeat_t hb;
+ heartbeat_init(&hb);
+ while(!netdata_exit) {
+ duration = heartbeat_monotonic_dt_to_now_usec(&hb);
+ /* usec_t hb_dt = */ heartbeat_next(&hb, step);
+
+ if(unlikely(netdata_exit)) break;
+
+
+ // --------------------------------------------------------------------------
+ // this is smart enough not to reload it every time
+
+ mountinfo_reload(0);
+
+
+ // --------------------------------------------------------------------------
+ // disk space metrics
+
+ struct mountinfo *mi;
+ for(mi = disk_mountinfo_root; mi; mi = mi->next) {
+
+ if(unlikely(mi->flags & (MOUNTINFO_IS_DUMMY | MOUNTINFO_IS_BIND)))
+ continue;
+
+ do_disk_space_stats(mi, update_every);
+ if(unlikely(netdata_exit)) break;
+ }
+
+ if(unlikely(netdata_exit)) break;
+
+ if(dict_mountpoints)
+ dictionary_get_all(dict_mountpoints, mount_point_cleanup, NULL);
+
+ if(vdo_cpu_netdata) {
+ static RRDSET *stcpu_thread = NULL, *st_duration = NULL;
+ static RRDDIM *rd_user = NULL, *rd_system = NULL, *rd_duration = NULL;
+
+ // ----------------------------------------------------------------
+
+ getrusage(RUSAGE_THREAD, &thread);
+
+ if(unlikely(!stcpu_thread)) {
+ stcpu_thread = rrdset_create_localhost(
+ "netdata"
+ , "plugin_diskspace"
+ , NULL
+ , "diskspace"
+ , NULL
+ , "NetData Disk Space Plugin CPU usage"
+ , "milliseconds/s"
+ , PLUGIN_DISKSPACE_NAME
+ , NULL
+ , NETDATA_CHART_PRIO_NETDATA_DISKSPACE
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+
+ rd_user = rrddim_add(stcpu_thread, "user", NULL, 1, 1000, RRD_ALGORITHM_INCREMENTAL);
+ rd_system = rrddim_add(stcpu_thread, "system", NULL, 1, 1000, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else
+ rrdset_next(stcpu_thread);
+
+ rrddim_set_by_pointer(stcpu_thread, rd_user, thread.ru_utime.tv_sec * 1000000ULL + thread.ru_utime.tv_usec);
+ rrddim_set_by_pointer(stcpu_thread, rd_system, thread.ru_stime.tv_sec * 1000000ULL + thread.ru_stime.tv_usec);
+ rrdset_done(stcpu_thread);
+
+ // ----------------------------------------------------------------
+
+ if(unlikely(!st_duration)) {
+ st_duration = rrdset_create_localhost(
+ "netdata"
+ , "plugin_diskspace_dt"
+ , NULL
+ , "diskspace"
+ , NULL
+ , "NetData Disk Space Plugin Duration"
+ , "milliseconds/run"
+ , PLUGIN_DISKSPACE_NAME
+ , NULL
+ , 132021
+ , update_every
+ , RRDSET_TYPE_AREA
+ );
+
+ rd_duration = rrddim_add(st_duration, "duration", NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else
+ rrdset_next(st_duration);
+
+ rrddim_set_by_pointer(st_duration, rd_duration, duration);
+ rrdset_done(st_duration);
+
+ // ----------------------------------------------------------------
+
+ if(unlikely(netdata_exit)) break;
+ }
+ }
+
+ netdata_thread_cleanup_pop(1);
+ return NULL;
+}
diff --git a/collectors/diskspace.plugin/plugin_diskspace.h b/collectors/diskspace.plugin/plugin_diskspace.h
new file mode 100644
index 0000000..7c9df9d
--- /dev/null
+++ b/collectors/diskspace.plugin/plugin_diskspace.h
@@ -0,0 +1,34 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_PLUGIN_PROC_DISKSPACE_H
+#define NETDATA_PLUGIN_PROC_DISKSPACE_H
+
+#include "../../daemon/common.h"
+
+
+#if (TARGET_OS == OS_LINUX)
+
+#define NETDATA_PLUGIN_HOOK_LINUX_DISKSPACE \
+ { \
+ .name = "PLUGIN[diskspace]", \
+ .config_section = CONFIG_SECTION_PLUGINS, \
+ .config_name = "diskspace", \
+ .enabled = 1, \
+ .thread = NULL, \
+ .init_routine = NULL, \
+ .start_routine = diskspace_main \
+ },
+
+extern void *diskspace_main(void *ptr);
+
+#include "../proc.plugin/plugin_proc.h"
+
+#else // (TARGET_OS == OS_LINUX)
+
+#define NETDATA_PLUGIN_HOOK_LINUX_DISKSPACE
+
+#endif // (TARGET_OS == OS_LINUX)
+
+
+
+#endif //NETDATA_PLUGIN_PROC_DISKSPACE_H
diff --git a/collectors/ebpf.plugin/Makefile.am b/collectors/ebpf.plugin/Makefile.am
new file mode 100644
index 0000000..1327d47
--- /dev/null
+++ b/collectors/ebpf.plugin/Makefile.am
@@ -0,0 +1,25 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+CLEANFILES = \
+ reset_netdata_trace.sh \
+ $(NULL)
+
+include $(top_srcdir)/build/subst.inc
+SUFFIXES = .in
+
+dist_plugins_SCRIPTS = \
+ reset_netdata_trace.sh \
+ $(NULL)
+
+dist_noinst_DATA = \
+ reset_netdata_trace.sh.in \
+ README.md \
+ $(NULL)
+
+dist_libconfig_DATA = \
+ ebpf.conf \
+ ebpf_kernel_reject_list.txt \
+ $(NULL)
diff --git a/collectors/ebpf.plugin/README.md b/collectors/ebpf.plugin/README.md
new file mode 100644
index 0000000..5ea3b49
--- /dev/null
+++ b/collectors/ebpf.plugin/README.md
@@ -0,0 +1,400 @@
+<!--
+title: "eBPF monitoring with Netdata"
+description: "Use Netdata's extended Berkeley Packet Filter (eBPF) collector to monitor kernel-level metrics about your complex applications with per-second granularity."
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/ebpf.plugin/README.md
+sidebar_label: "eBPF"
+-->
+
+# eBPF monitoring with Netdata
+
+Netdata's extended Berkeley Packet Filter (eBPF) collector monitors kernel-level metrics for file descriptors, virtual
+filesystem IO, and process management on Linux systems. You can use our eBPF collector to analyze how and when a process
+accesses files, when it makes system calls, whether it leaks memory or creates zombie processes, and more.
+
+Netdata's eBPF monitoring toolkit uses two custom eBPF programs. The default, called `entry`, monitors calls to a
+variety of kernel functions, such as `do_sys_open`, `__close_fd`, `vfs_read`, `vfs_write`, `_do_fork`, and more. The
+`return` program also monitors the return of each kernel function to deliver more granular metrics about how your
+system and its applications interact with the Linux kernel.
+
+eBPF monitoring can help you troubleshoot and debug how applications interact with the Linux kernel. See our [guide on
+troubleshooting apps with eBPF metrics](/docs/guides/troubleshoot/monitor-debug-applications-ebpf.md) for configuration
+and troubleshooting tips.
+
+<figure>
+ <img src="https://user-images.githubusercontent.com/1153921/74746434-ad6a1e00-5222-11ea-858a-a7882617ae02.png" alt="An example of VFS charts, made possible by the eBPF collector plugin" />
+ <figcaption>An example of VFS charts made possible by the eBPF collector plugin.</figcaption>
+</figure>
+
+## Enable the collector on Linux
+
+**The eBPF collector is installed and enabled by default on most new installations of the Agent**. The eBPF collector
+does not currently work with [static build installations](/packaging/installer/methods/kickstart-64.md), but improved
+support is in active development.
+
+eBPF monitoring only works on Linux systems and with specific Linux kernels, including all kernels newer than `4.11.0`,
+and all kernels on CentOS 7.6 or later.
+
+If your Agent is v1.22 or older, you may need to enable the collector yourself. See the [configuration](#configuration)
+section for details.
+
+## Charts
+
+The eBPF collector creates an **eBPF** menu in the Agent's dashboard along with three sub-menus: **File**, **VFS**, and
+**Process**. All the charts in this section update every second. The collector stores the actual value inside of its
+process, but charts only show the difference between the values collected in the previous and current seconds.
+
+### File
+
+This group has two charts demonstrating how software interacts with the Linux kernel to open and close file descriptors.
+
+#### File descriptor
+
+This chart contains two dimensions that show the number of calls to the functions `do_sys_open` and `__close_fd`. Most
+software does not call these functions directly, but they are behind the system calls `open(2)`, `openat(2)`, and
+`close(2)`.
+
+#### File error
+
+This chart shows the number of times some software tried and failed to open or close a file descriptor.
+
+### VFS
+
+A [virtual file system](https://en.wikipedia.org/wiki/Virtual_file_system) (VFS) is a layer on top of regular
+filesystems. The functions present inside this API are used for all filesystems, so it's possible the charts in this
+group won't show _all_ the actions that occurred on your system.
+
+#### Deleted objects
+
+This chart monitors calls for `vfs_unlink`. This function is responsible for removing objects from the file system.
+
+#### IO
+
+This chart shows the number of calls to the functions `vfs_read` and `vfs_write`.
+
+#### IO bytes
+
+This chart also monitors `vfs_read` and `vfs_write`, but instead shows the total of bytes read and written with these
+functions.
+
+The Agent displays the number of bytes written as negative because they are moving down to disk.
+
+#### IO errors
+
+The Agent counts and shows the number of instances where a running program experiences a read or write error.
+
+### Process
+
+For this group, the eBPF collector monitors process/thread creation and process end, and then displays any errors in the
+following charts.
+
+#### Process thread
+
+Internally, the Linux kernel treats both processes and threads as `tasks`. To create a thread, the kernel offers a few
+system calls: `fork(2)`, `vfork(2)` and `clone(2)`. In turn, each of these system calls use the function `_do_fork`. To
+generate this chart, the eBPF collector monitors `_do_fork` to populate the `process` dimension, and monitors
+`sys_clone` to identify threads.
+
+#### Exit
+
+Ending a task requires two steps. The first is a call to the internal function `do_exit`, which notifies the operating
+system that the task is finishing its work. The second step is to release the kernel information with the internal
+function `release_task`. The difference between the two dimensions can help you discover [zombie
+processes](https://en.wikipedia.org/wiki/Zombie_process).
+
+#### Task error
+
+The functions responsible for ending tasks do not return values, so this chart contains information about failures on
+process and thread creation.
+
+## Configuration
+
+Enable or disable the entire eBPF collector by editing `netdata.conf`.
+
+```bash
+cd /etc/netdata/ # Replace with your Netdata configuration directory, if not /etc/netdata/
+./edit-config netdata.conf
+```
+
+To enable the collector, scroll down to the `[plugins]` section and ensure the relevant line references `ebpf` (not
+`ebpf_process`), is uncommented, and is set to `yes`.
+
+```conf
+[plugins]
+ ebpf = yes
+```
+
+You can also configure the eBPF collector's behavior by editing `ebpf.conf`.
+
+```bash
+cd /etc/netdata/ # Replace with your Netdata configuration directory, if not /etc/netdata/
+./edit-config ebpf.conf
+```
+
+### `[global]`
+
+The `[global]` section defines settings for the whole eBPF collector.
+
+#### ebpf load mode
+
+The collector has two different eBPF programs. These programs monitor the same functions inside the kernel, but they
+monitor, process, and display different kinds of information.
+
+By default, this plugin uses the `entry` mode. Changing this mode can create significant overhead on your operating
+system, but can also offer valuable information if you are developing or debugging software. The `ebpf load mode` option
+accepts the following values (see the example after the list):
+
+- `entry`: This is the default mode. In this mode, the eBPF collector only monitors calls for the functions described
+ in the sections above, and does not show charts related to errors.
+- `return`: In the `return` mode, the eBPF collector monitors the same kernel functions as `entry`, but also creates
+ new charts for the return of these functions, such as errors. Monitoring function returns can help in debugging
+ software, such as failing to close file descriptors or creating zombie processes.
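+
+For example, to switch the collector to the `return` mode, you could set the following in `ebpf.conf` (a minimal sketch assuming the default configuration layout):
+
+```conf
+[global]
+    ebpf load mode = return
+```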
+
+#### Integration with `apps.plugin`
+
+The eBPF collector also creates charts for each running application through an integration with the
+[`apps.plugin`](/collectors/apps.plugin/README.md). This integration helps you understand how specific applications
+interact with the Linux kernel.
+
+When the integration is enabled, your dashboard will also show the following charts using low-level Linux metrics:
+
+- eBPF file
+ - Number of calls to open files. (`apps.file_open`)
+ - Number of files closed. (`apps.file_closed`)
+ - Number of calls to open files that returned errors.
+ - Number of calls to close files that returned errors.
+- eBPF syscall
+ - Number of calls to delete files. (`apps.file_deleted`)
+ - Number of calls to `vfs_write`. (`apps.vfs_write_call`)
+ - Number of calls to `vfs_read`. (`apps.vfs_read_call`)
+ - Number of bytes written with `vfs_write`. (`apps.vfs_write_bytes`)
+ - Number of bytes read with `vfs_read`. (`apps.vfs_read_bytes`)
+ - Number of calls to write a file that returned errors.
+ - Number of calls to read a file that returned errors.
+- eBPF process
+  - Number of processes created with `do_fork`. (`apps.process_create`)
+ - Number of threads created with `do_fork` or `__x86_64_sys_clone`, depending on your system's kernel version. (`apps.thread_create`)
+ - Number of times that a process called `do_exit`. (`apps.task_close`)
+- eBPF net
+ - Number of bytes sent. (`apps.bandwidth_sent`)
+ - Number of bytes received. (`apps.bandwidth_recv`)
+
+If you want to _disable_ the integration with `apps.plugin` along with the above charts, change the setting `apps` to
+`no`.
+
+```conf
+[global]
+ apps = yes
+```
+
+### `[ebpf programs]`
+
+The eBPF collector enables and runs the following eBPF programs by default (a configuration sketch follows the list):
+
+- `process`: This eBPF program creates charts that show information about process creation, VFS IO, and files removed.
+ When in `return` mode, it also creates charts showing errors when these operations are executed.
+- `network viewer`: This eBPF program creates charts with information about `TCP` and `UDP` functions, including the
+ bandwidth consumed by each.
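+
+As a sketch, assuming the option names match the program names listed above, the programs can be toggled individually in `ebpf.conf`:
+
+```conf
+[ebpf programs]
+    process = yes
+    network viewer = yes
+```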
+
+### `[network connections]`
+
+You can configure the information shown on `outbound` and `inbound` charts with the settings in this section.
+
+```conf
+[network connections]
+ maximum dimensions = 500
+ resolve hostname ips = no
+ ports = 1-1024 !145 !domain
+ hostnames = !example.com
+ ips = !127.0.0.1/8 10.0.0.0/8 172.16.0.0/12 192.168.0.0/16 fc00::/7
+```
+
+When you define a `ports` setting, Netdata will collect network metrics for that specific port. For example, if you
+write `ports = 19999`, Netdata will collect only connections for itself. The `hostnames` setting accepts
+[simple patterns](/libnetdata/simple_pattern/README.md). The `ports` and `ips` settings accept negation (`!`) to
+deny specific values, or an asterisk alone to match all values.
+
+In the above example, Netdata will collect metrics for all ports between 1 and 1024, with the exception of port 145 and
+port 53 (`domain`).
+
+The following options are available:
+
+- `ports`: Define the destination ports for Netdata to monitor.
+- `hostnames`: The list of hostnames that can be resolved to an IP address.
+- `ips`: The IP or range of IPs that you want to monitor. You can use IPv4 or IPv6 addresses, use dashes to define a
+ range of IPs, or use CIDR values. The default behavior is to only collect data for private IP addresses, but this
+ can be changed with the `ips` setting.
+
+By default, Netdata displays up to 500 dimensions on network connection charts. If there are more possible dimensions,
+they will be bundled into the `other` dimension. You can increase the number of shown dimensions by changing the
+`maximum dimensions` setting.
+
+The dimensions for the traffic charts are created using the destination IPs of the sockets by default. This can be
+changed by setting `resolve hostname ips = yes` and restarting Netdata. After that, Netdata will create dimensions
+using hostnames whenever it is possible to resolve IPs to them.
+
+### `[service name]`
+
+Netdata uses the list of services in `/etc/services` to plot network connection charts. If this file does not contain the
+name for a particular service you use in your infrastructure, you will need to add it to the `[service name]` section.
+
+For example, Netdata's default port (`19999`) is not listed in `/etc/services`. To associate that port with the Netdata
+service in network connection charts, and thus see the name of the service instead of its port, define it:
+
+```conf
+[service name]
+ 19999 = Netdata
+```
+
+## Troubleshooting
+
+If the eBPF collector does not work, you can troubleshoot it by running the `ebpf.plugin` command and investigating its
+output.
+
+```bash
+cd /usr/libexec/netdata/plugins.d/
+sudo su -s /bin/bash netdata
+./ebpf.plugin
+```
+
+You can also use `grep` to search the Agent's `error.log` for messages related to eBPF monitoring.
+
+```bash
+grep -i ebpf /var/log/netdata/error.log
+```
+
+### Confirm kernel compatibility
+
+The eBPF collector only works on Linux systems and with specific Linux kernels. We support all kernels more recent than
+`4.11.0`, and all kernels on CentOS 7.6 or later.
+
+You can run our helper script to determine whether your system can support eBPF monitoring.
+
+```bash
+curl -sSL https://raw.githubusercontent.com/netdata/kernel-collector/master/tools/check-kernel-config.sh | sudo bash
+```
+
+If this script returns no output, your system is ready to compile and run the eBPF collector.
+
+If you see a warning about a missing kernel configuration (`KPROBES KPROBES_ON_FTRACE HAVE_KPROBES BPF BPF_SYSCALL
+BPF_JIT`), you will need to recompile your kernel to support this configuration. The process of recompiling Linux
+kernels varies based on your distribution and version. Read the documentation for your system's distribution to learn
+more about the specific workflow for recompiling the kernel, ensuring that you set all the necessary configuration
+options:
+- [Ubuntu](https://wiki.ubuntu.com/Kernel/BuildYourOwnKernel)
+- [Debian](https://kernel-team.pages.debian.net/kernel-handbook/ch-common-tasks.html#s-common-official)
+- [Fedora](https://fedoraproject.org/wiki/Building_a_custom_kernel)
+- [CentOS](https://wiki.centos.org/HowTos/Custom_Kernel)
+- [Arch Linux](https://wiki.archlinux.org/index.php/Kernel/Traditional_compilation)
+- [Slackware](https://docs.slackware.com/howtos:slackware_admin:kernelbuilding)
+
+### Mount `debugfs` and `tracefs`
+
+The eBPF collector also requires both the `tracefs` and `debugfs` filesystems. Try mounting the `tracefs` and `debugfs`
+filesystems using the commands below:
+
+```bash
+sudo mount -t debugfs nodev /sys/kernel/debug
+sudo mount -t tracefs nodev /sys/kernel/tracing
+```
+
+If they are already mounted, you will see an error. You can also configure your system's `/etc/fstab` configuration to
+mount these filesystems on startup. More information can be found in the [ftrace documentation](https://www.kernel.org/doc/Documentation/trace/ftrace.txt).
+
+## Performance
+
+Because eBPF monitoring is complex, we are evaluating the performance of this new collector in various real-world
+conditions, across various system loads, and when monitoring complex applications.
+
+Our [initial testing](https://github.com/netdata/netdata/issues/8195) shows the performance of the eBPF collector is
+nearly identical to our [apps.plugin collector](/collectors/apps.plugin/README.md), despite collecting and displaying
+much more sophisticated metrics. You can now use the eBPF collector to gather deeper insights without affecting the performance of
+your complex applications at any load.
+
+## SELinux
+
+When [SELinux](https://www.redhat.com/en/topics/linux/what-is-selinux) is enabled, it may prevent `ebpf.plugin` from
+starting correctly. Check the Agent's `error.log` file for errors like the ones below:
+
+```bash
+2020-06-14 15:32:08: ebpf.plugin ERROR : EBPF PROCESS : Cannot load program: /usr/libexec/netdata/plugins.d/pnetdata_ebpf_process.3.10.0.o (errno 13, Permission denied)
+2020-06-14 15:32:19: netdata ERROR : PLUGINSD[ebpf] : read failed: end of file (errno 9, Bad file descriptor)
+```
+
+You can also check for errors related to `ebpf.plugin` inside `/var/log/audit/audit.log`:
+
+```bash
+type=AVC msg=audit(1586260134.952:97): avc: denied { map_create } for pid=1387 comm="ebpf.pl" scontext=system_u:system_r:unconfined_service_t:s0 tcontext=system_u:system_r:unconfined_service_t:s0 tclass=bpf permissive=0
+type=SYSCALL msg=audit(1586260134.952:97): arch=c000003e syscall=321 success=no exit=-13 a0=0 a1=7ffe6b36f000 a2=70 a3=0 items=0 ppid=1135 pid=1387 auid=4294967295 uid=994 gid=990 euid=0 suid=0 fsuid=0 egid=990 sgid=990 fsgid=990 tty=(none) ses=4294967295 comm="ebpf_proc
+ess.pl" exe="/usr/libexec/netdata/plugins.d/ebpf.plugin" subj=system_u:system_r:unconfined_service_t:s0 key=(null)
+```
+
+If you see similar errors, you will have to adjust SELinux's policies to enable the eBPF collector.
+
+### Creation of bpf policies
+
+To enable `ebpf.plugin` to run on a distribution with SELinux enabled, it will be necessary to take the following
+actions.
+
+First, stop the Netdata Agent.
+
+```bash
+# systemctl stop netdata
+```
+
+Next, create a policy with the `audit.log` file you examined earlier.
+
+```bash
+# grep ebpf.plugin /var/log/audit/audit.log | audit2allow -M netdata_ebpf
+```
+
+This will create two new files: `netdata_ebpf.te` and `netdata_ebpf.mod`.
+
+Edit the `netdata_ebpf.te` file to change the options `class` and `allow`. You should have the following at the end of
+the `netdata_ebpf.te` file.
+
+```conf
+module netdata_ebpf 1.0;
+require {
+ type unconfined_service_t;
+ class bpf { map_create map_read map_write prog_load prog_run };
+}
+#============= unconfined_service_t ==============
+allow unconfined_service_t self:bpf { map_create map_read map_write prog_load prog_run };
+```
+
+Then compile your `netdata_ebpf.te` file with the following commands to create a binary that loads the new policies:
+
+```bash
+# checkmodule -M -m -o netdata_ebpf.mod netdata_ebpf.te
+# semodule_package -o netdata_ebpf.pp -m netdata_ebpf.mod
+```
+
+Finally, you can load the new policy and start the Netdata agent again:
+
+```bash
+# semodule -i netdata_ebpf.pp
+# systemctl start netdata
+```
+
+## Lockdown
+
+Beginning with [version 5.4](https://www.zdnet.com/article/linux-to-get-kernel-lockdown-feature/), the Linux kernel has
+a feature called "lockdown," which may affect `ebpf.plugin` depending on how the kernel was compiled. The following table
+shows how the lockdown module impacts `ebpf.plugin` based on the selected options:
+
+| Enforcing kernel lockdown | Enable lockdown LSM early in init | Default lockdown mode | Can `ebpf.plugin` run with this? |
+|:------------------------- |:--------------------------------- |:--------------------- |:-------------------------------- |
+| YES | NO | NO | YES |
+| YES | YES | None | YES |
+| YES | YES | Integrity | YES |
+| YES | YES | Confidentiality | NO |
+
+If you or your distribution compiled the kernel with the last combination, your system cannot load shared libraries
+required to run `ebpf.plugin`.
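+
+On kernels built with the lockdown LSM, you can usually check the active mode by reading `/sys/kernel/security/lockdown`; the bracketed entry is the mode currently in effect:
+
+```bash
+cat /sys/kernel/security/lockdown
+# for example: none [integrity] confidentiality
+```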
+
+## Cleaning `kprobe_events`
+
+The eBPF collector adds entries to the file `/sys/kernel/debug/tracing/kprobe_events`, and cleans them on exit, unless
+another process prevents it. If you need to clean the eBPF entries safely, you can manually run the script
+`/usr/libexec/netdata/plugins.d/reset_netdata_trace.sh`.
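+
+For example (root privileges are typically required to modify `kprobe_events`):
+
+```bash
+sudo /usr/libexec/netdata/plugins.d/reset_netdata_trace.sh
+```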
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Febpf.plugin%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/ebpf.plugin/ebpf.c b/collectors/ebpf.plugin/ebpf.c
new file mode 100644
index 0000000..26bcfcf
--- /dev/null
+++ b/collectors/ebpf.plugin/ebpf.c
@@ -0,0 +1,1977 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include <sys/time.h>
+#include <sys/resource.h>
+#include <ifaddrs.h>
+
+#include "ebpf.h"
+#include "ebpf_socket.h"
+
+/*****************************************************************
+ *
+ * FUNCTIONS USED BY NETDATA
+ *
+ *****************************************************************/
+
+// callback required by eval()
+int health_variable_lookup(const char *variable, uint32_t hash, struct rrdcalc *rc, calculated_number *result)
+{
+ UNUSED(variable);
+ UNUSED(hash);
+ UNUSED(rc);
+ UNUSED(result);
+ return 0;
+};
+
+void send_statistics(const char *action, const char *action_result, const char *action_data)
+{
+ UNUSED(action);
+ UNUSED(action_result);
+ UNUSED(action_data);
+}
+
+// callbacks required by popen()
+void signals_block(void){};
+void signals_unblock(void){};
+void signals_reset(void){};
+
+// required by get_system_cpus()
+char *netdata_configured_host_prefix = "";
+
+// callback required by fatal()
+void netdata_cleanup_and_exit(int ret)
+{
+ exit(ret);
+}
+
+// ----------------------------------------------------------------------
+/*****************************************************************
+ *
+ * GLOBAL VARIABLES
+ *
+ *****************************************************************/
+
+char *ebpf_plugin_dir = PLUGINS_DIR;
+char *ebpf_user_config_dir = CONFIG_DIR;
+char *ebpf_stock_config_dir = LIBCONFIG_DIR;
+static char *ebpf_configured_log_dir = LOG_DIR;
+
+char *ebpf_algorithms[] = {"absolute", "incremental"};
+int update_every = 1;
+static int thread_finished = 0;
+int close_ebpf_plugin = 0;
+struct config collector_config = { .first_section = NULL,
+ .last_section = NULL,
+ .mutex = NETDATA_MUTEX_INITIALIZER,
+ .index = { .avl_tree = { .root = NULL, .compar = appconfig_section_compare },
+ .rwlock = AVL_LOCK_INITIALIZER } };
+
+int running_on_kernel = 0;
+char kernel_string[64];
+int ebpf_nprocs;
+static int isrh;
+uint32_t finalized_threads = 1;
+
+pthread_mutex_t lock;
+pthread_mutex_t collect_data_mutex;
+pthread_cond_t collect_data_cond_var;
+
+ebpf_module_t ebpf_modules[] = {
+ { .thread_name = "process", .config_name = "process", .enabled = 0, .start_routine = ebpf_process_thread,
+ .update_time = 1, .global_charts = 1, .apps_charts = 1, .mode = MODE_ENTRY,
+ .optional = 0 },
+ { .thread_name = "socket", .config_name = "socket", .enabled = 0, .start_routine = ebpf_socket_thread,
+ .update_time = 1, .global_charts = 1, .apps_charts = 1, .mode = MODE_ENTRY,
+ .optional = 0 },
+ { .thread_name = NULL, .enabled = 0, .start_routine = NULL, .update_time = 1,
+ .global_charts = 0, .apps_charts = 1, .mode = MODE_ENTRY,
+ .optional = 0 },
+};
+
+// Link with apps.plugin
+ebpf_process_stat_t *global_process_stat = NULL;
+
+//Network viewer
+ebpf_network_viewer_options_t network_viewer_opt;
+
+/*****************************************************************
+ *
+ * FUNCTIONS USED TO CLEAN MEMORY AND OPERATE SYSTEM FILES
+ *
+ *****************************************************************/
+
+/**
+ * Cleanup publish syscall
+ *
+ * @param nps list of structures to clean
+ */
+void ebpf_cleanup_publish_syscall(netdata_publish_syscall_t *nps)
+{
+ while (nps) {
+ freez(nps->algorithm);
+ nps = nps->next;
+ }
+}
+
+/**
+ * Clean port Structure
+ *
+ * Clean the allocated list.
+ *
+ * @param clean the list that will be cleaned
+ */
+void clean_port_structure(ebpf_network_viewer_port_list_t **clean)
+{
+ ebpf_network_viewer_port_list_t *move = *clean;
+ while (move) {
+ ebpf_network_viewer_port_list_t *next = move->next;
+ freez(move->value);
+ freez(move);
+
+ move = next;
+ }
+ *clean = NULL;
+}
+
+/**
+ * Clean IP structure
+ *
+ * Clean the allocated list.
+ *
+ * @param clean the list that will be cleaned
+ */
+static void clean_ip_structure(ebpf_network_viewer_ip_list_t **clean)
+{
+ ebpf_network_viewer_ip_list_t *move = *clean;
+ while (move) {
+ ebpf_network_viewer_ip_list_t *next = move->next;
+ freez(move);
+
+ move = next;
+ }
+ *clean = NULL;
+}
+
+/**
+ * Clean Loaded Events
+ *
+ * This function cleans the events previously loaded on Linux.
+void clean_loaded_events()
+{
+ int event_pid;
+ for (event_pid = 0; ebpf_modules[event_pid].probes; event_pid++)
+ clean_kprobe_events(NULL, (int)ebpf_modules[event_pid].thread_id, ebpf_modules[event_pid].probes);
+}
+ */
+
+/**
+ * Close the collector gracefully
+ *
+ * @param sig is the signal number used to close the collector
+ */
+static void ebpf_exit(int sig)
+{
+ close_ebpf_plugin = 1;
+
+    // If the threads have not finished yet, continuing past this point would crash the collector.
+ if (!thread_finished) {
+ return;
+ }
+
+ freez(global_process_stat);
+
+ /*
+ int ret = fork();
+ if (ret < 0) // error
+ error("Cannot fork(), so I won't be able to clean %skprobe_events", NETDATA_DEBUGFS);
+ else if (!ret) { // child
+ int i;
+ for (i = getdtablesize(); i >= 0; --i)
+ close(i);
+
+ int fd = open("/dev/null", O_RDWR, 0);
+ if (fd != -1) {
+ dup2(fd, STDIN_FILENO);
+ dup2(fd, STDOUT_FILENO);
+ dup2(fd, STDERR_FILENO);
+ }
+
+ if (fd > 2)
+ close(fd);
+
+ int sid = setsid();
+ if (sid >= 0) {
+            debug(D_EXIT, "Waiting for parent %d to die", getpid());
+            sleep_usec(200000); // Sleep 200 milliseconds so the parent dies first.
+ clean_loaded_events();
+ } else {
+ error("Cannot become session id leader, so I won't try to clean kprobe_events.\n");
+ }
+ } else { // parent
+ exit(0);
+ }
+ */
+
+ exit(sig);
+}
+
+/*****************************************************************
+ *
+ * FUNCTIONS TO CREATE CHARTS
+ *
+ *****************************************************************/
+
+/**
+ * Get a value from a structure.
+ *
+ * @param basis the base address of the structure
+ * @param offset it is the offset of the data you want to access.
+ * @return It returns the collected value and resets it to zero.
+ */
+collected_number get_value_from_structure(char *basis, size_t offset)
+{
+ collected_number *value = (collected_number *)(basis + offset);
+
+ collected_number ret = (collected_number)llabs(*value);
+    // reset the value, otherwise the chart would keep a constant value while the process is not executing the task
+ *value = 0;
+
+ return ret;
+}
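+
+// Usage sketch (illustrative; `stat` names a hypothetical local netdata_syscall_stat_t):
+// read and reset the `call` counter through its offset inside the structure:
+//     collected_number v = get_value_from_structure((char *)&stat, offsetof(netdata_syscall_stat_t, call));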
+
+/**
+ * Write begin command on standard output
+ *
+ * @param family the chart family name
+ * @param name the chart name
+ */
+void write_begin_chart(char *family, char *name)
+{
+ printf("BEGIN %s.%s\n", family, name);
+}
+
+/**
+ * Write END command on stdout.
+ */
+inline void write_end_chart()
+{
+ printf("END\n");
+}
+
+/**
+ * Write set command on standard output
+ *
+ * @param dim the dimension name
+ * @param value the value for the dimension
+ */
+void write_chart_dimension(char *dim, long long value)
+{
+ int ret = printf("SET %s = %lld\n", dim, value);
+ UNUSED(ret);
+}
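+
+// Together, the helpers above emit one complete update frame of the Netdata
+// plugin protocol on standard output, e.g. (illustrative values):
+//     BEGIN ebpf.process_thread
+//     SET process = 10
+//     END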
+
+/**
+ * Write the BEGIN/SET/END sequence that updates the call counters of a chart.
+ *
+ * @param name the chart name
+ * @param family the chart family
+ * @param move the pointer with the values that will be published
+ * @param end the number of values that will be written on standard output
+ */
+void write_count_chart(char *name, char *family, netdata_publish_syscall_t *move, uint32_t end)
+{
+ write_begin_chart(family, name);
+
+ uint32_t i = 0;
+ while (move && i < end) {
+ write_chart_dimension(move->name, move->ncall);
+
+ move = move->next;
+ i++;
+ }
+
+ write_end_chart();
+}
+
+/**
+ * Write the BEGIN/SET/END sequence that updates the error counters of a chart.
+ *
+ * @param name the chart name
+ * @param family the chart family
+ * @param move the pointer with the values that will be published
+ * @param end the number of values that will be written on standard output
+ */
+void write_err_chart(char *name, char *family, netdata_publish_syscall_t *move, int end)
+{
+ write_begin_chart(family, name);
+
+ int i = 0;
+ while (move && i < end) {
+ write_chart_dimension(move->name, move->nerr);
+
+ move = move->next;
+ i++;
+ }
+
+ write_end_chart();
+}
+
+/**
+ * Write the BEGIN/SET/END sequence that updates an I/O chart.
+ *
+ * @param chart the chart name
+ * @param family the chart family
+ * @param dwrite the write dimension name
+ * @param vwrite the value for the write dimension
+ * @param dread the read dimension name
+ * @param vread the value for the read dimension
+ */
+void write_io_chart(char *chart, char *family, char *dwrite, long long vwrite, char *dread, long long vread)
+{
+ write_begin_chart(family, chart);
+
+ write_chart_dimension(dwrite, vwrite);
+ write_chart_dimension(dread, vread);
+
+ write_end_chart();
+}
+
+/**
+ * Write chart cmd on standard output
+ *
+ * @param type the chart type
+ * @param id the chart id
+ * @param title the chart title
+ * @param units the units label
+ * @param family the group name used to attach the chart on the dashboard
+ * @param charttype the chart type
+ * @param order the chart order
+ */
+void ebpf_write_chart_cmd(char *type, char *id, char *title, char *units, char *family, char *charttype, int order)
+{
+ printf("CHART %s.%s '' '%s' '%s' '%s' '' %s %d %d\n",
+ type,
+ id,
+ title,
+ units,
+ family,
+ charttype,
+ order,
+ update_every);
+}
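+
+// Example of the line emitted by this function (illustrative values, with
+// update_every = 1):
+//     CHART ebpf.process_thread '' 'Threads started' 'calls/s' 'process' '' line 21002 1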
+
+/**
+ * Write the dimension command on standard output
+ *
+ * @param name the dimension name
+ * @param id the dimension id
+ * @param algorithm the dimension algorithm
+ */
+void ebpf_write_global_dimension(char *name, char *id, char *algorithm)
+{
+ printf("DIMENSION %s %s %s 1 1\n", name, id, algorithm);
+}
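+
+// Example of the line emitted by this function (illustrative values):
+//     DIMENSION create process absolute 1 1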
+
+/**
+ * Call ebpf_write_global_dimension to create the dimensions for a specific chart
+ *
+ * @param ptr a pointer to a structure of the type netdata_publish_syscall_t
+ * @param end the number of dimensions for the structure ptr
+ */
+void ebpf_create_global_dimension(void *ptr, int end)
+{
+ netdata_publish_syscall_t *move = ptr;
+
+ int i = 0;
+ while (move && i < end) {
+ ebpf_write_global_dimension(move->name, move->dimension, move->algorithm);
+
+ move = move->next;
+ i++;
+ }
+}
+
+/**
+ * Call ebpf_write_chart_cmd to create the chart and ncd to create its dimensions.
+ *
+ * @param type the chart type
+ * @param id the chart id
+ * @param title the chart title
+ * @param units the axis label
+ * @param family the group name used to attach the chart on the dashboard
+ * @param order the order number of the specified chart
+ * @param ncd a pointer to a function called to create dimensions
+ * @param move a pointer for a structure that has the dimensions
+ * @param end number of dimensions for the chart created
+ */
+void ebpf_create_chart(char *type,
+ char *id,
+ char *title,
+ char *units,
+ char *family,
+ int order,
+ void (*ncd)(void *, int),
+ void *move,
+ int end)
+{
+ ebpf_write_chart_cmd(type, id, title, units, family, "line", order);
+
+ ncd(move, end);
+}
+
+/**
+ * Create charts on apps submenu
+ *
+ * @param id the chart id
+ * @param title the chart title.
+ * @param units the value displayed on the vertical axis.
+ * @param family submenu that the chart will be attached to on the dashboard.
+ * @param order the chart order
+ * @param algorithm the algorithm used by the dimensions
+ * @param root structure used to create the dimensions.
+ */
+void ebpf_create_charts_on_apps(char *id, char *title, char *units, char *family, int order,
+ char *algorithm, struct target *root)
+{
+ struct target *w;
+ ebpf_write_chart_cmd(NETDATA_APPS_FAMILY, id, title, units, family, "stacked", order);
+
+ for (w = root; w; w = w->next) {
+ if (unlikely(w->exposed))
+ fprintf(stdout, "DIMENSION %s '' %s 1 1\n", w->name, algorithm);
+ }
+}
+
+/*****************************************************************
+ *
+ * FUNCTIONS TO DEFINE OPTIONS
+ *
+ *****************************************************************/
+
+/**
+ * Define labels used to generate charts
+ *
+ * @param is structure with information about the number of calls made for a function.
+ * @param pio structure used to generate charts.
+ * @param dim a pointer for the vector with the dimension names
+ * @param name a pointer for the vector with the function names.
+ * @param algorithm a vector with the algorithms used to make the charts
+ * @param end the number of elements in the previous 4 arguments.
+ */
+void ebpf_global_labels(netdata_syscall_stat_t *is, netdata_publish_syscall_t *pio, char **dim,
+ char **name, int *algorithm, int end)
+{
+ int i;
+
+ netdata_syscall_stat_t *prev = NULL;
+ netdata_publish_syscall_t *publish_prev = NULL;
+ for (i = 0; i < end; i++) {
+ if (prev) {
+ prev->next = &is[i];
+ }
+ prev = &is[i];
+
+ pio[i].dimension = dim[i];
+ pio[i].name = name[i];
+ pio[i].algorithm = strdupz(ebpf_algorithms[algorithm[i]]);
+ if (publish_prev) {
+ publish_prev->next = &pio[i];
+ }
+ publish_prev = &pio[i];
+ }
+}
+
+/**
+ * Define thread mode for all ebpf programs.
+ *
+ * @param lmode the mode that will be used for them.
+ */
+static inline void ebpf_set_thread_mode(netdata_run_mode_t lmode)
+{
+ int i;
+ for (i = 0; ebpf_modules[i].thread_name; i++) {
+ ebpf_modules[i].mode = lmode;
+ }
+}
+
+/**
+ * Enable a specific chart group selected by the user.
+ *
+ * @param em the structure that will be changed
+ * @param enable when zero, apps charts are enabled together with the global charts.
+ */
+static inline void ebpf_enable_specific_chart(struct ebpf_module *em, int enable)
+{
+ em->enabled = 1;
+ if (!enable) {
+ em->apps_charts = 1;
+ }
+ em->global_charts = 1;
+}
+
+/**
+ * Enable all charts
+ *
+ * @param apps when zero, apps charts are enabled as well
+ */
+static inline void ebpf_enable_all_charts(int apps)
+{
+ int i;
+ for (i = 0; ebpf_modules[i].thread_name; i++) {
+ ebpf_enable_specific_chart(&ebpf_modules[i], apps);
+ }
+}
+
+/**
+ * Enable the specified chart group
+ *
+ * @param idx the index in ebpf_modules of the module to enable
+ * @param disable_apps when zero, apps charts are enabled as well
+ */
+static inline void ebpf_enable_chart(int idx, int disable_apps)
+{
+ int i;
+ for (i = 0; ebpf_modules[i].thread_name; i++) {
+ if (i == idx) {
+ ebpf_enable_specific_chart(&ebpf_modules[i], disable_apps);
+ break;
+ }
+ }
+}
+
+/**
+ * Disable APPs
+ *
+ * Disable charts for apps loading only global charts.
+ */
+static inline void ebpf_disable_apps()
+{
+ int i;
+ for (i = 0; ebpf_modules[i].thread_name; i++) {
+ ebpf_modules[i].apps_charts = 0;
+ }
+}
+
+/**
+ * Print help on standard error so the user knows how to use the collector.
+ */
+void ebpf_print_help()
+{
+ const time_t t = time(NULL);
+ struct tm ct;
+ struct tm *test = localtime_r(&t, &ct);
+ int year;
+ if (test)
+ year = ct.tm_year;
+ else
+ year = 0;
+
+ fprintf(stderr,
+ "\n"
+ " Netdata ebpf.plugin %s\n"
+ " Copyright (C) 2016-%d Costa Tsaousis <costa@tsaousis.gr>\n"
+ " Released under GNU General Public License v3 or later.\n"
+ " All rights reserved.\n"
+ "\n"
+ " This program is a data collector plugin for netdata.\n"
+ "\n"
+ " Available command line options:\n"
+ "\n"
+ " SECONDS set the data collection frequency.\n"
+ "\n"
+ " --help or -h show this help.\n"
+ "\n"
+ " --version or -v show software version.\n"
+ "\n"
+ " --global or -g disable charts per application.\n"
+ "\n"
+ " --all or -a Enable all chart groups (global and apps), unless -g is also given.\n"
+ "\n"
+ " --net or -n Enable network viewer charts.\n"
+ "\n"
+ " --process or -p Enable charts related to process run time.\n"
+ "\n"
+ " --return or -r Run the collector in return mode.\n"
+ "\n",
+ VERSION,
+ (year >= 116) ? year + 1900 : 2020);
+}
+
+/*****************************************************************
+ *
+ *  AUXILIARY FUNCTIONS USED DURING INITIALIZATION
+ *
+ *****************************************************************/
+
+/**
+ * Is IP inside the range?
+ *
+ * Check if the IP is inside an IP range
+ *
+ * @param rfirst the first ip address of the range
+ * @param rlast the last ip address of the range
+ * @param cmpfirst the first ip to compare
+ * @param cmplast the last ip to compare
+ * @param family the IP family
+ *
+ * @return It returns 1 if the IP is inside the range and 0 otherwise
+ */
+static int is_ip_inside_range(union netdata_ip_t *rfirst, union netdata_ip_t *rlast,
+ union netdata_ip_t *cmpfirst, union netdata_ip_t *cmplast, int family)
+{
+ if (family == AF_INET) {
+ if (ntohl(rfirst->addr32[0]) <= ntohl(cmpfirst->addr32[0]) &&
+ ntohl(rlast->addr32[0]) >= ntohl(cmplast->addr32[0]))
+ return 1;
+ } else {
+ if (memcmp(rfirst->addr8, cmpfirst->addr8, sizeof(union netdata_ip_t)) <= 0 &&
+ memcmp(rlast->addr8, cmplast->addr8, sizeof(union netdata_ip_t)) >= 0) {
+ return 1;
+ }
+
+ }
+ return 0;
+}
+
+
+/**
+ * Fill IP list
+ *
+ * @param out a pointer to the linked list.
+ * @param in the structure that will be linked.
+ * @param table the name of the list, used only in debug messages.
+ */
+static inline void fill_ip_list(ebpf_network_viewer_ip_list_t **out, ebpf_network_viewer_ip_list_t *in, char *table)
+{
+#ifndef NETDATA_INTERNAL_CHECKS
+ UNUSED(table);
+#endif
+ if (likely(*out)) {
+ ebpf_network_viewer_ip_list_t *move = *out, *store = *out;
+ while (move) {
+ if (in->ver == move->ver && is_ip_inside_range(&move->first, &move->last, &in->first, &in->last, in->ver)) {
+ info("The range/value (%s) is inside the range/value (%s) already inserted, it will be ignored.",
+ in->value, move->value);
+ freez(in->value);
+ freez(in);
+ return;
+ }
+ store = move;
+ move = move->next;
+ }
+
+ store->next = in;
+ } else {
+ *out = in;
+ }
+
+#ifdef NETDATA_INTERNAL_CHECKS
+ char first[512], last[512];
+ if (in->ver == AF_INET) {
+ if (inet_ntop(AF_INET, in->first.addr8, first, INET_ADDRSTRLEN) &&
+ inet_ntop(AF_INET, in->last.addr8, last, INET_ADDRSTRLEN))
+ info("Adding values %s - %s to %s IP list \"%s\" used on network viewer",
+ first, last,
+ (*out == network_viewer_opt.included_ips)?"included":"excluded",
+ table);
+ } else {
+ if (inet_ntop(AF_INET6, in->first.addr8, first, INET6_ADDRSTRLEN) &&
+ inet_ntop(AF_INET6, in->last.addr8, last, INET6_ADDRSTRLEN))
+ info("Adding values %s - %s to %s IP list \"%s\" used on network viewer",
+ first, last,
+ (*out == network_viewer_opt.included_ips)?"included":"excluded",
+ table);
+ }
+#endif
+}
+
+/**
+ * Read Local Ports
+ *
+ * Parse /proc/net/{tcp,udp} and get the ports on which Linux is listening.
+ *
+ * @param filename the proc file to parse.
+ * @param proto the IPPROTO_* value associated with the protocol file we are reading.
+ */
+static void read_local_ports(char *filename, uint8_t proto)
+{
+ procfile *ff = procfile_open(filename, " \t:", PROCFILE_FLAG_DEFAULT);
+ if (!ff)
+ return;
+
+ ff = procfile_readall(ff);
+ if (!ff)
+ return;
+
+ size_t lines = procfile_lines(ff), l;
+ for(l = 0; l < lines ;l++) {
+ size_t words = procfile_linewords(ff, l);
+ // This is header or end of file
+ if (unlikely(words < 14))
+ continue;
+
+ // https://elixir.bootlin.com/linux/v5.7.8/source/include/net/tcp_states.h
+ // 0A = TCP_LISTEN
+ if (strcmp("0A", procfile_lineword(ff, l, 5)))
+ continue;
+
+ // Read local port
+ uint16_t port = (uint16_t)strtol(procfile_lineword(ff, l, 2), NULL, 16);
+ update_listen_table(htons(port), proto);
+ }
+
+ procfile_close(ff);
+}
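+
+// Sample /proc/net/tcp line (illustrative). After splitting on " \t:", word 2
+// holds the local port in hexadecimal (0x4E1F = 19999) and word 5 holds the
+// state (0A = TCP_LISTEN):
+//     0: 00000000:4E1F 00000000:0000 0A 00000000:00000000 00:00000000 00000000 ...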
+
+/**
+ * Read local addresses
+ *
+ * Read the local addresses from the network interfaces.
+ */
+static void read_local_addresses()
+{
+ struct ifaddrs *ifaddr, *ifa;
+ if (getifaddrs(&ifaddr) == -1) {
+ error("Cannot get the local IP addresses, it is no possible to do separation between inbound and outbound connections");
+ return;
+ }
+
+ char *notext = { "No text representation" };
+ for (ifa = ifaddr; ifa != NULL; ifa = ifa->ifa_next) {
+ if (ifa->ifa_addr == NULL)
+ continue;
+
+ if ((ifa->ifa_addr->sa_family != AF_INET) && (ifa->ifa_addr->sa_family != AF_INET6))
+ continue;
+
+ ebpf_network_viewer_ip_list_t *w = callocz(1, sizeof(ebpf_network_viewer_ip_list_t));
+
+ int family = ifa->ifa_addr->sa_family;
+ w->ver = (uint8_t) family;
+ char text[INET6_ADDRSTRLEN];
+ if (family == AF_INET) {
+ struct sockaddr_in *in = (struct sockaddr_in*) ifa->ifa_addr;
+
+ w->first.addr32[0] = in->sin_addr.s_addr;
+ w->last.addr32[0] = in->sin_addr.s_addr;
+
+ if (inet_ntop(AF_INET, w->first.addr8, text, INET_ADDRSTRLEN)) {
+ w->value = strdupz(text);
+ w->hash = simple_hash(text);
+ } else {
+ w->value = strdupz(notext);
+ w->hash = simple_hash(notext);
+ }
+ } else {
+ struct sockaddr_in6 *in6 = (struct sockaddr_in6*) ifa->ifa_addr;
+
+ memcpy(w->first.addr8, (void *)&in6->sin6_addr, sizeof(struct in6_addr));
+ memcpy(w->last.addr8, (void *)&in6->sin6_addr, sizeof(struct in6_addr));
+
+            if (inet_ntop(AF_INET6, w->first.addr8, text, INET6_ADDRSTRLEN)) {
+ w->value = strdupz(text);
+ w->hash = simple_hash(text);
+ } else {
+ w->value = strdupz(notext);
+ w->hash = simple_hash(notext);
+ }
+ }
+
+ fill_ip_list((family == AF_INET)?&network_viewer_opt.ipv4_local_ip:&network_viewer_opt.ipv6_local_ip,
+ w,
+ "selector");
+ }
+
+ freeifaddrs(ifaddr);
+}
+
+/**
+ * Start pthread variables
+ *
+ * This function initializes all pthread variables.
+ *
+ * @return It returns 0 on success and -1 otherwise.
+ */
+int ebpf_start_pthread_variables()
+{
+ pthread_mutex_init(&lock, NULL);
+ pthread_mutex_init(&collect_data_mutex, NULL);
+
+ if (pthread_cond_init(&collect_data_cond_var, NULL)) {
+ thread_finished++;
+ error("Cannot start conditional variable to control Apps charts.");
+ return -1;
+ }
+
+ return 0;
+}
+
+/**
+ * Allocate the vectors used for all threads.
+ */
+static void ebpf_allocate_common_vectors()
+{
+ all_pids = callocz((size_t)pid_max, sizeof(struct pid_stat *));
+ global_process_stat = callocz((size_t)ebpf_nprocs, sizeof(ebpf_process_stat_t));
+}
+
+/**
+ * Fill the ebpf_data structure with default values
+ *
+ * @param ef the pointer to set default values
+ */
+void fill_ebpf_data(ebpf_data_t *ef)
+{
+ memset(ef, 0, sizeof(ebpf_data_t));
+ ef->kernel_string = kernel_string;
+ ef->running_on_kernel = running_on_kernel;
+ ef->map_fd = callocz(EBPF_MAX_MAPS, sizeof(int));
+ ef->isrh = isrh;
+}
+
+/**
+ * Define how to load the ebpf programs
+ *
+ * @param ptr the option given by the user
+ */
+static inline void how_to_load(char *ptr)
+{
+ if (!strcasecmp(ptr, "return"))
+ ebpf_set_thread_mode(MODE_RETURN);
+ else if (!strcasecmp(ptr, "entry"))
+ ebpf_set_thread_mode(MODE_ENTRY);
+ else
+ error("the option %s for \"ebpf load mode\" is not a valid option.", ptr);
+}
+
+/**
+ * Fill Port list
+ *
+ * @param out a pointer to the linked list.
+ * @param in the structure that will be linked.
+ */
+static inline void fill_port_list(ebpf_network_viewer_port_list_t **out, ebpf_network_viewer_port_list_t *in)
+{
+ if (likely(*out)) {
+ ebpf_network_viewer_port_list_t *move = *out, *store = *out;
+ uint16_t first = ntohs(in->first);
+ uint16_t last = ntohs(in->last);
+ while (move) {
+ uint16_t cmp_first = ntohs(move->first);
+ uint16_t cmp_last = ntohs(move->last);
+ if (cmp_first <= first && first <= cmp_last &&
+ cmp_first <= last && last <= cmp_last ) {
+ info("The range/value (%u, %u) is inside the range/value (%u, %u) already inserted, it will be ignored.",
+ first, last, cmp_first, cmp_last);
+ freez(in->value);
+ freez(in);
+ return;
+ } else if (first <= cmp_first && cmp_first <= last &&
+ first <= cmp_last && cmp_last <= last) {
+ info("The range (%u, %u) is bigger than previous range (%u, %u) already inserted, the previous will be ignored.",
+ first, last, cmp_first, cmp_last);
+ freez(move->value);
+ move->value = in->value;
+ move->first = in->first;
+ move->last = in->last;
+ freez(in);
+ return;
+ }
+
+ store = move;
+ move = move->next;
+ }
+
+ store->next = in;
+ } else {
+ *out = in;
+ }
+
+#ifdef NETDATA_INTERNAL_CHECKS
+ info("Adding values %s( %u, %u) to %s port list used on network viewer",
+ in->value, ntohs(in->first), ntohs(in->last),
+ (*out == network_viewer_opt.included_port)?"included":"excluded");
+#endif
+}
+
+/**
+ * Parse port list
+ *
+ * Parse the range given and fill an allocated port list.
+ *
+ * @param out a pointer to store the linked list
+ * @param range the range informed by the user.
+ */
+static void parse_port_list(void **out, char *range)
+{
+ int first, last;
+ ebpf_network_viewer_port_list_t **list = (ebpf_network_viewer_port_list_t **)out;
+
+ char *copied = strdupz(range);
+ if (*range == '*' && *(range+1) == '\0') {
+ first = 1;
+ last = 65535;
+
+ clean_port_structure(list);
+ goto fillenvpl;
+ }
+
+ char *end = range;
+    // Move until a separator is found
+ while (*end && *end != ':' && *end != '-') end++;
+
+    // It has a range
+ if (likely(*end)) {
+ *end++ = '\0';
+ if (*end == '!') {
+ info("The exclusion cannot be in the second part of the range, the range %s will be ignored.", copied);
+ freez(copied);
+ return;
+ }
+ last = str2i((const char *)end);
+ } else {
+ last = 0;
+ }
+
+ first = str2i((const char *)range);
+ if (first < NETDATA_MINIMUM_PORT_VALUE || first > NETDATA_MAXIMUM_PORT_VALUE) {
+ info("The first port %d of the range \"%s\" is invalid and it will be ignored!", first, copied);
+ freez(copied);
+ return;
+ }
+
+ if (!last)
+ last = first;
+
+ if (last < NETDATA_MINIMUM_PORT_VALUE || last > NETDATA_MAXIMUM_PORT_VALUE) {
+ info("The second port %d of the range \"%s\" is invalid and the whole range will be ignored!", last, copied);
+ freez(copied);
+ return;
+ }
+
+ if (first > last) {
+ info("The specified order %s is wrong, the smallest value is always the first, it will be ignored!", copied);
+ freez(copied);
+ return;
+ }
+
+ ebpf_network_viewer_port_list_t *w;
+fillenvpl:
+ w = callocz(1, sizeof(ebpf_network_viewer_port_list_t));
+ w->value = copied;
+ w->hash = simple_hash(copied);
+ w->first = (uint16_t)htons((uint16_t)first);
+ w->last = (uint16_t)htons((uint16_t)last);
+ w->cmp_first = (uint16_t)first;
+ w->cmp_last = (uint16_t)last;
+
+ fill_port_list(list, w);
+}
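+
+// Accepted syntax for the parser above (illustrative): "19999" selects a single
+// port, "1024:2048" or "1024-2048" selects an inclusive range, and "*" selects
+// every port from 1 to 65535.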
+
+/**
+ * Parse Service List
+ *
+ * @param out a pointer to store the linked list
+ * @param service the service used to create the structure that will be linked.
+ */
+static void parse_service_list(void **out, char *service)
+{
+ ebpf_network_viewer_port_list_t **list = (ebpf_network_viewer_port_list_t **)out;
+ struct servent *serv = getservbyname((const char *)service, "tcp");
+ if (!serv)
+ serv = getservbyname((const char *)service, "udp");
+
+ if (!serv) {
+ info("Cannot resolv the service '%s' with protocols TCP and UDP, it will be ignored", service);
+ return;
+ }
+
+ ebpf_network_viewer_port_list_t *w = callocz(1, sizeof(ebpf_network_viewer_port_list_t));
+ w->value = strdupz(service);
+ w->hash = simple_hash(service);
+
+ w->first = w->last = (uint16_t)serv->s_port;
+
+ fill_port_list(list, w);
+}
+
+/**
+ * Netmask
+ *
+ * Copied from iprange (https://github.com/firehol/iprange/blob/master/iprange.h)
+ *
+ * @param prefix the CIDR prefix used to build the netmask.
+ *
+ * @return It returns the netmask in host byte order.
+ */
+static inline in_addr_t netmask(int prefix) {
+
+ if (prefix == 0)
+ return (~((in_addr_t) - 1));
+ else
+ return (in_addr_t)(~((1 << (32 - prefix)) - 1));
+
+}
+
+/**
+ * Broadcast
+ *
+ * Copied from iprange (https://github.com/firehol/iprange/blob/master/iprange.h)
+ *
+ * @param addr is the ip address
+ * @param prefix is the CIDR value.
+ *
+ * @return It returns the last address of the range
+ */
+static inline in_addr_t broadcast(in_addr_t addr, int prefix)
+{
+ return (addr | ~netmask(prefix));
+}
+
+/**
+ * Network
+ *
+ * Copied from iprange (https://github.com/firehol/iprange/blob/master/iprange.h)
+ *
+ * @param addr is the ip address
+ * @param prefix is the CIDR value.
+ *
+ * @return It returns the first address of the range.
+ */
+static inline in_addr_t ipv4_network(in_addr_t addr, int prefix)
+{
+ return (addr & netmask(prefix));
+}
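+
+// Worked example for the three helpers above (host byte order): for the CIDR
+// 192.168.1.10/24,
+//     netmask(24)            = 0xFFFFFF00
+//     ipv4_network(addr, 24) = 192.168.1.0   (first address of the range)
+//     broadcast(addr, 24)    = 192.168.1.255 (last address of the range)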
+
+/**
+ * IP to network long
+ *
+ * @param dst the vector to store the result
+ * @param ip the source IP given by the user.
+ * @param domain the address family (AF_INET or AF_INET6)
+ * @param source the original string
+ *
+ * @return it returns 0 on success and -1 otherwise.
+ */
+static inline int ip2nl(uint8_t *dst, char *ip, int domain, char *source)
+{
+ if (inet_pton(domain, ip, dst) <= 0) {
+ error("The address specified (%s) is invalid ", source);
+ return -1;
+ }
+
+ return 0;
+}
+
+/**
+ * Get IPV6 Last Address
+ *
+ * @param out the address to store the last address.
+ * @param in the address used to do the math.
+ * @param prefix number of bits used to calculate the address
+ */
+static void get_ipv6_last_addr(union netdata_ip_t *out, union netdata_ip_t *in, uint64_t prefix)
+{
+ uint64_t mask,tmp;
+ uint64_t ret[2];
+ memcpy(ret, in->addr32, sizeof(union netdata_ip_t));
+
+ if (prefix == 128) {
+ memcpy(out->addr32, in->addr32, sizeof(union netdata_ip_t));
+ return;
+ } else if (!prefix) {
+ ret[0] = ret[1] = 0xFFFFFFFFFFFFFFFF;
+ memcpy(out->addr32, ret, sizeof(union netdata_ip_t));
+ return;
+ } else if (prefix <= 64) {
+ ret[1] = 0xFFFFFFFFFFFFFFFFULL;
+
+ tmp = be64toh(ret[0]);
+ if (prefix > 0) {
+ mask = 0xFFFFFFFFFFFFFFFFULL << (64 - prefix);
+ tmp |= ~mask;
+ }
+ ret[0] = htobe64(tmp);
+ } else {
+ mask = 0xFFFFFFFFFFFFFFFFULL << (128 - prefix);
+ tmp = be64toh(ret[1]);
+ tmp |= ~mask;
+ ret[1] = htobe64(tmp);
+ }
+
+ memcpy(out->addr32, ret, sizeof(union netdata_ip_t));
+}
+
+/**
+ * Calculate ipv6 first address
+ *
+ * @param out the address to store the first address.
+ * @param in the address used to do the math.
+ * @param prefix number of bits used to calculate the address
+ */
+static void get_ipv6_first_addr(union netdata_ip_t *out, union netdata_ip_t *in, uint64_t prefix)
+{
+ uint64_t mask,tmp;
+ uint64_t ret[2];
+
+ memcpy(ret, in->addr32, sizeof(union netdata_ip_t));
+
+ if (prefix == 128) {
+ memcpy(out->addr32, in->addr32, sizeof(union netdata_ip_t));
+ return;
+ } else if (!prefix) {
+ ret[0] = ret[1] = 0;
+ memcpy(out->addr32, ret, sizeof(union netdata_ip_t));
+ return;
+ } else if (prefix <= 64) {
+ ret[1] = 0ULL;
+
+ tmp = be64toh(ret[0]);
+ if (prefix > 0) {
+ mask = 0xFFFFFFFFFFFFFFFFULL << (64 - prefix);
+ tmp &= mask;
+ }
+ ret[0] = htobe64(tmp);
+ } else {
+ mask = 0xFFFFFFFFFFFFFFFFULL << (128 - prefix);
+ tmp = be64toh(ret[1]);
+ tmp &= mask;
+ ret[1] = htobe64(tmp);
+ }
+
+ memcpy(out->addr32, ret, sizeof(union netdata_ip_t));
+}
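+
+// Worked example for the two helpers above (illustrative): for 2001:db8::/32,
+//     get_ipv6_first_addr() yields 2001:db8::
+//     get_ipv6_last_addr()  yields 2001:db8:ffff:ffff:ffff:ffff:ffff:ffff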
+
+/**
+ * Parse IP List
+ *
+ * Parse IP list and link it.
+ *
+ * @param out a pointer to store the linked list
+ * @param ip the value given as parameter
+ */
+static void parse_ip_list(void **out, char *ip)
+{
+ ebpf_network_viewer_ip_list_t **list = (ebpf_network_viewer_ip_list_t **)out;
+
+ char *ipdup = strdupz(ip);
+ union netdata_ip_t first = { };
+ union netdata_ip_t last = { };
+ char *is_ipv6;
+ if (*ip == '*' && *(ip+1) == '\0') {
+ memset(first.addr8, 0, sizeof(first.addr8));
+ memset(last.addr8, 0xFF, sizeof(last.addr8));
+
+ is_ipv6 = ip;
+
+ clean_ip_structure(list);
+ goto storethisip;
+ }
+
+ char *end = ip;
+    // Move until a separator is found
+ while (*end && *end != '/' && *end != '-') end++;
+
+    // For now we only support the classic IPv6 notation, but base-85 could be considered in the future
+ // https://tools.ietf.org/html/rfc1924
+ is_ipv6 = strchr(ip, ':');
+
+ int select;
+ if (*end && !is_ipv6) { // IPV4 range
+ select = (*end == '/') ? 0 : 1;
+ *end++ = '\0';
+ if (*end == '!') {
+ info("The exclusion cannot be in the second part of the range %s, it will be ignored.", ipdup);
+ goto cleanipdup;
+ }
+
+ if (!select) { // CIDR
+ select = ip2nl(first.addr8, ip, AF_INET, ipdup);
+ if (select)
+ goto cleanipdup;
+
+ select = (int) str2i(end);
+ if (select < NETDATA_MINIMUM_IPV4_CIDR || select > NETDATA_MAXIMUM_IPV4_CIDR) {
+ info("The specified CIDR %s is not valid, the IP %s will be ignored.", end, ip);
+ goto cleanipdup;
+ }
+
+ last.addr32[0] = htonl(broadcast(ntohl(first.addr32[0]), select));
+            // This UNUSED() silences a Codacy warning reported in:
+            // https://app.codacy.com/manual/netdata/netdata/pullRequest?prid=5810941&bid=19021977
+ UNUSED(last.addr32[0]);
+
+ uint32_t ipv4_test = htonl(ipv4_network(ntohl(first.addr32[0]), select));
+ if (first.addr32[0] != ipv4_test) {
+ first.addr32[0] = ipv4_test;
+ struct in_addr ipv4_convert;
+ ipv4_convert.s_addr = ipv4_test;
+ char ipv4_msg[INET_ADDRSTRLEN];
+                if (inet_ntop(AF_INET, &ipv4_convert, ipv4_msg, INET_ADDRSTRLEN))
+                    info("The network value of CIDR %s was updated to %s.", ipdup, ipv4_msg);
+ }
+ } else { // Range
+ select = ip2nl(first.addr8, ip, AF_INET, ipdup);
+ if (select)
+ goto cleanipdup;
+
+ select = ip2nl(last.addr8, end, AF_INET, ipdup);
+ if (select)
+ goto cleanipdup;
+ }
+
+ if (htonl(first.addr32[0]) > htonl(last.addr32[0])) {
+ info("The specified range %s is invalid, the second address is smallest than the first, it will be ignored.",
+ ipdup);
+ goto cleanipdup;
+ }
+ } else if (is_ipv6) { // IPV6
+ if (!*end) { // Unique
+ select = ip2nl(first.addr8, ip, AF_INET6, ipdup);
+ if (select)
+ goto cleanipdup;
+
+ memcpy(last.addr8, first.addr8, sizeof(first.addr8));
+ } else if (*end == '-') {
+ *end++ = 0x00;
+ if (*end == '!') {
+ info("The exclusion cannot be in the second part of the range %s, it will be ignored.", ipdup);
+ goto cleanipdup;
+ }
+
+ select = ip2nl(first.addr8, ip, AF_INET6, ipdup);
+ if (select)
+ goto cleanipdup;
+
+ select = ip2nl(last.addr8, end, AF_INET6, ipdup);
+ if (select)
+ goto cleanipdup;
+ } else { // CIDR
+ *end++ = 0x00;
+ if (*end == '!') {
+ info("The exclusion cannot be in the second part of the range %s, it will be ignored.", ipdup);
+ goto cleanipdup;
+ }
+
+ select = str2i(end);
+ if (select < 0 || select > 128) {
+ info("The CIDR %s is not valid, the address %s will be ignored.", end, ip);
+ goto cleanipdup;
+ }
+
+ uint64_t prefix = (uint64_t)select;
+ select = ip2nl(first.addr8, ip, AF_INET6, ipdup);
+ if (select)
+ goto cleanipdup;
+
+ get_ipv6_last_addr(&last, &first, prefix);
+
+ union netdata_ip_t ipv6_test;
+ get_ipv6_first_addr(&ipv6_test, &first, prefix);
+
+ if (memcmp(first.addr8, ipv6_test.addr8, sizeof(union netdata_ip_t)) != 0) {
+ memcpy(first.addr8, ipv6_test.addr8, sizeof(union netdata_ip_t));
+
+ struct in6_addr ipv6_convert;
+ memcpy(ipv6_convert.s6_addr, ipv6_test.addr8, sizeof(union netdata_ip_t));
+
+ char ipv6_msg[INET6_ADDRSTRLEN];
+                if (inet_ntop(AF_INET6, &ipv6_convert, ipv6_msg, INET6_ADDRSTRLEN))
+                    info("The network value of CIDR %s was updated to %s.", ipdup, ipv6_msg);
+ }
+ }
+
+ if ((be64toh(*(uint64_t *)&first.addr32[2]) > be64toh(*(uint64_t *)&last.addr32[2]) &&
+ !memcmp(first.addr32, last.addr32, 2*sizeof(uint32_t))) ||
+ (be64toh(*(uint64_t *)&first.addr32) > be64toh(*(uint64_t *)&last.addr32)) ) {
+ info("The specified range %s is invalid, the second address is smallest than the first, it will be ignored.",
+ ipdup);
+ goto cleanipdup;
+ }
+ } else { // Unique ip
+ select = ip2nl(first.addr8, ip, AF_INET, ipdup);
+ if (select)
+ goto cleanipdup;
+
+ memcpy(last.addr8, first.addr8, sizeof(first.addr8));
+ }
+
+ ebpf_network_viewer_ip_list_t *store;
+
+storethisip:
+ store = callocz(1, sizeof(ebpf_network_viewer_ip_list_t));
+ store->value = ipdup;
+ store->hash = simple_hash(ipdup);
+ store->ver = (uint8_t)(!is_ipv6)?AF_INET:AF_INET6;
+ memcpy(store->first.addr8, first.addr8, sizeof(first.addr8));
+ memcpy(store->last.addr8, last.addr8, sizeof(last.addr8));
+
+ fill_ip_list(list, store, "socket");
+ return;
+
+cleanipdup:
+ freez(ipdup);
+}
+
+/**
+ * Parse IP Range
+ *
+ * Parse the IP ranges given and create Network Viewer IP Structure
+ *
+ * @param ptr is a pointer with the text to parse.
+ */
+static void parse_ips(char *ptr)
+{
+ // No value
+ if (unlikely(!ptr))
+ return;
+
+ while (likely(ptr)) {
+ // Move forward until next valid character
+ while (isspace(*ptr)) ptr++;
+
+ // No valid value found
+ if (unlikely(!*ptr))
+ return;
+
+ // Find space that ends the list
+ char *end = strchr(ptr, ' ');
+ if (end) {
+ *end++ = '\0';
+ }
+
+ int neg = 0;
+ if (*ptr == '!') {
+ neg++;
+ ptr++;
+ }
+
+        if (isascii(*ptr)) { // Parse IP
+ parse_ip_list((!neg)?(void **)&network_viewer_opt.included_ips:(void **)&network_viewer_opt.excluded_ips,
+ ptr);
+ }
+
+ ptr = end;
+ }
+}
+
+
+/**
+ * Parse Port Range
+ *
+ * Parse the port ranges given and create Network Viewer Port Structure
+ *
+ * @param ptr is a pointer with the text to parse.
+ */
+static void parse_ports(char *ptr)
+{
+ // No value
+ if (unlikely(!ptr))
+ return;
+
+ while (likely(ptr)) {
+ // Move forward until next valid character
+ while (isspace(*ptr)) ptr++;
+
+ // No valid value found
+ if (unlikely(!*ptr))
+ return;
+
+ // Find space that ends the list
+ char *end = strchr(ptr, ' ');
+ if (end) {
+ *end++ = '\0';
+ }
+
+ int neg = 0;
+ if (*ptr == '!') {
+ neg++;
+ ptr++;
+ }
+
+ if (isdigit(*ptr)) { // Parse port
+ parse_port_list((!neg)?(void **)&network_viewer_opt.included_port:(void **)&network_viewer_opt.excluded_port,
+ ptr);
+ } else if (isalpha(*ptr)) { // Parse service
+ parse_service_list((!neg)?(void **)&network_viewer_opt.included_port:(void **)&network_viewer_opt.excluded_port,
+ ptr);
+ } else if (*ptr == '*') { // All
+ parse_port_list((!neg)?(void **)&network_viewer_opt.included_port:(void **)&network_viewer_opt.excluded_port,
+ ptr);
+ }
+
+ ptr = end;
+ }
+}
+
+/**
+ * Link hostname
+ *
+ * @param out is the output link list
+ * @param in the hostname to add to list.
+ */
+static void link_hostname(ebpf_network_viewer_hostname_list_t **out, ebpf_network_viewer_hostname_list_t *in)
+{
+    if (likely(*out)) {
+        ebpf_network_viewer_hostname_list_t *move = *out;
+        for (;;) {
+            // compare every node, including the last one, so duplicates are always detected
+            if (move->hash == in->hash && !strcmp(move->value, in->value)) {
+                info("The hostname %s was already inserted, it will be ignored.", in->value);
+                freez(in->value);
+                simple_pattern_free(in->value_pattern);
+                freez(in);
+                return;
+            }
+            if (!move->next)
+                break;
+            move = move->next;
+        }
+
+        move->next = in;
+ } else {
+ *out = in;
+ }
+#ifdef NETDATA_INTERNAL_CHECKS
+ info("Adding value %s to %s hostname list used on network viewer",
+ in->value,
+ (*out == network_viewer_opt.included_hostnames)?"included":"excluded");
+#endif
+}
+
+/**
+ * Link hostnames
+ *
+ * Parse the list of hostnames to create the linked list.
+ * This is not associated with the IP, because simple patterns like *example* cannot be resolved to IP.
+ *
+ * @param parse a pointer with the text to parse.
+ */
+static void link_hostnames(char *parse)
+{
+ // No value
+ if (unlikely(!parse))
+ return;
+
+ while (likely(parse)) {
+ // Find the first valid value
+ while (isspace(*parse)) parse++;
+
+ // No valid value found
+ if (unlikely(!*parse))
+ return;
+
+ // Find space that ends the list
+ char *end = strchr(parse, ' ');
+ if (end) {
+ *end++ = '\0';
+ }
+
+ int neg = 0;
+ if (*parse == '!') {
+ neg++;
+ parse++;
+ }
+
+ ebpf_network_viewer_hostname_list_t *hostname = callocz(1 , sizeof(ebpf_network_viewer_hostname_list_t));
+ hostname->value = strdupz(parse);
+ hostname->hash = simple_hash(parse);
+ hostname->value_pattern = simple_pattern_create(parse, NULL, SIMPLE_PATTERN_EXACT);
+
+ link_hostname((!neg)?&network_viewer_opt.included_hostnames:&network_viewer_opt.excluded_hostnames,
+ hostname);
+
+ parse = end;
+ }
+}
+
+/**
+ * Read max dimension.
+ *
+ * Netdata plots two dimensions per connection, so it is necessary to adjust the values.
+ */
+static void read_max_dimension()
+{
+    int maxdim;
+    maxdim = (int) appconfig_get_number(&collector_config,
+                                        EBPF_NETWORK_VIEWER_SECTION,
+                                        "maximum dimensions",
+                                        NETDATA_NV_CAP_VALUE);
+    if (maxdim < 0) {
+        error("'maximum dimensions = %d' must be a positive number, Netdata will use the default value %ld instead.",
+              maxdim, NETDATA_NV_CAP_VALUE);
+        maxdim = NETDATA_NV_CAP_VALUE;
+    }
+
+    // Netdata plots two dimensions per connection, so the configured value is halved
+    maxdim /= 2;
+    if (!maxdim) {
+        network_viewer_opt.max_dim = 1;
+        info("The number of dimensions is too small (%u), Netdata is setting it to the minimum of 2.",
+             network_viewer_opt.max_dim);
+        return; // without this return, the assignment below would overwrite the minimum with zero
+    }
+
+    network_viewer_opt.max_dim = (uint32_t)maxdim;
+}
+
+/**
+ * Parse network viewer section
+ */
+static void parse_network_viewer_section()
+{
+ read_max_dimension();
+
+ network_viewer_opt.hostname_resolution_enabled = appconfig_get_boolean(&collector_config,
+ EBPF_NETWORK_VIEWER_SECTION,
+ "resolve hostnames",
+ CONFIG_BOOLEAN_NO);
+
+ network_viewer_opt.service_resolution_enabled = appconfig_get_boolean(&collector_config,
+ EBPF_NETWORK_VIEWER_SECTION,
+ "resolve service names",
+ CONFIG_BOOLEAN_NO);
+
+ char *value = appconfig_get(&collector_config, EBPF_NETWORK_VIEWER_SECTION,
+ "ports", NULL);
+ parse_ports(value);
+
+ if (network_viewer_opt.hostname_resolution_enabled) {
+ value = appconfig_get(&collector_config, EBPF_NETWORK_VIEWER_SECTION, "hostnames", NULL);
+ link_hostnames(value);
+ } else {
+ info("Name resolution is disabled, collector will not parser \"hostnames\" list.");
+ }
+
+ value = appconfig_get(&collector_config, EBPF_NETWORK_VIEWER_SECTION,
+ "ips", "!127.0.0.1/8 10.0.0.0/8 172.16.0.0/12 192.168.0.0/16 fc00::/7 !::1/128");
+ parse_ips(value);
+}
+
+/**
+ * Link dimension name
+ *
+ * Link user specified names inside a link list.
+ *
+ * @param port the port number associated with the dimension name.
+ * @param hash the calculated hash for the dimension name.
+ * @param value the dimension name.
+ */
+static void link_dimension_name(char *port, uint32_t hash, char *value)
+{
+ int test = str2i(port);
+ if (test < NETDATA_MINIMUM_PORT_VALUE || test > NETDATA_MAXIMUM_PORT_VALUE){
+ error("The dimension given (%s = %s) has an invalid value and it will be ignored.", port, value);
+ return;
+ }
+
+ ebpf_network_viewer_dim_name_t *w;
+ w = callocz(1, sizeof(ebpf_network_viewer_dim_name_t));
+
+ w->name = strdupz(value);
+ w->hash = hash;
+
+ w->port = (uint16_t) htons(test);
+
+ ebpf_network_viewer_dim_name_t *names = network_viewer_opt.names;
+ if (unlikely(!names)) {
+ network_viewer_opt.names = w;
+ } else {
+ for (; names->next; names = names->next) {
+ if (names->port == w->port) {
+ info("Dupplicated definition for a service, the name %s will be ignored. ", names->name);
+ freez(names->name);
+ names->name = w->name;
+ names->hash = w->hash;
+ freez(w);
+ return;
+ }
+ }
+ names->next = w;
+ }
+
+#ifdef NETDATA_INTERNAL_CHECKS
+ info("Adding values %s( %u) to dimension name list used on network viewer", w->name, htons(w->port));
+#endif
+}
+
+/**
+ * Parse service Name section.
+ *
+ * This function gets the values that will be used to overwrite dimensions.
+ */
+static void parse_service_name_section()
+{
+ struct section *co = appconfig_get_section(&collector_config, EBPF_SERVICE_NAME_SECTION);
+ if (co) {
+ struct config_option *cv;
+ for (cv = co->values; cv ; cv = cv->next) {
+ link_dimension_name(cv->name, cv->hash, cv->value);
+ }
+ }
+
+    // Always associate the default port with Netdata
+ ebpf_network_viewer_dim_name_t *names = network_viewer_opt.names;
+ if (names) {
+ uint16_t default_port = htons(19999);
+ while (names) {
+ if (names->port == default_port)
+ return;
+
+ names = names->next;
+ }
+ }
+
+ char *port_string = getenv("NETDATA_LISTEN_PORT");
+ if (port_string)
+ link_dimension_name(port_string, simple_hash(port_string), "Netdata");
+}
+
+/**
+ * Read collector values
+ *
+ * @param disable_apps variable to store information related to apps.
+ */
+static void read_collector_values(int *disable_apps)
+{
+ // Read global section
+ char *value;
+ if (appconfig_exists(&collector_config, EBPF_GLOBAL_SECTION, "load")) // Backward compatibility
+ value = appconfig_get(&collector_config, EBPF_GLOBAL_SECTION, "load", "entry");
+ else
+ value = appconfig_get(&collector_config, EBPF_GLOBAL_SECTION, "ebpf load mode", "entry");
+
+ how_to_load(value);
+
+    // Kept for backward compatibility
+ uint32_t enabled = appconfig_get_boolean(&collector_config, EBPF_GLOBAL_SECTION, "disable apps",
+ CONFIG_BOOLEAN_NO);
+ if (!enabled) {
+        // The "apps" option is phrased positively, so its value must be inverted to obtain "disable apps".
+ enabled = appconfig_get_boolean(&collector_config, EBPF_GLOBAL_SECTION, "apps",
+ CONFIG_BOOLEAN_YES);
+ enabled = (enabled == CONFIG_BOOLEAN_NO)?CONFIG_BOOLEAN_YES:CONFIG_BOOLEAN_NO;
+ }
+ *disable_apps = (int)enabled;
+
+ // Read ebpf programs section
+ enabled = appconfig_get_boolean(&collector_config, EBPF_PROGRAMS_SECTION,
+ ebpf_modules[0].config_name, CONFIG_BOOLEAN_YES);
+ int started = 0;
+ if (enabled) {
+ ebpf_enable_chart(EBPF_MODULE_PROCESS_IDX, *disable_apps);
+ started++;
+ }
+
+    // Kept for backward compatibility
+ enabled = appconfig_get_boolean(&collector_config, EBPF_PROGRAMS_SECTION, "network viewer",
+ CONFIG_BOOLEAN_NO);
+ if (!enabled)
+ enabled = appconfig_get_boolean(&collector_config, EBPF_PROGRAMS_SECTION, ebpf_modules[1].config_name,
+ CONFIG_BOOLEAN_NO);
+
+ if (enabled) {
+ ebpf_enable_chart(EBPF_MODULE_SOCKET_IDX, *disable_apps);
+ // Read network viewer section if network viewer is enabled
+ parse_network_viewer_section();
+ parse_service_name_section();
+ started++;
+ }
+
+    // Kept for backward compatibility
+ enabled = appconfig_get_boolean(&collector_config, EBPF_PROGRAMS_SECTION, "network connection monitoring",
+ CONFIG_BOOLEAN_NO);
+ if (!enabled)
+ enabled = appconfig_get_boolean(&collector_config, EBPF_PROGRAMS_SECTION, "network connections",
+ CONFIG_BOOLEAN_NO);
+ ebpf_modules[1].optional = enabled;
+
+ if (!started){
+ ebpf_enable_all_charts(*disable_apps);
+ // Read network viewer section
+ parse_network_viewer_section();
+ parse_service_name_section();
+ }
+}
+
+/**
+ * Load collector config
+ *
+ * @param path the path where the file ebpf.conf is stored.
+ * @param disable_apps variable to store the information about apps plugin status.
+ *
+ * @return 0 on success and -1 otherwise.
+ */
+static int load_collector_config(char *path, int *disable_apps)
+{
+ char lpath[4096];
+
+ snprintf(lpath, 4095, "%s/%s", path, "ebpf.conf");
+
+ if (!appconfig_load(&collector_config, lpath, 0, NULL))
+ return -1;
+
+ read_collector_values(disable_apps);
+
+ return 0;
+}
+
+/**
+ * Set global variables reading environment variables
+ */
+void set_global_variables()
+{
+ // Get environment variables
+ ebpf_plugin_dir = getenv("NETDATA_PLUGINS_DIR");
+ if (!ebpf_plugin_dir)
+ ebpf_plugin_dir = PLUGINS_DIR;
+
+ ebpf_user_config_dir = getenv("NETDATA_USER_CONFIG_DIR");
+ if (!ebpf_user_config_dir)
+ ebpf_user_config_dir = CONFIG_DIR;
+
+ ebpf_stock_config_dir = getenv("NETDATA_STOCK_CONFIG_DIR");
+ if (!ebpf_stock_config_dir)
+ ebpf_stock_config_dir = LIBCONFIG_DIR;
+
+ ebpf_configured_log_dir = getenv("NETDATA_LOG_DIR");
+ if (!ebpf_configured_log_dir)
+ ebpf_configured_log_dir = LOG_DIR;
+
+ ebpf_nprocs = (int)sysconf(_SC_NPROCESSORS_ONLN);
+ if (ebpf_nprocs > NETDATA_MAX_PROCESSOR) {
+ ebpf_nprocs = NETDATA_MAX_PROCESSOR;
+ }
+
+ isrh = get_redhat_release();
+ pid_max = get_system_pid_max();
+}
+
+/**
+ * Parse arguments given from user.
+ *
+ * @param argc the number of arguments
+ * @param argv the pointer to the arguments
+ */
+static void parse_args(int argc, char **argv)
+{
+ int enabled = 0;
+ int disable_apps = 0;
+ int freq = 0;
+ int option_index = 0;
+ static struct option long_options[] = {
+ {"help", no_argument, 0, 'h' },
+ {"version", no_argument, 0, 'v' },
+ {"global", no_argument, 0, 'g' },
+ {"all", no_argument, 0, 'a' },
+ {"net", no_argument, 0, 'n' },
+ {"process", no_argument, 0, 'p' },
+ {"return", no_argument, 0, 'r' },
+ {0, 0, 0, 0}
+ };
+
+ memset(&network_viewer_opt, 0, sizeof(network_viewer_opt));
+ network_viewer_opt.max_dim = NETDATA_NV_CAP_VALUE;
+
+ if (argc > 1) {
+ int n = (int)str2l(argv[1]);
+ if (n > 0) {
+ freq = n;
+ }
+ }
+
+ while (1) {
+ int c = getopt_long(argc, argv, "hvganpr", long_options, &option_index);
+ if (c == -1)
+ break;
+
+ switch (c) {
+ case 'h': {
+ ebpf_print_help();
+ exit(0);
+ }
+ case 'v': {
+ printf("ebpf.plugin %s\n", VERSION);
+ exit(0);
+ }
+ case 'g': {
+ disable_apps = 1;
+ ebpf_disable_apps();
+#ifdef NETDATA_INTERNAL_CHECKS
+ info(
+ "EBPF running with global chart group, because it was started with the option \"--global\" or \"-g\".");
+#endif
+ break;
+ }
+ case 'a': {
+ ebpf_enable_all_charts(disable_apps);
+#ifdef NETDATA_INTERNAL_CHECKS
+ info("EBPF running with all chart groups, because it was started with the option \"--all\" or \"-a\".");
+#endif
+ break;
+ }
+ case 'n': {
+ enabled = 1;
+ ebpf_enable_chart(EBPF_MODULE_SOCKET_IDX, disable_apps);
+#ifdef NETDATA_INTERNAL_CHECKS
+ info("EBPF enabling \"NET\" charts, because it was started with the option \"--net\" or \"-n\".");
+#endif
+ break;
+ }
+ case 'p': {
+ enabled = 1;
+ ebpf_enable_chart(EBPF_MODULE_PROCESS_IDX, disable_apps);
+#ifdef NETDATA_INTERNAL_CHECKS
+ info(
+ "EBPF enabling \"PROCESS\" charts, because it was started with the option \"--process\" or \"-p\".");
+#endif
+ break;
+ }
+ case 'r': {
+ ebpf_set_thread_mode(MODE_RETURN);
+#ifdef NETDATA_INTERNAL_CHECKS
+ info("EBPF running in \"return\" mode, because it was started with the option \"--return\" or \"-r\".");
+#endif
+ break;
+ }
+ default: {
+ break;
+ }
+ }
+ }
+
+ if (freq > 0) {
+ update_every = freq;
+ }
+
+ if (load_collector_config(ebpf_user_config_dir, &disable_apps)) {
+        info(
+            "Cannot find a configuration file inside `%s/ebpf.conf`. Trying to load the stock file.",
+            ebpf_user_config_dir);
+ if (load_collector_config(ebpf_stock_config_dir, &disable_apps)) {
+ info("Does not have a stock file. It is starting with default options.");
+ } else {
+ enabled = 1;
+ }
+ } else {
+ enabled = 1;
+ }
+
+ if (!enabled) {
+ ebpf_enable_all_charts(disable_apps);
+#ifdef NETDATA_INTERNAL_CHECKS
+ info("EBPF running with all charts, because neither \"-n\" or \"-p\" was given.");
+#endif
+ }
+
+ if (disable_apps)
+ return;
+
+ // Load apps_groups.conf
+ if (ebpf_read_apps_groups_conf(
+ &apps_groups_default_target, &apps_groups_root_target, ebpf_user_config_dir, "groups")) {
+ info(
+ "Cannot read process groups configuration file '%s/apps_groups.conf'. Will try '%s/apps_groups.conf'",
+ ebpf_user_config_dir, ebpf_stock_config_dir);
+ if (ebpf_read_apps_groups_conf(
+ &apps_groups_default_target, &apps_groups_root_target, ebpf_stock_config_dir, "groups")) {
+ error(
+ "Cannot read process groups '%s/apps_groups.conf'. There are no internal defaults. Failing.",
+ ebpf_stock_config_dir);
+ thread_finished++;
+ ebpf_exit(1);
+ }
+ } else
+ info("Loaded config file '%s/apps_groups.conf'", ebpf_user_config_dir);
+}
+
+/*****************************************************************
+ *
+ * COLLECTOR ENTRY POINT
+ *
+ *****************************************************************/
+
+/**
+ * Entry point
+ *
+ * @param argc the number of arguments
+ * @param argv the pointer to the arguments
+ *
+ * @return it returns 0 on success and another integer otherwise
+ */
+int main(int argc, char **argv)
+{
+ set_global_variables();
+ parse_args(argc, argv);
+
+ running_on_kernel = get_kernel_version(kernel_string, 63);
+ if (!has_condition_to_run(running_on_kernel)) {
+ error("The current collector cannot run on this kernel.");
+ return 2;
+ }
+
+ if (!am_i_running_as_root()) {
+        error(
+            "ebpf.plugin should either run as root (now running with uid %u, euid %u) or have special capabilities.",
+            (unsigned int)getuid(), (unsigned int)geteuid());
+ return 3;
+ }
+
+ // set name
+ program_name = "ebpf.plugin";
+
+ // disable syslog
+ error_log_syslog = 0;
+
+ // set errors flood protection to 100 logs per hour
+ error_log_errors_per_period = 100;
+ error_log_throttle_period = 3600;
+
+ struct rlimit r = { RLIM_INFINITY, RLIM_INFINITY };
+ if (setrlimit(RLIMIT_MEMLOCK, &r)) {
+ error("Setrlimit(RLIMIT_MEMLOCK)");
+ return 4;
+ }
+
+ signal(SIGINT, ebpf_exit);
+ signal(SIGTERM, ebpf_exit);
+ signal(SIGPIPE, ebpf_exit);
+
+ if (ebpf_start_pthread_variables()) {
+ thread_finished++;
+ error("Cannot start mutex to control overall charts.");
+ ebpf_exit(5);
+ }
+
+ ebpf_allocate_common_vectors();
+
+ read_local_addresses();
+ read_local_ports("/proc/net/tcp", IPPROTO_TCP);
+ read_local_ports("/proc/net/tcp6", IPPROTO_TCP);
+ read_local_ports("/proc/net/udp", IPPROTO_UDP);
+ read_local_ports("/proc/net/udp6", IPPROTO_UDP);
+
+ struct netdata_static_thread ebpf_threads[] = {
+ {"EBPF PROCESS", NULL, NULL, 1, NULL, NULL, ebpf_modules[0].start_routine},
+ {"EBPF SOCKET" , NULL, NULL, 1, NULL, NULL, ebpf_modules[1].start_routine},
+ {NULL , NULL, NULL, 0, NULL, NULL, NULL}
+ };
+
+ //clean_loaded_events();
+
+ int i;
+ for (i = 0; ebpf_threads[i].name != NULL; i++) {
+ struct netdata_static_thread *st = &ebpf_threads[i];
+ st->thread = mallocz(sizeof(netdata_thread_t));
+
+ ebpf_module_t *em = &ebpf_modules[i];
+ em->thread_id = i;
+ netdata_thread_create(st->thread, st->name, NETDATA_THREAD_OPTION_JOINABLE, st->start_routine, em);
+ }
+
+ for (i = 0; ebpf_threads[i].name != NULL; i++) {
+ struct netdata_static_thread *st = &ebpf_threads[i];
+ netdata_thread_join(*st->thread, NULL);
+ }
+
+ thread_finished++;
+ ebpf_exit(0);
+
+ return 0;
+}
diff --git a/collectors/ebpf.plugin/ebpf.conf b/collectors/ebpf.plugin/ebpf.conf
new file mode 100644
index 0000000..3a5b773
--- /dev/null
+++ b/collectors/ebpf.plugin/ebpf.conf
@@ -0,0 +1,45 @@
+#
+# Global options
+#
+# The `ebpf load mode` option accepts the following values:
+#  `entry`  : The eBPF collector only monitors calls for the functions, and does not show charts related to errors.
+#  `return` : In the `return` mode, the eBPF collector monitors the same kernel functions as `entry`, but also creates
+#             new charts for the return of these functions, such as errors.
+#
+# The eBPF collector also creates charts for each running application through an integration with the `apps plugin`.
+# If you want to disable the integration with `apps.plugin` along with the above charts, change the setting `apps` to
+# 'no'.
+#
+[global]
+ ebpf load mode = entry
+ apps = yes
+
+#
+# eBPF Programs
+#
+# The eBPF collector enables and runs the following eBPF programs by default:
+#
+# `process` : This eBPF program creates charts that show information about process creation, VFS IO, and
+# files removed.
+# `socket` : This eBPF program creates charts with information about `TCP` and `UDP` functions, including the
+# bandwidth consumed by each.
+[ebpf programs]
+ process = yes
+ socket = yes
+ network connections = no
+
+#
+# Network Connection
+#
+# This feature is a work in progress (WIP).
+#
+[network connections]
+ maximum dimensions = 50
+ resolve hostnames = no
+ resolve service names = no
+ ports = *
+ ips = !127.0.0.1/8 10.0.0.0/8 172.16.0.0/12 192.168.0.0/16 fc00::/7 !::1/128
+ hostnames = *
+
+[service name]
+ 19999 = Netdata
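+
+# Other ports can be renamed the same way, for example (illustrative):
+#    53 = DNS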
diff --git a/collectors/ebpf.plugin/ebpf.h b/collectors/ebpf.plugin/ebpf.h
new file mode 100644
index 0000000..35013c2
--- /dev/null
+++ b/collectors/ebpf.plugin/ebpf.h
@@ -0,0 +1,207 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_COLLECTOR_EBPF_H
+#define NETDATA_COLLECTOR_EBPF_H 1
+
+#ifndef __FreeBSD__
+#include <linux/perf_event.h>
+#endif
+#include <stdint.h>
+#include <errno.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <dlfcn.h>
+
+#include <fcntl.h>
+#include <ctype.h>
+#include <dirent.h>
+
+// From libnetdata.h
+#include "libnetdata/threads/threads.h"
+#include "libnetdata/locks/locks.h"
+#include "libnetdata/avl/avl.h"
+#include "libnetdata/clocks/clocks.h"
+#include "libnetdata/config/appconfig.h"
+#include "libnetdata/ebpf/ebpf.h"
+#include "libnetdata/procfile/procfile.h"
+#include "daemon/main.h"
+
+#include "ebpf_apps.h"
+
+typedef struct netdata_syscall_stat {
+ unsigned long bytes; // total number of bytes
+ uint64_t call; // total number of calls
+ uint64_t ecall; // number of calls that returned error
+    struct netdata_syscall_stat *next; // Linked list
+} netdata_syscall_stat_t;
+
+typedef uint64_t netdata_idx_t;
+
+typedef struct netdata_publish_syscall {
+ char *dimension;
+ char *name;
+ char *algorithm;
+ unsigned long nbyte;
+ unsigned long pbyte;
+ uint64_t ncall;
+ uint64_t pcall;
+ uint64_t nerr;
+ uint64_t perr;
+ struct netdata_publish_syscall *next;
+} netdata_publish_syscall_t;
+
+typedef struct netdata_publish_vfs_common {
+ long write;
+ long read;
+
+ long running;
+ long zombie;
+} netdata_publish_vfs_common_t;
+
+typedef struct netdata_error_report {
+ char comm[16];
+ __u32 pid;
+
+ int type;
+ int err;
+} netdata_error_report_t;
+
+extern ebpf_module_t ebpf_modules[];
+#define EBPF_MODULE_PROCESS_IDX 0
+#define EBPF_MODULE_SOCKET_IDX 1
+
+// Copied from musl header
+#ifndef offsetof
+#if __GNUC__ > 3
+#define offsetof(type, member) __builtin_offsetof(type, member)
+#else
+#define offsetof(type, member) ((size_t)((char *)&(((type *)0)->member) - (char *)0))
+#endif
+#endif
+
+// Chart definitions
+#define NETDATA_EBPF_FAMILY "ebpf"
+
+// Log file
+#define NETDATA_DEVELOPER_LOG_FILE "developer.log"
+
+// Maximum number of processors monitored on perf events
+#define NETDATA_MAX_PROCESSOR 512
+
+// Kernel versions calculated with the formula:
+// R = MAJOR*65536 + MINOR*256 + PATCH
+#define NETDATA_KERNEL_V5_3 328448
+#define NETDATA_KERNEL_V4_15 265984
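+// e.g. 5.3.0  -> 5*65536 + 3*256  + 0 = 328448
+//      4.15.0 -> 4*65536 + 15*256 + 0 = 265984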
+
+#define EBPF_SYS_CLONE_IDX 11
+#define EBPF_MAX_MAPS 32
+
+enum ebpf_algorithms_list {
+ NETDATA_EBPF_ABSOLUTE_IDX,
+ NETDATA_EBPF_INCREMENTAL_IDX
+};
+
+// Threads
+extern void *ebpf_process_thread(void *ptr);
+extern void *ebpf_socket_thread(void *ptr);
+
+// Common variables
+extern pthread_mutex_t lock;
+extern int close_ebpf_plugin;
+extern int ebpf_nprocs;
+extern int running_on_kernel;
+extern char *ebpf_plugin_dir;
+extern char kernel_string[64];
+
+extern pthread_mutex_t collect_data_mutex;
+extern pthread_cond_t collect_data_cond_var;
+
+// Common functions
+extern void ebpf_global_labels(netdata_syscall_stat_t *is,
+ netdata_publish_syscall_t *pio,
+ char **dim,
+ char **name,
+ int *algorithm,
+ int end);
+
+extern void ebpf_write_chart_cmd(char *type,
+ char *id,
+ char *title,
+ char *units,
+ char *family,
+ char *charttype,
+ int order);
+
+extern void ebpf_write_global_dimension(char *name, char *id, char *algorithm);
+
+extern void ebpf_create_global_dimension(void *ptr, int end);
+
+extern void ebpf_create_chart(char *type,
+ char *id,
+ char *title,
+ char *units,
+ char *family,
+ int order,
+ void (*ncd)(void *, int),
+ void *move,
+ int end);
+
+extern void write_begin_chart(char *family, char *name);
+
+extern void write_chart_dimension(char *dim, long long value);
+
+extern void write_count_chart(char *name, char *family, netdata_publish_syscall_t *move, uint32_t end);
+
+extern void write_err_chart(char *name, char *family, netdata_publish_syscall_t *move, int end);
+
+extern void write_io_chart(char *chart, char *family, char *dwrite, long long vwrite,
+ char *dread, long long vread);
+
+extern void fill_ebpf_data(ebpf_data_t *ef);
+
+extern void ebpf_create_charts_on_apps(char *name,
+ char *title,
+ char *units,
+ char *family,
+ int order,
+ char *algorithm,
+ struct target *root);
+
+extern void write_end_chart();
+
+extern void ebpf_cleanup_publish_syscall(netdata_publish_syscall_t *nps);
+
+#define EBPF_GLOBAL_SECTION "global"
+#define EBPF_PROGRAMS_SECTION "ebpf programs"
+#define EBPF_NETWORK_VIEWER_SECTION "network connections"
+#define EBPF_SERVICE_NAME_SECTION "service name"
+
+#define EBPF_COMMON_DIMENSION_CALL "calls/s"
+#define EBPF_COMMON_DIMENSION_BITS "kilobits/s"
+#define EBPF_COMMON_DIMENSION_BYTES "bytes/s"
+#define EBPF_COMMON_DIMENSION_DIFFERENCE "difference"
+#define EBPF_COMMON_DIMENSION_PACKETS "packets"
+
+// Common variables
+extern char *ebpf_user_config_dir;
+extern char *ebpf_stock_config_dir;
+extern int debug_enabled;
+extern struct pid_stat *root_of_pids;
+extern char *ebpf_algorithms[];
+
+// Socket functions and variables
+// Common functions
+extern void ebpf_socket_create_apps_charts(ebpf_module_t *em, struct target *root);
+extern collected_number get_value_from_structure(char *basis, size_t offset);
+extern struct pid_stat *root_of_pids;
+extern ebpf_process_stat_t *global_process_stat;
+extern size_t all_pids_count;
+extern int update_every;
+extern uint32_t finalized_threads;
+
+#define EBPF_MAX_SYNCHRONIZATION_TIME 300
+
+#endif /* NETDATA_COLLECTOR_EBPF_H */
diff --git a/collectors/ebpf.plugin/ebpf_apps.c b/collectors/ebpf.plugin/ebpf_apps.c
new file mode 100644
index 0000000..844ce23
--- /dev/null
+++ b/collectors/ebpf.plugin/ebpf_apps.c
@@ -0,0 +1,1082 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "ebpf.h"
+#include "ebpf_socket.h"
+#include "ebpf_apps.h"
+
+// ----------------------------------------------------------------------------
+// internal flags
+// handled in code (automatically set)
+
+static int proc_pid_cmdline_is_needed = 0; // 1 when we need to read /proc/<pid>/cmdline
+
+/*****************************************************************
+ *
+ * FUNCTIONS USED TO READ HASH TABLES
+ *
+ *****************************************************************/
+
+/**
+ * Read statistic hash table.
+ *
+ * @param ep the output structure.
+ * @param fd the file descriptor mapped from kernel ring.
+ * @param pid the index used to select the data.
+ *
+ * @return It returns 0 when the data was copied and -1 otherwise
+ */
+int ebpf_read_hash_table(void *ep, int fd, uint32_t pid)
+{
+ if (!ep)
+ return -1;
+
+ if (!bpf_map_lookup_elem(fd, &pid, ep))
+ return 0;
+
+ return -1;
+}
+
+/**
+ * Read bandwidth statistics for the PIDs of a target
+ *
+ * Read information from the kernel ring to the user ring.
+ *
+ * @param ep   the table with all process bandwidth values.
+ * @param fd   the file descriptor mapped from the kernel.
+ * @param pids the list of pids associated with a target.
+ *
+ * @return It returns the number of PIDs whose data was read.
+ */
+size_t read_bandwidth_statistic_using_pid_on_target(ebpf_bandwidth_t **ep, int fd, struct pid_on_target *pids)
+{
+ size_t count = 0;
+ while (pids) {
+ uint32_t current_pid = pids->pid;
+ if (!ebpf_read_hash_table(ep[current_pid], fd, current_pid))
+ count++;
+
+ pids = pids->next;
+ }
+
+ return count;
+}
+
+/**
+ * Read bandwidth statistic using hash table
+ *
+ * @param out the output vector that will receive the information.
+ * @param fd  the file descriptor that has the data.
+ *
+ * @return It returns the number of entries read from the hash table.
+ */
+size_t read_bandwidth_statistic_using_hash_table(ebpf_bandwidth_t **out, int fd)
+{
+ size_t count = 0;
+ uint32_t key = 0;
+ uint32_t next_key = 0;
+
+    while (bpf_map_get_next_key(fd, &key, &next_key) == 0) {
+        ebpf_bandwidth_t *eps = out[next_key];
+        if (!eps) {
+            // allocate the value type stored in this map (ebpf_bandwidth_t)
+            eps = callocz(1, sizeof(ebpf_bandwidth_t));
+            out[next_key] = eps;
+        }
+        if (!ebpf_read_hash_table(eps, fd, next_key))
+            count++;
+
+        // advance the cursor, otherwise bpf_map_get_next_key keeps returning
+        // the successor of the same key and the loop never terminates
+        key = next_key;
+    }
+
+ return count;
+}
+
+/*****************************************************************
+ *
+ * FUNCTIONS CALLED FROM COLLECTORS
+ *
+ *****************************************************************/
+
+/**
+ * Am I running as Root
+ *
+ * Verify the user that is running the collector.
+ *
+ * @return It returns 1 for root and 0 otherwise.
+ */
+int am_i_running_as_root()
+{
+ uid_t uid = getuid(), euid = geteuid();
+
+ if (uid == 0 || euid == 0) {
+ return 1;
+ }
+
+ return 0;
+}
+
+/**
+ * Reset the target values
+ *
+ * @param root the pointer to the chain that will be reset.
+ *
+ * @return it returns the number of structures that were reset.
+ */
+size_t zero_all_targets(struct target *root)
+{
+ struct target *w;
+ size_t count = 0;
+
+ for (w = root; w; w = w->next) {
+ count++;
+
+ if (unlikely(w->root_pid)) {
+ struct pid_on_target *pid_on_target = w->root_pid;
+
+ while (pid_on_target) {
+ struct pid_on_target *pid_on_target_to_free = pid_on_target;
+ pid_on_target = pid_on_target->next;
+ free(pid_on_target_to_free);
+ }
+
+ w->root_pid = NULL;
+ }
+ }
+
+ return count;
+}
+
+/**
+ * Clean the allocated structures
+ *
+ * @param agrt the pointer to be cleaned.
+ */
+void clean_apps_groups_target(struct target *agrt)
+{
+ struct target *current_target;
+ while (agrt) {
+ current_target = agrt;
+ agrt = current_target->target;
+
+ freez(current_target);
+ }
+}
+
+/**
+ * Find or create a new target
+ * Some targets are only aggregated to another target (the second argument).
+ *
+ * @param agrt   the root of the targets list
+ * @param id     the id (command pattern) of the target
+ * @param target the target this one aggregates to, or NULL
+ * @param name   the name used to report the target
+ *
+ * @return It returns the target on success and NULL otherwise
+ */
+struct target *get_apps_groups_target(struct target **agrt, const char *id, struct target *target, const char *name)
+{
+ int tdebug = 0, thidden = target ? target->hidden : 0, ends_with = 0;
+ const char *nid = id;
+
+ // extract the options
+ while (nid[0] == '-' || nid[0] == '+' || nid[0] == '*') {
+ if (nid[0] == '-')
+ thidden = 1;
+ if (nid[0] == '+')
+ tdebug = 1;
+ if (nid[0] == '*')
+ ends_with = 1;
+ nid++;
+ }
+ uint32_t hash = simple_hash(id);
+
+ // find if it already exists
+ struct target *w, *last = *agrt;
+ for (w = *agrt; w; w = w->next) {
+ if (w->idhash == hash && strncmp(nid, w->id, MAX_NAME) == 0)
+ return w;
+
+ last = w;
+ }
+
+ // find an existing target
+ if (unlikely(!target)) {
+ while (*name == '-') {
+ if (*name == '-')
+ thidden = 1;
+ name++;
+ }
+
+ for (target = *agrt; target != NULL; target = target->next) {
+ if (!target->target && strcmp(name, target->name) == 0)
+ break;
+ }
+ }
+
+ if (target && target->target)
+ fatal(
+ "Internal Error: request to link process '%s' to target '%s' which is linked to target '%s'", id,
+ target->id, target->target->id);
+
+ w = callocz(1, sizeof(struct target));
+ strncpyz(w->id, nid, MAX_NAME);
+ w->idhash = simple_hash(w->id);
+
+ if (unlikely(!target))
+ // copy the name
+ strncpyz(w->name, name, MAX_NAME);
+ else
+ // copy the id
+ strncpyz(w->name, nid, MAX_NAME);
+
+ strncpyz(w->compare, nid, MAX_COMPARE_NAME);
+ size_t len = strlen(w->compare);
+ if (w->compare[len - 1] == '*') {
+ w->compare[len - 1] = '\0';
+ w->starts_with = 1;
+ }
+ w->ends_with = ends_with;
+
+ if (w->starts_with && w->ends_with)
+ proc_pid_cmdline_is_needed = 1;
+
+ w->comparehash = simple_hash(w->compare);
+ w->comparelen = strlen(w->compare);
+
+ w->hidden = thidden;
+#ifdef NETDATA_INTERNAL_CHECKS
+ w->debug_enabled = tdebug;
+#else
+ if (tdebug)
+ fprintf(stderr, "apps.plugin has been compiled without debugging\n");
+#endif
+ w->target = target;
+
+ // append it, to maintain the order in apps_groups.conf
+ if (last)
+ last->next = w;
+ else
+ *agrt = w;
+
+ return w;
+}
+
+/**
+ * Read the apps_groups.conf file
+ *
+ * @param agdt a pointer to apps_groups_default_target
+ * @param agrt a pointer to apps_groups_root_target
+ * @param path the directory to search apps_%s.conf
+ * @param file the word to complement the file name.
+ *
+ * @return It returns 0 on success and -1 otherwise
+ */
+int ebpf_read_apps_groups_conf(struct target **agdt, struct target **agrt, const char *path, const char *file)
+{
+ char filename[FILENAME_MAX + 1];
+
+ snprintfz(filename, FILENAME_MAX, "%s/apps_%s.conf", path, file);
+
+ // ----------------------------------------
+
+ procfile *ff = procfile_open(filename, " :\t", PROCFILE_FLAG_DEFAULT);
+ if (!ff)
+ return -1;
+
+ procfile_set_quotes(ff, "'\"");
+
+ ff = procfile_readall(ff);
+ if (!ff)
+ return -1;
+
+ size_t line, lines = procfile_lines(ff);
+
+ for (line = 0; line < lines; line++) {
+ size_t word, words = procfile_linewords(ff, line);
+ if (!words)
+ continue;
+
+ char *name = procfile_lineword(ff, line, 0);
+ if (!name || !*name)
+ continue;
+
+ // find a possibly existing target
+ struct target *w = NULL;
+
+ // loop through all words, skipping the first one (the name)
+ for (word = 0; word < words; word++) {
+ char *s = procfile_lineword(ff, line, word);
+ if (!s || !*s)
+ continue;
+ if (*s == '#')
+ break;
+
+ // is this the first word? skip it
+ if (s == name)
+ continue;
+
+ // add this target
+ struct target *n = get_apps_groups_target(agrt, s, w, name);
+ if (!n) {
+ error("Cannot create target '%s' (line %zu, word %zu)", s, line, word);
+ continue;
+ }
+
+ // just some optimization
+ // to avoid searching for a target for each process
+ if (!w)
+ w = n->target ? n->target : n;
+ }
+ }
+
+ procfile_close(ff);
+
+ *agdt = get_apps_groups_target(agrt, "p+!o@w#e$i^r&7*5(-i)l-o_", NULL, "other"); // match nothing
+ if (!*agdt)
+ fatal("Cannot create default target");
+
+ struct target *ptr = *agdt;
+ if (ptr->target)
+ *agdt = ptr->target;
+
+ return 0;
+}
+
+// the minimum PID of the system
+// this is also the pid of the init process
+#define INIT_PID 1
+
+// ----------------------------------------------------------------------------
+// string lengths
+
+#define MAX_COMPARE_NAME 100
+#define MAX_NAME 100
+#define MAX_CMDLINE 16384
+
+struct pid_stat **all_pids = NULL;    // to avoid allocations, we pre-allocate
+                                      // the entire pid space.
+struct pid_stat *root_of_pids = NULL; // global list of all processes running
+
+size_t all_pids_count = 0; // the number of processes running
+
+struct target
+ *apps_groups_default_target = NULL, // the default target
+ *apps_groups_root_target = NULL, // apps_groups.conf defined
+ *users_root_target = NULL, // users
+ *groups_root_target = NULL; // user groups
+
+size_t apps_groups_targets_count = 0; // # of apps_groups.conf targets
+
+// ----------------------------------------------------------------------------
+// internal counters
+
+static size_t
+ // global_iterations_counter = 1,
+ calls_counter = 0,
+ // file_counter = 0,
+ // filenames_allocated_counter = 0,
+ // inodes_changed_counter = 0,
+ // links_changed_counter = 0,
+ targets_assignment_counter = 0;
+
+// ----------------------------------------------------------------------------
+// debugging
+
+// log each problem once per process
+// log flood protection flags (log_thrown)
+#define PID_LOG_IO 0x00000001
+#define PID_LOG_STATUS 0x00000002
+#define PID_LOG_CMDLINE 0x00000004
+#define PID_LOG_FDS 0x00000008
+#define PID_LOG_STAT 0x00000010
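+
+// how the flood protection works (see managed_log() below): the first failure
+// for a pid logs one error and sets the matching PID_LOG_* bit; subsequent
+// failures stay silent until a successful read clears the bit again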
+
+int debug_enabled = 0;
+
+#ifdef NETDATA_INTERNAL_CHECKS
+
+#define debug_log(fmt, args...) \
+ do { \
+ if (unlikely(debug_enabled)) \
+ debug_log_int(fmt, ##args); \
+ } while (0)
+
+#else
+
+static inline void debug_log_dummy(void)
+{
+}
+#define debug_log(fmt, args...) debug_log_dummy()
+
+#endif
+
+/**
+ * Managed log
+ *
+ * Store log information if it is necessary.
+ *
+ * @param p the pid stat structure
+ * @param log the log id
+ * @param status the return from a function.
+ *
+ * @return It returns the status value.
+ */
+static inline int managed_log(struct pid_stat *p, uint32_t log, int status)
+{
+ if (unlikely(!status)) {
+ // error("command failed log %u, errno %d", log, errno);
+
+ if (unlikely(debug_enabled || errno != ENOENT)) {
+ if (unlikely(debug_enabled || !(p->log_thrown & log))) {
+ p->log_thrown |= log;
+ switch (log) {
+ case PID_LOG_IO:
+ error(
+ "Cannot process %s/proc/%d/io (command '%s')", netdata_configured_host_prefix, p->pid,
+ p->comm);
+ break;
+
+ case PID_LOG_STATUS:
+ error(
+ "Cannot process %s/proc/%d/status (command '%s')", netdata_configured_host_prefix, p->pid,
+ p->comm);
+ break;
+
+ case PID_LOG_CMDLINE:
+ error(
+ "Cannot process %s/proc/%d/cmdline (command '%s')", netdata_configured_host_prefix, p->pid,
+ p->comm);
+ break;
+
+ case PID_LOG_FDS:
+ error(
+ "Cannot process entries in %s/proc/%d/fd (command '%s')", netdata_configured_host_prefix,
+ p->pid, p->comm);
+ break;
+
+ case PID_LOG_STAT:
+ break;
+
+ default:
+ error("unhandled error for pid %d, command '%s'", p->pid, p->comm);
+ break;
+ }
+ }
+ }
+ errno = 0;
+ } else if (unlikely(p->log_thrown & log)) {
+ // error("unsetting log %u on pid %d", log, p->pid);
+ p->log_thrown &= ~log;
+ }
+
+ return status;
+}
+
+/**
+ * Get PID entry
+ *
+ * Get or allocate the PID entry for the specified pid.
+ *
+ * @param pid the pid to search the data.
+ *
+ * @return It returns the pid entry structure
+ */
+static inline struct pid_stat *get_pid_entry(pid_t pid)
+{
+ if (unlikely(all_pids[pid]))
+ return all_pids[pid];
+
+ struct pid_stat *p = callocz(1, sizeof(struct pid_stat));
+
+ if (likely(root_of_pids))
+ root_of_pids->prev = p;
+
+ p->next = root_of_pids;
+ root_of_pids = p;
+
+ p->pid = pid;
+
+ all_pids[pid] = p;
+ all_pids_count++;
+
+ return p;
+}
+
+/**
+ * Assign the PID to a target.
+ *
+ * @param p the pid_stat structure to assign for a target.
+ */
+static inline void assign_target_to_pid(struct pid_stat *p)
+{
+ targets_assignment_counter++;
+
+ uint32_t hash = simple_hash(p->comm);
+ size_t pclen = strlen(p->comm);
+
+ struct target *w;
+ for (w = apps_groups_root_target; w; w = w->next) {
+ // if(debug_enabled || (p->target && p->target->debug_enabled)) debug_log_int("\t\tcomparing '%s' with '%s'", w->compare, p->comm);
+
+ // find it - 4 cases:
+ // 1. the target is not a pattern
+ // 2. the target has the prefix
+ // 3. the target has the suffix
+ // 4. the target is something inside cmdline
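+        //
+        // illustrative examples (hypothetical entries, not from apps_groups.conf):
+        //   "bash"   -> case 1: exact match against p->comm
+        //   "bash*"  -> case 2: starts_with prefix match
+        //   "*bash"  -> case 3: ends_with suffix match
+        //   "*bash*" -> case 4: substring search inside p->cmdline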
+
+ if (unlikely(
+ ((!w->starts_with && !w->ends_with && w->comparehash == hash && !strcmp(w->compare, p->comm)) ||
+ (w->starts_with && !w->ends_with && !strncmp(w->compare, p->comm, w->comparelen)) ||
+ (!w->starts_with && w->ends_with && pclen >= w->comparelen && !strcmp(w->compare, &p->comm[pclen - w->comparelen])) ||
+ (proc_pid_cmdline_is_needed && w->starts_with && w->ends_with && p->cmdline && strstr(p->cmdline, w->compare))))) {
+ if (w->target)
+ p->target = w->target;
+ else
+ p->target = w;
+
+ if (debug_enabled || (p->target && p->target->debug_enabled))
+ debug_log_int("%s linked to target %s", p->comm, p->target->name);
+
+ break;
+ }
+ }
+}
+
+// ----------------------------------------------------------------------------
+// update pids from proc
+
+/**
+ * Read cmd line from /proc/PID/cmdline
+ *
+ * @param p the pid_stat_structure.
+ *
+ * @return It returns 1 on success and 0 otherwise.
+ */
+static inline int read_proc_pid_cmdline(struct pid_stat *p)
+{
+ static char cmdline[MAX_CMDLINE + 1];
+
+ if (unlikely(!p->cmdline_filename)) {
+ char filename[FILENAME_MAX + 1];
+ snprintfz(filename, FILENAME_MAX, "%s/proc/%d/cmdline", netdata_configured_host_prefix, p->pid);
+ p->cmdline_filename = strdupz(filename);
+ }
+
+ int fd = open(p->cmdline_filename, procfile_open_flags, 0666);
+ if (unlikely(fd == -1))
+ goto cleanup;
+
+ ssize_t i, bytes = read(fd, cmdline, MAX_CMDLINE);
+ close(fd);
+
+ if (unlikely(bytes < 0))
+ goto cleanup;
+
+ cmdline[bytes] = '\0';
+ for (i = 0; i < bytes; i++) {
+ if (unlikely(!cmdline[i]))
+ cmdline[i] = ' ';
+ }
+
+ if (p->cmdline)
+ freez(p->cmdline);
+ p->cmdline = strdupz(cmdline);
+
+ debug_log("Read file '%s' contents: %s", p->cmdline_filename, p->cmdline);
+
+ return 1;
+
+cleanup:
+ // copy the command to the command line
+ if (p->cmdline)
+ freez(p->cmdline);
+ p->cmdline = strdupz(p->comm);
+ return 0;
+}
+
+/**
+ * Read information from /proc/PID/stat and /proc/PID/cmdline
+ * Assign target to pid
+ *
+ * @param p the pid stat structure to store the data.
+ * @param ptr an unused argument.
+ *
+ * @return It returns 1 on success and 0 otherwise.
+ */
+static inline int read_proc_pid_stat(struct pid_stat *p, void *ptr)
+{
+ UNUSED(ptr);
+
+ static procfile *ff = NULL;
+
+ if (unlikely(!p->stat_filename)) {
+ char filename[FILENAME_MAX + 1];
+ snprintfz(filename, FILENAME_MAX, "%s/proc/%d/stat", netdata_configured_host_prefix, p->pid);
+ p->stat_filename = strdupz(filename);
+ }
+
+ int set_quotes = (!ff) ? 1 : 0;
+
+ struct stat statbuf;
+ if (stat(p->stat_filename, &statbuf))
+ return 0;
+
+ ff = procfile_reopen(ff, p->stat_filename, NULL, PROCFILE_FLAG_NO_ERROR_ON_FILE_IO);
+ if (unlikely(!ff))
+ return 0;
+
+ if (unlikely(set_quotes))
+ procfile_set_open_close(ff, "(", ")");
+
+ ff = procfile_readall(ff);
+ if (unlikely(!ff))
+ return 0;
+
+ p->last_stat_collected_usec = p->stat_collected_usec;
+ p->stat_collected_usec = now_monotonic_usec();
+ calls_counter++;
+
+ char *comm = procfile_lineword(ff, 0, 1);
+ p->ppid = (int32_t)str2pid_t(procfile_lineword(ff, 0, 3));
+
+ if (strcmp(p->comm, comm) != 0) {
+ if (unlikely(debug_enabled)) {
+ if (p->comm[0])
+ debug_log("\tpid %d (%s) changed name to '%s'", p->pid, p->comm, comm);
+ else
+ debug_log("\tJust added %d (%s)", p->pid, comm);
+ }
+
+ strncpyz(p->comm, comm, MAX_COMPARE_NAME);
+
+ // /proc/<pid>/cmdline
+ if (likely(proc_pid_cmdline_is_needed))
+ managed_log(p, PID_LOG_CMDLINE, read_proc_pid_cmdline(p));
+
+ assign_target_to_pid(p);
+ }
+
+ if (unlikely(debug_enabled || (p->target && p->target->debug_enabled)))
+ debug_log_int(
+ "READ PROC/PID/STAT: %s/proc/%d/stat, process: '%s' on target '%s' (dt=%llu)",
+ netdata_configured_host_prefix, p->pid, p->comm, (p->target) ? p->target->name : "UNSET",
+ p->stat_collected_usec - p->last_stat_collected_usec);
+
+ return 1;
+}
+
+/**
+ * Collect data for PID
+ *
+ * @param pid the current pid that we are working with
+ * @param ptr a NULL value
+ *
+ * @return It returns 1 on success and 0 otherwise
+ */
+static inline int collect_data_for_pid(pid_t pid, void *ptr)
+{
+ if (unlikely(pid < 0 || pid > pid_max)) {
+ error("Invalid pid %d read (expected %d to %d). Ignoring process.", pid, 0, pid_max);
+ return 0;
+ }
+
+ struct pid_stat *p = get_pid_entry(pid);
+ if (unlikely(!p || p->read))
+ return 0;
+ p->read = 1;
+
+ if (unlikely(!managed_log(p, PID_LOG_STAT, read_proc_pid_stat(p, ptr))))
+ // there is no reason to proceed if we cannot get its status
+ return 0;
+
+ // check its parent pid
+ if (unlikely(p->ppid < 0 || p->ppid > pid_max)) {
+ error("Pid %d (command '%s') states invalid parent pid %d. Using 0.", pid, p->comm, p->ppid);
+ p->ppid = 0;
+ }
+
+ // mark it as updated
+ p->updated = 1;
+ p->keep = 0;
+ p->keeploops = 0;
+
+ return 1;
+}
+
+/**
+ * Fill the linked list of parents with their children's PIDs
+ */
+static inline void link_all_processes_to_their_parents(void)
+{
+ struct pid_stat *p, *pp;
+
+ // link all children to their parents
+ // and update children count on parents
+ for (p = root_of_pids; p; p = p->next) {
+ // for each process found
+
+ p->sortlist = 0;
+ p->parent = NULL;
+
+ if (unlikely(!p->ppid)) {
+ p->parent = NULL;
+ continue;
+ }
+
+ pp = all_pids[p->ppid];
+ if (likely(pp)) {
+ p->parent = pp;
+ pp->children_count++;
+
+ if (unlikely(debug_enabled || (p->target && p->target->debug_enabled)))
+ debug_log_int(
+ "child %d (%s, %s) on target '%s' has parent %d (%s, %s).", p->pid, p->comm,
+ p->updated ? "running" : "exited", (p->target) ? p->target->name : "UNSET", pp->pid, pp->comm,
+ pp->updated ? "running" : "exited");
+ } else {
+ p->parent = NULL;
+ debug_log("pid %d %s states parent %d, but the later does not exist.", p->pid, p->comm, p->ppid);
+ }
+ }
+}
+
+/**
+ * Aggregate PIDs to targets.
+ */
+static void apply_apps_groups_targets_inheritance(void)
+{
+ struct pid_stat *p = NULL;
+
+ // children that do not have a target
+ // inherit their target from their parent
+ int found = 1, loops = 0;
+ while (found) {
+ if (unlikely(debug_enabled))
+ loops++;
+ found = 0;
+ for (p = root_of_pids; p; p = p->next) {
+ // if this process does not have a target
+ // and it has a parent
+ // and its parent has a target
+            // then, assign the parent's target to this process
+ if (unlikely(!p->target && p->parent && p->parent->target)) {
+ p->target = p->parent->target;
+ found++;
+
+ if (debug_enabled || (p->target && p->target->debug_enabled))
+ debug_log_int(
+ "TARGET INHERITANCE: %s is inherited by %d (%s) from its parent %d (%s).", p->target->name,
+ p->pid, p->comm, p->parent->pid, p->parent->comm);
+ }
+ }
+ }
+
+    // find all the procs with no children and merge them into their parents
+    // repeat, until nothing more can be done.
+ int sortlist = 1;
+ found = 1;
+ while (found) {
+ if (unlikely(debug_enabled))
+ loops++;
+ found = 0;
+
+ for (p = root_of_pids; p; p = p->next) {
+ if (unlikely(!p->sortlist && !p->children_count))
+ p->sortlist = sortlist++;
+
+ if (unlikely(
+ !p->children_count // if this process does not have any children
+ && !p->merged // and is not already merged
+ && p->parent // and has a parent
+ && p->parent->children_count // and its parent has children
+ // and the target of this process and its parent is the same,
+ // or the parent does not have a target
+ && (p->target == p->parent->target || !p->parent->target) &&
+ p->ppid != INIT_PID // and its parent is not init
+ )) {
+ // mark it as merged
+ p->parent->children_count--;
+ p->merged = 1;
+
+ // the parent inherits the child's target, if it does not have a target itself
+ if (unlikely(p->target && !p->parent->target)) {
+ p->parent->target = p->target;
+
+ if (debug_enabled || (p->target && p->target->debug_enabled))
+ debug_log_int(
+ "TARGET INHERITANCE: %s is inherited by %d (%s) from its child %d (%s).", p->target->name,
+ p->parent->pid, p->parent->comm, p->pid, p->comm);
+ }
+
+ found++;
+ }
+ }
+
+ debug_log("TARGET INHERITANCE: merged %d processes", found);
+ }
+
+    // init always goes to the default target
+ if (all_pids[INIT_PID])
+ all_pids[INIT_PID]->target = apps_groups_default_target;
+
+    // pid 0 always goes to the default target
+ if (all_pids[0])
+ all_pids[0]->target = apps_groups_default_target;
+
+ // give a default target on all top level processes
+ if (unlikely(debug_enabled))
+ loops++;
+ for (p = root_of_pids; p; p = p->next) {
+ // if the process is not merged itself
+        // then it is a top-level process
+ if (unlikely(!p->merged && !p->target))
+ p->target = apps_groups_default_target;
+
+ // make sure all processes have a sortlist
+ if (unlikely(!p->sortlist))
+ p->sortlist = sortlist++;
+ }
+
+ if (all_pids[1])
+ all_pids[1]->sortlist = sortlist++;
+
+ // give a target to all merged child processes
+ found = 1;
+ while (found) {
+ if (unlikely(debug_enabled))
+ loops++;
+ found = 0;
+ for (p = root_of_pids; p; p = p->next) {
+ if (unlikely(!p->target && p->merged && p->parent && p->parent->target)) {
+ p->target = p->parent->target;
+ found++;
+
+ if (debug_enabled || (p->target && p->target->debug_enabled))
+ debug_log_int(
+ "TARGET INHERITANCE: %s is inherited by %d (%s) from its parent %d (%s) at phase 2.",
+ p->target->name, p->pid, p->comm, p->parent->pid, p->parent->comm);
+ }
+ }
+ }
+
+ debug_log("apply_apps_groups_targets_inheritance() made %d loops on the process tree", loops);
+}
+
+/**
+ * Update target timestamp.
+ *
+ * @param root the targets that will be updated.
+ */
+static inline void post_aggregate_targets(struct target *root)
+{
+ struct target *w;
+ for (w = root; w; w = w->next) {
+ if (w->collected_starttime) {
+ if (!w->starttime || w->collected_starttime < w->starttime) {
+ w->starttime = w->collected_starttime;
+ }
+ } else {
+ w->starttime = 0;
+ }
+ }
+}
+
+/**
+ * Remove PID from the link list.
+ *
+ * @param pid the PID that will be removed.
+ */
+static inline void del_pid_entry(pid_t pid)
+{
+ struct pid_stat *p = all_pids[pid];
+
+ if (unlikely(!p)) {
+ error("attempted to free pid %d that is not allocated.", pid);
+ return;
+ }
+
+ debug_log("process %d %s exited, deleting it.", pid, p->comm);
+
+ if (root_of_pids == p)
+ root_of_pids = p->next;
+
+ if (p->next)
+ p->next->prev = p->prev;
+ if (p->prev)
+ p->prev->next = p->next;
+
+ freez(p->stat_filename);
+ freez(p->status_filename);
+ freez(p->io_filename);
+ freez(p->cmdline_filename);
+ freez(p->cmdline);
+ freez(p);
+
+ all_pids[pid] = NULL;
+ all_pids_count--;
+}
+
+/**
+ * Remove PIDs when they are no longer running.
+ */
+void cleanup_exited_pids()
+{
+ struct pid_stat *p = NULL;
+
+ for (p = root_of_pids; p;) {
+ if (!p->updated && (!p->keep || p->keeploops > 0)) {
+ if (unlikely(debug_enabled && (p->keep || p->keeploops)))
+ debug_log(" > CLEANUP cannot keep exited process %d (%s) anymore - removing it.", p->pid, p->comm);
+
+ pid_t r = p->pid;
+ p = p->next;
+ del_pid_entry(r);
+
+ // Clean process structure
+ freez(global_process_stats[r]);
+ global_process_stats[r] = NULL;
+
+ freez(current_apps_data[r]);
+ current_apps_data[r] = NULL;
+
+ // Clean socket structures
+ if (socket_bandwidth_curr) {
+ freez(socket_bandwidth_curr[r]);
+ socket_bandwidth_curr[r] = NULL;
+ }
+ } else {
+ if (unlikely(p->keep))
+ p->keeploops++;
+ p->keep = 0;
+ p = p->next;
+ }
+ }
+}
+
+/**
+ * Read the proc filesystem and collect data for every valid PID found.
+ */
+static inline void read_proc_filesystem()
+{
+ char dirname[FILENAME_MAX + 1];
+
+ snprintfz(dirname, FILENAME_MAX, "%s/proc", netdata_configured_host_prefix);
+ DIR *dir = opendir(dirname);
+ if (!dir)
+ return;
+
+ struct dirent *de = NULL;
+
+ while ((de = readdir(dir))) {
+ char *endptr = de->d_name;
+
+ if (unlikely(de->d_type != DT_DIR || de->d_name[0] < '0' || de->d_name[0] > '9'))
+ continue;
+
+ pid_t pid = (pid_t)strtoul(de->d_name, &endptr, 10);
+
+ // make sure we read a valid number
+ if (unlikely(endptr == de->d_name || *endptr != '\0'))
+ continue;
+
+ collect_data_for_pid(pid, NULL);
+ }
+ closedir(dir);
+}
+
+/**
+ * Aggregate PID on target
+ *
+ * @param w the target output
+ * @param p the pid with information to update
+ * @param o never used
+ */
+static inline void aggregate_pid_on_target(struct target *w, struct pid_stat *p, struct target *o)
+{
+ UNUSED(o);
+
+ if (unlikely(!p->updated)) {
+ // the process is not running
+ return;
+ }
+
+ if (unlikely(!w)) {
+ error("pid %d %s was left without a target!", p->pid, p->comm);
+ return;
+ }
+
+ w->processes++;
+ struct pid_on_target *pid_on_target = mallocz(sizeof(struct pid_on_target));
+ pid_on_target->pid = p->pid;
+ pid_on_target->next = w->root_pid;
+ w->root_pid = pid_on_target;
+}
+
+/**
+ * Collect data for all processes
+ *
+ * Read data from hash table and store it in appropriate vectors.
+ * It also creates the link between targets and PIDs.
+ *
+ * @param tbl_pid_stats_fd The mapped file descriptor for the hash table.
+ */
+void collect_data_for_all_processes(int tbl_pid_stats_fd)
+{
+ struct pid_stat *pids = root_of_pids; // global list of all processes running
+ while (pids) {
+ if (pids->updated_twice) {
+ pids->read = 0; // mark it as not read, so that collect_data_for_pid() will read it
+ pids->updated = 0;
+ pids->merged = 0;
+ pids->children_count = 0;
+ pids->parent = NULL;
+ } else {
+ if (pids->updated)
+ pids->updated_twice = 1;
+ }
+
+ pids = pids->next;
+ }
+
+ read_proc_filesystem();
+
+ uint32_t key;
+ pids = root_of_pids; // global list of all processes running
+ // while (bpf_map_get_next_key(tbl_pid_stats_fd, &key, &next_key) == 0) {
+ while (pids) {
+ key = pids->pid;
+ ebpf_process_stat_t *w = global_process_stats[key];
+ if (!w) {
+ w = mallocz(sizeof(ebpf_process_stat_t));
+ global_process_stats[key] = w;
+ }
+
+ if (bpf_map_lookup_elem(tbl_pid_stats_fd, &key, w)) {
+ // Clean Process structures
+ freez(w);
+ global_process_stats[key] = NULL;
+
+ freez(current_apps_data[key]);
+ current_apps_data[key] = NULL;
+
+ // Clean socket structures
+ if (socket_bandwidth_curr) {
+ freez(socket_bandwidth_curr[key]);
+ socket_bandwidth_curr[key] = NULL;
+ }
+
+ pids = pids->next;
+ continue;
+ }
+
+ pids = pids->next;
+ }
+
+ link_all_processes_to_their_parents();
+
+ apply_apps_groups_targets_inheritance();
+
+ apps_groups_targets_count = zero_all_targets(apps_groups_root_target);
+
+    // this has to be done before the cleanup:
+    // concentrate everything on the targets
+ for (pids = root_of_pids; pids; pids = pids->next)
+ aggregate_pid_on_target(pids->target, pids, NULL);
+
+ post_aggregate_targets(apps_groups_root_target);
+}
diff --git a/collectors/ebpf.plugin/ebpf_apps.h b/collectors/ebpf.plugin/ebpf_apps.h
new file mode 100644
index 0000000..f8cb7ac
--- /dev/null
+++ b/collectors/ebpf.plugin/ebpf_apps.h
@@ -0,0 +1,430 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_EBPF_APPS_H
+#define NETDATA_EBPF_APPS_H 1
+
+#include "libnetdata/threads/threads.h"
+#include "libnetdata/locks/locks.h"
+#include "libnetdata/avl/avl.h"
+#include "libnetdata/clocks/clocks.h"
+#include "libnetdata/config/appconfig.h"
+#include "libnetdata/ebpf/ebpf.h"
+
+#define NETDATA_APPS_FAMILY "apps"
+#define NETDATA_APPS_FILE_GROUP "ebpf file"
+#define NETDATA_APPS_VFS_GROUP "ebpf vfs"
+#define NETDATA_APPS_PROCESS_GROUP "ebpf process"
+#define NETDATA_APPS_NET_GROUP "ebpf net"
+
+#include "ebpf_process.h"
+
+#define MAX_COMPARE_NAME 100
+#define MAX_NAME 100
+
+// ----------------------------------------------------------------------------
+// process_pid_stat
+//
+// Fields read from the kernel ring for a specific PID
+//
+typedef struct process_pid_stat {
+ uint64_t pid_tgid; // Unique identifier
+ uint32_t pid; // process id
+
+ // Count number of calls done for specific function
+ uint32_t open_call;
+ uint32_t write_call;
+ uint32_t writev_call;
+ uint32_t read_call;
+ uint32_t readv_call;
+ uint32_t unlink_call;
+ uint32_t exit_call;
+ uint32_t release_call;
+ uint32_t fork_call;
+ uint32_t clone_call;
+ uint32_t close_call;
+
+ // Count number of bytes written or read
+ uint64_t write_bytes;
+ uint64_t writev_bytes;
+ uint64_t readv_bytes;
+ uint64_t read_bytes;
+
+ // Count number of errors for the specified function
+ uint32_t open_err;
+ uint32_t write_err;
+ uint32_t writev_err;
+ uint32_t read_err;
+ uint32_t readv_err;
+ uint32_t unlink_err;
+ uint32_t fork_err;
+ uint32_t clone_err;
+ uint32_t close_err;
+} process_pid_stat_t;
+
+// ----------------------------------------------------------------------------
+// socket_bandwidth
+//
+// Fields read from the kernel ring for a specific PID
+//
+typedef struct socket_bandwidth {
+ uint64_t first;
+ uint64_t ct;
+ uint64_t sent;
+ uint64_t received;
+ unsigned char removed;
+} socket_bandwidth_t;
+
+// ----------------------------------------------------------------------------
+// pid_stat
+//
+// structure to store data for each process running
+// see: man proc for the description of the fields
+
+struct pid_fd {
+ int fd;
+
+#ifndef __FreeBSD__
+ ino_t inode;
+ char *filename;
+ uint32_t link_hash;
+ size_t cache_iterations_counter;
+ size_t cache_iterations_reset;
+#endif
+};
+
+struct target {
+ char compare[MAX_COMPARE_NAME + 1];
+ uint32_t comparehash;
+ size_t comparelen;
+
+ char id[MAX_NAME + 1];
+ uint32_t idhash;
+
+ char name[MAX_NAME + 1];
+
+ uid_t uid;
+ gid_t gid;
+
+ /* These variables are not necessary for eBPF collector
+ kernel_uint_t minflt;
+ kernel_uint_t cminflt;
+ kernel_uint_t majflt;
+ kernel_uint_t cmajflt;
+ kernel_uint_t utime;
+ kernel_uint_t stime;
+ kernel_uint_t gtime;
+ kernel_uint_t cutime;
+ kernel_uint_t cstime;
+ kernel_uint_t cgtime;
+ kernel_uint_t num_threads;
+ // kernel_uint_t rss;
+
+ kernel_uint_t status_vmsize;
+ kernel_uint_t status_vmrss;
+ kernel_uint_t status_vmshared;
+ kernel_uint_t status_rssfile;
+ kernel_uint_t status_rssshmem;
+ kernel_uint_t status_vmswap;
+
+ kernel_uint_t io_logical_bytes_read;
+ kernel_uint_t io_logical_bytes_written;
+ // kernel_uint_t io_read_calls;
+ // kernel_uint_t io_write_calls;
+ kernel_uint_t io_storage_bytes_read;
+ kernel_uint_t io_storage_bytes_written;
+ // kernel_uint_t io_cancelled_write_bytes;
+
+ int *target_fds;
+ int target_fds_size;
+
+ kernel_uint_t openfiles;
+ kernel_uint_t openpipes;
+ kernel_uint_t opensockets;
+ kernel_uint_t openinotifies;
+ kernel_uint_t openeventfds;
+ kernel_uint_t opentimerfds;
+ kernel_uint_t opensignalfds;
+ kernel_uint_t openeventpolls;
+ kernel_uint_t openother;
+ */
+
+ kernel_uint_t starttime;
+ kernel_uint_t collected_starttime;
+
+ /*
+ kernel_uint_t uptime_min;
+ kernel_uint_t uptime_sum;
+ kernel_uint_t uptime_max;
+ */
+
+ unsigned int processes; // how many processes have been merged to this
+ int exposed; // if set, we have sent this to netdata
+ int hidden; // if set, we set the hidden flag on the dimension
+ int debug_enabled;
+ int ends_with;
+ int starts_with; // if set, the compare string matches only the
+ // beginning of the command
+
+ struct pid_on_target *root_pid; // list of aggregated pids for target debugging
+
+ struct target *target; // the one that will be reported to netdata
+ struct target *next;
+};
+
+extern struct target *apps_groups_default_target;
+extern struct target *apps_groups_root_target;
+extern struct target *users_root_target;
+extern struct target *groups_root_target;
+
+struct pid_stat {
+ int32_t pid;
+ char comm[MAX_COMPARE_NAME + 1];
+ char *cmdline;
+
+ uint32_t log_thrown;
+
+ // char state;
+ int32_t ppid;
+
+ // int32_t pgrp;
+ // int32_t session;
+ // int32_t tty_nr;
+ // int32_t tpgid;
+ // uint64_t flags;
+
+ /*
+ // these are raw values collected
+ kernel_uint_t minflt_raw;
+ kernel_uint_t cminflt_raw;
+ kernel_uint_t majflt_raw;
+ kernel_uint_t cmajflt_raw;
+ kernel_uint_t utime_raw;
+ kernel_uint_t stime_raw;
+ kernel_uint_t gtime_raw; // guest_time
+ kernel_uint_t cutime_raw;
+ kernel_uint_t cstime_raw;
+ kernel_uint_t cgtime_raw; // cguest_time
+
+ // these are rates
+ kernel_uint_t minflt;
+ kernel_uint_t cminflt;
+ kernel_uint_t majflt;
+ kernel_uint_t cmajflt;
+ kernel_uint_t utime;
+ kernel_uint_t stime;
+ kernel_uint_t gtime;
+ kernel_uint_t cutime;
+ kernel_uint_t cstime;
+ kernel_uint_t cgtime;
+
+ // int64_t priority;
+ // int64_t nice;
+ int32_t num_threads;
+ // int64_t itrealvalue;
+ kernel_uint_t collected_starttime;
+ // kernel_uint_t vsize;
+ // kernel_uint_t rss;
+ // kernel_uint_t rsslim;
+ // kernel_uint_t starcode;
+ // kernel_uint_t endcode;
+ // kernel_uint_t startstack;
+ // kernel_uint_t kstkesp;
+ // kernel_uint_t kstkeip;
+ // uint64_t signal;
+ // uint64_t blocked;
+ // uint64_t sigignore;
+ // uint64_t sigcatch;
+ // uint64_t wchan;
+ // uint64_t nswap;
+ // uint64_t cnswap;
+ // int32_t exit_signal;
+ // int32_t processor;
+ // uint32_t rt_priority;
+ // uint32_t policy;
+ // kernel_uint_t delayacct_blkio_ticks;
+
+ uid_t uid;
+ gid_t gid;
+
+ kernel_uint_t status_vmsize;
+ kernel_uint_t status_vmrss;
+ kernel_uint_t status_vmshared;
+ kernel_uint_t status_rssfile;
+ kernel_uint_t status_rssshmem;
+ kernel_uint_t status_vmswap;
+#ifndef __FreeBSD__
+ ARL_BASE *status_arl;
+#endif
+
+ kernel_uint_t io_logical_bytes_read_raw;
+ kernel_uint_t io_logical_bytes_written_raw;
+ // kernel_uint_t io_read_calls_raw;
+ // kernel_uint_t io_write_calls_raw;
+ kernel_uint_t io_storage_bytes_read_raw;
+ kernel_uint_t io_storage_bytes_written_raw;
+ // kernel_uint_t io_cancelled_write_bytes_raw;
+
+ kernel_uint_t io_logical_bytes_read;
+ kernel_uint_t io_logical_bytes_written;
+ // kernel_uint_t io_read_calls;
+ // kernel_uint_t io_write_calls;
+ kernel_uint_t io_storage_bytes_read;
+ kernel_uint_t io_storage_bytes_written;
+ // kernel_uint_t io_cancelled_write_bytes;
+ */
+
+ struct pid_fd *fds; // array of fds it uses
+ size_t fds_size; // the size of the fds array
+
+ int children_count; // number of processes directly referencing this
+ unsigned char keep : 1; // 1 when we need to keep this process in memory even after it exited
+ int keeploops; // increases by 1 every time keep is 1 and updated 0
+ unsigned char updated : 1; // 1 when the process is currently running
+ unsigned char updated_twice : 1; // 1 when the process was running in the previous iteration
+ unsigned char merged : 1; // 1 when it has been merged to its parent
+ unsigned char read : 1; // 1 when we have already read this process for this iteration
+
+ int sortlist; // higher numbers = top on the process tree
+
+ // each process gets a unique number
+
+ struct target *target; // app_groups.conf targets
+ struct target *user_target; // uid based targets
+ struct target *group_target; // gid based targets
+
+ usec_t stat_collected_usec;
+ usec_t last_stat_collected_usec;
+
+ usec_t io_collected_usec;
+ usec_t last_io_collected_usec;
+
+ kernel_uint_t uptime;
+
+ char *fds_dirname; // the full directory name in /proc/PID/fd
+
+ char *stat_filename;
+ char *status_filename;
+ char *io_filename;
+ char *cmdline_filename;
+
+ struct pid_stat *parent;
+ struct pid_stat *prev;
+ struct pid_stat *next;
+};
+
+// ----------------------------------------------------------------------------
+// target
+//
+// target is the structure that processes are aggregated to be reported
+// to netdata.
+//
+// - Each entry in /etc/apps_groups.conf creates a target.
+// - Each user and group used by a process in the system, creates a target.
+struct pid_on_target {
+ int32_t pid;
+ struct pid_on_target *next;
+};
+
+// ----------------------------------------------------------------------------
+// Structures used to read information from kernel ring
+typedef struct ebpf_process_stat {
+ uint64_t pid_tgid;
+ uint32_t pid;
+
+ //Counter
+ uint32_t open_call;
+ uint32_t write_call;
+ uint32_t writev_call;
+ uint32_t read_call;
+ uint32_t readv_call;
+ uint32_t unlink_call;
+ uint32_t exit_call;
+ uint32_t release_call;
+ uint32_t fork_call;
+ uint32_t clone_call;
+ uint32_t close_call;
+
+ //Accumulator
+ uint64_t write_bytes;
+ uint64_t writev_bytes;
+ uint64_t readv_bytes;
+ uint64_t read_bytes;
+
+ //Counter
+ uint32_t open_err;
+ uint32_t write_err;
+ uint32_t writev_err;
+ uint32_t read_err;
+ uint32_t readv_err;
+ uint32_t unlink_err;
+ uint32_t fork_err;
+ uint32_t clone_err;
+ uint32_t close_err;
+
+ uint8_t removeme;
+} ebpf_process_stat_t;
+
+typedef struct ebpf_bandwidth {
+ uint32_t pid;
+
+ uint64_t first; // First timestamp
+ uint64_t ct; // Last timestamp
+ uint64_t bytes_sent; // Bytes sent
+ uint64_t bytes_received; // Bytes received
+ uint64_t call_tcp_sent; // Number of times tcp_sendmsg was called
+ uint64_t call_tcp_received; // Number of times tcp_cleanup_rbuf was called
+ uint64_t retransmit; // Number of times tcp_retransmit was called
+ uint64_t call_udp_sent; // Number of times udp_sendmsg was called
+ uint64_t call_udp_received; // Number of times udp_recvmsg was called
+} ebpf_bandwidth_t;
+
+/**
+ * Internal function used to write debug messages.
+ *
+ * @param fmt the format to create the message.
+ * @param ... the arguments to fill the format.
+ */
+static inline void debug_log_int(const char *fmt, ...)
+{
+ va_list args;
+
+ fprintf(stderr, "apps.plugin: ");
+ va_start(args, fmt);
+ vfprintf(stderr, fmt, args);
+ va_end(args);
+
+ fputc('\n', stderr);
+}
+
+// ----------------------------------------------------------------------------
+// Exported variables and functions
+//
+extern struct pid_stat **all_pids;
+
+extern int ebpf_read_apps_groups_conf(struct target **apps_groups_default_target,
+ struct target **apps_groups_root_target,
+ const char *path,
+ const char *file);
+
+extern void clean_apps_groups_target(struct target *apps_groups_root_target);
+
+extern size_t zero_all_targets(struct target *root);
+
+extern int am_i_running_as_root();
+
+extern void cleanup_exited_pids();
+
+extern int ebpf_read_hash_table(void *ep, int fd, uint32_t pid);
+
+extern size_t read_processes_statistic_using_pid_on_target(ebpf_process_stat_t **ep,
+ int fd,
+ struct pid_on_target *pids);
+
+extern size_t read_bandwidth_statistic_using_pid_on_target(ebpf_bandwidth_t **ep, int fd, struct pid_on_target *pids);
+
+extern void collect_data_for_all_processes(int tbl_pid_stats_fd);
+
+extern ebpf_process_stat_t **global_process_stats;
+extern ebpf_process_publish_apps_t **current_apps_data;
+
+#endif /* NETDATA_EBPF_APPS_H */
diff --git a/collectors/ebpf.plugin/ebpf_kernel_reject_list.txt b/collectors/ebpf.plugin/ebpf_kernel_reject_list.txt
new file mode 100644
index 0000000..d56b216
--- /dev/null
+++ b/collectors/ebpf.plugin/ebpf_kernel_reject_list.txt
@@ -0,0 +1 @@
+Ubuntu 4.18.0-13.
diff --git a/collectors/ebpf.plugin/ebpf_process.c b/collectors/ebpf.plugin/ebpf_process.c
new file mode 100644
index 0000000..27e39d1
--- /dev/null
+++ b/collectors/ebpf.plugin/ebpf_process.c
@@ -0,0 +1,1057 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include <sys/resource.h>
+
+#include "ebpf.h"
+#include "ebpf_process.h"
+
+/*****************************************************************
+ *
+ * GLOBAL VARIABLES
+ *
+ *****************************************************************/
+
+static char *process_dimension_names[NETDATA_KEY_PUBLISH_PROCESS_END] = { "open", "close", "delete", "read", "write",
+ "process", "task", "process", "thread" };
+static char *process_id_names[NETDATA_KEY_PUBLISH_PROCESS_END] = { "do_sys_open", "__close_fd", "vfs_unlink",
+ "vfs_read", "vfs_write", "do_exit",
+ "release_task", "_do_fork", "sys_clone" };
+static char *status[] = { "process", "zombie" };
+
+static netdata_idx_t *process_hash_values = NULL;
+static netdata_syscall_stat_t *process_aggregated_data = NULL;
+static netdata_publish_syscall_t *process_publish_aggregated = NULL;
+
+static ebpf_data_t process_data;
+
+ebpf_process_stat_t **global_process_stats = NULL;
+ebpf_process_publish_apps_t **current_apps_data = NULL;
+
+int process_enabled = 0;
+
+static int *map_fd = NULL;
+static struct bpf_object *objects = NULL;
+static struct bpf_link **probe_links = NULL;
+
+/*****************************************************************
+ *
+ * PROCESS DATA AND SEND TO NETDATA
+ *
+ *****************************************************************/
+
+/**
+ * Update the publish structure before sending data to Netdata.
+ *
+ * @param publish the first output structure with independent dimensions
+ * @param pvc the second output structure with correlated dimensions
+ * @param input the structure with the input data.
+ */
+static void ebpf_update_global_publish(
+ netdata_publish_syscall_t *publish, netdata_publish_vfs_common_t *pvc, netdata_syscall_stat_t *input)
+{
+ netdata_publish_syscall_t *move = publish;
+ int selector = NETDATA_KEY_PUBLISH_PROCESS_OPEN;
+ while (move) {
+ // Until NETDATA_KEY_PUBLISH_PROCESS_READ we are creating accumulators, so it is possible
+ // to use incremental charts, but after this we will do some math with the values, so we are storing
+ // absolute values
+ if (selector < NETDATA_KEY_PUBLISH_PROCESS_READ) {
+ move->ncall = input->call;
+ move->nbyte = input->bytes;
+ move->nerr = input->ecall;
+ } else {
+ move->ncall = (input->call > move->pcall) ? input->call - move->pcall : move->pcall - input->call;
+ move->nbyte = (input->bytes > move->pbyte) ? input->bytes - move->pbyte : move->pbyte - input->bytes;
+            move->nerr = (input->ecall > move->perr) ? input->ecall - move->perr : move->perr - input->ecall;
+
+ move->pcall = input->call;
+ move->pbyte = input->bytes;
+ move->perr = input->ecall;
+ }
+
+ input = input->next;
+ move = move->next;
+ selector++;
+ }
+
+ pvc->write = -((long)publish[NETDATA_KEY_PUBLISH_PROCESS_WRITE].nbyte);
+ pvc->read = (long)publish[NETDATA_KEY_PUBLISH_PROCESS_READ].nbyte;
+
+ pvc->running = (long)publish[NETDATA_KEY_PUBLISH_PROCESS_FORK].ncall - (long)publish[NETDATA_KEY_PUBLISH_PROCESS_CLONE].ncall;
+ publish[NETDATA_KEY_PUBLISH_PROCESS_RELEASE_TASK].ncall = -publish[NETDATA_KEY_PUBLISH_PROCESS_RELEASE_TASK].ncall;
+ pvc->zombie = (long)publish[NETDATA_KEY_PUBLISH_PROCESS_EXIT].ncall + (long)publish[NETDATA_KEY_PUBLISH_PROCESS_RELEASE_TASK].ncall;
+}
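+
+// Example of the status math above: with 10 _do_fork calls and 4 sys_clone
+// calls, pvc->running = 10 - 4 = 6; with 8 do_exit calls and 5 release_task
+// calls, release_task's ncall is negated, so pvc->zombie = 8 + (-5) = 3.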
+
+/**
+ * Call the necessary functions to create a chart.
+ *
+ * @param family the chart family
+ * @param move the pointer with the values that will be published
+ */
+static void write_status_chart(char *family, netdata_publish_vfs_common_t *pvc)
+{
+ write_begin_chart(family, NETDATA_PROCESS_STATUS_NAME);
+
+ write_chart_dimension(status[0], (long long)pvc->running);
+ write_chart_dimension(status[1], (long long)pvc->zombie);
+
+ write_end_chart();
+}
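+
+// For reference, the helpers above emit the Netdata external plugin protocol,
+// roughly (sketch; the exact chart id comes from NETDATA_PROCESS_STATUS_NAME):
+//   BEGIN ebpf.process_status
+//   SET process = <running>
+//   SET zombie = <zombie>
+//   END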
+
+/**
+ * Send data to Netdata calling auxiliary functions.
+ *
+ * @param em the structure with thread information
+ */
+static void ebpf_process_send_data(ebpf_module_t *em)
+{
+ netdata_publish_vfs_common_t pvc;
+ ebpf_update_global_publish(process_publish_aggregated, &pvc, process_aggregated_data);
+
+ write_count_chart(
+ NETDATA_FILE_OPEN_CLOSE_COUNT, NETDATA_EBPF_FAMILY, process_publish_aggregated, 2);
+
+ write_count_chart(
+ NETDATA_VFS_FILE_CLEAN_COUNT, NETDATA_EBPF_FAMILY, &process_publish_aggregated[NETDATA_DEL_START], 1);
+
+ write_count_chart(
+ NETDATA_VFS_FILE_IO_COUNT, NETDATA_EBPF_FAMILY, &process_publish_aggregated[NETDATA_IN_START_BYTE], 2);
+
+ write_count_chart(
+ NETDATA_EXIT_SYSCALL, NETDATA_EBPF_FAMILY, &process_publish_aggregated[NETDATA_EXIT_START], 2);
+ write_count_chart(
+ NETDATA_PROCESS_SYSCALL, NETDATA_EBPF_FAMILY, &process_publish_aggregated[NETDATA_PROCESS_START], 2);
+
+ write_status_chart(NETDATA_EBPF_FAMILY, &pvc);
+ if (em->mode < MODE_ENTRY) {
+ write_err_chart(
+ NETDATA_FILE_OPEN_ERR_COUNT, NETDATA_EBPF_FAMILY, process_publish_aggregated, 2);
+ write_err_chart(
+ NETDATA_VFS_FILE_ERR_COUNT, NETDATA_EBPF_FAMILY, &process_publish_aggregated[2], NETDATA_VFS_ERRORS);
+ write_err_chart(
+ NETDATA_PROCESS_ERROR_NAME, NETDATA_EBPF_FAMILY, &process_publish_aggregated[NETDATA_PROCESS_START], 2);
+ }
+
+ write_io_chart(NETDATA_VFS_IO_FILE_BYTES, NETDATA_EBPF_FAMILY,
+ process_id_names[NETDATA_KEY_PUBLISH_PROCESS_WRITE], (long long) pvc.write,
+ process_id_names[NETDATA_KEY_PUBLISH_PROCESS_READ], (long long)pvc.read);
+}
+
+/**
+ * Sum values for all PIDs of a target
+ *
+ * @param root   the structure with all available PIDs
+ * @param offset the byte offset of the field that we are reading
+ *
+ * @return it returns the sum of the field over all PIDs
+ */
+long long ebpf_process_sum_values_for_pids(struct pid_on_target *root, size_t offset)
+{
+ long long ret = 0;
+ while (root) {
+ int32_t pid = root->pid;
+ ebpf_process_publish_apps_t *w = current_apps_data[pid];
+ if (w) {
+ ret += get_value_from_structure((char *)w, offset);
+ }
+
+ root = root->next;
+ }
+
+ return ret;
+}
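+
+// Note: get_value_from_structure() is assumed to read a collected_number at
+// (char *)w + offset; callers pass offsetof() expressions so one summing
+// routine serves every field of ebpf_process_publish_apps_t.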
+
+/**
+ * Remove process pid
+ *
+ * Remove a PID from the task table when release_task was called for it.
+ */
+void ebpf_process_remove_pids()
+{
+ struct pid_stat *pids = root_of_pids;
+ int pid_fd = map_fd[0];
+ while (pids) {
+ uint32_t pid = pids->pid;
+ ebpf_process_stat_t *w = global_process_stats[pid];
+ if (w) {
+ if (w->removeme) {
+ freez(w);
+ global_process_stats[pid] = NULL;
+ bpf_map_delete_elem(pid_fd, &pid);
+ }
+ }
+
+ pids = pids->next;
+ }
+}
+
+/**
+ * Send data to Netdata calling auxiliary functions.
+ *
+ * @param em the structure with thread information
+ * @param root the target list.
+ */
+void ebpf_process_send_apps_data(ebpf_module_t *em, struct target *root)
+{
+ struct target *w;
+ collected_number value;
+
+ write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_FILE_OPEN);
+ for (w = root; w; w = w->next) {
+ if (unlikely(w->exposed && w->processes)) {
+ value = ebpf_process_sum_values_for_pids(w->root_pid, offsetof(ebpf_process_publish_apps_t, call_sys_open));
+ write_chart_dimension(w->name, value);
+ }
+ }
+ write_end_chart();
+
+ if (em->mode < MODE_ENTRY) {
+ write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_FILE_OPEN_ERROR);
+ for (w = root; w; w = w->next) {
+ if (unlikely(w->exposed && w->processes)) {
+ value = ebpf_process_sum_values_for_pids(
+ w->root_pid, offsetof(ebpf_process_publish_apps_t, ecall_sys_open));
+ write_chart_dimension(w->name, value);
+ }
+ }
+ write_end_chart();
+ }
+
+ write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_FILE_CLOSED);
+ for (w = root; w; w = w->next) {
+ if (unlikely(w->exposed && w->processes)) {
+ value =
+ ebpf_process_sum_values_for_pids(w->root_pid, offsetof(ebpf_process_publish_apps_t, call_close_fd));
+ write_chart_dimension(w->name, value);
+ }
+ }
+ write_end_chart();
+
+ if (em->mode < MODE_ENTRY) {
+ write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_FILE_CLOSE_ERROR);
+ for (w = root; w; w = w->next) {
+ if (unlikely(w->exposed && w->processes)) {
+ value = ebpf_process_sum_values_for_pids(
+ w->root_pid, offsetof(ebpf_process_publish_apps_t, ecall_close_fd));
+ write_chart_dimension(w->name, value);
+ }
+ }
+ write_end_chart();
+ }
+
+ write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_FILE_DELETED);
+ for (w = root; w; w = w->next) {
+ if (unlikely(w->exposed && w->processes)) {
+ value =
+ ebpf_process_sum_values_for_pids(w->root_pid, offsetof(ebpf_process_publish_apps_t, call_vfs_unlink));
+ write_chart_dimension(w->name, value);
+ }
+ }
+ write_end_chart();
+
+ write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_VFS_WRITE_CALLS);
+ for (w = root; w; w = w->next) {
+ if (unlikely(w->exposed && w->processes)) {
+ value = ebpf_process_sum_values_for_pids(
+ w->root_pid, offsetof(ebpf_process_publish_apps_t, call_write));
+ write_chart_dimension(w->name, value);
+ }
+ }
+ write_end_chart();
+
+ if (em->mode < MODE_ENTRY) {
+ write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_VFS_WRITE_CALLS_ERROR);
+ for (w = root; w; w = w->next) {
+ if (unlikely(w->exposed && w->processes)) {
+ value = ebpf_process_sum_values_for_pids(
+ w->root_pid, offsetof(ebpf_process_publish_apps_t, ecall_write));
+ write_chart_dimension(w->name, value);
+ }
+ }
+ write_end_chart();
+ }
+
+ write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_VFS_READ_CALLS);
+ for (w = root; w; w = w->next) {
+ if (unlikely(w->exposed && w->processes)) {
+ value =
+ ebpf_process_sum_values_for_pids(w->root_pid, offsetof(ebpf_process_publish_apps_t, call_read));
+ write_chart_dimension(w->name, value);
+ }
+ }
+ write_end_chart();
+
+ if (em->mode < MODE_ENTRY) {
+ write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_VFS_READ_CALLS_ERROR);
+ for (w = root; w; w = w->next) {
+ if (unlikely(w->exposed && w->processes)) {
+ value = ebpf_process_sum_values_for_pids(
+ w->root_pid, offsetof(ebpf_process_publish_apps_t, ecall_read));
+ write_chart_dimension(w->name, value);
+ }
+ }
+ write_end_chart();
+ }
+
+ write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_VFS_WRITE_BYTES);
+ for (w = root; w; w = w->next) {
+ if (unlikely(w->exposed && w->processes)) {
+ value = ebpf_process_sum_values_for_pids(
+ w->root_pid, offsetof(ebpf_process_publish_apps_t, bytes_written));
+ write_chart_dimension(w->name, value);
+ }
+ }
+ write_end_chart();
+
+ write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_VFS_READ_BYTES);
+ for (w = root; w; w = w->next) {
+ if (unlikely(w->exposed && w->processes)) {
+ value = ebpf_process_sum_values_for_pids(
+ w->root_pid, offsetof(ebpf_process_publish_apps_t, bytes_read));
+ write_chart_dimension(w->name, value);
+ }
+ }
+ write_end_chart();
+
+ write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_TASK_PROCESS);
+ for (w = root; w; w = w->next) {
+ if (unlikely(w->exposed && w->processes)) {
+ value =
+ ebpf_process_sum_values_for_pids(w->root_pid, offsetof(ebpf_process_publish_apps_t, call_do_fork));
+ write_chart_dimension(w->name, value);
+ }
+ }
+ write_end_chart();
+
+ write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_TASK_THREAD);
+ for (w = root; w; w = w->next) {
+ if (unlikely(w->exposed && w->processes)) {
+ value =
+ ebpf_process_sum_values_for_pids(w->root_pid, offsetof(ebpf_process_publish_apps_t, call_sys_clone));
+ write_chart_dimension(w->name, value);
+ }
+ }
+ write_end_chart();
+
+ write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_TASK_CLOSE);
+ for (w = root; w; w = w->next) {
+ if (unlikely(w->exposed && w->processes)) {
+ value = ebpf_process_sum_values_for_pids(w->root_pid, offsetof(ebpf_process_publish_apps_t,
+ call_release_task));
+ write_chart_dimension(w->name, value);
+ }
+ }
+ write_end_chart();
+
+ ebpf_process_remove_pids();
+}
+
+/*****************************************************************
+ *
+ * READ INFORMATION FROM KERNEL RING
+ *
+ *****************************************************************/
+
+/**
+ * Read the hash table and store data to allocated vectors.
+ */
+static void read_hash_global_tables()
+{
+ uint64_t idx;
+ netdata_idx_t res[NETDATA_GLOBAL_VECTOR];
+
+ netdata_idx_t *val = process_hash_values;
+ for (idx = 0; idx < NETDATA_GLOBAL_VECTOR; idx++) {
+ if (!bpf_map_lookup_elem(map_fd[1], &idx, val)) {
+ uint64_t total = 0;
+ int i;
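+            // assumption: on kernels >= 4.15 the map is per-CPU, so one value
+            // per processor must be summed; older kernels expose a single slot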
+ int end = (running_on_kernel < NETDATA_KERNEL_V4_15) ? 1 : ebpf_nprocs;
+ for (i = 0; i < end; i++)
+ total += val[i];
+
+ res[idx] = total;
+ } else {
+ res[idx] = 0;
+ }
+ }
+
+ process_aggregated_data[NETDATA_KEY_PUBLISH_PROCESS_OPEN].call = res[NETDATA_KEY_CALLS_DO_SYS_OPEN];
+ process_aggregated_data[NETDATA_KEY_PUBLISH_PROCESS_CLOSE].call = res[NETDATA_KEY_CALLS_CLOSE_FD];
+ process_aggregated_data[NETDATA_KEY_PUBLISH_PROCESS_UNLINK].call = res[NETDATA_KEY_CALLS_VFS_UNLINK];
+ process_aggregated_data[NETDATA_KEY_PUBLISH_PROCESS_READ].call = res[NETDATA_KEY_CALLS_VFS_READ] + res[NETDATA_KEY_CALLS_VFS_READV];
+ process_aggregated_data[NETDATA_KEY_PUBLISH_PROCESS_WRITE].call = res[NETDATA_KEY_CALLS_VFS_WRITE] + res[NETDATA_KEY_CALLS_VFS_WRITEV];
+ process_aggregated_data[NETDATA_KEY_PUBLISH_PROCESS_EXIT].call = res[NETDATA_KEY_CALLS_DO_EXIT];
+ process_aggregated_data[NETDATA_KEY_PUBLISH_PROCESS_RELEASE_TASK].call = res[NETDATA_KEY_CALLS_RELEASE_TASK];
+ process_aggregated_data[NETDATA_KEY_PUBLISH_PROCESS_FORK].call = res[NETDATA_KEY_CALLS_DO_FORK];
+ process_aggregated_data[NETDATA_KEY_PUBLISH_PROCESS_CLONE].call = res[NETDATA_KEY_CALLS_SYS_CLONE];
+
+ process_aggregated_data[NETDATA_KEY_PUBLISH_PROCESS_OPEN].ecall = res[NETDATA_KEY_ERROR_DO_SYS_OPEN];
+ process_aggregated_data[NETDATA_KEY_PUBLISH_PROCESS_CLOSE].ecall = res[NETDATA_KEY_ERROR_CLOSE_FD];
+ process_aggregated_data[NETDATA_KEY_PUBLISH_PROCESS_UNLINK].ecall = res[NETDATA_KEY_ERROR_VFS_UNLINK];
+ process_aggregated_data[NETDATA_KEY_PUBLISH_PROCESS_READ].ecall = res[NETDATA_KEY_ERROR_VFS_READ] + res[NETDATA_KEY_ERROR_VFS_READV];
+ process_aggregated_data[NETDATA_KEY_PUBLISH_PROCESS_WRITE].ecall = res[NETDATA_KEY_ERROR_VFS_WRITE] + res[NETDATA_KEY_ERROR_VFS_WRITEV];
+ process_aggregated_data[NETDATA_KEY_PUBLISH_PROCESS_FORK].ecall = res[NETDATA_KEY_ERROR_DO_FORK];
+ process_aggregated_data[NETDATA_KEY_PUBLISH_PROCESS_CLONE].ecall = res[NETDATA_KEY_ERROR_SYS_CLONE];
+
+ process_aggregated_data[NETDATA_KEY_PUBLISH_PROCESS_WRITE].bytes = (uint64_t)res[NETDATA_KEY_BYTES_VFS_WRITE] +
+ (uint64_t)res[NETDATA_KEY_BYTES_VFS_WRITEV];
+ process_aggregated_data[NETDATA_KEY_PUBLISH_PROCESS_READ].bytes = (uint64_t)res[NETDATA_KEY_BYTES_VFS_READ] +
+ (uint64_t)res[NETDATA_KEY_BYTES_VFS_READV];
+}
+
+/**
+ * Read the hash table and store data to allocated vectors.
+ */
+static void ebpf_process_update_apps_data()
+{
+ struct pid_stat *pids = root_of_pids;
+ while (pids) {
+ uint32_t current_pid = pids->pid;
+ ebpf_process_stat_t *ps = global_process_stats[current_pid];
+ if (!ps) {
+ pids = pids->next;
+ continue;
+ }
+
+ ebpf_process_publish_apps_t *cad = current_apps_data[current_pid];
+ if (!cad) {
+ cad = callocz(1, sizeof(ebpf_process_publish_apps_t));
+ current_apps_data[current_pid] = cad;
+ }
+
+ //Read data
+ cad->call_sys_open = ps->open_call;
+ cad->call_close_fd = ps->close_call;
+ cad->call_vfs_unlink = ps->unlink_call;
+ cad->call_read = ps->read_call + ps->readv_call;
+ cad->call_write = ps->write_call + ps->writev_call;
+ cad->call_do_exit = ps->exit_call;
+ cad->call_release_task = ps->release_call;
+ cad->call_do_fork = ps->fork_call;
+ cad->call_sys_clone = ps->clone_call;
+
+ cad->ecall_sys_open = ps->open_err;
+ cad->ecall_close_fd = ps->close_err;
+ cad->ecall_vfs_unlink = ps->unlink_err;
+ cad->ecall_read = ps->read_err + ps->readv_err;
+ cad->ecall_write = ps->write_err + ps->writev_err;
+ cad->ecall_do_fork = ps->fork_err;
+ cad->ecall_sys_clone = ps->clone_err;
+
+        cad->bytes_written = (uint64_t)ps->write_bytes + (uint64_t)ps->writev_bytes;
+ cad->bytes_read = (uint64_t)ps->read_bytes + (uint64_t)ps->readv_bytes;
+
+ pids = pids->next;
+ }
+}
+
+/*****************************************************************
+ *
+ * FUNCTIONS TO CREATE CHARTS
+ *
+ *****************************************************************/
+
+/**
+ * Create IO chart
+ *
+ * @param family the chart family
+ * @param name the chart name
+ * @param axis the axis label
+ * @param web the group name used to attach the chart on dashboard
+ * @param order the order number of the specified chart
+ * @param algorithm the algorithm used to make the charts.
+ */
+static void ebpf_create_io_chart(char *family, char *name, char *axis, char *web, int order, int algorithm)
+{
+ printf("CHART %s.%s '' 'Bytes written and read' '%s' '%s' '' line %d %d\n",
+ family,
+ name,
+ axis,
+ web,
+ order,
+ update_every);
+
+ printf("DIMENSION %s %s %s 1 1\n",
+ process_id_names[NETDATA_KEY_PUBLISH_PROCESS_READ],
+ process_dimension_names[NETDATA_KEY_PUBLISH_PROCESS_READ],
+ ebpf_algorithms[algorithm]);
+ printf("DIMENSION %s %s %s 1 1\n",
+ process_id_names[NETDATA_KEY_PUBLISH_PROCESS_WRITE],
+ process_dimension_names[NETDATA_KEY_PUBLISH_PROCESS_WRITE],
+ ebpf_algorithms[algorithm]);
+}
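+
+// For reference, the printfs above emit plugin protocol lines such as
+// (sketch; macro values and the update interval are illustrative):
+//   CHART ebpf.<name> '' 'Bytes written and read' '<axis>' '<web>' '' line <order> <every>
+//   DIMENSION vfs_read read <algorithm> 1 1
+//   DIMENSION vfs_write write <algorithm> 1 1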
+
+/**
+ * Create process status chart
+ *
+ * @param family the chart family
+ * @param name the chart name
+ * @param axis the axis label
+ * @param web the group name used to attach the chart on dashboard
+ * @param algorithm the algorithm used for the dimensions
+ * @param order the order number of the specified chart
+ */
+static void ebpf_process_status_chart(char *family, char *name, char *axis,
+ char *web, char *algorithm, int order)
+{
+ printf("CHART %s.%s '' 'Process not closed' '%s' '%s' '' line %d %d ''\n",
+ family,
+ name,
+ axis,
+ web,
+ order,
+ update_every);
+
+ printf("DIMENSION %s '' %s 1 1\n", status[0], algorithm);
+ printf("DIMENSION %s '' %s 1 1\n", status[1], algorithm);
+}
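+
+/*
+ * For illustration only (all argument values below are hypothetical): calling
+ * the helper above with family "ebpf", name "process_status", axis
+ * "difference", web "process", algorithm "absolute", order 21008 and
+ * update_every = 1 would emit the following plugin-protocol lines on stdout:
+ *
+ *   CHART ebpf.process_status '' 'Process not closed' 'difference' 'process' '' line 21008 1 ''
+ *   DIMENSION process '' absolute 1 1
+ *   DIMENSION zombie '' absolute 1 1
+ *
+ * assuming the global status[] vector holds { "process", "zombie" }.
+ */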
+
+/**
+ * Create global charts
+ *
+ * Call ebpf_create_chart to create the charts for the collector.
+ *
+ * @param em a pointer to the structure with the default values.
+ */
+static void ebpf_create_global_charts(ebpf_module_t *em)
+{
+ ebpf_create_chart(NETDATA_EBPF_FAMILY,
+ NETDATA_FILE_OPEN_CLOSE_COUNT,
+ "Open and close calls",
+ EBPF_COMMON_DIMENSION_CALL,
+ NETDATA_FILE_GROUP,
+ 21000,
+ ebpf_create_global_dimension,
+ process_publish_aggregated,
+ 2);
+
+ if (em->mode < MODE_ENTRY) {
+ ebpf_create_chart(NETDATA_EBPF_FAMILY,
+ NETDATA_FILE_OPEN_ERR_COUNT,
+ "Open fails",
+ EBPF_COMMON_DIMENSION_CALL,
+ NETDATA_FILE_GROUP,
+ 21001,
+ ebpf_create_global_dimension,
+ process_publish_aggregated,
+ 2);
+ }
+
+ ebpf_create_chart(NETDATA_EBPF_FAMILY,
+ NETDATA_VFS_FILE_CLEAN_COUNT,
+ "Remove files",
+ EBPF_COMMON_DIMENSION_CALL,
+ NETDATA_VFS_GROUP,
+ 21002,
+ ebpf_create_global_dimension,
+ &process_publish_aggregated[NETDATA_DEL_START],
+ 1);
+
+ ebpf_create_chart(NETDATA_EBPF_FAMILY,
+ NETDATA_VFS_FILE_IO_COUNT,
+ "Calls to IO",
+ EBPF_COMMON_DIMENSION_CALL,
+ NETDATA_VFS_GROUP,
+ 21003,
+ ebpf_create_global_dimension,
+ &process_publish_aggregated[NETDATA_IN_START_BYTE],
+ 2);
+
+ ebpf_create_io_chart(NETDATA_EBPF_FAMILY,
+ NETDATA_VFS_IO_FILE_BYTES, EBPF_COMMON_DIMENSION_BYTES,
+ NETDATA_VFS_GROUP,
+ 21004,
+ NETDATA_EBPF_ABSOLUTE_IDX);
+
+ if (em->mode < MODE_ENTRY) {
+ ebpf_create_chart(NETDATA_EBPF_FAMILY,
+ NETDATA_VFS_FILE_ERR_COUNT,
+ "Fails to write or read",
+ EBPF_COMMON_DIMENSION_CALL,
+ NETDATA_VFS_GROUP,
+ 21005,
+ ebpf_create_global_dimension,
+ &process_publish_aggregated[2],
+ NETDATA_VFS_ERRORS);
+ }
+
+ ebpf_create_chart(NETDATA_EBPF_FAMILY,
+ NETDATA_PROCESS_SYSCALL,
+ "Start process",
+ EBPF_COMMON_DIMENSION_CALL,
+ NETDATA_PROCESS_GROUP,
+ 21006,
+ ebpf_create_global_dimension,
+ &process_publish_aggregated[NETDATA_PROCESS_START],
+ 2);
+
+ ebpf_create_chart(NETDATA_EBPF_FAMILY,
+ NETDATA_EXIT_SYSCALL,
+ "Exit process",
+ EBPF_COMMON_DIMENSION_CALL,
+ NETDATA_PROCESS_GROUP,
+ 21007,
+ ebpf_create_global_dimension,
+ &process_publish_aggregated[NETDATA_EXIT_START],
+ 2);
+
+ ebpf_process_status_chart(NETDATA_EBPF_FAMILY,
+ NETDATA_PROCESS_STATUS_NAME,
+ EBPF_COMMON_DIMENSION_DIFFERENCE,
+ NETDATA_PROCESS_GROUP,
+ ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX],
+ 21008);
+
+ if (em->mode < MODE_ENTRY) {
+ ebpf_create_chart(NETDATA_EBPF_FAMILY,
+ NETDATA_PROCESS_ERROR_NAME,
+ "Fails to create process",
+ EBPF_COMMON_DIMENSION_CALL,
+ NETDATA_PROCESS_GROUP,
+ 21009,
+ ebpf_create_global_dimension,
+ &process_publish_aggregated[NETDATA_PROCESS_START],
+ 2);
+ }
+}
+
+/**
+ * Create process apps charts
+ *
+ * Call ebpf_create_chart to create the charts on apps submenu.
+ *
+ * @param em a pointer to the structure with the default values.
+ * @param root a pointer for the targets.
+ */
+static void ebpf_process_create_apps_charts(ebpf_module_t *em, struct target *root)
+{
+ ebpf_create_charts_on_apps(NETDATA_SYSCALL_APPS_FILE_OPEN,
+ "Number of open files",
+ EBPF_COMMON_DIMENSION_CALL,
+ NETDATA_APPS_FILE_GROUP,
+ 20061,
+ ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX],
+ root);
+
+ if (em->mode < MODE_ENTRY) {
+ ebpf_create_charts_on_apps(NETDATA_SYSCALL_APPS_FILE_OPEN_ERROR,
+ "Fails to open files",
+ EBPF_COMMON_DIMENSION_CALL,
+ NETDATA_APPS_FILE_GROUP,
+ 20062,
+ ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX],
+ root);
+ }
+
+ ebpf_create_charts_on_apps(NETDATA_SYSCALL_APPS_FILE_CLOSED,
+ "Files closed",
+ EBPF_COMMON_DIMENSION_CALL,
+ NETDATA_APPS_FILE_GROUP,
+ 20063,
+ ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX],
+ root);
+
+ if (em->mode < MODE_ENTRY) {
+ ebpf_create_charts_on_apps(NETDATA_SYSCALL_APPS_FILE_CLOSE_ERROR,
+ "Fails to close files",
+ EBPF_COMMON_DIMENSION_CALL,
+ NETDATA_APPS_FILE_GROUP,
+ 20064,
+ ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX],
+ root);
+ }
+
+ ebpf_create_charts_on_apps(NETDATA_SYSCALL_APPS_FILE_DELETED,
+ "Files deleted",
+ EBPF_COMMON_DIMENSION_CALL,
+ NETDATA_APPS_VFS_GROUP,
+ 20065,
+ ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX],
+ root);
+
+ ebpf_create_charts_on_apps(NETDATA_SYSCALL_APPS_VFS_WRITE_CALLS,
+ "Write to disk",
+ EBPF_COMMON_DIMENSION_CALL,
+ NETDATA_APPS_VFS_GROUP,
+ 20066,
+ ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX],
+ apps_groups_root_target);
+
+ if (em->mode < MODE_ENTRY) {
+ ebpf_create_charts_on_apps(NETDATA_SYSCALL_APPS_VFS_WRITE_CALLS_ERROR,
+ "Fails to write",
+ EBPF_COMMON_DIMENSION_CALL,
+ NETDATA_APPS_VFS_GROUP,
+ 20067,
+ ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX],
+ root);
+ }
+
+ ebpf_create_charts_on_apps(NETDATA_SYSCALL_APPS_VFS_READ_CALLS,
+ "Read from disk",
+ EBPF_COMMON_DIMENSION_CALL,
+ NETDATA_APPS_VFS_GROUP,
+ 20068,
+ ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX],
+ root);
+
+ if (em->mode < MODE_ENTRY) {
+ ebpf_create_charts_on_apps(NETDATA_SYSCALL_APPS_VFS_READ_CALLS_ERROR,
+ "Fails to read",
+ EBPF_COMMON_DIMENSION_CALL,
+ NETDATA_APPS_VFS_GROUP,
+ 20069,
+ ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX],
+ root);
+ }
+
+ ebpf_create_charts_on_apps(NETDATA_SYSCALL_APPS_VFS_WRITE_BYTES,
+ "Bytes written on disk", EBPF_COMMON_DIMENSION_BYTES,
+ NETDATA_APPS_VFS_GROUP,
+ 20070,
+ ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX],
+ root);
+
+ ebpf_create_charts_on_apps(NETDATA_SYSCALL_APPS_VFS_READ_BYTES,
+ "Bytes read from disk", EBPF_COMMON_DIMENSION_BYTES,
+ NETDATA_APPS_VFS_GROUP,
+ 20071,
+ ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX],
+ root);
+
+ ebpf_create_charts_on_apps(NETDATA_SYSCALL_APPS_TASK_PROCESS,
+ "Process started",
+ EBPF_COMMON_DIMENSION_CALL,
+ NETDATA_APPS_PROCESS_GROUP,
+ 20072,
+ ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX],
+ root);
+
+ ebpf_create_charts_on_apps(NETDATA_SYSCALL_APPS_TASK_THREAD,
+ "Threads started",
+ EBPF_COMMON_DIMENSION_CALL,
+ NETDATA_APPS_PROCESS_GROUP,
+ 20073,
+ ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX],
+ root);
+
+ ebpf_create_charts_on_apps(NETDATA_SYSCALL_APPS_TASK_CLOSE,
+ "Tasks closed",
+ EBPF_COMMON_DIMENSION_CALL,
+ NETDATA_APPS_PROCESS_GROUP,
+ 20074,
+ ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX],
+ root);
+}
+
+/**
+ * Create apps charts
+ *
+ * Call ebpf_create_chart to create the charts on apps submenu.
+ *
+ * @param em a pointer to the structure with the default values.
+ * @param root a pointer for the targets.
+ */
+static void ebpf_create_apps_charts(ebpf_module_t *em, struct target *root)
+{
+ struct target *w;
+ int newly_added = 0;
+
+ for (w = root; w; w = w->next) {
+ if (w->target)
+ continue;
+
+ if (unlikely(w->processes && (debug_enabled || w->debug_enabled))) {
+ struct pid_on_target *pid_on_target;
+
+ fprintf(
+ stderr, "ebpf.plugin: target '%s' has aggregated %u process%s:", w->name, w->processes,
+ (w->processes == 1) ? "" : "es");
+
+ for (pid_on_target = w->root_pid; pid_on_target; pid_on_target = pid_on_target->next) {
+ fprintf(stderr, " %d", pid_on_target->pid);
+ }
+
+ fputc('\n', stderr);
+ }
+
+ if (!w->exposed && w->processes) {
+ newly_added++;
+ w->exposed = 1;
+ if (debug_enabled || w->debug_enabled)
+ debug_log_int("%s just added - regenerating charts.", w->name);
+ }
+ }
+
+ if (!newly_added)
+ return;
+
+ if (ebpf_modules[EBPF_MODULE_PROCESS_IDX].apps_charts)
+ ebpf_process_create_apps_charts(em, root);
+
+ if (ebpf_modules[EBPF_MODULE_SOCKET_IDX].apps_charts)
+ ebpf_socket_create_apps_charts(NULL, root);
+}
+
+/*****************************************************************
+ *
+ * FUNCTIONS WITH THE MAIN LOOP
+ *
+ *****************************************************************/
+
+/**
+ * Main loop for this collector.
+ *
+ * @param step the number of microseconds used with the heartbeat
+ * @param em the structure with thread information
+ */
+static void process_collector(usec_t step, ebpf_module_t *em)
+{
+ heartbeat_t hb;
+ heartbeat_init(&hb);
+ int publish_global = em->global_charts;
+ int apps_enabled = em->apps_charts;
+ int pid_fd = map_fd[0];
+ while (!close_ebpf_plugin) {
+ usec_t dt = heartbeat_next(&hb, step);
+ (void)dt;
+
+ read_hash_global_tables();
+
+ pthread_mutex_lock(&collect_data_mutex);
+ cleanup_exited_pids();
+ collect_data_for_all_processes(pid_fd);
+
+ ebpf_create_apps_charts(em, apps_groups_root_target);
+
+ pthread_cond_broadcast(&collect_data_cond_var);
+ pthread_mutex_unlock(&collect_data_mutex);
+
+ int publish_apps = 0;
+ if (apps_enabled && all_pids_count > 0) {
+ publish_apps = 1;
+ ebpf_process_update_apps_data();
+ }
+
+ pthread_mutex_lock(&lock);
+ if (publish_global) {
+ ebpf_process_send_data(em);
+ }
+
+ if (publish_apps) {
+ ebpf_process_send_apps_data(em, apps_groups_root_target);
+ }
+ pthread_mutex_unlock(&lock);
+
+ fflush(stdout);
+ }
+}
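+
+/*
+ * Reading note on the loop above (an interpretation of the code, not an
+ * authoritative design statement): kernel tables are read first, per-PID data
+ * is updated under collect_data_mutex because it is shared with other reader
+ * threads, and chart output happens under the global `lock`, since every
+ * collector thread writes to the same stdout pipe.
+ */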
+
+/*****************************************************************
+ *
+ * FUNCTIONS TO CLOSE THE THREAD
+ *
+ *****************************************************************/
+
+void clean_global_memory() {
+ int pid_fd = map_fd[0];
+ struct pid_stat *pids = root_of_pids;
+ while (pids) {
+ uint32_t pid = pids->pid;
+ freez(global_process_stats[pid]);
+
+ bpf_map_delete_elem(pid_fd, &pid);
+ freez(current_apps_data[pid]);
+
+ pids = pids->next;
+ }
+}
+
+void clean_pid_on_target(struct pid_on_target *ptr) {
+ while (ptr) {
+ struct pid_on_target *next = ptr->next;
+ freez(ptr);
+
+ ptr = next;
+ }
+}
+
+void clean_apps_structures(struct target *ptr) {
+ struct target *agdt = ptr;
+ while (agdt) {
+ struct target *next = agdt->next;
+ clean_pid_on_target(agdt->root_pid);
+ freez(agdt);
+
+ agdt = next;
+ }
+}
+
+/**
+ * Clean up the main thread.
+ *
+ * @param ptr thread data.
+ */
+static void ebpf_process_cleanup(void *ptr)
+{
+ UNUSED(ptr);
+
+ heartbeat_t hb;
+ heartbeat_init(&hb);
+ uint32_t tick = 50*USEC_PER_MS;
+ while (!finalized_threads) {
+ usec_t dt = heartbeat_next(&hb, tick);
+ UNUSED(dt);
+ }
+
+ freez(process_aggregated_data);
+ ebpf_cleanup_publish_syscall(process_publish_aggregated);
+ freez(process_publish_aggregated);
+ freez(process_hash_values);
+
+ clean_global_memory();
+ freez(global_process_stats);
+ freez(current_apps_data);
+
+ clean_apps_structures(apps_groups_root_target);
+ freez(process_data.map_fd);
+
+ struct bpf_program *prog;
+ size_t i = 0;
+ bpf_object__for_each_program(prog, objects) {
+ bpf_link__destroy(probe_links[i]);
+ i++;
+ }
+ bpf_object__close(objects);
+}
+
+/*****************************************************************
+ *
+ * FUNCTIONS TO START THREAD
+ *
+ *****************************************************************/
+
+/**
+ * Allocate vectors used with this thread.
+ * We do not test the return value, because callocz already does that and
+ * shuts the software down when the allocation is not possible.
+ *
+ * @param length is the length for the vectors used inside the collector.
+ */
+static void ebpf_process_allocate_global_vectors(size_t length)
+{
+ process_aggregated_data = callocz(length, sizeof(netdata_syscall_stat_t));
+ process_publish_aggregated = callocz(length, sizeof(netdata_publish_syscall_t));
+ process_hash_values = callocz(ebpf_nprocs, sizeof(netdata_idx_t));
+
+ global_process_stats = callocz((size_t)pid_max, sizeof(ebpf_process_stat_t *));
+ current_apps_data = callocz((size_t)pid_max, sizeof(ebpf_process_publish_apps_t *));
+}
+
+static void change_syscalls()
+{
+ static char *lfork = { "do_fork" };
+ process_id_names[7] = lfork;
+}
+
+/**
+ * Set local variables
+ *
+ */
+static void set_local_pointers()
+{
+ map_fd = process_data.map_fd;
+
+ if (process_data.isrh >= NETDATA_MINIMUM_RH_VERSION && process_data.isrh < NETDATA_RH_8)
+ change_syscalls();
+}
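+
+/*
+ * Note (assumption inferred from the version check above): Red Hat kernels
+ * older than RHEL 8 still expose the fork entry point as "do_fork" instead of
+ * the upstream "_do_fork", so position 7 of process_id_names is swapped
+ * before the probes are attached.
+ */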
+
+/*****************************************************************
+ *
+ * EBPF PROCESS THREAD
+ *
+ *****************************************************************/
+
+/**
+ * Wait for all threads to die
+ *
+ * Disable this module and wait until every eBPF module is marked as disabled.
+ */
+static void wait_for_all_threads_die()
+{
+ ebpf_modules[EBPF_MODULE_PROCESS_IDX].enabled = 0;
+
+ heartbeat_t hb;
+ heartbeat_init(&hb);
+
+ int max = 10;
+ int i;
+ for (i = 0; i < max; i++) {
+ heartbeat_next(&hb, 200000);
+
+ size_t j, counter = 0, compare = 0;
+ for (j = 0; ebpf_modules[j].thread_name; j++) {
+ if (!ebpf_modules[j].enabled)
+ counter++;
+
+ compare++;
+ }
+
+ if (counter == compare)
+ break;
+ }
+}
+
+/**
+ * Process thread
+ *
+ * Thread used to generate process charts.
+ *
+ * @param ptr a pointer to `struct ebpf_module`
+ *
+ * @return It always returns NULL
+ */
+void *ebpf_process_thread(void *ptr)
+{
+ netdata_thread_cleanup_push(ebpf_process_cleanup, ptr);
+
+ ebpf_module_t *em = (ebpf_module_t *)ptr;
+ process_enabled = em->enabled;
+ fill_ebpf_data(&process_data);
+
+ pthread_mutex_lock(&lock);
+ ebpf_process_allocate_global_vectors(NETDATA_MAX_MONITOR_VECTOR);
+
+ if (ebpf_update_kernel(&process_data)) {
+ pthread_mutex_unlock(&lock);
+ goto endprocess;
+ }
+
+ set_local_pointers();
+ probe_links = ebpf_load_program(ebpf_plugin_dir, em, kernel_string, &objects, process_data.map_fd);
+ if (!probe_links) {
+ pthread_mutex_unlock(&lock);
+ goto endprocess;
+ }
+
+ int algorithms[NETDATA_KEY_PUBLISH_PROCESS_END] = {
+ NETDATA_EBPF_INCREMENTAL_IDX, NETDATA_EBPF_INCREMENTAL_IDX, NETDATA_EBPF_INCREMENTAL_IDX, // open, close, unlink
+ NETDATA_EBPF_ABSOLUTE_IDX, NETDATA_EBPF_ABSOLUTE_IDX, NETDATA_EBPF_ABSOLUTE_IDX,
+ NETDATA_EBPF_ABSOLUTE_IDX, NETDATA_EBPF_ABSOLUTE_IDX, NETDATA_EBPF_ABSOLUTE_IDX
+ };
+
+ ebpf_global_labels(
+ process_aggregated_data, process_publish_aggregated, process_dimension_names, process_id_names,
+ algorithms, NETDATA_MAX_MONITOR_VECTOR);
+
+ if (process_enabled) {
+ ebpf_create_global_charts(em);
+ }
+
+ pthread_mutex_unlock(&lock);
+
+ process_collector((usec_t)(em->update_time * USEC_PER_SEC), em);
+
+endprocess:
+ wait_for_all_threads_die();
+ netdata_thread_cleanup_pop(1);
+ return NULL;
+}
diff --git a/collectors/ebpf.plugin/ebpf_process.h b/collectors/ebpf.plugin/ebpf_process.h
new file mode 100644
index 0000000..aa6ed66
--- /dev/null
+++ b/collectors/ebpf.plugin/ebpf_process.h
@@ -0,0 +1,139 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_EBPF_PROCESS_H
+#define NETDATA_EBPF_PROCESS_H 1
+
+// Groups used on Dashboard
+#define NETDATA_FILE_GROUP "File"
+#define NETDATA_VFS_GROUP "VFS"
+#define NETDATA_PROCESS_GROUP "Process"
+
+// Internal constants
+#define NETDATA_GLOBAL_VECTOR 24
+#define NETDATA_MAX_MONITOR_VECTOR 9
+#define NETDATA_VFS_ERRORS 3
+
+// Map index
+#define NETDATA_DEL_START 2
+#define NETDATA_IN_START_BYTE 3
+#define NETDATA_EXIT_START 5
+#define NETDATA_PROCESS_START 7
+
+// Global chart name
+#define NETDATA_FILE_OPEN_CLOSE_COUNT "file_descriptor"
+#define NETDATA_FILE_OPEN_ERR_COUNT "file_error"
+#define NETDATA_VFS_FILE_CLEAN_COUNT "deleted_objects"
+#define NETDATA_VFS_FILE_IO_COUNT "io"
+#define NETDATA_VFS_FILE_ERR_COUNT "io_error"
+
+#define NETDATA_EXIT_SYSCALL "exit"
+#define NETDATA_PROCESS_SYSCALL "process_thread"
+#define NETDATA_PROCESS_ERROR_NAME "task_error"
+#define NETDATA_PROCESS_STATUS_NAME "process_status"
+
+#define NETDATA_VFS_IO_FILE_BYTES "io_bytes"
+
+// Charts created on Apps submenu
+#define NETDATA_SYSCALL_APPS_FILE_OPEN "file_open"
+#define NETDATA_SYSCALL_APPS_FILE_CLOSED "file_closed"
+#define NETDATA_SYSCALL_APPS_FILE_DELETED "file_deleted"
+#define NETDATA_SYSCALL_APPS_VFS_WRITE_CALLS "vfs_write_call"
+#define NETDATA_SYSCALL_APPS_VFS_READ_CALLS "vfs_read_call"
+#define NETDATA_SYSCALL_APPS_VFS_WRITE_BYTES "vfs_write_bytes"
+#define NETDATA_SYSCALL_APPS_VFS_READ_BYTES "vfs_read_bytes"
+#define NETDATA_SYSCALL_APPS_TASK_PROCESS "process_create"
+#define NETDATA_SYSCALL_APPS_TASK_THREAD "thread_create"
+#define NETDATA_SYSCALL_APPS_TASK_CLOSE "task_close"
+
+// Charts created on the Apps submenu if, and only if, the return mode is active
+
+#define NETDATA_SYSCALL_APPS_FILE_OPEN_ERROR "file_open_error"
+#define NETDATA_SYSCALL_APPS_FILE_CLOSE_ERROR "file_close_error"
+#define NETDATA_SYSCALL_APPS_VFS_WRITE_CALLS_ERROR "vfs_write_error"
+#define NETDATA_SYSCALL_APPS_VFS_READ_CALLS_ERROR "vfs_read_error"
+
+// Index from kernel
+typedef enum ebpf_process_index {
+ NETDATA_KEY_CALLS_DO_SYS_OPEN,
+ NETDATA_KEY_ERROR_DO_SYS_OPEN,
+
+ NETDATA_KEY_CALLS_VFS_WRITE,
+ NETDATA_KEY_ERROR_VFS_WRITE,
+ NETDATA_KEY_BYTES_VFS_WRITE,
+
+ NETDATA_KEY_CALLS_VFS_READ,
+ NETDATA_KEY_ERROR_VFS_READ,
+ NETDATA_KEY_BYTES_VFS_READ,
+
+ NETDATA_KEY_CALLS_VFS_UNLINK,
+ NETDATA_KEY_ERROR_VFS_UNLINK,
+
+ NETDATA_KEY_CALLS_DO_EXIT,
+
+ NETDATA_KEY_CALLS_RELEASE_TASK,
+
+ NETDATA_KEY_CALLS_DO_FORK,
+ NETDATA_KEY_ERROR_DO_FORK,
+
+ NETDATA_KEY_CALLS_CLOSE_FD,
+ NETDATA_KEY_ERROR_CLOSE_FD,
+
+ NETDATA_KEY_CALLS_SYS_CLONE,
+ NETDATA_KEY_ERROR_SYS_CLONE,
+
+ NETDATA_KEY_CALLS_VFS_WRITEV,
+ NETDATA_KEY_ERROR_VFS_WRITEV,
+ NETDATA_KEY_BYTES_VFS_WRITEV,
+
+ NETDATA_KEY_CALLS_VFS_READV,
+ NETDATA_KEY_ERROR_VFS_READV,
+ NETDATA_KEY_BYTES_VFS_READV
+
+} ebpf_process_index_t;
+
+// This enum acts as an index for the publish vector.
+// Do not change the enum order because we use
+// different algorithms to make charts with incremental
+// values (the three initial positions) and absolute values
+// (the remaining charts).
+typedef enum netdata_publish_process {
+ NETDATA_KEY_PUBLISH_PROCESS_OPEN,
+ NETDATA_KEY_PUBLISH_PROCESS_CLOSE,
+ NETDATA_KEY_PUBLISH_PROCESS_UNLINK,
+ NETDATA_KEY_PUBLISH_PROCESS_READ,
+ NETDATA_KEY_PUBLISH_PROCESS_WRITE,
+ NETDATA_KEY_PUBLISH_PROCESS_EXIT,
+ NETDATA_KEY_PUBLISH_PROCESS_RELEASE_TASK,
+ NETDATA_KEY_PUBLISH_PROCESS_FORK,
+ NETDATA_KEY_PUBLISH_PROCESS_CLONE,
+
+ NETDATA_KEY_PUBLISH_PROCESS_END
+} netdata_publish_process_t;
+
+typedef struct ebpf_process_publish_apps {
+ // Number of calls during the last read
+ uint64_t call_sys_open;
+ uint64_t call_close_fd;
+ uint64_t call_vfs_unlink;
+ uint64_t call_read;
+ uint64_t call_write;
+ uint64_t call_do_exit;
+ uint64_t call_release_task;
+ uint64_t call_do_fork;
+ uint64_t call_sys_clone;
+
+ // Number of errors during the last read
+ uint64_t ecall_sys_open;
+ uint64_t ecall_close_fd;
+ uint64_t ecall_vfs_unlink;
+ uint64_t ecall_read;
+ uint64_t ecall_write;
+ uint64_t ecall_do_fork;
+ uint64_t ecall_sys_clone;
+
+ // Number of bytes during the last read
+ uint64_t bytes_written;
+ uint64_t bytes_read;
+} ebpf_process_publish_apps_t;
+
+#endif /* NETDATA_EBPF_PROCESS_H */
diff --git a/collectors/ebpf.plugin/ebpf_socket.c b/collectors/ebpf.plugin/ebpf_socket.c
new file mode 100644
index 0000000..7fbc244
--- /dev/null
+++ b/collectors/ebpf.plugin/ebpf_socket.c
@@ -0,0 +1,1920 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include <sys/resource.h>
+
+#include "ebpf.h"
+#include "ebpf_socket.h"
+
+/*****************************************************************
+ *
+ * GLOBAL VARIABLES
+ *
+ *****************************************************************/
+
+static char *socket_dimension_names[NETDATA_MAX_SOCKET_VECTOR] = { "sent", "received", "close", "sent",
+ "received", "retransmitted" };
+static char *socket_id_names[NETDATA_MAX_SOCKET_VECTOR] = { "tcp_sendmsg", "tcp_cleanup_rbuf", "tcp_close",
+ "udp_sendmsg", "udp_recvmsg", "tcp_retransmit_skb" };
+
+static netdata_idx_t *socket_hash_values = NULL;
+static netdata_syscall_stat_t *socket_aggregated_data = NULL;
+static netdata_publish_syscall_t *socket_publish_aggregated = NULL;
+
+static ebpf_data_t socket_data;
+
+ebpf_socket_publish_apps_t **socket_bandwidth_curr = NULL;
+static ebpf_bandwidth_t *bandwidth_vector = NULL;
+
+static int socket_apps_created = 0;
+pthread_mutex_t nv_mutex;
+int wait_to_plot = 0;
+int read_thread_closed = 1;
+
+netdata_vector_plot_t inbound_vectors = { .plot = NULL, .next = 0, .last = 0 };
+netdata_vector_plot_t outbound_vectors = { .plot = NULL, .next = 0, .last = 0 };
+netdata_socket_t *socket_values;
+
+ebpf_network_viewer_port_list_t *listen_ports = NULL;
+
+static int *map_fd = NULL;
+static struct bpf_object *objects = NULL;
+static struct bpf_link **probe_links = NULL;
+
+/*****************************************************************
+ *
+ * PROCESS DATA AND SEND TO NETDATA
+ *
+ *****************************************************************/
+
+/**
+ * Update the publish structure before sending data to Netdata.
+ *
+ * @param publish the first output structure with independent dimensions
+ * @param tcp structure to store IO from tcp sockets
+ * @param udp structure to store IO from udp sockets
+ * @param input the structure with the input data.
+ */
+static void ebpf_update_global_publish(
+ netdata_publish_syscall_t *publish, netdata_publish_vfs_common_t *tcp, netdata_publish_vfs_common_t *udp,
+ netdata_syscall_stat_t *input)
+{
+ netdata_publish_syscall_t *move = publish;
+ while (move) {
+ if (input->call != move->pcall) {
+ // This condition avoids plotting artificially high deltas on the first iteration.
+ if (move->pcall) {
+ move->ncall = (input->call > move->pcall) ? input->call - move->pcall : move->pcall - input->call;
+ move->nbyte = (input->bytes > move->pbyte) ? input->bytes - move->pbyte : move->pbyte - input->bytes;
+ move->nerr = (input->ecall > move->perr) ? input->ecall - move->perr : move->perr - input->ecall;
+ } else {
+ move->ncall = 0;
+ move->nbyte = 0;
+ move->nerr = 0;
+ }
+
+ move->pcall = input->call;
+ move->pbyte = input->bytes;
+ move->perr = input->ecall;
+ } else {
+ move->ncall = 0;
+ move->nbyte = 0;
+ move->nerr = 0;
+ }
+
+ input = input->next;
+ move = move->next;
+ }
+
+ tcp->write = -(long)publish[0].nbyte;
+ tcp->read = (long)publish[1].nbyte;
+
+ udp->write = -(long)publish[3].nbyte;
+ udp->read = (long)publish[4].nbyte;
+}
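+
+/*
+ * Worked example with hypothetical numbers: if the previous snapshot stored
+ * pcall = 100 and the kernel now reports input->call = 130, the published
+ * delta is ncall = 130 - 100 = 30 and pcall becomes 130. On the very first
+ * pass pcall is still 0, so the delta is forced to 0 to avoid an artificial
+ * spike. Bytes and errors follow the same scheme.
+ */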
+
+/**
+ * Update Network Viewer plot data
+ *
+ * @param plot the structure where the data will be stored
+ * @param sock the last update from the socket
+ */
+static inline void update_nv_plot_data(netdata_plot_values_t *plot, netdata_socket_t *sock)
+{
+ if (sock->ct > plot->last_time) {
+ plot->last_time = sock->ct;
+ plot->plot_recv_packets = sock->recv_packets;
+ plot->plot_sent_packets = sock->sent_packets;
+ plot->plot_recv_bytes = sock->recv_bytes;
+ plot->plot_sent_bytes = sock->sent_bytes;
+ plot->plot_retransmit = sock->retransmit;
+ }
+
+ sock->recv_packets = 0;
+ sock->sent_packets = 0;
+ sock->recv_bytes = 0;
+ sock->sent_bytes = 0;
+ sock->retransmit = 0;
+}
+
+/**
+ * Calculate Network Viewer Plot
+ *
+ * Do the math on collected values before plotting the data.
+ */
+static inline void calculate_nv_plot()
+{
+ uint32_t i;
+ uint32_t end = inbound_vectors.next;
+ for (i = 0; i < end; i++) {
+ update_nv_plot_data(&inbound_vectors.plot[i].plot, &inbound_vectors.plot[i].sock);
+ }
+ inbound_vectors.max_plot = end;
+
+ // The 'Other' dimension is always calculated for the chart to have at least one dimension
+ update_nv_plot_data(&inbound_vectors.plot[inbound_vectors.last].plot,
+ &inbound_vectors.plot[inbound_vectors.last].sock);
+
+ end = outbound_vectors.next;
+ for (i = 0; i < end; i++) {
+ update_nv_plot_data(&outbound_vectors.plot[i].plot, &outbound_vectors.plot[i].sock);
+ }
+ outbound_vectors.max_plot = end;
+
+ // The 'Other' dimension is always calculated for the chart to have at least one dimension
+ update_nv_plot_data(&outbound_vectors.plot[outbound_vectors.last].plot,
+ &outbound_vectors.plot[outbound_vectors.last].sock);
+}
+
+/**
+ * Network viewer send bytes
+ *
+ * @param ptr the structure with values to plot
+ * @param chart the chart name.
+ */
+static inline void ebpf_socket_nv_send_bytes(netdata_vector_plot_t *ptr, char *chart)
+{
+ uint32_t i;
+ uint32_t end = ptr->last_plot;
+ netdata_socket_plot_t *w = ptr->plot;
+ collected_number value;
+
+ write_begin_chart(NETDATA_EBPF_FAMILY, chart);
+ for (i = 0; i < end; i++) {
+ value = ((collected_number) w[i].plot.plot_sent_bytes);
+ write_chart_dimension(w[i].dimension_sent, value);
+ value = (collected_number) w[i].plot.plot_recv_bytes;
+ write_chart_dimension(w[i].dimension_recv, value);
+ }
+
+ i = ptr->last;
+ value = ((collected_number) w[i].plot.plot_sent_bytes);
+ write_chart_dimension(w[i].dimension_sent, value);
+ value = (collected_number) w[i].plot.plot_recv_bytes;
+ write_chart_dimension(w[i].dimension_recv, value);
+ write_end_chart();
+}
+
+/**
+ * Network Viewer Send packets
+ *
+ * @param ptr the structure with values to plot
+ * @param chart the chart name.
+ */
+static inline void ebpf_socket_nv_send_packets(netdata_vector_plot_t *ptr, char *chart)
+{
+ uint32_t i;
+ uint32_t end = ptr->last_plot;
+ netdata_socket_plot_t *w = ptr->plot;
+ collected_number value;
+
+ write_begin_chart(NETDATA_EBPF_FAMILY, chart);
+ for (i = 0; i < end; i++) {
+ value = ((collected_number)w[i].plot.plot_sent_packets);
+ write_chart_dimension(w[i].dimension_sent, value);
+ value = (collected_number) w[i].plot.plot_recv_packets;
+ write_chart_dimension(w[i].dimension_recv, value);
+ }
+
+ i = ptr->last;
+ value = ((collected_number)w[i].plot.plot_sent_packets);
+ write_chart_dimension(w[i].dimension_sent, value);
+ value = (collected_number)w[i].plot.plot_recv_packets;
+ write_chart_dimension(w[i].dimension_recv, value);
+ write_end_chart();
+}
+
+/**
+ * Network Viewer Send Retransmit
+ *
+ * @param ptr the structure with values to plot
+ * @param chart the chart name.
+ */
+static inline void ebpf_socket_nv_send_retransmit(netdata_vector_plot_t *ptr, char *chart)
+{
+ uint32_t i;
+ uint32_t end = ptr->last_plot;
+ netdata_socket_plot_t *w = ptr->plot;
+ collected_number value;
+
+ write_begin_chart(NETDATA_EBPF_FAMILY, chart);
+ for (i = 0; i < end; i++) {
+ value = (collected_number) w[i].plot.plot_retransmit;
+ write_chart_dimension(w[i].dimension_retransmit, value);
+ }
+
+ i = ptr->last;
+ value = (collected_number)w[i].plot.plot_retransmit;
+ write_chart_dimension(w[i].dimension_retransmit, value);
+ write_end_chart();
+}
+
+/**
+ * Send network viewer data
+ *
+ * @param ptr the pointer to plot data
+ */
+static void ebpf_socket_send_nv_data(netdata_vector_plot_t *ptr)
+{
+ if (!ptr->flags)
+ return;
+
+ if (ptr == (netdata_vector_plot_t *)&outbound_vectors) {
+ ebpf_socket_nv_send_bytes(ptr, NETDATA_NV_OUTBOUND_BYTES);
+ fflush(stdout);
+
+ ebpf_socket_nv_send_packets(ptr, NETDATA_NV_OUTBOUND_PACKETS);
+ fflush(stdout);
+
+ ebpf_socket_nv_send_retransmit(ptr, NETDATA_NV_OUTBOUND_RETRANSMIT);
+ fflush(stdout);
+ } else {
+ ebpf_socket_nv_send_bytes(ptr, NETDATA_NV_INBOUND_BYTES);
+ fflush(stdout);
+
+ ebpf_socket_nv_send_packets(ptr, NETDATA_NV_INBOUND_PACKETS);
+ fflush(stdout);
+ }
+}
+
+/**
+ * Send data to Netdata calling auxiliary functions.
+ *
+ * @param em the structure with thread information
+ */
+static void ebpf_socket_send_data(ebpf_module_t *em)
+{
+ netdata_publish_vfs_common_t common_tcp;
+ netdata_publish_vfs_common_t common_udp;
+ ebpf_update_global_publish(socket_publish_aggregated, &common_tcp, &common_udp, socket_aggregated_data);
+
+ // We read bytes from the function arguments, but bandwidth is given in bits,
+ // so we multiply by 8 (and divide by 1000 to plot kilobits).
+ write_count_chart(
+ NETDATA_TCP_FUNCTION_COUNT, NETDATA_EBPF_FAMILY, socket_publish_aggregated, 3);
+ write_io_chart(
+ NETDATA_TCP_FUNCTION_BITS, NETDATA_EBPF_FAMILY, socket_id_names[0], common_tcp.write*8/1000,
+ socket_id_names[1], common_tcp.read*8/1000);
+ if (em->mode < MODE_ENTRY) {
+ write_err_chart(
+ NETDATA_TCP_FUNCTION_ERROR, NETDATA_EBPF_FAMILY, socket_publish_aggregated, 2);
+ }
+ write_count_chart(
+ NETDATA_TCP_RETRANSMIT, NETDATA_EBPF_FAMILY, &socket_publish_aggregated[NETDATA_RETRANSMIT_START], 1);
+
+ write_count_chart(
+ NETDATA_UDP_FUNCTION_COUNT, NETDATA_EBPF_FAMILY, &socket_publish_aggregated[NETDATA_UDP_START], 2);
+ write_io_chart(
+ NETDATA_UDP_FUNCTION_BITS, NETDATA_EBPF_FAMILY,
+ socket_id_names[3], (long long)common_udp.write*8/1000,
+ socket_id_names[4], (long long)common_udp.read*8/1000);
+ if (em->mode < MODE_ENTRY) {
+ write_err_chart(
+ NETDATA_UDP_FUNCTION_ERROR, NETDATA_EBPF_FAMILY, &socket_publish_aggregated[NETDATA_UDP_START], 2);
+ }
+}
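+
+/*
+ * Conversion example (hypothetical numbers): if TCP sent 250000 bytes since
+ * the last snapshot, the IO chart receives 250000 * 8 / 1000 = 2000; the
+ * negative sign applied to the write side only mirrors that dimension below
+ * the x-axis.
+ */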
+
+/**
+ * Sum values for pid
+ *
+ * @param root the structure with all available PIDs
+ *
+ * @param offset the address that we are reading
+ *
+ * @return it returns the sum of all PIDs
+ */
+long long ebpf_socket_sum_values_for_pids(struct pid_on_target *root, size_t offset)
+{
+ long long ret = 0;
+ while (root) {
+ int32_t pid = root->pid;
+ ebpf_socket_publish_apps_t *w = socket_bandwidth_curr[pid];
+ if (w) {
+ ret += get_value_from_structure((char *)w, offset);
+ }
+
+ root = root->next;
+ }
+
+ return ret;
+}
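+
+/*
+ * Minimal sketch of how an offset-based accessor such as
+ * get_value_from_structure() can be implemented, assuming every accumulated
+ * member of ebpf_socket_publish_apps_t is a uint64_t (this is an
+ * illustration, not the implementation shipped elsewhere in the plugin):
+ *
+ *   static inline long long read_u64_at(char *base, size_t offset)
+ *   {
+ *       uint64_t v;
+ *       memcpy(&v, base + offset, sizeof(v)); // memcpy avoids strict-aliasing issues
+ *       return (long long)v;
+ *   }
+ *
+ * It would be called as:
+ *   read_u64_at((char *)w, offsetof(ebpf_socket_publish_apps_t, bytes_sent));
+ */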
+
+/**
+ * Send data to Netdata calling auxiliary functions.
+ *
+ * @param em the structure with thread information
+ * @param root the target list.
+ */
+void ebpf_socket_send_apps_data(ebpf_module_t *em, struct target *root)
+{
+ UNUSED(em);
+ if (!socket_apps_created)
+ return;
+
+ struct target *w;
+ collected_number value;
+
+ write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_NET_APPS_BANDWIDTH_SENT);
+ for (w = root; w; w = w->next) {
+ if (unlikely(w->exposed && w->processes)) {
+ value = ebpf_socket_sum_values_for_pids(w->root_pid, offsetof(ebpf_socket_publish_apps_t,
+ bytes_sent));
+ // We read bytes but plot kilobits, hence value * 8 / 1000 (i.e. * 0.008)
+ write_chart_dimension(w->name, ((value)*8)/1000);
+ }
+ }
+ write_end_chart();
+
+ write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_NET_APPS_BANDWIDTH_RECV);
+ for (w = root; w; w = w->next) {
+ if (unlikely(w->exposed && w->processes)) {
+ value = ebpf_socket_sum_values_for_pids(w->root_pid, offsetof(ebpf_socket_publish_apps_t,
+ bytes_received));
+ // We read bytes but plot kilobits, hence value * 8 / 1000 (i.e. * 0.008)
+ write_chart_dimension(w->name, ((value)*8)/1000);
+ }
+ }
+ write_end_chart();
+
+ write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_NET_APPS_BANDWIDTH_TCP_SEND_CALLS);
+ for (w = root; w; w = w->next) {
+ if (unlikely(w->exposed && w->processes)) {
+ value = ebpf_socket_sum_values_for_pids(w->root_pid, offsetof(ebpf_socket_publish_apps_t,
+ call_tcp_sent));
+ write_chart_dimension(w->name, value);
+ }
+ }
+ write_end_chart();
+
+ write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_NET_APPS_BANDWIDTH_TCP_RECV_CALLS);
+ for (w = root; w; w = w->next) {
+ if (unlikely(w->exposed && w->processes)) {
+ value = ebpf_socket_sum_values_for_pids(w->root_pid, offsetof(ebpf_socket_publish_apps_t,
+ call_tcp_received));
+ write_chart_dimension(w->name, value);
+ }
+ }
+ write_end_chart();
+
+ write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_NET_APPS_BANDWIDTH_TCP_RETRANSMIT);
+ for (w = root; w; w = w->next) {
+ if (unlikely(w->exposed && w->processes)) {
+ value = ebpf_socket_sum_values_for_pids(w->root_pid, offsetof(ebpf_socket_publish_apps_t,
+ retransmit));
+ write_chart_dimension(w->name, value);
+ }
+ }
+ write_end_chart();
+
+ write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_NET_APPS_BANDWIDTH_UDP_SEND_CALLS);
+ for (w = root; w; w = w->next) {
+ if (unlikely(w->exposed && w->processes)) {
+ value = ebpf_socket_sum_values_for_pids(w->root_pid, offsetof(ebpf_socket_publish_apps_t,
+ call_udp_sent));
+ write_chart_dimension(w->name, value);
+ }
+ }
+ write_end_chart();
+
+ write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_NET_APPS_BANDWIDTH_UDP_RECV_CALLS);
+ for (w = root; w; w = w->next) {
+ if (unlikely(w->exposed && w->processes)) {
+ value = ebpf_socket_sum_values_for_pids(w->root_pid, offsetof(ebpf_socket_publish_apps_t,
+ call_udp_received));
+ write_chart_dimension(w->name, value);
+ }
+ }
+ write_end_chart();
+
+}
+
+/*****************************************************************
+ *
+ * FUNCTIONS TO CREATE CHARTS
+ *
+ *****************************************************************/
+
+/**
+ * Create global charts
+ *
+ * Call ebpf_create_chart to create the charts for the collector.
+ *
+ * @param em a pointer to the structure with the default values.
+ */
+static void ebpf_create_global_charts(ebpf_module_t *em)
+{
+ ebpf_create_chart(NETDATA_EBPF_FAMILY,
+ NETDATA_TCP_FUNCTION_COUNT,
+ "Calls to internal functions",
+ EBPF_COMMON_DIMENSION_CALL,
+ NETDATA_SOCKET_GROUP,
+ 21070,
+ ebpf_create_global_dimension,
+ socket_publish_aggregated,
+ 3);
+
+ ebpf_create_chart(NETDATA_EBPF_FAMILY, NETDATA_TCP_FUNCTION_BITS,
+ "TCP bandwidth", EBPF_COMMON_DIMENSION_BITS,
+ NETDATA_SOCKET_GROUP,
+ 21071,
+ ebpf_create_global_dimension,
+ socket_publish_aggregated,
+ 3);
+
+ if (em->mode < MODE_ENTRY) {
+ ebpf_create_chart(NETDATA_EBPF_FAMILY,
+ NETDATA_TCP_FUNCTION_ERROR,
+ "TCP errors",
+ EBPF_COMMON_DIMENSION_CALL,
+ NETDATA_SOCKET_GROUP,
+ 21072,
+ ebpf_create_global_dimension,
+ socket_publish_aggregated,
+ 2);
+ }
+
+ ebpf_create_chart(NETDATA_EBPF_FAMILY,
+ NETDATA_TCP_RETRANSMIT,
+ "Packages retransmitted",
+ EBPF_COMMON_DIMENSION_CALL,
+ NETDATA_SOCKET_GROUP,
+ 21073,
+ ebpf_create_global_dimension,
+ &socket_publish_aggregated[NETDATA_RETRANSMIT_START],
+ 1);
+
+ ebpf_create_chart(NETDATA_EBPF_FAMILY,
+ NETDATA_UDP_FUNCTION_COUNT,
+ "UDP calls",
+ EBPF_COMMON_DIMENSION_CALL,
+ NETDATA_SOCKET_GROUP,
+ 21074,
+ ebpf_create_global_dimension,
+ &socket_publish_aggregated[NETDATA_UDP_START],
+ 2);
+
+ ebpf_create_chart(NETDATA_EBPF_FAMILY, NETDATA_UDP_FUNCTION_BITS,
+ "UDP bandwidth", EBPF_COMMON_DIMENSION_BITS,
+ NETDATA_SOCKET_GROUP,
+ 21075,
+ ebpf_create_global_dimension,
+ &socket_publish_aggregated[NETDATA_UDP_START],
+ 2);
+
+ if (em->mode < MODE_ENTRY) {
+ ebpf_create_chart(NETDATA_EBPF_FAMILY,
+ NETDATA_UDP_FUNCTION_ERROR,
+ "UDP errors",
+ EBPF_COMMON_DIMENSION_CALL,
+ NETDATA_SOCKET_GROUP,
+ 21076,
+ ebpf_create_global_dimension,
+ &socket_publish_aggregated[NETDATA_UDP_START],
+ 2);
+ }
+}
+
+/**
+ * Create apps charts
+ *
+ * Call ebpf_create_chart to create the charts on apps submenu.
+ *
+ * @param em a pointer to the structure with the default values.
+ */
+void ebpf_socket_create_apps_charts(ebpf_module_t *em, struct target *root)
+{
+ UNUSED(em);
+ ebpf_create_charts_on_apps(NETDATA_NET_APPS_BANDWIDTH_SENT,
+ "Bytes sent", EBPF_COMMON_DIMENSION_BITS,
+ NETDATA_APPS_NET_GROUP,
+ 20080,
+ ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX],
+ root);
+
+ ebpf_create_charts_on_apps(NETDATA_NET_APPS_BANDWIDTH_RECV,
+ "bytes received", EBPF_COMMON_DIMENSION_BITS,
+ NETDATA_APPS_NET_GROUP,
+ 20081,
+ ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX],
+ root);
+
+ ebpf_create_charts_on_apps(NETDATA_NET_APPS_BANDWIDTH_TCP_SEND_CALLS,
+ "Calls for tcp_sendmsg",
+ EBPF_COMMON_DIMENSION_CALL,
+ NETDATA_APPS_NET_GROUP,
+ 20082,
+ ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX],
+ root);
+
+ ebpf_create_charts_on_apps(NETDATA_NET_APPS_BANDWIDTH_TCP_RECV_CALLS,
+ "Calls for tcp_cleanup_rbuf",
+ EBPF_COMMON_DIMENSION_CALL,
+ NETDATA_APPS_NET_GROUP,
+ 20083,
+ ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX],
+ root);
+
+ ebpf_create_charts_on_apps(NETDATA_NET_APPS_BANDWIDTH_TCP_RETRANSMIT,
+ "Calls for tcp_retransmit",
+ EBPF_COMMON_DIMENSION_CALL,
+ NETDATA_APPS_NET_GROUP,
+ 20084,
+ ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX],
+ root);
+
+ ebpf_create_charts_on_apps(NETDATA_NET_APPS_BANDWIDTH_UDP_SEND_CALLS,
+ "Calls for udp_sendmsg",
+ EBPF_COMMON_DIMENSION_CALL,
+ NETDATA_APPS_NET_GROUP,
+ 20085,
+ ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX],
+ root);
+
+ ebpf_create_charts_on_apps(NETDATA_NET_APPS_BANDWIDTH_UDP_RECV_CALLS,
+ "Calls for udp_recvmsg",
+ EBPF_COMMON_DIMENSION_CALL,
+ NETDATA_APPS_NET_GROUP,
+ 20086,
+ ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX],
+ root);
+
+ socket_apps_created = 1;
+}
+
+/**
+ * Create network viewer chart
+ *
+ * Create common charts.
+ *
+ * @param id the chart id
+ * @param title the chart title
+ * @param units the units label
+ * @param family the group name used to attach the chart on dashboard
+ * @param order the chart order
+ * @param ptr the plot structure with values.
+ */
+static void ebpf_socket_create_nv_chart(char *id, char *title, char *units,
+ char *family, int order, netdata_vector_plot_t *ptr)
+{
+ ebpf_write_chart_cmd(NETDATA_EBPF_FAMILY,
+ id,
+ title,
+ units,
+ family,
+ "stacked",
+ order);
+
+ uint32_t i;
+ uint32_t end = ptr->last_plot;
+ netdata_socket_plot_t *w = ptr->plot;
+ for (i = 0; i < end; i++) {
+ fprintf(stdout, "DIMENSION %s '' incremental -1 1\n", w[i].dimension_sent);
+ fprintf(stdout, "DIMENSION %s '' incremental 1 1\n", w[i].dimension_recv);
+ }
+
+ end = ptr->last;
+ fprintf(stdout, "DIMENSION %s '' incremental -1 1\n", w[end].dimension_sent);
+ fprintf(stdout, "DIMENSION %s '' incremental 1 1\n", w[end].dimension_recv);
+}
+
+/**
+ * Create network viewer retransmit
+ *
+ * Create a specific chart.
+ *
+ * @param id the chart id
+ * @param title the chart title
+ * @param units the units label
+ * @param family the group name used to attach the chart on dashboard
+ * @param order the chart order
+ * @param ptr the plot structure with values.
+ */
+static void ebpf_socket_create_nv_retransmit(char *id, char *title, char *units,
+ char *family, int order, netdata_vector_plot_t *ptr)
+{
+ ebpf_write_chart_cmd(NETDATA_EBPF_FAMILY,
+ id,
+ title,
+ units,
+ family,
+ "stacked",
+ order);
+
+ uint32_t i;
+ uint32_t end = ptr->last_plot;
+ netdata_socket_plot_t *w = ptr->plot;
+ for (i = 0; i < end; i++) {
+ fprintf(stdout, "DIMENSION %s '' incremental 1 1\n", w[i].dimension_retransmit);
+ }
+
+ end = ptr->last;
+ fprintf(stdout, "DIMENSION %s '' incremental 1 1\n", w[end].dimension_retransmit);
+}
+
+/**
+ * Create Network Viewer charts
+ *
+ * Recreate the charts when new sockets are created.
+ *
+ * @param ptr a pointer for inbound or outbound vectors.
+ */
+static void ebpf_socket_create_nv_charts(netdata_vector_plot_t *ptr)
+{
+ // We do not have new sockets, so we do not need to move forward
+ if (ptr->max_plot == ptr->last_plot)
+ return;
+
+ ptr->last_plot = ptr->max_plot;
+
+ if (ptr == (netdata_vector_plot_t *)&outbound_vectors) {
+ ebpf_socket_create_nv_chart(NETDATA_NV_OUTBOUND_BYTES,
+ "Outbound connections (bytes).", EBPF_COMMON_DIMENSION_BYTES,
+ NETDATA_NETWORK_CONNECTIONS_GROUP,
+ 21080,
+ ptr);
+
+ ebpf_socket_create_nv_chart(NETDATA_NV_OUTBOUND_PACKETS,
+ "Outbound connections (packets)",
+ EBPF_COMMON_DIMENSION_PACKETS,
+ NETDATA_NETWORK_CONNECTIONS_GROUP,
+ 21082,
+ ptr);
+
+ ebpf_socket_create_nv_retransmit(NETDATA_NV_OUTBOUND_RETRANSMIT,
+ "Retransmitted packets",
+ EBPF_COMMON_DIMENSION_CALL,
+ NETDATA_NETWORK_CONNECTIONS_GROUP,
+ 21083,
+ ptr);
+ } else {
+ ebpf_socket_create_nv_chart(NETDATA_NV_INBOUND_BYTES,
+ "Inbound connections (bytes)", EBPF_COMMON_DIMENSION_BYTES,
+ NETDATA_NETWORK_CONNECTIONS_GROUP,
+ 21084,
+ ptr);
+
+ ebpf_socket_create_nv_chart(NETDATA_NV_INBOUND_PACKETS,
+ "Inbound connections (packets)",
+ EBPF_COMMON_DIMENSION_PACKETS,
+ NETDATA_NETWORK_CONNECTIONS_GROUP,
+ 21085,
+ ptr);
+ }
+
+ ptr->flags |= NETWORK_VIEWER_CHARTS_CREATED;
+}
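+
+/*
+ * Note: the plugin protocol accepts a CHART command at any time, so
+ * re-sending it with additional DIMENSION lines is how newly discovered
+ * sockets appear on the dashboard without restarting the plugin; the
+ * max_plot/last_plot comparison above only skips the resend when nothing new
+ * arrived.
+ */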
+
+/*****************************************************************
+ *
+ * READ INFORMATION FROM KERNEL RING
+ *
+ *****************************************************************/
+
+/**
+ * Is specific ip inside the range
+ *
+ * Check if the IP is inside an IP range defined previously
+ *
+ * @param cmp the IP to compare
+ * @param family the IP family
+ *
+ * @return It returns 1 if the IP is inside the range and 0 otherwise
+ */
+static int is_specific_ip_inside_range(union netdata_ip_t *cmp, int family)
+{
+ if (!network_viewer_opt.excluded_ips && !network_viewer_opt.included_ips)
+ return 1;
+
+ uint32_t ipv4_test = ntohl(cmp->addr32[0]);
+ ebpf_network_viewer_ip_list_t *move = network_viewer_opt.excluded_ips;
+ while (move) {
+ if (family == AF_INET) {
+ if (ntohl(move->first.addr32[0]) <= ipv4_test &&
+ ipv4_test <= ntohl(move->last.addr32[0]) )
+ return 0;
+ } else {
+ if (memcmp(move->first.addr8, cmp->addr8, sizeof(union netdata_ip_t)) <= 0 &&
+ memcmp(move->last.addr8, cmp->addr8, sizeof(union netdata_ip_t)) >= 0) {
+ return 0;
+ }
+ }
+ move = move->next;
+ }
+
+ move = network_viewer_opt.included_ips;
+ while (move) {
+ if (family == AF_INET) {
+ if (ntohl(move->first.addr32[0]) <= ipv4_test &&
+ ntohl(move->last.addr32[0]) >= ipv4_test)
+ return 1;
+ } else {
+ if (memcmp(move->first.addr8, cmp->addr8, sizeof(union netdata_ip_t)) <= 0 &&
+ memcmp(move->last.addr8, cmp->addr8, sizeof(union netdata_ip_t)) >= 0) {
+ return 1;
+ }
+ }
+ move = move->next;
+ }
+
+ return 0;
+}
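+
+/*
+ * Example with hypothetical lists: including 10.0.0.0-10.0.0.255 and
+ * excluding 10.0.0.5-10.0.0.10 makes 10.0.0.7 return 0 (exclusions are
+ * tested first), 10.0.0.1 return 1, and 192.168.1.1 return 0 because it
+ * matches no included range.
+ */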
+
+/**
+ * Is port inside range
+ *
+ * Verify if the cmp port is inside a range [first, last].
+ * The ports stored in the lists are big endian, so cmp is converted before the comparison.
+ *
+ * @param cmp the value to compare
+ *
+ * @return It returns 1 when cmp is inside and 0 otherwise.
+ */
+static int is_port_inside_range(uint16_t cmp)
+{
+ // We do not have restrictions for ports.
+ if (!network_viewer_opt.excluded_port && !network_viewer_opt.included_port)
+ return 1;
+
+ // Test if port is excluded
+ ebpf_network_viewer_port_list_t *move = network_viewer_opt.excluded_port;
+ cmp = htons(cmp);
+ while (move) {
+ if (move->cmp_first <= cmp && cmp <= move->cmp_last)
+ return 0;
+
+ move = move->next;
+ }
+
+ // Test if the port is inside allowed range
+ move = network_viewer_opt.included_port;
+ while (move) {
+ if (move->cmp_first <= cmp && cmp <= move->cmp_last)
+ return 1;
+
+ move = move->next;
+ }
+
+ return 0;
+}
+
+/**
+ * Hostname matches pattern
+ *
+ * @param cmp the value to compare
+ *
+ * @return It returns 1 when the value matches and zero otherwise.
+ */
+int hostname_matches_pattern(char *cmp)
+{
+ if (!network_viewer_opt.included_hostnames && !network_viewer_opt.excluded_hostnames)
+ return 1;
+
+ ebpf_network_viewer_hostname_list_t *move = network_viewer_opt.excluded_hostnames;
+ while (move) {
+ if (simple_pattern_matches(move->value_pattern, cmp))
+ return 0;
+
+ move = move->next;
+ }
+
+ move = network_viewer_opt.included_hostnames;
+ while (move) {
+ if (simple_pattern_matches(move->value_pattern, cmp))
+ return 1;
+
+ move = move->next;
+ }
+
+ return 0;
+}
+
+/**
+ * Is socket allowed?
+ *
+ * Compare destination addresses and destination ports to define next steps
+ *
+ * @param key the socket read from kernel ring
+ * @param family the family used to compare IPs (AF_INET and AF_INET6)
+ *
+ * @return It returns 1 if this socket is inside the ranges and 0 otherwise.
+ */
+int is_socket_allowed(netdata_socket_idx_t *key, int family)
+{
+ if (!is_port_inside_range(key->dport))
+ return 0;
+
+ return is_specific_ip_inside_range(&key->daddr, family);
+}
+
+/**
+ * Compare sockets
+ *
+ * Compare destination address and destination port.
+ * We do not compare source port, because it is random.
+ * We also do not compare source address, because inbound and outbound connections are stored in separated AVL trees.
+ *
+ * @param a pointer to netdata_socket_plot
+ * @param b pointer to netdata_socket_plot
+ *
+ * @return It returns 0 when the values are equal, 1 when a is bigger than b, and -1 when a is smaller than b.
+ */
+static int compare_sockets(void *a, void *b)
+{
+ struct netdata_socket_plot *val1 = a;
+ struct netdata_socket_plot *val2 = b;
+ int cmp;
+
+ // We do not need to compare val2 family, because data inside hash table is always from the same family
+ if (val1->family == AF_INET) { //IPV4
+ if (val1->flags & NETDATA_INBOUND_DIRECTION) {
+ if (val1->index.sport == val2->index.sport)
+ cmp = 0;
+ else {
+ cmp = (val1->index.sport > val2->index.sport)?1:-1;
+ }
+ } else {
+ cmp = memcmp(&val1->index.dport, &val2->index.dport, sizeof(uint16_t));
+ if (!cmp) {
+ cmp = memcmp(&val1->index.daddr.addr32[0], &val2->index.daddr.addr32[0], sizeof(uint32_t));
+ }
+ }
+ } else {
+ if (val1->flags & NETDATA_INBOUND_DIRECTION) {
+ if (val1->index.sport == val2->index.sport)
+ cmp = 0;
+ else {
+ cmp = (val1->index.sport > val2->index.sport)?1:-1;
+ }
+ } else {
+ cmp = memcmp(&val1->index.dport, &val2->index.dport, sizeof(uint16_t));
+ if (!cmp) {
+ cmp = memcmp(&val1->index.daddr.addr32, &val2->index.daddr.addr32, 4*sizeof(uint32_t));
+ }
+ }
+ }
+
+ return cmp;
+}
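+
+/*
+ * Reading note (inference from the comparison above): inbound sockets are
+ * compared only by their local source port, so every remote client reaching
+ * the same listening port collapses into a single AVL node and chart
+ * dimension, while outbound sockets stay distinct per destination address
+ * and port.
+ */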
+
+/**
+ * Build dimension name
+ *
+ * Fill dimension name vector with values given
+ *
+ * @param dimname the output vector
+ * @param hostname the hostname for the socket.
+ * @param service_name the service used to connect.
+ * @param proto the protocol used in this connection
+ * @param family is this IPV4(AF_INET) or IPV6(AF_INET6)
+ *
+ * @return it returns the size of the data copied on success and -1 otherwise.
+ */
+static inline int build_outbound_dimension_name(char *dimname, char *hostname, char *service_name,
+ char *proto, int family)
+{
+ return snprintf(dimname, CONFIG_MAX_NAME - 7, (family == AF_INET)?"%s:%s:%s_":"%s:%s:[%s]_",
+ service_name, proto,
+ hostname);
+}
+
+/**
+ * Fill inbound dimension name
+ *
+ * Build the dimension name from the given input
+ *
+ * @param dimname the output vector
+ * @param service_name the service used to connect.
+ * @param proto the protocol used in this connection
+ *
+ * @return it returns the size of the data copied on success and -1 otherwise.
+ */
+static inline int build_inbound_dimension_name(char *dimname, char *service_name, char *proto)
+{
+ return snprintf(dimname, CONFIG_MAX_NAME - 7, "%s:%s_", service_name,
+ proto);
+}
+
+/**
+ * Fill Resolved Name
+ *
+ * Fill the resolved name structure with the value given.
+ * The hostname is the longest possible field; when truncation is necessary, it is the hostname that gets cut.
+ *
+ * @param ptr the output vector
+ * @param hostname the hostname resolved or IP.
+ * @param length the length for the hostname.
+ * @param service_name the service name associated to the connection
+ * @param is_outbound is this an outbound connection?
+ */
+static inline void fill_resolved_name(netdata_socket_plot_t *ptr, char *hostname, size_t length,
+ char *service_name, int is_outbound)
+{
+ if (length < NETDATA_MAX_NETWORK_COMBINED_LENGTH)
+ ptr->resolved_name = strdupz(hostname);
+ else {
+ length = NETDATA_MAX_NETWORK_COMBINED_LENGTH;
+ ptr->resolved_name = mallocz(NETDATA_MAX_NETWORK_COMBINED_LENGTH + 1);
+ memcpy(ptr->resolved_name, hostname, length);
+ ptr->resolved_name[length] = '\0';
+ }
+
+ char dimname[CONFIG_MAX_NAME];
+ int size;
+ char *protocol;
+ if (ptr->sock.protocol == IPPROTO_UDP) {
+ protocol = "UDP";
+ } else if (ptr->sock.protocol == IPPROTO_TCP) {
+ protocol = "TCP";
+ } else {
+ protocol = "ALL";
+ }
+
+ if (is_outbound)
+ size = build_outbound_dimension_name(dimname, hostname, service_name, protocol, ptr->family);
+ else
+ size = build_inbound_dimension_name(dimname,service_name, protocol);
+
+ if (size > 0) {
+ strcpy(&dimname[size], "sent");
+ dimname[size + 4] = '\0';
+ ptr->dimension_sent = strdupz(dimname);
+
+ strcpy(&dimname[size], "recv");
+ ptr->dimension_recv = strdupz(dimname);
+
+ dimname[size - 1] = '\0';
+ ptr->dimension_retransmit = strdupz(dimname);
+ }
+}
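+
+/*
+ * Naming example (hypothetical values): an outbound IPv4 socket resolved to
+ * hostname "example.com", service "https" and protocol TCP yields the base
+ * name "https:TCP:example.com_", producing the dimensions
+ * "https:TCP:example.com_sent", "https:TCP:example.com_recv" and, with the
+ * trailing '_' dropped, "https:TCP:example.com" for retransmits.
+ */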
+
+/**
+ * Build dimension names
+ *
+ * Fill the name vectors after resolving the addresses
+ *
+ * @param ptr a pointer to the structure where the values are stored.
+ * @param is_outbound is ptr an outbound value?
+ *
+ * @return It returns 1 if the name is valid and 0 otherwise.
+ */
+int fill_names(netdata_socket_plot_t *ptr, int is_outbound)
+{
+ char hostname[NI_MAXHOST], service_name[NI_MAXSERV];
+ if (ptr->resolved)
+ return 1;
+
+ int ret;
+ static int resolve_name = -1;
+ static int resolve_service = -1;
+ if (resolve_name == -1)
+ resolve_name = network_viewer_opt.hostname_resolution_enabled;
+
+ if (resolve_service == -1)
+ resolve_service = network_viewer_opt.service_resolution_enabled;
+
+ netdata_socket_idx_t *idx = &ptr->index;
+
+ char *errname = { "Not resolved" };
+ // Resolve Name
+ if (ptr->family == AF_INET) { //IPV4
+ struct sockaddr_in myaddr;
+ memset(&myaddr, 0 , sizeof(myaddr));
+
+ myaddr.sin_family = ptr->family;
+ if (is_outbound) {
+ myaddr.sin_port = idx->dport;
+ myaddr.sin_addr.s_addr = idx->daddr.addr32[0];
+ } else {
+ myaddr.sin_port = idx->sport;
+ myaddr.sin_addr.s_addr = idx->saddr.addr32[0];
+ }
+
+ ret = (!resolve_name)?-1:getnameinfo((struct sockaddr *)&myaddr, sizeof(myaddr), hostname,
+ sizeof(hostname), service_name, sizeof(service_name), NI_NAMEREQD);
+
+ if (!ret && !resolve_service) {
+ snprintf(service_name, sizeof(service_name), "%u", ntohs(myaddr.sin_port));
+ }
+
+ if (ret) {
+ // I cannot resolve the name, I will use the IP
+ if (!inet_ntop(AF_INET, &myaddr.sin_addr.s_addr, hostname, NI_MAXHOST)) {
+ strncpy(hostname, errname, 13);
+ }
+
+ snprintf(service_name, sizeof(service_name), "%u", ntohs(myaddr.sin_port));
+ ret = 1;
+ }
+ } else { // IPV6
+ struct sockaddr_in6 myaddr6;
+ memset(&myaddr6, 0 , sizeof(myaddr6));
+
+ myaddr6.sin6_family = AF_INET6;
+ if (is_outbound) {
+ myaddr6.sin6_port = idx->dport;
+ memcpy(myaddr6.sin6_addr.s6_addr, idx->daddr.addr8, sizeof(union netdata_ip_t));
+ } else {
+ myaddr6.sin6_port = idx->sport;
+ memcpy(myaddr6.sin6_addr.s6_addr, idx->saddr.addr8, sizeof(union netdata_ip_t));
+ }
+
+ ret = (!resolve_name)?-1:getnameinfo((struct sockaddr *)&myaddr6, sizeof(myaddr6), hostname,
+ sizeof(hostname), service_name, sizeof(service_name), NI_NAMEREQD);
+
+ if (!ret && !resolve_service) {
+ snprintf(service_name, sizeof(service_name), "%u", ntohs(myaddr6.sin6_port));
+ }
+
+ if (ret) {
+ // I cannot resolve the name, I will use the IP
+ if (!inet_ntop(AF_INET6, myaddr6.sin6_addr.s6_addr, hostname, NI_MAXHOST)) {
+ strncpy(hostname, errname, 13);
+ }
+
+ snprintf(service_name, sizeof(service_name), "%u", ntohs(myaddr6.sin6_port));
+
+ ret = 1;
+ }
+ }
+
+ fill_resolved_name(ptr, hostname,
+ strlen(hostname) + strlen(service_name)+ NETDATA_DOTS_PROTOCOL_COMBINED_LENGTH,
+ service_name, is_outbound);
+
+ if (resolve_name && !ret)
+ ret = hostname_matches_pattern(hostname);
+
+ ptr->resolved++;
+
+ return ret;
+}
+
+/**
+ * Fill last Network Viewer Dimension
+ *
+ * Fill the unique dimension that is always plotted.
+ *
+ * @param ptr the pointer for the last dimension
+ * @param is_outbound is this an outbound structure?
+ */
+static void fill_last_nv_dimension(netdata_socket_plot_t *ptr, int is_outbound)
+{
+ char hostname[NI_MAXHOST], service_name[NI_MAXSERV];
+ char *other = { "other" };
+ // We also copy the terminating NUL bytes to avoid warnings from newer compilers
+ strncpy(hostname, other, 6);
+ strncpy(service_name, other, 6);
+
+ ptr->family = AF_INET;
+ ptr->sock.protocol = 255;
+ ptr->flags = (!is_outbound)?NETDATA_INBOUND_DIRECTION:NETDATA_OUTBOUND_DIRECTION;
+
+ fill_resolved_name(ptr, hostname, 10 + NETDATA_DOTS_PROTOCOL_COMBINED_LENGTH, service_name, is_outbound);
+
+#ifdef NETDATA_INTERNAL_CHECKS
+ info("Last %s dimension added: ID = %u, IP = OTHER, NAME = %s, DIM1 = %s, DIM2 = %s, DIM3 = %s",
+ (is_outbound)?"outbound":"inbound", network_viewer_opt.max_dim - 1, ptr->resolved_name,
+ ptr->dimension_recv, ptr->dimension_sent, ptr->dimension_retransmit);
+#endif
+}
+
+/**
+ * Update Socket Data
+ *
+ * Update the socket information with last collected data
+ *
+ * @param sock
+ * @param lvalues
+ */
+static inline void update_socket_data(netdata_socket_t *sock, netdata_socket_t *lvalues)
+{
+ sock->recv_packets += lvalues->recv_packets;
+ sock->sent_packets += lvalues->sent_packets;
+ sock->recv_bytes += lvalues->recv_bytes;
+ sock->sent_bytes += lvalues->sent_bytes;
+ sock->retransmit += lvalues->retransmit;
+
+ if (lvalues->ct > sock->ct)
+ sock->ct = lvalues->ct;
+}
+
+/**
+ * Store socket inside avl
+ *
+ * Store the socket values inside the avl tree.
+ *
+ * @param out the structure with information used to plot charts.
+ * @param lvalues Values read from socket ring.
+ * @param lindex the index information, the real socket.
+ * @param family the family associated to the socket
+ * @param flags the connection flags
+ */
+static void store_socket_inside_avl(netdata_vector_plot_t *out, netdata_socket_t *lvalues,
+ netdata_socket_idx_t *lindex, int family, uint32_t flags)
+{
+ netdata_socket_plot_t test, *ret;
+
+ memcpy(&test.index, lindex, sizeof(netdata_socket_idx_t));
+ test.flags = flags;
+
+ ret = (netdata_socket_plot_t *) avl_search_lock(&out->tree, (avl *)&test);
+ if (ret) {
+ if (lvalues->ct > ret->plot.last_time) {
+ update_socket_data(&ret->sock, lvalues);
+ }
+ } else {
+ uint32_t curr = out->next;
+ uint32_t last = out->last;
+
+ netdata_socket_plot_t *w = &out->plot[curr];
+
+ int resolved;
+ if (curr == last) {
+ if (lvalues->ct > w->plot.last_time) {
+ update_socket_data(&w->sock, lvalues);
+ }
+ return;
+ } else {
+ memcpy(&w->sock, lvalues, sizeof(netdata_socket_t));
+ memcpy(&w->index, lindex, sizeof(netdata_socket_idx_t));
+ w->family = family;
+
+ resolved = fill_names(w, out != (netdata_vector_plot_t *)&inbound_vectors);
+ }
+
+ if (!resolved) {
+ freez(w->resolved_name);
+ freez(w->dimension_sent);
+ freez(w->dimension_recv);
+ freez(w->dimension_retransmit);
+
+ memset(w, 0, sizeof(netdata_socket_plot_t));
+
+ return;
+ }
+
+ w->flags = flags;
+ netdata_socket_plot_t *check;
+ check = (netdata_socket_plot_t *) avl_insert_lock(&out->tree, (avl *)w);
+ if (check != w)
+ error("Internal error, cannot insert the AVL tree.");
+
+#ifdef NETDATA_INTERNAL_CHECKS
+ char iptext[INET6_ADDRSTRLEN];
+ if (inet_ntop(family, &w->index.daddr.addr8, iptext, sizeof(iptext)))
+ info("New %s dimension added: ID = %u, IP = %s, NAME = %s, DIM1 = %s, DIM2 = %s, DIM3 = %s",
+ (out == &inbound_vectors)?"inbound":"outbound", curr, iptext, w->resolved_name,
+ w->dimension_recv, w->dimension_sent, w->dimension_retransmit);
+#endif
+ curr++;
+ if (curr > last)
+ curr = last;
+ out->next = curr;
+ }
+}
+
+/**
+ * Compare Vector to store
+ *
+ * Compare input values with local address to select table to store.
+ *
+ * @param direction variable used to store the direction (inbound or outbound).
+ * @param cmp index read from hash table.
+ * @param proto the protocol read.
+ *
+ * @return It returns the vector (inbound or outbound) where the socket must be stored.
+ */
+netdata_vector_plot_t * select_vector_to_store(uint32_t *direction, netdata_socket_idx_t *cmp, uint8_t proto)
+{
+ if (!listen_ports) {
+ *direction = NETDATA_OUTBOUND_DIRECTION;
+ return &outbound_vectors;
+ }
+
+ ebpf_network_viewer_port_list_t *move_ports = listen_ports;
+ while (move_ports) {
+ if (move_ports->protocol == proto && move_ports->first == cmp->sport) {
+ *direction = NETDATA_INBOUND_DIRECTION;
+ return &inbound_vectors;
+ }
+
+ move_ports = move_ports->next;
+ }
+
+ *direction = NETDATA_OUTBOUND_DIRECTION;
+ return &outbound_vectors;
+}
+
+/**
+ * Hash accumulator
+ *
+ * @param values the values used to calculate the data.
+ * @param key the key to store data.
+ * @param removesock flag set when this socket must be removed.
+ * @param family the connection family
+ * @param end the values size.
+ */
+static void hash_accumulator(netdata_socket_t *values, netdata_socket_idx_t *key, int *removesock, int family, int end)
+{
+ uint64_t bsent = 0, brecv = 0, psent = 0, precv = 0;
+ uint16_t retransmit = 0;
+ int i;
+ uint8_t protocol = values[0].protocol;
+ uint64_t ct = values[0].ct;
+ for (i = 1; i < end; i++) {
+ netdata_socket_t *w = &values[i];
+
+ precv += w->recv_packets;
+ psent += w->sent_packets;
+ brecv += w->recv_bytes;
+ bsent += w->sent_bytes;
+ retransmit += w->retransmit;
+
+ if (!protocol)
+ protocol = w->protocol;
+
+ if (w->ct > ct)
+ ct = w->ct;
+
+ *removesock += (int)w->removeme;
+ }
+
+ values[0].recv_packets += precv;
+ values[0].sent_packets += psent;
+ values[0].recv_bytes += brecv;
+ values[0].sent_bytes += bsent;
+ values[0].retransmit += retransmit;
+ values[0].removeme += (uint8_t)*removesock;
+ values[0].protocol = (!protocol)?IPPROTO_TCP:protocol;
+ values[0].ct = ct;
+
+ if (is_socket_allowed(key, family)) {
+ uint32_t dir;
+ netdata_vector_plot_t *table = select_vector_to_store(&dir, key, protocol);
+ store_socket_inside_avl(table, &values[0], key, family, dir);
+ }
+}
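+
+/*
+ * Background for the loop above (inferred from the read pattern): on kernels
+ * 4.15+ these hash tables are per-CPU, so one bpf_map_lookup_elem() fills one
+ * netdata_socket_t slot per processor. The function folds slots 1..end-1
+ * into slot 0 so the rest of the collector can treat the result as a single
+ * accumulated sample; on older kernels `end` is 1 and the loop body never
+ * runs.
+ */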
+
+/**
+ * Read socket hash table
+ *
+ * Read data from hash tables created on kernel ring.
+ *
+ * @param fd the hash table with data.
+ * @param family the family associated to the hash table
+ * @param network_connection whether network viewer data must be accumulated
+ */
+static void read_socket_hash_table(int fd, int family, int network_connection)
+{
+ if (wait_to_plot)
+ return;
+
+ netdata_socket_idx_t key = {};
+ netdata_socket_idx_t next_key;
+ netdata_socket_idx_t removeme;
+ int removesock = 0;
+
+ netdata_socket_t *values = socket_values;
+ size_t length = ebpf_nprocs*sizeof(netdata_socket_t);
+ int test, end = (running_on_kernel < NETDATA_KERNEL_V4_15) ? 1 : ebpf_nprocs;
+
+ while (bpf_map_get_next_key(fd, &key, &next_key) == 0) {
+ // We need to reset the values when we are working on kernel 4.15 or newer, because the kernel does not
+ // create values for a specific processor unless that processor stored data. As a result of this
+ // behavior, the next socket read could otherwise keep values from the previous one.
+ memset(values, 0, length);
+ test = bpf_map_lookup_elem(fd, &key, values);
+ if (test < 0) {
+ key = next_key;
+ continue;
+ }
+
+ if (removesock)
+ bpf_map_delete_elem(fd, &removeme);
+
+ if (network_connection) {
+ removesock = 0;
+ hash_accumulator(values, &key, &removesock, family, end);
+ }
+
+ if (removesock)
+ removeme = key;
+
+ key = next_key;
+ }
+
+ if (removesock)
+ bpf_map_delete_elem(fd, &removeme);
+
+ test = bpf_map_lookup_elem(fd, &next_key, values);
+ if (test < 0) {
+ return;
+ }
+
+ if (network_connection) {
+ removesock = 0;
+ hash_accumulator(values, &next_key, &removesock, family, end);
+ }
+
+ if (removesock)
+ bpf_map_delete_elem(fd, &next_key);
+}
+
+/**
+ * Update listen table
+ *
+ * Update the linked list when necessary.
+ *
+ * @param value the port we are listening on.
+ * @param proto the protocol used with the port connection.
+ */
+void update_listen_table(uint16_t value, uint8_t proto)
+{
+ ebpf_network_viewer_port_list_t *w;
+ if (likely(listen_ports)) {
+ ebpf_network_viewer_port_list_t *move = listen_ports, *store = listen_ports;
+ while (move) {
+ if (move->protocol == proto && move->first == value)
+ return;
+
+ store = move;
+ move = move->next;
+ }
+
+ w = callocz(1, sizeof(ebpf_network_viewer_port_list_t));
+ w->first = value;
+ w->protocol = proto;
+ store->next = w;
+ } else {
+ w = callocz(1, sizeof(ebpf_network_viewer_port_list_t));
+ w->first = value;
+ w->protocol = proto;
+
+ listen_ports = w;
+ }
+
+#ifdef NETDATA_INTERNAL_CHECKS
+ info("The network viewer is monitoring inbound connections for port %u", ntohs(value));
+#endif
+}
+
+/**
+ * Read listen table
+ *
+ * Read the table with all the ports the host is listening on.
+ */
+static void read_listen_table()
+{
+ uint16_t key = 0;
+ uint16_t next_key;
+
+ int fd = map_fd[NETDATA_SOCKET_LISTEN_TABLE];
+ uint8_t value;
+ while (bpf_map_get_next_key(fd, &key, &next_key) == 0) {
+ int test = bpf_map_lookup_elem(fd, &key, &value);
+ if (test < 0) {
+ key = next_key;
+ continue;
+ }
+
+ // The correct protocol must come from the kernel
+ update_listen_table(htons(key), (key == 53) ? IPPROTO_UDP : IPPROTO_TCP);
+
+ key = next_key;
+ }
+
+ if (next_key) {
+ // The correct protocol must come from the kernel
+ update_listen_table(htons(next_key), (next_key == 53) ? IPPROTO_UDP : IPPROTO_TCP);
+ }
+}
+
+/**
+ * Socket read hash
+ *
+ * This is the thread callback.
+ * This thread is necessary, because we cannot block the whole plugin while reading data from very busy sockets.
+ *
+ * @param ptr a pointer to the `ebpf_module_t` structure used by this thread.
+ *
+ * @return It always returns NULL.
+ */
+void *ebpf_socket_read_hash(void *ptr)
+{
+ ebpf_module_t *em = (ebpf_module_t *)ptr;
+
+ read_thread_closed = 0;
+ heartbeat_t hb;
+ heartbeat_init(&hb);
+ usec_t step = NETDATA_SOCKET_READ_SLEEP_MS;
+ int fd_ipv4 = map_fd[NETDATA_SOCKET_IPV4_HASH_TABLE];
+ int fd_ipv6 = map_fd[NETDATA_SOCKET_IPV6_HASH_TABLE];
+ int network_connection = em->optional;
+ while (!close_ebpf_plugin) {
+ usec_t dt = heartbeat_next(&hb, step);
+ (void)dt;
+
+ pthread_mutex_lock(&nv_mutex);
+ read_listen_table();
+ read_socket_hash_table(fd_ipv4, AF_INET, network_connection);
+ read_socket_hash_table(fd_ipv6, AF_INET6, network_connection);
+ wait_to_plot = 1;
+ pthread_mutex_unlock(&nv_mutex);
+ }
+
+ read_thread_closed = 1;
+ return NULL;
+}
+
+/**
+ * Read the global hash table and store the data in the allocated vectors.
+ */
+static void read_hash_global_tables()
+{
+ uint64_t idx;
+ netdata_idx_t res[NETDATA_SOCKET_COUNTER];
+
+ netdata_idx_t *val = socket_hash_values;
+ int fd = map_fd[NETDATA_SOCKET_GLOBAL_HASH_TABLE];
+ for (idx = 0; idx < NETDATA_SOCKET_COUNTER; idx++) {
+ if (!bpf_map_lookup_elem(fd, &idx, val)) {
+ uint64_t total = 0;
+ int i;
+ int end = (running_on_kernel < NETDATA_KERNEL_V4_15) ? 1 : ebpf_nprocs;
+ for (i = 0; i < end; i++)
+ total += val[i];
+
+ res[idx] = total;
+ } else {
+ res[idx] = 0;
+ }
+ }
+
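+ // Map the accumulated totals to the aggregated dimensions:
+ // 0 = tcp_sendmsg, 1 = tcp_cleanup_rbuf, 2 = tcp_close,
+ // 3 = udp_recvmsg, 4 = udp_sendmsg, 5 = tcp_retransmit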
+ socket_aggregated_data[0].call = res[NETDATA_KEY_CALLS_TCP_SENDMSG];
+ socket_aggregated_data[1].call = res[NETDATA_KEY_CALLS_TCP_CLEANUP_RBUF];
+ socket_aggregated_data[2].call = res[NETDATA_KEY_CALLS_TCP_CLOSE];
+ socket_aggregated_data[3].call = res[NETDATA_KEY_CALLS_UDP_RECVMSG];
+ socket_aggregated_data[4].call = res[NETDATA_KEY_CALLS_UDP_SENDMSG];
+ socket_aggregated_data[5].call = res[NETDATA_KEY_TCP_RETRANSMIT];
+
+ socket_aggregated_data[0].ecall = res[NETDATA_KEY_ERROR_TCP_SENDMSG];
+ socket_aggregated_data[1].ecall = res[NETDATA_KEY_ERROR_TCP_CLEANUP_RBUF];
+ socket_aggregated_data[3].ecall = res[NETDATA_KEY_ERROR_UDP_RECVMSG];
+ socket_aggregated_data[4].ecall = res[NETDATA_KEY_ERROR_UDP_SENDMSG];
+
+ socket_aggregated_data[0].bytes = res[NETDATA_KEY_BYTES_TCP_SENDMSG];
+ socket_aggregated_data[1].bytes = res[NETDATA_KEY_BYTES_TCP_CLEANUP_RBUF];
+ socket_aggregated_data[3].bytes = res[NETDATA_KEY_BYTES_UDP_RECVMSG];
+ socket_aggregated_data[4].bytes = res[NETDATA_KEY_BYTES_UDP_SENDMSG];
+}
+
+/**
+ * Fill publish apps when necessary.
+ *
+ * @param current_pid the PID being updated.
+ * @param eb the structure with data read from memory.
+ */
+void ebpf_socket_fill_publish_apps(uint32_t current_pid, ebpf_bandwidth_t *eb)
+{
+ ebpf_socket_publish_apps_t *curr = socket_bandwidth_curr[current_pid];
+ if (!curr) {
+ curr = callocz(1, sizeof(ebpf_socket_publish_apps_t));
+ socket_bandwidth_curr[current_pid] = curr;
+ }
+
+ curr->bytes_sent = eb->bytes_sent;
+ curr->bytes_received = eb->bytes_received;
+ curr->call_tcp_sent = eb->call_tcp_sent;
+ curr->call_tcp_received = eb->call_tcp_received;
+ curr->retransmit = eb->retransmit;
+ curr->call_udp_sent = eb->call_udp_sent;
+ curr->call_udp_received = eb->call_udp_received;
+}
+
+/**
+ * Bandwidth accumulator.
+ *
+ * @param out the vector with the values to sum
+ */
+void ebpf_socket_bandwidth_accumulator(ebpf_bandwidth_t *out)
+{
+ int i, end = (running_on_kernel >= NETDATA_KERNEL_V4_15) ? ebpf_nprocs : 1;
+ ebpf_bandwidth_t *total = &out[0];
+ for (i = 1; i < end; i++) {
+ ebpf_bandwidth_t *move = &out[i];
+ total->bytes_sent += move->bytes_sent;
+ total->bytes_received += move->bytes_received;
+ total->call_tcp_sent += move->call_tcp_sent;
+ total->call_tcp_received += move->call_tcp_received;
+ total->retransmit += move->retransmit;
+ total->call_udp_sent += move->call_udp_sent;
+ total->call_udp_received += move->call_udp_received;
+ }
+}
+
+/**
+ * Update the apps data reading information from the hash table
+ */
+static void ebpf_socket_update_apps_data()
+{
+ int fd = map_fd[NETDATA_SOCKET_APPS_HASH_TABLE];
+ ebpf_bandwidth_t *eb = bandwidth_vector;
+ uint32_t key;
+ struct pid_stat *pids = root_of_pids;
+ while (pids) {
+ key = pids->pid;
+
+ if (bpf_map_lookup_elem(fd, &key, eb)) {
+ pids = pids->next;
+ continue;
+ }
+
+ ebpf_socket_bandwidth_accumulator(eb);
+
+ ebpf_socket_fill_publish_apps(key, eb);
+
+ pids = pids->next;
+ }
+}
+
+/*****************************************************************
+ *
+ * FUNCTIONS WITH THE MAIN LOOP
+ *
+ *****************************************************************/
+
+struct netdata_static_thread socket_threads = {"EBPF SOCKET READ",
+ NULL, NULL, 1, NULL,
+ NULL, ebpf_socket_read_hash };
+
+/**
+ * Main loop for this collector.
+ *
+ * @param step the number of microseconds used with the heartbeat
+ * @param em the structure with thread information
+ */
+static void socket_collector(usec_t step, ebpf_module_t *em)
+{
+ UNUSED(em);
+ UNUSED(step);
+ heartbeat_t hb;
+ heartbeat_init(&hb);
+
+ socket_threads.thread = mallocz(sizeof(netdata_thread_t));
+
+ netdata_thread_create(socket_threads.thread, socket_threads.name,
+ NETDATA_THREAD_OPTION_JOINABLE, ebpf_socket_read_hash, em);
+
+ int socket_apps_enabled = ebpf_modules[EBPF_MODULE_SOCKET_IDX].apps_charts;
+ int socket_global_enabled = ebpf_modules[EBPF_MODULE_SOCKET_IDX].global_charts;
+ int network_connection = em->optional;
+ while (!close_ebpf_plugin) {
+ pthread_mutex_lock(&collect_data_mutex);
+ pthread_cond_wait(&collect_data_cond_var, &collect_data_mutex);
+
+ if (socket_global_enabled)
+ read_hash_global_tables();
+
+ if (socket_apps_enabled)
+ ebpf_socket_update_apps_data();
+
+ calculate_nv_plot();
+
+ pthread_mutex_lock(&lock);
+ if (socket_global_enabled)
+ ebpf_socket_send_data(em);
+
+ if (socket_apps_enabled)
+ ebpf_socket_send_apps_data(em, apps_groups_root_target);
+
+ fflush(stdout);
+
+ if (network_connection) {
+ // We call fflush many times because, with a lot of dimensions, we started
+ // to get unexpected outputs and Netdata closed the plugin.
+ pthread_mutex_lock(&nv_mutex);
+ ebpf_socket_create_nv_charts(&inbound_vectors);
+ fflush(stdout);
+ ebpf_socket_send_nv_data(&inbound_vectors);
+
+ ebpf_socket_create_nv_charts(&outbound_vectors);
+ fflush(stdout);
+ ebpf_socket_send_nv_data(&outbound_vectors);
+ wait_to_plot = 0;
+ pthread_mutex_unlock(&nv_mutex);
+
+ }
+
+ pthread_mutex_unlock(&collect_data_mutex);
+ pthread_mutex_unlock(&lock);
+
+ }
+}
+
+/*****************************************************************
+ *
+ * FUNCTIONS TO CLOSE THE THREAD
+ *
+ *****************************************************************/
+
+
+/**
+ * Clean internal socket plot
+ *
+ * Clean all structures allocated with strdupz.
+ *
+ * @param ptr the pointer with addresses to clean.
+ */
+static inline void clean_internal_socket_plot(netdata_socket_plot_t *ptr)
+{
+ freez(ptr->dimension_recv);
+ freez(ptr->dimension_sent);
+ freez(ptr->resolved_name);
+ freez(ptr->dimension_retransmit);
+}
+
+/**
+ * Clean socket plot
+ *
+ * Clean the allocated data for inbound and outbound vectors.
+ */
+static void clean_allocated_socket_plot()
+{
+ uint32_t i;
+ uint32_t end = inbound_vectors.last;
+ netdata_socket_plot_t *plot = inbound_vectors.plot;
+ for (i = 0; i < end; i++) {
+ clean_internal_socket_plot(&plot[i]);
+ }
+
+ clean_internal_socket_plot(&plot[inbound_vectors.last]);
+
+ end = outbound_vectors.last;
+ plot = outbound_vectors.plot;
+ for (i = 0; i < end; i++) {
+ clean_internal_socket_plot(&plot[i]);
+ }
+ clean_internal_socket_plot(&plot[outbound_vectors.last]);
+}
+
+/**
+ * Clean network ports allocated during initialization.
+ *
+ * @param ptr a pointer to the linked list.
+ */
+static void clean_network_ports(ebpf_network_viewer_port_list_t *ptr)
+{
+ if (unlikely(!ptr))
+ return;
+
+ while (ptr) {
+ ebpf_network_viewer_port_list_t *next = ptr->next;
+ freez(ptr->value);
+ freez(ptr);
+ ptr = next;
+ }
+}
+
+/**
+ * Clean service names
+ *
+ * Clean the allocated linked list that stores names.
+ *
+ * @param names the linked list.
+ */
+static void clean_service_names(ebpf_network_viewer_dim_name_t *names)
+{
+ if (unlikely(!names))
+ return;
+
+ while (names) {
+ ebpf_network_viewer_dim_name_t *next = names->next;
+ freez(names->name);
+ freez(names);
+ names = next;
+ }
+}
+
+/**
+ * Clean hostnames
+ *
+ * @param hostnames the hostnames to clean
+ */
+static void clean_hostnames(ebpf_network_viewer_hostname_list_t *hostnames)
+{
+ if (unlikely(!hostnames))
+ return;
+
+ while (hostnames) {
+ ebpf_network_viewer_hostname_list_t *next = hostnames->next;
+ freez(hostnames->value);
+ simple_pattern_free(hostnames->value_pattern);
+ freez(hostnames);
+ hostnames = next;
+ }
+}
+
+/**
+ * Clean thread structures
+ *
+ * Free the bandwidth structures allocated per PID for the apps charts.
+ */
+void clean_thread_structures() {
+ struct pid_stat *pids = root_of_pids;
+ while (pids) {
+ freez(socket_bandwidth_curr[pids->pid]);
+
+ pids = pids->next;
+ }
+}
+
+/**
+ * Clean up the main thread.
+ *
+ * @param ptr thread data.
+ */
+static void ebpf_socket_cleanup(void *ptr)
+{
+ ebpf_module_t *em = (ebpf_module_t *)ptr;
+ if (!em->enabled)
+ return;
+
+ heartbeat_t hb;
+ heartbeat_init(&hb);
+ uint32_t tick = 2*USEC_PER_MS;
+ while (!read_thread_closed) {
+ usec_t dt = heartbeat_next(&hb, tick);
+ UNUSED(dt);
+ }
+
+ freez(socket_aggregated_data);
+ ebpf_cleanup_publish_syscall(socket_publish_aggregated);
+ freez(socket_publish_aggregated);
+ freez(socket_hash_values);
+
+ clean_thread_structures();
+ freez(socket_bandwidth_curr);
+ freez(bandwidth_vector);
+
+ freez(socket_values);
+ clean_allocated_socket_plot();
+ freez(inbound_vectors.plot);
+ freez(outbound_vectors.plot);
+
+ clean_port_structure(&listen_ports);
+
+ ebpf_modules[EBPF_MODULE_SOCKET_IDX].enabled = 0;
+
+ clean_network_ports(network_viewer_opt.included_port);
+ clean_network_ports(network_viewer_opt.excluded_port);
+ clean_service_names(network_viewer_opt.names);
+ clean_hostnames(network_viewer_opt.included_hostnames);
+ clean_hostnames(network_viewer_opt.excluded_hostnames);
+
+ pthread_mutex_destroy(&nv_mutex);
+ freez(socket_data.map_fd);
+
+ freez(socket_threads.thread);
+
+ struct bpf_program *prog;
+ size_t i = 0;
+ bpf_object__for_each_program(prog, objects) {
+ bpf_link__destroy(probe_links[i]);
+ i++;
+ }
+ bpf_object__close(objects);
+ finalized_threads = 1;
+}
+
+/*****************************************************************
+ *
+ * FUNCTIONS TO START THREAD
+ *
+ *****************************************************************/
+
+/**
+ * Allocate vectors used with this thread.
+ * We do not test the return value, because callocz does this internally and shuts down
+ * the software when an allocation is not possible.
+ *
+ * @param length is the length for the vectors used inside the collector.
+ */
+static void ebpf_socket_allocate_global_vectors(size_t length)
+{
+ socket_aggregated_data = callocz(length, sizeof(netdata_syscall_stat_t));
+ socket_publish_aggregated = callocz(length, sizeof(netdata_publish_syscall_t));
+ socket_hash_values = callocz(ebpf_nprocs, sizeof(netdata_idx_t));
+
+ socket_bandwidth_curr = callocz((size_t)pid_max, sizeof(ebpf_socket_publish_apps_t *));
+ bandwidth_vector = callocz((size_t)ebpf_nprocs, sizeof(ebpf_bandwidth_t));
+
+ socket_values = callocz((size_t)ebpf_nprocs, sizeof(netdata_socket_t));
+ inbound_vectors.plot = callocz(network_viewer_opt.max_dim, sizeof(netdata_socket_plot_t));
+ outbound_vectors.plot = callocz(network_viewer_opt.max_dim, sizeof(netdata_socket_plot_t));
+}
+
+/**
+ * Set local function pointers. This function will never be compiled with static libraries.
+ */
+static void set_local_pointers()
+{
+ map_fd = socket_data.map_fd;
+}
+
+/**
+ * Initialize Inbound and Outbound
+ *
+ * Initialize the common outbound and inbound sockets.
+ */
+static void initialize_inbound_outbound()
+{
+ inbound_vectors.last = network_viewer_opt.max_dim - 1;
+ outbound_vectors.last = inbound_vectors.last;
+ fill_last_nv_dimension(&inbound_vectors.plot[inbound_vectors.last], 0);
+ fill_last_nv_dimension(&outbound_vectors.plot[outbound_vectors.last], 1);
+}
+
+/*****************************************************************
+ *
+ * EBPF SOCKET THREAD
+ *
+ *****************************************************************/
+
+/**
+ * Socket thread
+ *
+ * Thread used to generate socket charts.
+ *
+ * @param ptr a pointer to `struct ebpf_module`
+ *
+ * @return It always returns NULL.
+ */
+void *ebpf_socket_thread(void *ptr)
+{
+ netdata_thread_cleanup_push(ebpf_socket_cleanup, ptr);
+
+ avl_init_lock(&inbound_vectors.tree, compare_sockets);
+ avl_init_lock(&outbound_vectors.tree, compare_sockets);
+
+ ebpf_module_t *em = (ebpf_module_t *)ptr;
+ fill_ebpf_data(&socket_data);
+
+ if (!em->enabled)
+ goto endsocket;
+
+ if (pthread_mutex_init(&nv_mutex, NULL)) {
+ error("Cannot initialize local mutex");
+ goto endsocket;
+ }
+ pthread_mutex_lock(&lock);
+
+ ebpf_socket_allocate_global_vectors(NETDATA_MAX_SOCKET_VECTOR);
+ initialize_inbound_outbound();
+
+ if (ebpf_update_kernel(&socket_data)) {
+ pthread_mutex_unlock(&lock);
+ goto endsocket;
+ }
+
+ set_local_pointers();
+ probe_links = ebpf_load_program(ebpf_plugin_dir, em, kernel_string, &objects, socket_data.map_fd);
+ if (!probe_links) {
+ pthread_mutex_unlock(&lock);
+ goto endsocket;
+ }
+
+ int algorithms[NETDATA_MAX_SOCKET_VECTOR] = {
+ NETDATA_EBPF_ABSOLUTE_IDX, NETDATA_EBPF_ABSOLUTE_IDX, NETDATA_EBPF_ABSOLUTE_IDX,
+ NETDATA_EBPF_ABSOLUTE_IDX, NETDATA_EBPF_ABSOLUTE_IDX, NETDATA_EBPF_ABSOLUTE_IDX
+ };
+ ebpf_global_labels(
+ socket_aggregated_data, socket_publish_aggregated, socket_dimension_names, socket_id_names,
+ algorithms, NETDATA_MAX_SOCKET_VECTOR);
+
+ ebpf_create_global_charts(em);
+
+ finalized_threads = 0;
+ pthread_mutex_unlock(&lock);
+
+ socket_collector((usec_t)(em->update_time * USEC_PER_SEC), em);
+
+endsocket:
+ netdata_thread_cleanup_pop(1);
+ return NULL;
+}
diff --git a/collectors/ebpf.plugin/ebpf_socket.h b/collectors/ebpf.plugin/ebpf_socket.h
new file mode 100644
index 0000000..1316c00
--- /dev/null
+++ b/collectors/ebpf.plugin/ebpf_socket.h
@@ -0,0 +1,275 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+#ifndef NETDATA_EBPF_SOCKET_H
+#define NETDATA_EBPF_SOCKET_H 1
+#include <stdint.h>
+#include "libnetdata/avl/avl.h"
+
+// Vector indexes
+#define NETDATA_MAX_SOCKET_VECTOR 6
+#define NETDATA_UDP_START 3
+#define NETDATA_RETRANSMIT_START 5
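+// NETDATA_UDP_START and NETDATA_RETRANSMIT_START mark where the UDP and retransmit
+// dimensions begin inside the aggregated vector of NETDATA_MAX_SOCKET_VECTOR entries.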
+
+#define NETDATA_SOCKET_APPS_HASH_TABLE 0
+#define NETDATA_SOCKET_IPV4_HASH_TABLE 1
+#define NETDATA_SOCKET_IPV6_HASH_TABLE 2
+#define NETDATA_SOCKET_GLOBAL_HASH_TABLE 4
+#define NETDATA_SOCKET_LISTEN_TABLE 5
+
+#define NETDATA_SOCKET_READ_SLEEP_MS 800000ULL // 800 ms expressed in microseconds
+
+typedef enum ebpf_socket_idx {
+ NETDATA_KEY_CALLS_TCP_SENDMSG,
+ NETDATA_KEY_ERROR_TCP_SENDMSG,
+ NETDATA_KEY_BYTES_TCP_SENDMSG,
+
+ NETDATA_KEY_CALLS_TCP_CLEANUP_RBUF,
+ NETDATA_KEY_ERROR_TCP_CLEANUP_RBUF,
+ NETDATA_KEY_BYTES_TCP_CLEANUP_RBUF,
+
+ NETDATA_KEY_CALLS_TCP_CLOSE,
+
+ NETDATA_KEY_CALLS_UDP_RECVMSG,
+ NETDATA_KEY_ERROR_UDP_RECVMSG,
+ NETDATA_KEY_BYTES_UDP_RECVMSG,
+
+ NETDATA_KEY_CALLS_UDP_SENDMSG,
+ NETDATA_KEY_ERROR_UDP_SENDMSG,
+ NETDATA_KEY_BYTES_UDP_SENDMSG,
+
+ NETDATA_KEY_TCP_RETRANSMIT,
+
+ NETDATA_SOCKET_COUNTER
+} ebpf_socket_index_t;
+
+#define NETDATA_SOCKET_GROUP "Socket"
+#define NETDATA_NETWORK_CONNECTIONS_GROUP "Network connections"
+
+// Global chart name
+#define NETDATA_TCP_FUNCTION_COUNT "tcp_functions"
+#define NETDATA_TCP_FUNCTION_BITS "total_tcp_bandwidth"
+#define NETDATA_TCP_FUNCTION_ERROR "tcp_error"
+#define NETDATA_TCP_RETRANSMIT "tcp_retransmit"
+#define NETDATA_UDP_FUNCTION_COUNT "udp_functions"
+#define NETDATA_UDP_FUNCTION_BITS "total_udp_bandwidth"
+#define NETDATA_UDP_FUNCTION_ERROR "udp_error"
+
+// Charts created on Apps submenu
+#define NETDATA_NET_APPS_BANDWIDTH_SENT "total_bandwidth_sent"
+#define NETDATA_NET_APPS_BANDWIDTH_RECV "total_bandwidth_recv"
+#define NETDATA_NET_APPS_BANDWIDTH_TCP_SEND_CALLS "bandwidth_tcp_send"
+#define NETDATA_NET_APPS_BANDWIDTH_TCP_RECV_CALLS "bandwidth_tcp_recv"
+#define NETDATA_NET_APPS_BANDWIDTH_TCP_RETRANSMIT "bandwidth_tcp_retransmit"
+#define NETDATA_NET_APPS_BANDWIDTH_UDP_SEND_CALLS "bandwidth_udp_send"
+#define NETDATA_NET_APPS_BANDWIDTH_UDP_RECV_CALLS "bandwidth_udp_recv"
+
+// Network viewer charts
+#define NETDATA_NV_OUTBOUND_BYTES "outbound_bytes"
+#define NETDATA_NV_OUTBOUND_PACKETS "outbound_packets"
+#define NETDATA_NV_OUTBOUND_RETRANSMIT "outbound_retransmit"
+#define NETDATA_NV_INBOUND_BYTES "inbound_bytes"
+#define NETDATA_NV_INBOUND_PACKETS "inbound_packets"
+
+// Port range
+#define NETDATA_MINIMUM_PORT_VALUE 1
+#define NETDATA_MAXIMUM_PORT_VALUE 65535
+
+#define NETDATA_MINIMUM_IPV4_CIDR 0
+#define NETDATA_MAXIMUM_IPV4_CIDR 32
+
+typedef struct ebpf_socket_publish_apps {
+ // Data read
+ uint64_t bytes_sent; // Bytes sent
+ uint64_t bytes_received; // Bytes received
+ uint64_t call_tcp_sent; // Number of times tcp_sendmsg was called
+ uint64_t call_tcp_received; // Number of times tcp_cleanup_rbuf was called
+ uint64_t retransmit; // Number of times tcp_retransmit was called
+ uint64_t call_udp_sent; // Number of times udp_sendmsg was called
+ uint64_t call_udp_received; // Number of times udp_recvmsg was called
+
+ // Publish information.
+ uint64_t publish_sent_bytes;
+ uint64_t publish_received_bytes;
+ uint64_t publish_tcp_sent;
+ uint64_t publish_tcp_received;
+ uint64_t publish_retransmit;
+ uint64_t publish_udp_sent;
+ uint64_t publish_udp_received;
+} ebpf_socket_publish_apps_t;
+
+typedef struct ebpf_network_viewer_dimension_names {
+ char *name;
+ uint32_t hash;
+
+ uint16_t port;
+
+ struct ebpf_network_viewer_dimension_names *next;
+} ebpf_network_viewer_dim_name_t;
+
+typedef struct ebpf_network_viewer_port_list {
+ char *value;
+ uint32_t hash;
+
+ uint16_t first;
+ uint16_t last;
+
+ uint16_t cmp_first;
+ uint16_t cmp_last;
+
+ uint8_t protocol;
+ struct ebpf_network_viewer_port_list *next;
+} ebpf_network_viewer_port_list_t;
+
+/**
+ * Union used to store ip addresses
+ */
+union netdata_ip_t {
+ uint8_t addr8[16];
+ uint16_t addr16[8];
+ uint32_t addr32[4];
+ uint64_t addr64[2];
+};
+
+typedef struct ebpf_network_viewer_ip_list {
+ char *value; // IP value
+ uint32_t hash; // IP hash
+
+ uint8_t ver; // IP version
+
+ union netdata_ip_t first; // The first IP address of the range
+ union netdata_ip_t last; // The last IP address of the range
+
+ struct ebpf_network_viewer_ip_list *next;
+} ebpf_network_viewer_ip_list_t;
+
+typedef struct ebpf_network_viewer_hostname_list {
+ char *value; // Hostname value
+ uint32_t hash; // Hostname hash
+
+ SIMPLE_PATTERN *value_pattern;
+
+ struct ebpf_network_viewer_hostname_list *next;
+} ebpf_network_viewer_hostname_list_t;
+
+#define NETDATA_NV_CAP_VALUE 50L
+typedef struct ebpf_network_viewer_options {
+ uint32_t max_dim; // Store value read from 'maximum dimensions'
+
+ uint32_t hostname_resolution_enabled;
+ uint32_t service_resolution_enabled;
+
+ ebpf_network_viewer_port_list_t *excluded_port;
+ ebpf_network_viewer_port_list_t *included_port;
+
+ ebpf_network_viewer_dim_name_t *names;
+
+ ebpf_network_viewer_ip_list_t *excluded_ips;
+ ebpf_network_viewer_ip_list_t *included_ips;
+
+ ebpf_network_viewer_hostname_list_t *excluded_hostnames;
+ ebpf_network_viewer_hostname_list_t *included_hostnames;
+
+ ebpf_network_viewer_ip_list_t *ipv4_local_ip;
+ ebpf_network_viewer_ip_list_t *ipv6_local_ip;
+} ebpf_network_viewer_options_t;
+
+extern ebpf_network_viewer_options_t network_viewer_opt;
+
+/**
+ * Structure to store socket information
+ */
+typedef struct netdata_socket {
+ uint64_t recv_packets;
+ uint64_t sent_packets;
+ uint64_t recv_bytes;
+ uint64_t sent_bytes;
+ uint64_t first; // First timestamp
+ uint64_t ct; // Current timestamp
+ uint16_t retransmit; // It is never used with UDP
+ uint8_t protocol;
+ uint8_t removeme;
+ uint32_t reserved;
+} netdata_socket_t __attribute__((__aligned__(8)));
+
+
+typedef struct netdata_plot_values {
+ // Values used in the previous iteration
+ uint64_t recv_packets;
+ uint64_t sent_packets;
+ uint64_t recv_bytes;
+ uint64_t sent_bytes;
+ uint16_t retransmit;
+
+ uint64_t last_time;
+
+ // Values used to plot
+ uint64_t plot_recv_packets;
+ uint64_t plot_sent_packets;
+ uint64_t plot_recv_bytes;
+ uint64_t plot_sent_bytes;
+ uint16_t plot_retransmit;
+} netdata_plot_values_t;
+
+/**
+ * Index used together with the previous structure; it is the key for the socket hash tables.
+ */
+typedef struct netdata_socket_idx {
+ union netdata_ip_t saddr;
+ uint16_t sport;
+ union netdata_ip_t daddr;
+ uint16_t dport;
+} netdata_socket_idx_t __attribute__((__aligned__(8)));
+
+// The next values were defined according to getnameinfo(3)
+#define NETDATA_MAX_NETWORK_COMBINED_LENGTH 1018
+#define NETDATA_DOTS_PROTOCOL_COMBINED_LENGTH 5 // :TCP:
+#define NETDATA_DIM_LENGTH_WITHOUT_SERVICE_PROTOCOL 979
+
+#define NETDATA_INBOUND_DIRECTION (uint32_t)1
+#define NETDATA_OUTBOUND_DIRECTION (uint32_t)2
+/**
+ * Allocate the maximum number of structures in the beginning; this can force the collector to use more memory
+ * in the long term, but on the other hand it is faster.
+ */
+typedef struct netdata_socket_plot {
+ // Search
+ avl avl;
+ netdata_socket_idx_t index;
+
+ // Current data
+ netdata_socket_t sock;
+
+ // Previous values and values used to write on chart.
+ netdata_plot_values_t plot;
+
+ int family; // AF_INET or AF_INET6
+ char *resolved_name; // Resolve only in the first call
+ unsigned char resolved;
+
+ char *dimension_sent;
+ char *dimension_recv;
+ char *dimension_retransmit;
+
+ uint32_t flags;
+} netdata_socket_plot_t;
+
+#define NETWORK_VIEWER_CHARTS_CREATED (uint32_t)1
+typedef struct netdata_vector_plot {
+ netdata_socket_plot_t *plot; // Vector used to plot charts
+
+ avl_tree_lock tree; // AVL tree to speed up search
+ uint32_t last; // The 'other' dimension, the last chart accepted.
+ uint32_t next; // The next position to store in the vector.
+ uint32_t max_plot; // Max number of elements to plot.
+ uint32_t last_plot; // Last element plot
+
+ uint32_t flags; // Flags
+
+} netdata_vector_plot_t;
+
+extern void clean_port_structure(ebpf_network_viewer_port_list_t **clean);
+extern ebpf_network_viewer_port_list_t *listen_ports;
+extern void update_listen_table(uint16_t value, uint8_t proto);
+
+extern ebpf_socket_publish_apps_t **socket_bandwidth_curr;
+
+#endif
diff --git a/collectors/ebpf.plugin/reset_netdata_trace.sh.in b/collectors/ebpf.plugin/reset_netdata_trace.sh.in
new file mode 100644
index 0000000..51d981e
--- /dev/null
+++ b/collectors/ebpf.plugin/reset_netdata_trace.sh.in
@@ -0,0 +1,9 @@
+#!/bin/bash
+
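+# Remove every kprobe whose name contains "_netdata_" from the kernel tracing
+# subsystem, cleaning up probes left behind by the eBPF plugin.
+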
+KPROBE_FILE="/sys/kernel/debug/tracing/kprobe_events"
+
+DATA="$(grep _netdata_ $KPROBE_FILE| cut -d' ' -f1 | cut -d: -f2)"
+
+for I in $DATA; do
+ echo "-:$I" > "$KPROBE_FILE" 2>/dev/null
+done
diff --git a/collectors/fping.plugin/Makefile.am b/collectors/fping.plugin/Makefile.am
new file mode 100644
index 0000000..9065483
--- /dev/null
+++ b/collectors/fping.plugin/Makefile.am
@@ -0,0 +1,24 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+CLEANFILES = \
+ fping.plugin \
+ $(NULL)
+
+include $(top_srcdir)/build/subst.inc
+SUFFIXES = .in
+
+dist_plugins_SCRIPTS = \
+ fping.plugin \
+ $(NULL)
+
+dist_noinst_DATA = \
+ fping.plugin.in \
+ README.md \
+ $(NULL)
+
+dist_libconfig_DATA = \
+ fping.conf \
+ $(NULL)
diff --git a/collectors/fping.plugin/README.md b/collectors/fping.plugin/README.md
new file mode 100644
index 0000000..4aca2a9
--- /dev/null
+++ b/collectors/fping.plugin/README.md
@@ -0,0 +1,102 @@
+<!--
+title: "fping.plugin"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/fping.plugin/README.md
+-->
+
+# fping.plugin
+
+The fping plugin supports monitoring latency, packet loss and uptime of any number of network endpoints,
+by pinging them with `fping`.
+
+A recent version of `fping` is required (one that supports option `-N`).
+The supplied plugin can install it, by running:
+
+```sh
+/usr/libexec/netdata/plugins.d/fping.plugin install
+```
+
+The above will download, build and install the right version as `/usr/local/bin/fping`.
+
+Then you need to edit `/etc/netdata/fping.conf` (to edit it on your system run
+`/etc/netdata/edit-config fping.conf`) like this:
+
+```sh
+# uncomment the following line - it should already be there
+fping="/usr/local/bin/fping"
+
+# set here all the hosts you need to ping
+# I suggest using hostnames and putting their IPs in /etc/hosts
+hosts="host1 host2 host3"
+
+# override the chart update frequency - the default is inherited from Netdata
+update_every=1
+
+# time in milliseconds (1 sec = 1000 ms) to ping the hosts
+# 200 = 5 pings per second
+ping_every=200
+
+# other fping options - these are the defaults
+fping_opts="-R -b 56 -i 1 -r 0 -t 5000"
+```
+
+## Alarms
+
+Netdata will automatically attach a few alarms for each host.
+Check the [latest versions of the fping alarms](https://raw.githubusercontent.com/netdata/netdata/master/health/health.d/fping.conf)
+
+## Additional Tips
+
+### Customizing Amount of Pings Per Second
+
+For example, to update the chart every 10 seconds and use 2 pings every 10 seconds, use this:
+
+```sh
+# Chart Update Frequency (Time in Seconds)
+update_every=10
+
+# Time in Milliseconds (1 sec = 1000 ms) to Ping the Hosts
+# The Following Example Sends 1 Ping Every 5000 ms
+# Calculation Formula: ping_every = (update_every * 1000 ) / 2
+ping_every=5000
+```
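+
+More generally, for `N` pings per chart update the formula is `ping_every = (update_every * 1000) / N`;
+the default of 5 pings per second, for instance, corresponds to `update_every=1` and `N=5`, giving `ping_every=200`.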
+
+### Multiple fping Plugins With Different Settings
+
+You may need to run multiple fping plugins with different settings for different end points.
+For example, you may need to ping a few hosts 10 times per second, and others once per second.
+
+Netdata allows you to add as many `fping` plugins as you like.
+
+Follow this procedure:
+
+**1. Create New fping Configuration File**
+
+```sh
+# Step Into Configuration Directory
+cd /etc/netdata
+
+# Copy Original fping Configuration File To New Configuration File
+cp fping.conf fping2.conf
+```
+
+Edit `fping2.conf` and configure the settings and the hosts you need for the second instance.
+
+**2. Soft Link Original fping Plugin to New Plugin File**
+
+```sh
+# Become root (If This Step Is Performed As A Non-Root User)
+sudo su
+
+# Step Into The Plugins Directory
+cd /usr/libexec/netdata/plugins.d
+
+# Link fping.plugin to fping2.plugin
+ln -s fping.plugin fping2.plugin
+```
+
+That's it. Netdata will detect the new plugin and start it.
+
+You can name the new plugin any name you like.
+Just make sure the plugin and the configuration file have the same name.
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Ffping.plugin%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/fping.plugin/fping.conf b/collectors/fping.plugin/fping.conf
new file mode 100644
index 0000000..63a7f7a
--- /dev/null
+++ b/collectors/fping.plugin/fping.conf
@@ -0,0 +1,44 @@
+# no need for shebang - this file is sourced from fping.plugin
+
+# fping.plugin requires a recent version of fping.
+#
+# You can get it on your system, by running:
+#
+# /usr/libexec/netdata/plugins.d/fping.plugin install
+
+# -----------------------------------------------------------------------------
+# configuration options
+
+# The fping binary to use. We need one that can output netdata friendly info
+# (supporting: -N). If you have multiple versions, put here the full filename
+# of the right one
+
+#fping="/usr/local/bin/fping"
+
+
+# a space separated list of hosts to fping
+# we suggest putting names here and the IPs of these names in /etc/hosts
+
+hosts=""
+
+
+# The update frequency of the chart - the default is inherited from netdata
+
+#update_every=2
+
+
+# The time in milliseconds (1 sec = 1000 ms) to ping the hosts
+# by default 5 pings per host per iteration
+# fping will not allow this to be below 20ms
+
+#ping_every="200"
+
+
+# other fping options - defaults:
+# -R = send packets with random data
+# -b 56 = the number of bytes per packet
+# -i 1 = 1 ms when sending packets to other hosts (switching hosts)
+# -r 0 = never retry packets
+# -t 5000 = per packet timeout at 5000 ms
+
+#fping_opts="-R -b 56 -i 1 -r 0 -t 5000"
diff --git a/collectors/fping.plugin/fping.plugin.in b/collectors/fping.plugin/fping.plugin.in
new file mode 100755
index 0000000..5518194
--- /dev/null
+++ b/collectors/fping.plugin/fping.plugin.in
@@ -0,0 +1,200 @@
+#!/usr/bin/env bash
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# netdata
+# real-time performance and health monitoring, done right!
+# (C) 2017 Costa Tsaousis <costa@tsaousis.gr>
+# GPL v3+
+#
+# This plugin requires a recent version of fping.
+# You can compile it from source, by running me with option: install
+
+export PATH="${PATH}:/sbin:/usr/sbin:/usr/local/sbin"
+export LC_ALL=C
+
+if [ "${1}" = "install" ]
+ then
+ [ "${UID}" != 0 ] && echo >&2 "Please run me as root. This will install a single binary file: /usr/local/bin/fping." && exit 1
+
+ run() {
+ printf >&2 " > "
+ printf >&2 "%q " "${@}"
+ printf >&2 "\n"
+ "${@}" || exit 1
+ }
+
+ download() {
+ local curl="$(which curl 2>/dev/null || command -v curl 2>/dev/null)"
+ [ ! -z "${curl}" ] && run curl -s -L "${1}" && return 0
+
+ local wget="$(which wget 2>/dev/null || command -v wget 2>/dev/null)"
+ [ ! -z "${wget}" ] && run wget -q -O - "${1}" && return 0
+
+ echo >&2 "Cannot find 'curl' or 'wget' in this system." && exit 1
+ }
+
+ [ ! -d /usr/src ] && run mkdir -p /usr/src
+ [ ! -d /usr/local/bin ] && run mkdir -p /usr/local/bin
+
+ run cd /usr/src
+
+ if [ -d fping-4.2 ]
+ then
+ run rm -rf fping-4.2 || exit 1
+ fi
+
+ download 'https://github.com/schweikert/fping/releases/download/v4.2/fping-4.2.tar.gz' | run tar -zxvpf -
+ [ $? -ne 0 ] && exit 1
+ run cd fping-4.2 || exit 1
+
+ run ./configure --prefix=/usr/local
+ run make clean
+ run make
+ if [ -f /usr/local/bin/fping ]
+ then
+ run mv -f /usr/local/bin/fping /usr/local/bin/fping.old
+ fi
+ run mv src/fping /usr/local/bin/fping
+ run chown root:root /usr/local/bin/fping
+ run chmod 4755 /usr/local/bin/fping
+ echo >&2
+ echo >&2 "All done, you have a compatible fping now at /usr/local/bin/fping."
+ echo >&2
+
+ fping="$(which fping 2>/dev/null || command -v fping 2>/dev/null)"
+ if [ "${fping}" != "/usr/local/bin/fping" ]
+ then
+ echo >&2 "You have another fping installed at: ${fping}."
+ echo >&2 "Please set:"
+ echo >&2
+ echo >&2 " fping=\"/usr/local/bin/fping\""
+ echo >&2
+ echo >&2 "at /etc/netdata/fping.conf"
+ echo >&2
+ fi
+ exit 0
+fi
+
+# -----------------------------------------------------------------------------
+
+PROGRAM_NAME="$(basename "${0}")"
+
+logdate() {
+ date "+%Y-%m-%d %H:%M:%S"
+}
+
+log() {
+ local status="${1}"
+ shift
+
+ echo >&2 "$(logdate): ${PROGRAM_NAME}: ${status}: ${*}"
+
+}
+
+warning() {
+ log WARNING "${@}"
+}
+
+error() {
+ log ERROR "${@}"
+}
+
+info() {
+ log INFO "${@}"
+}
+
+fatal() {
+ log FATAL "${@}"
+ echo "DISABLE"
+ exit 1
+}
+
+debug=0
+debug() {
+ [ $debug -eq 1 ] && log DEBUG "${@}"
+}
+
+# -----------------------------------------------------------------------------
+
+# store in ${plugin} the name we run under
+# this allows us to copy/link fping.plugin under a different name
+# to have multiple fping plugins running with different settings
+plugin="${PROGRAM_NAME/.plugin/}"
+
+
+# -----------------------------------------------------------------------------
+
+# the frequency to send info to netdata
+# passed by netdata as the first parameter
+update_every="${1-1}"
+
+# the netdata configuration directory
+# passed by netdata as an environment variable
+[ -z "${NETDATA_USER_CONFIG_DIR}" ] && NETDATA_USER_CONFIG_DIR="@configdir_POST@"
+[ -z "${NETDATA_STOCK_CONFIG_DIR}" ] && NETDATA_STOCK_CONFIG_DIR="@libconfigdir_POST@"
+
+# -----------------------------------------------------------------------------
+# configuration options
+# can be overwritten at /etc/netdata/fping.conf
+
+# the fping binary to use
+# we need one that can output netdata friendly info (supporting: -N)
+# if you have multiple versions, put here the full filename of the right one
+fping="$( which fping 2>/dev/null || command -v fping 2>/dev/null )"
+
+# a space separated list of hosts to fping
+# we suggest putting names here and the IPs of these names in /etc/hosts
+hosts=""
+
+# the time in milliseconds (1 sec = 1000 ms)
+# to ping the hosts - by default 5 pings per host per iteration
+ping_every="$((update_every * 1000 / 5))"
+
+# fping options
+fping_opts="-R -b 56 -i 1 -r 0 -t 5000"
+
+# -----------------------------------------------------------------------------
+# load the configuration files
+
+for CONFIG in "${NETDATA_STOCK_CONFIG_DIR}/${plugin}.conf" "${NETDATA_USER_CONFIG_DIR}/${plugin}.conf"
+do
+ if [ -f "${CONFIG}" ]
+ then
+ info "Loading config file '${CONFIG}'..."
+ source "${CONFIG}"
+ [ $? -ne 0 ] && error "Failed to load config file '${CONFIG}'."
+ else
+ warning "Cannot find file '${CONFIG}'."
+ fi
+done
+
+if [ -z "${hosts}" ]
+then
+ fatal "no hosts configured - nothing to do."
+fi
+
+if [ -z "${fping}" ]
+then
+ fatal "fping command is not found. Please set its full path in '${NETDATA_USER_CONFIG_DIR}/${plugin}.conf'"
+fi
+
+if [ ! -x "${fping}" ]
+then
+ fatal "fping command '${fping}' is not executable - cannot proceed."
+fi
+
+if [ ${ping_every} -lt 20 ]
+ then
+ warning "ping every was set to ${ping_every} but 20 is the minimum for non-root users. Setting it to 20 ms."
+ ping_every=20
+fi
+
+# the fping options we will use
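+# -N = netdata-friendly output, -l = loop sending packets forever,
+# -Q = print summary results every update_every seconds,
+# -p = period in milliseconds between packets sent to each target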
+options=( -N -l -Q ${update_every} -p ${ping_every} ${fping_opts} ${hosts} )
+
+# execute fping
+info "starting fping: ${fping} ${options[*]}"
+exec "${fping}" "${options[@]}"
+
+# if we cannot execute fping, stop
+fatal "command '${fping} ${options[*]}' failed to be executed (returned code $?)."
diff --git a/collectors/freebsd.plugin/Makefile.am b/collectors/freebsd.plugin/Makefile.am
new file mode 100644
index 0000000..161784b
--- /dev/null
+++ b/collectors/freebsd.plugin/Makefile.am
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/collectors/freebsd.plugin/README.md b/collectors/freebsd.plugin/README.md
new file mode 100644
index 0000000..1b519a6
--- /dev/null
+++ b/collectors/freebsd.plugin/README.md
@@ -0,0 +1,12 @@
+<!--
+title: "freebsd.plugin"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/freebsd.plugin/README.md
+-->
+
+# freebsd.plugin
+
+Collects resource usage and performance data on FreeBSD systems
+
+By default, Netdata will enable monitoring metrics for disks, memory, and network only when they are not zero. If they are constantly zero they are ignored. Metrics that will start having values, after Netdata is started, will be detected and charts will be automatically added to the dashboard (a refresh of the dashboard is needed for them to appear though). Use `yes` instead of `auto` in plugin configuration sections to enable these charts permanently. You can also set the `enable zero metrics` option to `yes` in the `[global]` section which enables charts with zero metrics for all internal Netdata plugins.
+
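+For example, to keep the `kern.devstat` disk charts even when their metrics are zero, a `netdata.conf` fragment could look like the sketch below (the section name matches the one used by this collector; adjust the options to your needs):
+
+```sh
+[global]
+ enable zero metrics = yes
+
+[plugin:freebsd:kern.devstat]
+ enable new disks detected at runtime = yes
+ bandwidth for all disks = yes
+```
+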
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Ffreebsd.plugin%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/freebsd.plugin/freebsd_devstat.c b/collectors/freebsd.plugin/freebsd_devstat.c
new file mode 100644
index 0000000..910def5
--- /dev/null
+++ b/collectors/freebsd.plugin/freebsd_devstat.c
@@ -0,0 +1,789 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "plugin_freebsd.h"
+
+#include <sys/devicestat.h>
+
+struct disk {
+ char *name;
+ uint32_t hash;
+ size_t len;
+
+ // flags
+ int configured;
+ int enabled;
+ int updated;
+
+ int do_io;
+ int do_ops;
+ int do_qops;
+ int do_util;
+ int do_iotime;
+ int do_await;
+ int do_avagsz;
+ int do_svctm;
+
+
+ // data for differential charts
+
+ struct prev_dstat {
+ collected_number bytes_read;
+ collected_number bytes_write;
+ collected_number bytes_free;
+ collected_number operations_read;
+ collected_number operations_write;
+ collected_number operations_other;
+ collected_number operations_free;
+ collected_number duration_read_ms;
+ collected_number duration_write_ms;
+ collected_number duration_other_ms;
+ collected_number duration_free_ms;
+ collected_number busy_time_ms;
+ } prev_dstat;
+
+ // charts and dimensions
+
+ RRDSET *st_io;
+ RRDDIM *rd_io_in;
+ RRDDIM *rd_io_out;
+ RRDDIM *rd_io_free;
+
+ RRDSET *st_ops;
+ RRDDIM *rd_ops_in;
+ RRDDIM *rd_ops_out;
+ RRDDIM *rd_ops_other;
+ RRDDIM *rd_ops_free;
+
+ RRDSET *st_qops;
+ RRDDIM *rd_qops;
+
+ RRDSET *st_util;
+ RRDDIM *rd_util;
+
+ RRDSET *st_iotime;
+ RRDDIM *rd_iotime_in;
+ RRDDIM *rd_iotime_out;
+ RRDDIM *rd_iotime_other;
+ RRDDIM *rd_iotime_free;
+
+ RRDSET *st_await;
+ RRDDIM *rd_await_in;
+ RRDDIM *rd_await_out;
+ RRDDIM *rd_await_other;
+ RRDDIM *rd_await_free;
+
+ RRDSET *st_avagsz;
+ RRDDIM *rd_avagsz_in;
+ RRDDIM *rd_avagsz_out;
+ RRDDIM *rd_avagsz_free;
+
+ RRDSET *st_svctm;
+ RRDDIM *rd_svctm;
+
+ struct disk *next;
+};
+
+static struct disk *disks_root = NULL, *disks_last_used = NULL;
+
+static size_t disks_added = 0, disks_found = 0;
+
+static void disk_free(struct disk *dm) {
+ if (likely(dm->st_io))
+ rrdset_is_obsolete(dm->st_io);
+ if (likely(dm->st_ops))
+ rrdset_is_obsolete(dm->st_ops);
+ if (likely(dm->st_qops))
+ rrdset_is_obsolete(dm->st_qops);
+ if (likely(dm->st_util))
+ rrdset_is_obsolete(dm->st_util);
+ if (likely(dm->st_iotime))
+ rrdset_is_obsolete(dm->st_iotime);
+ if (likely(dm->st_await))
+ rrdset_is_obsolete(dm->st_await);
+ if (likely(dm->st_avagsz))
+ rrdset_is_obsolete(dm->st_avagsz);
+ if (likely(dm->st_svctm))
+ rrdset_is_obsolete(dm->st_svctm);
+
+ disks_added--;
+ freez(dm->name);
+ freez(dm);
+}
+
+static void disks_cleanup() {
+ if (likely(disks_found == disks_added)) return;
+
+ struct disk *dm = disks_root, *last = NULL;
+ while(dm) {
+ if (unlikely(!dm->updated)) {
+ // info("Removing disk '%s', linked after '%s'", dm->name, last?last->name:"ROOT");
+
+ if (disks_last_used == dm)
+ disks_last_used = last;
+
+ struct disk *t = dm;
+
+ if (dm == disks_root || !last)
+ disks_root = dm = dm->next;
+
+ else
+ last->next = dm = dm->next;
+
+ t->next = NULL;
+ disk_free(t);
+ }
+ else {
+ last = dm;
+ dm->updated = 0;
+ dm = dm->next;
+ }
+ }
+}
+
+static struct disk *get_disk(const char *name) {
+ struct disk *dm;
+
+ uint32_t hash = simple_hash(name);
+
+ // search it, from the last position to the end
+ for(dm = disks_last_used ; dm ; dm = dm->next) {
+ if (unlikely(hash == dm->hash && !strcmp(name, dm->name))) {
+ disks_last_used = dm->next;
+ return dm;
+ }
+ }
+
+ // search it from the beginning to the last position we used
+ for(dm = disks_root ; dm != disks_last_used ; dm = dm->next) {
+ if (unlikely(hash == dm->hash && !strcmp(name, dm->name))) {
+ disks_last_used = dm->next;
+ return dm;
+ }
+ }
+
+ // create a new one
+ dm = callocz(1, sizeof(struct disk));
+ dm->name = strdupz(name);
+ dm->hash = simple_hash(dm->name);
+ dm->len = strlen(dm->name);
+ disks_added++;
+
+ // link it to the end
+ if (disks_root) {
+ struct disk *e;
+ for(e = disks_root; e->next ; e = e->next) ;
+ e->next = dm;
+ }
+ else
+ disks_root = dm;
+
+ return dm;
+}
+
+// --------------------------------------------------------------------------------------------------------------------
+// kern.devstat
+
+int do_kern_devstat(int update_every, usec_t dt) {
+
+#define DEFAULT_EXCLUDED_DISKS ""
+#define CONFIG_SECTION_KERN_DEVSTAT "plugin:freebsd:kern.devstat"
+#define BINTIME_SCALE 5.42101086242752217003726400434970855712890625e-17 // this is 1000/2^64, used to convert a bintime fraction to milliseconds
+
+ static int enable_new_disks = -1;
+ static int enable_pass_devices = -1, do_system_io = -1, do_io = -1, do_ops = -1, do_qops = -1, do_util = -1,
+ do_iotime = -1, do_await = -1, do_avagsz = -1, do_svctm = -1;
+ static SIMPLE_PATTERN *excluded_disks = NULL;
+
+ if (unlikely(enable_new_disks == -1)) {
+ enable_new_disks = config_get_boolean_ondemand(CONFIG_SECTION_KERN_DEVSTAT,
+ "enable new disks detected at runtime", CONFIG_BOOLEAN_AUTO);
+
+ enable_pass_devices = config_get_boolean_ondemand(CONFIG_SECTION_KERN_DEVSTAT,
+ "performance metrics for pass devices", CONFIG_BOOLEAN_AUTO);
+
+ do_system_io = config_get_boolean_ondemand(CONFIG_SECTION_KERN_DEVSTAT, "total bandwidth for all disks",
+ CONFIG_BOOLEAN_YES);
+
+ do_io = config_get_boolean_ondemand(CONFIG_SECTION_KERN_DEVSTAT, "bandwidth for all disks",
+ CONFIG_BOOLEAN_AUTO);
+ do_ops = config_get_boolean_ondemand(CONFIG_SECTION_KERN_DEVSTAT, "operations for all disks",
+ CONFIG_BOOLEAN_AUTO);
+ do_qops = config_get_boolean_ondemand(CONFIG_SECTION_KERN_DEVSTAT, "queued operations for all disks",
+ CONFIG_BOOLEAN_AUTO);
+ do_util = config_get_boolean_ondemand(CONFIG_SECTION_KERN_DEVSTAT, "utilization percentage for all disks",
+ CONFIG_BOOLEAN_AUTO);
+ do_iotime = config_get_boolean_ondemand(CONFIG_SECTION_KERN_DEVSTAT, "i/o time for all disks",
+ CONFIG_BOOLEAN_AUTO);
+ do_await = config_get_boolean_ondemand(CONFIG_SECTION_KERN_DEVSTAT, "average completed i/o time for all disks",
+ CONFIG_BOOLEAN_AUTO);
+ do_avagsz = config_get_boolean_ondemand(CONFIG_SECTION_KERN_DEVSTAT, "average completed i/o bandwidth for all disks",
+ CONFIG_BOOLEAN_AUTO);
+ do_svctm = config_get_boolean_ondemand(CONFIG_SECTION_KERN_DEVSTAT, "average service time for all disks",
+ CONFIG_BOOLEAN_AUTO);
+
+ excluded_disks = simple_pattern_create(
+ config_get(CONFIG_SECTION_KERN_DEVSTAT, "disable by default disks matching", DEFAULT_EXCLUDED_DISKS)
+ , NULL
+ , SIMPLE_PATTERN_EXACT
+ );
+ }
+
+ if (likely(do_system_io || do_io || do_ops || do_qops || do_util || do_iotime || do_await || do_avagsz || do_svctm)) {
+ static int mib_numdevs[3] = {0, 0, 0};
+ int numdevs;
+ int common_error = 0;
+
+ if (unlikely(GETSYSCTL_SIMPLE("kern.devstat.numdevs", mib_numdevs, numdevs))) {
+ common_error = 1;
+ } else {
+ static int mib_devstat[3] = {0, 0, 0};
+ static void *devstat_data = NULL;
+ static int old_numdevs = 0;
+
+ if (unlikely(numdevs != old_numdevs)) {
+ // there is a generation number before the devstat structures
+ devstat_data = reallocz(devstat_data, sizeof(long) + sizeof(struct devstat) * numdevs);
+ old_numdevs = numdevs;
+ }
+ if (unlikely(GETSYSCTL_WSIZE("kern.devstat.all", mib_devstat, devstat_data,
+ sizeof(long) + sizeof(struct devstat) * numdevs))) {
+ common_error = 1;
+ } else {
+ struct devstat *dstat;
+ int i;
+ collected_number total_disk_kbytes_read = 0;
+ collected_number total_disk_kbytes_write = 0;
+
+ disks_found = 0;
+
+ dstat = (struct devstat*)((char*)devstat_data + sizeof(long)); // skip generation number
+
+ for (i = 0; i < numdevs; i++) {
+ if (likely(do_system_io)) {
+ if (((dstat[i].device_type & DEVSTAT_TYPE_MASK) == DEVSTAT_TYPE_DIRECT) ||
+ ((dstat[i].device_type & DEVSTAT_TYPE_MASK) == DEVSTAT_TYPE_STORARRAY)) {
+ total_disk_kbytes_read += dstat[i].bytes[DEVSTAT_READ] / KILO_FACTOR;
+ total_disk_kbytes_write += dstat[i].bytes[DEVSTAT_WRITE] / KILO_FACTOR;
+ }
+ }
+
+ if (unlikely(!enable_pass_devices))
+ if ((dstat[i].device_type & DEVSTAT_TYPE_PASS) == DEVSTAT_TYPE_PASS)
+ continue;
+
+ if (((dstat[i].device_type & DEVSTAT_TYPE_MASK) == DEVSTAT_TYPE_DIRECT) ||
+ ((dstat[i].device_type & DEVSTAT_TYPE_MASK) == DEVSTAT_TYPE_STORARRAY)) {
+ char disk[DEVSTAT_NAME_LEN + MAX_INT_DIGITS + 1];
+ struct cur_dstat {
+ collected_number duration_read_ms;
+ collected_number duration_write_ms;
+ collected_number duration_other_ms;
+ collected_number duration_free_ms;
+ collected_number busy_time_ms;
+ } cur_dstat;
+
+ sprintf(disk, "%s%d", dstat[i].device_name, dstat[i].unit_number);
+
+ struct disk *dm = get_disk(disk);
+ dm->updated = 1;
+ disks_found++;
+
+ if(unlikely(!dm->configured)) {
+ char var_name[4096 + 1];
+
+ // this is the first time we see this disk
+
+ // remember we configured it
+ dm->configured = 1;
+
+ dm->enabled = enable_new_disks;
+
+ if (likely(dm->enabled))
+ dm->enabled = !simple_pattern_matches(excluded_disks, disk);
+
+ snprintfz(var_name, 4096, "%s:%s", CONFIG_SECTION_KERN_DEVSTAT, disk);
+ dm->enabled = config_get_boolean_ondemand(var_name, "enabled", dm->enabled);
+
+ dm->do_io = config_get_boolean_ondemand(var_name, "bandwidth", do_io);
+ dm->do_ops = config_get_boolean_ondemand(var_name, "operations", do_ops);
+ dm->do_qops = config_get_boolean_ondemand(var_name, "queued operations", do_qops);
+ dm->do_util = config_get_boolean_ondemand(var_name, "utilization percentage", do_util);
+ dm->do_iotime = config_get_boolean_ondemand(var_name, "i/o time", do_iotime);
+ dm->do_await = config_get_boolean_ondemand(var_name, "average completed i/o time",
+ do_await);
+ dm->do_avagsz = config_get_boolean_ondemand(var_name, "average completed i/o bandwidth",
+ do_avagsz);
+ dm->do_svctm = config_get_boolean_ondemand(var_name, "average service time", do_svctm);
+
+ // initialise data for differential charts
+
+ dm->prev_dstat.bytes_read = dstat[i].bytes[DEVSTAT_READ];
+ dm->prev_dstat.bytes_write = dstat[i].bytes[DEVSTAT_WRITE];
+ dm->prev_dstat.bytes_free = dstat[i].bytes[DEVSTAT_FREE];
+ dm->prev_dstat.operations_read = dstat[i].operations[DEVSTAT_READ];
+ dm->prev_dstat.operations_write = dstat[i].operations[DEVSTAT_WRITE];
+ dm->prev_dstat.operations_other = dstat[i].operations[DEVSTAT_NO_DATA];
+ dm->prev_dstat.operations_free = dstat[i].operations[DEVSTAT_FREE];
+ dm->prev_dstat.duration_read_ms = dstat[i].duration[DEVSTAT_READ].sec * 1000
+ + dstat[i].duration[DEVSTAT_READ].frac * BINTIME_SCALE;
+ dm->prev_dstat.duration_write_ms = dstat[i].duration[DEVSTAT_WRITE].sec * 1000
+ + dstat[i].duration[DEVSTAT_WRITE].frac * BINTIME_SCALE;
+ dm->prev_dstat.duration_other_ms = dstat[i].duration[DEVSTAT_NO_DATA].sec * 1000
+ + dstat[i].duration[DEVSTAT_NO_DATA].frac * BINTIME_SCALE;
+ dm->prev_dstat.duration_free_ms = dstat[i].duration[DEVSTAT_FREE].sec * 1000
+ + dstat[i].duration[DEVSTAT_FREE].frac * BINTIME_SCALE;
+ dm->prev_dstat.busy_time_ms = dstat[i].busy_time.sec * 1000
+ + dstat[i].busy_time.frac * BINTIME_SCALE;
+ }
+
+ cur_dstat.duration_read_ms = dstat[i].duration[DEVSTAT_READ].sec * 1000
+ + dstat[i].duration[DEVSTAT_READ].frac * BINTIME_SCALE;
+ cur_dstat.duration_write_ms = dstat[i].duration[DEVSTAT_WRITE].sec * 1000
+ + dstat[i].duration[DEVSTAT_WRITE].frac * BINTIME_SCALE;
+ cur_dstat.duration_other_ms = dstat[i].duration[DEVSTAT_NO_DATA].sec * 1000
+ + dstat[i].duration[DEVSTAT_NO_DATA].frac * BINTIME_SCALE;
+ cur_dstat.duration_free_ms = dstat[i].duration[DEVSTAT_FREE].sec * 1000
+ + dstat[i].duration[DEVSTAT_FREE].frac * BINTIME_SCALE;
+
+ cur_dstat.busy_time_ms = dstat[i].busy_time.sec * 1000 + dstat[i].busy_time.frac * BINTIME_SCALE;
+
+ // --------------------------------------------------------------------
+
+ if(dm->do_io == CONFIG_BOOLEAN_YES || (dm->do_io == CONFIG_BOOLEAN_AUTO &&
+ (dstat[i].bytes[DEVSTAT_READ] ||
+ dstat[i].bytes[DEVSTAT_WRITE] ||
+ dstat[i].bytes[DEVSTAT_FREE] ||
+ netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
+ if (unlikely(!dm->st_io)) {
+ dm->st_io = rrdset_create_localhost("disk",
+ disk,
+ NULL,
+ disk,
+ "disk.io",
+ "Disk I/O Bandwidth",
+ "KiB/s",
+ "freebsd.plugin",
+ "devstat",
+ NETDATA_CHART_PRIO_DISK_IO,
+ update_every,
+ RRDSET_TYPE_AREA
+ );
+
+ dm->rd_io_in = rrddim_add(dm->st_io, "reads", NULL, 1, KILO_FACTOR,
+ RRD_ALGORITHM_INCREMENTAL);
+ dm->rd_io_out = rrddim_add(dm->st_io, "writes", NULL, -1, KILO_FACTOR,
+ RRD_ALGORITHM_INCREMENTAL);
+ dm->rd_io_free = rrddim_add(dm->st_io, "frees", NULL, -1, KILO_FACTOR,
+ RRD_ALGORITHM_INCREMENTAL);
+ } else
+ rrdset_next(dm->st_io);
+
+ rrddim_set_by_pointer(dm->st_io, dm->rd_io_in, dstat[i].bytes[DEVSTAT_READ]);
+ rrddim_set_by_pointer(dm->st_io, dm->rd_io_out, dstat[i].bytes[DEVSTAT_WRITE]);
+ rrddim_set_by_pointer(dm->st_io, dm->rd_io_free, dstat[i].bytes[DEVSTAT_FREE]);
+ rrdset_done(dm->st_io);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(dm->do_ops == CONFIG_BOOLEAN_YES || (dm->do_ops == CONFIG_BOOLEAN_AUTO &&
+ (dstat[i].operations[DEVSTAT_READ] ||
+ dstat[i].operations[DEVSTAT_WRITE] ||
+ dstat[i].operations[DEVSTAT_NO_DATA] ||
+ dstat[i].operations[DEVSTAT_FREE] ||
+ netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
+ if (unlikely(!dm->st_ops)) {
+ dm->st_ops = rrdset_create_localhost("disk_ops",
+ disk,
+ NULL,
+ disk,
+ "disk.ops",
+ "Disk Completed I/O Operations",
+ "operations/s",
+ "freebsd.plugin",
+ "devstat",
+ NETDATA_CHART_PRIO_DISK_OPS,
+ update_every,
+ RRDSET_TYPE_LINE
+ );
+
+ rrdset_flag_set(dm->st_ops, RRDSET_FLAG_DETAIL);
+
+ dm->rd_ops_in = rrddim_add(dm->st_ops, "reads", NULL, 1, 1,
+ RRD_ALGORITHM_INCREMENTAL);
+ dm->rd_ops_out = rrddim_add(dm->st_ops, "writes", NULL, -1, 1,
+ RRD_ALGORITHM_INCREMENTAL);
+ dm->rd_ops_other = rrddim_add(dm->st_ops, "other", NULL, 1, 1,
+ RRD_ALGORITHM_INCREMENTAL);
+ dm->rd_ops_free = rrddim_add(dm->st_ops, "frees", NULL, -1, 1,
+ RRD_ALGORITHM_INCREMENTAL);
+ } else
+ rrdset_next(dm->st_ops);
+
+ rrddim_set_by_pointer(dm->st_ops, dm->rd_ops_in, dstat[i].operations[DEVSTAT_READ]);
+ rrddim_set_by_pointer(dm->st_ops, dm->rd_ops_out, dstat[i].operations[DEVSTAT_WRITE]);
+ rrddim_set_by_pointer(dm->st_ops, dm->rd_ops_other, dstat[i].operations[DEVSTAT_NO_DATA]);
+ rrddim_set_by_pointer(dm->st_ops, dm->rd_ops_free, dstat[i].operations[DEVSTAT_FREE]);
+ rrdset_done(dm->st_ops);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(dm->do_qops == CONFIG_BOOLEAN_YES || (dm->do_qops == CONFIG_BOOLEAN_AUTO &&
+ (dstat[i].start_count ||
+ dstat[i].end_count ||
+ netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
+ if (unlikely(!dm->st_qops)) {
+ dm->st_qops = rrdset_create_localhost("disk_qops",
+ disk,
+ NULL,
+ disk,
+ "disk.qops",
+ "Disk Current I/O Operations",
+ "operations",
+ "freebsd.plugin",
+ "devstat",
+ NETDATA_CHART_PRIO_DISK_QOPS,
+ update_every,
+ RRDSET_TYPE_LINE
+ );
+
+ rrdset_flag_set(dm->st_qops, RRDSET_FLAG_DETAIL);
+
+ dm->rd_qops = rrddim_add(dm->st_qops, "operations", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ } else
+ rrdset_next(dm->st_qops);
+
+ rrddim_set_by_pointer(dm->st_qops, dm->rd_qops, dstat[i].start_count - dstat[i].end_count);
+ rrdset_done(dm->st_qops);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(dm->do_util == CONFIG_BOOLEAN_YES || (dm->do_util == CONFIG_BOOLEAN_AUTO &&
+ (cur_dstat.busy_time_ms ||
+ netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
+ if (unlikely(!dm->st_util)) {
+ dm->st_util = rrdset_create_localhost("disk_util",
+ disk,
+ NULL,
+ disk,
+ "disk.util",
+ "Disk Utilization Time",
+ "% of time working",
+ "freebsd.plugin",
+ "devstat",
+ NETDATA_CHART_PRIO_DISK_UTIL,
+ update_every,
+ RRDSET_TYPE_AREA
+ );
+
+ rrdset_flag_set(dm->st_util, RRDSET_FLAG_DETAIL);
+
+ dm->rd_util = rrddim_add(dm->st_util, "utilization", NULL, 1, 10,
+ RRD_ALGORITHM_INCREMENTAL);
+ } else
+ rrdset_next(dm->st_util);
+
+ rrddim_set_by_pointer(dm->st_util, dm->rd_util, cur_dstat.busy_time_ms);
+ rrdset_done(dm->st_util);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(dm->do_iotime == CONFIG_BOOLEAN_YES || (dm->do_iotime == CONFIG_BOOLEAN_AUTO &&
+ (cur_dstat.duration_read_ms ||
+ cur_dstat.duration_write_ms ||
+ cur_dstat.duration_other_ms ||
+ cur_dstat.duration_free_ms ||
+ netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
+ if (unlikely(!dm->st_iotime)) {
+ dm->st_iotime = rrdset_create_localhost("disk_iotime",
+ disk,
+ NULL,
+ disk,
+ "disk.iotime",
+ "Disk Total I/O Time",
+ "milliseconds/s",
+ "freebsd.plugin",
+ "devstat",
+ NETDATA_CHART_PRIO_DISK_IOTIME,
+ update_every,
+ RRDSET_TYPE_LINE
+ );
+
+ rrdset_flag_set(dm->st_iotime, RRDSET_FLAG_DETAIL);
+
+ dm->rd_iotime_in = rrddim_add(dm->st_iotime, "reads", NULL, 1, 1,
+ RRD_ALGORITHM_INCREMENTAL);
+ dm->rd_iotime_out = rrddim_add(dm->st_iotime, "writes", NULL, -1, 1,
+ RRD_ALGORITHM_INCREMENTAL);
+ dm->rd_iotime_other = rrddim_add(dm->st_iotime, "other", NULL, 1, 1,
+ RRD_ALGORITHM_INCREMENTAL);
+ dm->rd_iotime_free = rrddim_add(dm->st_iotime, "frees", NULL, -1, 1,
+ RRD_ALGORITHM_INCREMENTAL);
+ } else
+ rrdset_next(dm->st_iotime);
+
+ rrddim_set_by_pointer(dm->st_iotime, dm->rd_iotime_in, cur_dstat.duration_read_ms);
+ rrddim_set_by_pointer(dm->st_iotime, dm->rd_iotime_out, cur_dstat.duration_write_ms);
+ rrddim_set_by_pointer(dm->st_iotime, dm->rd_iotime_other, cur_dstat.duration_other_ms);
+ rrddim_set_by_pointer(dm->st_iotime, dm->rd_iotime_free, cur_dstat.duration_free_ms);
+ rrdset_done(dm->st_iotime);
+ }
+
+ // --------------------------------------------------------------------
+ // calculate differential charts
+ // only if this is not the first time we run
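+ // For example, the average wait per completed operation is computed as
+ // await = (duration_ms(now) - duration_ms(prev)) / (operations(now) - operations(prev)),
+ // falling back to 0 when no new operations completed.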
+
+ if (likely(dt)) {
+
+ // --------------------------------------------------------------------
+
+ if(dm->do_await == CONFIG_BOOLEAN_YES || (dm->do_await == CONFIG_BOOLEAN_AUTO &&
+ (dstat[i].operations[DEVSTAT_READ] ||
+ dstat[i].operations[DEVSTAT_WRITE] ||
+ dstat[i].operations[DEVSTAT_NO_DATA] ||
+ dstat[i].operations[DEVSTAT_FREE] ||
+ netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
+ if (unlikely(!dm->st_await)) {
+ dm->st_await = rrdset_create_localhost("disk_await",
+ disk,
+ NULL,
+ disk,
+ "disk.await",
+ "Average Completed I/O Operation Time",
+ "milliseconds/operation",
+ "freebsd.plugin",
+ "devstat",
+ NETDATA_CHART_PRIO_DISK_AWAIT,
+ update_every,
+ RRDSET_TYPE_LINE
+ );
+
+ rrdset_flag_set(dm->st_await, RRDSET_FLAG_DETAIL);
+
+ dm->rd_await_in = rrddim_add(dm->st_await, "reads", NULL, 1, 1,
+ RRD_ALGORITHM_ABSOLUTE);
+ dm->rd_await_out = rrddim_add(dm->st_await, "writes", NULL, -1, 1,
+ RRD_ALGORITHM_ABSOLUTE);
+ dm->rd_await_other = rrddim_add(dm->st_await, "other", NULL, 1, 1,
+ RRD_ALGORITHM_ABSOLUTE);
+ dm->rd_await_free = rrddim_add(dm->st_await, "frees", NULL, -1, 1,
+ RRD_ALGORITHM_ABSOLUTE);
+ } else
+ rrdset_next(dm->st_await);
+
+ rrddim_set_by_pointer(dm->st_await, dm->rd_await_in,
+ (dstat[i].operations[DEVSTAT_READ] -
+ dm->prev_dstat.operations_read) ?
+ (cur_dstat.duration_read_ms - dm->prev_dstat.duration_read_ms) /
+ (dstat[i].operations[DEVSTAT_READ] -
+ dm->prev_dstat.operations_read) :
+ 0);
+ rrddim_set_by_pointer(dm->st_await, dm->rd_await_out,
+ (dstat[i].operations[DEVSTAT_WRITE] -
+ dm->prev_dstat.operations_write) ?
+ (cur_dstat.duration_write_ms - dm->prev_dstat.duration_write_ms) /
+ (dstat[i].operations[DEVSTAT_WRITE] -
+ dm->prev_dstat.operations_write) :
+ 0);
+ rrddim_set_by_pointer(dm->st_await, dm->rd_await_other,
+ (dstat[i].operations[DEVSTAT_NO_DATA] -
+ dm->prev_dstat.operations_other) ?
+ (cur_dstat.duration_other_ms - dm->prev_dstat.duration_other_ms) /
+ (dstat[i].operations[DEVSTAT_NO_DATA] -
+ dm->prev_dstat.operations_other) :
+ 0);
+ rrddim_set_by_pointer(dm->st_await, dm->rd_await_free,
+ (dstat[i].operations[DEVSTAT_FREE] -
+ dm->prev_dstat.operations_free) ?
+ (cur_dstat.duration_free_ms - dm->prev_dstat.duration_free_ms) /
+ (dstat[i].operations[DEVSTAT_FREE] -
+ dm->prev_dstat.operations_free) :
+ 0);
+ rrdset_done(dm->st_await);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(dm->do_avagsz == CONFIG_BOOLEAN_YES || (dm->do_avagsz == CONFIG_BOOLEAN_AUTO &&
+ (dstat[i].operations[DEVSTAT_READ] ||
+ dstat[i].operations[DEVSTAT_WRITE] ||
+ dstat[i].operations[DEVSTAT_FREE] ||
+ netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
+ if (unlikely(!dm->st_avagsz)) {
+ dm->st_avagsz = rrdset_create_localhost("disk_avgsz",
+ disk,
+ NULL,
+ disk,
+ "disk.avgsz",
+ "Average Completed I/O Operation Bandwidth",
+ "KiB/operation",
+ "freebsd.plugin",
+ "devstat",
+ NETDATA_CHART_PRIO_DISK_AVGSZ,
+ update_every,
+ RRDSET_TYPE_AREA
+ );
+
+ rrdset_flag_set(dm->st_avagsz, RRDSET_FLAG_DETAIL);
+
+ dm->rd_avagsz_in = rrddim_add(dm->st_avagsz, "reads", NULL, 1, KILO_FACTOR,
+ RRD_ALGORITHM_ABSOLUTE);
+ dm->rd_avagsz_out = rrddim_add(dm->st_avagsz, "writes", NULL, -1, KILO_FACTOR,
+ RRD_ALGORITHM_ABSOLUTE);
+ dm->rd_avagsz_free = rrddim_add(dm->st_avagsz, "frees", NULL, -1, KILO_FACTOR,
+ RRD_ALGORITHM_ABSOLUTE);
+ } else
+ rrdset_next(dm->st_avagsz);
+
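+ // avgsz: average request size over the last interval, i.e.
+ // (change in bytes) / (change in completed operations); the
+ // KILO_FACTOR divisor on the dimensions renders it in KiB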
+ rrddim_set_by_pointer(dm->st_avagsz, dm->rd_avagsz_in,
+ (dstat[i].operations[DEVSTAT_READ] -
+ dm->prev_dstat.operations_read) ?
+ (dstat[i].bytes[DEVSTAT_READ] - dm->prev_dstat.bytes_read) /
+ (dstat[i].operations[DEVSTAT_READ] -
+ dm->prev_dstat.operations_read) :
+ 0);
+ rrddim_set_by_pointer(dm->st_avagsz, dm->rd_avagsz_out,
+ (dstat[i].operations[DEVSTAT_WRITE] -
+ dm->prev_dstat.operations_write) ?
+ (dstat[i].bytes[DEVSTAT_WRITE] - dm->prev_dstat.bytes_write) /
+ (dstat[i].operations[DEVSTAT_WRITE] -
+ dm->prev_dstat.operations_write) :
+ 0);
+ rrddim_set_by_pointer(dm->st_avagsz, dm->rd_avagsz_free,
+ (dstat[i].operations[DEVSTAT_FREE] -
+ dm->prev_dstat.operations_free) ?
+ (dstat[i].bytes[DEVSTAT_FREE] - dm->prev_dstat.bytes_free) /
+ (dstat[i].operations[DEVSTAT_FREE] -
+ dm->prev_dstat.operations_free) :
+ 0);
+ rrdset_done(dm->st_avagsz);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(dm->do_svctm == CONFIG_BOOLEAN_YES || (dm->do_svctm == CONFIG_BOOLEAN_AUTO &&
+ (dstat[i].operations[DEVSTAT_READ] ||
+ dstat[i].operations[DEVSTAT_WRITE] ||
+ dstat[i].operations[DEVSTAT_NO_DATA] ||
+ dstat[i].operations[DEVSTAT_FREE] ||
+ netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
+ if (unlikely(!dm->st_svctm)) {
+ dm->st_svctm = rrdset_create_localhost("disk_svctm",
+ disk,
+ NULL,
+ disk,
+ "disk.svctm",
+ "Average Service Time",
+ "milliseconds/operation",
+ "freebsd.plugin",
+ "devstat",
+ NETDATA_CHART_PRIO_DISK_SVCTM,
+ update_every,
+ RRDSET_TYPE_LINE
+ );
+
+ rrdset_flag_set(dm->st_svctm, RRDSET_FLAG_DETAIL);
+
+ dm->rd_svctm = rrddim_add(dm->st_svctm, "svctm", NULL, 1, 1,
+ RRD_ALGORITHM_ABSOLUTE);
+ } else
+ rrdset_next(dm->st_svctm);
+
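+ // svctm: (change in busy time, ms) divided by the total change in
+ // completed operations across all classes, zero when the device
+ // completed nothing in the interval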
+ rrddim_set_by_pointer(dm->st_svctm, dm->rd_svctm,
+ ((dstat[i].operations[DEVSTAT_READ] - dm->prev_dstat.operations_read) +
+ (dstat[i].operations[DEVSTAT_WRITE] - dm->prev_dstat.operations_write) +
+ (dstat[i].operations[DEVSTAT_NO_DATA] - dm->prev_dstat.operations_other) +
+ (dstat[i].operations[DEVSTAT_FREE] - dm->prev_dstat.operations_free)) ?
+ (cur_dstat.busy_time_ms - dm->prev_dstat.busy_time_ms) /
+ ((dstat[i].operations[DEVSTAT_READ] - dm->prev_dstat.operations_read) +
+ (dstat[i].operations[DEVSTAT_WRITE] - dm->prev_dstat.operations_write) +
+ (dstat[i].operations[DEVSTAT_NO_DATA] - dm->prev_dstat.operations_other) +
+ (dstat[i].operations[DEVSTAT_FREE] - dm->prev_dstat.operations_free)) :
+ 0);
+ rrdset_done(dm->st_svctm);
+ }
+
+ // --------------------------------------------------------------------
+
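+ // save the current counters as the baseline for the next iteration's deltas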
+ dm->prev_dstat.bytes_read = dstat[i].bytes[DEVSTAT_READ];
+ dm->prev_dstat.bytes_write = dstat[i].bytes[DEVSTAT_WRITE];
+ dm->prev_dstat.bytes_free = dstat[i].bytes[DEVSTAT_FREE];
+ dm->prev_dstat.operations_read = dstat[i].operations[DEVSTAT_READ];
+ dm->prev_dstat.operations_write = dstat[i].operations[DEVSTAT_WRITE];
+ dm->prev_dstat.operations_other = dstat[i].operations[DEVSTAT_NO_DATA];
+ dm->prev_dstat.operations_free = dstat[i].operations[DEVSTAT_FREE];
+ dm->prev_dstat.duration_read_ms = cur_dstat.duration_read_ms;
+ dm->prev_dstat.duration_write_ms = cur_dstat.duration_write_ms;
+ dm->prev_dstat.duration_other_ms = cur_dstat.duration_other_ms;
+ dm->prev_dstat.duration_free_ms = cur_dstat.duration_free_ms;
+ dm->prev_dstat.busy_time_ms = cur_dstat.busy_time_ms;
+ }
+ }
+ }
+
+ // --------------------------------------------------------------------
+
+ if (likely(do_system_io)) {
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_in = NULL, *rd_out = NULL;
+
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost("system",
+ "io",
+ NULL,
+ "disk",
+ NULL,
+ "Disk I/O",
+ "KiB/s",
+ "freebsd.plugin",
+ "devstat",
+ NETDATA_CHART_PRIO_SYSTEM_IO,
+ update_every,
+ RRDSET_TYPE_AREA
+ );
+
+ rd_in = rrddim_add(st, "in", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_out = rrddim_add(st, "out", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ } else
+ rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_in, total_disk_kbytes_read);
+ rrddim_set_by_pointer(st, rd_out, total_disk_kbytes_write);
+ rrdset_done(st);
+ }
+ }
+ }
+ if (unlikely(common_error)) {
+ do_system_io = 0;
+ error("DISABLED: system.io chart");
+ do_io = 0;
+ error("DISABLED: disk.* charts");
+ do_ops = 0;
+ error("DISABLED: disk_ops.* charts");
+ do_qops = 0;
+ error("DISABLED: disk_qops.* charts");
+ do_util = 0;
+ error("DISABLED: disk_util.* charts");
+ do_iotime = 0;
+ error("DISABLED: disk_iotime.* charts");
+ do_await = 0;
+ error("DISABLED: disk_await.* charts");
+ do_avagsz = 0;
+ error("DISABLED: disk_avgsz.* charts");
+ do_svctm = 0;
+ error("DISABLED: disk_svctm.* charts");
+ error("DISABLED: kern.devstat module");
+ return 1;
+ }
+ } else {
+ error("DISABLED: kern.devstat module");
+ return 1;
+ }
+
+ disks_cleanup();
+
+ return 0;
+}
diff --git a/collectors/freebsd.plugin/freebsd_getifaddrs.c b/collectors/freebsd.plugin/freebsd_getifaddrs.c
new file mode 100644
index 0000000..1437d08
--- /dev/null
+++ b/collectors/freebsd.plugin/freebsd_getifaddrs.c
@@ -0,0 +1,627 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "plugin_freebsd.h"
+
+#include <ifaddrs.h>
+
+struct cgroup_network_interface {
+ char *name;
+ uint32_t hash;
+ size_t len;
+
+ // flags
+ int configured;
+ int enabled;
+ int updated;
+
+ int do_bandwidth;
+ int do_packets;
+ int do_errors;
+ int do_drops;
+ int do_events;
+
+ // charts and dimensions
+
+ RRDSET *st_bandwidth;
+ RRDDIM *rd_bandwidth_in;
+ RRDDIM *rd_bandwidth_out;
+
+ RRDSET *st_packets;
+ RRDDIM *rd_packets_in;
+ RRDDIM *rd_packets_out;
+ RRDDIM *rd_packets_m_in;
+ RRDDIM *rd_packets_m_out;
+
+ RRDSET *st_errors;
+ RRDDIM *rd_errors_in;
+ RRDDIM *rd_errors_out;
+
+ RRDSET *st_drops;
+ RRDDIM *rd_drops_in;
+ RRDDIM *rd_drops_out;
+
+ RRDSET *st_events;
+ RRDDIM *rd_events_coll;
+
+ struct cgroup_network_interface *next;
+};
+
+static struct cgroup_network_interface *network_interfaces_root = NULL, *network_interfaces_last_used = NULL;
+
+static size_t network_interfaces_added = 0, network_interfaces_found = 0;
+
+static void network_interface_free(struct cgroup_network_interface *ifm) {
+ if (likely(ifm->st_bandwidth))
+ rrdset_is_obsolete(ifm->st_bandwidth);
+ if (likely(ifm->st_packets))
+ rrdset_is_obsolete(ifm->st_packets);
+ if (likely(ifm->st_errors))
+ rrdset_is_obsolete(ifm->st_errors);
+ if (likely(ifm->st_drops))
+ rrdset_is_obsolete(ifm->st_drops);
+ if (likely(ifm->st_events))
+ rrdset_is_obsolete(ifm->st_events);
+
+ network_interfaces_added--;
+ freez(ifm->name);
+ freez(ifm);
+}
+
+static void network_interfaces_cleanup() {
+ if (likely(network_interfaces_found == network_interfaces_added)) return;
+
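+ // mark-and-sweep: anything not flagged 'updated' by the last scan has
+ // disappeared from getifaddrs(), so unlink it and obsolete its charts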
+ struct cgroup_network_interface *ifm = network_interfaces_root, *last = NULL;
+ while(ifm) {
+ if (unlikely(!ifm->updated)) {
+ // info("Removing network interface '%s', linked after '%s'", ifm->name, last?last->name:"ROOT");
+
+ if (network_interfaces_last_used == ifm)
+ network_interfaces_last_used = last;
+
+ struct cgroup_network_interface *t = ifm;
+
+ if (ifm == network_interfaces_root || !last)
+ network_interfaces_root = ifm = ifm->next;
+ else
+ last->next = ifm = ifm->next;
+
+ t->next = NULL;
+ network_interface_free(t);
+ }
+ else {
+ last = ifm;
+ ifm->updated = 0;
+ ifm = ifm->next;
+ }
+ }
+}
+
+static struct cgroup_network_interface *get_network_interface(const char *name) {
+ struct cgroup_network_interface *ifm;
+
+ uint32_t hash = simple_hash(name);
+
+ // search it, from the last position to the end
+ for(ifm = network_interfaces_last_used ; ifm ; ifm = ifm->next) {
+ if (unlikely(hash == ifm->hash && !strcmp(name, ifm->name))) {
+ network_interfaces_last_used = ifm->next;
+ return ifm;
+ }
+ }
+
+ // search it from the beginning to the last position we used
+ for(ifm = network_interfaces_root ; ifm != network_interfaces_last_used ; ifm = ifm->next) {
+ if (unlikely(hash == ifm->hash && !strcmp(name, ifm->name))) {
+ network_interfaces_last_used = ifm->next;
+ return ifm;
+ }
+ }
+
+ // create a new one
+ ifm = callocz(1, sizeof(struct cgroup_network_interface));
+ ifm->name = strdupz(name);
+ ifm->hash = simple_hash(ifm->name);
+ ifm->len = strlen(ifm->name);
+ network_interfaces_added++;
+
+ // link it to the end
+ if (network_interfaces_root) {
+ struct cgroup_network_interface *e;
+ for(e = network_interfaces_root; e->next ; e = e->next) ;
+ e->next = ifm;
+ }
+ else
+ network_interfaces_root = ifm;
+
+ return ifm;
+}
+
+// --------------------------------------------------------------------------------------------------------------------
+// getifaddrs
+
+int do_getifaddrs(int update_every, usec_t dt) {
+ (void)dt;
+
+#define DEFAULT_EXCLUDED_INTERFACES "lo*"
+#define DEFAULT_PHYSICAL_INTERFACES "igb* ix* cxl* em* ixl* ixlv* bge* ixgbe* vtnet* vmx* re*"
+#define CONFIG_SECTION_GETIFADDRS "plugin:freebsd:getifaddrs"
+
+ static int enable_new_interfaces = -1;
+ static int do_bandwidth_ipv4 = -1, do_bandwidth_ipv6 = -1, do_bandwidth = -1, do_packets = -1, do_bandwidth_net = -1, do_packets_net = -1,
+ do_errors = -1, do_drops = -1, do_events = -1;
+ static SIMPLE_PATTERN *excluded_interfaces = NULL, *physical_interfaces = NULL;
+
+ if (unlikely(enable_new_interfaces == -1)) {
+ enable_new_interfaces = config_get_boolean_ondemand(CONFIG_SECTION_GETIFADDRS,
+ "enable new interfaces detected at runtime",
+ CONFIG_BOOLEAN_AUTO);
+
+ do_bandwidth_net = config_get_boolean_ondemand(CONFIG_SECTION_GETIFADDRS, "total bandwidth for physical interfaces",
+ CONFIG_BOOLEAN_AUTO);
+ do_packets_net = config_get_boolean_ondemand(CONFIG_SECTION_GETIFADDRS, "total packets for physical interfaces",
+ CONFIG_BOOLEAN_AUTO);
+ do_bandwidth_ipv4 = config_get_boolean_ondemand(CONFIG_SECTION_GETIFADDRS, "total bandwidth for ipv4 interfaces",
+ CONFIG_BOOLEAN_AUTO);
+ do_bandwidth_ipv6 = config_get_boolean_ondemand(CONFIG_SECTION_GETIFADDRS, "total bandwidth for ipv6 interfaces",
+ CONFIG_BOOLEAN_AUTO);
+ do_bandwidth = config_get_boolean_ondemand(CONFIG_SECTION_GETIFADDRS, "bandwidth for all interfaces",
+ CONFIG_BOOLEAN_AUTO);
+ do_packets = config_get_boolean_ondemand(CONFIG_SECTION_GETIFADDRS, "packets for all interfaces",
+ CONFIG_BOOLEAN_AUTO);
+ do_errors = config_get_boolean_ondemand(CONFIG_SECTION_GETIFADDRS, "errors for all interfaces",
+ CONFIG_BOOLEAN_AUTO);
+ do_drops = config_get_boolean_ondemand(CONFIG_SECTION_GETIFADDRS, "drops for all interfaces",
+ CONFIG_BOOLEAN_AUTO);
+ do_events = config_get_boolean_ondemand(CONFIG_SECTION_GETIFADDRS, "collisions for all interfaces",
+ CONFIG_BOOLEAN_AUTO);
+
+ excluded_interfaces = simple_pattern_create(
+ config_get(CONFIG_SECTION_GETIFADDRS, "disable by default interfaces matching", DEFAULT_EXLUDED_INTERFACES)
+ , NULL
+ , SIMPLE_PATTERN_EXACT
+ );
+ physical_interfaces = simple_pattern_create(
+ config_get(CONFIG_SECTION_GETIFADDRS, "set physical interfaces for system.net", DEFAULT_PHYSICAL_INTERFACES)
+ , NULL
+ , SIMPLE_PATTERN_EXACT
+ );
+ }
+
+ if (likely(do_bandwidth_ipv4 || do_bandwidth_ipv6 || do_bandwidth || do_packets || do_errors || do_bandwidth_net || do_packets_net ||
+ do_drops || do_events)) {
+ struct ifaddrs *ifap;
+
+ if (unlikely(getifaddrs(&ifap))) {
+ error("FREEBSD: getifaddrs() failed");
+ do_bandwidth_net = 0;
+ error("DISABLED: system.net chart");
+ do_packets_net = 0;
+ error("DISABLED: system.packets chart");
+ do_bandwidth_ipv4 = 0;
+ error("DISABLED: system.ipv4 chart");
+ do_bandwidth_ipv6 = 0;
+ error("DISABLED: system.ipv6 chart");
+ do_bandwidth = 0;
+ error("DISABLED: net.* charts");
+ do_packets = 0;
+ error("DISABLED: net_packets.* charts");
+ do_errors = 0;
+ error("DISABLED: net_errors.* charts");
+ do_drops = 0;
+ error("DISABLED: net_drops.* charts");
+ do_events = 0;
+ error("DISABLED: net_events.* charts");
+ error("DISABLED: getifaddrs module");
+ return 1;
+ } else {
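+// IFA_DATA(s) reads field ifi_<s> from the per-interface if_data
+// statistics attached to each AF_LINK entry returned by getifaddrs()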
+#define IFA_DATA(s) (((struct if_data *)ifa->ifa_data)->ifi_ ## s)
+ struct ifaddrs *ifa;
+ struct iftot {
+ u_long ift_ibytes;
+ u_long ift_obytes;
+ u_long ift_ipackets;
+ u_long ift_opackets;
+ u_long ift_imcasts;
+ u_long ift_omcasts;
+ } iftot = {0, 0, 0, 0, 0, 0};
+
+ // --------------------------------------------------------------------
+
+ if (likely(do_bandwidth_net)) {
+
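+ // sum the byte counters of the AF_LINK (data-link layer) entries
+ // matching the configured physical interface pattern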
+ iftot.ift_ibytes = iftot.ift_obytes = 0;
+ for (ifa = ifap; ifa; ifa = ifa->ifa_next) {
+ if (ifa->ifa_addr->sa_family != AF_LINK)
+ continue;
+ if (!simple_pattern_matches(physical_interfaces, ifa->ifa_name))
+ continue;
+ iftot.ift_ibytes += IFA_DATA(ibytes);
+ iftot.ift_obytes += IFA_DATA(obytes);
+ }
+
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_in = NULL, *rd_out = NULL;
+
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost("system",
+ "net",
+ NULL,
+ "network",
+ NULL,
+ "Network Traffic",
+ "kilobits/s",
+ "freebsd.plugin",
+ "getifaddrs",
+ NETDATA_CHART_PRIO_SYSTEM_NET,
+ update_every,
+ RRDSET_TYPE_AREA
+ );
+
+ rd_in = rrddim_add(st, "InOctets", "received", 8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
+ rd_out = rrddim_add(st, "OutOctets", "sent", -8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
+ } else
+ rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_in, iftot.ift_ibytes);
+ rrddim_set_by_pointer(st, rd_out, iftot.ift_obytes);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if (likely(do_packets_net)) {
+
+ iftot.ift_ipackets = iftot.ift_opackets = iftot.ift_imcasts = iftot.ift_omcasts = 0;
+ for (ifa = ifap; ifa; ifa = ifa->ifa_next) {
+ if (ifa->ifa_addr->sa_family != AF_LINK)
+ continue;
+ if (!simple_pattern_matches(physical_interfaces, ifa->ifa_name))
+ continue;
+ iftot.ift_ipackets += IFA_DATA(ipackets);
+ iftot.ift_opackets += IFA_DATA(opackets);
+ iftot.ift_imcasts += IFA_DATA(imcasts);
+ iftot.ift_omcasts += IFA_DATA(omcasts);
+ }
+
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_packets_in = NULL, *rd_packets_out = NULL, *rd_packets_m_in = NULL, *rd_packets_m_out = NULL;
+
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost("system",
+ "packets",
+ NULL,
+ "network",
+ NULL,
+ "Network Packets",
+ "packets/s",
+ "freebsd.plugin",
+ "getifaddrs",
+ NETDATA_CHART_PRIO_SYSTEM_PACKETS,
+ update_every,
+ RRDSET_TYPE_LINE
+ );
+
+ rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
+
+ rd_packets_in = rrddim_add(st, "received", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_packets_out = rrddim_add(st, "sent", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_packets_m_in = rrddim_add(st, "multicast_received", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_packets_m_out = rrddim_add(st, "multicast_sent", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ } else
+ rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_packets_in, iftot.ift_ipackets);
+ rrddim_set_by_pointer(st, rd_packets_out, iftot.ift_opackets);
+ rrddim_set_by_pointer(st, rd_packets_m_in, iftot.ift_imcasts);
+ rrddim_set_by_pointer(st, rd_packets_m_out, iftot.ift_omcasts);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if (likely(do_bandwidth_ipv4)) {
+ iftot.ift_ibytes = iftot.ift_obytes = 0;
+ for (ifa = ifap; ifa; ifa = ifa->ifa_next) {
+ if (ifa->ifa_addr->sa_family != AF_INET)
+ continue;
+ iftot.ift_ibytes += IFA_DATA(ibytes);
+ iftot.ift_obytes += IFA_DATA(obytes);
+ }
+
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_in = NULL, *rd_out = NULL;
+
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost("system",
+ "ipv4",
+ NULL,
+ "network",
+ NULL,
+ "IPv4 Bandwidth",
+ "kilobits/s",
+ "freebsd.plugin",
+ "getifaddrs",
+ NETDATA_CHART_PRIO_SYSTEM_IPV4,
+ update_every,
+ RRDSET_TYPE_AREA
+ );
+
+ rd_in = rrddim_add(st, "InOctets", "received", 8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
+ rd_out = rrddim_add(st, "OutOctets", "sent", -8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
+ } else
+ rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_in, iftot.ift_ibytes);
+ rrddim_set_by_pointer(st, rd_out, iftot.ift_obytes);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if (likely(do_bandwidth_ipv6)) {
+ iftot.ift_ibytes = iftot.ift_obytes = 0;
+ for (ifa = ifap; ifa; ifa = ifa->ifa_next) {
+ if (ifa->ifa_addr->sa_family != AF_INET6)
+ continue;
+ iftot.ift_ibytes += IFA_DATA(ibytes);
+ iftot.ift_obytes += IFA_DATA(obytes);
+ }
+
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_in = NULL, *rd_out = NULL;
+
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost("system",
+ "ipv6",
+ NULL,
+ "network",
+ NULL,
+ "IPv6 Bandwidth",
+ "kilobits/s",
+ "freebsd.plugin",
+ "getifaddrs",
+ NETDATA_CHART_PRIO_SYSTEM_IPV6,
+ update_every,
+ RRDSET_TYPE_AREA
+ );
+
+ rd_in = rrddim_add(st, "received", NULL, 8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
+ rd_out = rrddim_add(st, "sent", NULL, -8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
+ } else
+ rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_in, iftot.ift_ibytes);
+ rrddim_set_by_pointer(st, rd_out, iftot.ift_obytes);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ network_interfaces_found = 0;
+
+ for (ifa = ifap; ifa; ifa = ifa->ifa_next) {
+ if (ifa->ifa_addr->sa_family != AF_LINK)
+ continue;
+
+ struct cgroup_network_interface *ifm = get_network_interface(ifa->ifa_name);
+ ifm->updated = 1;
+ network_interfaces_found++;
+
+ if (unlikely(!ifm->configured)) {
+ char var_name[4096 + 1];
+
+ // this is the first time we see this network interface
+
+ // remember we configured it
+ ifm->configured = 1;
+
+ ifm->enabled = enable_new_interfaces;
+
+ if (likely(ifm->enabled))
+ ifm->enabled = !simple_pattern_matches(excluded_interfaces, ifa->ifa_name);
+
+ snprintfz(var_name, 4096, "%s:%s", CONFIG_SECTION_GETIFADDRS, ifa->ifa_name);
+ ifm->enabled = config_get_boolean_ondemand(var_name, "enabled", ifm->enabled);
+
+ if (unlikely(ifm->enabled == CONFIG_BOOLEAN_NO))
+ continue;
+
+ ifm->do_bandwidth = config_get_boolean_ondemand(var_name, "bandwidth", do_bandwidth);
+ ifm->do_packets = config_get_boolean_ondemand(var_name, "packets", do_packets);
+ ifm->do_errors = config_get_boolean_ondemand(var_name, "errors", do_errors);
+ ifm->do_drops = config_get_boolean_ondemand(var_name, "drops", do_drops);
+ ifm->do_events = config_get_boolean_ondemand(var_name, "events", do_events);
+ }
+
+ if (unlikely(!ifm->enabled))
+ continue;
+
+ // --------------------------------------------------------------------
+
+ if (ifm->do_bandwidth == CONFIG_BOOLEAN_YES || (ifm->do_bandwidth == CONFIG_BOOLEAN_AUTO &&
+ (IFA_DATA(ibytes) ||
+ IFA_DATA(obytes) ||
+ netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
+ if (unlikely(!ifm->st_bandwidth)) {
+ ifm->st_bandwidth = rrdset_create_localhost("net",
+ ifa->ifa_name,
+ NULL,
+ ifa->ifa_name,
+ "net.net",
+ "Bandwidth",
+ "kilobits/s",
+ "freebsd.plugin",
+ "getifaddrs",
+ NETDATA_CHART_PRIO_FIRST_NET_IFACE,
+ update_every,
+ RRDSET_TYPE_AREA
+ );
+
+ ifm->rd_bandwidth_in = rrddim_add(ifm->st_bandwidth, "received", NULL, 8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
+ ifm->rd_bandwidth_out = rrddim_add(ifm->st_bandwidth, "sent", NULL, -8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
+ } else
+ rrdset_next(ifm->st_bandwidth);
+
+ rrddim_set_by_pointer(ifm->st_bandwidth, ifm->rd_bandwidth_in, IFA_DATA(ibytes));
+ rrddim_set_by_pointer(ifm->st_bandwidth, ifm->rd_bandwidth_out, IFA_DATA(obytes));
+ rrdset_done(ifm->st_bandwidth);
+ }
+
+ // --------------------------------------------------------------------
+
+ if (ifm->do_packets == CONFIG_BOOLEAN_YES || (ifm->do_packets == CONFIG_BOOLEAN_AUTO &&
+ (IFA_DATA(ipackets) ||
+ IFA_DATA(opackets) ||
+ IFA_DATA(imcasts) ||
+ IFA_DATA(omcasts) ||
+ netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
+ if (unlikely(!ifm->st_packets)) {
+ ifm->st_packets = rrdset_create_localhost("net_packets",
+ ifa->ifa_name,
+ NULL,
+ ifa->ifa_name,
+ "net.packets",
+ "Packets",
+ "packets/s",
+ "freebsd.plugin",
+ "getifaddrs",
+ NETDATA_CHART_PRIO_FIRST_NET_PACKETS,
+ update_every,
+ RRDSET_TYPE_LINE
+ );
+
+ rrdset_flag_set(ifm->st_packets, RRDSET_FLAG_DETAIL);
+
+ ifm->rd_packets_in = rrddim_add(ifm->st_packets, "received", NULL, 1, 1,
+ RRD_ALGORITHM_INCREMENTAL);
+ ifm->rd_packets_out = rrddim_add(ifm->st_packets, "sent", NULL, -1, 1,
+ RRD_ALGORITHM_INCREMENTAL);
+ ifm->rd_packets_m_in = rrddim_add(ifm->st_packets, "multicast_received", NULL, 1, 1,
+ RRD_ALGORITHM_INCREMENTAL);
+ ifm->rd_packets_m_out = rrddim_add(ifm->st_packets, "multicast_sent", NULL, -1, 1,
+ RRD_ALGORITHM_INCREMENTAL);
+ } else
+ rrdset_next(ifm->st_packets);
+
+ rrddim_set_by_pointer(ifm->st_packets, ifm->rd_packets_in, IFA_DATA(ipackets));
+ rrddim_set_by_pointer(ifm->st_packets, ifm->rd_packets_out, IFA_DATA(opackets));
+ rrddim_set_by_pointer(ifm->st_packets, ifm->rd_packets_m_in, IFA_DATA(imcasts));
+ rrddim_set_by_pointer(ifm->st_packets, ifm->rd_packets_m_out, IFA_DATA(omcasts));
+ rrdset_done(ifm->st_packets);
+ }
+
+ // --------------------------------------------------------------------
+
+ if (ifm->do_errors == CONFIG_BOOLEAN_YES || (ifm->do_errors == CONFIG_BOOLEAN_AUTO &&
+ (IFA_DATA(ierrors) ||
+ IFA_DATA(oerrors) ||
+ netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
+ if (unlikely(!ifm->st_errors)) {
+ ifm->st_errors = rrdset_create_localhost("net_errors",
+ ifa->ifa_name,
+ NULL,
+ ifa->ifa_name,
+ "net.errors",
+ "Interface Errors",
+ "errors/s",
+ "freebsd.plugin",
+ "getifaddrs",
+ NETDATA_CHART_PRIO_FIRST_NET_ERRORS,
+ update_every,
+ RRDSET_TYPE_LINE
+ );
+
+ rrdset_flag_set(ifm->st_errors, RRDSET_FLAG_DETAIL);
+
+ ifm->rd_errors_in = rrddim_add(ifm->st_errors, "inbound", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ ifm->rd_errors_out = rrddim_add(ifm->st_errors, "outbound", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ } else
+ rrdset_next(ifm->st_errors);
+
+ rrddim_set_by_pointer(ifm->st_errors, ifm->rd_errors_in, IFA_DATA(ierrors));
+ rrddim_set_by_pointer(ifm->st_errors, ifm->rd_errors_out, IFA_DATA(oerrors));
+ rrdset_done(ifm->st_errors);
+ }
+
+ // --------------------------------------------------------------------
+
+ if (ifm->do_drops == CONFIG_BOOLEAN_YES || (ifm->do_drops == CONFIG_BOOLEAN_AUTO &&
+ (IFA_DATA(iqdrops) ||
+ #if __FreeBSD__ >= 11
+ IFA_DATA(oqdrops) ||
+ #endif
+ netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
+ if (unlikely(!ifm->st_drops)) {
+ ifm->st_drops = rrdset_create_localhost("net_drops",
+ ifa->ifa_name,
+ NULL,
+ ifa->ifa_name,
+ "net.drops",
+ "Interface Drops",
+ "drops/s",
+ "freebsd.plugin",
+ "getifaddrs",
+ NETDATA_CHART_PRIO_FIRST_NET_DROPS,
+ update_every,
+ RRDSET_TYPE_LINE
+ );
+
+ rrdset_flag_set(ifm->st_drops, RRDSET_FLAG_DETAIL);
+
+ ifm->rd_drops_in = rrddim_add(ifm->st_drops, "inbound", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+#if __FreeBSD__ >= 11
+ ifm->rd_drops_out = rrddim_add(ifm->st_drops, "outbound", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+#endif
+ } else
+ rrdset_next(ifm->st_drops);
+
+ rrddim_set_by_pointer(ifm->st_drops, ifm->rd_drops_in, IFA_DATA(iqdrops));
+#if __FreeBSD__ >= 11
+ rrddim_set_by_pointer(ifm->st_drops, ifm->rd_drops_out, IFA_DATA(oqdrops));
+#endif
+ rrdset_done(ifm->st_drops);
+ }
+
+ // --------------------------------------------------------------------
+
+ if (ifm->do_events == CONFIG_BOOLEAN_YES || (ifm->do_events == CONFIG_BOOLEAN_AUTO &&
+ (IFA_DATA(collisions) ||
+ netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
+ if (unlikely(!ifm->st_events)) {
+ ifm->st_events = rrdset_create_localhost("net_events",
+ ifa->ifa_name,
+ NULL,
+ ifa->ifa_name,
+ "net.events",
+ "Network Interface Events",
+ "events/s",
+ "freebsd.plugin",
+ "getifaddrs",
+ NETDATA_CHART_PRIO_FIRST_NET_EVENTS,
+ update_every,
+ RRDSET_TYPE_LINE
+ );
+
+ rrdset_flag_set(ifm->st_events, RRDSET_FLAG_DETAIL);
+
+ ifm->rd_events_coll = rrddim_add(ifm->st_events, "collisions", NULL, -1, 1,
+ RRD_ALGORITHM_INCREMENTAL);
+ } else
+ rrdset_next(ifm->st_events);
+
+ rrddim_set_by_pointer(ifm->st_events, ifm->rd_events_coll, IFA_DATA(collisions));
+ rrdset_done(ifm->st_events);
+ }
+ }
+
+ freeifaddrs(ifap);
+ }
+ } else {
+ error("DISABLED: getifaddrs module");
+ return 1;
+ }
+
+ network_interfaces_cleanup();
+
+ return 0;
+}
diff --git a/collectors/freebsd.plugin/freebsd_getmntinfo.c b/collectors/freebsd.plugin/freebsd_getmntinfo.c
new file mode 100644
index 0000000..58b67a3
--- /dev/null
+++ b/collectors/freebsd.plugin/freebsd_getmntinfo.c
@@ -0,0 +1,305 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "plugin_freebsd.h"
+
+#include <sys/mount.h>
+
+struct mount_point {
+ char *name;
+ uint32_t hash;
+ size_t len;
+
+ // flags
+ int configured;
+ int enabled;
+ int updated;
+
+ int do_space;
+ int do_inodes;
+
+ size_t collected; // the number of times this has been collected
+
+ // charts and dimensions
+
+ RRDSET *st_space;
+ RRDDIM *rd_space_used;
+ RRDDIM *rd_space_avail;
+ RRDDIM *rd_space_reserved;
+
+ RRDSET *st_inodes;
+ RRDDIM *rd_inodes_used;
+ RRDDIM *rd_inodes_avail;
+
+ struct mount_point *next;
+};
+
+static struct mount_point *mount_points_root = NULL, *mount_points_last_used = NULL;
+
+static size_t mount_points_added = 0, mount_points_found = 0;
+
+static void mount_point_free(struct mount_point *m) {
+ if (likely(m->st_space))
+ rrdset_is_obsolete(m->st_space);
+ if (likely(m->st_inodes))
+ rrdset_is_obsolete(m->st_inodes);
+
+ mount_points_added--;
+ freez(m->name);
+ freez(m);
+}
+
+static void mount_points_cleanup() {
+ if (likely(mount_points_found == mount_points_added)) return;
+
+ struct mount_point *m = mount_points_root, *last = NULL;
+ while(m) {
+ if (unlikely(!m->updated)) {
+ // info("Removing mount point '%s', linked after '%s'", m->name, last?last->name:"ROOT");
+
+ if (mount_points_last_used == m)
+ mount_points_last_used = last;
+
+ struct mount_point *t = m;
+
+ if (m == mount_points_root || !last)
+ mount_points_root = m = m->next;
+ else
+ last->next = m = m->next;
+
+ t->next = NULL;
+ mount_point_free(t);
+ }
+ else {
+ last = m;
+ m->updated = 0;
+ m = m->next;
+ }
+ }
+}
+
+static struct mount_point *get_mount_point(const char *name) {
+ struct mount_point *m;
+
+ uint32_t hash = simple_hash(name);
+
+ // search it, from the last position to the end
+ for(m = mount_points_last_used ; m ; m = m->next) {
+ if (unlikely(hash == m->hash && !strcmp(name, m->name))) {
+ mount_points_last_used = m->next;
+ return m;
+ }
+ }
+
+ // search it from the beginning to the last position we used
+ for(m = mount_points_root ; m != mount_points_last_used ; m = m->next) {
+ if (unlikely(hash == m->hash && !strcmp(name, m->name))) {
+ mount_points_last_used = m->next;
+ return m;
+ }
+ }
+
+ // create a new one
+ m = callocz(1, sizeof(struct mount_point));
+ m->name = strdupz(name);
+ m->hash = simple_hash(m->name);
+ m->len = strlen(m->name);
+ mount_points_added++;
+
+ // link it to the end
+ if (mount_points_root) {
+ struct mount_point *e;
+ for(e = mount_points_root; e->next ; e = e->next) ;
+ e->next = m;
+ }
+ else
+ mount_points_root = m;
+
+ return m;
+}
+
+// --------------------------------------------------------------------------------------------------------------------
+// getmntinfo
+
+int do_getmntinfo(int update_every, usec_t dt) {
+ (void)dt;
+
+#define DEFAULT_EXCLUDED_PATHS "/proc/*"
+// taken from gnulib/mountlist.c and shortened to FreeBSD-related fstypes
+#define DEFAULT_EXCLUDED_FILESYSTEMS "autofs procfs subfs devfs none"
+#define CONFIG_SECTION_GETMNTINFO "plugin:freebsd:getmntinfo"
+
+ static int enable_new_mount_points = -1;
+ static int do_space = -1, do_inodes = -1;
+ static SIMPLE_PATTERN *excluded_mountpoints = NULL;
+ static SIMPLE_PATTERN *excluded_filesystems = NULL;
+
+ if (unlikely(enable_new_mount_points == -1)) {
+ enable_new_mount_points = config_get_boolean_ondemand(CONFIG_SECTION_GETMNTINFO,
+ "enable new mount points detected at runtime",
+ CONFIG_BOOLEAN_AUTO);
+
+ do_space = config_get_boolean_ondemand(CONFIG_SECTION_GETMNTINFO, "space usage for all disks", CONFIG_BOOLEAN_AUTO);
+ do_inodes = config_get_boolean_ondemand(CONFIG_SECTION_GETMNTINFO, "inodes usage for all disks", CONFIG_BOOLEAN_AUTO);
+
+ excluded_mountpoints = simple_pattern_create(
+ config_get(CONFIG_SECTION_GETMNTINFO, "exclude space metrics on paths",
+ DEFAULT_EXCLUDED_PATHS)
+ , NULL
+ , SIMPLE_PATTERN_EXACT
+ );
+
+ excluded_filesystems = simple_pattern_create(
+ config_get(CONFIG_SECTION_GETMNTINFO, "exclude space metrics on filesystems",
+ DEFAULT_EXCLUDED_FILESYSTEMS)
+ , NULL
+ , SIMPLE_PATTERN_EXACT
+ );
+ }
+
+ if (likely(do_space || do_inodes)) {
+ struct statfs *mntbuf;
+ int mntsize;
+
+ // there is no mount info in sysctl MIBs
+ if (unlikely(!(mntsize = getmntinfo(&mntbuf, MNT_NOWAIT)))) {
+ error("FREEBSD: getmntinfo() failed");
+ do_space = 0;
+ error("DISABLED: disk_space.* charts");
+ do_inodes = 0;
+ error("DISABLED: disk_inodes.* charts");
+ error("DISABLED: getmntinfo module");
+ return 1;
+ } else {
+ int i;
+
+ mount_points_found = 0;
+
+ for (i = 0; i < mntsize; i++) {
+ char title[4096 + 1];
+
+ struct mount_point *m = get_mount_point(mntbuf[i].f_mntonname);
+ m->updated = 1;
+ mount_points_found++;
+
+ if (unlikely(!m->configured)) {
+ char var_name[4096 + 1];
+
+ // this is the first time we see this filesystem
+
+ // remember we configured it
+ m->configured = 1;
+
+ m->enabled = enable_new_mount_points;
+
+ if (likely(m->enabled))
+ m->enabled = !(simple_pattern_matches(excluded_mountpoints, mntbuf[i].f_mntonname)
+ || simple_pattern_matches(excluded_filesystems, mntbuf[i].f_fstypename));
+
+ snprintfz(var_name, 4096, "%s:%s", CONFIG_SECTION_GETMNTINFO, mntbuf[i].f_mntonname);
+ m->enabled = config_get_boolean_ondemand(var_name, "enabled", m->enabled);
+
+ if (unlikely(m->enabled == CONFIG_BOOLEAN_NO))
+ continue;
+
+ m->do_space = config_get_boolean_ondemand(var_name, "space usage", do_space);
+ m->do_inodes = config_get_boolean_ondemand(var_name, "inodes usage", do_inodes);
+ }
+
+ if (unlikely(!m->enabled))
+ continue;
+
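+ // skip read-only mount points that have never been collected;
+ // ones that already have charts keep updating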
+ if (unlikely(mntbuf[i].f_flags & MNT_RDONLY && !m->collected))
+ continue;
+
+ // --------------------------------------------------------------------------
+
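+ // rendered counts the charts drawn for this mount point in this
+ // cycle; a non-zero total marks it as collected at the end of the loop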
+ int rendered = 0;
+
+ if (m->do_space == CONFIG_BOOLEAN_YES || (m->do_space == CONFIG_BOOLEAN_AUTO &&
+ (mntbuf[i].f_blocks > 2 ||
+ netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
+ if (unlikely(!m->st_space)) {
+ snprintfz(title, 4096, "Disk Space Usage for %s [%s]",
+ mntbuf[i].f_mntonname, mntbuf[i].f_mntfromname);
+ m->st_space = rrdset_create_localhost("disk_space",
+ mntbuf[i].f_mntonname,
+ NULL,
+ mntbuf[i].f_mntonname,
+ "disk.space",
+ title,
+ "GiB",
+ "freebsd.plugin",
+ "getmntinfo",
+ NETDATA_CHART_PRIO_DISKSPACE_SPACE,
+ update_every,
+ RRDSET_TYPE_STACKED
+ );
+
+ m->rd_space_avail = rrddim_add(m->st_space, "avail", NULL,
+ mntbuf[i].f_bsize, GIGA_FACTOR, RRD_ALGORITHM_ABSOLUTE);
+ m->rd_space_used = rrddim_add(m->st_space, "used", NULL,
+ mntbuf[i].f_bsize, GIGA_FACTOR, RRD_ALGORITHM_ABSOLUTE);
+ m->rd_space_reserved = rrddim_add(m->st_space, "reserved_for_root", "reserved for root",
+ mntbuf[i].f_bsize, GIGA_FACTOR, RRD_ALGORITHM_ABSOLUTE);
+ } else
+ rrdset_next(m->st_space);
+
+ rrddim_set_by_pointer(m->st_space, m->rd_space_avail, (collected_number) mntbuf[i].f_bavail);
+ rrddim_set_by_pointer(m->st_space, m->rd_space_used, (collected_number) (mntbuf[i].f_blocks -
+ mntbuf[i].f_bfree));
+ rrddim_set_by_pointer(m->st_space, m->rd_space_reserved, (collected_number) (mntbuf[i].f_bfree -
+ mntbuf[i].f_bavail));
+ rrdset_done(m->st_space);
+
+ rendered++;
+ }
+
+ // --------------------------------------------------------------------------
+
+ if (m->do_inodes == CONFIG_BOOLEAN_YES || (m->do_inodes == CONFIG_BOOLEAN_AUTO &&
+ (mntbuf[i].f_files > 1 ||
+ netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
+ if (unlikely(!m->st_inodes)) {
+ snprintfz(title, 4096, "Disk Files (inodes) Usage for %s [%s]",
+ mntbuf[i].f_mntonname, mntbuf[i].f_mntfromname);
+ m->st_inodes = rrdset_create_localhost("disk_inodes",
+ mntbuf[i].f_mntonname,
+ NULL,
+ mntbuf[i].f_mntonname,
+ "disk.inodes",
+ title,
+ "inodes",
+ "freebsd.plugin",
+ "getmntinfo",
+ NETDATA_CHART_PRIO_DISKSPACE_INODES,
+ update_every,
+ RRDSET_TYPE_STACKED
+ );
+
+ m->rd_inodes_avail = rrddim_add(m->st_inodes, "avail", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ m->rd_inodes_used = rrddim_add(m->st_inodes, "used", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ } else
+ rrdset_next(m->st_inodes);
+
+ rrddim_set_by_pointer(m->st_inodes, m->rd_inodes_avail, (collected_number) mntbuf[i].f_ffree);
+ rrddim_set_by_pointer(m->st_inodes, m->rd_inodes_used, (collected_number) (mntbuf[i].f_files -
+ mntbuf[i].f_ffree));
+ rrdset_done(m->st_inodes);
+
+ rendered++;
+ }
+
+ if (likely(rendered))
+ m->collected++;
+ }
+ }
+ } else {
+ error("DISABLED: getmntinfo module");
+ return 1;
+ }
+
+ mount_points_cleanup();
+
+ return 0;
+}
diff --git a/collectors/freebsd.plugin/freebsd_ipfw.c b/collectors/freebsd.plugin/freebsd_ipfw.c
new file mode 100644
index 0000000..76466c3
--- /dev/null
+++ b/collectors/freebsd.plugin/freebsd_ipfw.c
@@ -0,0 +1,372 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "plugin_freebsd.h"
+
+#include <netinet/ip_fw.h>
+
+#define FREE_MEM_THRESHOLD 10000 // number of unused chunks that trigger memory freeing
+
+#define COMMON_IPFW_ERROR() error("DISABLED: ipfw.packets chart"); \
+ error("DISABLED: ipfw.bytes chart"); \
+ error("DISABLED: ipfw.dyn_active chart"); \
+ error("DISABLED: ipfw.dyn_expired chart"); \
+ error("DISABLED: ipfw.mem chart");
+
+// --------------------------------------------------------------------------------------------------------------------
+// ipfw
+
+int do_ipfw(int update_every, usec_t dt) {
+ (void)dt;
+#if __FreeBSD__ >= 11
+
+ static int do_static = -1, do_dynamic = -1, do_mem = -1;
+
+ if (unlikely(do_static == -1)) {
+ do_static = config_get_boolean("plugin:freebsd:ipfw", "counters for static rules", 1);
+ do_dynamic = config_get_boolean("plugin:freebsd:ipfw", "number of dynamic rules", 1);
+ do_mem = config_get_boolean("plugin:freebsd:ipfw", "allocated memory", 1);
+ }
+
+ // variables for getting ipfw configuration
+
+ int error;
+ static int ipfw_socket = -1;
+ static ipfw_cfg_lheader *cfg = NULL;
+ ip_fw3_opheader *op3 = NULL;
+ static socklen_t *optlen = NULL, cfg_size = 0;
+
+ // variables for static rules handling
+
+ ipfw_obj_ctlv *ctlv = NULL;
+ ipfw_obj_tlv *rbase = NULL;
+ int rcnt = 0;
+
+ int n, seen;
+ struct ip_fw_rule *rule;
+ struct ip_fw_bcounter *cntr;
+ int c = 0;
+
+ char rule_num_str[24]; // fits "rulenum_id" with a 16-bit rulenum and a 32-bit id
+
+ // variables for dynamic rules handling
+
+ caddr_t dynbase = NULL;
+ size_t dynsz = 0;
+ size_t readsz = sizeof(*cfg);
+ int ttype = 0;
+ ipfw_obj_tlv *tlv;
+ ipfw_dyn_rule *dyn_rule;
+ uint16_t rulenum, prev_rulenum = IPFW_DEFAULT_RULE;
+ unsigned srn, static_rules_num = 0;
+ static size_t dyn_rules_num_size = 0;
+
+ static struct dyn_rule_num {
+ uint16_t rule_num;
+ uint32_t active_rules;
+ uint32_t expired_rules;
+ } *dyn_rules_num = NULL;
+
+ uint32_t *dyn_rules_counter;
+
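+ // the do_* flags are plain 0/1 config booleans here, so bitwise OR
+ // behaves the same as logical OR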
+ if (likely(do_static | do_dynamic | do_mem)) {
+
+ // initialize the smallest ipfw_cfg_lheader possible
+
+ if (unlikely((optlen == NULL) || (cfg == NULL))) {
+ optlen = reallocz(optlen, sizeof(socklen_t));
+ *optlen = cfg_size = 32;
+ cfg = reallocz(cfg, *optlen);
+ }
+
+ // get socket descriptor and initialize ipfw_cfg_lheader structure
+
+ if (unlikely(ipfw_socket == -1))
+ ipfw_socket = socket(AF_INET, SOCK_RAW, IPPROTO_RAW);
+ if (unlikely(ipfw_socket == -1)) {
+ error("FREEBSD: can't get socket for ipfw configuration");
+ error("FREEBSD: run netdata as root to get access to ipfw data");
+ COMMON_IPFW_ERROR();
+ return 1;
+ }
+
+ bzero(cfg, 32);
+ cfg->flags = IPFW_CFG_GET_STATIC | IPFW_CFG_GET_COUNTERS | IPFW_CFG_GET_STATES;
+ op3 = &cfg->opheader;
+ op3->opcode = IP_FW_XGET;
+
+ // query the ipfw configuration size first, then fetch the configuration (growing the buffer if the kernel reports a larger size)
+
+ *optlen = cfg_size;
+ error = getsockopt(ipfw_socket, IPPROTO_IP, IP_FW3, op3, optlen);
+ if (error)
+ if (errno != ENOMEM) {
+ error("FREEBSD: ipfw socket reading error");
+ COMMON_IPFW_ERROR();
+ return 1;
+ }
+ if ((cfg->size > cfg_size) || ((cfg_size - cfg->size) > sizeof(struct dyn_rule_num) * FREE_MEM_THRESHOLD)) {
+ *optlen = cfg_size = cfg->size;
+ cfg = reallocz(cfg, *optlen);
+ bzero(cfg, 32);
+ cfg->flags = IPFW_CFG_GET_STATIC | IPFW_CFG_GET_COUNTERS | IPFW_CFG_GET_STATES;
+ op3 = &cfg->opheader;
+ op3->opcode = IP_FW_XGET;
+ error = getsockopt(ipfw_socket, IPPROTO_IP, IP_FW3, op3, optlen);
+ if (error) {
+ error("FREEBSD: ipfw socket reading error");
+ COMMON_IPFW_ERROR();
+ return 1;
+ }
+ }
+
+ // go through static rules configuration structures
+
+ ctlv = (ipfw_obj_ctlv *) (cfg + 1);
+
+ if (cfg->flags & IPFW_CFG_GET_STATIC) {
+ /* We've requested static rules */
+ if (ctlv->head.type == IPFW_TLV_TBLNAME_LIST) {
+ readsz += ctlv->head.length;
+ ctlv = (ipfw_obj_ctlv *) ((caddr_t) ctlv +
+ ctlv->head.length);
+ }
+
+ if (ctlv->head.type == IPFW_TLV_RULE_LIST) {
+ rbase = (ipfw_obj_tlv *) (ctlv + 1);
+ rcnt = ctlv->count;
+ readsz += ctlv->head.length;
+ ctlv = (ipfw_obj_ctlv *) ((caddr_t) ctlv + ctlv->head.length);
+ }
+ }
+
+ if ((cfg->flags & IPFW_CFG_GET_STATES) && (readsz != *optlen)) {
+ /* We may have some dynamic states */
+ dynsz = *optlen - readsz;
+ /* Skip empty header */
+ if (dynsz != sizeof(ipfw_obj_ctlv))
+ dynbase = (caddr_t) ctlv;
+ else
+ dynsz = 0;
+ }
+
+ // --------------------------------------------------------------------
+
+ if (likely(do_mem)) {
+ static RRDSET *st_mem = NULL;
+ static RRDDIM *rd_dyn_mem = NULL;
+ static RRDDIM *rd_stat_mem = NULL;
+
+ if (unlikely(!st_mem)) {
+ st_mem = rrdset_create_localhost("ipfw",
+ "mem",
+ NULL,
+ "memory allocated",
+ NULL,
+ "Memory allocated by rules",
+ "bytes",
+ "freebsd.plugin",
+ "ipfw",
+ NETDATA_CHART_PRIO_IPFW_MEM,
+ update_every,
+ RRDSET_TYPE_STACKED
+ );
+ rrdset_flag_set(st_mem, RRDSET_FLAG_DETAIL);
+
+ rd_dyn_mem = rrddim_add(st_mem, "dynamic", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ rd_stat_mem = rrddim_add(st_mem, "static", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ } else
+ rrdset_next(st_mem);
+
+ rrddim_set_by_pointer(st_mem, rd_dyn_mem, dynsz);
+ rrddim_set_by_pointer(st_mem, rd_stat_mem, *optlen - dynsz);
+ rrdset_done(st_mem);
+ }
+
+ // --------------------------------------------------------------------
+
+ static RRDSET *st_packets = NULL, *st_bytes = NULL;
+ RRDDIM *rd_packets = NULL, *rd_bytes = NULL;
+
+ if (likely(do_static || do_dynamic)) {
+ if (likely(do_static)) {
+ if (unlikely(!st_packets))
+ st_packets = rrdset_create_localhost("ipfw",
+ "packets",
+ NULL,
+ "static rules",
+ NULL,
+ "Packets",
+ "packets/s",
+ "freebsd.plugin",
+ "ipfw",
+ NETDATA_CHART_PRIO_IPFW_PACKETS,
+ update_every,
+ RRDSET_TYPE_STACKED
+ );
+ else
+ rrdset_next(st_packets);
+
+ if (unlikely(!st_bytes))
+ st_bytes = rrdset_create_localhost("ipfw",
+ "bytes",
+ NULL,
+ "static rules",
+ NULL,
+ "Bytes",
+ "bytes/s",
+ "freebsd.plugin",
+ "ipfw",
+ NETDATA_CHART_PRIO_IPFW_BYTES,
+ update_every,
+ RRDSET_TYPE_STACKED
+ );
+ else
+ rrdset_next(st_bytes);
+ }
+
+ for (n = seen = 0; n < rcnt; n++, rbase = (ipfw_obj_tlv *) ((caddr_t) rbase + rbase->length)) {
+ cntr = (struct ip_fw_bcounter *) (rbase + 1);
+ rule = (struct ip_fw_rule *) ((caddr_t) cntr + cntr->size);
+ if (rule->rulenum != prev_rulenum)
+ static_rules_num++;
+ if (rule->rulenum > IPFW_DEFAULT_RULE)
+ break;
+
+ if (likely(do_static)) {
+ sprintf(rule_num_str, "%d_%d", rule->rulenum, rule->id);
+
+ rd_packets = rrddim_find_active(st_packets, rule_num_str);
+ if (unlikely(!rd_packets))
+ rd_packets = rrddim_add(st_packets, rule_num_str, NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_set_by_pointer(st_packets, rd_packets, cntr->pcnt);
+
+ rd_bytes = rrddim_find_active(st_bytes, rule_num_str);
+ if (unlikely(!rd_bytes))
+ rd_bytes = rrddim_add(st_bytes, rule_num_str, NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_set_by_pointer(st_bytes, rd_bytes, cntr->bcnt);
+ }
+
+ c += rbase->length;
+ seen++;
+ }
+
+ if (likely(do_static)) {
+ rrdset_done(st_packets);
+ rrdset_done(st_bytes);
+ }
+ }
+
+ // --------------------------------------------------------------------
+
+ // go through dynamic rules configuration structures
+
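+ // dyn_rules_num is a table keyed by parent (static) rule number: each
+ // dynamic state found below bumps the active or expired counter of its
+ // parent rule, and an IPFW_DEFAULT_RULE entry marks the end of the
+ // used part of the table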
+ if (likely(do_dynamic && (dynsz > 0))) {
+ if ((dyn_rules_num_size < sizeof(struct dyn_rule_num) * static_rules_num) ||
+ ((dyn_rules_num_size - sizeof(struct dyn_rule_num) * static_rules_num) >
+ sizeof(struct dyn_rule_num) * FREE_MEM_THRESHOLD)) {
+ dyn_rules_num_size = sizeof(struct dyn_rule_num) * static_rules_num;
+ dyn_rules_num = reallocz(dyn_rules_num, dyn_rules_num_size);
+ }
+ bzero(dyn_rules_num, sizeof(struct dyn_rule_num) * static_rules_num);
+ dyn_rules_num->rule_num = IPFW_DEFAULT_RULE;
+
+ if (dynsz > 0 && ctlv->head.type == IPFW_TLV_DYNSTATE_LIST) {
+ dynbase += sizeof(*ctlv);
+ dynsz -= sizeof(*ctlv);
+ ttype = IPFW_TLV_DYN_ENT;
+ }
+
+ while (dynsz > 0) {
+ tlv = (ipfw_obj_tlv *) dynbase;
+ if (tlv->type != ttype)
+ break;
+
+ dyn_rule = (ipfw_dyn_rule *) (tlv + 1);
+ bcopy(&dyn_rule->rule, &rulenum, sizeof(rulenum));
+
+ for (srn = 0; srn < (static_rules_num - 1); srn++) {
+ if (dyn_rule->expire > 0)
+ dyn_rules_counter = &dyn_rules_num[srn].active_rules;
+ else
+ dyn_rules_counter = &dyn_rules_num[srn].expired_rules;
+ if (dyn_rules_num[srn].rule_num == rulenum) {
+ (*dyn_rules_counter)++;
+ break;
+ }
+ if (dyn_rules_num[srn].rule_num == IPFW_DEFAULT_RULE) {
+ dyn_rules_num[srn].rule_num = rulenum;
+ dyn_rules_num[srn + 1].rule_num = IPFW_DEFAULT_RULE;
+ (*dyn_rules_counter)++;
+ break;
+ }
+ }
+
+ dynsz -= tlv->length;
+ dynbase += tlv->length;
+ }
+
+ // --------------------------------------------------------------------
+
+ static RRDSET *st_active = NULL, *st_expired = NULL;
+ RRDDIM *rd_active = NULL, *rd_expired = NULL;
+
+ if (unlikely(!st_active))
+ st_active = rrdset_create_localhost("ipfw",
+ "active",
+ NULL,
+ "dynamic_rules",
+ NULL,
+ "Active rules",
+ "rules",
+ "freebsd.plugin",
+ "ipfw",
+ NETDATA_CHART_PRIO_IPFW_ACTIVE,
+ update_every,
+ RRDSET_TYPE_STACKED
+ );
+ else
+ rrdset_next(st_active);
+
+ if (unlikely(!st_expired))
+ st_expired = rrdset_create_localhost("ipfw",
+ "expired",
+ NULL,
+ "dynamic_rules",
+ NULL,
+ "Expired rules",
+ "rules",
+ "freebsd.plugin",
+ "ipfw",
+ NETDATA_CHART_PRIO_IPFW_EXPIRED,
+ update_every,
+ RRDSET_TYPE_STACKED
+ );
+ else
+ rrdset_next(st_expired);
+
+ for (srn = 0; (srn < (static_rules_num - 1)) && (dyn_rules_num[srn].rule_num != IPFW_DEFAULT_RULE); srn++) {
+ sprintf(rule_num_str, "%d", dyn_rules_num[srn].rule_num);
+
+ rd_active = rrddim_find_active(st_active, rule_num_str);
+ if (unlikely(!rd_active))
+ rd_active = rrddim_add(st_active, rule_num_str, NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ rrddim_set_by_pointer(st_active, rd_active, dyn_rules_num[srn].active_rules);
+
+ rd_expired = rrddim_find_active(st_expired, rule_num_str);
+ if (unlikely(!rd_expired))
+ rd_expired = rrddim_add(st_expired, rule_num_str, NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ rrddim_set_by_pointer(st_expired, rd_expired, dyn_rules_num[srn].expired_rules);
+ }
+
+ rrdset_done(st_active);
+ rrdset_done(st_expired);
+ }
+ }
+
+ return 0;
+#else
+ error("FREEBSD: ipfw charts supported for FreeBSD 11.0 and newer releases only");
+ COMMON_IPFW_ERROR();
+ return 1;
+#endif
+}
diff --git a/collectors/freebsd.plugin/freebsd_kstat_zfs.c b/collectors/freebsd.plugin/freebsd_kstat_zfs.c
new file mode 100644
index 0000000..7d609ea
--- /dev/null
+++ b/collectors/freebsd.plugin/freebsd_kstat_zfs.c
@@ -0,0 +1,304 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "plugin_freebsd.h"
+#include "collectors/proc.plugin/zfs_common.h"
+
+extern struct arcstats arcstats;
+
+// --------------------------------------------------------------------------------------------------------------------
+// kstat.zfs.misc.arcstats
+
+int do_kstat_zfs_misc_arcstats(int update_every, usec_t dt) {
+ (void)dt;
+
+ static int show_zero_charts = -1;
+ if(unlikely(show_zero_charts == -1))
+ show_zero_charts = config_get_boolean_ondemand("plugin:freebsd:zfs_arcstats", "show zero charts", CONFIG_BOOLEAN_NO);
+
+ unsigned long long l2_size;
+ size_t uint64_t_size = sizeof(uint64_t);
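+ // each int[5] member caches a resolved sysctl MIB, so GETSYSCTL_SIMPLE
+ // looks each name up only once; the commented-out members correspond
+ // to arcstats fields that are unused or have no FreeBSD MIB (see the
+ // notes below)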
+ static struct mibs {
+ int hits[5];
+ int misses[5];
+ int demand_data_hits[5];
+ int demand_data_misses[5];
+ int demand_metadata_hits[5];
+ int demand_metadata_misses[5];
+ int prefetch_data_hits[5];
+ int prefetch_data_misses[5];
+ int prefetch_metadata_hits[5];
+ int prefetch_metadata_misses[5];
+ int mru_hits[5];
+ int mru_ghost_hits[5];
+ int mfu_hits[5];
+ int mfu_ghost_hits[5];
+ int deleted[5];
+ int mutex_miss[5];
+ int evict_skip[5];
+ // int evict_not_enough[5];
+ // int evict_l2_cached[5];
+ // int evict_l2_eligible[5];
+ // int evict_l2_ineligible[5];
+ // int evict_l2_skip[5];
+ int hash_elements[5];
+ int hash_elements_max[5];
+ int hash_collisions[5];
+ int hash_chains[5];
+ int hash_chain_max[5];
+ int p[5];
+ int c[5];
+ int c_min[5];
+ int c_max[5];
+ int size[5];
+ // int hdr_size[5];
+ // int data_size[5];
+ // int metadata_size[5];
+ // int other_size[5];
+ // int anon_size[5];
+ // int anon_evictable_data[5];
+ // int anon_evictable_metadata[5];
+ int mru_size[5];
+ // int mru_evictable_data[5];
+ // int mru_evictable_metadata[5];
+ // int mru_ghost_size[5];
+ // int mru_ghost_evictable_data[5];
+ // int mru_ghost_evictable_metadata[5];
+ int mfu_size[5];
+ // int mfu_evictable_data[5];
+ // int mfu_evictable_metadata[5];
+ // int mfu_ghost_size[5];
+ // int mfu_ghost_evictable_data[5];
+ // int mfu_ghost_evictable_metadata[5];
+ int l2_hits[5];
+ int l2_misses[5];
+ // int l2_feeds[5];
+ // int l2_rw_clash[5];
+ int l2_read_bytes[5];
+ int l2_write_bytes[5];
+ // int l2_writes_sent[5];
+ // int l2_writes_done[5];
+ // int l2_writes_error[5];
+ // int l2_writes_lock_retry[5];
+ // int l2_evict_lock_retry[5];
+ // int l2_evict_reading[5];
+ // int l2_evict_l1cached[5];
+ // int l2_free_on_write[5];
+ // int l2_cdata_free_on_write[5];
+ // int l2_abort_lowmem[5];
+ // int l2_cksum_bad[5];
+ // int l2_io_error[5];
+ int l2_size[5];
+ int l2_asize[5];
+ // int l2_hdr_size[5];
+ // int l2_compress_successes[5];
+ // int l2_compress_zeros[5];
+ // int l2_compress_failures[5];
+ int memory_throttle_count[5];
+ // int duplicate_buffers[5];
+ // int duplicate_buffers_size[5];
+ // int duplicate_reads[5];
+ // int memory_direct_count[5];
+ // int memory_indirect_count[5];
+ // int arc_no_grow[5];
+ // int arc_tempreserve[5];
+ // int arc_loaned_bytes[5];
+ // int arc_prune[5];
+ // int arc_meta_used[5];
+ // int arc_meta_limit[5];
+ // int arc_meta_max[5];
+ // int arc_meta_min[5];
+ // int arc_need_free[5];
+ // int arc_sys_free[5];
+ } mibs;
+
+ arcstats.l2exist = -1;
+
+ if(unlikely(sysctlbyname("kstat.zfs.misc.arcstats.l2_size", &l2_size, &uint64_t_size, NULL, 0)))
+ return 0;
+
+ if(likely(l2_size))
+ arcstats.l2exist = 1;
+ else
+ arcstats.l2exist = 0;
+
+ GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.hits", mibs.hits, arcstats.hits);
+ GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.misses", mibs.misses, arcstats.misses);
+ GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.demand_data_hits", mibs.demand_data_hits, arcstats.demand_data_hits);
+ GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.demand_data_misses", mibs.demand_data_misses, arcstats.demand_data_misses);
+ GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.demand_metadata_hits", mibs.demand_metadata_hits, arcstats.demand_metadata_hits);
+ GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.demand_metadata_misses", mibs.demand_metadata_misses, arcstats.demand_metadata_misses);
+ GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.prefetch_data_hits", mibs.prefetch_data_hits, arcstats.prefetch_data_hits);
+ GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.prefetch_data_misses", mibs.prefetch_data_misses, arcstats.prefetch_data_misses);
+ GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.prefetch_metadata_hits", mibs.prefetch_metadata_hits, arcstats.prefetch_metadata_hits);
+ GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.prefetch_metadata_misses", mibs.prefetch_metadata_misses, arcstats.prefetch_metadata_misses);
+ GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.mru_hits", mibs.mru_hits, arcstats.mru_hits);
+ GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.mru_ghost_hits", mibs.mru_ghost_hits, arcstats.mru_ghost_hits);
+ GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.mfu_hits", mibs.mfu_hits, arcstats.mfu_hits);
+ GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.mfu_ghost_hits", mibs.mfu_ghost_hits, arcstats.mfu_ghost_hits);
+ GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.deleted", mibs.deleted, arcstats.deleted);
+ GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.mutex_miss", mibs.mutex_miss, arcstats.mutex_miss);
+ GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.evict_skip", mibs.evict_skip, arcstats.evict_skip);
+ // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.evict_not_enough", mibs.evict_not_enough, arcstats.evict_not_enough);
+ // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.evict_l2_cached", mibs.evict_l2_cached, arcstats.evict_l2_cached);
+ // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.evict_l2_eligible", mibs.evict_l2_eligible, arcstats.evict_l2_eligible);
+ // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.evict_l2_ineligible", mibs.evict_l2_ineligible, arcstats.evict_l2_ineligible);
+ // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.evict_l2_skip", mibs.evict_l2_skip, arcstats.evict_l2_skip);
+ GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.hash_elements", mibs.hash_elements, arcstats.hash_elements);
+ GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.hash_elements_max", mibs.hash_elements_max, arcstats.hash_elements_max);
+ GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.hash_collisions", mibs.hash_collisions, arcstats.hash_collisions);
+ GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.hash_chains", mibs.hash_chains, arcstats.hash_chains);
+ GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.hash_chain_max", mibs.hash_chain_max, arcstats.hash_chain_max);
+ GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.p", mibs.p, arcstats.p);
+ GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.c", mibs.c, arcstats.c);
+ GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.c_min", mibs.c_min, arcstats.c_min);
+ GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.c_max", mibs.c_max, arcstats.c_max);
+ GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.size", mibs.size, arcstats.size);
+ // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.hdr_size", mibs.hdr_size, arcstats.hdr_size);
+ // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.data_size", mibs.data_size, arcstats.data_size);
+ // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.metadata_size", mibs.metadata_size, arcstats.metadata_size);
+ // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.other_size", mibs.other_size, arcstats.other_size);
+ // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.anon_size", mibs.anon_size, arcstats.anon_size);
+ // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.anon_evictable_data", mibs.anon_evictable_data, arcstats.anon_evictable_data);
+ // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.anon_evictable_metadata", mibs.anon_evictable_metadata, arcstats.anon_evictable_metadata);
+ GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.mru_size", mibs.mru_size, arcstats.mru_size);
+ // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.mru_evictable_data", mibs.mru_evictable_data, arcstats.mru_evictable_data);
+ // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.mru_evictable_metadata", mibs.mru_evictable_metadata, arcstats.mru_evictable_metadata);
+ // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.mru_ghost_size", mibs.mru_ghost_size, arcstats.mru_ghost_size);
+ // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.mru_ghost_evictable_data", mibs.mru_ghost_evictable_data, arcstats.mru_ghost_evictable_data);
+ // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.mru_ghost_evictable_metadata", mibs.mru_ghost_evictable_metadata, arcstats.mru_ghost_evictable_metadata);
+ GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.mfu_size", mibs.mfu_size, arcstats.mfu_size);
+ // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.mfu_evictable_data", mibs.mfu_evictable_data, arcstats.mfu_evictable_data);
+ // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.mfu_evictable_metadata", mibs.mfu_evictable_metadata, arcstats.mfu_evictable_metadata);
+ // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.mfu_ghost_size", mibs.mfu_ghost_size, arcstats.mfu_ghost_size);
+ // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.mfu_ghost_evictable_data", mibs.mfu_ghost_evictable_data, arcstats.mfu_ghost_evictable_data);
+ // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.mfu_ghost_evictable_metadata", mibs.mfu_ghost_evictable_metadata, arcstats.mfu_ghost_evictable_metadata);
+ GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.l2_hits", mibs.l2_hits, arcstats.l2_hits);
+ GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.l2_misses", mibs.l2_misses, arcstats.l2_misses);
+ // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.l2_feeds", mibs.l2_feeds, arcstats.l2_feeds);
+ // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.l2_rw_clash", mibs.l2_rw_clash, arcstats.l2_rw_clash);
+ GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.l2_read_bytes", mibs.l2_read_bytes, arcstats.l2_read_bytes);
+ GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.l2_write_bytes", mibs.l2_write_bytes, arcstats.l2_write_bytes);
+ // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.l2_writes_sent", mibs.l2_writes_sent, arcstats.l2_writes_sent);
+ // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.l2_writes_done", mibs.l2_writes_done, arcstats.l2_writes_done);
+ // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.l2_writes_error", mibs.l2_writes_error, arcstats.l2_writes_error);
+ // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.l2_writes_lock_retry", mibs.l2_writes_lock_retry, arcstats.l2_writes_lock_retry);
+ // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.l2_evict_lock_retry", mibs.l2_evict_lock_retry, arcstats.l2_evict_lock_retry);
+ // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.l2_evict_reading", mibs.l2_evict_reading, arcstats.l2_evict_reading);
+ // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.l2_evict_l1cached", mibs.l2_evict_l1cached, arcstats.l2_evict_l1cached);
+ // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.l2_free_on_write", mibs.l2_free_on_write, arcstats.l2_free_on_write);
+ // missing mib: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.l2_cdata_free_on_write", mibs.l2_cdata_free_on_write, arcstats.l2_cdata_free_on_write);
+ // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.l2_abort_lowmem", mibs.l2_abort_lowmem, arcstats.l2_abort_lowmem);
+ // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.l2_cksum_bad", mibs.l2_cksum_bad, arcstats.l2_cksum_bad);
+ // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.l2_io_error", mibs.l2_io_error, arcstats.l2_io_error);
+ GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.l2_size", mibs.l2_size, arcstats.l2_size);
+ GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.l2_asize", mibs.l2_asize, arcstats.l2_asize);
+ // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.l2_hdr_size", mibs.l2_hdr_size, arcstats.l2_hdr_size);
+ // missing mib: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.l2_compress_successes", mibs.l2_compress_successes, arcstats.l2_compress_successes);
+ // missing mib: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.l2_compress_zeros", mibs.l2_compress_zeros, arcstats.l2_compress_zeros);
+ // missing mib: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.l2_compress_failures", mibs.l2_compress_failures, arcstats.l2_compress_failures);
+ GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.memory_throttle_count", mibs.memory_throttle_count, arcstats.memory_throttle_count);
+ // missing mib: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.duplicate_buffers", mibs.duplicate_buffers, arcstats.duplicate_buffers);
+ // missing mib: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.duplicate_buffers_size", mibs.duplicate_buffers_size, arcstats.duplicate_buffers_size);
+ // missing mib: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.duplicate_reads", mibs.duplicate_reads, arcstats.duplicate_reads);
+ // missing mib: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.memory_direct_count", mibs.memory_direct_count, arcstats.memory_direct_count);
+ // missing mib: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.memory_indirect_count", mibs.memory_indirect_count, arcstats.memory_indirect_count);
+ // missing mib: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.arc_no_grow", mibs.arc_no_grow, arcstats.arc_no_grow);
+ // missing mib: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.arc_tempreserve", mibs.arc_tempreserve, arcstats.arc_tempreserve);
+ // missing mib: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.arc_loaned_bytes", mibs.arc_loaned_bytes, arcstats.arc_loaned_bytes);
+ // missing mib: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.arc_prune", mibs.arc_prune, arcstats.arc_prune);
+ // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.arc_meta_used", mibs.arc_meta_used, arcstats.arc_meta_used);
+ // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.arc_meta_limit", mibs.arc_meta_limit, arcstats.arc_meta_limit);
+ // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.arc_meta_max", mibs.arc_meta_max, arcstats.arc_meta_max);
+ // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.arc_meta_min", mibs.arc_meta_min, arcstats.arc_meta_min);
+ // missing mib: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.arc_need_free", mibs.arc_need_free, arcstats.arc_need_free);
+ // missing mib: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.arc_sys_free", mibs.arc_sys_free, arcstats.arc_sys_free);
+
+ generate_charts_arcstats("freebsd", "zfs", show_zero_charts, update_every);
+ generate_charts_arc_summary("freebsd", "zfs", show_zero_charts, update_every);
+
+ return 0;
+}
+
+// --------------------------------------------------------------------------------------------------------------------
+// kstat.zfs.misc.zio_trim
+
+int do_kstat_zfs_misc_zio_trim(int update_every, usec_t dt) {
+ (void)dt;
+ static int mib_bytes[5] = {0, 0, 0, 0, 0}, mib_success[5] = {0, 0, 0, 0, 0},
+ mib_failed[5] = {0, 0, 0, 0, 0}, mib_unsupported[5] = {0, 0, 0, 0, 0};
+ uint64_t bytes, success, failed, unsupported;
+
+ if (unlikely(GETSYSCTL_SIMPLE("kstat.zfs.misc.zio_trim.bytes", mib_bytes, bytes) ||
+ GETSYSCTL_SIMPLE("kstat.zfs.misc.zio_trim.success", mib_success, success) ||
+ GETSYSCTL_SIMPLE("kstat.zfs.misc.zio_trim.failed", mib_failed, failed) ||
+ GETSYSCTL_SIMPLE("kstat.zfs.misc.zio_trim.unsupported", mib_unsupported, unsupported))) {
+ error("DISABLED: zfs.trim_bytes chart");
+ error("DISABLED: zfs.trim_success chart");
+ error("DISABLED: kstat.zfs.misc.zio_trim module");
+ return 1;
+ } else {
+
+ // --------------------------------------------------------------------
+
+ static RRDSET *st_bytes = NULL;
+ static RRDDIM *rd_bytes = NULL;
+
+ if (unlikely(!st_bytes)) {
+ st_bytes = rrdset_create_localhost(
+ "zfs",
+ "trim_bytes",
+ NULL,
+ "trim",
+ NULL,
+ "Successfully TRIMmed bytes",
+ "bytes",
+ "freebsd",
+ "zfs",
+ 2320,
+ update_every,
+ RRDSET_TYPE_LINE
+ );
+
+ rd_bytes = rrddim_add(st_bytes, "TRIMmed", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st_bytes);
+
+ rrddim_set_by_pointer(st_bytes, rd_bytes, bytes);
+ rrdset_done(st_bytes);
+
+ // --------------------------------------------------------------------
+
+ static RRDSET *st_requests = NULL;
+ static RRDDIM *rd_successful = NULL, *rd_failed = NULL, *rd_unsupported = NULL;
+
+ if (unlikely(!st_requests)) {
+ st_requests = rrdset_create_localhost(
+ "zfs",
+ "trim_requests",
+ NULL,
+ "trim",
+ NULL,
+ "TRIM requests",
+ "requests",
+ "freebsd",
+ "zfs",
+ 2321,
+ update_every,
+ RRDSET_TYPE_STACKED
+ );
+
+ rd_successful = rrddim_add(st_requests, "successful", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_failed = rrddim_add(st_requests, "failed", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_unsupported = rrddim_add(st_requests, "unsupported", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st_requests);
+
+ rrddim_set_by_pointer(st_requests, rd_successful, success);
+ rrddim_set_by_pointer(st_requests, rd_failed, failed);
+ rrddim_set_by_pointer(st_requests, rd_unsupported, unsupported);
+ rrdset_done(st_requests);
+
+ }
+
+ return 0;
+}
\ No newline at end of file
diff --git a/collectors/freebsd.plugin/freebsd_sysctl.c b/collectors/freebsd.plugin/freebsd_sysctl.c
new file mode 100644
index 0000000..a71ec56
--- /dev/null
+++ b/collectors/freebsd.plugin/freebsd_sysctl.c
@@ -0,0 +1,3240 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "plugin_freebsd.h"
+
+#include <sys/vmmeter.h>
+#include <vm/vm_param.h>
+
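+// The System V IPC headers expose the kernel-side structures used below
+// (semid_kernel, shmid_kernel, msqid_kernel) only when _KERNEL is defined,
+// so define it temporarily around these includes.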
+#define _KERNEL
+#include <sys/sem.h>
+#include <sys/shm.h>
+#include <sys/msg.h>
+#undef _KERNEL
+
+#include <net/netisr.h>
+
+#include <netinet/ip.h>
+#include <netinet/ip_var.h>
+#include <netinet/ip_icmp.h>
+#include <netinet/icmp_var.h>
+#include <netinet6/ip6_var.h>
+#include <netinet/icmp6.h>
+#include <netinet/tcp_var.h>
+#include <netinet/tcp_fsm.h>
+#include <netinet/udp.h>
+#include <netinet/udp_var.h>
+
+// --------------------------------------------------------------------------------------------------------------------
+// common definitions and variables
+
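+// NOTE: the static "mib" arrays handed to the GETSYSCTL_* helpers (declared
+// in plugin_freebsd.h) cache the resolved sysctl OID, so the name-to-MIB
+// lookup is only paid on the first call of each collector.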
+int system_pagesize = PAGE_SIZE;
+int number_of_cpus = 1;
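+// FreeBSD 12 (__FreeBSD_version 1200029) no longer provides a userland-usable
+// struct vmmeter, so mirror the fields collected below with explicit types.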
+#if __FreeBSD_version >= 1200029
+struct __vmmeter {
+ uint64_t v_swtch;
+ uint64_t v_trap;
+ uint64_t v_syscall;
+ uint64_t v_intr;
+ uint64_t v_soft;
+ uint64_t v_vm_faults;
+ uint64_t v_io_faults;
+ uint64_t v_cow_faults;
+ uint64_t v_cow_optim;
+ uint64_t v_zfod;
+ uint64_t v_ozfod;
+ uint64_t v_swapin;
+ uint64_t v_swapout;
+ uint64_t v_swappgsin;
+ uint64_t v_swappgsout;
+ uint64_t v_vnodein;
+ uint64_t v_vnodeout;
+ uint64_t v_vnodepgsin;
+ uint64_t v_vnodepgsout;
+ uint64_t v_intrans;
+ uint64_t v_reactivated;
+ uint64_t v_pdwakeups;
+ uint64_t v_pdpages;
+ uint64_t v_pdshortfalls;
+ uint64_t v_dfree;
+ uint64_t v_pfree;
+ uint64_t v_tfree;
+ uint64_t v_forks;
+ uint64_t v_vforks;
+ uint64_t v_rforks;
+ uint64_t v_kthreads;
+ uint64_t v_forkpages;
+ uint64_t v_vforkpages;
+ uint64_t v_rforkpages;
+ uint64_t v_kthreadpages;
+ u_int v_page_size;
+ u_int v_page_count;
+ u_int v_free_reserved;
+ u_int v_free_target;
+ u_int v_free_min;
+ u_int v_free_count;
+ u_int v_wire_count;
+ u_int v_active_count;
+ u_int v_inactive_target;
+ u_int v_inactive_count;
+ u_int v_laundry_count;
+ u_int v_pageout_free_min;
+ u_int v_interrupt_free_min;
+ u_int v_free_severe;
+};
+typedef struct __vmmeter vmmeter_t;
+#else
+typedef struct vmmeter vmmeter_t;
+#endif
+
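+// The laundry page queue exists only on FreeBSD 11.1+/12.0+, where the
+// vm.stats.vm.v_laundry_count sysctl is available.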
+#if (__FreeBSD_version >= 1101516 && __FreeBSD_version < 1200000) || __FreeBSD_version >= 1200015
+#define NETDATA_COLLECT_LAUNDRY 1
+#endif
+
+// --------------------------------------------------------------------------------------------------------------------
+// FreeBSD plugin initialization
+
+int freebsd_plugin_init()
+{
+ system_pagesize = getpagesize();
+ if (system_pagesize <= 0) {
+ error("FREEBSD: can't get system page size");
+ return 1;
+ }
+
+ if (unlikely(GETSYSCTL_BY_NAME("kern.smp.cpus", number_of_cpus))) {
+ error("FREEBSD: can't get number of cpus");
+ return 1;
+ }
+
+ if (unlikely(!number_of_cpus)) {
+ error("FREEBSD: wrong number of cpus");
+ return 1;
+ }
+
+ return 0;
+}
+
+// --------------------------------------------------------------------------------------------------------------------
+// vm.loadavg
+
+// FreeBSD calculates load averages once every 5 seconds
+#define MIN_LOADAVG_UPDATE_EVERY 5
+
+int do_vm_loadavg(int update_every, usec_t dt){
+ static usec_t next_loadavg_dt = 0;
+
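+ // next_loadavg_dt counts down the microseconds until the next read, so the
+ // sysctl is queried at most once per chart update interval (minimum 5s,
+ // matching the kernel's own refresh rate).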
+ if (next_loadavg_dt <= dt) {
+ static int mib[2] = {0, 0};
+ struct loadavg sysload;
+
+ if (unlikely(GETSYSCTL_SIMPLE("vm.loadavg", mib, sysload))) {
+ error("DISABLED: system.load chart");
+ error("DISABLED: vm.loadavg module");
+ return 1;
+ } else {
+
+ // --------------------------------------------------------------------
+
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_load1 = NULL, *rd_load2 = NULL, *rd_load3 = NULL;
+
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "system",
+ "load",
+ NULL,
+ "load",
+ NULL,
+ "System Load Average",
+ "load",
+ "freebsd.plugin",
+ "vm.loadavg",
+ NETDATA_CHART_PRIO_SYSTEM_LOAD,
+ (update_every < MIN_LOADAVG_UPDATE_EVERY) ?
+ MIN_LOADAVG_UPDATE_EVERY : update_every, RRDSET_TYPE_LINE
+ );
+ rd_load1 = rrddim_add(st, "load1", NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE);
+ rd_load2 = rrddim_add(st, "load5", NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE);
+ rd_load3 = rrddim_add(st, "load15", NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE);
+ } else
+ rrdset_next(st);
+
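+ // ldavg[] is fixed-point, scaled by fscale; multiplying by 1000 keeps
+ // three decimal places, since the dimensions divide by 1000.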
+ rrddim_set_by_pointer(st, rd_load1, (collected_number) ((double) sysload.ldavg[0] / sysload.fscale * 1000));
+ rrddim_set_by_pointer(st, rd_load2, (collected_number) ((double) sysload.ldavg[1] / sysload.fscale * 1000));
+ rrddim_set_by_pointer(st, rd_load3, (collected_number) ((double) sysload.ldavg[2] / sysload.fscale * 1000));
+ rrdset_done(st);
+
+ next_loadavg_dt = st->update_every * USEC_PER_SEC;
+ }
+ }
+ else
+ next_loadavg_dt -= dt;
+
+ return 0;
+}
+
+// --------------------------------------------------------------------------------------------------------------------
+// vm.vmtotal
+
+int do_vm_vmtotal(int update_every, usec_t dt) {
+ (void)dt;
+ static int do_all_processes = -1, do_processes = -1, do_committed = -1;
+
+ if (unlikely(do_all_processes == -1)) {
+ do_all_processes = config_get_boolean("plugin:freebsd:vm.vmtotal", "enable total processes", 1);
+ do_processes = config_get_boolean("plugin:freebsd:vm.vmtotal", "processes running", 1);
+ do_committed = config_get_boolean("plugin:freebsd:vm.vmtotal", "committed memory", 1);
+ }
+
+ if (likely(do_all_processes | do_processes | do_committed)) {
+ static int mib[2] = {0, 0};
+ struct vmtotal vmtotal_data;
+
+ if (unlikely(GETSYSCTL_SIMPLE("vm.vmtotal", mib, vmtotal_data))) {
+ do_all_processes = 0;
+ error("DISABLED: system.active_processes chart");
+ do_processes = 0;
+ error("DISABLED: system.processes chart");
+ do_committed = 0;
+ error("DISABLED: mem.committed chart");
+ error("DISABLED: vm.vmtotal module");
+ return 1;
+ } else {
+
+ // --------------------------------------------------------------------
+
+ if (likely(do_all_processes)) {
+ static RRDSET *st = NULL;
+ static RRDDIM *rd = NULL;
+
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "system",
+ "active_processes",
+ NULL,
+ "processes",
+ NULL,
+ "System Active Processes",
+ "processes",
+ "freebsd.plugin",
+ "vm.vmtotal",
+ NETDATA_CHART_PRIO_SYSTEM_ACTIVE_PROCESSES,
+ update_every,
+ RRDSET_TYPE_LINE
+ );
+ rd = rrddim_add(st, "active", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd, (vmtotal_data.t_rq + vmtotal_data.t_dw + vmtotal_data.t_pw + vmtotal_data.t_sl + vmtotal_data.t_sw));
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if (likely(do_processes)) {
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_running = NULL, *rd_blocked = NULL;
+
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "system",
+ "processes",
+ NULL,
+ "processes",
+ NULL,
+ "System Processes",
+ "processes",
+ "freebsd.plugin",
+ "vm.vmtotal",
+ NETDATA_CHART_PRIO_SYSTEM_PROCESSES,
+ update_every,
+ RRDSET_TYPE_LINE
+ );
+
+ rd_running = rrddim_add(st, "running", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ rd_blocked = rrddim_add(st, "blocked", NULL, -1, 1, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_running, vmtotal_data.t_rq);
+ rrddim_set_by_pointer(st, rd_blocked, (vmtotal_data.t_dw + vmtotal_data.t_pw));
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if (likely(do_committed)) {
+ static RRDSET *st = NULL;
+ static RRDDIM *rd = NULL;
+
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "mem",
+ "committed",
+ NULL,
+ "system",
+ NULL,
+ "Committed (Allocated) Memory",
+ "MiB",
+ "freebsd.plugin",
+ "vm.vmtotal",
+ NETDATA_CHART_PRIO_MEM_SYSTEM_COMMITTED,
+ update_every,
+ RRDSET_TYPE_AREA
+ );
+ rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
+
+ rd = rrddim_add(st, "Committed_AS", NULL, system_pagesize, MEGA_FACTOR, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd, vmtotal_data.t_rm);
+ rrdset_done(st);
+ }
+ }
+ } else {
+ error("DISABLED: vm.vmtotal module");
+ return 1;
+ }
+
+ return 0;
+}
+
+// --------------------------------------------------------------------------------------------------------------------
+// kern.cp_time
+
+int do_kern_cp_time(int update_every, usec_t dt) {
+ (void)dt;
+
+ if (unlikely(CPUSTATES != 5)) {
+ error("FREEBSD: There are %d CPU states (5 was expected)", CPUSTATES);
+ error("DISABLED: system.cpu chart");
+ error("DISABLED: kern.cp_time module");
+ return 1;
+ } else {
+ static int mib[2] = {0, 0};
+ long cp_time[CPUSTATES];
+
+ if (unlikely(GETSYSCTL_SIMPLE("kern.cp_time", mib, cp_time))) {
+ error("DISABLED: system.cpu chart");
+ error("DISABLED: kern.cp_time module");
+ return 1;
+ } else {
+
+ // --------------------------------------------------------------------
+
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_nice = NULL, *rd_system = NULL, *rd_user = NULL, *rd_interrupt = NULL, *rd_idle = NULL;
+
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "system",
+ "cpu",
+ NULL,
+ "cpu",
+ "system.cpu",
+ "Total CPU utilization",
+ "percentage",
+ "freebsd.plugin",
+ "kern.cp_time",
+ NETDATA_CHART_PRIO_SYSTEM_CPU,
+ update_every,
+ RRDSET_TYPE_STACKED
+ );
+
+ rd_nice = rrddim_add(st, "nice", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
+ rd_system = rrddim_add(st, "system", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
+ rd_user = rrddim_add(st, "user", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
+ rd_interrupt = rrddim_add(st, "interrupt", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
+ rd_idle = rrddim_add(st, "idle", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
+ rrddim_hide(st, "idle");
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_nice, cp_time[1]);
+ rrddim_set_by_pointer(st, rd_system, cp_time[2]);
+ rrddim_set_by_pointer(st, rd_user, cp_time[0]);
+ rrddim_set_by_pointer(st, rd_interrupt, cp_time[3]);
+ rrddim_set_by_pointer(st, rd_idle, cp_time[4]);
+ rrdset_done(st);
+ }
+ }
+
+ return 0;
+}
+
+// --------------------------------------------------------------------------------------------------------------------
+// kern.cp_times
+
+int do_kern_cp_times(int update_every, usec_t dt) {
+ (void)dt;
+
+ if (unlikely(CPUSTATES != 5)) {
+ error("FREEBSD: There are %d CPU states (5 was expected)", CPUSTATES);
+ error("DISABLED: cpu.cpuXX charts");
+ error("DISABLED: kern.cp_times module");
+ return 1;
+ } else {
+ static int mib[2] = {0, 0};
+ long cp_time[CPUSTATES];
+ static long *pcpu_cp_time = NULL;
+ static int old_number_of_cpus = 0;
+
+ if(unlikely(number_of_cpus != old_number_of_cpus))
+ pcpu_cp_time = reallocz(pcpu_cp_time, sizeof(cp_time) * number_of_cpus);
+ if (unlikely(GETSYSCTL_WSIZE("kern.cp_times", mib, pcpu_cp_time, sizeof(cp_time) * number_of_cpus))) {
+ error("DISABLED: cpu.cpuXX charts");
+ error("DISABLED: kern.cp_times module");
+ return 1;
+ } else {
+
+ // --------------------------------------------------------------------
+
+ int i;
+ static struct cpu_chart {
+ char cpuid[MAX_INT_DIGITS + 4];
+ RRDSET *st;
+ RRDDIM *rd_user;
+ RRDDIM *rd_nice;
+ RRDDIM *rd_system;
+ RRDDIM *rd_interrupt;
+ RRDDIM *rd_idle;
+ } *all_cpu_charts = NULL;
+
+ if(unlikely(number_of_cpus > old_number_of_cpus)) {
+ all_cpu_charts = reallocz(all_cpu_charts, sizeof(struct cpu_chart) * number_of_cpus);
+ memset(&all_cpu_charts[old_number_of_cpus], 0, sizeof(struct cpu_chart) * (number_of_cpus - old_number_of_cpus));
+ }
+
+ for (i = 0; i < number_of_cpus; i++) {
+ if (unlikely(!all_cpu_charts[i].st)) {
+ snprintfz(all_cpu_charts[i].cpuid, MAX_INT_DIGITS, "cpu%d", i);
+ all_cpu_charts[i].st = rrdset_create_localhost(
+ "cpu",
+ all_cpu_charts[i].cpuid,
+ NULL,
+ "utilization",
+ "cpu.cpu",
+ "Core utilization",
+ "percentage",
+ "freebsd.plugin",
+ "kern.cp_times",
+ NETDATA_CHART_PRIO_CPU_PER_CORE,
+ update_every,
+ RRDSET_TYPE_STACKED
+ );
+
+ all_cpu_charts[i].rd_nice = rrddim_add(all_cpu_charts[i].st, "nice", NULL, 1, 1,
+ RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
+ all_cpu_charts[i].rd_system = rrddim_add(all_cpu_charts[i].st, "system", NULL, 1, 1,
+ RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
+ all_cpu_charts[i].rd_user = rrddim_add(all_cpu_charts[i].st, "user", NULL, 1, 1,
+ RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
+ all_cpu_charts[i].rd_interrupt = rrddim_add(all_cpu_charts[i].st, "interrupt", NULL, 1, 1,
+ RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
+ all_cpu_charts[i].rd_idle = rrddim_add(all_cpu_charts[i].st, "idle", NULL, 1, 1,
+ RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
+ rrddim_hide(all_cpu_charts[i].st, "idle");
+ } else rrdset_next(all_cpu_charts[i].st);
+
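+ // kern.cp_times returns CPUSTATES counters per CPU, laid out
+ // [user, nice, sys, intr, idle] - hence the i * 5 + state indexing
+ // (CPUSTATES == 5 is verified above).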
+ rrddim_set_by_pointer(all_cpu_charts[i].st, all_cpu_charts[i].rd_nice, pcpu_cp_time[i * 5 + 1]);
+ rrddim_set_by_pointer(all_cpu_charts[i].st, all_cpu_charts[i].rd_system, pcpu_cp_time[i * 5 + 2]);
+ rrddim_set_by_pointer(all_cpu_charts[i].st, all_cpu_charts[i].rd_user, pcpu_cp_time[i * 5 + 0]);
+ rrddim_set_by_pointer(all_cpu_charts[i].st, all_cpu_charts[i].rd_interrupt, pcpu_cp_time[i * 5 + 3]);
+ rrddim_set_by_pointer(all_cpu_charts[i].st, all_cpu_charts[i].rd_idle, pcpu_cp_time[i * 5 + 4]);
+ rrdset_done(all_cpu_charts[i].st);
+ }
+ }
+
+ old_number_of_cpus = number_of_cpus;
+ }
+
+ return 0;
+}
+
+// --------------------------------------------------------------------------------------------------------------------
+// dev.cpu.temperature
+
+int do_dev_cpu_temperature(int update_every, usec_t dt) {
+ (void)dt;
+
+ int i;
+ static int *mib = NULL;
+ static int *pcpu_temperature = NULL;
+ static int old_number_of_cpus = 0;
+ char char_mib[MAX_INT_DIGITS + 21];
+ char char_rd[MAX_INT_DIGITS + 9];
+
+ if (unlikely(number_of_cpus != old_number_of_cpus)) {
+ pcpu_temperature = reallocz(pcpu_temperature, sizeof(int) * number_of_cpus);
+ mib = reallocz(mib, sizeof(int) * number_of_cpus * 4);
+ if (unlikely(number_of_cpus > old_number_of_cpus))
+ memset(&mib[old_number_of_cpus * 4], 0, sizeof(int) * (number_of_cpus - old_number_of_cpus) * 4);
+ }
+ for (i = 0; i < number_of_cpus; i++) {
+ if (unlikely(!(mib[i * 4])))
+ sprintf(char_mib, "dev.cpu.%d.temperature", i);
+ if (unlikely(getsysctl_simple(char_mib, &mib[i * 4], 4, &pcpu_temperature[i], sizeof(int)))) {
+ error("DISABLED: cpu.temperature chart");
+ error("DISABLED: dev.cpu.temperature module");
+ return 1;
+ }
+ }
+
+ // --------------------------------------------------------------------
+
+    static RRDSET *st = NULL;
+    static RRDDIM **rd_pcpu_temperature = NULL;
+
+    if (unlikely(number_of_cpus != old_number_of_cpus)) {
+        rd_pcpu_temperature = reallocz(rd_pcpu_temperature, sizeof(RRDDIM *) * number_of_cpus);
+        if (unlikely(number_of_cpus > old_number_of_cpus))
+            memset(&rd_pcpu_temperature[old_number_of_cpus], 0, sizeof(RRDDIM *) * (number_of_cpus - old_number_of_cpus));
+ }
+
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "cpu",
+ "temperature",
+ NULL,
+ "temperature",
+ "cpu.temperatute",
+ "Core temperature",
+ "Celsius",
+ "freebsd.plugin",
+ "dev.cpu.temperature",
+ NETDATA_CHART_PRIO_CPU_TEMPERATURE,
+ update_every,
+ RRDSET_TYPE_LINE
+ );
+ }
+ else rrdset_next(st);
+
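+ // dev.cpu.N.temperature reports tenths of a degree Kelvin (sysctl "IK"
+ // format); convert to Celsius below.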
+ for (i = 0; i < number_of_cpus; i++) {
+ if (unlikely(!rd_pcpu_temperature[i])) {
+ sprintf(char_rd, "cpu%d.temp", i);
+ rd_pcpu_temperature[i] = rrddim_add(st, char_rd, NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ }
+
+ rrddim_set_by_pointer(st, rd_pcpu_temperature[i], (collected_number) ((double)pcpu_temperature[i] / 10 - 273.15));
+ }
+
+ rrdset_done(st);
+
+ old_number_of_cpus = number_of_cpus;
+
+ return 0;
+}
+
+// --------------------------------------------------------------------------------------------------------------------
+// dev.cpu.0.freq
+
+int do_dev_cpu_0_freq(int update_every, usec_t dt) {
+ (void)dt;
+ static int mib[4] = {0, 0, 0, 0};
+ int cpufreq;
+
+ if (unlikely(GETSYSCTL_SIMPLE("dev.cpu.0.freq", mib, cpufreq))) {
+ error("DISABLED: cpu.scaling_cur_freq chart");
+ error("DISABLED: dev.cpu.0.freq module");
+ return 1;
+ } else {
+
+ // --------------------------------------------------------------------
+
+ static RRDSET *st = NULL;
+ static RRDDIM *rd = NULL;
+
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "cpu",
+ "scaling_cur_freq",
+ NULL,
+ "cpufreq",
+ NULL,
+ "Current CPU Scaling Frequency",
+ "MHz",
+ "freebsd.plugin",
+ "dev.cpu.0.freq",
+ NETDATA_CHART_PRIO_CPUFREQ_SCALING_CUR_FREQ,
+ update_every,
+ RRDSET_TYPE_LINE
+ );
+
+ rd = rrddim_add(st, "frequency", NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd, cpufreq);
+ rrdset_done(st);
+ }
+
+ return 0;
+}
+
+// --------------------------------------------------------------------------------------------------------------------
+// hw.intrcnt
+
+int do_hw_intcnt(int update_every, usec_t dt) {
+ (void)dt;
+ static int mib_hw_intrcnt[2] = {0, 0};
+ size_t intrcnt_size = 0;
+
+ if (unlikely(GETSYSCTL_SIZE("hw.intrcnt", mib_hw_intrcnt, intrcnt_size))) {
+ error("DISABLED: system.intr chart");
+ error("DISABLED: system.interrupts chart");
+ error("DISABLED: hw.intrcnt module");
+ return 1;
+ } else {
+ unsigned long nintr = 0;
+ static unsigned long old_nintr = 0;
+ static unsigned long *intrcnt = NULL;
+
+ nintr = intrcnt_size / sizeof(u_long);
+ if (unlikely(nintr != old_nintr))
+ intrcnt = reallocz(intrcnt, nintr * sizeof(u_long));
+ if (unlikely(GETSYSCTL_WSIZE("hw.intrcnt", mib_hw_intrcnt, intrcnt, nintr * sizeof(u_long)))) {
+ error("DISABLED: system.intr chart");
+ error("DISABLED: system.interrupts chart");
+ error("DISABLED: hw.intrcnt module");
+ return 1;
+ } else {
+ unsigned long long totalintr = 0;
+ unsigned long i;
+
+ for (i = 0; i < nintr; i++)
+ totalintr += intrcnt[i];
+
+ // --------------------------------------------------------------------
+
+ static RRDSET *st_intr = NULL;
+ static RRDDIM *rd_intr = NULL;
+
+ if (unlikely(!st_intr)) {
+ st_intr = rrdset_create_localhost(
+ "system",
+ "intr",
+ NULL,
+ "interrupts",
+ NULL,
+ "Total Hardware Interrupts",
+ "interrupts/s",
+ "freebsd.plugin",
+ "hw.intrcnt",
+ NETDATA_CHART_PRIO_SYSTEM_INTR,
+ update_every,
+ RRDSET_TYPE_LINE
+ );
+ rrdset_flag_set(st_intr, RRDSET_FLAG_DETAIL);
+
+ rd_intr = rrddim_add(st_intr, "interrupts", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ } else
+ rrdset_next(st_intr);
+
+ rrddim_set_by_pointer(st_intr, rd_intr, totalintr);
+ rrdset_done(st_intr);
+
+ // --------------------------------------------------------------------
+
+ size_t size;
+ static int mib_hw_intrnames[2] = {0, 0};
+ static char *intrnames = NULL;
+
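+ // hw.intrnames is assumed to be a table of fixed-width entries of
+ // MAXCOMLEN + 1 bytes each; vectors with empty names are skipped below.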
+ size = nintr * (MAXCOMLEN + 1);
+ if (unlikely(nintr != old_nintr))
+ intrnames = reallocz(intrnames, size);
+ if (unlikely(GETSYSCTL_WSIZE("hw.intrnames", mib_hw_intrnames, intrnames, size))) {
+ error("DISABLED: system.intr chart");
+ error("DISABLED: system.interrupts chart");
+ error("DISABLED: hw.intrcnt module");
+ return 1;
+ } else {
+
+ // --------------------------------------------------------------------
+
+ static RRDSET *st_interrupts = NULL;
+
+ if (unlikely(!st_interrupts))
+ st_interrupts = rrdset_create_localhost(
+ "system",
+ "interrupts",
+ NULL,
+ "interrupts",
+ NULL,
+ "System interrupts",
+ "interrupts/s",
+ "freebsd.plugin",
+ "hw.intrcnt",
+ NETDATA_CHART_PRIO_SYSTEM_INTERRUPTS,
+ update_every,
+ RRDSET_TYPE_STACKED
+ );
+ else
+ rrdset_next(st_interrupts);
+
+ for (i = 0; i < nintr; i++) {
+ void *p;
+
+ p = intrnames + i * (MAXCOMLEN + 1);
+ if (unlikely((intrcnt[i] != 0) && (*(char *) p != 0))) {
+ RRDDIM *rd_interrupts = rrddim_find_active(st_interrupts, p);
+
+ if (unlikely(!rd_interrupts))
+ rd_interrupts = rrddim_add(st_interrupts, p, NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+
+ rrddim_set_by_pointer(st_interrupts, rd_interrupts, intrcnt[i]);
+ }
+ }
+ rrdset_done(st_interrupts);
+ }
+ }
+
+ old_nintr = nintr;
+ }
+
+ return 0;
+}
+
+// --------------------------------------------------------------------------------------------------------------------
+// vm.stats.sys.v_intr
+
+int do_vm_stats_sys_v_intr(int update_every, usec_t dt) {
+ (void)dt;
+ static int mib[4] = {0, 0, 0, 0};
+ u_int int_number;
+
+ if (unlikely(GETSYSCTL_SIMPLE("vm.stats.sys.v_intr", mib, int_number))) {
+ error("DISABLED: system.dev_intr chart");
+ error("DISABLED: vm.stats.sys.v_intr module");
+ return 1;
+ } else {
+
+ // --------------------------------------------------------------------
+
+ static RRDSET *st = NULL;
+ static RRDDIM *rd = NULL;
+
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "system",
+ "dev_intr",
+ NULL,
+ "interrupts",
+ NULL,
+ "Device Interrupts",
+ "interrupts/s",
+ "freebsd.plugin",
+ "vm.stats.sys.v_intr",
+ NETDATA_CHART_PRIO_SYSTEM_DEV_INTR,
+ update_every,
+ RRDSET_TYPE_LINE
+ );
+
+ rd = rrddim_add(st, "interrupts", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd, int_number);
+ rrdset_done(st);
+ }
+
+ return 0;
+}
+
+// --------------------------------------------------------------------------------------------------------------------
+// vm.stats.sys.v_soft
+
+int do_vm_stats_sys_v_soft(int update_every, usec_t dt) {
+ (void)dt;
+ static int mib[4] = {0, 0, 0, 0};
+ u_int soft_intr_number;
+
+ if (unlikely(GETSYSCTL_SIMPLE("vm.stats.sys.v_soft", mib, soft_intr_number))) {
+ error("DISABLED: system.dev_intr chart");
+ error("DISABLED: vm.stats.sys.v_soft module");
+ return 1;
+ } else {
+
+ // --------------------------------------------------------------------
+
+ static RRDSET *st = NULL;
+ static RRDDIM *rd = NULL;
+
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "system",
+ "soft_intr",
+ NULL,
+ "interrupts",
+ NULL,
+ "Software Interrupts",
+ "interrupts/s",
+ "freebsd.plugin",
+ "vm.stats.sys.v_soft",
+ NETDATA_CHART_PRIO_SYSTEM_SOFT_INTR,
+ update_every,
+ RRDSET_TYPE_LINE
+ );
+
+ rd = rrddim_add(st, "interrupts", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd, soft_intr_number);
+ rrdset_done(st);
+ }
+
+ return 0;
+}
+
+// --------------------------------------------------------------------------------------------------------------------
+// vm.stats.sys.v_swtch
+
+int do_vm_stats_sys_v_swtch(int update_every, usec_t dt) {
+ (void)dt;
+ static int mib[4] = {0, 0, 0, 0};
+ u_int ctxt_number;
+
+ if (unlikely(GETSYSCTL_SIMPLE("vm.stats.sys.v_swtch", mib, ctxt_number))) {
+ error("DISABLED: system.ctxt chart");
+ error("DISABLED: vm.stats.sys.v_swtch module");
+ return 1;
+ } else {
+
+ // --------------------------------------------------------------------
+
+ static RRDSET *st = NULL;
+ static RRDDIM *rd = NULL;
+
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "system",
+ "ctxt",
+ NULL,
+ "processes",
+ NULL,
+ "CPU Context Switches",
+ "context switches/s",
+ "freebsd.plugin",
+ "vm.stats.sys.v_swtch",
+ NETDATA_CHART_PRIO_SYSTEM_CTXT,
+ update_every,
+ RRDSET_TYPE_LINE
+ );
+
+ rd = rrddim_add(st, "switches", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd, ctxt_number);
+ rrdset_done(st);
+ }
+
+ return 0;
+}
+
+// --------------------------------------------------------------------------------------------------------------------
+// vm.stats.vm.v_forks
+
+int do_vm_stats_sys_v_forks(int update_every, usec_t dt) {
+ (void)dt;
+ static int mib[4] = {0, 0, 0, 0};
+ u_int forks_number;
+
+ if (unlikely(GETSYSCTL_SIMPLE("vm.stats.vm.v_forks", mib, forks_number))) {
+ error("DISABLED: system.forks chart");
+ error("DISABLED: vm.stats.sys.v_swtch module");
+ return 1;
+ } else {
+
+ // --------------------------------------------------------------------
+
+ static RRDSET *st = NULL;
+ static RRDDIM *rd = NULL;
+
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "system",
+ "forks",
+ NULL,
+ "processes",
+ NULL,
+ "Started Processes",
+ "processes/s",
+ "freebsd.plugin",
+ "vm.stats.sys.v_swtch",
+ NETDATA_CHART_PRIO_SYSTEM_FORKS,
+ update_every,
+ RRDSET_TYPE_LINE
+ );
+
+ rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
+
+ rd = rrddim_add(st, "started", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd, forks_number);
+ rrdset_done(st);
+ }
+
+ return 0;
+}
+
+// --------------------------------------------------------------------------------------------------------------------
+// vm.swap_info
+
+int do_vm_swap_info(int update_every, usec_t dt) {
+ (void)dt;
+ static int mib[3] = {0, 0, 0};
+
+ if (unlikely(getsysctl_mib("vm.swap_info", mib, 2))) {
+ error("DISABLED: system.swap chart");
+ error("DISABLED: vm.swap_info module");
+ return 1;
+ } else {
+ int i;
+ struct xswdev xsw;
+ struct total_xsw {
+ collected_number bytes_used;
+ collected_number bytes_total;
+ } total_xsw = {0, 0};
+
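+ // vm.swap_info takes the swap device index as the last MIB component;
+ // the kernel returns ENOENT once the index runs past the last device.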
+ for (i = 0; ; i++) {
+ size_t size;
+
+ mib[2] = i;
+ size = sizeof(xsw);
+ if (unlikely(sysctl(mib, 3, &xsw, &size, NULL, 0) == -1 )) {
+ if (unlikely(errno != ENOENT)) {
+ error("FREEBSD: sysctl(%s...) failed: %s", "vm.swap_info", strerror(errno));
+ error("DISABLED: system.swap chart");
+ error("DISABLED: vm.swap_info module");
+ return 1;
+ } else {
+ if (unlikely(size != sizeof(xsw))) {
+ error("FREEBSD: sysctl(%s...) expected %lu, got %lu", "vm.swap_info", (unsigned long)sizeof(xsw), (unsigned long)size);
+ error("DISABLED: system.swap chart");
+ error("DISABLED: vm.swap_info module");
+ return 1;
+ } else break;
+ }
+ }
+ total_xsw.bytes_used += xsw.xsw_used;
+ total_xsw.bytes_total += xsw.xsw_nblks;
+ }
+
+ // --------------------------------------------------------------------
+
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_free = NULL, *rd_used = NULL;
+
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "system",
+ "swap",
+ NULL,
+ "swap",
+ NULL,
+ "System Swap",
+ "MiB",
+ "freebsd.plugin",
+ "vm.swap_info",
+ NETDATA_CHART_PRIO_SYSTEM_SWAP,
+ update_every,
+ RRDSET_TYPE_STACKED
+ );
+
+ rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
+
+ rd_free = rrddim_add(st, "free", NULL, system_pagesize, MEGA_FACTOR, RRD_ALGORITHM_ABSOLUTE);
+ rd_used = rrddim_add(st, "used", NULL, system_pagesize, MEGA_FACTOR, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_free, total_xsw.bytes_total - total_xsw.bytes_used);
+ rrddim_set_by_pointer(st, rd_used, total_xsw.bytes_used);
+ rrdset_done(st);
+ }
+
+ return 0;
+}
+
+// --------------------------------------------------------------------------------------------------------------------
+// system.ram
+
+int do_system_ram(int update_every, usec_t dt) {
+ (void)dt;
+ static int mib_active_count[4] = {0, 0, 0, 0}, mib_inactive_count[4] = {0, 0, 0, 0}, mib_wire_count[4] = {0, 0, 0, 0},
+ mib_cache_count[4] = {0, 0, 0, 0}, mib_vfs_bufspace[2] = {0, 0}, mib_free_count[4] = {0, 0, 0, 0};
+ vmmeter_t vmmeter_data;
+ int vfs_bufspace_count;
+
+#if defined(NETDATA_COLLECT_LAUNDRY)
+ static int mib_laundry_count[4] = {0, 0, 0, 0};
+#endif
+
+ if (unlikely(GETSYSCTL_SIMPLE("vm.stats.vm.v_active_count", mib_active_count, vmmeter_data.v_active_count) ||
+ GETSYSCTL_SIMPLE("vm.stats.vm.v_inactive_count", mib_inactive_count, vmmeter_data.v_inactive_count) ||
+ GETSYSCTL_SIMPLE("vm.stats.vm.v_wire_count", mib_wire_count, vmmeter_data.v_wire_count) ||
+#if __FreeBSD_version < 1200016
+ GETSYSCTL_SIMPLE("vm.stats.vm.v_cache_count", mib_cache_count, vmmeter_data.v_cache_count) ||
+#endif
+#if defined(NETDATA_COLLECT_LAUNDRY)
+ GETSYSCTL_SIMPLE("vm.stats.vm.v_laundry_count", mib_laundry_count, vmmeter_data.v_laundry_count) ||
+#endif
+ GETSYSCTL_SIMPLE("vfs.bufspace", mib_vfs_bufspace, vfs_bufspace_count) ||
+ GETSYSCTL_SIMPLE("vm.stats.vm.v_free_count", mib_free_count, vmmeter_data.v_free_count))) {
+ error("DISABLED: system.ram chart");
+ error("DISABLED: system.ram module");
+ return 1;
+ } else {
+
+ // --------------------------------------------------------------------
+
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_free = NULL, *rd_active = NULL, *rd_inactive = NULL, *rd_wired = NULL,
+ *rd_cache = NULL, *rd_buffers = NULL;
+
+#if defined(NETDATA_COLLECT_LAUNDRY)
+ static RRDDIM *rd_laundry = NULL;
+#endif
+
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "system",
+ "ram",
+ NULL,
+ "ram",
+ NULL,
+ "System RAM",
+ "MiB",
+ "freebsd.plugin",
+ "system.ram",
+ NETDATA_CHART_PRIO_SYSTEM_RAM,
+ update_every,
+ RRDSET_TYPE_STACKED
+ );
+
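+ // page counts are converted to MiB via the dimension multiplier
+ // (system_pagesize) and divisor (MEGA_FACTOR)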
+ rd_free = rrddim_add(st, "free", NULL, system_pagesize, MEGA_FACTOR, RRD_ALGORITHM_ABSOLUTE);
+ rd_active = rrddim_add(st, "active", NULL, system_pagesize, MEGA_FACTOR, RRD_ALGORITHM_ABSOLUTE);
+ rd_inactive = rrddim_add(st, "inactive", NULL, system_pagesize, MEGA_FACTOR, RRD_ALGORITHM_ABSOLUTE);
+ rd_wired = rrddim_add(st, "wired", NULL, system_pagesize, MEGA_FACTOR, RRD_ALGORITHM_ABSOLUTE);
+#if __FreeBSD_version < 1200016
+ rd_cache = rrddim_add(st, "cache", NULL, system_pagesize, MEGA_FACTOR, RRD_ALGORITHM_ABSOLUTE);
+#endif
+#if defined(NETDATA_COLLECT_LAUNDRY)
+ rd_laundry = rrddim_add(st, "laundry", NULL, system_pagesize, MEGA_FACTOR, RRD_ALGORITHM_ABSOLUTE);
+#endif
+ rd_buffers = rrddim_add(st, "buffers", NULL, 1, MEGA_FACTOR, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_free, vmmeter_data.v_free_count);
+ rrddim_set_by_pointer(st, rd_active, vmmeter_data.v_active_count);
+ rrddim_set_by_pointer(st, rd_inactive, vmmeter_data.v_inactive_count);
+ rrddim_set_by_pointer(st, rd_wired, vmmeter_data.v_wire_count);
+#if __FreeBSD_version < 1200016
+ rrddim_set_by_pointer(st, rd_cache, vmmeter_data.v_cache_count);
+#endif
+#if defined(NETDATA_COLLECT_LAUNDRY)
+ rrddim_set_by_pointer(st, rd_laundry, vmmeter_data.v_laundry_count);
+#endif
+ rrddim_set_by_pointer(st, rd_buffers, vfs_bufspace_count);
+ rrdset_done(st);
+ }
+
+ return 0;
+}
+
+// --------------------------------------------------------------------------------------------------------------------
+// vm.stats.vm.v_swappgs
+
+int do_vm_stats_sys_v_swappgs(int update_every, usec_t dt) {
+ (void)dt;
+ static int mib_swappgsin[4] = {0, 0, 0, 0}, mib_swappgsout[4] = {0, 0, 0, 0};
+ vmmeter_t vmmeter_data;
+
+ if (unlikely(GETSYSCTL_SIMPLE("vm.stats.vm.v_swappgsin", mib_swappgsin, vmmeter_data.v_swappgsin) ||
+ GETSYSCTL_SIMPLE("vm.stats.vm.v_swappgsout", mib_swappgsout, vmmeter_data.v_swappgsout))) {
+ error("DISABLED: system.swapio chart");
+ error("DISABLED: vm.stats.vm.v_swappgs module");
+ return 1;
+ } else {
+
+ // --------------------------------------------------------------------
+
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_in = NULL, *rd_out = NULL;
+
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "system",
+ "swapio",
+ NULL,
+ "swap",
+ NULL,
+ "Swap I/O",
+ "KiB/s",
+ "freebsd.plugin",
+ "vm.stats.vm.v_swappgs",
+ NETDATA_CHART_PRIO_SYSTEM_SWAPIO,
+ update_every,
+ RRDSET_TYPE_AREA
+ );
+
+ rd_in = rrddim_add(st, "in", NULL, system_pagesize, KILO_FACTOR, RRD_ALGORITHM_INCREMENTAL);
+ rd_out = rrddim_add(st, "out", NULL, -system_pagesize, KILO_FACTOR, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_in, vmmeter_data.v_swappgsin);
+ rrddim_set_by_pointer(st, rd_out, vmmeter_data.v_swappgsout);
+ rrdset_done(st);
+ }
+
+ return 0;
+}
+
+// --------------------------------------------------------------------------------------------------------------------
+// vm.stats.vm.v_pgfaults
+
+int do_vm_stats_sys_v_pgfaults(int update_every, usec_t dt) {
+ (void)dt;
+ static int mib_vm_faults[4] = {0, 0, 0, 0}, mib_io_faults[4] = {0, 0, 0, 0}, mib_cow_faults[4] = {0, 0, 0, 0},
+ mib_cow_optim[4] = {0, 0, 0, 0}, mib_intrans[4] = {0, 0, 0, 0};
+ vmmeter_t vmmeter_data;
+
+ if (unlikely(GETSYSCTL_SIMPLE("vm.stats.vm.v_vm_faults", mib_vm_faults, vmmeter_data.v_vm_faults) ||
+ GETSYSCTL_SIMPLE("vm.stats.vm.v_io_faults", mib_io_faults, vmmeter_data.v_io_faults) ||
+ GETSYSCTL_SIMPLE("vm.stats.vm.v_cow_faults", mib_cow_faults, vmmeter_data.v_cow_faults) ||
+ GETSYSCTL_SIMPLE("vm.stats.vm.v_cow_optim", mib_cow_optim, vmmeter_data.v_cow_optim) ||
+ GETSYSCTL_SIMPLE("vm.stats.vm.v_intrans", mib_intrans, vmmeter_data.v_intrans))) {
+ error("DISABLED: mem.pgfaults chart");
+ error("DISABLED: vm.stats.vm.v_pgfaults module");
+ return 1;
+ } else {
+
+ // --------------------------------------------------------------------
+
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_memory = NULL, *rd_io_requiring = NULL, *rd_cow = NULL,
+ *rd_cow_optimized = NULL, *rd_in_transit = NULL;
+
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "mem",
+ "pgfaults",
+ NULL,
+ "system",
+ NULL,
+ "Memory Page Faults",
+ "page faults/s",
+ "freebsd.plugin",
+ "vm.stats.vm.v_pgfaults",
+ NETDATA_CHART_PRIO_MEM_SYSTEM_PGFAULTS,
+ update_every,
+ RRDSET_TYPE_LINE
+ );
+
+ rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
+
+ rd_memory = rrddim_add(st, "memory", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_io_requiring = rrddim_add(st, "io_requiring", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_cow = rrddim_add(st, "cow", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_cow_optimized = rrddim_add(st, "cow_optimized", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_in_transit = rrddim_add(st, "in_transit", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_memory, vmmeter_data.v_vm_faults);
+ rrddim_set_by_pointer(st, rd_io_requiring, vmmeter_data.v_io_faults);
+ rrddim_set_by_pointer(st, rd_cow, vmmeter_data.v_cow_faults);
+ rrddim_set_by_pointer(st, rd_cow_optimized, vmmeter_data.v_cow_optim);
+ rrddim_set_by_pointer(st, rd_in_transit, vmmeter_data.v_intrans);
+ rrdset_done(st);
+ }
+
+ return 0;
+}
+
+// --------------------------------------------------------------------------------------------------------------------
+// kern.ipc.sem
+
+int do_kern_ipc_sem(int update_every, usec_t dt) {
+ (void)dt;
+ static int mib_semmni[3] = {0, 0, 0};
+ struct ipc_sem {
+ int semmni;
+ collected_number sets;
+ collected_number semaphores;
+ } ipc_sem = {0, 0, 0};
+
+ if (unlikely(GETSYSCTL_SIMPLE("kern.ipc.semmni", mib_semmni, ipc_sem.semmni))) {
+ error("DISABLED: system.ipc_semaphores chart");
+ error("DISABLED: system.ipc_semaphore_arrays chart");
+ error("DISABLED: kern.ipc.sem module");
+ return 1;
+ } else {
+ static struct semid_kernel *ipc_sem_data = NULL;
+ static int old_semmni = 0;
+ static int mib_sema[3] = {0, 0, 0};
+
+ if (unlikely(ipc_sem.semmni != old_semmni)) {
+ ipc_sem_data = reallocz(ipc_sem_data, sizeof(struct semid_kernel) * ipc_sem.semmni);
+ old_semmni = ipc_sem.semmni;
+ }
+ if (unlikely(GETSYSCTL_WSIZE("kern.ipc.sema", mib_sema, ipc_sem_data, sizeof(struct semid_kernel) * ipc_sem.semmni))) {
+ error("DISABLED: system.ipc_semaphores chart");
+ error("DISABLED: system.ipc_semaphore_arrays chart");
+ error("DISABLED: kern.ipc.sem module");
+ return 1;
+ } else {
+ int i;
+
+ for (i = 0; i < ipc_sem.semmni; i++) {
+ if (unlikely(ipc_sem_data[i].u.sem_perm.mode & SEM_ALLOC)) {
+ ipc_sem.sets += 1;
+ ipc_sem.semaphores += ipc_sem_data[i].u.sem_nsems;
+ }
+ }
+
+ // --------------------------------------------------------------------
+
+ static RRDSET *st_semaphores = NULL, *st_semaphore_arrays = NULL;
+ static RRDDIM *rd_semaphores = NULL, *rd_semaphore_arrays = NULL;
+
+ if (unlikely(!st_semaphores)) {
+ st_semaphores = rrdset_create_localhost(
+ "system",
+ "ipc_semaphores",
+ NULL,
+ "ipc semaphores",
+ NULL,
+ "IPC Semaphores",
+ "semaphores",
+ "freebsd.plugin",
+ "kern.ipc.sem",
+ NETDATA_CHART_PRIO_SYSTEM_IPC_SEMAPHORES,
+ update_every,
+ RRDSET_TYPE_AREA
+ );
+
+ rd_semaphores = rrddim_add(st_semaphores, "semaphores", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else rrdset_next(st_semaphores);
+
+ rrddim_set_by_pointer(st_semaphores, rd_semaphores, ipc_sem.semaphores);
+ rrdset_done(st_semaphores);
+
+ // --------------------------------------------------------------------
+
+ if (unlikely(!st_semaphore_arrays)) {
+ st_semaphore_arrays = rrdset_create_localhost(
+ "system",
+ "ipc_semaphore_arrays",
+ NULL,
+ "ipc semaphores",
+ NULL,
+ "IPC Semaphore Arrays",
+ "arrays",
+ "freebsd.plugin",
+ "kern.ipc.sem",
+ NETDATA_CHART_PRIO_SYSTEM_IPC_SEM_ARRAYS,
+ update_every,
+ RRDSET_TYPE_AREA
+ );
+
+ rd_semaphore_arrays = rrddim_add(st_semaphore_arrays, "arrays", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else rrdset_next(st_semaphore_arrays);
+
+ rrddim_set_by_pointer(st_semaphore_arrays, rd_semaphore_arrays, ipc_sem.sets);
+ rrdset_done(st_semaphore_arrays);
+ }
+ }
+
+ return 0;
+}
+
+// --------------------------------------------------------------------------------------------------------------------
+// kern.ipc.shm
+
+int do_kern_ipc_shm(int update_every, usec_t dt) {
+ (void)dt;
+ static int mib_shmmni[3] = {0, 0, 0};
+ struct ipc_shm {
+ u_long shmmni;
+ collected_number segs;
+ collected_number segsize;
+ } ipc_shm = {0, 0, 0};
+
+ if (unlikely(GETSYSCTL_SIMPLE("kern.ipc.shmmni", mib_shmmni, ipc_shm.shmmni))) {
+ error("DISABLED: system.ipc_shared_mem_segs chart");
+ error("DISABLED: system.ipc_shared_mem_size chart");
+ error("DISABLED: kern.ipc.shmmodule");
+ return 1;
+ } else {
+ static struct shmid_kernel *ipc_shm_data = NULL;
+ static u_long old_shmmni = 0;
+ static int mib_shmsegs[3] = {0, 0, 0};
+
+ if (unlikely(ipc_shm.shmmni != old_shmmni)) {
+ ipc_shm_data = reallocz(ipc_shm_data, sizeof(struct shmid_kernel) * ipc_shm.shmmni);
+ old_shmmni = ipc_shm.shmmni;
+ }
+ if (unlikely(
+ GETSYSCTL_WSIZE("kern.ipc.shmsegs", mib_shmsegs, ipc_shm_data, sizeof(struct shmid_kernel) * ipc_shm.shmmni))) {
+ error("DISABLED: system.ipc_shared_mem_segs chart");
+ error("DISABLED: system.ipc_shared_mem_size chart");
+ error("DISABLED: kern.ipc.shmmodule");
+ return 1;
+ } else {
+ unsigned long i;
+
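+ // 0x0800 stands in for the kernel's private SHMSEG_ALLOCATED flag,
+ // which is not exported to userland headers.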
+ for (i = 0; i < ipc_shm.shmmni; i++) {
+ if (unlikely(ipc_shm_data[i].u.shm_perm.mode & 0x0800)) {
+ ipc_shm.segs += 1;
+ ipc_shm.segsize += ipc_shm_data[i].u.shm_segsz;
+ }
+ }
+
+ // --------------------------------------------------------------------
+
+ static RRDSET *st_segs = NULL, *st_size = NULL;
+ static RRDDIM *rd_segments = NULL, *rd_allocated = NULL;
+
+ if (unlikely(!st_segs)) {
+ st_segs = rrdset_create_localhost(
+ "system",
+ "ipc_shared_mem_segs",
+ NULL,
+ "ipc shared memory",
+ NULL,
+ "IPC Shared Memory Segments",
+ "segments",
+ "freebsd.plugin",
+ "kern.ipc.shm",
+ NETDATA_CHART_PRIO_SYSTEM_IPC_SHARED_MEM_SEGS,
+ update_every,
+ RRDSET_TYPE_AREA
+ );
+
+ rd_segments = rrddim_add(st_segs, "segments", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else rrdset_next(st_segs);
+
+ rrddim_set_by_pointer(st_segs, rd_segments, ipc_shm.segs);
+ rrdset_done(st_segs);
+
+ // --------------------------------------------------------------------
+
+ if (unlikely(!st_size)) {
+ st_size = rrdset_create_localhost(
+ "system",
+ "ipc_shared_mem_size",
+ NULL,
+ "ipc shared memory",
+ NULL,
+ "IPC Shared Memory Segments Size",
+ "KiB",
+ "freebsd.plugin",
+ "kern.ipc.shm",
+ NETDATA_CHART_PRIO_SYSTEM_IPC_SHARED_MEM_SIZE,
+ update_every,
+ RRDSET_TYPE_AREA
+ );
+
+ rd_allocated = rrddim_add(st_size, "allocated", NULL, 1, KILO_FACTOR, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else rrdset_next(st_size);
+
+ rrddim_set_by_pointer(st_size, rd_allocated, ipc_shm.segsize);
+ rrdset_done(st_size);
+ }
+ }
+
+ return 0;
+}
+
+// --------------------------------------------------------------------------------------------------------------------
+// kern.ipc.msq
+
+int do_kern_ipc_msq(int update_every, usec_t dt) {
+ (void)dt;
+ static int mib_msgmni[3] = {0, 0, 0};
+ struct ipc_msq {
+ int msgmni;
+ collected_number queues;
+ collected_number messages;
+ collected_number usedsize;
+ collected_number allocsize;
+ } ipc_msq = {0, 0, 0, 0, 0};
+
+ if (unlikely(GETSYSCTL_SIMPLE("kern.ipc.msgmni", mib_msgmni, ipc_msq.msgmni))) {
+ error("DISABLED: system.ipc_msq_queues chart");
+ error("DISABLED: system.ipc_msq_messages chart");
+ error("DISABLED: system.ipc_msq_size chart");
+ error("DISABLED: kern.ipc.msg module");
+ return 1;
+ } else {
+ static struct msqid_kernel *ipc_msq_data = NULL;
+ static int old_msgmni = 0;
+ static int mib_msqids[3] = {0, 0, 0};
+
+ if (unlikely(ipc_msq.msgmni != old_msgmni)) {
+ ipc_msq_data = reallocz(ipc_msq_data, sizeof(struct msqid_kernel) * ipc_msq.msgmni);
+ old_msgmni = ipc_msq.msgmni;
+ }
+ if (unlikely(
+ GETSYSCTL_WSIZE("kern.ipc.msqids", mib_msqids, ipc_msq_data, sizeof(struct msqid_kernel) * ipc_msq.msgmni))) {
+ error("DISABLED: system.ipc_msq_queues chart");
+ error("DISABLED: system.ipc_msq_messages chart");
+ error("DISABLED: system.ipc_msq_size chart");
+ error("DISABLED: kern.ipc.msg module");
+ return 1;
+ } else {
+ int i;
+
+ for (i = 0; i < ipc_msq.msgmni; i++) {
+ if (unlikely(ipc_msq_data[i].u.msg_qbytes != 0)) {
+ ipc_msq.queues += 1;
+ ipc_msq.messages += ipc_msq_data[i].u.msg_qnum;
+ ipc_msq.usedsize += ipc_msq_data[i].u.msg_cbytes;
+ ipc_msq.allocsize += ipc_msq_data[i].u.msg_qbytes;
+ }
+ }
+
+ // --------------------------------------------------------------------
+
+ static RRDSET *st_queues = NULL, *st_messages = NULL, *st_size = NULL;
+ static RRDDIM *rd_queues = NULL, *rd_messages = NULL, *rd_allocated = NULL, *rd_used = NULL;
+
+ if (unlikely(!st_queues)) {
+ st_queues = rrdset_create_localhost(
+ "system",
+ "ipc_msq_queues",
+ NULL,
+ "ipc message queues",
+ NULL,
+ "Number of IPC Message Queues",
+ "queues",
+ "freebsd.plugin",
+ "kern.ipc.msq",
+ NETDATA_CHART_PRIO_SYSTEM_IPC_MSQ_QUEUES,
+ update_every,
+ RRDSET_TYPE_AREA
+ );
+
+ rd_queues = rrddim_add(st_queues, "queues", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else rrdset_next(st_queues);
+
+ rrddim_set_by_pointer(st_queues, rd_queues, ipc_msq.queues);
+ rrdset_done(st_queues);
+
+ // --------------------------------------------------------------------
+
+ if (unlikely(!st_messages)) {
+ st_messages = rrdset_create_localhost(
+ "system",
+ "ipc_msq_messages",
+ NULL,
+ "ipc message queues",
+ NULL,
+ "Number of Messages in IPC Message Queues",
+ "messages",
+ "freebsd.plugin",
+ "kern.ipc.msq",
+ NETDATA_CHART_PRIO_SYSTEM_IPC_MSQ_MESSAGES,
+ update_every,
+ RRDSET_TYPE_AREA
+ );
+
+ rd_messages = rrddim_add(st_messages, "messages", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else rrdset_next(st_messages);
+
+ rrddim_set_by_pointer(st_messages, rd_messages, ipc_msq.messages);
+ rrdset_done(st_messages);
+
+ // --------------------------------------------------------------------
+
+ if (unlikely(!st_size)) {
+ st_size = rrdset_create_localhost(
+ "system",
+ "ipc_msq_size",
+ NULL,
+ "ipc message queues",
+ NULL,
+ "Size of IPC Message Queues",
+ "bytes",
+ "freebsd.plugin",
+ "kern.ipc.msq",
+ NETDATA_CHART_PRIO_SYSTEM_IPC_MSQ_SIZE,
+ update_every,
+ RRDSET_TYPE_LINE
+ );
+
+ rd_allocated = rrddim_add(st_size, "allocated", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ rd_used = rrddim_add(st_size, "used", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else rrdset_next(st_size);
+
+ rrddim_set_by_pointer(st_size, rd_allocated, ipc_msq.allocsize);
+ rrddim_set_by_pointer(st_size, rd_used, ipc_msq.usedsize);
+ rrdset_done(st_size);
+ }
+ }
+
+ return 0;
+}
+
+// --------------------------------------------------------------------------------------------------------------------
+// uptime
+
+int do_uptime(int update_every, usec_t dt) {
+ (void)dt;
+ struct timespec up_time;
+
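+ // CLOCK_UPTIME is FreeBSD-specific and returns the time elapsed since boot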
+ clock_gettime(CLOCK_UPTIME, &up_time);
+
+ // --------------------------------------------------------------------
+
+ static RRDSET *st = NULL;
+ static RRDDIM *rd = NULL;
+
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "system",
+ "uptime",
+ NULL,
+ "uptime",
+ NULL,
+ "System Uptime",
+ "seconds",
+ "freebsd.plugin",
+ "uptime",
+ NETDATA_CHART_PRIO_SYSTEM_UPTIME,
+ update_every,
+ RRDSET_TYPE_LINE
+ );
+
+ rd = rrddim_add(st, "uptime", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd, up_time.tv_sec);
+ rrdset_done(st);
+
+ return 0;
+}
+
+// --------------------------------------------------------------------------------------------------------------------
+// net.isr
+
+int do_net_isr(int update_every, usec_t dt) {
+ (void)dt;
+ static int do_netisr = -1, do_netisr_per_core = -1;
+
+ if (unlikely(do_netisr == -1)) {
+ do_netisr = config_get_boolean("plugin:freebsd:net.isr", "netisr", 1);
+ do_netisr_per_core = config_get_boolean("plugin:freebsd:net.isr", "netisr per core", 1);
+ }
+
+ static struct netisr_stats {
+ collected_number dispatched;
+ collected_number hybrid_dispatched;
+ collected_number qdrops;
+ collected_number queued;
+ } *netisr_stats = NULL;
+
+ if (likely(do_netisr || do_netisr_per_core)) {
+ static int mib_workstream[3] = {0, 0, 0}, mib_work[3] = {0, 0, 0};
+ size_t netisr_workstream_size = 0, netisr_work_size = 0;
+ static struct sysctl_netisr_workstream *netisr_workstream = NULL;
+ static struct sysctl_netisr_work *netisr_work = NULL;
+ unsigned long num_netisr_workstreams = 0, num_netisr_works = 0;
+ int common_error = 0;
+
+ if (unlikely(GETSYSCTL_SIZE("net.isr.workstream", mib_workstream, netisr_workstream_size))) {
+ common_error = 1;
+ } else if (unlikely(GETSYSCTL_SIZE("net.isr.work", mib_work, netisr_work_size))) {
+ common_error = 1;
+ } else {
+ static size_t old_netisr_workstream_size = 0;
+
+ num_netisr_workstreams = netisr_workstream_size / sizeof(struct sysctl_netisr_workstream);
+ if (unlikely(netisr_workstream_size != old_netisr_workstream_size)) {
+ netisr_workstream = reallocz(netisr_workstream,
+ num_netisr_workstreams * sizeof(struct sysctl_netisr_workstream));
+ old_netisr_workstream_size = netisr_workstream_size;
+ }
+ if (unlikely(GETSYSCTL_WSIZE("net.isr.workstream", mib_workstream, netisr_workstream,
+ num_netisr_workstreams * sizeof(struct sysctl_netisr_workstream)))){
+ common_error = 1;
+ } else {
+ static size_t old_netisr_work_size = 0;
+
+ num_netisr_works = netisr_work_size / sizeof(struct sysctl_netisr_work);
+ if (unlikely(netisr_work_size != old_netisr_work_size)) {
+ netisr_work = reallocz(netisr_work, num_netisr_works * sizeof(struct sysctl_netisr_work));
+ old_netisr_work_size = netisr_work_size;
+ }
+ if (unlikely(GETSYSCTL_WSIZE("net.isr.work", mib_work, netisr_work,
+ num_netisr_works * sizeof(struct sysctl_netisr_work)))){
+ common_error = 1;
+ }
+ }
+ }
+ if (unlikely(common_error)) {
+ do_netisr = 0;
+ error("DISABLED: system.softnet_stat chart");
+ do_netisr_per_core = 0;
+ error("DISABLED: system.cpuX_softnet_stat chart");
+ common_error = 0;
+ error("DISABLED: net.isr module");
+ return 1;
+ } else {
+ unsigned long i, n;
+ int j;
+ static int old_number_of_cpus = 0;
+
+ if (unlikely(number_of_cpus != old_number_of_cpus)) {
+ netisr_stats = reallocz(netisr_stats, (number_of_cpus + 1) * sizeof(struct netisr_stats));
+ old_number_of_cpus = number_of_cpus;
+ }
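+            // the extra slot at index number_of_cpus accumulates the
+            // system-wide totals used by the system.softnet_stat chart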
+ memset(netisr_stats, 0, (number_of_cpus + 1) * sizeof(struct netisr_stats));
+ for (i = 0; i < num_netisr_workstreams; i++) {
+ for (n = 0; n < num_netisr_works; n++) {
+ if (netisr_workstream[i].snws_wsid == netisr_work[n].snw_wsid) {
+ netisr_stats[netisr_workstream[i].snws_cpu].dispatched += netisr_work[n].snw_dispatched;
+ netisr_stats[netisr_workstream[i].snws_cpu].hybrid_dispatched += netisr_work[n].snw_hybrid_dispatched;
+ netisr_stats[netisr_workstream[i].snws_cpu].qdrops += netisr_work[n].snw_qdrops;
+ netisr_stats[netisr_workstream[i].snws_cpu].queued += netisr_work[n].snw_queued;
+ }
+ }
+ }
+ for (j = 0; j < number_of_cpus; j++) {
+ netisr_stats[number_of_cpus].dispatched += netisr_stats[j].dispatched;
+ netisr_stats[number_of_cpus].hybrid_dispatched += netisr_stats[j].hybrid_dispatched;
+ netisr_stats[number_of_cpus].qdrops += netisr_stats[j].qdrops;
+ netisr_stats[number_of_cpus].queued += netisr_stats[j].queued;
+ }
+ }
+ } else {
+ error("DISABLED: net.isr module");
+ return 1;
+ }
+
+ // --------------------------------------------------------------------
+
+ if (likely(do_netisr)) {
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_dispatched = NULL, *rd_hybrid_dispatched = NULL, *rd_qdrops = NULL, *rd_queued = NULL;
+
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "system",
+ "softnet_stat",
+ NULL,
+ "softnet_stat",
+ NULL,
+ "System softnet_stat",
+ "events/s",
+ "freebsd.plugin",
+ "net.isr",
+ NETDATA_CHART_PRIO_SYSTEM_SOFTNET_STAT,
+ update_every,
+ RRDSET_TYPE_LINE
+ );
+
+ rd_dispatched = rrddim_add(st, "dispatched", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_hybrid_dispatched = rrddim_add(st, "hybrid_dispatched", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_qdrops = rrddim_add(st, "qdrops", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_queued = rrddim_add(st, "queued", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_dispatched, netisr_stats[number_of_cpus].dispatched);
+ rrddim_set_by_pointer(st, rd_hybrid_dispatched, netisr_stats[number_of_cpus].hybrid_dispatched);
+ rrddim_set_by_pointer(st, rd_qdrops, netisr_stats[number_of_cpus].qdrops);
+ rrddim_set_by_pointer(st, rd_queued, netisr_stats[number_of_cpus].queued);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if (likely(do_netisr_per_core)) {
+ static struct softnet_chart {
+ char netisr_cpuid[MAX_INT_DIGITS + 17];
+ RRDSET *st;
+ RRDDIM *rd_dispatched;
+ RRDDIM *rd_hybrid_dispatched;
+ RRDDIM *rd_qdrops;
+ RRDDIM *rd_queued;
+ } *all_softnet_charts = NULL;
+ static int old_number_of_cpus = 0;
+ int i;
+
+ if(unlikely(number_of_cpus > old_number_of_cpus)) {
+ all_softnet_charts = reallocz(all_softnet_charts, sizeof(struct softnet_chart) * number_of_cpus);
+ memset(&all_softnet_charts[old_number_of_cpus], 0, sizeof(struct softnet_chart) * (number_of_cpus - old_number_of_cpus));
+ old_number_of_cpus = number_of_cpus;
+ }
+
+ for (i = 0; i < number_of_cpus ;i++) {
+ snprintfz(all_softnet_charts[i].netisr_cpuid, MAX_INT_DIGITS + 17, "cpu%d_softnet_stat", i);
+
+ if (unlikely(!all_softnet_charts[i].st)) {
+ all_softnet_charts[i].st = rrdset_create_localhost(
+ "cpu",
+ all_softnet_charts[i].netisr_cpuid,
+ NULL,
+ "softnet_stat",
+ NULL,
+ "Per CPU netisr statistics",
+ "events/s",
+ "freebsd.plugin",
+ "net.isr",
+ NETDATA_CHART_PRIO_SOFTNET_PER_CORE + i,
+ update_every,
+ RRDSET_TYPE_LINE
+ );
+
+ all_softnet_charts[i].rd_dispatched = rrddim_add(all_softnet_charts[i].st, "dispatched",
+ NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ all_softnet_charts[i].rd_hybrid_dispatched = rrddim_add(all_softnet_charts[i].st, "hybrid_dispatched",
+ NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ all_softnet_charts[i].rd_qdrops = rrddim_add(all_softnet_charts[i].st, "qdrops",
+ NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ all_softnet_charts[i].rd_queued = rrddim_add(all_softnet_charts[i].st, "queued",
+ NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(all_softnet_charts[i].st);
+
+ rrddim_set_by_pointer(all_softnet_charts[i].st, all_softnet_charts[i].rd_dispatched,
+ netisr_stats[i].dispatched);
+ rrddim_set_by_pointer(all_softnet_charts[i].st, all_softnet_charts[i].rd_hybrid_dispatched,
+ netisr_stats[i].hybrid_dispatched);
+ rrddim_set_by_pointer(all_softnet_charts[i].st, all_softnet_charts[i].rd_qdrops,
+ netisr_stats[i].qdrops);
+ rrddim_set_by_pointer(all_softnet_charts[i].st, all_softnet_charts[i].rd_queued,
+ netisr_stats[i].queued);
+ rrdset_done(all_softnet_charts[i].st);
+ }
+ }
+
+ return 0;
+}
+
+// --------------------------------------------------------------------------------------------------------------------
+// net.inet.tcp.states
+
+int do_net_inet_tcp_states(int update_every, usec_t dt) {
+ (void)dt;
+ static int mib[4] = {0, 0, 0, 0};
+ uint64_t tcps_states[TCP_NSTATES];
+
+ // see http://net-snmp.sourceforge.net/docs/mibs/tcp.html
+ if (unlikely(GETSYSCTL_SIMPLE("net.inet.tcp.states", mib, tcps_states))) {
+ error("DISABLED: ipv4.tcpsock chart");
+ error("DISABLED: net.inet.tcp.states module");
+ return 1;
+ } else {
+
+ // --------------------------------------------------------------------
+
+ static RRDSET *st = NULL;
+ static RRDDIM *rd = NULL;
+
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "ipv4",
+ "tcpsock",
+ NULL,
+ "tcp",
+ NULL,
+ "IPv4 TCP Connections",
+ "active connections",
+ "freebsd.plugin",
+ "net.inet.tcp.states",
+ 2500,
+ update_every,
+ RRDSET_TYPE_LINE
+ );
+
+ rd = rrddim_add(st, "CurrEstab", "connections", 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ } else
+ rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd, tcps_states[TCPS_ESTABLISHED]);
+ rrdset_done(st);
+ }
+
+ return 0;
+}
+
+// --------------------------------------------------------------------------------------------------------------------
+// net.inet.tcp.stats
+
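+// Reads struct tcpstat from the net.inet.tcp.stats sysctl and feeds the TCP
+// packet, error and handshake charts, plus the on-demand (AUTO) charts for
+// connection aborts, out-of-order packets, SYN cookies, listen issues and ECN.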
+int do_net_inet_tcp_stats(int update_every, usec_t dt) {
+ (void)dt;
+ static int do_tcp_packets = -1, do_tcp_errors = -1, do_tcp_handshake = -1, do_tcpext_connaborts = -1, do_tcpext_ofo = -1,
+ do_tcpext_syncookies = -1, do_tcpext_listen = -1, do_ecn = -1;
+
+ if (unlikely(do_tcp_packets == -1)) {
+ do_tcp_packets = config_get_boolean("plugin:freebsd:net.inet.tcp.stats", "ipv4 TCP packets", 1);
+ do_tcp_errors = config_get_boolean("plugin:freebsd:net.inet.tcp.stats", "ipv4 TCP errors", 1);
+ do_tcp_handshake = config_get_boolean("plugin:freebsd:net.inet.tcp.stats", "ipv4 TCP handshake issues", 1);
+ do_tcpext_connaborts = config_get_boolean_ondemand("plugin:freebsd:net.inet.tcp.stats", "TCP connection aborts",
+ CONFIG_BOOLEAN_AUTO);
+ do_tcpext_ofo = config_get_boolean_ondemand("plugin:freebsd:net.inet.tcp.stats", "TCP out-of-order queue",
+ CONFIG_BOOLEAN_AUTO);
+ do_tcpext_syncookies = config_get_boolean_ondemand("plugin:freebsd:net.inet.tcp.stats", "TCP SYN cookies",
+ CONFIG_BOOLEAN_AUTO);
+ do_tcpext_listen = config_get_boolean_ondemand("plugin:freebsd:net.inet.tcp.stats", "TCP listen issues",
+ CONFIG_BOOLEAN_AUTO);
+ do_ecn = config_get_boolean_ondemand("plugin:freebsd:net.inet.tcp.stats", "ECN packets",
+ CONFIG_BOOLEAN_AUTO);
+ }
+
+ // see http://net-snmp.sourceforge.net/docs/mibs/tcp.html
+ if (likely(do_tcp_packets || do_tcp_errors || do_tcp_handshake || do_tcpext_connaborts || do_tcpext_ofo ||
+ do_tcpext_syncookies || do_tcpext_listen || do_ecn)) {
+ static int mib[4] = {0, 0, 0, 0};
+ struct tcpstat tcpstat;
+
+ if (unlikely(GETSYSCTL_SIMPLE("net.inet.tcp.stats", mib, tcpstat))) {
+ do_tcp_packets = 0;
+ error("DISABLED: ipv4.tcppackets chart");
+ do_tcp_errors = 0;
+ error("DISABLED: ipv4.tcperrors chart");
+ do_tcp_handshake = 0;
+ error("DISABLED: ipv4.tcphandshake chart");
+ do_tcpext_connaborts = 0;
+ error("DISABLED: ipv4.tcpconnaborts chart");
+ do_tcpext_ofo = 0;
+ error("DISABLED: ipv4.tcpofo chart");
+ do_tcpext_syncookies = 0;
+ error("DISABLED: ipv4.tcpsyncookies chart");
+ do_tcpext_listen = 0;
+ error("DISABLED: ipv4.tcplistenissues chart");
+ do_ecn = 0;
+ error("DISABLED: ipv4.ecnpkts chart");
+ error("DISABLED: net.inet.tcp.stats module");
+ return 1;
+ } else {
+
+ // --------------------------------------------------------------------
+
+ if (likely(do_tcp_packets)) {
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_in_segs = NULL, *rd_out_segs = NULL;
+
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "ipv4",
+ "tcppackets",
+ NULL,
+ "tcp",
+ NULL,
+ "IPv4 TCP Packets",
+ "packets/s",
+ "freebsd.plugin",
+ "net.inet.tcp.stats",
+ 2600,
+ update_every,
+ RRDSET_TYPE_LINE
+ );
+
+ rd_in_segs = rrddim_add(st, "InSegs", "received", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_out_segs = rrddim_add(st, "OutSegs", "sent", -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ } else
+ rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_in_segs, tcpstat.tcps_rcvtotal);
+ rrddim_set_by_pointer(st, rd_out_segs, tcpstat.tcps_sndtotal);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if (likely(do_tcp_errors)) {
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_in_errs = NULL, *rd_in_csum_errs = NULL, *rd_retrans_segs = NULL;
+
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "ipv4",
+ "tcperrors",
+ NULL,
+ "tcp",
+ NULL,
+ "IPv4 TCP Errors",
+ "packets/s",
+ "freebsd.plugin",
+ "net.inet.tcp.stats",
+ 2700,
+ update_every,
+ RRDSET_TYPE_LINE
+ );
+
+ rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
+
+ rd_in_errs = rrddim_add(st, "InErrs", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_in_csum_errs = rrddim_add(st, "InCsumErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_retrans_segs = rrddim_add(st, "RetransSegs", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ } else
+ rrdset_next(st);
+
+#if __FreeBSD__ >= 11
+ rrddim_set_by_pointer(st, rd_in_errs, tcpstat.tcps_rcvbadoff + tcpstat.tcps_rcvreassfull +
+ tcpstat.tcps_rcvshort);
+#else
+ rrddim_set_by_pointer(st, rd_in_errs, tcpstat.tcps_rcvbadoff + tcpstat.tcps_rcvshort);
+#endif
+ rrddim_set_by_pointer(st, rd_in_csum_errs, tcpstat.tcps_rcvbadsum);
+ rrddim_set_by_pointer(st, rd_retrans_segs, tcpstat.tcps_sndrexmitpack);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if (likely(do_tcp_handshake)) {
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_estab_resets = NULL, *rd_active_opens = NULL, *rd_passive_opens = NULL,
+ *rd_attempt_fails = NULL;
+
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "ipv4",
+ "tcphandshake",
+ NULL,
+ "tcp",
+ NULL,
+ "IPv4 TCP Handshake Issues",
+ "events/s",
+ "freebsd.plugin",
+ "net.inet.tcp.stats",
+ 2900,
+ update_every,
+ RRDSET_TYPE_LINE
+ );
+
+ rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
+
+ rd_estab_resets = rrddim_add(st, "EstabResets", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_active_opens = rrddim_add(st, "ActiveOpens", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_passive_opens = rrddim_add(st, "PassiveOpens", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_attempt_fails = rrddim_add(st, "AttemptFails", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ } else
+ rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_estab_resets, tcpstat.tcps_drops);
+ rrddim_set_by_pointer(st, rd_active_opens, tcpstat.tcps_connattempt);
+ rrddim_set_by_pointer(st, rd_passive_opens, tcpstat.tcps_accepts);
+ rrddim_set_by_pointer(st, rd_attempt_fails, tcpstat.tcps_conndrops);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if (do_tcpext_connaborts == CONFIG_BOOLEAN_YES || (do_tcpext_connaborts == CONFIG_BOOLEAN_AUTO &&
+ (tcpstat.tcps_rcvpackafterwin ||
+ tcpstat.tcps_rcvafterclose ||
+ tcpstat.tcps_rcvmemdrop ||
+ tcpstat.tcps_persistdrop ||
+ tcpstat.tcps_finwait2_drops ||
+ netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
+ do_tcpext_connaborts = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_on_data = NULL, *rd_on_close = NULL, *rd_on_memory = NULL,
+ *rd_on_timeout = NULL, *rd_on_linger = NULL;
+
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "ipv4",
+ "tcpconnaborts",
+ NULL,
+ "tcp",
+ NULL,
+ "TCP Connection Aborts",
+ "connections/s",
+ "freebsd.plugin",
+ "net.inet.tcp.stats",
+ 3010,
+ update_every,
+ RRDSET_TYPE_LINE
+ );
+
+ rd_on_data = rrddim_add(st, "TCPAbortOnData", "baddata", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_on_close = rrddim_add(st, "TCPAbortOnClose", "userclosed", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_on_memory = rrddim_add(st, "TCPAbortOnMemory", "nomemory", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_on_timeout = rrddim_add(st, "TCPAbortOnTimeout", "timeout", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_on_linger = rrddim_add(st, "TCPAbortOnLinger", "linger", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_on_data, tcpstat.tcps_rcvpackafterwin);
+ rrddim_set_by_pointer(st, rd_on_close, tcpstat.tcps_rcvafterclose);
+ rrddim_set_by_pointer(st, rd_on_memory, tcpstat.tcps_rcvmemdrop);
+ rrddim_set_by_pointer(st, rd_on_timeout, tcpstat.tcps_persistdrop);
+ rrddim_set_by_pointer(st, rd_on_linger, tcpstat.tcps_finwait2_drops);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if (do_tcpext_ofo == CONFIG_BOOLEAN_YES || (do_tcpext_ofo == CONFIG_BOOLEAN_AUTO &&
+ (tcpstat.tcps_rcvoopack ||
+ netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
+ do_tcpext_ofo = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_ofo_queue = NULL;
+
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "ipv4",
+ "tcpofo",
+ NULL,
+ "tcp",
+ NULL,
+ "TCP Out-Of-Order Queue",
+ "packets/s",
+ "freebsd.plugin",
+ "net.inet.tcp.stats",
+ 3050,
+ update_every,
+ RRDSET_TYPE_LINE
+ );
+
+ rd_ofo_queue = rrddim_add(st, "TCPOFOQueue", "inqueue", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_ofo_queue, tcpstat.tcps_rcvoopack);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if (do_tcpext_syncookies == CONFIG_BOOLEAN_YES || (do_tcpext_syncookies == CONFIG_BOOLEAN_AUTO &&
+ (tcpstat.tcps_sc_sendcookie ||
+ tcpstat.tcps_sc_recvcookie ||
+ tcpstat.tcps_sc_zonefail ||
+ netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
+ do_tcpext_syncookies = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_recv = NULL, *rd_send = NULL, *rd_failed = NULL;
+
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "ipv4",
+ "tcpsyncookies",
+ NULL,
+ "tcp",
+ NULL,
+ "TCP SYN Cookies",
+ "packets/s",
+ "freebsd.plugin",
+ "net.inet.tcp.stats",
+ 3100,
+ update_every,
+ RRDSET_TYPE_LINE
+ );
+
+ rd_recv = rrddim_add(st, "SyncookiesRecv", "received", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_send = rrddim_add(st, "SyncookiesSent", "sent", -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_failed = rrddim_add(st, "SyncookiesFailed", "failed", -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_recv, tcpstat.tcps_sc_recvcookie);
+ rrddim_set_by_pointer(st, rd_send, tcpstat.tcps_sc_sendcookie);
+ rrddim_set_by_pointer(st, rd_failed, tcpstat.tcps_sc_zonefail);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_tcpext_listen == CONFIG_BOOLEAN_YES || (do_tcpext_listen == CONFIG_BOOLEAN_AUTO &&
+ (tcpstat.tcps_listendrop ||
+ netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
+ do_tcpext_listen = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *st_listen = NULL;
+ static RRDDIM *rd_overflows = NULL;
+
+ if(unlikely(!st_listen)) {
+
+ st_listen = rrdset_create_localhost(
+ "ipv4",
+ "tcplistenissues",
+ NULL,
+ "tcp",
+ NULL,
+ "TCP Listen Socket Issues",
+ "packets/s",
+ "freebsd.plugin",
+ "net.inet.tcp.stats",
+ 3015,
+ update_every,
+ RRDSET_TYPE_LINE
+ );
+
+ rd_overflows = rrddim_add(st_listen, "ListenOverflows", "overflows", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else
+ rrdset_next(st_listen);
+
+ rrddim_set_by_pointer(st_listen, rd_overflows, tcpstat.tcps_listendrop);
+
+ rrdset_done(st_listen);
+ }
+
+ // --------------------------------------------------------------------
+
+ if (do_ecn == CONFIG_BOOLEAN_YES || (do_ecn == CONFIG_BOOLEAN_AUTO &&
+ (tcpstat.tcps_ecn_ce ||
+ tcpstat.tcps_ecn_ect0 ||
+ tcpstat.tcps_ecn_ect1 ||
+ netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
+ do_ecn = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_ce = NULL, *rd_no_ect = NULL, *rd_ect0 = NULL, *rd_ect1 = NULL;
+
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "ipv4",
+ "ecnpkts",
+ NULL,
+ "ecn",
+ NULL,
+ "IPv4 ECN Statistics",
+ "packets/s",
+ "freebsd.plugin",
+ "net.inet.tcp.stats",
+ 8700,
+ update_every,
+ RRDSET_TYPE_LINE
+ );
+
+ rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
+
+ rd_ce = rrddim_add(st, "InCEPkts", "CEP", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_no_ect = rrddim_add(st, "InNoECTPkts", "NoECTP", -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_ect0 = rrddim_add(st, "InECT0Pkts", "ECTP0", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_ect1 = rrddim_add(st, "InECT1Pkts", "ECTP1", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_ce, tcpstat.tcps_ecn_ce);
+ rrddim_set_by_pointer(st, rd_no_ect, tcpstat.tcps_ecn_ce - (tcpstat.tcps_ecn_ect0 +
+ tcpstat.tcps_ecn_ect1));
+ rrddim_set_by_pointer(st, rd_ect0, tcpstat.tcps_ecn_ect0);
+ rrddim_set_by_pointer(st, rd_ect1, tcpstat.tcps_ecn_ect1);
+ rrdset_done(st);
+ }
+
+ }
+ } else {
+ error("DISABLED: net.inet.tcp.stats module");
+ return 1;
+ }
+
+ return 0;
+}
+
+// --------------------------------------------------------------------------------------------------------------------
+// net.inet.udp.stats
+
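+// Reads struct udpstat from the net.inet.udp.stats sysctl and feeds the IPv4
+// UDP packet and error charts.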
+int do_net_inet_udp_stats(int update_every, usec_t dt) {
+ (void)dt;
+ static int do_udp_packets = -1, do_udp_errors = -1;
+
+ if (unlikely(do_udp_packets == -1)) {
+ do_udp_packets = config_get_boolean("plugin:freebsd:net.inet.udp.stats", "ipv4 UDP packets", 1);
+ do_udp_errors = config_get_boolean("plugin:freebsd:net.inet.udp.stats", "ipv4 UDP errors", 1);
+ }
+
+ // see http://net-snmp.sourceforge.net/docs/mibs/udp.html
+ if (likely(do_udp_packets || do_udp_errors)) {
+ static int mib[4] = {0, 0, 0, 0};
+ struct udpstat udpstat;
+
+ if (unlikely(GETSYSCTL_SIMPLE("net.inet.udp.stats", mib, udpstat))) {
+ do_udp_packets = 0;
+ error("DISABLED: ipv4.udppackets chart");
+ do_udp_errors = 0;
+ error("DISABLED: ipv4.udperrors chart");
+ error("DISABLED: net.inet.udp.stats module");
+ return 1;
+ } else {
+
+ // --------------------------------------------------------------------
+
+ if (likely(do_udp_packets)) {
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_in = NULL, *rd_out = NULL;
+
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "ipv4",
+ "udppackets",
+ NULL,
+ "udp",
+ NULL,
+ "IPv4 UDP Packets",
+ "packets/s",
+ "freebsd.plugin",
+ "net.inet.udp.stats",
+ 2601,
+ update_every,
+ RRDSET_TYPE_LINE
+ );
+
+ rd_in = rrddim_add(st, "InDatagrams", "received", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_out = rrddim_add(st, "OutDatagrams", "sent", -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ } else
+ rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_in, udpstat.udps_ipackets);
+ rrddim_set_by_pointer(st, rd_out, udpstat.udps_opackets);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if (likely(do_udp_errors)) {
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_in_errors = NULL, *rd_no_ports = NULL, *rd_recv_buf_errors = NULL,
+ *rd_in_csum_errors = NULL, *rd_ignored_multi = NULL;
+
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "ipv4",
+ "udperrors",
+ NULL,
+ "udp",
+ NULL,
+ "IPv4 UDP Errors",
+ "events/s",
+ "freebsd.plugin",
+ "net.inet.udp.stats",
+ 2701,
+ update_every,
+ RRDSET_TYPE_LINE
+ );
+
+ rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
+
+ rd_in_errors = rrddim_add(st, "InErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_no_ports = rrddim_add(st, "NoPorts", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_recv_buf_errors = rrddim_add(st, "RcvbufErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_in_csum_errors = rrddim_add(st, "InCsumErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_ignored_multi = rrddim_add(st, "IgnoredMulti", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ } else
+ rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_in_errors, udpstat.udps_hdrops + udpstat.udps_badlen);
+ rrddim_set_by_pointer(st, rd_no_ports, udpstat.udps_noport);
+ rrddim_set_by_pointer(st, rd_recv_buf_errors, udpstat.udps_fullsock);
+ rrddim_set_by_pointer(st, rd_in_csum_errors, udpstat.udps_badsum + udpstat.udps_nosum);
+ rrddim_set_by_pointer(st, rd_ignored_multi, udpstat.udps_filtermcast);
+ rrdset_done(st);
+ }
+ }
+ } else {
+ error("DISABLED: net.inet.udp.stats module");
+ return 1;
+ }
+
+ return 0;
+}
+
+// --------------------------------------------------------------------------------------------------------------------
+// net.inet.icmp.stats
+
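+// Reads struct icmpstat from the net.inet.icmp.stats sysctl, sums the
+// per-type message histograms and feeds the IPv4 ICMP packet, error and echo
+// message charts.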
+int do_net_inet_icmp_stats(int update_every, usec_t dt) {
+ (void)dt;
+ static int do_icmp_packets = -1, do_icmp_errors = -1, do_icmpmsg = -1;
+
+ if (unlikely(do_icmp_packets == -1)) {
+ do_icmp_packets = config_get_boolean("plugin:freebsd:net.inet.icmp.stats", "ipv4 ICMP packets", 1);
+ do_icmp_errors = config_get_boolean("plugin:freebsd:net.inet.icmp.stats", "ipv4 ICMP errors", 1);
+ do_icmpmsg = config_get_boolean("plugin:freebsd:net.inet.icmp.stats", "ipv4 ICMP messages", 1);
+ }
+
+ if (likely(do_icmp_packets || do_icmp_errors || do_icmpmsg)) {
+ static int mib[4] = {0, 0, 0, 0};
+ struct icmpstat icmpstat;
+ struct icmp_total {
+ u_long msgs_in;
+ u_long msgs_out;
+ } icmp_total = {0, 0};
+
+ if (unlikely(GETSYSCTL_SIMPLE("net.inet.icmp.stats", mib, icmpstat))) {
+ do_icmp_packets = 0;
+ error("DISABLED: ipv4.icmp chart");
+ do_icmp_errors = 0;
+ error("DISABLED: ipv4.icmp_errors chart");
+ do_icmpmsg = 0;
+ error("DISABLED: ipv4.icmpmsg chart");
+ error("DISABLED: net.inet.icmp.stats module");
+ return 1;
+ } else {
+ int i;
+
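+            // total ICMP traffic: sum the per-type histograms, then add the
+            // malformed inputs that never made it into the input histogram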
+ for (i = 0; i <= ICMP_MAXTYPE; i++) {
+ icmp_total.msgs_in += icmpstat.icps_inhist[i];
+ icmp_total.msgs_out += icmpstat.icps_outhist[i];
+ }
+ icmp_total.msgs_in += icmpstat.icps_badcode + icmpstat.icps_badlen + icmpstat.icps_checksum + icmpstat.icps_tooshort;
+
+ // --------------------------------------------------------------------
+
+ if (likely(do_icmp_packets)) {
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_in = NULL, *rd_out = NULL;
+
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+                        "ipv4",
+                        "icmp",
+                        NULL,
+                        "icmp",
+                        NULL,
+                        "IPv4 ICMP Packets",
+                        "packets/s",
+                        "freebsd.plugin",
+                        "net.inet.icmp.stats",
+                        2602,
+                        update_every,
+                        RRDSET_TYPE_LINE
+ );
+
+ rd_in = rrddim_add(st, "InMsgs", "received", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_out = rrddim_add(st, "OutMsgs", "sent", -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ } else
+ rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_in, icmp_total.msgs_in);
+ rrddim_set_by_pointer(st, rd_out, icmp_total.msgs_out);
+
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if (likely(do_icmp_errors)) {
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_in = NULL, *rd_out = NULL, *rd_in_csum = NULL;
+
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+                        "ipv4",
+                        "icmp_errors",
+                        NULL,
+                        "icmp",
+                        NULL,
+                        "IPv4 ICMP Errors",
+                        "packets/s",
+                        "freebsd.plugin",
+                        "net.inet.icmp.stats",
+                        2603,
+                        update_every,
+                        RRDSET_TYPE_LINE
+ );
+
+ rd_in = rrddim_add(st, "InErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_out = rrddim_add(st, "OutErrors", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_in_csum = rrddim_add(st, "InCsumErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ } else
+ rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_in, icmpstat.icps_badcode + icmpstat.icps_badlen +
+ icmpstat.icps_checksum + icmpstat.icps_tooshort);
+ rrddim_set_by_pointer(st, rd_out, icmpstat.icps_error);
+ rrddim_set_by_pointer(st, rd_in_csum, icmpstat.icps_checksum);
+
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if (likely(do_icmpmsg)) {
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_in_reps = NULL, *rd_out_reps = NULL, *rd_in = NULL, *rd_out = NULL;
+
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+                        "ipv4",
+                        "icmpmsg",
+                        NULL,
+                        "icmp",
+                        NULL,
+                        "IPv4 ICMP Messages",
+                        "packets/s",
+                        "freebsd.plugin",
+                        "net.inet.icmp.stats",
+                        2604,
+                        update_every,
+                        RRDSET_TYPE_LINE
+ );
+
+ rd_in_reps = rrddim_add(st, "InEchoReps", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_out_reps = rrddim_add(st, "OutEchoReps", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_in = rrddim_add(st, "InEchos", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_out = rrddim_add(st, "OutEchos", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ } else
+ rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_in_reps, icmpstat.icps_inhist[ICMP_ECHOREPLY]);
+ rrddim_set_by_pointer(st, rd_out_reps, icmpstat.icps_outhist[ICMP_ECHOREPLY]);
+ rrddim_set_by_pointer(st, rd_in, icmpstat.icps_inhist[ICMP_ECHO]);
+ rrddim_set_by_pointer(st, rd_out, icmpstat.icps_outhist[ICMP_ECHO]);
+
+ rrdset_done(st);
+ }
+ }
+ } else {
+ error("DISABLED: net.inet.icmp.stats module");
+ return 1;
+ }
+
+ return 0;
+}
+
+// --------------------------------------------------------------------------------------------------------------------
+// net.inet.ip.stats
+
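+// Reads struct ipstat from the net.inet.ip.stats sysctl and feeds the IPv4
+// packet, fragment (sent and reassembly) and error charts.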
+int do_net_inet_ip_stats(int update_every, usec_t dt) {
+ (void)dt;
+ static int do_ip_packets = -1, do_ip_fragsout = -1, do_ip_fragsin = -1, do_ip_errors = -1;
+
+ if (unlikely(do_ip_packets == -1)) {
+ do_ip_packets = config_get_boolean("plugin:freebsd:net.inet.ip.stats", "ipv4 packets", 1);
+ do_ip_fragsout = config_get_boolean("plugin:freebsd:net.inet.ip.stats", "ipv4 fragments sent", 1);
+ do_ip_fragsin = config_get_boolean("plugin:freebsd:net.inet.ip.stats", "ipv4 fragments assembly", 1);
+ do_ip_errors = config_get_boolean("plugin:freebsd:net.inet.ip.stats", "ipv4 errors", 1);
+ }
+
+ // see also http://net-snmp.sourceforge.net/docs/mibs/ip.html
+ if (likely(do_ip_packets || do_ip_fragsout || do_ip_fragsin || do_ip_errors)) {
+ static int mib[4] = {0, 0, 0, 0};
+ struct ipstat ipstat;
+
+ if (unlikely(GETSYSCTL_SIMPLE("net.inet.ip.stats", mib, ipstat))) {
+ do_ip_packets = 0;
+ error("DISABLED: ipv4.packets chart");
+ do_ip_fragsout = 0;
+ error("DISABLED: ipv4.fragsout chart");
+ do_ip_fragsin = 0;
+ error("DISABLED: ipv4.fragsin chart");
+ do_ip_errors = 0;
+ error("DISABLED: ipv4.errors chart");
+ error("DISABLED: net.inet.ip.stats module");
+ return 1;
+ } else {
+
+ // --------------------------------------------------------------------
+
+ if (likely(do_ip_packets)) {
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_in_receives = NULL, *rd_out_requests = NULL, *rd_forward_datagrams = NULL,
+ *rd_in_delivers = NULL;
+
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "ipv4",
+ "packets",
+ NULL,
+ "packets",
+ NULL,
+ "IPv4 Packets",
+ "packets/s",
+ "freebsd.plugin",
+ "net.inet.ip.stats",
+ 3000,
+ update_every,
+ RRDSET_TYPE_LINE
+ );
+
+ rd_in_receives = rrddim_add(st, "InReceives", "received", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_out_requests = rrddim_add(st, "OutRequests", "sent", -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_forward_datagrams = rrddim_add(st, "ForwDatagrams", "forwarded", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_in_delivers = rrddim_add(st, "InDelivers", "delivered", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ } else
+ rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_in_receives, ipstat.ips_total);
+ rrddim_set_by_pointer(st, rd_out_requests, ipstat.ips_localout);
+ rrddim_set_by_pointer(st, rd_forward_datagrams, ipstat.ips_forward);
+ rrddim_set_by_pointer(st, rd_in_delivers, ipstat.ips_delivered);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if (likely(do_ip_fragsout)) {
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_ok = NULL, *rd_fails = NULL, *rd_created = NULL;
+
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "ipv4",
+ "fragsout",
+ NULL,
+ "fragments",
+ NULL,
+ "IPv4 Fragments Sent",
+ "packets/s",
+ "freebsd.plugin",
+ "net.inet.ip.stats",
+ 3010,
+ update_every,
+ RRDSET_TYPE_LINE
+ );
+
+ rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
+
+ rd_ok = rrddim_add(st, "FragOKs", "ok", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_fails = rrddim_add(st, "FragFails", "failed", -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_created = rrddim_add(st, "FragCreates", "created", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ } else
+ rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_ok, ipstat.ips_fragmented);
+ rrddim_set_by_pointer(st, rd_fails, ipstat.ips_cantfrag);
+ rrddim_set_by_pointer(st, rd_created, ipstat.ips_ofragments);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if (likely(do_ip_fragsin)) {
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_ok = NULL, *rd_failed = NULL, *rd_all = NULL;
+
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "ipv4",
+ "fragsin",
+ NULL,
+ "fragments",
+ NULL,
+ "IPv4 Fragments Reassembly",
+ "packets/s",
+ "freebsd.plugin",
+ "net.inet.ip.stats",
+ 3011,
+ update_every,
+ RRDSET_TYPE_LINE
+ );
+
+ rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
+
+ rd_ok = rrddim_add(st, "ReasmOKs", "ok", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_failed = rrddim_add(st, "ReasmFails", "failed", -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_all = rrddim_add(st, "ReasmReqds", "all", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ } else
+ rrdset_next(st);
+
+                rrddim_set_by_pointer(st, rd_ok, ipstat.ips_reassembled);
+                rrddim_set_by_pointer(st, rd_failed, ipstat.ips_fragdropped);
+                rrddim_set_by_pointer(st, rd_all, ipstat.ips_fragments);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if (likely(do_ip_errors)) {
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_in_discards = NULL, *rd_out_discards = NULL,
+ *rd_in_hdr_errors = NULL, *rd_out_no_routes = NULL,
+ *rd_in_addr_errors = NULL, *rd_in_unknown_protos = NULL;
+
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "ipv4",
+ "errors",
+ NULL,
+ "errors",
+ NULL,
+ "IPv4 Errors",
+ "packets/s",
+ "freebsd.plugin",
+ "net.inet.ip.stats",
+ 3002,
+ update_every,
+ RRDSET_TYPE_LINE
+ );
+
+ rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
+
+ rd_in_discards = rrddim_add(st, "InDiscards", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_out_discards = rrddim_add(st, "OutDiscards", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_in_hdr_errors = rrddim_add(st, "InHdrErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_out_no_routes = rrddim_add(st, "OutNoRoutes", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_in_addr_errors = rrddim_add(st, "InAddrErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_in_unknown_protos = rrddim_add(st, "InUnknownProtos", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ } else
+ rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_in_discards, ipstat.ips_badsum + ipstat.ips_tooshort +
+ ipstat.ips_toosmall + ipstat.ips_toolong);
+ rrddim_set_by_pointer(st, rd_out_discards, ipstat.ips_odropped);
+ rrddim_set_by_pointer(st, rd_in_hdr_errors, ipstat.ips_badhlen + ipstat.ips_badlen +
+ ipstat.ips_badoptions + ipstat.ips_badvers);
+ rrddim_set_by_pointer(st, rd_out_no_routes, ipstat.ips_noroute);
+ rrddim_set_by_pointer(st, rd_in_addr_errors, ipstat.ips_badaddr);
+ rrddim_set_by_pointer(st, rd_in_unknown_protos, ipstat.ips_noproto);
+ rrdset_done(st);
+ }
+ }
+ } else {
+ error("DISABLED: net.inet.ip.stats module");
+ return 1;
+ }
+
+ return 0;
+}
+
+// --------------------------------------------------------------------------------------------------------------------
+// net.inet6.ip6.stats
+
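+// Reads struct ip6stat from the net.inet6.ip6.stats sysctl and feeds the
+// corresponding IPv6 charts; all of them are on-demand (AUTO), created once
+// their counters become non-zero, or immediately when zero metrics are enabled.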
+int do_net_inet6_ip6_stats(int update_every, usec_t dt) {
+ (void)dt;
+ static int do_ip6_packets = -1, do_ip6_fragsout = -1, do_ip6_fragsin = -1, do_ip6_errors = -1;
+
+ if (unlikely(do_ip6_packets == -1)) {
+ do_ip6_packets = config_get_boolean_ondemand("plugin:freebsd:net.inet6.ip6.stats", "ipv6 packets",
+ CONFIG_BOOLEAN_AUTO);
+ do_ip6_fragsout = config_get_boolean_ondemand("plugin:freebsd:net.inet6.ip6.stats", "ipv6 fragments sent",
+ CONFIG_BOOLEAN_AUTO);
+ do_ip6_fragsin = config_get_boolean_ondemand("plugin:freebsd:net.inet6.ip6.stats", "ipv6 fragments assembly",
+ CONFIG_BOOLEAN_AUTO);
+ do_ip6_errors = config_get_boolean_ondemand("plugin:freebsd:net.inet6.ip6.stats", "ipv6 errors",
+ CONFIG_BOOLEAN_AUTO);
+ }
+
+ if (likely(do_ip6_packets || do_ip6_fragsout || do_ip6_fragsin || do_ip6_errors)) {
+ static int mib[4] = {0, 0, 0, 0};
+ struct ip6stat ip6stat;
+
+ if (unlikely(GETSYSCTL_SIMPLE("net.inet6.ip6.stats", mib, ip6stat))) {
+ do_ip6_packets = 0;
+ error("DISABLED: ipv6.packets chart");
+ do_ip6_fragsout = 0;
+ error("DISABLED: ipv6.fragsout chart");
+ do_ip6_fragsin = 0;
+ error("DISABLED: ipv6.fragsin chart");
+ do_ip6_errors = 0;
+ error("DISABLED: ipv6.errors chart");
+ error("DISABLED: net.inet6.ip6.stats module");
+ return 1;
+ } else {
+
+ // --------------------------------------------------------------------
+
+ if (do_ip6_packets == CONFIG_BOOLEAN_YES || (do_ip6_packets == CONFIG_BOOLEAN_AUTO &&
+ (ip6stat.ip6s_localout ||
+ ip6stat.ip6s_total ||
+ ip6stat.ip6s_forward ||
+ ip6stat.ip6s_delivered ||
+ netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
+ do_ip6_packets = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_received = NULL, *rd_sent = NULL, *rd_forwarded = NULL, *rd_delivers = NULL;
+
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "ipv6",
+ "packets",
+ NULL,
+ "packets",
+ NULL,
+ "IPv6 Packets",
+ "packets/s",
+ "freebsd.plugin",
+ "net.inet6.ip6.stats",
+ 3000,
+ update_every,
+ RRDSET_TYPE_LINE
+ );
+
+ rd_received = rrddim_add(st, "received", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_sent = rrddim_add(st, "sent", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_forwarded = rrddim_add(st, "forwarded", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_delivers = rrddim_add(st, "delivers", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ } else
+ rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_sent, ip6stat.ip6s_localout);
+ rrddim_set_by_pointer(st, rd_received, ip6stat.ip6s_total);
+ rrddim_set_by_pointer(st, rd_forwarded, ip6stat.ip6s_forward);
+ rrddim_set_by_pointer(st, rd_delivers, ip6stat.ip6s_delivered);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if (do_ip6_fragsout == CONFIG_BOOLEAN_YES || (do_ip6_fragsout == CONFIG_BOOLEAN_AUTO &&
+ (ip6stat.ip6s_fragmented ||
+ ip6stat.ip6s_cantfrag ||
+ ip6stat.ip6s_ofragments ||
+ netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
+ do_ip6_fragsout = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_ok = NULL, *rd_failed = NULL, *rd_all = NULL;
+
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "ipv6",
+ "fragsout",
+ NULL,
+ "fragments",
+ NULL,
+ "IPv6 Fragments Sent",
+ "packets/s",
+ "freebsd.plugin",
+ "net.inet6.ip6.stats",
+ 3010,
+ update_every,
+ RRDSET_TYPE_LINE
+ );
+
+ rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
+
+ rd_ok = rrddim_add(st, "ok", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_failed = rrddim_add(st, "failed", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_all = rrddim_add(st, "all", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ } else
+ rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_ok, ip6stat.ip6s_fragmented);
+ rrddim_set_by_pointer(st, rd_failed, ip6stat.ip6s_cantfrag);
+ rrddim_set_by_pointer(st, rd_all, ip6stat.ip6s_ofragments);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if (do_ip6_fragsin == CONFIG_BOOLEAN_YES || (do_ip6_fragsin == CONFIG_BOOLEAN_AUTO &&
+ (ip6stat.ip6s_reassembled ||
+ ip6stat.ip6s_fragdropped ||
+ ip6stat.ip6s_fragtimeout ||
+ ip6stat.ip6s_fragments ||
+ netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
+ do_ip6_fragsin = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_ok = NULL, *rd_failed = NULL, *rd_timeout = NULL, *rd_all = NULL;
+
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "ipv6",
+ "fragsin",
+ NULL,
+ "fragments",
+ NULL,
+ "IPv6 Fragments Reassembly",
+ "packets/s",
+ "freebsd.plugin",
+ "net.inet6.ip6.stats",
+ 3011,
+ update_every,
+ RRDSET_TYPE_LINE
+ );
+
+ rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
+
+ rd_ok = rrddim_add(st, "ok", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_failed = rrddim_add(st, "failed", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_timeout = rrddim_add(st, "timeout", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_all = rrddim_add(st, "all", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ } else
+ rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_ok, ip6stat.ip6s_reassembled);
+ rrddim_set_by_pointer(st, rd_failed, ip6stat.ip6s_fragdropped);
+ rrddim_set_by_pointer(st, rd_timeout, ip6stat.ip6s_fragtimeout);
+ rrddim_set_by_pointer(st, rd_all, ip6stat.ip6s_fragments);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if (do_ip6_errors == CONFIG_BOOLEAN_YES || (do_ip6_errors == CONFIG_BOOLEAN_AUTO &&
+ (ip6stat.ip6s_toosmall ||
+ ip6stat.ip6s_odropped ||
+ ip6stat.ip6s_badoptions ||
+ ip6stat.ip6s_badvers ||
+ ip6stat.ip6s_exthdrtoolong ||
+ ip6stat.ip6s_sources_none ||
+ ip6stat.ip6s_tooshort ||
+ ip6stat.ip6s_cantforward ||
+ ip6stat.ip6s_noroute ||
+ netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
+ do_ip6_errors = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_in_discards = NULL, *rd_out_discards = NULL,
+ *rd_in_hdr_errors = NULL, *rd_in_addr_errors = NULL, *rd_in_truncated_pkts = NULL,
+ *rd_in_no_routes = NULL, *rd_out_no_routes = NULL;
+
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "ipv6",
+ "errors",
+ NULL,
+ "errors",
+ NULL,
+ "IPv6 Errors",
+ "packets/s",
+ "freebsd.plugin",
+ "net.inet6.ip6.stats",
+ 3002,
+ update_every,
+ RRDSET_TYPE_LINE
+ );
+
+ rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
+
+ rd_in_discards = rrddim_add(st, "InDiscards", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_out_discards = rrddim_add(st, "OutDiscards", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_in_hdr_errors = rrddim_add(st, "InHdrErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_in_addr_errors = rrddim_add(st, "InAddrErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_in_truncated_pkts = rrddim_add(st, "InTruncatedPkts", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_in_no_routes = rrddim_add(st, "InNoRoutes", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_out_no_routes = rrddim_add(st, "OutNoRoutes", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ } else
+ rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_in_discards, ip6stat.ip6s_toosmall);
+ rrddim_set_by_pointer(st, rd_out_discards, ip6stat.ip6s_odropped);
+ rrddim_set_by_pointer(st, rd_in_hdr_errors, ip6stat.ip6s_badoptions + ip6stat.ip6s_badvers +
+ ip6stat.ip6s_exthdrtoolong);
+ rrddim_set_by_pointer(st, rd_in_addr_errors, ip6stat.ip6s_sources_none);
+ rrddim_set_by_pointer(st, rd_in_truncated_pkts, ip6stat.ip6s_tooshort);
+ rrddim_set_by_pointer(st, rd_in_no_routes, ip6stat.ip6s_cantforward);
+ rrddim_set_by_pointer(st, rd_out_no_routes, ip6stat.ip6s_noroute);
+ rrdset_done(st);
+ }
+ }
+ } else {
+ error("DISABLED: net.inet6.ip6.stats module");
+ return 1;
+ }
+
+ return 0;
+}
+
+// --------------------------------------------------------------------------------------------------------------------
+// net.inet6.icmp6.stats
+
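+// Reads struct icmp6stat from the net.inet6.icmp6.stats sysctl, sums the
+// per-type message histograms and feeds the ICMPv6 message, redirect, error,
+// echo, router and neighbor discovery charts; all are on-demand (AUTO) by default.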
+int do_net_inet6_icmp6_stats(int update_every, usec_t dt) {
+ (void)dt;
+ static int do_icmp6 = -1, do_icmp6_redir = -1, do_icmp6_errors = -1, do_icmp6_echos = -1, do_icmp6_router = -1,
+ do_icmp6_neighbor = -1, do_icmp6_types = -1;
+
+ if (unlikely(do_icmp6 == -1)) {
+ do_icmp6 = config_get_boolean_ondemand("plugin:freebsd:net.inet6.icmp6.stats", "icmp",
+ CONFIG_BOOLEAN_AUTO);
+ do_icmp6_redir = config_get_boolean_ondemand("plugin:freebsd:net.inet6.icmp6.stats", "icmp redirects",
+ CONFIG_BOOLEAN_AUTO);
+ do_icmp6_errors = config_get_boolean_ondemand("plugin:freebsd:net.inet6.icmp6.stats", "icmp errors",
+ CONFIG_BOOLEAN_AUTO);
+ do_icmp6_echos = config_get_boolean_ondemand("plugin:freebsd:net.inet6.icmp6.stats", "icmp echos",
+ CONFIG_BOOLEAN_AUTO);
+ do_icmp6_router = config_get_boolean_ondemand("plugin:freebsd:net.inet6.icmp6.stats", "icmp router",
+ CONFIG_BOOLEAN_AUTO);
+ do_icmp6_neighbor = config_get_boolean_ondemand("plugin:freebsd:net.inet6.icmp6.stats", "icmp neighbor",
+ CONFIG_BOOLEAN_AUTO);
+ do_icmp6_types = config_get_boolean_ondemand("plugin:freebsd:net.inet6.icmp6.stats", "icmp types",
+ CONFIG_BOOLEAN_AUTO);
+ }
+
+ if (likely(do_icmp6 || do_icmp6_redir || do_icmp6_errors || do_icmp6_echos || do_icmp6_router || do_icmp6_neighbor || do_icmp6_types)) {
+ static int mib[4] = {0, 0, 0, 0};
+ struct icmp6stat icmp6stat;
+
+ if (unlikely(GETSYSCTL_SIMPLE("net.inet6.icmp6.stats", mib, icmp6stat))) {
+ do_icmp6 = 0;
+ error("DISABLED: ipv6.icmp chart");
+ do_icmp6_redir = 0;
+ error("DISABLED: ipv6.icmpredir chart");
+ do_icmp6_errors = 0;
+ error("DISABLED: ipv6.icmperrors chart");
+ do_icmp6_echos = 0;
+ error("DISABLED: ipv6.icmpechos chart");
+ do_icmp6_router = 0;
+ error("DISABLED: ipv6.icmprouter chart");
+ do_icmp6_neighbor = 0;
+ error("DISABLED: ipv6.icmpneighbor chart");
+ do_icmp6_types = 0;
+ error("DISABLED: ipv6.icmptypes chart");
+ error("DISABLED: net.inet6.icmp6.stats module");
+ return 1;
+ } else {
+ int i;
+ struct icmp6_total {
+ u_long msgs_in;
+ u_long msgs_out;
+ } icmp6_total = {0, 0};
+
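+            // total ICMPv6 traffic: sum the per-type histograms, then add the
+            // malformed inputs that never made it into the input histogram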
+ for (i = 0; i <= ICMP6_MAXTYPE; i++) {
+ icmp6_total.msgs_in += icmp6stat.icp6s_inhist[i];
+ icmp6_total.msgs_out += icmp6stat.icp6s_outhist[i];
+ }
+ icmp6_total.msgs_in += icmp6stat.icp6s_badcode + icmp6stat.icp6s_badlen + icmp6stat.icp6s_checksum + icmp6stat.icp6s_tooshort;
+
+ // --------------------------------------------------------------------
+
+ if (do_icmp6 == CONFIG_BOOLEAN_YES || (do_icmp6 == CONFIG_BOOLEAN_AUTO &&
+ (icmp6_total.msgs_in ||
+ icmp6_total.msgs_out ||
+ netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
+ do_icmp6 = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_received = NULL, *rd_sent = NULL;
+
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "ipv6",
+ "icmp",
+ NULL,
+ "icmp",
+ NULL,
+ "IPv6 ICMP Messages",
+ "messages/s",
+ "freebsd.plugin",
+ "net.inet6.icmp6.stats",
+ 10000,
+ update_every,
+ RRDSET_TYPE_LINE
+ );
+
+ rd_received = rrddim_add(st, "received", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_sent = rrddim_add(st, "sent", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ } else
+ rrdset_next(st);
+
+                rrddim_set_by_pointer(st, rd_received, icmp6_total.msgs_in);
+                rrddim_set_by_pointer(st, rd_sent, icmp6_total.msgs_out);
+
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if (do_icmp6_redir == CONFIG_BOOLEAN_YES || (do_icmp6_redir == CONFIG_BOOLEAN_AUTO &&
+ (icmp6stat.icp6s_inhist[ND_REDIRECT] ||
+ icmp6stat.icp6s_outhist[ND_REDIRECT] ||
+ netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
+ do_icmp6_redir = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_received = NULL, *rd_sent = NULL;
+
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "ipv6",
+ "icmpredir",
+ NULL,
+ "icmp",
+ NULL,
+ "IPv6 ICMP Redirects",
+ "redirects/s",
+ "freebsd.plugin",
+ "net.inet6.icmp6.stats",
+ 10050,
+ update_every,
+ RRDSET_TYPE_LINE
+ );
+
+ rd_received = rrddim_add(st, "received", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_sent = rrddim_add(st, "sent", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ } else
+ rrdset_next(st);
+
+                rrddim_set_by_pointer(st, rd_received, icmp6stat.icp6s_inhist[ND_REDIRECT]);
+                rrddim_set_by_pointer(st, rd_sent, icmp6stat.icp6s_outhist[ND_REDIRECT]);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if (do_icmp6_errors == CONFIG_BOOLEAN_YES || (do_icmp6_errors == CONFIG_BOOLEAN_AUTO &&
+ (icmp6stat.icp6s_badcode ||
+ icmp6stat.icp6s_badlen ||
+ icmp6stat.icp6s_checksum ||
+ icmp6stat.icp6s_tooshort ||
+ icmp6stat.icp6s_error ||
+ icmp6stat.icp6s_inhist[ICMP6_DST_UNREACH] ||
+ icmp6stat.icp6s_inhist[ICMP6_TIME_EXCEEDED] ||
+ icmp6stat.icp6s_inhist[ICMP6_PARAM_PROB] ||
+ icmp6stat.icp6s_outhist[ICMP6_DST_UNREACH] ||
+ icmp6stat.icp6s_outhist[ICMP6_TIME_EXCEEDED] ||
+ icmp6stat.icp6s_outhist[ICMP6_PARAM_PROB] ||
+ netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
+ do_icmp6_errors = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_in_errors = NULL, *rd_out_errors = NULL, *rd_in_csum_errors = NULL,
+ *rd_in_dest_unreachs = NULL, *rd_in_pkt_too_bigs = NULL, *rd_in_time_excds = NULL,
+ *rd_in_parm_problems = NULL, *rd_out_dest_unreachs = NULL, *rd_out_time_excds = NULL,
+ *rd_out_parm_problems = NULL;
+
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "ipv6",
+ "icmperrors",
+                    NULL,
+                    "icmp",
+ NULL,
+ "IPv6 ICMP Errors",
+ "errors/s",
+ "freebsd.plugin",
+ "net.inet6.icmp6.stats",
+ 10100,
+ update_every,
+ RRDSET_TYPE_LINE
+ );
+
+ rd_in_errors = rrddim_add(st, "InErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_out_errors = rrddim_add(st, "OutErrors", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_in_csum_errors = rrddim_add(st, "InCsumErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_in_dest_unreachs = rrddim_add(st, "InDestUnreachs", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_in_pkt_too_bigs = rrddim_add(st, "InPktTooBigs", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_in_time_excds = rrddim_add(st, "InTimeExcds", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_in_parm_problems = rrddim_add(st, "InParmProblems", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_out_dest_unreachs = rrddim_add(st, "OutDestUnreachs", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_out_time_excds = rrddim_add(st, "OutTimeExcds", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_out_parm_problems = rrddim_add(st, "OutParmProblems", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ } else
+ rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_in_errors, icmp6stat.icp6s_badcode + icmp6stat.icp6s_badlen +
+ icmp6stat.icp6s_checksum + icmp6stat.icp6s_tooshort);
+ rrddim_set_by_pointer(st, rd_out_errors, icmp6stat.icp6s_error);
+ rrddim_set_by_pointer(st, rd_in_csum_errors, icmp6stat.icp6s_checksum);
+ rrddim_set_by_pointer(st, rd_in_dest_unreachs, icmp6stat.icp6s_inhist[ICMP6_DST_UNREACH]);
+ rrddim_set_by_pointer(st, rd_in_pkt_too_bigs, icmp6stat.icp6s_badlen);
+ rrddim_set_by_pointer(st, rd_in_time_excds, icmp6stat.icp6s_inhist[ICMP6_TIME_EXCEEDED]);
+ rrddim_set_by_pointer(st, rd_in_parm_problems, icmp6stat.icp6s_inhist[ICMP6_PARAM_PROB]);
+ rrddim_set_by_pointer(st, rd_out_dest_unreachs, icmp6stat.icp6s_outhist[ICMP6_DST_UNREACH]);
+ rrddim_set_by_pointer(st, rd_out_time_excds, icmp6stat.icp6s_outhist[ICMP6_TIME_EXCEEDED]);
+ rrddim_set_by_pointer(st, rd_out_parm_problems, icmp6stat.icp6s_outhist[ICMP6_PARAM_PROB]);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if (do_icmp6_echos == CONFIG_BOOLEAN_YES || (do_icmp6_echos == CONFIG_BOOLEAN_AUTO &&
+ (icmp6stat.icp6s_inhist[ICMP6_ECHO_REQUEST] ||
+ icmp6stat.icp6s_outhist[ICMP6_ECHO_REQUEST] ||
+ icmp6stat.icp6s_inhist[ICMP6_ECHO_REPLY] ||
+ icmp6stat.icp6s_outhist[ICMP6_ECHO_REPLY] ||
+ netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
+ do_icmp6_echos = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_in = NULL, *rd_out = NULL, *rd_in_replies = NULL, *rd_out_replies = NULL;
+
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "ipv6",
+ "icmpechos",
+ NULL,
+ "icmp",
+ NULL,
+ "IPv6 ICMP Echo",
+ "messages/s",
+ "freebsd.plugin",
+ "net.inet6.icmp6.stats",
+ 10200,
+ update_every,
+ RRDSET_TYPE_LINE
+ );
+
+ rd_in = rrddim_add(st, "InEchos", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_out = rrddim_add(st, "OutEchos", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_in_replies = rrddim_add(st, "InEchoReplies", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_out_replies = rrddim_add(st, "OutEchoReplies", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ } else
+ rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_in, icmp6stat.icp6s_inhist[ICMP6_ECHO_REQUEST]);
+ rrddim_set_by_pointer(st, rd_out, icmp6stat.icp6s_outhist[ICMP6_ECHO_REQUEST]);
+ rrddim_set_by_pointer(st, rd_in_replies, icmp6stat.icp6s_inhist[ICMP6_ECHO_REPLY]);
+ rrddim_set_by_pointer(st, rd_out_replies, icmp6stat.icp6s_outhist[ICMP6_ECHO_REPLY]);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if (do_icmp6_router == CONFIG_BOOLEAN_YES || (do_icmp6_router == CONFIG_BOOLEAN_AUTO &&
+ (icmp6stat.icp6s_inhist[ND_ROUTER_SOLICIT] ||
+ icmp6stat.icp6s_outhist[ND_ROUTER_SOLICIT] ||
+ icmp6stat.icp6s_inhist[ND_ROUTER_ADVERT] ||
+ icmp6stat.icp6s_outhist[ND_ROUTER_ADVERT] ||
+ netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
+ do_icmp6_router = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_in_solicits = NULL, *rd_out_solicits = NULL,
+ *rd_in_advertisements = NULL, *rd_out_advertisements = NULL;
+
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "ipv6",
+ "icmprouter",
+ NULL,
+ "icmp",
+ NULL,
+ "IPv6 Router Messages",
+ "messages/s",
+ "freebsd.plugin",
+ "net.inet6.icmp6.stats",
+ 10400,
+ update_every,
+ RRDSET_TYPE_LINE
+ );
+
+ rd_in_solicits = rrddim_add(st, "InSolicits", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_out_solicits = rrddim_add(st, "OutSolicits", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_in_advertisements = rrddim_add(st, "InAdvertisements", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_out_advertisements = rrddim_add(st, "OutAdvertisements", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ } else
+ rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_in_solicits, icmp6stat.icp6s_inhist[ND_ROUTER_SOLICIT]);
+ rrddim_set_by_pointer(st, rd_out_solicits, icmp6stat.icp6s_outhist[ND_ROUTER_SOLICIT]);
+ rrddim_set_by_pointer(st, rd_in_advertisements, icmp6stat.icp6s_inhist[ND_ROUTER_ADVERT]);
+ rrddim_set_by_pointer(st, rd_out_advertisements, icmp6stat.icp6s_outhist[ND_ROUTER_ADVERT]);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if (do_icmp6_neighbor == CONFIG_BOOLEAN_YES || (do_icmp6_neighbor == CONFIG_BOOLEAN_AUTO &&
+ (icmp6stat.icp6s_inhist[ND_NEIGHBOR_SOLICIT] ||
+ icmp6stat.icp6s_outhist[ND_NEIGHBOR_SOLICIT] ||
+ icmp6stat.icp6s_inhist[ND_NEIGHBOR_ADVERT] ||
+ icmp6stat.icp6s_outhist[ND_NEIGHBOR_ADVERT] ||
+ netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
+ do_icmp6_neighbor = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_in_solicits = NULL, *rd_out_solicits = NULL,
+ *rd_in_advertisements = NULL, *rd_out_advertisements = NULL;
+
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "ipv6",
+ "icmpneighbor",
+ NULL,
+ "icmp",
+ NULL,
+ "IPv6 Neighbor Messages",
+ "messages/s",
+ "freebsd.plugin",
+ "net.inet6.icmp6.stats",
+ 10500,
+ update_every,
+ RRDSET_TYPE_LINE
+ );
+
+ rd_in_solicits = rrddim_add(st, "InSolicits", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_out_solicits = rrddim_add(st, "OutSolicits", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_in_advertisements = rrddim_add(st, "InAdvertisements", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_out_advertisements = rrddim_add(st, "OutAdvertisements", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ } else
+ rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_in_solicits, icmp6stat.icp6s_inhist[ND_NEIGHBOR_SOLICIT]);
+ rrddim_set_by_pointer(st, rd_out_solicits, icmp6stat.icp6s_outhist[ND_NEIGHBOR_SOLICIT]);
+ rrddim_set_by_pointer(st, rd_in_advertisements, icmp6stat.icp6s_inhist[ND_NEIGHBOR_ADVERT]);
+ rrddim_set_by_pointer(st, rd_out_advertisements, icmp6stat.icp6s_outhist[ND_NEIGHBOR_ADVERT]);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if (do_icmp6_types == CONFIG_BOOLEAN_YES || (do_icmp6_types == CONFIG_BOOLEAN_AUTO &&
+ (icmp6stat.icp6s_inhist[1] ||
+ icmp6stat.icp6s_inhist[128] ||
+ icmp6stat.icp6s_inhist[129] ||
+ icmp6stat.icp6s_inhist[136] ||
+ icmp6stat.icp6s_outhist[1] ||
+ icmp6stat.icp6s_outhist[128] ||
+ icmp6stat.icp6s_outhist[129] ||
+ icmp6stat.icp6s_outhist[133] ||
+ icmp6stat.icp6s_outhist[135] ||
+                                                         icmp6stat.icp6s_outhist[143] ||
+ netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
+ do_icmp6_types = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_in_1 = NULL, *rd_in_128 = NULL, *rd_in_129 = NULL, *rd_in_136 = NULL,
+ *rd_out_1 = NULL, *rd_out_128 = NULL, *rd_out_129 = NULL, *rd_out_133 = NULL,
+ *rd_out_135 = NULL, *rd_out_143 = NULL;
+
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "ipv6",
+ "icmptypes",
+ NULL,
+ "icmp",
+ NULL,
+ "IPv6 ICMP Types",
+ "messages/s",
+ "freebsd.plugin",
+ "net.inet6.icmp6.stats",
+ 10700,
+ update_every,
+ RRDSET_TYPE_LINE
+ );
+
+ rd_in_1 = rrddim_add(st, "InType1", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_in_128 = rrddim_add(st, "InType128", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_in_129 = rrddim_add(st, "InType129", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_in_136 = rrddim_add(st, "InType136", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_out_1 = rrddim_add(st, "OutType1", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_out_128 = rrddim_add(st, "OutType128", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_out_129 = rrddim_add(st, "OutType129", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_out_133 = rrddim_add(st, "OutType133", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_out_135 = rrddim_add(st, "OutType135", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_out_143 = rrddim_add(st, "OutType143", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ } else
+ rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_in_1, icmp6stat.icp6s_inhist[1]);
+ rrddim_set_by_pointer(st, rd_in_128, icmp6stat.icp6s_inhist[128]);
+ rrddim_set_by_pointer(st, rd_in_129, icmp6stat.icp6s_inhist[129]);
+ rrddim_set_by_pointer(st, rd_in_136, icmp6stat.icp6s_inhist[136]);
+ rrddim_set_by_pointer(st, rd_out_1, icmp6stat.icp6s_outhist[1]);
+ rrddim_set_by_pointer(st, rd_out_128, icmp6stat.icp6s_outhist[128]);
+ rrddim_set_by_pointer(st, rd_out_129, icmp6stat.icp6s_outhist[129]);
+ rrddim_set_by_pointer(st, rd_out_133, icmp6stat.icp6s_outhist[133]);
+ rrddim_set_by_pointer(st, rd_out_135, icmp6stat.icp6s_outhist[135]);
+ rrddim_set_by_pointer(st, rd_out_143, icmp6stat.icp6s_outhist[143]);
+ rrdset_done(st);
+ }
+ }
+ } else {
+ error("DISABLED: net.inet6.icmp6.stats module");
+ return 1;
+ }
+
+ return 0;
+}
diff --git a/collectors/freebsd.plugin/plugin_freebsd.c b/collectors/freebsd.plugin/plugin_freebsd.c
new file mode 100644
index 0000000..bee8395
--- /dev/null
+++ b/collectors/freebsd.plugin/plugin_freebsd.c
@@ -0,0 +1,175 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "plugin_freebsd.h"
+
+static struct freebsd_module {
+ const char *name;
+ const char *dim;
+
+ int enabled;
+
+ int (*func)(int update_every, usec_t dt);
+ usec_t duration;
+
+ RRDDIM *rd;
+
+} freebsd_modules[] = {
+
+ // system metrics
+ { .name = "kern.cp_time", .dim = "cp_time", .enabled = 1, .func = do_kern_cp_time },
+ { .name = "vm.loadavg", .dim = "loadavg", .enabled = 1, .func = do_vm_loadavg },
+ { .name = "system.ram", .dim = "system_ram", .enabled = 1, .func = do_system_ram },
+ { .name = "vm.swap_info", .dim = "swap", .enabled = 1, .func = do_vm_swap_info },
+ { .name = "vm.stats.vm.v_swappgs", .dim = "swap_io", .enabled = 1, .func = do_vm_stats_sys_v_swappgs },
+ { .name = "vm.vmtotal", .dim = "vmtotal", .enabled = 1, .func = do_vm_vmtotal },
+ { .name = "vm.stats.vm.v_forks", .dim = "forks", .enabled = 1, .func = do_vm_stats_sys_v_forks },
+ { .name = "vm.stats.sys.v_swtch", .dim = "context_swtch", .enabled = 1, .func = do_vm_stats_sys_v_swtch },
+ { .name = "hw.intrcnt", .dim = "hw_intr", .enabled = 1, .func = do_hw_intcnt },
+ { .name = "vm.stats.sys.v_intr", .dim = "dev_intr", .enabled = 1, .func = do_vm_stats_sys_v_intr },
+ { .name = "vm.stats.sys.v_soft", .dim = "soft_intr", .enabled = 1, .func = do_vm_stats_sys_v_soft },
+ { .name = "net.isr", .dim = "net_isr", .enabled = 1, .func = do_net_isr },
+ { .name = "kern.ipc.sem", .dim = "semaphores", .enabled = 1, .func = do_kern_ipc_sem },
+ { .name = "kern.ipc.shm", .dim = "shared_memory", .enabled = 1, .func = do_kern_ipc_shm },
+ { .name = "kern.ipc.msq", .dim = "message_queues", .enabled = 1, .func = do_kern_ipc_msq },
+ { .name = "uptime", .dim = "uptime", .enabled = 1, .func = do_uptime },
+
+ // memory metrics
+ { .name = "vm.stats.vm.v_pgfaults", .dim = "pgfaults", .enabled = 1, .func = do_vm_stats_sys_v_pgfaults },
+
+ // CPU metrics
+ { .name = "kern.cp_times", .dim = "cp_times", .enabled = 1, .func = do_kern_cp_times },
+ { .name = "dev.cpu.temperature", .dim = "cpu_temperature", .enabled = 1, .func = do_dev_cpu_temperature },
+ { .name = "dev.cpu.0.freq", .dim = "cpu_frequency", .enabled = 1, .func = do_dev_cpu_0_freq },
+
+ // disk metrics
+ { .name = "kern.devstat", .dim = "kern_devstat", .enabled = 1, .func = do_kern_devstat },
+ { .name = "getmntinfo", .dim = "getmntinfo", .enabled = 1, .func = do_getmntinfo },
+
+ // network metrics
+ { .name = "net.inet.tcp.states", .dim = "tcp_states", .enabled = 1, .func = do_net_inet_tcp_states },
+ { .name = "net.inet.tcp.stats", .dim = "tcp_stats", .enabled = 1, .func = do_net_inet_tcp_stats },
+ { .name = "net.inet.udp.stats", .dim = "udp_stats", .enabled = 1, .func = do_net_inet_udp_stats },
+ { .name = "net.inet.icmp.stats", .dim = "icmp_stats", .enabled = 1, .func = do_net_inet_icmp_stats },
+ { .name = "net.inet.ip.stats", .dim = "ip_stats", .enabled = 1, .func = do_net_inet_ip_stats },
+ { .name = "net.inet6.ip6.stats", .dim = "ip6_stats", .enabled = 1, .func = do_net_inet6_ip6_stats },
+ { .name = "net.inet6.icmp6.stats", .dim = "icmp6_stats", .enabled = 1, .func = do_net_inet6_icmp6_stats },
+
+ // network interfaces metrics
+ { .name = "getifaddrs", .dim = "getifaddrs", .enabled = 1, .func = do_getifaddrs },
+
+ // ZFS metrics
+ { .name = "kstat.zfs.misc.arcstats", .dim = "arcstats", .enabled = 1, .func = do_kstat_zfs_misc_arcstats },
+ { .name = "kstat.zfs.misc.zio_trim", .dim = "trim", .enabled = 1, .func = do_kstat_zfs_misc_zio_trim },
+
+ // ipfw metrics
+ { .name = "ipfw", .dim = "ipfw", .enabled = 1, .func = do_ipfw },
+
+ // the terminator of this array
+ { .name = NULL, .dim = NULL, .enabled = 0, .func = NULL }
+};
+
+static void freebsd_main_cleanup(void *ptr) {
+ struct netdata_static_thread *static_thread = (struct netdata_static_thread *)ptr;
+ static_thread->enabled = NETDATA_MAIN_THREAD_EXITING;
+
+ info("cleaning up...");
+
+ static_thread->enabled = NETDATA_MAIN_THREAD_EXITED;
+}
+
+void *freebsd_main(void *ptr) {
+ netdata_thread_cleanup_push(freebsd_main_cleanup, ptr);
+
+ int vdo_cpu_netdata = config_get_boolean("plugin:freebsd", "netdata server resources", 1);
+
+ // initialize FreeBSD plugin
+ if (freebsd_plugin_init())
+ netdata_cleanup_and_exit(1);
+
+ // check the enabled status for each module
+ int i;
+ for(i = 0 ; freebsd_modules[i].name ;i++) {
+ struct freebsd_module *pm = &freebsd_modules[i];
+
+ pm->enabled = config_get_boolean("plugin:freebsd", pm->name, pm->enabled);
+ pm->duration = 0ULL;
+ pm->rd = NULL;
+ }
+
+ usec_t step = localhost->rrd_update_every * USEC_PER_SEC;
+ heartbeat_t hb;
+ heartbeat_init(&hb);
+
+ while(!netdata_exit) {
+ usec_t hb_dt = heartbeat_next(&hb, step);
+ usec_t duration = 0ULL;
+
+ if(unlikely(netdata_exit)) break;
+
+ // BEGIN -- the job to be done
+
+ for(i = 0 ; freebsd_modules[i].name ;i++) {
+ struct freebsd_module *pm = &freebsd_modules[i];
+ if(unlikely(!pm->enabled)) continue;
+
+ debug(D_PROCNETDEV_LOOP, "FREEBSD calling %s.", pm->name);
+
+ pm->enabled = !pm->func(localhost->rrd_update_every, hb_dt);
+ pm->duration = heartbeat_monotonic_dt_to_now_usec(&hb) - duration;
+ duration += pm->duration;
+
+ if(unlikely(netdata_exit)) break;
+ }
+
+ // END -- the job is done
+
+ // --------------------------------------------------------------------
+
+ if(vdo_cpu_netdata) {
+ static RRDSET *st = NULL;
+
+ if(unlikely(!st)) {
+ st = rrdset_find_active_bytype_localhost("netdata", "plugin_freebsd_modules");
+
+ if(!st) {
+ st = rrdset_create_localhost(
+ "netdata"
+ , "plugin_freebsd_modules"
+ , NULL
+ , "freebsd"
+ , NULL
+ , "NetData FreeBSD Plugin Modules Durations"
+ , "milliseconds/run"
+ , "netdata"
+ , "stats"
+ , 132001
+ , localhost->rrd_update_every
+ , RRDSET_TYPE_STACKED
+ );
+
+ for(i = 0 ; freebsd_modules[i].name ;i++) {
+ struct freebsd_module *pm = &freebsd_modules[i];
+ if(unlikely(!pm->enabled)) continue;
+
+ pm->rd = rrddim_add(st, pm->dim, NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE);
+ }
+ }
+ }
+ else rrdset_next(st);
+
+ for(i = 0 ; freebsd_modules[i].name ;i++) {
+ struct freebsd_module *pm = &freebsd_modules[i];
+ if(unlikely(!pm->enabled)) continue;
+
+ rrddim_set_by_pointer(st, pm->rd, pm->duration);
+ }
+ rrdset_done(st);
+
+ global_statistics_charts();
+ registry_statistics();
+ }
+ }
+
+ netdata_thread_cleanup_pop(1);
+ return NULL;
+}
diff --git a/collectors/freebsd.plugin/plugin_freebsd.h b/collectors/freebsd.plugin/plugin_freebsd.h
new file mode 100644
index 0000000..ab46080
--- /dev/null
+++ b/collectors/freebsd.plugin/plugin_freebsd.h
@@ -0,0 +1,74 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_PLUGIN_FREEBSD_H
+#define NETDATA_PLUGIN_FREEBSD_H 1
+
+#include "daemon/common.h"
+
+#if (TARGET_OS == OS_FREEBSD)
+
+#define NETDATA_PLUGIN_HOOK_FREEBSD \
+ { \
+ .name = "PLUGIN[freebsd]", \
+ .config_section = CONFIG_SECTION_PLUGINS, \
+ .config_name = "freebsd", \
+ .enabled = 1, \
+ .thread = NULL, \
+ .init_routine = NULL, \
+ .start_routine = freebsd_main \
+ },
+
+
+#include <sys/sysctl.h>
+
+#define KILO_FACTOR 1024
+#define MEGA_FACTOR 1048576 // 1024 * 1024
+#define GIGA_FACTOR 1073741824 // 1024 * 1024 * 1024
+
+#define MAX_INT_DIGITS 10 // maximum number of digits for int
+
+void *freebsd_main(void *ptr);
+
+extern int freebsd_plugin_init();
+
+extern int do_vm_loadavg(int update_every, usec_t dt);
+extern int do_vm_vmtotal(int update_every, usec_t dt);
+extern int do_kern_cp_time(int update_every, usec_t dt);
+extern int do_kern_cp_times(int update_every, usec_t dt);
+extern int do_dev_cpu_temperature(int update_every, usec_t dt);
+extern int do_dev_cpu_0_freq(int update_every, usec_t dt);
+extern int do_hw_intcnt(int update_every, usec_t dt);
+extern int do_vm_stats_sys_v_intr(int update_every, usec_t dt);
+extern int do_vm_stats_sys_v_soft(int update_every, usec_t dt);
+extern int do_vm_stats_sys_v_swtch(int update_every, usec_t dt);
+extern int do_vm_stats_sys_v_forks(int update_every, usec_t dt);
+extern int do_vm_swap_info(int update_every, usec_t dt);
+extern int do_system_ram(int update_every, usec_t dt);
+extern int do_vm_stats_sys_v_swappgs(int update_every, usec_t dt);
+extern int do_vm_stats_sys_v_pgfaults(int update_every, usec_t dt);
+extern int do_kern_ipc_sem(int update_every, usec_t dt);
+extern int do_kern_ipc_shm(int update_every, usec_t dt);
+extern int do_kern_ipc_msq(int update_every, usec_t dt);
+extern int do_uptime(int update_every, usec_t dt);
+extern int do_net_isr(int update_every, usec_t dt);
+extern int do_net_inet_tcp_states(int update_every, usec_t dt);
+extern int do_net_inet_tcp_stats(int update_every, usec_t dt);
+extern int do_net_inet_udp_stats(int update_every, usec_t dt);
+extern int do_net_inet_icmp_stats(int update_every, usec_t dt);
+extern int do_net_inet_ip_stats(int update_every, usec_t dt);
+extern int do_net_inet6_ip6_stats(int update_every, usec_t dt);
+extern int do_net_inet6_icmp6_stats(int update_every, usec_t dt);
+extern int do_getifaddrs(int update_every, usec_t dt);
+extern int do_getmntinfo(int update_every, usec_t dt);
+extern int do_kern_devstat(int update_every, usec_t dt);
+extern int do_kstat_zfs_misc_arcstats(int update_every, usec_t dt);
+extern int do_kstat_zfs_misc_zio_trim(int update_every, usec_t dt);
+extern int do_ipfw(int update_every, usec_t dt);
+
+#else // (TARGET_OS == OS_FREEBSD)
+
+#define NETDATA_PLUGIN_HOOK_FREEBSD
+
+#endif // (TARGET_OS == OS_FREEBSD)
+
+#endif /* NETDATA_PLUGIN_FREEBSD_H */
diff --git a/collectors/freeipmi.plugin/Makefile.am b/collectors/freeipmi.plugin/Makefile.am
new file mode 100644
index 0000000..161784b
--- /dev/null
+++ b/collectors/freeipmi.plugin/Makefile.am
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/collectors/freeipmi.plugin/README.md b/collectors/freeipmi.plugin/README.md
new file mode 100644
index 0000000..52945e3
--- /dev/null
+++ b/collectors/freeipmi.plugin/README.md
@@ -0,0 +1,187 @@
+<!--
+title: "freeipmi.plugin"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/freeipmi.plugin/README.md
+-->
+
+# freeipmi.plugin
+
+Netdata has a [freeipmi](https://www.gnu.org/software/freeipmi/) plugin.
+
+> FreeIPMI provides in-band and out-of-band IPMI software based on the IPMI v1.5/2.0 specification. The IPMI specification defines a set of interfaces for platform management and is implemented by a number of vendors for system management. The features of IPMI that most users will be interested in are sensor monitoring, system event monitoring, power control, and serial-over-LAN (SOL).
+
+## Compile `freeipmi.plugin`
+
+1. install `libipmimonitoring-dev` or `libipmimonitoring-devel` (`freeipmi-devel` on RHEL-based systems) using the package manager of your system.
+
+2. re-install Netdata from source. The installer will detect that the required libraries are now available and will also build `freeipmi.plugin`.
+
+Keep in mind IPMI requires root access, so the plugin is setuid to root.
+
+If you just installed the required IPMI tools, please run the command `ipmimonitoring` at least once and verify that it returns sensor information. This command initialises the IPMI configuration, so that the Netdata plugin will be able to work.
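+
+For example:
+
+```sh
+# initialise the IPMI configuration and verify sensors are reported
+ipmimonitoring
+```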
+
+## Netdata use
+
+The plugin creates (up to) 8 charts, based on the information collected from IPMI:
+
+1. Number of sensors by state
+2. Number of events in SEL
+3. Temperatures (Celsius)
+4. Temperatures (Fahrenheit)
+5. Voltages
+6. Currents
+7. Power
+8. Fans
+
+It also adds 2 alarms:
+
+1. Sensors in a non-nominal state (i.e. warning or critical)
+2. SEL is non-empty
+
+![image](https://cloud.githubusercontent.com/assets/2662304/23674138/88926a20-037d-11e7-89c0-20e74ee10cd1.png)
+
+The plugin does a speed test when it starts, to find out the duration needed by the IPMI processor to respond. Depending on the speed of your IPMI processor, charts may need several seconds to show up on the dashboard.
+
+## `freeipmi.plugin` configuration
+
+The plugin supports a few options. To see them, run:
+
+```sh
+# /usr/libexec/netdata/plugins.d/freeipmi.plugin -h
+
+ netdata freeipmi.plugin 1.8.0-546-g72ce5d6b_rolling
+ Copyright (C) 2016-2017 Costa Tsaousis <costa@tsaousis.gr>
+ Released under GNU General Public License v3 or later.
+ All rights reserved.
+
+ This program is a data collector plugin for netdata.
+
+ Available command line options:
+
+ SECONDS data collection frequency
+ minimum: 5
+
+ debug enable verbose output
+ default: disabled
+
+ sel
+ no-sel enable/disable SEL collection
+ default: enabled
+
+ hostname HOST
+ username USER
+ password PASS connect to remote IPMI host
+ default: local IPMI processor
+
+ driver-type IPMIDRIVER
+ Specify the driver type to use instead of doing an auto selection.
+ The currently available outofband drivers are LAN and LAN_2_0,
+ which perform IPMI 1.5 and IPMI 2.0 respectively.
+ The currently available inband drivers are KCS, SSIF, OPENIPMI and SUNBMC.
+
+ sdr-cache-dir PATH directory for SDR cache files
+ default: /tmp
+
+ sensor-config-file FILE filename to read sensor configuration
+ default: system default
+
+ ignore N1,N2,N3,... sensor IDs to ignore
+ default: none
+
+ -v
+ -V
+ version print version and exit
+
+ Linux kernel module for IPMI is CPU hungry.
+ On Linux run this to lower kipmiN CPU utilization:
+ # echo 10 > /sys/module/ipmi_si/parameters/kipmid_max_busy_us
+
+ or create: /etc/modprobe.d/ipmi.conf with these contents:
+ options ipmi_si kipmid_max_busy_us=10
+
+ For more information:
+ https://github.com/netdata/netdata/tree/master/collectors/freeipmi.plugin
+```
+
+You can set these options in `/etc/netdata/netdata.conf`, in this section:
+
+```
+[plugin:freeipmi]
+ update every = 5
+ command options =
+```
+
+Append the settings you need to `command options =`. The minimum `update every` is 5 seconds (enforced internally by the plugin); IPMI is slow and CPU hungry, so collecting once every 5 seconds is a reasonable default.
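+
+For example, to collect once every 10 seconds and disable SEL collection (illustrative values, adjust them to your hardware):
+
+```
+[plugin:freeipmi]
+    update every = 10
+    command options = no-sel
+```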
+
+## Ignoring specific sensors
+
+Specific sensor IDs can be excluded from the FreeIPMI tools by editing `/etc/freeipmi/freeipmi.conf` and setting the IDs to be ignored at `ipmi-sensors-exclude-record-ids`. **However, this file is not used by `libipmimonitoring`** (the library used by Netdata's `freeipmi.plugin`).
+
+So, `freeipmi.plugin` supports the option `ignore`, which accepts a comma-separated list of sensor IDs to ignore. To configure it, edit `/etc/netdata/netdata.conf` and set:
+
+```
+[plugin:freeipmi]
+ command options = ignore 1,2,3,4,...
+```
+
+To find the IDs to ignore, run the command `ipmimonitoring`. The first column contains the sensor IDs you need:
+
+```
+ID | Name | Type | State | Reading | Units | Event
+1 | Ambient Temp | Temperature | Nominal | 26.00 | C | 'OK'
+2 | Altitude | Other Units Based Sensor | Nominal | 480.00 | ft | 'OK'
+3 | Avg Power | Current | Nominal | 100.00 | W | 'OK'
+4 | Planar 3.3V | Voltage | Nominal | 3.29 | V | 'OK'
+5 | Planar 5V | Voltage | Nominal | 4.90 | V | 'OK'
+6 | Planar 12V | Voltage | Nominal | 11.99 | V | 'OK'
+7 | Planar VBAT | Voltage | Nominal | 2.95 | V | 'OK'
+8 | Fan 1A Tach | Fan | Nominal | 3132.00 | RPM | 'OK'
+9 | Fan 1B Tach | Fan | Nominal | 2150.00 | RPM | 'OK'
+10 | Fan 2A Tach | Fan | Nominal | 2494.00 | RPM | 'OK'
+11 | Fan 2B Tach | Fan | Nominal | 1825.00 | RPM | 'OK'
+12 | Fan 3A Tach | Fan | Nominal | 3538.00 | RPM | 'OK'
+13 | Fan 3B Tach | Fan | Nominal | 2625.00 | RPM | 'OK'
+14 | Fan 1 | Entity Presence | Nominal | N/A | N/A | 'Entity Present'
+15 | Fan 2 | Entity Presence | Nominal | N/A | N/A | 'Entity Present'
+...
+```
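+
+For example, to ignore the two `Entity Presence` sensors shown above (IDs 14 and 15), you would set:
+
+```
+[plugin:freeipmi]
+    command options = ignore 14,15
+```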
+
+## Debugging
+
+You can run the plugin by hand:
+
+```sh
+# become user netdata
+sudo su -s /bin/sh netdata
+
+# run the plugin in debug mode
+/usr/libexec/netdata/plugins.d/freeipmi.plugin 5 debug
+```
+
+You will get verbose output on what the plugin does.
+
+## kipmi0 CPU usage
+
+There have been reports of `kipmi` showing increased CPU usage when IPMI is queried. To lower the system's CPU
+consumption, you can issue this command:
+
+```sh
+echo 10 > /sys/module/ipmi_si/parameters/kipmid_max_busy_us
+```
+
+You can also permanently set the above setting by creating the file `/etc/modprobe.d/ipmi.conf` with this content:
+
+```sh
+# prevent kipmi from consuming 100% CPU
+options ipmi_si kipmid_max_busy_us=10
+```
+
+This instructs the kernel IPMI module to pause for a tick between IPMI checks. Querying IPMI will be a lot slower (e.g. several seconds for IPMI to respond), but `kipmi` will not use any noticeable CPU. You can also use a higher number (this is the number of microseconds to poll IPMI for a response before waiting for a tick).
+
+If you need to disable IPMI for Netdata, edit `/etc/netdata/netdata.conf` and set:
+
+```
+[plugins]
+ freeipmi = no
+```
diff --git a/collectors/freeipmi.plugin/freeipmi_plugin.c b/collectors/freeipmi.plugin/freeipmi_plugin.c
new file mode 100644
index 0000000..bd3c533
--- /dev/null
+++ b/collectors/freeipmi.plugin/freeipmi_plugin.c
@@ -0,0 +1,1871 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+/*
+ * netdata freeipmi.plugin
+ * Copyright (C) 2017 Costa Tsaousis
+ * GPL v3+
+ *
+ * Based on:
+ * ipmimonitoring-sensors.c,v 1.51 2016/11/02 23:46:24 chu11 Exp
+ * ipmimonitoring-sel.c,v 1.51 2016/11/02 23:46:24 chu11 Exp
+ *
+ * Copyright (C) 2007-2015 Lawrence Livermore National Security, LLC.
+ * Copyright (C) 2006-2007 The Regents of the University of California.
+ * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
+ * Written by Albert Chu <chu11@llnl.gov>
+ * UCRL-CODE-222073
+ */
+
+#include "../../libnetdata/libnetdata.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <string.h>
+#include <assert.h>
+#include <errno.h>
+#include <unistd.h>
+#include <sys/time.h>
+
+#ifdef HAVE_FREEIPMI
+
+#define IPMI_PARSE_DEVICE_LAN_STR "lan"
+#define IPMI_PARSE_DEVICE_LAN_2_0_STR "lan_2_0"
+#define IPMI_PARSE_DEVICE_LAN_2_0_STR2 "lan20"
+#define IPMI_PARSE_DEVICE_LAN_2_0_STR3 "lan_20"
+#define IPMI_PARSE_DEVICE_LAN_2_0_STR4 "lan2_0"
+#define IPMI_PARSE_DEVICE_LAN_2_0_STR5 "lanplus"
+#define IPMI_PARSE_DEVICE_KCS_STR "kcs"
+#define IPMI_PARSE_DEVICE_SSIF_STR "ssif"
+#define IPMI_PARSE_DEVICE_OPENIPMI_STR "openipmi"
+#define IPMI_PARSE_DEVICE_OPENIPMI_STR2 "open"
+#define IPMI_PARSE_DEVICE_SUNBMC_STR "sunbmc"
+#define IPMI_PARSE_DEVICE_SUNBMC_STR2 "bmc"
+#define IPMI_PARSE_DEVICE_INTELDCMI_STR "inteldcmi"
+
+// ----------------------------------------------------------------------------
+
+// callback required by fatal()
+void netdata_cleanup_and_exit(int ret) {
+ exit(ret);
+}
+
+void send_statistics( const char *action, const char *action_result, const char *action_data) {
+ (void)action;
+ (void)action_result;
+ (void)action_data;
+ return;
+}
+
+// callbacks required by popen()
+void signals_block(void) {};
+void signals_unblock(void) {};
+void signals_reset(void) {};
+
+// callback required by eval()
+int health_variable_lookup(const char *variable, uint32_t hash, struct rrdcalc *rc, calculated_number *result) {
+ (void)variable;
+ (void)hash;
+ (void)rc;
+ (void)result;
+ return 0;
+};
+
+// required by get_system_cpus()
+char *netdata_configured_host_prefix = "";
+
+// ----------------------------------------------------------------------------
+
+#include <ipmi_monitoring.h>
+#include <ipmi_monitoring_bitmasks.h>
+
+/* Communication Configuration - Initialize accordingly */
+
+/* Hostname, NULL for In-band communication, non-null for a hostname */
+char *hostname = NULL;
+
+/* In-band Communication Configuration */
+int driver_type = -1; // IPMI_MONITORING_DRIVER_TYPE_KCS; /* or -1 for default */
+int disable_auto_probe = 0; /* probe for in-band device */
+unsigned int driver_address = 0; /* not used if probing */
+unsigned int register_spacing = 0; /* not used if probing */
+char *driver_device = NULL; /* not used if probing */
+
+/* Out-of-band Communication Configuration */
+int protocol_version = -1; //IPMI_MONITORING_PROTOCOL_VERSION_1_5; /* or -1 for default */
+char *username = "foousername";
+char *password = "foopassword";
+unsigned char *ipmi_k_g = NULL;
+unsigned int ipmi_k_g_len = 0;
+int privilege_level = -1; // IPMI_MONITORING_PRIVILEGE_LEVEL_USER; /* or -1 for default */
+int authentication_type = -1; // IPMI_MONITORING_AUTHENTICATION_TYPE_MD5; /* or -1 for default */
+int cipher_suite_id = 0; /* or -1 for default */
+int session_timeout = 0; /* 0 for default */
+int retransmission_timeout = 0; /* 0 for default */
+
+/* Workarounds - specify workaround flags if necessary */
+unsigned int workaround_flags = 0;
+
+/* Initialize w/ record id numbers to only monitor specific record ids */
+unsigned int record_ids[] = {0};
+unsigned int record_ids_length = 0;
+
+/* Initialize w/ sensor types to only monitor specific sensor types
+ * see ipmi_monitoring.h sensor types list.
+ */
+unsigned int sensor_types[] = {0};
+unsigned int sensor_types_length = 0;
+
+/* Set to an appropriate alternate if desired */
+char *sdr_cache_directory = "/tmp";
+char *sensor_config_file = NULL;
+
+/* Set to 1 or 0 to enable these sensor reading flags
+ * - See ipmi_monitoring.h for descriptions of these flags.
+ */
+int reread_sdr_cache = 0;
+int ignore_non_interpretable_sensors = 0;
+int bridge_sensors = 0;
+int interpret_oem_data = 0;
+int shared_sensors = 0;
+int discrete_reading = 1;
+int ignore_scanning_disabled = 0;
+int assume_bmc_owner = 0;
+int entity_sensor_names = 0;
+
+/* Initialization flags
+ *
+ * Most commonly bitwise OR IPMI_MONITORING_FLAGS_DEBUG and/or
+ * IPMI_MONITORING_FLAGS_DEBUG_IPMI_PACKETS for extra debugging
+ * information.
+ */
+unsigned int ipmimonitoring_init_flags = 0;
+
+int errnum;
+
+// ----------------------------------------------------------------------------
+// SEL only variables
+
+/* Initialize w/ date range to only monitor a specific date range */
+char *date_begin = NULL; /* use MM/DD/YYYY format */
+char *date_end = NULL; /* use MM/DD/YYYY format */
+
+int assume_system_event_record = 0;
+
+char *sel_config_file = NULL;
+
+
+// ----------------------------------------------------------------------------
+// functions common to sensors and SEL
+
+static void
+_init_ipmi_config (struct ipmi_monitoring_ipmi_config *ipmi_config)
+{
+ fatal_assert(ipmi_config);
+
+ ipmi_config->driver_type = driver_type;
+ ipmi_config->disable_auto_probe = disable_auto_probe;
+ ipmi_config->driver_address = driver_address;
+ ipmi_config->register_spacing = register_spacing;
+ ipmi_config->driver_device = driver_device;
+
+ ipmi_config->protocol_version = protocol_version;
+ ipmi_config->username = username;
+ ipmi_config->password = password;
+ ipmi_config->k_g = ipmi_k_g;
+ ipmi_config->k_g_len = ipmi_k_g_len;
+ ipmi_config->privilege_level = privilege_level;
+ ipmi_config->authentication_type = authentication_type;
+ ipmi_config->cipher_suite_id = cipher_suite_id;
+ ipmi_config->session_timeout_len = session_timeout;
+ ipmi_config->retransmission_timeout_len = retransmission_timeout;
+
+ ipmi_config->workaround_flags = workaround_flags;
+}
+
+#ifdef NETDATA_COMMENTED
+static const char *
+_get_sensor_type_string (int sensor_type)
+{
+ switch (sensor_type)
+ {
+ case IPMI_MONITORING_SENSOR_TYPE_RESERVED:
+ return ("Reserved");
+ case IPMI_MONITORING_SENSOR_TYPE_TEMPERATURE:
+ return ("Temperature");
+ case IPMI_MONITORING_SENSOR_TYPE_VOLTAGE:
+ return ("Voltage");
+ case IPMI_MONITORING_SENSOR_TYPE_CURRENT:
+ return ("Current");
+ case IPMI_MONITORING_SENSOR_TYPE_FAN:
+ return ("Fan");
+ case IPMI_MONITORING_SENSOR_TYPE_PHYSICAL_SECURITY:
+ return ("Physical Security");
+ case IPMI_MONITORING_SENSOR_TYPE_PLATFORM_SECURITY_VIOLATION_ATTEMPT:
+ return ("Platform Security Violation Attempt");
+ case IPMI_MONITORING_SENSOR_TYPE_PROCESSOR:
+ return ("Processor");
+ case IPMI_MONITORING_SENSOR_TYPE_POWER_SUPPLY:
+ return ("Power Supply");
+ case IPMI_MONITORING_SENSOR_TYPE_POWER_UNIT:
+ return ("Power Unit");
+ case IPMI_MONITORING_SENSOR_TYPE_COOLING_DEVICE:
+ return ("Cooling Device");
+ case IPMI_MONITORING_SENSOR_TYPE_OTHER_UNITS_BASED_SENSOR:
+ return ("Other Units Based Sensor");
+ case IPMI_MONITORING_SENSOR_TYPE_MEMORY:
+ return ("Memory");
+ case IPMI_MONITORING_SENSOR_TYPE_DRIVE_SLOT:
+ return ("Drive Slot");
+ case IPMI_MONITORING_SENSOR_TYPE_POST_MEMORY_RESIZE:
+ return ("POST Memory Resize");
+ case IPMI_MONITORING_SENSOR_TYPE_SYSTEM_FIRMWARE_PROGRESS:
+ return ("System Firmware Progress");
+ case IPMI_MONITORING_SENSOR_TYPE_EVENT_LOGGING_DISABLED:
+ return ("Event Logging Disabled");
+ case IPMI_MONITORING_SENSOR_TYPE_WATCHDOG1:
+ return ("Watchdog 1");
+ case IPMI_MONITORING_SENSOR_TYPE_SYSTEM_EVENT:
+ return ("System Event");
+ case IPMI_MONITORING_SENSOR_TYPE_CRITICAL_INTERRUPT:
+ return ("Critical Interrupt");
+ case IPMI_MONITORING_SENSOR_TYPE_BUTTON_SWITCH:
+ return ("Button/Switch");
+ case IPMI_MONITORING_SENSOR_TYPE_MODULE_BOARD:
+ return ("Module/Board");
+ case IPMI_MONITORING_SENSOR_TYPE_MICROCONTROLLER_COPROCESSOR:
+ return ("Microcontroller/Coprocessor");
+ case IPMI_MONITORING_SENSOR_TYPE_ADD_IN_CARD:
+ return ("Add In Card");
+ case IPMI_MONITORING_SENSOR_TYPE_CHASSIS:
+ return ("Chassis");
+ case IPMI_MONITORING_SENSOR_TYPE_CHIP_SET:
+ return ("Chip Set");
+ case IPMI_MONITORING_SENSOR_TYPE_OTHER_FRU:
+ return ("Other Fru");
+ case IPMI_MONITORING_SENSOR_TYPE_CABLE_INTERCONNECT:
+ return ("Cable/Interconnect");
+ case IPMI_MONITORING_SENSOR_TYPE_TERMINATOR:
+ return ("Terminator");
+ case IPMI_MONITORING_SENSOR_TYPE_SYSTEM_BOOT_INITIATED:
+ return ("System Boot Initiated");
+ case IPMI_MONITORING_SENSOR_TYPE_BOOT_ERROR:
+ return ("Boot Error");
+ case IPMI_MONITORING_SENSOR_TYPE_OS_BOOT:
+ return ("OS Boot");
+ case IPMI_MONITORING_SENSOR_TYPE_OS_CRITICAL_STOP:
+ return ("OS Critical Stop");
+ case IPMI_MONITORING_SENSOR_TYPE_SLOT_CONNECTOR:
+ return ("Slot/Connector");
+ case IPMI_MONITORING_SENSOR_TYPE_SYSTEM_ACPI_POWER_STATE:
+ return ("System ACPI Power State");
+ case IPMI_MONITORING_SENSOR_TYPE_WATCHDOG2:
+ return ("Watchdog 2");
+ case IPMI_MONITORING_SENSOR_TYPE_PLATFORM_ALERT:
+ return ("Platform Alert");
+ case IPMI_MONITORING_SENSOR_TYPE_ENTITY_PRESENCE:
+ return ("Entity Presence");
+ case IPMI_MONITORING_SENSOR_TYPE_MONITOR_ASIC_IC:
+ return ("Monitor ASIC/IC");
+ case IPMI_MONITORING_SENSOR_TYPE_LAN:
+ return ("LAN");
+ case IPMI_MONITORING_SENSOR_TYPE_MANAGEMENT_SUBSYSTEM_HEALTH:
+ return ("Management Subsystem Health");
+ case IPMI_MONITORING_SENSOR_TYPE_BATTERY:
+ return ("Battery");
+ case IPMI_MONITORING_SENSOR_TYPE_SESSION_AUDIT:
+ return ("Session Audit");
+ case IPMI_MONITORING_SENSOR_TYPE_VERSION_CHANGE:
+ return ("Version Change");
+ case IPMI_MONITORING_SENSOR_TYPE_FRU_STATE:
+ return ("FRU State");
+ }
+
+ return ("Unrecognized");
+}
+#endif // NETDATA_COMMENTED
+
+
+// ----------------------------------------------------------------------------
+// BEGIN NETDATA CODE
+
+static int debug = 0;
+
+static int netdata_update_every = 5; // this is the minimum update frequency
+static int netdata_priority = 90000;
+static int netdata_do_sel = 1;
+
+static size_t netdata_sensors_updated = 0;
+static size_t netdata_sensors_collected = 0;
+static size_t netdata_sel_events = 0;
+static size_t netdata_sensors_states_nominal = 0;
+static size_t netdata_sensors_states_warning = 0;
+static size_t netdata_sensors_states_critical = 0;
+
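+// collected sensors are kept in a singly-linked list; newly discovered sensors
+// are prepended to the head (sensors_root)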
+struct sensor {
+ int record_id;
+ int sensor_number;
+ int sensor_type;
+ int sensor_state;
+ int sensor_units;
+ char *sensor_name;
+
+ int sensor_reading_type;
+ union {
+ uint8_t bool_value;
+ uint32_t uint32_value;
+ double double_value;
+ } sensor_reading;
+
+ int sent;
+ int ignore;
+ int exposed;
+ int updated;
+ struct sensor *next;
+} *sensors_root = NULL;
+
+static void netdata_mark_as_not_updated() {
+ struct sensor *sn;
+ for(sn = sensors_root; sn ;sn = sn->next)
+ sn->updated = sn->sent = 0;
+
+ netdata_sensors_updated = 0;
+ netdata_sensors_collected = 0;
+ netdata_sel_events = 0;
+
+ netdata_sensors_states_nominal = 0;
+ netdata_sensors_states_warning = 0;
+ netdata_sensors_states_critical = 0;
+}
+
+static void send_chart_to_netdata_for_units(int units) {
+ struct sensor *sn, *sn_stored;
+ int dupfound, multiplier;
+
+ switch(units) {
+ case IPMI_MONITORING_SENSOR_UNITS_CELSIUS:
+ printf("CHART ipmi.temperatures_c '' 'System Celsius Temperatures read by IPMI' 'Celsius' 'temperatures' 'ipmi.temperatures_c' 'line' %d %d\n"
+ , netdata_priority + 10
+ , netdata_update_every
+ );
+ break;
+
+ case IPMI_MONITORING_SENSOR_UNITS_FAHRENHEIT:
+ printf("CHART ipmi.temperatures_f '' 'System Fahrenheit Temperatures read by IPMI' 'Fahrenheit' 'temperatures' 'ipmi.temperatures_f' 'line' %d %d\n"
+ , netdata_priority + 11
+ , netdata_update_every
+ );
+ break;
+
+ case IPMI_MONITORING_SENSOR_UNITS_VOLTS:
+ printf("CHART ipmi.volts '' 'System Voltages read by IPMI' 'Volts' 'voltages' 'ipmi.voltages' 'line' %d %d\n"
+ , netdata_priority + 12
+ , netdata_update_every
+ );
+ break;
+
+ case IPMI_MONITORING_SENSOR_UNITS_AMPS:
+ printf("CHART ipmi.amps '' 'System Current read by IPMI' 'Amps' 'current' 'ipmi.amps' 'line' %d %d\n"
+ , netdata_priority + 13
+ , netdata_update_every
+ );
+ break;
+
+ case IPMI_MONITORING_SENSOR_UNITS_RPM:
+ printf("CHART ipmi.rpm '' 'System Fans read by IPMI' 'RPM' 'fans' 'ipmi.rpm' 'line' %d %d\n"
+ , netdata_priority + 14
+ , netdata_update_every
+ );
+ break;
+
+ case IPMI_MONITORING_SENSOR_UNITS_WATTS:
+ printf("CHART ipmi.watts '' 'System Power read by IPMI' 'Watts' 'power' 'ipmi.watts' 'line' %d %d\n"
+ , netdata_priority + 5
+ , netdata_update_every
+ );
+ break;
+
+ case IPMI_MONITORING_SENSOR_UNITS_PERCENT:
+ printf("CHART ipmi.percent '' 'System Metrics read by IPMI' '%%' 'other' 'ipmi.percent' 'line' %d %d\n"
+ , netdata_priority + 15
+ , netdata_update_every
+ );
+ break;
+
+ default:
+ for(sn = sensors_root; sn; sn = sn->next)
+ if(sn->sensor_units == units)
+ sn->ignore = 1;
+ return;
+ }
+
+ for(sn = sensors_root; sn; sn = sn->next) {
+ dupfound = 0;
+ if(sn->sensor_units == units && sn->updated && !sn->ignore) {
+ sn->exposed = 1;
+ multiplier = 1;
+
+ switch(sn->sensor_reading_type) {
+ case IPMI_MONITORING_SENSOR_READING_TYPE_DOUBLE:
+ multiplier = 1000;
+ // fallthrough
+ case IPMI_MONITORING_SENSOR_READING_TYPE_UNSIGNED_INTEGER8_BOOL:
+ case IPMI_MONITORING_SENSOR_READING_TYPE_UNSIGNED_INTEGER32:
+ for (sn_stored = sensors_root; sn_stored; sn_stored = sn_stored->next) {
+ if (sn_stored == sn) continue;
+ // If the name is a duplicate, append the sensor number
+ if ( !strcmp(sn_stored->sensor_name, sn->sensor_name) ) {
+ dupfound = 1;
+ printf("DIMENSION i%d_n%d_r%d '%s i%d' absolute 1 %d\n"
+ , sn->sensor_number
+ , sn->record_id
+ , sn->sensor_reading_type
+ , sn->sensor_name
+ , sn->sensor_number
+ , multiplier
+ );
+ break;
+ }
+ }
+                    // No duplicate name was found, display it with the sensor name only
+ if (!dupfound) {
+ // display without ID
+ printf("DIMENSION i%d_n%d_r%d '%s' absolute 1 %d\n"
+ , sn->sensor_number
+ , sn->record_id
+ , sn->sensor_reading_type
+ , sn->sensor_name
+ , multiplier
+ );
+ }
+ break;
+
+ default:
+ sn->ignore = 1;
+ break;
+ }
+ }
+ }
+}
+
+static void send_metrics_to_netdata_for_units(int units) {
+ struct sensor *sn;
+
+ switch(units) {
+ case IPMI_MONITORING_SENSOR_UNITS_CELSIUS:
+ printf("BEGIN ipmi.temperatures_c\n");
+ break;
+
+ case IPMI_MONITORING_SENSOR_UNITS_FAHRENHEIT:
+ printf("BEGIN ipmi.temperatures_f\n");
+ break;
+
+ case IPMI_MONITORING_SENSOR_UNITS_VOLTS:
+ printf("BEGIN ipmi.volts\n");
+ break;
+
+ case IPMI_MONITORING_SENSOR_UNITS_AMPS:
+ printf("BEGIN ipmi.amps\n");
+ break;
+
+ case IPMI_MONITORING_SENSOR_UNITS_RPM:
+ printf("BEGIN ipmi.rpm\n");
+ break;
+
+ case IPMI_MONITORING_SENSOR_UNITS_WATTS:
+ printf("BEGIN ipmi.watts\n");
+ break;
+
+ case IPMI_MONITORING_SENSOR_UNITS_PERCENT:
+ printf("BEGIN ipmi.percent\n");
+ break;
+
+ default:
+ for(sn = sensors_root; sn; sn = sn->next)
+ if(sn->sensor_units == units)
+ sn->ignore = 1;
+ return;
+ }
+
+ for(sn = sensors_root; sn; sn = sn->next) {
+ if(sn->sensor_units == units && sn->updated && !sn->sent && !sn->ignore) {
+ netdata_sensors_updated++;
+
+ sn->sent = 1;
+
+ switch(sn->sensor_reading_type) {
+ case IPMI_MONITORING_SENSOR_READING_TYPE_UNSIGNED_INTEGER8_BOOL:
+ printf("SET i%d_n%d_r%d = %u\n"
+ , sn->sensor_number
+ , sn->record_id
+ , sn->sensor_reading_type
+ , sn->sensor_reading.bool_value
+ );
+ break;
+
+ case IPMI_MONITORING_SENSOR_READING_TYPE_UNSIGNED_INTEGER32:
+ printf("SET i%d_n%d_r%d = %u\n"
+ , sn->sensor_number
+ , sn->record_id
+ , sn->sensor_reading_type
+ , sn->sensor_reading.uint32_value
+ );
+ break;
+
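+                // the plugin protocol transfers integer values, so doubles are
+                // sent scaled by 1000; the matching DIMENSION was registered
+                // with a divisor of 1000 to restore the original value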
+ case IPMI_MONITORING_SENSOR_READING_TYPE_DOUBLE:
+ printf("SET i%d_n%d_r%d = %lld\n"
+ , sn->sensor_number
+ , sn->record_id
+ , sn->sensor_reading_type
+ , (long long int)(sn->sensor_reading.double_value * 1000)
+ );
+ break;
+
+ default:
+ sn->ignore = 1;
+ break;
+ }
+ }
+ }
+
+ printf("END\n");
+}
+
+static void send_metrics_to_netdata() {
+ static int sel_chart_generated = 0, sensors_states_chart_generated = 0;
+ struct sensor *sn;
+
+ if(netdata_do_sel && !sel_chart_generated) {
+ sel_chart_generated = 1;
+ printf("CHART ipmi.events '' 'IPMI Events' 'events' 'events' ipmi.sel area %d %d\n"
+ , netdata_priority + 2
+ , netdata_update_every
+ );
+ printf("DIMENSION events '' absolute 1 1\n");
+ }
+
+ if(!sensors_states_chart_generated) {
+ sensors_states_chart_generated = 1;
+ printf("CHART ipmi.sensors_states '' 'IPMI Sensors State' 'sensors' 'states' ipmi.sensors_states line %d %d\n"
+ , netdata_priority + 1
+ , netdata_update_every
+ );
+ printf("DIMENSION nominal '' absolute 1 1\n");
+ printf("DIMENSION critical '' absolute 1 1\n");
+ printf("DIMENSION warning '' absolute 1 1\n");
+ }
+
+ // generate the CHART/DIMENSION lines, if we have to
+ for(sn = sensors_root; sn; sn = sn->next)
+ if(sn->updated && !sn->exposed && !sn->ignore)
+ send_chart_to_netdata_for_units(sn->sensor_units);
+
+ if(netdata_do_sel) {
+ printf(
+ "BEGIN ipmi.events\n"
+ "SET events = %zu\n"
+ "END\n"
+ , netdata_sel_events
+ );
+ }
+
+ printf(
+ "BEGIN ipmi.sensors_states\n"
+ "SET nominal = %zu\n"
+ "SET warning = %zu\n"
+ "SET critical = %zu\n"
+ "END\n"
+ , netdata_sensors_states_nominal
+ , netdata_sensors_states_warning
+ , netdata_sensors_states_critical
+ );
+
+ // send metrics to netdata
+ for(sn = sensors_root; sn; sn = sn->next)
+ if(sn->updated && sn->exposed && !sn->sent && !sn->ignore)
+ send_metrics_to_netdata_for_units(sn->sensor_units);
+
+}
+
+static int *excluded_record_ids = NULL;
+size_t excluded_record_ids_length = 0;
+
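+// parse a list of sensor record ids (e.g. "1,2,3"); any non-digit characters
+// act as separators and zero values are skipped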
+static void excluded_record_ids_parse(const char *s) {
+ if(!s) return;
+
+ while(*s) {
+ while(*s && !isdigit(*s)) s++;
+
+ if(isdigit(*s)) {
+ char *e;
+ unsigned long n = strtoul(s, &e, 10);
+ s = e;
+
+ if(n != 0) {
+ excluded_record_ids = realloc(excluded_record_ids, (excluded_record_ids_length + 1) * sizeof(int));
+ if(!excluded_record_ids) {
+ fprintf(stderr, "freeipmi.plugin: failed to allocate memory. Exiting.");
+ exit(1);
+ }
+ excluded_record_ids[excluded_record_ids_length++] = (int)n;
+ }
+ }
+ }
+
+ if(debug) {
+ fprintf(stderr, "freeipmi.plugin: excluded record ids:");
+ size_t i;
+ for(i = 0; i < excluded_record_ids_length; i++) {
+ fprintf(stderr, " %d", excluded_record_ids[i]);
+ }
+ fprintf(stderr, "\n");
+ }
+}
+
+static int *excluded_status_record_ids = NULL;
+size_t excluded_status_record_ids_length = 0;
+
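+// same parsing rules as excluded_record_ids_parse() above, but this list only
+// excludes sensors from the status (nominal/warning/critical) accounting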
+static void excluded_status_record_ids_parse(const char *s) {
+ if(!s) return;
+
+ while(*s) {
+ while(*s && !isdigit(*s)) s++;
+
+ if(isdigit(*s)) {
+ char *e;
+ unsigned long n = strtoul(s, &e, 10);
+ s = e;
+
+ if(n != 0) {
+ excluded_status_record_ids = realloc(excluded_status_record_ids, (excluded_status_record_ids_length + 1) * sizeof(int));
+ if(!excluded_status_record_ids) {
+                    fprintf(stderr, "freeipmi.plugin: failed to allocate memory. Exiting.\n");
+ exit(1);
+ }
+ excluded_status_record_ids[excluded_status_record_ids_length++] = (int)n;
+ }
+ }
+ }
+
+ if(debug) {
+ fprintf(stderr, "freeipmi.plugin: excluded status record ids:");
+ size_t i;
+ for(i = 0; i < excluded_status_record_ids_length; i++) {
+ fprintf(stderr, " %d", excluded_status_record_ids[i]);
+ }
+ fprintf(stderr, "\n");
+ }
+}
+
+
+static int excluded_record_ids_check(int record_id) {
+ size_t i;
+
+ for(i = 0; i < excluded_record_ids_length; i++) {
+ if(excluded_record_ids[i] == record_id)
+ return 1;
+ }
+
+ return 0;
+}
+
+static int excluded_status_record_ids_check(int record_id) {
+ size_t i;
+
+ for(i = 0; i < excluded_status_record_ids_length; i++) {
+ if(excluded_status_record_ids[i] == record_id)
+ return 1;
+ }
+
+ return 0;
+}
+
+static void netdata_get_sensor(
+ int record_id
+ , int sensor_number
+ , int sensor_type
+ , int sensor_state
+ , int sensor_units
+ , int sensor_reading_type
+ , char *sensor_name
+ , void *sensor_reading
+) {
+ // find the sensor record
+ struct sensor *sn;
+ for(sn = sensors_root; sn ;sn = sn->next)
+ if( sn->record_id == record_id &&
+ sn->sensor_number == sensor_number &&
+ sn->sensor_reading_type == sensor_reading_type &&
+ sn->sensor_units == sensor_units &&
+ !strcmp(sn->sensor_name, sensor_name)
+ )
+ break;
+
+ if(!sn) {
+ // not found, create it
+ // check if it is excluded
+ if(excluded_record_ids_check(record_id)) {
+ if(debug) fprintf(stderr, "Sensor '%s' is excluded by excluded_record_ids_check()\n", sensor_name);
+ return;
+ }
+
+ if(debug) fprintf(stderr, "Allocating new sensor data record for sensor '%s', id %d, number %d, type %d, state %d, units %d, reading_type %d\n", sensor_name, record_id, sensor_number, sensor_type, sensor_state, sensor_units, sensor_reading_type);
+
+ sn = calloc(1, sizeof(struct sensor));
+ if(!sn) {
+ fatal("cannot allocate %zu bytes of memory.", sizeof(struct sensor));
+ }
+
+ sn->record_id = record_id;
+ sn->sensor_number = sensor_number;
+ sn->sensor_type = sensor_type;
+ sn->sensor_state = sensor_state;
+ sn->sensor_units = sensor_units;
+ sn->sensor_reading_type = sensor_reading_type;
+ sn->sensor_name = strdup(sensor_name);
+ if(!sn->sensor_name) {
+ fatal("cannot allocate %zu bytes of memory.", strlen(sensor_name));
+ }
+
+ sn->next = sensors_root;
+ sensors_root = sn;
+ }
+ else {
+ if(debug) fprintf(stderr, "Reusing sensor record for sensor '%s', id %d, number %d, type %d, state %d, units %d, reading_type %d\n", sensor_name, record_id, sensor_number, sensor_type, sensor_state, sensor_units, sensor_reading_type);
+ }
+
+ switch(sensor_reading_type) {
+ case IPMI_MONITORING_SENSOR_READING_TYPE_UNSIGNED_INTEGER8_BOOL:
+ sn->sensor_reading.bool_value = *((uint8_t *)sensor_reading);
+ sn->updated = 1;
+ netdata_sensors_collected++;
+ break;
+
+ case IPMI_MONITORING_SENSOR_READING_TYPE_UNSIGNED_INTEGER32:
+ sn->sensor_reading.uint32_value = *((uint32_t *)sensor_reading);
+ sn->updated = 1;
+ netdata_sensors_collected++;
+ break;
+
+ case IPMI_MONITORING_SENSOR_READING_TYPE_DOUBLE:
+ sn->sensor_reading.double_value = *((double *)sensor_reading);
+ sn->updated = 1;
+ netdata_sensors_collected++;
+ break;
+
+ default:
+ if(debug) fprintf(stderr, "Unknown reading type - Ignoring sensor record for sensor '%s', id %d, number %d, type %d, state %d, units %d, reading_type %d\n", sensor_name, record_id, sensor_number, sensor_type, sensor_state, sensor_units, sensor_reading_type);
+ sn->ignore = 1;
+ break;
+ }
+
+ // check if it is excluded
+ if(excluded_status_record_ids_check(record_id)) {
+ if(debug) fprintf(stderr, "Sensor '%s' is excluded for status check, by excluded_status_record_ids_check()\n", sensor_name);
+ return;
+ }
+
+ switch(sensor_state) {
+ case IPMI_MONITORING_STATE_NOMINAL:
+ netdata_sensors_states_nominal++;
+ break;
+
+ case IPMI_MONITORING_STATE_WARNING:
+ netdata_sensors_states_warning++;
+ break;
+
+ case IPMI_MONITORING_STATE_CRITICAL:
+ netdata_sensors_states_critical++;
+ break;
+
+ default:
+ break;
+ }
+}
+
+static void netdata_get_sel(
+ int record_id
+ , int record_type_class
+ , int sel_state
+) {
+ (void)record_id;
+ (void)record_type_class;
+ (void)sel_state;
+
+ netdata_sel_events++;
+}
+
+
+// END NETDATA CODE
+// ----------------------------------------------------------------------------
+
+
+static int
+_ipmimonitoring_sensors (struct ipmi_monitoring_ipmi_config *ipmi_config)
+{
+ ipmi_monitoring_ctx_t ctx = NULL;
+ unsigned int sensor_reading_flags = 0;
+ int i;
+ int sensor_count;
+ int rv = -1;
+
+ if (!(ctx = ipmi_monitoring_ctx_create ())) {
+ error("ipmi_monitoring_ctx_create()");
+ goto cleanup;
+ }
+
+ if (sdr_cache_directory)
+ {
+ if (ipmi_monitoring_ctx_sdr_cache_directory (ctx,
+ sdr_cache_directory) < 0)
+ {
+ error("ipmi_monitoring_ctx_sdr_cache_directory(): %s\n",
+ ipmi_monitoring_ctx_errormsg (ctx));
+ goto cleanup;
+ }
+ }
+
+    /* Must call this, otherwise only default interpretations are ever used */
+ if (sensor_config_file)
+ {
+ if (ipmi_monitoring_ctx_sensor_config_file (ctx,
+ sensor_config_file) < 0)
+ {
+ error( "ipmi_monitoring_ctx_sensor_config_file(): %s\n",
+ ipmi_monitoring_ctx_errormsg (ctx));
+ goto cleanup;
+ }
+ }
+ else
+ {
+ if (ipmi_monitoring_ctx_sensor_config_file (ctx, NULL) < 0)
+ {
+ error( "ipmi_monitoring_ctx_sensor_config_file(): %s\n",
+ ipmi_monitoring_ctx_errormsg (ctx));
+ goto cleanup;
+ }
+ }
+
+ if (reread_sdr_cache)
+ sensor_reading_flags |= IPMI_MONITORING_SENSOR_READING_FLAGS_REREAD_SDR_CACHE;
+
+ if (ignore_non_interpretable_sensors)
+ sensor_reading_flags |= IPMI_MONITORING_SENSOR_READING_FLAGS_IGNORE_NON_INTERPRETABLE_SENSORS;
+
+ if (bridge_sensors)
+ sensor_reading_flags |= IPMI_MONITORING_SENSOR_READING_FLAGS_BRIDGE_SENSORS;
+
+ if (interpret_oem_data)
+ sensor_reading_flags |= IPMI_MONITORING_SENSOR_READING_FLAGS_INTERPRET_OEM_DATA;
+
+ if (shared_sensors)
+ sensor_reading_flags |= IPMI_MONITORING_SENSOR_READING_FLAGS_SHARED_SENSORS;
+
+ if (discrete_reading)
+ sensor_reading_flags |= IPMI_MONITORING_SENSOR_READING_FLAGS_DISCRETE_READING;
+
+ if (ignore_scanning_disabled)
+ sensor_reading_flags |= IPMI_MONITORING_SENSOR_READING_FLAGS_IGNORE_SCANNING_DISABLED;
+
+ if (assume_bmc_owner)
+ sensor_reading_flags |= IPMI_MONITORING_SENSOR_READING_FLAGS_ASSUME_BMC_OWNER;
+
+#ifdef IPMI_MONITORING_SENSOR_READING_FLAGS_ENTITY_SENSOR_NAMES
+ if (entity_sensor_names)
+ sensor_reading_flags |= IPMI_MONITORING_SENSOR_READING_FLAGS_ENTITY_SENSOR_NAMES;
+#endif // IPMI_MONITORING_SENSOR_READING_FLAGS_ENTITY_SENSOR_NAMES
+
+ if (!record_ids_length && !sensor_types_length)
+ {
+ if ((sensor_count = ipmi_monitoring_sensor_readings_by_record_id (ctx,
+ hostname,
+ ipmi_config,
+ sensor_reading_flags,
+ NULL,
+ 0,
+ NULL,
+ NULL)) < 0)
+ {
+ error( "ipmi_monitoring_sensor_readings_by_record_id(): %s",
+ ipmi_monitoring_ctx_errormsg (ctx));
+ goto cleanup;
+ }
+ }
+ else if (record_ids_length)
+ {
+ if ((sensor_count = ipmi_monitoring_sensor_readings_by_record_id (ctx,
+ hostname,
+ ipmi_config,
+ sensor_reading_flags,
+ record_ids,
+ record_ids_length,
+ NULL,
+ NULL)) < 0)
+ {
+ error( "ipmi_monitoring_sensor_readings_by_record_id(): %s",
+ ipmi_monitoring_ctx_errormsg (ctx));
+ goto cleanup;
+ }
+ }
+ else
+ {
+ if ((sensor_count = ipmi_monitoring_sensor_readings_by_sensor_type (ctx,
+ hostname,
+ ipmi_config,
+ sensor_reading_flags,
+ sensor_types,
+ sensor_types_length,
+ NULL,
+ NULL)) < 0)
+ {
+ error( "ipmi_monitoring_sensor_readings_by_sensor_type(): %s",
+ ipmi_monitoring_ctx_errormsg (ctx));
+ goto cleanup;
+ }
+ }
+
+#ifdef NETDATA_COMMENTED
+ printf ("%s, %s, %s, %s, %s, %s, %s, %s, %s, %s\n",
+ "Record ID",
+ "Sensor Name",
+ "Sensor Number",
+ "Sensor Type",
+ "Sensor State",
+ "Sensor Reading",
+ "Sensor Units",
+ "Sensor Event/Reading Type Code",
+ "Sensor Event Bitmask",
+ "Sensor Event String");
+#endif // NETDATA_COMMENTED
+
+ for (i = 0; i < sensor_count; i++, ipmi_monitoring_sensor_iterator_next (ctx))
+ {
+ int record_id, sensor_number, sensor_type, sensor_state, sensor_units,
+ sensor_reading_type;
+
+#ifdef NETDATA_COMMENTED
+ int sensor_bitmask_type, sensor_bitmask, event_reading_type_code;
+ char **sensor_bitmask_strings = NULL;
+ const char *sensor_type_str;
+ const char *sensor_state_str;
+#endif // NETDATA_COMMENTED
+
+ char *sensor_name = NULL;
+ void *sensor_reading;
+
+ if ((record_id = ipmi_monitoring_sensor_read_record_id (ctx)) < 0)
+ {
+ error( "ipmi_monitoring_sensor_read_record_id(): %s",
+ ipmi_monitoring_ctx_errormsg (ctx));
+ goto cleanup;
+ }
+
+ if ((sensor_number = ipmi_monitoring_sensor_read_sensor_number (ctx)) < 0)
+ {
+ error( "ipmi_monitoring_sensor_read_sensor_number(): %s",
+ ipmi_monitoring_ctx_errormsg (ctx));
+ goto cleanup;
+ }
+
+ if ((sensor_type = ipmi_monitoring_sensor_read_sensor_type (ctx)) < 0)
+ {
+ error( "ipmi_monitoring_sensor_read_sensor_type(): %s",
+ ipmi_monitoring_ctx_errormsg (ctx));
+ goto cleanup;
+ }
+
+ if (!(sensor_name = ipmi_monitoring_sensor_read_sensor_name (ctx)))
+ {
+ error( "ipmi_monitoring_sensor_read_sensor_name(): %s",
+ ipmi_monitoring_ctx_errormsg (ctx));
+ goto cleanup;
+ }
+
+ if ((sensor_state = ipmi_monitoring_sensor_read_sensor_state (ctx)) < 0)
+ {
+ error( "ipmi_monitoring_sensor_read_sensor_state(): %s",
+ ipmi_monitoring_ctx_errormsg (ctx));
+ goto cleanup;
+ }
+
+ if ((sensor_units = ipmi_monitoring_sensor_read_sensor_units (ctx)) < 0)
+ {
+ error( "ipmi_monitoring_sensor_read_sensor_units(): %s",
+ ipmi_monitoring_ctx_errormsg (ctx));
+ goto cleanup;
+ }
+
+#ifdef NETDATA_COMMENTED
+ if ((sensor_bitmask_type = ipmi_monitoring_sensor_read_sensor_bitmask_type (ctx)) < 0)
+ {
+ error( "ipmi_monitoring_sensor_read_sensor_bitmask_type(): %s",
+ ipmi_monitoring_ctx_errormsg (ctx));
+ goto cleanup;
+ }
+ if ((sensor_bitmask = ipmi_monitoring_sensor_read_sensor_bitmask (ctx)) < 0)
+ {
+ error(
+ "ipmi_monitoring_sensor_read_sensor_bitmask(): %s",
+ ipmi_monitoring_ctx_errormsg (ctx));
+ goto cleanup;
+ }
+
+ /* it's ok for this to be NULL, i.e. sensor_bitmask ==
+ * IPMI_MONITORING_SENSOR_BITMASK_TYPE_UNKNOWN
+ */
+ sensor_bitmask_strings = ipmi_monitoring_sensor_read_sensor_bitmask_strings (ctx);
+
+
+
+#endif // NETDATA_COMMENTED
+
+ if ((sensor_reading_type = ipmi_monitoring_sensor_read_sensor_reading_type (ctx)) < 0)
+ {
+ error( "ipmi_monitoring_sensor_read_sensor_reading_type(): %s",
+ ipmi_monitoring_ctx_errormsg (ctx));
+ goto cleanup;
+ }
+
+ sensor_reading = ipmi_monitoring_sensor_read_sensor_reading (ctx);
+
+#ifdef NETDATA_COMMENTED
+ if ((event_reading_type_code = ipmi_monitoring_sensor_read_event_reading_type_code (ctx)) < 0)
+ {
+ error( "ipmi_monitoring_sensor_read_event_reading_type_code(): %s",
+ ipmi_monitoring_ctx_errormsg (ctx));
+ goto cleanup;
+ }
+#endif // NETDATA_COMMENTED
+
+ netdata_get_sensor(
+ record_id
+ , sensor_number
+ , sensor_type
+ , sensor_state
+ , sensor_units
+ , sensor_reading_type
+ , sensor_name
+ , sensor_reading
+ );
+
+#ifdef NETDATA_COMMENTED
+ if (!strlen (sensor_name))
+ sensor_name = "N/A";
+
+ sensor_type_str = _get_sensor_type_string (sensor_type);
+
+ printf ("%d, %s, %d, %s",
+ record_id,
+ sensor_name,
+ sensor_number,
+ sensor_type_str);
+
+ if (sensor_state == IPMI_MONITORING_STATE_NOMINAL)
+ sensor_state_str = "Nominal";
+ else if (sensor_state == IPMI_MONITORING_STATE_WARNING)
+ sensor_state_str = "Warning";
+ else if (sensor_state == IPMI_MONITORING_STATE_CRITICAL)
+ sensor_state_str = "Critical";
+ else
+ sensor_state_str = "N/A";
+
+ printf (", %s", sensor_state_str);
+
+ if (sensor_reading)
+ {
+ const char *sensor_units_str;
+
+ if (sensor_reading_type == IPMI_MONITORING_SENSOR_READING_TYPE_UNSIGNED_INTEGER8_BOOL)
+ printf (", %s",
+ (*((uint8_t *)sensor_reading) ? "true" : "false"));
+ else if (sensor_reading_type == IPMI_MONITORING_SENSOR_READING_TYPE_UNSIGNED_INTEGER32)
+ printf (", %u",
+ *((uint32_t *)sensor_reading));
+ else if (sensor_reading_type == IPMI_MONITORING_SENSOR_READING_TYPE_DOUBLE)
+ printf (", %.2f",
+ *((double *)sensor_reading));
+ else
+ printf (", N/A");
+
+ if (sensor_units == IPMI_MONITORING_SENSOR_UNITS_CELSIUS)
+ sensor_units_str = "C";
+ else if (sensor_units == IPMI_MONITORING_SENSOR_UNITS_FAHRENHEIT)
+ sensor_units_str = "F";
+ else if (sensor_units == IPMI_MONITORING_SENSOR_UNITS_VOLTS)
+ sensor_units_str = "V";
+ else if (sensor_units == IPMI_MONITORING_SENSOR_UNITS_AMPS)
+ sensor_units_str = "A";
+ else if (sensor_units == IPMI_MONITORING_SENSOR_UNITS_RPM)
+ sensor_units_str = "RPM";
+ else if (sensor_units == IPMI_MONITORING_SENSOR_UNITS_WATTS)
+ sensor_units_str = "W";
+ else if (sensor_units == IPMI_MONITORING_SENSOR_UNITS_PERCENT)
+ sensor_units_str = "%";
+ else
+ sensor_units_str = "N/A";
+
+ printf (", %s", sensor_units_str);
+ }
+ else
+ printf (", N/A, N/A");
+
+ printf (", %Xh", event_reading_type_code);
+
+ /* It is possible you may want to monitor specific event
+ * conditions that may occur. If that is the case, you may want
+ * to check out what specific bitmask type and bitmask events
+ * occurred. See ipmi_monitoring_bitmasks.h for a list of
+ * bitmasks and types.
+ */
+
+ if (sensor_bitmask_type != IPMI_MONITORING_SENSOR_BITMASK_TYPE_UNKNOWN)
+ printf (", %Xh", sensor_bitmask);
+ else
+ printf (", N/A");
+
+ if (sensor_bitmask_type != IPMI_MONITORING_SENSOR_BITMASK_TYPE_UNKNOWN
+ && sensor_bitmask_strings)
+ {
+ unsigned int i = 0;
+
+ printf (",");
+
+ while (sensor_bitmask_strings[i])
+ {
+ printf (" ");
+
+ printf ("'%s'",
+ sensor_bitmask_strings[i]);
+
+ i++;
+ }
+ }
+ else
+ printf (", N/A");
+
+ printf ("\n");
+#endif // NETDATA_COMMENTED
+ }
+
+ rv = 0;
+ cleanup:
+ if (ctx)
+ ipmi_monitoring_ctx_destroy (ctx);
+ return (rv);
+}
+
+
+static int
+_ipmimonitoring_sel (struct ipmi_monitoring_ipmi_config *ipmi_config)
+{
+ ipmi_monitoring_ctx_t ctx = NULL;
+ unsigned int sel_flags = 0;
+ int i;
+ int sel_count;
+ int rv = -1;
+
+ if (!(ctx = ipmi_monitoring_ctx_create ()))
+ {
+ error("ipmi_monitoring_ctx_create()");
+ goto cleanup;
+ }
+
+ if (sdr_cache_directory)
+ {
+ if (ipmi_monitoring_ctx_sdr_cache_directory (ctx,
+ sdr_cache_directory) < 0)
+ {
+ error( "ipmi_monitoring_ctx_sdr_cache_directory(): %s",
+ ipmi_monitoring_ctx_errormsg (ctx));
+ goto cleanup;
+ }
+ }
+
+    /* Must call this, otherwise only default interpretations are ever used */
+ if (sel_config_file)
+ {
+ if (ipmi_monitoring_ctx_sel_config_file (ctx,
+ sel_config_file) < 0)
+ {
+ error( "ipmi_monitoring_ctx_sel_config_file(): %s",
+ ipmi_monitoring_ctx_errormsg (ctx));
+ goto cleanup;
+ }
+ }
+ else
+ {
+ if (ipmi_monitoring_ctx_sel_config_file (ctx, NULL) < 0)
+ {
+ error( "ipmi_monitoring_ctx_sel_config_file(): %s",
+ ipmi_monitoring_ctx_errormsg (ctx));
+ goto cleanup;
+ }
+ }
+
+ if (reread_sdr_cache)
+ sel_flags |= IPMI_MONITORING_SEL_FLAGS_REREAD_SDR_CACHE;
+
+ if (interpret_oem_data)
+ sel_flags |= IPMI_MONITORING_SEL_FLAGS_INTERPRET_OEM_DATA;
+
+ if (assume_system_event_record)
+ sel_flags |= IPMI_MONITORING_SEL_FLAGS_ASSUME_SYSTEM_EVENT_RECORD;
+
+#ifdef IPMI_MONITORING_SEL_FLAGS_ENTITY_SENSOR_NAMES
+ if (entity_sensor_names)
+ sel_flags |= IPMI_MONITORING_SEL_FLAGS_ENTITY_SENSOR_NAMES;
+#endif // IPMI_MONITORING_SEL_FLAGS_ENTITY_SENSOR_NAMES
+
+ if (record_ids_length)
+ {
+ if ((sel_count = ipmi_monitoring_sel_by_record_id (ctx,
+ hostname,
+ ipmi_config,
+ sel_flags,
+ record_ids,
+ record_ids_length,
+ NULL,
+ NULL)) < 0)
+ {
+ error( "ipmi_monitoring_sel_by_record_id(): %s",
+ ipmi_monitoring_ctx_errormsg (ctx));
+ goto cleanup;
+ }
+ }
+ else if (sensor_types_length)
+ {
+ if ((sel_count = ipmi_monitoring_sel_by_sensor_type (ctx,
+ hostname,
+ ipmi_config,
+ sel_flags,
+ sensor_types,
+ sensor_types_length,
+ NULL,
+ NULL)) < 0)
+ {
+ error( "ipmi_monitoring_sel_by_sensor_type(): %s",
+ ipmi_monitoring_ctx_errormsg (ctx));
+ goto cleanup;
+ }
+ }
+ else if (date_begin
+ || date_end)
+ {
+ if ((sel_count = ipmi_monitoring_sel_by_date_range (ctx,
+ hostname,
+ ipmi_config,
+ sel_flags,
+ date_begin,
+ date_end,
+ NULL,
+ NULL)) < 0)
+ {
+ error( "ipmi_monitoring_sel_by_sensor_type(): %s",
+ ipmi_monitoring_ctx_errormsg (ctx));
+ goto cleanup;
+ }
+ }
+ else
+ {
+ if ((sel_count = ipmi_monitoring_sel_by_record_id (ctx,
+ hostname,
+ ipmi_config,
+ sel_flags,
+ NULL,
+ 0,
+ NULL,
+ NULL)) < 0)
+ {
+ error( "ipmi_monitoring_sel_by_record_id(): %s",
+ ipmi_monitoring_ctx_errormsg (ctx));
+ goto cleanup;
+ }
+ }
+
+#ifdef NETDATA_COMMENTED
+ printf ("%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s\n",
+ "Record ID",
+ "Record Type",
+ "SEL State",
+ "Timestamp",
+ "Sensor Name",
+ "Sensor Type",
+ "Event Direction",
+ "Event Type Code",
+ "Event Data",
+ "Event Offset",
+ "Event Offset String");
+#endif // NETDATA_COMMENTED
+
+ for (i = 0; i < sel_count; i++, ipmi_monitoring_sel_iterator_next (ctx))
+ {
+ int record_id, record_type, sel_state, record_type_class;
+#ifdef NETDATA_COMMENTED
+ int sensor_type, sensor_number, event_direction,
+ event_offset_type, event_offset, event_type_code, manufacturer_id;
+ unsigned int timestamp, event_data1, event_data2, event_data3;
+ char *event_offset_string = NULL;
+ const char *sensor_type_str;
+ const char *event_direction_str;
+ const char *sel_state_str;
+ char *sensor_name = NULL;
+ unsigned char oem_data[64];
+ int oem_data_len;
+ unsigned int j;
+#endif // NETDATA_COMMENTED
+
+ if ((record_id = ipmi_monitoring_sel_read_record_id (ctx)) < 0)
+ {
+ error( "ipmi_monitoring_sel_read_record_id(): %s",
+ ipmi_monitoring_ctx_errormsg (ctx));
+ goto cleanup;
+ }
+
+ if ((record_type = ipmi_monitoring_sel_read_record_type (ctx)) < 0)
+ {
+ error( "ipmi_monitoring_sel_read_record_type(): %s",
+ ipmi_monitoring_ctx_errormsg (ctx));
+ goto cleanup;
+ }
+
+ if ((record_type_class = ipmi_monitoring_sel_read_record_type_class (ctx)) < 0)
+ {
+ error( "ipmi_monitoring_sel_read_record_type_class(): %s",
+ ipmi_monitoring_ctx_errormsg (ctx));
+ goto cleanup;
+ }
+
+ if ((sel_state = ipmi_monitoring_sel_read_sel_state (ctx)) < 0)
+ {
+ error( "ipmi_monitoring_sel_read_sel_state(): %s",
+ ipmi_monitoring_ctx_errormsg (ctx));
+ goto cleanup;
+ }
+
+ netdata_get_sel(
+ record_id
+ , record_type_class
+ , sel_state
+ );
+
+#ifdef NETDATA_COMMENTED
+ if (sel_state == IPMI_MONITORING_STATE_NOMINAL)
+ sel_state_str = "Nominal";
+ else if (sel_state == IPMI_MONITORING_STATE_WARNING)
+ sel_state_str = "Warning";
+ else if (sel_state == IPMI_MONITORING_STATE_CRITICAL)
+ sel_state_str = "Critical";
+ else
+ sel_state_str = "N/A";
+
+ printf ("%d, %d, %s",
+ record_id,
+ record_type,
+ sel_state_str);
+
+ if (record_type_class == IPMI_MONITORING_SEL_RECORD_TYPE_CLASS_SYSTEM_EVENT_RECORD
+ || record_type_class == IPMI_MONITORING_SEL_RECORD_TYPE_CLASS_TIMESTAMPED_OEM_RECORD)
+ {
+
+ if (ipmi_monitoring_sel_read_timestamp (ctx, &timestamp) < 0)
+ {
+ error( "ipmi_monitoring_sel_read_timestamp(): %s",
+ ipmi_monitoring_ctx_errormsg (ctx));
+ goto cleanup;
+ }
+
+ /* XXX: This should be converted to a nice date output using
+ * your favorite timestamp -> string conversion functions.
+ */
+ printf (", %u", timestamp);
+ }
+ else
+ printf (", N/A");
+
+ if (record_type_class == IPMI_MONITORING_SEL_RECORD_TYPE_CLASS_SYSTEM_EVENT_RECORD)
+ {
+ /* If you are integrating ipmimonitoring SEL into a monitoring application,
+ * you may wish to count the number of times a specific error occurred
+ * and report that to the monitoring application.
+ *
+ * In this particular case, you'll probably want to check out
+ * what sensor type each SEL event is reporting, the
+ * event offset type, and the specific event offset that occurred.
+ *
+ * See ipmi_monitoring_offsets.h for a list of event offsets
+ * and types.
+ */
+
+ if (!(sensor_name = ipmi_monitoring_sel_read_sensor_name (ctx)))
+ {
+ error( "ipmi_monitoring_sel_read_sensor_name(): %s",
+ ipmi_monitoring_ctx_errormsg (ctx));
+ goto cleanup;
+ }
+
+ if ((sensor_type = ipmi_monitoring_sel_read_sensor_type (ctx)) < 0)
+ {
+ error( "ipmi_monitoring_sel_read_sensor_type(): %s",
+ ipmi_monitoring_ctx_errormsg (ctx));
+ goto cleanup;
+ }
+
+ if ((sensor_number = ipmi_monitoring_sel_read_sensor_number (ctx)) < 0)
+ {
+ error( "ipmi_monitoring_sel_read_sensor_number(): %s",
+ ipmi_monitoring_ctx_errormsg (ctx));
+ goto cleanup;
+ }
+
+ if ((event_direction = ipmi_monitoring_sel_read_event_direction (ctx)) < 0)
+ {
+ error( "ipmi_monitoring_sel_read_event_direction(): %s",
+ ipmi_monitoring_ctx_errormsg (ctx));
+ goto cleanup;
+ }
+
+ if ((event_type_code = ipmi_monitoring_sel_read_event_type_code (ctx)) < 0)
+ {
+ error( "ipmi_monitoring_sel_read_event_type_code(): %s",
+ ipmi_monitoring_ctx_errormsg (ctx));
+ goto cleanup;
+ }
+
+ if (ipmi_monitoring_sel_read_event_data (ctx,
+ &event_data1,
+ &event_data2,
+ &event_data3) < 0)
+ {
+ error( "ipmi_monitoring_sel_read_event_data(): %s",
+ ipmi_monitoring_ctx_errormsg (ctx));
+ goto cleanup;
+ }
+
+ if ((event_offset_type = ipmi_monitoring_sel_read_event_offset_type (ctx)) < 0)
+ {
+ error( "ipmi_monitoring_sel_read_event_offset_type(): %s",
+ ipmi_monitoring_ctx_errormsg (ctx));
+ goto cleanup;
+ }
+
+ if ((event_offset = ipmi_monitoring_sel_read_event_offset (ctx)) < 0)
+ {
+ error( "ipmi_monitoring_sel_read_event_offset(): %s",
+ ipmi_monitoring_ctx_errormsg (ctx));
+ goto cleanup;
+ }
+
+ if (!(event_offset_string = ipmi_monitoring_sel_read_event_offset_string (ctx)))
+ {
+ error( "ipmi_monitoring_sel_read_event_offset_string(): %s",
+ ipmi_monitoring_ctx_errormsg (ctx));
+ goto cleanup;
+ }
+
+ if (!strlen (sensor_name))
+ sensor_name = "N/A";
+
+ sensor_type_str = _get_sensor_type_string (sensor_type);
+
+ if (event_direction == IPMI_MONITORING_SEL_EVENT_DIRECTION_ASSERTION)
+ event_direction_str = "Assertion";
+ else
+ event_direction_str = "Deassertion";
+
+ printf (", %s, %s, %d, %s, %Xh, %Xh-%Xh-%Xh",
+ sensor_name,
+ sensor_type_str,
+ sensor_number,
+ event_direction_str,
+ event_type_code,
+ event_data1,
+ event_data2,
+ event_data3);
+
+ if (event_offset_type != IPMI_MONITORING_EVENT_OFFSET_TYPE_UNKNOWN)
+ printf (", %Xh", event_offset);
+ else
+ printf (", N/A");
+
+ if (event_offset_type != IPMI_MONITORING_EVENT_OFFSET_TYPE_UNKNOWN)
+ printf (", %s", event_offset_string);
+ else
+ printf (", N/A");
+ }
+ else if (record_type_class == IPMI_MONITORING_SEL_RECORD_TYPE_CLASS_TIMESTAMPED_OEM_RECORD
+ || record_type_class == IPMI_MONITORING_SEL_RECORD_TYPE_CLASS_NON_TIMESTAMPED_OEM_RECORD)
+ {
+ if (record_type_class == IPMI_MONITORING_SEL_RECORD_TYPE_CLASS_TIMESTAMPED_OEM_RECORD)
+ {
+ if ((manufacturer_id = ipmi_monitoring_sel_read_manufacturer_id (ctx)) < 0)
+ {
+ error( "ipmi_monitoring_sel_read_manufacturer_id(): %s",
+ ipmi_monitoring_ctx_errormsg (ctx));
+ goto cleanup;
+ }
+
+ printf (", Manufacturer ID = %Xh", manufacturer_id);
+ }
+
+            if ((oem_data_len = ipmi_monitoring_sel_read_oem_data (ctx, oem_data, sizeof(oem_data))) < 0)
+ {
+ error( "ipmi_monitoring_sel_read_oem_data(): %s",
+ ipmi_monitoring_ctx_errormsg (ctx));
+ goto cleanup;
+ }
+
+ printf (", OEM Data = ");
+
+ for (j = 0; j < oem_data_len; j++)
+ printf ("%02Xh ", oem_data[j]);
+ }
+ else
+ printf (", N/A, N/A, N/A, N/A, N/A, N/A, N/A");
+
+ printf ("\n");
+#endif // NETDATA_COMMENTED
+ }
+
+ rv = 0;
+ cleanup:
+ if (ctx)
+ ipmi_monitoring_ctx_destroy (ctx);
+ return (rv);
+}
+
+// ----------------------------------------------------------------------------
+// MAIN PROGRAM FOR NETDATA PLUGIN
+
+int ipmi_collect_data(struct ipmi_monitoring_ipmi_config *ipmi_config) {
+ errno = 0;
+
+ if (_ipmimonitoring_sensors(ipmi_config) < 0) return -1;
+
+ if(netdata_do_sel) {
+ if(_ipmimonitoring_sel(ipmi_config) < 0) return -2;
+ }
+
+ return 0;
+}
+
+int ipmi_detect_speed_secs(struct ipmi_monitoring_ipmi_config *ipmi_config) {
+ int i, checks = 10;
+ unsigned long long total = 0;
+
+ for(i = 0 ; i < checks ; i++) {
+ if(debug) fprintf(stderr, "freeipmi.plugin: checking data collection speed iteration %d of %d\n", i+1, checks);
+
+ // measure the time a data collection needs
+ unsigned long long start = now_realtime_usec();
+ if(ipmi_collect_data(ipmi_config) < 0)
+ fatal("freeipmi.plugin: data collection failed.");
+
+ unsigned long long end = now_realtime_usec();
+
+ if(debug) fprintf(stderr, "freeipmi.plugin: data collection speed was %llu usec\n", end - start);
+
+ // add it to our total
+ total += end - start;
+
+ // wait the same time
+ // to avoid flooding the IPMI processor with requests
+ sleep_usec(end - start);
+ }
+
+    // assume a safe update frequency is 2x the average collection time:
+    // double the total, divide by the number of checks for the average in
+    // microseconds, convert to seconds and round up to the next whole second
+    // (e.g. an average of 800ms gives a frequency of 2 seconds)
+
+ return (int)(( total * 2 / checks / 1000000 ) + 1);
+}
+
+int parse_inband_driver_type (const char *str)
+{
+ fatal_assert(str);
+
+ if (strcasecmp (str, IPMI_PARSE_DEVICE_KCS_STR) == 0)
+ return (IPMI_MONITORING_DRIVER_TYPE_KCS);
+ else if (strcasecmp (str, IPMI_PARSE_DEVICE_SSIF_STR) == 0)
+ return (IPMI_MONITORING_DRIVER_TYPE_SSIF);
+ /* support "open" for those that might be used to
+ * ipmitool.
+ */
+ else if (strcasecmp (str, IPMI_PARSE_DEVICE_OPENIPMI_STR) == 0
+ || strcasecmp (str, IPMI_PARSE_DEVICE_OPENIPMI_STR2) == 0)
+ return (IPMI_MONITORING_DRIVER_TYPE_OPENIPMI);
+ /* support "bmc" for those that might be used to
+ * ipmitool.
+ */
+ else if (strcasecmp (str, IPMI_PARSE_DEVICE_SUNBMC_STR) == 0
+ || strcasecmp (str, IPMI_PARSE_DEVICE_SUNBMC_STR2) == 0)
+ return (IPMI_MONITORING_DRIVER_TYPE_SUNBMC);
+
+ return (-1);
+}
+
+int parse_outofband_driver_type (const char *str)
+{
+ fatal_assert(str);
+
+ if (strcasecmp (str, IPMI_PARSE_DEVICE_LAN_STR) == 0)
+ return (IPMI_MONITORING_PROTOCOL_VERSION_1_5);
+ /* support "lanplus" for those that might be used to ipmitool.
+     * support common typo variants for convenience.
+ */
+ else if (strcasecmp (str, IPMI_PARSE_DEVICE_LAN_2_0_STR) == 0
+ || strcasecmp (str, IPMI_PARSE_DEVICE_LAN_2_0_STR2) == 0
+ || strcasecmp (str, IPMI_PARSE_DEVICE_LAN_2_0_STR3) == 0
+ || strcasecmp (str, IPMI_PARSE_DEVICE_LAN_2_0_STR4) == 0
+ || strcasecmp (str, IPMI_PARSE_DEVICE_LAN_2_0_STR5) == 0)
+ return (IPMI_MONITORING_PROTOCOL_VERSION_2_0);
+
+ return (-1);
+}
+
+int main (int argc, char **argv) {
+
+ // ------------------------------------------------------------------------
+ // initialization of netdata plugin
+
+ program_name = "freeipmi.plugin";
+
+ // disable syslog
+ error_log_syslog = 0;
+
+ // set errors flood protection to 100 logs per hour
+ error_log_errors_per_period = 100;
+ error_log_throttle_period = 3600;
+
+
+ // ------------------------------------------------------------------------
+ // parse command line parameters
+
+ int i, freq = 0;
+ for(i = 1; i < argc ; i++) {
+ if(isdigit(*argv[i]) && !freq) {
+ int n = str2i(argv[i]);
+ if(n > 0 && n < 86400) {
+ freq = n;
+ continue;
+ }
+ }
+ else if(strcmp("version", argv[i]) == 0 || strcmp("-version", argv[i]) == 0 || strcmp("--version", argv[i]) == 0 || strcmp("-v", argv[i]) == 0 || strcmp("-V", argv[i]) == 0) {
+ printf("freeipmi.plugin %s\n", VERSION);
+ exit(0);
+ }
+ else if(strcmp("debug", argv[i]) == 0) {
+ debug = 1;
+ continue;
+ }
+ else if(strcmp("sel", argv[i]) == 0) {
+ netdata_do_sel = 1;
+ continue;
+ }
+ else if(strcmp("no-sel", argv[i]) == 0) {
+ netdata_do_sel = 0;
+ continue;
+ }
+ else if(strcmp("-h", argv[i]) == 0 || strcmp("--help", argv[i]) == 0) {
+ fprintf(stderr,
+ "\n"
+ " netdata freeipmi.plugin %s\n"
+ " Copyright (C) 2016-2017 Costa Tsaousis <costa@tsaousis.gr>\n"
+ " Released under GNU General Public License v3 or later.\n"
+ " All rights reserved.\n"
+ "\n"
+ " This program is a data collector plugin for netdata.\n"
+ "\n"
+ " Available command line options:\n"
+ "\n"
+ " SECONDS data collection frequency\n"
+ " minimum: %d\n"
+ "\n"
+ " debug enable verbose output\n"
+ " default: disabled\n"
+ "\n"
+ " sel\n"
+ " no-sel enable/disable SEL collection\n"
+ " default: %s\n"
+ "\n"
+ " hostname HOST\n"
+ " username USER\n"
+ " password PASS connect to remote IPMI host\n"
+ " default: local IPMI processor\n"
+ "\n"
+ " driver-type IPMIDRIVER\n"
+                    "                          Specify the driver type to use instead of performing auto-selection.\n"
+                    "                          The currently available out-of-band drivers are LAN and LAN_2_0,\n"
+                    "                          which use the IPMI 1.5 and IPMI 2.0 protocols respectively.\n"
+                    "                          The currently available in-band drivers are KCS, SSIF, OPENIPMI and SUNBMC.\n"
+ "\n"
+ " sdr-cache-dir PATH directory for SDR cache files\n"
+ " default: %s\n"
+ "\n"
+ " sensor-config-file FILE filename to read sensor configuration\n"
+ " default: %s\n"
+ "\n"
+ " ignore N1,N2,N3,... sensor IDs to ignore\n"
+ " default: none\n"
+ "\n"
+ " ignore-status N1,N2,N3,... sensor IDs to ignore status (nominal/warning/critical)\n"
+ " default: none\n"
+ "\n"
+ " -v\n"
+ " -V\n"
+ " version print version and exit\n"
+ "\n"
+ " Linux kernel module for IPMI is CPU hungry.\n"
+ " On Linux run this to lower kipmiN CPU utilization:\n"
+ " # echo 10 > /sys/module/ipmi_si/parameters/kipmid_max_busy_us\n"
+ "\n"
+ " or create: /etc/modprobe.d/ipmi.conf with these contents:\n"
+ " options ipmi_si kipmid_max_busy_us=10\n"
+ "\n"
+ " For more information:\n"
+ " https://github.com/netdata/netdata/tree/master/collectors/freeipmi.plugin\n"
+ "\n"
+ , VERSION
+ , netdata_update_every
+ , netdata_do_sel?"enabled":"disabled"
+ , sdr_cache_directory?sdr_cache_directory:"system default"
+ , sensor_config_file?sensor_config_file:"system default"
+ );
+ exit(1);
+ }
+ else if(i < argc && strcmp("hostname", argv[i]) == 0) {
+ hostname = strdupz(argv[++i]);
+ char *s = argv[i];
+            // mask it so it is hidden from the process tree
+ while(*s) *s++ = 'x';
+ if(debug) fprintf(stderr, "freeipmi.plugin: hostname set to '%s'\n", hostname);
+ continue;
+ }
+ else if(i < argc && strcmp("username", argv[i]) == 0) {
+ username = strdupz(argv[++i]);
+ char *s = argv[i];
+            // mask it so it is hidden from the process tree
+ while(*s) *s++ = 'x';
+ if(debug) fprintf(stderr, "freeipmi.plugin: username set to '%s'\n", username);
+ continue;
+ }
+ else if(i < argc && strcmp("password", argv[i]) == 0) {
+ password = strdupz(argv[++i]);
+ char *s = argv[i];
+            // mask it so it is hidden from the process tree
+ while(*s) *s++ = 'x';
+ if(debug) fprintf(stderr, "freeipmi.plugin: password set to '%s'\n", password);
+ continue;
+ }
+ else if(strcmp("driver-type", argv[i]) == 0) {
+ if (hostname) {
+ protocol_version=parse_outofband_driver_type(argv[++i]);
+                if(debug) fprintf(stderr, "freeipmi.plugin: out-of-band protocol version set to '%d'\n", protocol_version);
+ }
+ else {
+ driver_type=parse_inband_driver_type(argv[++i]);
+                if(debug) fprintf(stderr, "freeipmi.plugin: in-band driver type set to '%d'\n", driver_type);
+ }
+ continue;
+ }
+ else if(i < argc && strcmp("sdr-cache-dir", argv[i]) == 0) {
+ sdr_cache_directory = argv[++i];
+ if(debug) fprintf(stderr, "freeipmi.plugin: SDR cache directory set to '%s'\n", sdr_cache_directory);
+ continue;
+ }
+ else if(i < argc && strcmp("sensor-config-file", argv[i]) == 0) {
+ sensor_config_file = argv[++i];
+ if(debug) fprintf(stderr, "freeipmi.plugin: sensor config file set to '%s'\n", sensor_config_file);
+ continue;
+ }
+ else if(i < argc && strcmp("ignore", argv[i]) == 0) {
+ excluded_record_ids_parse(argv[++i]);
+ continue;
+ }
+ else if(i < argc && strcmp("ignore-status", argv[i]) == 0) {
+ excluded_status_record_ids_parse(argv[++i]);
+ continue;
+ }
+
+ error("freeipmi.plugin: ignoring parameter '%s'", argv[i]);
+ }
+
+ errno = 0;
+
+ if(freq >= netdata_update_every)
+ netdata_update_every = freq;
+
+ else if(freq)
+ error("update frequency %d seconds is too small for IPMI. Using %d.", freq, netdata_update_every);
+
+
+ // ------------------------------------------------------------------------
+ // initialize IPMI
+
+ struct ipmi_monitoring_ipmi_config ipmi_config;
+
+ if(debug) fprintf(stderr, "freeipmi.plugin: calling _init_ipmi_config()\n");
+
+ _init_ipmi_config(&ipmi_config);
+
+ if(debug) {
+ fprintf(stderr, "freeipmi.plugin: calling ipmi_monitoring_init()\n");
+ ipmimonitoring_init_flags|=IPMI_MONITORING_FLAGS_DEBUG|IPMI_MONITORING_FLAGS_DEBUG_IPMI_PACKETS;
+ }
+
+ if(ipmi_monitoring_init(ipmimonitoring_init_flags, &errnum) < 0)
+ fatal("ipmi_monitoring_init: %s", ipmi_monitoring_ctx_strerror(errnum));
+
+ if(debug) fprintf(stderr, "freeipmi.plugin: detecting IPMI minimum update frequency...\n");
+ freq = ipmi_detect_speed_secs(&ipmi_config);
+ if(debug) fprintf(stderr, "freeipmi.plugin: IPMI minimum update frequency was calculated to %d seconds.\n", freq);
+
+ if(freq > netdata_update_every) {
+ info("enforcing minimum data collection frequency, calculated to %d seconds.", freq);
+ netdata_update_every = freq;
+ }
+
+
+ // ------------------------------------------------------------------------
+ // the main loop
+
+ if(debug) fprintf(stderr, "freeipmi.plugin: starting data collection\n");
+
+ time_t started_t = now_monotonic_sec();
+
+ size_t iteration = 0;
+ usec_t step = netdata_update_every * USEC_PER_SEC;
+
+ heartbeat_t hb;
+ heartbeat_init(&hb);
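+
+    // heartbeat_next() sleeps until the next multiple of 'step' and
+    // returns the microseconds that elapsed since the previous tick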
+ for(iteration = 0; 1 ; iteration++) {
+ usec_t dt = heartbeat_next(&hb, step);
+
+ if(debug && iteration)
+ fprintf(stderr, "freeipmi.plugin: iteration %zu, dt %llu usec, sensors collected %zu, sensors sent to netdata %zu \n"
+ , iteration
+ , dt
+ , netdata_sensors_collected
+ , netdata_sensors_updated
+ );
+
+ netdata_mark_as_not_updated();
+
+ if(debug) fprintf(stderr, "freeipmi.plugin: calling ipmi_collect_data()\n");
+ if(ipmi_collect_data(&ipmi_config) < 0)
+ fatal("data collection failed.");
+
+ if(debug) fprintf(stderr, "freeipmi.plugin: calling send_metrics_to_netdata()\n");
+ send_metrics_to_netdata();
+ fflush(stdout);
+
+        // restart check: exit after 14400 seconds (4 hours),
+        // so that netdata respawns the plugin with a fresh state
+ if(now_monotonic_sec() - started_t > 14400) exit(0);
+ }
+}
+
+#else // !HAVE_FREEIPMI
+
+int main(int argc, char **argv) {
+ fatal("freeipmi.plugin is not compiled.");
+}
+
+#endif // !HAVE_FREEIPMI
diff --git a/collectors/idlejitter.plugin/Makefile.am b/collectors/idlejitter.plugin/Makefile.am
new file mode 100644
index 0000000..161784b
--- /dev/null
+++ b/collectors/idlejitter.plugin/Makefile.am
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/collectors/idlejitter.plugin/README.md b/collectors/idlejitter.plugin/README.md
new file mode 100644
index 0000000..3703e2e
--- /dev/null
+++ b/collectors/idlejitter.plugin/README.md
@@ -0,0 +1,20 @@
+<!--
+title: "idlejitter.plugin"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/idlejitter.plugin/README.md
+-->
+
+# idlejitter.plugin
+
+It works like this:
+
+A thread is spawned that requests to sleep for 20000 microseconds (20ms).
+When the system wakes it up, it measures how many microseconds have passed.
+The difference between the requested and the actual duration of the sleep is the idle jitter.
+This is done at most 50 times per second, to ensure a good average.
+
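+In pseudo-C, the core of the measurement looks roughly like this (a simplified
+sketch using Netdata's time helpers, not the plugin's exact code):
+
+```c
+usec_t requested = 20 * USEC_PER_MS;                // 20ms
+usec_t before    = now_monotonic_usec();
+sleep_usec(requested);
+usec_t actual    = now_monotonic_usec() - before;   // how long we really slept
+usec_t jitter    = actual - requested;              // the idle jitter
+```
+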
+This number is useful:
+
+1. in real-time environments, when the CPU jitter can affect the quality of the service (like VoIP media gateways).
+2. in cloud infrastructure, where the host can pause the VM or container for a small duration to perform operations at the host level.
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fidlejitter.plugin%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/idlejitter.plugin/plugin_idlejitter.c b/collectors/idlejitter.plugin/plugin_idlejitter.c
new file mode 100644
index 0000000..c59541e
--- /dev/null
+++ b/collectors/idlejitter.plugin/plugin_idlejitter.c
@@ -0,0 +1,92 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "plugin_idlejitter.h"
+
+#define CPU_IDLEJITTER_SLEEP_TIME_MS 20
+
+static void cpuidlejitter_main_cleanup(void *ptr) {
+ struct netdata_static_thread *static_thread = (struct netdata_static_thread *)ptr;
+ static_thread->enabled = NETDATA_MAIN_THREAD_EXITING;
+
+ info("cleaning up...");
+
+ static_thread->enabled = NETDATA_MAIN_THREAD_EXITED;
+}
+
+void *cpuidlejitter_main(void *ptr) {
+ netdata_thread_cleanup_push(cpuidlejitter_main_cleanup, ptr);
+
+ usec_t sleep_ut = config_get_number("plugin:idlejitter", "loop time in ms", CPU_IDLEJITTER_SLEEP_TIME_MS) * USEC_PER_MS;
+ if(sleep_ut <= 0) {
+ config_set_number("plugin:idlejitter", "loop time in ms", CPU_IDLEJITTER_SLEEP_TIME_MS);
+ sleep_ut = CPU_IDLEJITTER_SLEEP_TIME_MS * USEC_PER_MS;
+ }
+
+ RRDSET *st = rrdset_create_localhost(
+ "system"
+ , "idlejitter"
+ , NULL
+ , "idlejitter"
+ , NULL
+ , "CPU Idle Jitter"
+ , "microseconds lost/s"
+ , "idlejitter.plugin"
+ , NULL
+ , NETDATA_CHART_PRIO_SYSTEM_IDLEJITTER
+ , localhost->rrd_update_every
+ , RRDSET_TYPE_AREA
+ );
+ RRDDIM *rd_min = rrddim_add(st, "min", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ RRDDIM *rd_max = rrddim_add(st, "max", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ RRDDIM *rd_avg = rrddim_add(st, "average", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+
+ usec_t update_every_ut = localhost->rrd_update_every * USEC_PER_SEC;
+ struct timeval before, after;
+ unsigned long long counter;
+
+ for(counter = 0; 1 ;counter++) {
+ int iterations = 0;
+ usec_t error_total = 0,
+ error_min = 0,
+ error_max = 0,
+ elapsed = 0;
+
+ if(netdata_exit) break;
+
+ while(elapsed < update_every_ut) {
+ now_monotonic_high_precision_timeval(&before);
+ sleep_usec(sleep_ut);
+ now_monotonic_high_precision_timeval(&after);
+
+ usec_t dt = dt_usec(&after, &before);
+ elapsed += dt;
+
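+            // e.g. a 20000us sleep that actually took 20150us yields an error of 150us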
+ usec_t error = dt - sleep_ut;
+ error_total += error;
+
+ if(unlikely(!iterations))
+ error_min = error;
+ else if(error < error_min)
+ error_min = error;
+
+ if(error > error_max)
+ error_max = error;
+
+ iterations++;
+ }
+
+ if(netdata_exit) break;
+
+ if(iterations) {
+ if (likely(counter)) rrdset_next(st);
+ rrddim_set_by_pointer(st, rd_min, error_min);
+ rrddim_set_by_pointer(st, rd_max, error_max);
+ rrddim_set_by_pointer(st, rd_avg, error_total / iterations);
+ rrdset_done(st);
+ }
+ }
+
+ netdata_thread_cleanup_pop(1);
+ return NULL;
+}
+
diff --git a/collectors/idlejitter.plugin/plugin_idlejitter.h b/collectors/idlejitter.plugin/plugin_idlejitter.h
new file mode 100644
index 0000000..62fabea
--- /dev/null
+++ b/collectors/idlejitter.plugin/plugin_idlejitter.h
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_PLUGIN_IDLEJITTER_H
+#define NETDATA_PLUGIN_IDLEJITTER_H 1
+
+#include "../../daemon/common.h"
+
+#define NETDATA_PLUGIN_HOOK_IDLEJITTER \
+ { \
+ .name = "PLUGIN[idlejitter]", \
+ .config_section = CONFIG_SECTION_PLUGINS, \
+ .config_name = "idlejitter", \
+ .enabled = 1, \
+ .thread = NULL, \
+ .init_routine = NULL, \
+ .start_routine = cpuidlejitter_main \
+ },
+
+extern void *cpuidlejitter_main(void *ptr);
+
+#endif /* NETDATA_PLUGIN_IDLEJITTER_H */
diff --git a/collectors/ioping.plugin/Makefile.am b/collectors/ioping.plugin/Makefile.am
new file mode 100644
index 0000000..a9cd7c4
--- /dev/null
+++ b/collectors/ioping.plugin/Makefile.am
@@ -0,0 +1,24 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+CLEANFILES = \
+ ioping.plugin \
+ $(NULL)
+
+include $(top_srcdir)/build/subst.inc
+SUFFIXES = .in
+
+dist_plugins_SCRIPTS = \
+ ioping.plugin \
+ $(NULL)
+
+dist_noinst_DATA = \
+ ioping.plugin.in \
+ README.md \
+ $(NULL)
+
+dist_libconfig_DATA = \
+ ioping.conf \
+ $(NULL)
diff --git a/collectors/ioping.plugin/README.md b/collectors/ioping.plugin/README.md
new file mode 100644
index 0000000..08b0ce8
--- /dev/null
+++ b/collectors/ioping.plugin/README.md
@@ -0,0 +1,86 @@
+<!--
+title: "ioping.plugin"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/ioping.plugin/README.md
+-->
+
+# ioping.plugin
+
+The ioping plugin supports monitoring latency for any number of directories/files/devices,
+by pinging them with `ioping`.
+
+A recent version of `ioping` is required (one that supports option `-N`).
+The supplied plugin can install it, by running:
+
+```sh
+/usr/libexec/netdata/plugins.d/ioping.plugin install
+```
+
+The `-e` option can be supplied to indicate where the Netdata environment file is installed. The default path is `/etc/netdata/.environment`.
+
+The above will download, build and install the right version as `/usr/libexec/netdata/plugins.d/ioping`.
+
+Then you need to edit `/etc/netdata/ioping.conf` (to edit it on your system run
+`/etc/netdata/edit-config ioping.conf`) like this:
+
+```sh
+# uncomment the following line - it should already be there
+ioping="/usr/libexec/netdata/plugins.d/ioping"
+
+# set here the directory/file/device you need to ping
+destination="destination"
+
+# override the chart update frequency - the default is inherited from Netdata
+update_every="1s"
+
+# the request size in bytes to ping the destination
+request_size="4k"
+
+# other ioping options - these are the defaults
+ioping_opts="-T 1000000 -R"
+```
+
+## alarms
+
+Netdata will automatically attach a few alarms for each host.
+Check the [latest versions of the ioping alarms](https://raw.githubusercontent.com/netdata/netdata/master/health/health.d/ioping.conf)
+
+## Multiple ioping Plugins With Different Settings
+
+You may need to run multiple ioping plugins with different settings or different endpoints.
+For example, you may need to ping one destination once every 10 seconds, and another once per second.
+
+Netdata allows you to add as many `ioping` plugins as you like.
+
+Follow this procedure:
+
+**1. Create New ioping Configuration File**
+
+```sh
+# Step Into Configuration Directory
+cd /etc/netdata
+
+# Copy Original ioping Configuration File To New Configuration File
+cp ioping.conf ioping2.conf
+```
+
+Edit `ioping2.conf` and set the settings and the destination you need for the second instance.
+
+**2. Soft Link Original ioping Plugin to New Plugin File**
+
+```sh
+# Become root (If This Step Is Performed As A Non-Root User)
+sudo su
+
+# Step Into The Plugins Directory
+cd /usr/libexec/netdata/plugins.d
+
+# Link ioping.plugin to ioping2.plugin
+ln -s ioping.plugin ioping2.plugin
+```
+
+That's it. Netdata will detect the new plugin and start it.
+
+You can name the new plugin any name you like.
+Just make sure the plugin and the configuration file have the same name.
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fioping.plugin%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/ioping.plugin/ioping.conf b/collectors/ioping.plugin/ioping.conf
new file mode 100644
index 0000000..86f0de7
--- /dev/null
+++ b/collectors/ioping.plugin/ioping.conf
@@ -0,0 +1,40 @@
+# no need for shebang - this file is sourced from ioping.plugin
+
+# ioping.plugin requires a recent version of ioping.
+#
+# You can get it on your system, by running:
+#
+# /usr/libexec/netdata/plugins.d/ioping.plugin install
+
+# -----------------------------------------------------------------------------
+# configuration options
+
+# The ioping binary to use. We need one that can output netdata friendly info
+# (supporting: -N). If you have multiple versions, put here the full filename
+# of the right one
+
+#ioping="/usr/libexec/netdata/plugins.d/ioping"
+
+
+# The directory/file/device to ioping
+
+destination=""
+
+
+# The update frequency of the chart in seconds (symbolic modifiers are supported)
+# the default is inherited from netdata
+
+#update_every="1s"
+
+
+# The request size in bytes to ioping the destination (symbolic modifiers are supported)
+# by default 4k chunks are used
+
+#request_size="4k"
+
+
+# Other ioping options
+# the defaults:
+# -T 1000000 = maximum valid request time (us)
+
+#ioping_opts="-T 1000000"
diff --git a/collectors/ioping.plugin/ioping.plugin.in b/collectors/ioping.plugin/ioping.plugin.in
new file mode 100755
index 0000000..9f9babd
--- /dev/null
+++ b/collectors/ioping.plugin/ioping.plugin.in
@@ -0,0 +1,212 @@
+#!/usr/bin/env bash
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# netdata
+# real-time performance and health monitoring, done right!
+# (C) 2017 Costa Tsaousis <costa@tsaousis.gr>
+# GPL v3+
+#
+# This plugin requires a recent version of ioping.
+# You can compile it from source, by running me with option: install
+
+export PATH="${PATH}:/sbin:/usr/sbin:/usr/local/sbin"
+export LC_ALL=C
+
+usage="$(basename "$0") [install] [-h] [-e]
+
+where:
+ install install ioping binary
+    -e, --env    path to environment file (defaults to '/etc/netdata/.environment')
+ -h show this help text"
+
+INSTALL=0
+ENVIRONMENT_FILE="/etc/netdata/.environment"
+
+while :; do
+ case "$1" in
+ -h | --help)
+ echo "$usage" >&2
+ exit 1
+ ;;
+ install)
+ INSTALL=1
+ shift
+ ;;
+ -e | --env)
+ ENVIRONMENT_FILE="$2"
+ shift 2
+ ;;
+ -*)
+ echo "$usage" >&2
+ exit 1
+ ;;
+ *) break ;;
+ esac
+done
+
+if [ "$INSTALL" == "1" ]
+ then
+ [ "${UID}" != 0 ] && echo >&2 "Please run me as root. This will install a single binary file: /usr/libexec/netdata/plugins.d/ioping." && exit 1
+
+ source "${ENVIRONMENT_FILE}" || exit 1
+
+ run() {
+ printf >&2 " > "
+ printf >&2 "%q " "${@}"
+ printf >&2 "\n"
+ "${@}" || exit 1
+ }
+
+ download() {
+ local git="$(which git 2>/dev/null || command -v git 2>/dev/null)"
+ [ ! -z "${git}" ] && run git clone "${1}" "${2}" && return 0
+
+ echo >&2 "Cannot find 'git' in this system." && exit 1
+ }
+
+ tmp=$(mktemp -d /tmp/netdata-ioping-XXXXXX)
+ [ ! -d "${NETDATA_PREFIX}/usr/libexec/netdata" ] && run mkdir -p "${NETDATA_PREFIX}/usr/libexec/netdata"
+
+ run cd "${tmp}"
+
+ if [ -d ioping-netdata ]
+ then
+ run rm -rf ioping-netdata || exit 1
+ fi
+
+ download 'https://github.com/netdata/ioping.git' 'ioping-netdata'
+ [ $? -ne 0 ] && exit 1
+ run cd ioping-netdata || exit 1
+
+ INSTALL_PATH="${NETDATA_PREFIX}/usr/libexec/netdata/plugins.d/ioping"
+
+ run make clean
+ run make
+ run mv ioping "${INSTALL_PATH}"
+ run chown root:"${NETDATA_GROUP}" "${INSTALL_PATH}"
+ run chmod 4750 "${INSTALL_PATH}"
+ echo >&2
+ echo >&2 "All done, you have a compatible ioping now at ${INSTALL_PATH}."
+ echo >&2
+
+ exit 0
+fi
+
+# -----------------------------------------------------------------------------
+
+PROGRAM_NAME="$(basename "${0}")"
+
+logdate() {
+ date "+%Y-%m-%d %H:%M:%S"
+}
+
+log() {
+ local status="${1}"
+ shift
+
+ echo >&2 "$(logdate): ${PROGRAM_NAME}: ${status}: ${*}"
+
+}
+
+warning() {
+ log WARNING "${@}"
+}
+
+error() {
+ log ERROR "${@}"
+}
+
+info() {
+ log INFO "${@}"
+}
+
+fatal() {
+ log FATAL "${@}"
+ echo "DISABLE"
+ exit 1
+}
+
+debug=0
+debug() {
+ [ $debug -eq 1 ] && log DEBUG "${@}"
+}
+
+# -----------------------------------------------------------------------------
+
+# store in ${plugin} the name we run under
+# this allows us to copy/link ioping.plugin under a different name
+# to have multiple ioping plugins running with different settings
+plugin="${PROGRAM_NAME/.plugin/}"
+
+
+# -----------------------------------------------------------------------------
+
+# the frequency to send info to netdata
+# passed by netdata as the first parameter
+update_every="${1-1}"
+
+# the netdata configuration directory
+# passed by netdata as an environment variable
+[ -z "${NETDATA_USER_CONFIG_DIR}" ] && NETDATA_USER_CONFIG_DIR="@configdir_POST@"
+[ -z "${NETDATA_STOCK_CONFIG_DIR}" ] && NETDATA_STOCK_CONFIG_DIR="@libconfigdir_POST@"
+
+# the netdata directory for internal binaries
+[ -z "${NETDATA_PLUGINS_DIR}" ] && NETDATA_PLUGINS_DIR="@pluginsdir_POST@"
+
+# -----------------------------------------------------------------------------
+# configuration options
+# can be overwritten at /etc/netdata/ioping.conf
+
+# the ioping binary to use
+# we need one that can output netdata friendly info (supporting: -N)
+# if you have multiple versions, put here the full filename of the right one
+ioping="${NETDATA_PLUGINS_DIR}/ioping"
+
+# the destination to ioping
+destination=""
+
+# the request size in bytes to ping the disk
+request_size="4k"
+
+# ioping options
+ioping_opts="-T 1000000"
+
+# -----------------------------------------------------------------------------
+# load the configuration files
+
+for CONFIG in "${NETDATA_STOCK_CONFIG_DIR}/${plugin}.conf" "${NETDATA_USER_CONFIG_DIR}/${plugin}.conf"
+do
+ if [ -f "${CONFIG}" ]
+ then
+ info "Loading config file '${CONFIG}'..."
+ source "${CONFIG}"
+ [ $? -ne 0 ] && error "Failed to load config file '${CONFIG}'."
+ else
+ warning "Cannot find file '${CONFIG}'."
+ fi
+done
+
+if [ -z "${destination}" ]
+then
+ fatal "destination is not configured - nothing to do."
+fi
+
+if [ ! -f "${ioping}" ]
+then
+ fatal "ioping command is not found. Please set its full path in '${NETDATA_USER_CONFIG_DIR}/${plugin}.conf'"
+fi
+
+if [ ! -x "${ioping}" ]
+then
+ fatal "ioping command '${ioping}' is not executable - cannot proceed."
+fi
+
+# the ioping options we will use
+options=( -N -i ${update_every} -s ${request_size} ${ioping_opts} ${destination} )
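+# e.g. with the defaults above this expands to: ioping -N -i 1 -s 4k -T 1000000 <destination>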
+
+# execute ioping
+info "starting ioping: ${ioping} ${options[*]}"
+exec "${ioping}" "${options[@]}"
+
+# if we cannot execute ioping, stop
+fatal "command '${ioping} ${options[*]}' failed to be executed (returned code $?)."
diff --git a/collectors/macos.plugin/Makefile.am b/collectors/macos.plugin/Makefile.am
new file mode 100644
index 0000000..161784b
--- /dev/null
+++ b/collectors/macos.plugin/Makefile.am
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/collectors/macos.plugin/README.md b/collectors/macos.plugin/README.md
new file mode 100644
index 0000000..800eb0e
--- /dev/null
+++ b/collectors/macos.plugin/README.md
@@ -0,0 +1,12 @@
+<!--
+title: "macos.plugin"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/macos.plugin/README.md
+-->
+
+# macos.plugin
+
+Collects resource usage and performance data on macOS systems.
+
+By default, Netdata enables monitoring of disk, memory, and network metrics only when they are not zero; metrics that are constantly zero are ignored. Metrics that start having values after Netdata is started are detected automatically, and their charts are added to the dashboard (though you need to refresh the dashboard for them to appear). Use `yes` instead of `auto` in the plugin configuration sections to enable these charts permanently, or set the `enable zero metrics` option to `yes` in the `[global]` section to enable charts with zero metrics for all internal Netdata plugins, as in the example below.
+
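+For example, a minimal `netdata.conf` override could look like this (the section
+and option names below are the ones used by the plugin sources; adjust to the
+charts you care about):
+
+```
+[plugin:macos:iokit]
+    disk i/o = yes
+
+[global]
+    enable zero metrics = yes
+```
+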
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fmacos.plugin%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/macos.plugin/macos_fw.c b/collectors/macos.plugin/macos_fw.c
new file mode 100644
index 0000000..d0b3e0f
--- /dev/null
+++ b/collectors/macos.plugin/macos_fw.c
@@ -0,0 +1,687 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "plugin_macos.h"
+
+#include <CoreFoundation/CoreFoundation.h>
+#include <IOKit/IOKitLib.h>
+#include <IOKit/storage/IOBlockStorageDriver.h>
+#include <IOKit/IOBSD.h>
+// NEEDED BY do_space, do_inodes
+#include <sys/mount.h>
+// NEEDED BY: struct ifaddrs, getifaddrs()
+#include <net/if.h>
+#include <ifaddrs.h>
+
+// NEEDED BY: do_bandwidth
+#define IFA_DATA(s) (((struct if_data *)ifa->ifa_data)->ifi_ ## s)
+
+#define MAXDRIVENAME 31
+
+#define KILO_FACTOR 1024
+#define MEGA_FACTOR 1048576 // 1024 * 1024
+#define GIGA_FACTOR 1073741824 // 1024 * 1024 * 1024
+
+int do_macos_iokit(int update_every, usec_t dt) {
+ (void)dt;
+
+ static int do_io = -1, do_space = -1, do_inodes = -1, do_bandwidth = -1;
+
+ if (unlikely(do_io == -1)) {
+ do_io = config_get_boolean("plugin:macos:iokit", "disk i/o", 1);
+ do_space = config_get_boolean("plugin:macos:sysctl", "space usage for all disks", 1);
+ do_inodes = config_get_boolean("plugin:macos:sysctl", "inodes usage for all disks", 1);
+ do_bandwidth = config_get_boolean("plugin:macos:sysctl", "bandwidth", 1);
+ }
+
+ RRDSET *st;
+
+ mach_port_t master_port;
+ io_registry_entry_t drive, drive_media;
+ io_iterator_t drive_list;
+ CFDictionaryRef properties, statistics;
+ CFStringRef name;
+ CFNumberRef number;
+ kern_return_t status;
+ collected_number total_disk_reads = 0;
+ collected_number total_disk_writes = 0;
+ struct diskstat {
+ char name[MAXDRIVENAME];
+ collected_number bytes_read;
+ collected_number bytes_write;
+ collected_number reads;
+ collected_number writes;
+ collected_number time_read;
+ collected_number time_write;
+ collected_number latency_read;
+ collected_number latency_write;
+ } diskstat;
+ struct cur_diskstat {
+ collected_number duration_read_ns;
+ collected_number duration_write_ns;
+ collected_number busy_time_ns;
+ } cur_diskstat;
+ struct prev_diskstat {
+ collected_number bytes_read;
+ collected_number bytes_write;
+ collected_number operations_read;
+ collected_number operations_write;
+ collected_number duration_read_ns;
+ collected_number duration_write_ns;
+ collected_number busy_time_ns;
+ } prev_diskstat;
+
+ // NEEDED BY: do_space, do_inodes
+ struct statfs *mntbuf;
+ int mntsize, i;
+ char mntonname[MNAMELEN + 1];
+ char title[4096 + 1];
+
+ // NEEDED BY: do_bandwidth
+ struct ifaddrs *ifa, *ifap;
+
+ /* Get ports and services for drive statistics. */
+ if (unlikely(IOMasterPort(bootstrap_port, &master_port))) {
+ error("MACOS: IOMasterPort() failed");
+ do_io = 0;
+ error("DISABLED: system.io");
+ /* Get the list of all drive objects. */
+ } else if (unlikely(IOServiceGetMatchingServices(master_port, IOServiceMatching("IOBlockStorageDriver"), &drive_list))) {
+ error("MACOS: IOServiceGetMatchingServices() failed");
+ do_io = 0;
+ error("DISABLED: system.io");
+ } else {
+ while ((drive = IOIteratorNext(drive_list)) != 0) {
+ properties = 0;
+ statistics = 0;
+ number = 0;
+ bzero(&diskstat, sizeof(diskstat));
+
+ /* Get drive media object. */
+ status = IORegistryEntryGetChildEntry(drive, kIOServicePlane, &drive_media);
+ if (unlikely(status != KERN_SUCCESS)) {
+ IOObjectRelease(drive);
+ continue;
+ }
+
+ /* Get drive media properties. */
+ if (likely(!IORegistryEntryCreateCFProperties(drive_media, (CFMutableDictionaryRef *)&properties, kCFAllocatorDefault, 0))) {
+ /* Get disk name. */
+ if (likely(name = (CFStringRef)CFDictionaryGetValue(properties, CFSTR(kIOBSDNameKey)))) {
+ CFStringGetCString(name, diskstat.name, MAXDRIVENAME, kCFStringEncodingUTF8);
+ }
+ }
+
+ /* Release. */
+ CFRelease(properties);
+ IOObjectRelease(drive_media);
+
+            if(unlikely(!*diskstat.name)) { // name is an array; only an empty string means "no name"
+ IOObjectRelease(drive);
+ continue;
+ }
+
+ /* Obtain the properties for this drive object. */
+ if (unlikely(IORegistryEntryCreateCFProperties(drive, (CFMutableDictionaryRef *)&properties, kCFAllocatorDefault, 0))) {
+ IOObjectRelease(drive);
+ error("MACOS: IORegistryEntryCreateCFProperties() failed");
+ do_io = 0;
+ error("DISABLED: system.io");
+ break;
+ } else if (likely(properties)) {
+ /* Obtain the statistics from the drive properties. */
+ if (likely(statistics = (CFDictionaryRef)CFDictionaryGetValue(properties, CFSTR(kIOBlockStorageDriverStatisticsKey)))) {
+
+ // --------------------------------------------------------------------
+
+ /* Get bytes read. */
+ if (likely(number = (CFNumberRef)CFDictionaryGetValue(statistics, CFSTR(kIOBlockStorageDriverStatisticsBytesReadKey)))) {
+ CFNumberGetValue(number, kCFNumberSInt64Type, &diskstat.bytes_read);
+ total_disk_reads += diskstat.bytes_read;
+ }
+
+ /* Get bytes written. */
+ if (likely(number = (CFNumberRef)CFDictionaryGetValue(statistics, CFSTR(kIOBlockStorageDriverStatisticsBytesWrittenKey)))) {
+ CFNumberGetValue(number, kCFNumberSInt64Type, &diskstat.bytes_write);
+ total_disk_writes += diskstat.bytes_write;
+ }
+
+ st = rrdset_find_active_bytype_localhost("disk", diskstat.name);
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "disk"
+ , diskstat.name
+ , NULL
+ , diskstat.name
+ , "disk.io"
+ , "Disk I/O Bandwidth"
+ , "KiB/s"
+ , "macos"
+ , "iokit"
+ , 2000
+ , update_every
+ , RRDSET_TYPE_AREA
+ );
+
+ rrddim_add(st, "reads", NULL, 1, 1024, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "writes", NULL, -1, 1024, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ prev_diskstat.bytes_read = rrddim_set(st, "reads", diskstat.bytes_read);
+ prev_diskstat.bytes_write = rrddim_set(st, "writes", diskstat.bytes_write);
+ rrdset_done(st);
+
+ // --------------------------------------------------------------------
+
+ /* Get number of reads. */
+ if (likely(number = (CFNumberRef)CFDictionaryGetValue(statistics, CFSTR(kIOBlockStorageDriverStatisticsReadsKey)))) {
+ CFNumberGetValue(number, kCFNumberSInt64Type, &diskstat.reads);
+ }
+
+ /* Get number of writes. */
+ if (likely(number = (CFNumberRef)CFDictionaryGetValue(statistics, CFSTR(kIOBlockStorageDriverStatisticsWritesKey)))) {
+ CFNumberGetValue(number, kCFNumberSInt64Type, &diskstat.writes);
+ }
+
+ st = rrdset_find_active_bytype_localhost("disk_ops", diskstat.name);
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "disk_ops"
+ , diskstat.name
+ , NULL
+ , diskstat.name
+ , "disk.ops"
+ , "Disk Completed I/O Operations"
+ , "operations/s"
+ , "macos"
+ , "iokit"
+ , 2001
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+ rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
+
+ rrddim_add(st, "reads", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "writes", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ prev_diskstat.operations_read = rrddim_set(st, "reads", diskstat.reads);
+ prev_diskstat.operations_write = rrddim_set(st, "writes", diskstat.writes);
+ rrdset_done(st);
+
+ // --------------------------------------------------------------------
+
+ /* Get reads time. */
+ if (likely(number = (CFNumberRef)CFDictionaryGetValue(statistics, CFSTR(kIOBlockStorageDriverStatisticsTotalReadTimeKey)))) {
+ CFNumberGetValue(number, kCFNumberSInt64Type, &diskstat.time_read);
+ }
+
+ /* Get writes time. */
+ if (likely(number = (CFNumberRef)CFDictionaryGetValue(statistics, CFSTR(kIOBlockStorageDriverStatisticsTotalWriteTimeKey)))) {
+ CFNumberGetValue(number, kCFNumberSInt64Type, &diskstat.time_write);
+ }
+
+ st = rrdset_find_active_bytype_localhost("disk_util", diskstat.name);
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "disk_util"
+ , diskstat.name
+ , NULL
+ , diskstat.name
+ , "disk.util"
+ , "Disk Utilization Time"
+ , "% of time working"
+ , "macos"
+ , "iokit"
+ , 2004
+ , update_every
+ , RRDSET_TYPE_AREA
+ );
+ rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
+
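+                            // busy time is reported in nanoseconds; dividing the per-second
+                            // rate by 10,000,000 turns it into a percentage of time busy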
+ rrddim_add(st, "utilization", NULL, 1, 10000000, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ cur_diskstat.busy_time_ns = (diskstat.time_read + diskstat.time_write);
+ prev_diskstat.busy_time_ns = rrddim_set(st, "utilization", cur_diskstat.busy_time_ns);
+ rrdset_done(st);
+
+ // --------------------------------------------------------------------
+
+ /* Get reads latency. */
+ if (likely(number = (CFNumberRef)CFDictionaryGetValue(statistics, CFSTR(kIOBlockStorageDriverStatisticsLatentReadTimeKey)))) {
+ CFNumberGetValue(number, kCFNumberSInt64Type, &diskstat.latency_read);
+ }
+
+ /* Get writes latency. */
+ if (likely(number = (CFNumberRef)CFDictionaryGetValue(statistics, CFSTR(kIOBlockStorageDriverStatisticsLatentWriteTimeKey)))) {
+ CFNumberGetValue(number, kCFNumberSInt64Type, &diskstat.latency_write);
+ }
+
+ st = rrdset_find_active_bytype_localhost("disk_iotime", diskstat.name);
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "disk_iotime"
+ , diskstat.name
+ , NULL
+ , diskstat.name
+ , "disk.iotime"
+ , "Disk Total I/O Time"
+ , "milliseconds/s"
+ , "macos"
+ , "iokit"
+ , 2022
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+ rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
+
+ rrddim_add(st, "reads", NULL, 1, 1000000, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "writes", NULL, -1, 1000000, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ cur_diskstat.duration_read_ns = diskstat.time_read + diskstat.latency_read;
+ cur_diskstat.duration_write_ns = diskstat.time_write + diskstat.latency_write;
+ prev_diskstat.duration_read_ns = rrddim_set(st, "reads", cur_diskstat.duration_read_ns);
+ prev_diskstat.duration_write_ns = rrddim_set(st, "writes", cur_diskstat.duration_write_ns);
+ rrdset_done(st);
+
+ // --------------------------------------------------------------------
+ // calculate differential charts
+ // only if this is not the first time we run
+
+ if (likely(dt)) {
+
+ // --------------------------------------------------------------------
+
+ st = rrdset_find_active_bytype_localhost("disk_await", diskstat.name);
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "disk_await"
+ , diskstat.name
+ , NULL
+ , diskstat.name
+ , "disk.await"
+ , "Average Completed I/O Operation Time"
+ , "milliseconds/operation"
+ , "macos"
+ , "iokit"
+ , 2005
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+ rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
+
+ rrddim_add(st, "reads", NULL, 1, 1000000, RRD_ALGORITHM_ABSOLUTE);
+ rrddim_add(st, "writes", NULL, -1, 1000000, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else rrdset_next(st);
+
+ rrddim_set(st, "reads", (diskstat.reads - prev_diskstat.operations_read) ?
+ (cur_diskstat.duration_read_ns - prev_diskstat.duration_read_ns) / (diskstat.reads - prev_diskstat.operations_read) : 0);
+ rrddim_set(st, "writes", (diskstat.writes - prev_diskstat.operations_write) ?
+ (cur_diskstat.duration_write_ns - prev_diskstat.duration_write_ns) / (diskstat.writes - prev_diskstat.operations_write) : 0);
+ rrdset_done(st);
+
+ // --------------------------------------------------------------------
+
+ st = rrdset_find_active_bytype_localhost("disk_avgsz", diskstat.name);
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "disk_avgsz"
+ , diskstat.name
+ , NULL
+ , diskstat.name
+ , "disk.avgsz"
+ , "Average Completed I/O Operation Bandwidth"
+ , "KiB/operation"
+ , "macos"
+ , "iokit"
+ , 2006
+ , update_every
+ , RRDSET_TYPE_AREA
+ );
+ rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
+
+ rrddim_add(st, "reads", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
+ rrddim_add(st, "writes", NULL, -1, 1024, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else rrdset_next(st);
+
+ rrddim_set(st, "reads", (diskstat.reads - prev_diskstat.operations_read) ?
+ (diskstat.bytes_read - prev_diskstat.bytes_read) / (diskstat.reads - prev_diskstat.operations_read) : 0);
+ rrddim_set(st, "writes", (diskstat.writes - prev_diskstat.operations_write) ?
+ (diskstat.bytes_write - prev_diskstat.bytes_write) / (diskstat.writes - prev_diskstat.operations_write) : 0);
+ rrdset_done(st);
+
+ // --------------------------------------------------------------------
+
+ st = rrdset_find_active_bytype_localhost("disk_svctm", diskstat.name);
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "disk_svctm"
+ , diskstat.name
+ , NULL
+ , diskstat.name
+ , "disk.svctm"
+ , "Average Service Time"
+ , "milliseconds/operation"
+ , "macos"
+ , "iokit"
+ , 2007
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+ rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
+
+ rrddim_add(st, "svctm", NULL, 1, 1000000, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else rrdset_next(st);
+
+ rrddim_set(st, "svctm", ((diskstat.reads - prev_diskstat.operations_read) + (diskstat.writes - prev_diskstat.operations_write)) ?
+ (cur_diskstat.busy_time_ns - prev_diskstat.busy_time_ns) / ((diskstat.reads - prev_diskstat.operations_read) + (diskstat.writes - prev_diskstat.operations_write)) : 0);
+ rrdset_done(st);
+ }
+ }
+
+ /* Release. */
+ CFRelease(properties);
+ }
+
+ /* Release. */
+ IOObjectRelease(drive);
+ }
+ IOIteratorReset(drive_list);
+
+ /* Release. */
+ IOObjectRelease(drive_list);
+ }
+
+ if (likely(do_io)) {
+ st = rrdset_find_active_bytype_localhost("system", "io");
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "system"
+ , "io"
+ , NULL
+ , "disk"
+ , NULL
+ , "Disk I/O"
+ , "KiB/s"
+ , "macos"
+ , "iokit"
+ , 150
+ , update_every
+ , RRDSET_TYPE_AREA
+ );
+ rrddim_add(st, "in", NULL, 1, 1024, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "out", NULL, -1, 1024, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set(st, "in", total_disk_reads);
+ rrddim_set(st, "out", total_disk_writes);
+ rrdset_done(st);
+ }
+
+ // Can be merged with FreeBSD plugin
+ // --------------------------------------------------------------------------
+
+ if (likely(do_space || do_inodes)) {
+ // there is no mount info in sysctl MIBs
+ if (unlikely(!(mntsize = getmntinfo(&mntbuf, MNT_NOWAIT)))) {
+ error("MACOS: getmntinfo() failed");
+ do_space = 0;
+ error("DISABLED: disk_space.X");
+ do_inodes = 0;
+ error("DISABLED: disk_inodes.X");
+ } else {
+ for (i = 0; i < mntsize; i++) {
+ if (mntbuf[i].f_flags == MNT_RDONLY ||
+ mntbuf[i].f_blocks == 0 ||
+ // taken from gnulib/mountlist.c and shortened to FreeBSD related fstypes
+ strcmp(mntbuf[i].f_fstypename, "autofs") == 0 ||
+ strcmp(mntbuf[i].f_fstypename, "procfs") == 0 ||
+ strcmp(mntbuf[i].f_fstypename, "subfs") == 0 ||
+ strcmp(mntbuf[i].f_fstypename, "devfs") == 0 ||
+ strcmp(mntbuf[i].f_fstypename, "none") == 0)
+ continue;
+
+ // --------------------------------------------------------------------------
+
+ if (likely(do_space)) {
+ st = rrdset_find_active_bytype_localhost("disk_space", mntbuf[i].f_mntonname);
+ if (unlikely(!st)) {
+ snprintfz(title, 4096, "Disk Space Usage for %s [%s]", mntbuf[i].f_mntonname, mntbuf[i].f_mntfromname);
+ st = rrdset_create_localhost(
+ "disk_space"
+ , mntbuf[i].f_mntonname
+ , NULL
+ , mntbuf[i].f_mntonname
+ , "disk.space"
+ , title
+ , "GiB"
+ , "macos"
+ , "iokit"
+ , 2023
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+
+ rrddim_add(st, "avail", NULL, mntbuf[i].f_bsize, GIGA_FACTOR, RRD_ALGORITHM_ABSOLUTE);
+ rrddim_add(st, "used", NULL, mntbuf[i].f_bsize, GIGA_FACTOR, RRD_ALGORITHM_ABSOLUTE);
+ rrddim_add(st, "reserved_for_root", "reserved for root", mntbuf[i].f_bsize, GIGA_FACTOR, RRD_ALGORITHM_ABSOLUTE);
+ } else
+ rrdset_next(st);
+
+ rrddim_set(st, "avail", (collected_number) mntbuf[i].f_bavail);
+ rrddim_set(st, "used", (collected_number) (mntbuf[i].f_blocks - mntbuf[i].f_bfree));
+ rrddim_set(st, "reserved_for_root", (collected_number) (mntbuf[i].f_bfree - mntbuf[i].f_bavail));
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------------
+
+ if (likely(do_inodes)) {
+ st = rrdset_find_active_bytype_localhost("disk_inodes", mntbuf[i].f_mntonname);
+ if (unlikely(!st)) {
+ snprintfz(title, 4096, "Disk Files (inodes) Usage for %s [%s]", mntbuf[i].f_mntonname, mntbuf[i].f_mntfromname);
+ st = rrdset_create_localhost(
+ "disk_inodes"
+ , mntbuf[i].f_mntonname
+ , NULL
+ , mntbuf[i].f_mntonname
+ , "disk.inodes"
+ , title
+ , "inodes"
+ , "macos"
+ , "iokit"
+ , 2024
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+
+ rrddim_add(st, "avail", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ rrddim_add(st, "used", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ rrddim_add(st, "reserved_for_root", "reserved for root", 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ } else
+ rrdset_next(st);
+
+ rrddim_set(st, "avail", (collected_number) mntbuf[i].f_ffree);
+ rrddim_set(st, "used", (collected_number) (mntbuf[i].f_files - mntbuf[i].f_ffree));
+ rrdset_done(st);
+ }
+ }
+ }
+ }
+
+ // Can be merged with FreeBSD plugin
+ // --------------------------------------------------------------------
+
+ if (likely(do_bandwidth)) {
+ if (unlikely(getifaddrs(&ifap))) {
+            error("MACOS: getifaddrs() failed");
+ do_bandwidth = 0;
+ error("DISABLED: system.ipv4");
+ } else {
+ for (ifa = ifap; ifa; ifa = ifa->ifa_next) {
+ if (ifa->ifa_addr->sa_family != AF_LINK)
+ continue;
+
+ // --------------------------------------------------------------------
+
+ st = rrdset_find_active_bytype_localhost("net", ifa->ifa_name);
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "net"
+ , ifa->ifa_name
+ , NULL
+ , ifa->ifa_name
+ , "net.net"
+ , "Bandwidth"
+ , "kilobits/s"
+ , "macos"
+ , "iokit"
+ , 7000
+ , update_every
+ , RRDSET_TYPE_AREA
+ );
+
+ rrddim_add(st, "received", NULL, 8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "sent", NULL, -8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set(st, "received", IFA_DATA(ibytes));
+ rrddim_set(st, "sent", IFA_DATA(obytes));
+ rrdset_done(st);
+
+ // --------------------------------------------------------------------
+
+ st = rrdset_find_active_bytype_localhost("net_packets", ifa->ifa_name);
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "net_packets"
+ , ifa->ifa_name
+ , NULL
+ , ifa->ifa_name
+ , "net.packets"
+ , "Packets"
+ , "packets/s"
+ , "macos"
+ , "iokit"
+ , 7001
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+ rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
+
+ rrddim_add(st, "received", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "sent", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "multicast_received", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "multicast_sent", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set(st, "received", IFA_DATA(ipackets));
+ rrddim_set(st, "sent", IFA_DATA(opackets));
+ rrddim_set(st, "multicast_received", IFA_DATA(imcasts));
+ rrddim_set(st, "multicast_sent", IFA_DATA(omcasts));
+ rrdset_done(st);
+
+ // --------------------------------------------------------------------
+
+ st = rrdset_find_active_bytype_localhost("net_errors", ifa->ifa_name);
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "net_errors"
+ , ifa->ifa_name
+ , NULL
+ , ifa->ifa_name
+ , "net.errors"
+ , "Interface Errors"
+ , "errors/s"
+ , "macos"
+ , "iokit"
+ , 7002
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+ rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
+
+ rrddim_add(st, "inbound", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "outbound", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set(st, "inbound", IFA_DATA(ierrors));
+ rrddim_set(st, "outbound", IFA_DATA(oerrors));
+ rrdset_done(st);
+
+ // --------------------------------------------------------------------
+
+ st = rrdset_find_active_bytype_localhost("net_drops", ifa->ifa_name);
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "net_drops"
+ , ifa->ifa_name
+ , NULL
+ , ifa->ifa_name
+ , "net.drops"
+ , "Interface Drops"
+ , "drops/s"
+ , "macos"
+ , "iokit"
+ , 7003
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+ rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
+
+ rrddim_add(st, "inbound", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set(st, "inbound", IFA_DATA(iqdrops));
+ rrdset_done(st);
+
+ // --------------------------------------------------------------------
+
+ st = rrdset_find_active_bytype_localhost("net_events", ifa->ifa_name);
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "net_events"
+ , ifa->ifa_name
+ , NULL
+ , ifa->ifa_name
+ , "net.events"
+ , "Network Interface Events"
+ , "events/s"
+ , "macos"
+ , "iokit"
+ , 7006
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+ rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
+
+ rrddim_add(st, "frames", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "collisions", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "carrier", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set(st, "collisions", IFA_DATA(collisions));
+ rrdset_done(st);
+ }
+
+ freeifaddrs(ifap);
+ }
+ }
+
+
+ return 0;
+}
diff --git a/collectors/macos.plugin/macos_mach_smi.c b/collectors/macos.plugin/macos_mach_smi.c
new file mode 100644
index 0000000..973b90a
--- /dev/null
+++ b/collectors/macos.plugin/macos_mach_smi.c
@@ -0,0 +1,241 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "plugin_macos.h"
+
+#include <mach/mach.h>
+
+int do_macos_mach_smi(int update_every, usec_t dt) {
+ (void)dt;
+
+    static int do_cpu = -1, do_ram = -1, do_swapio = -1, do_pgfaults = -1;
+
+ if (unlikely(do_cpu == -1)) {
+ do_cpu = config_get_boolean("plugin:macos:mach_smi", "cpu utilization", 1);
+ do_ram = config_get_boolean("plugin:macos:mach_smi", "system ram", 1);
+ do_swapio = config_get_boolean("plugin:macos:mach_smi", "swap i/o", 1);
+ do_pgfaults = config_get_boolean("plugin:macos:mach_smi", "memory page faults", 1);
+ }
+
+ RRDSET *st;
+
+ kern_return_t kr;
+ mach_msg_type_number_t count;
+ host_t host;
+ vm_size_t system_pagesize;
+
+
+ // NEEDED BY: do_cpu
+ natural_t cp_time[CPU_STATE_MAX];
+
+ // NEEDED BY: do_ram, do_swapio, do_pgfaults
+#if (defined __MAC_OS_X_VERSION_MIN_REQUIRED && __MAC_OS_X_VERSION_MIN_REQUIRED >= 1060)
+ vm_statistics64_data_t vm_statistics;
+#else
+ vm_statistics_data_t vm_statistics;
+#endif
+
+ host = mach_host_self();
+ kr = host_page_size(host, &system_pagesize);
+ if (unlikely(kr != KERN_SUCCESS))
+ return -1;
+
+ // --------------------------------------------------------------------
+
+ if (likely(do_cpu)) {
+ if (unlikely(HOST_CPU_LOAD_INFO_COUNT != 4)) {
+ error("MACOS: There are %d CPU states (4 was expected)", HOST_CPU_LOAD_INFO_COUNT);
+ do_cpu = 0;
+ error("DISABLED: system.cpu");
+ } else {
+ count = HOST_CPU_LOAD_INFO_COUNT;
+ kr = host_statistics(host, HOST_CPU_LOAD_INFO, (host_info_t)cp_time, &count);
+ if (unlikely(kr != KERN_SUCCESS)) {
+ error("MACOS: host_statistics() failed: %s", mach_error_string(kr));
+ do_cpu = 0;
+ error("DISABLED: system.cpu");
+ } else {
+
+ st = rrdset_find_active_bytype_localhost("system", "cpu");
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "system"
+ , "cpu"
+ , NULL
+ , "cpu"
+ , "system.cpu"
+ , "Total CPU utilization"
+ , "percentage"
+ , "macos"
+ , "mach_smi"
+ , 100
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+
+ rrddim_add(st, "user", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
+ rrddim_add(st, "nice", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
+ rrddim_add(st, "system", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
+ rrddim_add(st, "idle", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
+ rrddim_hide(st, "idle");
+ }
+ else rrdset_next(st);
+
+ rrddim_set(st, "user", cp_time[CPU_STATE_USER]);
+ rrddim_set(st, "nice", cp_time[CPU_STATE_NICE]);
+ rrddim_set(st, "system", cp_time[CPU_STATE_SYSTEM]);
+ rrddim_set(st, "idle", cp_time[CPU_STATE_IDLE]);
+ rrdset_done(st);
+ }
+ }
+ }
+
+ // --------------------------------------------------------------------
+
+ if (likely(do_ram || do_swapio || do_pgfaults)) {
+#if (defined __MAC_OS_X_VERSION_MIN_REQUIRED && __MAC_OS_X_VERSION_MIN_REQUIRED >= 1060)
+ count = sizeof(vm_statistics64_data_t);
+ kr = host_statistics64(host, HOST_VM_INFO64, (host_info64_t)&vm_statistics, &count);
+#else
+ count = sizeof(vm_statistics_data_t);
+ kr = host_statistics(host, HOST_VM_INFO, (host_info_t)&vm_statistics, &count);
+#endif
+ if (unlikely(kr != KERN_SUCCESS)) {
+ error("MACOS: host_statistics64() failed: %s", mach_error_string(kr));
+ do_ram = 0;
+ error("DISABLED: system.ram");
+ do_swapio = 0;
+ error("DISABLED: system.swapio");
+ do_pgfaults = 0;
+ error("DISABLED: mem.pgfaults");
+ } else {
+ if (likely(do_ram)) {
+ st = rrdset_find_active_localhost("system.ram");
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "system"
+ , "ram"
+ , NULL
+ , "ram"
+ , NULL
+ , "System RAM"
+ , "MiB"
+ , "macos"
+ , "mach_smi"
+ , 200
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+
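+                    // page counts are multiplied by the page size (bytes) and
+                    // divided by 1048576, so the chart is reported in MiB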
+ rrddim_add(st, "active", NULL, system_pagesize, 1048576, RRD_ALGORITHM_ABSOLUTE);
+ rrddim_add(st, "wired", NULL, system_pagesize, 1048576, RRD_ALGORITHM_ABSOLUTE);
+#if (defined __MAC_OS_X_VERSION_MIN_REQUIRED && __MAC_OS_X_VERSION_MIN_REQUIRED >= 1090)
+ rrddim_add(st, "throttled", NULL, system_pagesize, 1048576, RRD_ALGORITHM_ABSOLUTE);
+ rrddim_add(st, "compressor", NULL, system_pagesize, 1048576, RRD_ALGORITHM_ABSOLUTE);
+#endif
+ rrddim_add(st, "inactive", NULL, system_pagesize, 1048576, RRD_ALGORITHM_ABSOLUTE);
+ rrddim_add(st, "purgeable", NULL, system_pagesize, 1048576, RRD_ALGORITHM_ABSOLUTE);
+ rrddim_add(st, "speculative", NULL, system_pagesize, 1048576, RRD_ALGORITHM_ABSOLUTE);
+ rrddim_add(st, "free", NULL, system_pagesize, 1048576, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else rrdset_next(st);
+
+ rrddim_set(st, "active", vm_statistics.active_count);
+ rrddim_set(st, "wired", vm_statistics.wire_count);
+#if (defined __MAC_OS_X_VERSION_MIN_REQUIRED && __MAC_OS_X_VERSION_MIN_REQUIRED >= 1090)
+ rrddim_set(st, "throttled", vm_statistics.throttled_count);
+ rrddim_set(st, "compressor", vm_statistics.compressor_page_count);
+#endif
+ rrddim_set(st, "inactive", vm_statistics.inactive_count);
+ rrddim_set(st, "purgeable", vm_statistics.purgeable_count);
+ rrddim_set(st, "speculative", vm_statistics.speculative_count);
+ rrddim_set(st, "free", (vm_statistics.free_count - vm_statistics.speculative_count));
+ rrdset_done(st);
+ }
+
+#if (defined __MAC_OS_X_VERSION_MIN_REQUIRED && __MAC_OS_X_VERSION_MIN_REQUIRED >= 1090)
+ // --------------------------------------------------------------------
+
+ if (likely(do_swapio)) {
+ st = rrdset_find_active_localhost("system.swapio");
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "system"
+ , "swapio"
+ , NULL
+ , "swap"
+ , NULL
+ , "Swap I/O"
+ , "KiB/s"
+ , "macos"
+ , "mach_smi"
+ , 250
+ , update_every
+ , RRDSET_TYPE_AREA
+ );
+
+ rrddim_add(st, "in", NULL, system_pagesize, 1024, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "out", NULL, -system_pagesize, 1024, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set(st, "in", vm_statistics.swapins);
+ rrddim_set(st, "out", vm_statistics.swapouts);
+ rrdset_done(st);
+ }
+#endif
+
+ // --------------------------------------------------------------------
+
+ if (likely(do_pgfaults)) {
+ st = rrdset_find_active_localhost("mem.pgfaults");
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "mem"
+ , "pgfaults"
+ , NULL
+ , "system"
+ , NULL
+ , "Memory Page Faults"
+ , "faults/s"
+ , "macos"
+ , "mach_smi"
+ , NETDATA_CHART_PRIO_MEM_SYSTEM_PGFAULTS
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+ rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
+
+ rrddim_add(st, "memory", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "cow", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "pagein", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "pageout", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+#if (defined __MAC_OS_X_VERSION_MIN_REQUIRED && __MAC_OS_X_VERSION_MIN_REQUIRED >= 1090)
+ rrddim_add(st, "compress", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "decompress", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+#endif
+ rrddim_add(st, "zero_fill", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "reactivate", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "purge", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set(st, "memory", vm_statistics.faults);
+ rrddim_set(st, "cow", vm_statistics.cow_faults);
+ rrddim_set(st, "pagein", vm_statistics.pageins);
+ rrddim_set(st, "pageout", vm_statistics.pageouts);
+#if (defined __MAC_OS_X_VERSION_MIN_REQUIRED && __MAC_OS_X_VERSION_MIN_REQUIRED >= 1090)
+ rrddim_set(st, "compress", vm_statistics.compressions);
+ rrddim_set(st, "decompress", vm_statistics.decompressions);
+#endif
+ rrddim_set(st, "zero_fill", vm_statistics.zero_fill_count);
+ rrddim_set(st, "reactivate", vm_statistics.reactivations);
+ rrddim_set(st, "purge", vm_statistics.purges);
+ rrdset_done(st);
+ }
+ }
+ }
+
+ // --------------------------------------------------------------------
+
+ return 0;
+}
diff --git a/collectors/macos.plugin/macos_sysctl.c b/collectors/macos.plugin/macos_sysctl.c
new file mode 100644
index 0000000..84f7541
--- /dev/null
+++ b/collectors/macos.plugin/macos_sysctl.c
@@ -0,0 +1,1526 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "plugin_macos.h"
+
+#include <Availability.h>
+// NEEDED BY: do_bandwidth
+#include <net/route.h>
+// NEEDED BY do_tcp...
+#include <sys/socketvar.h>
+#include <netinet/tcp_var.h>
+#include <netinet/tcp_fsm.h>
+// NEEDED BY do_udp..., do_ip...
+#include <netinet/ip_var.h>
+// NEEDED BY do_udp...
+#include <netinet/udp.h>
+#include <netinet/udp_var.h>
+// NEEDED BY do_icmp...
+#include <netinet/ip.h>
+#include <netinet/ip_icmp.h>
+#include <netinet/icmp_var.h>
+// NEEDED BY do_icmp6...
+#include <netinet/icmp6.h>
+// NEEDED BY do_uptime
+#include <time.h>
+
+// MacOS calculates load averages once every 5 seconds
+#define MIN_LOADAVG_UPDATE_EVERY 5
+
+int do_macos_sysctl(int update_every, usec_t dt) {
+ static int do_loadavg = -1, do_swap = -1, do_bandwidth = -1,
+ do_tcp_packets = -1, do_tcp_errors = -1, do_tcp_handshake = -1, do_ecn = -1,
+ do_tcpext_syscookies = -1, do_tcpext_ofo = -1, do_tcpext_connaborts = -1,
+ do_udp_packets = -1, do_udp_errors = -1, do_icmp_packets = -1, do_icmpmsg = -1,
+ do_ip_packets = -1, do_ip_fragsout = -1, do_ip_fragsin = -1, do_ip_errors = -1,
+ do_ip6_packets = -1, do_ip6_fragsout = -1, do_ip6_fragsin = -1, do_ip6_errors = -1,
+ do_icmp6 = -1, do_icmp6_redir = -1, do_icmp6_errors = -1, do_icmp6_echos = -1,
+ do_icmp6_router = -1, do_icmp6_neighbor = -1, do_icmp6_types = -1, do_uptime = -1;
+
+
+ if (unlikely(do_loadavg == -1)) {
+ do_loadavg = config_get_boolean("plugin:macos:sysctl", "enable load average", 1);
+ do_swap = config_get_boolean("plugin:macos:sysctl", "system swap", 1);
+ do_bandwidth = config_get_boolean("plugin:macos:sysctl", "bandwidth", 1);
+ do_tcp_packets = config_get_boolean("plugin:macos:sysctl", "ipv4 TCP packets", 1);
+ do_tcp_errors = config_get_boolean("plugin:macos:sysctl", "ipv4 TCP errors", 1);
+ do_tcp_handshake = config_get_boolean("plugin:macos:sysctl", "ipv4 TCP handshake issues", 1);
+ do_ecn = config_get_boolean_ondemand("plugin:macos:sysctl", "ECN packets", CONFIG_BOOLEAN_AUTO);
+ do_tcpext_syscookies = config_get_boolean_ondemand("plugin:macos:sysctl", "TCP SYN cookies", CONFIG_BOOLEAN_AUTO);
+ do_tcpext_ofo = config_get_boolean_ondemand("plugin:macos:sysctl", "TCP out-of-order queue", CONFIG_BOOLEAN_AUTO);
+ do_tcpext_connaborts = config_get_boolean_ondemand("plugin:macos:sysctl", "TCP connection aborts", CONFIG_BOOLEAN_AUTO);
+ do_udp_packets = config_get_boolean("plugin:macos:sysctl", "ipv4 UDP packets", 1);
+ do_udp_errors = config_get_boolean("plugin:macos:sysctl", "ipv4 UDP errors", 1);
+ do_icmp_packets = config_get_boolean("plugin:macos:sysctl", "ipv4 ICMP packets", 1);
+ do_icmpmsg = config_get_boolean("plugin:macos:sysctl", "ipv4 ICMP messages", 1);
+ do_ip_packets = config_get_boolean("plugin:macos:sysctl", "ipv4 packets", 1);
+ do_ip_fragsout = config_get_boolean("plugin:macos:sysctl", "ipv4 fragments sent", 1);
+ do_ip_fragsin = config_get_boolean("plugin:macos:sysctl", "ipv4 fragments assembly", 1);
+ do_ip_errors = config_get_boolean("plugin:macos:sysctl", "ipv4 errors", 1);
+ do_ip6_packets = config_get_boolean_ondemand("plugin:macos:sysctl", "ipv6 packets", CONFIG_BOOLEAN_AUTO);
+ do_ip6_fragsout = config_get_boolean_ondemand("plugin:macos:sysctl", "ipv6 fragments sent", CONFIG_BOOLEAN_AUTO);
+ do_ip6_fragsin = config_get_boolean_ondemand("plugin:macos:sysctl", "ipv6 fragments assembly", CONFIG_BOOLEAN_AUTO);
+ do_ip6_errors = config_get_boolean_ondemand("plugin:macos:sysctl", "ipv6 errors", CONFIG_BOOLEAN_AUTO);
+ do_icmp6 = config_get_boolean_ondemand("plugin:macos:sysctl", "icmp", CONFIG_BOOLEAN_AUTO);
+ do_icmp6_redir = config_get_boolean_ondemand("plugin:macos:sysctl", "icmp redirects", CONFIG_BOOLEAN_AUTO);
+ do_icmp6_errors = config_get_boolean_ondemand("plugin:macos:sysctl", "icmp errors", CONFIG_BOOLEAN_AUTO);
+ do_icmp6_echos = config_get_boolean_ondemand("plugin:macos:sysctl", "icmp echos", CONFIG_BOOLEAN_AUTO);
+ do_icmp6_router = config_get_boolean_ondemand("plugin:macos:sysctl", "icmp router", CONFIG_BOOLEAN_AUTO);
+ do_icmp6_neighbor = config_get_boolean_ondemand("plugin:macos:sysctl", "icmp neighbor", CONFIG_BOOLEAN_AUTO);
+ do_icmp6_types = config_get_boolean_ondemand("plugin:macos:sysctl", "icmp types", CONFIG_BOOLEAN_AUTO);
+ do_uptime = config_get_boolean("plugin:macos:sysctl", "system uptime", 1);
+ }
+
+ RRDSET *st;
+
+ int system_pagesize = getpagesize(); // wouldn't it be better to get value directly from hw.pagesize?
+ int i, n;
+ int common_error = 0;
+ size_t size;
+
+ // NEEDED BY: do_loadavg
+ static usec_t next_loadavg_dt = 0;
+ struct loadavg sysload;
+
+ // NEEDED BY: do_swap
+ struct xsw_usage swap_usage;
+
+ // NEEDED BY: do_bandwidth
+ int mib[6];
+ static char *ifstatdata = NULL;
+ char *lim, *next;
+ struct if_msghdr *ifm;
+ struct iftot {
+ u_long ift_ibytes;
+ u_long ift_obytes;
+ } iftot = {0, 0};
+
+ // NEEDED BY: do_tcp...
+ struct tcpstat tcpstat;
+ uint64_t tcps_states[TCP_NSTATES];
+
+ // NEEDED BY: do_udp...
+ struct udpstat udpstat;
+
+ // NEEDED BY: do_icmp...
+ struct icmpstat icmpstat;
+ struct icmp_total {
+ u_long msgs_in;
+ u_long msgs_out;
+ } icmp_total = {0, 0};
+
+ // NEEDED BY: do_ip...
+ struct ipstat ipstat;
+
+ // NEEDED BY: do_ip6...
+    /*
+     * Dirty workaround for the absence of /usr/include/netinet6/ip6_var.h.
+     * Struct ip6stat was copied from bsd/netinet6/ip6_var.h in the xnu sources.
+     * The same is done for scope6_var.h, which is missing on OS X < 10.11.
+     */
+#define IP6S_SRCRULE_COUNT 16
+
+#if (defined __MAC_OS_X_VERSION_MIN_REQUIRED && __MAC_OS_X_VERSION_MIN_REQUIRED < 101100)
+#ifndef _NETINET6_SCOPE6_VAR_H_
+#define _NETINET6_SCOPE6_VAR_H_
+#include <sys/appleapiopts.h>
+
+#define SCOPE6_ID_MAX 16
+#endif
+#else
+#include <netinet6/scope6_var.h>
+#endif
+
+ struct ip6stat {
+ u_quad_t ip6s_total; /* total packets received */
+ u_quad_t ip6s_tooshort; /* packet too short */
+ u_quad_t ip6s_toosmall; /* not enough data */
+ u_quad_t ip6s_fragments; /* fragments received */
+ u_quad_t ip6s_fragdropped; /* frags dropped(dups, out of space) */
+ u_quad_t ip6s_fragtimeout; /* fragments timed out */
+ u_quad_t ip6s_fragoverflow; /* fragments that exceeded limit */
+ u_quad_t ip6s_forward; /* packets forwarded */
+ u_quad_t ip6s_cantforward; /* packets rcvd for unreachable dest */
+ u_quad_t ip6s_redirectsent; /* packets forwarded on same net */
+ u_quad_t ip6s_delivered; /* datagrams delivered to upper level */
+ u_quad_t ip6s_localout; /* total ip packets generated here */
+ u_quad_t ip6s_odropped; /* lost packets due to nobufs, etc. */
+ u_quad_t ip6s_reassembled; /* total packets reassembled ok */
+ u_quad_t ip6s_atmfrag_rcvd; /* atomic fragments received */
+ u_quad_t ip6s_fragmented; /* datagrams successfully fragmented */
+ u_quad_t ip6s_ofragments; /* output fragments created */
+ u_quad_t ip6s_cantfrag; /* don't fragment flag was set, etc. */
+ u_quad_t ip6s_badoptions; /* error in option processing */
+ u_quad_t ip6s_noroute; /* packets discarded due to no route */
+ u_quad_t ip6s_badvers; /* ip6 version != 6 */
+ u_quad_t ip6s_rawout; /* total raw ip packets generated */
+ u_quad_t ip6s_badscope; /* scope error */
+ u_quad_t ip6s_notmember; /* don't join this multicast group */
+ u_quad_t ip6s_nxthist[256]; /* next header history */
+ u_quad_t ip6s_m1; /* one mbuf */
+ u_quad_t ip6s_m2m[32]; /* two or more mbuf */
+ u_quad_t ip6s_mext1; /* one ext mbuf */
+ u_quad_t ip6s_mext2m; /* two or more ext mbuf */
+ u_quad_t ip6s_exthdrtoolong; /* ext hdr are not continuous */
+ u_quad_t ip6s_nogif; /* no match gif found */
+ u_quad_t ip6s_toomanyhdr; /* discarded due to too many headers */
+
+ /*
+ * statistics for improvement of the source address selection
+ * algorithm:
+ */
+ /* number of times that address selection fails */
+ u_quad_t ip6s_sources_none;
+ /* number of times that an address on the outgoing I/F is chosen */
+ u_quad_t ip6s_sources_sameif[SCOPE6_ID_MAX];
+ /* number of times that an address on a non-outgoing I/F is chosen */
+ u_quad_t ip6s_sources_otherif[SCOPE6_ID_MAX];
+ /*
+ * number of times that an address that has the same scope
+ * from the destination is chosen.
+ */
+ u_quad_t ip6s_sources_samescope[SCOPE6_ID_MAX];
+ /*
+ * number of times that an address that has a different scope
+ * from the destination is chosen.
+ */
+ u_quad_t ip6s_sources_otherscope[SCOPE6_ID_MAX];
+ /* number of times that a deprecated address is chosen */
+ u_quad_t ip6s_sources_deprecated[SCOPE6_ID_MAX];
+
+ u_quad_t ip6s_forward_cachehit;
+ u_quad_t ip6s_forward_cachemiss;
+
+ /* number of times that each rule of source selection is applied. */
+ u_quad_t ip6s_sources_rule[IP6S_SRCRULE_COUNT];
+
+ /* number of times we ignored address on expensive secondary interfaces */
+ u_quad_t ip6s_sources_skip_expensive_secondary_if;
+
+ /* pkt dropped, no mbufs for control data */
+ u_quad_t ip6s_pktdropcntrl;
+
+ /* total packets trimmed/adjusted */
+ u_quad_t ip6s_adj;
+ /* hwcksum info discarded during adjustment */
+ u_quad_t ip6s_adj_hwcsum_clr;
+
+ /* duplicate address detection collisions */
+ u_quad_t ip6s_dad_collide;
+
+ /* DAD NS looped back */
+ u_quad_t ip6s_dad_loopcount;
+ } ip6stat;
+
+ // NEEDED BY: do_icmp6...
+ struct icmp6stat icmp6stat;
+ struct icmp6_total {
+ u_long msgs_in;
+ u_long msgs_out;
+ } icmp6_total = {0, 0};
+
+ // NEEDED BY: do_uptime
+ struct timespec boot_time, cur_time;
+
+ // --------------------------------------------------------------------
+
+ if (next_loadavg_dt <= dt) {
+ if (likely(do_loadavg)) {
+ if (unlikely(GETSYSCTL_BY_NAME("vm.loadavg", sysload))) {
+ do_loadavg = 0;
+ error("DISABLED: system.load");
+ } else {
+
+ st = rrdset_find_active_bytype_localhost("system", "load");
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "system"
+ , "load"
+ , NULL
+ , "load"
+ , NULL
+ , "System Load Average"
+ , "load"
+ , "macos"
+ , "sysctl"
+ , 100
+ , (update_every < MIN_LOADAVG_UPDATE_EVERY) ? MIN_LOADAVG_UPDATE_EVERY : update_every
+ , RRDSET_TYPE_LINE
+ );
+ rrddim_add(st, "load1", NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE);
+ rrddim_add(st, "load5", NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE);
+ rrddim_add(st, "load15", NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else rrdset_next(st);
+
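+                // sysload.ldavg[] holds fixed-point values; dividing by
+                // sysload.fscale yields the load average, multiplied by 1000
+                // here because the dimensions use a 1:1000 divider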
+ rrddim_set(st, "load1", (collected_number) ((double)sysload.ldavg[0] / sysload.fscale * 1000));
+ rrddim_set(st, "load5", (collected_number) ((double)sysload.ldavg[1] / sysload.fscale * 1000));
+ rrddim_set(st, "load15", (collected_number) ((double)sysload.ldavg[2] / sysload.fscale * 1000));
+ rrdset_done(st);
+ }
+ }
+
+        // use the chart's effective update frequency; 'st' may be
+        // uninitialized here when the load average chart is disabled
+        next_loadavg_dt = ((update_every < MIN_LOADAVG_UPDATE_EVERY) ? MIN_LOADAVG_UPDATE_EVERY : update_every) * USEC_PER_SEC;
+ }
+ else next_loadavg_dt -= dt;
+
+ // --------------------------------------------------------------------
+
+ if (likely(do_swap)) {
+ if (unlikely(GETSYSCTL_BY_NAME("vm.swapusage", swap_usage))) {
+ do_swap = 0;
+ error("DISABLED: system.swap");
+ } else {
+ st = rrdset_find_active_localhost("system.swap");
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "system"
+ , "swap"
+ , NULL
+ , "swap"
+ , NULL
+ , "System Swap"
+ , "MiB"
+ , "macos"
+ , "sysctl"
+ , 201
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+ rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
+
+ rrddim_add(st, "free", NULL, 1, 1048576, RRD_ALGORITHM_ABSOLUTE);
+ rrddim_add(st, "used", NULL, 1, 1048576, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else rrdset_next(st);
+
+ rrddim_set(st, "free", swap_usage.xsu_avail);
+ rrddim_set(st, "used", swap_usage.xsu_used);
+ rrdset_done(st);
+ }
+ }
+
+ // --------------------------------------------------------------------
+
+ if (likely(do_bandwidth)) {
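+        // walk the PF_ROUTE sysctl interface list (NET_RT_IFLIST2) and
+        // sum the byte counters of all interfaces into a single total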
+ mib[0] = CTL_NET;
+ mib[1] = PF_ROUTE;
+ mib[2] = 0;
+ mib[3] = AF_INET;
+ mib[4] = NET_RT_IFLIST2;
+ mib[5] = 0;
+ if (unlikely(sysctl(mib, 6, NULL, &size, NULL, 0))) {
+ error("MACOS: sysctl(%s...) failed: %s", "net interfaces", strerror(errno));
+ do_bandwidth = 0;
+ error("DISABLED: system.ipv4");
+ } else {
+ ifstatdata = reallocz(ifstatdata, size);
+ if (unlikely(sysctl(mib, 6, ifstatdata, &size, NULL, 0) < 0)) {
+ error("MACOS: sysctl(%s...) failed: %s", "net interfaces", strerror(errno));
+ do_bandwidth = 0;
+ error("DISABLED: system.ipv4");
+ } else {
+ lim = ifstatdata + size;
+ iftot.ift_ibytes = iftot.ift_obytes = 0;
+ for (next = ifstatdata; next < lim; ) {
+ ifm = (struct if_msghdr *)next;
+ next += ifm->ifm_msglen;
+
+ if (ifm->ifm_type == RTM_IFINFO2) {
+ struct if_msghdr2 *if2m = (struct if_msghdr2 *)ifm;
+
+ iftot.ift_ibytes += if2m->ifm_data.ifi_ibytes;
+ iftot.ift_obytes += if2m->ifm_data.ifi_obytes;
+ }
+ }
+ st = rrdset_find_active_localhost("system.ipv4");
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "system"
+ , "ipv4"
+ , NULL
+ , "network"
+ , NULL
+ , "IPv4 Bandwidth"
+ , "kilobits/s"
+ , "macos"
+ , "sysctl"
+ , 500
+ , update_every
+ , RRDSET_TYPE_AREA
+ );
+
+ rrddim_add(st, "InOctets", "received", 8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "OutOctets", "sent", -8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set(st, "InOctets", iftot.ift_ibytes);
+ rrddim_set(st, "OutOctets", iftot.ift_obytes);
+ rrdset_done(st);
+ }
+ }
+ }
+
+ // --------------------------------------------------------------------
+
+ // see http://net-snmp.sourceforge.net/docs/mibs/tcp.html
+ if (likely(do_tcp_packets || do_tcp_errors || do_tcp_handshake || do_tcpext_connaborts || do_tcpext_ofo || do_tcpext_syscookies || do_ecn)) {
+ if (unlikely(GETSYSCTL_BY_NAME("net.inet.tcp.stats", tcpstat))){
+ do_tcp_packets = 0;
+ error("DISABLED: ipv4.tcppackets");
+ do_tcp_errors = 0;
+ error("DISABLED: ipv4.tcperrors");
+ do_tcp_handshake = 0;
+ error("DISABLED: ipv4.tcphandshake");
+ do_tcpext_connaborts = 0;
+ error("DISABLED: ipv4.tcpconnaborts");
+ do_tcpext_ofo = 0;
+ error("DISABLED: ipv4.tcpofo");
+ do_tcpext_syscookies = 0;
+ error("DISABLED: ipv4.tcpsyncookies");
+ do_ecn = 0;
+ error("DISABLED: ipv4.ecnpkts");
+ } else {
+ if (likely(do_tcp_packets)) {
+ st = rrdset_find_active_localhost("ipv4.tcppackets");
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "ipv4"
+ , "tcppackets"
+ , NULL
+ , "tcp"
+ , NULL
+ , "IPv4 TCP Packets"
+ , "packets/s"
+ , "macos"
+ , "sysctl"
+ , 2600
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rrddim_add(st, "InSegs", "received", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "OutSegs", "sent", -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ } else
+ rrdset_next(st);
+
+ rrddim_set(st, "InSegs", tcpstat.tcps_rcvtotal);
+ rrddim_set(st, "OutSegs", tcpstat.tcps_sndtotal);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if (likely(do_tcp_errors)) {
+ st = rrdset_find_active_localhost("ipv4.tcperrors");
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "ipv4"
+ , "tcperrors"
+ , NULL
+ , "tcp"
+ , NULL
+ , "IPv4 TCP Errors"
+ , "packets/s"
+ , "macos"
+ , "sysctl"
+ , 2700
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+ rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
+
+ rrddim_add(st, "InErrs", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "InCsumErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "RetransSegs", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ } else
+ rrdset_next(st);
+
+ rrddim_set(st, "InErrs", tcpstat.tcps_rcvbadoff + tcpstat.tcps_rcvshort);
+ rrddim_set(st, "InCsumErrors", tcpstat.tcps_rcvbadsum);
+ rrddim_set(st, "RetransSegs", tcpstat.tcps_sndrexmitpack);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if (likely(do_tcp_handshake)) {
+ st = rrdset_find_active_localhost("ipv4.tcphandshake");
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "ipv4"
+ , "tcphandshake"
+ , NULL
+ , "tcp"
+ , NULL
+ , "IPv4 TCP Handshake Issues"
+ , "events/s"
+ , "macos"
+ , "sysctl"
+ , 2900
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+ rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
+
+ rrddim_add(st, "EstabResets", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "ActiveOpens", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "PassiveOpens", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "AttemptFails", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ } else
+ rrdset_next(st);
+
+ rrddim_set(st, "EstabResets", tcpstat.tcps_drops);
+ rrddim_set(st, "ActiveOpens", tcpstat.tcps_connattempt);
+ rrddim_set(st, "PassiveOpens", tcpstat.tcps_accepts);
+ rrddim_set(st, "AttemptFails", tcpstat.tcps_conndrops);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if (do_tcpext_connaborts == CONFIG_BOOLEAN_YES || (do_tcpext_connaborts == CONFIG_BOOLEAN_AUTO &&
+ (tcpstat.tcps_rcvpackafterwin ||
+ tcpstat.tcps_rcvafterclose ||
+ tcpstat.tcps_rcvmemdrop ||
+ tcpstat.tcps_persistdrop ||
+ netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
+ do_tcpext_connaborts = CONFIG_BOOLEAN_YES;
+ st = rrdset_find_active_localhost("ipv4.tcpconnaborts");
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "ipv4"
+ , "tcpconnaborts"
+ , NULL
+ , "tcp"
+ , NULL
+ , "TCP Connection Aborts"
+ , "connections/s"
+ , "macos"
+ , "sysctl"
+ , 3010
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rrddim_add(st, "TCPAbortOnData", "baddata", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "TCPAbortOnClose", "userclosed", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "TCPAbortOnMemory", "nomemory", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "TCPAbortOnTimeout", "timeout", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set(st, "TCPAbortOnData", tcpstat.tcps_rcvpackafterwin);
+ rrddim_set(st, "TCPAbortOnClose", tcpstat.tcps_rcvafterclose);
+ rrddim_set(st, "TCPAbortOnMemory", tcpstat.tcps_rcvmemdrop);
+ rrddim_set(st, "TCPAbortOnTimeout", tcpstat.tcps_persistdrop);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if (do_tcpext_ofo == CONFIG_BOOLEAN_YES || (do_tcpext_ofo == CONFIG_BOOLEAN_AUTO &&
+ (tcpstat.tcps_rcvoopack ||
+ netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
+ do_tcpext_ofo = CONFIG_BOOLEAN_YES;
+ st = rrdset_find_active_localhost("ipv4.tcpofo");
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "ipv4"
+ , "tcpofo"
+ , NULL
+ , "tcp"
+ , NULL
+ , "TCP Out-Of-Order Queue"
+ , "packets/s"
+ , "macos"
+ , "sysctl"
+ , 3050
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rrddim_add(st, "TCPOFOQueue", "inqueue", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set(st, "TCPOFOQueue", tcpstat.tcps_rcvoopack);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if (do_tcpext_syscookies == CONFIG_BOOLEAN_YES || (do_tcpext_syscookies == CONFIG_BOOLEAN_AUTO &&
+ (tcpstat.tcps_sc_sendcookie ||
+ tcpstat.tcps_sc_recvcookie ||
+ tcpstat.tcps_sc_zonefail ||
+ netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
+ do_tcpext_syscookies = CONFIG_BOOLEAN_YES;
+
+ st = rrdset_find_active_localhost("ipv4.tcpsyncookies");
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "ipv4"
+ , "tcpsyncookies"
+ , NULL
+ , "tcp"
+ , NULL
+ , "TCP SYN Cookies"
+ , "packets/s"
+ , "macos"
+ , "sysctl"
+ , 3100
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rrddim_add(st, "SyncookiesRecv", "received", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "SyncookiesSent", "sent", -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "SyncookiesFailed", "failed", -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set(st, "SyncookiesRecv", tcpstat.tcps_sc_recvcookie);
+ rrddim_set(st, "SyncookiesSent", tcpstat.tcps_sc_sendcookie);
+ rrddim_set(st, "SyncookiesFailed", tcpstat.tcps_sc_zonefail);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+
+#if (defined __MAC_OS_X_VERSION_MIN_REQUIRED && __MAC_OS_X_VERSION_MIN_REQUIRED >= 101100)
+ if (do_ecn == CONFIG_BOOLEAN_YES || (do_ecn == CONFIG_BOOLEAN_AUTO &&
+ (tcpstat.tcps_ecn_recv_ce ||
+ tcpstat.tcps_ecn_not_supported ||
+ netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
+ do_ecn = CONFIG_BOOLEAN_YES;
+ st = rrdset_find_active_localhost("ipv4.ecnpkts");
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "ipv4"
+ , "ecnpkts"
+ , NULL
+ , "ecn"
+ , NULL
+ , "IPv4 ECN Statistics"
+ , "packets/s"
+ , "macos"
+ , "sysctl"
+ , 8700
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+ rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
+
+ rrddim_add(st, "InCEPkts", "CEP", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "InNoECTPkts", "NoECTP", -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set(st, "InCEPkts", tcpstat.tcps_ecn_recv_ce);
+ rrddim_set(st, "InNoECTPkts", tcpstat.tcps_ecn_not_supported);
+ rrdset_done(st);
+ }
+#endif
+
+ }
+ }
+
+ // --------------------------------------------------------------------
+
+ // see http://net-snmp.sourceforge.net/docs/mibs/udp.html
+ if (likely(do_udp_packets || do_udp_errors)) {
+ if (unlikely(GETSYSCTL_BY_NAME("net.inet.udp.stats", udpstat))) {
+ do_udp_packets = 0;
+ error("DISABLED: ipv4.udppackets");
+ do_udp_errors = 0;
+ error("DISABLED: ipv4.udperrors");
+ } else {
+ if (likely(do_udp_packets)) {
+ st = rrdset_find_active_localhost("ipv4.udppackets");
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "ipv4"
+ , "udppackets"
+ , NULL
+ , "udp"
+ , NULL
+ , "IPv4 UDP Packets"
+ , "packets/s"
+ , "macos"
+ , "sysctl"
+ , 2601
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rrddim_add(st, "InDatagrams", "received", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "OutDatagrams", "sent", -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ } else
+ rrdset_next(st);
+
+ rrddim_set(st, "InDatagrams", udpstat.udps_ipackets);
+ rrddim_set(st, "OutDatagrams", udpstat.udps_opackets);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if (likely(do_udp_errors)) {
+ st = rrdset_find_active_localhost("ipv4.udperrors");
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "ipv4"
+ , "udperrors"
+ , NULL
+ , "udp"
+ , NULL
+ , "IPv4 UDP Errors"
+ , "events/s"
+ , "macos"
+ , "sysctl"
+ , 2701
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+ rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
+
+ rrddim_add(st, "RcvbufErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "InErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "NoPorts", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "InCsumErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+#if (defined __MAC_OS_X_VERSION_MIN_REQUIRED && __MAC_OS_X_VERSION_MIN_REQUIRED >= 1090)
+ rrddim_add(st, "IgnoredMulti", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+#endif
+ } else
+ rrdset_next(st);
+
+ rrddim_set(st, "InErrors", udpstat.udps_hdrops + udpstat.udps_badlen);
+ rrddim_set(st, "NoPorts", udpstat.udps_noport);
+ rrddim_set(st, "RcvbufErrors", udpstat.udps_fullsock);
+#if (defined __MAC_OS_X_VERSION_MIN_REQUIRED && __MAC_OS_X_VERSION_MIN_REQUIRED >= 1090)
+ rrddim_set(st, "InCsumErrors", udpstat.udps_badsum + udpstat.udps_nosum);
+ rrddim_set(st, "IgnoredMulti", udpstat.udps_filtermcast);
+#else
+ rrddim_set(st, "InCsumErrors", udpstat.udps_badsum);
+#endif
+ rrdset_done(st);
+ }
+ }
+ }
+
+ // --------------------------------------------------------------------
+
+ if (likely(do_icmp_packets || do_icmpmsg)) {
+ if (unlikely(GETSYSCTL_BY_NAME("net.inet.icmp.stats", icmpstat))) {
+ do_icmp_packets = 0;
+ error("DISABLED: ipv4.icmp");
+ error("DISABLED: ipv4.icmp_errors");
+ do_icmpmsg = 0;
+ error("DISABLED: ipv4.icmpmsg");
+ } else {
+ for (i = 0; i <= ICMP_MAXTYPE; i++) {
+ icmp_total.msgs_in += icmpstat.icps_inhist[i];
+ icmp_total.msgs_out += icmpstat.icps_outhist[i];
+ }
+ icmp_total.msgs_in += icmpstat.icps_badcode + icmpstat.icps_badlen + icmpstat.icps_checksum + icmpstat.icps_tooshort;
+
+ // --------------------------------------------------------------------
+
+ if (likely(do_icmp_packets)) {
+ st = rrdset_find_active_localhost("ipv4.icmp");
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "ipv4"
+ , "icmp"
+ , NULL
+ , "icmp"
+ , NULL
+ , "IPv4 ICMP Packets"
+ , "packets/s"
+ , "macos"
+ , "sysctl"
+ , 2602
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rrddim_add(st, "InMsgs", "received", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "OutMsgs", "sent", -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ } else
+ rrdset_next(st);
+
+ rrddim_set(st, "InMsgs", icmp_total.msgs_in);
+ rrddim_set(st, "OutMsgs", icmp_total.msgs_out);
+
+ rrdset_done(st);
+
+ // --------------------------------------------------------------------
+
+ st = rrdset_find_active_localhost("ipv4.icmp_errors");
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "ipv4"
+ , "icmp_errors"
+ , NULL
+ , "icmp"
+ , NULL
+ , "IPv4 ICMP Errors"
+ , "packets/s"
+ , "macos"
+ , "sysctl"
+ , 2603
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rrddim_add(st, "InErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "OutErrors", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "InCsumErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ } else
+ rrdset_next(st);
+
+ rrddim_set(st, "InErrors", icmpstat.icps_badcode + icmpstat.icps_badlen + icmpstat.icps_checksum + icmpstat.icps_tooshort);
+ rrddim_set(st, "OutErrors", icmpstat.icps_error);
+ rrddim_set(st, "InCsumErrors", icmpstat.icps_checksum);
+
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if (likely(do_icmpmsg)) {
+ st = rrdset_find_active_localhost("ipv4.icmpmsg");
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "ipv4"
+ , "icmpmsg"
+ , NULL
+ , "icmp"
+ , NULL
+ , "IPv4 ICMP Messages"
+ , "packets/s"
+ , "macos"
+ , "sysctl"
+ , 2604
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rrddim_add(st, "InEchoReps", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "OutEchoReps", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "InEchos", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "OutEchos", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ } else
+ rrdset_next(st);
+
+ rrddim_set(st, "InEchoReps", icmpstat.icps_inhist[ICMP_ECHOREPLY]);
+ rrddim_set(st, "OutEchoReps", icmpstat.icps_outhist[ICMP_ECHOREPLY]);
+ rrddim_set(st, "InEchos", icmpstat.icps_inhist[ICMP_ECHO]);
+ rrddim_set(st, "OutEchos", icmpstat.icps_outhist[ICMP_ECHO]);
+
+ rrdset_done(st);
+ }
+ }
+ }
+
+ // --------------------------------------------------------------------
+
+ // see also http://net-snmp.sourceforge.net/docs/mibs/ip.html
+ if (likely(do_ip_packets || do_ip_fragsout || do_ip_fragsin || do_ip_errors)) {
+ if (unlikely(GETSYSCTL_BY_NAME("net.inet.ip.stats", ipstat))) {
+ do_ip_packets = 0;
+ error("DISABLED: ipv4.packets");
+ do_ip_fragsout = 0;
+ error("DISABLED: ipv4.fragsout");
+ do_ip_fragsin = 0;
+ error("DISABLED: ipv4.fragsin");
+ do_ip_errors = 0;
+ error("DISABLED: ipv4.errors");
+ } else {
+ if (likely(do_ip_packets)) {
+ st = rrdset_find_active_localhost("ipv4.packets");
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "ipv4"
+ , "packets"
+ , NULL
+ , "packets"
+ , NULL
+ , "IPv4 Packets"
+ , "packets/s"
+ , "macos"
+ , "sysctl"
+ , 3000
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rrddim_add(st, "InReceives", "received", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "OutRequests", "sent", -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "ForwDatagrams", "forwarded", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "InDelivers", "delivered", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ } else
+ rrdset_next(st);
+
+ rrddim_set(st, "OutRequests", ipstat.ips_localout);
+ rrddim_set(st, "InReceives", ipstat.ips_total);
+ rrddim_set(st, "ForwDatagrams", ipstat.ips_forward);
+ rrddim_set(st, "InDelivers", ipstat.ips_delivered);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if (likely(do_ip_fragsout)) {
+ st = rrdset_find_active_localhost("ipv4.fragsout");
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "ipv4"
+ , "fragsout"
+ , NULL
+ , "fragments"
+ , NULL
+ , "IPv4 Fragments Sent"
+ , "packets/s"
+ , "macos"
+ , "sysctl"
+ , 3010
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+ rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
+
+ rrddim_add(st, "FragOKs", "ok", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "FragFails", "failed", -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "FragCreates", "created", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ } else
+ rrdset_next(st);
+
+ rrddim_set(st, "FragOKs", ipstat.ips_fragmented);
+ rrddim_set(st, "FragFails", ipstat.ips_cantfrag);
+ rrddim_set(st, "FragCreates", ipstat.ips_ofragments);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if (likely(do_ip_fragsin)) {
+ st = rrdset_find_active_localhost("ipv4.fragsin");
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "ipv4"
+ , "fragsin"
+ , NULL
+ , "fragments"
+ , NULL
+ , "IPv4 Fragments Reassembly"
+ , "packets/s"
+ , "macos"
+ , "sysctl"
+ , 3011
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+ rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
+
+ rrddim_add(st, "ReasmOKs", "ok", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "ReasmFails", "failed", -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "ReasmReqds", "all", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ } else
+ rrdset_next(st);
+
+ rrddim_set(st, "ReasmOKs", ipstat.ips_fragments);
+ rrddim_set(st, "ReasmFails", ipstat.ips_fragdropped);
+ rrddim_set(st, "ReasmReqds", ipstat.ips_reassembled);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if (likely(do_ip_errors)) {
+ st = rrdset_find_active_localhost("ipv4.errors");
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "ipv4"
+ , "errors"
+ , NULL
+ , "errors"
+ , NULL
+ , "IPv4 Errors"
+ , "packets/s"
+ , "macos"
+ , "sysctl"
+ , 3002
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+ rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
+
+ rrddim_add(st, "InDiscards", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "OutDiscards", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+
+ rrddim_add(st, "InHdrErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "OutNoRoutes", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+
+ rrddim_add(st, "InAddrErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "InUnknownProtos", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ } else
+ rrdset_next(st);
+
+ rrddim_set(st, "InDiscards", ipstat.ips_badsum + ipstat.ips_tooshort + ipstat.ips_toosmall + ipstat.ips_toolong);
+ rrddim_set(st, "OutDiscards", ipstat.ips_odropped);
+ rrddim_set(st, "InHdrErrors", ipstat.ips_badhlen + ipstat.ips_badlen + ipstat.ips_badoptions + ipstat.ips_badvers);
+ rrddim_set(st, "InAddrErrors", ipstat.ips_badaddr);
+ rrddim_set(st, "InUnknownProtos", ipstat.ips_noproto);
+ rrddim_set(st, "OutNoRoutes", ipstat.ips_noroute);
+ rrdset_done(st);
+ }
+ }
+ }
+
+ // --------------------------------------------------------------------
+
+ if (likely(do_ip6_packets || do_ip6_fragsout || do_ip6_fragsin || do_ip6_errors)) {
+ if (unlikely(GETSYSCTL_BY_NAME("net.inet6.ip6.stats", ip6stat))) {
+ do_ip6_packets = 0;
+ error("DISABLED: ipv6.packets");
+ do_ip6_fragsout = 0;
+ error("DISABLED: ipv6.fragsout");
+ do_ip6_fragsin = 0;
+ error("DISABLED: ipv6.fragsin");
+ do_ip6_errors = 0;
+ error("DISABLED: ipv6.errors");
+ } else {
+ if (do_ip6_packets == CONFIG_BOOLEAN_YES || (do_ip6_packets == CONFIG_BOOLEAN_AUTO &&
+ (ip6stat.ip6s_localout ||
+ ip6stat.ip6s_total ||
+ ip6stat.ip6s_forward ||
+ ip6stat.ip6s_delivered ||
+ netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
+ do_ip6_packets = CONFIG_BOOLEAN_YES;
+ st = rrdset_find_active_localhost("ipv6.packets");
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "ipv6"
+ , "packets"
+ , NULL
+ , "packets"
+ , NULL
+ , "IPv6 Packets"
+ , "packets/s"
+ , "macos"
+ , "sysctl"
+ , 3000
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rrddim_add(st, "received", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "sent", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "forwarded", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "delivers", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ } else
+ rrdset_next(st);
+
+ rrddim_set(st, "sent", ip6stat.ip6s_localout);
+ rrddim_set(st, "received", ip6stat.ip6s_total);
+ rrddim_set(st, "forwarded", ip6stat.ip6s_forward);
+ rrddim_set(st, "delivers", ip6stat.ip6s_delivered);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if (do_ip6_fragsout == CONFIG_BOOLEAN_YES || (do_ip6_fragsout == CONFIG_BOOLEAN_AUTO &&
+ (ip6stat.ip6s_fragmented ||
+ ip6stat.ip6s_cantfrag ||
+ ip6stat.ip6s_ofragments ||
+ netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
+ do_ip6_fragsout = CONFIG_BOOLEAN_YES;
+ st = rrdset_find_active_localhost("ipv6.fragsout");
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "ipv6"
+ , "fragsout"
+ , NULL
+ , "fragments"
+ , NULL
+ , "IPv6 Fragments Sent"
+ , "packets/s"
+ , "macos"
+ , "sysctl"
+ , 3010
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+ rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
+
+ rrddim_add(st, "ok", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "failed", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "all", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ } else
+ rrdset_next(st);
+
+ rrddim_set(st, "ok", ip6stat.ip6s_fragmented);
+ rrddim_set(st, "failed", ip6stat.ip6s_cantfrag);
+ rrddim_set(st, "all", ip6stat.ip6s_ofragments);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if (do_ip6_fragsin == CONFIG_BOOLEAN_YES || (do_ip6_fragsin == CONFIG_BOOLEAN_AUTO &&
+ (ip6stat.ip6s_reassembled ||
+ ip6stat.ip6s_fragdropped ||
+ ip6stat.ip6s_fragtimeout ||
+ ip6stat.ip6s_fragments ||
+ netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
+ do_ip6_fragsin = CONFIG_BOOLEAN_YES;
+ st = rrdset_find_active_localhost("ipv6.fragsin");
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "ipv6"
+ , "fragsin"
+ , NULL
+ , "fragments"
+ , NULL
+ , "IPv6 Fragments Reassembly"
+ , "packets/s"
+ , "macos"
+ , "sysctl"
+ , 3011
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+ rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
+
+ rrddim_add(st, "ok", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "failed", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "timeout", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "all", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ } else
+ rrdset_next(st);
+
+ rrddim_set(st, "ok", ip6stat.ip6s_reassembled);
+ rrddim_set(st, "failed", ip6stat.ip6s_fragdropped);
+ rrddim_set(st, "timeout", ip6stat.ip6s_fragtimeout);
+ rrddim_set(st, "all", ip6stat.ip6s_fragments);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if (do_ip6_errors == CONFIG_BOOLEAN_YES || (do_ip6_errors == CONFIG_BOOLEAN_AUTO &&
+ (ip6stat.ip6s_toosmall ||
+ ip6stat.ip6s_odropped ||
+ ip6stat.ip6s_badoptions ||
+ ip6stat.ip6s_badvers ||
+ ip6stat.ip6s_exthdrtoolong ||
+ ip6stat.ip6s_sources_none ||
+ ip6stat.ip6s_tooshort ||
+ ip6stat.ip6s_cantforward ||
+ ip6stat.ip6s_noroute ||
+ netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
+ do_ip6_errors = CONFIG_BOOLEAN_YES;
+ st = rrdset_find_active_localhost("ipv6.errors");
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "ipv6"
+ , "errors"
+ , NULL
+ , "errors"
+ , NULL
+ , "IPv6 Errors"
+ , "packets/s"
+ , "macos"
+ , "sysctl"
+ , 3002
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+ rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
+
+ rrddim_add(st, "InDiscards", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "OutDiscards", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+
+ rrddim_add(st, "InHdrErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "InAddrErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "InTruncatedPkts", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "InNoRoutes", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+
+ rrddim_add(st, "OutNoRoutes", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ } else
+ rrdset_next(st);
+
+ rrddim_set(st, "InDiscards", ip6stat.ip6s_toosmall);
+ rrddim_set(st, "OutDiscards", ip6stat.ip6s_odropped);
+
+ rrddim_set(st, "InHdrErrors",
+ ip6stat.ip6s_badoptions + ip6stat.ip6s_badvers + ip6stat.ip6s_exthdrtoolong);
+ rrddim_set(st, "InAddrErrors", ip6stat.ip6s_sources_none);
+ rrddim_set(st, "InTruncatedPkts", ip6stat.ip6s_tooshort);
+ rrddim_set(st, "InNoRoutes", ip6stat.ip6s_cantforward);
+
+ rrddim_set(st, "OutNoRoutes", ip6stat.ip6s_noroute);
+ rrdset_done(st);
+ }
+ }
+ }
+
+ // --------------------------------------------------------------------
+
+ if (likely(do_icmp6 || do_icmp6_redir || do_icmp6_errors || do_icmp6_echos || do_icmp6_router || do_icmp6_neighbor || do_icmp6_types)) {
+ if (unlikely(GETSYSCTL_BY_NAME("net.inet6.icmp6.stats", icmp6stat))) {
+            do_icmp6 = do_icmp6_redir = do_icmp6_errors = 0;
+            do_icmp6_echos = do_icmp6_router = do_icmp6_neighbor = do_icmp6_types = 0;
+            error("DISABLED: ipv6.icmp (and all other ipv6 icmp charts)");
+ } else {
+ for (i = 0; i <= ICMP6_MAXTYPE; i++) {
+ icmp6_total.msgs_in += icmp6stat.icp6s_inhist[i];
+ icmp6_total.msgs_out += icmp6stat.icp6s_outhist[i];
+ }
+ icmp6_total.msgs_in += icmp6stat.icp6s_badcode + icmp6stat.icp6s_badlen + icmp6stat.icp6s_checksum + icmp6stat.icp6s_tooshort;
+ if (do_icmp6 == CONFIG_BOOLEAN_YES || (do_icmp6 == CONFIG_BOOLEAN_AUTO &&
+ (icmp6_total.msgs_in ||
+ icmp6_total.msgs_out ||
+ netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
+ do_icmp6 = CONFIG_BOOLEAN_YES;
+ st = rrdset_find_active_localhost("ipv6.icmp");
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "ipv6"
+ , "icmp"
+ , NULL
+ , "icmp"
+ , NULL
+ , "IPv6 ICMP Messages"
+ , "messages/s"
+ , "macos"
+ , "sysctl"
+ , 10000
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rrddim_add(st, "received", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "sent", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ } else
+ rrdset_next(st);
+
+ rrddim_set(st, "sent", icmp6_total.msgs_in);
+ rrddim_set(st, "received", icmp6_total.msgs_out);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if (do_icmp6_redir == CONFIG_BOOLEAN_YES || (do_icmp6_redir == CONFIG_BOOLEAN_AUTO &&
+ (icmp6stat.icp6s_inhist[ND_REDIRECT] ||
+ icmp6stat.icp6s_outhist[ND_REDIRECT] ||
+ netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
+ do_icmp6_redir = CONFIG_BOOLEAN_YES;
+ st = rrdset_find_active_localhost("ipv6.icmpredir");
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "ipv6"
+ , "icmpredir"
+ , NULL
+ , "icmp"
+ , NULL
+ , "IPv6 ICMP Redirects"
+ , "redirects/s"
+ , "macos"
+ , "sysctl"
+ , 10050
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rrddim_add(st, "received", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "sent", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ } else
+ rrdset_next(st);
+
+ rrddim_set(st, "sent", icmp6stat.icp6s_inhist[ND_REDIRECT]);
+ rrddim_set(st, "received", icmp6stat.icp6s_outhist[ND_REDIRECT]);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if (do_icmp6_errors == CONFIG_BOOLEAN_YES || (do_icmp6_errors == CONFIG_BOOLEAN_AUTO &&
+ (icmp6stat.icp6s_badcode ||
+ icmp6stat.icp6s_badlen ||
+ icmp6stat.icp6s_checksum ||
+ icmp6stat.icp6s_tooshort ||
+ icmp6stat.icp6s_error ||
+ icmp6stat.icp6s_inhist[ICMP6_DST_UNREACH] ||
+ icmp6stat.icp6s_inhist[ICMP6_TIME_EXCEEDED] ||
+ icmp6stat.icp6s_inhist[ICMP6_PARAM_PROB] ||
+ icmp6stat.icp6s_outhist[ICMP6_DST_UNREACH] ||
+ icmp6stat.icp6s_outhist[ICMP6_TIME_EXCEEDED] ||
+ icmp6stat.icp6s_outhist[ICMP6_PARAM_PROB] ||
+ netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
+ do_icmp6_errors = CONFIG_BOOLEAN_YES;
+ st = rrdset_find_active_localhost("ipv6.icmperrors");
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "ipv6"
+ , "icmperrors"
+ , NULL
+ , "icmp"
+ , NULL
+ , "IPv6 ICMP Errors"
+ , "errors/s"
+ , "macos"
+ , "sysctl"
+ , 10100
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rrddim_add(st, "InErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "OutErrors", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+
+ rrddim_add(st, "InCsumErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "InDestUnreachs", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "InPktTooBigs", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "InTimeExcds", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "InParmProblems", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "OutDestUnreachs", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "OutTimeExcds", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "OutParmProblems", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ } else
+ rrdset_next(st);
+
+ rrddim_set(st, "InErrors", icmp6stat.icp6s_badcode + icmp6stat.icp6s_badlen + icmp6stat.icp6s_checksum + icmp6stat.icp6s_tooshort);
+ rrddim_set(st, "OutErrors", icmp6stat.icp6s_error);
+ rrddim_set(st, "InCsumErrors", icmp6stat.icp6s_checksum);
+ rrddim_set(st, "InDestUnreachs", icmp6stat.icp6s_inhist[ICMP6_DST_UNREACH]);
+ rrddim_set(st, "InPktTooBigs", icmp6stat.icp6s_badlen);
+ rrddim_set(st, "InTimeExcds", icmp6stat.icp6s_inhist[ICMP6_TIME_EXCEEDED]);
+ rrddim_set(st, "InParmProblems", icmp6stat.icp6s_inhist[ICMP6_PARAM_PROB]);
+ rrddim_set(st, "OutDestUnreachs", icmp6stat.icp6s_outhist[ICMP6_DST_UNREACH]);
+ rrddim_set(st, "OutTimeExcds", icmp6stat.icp6s_outhist[ICMP6_TIME_EXCEEDED]);
+ rrddim_set(st, "OutParmProblems", icmp6stat.icp6s_outhist[ICMP6_PARAM_PROB]);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if (do_icmp6_echos == CONFIG_BOOLEAN_YES || (do_icmp6_echos == CONFIG_BOOLEAN_AUTO &&
+ (icmp6stat.icp6s_inhist[ICMP6_ECHO_REQUEST] ||
+ icmp6stat.icp6s_outhist[ICMP6_ECHO_REQUEST] ||
+ icmp6stat.icp6s_inhist[ICMP6_ECHO_REPLY] ||
+ icmp6stat.icp6s_outhist[ICMP6_ECHO_REPLY] ||
+ netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
+ do_icmp6_echos = CONFIG_BOOLEAN_YES;
+ st = rrdset_find_active_localhost("ipv6.icmpechos");
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "ipv6"
+ , "icmpechos"
+ , NULL
+ , "icmp"
+ , NULL
+ , "IPv6 ICMP Echo"
+ , "messages/s"
+ , "macos"
+ , "sysctl"
+ , 10200
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rrddim_add(st, "InEchos", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "OutEchos", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "InEchoReplies", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "OutEchoReplies", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ } else
+ rrdset_next(st);
+
+ rrddim_set(st, "InEchos", icmp6stat.icp6s_inhist[ICMP6_ECHO_REQUEST]);
+ rrddim_set(st, "OutEchos", icmp6stat.icp6s_outhist[ICMP6_ECHO_REQUEST]);
+ rrddim_set(st, "InEchoReplies", icmp6stat.icp6s_inhist[ICMP6_ECHO_REPLY]);
+ rrddim_set(st, "OutEchoReplies", icmp6stat.icp6s_outhist[ICMP6_ECHO_REPLY]);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if (do_icmp6_router == CONFIG_BOOLEAN_YES || (do_icmp6_router == CONFIG_BOOLEAN_AUTO &&
+ (icmp6stat.icp6s_inhist[ND_ROUTER_SOLICIT] ||
+ icmp6stat.icp6s_outhist[ND_ROUTER_SOLICIT] ||
+ icmp6stat.icp6s_inhist[ND_ROUTER_ADVERT] ||
+ icmp6stat.icp6s_outhist[ND_ROUTER_ADVERT] ||
+ netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
+ do_icmp6_router = CONFIG_BOOLEAN_YES;
+ st = rrdset_find_active_localhost("ipv6.icmprouter");
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "ipv6"
+ , "icmprouter"
+ , NULL
+ , "icmp"
+ , NULL
+ , "IPv6 Router Messages"
+ , "messages/s"
+ , "macos"
+ , "sysctl"
+ , 10400
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rrddim_add(st, "InSolicits", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "OutSolicits", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "InAdvertisements", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "OutAdvertisements", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ } else
+ rrdset_next(st);
+
+ rrddim_set(st, "InSolicits", icmp6stat.icp6s_inhist[ND_ROUTER_SOLICIT]);
+ rrddim_set(st, "OutSolicits", icmp6stat.icp6s_outhist[ND_ROUTER_SOLICIT]);
+ rrddim_set(st, "InAdvertisements", icmp6stat.icp6s_inhist[ND_ROUTER_ADVERT]);
+ rrddim_set(st, "OutAdvertisements", icmp6stat.icp6s_outhist[ND_ROUTER_ADVERT]);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if (do_icmp6_neighbor == CONFIG_BOOLEAN_YES || (do_icmp6_neighbor == CONFIG_BOOLEAN_AUTO &&
+ (icmp6stat.icp6s_inhist[ND_NEIGHBOR_SOLICIT] ||
+ icmp6stat.icp6s_outhist[ND_NEIGHBOR_SOLICIT] ||
+ icmp6stat.icp6s_inhist[ND_NEIGHBOR_ADVERT] ||
+ icmp6stat.icp6s_outhist[ND_NEIGHBOR_ADVERT] ||
+ netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
+ do_icmp6_neighbor = CONFIG_BOOLEAN_YES;
+ st = rrdset_find_active_localhost("ipv6.icmpneighbor");
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "ipv6"
+ , "icmpneighbor"
+ , NULL
+ , "icmp"
+ , NULL
+ , "IPv6 Neighbor Messages"
+ , "messages/s"
+ , "macos"
+ , "sysctl"
+ , 10500
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rrddim_add(st, "InSolicits", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "OutSolicits", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "InAdvertisements", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "OutAdvertisements", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ } else
+ rrdset_next(st);
+
+ rrddim_set(st, "InSolicits", icmp6stat.icp6s_inhist[ND_NEIGHBOR_SOLICIT]);
+ rrddim_set(st, "OutSolicits", icmp6stat.icp6s_outhist[ND_NEIGHBOR_SOLICIT]);
+ rrddim_set(st, "InAdvertisements", icmp6stat.icp6s_inhist[ND_NEIGHBOR_ADVERT]);
+ rrddim_set(st, "OutAdvertisements", icmp6stat.icp6s_outhist[ND_NEIGHBOR_ADVERT]);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
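+            // ICMPv6 message types charted below: 1 = destination unreachable,
+            // 128/129 = echo request/reply, 133 = router solicitation,
+            // 135/136 = neighbor solicitation/advertisement,
+            // 143 = MLDv2 multicast listener report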
+            if (do_icmp6_types == CONFIG_BOOLEAN_YES || (do_icmp6_types == CONFIG_BOOLEAN_AUTO &&
+                                                         (icmp6stat.icp6s_inhist[1] ||
+                                                          icmp6stat.icp6s_inhist[128] ||
+                                                          icmp6stat.icp6s_inhist[129] ||
+                                                          icmp6stat.icp6s_inhist[136] ||
+                                                          icmp6stat.icp6s_outhist[1] ||
+                                                          icmp6stat.icp6s_outhist[128] ||
+                                                          icmp6stat.icp6s_outhist[129] ||
+                                                          icmp6stat.icp6s_outhist[133] ||
+                                                          icmp6stat.icp6s_outhist[135] ||
+                                                          icmp6stat.icp6s_outhist[143] ||
+                                                          netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
+ do_icmp6_types = CONFIG_BOOLEAN_YES;
+ st = rrdset_find_active_localhost("ipv6.icmptypes");
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "ipv6"
+ , "icmptypes"
+ , NULL
+ , "icmp"
+ , NULL
+ , "IPv6 ICMP Types"
+ , "messages/s"
+ , "macos"
+ , "sysctl"
+ , 10700
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rrddim_add(st, "InType1", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "InType128", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "InType129", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "InType136", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "OutType1", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "OutType128", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "OutType129", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "OutType133", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "OutType135", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "OutType143", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ } else
+ rrdset_next(st);
+
+ rrddim_set(st, "InType1", icmp6stat.icp6s_inhist[1]);
+ rrddim_set(st, "InType128", icmp6stat.icp6s_inhist[128]);
+ rrddim_set(st, "InType129", icmp6stat.icp6s_inhist[129]);
+ rrddim_set(st, "InType136", icmp6stat.icp6s_inhist[136]);
+ rrddim_set(st, "OutType1", icmp6stat.icp6s_outhist[1]);
+ rrddim_set(st, "OutType128", icmp6stat.icp6s_outhist[128]);
+ rrddim_set(st, "OutType129", icmp6stat.icp6s_outhist[129]);
+ rrddim_set(st, "OutType133", icmp6stat.icp6s_outhist[133]);
+ rrddim_set(st, "OutType135", icmp6stat.icp6s_outhist[135]);
+ rrddim_set(st, "OutType143", icmp6stat.icp6s_outhist[143]);
+ rrdset_done(st);
+ }
+ }
+ }
+
+ // --------------------------------------------------------------------
+
+ if (likely(do_uptime)) {
+ if (unlikely(GETSYSCTL_BY_NAME("kern.boottime", boot_time))) {
+ do_uptime = 0;
+ error("DISABLED: system.uptime");
+ } else {
+ clock_gettime(CLOCK_REALTIME, &cur_time);
+ st = rrdset_find_active_localhost("system.uptime");
+
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "system"
+ , "uptime"
+ , NULL
+ , "uptime"
+ , NULL
+ , "System Uptime"
+ , "seconds"
+ , "macos"
+ , "sysctl"
+ , 1000
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+ rrddim_add(st, "uptime", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else rrdset_next(st);
+
+ rrddim_set(st, "uptime", cur_time.tv_sec - boot_time.tv_sec);
+ rrdset_done(st);
+ }
+ }
+
+ return 0;
+}
+
diff --git a/collectors/macos.plugin/plugin_macos.c b/collectors/macos.plugin/plugin_macos.c
new file mode 100644
index 0000000..628a5b1
--- /dev/null
+++ b/collectors/macos.plugin/plugin_macos.c
@@ -0,0 +1,69 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "plugin_macos.h"
+
+static void macos_main_cleanup(void *ptr) {
+ struct netdata_static_thread *static_thread = (struct netdata_static_thread *)ptr;
+ static_thread->enabled = NETDATA_MAIN_THREAD_EXITING;
+
+ info("cleaning up...");
+
+ static_thread->enabled = NETDATA_MAIN_THREAD_EXITED;
+}
+
+void *macos_main(void *ptr) {
+ netdata_thread_cleanup_push(macos_main_cleanup, ptr);
+
+    // when ZERO, the module is enabled and will be called below;
+    // a module disables itself by returning non-zero
+ int vdo_cpu_netdata = !config_get_boolean("plugin:macos", "netdata server resources", 1);
+ int vdo_macos_sysctl = !config_get_boolean("plugin:macos", "sysctl", 1);
+ int vdo_macos_mach_smi = !config_get_boolean("plugin:macos", "mach system management interface", 1);
+ int vdo_macos_iokit = !config_get_boolean("plugin:macos", "iokit", 1);
+
+ // keep track of the time each module was called
+ unsigned long long sutime_macos_sysctl = 0ULL;
+ unsigned long long sutime_macos_mach_smi = 0ULL;
+ unsigned long long sutime_macos_iokit = 0ULL;
+
+ usec_t step = localhost->rrd_update_every * USEC_PER_SEC;
+ heartbeat_t hb;
+ heartbeat_init(&hb);
+
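+    // heartbeat_next() sleeps until the next multiple of 'step' and returns
+    // the time elapsed since the previous iteration, in microseconds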
+ while(!netdata_exit) {
+ usec_t hb_dt = heartbeat_next(&hb, step);
+
+ if(unlikely(netdata_exit)) break;
+
+ // BEGIN -- the job to be done
+
+ if(!vdo_macos_sysctl) {
+ debug(D_PROCNETDEV_LOOP, "MACOS: calling do_macos_sysctl().");
+ vdo_macos_sysctl = do_macos_sysctl(localhost->rrd_update_every, hb_dt);
+ }
+ if(unlikely(netdata_exit)) break;
+
+ if(!vdo_macos_mach_smi) {
+ debug(D_PROCNETDEV_LOOP, "MACOS: calling do_macos_mach_smi().");
+ vdo_macos_mach_smi = do_macos_mach_smi(localhost->rrd_update_every, hb_dt);
+ }
+ if(unlikely(netdata_exit)) break;
+
+ if(!vdo_macos_iokit) {
+ debug(D_PROCNETDEV_LOOP, "MACOS: calling do_macos_iokit().");
+ vdo_macos_iokit = do_macos_iokit(localhost->rrd_update_every, hb_dt);
+ }
+ if(unlikely(netdata_exit)) break;
+
+ // END -- the job is done
+
+ // --------------------------------------------------------------------
+
+ if(!vdo_cpu_netdata) {
+ global_statistics_charts();
+ registry_statistics();
+ }
+ }
+
+ netdata_thread_cleanup_pop(1);
+ return NULL;
+}
diff --git a/collectors/macos.plugin/plugin_macos.h b/collectors/macos.plugin/plugin_macos.h
new file mode 100644
index 0000000..0815c59
--- /dev/null
+++ b/collectors/macos.plugin/plugin_macos.h
@@ -0,0 +1,43 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+
+#ifndef NETDATA_PLUGIN_MACOS_H
+#define NETDATA_PLUGIN_MACOS_H 1
+
+#include "../../daemon/common.h"
+
+#if (TARGET_OS == OS_MACOS)
+
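+// this initializer is expanded inside the daemon's table of static threads,
+// registering macos_main() as the entry point of the plugin thread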
+#define NETDATA_PLUGIN_HOOK_MACOS \
+ { \
+ .name = "PLUGIN[macos]", \
+ .config_section = CONFIG_SECTION_PLUGINS, \
+ .config_name = "macos", \
+ .enabled = 1, \
+ .thread = NULL, \
+ .init_routine = NULL, \
+ .start_routine = macos_main \
+ },
+
+void *macos_main(void *ptr);
+
+#define GETSYSCTL_BY_NAME(name, var) getsysctl_by_name(name, &(var), sizeof(var))
+
+extern int getsysctl_by_name(const char *name, void *ptr, size_t len);
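+
+// Illustrative use of the macro above - it passes the address and size of
+// the destination variable, so any fixed-size type works:
+//
+//     struct loadavg sysload;
+//     if (GETSYSCTL_BY_NAME("vm.loadavg", sysload))
+//         error("vm.loadavg lookup failed"); // non-zero return means failure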
+
+extern int do_macos_sysctl(int update_every, usec_t dt);
+extern int do_macos_mach_smi(int update_every, usec_t dt);
+extern int do_macos_iokit(int update_every, usec_t dt);
+
+
+#else // (TARGET_OS == OS_MACOS)
+
+#define NETDATA_PLUGIN_HOOK_MACOS
+
+#endif // (TARGET_OS == OS_MACOS)
+
+#endif /* NETDATA_PLUGIN_MACOS_H */
diff --git a/collectors/nfacct.plugin/Makefile.am b/collectors/nfacct.plugin/Makefile.am
new file mode 100644
index 0000000..161784b
--- /dev/null
+++ b/collectors/nfacct.plugin/Makefile.am
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/collectors/nfacct.plugin/README.md b/collectors/nfacct.plugin/README.md
new file mode 100644
index 0000000..1bd8a46
--- /dev/null
+++ b/collectors/nfacct.plugin/README.md
@@ -0,0 +1,54 @@
+<!--
+title: "nfacct.plugin"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/nfacct.plugin/README.md
+-->
+
+# nfacct.plugin
+
+`nfacct.plugin` collects Netfilter statistics.
+
+## Prerequisites
+
+1. Install `libmnl-dev` and `libnetfilter_acct-dev` using the package manager of your system (see the example below).
+
+2. Re-install Netdata from source. The installer will detect that the required libraries are now available and will also build `nfacct.plugin`.
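+
+A minimal sketch for Debian or Ubuntu systems (exact package names may
+differ on other distributions):
+
+```
+sudo apt-get install libmnl-dev libnetfilter-acct-dev
+```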
+
+Keep in mind that NFACCT requires root access, so the plugin is setuid to root.
+
+## Charts
+
+The plugin provides Netfilter connection tracker statistics and nfacct packet and bandwidth accounting:
+
+Connection tracker:
+
+1. Connections.
+2. Changes.
+3. Expectations.
+4. Errors.
+5. Searches.
+
+Netfilter accounting:
+
+1. Packets.
+2. Bandwidth.
+
+## Configuration
+
+If you need to disable NFACCT in Netdata, edit `/etc/netdata/netdata.conf` and set:
+
+```
+[plugins]
+ nfacct = no
+```
+
+## Debugging
+
+You can run the plugin by hand:
+
+```
+sudo /usr/libexec/netdata/plugins.d/nfacct.plugin 1 debug
+```
+
+You will get verbose output on what the plugin does.
+
diff --git a/collectors/nfacct.plugin/plugin_nfacct.c b/collectors/nfacct.plugin/plugin_nfacct.c
new file mode 100644
index 0000000..996070f
--- /dev/null
+++ b/collectors/nfacct.plugin/plugin_nfacct.c
@@ -0,0 +1,944 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "../../libnetdata/libnetdata.h"
+
+#define PLUGIN_NFACCT_NAME "nfacct.plugin"
+
+#define NETDATA_CHART_PRIO_NETFILTER_NEW 8701
+#define NETDATA_CHART_PRIO_NETFILTER_CHANGES 8702
+#define NETDATA_CHART_PRIO_NETFILTER_EXPECT 8703
+#define NETDATA_CHART_PRIO_NETFILTER_ERRORS 8705
+#define NETDATA_CHART_PRIO_NETFILTER_SEARCH 8710
+
+#define NETDATA_CHART_PRIO_NETFILTER_PACKETS 8906
+#define NETDATA_CHART_PRIO_NETFILTER_BYTES 8907
+
+#ifdef HAVE_LIBMNL
+#include <libmnl/libmnl.h>
+
+static inline size_t mnl_buffer_size() {
+ long s = MNL_SOCKET_BUFFER_SIZE;
+ if(s <= 0) return 8192;
+ return (size_t)s;
+}
+
+// callback required by fatal()
+void netdata_cleanup_and_exit(int ret) {
+ exit(ret);
+}
+
+void send_statistics(const char *action, const char *action_result, const char *action_data) {
+ (void) action;
+ (void) action_result;
+ (void) action_data;
+ return;
+}
+
+// callbacks required by popen()
+void signals_block(void) {}
+void signals_unblock(void) {}
+void signals_reset(void) {}
+
+// callback required by eval()
+int health_variable_lookup(const char *variable, uint32_t hash, struct rrdcalc *rc, calculated_number *result) {
+ (void)variable;
+ (void)hash;
+ (void)rc;
+ (void)result;
+ return 0;
+}
+
+// required by get_system_cpus()
+char *netdata_configured_host_prefix = "";
+
+// Variables
+
+static int debug = 0;
+
+static int netdata_update_every = 1;
+
+// ----------------------------------------------------------------------------
+// DO_NFSTAT - collect netfilter connection tracker statistics via netlink
+// example: https://github.com/formorer/pkg-conntrack-tools/blob/master/src/conntrack.c
+
+#ifdef HAVE_LINUX_NETFILTER_NFNETLINK_CONNTRACK_H
+#define DO_NFSTAT 1
+
+#define RRD_TYPE_NET_STAT_NETFILTER "netfilter"
+#define RRD_TYPE_NET_STAT_CONNTRACK "netlink"
+
+#include <linux/netfilter/nfnetlink_conntrack.h>
+
+static struct {
+ int update_every;
+ char *buf;
+ size_t buf_size;
+ struct mnl_socket *mnl;
+ struct nlmsghdr *nlh;
+ struct nfgenmsg *nfh;
+ unsigned int seq;
+ uint32_t portid;
+
+ struct nlattr *tb[CTA_STATS_MAX+1];
+ const char *attr2name[CTA_STATS_MAX+1];
+ kernel_uint_t metrics[CTA_STATS_MAX+1];
+
+ struct nlattr *tb_exp[CTA_STATS_EXP_MAX+1];
+ const char *attr2name_exp[CTA_STATS_EXP_MAX+1];
+ kernel_uint_t metrics_exp[CTA_STATS_EXP_MAX+1];
+} nfstat_root = {
+ .update_every = 1,
+ .buf = NULL,
+ .buf_size = 0,
+ .mnl = NULL,
+ .nlh = NULL,
+ .nfh = NULL,
+ .seq = 0,
+ .portid = 0,
+ .tb = {},
+ .attr2name = {
+ [CTA_STATS_SEARCHED] = "searched",
+ [CTA_STATS_FOUND] = "found",
+ [CTA_STATS_NEW] = "new",
+ [CTA_STATS_INVALID] = "invalid",
+ [CTA_STATS_IGNORE] = "ignore",
+ [CTA_STATS_DELETE] = "delete",
+ [CTA_STATS_DELETE_LIST] = "delete_list",
+ [CTA_STATS_INSERT] = "insert",
+ [CTA_STATS_INSERT_FAILED] = "insert_failed",
+ [CTA_STATS_DROP] = "drop",
+ [CTA_STATS_EARLY_DROP] = "early_drop",
+ [CTA_STATS_ERROR] = "icmp_error",
+ [CTA_STATS_SEARCH_RESTART] = "search_restart",
+ },
+ .metrics = {},
+ .tb_exp = {},
+ .attr2name_exp = {
+ [CTA_STATS_EXP_NEW] = "new",
+ [CTA_STATS_EXP_CREATE] = "created",
+ [CTA_STATS_EXP_DELETE] = "deleted",
+ },
+ .metrics_exp = {}
+};
+
+
+static int nfstat_init(int update_every) {
+ nfstat_root.update_every = update_every;
+
+ nfstat_root.buf_size = mnl_buffer_size();
+ nfstat_root.buf = mallocz(nfstat_root.buf_size);
+
+ nfstat_root.mnl = mnl_socket_open(NETLINK_NETFILTER);
+ if(!nfstat_root.mnl) {
+ error("NFSTAT: mnl_socket_open() failed");
+ return 1;
+ }
+
+ nfstat_root.seq = (unsigned int)now_realtime_sec() - 1;
+
+ if(mnl_socket_bind(nfstat_root.mnl, 0, MNL_SOCKET_AUTOPID) < 0) {
+ error("NFSTAT: mnl_socket_bind() failed");
+ return 1;
+ }
+ nfstat_root.portid = mnl_socket_get_portid(nfstat_root.mnl);
+
+ return 0;
+}
+
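+// build an nfnetlink request header: the netlink message type packs the
+// nfnetlink subsystem id in the high byte and the command in the low byte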
+static struct nlmsghdr * nfct_mnl_nlmsghdr_put(char *buf, uint16_t subsys, uint16_t type, uint8_t family, uint32_t seq) {
+ struct nlmsghdr *nlh;
+ struct nfgenmsg *nfh;
+
+ nlh = mnl_nlmsg_put_header(buf);
+ nlh->nlmsg_type = (subsys << 8) | type;
+ nlh->nlmsg_flags = NLM_F_REQUEST|NLM_F_DUMP;
+ nlh->nlmsg_seq = seq;
+
+ nfh = mnl_nlmsg_put_extra_header(nlh, sizeof(struct nfgenmsg));
+ nfh->nfgen_family = family;
+ nfh->version = NFNETLINK_V0;
+ nfh->res_id = 0;
+
+ return nlh;
+}
+
+static int nfct_stats_attr_cb(const struct nlattr *attr, void *data) {
+ const struct nlattr **tb = data;
+ int type = mnl_attr_get_type(attr);
+
+ if (mnl_attr_type_valid(attr, CTA_STATS_MAX) < 0)
+ return MNL_CB_OK;
+
+ if (mnl_attr_validate(attr, MNL_TYPE_U32) < 0) {
+ error("NFSTAT: mnl_attr_validate() failed");
+ return MNL_CB_ERROR;
+ }
+
+ tb[type] = attr;
+ return MNL_CB_OK;
+}
+
+static int nfstat_callback(const struct nlmsghdr *nlh, void *data) {
+ (void)data;
+
+ struct nfgenmsg *nfg = mnl_nlmsg_get_payload(nlh);
+
+ mnl_attr_parse(nlh, sizeof(*nfg), nfct_stats_attr_cb, nfstat_root.tb);
+
+ // printf("cpu=%-4u\t", ntohs(nfg->res_id));
+
+ int i;
+ // add the metrics of this CPU into the metrics
+ for (i = 0; i < CTA_STATS_MAX+1; i++) {
+ if (nfstat_root.tb[i]) {
+ // printf("%s=%u ", nfstat_root.attr2name[i], ntohl(mnl_attr_get_u32(nfstat_root.tb[i])));
+ nfstat_root.metrics[i] += ntohl(mnl_attr_get_u32(nfstat_root.tb[i]));
+ }
+ }
+ // printf("\n");
+
+ return MNL_CB_OK;
+}
+
+static int nfstat_collect_conntrack() {
+ // zero all metrics - we will sum the metrics of all CPUs later
+ int i;
+ for (i = 0; i < CTA_STATS_MAX+1; i++)
+ nfstat_root.metrics[i] = 0;
+
+ // prepare the request
+ nfstat_root.nlh = nfct_mnl_nlmsghdr_put(nfstat_root.buf, NFNL_SUBSYS_CTNETLINK, IPCTNL_MSG_CT_GET_STATS_CPU, AF_UNSPEC, nfstat_root.seq);
+
+ // send the request
+ if(mnl_socket_sendto(nfstat_root.mnl, nfstat_root.nlh, nfstat_root.nlh->nlmsg_len) < 0) {
+ error("NFSTAT: mnl_socket_sendto() failed");
+ return 1;
+ }
+
+ // get the reply
+ ssize_t ret;
+ while ((ret = mnl_socket_recvfrom(nfstat_root.mnl, nfstat_root.buf, nfstat_root.buf_size)) > 0) {
+ if(mnl_cb_run(
+ nfstat_root.buf
+ , (size_t)ret
+ , nfstat_root.nlh->nlmsg_seq
+ , nfstat_root.portid
+ , nfstat_callback
+ , NULL
+ ) <= MNL_CB_STOP)
+ break;
+ }
+
+ // verify we run without issues
+ if (ret == -1) {
+ error("NFSTAT: error communicating with kernel. This plugin can only work when netdata runs as root.");
+ return 1;
+ }
+
+ return 0;
+}
+
+static int nfexp_stats_attr_cb(const struct nlattr *attr, void *data)
+{
+ const struct nlattr **tb = data;
+ int type = mnl_attr_get_type(attr);
+
+ if (mnl_attr_type_valid(attr, CTA_STATS_EXP_MAX) < 0)
+ return MNL_CB_OK;
+
+ if (mnl_attr_validate(attr, MNL_TYPE_U32) < 0) {
+ error("NFSTAT EXP: mnl_attr_validate() failed");
+ return MNL_CB_ERROR;
+ }
+
+ tb[type] = attr;
+ return MNL_CB_OK;
+}
+
+static int nfstat_callback_exp(const struct nlmsghdr *nlh, void *data) {
+ (void)data;
+
+ struct nfgenmsg *nfg = mnl_nlmsg_get_payload(nlh);
+
+ mnl_attr_parse(nlh, sizeof(*nfg), nfexp_stats_attr_cb, nfstat_root.tb_exp);
+
+ int i;
+ for (i = 0; i < CTA_STATS_EXP_MAX+1; i++) {
+ if (nfstat_root.tb_exp[i]) {
+ nfstat_root.metrics_exp[i] += ntohl(mnl_attr_get_u32(nfstat_root.tb_exp[i]));
+ }
+ }
+
+ return MNL_CB_OK;
+}
+
+static int nfstat_collect_conntrack_expectations() {
+ // zero all metrics - we will sum the metrics of all CPUs later
+ int i;
+ for (i = 0; i < CTA_STATS_EXP_MAX+1; i++)
+ nfstat_root.metrics_exp[i] = 0;
+
+ // prepare the request
+ nfstat_root.nlh = nfct_mnl_nlmsghdr_put(nfstat_root.buf, NFNL_SUBSYS_CTNETLINK_EXP, IPCTNL_MSG_EXP_GET_STATS_CPU, AF_UNSPEC, nfstat_root.seq);
+
+ // send the request
+ if(mnl_socket_sendto(nfstat_root.mnl, nfstat_root.nlh, nfstat_root.nlh->nlmsg_len) < 0) {
+ error("NFSTAT: mnl_socket_sendto() failed");
+ return 1;
+ }
+
+ // get the reply
+ ssize_t ret;
+ while ((ret = mnl_socket_recvfrom(nfstat_root.mnl, nfstat_root.buf, nfstat_root.buf_size)) > 0) {
+ if(mnl_cb_run(
+ nfstat_root.buf
+ , (size_t)ret
+ , nfstat_root.nlh->nlmsg_seq
+ , nfstat_root.portid
+ , nfstat_callback_exp
+ , NULL
+ ) <= MNL_CB_STOP)
+ break;
+ }
+
+    // verify we ran without issues
+ if (ret == -1) {
+ error("NFSTAT: error communicating with kernel. This plugin can only work when netdata runs as root.");
+ return 1;
+ }
+
+ return 0;
+}
+
+static int nfstat_collect() {
+ nfstat_root.seq++;
+
+ if(nfstat_collect_conntrack())
+ return 1;
+
+ if(nfstat_collect_conntrack_expectations())
+ return 1;
+
+ return 0;
+}
+
+static void nfstat_send_metrics() {
+ static int new_chart_generated = 0, changes_chart_generated = 0, search_chart_generated = 0, errors_chart_generated = 0, expect_chart_generated = 0;
+
+ if(!new_chart_generated) {
+ new_chart_generated = 1;
+
+ printf("CHART %s.%s '' 'Connection Tracker New Connections' 'connections/s' %s '' line %d %d %s\n"
+ , RRD_TYPE_NET_STAT_NETFILTER
+ , RRD_TYPE_NET_STAT_CONNTRACK "_new"
+ , RRD_TYPE_NET_STAT_CONNTRACK
+ , NETDATA_CHART_PRIO_NETFILTER_NEW
+ , nfstat_root.update_every
+ , PLUGIN_NFACCT_NAME
+ );
+ printf("DIMENSION %s '' incremental 1 1\n", nfstat_root.attr2name[CTA_STATS_NEW]);
+ printf("DIMENSION %s '' incremental -1 1\n", nfstat_root.attr2name[CTA_STATS_IGNORE]);
+ printf("DIMENSION %s '' incremental -1 1\n", nfstat_root.attr2name[CTA_STATS_INVALID]);
+ }
+
+ printf(
+ "BEGIN %s.%s\n"
+ , RRD_TYPE_NET_STAT_NETFILTER
+ , RRD_TYPE_NET_STAT_CONNTRACK "_new"
+ );
+ printf(
+ "SET %s = %lld\n"
+ , nfstat_root.attr2name[CTA_STATS_NEW]
+ , (collected_number) nfstat_root.metrics[CTA_STATS_NEW]
+ );
+ printf(
+ "SET %s = %lld\n"
+ , nfstat_root.attr2name[CTA_STATS_IGNORE]
+ , (collected_number) nfstat_root.metrics[CTA_STATS_IGNORE]
+ );
+ printf(
+ "SET %s = %lld\n"
+ , nfstat_root.attr2name[CTA_STATS_INVALID]
+ , (collected_number) nfstat_root.metrics[CTA_STATS_INVALID]
+ );
+ printf("END\n");
+
+ // ----------------------------------------------------------------
+
+ if(!changes_chart_generated) {
+ changes_chart_generated = 1;
+
+ printf("CHART %s.%s '' 'Connection Tracker Changes' 'changes/s' %s '' line %d %d detail %s\n"
+ , RRD_TYPE_NET_STAT_NETFILTER
+ , RRD_TYPE_NET_STAT_CONNTRACK "_changes"
+ , RRD_TYPE_NET_STAT_CONNTRACK
+ , NETDATA_CHART_PRIO_NETFILTER_CHANGES
+ , nfstat_root.update_every
+ , PLUGIN_NFACCT_NAME
+ );
+ printf("DIMENSION %s '' incremental 1 1\n", nfstat_root.attr2name[CTA_STATS_INSERT]);
+ printf("DIMENSION %s '' incremental -1 1\n", nfstat_root.attr2name[CTA_STATS_DELETE]);
+ printf("DIMENSION %s '' incremental -1 1\n", nfstat_root.attr2name[CTA_STATS_DELETE_LIST]);
+ }
+
+ printf(
+ "BEGIN %s.%s\n"
+ , RRD_TYPE_NET_STAT_NETFILTER
+ , RRD_TYPE_NET_STAT_CONNTRACK "_changes"
+ );
+ printf(
+ "SET %s = %lld\n"
+ , nfstat_root.attr2name[CTA_STATS_INSERT]
+ , (collected_number) nfstat_root.metrics[CTA_STATS_INSERT]
+ );
+ printf(
+ "SET %s = %lld\n"
+ , nfstat_root.attr2name[CTA_STATS_DELETE]
+ , (collected_number) nfstat_root.metrics[CTA_STATS_DELETE]
+ );
+ printf(
+ "SET %s = %lld\n"
+ , nfstat_root.attr2name[CTA_STATS_DELETE_LIST]
+ , (collected_number) nfstat_root.metrics[CTA_STATS_DELETE_LIST]
+ );
+ printf("END\n");
+
+ // ----------------------------------------------------------------
+
+ if(!search_chart_generated) {
+ search_chart_generated = 1;
+
+ printf("CHART %s.%s '' 'Connection Tracker Searches' 'searches/s' %s '' line %d %d detail %s\n"
+ , RRD_TYPE_NET_STAT_NETFILTER
+ , RRD_TYPE_NET_STAT_CONNTRACK "_search"
+ , RRD_TYPE_NET_STAT_CONNTRACK
+ , NETDATA_CHART_PRIO_NETFILTER_SEARCH
+ , nfstat_root.update_every
+ , PLUGIN_NFACCT_NAME
+ );
+ printf("DIMENSION %s '' incremental 1 1\n", nfstat_root.attr2name[CTA_STATS_SEARCHED]);
+ printf("DIMENSION %s '' incremental -1 1\n", nfstat_root.attr2name[CTA_STATS_SEARCH_RESTART]);
+ printf("DIMENSION %s '' incremental 1 1\n", nfstat_root.attr2name[CTA_STATS_FOUND]);
+ }
+
+ printf(
+ "BEGIN %s.%s\n"
+ , RRD_TYPE_NET_STAT_NETFILTER
+ , RRD_TYPE_NET_STAT_CONNTRACK "_search"
+ );
+ printf(
+ "SET %s = %lld\n"
+ , nfstat_root.attr2name[CTA_STATS_SEARCHED]
+ , (collected_number) nfstat_root.metrics[CTA_STATS_SEARCHED]
+ );
+ printf(
+ "SET %s = %lld\n"
+ , nfstat_root.attr2name[CTA_STATS_SEARCH_RESTART]
+ , (collected_number) nfstat_root.metrics[CTA_STATS_SEARCH_RESTART]
+ );
+ printf(
+ "SET %s = %lld\n"
+ , nfstat_root.attr2name[CTA_STATS_FOUND]
+ , (collected_number) nfstat_root.metrics[CTA_STATS_FOUND]
+ );
+ printf("END\n");
+
+ // ----------------------------------------------------------------
+
+ if(!errors_chart_generated) {
+ errors_chart_generated = 1;
+
+ printf("CHART %s.%s '' 'Connection Tracker Errors' 'events/s' %s '' line %d %d detail %s\n"
+ , RRD_TYPE_NET_STAT_NETFILTER
+ , RRD_TYPE_NET_STAT_CONNTRACK "_errors"
+ , RRD_TYPE_NET_STAT_CONNTRACK
+ , NETDATA_CHART_PRIO_NETFILTER_ERRORS
+ , nfstat_root.update_every
+ , PLUGIN_NFACCT_NAME
+ );
+ printf("DIMENSION %s '' incremental 1 1\n", nfstat_root.attr2name[CTA_STATS_ERROR]);
+ printf("DIMENSION %s '' incremental -1 1\n", nfstat_root.attr2name[CTA_STATS_INSERT_FAILED]);
+ printf("DIMENSION %s '' incremental -1 1\n", nfstat_root.attr2name[CTA_STATS_DROP]);
+ printf("DIMENSION %s '' incremental -1 1\n", nfstat_root.attr2name[CTA_STATS_EARLY_DROP]);
+ }
+
+ printf(
+ "BEGIN %s.%s\n"
+ , RRD_TYPE_NET_STAT_NETFILTER
+ , RRD_TYPE_NET_STAT_CONNTRACK "_errors"
+ );
+ printf(
+ "SET %s = %lld\n"
+ , nfstat_root.attr2name[CTA_STATS_ERROR]
+ , (collected_number) nfstat_root.metrics[CTA_STATS_ERROR]
+ );
+ printf(
+ "SET %s = %lld\n"
+ , nfstat_root.attr2name[CTA_STATS_INSERT_FAILED]
+ , (collected_number) nfstat_root.metrics[CTA_STATS_INSERT_FAILED]
+ );
+ printf(
+ "SET %s = %lld\n"
+ , nfstat_root.attr2name[CTA_STATS_DROP]
+ , (collected_number) nfstat_root.metrics[CTA_STATS_DROP]
+ );
+ printf(
+ "SET %s = %lld\n"
+ , nfstat_root.attr2name[CTA_STATS_EARLY_DROP]
+ , (collected_number) nfstat_root.metrics[CTA_STATS_EARLY_DROP]
+ );
+ printf("END\n");
+
+ // ----------------------------------------------------------------
+
+ if(!expect_chart_generated) {
+ expect_chart_generated = 1;
+
+ printf("CHART %s.%s '' 'Connection Tracker Expectations' 'expectations/s' %s '' line %d %d detail %s\n"
+ , RRD_TYPE_NET_STAT_NETFILTER
+ , RRD_TYPE_NET_STAT_CONNTRACK "_expect"
+ , RRD_TYPE_NET_STAT_CONNTRACK
+ , NETDATA_CHART_PRIO_NETFILTER_EXPECT
+ , nfstat_root.update_every
+ , PLUGIN_NFACCT_NAME
+ );
+ printf("DIMENSION %s '' incremental 1 1\n", nfstat_root.attr2name[CTA_STATS_EXP_CREATE]);
+ printf("DIMENSION %s '' incremental -1 1\n", nfstat_root.attr2name[CTA_STATS_EXP_DELETE]);
+ printf("DIMENSION %s '' incremental 1 1\n", nfstat_root.attr2name[CTA_STATS_EXP_NEW]);
+ }
+
+ printf(
+ "BEGIN %s.%s\n"
+ , RRD_TYPE_NET_STAT_NETFILTER
+ , RRD_TYPE_NET_STAT_CONNTRACK "_expect"
+ );
+ printf(
+ "SET %s = %lld\n"
+ , nfstat_root.attr2name[CTA_STATS_EXP_CREATE]
+ , (collected_number) nfstat_root.metrics[CTA_STATS_EXP_CREATE]
+ );
+ printf(
+ "SET %s = %lld\n"
+ , nfstat_root.attr2name[CTA_STATS_EXP_DELETE]
+ , (collected_number) nfstat_root.metrics[CTA_STATS_EXP_DELETE]
+ );
+ printf(
+ "SET %s = %lld\n"
+ , nfstat_root.attr2name[CTA_STATS_EXP_NEW]
+ , (collected_number) nfstat_root.metrics[CTA_STATS_EXP_NEW]
+ );
+ printf("END\n");
+}
+
+#endif // HAVE_LINUX_NETFILTER_NFNETLINK_CONNTRACK_H
+
+
+// ----------------------------------------------------------------------------
+// DO_NFACCT - collect netfilter accounting statistics via netlink
+
+#ifdef HAVE_LIBNETFILTER_ACCT
+#define DO_NFACCT 1
+
+#include <libnetfilter_acct/libnetfilter_acct.h>
+
+struct nfacct_data {
+ char *name;
+ uint32_t hash;
+
+ uint64_t pkts;
+ uint64_t bytes;
+
+ int packets_dimension_added;
+ int bytes_dimension_added;
+
+ int updated;
+
+ struct nfacct_data *next;
+};
+
+static struct {
+ int update_every;
+ char *buf;
+ size_t buf_size;
+ struct mnl_socket *mnl;
+ struct nlmsghdr *nlh;
+ unsigned int seq;
+ uint32_t portid;
+ struct nfacct *nfacct_buffer;
+ struct nfacct_data *nfacct_metrics;
+} nfacct_root = {
+ .update_every = 1,
+ .buf = NULL,
+ .buf_size = 0,
+ .mnl = NULL,
+ .nlh = NULL,
+ .seq = 0,
+ .portid = 0,
+ .nfacct_buffer = NULL,
+ .nfacct_metrics = NULL
+};
+
+static inline struct nfacct_data *nfacct_data_get(const char *name, uint32_t hash) {
+ struct nfacct_data *d = NULL, *last = NULL;
+ for(d = nfacct_root.nfacct_metrics; d ; last = d, d = d->next) {
+ if(unlikely(d->hash == hash && !strcmp(d->name, name)))
+ return d;
+ }
+
+ d = callocz(1, sizeof(struct nfacct_data));
+ d->name = strdupz(name);
+ d->hash = hash;
+
+ if(!last) {
+ d->next = nfacct_root.nfacct_metrics;
+ nfacct_root.nfacct_metrics = d;
+ }
+ else {
+ d->next = last->next;
+ last->next = d;
+ }
+
+ return d;
+}
+
+static int nfacct_init(int update_every) {
+ nfacct_root.update_every = update_every;
+
+ nfacct_root.buf_size = mnl_buffer_size();
+ nfacct_root.buf = mallocz(nfacct_root.buf_size);
+
+ nfacct_root.nfacct_buffer = nfacct_alloc();
+ if(!nfacct_root.nfacct_buffer) {
+ error("nfacct.plugin: nfacct_alloc() failed.");
+        return 1;
+ }
+
+ nfacct_root.seq = (unsigned int)now_realtime_sec() - 1;
+
+ nfacct_root.mnl = mnl_socket_open(NETLINK_NETFILTER);
+ if(!nfacct_root.mnl) {
+ error("nfacct.plugin: mnl_socket_open() failed");
+ return 1;
+ }
+
+ if(mnl_socket_bind(nfacct_root.mnl, 0, MNL_SOCKET_AUTOPID) < 0) {
+ error("nfacct.plugin: mnl_socket_bind() failed");
+ return 1;
+ }
+ nfacct_root.portid = mnl_socket_get_portid(nfacct_root.mnl);
+
+ return 0;
+}
+
+static int nfacct_callback(const struct nlmsghdr *nlh, void *data) {
+ (void)data;
+
+ if(nfacct_nlmsg_parse_payload(nlh, nfacct_root.nfacct_buffer) < 0) {
+ error("NFACCT: nfacct_nlmsg_parse_payload() failed.");
+ return MNL_CB_OK;
+ }
+
+ const char *name = nfacct_attr_get_str(nfacct_root.nfacct_buffer, NFACCT_ATTR_NAME);
+ uint32_t hash = simple_hash(name);
+
+ struct nfacct_data *d = nfacct_data_get(name, hash);
+
+ d->pkts = nfacct_attr_get_u64(nfacct_root.nfacct_buffer, NFACCT_ATTR_PKTS);
+ d->bytes = nfacct_attr_get_u64(nfacct_root.nfacct_buffer, NFACCT_ATTR_BYTES);
+ d->updated = 1;
+
+ return MNL_CB_OK;
+}
+
+static int nfacct_collect() {
+ // mark all old metrics as not-updated
+ struct nfacct_data *d;
+ for(d = nfacct_root.nfacct_metrics; d ; d = d->next)
+ d->updated = 0;
+
+ // prepare the request
+ nfacct_root.seq++;
+ nfacct_root.nlh = nfacct_nlmsg_build_hdr(nfacct_root.buf, NFNL_MSG_ACCT_GET, NLM_F_DUMP, (uint32_t)nfacct_root.seq);
+ if(!nfacct_root.nlh) {
+ error("NFACCT: nfacct_nlmsg_build_hdr() failed");
+ return 1;
+ }
+
+ // send the request
+ if(mnl_socket_sendto(nfacct_root.mnl, nfacct_root.nlh, nfacct_root.nlh->nlmsg_len) < 0) {
+ error("NFACCT: mnl_socket_sendto() failed");
+ return 1;
+ }
+
+ // get the reply
+ ssize_t ret;
+ while((ret = mnl_socket_recvfrom(nfacct_root.mnl, nfacct_root.buf, nfacct_root.buf_size)) > 0) {
+ if(mnl_cb_run(
+ nfacct_root.buf
+ , (size_t)ret
+ , nfacct_root.seq
+ , nfacct_root.portid
+ , nfacct_callback
+ , NULL
+ ) <= 0)
+ break;
+ }
+
+    // verify we ran without issues
+ if (ret == -1) {
+ error("NFACCT: error communicating with kernel. This plugin can only work when netdata runs as root.");
+ return 1;
+ }
+
+ return 0;
+}
+
+static void nfacct_send_metrics() {
+ static int bytes_chart_generated = 0, packets_chart_generated = 0;
+
+ if(!nfacct_root.nfacct_metrics) return;
+ struct nfacct_data *d;
+
+ if(!packets_chart_generated) {
+ packets_chart_generated = 1;
+ printf("CHART netfilter.nfacct_packets '' 'Netfilter Accounting Packets' 'packets/s' 'nfacct' '' stacked %d %d %s\n"
+ , NETDATA_CHART_PRIO_NETFILTER_PACKETS
+ , nfacct_root.update_every
+ , PLUGIN_NFACCT_NAME
+ );
+ }
+
+ for(d = nfacct_root.nfacct_metrics; d ; d = d->next) {
+ if(likely(d->updated)) {
+ if(unlikely(!d->packets_dimension_added)) {
+ d->packets_dimension_added = 1;
+ printf("CHART netfilter.nfacct_packets '' 'Netfilter Accounting Packets' 'packets/s'\n");
+ printf("DIMENSION %s '' incremental 1 %d\n", d->name, nfacct_root.update_every);
+ }
+ }
+ }
+ printf("BEGIN netfilter.nfacct_packets\n");
+ for(d = nfacct_root.nfacct_metrics; d ; d = d->next) {
+ if(likely(d->updated)) {
+ printf("SET %s = %lld\n"
+ , d->name
+ , (collected_number)d->pkts
+ );
+ }
+ }
+ printf("END\n");
+
+ // ----------------------------------------------------------------
+
+ if(!bytes_chart_generated) {
+ bytes_chart_generated = 1;
+ printf("CHART netfilter.nfacct_bytes '' 'Netfilter Accounting Bandwidth' 'kilobytes/s' 'nfacct' '' stacked %d %d %s\n"
+ , NETDATA_CHART_PRIO_NETFILTER_BYTES
+ , nfacct_root.update_every
+ , PLUGIN_NFACCT_NAME
+ );
+ }
+
+ for(d = nfacct_root.nfacct_metrics; d ; d = d->next) {
+ if(likely(d->updated)) {
+ if(unlikely(!d->bytes_dimension_added)) {
+ d->bytes_dimension_added = 1;
+ printf("CHART netfilter.nfacct_bytes '' 'Netfilter Accounting Bandwidth' 'kilobytes/s'\n");
+ printf("DIMENSION %s '' incremental 1 %d\n", d->name, 1000 * nfacct_root.update_every);
+ }
+ }
+ }
+ printf("BEGIN netfilter.nfacct_bytes\n");
+ for(d = nfacct_root.nfacct_metrics; d ; d = d->next) {
+ if(likely(d->updated)) {
+ printf("SET %s = %lld\n"
+ , d->name
+ , (collected_number)d->bytes
+ );
+ }
+ }
+ printf("END\n");
+}
+
+#endif // HAVE_LIBNETFILTER_ACCT
+
+static void nfacct_signal_handler(int signo)
+{
+ exit((signo == SIGPIPE)?1:0);
+}
+
+// When Netdata crashes, this plugin would become a zombie process;
+// this handler was added so that the plugin exits when SIGPIPE and other signals are received.
+void nfacct_signals()
+{
+ int signals[] = { SIGPIPE, SIGINT, SIGTERM, 0};
+ int i;
+ struct sigaction sa;
+ sa.sa_flags = 0;
+ sa.sa_handler = nfacct_signal_handler;
+
+ // ignore all signals while we run in a signal handler
+ sigfillset(&sa.sa_mask);
+
+ for (i = 0; signals[i]; i++) {
+ if(sigaction(signals[i], &sa, NULL) == -1)
+ error("Cannot add the handler to signal %d", signals[i]);
+ }
+}
+
+int main(int argc, char **argv) {
+
+ // ------------------------------------------------------------------------
+ // initialization of netdata plugin
+
+ program_name = "nfacct.plugin";
+
+ // disable syslog
+ error_log_syslog = 0;
+
+ // set errors flood protection to 100 logs per hour
+ error_log_errors_per_period = 100;
+ error_log_throttle_period = 3600;
+
+ // ------------------------------------------------------------------------
+ // parse command line parameters
+
+ int i, freq = 0;
+ for(i = 1; i < argc ; i++) {
+ if(isdigit(*argv[i]) && !freq) {
+ int n = str2i(argv[i]);
+ if(n > 0 && n < 86400) {
+ freq = n;
+ continue;
+ }
+ }
+ else if(strcmp("version", argv[i]) == 0 || strcmp("-version", argv[i]) == 0 || strcmp("--version", argv[i]) == 0 || strcmp("-v", argv[i]) == 0 || strcmp("-V", argv[i]) == 0) {
+ printf("nfacct.plugin %s\n", VERSION);
+ exit(0);
+ }
+ else if(strcmp("debug", argv[i]) == 0) {
+ debug = 1;
+ continue;
+ }
+ else if(strcmp("-h", argv[i]) == 0 || strcmp("--help", argv[i]) == 0) {
+ fprintf(stderr,
+ "\n"
+ " netdata nfacct.plugin %s\n"
+ " Copyright (C) 2015-2017 Costa Tsaousis <costa@tsaousis.gr>\n"
+ " Released under GNU General Public License v3 or later.\n"
+ " All rights reserved.\n"
+ "\n"
+ " This program is a data collector plugin for netdata.\n"
+ "\n"
+ " Available command line options:\n"
+ "\n"
+ " COLLECTION_FREQUENCY data collection frequency in seconds\n"
+ " minimum: %d\n"
+ "\n"
+ " debug enable verbose output\n"
+ " default: disabled\n"
+ "\n"
+ " -v\n"
+ " -V\n"
+ " --version print version and exit\n"
+ "\n"
+ " -h\n"
+ " --help print this message and exit\n"
+ "\n"
+ " For more information:\n"
+ " https://github.com/netdata/netdata/tree/master/collectors/nfacct.plugin\n"
+ "\n"
+ , VERSION
+ , netdata_update_every
+ );
+ exit(1);
+ }
+
+ error("nfacct.plugin: ignoring parameter '%s'", argv[i]);
+ }
+
+ nfacct_signals();
+
+ errno = 0;
+
+ if(freq >= netdata_update_every)
+ netdata_update_every = freq;
+ else if(freq)
+ error("update frequency %d seconds is too small for NFACCT. Using %d.", freq, netdata_update_every);
+
+#ifdef DO_NFACCT
+ if(debug) fprintf(stderr, "nfacct.plugin: calling nfacct_init()\n");
+ int nfacct = !nfacct_init(netdata_update_every);
+#endif
+
+#ifdef DO_NFSTAT
+ if(debug) fprintf(stderr, "nfacct.plugin: calling nfstat_init()\n");
+ int nfstat = !nfstat_init(netdata_update_every);
+#endif
+
+ // ------------------------------------------------------------------------
+ // the main loop
+
+ if(debug) fprintf(stderr, "nfacct.plugin: starting data collection\n");
+
+ time_t started_t = now_monotonic_sec();
+
+ size_t iteration;
+ usec_t step = netdata_update_every * USEC_PER_SEC;
+
+ heartbeat_t hb;
+ heartbeat_init(&hb);
+ for(iteration = 0; 1; iteration++) {
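+        // sleep until the next multiple of 'step'; dt is the time actually elapsed since the previous beat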
+ usec_t dt = heartbeat_next(&hb, step);
+
+ if(unlikely(netdata_exit)) break;
+
+ if(debug && iteration)
+ fprintf(stderr, "nfacct.plugin: iteration %zu, dt %llu usec\n"
+ , iteration
+ , dt
+ );
+
+#ifdef DO_NFACCT
+ if(likely(nfacct)) {
+ if(debug) fprintf(stderr, "nfacct.plugin: calling nfacct_collect()\n");
+ nfacct = !nfacct_collect();
+
+ if(likely(nfacct)) {
+ if(debug) fprintf(stderr, "nfacct.plugin: calling nfacct_send_metrics()\n");
+ nfacct_send_metrics();
+ }
+ }
+#endif
+
+#ifdef DO_NFSTAT
+ if(likely(nfstat)) {
+ if(debug) fprintf(stderr, "nfacct.plugin: calling nfstat_collect()\n");
+ nfstat = !nfstat_collect();
+
+ if(likely(nfstat)) {
+ if(debug) fprintf(stderr, "nfacct.plugin: calling nfstat_send_metrics()\n");
+ nfstat_send_metrics();
+ }
+ }
+#endif
+
+ fflush(stdout);
+
+ // restart check (14400 seconds)
+ if(now_monotonic_sec() - started_t > 14400) break;
+ }
+
+ info("NFACCT process exiting");
+}
+
+#else // !HAVE_LIBMNL
+
+int main(int argc, char **argv) {
+ (void)argc;
+ (void)argv;
+
+ fatal("nfacct.plugin is not compiled.");
+}
+
+#endif // !HAVE_LIBMNL
diff --git a/collectors/node.d.plugin/Makefile.am b/collectors/node.d.plugin/Makefile.am
new file mode 100644
index 0000000..c3142d4
--- /dev/null
+++ b/collectors/node.d.plugin/Makefile.am
@@ -0,0 +1,61 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+CLEANFILES = \
+ node.d.plugin \
+ $(NULL)
+
+include $(top_srcdir)/build/subst.inc
+SUFFIXES = .in
+
+dist_libconfig_DATA = \
+ node.d.conf \
+ $(NULL)
+
+dist_plugins_SCRIPTS = \
+ node.d.plugin \
+ $(NULL)
+
+dist_noinst_DATA = \
+ node.d.plugin.in \
+ README.md \
+ $(NULL)
+
+usernodeconfigdir=$(configdir)/node.d
+dist_usernodeconfig_DATA = \
+ $(NULL)
+
+# Explicitly install directories to avoid permission issues due to umask
+install-exec-local:
+ $(INSTALL) -d $(DESTDIR)$(usernodeconfigdir)
+
+nodeconfigdir=$(libconfigdir)/node.d
+dist_nodeconfig_DATA = \
+ $(NULL)
+
+dist_node_DATA = \
+ $(NULL)
+
+include fronius/Makefile.inc
+include named/Makefile.inc
+include sma_webbox/Makefile.inc
+include snmp/Makefile.inc
+include stiebeleltron/Makefile.inc
+
+nodemodulesdir=$(nodedir)/node_modules
+dist_nodemodules_DATA = \
+ node_modules/netdata.js \
+ node_modules/extend.js \
+ node_modules/pixl-xml.js \
+ node_modules/net-snmp.js \
+ node_modules/asn1-ber.js \
+ $(NULL)
+
+nodemoduleslibberdir=$(nodedir)/node_modules/lib/ber
+dist_nodemoduleslibber_DATA = \
+ node_modules/lib/ber/index.js \
+ node_modules/lib/ber/errors.js \
+ node_modules/lib/ber/reader.js \
+ node_modules/lib/ber/types.js \
+ node_modules/lib/ber/writer.js \
+ $(NULL)
diff --git a/collectors/node.d.plugin/README.md b/collectors/node.d.plugin/README.md
new file mode 100644
index 0000000..8db80d8
--- /dev/null
+++ b/collectors/node.d.plugin/README.md
@@ -0,0 +1,236 @@
+<!--
+title: "node.d.plugin"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/node.d.plugin/README.md
+-->
+
+# node.d.plugin
+
+`node.d.plugin` is a Netdata external plugin. It is an **orchestrator** for data collection modules written in `node.js`.
+
+1. It runs as an independent process (`ps fax` shows it)
+2. It is started and stopped automatically by Netdata
+3. It communicates with Netdata via a unidirectional pipe (sending data to the `netdata` daemon)
+4. Supports any number of data collection **modules**
+5. Allows each **module** to have one or more data collection **jobs**
+6. Each **job** is collecting one or more metrics from a single data source
+
+## Pull Request Checklist for Node.js Plugins
+
+This is a generic checklist for submitting a new Node.js plugin for Netdata. It is by no means comprehensive.
+
+At minimum, to be buildable and testable, the PR needs to include:
+
+- The module itself, following proper naming conventions: `node.d/<module_dir>/<module_name>.node.js`
+- A README.md file for the plugin.
+- The configuration file for the module
+- A basic configuration for the plugin in the appropriate global config file: `conf.d/node.d.conf`, which is also in JSON format. If the module should be enabled by default, add a section for it in the `modules` dictionary.
+- A line for the plugin in the appropriate `Makefile.am` file: `node.d/Makefile.am` under `dist_node_DATA`.
+- A line for the plugin configuration file in `conf.d/Makefile.am`: under `dist_nodeconfig_DATA`
+- Optionally, chart information in `web/dashboard_info.js`. This generally involves specifying a name and icon for the section, and may include descriptions for the section or individual charts.
+
+## Motivation
+
+Node.js is well suited to asynchronous operations. It is fast and very widely deployed.
+Since data collection is not a CPU-intensive task, node.js is a good fit for it.
+
+`node.d.plugin` is a Netdata plugin that provides an abstraction layer to allow easy and quick development of data
+collectors in node.js. It also manages all its data collectors (placed in `/usr/libexec/netdata/node.d`) using a single
+instance of node, thus lowering the memory footprint of data collection.
+
+Of course, there can be independent plugins written in node.js (placed in `/usr/libexec/netdata/plugins`).
+These will have to be developed using the guidelines of **[External Plugins](/collectors/plugins.d/README.md)**.
+
+To run `node.js` plugins you need to have `node` installed in your system.
+
+In some older systems, the package named `node` is not node.js. It is a terminal emulation program called `ax25-node`.
+In this case the node.js package may be referred to as `nodejs`. Once you install `nodejs`, we suggest linking
+`/usr/bin/nodejs` to `/usr/bin/node`, so that typing `node` in your terminal opens node.js.
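+
+For example, on a system where the package installed `/usr/bin/nodejs`, the link can be created like this
+(a minimal sketch - the exact paths depend on your distribution):
+
+```sh
+# check what is installed first
+which nodejs node
+
+# create the symlink (assumes node.js was installed as /usr/bin/nodejs)
+sudo ln -s /usr/bin/nodejs /usr/bin/node
+```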
+
+## configuring `node.d.plugin`
+
+`node.d.plugin` can work even without any configuration. Its default configuration file is
+`node.d.conf`. To edit it on your system, run `/etc/netdata/edit-config node.d.conf`.
+
+## configuring `node.d.plugin` modules
+
+`node.d.plugin` modules accept configuration in `JSON` format.
+
+Unfortunately, `JSON` files do not accept comments. So, the best way to describe them is to have markdown text files
+with instructions.
+
+`JSON` has very strict formatting. If you get errors from Netdata at `/var/log/netdata/error.log` saying that a certain
+configuration file cannot be loaded, we suggest verifying it at <http://jsonlint.com/>.
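+
+If you prefer to validate locally, `node` itself can do it (a quick sketch - replace the path with the
+configuration file you want to check):
+
+```sh
+node -e 'JSON.parse(require("fs").readFileSync("/etc/netdata/node.d/mymodule.conf", "utf8")); console.log("valid JSON")'
+```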
+
+The files in this directory provide usable examples for configuring each `node.d.plugin` module.
+
+## debugging modules written for node.d.plugin
+
+To test `node.d.plugin` modules, which are placed in `/usr/libexec/netdata/node.d`, you can run `node.d.plugin` by hand,
+like this:
+
+```sh
+# become user netdata
+sudo su -s /bin/sh netdata
+
+# run the plugin in debug mode
+/usr/libexec/netdata/plugins.d/node.d.plugin debug 1 X Y Z
+```
+
+`node.d.plugin` will run in `debug` mode (lots of debug info), with an update frequency of `1` second, evaluating only
+the collector scripts `X` (i.e. `/usr/libexec/netdata/node.d/X.node.js`), `Y` and `Z`.
+You can define zero or more modules. If none is defined, `node.d.plugin` will evaluate all modules available.
+
+Keep in mind that if your configs are not in `/etc/netdata`, you should do the following before running `node.d.plugin`:
+
+```sh
+export NETDATA_USER_CONFIG_DIR="/path/to/etc/netdata"
+```
+
+---
+
+## developing `node.d.plugin` modules
+
+Your data collection module should be split in 3 parts:
+
+- a function to fetch the data from its source. `node.d.plugin` can already fetch data from web sources,
+  so you don't need to do anything for HTTP.
+
+- a function to process/manipulate the fetched data. This function will make a number of calls
+  to create charts and dimensions and pass the collected values to Netdata.
+  This is the only function you need to write for collecting HTTP JSON data.
+
+- a `configure` and an `update` function, which take care of your module configuration and data refresh
+ respectively. You can use the supplied ones.
+
+Your module will automatically be able to process any number of servers, with different settings (even different
+data collection frequencies). You write just the code needed for one server, and `node.d.plugin` does the rest.
+For each server you are going to fetch data from, you will have to create a `service` (more on this later).
+
+### writing the data collection module
+
+To provide a module called `mymodule`, you have to create the file `/usr/libexec/netdata/node.d/mymodule.node.js`, with this structure:
+
+```js
+// the processor is needed only
+// if you need a custom processor
+// other than http
+netdata.processors.myprocessor = {
+ name: 'myprocessor',
+
+ process: function(service, callback) {
+
+ /* do data collection here */
+
+ callback(data);
+ }
+};
+
+// this is the mymodule definition
+var mymodule = {
+ processResponse: function(service, data) {
+
+ /* send information to the Netdata server here */
+
+ },
+
+ configure: function(config) {
+ var eligible_services = 0;
+
+ if(typeof(config.servers) === 'undefined' || config.servers.length === 0) {
+
+ /*
+ * create a service using internal defaults;
+ * this is used for auto-detecting the settings
+ * if possible
+ */
+
+ netdata.service({
+ name: 'a name for this service',
+ update_every: this.update_every,
+ module: this,
+ processor: netdata.processors.myprocessor,
+ // any other information your processor needs
+ }).execute(this.processResponse);
+
+ eligible_services++;
+ }
+ else {
+
+ /*
+ * create a service for each server in the
+ * configuration file
+ */
+
+ var len = config.servers.length;
+ while(len--) {
+ var server = config.servers[len];
+
+ netdata.service({
+ name: server.name,
+ update_every: server.update_every,
+ module: this,
+ processor: netdata.processors.myprocessor,
+ // any other information your processor needs
+ }).execute(this.processResponse);
+
+ eligible_services++;
+ }
+ }
+
+ return eligible_services;
+ },
+
+ update: function(service, callback) {
+
+ /*
+ * this function is called when each service
+ * created by the configure function, needs to
+ * collect updated values.
+ *
+ * You normally will not need to change it.
+ */
+
+ service.execute(function(service, data) {
+ mymodule.processResponse(service, data);
+ callback();
+ });
+ },
+};
+
+module.exports = mymodule;
+```
+
+#### configure(config)
+
+`configure(config)` is called just once, when `node.d.plugin` starts.
+The config file will contain the contents of `/etc/netdata/node.d/mymodule.conf`.
+This file should have the following format:
+
+```js
+{
+ "enable_autodetect": false,
+ "update_every": 5,
+ "servers": [ { /* server 1 */ }, { /* server 2 */ } ]
+}
+```
+
+If the config file `/etc/netdata/node.d/mymodule.conf` does not provide `enable_autodetect` or `update_every`, these
+will be added by `node.d.plugin`, so your module will always have them.
+
+The configuration file `/etc/netdata/node.d/mymodule.conf` may contain whatever else is needed for `mymodule`.
+
+#### processResponse(data)
+
+`data` may be `null` or whatever the processor specified in the `service` returned.
+
+The `service` object defines a set of functions that allow you to send information to the Netdata core about:
+
+1. Charts and dimension definitions
+2. Updated values, from the collected values
+
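+As an illustration, a typical `processResponse()` uses these functions roughly like this (a hedged
+sketch based on how the modules bundled with the plugin use the API; `myChartDefinition` stands for
+a chart definition object like the ones shown in the module example above):
+
+```js
+processResponse: function (service, data) {
+    if (data === null) return;
+
+    // register the service with the Netdata core
+    service.commit();
+
+    // (re)send the chart and dimension definitions
+    var chart = service.chart("mymodule_" + service.name + ".requests", myChartDefinition);
+
+    // send the updated values
+    service.begin(chart);
+    service.set("requests", 123); // one service.set() per dimension
+    service.end();
+},
+```
+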
+---
+
+_FIXME: document an operational node.d.plugin data collector - the best example is the
+[snmp collector](https://raw.githubusercontent.com/netdata/netdata/master/collectors/node.d.plugin/snmp/snmp.node.js)_
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fnode.d.plugin%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/node.d.plugin/fronius/Makefile.inc b/collectors/node.d.plugin/fronius/Makefile.inc
new file mode 100644
index 0000000..da0743a
--- /dev/null
+++ b/collectors/node.d.plugin/fronius/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_node_DATA += fronius/fronius.node.js
+# dist_nodeconfig_DATA += fronius/fronius.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += fronius/README.md fronius/Makefile.inc
+
diff --git a/collectors/node.d.plugin/fronius/README.md b/collectors/node.d.plugin/fronius/README.md
new file mode 100644
index 0000000..746737d
--- /dev/null
+++ b/collectors/node.d.plugin/fronius/README.md
@@ -0,0 +1,135 @@
+<!--
+title: "Fronius Symo monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/node.d.plugin/fronius/README.md
+sidebar_label: "Fronius Symo"
+-->
+
+# Fronius Symo monitoring with Netdata
+
+Collects metrics from the configured Fronius Symo solar power installation.
+
+**Requirements**
+
+- Configuration file `fronius.conf` in the node.d Netdata config dir (default: `/etc/netdata/node.d/fronius.conf`)
+- Fronius Symo with network access (http)
+
+It produces per server:
+
+1. **Power**
+
+- Current power input from the grid (positive values), output to the grid (negative values), in W
+- Current power input from the solar panels, in W
+- Current power stored in the accumulator (if present), in W (in theory, untested)
+
+2. **Consumption**
+
+- Local consumption in W
+
+3. **Autonomy**
+
+- Relative autonomy in %. 100 % autonomy means that the solar panels are delivering more power than is needed by local consumption.
+- Relative self-consumption in %. The lower, the better (a worked example follows this list).
+
+4. **Energy**
+
+- The energy produced during the current day, in kWh
+- The energy produced during the current year, in kWh
+
+5. **Inverter**
+
+- The current power output from the connected inverters, in W, one dimension per inverter. At least one is always present.
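+
+As a worked example, here is a sketch of how the `solar_consumption` dimension is derived in
+`fronius.node.js` (shown later in this patch), using the values from the sample API output below;
+note it comes out close to the device-reported `rel_SelfConsumption` in that sample:
+
+```js
+var load  = Math.abs(-1271.45); // P_Load from the API (negative while consuming)
+var power = Math.max(8106, 0);  // P_PV from the API
+
+var solarConsumption;
+if (power <= 0)         solarConsumption = 0;                  // nothing is produced
+else if (load >= power) solarConsumption = 100;                // all production is consumed locally
+else                    solarConsumption = 100 / power * load; // ~15.7 % here
+```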
+
+## configuration
+
+Sample:
+
+```json
+{
+ "enable_autodetect": false,
+ "update_every": 5,
+ "servers": [
+ {
+ "name": "Symo",
+ "hostname": "symo.ip.or.dns",
+ "update_every": 5,
+ "api_path": "/solar_api/v1/GetPowerFlowRealtimeData.fcgi"
+ }
+ ]
+}
+```
+
+If no configuration is given, the module will be disabled. Each `update_every` is optional, the default is `5`.
+
+---
+
+[Fronius Symo 8.2](https://www.fronius.com/en/photovoltaics/products/all-products/inverters/fronius-symo/fronius-symo-8-2-3-m)
+
+The plugin has been tested with a single inverter, namely Fronius Symo 8.2-3-M:
+
+- Datalogger version: 240.162630
+- Software version: 3.7.4-6
+- Hardware version: 2.4D
+
+Other products and versions may work, but without any guarantees.
+
+Example Netdata configuration for `node.d/fronius.conf`. Copy this section to `fronius.conf` and change the name/IP.
+The module supports any number of servers. Sometimes there is a lag when collecting every 3 seconds, so 5 seconds should be fine too. You can modify this per server.
+
+```json
+{
+ "enable_autodetect": false,
+ "update_every": 5,
+ "servers": [
+ {
+ "name": "solar",
+ "hostname": "symo.ip.or.dns",
+ "update_every": 5,
+ "api_path": "/solar_api/v1/GetPowerFlowRealtimeData.fcgi"
+ }
+ ]
+}
+```
+
+The output of `/solar_api/v1/GetPowerFlowRealtimeData.fcgi` looks like this:
+
+```json
+{
+ "Head" : {
+ "RequestArguments" : {},
+ "Status" : {
+ "Code" : 0,
+ "Reason" : "",
+ "UserMessage" : ""
+ },
+ "Timestamp" : "2017-07-05T12:35:12+02:00"
+ },
+ "Body" : {
+ "Data" : {
+ "Site" : {
+ "Mode" : "meter",
+ "P_Grid" : -6834.549847,
+ "P_Load" : -1271.450153,
+ "P_Akku" : null,
+ "P_PV" : 8106,
+ "rel_SelfConsumption" : 15.685297,
+ "rel_Autonomy" : 100,
+ "E_Day" : 35020,
+ "E_Year" : 5826076,
+ "E_Total" : 14788870,
+ "Meter_Location" : "grid"
+ },
+ "Inverters" : {
+ "1" : {
+ "DT" : 123,
+ "P" : 8106,
+ "E_Day" : 35020,
+ "E_Year" : 5826076,
+ "E_Total" : 14788870
+ }
+ }
+ }
+ }
+}
+```
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fnode.d.plugin%2Ffronius%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/node.d.plugin/fronius/fronius.node.js b/collectors/node.d.plugin/fronius/fronius.node.js
new file mode 100644
index 0000000..436f3a3
--- /dev/null
+++ b/collectors/node.d.plugin/fronius/fronius.node.js
@@ -0,0 +1,400 @@
+"use strict";
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+// This program will connect to one or more Fronius Symo inverters
+// to get the solar power generated (current, today).
+
+// example configuration in netdata/conf.d/node.d/fronius.conf.md
+
+require("url");
+require("http");
+var netdata = require("netdata");
+
+netdata.debug("loaded " + __filename + " plugin");
+
+var fronius = {
+ name: "Fronius",
+ enable_autodetect: false,
+ update_every: 5,
+ base_priority: 60000,
+ charts: {},
+
+ powerGridId: "p_grid",
+ powerPvId: "p_pv",
+    powerAccuId: "p_akku", // not my typo! Using the ID from the API
+ consumptionLoadId: "p_load",
+ autonomyId: "rel_autonomy",
+ consumptionSelfId: "rel_selfconsumption",
+ solarConsumptionId: "solar_consumption",
+ energyTodayId: "e_day",
+ energyYearId: "e_year",
+
+ createBasicDimension: function (id, name, divisor) {
+ return {
+ id: id, // the unique id of the dimension
+ name: name, // the name of the dimension
+ algorithm: netdata.chartAlgorithms.absolute,// the id of the netdata algorithm
+ multiplier: 1, // the multiplier
+ divisor: divisor, // the divisor
+ hidden: false // is hidden (boolean)
+ };
+ },
+
+ // Gets the site power chart. Will be created if not existing.
+ getSitePowerChart: function (service, suffix) {
+ var id = this.getChartId(service, suffix);
+ var chart = fronius.charts[id];
+ if (fronius.isDefined(chart)) return chart;
+
+ var dim = {};
+ dim[fronius.powerGridId] = this.createBasicDimension(fronius.powerGridId, "grid", 1);
+ dim[fronius.powerPvId] = this.createBasicDimension(fronius.powerPvId, "photovoltaics", 1);
+ dim[fronius.powerAccuId] = this.createBasicDimension(fronius.powerAccuId, "accumulator", 1);
+
+ chart = {
+ id: id, // the unique id of the chart
+ name: "", // the unique name of the chart
+ title: service.name + " Current Site Power", // the title of the chart
+ units: "W", // the units of the chart dimensions
+ family: "power", // the family of the chart
+ context: "fronius.power", // the context of the chart
+ type: netdata.chartTypes.area, // the type of the chart
+ priority: fronius.base_priority + 1, // the priority relative to others in the same family
+ update_every: service.update_every, // the expected update frequency of the chart
+ dimensions: dim
+ };
+ chart = service.chart(id, chart);
+ fronius.charts[id] = chart;
+
+ return chart;
+ },
+
+ // Gets the site consumption chart. Will be created if not existing.
+ getSiteConsumptionChart: function (service, suffix) {
+ var id = this.getChartId(service, suffix);
+ var chart = fronius.charts[id];
+ if (fronius.isDefined(chart)) return chart;
+ var dim = {};
+ dim[fronius.consumptionLoadId] = this.createBasicDimension(fronius.consumptionLoadId, "load", 1);
+
+ chart = {
+ id: id, // the unique id of the chart
+ name: "", // the unique name of the chart
+ title: service.name + " Current Load", // the title of the chart
+ units: "W", // the units of the chart dimensions
+ family: "consumption", // the family of the chart
+ context: "fronius.consumption", // the context of the chart
+ type: netdata.chartTypes.area, // the type of the chart
+ priority: fronius.base_priority + 2, // the priority relative to others in the same family
+ update_every: service.update_every, // the expected update frequency of the chart
+ dimensions: dim
+ };
+ chart = service.chart(id, chart);
+ fronius.charts[id] = chart;
+
+ return chart;
+ },
+
+    // Gets the site autonomy chart. Will be created if not existing.
+ getSiteAutonomyChart: function (service, suffix) {
+ var id = this.getChartId(service, suffix);
+ var chart = fronius.charts[id];
+ if (fronius.isDefined(chart)) return chart;
+ var dim = {};
+ dim[fronius.autonomyId] = this.createBasicDimension(fronius.autonomyId, "autonomy", 1);
+ dim[fronius.consumptionSelfId] = this.createBasicDimension(fronius.consumptionSelfId, "self_consumption", 1);
+ dim[fronius.solarConsumptionId] = this.createBasicDimension(fronius.solarConsumptionId, "solar_consumption", 1);
+
+ chart = {
+ id: id, // the unique id of the chart
+ name: "", // the unique name of the chart
+ title: service.name + " Current Autonomy", // the title of the chart
+ units: "%", // the units of the chart dimensions
+ family: "autonomy", // the family of the chart
+ context: "fronius.autonomy", // the context of the chart
+ type: netdata.chartTypes.area, // the type of the chart
+ priority: fronius.base_priority + 3, // the priority relative to others in the same family
+ update_every: service.update_every, // the expected update frequency of the chart
+ dimensions: dim
+ };
+ chart = service.chart(id, chart);
+ fronius.charts[id] = chart;
+
+ return chart;
+ },
+
+ // Gets the site energy chart for today. Will be created if not existing.
+ getSiteEnergyTodayChart: function (service, suffix) {
+ var chartId = this.getChartId(service, suffix);
+ var chart = fronius.charts[chartId];
+ if (fronius.isDefined(chart)) return chart;
+ var dim = {};
+ dim[fronius.energyTodayId] = this.createBasicDimension(fronius.energyTodayId, "today", 1000);
+ chart = {
+ id: chartId, // the unique id of the chart
+ name: "", // the unique name of the chart
+ title: service.name + " Energy production for today",// the title of the chart
+ units: "kWh", // the units of the chart dimensions
+ family: "energy", // the family of the chart
+ context: "fronius.energy.today", // the context of the chart
+ type: netdata.chartTypes.area, // the type of the chart
+ priority: fronius.base_priority + 4, // the priority relative to others in the same family
+ update_every: service.update_every, // the expected update frequency of the chart
+ dimensions: dim
+ };
+ chart = service.chart(chartId, chart);
+ fronius.charts[chartId] = chart;
+
+ return chart;
+ },
+
+    // Gets the site energy chart for the current year. Will be created if not existing.
+ getSiteEnergyYearChart: function (service, suffix) {
+ var chartId = this.getChartId(service, suffix);
+ var chart = fronius.charts[chartId];
+ if (fronius.isDefined(chart)) return chart;
+ var dim = {};
+ dim[fronius.energyYearId] = this.createBasicDimension(fronius.energyYearId, "year", 1000);
+ chart = {
+ id: chartId, // the unique id of the chart
+ name: "", // the unique name of the chart
+ title: service.name + " Energy production for this year",// the title of the chart
+ units: "kWh", // the units of the chart dimensions
+ family: "energy", // the family of the chart
+ context: "fronius.energy.year", // the context of the chart
+ type: netdata.chartTypes.area, // the type of the chart
+ priority: fronius.base_priority + 5, // the priority relative to others in the same family
+ update_every: service.update_every, // the expected update frequency of the chart
+ dimensions: dim
+ };
+ chart = service.chart(chartId, chart);
+ fronius.charts[chartId] = chart;
+
+ return chart;
+ },
+
+ // Gets the inverter power chart. Will be created if not existing.
+ // Needs the array of inverters in order to create a chart with all inverters as dimensions
+ getInverterPowerChart: function (service, suffix, inverters) {
+ var chartId = this.getChartId(service, suffix);
+ var chart = fronius.charts[chartId];
+ if (fronius.isDefined(chart)) return chart;
+
+ var dim = {};
+ for (var key in inverters) {
+ if (inverters.hasOwnProperty(key)) {
+ var name = key;
+ if (!isNaN(key)) name = "inverter_" + key;
+ dim[key] = this.createBasicDimension("inverter_" + key, name, 1);
+ }
+ }
+
+ chart = {
+ id: chartId, // the unique id of the chart
+ name: "", // the unique name of the chart
+ title: service.name + " Current Inverter Output",// the title of the chart
+ units: "W", // the units of the chart dimensions
+ family: "inverters", // the family of the chart
+ context: "fronius.inverter.output", // the context of the chart
+ type: netdata.chartTypes.stacked, // the type of the chart
+ priority: fronius.base_priority + 6, // the priority relative to others in the same family
+ update_every: service.update_every, // the expected update frequency of the chart
+ dimensions: dim
+ };
+ chart = service.chart(chartId, chart);
+ fronius.charts[chartId] = chart;
+
+ return chart;
+ },
+
+ processResponse: function (service, content) {
+ var json = fronius.convertToJson(content);
+ if (json === null) return;
+
+ // add the service
+ service.commit();
+
+ var chartDefinitions = fronius.parseCharts(service, json);
+ var chartCount = chartDefinitions.length;
+ while (chartCount--) {
+ var chartObj = chartDefinitions[chartCount];
+ service.begin(chartObj.chart);
+ var dimCount = chartObj.dimensions.length;
+ while (dimCount--) {
+ var dim = chartObj.dimensions[dimCount];
+ service.set(dim.name, dim.value);
+ }
+ service.end();
+ }
+ },
+
+ parseCharts: function (service, json) {
+ var site = json.Body.Data.Site;
+ return [
+ this.parsePowerChart(service, site),
+ this.parseConsumptionChart(service, site),
+ this.parseAutonomyChart(service, site),
+ this.parseEnergyTodayChart(service, site),
+ this.parseEnergyYearChart(service, site),
+ this.parseInverterChart(service, json.Body.Data.Inverters)
+ ];
+ },
+
+ parsePowerChart: function (service, site) {
+ return this.getChart(this.getSitePowerChart(service, "power"),
+ [
+ this.getDimension(this.powerGridId, Math.round(site.P_Grid)),
+ this.getDimension(this.powerPvId, Math.round(Math.max(site.P_PV, 0))),
+ this.getDimension(this.powerAccuId, Math.round(site.P_Akku))
+ ]
+ );
+ },
+
+ parseConsumptionChart: function (service, site) {
+ return this.getChart(this.getSiteConsumptionChart(service, "consumption"),
+ [this.getDimension(this.consumptionLoadId, Math.round(Math.abs(site.P_Load)))]
+ );
+ },
+
+ parseAutonomyChart: function (service, site) {
+ var selfConsumption = site.rel_SelfConsumption;
+ var solarConsumption = 0;
+ var load = Math.abs(site.P_Load);
+ var power = Math.max(site.P_PV, 0);
+ if (power <= 0) solarConsumption = 0;
+ else if (load >= power) solarConsumption = 100;
+ else solarConsumption = 100 / power * load;
+ return this.getChart(this.getSiteAutonomyChart(service, "autonomy"),
+ [
+ this.getDimension(this.autonomyId, Math.round(site.rel_Autonomy)),
+ this.getDimension(this.consumptionSelfId, Math.round(selfConsumption === null ? 100 : selfConsumption)),
+ this.getDimension(this.solarConsumptionId, Math.round(solarConsumption))
+ ]
+ );
+ },
+
+ parseEnergyTodayChart: function (service, site) {
+ return this.getChart(this.getSiteEnergyTodayChart(service, "energy.today"),
+ [this.getDimension(this.energyTodayId, Math.round(Math.max(site.E_Day, 0)))]
+ );
+ },
+
+ parseEnergyYearChart: function (service, site) {
+ return this.getChart(this.getSiteEnergyYearChart(service, "energy.year"),
+ [this.getDimension(this.energyYearId, Math.round(Math.max(site.E_Year, 0)))]
+ );
+ },
+
+ parseInverterChart: function (service, inverters) {
+ var dimensions = [];
+ for (var key in inverters) {
+ if (inverters.hasOwnProperty(key)) {
+ dimensions.push(this.getDimension(key, Math.round(inverters[key].P)));
+ }
+ }
+ return this.getChart(this.getInverterPowerChart(service, "inverters.output", inverters), dimensions);
+ },
+
+ getDimension: function (name, value) {
+ return {
+ name: name,
+ value: value
+ };
+ },
+
+ getChart: function (chart, dimensions) {
+ return {
+ chart: chart,
+ dimensions: dimensions
+ };
+ },
+
+ getChartId: function (service, suffix) {
+ return "fronius_" + service.name + "." + suffix;
+ },
+
+ convertToJson: function (httpBody) {
+ if (httpBody === null) return null;
+ var json = httpBody;
+ // can't parse if it's already a json object,
+ // the check enables easier testing if the httpBody is already valid JSON.
+ if (typeof httpBody !== "object") {
+ try {
+ json = JSON.parse(httpBody);
+ } catch (error) {
+ netdata.error("fronius: Got a response, but it is not valid JSON. Ignoring. Error: " + error.message);
+ return null;
+ }
+ }
+ return this.isResponseValid(json) ? json : null;
+ },
+
+ // some basic validation
+ isResponseValid: function (json) {
+ if (this.isUndefined(json.Body)) return false;
+ if (this.isUndefined(json.Body.Data)) return false;
+ if (this.isUndefined(json.Body.Data.Site)) return false;
+ return this.isDefined(json.Body.Data.Inverters);
+ },
+
+ // module.serviceExecute()
+ // this function is called only from this module
+ // its purpose is to prepare the request and call
+ // netdata.serviceExecute()
+ serviceExecute: function (name, uri, update_every) {
+ netdata.debug(this.name + ": " + name + ": url: " + uri + ", update_every: " + update_every);
+
+ var service = netdata.service({
+ name: name,
+ request: netdata.requestFromURL("http://" + uri),
+ update_every: update_every,
+ module: this
+ });
+ service.execute(this.processResponse);
+ },
+
+
+ configure: function (config) {
+ if (fronius.isUndefined(config.servers)) return 0;
+ var added = 0;
+ var len = config.servers.length;
+ while (len--) {
+ var server = config.servers[len];
+ if (fronius.isUndefined(server.update_every)) server.update_every = this.update_every;
+ if (fronius.areUndefined([server.name, server.hostname, server.api_path])) continue;
+
+ var url = server.hostname + server.api_path;
+ this.serviceExecute(server.name, url, server.update_every);
+ added++;
+ }
+ return added;
+ },
+
+ // module.update()
+ // this is called repeatedly to collect data, by calling
+ // netdata.serviceExecute()
+ update: function (service, callback) {
+ service.execute(function (serv, data) {
+ service.module.processResponse(serv, data);
+ callback();
+ });
+ },
+
+ isUndefined: function (value) {
+ return typeof value === "undefined";
+ },
+
+ areUndefined: function (valueArray) {
+ var i = 0;
+ for (i; i < valueArray.length; i++) {
+ if (this.isUndefined(valueArray[i])) return true;
+ }
+ return false;
+ },
+
+ isDefined: function (value) {
+ return typeof value !== "undefined";
+ }
+};
+
+module.exports = fronius;
diff --git a/collectors/node.d.plugin/named/Makefile.inc b/collectors/node.d.plugin/named/Makefile.inc
new file mode 100644
index 0000000..95f4230
--- /dev/null
+++ b/collectors/node.d.plugin/named/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_node_DATA += named/named.node.js
+# dist_nodeconfig_DATA += named/named.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += named/README.md named/Makefile.inc
+
diff --git a/collectors/node.d.plugin/named/README.md b/collectors/node.d.plugin/named/README.md
new file mode 100644
index 0000000..acd03f6
--- /dev/null
+++ b/collectors/node.d.plugin/named/README.md
@@ -0,0 +1,348 @@
+<!--
+title: "ISC BIND monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/node.d.plugin/named/README.md
+sidebar_label: "ISC BIND"
+-->
+
+# ISC BIND monitoring with Netdata
+
+Monitor one or more ISC Bind servers.
+
+## Example Netdata charts
+
+Depending on the number of views your bind has, you may get a large number of charts.
+Here is how it looks with just one view:
+
+![image](https://cloud.githubusercontent.com/assets/2662304/12765473/879b8e04-ca07-11e5-817d-b0651996c42b.png)
+![image](https://cloud.githubusercontent.com/assets/2662304/12766538/12b272fa-ca0d-11e5-81e1-6a9f8ff488ff.png)
+
+## How it works
+
+The plugin will execute (from within node.js) the equivalent of:
+
+```sh
+curl "http://localhost:8888/json/v1/server"
+```
+
+Here is a sample of the output this command produces.
+
+```js
+{
+ "json-stats-version":"1.0",
+ "boot-time":"2016-01-31T08:20:48Z",
+ "config-time":"2016-01-31T09:28:03Z",
+ "current-time":"2016-02-02T22:22:20Z",
+ "opcodes":{
+ "QUERY":247816,
+ "IQUERY":0,
+ "STATUS":0,
+ "RESERVED3":0,
+ "NOTIFY":0,
+ "UPDATE":3813,
+ "RESERVED6":0,
+ "RESERVED7":0,
+ "RESERVED8":0,
+ "RESERVED9":0,
+ "RESERVED10":0,
+ "RESERVED11":0,
+ "RESERVED12":0,
+ "RESERVED13":0,
+ "RESERVED14":0,
+ "RESERVED15":0
+ },
+ "qtypes":{
+ "A":89519,
+ "NS":863,
+ "CNAME":1,
+ "SOA":1,
+ "PTR":116779,
+ "MX":276,
+ "TXT":198,
+ "AAAA":39324,
+ "SRV":850,
+ "ANY":5
+ },
+ "nsstats":{
+ "Requestv4":251630,
+ "ReqEdns0":1255,
+ "ReqTSIG":3813,
+ "ReqTCP":57,
+ "AuthQryRej":1455,
+ "RecQryRej":122,
+ "Response":245918,
+ "TruncatedResp":44,
+ "RespEDNS0":1255,
+ "RespTSIG":3813,
+ "QrySuccess":205159,
+ "QryAuthAns":119495,
+ "QryNoauthAns":120770,
+ "QryNxrrset":32711,
+ "QrySERVFAIL":262,
+ "QryNXDOMAIN":2395,
+ "QryRecursion":40885,
+ "QryDuplicate":5712,
+ "QryFailure":1577,
+ "UpdateDone":2514,
+ "UpdateFail":1299,
+ "UpdateBadPrereq":1276,
+ "QryUDP":246194,
+ "QryTCP":45,
+ "OtherOpt":101
+ },
+ "views":{
+ "local":{
+ "resolver":{
+ "stats":{
+ "Queryv4":74577,
+ "Responsev4":67032,
+ "NXDOMAIN":601,
+ "SERVFAIL":5,
+ "FORMERR":7,
+ "EDNS0Fail":7,
+ "Truncated":3071,
+ "Lame":4,
+ "Retry":11826,
+ "QueryTimeout":1838,
+ "GlueFetchv4":6864,
+ "GlueFetchv4Fail":30,
+ "QryRTT10":112,
+ "QryRTT100":42900,
+ "QryRTT500":23275,
+ "QryRTT800":534,
+ "QryRTT1600":97,
+ "QryRTT1600+":20,
+ "BucketSize":31,
+ "REFUSED":13
+ },
+ "qtypes":{
+ "A":64931,
+ "NS":870,
+ "CNAME":185,
+ "PTR":5,
+ "MX":49,
+ "TXT":149,
+ "AAAA":7972,
+ "SRV":416
+ },
+ "cache":{
+ "A":40356,
+ "NS":8032,
+ "CNAME":14477,
+ "PTR":2,
+ "MX":21,
+ "TXT":32,
+ "AAAA":3301,
+ "SRV":94,
+ "DS":237,
+ "RRSIG":2301,
+ "NSEC":126,
+ "!A":52,
+ "!NS":4,
+ "!TXT":1,
+ "!AAAA":3797,
+ "!SRV":9,
+ "NXDOMAIN":590
+ },
+ "cachestats":{
+ "CacheHits":1085188,
+ "CacheMisses":109,
+ "QueryHits":464755,
+ "QueryMisses":55624,
+ "DeleteLRU":0,
+ "DeleteTTL":42615,
+ "CacheNodes":5188,
+ "CacheBuckets":2079,
+ "TreeMemTotal":2326026,
+ "TreeMemInUse":1508075,
+ "HeapMemMax":132096,
+ "HeapMemTotal":393216,
+ "HeapMemInUse":132096
+ },
+ "adb":{
+ "nentries":1021,
+ "entriescnt":3157,
+ "nnames":1021,
+ "namescnt":3022
+ }
+ }
+ },
+ "public":{
+ "resolver":{
+ "stats":{
+ "BucketSize":31
+ },
+ "qtypes":{
+ },
+ "cache":{
+ },
+ "cachestats":{
+ "CacheHits":0,
+ "CacheMisses":0,
+ "QueryHits":0,
+ "QueryMisses":0,
+ "DeleteLRU":0,
+ "DeleteTTL":0,
+ "CacheNodes":0,
+ "CacheBuckets":64,
+ "TreeMemTotal":287392,
+ "TreeMemInUse":29608,
+ "HeapMemMax":1024,
+ "HeapMemTotal":262144,
+ "HeapMemInUse":1024
+ },
+ "adb":{
+ "nentries":1021,
+ "nnames":1021
+ }
+ }
+ },
+ "_bind":{
+ "resolver":{
+ "stats":{
+ "BucketSize":31
+ },
+ "qtypes":{
+ },
+ "cache":{
+ },
+ "cachestats":{
+ "CacheHits":0,
+ "CacheMisses":0,
+ "QueryHits":0,
+ "QueryMisses":0,
+ "DeleteLRU":0,
+ "DeleteTTL":0,
+ "CacheNodes":0,
+ "CacheBuckets":64,
+ "TreeMemTotal":287392,
+ "TreeMemInUse":29608,
+ "HeapMemMax":1024,
+ "HeapMemTotal":262144,
+ "HeapMemInUse":1024
+ },
+ "adb":{
+ "nentries":1021,
+ "nnames":1021
+ }
+ }
+ }
+ }
+}
+```
+
+From this output it collects:
+
+- Global Received Requests by IP version (IPv4, IPv6)
+- Global Successful Queries
+- Current Recursive Clients
+- Global Queries by IP Protocol (TCP, UDP)
+- Global Queries Analysis
+- Global Received Updates
+- Global Query Failures
+- Global Query Failures Analysis
+- Other Global Server Statistics
+- Global Incoming Requests by OpCode
+- Global Incoming Requests by Query Type
+- Global Socket Statistics (will only work if the url is `http://127.0.0.1:8888/json/v1`, i.e. without `/server`, but keep in mind this produces a very long output and probably will account for 0.5% CPU overhead alone, per bind server added)
+- Per View Statistics (the following set will be added for each bind view):
+ - View, Resolver Active Queries
+ - View, Resolver Statistics
+ - View, Resolver Round Trip Timings
+ - View, Requests by Query Type
+
+## Configuration
+
+The collector (optionally) reads a configuration file named `/etc/netdata/node.d/named.conf`, with the following contents:
+
+```js
+{
+ "enable_autodetect": true,
+ "update_every": 5,
+ "servers": [
+ {
+ "name": "bind1",
+ "url": "http://127.0.0.1:8888/json/v1/server",
+ "update_every": 1
+ },
+ {
+ "name": "bind2",
+ "url": "http://10.1.2.3:8888/json/v1/server",
+ "update_every": 2
+ }
+ ]
+}
+```
+
+You can add any number of bind servers.
+
+If the configuration file is missing, or the key `enable_autodetect` is `true`, the collector will also attempt to fetch `http://localhost:8888/json/v1/server`, which, if successful, will be added too.
+
+### XML instead of JSON, from bind
+
+The collector can also accept bind URLs that return XML output. This might be required if you cannot run bind 9.10+ with JSON, but you have a version of bind that supports XML statistics v3. Check [this](https://www.isc.org/blogs/bind-9-10-statistics-troubleshooting-and-zone-configuration/) for the supported versions.
+
+In such cases, use a URL like this:
+
+```sh
+curl "http://localhost:8888/xml/v3/server"
+```
+
+Only `xml` and `v3` have been tested.
+
+Keep in mind, though, that XML parsing is done using javascript code, which requires a triple conversion:
+
+1. from XML to JSON using a javascript XML parser (**CPU intensive**),
+2. which is then transformed to emulate the JSON output of bind (**CPU intensive** - and yes, the JSON converted from XML is different from the native JSON - even bind produces different names for various attributes),
+3. which is then processed to generate the data for the charts (this will happen even if bind is producing JSON).
+
+In general, expect XML parsing to be 2 to 3 times more CPU intensive than JSON.
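+
+For reference, step 1 looks roughly like this inside the collector (a sketch - `named.node.js` uses the
+`pixl-xml` parser it requires at the top of the module; `xmlBody` stands for the raw HTTP response body):
+
+```js
+var XML = require("pixl-xml");
+
+// step 1: XML string -> javascript object (the CPU-intensive part)
+var doc = XML.parse(xmlBody);
+// steps 2 and 3 then reshape `doc` to emulate bind's native JSON layout
+```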
+
+**So, if you can use the JSON output of bind, prefer it over XML**. Also keep in mind that bind itself uses more CPU when generating XML instead of JSON.
+
+The XML interface of bind is not autodetected.
+You will have to provide the config file `/etc/netdata/node.d/named.conf`, like this:
+
+```js
+{
+ "enable_autodetect": false,
+ "update_every": 1,
+ "servers": [
+ {
+ "name": "local",
+ "url": "http://localhost:8888/xml/v3/server",
+ "update_every": 1
+ }
+ ]
+}
+```
+
+Of course, you can monitor more than one bind server. Each one can be configured with either JSON or XML output.
+
+## Auto-detection
+
+Auto-detection is controlled by `enable_autodetect` in the config file. It is enabled by default, so if the collector can connect to `http://localhost:8888/json/v1/server` and receive bind statistics, that server will be enabled automatically.
+
+## Bind (named) configuration
+
+To use this plugin, you have to have bind v9.10+ properly compiled to provide statistics in `JSON` format.
+
+For more information on how to get your bind installation ready, please refer to the [bind statistics channel developer comments](http://jpmens.net/2013/03/18/json-in-bind-9-s-statistics-server/) and to [bind documentation](https://ftp.isc.org/isc/bind/9.10.3/doc/arm/Bv9ARM.ch06.html#statistics) or [bind Knowledge Base article AA-01123](https://kb.isc.org/article/AA-01123/0).
+
+Normally, you will need something like this in your `named.conf`:
+
+```
+statistics-channels {
+ inet 127.0.0.1 port 8888 allow { 127.0.0.1; };
+ inet ::1 port 8888 allow { ::1; };
+};
+```
+
+(use the IPv4 or IPv6 line depending on what you are using; you can also use both)
+
+Verify it works by running the following command (the collector is written in node.js and will query your bind server directly; if this command works, the collector should work too):
+
+```sh
+curl "http://localhost:8888/json/v1/server"
+```
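+
+A successful response is a JSON document. The following is a heavily trimmed, illustrative sketch of the sections this collector reads - the key names match the code in `named.node.js` below, while all values here are hypothetical:
+
+```js
+{
+    "opcodes":   { "QUERY": 1234 },
+    "qtypes":    { "A": 1000, "AAAA": 200 },
+    "nsstats":   { "Requestv4": 1200, "QrySuccess": 1100, "RecursClients": 5 },
+    "sockstats": { "UDP4Open": 10 },
+    "views": {
+        "_default": {
+            "resolver": {
+                "stats":      { "NumFetch": 2, "QryRTT100": 500 },
+                "qtypes":     { "A": 800 },
+                "cachestats": { "CacheHits": 5000, "CacheMisses": 300 }
+            }
+        }
+    }
+}
+```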
+
diff --git a/collectors/node.d.plugin/named/named.node.js b/collectors/node.d.plugin/named/named.node.js
new file mode 100644
index 0000000..d13c608
--- /dev/null
+++ b/collectors/node.d.plugin/named/named.node.js
@@ -0,0 +1,610 @@
+'use strict';
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+// collect statistics from bind (named) v9.10+
+//
+// bind statistics documentation at:
+// http://jpmens.net/2013/03/18/json-in-bind-9-s-statistics-server/
+// https://ftp.isc.org/isc/bind/9.10.3/doc/arm/Bv9ARM.ch06.html#statistics
+
+// example configuration in /etc/netdata/node.d/named.conf
+// the module supports auto-detection if bind is running at localhost
+
+/*
+{
+ "enable_autodetect": true,
+ "update_every": 5,
+ "servers": [
+ {
+ "name": "bind1",
+ "url": "http://127.0.0.1:8888/json/v1/server",
+ "update_every": 1
+ },
+ {
+ "name": "bind2",
+ "url": "http://10.0.0.1:8888/xml/v3/server",
+ "update_every": 2
+ }
+ ]
+}
+*/
+
+// the following is the bind named.conf configuration required
+
+/*
+statistics-channels {
+ inet 127.0.0.1 port 8888 allow { 127.0.0.1; };
+};
+*/
+
+require('url');
+require('http');
+var XML = require('pixl-xml');
+var netdata = require('netdata');
+
+if(netdata.options.DEBUG === true) netdata.debug('loaded', __filename, 'plugin');
+
+var named = {
+ name: __filename,
+ enable_autodetect: true,
+ update_every: 1,
+ base_priority: 60000,
+ charts: {},
+
+ chartFromMembersCreate: function(service, obj, id, title_suffix, units, family, context, type, priority, algorithm, multiplier, divisor) {
+ var chart = {
+ id: id, // the unique id of the chart
+ name: '', // the unique name of the chart
+ title: service.name + ' ' + title_suffix, // the title of the chart
+ units: units, // the units of the chart dimensions
+ family: family, // the family of the chart
+ context: context, // the context of the chart
+ type: type, // the type of the chart
+ priority: priority, // the priority relative to others in the same family
+ update_every: service.update_every, // the expected update frequency of the chart
+ dimensions: {}
+ };
+
+ var found = 0;
+ var dims = Object.keys(obj);
+ var len = dims.length;
+ for(var i = 0; i < len ;i++) {
+ var x = dims[i];
+
+ if(typeof(obj[x]) !== 'undefined' && obj[x] !== 0) {
+ found++;
+ chart.dimensions[x] = {
+ id: x, // the unique id of the dimension
+ name: x, // the name of the dimension
+ algorithm: algorithm, // the id of the netdata algorithm
+ multiplier: multiplier, // the multiplier
+ divisor: divisor, // the divisor
+ hidden: false // is hidden (boolean)
+ };
+ }
+ }
+
+ if(!found)
+ return null;
+
+ chart = service.chart(id, chart);
+ this.charts[id] = chart;
+ return chart;
+ },
+
+ chartFromMembers: function(service, obj, id_suffix, title_suffix, units, family, context, type, priority, algorithm, multiplier, divisor) {
+ var id = 'named_' + service.name + '.' + id_suffix;
+ var chart = this.charts[id];
+ var dims, len, x, i;
+
+ if(typeof chart === 'undefined') {
+ chart = this.chartFromMembersCreate(service, obj, id, title_suffix, units, family, context, type, priority, algorithm, multiplier, divisor);
+ if(chart === null) return false;
+ }
+ else {
+ // check if we need to re-generate the chart
+ dims = Object.keys(obj);
+ len = dims.length;
+ for(i = 0; i < len ;i++) {
+ x = dims[i];
+ if(typeof(chart.dimensions[x]) === 'undefined') {
+ chart = this.chartFromMembersCreate(service, obj, id, title_suffix, units, family, context, type, priority, algorithm, multiplier, divisor);
+ if(chart === null) return false;
+ break;
+ }
+ }
+ }
+
+ service.begin(chart);
+
+ var found = 0;
+ dims = Object.keys(obj);
+ len = dims.length;
+ for(i = 0; i < len ;i++) {
+ x = dims[i];
+ if(typeof(chart.dimensions[x]) !== 'undefined') {
+ found++;
+ service.set(x, obj[x]);
+ }
+ }
+
+ service.end();
+
+ return (found > 0);
+ },
+
+ // an index to map values to different charts
+ lookups: {
+ nsstats: {},
+ resolver_stats: {},
+ numfetch: {}
+ },
+
+ // transform the XML response of bind
+ // to the JSON response of bind
+ xml2js: function(service, data_xml) {
+ var d = XML.parse(data_xml);
+ if(d === null) return null;
+
+ var a, aa, alen, alen2;
+
+ var data = {};
+ var len = d.server.counters.length;
+ while(len--) {
+ a = d.server.counters[len];
+ if(typeof a.counter === 'undefined') continue;
+ if(a.type === 'opcode') a.type = 'opcodes';
+ else if(a.type === 'qtype') a.type = 'qtypes';
+ else if(a.type === 'nsstat') a.type = 'nsstats';
+ aa = data[a.type] = {};
+ alen = 0;
+ alen2 = a.counter.length;
+ while(alen < alen2) {
+ aa[a.counter[alen].name] = parseInt(a.counter[alen]._Data, 10);
+ alen++;
+ }
+ }
+
+ data.views = {};
+ var vlen = d.views.view.length;
+ while(vlen--) {
+ var vname = d.views.view[vlen].name;
+ data.views[vname] = { resolver: {} };
+ len = d.views.view[vlen].counters.length;
+ while(len--) {
+ a = d.views.view[vlen].counters[len];
+ if(typeof a.counter === 'undefined') continue;
+ if(a.type === 'resstats') a.type = 'stats';
+ else if(a.type === 'resqtype') a.type = 'qtypes';
+ else if(a.type === 'adbstat') a.type = 'adb';
+ aa = data.views[vname].resolver[a.type] = {};
+ alen = 0;
+ alen2 = a.counter.length;
+ while(alen < alen2) {
+ aa[a.counter[alen].name] = parseInt(a.counter[alen]._Data, 10);
+ alen++;
+ }
+ }
+ }
+
+ return data;
+ },
+
+ processResponse: function(service, data) {
+ if(data !== null) {
+ var r, x, look, id, chart, keys, len;
+
+ // parse XML or JSON
+ // depending on the URL given
+ if(service.request.path.match(/^\/xml/) !== null)
+ r = named.xml2js(service, data);
+ else
+ r = JSON.parse(data);
+
+ if(typeof r === 'undefined' || r === null) {
+ service.error("Cannot parse these data: " + data.toString());
+ return;
+ }
+
+ if(service.added !== true)
+ service.commit();
+
+ if(typeof r.nsstats !== 'undefined') {
+ // we split the nsstats object to several others
+ var global_requests = {}, global_requests_enable = false;
+ var global_failures = {}, global_failures_enable = false;
+ var global_failures_detail = {}, global_failures_detail_enable = false;
+ var global_updates = {}, global_updates_enable = false;
+ var protocol_queries = {}, protocol_queries_enable = false;
+ var global_queries = {}, global_queries_enable = false;
+ var global_queries_success = {}, global_queries_success_enable = false;
+ var default_enable = false;
+ var RecursClients = 0;
+
+ // RecursClients is an absolute value
+ if(typeof r.nsstats['RecursClients'] !== 'undefined') {
+ RecursClients = r.nsstats['RecursClients'];
+ delete r.nsstats['RecursClients'];
+ }
+
+ keys = Object.keys(r.nsstats);
+ len = keys.length;
+ while(len--) {
+ x = keys[len];
+
+ // we maintain an index of the values found
+ // mapping them to objects splitted
+
+ look = named.lookups.nsstats[x];
+ if(typeof look === 'undefined') {
+ // a new value, not found in the index
+ // index it:
+ if(x === 'Requestv4') {
+ named.lookups.nsstats[x] = {
+ name: 'IPv4',
+ type: 'global_requests'
+ };
+ }
+ else if(x === 'Requestv6') {
+ named.lookups.nsstats[x] = {
+ name: 'IPv6',
+ type: 'global_requests'
+ };
+ }
+ else if(x === 'QryFailure') {
+ named.lookups.nsstats[x] = {
+ name: 'failures',
+ type: 'global_failures'
+ };
+ }
+ else if(x === 'QryUDP') {
+ named.lookups.nsstats[x] = {
+ name: 'UDP',
+ type: 'protocol_queries'
+ };
+ }
+ else if(x === 'QryTCP') {
+ named.lookups.nsstats[x] = {
+ name: 'TCP',
+ type: 'protocol_queries'
+ };
+ }
+ else if(x === 'QrySuccess') {
+ named.lookups.nsstats[x] = {
+ name: 'queries',
+ type: 'global_queries_success'
+ };
+ }
+ else if(x.match(/QryRej$/) !== null) {
+ named.lookups.nsstats[x] = {
+ name: x,
+ type: 'global_failures_detail'
+ };
+ }
+ else if(x.match(/^Qry/) !== null) {
+ named.lookups.nsstats[x] = {
+ name: x,
+ type: 'global_queries'
+ };
+ }
+ else if(x.match(/^Update/) !== null) {
+ named.lookups.nsstats[x] = {
+ name: x,
+ type: 'global_updates'
+ };
+ }
+ else {
+ // values not mapped, will remain
+ // in the default map
+ named.lookups.nsstats[x] = {
+ name: x,
+ type: 'default'
+ };
+ }
+
+ look = named.lookups.nsstats[x];
+ // netdata.error('lookup nsstats value: ' + x + ' >>> ' + named.lookups.nsstats[x].type);
+ }
+
+ switch(look.type) {
+ case 'global_requests': global_requests[look.name] = r.nsstats[x]; delete r.nsstats[x]; global_requests_enable = true; break;
+ case 'global_queries': global_queries[look.name] = r.nsstats[x]; delete r.nsstats[x]; global_queries_enable = true; break;
+ case 'global_queries_success': global_queries_success[look.name] = r.nsstats[x]; delete r.nsstats[x]; global_queries_success_enable = true; break;
+ case 'global_updates': global_updates[look.name] = r.nsstats[x]; delete r.nsstats[x]; global_updates_enable = true; break;
+ case 'protocol_queries': protocol_queries[look.name] = r.nsstats[x]; delete r.nsstats[x]; protocol_queries_enable = true; break;
+ case 'global_failures': global_failures[look.name] = r.nsstats[x]; delete r.nsstats[x]; global_failures_enable = true; break;
+ case 'global_failures_detail': global_failures_detail[look.name] = r.nsstats[x]; delete r.nsstats[x]; global_failures_detail_enable = true; break;
+ default: default_enable = true; break;
+ }
+ }
+
+ if(global_requests_enable === true)
+ service.module.chartFromMembers(service, global_requests, 'received_requests', 'Bind, Global Received Requests by IP version', 'requests/s', 'requests', 'named.requests', netdata.chartTypes.stacked, named.base_priority + 1, netdata.chartAlgorithms.incremental, 1, 1);
+
+ if(global_queries_success_enable === true)
+ service.module.chartFromMembers(service, global_queries_success, 'global_queries_success', 'Bind, Global Successful Queries', 'queries/s', 'queries', 'named.queries_success', netdata.chartTypes.line, named.base_priority + 2, netdata.chartAlgorithms.incremental, 1, 1);
+
+ if(protocol_queries_enable === true)
+ service.module.chartFromMembers(service, protocol_queries, 'protocols_queries', 'Bind, Global Queries by IP Protocol', 'queries/s', 'queries', 'named.protocol_queries', netdata.chartTypes.stacked, named.base_priority + 3, netdata.chartAlgorithms.incremental, 1, 1);
+
+ if(global_queries_enable === true)
+ service.module.chartFromMembers(service, global_queries, 'global_queries', 'Bind, Global Queries Analysis', 'queries/s', 'queries', 'named.global_queries', netdata.chartTypes.stacked, named.base_priority + 4, netdata.chartAlgorithms.incremental, 1, 1);
+
+ if(global_updates_enable === true)
+ service.module.chartFromMembers(service, global_updates, 'received_updates', 'Bind, Global Received Updates', 'updates/s', 'updates', 'named.global_updates', netdata.chartTypes.stacked, named.base_priority + 5, netdata.chartAlgorithms.incremental, 1, 1);
+
+ if(global_failures_enable === true)
+ service.module.chartFromMembers(service, global_failures, 'query_failures', 'Bind, Global Query Failures', 'failures/s', 'failures', 'named.global_failures', netdata.chartTypes.line, named.base_priority + 6, netdata.chartAlgorithms.incremental, 1, 1);
+
+ if(global_failures_detail_enable === true)
+ service.module.chartFromMembers(service, global_failures_detail, 'query_failures_detail', 'Bind, Global Query Failures Analysis', 'failures/s', 'failures', 'named.global_failures_detail', netdata.chartTypes.stacked, named.base_priority + 7, netdata.chartAlgorithms.incremental, 1, 1);
+
+ if(default_enable === true)
+ service.module.chartFromMembers(service, r.nsstats, 'nsstats', 'Bind, Other Global Server Statistics', 'operations/s', 'other', 'named.nsstats', netdata.chartTypes.line, named.base_priority + 8, netdata.chartAlgorithms.incremental, 1, 1);
+
+ // RecursClients chart
+ id = 'named_' + service.name + '.recursive_clients';
+ chart = named.charts[id];
+
+ if(typeof chart === 'undefined') {
+ chart = {
+ id: id, // the unique id of the chart
+ name: '', // the unique name of the chart
+ title: service.name + ' Bind, Current Recursive Clients', // the title of the chart
+ units: 'clients', // the units of the chart dimensions
+ family: 'clients', // the family of the chart
+ context: 'named.recursive_clients', // the context of the chart
+ type: netdata.chartTypes.line, // the type of the chart
+ priority: named.base_priority + 1, // the priority relative to others in the same family
+ update_every: service.update_every, // the expected update frequency of the chart
+ dimensions: {
+ 'clients': {
+ id: 'clients', // the unique id of the dimension
+ name: '', // the name of the dimension
+ algorithm: netdata.chartAlgorithms.absolute,// the id of the netdata algorithm
+ multiplier: 1, // the multiplier
+ divisor: 1, // the divisor
+ hidden: false // is hidden (boolean)
+ }
+ }
+ };
+
+ chart = service.chart(id, chart);
+ named.charts[id] = chart;
+ }
+
+ service.begin(chart);
+ service.set('clients', RecursClients);
+ service.end();
+ }
+
+ if(typeof r.opcodes !== 'undefined')
+ service.module.chartFromMembers(service, r.opcodes, 'in_opcodes', 'Bind, Global Incoming Requests by OpCode', 'requests/s', 'requests', 'named.in_opcodes', netdata.chartTypes.stacked, named.base_priority + 9, netdata.chartAlgorithms.incremental, 1, 1);
+
+ if(typeof r.qtypes !== 'undefined')
+ service.module.chartFromMembers(service, r.qtypes, 'in_qtypes', 'Bind, Global Incoming Requests by Query Type', 'requests/s', 'requests', 'named.in_qtypes', netdata.chartTypes.stacked, named.base_priority + 10, netdata.chartAlgorithms.incremental, 1, 1);
+
+ if(typeof r.sockstats !== 'undefined')
+ service.module.chartFromMembers(service, r.sockstats, 'in_sockstats', 'Bind, Global Socket Statistics', 'operations/s', 'sockets', 'named.in_sockstats', netdata.chartTypes.line, named.base_priority + 11, netdata.chartAlgorithms.incremental, 1, 1);
+
+ if(typeof r.views !== 'undefined') {
+ keys = Object.keys(r.views);
+ len = keys.length;
+ while(len--) {
+ x = keys[len];
+ var resolver = r.views[x].resolver;
+
+ if(typeof resolver !== 'undefined') {
+ if(typeof resolver.stats !== 'undefined') {
+ var NumFetch = 0;
+ var key = service.name + '.' + x;
+ var rtt = {}, rtt_enable = false;
+ default_enable = false;
+
+ // NumFetch is an absolute value
+ if(typeof resolver.stats['NumFetch'] !== 'undefined') {
+ named.lookups.numfetch[key] = true;
+ NumFetch = resolver.stats['NumFetch'];
+ delete resolver.stats['NumFetch'];
+ }
+ if(typeof resolver.stats['BucketSize'] !== 'undefined') {
+ delete resolver.stats['BucketSize'];
+ }
+
+ // split the QryRTT* from the main chart
+ var ykeys = Object.keys(resolver.stats);
+ var ylen = ykeys.length;
+ while(ylen--) {
+ var y = ykeys[ylen];
+
+ // we maintain an index of the values found
+ // mapping them to objects splitted
+
+ look = named.lookups.resolver_stats[y];
+ if(typeof look === 'undefined') {
+ if(y.match(/^QryRTT/) !== null) {
+ named.lookups.resolver_stats[y] = {
+ name: y,
+ type: 'rtt'
+ };
+ }
+ else {
+ named.lookups.resolver_stats[y] = {
+ name: y,
+ type: 'default'
+ };
+ }
+
+ look = named.lookups.resolver_stats[y];
+ // netdata.error('lookup resolver stats value: ' + y + ' >>> ' + look.type);
+ }
+
+ switch(look.type) {
+ case 'rtt': rtt[look.name] = resolver.stats[y]; delete resolver.stats[y]; rtt_enable = true; break;
+ default: default_enable = true; break;
+ }
+ }
+
+ if(rtt_enable)
+ service.module.chartFromMembers(service, rtt, 'view_resolver_rtt_' + x, 'Bind, ' + x + ' View, Resolver Round Trip Timings', 'queries/s', 'view_' + x, 'named.resolver_rtt', netdata.chartTypes.stacked, named.base_priority + 12, netdata.chartAlgorithms.incremental, 1, 1);
+
+ if(default_enable)
+ service.module.chartFromMembers(service, resolver.stats, 'view_resolver_stats_' + x, 'Bind, ' + x + ' View, Resolver Statistics', 'operations/s', 'view_' + x, 'named.resolver_stats', netdata.chartTypes.line, named.base_priority + 13, netdata.chartAlgorithms.incremental, 1, 1);
+
+ // NumFetch chart
+ if(typeof named.lookups.numfetch[key] !== 'undefined') {
+ id = 'named_' + service.name + '.view_resolver_numfetch_' + x;
+ chart = named.charts[id];
+
+ if(typeof chart === 'undefined') {
+ chart = {
+ id: id, // the unique id of the chart
+ name: '', // the unique name of the chart
+ title: service.name + ' Bind, ' + x + ' View, Resolver Active Queries', // the title of the chart
+ units: 'queries', // the units of the chart dimensions
+ family: 'view_' + x, // the family of the chart
+ context: 'named.resolver_active_queries', // the context of the chart
+ type: netdata.chartTypes.line, // the type of the chart
+ priority: named.base_priority + 1001, // the priority relative to others in the same family
+ update_every: service.update_every, // the expected update frequency of the chart
+ dimensions: {
+ 'queries': {
+ id: 'queries', // the unique id of the dimension
+ name: '', // the name of the dimension
+ algorithm: netdata.chartAlgorithms.absolute,// the id of the netdata algorithm
+ multiplier: 1, // the multiplier
+ divisor: 1, // the divisor
+ hidden: false // is hidden (boolean)
+ }
+ }
+ };
+
+ chart = service.chart(id, chart);
+ named.charts[id] = chart;
+ }
+
+ service.begin(chart);
+ service.set('queries', NumFetch);
+ service.end();
+ }
+ }
+ }
+
+ if(typeof resolver.qtypes !== 'undefined')
+ service.module.chartFromMembers(service, resolver.qtypes, 'view_resolver_qtypes_' + x, 'Bind, ' + x + ' View, Requests by Query Type', 'requests/s', 'view_' + x, 'named.resolver_qtypes', netdata.chartTypes.stacked, named.base_priority + 14, netdata.chartAlgorithms.incremental, 1, 1);
+
+ //if(typeof resolver.cache !== 'undefined')
+ // service.module.chartFromMembers(service, resolver.cache, 'view_resolver_cache_' + x, 'Bind, ' + x + ' View, Cache Entries', 'entries', 'view_' + x, 'named.resolver_cache', netdata.chartTypes.stacked, named.base_priority + 15, netdata.chartAlgorithms.absolute, 1, 1);
+
+ if(typeof resolver.cachestats !== 'undefined' && typeof resolver.cachestats['CacheHits'] !== 'undefined' && resolver.cachestats['CacheHits'] > 0) {
+ id = 'named_' + service.name + '.view_resolver_cachehits_' + x;
+ chart = named.charts[id];
+
+ if(typeof chart === 'undefined') {
+ chart = {
+ id: id, // the unique id of the chart
+ name: '', // the unique name of the chart
+ title: service.name + ' Bind, ' + x + ' View, Resolver Cache Hits', // the title of the chart
+ units: 'operations/s', // the units of the chart dimensions
+ family: 'view_' + x, // the family of the chart
+ context: 'named.resolver_cache_hits', // the context of the chart
+ type: netdata.chartTypes.area, // the type of the chart
+ priority: named.base_priority + 1100, // the priority relative to others in the same family
+ update_every: service.update_every, // the expected update frequency of the chart
+ dimensions: {
+ 'CacheHits': {
+ id: 'CacheHits', // the unique id of the dimension
+ name: 'hits', // the name of the dimension
+ algorithm: netdata.chartAlgorithms.incremental,// the id of the netdata algorithm
+ multiplier: 1, // the multiplier
+ divisor: 1, // the divisor
+ hidden: false // is hidden (boolean)
+ },
+ 'CacheMisses': {
+ id: 'CacheMisses', // the unique id of the dimension
+ name: 'misses', // the name of the dimension
+ algorithm: netdata.chartAlgorithms.incremental,// the id of the netdata algorithm
+ multiplier: -1, // the multiplier
+ divisor: 1, // the divisor
+ hidden: false // is hidden (boolean)
+ }
+ }
+ };
+
+ chart = service.chart(id, chart);
+ named.charts[id] = chart;
+ }
+
+ service.begin(chart);
+ service.set('CacheHits', resolver.cachestats['CacheHits']);
+ service.set('CacheMisses', resolver.cachestats['CacheMisses']);
+ service.end();
+ }
+
+ // this is wrong, it contains many types of info:
+ // 1. CacheHits, CacheMisses - incremental (added above)
+ // 2. QueryHits, QueryMisses - incremental
+ // 3. DeleteLRU, DeleteTTL - incremental
+ // 4. CacheNodes, CacheBuckets - absolute
+ // 5. TreeMemTotal, TreeMemInUse - absolute
+ // 6. HeapMemMax, HeapMemTotal, HeapMemInUse - absolute
+ //if(typeof resolver.cachestats !== 'undefined')
+ // service.module.chartFromMembers(service, resolver.cachestats, 'view_resolver_cachestats_' + x, 'Bind, ' + x + ' View, Cache Statistics', 'requests/s', 'view_' + x, 'named.resolver_cache_stats', netdata.chartTypes.line, named.base_priority + 1001, netdata.chartAlgorithms.incremental, 1, 1);
+
+ //if(typeof resolver.adb !== 'undefined')
+ // service.module.chartFromMembers(service, resolver.adb, 'view_resolver_adb_' + x, 'Bind, ' + x + ' View, ADB Statistics', 'entries', 'view_' + x, 'named.resolver_adb', netdata.chartTypes.line, named.base_priority + 1002, netdata.chartAlgorithms.absolute, 1, 1);
+ }
+ }
+ }
+ },
+
+ // module.serviceExecute()
+ // this function is called only from this module
+ // its purpose is to prepare the request and call
+ // netdata.serviceExecute()
+ serviceExecute: function(name, a_url, update_every) {
+ if(netdata.options.DEBUG === true) netdata.debug(this.name + ': ' + name + ': url: ' + a_url + ', update_every: ' + update_every);
+ var service = netdata.service({
+ name: name,
+ request: netdata.requestFromURL(a_url),
+ update_every: update_every,
+ module: this
+ });
+
+ service.execute(this.processResponse);
+ },
+
+ configure: function(config) {
+ var added = 0;
+
+ if(this.enable_autodetect === true) {
+ this.serviceExecute('local', 'http://localhost:8888/json/v1/server', this.update_every);
+ added++;
+ }
+
+ if(typeof(config.servers) !== 'undefined') {
+ var len = config.servers.length;
+ while(len--) {
+ if(typeof config.servers[len].update_every === 'undefined')
+ config.servers[len].update_every = this.update_every;
+
+ this.serviceExecute(config.servers[len].name, config.servers[len].url, config.servers[len].update_every);
+ added++;
+ }
+ }
+
+ return added;
+ },
+
+ // module.update()
+ // this is called repeatedly to collect data, by calling
+ // netdata.serviceExecute()
+ update: function(service, callback) {
+ service.execute(function(serv, data) {
+ service.module.processResponse(serv, data);
+ callback();
+ });
+ }
+};
+
+module.exports = named;
diff --git a/collectors/node.d.plugin/node.d.conf b/collectors/node.d.plugin/node.d.conf
new file mode 100644
index 0000000..95aec99
--- /dev/null
+++ b/collectors/node.d.plugin/node.d.conf
@@ -0,0 +1,39 @@
+{
+ "___help_1": "Default options for node.d.plugin - this is a JSON file.",
+ "___help_2": "Use http://jsonlint.com/ to verify it is valid JSON.",
+ "___help_3": "------------------------------------------------------------",
+
+ "___help_update_every": "Minimum data collection frequency for all node.d/*.node.js modules. Set it to 0 to inherit it from netdata.",
+ "update_every": 0,
+
+ "___help_modules_enable_autodetect": "Enable/disable auto-detection for node.d/*.node.js modules that support it.",
+ "modules_enable_autodetect": true,
+
+ "___help_modules_enable_all": "Enable all node.d/*.node.js modules by default.",
+ "modules_enable_all": true,
+
+ "___help_modules": "Enable/disable the following modules. Give only XXX for node.d/XXX.node.js",
+ "modules": {
+ "named": {
+ "enabled": true
+ },
+ "sma_webbox": {
+ "enabled": true
+ },
+ "snmp": {
+ "enabled": true
+ }
+ },
+
+ "___help_paths": "Paths that control the operation of node.d.plugin",
+ "paths": {
+ "___help_plugins": "The full path to the modules javascript node.d/ directory",
+ "plugins": null,
+
+ "___help_config": "The full path to the modules configs node.d/ directory",
+ "config": null,
+
+ "___help_modules": "Array of paths to add to node.js when searching for node_modules",
+ "modules": []
+ }
+}
diff --git a/collectors/node.d.plugin/node.d.plugin.in b/collectors/node.d.plugin/node.d.plugin.in
new file mode 100755
index 0000000..05c126e
--- /dev/null
+++ b/collectors/node.d.plugin/node.d.plugin.in
@@ -0,0 +1,303 @@
+#!/usr/bin/env bash
+':' //; exec "$(command -v nodejs || command -v node || echo "ERROR node IS NOT AVAILABLE IN THIS SYSTEM")" "$0" "$@"
+
+// shebang hack from:
+// http://unix.stackexchange.com/questions/65235/universal-node-js-shebang
+
+// Initially this is run as a shell script.
+// Then, the second line finds nodejs or node in the system path
+// and executes it with the shell parameters.
+
+// netdata
+// real-time performance and health monitoring, done right!
+// (C) 2017 Costa Tsaousis <costa@tsaousis.gr>
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+// --------------------------------------------------------------------------------------------------------------------
+
+'use strict';
+
+// --------------------------------------------------------------------------------------------------------------------
+// get NETDATA environment variables
+
+var NETDATA_PLUGINS_DIR = process.env.NETDATA_PLUGINS_DIR || __dirname;
+var NETDATA_USER_CONFIG_DIR = process.env.NETDATA_USER_CONFIG_DIR || '@configdir_POST@';
+var NETDATA_STOCK_CONFIG_DIR = process.env.NETDATA_STOCK_CONFIG_DIR || '@libconfigdir_POST@';
+var NETDATA_UPDATE_EVERY = process.env.NETDATA_UPDATE_EVERY || 1;
+var NODE_D_DIR = NETDATA_PLUGINS_DIR + '/../node.d';
+
+// make sure the modules are found
+process.mainModule.paths.unshift(NODE_D_DIR + '/node_modules');
+process.mainModule.paths.unshift(NODE_D_DIR);
+
+
+// --------------------------------------------------------------------------------------------------------------------
+// load required modules
+
+var fs = require('fs');
+var url = require('url');
+var util = require('util');
+var http = require('http');
+var path = require('path');
+var extend = require('extend');
+var netdata = require('netdata');
+
+
+// --------------------------------------------------------------------------------------------------------------------
+// configuration
+
+function netdata_read_json_config_file(module_filename) {
+ var f = path.basename(module_filename);
+
+ var ufilename, sfilename;
+
+ var m = f.match('.plugin' + '$');
+ if(m !== null) {
+ ufilename = netdata.options.paths.config + '/' + f.substring(0, m.index) + '.conf';
+ sfilename = netdata.options.paths.stock_config + '/' + f.substring(0, m.index) + '.conf';
+ }
+
+ m = f.match('.node.js' + '$');
+ if(m !== null) {
+ ufilename = netdata.options.paths.config + '/node.d/' + f.substring(0, m.index) + '.conf';
+ sfilename = netdata.options.paths.stock_config + '/node.d/' + f.substring(0, m.index) + '.conf';
+ }
+
+ try {
+ netdata.debug('loading module\'s ' + module_filename + ' user-config ' + ufilename);
+ return JSON.parse(fs.readFileSync(ufilename, 'utf8'));
+ }
+ catch(e) {
+ netdata.error('Cannot read user-configuration file ' + ufilename + ': ' + e.message + '.');
+ dumpError(e);
+ }
+
+ try {
+ netdata.debug('loading module\'s ' + module_filename + ' stock-config ' + sfilename);
+ return JSON.parse(fs.readFileSync(sfilename, 'utf8'));
+ }
+ catch(e) {
+ netdata.error('Cannot read stock-configuration file ' + sfilename + ': ' + e.message + ', using internal defaults.');
+ dumpError(e);
+ }
+
+ return {};
+}
+
+// internal defaults
+extend(true, netdata.options, {
+ filename: path.basename(__filename),
+
+ update_every: NETDATA_UPDATE_EVERY,
+
+ paths: {
+ plugins: NETDATA_PLUGINS_DIR,
+ config: NETDATA_USER_CONFIG_DIR,
+ stock_config: NETDATA_STOCK_CONFIG_DIR,
+ modules: []
+ },
+
+ modules_enable_autodetect: true,
+ modules_enable_all: true,
+ modules: {}
+});
+
+// load configuration file
+netdata.options_loaded = netdata_read_json_config_file(__filename);
+extend(true, netdata.options, netdata.options_loaded);
+
+if(!netdata.options.paths.plugins)
+ netdata.options.paths.plugins = NETDATA_PLUGINS_DIR;
+
+if(!netdata.options.paths.config)
+ netdata.options.paths.config = NETDATA_USER_CONFIG_DIR;
+
+if(!netdata.options.paths.stock_config)
+ netdata.options.paths.stock_config = NETDATA_STOCK_CONFIG_DIR;
+
+// console.error('merged netdata object:');
+// console.error(util.inspect(netdata, {depth: 10}));
+
+
+// apply module paths to node.js process
+function applyModulePaths() {
+ var len = netdata.options.paths.modules.length;
+ while(len--)
+ process.mainModule.paths.unshift(netdata.options.paths.modules[len]);
+}
+applyModulePaths();
+
+
+// --------------------------------------------------------------------------------------------------------------------
+// tracing
+
+function dumpError(err) {
+ if (typeof err === 'object') {
+ if (err.stack) {
+ netdata.debug(err.stack);
+ }
+ }
+}
+
+// --------------------------------------------------------------------------------------------------------------------
+// get command line arguments
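+// expected invocation, as assembled from the parsing below:
+//   node.d.plugin [debug] <update_every> [module ...]
+// 'debug' enables verbose logging; the first plain number sets the
+// update frequency; any module names after it enable only those modules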
+{
+ var found_myself = false;
+ var found_number = false;
+ var found_modules = false;
+ process.argv.forEach(function (val, index, array) {
+ netdata.debug('PARAM: ' + val);
+
+ if(!found_myself) {
+ if(val === __filename)
+ found_myself = true;
+ }
+ else {
+ switch(val) {
+ case 'debug':
+ netdata.options.DEBUG = true;
+ netdata.debug('DEBUG enabled');
+ break;
+
+ default:
+ if(found_number === true) {
+ if(found_modules === false) {
+ for(var i in netdata.options.modules)
+ netdata.options.modules[i].enabled = false;
+ }
+
+ if(typeof netdata.options.modules[val] === 'undefined')
+ netdata.options.modules[val] = {};
+
+ netdata.options.modules[val].enabled = true;
+ netdata.options.modules_enable_all = false;
+ netdata.debug('enabled module ' + val);
+ }
+ else {
+ try {
+ var x = parseInt(val);
+ if(x > 0) {
+ netdata.options.update_every = x;
+ if(netdata.options.update_every < NETDATA_UPDATE_EVERY) {
+ netdata.options.update_every = NETDATA_UPDATE_EVERY;
+ netdata.debug('Update frequency ' + x + 's is too low');
+ }
+
+ found_number = true;
+ netdata.debug('Update frequency set to ' + netdata.options.update_every + ' seconds');
+ }
+ else netdata.error('Ignoring parameter: ' + val);
+ }
+ catch(e) {
+ netdata.error('Cannot get value of parameter: ' + val);
+ dumpError(e);
+ }
+ }
+ break;
+ }
+ }
+ });
+}
+
+if(netdata.options.update_every < 1) {
+ netdata.debug('Adjusting update frequency to 1 second');
+ netdata.options.update_every = 1;
+}
+
+// --------------------------------------------------------------------------------------------------------------------
+// find modules
+
+function findModules() {
+ var found = 0;
+
+ var files = fs.readdirSync(NODE_D_DIR);
+ var len = files.length;
+ while(len--) {
+ var m = files[len].match('.node.js' + '$');
+ if(m !== null) {
+ var n = files[len].substring(0, m.index);
+
+ if(typeof(netdata.options.modules[n]) === 'undefined')
+ netdata.options.modules[n] = { name: n, enabled: netdata.options.modules_enable_all };
+
+ if(netdata.options.modules[n].enabled === true) {
+ netdata.options.modules[n].name = n;
+ netdata.options.modules[n].filename = NODE_D_DIR + '/' + files[len];
+ netdata.options.modules[n].loaded = false;
+
+ // load the module
+ try {
+ netdata.debug('loading module ' + netdata.options.modules[n].filename);
+ netdata.options.modules[n].module = require(netdata.options.modules[n].filename);
+ netdata.options.modules[n].module.name = n;
+ netdata.debug('loaded module ' + netdata.options.modules[n].name + ' from ' + netdata.options.modules[n].filename);
+ }
+ catch(e) {
+ netdata.options.modules[n].enabled = false;
+ netdata.error('Cannot load module: ' + netdata.options.modules[n].filename + ' exception: ' + e);
+ dumpError(e);
+ continue;
+ }
+
+ // load its configuration
+ var c = {
+ enable_autodetect: netdata.options.modules_enable_autodetect,
+ update_every: netdata.options.update_every
+ };
+
+ var c2 = netdata_read_json_config_file(files[len]);
+ extend(true, c, c2);
+
+ // call module auto-detection / configuration
+ try {
+ netdata.modules_configuring++;
+ netdata.debug('Configuring module ' + netdata.options.modules[n].name);
+ var serv = netdata.configure(netdata.options.modules[n].module, c, function() {
+ netdata.debug('Configured module ' + netdata.options.modules[n].name);
+ netdata.modules_configuring--;
+ });
+
+ netdata.debug('Configuring module ' + netdata.options.modules[n].name + ' reports ' + serv + ' eligible services.');
+ }
+ catch(e) {
+ netdata.modules_configuring--;
+ netdata.options.modules[n].enabled = false;
+ netdata.error('Failed module auto-detection: ' + netdata.options.modules[n].name + ' exception: ' + e + ', disabling module.');
+ dumpError(e);
+ continue;
+ }
+
+ netdata.options.modules[n].loaded = true;
+ found++;
+ }
+ }
+ }
+
+ // netdata.debug(netdata.options.modules);
+ return found;
+}
+
+if(findModules() === 0) {
+ netdata.error('Cannot load any .node.js module from: ' + NODE_D_DIR);
+ netdata.disableNodePlugin();
+ process.exit(1);
+}
+
+
+// --------------------------------------------------------------------------------------------------------------------
+// start
+
+function start_when_configuring_ends() {
+ if(netdata.modules_configuring > 0) {
+ netdata.debug('Waiting for modules to finish configuration, still running ' + netdata.modules_configuring);
+ setTimeout(start_when_configuring_ends, 500);
+ return;
+ }
+
+ netdata.modules_configuring = 0;
+ netdata.start();
+}
+start_when_configuring_ends();
+
+//netdata.debug('netdata object:')
+//netdata.debug(netdata);
diff --git a/collectors/node.d.plugin/node_modules/asn1-ber.js b/collectors/node.d.plugin/node_modules/asn1-ber.js
new file mode 100644
index 0000000..55c8f68
--- /dev/null
+++ b/collectors/node.d.plugin/node_modules/asn1-ber.js
@@ -0,0 +1,7 @@
+// SPDX-License-Identifier: MIT
+
+var Ber = require('./lib/ber/index')
+
+exports.Ber = Ber
+exports.BerReader = Ber.Reader
+exports.BerWriter = Ber.Writer
diff --git a/collectors/node.d.plugin/node_modules/extend.js b/collectors/node.d.plugin/node_modules/extend.js
new file mode 100644
index 0000000..3cd2e91
--- /dev/null
+++ b/collectors/node.d.plugin/node_modules/extend.js
@@ -0,0 +1,88 @@
+// https://github.com/justmoon/node-extend
+// SPDX-License-Identifier: MIT
+
+'use strict';
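+
+// usage, as in node.d.plugin.in: extend(true, target, a, b) deep-merges
+// the plain objects and arrays of a and b into target and returns it;
+// without the leading `true`, the merge is shallow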
+
+var hasOwn = Object.prototype.hasOwnProperty;
+var toStr = Object.prototype.toString;
+
+var isArray = function isArray(arr) {
+ if (typeof Array.isArray === 'function') {
+ return Array.isArray(arr);
+ }
+
+ return toStr.call(arr) === '[object Array]';
+};
+
+var isPlainObject = function isPlainObject(obj) {
+ if (!obj || toStr.call(obj) !== '[object Object]') {
+ return false;
+ }
+
+ var hasOwnConstructor = hasOwn.call(obj, 'constructor');
+ var hasIsPrototypeOf = obj.constructor && obj.constructor.prototype && hasOwn.call(obj.constructor.prototype, 'isPrototypeOf');
+ // Not own constructor property must be Object
+ if (obj.constructor && !hasOwnConstructor && !hasIsPrototypeOf) {
+ return false;
+ }
+
+ // Own properties are enumerated first; to speed things up,
+ // if the last one is own, then all properties are own.
+ var key;
+ for (key in obj) { /**/ }
+
+ return typeof key === 'undefined' || hasOwn.call(obj, key);
+};
+
+module.exports = function extend() {
+ var options, name, src, copy, copyIsArray, clone;
+ var target = arguments[0];
+ var i = 1;
+ var length = arguments.length;
+ var deep = false;
+
+ // Handle a deep copy situation
+ if (typeof target === 'boolean') {
+ deep = target;
+ target = arguments[1] || {};
+ // skip the boolean and the target
+ i = 2;
+ } else if ((typeof target !== 'object' && typeof target !== 'function') || target == null) {
+ target = {};
+ }
+
+ for (; i < length; ++i) {
+ options = arguments[i];
+ // Only deal with non-null/undefined values
+ if (options != null) {
+ // Extend the base object
+ for (name in options) {
+ src = target[name];
+ copy = options[name];
+
+ // Prevent never-ending loop
+ if (target !== copy) {
+ // Recurse if we're merging plain objects or arrays
+ if (deep && copy && (isPlainObject(copy) || (copyIsArray = isArray(copy)))) {
+ if (copyIsArray) {
+ copyIsArray = false;
+ clone = src && isArray(src) ? src : [];
+ } else {
+ clone = src && isPlainObject(src) ? src : {};
+ }
+
+ // Never move original objects, clone them
+ target[name] = extend(deep, clone, copy);
+
+ // Don't bring in undefined values
+ } else if (typeof copy !== 'undefined') {
+ target[name] = copy;
+ }
+ }
+ }
+ }
+ }
+
+ // Return the modified object
+ return target;
+};
diff --git a/collectors/node.d.plugin/node_modules/lib/ber/errors.js b/collectors/node.d.plugin/node_modules/lib/ber/errors.js
new file mode 100644
index 0000000..1c0df7b
--- /dev/null
+++ b/collectors/node.d.plugin/node_modules/lib/ber/errors.js
@@ -0,0 +1,10 @@
+// SPDX-License-Identifier: MIT
+
+module.exports = {
+ InvalidAsn1Error: function(msg) {
+ var e = new Error()
+ e.name = 'InvalidAsn1Error'
+ e.message = msg || ''
+ return e
+ }
+}
diff --git a/collectors/node.d.plugin/node_modules/lib/ber/index.js b/collectors/node.d.plugin/node_modules/lib/ber/index.js
new file mode 100644
index 0000000..eb69ec5
--- /dev/null
+++ b/collectors/node.d.plugin/node_modules/lib/ber/index.js
@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: MIT
+
+var errors = require('./errors')
+var types = require('./types')
+
+var Reader = require('./reader')
+var Writer = require('./writer')
+
+for (var t in types)
+ if (types.hasOwnProperty(t))
+ exports[t] = types[t]
+
+for (var e in errors)
+ if (errors.hasOwnProperty(e))
+ exports[e] = errors[e]
+
+exports.Reader = Reader
+exports.Writer = Writer
diff --git a/collectors/node.d.plugin/node_modules/lib/ber/reader.js b/collectors/node.d.plugin/node_modules/lib/ber/reader.js
new file mode 100644
index 0000000..06decf4
--- /dev/null
+++ b/collectors/node.d.plugin/node_modules/lib/ber/reader.js
@@ -0,0 +1,270 @@
+// SPDX-License-Identifier: MIT
+
+var assert = require('assert');
+
+var ASN1 = require('./types');
+var errors = require('./errors');
+
+
+///--- Globals
+
+var InvalidAsn1Error = errors.InvalidAsn1Error;
+
+
+
+///--- API
+
+function Reader(data) {
+ if (!data || !Buffer.isBuffer(data))
+ throw new TypeError('data must be a node Buffer');
+
+ this._buf = data;
+ this._size = data.length;
+
+ // These hold the "current" state
+ this._len = 0;
+ this._offset = 0;
+}
+
+Object.defineProperty(Reader.prototype, 'length', {
+ enumerable: true,
+ get: function () { return (this._len); }
+});
+
+Object.defineProperty(Reader.prototype, 'offset', {
+ enumerable: true,
+ get: function () { return (this._offset); }
+});
+
+Object.defineProperty(Reader.prototype, 'remain', {
+ get: function () { return (this._size - this._offset); }
+});
+
+Object.defineProperty(Reader.prototype, 'buffer', {
+ get: function () { return (this._buf.slice(this._offset)); }
+});
+
+
+/**
+ * Reads a single byte and advances offset; you can pass in `true` to make this
+ * a "peek" operation (i.e., get the byte, but don't advance the offset).
+ *
+ * @param {Boolean} peek true means don't move offset.
+ * @return {Number} the next byte, null if not enough data.
+ */
+Reader.prototype.readByte = function(peek) {
+ if (this._size - this._offset < 1)
+ return null;
+
+ var b = this._buf[this._offset] & 0xff;
+
+ if (!peek)
+ this._offset += 1;
+
+ return b;
+};
+
+
+Reader.prototype.peek = function() {
+ return this.readByte(true);
+};
+
+
+/**
+ * Reads a (potentially) variable length off the BER buffer. This call is
+ * not really meant to be called directly, as callers have to manipulate
+ * the internal buffer afterwards.
+ *
+ * After this call you can read `Reader.length`, until the next
+ * call that performs a readLength.
+ *
+ * @return {Number} the amount of offset to advance the buffer.
+ * @throws {InvalidAsn1Error} on bad ASN.1
+ */
+Reader.prototype.readLength = function(offset) {
+ if (offset === undefined)
+ offset = this._offset;
+
+ if (offset >= this._size)
+ return null;
+
+ var lenB = this._buf[offset++] & 0xff;
+ if (lenB === null)
+ return null;
+
+ if ((lenB & 0x80) == 0x80) {
+ lenB &= 0x7f;
+
+ if (lenB == 0)
+ throw InvalidAsn1Error('Indefinite length not supported');
+
+ if (lenB > 4)
+ throw InvalidAsn1Error('encoding too long');
+
+ if (this._size - offset < lenB)
+ return null;
+
+ this._len = 0;
+ for (var i = 0; i < lenB; i++)
+ this._len = (this._len << 8) + (this._buf[offset++] & 0xff);
+
+ } else {
+ // Wasn't a variable length
+ this._len = lenB;
+ }
+
+ return offset;
+};
+
+
+/**
+ * Parses the next sequence in this BER buffer.
+ *
+ * To get the length of the sequence, call `Reader.length`.
+ *
+ * @return {Number} the sequence's tag.
+ */
+Reader.prototype.readSequence = function(tag) {
+ var seq = this.peek();
+ if (seq === null)
+ return null;
+ if (tag !== undefined && tag !== seq)
+ throw InvalidAsn1Error('Expected 0x' + tag.toString(16) +
+ ': got 0x' + seq.toString(16));
+
+ var o = this.readLength(this._offset + 1); // stored in `length`
+ if (o === null)
+ return null;
+
+ this._offset = o;
+ return seq;
+};
+
+
+Reader.prototype.readInt = function(tag) {
+ if (typeof(tag) !== 'number')
+ tag = ASN1.Integer;
+
+ return this._readTag(tag);
+};
+
+
+Reader.prototype.readBoolean = function(tag) {
+ if (typeof(tag) !== 'number')
+ tag = ASN1.Boolean;
+
+ return (this._readTag(tag) === 0 ? false : true);
+};
+
+
+Reader.prototype.readEnumeration = function(tag) {
+ if (typeof(tag) !== 'number')
+ tag = ASN1.Enumeration;
+
+ return this._readTag(tag);
+};
+
+
+Reader.prototype.readString = function(tag, retbuf) {
+ if (!tag)
+ tag = ASN1.OctetString;
+
+ var b = this.peek();
+ if (b === null)
+ return null;
+
+ if (b !== tag)
+ throw InvalidAsn1Error('Expected 0x' + tag.toString(16) +
+ ': got 0x' + b.toString(16));
+
+ var o = this.readLength(this._offset + 1); // stored in `length`
+
+ if (o === null)
+ return null;
+
+ if (this.length > this._size - o)
+ return null;
+
+ this._offset = o;
+
+ if (this.length === 0)
+ return retbuf ? new Buffer(0) : '';
+
+ var str = this._buf.slice(this._offset, this._offset + this.length);
+ this._offset += this.length;
+
+ return retbuf ? str : str.toString('utf8');
+};
+
+Reader.prototype.readOID = function(tag) {
+ if (!tag)
+ tag = ASN1.OID;
+
+ var b = this.readString(tag, true);
+ if (b === null)
+ return null;
+
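+ // subidentifiers are base-128 with the high bit as a continuation
+ // flag; e.g. the bytes 2b 06 01 decode to "1.3.6.1" (0x2b packs the
+ // first two arcs as 40*1 + 3)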
+ var values = [];
+ var value = 0;
+
+ for (var i = 0; i < b.length; i++) {
+ var byte = b[i] & 0xff;
+
+ value <<= 7;
+ value += byte & 0x7f;
+ if ((byte & 0x80) == 0) {
+ values.push(value >>> 0);
+ value = 0;
+ }
+ }
+
+ value = values.shift();
+ values.unshift(value % 40);
+ values.unshift((value / 40) >> 0);
+
+ return values.join('.');
+};
+
+
+Reader.prototype._readTag = function(tag) {
+ assert.ok(tag !== undefined);
+
+ var b = this.peek();
+
+ if (b === null)
+ return null;
+
+ if (b !== tag)
+ throw InvalidAsn1Error('Expected 0x' + tag.toString(16) +
+ ': got 0x' + b.toString(16));
+
+ var o = this.readLength(this._offset + 1); // stored in `length`
+ if (o === null)
+ return null;
+
+ if (this.length > 4)
+ throw InvalidAsn1Error('Integer too long: ' + this.length);
+
+ if (this.length > this._size - o)
+ return null;
+ this._offset = o;
+
+ var fb = this._buf[this._offset];
+ var value = 0;
+
+ for (var i = 0; i < this.length; i++) {
+ value <<= 8;
+ value |= (this._buf[this._offset++] & 0xff);
+ }
+
+ if ((fb & 0x80) == 0x80 && i !== 4)
+ value -= (1 << (i * 8));
+
+ return value >> 0;
+};
+
+
+
+///--- Exported API
+
+module.exports = Reader;
diff --git a/collectors/node.d.plugin/node_modules/lib/ber/types.js b/collectors/node.d.plugin/node_modules/lib/ber/types.js
new file mode 100644
index 0000000..7519ddc
--- /dev/null
+++ b/collectors/node.d.plugin/node_modules/lib/ber/types.js
@@ -0,0 +1,35 @@
+// SPDX-License-Identifier: MIT
+
+module.exports = {
+ EOC: 0,
+ Boolean: 1,
+ Integer: 2,
+ BitString: 3,
+ OctetString: 4,
+ Null: 5,
+ OID: 6,
+ ObjectDescriptor: 7,
+ External: 8,
+ Real: 9,
+ Enumeration: 10,
+ PDV: 11,
+ Utf8String: 12,
+ RelativeOID: 13,
+ Sequence: 16,
+ Set: 17,
+ NumericString: 18,
+ PrintableString: 19,
+ T61String: 20,
+ VideotexString: 21,
+ IA5String: 22,
+ UTCTime: 23,
+ GeneralizedTime: 24,
+ GraphicString: 25,
+ VisibleString: 26,
+ GeneralString: 28,
+ UniversalString: 29,
+ CharacterString: 30,
+ BMPString: 31,
+ Constructor: 32,
+ Context: 128
+}
diff --git a/collectors/node.d.plugin/node_modules/lib/ber/writer.js b/collectors/node.d.plugin/node_modules/lib/ber/writer.js
new file mode 100644
index 0000000..d3a718f
--- /dev/null
+++ b/collectors/node.d.plugin/node_modules/lib/ber/writer.js
@@ -0,0 +1,318 @@
+// SPDX-License-Identifier: MIT
+
+var assert = require('assert');
+var ASN1 = require('./types');
+var errors = require('./errors');
+
+
+///--- Globals
+
+var InvalidAsn1Error = errors.InvalidAsn1Error;
+
+var DEFAULT_OPTS = {
+ size: 1024,
+ growthFactor: 8
+};
+
+
+///--- Helpers
+
+function merge(from, to) {
+ assert.ok(from);
+ assert.equal(typeof(from), 'object');
+ assert.ok(to);
+ assert.equal(typeof(to), 'object');
+
+ var keys = Object.getOwnPropertyNames(from);
+ keys.forEach(function(key) {
+ if (to[key])
+ return;
+
+ var value = Object.getOwnPropertyDescriptor(from, key);
+ Object.defineProperty(to, key, value);
+ });
+
+ return to;
+}
+
+
+
+///--- API
+
+function Writer(options) {
+ options = merge(DEFAULT_OPTS, options || {});
+
+ this._buf = new Buffer(options.size || 1024);
+ this._size = this._buf.length;
+ this._offset = 0;
+ this._options = options;
+
+ // A list of offsets in the buffer where we need to insert
+ // sequence tag/len pairs.
+ this._seq = [];
+}
+
+Object.defineProperty(Writer.prototype, 'buffer', {
+ get: function () {
+ if (this._seq.length)
+ throw new InvalidAsn1Error(this._seq.length + ' unended sequence(s)');
+
+ return (this._buf.slice(0, this._offset));
+ }
+});
+
+Writer.prototype.writeByte = function(b) {
+ if (typeof(b) !== 'number')
+ throw new TypeError('argument must be a Number');
+
+ this._ensure(1);
+ this._buf[this._offset++] = b;
+};
+
+
+Writer.prototype.writeInt = function(i, tag) {
+ if (typeof(i) !== 'number')
+ throw new TypeError('argument must be a Number');
+ if (typeof(tag) !== 'number')
+ tag = ASN1.Integer;
+
+ var sz = 4;
+
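+ // drop leading bytes that are pure sign extension (top 9 bits all
+ // zeros or all ones), keeping at least one byte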
+ while ((((i & 0xff800000) === 0) || ((i & 0xff800000) === 0xff800000 >> 0)) &&
+ (sz > 1)) {
+ sz--;
+ i <<= 8;
+ }
+
+ if (sz > 4)
+ throw new InvalidAsn1Error('BER ints cannot be > 0xffffffff');
+
+ this._ensure(2 + sz);
+ this._buf[this._offset++] = tag;
+ this._buf[this._offset++] = sz;
+
+ while (sz-- > 0) {
+ this._buf[this._offset++] = ((i & 0xff000000) >>> 24);
+ i <<= 8;
+ }
+
+};
+
+
+Writer.prototype.writeNull = function() {
+ this.writeByte(ASN1.Null);
+ this.writeByte(0x00);
+};
+
+
+Writer.prototype.writeEnumeration = function(i, tag) {
+ if (typeof(i) !== 'number')
+ throw new TypeError('argument must be a Number');
+ if (typeof(tag) !== 'number')
+ tag = ASN1.Enumeration;
+
+ return this.writeInt(i, tag);
+};
+
+
+Writer.prototype.writeBoolean = function(b, tag) {
+ if (typeof(b) !== 'boolean')
+ throw new TypeError('argument must be a Boolean');
+ if (typeof(tag) !== 'number')
+ tag = ASN1.Boolean;
+
+ this._ensure(3);
+ this._buf[this._offset++] = tag;
+ this._buf[this._offset++] = 0x01;
+ this._buf[this._offset++] = b ? 0xff : 0x00;
+};
+
+
+Writer.prototype.writeString = function(s, tag) {
+ if (typeof(s) !== 'string')
+ throw new TypeError('argument must be a string (was: ' + typeof(s) + ')');
+ if (typeof(tag) !== 'number')
+ tag = ASN1.OctetString;
+
+ var len = Buffer.byteLength(s);
+ this.writeByte(tag);
+ this.writeLength(len);
+ if (len) {
+ this._ensure(len);
+ this._buf.write(s, this._offset);
+ this._offset += len;
+ }
+};
+
+
+Writer.prototype.writeBuffer = function(buf, tag) {
+ if (!Buffer.isBuffer(buf))
+ throw new TypeError('argument must be a buffer');
+
+ // If no tag is specified we will assume `buf` already contains tag and length
+ if (typeof(tag) === 'number') {
+ this.writeByte(tag);
+ this.writeLength(buf.length);
+ }
+
+ this._ensure(buf.length);
+ buf.copy(this._buf, this._offset, 0, buf.length);
+ this._offset += buf.length;
+};
+
+
+Writer.prototype.writeStringArray = function(strings, tag) {
+ if (! (strings instanceof Array))
+ throw new TypeError('argument must be an Array[String]');
+
+ var self = this;
+ strings.forEach(function(s) {
+ self.writeString(s, tag);
+ });
+};
+
+// This is really to solve DER cases; it is sufficient for now
+Writer.prototype.writeOID = function(s, tag) {
+ if (typeof(s) !== 'string')
+ throw new TypeError('argument must be a string');
+ if (typeof(tag) !== 'number')
+ tag = ASN1.OID;
+
+ if (!/^([0-9]+\.){3,}[0-9]+$/.test(s))
+ throw new Error('argument is not a valid OID string');
+
+ function encodeOctet(bytes, octet) {
+ if (octet < 128) {
+ bytes.push(octet);
+ } else if (octet < 16384) {
+ bytes.push((octet >>> 7) | 0x80);
+ bytes.push(octet & 0x7F);
+ } else if (octet < 2097152) {
+ bytes.push((octet >>> 14) | 0x80);
+ bytes.push(((octet >>> 7) | 0x80) & 0xFF);
+ bytes.push(octet & 0x7F);
+ } else if (octet < 268435456) {
+ bytes.push((octet >>> 21) | 0x80);
+ bytes.push(((octet >>> 14) | 0x80) & 0xFF);
+ bytes.push(((octet >>> 7) | 0x80) & 0xFF);
+ bytes.push(octet & 0x7F);
+ } else {
+ bytes.push(((octet >>> 28) | 0x80) & 0xFF);
+ bytes.push(((octet >>> 21) | 0x80) & 0xFF);
+ bytes.push(((octet >>> 14) | 0x80) & 0xFF);
+ bytes.push(((octet >>> 7) | 0x80) & 0xFF);
+ bytes.push(octet & 0x7F);
+ }
+ }
+
+ var tmp = s.split('.');
+ var bytes = [];
+ bytes.push(parseInt(tmp[0], 10) * 40 + parseInt(tmp[1], 10));
+ tmp.slice(2).forEach(function(b) {
+ encodeOctet(bytes, parseInt(b, 10));
+ });
+
+ var self = this;
+ this._ensure(2 + bytes.length);
+ this.writeByte(tag);
+ this.writeLength(bytes.length);
+ bytes.forEach(function(b) {
+ self.writeByte(b);
+ });
+};
+
+
+Writer.prototype.writeLength = function(len) {
+ if (typeof(len) !== 'number')
+ throw new TypeError('argument must be a Number');
+
+ this._ensure(4);
+
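+ // DER length encoding: short form is one byte 0x00-0x7f; long form
+ // is 0x80+N followed by N big-endian length bytes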
+ if (len <= 0x7f) {
+ this._buf[this._offset++] = len;
+ } else if (len <= 0xff) {
+ this._buf[this._offset++] = 0x81;
+ this._buf[this._offset++] = len;
+ } else if (len <= 0xffff) {
+ this._buf[this._offset++] = 0x82;
+ this._buf[this._offset++] = len >> 8;
+ this._buf[this._offset++] = len;
+ } else if (len <= 0xffffff) {
+ this._buf[this._offset++] = 0x83;
+ this._buf[this._offset++] = len >> 16;
+ this._buf[this._offset++] = len >> 8;
+ this._buf[this._offset++] = len;
+ } else {
+ throw new InvalidAsn1Error('Length too long (> 4 bytes)');
+ }
+};
+
+Writer.prototype.startSequence = function(tag) {
+ if (typeof(tag) !== 'number')
+ tag = ASN1.Sequence | ASN1.Constructor;
+
+ this.writeByte(tag);
+ this._seq.push(this._offset);
+ this._ensure(3);
+ this._offset += 3;
+};
+
+
+Writer.prototype.endSequence = function() {
+ var seq = this._seq.pop();
+ var start = seq + 3;
+ var len = this._offset - start;
+
+ if (len <= 0x7f) {
+ this._shift(start, len, -2);
+ this._buf[seq] = len;
+ } else if (len <= 0xff) {
+ this._shift(start, len, -1);
+ this._buf[seq] = 0x81;
+ this._buf[seq + 1] = len;
+ } else if (len <= 0xffff) {
+ this._buf[seq] = 0x82;
+ this._buf[seq + 1] = len >> 8;
+ this._buf[seq + 2] = len;
+ } else if (len <= 0xffffff) {
+ this._shift(start, len, 1);
+ this._buf[seq] = 0x83;
+ this._buf[seq + 1] = len >> 16;
+ this._buf[seq + 2] = len >> 8;
+ this._buf[seq + 3] = len;
+ } else {
+ throw new InvalidAsn1Error('Sequence too long');
+ }
+};
+
+
+Writer.prototype._shift = function(start, len, shift) {
+ assert.ok(start !== undefined);
+ assert.ok(len !== undefined);
+ assert.ok(shift);
+
+ this._buf.copy(this._buf, start + shift, start, start + len);
+ this._offset += shift;
+};
+
+Writer.prototype._ensure = function(len) {
+ assert.ok(len);
+
+ if (this._size - this._offset < len) {
+ var sz = this._size * this._options.growthFactor;
+ if (sz - this._offset < len)
+ sz += len;
+
+ var buf = new Buffer(sz);
+
+ this._buf.copy(buf, 0, 0, this._offset);
+ this._buf = buf;
+ this._size = sz;
+ }
+};
+
+
+
+///--- Exported API
+
+module.exports = Writer;
diff --git a/collectors/node.d.plugin/node_modules/net-snmp.js b/collectors/node.d.plugin/node_modules/net-snmp.js
new file mode 100644
index 0000000..6b5b754
--- /dev/null
+++ b/collectors/node.d.plugin/node_modules/net-snmp.js
@@ -0,0 +1,3452 @@
+// Copyright 2013 Stephen Vickers <stephen.vickers.sv@gmail.com>
+// SPDX-License-Identifier: MIT
+
+var ber = require("asn1-ber").Ber;
+var dgram = require("dgram");
+var events = require("events");
+var util = require("util");
+var crypto = require("crypto");
+
+var DEBUG = false;
+
+var MAX_INT32 = 2147483647;
+
+function debug(line) {
+ if (DEBUG) {
+ console.debug(line);
+ }
+}
+
+/*****************************************************************************
+ ** Constants
+ **/
+
+
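+// add reverse mappings in place, so that after expansion both
+// ErrorStatus[0] === "NoError" and ErrorStatus["NoError"] === 0 hold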
+function _expandConstantObject(object) {
+ var keys = [];
+ for (var key in object)
+ keys.push(key);
+ for (var i = 0; i < keys.length; i++)
+ object[object[keys[i]]] = parseInt(keys[i]);
+}
+
+var ErrorStatus = {
+ 0: "NoError",
+ 1: "TooBig",
+ 2: "NoSuchName",
+ 3: "BadValue",
+ 4: "ReadOnly",
+ 5: "GeneralError",
+ 6: "NoAccess",
+ 7: "WrongType",
+ 8: "WrongLength",
+ 9: "WrongEncoding",
+ 10: "WrongValue",
+ 11: "NoCreation",
+ 12: "InconsistentValue",
+ 13: "ResourceUnavailable",
+ 14: "CommitFailed",
+ 15: "UndoFailed",
+ 16: "AuthorizationError",
+ 17: "NotWritable",
+ 18: "InconsistentName"
+};
+
+_expandConstantObject(ErrorStatus);
+
+var ObjectType = {
+ 1: "Boolean",
+ 2: "Integer",
+ 4: "OctetString",
+ 5: "Null",
+ 6: "OID",
+ 64: "IpAddress",
+ 65: "Counter",
+ 66: "Gauge",
+ 67: "TimeTicks",
+ 68: "Opaque",
+ 70: "Counter64",
+ 128: "NoSuchObject",
+ 129: "NoSuchInstance",
+ 130: "EndOfMibView"
+};
+
+_expandConstantObject(ObjectType);
+
+ObjectType.Integer32 = ObjectType.Integer;
+ObjectType.Counter32 = ObjectType.Counter;
+ObjectType.Gauge32 = ObjectType.Gauge;
+ObjectType.Unsigned32 = ObjectType.Gauge32;
+
+var PduType = {
+ 160: "GetRequest",
+ 161: "GetNextRequest",
+ 162: "GetResponse",
+ 163: "SetRequest",
+ 164: "Trap",
+ 165: "GetBulkRequest",
+ 166: "InformRequest",
+ 167: "TrapV2",
+ 168: "Report"
+};
+
+_expandConstantObject(PduType);
+
+var TrapType = {
+ 0: "ColdStart",
+ 1: "WarmStart",
+ 2: "LinkDown",
+ 3: "LinkUp",
+ 4: "AuthenticationFailure",
+ 5: "EgpNeighborLoss",
+ 6: "EnterpriseSpecific"
+};
+
+_expandConstantObject(TrapType);
+
+var SecurityLevel = {
+ 1: "noAuthNoPriv",
+ 2: "authNoPriv",
+ 3: "authPriv"
+};
+
+_expandConstantObject(SecurityLevel);
+
+var AuthProtocols = {
+ "1": "none",
+ "2": "md5",
+ "3": "sha"
+};
+
+_expandConstantObject(AuthProtocols);
+
+var PrivProtocols = {
+ "1": "none",
+ "2": "des"
+};
+
+_expandConstantObject(PrivProtocols);
+
+var MibProviderType = {
+ "1": "Scalar",
+ "2": "Table"
+};
+
+_expandConstantObject(MibProviderType);
+
+var Version1 = 0;
+var Version2c = 1;
+var Version3 = 3;
+
+var Version = {
+ "1": Version1,
+ "2c": Version2c,
+ "3": Version3
+};
+
+/*****************************************************************************
+ ** Exception class definitions
+ **/
+
+function ResponseInvalidError(message) {
+ this.name = "ResponseInvalidError";
+ this.message = message;
+ Error.captureStackTrace(this, ResponseInvalidError);
+}
+
+util.inherits(ResponseInvalidError, Error);
+
+function RequestInvalidError(message) {
+ this.name = "RequestInvalidError";
+ this.message = message;
+ Error.captureStackTrace(this, RequestInvalidError);
+}
+
+util.inherits(RequestInvalidError, Error);
+
+function RequestFailedError(message, status) {
+ this.name = "RequestFailedError";
+ this.message = message;
+ this.status = status;
+ Error.captureStackTrace(this, RequestFailedError);
+}
+
+util.inherits(RequestFailedError, Error);
+
+function RequestTimedOutError(message) {
+ this.name = "RequestTimedOutError";
+ this.message = message;
+ Error.captureStackTrace(this, RequestTimedOutError);
+}
+
+util.inherits(RequestTimedOutError, Error);
+
+/*****************************************************************************
+ ** OID and varbind helper functions
+ **/
+
+function isVarbindError(varbind) {
+ return !!(varbind.type == ObjectType.NoSuchObject
+ || varbind.type == ObjectType.NoSuchInstance
+ || varbind.type == ObjectType.EndOfMibView);
+}
+
+function varbindError(varbind) {
+ return (ObjectType[varbind.type] || "NotAnError") + ": " + varbind.oid;
+}
+
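+// component-wise numeric comparison of two dotted OID strings; returns
+// false only when nextString orders strictly before oidString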
+function oidFollowsOid(oidString, nextString) {
+ var oid = {str: oidString, len: oidString.length, idx: 0};
+ var next = {str: nextString, len: nextString.length, idx: 0};
+ var dotCharCode = ".".charCodeAt(0);
+
+ function getNumber(item) {
+ var n = 0;
+ if (item.idx >= item.len)
+ return null;
+ while (item.idx < item.len) {
+ var charCode = item.str.charCodeAt(item.idx++);
+ if (charCode == dotCharCode)
+ return n;
+ n = n * 10 + (charCode - 48);
+ }
+ return n;
+ }
+
+ while (1) {
+ var oidNumber = getNumber(oid);
+ var nextNumber = getNumber(next);
+
+ if (oidNumber !== null) {
+ if (nextNumber !== null) {
+ if (nextNumber > oidNumber) {
+ return true;
+ } else if (nextNumber < oidNumber) {
+ return false;
+ }
+ } else {
+ // next ran out first: it is a prefix of oid and so precedes it
+ return false;
+ }
+ } else {
+ return true;
+ }
+ }
+}
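+
+// oidFollowsOid() compares dotted OID strings component by component, e.g.:
+//
+//     oidFollowsOid("1.3.6.1.2", "1.3.6.1.3")   // true: next is greater
+//     oidFollowsOid("1.3.6.1.2", "1.3.6.1.1")   // false: next is smaller
+//     oidFollowsOid("1.3.6.1.2", "1.3.6.1.2.1") // true: next is deeper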
+
+function oidInSubtree(oidString, nextString) {
+ var oid = oidString.split(".");
+ var next = nextString.split(".");
+
+ if (oid.length > next.length)
+ return false;
+
+ for (var i = 0; i < oid.length; i++) {
+ if (next[i] != oid[i])
+ return false;
+ }
+
+ return true;
+}
+
+/**
+ ** Some SNMP agents produce integers on the wire such as 00 ff ff ff ff.
+ ** The ASN.1 BER parser we use throws an error when parsing this, which we
+ ** believe is correct. So, we decided not to bother the "asn1" developer(s)
+ ** with this, instead opting to work around it here.
+ **
+ ** If an integer is 5 bytes in length we check if the first byte is 0, and if so
+ ** simply drop it and parse it like it was a 4 byte integer, otherwise throw
+ ** an error since the integer is too large.
+ **/
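+
+// Worked example: the unsigned value 4294967295 (0xffffffff) is BER-encoded
+// as the 5 bytes 00 ff ff ff ff so it is not mistaken for the signed value
+// -1; readUint() below drops that leading zero byte and decodes the
+// remaining 4 bytes.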
+
+function readInt(buffer) {
+ return readUint(buffer, true);
+}
+
+function readIpAddress(buffer) {
+ var bytes = buffer.readString(ObjectType.IpAddress, true);
+ if (bytes.length != 4)
+ throw new ResponseInvalidError("Length '" + bytes.length
+ + "' of IP address '" + bytes.toString("hex")
+ + "' is not 4");
+ var value = bytes[0] + "." + bytes[1] + "." + bytes[2] + "." + bytes[3];
+ return value;
+}
+
+function readUint(buffer, isSigned) {
+ buffer.readByte();
+ var length = buffer.readByte();
+ var value = 0;
+ var signedBitSet = false;
+
+ if (length > 5) {
+ throw new RangeError("Integer too long '" + length + "'");
+ } else if (length == 5) {
+ if (buffer.readByte() !== 0)
+ throw new RangeError("Integer too long '" + length + "'");
+ length = 4;
+ }
+
+ for (var i = 0; i < length; i++) {
+ value *= 256;
+ value += buffer.readByte();
+
+ // the sign bit lives in the first (most significant) byte
+ if (isSigned && i === 0) {
+ if ((value & 0x80) == 0x80)
+ signedBitSet = true;
+ }
+ }
+
+ // use Math.pow() here: (1 << (i * 8)) wraps at 32 bits in JavaScript,
+ // which would corrupt 4-byte negative values
+ if (signedBitSet)
+ value -= Math.pow(2, i * 8);
+
+ return value;
+}
+
+function readUint64(buffer) {
+ var value = buffer.readString(ObjectType.Counter64, true);
+
+ return value;
+}
+
+function readVarbinds(buffer, varbinds) {
+ buffer.readSequence();
+
+ while (1) {
+ buffer.readSequence();
+ if (buffer.peek() != ObjectType.OID)
+ break;
+ var oid = buffer.readOID();
+ var type = buffer.peek();
+
+ if (type == null)
+ break;
+
+ var value;
+
+ if (type == ObjectType.Boolean) {
+ value = buffer.readBoolean();
+ } else if (type == ObjectType.Integer) {
+ value = readInt(buffer);
+ } else if (type == ObjectType.OctetString) {
+ value = buffer.readString(null, true);
+ } else if (type == ObjectType.Null) {
+ buffer.readByte();
+ buffer.readByte();
+ value = null;
+ } else if (type == ObjectType.OID) {
+ value = buffer.readOID();
+ } else if (type == ObjectType.IpAddress) {
+ var bytes = buffer.readString(ObjectType.IpAddress, true);
+ if (bytes.length != 4)
+ throw new ResponseInvalidError("Length '" + bytes.length
+ + "' of IP address '" + bytes.toString("hex")
+ + "' is not 4");
+ value = bytes[0] + "." + bytes[1] + "." + bytes[2] + "." + bytes[3];
+ } else if (type == ObjectType.Counter) {
+ value = readUint(buffer);
+ } else if (type == ObjectType.Gauge) {
+ value = readUint(buffer);
+ } else if (type == ObjectType.TimeTicks) {
+ value = readUint(buffer);
+ } else if (type == ObjectType.Opaque) {
+ value = buffer.readString(ObjectType.Opaque, true);
+ } else if (type == ObjectType.Counter64) {
+ value = readUint64(buffer);
+ } else if (type == ObjectType.NoSuchObject) {
+ buffer.readByte();
+ buffer.readByte();
+ value = null;
+ } else if (type == ObjectType.NoSuchInstance) {
+ buffer.readByte();
+ buffer.readByte();
+ value = null;
+ } else if (type == ObjectType.EndOfMibView) {
+ buffer.readByte();
+ buffer.readByte();
+ value = null;
+ } else {
+ throw new ResponseInvalidError("Unknown type '" + type
+ + "' in response");
+ }
+
+ varbinds.push({
+ oid: oid,
+ type: type,
+ value: value
+ });
+ }
+}
+
+function writeUint(buffer, type, value) {
+ var b = Buffer.alloc(4);
+ b.writeUInt32BE(value, 0);
+ buffer.writeBuffer(b, type);
+}
+
+function writeUint64(buffer, value) {
+ buffer.writeBuffer(value, ObjectType.Counter64);
+}
+
+function writeVarbinds(buffer, varbinds) {
+ buffer.startSequence();
+ for (var i = 0; i < varbinds.length; i++) {
+ buffer.startSequence();
+ buffer.writeOID(varbinds[i].oid);
+
+ if (varbinds[i].type && varbinds[i].hasOwnProperty("value")) {
+ var type = varbinds[i].type;
+ var value = varbinds[i].value;
+
+ if (type == ObjectType.Boolean) {
+ buffer.writeBoolean(value ? true : false);
+ } else if (type == ObjectType.Integer) { // also Integer32
+ buffer.writeInt(value);
+ } else if (type == ObjectType.OctetString) {
+ if (typeof value == "string")
+ buffer.writeString(value);
+ else
+ buffer.writeBuffer(value, ObjectType.OctetString);
+ } else if (type == ObjectType.Null) {
+ buffer.writeNull();
+ } else if (type == ObjectType.OID) {
+ buffer.writeOID(value);
+ } else if (type == ObjectType.IpAddress) {
+ var bytes = value.split(".");
+ if (bytes.length != 4)
+ throw new RequestInvalidError("Invalid IP address '"
+ + value + "'");
+ buffer.writeBuffer(Buffer.from(bytes), 64);
+ } else if (type == ObjectType.Counter) { // also Counter32
+ writeUint(buffer, ObjectType.Counter, value);
+ } else if (type == ObjectType.Gauge) { // also Gauge32 & Unsigned32
+ writeUint(buffer, ObjectType.Gauge, value);
+ } else if (type == ObjectType.TimeTicks) {
+ writeUint(buffer, ObjectType.TimeTicks, value);
+ } else if (type == ObjectType.Opaque) {
+ buffer.writeBuffer(value, ObjectType.Opaque);
+ } else if (type == ObjectType.Counter64) {
+ writeUint64(buffer, value);
+ } else if (type == ObjectType.EndOfMibView) {
+ buffer.writeByte(130);
+ buffer.writeByte(0);
+ } else {
+ throw new RequestInvalidError("Unknown type '" + type
+ + "' in request");
+ }
+ } else {
+ buffer.writeNull();
+ }
+
+ buffer.endSequence();
+ }
+ buffer.endSequence();
+}
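+
+// writeVarbinds() expects entries shaped as below (the OID and value are
+// hypothetical); entries without both a type and a value are encoded as
+// Null, which is what Get, GetNext and GetBulk requests use:
+//
+//     { oid: "1.3.6.1.2.1.1.5.0", type: ObjectType.OctetString, value: "host1" }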
+
+/*****************************************************************************
+ ** PDU class definitions
+ **/
+
+var SimplePdu = function () {
+};
+
+SimplePdu.prototype.toBuffer = function (buffer) {
+ buffer.startSequence(this.type);
+
+ buffer.writeInt(this.id);
+ buffer.writeInt((this.type == PduType.GetBulkRequest)
+ ? (this.options.nonRepeaters || 0)
+ : 0);
+ buffer.writeInt((this.type == PduType.GetBulkRequest)
+ ? (this.options.maxRepetitions || 0)
+ : 0);
+
+ writeVarbinds(buffer, this.varbinds);
+
+ buffer.endSequence();
+};
+
+SimplePdu.prototype.initializeFromVariables = function (id, varbinds, options) {
+ this.id = id;
+ this.varbinds = varbinds;
+ this.options = options || {};
+ this.contextName = (options && options.context) ? options.context : "";
+};
+
+SimplePdu.prototype.initializeFromBuffer = function (reader) {
+ this.type = reader.peek();
+ reader.readSequence();
+
+ this.id = reader.readInt();
+ this.nonRepeaters = reader.readInt();
+ this.maxRepetitions = reader.readInt();
+
+ this.varbinds = [];
+ readVarbinds(reader, this.varbinds);
+
+};
+
+SimplePdu.prototype.getResponsePduForRequest = function () {
+ var responsePdu = GetResponsePdu.createFromVariables(this.id, [], {});
+ if (this.contextEngineID) {
+ responsePdu.contextEngineID = this.contextEngineID;
+ responsePdu.contextName = this.contextName;
+ }
+ return responsePdu;
+};
+
+SimplePdu.createFromVariables = function (pduClass, id, varbinds, options) {
+ var pdu = new pduClass(id, varbinds, options);
+ pdu.id = id;
+ pdu.varbinds = varbinds;
+ pdu.options = options || {};
+ pdu.contextName = (options && options.context) ? options.context : "";
+ return pdu;
+};
+
+var GetBulkRequestPdu = function () {
+ this.type = PduType.GetBulkRequest;
+ GetBulkRequestPdu.super_.apply(this, arguments);
+};
+
+util.inherits(GetBulkRequestPdu, SimplePdu);
+
+GetBulkRequestPdu.createFromBuffer = function (reader) {
+ var pdu = new GetBulkRequestPdu();
+ pdu.initializeFromBuffer(reader);
+ return pdu;
+};
+
+var GetNextRequestPdu = function () {
+ this.type = PduType.GetNextRequest;
+ GetNextRequestPdu.super_.apply(this, arguments);
+};
+
+util.inherits(GetNextRequestPdu, SimplePdu);
+
+GetNextRequestPdu.createFromBuffer = function (reader) {
+ var pdu = new GetNextRequestPdu();
+ pdu.initializeFromBuffer(reader);
+ return pdu;
+};
+
+var GetRequestPdu = function () {
+ this.type = PduType.GetRequest;
+ GetRequestPdu.super_.apply(this, arguments);
+};
+
+util.inherits(GetRequestPdu, SimplePdu);
+
+GetRequestPdu.createFromBuffer = function (reader) {
+ var pdu = new GetRequestPdu();
+ pdu.initializeFromBuffer(reader);
+ return pdu;
+};
+
+GetRequestPdu.createFromVariables = function (id, varbinds, options) {
+ var pdu = new GetRequestPdu();
+ pdu.initializeFromVariables(id, varbinds, options);
+ return pdu;
+};
+
+var InformRequestPdu = function () {
+ this.type = PduType.InformRequest;
+ InformRequestPdu.super_.apply(this, arguments);
+};
+
+util.inherits(InformRequestPdu, SimplePdu);
+
+InformRequestPdu.createFromBuffer = function (reader) {
+ var pdu = new InformRequestPdu();
+ pdu.initializeFromBuffer(reader);
+ return pdu;
+};
+
+var SetRequestPdu = function () {
+ this.type = PduType.SetRequest;
+ SetRequestPdu.super_.apply(this, arguments);
+};
+
+util.inherits(SetRequestPdu, SimplePdu);
+
+SetRequestPdu.createFromBuffer = function (reader) {
+ var pdu = new SetRequestPdu();
+ pdu.initializeFromBuffer(reader);
+ return pdu;
+};
+
+var TrapPdu = function () {
+ this.type = PduType.Trap;
+};
+
+TrapPdu.prototype.toBuffer = function (buffer) {
+ buffer.startSequence(this.type);
+
+ buffer.writeOID(this.enterprise);
+ buffer.writeBuffer(Buffer.from(this.agentAddr.split(".")),
+ ObjectType.IpAddress);
+ buffer.writeInt(this.generic);
+ buffer.writeInt(this.specific);
+ writeUint(buffer, ObjectType.TimeTicks,
+ this.upTime || Math.floor(process.uptime() * 100));
+
+ writeVarbinds(buffer, this.varbinds);
+
+ buffer.endSequence();
+};
+
+TrapPdu.createFromBuffer = function (reader) {
+ var pdu = new TrapPdu();
+ reader.readSequence();
+
+ pdu.enterprise = reader.readOID();
+ pdu.agentAddr = readIpAddress(reader);
+ pdu.generic = reader.readInt();
+ pdu.specific = reader.readInt();
+ pdu.upTime = readUint(reader)
+
+ pdu.varbinds = [];
+ readVarbinds(reader, pdu.varbinds);
+
+ return pdu;
+};
+
+TrapPdu.createFromVariables = function (typeOrOid, varbinds, options) {
+ var pdu = new TrapPdu();
+ pdu.agentAddr = options.agentAddr || "127.0.0.1";
+ pdu.upTime = options.upTime;
+
+ if (typeof typeOrOid == "string") {
+ pdu.generic = TrapType.EnterpriseSpecific;
+ pdu.specific = parseInt(typeOrOid.match(/\.(\d+)$/)[1]);
+ pdu.enterprise = typeOrOid.replace(/\.(\d+)$/, "");
+ } else {
+ pdu.generic = typeOrOid;
+ pdu.specific = 0;
+ pdu.enterprise = "1.3.6.1.4.1";
+ }
+
+ pdu.varbinds = varbinds;
+
+ return pdu;
+};
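+
+// Two ways to build a v1 trap PDU (sketch; the enterprise OID is
+// hypothetical): pass a generic trap type, or an enterprise OID whose last
+// component becomes the specific trap number:
+//
+//     TrapPdu.createFromVariables(TrapType.LinkDown, [], {});
+//     TrapPdu.createFromVariables("1.3.6.1.4.1.9999.0.7", [], {}); // specific = 7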
+
+var TrapV2Pdu = function () {
+ this.type = PduType.TrapV2;
+ TrapV2Pdu.super_.apply(this, arguments);
+};
+
+util.inherits(TrapV2Pdu, SimplePdu);
+
+TrapV2Pdu.createFromBuffer = function (reader) {
+ var pdu = new TrapV2Pdu();
+ pdu.initializeFromBuffer(reader);
+ return pdu;
+};
+
+TrapV2Pdu.createFromVariables = function (id, varbinds, options) {
+ var pdu = new TrapV2Pdu();
+ pdu.initializeFromVariables(id, varbinds, options);
+ return pdu;
+};
+
+var SimpleResponsePdu = function () {
+};
+
+SimpleResponsePdu.prototype.toBuffer = function (writer) {
+ writer.startSequence(this.type);
+
+ writer.writeInt(this.id);
+ writer.writeInt(this.errorStatus || 0);
+ writer.writeInt(this.errorIndex || 0);
+ writeVarbinds(writer, this.varbinds);
+ writer.endSequence();
+
+};
+
+SimpleResponsePdu.prototype.initializeFromBuffer = function (reader) {
+ reader.readSequence(this.type);
+
+ this.id = reader.readInt();
+ this.errorStatus = reader.readInt();
+ this.errorIndex = reader.readInt();
+
+ this.varbinds = [];
+ readVarbinds(reader, this.varbinds);
+};
+
+SimpleResponsePdu.prototype.initializeFromVariables = function (id, varbinds, options) {
+ this.id = id;
+ this.varbinds = varbinds;
+ this.options = options || {};
+};
+
+var GetResponsePdu = function () {
+ this.type = PduType.GetResponse;
+ GetResponsePdu.super_.apply(this, arguments);
+};
+
+util.inherits(GetResponsePdu, SimpleResponsePdu);
+
+GetResponsePdu.createFromBuffer = function (reader) {
+ var pdu = new GetResponsePdu();
+ pdu.initializeFromBuffer(reader);
+ return pdu;
+};
+
+GetResponsePdu.createFromVariables = function (id, varbinds, options) {
+ var pdu = new GetResponsePdu();
+ pdu.initializeFromVariables(id, varbinds, options);
+ return pdu;
+};
+
+var ReportPdu = function () {
+ this.type = PduType.Report;
+ ReportPdu.super_.apply(this, arguments);
+};
+
+util.inherits(ReportPdu, SimpleResponsePdu);
+
+ReportPdu.createFromBuffer = function (reader) {
+ var pdu = new ReportPdu();
+ pdu.initializeFromBuffer(reader);
+ return pdu;
+};
+
+ReportPdu.createFromVariables = function (id, varbinds, options) {
+ var pdu = new ReportPdu();
+ pdu.initializeFromVariables(id, varbinds, options);
+ return pdu;
+};
+
+var readPdu = function (reader, scoped) {
+ var pdu;
+ var contextEngineID;
+ var contextName;
+ if (scoped) {
+ reader.readSequence();
+ contextEngineID = reader.readString(ber.OctetString, true);
+ contextName = reader.readString();
+ }
+ var type = reader.peek();
+
+ if (type == PduType.GetResponse) {
+ pdu = GetResponsePdu.createFromBuffer(reader);
+ } else if (type == PduType.Report) {
+ pdu = ReportPdu.createFromBuffer(reader);
+ } else if (type == PduType.Trap) {
+ pdu = TrapPdu.createFromBuffer(reader);
+ } else if (type == PduType.TrapV2) {
+ pdu = TrapV2Pdu.createFromBuffer(reader);
+ } else if (type == PduType.InformRequest) {
+ pdu = InformRequestPdu.createFromBuffer(reader);
+ } else if (type == PduType.GetRequest) {
+ pdu = GetRequestPdu.createFromBuffer(reader);
+ } else if (type == PduType.SetRequest) {
+ pdu = SetRequestPdu.createFromBuffer(reader);
+ } else if (type == PduType.GetNextRequest) {
+ pdu = GetNextRequestPdu.createFromBuffer(reader);
+ } else if (type == PduType.GetBulkRequest) {
+ pdu = GetBulkRequestPdu.createFromBuffer(reader);
+ } else {
+ throw new ResponseInvalidError("Unknown PDU type '" + type
+ + "' in response");
+ }
+ if (scoped) {
+ pdu.contextEngineID = contextEngineID;
+ pdu.contextName = contextName;
+ }
+ pdu.scoped = scoped;
+ return pdu;
+};
+
+var createDiscoveryPdu = function (context) {
+ return GetRequestPdu.createFromVariables(_generateId(), [], {context: context});
+};
+
+var Authentication = {};
+
+Authentication.HMAC_BUFFER_SIZE = 1024 * 1024;
+Authentication.HMAC_BLOCK_SIZE = 64;
+Authentication.AUTHENTICATION_CODE_LENGTH = 12;
+Authentication.AUTH_PARAMETERS_PLACEHOLDER = Buffer.from('8182838485868788898a8b8c', 'hex');
+
+Authentication.algorithms = {};
+
+Authentication.algorithms[AuthProtocols.md5] = {
+ // KEY_LENGTH: 16,
+ CRYPTO_ALGORITHM: 'md5'
+};
+
+Authentication.algorithms[AuthProtocols.sha] = {
+ // KEY_LENGTH: 20,
+ CRYPTO_ALGORITHM: 'sha1'
+};
+
+// Adapted from RFC3414 Appendix A.2.1. Password to Key Sample Code for MD5
+Authentication.passwordToKey = function (authProtocol, authPasswordString, engineID) {
+ var hashAlgorithm;
+ var firstDigest;
+ var finalDigest;
+ var buf = Buffer.alloc(Authentication.HMAC_BUFFER_SIZE);
+ var bufOffset = 0;
+ var passwordIndex = 0;
+ var count = 0;
+ var password = Buffer.from(authPasswordString);
+ var cryptoAlgorithm = Authentication.algorithms[authProtocol].CRYPTO_ALGORITHM;
+
+ while (count < Authentication.HMAC_BUFFER_SIZE) {
+ for (var i = 0; i < Authentication.HMAC_BLOCK_SIZE; i++) {
+ buf.writeUInt8(password[passwordIndex++ % password.length], bufOffset++);
+ }
+ count += Authentication.HMAC_BLOCK_SIZE;
+ }
+ hashAlgorithm = crypto.createHash(cryptoAlgorithm);
+ hashAlgorithm.update(buf);
+ firstDigest = hashAlgorithm.digest();
+ // debug ("First digest: " + firstDigest.toString('hex'));
+
+ hashAlgorithm = crypto.createHash(cryptoAlgorithm);
+ hashAlgorithm.update(firstDigest);
+ hashAlgorithm.update(engineID);
+ hashAlgorithm.update(firstDigest);
+ finalDigest = hashAlgorithm.digest();
+ debug("Localized key: " + finalDigest.toString('hex'));
+
+ return finalDigest;
+};
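+
+// Sketch of the two steps above, with H standing for the selected hash:
+//
+//     key          = H(password repeated out to 1048576 bytes)
+//     localizedKey = H(key + engineID + key)
+//
+// The second step ties the key to one agent's engine ID, so compromising
+// the localized key of one agent does not directly expose others.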
+
+Authentication.addParametersToMessageBuffer = function (messageBuffer, authProtocol, authPassword, engineID) {
+ var authenticationParametersOffset;
+ var digestToAdd;
+
+ // clear the authenticationParameters field in message
+ authenticationParametersOffset = messageBuffer.indexOf(Authentication.AUTH_PARAMETERS_PLACEHOLDER);
+ messageBuffer.fill(0, authenticationParametersOffset, authenticationParametersOffset + Authentication.AUTHENTICATION_CODE_LENGTH);
+
+ digestToAdd = Authentication.calculateDigest(messageBuffer, authProtocol, authPassword, engineID);
+ digestToAdd.copy(messageBuffer, authenticationParametersOffset, 0, Authentication.AUTHENTICATION_CODE_LENGTH);
+ debug("Added Auth Parameters: " + digestToAdd.toString('hex'));
+};
+
+Authentication.isAuthentic = function (messageBuffer, authProtocol, authPassword, engineID, digestInMessage) {
+ var authenticationParametersOffset;
+ var calculatedDigest;
+
+ // clear the authenticationParameters field in message
+ authenticationParametersOffset = messageBuffer.indexOf(digestInMessage);
+ messageBuffer.fill(0, authenticationParametersOffset, authenticationParametersOffset + Authentication.AUTHENTICATION_CODE_LENGTH);
+
+ calculatedDigest = Authentication.calculateDigest(messageBuffer, authProtocol, authPassword, engineID);
+
+ // replace previously cleared authenticationParameters field in message
+ digestInMessage.copy(messageBuffer, authenticationParametersOffset, 0, Authentication.AUTHENTICATION_CODE_LENGTH);
+
+ debug("Digest in message: " + digestInMessage.toString('hex'));
+ debug("Calculated digest: " + calculatedDigest.toString('hex'));
+ // Buffer#equals takes no length argument; both digests are already
+ // truncated to AUTHENTICATION_CODE_LENGTH bytes
+ return calculatedDigest.equals(digestInMessage);
+};
+
+Authentication.calculateDigest = function (messageBuffer, authProtocol, authPassword, engineID) {
+ var authKey = Authentication.passwordToKey(authProtocol, authPassword, engineID);
+
+ // Adapted from RFC3414 Section 6.3.1. Processing an Outgoing Message
+ var hashAlgorithm;
+ var kIpad;
+ var kOpad;
+ var firstDigest;
+ var finalDigest;
+ var truncatedDigest;
+ var i;
+ var cryptoAlgorithm = Authentication.algorithms[authProtocol].CRYPTO_ALGORITHM;
+
+ if (authKey.length > Authentication.HMAC_BLOCK_SIZE) {
+ hashAlgorithm = crypto.createHash(cryptoAlgorithm);
+ hashAlgorithm.update(authKey);
+ authKey = hashAlgorithm.digest();
+ }
+
+ // MD(K XOR opad, MD(K XOR ipad, msg))
+ kIpad = Buffer.alloc(Authentication.HMAC_BLOCK_SIZE);
+ kOpad = Buffer.alloc(Authentication.HMAC_BLOCK_SIZE);
+ for (i = 0; i < authKey.length; i++) {
+ kIpad[i] = authKey[i] ^ 0x36;
+ kOpad[i] = authKey[i] ^ 0x5c;
+ }
+ kIpad.fill(0x36, authKey.length);
+ kOpad.fill(0x5c, authKey.length);
+
+ // inner MD
+ hashAlgorithm = crypto.createHash(cryptoAlgorithm);
+ hashAlgorithm.update(kIpad);
+ hashAlgorithm.update(messageBuffer);
+ firstDigest = hashAlgorithm.digest();
+ // outer MD
+ hashAlgorithm = crypto.createHash(cryptoAlgorithm);
+ hashAlgorithm.update(kOpad);
+ hashAlgorithm.update(firstDigest);
+ finalDigest = hashAlgorithm.digest();
+
+ truncatedDigest = Buffer.alloc(Authentication.AUTHENTICATION_CODE_LENGTH);
+ finalDigest.copy(truncatedDigest, 0, 0, Authentication.AUTHENTICATION_CODE_LENGTH);
+ return truncatedDigest;
+};
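+
+// The ipad/opad construction above is plain HMAC (RFC2104) truncated to 12
+// bytes, as RFC3414 specifies for HMAC-MD5-96 and HMAC-SHA-96. A minimal
+// equivalent sketch using node's built-in HMAC, assuming the same localized
+// authKey and messageBuffer:
+//
+//     crypto.createHmac(cryptoAlgorithm, authKey)
+//         .update(messageBuffer)
+//         .digest()
+//         .slice(0, Authentication.AUTHENTICATION_CODE_LENGTH);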
+
+var Encryption = {};
+
+Encryption.INPUT_KEY_LENGTH = 16;
+Encryption.DES_KEY_LENGTH = 8;
+Encryption.DES_BLOCK_LENGTH = 8;
+Encryption.CRYPTO_DES_ALGORITHM = 'des-cbc';
+Encryption.PRIV_PARAMETERS_PLACEHOLDER = Buffer.from('9192939495969798', 'hex');
+
+Encryption.encryptPdu = function (scopedPdu, privProtocol, privPassword, authProtocol, engineID) {
+ var privLocalizedKey;
+ var encryptionKey;
+ var preIv;
+ var salt;
+ var iv;
+ var i;
+ var paddedScopedPduLength;
+ var paddedScopedPdu;
+ var encryptedPdu;
+ var cbcProtocol = Encryption.CRYPTO_DES_ALGORITHM;
+
+ privLocalizedKey = Authentication.passwordToKey(authProtocol, privPassword, engineID);
+ encryptionKey = Buffer.alloc(Encryption.DES_KEY_LENGTH);
+ privLocalizedKey.copy(encryptionKey, 0, 0, Encryption.DES_KEY_LENGTH);
+ preIv = Buffer.alloc(Encryption.DES_BLOCK_LENGTH);
+ privLocalizedKey.copy(preIv, 0, Encryption.DES_KEY_LENGTH, Encryption.DES_KEY_LENGTH + Encryption.DES_BLOCK_LENGTH);
+
+ salt = Buffer.alloc(Encryption.DES_BLOCK_LENGTH);
+ // set local SNMP engine boots part of salt to 1, as we have no persistent engine state
+ salt.fill('00000001', 0, 4, 'hex');
+ // set local integer part of salt to random
+ salt.fill(crypto.randomBytes(4), 4, 8);
+ iv = Buffer.alloc(Encryption.DES_BLOCK_LENGTH);
+ for (i = 0; i < iv.length; i++) {
+ iv[i] = preIv[i] ^ salt[i];
+ }
+
+ if (scopedPdu.length % Encryption.DES_BLOCK_LENGTH == 0) {
+ paddedScopedPdu = scopedPdu;
+ } else {
+ paddedScopedPduLength = Encryption.DES_BLOCK_LENGTH * (Math.floor(scopedPdu.length / Encryption.DES_BLOCK_LENGTH) + 1);
+ paddedScopedPdu = Buffer.alloc(paddedScopedPduLength);
+ scopedPdu.copy(paddedScopedPdu, 0, 0, scopedPdu.length);
+ }
+ var cipher = crypto.createCipheriv(cbcProtocol, encryptionKey, iv);
+ encryptedPdu = cipher.update(paddedScopedPdu);
+ encryptedPdu = Buffer.concat([encryptedPdu, cipher.final()]);
+ debug("Key: " + encryptionKey.toString('hex'));
+ debug("IV: " + iv.toString('hex'));
+ debug("Plain: " + paddedScopedPdu.toString('hex'));
+ debug("Encrypted: " + encryptedPdu.toString('hex'));
+
+ return {
+ encryptedPdu: encryptedPdu,
+ msgPrivacyParameters: salt
+ };
+};
+
+Encryption.decryptPdu = function (encryptedPdu, privProtocol, privParameters, privPassword, authProtocol, engineID, forceAutoPaddingDisable) {
+ var privLocalizedKey;
+ var decryptionKey;
+ var preIv;
+ var salt;
+ var iv;
+ var i;
+ var decryptedPdu;
+ var cbcProtocol = Encryption.CRYPTO_DES_ALGORITHM;
+
+ privLocalizedKey = Authentication.passwordToKey(authProtocol, privPassword, engineID);
+ decryptionKey = Buffer.alloc(Encryption.DES_KEY_LENGTH);
+ privLocalizedKey.copy(decryptionKey, 0, 0, Encryption.DES_KEY_LENGTH);
+ preIv = Buffer.alloc(Encryption.DES_BLOCK_LENGTH);
+ privLocalizedKey.copy(preIv, 0, Encryption.DES_KEY_LENGTH, Encryption.DES_KEY_LENGTH + Encryption.DES_BLOCK_LENGTH);
+
+ salt = privParameters;
+ iv = Buffer.alloc(Encryption.DES_BLOCK_LENGTH);
+ for (i = 0; i < iv.length; i++) {
+ iv[i] = preIv[i] ^ salt[i];
+ }
+
+ var decipher = crypto.createDecipheriv(cbcProtocol, decryptionKey, iv);
+ if (forceAutoPaddingDisable) {
+ decipher.setAutoPadding(false);
+ }
+ decryptedPdu = decipher.update(encryptedPdu);
+ // This try-catch is a workaround for a seemingly incorrect error condition
+ // - where sometimes a decrypt error is thrown with decipher.final()
+ // It replaces this line which should have been sufficient:
+ // decryptedPdu = Buffer.concat ([decryptedPdu, decipher.final()]);
+ try {
+ decryptedPdu = Buffer.concat([decryptedPdu, decipher.final()]);
+ } catch (error) {
+ // debug("Decrypt error: " + error);
+ decipher = crypto.createDecipheriv(cbcProtocol, decryptionKey, iv);
+ decipher.setAutoPadding(false);
+ decryptedPdu = decipher.update(encryptedPdu);
+ decryptedPdu = Buffer.concat([decryptedPdu, decipher.final()]);
+ }
+ debug("Key: " + decryptionKey.toString('hex'));
+ debug("IV: " + iv.toString('hex'));
+ debug("Encrypted: " + encryptedPdu.toString('hex'));
+ debug("Plain: " + decryptedPdu.toString('hex'));
+
+ return decryptedPdu;
+
+};
+
+Encryption.addParametersToMessageBuffer = function (messageBuffer, msgPrivacyParameters) {
+ var privacyParametersOffset = messageBuffer.indexOf(Encryption.PRIV_PARAMETERS_PLACEHOLDER);
+ msgPrivacyParameters.copy(messageBuffer, privacyParametersOffset, 0, Encryption.DES_BLOCK_LENGTH);
+};
+
+/*****************************************************************************
+ ** Message class definition
+ **/
+
+var Message = function () {
+};
+
+Message.prototype.getReqId = function () {
+ return this.version == Version3 ? this.msgGlobalData.msgID : this.pdu.id;
+};
+
+Message.prototype.toBuffer = function () {
+ if (this.version == Version3) {
+ return this.toBufferV3();
+ } else {
+ return this.toBufferCommunity();
+ }
+};
+
+Message.prototype.toBufferCommunity = function () {
+ if (this.buffer)
+ return this.buffer;
+
+ var writer = new ber.Writer();
+
+ writer.startSequence();
+
+ writer.writeInt(this.version);
+ writer.writeString(this.community);
+
+ this.pdu.toBuffer(writer);
+
+ writer.endSequence();
+
+ this.buffer = writer.buffer;
+
+ return this.buffer;
+};
+
+Message.prototype.toBufferV3 = function () {
+ var encryptionResult;
+
+ if (this.buffer)
+ return this.buffer;
+
+ var writer = new ber.Writer();
+
+ writer.startSequence();
+
+ writer.writeInt(this.version);
+
+ // HeaderData
+ writer.startSequence();
+ writer.writeInt(this.msgGlobalData.msgID);
+ writer.writeInt(this.msgGlobalData.msgMaxSize);
+ writer.writeByte(ber.OctetString);
+ writer.writeByte(1);
+ writer.writeByte(this.msgGlobalData.msgFlags);
+ writer.writeInt(this.msgGlobalData.msgSecurityModel);
+ writer.endSequence();
+
+ // msgSecurityParameters
+ var msgSecurityParametersWriter = new ber.Writer();
+ msgSecurityParametersWriter.startSequence();
+ //msgSecurityParametersWriter.writeString (this.msgSecurityParameters.msgAuthoritativeEngineID);
+ // writing a zero-length buffer fails - should fix asn1-ber for this condition
+ if (this.msgSecurityParameters.msgAuthoritativeEngineID.length == 0) {
+ msgSecurityParametersWriter.writeString("");
+ } else {
+ msgSecurityParametersWriter.writeBuffer(this.msgSecurityParameters.msgAuthoritativeEngineID, ber.OctetString);
+ }
+ msgSecurityParametersWriter.writeInt(this.msgSecurityParameters.msgAuthoritativeEngineBoots);
+ msgSecurityParametersWriter.writeInt(this.msgSecurityParameters.msgAuthoritativeEngineTime);
+ msgSecurityParametersWriter.writeString(this.msgSecurityParameters.msgUserName);
+
+ if (this.hasAuthentication()) {
+ msgSecurityParametersWriter.writeBuffer(Authentication.AUTH_PARAMETERS_PLACEHOLDER, ber.OctetString);
+ // should never happen: msgFlags indicates no authentication, yet authentication parameters are present
+ } else if (this.msgSecurityParameters.msgAuthenticationParameters.length > 0) {
+ msgSecurityParametersWriter.writeBuffer(this.msgSecurityParameters.msgAuthenticationParameters, ber.OctetString);
+ } else {
+ msgSecurityParametersWriter.writeString("");
+ }
+
+ if (this.hasPrivacy()) {
+ msgSecurityParametersWriter.writeBuffer(Encryption.PRIV_PARAMETERS_PLACEHOLDER, ber.OctetString);
+ // should never happen: msgFlags indicates no privacy, yet privacy parameters are present
+ } else if (this.msgSecurityParameters.msgPrivacyParameters.length > 0) {
+ msgSecurityParametersWriter.writeBuffer(this.msgSecurityParameters.msgPrivacyParameters, ber.OctetString);
+ } else {
+ msgSecurityParametersWriter.writeString("");
+ }
+ msgSecurityParametersWriter.endSequence();
+
+ writer.writeBuffer(msgSecurityParametersWriter.buffer, ber.OctetString);
+
+ // ScopedPDU
+ var scopedPduWriter = new ber.Writer();
+ scopedPduWriter.startSequence();
+ var contextEngineID = this.pdu.contextEngineID ? this.pdu.contextEngineID : this.msgSecurityParameters.msgAuthoritativeEngineID;
+ if (contextEngineID.length == 0) {
+ scopedPduWriter.writeString("");
+ } else {
+ scopedPduWriter.writeBuffer(contextEngineID, ber.OctetString);
+ }
+ scopedPduWriter.writeString(this.pdu.contextName);
+ this.pdu.toBuffer(scopedPduWriter);
+ scopedPduWriter.endSequence();
+
+ if (this.hasPrivacy()) {
+ encryptionResult = Encryption.encryptPdu(scopedPduWriter.buffer, this.user.privProtocol, this.user.privKey, this.user.authProtocol, this.msgSecurityParameters.msgAuthoritativeEngineID);
+ writer.writeBuffer(encryptionResult.encryptedPdu, ber.OctetString);
+ } else {
+ writer.writeBuffer(scopedPduWriter.buffer);
+ }
+
+ writer.endSequence();
+
+ this.buffer = writer.buffer;
+
+ if (this.hasPrivacy()) {
+ Encryption.addParametersToMessageBuffer(this.buffer, encryptionResult.msgPrivacyParameters);
+ }
+
+ if (this.hasAuthentication()) {
+ Authentication.addParametersToMessageBuffer(this.buffer, this.user.authProtocol, this.user.authKey,
+ this.msgSecurityParameters.msgAuthoritativeEngineID);
+ }
+
+ return this.buffer;
+};
+
+Message.prototype.processIncomingSecurity = function (user, responseCb) {
+ if (this.hasPrivacy()) {
+ if (!this.decryptPdu(user, responseCb)) {
+ return false;
+ }
+ }
+
+ if (this.hasAuthentication() && !this.isAuthenticationDisabled()) {
+ return this.checkAuthentication(user, responseCb);
+ } else {
+ return true;
+ }
+};
+
+Message.prototype.decryptPdu = function (user, responseCb) {
+ var decryptedPdu;
+ var decryptedPduReader;
+ try {
+ decryptedPdu = Encryption.decryptPdu(this.encryptedPdu, user.privProtocol,
+ this.msgSecurityParameters.msgPrivacyParameters, user.privKey, user.authProtocol,
+ this.msgSecurityParameters.msgAuthoritativeEngineID);
+ decryptedPduReader = new ber.Reader(decryptedPdu);
+ this.pdu = readPdu(decryptedPduReader, true);
+ return true;
+ // very occasionally the decrypt truncates a single byte,
+ // causing an ASN.1 read failure in readPdu();
+ // in that case, disabling auto padding decrypts the PDU correctly,
+ // and this try-catch provides the workaround for that condition
+ } catch (possibleTruncationError) {
+ try {
+ decryptedPdu = Encryption.decryptPdu(this.encryptedPdu, user.privProtocol,
+ this.msgSecurityParameters.msgPrivacyParameters, user.privKey, user.authProtocol,
+ this.msgSecurityParameters.msgAuthoritativeEngineID, true);
+ decryptedPduReader = new ber.Reader(decryptedPdu);
+ this.pdu = readPdu(decryptedPduReader, true);
+ return true;
+ } catch (error) {
+ responseCb(new ResponseInvalidError("Failed to decrypt PDU: " + error));
+ return false;
+ }
+ }
+
+};
+
+Message.prototype.checkAuthentication = function (user, responseCb) {
+ if (Authentication.isAuthentic(this.buffer, user.authProtocol, user.authKey,
+ this.msgSecurityParameters.msgAuthoritativeEngineID, this.msgSecurityParameters.msgAuthenticationParameters)) {
+ return true;
+ } else {
+ responseCb(new ResponseInvalidError("Authentication digest "
+ + this.msgSecurityParameters.msgAuthenticationParameters.toString('hex')
+ + " received in message does not match digest "
+ + Authentication.calculateDigest(this.buffer, user.authProtocol, user.authKey,
+ this.msgSecurityParameters.msgAuthoritativeEngineID).toString('hex')
+ + " calculated for message"));
+ return false;
+ }
+
+};
+
+Message.prototype.hasAuthentication = function () {
+ return this.msgGlobalData && this.msgGlobalData.msgFlags && this.msgGlobalData.msgFlags & 1;
+};
+
+Message.prototype.hasPrivacy = function () {
+ return this.msgGlobalData && this.msgGlobalData.msgFlags && this.msgGlobalData.msgFlags & 2;
+};
+
+Message.prototype.isReportable = function () {
+ return this.msgGlobalData && this.msgGlobalData.msgFlags && this.msgGlobalData.msgFlags & 4;
+};
+
+Message.prototype.setReportable = function (flag) {
+ if (this.msgGlobalData && this.msgGlobalData.msgFlags) {
+ if (flag) {
+ this.msgGlobalData.msgFlags = this.msgGlobalData.msgFlags | 4;
+ } else {
+ this.msgGlobalData.msgFlags = this.msgGlobalData.msgFlags & (255 - 4);
+ }
+ }
+};
+
+Message.prototype.isAuthenticationDisabled = function () {
+ return this.disableAuthentication;
+};
+
+Message.prototype.hasAuthoritativeEngineID = function () {
+ return this.msgSecurityParameters && this.msgSecurityParameters.msgAuthoritativeEngineID &&
+ this.msgSecurityParameters.msgAuthoritativeEngineID != "";
+};
+
+Message.prototype.createReportResponseMessage = function (engine, context) {
+ var user = {
+ name: "",
+ level: SecurityLevel.noAuthNoPriv
+ };
+ var responseSecurityParameters = {
+ msgAuthoritativeEngineID: engine.engineID,
+ msgAuthoritativeEngineBoots: engine.engineBoots,
+ msgAuthoritativeEngineTime: engine.engineTime,
+ msgUserName: user.name,
+ msgAuthenticationParameters: "",
+ msgPrivacyParameters: ""
+ };
+ var reportPdu = ReportPdu.createFromVariables(this.pdu.id, [], {});
+ reportPdu.contextName = context;
+ var responseMessage = Message.createRequestV3(user, responseSecurityParameters, reportPdu);
+ responseMessage.msgGlobalData.msgID = this.msgGlobalData.msgID;
+ return responseMessage;
+};
+
+Message.prototype.createResponseForRequest = function (responsePdu) {
+ if (this.version == Version3) {
+ return this.createV3ResponseFromRequest(responsePdu);
+ } else {
+ return this.createCommunityResponseFromRequest(responsePdu);
+ }
+};
+
+Message.prototype.createCommunityResponseFromRequest = function (responsePdu) {
+ return Message.createCommunity(this.version, this.community, responsePdu);
+};
+
+Message.prototype.createV3ResponseFromRequest = function (responsePdu) {
+ var responseUser = {
+ name: this.user.name,
+ level: this.user.level,
+ authProtocol: this.user.authProtocol,
+ authKey: this.user.authKey,
+ privProtocol: this.user.privProtocol,
+ privKey: this.user.privKey
+ };
+ var responseSecurityParameters = {
+ msgAuthoritativeEngineID: this.msgSecurityParameters.msgAuthoritativeEngineID,
+ msgAuthoritativeEngineBoots: this.msgSecurityParameters.msgAuthoritativeEngineBoots,
+ msgAuthoritativeEngineTime: this.msgSecurityParameters.msgAuthoritativeEngineTime,
+ msgUserName: this.msgSecurityParameters.msgUserName,
+ msgAuthenticationParameters: "",
+ msgPrivacyParameters: ""
+ };
+ var responseGlobalData = {
+ msgID: this.msgGlobalData.msgID,
+ msgMaxSize: 65507,
+ msgFlags: this.msgGlobalData.msgFlags & (255 - 4),
+ msgSecurityModel: 3
+ };
+ return Message.createV3(responseUser, responseGlobalData, responseSecurityParameters, responsePdu);
+};
+
+Message.createCommunity = function (version, community, pdu) {
+ var message = new Message();
+
+ message.version = version;
+ message.community = community;
+ message.pdu = pdu;
+
+ return message;
+};
+
+Message.createRequestV3 = function (user, msgSecurityParameters, pdu) {
+ var authFlag = user.level == SecurityLevel.authNoPriv || user.level == SecurityLevel.authPriv ? 1 : 0;
+ var privFlag = user.level == SecurityLevel.authPriv ? 1 : 0;
+ var reportableFlag = (pdu.type == PduType.GetResponse || pdu.type == PduType.TrapV2) ? 0 : 1;
+ var msgGlobalData = {
+ msgID: _generateId(), // random ID
+ msgMaxSize: 65507,
+ msgFlags: reportableFlag * 4 | privFlag * 2 | authFlag * 1,
+ msgSecurityModel: 3
+ };
+ return Message.createV3(user, msgGlobalData, msgSecurityParameters, pdu);
+};
+
+Message.createV3 = function (user, msgGlobalData, msgSecurityParameters, pdu) {
+ var message = new Message();
+
+ message.version = 3;
+ message.user = user;
+ message.msgGlobalData = msgGlobalData;
+ message.msgSecurityParameters = {
+ msgAuthoritativeEngineID: msgSecurityParameters.msgAuthoritativeEngineID || Buffer.from(""),
+ msgAuthoritativeEngineBoots: msgSecurityParameters.msgAuthoritativeEngineBoots || 0,
+ msgAuthoritativeEngineTime: msgSecurityParameters.msgAuthoritativeEngineTime || 0,
+ msgUserName: user.name || "",
+ msgAuthenticationParameters: "",
+ msgPrivacyParameters: ""
+ };
+ message.pdu = pdu;
+
+ return message;
+};
+
+Message.createDiscoveryV3 = function (pdu) {
+ var msgSecurityParameters = {
+ msgAuthoritativeEngineID: Buffer.from(""),
+ msgAuthoritativeEngineBoots: 0,
+ msgAuthoritativeEngineTime: 0
+ };
+ var emptyUser = {
+ name: "",
+ level: SecurityLevel.noAuthNoPriv
+ };
+ return Message.createRequestV3(emptyUser, msgSecurityParameters, pdu);
+};
+
+Message.createFromBuffer = function (buffer, user) {
+ var reader = new ber.Reader(buffer);
+ var message = new Message();
+
+ reader.readSequence();
+
+ message.version = reader.readInt();
+
+ if (message.version != 3) {
+ message.community = reader.readString();
+ message.pdu = readPdu(reader, false);
+ } else {
+ // HeaderData
+ message.msgGlobalData = {};
+ reader.readSequence();
+ message.msgGlobalData.msgID = reader.readInt();
+ message.msgGlobalData.msgMaxSize = reader.readInt();
+ message.msgGlobalData.msgFlags = reader.readString(ber.OctetString, true)[0];
+ message.msgGlobalData.msgSecurityModel = reader.readInt();
+
+ // msgSecurityParameters
+ message.msgSecurityParameters = {};
+ var msgSecurityParametersReader = new ber.Reader(reader.readString(ber.OctetString, true));
+ msgSecurityParametersReader.readSequence();
+ message.msgSecurityParameters.msgAuthoritativeEngineID = msgSecurityParametersReader.readString(ber.OctetString, true);
+ message.msgSecurityParameters.msgAuthoritativeEngineBoots = msgSecurityParametersReader.readInt();
+ message.msgSecurityParameters.msgAuthoritativeEngineTime = msgSecurityParametersReader.readInt();
+ message.msgSecurityParameters.msgUserName = msgSecurityParametersReader.readString();
+ message.msgSecurityParameters.msgAuthenticationParameters = Buffer.from(msgSecurityParametersReader.readString(ber.OctetString, true));
+ message.msgSecurityParameters.msgPrivacyParameters = Buffer.from(msgSecurityParametersReader.readString(ber.OctetString, true));
+
+ if (message.hasPrivacy()) {
+ message.encryptedPdu = reader.readString(ber.OctetString, true);
+ message.pdu = null;
+ } else {
+ message.pdu = readPdu(reader, true);
+ }
+ }
+
+ message.buffer = buffer;
+
+ return message;
+};
+
+
+var Req = function (session, message, feedCb, responseCb, options) {
+
+ this.message = message;
+ this.responseCb = responseCb;
+ this.retries = session.retries;
+ this.timeout = session.timeout;
+ this.onResponse = session.onSimpleGetResponse;
+ this.feedCb = feedCb;
+ this.port = (options && options.port) ? options.port : session.port;
+ // keep options so an SNMPv3 request can be re-sent after discovery
+ this.options = options;
+ this.context = session.context;
+};
+
+Req.prototype.getId = function () {
+ return this.message.getReqId();
+};
+
+
+/*****************************************************************************
+ ** Session class definition
+ **/
+
+var Session = function (target, authenticator, options) {
+ this.target = target || "127.0.0.1";
+
+ this.version = (options && options.version)
+ ? options.version
+ : Version1;
+
+ if (this.version == Version3) {
+ this.user = authenticator;
+ } else {
+ this.community = authenticator || "public";
+ }
+
+ this.transport = (options && options.transport)
+ ? options.transport
+ : "udp4";
+ this.port = (options && options.port)
+ ? options.port
+ : 161;
+ this.trapPort = (options && options.trapPort)
+ ? options.trapPort
+ : 162;
+
+ this.retries = (options && (options.retries || options.retries == 0))
+ ? options.retries
+ : 1;
+ this.timeout = (options && options.timeout)
+ ? options.timeout
+ : 5000;
+
+ this.sourceAddress = (options && options.sourceAddress)
+ ? options.sourceAddress
+ : undefined;
+ this.sourcePort = (options && options.sourcePort)
+ ? parseInt(options.sourcePort)
+ : undefined;
+
+ this.idBitsSize = (options && options.idBitsSize)
+ ? parseInt(options.idBitsSize)
+ : 32;
+
+ this.context = (options && options.context) ? options.context : "";
+
+ DEBUG = options && options.debug;
+
+ this.reqs = {};
+ this.reqCount = 0;
+
+ this.dgram = dgram.createSocket(this.transport);
+ this.dgram.unref();
+
+ var me = this;
+ this.dgram.on("message", me.onMsg.bind(me));
+ this.dgram.on("close", me.onClose.bind(me));
+ this.dgram.on("error", me.onError.bind(me));
+
+ if (this.sourceAddress || this.sourcePort)
+ this.dgram.bind(this.sourcePort, this.sourceAddress);
+};
+
+util.inherits(Session, events.EventEmitter);
+
+Session.prototype.close = function () {
+ this.dgram.close();
+ return this;
+};
+
+Session.prototype.cancelRequests = function (error) {
+ var id;
+ for (id in this.reqs) {
+ var req = this.reqs[id];
+ this.unregisterRequest(req.getId());
+ req.responseCb(error);
+ }
+};
+
+function _generateId(bitSize) {
+ if (bitSize === 16) {
+ return Math.floor(Math.random() * 10000) % 65535;
+ }
+ return Math.floor(Math.random() * 100000000) % 4294967295;
+}
+
+Session.prototype.get = function (oids, responseCb) {
+ function feedCb(req, message) {
+ var pdu = message.pdu;
+ var varbinds = [];
+
+ if (req.message.pdu.varbinds.length != pdu.varbinds.length) {
+ req.responseCb(new ResponseInvalidError("Requested OIDs do not "
+ + "match response OIDs"));
+ } else {
+ for (var i = 0; i < req.message.pdu.varbinds.length; i++) {
+ if (req.message.pdu.varbinds[i].oid != pdu.varbinds[i].oid) {
+ req.responseCb(new ResponseInvalidError("OID '"
+ + req.message.pdu.varbinds[i].oid
+ + "' in request at positiion '" + i + "' does not "
+ + "match OID '" + pdu.varbinds[i].oid + "' in response "
+ + "at position '" + i + "'"));
+ return;
+ } else {
+ varbinds.push(pdu.varbinds[i]);
+ }
+ }
+
+ req.responseCb(null, varbinds);
+ }
+ }
+
+ var pduVarbinds = [];
+
+ for (var i = 0; i < oids.length; i++) {
+ var varbind = {
+ oid: oids[i]
+ };
+ pduVarbinds.push(varbind);
+ }
+
+ this.simpleGet(GetRequestPdu, feedCb, pduVarbinds, responseCb);
+
+ return this;
+};
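+
+// Usage sketch (host, community and OID are hypothetical):
+//
+//     var session = new Session("192.0.2.1", "public");
+//     session.get(["1.3.6.1.2.1.1.5.0"], function (error, varbinds) {
+//         if (error)
+//             console.error(error);
+//         else
+//             console.log(varbinds[0].oid + " = " + varbinds[0].value);
+//     });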
+
+Session.prototype.getBulk = function () {
+ var oids, nonRepeaters, maxRepetitions, responseCb;
+
+ if (arguments.length >= 4) {
+ oids = arguments[0];
+ nonRepeaters = arguments[1];
+ maxRepetitions = arguments[2];
+ responseCb = arguments[3];
+ } else if (arguments.length >= 3) {
+ oids = arguments[0];
+ nonRepeaters = arguments[1];
+ maxRepetitions = 10;
+ responseCb = arguments[2];
+ } else {
+ oids = arguments[0];
+ nonRepeaters = 0;
+ maxRepetitions = 10;
+ responseCb = arguments[1];
+ }
+
+ function feedCb(req, message) {
+ var pdu = message.pdu;
+ var varbinds = [];
+ var i = 0;
+
+ // first walk through and grab non-repeaters
+ if (pdu.varbinds.length < nonRepeaters) {
+ req.responseCb(new ResponseInvalidError("Varbind count in "
+ + "response '" + pdu.varbinds.length + "' is less than "
+ + "non-repeaters '" + nonRepeaters + "' in request"));
+ } else {
+ for (; i < nonRepeaters; i++) {
+ if (isVarbindError(pdu.varbinds[i])) {
+ varbinds.push(pdu.varbinds[i]);
+ } else if (!oidFollowsOid(req.message.pdu.varbinds[i].oid,
+ pdu.varbinds[i].oid)) {
+ req.responseCb(new ResponseInvalidError("OID '"
+ + req.message.pdu.varbinds[i].oid + "' in request at "
+ + "positiion '" + i + "' does not precede "
+ + "OID '" + pdu.varbinds[i].oid + "' in response "
+ + "at position '" + i + "'"));
+ return;
+ } else {
+ varbinds.push(pdu.varbinds[i]);
+ }
+ }
+ }
+
+ var repeaters = req.message.pdu.varbinds.length - nonRepeaters;
+
+ // secondly walk through and grab repeaters
+ if (pdu.varbinds.length % (repeaters)) {
+ req.responseCb(new ResponseInvalidError("Varbind count in "
+ + "response '" + pdu.varbinds.length + "' is not a "
+ + "multiple of repeaters '" + repeaters
+ + "' plus non-repeaters '" + nonRepeaters + "' in request"));
+ } else {
+ while (i < pdu.varbinds.length) {
+ for (var j = 0; j < repeaters; j++, i++) {
+ var reqIndex = nonRepeaters + j;
+ var respIndex = i;
+
+ if (isVarbindError(pdu.varbinds[respIndex])) {
+ if (!varbinds[reqIndex])
+ varbinds[reqIndex] = [];
+ varbinds[reqIndex].push(pdu.varbinds[respIndex]);
+ } else if (!oidFollowsOid(
+ req.message.pdu.varbinds[reqIndex].oid,
+ pdu.varbinds[respIndex].oid)) {
+ req.responseCb(new ResponseInvalidError("OID '"
+ + req.message.pdu.varbinds[reqIndex].oid
+ + "' in request at positiion '" + (reqIndex)
+ + "' does not precede OID '"
+ + pdu.varbinds[respIndex].oid
+ + "' in response at position '" + (respIndex) + "'"));
+ return;
+ } else {
+ if (!varbinds[reqIndex])
+ varbinds[reqIndex] = [];
+ varbinds[reqIndex].push(pdu.varbinds[respIndex]);
+ }
+ }
+ }
+ }
+
+ req.responseCb(null, varbinds);
+ }
+
+ var pduVarbinds = [];
+
+ for (var i = 0; i < oids.length; i++) {
+ var varbind = {
+ oid: oids[i]
+ };
+ pduVarbinds.push(varbind);
+ }
+
+ var options = {
+ nonRepeaters: nonRepeaters,
+ maxRepetitions: maxRepetitions
+ };
+
+ this.simpleGet(GetBulkRequestPdu, feedCb, pduVarbinds, responseCb,
+ options);
+
+ return this;
+};
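+
+// Usage sketch: the first OID is a non-repeater fetched once; for the second
+// OID up to maxRepetitions successive varbinds are returned as an array
+// (OIDs are hypothetical):
+//
+//     session.getBulk(["1.3.6.1.2.1.1.1", "1.3.6.1.2.1.2.2.1.2"], 1, 10,
+//             function (error, varbinds) {
+//         // varbinds[0] is a single varbind; varbinds[1] is an array
+//     });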
+
+Session.prototype.getNext = function (oids, responseCb) {
+ function feedCb(req, message) {
+ var pdu = message.pdu;
+ var varbinds = [];
+
+ if (req.message.pdu.varbinds.length != pdu.varbinds.length) {
+ req.responseCb(new ResponseInvalidError("Requested OIDs do not "
+ + "match response OIDs"));
+ } else {
+ for (var i = 0; i < req.message.pdu.varbinds.length; i++) {
+ if (isVarbindError(pdu.varbinds[i])) {
+ varbinds.push(pdu.varbinds[i]);
+ } else if (!oidFollowsOid(req.message.pdu.varbinds[i].oid,
+ pdu.varbinds[i].oid)) {
+ req.responseCb(new ResponseInvalidError("OID '"
+ + req.message.pdu.varbinds[i].oid + "' in request at "
+ + "positiion '" + i + "' does not precede "
+ + "OID '" + pdu.varbinds[i].oid + "' in response "
+ + "at position '" + i + "'"));
+ return;
+ } else {
+ varbinds.push(pdu.varbinds[i]);
+ }
+ }
+
+ req.responseCb(null, varbinds);
+ }
+ }
+
+ var pduVarbinds = [];
+
+ for (var i = 0; i < oids.length; i++) {
+ var varbind = {
+ oid: oids[i]
+ };
+ pduVarbinds.push(varbind);
+ }
+
+ this.simpleGet(GetNextRequestPdu, feedCb, pduVarbinds, responseCb);
+
+ return this;
+};
+
+Session.prototype.inform = function () {
+ var typeOrOid = arguments[0];
+ var varbinds, options = {}, responseCb;
+
+ /**
+ ** Support the following signatures:
+ **
+ ** typeOrOid, varbinds, options, callback
+ ** typeOrOid, varbinds, callback
+ ** typeOrOid, options, callback
+ ** typeOrOid, callback
+ **/
+ if (arguments.length >= 4) {
+ varbinds = arguments[1];
+ options = arguments[2];
+ responseCb = arguments[3];
+ } else if (arguments.length >= 3) {
+ if (arguments[1].constructor != Array) {
+ varbinds = [];
+ options = arguments[1];
+ responseCb = arguments[2];
+ } else {
+ varbinds = arguments[1];
+ responseCb = arguments[2];
+ }
+ } else {
+ varbinds = [];
+ responseCb = arguments[1];
+ }
+
+ if (this.version == Version1) {
+ responseCb(new RequestInvalidError("Inform not allowed for SNMPv1"));
+ return;
+ }
+
+ function feedCb(req, message) {
+ var pdu = message.pdu;
+ var varbinds = [];
+
+ if (req.message.pdu.varbinds.length != pdu.varbinds.length) {
+ req.responseCb(new ResponseInvalidError("Inform OIDs do not "
+ + "match response OIDs"));
+ } else {
+ for (var i = 0; i < req.message.pdu.varbinds.length; i++) {
+ if (req.message.pdu.varbinds[i].oid != pdu.varbinds[i].oid) {
+ req.responseCb(new ResponseInvalidError("OID '"
+ + req.message.pdu.varbinds[i].oid
+ + "' in inform at positiion '" + i + "' does not "
+ + "match OID '" + pdu.varbinds[i].oid + "' in response "
+ + "at position '" + i + "'"));
+ return;
+ } else {
+ varbinds.push(pdu.varbinds[i]);
+ }
+ }
+
+ req.responseCb(null, varbinds);
+ }
+ }
+
+ if (typeof typeOrOid != "string")
+ typeOrOid = "1.3.6.1.6.3.1.1.5." + (typeOrOid + 1);
+
+ var pduVarbinds = [
+ {
+ oid: "1.3.6.1.2.1.1.3.0",
+ type: ObjectType.TimeTicks,
+ value: options.upTime || Math.floor(process.uptime() * 100)
+ },
+ {
+ oid: "1.3.6.1.6.3.1.1.4.1.0",
+ type: ObjectType.OID,
+ value: typeOrOid
+ }
+ ];
+
+ for (var i = 0; i < varbinds.length; i++) {
+ var varbind = {
+ oid: varbinds[i].oid,
+ type: varbinds[i].type,
+ value: varbinds[i].value
+ };
+ pduVarbinds.push(varbind);
+ }
+
+ options.port = this.trapPort;
+
+ this.simpleGet(InformRequestPdu, feedCb, pduVarbinds, responseCb, options);
+
+ return this;
+};
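+
+// Usage sketch (SNMPv2c/v3 only; the notification OID is hypothetical); the
+// sysUpTime and snmpTrapOID varbinds above are prepended automatically:
+//
+//     session.inform("1.3.6.1.4.1.9999.0.1", [], function (error, varbinds) {
+//         if (error)
+//             console.error(error);
+//     });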
+
+Session.prototype.onClose = function () {
+ this.cancelRequests(new Error("Socket forcibly closed"));
+ this.emit("close");
+};
+
+Session.prototype.onError = function (error) {
+ this.emit("error", error);
+};
+
+Session.prototype.onMsg = function (buffer) {
+ try {
+ var message = Message.createFromBuffer(buffer);
+
+ var req = this.unregisterRequest(message.getReqId());
+ if (!req)
+ return;
+
+ if (!message.processIncomingSecurity(this.user, req.responseCb))
+ return;
+
+ try {
+ if (message.version != req.message.version) {
+ req.responseCb(new ResponseInvalidError("Version in request '"
+ + req.message.version + "' does not match version in "
+ + "response '" + message.version + "'"));
+ } else if (message.community != req.message.community) {
+ req.responseCb(new ResponseInvalidError("Community '"
+ + req.message.community + "' in request does not match "
+ + "community '" + message.community + "' in response"));
+ } else if (message.pdu.type == PduType.GetResponse) {
+ req.onResponse(req, message);
+ } else if (message.pdu.type == PduType.Report) {
+ if (!req.originalPdu) {
+ req.responseCb(new ResponseInvalidError("Unexpected Report PDU"));
+ return;
+ }
+ this.msgSecurityParameters = {
+ msgAuthoritativeEngineID: message.msgSecurityParameters.msgAuthoritativeEngineID,
+ msgAuthoritativeEngineBoots: message.msgSecurityParameters.msgAuthoritativeEngineBoots,
+ msgAuthoritativeEngineTime: message.msgSecurityParameters.msgAuthoritativeEngineTime
+ };
+ req.originalPdu.contextName = this.context;
+ this.sendV3Req(req.originalPdu, req.feedCb, req.responseCb, req.options, req.port);
+ } else {
+ req.responseCb(new ResponseInvalidError("Unknown PDU type '"
+ + message.pdu.type + "' in response"));
+ }
+ } catch (error) {
+ req.responseCb(error);
+ }
+ } catch (error) {
+ this.emit("error", error);
+ }
+};
+
+Session.prototype.onSimpleGetResponse = function (req, message) {
+ var pdu = message.pdu;
+
+ if (pdu.errorStatus > 0) {
+ var statusString = ErrorStatus[pdu.errorStatus]
+ || "GeneralError";
+ var statusCode = ErrorStatus[statusString]
+ || ErrorStatus.GeneralError;
+
+ if (pdu.errorIndex <= 0 || pdu.errorIndex > pdu.varbinds.length) {
+ req.responseCb(new RequestFailedError(statusString, statusCode));
+ } else {
+ var oid = pdu.varbinds[pdu.errorIndex - 1].oid;
+ var error = new RequestFailedError(statusString + ": " + oid,
+ statusCode);
+ req.responseCb(error);
+ }
+ } else {
+ req.feedCb(req, message);
+ }
+};
+
+Session.prototype.registerRequest = function (req) {
+ if (!this.reqs[req.getId()]) {
+ this.reqs[req.getId()] = req;
+ if (this.reqCount <= 0)
+ this.dgram.ref();
+ this.reqCount++;
+ }
+ var me = this;
+ req.timer = setTimeout(function () {
+ if (req.retries-- > 0) {
+ me.send(req);
+ } else {
+ me.unregisterRequest(req.getId());
+ req.responseCb(new RequestTimedOutError(
+ "Request timed out"));
+ }
+ }, req.timeout);
+};
+
+Session.prototype.send = function (req, noWait) {
+ try {
+ var me = this;
+
+ var buffer = req.message.toBuffer();
+
+ this.dgram.send(buffer, 0, buffer.length, req.port, this.target,
+ function (error, bytes) {
+ if (error) {
+ req.responseCb(error);
+ } else {
+ if (noWait) {
+ req.responseCb(null);
+ } else {
+ me.registerRequest(req);
+ }
+ }
+ });
+ } catch (error) {
+ req.responseCb(error);
+ }
+
+ return this;
+};
+
+Session.prototype.set = function (varbinds, responseCb) {
+ function feedCb(req, message) {
+ var pdu = message.pdu;
+ var varbinds = [];
+
+ if (req.message.pdu.varbinds.length != pdu.varbinds.length) {
+ req.responseCb(new ResponseInvalidError("Requested OIDs do not "
+ + "match response OIDs"));
+ } else {
+ for (var i = 0; i < req.message.pdu.varbinds.length; i++) {
+ if (req.message.pdu.varbinds[i].oid != pdu.varbinds[i].oid) {
+ req.responseCb(new ResponseInvalidError("OID '"
+ + req.message.pdu.varbinds[i].oid
+ + "' in request at positiion '" + i + "' does not "
+ + "match OID '" + pdu.varbinds[i].oid + "' in response "
+ + "at position '" + i + "'"));
+ return;
+ } else {
+ varbinds.push(pdu.varbinds[i]);
+ }
+ }
+
+ req.responseCb(null, varbinds);
+ }
+ }
+
+ var pduVarbinds = [];
+
+ for (var i = 0; i < varbinds.length; i++) {
+ var varbind = {
+ oid: varbinds[i].oid,
+ type: varbinds[i].type,
+ value: varbinds[i].value
+ };
+ pduVarbinds.push(varbind);
+ }
+
+ this.simpleGet(SetRequestPdu, feedCb, pduVarbinds, responseCb);
+
+ return this;
+};
+
+Session.prototype.simpleGet = function (pduClass, feedCb, varbinds,
+ responseCb, options) {
+ try {
+ var id = _generateId(this.idBitsSize);
+ var pdu = SimplePdu.createFromVariables(pduClass, id, varbinds, options);
+ var message;
+ var req;
+
+ if (this.version == Version3) {
+ if (this.msgSecurityParameters) {
+ this.sendV3Req(pdu, feedCb, responseCb, options, this.port);
+ } else {
+ // SNMPv3 discovery
+ var discoveryPdu = createDiscoveryPdu(this.context);
+ var discoveryMessage = Message.createDiscoveryV3(discoveryPdu);
+ var discoveryReq = new Req(this, discoveryMessage, feedCb, responseCb, options);
+ discoveryReq.originalPdu = pdu;
+ this.send(discoveryReq);
+ }
+ } else {
+ message = Message.createCommunity(this.version, this.community, pdu);
+ req = new Req(this, message, feedCb, responseCb, options);
+ this.send(req);
+ }
+ } catch (error) {
+ if (responseCb)
+ responseCb(error);
+ }
+};
+
+function subtreeCb(req, varbinds) {
+ var done = 0;
+
+ for (var i = varbinds.length; i > 0; i--) {
+ if (!oidInSubtree(req.baseOid, varbinds[i - 1].oid)) {
+ done = 1;
+ varbinds.pop();
+ }
+ }
+
+ if (varbinds.length > 0)
+ req.feedCb(varbinds);
+
+ if (done)
+ return true;
+}
+
+Session.prototype.subtree = function () {
+ var me = this;
+ var oid = arguments[0];
+ var maxRepetitions, feedCb, doneCb;
+
+ if (arguments.length < 4) {
+ maxRepetitions = 20;
+ feedCb = arguments[1];
+ doneCb = arguments[2];
+ } else {
+ maxRepetitions = arguments[1];
+ feedCb = arguments[2];
+ doneCb = arguments[3];
+ }
+
+ var req = {
+ feedCb: feedCb,
+ doneCb: doneCb,
+ maxRepetitions: maxRepetitions,
+ baseOid: oid
+ };
+
+ this.walk(oid, maxRepetitions, subtreeCb.bind(me, req), doneCb);
+
+ return this;
+};
+
+function tableColumnsResponseCb(req, error) {
+ if (error) {
+ req.responseCb(error);
+ } else if (req.error) {
+ req.responseCb(req.error);
+ } else {
+ if (req.columns.length > 0) {
+ var column = req.columns.pop();
+ var me = this;
+ this.subtree(req.rowOid + column, req.maxRepetitions,
+ tableColumnsFeedCb.bind(me, req),
+ tableColumnsResponseCb.bind(me, req));
+ } else {
+ req.responseCb(null, req.table);
+ }
+ }
+}
+
+function tableColumnsFeedCb(req, varbinds) {
+ for (var i = 0; i < varbinds.length; i++) {
+ if (isVarbindError(varbinds[i])) {
+ req.error = new RequestFailedError(varbindError(varbinds[i]));
+ return true;
+ }
+
+ var oid = varbinds[i].oid.replace(req.rowOid, "");
+ if (oid && oid != varbinds[i].oid) {
+ var match = oid.match(/^(\d+)\.(.+)$/);
+ if (match && match[1] > 0) {
+ if (!req.table[match[2]])
+ req.table[match[2]] = {};
+ req.table[match[2]][match[1]] = varbinds[i].value;
+ }
+ }
+ }
+}
+
+Session.prototype.tableColumns = function () {
+ var me = this;
+
+ var oid = arguments[0];
+ var columns = arguments[1];
+ var maxRepetitions, responseCb;
+
+ if (arguments.length < 4) {
+ responseCb = arguments[2];
+ maxRepetitions = 20;
+ } else {
+ maxRepetitions = arguments[2];
+ responseCb = arguments[3];
+ }
+
+ var req = {
+ responseCb: responseCb,
+ maxRepetitions: maxRepetitions,
+ baseOid: oid,
+ rowOid: oid + ".1.",
+ columns: columns.slice(0),
+ table: {}
+ };
+
+ if (req.columns.length > 0) {
+ var column = req.columns.pop();
+ this.subtree(req.rowOid + column, maxRepetitions,
+ tableColumnsFeedCb.bind(me, req),
+ tableColumnsResponseCb.bind(me, req));
+ }
+
+ return this;
+};
+
+function tableResponseCb(req, error) {
+ if (error)
+ req.responseCb(error);
+ else if (req.error)
+ req.responseCb(req.error);
+ else
+ req.responseCb(null, req.table);
+}
+
+function tableFeedCb(req, varbinds) {
+ for (var i = 0; i < varbinds.length; i++) {
+ if (isVarbindError(varbinds[i])) {
+ req.error = new RequestFailedError(varbindError(varbinds[i]));
+ return true;
+ }
+
+ var oid = varbinds[i].oid.replace(req.rowOid, "");
+ if (oid && oid != varbinds[i].oid) {
+ var match = oid.match(/^(\d+)\.(.+)$/);
+ if (match && match[1] > 0) {
+ if (!req.table[match[2]])
+ req.table[match[2]] = {};
+ req.table[match[2]][match[1]] = varbinds[i].value;
+ }
+ }
+ }
+}
+
+Session.prototype.table = function () {
+ var me = this;
+
+ var oid = arguments[0];
+ var maxRepetitions, responseCb;
+
+ if (arguments.length < 3) {
+ responseCb = arguments[1];
+ maxRepetitions = 20;
+ } else {
+ maxRepetitions = arguments[1];
+ responseCb = arguments[2];
+ }
+
+ var req = {
+ responseCb: responseCb,
+ maxRepetitions: maxRepetitions,
+ baseOid: oid,
+ rowOid: oid + ".1.",
+ table: {}
+ };
+
+ this.subtree(oid, maxRepetitions, tableFeedCb.bind(me, req),
+ tableResponseCb.bind(me, req));
+
+ return this;
+};
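+
+// Usage sketch: table() walks the subtree under a (hypothetical) table OID
+// and indexes the result as table[rowIndex][columnNumber]:
+//
+//     session.table("1.3.6.1.2.1.2.2", 20, function (error, table) {
+//         if (!error)
+//             console.log(JSON.stringify(table, null, 2));
+//     });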
+
+Session.prototype.trap = function () {
+ var req = {};
+
+ try {
+ var typeOrOid = arguments[0];
+ var varbinds, options = {}, responseCb;
+ var message;
+
+ /**
+ ** Support the following signatures:
+ **
+ ** typeOrOid, varbinds, options, callback
+ ** typeOrOid, varbinds, agentAddr, callback
+ ** typeOrOid, varbinds, callback
+ ** typeOrOid, agentAddr, callback
+ ** typeOrOid, options, callback
+ ** typeOrOid, callback
+ **/
+ if (arguments.length >= 4) {
+ varbinds = arguments[1];
+ if (typeof arguments[2] == "string") {
+ options.agentAddr = arguments[2];
+ } else if (arguments[2].constructor != Array) {
+ options = arguments[2];
+ }
+ responseCb = arguments[3];
+ } else if (arguments.length >= 3) {
+ if (typeof arguments[1] == "string") {
+ varbinds = [];
+ options.agentAddr = arguments[1];
+ } else if (arguments[1].constructor != Array) {
+ varbinds = [];
+ options = arguments[1];
+ } else {
+ varbinds = arguments[1];
+ }
+ responseCb = arguments[2];
+ } else {
+ varbinds = [];
+ responseCb = arguments[1];
+ }
+
+ var pdu, pduVarbinds = [];
+
+ for (var i = 0; i < varbinds.length; i++) {
+ var varbind = {
+ oid: varbinds[i].oid,
+ type: varbinds[i].type,
+ value: varbinds[i].value
+ };
+ pduVarbinds.push(varbind);
+ }
+
+ var id = _generateId(this.idBitsSize);
+
+ if (this.version == Version2c || this.version == Version3) {
+ if (typeof typeOrOid != "string")
+ typeOrOid = "1.3.6.1.6.3.1.1.5." + (typeOrOid + 1);
+
+ pduVarbinds.unshift(
+ {
+ oid: "1.3.6.1.2.1.1.3.0",
+ type: ObjectType.TimeTicks,
+ value: options.upTime || Math.floor(process.uptime() * 100)
+ },
+ {
+ oid: "1.3.6.1.6.3.1.1.4.1.0",
+ type: ObjectType.OID,
+ value: typeOrOid
+ }
+ );
+
+ pdu = TrapV2Pdu.createFromVariables(id, pduVarbinds, options);
+ } else {
+ pdu = TrapPdu.createFromVariables(typeOrOid, pduVarbinds, options);
+ }
+
+ if (this.version == Version3) {
+ var msgSecurityParameters = {
+ msgAuthoritativeEngineID: this.user.engineID,
+ msgAuthoritativeEngineBoots: 0,
+ msgAuthoritativeEngineTime: 0
+ };
+ message = Message.createRequestV3(this.user, msgSecurityParameters, pdu);
+ } else {
+ message = Message.createCommunity(this.version, this.community, pdu);
+ }
+
+ req = {
+ id: id,
+ message: message,
+ responseCb: responseCb,
+ port: this.trapPort
+ };
+
+ this.send(req, true);
+ } catch (error) {
+ if (req.responseCb)
+ req.responseCb(error);
+ }
+
+ return this;
+};
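+
+// Usage sketch (illustrative comment, not executed): two of the signatures
+// documented above; TrapType and ObjectType are exported by this module and
+// the OIDs/values are made-up examples.
+//
+// session.trap(snmp.TrapType.LinkDown, function (error) {
+//     if (error)
+//         console.error(error);
+// });
+//
+// session.trap("1.3.6.1.4.1.2000.1", [
+//     { oid: "1.3.6.1.4.1.2000.1.1", type: snmp.ObjectType.OctetString, value: "example" }
+// ], { agentAddr: "192.0.2.1" }, function (error) {
+//     if (error)
+//         console.error(error);
+// });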
+
+Session.prototype.unregisterRequest = function (id) {
+ var req = this.reqs[id];
+ if (req) {
+ delete this.reqs[id];
+ clearTimeout(req.timer);
+ delete req.timer;
+ this.reqCount--;
+ if (this.reqCount <= 0)
+ this.dgram.unref();
+ return req;
+ } else {
+ return null;
+ }
+};
+
+function walkCb(req, error, varbinds) {
+ var done = 0;
+ var oid;
+
+ if (error) {
+ if (error instanceof RequestFailedError) {
+ if (error.status != ErrorStatus.NoSuchName) {
+ req.doneCb(error);
+ return;
+ } else {
+ // signal the version 1 walk code below that it should stop
+ done = 1;
+ }
+ } else {
+ req.doneCb(error);
+ return;
+ }
+ }
+
+ if (this.version == Version2c || this.version == Version3) {
+ for (var i = varbinds[0].length; i > 0; i--) {
+ if (varbinds[0][i - 1].type == ObjectType.EndOfMibView) {
+ varbinds[0].pop();
+ done = 1;
+ }
+ }
+ if (req.feedCb(varbinds[0]))
+ done = 1;
+ if (!done)
+ oid = varbinds[0][varbinds[0].length - 1].oid;
+ } else {
+ if (!done) {
+ if (req.feedCb(varbinds)) {
+ done = 1;
+ } else {
+ oid = varbinds[0].oid;
+ }
+ }
+ }
+
+ if (done)
+ req.doneCb(null);
+ else
+ this.walk(oid, req.maxRepetitions, req.feedCb, req.doneCb,
+ req.baseOid);
+}
+
+Session.prototype.walk = function () {
+ var me = this;
+ var oid = arguments[0];
+ var maxRepetitions, feedCb, doneCb, baseOid;
+
+ if (arguments.length < 4) {
+ maxRepetitions = 20;
+ feedCb = arguments[1];
+ doneCb = arguments[2];
+ } else {
+ maxRepetitions = arguments[1];
+ feedCb = arguments[2];
+ doneCb = arguments[3];
+ }
+
+ var req = {
+ maxRepetitions: maxRepetitions,
+ feedCb: feedCb,
+ doneCb: doneCb
+ };
+
+ if (this.version == Version2c || this.version == Version3)
+ this.getBulk([oid], 0, maxRepetitions,
+ walkCb.bind(me, req));
+ else
+ this.getNext([oid], walkCb.bind(me, req));
+
+ return this;
+};
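+
+// Usage sketch (illustrative comment, not executed): feedCb receives each batch
+// of varbinds and may return true to stop the walk early; doneCb fires once at
+// the end, with an error or null.
+//
+// session.walk("1.3.6.1.2.1.1", 20, function (varbinds) {
+//     for (var i = 0; i < varbinds.length; i++)
+//         if (!snmp.isVarbindError(varbinds[i]))
+//             console.log(varbinds[i].oid + " = " + varbinds[i].value);
+// }, function (error) {
+//     if (error)
+//         console.error(error);
+// });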
+
+Session.prototype.sendV3Req = function (pdu, feedCb, responseCb, options, port) {
+ var message = Message.createRequestV3(this.user, this.msgSecurityParameters, pdu);
+ var reqOptions = options || {};
+ var req = new Req(this, message, feedCb, responseCb, reqOptions);
+ req.port = port;
+ this.send(req);
+};
+
+var Engine = function (engineID, engineBoots, engineTime) {
+ if (engineID) {
+ this.engineID = Buffer.from(engineID, 'hex');
+ } else {
+ this.generateEngineID();
+ }
+ this.engineBoots = engineBoots || 0;
+ this.engineTime = engineTime || 10;
+};
+
+Engine.prototype.generateEngineID = function () {
+ // generate a 17-byte engine ID in the following format:
+ // 0x80 + 0x00B983 (enterprise OID) | 0x80 (enterprise-specific format) | 12 bytes of random
+ this.engineID = Buffer.alloc(17);
+ this.engineID.fill('8000B98380', 'hex', 0, 5);
+ this.engineID.fill(crypto.randomBytes(12), 5, 17, 'hex');
+};
+
+var Listener = function (options, receiver) {
+ this.receiver = receiver;
+ this.callback = receiver.onMsg;
+ this.family = options.transport || 'udp4';
+ this.port = options.port || 161;
+ this.disableAuthorization = options.disableAuthorization || false;
+};
+
+Listener.prototype.startListening = function () {
+ var me = this;
+ this.dgram = dgram.createSocket(this.family);
+ this.dgram.bind(this.port);
+ this.dgram.on("message", me.callback.bind(me.receiver));
+};
+
+Listener.prototype.send = function (message, rinfo) {
+ var me = this;
+
+ var buffer = message.toBuffer();
+
+ this.dgram.send(buffer, 0, buffer.length, rinfo.port, rinfo.address,
+ function (error, bytes) {
+ if (error) {
+ // me.callback (error);
+ console.error("Error sending: " + error.message);
+ } else {
+ // debug ("Listener sent response message");
+ }
+ });
+};
+
+// Close the listening socket; used by Receiver.close(), which otherwise has no
+// matching method to call on this class.
+Listener.prototype.close = function () {
+ if (this.dgram) {
+ this.dgram.close();
+ }
+};
+
+Listener.formatCallbackData = function (pdu, rinfo) {
+ if (pdu.contextEngineID) {
+ pdu.contextEngineID = pdu.contextEngineID.toString('hex');
+ }
+ delete pdu.nonRepeaters;
+ delete pdu.maxRepetitions;
+ return {
+ pdu: pdu,
+ rinfo: rinfo
+ };
+};
+
+Listener.processIncoming = function (buffer, authorizer, callback) {
+ var message = Message.createFromBuffer(buffer);
+ var community;
+
+ // Authorization
+ if (message.version == Version3) {
+ message.user = authorizer.users.filter(localUser => localUser.name ==
+ message.msgSecurityParameters.msgUserName)[0];
+ message.disableAuthentication = authorizer.disableAuthorization;
+ if (!message.user) {
+ if (message.msgSecurityParameters.msgUserName != "" && !authorizer.disableAuthorization) {
+ callback(new RequestFailedError("Local user not found for message with user " +
+ message.msgSecurityParameters.msgUserName));
+ return;
+ } else if (message.hasAuthentication()) {
+ callback(new RequestFailedError("Local user not found and message requires authentication with user " +
+ message.msgSecurityParameters.msgUserName));
+ return;
+ } else {
+ message.user = {
+ name: "",
+ level: SecurityLevel.noAuthNoPriv
+ };
+ }
+ }
+ if (!message.processIncomingSecurity(message.user, callback)) {
+ return;
+ }
+ } else {
+ community = authorizer.communities.filter(localCommunity => localCommunity == message.community)[0];
+ if (!community && !authorizer.disableAuthorization) {
+ callback(new RequestFailedError("Local community not found for message with community " + message.community));
+ return;
+ }
+ }
+
+ return message;
+};
+
+var Authorizer = function () {
+ this.communities = [];
+ this.users = [];
+};
+
+Authorizer.prototype.addCommunity = function (community) {
+ if (this.getCommunity(community)) {
+ return;
+ } else {
+ this.communities.push(community);
+ }
+};
+
+Authorizer.prototype.getCommunity = function (community) {
+ return this.communities.filter(localCommunity => localCommunity == community)[0] || null;
+};
+
+Authorizer.prototype.getCommunities = function () {
+ return this.communities;
+};
+
+Authorizer.prototype.deleteCommunity = function (community) {
+ var index = this.communities.indexOf(community);
+ if (index > -1) {
+ this.communities.splice(index, 1);
+ }
+};
+
+Authorizer.prototype.addUser = function (user) {
+ if (this.getUser(user.name)) {
+ this.deleteUser(user.name);
+ }
+ this.users.push(user);
+};
+
+Authorizer.prototype.getUser = function (userName) {
+ return this.users.filter(localUser => localUser.name == userName)[0] || null;
+};
+
+Authorizer.prototype.getUsers = function () {
+ return this.users;
+};
+
+Authorizer.prototype.deleteUser = function (userName) {
+ var index = this.users.findIndex(localUser => localUser.name == userName);
+ if (index > -1) {
+ this.users.splice(index, 1);
+ }
+};
+
+
+/*****************************************************************************
+ ** Receiver class definition
+ **/
+
+var Receiver = function (options, callback) {
+ DEBUG = options.debug;
+ this.authorizer = new Authorizer();
+ this.engine = new Engine(options.engineID);
+
+ this.engineBoots = 0;
+ this.engineTime = 10;
+
+ this.callback = callback;
+ this.family = options.transport || 'udp4';
+ this.port = options.port || 162;
+ options.port = this.port;
+ this.disableAuthorization = options.disableAuthorization || false;
+ this.context = (options && options.context) ? options.context : "";
+ this.listener = new Listener(options, this);
+};
+
+Receiver.prototype.addCommunity = function (community) {
+ this.authorizer.addCommunity(community);
+};
+
+Receiver.prototype.getCommunity = function (community) {
+ return this.authorizer.getCommunity(community);
+};
+
+Receiver.prototype.getCommunities = function () {
+ return this.authorizer.getCommunities();
+};
+
+Receiver.prototype.deleteCommunity = function (community) {
+ this.authorizer.deleteCommunity(community);
+};
+
+Receiver.prototype.addUser = function (user) {
+ this.authorizer.addUser(user);
+};
+
+Receiver.prototype.getUser = function (userName) {
+ return this.authorizer.getUser(userName);
+};
+
+Receiver.prototype.getUsers = function () {
+ return this.authorizer.getUsers();
+};
+
+Receiver.prototype.deleteUser = function (userName) {
+ this.authorizer.deleteUser(userName);
+};
+
+Receiver.prototype.onMsg = function (buffer, rinfo) {
+ var message = Listener.processIncoming(buffer, this.authorizer, this.callback);
+ var reportMessage;
+
+ if (!message) {
+ return;
+ }
+
+ // The only GetRequest PDUs supported are those used for SNMPv3 discovery
+ if (message.pdu.type == PduType.GetRequest) {
+ if (message.version != Version3) {
+ this.callback(new RequestInvalidError("Only SNMPv3 discovery GetRequests are supported"));
+ return;
+ } else if (message.hasAuthentication()) {
+ this.callback(new RequestInvalidError("Only discovery (noAuthNoPriv) GetRequests are supported but this message has authentication"));
+ return;
+ } else if (!message.isReportable()) {
+ this.callback(new RequestInvalidError("Only discovery GetRequests are supported and this message does not have the reportable flag set"));
+ return;
+ }
+ reportMessage = message.createReportResponseMessage(this.engine, this.context);
+ this.listener.send(reportMessage, rinfo);
+ return;
+ }
+
+ // Inform/trap processing
+ debug(JSON.stringify(message.pdu, null, 2));
+ if (message.pdu.type == PduType.Trap || message.pdu.type == PduType.TrapV2) {
+ this.callback(null, this.formatCallbackData(message.pdu, rinfo));
+ } else if (message.pdu.type == PduType.InformRequest) {
+ message.pdu.type = PduType.GetResponse;
+ message.buffer = null;
+ message.setReportable(false);
+ this.listener.send(message, rinfo);
+ message.pdu.type = PduType.InformRequest;
+ this.callback(null, this.formatCallbackData(message.pdu, rinfo));
+ } else {
+ this.callback(new RequestInvalidError("Unexpected PDU type " + message.pdu.type + " (" + PduType[message.pdu.type] + ")"));
+ }
+};
+
+Receiver.prototype.formatCallbackData = function (pdu, rinfo) {
+ if (pdu.contextEngineID) {
+ pdu.contextEngineID = pdu.contextEngineID.toString('hex');
+ }
+ delete pdu.nonRepeaters;
+ delete pdu.maxRepetitions;
+ return {
+ pdu: pdu,
+ rinfo: rinfo
+ };
+};
+
+Receiver.prototype.close = function () {
+ this.listener.close();
+};
+
+Receiver.create = function (options, callback) {
+ var receiver = new Receiver(options, callback);
+ receiver.listener.startListening();
+ return receiver;
+};
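+
+// Usage sketch (illustrative comment, not executed): receive traps and informs
+// on the default port 162, authorizing the example community "public".
+//
+// var receiver = snmp.createReceiver({ port: 162 }, function (error, notification) {
+//     if (error)
+//         console.error(error);
+//     else
+//         console.log(JSON.stringify(notification.pdu, null, 2));
+// });
+// receiver.addCommunity("public");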
+
+var MibNode = function (address, parent) {
+ this.address = address;
+ this.oid = this.address.join('.');
+ this.parent = parent;
+ this.children = {};
+};
+
+MibNode.prototype.child = function (index) {
+ return this.children[index];
+};
+
+MibNode.prototype.listChildren = function (lowest) {
+ var sorted = [];
+
+ lowest = lowest || 0;
+
+ Object.keys(this.children).forEach(function (i) {
+ if (i >= lowest)
+ sorted.push(i);
+ });
+
+ sorted.sort(function (a, b) {
+ return (a - b);
+ });
+
+ return sorted;
+};
+
+MibNode.prototype.isDescendant = function (address) {
+ return MibNode.oidIsDescended(this.address, address);
+};
+
+MibNode.prototype.isAncestor = function (address) {
+ return MibNode.oidIsDescended(address, this.address);
+};
+
+MibNode.prototype.getAncestorProvider = function () {
+ if (this.provider) {
+ return this;
+ } else if (!this.parent) {
+ return null;
+ } else {
+ return this.parent.getAncestorProvider();
+ }
+};
+
+MibNode.prototype.getInstanceNodeForTableRow = function () {
+ var childCount = Object.keys(this.children).length;
+ if (childCount == 0) {
+ if (this.value) {
+ return this;
+ } else {
+ return null;
+ }
+ } else if (childCount == 1) {
+ return this.children[0].getInstanceNodeForTableRow();
+ } else if (childCount > 1) {
+ return null;
+ }
+};
+
+MibNode.prototype.getInstanceNodeForTableRowIndex = function (index) {
+ var childCount = Object.keys(this.children).length;
+ if (childCount == 0) {
+ if (this.value) {
+ return this;
+ } else {
+ // not found
+ return null;
+ }
+ } else {
+ if (index.length == 0) {
+ return this.getInstanceNodeForTableRow();
+ } else {
+ var nextChildIndexPart = index[0];
+ if (!nextChildIndexPart) {
+ return null;
+ }
+ var remainingIndex = index.slice(1);
+ return this.children[nextChildIndexPart].getInstanceNodeForTableRowIndex(remainingIndex);
+ }
+ }
+};
+
+MibNode.prototype.getNextInstanceNode = function () {
+
+ var node = this;
+ var siblingIndex, childrenAddresses, siblingPosition;
+
+ if (this.value) {
+ // Need upwards traversal first
+ while (node) {
+ siblingIndex = node.address.slice(-1)[0];
+ node = node.parent;
+ if (!node) {
+ // end of MIB
+ return null;
+ } else {
+ childrenAddresses = Object.keys(node.children).sort((a, b) => a - b);
+ siblingPosition = childrenAddresses.indexOf(siblingIndex.toString());
+ if (siblingPosition + 1 < childrenAddresses.length) {
+ node = node.children[childrenAddresses[siblingPosition + 1]];
+ break;
+ }
+ }
+ }
+ }
+ // Descent
+ while (node) {
+ if (node.value) {
+ return node;
+ }
+ childrenAddresses = Object.keys(node.children).sort((a, b) => a - b);
+ node = node.children[childrenAddresses[0]];
+ if (!node) {
+ // unexpected
+ return null;
+ }
+ }
+};
+
+MibNode.prototype.delete = function () {
+ if (Object.keys(this.children).length > 0) {
+ throw new Error("Cannot delete non-leaf MIB node");
+ }
+ var addressLastPart = this.address.slice(-1)[0];
+ delete this.parent.children[addressLastPart];
+ this.parent = null;
+};
+
+MibNode.prototype.pruneUpwards = function () {
+ if (!this.parent) {
+ return;
+ }
+ if (Object.keys(this.children).length == 0) {
+ var lastAddressPart = this.address.slice(-1)[0].toString();
+ delete this.parent.children[lastAddressPart];
+ this.parent.pruneUpwards();
+ this.parent = null;
+ }
+};
+
+MibNode.prototype.dump = function (options) {
+ var valueString;
+ if ((!options.leavesOnly || options.showProviders) && this.provider) {
+ console.log(this.oid + " [" + MibProviderType[this.provider.type] + ": " + this.provider.name + "]");
+ } else if ((!options.leavesOnly) || Object.keys(this.children).length == 0) {
+ if (this.value) {
+ valueString = " = ";
+ valueString += options.showTypes ? ObjectType[this.valueType] + ": " : "";
+ valueString += options.showValues ? this.value : "";
+ } else {
+ valueString = "";
+ }
+ console.log(this.oid + valueString);
+ }
+ for (var node of Object.keys(this.children).sort((a, b) => a - b)) {
+ this.children[node].dump(options);
+ }
+};
+
+MibNode.oidIsDescended = function (oid, ancestor) {
+ var ancestorAddress = Mib.convertOidToAddress(ancestor);
+ var address = Mib.convertOidToAddress(oid);
+ var isAncestor = true;
+
+ if (address.length <= ancestorAddress.length) {
+ return false;
+ }
+
+ ancestorAddress.forEach(function (o, i) {
+ if (address[i] !== ancestorAddress[i]) {
+ isAncestor = false;
+ }
+ });
+
+ return isAncestor;
+};
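+
+// For example (illustrative): MibNode.oidIsDescended("1.3.6.1.2.1", "1.3.6.1")
+// is true, while MibNode.oidIsDescended("1.3.6.1", "1.3.6.1") is false: an OID
+// is not considered descended from itself, as the length check above rejects
+// equal-length addresses.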
+
+var Mib = function () {
+ this.root = new MibNode([], null);
+ this.providers = {};
+ this.providerNodes = {};
+};
+
+Mib.prototype.addNodesForOid = function (oidString) {
+ var address = Mib.convertOidToAddress(oidString);
+ return this.addNodesForAddress(address);
+};
+
+Mib.prototype.addNodesForAddress = function (address) {
+ var node;
+ var i;
+
+ node = this.root;
+
+ for (i = 0; i < address.length; i++) {
+ if (!node.children.hasOwnProperty(address[i])) {
+ node.children[address[i]] = new MibNode(address.slice(0, i + 1), node);
+ }
+ node = node.children[address[i]];
+ }
+
+ return node;
+};
+
+Mib.prototype.lookup = function (oid) {
+ var address;
+ var i;
+ var node;
+
+ address = Mib.convertOidToAddress(oid);
+ node = this.root;
+ for (i = 0; i < address.length; i++) {
+ if (!node.children.hasOwnProperty(address[i])) {
+ return null
+ }
+ node = node.children[address[i]];
+ }
+
+ return node;
+};
+
+Mib.prototype.getProviderNodeForInstance = function (instanceNode) {
+ if (instanceNode.provider) {
+ throw new ReferenceError("Instance node has provider which should never happen");
+ }
+ return instanceNode.getAncestorProvider();
+};
+
+Mib.prototype.addProviderToNode = function (provider) {
+ var node = this.addNodesForOid(provider.oid);
+
+ node.provider = provider;
+ if (provider.type == MibProviderType.Table) {
+ if (!provider.index) {
+ provider.index = [1];
+ }
+ }
+ this.providerNodes[provider.name] = node;
+ return node;
+};
+
+Mib.prototype.registerProvider = function (provider) {
+ this.providers[provider.name] = provider;
+};
+
+Mib.prototype.unregisterProvider = function (name) {
+ var providerNode = this.providerNodes[name];
+ if (providerNode) {
+ var providerNodeParent = providerNode.parent;
+ providerNode.delete();
+ providerNodeParent.pruneUpwards();
+ delete this.providerNodes[name];
+ }
+ delete this.providers[name];
+};
+
+Mib.prototype.getProvider = function (name) {
+ return this.providers[name];
+};
+
+Mib.prototype.getProviders = function () {
+ return this.providers;
+};
+
+Mib.prototype.getScalarValue = function (scalarName) {
+ var providerNode = this.providerNodes[scalarName];
+ if (!providerNode || !providerNode.provider || providerNode.provider.type != MibProviderType.Scalar) {
+ throw new ReferenceError("Failed to get node for registered MIB provider " + scalarName);
+ }
+ var instanceAddress = providerNode.address.concat([0]);
+ var instanceNode = this.lookup(instanceAddress);
+ if (!instanceNode) {
+ throw new Error("Failed to find instance node for registered MIB provider " + scalarName);
+ }
+ return instanceNode.value;
+};
+
+Mib.prototype.setScalarValue = function (scalarName, newValue) {
+ var providerNode;
+ var instanceNode;
+
+ if (!this.providers[scalarName]) {
+ throw new ReferenceError("Provider " + scalarName + " not registered with this MIB");
+ }
+
+ providerNode = this.providerNodes[scalarName];
+ if (!providerNode) {
+ providerNode = this.addProviderToNode(this.providers[scalarName]);
+ }
+ if (!providerNode || !providerNode.provider || providerNode.provider.type != MibProviderType.Scalar) {
+ throw new ReferenceError("Could not find MIB node for registered provider " + scalarName);
+ }
+ var instanceAddress = providerNode.address.concat([0]);
+ instanceNode = this.lookup(instanceAddress);
+ if (!instanceNode) {
+ this.addNodesForAddress(instanceAddress);
+ instanceNode = this.lookup(instanceAddress);
+ instanceNode.valueType = providerNode.provider.scalarType;
+ }
+ instanceNode.value = newValue;
+};
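+
+// Usage sketch (illustrative comment, not executed): register a scalar provider
+// and set its value; the name, OID and value are made-up examples and "mib" is
+// an instance of this Mib class (or agent.getMib()).
+//
+// mib.registerProvider({
+//     name: "exampleScalar",
+//     type: snmp.MibProviderType.Scalar,
+//     oid: "1.3.6.1.4.1.2000.1",
+//     scalarType: snmp.ObjectType.OctetString,
+//     handler: function (mibRequest) { mibRequest.done(); }
+// });
+// mib.setScalarValue("exampleScalar", "example value");
+// console.log(mib.getScalarValue("exampleScalar")); // "example value"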
+
+Mib.prototype.getProviderNodeForTable = function (table) {
+ var providerNode;
+ var provider;
+
+ providerNode = this.providerNodes[table];
+ if (!providerNode) {
+ throw new ReferenceError("No MIB provider registered for " + table);
+ }
+ provider = providerNode.provider;
+ if (!provider) {
+ throw new ReferenceError("No MIB provider definition for registered provider " + table);
+ }
+ if (provider.type != MibProviderType.Table) {
+ throw new TypeError("Registered MIB provider " + table +
+ " is not of the correct type (is type " + MibProviderType[provider.type] + ")");
+ }
+ return providerNode;
+};
+
+Mib.prototype.addTableRow = function (table, row) {
+ var providerNode;
+ var provider;
+ var instance = [];
+ var instanceAddress;
+ var instanceNode;
+
+ if (this.providers[table] && !this.providerNodes[table]) {
+ this.addProviderToNode(this.providers[table]);
+ }
+ providerNode = this.getProviderNodeForTable(table);
+ provider = providerNode.provider;
+ for (var indexPart of provider.index) {
+ var columnPosition = provider.columns.findIndex(column => column.number == indexPart);
+ instance.push(row[columnPosition]);
+ }
+ for (var i = 0; i < providerNode.provider.columns.length; i++) {
+ var column = providerNode.provider.columns[i];
+ instanceAddress = providerNode.address.concat(column.number).concat(instance);
+ this.addNodesForAddress(instanceAddress);
+ instanceNode = this.lookup(instanceAddress);
+ instanceNode.valueType = column.type;
+ instanceNode.value = row[i];
+ }
+};
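+
+// Usage sketch (illustrative comment, not executed): a minimal table provider;
+// column 1 serves as the (default) row index, and row values are supplied in
+// column order. Names and OIDs are made-up examples.
+//
+// mib.registerProvider({
+//     name: "exampleTable",
+//     type: snmp.MibProviderType.Table,
+//     oid: "1.3.6.1.4.1.2000.2",
+//     columns: [
+//         { number: 1, name: "exampleIndex", type: snmp.ObjectType.Integer },
+//         { number: 2, name: "exampleName", type: snmp.ObjectType.OctetString }
+//     ],
+//     handler: function (mibRequest) { mibRequest.done(); }
+// });
+// mib.addTableRow("exampleTable", [1, "first row"]);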
+
+Mib.prototype.getTableColumnDefinitions = function (table) {
+ var providerNode;
+ var provider;
+
+ providerNode = this.getProviderNodeForTable(table);
+ provider = providerNode.provider;
+ return provider.columns;
+};
+
+Mib.prototype.getTableColumnCells = function (table, columnNumber) {
+ var providerNode = this.getProviderNodeForTable(table);
+ var columnNode = providerNode.children[columnNumber];
+ var column = [];
+ for (var row of Object.keys(columnNode.children)) {
+ var instanceNode = columnNode.children[row].getInstanceNodeForTableRow();
+ column.push(instanceNode.value);
+ }
+ return column;
+};
+
+Mib.prototype.getTableRowCells = function (table, rowIndex) {
+ var providerNode;
+ var columnNode;
+ var instanceNode;
+ var row = [];
+
+ providerNode = this.getProviderNodeForTable(table);
+ for (var columnNumber of Object.keys(providerNode.children)) {
+ columnNode = providerNode.children[columnNumber];
+ instanceNode = columnNode.getInstanceNodeForTableRowIndex(rowIndex);
+ row.push(instanceNode.value);
+ }
+ return row;
+};
+
+Mib.prototype.getTableCells = function (table, byRows) {
+ var providerNode;
+ var columnNode;
+ var data = [];
+
+ providerNode = this.getProviderNodeForTable(table);
+ for (var columnNumber of Object.keys(providerNode.children)) {
+ columnNode = providerNode.children[columnNumber];
+ var column = [];
+ data.push(column);
+ for (var row of Object.keys(columnNode.children)) {
+ instanceNode = columnNode.children[row].getInstanceNodeForTableRow();
+ column.push(instanceNode.value);
+ }
+ }
+
+ if (byRows) {
+ return Object.keys(data[0]).map(function (c) {
+ return data.map(function (r) {
+ return r[c];
+ });
+ });
+ } else {
+ return data;
+ }
+
+};
+
+Mib.prototype.getTableSingleCell = function (table, columnNumber, rowIndex) {
+ var providerNode;
+ var columnNode;
+ var instanceNode;
+
+ providerNode = this.getProviderNodeForTable(table);
+ columnNode = providerNode.children[columnNumber];
+ instanceNode = columnNode.getInstanceNodeForTableRowIndex(rowIndex);
+ return instanceNode.value;
+};
+
+Mib.prototype.setTableSingleCell = function (table, columnNumber, rowIndex, value) {
+ var providerNode;
+ var columnNode;
+ var instanceNode;
+
+ providerNode = this.getProviderNodeForTable(table);
+ columnNode = providerNode.children[columnNumber];
+ instanceNode = columnNode.getInstanceNodeForTableRowIndex(rowIndex);
+ instanceNode.value = value;
+};
+
+Mib.prototype.deleteTableRow = function (table, rowIndex) {
+ var providerNode;
+ var columnNode;
+ var instanceNode;
+ var row = [];
+
+ providerNode = this.getProviderNodeForTable(table);
+ for (var columnNumber of Object.keys(providerNode.children)) {
+ columnNode = providerNode.children[columnNumber];
+ instanceNode = columnNode.getInstanceNodeForTableRowIndex(rowIndex);
+ if (instanceNode) {
+ var instanceParentNode = instanceNode.parent;
+ instanceNode.delete();
+ instanceParentNode.pruneUpwards();
+ } else {
+ throw new ReferenceError("Cannot find row for index " + rowIndex + " at registered provider " + table);
+ }
+ }
+ return row;
+};
+
+Mib.prototype.dump = function (options) {
+ if (!options) {
+ options = {};
+ }
+ var completedOptions = {
+ leavesOnly: options.leavesOnly === undefined ? true : options.leavesOnly,
+ showProviders: options.showProviders === undefined ? true : options.showProviders,
+ showValues: options.showValues === undefined ? true : options.showValues,
+ showTypes: options.showTypes === undefined ? true : options.showTypes
+ };
+ this.root.dump(completedOptions);
+};
+
+Mib.convertOidToAddress = function (oid) {
+ var address;
+ var oidArray;
+ var i;
+
+ if (typeof (oid) === 'object' && util.isArray(oid)) {
+ address = oid;
+ } else if (typeof (oid) === 'string') {
+ address = oid.split('.');
+ } else {
+ throw new TypeError('oid (string or array) is required');
+ }
+
+ if (address.length < 3)
+ throw new RangeError('object identifier is too short');
+
+ oidArray = [];
+ for (i = 0; i < address.length; i++) {
+ var n;
+
+ if (address[i] === '')
+ continue;
+
+ if (address[i] === true || address[i] === false) {
+ throw new TypeError('object identifier component ' +
+ address[i] + ' is malformed');
+ }
+
+ n = Number(address[i]);
+
+ if (isNaN(n)) {
+ throw new TypeError('object identifier component ' +
+ address[i] + ' is malformed');
+ }
+ if (n % 1 !== 0) {
+ throw new TypeError('object identifier component ' +
+ address[i] + ' is not an integer');
+ }
+ if (i === 0 && n > 2) {
+ throw new RangeError('object identifier does not ' +
+ 'begin with 0, 1, or 2');
+ }
+ if (i === 1 && n > 39) {
+ throw new RangeError('object identifier second ' +
+ 'component ' + n + ' exceeds encoding limit of 39');
+ }
+ if (n < 0) {
+ throw new RangeError('object identifier component ' +
+ address[i] + ' is negative');
+ }
+ if (n > MAX_INT32) {
+ throw new RangeError('object identifier component ' +
+ address[i] + ' is too large');
+ }
+ oidArray.push(n);
+ }
+
+ return oidArray;
+
+};
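+
+// For example (illustrative): Mib.convertOidToAddress("1.3.6.1") returns
+// [1, 3, 6, 1], while "3.6.1" (first component above 2), "1.40.1" (second
+// component above 39) and "1.3.x" (non-numeric component) all throw.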
+
+var MibRequest = function (requestDefinition) {
+ this.operation = requestDefinition.operation;
+ this.address = Mib.convertOidToAddress(requestDefinition.oid);
+ this.oid = this.address.join('.');
+ this.providerNode = requestDefinition.providerNode;
+ this.instanceNode = requestDefinition.instanceNode;
+};
+
+MibRequest.prototype.isScalar = function () {
+ return this.providerNode && this.providerNode.provider &&
+ this.providerNode.provider.type == MibProviderType.Scalar;
+};
+
+MibRequest.prototype.isTabular = function () {
+ return this.providerNode && this.providerNode.provider &&
+ this.providerNode.provider.type == MibProviderType.Table;
+};
+
+var Agent = function (options, callback) {
+ DEBUG = options.debug;
+ this.listener = new Listener(options, this);
+ this.engine = new Engine(options.engineID);
+ this.authorizer = new Authorizer();
+ this.mib = new Mib();
+ this.callback = callback || function () {
+ };
+ this.context = "";
+};
+
+Agent.prototype.getMib = function () {
+ return this.mib;
+};
+
+Agent.prototype.getAuthorizer = function () {
+ return this.authorizer;
+};
+
+Agent.prototype.registerProvider = function (provider) {
+ this.mib.registerProvider(provider);
+};
+
+Agent.prototype.unregisterProvider = function (provider) {
+ this.mib.unregisterProvider(provider);
+};
+
+Agent.prototype.getProvider = function (provider) {
+ return this.mib.getProvider(provider);
+};
+
+Agent.prototype.getProviders = function () {
+ return this.mib.getProviders();
+};
+
+Agent.prototype.onMsg = function (buffer, rinfo) {
+ var message = Listener.processIncoming(buffer, this.authorizer, this.callback);
+ var reportMessage;
+ var responseMessage;
+
+ if (!message) {
+ return;
+ }
+
+ // SNMPv3 discovery
+ if (message.version == Version3 && message.pdu.type == PduType.GetRequest &&
+ !message.hasAuthoritativeEngineID() && message.isReportable()) {
+ reportMessage = message.createReportResponseMessage(this.engine, this.context);
+ this.listener.send(reportMessage, rinfo);
+ return;
+ }
+
+ // Request processing
+ debug(JSON.stringify(message.pdu, null, 2));
+ if (message.pdu.type == PduType.GetRequest) {
+ responseMessage = this.request(message, rinfo);
+ } else if (message.pdu.type == PduType.SetRequest) {
+ responseMessage = this.request(message, rinfo);
+ } else if (message.pdu.type == PduType.GetNextRequest) {
+ responseMessage = this.getNextRequest(message, rinfo);
+ } else if (message.pdu.type == PduType.GetBulkRequest) {
+ responseMessage = this.getBulkRequest(message, rinfo);
+ } else {
+ this.callback(new RequestInvalidError("Unexpected PDU type " +
+ message.pdu.type + " (" + PduType[message.pdu.type] + ")"));
+ }
+
+};
+
+Agent.prototype.request = function (requestMessage, rinfo) {
+ var me = this;
+ var varbindsCompleted = 0;
+ var requestPdu = requestMessage.pdu;
+ var varbindsLength = requestPdu.varbinds.length;
+ var responsePdu = requestPdu.getResponsePduForRequest();
+
+ for (var i = 0; i < requestPdu.varbinds.length; i++) {
+ var requestVarbind = requestPdu.varbinds[i];
+ var instanceNode = this.mib.lookup(requestVarbind.oid);
+ var providerNode;
+ var mibRequest;
+ var handler;
+ var responseVarbindType;
+ var responseVarbind;
+
+ if (!instanceNode) {
+ mibRequest = new MibRequest({
+ operation: requestPdu.type,
+ oid: requestVarbind.oid
+ });
+ handler = function getNsoHandler(mibRequestForNso) {
+ mibRequestForNso.done({
+ errorStatus: ErrorStatus.NoSuchName,
+ errorIndex: i
+ });
+ };
+ } else {
+ providerNode = this.mib.getProviderNodeForInstance(instanceNode);
+ mibRequest = new MibRequest({
+ operation: requestPdu.type,
+ providerNode: providerNode,
+ instanceNode: instanceNode,
+ oid: requestVarbind.oid
+ });
+ handler = providerNode.provider.handler;
+ }
+
+ mibRequest.done = function (error) {
+ if (error) {
+ responsePdu.errorStatus = error.errorStatus;
+ responsePdu.errorIndex = error.errorIndex;
+ responseVarbind = {
+ oid: mibRequest.oid,
+ type: ObjectType.Null,
+ value: null
+ };
+ } else {
+ if (requestPdu.type == PduType.SetRequest) {
+ mibRequest.instanceNode.value = requestVarbind.value;
+ }
+ if (requestPdu.type == PduType.GetNextRequest && requestVarbind.type == ObjectType.EndOfMibView) {
+ responseVarbindType = ObjectType.EndOfMibView;
+ } else {
+ responseVarbindType = mibRequest.instanceNode.valueType;
+ }
+ responseVarbind = {
+ oid: mibRequest.oid,
+ type: responseVarbindType,
+ value: mibRequest.instanceNode.value
+ };
+ }
+ me.setSingleVarbind(responsePdu, i, responseVarbind);
+ if (++varbindsCompleted == varbindsLength) {
+ me.sendResponse.call(me, rinfo, requestMessage, responsePdu);
+ }
+ };
+ if (handler) {
+ handler(mibRequest);
+ } else {
+ mibRequest.done();
+ }
+ }
+};
+
+Agent.prototype.addGetNextVarbind = function (targetVarbinds, startOid) {
+ var startNode = this.mib.lookup(startOid);
+ var getNextNode;
+
+ if (!startNode) {
+ // Off-tree start specified
+ targetVarbinds.push({
+ oid: startOid,
+ type: ObjectType.Null,
+ value: null
+ });
+ } else {
+ getNextNode = startNode.getNextInstanceNode();
+ if (!getNextNode) {
+ // End of MIB
+ targetVarbinds.push({
+ oid: startOid,
+ type: ObjectType.EndOfMibView,
+ value: null
+ });
+ } else {
+ // Normal response
+ targetVarbinds.push({
+ oid: getNextNode.oid,
+ type: getNextNode.valueType,
+ value: getNextNode.value
+ });
+ }
+ }
+ return getNextNode;
+};
+
+Agent.prototype.getNextRequest = function (requestMessage, rinfo) {
+ var requestPdu = requestMessage.pdu;
+ var varbindsLength = requestPdu.varbinds.length;
+ var getNextVarbinds = [];
+
+ for (var i = 0; i < varbindsLength; i++) {
+ this.addGetNextVarbind(getNextVarbinds, requestPdu.varbinds[i].oid);
+ }
+
+ requestMessage.pdu.varbinds = getNextVarbinds;
+ this.request(requestMessage, rinfo);
+};
+
+Agent.prototype.getBulkRequest = function (requestMessage, rinfo) {
+ var requestPdu = requestMessage.pdu;
+ var requestVarbinds = requestPdu.varbinds;
+ var getBulkVarbinds = [];
+ var startOid = [];
+ var getNextNode;
+
+ for (var n = 0; n < requestPdu.nonRepeaters; n++) {
+ this.addGetNextVarbind(getBulkVarbinds, requestVarbinds[n].oid);
+ }
+
+ for (var v = requestPdu.nonRepeaters; v < requestVarbinds.length; v++) {
+ startOid.push(requestVarbinds[v].oid);
+ }
+
+ for (var r = 0; r < requestPdu.maxRepetitions; r++) {
+ for (var v = requestPdu.nonRepeaters; v < requestVarbinds.length; v++) {
+ getNextNode = this.addGetNextVarbind(getBulkVarbinds, startOid[v - requestPdu.nonRepeaters]);
+ if (getNextNode) {
+ startOid[v - requestPdu.nonRepeaters] = getNextNode.oid;
+ }
+ }
+ }
+
+ requestMessage.pdu.varbinds = getBulkVarbinds;
+ this.request(requestMessage, rinfo);
+};
+
+Agent.prototype.setSingleVarbind = function (responsePdu, index, responseVarbind) {
+ responsePdu.varbinds[index] = responseVarbind;
+};
+
+Agent.prototype.sendResponse = function (rinfo, requestMessage, responsePdu) {
+ var responseMessage = requestMessage.createResponseForRequest(responsePdu);
+ this.listener.send(responseMessage, rinfo);
+ this.callback(null, Listener.formatCallbackData(responseMessage.pdu, rinfo));
+};
+
+Agent.create = function (options, callback) {
+ var agent = new Agent(options, callback);
+ agent.listener.startListening();
+ return agent;
+};
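+
+// Usage sketch (illustrative comment, not executed; assumes this module is
+// required as "snmp"): start an agent, authorize a community and obtain its
+// MIB for provider registration.
+//
+// var agent = snmp.createAgent({ port: 161 }, function (error, data) {
+//     if (error)
+//         console.error(error);
+// });
+// agent.getAuthorizer().addCommunity("public");
+// var mib = agent.getMib();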
+
+/*****************************************************************************
+ ** Exports
+ **/
+
+exports.Session = Session;
+
+exports.createSession = function (target, community, options) {
+ if (options && options.version && !(options.version == Version1 || options.version == Version2c)) {
+ throw new ResponseInvalidError("SNMP community session requested but version '" + options.version + "' specified in options not valid");
+ } else {
+ return new Session(target, community, options);
+ }
+};
+
+exports.createV3Session = function (target, user, options) {
+ options = options || {};
+ if (options.version && options.version != Version3) {
+ throw new ResponseInvalidError("SNMPv3 session requested but version '" + options.version + "' specified in options");
+ } else {
+ options.version = Version3;
+ }
+ return new Session(target, user, options);
+};
+
+exports.createReceiver = Receiver.create;
+exports.createAgent = Agent.create;
+
+exports.isVarbindError = isVarbindError;
+exports.varbindError = varbindError;
+
+exports.Version1 = Version1;
+exports.Version2c = Version2c;
+exports.Version3 = Version3;
+exports.Version = Version;
+
+exports.ErrorStatus = ErrorStatus;
+exports.TrapType = TrapType;
+exports.ObjectType = ObjectType;
+exports.PduType = PduType;
+exports.MibProviderType = MibProviderType;
+exports.SecurityLevel = SecurityLevel;
+exports.AuthProtocols = AuthProtocols;
+exports.PrivProtocols = PrivProtocols;
+
+exports.ResponseInvalidError = ResponseInvalidError;
+exports.RequestInvalidError = RequestInvalidError;
+exports.RequestFailedError = RequestFailedError;
+exports.RequestTimedOutError = RequestTimedOutError;
+
+/**
+ ** We've added this for testing.
+ **/
+exports.ObjectParser = {
+ readInt: readInt,
+ readUint: readUint
+};
+exports.Authentication = Authentication;
+exports.Encryption = Encryption;
diff --git a/collectors/node.d.plugin/node_modules/netdata.js b/collectors/node.d.plugin/node_modules/netdata.js
new file mode 100644
index 0000000..603922c
--- /dev/null
+++ b/collectors/node.d.plugin/node_modules/netdata.js
@@ -0,0 +1,654 @@
+'use strict';
+
+// netdata
+// real-time performance and health monitoring, done right!
+// (C) 2016 Costa Tsaousis <costa@tsaousis.gr>
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+var url = require('url');
+var http = require('http');
+var util = require('util');
+
+/*
+var netdata = require('netdata');
+
+var example_chart = {
+ id: 'id', // the unique id of the chart
+ name: 'name', // the name of the chart
+ title: 'title', // the title of the chart
+ units: 'units', // the units of the chart dimensions
+ family: 'family', // the family of the chart
+ context: 'context', // the context of the chart
+ type: netdata.chartTypes.line, // the type of the chart
+ priority: 0, // the priority relative to others in the same family
+ update_every: 1, // the expected update frequency of the chart
+ dimensions: {
+ 'dim1': {
+ id: 'dim1', // the unique id of the dimension
+ name: 'name', // the name of the dimension
+ algorithm: netdata.chartAlgorithms.absolute, // the id of the netdata algorithm
+ multiplier: 1, // the multiplier
+ divisor: 1, // the divisor
+ hidden: false, // is hidden (boolean)
+ },
+ 'dim2': {
+ id: 'dim2', // the unique id of the dimension
+ name: 'name', // the name of the dimension
+ algorithm: 'absolute', // the id of the netdata algorithm
+ multiplier: 1, // the multiplier
+ divisor: 1, // the divisor
+ hidden: false, // is hidden (boolean)
+ }
+ // add as many dimensions as needed
+ }
+};
+*/
+
+var netdata = {
+ options: {
+ filename: __filename,
+ DEBUG: false,
+ update_every: 1
+ },
+
+ chartAlgorithms: {
+ incremental: 'incremental',
+ absolute: 'absolute',
+ percentage_of_absolute_row: 'percentage-of-absolute-row',
+ percentage_of_incremental_row: 'percentage-of-incremental-row'
+ },
+
+ chartTypes: {
+ line: 'line',
+ area: 'area',
+ stacked: 'stacked'
+ },
+
+ services: [],
+ modules_configuring: 0,
+ charts: {},
+
+ processors: {
+ http: {
+ name: 'http',
+
+ process: function(service, callback) {
+ var __DEBUG = netdata.options.DEBUG;
+
+ if(__DEBUG === true)
+ netdata.debug(service.module.name + ': ' + service.name + ': making ' + this.name + ' request: ' + netdata.stringify(service.request));
+
+ var req = http.request(service.request, function(response) {
+ if(__DEBUG === true) netdata.debug(service.module.name + ': ' + service.name + ': got server response...');
+
+ var end = false;
+ var data = '';
+ response.setEncoding('utf8');
+
+ if(response.statusCode !== 200) {
+ if(end === false) {
+ service.error('Got HTTP code ' + response.statusCode + ', failed to get data.');
+ end = true;
+ return callback(null);
+ }
+ }
+
+ response.on('data', function(chunk) {
+ if(end === false) data += chunk;
+ });
+
+ response.on('error', function() {
+ if(end === false) {
+ service.error(': Read error, failed to get data.');
+ end = true;
+ return callback(null);
+ }
+ });
+
+ response.on('end', function() {
+ if(end === false) {
+ if(__DEBUG === true) netdata.debug(service.module.name + ': ' + service.name + ': read completed.');
+ end = true;
+ return callback(data);
+ }
+ });
+ });
+
+ req.on('error', function(e) {
+ if(__DEBUG === true) netdata.debug('Failed to make request: ' + netdata.stringify(service.request) + ', message: ' + e.message);
+ service.error('Failed to make request, message: ' + e.message);
+ return callback(null);
+ });
+
+ // write data to request body
+ if(typeof service.postData !== 'undefined' && service.request.method === 'POST') {
+ if(__DEBUG === true) netdata.debug(service.module.name + ': ' + service.name + ': posting data: ' + service.postData);
+ req.write(service.postData);
+ }
+
+ req.end();
+ }
+ }
+ },
+
+ stringify: function(obj) {
+ return util.inspect(obj, {depth: 10});
+ },
+
+ zeropad2: function(s) {
+ return ("00" + s).slice(-2);
+ },
+
+ logdate: function(d) {
+ if(typeof d === 'undefined') d = new Date();
+ return d.getFullYear().toString() + '-' + this.zeropad2(d.getMonth() + 1) + '-' + this.zeropad2(d.getDate())
+ + ' ' + this.zeropad2(d.getHours()) + ':' + this.zeropad2(d.getMinutes()) + ':' + this.zeropad2(d.getSeconds());
+ },
+
+ // show debug info, if debug is enabled
+ debug: function(msg) {
+ if(this.options.DEBUG === true) {
+ console.error(this.logdate() + ': ' + netdata.options.filename + ': DEBUG: ' + ((typeof(msg) === 'object')?netdata.stringify(msg):msg).toString());
+ }
+ },
+
+ // log an error
+ error: function(msg) {
+ console.error(this.logdate() + ': ' + netdata.options.filename + ': ERROR: ' + ((typeof(msg) === 'object')?netdata.stringify(msg):msg).toString());
+ },
+
+ // send data to netdata
+ send: function(msg) {
+ console.log(msg.toString());
+ },
+
+ service: function(service) {
+ if(typeof service === 'undefined')
+ service = {};
+
+ var now = Date.now();
+
+ service._current_chart = null; // the current chart we work on
+ service._queue = ''; // data to be sent to netdata
+
+ service.error_reported = false; // error log flood control
+
+ service.added = false; // added to netdata.services
+ service.enabled = true;
+ service.updates = 0;
+ service.running = false;
+ service.started = 0;
+ service.ended = 0;
+
+ if(typeof service.module === 'undefined') {
+ service.module = { name: 'not-defined-module' };
+ service.error('Attempted to create service without a module.');
+ service.enabled = false;
+ }
+
+ if(typeof service.name === 'undefined') {
+ service.name = 'unnamed@' + service.module.name + '/' + now;
+ }
+
+ if(typeof service.processor === 'undefined')
+ service.processor = netdata.processors.http;
+
+ if(typeof service.update_every === 'undefined')
+ service.update_every = service.module.update_every;
+
+ if(typeof service.update_every === 'undefined')
+ service.update_every = netdata.options.update_every;
+
+ if(service.update_every < netdata.options.update_every)
+ service.update_every = netdata.options.update_every;
+
+ // align the runs
+ service.next_run = now - (now % (service.update_every * 1000)) + (service.update_every * 1000);
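+ // Worked example (illustrative): with update_every = 1 (1000 ms) and
+ // now = 1000500 ms, next_run = 1000500 - 500 + 1000 = 1001000 ms, so all
+ // services fire on whole multiples of their update period.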
+
+ service.commit = function() {
+ if(this.added !== true) {
+ this.added = true;
+
+ var now = Date.now();
+ this.next_run = now - (now % (service.update_every * 1000)) + (service.update_every * 1000);
+
+ netdata.services.push(this);
+ if(netdata.options.DEBUG === true) netdata.debug(this.module.name + ': ' + this.name + ': service committed.');
+ }
+ };
+
+ service.execute = function(responseProcessor) {
+ var __DEBUG = netdata.options.DEBUG;
+
+ if(service.enabled === false)
+ return responseProcessor(null);
+
+ this.module.active++;
+ this.running = true;
+ this.started = Date.now();
+ this.updates++;
+
+ if(__DEBUG === true)
+ netdata.debug(this.module.name + ': ' + this.name + ': making ' + this.processor.name + ' request: ' + netdata.stringify(this));
+
+ this.processor.process(this, function(response) {
+ service.ended = Date.now();
+ service.duration = service.ended - service.started;
+
+ if(typeof response === 'undefined')
+ response = null;
+
+ if(response !== null)
+ service.errorClear();
+
+ if(__DEBUG === true)
+ netdata.debug(service.module.name + ': ' + service.name + ': processing ' + service.processor.name + ' response (received in ' + (service.ended - service.started).toString() + ' ms)');
+
+ try {
+ responseProcessor(service, response);
+ }
+ catch(e) {
+ netdata.error(e);
+ service.error("responseProcessor failed process response data.");
+ }
+
+ service.running = false;
+ service.module.active--;
+ if(service.module.active < 0) {
+ service.module.active = 0;
+ if(__DEBUG === true)
+ netdata.debug(service.module.name + ': active module counter below zero.');
+ }
+
+ if(service.module.active === 0) {
+ // check if we run under configure
+ if(service.module.configure_callback !== null) {
+ if(__DEBUG === true)
+ netdata.debug(service.module.name + ': configuration finish callback called from processResponse().');
+
+ var configure_callback = service.module.configure_callback;
+ service.module.configure_callback = null;
+ configure_callback();
+ }
+ }
+ });
+ };
+
+ service.update = function() {
+ if(netdata.options.DEBUG === true)
+ netdata.debug(this.module.name + ': ' + this.name + ': starting data collection...');
+
+ this.module.update(this, function() {
+ if(netdata.options.DEBUG === true)
+ netdata.debug(service.module.name + ': ' + service.name + ': data collection ended in ' + service.duration.toString() + ' ms.');
+ });
+ };
+
+ service.error = function(message) {
+ if(this.error_reported === false) {
+ netdata.error(this.module.name + ': ' + this.name + ': ' + message);
+ this.error_reported = true;
+ }
+ else if(netdata.options.DEBUG === true)
+ netdata.debug(this.module.name + ': ' + this.name + ': ' + message);
+ };
+
+ service.errorClear = function() {
+ this.error_reported = false;
+ };
+
+ service.queue = function(txt) {
+ this._queue += txt + '\n';
+ };
+
+ service._send_chart_to_netdata = function(chart) {
+ // internal function to send a chart to netdata
+ this.queue('CHART "' + chart.id + '" "' + chart.name + '" "' + chart.title + '" "' + chart.units + '" "' + chart.family + '" "' + chart.context + '" "' + chart.type + '" ' + chart.priority.toString() + ' ' + chart.update_every.toString());
+
+ if(typeof(chart.dimensions) !== 'undefined') {
+ var dims = Object.keys(chart.dimensions);
+ var len = dims.length;
+ while(len--) {
+ var d = chart.dimensions[dims[len]];
+
+ this.queue('DIMENSION "' + d.id + '" "' + d.name + '" "' + d.algorithm + '" ' + d.multiplier.toString() + ' ' + d.divisor.toString() + ' ' + ((d.hidden === true) ? 'hidden' : '').toString());
+ d._created = true;
+ d._updated = false;
+ }
+ }
+
+ chart._created = true;
+ chart._updated = false;
+ };
+
+ // begin data collection for a chart
+ service.begin = function(chart) {
+ if(this._current_chart !== null && this._current_chart !== chart) {
+ this.error('Called begin() for chart ' + chart.id + ' while chart ' + this._current_chart.id + ' is still open. Closing it.');
+ this.end();
+ }
+
+ if(typeof(chart.id) === 'undefined' || netdata.charts[chart.id] !== chart) {
+ this.error('Called begin() for chart ' + chart.id + ' that is not mine. Where did you find it? Ignoring it.');
+ return false;
+ }
+
+ if(netdata.options.DEBUG === true) netdata.debug('setting current chart to ' + chart.id);
+ this._current_chart = chart;
+ this._current_chart._began = true;
+
+ var now = this.ended;
+
+ if(this._current_chart._dimensions_count !== 0) {
+ if(this._current_chart._created === false || this._current_chart._updated === true)
+ this._send_chart_to_netdata(this._current_chart);
+
+ this.queue('BEGIN ' + this._current_chart.id + ' ' + ((this._current_chart._last_updated > 0)?((now - this._current_chart._last_updated) * 1000):'').toString());
+ }
+ // else this.error('Called begin() for chart ' + chart.id + ' which is empty.');
+
+ this._current_chart._last_updated = now;
+ this._current_chart._began = true;
+ this._current_chart._counter++;
+
+ return true;
+ };
+
+ // set a collected value for a chart
+ // we do most things on the first value we attempt to set
+ service.set = function(dimension, value) {
+ if(this._current_chart === null) {
+ this.error('Called set(' + dimension + ', ' + value + ') without an open chart.');
+ return false;
+ }
+
+ if(typeof(this._current_chart.dimensions[dimension]) === 'undefined') {
+ this.error('Called set(' + dimension + ', ' + value + ') but dimension "' + dimension + '" does not exist in chart "' + this._current_chart.id + '".');
+ return false;
+ }
+
+ if(typeof value === 'undefined' || value === null)
+ return false;
+
+ if(this._current_chart._dimensions_count !== 0)
+ this.queue('SET ' + dimension + ' = ' + value.toString());
+
+ return true;
+ };
+
+ // end data collection for the current chart - after calling begin()
+ service.end = function() {
+ if(this._current_chart === null || this._current_chart._began === false) {
+ this.error('Called end() without an open chart.');
+ return false;
+ }
+
+ if(this._current_chart._dimensions_count !== 0) {
+ this.queue('END');
+ netdata.send(this._queue);
+ }
+
+ this._queue = '';
+ this._current_chart._began = false;
+ if(netdata.options.DEBUG === true) netdata.debug('sent chart ' + this._current_chart.id);
+ this._current_chart = null;
+ return true;
+ };
+
+ // discard the collected values for the current chart - after calling begin()
+ service.flush = function() {
+ if(this._current_chart === null || this._current_chart._began === false) {
+ this.error('Called flush() without an open chart.');
+ return false;
+ }
+
+ this._queue = '';
+ this._current_chart._began = false;
+ this._current_chart = null;
+ return true;
+ };
+
+ // create a netdata chart
+ service.chart = function(id, chart) {
+ var __DEBUG = netdata.options.DEBUG;
+
+ if(typeof(netdata.charts[id]) === 'undefined') {
+ netdata.charts[id] = {
+ _created: false,
+ _updated: true,
+ _began: false,
+ _counter: 0,
+ _last_updated: 0,
+ _dimensions_count: 0,
+ id: id,
+ name: id,
+ title: 'untitled chart',
+ units: 'a unit',
+ family: '',
+ context: '',
+ type: netdata.chartTypes.line,
+ priority: 50000,
+ update_every: netdata.options.update_every,
+ dimensions: {}
+ };
+ }
+
+ var c = netdata.charts[id];
+
+ if(typeof(chart.name) !== 'undefined' && chart.name !== c.name) {
+ if(__DEBUG === true) netdata.debug('chart ' + id + ' updated its name');
+ c.name = chart.name;
+ c._updated = true;
+ }
+
+ if(typeof(chart.title) !== 'undefined' && chart.title !== c.title) {
+ if(__DEBUG === true) netdata.debug('chart ' + id + ' updated its title');
+ c.title = chart.title;
+ c._updated = true;
+ }
+
+ if(typeof(chart.units) !== 'undefined' && chart.units !== c.units) {
+ if(__DEBUG === true) netdata.debug('chart ' + id + ' updated its units');
+ c.units = chart.units;
+ c._updated = true;
+ }
+
+ if(typeof(chart.family) !== 'undefined' && chart.family !== c.family) {
+ if(__DEBUG === true) netdata.debug('chart ' + id + ' updated its family');
+ c.family = chart.family;
+ c._updated = true;
+ }
+
+ if(typeof(chart.context) !== 'undefined' && chart.context !== c.context) {
+ if(__DEBUG === true) netdata.debug('chart ' + id + ' updated its context');
+ c.context = chart.context;
+ c._updated = true;
+ }
+
+ if(typeof(chart.type) !== 'undefined' && chart.type !== c.type) {
+ if(__DEBUG === true) netdata.debug('chart ' + id + ' updated its type');
+ c.type = chart.type;
+ c._updated = true;
+ }
+
+ if(typeof(chart.priority) !== 'undefined' && chart.priority !== c.priority) {
+ if(__DEBUG === true) netdata.debug('chart ' + id + ' updated its priority');
+ c.priority = chart.priority;
+ c._updated = true;
+ }
+
+ if(typeof(chart.update_every) !== 'undefined' && chart.update_every !== c.update_every) {
+ if(__DEBUG === true) netdata.debug('chart ' + id + ' updated its update_every from ' + c.update_every + ' to ' + chart.update_every);
+ c.update_every = chart.update_every;
+ c._updated = true;
+ }
+
+ if(typeof(chart.dimensions) !== 'undefined') {
+ var dims = Object.keys(chart.dimensions);
+ var len = dims.length;
+ while(len--) {
+ var x = dims[len];
+
+ if(typeof(c.dimensions[x]) === 'undefined') {
+ c._dimensions_count++;
+
+ c.dimensions[x] = {
+ _created: false,
+ _updated: false,
+ id: x, // the unique id of the dimension
+ name: x, // the name of the dimension
+ algorithm: netdata.chartAlgorithms.absolute, // the id of the netdata algorithm
+ multiplier: 1, // the multiplier
+ divisor: 1, // the divisor
+ hidden: false // is hidden (boolean)
+ };
+
+ if(__DEBUG === true) netdata.debug('chart ' + id + ' created dimension ' + x);
+ c._updated = true;
+ }
+
+ var dim = chart.dimensions[x];
+ var d = c.dimensions[x];
+
+ if(typeof(dim.name) !== 'undefined' && d.name !== dim.name) {
+ if(__DEBUG === true) netdata.debug('chart ' + id + ', dimension ' + x + ' updated its name');
+ d.name = dim.name;
+ d._updated = true;
+ }
+
+ if(typeof(dim.algorithm) !== 'undefined' && d.algorithm !== dim.algorithm) {
+ if(__DEBUG === true) netdata.debug('chart ' + id + ', dimension ' + x + ' updated its algorithm from ' + d.algorithm + ' to ' + dim.algorithm);
+ d.algorithm = dim.algorithm;
+ d._updated = true;
+ }
+
+ if(typeof(dim.multiplier) !== 'undefined' && d.multiplier !== dim.multiplier) {
+ if(__DEBUG === true) netdata.debug('chart ' + id + ', dimension ' + x + ' updated its multiplier');
+ d.multiplier = dim.multiplier;
+ d._updated = true;
+ }
+
+ if(typeof(dim.divisor) !== 'undefined' && d.divisor !== dim.divisor) {
+ if(__DEBUG === true) netdata.debug('chart ' + id + ', dimension ' + x + ' updated its divisor');
+ d.divisor = dim.divisor;
+ d._updated = true;
+ }
+
+ if(typeof(dim.hidden) !== 'undefined' && d.hidden !== dim.hidden) {
+ if(__DEBUG === true) netdata.debug('chart ' + id + ', dimension ' + x + ' updated its hidden status');
+ d.hidden = dim.hidden;
+ d._updated = true;
+ }
+
+ if(d._updated) c._updated = true;
+ }
+ }
+
+ //if(netdata.options.DEBUG === true) netdata.debug(netdata.charts);
+ return netdata.charts[id];
+ };
+
+ return service;
+ },
+
+ runAllServices: function() {
+ if(netdata.options.DEBUG === true) netdata.debug('runAllServices()');
+
+ var now = Date.now();
+ var len = netdata.services.length;
+ while(len--) {
+ var service = netdata.services[len];
+
+ if(service.enabled === false || service.running === true) continue;
+ if(now <= service.next_run) continue;
+
+ service.update();
+
+ now = Date.now();
+ service.next_run = now - (now % (service.update_every * 1000)) + (service.update_every * 1000);
+ }
+
+ // 1/10th of update_every in pause
+ setTimeout(netdata.runAllServices, netdata.options.update_every * 100);
+ },
+
+ start: function() {
+ if(netdata.options.DEBUG === true) this.debug('started, services: ' + netdata.stringify(this.services));
+
+ if(this.services.length === 0) {
+ this.disableNodePlugin();
+
+ // eslint suggested way to exit
+ var exit = process.exit;
+ exit(1);
+ }
+ else this.runAllServices();
+ },
+
+ // disable the whole node.js plugin
+ disableNodePlugin: function() {
+ this.send('DISABLE');
+
+ // eslint suggested way to exit
+ var exit = process.exit;
+ exit(1);
+ },
+
+ requestFromParams: function(protocol, hostname, port, path, method) {
+ return {
+ protocol: protocol,
+ hostname: hostname,
+ port: port,
+ path: path,
+ //family: 4,
+ method: method,
+ headers: {
+ 'Content-Type': 'application/x-www-form-urlencoded',
+ 'Connection': 'keep-alive'
+ },
+ agent: new http.Agent({
+ keepAlive: true,
+ keepAliveMsecs: netdata.options.update_every * 1000,
+ maxSockets: 2, // it must be 2 to work
+ maxFreeSockets: 1
+ })
+ };
+ },
+
+ requestFromURL: function(a_url) {
+ var u = url.parse(a_url);
+ return netdata.requestFromParams(u.protocol, u.hostname, u.port, u.path, 'GET');
+ },
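+
+ // For example (illustrative): requestFromURL('http://127.0.0.1:19999/api/v1/info')
+ // yields a GET request object for host 127.0.0.1, port 19999 and path
+ // /api/v1/info, reusing the keep-alive agent settings above.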
+
+ configure: function(module, config, callback) {
+ if(netdata.options.DEBUG === true) this.debug(module.name + ': configuring (update_every: ' + this.options.update_every + ')...');
+
+ module.active = 0;
+ module.update_every = this.options.update_every;
+
+ if(typeof config.update_every !== 'undefined')
+ module.update_every = config.update_every;
+
+ module.enable_autodetect = (config.enable_autodetect)?true:false;
+
+ if(typeof(callback) === 'function')
+ module.configure_callback = callback;
+ else
+ module.configure_callback = null;
+
+ var added = module.configure(config);
+
+ if(netdata.options.DEBUG === true) this.debug(module.name + ': configured, reporting ' + added + ' eligible services.');
+
+ if(module.configure_callback !== null && added === 0) {
+ if(netdata.options.DEBUG === true) this.debug(module.name + ': configuration finish callback called from configure().');
+ var configure_callback = module.configure_callback;
+ module.configure_callback = null;
+ configure_callback();
+ }
+
+ return added;
+ }
+};
+
+if(netdata.options.DEBUG === true) netdata.debug('loaded netdata from: ' + __filename);
+module.exports = netdata;
diff --git a/collectors/node.d.plugin/node_modules/pixl-xml.js b/collectors/node.d.plugin/node_modules/pixl-xml.js
new file mode 100644
index 0000000..48de89e
--- /dev/null
+++ b/collectors/node.d.plugin/node_modules/pixl-xml.js
@@ -0,0 +1,607 @@
+// SPDX-License-Identifier: MIT
+/*
+ JavaScript XML Library
+ Plus a bunch of object utility functions
+
+ Usage:
+ var XML = require('pixl-xml');
+ var myxmlstring = '<?xml version="1.0"?><Document>' +
+ '<Simple>Hello</Simple>' +
+ '<Node Key="Value">Content</Node>' +
+ '</Document>';
+
+ var tree = XML.parse( myxmlstring, { preserveAttributes: true });
+ console.log( tree );
+
+ tree.Simple = "Hello2";
+ tree.Node._Attribs.Key = "Value2";
+ tree.Node._Data = "Content2";
+ tree.New = "I added this";
+
+ console.log( XML.stringify( tree, 'Document' ) );
+
+ Copyright (c) 2004 - 2015 Joseph Huckaby
+ Released under the MIT License
+ This version is for Node.JS, converted in 2012.
+*/
+
+var fs = require('fs');
+
+var indent_string = "\t";
+var xml_header = '<?xml version="1.0"?>';
+var sort_args = null;
+var re_valid_tag_name = /^\w[\w\-\:]*$/;
+
+var XML = exports.XML = function XML(args) {
+ // class constructor for XML parser class
+ // pass in args hash or text to parse
+ if (!args) args = '';
+ if (isa_hash(args)) {
+ for (var key in args) this[key] = args[key];
+ }
+ else this.text = args || '';
+
+ // stringify buffers
+ if (this.text instanceof Buffer) {
+ this.text = this.text.toString();
+ }
+
+ if (!this.text.match(/^\s*</)) {
+ // try as file path
+ var file = this.text;
+ this.text = fs.readFileSync(file, { encoding: 'utf8' });
+ if (!this.text) throw new Error("File not found: " + file);
+ }
+
+ this.tree = {};
+ this.errors = [];
+ this.piNodeList = [];
+ this.dtdNodeList = [];
+ this.documentNodeName = '';
+
+ if (this.lowerCase) {
+ this.attribsKey = this.attribsKey.toLowerCase();
+ this.dataKey = this.dataKey.toLowerCase();
+ }
+
+ this.patTag.lastIndex = 0;
+ if (this.text) this.parse();
+}
+
+XML.prototype.preserveAttributes = false;
+XML.prototype.lowerCase = false;
+
+XML.prototype.patTag = /([^<]*?)<([^>]+)>/g;
+XML.prototype.patSpecialTag = /^\s*([\!\?])/;
+XML.prototype.patPITag = /^\s*\?/;
+XML.prototype.patCommentTag = /^\s*\!--/;
+XML.prototype.patDTDTag = /^\s*\!DOCTYPE/;
+XML.prototype.patCDATATag = /^\s*\!\s*\[\s*CDATA/;
+XML.prototype.patStandardTag = /^\s*(\/?)([\w\-\:\.]+)\s*(.*)$/;
+XML.prototype.patSelfClosing = /\/\s*$/;
+XML.prototype.patAttrib = new RegExp("([\\w\\-\\:\\.]+)\\s*=\\s*([\\\"\\'])([^\\2]*?)\\2", "g");
+XML.prototype.patPINode = /^\s*\?\s*([\w\-\:]+)\s*(.*)$/;
+XML.prototype.patEndComment = /--$/;
+XML.prototype.patNextClose = /([^>]*?)>/g;
+XML.prototype.patExternalDTDNode = new RegExp("^\\s*\\!DOCTYPE\\s+([\\w\\-\\:]+)\\s+(SYSTEM|PUBLIC)\\s+\\\"([^\\\"]+)\\\"");
+XML.prototype.patInlineDTDNode = /^\s*\!DOCTYPE\s+([\w\-\:]+)\s+\[/;
+XML.prototype.patEndDTD = /\]$/;
+XML.prototype.patDTDNode = /^\s*\!DOCTYPE\s+([\w\-\:]+)\s+\[(.*)\]/;
+XML.prototype.patEndCDATA = /\]\]$/;
+XML.prototype.patCDATANode = /^\s*\!\s*\[\s*CDATA\s*\[([^]*)\]\]/;
+
+XML.prototype.attribsKey = '_Attribs';
+XML.prototype.dataKey = '_Data';
+
+XML.prototype.parse = function(branch, name) {
+ // parse text into XML tree, recurse for nested nodes
+ if (!branch) branch = this.tree;
+ if (!name) name = null;
+ var foundClosing = false;
+ var matches = null;
+
+ // match each tag, plus preceding text
+ while ( matches = this.patTag.exec(this.text) ) {
+ var before = matches[1];
+ var tag = matches[2];
+
+ // text leading up to tag = content of parent node
+ if (before.match(/\S/)) {
+ if (typeof(branch[this.dataKey]) != 'undefined') branch[this.dataKey] += ' '; else branch[this.dataKey] = '';
+ branch[this.dataKey] += trim(decode_entities(before));
+ }
+
+ // parse based on tag type
+ if (tag.match(this.patSpecialTag)) {
+ // special tag
+ if (tag.match(this.patPITag)) tag = this.parsePINode(tag);
+ else if (tag.match(this.patCommentTag)) tag = this.parseCommentNode(tag);
+ else if (tag.match(this.patDTDTag)) tag = this.parseDTDNode(tag);
+ else if (tag.match(this.patCDATATag)) {
+ tag = this.parseCDATANode(tag);
+ if (typeof(branch[this.dataKey]) != 'undefined') branch[this.dataKey] += ' '; else branch[this.dataKey] = '';
+ branch[this.dataKey] += trim(decode_entities(tag));
+ } // cdata
+ else {
+ this.throwParseError( "Malformed special tag", tag );
+ break;
+ } // error
+
+ if (tag == null) break;
+ continue;
+ } // special tag
+ else {
+ // Tag is standard, so parse name and attributes (if any)
+ var matches = tag.match(this.patStandardTag);
+ if (!matches) {
+ this.throwParseError( "Malformed tag", tag );
+ break;
+ }
+
+ var closing = matches[1];
+ var nodeName = this.lowerCase ? matches[2].toLowerCase() : matches[2];
+ var attribsRaw = matches[3];
+
+ // If this is a closing tag, make sure it matches its opening tag
+ if (closing) {
+ if (nodeName == (name || '')) {
+ foundClosing = 1;
+ break;
+ }
+ else {
+ this.throwParseError( "Mismatched closing tag (expected </" + name + ">)", tag );
+ break;
+ }
+ } // closing tag
+ else {
+ // Not a closing tag, so parse attributes into hash. If tag
+ // is self-closing, no recursive parsing is needed.
+ var selfClosing = !!attribsRaw.match(this.patSelfClosing);
+ var leaf = {};
+ var attribs = leaf;
+
+ // preserve attributes means they go into a sub-hash named "_Attribs"
+ // the XML composer honors this for restoring the tree back into XML
+ if (this.preserveAttributes) {
+ leaf[this.attribsKey] = {};
+ attribs = leaf[this.attribsKey];
+ }
+
+ // parse attributes
+ this.patAttrib.lastIndex = 0;
+ while ( matches = this.patAttrib.exec(attribsRaw) ) {
+ var key = this.lowerCase ? matches[1].toLowerCase() : matches[1];
+ attribs[ key ] = decode_entities( matches[3] );
+ } // foreach attrib
+
+ // if no attribs found, but we created the _Attribs subhash, clean it up now
+ if (this.preserveAttributes && !num_keys(attribs)) {
+ delete leaf[this.attribsKey];
+ }
+
+ // Recurse for nested nodes
+ if (!selfClosing) {
+ this.parse( leaf, nodeName );
+ if (this.error()) break;
+ }
+
+ // Compress into simple node if text only
+ var num_leaf_keys = num_keys(leaf);
+ if ((typeof(leaf[this.dataKey]) != 'undefined') && (num_leaf_keys == 1)) {
+ leaf = leaf[this.dataKey];
+ }
+ else if (!num_leaf_keys) {
+ leaf = '';
+ }
+
+ // Add leaf to parent branch
+ if (typeof(branch[nodeName]) != 'undefined') {
+ if (isa_array(branch[nodeName])) {
+ branch[nodeName].push( leaf );
+ }
+ else {
+ var temp = branch[nodeName];
+ branch[nodeName] = [ temp, leaf ];
+ }
+ }
+ else {
+ branch[nodeName] = leaf;
+ }
+
+ if (this.error() || (branch == this.tree)) break;
+ } // not closing
+ } // standard tag
+ } // main reg exp
+
+ // Make sure we found the closing tag
+ if (name && !foundClosing) {
+ this.throwParseError( "Missing closing tag (expected </" + name + ">)", name );
+ }
+
+ // If we are the master node, finish parsing and setup our doc node
+ if (branch == this.tree) {
+ if (typeof(this.tree[this.dataKey]) != 'undefined') delete this.tree[this.dataKey];
+
+ if (num_keys(this.tree) > 1) {
+ this.throwParseError( 'Only one top-level node is allowed in document', first_key(this.tree) );
+ return;
+ }
+
+ this.documentNodeName = first_key(this.tree);
+ if (this.documentNodeName) {
+ this.tree = this.tree[this.documentNodeName];
+ }
+ }
+};
+
+XML.prototype.throwParseError = function(key, tag) {
+ // log error and locate current line number in source XML document
+ var parsedSource = this.text.substring(0, this.patTag.lastIndex);
+ var eolMatch = parsedSource.match(/\n/g);
+ var lineNum = (eolMatch ? eolMatch.length : 0) + 1;
+ lineNum -= tag.match(/\n/) ? tag.match(/\n/g).length : 0;
+
+ this.errors.push({
+ type: 'Parse',
+ key: key,
+ text: '<' + tag + '>',
+ line: lineNum
+ });
+
+ // Throw actual error (must wrap parse in try/catch)
+ throw new Error( this.getLastError() );
+};
+
+XML.prototype.error = function() {
+ // return number of errors
+ return this.errors.length;
+};
+
+XML.prototype.getError = function(error) {
+ // get formatted error
+ var text = '';
+ if (!error) return '';
+
+ text = (error.type || 'General') + ' Error';
+ if (error.code) text += ' ' + error.code;
+ text += ': ' + error.key;
+
+ if (error.line) text += ' on line ' + error.line;
+ if (error.text) text += ': ' + error.text;
+
+ return text;
+};
+
+XML.prototype.getLastError = function() {
+ // Get most recently thrown error in plain text format
+ if (!this.error()) return '';
+ return this.getError( this.errors[this.errors.length - 1] );
+};
+
+XML.prototype.parsePINode = function(tag) {
+ // Parse Processor Instruction Node, e.g. <?xml version="1.0"?>
+ if (!tag.match(this.patPINode)) {
+ this.throwParseError( "Malformed processor instruction", tag );
+ return null;
+ }
+
+ this.piNodeList.push( tag );
+ return tag;
+};
+
+XML.prototype.parseCommentNode = function(tag) {
+ // Parse Comment Node, e.g. <!-- hello -->
+ var matches = null;
+ this.patNextClose.lastIndex = this.patTag.lastIndex;
+
+ while (!tag.match(this.patEndComment)) {
+ if (matches = this.patNextClose.exec(this.text)) {
+ tag += '>' + matches[1];
+ }
+ else {
+ this.throwParseError( "Unclosed comment tag", tag );
+ return null;
+ }
+ }
+
+ this.patTag.lastIndex = this.patNextClose.lastIndex;
+ return tag;
+};
+
+XML.prototype.parseDTDNode = function(tag) {
+ // Parse Document Type Descriptor Node, e.g. <!DOCTYPE ... >
+ var matches = null;
+
+ if (tag.match(this.patExternalDTDNode)) {
+ // tag is external, and thus self-closing
+ this.dtdNodeList.push( tag );
+ }
+ else if (tag.match(this.patInlineDTDNode)) {
+ // Tag is inline, so check for nested nodes.
+ this.patNextClose.lastIndex = this.patTag.lastIndex;
+
+ while (!tag.match(this.patEndDTD)) {
+ if (matches = this.patNextClose.exec(this.text)) {
+ tag += '>' + matches[1];
+ }
+ else {
+ this.throwParseError( "Unclosed DTD tag", tag );
+ return null;
+ }
+ }
+
+ this.patTag.lastIndex = this.patNextClose.lastIndex;
+
+ // Make sure complete tag is well-formed, and push onto DTD stack.
+ if (tag.match(this.patDTDNode)) {
+ this.dtdNodeList.push( tag );
+ }
+ else {
+ this.throwParseError( "Malformed DTD tag", tag );
+ return null;
+ }
+ }
+ else {
+ this.throwParseError( "Malformed DTD tag", tag );
+ return null;
+ }
+
+ return tag;
+};
+
+XML.prototype.parseCDATANode = function(tag) {
+ // Parse CDATA Node, e.g. <![CDATA[Brooks & Shields]]>
+ var matches = null;
+ this.patNextClose.lastIndex = this.patTag.lastIndex;
+
+ while (!tag.match(this.patEndCDATA)) {
+ if (matches = this.patNextClose.exec(this.text)) {
+ tag += '>' + matches[1];
+ }
+ else {
+ this.throwParseError( "Unclosed CDATA tag", tag );
+ return null;
+ }
+ }
+
+ this.patTag.lastIndex = this.patNextClose.lastIndex;
+
+ if (matches = tag.match(this.patCDATANode)) {
+ return matches[1];
+ }
+ else {
+ this.throwParseError( "Malformed CDATA tag", tag );
+ return null;
+ }
+};
+
+XML.prototype.getTree = function() {
+ // get reference to parsed XML tree
+ return this.tree;
+};
+
+XML.prototype.compose = function() {
+ // compose tree back into XML
+ var raw = compose_xml( this.tree, this.documentNodeName );
+ var body = raw.substring( raw.indexOf("\n") + 1, raw.length );
+ var xml = '';
+
+ if (this.piNodeList.length) {
+ for (var idx = 0, len = this.piNodeList.length; idx < len; idx++) {
+ xml += '<' + this.piNodeList[idx] + '>' + "\n";
+ }
+ }
+ else {
+ xml += xml_header + "\n";
+ }
+
+ if (this.dtdNodeList.length) {
+ for (var idx = 0, len = this.dtdNodeList.length; idx < len; idx++) {
+ xml += '<' + this.dtdNodeList[idx] + '>' + "\n";
+ }
+ }
+
+ xml += body;
+ return xml;
+};
+
+//
+// Static Utility Functions:
+//
+
+var parse_xml = exports.parse = function parse_xml(text, opts) {
+ // turn text into XML tree quickly
+ if (!opts) opts = {};
+ opts.text = text;
+ var parser = new XML(opts);
+ return parser.error() ? parser.getLastError() : parser.getTree();
+};
+
+var trim = exports.trim = function trim(text) {
+ // strip whitespace from beginning and end of string
+ if (text == null) return '';
+
+ if (text && text.replace) {
+ text = text.replace(/^\s+/, "");
+ text = text.replace(/\s+$/, "");
+ }
+
+ return text;
+};
+
+var encode_entities = exports.encodeEntities = function encode_entities(text) {
+	// Simple entitize function for composing XML
+ if (text == null) return '';
+
+ if (text && text.replace) {
+ text = text.replace(/\&/g, "&amp;"); // MUST BE FIRST
+ text = text.replace(/</g, "&lt;");
+ text = text.replace(/>/g, "&gt;");
+ }
+
+ return text;
+};
+
+var encode_attrib_entities = exports.encodeAttribEntities = function encode_attrib_entities(text) {
+	// Simple entitize function for composing XML attributes
+ if (text == null) return '';
+
+ if (text && text.replace) {
+ text = text.replace(/\&/g, "&amp;"); // MUST BE FIRST
+ text = text.replace(/</g, "&lt;");
+ text = text.replace(/>/g, "&gt;");
+ text = text.replace(/\"/g, "&quot;");
+ text = text.replace(/\'/g, "&apos;");
+ }
+
+ return text;
+};
+
+var decode_entities = exports.decodeEntities = function decode_entities(text) {
+ // Decode XML entities into raw ASCII
+ if (text == null) return '';
+
+ if (text && text.replace && text.match(/\&/)) {
+ text = text.replace(/\&lt\;/g, "<");
+ text = text.replace(/\&gt\;/g, ">");
+ text = text.replace(/\&quot\;/g, '"');
+ text = text.replace(/\&apos\;/g, "'");
+ text = text.replace(/\&amp\;/g, "&"); // MUST BE LAST
+ }
+
+ return text;
+};
+
+var compose_xml = exports.stringify = function compose_xml(node, name, indent) {
+ // Compose node into XML including attributes
+ // Recurse for child nodes
+ var xml = "";
+
+ // If this is the root node, set the indent to 0
+ // and setup the XML header (PI node)
+ if (!indent) {
+ indent = 0;
+ xml = xml_header + "\n";
+
+ if (!name) {
+ // no name provided, assume content is wrapped in it
+ name = first_key(node);
+ node = node[name];
+ }
+ }
+
+ // Setup the indent text
+ var indent_text = "";
+ for (var k = 0; k < indent; k++) indent_text += indent_string;
+
+ if ((typeof(node) == 'object') && (node != null)) {
+ // node is object -- now see if it is an array or hash
+ if (!node.length) { // what about zero-length array?
+ // node is hash
+ xml += indent_text + "<" + name;
+
+ var num_keys = 0;
+ var has_attribs = 0;
+ for (var key in node) num_keys++; // there must be a better way...
+
+ if (node["_Attribs"]) {
+ has_attribs = 1;
+ var sorted_keys = hash_keys_to_array(node["_Attribs"]).sort();
+ for (var idx = 0, len = sorted_keys.length; idx < len; idx++) {
+ var key = sorted_keys[idx];
+ xml += " " + key + "=\"" + encode_attrib_entities(node["_Attribs"][key]) + "\"";
+ }
+ } // has attribs
+
+ if (num_keys > has_attribs) {
+ // has child elements
+ xml += ">";
+
+ if (node["_Data"]) {
+ // simple text child node
+ xml += encode_entities(node["_Data"]) + "</" + name + ">\n";
+ } // just text
+ else {
+ xml += "\n";
+
+ var sorted_keys = hash_keys_to_array(node).sort();
+ for (var idx = 0, len = sorted_keys.length; idx < len; idx++) {
+ var key = sorted_keys[idx];
+ if ((key != "_Attribs") && key.match(re_valid_tag_name)) {
+ // recurse for node, with incremented indent value
+ xml += compose_xml( node[key], key, indent + 1 );
+ } // not _Attribs key
+ } // foreach key
+
+ xml += indent_text + "</" + name + ">\n";
+ } // real children
+ }
+ else {
+ // no child elements, so self-close
+ xml += "/>\n";
+ }
+ } // standard node
+ else {
+ // node is array
+ for (var idx = 0; idx < node.length; idx++) {
+ // recurse for node in array with same indent
+ xml += compose_xml( node[idx], name, indent );
+ }
+ } // array of nodes
+ } // complex node
+ else {
+ // node is simple string
+ xml += indent_text + "<" + name + ">" + encode_entities(node) + "</" + name + ">\n";
+ } // simple text node
+
+ return xml;
+};
+
+var always_array = exports.alwaysArray = function always_array(obj, key) {
+ // if object is not array, return array containing object
+ // if key is passed, work like XMLalwaysarray() instead
+ if (key) {
+ if ((typeof(obj[key]) != 'object') || (typeof(obj[key].length) == 'undefined')) {
+ var temp = obj[key];
+ delete obj[key];
+ obj[key] = new Array();
+ obj[key][0] = temp;
+ }
+ return null;
+ }
+ else {
+ if ((typeof(obj) != 'object') || (typeof(obj.length) == 'undefined')) { return [ obj ]; }
+ else return obj;
+ }
+};
+
+var hash_keys_to_array = exports.hashKeysToArray = function hash_keys_to_array(hash) {
+ // convert hash keys to array (discard values)
+ var array = [];
+ for (var key in hash) array.push(key);
+ return array;
+};
+
+var isa_hash = exports.isaHash = function isa_hash(arg) {
+ // determine if arg is a hash
+ return( !!arg && (typeof(arg) == 'object') && (typeof(arg.length) == 'undefined') );
+};
+
+var isa_array = exports.isaArray = function isa_array(arg) {
+ // determine if arg is an array or is array-like
+	if (Array.isArray(arg)) return true;
+ return( !!arg && (typeof(arg) == 'object') && (typeof(arg.length) != 'undefined') );
+};
+
+var first_key = exports.firstKey = function first_key(hash) {
+ // return first key from hash (unordered)
+ for (var key in hash) return key;
+ return null; // no keys in hash
+};
+
+var num_keys = exports.numKeys = function num_keys(hash) {
+ // count the number of keys in a hash
+ var count = 0;
+ for (var a in hash) count++;
+ return count;
+};
diff --git a/collectors/node.d.plugin/sma_webbox/Makefile.inc b/collectors/node.d.plugin/sma_webbox/Makefile.inc
new file mode 100644
index 0000000..38f2fe9
--- /dev/null
+++ b/collectors/node.d.plugin/sma_webbox/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_node_DATA += sma_webbox/sma_webbox.node.js
+# dist_nodeconfig_DATA += sma_webbox/sma_webbox.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += sma_webbox/README.md sma_webbox/Makefile.inc
+
diff --git a/collectors/node.d.plugin/sma_webbox/README.md b/collectors/node.d.plugin/sma_webbox/README.md
new file mode 100644
index 0000000..ec6b248
--- /dev/null
+++ b/collectors/node.d.plugin/sma_webbox/README.md
@@ -0,0 +1,33 @@
+<!--
+title: "SMA Sunny WebBox monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/node.d.plugin/sma_webbox/README.md
+sidebar_label: "SMA Sunny WebBox"
+-->
+
+# SMA Sunny WebBox monitoring with Netdata
+
+Monitoring for the [SMA Sunny
+WebBox](https://www.sma-sunny.com/en/questions-and-answers-on-discontinuation-of-the-sunny-webbox/).
+
+The module supports any number of WebBox servers:
+
+```json
+{
+ "enable_autodetect": false,
+ "update_every": 5,
+ "servers": [
+ {
+ "name": "plant1",
+ "hostname": "10.0.1.1",
+ "update_every": 10
+ },
+ {
+ "name": "plant2",
+ "hostname": "10.0.2.1",
+ "update_every": 15
+ }
+ ]
+}
+```
+
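+To test it on the console, you can presumably run the plugin by hand, the same way the snmp module documents it (the module name `sma_webbox` is an assumption based on the file name):
+
+```sh
+# run the node.d plugin once on the console, for this module only
+/usr/libexec/netdata/plugins.d/node.d.plugin 1 sma_webbox
+```
+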
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fnode.d.plugin%2Fsma_webbox%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/node.d.plugin/sma_webbox/sma_webbox.node.js b/collectors/node.d.plugin/sma_webbox/sma_webbox.node.js
new file mode 100644
index 0000000..aa60ae8
--- /dev/null
+++ b/collectors/node.d.plugin/sma_webbox/sma_webbox.node.js
@@ -0,0 +1,239 @@
+'use strict';
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+// This program will connect to one or more SMA Sunny Webboxes
+// to get the Solar Power Generated (current, today, total).
+
+// example configuration in /etc/netdata/node.d/sma_webbox.conf
+/*
+{
+ "enable_autodetect": false,
+ "update_every": 5,
+ "servers": [
+ {
+ "name": "plant1",
+ "hostname": "10.0.1.1",
+ "update_every": 10
+ },
+ {
+ "name": "plant2",
+ "hostname": "10.0.2.1",
+ "update_every": 15
+ }
+ ]
+}
+*/
+
+require('url');
+require('http');
+var netdata = require('netdata');
+
+if(netdata.options.DEBUG === true) netdata.debug('loaded ' + __filename + ' plugin');
+
+var webbox = {
+ name: __filename,
+ enable_autodetect: true,
+ update_every: 1,
+ base_priority: 60000,
+ charts: {},
+
+ processResponse: function(service, data) {
+ if(data !== null) {
+ var r = JSON.parse(data);
+
+ var d = {
+ 'GriPwr': {
+ unit: null,
+ value: null
+ },
+ 'GriEgyTdy': {
+ unit: null,
+ value: null
+ },
+ 'GriEgyTot': {
+ unit: null,
+ value: null
+ }
+ };
+
+ // parse the webbox response
+ // and put it in our d object
+ var found = 0;
+ var len = r.result.overview.length;
+ while(len--) {
+ var e = r.result.overview[len];
+ if(typeof(d[e.meta]) !== 'undefined') {
+ found++;
+ d[e.meta].value = e.value;
+ d[e.meta].unit = e.unit;
+ }
+ }
+
+ // add the service
+ if(found > 0 && service.added !== true)
+ service.commit();
+
+
+ // Grid Current Power Chart
+ if(d['GriPwr'].value !== null) {
+ const id = 'smawebbox_' + service.name + '.current';
+ let chart = webbox.charts[id];
+
+ if(typeof chart === 'undefined') {
+ chart = {
+ id: id, // the unique id of the chart
+ name: '', // the unique name of the chart
+ title: service.name + ' Current Grid Power', // the title of the chart
+ units: d['GriPwr'].unit, // the units of the chart dimensions
+ family: 'now', // the family of the chart
+ context: 'smawebbox.grid_power', // the context of the chart
+ type: netdata.chartTypes.area, // the type of the chart
+ priority: webbox.base_priority + 1, // the priority relative to others in the same family
+ update_every: service.update_every, // the expected update frequency of the chart
+ dimensions: {
+ 'GriPwr': {
+ id: 'GriPwr', // the unique id of the dimension
+ name: 'power', // the name of the dimension
+ algorithm: netdata.chartAlgorithms.absolute,// the id of the netdata algorithm
+ multiplier: 1, // the multiplier
+ divisor: 1, // the divisor
+ hidden: false // is hidden (boolean)
+ }
+ }
+ };
+
+ chart = service.chart(id, chart);
+ webbox.charts[id] = chart;
+ }
+
+ service.begin(chart);
+ service.set('GriPwr', Math.round(d['GriPwr'].value));
+ service.end();
+ }
+
+ if(d['GriEgyTdy'].value !== null) {
+ const id = 'smawebbox_' + service.name + '.today';
+ let chart = webbox.charts[id];
+
+ if(typeof chart === 'undefined') {
+ chart = {
+ id: id, // the unique id of the chart
+ name: '', // the unique name of the chart
+ title: service.name + ' Today Grid Power', // the title of the chart
+ units: d['GriEgyTdy'].unit, // the units of the chart dimensions
+ family: 'today', // the family of the chart
+ context: 'smawebbox.grid_power_today', // the context of the chart
+ type: netdata.chartTypes.area, // the type of the chart
+ priority: webbox.base_priority + 2, // the priority relative to others in the same family
+ update_every: service.update_every, // the expected update frequency of the chart
+ dimensions: {
+ 'GriEgyTdy': {
+ id: 'GriEgyTdy', // the unique id of the dimension
+ name: 'power', // the name of the dimension
+ algorithm: netdata.chartAlgorithms.absolute,// the id of the netdata algorithm
+ multiplier: 1, // the multiplier
+ divisor: 1000, // the divisor
+ hidden: false // is hidden (boolean)
+ }
+ }
+ };
+
+ chart = service.chart(id, chart);
+ webbox.charts[id] = chart;
+ }
+
+ service.begin(chart);
+ service.set('GriEgyTdy', Math.round(d['GriEgyTdy'].value * 1000));
+ service.end();
+ }
+
+ if(d['GriEgyTot'].value !== null) {
+ const id = 'smawebbox_' + service.name + '.total';
+ let chart = webbox.charts[id];
+
+ if(typeof chart === 'undefined') {
+ chart = {
+ id: id, // the unique id of the chart
+ name: '', // the unique name of the chart
+ title: service.name + ' Total Grid Power', // the title of the chart
+ units: d['GriEgyTot'].unit, // the units of the chart dimensions
+ family: 'total', // the family of the chart
+ context: 'smawebbox.grid_power_total', // the context of the chart
+ type: netdata.chartTypes.area, // the type of the chart
+ priority: webbox.base_priority + 3, // the priority relative to others in the same family
+ update_every: service.update_every, // the expected update frequency of the chart
+ dimensions: {
+ 'GriEgyTot': {
+ id: 'GriEgyTot', // the unique id of the dimension
+ name: 'power', // the name of the dimension
+ algorithm: netdata.chartAlgorithms.absolute,// the id of the netdata algorithm
+ multiplier: 1, // the multiplier
+ divisor: 1000, // the divisor
+ hidden: false // is hidden (boolean)
+ }
+ }
+ };
+
+ chart = service.chart(id, chart);
+ webbox.charts[id] = chart;
+ }
+
+ service.begin(chart);
+ service.set('GriEgyTot', Math.round(d['GriEgyTot'].value * 1000));
+ service.end();
+ }
+ }
+ },
+
+ // module.serviceExecute()
+ // this function is called only from this module
+	// its purpose is to prepare the request, create the
+	// service via netdata.service() and run service.execute()
+ serviceExecute: function(name, hostname, update_every) {
+ if(netdata.options.DEBUG === true) netdata.debug(this.name + ': ' + name + ': hostname: ' + hostname + ', update_every: ' + update_every);
+
+ var service = netdata.service({
+ name: name,
+ request: netdata.requestFromURL('http://' + hostname + '/rpc'),
+ update_every: update_every,
+ module: this
+ });
+ service.postData = 'RPC={"proc":"GetPlantOverview","format":"JSON","version":"1.0","id":"1"}';
+ service.request.method = 'POST';
+ service.request.headers['Content-Length'] = service.postData.length;
+
+ service.execute(this.processResponse);
+ },
+
+ configure: function(config) {
+ var added = 0;
+
+ if(typeof(config.servers) !== 'undefined') {
+ var len = config.servers.length;
+ while(len--) {
+ if(typeof config.servers[len].update_every === 'undefined')
+ config.servers[len].update_every = this.update_every;
+
+ if(config.servers[len].update_every < 5)
+ config.servers[len].update_every = 5;
+
+ this.serviceExecute(config.servers[len].name, config.servers[len].hostname, config.servers[len].update_every);
+ added++;
+ }
+ }
+
+ return added;
+ },
+
+ // module.update()
+	// this is called repeatedly to collect data, by calling
+	// service.execute()
+ update: function(service, callback) {
+ service.execute(function(serv, data) {
+ service.module.processResponse(serv, data);
+ callback();
+ });
+ },
+};
+
+module.exports = webbox;
diff --git a/collectors/node.d.plugin/snmp/Makefile.inc b/collectors/node.d.plugin/snmp/Makefile.inc
new file mode 100644
index 0000000..26448a1
--- /dev/null
+++ b/collectors/node.d.plugin/snmp/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_node_DATA += snmp/snmp.node.js
+# dist_nodeconfig_DATA += snmp/snmp.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += snmp/README.md snmp/Makefile.inc
+
diff --git a/collectors/node.d.plugin/snmp/README.md b/collectors/node.d.plugin/snmp/README.md
new file mode 100644
index 0000000..93ade5e
--- /dev/null
+++ b/collectors/node.d.plugin/snmp/README.md
@@ -0,0 +1,432 @@
+<!--
+title: "SNMP device monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/node.d.plugin/snmp/README.md
+sidebar_label: "SNMP"
+-->
+
+# SNMP device monitoring with Netdata
+
+Collects data from any SNMP device, using the [net-snmp](https://github.com/markabrahams/node-net-snmp) module.
+
+It supports:
+
+- all SNMP versions: SNMPv1, SNMPv2c and SNMPv3
+- any number of SNMP devices
+- each SNMP device can be used to collect data for any number of charts
+- each chart may have any number of dimensions
+- each SNMP device may have a different update frequency
+- each SNMP device is queried in one or more batches (you can set `max_request_size` per SNMP server to control the batch size).
+
+## Requirements
+
+- `nodejs` minimum required version 4
+
+## Configuration
+
+You will need to create the file `/etc/netdata/node.d/snmp.conf` with data like the following.
+
+In this example:
+
+- the SNMP device is `10.11.12.8`.
+- the SNMP community is `public`.
+- we will update the values every 10 seconds (`update_every: 10` under the server `10.11.12.8`).
+- we define 2 charts, `snmp_switch.bandwidth_port1` and `snmp_switch.bandwidth_port2`, each having 2 dimensions: `in` and `out`. Note that chart and dimension names must not contain any white space or special characters, other than `.` and `_`.
+
+```json
+{
+ "enable_autodetect": false,
+ "update_every": 5,
+ "max_request_size": 100,
+ "servers": [
+ {
+ "hostname": "10.11.12.8",
+ "community": "public",
+ "update_every": 10,
+ "max_request_size": 50,
+ "options": {
+ "timeout": 10000
+ },
+ "charts": {
+ "snmp_switch.bandwidth_port1": {
+ "title": "Switch Bandwidth for port 1",
+ "units": "kilobits/s",
+ "type": "area",
+ "priority": 1,
+ "family": "ports",
+ "dimensions": {
+ "in": {
+ "oid": "1.3.6.1.2.1.2.2.1.10.1",
+ "algorithm": "incremental",
+ "multiplier": 8,
+ "divisor": 1024,
+ "offset": 0
+ },
+ "out": {
+ "oid": "1.3.6.1.2.1.2.2.1.16.1",
+ "algorithm": "incremental",
+ "multiplier": -8,
+ "divisor": 1024,
+ "offset": 0
+ }
+ }
+ },
+ "snmp_switch.bandwidth_port2": {
+ "title": "Switch Bandwidth for port 2",
+ "units": "kilobits/s",
+ "type": "area",
+ "priority": 1,
+ "family": "ports",
+ "dimensions": {
+ "in": {
+ "oid": "1.3.6.1.2.1.2.2.1.10.2",
+ "algorithm": "incremental",
+ "multiplier": 8,
+ "divisor": 1024,
+ "offset": 0
+ },
+ "out": {
+ "oid": "1.3.6.1.2.1.2.2.1.16.2",
+ "algorithm": "incremental",
+ "multiplier": -8,
+ "divisor": 1024,
+ "offset": 0
+ }
+ }
+ }
+ }
+ }
+ ]
+}
+```
+
+`update_every` is the update frequency for each server, in seconds.
+
+`max_request_size` limits the maximum number of OIDs that will be requested in a single call. The default is 50. Lower this number if you get `TooBig` errors in Netdata's `error.log`.
+
+`family` sets the name of the submenu of the dashboard each chart will appear under.
+
+`multiplier` and `divisor` are passed by the plugin to the Netdata daemon, which applies them to the metric to convert it properly to `units`. For incremental counters (with the exception of Counter64-type metrics), `offset` is added to the metric from within the SNMP plugin. This means that the value you see in debug mode in the `DEBUG: setting current chart to... SET` line will not yet have been multiplied or divided, but it will already have had the offset added to it.
+
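+As a sketch of the order of operations described above (the numbers are made up for illustration):
+
+```js
+// hypothetical per-second delta of an incremental octet counter
+var delta = 128000;                 // octets/s, as collected
+var offset = 0;                     // added inside the SNMP plugin, before the value is sent
+var multiplier = 8;                 // applied later, by the Netdata daemon
+var divisor = 1024;                 // applied later, by the Netdata daemon
+var shown = (delta + offset) * multiplier / divisor; // 1000 kilobits/s on the chart
+```
+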
+<details markdown="1"><summary><b>Caution: Counter64 metrics do not support `offset` (issue #5028).</b></summary>
+The SNMP plugin supports Counter64 metrics, with the only limitation that the `offset` parameter must not be defined. Due to the way JavaScript handles large numbers, and because the offset is applied to metrics inside the plugin, the offset will be silently ignored.
+</details>
+
+If you need to define many charts using incremental OIDs, you can use something like this:
+
+```json
+{
+ "enable_autodetect": false,
+ "update_every": 10,
+ "servers": [
+ {
+ "hostname": "10.11.12.8",
+ "community": "public",
+ "update_every": 10,
+ "options": {
+ "timeout": 20000
+ },
+ "charts": {
+ "snmp_switch.bandwidth_port": {
+ "title": "Switch Bandwidth for port ",
+ "units": "kilobits/s",
+ "type": "area",
+ "priority": 1,
+ "family": "ports",
+ "multiply_range": [
+ 1,
+ 24
+ ],
+ "dimensions": {
+ "in": {
+ "oid": "1.3.6.1.2.1.2.2.1.10.",
+ "algorithm": "incremental",
+ "multiplier": 8,
+ "divisor": 1024,
+ "offset": 0
+ },
+ "out": {
+ "oid": "1.3.6.1.2.1.2.2.1.16.",
+ "algorithm": "incremental",
+ "multiplier": -8,
+ "divisor": 1024,
+ "offset": 0
+ }
+ }
+ }
+ }
+ }
+ ]
+}
+```
+
+This is like the previous example, but the `multiply_range` option multiplies the chart definition from `1` to `24` inclusive, producing 24 charts in total for the 24 ports of the switch `10.11.12.8`.
+
+Each of the 24 new charts will have its number (1-24) appended to:
+
+1. its chart unique id, i.e. `snmp_switch.bandwidth_port1` to `snmp_switch.bandwidth_port24`
+2. its `title`, i.e. `Switch Bandwidth for port 1` to `Switch Bandwidth for port 24`
+3. its `oid` (for all dimensions), i.e. dimension `in` will be `1.3.6.1.2.1.2.2.1.10.1` to `1.3.6.1.2.1.2.2.1.10.24`
+4. its priority (which is incremented for each chart, so that the charts appear on the dashboard in this order)
+
+The `options` given for each server are (see the example after this list):
+
+- `port` - UDP port to send requests to. Defaults to `161`.
+- `retries` - number of times to re-send a request. Defaults to `1`.
+- `sourceAddress` - IP address from which SNMP requests should originate; there is no default, the operating system selects an appropriate source address when the SNMP request is sent.
+- `sourcePort` - UDP port from which SNMP requests should originate; defaults to an ephemeral port selected by the operating system.
+- `timeout` - number of milliseconds to wait for a response before re-trying or failing. Defaults to `5000`.
+- `transport` - specify the transport to use, can be either `udp4` or `udp6`. Defaults to `udp4`.
+- `version` - either `0` (v1), `1` (v2) or `3` (v3). Defaults to `0`.
+- `idBitsSize` - either `16` or `32`. Defaults to `32`. Used to reduce the size of the generated id for compatibility with some older devices.
+
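+A server entry combining several of these options might look like this (the values shown are the documented defaults, listed here only for illustration):
+
+```json
+"options": {
+    "port": 161,
+    "retries": 1,
+    "timeout": 5000,
+    "transport": "udp4",
+    "version": 0
+}
+```
+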
+## SNMPv3
+
+To use SNMPv3:
+
+- set `version` to 3
+- use `user` instead of `community`
+
+User syntax:
+
+```json
+{
+ "user": {
+ "name": "userName",
+ "level": 3,
+ "authProtocol": "3",
+ "authKey": "authKey",
+ "privProtocol": "2",
+ "privKey": "privKey"
+ }
+}
+```
+
+Security levels:
+
+- 1 is `noAuthNoPriv`
+- 2 is `authNoPriv`
+- 3 is `authPriv`
+
+Authentication protocols:
+
+- "1" is `none`
+- "2" is `md5`
+- "3" is `sha`
+
+Privacy protocols:
+
+- "1" is `none`
+- "2" is `des`
+
+For additional details please see [net-snmp module readme](https://github.com/markabrahams/node-net-snmp#snmpcreatev3session-target-user-options).
+
+## Retrieving names from SNMP
+
+You can append a value retrieved from SNMP to the title, by adding `titleoid` to the chart.
+
+You can set a dimension name to a value retrieved from SNMP, by adding `oidname` to the dimension.
+
+Both of the above will participate in `multiply_range`.
+
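+For example, a chart that labels itself and its dimensions from the same OID column the Linksys example below uses for port names might look like this (a sketch; the `oidname` placement follows the description above):
+
+```json
+"snmp_switch.bandwidth_port": {
+    "title": "Switch Bandwidth for port ",
+    "titleoid": ".1.3.6.1.2.1.31.1.1.1.18.",
+    "units": "kilobits/s",
+    "type": "area",
+    "multiply_range": [ 1, 24 ],
+    "dimensions": {
+        "in": {
+            "oid": ".1.3.6.1.2.1.2.2.1.10.",
+            "oidname": ".1.3.6.1.2.1.31.1.1.1.18.",
+            "algorithm": "incremental",
+            "multiplier": 8,
+            "divisor": 1024,
+            "offset": 0
+        }
+    }
+}
+```
+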
+## Testing the configuration
+
+To test it, you can run:
+
+```sh
+/usr/libexec/netdata/plugins.d/node.d.plugin 1 snmp
+```
+
+The above runs the collector on your console, showing you both what Netdata sees and any errors. You can get a very detailed output by appending `debug` to the command line.
+
+If it works, restart Netdata to activate the SNMP collector and refresh the dashboard (if your SNMP device responds with a delay, you may need to refresh the dashboard after a few seconds).
+
+## Data collection speed
+
+Keep in mind that many SNMP switches and routers are very slow. They may not be able to report values per second. If you run `node.d.plugin` in `debug` mode, it will report the time it took for the SNMP device to respond. My switch, for example, needs 7-8 seconds to respond for the traffic on 24 ports (48 OIDs, in/out).
+
+Also, if you use many SNMP clients on the same SNMP device at the same time, values may be skipped. This is a problem of the SNMP device, not this collector.
+
+## Finding OIDs
+
+Use `snmpwalk`, like this:
+
+```sh
+snmpwalk -t 20 -v 1 -O fn -c public 10.11.12.8
+```
+
+- `-t 20` is the timeout in seconds
+- `-v 1` is the SNMP version
+- `-O fn` will display full OIDs in numeric format (you may also want to run it without this option, to see human-readable OIDs)
+- `-c public` is the SNMP community
+- `10.11.12.8` is the SNMP device
+
+Keep in mind that `snmpwalk` outputs the OIDs with a dot in front of them. You should remove this dot when adding OIDs to the configuration file of this collector.
+
+## Example: Linksys SRW2024P
+
+This is what I use for my Linksys SRW2024P. It creates:
+
+1. A chart for power consumption (it is a PoE switch)
+2. Two charts for packets received (total packets received and packets received with errors)
+3. One chart for packets output
+4. 24 charts, one for each port of the switch. It also appends the port names, as defined at the switch, to the chart titles.
+
+This switch also reports various other metrics, like SNMP statistics, packets per port, etc. Unfortunately, it does not report CPU utilization or backplane utilization.
+
+This switch has a very slow SNMP processor. It needs about 8 seconds to respond, so I have set the refresh frequency (`update_every`) to 15 seconds.
+
+```json
+{
+ "enable_autodetect": false,
+ "update_every": 5,
+ "servers": [
+ {
+ "hostname": "10.11.12.8",
+ "community": "public",
+ "update_every": 15,
+ "options": {
+ "timeout": 20000,
+ "version": 1
+ },
+ "charts": {
+ "snmp_switch.power": {
+ "title": "Switch Power Supply",
+ "units": "watts",
+ "type": "line",
+ "priority": 10,
+ "family": "power",
+ "dimensions": {
+ "supply": {
+ "oid": ".1.3.6.1.2.1.105.1.3.1.1.2.1",
+ "algorithm": "absolute",
+ "multiplier": 1,
+ "divisor": 1,
+ "offset": 0
+ },
+ "used": {
+ "oid": ".1.3.6.1.2.1.105.1.3.1.1.4.1",
+ "algorithm": "absolute",
+ "multiplier": 1,
+ "divisor": 1,
+ "offset": 0
+ }
+ }
+ },
+ "snmp_switch.input": {
+ "title": "Switch Packets Input",
+ "units": "packets/s",
+ "type": "area",
+ "priority": 20,
+ "family": "IP",
+ "dimensions": {
+ "receives": {
+ "oid": ".1.3.6.1.2.1.4.3.0",
+ "algorithm": "incremental",
+ "multiplier": 1,
+ "divisor": 1,
+ "offset": 0
+ },
+ "discards": {
+ "oid": ".1.3.6.1.2.1.4.8.0",
+ "algorithm": "incremental",
+ "multiplier": 1,
+ "divisor": 1,
+ "offset": 0
+ }
+ }
+ },
+ "snmp_switch.input_errors": {
+ "title": "Switch Received Packets with Errors",
+ "units": "packets/s",
+ "type": "line",
+ "priority": 30,
+ "family": "IP",
+ "dimensions": {
+ "bad_header": {
+ "oid": ".1.3.6.1.2.1.4.4.0",
+ "algorithm": "incremental",
+ "multiplier": 1,
+ "divisor": 1,
+ "offset": 0
+ },
+ "bad_address": {
+ "oid": ".1.3.6.1.2.1.4.5.0",
+ "algorithm": "incremental",
+ "multiplier": 1,
+ "divisor": 1,
+ "offset": 0
+ },
+ "unknown_protocol": {
+ "oid": ".1.3.6.1.2.1.4.7.0",
+ "algorithm": "incremental",
+ "multiplier": 1,
+ "divisor": 1,
+ "offset": 0
+ }
+ }
+ },
+ "snmp_switch.output": {
+ "title": "Switch Output Packets",
+ "units": "packets/s",
+ "type": "line",
+ "priority": 40,
+ "family": "IP",
+ "dimensions": {
+ "requests": {
+ "oid": ".1.3.6.1.2.1.4.10.0",
+ "algorithm": "incremental",
+ "multiplier": 1,
+ "divisor": 1,
+ "offset": 0
+ },
+ "discards": {
+ "oid": ".1.3.6.1.2.1.4.11.0",
+ "algorithm": "incremental",
+ "multiplier": -1,
+ "divisor": 1,
+ "offset": 0
+ },
+ "no_route": {
+ "oid": ".1.3.6.1.2.1.4.12.0",
+ "algorithm": "incremental",
+ "multiplier": -1,
+ "divisor": 1,
+ "offset": 0
+ }
+ }
+ },
+ "snmp_switch.bandwidth_port": {
+ "title": "Switch Bandwidth for port ",
+ "titleoid": ".1.3.6.1.2.1.31.1.1.1.18.",
+ "units": "kilobits/s",
+ "type": "area",
+ "priority": 100,
+ "family": "ports",
+ "multiply_range": [
+ 1,
+ 24
+ ],
+ "dimensions": {
+ "in": {
+ "oid": ".1.3.6.1.2.1.2.2.1.10.",
+ "algorithm": "incremental",
+ "multiplier": 8,
+ "divisor": 1024,
+ "offset": 0
+ },
+ "out": {
+ "oid": ".1.3.6.1.2.1.2.2.1.16.",
+ "algorithm": "incremental",
+ "multiplier": -8,
+ "divisor": 1024,
+ "offset": 0
+ }
+ }
+ }
+ }
+ }
+ ]
+}
+```
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fnode.d.plugin%2Fsnmp%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/node.d.plugin/snmp/snmp.node.js b/collectors/node.d.plugin/snmp/snmp.node.js
new file mode 100644
index 0000000..ca3f0bf
--- /dev/null
+++ b/collectors/node.d.plugin/snmp/snmp.node.js
@@ -0,0 +1,527 @@
+'use strict';
+// SPDX-License-Identifier: GPL-3.0-or-later
+// netdata snmp module
+// This program will connect to one or more SNMP Agents
+//
+
+// example configuration in /etc/netdata/node.d/snmp.conf
+/*
+{
+ "enable_autodetect": false,
+ "update_every": 5,
+ "max_request_size": 50,
+ "servers": [
+ {
+ "hostname": "10.11.12.8",
+ "community": "public",
+ "update_every": 10,
+ "max_request_size": 50,
+ "options": { "timeout": 10000 },
+ "charts": {
+ "snmp_switch.bandwidth_port1": {
+ "title": "Switch Bandwidth for port 1",
+ "units": "kilobits/s",
+ "type": "area",
+ "priority": 1,
+ "dimensions": {
+ "in": {
+ "oid": ".1.3.6.1.2.1.2.2.1.10.1",
+ "algorithm": "incremental",
+ "multiplier": 8,
+ "divisor": 1024,
+ "offset": 0
+ },
+ "out": {
+ "oid": ".1.3.6.1.2.1.2.2.1.16.1",
+ "algorithm": "incremental",
+ "multiplier": -8,
+ "divisor": 1024,
+ "offset": 0
+ }
+ }
+ },
+ "snmp_switch.bandwidth_port2": {
+ "title": "Switch Bandwidth for port 2",
+ "units": "kilobits/s",
+ "type": "area",
+ "priority": 1,
+ "dimensions": {
+ "in": {
+ "oid": ".1.3.6.1.2.1.2.2.1.10.2",
+ "algorithm": "incremental",
+ "multiplier": 8,
+ "divisor": 1024,
+ "offset": 0
+ },
+ "out": {
+ "oid": ".1.3.6.1.2.1.2.2.1.16.2",
+ "algorithm": "incremental",
+ "multiplier": -8,
+ "divisor": 1024,
+ "offset": 0
+ }
+ }
+ }
+ }
+ }
+ ]
+}
+*/
+
+// You can also give ranges of charts like the following.
+// This will append 1-24 to id, title, oid (on each dimension)
+// so that 24 charts will be created.
+/*
+{
+ "enable_autodetect": false,
+ "update_every": 10,
+ "max_request_size": 50,
+ "servers": [
+ {
+ "hostname": "10.11.12.8",
+ "community": "public",
+ "update_every": 10,
+ "max_request_size": 50,
+ "options": { "timeout": 20000 },
+ "charts": {
+ "snmp_switch.bandwidth_port": {
+ "title": "Switch Bandwidth for port ",
+ "units": "kilobits/s",
+ "type": "area",
+ "priority": 1,
+ "multiply_range": [ 1, 24 ],
+ "dimensions": {
+ "in": {
+ "oid": ".1.3.6.1.2.1.2.2.1.10.",
+ "algorithm": "incremental",
+ "multiplier": 8,
+ "divisor": 1024,
+ "offset": 0
+ },
+ "out": {
+ "oid": ".1.3.6.1.2.1.2.2.1.16.",
+ "algorithm": "incremental",
+ "multiplier": -8,
+ "divisor": 1024,
+ "offset": 0
+ }
+ }
+ }
+ }
+ }
+ ]
+}
+*/
+
+var net_snmp = require('net-snmp');
+var extend = require('extend');
+var netdata = require('netdata');
+
+if (netdata.options.DEBUG === true) netdata.debug('loaded ' + __filename + ' plugin');
+
+netdata.processors.snmp = {
+ name: 'snmp',
+
+ fixoid: function (oid) {
+ if (typeof oid !== 'string')
+ return oid;
+
+ if (oid.charAt(0) === '.')
+ return oid.substring(1, oid.length);
+
+ return oid;
+ },
+
+ prepare: function (service) {
+ var __DEBUG = netdata.options.DEBUG;
+
+ if (typeof service.snmp_oids === 'undefined' || service.snmp_oids === null || service.snmp_oids.length === 0) {
+ // this is the first time we see this service
+
+ if (__DEBUG === true)
+ netdata.debug(service.module.name + ': ' + service.name + ': preparing ' + this.name + ' OIDs');
+
+ // build an index of all OIDs
+ service.snmp_oids_index = {};
+ var chart_keys = Object.keys(service.request.charts);
+ var chart_keys_len = chart_keys.length;
+ while (chart_keys_len--) {
+ var c = chart_keys[chart_keys_len];
+ var chart = service.request.charts[c];
+
+ // for each chart
+
+ if (__DEBUG === true)
+ netdata.debug(service.module.name + ': ' + service.name + ': indexing ' + this.name + ' chart: ' + c);
+
+ if (typeof chart.titleoid !== 'undefined') {
+ service.snmp_oids_index[this.fixoid(chart.titleoid)] = {
+ type: 'title',
+ link: chart
+ };
+ }
+
+ var dim_keys = Object.keys(chart.dimensions);
+ var dim_keys_len = dim_keys.length;
+ while (dim_keys_len--) {
+ var d = dim_keys[dim_keys_len];
+ var dim = chart.dimensions[d];
+
+ // for each dimension in the chart
+
+ var oid = this.fixoid(dim.oid);
+ var oidname = this.fixoid(dim.oidname);
+
+ if (__DEBUG === true)
+ netdata.debug(service.module.name + ': ' + service.name + ': indexing ' + this.name + ' chart: ' + c + ', dimension: ' + d + ', OID: ' + oid + ", OID name: " + oidname);
+
+ // link it to the point we need to set the value to
+ service.snmp_oids_index[oid] = {
+ type: 'value',
+ link: dim
+ };
+
+ if (typeof oidname !== 'undefined')
+ service.snmp_oids_index[oidname] = {
+ type: 'name',
+ link: dim
+ };
+
+ // and set the value to null
+ dim.value = null;
+ }
+ }
+
+ if (__DEBUG === true)
+ netdata.debug(service.module.name + ': ' + service.name + ': indexed ' + this.name + ' OIDs: ' + netdata.stringify(service.snmp_oids_index));
+
+ // now create the array of OIDs needed by net-snmp
+ service.snmp_oids = Object.keys(service.snmp_oids_index);
+
+ if (__DEBUG === true)
+ netdata.debug(service.module.name + ': ' + service.name + ': final list of ' + this.name + ' OIDs: ' + netdata.stringify(service.snmp_oids));
+
+ service.snmp_oids_cleaned = 0;
+ } else if (service.snmp_oids_cleaned === 0) {
+ service.snmp_oids_cleaned = 1;
+
+ // the second time, keep only values
+
+ service.snmp_oids = new Array();
+ var oid_keys = Object.keys(service.snmp_oids_index);
+ var oid_keys_len = oid_keys.length;
+ while (oid_keys_len--) {
+ if (service.snmp_oids_index[oid_keys[oid_keys_len]].type === 'value')
+ service.snmp_oids.push(oid_keys[oid_keys_len]);
+ }
+ }
+ },
+
+ getdata: function (service, index, ok, failed, callback) {
+ var __DEBUG = netdata.options.DEBUG;
+ var that = this;
+
+ if (index >= service.snmp_oids.length) {
+ callback((ok > 0) ? {ok: ok, failed: failed} : null);
+ return;
+ }
+
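+        // take the next batch of OIDs, at most max_request_size entries per request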
+ var slice;
+ if (service.snmp_oids.length <= service.request.max_request_size) {
+ slice = service.snmp_oids;
+ index = service.snmp_oids.length;
+ } else if (service.snmp_oids.length - index <= service.request.max_request_size) {
+ slice = service.snmp_oids.slice(index, service.snmp_oids.length);
+ index = service.snmp_oids.length;
+ } else {
+ slice = service.snmp_oids.slice(index, index + service.request.max_request_size);
+ index += service.request.max_request_size;
+ }
+
+ if (__DEBUG === true)
+ netdata.debug(service.module.name + ': ' + service.name + ': making ' + slice.length + ' entries request, max is: ' + service.request.max_request_size);
+
+ service.snmp_session.get(slice, function (error, varbinds) {
+ if (error) {
+ service.error('Received error = ' + netdata.stringify(error) + ' varbinds = ' + netdata.stringify(varbinds));
+
+ // make all values null
+ var len = slice.length;
+ while (len--)
+                    service.snmp_oids_index[slice[len]].link.value = null;
+ } else {
+ if (__DEBUG === true)
+ netdata.debug(service.module.name + ': ' + service.name + ': got valid ' + service.module.name + ' response: ' + netdata.stringify(varbinds));
+
+ var varbinds_len = varbinds.length;
+ for (var i = 0; i < varbinds_len; i++) {
+ var value = null;
+
+ if (net_snmp.isVarbindError(varbinds[i])) {
+ if (__DEBUG === true)
+ netdata.debug(service.module.name + ': ' + service.name + ': failed ' + service.module.name + ' get for OIDs ' + varbinds[i].oid);
+
+ service.error('OID ' + varbinds[i].oid + ' gave error: ' + net_snmp.varbindError(varbinds[i]));
+ value = null;
+ failed++;
+ } else {
+                                    // test for Counter64
+ // varbinds[i].type = net_snmp.ObjectType.Counter64;
+ // varbinds[i].value = new Buffer([0x34, 0x49, 0x2e, 0xdc, 0xd1]);
+
+ switch (varbinds[i].type) {
+ case net_snmp.ObjectType.OctetString:
+ if (service.snmp_oids_index[varbinds[i].oid].type !== 'title' && service.snmp_oids_index[varbinds[i].oid].type !== 'name') {
+ // parse floating point values, exposed as strings
+ value = parseFloat(varbinds[i].value) * 1000;
+ if (__DEBUG === true) netdata.debug(service.module.name + ': ' + service.name + ': found ' + service.module.name + ' value of OIDs ' + varbinds[i].oid + ", ObjectType " + net_snmp.ObjectType[varbinds[i].type] + " (" + netdata.stringify(varbinds[i].type) + "), typeof(" + typeof (varbinds[i].value) + "), in JSON: " + netdata.stringify(varbinds[i].value) + ", value = " + value.toString() + " (parsed as float in string)");
+ } else {
+ // just use the string
+ value = varbinds[i].value;
+ if (__DEBUG === true) netdata.debug(service.module.name + ': ' + service.name + ': found ' + service.module.name + ' value of OIDs ' + varbinds[i].oid + ", ObjectType " + net_snmp.ObjectType[varbinds[i].type] + " (" + netdata.stringify(varbinds[i].type) + "), typeof(" + typeof (varbinds[i].value) + "), in JSON: " + netdata.stringify(varbinds[i].value) + ", value = " + value.toString() + " (parsed as string)");
+ }
+ break;
+
+ case net_snmp.ObjectType.Counter64:
+ // copy the buffer
+ value = '0x' + varbinds[i].value.toString('hex');
+ if (__DEBUG === true) netdata.debug(service.module.name + ': ' + service.name + ': found ' + service.module.name + ' value of OIDs ' + varbinds[i].oid + ", ObjectType " + net_snmp.ObjectType[varbinds[i].type] + " (" + netdata.stringify(varbinds[i].type) + "), typeof(" + typeof (varbinds[i].value) + "), in JSON: " + netdata.stringify(varbinds[i].value) + ", value = " + value.toString() + " (parsed as buffer)");
+ break;
+
+ case net_snmp.ObjectType.Integer:
+ case net_snmp.ObjectType.Counter:
+ case net_snmp.ObjectType.Gauge:
+ default:
+ value = varbinds[i].value;
+ if (__DEBUG === true) netdata.debug(service.module.name + ': ' + service.name + ': found ' + service.module.name + ' value of OIDs ' + varbinds[i].oid + ", ObjectType " + net_snmp.ObjectType[varbinds[i].type] + " (" + netdata.stringify(varbinds[i].type) + "), typeof(" + typeof (varbinds[i].value) + "), in JSON: " + netdata.stringify(varbinds[i].value) + ", value = " + value.toString() + " (parsed as number)");
+ break;
+ }
+
+ ok++;
+ }
+
+ if (value !== null) {
+ switch (service.snmp_oids_index[varbinds[i].oid].type) {
+ case 'title':
+ service.snmp_oids_index[varbinds[i].oid].link.title += ' ' + value;
+ break;
+ case 'name' :
+ service.snmp_oids_index[varbinds[i].oid].link.name = value.toString().replace(/\W/g, '_');
+ break;
+ case 'value':
+ service.snmp_oids_index[varbinds[i].oid].link.value = value;
+ break;
+ }
+ }
+ }
+
+ if (__DEBUG === true)
+ netdata.debug(service.module.name + ': ' + service.name + ': finished ' + service.module.name + ' with ' + ok + ' successful and ' + failed + ' failed values');
+ }
+ that.getdata(service, index, ok, failed, callback);
+ });
+ },
+
+ process: function (service, callback) {
+ var __DEBUG = netdata.options.DEBUG;
+
+ this.prepare(service);
+
+ if (service.snmp_oids.length === 0) {
+ // no OIDs found for this service
+
+ if (__DEBUG === true)
+ service.error('no OIDs to process.');
+
+ callback(null);
+ return;
+ }
+
+ if (typeof service.snmp_session === 'undefined' || service.snmp_session === null) {
+ // no SNMP session has been created for this service
+ // the SNMP session is just the initialization of NET-SNMP
+
+ var snmp_version = (service.request.options && service.request.options.version)
+ ? service.request.options.version
+ : net_snmp.Version1;
+
+ if (snmp_version === net_snmp.Version3) {
+ if (__DEBUG === true)
+ netdata.debug(service.module.name + ': ' + service.name + ': opening ' + this.name + ' session on ' + service.request.hostname + ' user ' + service.request.user + ' options ' + netdata.stringify(service.request.options));
+
+ // create the SNMP session
+ service.snmp_session = net_snmp.createV3Session(service.request.hostname, service.request.user, service.request.options);
+ } else {
+ if (__DEBUG === true)
+ netdata.debug(service.module.name + ': ' + service.name + ': opening ' + this.name + ' session on ' + service.request.hostname + ' community ' + service.request.community + ' options ' + netdata.stringify(service.request.options));
+
+ // create the SNMP session
+ service.snmp_session = net_snmp.createSession(service.request.hostname, service.request.community, service.request.options);
+ }
+
+ if (__DEBUG === true)
+ netdata.debug(service.module.name + ': ' + service.name + ': got ' + this.name + ' session: ' + netdata.stringify(service.snmp_session));
+
+ // if we later need traps, this is how to do it:
+ //service.snmp_session.trap(net_snmp.TrapType.LinkDown, function(error) {
+ // if(error) console.error('trap error: ' + netdata.stringify(error));
+ //});
+ }
+
+ // do it, get the SNMP values for the sessions we need
+ this.getdata(service, 0, 0, 0, callback);
+ }
+};
+
+var snmp = {
+ name: __filename,
+ enable_autodetect: true,
+ update_every: 1,
+ base_priority: 50000,
+
+ charts: {},
+
+ processResponse: function (service, data) {
+ if (data !== null) {
+ if (service.added !== true)
+ service.commit();
+
+ var chart_keys = Object.keys(service.request.charts);
+ var chart_keys_len = chart_keys.length;
+ for (var i = 0; i < chart_keys_len; i++) {
+ var c = chart_keys[i];
+
+ var chart = snmp.charts[c];
+ if (typeof chart === 'undefined') {
+ chart = service.chart(c, service.request.charts[c]);
+ snmp.charts[c] = chart;
+ }
+
+ service.begin(chart);
+
+ var dimensions = service.request.charts[c].dimensions;
+ var dim_keys = Object.keys(dimensions);
+ var dim_keys_len = dim_keys.length;
+ for (var j = 0; j < dim_keys_len; j++) {
+ var d = dim_keys[j];
+
+ if (dimensions[d].value !== null) {
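+                        // the offset is applied here, inside the plugin, and only to numeric
+                        // values (Counter64 arrives as a hex string, so its offset is skipped)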
+ if (typeof dimensions[d].offset === 'number' && typeof dimensions[d].value === 'number')
+ service.set(d, dimensions[d].value + dimensions[d].offset);
+ else
+ service.set(d, dimensions[d].value);
+ }
+ }
+
+ service.end();
+ }
+ }
+ },
+
+ // module.serviceExecute()
+ // this function is called only from this module
+    // its purpose is to prepare the request, create the
+    // service via netdata.service() and run service.execute()
+ serviceExecute: function (conf) {
+ var __DEBUG = netdata.options.DEBUG;
+
+ if (__DEBUG === true)
+ netdata.debug(this.name + ': snmp hostname: ' + conf.hostname + ', update_every: ' + conf.update_every);
+
+ var service = netdata.service({
+ name: conf.hostname,
+ request: conf,
+ update_every: conf.update_every,
+ module: this,
+ processor: netdata.processors.snmp
+ });
+
+ // multiply the charts, if required
+ var chart_keys = Object.keys(service.request.charts);
+ var chart_keys_len = chart_keys.length;
+ for (var i = 0; i < chart_keys_len; i++) {
+ var c = chart_keys[i];
+ var service_request_chart = service.request.charts[c];
+
+ if (__DEBUG === true)
+ netdata.debug(this.name + ': snmp hostname: ' + conf.hostname + ', examining chart: ' + c);
+
+ if (typeof service_request_chart.update_every === 'undefined')
+ service_request_chart.update_every = service.update_every;
+
+ if (typeof service_request_chart.multiply_range !== 'undefined') {
+ var from = service_request_chart.multiply_range[0];
+ var to = service_request_chart.multiply_range[1];
+ var prio = service_request_chart.priority || 1;
+
+ if (prio < snmp.base_priority) prio += snmp.base_priority;
+
+ while (from <= to) {
+ var id = c + from.toString();
+ var chart = extend(true, {}, service_request_chart);
+ chart.title += from.toString();
+
+ if (typeof chart.titleoid !== 'undefined')
+ chart.titleoid += from.toString();
+
+ chart.priority = prio++;
+
+ var dim_keys = Object.keys(chart.dimensions);
+ var dim_keys_len = dim_keys.length;
+ for (var j = 0; j < dim_keys_len; j++) {
+ var d = dim_keys[j];
+
+ chart.dimensions[d].oid += from.toString();
+
+ if (typeof chart.dimensions[d].oidname !== 'undefined')
+ chart.dimensions[d].oidname += from.toString();
+ }
+ service.request.charts[id] = chart;
+ from++;
+ }
+
+ delete service.request.charts[c];
+ } else {
+ if (service.request.charts[c].priority < snmp.base_priority)
+ service.request.charts[c].priority += snmp.base_priority;
+ }
+ }
+
+ service.execute(this.processResponse);
+ },
+
+ configure: function (config) {
+ var added = 0;
+
+ if (typeof config.max_request_size === 'undefined')
+ config.max_request_size = 50;
+
+ if (typeof (config.servers) !== 'undefined') {
+ var len = config.servers.length;
+ while (len--) {
+ if (typeof config.servers[len].update_every === 'undefined')
+ config.servers[len].update_every = this.update_every;
+
+ if (typeof config.servers[len].max_request_size === 'undefined')
+ config.servers[len].max_request_size = config.max_request_size;
+
+ this.serviceExecute(config.servers[len]);
+ added++;
+ }
+ }
+
+ return added;
+ },
+
+ // module.update()
+    // this is called repeatedly to collect data, by calling
+ // service.execute()
+ update: function (service, callback) {
+ service.execute(function (serv, data) {
+ service.module.processResponse(serv, data);
+ callback();
+ });
+ }
+};
+
+module.exports = snmp;
diff --git a/collectors/node.d.plugin/stiebeleltron/Makefile.inc b/collectors/node.d.plugin/stiebeleltron/Makefile.inc
new file mode 100644
index 0000000..0c6e1e2
--- /dev/null
+++ b/collectors/node.d.plugin/stiebeleltron/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_node_DATA += stiebeleltron/stiebeleltron.node.js
+# dist_nodeconfig_DATA += stiebeleltron/stiebeleltron.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += stiebeleltron/README.md stiebeleltron/Makefile.inc
+
diff --git a/collectors/node.d.plugin/stiebeleltron/README.md b/collectors/node.d.plugin/stiebeleltron/README.md
new file mode 100644
index 0000000..59bbf70
--- /dev/null
+++ b/collectors/node.d.plugin/stiebeleltron/README.md
@@ -0,0 +1,525 @@
+<!--
+title: "Stiebel Eltron ISG monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/node.d.plugin/stiebeleltron/README.md
+sidebar_label: "Stiebel Eltron ISG"
+-->
+
+# Stiebel Eltron ISG monitoring with Netdata
+
+Collects metrics from the configured heat pump and hot water installation, via the Stiebel Eltron ISG web interface.
+
+**Requirements**
+
+- Configuration file `stiebeleltron.conf` in the node.d Netdata config dir (default: `/etc/netdata/node.d/stiebeleltron.conf`)
+- Stiebel Eltron ISG web with network access (http), without password login
+
+The charts are configurable; however, the provided default configuration collects the following:
+
+1. **General**
+
+ - Outside temperature in C
+ - Condenser temperature in C
+ - Heating circuit pressure in bar
+ - Flow rate in l/min
+ - Output of water and heat pumps in %
+
+2. **Heating**
+
+ - Heat circuit 1 temperature in C (set/actual)
+ - Heat circuit 2 temperature in C (set/actual)
+ - Flow temperature in C (set/actual)
+ - Buffer temperature in C (set/actual)
+ - Pre-flow temperature in C
+
+3. **Hot Water**
+
+ - Hot water temperature in C (set/actual)
+
+4. **Room Temperature**
+
+ - Heat circuit 1 room temperature in C (set/actual)
+ - Heat circuit 2 room temperature in C (set/actual)
+
+5. **Electric Reheating**
+
+ - Dual Mode Reheating temperature in C (hot water/heating)
+
+6. **Process Data**
+
+ - Remaining compressor rest time in s
+
+7. **Runtime**
+
+ - Compressor runtime hours (hot water/heating)
+ - Reheating runtime hours (reheating 1/reheating 2)
+
+8. **Energy**
+
+ - Compressor today in kWh (hot water/heating)
+ - Compressor Total in kWh (hot water/heating)
+
+## Configuration
+
+If no configuration is given, the module will be disabled. Each `update_every` is optional; the default is `10`.
+
+---
+
+[Stiebel Eltron Heat pump system with ISG](https://www.stiebel-eltron.com/en/home/products-solutions/renewables/controller_energymanagement/internet_servicegateway/isg_web.html)
+
+Original author: BrainDoctor (GitHub)
+
+The module supports any metric that can be extracted with a regular expression. There is no API that exposes the values directly (as far as we know), so the workaround is to parse the HTML output of the ISG.
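+
+As a rough illustration of the approach (a standalone sketch, not code from the module; the HTML snippet and regex are invented examples), a value is extracted from the raw HTML with the first capture group of a regular expression, and the decimal comma is converted before use:
+
+```js
+// standalone sketch: extract a reading from ISG-style HTML (example markup)
+var html = '<td>OUTSIDE TEMPERATURE</td><td>-3,4 °C</td>';
+var match = new RegExp('OUTSIDE TEMPERATURE<\\/td>\\s*<td.*?>(-?[0-9]+,[0-9]+)').exec(html);
+if (match !== null) {
+    var value = parseFloat(match[1].replace(',', '.')); // "-3,4" -> -3.4
+    console.log(value); // -3.4
+}
+```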
+
+### Testing
+
+This plugin has been tested within the following environment:
+
+- ISG version: 8.5.6
+- MFG version: 12
+- Controller version: 9
+- July (summer time, not much activity)
+- Interface language: English
+- login- and password-less ISG web access (without HTTPS it's useless anyway)
+- Heatpump model: WPL 25 I-2
+- Hot water boiler model: 820 WT 1
+
+So, if the language is set to English, copy the configuration template below into `/etc/netdata/node.d/stiebeleltron.conf` and change the `url`s.
+
+In my case, the ISG is relatively slow to respond (at least 1s, sometimes up to 4s). Collecting metrics every 10s is more than enough for me.
+
+### How to update the config
+
+- The dimensions support a variable number of digits; the default is `1`. Most of the values printed by the ISG use 1 digit, some use 2 (see the sketch after this list).
+- The dimensions also support the `multiplier` and `divisor` attributes; however, the divisor gets overridden by `digits`, if specified. The default is `1`.
+- The test string for the regex is always the whole HTML output from the url. For each parameter you need a regular expression that extracts the value from the HTML source in the first capture group.
+  Recommended: [regexr.com](https://regexr.com/) for testing and matching, [freeformatter.com](https://www.freeformatter.com/json-escape.html) for escaping the newly created regex for the JSON config.
+
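+As a sketch of what `digits` does internally (a hypothetical helper mirroring the collector shown later in this diff, not part of the configuration): the parsed value is multiplied by `10^digits` so it can be sent as an integer, and the chart dimension gets a matching divisor so the dashboard shows the original decimal:
+
+```js
+// hypothetical helper illustrating the 'digits' scaling; digits defaults to 1
+function scaleForNetdata(rawValue, digits) {
+    if (typeof digits === 'undefined') digits = 1;
+    var factor = Math.pow(10, digits);
+    return {
+        value: Math.round(rawValue * factor), // integer sent to netdata
+        divisor: factor                       // netdata divides it back for display
+    };
+}
+
+console.log(scaleForNetdata(1.75, 2)); // { value: 175, divisor: 100 }
+```
+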
+The charts are generated using the configuration below, so if your installation is in another language or exposes other metrics, just adapt the structure or the regexes.
+
+### Configuration template
+
+```json
+{
+ "enable_autodetect": false,
+ "update_every": 10,
+ "pages": [
+ {
+ "name": "System",
+ "id": "system",
+ "url": "http://machine.ip.or.dns/?s=1,0",
+ "update_every": 10,
+ "categories": [
+ {
+ "id": "eletricreheating",
+ "name": "electric reheating",
+ "charts": [
+ {
+ "title": "Dual Mode Reheating Temperature",
+ "id": "reheatingtemp",
+ "unit": "Celsius",
+ "type": "line",
+ "prio": 1,
+ "dimensions": [
+ {
+ "name": "Heating",
+ "id": "dualmodeheatingtemp",
+ "regex": "DUAL MODE TEMP HEATING<\\\/td>\\s*<td.*>(-?[0-9]+,[0-9]+).*<\\\/td>"
+ },
+ {
+ "name": "Hot Water",
+ "id" : "dualmodehotwatertemp",
+ "regex": "DUAL MODE TEMP DHW<\\\/td>\\s*<td.*>(-?[0-9]+,[0-9]+).*<\\\/td>"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "id": "roomtemp",
+ "name": "room temperature",
+ "charts": [
+ {
+ "title": "Heat Circuit 1",
+ "id": "hc1",
+ "unit": "Celsius",
+ "type": "line",
+ "prio": 1,
+ "dimensions": [
+ {
+ "name": "Actual",
+ "id": "actual",
+ "regex": "<tr class=\"even\">\\s*<td.*>ACTUAL TEMPERATURE HC 1<\\\/td>\\s*<td.*>(-?[0-9]+,[0-9]+).*<\\\/td>\\s*<\\\/tr>"
+ },
+ {
+ "name": "Set",
+ "id" : "set",
+ "regex": "<tr class=\"odd\">\\s*<td.*>SET TEMPERATURE HC 1<\\\/td>\\s*<td.*>(-?[0-9]+,[0-9]+).*<\\\/td>\\s*<\\\/tr>"
+ }
+ ]
+ },
+ {
+ "title": "Heat Circuit 2",
+ "id": "hc2",
+ "unit": "Celsius",
+ "type": "line",
+ "prio": 2,
+ "dimensions": [
+ {
+ "name": "Actual",
+ "id": "actual",
+ "regex": "<tr class=\"even\">\\s*<td.*>ACTUAL TEMPERATURE HC 2<\\\/td>\\s*<td.*>(-?[0-9]+,[0-9]+).*<\\\/td>\\s*<\\\/tr>"
+ },
+ {
+ "name": "Set",
+ "id" : "set",
+ "regex": "<tr class=\"odd\">\\s*<td.*>SET TEMPERATURE HC 2<\\\/td>\\s*<td.*>(-?[0-9]+,[0-9]+).*<\\\/td>\\s*<\\\/tr>"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "id": "heating",
+ "name": "heating",
+ "charts": [
+ {
+ "title": "Heat Circuit 1",
+ "id": "hc1",
+ "unit": "Celsius",
+ "type": "line",
+ "prio": 1,
+ "dimensions": [
+ {
+ "name": "Actual",
+ "id": "actual",
+ "regex": "<tr class=\"odd\">\\s*<td.*>ACTUAL TEMPERATURE HC 1<\\\/td>\\s*<td.*>(-?[0-9]+,[0-9]+).*<\\\/td>\\s*<\\\/tr>"
+ },
+ {
+ "name": "Set",
+ "id" : "set",
+ "regex": "<tr class=\"even\">\\s*<td.*>SET TEMPERATURE HC 1<\\\/td>\\s*<td.*>(-?[0-9]+,[0-9]+).*<\\\/td>\\s*<\\\/tr>"
+ }
+ ]
+ },
+ {
+ "title": "Heat Circuit 2",
+ "id": "hc2",
+ "unit": "Celsius",
+ "type": "line",
+ "prio": 2,
+ "dimensions": [
+ {
+ "name": "Actual",
+ "id": "actual",
+ "regex": "<tr class=\"odd\">\\s*<td.*>ACTUAL TEMPERATURE HC 2<\\\/td>\\s*<td.*>(-?[0-9]+,[0-9]+).*<\\\/td>\\s*<\\\/tr>"
+ },
+ {
+ "name": "Set",
+ "id" : "set",
+ "regex": "<tr class=\"even\">\\s*<td.*>SET TEMPERATURE HC 2<\\\/td>\\s*<td.*>(-?[0-9]+,[0-9]+).*<\\\/td>\\s*<\\\/tr>"
+ }
+ ]
+ },
+ {
+ "title": "Flow Temperature",
+ "id": "flowtemp",
+ "unit": "Celsius",
+ "type": "line",
+ "prio": 3,
+ "dimensions": [
+ {
+ "name": "Heating",
+ "id": "heating",
+ "regex": "ACTUAL FLOW TEMPERATURE WP<\\\/td>\\s*<td.*>(-?[0-9]+,[0-9]+).*<\\\/td>"
+ },
+ {
+ "name": "Reheating",
+ "id" : "reheating",
+ "regex": "ACTUAL FLOW TEMPERATURE NHZ<\\\/td>\\s*<td.*>(-?[0-9]+,[0-9]+).*<\\\/td>"
+ }
+ ]
+ },
+ {
+ "title": "Buffer Temperature",
+ "id": "buffertemp",
+ "unit": "Celsius",
+ "type": "line",
+ "prio": 4,
+ "dimensions": [
+ {
+ "name": "Actual",
+ "id": "actual",
+ "regex": "ACTUAL BUFFER TEMPERATURE<\\\/td>\\s*<td.*>(-?[0-9]+,[0-9]+).*<\\\/td>"
+ },
+ {
+ "name": "Set",
+ "id" : "set",
+ "regex": "SET BUFFER TEMPERATURE<\\\/td>\\s*<td.*>(-?[0-9]+,[0-9]+).*<\\\/td>"
+ }
+ ]
+ },
+ {
+ "title": "Fixed Temperature",
+ "id": "fixedtemp",
+ "unit": "Celsius",
+ "type": "line",
+ "prio": 5,
+ "dimensions": [
+ {
+ "name": "Set",
+ "id" : "setfixed",
+ "regex": "SET FIXED TEMPERATURE<\\\/td>\\s*<td.*>(-?[0-9]+,[0-9]+).*<\\\/td>"
+ }
+ ]
+ },
+ {
+ "title": "Pre-flow Temperature",
+ "id": "preflowtemp",
+ "unit": "Celsius",
+ "type": "line",
+ "prio": 6,
+ "dimensions": [
+ {
+ "name": "Actual",
+ "id": "actualreturn",
+ "regex": "ACTUAL RETURN TEMPERATURE<\\\/td>\\s*<td.*>(-?[0-9]+,[0-9]+).*<\\\/td>"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "id": "hotwater",
+ "name": "hot water",
+ "charts": [
+ {
+ "title": "Hot Water Temperature",
+ "id": "hotwatertemp",
+ "unit": "Celsius",
+ "type": "line",
+ "prio": 1,
+ "dimensions": [
+ {
+ "name": "Actual",
+ "id": "actual",
+ "regex": "ACTUAL TEMPERATURE<\\\/td>\\s*<td.*>(-?[0-9]+,[0-9]+).*<\\\/td>"
+ },
+ {
+ "name": "Set",
+ "id" : "set",
+ "regex": "SET TEMPERATURE<\\\/td>\\s*<td.*>(-?[0-9]+,[0-9]+).*<\\\/td>"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "id": "general",
+ "name": "general",
+ "charts": [
+ {
+ "title": "Outside Temperature",
+ "id": "outside",
+ "unit": "Celsius",
+ "type": "line",
+ "prio": 1,
+ "dimensions": [
+ {
+ "name": "Outside temperature",
+ "id": "outsidetemp",
+ "regex": "OUTSIDE TEMPERATURE<\\\/td>\\s*<td.*>(-?[0-9]+,[0-9]+).*<\\\/td>\\s*<\\\/tr>"
+ }
+ ]
+ },
+ {
+ "title": "Condenser Temperature",
+ "id": "condenser",
+ "unit": "Celsius",
+ "type": "line",
+ "prio": 2,
+ "dimensions": [
+ {
+ "name": "Condenser",
+ "id": "condenser",
+ "regex": "CONDENSER TEMP\\.<\\\/td>\\s*<td.*>(-?[0-9]+,[0-9]+).*<\\\/td>"
+ }
+ ]
+ },
+ {
+ "title": "Heating Circuit Pressure",
+ "id": "heatingcircuit",
+ "unit": "bar",
+ "type": "line",
+ "prio": 3,
+ "dimensions": [
+ {
+ "name": "Heating Circuit",
+ "id": "heatingcircuit",
+ "digits": 2,
+ "regex": "PRESSURE HTG CIRC<\\\/td>\\s*<td.*>(-?[0-9]+,[0-9]*).*<\\\/td>"
+ }
+ ]
+ },
+ {
+ "title": "Flow Rate",
+ "id": "flowrate",
+ "unit": "liters/min",
+ "type": "line",
+ "prio": 4,
+ "dimensions": [
+ {
+ "name": "Flow Rate",
+ "id": "flowrate",
+ "digits": 2,
+ "regex": "FLOW RATE<\\\/td>\\s*<td.*>(-?[0-9]+,[0-9]+).*<\\\/td>"
+ }
+ ]
+ },
+ {
+ "title": "Output",
+ "id": "output",
+ "unit": "%",
+ "type": "line",
+ "prio": 5,
+ "dimensions": [
+ {
+ "name": "Heat Pump",
+ "id": "outputheatpump",
+ "regex": "OUTPUT HP<\\\/td>\\s*<td.*>(-?[0-9]+,?[0-9]*).*<\\\/td>"
+ },
+ {
+ "name": "Water Pump",
+ "id": "intpumprate",
+ "regex": "INT PUMP RATE<\\\/td>\\s*<td.*>(-?[0-9]+,?[0-9]*).*<\\\/td>"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "name": "Heat Pump",
+ "id": "heatpump",
+ "url": "http://machine.ip.or.dns/?s=1,1",
+ "update_every": 10,
+ "categories": [
+ {
+ "id": "runtime",
+ "name": "runtime",
+ "charts": [
+ {
+ "title": "Compressor",
+ "id": "compressor",
+ "unit": "h",
+ "type": "line",
+ "prio": 1,
+ "dimensions": [
+ {
+ "name": "Heating",
+ "id": "heating",
+ "regex": "RNT COMP 1 HEA<\\\/td>\\s*<td.*>(-?[0-9]+,?[0-9]*)"
+ },
+ {
+ "name": "Hot Water",
+ "id" : "hotwater",
+ "regex": "RNT COMP 1 DHW<\\\/td>\\s*<td.*>(-?[0-9]+,?[0-9]*)"
+ }
+ ]
+ },
+ {
+ "title": "Reheating",
+ "id": "reheating",
+ "unit": "h",
+ "type": "line",
+ "prio": 2,
+ "dimensions": [
+ {
+ "name": "Reheating 1",
+ "id": "rh1",
+ "regex": "BH 1<\\\/td>\\s*<td.*>(-?[0-9]+,?[0-9]*)"
+ },
+ {
+ "name": "Reheating 2",
+ "id" : "rh2",
+ "regex": "BH 2<\\\/td>\\s*<td.*>(-?[0-9]+,?[0-9]*)"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "id": "processdata",
+ "name": "process data",
+ "charts": [
+ {
+ "title": "Remaining Compressor Rest Time",
+ "id": "remaincomp",
+ "unit": "s",
+ "type": "line",
+ "prio": 1,
+ "dimensions": [
+ {
+ "name": "Timer",
+ "id": "timer",
+ "regex": "COMP DLAY CNTR<\\\/td>\\s*<td.*>(-?[0-9]+,?[0-9]*)"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "id": "energy",
+ "name": "energy",
+ "charts": [
+ {
+ "title": "Compressor Today",
+ "id": "compressorday",
+ "unit": "kWh",
+ "type": "line",
+ "prio": 1,
+ "dimensions": [
+ {
+ "name": "Heating",
+ "id": "heating",
+ "digits": 3,
+ "regex": "COMPRESSOR HEATING DAY<\\\/td>\\s*<td.*>(-?[0-9]+,?[0-9]*)"
+ },
+ {
+ "name": "Hot Water",
+ "id": "hotwater",
+ "digits": 3,
+ "regex": "COMPRESSOR DHW DAY<\\\/td>\\s*<td.*>(-?[0-9]+,?[0-9]*)"
+ }
+ ]
+ },
+ {
+ "title": "Compressor Total",
+ "id": "compressortotal",
+ "unit": "MWh",
+ "type": "line",
+ "prio": 2,
+ "dimensions": [
+ {
+ "name": "Heating",
+ "id": "heating",
+ "digits": 3,
+ "regex": "COMPRESSOR HEATING TOTAL<\\\/td>\\s*<td.*>(-?[0-9]+,?[0-9]*)"
+ },
+ {
+ "name": "Hot Water",
+ "id": "hotwater",
+ "digits": 3,
+ "regex": "COMPRESSOR DHW TOTAL<\\\/td>\\s*<td.*>(-?[0-9]+,?[0-9]*)"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ]
+}
+```
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fnode.d.plugin%2Fstiebeleltron%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/node.d.plugin/stiebeleltron/stiebeleltron.node.js b/collectors/node.d.plugin/stiebeleltron/stiebeleltron.node.js
new file mode 100644
index 0000000..250c265
--- /dev/null
+++ b/collectors/node.d.plugin/stiebeleltron/stiebeleltron.node.js
@@ -0,0 +1,197 @@
+'use strict';
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+// This program will connect to one Stiebel Eltron ISG for heatpump heating
+// to get the heat pump metrics.
+
+// example configuration in netdata/conf.d/node.d/stiebeleltron.conf.md
+
+require("url");
+require("http");
+var netdata = require("netdata");
+
+netdata.debug("loaded " + __filename + " plugin");
+
+var stiebeleltron = {
+ name: "Stiebel Eltron",
+ enable_autodetect: false,
+ update_every: 10,
+ base_priority: 60000,
+ charts: {},
+ pages: {},
+
+ createBasicDimension: function (id, name, multiplier, divisor) {
+ return {
+ id: id, // the unique id of the dimension
+ name: name, // the name of the dimension
+            algorithm: netdata.chartAlgorithms.absolute, // the id of the netdata algorithm
+ multiplier: multiplier, // the multiplier
+ divisor: divisor, // the divisor
+ hidden: false // is hidden (boolean)
+ };
+ },
+
+ processResponse: function (service, html) {
+ if (html === null) return;
+
+ // add the service
+ service.commit();
+
+ var page = stiebeleltron.pages[service.name];
+ var categories = page.categories;
+ var categoriesCount = categories.length;
+ while (categoriesCount--) {
+ var context = {
+ html: html,
+ service: service,
+ category: categories[categoriesCount],
+ page: page,
+ chartDefinition: null,
+ dimension: null
+ };
+            stiebeleltron.processCategory(context);
+        }
+ },
+
+ processCategory: function (context) {
+ var charts = context.category.charts;
+ var chartCount = charts.length;
+ while (chartCount--) {
+ context.chartDefinition = charts[chartCount];
+ stiebeleltron.processChart(context);
+ }
+ },
+
+ processChart: function (context) {
+ var dimensions = context.chartDefinition.dimensions;
+ var dimensionCount = dimensions.length;
+ context.service.begin(stiebeleltron.getChartFromContext(context));
+
+ while (dimensionCount--) {
+ context.dimension = dimensions[dimensionCount];
+ stiebeleltron.processDimension(context);
+ }
+ context.service.end();
+ },
+
+ processDimension: function (context) {
+ var dimension = context.dimension;
+ var match = new RegExp(dimension.regex).exec(context.html);
+ if (match === null) return;
+ var value = match[1].replace(",", ".");
+        // most values have a single decimal digit by default, so they are multiplied to become integers; 'digits' can override this.
+ if (stiebeleltron.isDefined(dimension.digits)) {
+ value *= Math.pow(10, dimension.digits);
+ } else {
+ value *= 10;
+ }
+ context.service.set(stiebeleltron.getDimensionId(context), value);
+ },
+
+ getChartFromContext: function (context) {
+ var chartId = this.getChartId(context);
+ var chart = stiebeleltron.charts[chartId];
+ if (stiebeleltron.isDefined(chart)) return chart;
+
+ var chartDefinition = context.chartDefinition;
+ var service = context.service;
+ var dimensions = {};
+
+ var dimCount = chartDefinition.dimensions.length;
+ while (dimCount--) {
+ var dim = chartDefinition.dimensions[dimCount];
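+            // collected values are scaled by 10^digits (default: 1 digit), so the
+            // dimension divisor must match to restore the original decimal value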
+ var multiplier = 1;
+ var divisor = 10;
+ if (stiebeleltron.isDefined(dim.digits)) divisor = Math.pow(10, Math.max(0, dim.digits));
+ if (stiebeleltron.isDefined(dim.multiplier)) multiplier = dim.multiplier;
+ if (stiebeleltron.isDefined(dim.divisor)) divisor = dim.divisor;
+ context.dimension = dim;
+ var dimId = this.getDimensionId(context);
+ dimensions[dimId] = this.createBasicDimension(dimId, dim.name, multiplier, divisor);
+ }
+
+ chart = {
+ id: chartId,
+ name: '',
+ title: chartDefinition.title,
+ units: chartDefinition.unit,
+ family: context.category.name,
+ context: 'stiebeleltron.' + context.category.id + '.' + chartDefinition.id,
+ type: chartDefinition.type,
+            priority: stiebeleltron.base_priority + chartDefinition.prio, // the priority relative to others in the same family
+ update_every: service.update_every, // the expected update frequency of the chart
+ dimensions: dimensions
+ };
+ chart = service.chart(chartId, chart);
+ stiebeleltron.charts[chartId] = chart;
+
+ return chart;
+ },
+
+ // module.serviceExecute()
+ // this function is called only from this module
+ // its purpose is to prepare the request and call
+ // netdata.serviceExecute()
+ serviceExecute: function (name, uri, update_every) {
+ netdata.debug(this.name + ': ' + name + ': url: ' + uri + ', update_every: ' + update_every);
+
+ var service = netdata.service({
+ name: name,
+ request: netdata.requestFromURL(uri),
+ update_every: update_every,
+ module: this
+ });
+ service.execute(this.processResponse);
+ },
+
+
+ configure: function (config) {
+ if (stiebeleltron.isUndefined(config.pages)) return 0;
+ var added = 0;
+ var pageCount = config.pages.length;
+ while (pageCount--) {
+ var page = config.pages[pageCount];
+ // some validation
+ if (stiebeleltron.isUndefined(page.categories) || page.categories.length < 1) {
+ netdata.error("Your Stiebel Eltron config is invalid. Disabling plugin.");
+ return 0;
+ }
+ if (stiebeleltron.isUndefined(page.update_every)) page.update_every = this.update_every;
+ this.pages[page.name] = page;
+ this.serviceExecute(page.name, page.url, page.update_every);
+ added++;
+ }
+ return added;
+ },
+
+ // module.update()
+ // this is called repeatedly to collect data, by calling
+    // service.execute()
+ update: function (service, callback) {
+ service.execute(function (serv, data) {
+ service.module.processResponse(serv, data);
+ callback();
+ });
+ },
+
+ getChartId: function (context) {
+ return "stiebeleltron_" + context.page.id +
+ "." + context.category.id +
+ "." + context.chartDefinition.id;
+ },
+
+ getDimensionId: function (context) {
+ return context.dimension.id;
+ },
+
+ isUndefined: function (value) {
+ return typeof value === 'undefined';
+ },
+
+ isDefined: function (value) {
+ return typeof value !== 'undefined';
+ }
+};
+
+module.exports = stiebeleltron;
diff --git a/collectors/perf.plugin/Makefile.am b/collectors/perf.plugin/Makefile.am
new file mode 100644
index 0000000..161784b
--- /dev/null
+++ b/collectors/perf.plugin/Makefile.am
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/collectors/perf.plugin/README.md b/collectors/perf.plugin/README.md
new file mode 100644
index 0000000..ccd185c
--- /dev/null
+++ b/collectors/perf.plugin/README.md
@@ -0,0 +1,80 @@
+<!--
+title: "perf.plugin"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/perf.plugin/README.md
+-->
+
+# perf.plugin
+
+`perf.plugin` collects system-wide CPU performance statistics from Performance Monitoring Units (PMUs) using
+the `perf_event_open()` system call.
+
+## Important Notes
+
+Accessing hardware PMUs requires root permissions, so the plugin is setuid to root.
+
+Keep in mind that the number of PMUs in a system is usually quite limited, and every hardware monitoring
+event on every CPU core needs its own open file descriptor.
+
+## Charts
+
+The plugin provides statistics for general hardware and software performance monitoring events:
+
+Hardware events:
+
+1. CPU cycles
+2. Instructions
+3. Branch instructions
+4. Cache operations
+5. BUS cycles
+6. Stalled frontend and backend cycles
+
+Software events:
+
+1. CPU migrations
+2. Alignment faults
+3. Emulation faults
+
+Hardware cache events:
+
+1. L1D cache operations
+2. L1D prefetch cache operations
+3. L1I cache operations
+4. LL cache operations
+5. DTLB cache operations
+6. ITLB cache operations
+7. PBU cache operations
+
+## Configuration
+
+The plugin is disabled by default because the number of PMUs is usually quite limited, and we do not want Netdata
+to compete silently for them and interfere with other performance monitoring software. If you need to
+enable the perf plugin, edit `/etc/netdata/netdata.conf` and set:
+
+```raw
+[plugins]
+ perf = yes
+```
+
+Then, optionally, tune the collection in the `[plugin:perf]` section:
+
+```raw
+[plugin:perf]
+ update every = 1
+ command options = all
+```
+
+You can use the `command options` parameter to pick what data should be collected and which charts should be
+displayed. If `all` is used, all general performance monitoring counters are probed and corresponding charts
+are enabled for the available counters. You can also define a particular set of enabled charts using the
+following keywords: `cycles`, `instructions`, `branch`, `cache`, `bus`, `stalled`, `migrations`, `alignment`,
+`emulation`, `L1D`, `L1D-prefetch`, `L1I`, `LL`, `DTLB`, `ITLB`, `PBU`.
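+
+For example, to collect only CPU cycles, instructions, and L1D cache operations (an illustrative selection, not a recommendation), you could set:
+
+```raw
+[plugin:perf]
+    update every = 1
+    command options = cycles instructions L1D
+```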
+
+## Debugging
+
+You can run the plugin by hand:
+
+```raw
+sudo /usr/libexec/netdata/plugins.d/perf.plugin 1 all debug
+```
+
+You will get verbose output on what the plugin does.
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fperf.plugin%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/perf.plugin/perf_plugin.c b/collectors/perf.plugin/perf_plugin.c
new file mode 100644
index 0000000..9fe3c5e
--- /dev/null
+++ b/collectors/perf.plugin/perf_plugin.c
@@ -0,0 +1,1348 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "../../libnetdata/libnetdata.h"
+
+#include <linux/perf_event.h>
+
+#define PLUGIN_PERF_NAME "perf.plugin"
+
+// Hardware counters
+#define NETDATA_CHART_PRIO_PERF_CPU_CYCLES 8800
+#define NETDATA_CHART_PRIO_PERF_INSTRUCTIONS 8801
+#define NETDATA_CHART_PRIO_PERF_BRANCH_INSTRUCTIONS    8802
+#define NETDATA_CHART_PRIO_PERF_CACHE 8803
+#define NETDATA_CHART_PRIO_PERF_BUS_CYCLES 8804
+#define NETDATA_CHART_PRIO_PERF_FRONT_BACK_CYCLES 8805
+
+// Software counters
+#define NETDATA_CHART_PRIO_PERF_MIGRATIONS 8810
+#define NETDATA_CHART_PRIO_PERF_ALIGNMENT 8811
+#define NETDATA_CHART_PRIO_PERF_EMULATION 8812
+
+// Hardware cache counters
+#define NETDATA_CHART_PRIO_PERF_L1D 8820
+#define NETDATA_CHART_PRIO_PERF_L1D_PREFETCH 8821
+#define NETDATA_CHART_PRIO_PERF_L1I 8822
+#define NETDATA_CHART_PRIO_PERF_LL 8823
+#define NETDATA_CHART_PRIO_PERF_DTLB 8824
+#define NETDATA_CHART_PRIO_PERF_ITLB 8825
+#define NETDATA_CHART_PRIO_PERF_PBU 8826
+
+// callback required by fatal()
+void netdata_cleanup_and_exit(int ret) {
+ exit(ret);
+}
+
+void send_statistics( const char *action, const char *action_result, const char *action_data) {
+ (void) action;
+ (void) action_result;
+ (void) action_data;
+ return;
+}
+
+// callbacks required by popen()
+void signals_block(void) {};
+void signals_unblock(void) {};
+void signals_reset(void) {};
+
+// callback required by eval()
+int health_variable_lookup(const char *variable, uint32_t hash, struct rrdcalc *rc, calculated_number *result) {
+ (void)variable;
+ (void)hash;
+ (void)rc;
+ (void)result;
+ return 0;
+};
+
+// required by get_system_cpus()
+char *netdata_configured_host_prefix = "";
+
+// Variables
+
+#define RRD_TYPE_PERF "perf"
+#define RRD_FAMILY_HW "hardware"
+#define RRD_FAMILY_SW "software"
+#define RRD_FAMILY_CACHE "cache"
+
+#define NO_FD -1
+#define ALL_PIDS -1
+#define RUNNING_THRESHOLD 100
+
+static int debug = 0;
+
+static int update_every = 1;
+static int freq = 0;
+
+typedef enum perf_event_id {
+ // Hardware counters
+ EV_ID_CPU_CYCLES,
+ EV_ID_INSTRUCTIONS,
+ EV_ID_CACHE_REFERENCES,
+ EV_ID_CACHE_MISSES,
+ EV_ID_BRANCH_INSTRUCTIONS,
+ EV_ID_BRANCH_MISSES,
+ EV_ID_BUS_CYCLES,
+ EV_ID_STALLED_CYCLES_FRONTEND,
+ EV_ID_STALLED_CYCLES_BACKEND,
+ EV_ID_REF_CPU_CYCLES,
+
+ // Software counters
+ // EV_ID_CPU_CLOCK,
+ // EV_ID_TASK_CLOCK,
+ // EV_ID_PAGE_FAULTS,
+ // EV_ID_CONTEXT_SWITCHES,
+ EV_ID_CPU_MIGRATIONS,
+ // EV_ID_PAGE_FAULTS_MIN,
+ // EV_ID_PAGE_FAULTS_MAJ,
+ EV_ID_ALIGNMENT_FAULTS,
+ EV_ID_EMULATION_FAULTS,
+
+ // Hardware cache counters
+ EV_ID_L1D_READ_ACCESS,
+ EV_ID_L1D_READ_MISS,
+ EV_ID_L1D_WRITE_ACCESS,
+ EV_ID_L1D_WRITE_MISS,
+ EV_ID_L1D_PREFETCH_ACCESS,
+
+ EV_ID_L1I_READ_ACCESS,
+ EV_ID_L1I_READ_MISS,
+
+ EV_ID_LL_READ_ACCESS,
+ EV_ID_LL_READ_MISS,
+ EV_ID_LL_WRITE_ACCESS,
+ EV_ID_LL_WRITE_MISS,
+
+ EV_ID_DTLB_READ_ACCESS,
+ EV_ID_DTLB_READ_MISS,
+ EV_ID_DTLB_WRITE_ACCESS,
+ EV_ID_DTLB_WRITE_MISS,
+
+ EV_ID_ITLB_READ_ACCESS,
+ EV_ID_ITLB_READ_MISS,
+
+ EV_ID_PBU_READ_ACCESS,
+
+ EV_ID_END
+} perf_event_id_t;
+
+enum perf_event_group {
+ EV_GROUP_CYCLES,
+ EV_GROUP_INSTRUCTIONS_AND_CACHE,
+ EV_GROUP_SOFTWARE,
+ EV_GROUP_CACHE_L1D,
+ EV_GROUP_CACHE_L1I_LL_DTLB,
+ EV_GROUP_CACHE_ITLB_BPU,
+
+ EV_GROUP_NUM
+};
+
+static int number_of_cpus;
+
+static int *group_leader_fds[EV_GROUP_NUM];
+
+static struct perf_event {
+ perf_event_id_t id;
+
+ int type;
+ int config;
+
+ int **group_leader_fd;
+ int *fd;
+
+ int disabled;
+ int updated;
+
+ uint64_t value;
+
+ uint64_t *prev_value;
+ uint64_t *prev_time_enabled;
+ uint64_t *prev_time_running;
+} perf_events[] = {
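+    // initializer order: {id, type, config, group_leader_fd, fd,
+    //                     disabled, updated, value, prev_value, prev_time_enabled, prev_time_running}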
+ // Hardware counters
+ {EV_ID_CPU_CYCLES, PERF_TYPE_HARDWARE, PERF_COUNT_HW_CPU_CYCLES, &group_leader_fds[EV_GROUP_CYCLES], NULL, 1, 0, 0, NULL, NULL, NULL},
+ {EV_ID_INSTRUCTIONS, PERF_TYPE_HARDWARE, PERF_COUNT_HW_INSTRUCTIONS, &group_leader_fds[EV_GROUP_INSTRUCTIONS_AND_CACHE], NULL, 1, 0, 0, NULL, NULL, NULL},
+ {EV_ID_CACHE_REFERENCES, PERF_TYPE_HARDWARE, PERF_COUNT_HW_CACHE_REFERENCES, &group_leader_fds[EV_GROUP_INSTRUCTIONS_AND_CACHE], NULL, 1, 0, 0, NULL, NULL, NULL},
+ {EV_ID_CACHE_MISSES, PERF_TYPE_HARDWARE, PERF_COUNT_HW_CACHE_MISSES, &group_leader_fds[EV_GROUP_INSTRUCTIONS_AND_CACHE], NULL, 1, 0, 0, NULL, NULL, NULL},
+ {EV_ID_BRANCH_INSTRUCTIONS, PERF_TYPE_HARDWARE, PERF_COUNT_HW_BRANCH_INSTRUCTIONS, &group_leader_fds[EV_GROUP_INSTRUCTIONS_AND_CACHE], NULL, 1, 0, 0, NULL, NULL, NULL},
+ {EV_ID_BRANCH_MISSES, PERF_TYPE_HARDWARE, PERF_COUNT_HW_BRANCH_MISSES, &group_leader_fds[EV_GROUP_INSTRUCTIONS_AND_CACHE], NULL, 1, 0, 0, NULL, NULL, NULL},
+ {EV_ID_BUS_CYCLES, PERF_TYPE_HARDWARE, PERF_COUNT_HW_BUS_CYCLES, &group_leader_fds[EV_GROUP_CYCLES], NULL, 1, 0, 0, NULL, NULL, NULL},
+ {EV_ID_STALLED_CYCLES_FRONTEND, PERF_TYPE_HARDWARE, PERF_COUNT_HW_STALLED_CYCLES_FRONTEND, &group_leader_fds[EV_GROUP_CYCLES], NULL, 1, 0, 0, NULL, NULL, NULL},
+ {EV_ID_STALLED_CYCLES_BACKEND, PERF_TYPE_HARDWARE, PERF_COUNT_HW_STALLED_CYCLES_BACKEND, &group_leader_fds[EV_GROUP_CYCLES], NULL, 1, 0, 0, NULL, NULL, NULL},
+ {EV_ID_REF_CPU_CYCLES, PERF_TYPE_HARDWARE, PERF_COUNT_HW_REF_CPU_CYCLES, &group_leader_fds[EV_GROUP_CYCLES], NULL, 1, 0, 0, NULL, NULL, NULL},
+
+ // Software counters
+ // {EV_ID_CPU_CLOCK, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_CPU_CLOCK, &group_leader_fds[EV_GROUP_SOFTWARE], NULL, 1, 0, 0, NULL, NULL, NULL},
+ // {EV_ID_TASK_CLOCK, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_TASK_CLOCK, &group_leader_fds[EV_GROUP_SOFTWARE], NULL, 1, 0, 0, NULL, NULL, NULL},
+ // {EV_ID_PAGE_FAULTS, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_PAGE_FAULTS, &group_leader_fds[EV_GROUP_SOFTWARE], NULL, 1, 0, 0, NULL, NULL, NULL},
+ // {EV_ID_CONTEXT_SWITCHES, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_CONTEXT_SWITCHES, &group_leader_fds[EV_GROUP_SOFTWARE], NULL, 1, 0, 0, NULL, NULL, NULL},
+ {EV_ID_CPU_MIGRATIONS, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_CPU_MIGRATIONS, &group_leader_fds[EV_GROUP_SOFTWARE], NULL, 1, 0, 0, NULL, NULL, NULL},
+ // {EV_ID_PAGE_FAULTS_MIN, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_PAGE_FAULTS_MIN, &group_leader_fds[EV_GROUP_SOFTWARE], NULL, 1, 0, 0, NULL, NULL, NULL},
+ // {EV_ID_PAGE_FAULTS_MAJ, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_PAGE_FAULTS_MAJ, &group_leader_fds[EV_GROUP_SOFTWARE], NULL, 1, 0, 0, NULL, NULL, NULL},
+ {EV_ID_ALIGNMENT_FAULTS, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_ALIGNMENT_FAULTS, &group_leader_fds[EV_GROUP_SOFTWARE], NULL, 1, 0, 0, NULL, NULL, NULL},
+ {EV_ID_EMULATION_FAULTS, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_EMULATION_FAULTS, &group_leader_fds[EV_GROUP_SOFTWARE], NULL, 1, 0, 0, NULL, NULL, NULL},
+
+ // Hardware cache counters
+ {
+ EV_ID_L1D_READ_ACCESS, PERF_TYPE_HW_CACHE,
+ (PERF_COUNT_HW_CACHE_L1D) | (PERF_COUNT_HW_CACHE_OP_READ << 8) | (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16),
+ &group_leader_fds[EV_GROUP_CACHE_L1D], NULL, 1, 0, 0, NULL, NULL, NULL
+ }, {
+ EV_ID_L1D_READ_MISS, PERF_TYPE_HW_CACHE,
+ (PERF_COUNT_HW_CACHE_L1D) | (PERF_COUNT_HW_CACHE_OP_READ << 8) | (PERF_COUNT_HW_CACHE_RESULT_MISS << 16),
+ &group_leader_fds[EV_GROUP_CACHE_L1D], NULL, 1, 0, 0, NULL, NULL, NULL
+ }, {
+ EV_ID_L1D_WRITE_ACCESS, PERF_TYPE_HW_CACHE,
+ (PERF_COUNT_HW_CACHE_L1D) | (PERF_COUNT_HW_CACHE_OP_WRITE << 8) | (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16),
+ &group_leader_fds[EV_GROUP_CACHE_L1D], NULL, 1, 0, 0, NULL, NULL, NULL
+ }, {
+ EV_ID_L1D_WRITE_MISS, PERF_TYPE_HW_CACHE,
+ (PERF_COUNT_HW_CACHE_L1D) | (PERF_COUNT_HW_CACHE_OP_WRITE << 8) | (PERF_COUNT_HW_CACHE_RESULT_MISS << 16),
+ &group_leader_fds[EV_GROUP_CACHE_L1D], NULL, 1, 0, 0, NULL, NULL, NULL
+ }, {
+ EV_ID_L1D_PREFETCH_ACCESS, PERF_TYPE_HW_CACHE,
+ (PERF_COUNT_HW_CACHE_L1D) | (PERF_COUNT_HW_CACHE_OP_PREFETCH << 8) | (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16),
+ &group_leader_fds[EV_GROUP_CACHE_L1D], NULL, 1, 0, 0, NULL, NULL, NULL
+ },
+
+ {
+ EV_ID_L1I_READ_ACCESS, PERF_TYPE_HW_CACHE,
+ (PERF_COUNT_HW_CACHE_L1I) | (PERF_COUNT_HW_CACHE_OP_READ << 8) | (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16),
+ &group_leader_fds[EV_GROUP_CACHE_L1I_LL_DTLB], NULL, 1, 0, 0, NULL, NULL, NULL
+ }, {
+ EV_ID_L1I_READ_MISS, PERF_TYPE_HW_CACHE,
+ (PERF_COUNT_HW_CACHE_L1I) | (PERF_COUNT_HW_CACHE_OP_READ << 8) | (PERF_COUNT_HW_CACHE_RESULT_MISS << 16),
+ &group_leader_fds[EV_GROUP_CACHE_L1I_LL_DTLB], NULL, 1, 0, 0, NULL, NULL, NULL
+ },
+
+ {
+ EV_ID_LL_READ_ACCESS, PERF_TYPE_HW_CACHE,
+ (PERF_COUNT_HW_CACHE_LL) | (PERF_COUNT_HW_CACHE_OP_READ << 8) | (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16),
+ &group_leader_fds[EV_GROUP_CACHE_L1I_LL_DTLB], NULL, 1, 0, 0, NULL, NULL, NULL
+ }, {
+ EV_ID_LL_READ_MISS, PERF_TYPE_HW_CACHE,
+ (PERF_COUNT_HW_CACHE_LL) | (PERF_COUNT_HW_CACHE_OP_READ << 8) | (PERF_COUNT_HW_CACHE_RESULT_MISS << 16),
+ &group_leader_fds[EV_GROUP_CACHE_L1I_LL_DTLB], NULL, 1, 0, 0, NULL, NULL, NULL
+ }, {
+ EV_ID_LL_WRITE_ACCESS, PERF_TYPE_HW_CACHE,
+ (PERF_COUNT_HW_CACHE_LL) | (PERF_COUNT_HW_CACHE_OP_WRITE << 8) | (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16),
+ &group_leader_fds[EV_GROUP_CACHE_L1I_LL_DTLB], NULL, 1, 0, 0, NULL, NULL, NULL
+ }, {
+ EV_ID_LL_WRITE_MISS, PERF_TYPE_HW_CACHE,
+ (PERF_COUNT_HW_CACHE_LL) | (PERF_COUNT_HW_CACHE_OP_WRITE << 8) | (PERF_COUNT_HW_CACHE_RESULT_MISS << 16),
+ &group_leader_fds[EV_GROUP_CACHE_L1I_LL_DTLB], NULL, 1, 0, 0, NULL, NULL, NULL
+ },
+
+ {
+ EV_ID_DTLB_READ_ACCESS, PERF_TYPE_HW_CACHE,
+ (PERF_COUNT_HW_CACHE_DTLB) | (PERF_COUNT_HW_CACHE_OP_READ << 8) | (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16),
+ &group_leader_fds[EV_GROUP_CACHE_L1I_LL_DTLB], NULL, 1, 0, 0, NULL, NULL, NULL
+ }, {
+ EV_ID_DTLB_READ_MISS, PERF_TYPE_HW_CACHE,
+ (PERF_COUNT_HW_CACHE_DTLB) | (PERF_COUNT_HW_CACHE_OP_READ << 8) | (PERF_COUNT_HW_CACHE_RESULT_MISS << 16),
+ &group_leader_fds[EV_GROUP_CACHE_L1I_LL_DTLB], NULL, 1, 0, 0, NULL, NULL, NULL
+ }, {
+ EV_ID_DTLB_WRITE_ACCESS, PERF_TYPE_HW_CACHE,
+ (PERF_COUNT_HW_CACHE_DTLB) | (PERF_COUNT_HW_CACHE_OP_WRITE << 8) | (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16),
+ &group_leader_fds[EV_GROUP_CACHE_L1I_LL_DTLB], NULL, 1, 0, 0, NULL, NULL, NULL
+ }, {
+ EV_ID_DTLB_WRITE_MISS, PERF_TYPE_HW_CACHE,
+ (PERF_COUNT_HW_CACHE_DTLB) | (PERF_COUNT_HW_CACHE_OP_WRITE << 8) | (PERF_COUNT_HW_CACHE_RESULT_MISS << 16),
+ &group_leader_fds[EV_GROUP_CACHE_ITLB_BPU], NULL, 1, 0, 0, NULL, NULL, NULL
+ },
+
+ {
+ EV_ID_ITLB_READ_ACCESS, PERF_TYPE_HW_CACHE,
+ (PERF_COUNT_HW_CACHE_ITLB) | (PERF_COUNT_HW_CACHE_OP_READ << 8) | (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16),
+ &group_leader_fds[EV_GROUP_CACHE_ITLB_BPU], NULL, 1, 0, 0, NULL, NULL, NULL
+ }, {
+ EV_ID_ITLB_READ_MISS, PERF_TYPE_HW_CACHE,
+ (PERF_COUNT_HW_CACHE_ITLB) | (PERF_COUNT_HW_CACHE_OP_READ << 8) | (PERF_COUNT_HW_CACHE_RESULT_MISS << 16),
+ &group_leader_fds[EV_GROUP_CACHE_ITLB_BPU], NULL, 1, 0, 0, NULL, NULL, NULL
+ },
+
+ {
+ EV_ID_PBU_READ_ACCESS, PERF_TYPE_HW_CACHE,
+ (PERF_COUNT_HW_CACHE_BPU) | (PERF_COUNT_HW_CACHE_OP_READ << 8) | (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16),
+ &group_leader_fds[EV_GROUP_CACHE_ITLB_BPU], NULL, 1, 0, 0, NULL, NULL, NULL
+ },
+
+ {EV_ID_END, 0, 0, NULL, NULL, 0, 0, 0, NULL, NULL, NULL}
+};
+
+static int perf_init() {
+ int cpu, group;
+ struct perf_event_attr perf_event_attr;
+ struct perf_event *current_event = NULL;
+ unsigned long flags = 0;
+
+ number_of_cpus = (int)get_system_cpus();
+
+ // initialize all perf event file descriptors
+ for(current_event = &perf_events[0]; current_event->id != EV_ID_END; current_event++) {
+ current_event->fd = mallocz(number_of_cpus * sizeof(int));
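+        // NO_FD is -1, and memset() stores the byte 0xff in every position,
+        // which produces -1 in each int slot of the array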
+ memset(current_event->fd, NO_FD, number_of_cpus * sizeof(int));
+
+ current_event->prev_value = mallocz(number_of_cpus * sizeof(uint64_t));
+ memset(current_event->prev_value, 0, number_of_cpus * sizeof(uint64_t));
+
+ current_event->prev_time_enabled = mallocz(number_of_cpus * sizeof(uint64_t));
+ memset(current_event->prev_time_enabled, 0, number_of_cpus * sizeof(uint64_t));
+
+ current_event->prev_time_running = mallocz(number_of_cpus * sizeof(uint64_t));
+ memset(current_event->prev_time_running, 0, number_of_cpus * sizeof(uint64_t));
+ }
+
+ for(group = 0; group < EV_GROUP_NUM; group++) {
+ group_leader_fds[group] = mallocz(number_of_cpus * sizeof(int));
+ memset(group_leader_fds[group], NO_FD, number_of_cpus * sizeof(int));
+ }
+
+ memset(&perf_event_attr, 0, sizeof(perf_event_attr));
+
+ for(cpu = 0; cpu < number_of_cpus; cpu++) {
+ for(current_event = &perf_events[0]; current_event->id != EV_ID_END; current_event++) {
+ if(unlikely(current_event->disabled)) continue;
+
+ perf_event_attr.type = current_event->type;
+ perf_event_attr.config = current_event->config;
+ perf_event_attr.read_format = PERF_FORMAT_TOTAL_TIME_ENABLED | PERF_FORMAT_TOTAL_TIME_RUNNING;
+
+ int fd, group_leader_fd = *(*current_event->group_leader_fd + cpu);
+
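+            // per perf_event_open(2), a group fd of -1 creates a new group leader;
+            // later events on the same CPU pass the leader's fd to join its group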
+ fd = syscall(
+ __NR_perf_event_open,
+ &perf_event_attr,
+ ALL_PIDS,
+ cpu,
+ group_leader_fd,
+ flags
+ );
+
+ if(unlikely(group_leader_fd == NO_FD)) group_leader_fd = fd;
+
+ if(unlikely(fd < 0)) {
+                switch(errno) {
+                    case EACCES:
+                        error("Cannot access the PMU: Permission denied");
+ break;
+ case EBUSY:
+ error("Another event already has exclusive access to the PMU");
+ break;
+ default:
+ error("Cannot open perf event");
+ }
+ error("Disabling event %u", current_event->id);
+ current_event->disabled = 1;
+ }
+
+ *(current_event->fd + cpu) = fd;
+ *(*current_event->group_leader_fd + cpu) = group_leader_fd;
+
+ if(unlikely(debug)) fprintf(stderr, "perf.plugin: event id = %u, cpu = %d, fd = %d, leader_fd = %d\n", current_event->id, cpu, fd, group_leader_fd);
+ }
+ }
+
+ return 0;
+}
+
+static void perf_free(void) {
+ int cpu, group;
+ struct perf_event *current_event = NULL;
+
+ for(current_event = &perf_events[0]; current_event->id != EV_ID_END; current_event++) {
+ for(cpu = 0; cpu < number_of_cpus; cpu++)
+ if(*(current_event->fd + cpu) != NO_FD) close(*(current_event->fd + cpu));
+
+ free(current_event->fd);
+ free(current_event->prev_value);
+ free(current_event->prev_time_enabled);
+ free(current_event->prev_time_running);
+ }
+
+ for(group = 0; group < EV_GROUP_NUM; group++)
+ free(group_leader_fds[group]);
+}
+
+static void reenable_events() {
+ int group, cpu;
+
+ for(group = 0; group < EV_GROUP_NUM; group++) {
+ for(cpu = 0; cpu < number_of_cpus; cpu++) {
+ int current_fd = *(group_leader_fds[group] + cpu);
+
+ if(unlikely(current_fd == NO_FD)) continue;
+
+ if(ioctl(current_fd, PERF_EVENT_IOC_DISABLE, PERF_IOC_FLAG_GROUP) == -1
+ || ioctl(current_fd, PERF_EVENT_IOC_ENABLE, PERF_IOC_FLAG_GROUP) == -1)
+ {
+ error("Cannot reenable event group");
+ }
+ }
+ }
+}
+
+static int perf_collect() {
+ int cpu;
+ struct perf_event *current_event = NULL;
+ static uint64_t prev_cpu_cycles_value = 0;
+ struct {
+ uint64_t value;
+ uint64_t time_enabled;
+ uint64_t time_running;
+ } read_result;
+
+ for(current_event = &perf_events[0]; current_event->id != EV_ID_END; current_event++) {
+ current_event->updated = 0;
+ current_event->value = 0;
+
+ if(unlikely(current_event->disabled)) continue;
+
+ for(cpu = 0; cpu < number_of_cpus; cpu++) {
+
+ ssize_t read_size = read(current_event->fd[cpu], &read_result, sizeof(read_result));
+
+ if(likely(read_size == sizeof(read_result))) {
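+                // the kernel may time-multiplex events on the PMU; scale the raw
+                // delta by enabled/running time to estimate the full-period count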
+ if (likely(read_result.time_running
+ && read_result.time_running != *(current_event->prev_time_running + cpu)
+ && (read_result.time_enabled / read_result.time_running < RUNNING_THRESHOLD))) {
+ current_event->value += (read_result.value - *(current_event->prev_value + cpu)) \
+ * (read_result.time_enabled - *(current_event->prev_time_enabled + cpu)) \
+ / (read_result.time_running - *(current_event->prev_time_running + cpu));
+ }
+
+ *(current_event->prev_value + cpu) = read_result.value;
+ *(current_event->prev_time_enabled + cpu) = read_result.time_enabled;
+ *(current_event->prev_time_running + cpu) = read_result.time_running;
+
+ current_event->updated = 1;
+ }
+ else {
+ error("Cannot update value for event %u", current_event->id);
+ return 1;
+ }
+ }
+
+ if(unlikely(debug)) fprintf(stderr, "perf.plugin: successfully read event id = %u, value = %"PRIu64"\n", current_event->id, current_event->value);
+ }
+
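+    // a CPU cycles counter that stops changing suggests the event groups have
+    // stalled, so disable and re-enable them to resume counting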
+ if(unlikely(perf_events[EV_ID_CPU_CYCLES].value == prev_cpu_cycles_value))
+ reenable_events();
+ prev_cpu_cycles_value = perf_events[EV_ID_CPU_CYCLES].value;
+
+ return 0;
+}
+
+static void perf_send_metrics() {
+ static int // Hardware counters
+ cpu_cycles_chart_generated = 0,
+ instructions_chart_generated = 0,
+ branch_chart_generated = 0,
+ cache_chart_generated = 0,
+ bus_cycles_chart_generated = 0,
+ stalled_cycles_chart_generated = 0,
+
+ // Software counters
+ migrations_chart_generated = 0,
+           alignment_chart_generated = 0,
+ emulation_chart_generated = 0,
+
+ // Hardware cache counters
+ L1D_chart_generated = 0,
+ L1D_prefetch_chart_generated = 0,
+ L1I_chart_generated = 0,
+ LL_chart_generated = 0,
+ DTLB_chart_generated = 0,
+ ITLB_chart_generated = 0,
+ PBU_chart_generated = 0;
+
+ // ------------------------------------------------------------------------
+
+ if(likely(perf_events[EV_ID_CPU_CYCLES].updated || perf_events[EV_ID_REF_CPU_CYCLES].updated)) {
+ if(unlikely(!cpu_cycles_chart_generated)) {
+ cpu_cycles_chart_generated = 1;
+
+ printf("CHART %s.%s '' 'CPU cycles' 'cycles/s' %s '' line %d %d %s\n"
+ , RRD_TYPE_PERF
+ , "cpu_cycles"
+ , RRD_FAMILY_HW
+ , NETDATA_CHART_PRIO_PERF_CPU_CYCLES
+ , update_every
+ , PLUGIN_PERF_NAME
+ );
+ printf("DIMENSION %s '' absolute 1 1\n", "cpu");
+ printf("DIMENSION %s '' absolute 1 1\n", "ref_cpu");
+ }
+
+ printf(
+ "BEGIN %s.%s\n"
+ , RRD_TYPE_PERF
+ , "cpu_cycles"
+ );
+ if(likely(perf_events[EV_ID_CPU_CYCLES].updated)) {
+ printf(
+ "SET %s = %lld\n"
+ , "cpu"
+ , (collected_number) perf_events[EV_ID_CPU_CYCLES].value
+ );
+ }
+ if(likely(perf_events[EV_ID_REF_CPU_CYCLES].updated)) {
+ printf(
+ "SET %s = %lld\n"
+ , "ref_cpu"
+ , (collected_number) perf_events[EV_ID_REF_CPU_CYCLES].value
+ );
+ }
+ printf("END\n");
+ }
+
+ // ------------------------------------------------------------------------
+
+ if(likely(perf_events[EV_ID_INSTRUCTIONS].updated)) {
+ if(unlikely(!instructions_chart_generated)) {
+ instructions_chart_generated = 1;
+
+ printf("CHART %s.%s '' 'Instructions' 'instructions/s' %s '' line %d %d %s\n"
+ , RRD_TYPE_PERF
+ , "instructions"
+ , RRD_FAMILY_HW
+ , NETDATA_CHART_PRIO_PERF_INSTRUCTIONS
+ , update_every
+ , PLUGIN_PERF_NAME
+ );
+ printf("DIMENSION %s '' absolute 1 1\n", "instructions");
+ }
+
+ printf(
+ "BEGIN %s.%s\n"
+ , RRD_TYPE_PERF
+ , "instructions"
+ );
+ printf(
+ "SET %s = %lld\n"
+ , "instructions"
+ , (collected_number) perf_events[EV_ID_INSTRUCTIONS].value
+ );
+ printf("END\n");
+ }
+
+ // ------------------------------------------------------------------------
+
+ if(likely(perf_events[EV_ID_BRANCH_INSTRUCTIONS].updated || perf_events[EV_ID_BRANCH_MISSES].updated)) {
+ if(unlikely(!branch_chart_generated)) {
+ branch_chart_generated = 1;
+
+ printf("CHART %s.%s '' 'Branch instructions' 'instructions/s' %s '' line %d %d %s\n"
+ , RRD_TYPE_PERF
+ , "branch_instructions"
+ , RRD_FAMILY_HW
+               , NETDATA_CHART_PRIO_PERF_BRANCH_INSTRUCTIONS
+ , update_every
+ , PLUGIN_PERF_NAME
+ );
+ printf("DIMENSION %s '' absolute 1 1\n", "instructions");
+ printf("DIMENSION %s '' absolute 1 1\n", "misses");
+ }
+
+ printf(
+ "BEGIN %s.%s\n"
+ , RRD_TYPE_PERF
+ , "branch_instructions"
+ );
+ if(likely(perf_events[EV_ID_BRANCH_INSTRUCTIONS].updated)) {
+ printf(
+ "SET %s = %lld\n"
+ , "instructions"
+ , (collected_number) perf_events[EV_ID_BRANCH_INSTRUCTIONS].value
+ );
+ }
+ if(likely(perf_events[EV_ID_BRANCH_MISSES].updated)) {
+ printf(
+ "SET %s = %lld\n"
+ , "misses"
+ , (collected_number) perf_events[EV_ID_BRANCH_MISSES].value
+ );
+ }
+ printf("END\n");
+ }
+
+ // ------------------------------------------------------------------------
+
+ if(likely(perf_events[EV_ID_CACHE_REFERENCES].updated || perf_events[EV_ID_CACHE_MISSES].updated)) {
+ if(unlikely(!cache_chart_generated)) {
+ cache_chart_generated = 1;
+
+ printf("CHART %s.%s '' 'Cache operations' 'operations/s' %s '' line %d %d %s\n"
+ , RRD_TYPE_PERF
+ , "cache"
+ , RRD_FAMILY_HW
+ , NETDATA_CHART_PRIO_PERF_CACHE
+ , update_every
+ , PLUGIN_PERF_NAME
+ );
+ printf("DIMENSION %s '' absolute 1 1\n", "references");
+ printf("DIMENSION %s '' absolute 1 1\n", "misses");
+ }
+
+ printf(
+ "BEGIN %s.%s\n"
+ , RRD_TYPE_PERF
+ , "cache"
+ );
+ if(likely(perf_events[EV_ID_CACHE_REFERENCES].updated)) {
+ printf(
+ "SET %s = %lld\n"
+ , "references"
+ , (collected_number) perf_events[EV_ID_CACHE_REFERENCES].value
+ );
+ }
+ if(likely(perf_events[EV_ID_CACHE_MISSES].updated)) {
+ printf(
+ "SET %s = %lld\n"
+ , "misses"
+ , (collected_number) perf_events[EV_ID_CACHE_MISSES].value
+ );
+ }
+ printf("END\n");
+ }
+
+ // ------------------------------------------------------------------------
+
+ if(likely(perf_events[EV_ID_BUS_CYCLES].updated)) {
+ if(unlikely(!bus_cycles_chart_generated)) {
+ bus_cycles_chart_generated = 1;
+
+ printf("CHART %s.%s '' 'Bus cycles' 'cycles/s' %s '' line %d %d %s\n"
+ , RRD_TYPE_PERF
+ , "bus_cycles"
+ , RRD_FAMILY_HW
+ , NETDATA_CHART_PRIO_PERF_BUS_CYCLES
+ , update_every
+ , PLUGIN_PERF_NAME
+ );
+ printf("DIMENSION %s '' absolute 1 1\n", "bus");
+ }
+
+ printf(
+ "BEGIN %s.%s\n"
+ , RRD_TYPE_PERF
+ , "bus_cycles"
+ );
+ printf(
+ "SET %s = %lld\n"
+ , "bus"
+ , (collected_number) perf_events[EV_ID_BUS_CYCLES].value
+ );
+ printf("END\n");
+ }
+
+ // ------------------------------------------------------------------------
+
+ if(likely(perf_events[EV_ID_STALLED_CYCLES_FRONTEND].updated || perf_events[EV_ID_STALLED_CYCLES_BACKEND].updated)) {
+ if(unlikely(!stalled_cycles_chart_generated)) {
+ stalled_cycles_chart_generated = 1;
+
+ printf("CHART %s.%s '' 'Stalled frontend and backend cycles' 'cycles/s' %s '' line %d %d %s\n"
+ , RRD_TYPE_PERF
+ , "stalled_cycles"
+ , RRD_FAMILY_HW
+ , NETDATA_CHART_PRIO_PERF_FRONT_BACK_CYCLES
+ , update_every
+ , PLUGIN_PERF_NAME
+ );
+ printf("DIMENSION %s '' absolute 1 1\n", "frontend");
+ printf("DIMENSION %s '' absolute 1 1\n", "backend");
+ }
+
+ printf(
+ "BEGIN %s.%s\n"
+ , RRD_TYPE_PERF
+ , "stalled_cycles"
+ );
+ if(likely(perf_events[EV_ID_STALLED_CYCLES_FRONTEND].updated)) {
+ printf(
+ "SET %s = %lld\n"
+ , "frontend"
+ , (collected_number) perf_events[EV_ID_STALLED_CYCLES_FRONTEND].value
+ );
+ }
+ if(likely(perf_events[EV_ID_STALLED_CYCLES_BACKEND].updated)) {
+ printf(
+ "SET %s = %lld\n"
+ , "backend"
+ , (collected_number) perf_events[EV_ID_STALLED_CYCLES_BACKEND].value
+ );
+ }
+ printf("END\n");
+ }
+
+ // ------------------------------------------------------------------------
+
+ if(likely(perf_events[EV_ID_CPU_MIGRATIONS].updated)) {
+ if(unlikely(!migrations_chart_generated)) {
+ migrations_chart_generated = 1;
+
+ printf("CHART %s.%s '' 'CPU migrations' 'migrations' %s '' line %d %d %s\n"
+ , RRD_TYPE_PERF
+ , "migrations"
+ , RRD_FAMILY_SW
+ , NETDATA_CHART_PRIO_PERF_MIGRATIONS
+ , update_every
+ , PLUGIN_PERF_NAME
+ );
+ printf("DIMENSION %s '' absolute 1 1\n", "migrations");
+ }
+
+ printf(
+ "BEGIN %s.%s\n"
+ , RRD_TYPE_PERF
+ , "migrations"
+ );
+ printf(
+ "SET %s = %lld\n"
+ , "migrations"
+ , (collected_number) perf_events[EV_ID_CPU_MIGRATIONS].value
+ );
+ printf("END\n");
+ }
+
+ // ------------------------------------------------------------------------
+
+ if(likely(perf_events[EV_ID_ALIGNMENT_FAULTS].updated)) {
+        if(unlikely(!alignment_chart_generated)) {
+            alignment_chart_generated = 1;
+
+            printf("CHART %s.%s '' 'Alignment faults' 'faults' %s '' line %d %d %s\n"
+ , RRD_TYPE_PERF
+                   , "alignment_faults"
+ , RRD_FAMILY_SW
+ , NETDATA_CHART_PRIO_PERF_ALIGNMENT
+ , update_every
+ , PLUGIN_PERF_NAME
+ );
+ printf("DIMENSION %s '' absolute 1 1\n", "faults");
+ }
+
+ printf(
+ "BEGIN %s.%s\n"
+ , RRD_TYPE_PERF
+            , "alignment_faults"
+ );
+ printf(
+ "SET %s = %lld\n"
+ , "faults"
+ , (collected_number) perf_events[EV_ID_ALIGNMENT_FAULTS].value
+ );
+ printf("END\n");
+ }
+
+ // ------------------------------------------------------------------------
+
+ if(likely(perf_events[EV_ID_EMULATION_FAULTS].updated)) {
+ if(unlikely(!emulation_chart_generated)) {
+ emulation_chart_generated = 1;
+
+ printf("CHART %s.%s '' 'Emulation faults' 'faults' %s '' line %d %d %s\n"
+ , RRD_TYPE_PERF
+ , "emulation_faults"
+ , RRD_FAMILY_SW
+ , NETDATA_CHART_PRIO_PERF_EMULATION
+ , update_every
+ , PLUGIN_PERF_NAME
+ );
+ printf("DIMENSION %s '' absolute 1 1\n", "faults");
+ }
+
+ printf(
+ "BEGIN %s.%s\n"
+ , RRD_TYPE_PERF
+ , "emulation_faults"
+ );
+ printf(
+ "SET %s = %lld\n"
+ , "faults"
+ , (collected_number) perf_events[EV_ID_EMULATION_FAULTS].value
+ );
+ printf("END\n");
+ }
+
+ // ------------------------------------------------------------------------
+
+ if(likely(perf_events[EV_ID_L1D_READ_ACCESS].updated || perf_events[EV_ID_L1D_READ_MISS].updated
+ || perf_events[EV_ID_L1D_WRITE_ACCESS].updated || perf_events[EV_ID_L1D_WRITE_MISS].updated)) {
+ if(unlikely(!L1D_chart_generated)) {
+ L1D_chart_generated = 1;
+
+ printf("CHART %s.%s '' 'L1D cache operations' 'events/s' %s '' line %d %d %s\n"
+ , RRD_TYPE_PERF
+ , "l1d_cache"
+ , RRD_FAMILY_CACHE
+ , NETDATA_CHART_PRIO_PERF_L1D
+ , update_every
+ , PLUGIN_PERF_NAME
+ );
+ printf("DIMENSION %s '' absolute 1 1\n", "read_access");
+ printf("DIMENSION %s '' absolute 1 1\n", "read_misses");
+ printf("DIMENSION %s '' absolute -1 1\n", "write_access");
+ printf("DIMENSION %s '' absolute -1 1\n", "write_misses");
+ }
+
+ printf(
+ "BEGIN %s.%s\n"
+ , RRD_TYPE_PERF
+ , "l1d_cache"
+ );
+ if(likely(perf_events[EV_ID_L1D_READ_ACCESS].updated)) {
+ printf(
+ "SET %s = %lld\n"
+ , "read_access"
+ , (collected_number) perf_events[EV_ID_L1D_READ_ACCESS].value
+ );
+ }
+ if(likely(perf_events[EV_ID_L1D_READ_MISS].updated)) {
+ printf(
+ "SET %s = %lld\n"
+ , "read_misses"
+ , (collected_number) perf_events[EV_ID_L1D_READ_MISS].value
+ );
+ }
+ if(likely(perf_events[EV_ID_L1D_WRITE_ACCESS].updated)) {
+ printf(
+ "SET %s = %lld\n"
+ , "write_access"
+ , (collected_number) perf_events[EV_ID_L1D_WRITE_ACCESS].value
+ );
+ }
+ if(likely(perf_events[EV_ID_L1D_WRITE_MISS].updated)) {
+ printf(
+ "SET %s = %lld\n"
+ , "write_misses"
+ , (collected_number) perf_events[EV_ID_L1D_WRITE_MISS].value
+ );
+ }
+ printf("END\n");
+ }
+
+ // ------------------------------------------------------------------------
+
+ if(likely(perf_events[EV_ID_L1D_PREFETCH_ACCESS].updated)) {
+ if(unlikely(!L1D_prefetch_chart_generated)) {
+ L1D_prefetch_chart_generated = 1;
+
+ printf("CHART %s.%s '' 'L1D prefetch cache operations' 'prefetches/s' %s '' line %d %d %s\n"
+ , RRD_TYPE_PERF
+ , "l1d_cache_prefetch"
+ , RRD_FAMILY_CACHE
+ , NETDATA_CHART_PRIO_PERF_L1D_PREFETCH
+ , update_every
+ , PLUGIN_PERF_NAME
+ );
+ printf("DIMENSION %s '' absolute 1 1\n", "prefetches");
+ }
+
+ printf(
+ "BEGIN %s.%s\n"
+ , RRD_TYPE_PERF
+ , "l1d_cache_prefetch"
+ );
+ printf(
+ "SET %s = %lld\n"
+ , "prefetches"
+ , (collected_number) perf_events[EV_ID_L1D_PREFETCH_ACCESS].value
+ );
+ printf("END\n");
+ }
+
+ // ------------------------------------------------------------------------
+
+ if(likely(perf_events[EV_ID_L1I_READ_ACCESS].updated || perf_events[EV_ID_L1I_READ_MISS].updated)) {
+ if(unlikely(!L1I_chart_generated)) {
+ L1I_chart_generated = 1;
+
+ printf("CHART %s.%s '' 'L1I cache operations' 'events/s' %s '' line %d %d %s\n"
+ , RRD_TYPE_PERF
+ , "l1i_cache"
+ , RRD_FAMILY_CACHE
+ , NETDATA_CHART_PRIO_PERF_L1I
+ , update_every
+ , PLUGIN_PERF_NAME
+ );
+ printf("DIMENSION %s '' absolute 1 1\n", "read_access");
+ printf("DIMENSION %s '' absolute 1 1\n", "read_misses");
+ }
+
+ printf(
+ "BEGIN %s.%s\n"
+ , RRD_TYPE_PERF
+ , "l1i_cache"
+ );
+ if(likely(perf_events[EV_ID_L1I_READ_ACCESS].updated)) {
+ printf(
+ "SET %s = %lld\n"
+ , "read_access"
+ , (collected_number) perf_events[EV_ID_L1I_READ_ACCESS].value
+ );
+ }
+ if(likely(perf_events[EV_ID_L1I_READ_MISS].updated)) {
+ printf(
+ "SET %s = %lld\n"
+ , "read_misses"
+ , (collected_number) perf_events[EV_ID_L1I_READ_MISS].value
+ );
+ }
+ printf("END\n");
+ }
+
+ // ------------------------------------------------------------------------
+
+ if(likely(perf_events[EV_ID_LL_READ_ACCESS].updated || perf_events[EV_ID_LL_READ_MISS].updated
+ || perf_events[EV_ID_LL_WRITE_ACCESS].updated || perf_events[EV_ID_LL_WRITE_MISS].updated)) {
+ if(unlikely(!LL_chart_generated)) {
+ LL_chart_generated = 1;
+
+ printf("CHART %s.%s '' 'LL cache operations' 'events/s' %s '' line %d %d %s\n"
+ , RRD_TYPE_PERF
+ , "ll_cache"
+ , RRD_FAMILY_CACHE
+ , NETDATA_CHART_PRIO_PERF_LL
+ , update_every
+ , PLUGIN_PERF_NAME
+ );
+ printf("DIMENSION %s '' absolute 1 1\n", "read_access");
+ printf("DIMENSION %s '' absolute 1 1\n", "read_misses");
+ printf("DIMENSION %s '' absolute -1 1\n", "write_access");
+ printf("DIMENSION %s '' absolute -1 1\n", "write_misses");
+ }
+
+ printf(
+ "BEGIN %s.%s\n"
+ , RRD_TYPE_PERF
+ , "ll_cache"
+ );
+ if(likely(perf_events[EV_ID_LL_READ_ACCESS].updated)) {
+ printf(
+ "SET %s = %lld\n"
+ , "read_access"
+ , (collected_number) perf_events[EV_ID_LL_READ_ACCESS].value
+ );
+ }
+ if(likely(perf_events[EV_ID_LL_READ_MISS].updated)) {
+ printf(
+ "SET %s = %lld\n"
+ , "read_misses"
+ , (collected_number) perf_events[EV_ID_LL_READ_MISS].value
+ );
+ }
+ if(likely(perf_events[EV_ID_LL_WRITE_ACCESS].updated)) {
+ printf(
+ "SET %s = %lld\n"
+ , "write_access"
+ , (collected_number) perf_events[EV_ID_LL_WRITE_ACCESS].value
+ );
+ }
+ if(likely(perf_events[EV_ID_LL_WRITE_MISS].updated)) {
+ printf(
+ "SET %s = %lld\n"
+ , "write_misses"
+ , (collected_number) perf_events[EV_ID_LL_WRITE_MISS].value
+ );
+ }
+ printf("END\n");
+ }
+
+ // ------------------------------------------------------------------------
+
+ if(likely(perf_events[EV_ID_DTLB_READ_ACCESS].updated || perf_events[EV_ID_DTLB_READ_MISS].updated
+ || perf_events[EV_ID_DTLB_WRITE_ACCESS].updated || perf_events[EV_ID_DTLB_WRITE_MISS].updated)) {
+ if(unlikely(!DTLB_chart_generated)) {
+ DTLB_chart_generated = 1;
+
+ printf("CHART %s.%s '' 'DTLB cache operations' 'events/s' %s '' line %d %d %s\n"
+ , RRD_TYPE_PERF
+ , "dtlb_cache"
+ , RRD_FAMILY_CACHE
+ , NETDATA_CHART_PRIO_PERF_DTLB
+ , update_every
+ , PLUGIN_PERF_NAME
+ );
+ printf("DIMENSION %s '' absolute 1 1\n", "read_access");
+ printf("DIMENSION %s '' absolute 1 1\n", "read_misses");
+ printf("DIMENSION %s '' absolute -1 1\n", "write_access");
+ printf("DIMENSION %s '' absolute -1 1\n", "write_misses");
+ }
+
+ printf(
+ "BEGIN %s.%s\n"
+ , RRD_TYPE_PERF
+ , "dtlb_cache"
+ );
+ if(likely(perf_events[EV_ID_DTLB_READ_ACCESS].updated)) {
+ printf(
+ "SET %s = %lld\n"
+ , "read_access"
+ , (collected_number) perf_events[EV_ID_DTLB_READ_ACCESS].value
+ );
+ }
+ if(likely(perf_events[EV_ID_DTLB_READ_MISS].updated)) {
+ printf(
+ "SET %s = %lld\n"
+ , "read_misses"
+ , (collected_number) perf_events[EV_ID_DTLB_READ_MISS].value
+ );
+ }
+ if(likely(perf_events[EV_ID_DTLB_WRITE_ACCESS].updated)) {
+ printf(
+ "SET %s = %lld\n"
+ , "write_access"
+ , (collected_number) perf_events[EV_ID_DTLB_WRITE_ACCESS].value
+ );
+ }
+ if(likely(perf_events[EV_ID_DTLB_WRITE_MISS].updated)) {
+ printf(
+ "SET %s = %lld\n"
+ , "write_misses"
+ , (collected_number) perf_events[EV_ID_DTLB_WRITE_MISS].value
+ );
+ }
+ printf("END\n");
+ }
+
+ // ------------------------------------------------------------------------
+
+ if(likely(perf_events[EV_ID_ITLB_READ_ACCESS].updated || perf_events[EV_ID_ITLB_READ_MISS].updated)) {
+ if(unlikely(!ITLB_chart_generated)) {
+ ITLB_chart_generated = 1;
+
+ printf("CHART %s.%s '' 'ITLB cache operations' 'events/s' %s '' line %d %d %s\n"
+ , RRD_TYPE_PERF
+ , "itlb_cache"
+ , RRD_FAMILY_CACHE
+ , NETDATA_CHART_PRIO_PERF_ITLB
+ , update_every
+ , PLUGIN_PERF_NAME
+ );
+ printf("DIMENSION %s '' absolute 1 1\n", "read_access");
+ printf("DIMENSION %s '' absolute 1 1\n", "read_misses");
+ }
+
+ printf(
+ "BEGIN %s.%s\n"
+ , RRD_TYPE_PERF
+ , "itlb_cache"
+ );
+ if(likely(perf_events[EV_ID_ITLB_READ_ACCESS].updated)) {
+ printf(
+ "SET %s = %lld\n"
+ , "read_access"
+ , (collected_number) perf_events[EV_ID_ITLB_READ_ACCESS].value
+ );
+ }
+ if(likely(perf_events[EV_ID_ITLB_READ_MISS].updated)) {
+ printf(
+ "SET %s = %lld\n"
+ , "read_misses"
+ , (collected_number) perf_events[EV_ID_ITLB_READ_MISS].value
+ );
+ }
+ printf("END\n");
+ }
+
+ // ------------------------------------------------------------------------
+
+ if(likely(perf_events[EV_ID_PBU_READ_ACCESS].updated)) {
+ if(unlikely(!PBU_chart_generated)) {
+ PBU_chart_generated = 1;
+
+ printf("CHART %s.%s '' 'PBU cache operations' 'events/s' %s '' line %d %d %s\n"
+ , RRD_TYPE_PERF
+ , "pbu_cache"
+ , RRD_FAMILY_CACHE
+ , NETDATA_CHART_PRIO_PERF_PBU
+ , update_every
+ , PLUGIN_PERF_NAME
+ );
+ printf("DIMENSION %s '' absolute 1 1\n", "read_access");
+ }
+
+ printf(
+ "BEGIN %s.%s\n"
+ , RRD_TYPE_PERF
+ , "pbu_cache"
+ );
+ printf(
+ "SET %s = %lld\n"
+ , "read_access"
+ , (collected_number) perf_events[EV_ID_PBU_READ_ACCESS].value
+ );
+ printf("END\n");
+ }
+}
+
+void parse_command_line(int argc, char **argv) {
+ int i, plugin_enabled = 0;
+
+ for(i = 1; i < argc ; i++) {
+ if(isdigit(*argv[i]) && !freq) {
+ int n = str2i(argv[i]);
+ if(n > 0 && n < 86400) {
+ freq = n;
+ continue;
+ }
+ }
+ else if(strcmp("version", argv[i]) == 0 || strcmp("-version", argv[i]) == 0 || strcmp("--version", argv[i]) == 0 || strcmp("-v", argv[i]) == 0 || strcmp("-V", argv[i]) == 0) {
+ printf("perf.plugin %s\n", VERSION);
+ exit(0);
+ }
+ else if(strcmp("all", argv[i]) == 0) {
+ struct perf_event *current_event = NULL;
+
+ for(current_event = &perf_events[0]; current_event->id != EV_ID_END; current_event++)
+ current_event->disabled = 0;
+
+ plugin_enabled = 1;
+ continue;
+ }
+ else if(strcmp("cycles", argv[i]) == 0) {
+ perf_events[EV_ID_CPU_CYCLES].disabled = 0;
+ perf_events[EV_ID_REF_CPU_CYCLES].disabled = 0;
+ plugin_enabled = 1;
+ continue;
+ }
+ else if(strcmp("instructions", argv[i]) == 0) {
+ perf_events[EV_ID_INSTRUCTIONS].disabled = 0;
+ plugin_enabled = 1;
+ continue;
+ }
+ else if(strcmp("branch", argv[i]) == 0) {
+ perf_events[EV_ID_BRANCH_INSTRUCTIONS].disabled = 0;
+ perf_events[EV_ID_BRANCH_MISSES].disabled = 0;
+ plugin_enabled = 1;
+ continue;
+ }
+ else if(strcmp("cache", argv[i]) == 0) {
+ perf_events[EV_ID_CACHE_REFERENCES].disabled = 0;
+ perf_events[EV_ID_CACHE_MISSES].disabled = 0;
+ plugin_enabled = 1;
+ continue;
+ }
+ else if(strcmp("bus", argv[i]) == 0) {
+ perf_events[EV_ID_BUS_CYCLES].disabled = 0;
+ plugin_enabled = 1;
+ continue;
+ }
+ else if(strcmp("stalled", argv[i]) == 0) {
+ perf_events[EV_ID_STALLED_CYCLES_FRONTEND].disabled = 0;
+ perf_events[EV_ID_STALLED_CYCLES_BACKEND].disabled = 0;
+ plugin_enabled = 1;
+ continue;
+ }
+ else if(strcmp("migrations", argv[i]) == 0) {
+ perf_events[EV_ID_CPU_MIGRATIONS].disabled = 0;
+ plugin_enabled = 1;
+ continue;
+ }
+        else if(strcmp("alignment", argv[i]) == 0) {
+ perf_events[EV_ID_ALIGNMENT_FAULTS].disabled = 0;
+ plugin_enabled = 1;
+ continue;
+ }
+ else if(strcmp("emulation", argv[i]) == 0) {
+ perf_events[EV_ID_EMULATION_FAULTS].disabled = 0;
+ plugin_enabled = 1;
+ continue;
+ }
+ else if(strcmp("L1D", argv[i]) == 0) {
+ perf_events[EV_ID_L1D_READ_ACCESS].disabled = 0;
+ perf_events[EV_ID_L1D_READ_MISS].disabled = 0;
+ perf_events[EV_ID_L1D_WRITE_ACCESS].disabled = 0;
+ perf_events[EV_ID_L1D_WRITE_MISS].disabled = 0;
+ plugin_enabled = 1;
+ continue;
+ }
+ else if(strcmp("L1D-prefetch", argv[i]) == 0) {
+ perf_events[EV_ID_L1D_PREFETCH_ACCESS].disabled = 0;
+ plugin_enabled = 1;
+ continue;
+ }
+ else if(strcmp("L1I", argv[i]) == 0) {
+ perf_events[EV_ID_L1I_READ_ACCESS].disabled = 0;
+ perf_events[EV_ID_L1I_READ_MISS].disabled = 0;
+ plugin_enabled = 1;
+ continue;
+ }
+ else if(strcmp("LL", argv[i]) == 0) {
+ perf_events[EV_ID_LL_READ_ACCESS].disabled = 0;
+ perf_events[EV_ID_LL_READ_MISS].disabled = 0;
+ perf_events[EV_ID_LL_WRITE_ACCESS].disabled = 0;
+ perf_events[EV_ID_LL_WRITE_MISS].disabled = 0;
+ plugin_enabled = 1;
+ continue;
+ }
+ else if(strcmp("DTLB", argv[i]) == 0) {
+ perf_events[EV_ID_DTLB_READ_ACCESS].disabled = 0;
+ perf_events[EV_ID_DTLB_READ_MISS].disabled = 0;
+ perf_events[EV_ID_DTLB_WRITE_ACCESS].disabled = 0;
+ perf_events[EV_ID_DTLB_WRITE_MISS].disabled = 0;
+ plugin_enabled = 1;
+ continue;
+ }
+ else if(strcmp("ITLB", argv[i]) == 0) {
+ perf_events[EV_ID_ITLB_READ_ACCESS].disabled = 0;
+ perf_events[EV_ID_ITLB_READ_MISS].disabled = 0;
+ plugin_enabled = 1;
+ continue;
+ }
+ else if(strcmp("PBU", argv[i]) == 0) {
+ perf_events[EV_ID_PBU_READ_ACCESS].disabled = 0;
+ plugin_enabled = 1;
+ continue;
+ }
+ else if(strcmp("debug", argv[i]) == 0) {
+ debug = 1;
+ continue;
+ }
+ else if(strcmp("-h", argv[i]) == 0 || strcmp("--help", argv[i]) == 0) {
+ fprintf(stderr,
+ "\n"
+ " netdata perf.plugin %s\n"
+ " Copyright (C) 2019 Netdata Inc.\n"
+ " Released under GNU General Public License v3 or later.\n"
+ " All rights reserved.\n"
+ "\n"
+ " This program is a data collector plugin for netdata.\n"
+ "\n"
+ " Available command line options:\n"
+ "\n"
+ " COLLECTION_FREQUENCY data collection frequency in seconds\n"
+ " minimum: %d\n"
+ "\n"
+ " all enable all charts\n"
+ "\n"
+ " cycles enable CPU cycles chart\n"
+ "\n"
+ " instructions enable Instructions chart\n"
+ "\n"
+ " branch enable Branch instructions chart\n"
+ "\n"
+ " cache enable Cache operations chart\n"
+ "\n"
+ " bus enable Bus cycles chart\n"
+ "\n"
+ " stalled enable Stalled frontend and backend cycles chart\n"
+ "\n"
+ " migrations enable CPU migrations chart\n"
+ "\n"
+                "  alignment            enable Alignment faults chart\n"
+ "\n"
+ " emulation enable Emulation faults chart\n"
+ "\n"
+ " L1D enable L1D cache operations chart\n"
+ "\n"
+ " L1D-prefetch enable L1D prefetch cache operations chart\n"
+ "\n"
+ " L1I enable L1I cache operations chart\n"
+ "\n"
+ " LL enable LL cache operations chart\n"
+ "\n"
+ " DTLB enable DTLB cache operations chart\n"
+ "\n"
+ " ITLB enable ITLB cache operations chart\n"
+ "\n"
+ " PBU enable PBU cache operations chart\n"
+ "\n"
+ " debug enable verbose output\n"
+ " default: disabled\n"
+ "\n"
+ " -v\n"
+ " -V\n"
+ " --version print version and exit\n"
+ "\n"
+ " -h\n"
+ " --help print this message and exit\n"
+ "\n"
+ " For more information:\n"
+ " https://github.com/netdata/netdata/tree/master/collectors/perf.plugin\n"
+ "\n"
+ , VERSION
+ , update_every
+ );
+ exit(1);
+ }
+
+ error("ignoring parameter '%s'", argv[i]);
+ }
+
+ if(!plugin_enabled){
+ info("no charts enabled - nothing to do.");
+ printf("DISABLE\n");
+ exit(1);
+ }
+}
+
+int main(int argc, char **argv) {
+
+ // ------------------------------------------------------------------------
+ // initialization of netdata plugin
+
+ program_name = "perf.plugin";
+
+ // disable syslog
+ error_log_syslog = 0;
+
+ // set errors flood protection to 100 logs per hour
+ error_log_errors_per_period = 100;
+ error_log_throttle_period = 3600;
+
+ parse_command_line(argc, argv);
+
+ errno = 0;
+
+ if(freq >= update_every)
+ update_every = freq;
+ else if(freq)
+ error("update frequency %d seconds is too small for PERF. Using %d.", freq, update_every);
+
+ if(unlikely(debug)) fprintf(stderr, "perf.plugin: calling perf_init()\n");
+ int perf = !perf_init();
+
+ // ------------------------------------------------------------------------
+ // the main loop
+
+ if(unlikely(debug)) fprintf(stderr, "perf.plugin: starting data collection\n");
+
+ time_t started_t = now_monotonic_sec();
+
+ size_t iteration;
+ usec_t step = update_every * USEC_PER_SEC;
+
+ heartbeat_t hb;
+ heartbeat_init(&hb);
+ for(iteration = 0; 1; iteration++) {
+ usec_t dt = heartbeat_next(&hb, step);
+
+ if(unlikely(netdata_exit)) break;
+
+ if(unlikely(debug && iteration))
+ fprintf(stderr, "perf.plugin: iteration %zu, dt %llu usec\n"
+ , iteration
+ , dt
+ );
+
+ if(likely(perf)) {
+ if(unlikely(debug)) fprintf(stderr, "perf.plugin: calling perf_collect()\n");
+ perf = !perf_collect();
+
+ if(likely(perf)) {
+ if(unlikely(debug)) fprintf(stderr, "perf.plugin: calling perf_send_metrics()\n");
+ perf_send_metrics();
+ }
+ }
+
+ fflush(stdout);
+
+        // restart check: exit after 14400 seconds (4 hours), so the netdata daemon starts a fresh process
+        if(now_monotonic_sec() - started_t > 14400) break;
+ }
+
+ info("process exiting");
+ perf_free();
+}
diff --git a/collectors/plugins.d/Makefile.am b/collectors/plugins.d/Makefile.am
new file mode 100644
index 0000000..59250a9
--- /dev/null
+++ b/collectors/plugins.d/Makefile.am
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+SUBDIRS = \
+ $(NULL)
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/collectors/plugins.d/README.md b/collectors/plugins.d/README.md
new file mode 100644
index 0000000..c166e11
--- /dev/null
+++ b/collectors/plugins.d/README.md
@@ -0,0 +1,484 @@
+<!--
+title: "External plugins overview"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/plugins.d/README.md
+-->
+
+# External plugins overview
+
+`plugins.d` is the Netdata internal plugin that collects metrics
+from external processes, thus allowing Netdata to use **external plugins**.
+
+## Provided External Plugins
+
+|plugin|language|O/S|description|
+|:----:|:------:|:-:|:----------|
+|[apps.plugin](/collectors/apps.plugin/README.md)|`C`|linux, freebsd|monitors the whole process tree on Linux and FreeBSD and breaks down system resource usage by **process**, **user** and **user group**.|
+|[charts.d.plugin](/collectors/charts.d.plugin/README.md)|`BASH`|all|a **plugin orchestrator** for data collection modules written in `BASH` v4+.|
+|[cups.plugin](/collectors/cups.plugin/README.md)|`C`|all|monitors **CUPS**.|
+|[fping.plugin](/collectors/fping.plugin/README.md)|`C`|all|measures network latency, jitter and packet loss between the monitored node and any number of remote network end points.|
+|[ioping.plugin](/collectors/ioping.plugin/README.md)|`C`|all|measures disk latency.|
+|[freeipmi.plugin](/collectors/freeipmi.plugin/README.md)|`C`|linux|collects metrics from enterprise hardware sensors, on Linux servers.|
+|[nfacct.plugin](/collectors/nfacct.plugin/README.md)|`C`|linux|collects netfilter firewall, connection tracker and accounting metrics using `libmnl` and `libnetfilter_acct`.|
+|[xenstat.plugin](/collectors/xenstat.plugin/README.md)|`C`|linux|collects XenServer and XCP-ng metrics using `libxenstat`.|
+|[perf.plugin](/collectors/perf.plugin/README.md)|`C`|linux|collects CPU performance metrics using performance monitoring units (PMU).|
+|[node.d.plugin](/collectors/node.d.plugin/README.md)|`node.js`|all|a **plugin orchestrator** for data collection modules written in `node.js`.|
+|[python.d.plugin](/collectors/python.d.plugin/README.md)|`python`|all|a **plugin orchestrator** for data collection modules written in `python` v2 or v3 (both are supported).|
+|[slabinfo.plugin](/collectors/slabinfo.plugin/README.md)|`C`|linux|collects kernel internal cache objects (SLAB) metrics.|
+
+Plugin orchestrators may also be described as **modular plugins**. They are modular because they accept custom-made modules. Writing modules for these plugins is easier than accessing the native Netdata API directly. You will find modules already available for each orchestrator under the directory of the particular modular plugin (e.g. under python.d.plugin for the python orchestrator).
+Each of these modular plugins has its own method for defining modules. Please check the examples and their documentation.
+
+## Motivation
+
+This plugin allows Netdata to use **external plugins** for data collection:
+
+1. external data collection plugins may be written in any computer language.
+
+2. external data collection plugins may use O/S capabilities or `setuid` to
+ run with escalated privileges (compared to the `netdata` daemon).
+ The communication between the external plugin and Netdata is unidirectional
+ (from the plugin to Netdata), so that Netdata cannot manipulate an external
+ plugin running with escalated privileges.
+
+## Operation
+
+Each of the external plugins is expected to run forever.
+Netdata will start each plugin when Netdata starts and stop it when Netdata exits.
+
+If the external plugin exits or crashes, Netdata will log an error.
+If the external plugin exits or crashes without pushing metrics to Netdata, Netdata will not start it again.
+
+- Plugins that exit with any value other than zero will be disabled. Plugins that exit with zero will be restarted after some time.
+- Plugins may also be disabled by Netdata if they output things that Netdata does not understand.
+
+The `stdout` of external plugins is connected to Netdata to receive metrics,
+with the API defined below.
+
+The `stderr` of external plugins is connected to Netdata's `error.log`.
+
+Plugins can create any number of charts with any number of dimensions each. Each chart can have its own characteristics independently of the others generated by the same plugin. For example, one chart may have an update frequency of 1 second, another may have 5 seconds and a third may have 10 seconds.
+
+## Configuration
+
+Netdata will supply the environment variables `NETDATA_USER_CONFIG_DIR` (for user-supplied configuration) and `NETDATA_STOCK_CONFIG_DIR` (for Netdata-supplied configuration) to identify the directories where configuration files are stored. It is up to the plugin to read the configuration it needs.
+
+The `[plugins]` section of `netdata.conf` contains a list of all the plugins found on the system where Netdata runs, with a boolean setting to enable or disable each of them.
+
+Example:
+
+```
+[plugins]
+ # enable running new plugins = yes
+ # check for new plugins every = 60
+
+ # charts.d = yes
+ # fping = yes
+ # ioping = yes
+ # node.d = yes
+ # python.d = yes
+```
+
+The setting `enable running new plugins` sets the default behavior for all external plugins. It can be
+overridden for individual plugins by setting the corresponding plugin's value to either `yes` or `no`.
+
+The setting `check for new plugins every` sets the interval between scans of the directory
+`/usr/libexec/netdata/plugins.d`. New plugins can be added any time, and Netdata will detect them in a timely manner.
+
+For each of the external plugins enabled, another `netdata.conf` section
+is created, in the form of `[plugin:NAME]`, where `NAME` is the name of the external plugin.
+This section allows controlling the update frequency of the plugin and providing
+additional command line arguments to it.
+
+For example, for `apps.plugin` the following section is available:
+
+```
+[plugin:apps]
+ # update every = 1
+ # command options =
+```
+
+- `update every` controls the granularity of the external plugin.
+- `command options` allows giving additional command line options to the plugin.
+
+Netdata will provide to the external plugins the environment variable `NETDATA_UPDATE_EVERY`, in seconds (the default is 1). This is the **minimum update frequency** for all charts. A plugin that updates values more frequently than this is just wasting resources.
+
+Netdata will call the plugin with just one command line parameter: the number of seconds the user requested this plugin to update its data (by default this is also 1), as in the example below.
+
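+As a concrete (hypothetical) example, a plugin installed as
+`/usr/libexec/netdata/plugins.d/example.plugin`, with `update every = 5` in its
+`[plugin:example]` section, is started by Netdata (per the `"exec %s %d %s"`
+command built in `plugins_d.c`, later in this patch) as:
+
+```
+exec /usr/libexec/netdata/plugins.d/example.plugin 5
+```
+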
+Other than the above, the plugin configuration is up to the plugin.
+
+Keep in mind that the user may use Netdata configuration to overwrite chart and dimension parameters. This is transparent to the plugin.
+
+### Autoconfiguration
+
+Plugins should attempt to autoconfigure themselves when possible.
+
+For example, if your plugin wants to monitor `squid`, you can search for it on port `3128` or `8080`. If either succeeds, you can proceed. If both fail, you can output an error (on stderr) saying that you cannot find `squid` running and giving instructions about the plugin configuration. Then you can stop (exit with a non-zero value), so that Netdata will not attempt to start the plugin again. A minimal sketch of this pattern follows.
+
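+A minimal sketch of this pattern in C, assuming a hypothetical `squid.plugin` (the probe logic, ports and messages are illustrative, not from the Netdata sources):
+
+```c
+#include <stdio.h>
+#include <string.h>
+#include <stdint.h>
+#include <unistd.h>
+#include <netinet/in.h>
+#include <arpa/inet.h>
+#include <sys/socket.h>
+
+/* return 1 if something accepts TCP connections on 127.0.0.1:port */
+static int port_is_open(uint16_t port) {
+    int fd = socket(AF_INET, SOCK_STREAM, 0);
+    if (fd == -1) return 0;
+
+    struct sockaddr_in sa;
+    memset(&sa, 0, sizeof(sa));
+    sa.sin_family = AF_INET;
+    sa.sin_port = htons(port);
+    sa.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
+
+    int ok = (connect(fd, (struct sockaddr *)&sa, sizeof(sa)) == 0);
+    close(fd);
+    return ok;
+}
+
+int main(void) {
+    if (!port_is_open(3128) && !port_is_open(8080)) {
+        fprintf(stderr, "squid.plugin: cannot find squid on 127.0.0.1 ports 3128 or 8080; "
+                        "configure it and restart netdata to enable this plugin\n");
+        return 1; /* non-zero exit: Netdata will not start the plugin again */
+    }
+
+    /* squid found - print CHART/DIMENSION definitions and enter the collection loop */
+    return 0;
+}
+```
+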
+## External Plugins API
+
+Any program that can print a few values to its standard output can become a Netdata external plugin.
+
+Netdata parses lines starting with one of these 7 keywords:
+
+- `CHART` - create or update a chart
+- `DIMENSION` - add or update a dimension to the chart just created
+- `BEGIN` - initialize data collection for a chart
+- `SET` - set the value of a dimension for the initialized chart
+- `END` - complete data collection for the initialized chart
+- `FLUSH` - ignore the last collected values
+- `DISABLE` - disable this plugin
+
+A single program can produce any number of charts, each with any number of dimensions.
+
+Charts can be added at any time (not just at the beginning).
+
+### command line parameters
+
+The plugin **MUST** accept just **one** parameter: **the number of seconds it is
+expected to update the values for its charts**. The value passed by Netdata
+to the plugin is controlled via its configuration file (so there is no need
+for the plugin to handle this configuration option).
+
+The external plugin can overwrite the update frequency. For example, the server may
+request per second updates, but the plugin may ignore it and update its charts
+every 5 seconds.
+
+### environment variables
+
+There are a few environment variables that are set by `netdata` and are
+available for the plugin to use.
+
+|variable|description|
+|:------:|:----------|
+|`NETDATA_USER_CONFIG_DIR`|The directory where all Netdata-related user configuration should be stored. If the plugin requires custom user configuration, this is the place the user has saved it (normally under `/etc/netdata`).|
+|`NETDATA_STOCK_CONFIG_DIR`|The directory where all Netdata-related stock configuration should be stored. If the plugin is shipped with configuration files, this is the place they can be found (normally under `/usr/lib/netdata/conf.d`).|
+|`NETDATA_PLUGINS_DIR`|The directory where all Netdata plugins are stored.|
+|`NETDATA_WEB_DIR`|The directory where the web files of Netdata are saved.|
+|`NETDATA_CACHE_DIR`|The directory where the cache files of Netdata are stored. Use this directory if the plugin requires a place to store data. A new directory should be created for the plugin for this purpose, inside this directory.|
+|`NETDATA_LOG_DIR`|The directory where the log files are stored. By default the `stderr` output of the plugin will be saved in the `error.log` file of Netdata.|
+|`NETDATA_HOST_PREFIX`|This is used in environments where system directories like `/sys` and `/proc` have to be accessed at a different path.|
+|`NETDATA_DEBUG_FLAGS`|This is a number (probably in hex, starting with `0x`) that enables certain Netdata debugging features. Check **Tracing Options** for more information.|
+|`NETDATA_UPDATE_EVERY`|The minimum number of seconds between chart refreshes. This is like the **internal clock** of Netdata (it is user configurable, defaulting to `1`). There is no point in a plugin updating its values more frequently than this number of seconds.|
+
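+As an illustration (a sketch, not from the Netdata sources; `example.plugin` is a hypothetical name), a C plugin could pick its collection frequency from the command line and fall back to `NETDATA_UPDATE_EVERY`:
+
+```c
+#include <stdio.h>
+#include <stdlib.h>
+
+int main(int argc, char **argv) {
+    int update_every = 1;
+
+    /* fall back to the environment variable provided by netdata */
+    const char *env = getenv("NETDATA_UPDATE_EVERY");
+    if (env && atoi(env) > 0)
+        update_every = atoi(env);
+
+    /* the command line parameter (see above) takes precedence */
+    if (argc > 1 && atoi(argv[1]) > 0)
+        update_every = atoi(argv[1]);
+
+    fprintf(stderr, "example.plugin: collecting every %d seconds\n", update_every);
+    return 0;
+}
+```
+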
+### The output of the plugin
+
+The plugin should output instructions for Netdata to its output (`stdout`). Since this uses pipes, please make sure you flush stdout after every iteration.
+
+#### DISABLE
+
+`DISABLE` will disable this plugin. This will prevent Netdata from restarting the plugin. You can also exit with the value `1` to have the same effect.
+
+#### CHART
+
+`CHART` defines a new chart.
+
+The template is:
+
+> CHART type.id name title units \[family \[context \[charttype \[priority \[update_every \[options \[plugin [module]]]]]]]]
+
+ where:
+
+- `type.id`
+
+ uniquely identifies the chart,
+ this is what will be needed to add values to the chart
+
+ the `type` part controls the menu the charts will appear in
+
+- `name`
+
+  is the name that will be presented to the user instead of `id` in `type.id`. This means that only the `id` part of `type.id` is changed. When a name has been given, the chart is indexed (and can be referred to) as both `type.id` and `type.name`. You can set name to `''`, or `null`, or `(null)` to disable it.
+
+- `title`
+
+ the text above the chart
+
+- `units`
+
+ the label of the vertical axis of the chart,
+ all dimensions added to a chart should have the same units
+ of measurement
+
+- `family`
+
+ is used to group charts together
+ (for example all eth0 charts should say: eth0),
+ if empty or missing, the `id` part of `type.id` will be used
+
+ this controls the sub-menu on the dashboard
+
+- `context`
+
+  the context gives the template of the chart. For example, if multiple charts present the same information for a different family, they should have the same `context`
+
+  this is used for looking up rendering information for the chart (colors, sizes, informational texts) and also for applying alarms to it
+
+- `charttype`
+
+ one of `line`, `area` or `stacked`,
+  if empty or missing, `line` will be used
+
+- `priority`
+
+ is the relative priority of the charts as rendered on the web page,
+ lower numbers make the charts appear before the ones with higher numbers,
+ if empty or missing, `1000` will be used
+
+- `update_every`
+
+ overwrite the update frequency set by the server,
+ if empty or missing, the user configured value will be used
+
+- `options`
+
+  a space separated list of options, enclosed in quotes. 4 options are currently supported: `obsolete` to mark a chart as obsolete (Netdata will hide it and delete it after some time), `detail` to mark a chart as insignificant (this may be used by dashboards to make the chart smaller, or otherwise visualize it as less important), `store_first` to make Netdata store the first collected value, assuming there was an invisible previous value set to zero (this is used by statsd charts - if the first collected value of incremental dimensions is not zero based, unrealistic spikes will appear with this option set) and `hidden` to perform all operations on a chart, but not offer it on dashboards (the chart will still be sent to backends). `CHART` options were added in Netdata v1.7 and the `hidden` option was added in v1.10.
+
+- `plugin` and `module`
+
+  both are just names that are used to let the user identify the plugin and the module that generated the chart. If `plugin` is unset or empty, Netdata will automatically set the filename of the plugin that generated the chart. `module` has no default.
+
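+For example, a (hypothetical) chart definition, where `example.random` is the `type.id`, the chart has no `name`, and it is a `line` chart in family `random` with context `random.number`, priority `90000`, updated every second:
+
+```
+CHART example.random '' 'A random number' 'value' random random.number line 90000 1
+```
+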
+#### DIMENSION
+
+`DIMENSION` defines a new dimension for the chart.
+
+The template is:
+
+> DIMENSION id \[name \[algorithm \[multiplier \[divisor [options]]]]]
+
+ where:
+
+- `id`
+
+ the `id` of this dimension (it is a text value, not numeric),
+ this will be needed later to add values to the dimension
+
+  We suggest avoiding `.` in dimension ids. Backends expect metrics to be `.` separated and people will get confused if a dimension id contains a dot.
+
+- `name`
+
+ the name of the dimension as it will appear at the legend of the chart,
+ if empty or missing the `id` will be used
+
+- `algorithm`
+
+ one of:
+
+ - `absolute`
+
+    the value is drawn as-is (interpolated to the second boundary);
+    if `algorithm` is empty, invalid or missing, `absolute` is used
+
+ - `incremental`
+
+ the value increases over time,
+ the difference from the last value is presented in the chart,
+ the server interpolates the value and calculates a per second figure
+
+ - `percentage-of-absolute-row`
+
+ the % of this value compared to the total of all dimensions
+
+ - `percentage-of-incremental-row`
+
+ the % of this value compared to the incremental total of
+ all dimensions
+
+- `multiplier`
+
+ an integer value to multiply the collected value,
+ if empty or missing, `1` is used
+
+- `divisor`
+
+ an integer value to divide the collected value,
+ if empty or missing, `1` is used
+
+- `options`
+
+  a space separated list of options, enclosed in quotes. Options supported: `obsolete` to mark a dimension as obsolete (Netdata will delete it after some time) and `hidden` to make this dimension hidden: it will take part in the calculations but will not be presented in the chart.
+
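+Two (hypothetical) example dimensions - one reported as-is, and one converting a byte counter to kilobits/s via the `incremental` algorithm with multiplier `8` and divisor `1024`:
+
+```
+DIMENSION random1 '' absolute 1 1
+DIMENSION received in incremental 8 1024
+```
+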
+#### VARIABLE
+
+> VARIABLE [SCOPE] name = value
+
+`VARIABLE` defines a variable that can be used in alarms. This is used for setting constants (like the max connections a server may accept).
+
+Variables support 2 scopes:
+
+- `GLOBAL` or `HOST` to define the variable at the host level.
+- `LOCAL` or `CHART` to define the variable at the chart level. Use chart-local variables when the same variable may exist for different charts (e.g. Netdata monitors 2 MySQL servers, and you need to set the `max_connections` each server accepts). Chart-local variables are ideal for building alarm templates.
+
+The position of the `VARIABLE` line sets its default scope (in case you do not specify a scope). So, defining a `VARIABLE` before any `CHART`, or between `END` and `BEGIN` (outside any chart), sets `GLOBAL` scope, while defining a `VARIABLE` just after a `CHART` or a `DIMENSION`, or within the `BEGIN` - `END` block of a chart, sets `LOCAL` scope.
+
+These variables can be set and updated at any point.
+
+Variable names should use alphanumeric characters, the `.` and the `_`.
+
+The `value` is floating point (Netdata uses `long double`).
+
+Variables are transferred to upstream Netdata servers (streaming and database replication).
+
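+For example (hypothetical values):
+
+```
+VARIABLE HOST system_cores = 4
+VARIABLE CHART max_connections = 512
+```
+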
+## Data collection
+
+Data collection is defined as a series of `BEGIN` -> `SET` -> `END` lines:
+
+> BEGIN type.id [microseconds]
+
+- `type.id`
+
+ is the unique identification of the chart (as given in `CHART`)
+
+- `microseconds`
+
+ is the number of microseconds since the last update of the chart. It is optional.
+
+ Under heavy system load, the system may have some latency transferring
+ data from the plugins to Netdata via the pipe. This number improves
+ accuracy significantly, since the plugin is able to calculate the
+ duration between its iterations better than Netdata.
+
+ The first time the plugin is started, no microseconds should be given
+ to Netdata.
+
+> SET id = value
+
+- `id`
+
+ is the unique identification of the dimension (of the chart just began)
+
+- `value`
+
+  is the collected value; only integer values are collected. If you want to push fractional values, multiply the value by a power of 10 and set the `DIMENSION` divisor to the same power of 10 (for example, report `23.45` as `SET id = 2345` with a divisor of `100`).
+
+> END
+
+  `END` does not take any parameters; it commits the collected values for all dimensions to the chart. If a dimension was not `SET`, its value will be empty for this commit.
+
+More `SET` lines may appear to update all the dimensions of the chart.
+All of them in one `BEGIN` -> `END` block.
+
+All `SET` lines within a single `BEGIN` -> `END` block have to refer to the
+same chart.
+
+If more charts need to be updated, each chart should have its own
+`BEGIN` -> `SET` -> `END` block.
+
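+Putting it together, one (hypothetical) collection iteration updating two charts, with the optional microseconds since the previous update, could look like:
+
+```
+BEGIN example.random 1000012
+SET random1 = 4
+END
+BEGIN example.requests 1000012
+SET requests = 12
+END
+```
+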
+If, for any reason, a plugin has issued a `BEGIN` but wants to cancel it,
+it can issue a `FLUSH`. The `FLUSH` command will instruct Netdata to ignore
+all the values collected since the last `BEGIN` command.
+
+If a plugin does not behave properly (outputs invalid lines, or does not
+follow these guidelines), it will be disabled by Netdata.
+
+### collected values
+
+Netdata will collect any **signed** value in the 64-bit range:
+`-9,223,372,036,854,775,808` to `+9,223,372,036,854,775,807`
+
+If a value is not collected, leave it empty, like this:
+
+`SET id =`
+
+or do not output the line at all.
+
+## Modular Plugins
+
+1. **python**, use `python.d.plugin`, there are many examples in the [python.d
+ directory](/collectors/python.d.plugin/README.md)
+
+   Python is ideal for Netdata plugins. It is a simple yet powerful way to collect data, and it has a small memory footprint, although it is not the most CPU-efficient option.
+
+2. **node.js**, use `node.d.plugin`, there are a few examples in the [node.d
+ directory](/collectors/node.d.plugin/README.md)
+
+ node.js is the fastest scripting language for collecting data. If your plugin needs to do a lot of work, compute values, etc, node.js is probably the best choice before moving to compiled code. Keep in mind though that node.js is not memory efficient; it will probably need more RAM compared to python.
+
+3. **BASH**, use `charts.d.plugin`, there are many examples in the [charts.d
+ directory](/collectors/charts.d.plugin/README.md)
+
+   BASH is the simplest scripting language for collecting values. It is the least efficient, though, in terms of CPU resources. You can use it to collect data quickly, but extensive use may consume a lot of system resources.
+
+4. **C**
+
+ Of course, C is the most efficient way of collecting data. This is why Netdata itself is written in C.
+
+## Writing Plugins Properly
+
+There are a few rules for writing plugins properly:
+
+1. Respect system resources
+
+ Pay special attention to efficiency:
+
+   - Initialize everything once, at the beginning. Initialization cost is paid only once: your plugin will most probably be started once and run forever. So, do whatever heavy operation is needed at the beginning, just once.
+ - Do the absolutely minimum while iterating to collect values repeatedly.
+ - If you need to connect to another server to collect values, avoid re-connects if possible. Connect just once, with keep-alive (for HTTP) enabled and collect values using the same connection.
+ - Avoid any CPU or memory heavy operation while collecting data. If you control memory allocation, avoid any memory allocation while iterating to collect values.
+   - Avoid running external commands when possible. If you are writing shell scripts, especially avoid pipes (each pipe is another fork, a very expensive operation).
+
+2. The best way to iterate at a constant pace is this pseudo code:
+
+```js
+ var update_every = argv[1] * 1000; /* seconds * 1000 = milliseconds */
+
+ readConfiguration();
+
+ if(!verifyWeCanCollectValues()) {
+ print "DISABLE";
+ exit(1);
+ }
+
+ createCharts(); /* print CHART and DIMENSION statements */
+
+ var loops = 0;
+ var last_run = 0;
+ var next_run = 0;
+ var dt_since_last_run = 0;
+ var now = 0;
+
+ FOREVER {
+ /* find the current time in milliseconds */
+ now = currentTimeStampInMilliseconds();
+
+ /*
+ * find the time of the next loop
+ * this makes sure we are always aligned
+ * with the Netdata daemon
+ */
+ next_run = now - (now % update_every) + update_every;
+
+ /*
+ * wait until it is time
+ * it is important to do it in a loop
+ * since many wait functions can be interrupted
+ */
+ while( now < next_run ) {
+ sleepMilliseconds(next_run - now);
+ now = currentTimeStampInMilliseconds();
+ }
+
+ /* calculate the time passed since the last run */
+ if ( loops > 0 )
+ dt_since_last_run = (now - last_run) * 1000; /* in microseconds */
+
+ /* prepare for the next loop */
+ last_run = now;
+ loops++;
+
+ /* do your magic here to collect values */
+ collectValues();
+
+ /* send the collected data to Netdata */
+ printValues(dt_since_last_run); /* print BEGIN, SET, END statements */
+ }
+```
+
+ Using the above procedure, your plugin will be synchronized to start data collection on steps of `update_every`. There will be no need to keep track of latencies in data collection.
+
+ Netdata interpolates values to second boundaries, so even if your plugin is not perfectly aligned it does not matter. Netdata will find out. When your plugin works in increments of `update_every`, there will be no gaps in the charts due to the possible cumulative micro-delays in data collection. Gaps will only appear if the data collection is really delayed.
+
+3. If you are not sure your plugin is free of memory leaks, exit every hour; Netdata will restart your process (see the sketch after this list).
+
+4. If possible, try to autodetect if your plugin should be enabled, without any configuration.
+
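+A minimal sketch of point 3, assuming a hypothetical `example.heartbeat` chart (`perf.plugin`, earlier in this patch, applies the same trick with a 14400-second limit in its main loop):
+
+```c
+#include <stdio.h>
+#include <time.h>
+#include <unistd.h>
+
+int main(void) {
+    int update_every = 1;              /* normally parsed from argv[1] */
+    time_t started = time(NULL);
+
+    printf("CHART example.heartbeat '' 'Example heartbeat' 'beats' example '' line 90000 1\n");
+    printf("DIMENSION beat '' absolute 1 1\n");
+
+    for (;;) {
+        printf("BEGIN example.heartbeat\nSET beat = 1\nEND\n");
+        fflush(stdout);                /* stdout is a pipe - always flush */
+
+        if (time(NULL) - started > 3600)
+            return 0;                  /* exit code 0: Netdata restarts the plugin */
+
+        sleep((unsigned int)update_every);
+    }
+}
+```
+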
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fplugins.d%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/plugins.d/plugins_d.c b/collectors/plugins.d/plugins_d.c
new file mode 100644
index 0000000..42889fa
--- /dev/null
+++ b/collectors/plugins.d/plugins_d.c
@@ -0,0 +1,403 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "plugins_d.h"
+#include "pluginsd_parser.h"
+
+char *plugin_directories[PLUGINSD_MAX_DIRECTORIES] = { NULL };
+struct plugind *pluginsd_root = NULL;
+
+inline int pluginsd_space(char c) {
+ switch(c) {
+ case ' ':
+ case '\t':
+ case '\r':
+ case '\n':
+ case '=':
+ return 1;
+
+ default:
+ return 0;
+ }
+}
+
+inline int config_isspace(char c)
+{
+ switch (c) {
+ case ' ':
+ case '\t':
+ case '\r':
+ case '\n':
+ case ',':
+ return 1;
+
+ default:
+ return 0;
+ }
+}
+
+// split a text into words, respecting quotes
+static inline int quoted_strings_splitter(char *str, char **words, int max_words, int (*custom_isspace)(char), char *recover_input, char **recover_location, int max_recover)
+{
+ char *s = str, quote = 0;
+ int i = 0, j, rec = 0;
+ char *recover = recover_input;
+
+ // skip all white space
+ while (unlikely(custom_isspace(*s)))
+ s++;
+
+ // check for quote
+ if (unlikely(*s == '\'' || *s == '"')) {
+ quote = *s; // remember the quote
+ s++; // skip the quote
+ }
+
+ // store the first word
+ words[i++] = s;
+
+ // while we have something
+ while (likely(*s)) {
+ // if it is escape
+ if (unlikely(*s == '\\' && s[1])) {
+ s += 2;
+ continue;
+ }
+
+ // if it is quote
+ else if (unlikely(*s == quote)) {
+ quote = 0;
+ if (recover && rec < max_recover) {
+ recover_location[rec++] = s;
+ *recover++ = *s;
+ }
+ *s = ' ';
+ continue;
+ }
+
+ // if it is a space
+ else if (unlikely(quote == 0 && custom_isspace(*s))) {
+ // terminate the word
+ if (recover && rec < max_recover) {
+ if (!rec || (rec && recover_location[rec-1] != s)) {
+ recover_location[rec++] = s;
+ *recover++ = *s;
+ }
+ }
+ *s++ = '\0';
+
+ // skip all white space
+ while (likely(custom_isspace(*s)))
+ s++;
+
+ // check for quote
+ if (unlikely(*s == '\'' || *s == '"')) {
+ quote = *s; // remember the quote
+ s++; // skip the quote
+ }
+
+ // if we reached the end, stop
+ if (unlikely(!*s))
+ break;
+
+ // store the next word
+ if (likely(i < max_words))
+ words[i++] = s;
+ else
+ break;
+ }
+
+ // anything else
+ else
+ s++;
+ }
+
+ // terminate the words
+ j = i;
+ while (likely(j < max_words))
+ words[j++] = NULL;
+
+ return i;
+}
+
+inline int pluginsd_initialize_plugin_directories()
+{
+ char plugins_dirs[(FILENAME_MAX * 2) + 1];
+ static char *plugins_dir_list = NULL;
+
+ // Get the configuration entry
+ if (likely(!plugins_dir_list)) {
+ snprintfz(plugins_dirs, FILENAME_MAX * 2, "\"%s\" \"%s/custom-plugins.d\"", PLUGINS_DIR, CONFIG_DIR);
+ plugins_dir_list = strdupz(config_get(CONFIG_SECTION_GLOBAL, "plugins directory", plugins_dirs));
+ }
+
+ // Parse it and store it to plugin directories
+ return quoted_strings_splitter(plugins_dir_list, plugin_directories, PLUGINSD_MAX_DIRECTORIES, config_isspace, NULL, NULL, 0);
+}
+
+inline int pluginsd_split_words(char *str, char **words, int max_words, char *recover_input, char **recover_location, int max_recover)
+{
+ return quoted_strings_splitter(str, words, max_words, pluginsd_space, recover_input, recover_location, max_recover);
+}
+
+
+static void pluginsd_worker_thread_cleanup(void *arg)
+{
+ struct plugind *cd = (struct plugind *)arg;
+
+ if (cd->enabled && !cd->obsolete) {
+ cd->obsolete = 1;
+
+ info("data collection thread exiting");
+
+ if (cd->pid) {
+ siginfo_t info;
+ info("killing child process pid %d", cd->pid);
+ if (killpid(cd->pid) != -1) {
+ info("waiting for child process pid %d to exit...", cd->pid);
+ waitid(P_PID, (id_t)cd->pid, &info, WEXITED);
+ }
+ cd->pid = 0;
+ }
+ }
+}
+
+#define SERIAL_FAILURES_THRESHOLD 10
+static void pluginsd_worker_thread_handle_success(struct plugind *cd)
+{
+ if (likely(cd->successful_collections)) {
+ sleep((unsigned int)cd->update_every);
+ return;
+ }
+
+ if (likely(cd->serial_failures <= SERIAL_FAILURES_THRESHOLD)) {
+ info(
+ "'%s' (pid %d) does not generate useful output but it reports success (exits with 0). %s.",
+ cd->fullfilename, cd->pid,
+ cd->enabled ? "Waiting a bit before starting it again." : "Will not start it again - it is now disabled.");
+ sleep((unsigned int)(cd->update_every * 10));
+ return;
+ }
+
+ if (cd->serial_failures > SERIAL_FAILURES_THRESHOLD) {
+ error(
+            "'%s' (pid %d) does not generate useful output, although it reports success (exits with 0). "
+            "We have tried to collect something %zu times - unsuccessfully. Disabling it.",
+ cd->fullfilename, cd->pid, cd->serial_failures);
+ cd->enabled = 0;
+ return;
+ }
+
+ return;
+}
+
+static void pluginsd_worker_thread_handle_error(struct plugind *cd, int worker_ret_code)
+{
+ if (worker_ret_code == -1) {
+ info("'%s' (pid %d) was killed with SIGTERM. Disabling it.", cd->fullfilename, cd->pid);
+ cd->enabled = 0;
+ return;
+ }
+
+ if (!cd->successful_collections) {
+ error(
+            "'%s' (pid %d) exited with error code %d and hasn't collected any data. Disabling it.", cd->fullfilename,
+ cd->pid, worker_ret_code);
+ cd->enabled = 0;
+ return;
+ }
+
+ if (cd->serial_failures <= SERIAL_FAILURES_THRESHOLD) {
+ error(
+ "'%s' (pid %d) exited with error code %d, but has given useful output in the past (%zu times). %s",
+ cd->fullfilename, cd->pid, worker_ret_code, cd->successful_collections,
+ cd->enabled ? "Waiting a bit before starting it again." : "Will not start it again - it is disabled.");
+ sleep((unsigned int)(cd->update_every * 10));
+ return;
+ }
+
+ if (cd->serial_failures > SERIAL_FAILURES_THRESHOLD) {
+ error(
+            "'%s' (pid %d) exited with error code %d, but has given useful output in the past (%zu times). "
+            "We tried to restart it %zu times, but it failed to generate data. Disabling it.",
+ cd->fullfilename, cd->pid, worker_ret_code, cd->successful_collections, cd->serial_failures);
+ cd->enabled = 0;
+ return;
+ }
+
+ return;
+}
+#undef SERIAL_FAILURES_THRESHOLD
+
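+// run a single external plugin: spawn it, feed its stdout to the parser, and on exit decide whether to restart or disable it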
+void *pluginsd_worker_thread(void *arg)
+{
+ netdata_thread_cleanup_push(pluginsd_worker_thread_cleanup, arg);
+
+ struct plugind *cd = (struct plugind *)arg;
+
+ cd->obsolete = 0;
+ size_t count = 0;
+
+ while (!netdata_exit) {
+ FILE *fp = mypopen(cd->cmd, &cd->pid);
+ if (unlikely(!fp)) {
+ error("Cannot popen(\"%s\", \"r\").", cd->cmd);
+ break;
+ }
+
+ info("connected to '%s' running on pid %d", cd->fullfilename, cd->pid);
+ count = pluginsd_process(localhost, cd, fp, 0);
+ error("'%s' (pid %d) disconnected after %zu successful data collections (ENDs).", cd->fullfilename, cd->pid, count);
+ killpid(cd->pid);
+
+ int worker_ret_code = mypclose(fp, cd->pid);
+
+ if (likely(worker_ret_code == 0))
+ pluginsd_worker_thread_handle_success(cd);
+ else
+ pluginsd_worker_thread_handle_error(cd, worker_ret_code);
+
+ cd->pid = 0;
+ if (unlikely(!cd->enabled))
+ break;
+ }
+
+ netdata_thread_cleanup_pop(1);
+ return NULL;
+}
+
+static void pluginsd_main_cleanup(void *data)
+{
+ struct netdata_static_thread *static_thread = (struct netdata_static_thread *)data;
+ static_thread->enabled = NETDATA_MAIN_THREAD_EXITING;
+ info("cleaning up...");
+
+ struct plugind *cd;
+ for (cd = pluginsd_root; cd; cd = cd->next) {
+ if (cd->enabled && !cd->obsolete) {
+ info("stopping plugin thread: %s", cd->id);
+ netdata_thread_cancel(cd->thread);
+ }
+ }
+
+ info("cleanup completed.");
+ static_thread->enabled = NETDATA_MAIN_THREAD_EXITED;
+}
+
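+// the plugins.d main thread: periodically scans the configured plugin directories and spawns a worker thread for every enabled *.plugin file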
+void *pluginsd_main(void *ptr)
+{
+ netdata_thread_cleanup_push(pluginsd_main_cleanup, ptr);
+
+ int automatic_run = config_get_boolean(CONFIG_SECTION_PLUGINS, "enable running new plugins", 1);
+ int scan_frequency = (int)config_get_number(CONFIG_SECTION_PLUGINS, "check for new plugins every", 60);
+ if (scan_frequency < 1)
+ scan_frequency = 1;
+
+ // disable some plugins by default
+ config_get_boolean(CONFIG_SECTION_PLUGINS, "slabinfo", CONFIG_BOOLEAN_NO);
+
+ // store the errno for each plugins directory
+ // so that we don't log broken directories on each loop
+ int directory_errors[PLUGINSD_MAX_DIRECTORIES] = { 0 };
+
+ while (!netdata_exit) {
+ int idx;
+ const char *directory_name;
+
+ for (idx = 0; idx < PLUGINSD_MAX_DIRECTORIES && (directory_name = plugin_directories[idx]); idx++) {
+ if (unlikely(netdata_exit))
+ break;
+
+ errno = 0;
+ DIR *dir = opendir(directory_name);
+ if (unlikely(!dir)) {
+ if (directory_errors[idx] != errno) {
+ directory_errors[idx] = errno;
+ error("cannot open plugins directory '%s'", directory_name);
+ }
+ continue;
+ }
+
+ struct dirent *file = NULL;
+ while (likely((file = readdir(dir)))) {
+ if (unlikely(netdata_exit))
+ break;
+
+ debug(D_PLUGINSD, "examining file '%s'", file->d_name);
+
+ if (unlikely(strcmp(file->d_name, ".") == 0 || strcmp(file->d_name, "..") == 0))
+ continue;
+
+ int len = (int)strlen(file->d_name);
+ if (unlikely(len <= (int)PLUGINSD_FILE_SUFFIX_LEN))
+ continue;
+ if (unlikely(strcmp(PLUGINSD_FILE_SUFFIX, &file->d_name[len - (int)PLUGINSD_FILE_SUFFIX_LEN]) != 0)) {
+ debug(D_PLUGINSD, "file '%s' does not end in '%s'", file->d_name, PLUGINSD_FILE_SUFFIX);
+ continue;
+ }
+
+ char pluginname[CONFIG_MAX_NAME + 1];
+ snprintfz(pluginname, CONFIG_MAX_NAME, "%.*s", (int)(len - PLUGINSD_FILE_SUFFIX_LEN), file->d_name);
+ int enabled = config_get_boolean(CONFIG_SECTION_PLUGINS, pluginname, automatic_run);
+
+ if (unlikely(!enabled)) {
+ debug(D_PLUGINSD, "plugin '%s' is not enabled", file->d_name);
+ continue;
+ }
+
+ // check if it runs already
+ struct plugind *cd;
+ for (cd = pluginsd_root; cd; cd = cd->next)
+ if (unlikely(strcmp(cd->filename, file->d_name) == 0))
+ break;
+
+ if (likely(cd && !cd->obsolete)) {
+ debug(D_PLUGINSD, "plugin '%s' is already running", cd->filename);
+ continue;
+ }
+
+ // it is not running
+ // allocate a new one, or use the obsolete one
+ if (unlikely(!cd)) {
+ cd = callocz(sizeof(struct plugind), 1);
+
+ snprintfz(cd->id, CONFIG_MAX_NAME, "plugin:%s", pluginname);
+
+ strncpyz(cd->filename, file->d_name, FILENAME_MAX);
+ snprintfz(cd->fullfilename, FILENAME_MAX, "%s/%s", directory_name, cd->filename);
+
+ cd->enabled = enabled;
+ cd->update_every = (int)config_get_number(cd->id, "update every", localhost->rrd_update_every);
+ cd->started_t = now_realtime_sec();
+
+ char *def = "";
+ snprintfz(
+ cd->cmd, PLUGINSD_CMD_MAX, "exec %s %d %s", cd->fullfilename, cd->update_every,
+ config_get(cd->id, "command options", def));
+
+ // link it
+ if (likely(pluginsd_root))
+ cd->next = pluginsd_root;
+ pluginsd_root = cd;
+
+ // it is not currently running
+ cd->obsolete = 1;
+
+ if (cd->enabled) {
+ char tag[NETDATA_THREAD_TAG_MAX + 1];
+ snprintfz(tag, NETDATA_THREAD_TAG_MAX, "PLUGINSD[%s]", pluginname);
+ // spawn a new thread for it
+ netdata_thread_create(
+ &cd->thread, tag, NETDATA_THREAD_OPTION_DEFAULT, pluginsd_worker_thread, cd);
+ }
+ }
+ }
+
+ closedir(dir);
+ }
+
+ sleep((unsigned int)scan_frequency);
+ }
+
+ netdata_thread_cleanup_pop(1);
+ return NULL;
+}
diff --git a/collectors/plugins.d/plugins_d.h b/collectors/plugins.d/plugins_d.h
new file mode 100644
index 0000000..fd99b35
--- /dev/null
+++ b/collectors/plugins.d/plugins_d.h
@@ -0,0 +1,84 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_PLUGINS_D_H
+#define NETDATA_PLUGINS_D_H 1
+
+#include "../../daemon/common.h"
+
+#define NETDATA_PLUGIN_HOOK_PLUGINSD \
+ { \
+ .name = "PLUGINSD", \
+ .config_section = NULL, \
+ .config_name = NULL, \
+ .enabled = 1, \
+ .thread = NULL, \
+ .init_routine = NULL, \
+ .start_routine = pluginsd_main \
+ },
+
+
+#define PLUGINSD_FILE_SUFFIX ".plugin"
+#define PLUGINSD_FILE_SUFFIX_LEN strlen(PLUGINSD_FILE_SUFFIX)
+#define PLUGINSD_CMD_MAX (FILENAME_MAX*2)
+#define PLUGINSD_STOCK_PLUGINS_DIRECTORY_PATH 0
+
+#define PLUGINSD_KEYWORD_CHART "CHART"
+#define PLUGINSD_KEYWORD_DIMENSION "DIMENSION"
+#define PLUGINSD_KEYWORD_BEGIN "BEGIN"
+#define PLUGINSD_KEYWORD_END "END"
+#define PLUGINSD_KEYWORD_FLUSH "FLUSH"
+#define PLUGINSD_KEYWORD_DISABLE "DISABLE"
+#define PLUGINSD_KEYWORD_VARIABLE "VARIABLE"
+#define PLUGINSD_KEYWORD_LABEL "LABEL"
+#define PLUGINSD_KEYWORD_OVERWRITE "OVERWRITE"
+#define PLUGINSD_KEYWORD_GUID "GUID"
+#define PLUGINSD_KEYWORD_CONTEXT "CONTEXT"
+#define PLUGINSD_KEYWORD_TOMBSTONE "TOMBSTONE"
+#define PLUGINSD_KEYWORD_HOST "HOST"
+
+
+#define PLUGINSD_LINE_MAX 1024
+#define PLUGINSD_LINE_MAX_SSL_READ 512
+#define PLUGINSD_MAX_WORDS 20
+
+#define PLUGINSD_MAX_DIRECTORIES 20
+extern char *plugin_directories[PLUGINSD_MAX_DIRECTORIES];
+
+struct plugind {
+ char id[CONFIG_MAX_NAME+1]; // config node id
+
+ char filename[FILENAME_MAX+1]; // just the filename
+ char fullfilename[FILENAME_MAX+1]; // with path
+ char cmd[PLUGINSD_CMD_MAX+1]; // the command that it executes
+
+ volatile pid_t pid;
+ netdata_thread_t thread;
+
+ size_t successful_collections; // the number of times we have seen
+ // values collected from this plugin
+
+ size_t serial_failures; // the number of times the plugin started
+ // without collecting values
+
+ int update_every; // the plugin default data collection frequency
+ volatile sig_atomic_t obsolete; // do not touch this structure after setting this to 1
+ volatile sig_atomic_t enabled; // if this is enabled or not
+
+ time_t started_t;
+ uint32_t version;
+ struct plugind *next;
+};
+
+extern struct plugind *pluginsd_root;
+
+extern void *pluginsd_main(void *ptr);
+
+extern size_t pluginsd_process(RRDHOST *host, struct plugind *cd, FILE *fp, int trust_durations);
+extern int pluginsd_split_words(char *str, char **words, int max_words, char *recover_string, char **recover_location, int max_recover);
+
+extern int pluginsd_initialize_plugin_directories();
+
+extern int config_isspace(char c);
+extern int pluginsd_space(char c);
+
+#endif /* NETDATA_PLUGINS_D_H */
diff --git a/collectors/plugins.d/pluginsd_parser.c b/collectors/plugins.d/pluginsd_parser.c
new file mode 100644
index 0000000..4a97c55
--- /dev/null
+++ b/collectors/plugins.d/pluginsd_parser.c
@@ -0,0 +1,738 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "pluginsd_parser.h"
+
+/*
+ * This is the action defined for the SET command
+ */
+PARSER_RC pluginsd_set_action(void *user, RRDSET *st, RRDDIM *rd, long long int value)
+{
+ UNUSED(user);
+
+ rrddim_set_by_pointer(st, rd, value);
+ return PARSER_RC_OK;
+}
+
+PARSER_RC pluginsd_flush_action(void *user, RRDSET *st)
+{
+ UNUSED(user);
+ UNUSED(st);
+ return PARSER_RC_OK;
+}
+
+PARSER_RC pluginsd_begin_action(void *user, RRDSET *st, usec_t microseconds, int trust_durations)
+{
+ UNUSED(user);
+ if (likely(st->counter_done)) {
+ if (likely(microseconds)) {
+ if (trust_durations)
+ rrdset_next_usec_unfiltered(st, microseconds);
+ else
+ rrdset_next_usec(st, microseconds);
+ } else
+ rrdset_next(st);
+ }
+ return PARSER_RC_OK;
+}
+
+
+PARSER_RC pluginsd_end_action(void *user, RRDSET *st)
+{
+ UNUSED(user);
+
+ rrdset_done(st);
+ return PARSER_RC_OK;
+}
+
+PARSER_RC pluginsd_chart_action(void *user, char *type, char *id, char *name, char *family, char *context, char *title, char *units, char *plugin,
+ char *module, int priority, int update_every, RRDSET_TYPE chart_type, char *options)
+{
+ RRDSET *st = NULL;
+ RRDHOST *host = ((PARSER_USER_OBJECT *) user)->host;
+
+ st = rrdset_create(
+ host, type, id, name, family, context, title, units,
+ plugin, module, priority, update_every,
+ chart_type);
+
+ if (options && *options) {
+ if (strstr(options, "obsolete"))
+ rrdset_is_obsolete(st);
+ else
+ rrdset_isnot_obsolete(st);
+
+ if (strstr(options, "detail"))
+ rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
+ else
+ rrdset_flag_clear(st, RRDSET_FLAG_DETAIL);
+
+ if (strstr(options, "hidden"))
+ rrdset_flag_set(st, RRDSET_FLAG_HIDDEN);
+ else
+ rrdset_flag_clear(st, RRDSET_FLAG_HIDDEN);
+
+ if (strstr(options, "store_first"))
+ rrdset_flag_set(st, RRDSET_FLAG_STORE_FIRST);
+ else
+ rrdset_flag_clear(st, RRDSET_FLAG_STORE_FIRST);
+ } else {
+ rrdset_isnot_obsolete(st);
+ rrdset_flag_clear(st, RRDSET_FLAG_DETAIL);
+ rrdset_flag_clear(st, RRDSET_FLAG_STORE_FIRST);
+ }
+ ((PARSER_USER_OBJECT *)user)->st = st;
+
+ return PARSER_RC_OK;
+}
+
+
+PARSER_RC pluginsd_disable_action(void *user)
+{
+
+ info("called DISABLE. Disabling it.");
+ ((PARSER_USER_OBJECT *) user)->enabled = 0;
+ return PARSER_RC_ERROR;
+}
+
+
+PARSER_RC pluginsd_variable_action(void *user, RRDHOST *host, RRDSET *st, char *name, int global, calculated_number value)
+{
+ UNUSED(user);
+
+ if (global) {
+ RRDVAR *rv = rrdvar_custom_host_variable_create(host, name);
+ if (rv)
+ rrdvar_custom_host_variable_set(host, rv, value);
+ else
+ error("cannot find/create HOST VARIABLE '%s' on host '%s'", name, host->hostname);
+ } else {
+ RRDSETVAR *rs = rrdsetvar_custom_chart_variable_create(st, name);
+ if (rs)
+ rrdsetvar_custom_chart_variable_set(rs, value);
+ else
+ error("cannot find/create CHART VARIABLE '%s' on host '%s', chart '%s'", name, host->hostname, st->id);
+ }
+ return PARSER_RC_OK;
+}
+
+
+
+PARSER_RC pluginsd_dimension_action(void *user, RRDSET *st, char *id, char *name, char *algorithm, long multiplier, long divisor, char *options,
+ RRD_ALGORITHM algorithm_type)
+{
+ UNUSED(user);
+ UNUSED(algorithm);
+
+ RRDDIM *rd = rrddim_add(st, id, name, multiplier, divisor, algorithm_type);
+ rrddim_flag_clear(rd, RRDDIM_FLAG_HIDDEN);
+ rrddim_flag_clear(rd, RRDDIM_FLAG_DONT_DETECT_RESETS_OR_OVERFLOWS);
+ if (options && *options) {
+ if (strstr(options, "obsolete") != NULL)
+ rrddim_is_obsolete(st, rd);
+ else
+ rrddim_isnot_obsolete(st, rd);
+ if (strstr(options, "hidden") != NULL)
+ rrddim_flag_set(rd, RRDDIM_FLAG_HIDDEN);
+ if (strstr(options, "noreset") != NULL)
+ rrddim_flag_set(rd, RRDDIM_FLAG_DONT_DETECT_RESETS_OR_OVERFLOWS);
+ if (strstr(options, "nooverflow") != NULL)
+ rrddim_flag_set(rd, RRDDIM_FLAG_DONT_DETECT_RESETS_OR_OVERFLOWS);
+ } else {
+ rrddim_isnot_obsolete(st, rd);
+ }
+ return PARSER_RC_OK;
+}
+
+PARSER_RC pluginsd_label_action(void *user, char *key, char *value, LABEL_SOURCE source)
+{
+
+ ((PARSER_USER_OBJECT *) user)->new_labels = add_label_to_list(((PARSER_USER_OBJECT *) user)->new_labels, key, value, source);
+
+ return PARSER_RC_OK;
+}
+
+
+PARSER_RC pluginsd_overwrite_action(void *user, RRDHOST *host, struct label *new_labels)
+{
+ UNUSED(user);
+
+ if (!host->labels.head) {
+ host->labels.head = new_labels;
+ } else {
+ rrdhost_rdlock(host);
+ replace_label_list(&host->labels, new_labels);
+ rrdhost_unlock(host);
+ }
+ return PARSER_RC_OK;
+}
+
+PARSER_RC pluginsd_set(char **words, void *user, PLUGINSD_ACTION *plugins_action)
+{
+ char *dimension = words[1];
+ char *value = words[2];
+
+ RRDSET *st = ((PARSER_USER_OBJECT *) user)->st;
+ RRDHOST *host = ((PARSER_USER_OBJECT *) user)->host;
+
+ if (unlikely(!dimension || !*dimension)) {
+        error("requested a SET on chart '%s' of host '%s', without a dimension. Disabling it.", st ? st->id : "UNSET", host->hostname);
+ goto disable;
+ }
+
+ if (unlikely(!value || !*value))
+ value = NULL;
+
+ if (unlikely(!st)) {
+ error(
+ "requested a SET on dimension %s with value %s on host '%s', without a BEGIN. Disabling it.", dimension,
+ value ? value : "<nothing>", host->hostname);
+ goto disable;
+ }
+
+ if (unlikely(rrdset_flag_check(st, RRDSET_FLAG_DEBUG)))
+ debug(D_PLUGINSD, "is setting dimension %s/%s to %s", st->id, dimension, value ? value : "<nothing>");
+
+ if (value) {
+ RRDDIM *rd = rrddim_find(st, dimension);
+ if (unlikely(!rd)) {
+ error(
+ "requested a SET to dimension with id '%s' on stats '%s' (%s) on host '%s', which does not exist. Disabling it.",
+ dimension, st->name, st->id, st->rrdhost->hostname);
+ goto disable;
+ } else {
+ if (plugins_action->set_action) {
+ return plugins_action->set_action(
+ user, st, rd, strtoll(value, NULL, 0));
+ }
+ }
+ }
+ return PARSER_RC_OK;
+
+disable:
+ ((PARSER_USER_OBJECT *) user)->enabled = 0;
+ return PARSER_RC_ERROR;
+}
+
+PARSER_RC pluginsd_begin(char **words, void *user, PLUGINSD_ACTION *plugins_action)
+{
+ char *id = words[1];
+ char *microseconds_txt = words[2];
+
+ RRDSET *st = NULL;
+ RRDHOST *host = ((PARSER_USER_OBJECT *)user)->host;
+
+ if (unlikely(!id)) {
+ error("requested a BEGIN without a chart id for host '%s'. Disabling it.", host->hostname);
+ goto disable;
+ }
+
+ st = rrdset_find(host, id);
+ if (unlikely(!st)) {
+ error("requested a BEGIN on chart '%s', which does not exist on host '%s'. Disabling it.", id, host->hostname);
+ goto disable;
+ }
+ ((PARSER_USER_OBJECT *)user)->st = st;
+
+ usec_t microseconds = 0;
+ if (microseconds_txt && *microseconds_txt)
+ microseconds = str2ull(microseconds_txt);
+
+ if (plugins_action->begin_action) {
+ return plugins_action->begin_action(user, st, microseconds,
+ ((PARSER_USER_OBJECT *)user)->trust_durations);
+ }
+ return PARSER_RC_OK;
+disable:
+ ((PARSER_USER_OBJECT *)user)->enabled = 0;
+ return PARSER_RC_ERROR;
+}
+
+PARSER_RC pluginsd_end(char **words, void *user, PLUGINSD_ACTION *plugins_action)
+{
+ UNUSED(words);
+ RRDSET *st = ((PARSER_USER_OBJECT *) user)->st;
+ RRDHOST *host = ((PARSER_USER_OBJECT *) user)->host;
+
+ if (unlikely(!st)) {
+ error("requested an END, without a BEGIN on host '%s'. Disabling it.", host->hostname);
+ ((PARSER_USER_OBJECT *) user)->enabled = 0;
+ return PARSER_RC_ERROR;
+ }
+
+ if (unlikely(rrdset_flag_check(st, RRDSET_FLAG_DEBUG)))
+ debug(D_PLUGINSD, "requested an END on chart %s", st->id);
+
+ ((PARSER_USER_OBJECT *) user)->st = NULL;
+ ((PARSER_USER_OBJECT *) user)->count++;
+ if (plugins_action->end_action) {
+ return plugins_action->end_action(user, st);
+ }
+ return PARSER_RC_OK;
+}
+
+PARSER_RC pluginsd_chart(char **words, void *user, PLUGINSD_ACTION *plugins_action)
+{
+ RRDHOST *host = ((PARSER_USER_OBJECT *) user)->host;
+ if (unlikely(!host && !((PARSER_USER_OBJECT *) user)->host_exists)) {
+ debug(D_PLUGINSD, "Ignoring chart belonging to missing or ignored host.");
+ return PARSER_RC_OK;
+ }
+
+ char *type = words[1];
+ char *name = words[2];
+ char *title = words[3];
+ char *units = words[4];
+ char *family = words[5];
+ char *context = words[6];
+ char *chart = words[7];
+ char *priority_s = words[8];
+ char *update_every_s = words[9];
+ char *options = words[10];
+ char *plugin = words[11];
+ char *module = words[12];
+
+ int have_action = ((plugins_action->chart_action) != NULL);
+
+ // parse the id from type
+ char *id = NULL;
+ if (likely(type && (id = strchr(type, '.')))) {
+ *id = '\0';
+ id++;
+ }
+
+ // make sure we have the required variables
+ if (unlikely((!type || !*type || !id || !*id))) {
+ if (likely(host))
+ error("requested a CHART, without a type.id, on host '%s'. Disabling it.", host->hostname);
+ else
+ error("requested a CHART, without a type.id. Disabling it.");
+ ((PARSER_USER_OBJECT *) user)->enabled = 0;
+ return PARSER_RC_ERROR;
+ }
+
+ // parse the name, and make sure it does not include 'type.'
+ if (unlikely(name && *name)) {
+ // when data are streamed from child nodes
+ // name will be type.name
+ // so we have to remove 'type.' from name too
+ size_t len = strlen(type);
+ if (strncmp(type, name, len) == 0 && name[len] == '.')
+ name = &name[len + 1];
+
+        // if the name is the same as the id,
+ // or is just 'NULL', clear it.
+ if (unlikely(strcmp(name, id) == 0 || strcasecmp(name, "NULL") == 0 || strcasecmp(name, "(NULL)") == 0))
+ name = NULL;
+ }
+
+ int priority = 1000;
+ if (likely(priority_s && *priority_s))
+ priority = str2i(priority_s);
+
+ int update_every = ((PARSER_USER_OBJECT *) user)->cd->update_every;
+ if (likely(update_every_s && *update_every_s))
+ update_every = str2i(update_every_s);
+ if (unlikely(!update_every))
+ update_every = ((PARSER_USER_OBJECT *) user)->cd->update_every;
+
+ RRDSET_TYPE chart_type = RRDSET_TYPE_LINE;
+ if (unlikely(chart))
+ chart_type = rrdset_type_id(chart);
+
+ if (unlikely(name && !*name))
+ name = NULL;
+ if (unlikely(family && !*family))
+ family = NULL;
+ if (unlikely(context && !*context))
+ context = NULL;
+ if (unlikely(!title))
+ title = "";
+ if (unlikely(!units))
+ units = "unknown";
+
+ debug(
+ D_PLUGINSD,
+ "creating chart type='%s', id='%s', name='%s', family='%s', context='%s', chart='%s', priority=%d, update_every=%d",
+ type, id, name ? name : "", family ? family : "", context ? context : "", rrdset_type_name(chart_type),
+ priority, update_every);
+
+ if (have_action) {
+ return plugins_action->chart_action(
+ user, type, id, name, family, context, title, units,
+ (plugin && *plugin) ? plugin : ((PARSER_USER_OBJECT *)user)->cd->filename, module, priority, update_every,
+ chart_type, options);
+ }
+
+ return PARSER_RC_OK;
+}
+
+PARSER_RC pluginsd_dimension(char **words, void *user, PLUGINSD_ACTION *plugins_action)
+{
+ char *id = words[1];
+ char *name = words[2];
+ char *algorithm = words[3];
+ char *multiplier_s = words[4];
+ char *divisor_s = words[5];
+ char *options = words[6];
+
+ RRDSET *st = ((PARSER_USER_OBJECT *) user)->st;
+ RRDHOST *host = ((PARSER_USER_OBJECT *) user)->host;
+ if (unlikely(!host && !((PARSER_USER_OBJECT *) user)->host_exists)) {
+ debug(D_PLUGINSD, "Ignoring dimension belonging to missing or ignored host.");
+ return PARSER_RC_OK;
+ }
+
+ if (unlikely(!id)) {
+ error(
+ "requested a DIMENSION, without an id, host '%s' and chart '%s'. Disabling it.", host->hostname,
+ st ? st->id : "UNSET");
+ goto disable;
+ }
+
+ if (unlikely(!st && !((PARSER_USER_OBJECT *) user)->st_exists)) {
+ error("requested a DIMENSION, without a CHART, on host '%s'. Disabling it.", host->hostname);
+ goto disable;
+ }
+
+ long multiplier = 1;
+ if (multiplier_s && *multiplier_s) {
+ multiplier = strtol(multiplier_s, NULL, 0);
+ if (unlikely(!multiplier))
+ multiplier = 1;
+ }
+
+ long divisor = 1;
+ if (likely(divisor_s && *divisor_s)) {
+ divisor = strtol(divisor_s, NULL, 0);
+ if (unlikely(!divisor))
+ divisor = 1;
+ }
+
+ if (unlikely(!algorithm || !*algorithm))
+ algorithm = "absolute";
+
+ if (unlikely(st && rrdset_flag_check(st, RRDSET_FLAG_DEBUG)))
+ debug(
+ D_PLUGINSD,
+ "creating dimension in chart %s, id='%s', name='%s', algorithm='%s', multiplier=%ld, divisor=%ld, hidden='%s'",
+ st->id, id, name ? name : "", rrd_algorithm_name(rrd_algorithm_id(algorithm)), multiplier, divisor,
+ options ? options : "");
+
+ if (plugins_action->dimension_action) {
+ return plugins_action->dimension_action(
+ user, st, id, name, algorithm,
+ multiplier, divisor, (options && *options)?options:NULL, rrd_algorithm_id(algorithm));
+ }
+
+ return PARSER_RC_OK;
+disable:
+ ((PARSER_USER_OBJECT *)user)->enabled = 0;
+ return PARSER_RC_ERROR;
+}
+
+PARSER_RC pluginsd_variable(char **words, void *user, PLUGINSD_ACTION *plugins_action)
+{
+ char *name = words[1];
+ char *value = words[2];
+ calculated_number v;
+
+ RRDSET *st = ((PARSER_USER_OBJECT *) user)->st;
+ RRDHOST *host = ((PARSER_USER_OBJECT *) user)->host;
+
+ int global = (st) ? 0 : 1;
+
+ if (name && *name) {
+ if ((strcmp(name, "GLOBAL") == 0 || strcmp(name, "HOST") == 0)) {
+ global = 1;
+ name = words[2];
+ value = words[3];
+ } else if ((strcmp(name, "LOCAL") == 0 || strcmp(name, "CHART") == 0)) {
+ global = 0;
+ name = words[2];
+ value = words[3];
+ }
+ }
+
+ if (unlikely(!name || !*name)) {
+ error("requested a VARIABLE on host '%s', without a variable name. Disabling it.", host->hostname);
+ ((PARSER_USER_OBJECT *)user)->enabled = 0;
+ return PARSER_RC_ERROR;
+ }
+
+ if (unlikely(!value || !*value))
+ value = NULL;
+
+ if (unlikely(!value)) {
+ error("cannot set %s VARIABLE '%s' on host '%s' to an empty value", (global) ? "HOST" : "CHART", name,
+ host->hostname);
+ return PARSER_RC_OK;
+ }
+
+ if (!global && !st) {
+ error("cannot find/create CHART VARIABLE '%s' on host '%s' without a chart", name, host->hostname);
+ return PARSER_RC_OK;
+ }
+
+ char *endptr = NULL;
+ v = (calculated_number)str2ld(value, &endptr);
+ if (unlikely(endptr && *endptr)) {
+ if (endptr == value)
+ error(
+ "the value '%s' of VARIABLE '%s' on host '%s' cannot be parsed as a number", value, name,
+ host->hostname);
+ else
+ error(
+ "the value '%s' of VARIABLE '%s' on host '%s' has leftovers: '%s'", value, name, host->hostname,
+ endptr);
+ }
+
+ if (plugins_action->variable_action) {
+ return plugins_action->variable_action(user, host, st, name, global, v);
+ }
+
+ return PARSER_RC_OK;
+}
+
+PARSER_RC pluginsd_flush(char **words, void *user, PLUGINSD_ACTION *plugins_action)
+{
+ UNUSED(words);
+ debug(D_PLUGINSD, "requested a FLUSH");
+ RRDSET *st = ((PARSER_USER_OBJECT *) user)->st;
+ ((PARSER_USER_OBJECT *) user)->st = NULL;
+ if (plugins_action->flush_action) {
+ return plugins_action->flush_action(user, st);
+ }
+ return PARSER_RC_OK;
+}
+
+PARSER_RC pluginsd_disable(char **words, void *user, PLUGINSD_ACTION *plugins_action)
+{
+ UNUSED(words);
+
+ if (plugins_action->disable_action) {
+ return plugins_action->disable_action(user);
+ }
+ return PARSER_RC_ERROR;
+}
+
+PARSER_RC pluginsd_label(char **words, void *user, PLUGINSD_ACTION *plugins_action)
+{
+ char *store;
+
+ if (!words[1] || !words[2] || !words[3]) {
+ error("Ignoring malformed or empty LABEL command.");
+ return PARSER_RC_OK;
+ }
+ if (!words[4])
+ store = words[3];
+ else {
+ store = callocz(PLUGINSD_LINE_MAX + 1, sizeof(char));
+ size_t remaining = PLUGINSD_LINE_MAX;
+ char *move = store;
+ int i = 3;
+ while (i < PLUGINSD_MAX_WORDS) {
+ size_t length = strlen(words[i]);
+ if ((length + 1) >= remaining)
+ break;
+
+ remaining -= (length + 1);
+ memcpy(move, words[i], length);
+ move += length;
+ *move++ = ' ';
+
+ i++;
+ if (!words[i])
+ break;
+ }
+ }
+
+ if (plugins_action->label_action) {
+ PARSER_RC rc = plugins_action->label_action(user, words[1], store, strtol(words[2], NULL, 10));
+ if (store != words[3])
+ freez(store);
+ return rc;
+ }
+
+ if (store != words[3])
+ freez(store);
+ return PARSER_RC_OK;
+}
+
+PARSER_RC pluginsd_overwrite(char **words, void *user, PLUGINSD_ACTION *plugins_action)
+{
+ UNUSED(words);
+
+ RRDHOST *host = ((PARSER_USER_OBJECT *) user)->host;
+    debug(D_PLUGINSD, "requested an OVERWRITE of host labels");
+
+ struct label *new_labels = ((PARSER_USER_OBJECT *)user)->new_labels;
+ ((PARSER_USER_OBJECT *)user)->new_labels = NULL;
+
+ if (plugins_action->overwrite_action) {
+ return plugins_action->overwrite_action(user, host, new_labels);
+ }
+
+ return PARSER_RC_OK;
+}
+
+PARSER_RC pluginsd_guid(char **words, void *user, PLUGINSD_ACTION *plugins_action)
+{
+ char *uuid_str = words[1];
+ uuid_t uuid;
+
+ if (unlikely(!uuid_str)) {
+ error("requested a GUID, without a uuid.");
+ return PARSER_RC_ERROR;
+ }
+ if (unlikely(strlen(uuid_str) != GUID_LEN || uuid_parse(uuid_str, uuid) == -1)) {
+ error("requested a GUID, without a valid uuid string.");
+ return PARSER_RC_ERROR;
+ }
+
+ debug(D_PLUGINSD, "Parsed uuid=%s", uuid_str);
+ if (plugins_action->guid_action) {
+ return plugins_action->guid_action(user, &uuid);
+ }
+
+ return PARSER_RC_OK;
+}
+
+PARSER_RC pluginsd_context(char **words, void *user, PLUGINSD_ACTION *plugins_action)
+{
+ char *uuid_str = words[1];
+ uuid_t uuid;
+
+ if (unlikely(!uuid_str)) {
+ error("requested a CONTEXT, without a uuid.");
+ return PARSER_RC_ERROR;
+ }
+ if (unlikely(strlen(uuid_str) != GUID_LEN || uuid_parse(uuid_str, uuid) == -1)) {
+ error("requested a CONTEXT, without a valid uuid string.");
+ return PARSER_RC_ERROR;
+ }
+
+ debug(D_PLUGINSD, "Parsed uuid=%s", uuid_str);
+ if (plugins_action->context_action) {
+ return plugins_action->context_action(user, &uuid);
+ }
+
+ return PARSER_RC_OK;
+}
+
+PARSER_RC pluginsd_tombstone(char **words, void *user, PLUGINSD_ACTION *plugins_action)
+{
+ char *uuid_str = words[1];
+ uuid_t uuid;
+
+ if (unlikely(!uuid_str)) {
+ error("requested a TOMBSTONE, without a uuid.");
+ return PARSER_RC_ERROR;
+ }
+ if (unlikely(strlen(uuid_str) != GUID_LEN || uuid_parse(uuid_str, uuid) == -1)) {
+ error("requested a TOMBSTONE, without a valid uuid string.");
+ return PARSER_RC_ERROR;
+ }
+
+ debug(D_PLUGINSD, "Parsed uuid=%s", uuid_str);
+ if (plugins_action->tombstone_action) {
+ return plugins_action->tombstone_action(user, &uuid);
+ }
+
+ return PARSER_RC_OK;
+}
+
+PARSER_RC metalog_pluginsd_host(char **words, void *user, PLUGINSD_ACTION *plugins_action)
+{
+ char *machine_guid = words[1];
+ char *hostname = words[2];
+ char *registry_hostname = words[3];
+ char *update_every_s = words[4];
+ char *os = words[5];
+ char *timezone = words[6];
+ char *tags = words[7];
+
+ int update_every = 1;
+ if (likely(update_every_s && *update_every_s))
+ update_every = str2i(update_every_s);
+ if (unlikely(!update_every))
+ update_every = 1;
+
+ debug(D_PLUGINSD, "HOST PARSED: guid=%s, hostname=%s, reg_host=%s, update=%d, os=%s, timezone=%s, tags=%s",
+ machine_guid, hostname, registry_hostname, update_every, os, timezone, tags);
+
+ if (plugins_action->host_action) {
+ return plugins_action->host_action(
+ user, machine_guid, hostname, registry_hostname, update_every, os, timezone, tags);
+ }
+
+ return PARSER_RC_OK;
+}
+
+// New plugins.d parser
+
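+// Read the plugin's output stream, dispatch every command to the
+// actions wired below, and return the number of successful data
+// collections; the enabled flag is propagated back to the plugin state.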
+inline size_t pluginsd_process(RRDHOST *host, struct plugind *cd, FILE *fp, int trust_durations)
+{
+ int enabled = cd->enabled;
+
+ if (!fp || !enabled) {
+ cd->enabled = 0;
+ return 0;
+ }
+
+ if (unlikely(fileno(fp) == -1)) {
+ error("file descriptor given is not a valid stream");
+ cd->serial_failures++;
+ return 0;
+ }
+ clearerr(fp);
+
+ PARSER_USER_OBJECT *user = callocz(1, sizeof(*user));
+ ((PARSER_USER_OBJECT *) user)->enabled = cd->enabled;
+ ((PARSER_USER_OBJECT *) user)->host = host;
+ ((PARSER_USER_OBJECT *) user)->cd = cd;
+ ((PARSER_USER_OBJECT *) user)->trust_durations = trust_durations;
+
+ PARSER *parser = parser_init(host, user, fp, PARSER_INPUT_SPLIT);
+
+ if (unlikely(!parser)) {
+ error("Failed to initialize parser");
+ cd->serial_failures++;
+ return 0;
+ }
+
+ parser->plugins_action->begin_action = &pluginsd_begin_action;
+ parser->plugins_action->flush_action = &pluginsd_flush_action;
+ parser->plugins_action->end_action = &pluginsd_end_action;
+ parser->plugins_action->disable_action = &pluginsd_disable_action;
+ parser->plugins_action->variable_action = &pluginsd_variable_action;
+ parser->plugins_action->dimension_action = &pluginsd_dimension_action;
+ parser->plugins_action->label_action = &pluginsd_label_action;
+ parser->plugins_action->overwrite_action = &pluginsd_overwrite_action;
+ parser->plugins_action->chart_action = &pluginsd_chart_action;
+ parser->plugins_action->set_action = &pluginsd_set_action;
+
+ user->parser = parser;
+
+ while (likely(!parser_next(parser))) {
+ if (unlikely(netdata_exit || parser_action(parser, NULL)))
+ break;
+ }
+ info("PARSER ended");
+
+ parser_destroy(parser);
+
+ cd->enabled = ((PARSER_USER_OBJECT *) user)->enabled;
+ size_t count = ((PARSER_USER_OBJECT *) user)->count;
+
+ freez(user);
+
+ if (likely(count)) {
+ cd->successful_collections += count;
+ cd->serial_failures = 0;
+ } else
+ cd->serial_failures++;
+
+ return count;
+}
diff --git a/collectors/plugins.d/pluginsd_parser.h b/collectors/plugins.d/pluginsd_parser.h
new file mode 100644
index 0000000..61e9c9b
--- /dev/null
+++ b/collectors/plugins.d/pluginsd_parser.h
@@ -0,0 +1,40 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_PLUGINSD_PARSER_H
+#define NETDATA_PLUGINSD_PARSER_H
+
+#include "../../parser/parser.h"
+
+
+typedef struct parser_user_object {
+ PARSER *parser;
+ RRDSET *st;
+ RRDHOST *host;
+ void *opaque;
+ struct plugind *cd;
+ int trust_durations;
+ struct label *new_labels;
+ size_t count;
+ int enabled;
+ uint8_t st_exists;
+ uint8_t host_exists;
+ void *private; // the user can set this for private use
+} PARSER_USER_OBJECT;
+
+extern PARSER_RC pluginsd_set_action(void *user, RRDSET *st, RRDDIM *rd, long long int value);
+extern PARSER_RC pluginsd_flush_action(void *user, RRDSET *st);
+extern PARSER_RC pluginsd_begin_action(void *user, RRDSET *st, usec_t microseconds, int trust_durations);
+extern PARSER_RC pluginsd_end_action(void *user, RRDSET *st);
+extern PARSER_RC pluginsd_chart_action(void *user, char *type, char *id, char *name, char *family, char *context,
+ char *title, char *units, char *plugin, char *module, int priority,
+ int update_every, RRDSET_TYPE chart_type, char *options);
+extern PARSER_RC pluginsd_disable_action(void *user);
+extern PARSER_RC pluginsd_variable_action(void *user, RRDHOST *host, RRDSET *st, char *name, int global,
+ calculated_number value);
+extern PARSER_RC pluginsd_dimension_action(void *user, RRDSET *st, char *id, char *name, char *algorithm,
+ long multiplier, long divisor, char *options, RRD_ALGORITHM algorithm_type);
+extern PARSER_RC pluginsd_label_action(void *user, char *key, char *value, LABEL_SOURCE source);
+extern PARSER_RC pluginsd_overwrite_action(void *user, RRDHOST *host, struct label *new_labels);
+
+
+#endif //NETDATA_PLUGINSD_PARSER_H
diff --git a/collectors/proc.plugin/Makefile.am b/collectors/proc.plugin/Makefile.am
new file mode 100644
index 0000000..161784b
--- /dev/null
+++ b/collectors/proc.plugin/Makefile.am
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/collectors/proc.plugin/README.md b/collectors/proc.plugin/README.md
new file mode 100644
index 0000000..085afb4
--- /dev/null
+++ b/collectors/proc.plugin/README.md
@@ -0,0 +1,571 @@
+<!--
+title: "proc.plugin"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/proc.plugin/README.md
+-->
+
+# proc.plugin
+
+This plugin collects system metrics from the following Linux sources:
+
+- `/proc/net/dev` (all network interfaces for all their values)
+- `/proc/diskstats` (all disks for all their values)
+- `/proc/mdstat` (status of RAID arrays)
+- `/proc/net/snmp` (total IPv4, TCP and UDP usage)
+- `/proc/net/snmp6` (total IPv6 usage)
+- `/proc/net/netstat` (more IPv4 usage)
+- `/proc/net/wireless` (wireless extension)
+- `/proc/net/stat/nf_conntrack` (connection tracking performance)
+- `/proc/net/stat/synproxy` (synproxy performance)
+- `/proc/net/ip_vs/stats` (IPVS connection statistics)
+- `/proc/stat` (CPU utilization and attributes)
+- `/proc/meminfo` (memory information)
+- `/proc/vmstat` (system performance)
+- `/proc/net/rpc/nfsd` (NFS server statistics for both v3 and v4 NFS servers)
+- `/sys/fs/cgroup` (Control Groups - Linux Containers)
+- `/proc/self/mountinfo` (mount points)
+- `/proc/interrupts` (total and per core hardware interrupts)
+- `/proc/softirqs` (total and per core software interrupts)
+- `/proc/loadavg` (system load and total processes running)
+- `/proc/pressure/{cpu,memory,io}` (pressure stall information)
+- `/proc/sys/kernel/random/entropy_avail` (random numbers pool availability - used in cryptography)
+- `/sys/class/power_supply` (power supply properties)
+- `/sys/class/infiniband` (infiniband interconnect)
+- `ipc` (IPC semaphores and message queues)
+- `ksm` Kernel Same-Page Merging performance (several files under `/sys/kernel/mm/ksm`).
+- `netdata` (internal Netdata resources utilization)
+
+- - -
+
+## Monitoring Disks
+
+> Live demo of disk monitoring at: **[http://london.netdata.rocks](https://registry.my-netdata.io/#menu_disk)**
+
+Performance monitoring for Linux disks is quite complicated. The main reason is the plethora of disk technologies available. There are many different hardware disk technologies, but there are even more **virtual disk** technologies that can provide additional storage features.
+
+Fortunately, the Linux kernel provides many metrics that offer deep insight into what our disks are doing. The kernel measures these metrics on all layers of storage: **virtual disks**, **physical disks** and **partitions of disks**.
+
+### Monitored disk metrics
+
+- **I/O bandwidth/s (kb/s)**
+ The amount of data transferred from and to the disk.
+- **I/O operations/s**
+ The number of I/O operations completed.
+- **Queued I/O operations**
+ The number of currently queued I/O operations. For traditional disks that execute commands one after another, one of them is being run by the disk and the rest are just waiting in a queue.
+- **Backlog size (time in ms)**
+ The expected duration of the currently queued I/O operations.
+- **Utilization (time percentage)**
+  The percentage of time the disk was busy with something. This is a very interesting metric, since for most disks that execute commands sequentially, **this is the key indication of congestion**. A sequential disk that is busy 100% of the available time has no time to do anything more, so even if the bandwidth or the number of operations executed by the disk is low, its capacity has been reached (see the sketch after this list).
+  Of course, for newer disk technologies (like fusion cards) that are capable of executing multiple commands in parallel, this metric is meaningless.
+- **Average I/O operation time (ms)**
+ The average time for I/O requests issued to the device to be served. This includes the time spent by the requests in queue and the time spent servicing them.
+- **Average I/O operation size (kb)**
+ The average amount of data of the completed I/O operations.
+- **Average Service Time (ms)**
+  The average service time for completed I/O operations. This metric is calculated using the total busy time of the disk and the number of completed operations. If the disk is able to execute multiple parallel operations, the reported average service time will be misleading.
+- **Merged I/O operations/s**
+  The Linux kernel is capable of merging I/O operations. So, if two requests to read data from the disk are adjacent, the Linux kernel may merge them into one before passing them to the disk. This metric measures the number of operations that have been merged by the Linux kernel.
+- **Total I/O time**
+ The sum of the duration of all completed I/O operations. This number can exceed the interval if the disk is able to execute multiple I/O operations in parallel.
+- **Space usage**
+ For mounted disks, Netdata will provide a chart for their space, with 3 dimensions:
+ 1. free
+ 2. used
+ 3. reserved for root
+- **inode usage**
+  For mounted disks, Netdata will provide a chart for their inodes (number of files and directories), with 3 dimensions:
+ 1. free
+ 2. used
+ 3. reserved for root
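+
+As a quick illustration of the utilization idea, here is a minimal sketch assuming a disk named `sda`: the 13th field of `/proc/diskstats` is the cumulative number of milliseconds the device spent doing I/O, so its delta over a 1000 ms interval, divided by 10, is the utilization percentage.
+
+```sh
+t1=$(awk '$3 == "sda" {print $13}' /proc/diskstats)
+sleep 1
+t2=$(awk '$3 == "sda" {print $13}' /proc/diskstats)
+echo "utilization: $(( (t2 - t1) / 10 ))%"
+```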
+
+### disk names
+
+Netdata will automatically name disks on the dashboard after the mount points they are mounted on (only while they are mounted, of course). Changes in mount points are not currently detected (you will have to restart Netdata for a disk's name to change). To use the disk IDs provided by `/dev/disk/by-id`, enable the `name disks by id` option. The `preferred disk ids` simple pattern selects which disk IDs are preferred.
+
+### performance metrics
+
+By default, Netdata enables monitoring metrics only when they are not zero. Metrics that are constantly zero are ignored. Metrics that start having values after Netdata is started will be detected, and their charts will be automatically added to the dashboard (a refresh of the dashboard is needed for them to appear, though). Set a chart to `yes` instead of `auto` to enable it permanently. You can also set the `enable zero metrics` option to `yes` in the `[global]` section, which enables charts with zero metrics for all internal Netdata plugins.
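+
+For example (a minimal sketch; both the section and the option name come from the paragraph above):
+
+```
+[global]
+    enable zero metrics = yes
+```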
+
+Netdata categorizes all block devices in 3 categories:
+
+1. physical disks (i.e. block devices that do not have child devices and are not partitions)
+2. virtual disks (i.e. block devices that have child devices - like RAID devices)
+3. disk partitions (i.e. block devices that are part of a physical disk)
+
+Performance metrics are enabled by default for all disk devices, except partitions and unmounted virtual disks. Of course, you can enable or disable monitoring of any block device by editing the Netdata configuration file.
+
+### Netdata configuration
+
+You can get the running Netdata configuration using this:
+
+```sh
+cd /etc/netdata
+curl "http://localhost:19999/netdata.conf" >netdata.conf.new
+mv netdata.conf.new netdata.conf
+```
+
+Then edit `netdata.conf` and find the following section. This is the basic plugin configuration.
+
+```
+[plugin:proc:/proc/diskstats]
+ # enable new disks detected at runtime = yes
+ # performance metrics for physical disks = auto
+ # performance metrics for virtual disks = auto
+ # performance metrics for partitions = no
+ # bandwidth for all disks = auto
+ # operations for all disks = auto
+ # merged operations for all disks = auto
+ # i/o time for all disks = auto
+ # queued operations for all disks = auto
+ # utilization percentage for all disks = auto
+ # backlog for all disks = auto
+ # bcache for all disks = auto
+ # bcache priority stats update every = 0
+ # remove charts of removed disks = yes
+ # path to get block device = /sys/block/%s
+ # path to get block device bcache = /sys/block/%s/bcache
+ # path to get virtual block device = /sys/devices/virtual/block/%s
+ # path to get block device infos = /sys/dev/block/%lu:%lu/%s
+ # path to device mapper = /dev/mapper
+ # path to /dev/disk/by-label = /dev/disk/by-label
+ # path to /dev/disk/by-id = /dev/disk/by-id
+ # path to /dev/vx/dsk = /dev/vx/dsk
+ # name disks by id = no
+ # preferred disk ids = *
+ # exclude disks = loop* ram*
+ # filename to monitor = /proc/diskstats
+ # performance metrics for disks with major 8 = yes
+```
+
+For each virtual disk, physical disk and partition you will have a section like this:
+
+```
+[plugin:proc:/proc/diskstats:sda]
+ # enable = yes
+ # enable performance metrics = auto
+ # bandwidth = auto
+ # operations = auto
+ # merged operations = auto
+ # i/o time = auto
+ # queued operations = auto
+ # utilization percentage = auto
+ # backlog = auto
+```
+
+For all configuration options:
+
+- `auto` = enable monitoring if the collected values are not zero
+- `yes` = enable monitoring
+- `no` = disable monitoring
+
+Of course, to set options, you will have to uncomment them. The comments show the internal defaults.
+
+After saving `/etc/netdata/netdata.conf`, restart your Netdata to apply them.
+
+#### Disabling performance metrics for individual devices or for all devices of a given type
+
+You can easily disable performance metrics for an individual device. For example:
+
+```
+[plugin:proc:/proc/diskstats:sda]
+ enable performance metrics = no
+```
+
+Sometimes, though, you need to disable performance metrics for all devices of the same type. To do this, figure out the device type (its major number) from `/proc/diskstats`. For example:
+
+```
+ 7 0 loop0 1651 0 3452 168 0 0 0 0 0 8 168
+ 7 1 loop1 4955 0 11924 880 0 0 0 0 0 64 880
+ 7 2 loop2 36 0 216 4 0 0 0 0 0 4 4
+ 7 6 loop6 0 0 0 0 0 0 0 0 0 0 0
+ 7 7 loop7 0 0 0 0 0 0 0 0 0 0 0
+ 251 2 zram2 27487 0 219896 188 79953 0 639624 1640 0 1828 1828
+ 251 3 zram3 27348 0 218784 152 79952 0 639616 1960 0 2060 2104
+```
+
+All zram devices have major number `251` and all loop devices have major number `7`.
+So, to disable performance metrics for all loop devices, you could add `performance metrics for disks with major 7 = no` to the `[plugin:proc:/proc/diskstats]` section.
+
+```
+[plugin:proc:/proc/diskstats]
+ performance metrics for disks with major 7 = no
+```
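+
+To see which major numbers are in use on your system, a quick sketch like this prints each block device alongside its major number:
+
+```sh
+awk '{print $1, $3}' /proc/diskstats | sort -n
+```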
+
+## Monitoring RAID arrays
+
+### Monitored RAID array metrics
+
+1. **Health** The number of failed disks in every array (aggregate chart).
+
+2. **Disks stats**
+
+- total (the number of devices the array would ideally have)
+- inuse (the number of devices currently in use)
+
+3. **Mismatch count**
+
+- unsynchronized blocks
+
+4. **Current status**
+
+- resync in percent
+- recovery in percent
+- reshape in percent
+- check in percent
+
+5. **Operation status** (if resync/recovery/reshape/check is active)
+
+- finish in minutes
+- speed in megabytes/s
+
+6. **Nonredundant array availability**
+
+#### configuration
+
+```
+[plugin:proc:/proc/mdstat]
+ # faulty devices = yes
+ # nonredundant arrays availability = yes
+ # mismatch count = auto
+ # disk stats = yes
+ # operation status = yes
+ # make charts obsolete = yes
+ # filename to monitor = /proc/mdstat
+ # mismatch_cnt filename to monitor = /sys/block/%s/md/mismatch_cnt
+```
+
+## Monitoring CPUs
+
+The `/proc/stat` module monitors CPU utilization, interrupts, context switches, processes started/running, thermal
+throttling, frequency, and idle states. It gathers this information from multiple files.
+
+If your system has more than 50 processors (`physical processors * cores per processor * threads per core`), the Agent
+automatically disables CPU thermal throttling, frequency, and idle state charts. To override this default, see the next
+section on configuration.
+
+### Configuration
+
+The settings for monitoring CPUs are in the `[plugin:proc:/proc/stat]` section of your `netdata.conf` file.
+
+The `keep per core files open` option lets you reduce the number of file operations on multiple files.
+
+If your system has more than 50 processors and you would like to see the CPU thermal throttling, frequency, and idle
+state charts that are automatically disabled, you can set the following boolean options in the
+`[plugin:proc:/proc/stat]` section.
+
+```conf
+ keep per core files open = yes
+ keep cpuidle files open = yes
+ core_throttle_count = yes
+ package_throttle_count = yes
+ cpu frequency = yes
+ cpu idle states = yes
+```
+
+### CPU frequency
+
+The module shows the current CPU frequency as set by the `cpufreq` kernel
+module.
+
+**Requirement:**
+You need to have `CONFIG_CPU_FREQ` and (optionally) `CONFIG_CPU_FREQ_STAT`
+enabled in your kernel.
+
+The `cpufreq` interface provides the information in two different ways: through the `/sys/devices/system/cpu/cpu*/cpufreq/scaling_cur_freq` and `/sys/devices/system/cpu/cpu*/cpufreq/stats/time_in_state` files. The latter is more accurate, so the module prefers it. `scaling_cur_freq` represents only the current CPU frequency and doesn't account for any state changes which happen between updates. The module switches back and forth between these two methods if the governor is changed.
+
+It produces one chart with multiple lines (one line per core).
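+
+To inspect both sources by hand, a quick sketch (assuming `cpu0`, and a kernel built with `CONFIG_CPU_FREQ_STAT` for the second file):
+
+```sh
+# instantaneous frequency, in kHz
+cat /sys/devices/system/cpu/cpu0/cpufreq/scaling_cur_freq
+# cumulative time spent in each frequency, one "frequency time" pair per line
+cat /sys/devices/system/cpu/cpu0/cpufreq/stats/time_in_state
+```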
+
+#### configuration
+
+`scaling_cur_freq filename to monitor` and `time_in_state filename to monitor` in the `[plugin:proc:/proc/stat]` configuration section
+
+### CPU idle states
+
+The module monitors the usage of CPU idle states.
+
+**Requirement:**
+Your kernel needs to have `CONFIG_CPU_IDLE` enabled.
+
+It produces one stacked chart per CPU, showing the percentage of time spent in
+each state.
+
+#### configuration
+
+`schedstat filename to monitor`, `cpuidle name filename to monitor`, and `cpuidle time filename to monitor` in the `[plugin:proc:/proc/stat]` configuration section
+
+## Monitoring Network Interfaces
+
+### Monitored network interface metrics
+
+- **Physical Network Interfaces Aggregated Bandwidth (kilobits/s)**
+ The amount of data received and sent through all physical interfaces in the system. This is the source of data for the Net Inbound and Net Outbound dials in the System Overview section.
+
+- **Bandwidth (kilobits/s)**
+ The amount of data received and sent through the interface.
+
+- **Packets (packets/s)**
+ The number of packets received, packets sent, and multicast packets transmitted through the interface.
+
+- **Interface Errors (errors/s)**
+ The number of errors for the inbound and outbound traffic on the interface.
+
+- **Interface Drops (drops/s)**
+ The number of packets dropped for the inbound and outbound traffic on the interface.
+
+- **Interface FIFO Buffer Errors (errors/s)**
+ The number of FIFO buffer errors encountered while receiving and transmitting data through the interface.
+
+- **Compressed Packets (packets/s)**
+ The number of compressed packets transmitted or received by the device driver.
+
+- **Network Interface Events (events/s)**
+ The number of packet framing errors, collisions detected on the interface, and carrier losses detected by the device driver.
+
+By default, Netdata enables monitoring metrics only when they are not zero. Metrics that are constantly zero are ignored. Metrics that start having values after Netdata is started will be detected, and their charts will be automatically added to the dashboard (a refresh of the dashboard is needed for them to appear, though).
+
+### Monitoring wireless network interfaces
+
+The settings for monitoring wireless interfaces are in the `[plugin:proc:/proc/net/wireless]` section of your `netdata.conf` file.
+
+```conf
+ status for all interfaces = yes
+ quality for all interfaces = yes
+ discarded packets for all interfaces = yes
+ missed beacon for all interface = yes
+```
+
+You can set the following values for each configuration option:
+
+- `auto` = enable monitoring if the collected values are not zero
+- `yes` = enable monitoring
+- `no` = disable monitoring
+
+#### Monitored wireless interface metrics
+
+- **Status**
+  The current state of the interface. This is a device-dependent value.
+
+- **Link**
+ Overall quality of the link.
+
+- **Level**
+ Received signal strength (RSSI), which indicates how strong the received signal is.
+
+- **Noise**
+ Background noise level.
+
+- **Discarded packets**
+  Discarded packets, broken down by reason: packets received with a different NWID or ESSID (`nwid`), packets that could not be decrypted (`crypt`), link-layer fragments the hardware was not able to properly reassemble (`frag`), packets that failed to be delivered (`retry`), and packets lost in relation to specific wireless operations (`misc`).
+
+- **Missed beacon**
+ Number of periodic beacons from the cell or the access point the interface has missed.
+
+#### Wireless configuration
+
+#### alarms
+
+There are several alarms defined in `health.d/net.conf`.
+
+The tricky ones are `inbound packets dropped` and `inbound packets dropped ratio`. They have quite a strict policy so that they warn users about possible issues. These alarms can be annoying for some network configurations. It is especially true for some bonding configurations if an interface is a child or a bonding interface itself. If it is expected to have a certain number of drops on an interface for a certain network configuration, a separate alarm with different triggering thresholds can be created or the existing one can be disabled for this specific interface. It can be done with the help of the [families](/health/REFERENCE.md#alarm-line-families) line in the alarm configuration. For example, if you want to disable the `inbound packets dropped` alarm for `eth0`, set `families: !eth0 *` in the alarm definition for `template: inbound_packets_dropped`.
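+
+For example, to exclude `eth0` from the stock template, a sketch of the relevant lines in `health.d/net.conf` (only the `families` line is the change; the `on:` line is shown for orientation):
+
+```conf
+template: inbound_packets_dropped
+      on: net.drops
+families: !eth0 *
+```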
+
+#### configuration
+
+Module configuration:
+
+```
+[plugin:proc:/proc/net/dev]
+ # filename to monitor = /proc/net/dev
+ # path to get virtual interfaces = /sys/devices/virtual/net/%s
+ # path to get net device speed = /sys/class/net/%s/speed
+ # enable new interfaces detected at runtime = auto
+ # bandwidth for all interfaces = auto
+ # packets for all interfaces = auto
+ # errors for all interfaces = auto
+ # drops for all interfaces = auto
+ # fifo for all interfaces = auto
+ # compressed packets for all interfaces = auto
+ # frames, collisions, carrier counters for all interfaces = auto
+ # disable by default interfaces matching = lo fireqos* *-ifb
+ # refresh interface speed every seconds = 10
+```
+
+Per interface configuration:
+
+```
+[plugin:proc:/proc/net/dev:enp0s3]
+ # enabled = yes
+ # virtual = no
+ # bandwidth = auto
+ # packets = auto
+ # errors = auto
+ # drops = auto
+ # fifo = auto
+ # compressed = auto
+ # events = auto
+```
+
+## Linux Anti-DDoS
+
+![image6](https://cloud.githubusercontent.com/assets/2662304/14253733/53550b16-fa95-11e5-8d9d-4ed171df4735.gif)
+
+---
+
+SYNPROXY is a TCP SYN packet proxy. It can be used to protect any TCP server (like a web server) from SYN floods and similar DDoS attacks.
+
+SYNPROXY is a netfilter module, in the Linux kernel (since version 3.12). It is optimized to handle millions of packets per second utilizing all CPUs available without any concurrency locking between the connections.
+
+The net effect of this is that the real servers will not notice any change during the attack. Valid TCP connections will pass through and be served, while the attack will be stopped at the firewall.
+
+Netdata does not enable SYNPROXY. It just uses the SYNPROXY metrics exposed by your kernel, so you will first need to configure it. The hard way is to run iptables SYNPROXY commands directly on the console (a sketch follows the links below). An easier way is to use [FireHOL](https://firehol.org/), which is a firewall manager for iptables. FireHOL can configure SYNPROXY using the following setup guides:
+
+- **[Working with SYNPROXY](https://github.com/firehol/firehol/wiki/Working-with-SYNPROXY)**
+- **[Working with SYNPROXY and traps](https://github.com/firehol/firehol/wiki/Working-with-SYNPROXY-and-traps)**
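+
+A minimal sketch of the "hard way", assuming a web server on port 80 and a kernel with the SYNPROXY target (Linux 3.12 or later); adjust ports and policy to your setup:
+
+```sh
+# be strict about out-of-window packets, as recommended for SYNPROXY setups
+sysctl -w net.netfilter.nf_conntrack_tcp_loose=0
+# do not create conntrack entries for incoming SYNs; SYNPROXY will validate them
+iptables -t raw -A PREROUTING -p tcp --dport 80 --syn -j CT --notrack
+# hand untracked/invalid packets on the port to SYNPROXY
+iptables -A INPUT -p tcp --dport 80 -m conntrack --ctstate INVALID,UNTRACKED \
+    -j SYNPROXY --sack-perm --timestamp --wscale 7 --mss 1460
+# drop whatever SYNPROXY did not validate
+iptables -A INPUT -p tcp --dport 80 -m conntrack --ctstate INVALID -j DROP
+```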
+
+### Real-time monitoring of Linux Anti-DDoS
+
+Netdata is able to monitor in real-time (per second updates) the operation of the Linux Anti-DDoS protection.
+
+It visualizes 4 charts:
+
+1. TCP SYN Packets received on ports operated by SYNPROXY
+2. TCP Cookies (valid, invalid, retransmits)
+3. Connections Reopened
+4. Entries used
+
+Example image:
+
+![ddos](https://cloud.githubusercontent.com/assets/2662304/14398891/6016e3fc-fdf0-11e5-942b-55de6a52cb66.gif)
+
+See Linux Anti-DDoS in action at: **[Netdata demo site (with SYNPROXY enabled)](https://registry.my-netdata.io/#menu_netfilter_submenu_synproxy)**
+
+## Linux power supply
+
+This module monitors various metrics reported by power supply drivers
+on Linux. This allows tracking and alerting on things like remaining
+battery capacity.
+
+Depending on the underlying driver, it may provide the following charts
+and metrics:
+
+1. Capacity: The power supply capacity expressed as a percentage.
+
+ - capacity_now
+
+2. Charge: The charge for the power supply, expressed in amp-hours.
+
+ - charge_full_design
+ - charge_full
+ - charge_now
+ - charge_empty
+ - charge_empty_design
+
+3. Energy: The energy for the power supply, expressed in watt-hours.
+
+ - energy_full_design
+ - energy_full
+ - energy_now
+ - energy_empty
+ - energy_empty_design
+
+4. Voltage: The voltage for the power supply, expressed in volts.
+
+ - voltage_max_design
+ - voltage_max
+ - voltage_now
+ - voltage_min
+ - voltage_min_design
+
+#### configuration
+
+```
+[plugin:proc:/sys/class/power_supply]
+ # battery capacity = yes
+ # battery charge = no
+ # battery energy = no
+ # power supply voltage = no
+ # keep files open = auto
+ # directory to monitor = /sys/class/power_supply
+```
+
+#### notes
+
+- Most drivers provide at least the first chart. Battery-powered,
+  ACPI-compliant systems (like most laptops) provide all but the third,
+  but do not provide all of the metrics for each chart.
+
+- Current, energy, and voltage are reported with *very* high precision
+  by the power_supply framework. Usually, this is far higher than the
+  precision the actual hardware supports, so expect the values in these
+  charts to jump instead of scaling smoothly.
+
+- If a `max` or `full` attribute is defined by the driver, but not a
+  corresponding `min` or `empty` attribute, then Netdata will still provide
+  the corresponding `min` or `empty` dimension, which will then always read
+  as zero. This way, alerts which match on these will still work.
+
+## Infiniband interconnect
+
+This module monitors every active InfiniBand port. It provides generic counter statistics and per-vendor hardware counters (if the vendor is supported).
+
+### Monitored interface metrics
+
+Each port will have its counters metrics monitored, grouped in the following charts:
+
+- **Bandwidth usage**
+ Sent/Received data, in KB/s
+
+- **Packets Statistics**
+ Sent/Received packets, in 3 categories: total, unicast and multicast.
+
+- **Errors Statistics**
+  Many error counters are provided, presenting statistics for:
+  - Packets: malformed, sent/received packets discarded by the card/switch, missing resources
+ - Link: downed, recovered, integrity error, minor error
+ - Other events: Tick Wait to send, buffer overrun
+
+If your vendor is supported, you'll also get hardware counter statistics. Since these are vendor-specific, please refer to the vendor's documentation.
+
+- Mellanox: [see statistics documentation](https://community.mellanox.com/s/article/understanding-mlx5-linux-counters-and-status-parameters)
+
+### configuration
+
+The default configuration monitors only enabled InfiniBand ports, and refreshes the list of newly activated or created ports every 30 seconds.
+
+```
+[plugin:proc:/sys/class/infiniband]
+ # dirname to monitor = /sys/class/infiniband
+ # bandwidth counters = yes
+ # packets counters = yes
+ # errors counters = yes
+ # hardware packets counters = auto
+ # hardware errors counters = auto
+ # monitor only ports being active = auto
+ # disable by default interfaces matching =
+ # refresh ports state every seconds = 30
+```
+
+
+## IPC
+
+### Monitored IPC metrics
+
+- **number of messages in message queues**
+- **amount of memory used by message queues**
+- **number of semaphores**
+- **number of semaphore arrays**
+- **number of shared memory segments**
+- **amount of memory used by shared memory segments**
+
+Since the message queue charts are dynamic, a sane limit is applied to the number of dimensions per chart (the limit is configurable).
+
+### configuration
+
+```
+[plugin:proc:ipc]
+ # message queues = yes
+ # semaphore totals = yes
+ # shared memory totals = yes
+ # msg filename to monitor = /proc/sysvipc/msg
+ # shm filename to monitor = /proc/sysvipc/shm
+ # max dimensions in memory allowed = 50
+```
+
diff --git a/collectors/proc.plugin/ipc.c b/collectors/proc.plugin/ipc.c
new file mode 100644
index 0000000..048fe74
--- /dev/null
+++ b/collectors/proc.plugin/ipc.c
@@ -0,0 +1,576 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "plugin_proc.h"
+
+#include <sys/sem.h>
+#include <sys/msg.h>
+#include <sys/shm.h>
+
+
+#ifndef SEMVMX
+#define SEMVMX 32767 /* <= 32767 semaphore maximum value */
+#endif
+
+/* Some versions of libc only define IPC_INFO when __USE_GNU is defined. */
+#ifndef IPC_INFO
+#define IPC_INFO 3
+#endif
+
+struct ipc_limits {
+ uint64_t shmmni; /* max number of segments */
+ uint64_t shmmax; /* max segment size */
+ uint64_t shmall; /* max total shared memory */
+ uint64_t shmmin; /* min segment size */
+
+ int semmni; /* max number of arrays */
+ int semmsl; /* max semaphores per array */
+ int semmns; /* max semaphores system wide */
+ int semopm; /* max ops per semop call */
+ unsigned int semvmx; /* semaphore max value (constant) */
+
+ int msgmni; /* max queues system wide */
+ size_t msgmax; /* max size of message */
+ int msgmnb; /* default max size of queue */
+};
+
+struct ipc_status {
+ int semusz; /* current number of arrays */
+ int semaem; /* current semaphores system wide */
+};
+
+/*
+ * The last arg of semctl is a union semun, but where is it defined? X/OPEN
+ * tells us to define it ourselves, but until recently Linux include files
+ * would also define it.
+ */
+#ifndef HAVE_UNION_SEMUN
+/* according to X/OPEN we have to define it ourselves */
+union semun {
+ int val;
+ struct semid_ds *buf;
+ unsigned short int *array;
+ struct seminfo *__buf;
+};
+#endif
+
+struct message_queue {
+ unsigned long long id;
+ int found;
+
+ RRDDIM *rd_messages;
+ RRDDIM *rd_bytes;
+ unsigned long long messages;
+ unsigned long long bytes;
+
+ struct message_queue * next;
+};
+
+struct shm_stats {
+ unsigned long long segments;
+ unsigned long long bytes;
+};
+
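+// Read the semaphore limits, preferring /proc/sys/kernel/sem
+// (semmsl semmns semopm semmni) and falling back to semctl(IPC_INFO)
+// when the file cannot be read or parsed.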
+static inline int ipc_sem_get_limits(struct ipc_limits *lim) {
+ static procfile *ff = NULL;
+ static int error_shown = 0;
+ static char filename[FILENAME_MAX + 1] = "";
+
+ if(unlikely(!filename[0]))
+ snprintfz(filename, FILENAME_MAX, "%s/proc/sys/kernel/sem", netdata_configured_host_prefix);
+
+ if(unlikely(!ff)) {
+ ff = procfile_open(filename, NULL, PROCFILE_FLAG_DEFAULT);
+ if(unlikely(!ff)) {
+ if(unlikely(!error_shown)) {
+ error("IPC: Cannot open file '%s'.", filename);
+ error_shown = 1;
+ }
+ goto ipc;
+ }
+ }
+
+ ff = procfile_readall(ff);
+ if(unlikely(!ff)) {
+ if(unlikely(!error_shown)) {
+ error("IPC: Cannot read file '%s'.", filename);
+ error_shown = 1;
+ }
+ goto ipc;
+ }
+
+ if(procfile_lines(ff) >= 1 && procfile_linewords(ff, 0) >= 4) {
+ lim->semvmx = SEMVMX;
+ lim->semmsl = str2i(procfile_lineword(ff, 0, 0));
+ lim->semmns = str2i(procfile_lineword(ff, 0, 1));
+ lim->semopm = str2i(procfile_lineword(ff, 0, 2));
+ lim->semmni = str2i(procfile_lineword(ff, 0, 3));
+ return 0;
+ }
+ else {
+ if(unlikely(!error_shown)) {
+ error("IPC: Invalid content in file '%s'.", filename);
+ error_shown = 1;
+ }
+ goto ipc;
+ }
+
+ipc:
+ // cannot do it from the file
+ // query IPC
+ {
+ struct seminfo seminfo = {.semmni = 0};
+ union semun arg = {.array = (ushort *) &seminfo};
+
+ if(unlikely(semctl(0, 0, IPC_INFO, arg) < 0)) {
+ error("IPC: Failed to read '%s' and request IPC_INFO with semctl().", filename);
+ goto error;
+ }
+
+ lim->semvmx = SEMVMX;
+ lim->semmni = seminfo.semmni;
+ lim->semmsl = seminfo.semmsl;
+ lim->semmns = seminfo.semmns;
+ lim->semopm = seminfo.semopm;
+ return 0;
+ }
+
+error:
+ lim->semvmx = 0;
+ lim->semmni = 0;
+ lim->semmsl = 0;
+ lim->semmns = 0;
+ lim->semopm = 0;
+ return -1;
+}
+
+/*
+printf ("------ Semaphore Limits --------\n");
+printf ("max number of arrays = %d\n", limits.semmni);
+printf ("max semaphores per array = %d\n", limits.semmsl);
+printf ("max semaphores system wide = %d\n", limits.semmns);
+printf ("max ops per semop call = %d\n", limits.semopm);
+printf ("semaphore max value = %u\n", limits.semvmx);
+
+printf ("------ Semaphore Status --------\n");
+printf ("used arrays = %d\n", status.semusz);
+printf ("allocated semaphores = %d\n", status.semaem);
+*/
+
+static inline int ipc_sem_get_status(struct ipc_status *st) {
+ struct seminfo seminfo;
+ union semun arg;
+
+ arg.array = (ushort *) (void *) &seminfo;
+
+ if(unlikely(semctl (0, 0, SEM_INFO, arg) < 0)) {
+ /* kernel not configured for semaphores */
+ static int error_shown = 0;
+ if(unlikely(!error_shown)) {
+ error("IPC: kernel is not configured for semaphores");
+ error_shown = 1;
+ }
+ st->semusz = 0;
+ st->semaem = 0;
+ return -1;
+ }
+
+ st->semusz = seminfo.semusz;
+ st->semaem = seminfo.semaem;
+ return 0;
+}
+
+int ipc_msq_get_info(char *msg_filename, struct message_queue **message_queue_root) {
+ static procfile *ff;
+ struct message_queue *msq;
+
+ if(unlikely(!ff)) {
+ ff = procfile_open(msg_filename, " \t:", PROCFILE_FLAG_DEFAULT);
+ if(unlikely(!ff)) return 1;
+ }
+
+ ff = procfile_readall(ff);
+ if(unlikely(!ff)) return 1;
+
+ size_t lines = procfile_lines(ff);
+ size_t words = 0;
+
+ if(unlikely(lines < 2)) {
+ error("Cannot read %s. Expected 2 or more lines, read %zu.", ff->filename, lines);
+ return 1;
+ }
+
+ // loop through all lines except the first and the last ones
+ size_t l;
+ for(l = 1; l < lines - 1; l++) {
+ words = procfile_linewords(ff, l);
+ if(unlikely(words < 2)) continue;
+ if(unlikely(words < 14)) {
+ error("Cannot read %s line. Expected 14 params, read %zu.", ff->filename, words);
+ continue;
+ }
+
+        // find the id in the linked list or create a new structure
+ int found = 0;
+
+ unsigned long long id = str2ull(procfile_lineword(ff, l, 1));
+ for(msq = *message_queue_root; msq ; msq = msq->next) {
+ if(unlikely(id == msq->id)) {
+ found = 1;
+ break;
+ }
+ }
+
+ if(unlikely(!found)) {
+ msq = callocz(1, sizeof(struct message_queue));
+ msq->next = *message_queue_root;
+ *message_queue_root = msq;
+ msq->id = id;
+ }
+
+ msq->messages = str2ull(procfile_lineword(ff, l, 4));
+ msq->bytes = str2ull(procfile_lineword(ff, l, 3));
+ msq->found = 1;
+ }
+
+ return 0;
+}
+
+int ipc_shm_get_info(char *shm_filename, struct shm_stats *shm) {
+ static procfile *ff;
+
+ if(unlikely(!ff)) {
+ ff = procfile_open(shm_filename, " \t:", PROCFILE_FLAG_DEFAULT);
+ if(unlikely(!ff)) return 1;
+ }
+
+ ff = procfile_readall(ff);
+ if(unlikely(!ff)) return 1;
+
+ size_t lines = procfile_lines(ff);
+ size_t words = 0;
+
+ if(unlikely(lines < 2)) {
+ error("Cannot read %s. Expected 2 or more lines, read %zu.", ff->filename, lines);
+ return 1;
+ }
+
+ shm->segments = 0;
+ shm->bytes = 0;
+
+ // loop through all lines except the first and the last ones
+ size_t l;
+ for(l = 1; l < lines - 1; l++) {
+ words = procfile_linewords(ff, l);
+ if(unlikely(words < 2)) continue;
+ if(unlikely(words < 16)) {
+ error("Cannot read %s line. Expected 16 params, read %zu.", ff->filename, words);
+ continue;
+ }
+
+ shm->segments++;
+ shm->bytes += str2ull(procfile_lineword(ff, l, 3));
+ }
+
+ return 0;
+}
+
+int do_ipc(int update_every, usec_t dt) {
+ (void)dt;
+
+ static int do_sem = -1, do_msg = -1, do_shm = -1;
+ static int read_limits_next = -1;
+ static struct ipc_limits limits;
+ static struct ipc_status status;
+ static RRDVAR *arrays_max = NULL, *semaphores_max = NULL;
+ static RRDSET *st_semaphores = NULL, *st_arrays = NULL;
+ static RRDDIM *rd_semaphores = NULL, *rd_arrays = NULL;
+ static char *msg_filename = NULL;
+ static struct message_queue *message_queue_root = NULL;
+ static long long dimensions_limit;
+ static char *shm_filename = NULL;
+
+ if(unlikely(do_sem == -1)) {
+ do_msg = config_get_boolean("plugin:proc:ipc", "message queues", CONFIG_BOOLEAN_YES);
+ do_sem = config_get_boolean("plugin:proc:ipc", "semaphore totals", CONFIG_BOOLEAN_YES);
+ do_shm = config_get_boolean("plugin:proc:ipc", "shared memory totals", CONFIG_BOOLEAN_YES);
+
+ char filename[FILENAME_MAX + 1];
+
+ snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/proc/sysvipc/msg");
+ msg_filename = config_get("plugin:proc:ipc", "msg filename to monitor", filename);
+
+ snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/proc/sysvipc/shm");
+ shm_filename = config_get("plugin:proc:ipc", "shm filename to monitor", filename);
+
+ dimensions_limit = config_get_number("plugin:proc:ipc", "max dimensions in memory allowed", 50);
+
+ // make sure it works
+ if(ipc_sem_get_limits(&limits) == -1) {
+ error("unable to fetch semaphore limits");
+ do_sem = CONFIG_BOOLEAN_NO;
+ }
+ else if(ipc_sem_get_status(&status) == -1) {
+ error("unable to fetch semaphore statistics");
+ do_sem = CONFIG_BOOLEAN_NO;
+ }
+ else {
+ // create the charts
+ if(unlikely(!st_semaphores)) {
+ st_semaphores = rrdset_create_localhost(
+ "system"
+ , "ipc_semaphores"
+ , NULL
+ , "ipc semaphores"
+ , NULL
+ , "IPC Semaphores"
+ , "semaphores"
+ , PLUGIN_PROC_NAME
+ , "ipc"
+ , NETDATA_CHART_PRIO_SYSTEM_IPC_SEMAPHORES
+ , localhost->rrd_update_every
+ , RRDSET_TYPE_AREA
+ );
+ rd_semaphores = rrddim_add(st_semaphores, "semaphores", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ }
+
+ if(unlikely(!st_arrays)) {
+ st_arrays = rrdset_create_localhost(
+ "system"
+ , "ipc_semaphore_arrays"
+ , NULL
+ , "ipc semaphores"
+ , NULL
+ , "IPC Semaphore Arrays"
+ , "arrays"
+ , PLUGIN_PROC_NAME
+ , "ipc"
+ , NETDATA_CHART_PRIO_SYSTEM_IPC_SEM_ARRAYS
+ , localhost->rrd_update_every
+ , RRDSET_TYPE_AREA
+ );
+ rd_arrays = rrddim_add(st_arrays, "arrays", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ }
+
+ // variables
+ semaphores_max = rrdvar_custom_host_variable_create(localhost, "ipc_semaphores_max");
+ arrays_max = rrdvar_custom_host_variable_create(localhost, "ipc_semaphores_arrays_max");
+ }
+
+ struct stat stbuf;
+ if (stat(msg_filename, &stbuf)) {
+ do_msg = CONFIG_BOOLEAN_NO;
+ }
+
+ if(unlikely(do_sem == CONFIG_BOOLEAN_NO && do_msg == CONFIG_BOOLEAN_NO)) {
+ error("ipc module disabled");
+ return 1;
+ }
+ }
+
+ if(likely(do_sem != CONFIG_BOOLEAN_NO)) {
+ if(unlikely(read_limits_next < 0)) {
+ if(unlikely(ipc_sem_get_limits(&limits) == -1)) {
+ error("Unable to fetch semaphore limits.");
+ }
+ else {
+ if(semaphores_max) rrdvar_custom_host_variable_set(localhost, semaphores_max, limits.semmns);
+ if(arrays_max) rrdvar_custom_host_variable_set(localhost, arrays_max, limits.semmni);
+
+ st_arrays->red = limits.semmni;
+ st_semaphores->red = limits.semmns;
+
+ read_limits_next = 60 / update_every;
+ }
+ }
+ else
+ read_limits_next--;
+
+ if(unlikely(ipc_sem_get_status(&status) == -1)) {
+ error("Unable to get semaphore statistics");
+ return 0;
+ }
+
+ if(st_semaphores->counter_done) rrdset_next(st_semaphores);
+ rrddim_set_by_pointer(st_semaphores, rd_semaphores, status.semaem);
+ rrdset_done(st_semaphores);
+
+ if(st_arrays->counter_done) rrdset_next(st_arrays);
+ rrddim_set_by_pointer(st_arrays, rd_arrays, status.semusz);
+ rrdset_done(st_arrays);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(likely(do_msg != CONFIG_BOOLEAN_NO)) {
+ static RRDSET *st_msq_messages = NULL, *st_msq_bytes = NULL;
+
+ int ret = ipc_msq_get_info(msg_filename, &message_queue_root);
+
+ if(!ret && message_queue_root) {
+ if(unlikely(!st_msq_messages))
+ st_msq_messages = rrdset_create_localhost(
+ "system"
+ , "message_queue_messages"
+ , NULL
+ , "ipc message queues"
+ , NULL
+ , "IPC Message Queue Number of Messages"
+ , "messages"
+ , PLUGIN_PROC_NAME
+ , "ipc"
+ , NETDATA_CHART_PRIO_SYSTEM_IPC_MSQ_MESSAGES
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+ else
+ rrdset_next(st_msq_messages);
+
+ if(unlikely(!st_msq_bytes))
+ st_msq_bytes = rrdset_create_localhost(
+ "system"
+ , "message_queue_bytes"
+ , NULL
+ , "ipc message queues"
+ , NULL
+ , "IPC Message Queue Used Bytes"
+ , "bytes"
+ , PLUGIN_PROC_NAME
+ , "ipc"
+ , NETDATA_CHART_PRIO_SYSTEM_IPC_MSQ_SIZE
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+ else
+ rrdset_next(st_msq_bytes);
+
+ struct message_queue *msq = message_queue_root, *msq_prev = NULL;
+ while(likely(msq)){
+ if(likely(msq->found)) {
+ if(unlikely(!msq->rd_messages || !msq->rd_bytes)) {
+ char id[RRD_ID_LENGTH_MAX + 1];
+ snprintfz(id, RRD_ID_LENGTH_MAX, "%llu", msq->id);
+ if(likely(!msq->rd_messages)) msq->rd_messages = rrddim_add(st_msq_messages, id, NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ if(likely(!msq->rd_bytes)) msq->rd_bytes = rrddim_add(st_msq_bytes, id, NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ }
+
+ rrddim_set_by_pointer(st_msq_messages, msq->rd_messages, msq->messages);
+ rrddim_set_by_pointer(st_msq_bytes, msq->rd_bytes, msq->bytes);
+
+ msq->found = 0;
+ }
+ else {
+ rrddim_is_obsolete(st_msq_messages, msq->rd_messages);
+ rrddim_is_obsolete(st_msq_bytes, msq->rd_bytes);
+
+ // remove message queue from the linked list
+ if(!msq_prev)
+ message_queue_root = msq->next;
+ else
+ msq_prev->next = msq->next;
+ freez(msq);
+ msq = NULL;
+ }
+ if(likely(msq)) {
+ msq_prev = msq;
+ msq = msq->next;
+ }
+ else if(!msq_prev)
+ msq = message_queue_root;
+ else
+ msq = msq_prev->next;
+ }
+
+ rrdset_done(st_msq_messages);
+ rrdset_done(st_msq_bytes);
+
+ long long dimensions_num = 0;
+ RRDDIM *rd;
+ rrdset_rdlock(st_msq_messages);
+ rrddim_foreach_read(rd, st_msq_messages) dimensions_num++;
+ rrdset_unlock(st_msq_messages);
+
+ if(unlikely(dimensions_num > dimensions_limit)) {
+                info("Message queue statistics have been disabled");
+                info("There are %lld dimensions in memory but the limit was set to %lld", dimensions_num, dimensions_limit);
+ rrdset_is_obsolete(st_msq_messages);
+ rrdset_is_obsolete(st_msq_bytes);
+ st_msq_messages = NULL;
+ st_msq_bytes = NULL;
+ do_msg = CONFIG_BOOLEAN_NO;
+ }
+ else if(unlikely(!message_queue_root)) {
+ info("Making chart %s (%s) obsolete since it does not have any dimensions", st_msq_messages->name, st_msq_messages->id);
+ rrdset_is_obsolete(st_msq_messages);
+ st_msq_messages = NULL;
+
+ info("Making chart %s (%s) obsolete since it does not have any dimensions", st_msq_bytes->name, st_msq_bytes->id);
+ rrdset_is_obsolete(st_msq_bytes);
+ st_msq_bytes = NULL;
+ }
+ }
+ }
+
+ // --------------------------------------------------------------------
+
+ if(likely(do_shm != CONFIG_BOOLEAN_NO)) {
+ static RRDSET *st_shm_segments = NULL, *st_shm_bytes = NULL;
+ static RRDDIM *rd_shm_segments = NULL, *rd_shm_bytes = NULL;
+ struct shm_stats shm;
+
+ if(!ipc_shm_get_info(shm_filename, &shm)) {
+ if(unlikely(!st_shm_segments)) {
+ st_shm_segments = rrdset_create_localhost(
+ "system"
+ , "shared_memory_segments"
+ , NULL
+ , "ipc shared memory"
+ , NULL
+ , "IPC Shared Memory Number of Segments"
+ , "segments"
+ , PLUGIN_PROC_NAME
+ , "ipc"
+ , NETDATA_CHART_PRIO_SYSTEM_IPC_SHARED_MEM_SEGS
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+
+ rd_shm_segments = rrddim_add(st_shm_segments, "segments", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else
+ rrdset_next(st_shm_segments);
+
+ rrddim_set_by_pointer(st_shm_segments, rd_shm_segments, shm.segments);
+
+ rrdset_done(st_shm_segments);
+
+ // --------------------------------------------------------------------
+
+ if(unlikely(!st_shm_bytes)) {
+ st_shm_bytes = rrdset_create_localhost(
+ "system"
+ , "shared_memory_bytes"
+ , NULL
+ , "ipc shared memory"
+ , NULL
+ , "IPC Shared Memory Used Bytes"
+ , "bytes"
+ , PLUGIN_PROC_NAME
+ , "ipc"
+ , NETDATA_CHART_PRIO_SYSTEM_IPC_SHARED_MEM_SIZE
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+
+ rd_shm_bytes = rrddim_add(st_shm_bytes, "bytes", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else
+ rrdset_next(st_shm_bytes);
+
+ rrddim_set_by_pointer(st_shm_bytes, rd_shm_bytes, shm.bytes);
+
+ rrdset_done(st_shm_bytes);
+ }
+ }
+
+ return 0;
+}
diff --git a/collectors/proc.plugin/plugin_proc.c b/collectors/proc.plugin/plugin_proc.c
new file mode 100644
index 0000000..19230c0
--- /dev/null
+++ b/collectors/proc.plugin/plugin_proc.c
@@ -0,0 +1,230 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "plugin_proc.h"
+
+static struct proc_module {
+ const char *name;
+ const char *dim;
+
+ int enabled;
+
+ int (*func)(int update_every, usec_t dt);
+ usec_t duration;
+
+ RRDDIM *rd;
+
+} proc_modules[] = {
+
+ // system metrics
+ { .name = "/proc/stat", .dim = "stat", .func = do_proc_stat },
+ { .name = "/proc/uptime", .dim = "uptime", .func = do_proc_uptime },
+ { .name = "/proc/loadavg", .dim = "loadavg", .func = do_proc_loadavg },
+ { .name = "/proc/sys/kernel/random/entropy_avail", .dim = "entropy", .func = do_proc_sys_kernel_random_entropy_avail },
+
+ // pressure metrics
+ { .name = "/proc/pressure", .dim = "pressure", .func = do_proc_pressure },
+
+ // CPU metrics
+ { .name = "/proc/interrupts", .dim = "interrupts", .func = do_proc_interrupts },
+ { .name = "/proc/softirqs", .dim = "softirqs", .func = do_proc_softirqs },
+
+ // memory metrics
+ { .name = "/proc/vmstat", .dim = "vmstat", .func = do_proc_vmstat },
+ { .name = "/proc/meminfo", .dim = "meminfo", .func = do_proc_meminfo },
+ { .name = "/sys/kernel/mm/ksm", .dim = "ksm", .func = do_sys_kernel_mm_ksm },
+ { .name = "/sys/block/zram", .dim = "zram", .func = do_sys_block_zram },
+ { .name = "/sys/devices/system/edac/mc", .dim = "ecc", .func = do_proc_sys_devices_system_edac_mc },
+ { .name = "/sys/devices/system/node", .dim = "numa", .func = do_proc_sys_devices_system_node },
+ { .name = "/proc/pagetypeinfo", .dim = "pagetypeinfo", .func = do_proc_pagetypeinfo },
+
+ // network metrics
+ { .name = "/proc/net/dev", .dim = "netdev", .func = do_proc_net_dev },
+ { .name = "/proc/net/wireless", .dim = "netwireless", .func = do_proc_net_wireless },
+ { .name = "/proc/net/sockstat", .dim = "sockstat", .func = do_proc_net_sockstat },
+ { .name = "/proc/net/sockstat6", .dim = "sockstat6", .func = do_proc_net_sockstat6 },
+ { .name = "/proc/net/netstat", .dim = "netstat", .func = do_proc_net_netstat }, // this has to be before /proc/net/snmp, because there is a shared metric
+ { .name = "/proc/net/snmp", .dim = "snmp", .func = do_proc_net_snmp },
+ { .name = "/proc/net/snmp6", .dim = "snmp6", .func = do_proc_net_snmp6 },
+ { .name = "/proc/net/sctp/snmp", .dim = "sctp", .func = do_proc_net_sctp_snmp },
+ { .name = "/proc/net/softnet_stat", .dim = "softnet", .func = do_proc_net_softnet_stat },
+ { .name = "/proc/net/ip_vs/stats", .dim = "ipvs", .func = do_proc_net_ip_vs_stats },
+ { .name = "/sys/class/infiniband", .dim = "infiniband", .func = do_sys_class_infiniband },
+
+ // firewall metrics
+ { .name = "/proc/net/stat/conntrack", .dim = "conntrack", .func = do_proc_net_stat_conntrack },
+ { .name = "/proc/net/stat/synproxy", .dim = "synproxy", .func = do_proc_net_stat_synproxy },
+
+ // disk metrics
+ { .name = "/proc/diskstats", .dim = "diskstats", .func = do_proc_diskstats },
+ { .name = "/proc/mdstat", .dim = "mdstat", .func = do_proc_mdstat },
+
+ // NFS metrics
+ { .name = "/proc/net/rpc/nfsd", .dim = "nfsd", .func = do_proc_net_rpc_nfsd },
+ { .name = "/proc/net/rpc/nfs", .dim = "nfs", .func = do_proc_net_rpc_nfs },
+
+ // ZFS metrics
+ { .name = "/proc/spl/kstat/zfs/arcstats", .dim = "zfs_arcstats", .func = do_proc_spl_kstat_zfs_arcstats },
+
+ // BTRFS metrics
+ { .name = "/sys/fs/btrfs", .dim = "btrfs", .func = do_sys_fs_btrfs },
+
+ // IPC metrics
+ { .name = "ipc", .dim = "ipc", .func = do_ipc },
+
+ // linux power supply metrics
+ { .name = "/sys/class/power_supply", .dim = "power_supply", .func = do_sys_class_power_supply },
+
+ // the terminator of this array
+ { .name = NULL, .dim = NULL, .func = NULL }
+};
+
+static void proc_main_cleanup(void *ptr) {
+ struct netdata_static_thread *static_thread = (struct netdata_static_thread *)ptr;
+ static_thread->enabled = NETDATA_MAIN_THREAD_EXITING;
+
+ info("cleaning up...");
+
+ static_thread->enabled = NETDATA_MAIN_THREAD_EXITED;
+}
+
+void *proc_main(void *ptr) {
+ netdata_thread_cleanup_push(proc_main_cleanup, ptr);
+
+ int vdo_cpu_netdata = config_get_boolean("plugin:proc", "netdata server resources", CONFIG_BOOLEAN_YES);
+
+ config_get_boolean("plugin:proc", "/proc/pagetypeinfo", CONFIG_BOOLEAN_NO);
+
+ // check the enabled status for each module
+ int i;
+ for(i = 0 ; proc_modules[i].name ;i++) {
+ struct proc_module *pm = &proc_modules[i];
+
+ pm->enabled = config_get_boolean("plugin:proc", pm->name, CONFIG_BOOLEAN_YES);
+ pm->duration = 0ULL;
+ pm->rd = NULL;
+ }
+
+ usec_t step = localhost->rrd_update_every * USEC_PER_SEC;
+ heartbeat_t hb;
+ heartbeat_init(&hb);
+ size_t iterations = 0;
+
+ while(!netdata_exit) {
+ iterations++;
+ (void)iterations;
+
+ usec_t hb_dt = heartbeat_next(&hb, step);
+ usec_t duration = 0ULL;
+
+ if(unlikely(netdata_exit)) break;
+
+ // BEGIN -- the job to be done
+
+ for(i = 0 ; proc_modules[i].name ;i++) {
+ struct proc_module *pm = &proc_modules[i];
+ if(unlikely(!pm->enabled)) continue;
+
+ debug(D_PROCNETDEV_LOOP, "PROC calling %s.", pm->name);
+
+//#ifdef NETDATA_LOG_ALLOCATIONS
+// if(pm->func == do_proc_interrupts)
+// log_thread_memory_allocations = iterations;
+//#endif
+ pm->enabled = !pm->func(localhost->rrd_update_every, hb_dt);
+ pm->duration = heartbeat_monotonic_dt_to_now_usec(&hb) - duration;
+ duration += pm->duration;
+
+//#ifdef NETDATA_LOG_ALLOCATIONS
+// if(pm->func == do_proc_interrupts)
+// log_thread_memory_allocations = 0;
+//#endif
+
+ if(unlikely(netdata_exit)) break;
+ }
+
+ // END -- the job is done
+
+ // --------------------------------------------------------------------
+
+ if(vdo_cpu_netdata) {
+ static RRDSET *st = NULL;
+
+ if(unlikely(!st)) {
+ st = rrdset_find_active_bytype_localhost("netdata", "plugin_proc_modules");
+
+ if(!st) {
+ st = rrdset_create_localhost(
+ "netdata"
+ , "plugin_proc_modules"
+ , NULL
+ , "proc"
+ , NULL
+ , "NetData Proc Plugin Modules Durations"
+ , "milliseconds/run"
+ , "netdata"
+ , "stats"
+ , 132001
+ , localhost->rrd_update_every
+ , RRDSET_TYPE_STACKED
+ );
+
+ for(i = 0 ; proc_modules[i].name ;i++) {
+ struct proc_module *pm = &proc_modules[i];
+ if(unlikely(!pm->enabled)) continue;
+
+ pm->rd = rrddim_add(st, pm->dim, NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE);
+ }
+ }
+ }
+ else rrdset_next(st);
+
+ for(i = 0 ; proc_modules[i].name ;i++) {
+ struct proc_module *pm = &proc_modules[i];
+ if(unlikely(!pm->enabled)) continue;
+
+ rrddim_set_by_pointer(st, pm->rd, pm->duration);
+ }
+ rrdset_done(st);
+
+ global_statistics_charts();
+ registry_statistics();
+ }
+ }
+
+ netdata_thread_cleanup_pop(1);
+ return NULL;
+}
+
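+// Count the "node<N>" directories under /sys/devices/system/node,
+// caching the result after the first call.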
+int get_numa_node_count(void)
+{
+ static int numa_node_count = -1;
+
+ if (numa_node_count != -1)
+ return numa_node_count;
+
+ numa_node_count = 0;
+
+ char name[FILENAME_MAX + 1];
+ snprintfz(name, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/devices/system/node");
+ char *dirname = config_get("plugin:proc:/sys/devices/system/node", "directory to monitor", name);
+
+ DIR *dir = opendir(dirname);
+ if(dir) {
+ struct dirent *de = NULL;
+ while((de = readdir(dir))) {
+ if(de->d_type != DT_DIR)
+ continue;
+
+ if(strncmp(de->d_name, "node", 4) != 0)
+ continue;
+
+ if(!isdigit(de->d_name[4]))
+ continue;
+
+ numa_node_count++;
+ }
+ closedir(dir);
+ }
+
+ return numa_node_count;
+}
diff --git a/collectors/proc.plugin/plugin_proc.h b/collectors/proc.plugin/plugin_proc.h
new file mode 100644
index 0000000..108c026
--- /dev/null
+++ b/collectors/proc.plugin/plugin_proc.h
@@ -0,0 +1,83 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_PLUGIN_PROC_H
+#define NETDATA_PLUGIN_PROC_H 1
+
+#include "../../daemon/common.h"
+
+#if (TARGET_OS == OS_LINUX)
+
+#define NETDATA_PLUGIN_HOOK_LINUX_PROC \
+ { \
+ .name = "PLUGIN[proc]", \
+ .config_section = CONFIG_SECTION_PLUGINS, \
+ .config_name = "proc", \
+ .enabled = 1, \
+ .thread = NULL, \
+ .init_routine = NULL, \
+ .start_routine = proc_main \
+ },
+
+
+#define PLUGIN_PROC_CONFIG_NAME "proc"
+#define PLUGIN_PROC_NAME PLUGIN_PROC_CONFIG_NAME ".plugin"
+
+extern void *proc_main(void *ptr);
+
+extern int do_proc_net_dev(int update_every, usec_t dt);
+extern int do_proc_net_wireless(int update_every, usec_t dt);
+extern int do_proc_diskstats(int update_every, usec_t dt);
+extern int do_proc_mdstat(int update_every, usec_t dt);
+extern int do_proc_net_snmp(int update_every, usec_t dt);
+extern int do_proc_net_snmp6(int update_every, usec_t dt);
+extern int do_proc_net_netstat(int update_every, usec_t dt);
+extern int do_proc_net_stat_conntrack(int update_every, usec_t dt);
+extern int do_proc_net_ip_vs_stats(int update_every, usec_t dt);
+extern int do_proc_stat(int update_every, usec_t dt);
+extern int do_proc_meminfo(int update_every, usec_t dt);
+extern int do_proc_vmstat(int update_every, usec_t dt);
+extern int do_proc_net_rpc_nfs(int update_every, usec_t dt);
+extern int do_proc_net_rpc_nfsd(int update_every, usec_t dt);
+extern int do_proc_sys_kernel_random_entropy_avail(int update_every, usec_t dt);
+extern int do_proc_interrupts(int update_every, usec_t dt);
+extern int do_proc_softirqs(int update_every, usec_t dt);
+extern int do_proc_pressure(int update_every, usec_t dt);
+extern int do_sys_kernel_mm_ksm(int update_every, usec_t dt);
+extern int do_sys_block_zram(int update_every, usec_t dt);
+extern int do_proc_loadavg(int update_every, usec_t dt);
+extern int do_proc_net_stat_synproxy(int update_every, usec_t dt);
+extern int do_proc_net_softnet_stat(int update_every, usec_t dt);
+extern int do_proc_uptime(int update_every, usec_t dt);
+extern int do_proc_sys_devices_system_edac_mc(int update_every, usec_t dt);
+extern int do_proc_sys_devices_system_node(int update_every, usec_t dt);
+extern int do_proc_spl_kstat_zfs_arcstats(int update_every, usec_t dt);
+extern int do_sys_fs_btrfs(int update_every, usec_t dt);
+extern int do_proc_net_sockstat(int update_every, usec_t dt);
+extern int do_proc_net_sockstat6(int update_every, usec_t dt);
+extern int do_proc_net_sctp_snmp(int update_every, usec_t dt);
+extern int do_ipc(int update_every, usec_t dt);
+extern int do_sys_class_power_supply(int update_every, usec_t dt);
+extern int do_proc_pagetypeinfo(int update_every, usec_t dt);
+extern int do_sys_class_infiniband(int update_every, usec_t dt);
+extern int get_numa_node_count(void);
+
+// metrics that need to be shared among data collectors
+extern unsigned long long tcpext_TCPSynRetrans;
+
+// netdev renames
+extern void netdev_rename_device_add(
+ const char *host_device, const char *container_device, const char *container_name, struct label *labels);
+extern void netdev_rename_device_del(const char *host_device);
+
+#include "proc_self_mountinfo.h"
+#include "proc_pressure.h"
+#include "zfs_common.h"
+
+#else // (TARGET_OS == OS_LINUX)
+
+#define NETDATA_PLUGIN_HOOK_LINUX_PROC
+
+#endif // (TARGET_OS == OS_LINUX)
+
+
+#endif /* NETDATA_PLUGIN_PROC_H */
diff --git a/collectors/proc.plugin/proc_diskstats.c b/collectors/proc.plugin/proc_diskstats.c
new file mode 100644
index 0000000..eee0cbe
--- /dev/null
+++ b/collectors/proc.plugin/proc_diskstats.c
@@ -0,0 +1,1700 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "plugin_proc.h"
+
+#define RRD_TYPE_DISK "disk"
+#define PLUGIN_PROC_MODULE_DISKSTATS_NAME "/proc/diskstats"
+#define CONFIG_SECTION_PLUGIN_PROC_DISKSTATS "plugin:" PLUGIN_PROC_CONFIG_NAME ":" PLUGIN_PROC_MODULE_DISKSTATS_NAME
+
+#define DISK_TYPE_UNKNOWN 0
+#define DISK_TYPE_PHYSICAL 1
+#define DISK_TYPE_PARTITION 2
+#define DISK_TYPE_VIRTUAL 3
+
+#define DEFAULT_PREFERRED_IDS "*"
+#define DEFAULT_EXCLUDED_DISKS "loop* ram*"
+
+static struct disk {
+    char *disk;             // the name of the disk (sda, sdb, etc., after being looked up)
+ char *device; // the device of the disk (before being looked up)
+ unsigned long major;
+ unsigned long minor;
+ int sector_size;
+ int type;
+
+ char *mount_point;
+
+ // disk options caching
+ int do_io;
+ int do_ops;
+ int do_mops;
+ int do_iotime;
+ int do_qops;
+ int do_util;
+ int do_backlog;
+ int do_bcache;
+
+ int updated;
+
+ int device_is_bcache;
+
+ char *bcache_filename_dirty_data;
+ char *bcache_filename_writeback_rate;
+ char *bcache_filename_cache_congested;
+ char *bcache_filename_cache_available_percent;
+ char *bcache_filename_stats_five_minute_cache_hit_ratio;
+ char *bcache_filename_stats_hour_cache_hit_ratio;
+ char *bcache_filename_stats_day_cache_hit_ratio;
+ char *bcache_filename_stats_total_cache_hit_ratio;
+ char *bcache_filename_stats_total_cache_hits;
+ char *bcache_filename_stats_total_cache_misses;
+ char *bcache_filename_stats_total_cache_miss_collisions;
+ char *bcache_filename_stats_total_cache_bypass_hits;
+ char *bcache_filename_stats_total_cache_bypass_misses;
+ char *bcache_filename_stats_total_cache_readaheads;
+ char *bcache_filename_cache_read_races;
+ char *bcache_filename_cache_io_errors;
+ char *bcache_filename_priority_stats;
+
+ usec_t bcache_priority_stats_update_every_usec;
+ usec_t bcache_priority_stats_elapsed_usec;
+
+ RRDSET *st_io;
+ RRDDIM *rd_io_reads;
+ RRDDIM *rd_io_writes;
+
+ RRDSET *st_ops;
+ RRDDIM *rd_ops_reads;
+ RRDDIM *rd_ops_writes;
+
+ RRDSET *st_qops;
+ RRDDIM *rd_qops_operations;
+
+ RRDSET *st_backlog;
+ RRDDIM *rd_backlog_backlog;
+
+ RRDSET *st_util;
+ RRDDIM *rd_util_utilization;
+
+ RRDSET *st_mops;
+ RRDDIM *rd_mops_reads;
+ RRDDIM *rd_mops_writes;
+
+ RRDSET *st_iotime;
+ RRDDIM *rd_iotime_reads;
+ RRDDIM *rd_iotime_writes;
+
+ RRDSET *st_await;
+ RRDDIM *rd_await_reads;
+ RRDDIM *rd_await_writes;
+
+ RRDSET *st_avgsz;
+ RRDDIM *rd_avgsz_reads;
+ RRDDIM *rd_avgsz_writes;
+
+ RRDSET *st_svctm;
+ RRDDIM *rd_svctm_svctm;
+
+ RRDSET *st_bcache_size;
+ RRDDIM *rd_bcache_dirty_size;
+
+ RRDSET *st_bcache_usage;
+ RRDDIM *rd_bcache_available_percent;
+
+ RRDSET *st_bcache_hit_ratio;
+ RRDDIM *rd_bcache_hit_ratio_5min;
+ RRDDIM *rd_bcache_hit_ratio_1hour;
+ RRDDIM *rd_bcache_hit_ratio_1day;
+ RRDDIM *rd_bcache_hit_ratio_total;
+
+ RRDSET *st_bcache;
+ RRDDIM *rd_bcache_hits;
+ RRDDIM *rd_bcache_misses;
+ RRDDIM *rd_bcache_miss_collisions;
+
+ RRDSET *st_bcache_bypass;
+ RRDDIM *rd_bcache_bypass_hits;
+ RRDDIM *rd_bcache_bypass_misses;
+
+ RRDSET *st_bcache_rates;
+ RRDDIM *rd_bcache_rate_congested;
+ RRDDIM *rd_bcache_readaheads;
+ RRDDIM *rd_bcache_rate_writeback;
+
+ RRDSET *st_bcache_cache_allocations;
+ RRDDIM *rd_bcache_cache_allocations_unused;
+ RRDDIM *rd_bcache_cache_allocations_clean;
+ RRDDIM *rd_bcache_cache_allocations_dirty;
+ RRDDIM *rd_bcache_cache_allocations_metadata;
+ RRDDIM *rd_bcache_cache_allocations_unknown;
+
+ RRDSET *st_bcache_cache_read_races;
+ RRDDIM *rd_bcache_cache_read_races;
+ RRDDIM *rd_bcache_cache_io_errors;
+
+ struct disk *next;
+} *disk_root = NULL;
+
+#define rrdset_obsolete_and_pointer_null(st) do { if(st) { rrdset_is_obsolete(st); (st) = NULL; } } while(0)
+
+// static char *path_to_get_hw_sector_size = NULL;
+// static char *path_to_get_hw_sector_size_partitions = NULL;
+static char *path_to_sys_dev_block_major_minor_string = NULL;
+static char *path_to_sys_block_device = NULL;
+static char *path_to_sys_block_device_bcache = NULL;
+static char *path_to_sys_devices_virtual_block_device = NULL;
+static char *path_to_device_mapper = NULL;
+static char *path_to_device_label = NULL;
+static char *path_to_device_id = NULL;
+static char *path_to_veritas_volume_groups = NULL;
+static int name_disks_by_id = CONFIG_BOOLEAN_NO;
+static int global_bcache_priority_stats_update_every = 0; // disabled by default
+
+static int global_enable_new_disks_detected_at_runtime = CONFIG_BOOLEAN_YES,
+ global_enable_performance_for_physical_disks = CONFIG_BOOLEAN_AUTO,
+ global_enable_performance_for_virtual_disks = CONFIG_BOOLEAN_AUTO,
+ global_enable_performance_for_partitions = CONFIG_BOOLEAN_NO,
+ global_do_io = CONFIG_BOOLEAN_AUTO,
+ global_do_ops = CONFIG_BOOLEAN_AUTO,
+ global_do_mops = CONFIG_BOOLEAN_AUTO,
+ global_do_iotime = CONFIG_BOOLEAN_AUTO,
+ global_do_qops = CONFIG_BOOLEAN_AUTO,
+ global_do_util = CONFIG_BOOLEAN_AUTO,
+ global_do_backlog = CONFIG_BOOLEAN_AUTO,
+ global_do_bcache = CONFIG_BOOLEAN_AUTO,
+ globals_initialized = 0,
+ global_cleanup_removed_disks = 1;
+
+static SIMPLE_PATTERN *preferred_ids = NULL;
+static SIMPLE_PATTERN *excluded_disks = NULL;
+
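+// parse a bcache sysfs value that may carry a unit suffix - e.g. "0.0k",
+// "1.2M" or "3.4G" (illustrative values) - converting it to bytes; values
+// without a recognized suffix are returned unscaled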
+static unsigned long long int bcache_read_number_with_units(const char *filename) {
+ char buffer[50 + 1];
+ if(read_file(filename, buffer, 50) == 0) {
+ static int unknown_units_error = 10;
+
+ char *end = NULL;
+ long double value = str2ld(buffer, &end);
+ if(end && *end) {
+ if(*end == 'k')
+ return (unsigned long long int)(value * 1024.0);
+ else if(*end == 'M')
+ return (unsigned long long int)(value * 1024.0 * 1024.0);
+ else if(*end == 'G')
+ return (unsigned long long int)(value * 1024.0 * 1024.0 * 1024.0);
+ else if(*end == 'T')
+ return (unsigned long long int)(value * 1024.0 * 1024.0 * 1024.0 * 1024.0);
+ else if(unknown_units_error > 0) {
+ error("bcache file '%s' provides value '%s' with unknown units '%s'", filename, buffer, end);
+ unknown_units_error--;
+ }
+ }
+
+ return (unsigned long long int)value;
+ }
+
+ return 0;
+}
+
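+// cache0/priority_stats reports the cache allocation as percentages, in
+// lines like "Unused: 98%" and "Dirty: 1%" (illustrative excerpt); the
+// adaptive resortable list (ARL) below maps these labels to the static
+// variables while the file is parsed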
+void bcache_read_priority_stats(struct disk *d, const char *family, int update_every, usec_t dt) {
+ static procfile *ff = NULL;
+ static char *separators = " \t:%[]";
+
+ static ARL_BASE *arl_base = NULL;
+
+ static unsigned long long unused;
+ static unsigned long long clean;
+ static unsigned long long dirty;
+ static unsigned long long metadata;
+ static unsigned long long unknown;
+
+ // check if it is time to update this metric
+ d->bcache_priority_stats_elapsed_usec += dt;
+ if(likely(d->bcache_priority_stats_elapsed_usec < d->bcache_priority_stats_update_every_usec)) return;
+ d->bcache_priority_stats_elapsed_usec = 0;
+
+ // initialize ARL
+ if(unlikely(!arl_base)) {
+ arl_base = arl_create("bcache/priority_stats", NULL, 60);
+ arl_expect(arl_base, "Unused", &unused);
+ arl_expect(arl_base, "Clean", &clean);
+ arl_expect(arl_base, "Dirty", &dirty);
+ arl_expect(arl_base, "Metadata", &metadata);
+ }
+
+ ff = procfile_reopen(ff, d->bcache_filename_priority_stats, separators, PROCFILE_FLAG_DEFAULT);
+ if(likely(ff)) ff = procfile_readall(ff);
+ if(unlikely(!ff)) {
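+        // the file could not be opened or read, so ff is NULL again;
+        // restore the separators for the next procfile_reopen()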
+ separators = " \t:%[]";
+ return;
+ }
+
+ // do not reset the separators on every iteration
+ separators = NULL;
+
+ arl_begin(arl_base);
+ unused = clean = dirty = metadata = unknown = 0;
+
+ size_t lines = procfile_lines(ff), l;
+
+ for(l = 0; l < lines ;l++) {
+ size_t words = procfile_linewords(ff, l);
+ if(unlikely(words < 2)) {
+ if(unlikely(words)) error("Cannot read '%s' line %zu. Expected 2 params, read %zu.", d->bcache_filename_priority_stats, l, words);
+ continue;
+ }
+
+ if(unlikely(arl_check(arl_base,
+ procfile_lineword(ff, l, 0),
+ procfile_lineword(ff, l, 1)))) break;
+ }
+
+    // the fields are percentages; clamp to zero to avoid unsigned
+    // underflow when rounding makes them sum above 100
+    unknown = (unused + clean + dirty + metadata <= 100) ? 100 - unused - clean - dirty - metadata : 0;
+
+ // create / update the cache allocations chart
+ {
+ if(unlikely(!d->st_bcache_cache_allocations)) {
+ d->st_bcache_cache_allocations = rrdset_create_localhost(
+ "disk_bcache_cache_alloc"
+ , d->device
+ , d->disk
+ , family
+ , "disk.bcache_cache_alloc"
+ , "BCache Cache Allocations"
+ , "percentage"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_DISKSTATS_NAME
+ , NETDATA_CHART_PRIO_BCACHE_CACHE_ALLOC
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+
+ d->rd_bcache_cache_allocations_unused = rrddim_add(d->st_bcache_cache_allocations, "unused", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ d->rd_bcache_cache_allocations_dirty = rrddim_add(d->st_bcache_cache_allocations, "dirty", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ d->rd_bcache_cache_allocations_clean = rrddim_add(d->st_bcache_cache_allocations, "clean", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ d->rd_bcache_cache_allocations_metadata = rrddim_add(d->st_bcache_cache_allocations, "metadata", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ d->rd_bcache_cache_allocations_unknown = rrddim_add(d->st_bcache_cache_allocations, "undefined", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+
+ d->bcache_priority_stats_update_every_usec = update_every * USEC_PER_SEC;
+ }
+ else rrdset_next(d->st_bcache_cache_allocations);
+
+ rrddim_set_by_pointer(d->st_bcache_cache_allocations, d->rd_bcache_cache_allocations_unused, unused);
+ rrddim_set_by_pointer(d->st_bcache_cache_allocations, d->rd_bcache_cache_allocations_dirty, dirty);
+ rrddim_set_by_pointer(d->st_bcache_cache_allocations, d->rd_bcache_cache_allocations_clean, clean);
+ rrddim_set_by_pointer(d->st_bcache_cache_allocations, d->rd_bcache_cache_allocations_metadata, metadata);
+ rrddim_set_by_pointer(d->st_bcache_cache_allocations, d->rd_bcache_cache_allocations_unknown, unknown);
+ rrdset_done(d->st_bcache_cache_allocations);
+ }
+}
+
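+// per-major configuration cache: the array grows on demand, -1 marks a
+// major that has not been looked up yet, and each major is resolved to a
+// boolean from the configuration only once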
+static inline int is_major_enabled(int major) {
+ static int8_t *major_configs = NULL;
+ static size_t major_size = 0;
+
+ if(major < 0) return 1;
+
+ size_t wanted_size = (size_t)major + 1;
+
+ if(major_size < wanted_size) {
+ major_configs = reallocz(major_configs, wanted_size * sizeof(int8_t));
+
+ size_t i;
+ for(i = major_size; i < wanted_size ; i++)
+ major_configs[i] = -1;
+
+ major_size = wanted_size;
+ }
+
+ if(major_configs[major] == -1) {
+ char buffer[CONFIG_MAX_NAME + 1];
+ snprintfz(buffer, CONFIG_MAX_NAME, "performance metrics for disks with major %d", major);
+ major_configs[major] = (char)config_get_boolean(CONFIG_SECTION_PLUGIN_PROC_DISKSTATS, buffer, 1);
+ }
+
+ return (int)major_configs[major];
+}
+
+static inline int get_disk_name_from_path(const char *path, char *result, size_t result_size, unsigned long major, unsigned long minor, char *disk, char *prefix, int depth) {
+ //info("DEVICE-MAPPER ('%s', %lu:%lu): examining directory '%s' (allowed depth %d).", disk, major, minor, path, depth);
+
+ int found = 0, preferred = 0;
+
+ char *first_result = mallocz(result_size);
+
+ DIR *dir = opendir(path);
+ if (!dir) {
+ error("DEVICE-MAPPER ('%s', %lu:%lu): Cannot open directory '%s'.", disk, major, minor, path);
+ goto failed;
+ }
+
+ struct dirent *de = NULL;
+ while ((de = readdir(dir))) {
+ if(de->d_type == DT_DIR) {
+ if((de->d_name[0] == '.' && de->d_name[1] == '\0') || (de->d_name[0] == '.' && de->d_name[1] == '.' && de->d_name[2] == '\0'))
+ continue;
+
+ if(depth <= 0) {
+ error("DEVICE-MAPPER ('%s', %lu:%lu): Depth limit reached for path '%s/%s'. Ignoring path.", disk, major, minor, path, de->d_name);
+ break;
+ }
+ else {
+ char *path_nested = NULL;
+ char *prefix_nested = NULL;
+
+ {
+ char buffer[FILENAME_MAX + 1];
+ snprintfz(buffer, FILENAME_MAX, "%s/%s", path, de->d_name);
+ path_nested = strdupz(buffer);
+
+ snprintfz(buffer, FILENAME_MAX, "%s%s%s", (prefix)?prefix:"", (prefix)?"_":"", de->d_name);
+ prefix_nested = strdupz(buffer);
+ }
+
+ found = get_disk_name_from_path(path_nested, result, result_size, major, minor, disk, prefix_nested, depth - 1);
+ freez(path_nested);
+ freez(prefix_nested);
+
+ if(found) break;
+ }
+ }
+ else if(de->d_type == DT_LNK || de->d_type == DT_BLK) {
+ char filename[FILENAME_MAX + 1];
+
+ if(de->d_type == DT_LNK) {
+ snprintfz(filename, FILENAME_MAX, "%s/%s", path, de->d_name);
+ ssize_t len = readlink(filename, result, result_size - 1);
+ if(len <= 0) {
+ error("DEVICE-MAPPER ('%s', %lu:%lu): Cannot read link '%s'.", disk, major, minor, filename);
+ continue;
+ }
+
+ result[len] = '\0';
+ if(result[0] != '/')
+ snprintfz(filename, FILENAME_MAX, "%s/%s", path, result);
+ else
+ strncpyz(filename, result, FILENAME_MAX);
+ }
+ else {
+ snprintfz(filename, FILENAME_MAX, "%s/%s", path, de->d_name);
+ }
+
+ struct stat sb;
+ if(stat(filename, &sb) == -1) {
+ error("DEVICE-MAPPER ('%s', %lu:%lu): Cannot stat() file '%s'.", disk, major, minor, filename);
+ continue;
+ }
+
+ if((sb.st_mode & S_IFMT) != S_IFBLK) {
+ //info("DEVICE-MAPPER ('%s', %lu:%lu): file '%s' is not a block device.", disk, major, minor, filename);
+ continue;
+ }
+
+ if(major(sb.st_rdev) != major || minor(sb.st_rdev) != minor) {
+ //info("DEVICE-MAPPER ('%s', %lu:%lu): filename '%s' does not match %lu:%lu.", disk, major, minor, filename, (unsigned long)major(sb.st_rdev), (unsigned long)minor(sb.st_rdev));
+ continue;
+ }
+
+ //info("DEVICE-MAPPER ('%s', %lu:%lu): filename '%s' matches.", disk, major, minor, filename);
+
+ snprintfz(result, result_size - 1, "%s%s%s", (prefix)?prefix:"", (prefix)?"_":"", de->d_name);
+
+ if(!found) {
+                    strncpyz(first_result, result, result_size - 1);
+ found = 1;
+ }
+
+ if(simple_pattern_matches(preferred_ids, result)) {
+ preferred = 1;
+ break;
+ }
+ }
+ }
+ closedir(dir);
+
+
+failed:
+
+ if(!found)
+ result[0] = '\0';
+ else if(!preferred)
+        strncpyz(result, first_result, result_size - 1);
+
+ freez(first_result);
+
+ return found;
+}
+
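+// resolve a human-friendly disk name by trying, in order: device-mapper
+// names, /dev/disk/by-label, Veritas volume groups and (when enabled)
+// /dev/disk/by-id; if nothing matches, the kernel device name is kept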
+static inline char *get_disk_name(unsigned long major, unsigned long minor, char *disk) {
+ char result[FILENAME_MAX + 1] = "";
+
+ if(!path_to_device_mapper || !*path_to_device_mapper || !get_disk_name_from_path(path_to_device_mapper, result, FILENAME_MAX + 1, major, minor, disk, NULL, 0))
+ if(!path_to_device_label || !*path_to_device_label || !get_disk_name_from_path(path_to_device_label, result, FILENAME_MAX + 1, major, minor, disk, NULL, 0))
+ if(!path_to_veritas_volume_groups || !*path_to_veritas_volume_groups || !get_disk_name_from_path(path_to_veritas_volume_groups, result, FILENAME_MAX + 1, major, minor, disk, "vx", 2))
+ if(name_disks_by_id != CONFIG_BOOLEAN_YES || !path_to_device_id || !*path_to_device_id || !get_disk_name_from_path(path_to_device_id, result, FILENAME_MAX + 1, major, minor, disk, NULL, 0))
+                strncpyz(result, disk, FILENAME_MAX);
+
+ if(!result[0])
+        strncpyz(result, disk, FILENAME_MAX);
+
+ netdata_fix_chart_name(result);
+    return strdupz(result);
+}
+
+static void get_disk_config(struct disk *d) {
+ int def_enable = global_enable_new_disks_detected_at_runtime;
+
+ if(def_enable != CONFIG_BOOLEAN_NO && (simple_pattern_matches(excluded_disks, d->device) || simple_pattern_matches(excluded_disks, d->disk)))
+ def_enable = CONFIG_BOOLEAN_NO;
+
+ char var_name[4096 + 1];
+ snprintfz(var_name, 4096, CONFIG_SECTION_PLUGIN_PROC_DISKSTATS ":%s", d->disk);
+
+ def_enable = config_get_boolean_ondemand(var_name, "enable", def_enable);
+ if(unlikely(def_enable == CONFIG_BOOLEAN_NO)) {
+ // the user does not want any metrics for this disk
+ d->do_io = CONFIG_BOOLEAN_NO;
+ d->do_ops = CONFIG_BOOLEAN_NO;
+ d->do_mops = CONFIG_BOOLEAN_NO;
+ d->do_iotime = CONFIG_BOOLEAN_NO;
+ d->do_qops = CONFIG_BOOLEAN_NO;
+ d->do_util = CONFIG_BOOLEAN_NO;
+ d->do_backlog = CONFIG_BOOLEAN_NO;
+ d->do_bcache = CONFIG_BOOLEAN_NO;
+ }
+ else {
+ // this disk is enabled
+ // check its direct settings
+
+ int def_performance = CONFIG_BOOLEAN_AUTO;
+
+        // since this is 'on demand', we can decide the default
+        // performance settings based on the type of disk
+
+ if(!d->device_is_bcache) {
+ switch(d->type) {
+ default:
+ case DISK_TYPE_UNKNOWN:
+ break;
+
+ case DISK_TYPE_PHYSICAL:
+ def_performance = global_enable_performance_for_physical_disks;
+ break;
+
+ case DISK_TYPE_PARTITION:
+ def_performance = global_enable_performance_for_partitions;
+ break;
+
+ case DISK_TYPE_VIRTUAL:
+ def_performance = global_enable_performance_for_virtual_disks;
+ break;
+ }
+ }
+
+ // check if we have to disable performance for this disk
+ if(def_performance)
+ def_performance = is_major_enabled((int)d->major);
+
+ // ------------------------------------------------------------
+        // now we have def_performance to work with
+
+ // def_performance
+ // check the user configuration (this will also show our 'on demand' decision)
+ def_performance = config_get_boolean_ondemand(var_name, "enable performance metrics", def_performance);
+
+ int ddo_io = CONFIG_BOOLEAN_NO,
+ ddo_ops = CONFIG_BOOLEAN_NO,
+ ddo_mops = CONFIG_BOOLEAN_NO,
+ ddo_iotime = CONFIG_BOOLEAN_NO,
+ ddo_qops = CONFIG_BOOLEAN_NO,
+ ddo_util = CONFIG_BOOLEAN_NO,
+ ddo_backlog = CONFIG_BOOLEAN_NO,
+ ddo_bcache = CONFIG_BOOLEAN_NO;
+
+ // we enable individual performance charts only when def_performance is not disabled
+ if(unlikely(def_performance != CONFIG_BOOLEAN_NO)) {
+ ddo_io = global_do_io,
+ ddo_ops = global_do_ops,
+ ddo_mops = global_do_mops,
+ ddo_iotime = global_do_iotime,
+ ddo_qops = global_do_qops,
+ ddo_util = global_do_util,
+ ddo_backlog = global_do_backlog,
+ ddo_bcache = global_do_bcache;
+ }
+
+ d->do_io = config_get_boolean_ondemand(var_name, "bandwidth", ddo_io);
+ d->do_ops = config_get_boolean_ondemand(var_name, "operations", ddo_ops);
+ d->do_mops = config_get_boolean_ondemand(var_name, "merged operations", ddo_mops);
+ d->do_iotime = config_get_boolean_ondemand(var_name, "i/o time", ddo_iotime);
+ d->do_qops = config_get_boolean_ondemand(var_name, "queued operations", ddo_qops);
+ d->do_util = config_get_boolean_ondemand(var_name, "utilization percentage", ddo_util);
+ d->do_backlog = config_get_boolean_ondemand(var_name, "backlog", ddo_backlog);
+
+ if(d->device_is_bcache)
+ d->do_bcache = config_get_boolean_ondemand(var_name, "bcache", ddo_bcache);
+ else
+ d->do_bcache = 0;
+ }
+}
+
+static struct disk *get_disk(unsigned long major, unsigned long minor, char *disk) {
+ static struct mountinfo *disk_mountinfo_root = NULL;
+
+ struct disk *d;
+
+    // search for it in our RAM list.
+    // this is a sequential search, but the number of disks/partitions
+    // in a system is usually small, so it is acceptable
+ for(d = disk_root; d ; d = d->next)
+ if(unlikely(d->major == major && d->minor == minor))
+ return d;
+
+ // not found
+ // create a new disk structure
+ d = (struct disk *)callocz(1, sizeof(struct disk));
+
+ d->disk = get_disk_name(major, minor, disk);
+ d->device = strdupz(disk);
+ d->major = major;
+ d->minor = minor;
+ d->type = DISK_TYPE_UNKNOWN; // Default type. Changed later if not correct.
+    d->sector_size = 512;          // inside the kernel, sectors in /proc/diskstats are always 512 bytes (see #3481 below)
+ d->next = NULL;
+
+ // append it to the list
+ if(unlikely(!disk_root))
+ disk_root = d;
+ else {
+ struct disk *last;
+ for(last = disk_root; last->next ;last = last->next);
+ last->next = d;
+ }
+
+ char buffer[FILENAME_MAX + 1];
+
+ // find if it is a physical disk
+ // by checking if /sys/block/DISK is readable.
+ snprintfz(buffer, FILENAME_MAX, path_to_sys_block_device, disk);
+ if(likely(access(buffer, R_OK) == 0)) {
+ // assign it here, but it will be overwritten if it is not a physical disk
+ d->type = DISK_TYPE_PHYSICAL;
+ }
+
+ // find if it is a partition
+ // by checking if /sys/dev/block/MAJOR:MINOR/partition is readable.
+ snprintfz(buffer, FILENAME_MAX, path_to_sys_dev_block_major_minor_string, major, minor, "partition");
+ if(likely(access(buffer, R_OK) == 0)) {
+ d->type = DISK_TYPE_PARTITION;
+ }
+ else {
+ // find if it is a virtual disk
+ // by checking if /sys/devices/virtual/block/DISK is readable.
+ snprintfz(buffer, FILENAME_MAX, path_to_sys_devices_virtual_block_device, disk);
+ if(likely(access(buffer, R_OK) == 0)) {
+ d->type = DISK_TYPE_VIRTUAL;
+ }
+ else {
+ // find if it is a virtual device
+ // by checking if /sys/dev/block/MAJOR:MINOR/slaves has entries
+ snprintfz(buffer, FILENAME_MAX, path_to_sys_dev_block_major_minor_string, major, minor, "slaves/");
+ DIR *dirp = opendir(buffer);
+ if (likely(dirp != NULL)) {
+ struct dirent *dp;
+ while ((dp = readdir(dirp))) {
+                    // even an empty directory contains the . and .. entries
+ if (unlikely(strcmp(dp->d_name, ".") == 0 || strcmp(dp->d_name, "..") == 0)) {
+ continue;
+ }
+
+ d->type = DISK_TYPE_VIRTUAL;
+
+ // Stop the loop after we found one file.
+ break;
+ }
+ if (unlikely(closedir(dirp) == -1))
+ error("Unable to close dir %s", buffer);
+ }
+ }
+ }
+
+ // ------------------------------------------------------------------------
+ // check if we can find its mount point
+
+ // mountinfo_find() can be called with NULL disk_mountinfo_root
+ struct mountinfo *mi = mountinfo_find(disk_mountinfo_root, d->major, d->minor);
+ if(unlikely(!mi)) {
+ // mountinfo_free_all can be called with NULL
+ mountinfo_free_all(disk_mountinfo_root);
+ disk_mountinfo_root = mountinfo_read(0);
+ mi = mountinfo_find(disk_mountinfo_root, d->major, d->minor);
+ }
+
+ if(unlikely(mi))
+ d->mount_point = strdupz(mi->mount_point);
+ else
+ d->mount_point = NULL;
+
+ // ------------------------------------------------------------------------
+ // find the disk sector size
+
+ /*
+ * sector size is always 512 bytes inside the kernel #3481
+ *
+ {
+ char tf[FILENAME_MAX + 1], *t;
+ strncpyz(tf, d->device, FILENAME_MAX);
+
+ // replace all / with !
+ for(t = tf; *t ;t++)
+ if(unlikely(*t == '/')) *t = '!';
+
+ if(likely(d->type == DISK_TYPE_PARTITION))
+ snprintfz(buffer, FILENAME_MAX, path_to_get_hw_sector_size_partitions, d->major, d->minor, tf);
+ else
+ snprintfz(buffer, FILENAME_MAX, path_to_get_hw_sector_size, tf);
+
+ FILE *fpss = fopen(buffer, "r");
+ if(likely(fpss)) {
+ char buffer2[1024 + 1];
+ char *tmp = fgets(buffer2, 1024, fpss);
+
+ if(likely(tmp)) {
+ d->sector_size = str2i(tmp);
+ if(unlikely(d->sector_size <= 0)) {
+ error("Invalid sector size %d for device %s in %s. Assuming 512.", d->sector_size, d->device, buffer);
+ d->sector_size = 512;
+ }
+ }
+ else error("Cannot read data for sector size for device %s from %s. Assuming 512.", d->device, buffer);
+
+ fclose(fpss);
+ }
+ else error("Cannot read sector size for device %s from %s. Assuming 512.", d->device, buffer);
+ }
+ */
+
+ // ------------------------------------------------------------------------
+ // check if the device is a bcache
+
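+    // bcache exposes one sysfs file per metric under /sys/block/DISK/bcache/;
+    // the readable paths are resolved once here and cached in the disk
+    // structure, so the collection loop only has to read the files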
+ struct stat bcache;
+ snprintfz(buffer, FILENAME_MAX, path_to_sys_block_device_bcache, disk);
+ if(unlikely(stat(buffer, &bcache) == 0 && (bcache.st_mode & S_IFMT) == S_IFDIR)) {
+ // we have the 'bcache' directory
+ d->device_is_bcache = 1;
+
+ char buffer2[FILENAME_MAX + 1];
+
+ snprintfz(buffer2, FILENAME_MAX, "%s/cache/congested", buffer);
+ if(access(buffer2, R_OK) == 0)
+ d->bcache_filename_cache_congested = strdupz(buffer2);
+ else
+ error("bcache file '%s' cannot be read.", buffer2);
+
+ snprintfz(buffer2, FILENAME_MAX, "%s/readahead", buffer);
+ if(access(buffer2, R_OK) == 0)
+ d->bcache_filename_stats_total_cache_readaheads = strdupz(buffer2);
+ else
+ error("bcache file '%s' cannot be read.", buffer2);
+
+ snprintfz(buffer2, FILENAME_MAX, "%s/cache/cache0/priority_stats", buffer); // only one cache is supported by bcache
+ if(access(buffer2, R_OK) == 0)
+ d->bcache_filename_priority_stats = strdupz(buffer2);
+ else
+ error("bcache file '%s' cannot be read.", buffer2);
+
+ snprintfz(buffer2, FILENAME_MAX, "%s/cache/internal/cache_read_races", buffer);
+ if(access(buffer2, R_OK) == 0)
+ d->bcache_filename_cache_read_races = strdupz(buffer2);
+ else
+ error("bcache file '%s' cannot be read.", buffer2);
+
+ snprintfz(buffer2, FILENAME_MAX, "%s/cache/cache0/io_errors", buffer);
+ if(access(buffer2, R_OK) == 0)
+ d->bcache_filename_cache_io_errors = strdupz(buffer2);
+ else
+ error("bcache file '%s' cannot be read.", buffer2);
+
+ snprintfz(buffer2, FILENAME_MAX, "%s/dirty_data", buffer);
+ if(access(buffer2, R_OK) == 0)
+ d->bcache_filename_dirty_data = strdupz(buffer2);
+ else
+ error("bcache file '%s' cannot be read.", buffer2);
+
+ snprintfz(buffer2, FILENAME_MAX, "%s/writeback_rate", buffer);
+ if(access(buffer2, R_OK) == 0)
+ d->bcache_filename_writeback_rate = strdupz(buffer2);
+ else
+ error("bcache file '%s' cannot be read.", buffer2);
+
+ snprintfz(buffer2, FILENAME_MAX, "%s/cache/cache_available_percent", buffer);
+ if(access(buffer2, R_OK) == 0)
+ d->bcache_filename_cache_available_percent = strdupz(buffer2);
+ else
+ error("bcache file '%s' cannot be read.", buffer2);
+
+ snprintfz(buffer2, FILENAME_MAX, "%s/stats_total/cache_hits", buffer);
+ if(access(buffer2, R_OK) == 0)
+ d->bcache_filename_stats_total_cache_hits = strdupz(buffer2);
+ else
+ error("bcache file '%s' cannot be read.", buffer2);
+
+ snprintfz(buffer2, FILENAME_MAX, "%s/stats_five_minute/cache_hit_ratio", buffer);
+ if(access(buffer2, R_OK) == 0)
+ d->bcache_filename_stats_five_minute_cache_hit_ratio = strdupz(buffer2);
+ else
+ error("bcache file '%s' cannot be read.", buffer2);
+
+ snprintfz(buffer2, FILENAME_MAX, "%s/stats_hour/cache_hit_ratio", buffer);
+ if(access(buffer2, R_OK) == 0)
+ d->bcache_filename_stats_hour_cache_hit_ratio = strdupz(buffer2);
+ else
+ error("bcache file '%s' cannot be read.", buffer2);
+
+ snprintfz(buffer2, FILENAME_MAX, "%s/stats_day/cache_hit_ratio", buffer);
+ if(access(buffer2, R_OK) == 0)
+ d->bcache_filename_stats_day_cache_hit_ratio = strdupz(buffer2);
+ else
+ error("bcache file '%s' cannot be read.", buffer2);
+
+ snprintfz(buffer2, FILENAME_MAX, "%s/stats_total/cache_hit_ratio", buffer);
+ if(access(buffer2, R_OK) == 0)
+ d->bcache_filename_stats_total_cache_hit_ratio = strdupz(buffer2);
+ else
+ error("bcache file '%s' cannot be read.", buffer2);
+
+ snprintfz(buffer2, FILENAME_MAX, "%s/stats_total/cache_misses", buffer);
+ if(access(buffer2, R_OK) == 0)
+ d->bcache_filename_stats_total_cache_misses = strdupz(buffer2);
+ else
+ error("bcache file '%s' cannot be read.", buffer2);
+
+ snprintfz(buffer2, FILENAME_MAX, "%s/stats_total/cache_bypass_hits", buffer);
+ if(access(buffer2, R_OK) == 0)
+ d->bcache_filename_stats_total_cache_bypass_hits = strdupz(buffer2);
+ else
+ error("bcache file '%s' cannot be read.", buffer2);
+
+ snprintfz(buffer2, FILENAME_MAX, "%s/stats_total/cache_bypass_misses", buffer);
+ if(access(buffer2, R_OK) == 0)
+ d->bcache_filename_stats_total_cache_bypass_misses = strdupz(buffer2);
+ else
+ error("bcache file '%s' cannot be read.", buffer2);
+
+ snprintfz(buffer2, FILENAME_MAX, "%s/stats_total/cache_miss_collisions", buffer);
+ if(access(buffer2, R_OK) == 0)
+ d->bcache_filename_stats_total_cache_miss_collisions = strdupz(buffer2);
+ else
+ error("bcache file '%s' cannot be read.", buffer2);
+ }
+
+ get_disk_config(d);
+ return d;
+}
+
+int do_proc_diskstats(int update_every, usec_t dt) {
+ static procfile *ff = NULL;
+
+ if(unlikely(!globals_initialized)) {
+ globals_initialized = 1;
+
+ global_enable_new_disks_detected_at_runtime = config_get_boolean(CONFIG_SECTION_PLUGIN_PROC_DISKSTATS, "enable new disks detected at runtime", global_enable_new_disks_detected_at_runtime);
+ global_enable_performance_for_physical_disks = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_DISKSTATS, "performance metrics for physical disks", global_enable_performance_for_physical_disks);
+ global_enable_performance_for_virtual_disks = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_DISKSTATS, "performance metrics for virtual disks", global_enable_performance_for_virtual_disks);
+ global_enable_performance_for_partitions = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_DISKSTATS, "performance metrics for partitions", global_enable_performance_for_partitions);
+
+ global_do_io = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_DISKSTATS, "bandwidth for all disks", global_do_io);
+ global_do_ops = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_DISKSTATS, "operations for all disks", global_do_ops);
+ global_do_mops = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_DISKSTATS, "merged operations for all disks", global_do_mops);
+ global_do_iotime = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_DISKSTATS, "i/o time for all disks", global_do_iotime);
+ global_do_qops = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_DISKSTATS, "queued operations for all disks", global_do_qops);
+ global_do_util = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_DISKSTATS, "utilization percentage for all disks", global_do_util);
+ global_do_backlog = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_DISKSTATS, "backlog for all disks", global_do_backlog);
+ global_do_bcache = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_DISKSTATS, "bcache for all disks", global_do_bcache);
+ global_bcache_priority_stats_update_every = (int)config_get_number(CONFIG_SECTION_PLUGIN_PROC_DISKSTATS, "bcache priority stats update every", global_bcache_priority_stats_update_every);
+
+ global_cleanup_removed_disks = config_get_boolean(CONFIG_SECTION_PLUGIN_PROC_DISKSTATS, "remove charts of removed disks" , global_cleanup_removed_disks);
+
+ char buffer[FILENAME_MAX + 1];
+
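+        // the sysfs paths below are stored as printf-style templates; they
+        // are filled in later with the disk name or the MAJOR:MINOR pair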
+ snprintfz(buffer, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/block/%s");
+ path_to_sys_block_device = config_get(CONFIG_SECTION_PLUGIN_PROC_DISKSTATS, "path to get block device", buffer);
+
+ snprintfz(buffer, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/block/%s/bcache");
+ path_to_sys_block_device_bcache = config_get(CONFIG_SECTION_PLUGIN_PROC_DISKSTATS, "path to get block device bcache", buffer);
+
+ snprintfz(buffer, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/devices/virtual/block/%s");
+ path_to_sys_devices_virtual_block_device = config_get(CONFIG_SECTION_PLUGIN_PROC_DISKSTATS, "path to get virtual block device", buffer);
+
+ snprintfz(buffer, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/dev/block/%lu:%lu/%s");
+ path_to_sys_dev_block_major_minor_string = config_get(CONFIG_SECTION_PLUGIN_PROC_DISKSTATS, "path to get block device infos", buffer);
+
+ //snprintfz(buffer, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/block/%s/queue/hw_sector_size");
+ //path_to_get_hw_sector_size = config_get(CONFIG_SECTION_PLUGIN_PROC_DISKSTATS, "path to get h/w sector size", buffer);
+
+ //snprintfz(buffer, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/dev/block/%lu:%lu/subsystem/%s/../queue/hw_sector_size");
+ //path_to_get_hw_sector_size_partitions = config_get(CONFIG_SECTION_PLUGIN_PROC_DISKSTATS, "path to get h/w sector size for partitions", buffer);
+
+ snprintfz(buffer, FILENAME_MAX, "%s/dev/mapper", netdata_configured_host_prefix);
+ path_to_device_mapper = config_get(CONFIG_SECTION_PLUGIN_PROC_DISKSTATS, "path to device mapper", buffer);
+
+ snprintfz(buffer, FILENAME_MAX, "%s/dev/disk/by-label", netdata_configured_host_prefix);
+ path_to_device_label = config_get(CONFIG_SECTION_PLUGIN_PROC_DISKSTATS, "path to /dev/disk/by-label", buffer);
+
+ snprintfz(buffer, FILENAME_MAX, "%s/dev/disk/by-id", netdata_configured_host_prefix);
+ path_to_device_id = config_get(CONFIG_SECTION_PLUGIN_PROC_DISKSTATS, "path to /dev/disk/by-id", buffer);
+
+ snprintfz(buffer, FILENAME_MAX, "%s/dev/vx/dsk", netdata_configured_host_prefix);
+ path_to_veritas_volume_groups = config_get(CONFIG_SECTION_PLUGIN_PROC_DISKSTATS, "path to /dev/vx/dsk", buffer);
+
+ name_disks_by_id = config_get_boolean(CONFIG_SECTION_PLUGIN_PROC_DISKSTATS, "name disks by id", name_disks_by_id);
+
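+        // simple patterns are space-separated wildcard lists; the default
+        // exclusion "loop* ram*" skips loop0, ram1, etc., unless a disk is
+        // explicitly re-enabled in its own configuration section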
+ preferred_ids = simple_pattern_create(
+ config_get(CONFIG_SECTION_PLUGIN_PROC_DISKSTATS, "preferred disk ids", DEFAULT_PREFERRED_IDS)
+ , NULL
+ , SIMPLE_PATTERN_EXACT
+ );
+
+ excluded_disks = simple_pattern_create(
+ config_get(CONFIG_SECTION_PLUGIN_PROC_DISKSTATS, "exclude disks", DEFAULT_EXCLUDED_DISKS)
+ , NULL
+ , SIMPLE_PATTERN_EXACT
+ );
+ }
+
+ // --------------------------------------------------------------------------
+
+ if(unlikely(!ff)) {
+ char filename[FILENAME_MAX + 1];
+ snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/proc/diskstats");
+ ff = procfile_open(config_get(CONFIG_SECTION_PLUGIN_PROC_DISKSTATS, "filename to monitor", filename), " \t", PROCFILE_FLAG_DEFAULT);
+ }
+ if(unlikely(!ff)) return 0;
+
+ ff = procfile_readall(ff);
+ if(unlikely(!ff)) return 0; // we return 0, so that we will retry to open it next time
+
+ size_t lines = procfile_lines(ff), l;
+
+ collected_number system_read_kb = 0, system_write_kb = 0;
+
+ for(l = 0; l < lines ;l++) {
+ // --------------------------------------------------------------------------
+ // Read parameters
+
+ char *disk;
+ unsigned long major = 0, minor = 0;
+
+ collected_number reads = 0, mreads = 0, readsectors = 0, readms = 0,
+ writes = 0, mwrites = 0, writesectors = 0, writems = 0,
+ queued_ios = 0, busy_ms = 0, backlog_ms = 0;
+
+ collected_number last_reads = 0, last_readsectors = 0, last_readms = 0,
+ last_writes = 0, last_writesectors = 0, last_writems = 0,
+ last_busy_ms = 0;
+
+ size_t words = procfile_linewords(ff, l);
+ if(unlikely(words < 14)) continue;
+
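+        // an illustrative /proc/diskstats line (major, minor, name and the
+        // 11 counters parsed below; newer kernels may append more fields):
+        //   8 0 sda 34088 2394 2109626 13813 11468 9984 475672 23705 0 7661 37518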
+ major = str2ul(procfile_lineword(ff, l, 0));
+ minor = str2ul(procfile_lineword(ff, l, 1));
+ disk = procfile_lineword(ff, l, 2);
+
+ // # of reads completed # of writes completed
+ // This is the total number of reads or writes completed successfully.
+ reads = str2ull(procfile_lineword(ff, l, 3)); // rd_ios
+ writes = str2ull(procfile_lineword(ff, l, 7)); // wr_ios
+
+ // # of reads merged # of writes merged
+ // Reads and writes which are adjacent to each other may be merged for
+ // efficiency. Thus two 4K reads may become one 8K read before it is
+        // ultimately handed to the disk, and so it will be counted (and queued) as only one I/O
+ mreads = str2ull(procfile_lineword(ff, l, 4)); // rd_merges_or_rd_sec
+ mwrites = str2ull(procfile_lineword(ff, l, 8)); // wr_merges
+
+ // # of sectors read # of sectors written
+ // This is the total number of sectors read or written successfully.
+ readsectors = str2ull(procfile_lineword(ff, l, 5)); // rd_sec_or_wr_ios
+ writesectors = str2ull(procfile_lineword(ff, l, 9)); // wr_sec
+
+ // # of milliseconds spent reading # of milliseconds spent writing
+ // This is the total number of milliseconds spent by all reads or writes (as
+ // measured from __make_request() to end_that_request_last()).
+ readms = str2ull(procfile_lineword(ff, l, 6)); // rd_ticks_or_wr_sec
+ writems = str2ull(procfile_lineword(ff, l, 10)); // wr_ticks
+
+ // # of I/Os currently in progress
+ // The only field that should go to zero. Incremented as requests are
+ // given to appropriate struct request_queue and decremented as they finish.
+ queued_ios = str2ull(procfile_lineword(ff, l, 11)); // ios_pgr
+
+ // # of milliseconds spent doing I/Os
+ // This field increases so long as field queued_ios is nonzero.
+ busy_ms = str2ull(procfile_lineword(ff, l, 12)); // tot_ticks
+
+ // weighted # of milliseconds spent doing I/Os
+ // This field is incremented at each I/O start, I/O completion, I/O
+ // merge, or read of these stats by the number of I/Os in progress
+ // (field queued_ios) times the number of milliseconds spent doing I/O since the
+ // last update of this field. This can provide an easy measure of both
+ // I/O completion time and the backlog that may be accumulating.
+ backlog_ms = str2ull(procfile_lineword(ff, l, 13)); // rq_ticks
+
+
+ // --------------------------------------------------------------------------
+ // remove slashes from disk names
+ char *s;
+ for(s = disk; *s ;s++)
+ if(*s == '/') *s = '_';
+
+ // --------------------------------------------------------------------------
+ // get a disk structure for the disk
+
+ struct disk *d = get_disk(major, minor, disk);
+ d->updated = 1;
+
+ // --------------------------------------------------------------------------
+ // count the global system disk I/O of physical disks
+
+ if(unlikely(d->type == DISK_TYPE_PHYSICAL)) {
+ system_read_kb += readsectors * d->sector_size / 1024;
+ system_write_kb += writesectors * d->sector_size / 1024;
+ }
+
+ // --------------------------------------------------------------------------
+ // Set its family based on mount point
+
+ char *family = d->mount_point;
+ if(!family) family = d->disk;
+
+
+ // --------------------------------------------------------------------------
+ // Do performance metrics
+
+ if(d->do_io == CONFIG_BOOLEAN_YES || (d->do_io == CONFIG_BOOLEAN_AUTO &&
+ (readsectors || writesectors ||
+ netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
+ d->do_io = CONFIG_BOOLEAN_YES;
+
+ if(unlikely(!d->st_io)) {
+ d->st_io = rrdset_create_localhost(
+ RRD_TYPE_DISK
+ , d->device
+ , d->disk
+ , family
+ , "disk.io"
+ , "Disk I/O Bandwidth"
+ , "KiB/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_DISKSTATS_NAME
+ , NETDATA_CHART_PRIO_DISK_IO
+ , update_every
+ , RRDSET_TYPE_AREA
+ );
+
+ d->rd_io_reads = rrddim_add(d->st_io, "reads", NULL, d->sector_size, 1024, RRD_ALGORITHM_INCREMENTAL);
+ d->rd_io_writes = rrddim_add(d->st_io, "writes", NULL, d->sector_size * -1, 1024, RRD_ALGORITHM_INCREMENTAL);
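+                // the dimensions scale raw sector counts to KiB/s
+                // (value * sector_size / 1024); the negative multiplier on
+                // writes renders them below zero on the area chart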
+ }
+ else rrdset_next(d->st_io);
+
+ last_readsectors = rrddim_set_by_pointer(d->st_io, d->rd_io_reads, readsectors);
+ last_writesectors = rrddim_set_by_pointer(d->st_io, d->rd_io_writes, writesectors);
+ rrdset_done(d->st_io);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(d->do_ops == CONFIG_BOOLEAN_YES || (d->do_ops == CONFIG_BOOLEAN_AUTO &&
+ (reads || writes || netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
+ d->do_ops = CONFIG_BOOLEAN_YES;
+
+ if(unlikely(!d->st_ops)) {
+ d->st_ops = rrdset_create_localhost(
+ "disk_ops"
+ , d->device
+ , d->disk
+ , family
+ , "disk.ops"
+ , "Disk Completed I/O Operations"
+ , "operations/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_DISKSTATS_NAME
+ , NETDATA_CHART_PRIO_DISK_OPS
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rrdset_flag_set(d->st_ops, RRDSET_FLAG_DETAIL);
+
+ d->rd_ops_reads = rrddim_add(d->st_ops, "reads", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ d->rd_ops_writes = rrddim_add(d->st_ops, "writes", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(d->st_ops);
+
+ last_reads = rrddim_set_by_pointer(d->st_ops, d->rd_ops_reads, reads);
+ last_writes = rrddim_set_by_pointer(d->st_ops, d->rd_ops_writes, writes);
+ rrdset_done(d->st_ops);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(d->do_qops == CONFIG_BOOLEAN_YES || (d->do_qops == CONFIG_BOOLEAN_AUTO &&
+ (queued_ios || netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
+ d->do_qops = CONFIG_BOOLEAN_YES;
+
+ if(unlikely(!d->st_qops)) {
+ d->st_qops = rrdset_create_localhost(
+ "disk_qops"
+ , d->device
+ , d->disk
+ , family
+ , "disk.qops"
+ , "Disk Current I/O Operations"
+ , "operations"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_DISKSTATS_NAME
+ , NETDATA_CHART_PRIO_DISK_QOPS
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rrdset_flag_set(d->st_qops, RRDSET_FLAG_DETAIL);
+
+ d->rd_qops_operations = rrddim_add(d->st_qops, "operations", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else rrdset_next(d->st_qops);
+
+ rrddim_set_by_pointer(d->st_qops, d->rd_qops_operations, queued_ios);
+ rrdset_done(d->st_qops);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(d->do_backlog == CONFIG_BOOLEAN_YES || (d->do_backlog == CONFIG_BOOLEAN_AUTO &&
+ (backlog_ms || netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
+ d->do_backlog = CONFIG_BOOLEAN_YES;
+
+ if(unlikely(!d->st_backlog)) {
+ d->st_backlog = rrdset_create_localhost(
+ "disk_backlog"
+ , d->device
+ , d->disk
+ , family
+ , "disk.backlog"
+ , "Disk Backlog"
+ , "milliseconds"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_DISKSTATS_NAME
+ , NETDATA_CHART_PRIO_DISK_BACKLOG
+ , update_every
+ , RRDSET_TYPE_AREA
+ );
+
+ rrdset_flag_set(d->st_backlog, RRDSET_FLAG_DETAIL);
+
+ d->rd_backlog_backlog = rrddim_add(d->st_backlog, "backlog", NULL, 1, 10, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(d->st_backlog);
+
+ rrddim_set_by_pointer(d->st_backlog, d->rd_backlog_backlog, backlog_ms);
+ rrdset_done(d->st_backlog);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(d->do_util == CONFIG_BOOLEAN_YES || (d->do_util == CONFIG_BOOLEAN_AUTO &&
+ (busy_ms || netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
+ d->do_util = CONFIG_BOOLEAN_YES;
+
+ if(unlikely(!d->st_util)) {
+ d->st_util = rrdset_create_localhost(
+ "disk_util"
+ , d->device
+ , d->disk
+ , family
+ , "disk.util"
+ , "Disk Utilization Time"
+ , "% of time working"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_DISKSTATS_NAME
+ , NETDATA_CHART_PRIO_DISK_UTIL
+ , update_every
+ , RRDSET_TYPE_AREA
+ );
+
+ rrdset_flag_set(d->st_util, RRDSET_FLAG_DETAIL);
+
+ d->rd_util_utilization = rrddim_add(d->st_util, "utilization", NULL, 1, 10, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(d->st_util);
+
+ last_busy_ms = rrddim_set_by_pointer(d->st_util, d->rd_util_utilization, busy_ms);
+ rrdset_done(d->st_util);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(d->do_mops == CONFIG_BOOLEAN_YES || (d->do_mops == CONFIG_BOOLEAN_AUTO &&
+ (mreads || mwrites || netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
+ d->do_mops = CONFIG_BOOLEAN_YES;
+
+ if(unlikely(!d->st_mops)) {
+ d->st_mops = rrdset_create_localhost(
+ "disk_mops"
+ , d->device
+ , d->disk
+ , family
+ , "disk.mops"
+ , "Disk Merged Operations"
+ , "merged operations/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_DISKSTATS_NAME
+ , NETDATA_CHART_PRIO_DISK_MOPS
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rrdset_flag_set(d->st_mops, RRDSET_FLAG_DETAIL);
+
+ d->rd_mops_reads = rrddim_add(d->st_mops, "reads", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ d->rd_mops_writes = rrddim_add(d->st_mops, "writes", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(d->st_mops);
+
+ rrddim_set_by_pointer(d->st_mops, d->rd_mops_reads, mreads);
+ rrddim_set_by_pointer(d->st_mops, d->rd_mops_writes, mwrites);
+ rrdset_done(d->st_mops);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(d->do_iotime == CONFIG_BOOLEAN_YES || (d->do_iotime == CONFIG_BOOLEAN_AUTO &&
+ (readms || writems || netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
+ d->do_iotime = CONFIG_BOOLEAN_YES;
+
+ if(unlikely(!d->st_iotime)) {
+ d->st_iotime = rrdset_create_localhost(
+ "disk_iotime"
+ , d->device
+ , d->disk
+ , family
+ , "disk.iotime"
+ , "Disk Total I/O Time"
+ , "milliseconds/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_DISKSTATS_NAME
+ , NETDATA_CHART_PRIO_DISK_IOTIME
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rrdset_flag_set(d->st_iotime, RRDSET_FLAG_DETAIL);
+
+ d->rd_iotime_reads = rrddim_add(d->st_iotime, "reads", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ d->rd_iotime_writes = rrddim_add(d->st_iotime, "writes", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(d->st_iotime);
+
+ last_readms = rrddim_set_by_pointer(d->st_iotime, d->rd_iotime_reads, readms);
+ last_writems = rrddim_set_by_pointer(d->st_iotime, d->rd_iotime_writes, writems);
+ rrdset_done(d->st_iotime);
+ }
+
+ // --------------------------------------------------------------------
+ // calculate differential charts
+ // only if this is not the first time we run
+
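+        // the last_* values above come from rrddim_set_by_pointer(), which
+        // returns the previously collected value of each dimension; so the
+        // deltas below cover exactly one collection interval:
+        //   await = d(io time in ms)   / d(completed operations)
+        //   avgsz = d(sectors)         / d(completed operations)
+        //   svctm = d(busy time in ms) / d(completed operations)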
+ if(likely(dt)) {
+ if( (d->do_iotime == CONFIG_BOOLEAN_YES || (d->do_iotime == CONFIG_BOOLEAN_AUTO &&
+ (readms || writems ||
+ netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) &&
+ (d->do_ops == CONFIG_BOOLEAN_YES || (d->do_ops == CONFIG_BOOLEAN_AUTO &&
+ (reads || writes ||
+ netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES)))) {
+
+ if(unlikely(!d->st_await)) {
+ d->st_await = rrdset_create_localhost(
+ "disk_await"
+ , d->device
+ , d->disk
+ , family
+ , "disk.await"
+ , "Average Completed I/O Operation Time"
+ , "milliseconds/operation"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_DISKSTATS_NAME
+ , NETDATA_CHART_PRIO_DISK_AWAIT
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rrdset_flag_set(d->st_await, RRDSET_FLAG_DETAIL);
+
+ d->rd_await_reads = rrddim_add(d->st_await, "reads", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ d->rd_await_writes = rrddim_add(d->st_await, "writes", NULL, -1, 1, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else rrdset_next(d->st_await);
+
+ rrddim_set_by_pointer(d->st_await, d->rd_await_reads, (reads - last_reads) ? (readms - last_readms) / (reads - last_reads) : 0);
+ rrddim_set_by_pointer(d->st_await, d->rd_await_writes, (writes - last_writes) ? (writems - last_writems) / (writes - last_writes) : 0);
+ rrdset_done(d->st_await);
+ }
+
+ if( (d->do_io == CONFIG_BOOLEAN_YES || (d->do_io == CONFIG_BOOLEAN_AUTO &&
+ (readsectors || writesectors || netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) &&
+ (d->do_ops == CONFIG_BOOLEAN_YES || (d->do_ops == CONFIG_BOOLEAN_AUTO &&
+ (reads || writes || netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES)))) {
+
+ if(unlikely(!d->st_avgsz)) {
+ d->st_avgsz = rrdset_create_localhost(
+ "disk_avgsz"
+ , d->device
+ , d->disk
+ , family
+ , "disk.avgsz"
+ , "Average Completed I/O Operation Bandwidth"
+ , "KiB/operation"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_DISKSTATS_NAME
+ , NETDATA_CHART_PRIO_DISK_AVGSZ
+ , update_every
+ , RRDSET_TYPE_AREA
+ );
+
+ rrdset_flag_set(d->st_avgsz, RRDSET_FLAG_DETAIL);
+
+ d->rd_avgsz_reads = rrddim_add(d->st_avgsz, "reads", NULL, d->sector_size, 1024, RRD_ALGORITHM_ABSOLUTE);
+ d->rd_avgsz_writes = rrddim_add(d->st_avgsz, "writes", NULL, d->sector_size * -1, 1024, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else rrdset_next(d->st_avgsz);
+
+ rrddim_set_by_pointer(d->st_avgsz, d->rd_avgsz_reads, (reads - last_reads) ? (readsectors - last_readsectors) / (reads - last_reads) : 0);
+ rrddim_set_by_pointer(d->st_avgsz, d->rd_avgsz_writes, (writes - last_writes) ? (writesectors - last_writesectors) / (writes - last_writes) : 0);
+ rrdset_done(d->st_avgsz);
+ }
+
+ if( (d->do_util == CONFIG_BOOLEAN_YES || (d->do_util == CONFIG_BOOLEAN_AUTO &&
+ (busy_ms ||
+ netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) &&
+ (d->do_ops == CONFIG_BOOLEAN_YES || (d->do_ops == CONFIG_BOOLEAN_AUTO &&
+ (reads || writes ||
+ netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES)))) {
+
+ if(unlikely(!d->st_svctm)) {
+ d->st_svctm = rrdset_create_localhost(
+ "disk_svctm"
+ , d->device
+ , d->disk
+ , family
+ , "disk.svctm"
+ , "Average Service Time"
+ , "milliseconds/operation"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_DISKSTATS_NAME
+ , NETDATA_CHART_PRIO_DISK_SVCTM
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rrdset_flag_set(d->st_svctm, RRDSET_FLAG_DETAIL);
+
+ d->rd_svctm_svctm = rrddim_add(d->st_svctm, "svctm", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else rrdset_next(d->st_svctm);
+
+ rrddim_set_by_pointer(d->st_svctm, d->rd_svctm_svctm, ((reads - last_reads) + (writes - last_writes)) ? (busy_ms - last_busy_ms) / ((reads - last_reads) + (writes - last_writes)) : 0);
+ rrdset_done(d->st_svctm);
+ }
+ }
+
+ // --------------------------------------------------------------------------
+ // read bcache metrics and generate the bcache charts
+
+ if(d->device_is_bcache && d->do_bcache != CONFIG_BOOLEAN_NO) {
+ unsigned long long int
+ stats_total_cache_bypass_hits = 0,
+ stats_total_cache_bypass_misses = 0,
+ stats_total_cache_hits = 0,
+ stats_total_cache_miss_collisions = 0,
+ stats_total_cache_misses = 0,
+ stats_five_minute_cache_hit_ratio = 0,
+ stats_hour_cache_hit_ratio = 0,
+ stats_day_cache_hit_ratio = 0,
+ stats_total_cache_hit_ratio = 0,
+ cache_available_percent = 0,
+ cache_readaheads = 0,
+ cache_read_races = 0,
+ cache_io_errors = 0,
+ cache_congested = 0,
+ dirty_data = 0,
+ writeback_rate = 0;
+
+ // read the bcache values
+
+ if(d->bcache_filename_dirty_data)
+ dirty_data = bcache_read_number_with_units(d->bcache_filename_dirty_data);
+
+ if(d->bcache_filename_writeback_rate)
+ writeback_rate = bcache_read_number_with_units(d->bcache_filename_writeback_rate);
+
+ if(d->bcache_filename_cache_congested)
+ cache_congested = bcache_read_number_with_units(d->bcache_filename_cache_congested);
+
+ if(d->bcache_filename_cache_available_percent)
+ read_single_number_file(d->bcache_filename_cache_available_percent, &cache_available_percent);
+
+ if(d->bcache_filename_stats_five_minute_cache_hit_ratio)
+ read_single_number_file(d->bcache_filename_stats_five_minute_cache_hit_ratio, &stats_five_minute_cache_hit_ratio);
+
+ if(d->bcache_filename_stats_hour_cache_hit_ratio)
+ read_single_number_file(d->bcache_filename_stats_hour_cache_hit_ratio, &stats_hour_cache_hit_ratio);
+
+ if(d->bcache_filename_stats_day_cache_hit_ratio)
+ read_single_number_file(d->bcache_filename_stats_day_cache_hit_ratio, &stats_day_cache_hit_ratio);
+
+ if(d->bcache_filename_stats_total_cache_hit_ratio)
+ read_single_number_file(d->bcache_filename_stats_total_cache_hit_ratio, &stats_total_cache_hit_ratio);
+
+ if(d->bcache_filename_stats_total_cache_hits)
+ read_single_number_file(d->bcache_filename_stats_total_cache_hits, &stats_total_cache_hits);
+
+ if(d->bcache_filename_stats_total_cache_misses)
+ read_single_number_file(d->bcache_filename_stats_total_cache_misses, &stats_total_cache_misses);
+
+ if(d->bcache_filename_stats_total_cache_miss_collisions)
+ read_single_number_file(d->bcache_filename_stats_total_cache_miss_collisions, &stats_total_cache_miss_collisions);
+
+ if(d->bcache_filename_stats_total_cache_bypass_hits)
+ read_single_number_file(d->bcache_filename_stats_total_cache_bypass_hits, &stats_total_cache_bypass_hits);
+
+ if(d->bcache_filename_stats_total_cache_bypass_misses)
+ read_single_number_file(d->bcache_filename_stats_total_cache_bypass_misses, &stats_total_cache_bypass_misses);
+
+ if(d->bcache_filename_stats_total_cache_readaheads)
+ cache_readaheads = bcache_read_number_with_units(d->bcache_filename_stats_total_cache_readaheads);
+
+ if(d->bcache_filename_cache_read_races)
+ read_single_number_file(d->bcache_filename_cache_read_races, &cache_read_races);
+
+ if(d->bcache_filename_cache_io_errors)
+ read_single_number_file(d->bcache_filename_cache_io_errors, &cache_io_errors);
+
+ if(d->bcache_filename_priority_stats && global_bcache_priority_stats_update_every >= 1)
+ bcache_read_priority_stats(d, family, global_bcache_priority_stats_update_every, dt);
+
+ // update the charts
+
+ {
+ if(unlikely(!d->st_bcache_hit_ratio)) {
+ d->st_bcache_hit_ratio = rrdset_create_localhost(
+ "disk_bcache_hit_ratio"
+ , d->device
+ , d->disk
+ , family
+ , "disk.bcache_hit_ratio"
+ , "BCache Cache Hit Ratio"
+ , "percentage"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_DISKSTATS_NAME
+ , NETDATA_CHART_PRIO_BCACHE_HIT_RATIO
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ d->rd_bcache_hit_ratio_5min = rrddim_add(d->st_bcache_hit_ratio, "5min", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ d->rd_bcache_hit_ratio_1hour = rrddim_add(d->st_bcache_hit_ratio, "1hour", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ d->rd_bcache_hit_ratio_1day = rrddim_add(d->st_bcache_hit_ratio, "1day", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ d->rd_bcache_hit_ratio_total = rrddim_add(d->st_bcache_hit_ratio, "ever", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else rrdset_next(d->st_bcache_hit_ratio);
+
+ rrddim_set_by_pointer(d->st_bcache_hit_ratio, d->rd_bcache_hit_ratio_5min, stats_five_minute_cache_hit_ratio);
+ rrddim_set_by_pointer(d->st_bcache_hit_ratio, d->rd_bcache_hit_ratio_1hour, stats_hour_cache_hit_ratio);
+ rrddim_set_by_pointer(d->st_bcache_hit_ratio, d->rd_bcache_hit_ratio_1day, stats_day_cache_hit_ratio);
+ rrddim_set_by_pointer(d->st_bcache_hit_ratio, d->rd_bcache_hit_ratio_total, stats_total_cache_hit_ratio);
+ rrdset_done(d->st_bcache_hit_ratio);
+ }
+
+ {
+
+ if(unlikely(!d->st_bcache_rates)) {
+ d->st_bcache_rates = rrdset_create_localhost(
+ "disk_bcache_rates"
+ , d->device
+ , d->disk
+ , family
+ , "disk.bcache_rates"
+ , "BCache Rates"
+ , "KiB/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_DISKSTATS_NAME
+ , NETDATA_CHART_PRIO_BCACHE_RATES
+ , update_every
+ , RRDSET_TYPE_AREA
+ );
+
+ d->rd_bcache_rate_congested = rrddim_add(d->st_bcache_rates, "congested", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
+ d->rd_bcache_rate_writeback = rrddim_add(d->st_bcache_rates, "writeback", NULL, -1, 1024, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else rrdset_next(d->st_bcache_rates);
+
+ rrddim_set_by_pointer(d->st_bcache_rates, d->rd_bcache_rate_writeback, writeback_rate);
+ rrddim_set_by_pointer(d->st_bcache_rates, d->rd_bcache_rate_congested, cache_congested);
+ rrdset_done(d->st_bcache_rates);
+ }
+
+ {
+ if(unlikely(!d->st_bcache_size)) {
+ d->st_bcache_size = rrdset_create_localhost(
+ "disk_bcache_size"
+ , d->device
+ , d->disk
+ , family
+ , "disk.bcache_size"
+ , "BCache Cache Sizes"
+ , "MiB"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_DISKSTATS_NAME
+ , NETDATA_CHART_PRIO_BCACHE_SIZE
+ , update_every
+ , RRDSET_TYPE_AREA
+ );
+
+ d->rd_bcache_dirty_size = rrddim_add(d->st_bcache_size, "dirty", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else rrdset_next(d->st_bcache_size);
+
+ rrddim_set_by_pointer(d->st_bcache_size, d->rd_bcache_dirty_size, dirty_data);
+ rrdset_done(d->st_bcache_size);
+ }
+
+ {
+ if(unlikely(!d->st_bcache_usage)) {
+ d->st_bcache_usage = rrdset_create_localhost(
+ "disk_bcache_usage"
+ , d->device
+ , d->disk
+ , family
+ , "disk.bcache_usage"
+ , "BCache Cache Usage"
+ , "percentage"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_DISKSTATS_NAME
+ , NETDATA_CHART_PRIO_BCACHE_USAGE
+ , update_every
+ , RRDSET_TYPE_AREA
+ );
+
+ d->rd_bcache_available_percent = rrddim_add(d->st_bcache_usage, "avail", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else rrdset_next(d->st_bcache_usage);
+
+ rrddim_set_by_pointer(d->st_bcache_usage, d->rd_bcache_available_percent, cache_available_percent);
+ rrdset_done(d->st_bcache_usage);
+ }
+
+ {
+
+ if(unlikely(!d->st_bcache_cache_read_races)) {
+ d->st_bcache_cache_read_races = rrdset_create_localhost(
+ "disk_bcache_cache_read_races"
+ , d->device
+ , d->disk
+ , family
+ , "disk.bcache_cache_read_races"
+ , "BCache Cache Read Races"
+ , "operations/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_DISKSTATS_NAME
+ , NETDATA_CHART_PRIO_BCACHE_CACHE_READ_RACES
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ d->rd_bcache_cache_read_races = rrddim_add(d->st_bcache_cache_read_races, "races", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ d->rd_bcache_cache_io_errors = rrddim_add(d->st_bcache_cache_read_races, "errors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(d->st_bcache_cache_read_races);
+
+ rrddim_set_by_pointer(d->st_bcache_cache_read_races, d->rd_bcache_cache_read_races, cache_read_races);
+ rrddim_set_by_pointer(d->st_bcache_cache_read_races, d->rd_bcache_cache_io_errors, cache_io_errors);
+ rrdset_done(d->st_bcache_cache_read_races);
+ }
+
+ if(d->do_bcache == CONFIG_BOOLEAN_YES || (d->do_bcache == CONFIG_BOOLEAN_AUTO &&
+ (stats_total_cache_hits ||
+ stats_total_cache_misses ||
+ stats_total_cache_miss_collisions ||
+ netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
+
+ if(unlikely(!d->st_bcache)) {
+ d->st_bcache = rrdset_create_localhost(
+ "disk_bcache"
+ , d->device
+ , d->disk
+ , family
+ , "disk.bcache"
+ , "BCache Cache I/O Operations"
+ , "operations/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_DISKSTATS_NAME
+ , NETDATA_CHART_PRIO_BCACHE_OPS
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rrdset_flag_set(d->st_bcache, RRDSET_FLAG_DETAIL);
+
+ d->rd_bcache_hits = rrddim_add(d->st_bcache, "hits", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ d->rd_bcache_misses = rrddim_add(d->st_bcache, "misses", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ d->rd_bcache_miss_collisions = rrddim_add(d->st_bcache, "collisions", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ d->rd_bcache_readaheads = rrddim_add(d->st_bcache, "readaheads", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(d->st_bcache);
+
+ rrddim_set_by_pointer(d->st_bcache, d->rd_bcache_hits, stats_total_cache_hits);
+ rrddim_set_by_pointer(d->st_bcache, d->rd_bcache_misses, stats_total_cache_misses);
+ rrddim_set_by_pointer(d->st_bcache, d->rd_bcache_miss_collisions, stats_total_cache_miss_collisions);
+ rrddim_set_by_pointer(d->st_bcache, d->rd_bcache_readaheads, cache_readaheads);
+ rrdset_done(d->st_bcache);
+ }
+
+ if(d->do_bcache == CONFIG_BOOLEAN_YES || (d->do_bcache == CONFIG_BOOLEAN_AUTO &&
+ (stats_total_cache_bypass_hits ||
+ stats_total_cache_bypass_misses ||
+ netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
+
+ if(unlikely(!d->st_bcache_bypass)) {
+ d->st_bcache_bypass = rrdset_create_localhost(
+ "disk_bcache_bypass"
+ , d->device
+ , d->disk
+ , family
+ , "disk.bcache_bypass"
+ , "BCache Cache Bypass I/O Operations"
+ , "operations/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_DISKSTATS_NAME
+ , NETDATA_CHART_PRIO_BCACHE_BYPASS
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rrdset_flag_set(d->st_bcache_bypass, RRDSET_FLAG_DETAIL);
+
+ d->rd_bcache_bypass_hits = rrddim_add(d->st_bcache_bypass, "hits", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ d->rd_bcache_bypass_misses = rrddim_add(d->st_bcache_bypass, "misses", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(d->st_bcache_bypass);
+
+ rrddim_set_by_pointer(d->st_bcache_bypass, d->rd_bcache_bypass_hits, stats_total_cache_bypass_hits);
+ rrddim_set_by_pointer(d->st_bcache_bypass, d->rd_bcache_bypass_misses, stats_total_cache_bypass_misses);
+ rrdset_done(d->st_bcache_bypass);
+ }
+ }
+ }
+
+
+ // ------------------------------------------------------------------------
+ // update the system total I/O
+
+ if(global_do_io == CONFIG_BOOLEAN_YES || (global_do_io == CONFIG_BOOLEAN_AUTO &&
+ (system_read_kb || system_write_kb ||
+ netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
+ static RRDSET *st_io = NULL;
+ static RRDDIM *rd_in = NULL, *rd_out = NULL;
+
+ if(unlikely(!st_io)) {
+ st_io = rrdset_create_localhost(
+ "system"
+ , "io"
+ , NULL
+ , "disk"
+ , NULL
+ , "Disk I/O"
+ , "KiB/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_DISKSTATS_NAME
+ , NETDATA_CHART_PRIO_SYSTEM_IO
+ , update_every
+ , RRDSET_TYPE_AREA
+ );
+
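+ // the negative multiplier on "out" draws writes below the zero axis,
+ // mirroring the reads drawn above it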
+ rd_in = rrddim_add(st_io, "in", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_out = rrddim_add(st_io, "out", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st_io);
+
+ rrddim_set_by_pointer(st_io, rd_in, system_read_kb);
+ rrddim_set_by_pointer(st_io, rd_out, system_write_kb);
+ rrdset_done(st_io);
+ }
+
+
+ // ------------------------------------------------------------------------
+ // cleanup removed disks
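+ // disks not marked 'updated' in this cycle have disappeared: obsolete
+ // their charts, unlink them from the list and free their memory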
+
+ struct disk *d = disk_root, *last = NULL;
+ while(d) {
+ if(unlikely(global_cleanup_removed_disks && !d->updated)) {
+ struct disk *t = d;
+
+ rrdset_obsolete_and_pointer_null(d->st_avgsz);
+ rrdset_obsolete_and_pointer_null(d->st_await);
+ rrdset_obsolete_and_pointer_null(d->st_backlog);
+ rrdset_obsolete_and_pointer_null(d->st_io);
+ rrdset_obsolete_and_pointer_null(d->st_iotime);
+ rrdset_obsolete_and_pointer_null(d->st_mops);
+ rrdset_obsolete_and_pointer_null(d->st_ops);
+ rrdset_obsolete_and_pointer_null(d->st_qops);
+ rrdset_obsolete_and_pointer_null(d->st_svctm);
+ rrdset_obsolete_and_pointer_null(d->st_util);
+ rrdset_obsolete_and_pointer_null(d->st_bcache);
+ rrdset_obsolete_and_pointer_null(d->st_bcache_bypass);
+ rrdset_obsolete_and_pointer_null(d->st_bcache_rates);
+ rrdset_obsolete_and_pointer_null(d->st_bcache_size);
+ rrdset_obsolete_and_pointer_null(d->st_bcache_usage);
+ rrdset_obsolete_and_pointer_null(d->st_bcache_hit_ratio);
+
+ if(d == disk_root) {
+ disk_root = d = d->next;
+ last = NULL;
+ }
+ else if(last) {
+ last->next = d = d->next;
+ }
+
+ freez(t->bcache_filename_dirty_data);
+ freez(t->bcache_filename_writeback_rate);
+ freez(t->bcache_filename_cache_congested);
+ freez(t->bcache_filename_cache_available_percent);
+ freez(t->bcache_filename_stats_five_minute_cache_hit_ratio);
+ freez(t->bcache_filename_stats_hour_cache_hit_ratio);
+ freez(t->bcache_filename_stats_day_cache_hit_ratio);
+ freez(t->bcache_filename_stats_total_cache_hit_ratio);
+ freez(t->bcache_filename_stats_total_cache_hits);
+ freez(t->bcache_filename_stats_total_cache_misses);
+ freez(t->bcache_filename_stats_total_cache_miss_collisions);
+ freez(t->bcache_filename_stats_total_cache_bypass_hits);
+ freez(t->bcache_filename_stats_total_cache_bypass_misses);
+ freez(t->bcache_filename_stats_total_cache_readaheads);
+ freez(t->bcache_filename_cache_read_races);
+ freez(t->bcache_filename_cache_io_errors);
+ freez(t->bcache_filename_priority_stats);
+
+ freez(t->disk);
+ freez(t->device);
+ freez(t->mount_point);
+ freez(t);
+ }
+ else {
+ d->updated = 0;
+ last = d;
+ d = d->next;
+ }
+ }
+
+ return 0;
+}
diff --git a/collectors/proc.plugin/proc_interrupts.c b/collectors/proc.plugin/proc_interrupts.c
new file mode 100644
index 0000000..73b1171
--- /dev/null
+++ b/collectors/proc.plugin/proc_interrupts.c
@@ -0,0 +1,248 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "plugin_proc.h"
+
+#define PLUGIN_PROC_MODULE_INTERRUPTS_NAME "/proc/interrupts"
+#define CONFIG_SECTION_PLUGIN_PROC_INTERRUPTS "plugin:" PLUGIN_PROC_CONFIG_NAME ":" PLUGIN_PROC_MODULE_INTERRUPTS_NAME
+
+#define MAX_INTERRUPT_NAME 50
+
+struct cpu_interrupt {
+ unsigned long long value;
+ RRDDIM *rd;
+};
+
+struct interrupt {
+ int used;
+ char *id;
+ char name[MAX_INTERRUPT_NAME + 1];
+ RRDDIM *rd;
+ unsigned long long total;
+ struct cpu_interrupt cpu[];
+};
+
+// since each interrupt record is variable in size (one entry per CPU),
+// we use this to calculate the size of each record
+#define recordsize(cpus) (sizeof(struct interrupt) + ((cpus) * sizeof(struct cpu_interrupt)))
+
+// given a base, get a pointer to each record
+#define irrindex(base, line, cpus) ((struct interrupt *)&((char *)(base))[(line) * recordsize(cpus)])
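+// for example (illustrative): with 4 CPUs, recordsize(4) is
+// sizeof(struct interrupt) + 4 * sizeof(struct cpu_interrupt),
+// and irrindex(base, 2, 4) points 2 * recordsize(4) bytes past base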
+
+static inline struct interrupt *get_interrupts_array(size_t lines, int cpus) {
+ static struct interrupt *irrs = NULL;
+ static size_t allocated = 0;
+
+ if(unlikely(lines != allocated)) {
+ size_t l;
+ int c;
+
+ irrs = (struct interrupt *)reallocz(irrs, lines * recordsize(cpus));
+
+ // reset all interrupt RRDDIM pointers as any line could have shifted
+ for(l = 0; l < lines ;l++) {
+ struct interrupt *irr = irrindex(irrs, l, cpus);
+ irr->rd = NULL;
+ irr->name[0] = '\0';
+ for(c = 0; c < cpus ;c++)
+ irr->cpu[c].rd = NULL;
+ }
+
+ allocated = lines;
+ }
+
+ return irrs;
+}
+
+int do_proc_interrupts(int update_every, usec_t dt) {
+ (void)dt;
+ static procfile *ff = NULL;
+ static int cpus = -1, do_per_core = CONFIG_BOOLEAN_INVALID;
+ struct interrupt *irrs = NULL;
+
+ if(unlikely(do_per_core == CONFIG_BOOLEAN_INVALID))
+ do_per_core = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_INTERRUPTS, "interrupts per core", CONFIG_BOOLEAN_AUTO);
+
+ if(unlikely(!ff)) {
+ char filename[FILENAME_MAX + 1];
+ snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/proc/interrupts");
+ ff = procfile_open(config_get(CONFIG_SECTION_PLUGIN_PROC_INTERRUPTS, "filename to monitor", filename), " \t", PROCFILE_FLAG_DEFAULT);
+ }
+ if(unlikely(!ff))
+ return 1;
+
+ ff = procfile_readall(ff);
+ if(unlikely(!ff))
+ return 0; // we return 0, so that we will retry opening it next time
+
+ size_t lines = procfile_lines(ff), l;
+ size_t words = procfile_linewords(ff, 0);
+
+ if(unlikely(!lines)) {
+ error("Cannot read /proc/interrupts, zero lines reported.");
+ return 1;
+ }
+
+ // find how many CPUs there are
+ if(unlikely(cpus == -1)) {
+ uint32_t w;
+ cpus = 0;
+ for(w = 0; w < words ; w++) {
+ if(likely(strncmp(procfile_lineword(ff, 0, w), "CPU", 3) == 0))
+ cpus++;
+ }
+ }
+
+ if(unlikely(!cpus)) {
+ error("PLUGIN: PROC_INTERRUPTS: Cannot find the number of CPUs in /proc/interrupts");
+ return 1;
+ }
+
+ // allocate the array we need
+ irrs = get_interrupts_array(lines, cpus);
+ irrs[0].used = 0;
+
+ // loop through all lines
+ for(l = 1; l < lines ;l++) {
+ struct interrupt *irr = irrindex(irrs, l, cpus);
+ irr->used = 0;
+ irr->total = 0;
+
+ words = procfile_linewords(ff, l);
+ if(unlikely(!words)) continue;
+
+ irr->id = procfile_lineword(ff, l, 0);
+ if(unlikely(!irr->id || !irr->id[0])) continue;
+
+ size_t idlen = strlen(irr->id);
+ if(irr->id[idlen - 1] == ':')
+ irr->id[--idlen] = '\0';
+
+ int c;
+ for(c = 0; c < cpus ;c++) {
+ if(likely((c + 1) < (int)words))
+ irr->cpu[c].value = str2ull(procfile_lineword(ff, l, (uint32_t)(c + 1)));
+ else
+ irr->cpu[c].value = 0;
+
+ irr->total += irr->cpu[c].value;
+ }
+
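+ // interrupts with a numeric id also carry a description as the last word;
+ // combine the two, so that e.g. id "28" with description "eth0" becomes
+ // "eth0_28" (illustrative values), truncating to fit MAX_INTERRUPT_NAME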
+ if(unlikely(isdigit(irr->id[0]) && (uint32_t)(cpus + 2) < words)) {
+ strncpyz(irr->name, procfile_lineword(ff, l, words - 1), MAX_INTERRUPT_NAME);
+ size_t nlen = strlen(irr->name);
+ if(likely(nlen + 1 + idlen <= MAX_INTERRUPT_NAME)) {
+ irr->name[nlen] = '_';
+ strncpyz(&irr->name[nlen + 1], irr->id, MAX_INTERRUPT_NAME - nlen - 1);
+ }
+ else {
+ irr->name[MAX_INTERRUPT_NAME - idlen - 1] = '_';
+ strncpyz(&irr->name[MAX_INTERRUPT_NAME - idlen], irr->id, idlen);
+ }
+ }
+ else {
+ strncpyz(irr->name, irr->id, MAX_INTERRUPT_NAME);
+ }
+
+ irr->used = 1;
+ }
+
+ // --------------------------------------------------------------------
+
+ static RRDSET *st_system_interrupts = NULL;
+ if(unlikely(!st_system_interrupts))
+ st_system_interrupts = rrdset_create_localhost(
+ "system"
+ , "interrupts"
+ , NULL
+ , "interrupts"
+ , NULL
+ , "System interrupts"
+ , "interrupts/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_INTERRUPTS_NAME
+ , NETDATA_CHART_PRIO_SYSTEM_INTERRUPTS
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+ else
+ rrdset_next(st_system_interrupts);
+
+ for(l = 0; l < lines ;l++) {
+ struct interrupt *irr = irrindex(irrs, l, cpus);
+ if(irr->used && irr->total) {
+ // the interrupts may have changed even when the total number of lines
+ // stays the same, if an equal number of interrupts were added and
+ // removed between two calls of this function
+ if(unlikely(!irr->rd || strncmp(irr->rd->name, irr->name, MAX_INTERRUPT_NAME) != 0)) {
+ irr->rd = rrddim_add(st_system_interrupts, irr->id, irr->name, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_set_name(st_system_interrupts, irr->rd, irr->name);
+
+ // also reset the per-CPU RRDDIMs, to avoid repeating the strncmp() in the per-core loop below
+ if(likely(do_per_core != CONFIG_BOOLEAN_NO)) {
+ int c;
+ for(c = 0; c < cpus; c++) irr->cpu[c].rd = NULL;
+ }
+ }
+
+ rrddim_set_by_pointer(st_system_interrupts, irr->rd, irr->total);
+ }
+ }
+
+ rrdset_done(st_system_interrupts);
+
+ // --------------------------------------------------------------------
+
+ if(likely(do_per_core != CONFIG_BOOLEAN_NO)) {
+ static RRDSET **core_st = NULL;
+ static int old_cpus = 0;
+
+ if(old_cpus < cpus) {
+ core_st = reallocz(core_st, sizeof(RRDSET *) * cpus);
+ memset(&core_st[old_cpus], 0, sizeof(RRDSET *) * (cpus - old_cpus));
+ old_cpus = cpus;
+ }
+
+ int c;
+
+ for(c = 0; c < cpus ;c++) {
+ if(unlikely(!core_st[c])) {
+ char id[50+1];
+ snprintfz(id, 50, "cpu%d_interrupts", c);
+
+ char title[100+1];
+ snprintfz(title, 100, "CPU%d Interrupts", c);
+ core_st[c] = rrdset_create_localhost(
+ "cpu"
+ , id
+ , NULL
+ , "interrupts"
+ , "cpu.interrupts"
+ , title
+ , "interrupts/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_INTERRUPTS_NAME
+ , NETDATA_CHART_PRIO_INTERRUPTS_PER_CORE + c
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+ }
+ else rrdset_next(core_st[c]);
+
+ for(l = 0; l < lines ;l++) {
+ struct interrupt *irr = irrindex(irrs, l, cpus);
+ if(irr->used && (do_per_core == CONFIG_BOOLEAN_YES || irr->cpu[c].value)) {
+ if(unlikely(!irr->cpu[c].rd)) {
+ irr->cpu[c].rd = rrddim_add(core_st[c], irr->id, irr->name, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_set_name(core_st[c], irr->cpu[c].rd, irr->name);
+ }
+
+ rrddim_set_by_pointer(core_st[c], irr->cpu[c].rd, irr->cpu[c].value);
+ }
+ }
+
+ rrdset_done(core_st[c]);
+ }
+ }
+
+ return 0;
+}
diff --git a/collectors/proc.plugin/proc_loadavg.c b/collectors/proc.plugin/proc_loadavg.c
new file mode 100644
index 0000000..8b78ecc
--- /dev/null
+++ b/collectors/proc.plugin/proc_loadavg.c
@@ -0,0 +1,131 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "plugin_proc.h"
+
+#define PLUGIN_PROC_MODULE_LOADAVG_NAME "/proc/loadavg"
+#define CONFIG_SECTION_PLUGIN_PROC_LOADAVG "plugin:" PLUGIN_PROC_CONFIG_NAME ":" PLUGIN_PROC_MODULE_LOADAVG_NAME
+
+// Linux calculates this once every 5 seconds
+#define MIN_LOADAVG_UPDATE_EVERY 5
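+// so chart updates are throttled below (via next_loadavg_dt) to at least
+// this interval, even when the plugin itself ticks faster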
+
+int do_proc_loadavg(int update_every, usec_t dt) {
+ static procfile *ff = NULL;
+ static int do_loadavg = -1, do_all_processes = -1;
+ static usec_t next_loadavg_dt = 0;
+
+ if(unlikely(!ff)) {
+ char filename[FILENAME_MAX + 1];
+ snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/proc/loadavg");
+
+ ff = procfile_open(config_get(CONFIG_SECTION_PLUGIN_PROC_LOADAVG, "filename to monitor", filename), " \t,:|/", PROCFILE_FLAG_DEFAULT);
+ if(unlikely(!ff))
+ return 1;
+ }
+
+ ff = procfile_readall(ff);
+ if(unlikely(!ff))
+ return 0; // we return 0, so that we will retry opening it next time
+
+ if(unlikely(do_loadavg == -1)) {
+ do_loadavg = config_get_boolean(CONFIG_SECTION_PLUGIN_PROC_LOADAVG, "enable load average", 1);
+ do_all_processes = config_get_boolean(CONFIG_SECTION_PLUGIN_PROC_LOADAVG, "enable total processes", 1);
+ }
+
+ if(unlikely(procfile_lines(ff) < 1)) {
+ error("/proc/loadavg has no lines.");
+ return 1;
+ }
+ if(unlikely(procfile_linewords(ff, 0) < 6)) {
+ error("/proc/loadavg has less than 6 words in it.");
+ return 1;
+ }
+
+ double load1 = strtod(procfile_lineword(ff, 0, 0), NULL);
+ double load5 = strtod(procfile_lineword(ff, 0, 1), NULL);
+ double load15 = strtod(procfile_lineword(ff, 0, 2), NULL);
+
+ //unsigned long long running_processes = str2ull(procfile_lineword(ff, 0, 3));
+ unsigned long long active_processes = str2ull(procfile_lineword(ff, 0, 4));
+
+ // get the system-wide pid_max
+ unsigned long long max_processes = get_system_pid_max();
+ //
+ //unsigned long long next_pid = str2ull(procfile_lineword(ff, 0, 5));
+
+
+ // --------------------------------------------------------------------
+
+ if(next_loadavg_dt <= dt) {
+ if(likely(do_loadavg)) {
+ static RRDSET *load_chart = NULL;
+ static RRDDIM *rd_load1 = NULL, *rd_load5 = NULL, *rd_load15 = NULL;
+
+ if(unlikely(!load_chart)) {
+ load_chart = rrdset_create_localhost(
+ "system"
+ , "load"
+ , NULL
+ , "load"
+ , NULL
+ , "System Load Average"
+ , "load"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_LOADAVG_NAME
+ , NETDATA_CHART_PRIO_SYSTEM_LOAD
+ , (update_every < MIN_LOADAVG_UPDATE_EVERY) ? MIN_LOADAVG_UPDATE_EVERY : update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rd_load1 = rrddim_add(load_chart, "load1", NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE);
+ rd_load5 = rrddim_add(load_chart, "load5", NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE);
+ rd_load15 = rrddim_add(load_chart, "load15", NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else
+ rrdset_next(load_chart);
+
+ rrddim_set_by_pointer(load_chart, rd_load1, (collected_number) (load1 * 1000));
+ rrddim_set_by_pointer(load_chart, rd_load5, (collected_number) (load5 * 1000));
+ rrddim_set_by_pointer(load_chart, rd_load15, (collected_number) (load15 * 1000));
+ rrdset_done(load_chart);
+
+ next_loadavg_dt = load_chart->update_every * USEC_PER_SEC;
+ }
+ else next_loadavg_dt = MIN_LOADAVG_UPDATE_EVERY * USEC_PER_SEC;
+ }
+ else next_loadavg_dt -= dt;
+
+ // --------------------------------------------------------------------
+
+ if(likely(do_all_processes)) {
+ static RRDSET *processes_chart = NULL;
+ static RRDDIM *rd_active = NULL;
+ static RRDSETVAR *rd_pidmax = NULL;
+
+ if(unlikely(!processes_chart)) {
+ processes_chart = rrdset_create_localhost(
+ "system"
+ , "active_processes"
+ , NULL
+ , "processes"
+ , NULL
+ , "System Active Processes"
+ , "processes"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_LOADAVG_NAME
+ , NETDATA_CHART_PRIO_SYSTEM_ACTIVE_PROCESSES
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rd_active = rrddim_add(processes_chart, "active", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ rd_pidmax = rrdsetvar_custom_chart_variable_create(processes_chart, "pidmax");
+ }
+ else rrdset_next(processes_chart);
+
+ rrddim_set_by_pointer(processes_chart, rd_active, active_processes);
+ rrdsetvar_custom_chart_variable_set(rd_pidmax, max_processes);
+ rrdset_done(processes_chart);
+ }
+
+ return 0;
+}
diff --git a/collectors/proc.plugin/proc_mdstat.c b/collectors/proc.plugin/proc_mdstat.c
new file mode 100644
index 0000000..e932453
--- /dev/null
+++ b/collectors/proc.plugin/proc_mdstat.c
@@ -0,0 +1,647 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "plugin_proc.h"
+
+#define PLUGIN_PROC_MODULE_MDSTAT_NAME "/proc/mdstat"
+
+struct raid {
+ int redundant;
+ char *name;
+ uint32_t hash;
+
+ RRDDIM *rd_health;
+ unsigned long long failed_disks;
+
+ RRDSET *st_disks;
+ RRDDIM *rd_down;
+ RRDDIM *rd_inuse;
+ unsigned long long total_disks;
+ unsigned long long inuse_disks;
+
+ RRDSET *st_operation;
+ RRDDIM *rd_check;
+ RRDDIM *rd_resync;
+ RRDDIM *rd_recovery;
+ RRDDIM *rd_reshape;
+ unsigned long long check;
+ unsigned long long resync;
+ unsigned long long recovery;
+ unsigned long long reshape;
+
+ RRDSET *st_finish;
+ RRDDIM *rd_finish_in;
+ unsigned long long finish_in;
+
+ RRDSET *st_speed;
+ RRDDIM *rd_speed;
+ unsigned long long speed;
+
+ char *mismatch_cnt_filename;
+ RRDSET *st_mismatch_cnt;
+ RRDDIM *rd_mismatch_cnt;
+ unsigned long long mismatch_cnt;
+
+ RRDSET *st_nonredundant;
+ RRDDIM *rd_nonredundant;
+};
+
+struct old_raid {
+ int redundant;
+ char *name;
+ uint32_t hash;
+ int found;
+};
+
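+// replaces every occurrence of c with a string terminator (effectively
+// truncating the string at the first one) and returns a pointer to the
+// original end of the string, so callers can tell whether anything was cut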
+static inline char *remove_trailing_chars(char *s, char c)
+{
+ while (*s) {
+ if (unlikely(*s == c)) {
+ *s = '\0';
+ }
+ s++;
+ }
+ return s;
+}
+
+static inline void make_chart_obsolete(char *name, const char *id_modifier)
+{
+ char id[50 + 1];
+ RRDSET *st = NULL;
+
+ if (likely(name && id_modifier)) {
+ snprintfz(id, 50, "mdstat.%s_%s", name, id_modifier);
+ st = rrdset_find_active_byname_localhost(id);
+ if (likely(st))
+ rrdset_is_obsolete(st);
+ }
+}
+
+int do_proc_mdstat(int update_every, usec_t dt)
+{
+ (void)dt;
+ static procfile *ff = NULL;
+ static int do_health = -1, do_nonredundant = -1, do_disks = -1, do_operations = -1, do_mismatch = -1,
+ do_mismatch_config = -1;
+ static int make_charts_obsolete = -1;
+ static char *mdstat_filename = NULL, *mismatch_cnt_filename = NULL;
+ static struct raid *raids = NULL;
+ static size_t raids_allocated = 0;
+ size_t raids_num = 0, raid_idx = 0, redundant_num = 0;
+ static struct old_raid *old_raids = NULL;
+ static size_t old_raids_allocated = 0;
+ size_t old_raid_idx = 0;
+
+ if (unlikely(do_health == -1)) {
+ do_health =
+ config_get_boolean("plugin:proc:/proc/mdstat", "faulty devices", CONFIG_BOOLEAN_YES);
+ do_nonredundant =
+ config_get_boolean("plugin:proc:/proc/mdstat", "nonredundant arrays availability", CONFIG_BOOLEAN_YES);
+ do_mismatch_config =
+ config_get_boolean_ondemand("plugin:proc:/proc/mdstat", "mismatch count", CONFIG_BOOLEAN_AUTO);
+ do_disks =
+ config_get_boolean("plugin:proc:/proc/mdstat", "disk stats", CONFIG_BOOLEAN_YES);
+ do_operations =
+ config_get_boolean("plugin:proc:/proc/mdstat", "operation status", CONFIG_BOOLEAN_YES);
+
+ make_charts_obsolete =
+ config_get_boolean("plugin:proc:/proc/mdstat", "make charts obsolete", CONFIG_BOOLEAN_YES);
+
+ char filename[FILENAME_MAX + 1];
+
+ snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/proc/mdstat");
+ mdstat_filename = config_get("plugin:proc:/proc/mdstat", "filename to monitor", filename);
+
+ snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/block/%s/md/mismatch_cnt");
+ mismatch_cnt_filename = config_get("plugin:proc:/proc/mdstat", "mismatch_cnt filename to monitor", filename);
+ }
+
+ if (unlikely(!ff)) {
+ ff = procfile_open(mdstat_filename, " \t:", PROCFILE_FLAG_DEFAULT);
+ if (unlikely(!ff))
+ return 1;
+ }
+
+ ff = procfile_readall(ff);
+ if (unlikely(!ff))
+ return 0; // we return 0, so that we will retry opening it next time
+
+ size_t lines = procfile_lines(ff);
+ size_t words = 0;
+
+ if (unlikely(lines < 2)) {
+ error("Cannot read /proc/mdstat. Expected 2 or more lines, read %zu.", lines);
+ return 1;
+ }
+
+ // find how many raids there are
+ size_t l;
+ raids_num = 0;
+ for (l = 1; l < lines - 2; l++) {
+ if (unlikely(procfile_lineword(ff, l, 1)[0] == 'a')) // check if the raid is active
+ raids_num++;
+ }
+
+ if (unlikely(!raids_num && !old_raids_allocated))
+ return 0; // we return 0, so that we will retry searching for raids next time
+
+ // allocate the memory we need
+ if (unlikely(raids_num != raids_allocated)) {
+ for (raid_idx = 0; raid_idx < raids_allocated; raid_idx++) {
+ struct raid *raid = &raids[raid_idx];
+ freez(raid->name);
+ freez(raid->mismatch_cnt_filename);
+ }
+ if (raids_num) {
+ raids = (struct raid *)reallocz(raids, raids_num * sizeof(struct raid));
+ memset(raids, 0, raids_num * sizeof(struct raid));
+ } else {
+ freez(raids);
+ raids = NULL;
+ }
+ raids_allocated = raids_num;
+ }
+
+ // loop through all lines except the first and the last ones
+ for (l = 1, raid_idx = 0; l < (lines - 2) && raid_idx < raids_num; l++) {
+ struct raid *raid = &raids[raid_idx];
+ raid->redundant = 0;
+
+ words = procfile_linewords(ff, l);
+
+ if (unlikely(words < 2))
+ continue;
+
+ if (unlikely(procfile_lineword(ff, l, 1)[0] != 'a'))
+ continue;
+
+ if (unlikely(!raid->name)) {
+ raid->name = strdupz(procfile_lineword(ff, l, 0));
+ raid->hash = simple_hash(raid->name);
+ } else if (unlikely(strcmp(raid->name, procfile_lineword(ff, l, 0)))) {
+ freez(raid->name);
+ freez(raid->mismatch_cnt_filename);
+ memset(raid, 0, sizeof(struct raid));
+ raid->name = strdupz(procfile_lineword(ff, l, 0));
+ raid->hash = simple_hash(raid->name);
+ }
+
+ if (unlikely(!raid->name || !raid->name[0]))
+ continue;
+
+ raid_idx++;
+
+ // check if raid has disk status
+ l++;
+ words = procfile_linewords(ff, l);
+ if (words < 2 || procfile_lineword(ff, l, words - 1)[0] != '[')
+ continue;
+
+ // split inuse and total number of disks
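+ // e.g. a status line like "976773168 blocks super 1.2 [2/2] [UU]" (illustrative)
+ // carries "[total/inuse]" as its second-to-last word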
+ if (likely(do_health || do_disks)) {
+ char *s = NULL, *str_total = NULL, *str_inuse = NULL;
+
+ s = procfile_lineword(ff, l, words - 2);
+ if (unlikely(s[0] != '[')) {
+ error("Cannot read /proc/mdstat raid health status. Unexpected format: missing opening bracket.");
+ continue;
+ }
+ str_total = ++s;
+ while (*s) {
+ if (unlikely(*s == '/')) {
+ *s = '\0';
+ str_inuse = s + 1;
+ } else if (unlikely(*s == ']')) {
+ *s = '\0';
+ break;
+ }
+ s++;
+ }
+ if (unlikely(str_total[0] == '\0' || !str_inuse || str_inuse[0] == '\0')) {
+ error("Cannot read /proc/mdstat raid health status. Unexpected format.");
+ continue;
+ }
+
+ raid->inuse_disks = str2ull(str_inuse);
+ raid->total_disks = str2ull(str_total);
+ raid->failed_disks = raid->total_disks - raid->inuse_disks;
+ }
+
+ raid->redundant = 1;
+ redundant_num++;
+ l++;
+
+ // check if any operation is performed on the raid
+ if (likely(do_operations)) {
+ char *s = NULL;
+
+ raid->check = 0;
+ raid->resync = 0;
+ raid->recovery = 0;
+ raid->reshape = 0;
+ raid->finish_in = 0;
+ raid->speed = 0;
+
+ words = procfile_linewords(ff, l);
+
+ if (likely(words < 2))
+ continue;
+
+ if (unlikely(procfile_lineword(ff, l, 0)[0] != '['))
+ continue;
+
+ if (unlikely(words < 7)) {
+ error("Cannot read /proc/mdstat line. Expected 7 params, read %zu.", words);
+ continue;
+ }
+
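+ // the progress line looks like (illustrative values):
+ // "[===>......]  recovery = 12.6% (37043392/292945152) finish=127.5min speed=33440K/sec"
+ // word 3 is the percentage, word 5 the finish estimate, word 6 the speed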
+ char *word;
+ word = procfile_lineword(ff, l, 3);
+ remove_trailing_chars(word, '%');
+
+ unsigned long long percentage = (unsigned long long)(str2ld(word, NULL) * 100);
+ // possible operations: check, resync, recovery, reshape
+ // the 4th character is unique to each operation, so that is what we check
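+ // ("check"[3] == 'c', "resync"[3] == 'y', "recovery"[3] == 'o', "reshape"[3] == 'h')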
+ switch (procfile_lineword(ff, l, 1)[3]) {
+ case 'c': // check
+ raid->check = percentage;
+ break;
+ case 'y': // resync
+ raid->resync = percentage;
+ break;
+ case 'o': // recovery
+ raid->recovery = percentage;
+ break;
+ case 'h': // reshape
+ raid->reshape = percentage;
+ break;
+ }
+
+ word = procfile_lineword(ff, l, 5);
+ s = remove_trailing_chars(word, 'm'); // remove trailing "min"
+
+ word += 7; // skip leading "finish="
+
+ if (likely(s > word))
+ raid->finish_in = (unsigned long long)(str2ld(word, NULL) * 60);
+
+ word = procfile_lineword(ff, l, 6);
+ s = remove_trailing_chars(word, 'K'); // remove trailing "K/sec"
+
+ word += 6; // skip leading "speed="
+
+ if (likely(s > word))
+ raid->speed = str2ull(word);
+ }
+ }
+
+ // read mismatch_cnt files
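+ // in auto mode this is disabled above 50 arrays, presumably to avoid
+ // one sysfs file read per redundant array on every iteration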
+ if (do_mismatch == -1) {
+ if (do_mismatch_config == CONFIG_BOOLEAN_AUTO) {
+ if (raids_num > 50)
+ do_mismatch = CONFIG_BOOLEAN_NO;
+ else
+ do_mismatch = CONFIG_BOOLEAN_YES;
+ } else
+ do_mismatch = do_mismatch_config;
+ }
+
+ if (likely(do_mismatch)) {
+ for (raid_idx = 0; raid_idx < raids_num; raid_idx++) {
+ char filename[FILENAME_MAX + 1];
+ struct raid *raid = &raids[raid_idx];
+
+ if (likely(raid->redundant)) {
+ if (unlikely(!raid->mismatch_cnt_filename)) {
+ snprintfz(filename, FILENAME_MAX, mismatch_cnt_filename, raid->name);
+ raid->mismatch_cnt_filename = strdupz(filename);
+ }
+ if (unlikely(read_single_number_file(raid->mismatch_cnt_filename, &raid->mismatch_cnt))) {
+ error("Cannot read file '%s'", raid->mismatch_cnt_filename);
+ do_mismatch = CONFIG_BOOLEAN_NO;
+ error("Monitoring for mismatch count has been disabled");
+ break;
+ }
+ }
+ }
+ }
+
+ // check for disappeared raids
+ for (old_raid_idx = 0; old_raid_idx < old_raids_allocated; old_raid_idx++) {
+ struct old_raid *old_raid = &old_raids[old_raid_idx];
+ int found = 0;
+
+ for (raid_idx = 0; raid_idx < raids_num; raid_idx++) {
+ struct raid *raid = &raids[raid_idx];
+
+ if (unlikely(
+ raid->hash == old_raid->hash && !strcmp(raid->name, old_raid->name) &&
+ raid->redundant == old_raid->redundant))
+ found = 1;
+ }
+
+ old_raid->found = found;
+ }
+
+ int raid_disappeared = 0;
+ for (old_raid_idx = 0; old_raid_idx < old_raids_allocated; old_raid_idx++) {
+ struct old_raid *old_raid = &old_raids[old_raid_idx];
+
+ if (unlikely(!old_raid->found)) {
+ if (likely(make_charts_obsolete)) {
+ make_chart_obsolete(old_raid->name, "disks");
+ make_chart_obsolete(old_raid->name, "mismatch");
+ make_chart_obsolete(old_raid->name, "operation");
+ make_chart_obsolete(old_raid->name, "finish");
+ make_chart_obsolete(old_raid->name, "speed");
+ make_chart_obsolete(old_raid->name, "availability");
+ }
+ raid_disappeared = 1;
+ }
+ }
+
+ // rebuild the list of known raids, so that disappeared ones can be detected on the next iteration
+ if (unlikely(raid_disappeared || old_raids_allocated != raids_num)) {
+ for (old_raid_idx = 0; old_raid_idx < old_raids_allocated; old_raid_idx++) {
+ freez(old_raids[old_raid_idx].name);
+ }
+ if (likely(raids_num)) {
+ old_raids = reallocz(old_raids, sizeof(struct old_raid) * raids_num);
+ memset(old_raids, 0, sizeof(struct old_raid) * raids_num);
+ } else {
+ freez(old_raids);
+ old_raids = NULL;
+ }
+ old_raids_allocated = raids_num;
+ for (old_raid_idx = 0; old_raid_idx < old_raids_allocated; old_raid_idx++) {
+ struct old_raid *old_raid = &old_raids[old_raid_idx];
+ struct raid *raid = &raids[old_raid_idx];
+
+ old_raid->name = strdupz(raid->name);
+ old_raid->hash = raid->hash;
+ old_raid->redundant = raid->redundant;
+ }
+ }
+
+ // --------------------------------------------------------------------
+
+ if (likely(do_health && redundant_num)) {
+ static RRDSET *st_mdstat_health = NULL;
+ if (unlikely(!st_mdstat_health)) {
+ st_mdstat_health = rrdset_create_localhost(
+ "mdstat",
+ "mdstat_health",
+ NULL,
+ "health",
+ "md.health",
+ "Faulty Devices In MD",
+ "failed disks",
+ PLUGIN_PROC_NAME,
+ PLUGIN_PROC_MODULE_MDSTAT_NAME,
+ NETDATA_CHART_PRIO_MDSTAT_HEALTH,
+ update_every,
+ RRDSET_TYPE_LINE);
+
+ rrdset_isnot_obsolete(st_mdstat_health);
+ } else
+ rrdset_next(st_mdstat_health);
+
+ if (!redundant_num) {
+ if (likely(make_charts_obsolete))
+ make_chart_obsolete("mdstat", "health");
+ } else {
+ for (raid_idx = 0; raid_idx < raids_num; raid_idx++) {
+ struct raid *raid = &raids[raid_idx];
+
+ if (likely(raid->redundant)) {
+ if (unlikely(!raid->rd_health && !(raid->rd_health = rrddim_find_active(st_mdstat_health, raid->name))))
+ raid->rd_health = rrddim_add(st_mdstat_health, raid->name, NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+
+ rrddim_set_by_pointer(st_mdstat_health, raid->rd_health, raid->failed_disks);
+ }
+ }
+
+ rrdset_done(st_mdstat_health);
+ }
+ }
+
+ // --------------------------------------------------------------------
+
+ for (raid_idx = 0; raid_idx < raids_num; raid_idx++) {
+ struct raid *raid = &raids[raid_idx];
+ char id[50 + 1];
+ char family[50 + 1];
+
+ if (likely(raid->redundant)) {
+ if (likely(do_disks)) {
+ snprintfz(id, 50, "%s_disks", raid->name);
+
+ if (unlikely(!raid->st_disks && !(raid->st_disks = rrdset_find_active_byname_localhost(id)))) {
+ snprintfz(family, 50, "%s", raid->name);
+
+ raid->st_disks = rrdset_create_localhost(
+ "mdstat",
+ id,
+ NULL,
+ family,
+ "md.disks",
+ "Disks Stats",
+ "disks",
+ PLUGIN_PROC_NAME,
+ PLUGIN_PROC_MODULE_MDSTAT_NAME,
+ NETDATA_CHART_PRIO_MDSTAT_DISKS + raid_idx * 10,
+ update_every,
+ RRDSET_TYPE_STACKED);
+
+ rrdset_isnot_obsolete(raid->st_disks);
+ } else
+ rrdset_next(raid->st_disks);
+
+ if (unlikely(!raid->rd_inuse && !(raid->rd_inuse = rrddim_find_active(raid->st_disks, "inuse"))))
+ raid->rd_inuse = rrddim_add(raid->st_disks, "inuse", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ if (unlikely(!raid->rd_down && !(raid->rd_down = rrddim_find_active(raid->st_disks, "down"))))
+ raid->rd_down = rrddim_add(raid->st_disks, "down", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+
+ rrddim_set_by_pointer(raid->st_disks, raid->rd_inuse, raid->inuse_disks);
+ rrddim_set_by_pointer(raid->st_disks, raid->rd_down, raid->failed_disks);
+
+ rrdset_done(raid->st_disks);
+ }
+
+ // --------------------------------------------------------------------
+
+ if (likely(do_mismatch)) {
+ snprintfz(id, 50, "%s_mismatch", raid->name);
+
+ if (unlikely(!raid->st_mismatch_cnt && !(raid->st_mismatch_cnt = rrdset_find_active_byname_localhost(id)))) {
+ snprintfz(family, 50, "%s", raid->name);
+
+ raid->st_mismatch_cnt = rrdset_create_localhost(
+ "mdstat",
+ id,
+ NULL,
+ family,
+ "md.mismatch_cnt",
+ "Mismatch Count",
+ "unsynchronized blocks",
+ PLUGIN_PROC_NAME,
+ PLUGIN_PROC_MODULE_MDSTAT_NAME,
+ NETDATA_CHART_PRIO_MDSTAT_MISMATCH + raid_idx * 10,
+ update_every,
+ RRDSET_TYPE_LINE);
+
+ rrdset_isnot_obsolete(raid->st_mismatch_cnt);
+ } else
+ rrdset_next(raid->st_mismatch_cnt);
+
+ if (unlikely(!raid->rd_mismatch_cnt && !(raid->rd_mismatch_cnt = rrddim_find_active(raid->st_mismatch_cnt, "count"))))
+ raid->rd_mismatch_cnt = rrddim_add(raid->st_mismatch_cnt, "count", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+
+ rrddim_set_by_pointer(raid->st_mismatch_cnt, raid->rd_mismatch_cnt, raid->mismatch_cnt);
+
+ rrdset_done(raid->st_mismatch_cnt);
+ }
+
+ // --------------------------------------------------------------------
+
+ if (likely(do_operations)) {
+ snprintfz(id, 50, "%s_operation", raid->name);
+
+ if (unlikely(!raid->st_operation && !(raid->st_operation = rrdset_find_active_byname_localhost(id)))) {
+ snprintfz(family, 50, "%s", raid->name);
+
+ raid->st_operation = rrdset_create_localhost(
+ "mdstat",
+ id,
+ NULL,
+ family,
+ "md.status",
+ "Current Status",
+ "percent",
+ PLUGIN_PROC_NAME,
+ PLUGIN_PROC_MODULE_MDSTAT_NAME,
+ NETDATA_CHART_PRIO_MDSTAT_OPERATION + raid_idx * 10,
+ update_every,
+ RRDSET_TYPE_LINE);
+
+ rrdset_isnot_obsolete(raid->st_operation);
+ } else
+ rrdset_next(raid->st_operation);
+
+ if(unlikely(!raid->rd_check && !(raid->rd_check = rrddim_find_active(raid->st_operation, "check"))))
+ raid->rd_check = rrddim_add(raid->st_operation, "check", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE);
+ if(unlikely(!raid->rd_resync && !(raid->rd_resync = rrddim_find_active(raid->st_operation, "resync"))))
+ raid->rd_resync = rrddim_add(raid->st_operation, "resync", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE);
+ if(unlikely(!raid->rd_recovery && !(raid->rd_recovery = rrddim_find_active(raid->st_operation, "recovery"))))
+ raid->rd_recovery = rrddim_add(raid->st_operation, "recovery", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE);
+ if(unlikely(!raid->rd_reshape && !(raid->rd_reshape = rrddim_find_active(raid->st_operation, "reshape"))))
+ raid->rd_reshape = rrddim_add(raid->st_operation, "reshape", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE);
+
+ rrddim_set_by_pointer(raid->st_operation, raid->rd_check, raid->check);
+ rrddim_set_by_pointer(raid->st_operation, raid->rd_resync, raid->resync);
+ rrddim_set_by_pointer(raid->st_operation, raid->rd_recovery, raid->recovery);
+ rrddim_set_by_pointer(raid->st_operation, raid->rd_reshape, raid->reshape);
+
+ rrdset_done(raid->st_operation);
+
+ // --------------------------------------------------------------------
+
+ snprintfz(id, 50, "%s_finish", raid->name);
+
+ if (unlikely(!raid->st_finish && !(raid->st_finish = rrdset_find_active_byname_localhost(id)))) {
+ snprintfz(family, 50, "%s", raid->name);
+
+ raid->st_finish = rrdset_create_localhost(
+ "mdstat",
+ id,
+ NULL,
+ family,
+ "md.rate",
+ "Approximate Time Unit Finish",
+ "seconds",
+ PLUGIN_PROC_NAME,
+ PLUGIN_PROC_MODULE_MDSTAT_NAME,
+ NETDATA_CHART_PRIO_MDSTAT_FINISH + raid_idx * 10,
+ update_every, RRDSET_TYPE_LINE);
+
+ rrdset_isnot_obsolete(raid->st_finish);
+ } else
+ rrdset_next(raid->st_finish);
+
+ if(unlikely(!raid->rd_finish_in && !(raid->rd_finish_in = rrddim_find_active(raid->st_finish, "finish_in"))))
+ raid->rd_finish_in = rrddim_add(raid->st_finish, "finish_in", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+
+ rrddim_set_by_pointer(raid->st_finish, raid->rd_finish_in, raid->finish_in);
+
+ rrdset_done(raid->st_finish);
+
+ // --------------------------------------------------------------------
+
+ snprintfz(id, 50, "%s_speed", raid->name);
+
+ if (unlikely(!raid->st_speed && !(raid->st_speed = rrdset_find_active_byname_localhost(id)))) {
+ snprintfz(family, 50, "%s", raid->name);
+
+ raid->st_speed = rrdset_create_localhost(
+ "mdstat",
+ id,
+ NULL,
+ family,
+ "md.rate",
+ "Operation Speed",
+ "KiB/s",
+ PLUGIN_PROC_NAME,
+ PLUGIN_PROC_MODULE_MDSTAT_NAME,
+ NETDATA_CHART_PRIO_MDSTAT_SPEED + raid_idx * 10,
+ update_every,
+ RRDSET_TYPE_LINE);
+
+ rrdset_isnot_obsolete(raid->st_speed);
+ } else
+ rrdset_next(raid->st_speed);
+
+ if (unlikely(!raid->rd_speed && !(raid->rd_speed = rrddim_find_active(raid->st_speed, "speed"))))
+ raid->rd_speed = rrddim_add(raid->st_speed, "speed", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+
+ rrddim_set_by_pointer(raid->st_speed, raid->rd_speed, raid->speed);
+
+ rrdset_done(raid->st_speed);
+ }
+ } else {
+ // --------------------------------------------------------------------
+
+ if (likely(do_nonredundant)) {
+ snprintfz(id, 50, "%s_availability", raid->name);
+
+ if (unlikely(!raid->st_nonredundant && !(raid->st_nonredundant = rrdset_find_active_localhost(id)))) {
+ snprintfz(family, 50, "%s", raid->name);
+
+ raid->st_nonredundant = rrdset_create_localhost(
+ "mdstat",
+ id,
+ NULL,
+ family,
+ "md.nonredundant",
+ "Nonredundant Array Availability",
+ "boolean",
+ PLUGIN_PROC_NAME,
+ PLUGIN_PROC_MODULE_MDSTAT_NAME,
+ NETDATA_CHART_PRIO_MDSTAT_NONREDUNDANT + raid_idx * 10,
+ update_every,
+ RRDSET_TYPE_LINE);
+
+ rrdset_isnot_obsolete(raid->st_nonredundant);
+ } else
+ rrdset_next(raid->st_nonredundant);
+
+ if (unlikely(!raid->rd_nonredundant && !(raid->rd_nonredundant = rrddim_find_active(raid->st_nonredundant, "available"))))
+ raid->rd_nonredundant = rrddim_add(raid->st_nonredundant, "available", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+
+ rrddim_set_by_pointer(raid->st_nonredundant, raid->rd_nonredundant, 1);
+
+ rrdset_done(raid->st_nonredundant);
+ }
+ }
+ }
+
+ return 0;
+}
diff --git a/collectors/proc.plugin/proc_meminfo.c b/collectors/proc.plugin/proc_meminfo.c
new file mode 100644
index 0000000..51d77fe
--- /dev/null
+++ b/collectors/proc.plugin/proc_meminfo.c
@@ -0,0 +1,528 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "plugin_proc.h"
+
+#define PLUGIN_PROC_MODULE_MEMINFO_NAME "/proc/meminfo"
+#define CONFIG_SECTION_PLUGIN_PROC_MEMINFO "plugin:" PLUGIN_PROC_CONFIG_NAME ":" PLUGIN_PROC_MODULE_MEMINFO_NAME
+
+int do_proc_meminfo(int update_every, usec_t dt) {
+ (void)dt;
+
+ static procfile *ff = NULL;
+ static int do_ram = -1, do_swap = -1, do_hwcorrupt = -1, do_committed = -1, do_writeback = -1, do_kernel = -1, do_slab = -1, do_hugepages = -1, do_transparent_hugepages = -1;
+
+ static ARL_BASE *arl_base = NULL;
+ static ARL_ENTRY *arl_hwcorrupted = NULL, *arl_memavailable = NULL;
+
+ static unsigned long long
+ MemTotal = 0,
+ MemFree = 0,
+ MemAvailable = 0,
+ Buffers = 0,
+ Cached = 0,
+ //SwapCached = 0,
+ //Active = 0,
+ //Inactive = 0,
+ //ActiveAnon = 0,
+ //InactiveAnon = 0,
+ //ActiveFile = 0,
+ //InactiveFile = 0,
+ //Unevictable = 0,
+ //Mlocked = 0,
+ SwapTotal = 0,
+ SwapFree = 0,
+ Dirty = 0,
+ Writeback = 0,
+ //AnonPages = 0,
+ //Mapped = 0,
+ Shmem = 0,
+ Slab = 0,
+ SReclaimable = 0,
+ SUnreclaim = 0,
+ KernelStack = 0,
+ PageTables = 0,
+ NFS_Unstable = 0,
+ Bounce = 0,
+ WritebackTmp = 0,
+ //CommitLimit = 0,
+ Committed_AS = 0,
+ //VmallocTotal = 0,
+ VmallocUsed = 0,
+ //VmallocChunk = 0,
+ AnonHugePages = 0,
+ ShmemHugePages = 0,
+ HugePages_Total = 0,
+ HugePages_Free = 0,
+ HugePages_Rsvd = 0,
+ HugePages_Surp = 0,
+ Hugepagesize = 0,
+ //DirectMap4k = 0,
+ //DirectMap2M = 0,
+ HardwareCorrupted = 0;
+
+ if(unlikely(!arl_base)) {
+ do_ram = config_get_boolean(CONFIG_SECTION_PLUGIN_PROC_MEMINFO, "system ram", 1);
+ do_swap = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_MEMINFO, "system swap", CONFIG_BOOLEAN_AUTO);
+ do_hwcorrupt = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_MEMINFO, "hardware corrupted ECC", CONFIG_BOOLEAN_AUTO);
+ do_committed = config_get_boolean(CONFIG_SECTION_PLUGIN_PROC_MEMINFO, "committed memory", 1);
+ do_writeback = config_get_boolean(CONFIG_SECTION_PLUGIN_PROC_MEMINFO, "writeback memory", 1);
+ do_kernel = config_get_boolean(CONFIG_SECTION_PLUGIN_PROC_MEMINFO, "kernel memory", 1);
+ do_slab = config_get_boolean(CONFIG_SECTION_PLUGIN_PROC_MEMINFO, "slab memory", 1);
+ do_hugepages = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_MEMINFO, "hugepages", CONFIG_BOOLEAN_AUTO);
+ do_transparent_hugepages = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_MEMINFO, "transparent hugepages", CONFIG_BOOLEAN_AUTO);
+
+ arl_base = arl_create("meminfo", NULL, 60);
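+ // bind each /proc/meminfo keyword to its variable; during parsing,
+ // arl_check() below fills the bound variables and returns non-zero
+ // once all expected entries have been seen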
+ arl_expect(arl_base, "MemTotal", &MemTotal);
+ arl_expect(arl_base, "MemFree", &MemFree);
+ arl_memavailable = arl_expect(arl_base, "MemAvailable", &MemAvailable);
+ arl_expect(arl_base, "Buffers", &Buffers);
+ arl_expect(arl_base, "Cached", &Cached);
+ //arl_expect(arl_base, "SwapCached", &SwapCached);
+ //arl_expect(arl_base, "Active", &Active);
+ //arl_expect(arl_base, "Inactive", &Inactive);
+ //arl_expect(arl_base, "ActiveAnon", &ActiveAnon);
+ //arl_expect(arl_base, "InactiveAnon", &InactiveAnon);
+ //arl_expect(arl_base, "ActiveFile", &ActiveFile);
+ //arl_expect(arl_base, "InactiveFile", &InactiveFile);
+ //arl_expect(arl_base, "Unevictable", &Unevictable);
+ //arl_expect(arl_base, "Mlocked", &Mlocked);
+ arl_expect(arl_base, "SwapTotal", &SwapTotal);
+ arl_expect(arl_base, "SwapFree", &SwapFree);
+ arl_expect(arl_base, "Dirty", &Dirty);
+ arl_expect(arl_base, "Writeback", &Writeback);
+ //arl_expect(arl_base, "AnonPages", &AnonPages);
+ //arl_expect(arl_base, "Mapped", &Mapped);
+ arl_expect(arl_base, "Shmem", &Shmem);
+ arl_expect(arl_base, "Slab", &Slab);
+ arl_expect(arl_base, "SReclaimable", &SReclaimable);
+ arl_expect(arl_base, "SUnreclaim", &SUnreclaim);
+ arl_expect(arl_base, "KernelStack", &KernelStack);
+ arl_expect(arl_base, "PageTables", &PageTables);
+ arl_expect(arl_base, "NFS_Unstable", &NFS_Unstable);
+ arl_expect(arl_base, "Bounce", &Bounce);
+ arl_expect(arl_base, "WritebackTmp", &WritebackTmp);
+ //arl_expect(arl_base, "CommitLimit", &CommitLimit);
+ arl_expect(arl_base, "Committed_AS", &Committed_AS);
+ //arl_expect(arl_base, "VmallocTotal", &VmallocTotal);
+ arl_expect(arl_base, "VmallocUsed", &VmallocUsed);
+ //arl_expect(arl_base, "VmallocChunk", &VmallocChunk);
+ arl_hwcorrupted = arl_expect(arl_base, "HardwareCorrupted", &HardwareCorrupted);
+ arl_expect(arl_base, "AnonHugePages", &AnonHugePages);
+ arl_expect(arl_base, "ShmemHugePages", &ShmemHugePages);
+ arl_expect(arl_base, "HugePages_Total", &HugePages_Total);
+ arl_expect(arl_base, "HugePages_Free", &HugePages_Free);
+ arl_expect(arl_base, "HugePages_Rsvd", &HugePages_Rsvd);
+ arl_expect(arl_base, "HugePages_Surp", &HugePages_Surp);
+ arl_expect(arl_base, "Hugepagesize", &Hugepagesize);
+ //arl_expect(arl_base, "DirectMap4k", &DirectMap4k);
+ //arl_expect(arl_base, "DirectMap2M", &DirectMap2M);
+ }
+
+ if(unlikely(!ff)) {
+ char filename[FILENAME_MAX + 1];
+ snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/proc/meminfo");
+ ff = procfile_open(config_get(CONFIG_SECTION_PLUGIN_PROC_MEMINFO, "filename to monitor", filename), " \t:", PROCFILE_FLAG_DEFAULT);
+ if(unlikely(!ff))
+ return 1;
+ }
+
+ ff = procfile_readall(ff);
+ if(unlikely(!ff))
+ return 0; // we return 0, so that we will retry opening it next time
+
+ size_t lines = procfile_lines(ff), l;
+
+ arl_begin(arl_base);
+
+ for(l = 0; l < lines ;l++) {
+ size_t words = procfile_linewords(ff, l);
+ if(unlikely(words < 2)) continue;
+
+ if(unlikely(arl_check(arl_base,
+ procfile_lineword(ff, l, 0),
+ procfile_lineword(ff, l, 1)))) break;
+ }
+
+ // --------------------------------------------------------------------
+
+ // http://calimeroteknik.free.fr/blag/?article20/really-used-memory-on-gnu-linux
+ unsigned long long MemCached = Cached + SReclaimable - Shmem;
+ unsigned long long MemUsed = MemTotal - MemFree - MemCached - Buffers;
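+ // illustrative numbers (KiB): MemTotal=16384, MemFree=1024, Buffers=512,
+ // Cached=8192, SReclaimable=512, Shmem=256 give
+ // MemCached = 8192 + 512 - 256 = 8448 and MemUsed = 16384 - 1024 - 8448 - 512 = 6400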
+
+ if(do_ram) {
+ {
+ static RRDSET *st_system_ram = NULL;
+ static RRDDIM *rd_free = NULL, *rd_used = NULL, *rd_cached = NULL, *rd_buffers = NULL;
+
+ if(unlikely(!st_system_ram)) {
+ st_system_ram = rrdset_create_localhost(
+ "system"
+ , "ram"
+ , NULL
+ , "ram"
+ , NULL
+ , "System RAM"
+ , "MiB"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_MEMINFO_NAME
+ , NETDATA_CHART_PRIO_SYSTEM_RAM
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+
+ rd_free = rrddim_add(st_system_ram, "free", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
+ rd_used = rrddim_add(st_system_ram, "used", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
+ rd_cached = rrddim_add(st_system_ram, "cached", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
+ rd_buffers = rrddim_add(st_system_ram, "buffers", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else rrdset_next(st_system_ram);
+
+ rrddim_set_by_pointer(st_system_ram, rd_free, MemFree);
+ rrddim_set_by_pointer(st_system_ram, rd_used, MemUsed);
+ rrddim_set_by_pointer(st_system_ram, rd_cached, MemCached);
+ rrddim_set_by_pointer(st_system_ram, rd_buffers, Buffers);
+
+ rrdset_done(st_system_ram);
+ }
+
+ if(arl_memavailable->flags & ARL_ENTRY_FLAG_FOUND) {
+ static RRDSET *st_mem_available = NULL;
+ static RRDDIM *rd_avail = NULL;
+
+ if(unlikely(!st_mem_available)) {
+ st_mem_available = rrdset_create_localhost(
+ "mem"
+ , "available"
+ , NULL
+ , "system"
+ , NULL
+ , "Available RAM for applications"
+ , "MiB"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_MEMINFO_NAME
+ , NETDATA_CHART_PRIO_MEM_SYSTEM_AVAILABLE
+ , update_every
+ , RRDSET_TYPE_AREA
+ );
+
+ rd_avail = rrddim_add(st_mem_available, "MemAvailable", "avail", 1, 1024, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else rrdset_next(st_mem_available);
+
+ rrddim_set_by_pointer(st_mem_available, rd_avail, MemAvailable);
+
+ rrdset_done(st_mem_available);
+ }
+ }
+
+ // --------------------------------------------------------------------
+
+ unsigned long long SwapUsed = SwapTotal - SwapFree;
+
+ if(do_swap == CONFIG_BOOLEAN_YES || (do_swap == CONFIG_BOOLEAN_AUTO &&
+ (SwapTotal || SwapUsed || SwapFree ||
+ netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
+ do_swap = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *st_system_swap = NULL;
+ static RRDDIM *rd_free = NULL, *rd_used = NULL;
+
+ if(unlikely(!st_system_swap)) {
+ st_system_swap = rrdset_create_localhost(
+ "system"
+ , "swap"
+ , NULL
+ , "swap"
+ , NULL
+ , "System Swap"
+ , "MiB"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_MEMINFO_NAME
+ , NETDATA_CHART_PRIO_SYSTEM_SWAP
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+
+ rrdset_flag_set(st_system_swap, RRDSET_FLAG_DETAIL);
+
+ rd_free = rrddim_add(st_system_swap, "free", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
+ rd_used = rrddim_add(st_system_swap, "used", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else rrdset_next(st_system_swap);
+
+ rrddim_set_by_pointer(st_system_swap, rd_used, SwapUsed);
+ rrddim_set_by_pointer(st_system_swap, rd_free, SwapFree);
+
+ rrdset_done(st_system_swap);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(arl_hwcorrupted->flags & ARL_ENTRY_FLAG_FOUND &&
+ (do_hwcorrupt == CONFIG_BOOLEAN_YES || (do_hwcorrupt == CONFIG_BOOLEAN_AUTO &&
+ (HardwareCorrupted > 0 ||
+ netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES)))) {
+ do_hwcorrupt = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *st_mem_hwcorrupt = NULL;
+ static RRDDIM *rd_corrupted = NULL;
+
+ if(unlikely(!st_mem_hwcorrupt)) {
+ st_mem_hwcorrupt = rrdset_create_localhost(
+ "mem"
+ , "hwcorrupt"
+ , NULL
+ , "ecc"
+ , NULL
+ , "Corrupted Memory, detected by ECC"
+ , "MiB"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_MEMINFO_NAME
+ , NETDATA_CHART_PRIO_MEM_HW
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rrdset_flag_set(st_mem_hwcorrupt, RRDSET_FLAG_DETAIL);
+
+ rd_corrupted = rrddim_add(st_mem_hwcorrupt, "HardwareCorrupted", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else rrdset_next(st_mem_hwcorrupt);
+
+ rrddim_set_by_pointer(st_mem_hwcorrupt, rd_corrupted, HardwareCorrupted);
+
+ rrdset_done(st_mem_hwcorrupt);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_committed) {
+ static RRDSET *st_mem_committed = NULL;
+ static RRDDIM *rd_committed = NULL;
+
+ if(unlikely(!st_mem_committed)) {
+ st_mem_committed = rrdset_create_localhost(
+ "mem"
+ , "committed"
+ , NULL
+ , "system"
+ , NULL
+ , "Committed (Allocated) Memory"
+ , "MiB"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_MEMINFO_NAME
+ , NETDATA_CHART_PRIO_MEM_SYSTEM_COMMITTED
+ , update_every
+ , RRDSET_TYPE_AREA
+ );
+
+ rrdset_flag_set(st_mem_committed, RRDSET_FLAG_DETAIL);
+
+ rd_committed = rrddim_add(st_mem_committed, "Committed_AS", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else rrdset_next(st_mem_committed);
+
+ rrddim_set_by_pointer(st_mem_committed, rd_committed, Committed_AS);
+
+ rrdset_done(st_mem_committed);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_writeback) {
+ static RRDSET *st_mem_writeback = NULL;
+ static RRDDIM *rd_dirty = NULL, *rd_writeback = NULL, *rd_fusewriteback = NULL, *rd_nfs_writeback = NULL, *rd_bounce = NULL;
+
+ if(unlikely(!st_mem_writeback)) {
+ st_mem_writeback = rrdset_create_localhost(
+ "mem"
+ , "writeback"
+ , NULL
+ , "kernel"
+ , NULL
+ , "Writeback Memory"
+ , "MiB"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_MEMINFO_NAME
+ , NETDATA_CHART_PRIO_MEM_KERNEL
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+ rrdset_flag_set(st_mem_writeback, RRDSET_FLAG_DETAIL);
+
+ rd_dirty = rrddim_add(st_mem_writeback, "Dirty", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
+ rd_writeback = rrddim_add(st_mem_writeback, "Writeback", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
+ rd_fusewriteback = rrddim_add(st_mem_writeback, "FuseWriteback", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
+ rd_nfs_writeback = rrddim_add(st_mem_writeback, "NfsWriteback", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
+ rd_bounce = rrddim_add(st_mem_writeback, "Bounce", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else rrdset_next(st_mem_writeback);
+
+ rrddim_set_by_pointer(st_mem_writeback, rd_dirty, Dirty);
+ rrddim_set_by_pointer(st_mem_writeback, rd_writeback, Writeback);
+ rrddim_set_by_pointer(st_mem_writeback, rd_fusewriteback, WritebackTmp);
+ rrddim_set_by_pointer(st_mem_writeback, rd_nfs_writeback, NFS_Unstable);
+ rrddim_set_by_pointer(st_mem_writeback, rd_bounce, Bounce);
+
+ rrdset_done(st_mem_writeback);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_kernel) {
+ static RRDSET *st_mem_kernel = NULL;
+ static RRDDIM *rd_slab = NULL, *rd_kernelstack = NULL, *rd_pagetables = NULL, *rd_vmallocused = NULL;
+
+ if(unlikely(!st_mem_kernel)) {
+ st_mem_kernel = rrdset_create_localhost(
+ "mem"
+ , "kernel"
+ , NULL
+ , "kernel"
+ , NULL
+ , "Memory Used by Kernel"
+ , "MiB"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_MEMINFO_NAME
+ , NETDATA_CHART_PRIO_MEM_KERNEL + 1
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+
+ rrdset_flag_set(st_mem_kernel, RRDSET_FLAG_DETAIL);
+
+ rd_slab = rrddim_add(st_mem_kernel, "Slab", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
+ rd_kernelstack = rrddim_add(st_mem_kernel, "KernelStack", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
+ rd_pagetables = rrddim_add(st_mem_kernel, "PageTables", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
+ rd_vmallocused = rrddim_add(st_mem_kernel, "VmallocUsed", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else rrdset_next(st_mem_kernel);
+
+ rrddim_set_by_pointer(st_mem_kernel, rd_slab, Slab);
+ rrddim_set_by_pointer(st_mem_kernel, rd_kernelstack, KernelStack);
+ rrddim_set_by_pointer(st_mem_kernel, rd_pagetables, PageTables);
+ rrddim_set_by_pointer(st_mem_kernel, rd_vmallocused, VmallocUsed);
+
+ rrdset_done(st_mem_kernel);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_slab) {
+ static RRDSET *st_mem_slab = NULL;
+ static RRDDIM *rd_reclaimable = NULL, *rd_unreclaimable = NULL;
+
+ if(unlikely(!st_mem_slab)) {
+ st_mem_slab = rrdset_create_localhost(
+ "mem"
+ , "slab"
+ , NULL
+ , "slab"
+ , NULL
+ , "Reclaimable Kernel Memory"
+ , "MiB"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_MEMINFO_NAME
+ , NETDATA_CHART_PRIO_MEM_SLAB
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+
+ rrdset_flag_set(st_mem_slab, RRDSET_FLAG_DETAIL);
+
+ rd_reclaimable = rrddim_add(st_mem_slab, "reclaimable", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
+ rd_unreclaimable = rrddim_add(st_mem_slab, "unreclaimable", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else rrdset_next(st_mem_slab);
+
+ rrddim_set_by_pointer(st_mem_slab, rd_reclaimable, SReclaimable);
+ rrddim_set_by_pointer(st_mem_slab, rd_unreclaimable, SUnreclaim);
+
+ rrdset_done(st_mem_slab);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_hugepages == CONFIG_BOOLEAN_YES || (do_hugepages == CONFIG_BOOLEAN_AUTO &&
+ ((Hugepagesize && HugePages_Total) ||
+ netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
+ do_hugepages = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *st_mem_hugepages = NULL;
+ static RRDDIM *rd_used = NULL, *rd_free = NULL, *rd_rsvd = NULL, *rd_surp = NULL;
+
+ if(unlikely(!st_mem_hugepages)) {
+ st_mem_hugepages = rrdset_create_localhost(
+ "mem"
+ , "hugepages"
+ , NULL
+ , "hugepages"
+ , NULL
+ , "Dedicated HugePages Memory"
+ , "MiB"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_MEMINFO_NAME
+ , NETDATA_CHART_PRIO_MEM_HUGEPAGES + 1
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+
+ rrdset_flag_set(st_mem_hugepages, RRDSET_FLAG_DETAIL);
+
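+ // the dimension values are page counts; the multiplier (Hugepagesize,
+ // in KiB) and the 1024 divisor convert them to the MiB the chart displays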
+ rd_free = rrddim_add(st_mem_hugepages, "free", NULL, Hugepagesize, 1024, RRD_ALGORITHM_ABSOLUTE);
+ rd_used = rrddim_add(st_mem_hugepages, "used", NULL, Hugepagesize, 1024, RRD_ALGORITHM_ABSOLUTE);
+ rd_surp = rrddim_add(st_mem_hugepages, "surplus", NULL, Hugepagesize, 1024, RRD_ALGORITHM_ABSOLUTE);
+ rd_rsvd = rrddim_add(st_mem_hugepages, "reserved", NULL, Hugepagesize, 1024, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else rrdset_next(st_mem_hugepages);
+
+ rrddim_set_by_pointer(st_mem_hugepages, rd_used, HugePages_Total - HugePages_Free - HugePages_Rsvd);
+ rrddim_set_by_pointer(st_mem_hugepages, rd_free, HugePages_Free);
+ rrddim_set_by_pointer(st_mem_hugepages, rd_rsvd, HugePages_Rsvd);
+ rrddim_set_by_pointer(st_mem_hugepages, rd_surp, HugePages_Surp);
+
+ rrdset_done(st_mem_hugepages);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_transparent_hugepages == CONFIG_BOOLEAN_YES || (do_transparent_hugepages == CONFIG_BOOLEAN_AUTO &&
+ (AnonHugePages ||
+ ShmemHugePages ||
+ netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
+ do_transparent_hugepages = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *st_mem_transparent_hugepages = NULL;
+ static RRDDIM *rd_anonymous = NULL, *rd_shared = NULL;
+
+ if(unlikely(!st_mem_transparent_hugepages)) {
+ st_mem_transparent_hugepages = rrdset_create_localhost(
+ "mem"
+ , "transparent_hugepages"
+ , NULL
+ , "hugepages"
+ , NULL
+ , "Transparent HugePages Memory"
+ , "MiB"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_MEMINFO_NAME
+ , NETDATA_CHART_PRIO_MEM_HUGEPAGES
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+
+ rrdset_flag_set(st_mem_transparent_hugepages, RRDSET_FLAG_DETAIL);
+
+ rd_anonymous = rrddim_add(st_mem_transparent_hugepages, "anonymous", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
+ rd_shared = rrddim_add(st_mem_transparent_hugepages, "shmem", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else rrdset_next(st_mem_transparent_hugepages);
+
+ rrddim_set_by_pointer(st_mem_transparent_hugepages, rd_anonymous, AnonHugePages);
+ rrddim_set_by_pointer(st_mem_transparent_hugepages, rd_shared, ShmemHugePages);
+
+ rrdset_done(st_mem_transparent_hugepages);
+ }
+
+ return 0;
+}
diff --git a/collectors/proc.plugin/proc_net_dev.c b/collectors/proc.plugin/proc_net_dev.c
new file mode 100644
index 0000000..a7db37e
--- /dev/null
+++ b/collectors/proc.plugin/proc_net_dev.c
@@ -0,0 +1,1127 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "plugin_proc.h"
+
+#define PLUGIN_PROC_MODULE_NETDEV_NAME "/proc/net/dev"
+#define CONFIG_SECTION_PLUGIN_PROC_NETDEV "plugin:" PLUGIN_PROC_CONFIG_NAME ":" PLUGIN_PROC_MODULE_NETDEV_NAME
+
+// As defined in https://www.kernel.org/doc/Documentation/ABI/testing/sysfs-class-net
+const char *operstate_names[] = { "unknown", "notpresent", "down", "lowerlayerdown", "testing", "dormant", "up" };
+
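+// map an operstate string to its index in operstate_names;
+// returns 0 ("unknown") when the string is not recognized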
+static inline int get_operstate(char *operstate)
+{
+ int i;
+
+ for (i = 0; i < (int) (sizeof(operstate_names) / sizeof(char *)); i++) {
+ if (!strcmp(operstate, operstate_names[i])) {
+ return i;
+ }
+ }
+ return 0;
+}
+
+// ----------------------------------------------------------------------------
+// netdev list
+
+static struct netdev {
+ char *name;
+ uint32_t hash;
+ size_t len;
+
+ // flags
+ int virtual;
+ int configured;
+ int enabled;
+ int updated;
+
+ int do_bandwidth;
+ int do_packets;
+ int do_errors;
+ int do_drops;
+ int do_fifo;
+ int do_compressed;
+ int do_events;
+
+ const char *chart_type_net_bytes;
+ const char *chart_type_net_packets;
+ const char *chart_type_net_errors;
+ const char *chart_type_net_fifo;
+ const char *chart_type_net_events;
+ const char *chart_type_net_drops;
+ const char *chart_type_net_compressed;
+
+ const char *chart_id_net_bytes;
+ const char *chart_id_net_packets;
+ const char *chart_id_net_errors;
+ const char *chart_id_net_fifo;
+ const char *chart_id_net_events;
+ const char *chart_id_net_drops;
+ const char *chart_id_net_compressed;
+
+ const char *chart_family;
+
+ struct label *chart_labels;
+
+ int flipped;
+ unsigned long priority;
+
+ // data collected
+ kernel_uint_t rbytes;
+ kernel_uint_t rpackets;
+ kernel_uint_t rerrors;
+ kernel_uint_t rdrops;
+ kernel_uint_t rfifo;
+ kernel_uint_t rframe;
+ kernel_uint_t rcompressed;
+ kernel_uint_t rmulticast;
+
+ kernel_uint_t tbytes;
+ kernel_uint_t tpackets;
+ kernel_uint_t terrors;
+ kernel_uint_t tdrops;
+ kernel_uint_t tfifo;
+ kernel_uint_t tcollisions;
+ kernel_uint_t tcarrier;
+ kernel_uint_t tcompressed;
+ kernel_uint_t speed;
+ kernel_uint_t duplex;
+ kernel_uint_t operstate;
+
+ // charts
+ RRDSET *st_bandwidth;
+ RRDSET *st_packets;
+ RRDSET *st_errors;
+ RRDSET *st_drops;
+ RRDSET *st_fifo;
+ RRDSET *st_compressed;
+ RRDSET *st_events;
+
+ // dimensions
+ RRDDIM *rd_rbytes;
+ RRDDIM *rd_rpackets;
+ RRDDIM *rd_rerrors;
+ RRDDIM *rd_rdrops;
+ RRDDIM *rd_rfifo;
+ RRDDIM *rd_rframe;
+ RRDDIM *rd_rcompressed;
+ RRDDIM *rd_rmulticast;
+
+ RRDDIM *rd_tbytes;
+ RRDDIM *rd_tpackets;
+ RRDDIM *rd_terrors;
+ RRDDIM *rd_tdrops;
+ RRDDIM *rd_tfifo;
+ RRDDIM *rd_tcollisions;
+ RRDDIM *rd_tcarrier;
+ RRDDIM *rd_tcompressed;
+
+ usec_t speed_last_collected_usec;
+ usec_t duplex_last_collected_usec;
+ usec_t operstate_last_collected_usec;
+
+ char *filename_speed;
+ RRDSETVAR *chart_var_speed;
+
+ char *filename_duplex;
+ RRDSETVAR *chart_var_duplex;
+
+ char *filename_operstate;
+ RRDSETVAR *chart_var_operstate;
+
+ struct netdev *next;
+} *netdev_root = NULL, *netdev_last_used = NULL;
+
+static size_t netdev_added = 0, netdev_found = 0;
+
+// ----------------------------------------------------------------------------
+
+static void netdev_charts_release(struct netdev *d) {
+ if(d->st_bandwidth) rrdset_is_obsolete(d->st_bandwidth);
+ if(d->st_packets) rrdset_is_obsolete(d->st_packets);
+ if(d->st_errors) rrdset_is_obsolete(d->st_errors);
+ if(d->st_drops) rrdset_is_obsolete(d->st_drops);
+ if(d->st_fifo) rrdset_is_obsolete(d->st_fifo);
+ if(d->st_compressed) rrdset_is_obsolete(d->st_compressed);
+ if(d->st_events) rrdset_is_obsolete(d->st_events);
+
+ d->st_bandwidth = NULL;
+ d->st_compressed = NULL;
+ d->st_drops = NULL;
+ d->st_errors = NULL;
+ d->st_events = NULL;
+ d->st_fifo = NULL;
+ d->st_packets = NULL;
+
+ d->rd_rbytes = NULL;
+ d->rd_rpackets = NULL;
+ d->rd_rerrors = NULL;
+ d->rd_rdrops = NULL;
+ d->rd_rfifo = NULL;
+ d->rd_rframe = NULL;
+ d->rd_rcompressed = NULL;
+ d->rd_rmulticast = NULL;
+
+ d->rd_tbytes = NULL;
+ d->rd_tpackets = NULL;
+ d->rd_terrors = NULL;
+ d->rd_tdrops = NULL;
+ d->rd_tfifo = NULL;
+ d->rd_tcollisions = NULL;
+ d->rd_tcarrier = NULL;
+ d->rd_tcompressed = NULL;
+
+ d->chart_var_speed = NULL;
+ d->chart_var_duplex = NULL;
+ d->chart_var_operstate = NULL;
+}
+
+static void netdev_free_chart_strings(struct netdev *d) {
+ freez((void *)d->chart_type_net_bytes);
+ freez((void *)d->chart_type_net_compressed);
+ freez((void *)d->chart_type_net_drops);
+ freez((void *)d->chart_type_net_errors);
+ freez((void *)d->chart_type_net_events);
+ freez((void *)d->chart_type_net_fifo);
+ freez((void *)d->chart_type_net_packets);
+
+ freez((void *)d->chart_id_net_bytes);
+ freez((void *)d->chart_id_net_compressed);
+ freez((void *)d->chart_id_net_drops);
+ freez((void *)d->chart_id_net_errors);
+ freez((void *)d->chart_id_net_events);
+ freez((void *)d->chart_id_net_fifo);
+ freez((void *)d->chart_id_net_packets);
+
+ freez((void *)d->chart_family);
+}
+
+static void netdev_free(struct netdev *d) {
+ netdev_charts_release(d);
+ netdev_free_chart_strings(d);
+ free_label_list(d->chart_labels);
+
+ freez((void *)d->name);
+ freez((void *)d->filename_speed);
+ freez((void *)d->filename_duplex);
+ freez((void *)d->filename_operstate);
+ freez((void *)d);
+ netdev_added--;
+}
+
+// ----------------------------------------------------------------------------
+// netdev renames
+
+static struct netdev_rename {
+ const char *host_device;
+ uint32_t hash;
+
+ const char *container_device;
+ const char *container_name;
+
+ struct label *chart_labels;
+
+ int processed;
+
+ struct netdev_rename *next;
+} *netdev_rename_root = NULL;
+
+static int netdev_pending_renames = 0;
+static netdata_mutex_t netdev_rename_mutex = NETDATA_MUTEX_INITIALIZER;
+
+static struct netdev_rename *netdev_rename_find(const char *host_device, uint32_t hash) {
+ struct netdev_rename *r;
+
+ for(r = netdev_rename_root; r ; r = r->next)
+ if(r->hash == hash && !strcmp(host_device, r->host_device))
+ return r;
+
+ return NULL;
+}
+
+// other threads can call this function to register a rename for a netdev
+void netdev_rename_device_add(
+ const char *host_device, const char *container_device, const char *container_name, struct label *labels)
+{
+ netdata_mutex_lock(&netdev_rename_mutex);
+
+ uint32_t hash = simple_hash(host_device);
+ struct netdev_rename *r = netdev_rename_find(host_device, hash);
+ if(!r) {
+ r = callocz(1, sizeof(struct netdev_rename));
+ r->host_device = strdupz(host_device);
+ r->container_device = strdupz(container_device);
+ r->container_name = strdupz(container_name);
+ update_label_list(&r->chart_labels, labels);
+ r->hash = hash;
+ r->next = netdev_rename_root;
+ r->processed = 0;
+ netdev_rename_root = r;
+ netdev_pending_renames++;
+ info("CGROUP: registered network interface rename for '%s' as '%s' under '%s'", r->host_device, r->container_device, r->container_name);
+ }
+ else {
+ if(strcmp(r->container_device, container_device) != 0 || strcmp(r->container_name, container_name) != 0) {
+ freez((void *) r->container_device);
+ freez((void *) r->container_name);
+
+ r->container_device = strdupz(container_device);
+ r->container_name = strdupz(container_name);
+
+ update_label_list(&r->chart_labels, labels);
+
+ r->processed = 0;
+ netdev_pending_renames++;
+ info("CGROUP: altered network interface rename for '%s' as '%s' under '%s'", r->host_device, r->container_device, r->container_name);
+ }
+ }
+
+ netdata_mutex_unlock(&netdev_rename_mutex);
+}
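+
+// Illustrative only -- the calls below do not appear in this file: the
+// cgroups collector is expected to register a rename when it maps a host-side
+// veth to a container interface, roughly like
+//
+//     netdev_rename_device_add("veth1234", "eth0", "my_container", labels);
+//
+// and to remove it again with netdev_rename_device_del("veth1234") when the
+// container goes away (the device and container names here are made up).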
+
+// other threads can call this function to delete a rename for a netdev
+void netdev_rename_device_del(const char *host_device) {
+ netdata_mutex_lock(&netdev_rename_mutex);
+
+ struct netdev_rename *r, *last = NULL;
+
+ uint32_t hash = simple_hash(host_device);
+ for(r = netdev_rename_root; r ; last = r, r = r->next) {
+ if (r->hash == hash && !strcmp(host_device, r->host_device)) {
+ if (netdev_rename_root == r)
+ netdev_rename_root = r->next;
+ else if (last)
+ last->next = r->next;
+
+ if(!r->processed)
+ netdev_pending_renames--;
+
+ info("CGROUP: unregistered network interface rename for '%s' as '%s' under '%s'", r->host_device, r->container_device, r->container_name);
+
+ freez((void *) r->host_device);
+ freez((void *) r->container_name);
+ freez((void *) r->container_device);
+ free_label_list(r->chart_labels);
+ freez((void *) r);
+ break;
+ }
+ }
+
+ netdata_mutex_unlock(&netdev_rename_mutex);
+}
+
+static inline void netdev_rename_cgroup(struct netdev *d, struct netdev_rename *r) {
+ info("CGROUP: renaming network interface '%s' as '%s' under '%s'", r->host_device, r->container_device, r->container_name);
+
+ netdev_charts_release(d);
+ netdev_free_chart_strings(d);
+
+ char buffer[RRD_ID_LENGTH_MAX + 1];
+
+ snprintfz(buffer, RRD_ID_LENGTH_MAX, "cgroup_%s", r->container_name);
+ d->chart_type_net_bytes = strdupz(buffer);
+ d->chart_type_net_compressed = strdupz(buffer);
+ d->chart_type_net_drops = strdupz(buffer);
+ d->chart_type_net_errors = strdupz(buffer);
+ d->chart_type_net_events = strdupz(buffer);
+ d->chart_type_net_fifo = strdupz(buffer);
+ d->chart_type_net_packets = strdupz(buffer);
+
+ snprintfz(buffer, RRD_ID_LENGTH_MAX, "net_%s", r->container_device);
+ d->chart_id_net_bytes = strdupz(buffer);
+
+ snprintfz(buffer, RRD_ID_LENGTH_MAX, "net_compressed_%s", r->container_device);
+ d->chart_id_net_compressed = strdupz(buffer);
+
+ snprintfz(buffer, RRD_ID_LENGTH_MAX, "net_drops_%s", r->container_device);
+ d->chart_id_net_drops = strdupz(buffer);
+
+ snprintfz(buffer, RRD_ID_LENGTH_MAX, "net_errors_%s", r->container_device);
+ d->chart_id_net_errors = strdupz(buffer);
+
+ snprintfz(buffer, RRD_ID_LENGTH_MAX, "net_events_%s", r->container_device);
+ d->chart_id_net_events = strdupz(buffer);
+
+ snprintfz(buffer, RRD_ID_LENGTH_MAX, "net_fifo_%s", r->container_device);
+ d->chart_id_net_fifo = strdupz(buffer);
+
+ snprintfz(buffer, RRD_ID_LENGTH_MAX, "net_packets_%s", r->container_device);
+ d->chart_id_net_packets = strdupz(buffer);
+
+ snprintfz(buffer, RRD_ID_LENGTH_MAX, "net %s", r->container_device);
+ d->chart_family = strdupz(buffer);
+
+ update_label_list(&d->chart_labels, r->chart_labels);
+
+ d->priority = NETDATA_CHART_PRIO_CGROUP_NET_IFACE;
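+    // a veth pair is mirrored: what the host side receives is what the
+    // container side transmitted, so the receive/transmit dimensions are
+    // flipped for cgroup-renamed interfaces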
+ d->flipped = 1;
+}
+
+static inline void netdev_rename(struct netdev *d) {
+ struct netdev_rename *r = netdev_rename_find(d->name, d->hash);
+ if(unlikely(r && !r->processed)) {
+ netdev_rename_cgroup(d, r);
+ r->processed = 1;
+ netdev_pending_renames--;
+ }
+}
+
+static inline void netdev_rename_lock(struct netdev *d) {
+ netdata_mutex_lock(&netdev_rename_mutex);
+ netdev_rename(d);
+ netdata_mutex_unlock(&netdev_rename_mutex);
+}
+
+static inline void netdev_rename_all_lock(void) {
+ netdata_mutex_lock(&netdev_rename_mutex);
+
+ struct netdev *d;
+ for(d = netdev_root; d ; d = d->next)
+ netdev_rename(d);
+
+ netdev_pending_renames = 0;
+ netdata_mutex_unlock(&netdev_rename_mutex);
+}
+
+// ----------------------------------------------------------------------------
+// netdev data collection
+
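+// mark-and-sweep: each pass over /proc/net/dev sets d->updated on every
+// interface it sees; netdev_cleanup() then obsoletes the charts of any
+// interface that was not updated and frees it.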
+static void netdev_cleanup() {
+ if(likely(netdev_found == netdev_added)) return;
+
+ netdev_added = 0;
+ struct netdev *d = netdev_root, *last = NULL;
+ while(d) {
+ if(unlikely(!d->updated)) {
+ // info("Removing network device '%s', linked after '%s'", d->name, last?last->name:"ROOT");
+
+ if(netdev_last_used == d)
+ netdev_last_used = last;
+
+ struct netdev *t = d;
+
+ if(d == netdev_root || !last)
+ netdev_root = d = d->next;
+
+ else
+ last->next = d = d->next;
+
+ t->next = NULL;
+ netdev_free(t);
+ }
+ else {
+ netdev_added++;
+ last = d;
+ d->updated = 0;
+ d = d->next;
+ }
+ }
+}
+
+static struct netdev *get_netdev(const char *name) {
+ struct netdev *d;
+
+ uint32_t hash = simple_hash(name);
+
+    // search for it, starting right after the last position we used;
+    // /proc/net/dev usually reports interfaces in a stable order, so
+    // this match typically succeeds immediately
+ for(d = netdev_last_used ; d ; d = d->next) {
+ if(unlikely(hash == d->hash && !strcmp(name, d->name))) {
+ netdev_last_used = d->next;
+ return d;
+ }
+ }
+
+    // not found there; search from the beginning up to the last position we used
+ for(d = netdev_root ; d != netdev_last_used ; d = d->next) {
+ if(unlikely(hash == d->hash && !strcmp(name, d->name))) {
+ netdev_last_used = d->next;
+ return d;
+ }
+ }
+
+ // create a new one
+ d = callocz(1, sizeof(struct netdev));
+ d->name = strdupz(name);
+ d->hash = simple_hash(d->name);
+ d->len = strlen(d->name);
+
+ d->chart_type_net_bytes = strdupz("net");
+ d->chart_type_net_compressed = strdupz("net_compressed");
+ d->chart_type_net_drops = strdupz("net_drops");
+ d->chart_type_net_errors = strdupz("net_errors");
+ d->chart_type_net_events = strdupz("net_events");
+ d->chart_type_net_fifo = strdupz("net_fifo");
+ d->chart_type_net_packets = strdupz("net_packets");
+
+ d->chart_id_net_bytes = strdupz(d->name);
+ d->chart_id_net_compressed = strdupz(d->name);
+ d->chart_id_net_drops = strdupz(d->name);
+ d->chart_id_net_errors = strdupz(d->name);
+ d->chart_id_net_events = strdupz(d->name);
+ d->chart_id_net_fifo = strdupz(d->name);
+ d->chart_id_net_packets = strdupz(d->name);
+
+ d->chart_family = strdupz(d->name);
+ d->priority = NETDATA_CHART_PRIO_FIRST_NET_IFACE;
+
+ netdev_rename_lock(d);
+
+ netdev_added++;
+
+ // link it to the end
+ if(netdev_root) {
+ struct netdev *e;
+ for(e = netdev_root; e->next ; e = e->next) ;
+ e->next = d;
+ }
+ else
+ netdev_root = d;
+
+ return d;
+}
+
+int do_proc_net_dev(int update_every, usec_t dt) {
+ static SIMPLE_PATTERN *disabled_list = NULL;
+ static procfile *ff = NULL;
+ static int enable_new_interfaces = -1;
+ static int do_bandwidth = -1, do_packets = -1, do_errors = -1, do_drops = -1, do_fifo = -1, do_compressed = -1,
+ do_events = -1;
+ static char *path_to_sys_devices_virtual_net = NULL, *path_to_sys_class_net_speed = NULL,
+ *proc_net_dev_filename = NULL;
+ static char *path_to_sys_class_net_duplex = NULL;
+ static char *path_to_sys_class_net_operstate = NULL;
+ static long long int dt_to_refresh_speed = 0;
+ static long long int dt_to_refresh_duplex = 0;
+ static long long int dt_to_refresh_operstate = 0;
+
+ if(unlikely(enable_new_interfaces == -1)) {
+ char filename[FILENAME_MAX + 1];
+
+ snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, (*netdata_configured_host_prefix)?"/proc/1/net/dev":"/proc/net/dev");
+ proc_net_dev_filename = config_get(CONFIG_SECTION_PLUGIN_PROC_NETDEV, "filename to monitor", filename);
+
+ snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/devices/virtual/net/%s");
+ path_to_sys_devices_virtual_net = config_get(CONFIG_SECTION_PLUGIN_PROC_NETDEV, "path to get virtual interfaces", filename);
+
+ snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/class/net/%s/speed");
+ path_to_sys_class_net_speed = config_get(CONFIG_SECTION_PLUGIN_PROC_NETDEV, "path to get net device speed", filename);
+
+ snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/class/net/%s/duplex");
+ path_to_sys_class_net_duplex = config_get(CONFIG_SECTION_PLUGIN_PROC_NETDEV, "path to get net device duplex", filename);
+
+ snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/class/net/%s/operstate");
+ path_to_sys_class_net_operstate = config_get(CONFIG_SECTION_PLUGIN_PROC_NETDEV, "path to get net device operstate", filename);
+
+ enable_new_interfaces = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_NETDEV, "enable new interfaces detected at runtime", CONFIG_BOOLEAN_AUTO);
+
+ do_bandwidth = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_NETDEV, "bandwidth for all interfaces", CONFIG_BOOLEAN_AUTO);
+ do_packets = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_NETDEV, "packets for all interfaces", CONFIG_BOOLEAN_AUTO);
+ do_errors = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_NETDEV, "errors for all interfaces", CONFIG_BOOLEAN_AUTO);
+ do_drops = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_NETDEV, "drops for all interfaces", CONFIG_BOOLEAN_AUTO);
+ do_fifo = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_NETDEV, "fifo for all interfaces", CONFIG_BOOLEAN_AUTO);
+ do_compressed = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_NETDEV, "compressed packets for all interfaces", CONFIG_BOOLEAN_AUTO);
+ do_events = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_NETDEV, "frames, collisions, carrier counters for all interfaces", CONFIG_BOOLEAN_AUTO);
+
+ disabled_list = simple_pattern_create(config_get(CONFIG_SECTION_PLUGIN_PROC_NETDEV, "disable by default interfaces matching", "lo fireqos* *-ifb"), NULL, SIMPLE_PATTERN_EXACT);
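+        // space-separated simple patterns: the default disables the loopback
+        // device, fireqos* helpers and *-ifb intermediate functional block
+        // devices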
+
+ dt_to_refresh_speed = config_get_number(CONFIG_SECTION_PLUGIN_PROC_NETDEV, "refresh interface speed every seconds", 10) * USEC_PER_SEC;
+ dt_to_refresh_duplex = config_get_number(CONFIG_SECTION_PLUGIN_PROC_NETDEV, "refresh interface duplex every seconds", 10) * USEC_PER_SEC;
+ dt_to_refresh_operstate = config_get_number(CONFIG_SECTION_PLUGIN_PROC_NETDEV, "refresh interface operstate every seconds", 10) * USEC_PER_SEC;
+
+ if (dt_to_refresh_operstate < 0)
+ dt_to_refresh_operstate = 0;
+ if (dt_to_refresh_duplex < 0)
+ dt_to_refresh_duplex = 0;
+ if (dt_to_refresh_speed < 0)
+ dt_to_refresh_speed = 0;
+ }
+
+ if(unlikely(!ff)) {
+ ff = procfile_open(proc_net_dev_filename, " \t,|", PROCFILE_FLAG_DEFAULT);
+ if(unlikely(!ff)) return 1;
+ }
+
+ ff = procfile_readall(ff);
+    if(unlikely(!ff)) return 0; // return 0 so that we retry opening it next time
+
+ // rename all the devices, if we have pending renames
+ if(unlikely(netdev_pending_renames))
+ netdev_rename_all_lock();
+
+ netdev_found = 0;
+
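+    // totals for the system-wide bandwidth chart; only physical
+    // (non-virtual) interfaces are summed into these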
+ kernel_uint_t system_rbytes = 0;
+ kernel_uint_t system_tbytes = 0;
+
+ size_t lines = procfile_lines(ff), l;
+ for(l = 2; l < lines ;l++) {
+ // require 17 words on each line
+ if(unlikely(procfile_linewords(ff, l) < 17)) continue;
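+        // /proc/net/dev columns after the interface name (word 0):
+        //   receive:  bytes(1) packets(2) errs(3) drop(4) fifo(5) frame(6) compressed(7) multicast(8)
+        //   transmit: bytes(9) packets(10) errs(11) drop(12) fifo(13) colls(14) carrier(15) compressed(16)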
+
+ char *name = procfile_lineword(ff, l, 0);
+ size_t len = strlen(name);
+ if(name[len - 1] == ':') name[len - 1] = '\0';
+
+ struct netdev *d = get_netdev(name);
+ d->updated = 1;
+ netdev_found++;
+
+ if(unlikely(!d->configured)) {
+ // this is the first time we see this interface
+
+ // remember we configured it
+ d->configured = 1;
+
+ d->enabled = enable_new_interfaces;
+
+ if(d->enabled)
+ d->enabled = !simple_pattern_matches(disabled_list, d->name);
+
+ char buffer[FILENAME_MAX + 1];
+
+ snprintfz(buffer, FILENAME_MAX, path_to_sys_devices_virtual_net, d->name);
+ if(likely(access(buffer, R_OK) == 0)) {
+ d->virtual = 1;
+ }
+ else
+ d->virtual = 0;
+
+ if(likely(!d->virtual)) {
+ // set the filename to get the interface speed
+ snprintfz(buffer, FILENAME_MAX, path_to_sys_class_net_speed, d->name);
+ d->filename_speed = strdupz(buffer);
+
+ snprintfz(buffer, FILENAME_MAX, path_to_sys_class_net_duplex, d->name);
+ d->filename_duplex = strdupz(buffer);
+ }
+ snprintfz(buffer, FILENAME_MAX, path_to_sys_class_net_operstate, d->name);
+ d->filename_operstate = strdupz(buffer);
+
+ snprintfz(buffer, FILENAME_MAX, "plugin:proc:/proc/net/dev:%s", d->name);
+ d->enabled = config_get_boolean_ondemand(buffer, "enabled", d->enabled);
+ d->virtual = config_get_boolean(buffer, "virtual", d->virtual);
+
+ if(d->enabled == CONFIG_BOOLEAN_NO)
+ continue;
+
+ d->do_bandwidth = config_get_boolean_ondemand(buffer, "bandwidth", do_bandwidth);
+ d->do_packets = config_get_boolean_ondemand(buffer, "packets", do_packets);
+ d->do_errors = config_get_boolean_ondemand(buffer, "errors", do_errors);
+ d->do_drops = config_get_boolean_ondemand(buffer, "drops", do_drops);
+ d->do_fifo = config_get_boolean_ondemand(buffer, "fifo", do_fifo);
+ d->do_compressed = config_get_boolean_ondemand(buffer, "compressed", do_compressed);
+ d->do_events = config_get_boolean_ondemand(buffer, "events", do_events);
+ }
+
+ if(unlikely(!d->enabled))
+ continue;
+
+ if(likely(d->do_bandwidth != CONFIG_BOOLEAN_NO || !d->virtual)) {
+ d->rbytes = str2kernel_uint_t(procfile_lineword(ff, l, 1));
+ d->tbytes = str2kernel_uint_t(procfile_lineword(ff, l, 9));
+
+ if(likely(!d->virtual)) {
+ system_rbytes += d->rbytes;
+ system_tbytes += d->tbytes;
+ }
+ }
+
+ if(likely(d->do_packets != CONFIG_BOOLEAN_NO)) {
+ d->rpackets = str2kernel_uint_t(procfile_lineword(ff, l, 2));
+ d->rmulticast = str2kernel_uint_t(procfile_lineword(ff, l, 8));
+ d->tpackets = str2kernel_uint_t(procfile_lineword(ff, l, 10));
+ }
+
+ if(likely(d->do_errors != CONFIG_BOOLEAN_NO)) {
+ d->rerrors = str2kernel_uint_t(procfile_lineword(ff, l, 3));
+ d->terrors = str2kernel_uint_t(procfile_lineword(ff, l, 11));
+ }
+
+ if(likely(d->do_drops != CONFIG_BOOLEAN_NO)) {
+ d->rdrops = str2kernel_uint_t(procfile_lineword(ff, l, 4));
+ d->tdrops = str2kernel_uint_t(procfile_lineword(ff, l, 12));
+ }
+
+ if(likely(d->do_fifo != CONFIG_BOOLEAN_NO)) {
+ d->rfifo = str2kernel_uint_t(procfile_lineword(ff, l, 5));
+ d->tfifo = str2kernel_uint_t(procfile_lineword(ff, l, 13));
+ }
+
+ if(likely(d->do_compressed != CONFIG_BOOLEAN_NO)) {
+ d->rcompressed = str2kernel_uint_t(procfile_lineword(ff, l, 7));
+ d->tcompressed = str2kernel_uint_t(procfile_lineword(ff, l, 16));
+ }
+
+ if(likely(d->do_events != CONFIG_BOOLEAN_NO)) {
+ d->rframe = str2kernel_uint_t(procfile_lineword(ff, l, 6));
+ d->tcollisions = str2kernel_uint_t(procfile_lineword(ff, l, 14));
+ d->tcarrier = str2kernel_uint_t(procfile_lineword(ff, l, 15));
+ }
+
+ //info("PROC_NET_DEV: %s speed %zu, bytes %zu/%zu, packets %zu/%zu/%zu, errors %zu/%zu, drops %zu/%zu, fifo %zu/%zu, compressed %zu/%zu, rframe %zu, tcollisions %zu, tcarrier %zu"
+ // , d->name, d->speed
+ // , d->rbytes, d->tbytes
+ // , d->rpackets, d->tpackets, d->rmulticast
+ // , d->rerrors, d->terrors
+ // , d->rdrops, d->tdrops
+ // , d->rfifo, d->tfifo
+ // , d->rcompressed, d->tcompressed
+ // , d->rframe, d->tcollisions, d->tcarrier
+ // );
+
+ // --------------------------------------------------------------------
+
+ if(unlikely(d->do_bandwidth == CONFIG_BOOLEAN_AUTO &&
+ (d->rbytes || d->tbytes || netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES)))
+ d->do_bandwidth = CONFIG_BOOLEAN_YES;
+
+ if(d->do_bandwidth == CONFIG_BOOLEAN_YES) {
+ if(unlikely(!d->st_bandwidth)) {
+
+ d->st_bandwidth = rrdset_create_localhost(
+ d->chart_type_net_bytes
+ , d->chart_id_net_bytes
+ , NULL
+ , d->chart_family
+ , "net.net"
+ , "Bandwidth"
+ , "kilobits/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NETDEV_NAME
+ , d->priority
+ , update_every
+ , RRDSET_TYPE_AREA
+ );
+
+ rrdset_update_labels(d->st_bandwidth, d->chart_labels);
+
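+                // a multiplier of 8 with divider BITS_IN_A_KILOBIT (1000)
+                // converts the byte counters to kilobits/s; "sent" is
+                // negative so it plots below the axis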
+ d->rd_rbytes = rrddim_add(d->st_bandwidth, "received", NULL, 8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
+ d->rd_tbytes = rrddim_add(d->st_bandwidth, "sent", NULL, -8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
+
+ if(d->flipped) {
+                    // flip receive/transmit
+
+ RRDDIM *td = d->rd_rbytes;
+ d->rd_rbytes = d->rd_tbytes;
+ d->rd_tbytes = td;
+ }
+ }
+ else rrdset_next(d->st_bandwidth);
+
+ rrddim_set_by_pointer(d->st_bandwidth, d->rd_rbytes, (collected_number)d->rbytes);
+ rrddim_set_by_pointer(d->st_bandwidth, d->rd_tbytes, (collected_number)d->tbytes);
+ rrdset_done(d->st_bandwidth);
+
+ // update the interface speed
+ if(d->filename_speed) {
+ d->speed_last_collected_usec += dt;
+
+ if(unlikely(d->speed_last_collected_usec >= (usec_t)dt_to_refresh_speed)) {
+
+ if(unlikely(!d->chart_var_speed)) {
+ d->chart_var_speed = rrdsetvar_custom_chart_variable_create(d->st_bandwidth, "nic_speed_max");
+ if(!d->chart_var_speed) {
+ error("Cannot create interface %s chart variable 'nic_speed_max'. Will not update its speed anymore.", d->name);
+ freez(d->filename_speed);
+ d->filename_speed = NULL;
+ }
+ }
+
+ if(d->filename_speed && d->chart_var_speed) {
+ if(read_single_number_file(d->filename_speed, (unsigned long long *) &d->speed)) {
+ error("Cannot refresh interface %s speed by reading '%s'. Will not update its speed anymore.", d->name, d->filename_speed);
+ freez(d->filename_speed);
+ d->filename_speed = NULL;
+ }
+ else {
+ rrdsetvar_custom_chart_variable_set(d->chart_var_speed, (calculated_number) d->speed);
+ d->speed_last_collected_usec = 0;
+ }
+ }
+ }
+ }
+
+ if (d->filename_duplex) {
+ d->duplex_last_collected_usec += dt;
+
+ if (unlikely(d->duplex_last_collected_usec >= (usec_t)dt_to_refresh_duplex)) {
+ if (unlikely(!d->chart_var_duplex)) {
+ d->chart_var_duplex = rrdsetvar_custom_chart_variable_create(d->st_bandwidth, "duplex");
+ if (!d->chart_var_duplex) {
+ error("Cannot create interface %s chart variable 'duplex'. Will not update the duplex status anymore.", d->name);
+ freez(d->filename_duplex);
+ d->filename_duplex = NULL;
+ }
+ }
+
+ if (d->filename_duplex && d->chart_var_duplex) {
+ char buffer[32 + 1];
+
+ if (read_file(d->filename_duplex, buffer, 32)) {
+ error("Cannot refresh interface %s duplex state by reading '%s'. I will stop updating it.", d->name, d->filename_duplex);
+ freez(d->filename_duplex);
+ d->filename_duplex = NULL;
+ } else {
+                            // values can be unknown, half or full -- checking the first letter is enough
+ if (buffer[0] == 'f')
+ d->duplex = 2;
+ else if (buffer[0] == 'h')
+ d->duplex = 1;
+ else
+ d->duplex = 0;
+
+ rrdsetvar_custom_chart_variable_set(d->chart_var_duplex, (calculated_number)d->duplex);
+ d->duplex_last_collected_usec = 0;
+ }
+ }
+ }
+ }
+
+ if (d->filename_operstate) {
+ d->operstate_last_collected_usec += dt;
+
+ if (unlikely(d->operstate_last_collected_usec >= (usec_t)dt_to_refresh_operstate)) {
+ if (unlikely(!d->chart_var_operstate)) {
+ d->chart_var_operstate = rrdsetvar_custom_chart_variable_create(d->st_bandwidth, "operstate");
+ if (!d->chart_var_operstate) {
+ error(
+ "Cannot create interface %s chart variable 'operstate'. I will stop updating it.",
+ d->name);
+ freez(d->filename_operstate);
+ d->filename_operstate = NULL;
+ }
+ }
+
+ if (d->filename_operstate && d->chart_var_operstate) {
+ char buffer[32 + 1], *trimmed_buffer;
+
+ if (read_file(d->filename_operstate, buffer, 32)) {
+ error(
+ "Cannot refresh %s operstate by reading '%s'. Will not update its status anymore.",
+ d->name, d->filename_operstate);
+ freez(d->filename_operstate);
+ d->filename_operstate = NULL;
+ } else {
+ trimmed_buffer = trim(buffer);
+ d->operstate = get_operstate(trimmed_buffer);
+ rrdsetvar_custom_chart_variable_set(d->chart_var_operstate, (calculated_number)d->operstate);
+ d->operstate_last_collected_usec = 0;
+ }
+ }
+ }
+ }
+ }
+
+ // --------------------------------------------------------------------
+
+ if(unlikely(d->do_packets == CONFIG_BOOLEAN_AUTO &&
+ (d->rpackets || d->tpackets || d->rmulticast || netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES)))
+ d->do_packets = CONFIG_BOOLEAN_YES;
+
+ if(d->do_packets == CONFIG_BOOLEAN_YES) {
+ if(unlikely(!d->st_packets)) {
+
+ d->st_packets = rrdset_create_localhost(
+ d->chart_type_net_packets
+ , d->chart_id_net_packets
+ , NULL
+ , d->chart_family
+ , "net.packets"
+ , "Packets"
+ , "packets/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NETDEV_NAME
+ , d->priority + 1
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rrdset_flag_set(d->st_packets, RRDSET_FLAG_DETAIL);
+
+ rrdset_update_labels(d->st_packets, d->chart_labels);
+
+ d->rd_rpackets = rrddim_add(d->st_packets, "received", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ d->rd_tpackets = rrddim_add(d->st_packets, "sent", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ d->rd_rmulticast = rrddim_add(d->st_packets, "multicast", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+
+ if(d->flipped) {
+                    // flip receive/transmit
+
+ RRDDIM *td = d->rd_rpackets;
+ d->rd_rpackets = d->rd_tpackets;
+ d->rd_tpackets = td;
+ }
+ }
+ else rrdset_next(d->st_packets);
+
+ rrddim_set_by_pointer(d->st_packets, d->rd_rpackets, (collected_number)d->rpackets);
+ rrddim_set_by_pointer(d->st_packets, d->rd_tpackets, (collected_number)d->tpackets);
+ rrddim_set_by_pointer(d->st_packets, d->rd_rmulticast, (collected_number)d->rmulticast);
+ rrdset_done(d->st_packets);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(unlikely(d->do_errors == CONFIG_BOOLEAN_AUTO &&
+ (d->rerrors || d->terrors || netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES)))
+ d->do_errors = CONFIG_BOOLEAN_YES;
+
+ if(d->do_errors == CONFIG_BOOLEAN_YES) {
+ if(unlikely(!d->st_errors)) {
+
+ d->st_errors = rrdset_create_localhost(
+ d->chart_type_net_errors
+ , d->chart_id_net_errors
+ , NULL
+ , d->chart_family
+ , "net.errors"
+ , "Interface Errors"
+ , "errors/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NETDEV_NAME
+ , d->priority + 2
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rrdset_flag_set(d->st_errors, RRDSET_FLAG_DETAIL);
+
+ rrdset_update_labels(d->st_errors, d->chart_labels);
+
+ d->rd_rerrors = rrddim_add(d->st_errors, "inbound", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ d->rd_terrors = rrddim_add(d->st_errors, "outbound", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+
+ if(d->flipped) {
+                    // flip receive/transmit
+
+ RRDDIM *td = d->rd_rerrors;
+ d->rd_rerrors = d->rd_terrors;
+ d->rd_terrors = td;
+ }
+ }
+ else rrdset_next(d->st_errors);
+
+ rrddim_set_by_pointer(d->st_errors, d->rd_rerrors, (collected_number)d->rerrors);
+ rrddim_set_by_pointer(d->st_errors, d->rd_terrors, (collected_number)d->terrors);
+ rrdset_done(d->st_errors);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(unlikely(d->do_drops == CONFIG_BOOLEAN_AUTO &&
+ (d->rdrops || d->tdrops || netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES)))
+ d->do_drops = CONFIG_BOOLEAN_YES;
+
+ if(d->do_drops == CONFIG_BOOLEAN_YES) {
+ if(unlikely(!d->st_drops)) {
+
+ d->st_drops = rrdset_create_localhost(
+ d->chart_type_net_drops
+ , d->chart_id_net_drops
+ , NULL
+ , d->chart_family
+ , "net.drops"
+ , "Interface Drops"
+ , "drops/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NETDEV_NAME
+ , d->priority + 3
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rrdset_flag_set(d->st_drops, RRDSET_FLAG_DETAIL);
+
+ rrdset_update_labels(d->st_drops, d->chart_labels);
+
+ d->rd_rdrops = rrddim_add(d->st_drops, "inbound", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ d->rd_tdrops = rrddim_add(d->st_drops, "outbound", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+
+ if(d->flipped) {
+                    // flip receive/transmit
+
+ RRDDIM *td = d->rd_rdrops;
+ d->rd_rdrops = d->rd_tdrops;
+ d->rd_tdrops = td;
+ }
+ }
+ else rrdset_next(d->st_drops);
+
+ rrddim_set_by_pointer(d->st_drops, d->rd_rdrops, (collected_number)d->rdrops);
+ rrddim_set_by_pointer(d->st_drops, d->rd_tdrops, (collected_number)d->tdrops);
+ rrdset_done(d->st_drops);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(unlikely(d->do_fifo == CONFIG_BOOLEAN_AUTO &&
+ (d->rfifo || d->tfifo || netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES)))
+ d->do_fifo = CONFIG_BOOLEAN_YES;
+
+ if(d->do_fifo == CONFIG_BOOLEAN_YES) {
+ if(unlikely(!d->st_fifo)) {
+
+ d->st_fifo = rrdset_create_localhost(
+ d->chart_type_net_fifo
+ , d->chart_id_net_fifo
+ , NULL
+ , d->chart_family
+ , "net.fifo"
+ , "Interface FIFO Buffer Errors"
+ , "errors"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NETDEV_NAME
+ , d->priority + 4
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rrdset_flag_set(d->st_fifo, RRDSET_FLAG_DETAIL);
+
+ rrdset_update_labels(d->st_fifo, d->chart_labels);
+
+ d->rd_rfifo = rrddim_add(d->st_fifo, "receive", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ d->rd_tfifo = rrddim_add(d->st_fifo, "transmit", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+
+ if(d->flipped) {
+                    // flip receive/transmit
+
+ RRDDIM *td = d->rd_rfifo;
+ d->rd_rfifo = d->rd_tfifo;
+ d->rd_tfifo = td;
+ }
+ }
+ else rrdset_next(d->st_fifo);
+
+ rrddim_set_by_pointer(d->st_fifo, d->rd_rfifo, (collected_number)d->rfifo);
+ rrddim_set_by_pointer(d->st_fifo, d->rd_tfifo, (collected_number)d->tfifo);
+ rrdset_done(d->st_fifo);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(unlikely(d->do_compressed == CONFIG_BOOLEAN_AUTO &&
+ (d->rcompressed || d->tcompressed || netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES)))
+ d->do_compressed = CONFIG_BOOLEAN_YES;
+
+ if(d->do_compressed == CONFIG_BOOLEAN_YES) {
+ if(unlikely(!d->st_compressed)) {
+
+ d->st_compressed = rrdset_create_localhost(
+ d->chart_type_net_compressed
+ , d->chart_id_net_compressed
+ , NULL
+ , d->chart_family
+ , "net.compressed"
+ , "Compressed Packets"
+ , "packets/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NETDEV_NAME
+ , d->priority + 5
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rrdset_flag_set(d->st_compressed, RRDSET_FLAG_DETAIL);
+
+ rrdset_update_labels(d->st_compressed, d->chart_labels);
+
+ d->rd_rcompressed = rrddim_add(d->st_compressed, "received", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ d->rd_tcompressed = rrddim_add(d->st_compressed, "sent", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+
+ if(d->flipped) {
+                    // flip receive/transmit
+
+ RRDDIM *td = d->rd_rcompressed;
+ d->rd_rcompressed = d->rd_tcompressed;
+ d->rd_tcompressed = td;
+ }
+ }
+ else rrdset_next(d->st_compressed);
+
+ rrddim_set_by_pointer(d->st_compressed, d->rd_rcompressed, (collected_number)d->rcompressed);
+ rrddim_set_by_pointer(d->st_compressed, d->rd_tcompressed, (collected_number)d->tcompressed);
+ rrdset_done(d->st_compressed);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(unlikely(d->do_events == CONFIG_BOOLEAN_AUTO &&
+ (d->rframe || d->tcollisions || d->tcarrier || netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES)))
+ d->do_events = CONFIG_BOOLEAN_YES;
+
+ if(d->do_events == CONFIG_BOOLEAN_YES) {
+ if(unlikely(!d->st_events)) {
+
+ d->st_events = rrdset_create_localhost(
+ d->chart_type_net_events
+ , d->chart_id_net_events
+ , NULL
+ , d->chart_family
+ , "net.events"
+ , "Network Interface Events"
+ , "events/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NETDEV_NAME
+ , d->priority + 6
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rrdset_flag_set(d->st_events, RRDSET_FLAG_DETAIL);
+
+ rrdset_update_labels(d->st_events, d->chart_labels);
+
+ d->rd_rframe = rrddim_add(d->st_events, "frames", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ d->rd_tcollisions = rrddim_add(d->st_events, "collisions", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ d->rd_tcarrier = rrddim_add(d->st_events, "carrier", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(d->st_events);
+
+ rrddim_set_by_pointer(d->st_events, d->rd_rframe, (collected_number)d->rframe);
+ rrddim_set_by_pointer(d->st_events, d->rd_tcollisions, (collected_number)d->tcollisions);
+ rrddim_set_by_pointer(d->st_events, d->rd_tcarrier, (collected_number)d->tcarrier);
+ rrdset_done(d->st_events);
+ }
+ }
+
+ if(do_bandwidth == CONFIG_BOOLEAN_YES || (do_bandwidth == CONFIG_BOOLEAN_AUTO &&
+ (system_rbytes || system_tbytes ||
+ netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
+ do_bandwidth = CONFIG_BOOLEAN_YES;
+ static RRDSET *st_system_net = NULL;
+ static RRDDIM *rd_in = NULL, *rd_out = NULL;
+
+ if(unlikely(!st_system_net)) {
+ st_system_net = rrdset_create_localhost(
+ "system"
+ , "net"
+ , NULL
+ , "network"
+ , NULL
+ , "Physical Network Interfaces Aggregated Bandwidth"
+ , "kilobits/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NETDEV_NAME
+ , NETDATA_CHART_PRIO_SYSTEM_NET
+ , update_every
+ , RRDSET_TYPE_AREA
+ );
+
+ rd_in = rrddim_add(st_system_net, "InOctets", "received", 8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
+ rd_out = rrddim_add(st_system_net, "OutOctets", "sent", -8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else
+ rrdset_next(st_system_net);
+
+ rrddim_set_by_pointer(st_system_net, rd_in, (collected_number)system_rbytes);
+ rrddim_set_by_pointer(st_system_net, rd_out, (collected_number)system_tbytes);
+
+ rrdset_done(st_system_net);
+ }
+
+ netdev_cleanup();
+
+ return 0;
+}
diff --git a/collectors/proc.plugin/proc_net_ip_vs_stats.c b/collectors/proc.plugin/proc_net_ip_vs_stats.c
new file mode 100644
index 0000000..43dcf2a
--- /dev/null
+++ b/collectors/proc.plugin/proc_net_ip_vs_stats.c
@@ -0,0 +1,133 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "plugin_proc.h"
+
+#define RRD_TYPE_NET_IPVS "ipvs"
+#define PLUGIN_PROC_MODULE_NET_IPVS_NAME "/proc/net/ip_vs_stats"
+#define CONFIG_SECTION_PLUGIN_PROC_NET_IPVS "plugin:" PLUGIN_PROC_CONFIG_NAME ":" PLUGIN_PROC_MODULE_NET_IPVS_NAME
+
+int do_proc_net_ip_vs_stats(int update_every, usec_t dt) {
+ (void)dt;
+ static int do_bandwidth = -1, do_sockets = -1, do_packets = -1;
+ static procfile *ff = NULL;
+
+ if(do_bandwidth == -1) do_bandwidth = config_get_boolean(CONFIG_SECTION_PLUGIN_PROC_NET_IPVS, "IPVS bandwidth", 1);
+ if(do_sockets == -1) do_sockets = config_get_boolean(CONFIG_SECTION_PLUGIN_PROC_NET_IPVS, "IPVS connections", 1);
+ if(do_packets == -1) do_packets = config_get_boolean(CONFIG_SECTION_PLUGIN_PROC_NET_IPVS, "IPVS packets", 1);
+
+ if(!ff) {
+ char filename[FILENAME_MAX + 1];
+ snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/proc/net/ip_vs_stats");
+ ff = procfile_open(config_get(CONFIG_SECTION_PLUGIN_PROC_NET_IPVS, "filename to monitor", filename), " \t,:|", PROCFILE_FLAG_DEFAULT);
+ }
+ if(!ff) return 1;
+
+ ff = procfile_readall(ff);
+    if(!ff) return 0; // return 0 so that we retry opening it next time
+
+ // make sure we have 3 lines
+ if(procfile_lines(ff) < 3) return 1;
+
+ // make sure we have 5 words on the 3rd line
+ if(procfile_linewords(ff, 2) < 5) return 1;
+
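+    // the first two lines of /proc/net/ip_vs_stats are headers; the third
+    // line carries five hexadecimal totals:
+    //   Conns InPkts OutPkts InBytes OutBytes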
+ unsigned long long entries, InPackets, OutPackets, InBytes, OutBytes;
+
+ entries = strtoull(procfile_lineword(ff, 2, 0), NULL, 16);
+ InPackets = strtoull(procfile_lineword(ff, 2, 1), NULL, 16);
+ OutPackets = strtoull(procfile_lineword(ff, 2, 2), NULL, 16);
+ InBytes = strtoull(procfile_lineword(ff, 2, 3), NULL, 16);
+ OutBytes = strtoull(procfile_lineword(ff, 2, 4), NULL, 16);
+
+ // --------------------------------------------------------------------
+
+ if(do_sockets) {
+ static RRDSET *st = NULL;
+
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ RRD_TYPE_NET_IPVS
+ , "sockets"
+ , NULL
+ , RRD_TYPE_NET_IPVS
+ , NULL
+ , "IPVS New Connections"
+ , "connections/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NET_IPVS_NAME
+ , NETDATA_CHART_PRIO_IPVS_SOCKETS
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rrddim_add(st, "connections", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set(st, "connections", entries);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_packets) {
+ static RRDSET *st = NULL;
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ RRD_TYPE_NET_IPVS
+ , "packets"
+ , NULL
+ , RRD_TYPE_NET_IPVS
+ , NULL
+ , "IPVS Packets"
+ , "packets/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NET_IPVS_NAME
+ , NETDATA_CHART_PRIO_IPVS_PACKETS
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rrddim_add(st, "received", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "sent", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set(st, "received", InPackets);
+ rrddim_set(st, "sent", OutPackets);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_bandwidth) {
+ static RRDSET *st = NULL;
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ RRD_TYPE_NET_IPVS
+ , "net"
+ , NULL
+ , RRD_TYPE_NET_IPVS
+ , NULL
+ , "IPVS Bandwidth"
+ , "kilobits/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NET_IPVS_NAME
+ , NETDATA_CHART_PRIO_IPVS_NET
+ , update_every
+ , RRDSET_TYPE_AREA
+ );
+
+ rrddim_add(st, "received", NULL, 8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "sent", NULL, -8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set(st, "received", InBytes);
+ rrddim_set(st, "sent", OutBytes);
+ rrdset_done(st);
+ }
+
+ return 0;
+}
diff --git a/collectors/proc.plugin/proc_net_netstat.c b/collectors/proc.plugin/proc_net_netstat.c
new file mode 100644
index 0000000..ab8206b
--- /dev/null
+++ b/collectors/proc.plugin/proc_net_netstat.c
@@ -0,0 +1,869 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "plugin_proc.h"
+
+#define RRD_TYPE_NET_NETSTAT "ip"
+#define PLUGIN_PROC_MODULE_NETSTAT_NAME "/proc/net/netstat"
+#define CONFIG_SECTION_PLUGIN_PROC_NETSTAT "plugin:" PLUGIN_PROC_CONFIG_NAME ":" PLUGIN_PROC_MODULE_NETSTAT_NAME
+
+unsigned long long tcpext_TCPSynRetrans = 0;
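+// kept non-static so other proc modules can chart it too (see the
+// "shared metrics" note below)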
+
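+// /proc/net/netstat comes in header/value line pairs, e.g.:
+//   IpExt: InNoRoutes InTruncatedPkts InMcastPkts ...
+//   IpExt: 0 12 3456 ...
+// parse_line_pair() walks the two lines in lockstep and hands each
+// (keyword, value) pair to the ARL.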
+static void parse_line_pair(procfile *ff, ARL_BASE *base, size_t header_line, size_t values_line) {
+ size_t hwords = procfile_linewords(ff, header_line);
+ size_t vwords = procfile_linewords(ff, values_line);
+ size_t w;
+
+ if(unlikely(vwords > hwords)) {
+ error("File /proc/net/netstat on header line %zu has %zu words, but on value line %zu has %zu words.", header_line, hwords, values_line, vwords);
+ vwords = hwords;
+ }
+
+ for(w = 1; w < vwords ;w++) {
+ if(unlikely(arl_check(base, procfile_lineword(ff, header_line, w), procfile_lineword(ff, values_line, w))))
+ break;
+ }
+}
+
+int do_proc_net_netstat(int update_every, usec_t dt) {
+ (void)dt;
+
+    static int do_bandwidth = -1, do_inerrors = -1, do_mcast = -1, do_bcast = -1, do_mcast_p = -1, do_bcast_p = -1, do_ecn = -1,
+ do_tcpext_reorder = -1, do_tcpext_syscookies = -1, do_tcpext_ofo = -1, do_tcpext_connaborts = -1, do_tcpext_memory = -1,
+ do_tcpext_syn_queue = -1, do_tcpext_accept_queue = -1;
+
+ static uint32_t hash_ipext = 0, hash_tcpext = 0;
+ static procfile *ff = NULL;
+
+ static ARL_BASE *arl_tcpext = NULL;
+ static ARL_BASE *arl_ipext = NULL;
+
+ // --------------------------------------------------------------------
+ // IP
+
+ // IP bandwidth
+ static unsigned long long ipext_InOctets = 0;
+ static unsigned long long ipext_OutOctets = 0;
+
+ // IP input errors
+ static unsigned long long ipext_InNoRoutes = 0;
+ static unsigned long long ipext_InTruncatedPkts = 0;
+ static unsigned long long ipext_InCsumErrors = 0;
+
+ // IP multicast bandwidth
+ static unsigned long long ipext_InMcastOctets = 0;
+ static unsigned long long ipext_OutMcastOctets = 0;
+
+ // IP multicast packets
+ static unsigned long long ipext_InMcastPkts = 0;
+ static unsigned long long ipext_OutMcastPkts = 0;
+
+ // IP broadcast bandwidth
+ static unsigned long long ipext_InBcastOctets = 0;
+ static unsigned long long ipext_OutBcastOctets = 0;
+
+ // IP broadcast packets
+ static unsigned long long ipext_InBcastPkts = 0;
+ static unsigned long long ipext_OutBcastPkts = 0;
+
+ // IP ECN
+ static unsigned long long ipext_InNoECTPkts = 0;
+ static unsigned long long ipext_InECT1Pkts = 0;
+ static unsigned long long ipext_InECT0Pkts = 0;
+ static unsigned long long ipext_InCEPkts = 0;
+
+ // --------------------------------------------------------------------
+ // IP TCP
+
+ // IP TCP Reordering
+ static unsigned long long tcpext_TCPRenoReorder = 0;
+ static unsigned long long tcpext_TCPFACKReorder = 0;
+ static unsigned long long tcpext_TCPSACKReorder = 0;
+ static unsigned long long tcpext_TCPTSReorder = 0;
+
+ // IP TCP SYN Cookies
+ static unsigned long long tcpext_SyncookiesSent = 0;
+ static unsigned long long tcpext_SyncookiesRecv = 0;
+ static unsigned long long tcpext_SyncookiesFailed = 0;
+
+ // IP TCP Out Of Order Queue
+ // http://www.spinics.net/lists/netdev/msg204696.html
+ static unsigned long long tcpext_TCPOFOQueue = 0; // Number of packets queued in OFO queue
+ static unsigned long long tcpext_TCPOFODrop = 0; // Number of packets meant to be queued in OFO but dropped because socket rcvbuf limit hit.
+ static unsigned long long tcpext_TCPOFOMerge = 0; // Number of packets in OFO that were merged with other packets.
+ static unsigned long long tcpext_OfoPruned = 0; // packets dropped from out-of-order queue because of socket buffer overrun
+
+ // IP TCP connection resets
+ // https://github.com/ecki/net-tools/blob/bd8bceaed2311651710331a7f8990c3e31be9840/statistics.c
+ static unsigned long long tcpext_TCPAbortOnData = 0; // connections reset due to unexpected data
+ static unsigned long long tcpext_TCPAbortOnClose = 0; // connections reset due to early user close
+ static unsigned long long tcpext_TCPAbortOnMemory = 0; // connections aborted due to memory pressure
+ static unsigned long long tcpext_TCPAbortOnTimeout = 0; // connections aborted due to timeout
+ static unsigned long long tcpext_TCPAbortOnLinger = 0; // connections aborted after user close in linger timeout
+ static unsigned long long tcpext_TCPAbortFailed = 0; // times unable to send RST due to no memory
+
+ // https://perfchron.com/2015/12/26/investigating-linux-network-issues-with-netstat-and-nstat/
+ static unsigned long long tcpext_ListenOverflows = 0; // times the listen queue of a socket overflowed
+ static unsigned long long tcpext_ListenDrops = 0; // SYNs to LISTEN sockets ignored
+
+ // IP TCP memory pressures
+ static unsigned long long tcpext_TCPMemoryPressures = 0;
+
+ static unsigned long long tcpext_TCPReqQFullDrop = 0;
+ static unsigned long long tcpext_TCPReqQFullDoCookies = 0;
+
+ // shared: tcpext_TCPSynRetrans
+
+ if(unlikely(!arl_ipext)) {
+ hash_ipext = simple_hash("IpExt");
+ hash_tcpext = simple_hash("TcpExt");
+
+ do_bandwidth = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_NETSTAT, "bandwidth", CONFIG_BOOLEAN_AUTO);
+ do_inerrors = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_NETSTAT, "input errors", CONFIG_BOOLEAN_AUTO);
+ do_mcast = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_NETSTAT, "multicast bandwidth", CONFIG_BOOLEAN_AUTO);
+ do_bcast = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_NETSTAT, "broadcast bandwidth", CONFIG_BOOLEAN_AUTO);
+ do_mcast_p = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_NETSTAT, "multicast packets", CONFIG_BOOLEAN_AUTO);
+ do_bcast_p = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_NETSTAT, "broadcast packets", CONFIG_BOOLEAN_AUTO);
+ do_ecn = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_NETSTAT, "ECN packets", CONFIG_BOOLEAN_AUTO);
+
+ do_tcpext_reorder = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_NETSTAT, "TCP reorders", CONFIG_BOOLEAN_AUTO);
+ do_tcpext_syscookies = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_NETSTAT, "TCP SYN cookies", CONFIG_BOOLEAN_AUTO);
+ do_tcpext_ofo = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_NETSTAT, "TCP out-of-order queue", CONFIG_BOOLEAN_AUTO);
+ do_tcpext_connaborts = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_NETSTAT, "TCP connection aborts", CONFIG_BOOLEAN_AUTO);
+ do_tcpext_memory = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_NETSTAT, "TCP memory pressures", CONFIG_BOOLEAN_AUTO);
+
+ do_tcpext_syn_queue = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_NETSTAT, "TCP SYN queue", CONFIG_BOOLEAN_AUTO);
+ do_tcpext_accept_queue = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_NETSTAT, "TCP accept queue", CONFIG_BOOLEAN_AUTO);
+
+ arl_ipext = arl_create("netstat/ipext", NULL, 60);
+ arl_tcpext = arl_create("netstat/tcpext", NULL, 60);
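+        // ARL = adaptive re-sortable list: after a few passes the expected
+        // keywords are checked in the order they appear in the file, so
+        // steady-state parsing needs no repeated lookups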
+
+ // --------------------------------------------------------------------
+ // IP
+
+ if(do_bandwidth != CONFIG_BOOLEAN_NO) {
+ arl_expect(arl_ipext, "InOctets", &ipext_InOctets);
+ arl_expect(arl_ipext, "OutOctets", &ipext_OutOctets);
+ }
+
+ if(do_inerrors != CONFIG_BOOLEAN_NO) {
+ arl_expect(arl_ipext, "InNoRoutes", &ipext_InNoRoutes);
+ arl_expect(arl_ipext, "InTruncatedPkts", &ipext_InTruncatedPkts);
+ arl_expect(arl_ipext, "InCsumErrors", &ipext_InCsumErrors);
+ }
+
+ if(do_mcast != CONFIG_BOOLEAN_NO) {
+ arl_expect(arl_ipext, "InMcastOctets", &ipext_InMcastOctets);
+ arl_expect(arl_ipext, "OutMcastOctets", &ipext_OutMcastOctets);
+ }
+
+ if(do_mcast_p != CONFIG_BOOLEAN_NO) {
+ arl_expect(arl_ipext, "InMcastPkts", &ipext_InMcastPkts);
+ arl_expect(arl_ipext, "OutMcastPkts", &ipext_OutMcastPkts);
+ }
+
+        if(do_bcast != CONFIG_BOOLEAN_NO) {
+            arl_expect(arl_ipext, "InBcastOctets", &ipext_InBcastOctets);
+            arl_expect(arl_ipext, "OutBcastOctets", &ipext_OutBcastOctets);
+        }
+
+        if(do_bcast_p != CONFIG_BOOLEAN_NO) {
+            arl_expect(arl_ipext, "InBcastPkts", &ipext_InBcastPkts);
+            arl_expect(arl_ipext, "OutBcastPkts", &ipext_OutBcastPkts);
+        }
+
+ if(do_ecn != CONFIG_BOOLEAN_NO) {
+ arl_expect(arl_ipext, "InNoECTPkts", &ipext_InNoECTPkts);
+ arl_expect(arl_ipext, "InECT1Pkts", &ipext_InECT1Pkts);
+ arl_expect(arl_ipext, "InECT0Pkts", &ipext_InECT0Pkts);
+ arl_expect(arl_ipext, "InCEPkts", &ipext_InCEPkts);
+ }
+
+ // --------------------------------------------------------------------
+ // IP TCP
+
+ if(do_tcpext_reorder != CONFIG_BOOLEAN_NO) {
+ arl_expect(arl_tcpext, "TCPFACKReorder", &tcpext_TCPFACKReorder);
+ arl_expect(arl_tcpext, "TCPSACKReorder", &tcpext_TCPSACKReorder);
+ arl_expect(arl_tcpext, "TCPRenoReorder", &tcpext_TCPRenoReorder);
+ arl_expect(arl_tcpext, "TCPTSReorder", &tcpext_TCPTSReorder);
+ }
+
+ if(do_tcpext_syscookies != CONFIG_BOOLEAN_NO) {
+ arl_expect(arl_tcpext, "SyncookiesSent", &tcpext_SyncookiesSent);
+ arl_expect(arl_tcpext, "SyncookiesRecv", &tcpext_SyncookiesRecv);
+ arl_expect(arl_tcpext, "SyncookiesFailed", &tcpext_SyncookiesFailed);
+ }
+
+ if(do_tcpext_ofo != CONFIG_BOOLEAN_NO) {
+ arl_expect(arl_tcpext, "TCPOFOQueue", &tcpext_TCPOFOQueue);
+ arl_expect(arl_tcpext, "TCPOFODrop", &tcpext_TCPOFODrop);
+ arl_expect(arl_tcpext, "TCPOFOMerge", &tcpext_TCPOFOMerge);
+ arl_expect(arl_tcpext, "OfoPruned", &tcpext_OfoPruned);
+ }
+
+ if(do_tcpext_connaborts != CONFIG_BOOLEAN_NO) {
+ arl_expect(arl_tcpext, "TCPAbortOnData", &tcpext_TCPAbortOnData);
+ arl_expect(arl_tcpext, "TCPAbortOnClose", &tcpext_TCPAbortOnClose);
+ arl_expect(arl_tcpext, "TCPAbortOnMemory", &tcpext_TCPAbortOnMemory);
+ arl_expect(arl_tcpext, "TCPAbortOnTimeout", &tcpext_TCPAbortOnTimeout);
+ arl_expect(arl_tcpext, "TCPAbortOnLinger", &tcpext_TCPAbortOnLinger);
+ arl_expect(arl_tcpext, "TCPAbortFailed", &tcpext_TCPAbortFailed);
+ }
+
+ if(do_tcpext_memory != CONFIG_BOOLEAN_NO) {
+ arl_expect(arl_tcpext, "TCPMemoryPressures", &tcpext_TCPMemoryPressures);
+ }
+
+ if(do_tcpext_accept_queue != CONFIG_BOOLEAN_NO) {
+ arl_expect(arl_tcpext, "ListenOverflows", &tcpext_ListenOverflows);
+ arl_expect(arl_tcpext, "ListenDrops", &tcpext_ListenDrops);
+ }
+
+ if(do_tcpext_syn_queue != CONFIG_BOOLEAN_NO) {
+ arl_expect(arl_tcpext, "TCPReqQFullDrop", &tcpext_TCPReqQFullDrop);
+ arl_expect(arl_tcpext, "TCPReqQFullDoCookies", &tcpext_TCPReqQFullDoCookies);
+ }
+
+ // shared metrics
+ arl_expect(arl_tcpext, "TCPSynRetrans", &tcpext_TCPSynRetrans);
+ }
+
+ if(unlikely(!ff)) {
+ char filename[FILENAME_MAX + 1];
+ snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/proc/net/netstat");
+ ff = procfile_open(config_get(CONFIG_SECTION_PLUGIN_PROC_NETSTAT, "filename to monitor", filename), " \t:", PROCFILE_FLAG_DEFAULT);
+ if(unlikely(!ff)) return 1;
+ }
+
+ ff = procfile_readall(ff);
+    if(unlikely(!ff)) return 0; // return 0 so that we retry opening it next time
+
+ size_t lines = procfile_lines(ff), l;
+ size_t words;
+
+ arl_begin(arl_ipext);
+ arl_begin(arl_tcpext);
+
+ for(l = 0; l < lines ;l++) {
+ char *key = procfile_lineword(ff, l, 0);
+ uint32_t hash = simple_hash(key);
+
+ if(unlikely(hash == hash_ipext && strcmp(key, "IpExt") == 0)) {
+ size_t h = l++;
+
+ words = procfile_linewords(ff, l);
+ if(unlikely(words < 2)) {
+ error("Cannot read /proc/net/netstat IpExt line. Expected 2+ params, read %zu.", words);
+ continue;
+ }
+
+ parse_line_pair(ff, arl_ipext, h, l);
+
+ // --------------------------------------------------------------------
+
+ if(do_bandwidth == CONFIG_BOOLEAN_YES || (do_bandwidth == CONFIG_BOOLEAN_AUTO &&
+ (ipext_InOctets ||
+ ipext_OutOctets ||
+ netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
+ do_bandwidth = CONFIG_BOOLEAN_YES;
+ static RRDSET *st_system_ip = NULL;
+ static RRDDIM *rd_in = NULL, *rd_out = NULL;
+
+ if(unlikely(!st_system_ip)) {
+ st_system_ip = rrdset_create_localhost(
+ "system"
+ , RRD_TYPE_NET_NETSTAT
+ , NULL
+ , "network"
+ , NULL
+ , "IP Bandwidth"
+ , "kilobits/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NETSTAT_NAME
+ , NETDATA_CHART_PRIO_SYSTEM_IP
+ , update_every
+ , RRDSET_TYPE_AREA
+ );
+
+ rd_in = rrddim_add(st_system_ip, "InOctets", "received", 8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
+ rd_out = rrddim_add(st_system_ip, "OutOctets", "sent", -8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else
+ rrdset_next(st_system_ip);
+
+ rrddim_set_by_pointer(st_system_ip, rd_in, ipext_InOctets);
+ rrddim_set_by_pointer(st_system_ip, rd_out, ipext_OutOctets);
+
+ rrdset_done(st_system_ip);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_inerrors == CONFIG_BOOLEAN_YES || (do_inerrors == CONFIG_BOOLEAN_AUTO &&
+ (ipext_InNoRoutes ||
+ ipext_InTruncatedPkts ||
+ netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
+ do_inerrors = CONFIG_BOOLEAN_YES;
+ static RRDSET *st_ip_inerrors = NULL;
+ static RRDDIM *rd_noroutes = NULL, *rd_truncated = NULL, *rd_checksum = NULL;
+
+ if(unlikely(!st_ip_inerrors)) {
+ st_ip_inerrors = rrdset_create_localhost(
+ RRD_TYPE_NET_NETSTAT
+ , "inerrors"
+ , NULL
+ , "errors"
+ , NULL
+ , "IP Input Errors"
+ , "packets/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NETSTAT_NAME
+ , NETDATA_CHART_PRIO_IP_ERRORS
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rrdset_flag_set(st_ip_inerrors, RRDSET_FLAG_DETAIL);
+
+ rd_noroutes = rrddim_add(st_ip_inerrors, "InNoRoutes", "noroutes", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_truncated = rrddim_add(st_ip_inerrors, "InTruncatedPkts", "truncated", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_checksum = rrddim_add(st_ip_inerrors, "InCsumErrors", "checksum", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else
+ rrdset_next(st_ip_inerrors);
+
+ rrddim_set_by_pointer(st_ip_inerrors, rd_noroutes, ipext_InNoRoutes);
+ rrddim_set_by_pointer(st_ip_inerrors, rd_truncated, ipext_InTruncatedPkts);
+ rrddim_set_by_pointer(st_ip_inerrors, rd_checksum, ipext_InCsumErrors);
+
+ rrdset_done(st_ip_inerrors);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_mcast == CONFIG_BOOLEAN_YES || (do_mcast == CONFIG_BOOLEAN_AUTO &&
+ (ipext_InMcastOctets ||
+ ipext_OutMcastOctets ||
+ netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
+ do_mcast = CONFIG_BOOLEAN_YES;
+ static RRDSET *st_ip_mcast = NULL;
+ static RRDDIM *rd_in = NULL, *rd_out = NULL;
+
+ if(unlikely(!st_ip_mcast)) {
+ st_ip_mcast = rrdset_create_localhost(
+ RRD_TYPE_NET_NETSTAT
+ , "mcast"
+ , NULL
+ , "multicast"
+ , NULL
+ , "IP Multicast Bandwidth"
+ , "kilobits/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NETSTAT_NAME
+ , NETDATA_CHART_PRIO_IP_MCAST
+ , update_every
+ , RRDSET_TYPE_AREA
+ );
+
+ rrdset_flag_set(st_ip_mcast, RRDSET_FLAG_DETAIL);
+
+ rd_in = rrddim_add(st_ip_mcast, "InMcastOctets", "received", 8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
+ rd_out = rrddim_add(st_ip_mcast, "OutMcastOctets", "sent", -8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else
+ rrdset_next(st_ip_mcast);
+
+ rrddim_set_by_pointer(st_ip_mcast, rd_in, ipext_InMcastOctets);
+ rrddim_set_by_pointer(st_ip_mcast, rd_out, ipext_OutMcastOctets);
+
+ rrdset_done(st_ip_mcast);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_bcast == CONFIG_BOOLEAN_YES || (do_bcast == CONFIG_BOOLEAN_AUTO &&
+ (ipext_InBcastOctets ||
+ ipext_OutBcastOctets ||
+ netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
+ do_bcast = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *st_ip_bcast = NULL;
+ static RRDDIM *rd_in = NULL, *rd_out = NULL;
+
+ if(unlikely(!st_ip_bcast)) {
+ st_ip_bcast = rrdset_create_localhost(
+ RRD_TYPE_NET_NETSTAT
+ , "bcast"
+ , NULL
+ , "broadcast"
+ , NULL
+ , "IP Broadcast Bandwidth"
+ , "kilobits/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NETSTAT_NAME
+ , NETDATA_CHART_PRIO_IP_BCAST
+ , update_every
+ , RRDSET_TYPE_AREA
+ );
+
+ rrdset_flag_set(st_ip_bcast, RRDSET_FLAG_DETAIL);
+
+ rd_in = rrddim_add(st_ip_bcast, "InBcastOctets", "received", 8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
+ rd_out = rrddim_add(st_ip_bcast, "OutBcastOctets", "sent", -8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else
+ rrdset_next(st_ip_bcast);
+
+ rrddim_set_by_pointer(st_ip_bcast, rd_in, ipext_InBcastOctets);
+ rrddim_set_by_pointer(st_ip_bcast, rd_out, ipext_OutBcastOctets);
+
+ rrdset_done(st_ip_bcast);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_mcast_p == CONFIG_BOOLEAN_YES || (do_mcast_p == CONFIG_BOOLEAN_AUTO &&
+ (ipext_InMcastPkts ||
+ ipext_OutMcastPkts ||
+ netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
+ do_mcast_p = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *st_ip_mcastpkts = NULL;
+ static RRDDIM *rd_in = NULL, *rd_out = NULL;
+
+ if(unlikely(!st_ip_mcastpkts)) {
+ st_ip_mcastpkts = rrdset_create_localhost(
+ RRD_TYPE_NET_NETSTAT
+ , "mcastpkts"
+ , NULL
+ , "multicast"
+ , NULL
+ , "IP Multicast Packets"
+ , "packets/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NETSTAT_NAME
+ , NETDATA_CHART_PRIO_IP_MCAST_PACKETS
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rrdset_flag_set(st_ip_mcastpkts, RRDSET_FLAG_DETAIL);
+
+ rd_in = rrddim_add(st_ip_mcastpkts, "InMcastPkts", "received", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_out = rrddim_add(st_ip_mcastpkts, "OutMcastPkts", "sent", -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st_ip_mcastpkts);
+
+ rrddim_set_by_pointer(st_ip_mcastpkts, rd_in, ipext_InMcastPkts);
+ rrddim_set_by_pointer(st_ip_mcastpkts, rd_out, ipext_OutMcastPkts);
+
+ rrdset_done(st_ip_mcastpkts);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_bcast_p == CONFIG_BOOLEAN_YES || (do_bcast_p == CONFIG_BOOLEAN_AUTO &&
+ (ipext_InBcastPkts ||
+ ipext_OutBcastPkts ||
+ netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
+ do_bcast_p = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *st_ip_bcastpkts = NULL;
+ static RRDDIM *rd_in = NULL, *rd_out = NULL;
+
+ if(unlikely(!st_ip_bcastpkts)) {
+ st_ip_bcastpkts = rrdset_create_localhost(
+ RRD_TYPE_NET_NETSTAT
+ , "bcastpkts"
+ , NULL
+ , "broadcast"
+ , NULL
+ , "IP Broadcast Packets"
+ , "packets/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NETSTAT_NAME
+ , NETDATA_CHART_PRIO_IP_BCAST_PACKETS
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rrdset_flag_set(st_ip_bcastpkts, RRDSET_FLAG_DETAIL);
+
+ rd_in = rrddim_add(st_ip_bcastpkts, "InBcastPkts", "received", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_out = rrddim_add(st_ip_bcastpkts, "OutBcastPkts", "sent", -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else
+ rrdset_next(st_ip_bcastpkts);
+
+ rrddim_set_by_pointer(st_ip_bcastpkts, rd_in, ipext_InBcastPkts);
+ rrddim_set_by_pointer(st_ip_bcastpkts, rd_out, ipext_OutBcastPkts);
+
+ rrdset_done(st_ip_bcastpkts);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_ecn == CONFIG_BOOLEAN_YES || (do_ecn == CONFIG_BOOLEAN_AUTO &&
+ (ipext_InCEPkts ||
+ ipext_InECT0Pkts ||
+ ipext_InECT1Pkts ||
+ ipext_InNoECTPkts ||
+ netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
+ do_ecn = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *st_ecnpkts = NULL;
+ static RRDDIM *rd_cep = NULL, *rd_noectp = NULL, *rd_ectp0 = NULL, *rd_ectp1 = NULL;
+
+ if(unlikely(!st_ecnpkts)) {
+ st_ecnpkts = rrdset_create_localhost(
+ RRD_TYPE_NET_NETSTAT
+ , "ecnpkts"
+ , NULL
+ , "ecn"
+ , NULL
+ , "IP ECN Statistics"
+ , "packets/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NETSTAT_NAME
+ , NETDATA_CHART_PRIO_IP_ECN
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rrdset_flag_set(st_ecnpkts, RRDSET_FLAG_DETAIL);
+
+ rd_cep = rrddim_add(st_ecnpkts, "InCEPkts", "CEP", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_noectp = rrddim_add(st_ecnpkts, "InNoECTPkts", "NoECTP", -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_ectp0 = rrddim_add(st_ecnpkts, "InECT0Pkts", "ECTP0", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_ectp1 = rrddim_add(st_ecnpkts, "InECT1Pkts", "ECTP1", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st_ecnpkts);
+
+ rrddim_set_by_pointer(st_ecnpkts, rd_cep, ipext_InCEPkts);
+ rrddim_set_by_pointer(st_ecnpkts, rd_noectp, ipext_InNoECTPkts);
+ rrddim_set_by_pointer(st_ecnpkts, rd_ectp0, ipext_InECT0Pkts);
+ rrddim_set_by_pointer(st_ecnpkts, rd_ectp1, ipext_InECT1Pkts);
+
+ rrdset_done(st_ecnpkts);
+ }
+ }
+ else if(unlikely(hash == hash_tcpext && strcmp(key, "TcpExt") == 0)) {
+ size_t h = l++;
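+ // /proc/net/netstat lists each protocol as a pair of lines: a header
+ // line of counter names followed by a line of values, both starting
+ // with the same key ("TcpExt:" here); 'h' keeps the header line while
+ // 'l' advances to the values line.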
+
+ words = procfile_linewords(ff, l);
+ if(unlikely(words < 2)) {
+ error("Cannot read /proc/net/netstat TcpExt line. Expected 2+ params, read %zu.", words);
+ continue;
+ }
+
+ parse_line_pair(ff, arl_tcpext, h, l);
+
+ // --------------------------------------------------------------------
+
+ if(do_tcpext_memory == CONFIG_BOOLEAN_YES || (do_tcpext_memory == CONFIG_BOOLEAN_AUTO &&
+ (tcpext_TCPMemoryPressures ||
+ netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
+ do_tcpext_memory = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *st_tcpmemorypressures = NULL;
+ static RRDDIM *rd_pressures = NULL;
+
+ if(unlikely(!st_tcpmemorypressures)) {
+ st_tcpmemorypressures = rrdset_create_localhost(
+ RRD_TYPE_NET_NETSTAT
+ , "tcpmemorypressures"
+ , NULL
+ , "tcp"
+ , NULL
+ , "TCP Memory Pressures"
+ , "events/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NETSTAT_NAME
+ , NETDATA_CHART_PRIO_IP_TCP_MEM
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rd_pressures = rrddim_add(st_tcpmemorypressures, "TCPMemoryPressures", "pressures", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else
+ rrdset_next(st_tcpmemorypressures);
+
+ rrddim_set_by_pointer(st_tcpmemorypressures, rd_pressures, tcpext_TCPMemoryPressures);
+
+ rrdset_done(st_tcpmemorypressures);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_tcpext_connaborts == CONFIG_BOOLEAN_YES || (do_tcpext_connaborts == CONFIG_BOOLEAN_AUTO &&
+ (tcpext_TCPAbortOnData ||
+ tcpext_TCPAbortOnClose ||
+ tcpext_TCPAbortOnMemory ||
+ tcpext_TCPAbortOnTimeout ||
+ tcpext_TCPAbortOnLinger ||
+ tcpext_TCPAbortFailed ||
+ netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
+ do_tcpext_connaborts = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *st_tcpconnaborts = NULL;
+ static RRDDIM *rd_baddata = NULL, *rd_userclosed = NULL, *rd_nomemory = NULL, *rd_timeout = NULL, *rd_linger = NULL, *rd_failed = NULL;
+
+ if(unlikely(!st_tcpconnaborts)) {
+ st_tcpconnaborts = rrdset_create_localhost(
+ RRD_TYPE_NET_NETSTAT
+ , "tcpconnaborts"
+ , NULL
+ , "tcp"
+ , NULL
+ , "TCP Connection Aborts"
+ , "connections/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NETSTAT_NAME
+ , NETDATA_CHART_PRIO_IP_TCP_CONNABORTS
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rd_baddata = rrddim_add(st_tcpconnaborts, "TCPAbortOnData", "baddata", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_userclosed = rrddim_add(st_tcpconnaborts, "TCPAbortOnClose", "userclosed", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_nomemory = rrddim_add(st_tcpconnaborts, "TCPAbortOnMemory", "nomemory", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_timeout = rrddim_add(st_tcpconnaborts, "TCPAbortOnTimeout", "timeout", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_linger = rrddim_add(st_tcpconnaborts, "TCPAbortOnLinger", "linger", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_failed = rrddim_add(st_tcpconnaborts, "TCPAbortFailed", "failed", -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else
+ rrdset_next(st_tcpconnaborts);
+
+ rrddim_set_by_pointer(st_tcpconnaborts, rd_baddata, tcpext_TCPAbortOnData);
+ rrddim_set_by_pointer(st_tcpconnaborts, rd_userclosed, tcpext_TCPAbortOnClose);
+ rrddim_set_by_pointer(st_tcpconnaborts, rd_nomemory, tcpext_TCPAbortOnMemory);
+ rrddim_set_by_pointer(st_tcpconnaborts, rd_timeout, tcpext_TCPAbortOnTimeout);
+ rrddim_set_by_pointer(st_tcpconnaborts, rd_linger, tcpext_TCPAbortOnLinger);
+ rrddim_set_by_pointer(st_tcpconnaborts, rd_failed, tcpext_TCPAbortFailed);
+
+ rrdset_done(st_tcpconnaborts);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_tcpext_reorder == CONFIG_BOOLEAN_YES || (do_tcpext_reorder == CONFIG_BOOLEAN_AUTO &&
+ (tcpext_TCPRenoReorder ||
+ tcpext_TCPFACKReorder ||
+ tcpext_TCPSACKReorder ||
+ tcpext_TCPTSReorder ||
+ netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
+ do_tcpext_reorder = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *st_tcpreorders = NULL;
+ static RRDDIM *rd_timestamp = NULL, *rd_sack = NULL, *rd_fack = NULL, *rd_reno = NULL;
+
+ if(unlikely(!st_tcpreorders)) {
+ st_tcpreorders = rrdset_create_localhost(
+ RRD_TYPE_NET_NETSTAT
+ , "tcpreorders"
+ , NULL
+ , "tcp"
+ , NULL
+ , "TCP Reordered Packets by Detection Method"
+ , "packets/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NETSTAT_NAME
+ , NETDATA_CHART_PRIO_IP_TCP_REORDERS
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rd_timestamp = rrddim_add(st_tcpreorders, "TCPTSReorder", "timestamp", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_sack = rrddim_add(st_tcpreorders, "TCPSACKReorder", "sack", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_fack = rrddim_add(st_tcpreorders, "TCPFACKReorder", "fack", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_reno = rrddim_add(st_tcpreorders, "TCPRenoReorder", "reno", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else
+ rrdset_next(st_tcpreorders);
+
+ rrddim_set_by_pointer(st_tcpreorders, rd_timestamp, tcpext_TCPTSReorder);
+ rrddim_set_by_pointer(st_tcpreorders, rd_sack, tcpext_TCPSACKReorder);
+ rrddim_set_by_pointer(st_tcpreorders, rd_fack, tcpext_TCPFACKReorder);
+ rrddim_set_by_pointer(st_tcpreorders, rd_reno, tcpext_TCPRenoReorder);
+
+ rrdset_done(st_tcpreorders);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_tcpext_ofo == CONFIG_BOOLEAN_YES || (do_tcpext_ofo == CONFIG_BOOLEAN_AUTO &&
+ (tcpext_TCPOFOQueue ||
+ tcpext_TCPOFODrop ||
+ tcpext_TCPOFOMerge ||
+ netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
+ do_tcpext_ofo = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *st_ip_tcpofo = NULL;
+ static RRDDIM *rd_inqueue = NULL, *rd_dropped = NULL, *rd_merged = NULL, *rd_pruned = NULL;
+
+ if(unlikely(!st_ip_tcpofo)) {
+
+ st_ip_tcpofo = rrdset_create_localhost(
+ RRD_TYPE_NET_NETSTAT
+ , "tcpofo"
+ , NULL
+ , "tcp"
+ , NULL
+ , "TCP Out-Of-Order Queue"
+ , "packets/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NETSTAT_NAME
+ , NETDATA_CHART_PRIO_IP_TCP_OFO
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rd_inqueue = rrddim_add(st_ip_tcpofo, "TCPOFOQueue", "inqueue", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_dropped = rrddim_add(st_ip_tcpofo, "TCPOFODrop", "dropped", -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_merged = rrddim_add(st_ip_tcpofo, "TCPOFOMerge", "merged", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_pruned = rrddim_add(st_ip_tcpofo, "OfoPruned", "pruned", -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else
+ rrdset_next(st_ip_tcpofo);
+
+ rrddim_set_by_pointer(st_ip_tcpofo, rd_inqueue, tcpext_TCPOFOQueue);
+ rrddim_set_by_pointer(st_ip_tcpofo, rd_dropped, tcpext_TCPOFODrop);
+ rrddim_set_by_pointer(st_ip_tcpofo, rd_merged, tcpext_TCPOFOMerge);
+ rrddim_set_by_pointer(st_ip_tcpofo, rd_pruned, tcpext_OfoPruned);
+
+ rrdset_done(st_ip_tcpofo);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_tcpext_syscookies == CONFIG_BOOLEAN_YES || (do_tcpext_syscookies == CONFIG_BOOLEAN_AUTO &&
+ (tcpext_SyncookiesSent ||
+ tcpext_SyncookiesRecv ||
+ tcpext_SyncookiesFailed ||
+ netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
+ do_tcpext_syscookies = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *st_syncookies = NULL;
+ static RRDDIM *rd_received = NULL, *rd_sent = NULL, *rd_failed = NULL;
+
+ if(unlikely(!st_syncookies)) {
+
+ st_syncookies = rrdset_create_localhost(
+ RRD_TYPE_NET_NETSTAT
+ , "tcpsyncookies"
+ , NULL
+ , "tcp"
+ , NULL
+ , "TCP SYN Cookies"
+ , "packets/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NETSTAT_NAME
+ , NETDATA_CHART_PRIO_IP_TCP_SYNCOOKIES
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rd_received = rrddim_add(st_syncookies, "SyncookiesRecv", "received", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_sent = rrddim_add(st_syncookies, "SyncookiesSent", "sent", -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_failed = rrddim_add(st_syncookies, "SyncookiesFailed", "failed", -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else
+ rrdset_next(st_syncookies);
+
+ rrddim_set_by_pointer(st_syncookies, rd_received, tcpext_SyncookiesRecv);
+ rrddim_set_by_pointer(st_syncookies, rd_sent, tcpext_SyncookiesSent);
+ rrddim_set_by_pointer(st_syncookies, rd_failed, tcpext_SyncookiesFailed);
+
+ rrdset_done(st_syncookies);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_tcpext_syn_queue == CONFIG_BOOLEAN_YES || (do_tcpext_syn_queue == CONFIG_BOOLEAN_AUTO &&
+ (tcpext_TCPReqQFullDrop ||
+ tcpext_TCPReqQFullDoCookies ||
+ netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
+ do_tcpext_syn_queue = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *st_syn_queue = NULL;
+ static RRDDIM
+ *rd_TCPReqQFullDrop = NULL,
+ *rd_TCPReqQFullDoCookies = NULL;
+
+ if(unlikely(!st_syn_queue)) {
+
+ st_syn_queue = rrdset_create_localhost(
+ RRD_TYPE_NET_NETSTAT
+ , "tcp_syn_queue"
+ , NULL
+ , "tcp"
+ , NULL
+ , "TCP SYN Queue Issues"
+ , "packets/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NETSTAT_NAME
+ , NETDATA_CHART_PRIO_IP_TCP_SYN_QUEUE
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rd_TCPReqQFullDrop = rrddim_add(st_syn_queue, "TCPReqQFullDrop", "drops", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_TCPReqQFullDoCookies = rrddim_add(st_syn_queue, "TCPReqQFullDoCookies", "cookies", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else
+ rrdset_next(st_syn_queue);
+
+ rrddim_set_by_pointer(st_syn_queue, rd_TCPReqQFullDrop, tcpext_TCPReqQFullDrop);
+ rrddim_set_by_pointer(st_syn_queue, rd_TCPReqQFullDoCookies, tcpext_TCPReqQFullDoCookies);
+
+ rrdset_done(st_syn_queue);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_tcpext_accept_queue == CONFIG_BOOLEAN_YES || (do_tcpext_accept_queue == CONFIG_BOOLEAN_AUTO &&
+ (tcpext_ListenOverflows ||
+ tcpext_ListenDrops ||
+ netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
+ do_tcpext_accept_queue = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *st_accept_queue = NULL;
+ static RRDDIM *rd_overflows = NULL,
+ *rd_drops = NULL;
+
+ if(unlikely(!st_accept_queue)) {
+
+ st_accept_queue = rrdset_create_localhost(
+ RRD_TYPE_NET_NETSTAT
+ , "tcp_accept_queue"
+ , NULL
+ , "tcp"
+ , NULL
+ , "TCP Accept Queue Issues"
+ , "packets/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NETSTAT_NAME
+ , NETDATA_CHART_PRIO_IP_TCP_ACCEPT_QUEUE
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rd_overflows = rrddim_add(st_accept_queue, "ListenOverflows", "overflows", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_drops = rrddim_add(st_accept_queue, "ListenDrops", "drops", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else
+ rrdset_next(st_accept_queue);
+
+ rrddim_set_by_pointer(st_accept_queue, rd_overflows, tcpext_ListenOverflows);
+ rrddim_set_by_pointer(st_accept_queue, rd_drops, tcpext_ListenDrops);
+
+ rrdset_done(st_accept_queue);
+ }
+
+ }
+ }
+
+ return 0;
+}
diff --git a/collectors/proc.plugin/proc_net_rpc_nfs.c b/collectors/proc.plugin/proc_net_rpc_nfs.c
new file mode 100644
index 0000000..f570285
--- /dev/null
+++ b/collectors/proc.plugin/proc_net_rpc_nfs.c
@@ -0,0 +1,454 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "plugin_proc.h"
+
+#define PLUGIN_PROC_MODULE_NFS_NAME "/proc/net/rpc/nfs"
+#define CONFIG_SECTION_PLUGIN_PROC_NFS "plugin:" PLUGIN_PROC_CONFIG_NAME ":" PLUGIN_PROC_MODULE_NFS_NAME
+
+struct nfs_procs {
+ char name[30];
+ unsigned long long value;
+ int present;
+ RRDDIM *rd;
+};
+
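+ // The tables below map the positional counters of the "proc2", "proc3"
+ // and "proc4" lines of /proc/net/rpc/nfs to named chart dimensions;
+ // each table ends with a sentinel entry that has an empty name.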
+struct nfs_procs nfs_proc2_values[] = {
+ { "null" , 0ULL, 0, NULL}
+ , {"getattr" , 0ULL, 0, NULL}
+ , {"setattr" , 0ULL, 0, NULL}
+ , {"root" , 0ULL, 0, NULL}
+ , {"lookup" , 0ULL, 0, NULL}
+ , {"readlink", 0ULL, 0, NULL}
+ , {"read" , 0ULL, 0, NULL}
+ , {"wrcache" , 0ULL, 0, NULL}
+ , {"write" , 0ULL, 0, NULL}
+ , {"create" , 0ULL, 0, NULL}
+ , {"remove" , 0ULL, 0, NULL}
+ , {"rename" , 0ULL, 0, NULL}
+ , {"link" , 0ULL, 0, NULL}
+ , {"symlink" , 0ULL, 0, NULL}
+ , {"mkdir" , 0ULL, 0, NULL}
+ , {"rmdir" , 0ULL, 0, NULL}
+ , {"readdir" , 0ULL, 0, NULL}
+ , {"fsstat" , 0ULL, 0, NULL}
+ ,
+
+ /* termination */
+ { "" , 0ULL, 0, NULL}
+};
+
+struct nfs_procs nfs_proc3_values[] = {
+ { "null" , 0ULL, 0, NULL}
+ , {"getattr" , 0ULL, 0, NULL}
+ , {"setattr" , 0ULL, 0, NULL}
+ , {"lookup" , 0ULL, 0, NULL}
+ , {"access" , 0ULL, 0, NULL}
+ , {"readlink" , 0ULL, 0, NULL}
+ , {"read" , 0ULL, 0, NULL}
+ , {"write" , 0ULL, 0, NULL}
+ , {"create" , 0ULL, 0, NULL}
+ , {"mkdir" , 0ULL, 0, NULL}
+ , {"symlink" , 0ULL, 0, NULL}
+ , {"mknod" , 0ULL, 0, NULL}
+ , {"remove" , 0ULL, 0, NULL}
+ , {"rmdir" , 0ULL, 0, NULL}
+ , {"rename" , 0ULL, 0, NULL}
+ , {"link" , 0ULL, 0, NULL}
+ , {"readdir" , 0ULL, 0, NULL}
+ , {"readdirplus", 0ULL, 0, NULL}
+ , {"fsstat" , 0ULL, 0, NULL}
+ , {"fsinfo" , 0ULL, 0, NULL}
+ , {"pathconf" , 0ULL, 0, NULL}
+ , {"commit" , 0ULL, 0, NULL}
+ ,
+
+ /* termination */
+ { "" , 0ULL, 0, NULL}
+};
+
+struct nfs_procs nfs_proc4_values[] = {
+ { "null" , 0ULL, 0, NULL}
+ , {"read" , 0ULL, 0, NULL}
+ , {"write" , 0ULL, 0, NULL}
+ , {"commit" , 0ULL, 0, NULL}
+ , {"open" , 0ULL, 0, NULL}
+ , {"open_conf" , 0ULL, 0, NULL}
+ , {"open_noat" , 0ULL, 0, NULL}
+ , {"open_dgrd" , 0ULL, 0, NULL}
+ , {"close" , 0ULL, 0, NULL}
+ , {"setattr" , 0ULL, 0, NULL}
+ , {"fsinfo" , 0ULL, 0, NULL}
+ , {"renew" , 0ULL, 0, NULL}
+ , {"setclntid" , 0ULL, 0, NULL}
+ , {"confirm" , 0ULL, 0, NULL}
+ , {"lock" , 0ULL, 0, NULL}
+ , {"lockt" , 0ULL, 0, NULL}
+ , {"locku" , 0ULL, 0, NULL}
+ , {"access" , 0ULL, 0, NULL}
+ , {"getattr" , 0ULL, 0, NULL}
+ , {"lookup" , 0ULL, 0, NULL}
+ , {"lookup_root" , 0ULL, 0, NULL}
+ , {"remove" , 0ULL, 0, NULL}
+ , {"rename" , 0ULL, 0, NULL}
+ , {"link" , 0ULL, 0, NULL}
+ , {"symlink" , 0ULL, 0, NULL}
+ , {"create" , 0ULL, 0, NULL}
+ , {"pathconf" , 0ULL, 0, NULL}
+ , {"statfs" , 0ULL, 0, NULL}
+ , {"readlink" , 0ULL, 0, NULL}
+ , {"readdir" , 0ULL, 0, NULL}
+ , {"server_caps" , 0ULL, 0, NULL}
+ , {"delegreturn" , 0ULL, 0, NULL}
+ , {"getacl" , 0ULL, 0, NULL}
+ , {"setacl" , 0ULL, 0, NULL}
+ , {"fs_locations" , 0ULL, 0, NULL}
+ , {"rel_lkowner" , 0ULL, 0, NULL}
+ , {"secinfo" , 0ULL, 0, NULL}
+ , {"fsid_present" , 0ULL, 0, NULL}
+ ,
+
+ /* nfsv4.1 client ops */
+ { "exchange_id" , 0ULL, 0, NULL}
+ , {"create_session" , 0ULL, 0, NULL}
+ , {"destroy_session" , 0ULL, 0, NULL}
+ , {"sequence" , 0ULL, 0, NULL}
+ , {"get_lease_time" , 0ULL, 0, NULL}
+ , {"reclaim_comp" , 0ULL, 0, NULL}
+ , {"layoutget" , 0ULL, 0, NULL}
+ , {"getdevinfo" , 0ULL, 0, NULL}
+ , {"layoutcommit" , 0ULL, 0, NULL}
+ , {"layoutreturn" , 0ULL, 0, NULL}
+ , {"secinfo_no" , 0ULL, 0, NULL}
+ , {"test_stateid" , 0ULL, 0, NULL}
+ , {"free_stateid" , 0ULL, 0, NULL}
+ , {"getdevicelist" , 0ULL, 0, NULL}
+ , {"bind_conn_to_ses", 0ULL, 0, NULL}
+ , {"destroy_clientid", 0ULL, 0, NULL}
+ ,
+
+ /* nfsv4.2 client ops */
+ { "seek" , 0ULL, 0, NULL}
+ , {"allocate" , 0ULL, 0, NULL}
+ , {"deallocate" , 0ULL, 0, NULL}
+ , {"layoutstats" , 0ULL, 0, NULL}
+ , {"clone" , 0ULL, 0, NULL}
+ ,
+
+ /* termination */
+ { "" , 0ULL, 0, NULL}
+};
+
+int do_proc_net_rpc_nfs(int update_every, usec_t dt) {
+ (void)dt;
+
+ static procfile *ff = NULL;
+ static int do_net = -1, do_rpc = -1, do_proc2 = -1, do_proc3 = -1, do_proc4 = -1;
+ static int proc2_warning = 0, proc3_warning = 0, proc4_warning = 0;
+
+ if(!ff) {
+ char filename[FILENAME_MAX + 1];
+ snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/proc/net/rpc/nfs");
+ ff = procfile_open(config_get(CONFIG_SECTION_PLUGIN_PROC_NFS, "filename to monitor", filename), " \t", PROCFILE_FLAG_DEFAULT);
+ }
+ if(!ff) return 1;
+
+ ff = procfile_readall(ff);
+ if(!ff) return 0; // return 0 so that we retry opening it on the next iteration
+
+ if(do_net == -1) do_net = config_get_boolean(CONFIG_SECTION_PLUGIN_PROC_NFS, "network", 1);
+ if(do_rpc == -1) do_rpc = config_get_boolean(CONFIG_SECTION_PLUGIN_PROC_NFS, "rpc", 1);
+ if(do_proc2 == -1) do_proc2 = config_get_boolean(CONFIG_SECTION_PLUGIN_PROC_NFS, "NFS v2 procedures", 1);
+ if(do_proc3 == -1) do_proc3 = config_get_boolean(CONFIG_SECTION_PLUGIN_PROC_NFS, "NFS v3 procedures", 1);
+ if(do_proc4 == -1) do_proc4 = config_get_boolean(CONFIG_SECTION_PLUGIN_PROC_NFS, "NFS v4 procedures", 1);
+
+ // if they are enabled, reset them to 1;
+ // later we set them to 2, to avoid doing strcmp() for all lines
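+ // flag convention used below: -1 = re-read the config on the next pass,
+ // 0 = disabled, 1 = enabled but no data seen yet, 2 = data found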
+ if(do_net) do_net = 1;
+ if(do_rpc) do_rpc = 1;
+ if(do_proc2) do_proc2 = 1;
+ if(do_proc3) do_proc3 = 1;
+ if(do_proc4) do_proc4 = 1;
+
+ size_t lines = procfile_lines(ff), l;
+
+ char *type;
+ unsigned long long net_count = 0, net_udp_count = 0, net_tcp_count = 0, net_tcp_connections = 0;
+ unsigned long long rpc_calls = 0, rpc_retransmits = 0, rpc_auth_refresh = 0;
+
+ for(l = 0; l < lines ;l++) {
+ size_t words = procfile_linewords(ff, l);
+ if(!words) continue;
+
+ type = procfile_lineword(ff, l, 0);
+
+ if(do_net == 1 && strcmp(type, "net") == 0) {
+ if(words < 5) {
+ error("%s line of /proc/net/rpc/nfs has %zu words, expected %d", type, words, 5);
+ continue;
+ }
+
+ net_count = str2ull(procfile_lineword(ff, l, 1));
+ net_udp_count = str2ull(procfile_lineword(ff, l, 2));
+ net_tcp_count = str2ull(procfile_lineword(ff, l, 3));
+ net_tcp_connections = str2ull(procfile_lineword(ff, l, 4));
+
+ unsigned long long sum = net_count + net_udp_count + net_tcp_count + net_tcp_connections;
+ if(sum == 0ULL) do_net = -1;
+ else do_net = 2;
+ }
+ else if(do_rpc == 1 && strcmp(type, "rpc") == 0) {
+ if(words < 4) {
+ error("%s line of /proc/net/rpc/nfs has %zu words, expected %d", type, words, 6);
+ continue;
+ }
+
+ rpc_calls = str2ull(procfile_lineword(ff, l, 1));
+ rpc_retransmits = str2ull(procfile_lineword(ff, l, 2));
+ rpc_auth_refresh = str2ull(procfile_lineword(ff, l, 3));
+
+ unsigned long long sum = rpc_calls + rpc_retransmits + rpc_auth_refresh;
+ if(sum == 0ULL) do_rpc = -1;
+ else do_rpc = 2;
+ }
+ else if(do_proc2 == 1 && strcmp(type, "proc2") == 0) {
+ // the first number is the count of numbers present
+ // so we start from word 2
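+ // e.g. a (hypothetical) line: "proc2 18 16 57 0 ..." where word 1
+ // ("18") is the field count and the counters begin at word 2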
+
+ unsigned long long sum = 0;
+ unsigned int i, j;
+ for(i = 0, j = 2; j < words && nfs_proc2_values[i].name[0] ; i++, j++) {
+ nfs_proc2_values[i].value = str2ull(procfile_lineword(ff, l, j));
+ nfs_proc2_values[i].present = 1;
+ sum += nfs_proc2_values[i].value;
+ }
+
+ if(sum == 0ULL) {
+ if(!proc2_warning) {
+ error("Disabling /proc/net/rpc/nfs v2 procedure calls chart. It seems unused on this machine. It will be enabled automatically when found with data in it.");
+ proc2_warning = 1;
+ }
+ do_proc2 = 0;
+ }
+ else do_proc2 = 2;
+ }
+ else if(do_proc3 == 1 && strcmp(type, "proc3") == 0) {
+ // the first number is the count of numbers present
+ // so we start from word 2
+
+ unsigned long long sum = 0;
+ unsigned int i, j;
+ for(i = 0, j = 2; j < words && nfs_proc3_values[i].name[0] ; i++, j++) {
+ nfs_proc3_values[i].value = str2ull(procfile_lineword(ff, l, j));
+ nfs_proc3_values[i].present = 1;
+ sum += nfs_proc3_values[i].value;
+ }
+
+ if(sum == 0ULL) {
+ if(!proc3_warning) {
+ info("Disabling /proc/net/rpc/nfs v3 procedure calls chart. It seems unused on this machine. It will be enabled automatically when found with data in it.");
+ proc3_warning = 1;
+ }
+ do_proc3 = 0;
+ }
+ else do_proc3 = 2;
+ }
+ else if(do_proc4 == 1 && strcmp(type, "proc4") == 0) {
+ // the first number is the count of numbers present
+ // so we start from word 2
+
+ unsigned long long sum = 0;
+ unsigned int i, j;
+ for(i = 0, j = 2; j < words && nfs_proc4_values[i].name[0] ; i++, j++) {
+ nfs_proc4_values[i].value = str2ull(procfile_lineword(ff, l, j));
+ nfs_proc4_values[i].present = 1;
+ sum += nfs_proc4_values[i].value;
+ }
+
+ if(sum == 0ULL) {
+ if(!proc4_warning) {
+ info("Disabling /proc/net/rpc/nfs v4 procedure calls chart. It seems unused on this machine. It will be enabled automatically when found with data in it.");
+ proc4_warning = 1;
+ }
+ do_proc4 = 0;
+ }
+ else do_proc4 = 2;
+ }
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_net == 2) {
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_udp = NULL,
+ *rd_tcp = NULL;
+
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "nfs"
+ , "net"
+ , NULL
+ , "network"
+ , NULL
+ , "NFS Client Network"
+ , "operations/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NFS_NAME
+ , NETDATA_CHART_PRIO_NFS_NET
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+
+ rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
+
+ rd_udp = rrddim_add(st, "udp", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_tcp = rrddim_add(st, "tcp", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ // ignore net_count, net_tcp_connections
+ (void)net_count;
+ (void)net_tcp_connections;
+
+ rrddim_set_by_pointer(st, rd_udp, net_udp_count);
+ rrddim_set_by_pointer(st, rd_tcp, net_tcp_count);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_rpc == 2) {
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_calls = NULL,
+ *rd_retransmits = NULL,
+ *rd_auth_refresh = NULL;
+
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "nfs"
+ , "rpc"
+ , NULL
+ , "rpc"
+ , NULL
+ , "NFS Client Remote Procedure Calls Statistics"
+ , "calls/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NFS_NAME
+ , NETDATA_CHART_PRIO_NFS_RPC
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+ rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
+
+ rd_calls = rrddim_add(st, "calls", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_retransmits = rrddim_add(st, "retransmits", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_auth_refresh = rrddim_add(st, "auth_refresh", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_calls, rpc_calls);
+ rrddim_set_by_pointer(st, rd_retransmits, rpc_retransmits);
+ rrddim_set_by_pointer(st, rd_auth_refresh, rpc_auth_refresh);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_proc2 == 2) {
+ static RRDSET *st = NULL;
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "nfs"
+ , "proc2"
+ , NULL
+ , "nfsv2rpc"
+ , NULL
+ , "NFS v2 Client Remote Procedure Calls"
+ , "calls/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NFS_NAME
+ , NETDATA_CHART_PRIO_NFS_PROC2
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+ }
+ else rrdset_next(st);
+
+ size_t i;
+ for(i = 0; nfs_proc2_values[i].present ; i++) {
+ if(unlikely(!nfs_proc2_values[i].rd))
+ nfs_proc2_values[i].rd = rrddim_add(st, nfs_proc2_values[i].name, NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+
+ rrddim_set_by_pointer(st, nfs_proc2_values[i].rd, nfs_proc2_values[i].value);
+ }
+
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_proc3 == 2) {
+ static RRDSET *st = NULL;
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "nfs"
+ , "proc3"
+ , NULL
+ , "nfsv3rpc"
+ , NULL
+ , "NFS v3 Client Remote Procedure Calls"
+ , "calls/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NFS_NAME
+ , NETDATA_CHART_PRIO_NFS_PROC3
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+ }
+ else rrdset_next(st);
+
+ size_t i;
+ for(i = 0; nfs_proc3_values[i].present ; i++) {
+ if(unlikely(!nfs_proc3_values[i].rd))
+ nfs_proc3_values[i].rd = rrddim_add(st, nfs_proc3_values[i].name, NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+
+ rrddim_set_by_pointer(st, nfs_proc3_values[i].rd, nfs_proc3_values[i].value);
+ }
+
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_proc4 == 2) {
+ static RRDSET *st = NULL;
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "nfs"
+ , "proc4"
+ , NULL
+ , "nfsv4rpc"
+ , NULL
+ , "NFS v4 Client Remote Procedure Calls"
+ , "calls/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NFS_NAME
+ , NETDATA_CHART_PRIO_NFS_PROC4
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+ }
+ else rrdset_next(st);
+
+ size_t i;
+ for(i = 0; nfs_proc4_values[i].present ; i++) {
+ if(unlikely(!nfs_proc4_values[i].rd))
+ nfs_proc4_values[i].rd = rrddim_add(st, nfs_proc4_values[i].name, NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+
+ rrddim_set_by_pointer(st, nfs_proc4_values[i].rd, nfs_proc4_values[i].value);
+ }
+
+ rrdset_done(st);
+ }
+
+ return 0;
+}
diff --git a/collectors/proc.plugin/proc_net_rpc_nfsd.c b/collectors/proc.plugin/proc_net_rpc_nfsd.c
new file mode 100644
index 0000000..29ef7a3
--- /dev/null
+++ b/collectors/proc.plugin/proc_net_rpc_nfsd.c
@@ -0,0 +1,1006 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "plugin_proc.h"
+
+#define PLUGIN_PROC_MODULE_NFSD_NAME "/proc/net/rpc/nfsd"
+
+struct nfsd_procs {
+ char name[30];
+ unsigned long long value;
+ int present;
+ RRDDIM *rd;
+};
+
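+ // As in proc_net_rpc_nfs.c, the tables below map the positional counters
+ // of the "proc2", "proc3" and "proc4" lines (here of /proc/net/rpc/nfsd)
+ // to named chart dimensions, each terminated by an empty-name sentinel.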
+struct nfsd_procs nfsd_proc2_values[] = {
+ { "null" , 0ULL, 0, NULL}
+ , {"getattr" , 0ULL, 0, NULL}
+ , {"setattr" , 0ULL, 0, NULL}
+ , {"root" , 0ULL, 0, NULL}
+ , {"lookup" , 0ULL, 0, NULL}
+ , {"readlink", 0ULL, 0, NULL}
+ , {"read" , 0ULL, 0, NULL}
+ , {"wrcache" , 0ULL, 0, NULL}
+ , {"write" , 0ULL, 0, NULL}
+ , {"create" , 0ULL, 0, NULL}
+ , {"remove" , 0ULL, 0, NULL}
+ , {"rename" , 0ULL, 0, NULL}
+ , {"link" , 0ULL, 0, NULL}
+ , {"symlink" , 0ULL, 0, NULL}
+ , {"mkdir" , 0ULL, 0, NULL}
+ , {"rmdir" , 0ULL, 0, NULL}
+ , {"readdir" , 0ULL, 0, NULL}
+ , {"fsstat" , 0ULL, 0, NULL}
+ ,
+
+ /* termination */
+ { "" , 0ULL, 0, NULL}
+};
+
+struct nfsd_procs nfsd_proc3_values[] = {
+ { "null" , 0ULL, 0, NULL}
+ , {"getattr" , 0ULL, 0, NULL}
+ , {"setattr" , 0ULL, 0, NULL}
+ , {"lookup" , 0ULL, 0, NULL}
+ , {"access" , 0ULL, 0, NULL}
+ , {"readlink" , 0ULL, 0, NULL}
+ , {"read" , 0ULL, 0, NULL}
+ , {"write" , 0ULL, 0, NULL}
+ , {"create" , 0ULL, 0, NULL}
+ , {"mkdir" , 0ULL, 0, NULL}
+ , {"symlink" , 0ULL, 0, NULL}
+ , {"mknod" , 0ULL, 0, NULL}
+ , {"remove" , 0ULL, 0, NULL}
+ , {"rmdir" , 0ULL, 0, NULL}
+ , {"rename" , 0ULL, 0, NULL}
+ , {"link" , 0ULL, 0, NULL}
+ , {"readdir" , 0ULL, 0, NULL}
+ , {"readdirplus", 0ULL, 0, NULL}
+ , {"fsstat" , 0ULL, 0, NULL}
+ , {"fsinfo" , 0ULL, 0, NULL}
+ , {"pathconf" , 0ULL, 0, NULL}
+ , {"commit" , 0ULL, 0, NULL}
+ ,
+
+ /* termination */
+ { "" , 0ULL, 0, NULL}
+};
+
+struct nfsd_procs nfsd_proc4_values[] = {
+ { "null" , 0ULL, 0, NULL}
+ , {"read" , 0ULL, 0, NULL}
+ , {"write" , 0ULL, 0, NULL}
+ , {"commit" , 0ULL, 0, NULL}
+ , {"open" , 0ULL, 0, NULL}
+ , {"open_conf" , 0ULL, 0, NULL}
+ , {"open_noat" , 0ULL, 0, NULL}
+ , {"open_dgrd" , 0ULL, 0, NULL}
+ , {"close" , 0ULL, 0, NULL}
+ , {"setattr" , 0ULL, 0, NULL}
+ , {"fsinfo" , 0ULL, 0, NULL}
+ , {"renew" , 0ULL, 0, NULL}
+ , {"setclntid" , 0ULL, 0, NULL}
+ , {"confirm" , 0ULL, 0, NULL}
+ , {"lock" , 0ULL, 0, NULL}
+ , {"lockt" , 0ULL, 0, NULL}
+ , {"locku" , 0ULL, 0, NULL}
+ , {"access" , 0ULL, 0, NULL}
+ , {"getattr" , 0ULL, 0, NULL}
+ , {"lookup" , 0ULL, 0, NULL}
+ , {"lookup_root" , 0ULL, 0, NULL}
+ , {"remove" , 0ULL, 0, NULL}
+ , {"rename" , 0ULL, 0, NULL}
+ , {"link" , 0ULL, 0, NULL}
+ , {"symlink" , 0ULL, 0, NULL}
+ , {"create" , 0ULL, 0, NULL}
+ , {"pathconf" , 0ULL, 0, NULL}
+ , {"statfs" , 0ULL, 0, NULL}
+ , {"readlink" , 0ULL, 0, NULL}
+ , {"readdir" , 0ULL, 0, NULL}
+ , {"server_caps" , 0ULL, 0, NULL}
+ , {"delegreturn" , 0ULL, 0, NULL}
+ , {"getacl" , 0ULL, 0, NULL}
+ , {"setacl" , 0ULL, 0, NULL}
+ , {"fs_locations" , 0ULL, 0, NULL}
+ , {"rel_lkowner" , 0ULL, 0, NULL}
+ , {"secinfo" , 0ULL, 0, NULL}
+ , {"fsid_present" , 0ULL, 0, NULL}
+ ,
+
+ /* nfsv4.1 ops */
+ { "exchange_id" , 0ULL, 0, NULL}
+ , {"create_session" , 0ULL, 0, NULL}
+ , {"destroy_session" , 0ULL, 0, NULL}
+ , {"sequence" , 0ULL, 0, NULL}
+ , {"get_lease_time" , 0ULL, 0, NULL}
+ , {"reclaim_comp" , 0ULL, 0, NULL}
+ , {"layoutget" , 0ULL, 0, NULL}
+ , {"getdevinfo" , 0ULL, 0, NULL}
+ , {"layoutcommit" , 0ULL, 0, NULL}
+ , {"layoutreturn" , 0ULL, 0, NULL}
+ , {"secinfo_no" , 0ULL, 0, NULL}
+ , {"test_stateid" , 0ULL, 0, NULL}
+ , {"free_stateid" , 0ULL, 0, NULL}
+ , {"getdevicelist" , 0ULL, 0, NULL}
+ , {"bind_conn_to_ses", 0ULL, 0, NULL}
+ , {"destroy_clientid", 0ULL, 0, NULL}
+ ,
+
+ /* nfsv4.2 ops */
+ { "seek" , 0ULL, 0, NULL}
+ , {"allocate" , 0ULL, 0, NULL}
+ , {"deallocate" , 0ULL, 0, NULL}
+ , {"layoutstats" , 0ULL, 0, NULL}
+ , {"clone" , 0ULL, 0, NULL}
+ ,
+
+ /* termination */
+ { "" , 0ULL, 0, NULL}
+};
+
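+ // The "proc4ops" line counts individual NFSv4 operations by operation
+ // number; the indices follow the on-the-wire numbering (operations 0-2
+ // are unused/reserved and OP_ACCESS is 3, per RFC 7530), hence the
+ // placeholder names at the head of the table.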
+struct nfsd_procs nfsd4_ops_values[] = {
+ { "unused_op0" , 0ULL, 0, NULL}
+ , {"unused_op1" , 0ULL, 0, NULL}
+ , {"future_op2" , 0ULL, 0, NULL}
+ , {"access" , 0ULL, 0, NULL}
+ , {"close" , 0ULL, 0, NULL}
+ , {"commit" , 0ULL, 0, NULL}
+ , {"create" , 0ULL, 0, NULL}
+ , {"delegpurge" , 0ULL, 0, NULL}
+ , {"delegreturn" , 0ULL, 0, NULL}
+ , {"getattr" , 0ULL, 0, NULL}
+ , {"getfh" , 0ULL, 0, NULL}
+ , {"link" , 0ULL, 0, NULL}
+ , {"lock" , 0ULL, 0, NULL}
+ , {"lockt" , 0ULL, 0, NULL}
+ , {"locku" , 0ULL, 0, NULL}
+ , {"lookup" , 0ULL, 0, NULL}
+ , {"lookup_root" , 0ULL, 0, NULL}
+ , {"nverify" , 0ULL, 0, NULL}
+ , {"open" , 0ULL, 0, NULL}
+ , {"openattr" , 0ULL, 0, NULL}
+ , {"open_confirm" , 0ULL, 0, NULL}
+ , {"open_downgrade" , 0ULL, 0, NULL}
+ , {"putfh" , 0ULL, 0, NULL}
+ , {"putpubfh" , 0ULL, 0, NULL}
+ , {"putrootfh" , 0ULL, 0, NULL}
+ , {"read" , 0ULL, 0, NULL}
+ , {"readdir" , 0ULL, 0, NULL}
+ , {"readlink" , 0ULL, 0, NULL}
+ , {"remove" , 0ULL, 0, NULL}
+ , {"rename" , 0ULL, 0, NULL}
+ , {"renew" , 0ULL, 0, NULL}
+ , {"restorefh" , 0ULL, 0, NULL}
+ , {"savefh" , 0ULL, 0, NULL}
+ , {"secinfo" , 0ULL, 0, NULL}
+ , {"setattr" , 0ULL, 0, NULL}
+ , {"setclientid" , 0ULL, 0, NULL}
+ , {"setclientid_confirm" , 0ULL, 0, NULL}
+ , {"verify" , 0ULL, 0, NULL}
+ , {"write" , 0ULL, 0, NULL}
+ , {"release_lockowner" , 0ULL, 0, NULL}
+ ,
+
+ /* nfs41 */
+ { "backchannel_ctl" , 0ULL, 0, NULL}
+ , {"bind_conn_to_session", 0ULL, 0, NULL}
+ , {"exchange_id" , 0ULL, 0, NULL}
+ , {"create_session" , 0ULL, 0, NULL}
+ , {"destroy_session" , 0ULL, 0, NULL}
+ , {"free_stateid" , 0ULL, 0, NULL}
+ , {"get_dir_delegation" , 0ULL, 0, NULL}
+ , {"getdeviceinfo" , 0ULL, 0, NULL}
+ , {"getdevicelist" , 0ULL, 0, NULL}
+ , {"layoutcommit" , 0ULL, 0, NULL}
+ , {"layoutget" , 0ULL, 0, NULL}
+ , {"layoutreturn" , 0ULL, 0, NULL}
+ , {"secinfo_no_name" , 0ULL, 0, NULL}
+ , {"sequence" , 0ULL, 0, NULL}
+ , {"set_ssv" , 0ULL, 0, NULL}
+ , {"test_stateid" , 0ULL, 0, NULL}
+ , {"want_delegation" , 0ULL, 0, NULL}
+ , {"destroy_clientid" , 0ULL, 0, NULL}
+ , {"reclaim_complete" , 0ULL, 0, NULL}
+ ,
+
+ /* nfs42 */
+ { "allocate" , 0ULL, 0, NULL}
+ , {"copy" , 0ULL, 0, NULL}
+ , {"copy_notify" , 0ULL, 0, NULL}
+ , {"deallocate" , 0ULL, 0, NULL}
+ , {"ioadvise" , 0ULL, 0, NULL}
+ , {"layouterror" , 0ULL, 0, NULL}
+ , {"layoutstats" , 0ULL, 0, NULL}
+ , {"offload_cancel" , 0ULL, 0, NULL}
+ , {"offload_status" , 0ULL, 0, NULL}
+ , {"read_plus" , 0ULL, 0, NULL}
+ , {"seek" , 0ULL, 0, NULL}
+ , {"write_same" , 0ULL, 0, NULL}
+ ,
+
+ /* termination */
+ { "" , 0ULL, 0, NULL}
+};
+
+
+int do_proc_net_rpc_nfsd(int update_every, usec_t dt) {
+ (void)dt;
+ static procfile *ff = NULL;
+ static int do_rc = -1, do_fh = -1, do_io = -1, do_th = -1, do_ra = -1, do_net = -1, do_rpc = -1, do_proc2 = -1, do_proc3 = -1, do_proc4 = -1, do_proc4ops = -1;
+ static int ra_warning = 0, th_warning = 0, proc2_warning = 0, proc3_warning = 0, proc4_warning = 0, proc4ops_warning = 0;
+
+ if(unlikely(!ff)) {
+ char filename[FILENAME_MAX + 1];
+ snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/proc/net/rpc/nfsd");
+ ff = procfile_open(config_get("plugin:proc:/proc/net/rpc/nfsd", "filename to monitor", filename), " \t", PROCFILE_FLAG_DEFAULT);
+ if(unlikely(!ff)) return 1;
+ }
+
+ ff = procfile_readall(ff);
+ if(unlikely(!ff)) return 0; // return 0 so that we retry opening it on the next iteration
+
+ if(unlikely(do_rc == -1)) {
+ do_rc = config_get_boolean("plugin:proc:/proc/net/rpc/nfsd", "read cache", 1);
+ do_fh = config_get_boolean("plugin:proc:/proc/net/rpc/nfsd", "file handles", 1);
+ do_io = config_get_boolean("plugin:proc:/proc/net/rpc/nfsd", "I/O", 1);
+ do_th = config_get_boolean("plugin:proc:/proc/net/rpc/nfsd", "threads", 1);
+ do_ra = config_get_boolean("plugin:proc:/proc/net/rpc/nfsd", "read ahead", 1);
+ do_net = config_get_boolean("plugin:proc:/proc/net/rpc/nfsd", "network", 1);
+ do_rpc = config_get_boolean("plugin:proc:/proc/net/rpc/nfsd", "rpc", 1);
+ do_proc2 = config_get_boolean("plugin:proc:/proc/net/rpc/nfsd", "NFS v2 procedures", 1);
+ do_proc3 = config_get_boolean("plugin:proc:/proc/net/rpc/nfsd", "NFS v3 procedures", 1);
+ do_proc4 = config_get_boolean("plugin:proc:/proc/net/rpc/nfsd", "NFS v4 procedures", 1);
+ do_proc4ops = config_get_boolean("plugin:proc:/proc/net/rpc/nfsd", "NFS v4 operations", 1);
+ }
+
+ // if they are enabled, reset them to 1;
+ // later we set them to 2, to avoid doing strcmp() for all lines
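+ // (same -1/0/1/2 flag convention as in proc_net_rpc_nfs.c)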
+ if(do_rc) do_rc = 1;
+ if(do_fh) do_fh = 1;
+ if(do_io) do_io = 1;
+ if(do_th) do_th = 1;
+ if(do_ra) do_ra = 1;
+ if(do_net) do_net = 1;
+ if(do_rpc) do_rpc = 1;
+ if(do_proc2) do_proc2 = 1;
+ if(do_proc3) do_proc3 = 1;
+ if(do_proc4) do_proc4 = 1;
+ if(do_proc4ops) do_proc4ops = 1;
+
+ size_t lines = procfile_lines(ff), l;
+
+ char *type;
+ unsigned long long rc_hits = 0, rc_misses = 0, rc_nocache = 0;
+ unsigned long long fh_stale = 0, fh_total_lookups = 0, fh_anonymous_lookups = 0, fh_dir_not_in_dcache = 0, fh_non_dir_not_in_dcache = 0;
+ unsigned long long io_read = 0, io_write = 0;
+ unsigned long long th_threads = 0, th_fullcnt = 0, th_hist10 = 0, th_hist20 = 0, th_hist30 = 0, th_hist40 = 0, th_hist50 = 0, th_hist60 = 0, th_hist70 = 0, th_hist80 = 0, th_hist90 = 0, th_hist100 = 0;
+ unsigned long long ra_size = 0, ra_hist10 = 0, ra_hist20 = 0, ra_hist30 = 0, ra_hist40 = 0, ra_hist50 = 0, ra_hist60 = 0, ra_hist70 = 0, ra_hist80 = 0, ra_hist90 = 0, ra_hist100 = 0, ra_none = 0;
+ unsigned long long net_count = 0, net_udp_count = 0, net_tcp_count = 0, net_tcp_connections = 0;
+ unsigned long long rpc_calls = 0, rpc_bad_format = 0, rpc_bad_auth = 0, rpc_bad_client = 0;
+
+ for(l = 0; l < lines ;l++) {
+ size_t words = procfile_linewords(ff, l);
+ if(unlikely(!words)) continue;
+
+ type = procfile_lineword(ff, l, 0);
+
+ if(do_rc == 1 && strcmp(type, "rc") == 0) {
+ if(unlikely(words < 4)) {
+ error("%s line of /proc/net/rpc/nfsd has %zu words, expected %d", type, words, 4);
+ continue;
+ }
+
+ rc_hits = str2ull(procfile_lineword(ff, l, 1));
+ rc_misses = str2ull(procfile_lineword(ff, l, 2));
+ rc_nocache = str2ull(procfile_lineword(ff, l, 3));
+
+ unsigned long long sum = rc_hits + rc_misses + rc_nocache;
+ if(sum == 0ULL) do_rc = -1;
+ else do_rc = 2;
+ }
+ else if(do_fh == 1 && strcmp(type, "fh") == 0) {
+ if(unlikely(words < 6)) {
+ error("%s line of /proc/net/rpc/nfsd has %zu words, expected %d", type, words, 6);
+ continue;
+ }
+
+ fh_stale = str2ull(procfile_lineword(ff, l, 1));
+ fh_total_lookups = str2ull(procfile_lineword(ff, l, 2));
+ fh_anonymous_lookups = str2ull(procfile_lineword(ff, l, 3));
+ fh_dir_not_in_dcache = str2ull(procfile_lineword(ff, l, 4));
+ fh_non_dir_not_in_dcache = str2ull(procfile_lineword(ff, l, 5));
+
+ unsigned long long sum = fh_stale + fh_total_lookups + fh_anonymous_lookups + fh_dir_not_in_dcache + fh_non_dir_not_in_dcache;
+ if(sum == 0ULL) do_fh = -1;
+ else do_fh = 2;
+ }
+ else if(do_io == 1 && strcmp(type, "io") == 0) {
+ if(unlikely(words < 3)) {
+ error("%s line of /proc/net/rpc/nfsd has %zu words, expected %d", type, words, 3);
+ continue;
+ }
+
+ io_read = str2ull(procfile_lineword(ff, l, 1));
+ io_write = str2ull(procfile_lineword(ff, l, 2));
+
+ unsigned long long sum = io_read + io_write;
+ if(sum == 0ULL) do_io = -1;
+ else do_io = 2;
+ }
+ else if(do_th == 1 && strcmp(type, "th") == 0) {
+ if(unlikely(words < 13)) {
+ error("%s line of /proc/net/rpc/nfsd has %zu words, expected %d", type, words, 13);
+ continue;
+ }
+
+ th_threads = str2ull(procfile_lineword(ff, l, 1));
+ th_fullcnt = str2ull(procfile_lineword(ff, l, 2));
+ th_hist10 = (unsigned long long)(atof(procfile_lineword(ff, l, 3)) * 1000.0);
+ th_hist20 = (unsigned long long)(atof(procfile_lineword(ff, l, 4)) * 1000.0);
+ th_hist30 = (unsigned long long)(atof(procfile_lineword(ff, l, 5)) * 1000.0);
+ th_hist40 = (unsigned long long)(atof(procfile_lineword(ff, l, 6)) * 1000.0);
+ th_hist50 = (unsigned long long)(atof(procfile_lineword(ff, l, 7)) * 1000.0);
+ th_hist60 = (unsigned long long)(atof(procfile_lineword(ff, l, 8)) * 1000.0);
+ th_hist70 = (unsigned long long)(atof(procfile_lineword(ff, l, 9)) * 1000.0);
+ th_hist80 = (unsigned long long)(atof(procfile_lineword(ff, l, 10)) * 1000.0);
+ th_hist90 = (unsigned long long)(atof(procfile_lineword(ff, l, 11)) * 1000.0);
+ th_hist100 = (unsigned long long)(atof(procfile_lineword(ff, l, 12)) * 1000.0);
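+
+ // the percentages are parsed as floats and kept scaled by 1000; the
+ // histogram chart dimensions divide by 1000 again, so three decimal
+ // places survive while everything stays in integer arithmetic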
+
+ // threads histogram has been disabled on recent kernels
+ // http://permalink.gmane.org/gmane.linux.nfs/24528
+ unsigned long long sum = th_hist10 + th_hist20 + th_hist30 + th_hist40 + th_hist50 + th_hist60 + th_hist70 + th_hist80 + th_hist90 + th_hist100;
+ if(sum == 0ULL) {
+ if(!th_warning) {
+ info("Disabling /proc/net/rpc/nfsd threads histogram. It seems unused on this machine. It will be enabled automatically when found with data in it.");
+ th_warning = 1;
+ }
+ do_th = -1;
+ }
+ else do_th = 2;
+ }
+ else if(do_ra == 1 && strcmp(type, "ra") == 0) {
+ if(unlikely(words < 13)) {
+ error("%s line of /proc/net/rpc/nfsd has %zu words, expected %d", type, words, 13);
+ continue;
+ }
+
+ ra_size = str2ull(procfile_lineword(ff, l, 1));
+ ra_hist10 = str2ull(procfile_lineword(ff, l, 2));
+ ra_hist20 = str2ull(procfile_lineword(ff, l, 3));
+ ra_hist30 = str2ull(procfile_lineword(ff, l, 4));
+ ra_hist40 = str2ull(procfile_lineword(ff, l, 5));
+ ra_hist50 = str2ull(procfile_lineword(ff, l, 6));
+ ra_hist60 = str2ull(procfile_lineword(ff, l, 7));
+ ra_hist70 = str2ull(procfile_lineword(ff, l, 8));
+ ra_hist80 = str2ull(procfile_lineword(ff, l, 9));
+ ra_hist90 = str2ull(procfile_lineword(ff, l, 10));
+ ra_hist100 = str2ull(procfile_lineword(ff, l, 11));
+ ra_none = str2ull(procfile_lineword(ff, l, 12));
+
+ unsigned long long sum = ra_hist10 + ra_hist20 + ra_hist30 + ra_hist40 + ra_hist50 + ra_hist60 + ra_hist70 + ra_hist80 + ra_hist90 + ra_hist100 + ra_none;
+ if(sum == 0ULL) {
+ if(!ra_warning) {
+ info("Disabling /proc/net/rpc/nfsd read ahead histogram. It seems unused on this machine. It will be enabled automatically when found with data in it.");
+ ra_warning = 1;
+ }
+ do_ra = -1;
+ }
+ else do_ra = 2;
+ }
+ else if(do_net == 1 && strcmp(type, "net") == 0) {
+ if(unlikely(words < 5)) {
+ error("%s line of /proc/net/rpc/nfsd has %zu words, expected %d", type, words, 5);
+ continue;
+ }
+
+ net_count = str2ull(procfile_lineword(ff, l, 1));
+ net_udp_count = str2ull(procfile_lineword(ff, l, 2));
+ net_tcp_count = str2ull(procfile_lineword(ff, l, 3));
+ net_tcp_connections = str2ull(procfile_lineword(ff, l, 4));
+
+ unsigned long long sum = net_count + net_udp_count + net_tcp_count + net_tcp_connections;
+ if(sum == 0ULL) do_net = -1;
+ else do_net = 2;
+ }
+ else if(do_rpc == 1 && strcmp(type, "rpc") == 0) {
+ if(unlikely(words < 6)) {
+ error("%s line of /proc/net/rpc/nfsd has %zu words, expected %d", type, words, 6);
+ continue;
+ }
+
+ rpc_calls = str2ull(procfile_lineword(ff, l, 1));
+ rpc_bad_format = str2ull(procfile_lineword(ff, l, 2));
+ rpc_bad_auth = str2ull(procfile_lineword(ff, l, 3));
+ rpc_bad_client = str2ull(procfile_lineword(ff, l, 4));
+
+ unsigned long long sum = rpc_calls + rpc_bad_format + rpc_bad_auth + rpc_bad_client;
+ if(sum == 0ULL) do_rpc = -1;
+ else do_rpc = 2;
+ }
+ else if(do_proc2 == 1 && strcmp(type, "proc2") == 0) {
+ // the first number is the count of numbers present
+ // so we start from word 2
+
+ unsigned long long sum = 0;
+ unsigned int i, j;
+ for(i = 0, j = 2; j < words && nfsd_proc2_values[i].name[0] ; i++, j++) {
+ nfsd_proc2_values[i].value = str2ull(procfile_lineword(ff, l, j));
+ nfsd_proc2_values[i].present = 1;
+ sum += nfsd_proc2_values[i].value;
+ }
+
+ if(sum == 0ULL) {
+ if(!proc2_warning) {
+ error("Disabling /proc/net/rpc/nfsd v2 procedure calls chart. It seems unused on this machine. It will be enabled automatically when found with data in it.");
+ proc2_warning = 1;
+ }
+ do_proc2 = 0;
+ }
+ else do_proc2 = 2;
+ }
+ else if(do_proc3 == 1 && strcmp(type, "proc3") == 0) {
+ // the first number is the count of numbers present
+ // so we start from word 2
+
+ unsigned long long sum = 0;
+ unsigned int i, j;
+ for(i = 0, j = 2; j < words && nfsd_proc3_values[i].name[0] ; i++, j++) {
+ nfsd_proc3_values[i].value = str2ull(procfile_lineword(ff, l, j));
+ nfsd_proc3_values[i].present = 1;
+ sum += nfsd_proc3_values[i].value;
+ }
+
+ if(sum == 0ULL) {
+ if(!proc3_warning) {
+ info("Disabling /proc/net/rpc/nfsd v3 procedure calls chart. It seems unused on this machine. It will be enabled automatically when found with data in it.");
+ proc3_warning = 1;
+ }
+ do_proc3 = 0;
+ }
+ else do_proc3 = 2;
+ }
+ else if(do_proc4 == 1 && strcmp(type, "proc4") == 0) {
+ // the first number is the count of numbers present
+ // so we start from word 2
+
+ unsigned long long sum = 0;
+ unsigned int i, j;
+ for(i = 0, j = 2; j < words && nfsd_proc4_values[i].name[0] ; i++, j++) {
+ nfsd_proc4_values[i].value = str2ull(procfile_lineword(ff, l, j));
+ nfsd_proc4_values[i].present = 1;
+ sum += nfsd_proc4_values[i].value;
+ }
+
+ if(sum == 0ULL) {
+ if(!proc4_warning) {
+ info("Disabling /proc/net/rpc/nfsd v4 procedure calls chart. It seems unused on this machine. It will be enabled automatically when found with data in it.");
+ proc4_warning = 1;
+ }
+ do_proc4 = 0;
+ }
+ else do_proc4 = 2;
+ }
+ else if(do_proc4ops == 1 && strcmp(type, "proc4ops") == 0) {
+ // the first number is the count of numbers present
+ // so we start from word 2
+
+ unsigned long long sum = 0;
+ unsigned int i, j;
+ for(i = 0, j = 2; j < words && nfsd4_ops_values[i].name[0] ; i++, j++) {
+ nfsd4_ops_values[i].value = str2ull(procfile_lineword(ff, l, j));
+ nfsd4_ops_values[i].present = 1;
+ sum += nfsd4_ops_values[i].value;
+ }
+
+ if(sum == 0ULL) {
+ if(!proc4ops_warning) {
+ info("Disabling /proc/net/rpc/nfsd v4 operations chart. It seems unused on this machine. It will be enabled automatically when found with data in it.");
+ proc4ops_warning = 1;
+ }
+ do_proc4ops = 0;
+ }
+ else do_proc4ops = 2;
+ }
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_rc == 2) {
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_hits = NULL,
+ *rd_misses = NULL,
+ *rd_nocache = NULL;
+
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "nfsd"
+ , "readcache"
+ , NULL
+ , "cache"
+ , NULL
+ , "NFS Server Read Cache"
+ , "reads/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NFSD_NAME
+ , NETDATA_CHART_PRIO_NFSD_READCACHE
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+
+ rd_hits = rrddim_add(st, "hits", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_misses = rrddim_add(st, "misses", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_nocache = rrddim_add(st, "nocache", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_hits, rc_hits);
+ rrddim_set_by_pointer(st, rd_misses, rc_misses);
+ rrddim_set_by_pointer(st, rd_nocache, rc_nocache);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_fh == 2) {
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_stale = NULL,
+ *rd_total_lookups = NULL,
+ *rd_anonymous_lookups = NULL,
+ *rd_dir_not_in_dcache = NULL,
+ *rd_non_dir_not_in_dcache = NULL;
+
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "nfsd"
+ , "filehandles"
+ , NULL
+ , "filehandles"
+ , NULL
+ , "NFS Server File Handles"
+ , "handles/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NFSD_NAME
+ , NETDATA_CHART_PRIO_NFSD_FILEHANDLES
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+ rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
+
+ rd_stale = rrddim_add(st, "stale", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ rd_total_lookups = rrddim_add(st, "total_lookups", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_anonymous_lookups = rrddim_add(st, "anonymous_lookups", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_dir_not_in_dcache = rrddim_add(st, "dir_not_in_dcache", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_non_dir_not_in_dcache = rrddim_add(st, "non_dir_not_in_dcache", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_stale, fh_stale);
+ rrddim_set_by_pointer(st, rd_total_lookups, fh_total_lookups);
+ rrddim_set_by_pointer(st, rd_anonymous_lookups, fh_anonymous_lookups);
+ rrddim_set_by_pointer(st, rd_dir_not_in_dcache, fh_dir_not_in_dcache);
+ rrddim_set_by_pointer(st, rd_non_dir_not_in_dcache, fh_non_dir_not_in_dcache);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_io == 2) {
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_read = NULL,
+ *rd_write = NULL;
+
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "nfsd"
+ , "io"
+ , NULL
+ , "io"
+ , NULL
+ , "NFS Server I/O"
+ , "kilobytes/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NFSD_NAME
+ , NETDATA_CHART_PRIO_NFSD_IO
+ , update_every
+ , RRDSET_TYPE_AREA
+ );
+
+ rd_read = rrddim_add(st, "read", NULL, 1, 1000, RRD_ALGORITHM_INCREMENTAL);
+ rd_write = rrddim_add(st, "write", NULL, -1, 1000, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_read, io_read);
+ rrddim_set_by_pointer(st, rd_write, io_write);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_th == 2) {
+ {
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_threads = NULL;
+
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "nfsd"
+ , "threads"
+ , NULL
+ , "threads"
+ , NULL
+ , "NFS Server Threads"
+ , "threads"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NFSD_NAME
+ , NETDATA_CHART_PRIO_NFSD_THREADS
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rd_threads = rrddim_add(st, "threads", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_threads, th_threads);
+ rrdset_done(st);
+ }
+
+ {
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_full_count = NULL;
+
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "nfsd"
+ , "threads_fullcnt"
+ , NULL
+ , "threads"
+ , NULL
+ , "NFS Server Threads Full Count"
+ , "events"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NFSD_NAME
+ , NETDATA_CHART_PRIO_NFSD_THREADS_FULLCNT
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rd_full_count = rrddim_add(st, "full_count", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_full_count, th_fullcnt);
+ rrdset_done(st);
+ }
+
+ {
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_th_hist10 = NULL,
+ *rd_th_hist20 = NULL,
+ *rd_th_hist30 = NULL,
+ *rd_th_hist40 = NULL,
+ *rd_th_hist50 = NULL,
+ *rd_th_hist60 = NULL,
+ *rd_th_hist70 = NULL,
+ *rd_th_hist80 = NULL,
+ *rd_th_hist90 = NULL,
+ *rd_th_hist100 = NULL;
+
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "nfsd"
+ , "threads_histogram"
+ , NULL
+ , "threads"
+ , NULL
+ , "NFS Server Threads Usage Histogram"
+ , "percentage"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NFSD_NAME
+ , NETDATA_CHART_PRIO_NFSD_THREADS_HISTOGRAM
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rd_th_hist10 = rrddim_add(st, "0%-10%", NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE);
+ rd_th_hist20 = rrddim_add(st, "10%-20%", NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE);
+ rd_th_hist30 = rrddim_add(st, "20%-30%", NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE);
+ rd_th_hist40 = rrddim_add(st, "30%-40%", NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE);
+ rd_th_hist50 = rrddim_add(st, "40%-50%", NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE);
+ rd_th_hist60 = rrddim_add(st, "50%-60%", NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE);
+ rd_th_hist70 = rrddim_add(st, "60%-70%", NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE);
+ rd_th_hist80 = rrddim_add(st, "70%-80%", NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE);
+ rd_th_hist90 = rrddim_add(st, "80%-90%", NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE);
+ rd_th_hist100 = rrddim_add(st, "90%-100%", NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_th_hist10, th_hist10);
+ rrddim_set_by_pointer(st, rd_th_hist20, th_hist20);
+ rrddim_set_by_pointer(st, rd_th_hist30, th_hist30);
+ rrddim_set_by_pointer(st, rd_th_hist40, th_hist40);
+ rrddim_set_by_pointer(st, rd_th_hist50, th_hist50);
+ rrddim_set_by_pointer(st, rd_th_hist60, th_hist60);
+ rrddim_set_by_pointer(st, rd_th_hist70, th_hist70);
+ rrddim_set_by_pointer(st, rd_th_hist80, th_hist80);
+ rrddim_set_by_pointer(st, rd_th_hist90, th_hist90);
+ rrddim_set_by_pointer(st, rd_th_hist100, th_hist100);
+ rrdset_done(st);
+ }
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_ra == 2) {
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_ra_hist10 = NULL,
+ *rd_ra_hist20 = NULL,
+ *rd_ra_hist30 = NULL,
+ *rd_ra_hist40 = NULL,
+ *rd_ra_hist50 = NULL,
+ *rd_ra_hist60 = NULL,
+ *rd_ra_hist70 = NULL,
+ *rd_ra_hist80 = NULL,
+ *rd_ra_hist90 = NULL,
+ *rd_ra_hist100 = NULL,
+ *rd_ra_none = NULL;
+
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "nfsd"
+ , "readahead"
+ , NULL
+ , "readahead"
+ , NULL
+ , "NFS Server Read Ahead Depth"
+ , "percentage"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NFSD_NAME
+ , NETDATA_CHART_PRIO_NFSD_READAHEAD
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+
+ rd_ra_hist10 = rrddim_add(st, "10%", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
+ rd_ra_hist20 = rrddim_add(st, "20%", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
+ rd_ra_hist30 = rrddim_add(st, "30%", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
+ rd_ra_hist40 = rrddim_add(st, "40%", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
+ rd_ra_hist50 = rrddim_add(st, "50%", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
+ rd_ra_hist60 = rrddim_add(st, "60%", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
+ rd_ra_hist70 = rrddim_add(st, "70%", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
+ rd_ra_hist80 = rrddim_add(st, "80%", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
+ rd_ra_hist90 = rrddim_add(st, "90%", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
+ rd_ra_hist100 = rrddim_add(st, "100%", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
+ rd_ra_none = rrddim_add(st, "misses", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
+ }
+ else rrdset_next(st);
+
+ // ignore ra_size
+ (void)ra_size;
+
+ rrddim_set_by_pointer(st, rd_ra_hist10, ra_hist10);
+ rrddim_set_by_pointer(st, rd_ra_hist20, ra_hist20);
+ rrddim_set_by_pointer(st, rd_ra_hist30, ra_hist30);
+ rrddim_set_by_pointer(st, rd_ra_hist40, ra_hist40);
+ rrddim_set_by_pointer(st, rd_ra_hist50, ra_hist50);
+ rrddim_set_by_pointer(st, rd_ra_hist60, ra_hist60);
+ rrddim_set_by_pointer(st, rd_ra_hist70, ra_hist70);
+ rrddim_set_by_pointer(st, rd_ra_hist80, ra_hist80);
+ rrddim_set_by_pointer(st, rd_ra_hist90, ra_hist90);
+ rrddim_set_by_pointer(st, rd_ra_hist100,ra_hist100);
+ rrddim_set_by_pointer(st, rd_ra_none, ra_none);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_net == 2) {
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_udp = NULL,
+ *rd_tcp = NULL;
+
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "nfsd"
+ , "net"
+ , NULL
+ , "network"
+ , NULL
+ , "NFS Server Network Statistics"
+ , "packets/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NFSD_NAME
+ , NETDATA_CHART_PRIO_NFSD_NET
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+ rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
+
+ rd_udp = rrddim_add(st, "udp", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_tcp = rrddim_add(st, "tcp", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ // ignore net_count, net_tcp_connections
+ (void)net_count;
+ (void)net_tcp_connections;
+
+ rrddim_set_by_pointer(st, rd_udp, net_udp_count);
+ rrddim_set_by_pointer(st, rd_tcp, net_tcp_count);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_rpc == 2) {
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_calls = NULL,
+ *rd_bad_format = NULL,
+ *rd_bad_auth = NULL;
+
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "nfsd"
+ , "rpc"
+ , NULL
+ , "rpc"
+ , NULL
+ , "NFS Server Remote Procedure Calls Statistics"
+ , "calls/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NFSD_NAME
+ , NETDATA_CHART_PRIO_NFSD_RPC
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+ rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
+
+ rd_calls = rrddim_add(st, "calls", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_bad_format = rrddim_add(st, "bad_format", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_bad_auth = rrddim_add(st, "bad_auth", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ // ignore rpc_bad_client
+ (void)rpc_bad_client;
+
+ rrddim_set_by_pointer(st, rd_calls, rpc_calls);
+ rrddim_set_by_pointer(st, rd_bad_format, rpc_bad_format);
+ rrddim_set_by_pointer(st, rd_bad_auth, rpc_bad_auth);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_proc2 == 2) {
+ static RRDSET *st = NULL;
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "nfsd"
+ , "proc2"
+ , NULL
+ , "nfsv2rpc"
+ , NULL
+ , "NFS v2 Server Remote Procedure Calls"
+ , "calls/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NFSD_NAME
+ , NETDATA_CHART_PRIO_NFSD_PROC2
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+ }
+ else rrdset_next(st);
+
+ size_t i;
+ for(i = 0; nfsd_proc2_values[i].present ; i++) {
+ if(unlikely(!nfsd_proc2_values[i].rd))
+ nfsd_proc2_values[i].rd = rrddim_add(st, nfsd_proc2_values[i].name, NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+
+ rrddim_set_by_pointer(st, nfsd_proc2_values[i].rd, nfsd_proc2_values[i].value);
+ }
+
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_proc3 == 2) {
+ static RRDSET *st = NULL;
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "nfsd"
+ , "proc3"
+ , NULL
+ , "nfsv3rpc"
+ , NULL
+ , "NFS v3 Server Remote Procedure Calls"
+ , "calls/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NFSD_NAME
+ , NETDATA_CHART_PRIO_NFSD_PROC3
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+ }
+ else rrdset_next(st);
+
+ size_t i;
+ for(i = 0; nfsd_proc3_values[i].present ; i++) {
+ if(unlikely(!nfsd_proc3_values[i].rd))
+ nfsd_proc3_values[i].rd = rrddim_add(st, nfsd_proc3_values[i].name, NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+
+ rrddim_set_by_pointer(st, nfsd_proc3_values[i].rd, nfsd_proc3_values[i].value);
+ }
+
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_proc4 == 2) {
+ static RRDSET *st = NULL;
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "nfsd"
+ , "proc4"
+ , NULL
+ , "nfsv4rpc"
+ , NULL
+ , "NFS v4 Server Remote Procedure Calls"
+ , "calls/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NFSD_NAME
+ , NETDATA_CHART_PRIO_NFSD_PROC4
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+ }
+ else rrdset_next(st);
+
+ size_t i;
+ for(i = 0; nfsd_proc4_values[i].present ; i++) {
+ if(unlikely(!nfsd_proc4_values[i].rd))
+ nfsd_proc4_values[i].rd = rrddim_add(st, nfsd_proc4_values[i].name, NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+
+ rrddim_set_by_pointer(st, nfsd_proc4_values[i].rd, nfsd_proc4_values[i].value);
+ }
+
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_proc4ops == 2) {
+ static RRDSET *st = NULL;
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "nfsd"
+ , "proc4ops"
+ , NULL
+ , "nfsv2ops"
+ , NULL
+ , "NFS v4 Server Operations"
+ , "operations/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NFSD_NAME
+ , NETDATA_CHART_PRIO_NFSD_PROC4OPS
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+ }
+ else rrdset_next(st);
+
+ size_t i;
+ for(i = 0; nfsd4_ops_values[i].present ; i++) {
+ if(unlikely(!nfsd4_ops_values[i].rd))
+ nfsd4_ops_values[i].rd = rrddim_add(st, nfsd4_ops_values[i].name, NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+
+ rrddim_set_by_pointer(st, nfsd4_ops_values[i].rd, nfsd4_ops_values[i].value);
+ }
+
+ rrdset_done(st);
+ }
+
+ return 0;
+}
diff --git a/collectors/proc.plugin/proc_net_sctp_snmp.c b/collectors/proc.plugin/proc_net_sctp_snmp.c
new file mode 100644
index 0000000..343cc5a
--- /dev/null
+++ b/collectors/proc.plugin/proc_net_sctp_snmp.c
@@ -0,0 +1,373 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "plugin_proc.h"
+#define PLUGIN_PROC_MODULE_NET_SCTP_SNMP_NAME "/proc/net/sctp/snmp"
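+
+// the file is expected to carry one "Name<whitespace>Value" pair per
+// line, e.g.:
+//
+//     SctpCurrEstab                       4
+//     SctpActiveEstabs                    12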
+
+int do_proc_net_sctp_snmp(int update_every, usec_t dt) {
+ (void)dt;
+
+ static procfile *ff = NULL;
+
+ static int
+ do_associations = -1,
+ do_transitions = -1,
+ do_packet_errors = -1,
+ do_packets = -1,
+ do_fragmentation = -1,
+ do_chunk_types = -1;
+
+ static ARL_BASE *arl_base = NULL;
+
+ static unsigned long long SctpCurrEstab = 0ULL;
+ static unsigned long long SctpActiveEstabs = 0ULL;
+ static unsigned long long SctpPassiveEstabs = 0ULL;
+ static unsigned long long SctpAborteds = 0ULL;
+ static unsigned long long SctpShutdowns = 0ULL;
+ static unsigned long long SctpOutOfBlues = 0ULL;
+ static unsigned long long SctpChecksumErrors = 0ULL;
+ static unsigned long long SctpOutCtrlChunks = 0ULL;
+ static unsigned long long SctpOutOrderChunks = 0ULL;
+ static unsigned long long SctpOutUnorderChunks = 0ULL;
+ static unsigned long long SctpInCtrlChunks = 0ULL;
+ static unsigned long long SctpInOrderChunks = 0ULL;
+ static unsigned long long SctpInUnorderChunks = 0ULL;
+ static unsigned long long SctpFragUsrMsgs = 0ULL;
+ static unsigned long long SctpReasmUsrMsgs = 0ULL;
+ static unsigned long long SctpOutSCTPPacks = 0ULL;
+ static unsigned long long SctpInSCTPPacks = 0ULL;
+ static unsigned long long SctpT1InitExpireds = 0ULL;
+ static unsigned long long SctpT1CookieExpireds = 0ULL;
+ static unsigned long long SctpT2ShutdownExpireds = 0ULL;
+ static unsigned long long SctpT3RtxExpireds = 0ULL;
+ static unsigned long long SctpT4RtoExpireds = 0ULL;
+ static unsigned long long SctpT5ShutdownGuardExpireds = 0ULL;
+ static unsigned long long SctpDelaySackExpireds = 0ULL;
+ static unsigned long long SctpAutocloseExpireds = 0ULL;
+ static unsigned long long SctpT3Retransmits = 0ULL;
+ static unsigned long long SctpPmtudRetransmits = 0ULL;
+ static unsigned long long SctpFastRetransmits = 0ULL;
+ static unsigned long long SctpInPktSoftirq = 0ULL;
+ static unsigned long long SctpInPktBacklog = 0ULL;
+ static unsigned long long SctpInPktDiscards = 0ULL;
+ static unsigned long long SctpInDataChunkDiscards = 0ULL;
+
+ if(unlikely(!arl_base)) {
+ do_associations = config_get_boolean_ondemand("plugin:proc:/proc/net/sctp/snmp", "established associations", CONFIG_BOOLEAN_AUTO);
+ do_transitions = config_get_boolean_ondemand("plugin:proc:/proc/net/sctp/snmp", "association transitions", CONFIG_BOOLEAN_AUTO);
+ do_fragmentation = config_get_boolean_ondemand("plugin:proc:/proc/net/sctp/snmp", "fragmentation", CONFIG_BOOLEAN_AUTO);
+ do_packets = config_get_boolean_ondemand("plugin:proc:/proc/net/sctp/snmp", "packets", CONFIG_BOOLEAN_AUTO);
+ do_packet_errors = config_get_boolean_ondemand("plugin:proc:/proc/net/sctp/snmp", "packet errors", CONFIG_BOOLEAN_AUTO);
+ do_chunk_types = config_get_boolean_ondemand("plugin:proc:/proc/net/sctp/snmp", "chunk types", CONFIG_BOOLEAN_AUTO);
+
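+        // ARL = Adaptive Re-sortable List: it binds each expected keyword
+        // to the address of the variable that should receive its value and
+        // keeps the entries in the order they appear in the file; the 60 is
+        // believed to be the number of iterations between re-checks of that
+        // order.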
+ arl_base = arl_create("sctp", NULL, 60);
+ arl_expect(arl_base, "SctpCurrEstab", &SctpCurrEstab);
+ arl_expect(arl_base, "SctpActiveEstabs", &SctpActiveEstabs);
+ arl_expect(arl_base, "SctpPassiveEstabs", &SctpPassiveEstabs);
+ arl_expect(arl_base, "SctpAborteds", &SctpAborteds);
+ arl_expect(arl_base, "SctpShutdowns", &SctpShutdowns);
+ arl_expect(arl_base, "SctpOutOfBlues", &SctpOutOfBlues);
+ arl_expect(arl_base, "SctpChecksumErrors", &SctpChecksumErrors);
+ arl_expect(arl_base, "SctpOutCtrlChunks", &SctpOutCtrlChunks);
+ arl_expect(arl_base, "SctpOutOrderChunks", &SctpOutOrderChunks);
+ arl_expect(arl_base, "SctpOutUnorderChunks", &SctpOutUnorderChunks);
+ arl_expect(arl_base, "SctpInCtrlChunks", &SctpInCtrlChunks);
+ arl_expect(arl_base, "SctpInOrderChunks", &SctpInOrderChunks);
+ arl_expect(arl_base, "SctpInUnorderChunks", &SctpInUnorderChunks);
+ arl_expect(arl_base, "SctpFragUsrMsgs", &SctpFragUsrMsgs);
+ arl_expect(arl_base, "SctpReasmUsrMsgs", &SctpReasmUsrMsgs);
+ arl_expect(arl_base, "SctpOutSCTPPacks", &SctpOutSCTPPacks);
+ arl_expect(arl_base, "SctpInSCTPPacks", &SctpInSCTPPacks);
+ arl_expect(arl_base, "SctpT1InitExpireds", &SctpT1InitExpireds);
+ arl_expect(arl_base, "SctpT1CookieExpireds", &SctpT1CookieExpireds);
+ arl_expect(arl_base, "SctpT2ShutdownExpireds", &SctpT2ShutdownExpireds);
+ arl_expect(arl_base, "SctpT3RtxExpireds", &SctpT3RtxExpireds);
+ arl_expect(arl_base, "SctpT4RtoExpireds", &SctpT4RtoExpireds);
+ arl_expect(arl_base, "SctpT5ShutdownGuardExpireds", &SctpT5ShutdownGuardExpireds);
+ arl_expect(arl_base, "SctpDelaySackExpireds", &SctpDelaySackExpireds);
+ arl_expect(arl_base, "SctpAutocloseExpireds", &SctpAutocloseExpireds);
+ arl_expect(arl_base, "SctpT3Retransmits", &SctpT3Retransmits);
+ arl_expect(arl_base, "SctpPmtudRetransmits", &SctpPmtudRetransmits);
+ arl_expect(arl_base, "SctpFastRetransmits", &SctpFastRetransmits);
+ arl_expect(arl_base, "SctpInPktSoftirq", &SctpInPktSoftirq);
+ arl_expect(arl_base, "SctpInPktBacklog", &SctpInPktBacklog);
+ arl_expect(arl_base, "SctpInPktDiscards", &SctpInPktDiscards);
+ arl_expect(arl_base, "SctpInDataChunkDiscards", &SctpInDataChunkDiscards);
+ }
+
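+    // netdata_configured_host_prefix lets the collector read the host's
+    // /proc when running inside a container (e.g. /host/proc/net/sctp/snmp)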
+ if(unlikely(!ff)) {
+ char filename[FILENAME_MAX + 1];
+ snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/proc/net/sctp/snmp");
+ ff = procfile_open(config_get("plugin:proc:/proc/net/sctp/snmp", "filename to monitor", filename), " \t:", PROCFILE_FLAG_DEFAULT);
+ if(unlikely(!ff))
+ return 1;
+ }
+
+ ff = procfile_readall(ff);
+ if(unlikely(!ff))
+        return 0; // stay enabled and retry opening the file on the next iteration
+
+ size_t lines = procfile_lines(ff), l;
+
+ arl_begin(arl_base);
+
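+    // each line is a name/value pair; arl_check() matches it against the
+    // expected list and, as used here, returns non-zero once everything
+    // expected has been collected, so parsing can stop early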
+ for(l = 0; l < lines ;l++) {
+ size_t words = procfile_linewords(ff, l);
+ if(unlikely(words < 2)) {
+ if(unlikely(words)) error("Cannot read /proc/net/sctp/snmp line %zu. Expected 2 params, read %zu.", l, words);
+ continue;
+ }
+
+ if(unlikely(arl_check(arl_base,
+ procfile_lineword(ff, l, 0),
+ procfile_lineword(ff, l, 1)))) break;
+ }
+
+ // --------------------------------------------------------------------
+
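+    // pattern used for every chart below: when the option is YES the chart
+    // is always sent; when it is AUTO it is sent once a non-zero value has
+    // been seen (or zero metrics are explicitly enabled), and the flag is
+    // then latched to YES so the chart does not disappear again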
+ if(do_associations == CONFIG_BOOLEAN_YES || (do_associations == CONFIG_BOOLEAN_AUTO &&
+ (SctpCurrEstab || netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
+ do_associations = CONFIG_BOOLEAN_YES;
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_established = NULL;
+
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "sctp"
+ , "established"
+ , NULL
+ , "associations"
+ , NULL
+ , "SCTP current total number of established associations"
+ , "associations"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NET_SCTP_SNMP_NAME
+ , NETDATA_CHART_PRIO_SCTP
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rd_established = rrddim_add(st, "SctpCurrEstab", "established", 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_established, SctpCurrEstab);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_transitions == CONFIG_BOOLEAN_YES || (do_transitions == CONFIG_BOOLEAN_AUTO &&
+ (SctpActiveEstabs ||
+ SctpPassiveEstabs ||
+ SctpAborteds ||
+ SctpShutdowns ||
+ netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
+ do_transitions = CONFIG_BOOLEAN_YES;
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_active = NULL,
+ *rd_passive = NULL,
+ *rd_aborted = NULL,
+ *rd_shutdown = NULL;
+
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "sctp"
+ , "transitions"
+ , NULL
+ , "transitions"
+ , NULL
+ , "SCTP Association Transitions"
+ , "transitions/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NET_SCTP_SNMP_NAME
+ , NETDATA_CHART_PRIO_SCTP + 10
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rd_active = rrddim_add(st, "SctpActiveEstabs", "active", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_passive = rrddim_add(st, "SctpPassiveEstabs", "passive", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_aborted = rrddim_add(st, "SctpAborteds", "aborted", -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_shutdown = rrddim_add(st, "SctpShutdowns", "shutdown", -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_active, SctpActiveEstabs);
+ rrddim_set_by_pointer(st, rd_passive, SctpPassiveEstabs);
+ rrddim_set_by_pointer(st, rd_aborted, SctpAborteds);
+ rrddim_set_by_pointer(st, rd_shutdown, SctpShutdowns);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_packets == CONFIG_BOOLEAN_YES || (do_packets == CONFIG_BOOLEAN_AUTO &&
+ (SctpInSCTPPacks ||
+ SctpOutSCTPPacks ||
+ netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
+ do_packets = CONFIG_BOOLEAN_YES;
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_received = NULL,
+ *rd_sent = NULL;
+
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "sctp"
+ , "packets"
+ , NULL
+ , "packets"
+ , NULL
+ , "SCTP Packets"
+ , "packets/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NET_SCTP_SNMP_NAME
+ , NETDATA_CHART_PRIO_SCTP + 20
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+ rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
+
+ rd_received = rrddim_add(st, "SctpInSCTPPacks", "received", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_sent = rrddim_add(st, "SctpOutSCTPPacks", "sent", -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_received, SctpInSCTPPacks);
+ rrddim_set_by_pointer(st, rd_sent, SctpOutSCTPPacks);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_packet_errors == CONFIG_BOOLEAN_YES || (do_packet_errors == CONFIG_BOOLEAN_AUTO &&
+ (SctpOutOfBlues ||
+ SctpChecksumErrors ||
+ netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
+ do_packet_errors = CONFIG_BOOLEAN_YES;
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_invalid = NULL,
+ *rd_csum = NULL;
+
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "sctp"
+ , "packet_errors"
+ , NULL
+ , "packets"
+ , NULL
+ , "SCTP Packet Errors"
+ , "packets/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NET_SCTP_SNMP_NAME
+ , NETDATA_CHART_PRIO_SCTP + 30
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+ rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
+
+ rd_invalid = rrddim_add(st, "SctpOutOfBlues", "invalid", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_csum = rrddim_add(st, "SctpChecksumErrors", "checksum", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_invalid, SctpOutOfBlues);
+ rrddim_set_by_pointer(st, rd_csum, SctpChecksumErrors);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_fragmentation == CONFIG_BOOLEAN_YES || (do_fragmentation == CONFIG_BOOLEAN_AUTO &&
+ (SctpFragUsrMsgs ||
+ SctpReasmUsrMsgs ||
+ netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
+ do_fragmentation = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_fragmented = NULL,
+ *rd_reassembled = NULL;
+
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "sctp"
+ , "fragmentation"
+ , NULL
+ , "fragmentation"
+ , NULL
+ , "SCTP Fragmentation"
+ , "packets/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NET_SCTP_SNMP_NAME
+ , NETDATA_CHART_PRIO_SCTP + 40
+ , update_every
+ , RRDSET_TYPE_LINE);
+ rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
+
+ rd_reassembled = rrddim_add(st, "SctpReasmUsrMsgs", "reassembled", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_fragmented = rrddim_add(st, "SctpFragUsrMsgs", "fragmented", -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_reassembled, SctpReasmUsrMsgs);
+ rrddim_set_by_pointer(st, rd_fragmented, SctpFragUsrMsgs);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_chunk_types == CONFIG_BOOLEAN_YES || (do_chunk_types == CONFIG_BOOLEAN_AUTO &&
+ (SctpInCtrlChunks ||
+ SctpInOrderChunks ||
+ SctpInUnorderChunks ||
+ SctpOutCtrlChunks ||
+ SctpOutOrderChunks ||
+ SctpOutUnorderChunks ||
+ netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
+ do_chunk_types = CONFIG_BOOLEAN_YES;
+ static RRDSET *st = NULL;
+ static RRDDIM
+ *rd_InCtrl = NULL,
+ *rd_InOrder = NULL,
+ *rd_InUnorder = NULL,
+ *rd_OutCtrl = NULL,
+ *rd_OutOrder = NULL,
+ *rd_OutUnorder = NULL;
+
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "sctp"
+ , "chunks"
+ , NULL
+ , "chunks"
+ , NULL
+ , "SCTP Chunk Types"
+ , "chunks/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NET_SCTP_SNMP_NAME
+ , NETDATA_CHART_PRIO_SCTP + 50
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+ rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
+
+ rd_InCtrl = rrddim_add(st, "SctpInCtrlChunks", "InCtrl", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_InOrder = rrddim_add(st, "SctpInOrderChunks", "InOrder", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_InUnorder = rrddim_add(st, "SctpInUnorderChunks", "InUnorder", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_OutCtrl = rrddim_add(st, "SctpOutCtrlChunks", "OutCtrl", -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_OutOrder = rrddim_add(st, "SctpOutOrderChunks", "OutOrder", -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_OutUnorder = rrddim_add(st, "SctpOutUnorderChunks", "OutUnorder", -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_InCtrl, SctpInCtrlChunks);
+ rrddim_set_by_pointer(st, rd_InOrder, SctpInOrderChunks);
+ rrddim_set_by_pointer(st, rd_InUnorder, SctpInUnorderChunks);
+ rrddim_set_by_pointer(st, rd_OutCtrl, SctpOutCtrlChunks);
+ rrddim_set_by_pointer(st, rd_OutOrder, SctpOutOrderChunks);
+ rrddim_set_by_pointer(st, rd_OutUnorder, SctpOutUnorderChunks);
+ rrdset_done(st);
+ }
+
+ return 0;
+}
+
diff --git a/collectors/proc.plugin/proc_net_snmp.c b/collectors/proc.plugin/proc_net_snmp.c
new file mode 100644
index 0000000..b03a6ac
--- /dev/null
+++ b/collectors/proc.plugin/proc_net_snmp.c
@@ -0,0 +1,1130 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "plugin_proc.h"
+#define PLUGIN_PROC_MODULE_NET_SNMP_NAME "/proc/net/snmp"
+
+#define RRD_TYPE_NET_SNMP "ipv4"
+
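+// /proc/net/snmp is expected to present each protocol as a pair of lines,
+// a header naming the fields followed by a line with the matching values:
+//
+//     Tcp: RtoAlgorithm RtoMin RtoMax MaxConn ActiveOpens ...
+//     Tcp: 1 200 120000 -1 1234 ...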
+static struct proc_net_snmp {
+ // kernel_uint_t ip_Forwarding;
+ kernel_uint_t ip_DefaultTTL;
+ kernel_uint_t ip_InReceives;
+ kernel_uint_t ip_InHdrErrors;
+ kernel_uint_t ip_InAddrErrors;
+ kernel_uint_t ip_ForwDatagrams;
+ kernel_uint_t ip_InUnknownProtos;
+ kernel_uint_t ip_InDiscards;
+ kernel_uint_t ip_InDelivers;
+ kernel_uint_t ip_OutRequests;
+ kernel_uint_t ip_OutDiscards;
+ kernel_uint_t ip_OutNoRoutes;
+ kernel_uint_t ip_ReasmTimeout;
+ kernel_uint_t ip_ReasmReqds;
+ kernel_uint_t ip_ReasmOKs;
+ kernel_uint_t ip_ReasmFails;
+ kernel_uint_t ip_FragOKs;
+ kernel_uint_t ip_FragFails;
+ kernel_uint_t ip_FragCreates;
+
+ kernel_uint_t icmp_InMsgs;
+ kernel_uint_t icmp_OutMsgs;
+ kernel_uint_t icmp_InErrors;
+ kernel_uint_t icmp_OutErrors;
+ kernel_uint_t icmp_InCsumErrors;
+
+ kernel_uint_t icmpmsg_InEchoReps;
+ kernel_uint_t icmpmsg_OutEchoReps;
+ kernel_uint_t icmpmsg_InDestUnreachs;
+ kernel_uint_t icmpmsg_OutDestUnreachs;
+ kernel_uint_t icmpmsg_InRedirects;
+ kernel_uint_t icmpmsg_OutRedirects;
+ kernel_uint_t icmpmsg_InEchos;
+ kernel_uint_t icmpmsg_OutEchos;
+ kernel_uint_t icmpmsg_InRouterAdvert;
+ kernel_uint_t icmpmsg_OutRouterAdvert;
+ kernel_uint_t icmpmsg_InRouterSelect;
+ kernel_uint_t icmpmsg_OutRouterSelect;
+ kernel_uint_t icmpmsg_InTimeExcds;
+ kernel_uint_t icmpmsg_OutTimeExcds;
+ kernel_uint_t icmpmsg_InParmProbs;
+ kernel_uint_t icmpmsg_OutParmProbs;
+ kernel_uint_t icmpmsg_InTimestamps;
+ kernel_uint_t icmpmsg_OutTimestamps;
+ kernel_uint_t icmpmsg_InTimestampReps;
+ kernel_uint_t icmpmsg_OutTimestampReps;
+
+ //kernel_uint_t tcp_RtoAlgorithm;
+ //kernel_uint_t tcp_RtoMin;
+ //kernel_uint_t tcp_RtoMax;
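+    // MaxConn is signed: per the TCP MIB it is reported as -1 when the
+    // maximum number of connections is dynamic, hence ssize_t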
+ ssize_t tcp_MaxConn;
+ kernel_uint_t tcp_ActiveOpens;
+ kernel_uint_t tcp_PassiveOpens;
+ kernel_uint_t tcp_AttemptFails;
+ kernel_uint_t tcp_EstabResets;
+ kernel_uint_t tcp_CurrEstab;
+ kernel_uint_t tcp_InSegs;
+ kernel_uint_t tcp_OutSegs;
+ kernel_uint_t tcp_RetransSegs;
+ kernel_uint_t tcp_InErrs;
+ kernel_uint_t tcp_OutRsts;
+ kernel_uint_t tcp_InCsumErrors;
+
+ kernel_uint_t udp_InDatagrams;
+ kernel_uint_t udp_NoPorts;
+ kernel_uint_t udp_InErrors;
+ kernel_uint_t udp_OutDatagrams;
+ kernel_uint_t udp_RcvbufErrors;
+ kernel_uint_t udp_SndbufErrors;
+ kernel_uint_t udp_InCsumErrors;
+ kernel_uint_t udp_IgnoredMulti;
+
+ kernel_uint_t udplite_InDatagrams;
+ kernel_uint_t udplite_NoPorts;
+ kernel_uint_t udplite_InErrors;
+ kernel_uint_t udplite_OutDatagrams;
+ kernel_uint_t udplite_RcvbufErrors;
+ kernel_uint_t udplite_SndbufErrors;
+ kernel_uint_t udplite_InCsumErrors;
+ kernel_uint_t udplite_IgnoredMulti;
+} snmp_root = { 0 };
+
+int do_proc_net_snmp(int update_every, usec_t dt) {
+ (void)dt;
+
+ static procfile *ff = NULL;
+ static int do_ip_packets = -1, do_ip_fragsout = -1, do_ip_fragsin = -1, do_ip_errors = -1,
+ do_tcp_sockets = -1, do_tcp_packets = -1, do_tcp_errors = -1, do_tcp_handshake = -1, do_tcp_opens = -1,
+ do_udp_packets = -1, do_udp_errors = -1, do_icmp_packets = -1, do_icmpmsg = -1, do_udplite_packets = -1;
+ static uint32_t hash_ip = 0, hash_icmp = 0, hash_tcp = 0, hash_udp = 0, hash_icmpmsg = 0, hash_udplite = 0;
+
+ static ARL_BASE *arl_ip = NULL,
+ *arl_icmp = NULL,
+ *arl_icmpmsg = NULL,
+ *arl_tcp = NULL,
+ *arl_udp = NULL,
+ *arl_udplite = NULL;
+
+ static RRDVAR *tcp_max_connections_var = NULL;
+
+ if(unlikely(!arl_ip)) {
+ do_ip_packets = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp", "ipv4 packets", CONFIG_BOOLEAN_AUTO);
+ do_ip_fragsout = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp", "ipv4 fragments sent", CONFIG_BOOLEAN_AUTO);
+ do_ip_fragsin = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp", "ipv4 fragments assembly", CONFIG_BOOLEAN_AUTO);
+ do_ip_errors = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp", "ipv4 errors", CONFIG_BOOLEAN_AUTO);
+ do_tcp_sockets = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp", "ipv4 TCP connections", CONFIG_BOOLEAN_AUTO);
+ do_tcp_packets = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp", "ipv4 TCP packets", CONFIG_BOOLEAN_AUTO);
+ do_tcp_errors = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp", "ipv4 TCP errors", CONFIG_BOOLEAN_AUTO);
+ do_tcp_opens = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp", "ipv4 TCP opens", CONFIG_BOOLEAN_AUTO);
+ do_tcp_handshake = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp", "ipv4 TCP handshake issues", CONFIG_BOOLEAN_AUTO);
+ do_udp_packets = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp", "ipv4 UDP packets", CONFIG_BOOLEAN_AUTO);
+ do_udp_errors = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp", "ipv4 UDP errors", CONFIG_BOOLEAN_AUTO);
+ do_icmp_packets = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp", "ipv4 ICMP packets", CONFIG_BOOLEAN_AUTO);
+ do_icmpmsg = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp", "ipv4 ICMP messages", CONFIG_BOOLEAN_AUTO);
+ do_udplite_packets = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp", "ipv4 UDPLite packets", CONFIG_BOOLEAN_AUTO);
+
+ hash_ip = simple_hash("Ip");
+ hash_tcp = simple_hash("Tcp");
+ hash_udp = simple_hash("Udp");
+ hash_icmp = simple_hash("Icmp");
+ hash_icmpmsg = simple_hash("IcmpMsg");
+ hash_udplite = simple_hash("UdpLite");
+
+ arl_ip = arl_create("snmp/Ip", arl_callback_str2kernel_uint_t, 60);
+ // arl_expect(arl_ip, "Forwarding", &snmp_root.ip_Forwarding);
+ arl_expect(arl_ip, "DefaultTTL", &snmp_root.ip_DefaultTTL);
+ arl_expect(arl_ip, "InReceives", &snmp_root.ip_InReceives);
+ arl_expect(arl_ip, "InHdrErrors", &snmp_root.ip_InHdrErrors);
+ arl_expect(arl_ip, "InAddrErrors", &snmp_root.ip_InAddrErrors);
+ arl_expect(arl_ip, "ForwDatagrams", &snmp_root.ip_ForwDatagrams);
+ arl_expect(arl_ip, "InUnknownProtos", &snmp_root.ip_InUnknownProtos);
+ arl_expect(arl_ip, "InDiscards", &snmp_root.ip_InDiscards);
+ arl_expect(arl_ip, "InDelivers", &snmp_root.ip_InDelivers);
+ arl_expect(arl_ip, "OutRequests", &snmp_root.ip_OutRequests);
+ arl_expect(arl_ip, "OutDiscards", &snmp_root.ip_OutDiscards);
+ arl_expect(arl_ip, "OutNoRoutes", &snmp_root.ip_OutNoRoutes);
+ arl_expect(arl_ip, "ReasmTimeout", &snmp_root.ip_ReasmTimeout);
+ arl_expect(arl_ip, "ReasmReqds", &snmp_root.ip_ReasmReqds);
+ arl_expect(arl_ip, "ReasmOKs", &snmp_root.ip_ReasmOKs);
+ arl_expect(arl_ip, "ReasmFails", &snmp_root.ip_ReasmFails);
+ arl_expect(arl_ip, "FragOKs", &snmp_root.ip_FragOKs);
+ arl_expect(arl_ip, "FragFails", &snmp_root.ip_FragFails);
+ arl_expect(arl_ip, "FragCreates", &snmp_root.ip_FragCreates);
+
+ arl_icmp = arl_create("snmp/Icmp", arl_callback_str2kernel_uint_t, 60);
+ arl_expect(arl_icmp, "InMsgs", &snmp_root.icmp_InMsgs);
+ arl_expect(arl_icmp, "OutMsgs", &snmp_root.icmp_OutMsgs);
+ arl_expect(arl_icmp, "InErrors", &snmp_root.icmp_InErrors);
+ arl_expect(arl_icmp, "OutErrors", &snmp_root.icmp_OutErrors);
+ arl_expect(arl_icmp, "InCsumErrors", &snmp_root.icmp_InCsumErrors);
+
+ arl_icmpmsg = arl_create("snmp/Icmpmsg", arl_callback_str2kernel_uint_t, 60);
+ arl_expect(arl_icmpmsg, "InType0", &snmp_root.icmpmsg_InEchoReps);
+ arl_expect(arl_icmpmsg, "OutType0", &snmp_root.icmpmsg_OutEchoReps);
+ arl_expect(arl_icmpmsg, "InType3", &snmp_root.icmpmsg_InDestUnreachs);
+ arl_expect(arl_icmpmsg, "OutType3", &snmp_root.icmpmsg_OutDestUnreachs);
+ arl_expect(arl_icmpmsg, "InType5", &snmp_root.icmpmsg_InRedirects);
+ arl_expect(arl_icmpmsg, "OutType5", &snmp_root.icmpmsg_OutRedirects);
+ arl_expect(arl_icmpmsg, "InType8", &snmp_root.icmpmsg_InEchos);
+ arl_expect(arl_icmpmsg, "OutType8", &snmp_root.icmpmsg_OutEchos);
+ arl_expect(arl_icmpmsg, "InType9", &snmp_root.icmpmsg_InRouterAdvert);
+ arl_expect(arl_icmpmsg, "OutType9", &snmp_root.icmpmsg_OutRouterAdvert);
+ arl_expect(arl_icmpmsg, "InType10", &snmp_root.icmpmsg_InRouterSelect);
+ arl_expect(arl_icmpmsg, "OutType10", &snmp_root.icmpmsg_OutRouterSelect);
+ arl_expect(arl_icmpmsg, "InType11", &snmp_root.icmpmsg_InTimeExcds);
+ arl_expect(arl_icmpmsg, "OutType11", &snmp_root.icmpmsg_OutTimeExcds);
+ arl_expect(arl_icmpmsg, "InType12", &snmp_root.icmpmsg_InParmProbs);
+ arl_expect(arl_icmpmsg, "OutType12", &snmp_root.icmpmsg_OutParmProbs);
+ arl_expect(arl_icmpmsg, "InType13", &snmp_root.icmpmsg_InTimestamps);
+ arl_expect(arl_icmpmsg, "OutType13", &snmp_root.icmpmsg_OutTimestamps);
+ arl_expect(arl_icmpmsg, "InType14", &snmp_root.icmpmsg_InTimestampReps);
+ arl_expect(arl_icmpmsg, "OutType14", &snmp_root.icmpmsg_OutTimestampReps);
+
+ arl_tcp = arl_create("snmp/Tcp", arl_callback_str2kernel_uint_t, 60);
+ // arl_expect(arl_tcp, "RtoAlgorithm", &snmp_root.tcp_RtoAlgorithm);
+ // arl_expect(arl_tcp, "RtoMin", &snmp_root.tcp_RtoMin);
+ // arl_expect(arl_tcp, "RtoMax", &snmp_root.tcp_RtoMax);
+ arl_expect_custom(arl_tcp, "MaxConn", arl_callback_ssize_t, &snmp_root.tcp_MaxConn);
+ arl_expect(arl_tcp, "ActiveOpens", &snmp_root.tcp_ActiveOpens);
+ arl_expect(arl_tcp, "PassiveOpens", &snmp_root.tcp_PassiveOpens);
+ arl_expect(arl_tcp, "AttemptFails", &snmp_root.tcp_AttemptFails);
+ arl_expect(arl_tcp, "EstabResets", &snmp_root.tcp_EstabResets);
+ arl_expect(arl_tcp, "CurrEstab", &snmp_root.tcp_CurrEstab);
+ arl_expect(arl_tcp, "InSegs", &snmp_root.tcp_InSegs);
+ arl_expect(arl_tcp, "OutSegs", &snmp_root.tcp_OutSegs);
+ arl_expect(arl_tcp, "RetransSegs", &snmp_root.tcp_RetransSegs);
+ arl_expect(arl_tcp, "InErrs", &snmp_root.tcp_InErrs);
+ arl_expect(arl_tcp, "OutRsts", &snmp_root.tcp_OutRsts);
+ arl_expect(arl_tcp, "InCsumErrors", &snmp_root.tcp_InCsumErrors);
+
+ arl_udp = arl_create("snmp/Udp", arl_callback_str2kernel_uint_t, 60);
+ arl_expect(arl_udp, "InDatagrams", &snmp_root.udp_InDatagrams);
+ arl_expect(arl_udp, "NoPorts", &snmp_root.udp_NoPorts);
+ arl_expect(arl_udp, "InErrors", &snmp_root.udp_InErrors);
+ arl_expect(arl_udp, "OutDatagrams", &snmp_root.udp_OutDatagrams);
+ arl_expect(arl_udp, "RcvbufErrors", &snmp_root.udp_RcvbufErrors);
+ arl_expect(arl_udp, "SndbufErrors", &snmp_root.udp_SndbufErrors);
+ arl_expect(arl_udp, "InCsumErrors", &snmp_root.udp_InCsumErrors);
+ arl_expect(arl_udp, "IgnoredMulti", &snmp_root.udp_IgnoredMulti);
+
+ arl_udplite = arl_create("snmp/Udplite", arl_callback_str2kernel_uint_t, 60);
+ arl_expect(arl_udplite, "InDatagrams", &snmp_root.udplite_InDatagrams);
+ arl_expect(arl_udplite, "NoPorts", &snmp_root.udplite_NoPorts);
+ arl_expect(arl_udplite, "InErrors", &snmp_root.udplite_InErrors);
+ arl_expect(arl_udplite, "OutDatagrams", &snmp_root.udplite_OutDatagrams);
+ arl_expect(arl_udplite, "RcvbufErrors", &snmp_root.udplite_RcvbufErrors);
+ arl_expect(arl_udplite, "SndbufErrors", &snmp_root.udplite_SndbufErrors);
+ arl_expect(arl_udplite, "InCsumErrors", &snmp_root.udplite_InCsumErrors);
+ arl_expect(arl_udplite, "IgnoredMulti", &snmp_root.udplite_IgnoredMulti);
+
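+        // export the kernel's connection limit as a custom host variable,
+        // so that health expressions can reference it (e.g. as
+        // $tcp_max_connections)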
+ tcp_max_connections_var = rrdvar_custom_host_variable_create(localhost, "tcp_max_connections");
+ }
+
+ if(unlikely(!ff)) {
+ char filename[FILENAME_MAX + 1];
+ snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/proc/net/snmp");
+ ff = procfile_open(config_get("plugin:proc:/proc/net/snmp", "filename to monitor", filename), " \t:", PROCFILE_FLAG_DEFAULT);
+ if(unlikely(!ff)) return 1;
+ }
+
+ ff = procfile_readall(ff);
+    if(unlikely(!ff)) return 0; // stay enabled and retry opening the file on the next iteration
+
+ size_t lines = procfile_lines(ff), l;
+ size_t words, w;
+
+ for(l = 0; l < lines ;l++) {
+ char *key = procfile_lineword(ff, l, 0);
+ uint32_t hash = simple_hash(key);
+
+ if(unlikely(hash == hash_ip && strcmp(key, "Ip") == 0)) {
+ size_t h = l++;
+
+ if(strcmp(procfile_lineword(ff, l, 0), "Ip") != 0) {
+ error("Cannot read Ip line from /proc/net/snmp.");
+ break;
+ }
+
+ words = procfile_linewords(ff, l);
+ if(words < 3) {
+ error("Cannot read /proc/net/snmp Ip line. Expected 3+ params, read %zu.", words);
+ continue;
+ }
+
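+            // h indexes the header line ("Ip: Forwarding DefaultTTL ...")
+            // and l the values line that follows it; each field name is
+            // paired positionally with its value via arl_check(). the same
+            // scheme repeats for the Icmp/IcmpMsg/Tcp/Udp/UdpLite sections
+            // below.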
+ arl_begin(arl_ip);
+ for(w = 1; w < words ; w++) {
+ if (unlikely(arl_check(arl_ip, procfile_lineword(ff, h, w), procfile_lineword(ff, l, w)) != 0))
+ break;
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_ip_packets == CONFIG_BOOLEAN_YES || (do_ip_packets == CONFIG_BOOLEAN_AUTO &&
+ (snmp_root.ip_OutRequests ||
+ snmp_root.ip_InReceives ||
+ snmp_root.ip_ForwDatagrams ||
+ snmp_root.ip_InDelivers ||
+ netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
+ do_ip_packets = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_InReceives = NULL,
+ *rd_OutRequests = NULL,
+ *rd_ForwDatagrams = NULL,
+ *rd_InDelivers = NULL;
+
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ RRD_TYPE_NET_SNMP
+ , "packets"
+ , NULL
+ , "packets"
+ , NULL
+ , "IPv4 Packets"
+ , "packets/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NET_SNMP_NAME
+ , NETDATA_CHART_PRIO_IPV4_PACKETS
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rd_InReceives = rrddim_add(st, "InReceives", "received", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_OutRequests = rrddim_add(st, "OutRequests", "sent", -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_ForwDatagrams = rrddim_add(st, "ForwDatagrams", "forwarded", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_InDelivers = rrddim_add(st, "InDelivers", "delivered", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_OutRequests, (collected_number)snmp_root.ip_OutRequests);
+ rrddim_set_by_pointer(st, rd_InReceives, (collected_number)snmp_root.ip_InReceives);
+ rrddim_set_by_pointer(st, rd_ForwDatagrams, (collected_number)snmp_root.ip_ForwDatagrams);
+ rrddim_set_by_pointer(st, rd_InDelivers, (collected_number)snmp_root.ip_InDelivers);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_ip_fragsout == CONFIG_BOOLEAN_YES || (do_ip_fragsout == CONFIG_BOOLEAN_AUTO &&
+ (snmp_root.ip_FragOKs ||
+ snmp_root.ip_FragFails ||
+ snmp_root.ip_FragCreates ||
+ netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
+ do_ip_fragsout = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_FragOKs = NULL,
+ *rd_FragFails = NULL,
+ *rd_FragCreates = NULL;
+
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ RRD_TYPE_NET_SNMP
+ , "fragsout"
+ , NULL
+ , "fragments"
+ , NULL
+ , "IPv4 Fragments Sent"
+ , "packets/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NET_SNMP_NAME
+ , NETDATA_CHART_PRIO_IPV4_FRAGMENTS
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+ rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
+
+ rd_FragOKs = rrddim_add(st, "FragOKs", "ok", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_FragFails = rrddim_add(st, "FragFails", "failed", -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_FragCreates = rrddim_add(st, "FragCreates", "created", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_FragOKs, (collected_number)snmp_root.ip_FragOKs);
+ rrddim_set_by_pointer(st, rd_FragFails, (collected_number)snmp_root.ip_FragFails);
+ rrddim_set_by_pointer(st, rd_FragCreates, (collected_number)snmp_root.ip_FragCreates);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_ip_fragsin == CONFIG_BOOLEAN_YES || (do_ip_fragsin == CONFIG_BOOLEAN_AUTO &&
+ (snmp_root.ip_ReasmOKs ||
+ snmp_root.ip_ReasmFails ||
+ snmp_root.ip_ReasmReqds ||
+ netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
+ do_ip_fragsin = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_ReasmOKs = NULL,
+ *rd_ReasmFails = NULL,
+ *rd_ReasmReqds = NULL;
+
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ RRD_TYPE_NET_SNMP
+ , "fragsin"
+ , NULL
+ , "fragments"
+ , NULL
+ , "IPv4 Fragments Reassembly"
+ , "packets/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NET_SNMP_NAME
+ , NETDATA_CHART_PRIO_IPV4_FRAGMENTS + 1
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+ rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
+
+ rd_ReasmOKs = rrddim_add(st, "ReasmOKs", "ok", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_ReasmFails = rrddim_add(st, "ReasmFails", "failed", -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_ReasmReqds = rrddim_add(st, "ReasmReqds", "all", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_ReasmOKs, (collected_number)snmp_root.ip_ReasmOKs);
+ rrddim_set_by_pointer(st, rd_ReasmFails, (collected_number)snmp_root.ip_ReasmFails);
+ rrddim_set_by_pointer(st, rd_ReasmReqds, (collected_number)snmp_root.ip_ReasmReqds);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_ip_errors == CONFIG_BOOLEAN_YES || (do_ip_errors == CONFIG_BOOLEAN_AUTO &&
+ (snmp_root.ip_InDiscards ||
+ snmp_root.ip_OutDiscards ||
+ snmp_root.ip_InHdrErrors ||
+ snmp_root.ip_InAddrErrors ||
+ snmp_root.ip_InUnknownProtos ||
+ snmp_root.ip_OutNoRoutes ||
+ netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
+ do_ip_errors = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_InDiscards = NULL,
+ *rd_OutDiscards = NULL,
+ *rd_InHdrErrors = NULL,
+ *rd_OutNoRoutes = NULL,
+ *rd_InAddrErrors = NULL,
+ *rd_InUnknownProtos = NULL;
+
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ RRD_TYPE_NET_SNMP
+ , "errors"
+ , NULL
+ , "errors"
+ , NULL
+ , "IPv4 Errors"
+ , "packets/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NET_SNMP_NAME
+ , NETDATA_CHART_PRIO_IPV4_ERRORS
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+ rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
+
+ rd_InDiscards = rrddim_add(st, "InDiscards", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_OutDiscards = rrddim_add(st, "OutDiscards", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+
+ rd_InHdrErrors = rrddim_add(st, "InHdrErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_OutNoRoutes = rrddim_add(st, "OutNoRoutes", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+
+ rd_InAddrErrors = rrddim_add(st, "InAddrErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_InUnknownProtos = rrddim_add(st, "InUnknownProtos", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_InDiscards, (collected_number)snmp_root.ip_InDiscards);
+ rrddim_set_by_pointer(st, rd_OutDiscards, (collected_number)snmp_root.ip_OutDiscards);
+ rrddim_set_by_pointer(st, rd_InHdrErrors, (collected_number)snmp_root.ip_InHdrErrors);
+ rrddim_set_by_pointer(st, rd_InAddrErrors, (collected_number)snmp_root.ip_InAddrErrors);
+ rrddim_set_by_pointer(st, rd_InUnknownProtos, (collected_number)snmp_root.ip_InUnknownProtos);
+ rrddim_set_by_pointer(st, rd_OutNoRoutes, (collected_number)snmp_root.ip_OutNoRoutes);
+ rrdset_done(st);
+ }
+ }
+ else if(unlikely(hash == hash_icmp && strcmp(key, "Icmp") == 0)) {
+ size_t h = l++;
+
+ if(strcmp(procfile_lineword(ff, l, 0), "Icmp") != 0) {
+ error("Cannot read Icmp line from /proc/net/snmp.");
+ break;
+ }
+
+ words = procfile_linewords(ff, l);
+ if(words < 3) {
+ error("Cannot read /proc/net/snmp Icmp line. Expected 3+ params, read %zu.", words);
+ continue;
+ }
+
+ arl_begin(arl_icmp);
+ for(w = 1; w < words ; w++) {
+ if (unlikely(arl_check(arl_icmp, procfile_lineword(ff, h, w), procfile_lineword(ff, l, w)) != 0))
+ break;
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_icmp_packets == CONFIG_BOOLEAN_YES || (do_icmp_packets == CONFIG_BOOLEAN_AUTO &&
+ (snmp_root.icmp_InMsgs ||
+ snmp_root.icmp_OutMsgs ||
+ snmp_root.icmp_InErrors ||
+ snmp_root.icmp_OutErrors ||
+ snmp_root.icmp_InCsumErrors ||
+ netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
+ do_icmp_packets = CONFIG_BOOLEAN_YES;
+
+ {
+ static RRDSET *st_packets = NULL;
+ static RRDDIM *rd_InMsgs = NULL,
+ *rd_OutMsgs = NULL;
+
+ if(unlikely(!st_packets)) {
+ st_packets = rrdset_create_localhost(
+ RRD_TYPE_NET_SNMP
+ , "icmp"
+ , NULL
+ , "icmp"
+ , NULL
+ , "IPv4 ICMP Packets"
+ , "packets/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NET_SNMP_NAME
+ , NETDATA_CHART_PRIO_IPV4_ICMP
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rd_InMsgs = rrddim_add(st_packets, "InMsgs", "received", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_OutMsgs = rrddim_add(st_packets, "OutMsgs", "sent", -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st_packets);
+
+ rrddim_set_by_pointer(st_packets, rd_InMsgs, (collected_number)snmp_root.icmp_InMsgs);
+ rrddim_set_by_pointer(st_packets, rd_OutMsgs, (collected_number)snmp_root.icmp_OutMsgs);
+
+ rrdset_done(st_packets);
+ }
+
+ {
+ static RRDSET *st_errors = NULL;
+ static RRDDIM *rd_InErrors = NULL,
+ *rd_OutErrors = NULL,
+ *rd_InCsumErrors = NULL;
+
+ if(unlikely(!st_errors)) {
+ st_errors = rrdset_create_localhost(
+ RRD_TYPE_NET_SNMP
+ , "icmp_errors"
+ , NULL
+ , "icmp"
+ , NULL
+ , "IPv4 ICMP Errors"
+ , "packets/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NET_SNMP_NAME
+ , NETDATA_CHART_PRIO_IPV4_ICMP + 1
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rd_InErrors = rrddim_add(st_errors, "InErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_OutErrors = rrddim_add(st_errors, "OutErrors", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_InCsumErrors = rrddim_add(st_errors, "InCsumErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st_errors);
+
+ rrddim_set_by_pointer(st_errors, rd_InErrors, (collected_number)snmp_root.icmp_InErrors);
+ rrddim_set_by_pointer(st_errors, rd_OutErrors, (collected_number)snmp_root.icmp_OutErrors);
+ rrddim_set_by_pointer(st_errors, rd_InCsumErrors, (collected_number)snmp_root.icmp_InCsumErrors);
+
+ rrdset_done(st_errors);
+ }
+ }
+ }
+ else if(unlikely(hash == hash_icmpmsg && strcmp(key, "IcmpMsg") == 0)) {
+ size_t h = l++;
+
+ if(strcmp(procfile_lineword(ff, l, 0), "IcmpMsg") != 0) {
+ error("Cannot read IcmpMsg line from /proc/net/snmp.");
+ break;
+ }
+
+ words = procfile_linewords(ff, l);
+ if(words < 2) {
+ error("Cannot read /proc/net/snmp IcmpMsg line. Expected 2+ params, read %zu.", words);
+ continue;
+ }
+
+ arl_begin(arl_icmpmsg);
+ for(w = 1; w < words ; w++) {
+ if (unlikely(arl_check(arl_icmpmsg, procfile_lineword(ff, h, w), procfile_lineword(ff, l, w)) != 0))
+ break;
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_icmpmsg == CONFIG_BOOLEAN_YES || (do_icmpmsg == CONFIG_BOOLEAN_AUTO &&
+ (snmp_root.icmpmsg_InEchoReps ||
+ snmp_root.icmpmsg_OutEchoReps ||
+ snmp_root.icmpmsg_InDestUnreachs ||
+ snmp_root.icmpmsg_OutDestUnreachs ||
+ snmp_root.icmpmsg_InRedirects ||
+ snmp_root.icmpmsg_OutRedirects ||
+ snmp_root.icmpmsg_InEchos ||
+ snmp_root.icmpmsg_OutEchos ||
+ snmp_root.icmpmsg_InRouterAdvert ||
+ snmp_root.icmpmsg_OutRouterAdvert ||
+ snmp_root.icmpmsg_InRouterSelect ||
+ snmp_root.icmpmsg_OutRouterSelect ||
+ snmp_root.icmpmsg_InTimeExcds ||
+ snmp_root.icmpmsg_OutTimeExcds ||
+ snmp_root.icmpmsg_InParmProbs ||
+ snmp_root.icmpmsg_OutParmProbs ||
+ snmp_root.icmpmsg_InTimestamps ||
+ snmp_root.icmpmsg_OutTimestamps ||
+ snmp_root.icmpmsg_InTimestampReps ||
+ snmp_root.icmpmsg_OutTimestampReps ||
+ netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
+ do_icmpmsg = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_InEchoReps = NULL,
+ *rd_OutEchoReps = NULL,
+ *rd_InDestUnreachs = NULL,
+ *rd_OutDestUnreachs = NULL,
+ *rd_InRedirects = NULL,
+ *rd_OutRedirects = NULL,
+ *rd_InEchos = NULL,
+ *rd_OutEchos = NULL,
+ *rd_InRouterAdvert = NULL,
+ *rd_OutRouterAdvert = NULL,
+ *rd_InRouterSelect = NULL,
+ *rd_OutRouterSelect = NULL,
+ *rd_InTimeExcds = NULL,
+ *rd_OutTimeExcds = NULL,
+ *rd_InParmProbs = NULL,
+ *rd_OutParmProbs = NULL,
+ *rd_InTimestamps = NULL,
+ *rd_OutTimestamps = NULL,
+ *rd_InTimestampReps = NULL,
+ *rd_OutTimestampReps = NULL;
+
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ RRD_TYPE_NET_SNMP
+ , "icmpmsg"
+ , NULL
+ , "icmp"
+ , NULL
+ , "IPv4 ICMP Messages"
+ , "packets/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NET_SNMP_NAME
+ , NETDATA_CHART_PRIO_IPV4_ICMP + 2
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rd_InEchoReps = rrddim_add(st, "InType0", "InEchoReps", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_OutEchoReps = rrddim_add(st, "OutType0", "OutEchoReps", -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_InDestUnreachs = rrddim_add(st, "InType3", "InDestUnreachs", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_OutDestUnreachs = rrddim_add(st, "OutType3", "OutDestUnreachs", -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_InRedirects = rrddim_add(st, "InType5", "InRedirects", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_OutRedirects = rrddim_add(st, "OutType5", "OutRedirects", -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_InEchos = rrddim_add(st, "InType8", "InEchos", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_OutEchos = rrddim_add(st, "OutType8", "OutEchos", -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_InRouterAdvert = rrddim_add(st, "InType9", "InRouterAdvert", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_OutRouterAdvert = rrddim_add(st, "OutType9", "OutRouterAdvert", -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_InRouterSelect = rrddim_add(st, "InType10", "InRouterSelect", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_OutRouterSelect = rrddim_add(st, "OutType10", "OutRouterSelect", -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_InTimeExcds = rrddim_add(st, "InType11", "InTimeExcds", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_OutTimeExcds = rrddim_add(st, "OutType11", "OutTimeExcds", -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_InParmProbs = rrddim_add(st, "InType12", "InParmProbs", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_OutParmProbs = rrddim_add(st, "OutType12", "OutParmProbs", -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_InTimestamps = rrddim_add(st, "InType13", "InTimestamps", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_OutTimestamps = rrddim_add(st, "OutType13", "OutTimestamps", -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_InTimestampReps = rrddim_add(st, "InType14", "InTimestampReps", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_OutTimestampReps = rrddim_add(st, "OutType14", "OutTimestampReps", -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_InEchoReps, (collected_number)snmp_root.icmpmsg_InEchoReps);
+ rrddim_set_by_pointer(st, rd_OutEchoReps, (collected_number)snmp_root.icmpmsg_OutEchoReps);
+ rrddim_set_by_pointer(st, rd_InDestUnreachs, (collected_number)snmp_root.icmpmsg_InDestUnreachs);
+ rrddim_set_by_pointer(st, rd_OutDestUnreachs, (collected_number)snmp_root.icmpmsg_OutDestUnreachs);
+ rrddim_set_by_pointer(st, rd_InRedirects, (collected_number)snmp_root.icmpmsg_InRedirects);
+ rrddim_set_by_pointer(st, rd_OutRedirects, (collected_number)snmp_root.icmpmsg_OutRedirects);
+ rrddim_set_by_pointer(st, rd_InEchos, (collected_number)snmp_root.icmpmsg_InEchos);
+ rrddim_set_by_pointer(st, rd_OutEchos, (collected_number)snmp_root.icmpmsg_OutEchos);
+ rrddim_set_by_pointer(st, rd_InRouterAdvert, (collected_number)snmp_root.icmpmsg_InRouterAdvert);
+ rrddim_set_by_pointer(st, rd_OutRouterAdvert, (collected_number)snmp_root.icmpmsg_OutRouterAdvert);
+ rrddim_set_by_pointer(st, rd_InRouterSelect, (collected_number)snmp_root.icmpmsg_InRouterSelect);
+ rrddim_set_by_pointer(st, rd_OutRouterSelect, (collected_number)snmp_root.icmpmsg_OutRouterSelect);
+ rrddim_set_by_pointer(st, rd_InTimeExcds, (collected_number)snmp_root.icmpmsg_InTimeExcds);
+ rrddim_set_by_pointer(st, rd_OutTimeExcds, (collected_number)snmp_root.icmpmsg_OutTimeExcds);
+ rrddim_set_by_pointer(st, rd_InParmProbs, (collected_number)snmp_root.icmpmsg_InParmProbs);
+ rrddim_set_by_pointer(st, rd_OutParmProbs, (collected_number)snmp_root.icmpmsg_OutParmProbs);
+ rrddim_set_by_pointer(st, rd_InTimestamps, (collected_number)snmp_root.icmpmsg_InTimestamps);
+ rrddim_set_by_pointer(st, rd_OutTimestamps, (collected_number)snmp_root.icmpmsg_OutTimestamps);
+ rrddim_set_by_pointer(st, rd_InTimestampReps, (collected_number)snmp_root.icmpmsg_InTimestampReps);
+ rrddim_set_by_pointer(st, rd_OutTimestampReps, (collected_number)snmp_root.icmpmsg_OutTimestampReps);
+
+ rrdset_done(st);
+ }
+ }
+ else if(unlikely(hash == hash_tcp && strcmp(key, "Tcp") == 0)) {
+ size_t h = l++;
+
+ if(strcmp(procfile_lineword(ff, l, 0), "Tcp") != 0) {
+ error("Cannot read Tcp line from /proc/net/snmp.");
+ break;
+ }
+
+ words = procfile_linewords(ff, l);
+ if(words < 3) {
+ error("Cannot read /proc/net/snmp Tcp line. Expected 3+ params, read %zu.", words);
+ continue;
+ }
+
+ arl_begin(arl_tcp);
+ for(w = 1; w < words ; w++) {
+ if (unlikely(arl_check(arl_tcp, procfile_lineword(ff, h, w), procfile_lineword(ff, l, w)) != 0))
+ break;
+ }
+
+ // --------------------------------------------------------------------
+
+            // this is smart enough to update the variable only when its value changes
+ rrdvar_custom_host_variable_set(localhost, tcp_max_connections_var, snmp_root.tcp_MaxConn);
+
+ // --------------------------------------------------------------------
+
+ // see http://net-snmp.sourceforge.net/docs/mibs/tcp.html
+ if(do_tcp_sockets == CONFIG_BOOLEAN_YES || (do_tcp_sockets == CONFIG_BOOLEAN_AUTO &&
+ (snmp_root.tcp_CurrEstab ||
+ netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
+ do_tcp_sockets = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_CurrEstab = NULL;
+
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ RRD_TYPE_NET_SNMP
+ , "tcpsock"
+ , NULL
+ , "tcp"
+ , NULL
+ , "IPv4 TCP Connections"
+ , "active connections"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NET_SNMP_NAME
+ , NETDATA_CHART_PRIO_IPV4_TCP
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rd_CurrEstab = rrddim_add(st, "CurrEstab", "connections", 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_CurrEstab, (collected_number)snmp_root.tcp_CurrEstab);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_tcp_packets == CONFIG_BOOLEAN_YES || (do_tcp_packets == CONFIG_BOOLEAN_AUTO &&
+ (snmp_root.tcp_InSegs ||
+ snmp_root.tcp_OutSegs ||
+ netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
+ do_tcp_packets = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_InSegs = NULL,
+ *rd_OutSegs = NULL;
+
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ RRD_TYPE_NET_SNMP
+ , "tcppackets"
+ , NULL
+ , "tcp"
+ , NULL
+ , "IPv4 TCP Packets"
+ , "packets/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NET_SNMP_NAME
+ , NETDATA_CHART_PRIO_IPV4_TCP + 4
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rd_InSegs = rrddim_add(st, "InSegs", "received", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_OutSegs = rrddim_add(st, "OutSegs", "sent", -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_InSegs, (collected_number)snmp_root.tcp_InSegs);
+ rrddim_set_by_pointer(st, rd_OutSegs, (collected_number)snmp_root.tcp_OutSegs);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_tcp_errors == CONFIG_BOOLEAN_YES || (do_tcp_errors == CONFIG_BOOLEAN_AUTO &&
+ (snmp_root.tcp_InErrs ||
+ snmp_root.tcp_InCsumErrors ||
+ snmp_root.tcp_RetransSegs ||
+ netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
+ do_tcp_errors = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_InErrs = NULL,
+ *rd_InCsumErrors = NULL,
+ *rd_RetransSegs = NULL;
+
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ RRD_TYPE_NET_SNMP
+ , "tcperrors"
+ , NULL
+ , "tcp"
+ , NULL
+ , "IPv4 TCP Errors"
+ , "packets/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NET_SNMP_NAME
+ , NETDATA_CHART_PRIO_IPV4_TCP + 20
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+ rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
+
+ rd_InErrs = rrddim_add(st, "InErrs", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_InCsumErrors = rrddim_add(st, "InCsumErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_RetransSegs = rrddim_add(st, "RetransSegs", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_InErrs, (collected_number)snmp_root.tcp_InErrs);
+ rrddim_set_by_pointer(st, rd_InCsumErrors, (collected_number)snmp_root.tcp_InCsumErrors);
+ rrddim_set_by_pointer(st, rd_RetransSegs, (collected_number)snmp_root.tcp_RetransSegs);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_tcp_opens == CONFIG_BOOLEAN_YES || (do_tcp_opens == CONFIG_BOOLEAN_AUTO &&
+ (snmp_root.tcp_ActiveOpens ||
+ snmp_root.tcp_PassiveOpens ||
+ netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
+ do_tcp_opens = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_ActiveOpens = NULL,
+ *rd_PassiveOpens = NULL;
+
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ RRD_TYPE_NET_SNMP
+ , "tcpopens"
+ , NULL
+ , "tcp"
+ , NULL
+ , "IPv4 TCP Opens"
+ , "connections/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NET_SNMP_NAME
+ , NETDATA_CHART_PRIO_IPV4_TCP + 5
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+ rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
+
+ rd_ActiveOpens = rrddim_add(st, "ActiveOpens", "active", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_PassiveOpens = rrddim_add(st, "PassiveOpens", "passive", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_ActiveOpens, (collected_number)snmp_root.tcp_ActiveOpens);
+ rrddim_set_by_pointer(st, rd_PassiveOpens, (collected_number)snmp_root.tcp_PassiveOpens);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_tcp_handshake == CONFIG_BOOLEAN_YES || (do_tcp_handshake == CONFIG_BOOLEAN_AUTO &&
+ (snmp_root.tcp_EstabResets ||
+ snmp_root.tcp_OutRsts ||
+ snmp_root.tcp_AttemptFails ||
+ netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
+ do_tcp_handshake = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_EstabResets = NULL,
+ *rd_OutRsts = NULL,
+ *rd_AttemptFails = NULL,
+ *rd_TCPSynRetrans = NULL;
+
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ RRD_TYPE_NET_SNMP
+ , "tcphandshake"
+ , NULL
+ , "tcp"
+ , NULL
+ , "IPv4 TCP Handshake Issues"
+ , "events/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NET_SNMP_NAME
+ , NETDATA_CHART_PRIO_IPV4_TCP + 30
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+ rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
+
+ rd_EstabResets = rrddim_add(st, "EstabResets", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_OutRsts = rrddim_add(st, "OutRsts", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_AttemptFails = rrddim_add(st, "AttemptFails", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_TCPSynRetrans = rrddim_add(st, "TCPSynRetrans", "SynRetrans", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_EstabResets, (collected_number)snmp_root.tcp_EstabResets);
+ rrddim_set_by_pointer(st, rd_OutRsts, (collected_number)snmp_root.tcp_OutRsts);
+ rrddim_set_by_pointer(st, rd_AttemptFails, (collected_number)snmp_root.tcp_AttemptFails);
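+                // TCPSynRetrans is not part of /proc/net/snmp;
+                // tcpext_TCPSynRetrans is presumably filled in by the
+                // /proc/net/netstat (TcpExt) collector and shared with this
+                // module, so the handshake chart can include it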
+ rrddim_set_by_pointer(st, rd_TCPSynRetrans, tcpext_TCPSynRetrans);
+ rrdset_done(st);
+ }
+ }
+ else if(unlikely(hash == hash_udp && strcmp(key, "Udp") == 0)) {
+ size_t h = l++;
+
+ if(strcmp(procfile_lineword(ff, l, 0), "Udp") != 0) {
+ error("Cannot read Udp line from /proc/net/snmp.");
+ break;
+ }
+
+ words = procfile_linewords(ff, l);
+ if(words < 3) {
+ error("Cannot read /proc/net/snmp Udp line. Expected 3+ params, read %zu.", words);
+ continue;
+ }
+
+ arl_begin(arl_udp);
+ for(w = 1; w < words ; w++) {
+ if (unlikely(arl_check(arl_udp, procfile_lineword(ff, h, w), procfile_lineword(ff, l, w)) != 0))
+ break;
+ }
+
+ // --------------------------------------------------------------------
+
+ // see http://net-snmp.sourceforge.net/docs/mibs/udp.html
+ if(do_udp_packets == CONFIG_BOOLEAN_YES || (do_udp_packets == CONFIG_BOOLEAN_AUTO &&
+ (snmp_root.udp_InDatagrams ||
+ snmp_root.udp_OutDatagrams ||
+ netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
+ do_udp_packets = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_InDatagrams = NULL,
+ *rd_OutDatagrams = NULL;
+
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ RRD_TYPE_NET_SNMP
+ , "udppackets"
+ , NULL
+ , "udp"
+ , NULL
+ , "IPv4 UDP Packets"
+ , "packets/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NET_SNMP_NAME
+ , NETDATA_CHART_PRIO_IPV4_UDP
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rd_InDatagrams = rrddim_add(st, "InDatagrams", "received", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_OutDatagrams = rrddim_add(st, "OutDatagrams", "sent", -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_InDatagrams, (collected_number)snmp_root.udp_InDatagrams);
+ rrddim_set_by_pointer(st, rd_OutDatagrams, (collected_number)snmp_root.udp_OutDatagrams);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_udp_errors == CONFIG_BOOLEAN_YES || (do_udp_errors == CONFIG_BOOLEAN_AUTO &&
+ (snmp_root.udp_InErrors ||
+ snmp_root.udp_NoPorts ||
+ snmp_root.udp_RcvbufErrors ||
+ snmp_root.udp_SndbufErrors ||
+ snmp_root.udp_InCsumErrors ||
+ snmp_root.udp_IgnoredMulti ||
+ netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
+ do_udp_errors = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_RcvbufErrors = NULL,
+ *rd_SndbufErrors = NULL,
+ *rd_InErrors = NULL,
+ *rd_NoPorts = NULL,
+ *rd_InCsumErrors = NULL,
+ *rd_IgnoredMulti = NULL;
+
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ RRD_TYPE_NET_SNMP
+ , "udperrors"
+ , NULL
+ , "udp"
+ , NULL
+ , "IPv4 UDP Errors"
+ , "events/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NET_SNMP_NAME
+ , NETDATA_CHART_PRIO_IPV4_UDP + 10
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+ rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
+
+ rd_RcvbufErrors = rrddim_add(st, "RcvbufErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_SndbufErrors = rrddim_add(st, "SndbufErrors", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_InErrors = rrddim_add(st, "InErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_NoPorts = rrddim_add(st, "NoPorts", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_InCsumErrors = rrddim_add(st, "InCsumErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_IgnoredMulti = rrddim_add(st, "IgnoredMulti", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_InErrors, (collected_number)snmp_root.udp_InErrors);
+ rrddim_set_by_pointer(st, rd_NoPorts, (collected_number)snmp_root.udp_NoPorts);
+ rrddim_set_by_pointer(st, rd_RcvbufErrors, (collected_number)snmp_root.udp_RcvbufErrors);
+ rrddim_set_by_pointer(st, rd_SndbufErrors, (collected_number)snmp_root.udp_SndbufErrors);
+ rrddim_set_by_pointer(st, rd_InCsumErrors, (collected_number)snmp_root.udp_InCsumErrors);
+ rrddim_set_by_pointer(st, rd_IgnoredMulti, (collected_number)snmp_root.udp_IgnoredMulti);
+ rrdset_done(st);
+ }
+ }
+ else if(unlikely(hash == hash_udplite && strcmp(key, "UdpLite") == 0)) {
+ size_t h = l++;
+
+ if(strcmp(procfile_lineword(ff, l, 0), "UdpLite") != 0) {
+ error("Cannot read UdpLite line from /proc/net/snmp.");
+ break;
+ }
+
+ words = procfile_linewords(ff, l);
+ if(words < 3) {
+ error("Cannot read /proc/net/snmp UdpLite line. Expected 3+ params, read %zu.", words);
+ continue;
+ }
+
+ arl_begin(arl_udplite);
+ for(w = 1; w < words ; w++) {
+ if (unlikely(arl_check(arl_udplite, procfile_lineword(ff, h, w), procfile_lineword(ff, l, w)) != 0))
+ break;
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_udplite_packets == CONFIG_BOOLEAN_YES || (do_udplite_packets == CONFIG_BOOLEAN_AUTO &&
+ (snmp_root.udplite_InDatagrams ||
+ snmp_root.udplite_OutDatagrams ||
+ snmp_root.udplite_NoPorts ||
+ snmp_root.udplite_InErrors ||
+ snmp_root.udplite_InCsumErrors ||
+ snmp_root.udplite_RcvbufErrors ||
+ snmp_root.udplite_SndbufErrors ||
+ snmp_root.udplite_IgnoredMulti ||
+ netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
+ do_udplite_packets = CONFIG_BOOLEAN_YES;
+
+ {
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_InDatagrams = NULL,
+ *rd_OutDatagrams = NULL;
+
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ RRD_TYPE_NET_SNMP
+ , "udplite"
+ , NULL
+ , "udplite"
+ , NULL
+ , "IPv4 UDPLite Packets"
+ , "packets/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NET_SNMP_NAME
+ , NETDATA_CHART_PRIO_IPV4_UDPLITE
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rd_InDatagrams = rrddim_add(st, "InDatagrams", "received", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_OutDatagrams = rrddim_add(st, "OutDatagrams", "sent", -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_InDatagrams, (collected_number)snmp_root.udplite_InDatagrams);
+ rrddim_set_by_pointer(st, rd_OutDatagrams, (collected_number)snmp_root.udplite_OutDatagrams);
+ rrdset_done(st);
+ }
+
+ {
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_RcvbufErrors = NULL,
+ *rd_SndbufErrors = NULL,
+ *rd_InErrors = NULL,
+ *rd_NoPorts = NULL,
+ *rd_InCsumErrors = NULL,
+ *rd_IgnoredMulti = NULL;
+
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ RRD_TYPE_NET_SNMP
+ , "udplite_errors"
+ , NULL
+ , "udplite"
+ , NULL
+ , "IPv4 UDPLite Errors"
+ , "packets/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NET_SNMP_NAME
+ , NETDATA_CHART_PRIO_IPV4_UDPLITE + 10
+ , update_every
+ , RRDSET_TYPE_LINE);
+
+ rd_RcvbufErrors = rrddim_add(st, "RcvbufErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_SndbufErrors = rrddim_add(st, "SndbufErrors", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_InErrors = rrddim_add(st, "InErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_NoPorts = rrddim_add(st, "NoPorts", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_InCsumErrors = rrddim_add(st, "InCsumErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_IgnoredMulti = rrddim_add(st, "IgnoredMulti", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_NoPorts, (collected_number)snmp_root.udplite_NoPorts);
+ rrddim_set_by_pointer(st, rd_InErrors, (collected_number)snmp_root.udplite_InErrors);
+ rrddim_set_by_pointer(st, rd_InCsumErrors, (collected_number)snmp_root.udplite_InCsumErrors);
+ rrddim_set_by_pointer(st, rd_RcvbufErrors, (collected_number)snmp_root.udplite_RcvbufErrors);
+ rrddim_set_by_pointer(st, rd_SndbufErrors, (collected_number)snmp_root.udplite_SndbufErrors);
+ rrddim_set_by_pointer(st, rd_IgnoredMulti, (collected_number)snmp_root.udplite_IgnoredMulti);
+ rrdset_done(st);
+ }
+ }
+ }
+ }
+
+ return 0;
+}
+
diff --git a/collectors/proc.plugin/proc_net_snmp6.c b/collectors/proc.plugin/proc_net_snmp6.c
new file mode 100644
index 0000000..445e0dc
--- /dev/null
+++ b/collectors/proc.plugin/proc_net_snmp6.c
@@ -0,0 +1,1293 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "plugin_proc.h"
+
+#define RRD_TYPE_NET_SNMP6 "ipv6"
+#define PLUGIN_PROC_MODULE_NET_SNMP6_NAME "/proc/net/snmp6"
+
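+// Collector for /proc/net/snmp6: the file holds one "Key Value" pair per line.
+// Values are gathered through an adaptive resortable list (ARL) into the static
+// counters below, and charted as IPv6, ICMPv6, UDP6 and UDPLite6 metrics.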
+int do_proc_net_snmp6(int update_every, usec_t dt) {
+ (void)dt;
+
+ static procfile *ff = NULL;
+
+ static int do_ip_packets = -1,
+ do_ip_fragsout = -1,
+ do_ip_fragsin = -1,
+ do_ip_errors = -1,
+ do_udplite_packets = -1,
+ do_udplite_errors = -1,
+ do_udp_packets = -1,
+ do_udp_errors = -1,
+ do_bandwidth = -1,
+ do_mcast = -1,
+ do_bcast = -1,
+ do_mcast_p = -1,
+ do_icmp = -1,
+ do_icmp_redir = -1,
+ do_icmp_errors = -1,
+ do_icmp_echos = -1,
+ do_icmp_groupmemb = -1,
+ do_icmp_router = -1,
+ do_icmp_neighbor = -1,
+ do_icmp_mldv2 = -1,
+ do_icmp_types = -1,
+ do_ect = -1;
+
+ static ARL_BASE *arl_base = NULL;
+
+ static unsigned long long Ip6InReceives = 0ULL;
+ static unsigned long long Ip6InHdrErrors = 0ULL;
+ static unsigned long long Ip6InTooBigErrors = 0ULL;
+ static unsigned long long Ip6InNoRoutes = 0ULL;
+ static unsigned long long Ip6InAddrErrors = 0ULL;
+ static unsigned long long Ip6InUnknownProtos = 0ULL;
+ static unsigned long long Ip6InTruncatedPkts = 0ULL;
+ static unsigned long long Ip6InDiscards = 0ULL;
+ static unsigned long long Ip6InDelivers = 0ULL;
+ static unsigned long long Ip6OutForwDatagrams = 0ULL;
+ static unsigned long long Ip6OutRequests = 0ULL;
+ static unsigned long long Ip6OutDiscards = 0ULL;
+ static unsigned long long Ip6OutNoRoutes = 0ULL;
+ static unsigned long long Ip6ReasmTimeout = 0ULL;
+ static unsigned long long Ip6ReasmReqds = 0ULL;
+ static unsigned long long Ip6ReasmOKs = 0ULL;
+ static unsigned long long Ip6ReasmFails = 0ULL;
+ static unsigned long long Ip6FragOKs = 0ULL;
+ static unsigned long long Ip6FragFails = 0ULL;
+ static unsigned long long Ip6FragCreates = 0ULL;
+ static unsigned long long Ip6InMcastPkts = 0ULL;
+ static unsigned long long Ip6OutMcastPkts = 0ULL;
+ static unsigned long long Ip6InOctets = 0ULL;
+ static unsigned long long Ip6OutOctets = 0ULL;
+ static unsigned long long Ip6InMcastOctets = 0ULL;
+ static unsigned long long Ip6OutMcastOctets = 0ULL;
+ static unsigned long long Ip6InBcastOctets = 0ULL;
+ static unsigned long long Ip6OutBcastOctets = 0ULL;
+ static unsigned long long Ip6InNoECTPkts = 0ULL;
+ static unsigned long long Ip6InECT1Pkts = 0ULL;
+ static unsigned long long Ip6InECT0Pkts = 0ULL;
+ static unsigned long long Ip6InCEPkts = 0ULL;
+ static unsigned long long Icmp6InMsgs = 0ULL;
+ static unsigned long long Icmp6InErrors = 0ULL;
+ static unsigned long long Icmp6OutMsgs = 0ULL;
+ static unsigned long long Icmp6OutErrors = 0ULL;
+ static unsigned long long Icmp6InCsumErrors = 0ULL;
+ static unsigned long long Icmp6InDestUnreachs = 0ULL;
+ static unsigned long long Icmp6InPktTooBigs = 0ULL;
+ static unsigned long long Icmp6InTimeExcds = 0ULL;
+ static unsigned long long Icmp6InParmProblems = 0ULL;
+ static unsigned long long Icmp6InEchos = 0ULL;
+ static unsigned long long Icmp6InEchoReplies = 0ULL;
+ static unsigned long long Icmp6InGroupMembQueries = 0ULL;
+ static unsigned long long Icmp6InGroupMembResponses = 0ULL;
+ static unsigned long long Icmp6InGroupMembReductions = 0ULL;
+ static unsigned long long Icmp6InRouterSolicits = 0ULL;
+ static unsigned long long Icmp6InRouterAdvertisements = 0ULL;
+ static unsigned long long Icmp6InNeighborSolicits = 0ULL;
+ static unsigned long long Icmp6InNeighborAdvertisements = 0ULL;
+ static unsigned long long Icmp6InRedirects = 0ULL;
+ static unsigned long long Icmp6InMLDv2Reports = 0ULL;
+ static unsigned long long Icmp6OutDestUnreachs = 0ULL;
+ static unsigned long long Icmp6OutPktTooBigs = 0ULL;
+ static unsigned long long Icmp6OutTimeExcds = 0ULL;
+ static unsigned long long Icmp6OutParmProblems = 0ULL;
+ static unsigned long long Icmp6OutEchos = 0ULL;
+ static unsigned long long Icmp6OutEchoReplies = 0ULL;
+ static unsigned long long Icmp6OutGroupMembQueries = 0ULL;
+ static unsigned long long Icmp6OutGroupMembResponses = 0ULL;
+ static unsigned long long Icmp6OutGroupMembReductions = 0ULL;
+ static unsigned long long Icmp6OutRouterSolicits = 0ULL;
+ static unsigned long long Icmp6OutRouterAdvertisements = 0ULL;
+ static unsigned long long Icmp6OutNeighborSolicits = 0ULL;
+ static unsigned long long Icmp6OutNeighborAdvertisements = 0ULL;
+ static unsigned long long Icmp6OutRedirects = 0ULL;
+ static unsigned long long Icmp6OutMLDv2Reports = 0ULL;
+ static unsigned long long Icmp6InType1 = 0ULL;
+ static unsigned long long Icmp6InType128 = 0ULL;
+ static unsigned long long Icmp6InType129 = 0ULL;
+ static unsigned long long Icmp6InType136 = 0ULL;
+ static unsigned long long Icmp6OutType1 = 0ULL;
+ static unsigned long long Icmp6OutType128 = 0ULL;
+ static unsigned long long Icmp6OutType129 = 0ULL;
+ static unsigned long long Icmp6OutType133 = 0ULL;
+ static unsigned long long Icmp6OutType135 = 0ULL;
+ static unsigned long long Icmp6OutType143 = 0ULL;
+ static unsigned long long Udp6InDatagrams = 0ULL;
+ static unsigned long long Udp6NoPorts = 0ULL;
+ static unsigned long long Udp6InErrors = 0ULL;
+ static unsigned long long Udp6OutDatagrams = 0ULL;
+ static unsigned long long Udp6RcvbufErrors = 0ULL;
+ static unsigned long long Udp6SndbufErrors = 0ULL;
+ static unsigned long long Udp6InCsumErrors = 0ULL;
+ static unsigned long long Udp6IgnoredMulti = 0ULL;
+ static unsigned long long UdpLite6InDatagrams = 0ULL;
+ static unsigned long long UdpLite6NoPorts = 0ULL;
+ static unsigned long long UdpLite6InErrors = 0ULL;
+ static unsigned long long UdpLite6OutDatagrams = 0ULL;
+ static unsigned long long UdpLite6RcvbufErrors = 0ULL;
+ static unsigned long long UdpLite6SndbufErrors = 0ULL;
+ static unsigned long long UdpLite6InCsumErrors = 0ULL;
+
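+    // One-time setup: each chart switch is YES, NO, or AUTO; an AUTO chart is
+    // created only after one of its counters is first seen non-zero.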
+ if(unlikely(!arl_base)) {
+ do_ip_packets = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp6", "ipv6 packets", CONFIG_BOOLEAN_AUTO);
+ do_ip_fragsout = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp6", "ipv6 fragments sent", CONFIG_BOOLEAN_AUTO);
+ do_ip_fragsin = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp6", "ipv6 fragments assembly", CONFIG_BOOLEAN_AUTO);
+ do_ip_errors = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp6", "ipv6 errors", CONFIG_BOOLEAN_AUTO);
+ do_udp_packets = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp6", "ipv6 UDP packets", CONFIG_BOOLEAN_AUTO);
+ do_udp_errors = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp6", "ipv6 UDP errors", CONFIG_BOOLEAN_AUTO);
+ do_udplite_packets = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp6", "ipv6 UDPlite packets", CONFIG_BOOLEAN_AUTO);
+ do_udplite_errors = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp6", "ipv6 UDPlite errors", CONFIG_BOOLEAN_AUTO);
+ do_bandwidth = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp6", "bandwidth", CONFIG_BOOLEAN_AUTO);
+ do_mcast = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp6", "multicast bandwidth", CONFIG_BOOLEAN_AUTO);
+ do_bcast = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp6", "broadcast bandwidth", CONFIG_BOOLEAN_AUTO);
+ do_mcast_p = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp6", "multicast packets", CONFIG_BOOLEAN_AUTO);
+ do_icmp = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp6", "icmp", CONFIG_BOOLEAN_AUTO);
+ do_icmp_redir = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp6", "icmp redirects", CONFIG_BOOLEAN_AUTO);
+ do_icmp_errors = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp6", "icmp errors", CONFIG_BOOLEAN_AUTO);
+ do_icmp_echos = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp6", "icmp echos", CONFIG_BOOLEAN_AUTO);
+ do_icmp_groupmemb = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp6", "icmp group membership", CONFIG_BOOLEAN_AUTO);
+ do_icmp_router = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp6", "icmp router", CONFIG_BOOLEAN_AUTO);
+ do_icmp_neighbor = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp6", "icmp neighbor", CONFIG_BOOLEAN_AUTO);
+ do_icmp_mldv2 = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp6", "icmp mldv2", CONFIG_BOOLEAN_AUTO);
+ do_icmp_types = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp6", "icmp types", CONFIG_BOOLEAN_AUTO);
+ do_ect = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp6", "ect", CONFIG_BOOLEAN_AUTO);
+
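+        // The ARL maps every keyword we expect in /proc/net/snmp6 to the
+        // address of its counter above, so the parsing loop below can fill
+        // the counters directly.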
+ arl_base = arl_create("snmp6", NULL, 60);
+ arl_expect(arl_base, "Ip6InReceives", &Ip6InReceives);
+ arl_expect(arl_base, "Ip6InHdrErrors", &Ip6InHdrErrors);
+ arl_expect(arl_base, "Ip6InTooBigErrors", &Ip6InTooBigErrors);
+ arl_expect(arl_base, "Ip6InNoRoutes", &Ip6InNoRoutes);
+ arl_expect(arl_base, "Ip6InAddrErrors", &Ip6InAddrErrors);
+ arl_expect(arl_base, "Ip6InUnknownProtos", &Ip6InUnknownProtos);
+ arl_expect(arl_base, "Ip6InTruncatedPkts", &Ip6InTruncatedPkts);
+ arl_expect(arl_base, "Ip6InDiscards", &Ip6InDiscards);
+ arl_expect(arl_base, "Ip6InDelivers", &Ip6InDelivers);
+ arl_expect(arl_base, "Ip6OutForwDatagrams", &Ip6OutForwDatagrams);
+ arl_expect(arl_base, "Ip6OutRequests", &Ip6OutRequests);
+ arl_expect(arl_base, "Ip6OutDiscards", &Ip6OutDiscards);
+ arl_expect(arl_base, "Ip6OutNoRoutes", &Ip6OutNoRoutes);
+ arl_expect(arl_base, "Ip6ReasmTimeout", &Ip6ReasmTimeout);
+ arl_expect(arl_base, "Ip6ReasmReqds", &Ip6ReasmReqds);
+ arl_expect(arl_base, "Ip6ReasmOKs", &Ip6ReasmOKs);
+ arl_expect(arl_base, "Ip6ReasmFails", &Ip6ReasmFails);
+ arl_expect(arl_base, "Ip6FragOKs", &Ip6FragOKs);
+ arl_expect(arl_base, "Ip6FragFails", &Ip6FragFails);
+ arl_expect(arl_base, "Ip6FragCreates", &Ip6FragCreates);
+ arl_expect(arl_base, "Ip6InMcastPkts", &Ip6InMcastPkts);
+ arl_expect(arl_base, "Ip6OutMcastPkts", &Ip6OutMcastPkts);
+ arl_expect(arl_base, "Ip6InOctets", &Ip6InOctets);
+ arl_expect(arl_base, "Ip6OutOctets", &Ip6OutOctets);
+ arl_expect(arl_base, "Ip6InMcastOctets", &Ip6InMcastOctets);
+ arl_expect(arl_base, "Ip6OutMcastOctets", &Ip6OutMcastOctets);
+ arl_expect(arl_base, "Ip6InBcastOctets", &Ip6InBcastOctets);
+ arl_expect(arl_base, "Ip6OutBcastOctets", &Ip6OutBcastOctets);
+ arl_expect(arl_base, "Ip6InNoECTPkts", &Ip6InNoECTPkts);
+ arl_expect(arl_base, "Ip6InECT1Pkts", &Ip6InECT1Pkts);
+ arl_expect(arl_base, "Ip6InECT0Pkts", &Ip6InECT0Pkts);
+ arl_expect(arl_base, "Ip6InCEPkts", &Ip6InCEPkts);
+ arl_expect(arl_base, "Icmp6InMsgs", &Icmp6InMsgs);
+ arl_expect(arl_base, "Icmp6InErrors", &Icmp6InErrors);
+ arl_expect(arl_base, "Icmp6OutMsgs", &Icmp6OutMsgs);
+ arl_expect(arl_base, "Icmp6OutErrors", &Icmp6OutErrors);
+ arl_expect(arl_base, "Icmp6InCsumErrors", &Icmp6InCsumErrors);
+ arl_expect(arl_base, "Icmp6InDestUnreachs", &Icmp6InDestUnreachs);
+ arl_expect(arl_base, "Icmp6InPktTooBigs", &Icmp6InPktTooBigs);
+ arl_expect(arl_base, "Icmp6InTimeExcds", &Icmp6InTimeExcds);
+ arl_expect(arl_base, "Icmp6InParmProblems", &Icmp6InParmProblems);
+ arl_expect(arl_base, "Icmp6InEchos", &Icmp6InEchos);
+ arl_expect(arl_base, "Icmp6InEchoReplies", &Icmp6InEchoReplies);
+ arl_expect(arl_base, "Icmp6InGroupMembQueries", &Icmp6InGroupMembQueries);
+ arl_expect(arl_base, "Icmp6InGroupMembResponses", &Icmp6InGroupMembResponses);
+ arl_expect(arl_base, "Icmp6InGroupMembReductions", &Icmp6InGroupMembReductions);
+ arl_expect(arl_base, "Icmp6InRouterSolicits", &Icmp6InRouterSolicits);
+ arl_expect(arl_base, "Icmp6InRouterAdvertisements", &Icmp6InRouterAdvertisements);
+ arl_expect(arl_base, "Icmp6InNeighborSolicits", &Icmp6InNeighborSolicits);
+ arl_expect(arl_base, "Icmp6InNeighborAdvertisements", &Icmp6InNeighborAdvertisements);
+ arl_expect(arl_base, "Icmp6InRedirects", &Icmp6InRedirects);
+ arl_expect(arl_base, "Icmp6InMLDv2Reports", &Icmp6InMLDv2Reports);
+ arl_expect(arl_base, "Icmp6OutDestUnreachs", &Icmp6OutDestUnreachs);
+ arl_expect(arl_base, "Icmp6OutPktTooBigs", &Icmp6OutPktTooBigs);
+ arl_expect(arl_base, "Icmp6OutTimeExcds", &Icmp6OutTimeExcds);
+ arl_expect(arl_base, "Icmp6OutParmProblems", &Icmp6OutParmProblems);
+ arl_expect(arl_base, "Icmp6OutEchos", &Icmp6OutEchos);
+ arl_expect(arl_base, "Icmp6OutEchoReplies", &Icmp6OutEchoReplies);
+ arl_expect(arl_base, "Icmp6OutGroupMembQueries", &Icmp6OutGroupMembQueries);
+ arl_expect(arl_base, "Icmp6OutGroupMembResponses", &Icmp6OutGroupMembResponses);
+ arl_expect(arl_base, "Icmp6OutGroupMembReductions", &Icmp6OutGroupMembReductions);
+ arl_expect(arl_base, "Icmp6OutRouterSolicits", &Icmp6OutRouterSolicits);
+ arl_expect(arl_base, "Icmp6OutRouterAdvertisements", &Icmp6OutRouterAdvertisements);
+ arl_expect(arl_base, "Icmp6OutNeighborSolicits", &Icmp6OutNeighborSolicits);
+ arl_expect(arl_base, "Icmp6OutNeighborAdvertisements", &Icmp6OutNeighborAdvertisements);
+ arl_expect(arl_base, "Icmp6OutRedirects", &Icmp6OutRedirects);
+ arl_expect(arl_base, "Icmp6OutMLDv2Reports", &Icmp6OutMLDv2Reports);
+ arl_expect(arl_base, "Icmp6InType1", &Icmp6InType1);
+ arl_expect(arl_base, "Icmp6InType128", &Icmp6InType128);
+ arl_expect(arl_base, "Icmp6InType129", &Icmp6InType129);
+ arl_expect(arl_base, "Icmp6InType136", &Icmp6InType136);
+ arl_expect(arl_base, "Icmp6OutType1", &Icmp6OutType1);
+ arl_expect(arl_base, "Icmp6OutType128", &Icmp6OutType128);
+ arl_expect(arl_base, "Icmp6OutType129", &Icmp6OutType129);
+ arl_expect(arl_base, "Icmp6OutType133", &Icmp6OutType133);
+ arl_expect(arl_base, "Icmp6OutType135", &Icmp6OutType135);
+ arl_expect(arl_base, "Icmp6OutType143", &Icmp6OutType143);
+ arl_expect(arl_base, "Udp6InDatagrams", &Udp6InDatagrams);
+ arl_expect(arl_base, "Udp6NoPorts", &Udp6NoPorts);
+ arl_expect(arl_base, "Udp6InErrors", &Udp6InErrors);
+ arl_expect(arl_base, "Udp6OutDatagrams", &Udp6OutDatagrams);
+ arl_expect(arl_base, "Udp6RcvbufErrors", &Udp6RcvbufErrors);
+ arl_expect(arl_base, "Udp6SndbufErrors", &Udp6SndbufErrors);
+ arl_expect(arl_base, "Udp6InCsumErrors", &Udp6InCsumErrors);
+ arl_expect(arl_base, "Udp6IgnoredMulti", &Udp6IgnoredMulti);
+ arl_expect(arl_base, "UdpLite6InDatagrams", &UdpLite6InDatagrams);
+ arl_expect(arl_base, "UdpLite6NoPorts", &UdpLite6NoPorts);
+ arl_expect(arl_base, "UdpLite6InErrors", &UdpLite6InErrors);
+ arl_expect(arl_base, "UdpLite6OutDatagrams", &UdpLite6OutDatagrams);
+ arl_expect(arl_base, "UdpLite6RcvbufErrors", &UdpLite6RcvbufErrors);
+ arl_expect(arl_base, "UdpLite6SndbufErrors", &UdpLite6SndbufErrors);
+ arl_expect(arl_base, "UdpLite6InCsumErrors", &UdpLite6InCsumErrors);
+ }
+
+ if(unlikely(!ff)) {
+ char filename[FILENAME_MAX + 1];
+ snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/proc/net/snmp6");
+ ff = procfile_open(config_get("plugin:proc:/proc/net/snmp6", "filename to monitor", filename), " \t:", PROCFILE_FLAG_DEFAULT);
+ if(unlikely(!ff))
+ return 1;
+ }
+
+ ff = procfile_readall(ff);
+ if(unlikely(!ff))
+        return 0; // not an error: the file will be reopened and retried on the next iteration
+
+ size_t lines = procfile_lines(ff), l;
+
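+    // Feed each "Key Value" line to the ARL; arl_check() stores the value into
+    // the matching counter and returns non-zero once all expected keys have
+    // been collected, allowing the scan to stop early.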
+ arl_begin(arl_base);
+
+ for(l = 0; l < lines ;l++) {
+ size_t words = procfile_linewords(ff, l);
+ if(unlikely(words < 2)) {
+ if(unlikely(words)) error("Cannot read /proc/net/snmp6 line %zu. Expected 2 params, read %zu.", l, words);
+ continue;
+ }
+
+ if(unlikely(arl_check(arl_base,
+ procfile_lineword(ff, l, 0),
+ procfile_lineword(ff, l, 1)))) break;
+ }
+
+ // --------------------------------------------------------------------
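+    // Every chart block below follows the same pattern: an AUTO gate that
+    // latches to YES once a counter is non-zero, a static RRDSET created on the
+    // first pass and advanced with rrdset_next() afterwards, then the dimension
+    // updates and rrdset_done().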
+
+ if(do_bandwidth == CONFIG_BOOLEAN_YES || (do_bandwidth == CONFIG_BOOLEAN_AUTO &&
+ (Ip6InOctets ||
+ Ip6OutOctets ||
+ netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
+ do_bandwidth = CONFIG_BOOLEAN_YES;
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_received = NULL,
+ *rd_sent = NULL;
+
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "system"
+ , "ipv6"
+ , NULL
+ , "network"
+ , NULL
+ , "IPv6 Bandwidth"
+ , "kilobits/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NET_SNMP6_NAME
+ , NETDATA_CHART_PRIO_SYSTEM_IPV6
+ , update_every
+ , RRDSET_TYPE_AREA
+ );
+
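+        // Octet counters are charted as kilobits/s: multiplier 8 turns bytes
+        // into bits, BITS_IN_A_KILOBIT scales to kilobits, and the negative
+        // multiplier draws sent traffic below the zero line.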
+ rd_received = rrddim_add(st, "InOctets", "received", 8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
+ rd_sent = rrddim_add(st, "OutOctets", "sent", -8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_received, Ip6InOctets);
+ rrddim_set_by_pointer(st, rd_sent, Ip6OutOctets);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_ip_packets == CONFIG_BOOLEAN_YES || (do_ip_packets == CONFIG_BOOLEAN_AUTO &&
+ (Ip6InReceives ||
+ Ip6OutRequests ||
+ Ip6InDelivers ||
+ Ip6OutForwDatagrams ||
+ netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
+ do_ip_packets = CONFIG_BOOLEAN_YES;
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_received = NULL,
+ *rd_sent = NULL,
+ *rd_forwarded = NULL,
+ *rd_delivers = NULL;
+
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ RRD_TYPE_NET_SNMP6
+ , "packets"
+ , NULL
+ , "packets"
+ , NULL
+ , "IPv6 Packets"
+ , "packets/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NET_SNMP6_NAME
+ , NETDATA_CHART_PRIO_IPV6_PACKETS
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rd_received = rrddim_add(st, "InReceives", "received", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_sent = rrddim_add(st, "OutRequests", "sent", -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_forwarded = rrddim_add(st, "OutForwDatagrams", "forwarded", -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_delivers = rrddim_add(st, "InDelivers", "delivers", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_received, Ip6InReceives);
+ rrddim_set_by_pointer(st, rd_sent, Ip6OutRequests);
+ rrddim_set_by_pointer(st, rd_forwarded, Ip6OutForwDatagrams);
+ rrddim_set_by_pointer(st, rd_delivers, Ip6InDelivers);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_ip_fragsout == CONFIG_BOOLEAN_YES || (do_ip_fragsout == CONFIG_BOOLEAN_AUTO &&
+ (Ip6FragOKs ||
+ Ip6FragFails ||
+ Ip6FragCreates ||
+ netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
+ do_ip_fragsout = CONFIG_BOOLEAN_YES;
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_ok = NULL,
+ *rd_failed = NULL,
+ *rd_all = NULL;
+
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ RRD_TYPE_NET_SNMP6
+ , "fragsout"
+ , NULL
+ , "fragments6"
+ , NULL
+ , "IPv6 Fragments Sent"
+ , "packets/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NET_SNMP6_NAME
+ , NETDATA_CHART_PRIO_IPV6_FRAGSOUT
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+ rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
+
+ rd_ok = rrddim_add(st, "FragOKs", "ok", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_failed = rrddim_add(st, "FragFails", "failed", -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_all = rrddim_add(st, "FragCreates", "all", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_ok, Ip6FragOKs);
+ rrddim_set_by_pointer(st, rd_failed, Ip6FragFails);
+ rrddim_set_by_pointer(st, rd_all, Ip6FragCreates);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_ip_fragsin == CONFIG_BOOLEAN_YES || (do_ip_fragsin == CONFIG_BOOLEAN_AUTO &&
+ (Ip6ReasmOKs ||
+ Ip6ReasmFails ||
+ Ip6ReasmTimeout ||
+ Ip6ReasmReqds ||
+ netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
+ do_ip_fragsin = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_ok = NULL,
+ *rd_failed = NULL,
+ *rd_timeout = NULL,
+ *rd_all = NULL;
+
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ RRD_TYPE_NET_SNMP6
+ , "fragsin"
+ , NULL
+ , "fragments6"
+ , NULL
+ , "IPv6 Fragments Reassembly"
+ , "packets/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NET_SNMP6_NAME
+ , NETDATA_CHART_PRIO_IPV6_FRAGSIN
+ , update_every
+ , RRDSET_TYPE_LINE);
+ rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
+
+ rd_ok = rrddim_add(st, "ReasmOKs", "ok", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_failed = rrddim_add(st, "ReasmFails", "failed", -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_timeout = rrddim_add(st, "ReasmTimeout", "timeout", -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_all = rrddim_add(st, "ReasmReqds", "all", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_ok, Ip6ReasmOKs);
+ rrddim_set_by_pointer(st, rd_failed, Ip6ReasmFails);
+ rrddim_set_by_pointer(st, rd_timeout, Ip6ReasmTimeout);
+ rrddim_set_by_pointer(st, rd_all, Ip6ReasmReqds);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_ip_errors == CONFIG_BOOLEAN_YES || (do_ip_errors == CONFIG_BOOLEAN_AUTO &&
+ (Ip6InDiscards ||
+ Ip6OutDiscards ||
+ Ip6InHdrErrors ||
+ Ip6InAddrErrors ||
+ Ip6InUnknownProtos ||
+ Ip6InTooBigErrors ||
+ Ip6InTruncatedPkts ||
+ Ip6InNoRoutes ||
+ netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
+ do_ip_errors = CONFIG_BOOLEAN_YES;
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_InDiscards = NULL,
+ *rd_OutDiscards = NULL,
+ *rd_InHdrErrors = NULL,
+ *rd_InAddrErrors = NULL,
+ *rd_InUnknownProtos = NULL,
+ *rd_InTooBigErrors = NULL,
+ *rd_InTruncatedPkts = NULL,
+ *rd_InNoRoutes = NULL,
+ *rd_OutNoRoutes = NULL;
+
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ RRD_TYPE_NET_SNMP6
+ , "errors"
+ , NULL
+ , "errors"
+ , NULL
+ , "IPv6 Errors"
+ , "packets/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NET_SNMP6_NAME
+ , NETDATA_CHART_PRIO_IPV6_ERRORS
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+ rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
+
+ rd_InDiscards = rrddim_add(st, "InDiscards", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_OutDiscards = rrddim_add(st, "OutDiscards", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_InHdrErrors = rrddim_add(st, "InHdrErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_InAddrErrors = rrddim_add(st, "InAddrErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_InUnknownProtos = rrddim_add(st, "InUnknownProtos", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_InTooBigErrors = rrddim_add(st, "InTooBigErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_InTruncatedPkts = rrddim_add(st, "InTruncatedPkts", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_InNoRoutes = rrddim_add(st, "InNoRoutes", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_OutNoRoutes = rrddim_add(st, "OutNoRoutes", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_InDiscards, Ip6InDiscards);
+ rrddim_set_by_pointer(st, rd_OutDiscards, Ip6OutDiscards);
+ rrddim_set_by_pointer(st, rd_InHdrErrors, Ip6InHdrErrors);
+ rrddim_set_by_pointer(st, rd_InAddrErrors, Ip6InAddrErrors);
+ rrddim_set_by_pointer(st, rd_InUnknownProtos, Ip6InUnknownProtos);
+ rrddim_set_by_pointer(st, rd_InTooBigErrors, Ip6InTooBigErrors);
+ rrddim_set_by_pointer(st, rd_InTruncatedPkts, Ip6InTruncatedPkts);
+ rrddim_set_by_pointer(st, rd_InNoRoutes, Ip6InNoRoutes);
+ rrddim_set_by_pointer(st, rd_OutNoRoutes, Ip6OutNoRoutes);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_udp_packets == CONFIG_BOOLEAN_YES || (do_udp_packets == CONFIG_BOOLEAN_AUTO &&
+ (Udp6InDatagrams ||
+ Udp6OutDatagrams ||
+ netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
+ do_udp_packets = CONFIG_BOOLEAN_YES;
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_received = NULL,
+ *rd_sent = NULL;
+
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ RRD_TYPE_NET_SNMP6
+ , "udppackets"
+ , NULL
+ , "udp6"
+ , NULL
+ , "IPv6 UDP Packets"
+ , "packets/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NET_SNMP6_NAME
+ , NETDATA_CHART_PRIO_IPV6_UDP_PACKETS
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rd_received = rrddim_add(st, "InDatagrams", "received", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_sent = rrddim_add(st, "OutDatagrams", "sent", -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_received, Udp6InDatagrams);
+ rrddim_set_by_pointer(st, rd_sent, Udp6OutDatagrams);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_udp_errors == CONFIG_BOOLEAN_YES || (do_udp_errors == CONFIG_BOOLEAN_AUTO &&
+ (Udp6InErrors ||
+ Udp6NoPorts ||
+ Udp6RcvbufErrors ||
+ Udp6SndbufErrors ||
+ Udp6InCsumErrors ||
+ Udp6IgnoredMulti ||
+ netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
+ do_udp_errors = CONFIG_BOOLEAN_YES;
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_RcvbufErrors = NULL,
+ *rd_SndbufErrors = NULL,
+ *rd_InErrors = NULL,
+ *rd_NoPorts = NULL,
+ *rd_InCsumErrors = NULL,
+ *rd_IgnoredMulti = NULL;
+
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ RRD_TYPE_NET_SNMP6
+ , "udperrors"
+ , NULL
+ , "udp6"
+ , NULL
+ , "IPv6 UDP Errors"
+ , "events/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NET_SNMP6_NAME
+ , NETDATA_CHART_PRIO_IPV6_UDP_ERRORS
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+ rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
+
+ rd_RcvbufErrors = rrddim_add(st, "RcvbufErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_SndbufErrors = rrddim_add(st, "SndbufErrors", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_InErrors = rrddim_add(st, "InErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_NoPorts = rrddim_add(st, "NoPorts", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_InCsumErrors = rrddim_add(st, "InCsumErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_IgnoredMulti = rrddim_add(st, "IgnoredMulti", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_RcvbufErrors, Udp6RcvbufErrors);
+ rrddim_set_by_pointer(st, rd_SndbufErrors, Udp6SndbufErrors);
+ rrddim_set_by_pointer(st, rd_InErrors, Udp6InErrors);
+ rrddim_set_by_pointer(st, rd_NoPorts, Udp6NoPorts);
+ rrddim_set_by_pointer(st, rd_InCsumErrors, Udp6InCsumErrors);
+ rrddim_set_by_pointer(st, rd_IgnoredMulti, Udp6IgnoredMulti);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_udplite_packets == CONFIG_BOOLEAN_YES || (do_udplite_packets == CONFIG_BOOLEAN_AUTO &&
+ (UdpLite6InDatagrams ||
+ UdpLite6OutDatagrams ||
+ netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
+ do_udplite_packets = CONFIG_BOOLEAN_YES;
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_received = NULL,
+ *rd_sent = NULL;
+
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ RRD_TYPE_NET_SNMP6
+ , "udplitepackets"
+ , NULL
+ , "udplite6"
+ , NULL
+ , "IPv6 UDPlite Packets"
+ , "packets/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NET_SNMP6_NAME
+ , NETDATA_CHART_PRIO_IPV6_UDPLITE_PACKETS
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rd_received = rrddim_add(st, "InDatagrams", "received", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_sent = rrddim_add(st, "OutDatagrams", "sent", -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_received, UdpLite6InDatagrams);
+ rrddim_set_by_pointer(st, rd_sent, UdpLite6OutDatagrams);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_udplite_errors == CONFIG_BOOLEAN_YES || (do_udplite_errors == CONFIG_BOOLEAN_AUTO &&
+ (UdpLite6InErrors ||
+ UdpLite6NoPorts ||
+ UdpLite6RcvbufErrors ||
+ UdpLite6SndbufErrors ||
+                                                    UdpLite6InCsumErrors ||
+ netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
+ do_udplite_errors = CONFIG_BOOLEAN_YES;
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_RcvbufErrors = NULL,
+ *rd_SndbufErrors = NULL,
+ *rd_InErrors = NULL,
+ *rd_NoPorts = NULL,
+ *rd_InCsumErrors = NULL;
+
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ RRD_TYPE_NET_SNMP6
+ , "udpliteerrors"
+ , NULL
+ , "udplite6"
+ , NULL
+                , "IPv6 UDPlite Errors"
+ , "events/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NET_SNMP6_NAME
+ , NETDATA_CHART_PRIO_IPV6_UDPLITE_ERRORS
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+ rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
+
+ rd_RcvbufErrors = rrddim_add(st, "RcvbufErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_SndbufErrors = rrddim_add(st, "SndbufErrors", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_InErrors = rrddim_add(st, "InErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_NoPorts = rrddim_add(st, "NoPorts", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_InCsumErrors = rrddim_add(st, "InCsumErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_InErrors, UdpLite6InErrors);
+ rrddim_set_by_pointer(st, rd_NoPorts, UdpLite6NoPorts);
+ rrddim_set_by_pointer(st, rd_RcvbufErrors, UdpLite6RcvbufErrors);
+ rrddim_set_by_pointer(st, rd_SndbufErrors, UdpLite6SndbufErrors);
+ rrddim_set_by_pointer(st, rd_InCsumErrors, UdpLite6InCsumErrors);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_mcast == CONFIG_BOOLEAN_YES || (do_mcast == CONFIG_BOOLEAN_AUTO &&
+ (Ip6OutMcastOctets ||
+ Ip6InMcastOctets ||
+ netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
+ do_mcast = CONFIG_BOOLEAN_YES;
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_Ip6InMcastOctets = NULL,
+ *rd_Ip6OutMcastOctets = NULL;
+
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ RRD_TYPE_NET_SNMP6
+ , "mcast"
+ , NULL
+ , "multicast6"
+ , NULL
+ , "IPv6 Multicast Bandwidth"
+ , "kilobits/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NET_SNMP6_NAME
+ , NETDATA_CHART_PRIO_IPV6_MCAST
+ , update_every
+ , RRDSET_TYPE_AREA
+ );
+ rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
+
+ rd_Ip6InMcastOctets = rrddim_add(st, "InMcastOctets", "received", 8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
+ rd_Ip6OutMcastOctets = rrddim_add(st, "OutMcastOctets", "sent", -8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_Ip6InMcastOctets, Ip6InMcastOctets);
+ rrddim_set_by_pointer(st, rd_Ip6OutMcastOctets, Ip6OutMcastOctets);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_bcast == CONFIG_BOOLEAN_YES || (do_bcast == CONFIG_BOOLEAN_AUTO &&
+ (Ip6OutBcastOctets ||
+ Ip6InBcastOctets ||
+ netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
+ do_bcast = CONFIG_BOOLEAN_YES;
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_Ip6InBcastOctets = NULL,
+ *rd_Ip6OutBcastOctets = NULL;
+
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ RRD_TYPE_NET_SNMP6
+ , "bcast"
+ , NULL
+ , "broadcast6"
+ , NULL
+ , "IPv6 Broadcast Bandwidth"
+ , "kilobits/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NET_SNMP6_NAME
+ , NETDATA_CHART_PRIO_IPV6_BCAST
+ , update_every
+ , RRDSET_TYPE_AREA
+ );
+ rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
+
+ rd_Ip6InBcastOctets = rrddim_add(st, "InBcastOctets", "received", 8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
+ rd_Ip6OutBcastOctets = rrddim_add(st, "OutBcastOctets", "sent", -8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_Ip6InBcastOctets, Ip6InBcastOctets);
+ rrddim_set_by_pointer(st, rd_Ip6OutBcastOctets, Ip6OutBcastOctets);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_mcast_p == CONFIG_BOOLEAN_YES || (do_mcast_p == CONFIG_BOOLEAN_AUTO &&
+ (Ip6OutMcastPkts ||
+ Ip6InMcastPkts ||
+ netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
+ do_mcast_p = CONFIG_BOOLEAN_YES;
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_Ip6InMcastPkts = NULL,
+ *rd_Ip6OutMcastPkts = NULL;
+
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ RRD_TYPE_NET_SNMP6
+ , "mcastpkts"
+ , NULL
+ , "multicast6"
+ , NULL
+ , "IPv6 Multicast Packets"
+ , "packets/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NET_SNMP6_NAME
+ , NETDATA_CHART_PRIO_IPV6_MCAST_PACKETS
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+ rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
+
+ rd_Ip6InMcastPkts = rrddim_add(st, "InMcastPkts", "received", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_Ip6OutMcastPkts = rrddim_add(st, "OutMcastPkts", "sent", -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_Ip6InMcastPkts, Ip6InMcastPkts);
+ rrddim_set_by_pointer(st, rd_Ip6OutMcastPkts, Ip6OutMcastPkts);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_icmp == CONFIG_BOOLEAN_YES || (do_icmp == CONFIG_BOOLEAN_AUTO &&
+ (Icmp6InMsgs ||
+ Icmp6OutMsgs ||
+ netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
+ do_icmp = CONFIG_BOOLEAN_YES;
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_Icmp6InMsgs = NULL,
+ *rd_Icmp6OutMsgs = NULL;
+
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ RRD_TYPE_NET_SNMP6
+ , "icmp"
+ , NULL
+ , "icmp6"
+ , NULL
+ , "IPv6 ICMP Messages"
+ , "messages/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NET_SNMP6_NAME
+ , NETDATA_CHART_PRIO_IPV6_ICMP
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rd_Icmp6InMsgs = rrddim_add(st, "InMsgs", "received", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_Icmp6OutMsgs = rrddim_add(st, "OutMsgs", "sent", -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_Icmp6InMsgs, Icmp6InMsgs);
+ rrddim_set_by_pointer(st, rd_Icmp6OutMsgs, Icmp6OutMsgs);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_icmp_redir == CONFIG_BOOLEAN_YES || (do_icmp_redir == CONFIG_BOOLEAN_AUTO &&
+ (Icmp6InRedirects ||
+ Icmp6OutRedirects ||
+ netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
+ do_icmp_redir = CONFIG_BOOLEAN_YES;
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_Icmp6InRedirects = NULL,
+ *rd_Icmp6OutRedirects = NULL;
+
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ RRD_TYPE_NET_SNMP6
+ , "icmpredir"
+ , NULL
+ , "icmp6"
+ , NULL
+ , "IPv6 ICMP Redirects"
+ , "redirects/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NET_SNMP6_NAME
+ , NETDATA_CHART_PRIO_IPV6_ICMP_REDIR
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rd_Icmp6InRedirects = rrddim_add(st, "InRedirects", "received", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_Icmp6OutRedirects = rrddim_add(st, "OutRedirects", "sent", -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_Icmp6InRedirects, Icmp6InRedirects);
+ rrddim_set_by_pointer(st, rd_Icmp6OutRedirects, Icmp6OutRedirects);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_icmp_errors == CONFIG_BOOLEAN_YES || (do_icmp_errors == CONFIG_BOOLEAN_AUTO &&
+ (Icmp6InErrors ||
+ Icmp6OutErrors ||
+ Icmp6InCsumErrors ||
+ Icmp6InDestUnreachs ||
+ Icmp6InPktTooBigs ||
+ Icmp6InTimeExcds ||
+ Icmp6InParmProblems ||
+ Icmp6OutDestUnreachs ||
+ Icmp6OutPktTooBigs ||
+ Icmp6OutTimeExcds ||
+ Icmp6OutParmProblems ||
+ netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
+ do_icmp_errors = CONFIG_BOOLEAN_YES;
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_InErrors = NULL,
+ *rd_OutErrors = NULL,
+ *rd_InCsumErrors = NULL,
+ *rd_InDestUnreachs = NULL,
+ *rd_InPktTooBigs = NULL,
+ *rd_InTimeExcds = NULL,
+ *rd_InParmProblems = NULL,
+ *rd_OutDestUnreachs = NULL,
+ *rd_OutPktTooBigs = NULL,
+ *rd_OutTimeExcds = NULL,
+ *rd_OutParmProblems = NULL;
+
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ RRD_TYPE_NET_SNMP6
+ , "icmperrors"
+ , NULL
+ , "icmp6"
+ , NULL
+ , "IPv6 ICMP Errors"
+ , "errors/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NET_SNMP6_NAME
+ , NETDATA_CHART_PRIO_IPV6_ICMP_ERRORS
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rd_InErrors = rrddim_add(st, "InErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_OutErrors = rrddim_add(st, "OutErrors", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_InCsumErrors = rrddim_add(st, "InCsumErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_InDestUnreachs = rrddim_add(st, "InDestUnreachs", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_InPktTooBigs = rrddim_add(st, "InPktTooBigs", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_InTimeExcds = rrddim_add(st, "InTimeExcds", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_InParmProblems = rrddim_add(st, "InParmProblems", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_OutDestUnreachs = rrddim_add(st, "OutDestUnreachs", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_OutPktTooBigs = rrddim_add(st, "OutPktTooBigs", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_OutTimeExcds = rrddim_add(st, "OutTimeExcds", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_OutParmProblems = rrddim_add(st, "OutParmProblems", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_InErrors, Icmp6InErrors);
+ rrddim_set_by_pointer(st, rd_OutErrors, Icmp6OutErrors);
+ rrddim_set_by_pointer(st, rd_InCsumErrors, Icmp6InCsumErrors);
+ rrddim_set_by_pointer(st, rd_InDestUnreachs, Icmp6InDestUnreachs);
+ rrddim_set_by_pointer(st, rd_InPktTooBigs, Icmp6InPktTooBigs);
+ rrddim_set_by_pointer(st, rd_InTimeExcds, Icmp6InTimeExcds);
+ rrddim_set_by_pointer(st, rd_InParmProblems, Icmp6InParmProblems);
+ rrddim_set_by_pointer(st, rd_OutDestUnreachs, Icmp6OutDestUnreachs);
+ rrddim_set_by_pointer(st, rd_OutPktTooBigs, Icmp6OutPktTooBigs);
+ rrddim_set_by_pointer(st, rd_OutTimeExcds, Icmp6OutTimeExcds);
+ rrddim_set_by_pointer(st, rd_OutParmProblems, Icmp6OutParmProblems);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_icmp_echos == CONFIG_BOOLEAN_YES || (do_icmp_echos == CONFIG_BOOLEAN_AUTO &&
+ (Icmp6InEchos ||
+ Icmp6OutEchos ||
+ Icmp6InEchoReplies ||
+ Icmp6OutEchoReplies ||
+ netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
+ do_icmp_echos = CONFIG_BOOLEAN_YES;
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_InEchos = NULL,
+ *rd_OutEchos = NULL,
+ *rd_InEchoReplies = NULL,
+ *rd_OutEchoReplies = NULL;
+
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ RRD_TYPE_NET_SNMP6
+ , "icmpechos"
+ , NULL
+ , "icmp6"
+ , NULL
+ , "IPv6 ICMP Echo"
+ , "messages/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NET_SNMP6_NAME
+ , NETDATA_CHART_PRIO_IPV6_ICMP_ECHOS
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rd_InEchos = rrddim_add(st, "InEchos", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_OutEchos = rrddim_add(st, "OutEchos", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_InEchoReplies = rrddim_add(st, "InEchoReplies", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_OutEchoReplies = rrddim_add(st, "OutEchoReplies", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_InEchos, Icmp6InEchos);
+ rrddim_set_by_pointer(st, rd_OutEchos, Icmp6OutEchos);
+ rrddim_set_by_pointer(st, rd_InEchoReplies, Icmp6InEchoReplies);
+ rrddim_set_by_pointer(st, rd_OutEchoReplies, Icmp6OutEchoReplies);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_icmp_groupmemb == CONFIG_BOOLEAN_YES || (do_icmp_groupmemb == CONFIG_BOOLEAN_AUTO &&
+ (Icmp6InGroupMembQueries ||
+ Icmp6OutGroupMembQueries ||
+ Icmp6InGroupMembResponses ||
+ Icmp6OutGroupMembResponses ||
+ Icmp6InGroupMembReductions ||
+ Icmp6OutGroupMembReductions ||
+ netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
+ do_icmp_groupmemb = CONFIG_BOOLEAN_YES;
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_InQueries = NULL,
+ *rd_OutQueries = NULL,
+ *rd_InResponses = NULL,
+ *rd_OutResponses = NULL,
+ *rd_InReductions = NULL,
+ *rd_OutReductions = NULL;
+
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ RRD_TYPE_NET_SNMP6
+ , "groupmemb"
+ , NULL
+ , "icmp6"
+ , NULL
+ , "IPv6 ICMP Group Membership"
+ , "messages/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NET_SNMP6_NAME
+ , NETDATA_CHART_PRIO_IPV6_ICMP_GROUPMEMB
+ , update_every
+ , RRDSET_TYPE_LINE);
+
+ rd_InQueries = rrddim_add(st, "InQueries", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_OutQueries = rrddim_add(st, "OutQueries", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_InResponses = rrddim_add(st, "InResponses", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_OutResponses = rrddim_add(st, "OutResponses", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_InReductions = rrddim_add(st, "InReductions", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_OutReductions = rrddim_add(st, "OutReductions", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_InQueries, Icmp6InGroupMembQueries);
+ rrddim_set_by_pointer(st, rd_OutQueries, Icmp6OutGroupMembQueries);
+ rrddim_set_by_pointer(st, rd_InResponses, Icmp6InGroupMembResponses);
+ rrddim_set_by_pointer(st, rd_OutResponses, Icmp6OutGroupMembResponses);
+ rrddim_set_by_pointer(st, rd_InReductions, Icmp6InGroupMembReductions);
+ rrddim_set_by_pointer(st, rd_OutReductions, Icmp6OutGroupMembReductions);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_icmp_router == CONFIG_BOOLEAN_YES || (do_icmp_router == CONFIG_BOOLEAN_AUTO &&
+ (Icmp6InRouterSolicits ||
+ Icmp6OutRouterSolicits ||
+ Icmp6InRouterAdvertisements ||
+ Icmp6OutRouterAdvertisements ||
+ netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
+ do_icmp_router = CONFIG_BOOLEAN_YES;
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_InSolicits = NULL,
+ *rd_OutSolicits = NULL,
+ *rd_InAdvertisements = NULL,
+ *rd_OutAdvertisements = NULL;
+
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ RRD_TYPE_NET_SNMP6
+ , "icmprouter"
+ , NULL
+ , "icmp6"
+ , NULL
+ , "IPv6 Router Messages"
+ , "messages/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NET_SNMP6_NAME
+ , NETDATA_CHART_PRIO_IPV6_ICMP_ROUTER
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rd_InSolicits = rrddim_add(st, "InSolicits", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_OutSolicits = rrddim_add(st, "OutSolicits", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_InAdvertisements = rrddim_add(st, "InAdvertisements", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_OutAdvertisements = rrddim_add(st, "OutAdvertisements", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_InSolicits, Icmp6InRouterSolicits);
+ rrddim_set_by_pointer(st, rd_OutSolicits, Icmp6OutRouterSolicits);
+ rrddim_set_by_pointer(st, rd_InAdvertisements, Icmp6InRouterAdvertisements);
+ rrddim_set_by_pointer(st, rd_OutAdvertisements, Icmp6OutRouterAdvertisements);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_icmp_neighbor == CONFIG_BOOLEAN_YES || (do_icmp_neighbor == CONFIG_BOOLEAN_AUTO &&
+ (Icmp6InNeighborSolicits ||
+ Icmp6OutNeighborSolicits ||
+ Icmp6InNeighborAdvertisements ||
+ Icmp6OutNeighborAdvertisements ||
+ netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
+ do_icmp_neighbor = CONFIG_BOOLEAN_YES;
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_InSolicits = NULL,
+ *rd_OutSolicits = NULL,
+ *rd_InAdvertisements = NULL,
+ *rd_OutAdvertisements = NULL;
+
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ RRD_TYPE_NET_SNMP6
+ , "icmpneighbor"
+ , NULL
+ , "icmp6"
+ , NULL
+ , "IPv6 Neighbor Messages"
+ , "messages/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NET_SNMP6_NAME
+ , NETDATA_CHART_PRIO_IPV6_ICMP_NEIGHBOR
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rd_InSolicits = rrddim_add(st, "InSolicits", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_OutSolicits = rrddim_add(st, "OutSolicits", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_InAdvertisements = rrddim_add(st, "InAdvertisements", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_OutAdvertisements = rrddim_add(st, "OutAdvertisements", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_InSolicits, Icmp6InNeighborSolicits);
+ rrddim_set_by_pointer(st, rd_OutSolicits, Icmp6OutNeighborSolicits);
+ rrddim_set_by_pointer(st, rd_InAdvertisements, Icmp6InNeighborAdvertisements);
+ rrddim_set_by_pointer(st, rd_OutAdvertisements, Icmp6OutNeighborAdvertisements);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_icmp_mldv2 == CONFIG_BOOLEAN_YES || (do_icmp_mldv2 == CONFIG_BOOLEAN_AUTO &&
+ (Icmp6InMLDv2Reports ||
+ Icmp6OutMLDv2Reports ||
+ netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
+ do_icmp_mldv2 = CONFIG_BOOLEAN_YES;
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_InMLDv2Reports = NULL,
+ *rd_OutMLDv2Reports = NULL;
+
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ RRD_TYPE_NET_SNMP6
+ , "icmpmldv2"
+ , NULL
+ , "icmp6"
+ , NULL
+ , "IPv6 ICMP MLDv2 Reports"
+ , "reports/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NET_SNMP6_NAME
+ , NETDATA_CHART_PRIO_IPV6_ICMP_LDV2
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rd_InMLDv2Reports = rrddim_add(st, "InMLDv2Reports", "received", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_OutMLDv2Reports = rrddim_add(st, "OutMLDv2Reports", "sent", -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_InMLDv2Reports, Icmp6InMLDv2Reports);
+ rrddim_set_by_pointer(st, rd_OutMLDv2Reports, Icmp6OutMLDv2Reports);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_icmp_types == CONFIG_BOOLEAN_YES || (do_icmp_types == CONFIG_BOOLEAN_AUTO &&
+ (Icmp6InType1 ||
+ Icmp6InType128 ||
+ Icmp6InType129 ||
+ Icmp6InType136 ||
+ Icmp6OutType1 ||
+ Icmp6OutType128 ||
+ Icmp6OutType129 ||
+ Icmp6OutType133 ||
+ Icmp6OutType135 ||
+ Icmp6OutType143 ||
+ netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
+ do_icmp_types = CONFIG_BOOLEAN_YES;
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_InType1 = NULL,
+ *rd_InType128 = NULL,
+ *rd_InType129 = NULL,
+ *rd_InType136 = NULL,
+ *rd_OutType1 = NULL,
+ *rd_OutType128 = NULL,
+ *rd_OutType129 = NULL,
+ *rd_OutType133 = NULL,
+ *rd_OutType135 = NULL,
+ *rd_OutType143 = NULL;
+
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ RRD_TYPE_NET_SNMP6
+ , "icmptypes"
+ , NULL
+ , "icmp6"
+ , NULL
+ , "IPv6 ICMP Types"
+ , "messages/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NET_SNMP6_NAME
+ , NETDATA_CHART_PRIO_IPV6_ICMP_TYPES
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rd_InType1 = rrddim_add(st, "InType1", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_InType128 = rrddim_add(st, "InType128", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_InType129 = rrddim_add(st, "InType129", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_InType136 = rrddim_add(st, "InType136", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_OutType1 = rrddim_add(st, "OutType1", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_OutType128 = rrddim_add(st, "OutType128", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_OutType129 = rrddim_add(st, "OutType129", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_OutType133 = rrddim_add(st, "OutType133", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_OutType135 = rrddim_add(st, "OutType135", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_OutType143 = rrddim_add(st, "OutType143", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_InType1, Icmp6InType1);
+ rrddim_set_by_pointer(st, rd_InType128, Icmp6InType128);
+ rrddim_set_by_pointer(st, rd_InType129, Icmp6InType129);
+ rrddim_set_by_pointer(st, rd_InType136, Icmp6InType136);
+ rrddim_set_by_pointer(st, rd_OutType1, Icmp6OutType1);
+ rrddim_set_by_pointer(st, rd_OutType128, Icmp6OutType128);
+ rrddim_set_by_pointer(st, rd_OutType129, Icmp6OutType129);
+ rrddim_set_by_pointer(st, rd_OutType133, Icmp6OutType133);
+ rrddim_set_by_pointer(st, rd_OutType135, Icmp6OutType135);
+ rrddim_set_by_pointer(st, rd_OutType143, Icmp6OutType143);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_ect == CONFIG_BOOLEAN_YES || (do_ect == CONFIG_BOOLEAN_AUTO &&
+ (Ip6InNoECTPkts ||
+ Ip6InECT1Pkts ||
+ Ip6InECT0Pkts ||
+ Ip6InCEPkts ||
+ netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
+ do_ect = CONFIG_BOOLEAN_YES;
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_InNoECTPkts = NULL,
+ *rd_InECT1Pkts = NULL,
+ *rd_InECT0Pkts = NULL,
+ *rd_InCEPkts = NULL;
+
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ RRD_TYPE_NET_SNMP6
+ , "ect"
+ , NULL
+ , "packets"
+ , NULL
+ , "IPv6 ECT Packets"
+ , "packets/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NET_SNMP6_NAME
+ , NETDATA_CHART_PRIO_IPV6_ECT
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rd_InNoECTPkts = rrddim_add(st, "InNoECTPkts", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_InECT1Pkts = rrddim_add(st, "InECT1Pkts", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_InECT0Pkts = rrddim_add(st, "InECT0Pkts", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_InCEPkts = rrddim_add(st, "InCEPkts", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_InNoECTPkts, Ip6InNoECTPkts);
+ rrddim_set_by_pointer(st, rd_InECT1Pkts, Ip6InECT1Pkts);
+ rrddim_set_by_pointer(st, rd_InECT0Pkts, Ip6InECT0Pkts);
+ rrddim_set_by_pointer(st, rd_InCEPkts, Ip6InCEPkts);
+ rrdset_done(st);
+ }
+
+ return 0;
+}
+
diff --git a/collectors/proc.plugin/proc_net_sockstat.c b/collectors/proc.plugin/proc_net_sockstat.c
new file mode 100644
index 0000000..994cbad
--- /dev/null
+++ b/collectors/proc.plugin/proc_net_sockstat.c
@@ -0,0 +1,538 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "plugin_proc.h"
+
+#define PLUGIN_PROC_MODULE_NET_SOCKSTAT_NAME "/proc/net/sockstat"
+
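+// Most recently parsed values from /proc/net/sockstat, one member per counter
+// this collector charts.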
+static struct proc_net_sockstat {
+ kernel_uint_t sockets_used;
+
+ kernel_uint_t tcp_inuse;
+ kernel_uint_t tcp_orphan;
+ kernel_uint_t tcp_tw;
+ kernel_uint_t tcp_alloc;
+ kernel_uint_t tcp_mem;
+
+ kernel_uint_t udp_inuse;
+ kernel_uint_t udp_mem;
+
+ kernel_uint_t udplite_inuse;
+
+ kernel_uint_t raw_inuse;
+
+ kernel_uint_t frag_inuse;
+ kernel_uint_t frag_memory;
+} sockstat_root = { 0 };
+
+
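+// /proc/sys/net/ipv4/tcp_mem holds three values measured in pages: low,
+// pressure and high. They are published as host variables, converted to KiB,
+// so health alarms can compare them against the collected TCP memory usage.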
+static int read_tcp_mem(void) {
+ static char *filename = NULL;
+ static RRDVAR *tcp_mem_low_threshold = NULL,
+ *tcp_mem_pressure_threshold = NULL,
+ *tcp_mem_high_threshold = NULL;
+
+ if(unlikely(!tcp_mem_low_threshold)) {
+ tcp_mem_low_threshold = rrdvar_custom_host_variable_create(localhost, "tcp_mem_low");
+ tcp_mem_pressure_threshold = rrdvar_custom_host_variable_create(localhost, "tcp_mem_pressure");
+ tcp_mem_high_threshold = rrdvar_custom_host_variable_create(localhost, "tcp_mem_high");
+ }
+
+ if(unlikely(!filename)) {
+ char buffer[FILENAME_MAX + 1];
+ snprintfz(buffer, FILENAME_MAX, "%s/proc/sys/net/ipv4/tcp_mem", netdata_configured_host_prefix);
+ filename = strdupz(buffer);
+ }
+
+ char buffer[200 + 1], *start, *end;
+ if(read_file(filename, buffer, 200) != 0) return 1;
+ buffer[200] = '\0';
+
+ unsigned long long low = 0, pressure = 0, high = 0;
+
+ start = buffer;
+ low = strtoull(start, &end, 10);
+
+ start = end;
+ pressure = strtoull(start, &end, 10);
+
+ start = end;
+ high = strtoull(start, &end, 10);
+
+ // fprintf(stderr, "TCP MEM low = %llu, pressure = %llu, high = %llu\n", low, pressure, high);
+
+ rrdvar_custom_host_variable_set(localhost, tcp_mem_low_threshold, low * sysconf(_SC_PAGESIZE) / 1024.0);
+ rrdvar_custom_host_variable_set(localhost, tcp_mem_pressure_threshold, pressure * sysconf(_SC_PAGESIZE) / 1024.0);
+ rrdvar_custom_host_variable_set(localhost, tcp_mem_high_threshold, high * sysconf(_SC_PAGESIZE) / 1024.0);
+
+ return 0;
+}
+
+static kernel_uint_t read_tcp_max_orphans(void) {
+ static char *filename = NULL;
+ static RRDVAR *tcp_max_orphans_var = NULL;
+
+ if(unlikely(!filename)) {
+ char buffer[FILENAME_MAX + 1];
+ snprintfz(buffer, FILENAME_MAX, "%s/proc/sys/net/ipv4/tcp_max_orphans", netdata_configured_host_prefix);
+ filename = strdupz(buffer);
+ }
+
+ unsigned long long tcp_max_orphans = 0;
+ if(read_single_number_file(filename, &tcp_max_orphans) == 0) {
+
+ if(unlikely(!tcp_max_orphans_var))
+ tcp_max_orphans_var = rrdvar_custom_host_variable_create(localhost, "tcp_max_orphans");
+
+ rrdvar_custom_host_variable_set(localhost, tcp_max_orphans_var, tcp_max_orphans);
+ return tcp_max_orphans;
+ }
+
+ return 0;
+}
+
+int do_proc_net_sockstat(int update_every, usec_t dt) {
+ (void)dt;
+
+ static procfile *ff = NULL;
+
+ static uint32_t hash_sockets = 0,
+ hash_raw = 0,
+ hash_frag = 0,
+ hash_tcp = 0,
+ hash_udp = 0,
+ hash_udplite = 0;
+
+ static long long update_constants_every = 60, update_constants_count = 0;
+
+ static ARL_BASE *arl_sockets = NULL;
+ static ARL_BASE *arl_tcp = NULL;
+ static ARL_BASE *arl_udp = NULL;
+ static ARL_BASE *arl_udplite = NULL;
+ static ARL_BASE *arl_raw = NULL;
+ static ARL_BASE *arl_frag = NULL;
+
+    static int do_sockets = -1,
+               do_tcp_sockets = -1,
+               do_tcp_mem = -1,
+               do_udp_sockets = -1,
+               do_udp_mem = -1,
+               do_udplite_sockets = -1,
+               do_raw_sockets = -1,
+               do_frag_sockets = -1,
+               do_frag_mem = -1;
+
+ static char *keys[7] = { NULL };
+ static uint32_t hashes[7] = { 0 };
+ static ARL_BASE *bases[7] = { NULL };
+
+ if(unlikely(!arl_sockets)) {
+ do_sockets = config_get_boolean_ondemand("plugin:proc:/proc/net/sockstat", "ipv4 sockets", CONFIG_BOOLEAN_AUTO);
+ do_tcp_sockets = config_get_boolean_ondemand("plugin:proc:/proc/net/sockstat", "ipv4 TCP sockets", CONFIG_BOOLEAN_AUTO);
+ do_tcp_mem = config_get_boolean_ondemand("plugin:proc:/proc/net/sockstat", "ipv4 TCP memory", CONFIG_BOOLEAN_AUTO);
+ do_udp_sockets = config_get_boolean_ondemand("plugin:proc:/proc/net/sockstat", "ipv4 UDP sockets", CONFIG_BOOLEAN_AUTO);
+ do_udp_mem = config_get_boolean_ondemand("plugin:proc:/proc/net/sockstat", "ipv4 UDP memory", CONFIG_BOOLEAN_AUTO);
+ do_udplite_sockets = config_get_boolean_ondemand("plugin:proc:/proc/net/sockstat", "ipv4 UDPLITE sockets", CONFIG_BOOLEAN_AUTO);
+ do_raw_sockets = config_get_boolean_ondemand("plugin:proc:/proc/net/sockstat", "ipv4 RAW sockets", CONFIG_BOOLEAN_AUTO);
+ do_frag_sockets = config_get_boolean_ondemand("plugin:proc:/proc/net/sockstat", "ipv4 FRAG sockets", CONFIG_BOOLEAN_AUTO);
+ do_frag_mem = config_get_boolean_ondemand("plugin:proc:/proc/net/sockstat", "ipv4 FRAG memory", CONFIG_BOOLEAN_AUTO);
+
+ update_constants_every = config_get_number("plugin:proc:/proc/net/sockstat", "update constants every", update_constants_every);
+ update_constants_count = update_constants_every;
+
+ arl_sockets = arl_create("sockstat/sockets", arl_callback_str2kernel_uint_t, 60);
+ arl_expect(arl_sockets, "used", &sockstat_root.sockets_used);
+
+ arl_tcp = arl_create("sockstat/TCP", arl_callback_str2kernel_uint_t, 60);
+ arl_expect(arl_tcp, "inuse", &sockstat_root.tcp_inuse);
+ arl_expect(arl_tcp, "orphan", &sockstat_root.tcp_orphan);
+ arl_expect(arl_tcp, "tw", &sockstat_root.tcp_tw);
+ arl_expect(arl_tcp, "alloc", &sockstat_root.tcp_alloc);
+ arl_expect(arl_tcp, "mem", &sockstat_root.tcp_mem);
+
+ arl_udp = arl_create("sockstat/UDP", arl_callback_str2kernel_uint_t, 60);
+ arl_expect(arl_udp, "inuse", &sockstat_root.udp_inuse);
+ arl_expect(arl_udp, "mem", &sockstat_root.udp_mem);
+
+ arl_udplite = arl_create("sockstat/UDPLITE", arl_callback_str2kernel_uint_t, 60);
+ arl_expect(arl_udplite, "inuse", &sockstat_root.udplite_inuse);
+
+ arl_raw = arl_create("sockstat/RAW", arl_callback_str2kernel_uint_t, 60);
+ arl_expect(arl_raw, "inuse", &sockstat_root.raw_inuse);
+
+ arl_frag = arl_create("sockstat/FRAG", arl_callback_str2kernel_uint_t, 60);
+ arl_expect(arl_frag, "inuse", &sockstat_root.frag_inuse);
+ arl_expect(arl_frag, "memory", &sockstat_root.frag_memory);
+
+ hash_sockets = simple_hash("sockets");
+ hash_tcp = simple_hash("TCP");
+ hash_udp = simple_hash("UDP");
+ hash_udplite = simple_hash("UDPLITE");
+ hash_raw = simple_hash("RAW");
+ hash_frag = simple_hash("FRAG");
+
+ keys[0] = "sockets"; hashes[0] = hash_sockets; bases[0] = arl_sockets;
+ keys[1] = "TCP"; hashes[1] = hash_tcp; bases[1] = arl_tcp;
+ keys[2] = "UDP"; hashes[2] = hash_udp; bases[2] = arl_udp;
+ keys[3] = "UDPLITE"; hashes[3] = hash_udplite; bases[3] = arl_udplite;
+ keys[4] = "RAW"; hashes[4] = hash_raw; bases[4] = arl_raw;
+ keys[5] = "FRAG"; hashes[5] = hash_frag; bases[5] = arl_frag;
+ keys[6] = NULL; // terminator
+ }
+
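+    // The sysctl-derived host variables (tcp_mem thresholds, tcp_max_orphans)
+    // change rarely, so refresh them only every "update constants every" seconds.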
+ update_constants_count += update_every;
+ if(unlikely(update_constants_count > update_constants_every)) {
+ read_tcp_max_orphans();
+ read_tcp_mem();
+ update_constants_count = 0;
+ }
+
+ if(unlikely(!ff)) {
+ char filename[FILENAME_MAX + 1];
+ snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/proc/net/sockstat");
+ ff = procfile_open(config_get("plugin:proc:/proc/net/sockstat", "filename to monitor", filename), " \t:", PROCFILE_FLAG_DEFAULT);
+ if(unlikely(!ff)) return 1;
+ }
+
+ ff = procfile_readall(ff);
+    if(unlikely(!ff)) return 0; // not an error: the file will be reopened and retried on the next iteration
+
+ size_t lines = procfile_lines(ff), l;
+
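+ // typical /proc/net/sockstat layout (values are examples; the fields present depend on the kernel):
+ // sockets: used 280
+ // TCP: inuse 27 orphan 0 tw 0 alloc 29 mem 1
+ // UDP: inuse 9 mem 4
+ // UDPLITE: inuse 0
+ // RAW: inuse 0
+ // FRAG: inuse 0 memory 0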
+ for(l = 0; l < lines ;l++) {
+ size_t words = procfile_linewords(ff, l);
+ char *key = procfile_lineword(ff, l, 0);
+ uint32_t hash = simple_hash(key);
+
+ int k;
+ for(k = 0; keys[k] ; k++) {
+ if(unlikely(hash == hashes[k] && strcmp(key, keys[k]) == 0)) {
+ // fprintf(stderr, "KEY: '%s', l=%zu, w=1, words=%zu\n", key, l, words);
+ ARL_BASE *arl = bases[k];
+ arl_begin(arl);
+ size_t w = 1;
+
+ while(w + 1 < words) {
+ char *name = procfile_lineword(ff, l, w); w++;
+ char *value = procfile_lineword(ff, l, w); w++;
+ // fprintf(stderr, " > NAME '%s', VALUE '%s', l=%zu, w=%zu, words=%zu\n", name, value, l, w, words);
+ if(unlikely(arl_check(arl, name, value) != 0))
+ break;
+ }
+
+ break;
+ }
+ }
+ }
+
+ // ------------------------------------------------------------------------
+
+ if(do_sockets == CONFIG_BOOLEAN_YES || (do_sockets == CONFIG_BOOLEAN_AUTO &&
+ (sockstat_root.sockets_used ||
+ netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
+ do_sockets = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_used = NULL;
+
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "ipv4"
+ , "sockstat_sockets"
+ , NULL
+ , "sockets"
+ , NULL
+ , "IPv4 Sockets Used"
+ , "sockets"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NET_SOCKSTAT_NAME
+ , NETDATA_CHART_PRIO_IPV4_SOCKETS
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rd_used = rrddim_add(st, "used", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_used, (collected_number)sockstat_root.sockets_used);
+ rrdset_done(st);
+ }
+
+ // ------------------------------------------------------------------------
+
+ if(do_tcp_sockets == CONFIG_BOOLEAN_YES || (do_tcp_sockets == CONFIG_BOOLEAN_AUTO &&
+ (sockstat_root.tcp_inuse ||
+ sockstat_root.tcp_orphan ||
+ sockstat_root.tcp_tw ||
+ sockstat_root.tcp_alloc ||
+ netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
+ do_tcp_sockets = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_inuse = NULL,
+ *rd_orphan = NULL,
+ *rd_timewait = NULL,
+ *rd_alloc = NULL;
+
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "ipv4"
+ , "sockstat_tcp_sockets"
+ , NULL
+ , "tcp"
+ , NULL
+ , "IPv4 TCP Sockets"
+ , "sockets"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NET_SOCKSTAT_NAME
+ , NETDATA_CHART_PRIO_IPV4_TCP_SOCKETS
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rd_alloc = rrddim_add(st, "alloc", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ rd_orphan = rrddim_add(st, "orphan", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ rd_inuse = rrddim_add(st, "inuse", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ rd_timewait = rrddim_add(st, "timewait", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_inuse, (collected_number)sockstat_root.tcp_inuse);
+ rrddim_set_by_pointer(st, rd_orphan, (collected_number)sockstat_root.tcp_orphan);
+ rrddim_set_by_pointer(st, rd_timewait, (collected_number)sockstat_root.tcp_tw);
+ rrddim_set_by_pointer(st, rd_alloc, (collected_number)sockstat_root.tcp_alloc);
+ rrdset_done(st);
+ }
+
+ // ------------------------------------------------------------------------
+
+ if(do_tcp_mem == CONFIG_BOOLEAN_YES || (do_tcp_mem == CONFIG_BOOLEAN_AUTO &&
+ (sockstat_root.tcp_mem || netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
+ do_tcp_mem = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_mem = NULL;
+
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "ipv4"
+ , "sockstat_tcp_mem"
+ , NULL
+ , "tcp"
+ , NULL
+ , "IPv4 TCP Sockets Memory"
+ , "KiB"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NET_SOCKSTAT_NAME
+ , NETDATA_CHART_PRIO_IPV4_TCP_MEM
+ , update_every
+ , RRDSET_TYPE_AREA
+ );
+
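+ // the kernel reports tcp mem in pages; multiply by the page size and divide by 1024 to chart KiB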
+ rd_mem = rrddim_add(st, "mem", NULL, sysconf(_SC_PAGESIZE), 1024, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_mem, (collected_number)sockstat_root.tcp_mem);
+ rrdset_done(st);
+ }
+
+ // ------------------------------------------------------------------------
+
+ if(do_udp_sockets == CONFIG_BOOLEAN_YES || (do_udp_sockets == CONFIG_BOOLEAN_AUTO &&
+ (sockstat_root.udp_inuse ||
+ netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
+ do_udp_sockets = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_inuse = NULL;
+
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "ipv4"
+ , "sockstat_udp_sockets"
+ , NULL
+ , "udp"
+ , NULL
+ , "IPv4 UDP Sockets"
+ , "sockets"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NET_SOCKSTAT_NAME
+ , NETDATA_CHART_PRIO_IPV4_UDP
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rd_inuse = rrddim_add(st, "inuse", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_inuse, (collected_number)sockstat_root.udp_inuse);
+ rrdset_done(st);
+ }
+
+ // ------------------------------------------------------------------------
+
+ if(do_udp_mem == CONFIG_BOOLEAN_YES || (do_udp_mem == CONFIG_BOOLEAN_AUTO &&
+ (sockstat_root.udp_mem ||
+ netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
+ do_udp_mem = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_mem = NULL;
+
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "ipv4"
+ , "sockstat_udp_mem"
+ , NULL
+ , "udp"
+ , NULL
+ , "IPv4 UDP Sockets Memory"
+ , "KiB"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NET_SOCKSTAT_NAME
+ , NETDATA_CHART_PRIO_IPV4_UDP_MEM
+ , update_every
+ , RRDSET_TYPE_AREA
+ );
+
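+ // udp mem is in pages too; convert it to KiB the same way as tcp mem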
+ rd_mem = rrddim_add(st, "mem", NULL, sysconf(_SC_PAGESIZE), 1024, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_mem, (collected_number)sockstat_root.udp_mem);
+ rrdset_done(st);
+ }
+
+ // ------------------------------------------------------------------------
+
+ if(do_udplite_sockets == CONFIG_BOOLEAN_YES || (do_udplite_sockets == CONFIG_BOOLEAN_AUTO &&
+ (sockstat_root.udplite_inuse ||
+ netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
+ do_udplite_sockets = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_inuse = NULL;
+
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "ipv4"
+ , "sockstat_udplite_sockets"
+ , NULL
+ , "udplite"
+ , NULL
+ , "IPv4 UDPLITE Sockets"
+ , "sockets"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NET_SOCKSTAT_NAME
+ , NETDATA_CHART_PRIO_IPV4_UDPLITE
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rd_inuse = rrddim_add(st, "inuse", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_inuse, (collected_number)sockstat_root.udplite_inuse);
+ rrdset_done(st);
+ }
+
+ // ------------------------------------------------------------------------
+
+ if(do_raw_sockets == CONFIG_BOOLEAN_YES || (do_raw_sockets == CONFIG_BOOLEAN_AUTO &&
+ (sockstat_root.raw_inuse ||
+ netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
+ do_raw_sockets = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_inuse = NULL;
+
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "ipv4"
+ , "sockstat_raw_sockets"
+ , NULL
+ , "raw"
+ , NULL
+ , "IPv4 RAW Sockets"
+ , "sockets"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NET_SOCKSTAT_NAME
+ , NETDATA_CHART_PRIO_IPV4_RAW
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rd_inuse = rrddim_add(st, "inuse", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_inuse, (collected_number)sockstat_root.raw_inuse);
+ rrdset_done(st);
+ }
+
+ // ------------------------------------------------------------------------
+
+ if(do_frag_sockets == CONFIG_BOOLEAN_YES || (do_frag_sockets == CONFIG_BOOLEAN_AUTO &&
+ (sockstat_root.frag_inuse ||
+ netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
+ do_frag_sockets = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_inuse = NULL;
+
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "ipv4"
+ , "sockstat_frag_sockets"
+ , NULL
+ , "fragments"
+ , NULL
+ , "IPv4 FRAG Sockets"
+ , "fragments"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NET_SOCKSTAT_NAME
+ , NETDATA_CHART_PRIO_IPV4_FRAGMENTS
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rd_inuse = rrddim_add(st, "inuse", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_inuse, (collected_number)sockstat_root.frag_inuse);
+ rrdset_done(st);
+ }
+
+ // ------------------------------------------------------------------------
+
+ if(do_frag_mem == CONFIG_BOOLEAN_YES || (do_frag_mem == CONFIG_BOOLEAN_AUTO &&
+ (sockstat_root.frag_memory ||
+ netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
+ do_frag_mem = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_mem = NULL;
+
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "ipv4"
+ , "sockstat_frag_mem"
+ , NULL
+ , "fragments"
+ , NULL
+ , "IPv4 FRAG Sockets Memory"
+ , "KiB"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NET_SOCKSTAT_NAME
+ , NETDATA_CHART_PRIO_IPV4_FRAGMENTS_MEM
+ , update_every
+ , RRDSET_TYPE_AREA
+ );
+
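+ // frag memory is reported in bytes, so only the 1024 divisor is needed to chart KiB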
+ rd_mem = rrddim_add(st, "mem", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_mem, (collected_number)sockstat_root.frag_memory);
+ rrdset_done(st);
+ }
+
+ return 0;
+}
+
diff --git a/collectors/proc.plugin/proc_net_sockstat6.c b/collectors/proc.plugin/proc_net_sockstat6.c
new file mode 100644
index 0000000..ce8c9e0
--- /dev/null
+++ b/collectors/proc.plugin/proc_net_sockstat6.c
@@ -0,0 +1,283 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "plugin_proc.h"
+
+#define PLUGIN_PROC_MODULE_NET_SOCKSTAT6_NAME "/proc/net/sockstat6"
+
+static struct proc_net_sockstat6 {
+ kernel_uint_t tcp6_inuse;
+ kernel_uint_t udp6_inuse;
+ kernel_uint_t udplite6_inuse;
+ kernel_uint_t raw6_inuse;
+ kernel_uint_t frag6_inuse;
+} sockstat6_root = { 0 };
+
+int do_proc_net_sockstat6(int update_every, usec_t dt) {
+ (void)dt;
+
+ static procfile *ff = NULL;
+
+ static uint32_t hash_raw = 0,
+ hash_frag = 0,
+ hash_tcp = 0,
+ hash_udp = 0,
+ hash_udplite = 0;
+
+ static ARL_BASE *arl_tcp = NULL;
+ static ARL_BASE *arl_udp = NULL;
+ static ARL_BASE *arl_udplite = NULL;
+ static ARL_BASE *arl_raw = NULL;
+ static ARL_BASE *arl_frag = NULL;
+
+ static int do_tcp_sockets = -1, do_udp_sockets = -1, do_udplite_sockets = -1, do_raw_sockets = -1, do_frag_sockets = -1;
+
+ static char *keys[6] = { NULL };
+ static uint32_t hashes[6] = { 0 };
+ static ARL_BASE *bases[6] = { NULL };
+
+ if(unlikely(!arl_tcp)) {
+ do_tcp_sockets = config_get_boolean_ondemand("plugin:proc:/proc/net/sockstat6", "ipv6 TCP sockets", CONFIG_BOOLEAN_AUTO);
+ do_udp_sockets = config_get_boolean_ondemand("plugin:proc:/proc/net/sockstat6", "ipv6 UDP sockets", CONFIG_BOOLEAN_AUTO);
+ do_udplite_sockets = config_get_boolean_ondemand("plugin:proc:/proc/net/sockstat6", "ipv6 UDPLITE sockets", CONFIG_BOOLEAN_AUTO);
+ do_raw_sockets = config_get_boolean_ondemand("plugin:proc:/proc/net/sockstat6", "ipv6 RAW sockets", CONFIG_BOOLEAN_AUTO);
+ do_frag_sockets = config_get_boolean_ondemand("plugin:proc:/proc/net/sockstat6", "ipv6 FRAG sockets", CONFIG_BOOLEAN_AUTO);
+
+ arl_tcp = arl_create("sockstat6/TCP6", arl_callback_str2kernel_uint_t, 60);
+ arl_expect(arl_tcp, "inuse", &sockstat6_root.tcp6_inuse);
+
+ arl_udp = arl_create("sockstat6/UDP6", arl_callback_str2kernel_uint_t, 60);
+ arl_expect(arl_udp, "inuse", &sockstat6_root.udp6_inuse);
+
+ arl_udplite = arl_create("sockstat6/UDPLITE6", arl_callback_str2kernel_uint_t, 60);
+ arl_expect(arl_udplite, "inuse", &sockstat6_root.udplite6_inuse);
+
+ arl_raw = arl_create("sockstat6/RAW6", arl_callback_str2kernel_uint_t, 60);
+ arl_expect(arl_raw, "inuse", &sockstat6_root.raw6_inuse);
+
+ arl_frag = arl_create("sockstat6/FRAG6", arl_callback_str2kernel_uint_t, 60);
+ arl_expect(arl_frag, "inuse", &sockstat6_root.frag6_inuse);
+
+ hash_tcp = simple_hash("TCP6");
+ hash_udp = simple_hash("UDP6");
+ hash_udplite = simple_hash("UDPLITE6");
+ hash_raw = simple_hash("RAW6");
+ hash_frag = simple_hash("FRAG6");
+
+ keys[0] = "TCP6"; hashes[0] = hash_tcp; bases[0] = arl_tcp;
+ keys[1] = "UDP6"; hashes[1] = hash_udp; bases[1] = arl_udp;
+ keys[2] = "UDPLITE6"; hashes[2] = hash_udplite; bases[2] = arl_udplite;
+ keys[3] = "RAW6"; hashes[3] = hash_raw; bases[3] = arl_raw;
+ keys[4] = "FRAG6"; hashes[4] = hash_frag; bases[4] = arl_frag;
+ keys[5] = NULL; // terminator
+ }
+
+ if(unlikely(!ff)) {
+ char filename[FILENAME_MAX + 1];
+ snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/proc/net/sockstat6");
+ ff = procfile_open(config_get("plugin:proc:/proc/net/sockstat6", "filename to monitor", filename), " \t:", PROCFILE_FLAG_DEFAULT);
+ if(unlikely(!ff)) return 1;
+ }
+
+ ff = procfile_readall(ff);
+ if(unlikely(!ff)) return 0; // return 0 so that we retry opening it next time
+
+ size_t lines = procfile_lines(ff), l;
+
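+ // typical /proc/net/sockstat6 layout (values are examples):
+ // TCP6: inuse 3
+ // UDP6: inuse 2
+ // UDPLITE6: inuse 0
+ // RAW6: inuse 0
+ // FRAG6: inuse 0 memory 0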
+ for(l = 0; l < lines ;l++) {
+ size_t words = procfile_linewords(ff, l);
+ char *key = procfile_lineword(ff, l, 0);
+ uint32_t hash = simple_hash(key);
+
+ int k;
+ for(k = 0; keys[k] ; k++) {
+ if(unlikely(hash == hashes[k] && strcmp(key, keys[k]) == 0)) {
+ // fprintf(stderr, "KEY: '%s', l=%zu, w=1, words=%zu\n", key, l, words);
+ ARL_BASE *arl = bases[k];
+ arl_begin(arl);
+ size_t w = 1;
+
+ while(w + 1 < words) {
+ char *name = procfile_lineword(ff, l, w); w++;
+ char *value = procfile_lineword(ff, l, w); w++;
+ // fprintf(stderr, " > NAME '%s', VALUE '%s', l=%zu, w=%zu, words=%zu\n", name, value, l, w, words);
+ if(unlikely(arl_check(arl, name, value) != 0))
+ break;
+ }
+
+ break;
+ }
+ }
+ }
+
+ // ------------------------------------------------------------------------
+
+ if(do_tcp_sockets == CONFIG_BOOLEAN_YES || (do_tcp_sockets == CONFIG_BOOLEAN_AUTO &&
+ (sockstat6_root.tcp6_inuse ||
+ netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
+ do_tcp_sockets = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_inuse = NULL;
+
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "ipv6"
+ , "sockstat6_tcp_sockets"
+ , NULL
+ , "tcp6"
+ , NULL
+ , "IPv6 TCP Sockets"
+ , "sockets"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NET_SOCKSTAT6_NAME
+ , NETDATA_CHART_PRIO_IPV6_TCP
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rd_inuse = rrddim_add(st, "inuse", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_inuse, (collected_number)sockstat6_root.tcp6_inuse);
+ rrdset_done(st);
+ }
+
+ // ------------------------------------------------------------------------
+
+ if(do_udp_sockets == CONFIG_BOOLEAN_YES || (do_udp_sockets == CONFIG_BOOLEAN_AUTO &&
+ (sockstat6_root.udp6_inuse ||
+ netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
+ do_udp_sockets = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_inuse = NULL;
+
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "ipv6"
+ , "sockstat6_udp_sockets"
+ , NULL
+ , "udp6"
+ , NULL
+ , "IPv6 UDP Sockets"
+ , "sockets"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NET_SOCKSTAT6_NAME
+ , NETDATA_CHART_PRIO_IPV6_UDP
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rd_inuse = rrddim_add(st, "inuse", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_inuse, (collected_number)sockstat6_root.udp6_inuse);
+ rrdset_done(st);
+ }
+
+ // ------------------------------------------------------------------------
+
+ if(do_udplite_sockets == CONFIG_BOOLEAN_YES || (do_udplite_sockets == CONFIG_BOOLEAN_AUTO &&
+ (sockstat6_root.udplite6_inuse ||
+ netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
+ do_udplite_sockets = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_inuse = NULL;
+
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "ipv6"
+ , "sockstat6_udplite_sockets"
+ , NULL
+ , "udplite6"
+ , NULL
+ , "IPv6 UDPLITE Sockets"
+ , "sockets"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NET_SOCKSTAT6_NAME
+ , NETDATA_CHART_PRIO_IPV6_UDPLITE
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rd_inuse = rrddim_add(st, "inuse", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_inuse, (collected_number)sockstat6_root.udplite6_inuse);
+ rrdset_done(st);
+ }
+
+ // ------------------------------------------------------------------------
+
+ if(do_raw_sockets == CONFIG_BOOLEAN_YES || (do_raw_sockets == CONFIG_BOOLEAN_AUTO &&
+ (sockstat6_root.raw6_inuse ||
+ netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
+ do_raw_sockets = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_inuse = NULL;
+
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "ipv6"
+ , "sockstat6_raw_sockets"
+ , NULL
+ , "raw6"
+ , NULL
+ , "IPv6 RAW Sockets"
+ , "sockets"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NET_SOCKSTAT6_NAME
+ , NETDATA_CHART_PRIO_IPV6_RAW
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rd_inuse = rrddim_add(st, "inuse", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_inuse, (collected_number)sockstat6_root.raw6_inuse);
+ rrdset_done(st);
+ }
+
+ // ------------------------------------------------------------------------
+
+ if(do_frag_sockets == CONFIG_BOOLEAN_YES || (do_frag_sockets == CONFIG_BOOLEAN_AUTO &&
+ (sockstat6_root.frag6_inuse ||
+ netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
+ do_frag_sockets = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_inuse = NULL;
+
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "ipv6"
+ , "sockstat6_frag_sockets"
+ , NULL
+ , "fragments6"
+ , NULL
+ , "IPv6 FRAG Sockets"
+ , "fragments"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NET_SOCKSTAT6_NAME
+ , NETDATA_CHART_PRIO_IPV6_FRAGMENTS
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rd_inuse = rrddim_add(st, "inuse", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_inuse, (collected_number)sockstat6_root.frag6_inuse);
+ rrdset_done(st);
+ }
+
+ return 0;
+}
diff --git a/collectors/proc.plugin/proc_net_softnet_stat.c b/collectors/proc.plugin/proc_net_softnet_stat.c
new file mode 100644
index 0000000..a29cccc
--- /dev/null
+++ b/collectors/proc.plugin/proc_net_softnet_stat.c
@@ -0,0 +1,151 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "plugin_proc.h"
+
+#define PLUGIN_PROC_MODULE_NET_SOFTNET_NAME "/proc/net/softnet_stat"
+
+static inline char *softnet_column_name(size_t column) {
+ switch(column) {
+ // https://github.com/torvalds/linux/blob/a7fd20d1c476af4563e66865213474a2f9f473a4/net/core/net-procfs.c#L161-L166
+ case 0: return "processed";
+ case 1: return "dropped";
+ case 2: return "squeezed";
+ case 9: return "received_rps";
+ case 10: return "flow_limit_count";
+ default: return NULL;
+ }
+}
+
+int do_proc_net_softnet_stat(int update_every, usec_t dt) {
+ (void)dt;
+
+ static procfile *ff = NULL;
+ static int do_per_core = -1;
+ static size_t allocated_lines = 0, allocated_columns = 0;
+ static uint32_t *data = NULL;
+
+ if(unlikely(do_per_core == -1)) do_per_core = config_get_boolean("plugin:proc:/proc/net/softnet_stat", "softnet_stat per core", 1);
+
+ if(unlikely(!ff)) {
+ char filename[FILENAME_MAX + 1];
+ snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/proc/net/softnet_stat");
+ ff = procfile_open(config_get("plugin:proc:/proc/net/softnet_stat", "filename to monitor", filename), " \t", PROCFILE_FLAG_DEFAULT);
+ if(unlikely(!ff)) return 1;
+ }
+
+ ff = procfile_readall(ff);
+ if(unlikely(!ff)) return 0; // return 0 so that we retry opening it next time
+
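+ // /proc/net/softnet_stat has one line per CPU; every column is a
+ // hexadecimal counter and the column count varies with the kernel version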
+ size_t lines = procfile_lines(ff), l;
+ size_t words = procfile_linewords(ff, 0), w;
+
+ if(unlikely(!lines || !words)) {
+ error("Cannot read /proc/net/softnet_stat, %zu lines and %zu columns reported.", lines, words);
+ return 1;
+ }
+
+ if(unlikely(lines > 200)) lines = 200;
+ if(unlikely(words > 50)) words = 50;
+
+ if(unlikely(!data || lines > allocated_lines || words > allocated_columns)) {
+ freez(data);
+ allocated_lines = lines;
+ allocated_columns = words;
+ data = mallocz((allocated_lines + 1) * allocated_columns * sizeof(uint32_t));
+ }
+
+ // initialize to zero
+ memset(data, 0, (allocated_lines + 1) * allocated_columns * sizeof(uint32_t));
+
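+ // data[] layout: row 0 accumulates the totals across all CPUs,
+ // rows 1..lines hold the per-CPU values of the monitored columns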
+ // parse the values
+ for(l = 0; l < lines ;l++) {
+ words = procfile_linewords(ff, l);
+ if(unlikely(!words)) continue;
+
+ if(unlikely(words > allocated_columns))
+ words = allocated_columns;
+
+ for(w = 0; w < words ; w++) {
+ if(unlikely(softnet_column_name(w))) {
+ uint32_t t = (uint32_t)strtoul(procfile_lineword(ff, l, w), NULL, 16);
+ data[w] += t;
+ data[((l + 1) * allocated_columns) + w] = t;
+ }
+ }
+ }
+
+ if(unlikely(data[(lines * allocated_columns)] == 0))
+ lines--;
+
+ RRDSET *st;
+
+ // --------------------------------------------------------------------
+
+ st = rrdset_find_active_bytype_localhost("system", "softnet_stat");
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "system"
+ , "softnet_stat"
+ , NULL
+ , "softnet_stat"
+ , "system.softnet_stat"
+ , "System softnet_stat"
+ , "events/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NET_SOFTNET_NAME
+ , NETDATA_CHART_PRIO_SYSTEM_SOFTNET_STAT
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+ for(w = 0; w < allocated_columns ;w++)
+ if(unlikely(softnet_column_name(w)))
+ rrddim_add(st, softnet_column_name(w), NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ for(w = 0; w < allocated_columns ;w++)
+ if(unlikely(softnet_column_name(w)))
+ rrddim_set(st, softnet_column_name(w), data[w]);
+
+ rrdset_done(st);
+
+ if(do_per_core) {
+ for(l = 0; l < lines ;l++) {
+ char id[50+1];
+ snprintfz(id, 50, "cpu%zu_softnet_stat", l);
+
+ st = rrdset_find_active_bytype_localhost("cpu", id);
+ if(unlikely(!st)) {
+ char title[100+1];
+ snprintfz(title, 100, "CPU%zu softnet_stat", l);
+
+ st = rrdset_create_localhost(
+ "cpu"
+ , id
+ , NULL
+ , "softnet_stat"
+ , "cpu.softnet_stat"
+ , title
+ , "events/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NET_SOFTNET_NAME
+ , NETDATA_CHART_PRIO_SOFTNET_PER_CORE + l
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+ for(w = 0; w < allocated_columns ;w++)
+ if(unlikely(softnet_column_name(w)))
+ rrddim_add(st, softnet_column_name(w), NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ for(w = 0; w < allocated_columns ;w++)
+ if(unlikely(softnet_column_name(w)))
+ rrddim_set(st, softnet_column_name(w), data[((l + 1) * allocated_columns) + w]);
+
+ rrdset_done(st);
+ }
+ }
+
+ return 0;
+}
diff --git a/collectors/proc.plugin/proc_net_stat_conntrack.c b/collectors/proc.plugin/proc_net_stat_conntrack.c
new file mode 100644
index 0000000..642e33f
--- /dev/null
+++ b/collectors/proc.plugin/proc_net_stat_conntrack.c
@@ -0,0 +1,351 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "plugin_proc.h"
+
+#define RRD_TYPE_NET_STAT_NETFILTER "netfilter"
+#define RRD_TYPE_NET_STAT_CONNTRACK "conntrack"
+#define PLUGIN_PROC_MODULE_CONNTRACK_NAME "/proc/net/stat/nf_conntrack"
+
+int do_proc_net_stat_conntrack(int update_every, usec_t dt) {
+ static procfile *ff = NULL;
+ static int do_sockets = -1, do_new = -1, do_changes = -1, do_expect = -1, do_search = -1, do_errors = -1;
+ static usec_t get_max_every = 10 * USEC_PER_SEC, usec_since_last_max = 0;
+ static int read_full = 1;
+ static char *nf_conntrack_filename, *nf_conntrack_count_filename, *nf_conntrack_max_filename;
+ static RRDVAR *rrdvar_max = NULL;
+
+ unsigned long long aentries = 0, asearched = 0, afound = 0, anew = 0, ainvalid = 0, aignore = 0, adelete = 0, adelete_list = 0,
+ ainsert = 0, ainsert_failed = 0, adrop = 0, aearly_drop = 0, aicmp_error = 0, aexpect_new = 0, aexpect_create = 0, aexpect_delete = 0, asearch_restart = 0;
+
+ if(unlikely(do_sockets == -1)) {
+ char filename[FILENAME_MAX + 1];
+ snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/proc/net/stat/nf_conntrack");
+ nf_conntrack_filename = config_get("plugin:proc:/proc/net/stat/nf_conntrack", "filename to monitor", filename);
+
+ snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/proc/sys/net/netfilter/nf_conntrack_max");
+ nf_conntrack_max_filename = config_get("plugin:proc:/proc/sys/net/netfilter/nf_conntrack_max", "filename to monitor", filename);
+ usec_since_last_max = get_max_every = config_get_number("plugin:proc:/proc/sys/net/netfilter/nf_conntrack_max", "read every seconds", 10) * USEC_PER_SEC;
+
+ read_full = 1;
+ ff = procfile_open(nf_conntrack_filename, " \t:", PROCFILE_FLAG_DEFAULT);
+ if(!ff) read_full = 0;
+
+ do_new = config_get_boolean("plugin:proc:/proc/net/stat/nf_conntrack", "netfilter new connections", read_full);
+ do_changes = config_get_boolean("plugin:proc:/proc/net/stat/nf_conntrack", "netfilter connection changes", read_full);
+ do_expect = config_get_boolean("plugin:proc:/proc/net/stat/nf_conntrack", "netfilter connection expectations", read_full);
+ do_search = config_get_boolean("plugin:proc:/proc/net/stat/nf_conntrack", "netfilter connection searches", read_full);
+ do_errors = config_get_boolean("plugin:proc:/proc/net/stat/nf_conntrack", "netfilter errors", read_full);
+
+ do_sockets = 1;
+ if(!read_full) {
+ snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/proc/sys/net/netfilter/nf_conntrack_count");
+ nf_conntrack_count_filename = config_get("plugin:proc:/proc/sys/net/netfilter/nf_conntrack_count", "filename to monitor", filename);
+
+ if(read_single_number_file(nf_conntrack_count_filename, &aentries))
+ do_sockets = 0;
+ }
+
+ do_sockets = config_get_boolean("plugin:proc:/proc/net/stat/nf_conntrack", "netfilter connections", do_sockets);
+
+ if(!do_sockets && !read_full)
+ return 1;
+
+ rrdvar_max = rrdvar_custom_host_variable_create(localhost, "netfilter_conntrack_max");
+ }
+
+ if(likely(read_full)) {
+ if(unlikely(!ff)) {
+ ff = procfile_open(nf_conntrack_filename, " \t:", PROCFILE_FLAG_DEFAULT);
+ if(unlikely(!ff))
+ return 0; // return 0 so that we retry opening it next time
+ }
+
+ ff = procfile_readall(ff);
+ if(unlikely(!ff))
+ return 0; // return 0 so that we retry reading it next time
+
+ size_t lines = procfile_lines(ff), l;
+
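+ // one line per CPU, all values in hex; the 17 columns parsed below are:
+ // entries searched found new invalid ignore delete delete_list insert
+ // insert_failed drop early_drop icmp_error expect_new expect_create
+ // expect_delete search_restart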
+ for(l = 1; l < lines ;l++) {
+ size_t words = procfile_linewords(ff, l);
+ if(unlikely(words < 17)) {
+ if(unlikely(words)) error("Cannot read /proc/net/stat/nf_conntrack line. Expected 17 fields, read %zu.", words);
+ continue;
+ }
+
+ unsigned long long tentries = 0, tsearched = 0, tfound = 0, tnew = 0, tinvalid = 0, tignore = 0, tdelete = 0, tdelete_list = 0,
+ tinsert = 0, tinsert_failed = 0, tdrop = 0, tearly_drop = 0, ticmp_error = 0, texpect_new = 0, texpect_create = 0, texpect_delete = 0, tsearch_restart = 0;
+
+ tentries = strtoull(procfile_lineword(ff, l, 0), NULL, 16);
+ tsearched = strtoull(procfile_lineword(ff, l, 1), NULL, 16);
+ tfound = strtoull(procfile_lineword(ff, l, 2), NULL, 16);
+ tnew = strtoull(procfile_lineword(ff, l, 3), NULL, 16);
+ tinvalid = strtoull(procfile_lineword(ff, l, 4), NULL, 16);
+ tignore = strtoull(procfile_lineword(ff, l, 5), NULL, 16);
+ tdelete = strtoull(procfile_lineword(ff, l, 6), NULL, 16);
+ tdelete_list = strtoull(procfile_lineword(ff, l, 7), NULL, 16);
+ tinsert = strtoull(procfile_lineword(ff, l, 8), NULL, 16);
+ tinsert_failed = strtoull(procfile_lineword(ff, l, 9), NULL, 16);
+ tdrop = strtoull(procfile_lineword(ff, l, 10), NULL, 16);
+ tearly_drop = strtoull(procfile_lineword(ff, l, 11), NULL, 16);
+ ticmp_error = strtoull(procfile_lineword(ff, l, 12), NULL, 16);
+ texpect_new = strtoull(procfile_lineword(ff, l, 13), NULL, 16);
+ texpect_create = strtoull(procfile_lineword(ff, l, 14), NULL, 16);
+ texpect_delete = strtoull(procfile_lineword(ff, l, 15), NULL, 16);
+ tsearch_restart = strtoull(procfile_lineword(ff, l, 16), NULL, 16);
+
+ if(unlikely(!aentries)) aentries = tentries;
+
+ // sum all the cpus together
+ asearched += tsearched; // conntrack.search
+ afound += tfound; // conntrack.search
+ anew += tnew; // conntrack.new
+ ainvalid += tinvalid; // conntrack.new
+ aignore += tignore; // conntrack.new
+ adelete += tdelete; // conntrack.changes
+ adelete_list += tdelete_list; // conntrack.changes
+ ainsert += tinsert; // conntrack.changes
+ ainsert_failed += tinsert_failed; // conntrack.errors
+ adrop += tdrop; // conntrack.errors
+ aearly_drop += tearly_drop; // conntrack.errors
+ aicmp_error += ticmp_error; // conntrack.errors
+ aexpect_new += texpect_new; // conntrack.expect
+ aexpect_create += texpect_create; // conntrack.expect
+ aexpect_delete += texpect_delete; // conntrack.expect
+ asearch_restart += tsearch_restart; // conntrack.search
+ }
+ }
+ else {
+ if(unlikely(read_single_number_file(nf_conntrack_count_filename, &aentries)))
+ return 0; // return 0 so that we retry reading it next time
+ }
+
+ usec_since_last_max += dt;
+ if(unlikely(rrdvar_max && usec_since_last_max >= get_max_every)) {
+ usec_since_last_max = 0;
+
+ unsigned long long max;
+ if(likely(!read_single_number_file(nf_conntrack_max_filename, &max)))
+ rrdvar_custom_host_variable_set(localhost, rrdvar_max, max);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_sockets) {
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_connections = NULL;
+
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ RRD_TYPE_NET_STAT_NETFILTER
+ , RRD_TYPE_NET_STAT_CONNTRACK "_sockets"
+ , NULL
+ , RRD_TYPE_NET_STAT_CONNTRACK
+ , NULL
+ , "Connection Tracker Connections"
+ , "active connections"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_CONNTRACK_NAME
+ , NETDATA_CHART_PRIO_NETFILTER_SOCKETS
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rd_connections = rrddim_add(st, "connections", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_connections, aentries);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_new) {
+ static RRDSET *st = NULL;
+ static RRDDIM
+ *rd_new = NULL,
+ *rd_ignore = NULL,
+ *rd_invalid = NULL;
+
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ RRD_TYPE_NET_STAT_NETFILTER
+ , RRD_TYPE_NET_STAT_CONNTRACK "_new"
+ , NULL
+ , RRD_TYPE_NET_STAT_CONNTRACK
+ , NULL
+ , "Connection Tracker New Connections"
+ , "connections/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_CONNTRACK_NAME
+ , NETDATA_CHART_PRIO_NETFILTER_NEW
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rd_new = rrddim_add(st, "new", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_ignore = rrddim_add(st, "ignore", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_invalid = rrddim_add(st, "invalid", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_new, anew);
+ rrddim_set_by_pointer(st, rd_ignore, aignore);
+ rrddim_set_by_pointer(st, rd_invalid, ainvalid);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_changes) {
+ static RRDSET *st = NULL;
+ static RRDDIM
+ *rd_inserted = NULL,
+ *rd_deleted = NULL,
+ *rd_delete_list = NULL;
+
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ RRD_TYPE_NET_STAT_NETFILTER
+ , RRD_TYPE_NET_STAT_CONNTRACK "_changes"
+ , NULL
+ , RRD_TYPE_NET_STAT_CONNTRACK
+ , NULL
+ , "Connection Tracker Changes"
+ , "changes/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_CONNTRACK_NAME
+ , NETDATA_CHART_PRIO_NETFILTER_CHANGES
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+ rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
+
+ rd_inserted = rrddim_add(st, "inserted", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_deleted = rrddim_add(st, "deleted", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_delete_list = rrddim_add(st, "delete_list", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_inserted, ainsert);
+ rrddim_set_by_pointer(st, rd_deleted, adelete);
+ rrddim_set_by_pointer(st, rd_delete_list, adelete_list);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_expect) {
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_created = NULL,
+ *rd_deleted = NULL,
+ *rd_new = NULL;
+
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ RRD_TYPE_NET_STAT_NETFILTER
+ , RRD_TYPE_NET_STAT_CONNTRACK "_expect"
+ , NULL
+ , RRD_TYPE_NET_STAT_CONNTRACK
+ , NULL
+ , "Connection Tracker Expectations"
+ , "expectations/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_CONNTRACK_NAME
+ , NETDATA_CHART_PRIO_NETFILTER_EXPECT
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+ rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
+
+ rd_created = rrddim_add(st, "created", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_deleted = rrddim_add(st, "deleted", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_new = rrddim_add(st, "new", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_created, aexpect_create);
+ rrddim_set_by_pointer(st, rd_deleted, aexpect_delete);
+ rrddim_set_by_pointer(st, rd_new, aexpect_new);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_search) {
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_searched = NULL,
+ *rd_restarted = NULL,
+ *rd_found = NULL;
+
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ RRD_TYPE_NET_STAT_NETFILTER
+ , RRD_TYPE_NET_STAT_CONNTRACK "_search"
+ , NULL
+ , RRD_TYPE_NET_STAT_CONNTRACK
+ , NULL
+ , "Connection Tracker Searches"
+ , "searches/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_CONNTRACK_NAME
+ , NETDATA_CHART_PRIO_NETFILTER_SEARCH
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+ rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
+
+ rd_searched = rrddim_add(st, "searched", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_restarted = rrddim_add(st, "restarted", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_found = rrddim_add(st, "found", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_searched, asearched);
+ rrddim_set_by_pointer(st, rd_restarted, asearch_restart);
+ rrddim_set_by_pointer(st, rd_found, afound);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_errors) {
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_icmp_error = NULL,
+ *rd_insert_failed = NULL,
+ *rd_drop = NULL,
+ *rd_early_drop = NULL;
+
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ RRD_TYPE_NET_STAT_NETFILTER
+ , RRD_TYPE_NET_STAT_CONNTRACK "_errors"
+ , NULL
+ , RRD_TYPE_NET_STAT_CONNTRACK
+ , NULL
+ , "Connection Tracker Errors"
+ , "events/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_CONNTRACK_NAME
+ , NETDATA_CHART_PRIO_NETFILTER_ERRORS
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+ rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
+
+ rd_icmp_error = rrddim_add(st, "icmp_error", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_insert_failed = rrddim_add(st, "insert_failed", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_drop = rrddim_add(st, "drop", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_early_drop = rrddim_add(st, "early_drop", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_icmp_error, aicmp_error);
+ rrddim_set_by_pointer(st, rd_insert_failed, ainsert_failed);
+ rrddim_set_by_pointer(st, rd_drop, adrop);
+ rrddim_set_by_pointer(st, rd_early_drop, aearly_drop);
+ rrdset_done(st);
+ }
+
+ return 0;
+}
diff --git a/collectors/proc.plugin/proc_net_stat_synproxy.c b/collectors/proc.plugin/proc_net_stat_synproxy.c
new file mode 100644
index 0000000..f5030f9
--- /dev/null
+++ b/collectors/proc.plugin/proc_net_stat_synproxy.c
@@ -0,0 +1,189 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "plugin_proc.h"
+
+#define PLUGIN_PROC_MODULE_SYNPROXY_NAME "/proc/net/stat/synproxy"
+
+#define RRD_TYPE_NET_STAT_NETFILTER "netfilter"
+#define RRD_TYPE_NET_STAT_SYNPROXY "synproxy"
+
+int do_proc_net_stat_synproxy(int update_every, usec_t dt) {
+ (void)dt;
+
+ static int do_entries = -1, do_cookies = -1, do_syns = -1, do_reopened = -1;
+ static procfile *ff = NULL;
+
+ if(unlikely(do_entries == -1)) {
+ do_entries = config_get_boolean_ondemand("plugin:proc:/proc/net/stat/synproxy", "SYNPROXY entries", CONFIG_BOOLEAN_AUTO);
+ do_cookies = config_get_boolean_ondemand("plugin:proc:/proc/net/stat/synproxy", "SYNPROXY cookies", CONFIG_BOOLEAN_AUTO);
+ do_syns = config_get_boolean_ondemand("plugin:proc:/proc/net/stat/synproxy", "SYNPROXY SYN received", CONFIG_BOOLEAN_AUTO);
+ do_reopened = config_get_boolean_ondemand("plugin:proc:/proc/net/stat/synproxy", "SYNPROXY connections reopened", CONFIG_BOOLEAN_AUTO);
+ }
+
+ if(unlikely(!ff)) {
+ char filename[FILENAME_MAX + 1];
+ snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/proc/net/stat/synproxy");
+ ff = procfile_open(config_get("plugin:proc:/proc/net/stat/synproxy", "filename to monitor", filename), " \t,:|", PROCFILE_FLAG_DEFAULT);
+ if(unlikely(!ff))
+ return 1;
+ }
+
+ ff = procfile_readall(ff);
+ if(unlikely(!ff))
+ return 0; // return 0 so that we retry opening it next time
+
+ // make sure we have at least 2 lines (the header plus at least one CPU)
+ size_t lines = procfile_lines(ff), l;
+ if(unlikely(lines < 2)) {
+ error("/proc/net/stat/synproxy has %zu lines, expected no less than 2. Disabling it.", lines);
+ return 1;
+ }
+
+ unsigned long long entries = 0, syn_received = 0, cookie_invalid = 0, cookie_valid = 0, cookie_retrans = 0, conn_reopened = 0;
+
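+ // each data line carries six hexadecimal columns, parsed below in this order:
+ // entries syn_received cookie_invalid cookie_valid cookie_retrans conn_reopened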
+ // synproxy gives its values per CPU
+ for(l = 1; l < lines ;l++) {
+ size_t words = procfile_linewords(ff, l);
+ if(unlikely(words < 6))
+ continue;
+
+ entries += strtoull(procfile_lineword(ff, l, 0), NULL, 16);
+ syn_received += strtoull(procfile_lineword(ff, l, 1), NULL, 16);
+ cookie_invalid += strtoull(procfile_lineword(ff, l, 2), NULL, 16);
+ cookie_valid += strtoull(procfile_lineword(ff, l, 3), NULL, 16);
+ cookie_retrans += strtoull(procfile_lineword(ff, l, 4), NULL, 16);
+ conn_reopened += strtoull(procfile_lineword(ff, l, 5), NULL, 16);
+ }
+
+ unsigned long long events = entries + syn_received + cookie_invalid + cookie_valid + cookie_retrans + conn_reopened;
+
+ // --------------------------------------------------------------------
+
+ if(do_entries == CONFIG_BOOLEAN_YES || (do_entries == CONFIG_BOOLEAN_AUTO &&
+ (events || netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
+ do_entries = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *st = NULL;
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ RRD_TYPE_NET_STAT_NETFILTER
+ , RRD_TYPE_NET_STAT_SYNPROXY "_entries"
+ , NULL
+ , RRD_TYPE_NET_STAT_SYNPROXY
+ , NULL
+ , "SYNPROXY Entries Used"
+ , "entries"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_SYNPROXY_NAME
+ , NETDATA_CHART_PRIO_SYNPROXY_ENTRIES
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rrddim_add(st, "entries", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else rrdset_next(st);
+
+ rrddim_set(st, "entries", entries);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_syns == CONFIG_BOOLEAN_YES || (do_syns == CONFIG_BOOLEAN_AUTO &&
+ (events || netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
+ do_syns = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *st = NULL;
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ RRD_TYPE_NET_STAT_NETFILTER
+ , RRD_TYPE_NET_STAT_SYNPROXY "_syn_received"
+ , NULL
+ , RRD_TYPE_NET_STAT_SYNPROXY
+ , NULL
+ , "SYNPROXY SYN Packets received"
+ , "packets/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_SYNPROXY_NAME
+ , NETDATA_CHART_PRIO_SYNPROXY_SYN_RECEIVED
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rrddim_add(st, "received", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set(st, "received", syn_received);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_reopened == CONFIG_BOOLEAN_YES || (do_reopened == CONFIG_BOOLEAN_AUTO &&
+ (events || netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
+ do_reopened = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *st = NULL;
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ RRD_TYPE_NET_STAT_NETFILTER
+ , RRD_TYPE_NET_STAT_SYNPROXY "_conn_reopened"
+ , NULL
+ , RRD_TYPE_NET_STAT_SYNPROXY
+ , NULL
+ , "SYNPROXY Connections Reopened"
+ , "connections/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_SYNPROXY_NAME
+ , NETDATA_CHART_PRIO_SYNPROXY_CONN_OPEN
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rrddim_add(st, "reopened", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set(st, "reopened", conn_reopened);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_cookies == CONFIG_BOOLEAN_YES || (do_cookies == CONFIG_BOOLEAN_AUTO &&
+ (events || netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
+ do_cookies = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *st = NULL;
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ RRD_TYPE_NET_STAT_NETFILTER
+ , RRD_TYPE_NET_STAT_SYNPROXY "_cookies"
+ , NULL
+ , RRD_TYPE_NET_STAT_SYNPROXY
+ , NULL
+ , "SYNPROXY TCP Cookies"
+ , "cookies/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_SYNPROXY_NAME
+ , NETDATA_CHART_PRIO_SYNPROXY_COOKIES
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rrddim_add(st, "valid", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "invalid", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "retransmits", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set(st, "valid", cookie_valid);
+ rrddim_set(st, "invalid", cookie_invalid);
+ rrddim_set(st, "retransmits", cookie_retrans);
+ rrdset_done(st);
+ }
+
+ return 0;
+}
diff --git a/collectors/proc.plugin/proc_net_wireless.c b/collectors/proc.plugin/proc_net_wireless.c
new file mode 100644
index 0000000..32a53c6
--- /dev/null
+++ b/collectors/proc.plugin/proc_net_wireless.c
@@ -0,0 +1,453 @@
+#include <stdbool.h>
+#include "plugin_proc.h"
+
+#define PLUGIN_PROC_MODULE_NETWIRELESS_NAME "/proc/net/wireless"
+
+#define CONFIG_SECTION_PLUGIN_PROC_NETWIRELESS "plugin:" PLUGIN_PROC_CONFIG_NAME ":" PLUGIN_PROC_MODULE_NETWIRELESS_NAME
+
+
+static struct netwireless {
+ char *name;
+ uint32_t hash;
+
+ //flags
+ bool configured;
+ struct timeval updated;
+
+ int do_status;
+ int do_quality;
+ int do_discarded_packets;
+ int do_missed_beacon;
+
+ // Data collected
+ // status
+ kernel_uint_t status;
+
+ // Quality
+ calculated_number link;
+ calculated_number level;
+ calculated_number noise;
+
+ // Discarded packets
+ kernel_uint_t nwid;
+ kernel_uint_t crypt;
+ kernel_uint_t frag;
+ kernel_uint_t retry;
+ kernel_uint_t misc;
+
+ // missed beacon
+ kernel_uint_t missed_beacon;
+
+ const char *chart_id_net_status;
+ const char *chart_id_net_link;
+ const char *chart_id_net_level;
+ const char *chart_id_net_noise;
+ const char *chart_id_net_discarded_packets;
+ const char *chart_id_net_missed_beacon;
+
+ const char *chart_family;
+
+ // charts
+ // status
+ RRDSET *st_status;
+
+ // Quality
+ RRDSET *st_link;
+ RRDSET *st_level;
+ RRDSET *st_noise;
+
+ // Discarded Packets
+ RRDSET *st_discarded_packets;
+ // Missed beacon
+ RRDSET *st_missed_beacon;
+
+ // Dimensions
+ // status
+ RRDDIM *rd_status;
+
+ // Quality
+ RRDDIM *rd_link;
+ RRDDIM *rd_level;
+ RRDDIM *rd_noise;
+
+ // Discarded packets
+ RRDDIM *rd_nwid;
+ RRDDIM *rd_crypt;
+ RRDDIM *rd_frag;
+ RRDDIM *rd_retry;
+ RRDDIM *rd_misc;
+
+ // missed beacon
+ RRDDIM *rd_missed_beacon;
+
+ struct netwireless *next;
+} *netwireless_root = NULL;
+
+static void netwireless_free_st(struct netwireless *wireless_dev)
+{
+ if (wireless_dev->st_status) rrdset_is_obsolete(wireless_dev->st_status);
+ if (wireless_dev->st_link) rrdset_is_obsolete(wireless_dev->st_link);
+ if (wireless_dev->st_level) rrdset_is_obsolete(wireless_dev->st_level);
+ if (wireless_dev->st_noise) rrdset_is_obsolete(wireless_dev->st_noise);
+ if (wireless_dev->st_discarded_packets) rrdset_is_obsolete(wireless_dev->st_discarded_packets);
+ if (wireless_dev->st_missed_beacon) rrdset_is_obsolete(wireless_dev->st_missed_beacon);
+
+ wireless_dev->st_status = NULL;
+ wireless_dev->st_link = NULL;
+ wireless_dev->st_level = NULL;
+ wireless_dev->st_noise = NULL;
+ wireless_dev->st_discarded_packets = NULL;
+ wireless_dev->st_missed_beacon = NULL;
+}
+
+static void netwireless_free(struct netwireless *wireless_dev)
+{
+ wireless_dev->next = NULL;
+ freez((void *)wireless_dev->name);
+ netwireless_free_st(wireless_dev);
+ freez((void *)wireless_dev->chart_id_net_status);
+ freez((void *)wireless_dev->chart_id_net_link);
+ freez((void *)wireless_dev->chart_id_net_level);
+ freez((void *)wireless_dev->chart_id_net_noise);
+ freez((void *)wireless_dev->chart_id_net_discarded_packets);
+ freez((void *)wireless_dev->chart_id_net_missed_beacon);
+
+ freez((void *)wireless_dev);
+}
+
+static void netwireless_cleanup(struct timeval *timestamp)
+{
+ struct netwireless *previous = NULL;
+ struct netwireless *current;
+ // walk the list from beginning to end, freeing stale entries
+ for (current = netwireless_root; current;) {
+
+ if (timercmp(&current->updated, timestamp, <)) {
+ struct netwireless *to_free = current;
+ current = current->next;
+ netwireless_free(to_free);
+
+ if (previous) {
+ previous->next = current;
+ } else {
+ netwireless_root = current;
+ }
+ } else {
+ previous = current;
+ current = current->next;
+ }
+ }
+}
+
+// finds an existing interface or creates a new entry
+static struct netwireless *find_or_create_wireless(const char *name)
+{
+ struct netwireless *wireless;
+ uint32_t hash = simple_hash(name);
+
+ // search the list from beginning to end
+ for (wireless = netwireless_root ; wireless ; wireless = wireless->next) {
+ if (unlikely(hash == wireless->hash && !strcmp(name, wireless->name))) {
+ return wireless;
+ }
+ }
+
+ // create a new one
+ wireless = callocz(1, sizeof(struct netwireless));
+ wireless->name = strdupz(name);
+ wireless->hash = hash;
+
+ // link it to the end
+ if (netwireless_root) {
+ struct netwireless *last_node;
+ for (last_node = netwireless_root; last_node->next ; last_node = last_node->next);
+
+ last_node->next = wireless;
+ } else
+ netwireless_root = wireless;
+
+ return wireless;
+}
+
+static void configure_device(int do_status, int do_quality, int do_discarded_packets, int do_missed,
+ struct netwireless *wireless_dev) {
+ wireless_dev->do_status = do_status;
+ wireless_dev->do_quality = do_quality;
+ wireless_dev->do_discarded_packets = do_discarded_packets;
+ wireless_dev->do_missed_beacon = do_missed;
+ wireless_dev->configured = true;
+
+ char buffer[RRD_ID_LENGTH_MAX + 1];
+
+ snprintfz(buffer, RRD_ID_LENGTH_MAX, "%s_status", wireless_dev->name);
+ wireless_dev->chart_id_net_status = strdupz(buffer);
+
+ snprintfz(buffer, RRD_ID_LENGTH_MAX, "%s_link_quality", wireless_dev->name);
+ wireless_dev->chart_id_net_link = strdupz(buffer);
+
+ snprintfz(buffer, RRD_ID_LENGTH_MAX, "%s_signal_level", wireless_dev->name);
+ wireless_dev->chart_id_net_level = strdupz(buffer);
+
+ snprintfz(buffer, RRD_ID_LENGTH_MAX, "%s_noise_level", wireless_dev->name);
+ wireless_dev->chart_id_net_noise = strdupz(buffer);
+
+ snprintfz(buffer, RRD_ID_LENGTH_MAX, "%s_discarded_packets", wireless_dev->name);
+ wireless_dev->chart_id_net_discarded_packets = strdupz(buffer);
+
+ snprintfz(buffer, RRD_ID_LENGTH_MAX, "%s_missed_beacon", wireless_dev->name);
+ wireless_dev->chart_id_net_missed_beacon = strdupz(buffer);
+}
+
+int do_proc_net_wireless(int update_every, usec_t dt)
+{
+ UNUSED(dt);
+ static procfile *ff = NULL;
+ static int do_status, do_quality = -1, do_discarded_packets, do_beacon;
+ static char *proc_net_wireless_filename = NULL;
+
+ if (unlikely(do_quality == -1)) {
+ char filename[FILENAME_MAX + 1];
+ snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/proc/net/wireless");
+
+ proc_net_wireless_filename = config_get(CONFIG_SECTION_PLUGIN_PROC_NETWIRELESS,"filename to monitor",
+ filename);
+
+ do_status = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_NETWIRELESS,
+ "status for all interfaces", CONFIG_BOOLEAN_AUTO);
+
+ do_quality = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_NETWIRELESS,
+ "quality for all interfaces", CONFIG_BOOLEAN_AUTO);
+
+ do_discarded_packets = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_NETWIRELESS,
+ "discarded packets for all interfaces",
+ CONFIG_BOOLEAN_AUTO);
+
+ do_beacon = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_NETWIRELESS,
+ "missed beacon for all interface", CONFIG_BOOLEAN_AUTO);
+ }
+
+ if (unlikely(!ff)) {
+ ff = procfile_open(proc_net_wireless_filename, " \t,|", PROCFILE_FLAG_DEFAULT);
+ if (unlikely(!ff)) return 1;
+ }
+
+ ff = procfile_readall(ff);
+ if (unlikely(!ff)) return 1;
+
+ size_t lines = procfile_lines(ff);
+ struct timeval timestamp;
+ size_t l;
+ gettimeofday(&timestamp, NULL);
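+ // /proc/net/wireless starts with two header lines, then one line per interface, e.g. (example values):
+ // wlan0: 0000   70.  -30.  -256        0      0      0      0      0        0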
+ for (l = 2; l < lines; l++) {
+ if (unlikely(procfile_linewords(ff, l) < 11)) continue;
+
+ char *name = procfile_lineword(ff, l, 0);
+ size_t len = strlen(name);
+ if (name[len - 1] == ':') name[len - 1] = '\0';
+
+ struct netwireless *wireless_dev = find_or_create_wireless(name);
+
+ if (unlikely(!wireless_dev->configured)) {
+ configure_device(do_status, do_quality, do_discarded_packets, do_beacon, wireless_dev);
+ }
+
+ if (likely(do_status != CONFIG_BOOLEAN_NO)) {
+ wireless_dev->status = str2kernel_uint_t(procfile_lineword(ff, l, 1));
+
+ if (unlikely(!wireless_dev->st_status)) {
+ wireless_dev->st_status = rrdset_create_localhost("wireless",
+ wireless_dev->chart_id_net_status,
+ NULL,
+ wireless_dev->name,
+ "wireless.status",
+ "Internal status reported by interface.",
+ "status",
+ PLUGIN_PROC_NAME,
+ PLUGIN_PROC_MODULE_NETWIRELESS_NAME,
+ NETDATA_CHART_PRIO_WIRELESS_IFACE,
+ update_every,
+ RRDSET_TYPE_LINE);
+ rrdset_flag_set(wireless_dev->st_status, RRDSET_FLAG_DETAIL);
+
+ wireless_dev->rd_status = rrddim_add(wireless_dev->st_status, "status", NULL, 1,
+ 1, RRD_ALGORITHM_ABSOLUTE);
+ } else {
+ rrdset_next(wireless_dev->st_status);
+ }
+
+ rrddim_set_by_pointer(wireless_dev->st_status, wireless_dev->rd_status,
+ (collected_number)wireless_dev->status);
+ rrdset_done(wireless_dev->st_status);
+ }
+
+ if (likely(do_quality != CONFIG_BOOLEAN_NO)) {
+ wireless_dev->link = str2ld(procfile_lineword(ff, l, 2), NULL);
+ wireless_dev->level = str2ld(procfile_lineword(ff, l, 3), NULL);
+ wireless_dev->noise = str2ld(procfile_lineword(ff, l, 4), NULL);
+
+ if (unlikely(!wireless_dev->st_link)) {
+ wireless_dev->st_link = rrdset_create_localhost("wireless",
+ wireless_dev->chart_id_net_link,
+ NULL,
+ wireless_dev->name,
+ "wireless.link_quality",
+ "Overall quality of the link. This is an aggregate value, and depends on the driver and hardware.",
+ "value",
+ PLUGIN_PROC_NAME,
+ PLUGIN_PROC_MODULE_NETWIRELESS_NAME,
+ NETDATA_CHART_PRIO_WIRELESS_IFACE + 1,
+ update_every,
+ RRDSET_TYPE_LINE);
+ rrdset_flag_set(wireless_dev->st_link, RRDSET_FLAG_DETAIL);
+
+ wireless_dev->rd_link = rrddim_add(wireless_dev->st_link, "link_quality", NULL, 1, 1,
+ RRD_ALGORITHM_ABSOLUTE);
+ } else {
+ rrdset_next(wireless_dev->st_link);
+ }
+
+ if (unlikely(!wireless_dev->st_level)) {
+ wireless_dev->st_level = rrdset_create_localhost("wireless",
+ wireless_dev->chart_id_net_level,
+ NULL,
+ wireless_dev->name,
+ "wireless.signal_level",
+ "The signal level is the wireless signal power level received by the wireless client. The closer the value is to 0, the stronger the signal.",
+ "dBm",
+ PLUGIN_PROC_NAME,
+ PLUGIN_PROC_MODULE_NETWIRELESS_NAME,
+ NETDATA_CHART_PRIO_WIRELESS_IFACE + 2,
+ update_every,
+ RRDSET_TYPE_LINE);
+ rrdset_flag_set(wireless_dev->st_level, RRDSET_FLAG_DETAIL);
+
+ wireless_dev->rd_level = rrddim_add(wireless_dev->st_level, "signal_level", NULL, 1, 1,
+ RRD_ALGORITHM_ABSOLUTE);
+ } else {
+ rrdset_next(wireless_dev->st_level);
+ }
+
+ if (unlikely(!wireless_dev->st_noise)) {
+ wireless_dev->st_noise = rrdset_create_localhost("wireless",
+ wireless_dev->chart_id_net_noise,
+ NULL,
+ wireless_dev->name,
+ "wireless.noise_level",
+ "The noise level indicates the amount of background noise in your environment. The closer the value to 0, the greater the noise level.",
+ "dBm",
+ PLUGIN_PROC_NAME,
+ PLUGIN_PROC_MODULE_NETWIRELESS_NAME,
+ NETDATA_CHART_PRIO_WIRELESS_IFACE + 3,
+ update_every,
+ RRDSET_TYPE_LINE);
+ rrdset_flag_set(wireless_dev->st_noise, RRDSET_FLAG_DETAIL);
+
+ wireless_dev->rd_noise = rrddim_add(wireless_dev->st_noise, "noise_level", NULL, 1, 1,
+ RRD_ALGORITHM_ABSOLUTE);
+ } else {
+ rrdset_next(wireless_dev->st_noise);
+ }
+
+ rrddim_set_by_pointer(wireless_dev->st_link, wireless_dev->rd_link,
+ (collected_number)wireless_dev->link);
+ rrdset_done(wireless_dev->st_link);
+
+ rrddim_set_by_pointer(wireless_dev->st_level, wireless_dev->rd_level,
+ (collected_number)wireless_dev->level);
+ rrdset_done(wireless_dev->st_level);
+
+ rrddim_set_by_pointer(wireless_dev->st_noise, wireless_dev->rd_noise,
+ (collected_number)wireless_dev->noise);
+ rrdset_done(wireless_dev->st_noise);
+ }
+
+ if (likely(do_discarded_packets)) {
+ wireless_dev->nwid = str2kernel_uint_t(procfile_lineword(ff, l, 5));
+ wireless_dev->crypt = str2kernel_uint_t(procfile_lineword(ff, l, 6));
+ wireless_dev->frag = str2kernel_uint_t(procfile_lineword(ff, l, 7));
+ wireless_dev->retry = str2kernel_uint_t(procfile_lineword(ff, l, 8));
+ wireless_dev->misc = str2kernel_uint_t(procfile_lineword(ff, l, 9));
+
+ if (unlikely(!wireless_dev->st_discarded_packets)) {
+ wireless_dev->st_discarded_packets = rrdset_create_localhost("wireless",
+ wireless_dev->chart_id_net_discarded_packets,
+ NULL,
+ wireless_dev->name,
+ "wireless.discarded_packets",
+ "Packet discarded in the wireless adapter due to \"wireless\" specific problems.",
+ "packets/s",
+ PLUGIN_PROC_NAME,
+ PLUGIN_PROC_MODULE_NETWIRELESS_NAME,
+ NETDATA_CHART_PRIO_WIRELESS_IFACE + 4,
+ update_every,
+ RRDSET_TYPE_LINE);
+
+ rrdset_flag_set(wireless_dev->st_discarded_packets, RRDSET_FLAG_DETAIL);
+
+ wireless_dev->rd_nwid = rrddim_add(wireless_dev->st_discarded_packets, "nwid", NULL, 1,
+ 1, RRD_ALGORITHM_INCREMENTAL);
+ wireless_dev->rd_crypt = rrddim_add(wireless_dev->st_discarded_packets, "crypt", NULL, 1,
+ 1, RRD_ALGORITHM_INCREMENTAL);
+ wireless_dev->rd_frag = rrddim_add(wireless_dev->st_discarded_packets, "frag", NULL, 1,
+ 1, RRD_ALGORITHM_INCREMENTAL);
+ wireless_dev->rd_retry = rrddim_add(wireless_dev->st_discarded_packets, "retry", NULL, 1,
+ 1, RRD_ALGORITHM_INCREMENTAL);
+ wireless_dev->rd_misc = rrddim_add(wireless_dev->st_discarded_packets, "misc", NULL, 1,
+ 1, RRD_ALGORITHM_INCREMENTAL);
+ } else {
+ rrdset_next(wireless_dev->st_discarded_packets);
+ }
+
+ rrddim_set_by_pointer(wireless_dev->st_discarded_packets, wireless_dev->rd_nwid,
+ (collected_number)wireless_dev->nwid);
+
+ rrddim_set_by_pointer(wireless_dev->st_discarded_packets, wireless_dev->rd_crypt,
+ (collected_number)wireless_dev->crypt);
+
+ rrddim_set_by_pointer(wireless_dev->st_discarded_packets, wireless_dev->rd_frag,
+ (collected_number)wireless_dev->frag);
+
+ rrddim_set_by_pointer(wireless_dev->st_discarded_packets, wireless_dev->rd_retry,
+ (collected_number)wireless_dev->retry);
+
+ rrddim_set_by_pointer(wireless_dev->st_discarded_packets, wireless_dev->rd_misc,
+ (collected_number)wireless_dev->misc);
+
+ rrdset_done(wireless_dev->st_discarded_packets);
+ }
+
+ if (likely(do_beacon)) {
+ wireless_dev->missed_beacon = str2kernel_uint_t(procfile_lineword(ff, l, 10));
+
+ if (unlikely(!wireless_dev->st_missed_beacon)) {
+ wireless_dev->st_missed_beacon = rrdset_create_localhost("wireless",
+ wireless_dev->chart_id_net_missed_beacon,
+ NULL,
+ wireless_dev->name,
+ "wireless.missed_beacons",
+ "Number of missed beacons.",
+ "frames/s",
+ PLUGIN_PROC_NAME,
+ PLUGIN_PROC_MODULE_NETWIRELESS_NAME,
+ NETDATA_CHART_PRIO_WIRELESS_IFACE + 5,
+ update_every,
+ RRDSET_TYPE_LINE);
+ rrdset_flag_set(wireless_dev->st_missed_beacon, RRDSET_FLAG_DETAIL);
+
+ wireless_dev->rd_missed_beacon = rrddim_add(wireless_dev->st_missed_beacon, "missed_beacons",
+ NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ } else {
+ rrdset_next(wireless_dev->st_missed_beacon);
+ }
+
+ rrddim_set_by_pointer(wireless_dev->st_missed_beacon, wireless_dev->rd_missed_beacon,
+ (collected_number)wireless_dev->missed_beacon);
+ rrdset_done(wireless_dev->st_missed_beacon);
+ }
+
+ wireless_dev->updated = timestamp;
+ }
+
+ netwireless_cleanup(&timestamp);
+ return 0;
+}
diff --git a/collectors/proc.plugin/proc_pagetypeinfo.c b/collectors/proc.plugin/proc_pagetypeinfo.c
new file mode 100644
index 0000000..6b6c6c4
--- /dev/null
+++ b/collectors/proc.plugin/proc_pagetypeinfo.c
@@ -0,0 +1,333 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "plugin_proc.h"
+
+// For ULONG_MAX
+#include <limits.h>
+
+#define PLUGIN_PROC_MODULE_PAGETYPEINFO_NAME "/proc/pagetypeinfo"
+#define CONFIG_SECTION_PLUGIN_PROC_PAGETYPEINFO "plugin:" PLUGIN_PROC_CONFIG_NAME ":" PLUGIN_PROC_MODULE_PAGETYPEINFO_NAME
+
+// Zone struct is pglist_data, in include/linux/mmzone.h
+// MAX_NR_ZONES is from __MAX_NR_ZONE, which is the last value of the enum.
+#define MAX_PAGETYPE_ORDER 11
+
+// Names are in mm/page_alloc.c :: migratetype_names. Max size = 10.
+#define MAX_ZONETYPE_NAME 16
+#define MAX_PAGETYPE_NAME 16
+
+// Defined in include/linux/mmzone.h as __MAX_NR_ZONE (last enum of zone_type)
+#define MAX_ZONETYPE 6
+// Defined in include/linux/mmzone.h as MIGRATE_TYPES (last enum of migratetype)
+#define MAX_PAGETYPE 7
+
+
+//
+// /proc/pagetypeinfo is declared in mm/vmstat.c :: init_mm_internals
+//
+
+// One line of /proc/pagetypeinfo
+struct pageline {
+ int node;
+ char *zone;
+ char *type;
+ int line;
+ uint64_t free_pages_size[MAX_PAGETYPE_ORDER];
+ RRDDIM *rd[MAX_PAGETYPE_ORDER];
+};
+
+// Sum of all orders
+struct systemorder {
+ uint64_t size;
+ RRDDIM *rd;
+};
+
+
+static inline uint64_t pageline_total_count(struct pageline *p) {
+ uint64_t sum = 0, o;
+ for (o=0; o<MAX_PAGETYPE_ORDER; o++)
+ sum += p->free_pages_size[o];
+ return sum;
+}
+
+// Check if a line of /proc/pagetypeinfo is valid to use
+// Free block lines start with "Node" and their 5th word (index 4) is "type"
+#define pagetypeinfo_line_valid(ff, l) (strncmp(procfile_lineword(ff, l, 0), "Node", 4) == 0 && strncmp(procfile_lineword(ff, l, 4), "type", 4) == 0)
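+
+// On a typical kernel the free-block lines accepted above look like this
+// (an illustrative sample; the number of order columns varies):
+//
+//   Node    0, zone      DMA, type    Unmovable      1      1      1      0 ...
+//
+// With the separators used below, word 1 is the node number, word 3 the zone
+// name, word 5 the migrate type and words 6+ the per-order free block counts.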
+
+// Dimension name from the order
+#define dim_name(s, o, pagesize) (snprintfz(s, 16, "%ldKB (%lu)", (1 << o) * pagesize / 1024, o))
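+
+// For example, assuming a 4 KiB page size, dim_name() yields:
+//   o = 0  -> "4KB (0)"
+//   o = 3  -> "32KB (3)"
+//   o = 10 -> "4096KB (10)"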
+
+int do_proc_pagetypeinfo(int update_every, usec_t dt) {
+ (void)dt;
+
+ // Config
+ static int do_global, do_detail;
+ static SIMPLE_PATTERN *filter_types = NULL;
+
+ // Counters from parsing the file that don't change after boot
+ static struct systemorder systemorders[MAX_PAGETYPE_ORDER] = {};
+ static struct pageline* pagelines = NULL;
+ static long pagesize = 0;
+ static size_t pageorders_cnt = 0, pagelines_cnt = 0, ff_lines = 0;
+
+ // Handle
+ static procfile *ff = NULL;
+ static char ff_path[FILENAME_MAX + 1];
+
+ // RRD Sets
+ static RRDSET *st_order = NULL;
+ static RRDSET **st_nodezonetype = NULL;
+
+ // Local temp variables
+ size_t l, o, p;
+ struct pageline *pgl = NULL;
+
+ // --------------------------------------------------------------------
+ // Startup: Init arch and open /proc/pagetypeinfo
+ if (unlikely(!pagesize)) {
+ pagesize = sysconf(_SC_PAGESIZE);
+ }
+
+ if(unlikely(!ff)) {
+ snprintfz(ff_path, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, PLUGIN_PROC_MODULE_PAGETYPEINFO_NAME);
+ ff = procfile_open(config_get(CONFIG_SECTION_PLUGIN_PROC_PAGETYPEINFO, "filename to monitor", ff_path), " \t:", PROCFILE_FLAG_DEFAULT);
+
+ if(unlikely(!ff)) {
+ strncpyz(ff_path, PLUGIN_PROC_MODULE_PAGETYPEINFO_NAME, FILENAME_MAX);
+ ff = procfile_open(PLUGIN_PROC_MODULE_PAGETYPEINFO_NAME, " \t,", PROCFILE_FLAG_DEFAULT);
+ }
+ }
+ if(unlikely(!ff))
+ return 1;
+
+ ff = procfile_readall(ff);
+ if(unlikely(!ff))
+ return 0; // we return 0, so that we will retry to open it next time
+
+ // --------------------------------------------------------------------
+ // Init: find how many Nodes, Zones and Types
+ if(unlikely(pagelines_cnt == 0)) {
+ size_t nodenumlast = -1;
+ char *zonenamelast = NULL;
+
+ ff_lines = procfile_lines(ff);
+ if(unlikely(!ff_lines)) {
+ error("PLUGIN: PROC_PAGETYPEINFO: Cannot read %s, zero lines reported.", ff_path);
+ return 1;
+ }
+
+ // Configuration
+ do_global = config_get_boolean(CONFIG_SECTION_PLUGIN_PROC_PAGETYPEINFO, "enable system summary", CONFIG_BOOLEAN_YES);
+ do_detail = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_PAGETYPEINFO, "enable detail per-type", CONFIG_BOOLEAN_AUTO);
+ filter_types = simple_pattern_create(
+ config_get(CONFIG_SECTION_PLUGIN_PROC_PAGETYPEINFO, "hide charts id matching", "")
+ , NULL
+ , SIMPLE_PATTERN_SUFFIX
+ );
+
+ pagelines_cnt = 0;
+
+ // Pass 1: how many lines would be valid
+ for (l = 4; l < ff_lines; l++) {
+ if (!pagetypeinfo_line_valid(ff, l))
+ continue;
+
+ pagelines_cnt++;
+ }
+ if (pagelines_cnt == 0) {
+ error("PLUGIN: PROC_PAGETYPEINFO: Unable to parse any valid line in %s", ff_path);
+ return 1;
+ }
+
+ // The 4th line is the "Free pages count per migrate type at order" header. Subtract its 9 leading words to get the order count.
+ pageorders_cnt = procfile_linewords(ff, 3);
+ if (pageorders_cnt < 9) {
+ error("PLUGIN: PROC_PAGETYPEINFO: Unable to parse Line 4 of %s", ff_path);
+ return 1;
+ }
+
+ pageorders_cnt -= 9;
+
+ if (pageorders_cnt > MAX_PAGETYPE_ORDER) {
+ error("PLUGIN: PROC_PAGETYPEINFO: pageorder found (%lu) is higher than max %d", pageorders_cnt, MAX_PAGETYPE_ORDER);
+ return 1;
+ }
+
+ // Init pagelines from scanned lines
+ if (!pagelines) {
+ pagelines = callocz(pagelines_cnt, sizeof(struct pageline));
+ if (!pagelines) {
+ error("PLUGIN: PROC_PAGETYPEINFO: Cannot allocate %lu pagelines of %lu B", pagelines_cnt, sizeof(struct pageline));
+ return 1;
+ }
+ }
+
+ // Pass 2: Scan the file again, with details
+ p = 0;
+ for (l=4; l < ff_lines; l++) {
+
+ if (!pagetypeinfo_line_valid(ff, l))
+ continue;
+
+ size_t nodenum = strtoul(procfile_lineword(ff, l, 1), NULL, 10);
+ char *zonename = procfile_lineword(ff, l, 3);
+ char *typename = procfile_lineword(ff, l, 5);
+
+ // We changed node or zone
+ if (nodenum != nodenumlast || !zonenamelast || strncmp(zonename, zonenamelast, 6) != 0) {
+ zonenamelast = zonename;
+ }
+
+ // Populate the line
+ pgl = &pagelines[p];
+
+ pgl->line = l;
+ pgl->node = nodenum;
+ pgl->type = typename;
+ pgl->zone = zonename;
+ for (o = 0; o < pageorders_cnt; o++)
+ pgl->free_pages_size[o] = str2uint64_t(procfile_lineword(ff, l, o+6)) * 1 << o;
+
+ p++;
+ }
+
+ // Init the RRD graphs
+
+ // Per-order: sum over all nodes, zones and types, grouped by order
+ if (do_global != CONFIG_BOOLEAN_NO) {
+ st_order = rrdset_create_localhost(
+ "mem"
+ , "pagetype_global"
+ , NULL
+ , "pagetype"
+ , NULL
+ , "System orders available"
+ , "B"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_PAGETYPEINFO_NAME
+ , NETDATA_CHART_PRIO_MEM_PAGEFRAG
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+ for (o = 0; o < pageorders_cnt; o++) {
+ char id[3+1];
+ snprintfz(id, 3, "%lu", o);
+
+ char name[20+1];
+ dim_name(name, o, pagesize);
+
+ systemorders[o].rd = rrddim_add(st_order, id, name, pagesize, 1, RRD_ALGORITHM_ABSOLUTE);
+ }
+ }
+
+
+ // Per NUMA node, zone and type (full detail). Only if sum(line) > 0
+ st_nodezonetype = callocz(pagelines_cnt, sizeof(RRDSET *));
+ for (p = 0; p < pagelines_cnt; p++) {
+ pgl = &pagelines[p];
+
+ // Skip invalid, refused or empty pagelines unless explicitly requested
+ if (!pgl
+ || do_detail == CONFIG_BOOLEAN_NO
+ || (do_detail == CONFIG_BOOLEAN_AUTO && pageline_total_count(pgl) == 0 && netdata_zero_metrics_enabled != CONFIG_BOOLEAN_YES))
+ continue;
+
+ // "pagetype Node" + NUMA-NodeId + ZoneName + TypeName
+ char setid[13+1+2+1+MAX_ZONETYPE_NAME+1+MAX_PAGETYPE_NAME+1];
+ snprintfz(setid, 13+1+2+1+MAX_ZONETYPE_NAME+1+MAX_PAGETYPE_NAME, "pagetype_Node%d_%s_%s", pgl->node, pgl->zone, pgl->type);
+
+ // Skip explicitly refused charts
+ if (simple_pattern_matches(filter_types, setid))
+ continue;
+
+ // "Node" + NUMA-NodeID + ZoneName + TypeName
+ char setname[4+1+MAX_ZONETYPE_NAME+1+MAX_PAGETYPE_NAME +1];
+ snprintfz(setname, MAX_ZONETYPE_NAME + MAX_PAGETYPE_NAME, "Node %d %s %s",
+ pgl->node, pgl->zone, pgl->type);
+
+ st_nodezonetype[p] = rrdset_create_localhost(
+ "mem"
+ , setid
+ , NULL
+ , "pagetype"
+ , NULL
+ , setname
+ , "B"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_PAGETYPEINFO_NAME
+ , NETDATA_CHART_PRIO_MEM_PAGEFRAG + 1 + p
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+ for (o = 0; o < pageorders_cnt; o++) {
+ char dimid[3+1];
+ snprintfz(dimid, 3, "%lu", o);
+ char dimname[20+1];
+ dim_name(dimname, o, pagesize);
+
+ pgl->rd[o] = rrddim_add(st_nodezonetype[p], dimid, dimname, pagesize, 1, RRD_ALGORITHM_ABSOLUTE);
+ }
+ }
+ }
+
+ // --------------------------------------------------------------------
+ // Update pagelines
+
+ // Process each line
+ p = 0;
+ for (l=4; l<ff_lines; l++) {
+
+ if (!pagetypeinfo_line_valid(ff, l))
+ continue;
+
+ size_t words = procfile_linewords(ff, l);
+
+ if (words != 7+pageorders_cnt) {
+ error("PLUGIN: PROC_PAGETYPEINFO: Unable to read line %lu, %lu words found instead of %lu", l+1, words, 7+pageorders_cnt);
+ break;
+ }
+
+ for (o = 0; o < pageorders_cnt; o++) {
+ // Reset counter
+ if (p == 0)
+ systemorders[o].size = 0;
+
+ // Update orders of the current line
+ pagelines[p].free_pages_size[o] = str2uint64_t(procfile_lineword(ff, l, o+6)) * 1 << o;
+
+ // Update sum by order
+ systemorders[o].size += pagelines[p].free_pages_size[o];
+ }
+
+ p++;
+ }
+
+ // --------------------------------------------------------------------
+ // update RRD values
+
+ // Global system per order
+ if (st_order) {
+ rrdset_next(st_order);
+ for (o = 0; o < pageorders_cnt; o++) {
+ rrddim_set_by_pointer(st_order, systemorders[o].rd, systemorders[o].size);
+ }
+ rrdset_done(st_order);
+ }
+
+ // Per Node-Zone-Type
+ if (do_detail) {
+ for (p = 0; p < pagelines_cnt; p++) {
+ // Skip empty graphs
+ if (!st_nodezonetype[p])
+ continue;
+
+ rrdset_next(st_nodezonetype[p]);
+ for (o = 0; o < pageorders_cnt; o++)
+ rrddim_set_by_pointer(st_nodezonetype[p], pagelines[p].rd[o], pagelines[p].free_pages_size[o]);
+
+ rrdset_done(st_nodezonetype[p]);
+ }
+ }
+
+ return 0;
+}
diff --git a/collectors/proc.plugin/proc_pressure.c b/collectors/proc.plugin/proc_pressure.c
new file mode 100644
index 0000000..4a40b4a
--- /dev/null
+++ b/collectors/proc.plugin/proc_pressure.c
@@ -0,0 +1,178 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "plugin_proc.h"
+
+#define PLUGIN_PROC_MODULE_PRESSURE_NAME "/proc/pressure"
+#define CONFIG_SECTION_PLUGIN_PROC_PRESSURE "plugin:" PLUGIN_PROC_CONFIG_NAME ":" PLUGIN_PROC_MODULE_PRESSURE_NAME
+
+// Linux calculates this every 2 seconds, see kernel/sched/psi.c PSI_FREQ
+#define MIN_PRESSURE_UPDATE_EVERY 2
+
+
+static struct pressure resources[PRESSURE_NUM_RESOURCES] = {
+ {
+ .some = { .id = "cpu_pressure", .title = "CPU Pressure" },
+ },
+ {
+ .some = { .id = "memory_some_pressure", .title = "Memory Pressure" },
+ .full = { .id = "memory_full_pressure", .title = "Memory Full Pressure" },
+ },
+ {
+ .some = { .id = "io_some_pressure", .title = "I/O Pressure" },
+ .full = { .id = "io_full_pressure", .title = "I/O Full Pressure" },
+ },
+};
+
+static struct {
+ procfile *pf;
+ const char *name; // metric file name
+ const char *family; // webui section name
+ int section_priority;
+} resource_info[PRESSURE_NUM_RESOURCES] = {
+ { .name = "cpu", .family = "cpu", .section_priority = NETDATA_CHART_PRIO_SYSTEM_CPU },
+ { .name = "memory", .family = "ram", .section_priority = NETDATA_CHART_PRIO_SYSTEM_RAM },
+ { .name = "io", .family = "disk", .section_priority = NETDATA_CHART_PRIO_SYSTEM_IO },
+};
+
+void update_pressure_chart(struct pressure_chart *chart) {
+ rrddim_set_by_pointer(chart->st, chart->rd10, (collected_number)(chart->value10 * 100));
+ rrddim_set_by_pointer(chart->st, chart->rd60, (collected_number) (chart->value60 * 100));
+ rrddim_set_by_pointer(chart->st, chart->rd300, (collected_number) (chart->value300 * 100));
+
+ rrdset_done(chart->st);
+}
+
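+// The /proc/pressure files parsed below look like this (an illustrative
+// sample; memory and io also carry the second, "full" line):
+//
+//   some avg10=0.12 avg60=0.34 avg300=0.56 total=123456
+//   full avg10=0.00 avg60=0.00 avg300=0.00 total=78901
+//
+// Since the files are opened with " =" as word separators, word 2 of a line
+// is the avg10 value, word 4 is avg60 and word 6 is avg300 -- exactly the
+// indices read by do_proc_pressure() below.
+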
+int do_proc_pressure(int update_every, usec_t dt) {
+ int fail_count = 0;
+ int i;
+
+ static usec_t next_pressure_dt = 0;
+ static char *base_path = NULL;
+
+ update_every = (update_every < MIN_PRESSURE_UPDATE_EVERY) ? MIN_PRESSURE_UPDATE_EVERY : update_every;
+
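+ // PSI data is refreshed by the kernel only every PSI_FREQ (2 seconds), so
+ // this function throttles itself: dt accumulates the elapsed microseconds
+ // and a real collection only happens once update_every seconds have passed.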
+ if (next_pressure_dt <= dt) {
+ next_pressure_dt = update_every * USEC_PER_SEC;
+ } else {
+ next_pressure_dt -= dt;
+ return 0;
+ }
+
+ if (unlikely(!base_path)) {
+ base_path = config_get(CONFIG_SECTION_PLUGIN_PROC_PRESSURE, "base path of pressure metrics", "/proc/pressure");
+ }
+
+ for (i = 0; i < PRESSURE_NUM_RESOURCES; i++) {
+ procfile *ff = resource_info[i].pf;
+ int do_some = resources[i].some.enabled, do_full = resources[i].full.enabled;
+
+ if (unlikely(!ff)) {
+ char filename[FILENAME_MAX + 1];
+ char config_key[CONFIG_MAX_NAME + 1];
+
+ snprintfz(filename
+ , FILENAME_MAX
+ , "%s%s/%s"
+ , netdata_configured_host_prefix
+ , base_path
+ , resource_info[i].name);
+
+ snprintfz(config_key, CONFIG_MAX_NAME, "enable %s some pressure", resource_info[i].name);
+ do_some = config_get_boolean(CONFIG_SECTION_PLUGIN_PROC_PRESSURE, config_key, CONFIG_BOOLEAN_YES);
+ resources[i].some.enabled = do_some;
+ if (resources[i].full.id) {
+ snprintfz(config_key, CONFIG_MAX_NAME, "enable %s full pressure", resource_info[i].name);
+ do_full = config_get_boolean(CONFIG_SECTION_PLUGIN_PROC_PRESSURE, config_key, CONFIG_BOOLEAN_YES);
+ resources[i].full.enabled = do_full;
+ }
+
+ ff = procfile_open(filename, " =", PROCFILE_FLAG_DEFAULT);
+ if (unlikely(!ff)) {
+ error("Cannot read pressure information from %s.", filename);
+ fail_count++;
+ continue;
+ }
+ }
+
+ ff = procfile_readall(ff);
+ resource_info[i].pf = ff;
+ if (unlikely(!ff)) {
+ fail_count++;
+ continue;
+ }
+
+ size_t lines = procfile_lines(ff);
+ if (unlikely(lines < 1)) {
+ error("%s has no lines.", procfile_filename(ff));
+ fail_count++;
+ continue;
+ }
+
+ struct pressure_chart *chart;
+ if (do_some) {
+ chart = &resources[i].some;
+ if (unlikely(!chart->st)) {
+ chart->st = rrdset_create_localhost(
+ "system"
+ , chart->id
+ , NULL
+ , resource_info[i].family
+ , NULL
+ , chart->title
+ , "percentage"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_PRESSURE_NAME
+ , resource_info[i].section_priority + 40
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+ chart->rd10 = rrddim_add(chart->st, "some 10", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE);
+ chart->rd60 = rrddim_add(chart->st, "some 60", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE);
+ chart->rd300 = rrddim_add(chart->st, "some 300", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE);
+ } else {
+ rrdset_next(chart->st);
+ }
+
+ chart->value10 = strtod(procfile_lineword(ff, 0, 2), NULL);
+ chart->value60 = strtod(procfile_lineword(ff, 0, 4), NULL);
+ chart->value300 = strtod(procfile_lineword(ff, 0, 6), NULL);
+ update_pressure_chart(chart);
+ }
+
+ if (do_full && lines > 2) {
+ chart = &resources[i].full;
+ if (unlikely(!chart->st)) {
+ chart->st = rrdset_create_localhost(
+ "system"
+ , chart->id
+ , NULL
+ , resource_info[i].family
+ , NULL
+ , chart->title
+ , "percentage"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_PRESSURE_NAME
+ , resource_info[i].section_priority + 45
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+ chart->rd10 = rrddim_add(chart->st, "full 10", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE);
+ chart->rd60 = rrddim_add(chart->st, "full 60", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE);
+ chart->rd300 = rrddim_add(chart->st, "full 300", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE);
+ } else {
+ rrdset_next(chart->st);
+ }
+
+ chart->value10 = strtod(procfile_lineword(ff, 1, 2), NULL);
+ chart->value60 = strtod(procfile_lineword(ff, 1, 4), NULL);
+ chart->value300 = strtod(procfile_lineword(ff, 1, 6), NULL);
+ update_pressure_chart(chart);
+ }
+ }
+
+ if (PRESSURE_NUM_RESOURCES == fail_count) {
+ return 1;
+ }
+
+ return 0;
+}
diff --git a/collectors/proc.plugin/proc_pressure.h b/collectors/proc.plugin/proc_pressure.h
new file mode 100644
index 0000000..3330218
--- /dev/null
+++ b/collectors/proc.plugin/proc_pressure.h
@@ -0,0 +1,31 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_PROC_PRESSURE_H
+#define NETDATA_PROC_PRESSURE_H
+
+#define PRESSURE_NUM_RESOURCES 3
+
+struct pressure {
+ int updated;
+ char *filename;
+
+ struct pressure_chart {
+ int enabled;
+
+ const char *id;
+ const char *title;
+
+ double value10;
+ double value60;
+ double value300;
+
+ RRDSET *st;
+ RRDDIM *rd10;
+ RRDDIM *rd60;
+ RRDDIM *rd300;
+ } some, full;
+};
+
+extern void update_pressure_chart(struct pressure_chart *chart);
+
+#endif //NETDATA_PROC_PRESSURE_H
diff --git a/collectors/proc.plugin/proc_self_mountinfo.c b/collectors/proc.plugin/proc_self_mountinfo.c
new file mode 100644
index 0000000..3f17ccc
--- /dev/null
+++ b/collectors/proc.plugin/proc_self_mountinfo.c
@@ -0,0 +1,403 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "plugin_proc.h"
+
+// ----------------------------------------------------------------------------
+// taken from gnulib/mountlist.c
+
+#ifndef ME_REMOTE
+/* A file system is "remote" if its Fs_name contains a ':'
+ or if (it is of type (smbfs or cifs) and its Fs_name starts with '//')
+ or Fs_name is equal to "-hosts" (used by autofs to mount remote fs). */
+# define ME_REMOTE(Fs_name, Fs_type) \
+ (strchr (Fs_name, ':') != NULL \
+ || ((Fs_name)[0] == '/' \
+ && (Fs_name)[1] == '/' \
+ && (strcmp (Fs_type, "smbfs") == 0 \
+ || strcmp (Fs_type, "cifs") == 0)) \
+ || (strcmp("-hosts", Fs_name) == 0))
+#endif
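+
+// For example (illustrative names): "fileserver:/export" is remote because it
+// contains a ':', "//fileserver/share" is remote when its type is smbfs or
+// cifs, and "-hosts" is the autofs map used to mount remote filesystems.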
+
+#define ME_DUMMY_0(Fs_name, Fs_type) \
+ (strcmp (Fs_type, "autofs") == 0 \
+ || strcmp (Fs_type, "proc") == 0 \
+ || strcmp (Fs_type, "subfs") == 0 \
+ /* for Linux 2.6/3.x */ \
+ || strcmp (Fs_type, "debugfs") == 0 \
+ || strcmp (Fs_type, "devpts") == 0 \
+ || strcmp (Fs_type, "fusectl") == 0 \
+ || strcmp (Fs_type, "mqueue") == 0 \
+ || strcmp (Fs_type, "rpc_pipefs") == 0 \
+ || strcmp (Fs_type, "sysfs") == 0 \
+ /* FreeBSD, Linux 2.4 */ \
+ || strcmp (Fs_type, "devfs") == 0 \
+ /* for NetBSD 3.0 */ \
+ || strcmp (Fs_type, "kernfs") == 0 \
+ /* for Irix 6.5 */ \
+ || strcmp (Fs_type, "ignore") == 0)
+
+/* Historically, we have marked as "dummy" any file system of type "none",
+ but now that programs like du need to know about bind-mounted directories,
+ we grant an exception to any with "bind" in its list of mount options.
+ I.e., those are *not* dummy entries. */
+# define ME_DUMMY(Fs_name, Fs_type) \
+ (ME_DUMMY_0 (Fs_name, Fs_type) || strcmp (Fs_type, "none") == 0)
+
+// ----------------------------------------------------------------------------
+
+// find the mount info with the given major:minor
+// in the supplied linked list of mountinfo structures
+struct mountinfo *mountinfo_find(struct mountinfo *root, unsigned long major, unsigned long minor) {
+ struct mountinfo *mi;
+
+ for(mi = root; mi ; mi = mi->next)
+ if(unlikely(mi->major == major && mi->minor == minor))
+ return mi;
+
+ return NULL;
+}
+
+// find the mount info with the given filesystem and mount_source
+// in the supplied linked list of mountinfo structures
+struct mountinfo *mountinfo_find_by_filesystem_mount_source(struct mountinfo *root, const char *filesystem, const char *mount_source) {
+ struct mountinfo *mi;
+ uint32_t filesystem_hash = simple_hash(filesystem), mount_source_hash = simple_hash(mount_source);
+
+ for(mi = root; mi ; mi = mi->next)
+ if(unlikely(mi->filesystem
+ && mi->mount_source
+ && mi->filesystem_hash == filesystem_hash
+ && mi->mount_source_hash == mount_source_hash
+ && !strcmp(mi->filesystem, filesystem)
+ && !strcmp(mi->mount_source, mount_source)))
+ return mi;
+
+ return NULL;
+}
+
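+// find the mount info with the given filesystem and super option
+// in the supplied linked list of mountinfo structures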
+struct mountinfo *mountinfo_find_by_filesystem_super_option(struct mountinfo *root, const char *filesystem, const char *super_options) {
+ struct mountinfo *mi;
+ uint32_t filesystem_hash = simple_hash(filesystem);
+
+ size_t solen = strlen(super_options);
+
+ for(mi = root; mi ; mi = mi->next)
+ if(unlikely(mi->filesystem
+ && mi->super_options
+ && mi->filesystem_hash == filesystem_hash
+ && !strcmp(mi->filesystem, filesystem))) {
+
+ // super_options is a comma separated list
+ char *s = mi->super_options, *e;
+ while(*s) {
+ e = s + 1;
+ while(*e && *e != ',') e++;
+
+ size_t len = e - s;
+ if(unlikely(len == solen && !strncmp(s, super_options, len)))
+ return mi;
+
+ if(*e == ',') s = ++e;
+ else s = e;
+ }
+ }
+
+ return NULL;
+}
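+
+// A hypothetical usage sketch (the filesystem and option names are only
+// examples, not taken from this file):
+//
+//     struct mountinfo *root = mountinfo_read(0);
+//     struct mountinfo *mi = mountinfo_find_by_filesystem_super_option(root, "cgroup", "cpuacct");
+//     if(mi) info("cpuacct cgroup is mounted at '%s'", mi->mount_point);
+//     mountinfo_free_all(root);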
+
+static void mountinfo_free(struct mountinfo *mi) {
+ freez(mi->root);
+ freez(mi->mount_point);
+ freez(mi->mount_options);
+ freez(mi->persistent_id);
+/*
+ if(mi->optional_fields_count) {
+ int i;
+ for(i = 0; i < mi->optional_fields_count ; i++)
+ free(*mi->optional_fields[i]);
+ }
+ free(mi->optional_fields);
+*/
+ freez(mi->filesystem);
+ freez(mi->mount_source);
+ freez(mi->super_options);
+ freez(mi);
+}
+
+// free a linked list of mountinfo structures
+void mountinfo_free_all(struct mountinfo *mi) {
+ while(mi) {
+ struct mountinfo *t = mi;
+ mi = mi->next;
+
+ mountinfo_free(t);
+ }
+}
+
+static char *strdupz_decoding_octal(const char *string) {
+ char *buffer = strdupz(string);
+
+ char *d = buffer;
+ const char *s = string;
+
+ while(*s) {
+ if(unlikely(*s == '\\')) {
+ s++;
+ if(likely(isdigit(*s) && isdigit(s[1]) && isdigit(s[2]))) {
+ char c = *s++ - '0';
+ c <<= 3;
+ c |= *s++ - '0';
+ c <<= 3;
+ c |= *s++ - '0';
+ *d++ = c;
+ }
+ else *d++ = '_';
+ }
+ else *d++ = *s++;
+ }
+ *d = '\0';
+
+ return buffer;
+}
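+
+// For example: the kernel escapes spaces in mount points as three-digit octal,
+// so "/mnt/usb\040drive" in /proc/self/mountinfo decodes to "/mnt/usb drive";
+// the backslash of a malformed escape becomes '_'.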
+
+static inline int is_read_only(const char *s) {
+ if(!s) return 0;
+
+ size_t len = strlen(s);
+ if(len < 2) return 0;
+ if(len == 2) {
+ if(!strcmp(s, "ro")) return 1;
+ return 0;
+ }
+ if(!strncmp(s, "ro,", 3)) return 1;
+ if(!strncmp(&s[len - 3], ",ro", 3)) return 1;
+ if(strstr(s, ",ro,")) return 1;
+ return 0;
+}
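+
+// The mountinfo line layout parsed below, using the sample from proc(5):
+//
+//   36 35 98:0 /mnt1 /mnt2 rw,noatime master:1 - ext3 /dev/root rw,errors=continue
+//
+// i.e. mount id, parent id, major:minor, root, mount point, mount options,
+// zero or more optional fields terminated by "-", then filesystem type,
+// mount source and super options.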
+
+// read the whole mountinfo into a linked list
+struct mountinfo *mountinfo_read(int do_statvfs) {
+ char filename[FILENAME_MAX + 1];
+ snprintfz(filename, FILENAME_MAX, "%s/proc/self/mountinfo", netdata_configured_host_prefix);
+ procfile *ff = procfile_open(filename, " \t", PROCFILE_FLAG_DEFAULT);
+ if(unlikely(!ff)) {
+ snprintfz(filename, FILENAME_MAX, "%s/proc/1/mountinfo", netdata_configured_host_prefix);
+ ff = procfile_open(filename, " \t", PROCFILE_FLAG_DEFAULT);
+ if(unlikely(!ff)) return NULL;
+ }
+
+ ff = procfile_readall(ff);
+ if(unlikely(!ff))
+ return NULL;
+
+ struct mountinfo *root = NULL, *last = NULL, *mi = NULL;
+
+ unsigned long l, lines = procfile_lines(ff);
+ for(l = 0; l < lines ;l++) {
+ if(unlikely(procfile_linewords(ff, l) < 5))
+ continue;
+
+ mi = mallocz(sizeof(struct mountinfo));
+
+ unsigned long w = 0;
+ mi->id = str2ul(procfile_lineword(ff, l, w)); w++;
+ mi->parentid = str2ul(procfile_lineword(ff, l, w)); w++;
+
+ char *major = procfile_lineword(ff, l, w), *minor; w++;
+ for(minor = major; *minor && *minor != ':' ;minor++) ;
+
+ if(unlikely(!*minor)) {
+ error("Cannot parse major:minor on '%s' at line %lu of '%s'", major, l + 1, filename);
+ freez(mi);
+ continue;
+ }
+
+ *minor = '\0';
+ minor++;
+
+ mi->flags = 0;
+
+ mi->major = str2ul(major);
+ mi->minor = str2ul(minor);
+
+ mi->root = strdupz(procfile_lineword(ff, l, w)); w++;
+ mi->root_hash = simple_hash(mi->root);
+
+ mi->mount_point = strdupz_decoding_octal(procfile_lineword(ff, l, w)); w++;
+ mi->mount_point_hash = simple_hash(mi->mount_point);
+
+ mi->persistent_id = strdupz(mi->mount_point);
+ netdata_fix_chart_id(mi->persistent_id);
+ mi->persistent_id_hash = simple_hash(mi->persistent_id);
+
+ mi->mount_options = strdupz(procfile_lineword(ff, l, w)); w++;
+
+ if(unlikely(is_read_only(mi->mount_options)))
+ mi->flags |= MOUNTINFO_READONLY;
+
+ // count the optional fields
+/*
+ unsigned long wo = w;
+*/
+ mi->optional_fields_count = 0;
+ char *s = procfile_lineword(ff, l, w);
+ while(*s && *s != '-') {
+ w++;
+ s = procfile_lineword(ff, l, w);
+ mi->optional_fields_count++;
+ }
+
+/*
+ if(unlikely(mi->optional_fields_count)) {
+ // we have some optional fields
+ // read them into a new array of pointers;
+
+ mi->optional_fields = mallocz(mi->optional_fields_count * sizeof(char *));
+
+ int i;
+ for(i = 0; i < mi->optional_fields_count ; i++) {
+ *mi->optional_fields[wo] = strdupz(procfile_lineword(ff, l, w));
+ wo++;
+ }
+ }
+ else
+ mi->optional_fields = NULL;
+*/
+
+ if(likely(*s == '-')) {
+ w++;
+
+ mi->filesystem = strdupz(procfile_lineword(ff, l, w)); w++;
+ mi->filesystem_hash = simple_hash(mi->filesystem);
+
+ mi->mount_source = strdupz_decoding_octal(procfile_lineword(ff, l, w)); w++;
+ mi->mount_source_hash = simple_hash(mi->mount_source);
+
+ mi->super_options = strdupz(procfile_lineword(ff, l, w)); w++;
+
+ if(unlikely(is_read_only(mi->super_options)))
+ mi->flags |= MOUNTINFO_READONLY;
+
+ if(unlikely(ME_DUMMY(mi->mount_source, mi->filesystem)))
+ mi->flags |= MOUNTINFO_IS_DUMMY;
+
+ if(unlikely(ME_REMOTE(mi->mount_source, mi->filesystem)))
+ mi->flags |= MOUNTINFO_IS_REMOTE;
+
+ // mark as BIND the duplicates (i.e. same filesystem + same source)
+ if(do_statvfs) {
+ struct stat buf;
+ if(unlikely(stat(mi->mount_point, &buf) == -1)) {
+ mi->st_dev = 0;
+ mi->flags |= MOUNTINFO_NO_STAT;
+ }
+ else {
+ mi->st_dev = buf.st_dev;
+
+ struct mountinfo *mt;
+ for(mt = root; mt; mt = mt->next) {
+ if(unlikely(mt->st_dev == mi->st_dev && !(mt->flags & MOUNTINFO_IS_SAME_DEV))) {
+ if(strlen(mi->mount_point) < strlen(mt->mount_point))
+ mt->flags |= MOUNTINFO_IS_SAME_DEV;
+ else
+ mi->flags |= MOUNTINFO_IS_SAME_DEV;
+ }
+ }
+ }
+ }
+ else {
+ mi->st_dev = 0;
+ }
+ }
+ else {
+ mi->filesystem = NULL;
+ mi->filesystem_hash = 0;
+
+ mi->mount_source = NULL;
+ mi->mount_source_hash = 0;
+
+ mi->super_options = NULL;
+
+ mi->st_dev = 0;
+ }
+
+ // check if it has size
+ if(do_statvfs && !(mi->flags & MOUNTINFO_IS_DUMMY)) {
+ struct statvfs buff_statvfs;
+ if(unlikely(statvfs(mi->mount_point, &buff_statvfs) < 0)) {
+ mi->flags |= MOUNTINFO_NO_STAT;
+ }
+ else if(unlikely(!buff_statvfs.f_blocks /* || !buff_statvfs.f_files */)) {
+ mi->flags |= MOUNTINFO_NO_SIZE;
+ }
+ }
+
+ // link it
+ if(unlikely(!root))
+ root = mi;
+ else
+ last->next = mi;
+
+ last = mi;
+ mi->next = NULL;
+
+/*
+#ifdef NETDATA_INTERNAL_CHECKS
+ fprintf(stderr, "MOUNTINFO: %ld %ld %lu:%lu root '%s', persistent id '%s', mount point '%s', mount options '%s', filesystem '%s', mount source '%s', super options '%s'%s%s%s%s%s%s\n",
+ mi->id,
+ mi->parentid,
+ mi->major,
+ mi->minor,
+ mi->root,
+ mi->persistent_id,
+ (mi->mount_point)?mi->mount_point:"",
+ (mi->mount_options)?mi->mount_options:"",
+ (mi->filesystem)?mi->filesystem:"",
+ (mi->mount_source)?mi->mount_source:"",
+ (mi->super_options)?mi->super_options:"",
+ (mi->flags & MOUNTINFO_IS_DUMMY)?" DUMMY":"",
+ (mi->flags & MOUNTINFO_IS_BIND)?" BIND":"",
+ (mi->flags & MOUNTINFO_IS_REMOTE)?" REMOTE":"",
+ (mi->flags & MOUNTINFO_NO_STAT)?" NOSTAT":"",
+ (mi->flags & MOUNTINFO_NO_SIZE)?" NOSIZE":"",
+ (mi->flags & MOUNTINFO_IS_SAME_DEV)?" SAMEDEV":""
+ );
+#endif
+*/
+ }
+
+/* find if the mount options have "bind" in them
+ {
+ FILE *fp = setmntent(MOUNTED, "r");
+ if (fp != NULL) {
+ struct mntent mntbuf;
+ struct mntent *mnt;
+ char buf[4096 + 1];
+
+ while ((mnt = getmntent_r(fp, &mntbuf, buf, 4096))) {
+ char *bind = hasmntopt(mnt, "bind");
+ if(unlikely(bind)) {
+ struct mountinfo *mi;
+ for(mi = root; mi ; mi = mi->next) {
+ if(unlikely(strcmp(mnt->mnt_dir, mi->mount_point) == 0)) {
+ fprintf(stderr, "Mount point '%s' is BIND\n", mi->mount_point);
+ mi->flags |= MOUNTINFO_IS_BIND;
+ break;
+ }
+ }
+
+#ifdef NETDATA_INTERNAL_CHECKS
+ if(unlikely(!mi)) {
+ error("Mount point '%s' not found in /proc/self/mountinfo", mnt->mnt_dir);
+ }
+#endif
+ }
+ }
+ endmntent(fp);
+ }
+ }
+*/
+
+ procfile_close(ff);
+ return root;
+}
diff --git a/collectors/proc.plugin/proc_self_mountinfo.h b/collectors/proc.plugin/proc_self_mountinfo.h
new file mode 100644
index 0000000..15d63c7
--- /dev/null
+++ b/collectors/proc.plugin/proc_self_mountinfo.h
@@ -0,0 +1,57 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_PROC_SELF_MOUNTINFO_H
+#define NETDATA_PROC_SELF_MOUNTINFO_H 1
+
+#define MOUNTINFO_IS_DUMMY 0x00000001
+#define MOUNTINFO_IS_REMOTE 0x00000002
+#define MOUNTINFO_IS_BIND 0x00000004
+#define MOUNTINFO_IS_SAME_DEV 0x00000008
+#define MOUNTINFO_NO_STAT 0x00000010
+#define MOUNTINFO_NO_SIZE 0x00000020
+#define MOUNTINFO_READONLY 0x00000040
+
+struct mountinfo {
+ long id; // mount ID: unique identifier of the mount (may be reused after umount(2)).
+ long parentid; // parent ID: ID of parent mount (or of self for the top of the mount tree).
+ unsigned long major; // major:minor: value of st_dev for files on filesystem (see stat(2)).
+ unsigned long minor;
+
+ char *persistent_id; // a calculated persistent id for the mount point
+ uint32_t persistent_id_hash;
+
+ char *root; // root: root of the mount within the filesystem.
+ uint32_t root_hash;
+
+ char *mount_point; // mount point: mount point relative to the process's root.
+ uint32_t mount_point_hash;
+
+ char *mount_options; // mount options: per-mount options.
+
+ int optional_fields_count;
+/*
+ char ***optional_fields; // optional fields: zero or more fields of the form "tag[:value]".
+*/
+ char *filesystem; // filesystem type: name of filesystem in the form "type[.subtype]".
+ uint32_t filesystem_hash;
+
+ char *mount_source; // mount source: filesystem-specific information or "none".
+ uint32_t mount_source_hash;
+
+ char *super_options; // super options: per-superblock options.
+
+ uint32_t flags;
+
+ dev_t st_dev; // id of device as given by stat()
+
+ struct mountinfo *next;
+};
+
+extern struct mountinfo *mountinfo_find(struct mountinfo *root, unsigned long major, unsigned long minor);
+extern struct mountinfo *mountinfo_find_by_filesystem_mount_source(struct mountinfo *root, const char *filesystem, const char *mount_source);
+extern struct mountinfo *mountinfo_find_by_filesystem_super_option(struct mountinfo *root, const char *filesystem, const char *super_options);
+
+extern void mountinfo_free_all(struct mountinfo *mi);
+extern struct mountinfo *mountinfo_read(int do_statvfs);
+
+#endif /* NETDATA_PROC_SELF_MOUNTINFO_H */
\ No newline at end of file
diff --git a/collectors/proc.plugin/proc_softirqs.c b/collectors/proc.plugin/proc_softirqs.c
new file mode 100644
index 0000000..d68c69b
--- /dev/null
+++ b/collectors/proc.plugin/proc_softirqs.c
@@ -0,0 +1,242 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "plugin_proc.h"
+
+#define PLUGIN_PROC_MODULE_SOFTIRQS_NAME "/proc/softirqs"
+
+#define MAX_INTERRUPT_NAME 50
+
+struct cpu_interrupt {
+ unsigned long long value;
+ RRDDIM *rd;
+};
+
+struct interrupt {
+ int used;
+ char *id;
+ char name[MAX_INTERRUPT_NAME + 1];
+ RRDDIM *rd;
+ unsigned long long total;
+ struct cpu_interrupt cpu[];
+};
+
+// since each interrupt is variable in size
+// we use this to calculate its record size
+#define recordsize(cpus) (sizeof(struct interrupt) + ((cpus) * sizeof(struct cpu_interrupt)))
+
+// given a base, get a pointer to each record
+#define irrindex(base, line, cpus) ((struct interrupt *)&((char *)(base))[(line) * recordsize(cpus)])
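+
+// A worked example of this sizing (a sketch assuming 8-byte pointers and
+// values, so sizeof(struct cpu_interrupt) == 16): on a 4-CPU machine each
+// record occupies recordsize(4) = sizeof(struct interrupt) + 64 bytes, and
+// irrindex(base, l, 4) points l * recordsize(4) bytes past base.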
+
+static inline struct interrupt *get_interrupts_array(size_t lines, int cpus) {
+ static struct interrupt *irrs = NULL;
+ static size_t allocated = 0;
+
+ if(unlikely(lines != allocated)) {
+ uint32_t l;
+ int c;
+
+ irrs = (struct interrupt *)reallocz(irrs, lines * recordsize(cpus));
+
+ // reset all interrupt RRDDIM pointers as any line could have shifted
+ for(l = 0; l < lines ;l++) {
+ struct interrupt *irr = irrindex(irrs, l, cpus);
+ irr->rd = NULL;
+ irr->name[0] = '\0';
+ for(c = 0; c < cpus ;c++)
+ irr->cpu[c].rd = NULL;
+ }
+
+ allocated = lines;
+ }
+
+ return irrs;
+}
+
+int do_proc_softirqs(int update_every, usec_t dt) {
+ (void)dt;
+ static procfile *ff = NULL;
+ static int cpus = -1, do_per_core = CONFIG_BOOLEAN_INVALID;
+ struct interrupt *irrs = NULL;
+
+ if(unlikely(do_per_core == CONFIG_BOOLEAN_INVALID))
+ do_per_core = config_get_boolean_ondemand("plugin:proc:/proc/softirqs", "interrupts per core", CONFIG_BOOLEAN_AUTO);
+
+ if(unlikely(!ff)) {
+ char filename[FILENAME_MAX + 1];
+ snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/proc/softirqs");
+ ff = procfile_open(config_get("plugin:proc:/proc/softirqs", "filename to monitor", filename), " \t:", PROCFILE_FLAG_DEFAULT);
+ if(unlikely(!ff)) return 1;
+ }
+
+ ff = procfile_readall(ff);
+ if(unlikely(!ff)) return 0; // we return 0, so that we will retry to open it next time
+
+ size_t lines = procfile_lines(ff), l;
+ size_t words = procfile_linewords(ff, 0);
+
+ if(unlikely(!lines)) {
+ error("Cannot read /proc/softirqs, zero lines reported.");
+ return 1;
+ }
+
+ // find how many CPUs are there
+ if(unlikely(cpus == -1)) {
+ uint32_t w;
+ cpus = 0;
+ for(w = 0; w < words ; w++) {
+ if(likely(strncmp(procfile_lineword(ff, 0, w), "CPU", 3) == 0))
+ cpus++;
+ }
+ }
+
+ if(unlikely(!cpus)) {
+ error("PLUGIN: PROC_SOFTIRQS: Cannot find the number of CPUs in /proc/softirqs");
+ return 1;
+ }
+
+ // allocate the size we need;
+ irrs = get_interrupts_array(lines, cpus);
+ irrs[0].used = 0;
+
+ // loop through all lines
+ for(l = 1; l < lines ;l++) {
+ struct interrupt *irr = irrindex(irrs, l, cpus);
+ irr->used = 0;
+ irr->total = 0;
+
+ words = procfile_linewords(ff, l);
+ if(unlikely(!words)) continue;
+
+ irr->id = procfile_lineword(ff, l, 0);
+ if(unlikely(!irr->id || !irr->id[0])) continue;
+
+ int c;
+ for(c = 0; c < cpus ;c++) {
+ if(likely((c + 1) < (int)words))
+ irr->cpu[c].value = str2ull(procfile_lineword(ff, l, (uint32_t)(c + 1)));
+ else
+ irr->cpu[c].value = 0;
+
+ irr->total += irr->cpu[c].value;
+ }
+
+ strncpyz(irr->name, irr->id, MAX_INTERRUPT_NAME);
+
+ irr->used = 1;
+ }
+
+ // --------------------------------------------------------------------
+
+ static RRDSET *st_system_softirqs = NULL;
+ if(unlikely(!st_system_softirqs))
+ st_system_softirqs = rrdset_create_localhost(
+ "system"
+ , "softirqs"
+ , NULL
+ , "softirqs"
+ , NULL
+ , "System softirqs"
+ , "softirqs/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_SOFTIRQS_NAME
+ , NETDATA_CHART_PRIO_SYSTEM_SOFTIRQS
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+ else
+ rrdset_next(st_system_softirqs);
+
+ for(l = 0; l < lines ;l++) {
+ struct interrupt *irr = irrindex(irrs, l, cpus);
+
+ if(irr->used && irr->total) {
+ // some interrupt may have changed without changing the total number of lines
+ // if the same number of interrupts have been added and removed between two
+ // calls of this function.
+ if(unlikely(!irr->rd || strncmp(irr->name, irr->rd->name, MAX_INTERRUPT_NAME) != 0)) {
+ irr->rd = rrddim_add(st_system_softirqs, irr->id, irr->name, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_set_name(st_system_softirqs, irr->rd, irr->name);
+
+ // also reset per cpu RRDDIMs to avoid repeating strncmp() in the per core loop
+ if(likely(do_per_core != CONFIG_BOOLEAN_NO)) {
+ int c;
+ for(c = 0; c < cpus; c++) irr->cpu[c].rd = NULL;
+ }
+ }
+
+ rrddim_set_by_pointer(st_system_softirqs, irr->rd, irr->total);
+ }
+ }
+
+ rrdset_done(st_system_softirqs);
+
+ // --------------------------------------------------------------------
+
+ if(do_per_core != CONFIG_BOOLEAN_NO) {
+ static RRDSET **core_st = NULL;
+ static int old_cpus = 0;
+
+ if(old_cpus < cpus) {
+ core_st = reallocz(core_st, sizeof(RRDSET *) * cpus);
+ memset(&core_st[old_cpus], 0, sizeof(RRDSET *) * (cpus - old_cpus));
+ old_cpus = cpus;
+ }
+
+ int c;
+
+ for(c = 0; c < cpus ; c++) {
+ if(unlikely(!core_st[c])) {
+ // find if everything is just zero
+ unsigned long long core_sum = 0;
+
+ for (l = 0; l < lines; l++) {
+ struct interrupt *irr = irrindex(irrs, l, cpus);
+ if (unlikely(!irr->used)) continue;
+ core_sum += irr->cpu[c].value;
+ }
+
+ if (unlikely(core_sum == 0)) continue; // try next core
+
+ char id[50 + 1];
+ snprintfz(id, 50, "cpu%d_softirqs", c);
+
+ char title[100 + 1];
+ snprintfz(title, 100, "CPU%d softirqs", c);
+
+ core_st[c] = rrdset_create_localhost(
+ "cpu"
+ , id
+ , NULL
+ , "softirqs"
+ , "cpu.softirqs"
+ , title
+ , "softirqs/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_SOFTIRQS_NAME
+ , NETDATA_CHART_PRIO_SOFTIRQS_PER_CORE + c
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+ }
+ else
+ rrdset_next(core_st[c]);
+
+ for(l = 0; l < lines ;l++) {
+ struct interrupt *irr = irrindex(irrs, l, cpus);
+
+ if(irr->used && (do_per_core == CONFIG_BOOLEAN_YES || irr->cpu[c].value)) {
+ if(unlikely(!irr->cpu[c].rd)) {
+ irr->cpu[c].rd = rrddim_add(core_st[c], irr->id, irr->name, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_set_name(core_st[c], irr->cpu[c].rd, irr->name);
+ }
+
+ rrddim_set_by_pointer(core_st[c], irr->cpu[c].rd, irr->cpu[c].value);
+ }
+ }
+
+ rrdset_done(core_st[c]);
+ }
+ }
+
+ return 0;
+}
diff --git a/collectors/proc.plugin/proc_spl_kstat_zfs.c b/collectors/proc.plugin/proc_spl_kstat_zfs.c
new file mode 100644
index 0000000..32ff36b
--- /dev/null
+++ b/collectors/proc.plugin/proc_spl_kstat_zfs.c
@@ -0,0 +1,196 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "plugin_proc.h"
+#include "zfs_common.h"
+
+#define ZFS_PROC_ARCSTATS "/proc/spl/kstat/zfs/arcstats"
+
+extern struct arcstats arcstats;
+
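+
+// /proc/spl/kstat/zfs/arcstats is a key/type/value table; an illustrative
+// sample of the lines parsed below:
+//
+//   name                            type data
+//   hits                            4    52183
+//   misses                          4    1042
+//   l2_hits                         4    0
+//
+// The parser keys on word 0 and reads the value from word 2; seeing any
+// "l2_" prefixed key marks that an L2ARC device exists (arcstats.l2exist).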
+int do_proc_spl_kstat_zfs_arcstats(int update_every, usec_t dt) {
+ (void)dt;
+
+ static int show_zero_charts = 0, do_zfs_stats = 0;
+ static procfile *ff = NULL;
+ static char *dirname = NULL;
+ static ARL_BASE *arl_base = NULL;
+
+ arcstats.l2exist = -1;
+
+ if(unlikely(!arl_base)) {
+ arl_base = arl_create("arcstats", NULL, 60);
+
+ arl_expect(arl_base, "hits", &arcstats.hits);
+ arl_expect(arl_base, "misses", &arcstats.misses);
+ arl_expect(arl_base, "demand_data_hits", &arcstats.demand_data_hits);
+ arl_expect(arl_base, "demand_data_misses", &arcstats.demand_data_misses);
+ arl_expect(arl_base, "demand_metadata_hits", &arcstats.demand_metadata_hits);
+ arl_expect(arl_base, "demand_metadata_misses", &arcstats.demand_metadata_misses);
+ arl_expect(arl_base, "prefetch_data_hits", &arcstats.prefetch_data_hits);
+ arl_expect(arl_base, "prefetch_data_misses", &arcstats.prefetch_data_misses);
+ arl_expect(arl_base, "prefetch_metadata_hits", &arcstats.prefetch_metadata_hits);
+ arl_expect(arl_base, "prefetch_metadata_misses", &arcstats.prefetch_metadata_misses);
+ arl_expect(arl_base, "mru_hits", &arcstats.mru_hits);
+ arl_expect(arl_base, "mru_ghost_hits", &arcstats.mru_ghost_hits);
+ arl_expect(arl_base, "mfu_hits", &arcstats.mfu_hits);
+ arl_expect(arl_base, "mfu_ghost_hits", &arcstats.mfu_ghost_hits);
+ arl_expect(arl_base, "deleted", &arcstats.deleted);
+ arl_expect(arl_base, "mutex_miss", &arcstats.mutex_miss);
+ arl_expect(arl_base, "evict_skip", &arcstats.evict_skip);
+ arl_expect(arl_base, "evict_not_enough", &arcstats.evict_not_enough);
+ arl_expect(arl_base, "evict_l2_cached", &arcstats.evict_l2_cached);
+ arl_expect(arl_base, "evict_l2_eligible", &arcstats.evict_l2_eligible);
+ arl_expect(arl_base, "evict_l2_ineligible", &arcstats.evict_l2_ineligible);
+ arl_expect(arl_base, "evict_l2_skip", &arcstats.evict_l2_skip);
+ arl_expect(arl_base, "hash_elements", &arcstats.hash_elements);
+ arl_expect(arl_base, "hash_elements_max", &arcstats.hash_elements_max);
+ arl_expect(arl_base, "hash_collisions", &arcstats.hash_collisions);
+ arl_expect(arl_base, "hash_chains", &arcstats.hash_chains);
+ arl_expect(arl_base, "hash_chain_max", &arcstats.hash_chain_max);
+ arl_expect(arl_base, "p", &arcstats.p);
+ arl_expect(arl_base, "c", &arcstats.c);
+ arl_expect(arl_base, "c_min", &arcstats.c_min);
+ arl_expect(arl_base, "c_max", &arcstats.c_max);
+ arl_expect(arl_base, "size", &arcstats.size);
+ arl_expect(arl_base, "hdr_size", &arcstats.hdr_size);
+ arl_expect(arl_base, "data_size", &arcstats.data_size);
+ arl_expect(arl_base, "metadata_size", &arcstats.metadata_size);
+ arl_expect(arl_base, "other_size", &arcstats.other_size);
+ arl_expect(arl_base, "anon_size", &arcstats.anon_size);
+ arl_expect(arl_base, "anon_evictable_data", &arcstats.anon_evictable_data);
+ arl_expect(arl_base, "anon_evictable_metadata", &arcstats.anon_evictable_metadata);
+ arl_expect(arl_base, "mru_size", &arcstats.mru_size);
+ arl_expect(arl_base, "mru_evictable_data", &arcstats.mru_evictable_data);
+ arl_expect(arl_base, "mru_evictable_metadata", &arcstats.mru_evictable_metadata);
+ arl_expect(arl_base, "mru_ghost_size", &arcstats.mru_ghost_size);
+ arl_expect(arl_base, "mru_ghost_evictable_data", &arcstats.mru_ghost_evictable_data);
+ arl_expect(arl_base, "mru_ghost_evictable_metadata", &arcstats.mru_ghost_evictable_metadata);
+ arl_expect(arl_base, "mfu_size", &arcstats.mfu_size);
+ arl_expect(arl_base, "mfu_evictable_data", &arcstats.mfu_evictable_data);
+ arl_expect(arl_base, "mfu_evictable_metadata", &arcstats.mfu_evictable_metadata);
+ arl_expect(arl_base, "mfu_ghost_size", &arcstats.mfu_ghost_size);
+ arl_expect(arl_base, "mfu_ghost_evictable_data", &arcstats.mfu_ghost_evictable_data);
+ arl_expect(arl_base, "mfu_ghost_evictable_metadata", &arcstats.mfu_ghost_evictable_metadata);
+ arl_expect(arl_base, "l2_hits", &arcstats.l2_hits);
+ arl_expect(arl_base, "l2_misses", &arcstats.l2_misses);
+ arl_expect(arl_base, "l2_feeds", &arcstats.l2_feeds);
+ arl_expect(arl_base, "l2_rw_clash", &arcstats.l2_rw_clash);
+ arl_expect(arl_base, "l2_read_bytes", &arcstats.l2_read_bytes);
+ arl_expect(arl_base, "l2_write_bytes", &arcstats.l2_write_bytes);
+ arl_expect(arl_base, "l2_writes_sent", &arcstats.l2_writes_sent);
+ arl_expect(arl_base, "l2_writes_done", &arcstats.l2_writes_done);
+ arl_expect(arl_base, "l2_writes_error", &arcstats.l2_writes_error);
+ arl_expect(arl_base, "l2_writes_lock_retry", &arcstats.l2_writes_lock_retry);
+ arl_expect(arl_base, "l2_evict_lock_retry", &arcstats.l2_evict_lock_retry);
+ arl_expect(arl_base, "l2_evict_reading", &arcstats.l2_evict_reading);
+ arl_expect(arl_base, "l2_evict_l1cached", &arcstats.l2_evict_l1cached);
+ arl_expect(arl_base, "l2_free_on_write", &arcstats.l2_free_on_write);
+ arl_expect(arl_base, "l2_cdata_free_on_write", &arcstats.l2_cdata_free_on_write);
+ arl_expect(arl_base, "l2_abort_lowmem", &arcstats.l2_abort_lowmem);
+ arl_expect(arl_base, "l2_cksum_bad", &arcstats.l2_cksum_bad);
+ arl_expect(arl_base, "l2_io_error", &arcstats.l2_io_error);
+ arl_expect(arl_base, "l2_size", &arcstats.l2_size);
+ arl_expect(arl_base, "l2_asize", &arcstats.l2_asize);
+ arl_expect(arl_base, "l2_hdr_size", &arcstats.l2_hdr_size);
+ arl_expect(arl_base, "l2_compress_successes", &arcstats.l2_compress_successes);
+ arl_expect(arl_base, "l2_compress_zeros", &arcstats.l2_compress_zeros);
+ arl_expect(arl_base, "l2_compress_failures", &arcstats.l2_compress_failures);
+ arl_expect(arl_base, "memory_throttle_count", &arcstats.memory_throttle_count);
+ arl_expect(arl_base, "duplicate_buffers", &arcstats.duplicate_buffers);
+ arl_expect(arl_base, "duplicate_buffers_size", &arcstats.duplicate_buffers_size);
+ arl_expect(arl_base, "duplicate_reads", &arcstats.duplicate_reads);
+ arl_expect(arl_base, "memory_direct_count", &arcstats.memory_direct_count);
+ arl_expect(arl_base, "memory_indirect_count", &arcstats.memory_indirect_count);
+ arl_expect(arl_base, "arc_no_grow", &arcstats.arc_no_grow);
+ arl_expect(arl_base, "arc_tempreserve", &arcstats.arc_tempreserve);
+ arl_expect(arl_base, "arc_loaned_bytes", &arcstats.arc_loaned_bytes);
+ arl_expect(arl_base, "arc_prune", &arcstats.arc_prune);
+ arl_expect(arl_base, "arc_meta_used", &arcstats.arc_meta_used);
+ arl_expect(arl_base, "arc_meta_limit", &arcstats.arc_meta_limit);
+ arl_expect(arl_base, "arc_meta_max", &arcstats.arc_meta_max);
+ arl_expect(arl_base, "arc_meta_min", &arcstats.arc_meta_min);
+ arl_expect(arl_base, "arc_need_free", &arcstats.arc_need_free);
+ arl_expect(arl_base, "arc_sys_free", &arcstats.arc_sys_free);
+ }
+
+ if(unlikely(!ff)) {
+ char filename[FILENAME_MAX + 1];
+ snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, ZFS_PROC_ARCSTATS);
+ ff = procfile_open(config_get("plugin:proc:" ZFS_PROC_ARCSTATS, "filename to monitor", filename), " \t:", PROCFILE_FLAG_DEFAULT);
+ if(unlikely(!ff))
+ return 1;
+
+ snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/proc/spl/kstat/zfs");
+ dirname = config_get("plugin:proc:" ZFS_PROC_ARCSTATS, "directory to monitor", filename);
+
+ show_zero_charts = config_get_boolean_ondemand("plugin:proc:" ZFS_PROC_ARCSTATS, "show zero charts", CONFIG_BOOLEAN_NO);
+ if(show_zero_charts == CONFIG_BOOLEAN_AUTO && netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES)
+ show_zero_charts = CONFIG_BOOLEAN_YES;
+ if(unlikely(show_zero_charts == CONFIG_BOOLEAN_YES))
+ do_zfs_stats = 1;
+ }
+
+ // check if any pools exist
+ if(likely(!do_zfs_stats)) {
+ DIR *dir = opendir(dirname);
+ if(unlikely(!dir)) {
+ error("Cannot read directory '%s'", dirname);
+ return 1;
+ }
+
+ struct dirent *de = NULL;
+ while(likely(de = readdir(dir))) {
+ if(likely(de->d_type == DT_DIR
+ && (
+ (de->d_name[0] == '.' && de->d_name[1] == '\0')
+ || (de->d_name[0] == '.' && de->d_name[1] == '.' && de->d_name[2] == '\0')
+ )))
+ continue;
+
+ if(unlikely(de->d_type == DT_LNK || de->d_type == DT_DIR)) {
+ do_zfs_stats = 1;
+ break;
+ }
+ }
+
+ closedir(dir);
+ }
+
+ // do not show ZFS filesystem metrics if there haven't been any pools in the system yet
+ if(unlikely(!do_zfs_stats))
+ return 0;
+
+ ff = procfile_readall(ff);
+ if(unlikely(!ff))
+ return 0; // we return 0, so that we will retry to open it next time
+
+ size_t lines = procfile_lines(ff), l;
+
+ arl_begin(arl_base);
+
+ for(l = 0; l < lines ;l++) {
+ size_t words = procfile_linewords(ff, l);
+ if(unlikely(words < 3)) {
+ if(unlikely(words)) error("Cannot read " ZFS_PROC_ARCSTATS " line %zu. Expected 3 params, read %zu.", l, words);
+ continue;
+ }
+
+ const char *key = procfile_lineword(ff, l, 0);
+ const char *value = procfile_lineword(ff, l, 2);
+
+ if(unlikely(arcstats.l2exist == -1)) {
+ if(key[0] == 'l' && key[1] == '2' && key[2] == '_')
+ arcstats.l2exist = 1;
+ }
+
+ if(unlikely(arl_check(arl_base, key, value))) break;
+ }
+
+ if(unlikely(arcstats.l2exist == -1))
+ arcstats.l2exist = 0;
+
+ generate_charts_arcstats(PLUGIN_PROC_NAME, ZFS_PROC_ARCSTATS, show_zero_charts, update_every);
+ generate_charts_arc_summary(PLUGIN_PROC_NAME, ZFS_PROC_ARCSTATS, show_zero_charts, update_every);
+
+ return 0;
+}
diff --git a/collectors/proc.plugin/proc_stat.c b/collectors/proc.plugin/proc_stat.c
new file mode 100644
index 0000000..373a067
--- /dev/null
+++ b/collectors/proc.plugin/proc_stat.c
@@ -0,0 +1,1069 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "plugin_proc.h"
+
+#define PLUGIN_PROC_MODULE_STAT_NAME "/proc/stat"
+
+struct per_core_single_number_file {
+ unsigned char found:1;
+ const char *filename;
+ int fd;
+ collected_number value;
+ RRDDIM *rd;
+};
+
+struct last_ticks {
+ collected_number frequency;
+ collected_number ticks;
+};
+
+// This is an extension of struct per_core_single_number_file at CPU_FREQ_INDEX.
+// Either scaling_cur_freq or time_in_state file is used at one time.
+struct per_core_time_in_state_file {
+ const char *filename;
+ procfile *ff;
+ size_t last_ticks_len;
+ struct last_ticks *last_ticks;
+};
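+
+// time_in_state (under cpufreq/stats/ in sysfs) is a two-column table of
+// frequency in kHz and cumulative time spent at that frequency, e.g.
+// (illustrative values):
+//
+//   3600000 2089
+//   2800000 136
+//   1200000 5731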
+
+#define CORE_THROTTLE_COUNT_INDEX 0
+#define PACKAGE_THROTTLE_COUNT_INDEX 1
+#define CPU_FREQ_INDEX 2
+#define PER_CORE_FILES 3
+
+struct cpu_chart {
+ const char *id;
+
+ RRDSET *st;
+ RRDDIM *rd_user;
+ RRDDIM *rd_nice;
+ RRDDIM *rd_system;
+ RRDDIM *rd_idle;
+ RRDDIM *rd_iowait;
+ RRDDIM *rd_irq;
+ RRDDIM *rd_softirq;
+ RRDDIM *rd_steal;
+ RRDDIM *rd_guest;
+ RRDDIM *rd_guest_nice;
+
+ struct per_core_single_number_file files[PER_CORE_FILES];
+
+ struct per_core_time_in_state_file time_in_state_files;
+};
+
+static int keep_per_core_fds_open = CONFIG_BOOLEAN_YES;
+static int keep_cpuidle_fds_open = CONFIG_BOOLEAN_YES;
+
+static int read_per_core_files(struct cpu_chart *all_cpu_charts, size_t len, size_t index) {
+ char buf[50 + 1];
+ size_t x, files_read = 0, files_nonzero = 0;
+
+ for(x = 0; x < len ; x++) {
+ struct per_core_single_number_file *f = &all_cpu_charts[x].files[index];
+
+ f->found = 0;
+
+ if(unlikely(!f->filename))
+ continue;
+
+ if(unlikely(f->fd == -1)) {
+ f->fd = open(f->filename, O_RDONLY);
+ if (unlikely(f->fd == -1)) {
+ error("Cannot open file '%s'", f->filename);
+ continue;
+ }
+ }
+
+ ssize_t ret = read(f->fd, buf, 50);
+ if(unlikely(ret < 0)) {
+ // cannot read that file
+
+ error("Cannot read file '%s'", f->filename);
+ close(f->fd);
+ f->fd = -1;
+ continue;
+ }
+ else {
+ // successful read
+
+ // terminate the buffer
+ buf[ret] = '\0';
+
+ if(unlikely(keep_per_core_fds_open != CONFIG_BOOLEAN_YES)) {
+ close(f->fd);
+ f->fd = -1;
+ }
+ else if(lseek(f->fd, 0, SEEK_SET) == -1) {
+ error("Cannot seek in file '%s'", f->filename);
+ close(f->fd);
+ f->fd = -1;
+ }
+ }
+
+ files_read++;
+ f->found = 1;
+
+ f->value = str2ll(buf, NULL);
+ if(likely(f->value != 0))
+ files_nonzero++;
+ }
+
+ if(files_read == 0)
+ return -1;
+
+ if(files_nonzero == 0)
+ return 0;
+
+ return (int)files_nonzero;
+}
+
+static int read_per_core_time_in_state_files(struct cpu_chart *all_cpu_charts, size_t len, size_t index) {
+ size_t x, files_read = 0, files_nonzero = 0;
+
+ for(x = 0; x < len ; x++) {
+ struct per_core_single_number_file *f = &all_cpu_charts[x].files[index];
+ struct per_core_time_in_state_file *tsf = &all_cpu_charts[x].time_in_state_files;
+
+ f->found = 0;
+
+ if(unlikely(!tsf->filename))
+ continue;
+
+ if(unlikely(!tsf->ff)) {
+ tsf->ff = procfile_open(tsf->filename, " \t:", PROCFILE_FLAG_DEFAULT);
+ if(unlikely(!tsf->ff)) {
+ error("Cannot open file '%s'", tsf->filename);
+ continue;
+ }
+ }
+
+ tsf->ff = procfile_readall(tsf->ff);
+ if(unlikely(!tsf->ff)) {
+ error("Cannot read file '%s'", tsf->filename);
+ procfile_close(tsf->ff);
+ tsf->ff = NULL;
+ continue;
+ }
+ else {
+ // successful read
+
+ size_t lines = procfile_lines(tsf->ff), l;
+ size_t words;
+ unsigned long long total_ticks_since_last = 0, avg_freq = 0;
+
+ // Check if there is at least one frequency in time_in_state
+ if (procfile_word(tsf->ff, 0)[0] == '\0') {
+ if(unlikely(keep_per_core_fds_open != CONFIG_BOOLEAN_YES)) {
+ procfile_close(tsf->ff);
+ tsf->ff = NULL;
+ }
+ // TODO: Is there a better way to avoid spikes than calculating the average over
+ // the whole period under schedutil governor?
+ // freez(tsf->last_ticks);
+ // tsf->last_ticks = NULL;
+ // tsf->last_ticks_len = 0;
+ continue;
+ }
+
+ if (unlikely(tsf->last_ticks_len < lines || tsf->last_ticks == NULL)) {
+ tsf->last_ticks = reallocz(tsf->last_ticks, sizeof(struct last_ticks) * lines);
+ memset(tsf->last_ticks, 0, sizeof(struct last_ticks) * lines);
+ tsf->last_ticks_len = lines;
+ }
+
+ f->value = 0;
+
+ for(l = 0; l < lines - 1 ;l++) {
+ unsigned long long frequency = 0, ticks = 0, ticks_since_last = 0;
+
+ words = procfile_linewords(tsf->ff, l);
+ if(unlikely(words < 2)) {
+ error("Cannot read time_in_state line. Expected 2 params, read %zu.", words);
+ continue;
+ }
+ frequency = str2ull(procfile_lineword(tsf->ff, l, 0));
+ ticks = str2ull(procfile_lineword(tsf->ff, l, 1));
+
+ // It is assumed that frequencies are static and sorted
+ ticks_since_last = ticks - tsf->last_ticks[l].ticks;
+ tsf->last_ticks[l].frequency = frequency;
+ tsf->last_ticks[l].ticks = ticks;
+
+ total_ticks_since_last += ticks_since_last;
+ avg_freq += frequency * ticks_since_last;
+
+ }
+
+ if (likely(total_ticks_since_last)) {
+ avg_freq /= total_ticks_since_last;
+ f->value = avg_freq;
+ }
+
+ if(unlikely(keep_per_core_fds_open != CONFIG_BOOLEAN_YES)) {
+ procfile_close(tsf->ff);
+ tsf->ff = NULL;
+ }
+ }
+
+ files_read++;
+
+ f->found = 1;
+
+ if(likely(f->value != 0))
+ files_nonzero++;
+ }
+
+ if(unlikely(files_read == 0))
+ return -1;
+
+ if(unlikely(files_nonzero == 0))
+ return 0;
+
+ return (int)files_nonzero;
+}
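+
+// A worked example of the weighted average above: if, since the previous
+// iteration, a core spent 30 ticks at 3600000 kHz and 70 ticks at 1200000 kHz,
+// then avg_freq = (3600000*30 + 1200000*70) / (30 + 70) = 1920000 kHz.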
+
+static void chart_per_core_files(struct cpu_chart *all_cpu_charts, size_t len, size_t index, RRDSET *st, collected_number multiplier, collected_number divisor, RRD_ALGORITHM algorithm) {
+ size_t x;
+ for(x = 0; x < len ; x++) {
+ struct per_core_single_number_file *f = &all_cpu_charts[x].files[index];
+
+ if(unlikely(!f->found))
+ continue;
+
+ if(unlikely(!f->rd))
+ f->rd = rrddim_add(st, all_cpu_charts[x].id, NULL, multiplier, divisor, algorithm);
+
+ rrddim_set_by_pointer(st, f->rd, f->value);
+ }
+}
+
+struct cpuidle_state {
+ char *name;
+
+ char *time_filename;
+ int time_fd;
+
+ collected_number value;
+
+ RRDDIM *rd;
+};
+
+struct per_core_cpuidle_chart {
+ RRDSET *st;
+
+ RRDDIM *active_time_rd;
+ collected_number active_time;
+ collected_number last_active_time;
+
+ struct cpuidle_state *cpuidle_state;
+ size_t cpuidle_state_len;
+ int rescan_cpu_states;
+};
+
+static void* wake_cpu_thread(void* core) {
+ pthread_t thread;
+ cpu_set_t cpu_set;
+ static size_t cpu_wakeups = 0;
+ static int errors = 0;
+
+ CPU_ZERO(&cpu_set);
+ CPU_SET(*(int*)core, &cpu_set);
+
+ thread = pthread_self();
+ if(unlikely(pthread_setaffinity_np(thread, sizeof(cpu_set_t), &cpu_set))) {
+ if(unlikely(errors < 8)) {
+ error("Cannot set CPU affinity for core %d", *(int*)core);
+ errors++;
+ }
+ else if(unlikely(errors < 9)) {
+ error("CPU affinity errors are disabled");
+ errors++;
+ }
+ }
+
+ // Make the CPU core do something to force it to update its idle counters
+ cpu_wakeups++;
+
+ return 0;
+}
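+
+// Note (an assumption based on sysfs cpuidle behaviour): the per-state time
+// counters are only refreshed when a core exits idle, so a thread is briefly
+// pinned to each core before its counters are read, forcing an update.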
+
+static int read_schedstat(char *schedstat_filename, struct per_core_cpuidle_chart **cpuidle_charts_address, size_t *schedstat_cores_found) {
+ static size_t cpuidle_charts_len = 0;
+ static procfile *ff = NULL;
+ struct per_core_cpuidle_chart *cpuidle_charts = *cpuidle_charts_address;
+ size_t cores_found = 0;
+
+ if(unlikely(!ff)) {
+ ff = procfile_open(schedstat_filename, " \t:", PROCFILE_FLAG_DEFAULT);
+ if(unlikely(!ff)) return 1;
+ }
+
+ ff = procfile_readall(ff);
+ if(unlikely(!ff)) return 1;
+
+ size_t lines = procfile_lines(ff), l;
+ size_t words;
+
+ for(l = 0; l < lines ;l++) {
+ char *row_key = procfile_lineword(ff, l, 0);
+
+ // faster strncmp(row_key, "cpu", 3) == 0
+ if(likely(row_key[0] == 'c' && row_key[1] == 'p' && row_key[2] == 'u')) {
+ words = procfile_linewords(ff, l);
+ if(unlikely(words < 10)) {
+ error("Cannot read /proc/schedstat cpu line. Expected 9 params, read %zu.", words);
+ return 1;
+ }
+ cores_found++;
+
+ size_t core = str2ul(&row_key[3]);
+ if(unlikely(core >= cores_found)) {
+ error("Core %zu found but no more than %zu cores were expected.", core, cores_found);
+ return 1;
+ }
+
+ if(unlikely(cpuidle_charts_len < cores_found)) {
+ cpuidle_charts = reallocz(cpuidle_charts, sizeof(struct per_core_cpuidle_chart) * cores_found);
+ *cpuidle_charts_address = cpuidle_charts;
+ memset(cpuidle_charts + cpuidle_charts_len, 0, sizeof(struct per_core_cpuidle_chart) * (cores_found - cpuidle_charts_len));
+ cpuidle_charts_len = cores_found;
+ }
+
+ cpuidle_charts[core].active_time = str2ull(procfile_lineword(ff, l, 7)) / 1000;
+ }
+ }
+
+ *schedstat_cores_found = cores_found;
+ return 0;
+}
+
+static int read_one_state(char *buf, const char *filename, int *fd) {
+ ssize_t ret = read(*fd, buf, 50);
+
+ if(unlikely(ret <= 0)) {
+ // cannot read that file
+ error("Cannot read file '%s'", filename);
+ close(*fd);
+ *fd = -1;
+ return 0;
+ }
+ else {
+ // successful read
+
+ // terminate the buffer
+ buf[ret - 1] = '\0';
+
+ if(unlikely(keep_cpuidle_fds_open != CONFIG_BOOLEAN_YES)) {
+ close(*fd);
+ *fd = -1;
+ }
+ else if(lseek(*fd, 0, SEEK_SET) == -1) {
+ error("Cannot seek in file '%s'", filename);
+ close(*fd);
+ *fd = -1;
+ }
+ }
+
+ return 1;
+}
+
+static int read_cpuidle_states(char *cpuidle_name_filename , char *cpuidle_time_filename, struct per_core_cpuidle_chart *cpuidle_charts, size_t core) {
+ char filename[FILENAME_MAX + 1];
+ static char next_state_filename[FILENAME_MAX + 1];
+ struct stat stbuf;
+ struct per_core_cpuidle_chart *cc = &cpuidle_charts[core];
+ size_t state;
+
+ if(unlikely(!cc->cpuidle_state_len || cc->rescan_cpu_states)) {
+ int state_file_found = 1; // check at least one state
+
+ if(cc->cpuidle_state_len) {
+ for(state = 0; state < cc->cpuidle_state_len; state++) {
+ freez(cc->cpuidle_state[state].name);
+
+ freez(cc->cpuidle_state[state].time_filename);
+ close(cc->cpuidle_state[state].time_fd);
+ cc->cpuidle_state[state].time_fd = -1;
+ }
+
+ freez(cc->cpuidle_state);
+ cc->cpuidle_state = NULL;
+ cc->cpuidle_state_len = 0;
+
+ cc->active_time_rd = NULL;
+ cc->st = NULL;
+ }
+
+ while(likely(state_file_found)) {
+ snprintfz(filename, FILENAME_MAX, cpuidle_name_filename, core, cc->cpuidle_state_len);
+ if (stat(filename, &stbuf) == 0)
+ cc->cpuidle_state_len++;
+ else
+ state_file_found = 0;
+ }
+ snprintfz(next_state_filename, FILENAME_MAX, cpuidle_name_filename, core, cc->cpuidle_state_len);
+
+ if(likely(cc->cpuidle_state_len))
+ cc->cpuidle_state = callocz(cc->cpuidle_state_len, sizeof(struct cpuidle_state));
+
+ for(state = 0; state < cc->cpuidle_state_len; state++) {
+ char name_buf[50 + 1];
+ snprintfz(filename, FILENAME_MAX, cpuidle_name_filename, core, state);
+
+ int fd = open(filename, O_RDONLY, 0666);
+ if(unlikely(fd == -1)) {
+ error("Cannot open file '%s'", filename);
+ cc->rescan_cpu_states = 1;
+ return 1;
+ }
+
+ ssize_t r = read(fd, name_buf, 50);
+ if(unlikely(r < 1)) {
+ error("Cannot read file '%s'", filename);
+ close(fd);
+ cc->rescan_cpu_states = 1;
+ return 1;
+ }
+
+ name_buf[r - 1] = '\0'; // erase extra character
+ cc->cpuidle_state[state].name = strdupz(trim(name_buf));
+ close(fd);
+
+ snprintfz(filename, FILENAME_MAX, cpuidle_time_filename, core, state);
+ cc->cpuidle_state[state].time_filename = strdupz(filename);
+ cc->cpuidle_state[state].time_fd = -1;
+ }
+
+ cc->rescan_cpu_states = 0;
+ }
+
+ for(state = 0; state < cc->cpuidle_state_len; state++) {
+
+ struct cpuidle_state *cs = &cc->cpuidle_state[state];
+
+ if(unlikely(cs->time_fd == -1)) {
+ cs->time_fd = open(cs->time_filename, O_RDONLY);
+ if (unlikely(cs->time_fd == -1)) {
+ error("Cannot open file '%s'", cs->time_filename);
+ cc->rescan_cpu_states = 1;
+ return 1;
+ }
+ }
+
+ char time_buf[50 + 1];
+ if(likely(read_one_state(time_buf, cs->time_filename, &cs->time_fd))) {
+ cs->value = str2ll(time_buf, NULL);
+ }
+ else {
+ cc->rescan_cpu_states = 1;
+ return 1;
+ }
+ }
+
+ // check whether the number of states has increased
+ if(unlikely(stat(next_state_filename, &stbuf) == 0)) {
+ cc->rescan_cpu_states = 1;
+ return 1;
+ }
+
+ return 0;
+}
+
+int do_proc_stat(int update_every, usec_t dt) {
+ (void)dt;
+
+ static struct cpu_chart *all_cpu_charts = NULL;
+ static size_t all_cpu_charts_size = 0;
+ static procfile *ff = NULL;
+ static int do_cpu = -1, do_cpu_cores = -1, do_interrupts = -1, do_context = -1, do_forks = -1, do_processes = -1,
+ do_core_throttle_count = -1, do_package_throttle_count = -1, do_cpu_freq = -1, do_cpuidle = -1;
+ static uint32_t hash_intr, hash_ctxt, hash_processes, hash_procs_running, hash_procs_blocked;
+ static char *core_throttle_count_filename = NULL, *package_throttle_count_filename = NULL, *scaling_cur_freq_filename = NULL,
+ *time_in_state_filename = NULL, *schedstat_filename = NULL, *cpuidle_name_filename = NULL, *cpuidle_time_filename = NULL;
+ static RRDVAR *cpus_var = NULL;
+ static int accurate_freq_avail = 0, accurate_freq_is_used = 0;
+ size_t cores_found = (size_t)processors;
+
+ if(unlikely(do_cpu == -1)) {
+ do_cpu = config_get_boolean("plugin:proc:/proc/stat", "cpu utilization", CONFIG_BOOLEAN_YES);
+ do_cpu_cores = config_get_boolean("plugin:proc:/proc/stat", "per cpu core utilization", CONFIG_BOOLEAN_YES);
+ do_interrupts = config_get_boolean("plugin:proc:/proc/stat", "cpu interrupts", CONFIG_BOOLEAN_YES);
+ do_context = config_get_boolean("plugin:proc:/proc/stat", "context switches", CONFIG_BOOLEAN_YES);
+ do_forks = config_get_boolean("plugin:proc:/proc/stat", "processes started", CONFIG_BOOLEAN_YES);
+ do_processes = config_get_boolean("plugin:proc:/proc/stat", "processes running", CONFIG_BOOLEAN_YES);
+
+ // give sane defaults based on the number of processors
+ if(unlikely(processors > 50)) {
+ // the system has too many processors
+ keep_per_core_fds_open = CONFIG_BOOLEAN_NO;
+ do_core_throttle_count = CONFIG_BOOLEAN_NO;
+ do_package_throttle_count = CONFIG_BOOLEAN_NO;
+ do_cpu_freq = CONFIG_BOOLEAN_NO;
+ do_cpuidle = CONFIG_BOOLEAN_NO;
+ }
+ else {
+ // the system has a reasonable number of processors
+ keep_per_core_fds_open = CONFIG_BOOLEAN_YES;
+ do_core_throttle_count = CONFIG_BOOLEAN_AUTO;
+ do_package_throttle_count = CONFIG_BOOLEAN_NO;
+ do_cpu_freq = CONFIG_BOOLEAN_YES;
+ do_cpuidle = CONFIG_BOOLEAN_YES;
+ }
+ if(unlikely(processors > 24)) {
+ // the system has too many processors
+ keep_cpuidle_fds_open = CONFIG_BOOLEAN_NO;
+ }
+ else {
+ // the system has a reasonable number of processors
+ keep_cpuidle_fds_open = CONFIG_BOOLEAN_YES;
+ }
+
+ keep_per_core_fds_open = config_get_boolean("plugin:proc:/proc/stat", "keep per core files open", keep_per_core_fds_open);
+ keep_cpuidle_fds_open = config_get_boolean("plugin:proc:/proc/stat", "keep cpuidle files open", keep_cpuidle_fds_open);
+ do_core_throttle_count = config_get_boolean_ondemand("plugin:proc:/proc/stat", "core_throttle_count", do_core_throttle_count);
+ do_package_throttle_count = config_get_boolean_ondemand("plugin:proc:/proc/stat", "package_throttle_count", do_package_throttle_count);
+ do_cpu_freq = config_get_boolean_ondemand("plugin:proc:/proc/stat", "cpu frequency", do_cpu_freq);
+ do_cpuidle = config_get_boolean_ondemand("plugin:proc:/proc/stat", "cpu idle states", do_cpuidle);
+
+ hash_intr = simple_hash("intr");
+ hash_ctxt = simple_hash("ctxt");
+ hash_processes = simple_hash("processes");
+ hash_procs_running = simple_hash("procs_running");
+ hash_procs_blocked = simple_hash("procs_blocked");
+
+ char filename[FILENAME_MAX + 1];
+ snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/devices/system/cpu/%s/thermal_throttle/core_throttle_count");
+ core_throttle_count_filename = config_get("plugin:proc:/proc/stat", "core_throttle_count filename to monitor", filename);
+
+ snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/devices/system/cpu/%s/thermal_throttle/package_throttle_count");
+ package_throttle_count_filename = config_get("plugin:proc:/proc/stat", "package_throttle_count filename to monitor", filename);
+
+ snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/devices/system/cpu/%s/cpufreq/scaling_cur_freq");
+ scaling_cur_freq_filename = config_get("plugin:proc:/proc/stat", "scaling_cur_freq filename to monitor", filename);
+
+ snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/devices/system/cpu/%s/cpufreq/stats/time_in_state");
+ time_in_state_filename = config_get("plugin:proc:/proc/stat", "time_in_state filename to monitor", filename);
+
+ snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/proc/schedstat");
+ schedstat_filename = config_get("plugin:proc:/proc/stat", "schedstat filename to monitor", filename);
+
+ if(do_cpuidle != CONFIG_BOOLEAN_NO) {
+ struct stat stbuf;
+
+ if (stat(schedstat_filename, &stbuf))
+ do_cpuidle = CONFIG_BOOLEAN_NO;
+ }
+
+ snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/devices/system/cpu/cpu%zu/cpuidle/state%zu/name");
+ cpuidle_name_filename = config_get("plugin:proc:/proc/stat", "cpuidle name filename to monitor", filename);
+
+ snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/devices/system/cpu/cpu%zu/cpuidle/state%zu/time");
+ cpuidle_time_filename = config_get("plugin:proc:/proc/stat", "cpuidle time filename to monitor", filename);
+ }
+
+ if(unlikely(!ff)) {
+ char filename[FILENAME_MAX + 1];
+ snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/proc/stat");
+ ff = procfile_open(config_get("plugin:proc:/proc/stat", "filename to monitor", filename), " \t:", PROCFILE_FLAG_DEFAULT);
+ if(unlikely(!ff)) return 1;
+ }
+
+ ff = procfile_readall(ff);
+ if(unlikely(!ff)) return 0; // we return 0, so that we will retry reading it next time
+
+ size_t lines = procfile_lines(ff), l;
+ size_t words;
+
+ unsigned long long processes = 0, running = 0, blocked = 0;
+
+ for(l = 0; l < lines ;l++) {
+ char *row_key = procfile_lineword(ff, l, 0);
+ uint32_t hash = simple_hash(row_key);
+
+ // faster than strncmp(row_key, "cpu", 3) == 0
+ if(likely(row_key[0] == 'c' && row_key[1] == 'p' && row_key[2] == 'u')) {
+ words = procfile_linewords(ff, l);
+ if(unlikely(words < 9)) {
+ error("Cannot read /proc/stat cpu line. Expected 9 params, read %zu.", words);
+ continue;
+ }
+
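+ // slot 0 is the aggregate "cpu" line; core N of "cpuN" maps to slot N + 1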
+ size_t core = (row_key[3] == '\0') ? 0 : str2ul(&row_key[3]) + 1;
+ if(likely(core > 0)) cores_found = core;
+
+ if(likely((core == 0 && do_cpu) || (core > 0 && do_cpu_cores))) {
+ char *id;
+ unsigned long long user = 0, nice = 0, system = 0, idle = 0, iowait = 0, irq = 0, softirq = 0, steal = 0, guest = 0, guest_nice = 0;
+
+ id = row_key;
+ user = str2ull(procfile_lineword(ff, l, 1));
+ nice = str2ull(procfile_lineword(ff, l, 2));
+ system = str2ull(procfile_lineword(ff, l, 3));
+ idle = str2ull(procfile_lineword(ff, l, 4));
+ iowait = str2ull(procfile_lineword(ff, l, 5));
+ irq = str2ull(procfile_lineword(ff, l, 6));
+ softirq = str2ull(procfile_lineword(ff, l, 7));
+ steal = str2ull(procfile_lineword(ff, l, 8));
+
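+ // the kernel accounts guest time inside user time (and guest_nice inside nice),
+ // so subtract them here to avoid counting the same ticks twice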
+ guest = str2ull(procfile_lineword(ff, l, 9));
+ user -= guest;
+
+ guest_nice = str2ull(procfile_lineword(ff, l, 10));
+ nice -= guest_nice;
+
+ char *title, *type, *context, *family;
+ long priority;
+
+ if(unlikely(core >= all_cpu_charts_size)) {
+ size_t old_cpu_charts_size = all_cpu_charts_size;
+ all_cpu_charts_size = core + 1;
+ all_cpu_charts = reallocz(all_cpu_charts, sizeof(struct cpu_chart) * all_cpu_charts_size);
+ memset(&all_cpu_charts[old_cpu_charts_size], 0, sizeof(struct cpu_chart) * (all_cpu_charts_size - old_cpu_charts_size));
+ }
+ struct cpu_chart *cpu_chart = &all_cpu_charts[core];
+
+ if(unlikely(!cpu_chart->st)) {
+ cpu_chart->id = strdupz(id);
+
+ if(unlikely(core == 0)) {
+ title = "Total CPU utilization";
+ type = "system";
+ context = "system.cpu";
+ family = id;
+ priority = NETDATA_CHART_PRIO_SYSTEM_CPU;
+ }
+ else {
+ title = "Core utilization";
+ type = "cpu";
+ context = "cpu.cpu";
+ family = "utilization";
+ priority = NETDATA_CHART_PRIO_CPU_PER_CORE;
+
+ char filename[FILENAME_MAX + 1];
+ struct stat stbuf;
+
+ if(do_core_throttle_count != CONFIG_BOOLEAN_NO) {
+ snprintfz(filename, FILENAME_MAX, core_throttle_count_filename, id);
+ if (stat(filename, &stbuf) == 0) {
+ cpu_chart->files[CORE_THROTTLE_COUNT_INDEX].filename = strdupz(filename);
+ cpu_chart->files[CORE_THROTTLE_COUNT_INDEX].fd = -1;
+ do_core_throttle_count = CONFIG_BOOLEAN_YES;
+ }
+ }
+
+ if(do_package_throttle_count != CONFIG_BOOLEAN_NO) {
+ snprintfz(filename, FILENAME_MAX, package_throttle_count_filename, id);
+ if (stat(filename, &stbuf) == 0) {
+ cpu_chart->files[PACKAGE_THROTTLE_COUNT_INDEX].filename = strdupz(filename);
+ cpu_chart->files[PACKAGE_THROTTLE_COUNT_INDEX].fd = -1;
+ do_package_throttle_count = CONFIG_BOOLEAN_YES;
+ }
+ }
+
+ if(do_cpu_freq != CONFIG_BOOLEAN_NO) {
+
+ snprintfz(filename, FILENAME_MAX, scaling_cur_freq_filename, id);
+
+ if (stat(filename, &stbuf) == 0) {
+ cpu_chart->files[CPU_FREQ_INDEX].filename = strdupz(filename);
+ cpu_chart->files[CPU_FREQ_INDEX].fd = -1;
+ do_cpu_freq = CONFIG_BOOLEAN_YES;
+ }
+
+ snprintfz(filename, FILENAME_MAX, time_in_state_filename, id);
+
+ if (stat(filename, &stbuf) == 0) {
+ cpu_chart->time_in_state_files.filename = strdupz(filename);
+ cpu_chart->time_in_state_files.ff = NULL;
+ do_cpu_freq = CONFIG_BOOLEAN_YES;
+ accurate_freq_avail = 1;
+ }
+ }
+ }
+
+ cpu_chart->st = rrdset_create_localhost(
+ type
+ , id
+ , NULL
+ , family
+ , context
+ , title
+ , "percentage"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_STAT_NAME
+ , priority + core
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+
+ long multiplier = 1;
+ long divisor = 1; // sysconf(_SC_CLK_TCK);
+
+ cpu_chart->rd_guest_nice = rrddim_add(cpu_chart->st, "guest_nice", NULL, multiplier, divisor, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
+ cpu_chart->rd_guest = rrddim_add(cpu_chart->st, "guest", NULL, multiplier, divisor, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
+ cpu_chart->rd_steal = rrddim_add(cpu_chart->st, "steal", NULL, multiplier, divisor, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
+ cpu_chart->rd_softirq = rrddim_add(cpu_chart->st, "softirq", NULL, multiplier, divisor, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
+ cpu_chart->rd_irq = rrddim_add(cpu_chart->st, "irq", NULL, multiplier, divisor, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
+ cpu_chart->rd_user = rrddim_add(cpu_chart->st, "user", NULL, multiplier, divisor, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
+ cpu_chart->rd_system = rrddim_add(cpu_chart->st, "system", NULL, multiplier, divisor, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
+ cpu_chart->rd_nice = rrddim_add(cpu_chart->st, "nice", NULL, multiplier, divisor, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
+ cpu_chart->rd_iowait = rrddim_add(cpu_chart->st, "iowait", NULL, multiplier, divisor, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
+ cpu_chart->rd_idle = rrddim_add(cpu_chart->st, "idle", NULL, multiplier, divisor, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
+ rrddim_hide(cpu_chart->st, "idle");
+
+ if(unlikely(core == 0 && cpus_var == NULL))
+ cpus_var = rrdvar_custom_host_variable_create(localhost, "active_processors");
+ }
+ else rrdset_next(cpu_chart->st);
+
+ rrddim_set_by_pointer(cpu_chart->st, cpu_chart->rd_user, user);
+ rrddim_set_by_pointer(cpu_chart->st, cpu_chart->rd_nice, nice);
+ rrddim_set_by_pointer(cpu_chart->st, cpu_chart->rd_system, system);
+ rrddim_set_by_pointer(cpu_chart->st, cpu_chart->rd_idle, idle);
+ rrddim_set_by_pointer(cpu_chart->st, cpu_chart->rd_iowait, iowait);
+ rrddim_set_by_pointer(cpu_chart->st, cpu_chart->rd_irq, irq);
+ rrddim_set_by_pointer(cpu_chart->st, cpu_chart->rd_softirq, softirq);
+ rrddim_set_by_pointer(cpu_chart->st, cpu_chart->rd_steal, steal);
+ rrddim_set_by_pointer(cpu_chart->st, cpu_chart->rd_guest, guest);
+ rrddim_set_by_pointer(cpu_chart->st, cpu_chart->rd_guest_nice, guest_nice);
+ rrdset_done(cpu_chart->st);
+ }
+ }
+ else if(unlikely(hash == hash_intr && strcmp(row_key, "intr") == 0)) {
+ if(likely(do_interrupts)) {
+ static RRDSET *st_intr = NULL;
+ static RRDDIM *rd_interrupts = NULL;
+ unsigned long long value = str2ull(procfile_lineword(ff, l, 1));
+
+ if(unlikely(!st_intr)) {
+ st_intr = rrdset_create_localhost(
+ "system"
+ , "intr"
+ , NULL
+ , "interrupts"
+ , NULL
+ , "CPU Interrupts"
+ , "interrupts/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_STAT_NAME
+ , NETDATA_CHART_PRIO_SYSTEM_INTR
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rrdset_flag_set(st_intr, RRDSET_FLAG_DETAIL);
+
+ rd_interrupts = rrddim_add(st_intr, "interrupts", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st_intr);
+
+ rrddim_set_by_pointer(st_intr, rd_interrupts, value);
+ rrdset_done(st_intr);
+ }
+ }
+ else if(unlikely(hash == hash_ctxt && strcmp(row_key, "ctxt") == 0)) {
+ if(likely(do_context)) {
+ static RRDSET *st_ctxt = NULL;
+ static RRDDIM *rd_switches = NULL;
+ unsigned long long value = str2ull(procfile_lineword(ff, l, 1));
+
+ if(unlikely(!st_ctxt)) {
+ st_ctxt = rrdset_create_localhost(
+ "system"
+ , "ctxt"
+ , NULL
+ , "processes"
+ , NULL
+ , "CPU Context Switches"
+ , "context switches/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_STAT_NAME
+ , NETDATA_CHART_PRIO_SYSTEM_CTXT
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rd_switches = rrddim_add(st_ctxt, "switches", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st_ctxt);
+
+ rrddim_set_by_pointer(st_ctxt, rd_switches, value);
+ rrdset_done(st_ctxt);
+ }
+ }
+ else if(unlikely(hash == hash_processes && !processes && strcmp(row_key, "processes") == 0)) {
+ processes = str2ull(procfile_lineword(ff, l, 1));
+ }
+ else if(unlikely(hash == hash_procs_running && !running && strcmp(row_key, "procs_running") == 0)) {
+ running = str2ull(procfile_lineword(ff, l, 1));
+ }
+ else if(unlikely(hash == hash_procs_blocked && !blocked && strcmp(row_key, "procs_blocked") == 0)) {
+ blocked = str2ull(procfile_lineword(ff, l, 1));
+ }
+ }
+
+ // --------------------------------------------------------------------
+
+ if(likely(do_forks)) {
+ static RRDSET *st_forks = NULL;
+ static RRDDIM *rd_started = NULL;
+
+ if(unlikely(!st_forks)) {
+ st_forks = rrdset_create_localhost(
+ "system"
+ , "forks"
+ , NULL
+ , "processes"
+ , NULL
+ , "Started Processes"
+ , "processes/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_STAT_NAME
+ , NETDATA_CHART_PRIO_SYSTEM_FORKS
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+ rrdset_flag_set(st_forks, RRDSET_FLAG_DETAIL);
+
+ rd_started = rrddim_add(st_forks, "started", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st_forks);
+
+ rrddim_set_by_pointer(st_forks, rd_started, processes);
+ rrdset_done(st_forks);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(likely(do_processes)) {
+ static RRDSET *st_processes = NULL;
+ static RRDDIM *rd_running = NULL;
+ static RRDDIM *rd_blocked = NULL;
+
+ if(unlikely(!st_processes)) {
+ st_processes = rrdset_create_localhost(
+ "system"
+ , "processes"
+ , NULL
+ , "processes"
+ , NULL
+ , "System Processes"
+ , "processes"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_STAT_NAME
+ , NETDATA_CHART_PRIO_SYSTEM_PROCESSES
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rd_running = rrddim_add(st_processes, "running", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ rd_blocked = rrddim_add(st_processes, "blocked", NULL, -1, 1, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else rrdset_next(st_processes);
+
+ rrddim_set_by_pointer(st_processes, rd_running, running);
+ rrddim_set_by_pointer(st_processes, rd_blocked, blocked);
+ rrdset_done(st_processes);
+ }
+
+ if(likely(all_cpu_charts_size > 1)) {
+ if(likely(do_core_throttle_count != CONFIG_BOOLEAN_NO)) {
+ int r = read_per_core_files(&all_cpu_charts[1], all_cpu_charts_size - 1, CORE_THROTTLE_COUNT_INDEX);
+ if(likely(r != -1 && (do_core_throttle_count == CONFIG_BOOLEAN_YES || r > 0))) {
+ do_core_throttle_count = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *st_core_throttle_count = NULL;
+
+ if (unlikely(!st_core_throttle_count))
+ st_core_throttle_count = rrdset_create_localhost(
+ "cpu"
+ , "core_throttling"
+ , NULL
+ , "throttling"
+ , "cpu.core_throttling"
+ , "Core Thermal Throttling Events"
+ , "events/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_STAT_NAME
+ , NETDATA_CHART_PRIO_CORE_THROTTLING
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+ else
+ rrdset_next(st_core_throttle_count);
+
+ chart_per_core_files(&all_cpu_charts[1], all_cpu_charts_size - 1, CORE_THROTTLE_COUNT_INDEX, st_core_throttle_count, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrdset_done(st_core_throttle_count);
+ }
+ }
+
+ if(likely(do_package_throttle_count != CONFIG_BOOLEAN_NO)) {
+ int r = read_per_core_files(&all_cpu_charts[1], all_cpu_charts_size - 1, PACKAGE_THROTTLE_COUNT_INDEX);
+ if(likely(r != -1 && (do_package_throttle_count == CONFIG_BOOLEAN_YES || r > 0))) {
+ do_package_throttle_count = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *st_package_throttle_count = NULL;
+
+ if(unlikely(!st_package_throttle_count))
+ st_package_throttle_count = rrdset_create_localhost(
+ "cpu"
+ , "package_throttling"
+ , NULL
+ , "throttling"
+ , "cpu.package_throttling"
+ , "Package Thermal Throttling Events"
+ , "events/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_STAT_NAME
+ , NETDATA_CHART_PRIO_PACKAGE_THROTTLING
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+ else
+ rrdset_next(st_package_throttle_count);
+
+ chart_per_core_files(&all_cpu_charts[1], all_cpu_charts_size - 1, PACKAGE_THROTTLE_COUNT_INDEX, st_package_throttle_count, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrdset_done(st_package_throttle_count);
+ }
+ }
+
+ if(likely(do_cpu_freq != CONFIG_BOOLEAN_NO)) {
+ char filename[FILENAME_MAX + 1];
+ int r = 0;
+
+ if (accurate_freq_avail) {
+ r = read_per_core_time_in_state_files(&all_cpu_charts[1], all_cpu_charts_size - 1, CPU_FREQ_INDEX);
+ if(r > 0 && !accurate_freq_is_used) {
+ accurate_freq_is_used = 1;
+ snprintfz(filename, FILENAME_MAX, time_in_state_filename, "cpu*");
+ info("cpufreq is using %s", filename);
+ }
+ }
+ if (r < 1) {
+ r = read_per_core_files(&all_cpu_charts[1], all_cpu_charts_size - 1, CPU_FREQ_INDEX);
+ if(accurate_freq_is_used) {
+ accurate_freq_is_used = 0;
+ snprintfz(filename, FILENAME_MAX, scaling_cur_freq_filename, "cpu*");
+ info("cpufreq fell back to %s", filename);
+ }
+ }
+
+ if(likely(r != -1 && (do_cpu_freq == CONFIG_BOOLEAN_YES || r > 0))) {
+ do_cpu_freq = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *st_scaling_cur_freq = NULL;
+
+ if(unlikely(!st_scaling_cur_freq))
+ st_scaling_cur_freq = rrdset_create_localhost(
+ "cpu"
+ , "cpufreq"
+ , NULL
+ , "cpufreq"
+ , "cpufreq.cpufreq"
+ , "Current CPU Frequency"
+ , "MHz"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_STAT_NAME
+ , NETDATA_CHART_PRIO_CPUFREQ_SCALING_CUR_FREQ
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+ else
+ rrdset_next(st_scaling_cur_freq);
+
+ chart_per_core_files(&all_cpu_charts[1], all_cpu_charts_size - 1, CPU_FREQ_INDEX, st_scaling_cur_freq, 1, 1000, RRD_ALGORITHM_ABSOLUTE);
+ rrdset_done(st_scaling_cur_freq);
+ }
+ }
+ }
+
+ // --------------------------------------------------------------------
+
+ static struct per_core_cpuidle_chart *cpuidle_charts = NULL;
+ size_t schedstat_cores_found = 0;
+
+ if(likely(do_cpuidle != CONFIG_BOOLEAN_NO && !read_schedstat(schedstat_filename, &cpuidle_charts, &schedstat_cores_found))) {
+ int cpu_states_updated = 0;
+ size_t core, state;
+
+ // proc.plugin runs on Linux systems only. Multi-platform compatibility is not needed here,
+ // so bare pthread functions are used to avoid unnecessary overhead.
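+ // when a core's schedstat active time has not advanced, the core may be parked
+ // in a deep idle state with stale cpuidle accounting; waking it briefly with a
+ // short-lived thread makes the kernel refresh the counters before we read them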
+ for(core = 0; core < schedstat_cores_found; core++) {
+ if(unlikely(!(cpuidle_charts[core].active_time - cpuidle_charts[core].last_active_time))) {
+ pthread_t thread;
+ cpu_set_t global_cpu_set;
+
+ if (likely(!pthread_getaffinity_np(pthread_self(), sizeof(cpu_set_t), &global_cpu_set))) {
+ if (unlikely(!CPU_ISSET(core, &global_cpu_set))) {
+ continue;
+ }
+ }
+ else
+ error("Cannot read current process affinity");
+
+ // These threads are very ephemeral and don't need to have a specific name
+ if(unlikely(pthread_create(&thread, NULL, wake_cpu_thread, (void *)&core)))
+ error("Cannot create wake_cpu_thread");
+ else if(unlikely(pthread_join(thread, NULL)))
+ error("Cannot join wake_cpu_thread");
+ cpu_states_updated = 1;
+ }
+ }
+
+ if(unlikely(!cpu_states_updated || !read_schedstat(schedstat_filename, &cpuidle_charts, &schedstat_cores_found))) {
+ for(core = 0; core < schedstat_cores_found; core++) {
+ cpuidle_charts[core].last_active_time = cpuidle_charts[core].active_time;
+
+ int r = read_cpuidle_states(cpuidle_name_filename, cpuidle_time_filename, cpuidle_charts, core);
+ if(likely(r != -1 && (do_cpuidle == CONFIG_BOOLEAN_YES || r > 0))) {
+ do_cpuidle = CONFIG_BOOLEAN_YES;
+
+ char cpuidle_chart_id[RRD_ID_LENGTH_MAX + 1];
+ snprintfz(cpuidle_chart_id, RRD_ID_LENGTH_MAX, "cpu%zu_cpuidle", core);
+
+ if(unlikely(!cpuidle_charts[core].st)) {
+ cpuidle_charts[core].st = rrdset_create_localhost(
+ "cpu"
+ , cpuidle_chart_id
+ , NULL
+ , "cpuidle"
+ , "cpuidle.cpuidle"
+ , "C-state residency time"
+ , "percentage"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_STAT_NAME
+ , NETDATA_CHART_PRIO_CPUIDLE + core
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+
+ char cpuidle_dim_id[RRD_ID_LENGTH_MAX + 1];
+ snprintfz(cpuidle_dim_id, RRD_ID_LENGTH_MAX, "cpu%zu_active_time", core);
+ cpuidle_charts[core].active_time_rd = rrddim_add(cpuidle_charts[core].st, cpuidle_dim_id, "C0 (active)", 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
+ for(state = 0; state < cpuidle_charts[core].cpuidle_state_len; state++) {
+ snprintfz(cpuidle_dim_id, RRD_ID_LENGTH_MAX, "cpu%zu_cpuidle_state%zu_time", core, state);
+ cpuidle_charts[core].cpuidle_state[state].rd = rrddim_add(cpuidle_charts[core].st, cpuidle_dim_id,
+ cpuidle_charts[core].cpuidle_state[state].name,
+ 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
+ }
+ }
+ else
+ rrdset_next(cpuidle_charts[core].st);
+
+ rrddim_set_by_pointer(cpuidle_charts[core].st, cpuidle_charts[core].active_time_rd, cpuidle_charts[core].active_time);
+ for(state = 0; state < cpuidle_charts[core].cpuidle_state_len; state++) {
+ rrddim_set_by_pointer(cpuidle_charts[core].st, cpuidle_charts[core].cpuidle_state[state].rd, cpuidle_charts[core].cpuidle_state[state].value);
+ }
+ rrdset_done(cpuidle_charts[core].st);
+ }
+ }
+ }
+ }
+
+ if(cpus_var)
+ rrdvar_custom_host_variable_set(localhost, cpus_var, cores_found);
+
+ return 0;
+}
diff --git a/collectors/proc.plugin/proc_sys_kernel_random_entropy_avail.c b/collectors/proc.plugin/proc_sys_kernel_random_entropy_avail.c
new file mode 100644
index 0000000..20d2116
--- /dev/null
+++ b/collectors/proc.plugin/proc_sys_kernel_random_entropy_avail.c
@@ -0,0 +1,49 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "plugin_proc.h"
+
+int do_proc_sys_kernel_random_entropy_avail(int update_every, usec_t dt) {
+ (void)dt;
+
+ static procfile *ff = NULL;
+
+ if(unlikely(!ff)) {
+ char filename[FILENAME_MAX + 1];
+ snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/proc/sys/kernel/random/entropy_avail");
+ ff = procfile_open(config_get("plugin:proc:/proc/sys/kernel/random/entropy_avail", "filename to monitor", filename), "", PROCFILE_FLAG_DEFAULT);
+ if(unlikely(!ff)) return 1;
+ }
+
+ ff = procfile_readall(ff);
+ if(unlikely(!ff)) return 0; // we return 0, so that we will retry reading it next time
+
+ unsigned long long entropy = str2ull(procfile_lineword(ff, 0, 0));
+
+ static RRDSET *st = NULL;
+ static RRDDIM *rd = NULL;
+
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "system"
+ , "entropy"
+ , NULL
+ , "entropy"
+ , NULL
+ , "Available Entropy"
+ , "entropy"
+ , PLUGIN_PROC_NAME
+ , "/proc/sys/kernel/random/entropy_avail"
+ , NETDATA_CHART_PRIO_SYSTEM_ENTROPY
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rd = rrddim_add(st, "entropy", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd, entropy);
+ rrdset_done(st);
+
+ return 0;
+}
diff --git a/collectors/proc.plugin/proc_uptime.c b/collectors/proc.plugin/proc_uptime.c
new file mode 100644
index 0000000..28b00e0
--- /dev/null
+++ b/collectors/proc.plugin/proc_uptime.c
@@ -0,0 +1,46 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "plugin_proc.h"
+
+int do_proc_uptime(int update_every, usec_t dt) {
+ (void)dt;
+
+ static char *uptime_filename = NULL;
+ if(!uptime_filename) {
+ char filename[FILENAME_MAX + 1];
+ snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/proc/uptime");
+
+ uptime_filename = config_get("plugin:proc:/proc/uptime", "filename to monitor", filename);
+ }
+
+ static RRDSET *st = NULL;
+ static RRDDIM *rd = NULL;
+
+ if(unlikely(!st)) {
+
+ st = rrdset_create_localhost(
+ "system"
+ , "uptime"
+ , NULL
+ , "uptime"
+ , NULL
+ , "System Uptime"
+ , "seconds"
+ , PLUGIN_PROC_NAME
+ , "/proc/uptime"
+ , NETDATA_CHART_PRIO_SYSTEM_UPTIME
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
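+ // uptime_msec() reports milliseconds; the divisor of 1000 renders seconds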
+ rd = rrddim_add(st, "uptime", NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else
+ rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd, uptime_msec(uptime_filename));
+
+ rrdset_done(st);
+
+ return 0;
+}
diff --git a/collectors/proc.plugin/proc_vmstat.c b/collectors/proc.plugin/proc_vmstat.c
new file mode 100644
index 0000000..7def02d
--- /dev/null
+++ b/collectors/proc.plugin/proc_vmstat.c
@@ -0,0 +1,263 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "plugin_proc.h"
+
+#define PLUGIN_PROC_MODULE_VMSTAT_NAME "/proc/vmstat"
+
+int do_proc_vmstat(int update_every, usec_t dt) {
+ (void)dt;
+
+ static procfile *ff = NULL;
+ static int do_swapio = -1, do_io = -1, do_pgfaults = -1, do_numa = -1;
+ static int has_numa = -1;
+
+ static ARL_BASE *arl_base = NULL;
+ static unsigned long long numa_foreign = 0ULL;
+ static unsigned long long numa_hint_faults = 0ULL;
+ static unsigned long long numa_hint_faults_local = 0ULL;
+ static unsigned long long numa_huge_pte_updates = 0ULL;
+ static unsigned long long numa_interleave = 0ULL;
+ static unsigned long long numa_local = 0ULL;
+ static unsigned long long numa_other = 0ULL;
+ static unsigned long long numa_pages_migrated = 0ULL;
+ static unsigned long long numa_pte_updates = 0ULL;
+ static unsigned long long pgfault = 0ULL;
+ static unsigned long long pgmajfault = 0ULL;
+ static unsigned long long pgpgin = 0ULL;
+ static unsigned long long pgpgout = 0ULL;
+ static unsigned long long pswpin = 0ULL;
+ static unsigned long long pswpout = 0ULL;
+
+ if(unlikely(!arl_base)) {
+ do_swapio = config_get_boolean_ondemand("plugin:proc:/proc/vmstat", "swap i/o", CONFIG_BOOLEAN_AUTO);
+ do_io = config_get_boolean("plugin:proc:/proc/vmstat", "disk i/o", 1);
+ do_pgfaults = config_get_boolean("plugin:proc:/proc/vmstat", "memory page faults", 1);
+ do_numa = config_get_boolean_ondemand("plugin:proc:/proc/vmstat", "system-wide numa metric summary", CONFIG_BOOLEAN_AUTO);
+
+ arl_base = arl_create("vmstat", NULL, 60);
+ arl_expect(arl_base, "pgfault", &pgfault);
+ arl_expect(arl_base, "pgmajfault", &pgmajfault);
+ arl_expect(arl_base, "pgpgin", &pgpgin);
+ arl_expect(arl_base, "pgpgout", &pgpgout);
+ arl_expect(arl_base, "pswpin", &pswpin);
+ arl_expect(arl_base, "pswpout", &pswpout);
+
+ if(do_numa == CONFIG_BOOLEAN_YES || (do_numa == CONFIG_BOOLEAN_AUTO &&
+ (get_numa_node_count() >= 2 ||
+ netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
+ arl_expect(arl_base, "numa_foreign", &numa_foreign);
+ arl_expect(arl_base, "numa_hint_faults_local", &numa_hint_faults_local);
+ arl_expect(arl_base, "numa_hint_faults", &numa_hint_faults);
+ arl_expect(arl_base, "numa_huge_pte_updates", &numa_huge_pte_updates);
+ arl_expect(arl_base, "numa_interleave", &numa_interleave);
+ arl_expect(arl_base, "numa_local", &numa_local);
+ arl_expect(arl_base, "numa_other", &numa_other);
+ arl_expect(arl_base, "numa_pages_migrated", &numa_pages_migrated);
+ arl_expect(arl_base, "numa_pte_updates", &numa_pte_updates);
+ }
+ else {
+ // Do not expect numa metrics when they are not needed.
+ // By not adding them, the ARL will stop processing the file
+ // when all the expected metrics are collected,
+ // and it will not parse their values at all.
+ has_numa = 0;
+ do_numa = CONFIG_BOOLEAN_NO;
+ }
+ }
+
+ if(unlikely(!ff)) {
+ char filename[FILENAME_MAX + 1];
+ snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/proc/vmstat");
+ ff = procfile_open(config_get("plugin:proc:/proc/vmstat", "filename to monitor", filename), " \t:", PROCFILE_FLAG_DEFAULT);
+ if(unlikely(!ff)) return 1;
+ }
+
+ ff = procfile_readall(ff);
+ if(unlikely(!ff)) return 0; // we return 0, so that we will retry reading it next time
+
+ size_t lines = procfile_lines(ff), l;
+
+ arl_begin(arl_base);
+ for(l = 0; l < lines ;l++) {
+ size_t words = procfile_linewords(ff, l);
+ if(unlikely(words < 2)) {
+ if(unlikely(words)) error("Cannot read /proc/vmstat line %zu. Expected 2 params, read %zu.", l, words);
+ continue;
+ }
+
+ if(unlikely(arl_check(arl_base,
+ procfile_lineword(ff, l, 0),
+ procfile_lineword(ff, l, 1)))) break;
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_swapio == CONFIG_BOOLEAN_YES || (do_swapio == CONFIG_BOOLEAN_AUTO &&
+ (pswpin || pswpout ||
+ netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
+ do_swapio = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *st_swapio = NULL;
+ static RRDDIM *rd_in = NULL, *rd_out = NULL;
+
+ if(unlikely(!st_swapio)) {
+ st_swapio = rrdset_create_localhost(
+ "system"
+ , "swapio"
+ , NULL
+ , "swap"
+ , NULL
+ , "Swap I/O"
+ , "KiB/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_VMSTAT_NAME
+ , NETDATA_CHART_PRIO_SYSTEM_SWAPIO
+ , update_every
+ , RRDSET_TYPE_AREA
+ );
+
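+ // pswpin/pswpout count pages, so scale by the page size and divide by 1024
+ // to chart KiB/s; "out" is negated to draw below the zero line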
+ rd_in = rrddim_add(st_swapio, "in", NULL, sysconf(_SC_PAGESIZE), 1024, RRD_ALGORITHM_INCREMENTAL);
+ rd_out = rrddim_add(st_swapio, "out", NULL, -sysconf(_SC_PAGESIZE), 1024, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st_swapio);
+
+ rrddim_set_by_pointer(st_swapio, rd_in, pswpin);
+ rrddim_set_by_pointer(st_swapio, rd_out, pswpout);
+ rrdset_done(st_swapio);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_io) {
+ static RRDSET *st_io = NULL;
+ static RRDDIM *rd_in = NULL, *rd_out = NULL;
+
+ if(unlikely(!st_io)) {
+ st_io = rrdset_create_localhost(
+ "system"
+ , "pgpgio"
+ , NULL
+ , "disk"
+ , NULL
+ , "Memory Paged from/to disk"
+ , "KiB/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_VMSTAT_NAME
+ , NETDATA_CHART_PRIO_SYSTEM_PGPGIO
+ , update_every
+ , RRDSET_TYPE_AREA
+ );
+
+ rd_in = rrddim_add(st_io, "in", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_out = rrddim_add(st_io, "out", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st_io);
+
+ rrddim_set_by_pointer(st_io, rd_in, pgpgin);
+ rrddim_set_by_pointer(st_io, rd_out, pgpgout);
+ rrdset_done(st_io);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_pgfaults) {
+ static RRDSET *st_pgfaults = NULL;
+ static RRDDIM *rd_minor = NULL, *rd_major = NULL;
+
+ if(unlikely(!st_pgfaults)) {
+ st_pgfaults = rrdset_create_localhost(
+ "mem"
+ , "pgfaults"
+ , NULL
+ , "system"
+ , NULL
+ , "Memory Page Faults"
+ , "faults/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_VMSTAT_NAME
+ , NETDATA_CHART_PRIO_MEM_SYSTEM_PGFAULTS
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rrdset_flag_set(st_pgfaults, RRDSET_FLAG_DETAIL);
+
+ rd_minor = rrddim_add(st_pgfaults, "minor", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_major = rrddim_add(st_pgfaults, "major", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st_pgfaults);
+
+ rrddim_set_by_pointer(st_pgfaults, rd_minor, pgfault);
+ rrddim_set_by_pointer(st_pgfaults, rd_major, pgmajfault);
+ rrdset_done(st_pgfaults);
+ }
+
+ // --------------------------------------------------------------------
+
+ // Ondemand criteria for NUMA. Since this won't change at run time, we
+ // check it only once. We check whether the node count is >= 2 because
+ // single-node systems have uninteresting statistics (since all accesses
+ // are local).
+ if(unlikely(has_numa == -1))
+ has_numa = (numa_local || numa_foreign || numa_interleave || numa_other || numa_pte_updates ||
+ numa_huge_pte_updates || numa_hint_faults || numa_hint_faults_local || numa_pages_migrated) ? 1 : 0;
+
+ if(do_numa == CONFIG_BOOLEAN_YES || (do_numa == CONFIG_BOOLEAN_AUTO && has_numa)) {
+ do_numa = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *st_numa = NULL;
+ static RRDDIM *rd_local = NULL, *rd_foreign = NULL, *rd_interleave = NULL, *rd_other = NULL, *rd_pte_updates = NULL, *rd_huge_pte_updates = NULL, *rd_hint_faults = NULL, *rd_hint_faults_local = NULL, *rd_pages_migrated = NULL;
+
+ if(unlikely(!st_numa)) {
+ st_numa = rrdset_create_localhost(
+ "mem"
+ , "numa"
+ , NULL
+ , "numa"
+ , NULL
+ , "NUMA events"
+ , "events/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_VMSTAT_NAME
+ , NETDATA_CHART_PRIO_MEM_NUMA
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rrdset_flag_set(st_numa, RRDSET_FLAG_DETAIL);
+
+ // These depend on CONFIG_NUMA in the kernel.
+ rd_local = rrddim_add(st_numa, "local", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_foreign = rrddim_add(st_numa, "foreign", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_interleave = rrddim_add(st_numa, "interleave", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_other = rrddim_add(st_numa, "other", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+
+ // The following stats depend on CONFIG_NUMA_BALANCING in the
+ // kernel.
+ rd_pte_updates = rrddim_add(st_numa, "pte_updates", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_huge_pte_updates = rrddim_add(st_numa, "huge_pte_updates", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_hint_faults = rrddim_add(st_numa, "hint_faults", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_hint_faults_local = rrddim_add(st_numa, "hint_faults_local", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_pages_migrated = rrddim_add(st_numa, "pages_migrated", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st_numa);
+
+ rrddim_set_by_pointer(st_numa, rd_local, numa_local);
+ rrddim_set_by_pointer(st_numa, rd_foreign, numa_foreign);
+ rrddim_set_by_pointer(st_numa, rd_interleave, numa_interleave);
+ rrddim_set_by_pointer(st_numa, rd_other, numa_other);
+
+ rrddim_set_by_pointer(st_numa, rd_pte_updates, numa_pte_updates);
+ rrddim_set_by_pointer(st_numa, rd_huge_pte_updates, numa_huge_pte_updates);
+ rrddim_set_by_pointer(st_numa, rd_hint_faults, numa_hint_faults);
+ rrddim_set_by_pointer(st_numa, rd_hint_faults_local, numa_hint_faults_local);
+ rrddim_set_by_pointer(st_numa, rd_pages_migrated, numa_pages_migrated);
+
+ rrdset_done(st_numa);
+ }
+
+ return 0;
+}
+
diff --git a/collectors/proc.plugin/sys_block_zram.c b/collectors/proc.plugin/sys_block_zram.c
new file mode 100644
index 0000000..170c720
--- /dev/null
+++ b/collectors/proc.plugin/sys_block_zram.c
@@ -0,0 +1,292 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "plugin_proc.h"
+
+#define PLUGIN_PROC_MODULE_ZRAM_NAME "/sys/block/zram"
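+// mark a chart obsolete and clear its pointer; wrapped in do { } while(0) so the
+// macro expands to a single statement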
+#define rrdset_obsolete_and_pointer_null(st) do { if(st) { rrdset_is_obsolete(st); (st) = NULL; } } while(0)
+
+typedef struct mm_stat {
+ unsigned long long orig_data_size;
+ unsigned long long compr_data_size;
+ unsigned long long mem_used_total;
+ unsigned long long mem_limit;
+ unsigned long long mem_used_max;
+ unsigned long long same_pages;
+ unsigned long long pages_compacted;
+} MM_STAT;
+
+typedef struct zram_device {
+ procfile *file;
+
+ RRDSET *st_usage;
+ RRDDIM *rd_compr_data_size;
+ RRDDIM *rd_metadata_size;
+
+ RRDSET *st_savings;
+ RRDDIM *rd_original_size;
+ RRDDIM *rd_savings_size;
+
+ RRDSET *st_comp_ratio;
+ RRDDIM *rd_comp_ratio;
+
+ RRDSET *st_alloc_efficiency;
+ RRDDIM *rd_alloc_efficiency;
+} ZRAM_DEVICE;
+
+ // --------------------------------------------------------------------
+
+static int try_get_zram_major_number(procfile *file) {
+ size_t i;
+ unsigned int lines = procfile_lines(file);
+ int id = -1;
+ char *name = NULL;
+ for (i = 0; i < lines; i++)
+ {
+ if (procfile_linewords(file, i) < 2)
+ continue;
+ name = procfile_lineword(file, i, 1);
+ if (strcmp(name, "zram") == 0)
+ {
+ id = str2i(procfile_lineword(file, i, 0));
+ if (id == 0)
+ return -1;
+ return id;
+ }
+ }
+ return -1;
+}
+
+static inline void init_rrd(const char *name, ZRAM_DEVICE *d, int update_every) {
+ char chart_name[RRD_ID_LENGTH_MAX + 1];
+
+ snprintfz(chart_name, RRD_ID_LENGTH_MAX, "zram_usage.%s", name);
+ d->st_usage = rrdset_create_localhost(
+ "mem"
+ , chart_name
+ , chart_name
+ , name
+ , "mem.zram_usage"
+ , "ZRAM Memory Usage"
+ , "MiB"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_ZRAM_NAME
+ , NETDATA_CHART_PRIO_MEM_ZRAM
+ , update_every
+ , RRDSET_TYPE_AREA);
+ d->rd_compr_data_size = rrddim_add(d->st_usage, "compressed", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+ d->rd_metadata_size = rrddim_add(d->st_usage, "metadata", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+
+ snprintfz(chart_name, RRD_ID_LENGTH_MAX, "zram_savings.%s", name);
+ d->st_savings = rrdset_create_localhost(
+ "mem"
+ , chart_name
+ , chart_name
+ , name
+ , "mem.zram_savings"
+ , "ZRAM Memory Savings"
+ , "MiB"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_ZRAM_NAME
+ , NETDATA_CHART_PRIO_MEM_ZRAM_SAVINGS
+ , update_every
+ , RRDSET_TYPE_AREA);
+ d->rd_savings_size = rrddim_add(d->st_savings, "savings", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+ d->rd_original_size = rrddim_add(d->st_savings, "original", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+
+ snprintfz(chart_name, RRD_ID_LENGTH_MAX, "zram_ratio.%s", name);
+ d->st_comp_ratio = rrdset_create_localhost(
+ "mem"
+ , chart_name
+ , chart_name
+ , name
+ , "mem.zram_ratio"
+ , "ZRAM Compression Ratio (original to compressed)"
+ , "ratio"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_ZRAM_NAME
+ , NETDATA_CHART_PRIO_MEM_ZRAM_RATIO
+ , update_every
+ , RRDSET_TYPE_LINE);
+ d->rd_comp_ratio = rrddim_add(d->st_comp_ratio, "ratio", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE);
+
+ snprintfz(chart_name, RRD_ID_LENGTH_MAX, "zram_efficiency.%s", name);
+ d->st_alloc_efficiency = rrdset_create_localhost(
+ "mem"
+ , chart_name
+ , chart_name
+ , name
+ , "mem.zram_efficiency"
+ , "ZRAM Efficiency"
+ , "percentage"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_ZRAM_NAME
+ , NETDATA_CHART_PRIO_MEM_ZRAM_EFFICIENCY
+ , update_every
+ , RRDSET_TYPE_LINE);
+ d->rd_alloc_efficiency = rrddim_add(d->st_alloc_efficiency, "percent", NULL, 1, 10000, RRD_ALGORITHM_ABSOLUTE);
+}
+
+static int init_devices(DICTIONARY *devices, unsigned int zram_id, int update_every) {
+ int count = 0;
+ DIR *dir = opendir("/dev");
+ struct dirent *de;
+ struct stat st;
+ char filename[FILENAME_MAX + 1];
+ procfile *ff = NULL;
+ ZRAM_DEVICE device;
+
+ if (unlikely(!dir))
+ return 0;
+ while ((de = readdir(dir)))
+ {
+ snprintfz(filename, FILENAME_MAX, "/dev/%s", de->d_name);
+ if (unlikely(stat(filename, &st) != 0))
+ {
+ error("ZRAM : Unable to stat %s: %s", filename, strerror(errno));
+ continue;
+ }
+ if (major(st.st_rdev) == zram_id)
+ {
+ info("ZRAM : Found device %s", filename);
+ snprintfz(filename, FILENAME_MAX, "/sys/block/%s/mm_stat", de->d_name);
+ ff = procfile_open(filename, " \t:", PROCFILE_FLAG_DEFAULT);
+ if (ff == NULL)
+ {
+ error("ZRAM : Failed to open %s: %s", filename, strerror(errno));
+ continue;
+ }
+ device.file = ff;
+ init_rrd(de->d_name, &device, update_every);
+ dictionary_set(devices, de->d_name, &device, sizeof(ZRAM_DEVICE));
+ count++;
+ }
+ }
+ closedir(dir);
+ return count;
+}
+
+static void free_device(DICTIONARY *dict, char *name)
+{
+ ZRAM_DEVICE *d = (ZRAM_DEVICE*)dictionary_get(dict, name);
+ info("ZRAM : Disabling monitoring of device %s", name);
+ rrdset_obsolete_and_pointer_null(d->st_usage);
+ rrdset_obsolete_and_pointer_null(d->st_savings);
+ rrdset_obsolete_and_pointer_null(d->st_alloc_efficiency);
+ rrdset_obsolete_and_pointer_null(d->st_comp_ratio);
+ dictionary_del(dict, name);
+}
+ // --------------------------------------------------------------------
+
+static inline int read_mm_stat(procfile *ff, MM_STAT *stats) {
+ ff = procfile_readall(ff);
+ if (!ff)
+ return -1;
+ if (procfile_lines(ff) < 1) {
+ procfile_close(ff);
+ return -1;
+ }
+ if (procfile_linewords(ff, 0) < 7) {
+ procfile_close(ff);
+ return -1;
+ }
+
+ stats->orig_data_size = str2ull(procfile_word(ff, 0));
+ stats->compr_data_size = str2ull(procfile_word(ff, 1));
+ stats->mem_used_total = str2ull(procfile_word(ff, 2));
+ stats->mem_limit = str2ull(procfile_word(ff, 3));
+ stats->mem_used_max = str2ull(procfile_word(ff, 4));
+ stats->same_pages = str2ull(procfile_word(ff, 5));
+ stats->pages_compacted = str2ull(procfile_word(ff, 6));
+ return 0;
+}
+
+static inline int _collect_zram_metrics(char* name, ZRAM_DEVICE *d, int advance, DICTIONARY* dict) {
+ MM_STAT mm;
+ int value;
+ if (unlikely(read_mm_stat(d->file, &mm) < 0))
+ {
+ free_device(dict, name);
+ return -1;
+ }
+
+ if (likely(advance))
+ {
+ rrdset_next(d->st_usage);
+ rrdset_next(d->st_savings);
+ rrdset_next(d->st_comp_ratio);
+ rrdset_next(d->st_alloc_efficiency);
+ }
+ // zram_usage
+ rrddim_set_by_pointer(d->st_usage, d->rd_compr_data_size, mm.compr_data_size);
+ rrddim_set_by_pointer(d->st_usage, d->rd_metadata_size, mm.mem_used_total - mm.compr_data_size);
+ rrdset_done(d->st_usage);
+ // zram_savings
+ rrddim_set_by_pointer(d->st_savings, d->rd_savings_size, mm.compr_data_size - mm.orig_data_size);
+ rrddim_set_by_pointer(d->st_savings, d->rd_original_size, mm.orig_data_size);
+ rrdset_done(d->st_savings);
+ // zram_ratio
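+ // the ratio is stored multiplied by 100 (the dimension divisor is 100),
+ // so e.g. a stored 350 renders as a 3.50 compression ratio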
+ value = mm.compr_data_size == 0 ? 1 : mm.orig_data_size * 100 / mm.compr_data_size;
+ rrddim_set_by_pointer(d->st_comp_ratio, d->rd_comp_ratio, value);
+ rrdset_done(d->st_comp_ratio);
+ // zram_efficiency
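+ // efficiency is compressed size over total memory used, stored multiplied by
+ // 10000 (the dimension divisor) to keep the fractional part of the percentage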
+ value = mm.mem_used_total == 0 ? 100 : (mm.compr_data_size * 1000000 / mm.mem_used_total);
+ rrddim_set_by_pointer(d->st_alloc_efficiency, d->rd_alloc_efficiency, value);
+ rrdset_done(d->st_alloc_efficiency);
+ return 0;
+}
+
+static int collect_first_zram_metrics(char *name, void *entry, void *data) {
+ // collect without calling rrdset_next (init only)
+ return _collect_zram_metrics(name, (ZRAM_DEVICE *)entry, 0, (DICTIONARY *)data);
+}
+
+static int collect_zram_metrics(char *name, void *entry, void *data) {
+ // collect, advancing the charts with rrdset_next first
+ return _collect_zram_metrics(name, (ZRAM_DEVICE *)entry, 1, (DICTIONARY *)data);
+}
+
+ // --------------------------------------------------------------------
+
+int do_sys_block_zram(int update_every, usec_t dt) {
+ (void)dt;
+ static procfile *ff = NULL;
+ static DICTIONARY *devices = NULL;
+ static int initialized = 0;
+ static int device_count = 0;
+ int zram_id = -1;
+ if (unlikely(!initialized))
+ {
+ initialized = 1;
+ ff = procfile_open("/proc/devices", " \t:", PROCFILE_FLAG_DEFAULT);
+ if (ff == NULL)
+ {
+ error("Cannot read /proc/devices");
+ return 1;
+ }
+ ff = procfile_readall(ff);
+ if (!ff)
+ return 1;
+ zram_id = try_get_zram_major_number(ff);
+ if (zram_id == -1)
+ {
+ procfile_close(ff);
+ return 1;
+ }
+ procfile_close(ff);
+
+ devices = dictionary_create(DICTIONARY_FLAG_SINGLE_THREADED);
+ device_count = init_devices(devices, (unsigned int)zram_id, update_every);
+ if (device_count < 1)
+ return 1;
+ dictionary_get_all_name_value(devices, collect_first_zram_metrics, devices);
+ }
+ else
+ {
+ if (unlikely(device_count < 1))
+ return 1;
+ dictionary_get_all_name_value(devices, collect_zram_metrics, devices);
+ }
+ return 0;
+} \ No newline at end of file
diff --git a/collectors/proc.plugin/sys_class_infiniband.c b/collectors/proc.plugin/sys_class_infiniband.c
new file mode 100644
index 0000000..46f40f2
--- /dev/null
+++ b/collectors/proc.plugin/sys_class_infiniband.c
@@ -0,0 +1,704 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+// Heavily inspired from proc_net_dev.c
+#include "plugin_proc.h"
+
+#define PLUGIN_PROC_MODULE_INFINIBAND_NAME "/sys/class/infiniband"
+#define CONFIG_SECTION_PLUGIN_SYS_CLASS_INFINIBAND \
+ "plugin:" PLUGIN_PROC_CONFIG_NAME ":" PLUGIN_PROC_MODULE_INFINIBAND_NAME
+
+// ib_device::name[IB_DEVICE_NAME_MAX(64)] + "-" + ib_device::phys_port_cnt[u8 = 3 chars]
+#define IBNAME_MAX 68
+
+// ----------------------------------------------------------------------------
+// infiniband & omnipath standard counters
+
+// Macros are used here because there is no single file acting as a summary, but many
+// different files, so helpers like procfile() cannot be used. Also, omnipath generates
+// additional counters that infiniband does not provide.
+#define FOREACH_COUNTER(GEN, ...) \
+ FOREACH_COUNTER_BYTES(GEN, __VA_ARGS__) \
+ FOREACH_COUNTER_PACKETS(GEN, __VA_ARGS__) \
+ FOREACH_COUNTER_ERRORS(GEN, __VA_ARGS__)
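+// X-macro pattern: every GEN(name, group, description, direction, ...) row below
+// describes one sysfs counter file. Passing a generator macro to FOREACH_COUNTER()
+// stamps it out once per counter - e.g. FOREACH_COUNTER(GEN_DEF_COUNTER) further
+// down declares the value, filename and dimension fields of every counter.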
+
+#define FOREACH_COUNTER_BYTES(GEN, ...) \
+ GEN(port_rcv_data, bytes, "Received", 1, __VA_ARGS__) \
+ GEN(port_xmit_data, bytes, "Sent", -1, __VA_ARGS__)
+
+#define FOREACH_COUNTER_PACKETS(GEN, ...) \
+ GEN(port_rcv_packets, packets, "Received", 1, __VA_ARGS__) \
+ GEN(port_xmit_packets, packets, "Sent", -1, __VA_ARGS__) \
+ GEN(multicast_rcv_packets, packets, "Mcast rcvd", 1, __VA_ARGS__) \
+ GEN(multicast_xmit_packets, packets, "Mcast sent", -1, __VA_ARGS__) \
+ GEN(unicast_rcv_packets, packets, "Ucast rcvd", 1, __VA_ARGS__) \
+ GEN(unicast_xmit_packets, packets, "Ucast sent", -1, __VA_ARGS__)
+
+#define FOREACH_COUNTER_ERRORS(GEN, ...) \
+ GEN(port_rcv_errors, errors, "Pkts malformed", 1, __VA_ARGS__) \
+ GEN(port_rcv_constraint_errors, errors, "Pkts rcvd discarded", 1, __VA_ARGS__) \
+ GEN(port_xmit_discards, errors, "Pkts sent discarded", 1, __VA_ARGS__) \
+ GEN(port_xmit_wait, errors, "Tick Wait to send", 1, __VA_ARGS__) \
+ GEN(VL15_dropped, errors, "Pkts missed resource", 1, __VA_ARGS__) \
+ GEN(excessive_buffer_overrun_errors, errors, "Buffer overrun", 1, __VA_ARGS__) \
+ GEN(link_downed, errors, "Link Downed", 1, __VA_ARGS__) \
+ GEN(link_error_recovery, errors, "Link recovered", 1, __VA_ARGS__) \
+ GEN(local_link_integrity_errors, errors, "Link integrity err", 1, __VA_ARGS__) \
+ GEN(symbol_error, errors, "Link minor errors", 1, __VA_ARGS__) \
+ GEN(port_rcv_remote_physical_errors, errors, "Pkts rcvd with EBP", 1, __VA_ARGS__) \
+ GEN(port_rcv_switch_relay_errors, errors, "Pkts rcvd discarded by switch", 1, __VA_ARGS__) \
+ GEN(port_xmit_constraint_errors, errors, "Pkts sent discarded by switch", 1, __VA_ARGS__)
+
+//
+// Hardware Counters
+//
+
+// IMPORTANT: These are vendor-specific fields.
+// If you want to add a new vendor, search this file for the 'VENDORS:' keyword and
+// add your definition as 'VENDOR-<key>:', where <key> is the string part that
+// is shown in /sys/class/infiniband/<key>X_Y
+// EG: for Mellanox, shown as mlx0_1, it's 'mlx'
+// for Intel, shown as hfi1_1, it's 'hfi'
+
+// VENDORS: List of implemented hardware vendors
+#define FOREACH_HWCOUNTER_NAME(GEN, ...) GEN(mlx, __VA_ARGS__)
+
+// VENDOR-MLX: HW Counters for Mellanox ConnectX Devices
+#define FOREACH_HWCOUNTER_MLX(GEN, ...) \
+ FOREACH_HWCOUNTER_MLX_PACKETS(GEN, __VA_ARGS__) \
+ FOREACH_HWCOUNTER_MLX_ERRORS(GEN, __VA_ARGS__)
+
+#define FOREACH_HWCOUNTER_MLX_PACKETS(GEN, ...) \
+ GEN(np_cnp_sent, hwpackets, "RoCEv2 Congestion sent", 1, __VA_ARGS__) \
+ GEN(np_ecn_marked_roce_packets, hwpackets, "RoCEv2 Congestion rcvd", -1, __VA_ARGS__) \
+ GEN(rp_cnp_handled, hwpackets, "IB Congestion handled", 1, __VA_ARGS__) \
+ GEN(rx_atomic_requests, hwpackets, "ATOMIC req. rcvd", 1, __VA_ARGS__) \
+ GEN(rx_dct_connect, hwpackets, "Connection req. rcvd", 1, __VA_ARGS__) \
+ GEN(rx_read_requests, hwpackets, "Read req. rcvd", 1, __VA_ARGS__) \
+ GEN(rx_write_requests, hwpackets, "Write req. rcvd", 1, __VA_ARGS__) \
+ GEN(roce_adp_retrans, hwpackets, "RoCE retrans adaptive", 1, __VA_ARGS__) \
+ GEN(roce_adp_retrans_to, hwpackets, "RoCE retrans timeout", 1, __VA_ARGS__) \
+ GEN(roce_slow_restart, hwpackets, "RoCE slow restart", 1, __VA_ARGS__) \
+ GEN(roce_slow_restart_cnps, hwpackets, "RoCE slow restart congestion", 1, __VA_ARGS__) \
+ GEN(roce_slow_restart_trans, hwpackets, "RoCE slow restart count", 1, __VA_ARGS__)
+
+#define FOREACH_HWCOUNTER_MLX_ERRORS(GEN, ...) \
+ GEN(duplicate_request, hwerrors, "Duplicated packets", -1, __VA_ARGS__) \
+ GEN(implied_nak_seq_err, hwerrors, "Pkt Seq Num gap", 1, __VA_ARGS__) \
+ GEN(local_ack_timeout_err, hwerrors, "Ack timer expired", 1, __VA_ARGS__) \
+ GEN(out_of_buffer, hwerrors, "Drop missing buffer", 1, __VA_ARGS__) \
+ GEN(out_of_sequence, hwerrors, "Drop out of sequence", 1, __VA_ARGS__) \
+ GEN(packet_seq_err, hwerrors, "NAK sequence rcvd", 1, __VA_ARGS__) \
+ GEN(req_cqe_error, hwerrors, "CQE err Req", 1, __VA_ARGS__) \
+ GEN(resp_cqe_error, hwerrors, "CQE err Resp", 1, __VA_ARGS__) \
+ GEN(req_cqe_flush_error, hwerrors, "CQE Flushed err Req", 1, __VA_ARGS__) \
+ GEN(resp_cqe_flush_error, hwerrors, "CQE Flushed err Resp", 1, __VA_ARGS__) \
+ GEN(req_remote_access_errors, hwerrors, "Remote access err Req", 1, __VA_ARGS__) \
+ GEN(resp_remote_access_errors, hwerrors, "Remote access err Resp", 1, __VA_ARGS__) \
+ GEN(req_remote_invalid_request, hwerrors, "Remote invalid req", 1, __VA_ARGS__) \
+ GEN(resp_local_length_error, hwerrors, "Local length err Resp", 1, __VA_ARGS__) \
+ GEN(rnr_nak_retry_err, hwerrors, "RNR NAK Packets", 1, __VA_ARGS__) \
+ GEN(rp_cnp_ignored, hwerrors, "CNP Pkts ignored", 1, __VA_ARGS__) \
+ GEN(rx_icrc_encapsulated, hwerrors, "RoCE ICRC Errors", 1, __VA_ARGS__)
+
+// Common definitions used more than once
+#define GEN_RRD_DIM_ADD(NAME, GRP, DESC, DIR, PORT) \
+ GEN_RRD_DIM_ADD_CUSTOM(NAME, GRP, DESC, DIR, PORT, 1, 1, RRD_ALGORITHM_INCREMENTAL)
+
+#define GEN_RRD_DIM_ADD_CUSTOM(NAME, GRP, DESC, DIR, PORT, MULT, DIV, ALGO) \
+ PORT->rd_##NAME = rrddim_add(PORT->st_##GRP, DESC, NULL, DIR * MULT, DIV, ALGO);
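+// For illustration, GEN_RRD_DIM_ADD(port_rcv_data, bytes, "Received", 1, port)
+// expands through GEN_RRD_DIM_ADD_CUSTOM to roughly:
+//   port->rd_port_rcv_data = rrddim_add(port->st_bytes, "Received", NULL, 1 * 1, 1, RRD_ALGORITHM_INCREMENTAL);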
+
+#define GEN_RRD_DIM_ADD_HW(NAME, GRP, DESC, DIR, PORT, HW) \
+ HW->rd_##NAME = rrddim_add(PORT->st_##GRP, DESC, NULL, DIR, 1, RRD_ALGORITHM_INCREMENTAL);
+
+#define GEN_RRD_DIM_SETP(NAME, GRP, DESC, DIR, PORT) \
+ rrddim_set_by_pointer(PORT->st_##GRP, PORT->rd_##NAME, (collected_number)PORT->NAME);
+
+#define GEN_RRD_DIM_SETP_HW(NAME, GRP, DESC, DIR, PORT, HW) \
+ rrddim_set_by_pointer(PORT->st_##GRP, HW->rd_##NAME, (collected_number)HW->NAME);
+
+// https://community.mellanox.com/s/article/understanding-mlx5-linux-counters-and-status-parameters
+// https://community.mellanox.com/s/article/infiniband-port-counters
+static struct ibport {
+ char *name;
+ char *counters_path;
+ char *hwcounters_path;
+ int len;
+
+ // flags
+ int configured;
+ int enabled;
+ int discovered;
+
+ int do_bytes;
+ int do_packets;
+ int do_errors;
+ int do_hwpackets;
+ int do_hwerrors;
+
+ const char *chart_type_bytes;
+ const char *chart_type_packets;
+ const char *chart_type_errors;
+ const char *chart_type_hwpackets;
+ const char *chart_type_hwerrors;
+
+ const char *chart_id_bytes;
+ const char *chart_id_packets;
+ const char *chart_id_errors;
+ const char *chart_id_hwpackets;
+ const char *chart_id_hwerrors;
+
+ const char *chart_family;
+
+ unsigned long priority;
+
+ // Port details using drivers/infiniband/core/sysfs.c :: rate_show()
+ RRDDIM *rd_speed;
+ uint64_t speed;
+ uint64_t width;
+
+// Stats from /$device/ports/$portid/counters
+// as drivers/infiniband/hw/qib/qib_verbs.h
+// All are uint64 except vl15_dropped, local_link_integrity_errors and excessive_buffer_overrun_errors, which are uint32
+// Will generate 3 elements for each counter:
+// - uint64_t to store the value
+// - char* to store the filename path
+// - RRDDIM* to store the RRD Dimension
+#define GEN_DEF_COUNTER(NAME, ...) \
+ uint64_t NAME; \
+ char *file_##NAME; \
+ RRDDIM *rd_##NAME;
+ FOREACH_COUNTER(GEN_DEF_COUNTER)
+
+// Vendor specific hwcounters from /$device/ports/$portid/hw_counters
+// We will generate one struct pointer per vendor to avoid future casting
+#define GEN_DEF_HWCOUNTER_PTR(VENDOR, ...) struct ibporthw_##VENDOR *hwcounters_##VENDOR;
+ FOREACH_HWCOUNTER_NAME(GEN_DEF_HWCOUNTER_PTR)
+
+ // Function pointer to the "infiniband_hwcounters_parse_<vendor>" function
+ void (*hwcounters_parse)(struct ibport *);
+ void (*hwcounters_dorrd)(struct ibport *);
+
+ // charts and dim
+ RRDSET *st_bytes;
+ RRDSET *st_packets;
+ RRDSET *st_errors;
+ RRDSET *st_hwpackets;
+ RRDSET *st_hwerrors;
+
+ RRDSETVAR *stv_speed;
+
+ usec_t speed_last_collected_usec;
+
+ struct ibport *next;
+} *ibport_root = NULL, *ibport_last_used = NULL;
+
+// VENDORS: reading / calculation functions
+#define GEN_DEF_HWCOUNTER(NAME, ...) \
+ uint64_t NAME; \
+ char *file_##NAME; \
+ RRDDIM *rd_##NAME;
+
+#define GEN_DO_HWCOUNTER_READ(NAME, GRP, DESC, DIR, PORT, HW, ...) \
+ if (HW->file_##NAME) { \
+ if (read_single_number_file(HW->file_##NAME, (unsigned long long *)&HW->NAME)) { \
+ error("cannot read iface '%s' hwcounter '" #HW "'", PORT->name); \
+ HW->file_##NAME = NULL; \
+ } \
+ }
+
+// VENDOR-MLX: Mellanox
+struct ibporthw_mlx {
+ FOREACH_HWCOUNTER_MLX(GEN_DEF_HWCOUNTER)
+};
+void infiniband_hwcounters_parse_mlx(struct ibport *port)
+{
+ if (port->do_hwerrors != CONFIG_BOOLEAN_NO)
+ FOREACH_HWCOUNTER_MLX_ERRORS(GEN_DO_HWCOUNTER_READ, port, port->hwcounters_mlx)
+ if (port->do_hwpackets != CONFIG_BOOLEAN_NO)
+ FOREACH_HWCOUNTER_MLX_PACKETS(GEN_DO_HWCOUNTER_READ, port, port->hwcounters_mlx)
+}
+void infiniband_hwcounters_dorrd_mlx(struct ibport *port)
+{
+ if (port->do_hwerrors != CONFIG_BOOLEAN_NO) {
+ FOREACH_HWCOUNTER_MLX_ERRORS(GEN_RRD_DIM_SETP_HW, port, port->hwcounters_mlx)
+ rrdset_done(port->st_hwerrors);
+ }
+ if (port->do_hwpackets != CONFIG_BOOLEAN_NO) {
+ FOREACH_HWCOUNTER_MLX_PACKETS(GEN_RRD_DIM_SETP_HW, port, port->hwcounters_mlx)
+ rrdset_done(port->st_hwpackets);
+ }
+}
+
+// ----------------------------------------------------------------------------
+
+static struct ibport *get_ibport(const char *dev, const char *port)
+{
+ struct ibport *p;
+
+ char name[IBNAME_MAX + 1];
+ snprintfz(name, IBNAME_MAX, "%s-%s", dev, port);
+
+ // search it, resuming from the last position in sequence
+ for (p = ibport_last_used; p; p = p->next) {
+ if (unlikely(!strcmp(name, p->name))) {
+ ibport_last_used = p->next;
+ return p;
+ }
+ }
+
+ // new round, from the beginning to the last position used this time
+ for (p = ibport_root; p != ibport_last_used; p = p->next) {
+ if (unlikely(!strcmp(name, p->name))) {
+ ibport_last_used = p->next;
+ return p;
+ }
+ }
+
+ // create a new one
+ p = callocz(1, sizeof(struct ibport));
+ p->name = strdupz(name);
+ p->len = strlen(p->name);
+
+ p->chart_type_bytes = strdupz("infiniband_cnt_bytes");
+ p->chart_type_packets = strdupz("infiniband_cnt_packets");
+ p->chart_type_errors = strdupz("infiniband_cnt_errors");
+ p->chart_type_hwpackets = strdupz("infiniband_hwc_packets");
+ p->chart_type_hwerrors = strdupz("infiniband_hwc_errors");
+
+ char buffer[RRD_ID_LENGTH_MAX + 1];
+ snprintfz(buffer, RRD_ID_LENGTH_MAX, "ib_cntbytes_%s", p->name);
+ p->chart_id_bytes = strdupz(buffer);
+
+ snprintfz(buffer, RRD_ID_LENGTH_MAX, "ib_cntpackets_%s", p->name);
+ p->chart_id_packets = strdupz(buffer);
+
+ snprintfz(buffer, RRD_ID_LENGTH_MAX, "ib_cnterrors_%s", p->name);
+ p->chart_id_errors = strdupz(buffer);
+
+ snprintfz(buffer, RRD_ID_LENGTH_MAX, "ib_hwcntpackets_%s", p->name);
+ p->chart_id_hwpackets = strdupz(buffer);
+
+ snprintfz(buffer, RRD_ID_LENGTH_MAX, "ib_hwcnterrors_%s", p->name);
+ p->chart_id_hwerrors = strdupz(buffer);
+
+ p->chart_family = strdupz(p->name);
+ p->priority = NETDATA_CHART_PRIO_INFINIBAND;
+
+ // Link current ibport to last one in the list
+ if (ibport_root) {
+ struct ibport *t;
+ for (t = ibport_root; t->next; t = t->next)
+ ;
+ t->next = p;
+ } else
+ ibport_root = p;
+
+ return p;
+}
+
+int do_sys_class_infiniband(int update_every, usec_t dt)
+{
+ (void)dt;
+ static SIMPLE_PATTERN *disabled_list = NULL;
+ static int initialized = 0;
+ static int enable_new_ports = -1, enable_only_active = CONFIG_BOOLEAN_YES;
+ static int do_bytes = -1, do_packets = -1, do_errors = -1, do_hwpackets = -1, do_hwerrors = -1;
+ static char *sys_class_infiniband_dirname = NULL;
+
+ static long long int dt_to_refresh_ports = 0, last_refresh_ports_usec = 0;
+
+ if (unlikely(enable_new_ports == -1)) {
+ char dirname[FILENAME_MAX + 1];
+
+ snprintfz(dirname, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/class/infiniband");
+ sys_class_infiniband_dirname =
+ config_get(CONFIG_SECTION_PLUGIN_SYS_CLASS_INFINIBAND, "dirname to monitor", dirname);
+
+ do_bytes = config_get_boolean_ondemand(
+ CONFIG_SECTION_PLUGIN_SYS_CLASS_INFINIBAND, "bandwidth counters", CONFIG_BOOLEAN_YES);
+ do_packets = config_get_boolean_ondemand(
+ CONFIG_SECTION_PLUGIN_SYS_CLASS_INFINIBAND, "packets counters", CONFIG_BOOLEAN_YES);
+ do_errors = config_get_boolean_ondemand(
+ CONFIG_SECTION_PLUGIN_SYS_CLASS_INFINIBAND, "errors counters", CONFIG_BOOLEAN_YES);
+ do_hwpackets = config_get_boolean_ondemand(
+ CONFIG_SECTION_PLUGIN_SYS_CLASS_INFINIBAND, "hardware packets counters", CONFIG_BOOLEAN_AUTO);
+ do_hwerrors = config_get_boolean_ondemand(
+ CONFIG_SECTION_PLUGIN_SYS_CLASS_INFINIBAND, "hardware errors counters", CONFIG_BOOLEAN_AUTO);
+
+ enable_only_active = config_get_boolean_ondemand(
+ CONFIG_SECTION_PLUGIN_SYS_CLASS_INFINIBAND, "monitor only active ports", CONFIG_BOOLEAN_AUTO);
+ disabled_list = simple_pattern_create(
+ config_get(CONFIG_SECTION_PLUGIN_SYS_CLASS_INFINIBAND, "disable by default interfaces matching", ""), NULL,
+ SIMPLE_PATTERN_EXACT);
+
+ dt_to_refresh_ports =
+ config_get_number(CONFIG_SECTION_PLUGIN_SYS_CLASS_INFINIBAND, "refresh ports state every seconds", 30) *
+ USEC_PER_SEC;
+ if (dt_to_refresh_ports < 0)
+ dt_to_refresh_ports = 0;
+ }
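+ // Note: the options read above map to a netdata.conf section like the
+ // following (defaults shown; the section name is the
+ // CONFIG_SECTION_PLUGIN_SYS_CLASS_INFINIBAND constant, presumably
+ // "plugin:proc:/sys/class/infiniband", matching the per-port sections
+ // generated below):
+ //
+ //     [plugin:proc:/sys/class/infiniband]
+ //         dirname to monitor = /sys/class/infiniband
+ //         bandwidth counters = yes
+ //         packets counters = yes
+ //         errors counters = yes
+ //         hardware packets counters = auto
+ //         hardware errors counters = auto
+ //         monitor only active ports = auto
+ //         disable by default interfaces matching =
+ //         refresh ports state every seconds = 30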
+
+ // initial listing of /sys/class/infiniband/ (or periodic rediscovery)
+ if (unlikely(!initialized) || unlikely(last_refresh_ports_usec >= dt_to_refresh_ports)) {
+ // If the folder does not exist, return 1 to disable the module
+ DIR *devices_dir = opendir(sys_class_infiniband_dirname);
+ if (unlikely(!devices_dir))
+ return 1;
+
+ // Iterate over all available devices
+ struct dirent *dev_dent;
+ while ((dev_dent = readdir(devices_dir))) {
+ // Skip special folders
+ if (!strcmp(dev_dent->d_name, "..") || !strcmp(dev_dent->d_name, "."))
+ continue;
+
+ // /sys/class/infiniband/<dev>/ports
+ char ports_dirname[FILENAME_MAX + 1];
+ snprintfz(ports_dirname, FILENAME_MAX, "%s/%s/%s", sys_class_infiniband_dirname, dev_dent->d_name, "ports");
+
+ DIR *ports_dir = opendir(ports_dirname);
+ if (unlikely(!ports_dir))
+ continue;
+
+ struct dirent *port_dent;
+ while ((port_dent = readdir(ports_dir))) {
+ // Skip special folders
+ if (!strcmp(port_dent->d_name, "..") || !strcmp(port_dent->d_name, "."))
+ continue;
+
+ char buffer[FILENAME_MAX + 1];
+
+ // Check if counters are available (mandatory)
+ // /sys/class/infiniband/<device>/ports/<port>/counters
+ char counters_dirname[FILENAME_MAX + 1];
+ snprintfz(counters_dirname, FILENAME_MAX, "%s/%s/%s", ports_dirname, port_dent->d_name, "counters");
+ DIR *counters_dir = opendir(counters_dirname);
+ // Standard counters are mandatory
+ if (!counters_dir)
+ continue;
+ closedir(counters_dir);
+
+ // Hardware counters are optional, used later
+ char hwcounters_dirname[FILENAME_MAX + 1];
+ snprintfz(
+ hwcounters_dirname, FILENAME_MAX, "%s/%s/%s", ports_dirname, port_dent->d_name, "hw_counters");
+
+ // Get new or existing ibport
+ struct ibport *p = get_ibport(dev_dent->d_name, port_dent->d_name);
+ if (!p)
+ continue;
+
+ // Prepare configuration
+ if (!p->configured) {
+ p->configured = 1;
+
+ // Enable by default, will be filtered out later
+ p->enabled = 1;
+
+ p->counters_path = strdupz(counters_dirname);
+ p->hwcounters_path = strdupz(hwcounters_dirname);
+
+ snprintfz(buffer, FILENAME_MAX, "plugin:proc:/sys/class/infiniband:%s", p->name);
+
+ // Standard counters
+ p->do_bytes = config_get_boolean_ondemand(buffer, "bytes", do_bytes);
+ p->do_packets = config_get_boolean_ondemand(buffer, "packets", do_packets);
+ p->do_errors = config_get_boolean_ondemand(buffer, "errors", do_errors);
+
+// Generate the counter filename: allocate and concatenate
+#define GEN_DO_COUNTER_NAME(NAME, GRP, DESC, DIR, PORT, ...) \
+ PORT->file_##NAME = callocz(1, strlen(PORT->counters_path) + sizeof(#NAME) + 3); \
+ strcat(PORT->file_##NAME, PORT->counters_path); \
+ strcat(PORT->file_##NAME, "/" #NAME);
+ FOREACH_COUNTER(GEN_DO_COUNTER_NAME, p)
+
+ // Check hardware counters (vendor dependent)
+ DIR *hwcounters_dir = opendir(hwcounters_dirname);
+ if (hwcounters_dir) {
+ // Apply the standard defaults first
+ p->do_hwpackets = config_get_boolean_ondemand(buffer, "hwpackets", do_hwpackets);
+ p->do_hwerrors = config_get_boolean_ondemand(buffer, "hwerrors", do_hwerrors);
+
+// VENDORS: add your own handling below
+
+// Allocate the filename buffers
+#define GEN_DO_HWCOUNTER_NAME(NAME, GRP, DESC, DIR, PORT, HW, ...) \
+ HW->file_##NAME = callocz(1, strlen(PORT->hwcounters_path) + sizeof(#NAME) + 3); \
+ strcat(HW->file_##NAME, PORT->hwcounters_path); \
+ strcat(HW->file_##NAME, "/" #NAME);
+
+ // VENDOR-MLX: Mellanox
+ if (strncmp(dev_dent->d_name, "mlx", 3) == 0) {
+ // Allocate the vendor specific struct
+ p->hwcounters_mlx = callocz(1, sizeof(struct ibporthw_mlx));
+
+ FOREACH_HWCOUNTER_MLX(GEN_DO_HWCOUNTER_NAME, p, p->hwcounters_mlx)
+
+ // Set the function pointers for hwcounter parsing and chart updates
+ p->hwcounters_parse = &infiniband_hwcounters_parse_mlx;
+ p->hwcounters_dorrd = &infiniband_hwcounters_dorrd_mlx;
+ }
+
+ // VENDOR: Unknown
+ else {
+ p->do_hwpackets = CONFIG_BOOLEAN_NO;
+ p->do_hwerrors = CONFIG_BOOLEAN_NO;
+ }
+ closedir(hwcounters_dir);
+ }
+ }
+
+ // Check port state to decide whether the port stays enabled
+ if (enable_only_active) {
+ snprintfz(buffer, FILENAME_MAX, "%s/%s/%s", ports_dirname, port_dent->d_name, "state");
+ unsigned long long active;
+ // File is "1: DOWN" or "4: ACTIVE", but str2ull will stop on first non-decimal char
+ read_single_number_file(buffer, &active);
+
+ // Want "IB_PORT_ACTIVE" == "4", as defined by drivers/infiniband/core/sysfs.c::state_show()
+ if (active == 4)
+ p->enabled = 1;
+ else
+ p->enabled = 0;
+ }
+
+ if (p->enabled)
+ p->enabled = !simple_pattern_matches(disabled_list, p->name);
+
+ // Check / update the link speed & width from the "rate" file
+ // Sample output: "100 Gb/sec (4X EDR)"
+ snprintfz(buffer, FILENAME_MAX, "%s/%s/%s", ports_dirname, port_dent->d_name, "rate");
+ char buffer_rate[65];
+ if (read_file(buffer, buffer_rate, 64)) {
+ error("Unable to read '%s'", buffer);
+ p->width = 1;
+ } else {
+ // str2ull will stop on the first non-decimal char
+ p->speed = str2ull(buffer_rate);
+ // guard against a malformed rate line with no "(width ...)" part
+ char *buffer_width = strstr(buffer_rate, "(");
+ if (buffer_width)
+ p->width = str2ull(buffer_width + 1);
+ else
+ p->width = 1;
+ }
+
+ if (!p->discovered)
+ info(
+ "Infiniband card %s port %s at speed %lu width %lu", dev_dent->d_name, port_dent->d_name,
+ p->speed, p->width);
+
+ p->discovered = 1;
+ }
+ closedir(ports_dir);
+ }
+ closedir(devices_dir);
+
+ initialized = 1;
+ last_refresh_ports_usec = 0;
+ }
+ last_refresh_ports_usec += dt;
+
+ // Update all port values
+ struct ibport *port;
+ for (port = ibport_root; port; port = port->next) {
+ if (!port->enabled)
+ continue;
+ //
+ // Read values from system to struct
+ //
+
+// Read a counter from its file and store it in the ibport struct
+#define GEN_DO_COUNTER_READ(NAME, GRP, DESC, DIR, PORT, ...) \
+ if (PORT->file_##NAME) { \
+ if (read_single_number_file(PORT->file_##NAME, (unsigned long long *)&PORT->NAME)) { \
+ error("cannot read iface '%s' counter '" #NAME "'", PORT->name); \
+ PORT->file_##NAME = NULL; \
+ } \
+ }
+
+ // Update charts
+ if (port->do_bytes != CONFIG_BOOLEAN_NO) {
+ // Read values from sysfs
+ FOREACH_COUNTER_BYTES(GEN_DO_COUNTER_READ, port)
+
+ // First creation of RRD Set (charts)
+ if (unlikely(!port->st_bytes)) {
+ port->st_bytes = rrdset_create_localhost(
+ "Infiniband",
+ port->chart_id_bytes,
+ NULL,
+ port->chart_family,
+ "ib.bytes",
+ "Bandwidth usage",
+ "kilobits/s",
+ PLUGIN_PROC_NAME,
+ PLUGIN_PROC_MODULE_INFINIBAND_NAME,
+ port->priority + 1,
+ update_every,
+ RRDSET_TYPE_AREA);
+ // Create Dimensions
+ rrdset_flag_set(port->st_bytes, RRDSET_FLAG_DETAIL);
+ // On this chart we want kilobits/s, so the dashboard will autoscale it.
+ // The reported values are per-lane, so we must multiply by the link width.
+ FOREACH_COUNTER_BYTES(GEN_RRD_DIM_ADD_CUSTOM, port, 8 * port->width, 1024, RRD_ALGORITHM_INCREMENTAL)
+
+ port->stv_speed = rrdsetvar_custom_chart_variable_create(port->st_bytes, "link_speed");
+ } else
+ rrdset_next(port->st_bytes);
+
+ // Link read values to dimensions
+ FOREACH_COUNTER_BYTES(GEN_RRD_DIM_SETP, port)
+
+ // For link speed set only variable
+ rrdsetvar_custom_chart_variable_set(port->stv_speed, port->speed);
+
+ rrdset_done(port->st_bytes);
+ }
+
+ if (port->do_packets != CONFIG_BOOLEAN_NO) {
+ // Read values from sysfs
+ FOREACH_COUNTER_PACKETS(GEN_DO_COUNTER_READ, port)
+
+ // First creation of RRD Set (charts)
+ if (unlikely(!port->st_packets)) {
+ port->st_packets = rrdset_create_localhost(
+ "Infiniband",
+ port->chart_id_packets,
+ NULL,
+ port->chart_family,
+ "ib.packets",
+ "Packets Statistics",
+ "packets/s",
+ PLUGIN_PROC_NAME,
+ PLUGIN_PROC_MODULE_INFINIBAND_NAME,
+ port->priority + 2,
+ update_every,
+ RRDSET_TYPE_AREA);
+ // Create Dimensions
+ rrdset_flag_set(port->st_packets, RRDSET_FLAG_DETAIL);
+ FOREACH_COUNTER_PACKETS(GEN_RRD_DIM_ADD, port)
+ } else
+ rrdset_next(port->st_packets);
+
+ // Link read values to dimensions
+ FOREACH_COUNTER_PACKETS(GEN_RRD_DIM_SETP, port)
+ rrdset_done(port->st_packets);
+ }
+
+ if (port->do_errors != CONFIG_BOOLEAN_NO) {
+ // Read values from sysfs
+ FOREACH_COUNTER_ERRORS(GEN_DO_COUNTER_READ, port)
+
+ // First creation of RRD Set (charts)
+ if (unlikely(!port->st_errors)) {
+ port->st_errors = rrdset_create_localhost(
+ "Infiniband",
+ port->chart_id_errors,
+ NULL,
+ port->chart_family,
+ "ib.errors",
+ "Error Counters",
+ "errors/s",
+ PLUGIN_PROC_NAME,
+ PLUGIN_PROC_MODULE_INFINIBAND_NAME,
+ port->priority + 3,
+ update_every,
+ RRDSET_TYPE_LINE);
+ // Create Dimensions
+ rrdset_flag_set(port->st_errors, RRDSET_FLAG_DETAIL);
+ FOREACH_COUNTER_ERRORS(GEN_RRD_DIM_ADD, port)
+ } else
+ rrdset_next(port->st_errors);
+
+ // Link read values to dimensions
+ FOREACH_COUNTER_ERRORS(GEN_RRD_DIM_SETP, port)
+ rrdset_done(port->st_errors);
+ }
+
+ //
+ // HW Counters
+ //
+
+ // Call the function for parsing and setting hwcounters
+ if (port->hwcounters_parse && port->hwcounters_dorrd) {
+ // Read all values (done by vendor-specific function)
+ (*port->hwcounters_parse)(port);
+
+ if (port->do_hwerrors != CONFIG_BOOLEAN_NO) {
+ // First creation of RRD Set (charts)
+ if (unlikely(!port->st_hwerrors)) {
+ port->st_hwerrors = rrdset_create_localhost(
+ "Infiniband",
+ port->chart_id_hwerrors,
+ NULL,
+ port->chart_family,
+ "ib.hwerrors",
+ "Hardware Errors",
+ "errors/s",
+ PLUGIN_PROC_NAME,
+ PLUGIN_PROC_MODULE_INFINIBAND_NAME,
+ port->priority + 4,
+ update_every,
+ RRDSET_TYPE_LINE);
+
+ rrdset_flag_set(port->st_hwerrors, RRDSET_FLAG_DETAIL);
+
+ // VENDORS: Set your selection
+
+ // VENDOR: Mellanox
+ if (strncmp(port->name, "mlx", 3) == 0) {
+ FOREACH_HWCOUNTER_MLX_ERRORS(GEN_RRD_DIM_ADD_HW, port, port->hwcounters_mlx)
+ }
+
+ // Unknown vendor, should not happen
+ else {
+ error(
+ "Unmanaged vendor for '%s', do_hwerrors should have been set to no. Please report this bug",
+ port->name);
+ port->do_hwerrors = CONFIG_BOOLEAN_NO;
+ }
+ } else
+ rrdset_next(port->st_hwerrors);
+ }
+
+ if (port->do_hwpackets != CONFIG_BOOLEAN_NO) {
+ // First creation of RRD Set (charts)
+ if (unlikely(!port->st_hwpackets)) {
+ port->st_hwpackets = rrdset_create_localhost(
+ "Infiniband",
+ port->chart_id_hwpackets,
+ NULL,
+ port->chart_family,
+ "ib.hwpackets",
+ "Hardware Packets Statistics",
+ "packets/s",
+ PLUGIN_PROC_NAME,
+ PLUGIN_PROC_MODULE_INFINIBAND_NAME,
+ port->priority + 5,
+ update_every,
+ RRDSET_TYPE_LINE);
+
+ rrdset_flag_set(port->st_hwpackets, RRDSET_FLAG_DETAIL);
+
+ // VENDORS: Set your selection
+
+ // VENDOR: Mellanox
+ if (strncmp(port->name, "mlx", 3) == 0) {
+ FOREACH_HWCOUNTER_MLX_PACKETS(GEN_RRD_DIM_ADD_HW, port, port->hwcounters_mlx)
+ }
+
+ // Unknown vendor, should not happen
+ else {
+ error(
+ "Unmanaged vendor for '%s', do_hwpackets should have been set to no. Please report this bug",
+ port->name);
+ port->do_hwpackets = CONFIG_BOOLEAN_NO;
+ }
+ } else
+ rrdset_next(port->st_hwpackets);
+ }
+
+ // Update values to rrd (done by vendor-specific function)
+ (*port->hwcounters_dorrd)(port);
+ }
+ }
+
+ return 0;
+}
diff --git a/collectors/proc.plugin/sys_class_power_supply.c b/collectors/proc.plugin/sys_class_power_supply.c
new file mode 100644
index 0000000..c558a38
--- /dev/null
+++ b/collectors/proc.plugin/sys_class_power_supply.c
@@ -0,0 +1,410 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "plugin_proc.h"
+
+#define PLUGIN_PROC_MODULE_POWER_SUPPLY_NAME "/sys/class/power_supply"
+
+const char *ps_property_names[] = { "charge", "energy", "voltage"};
+const char *ps_property_titles[] = {"Battery charge", "Battery energy", "Power supply voltage"};
+const char *ps_property_units[] = { "Ah", "Wh", "V"};
+
+const char *ps_property_dim_names[] = {"empty_design", "empty", "now", "full", "full_design",
+ "empty_design", "empty", "now", "full", "full_design",
+ "min_design", "min", "now", "max", "max_design"};
+
+struct ps_property_dim {
+ char *name;
+ char *filename;
+ int fd;
+
+ RRDDIM *rd;
+ unsigned long long value;
+ int always_zero;
+
+ struct ps_property_dim *next;
+};
+
+struct ps_property {
+ char *name;
+ char *title;
+ char *units;
+
+ RRDSET *st;
+
+ struct ps_property_dim *property_dim_root;
+
+ struct ps_property *next;
+};
+
+struct capacity {
+ char *filename;
+ int fd;
+
+ RRDSET *st;
+ RRDDIM *rd;
+ unsigned long long value;
+};
+
+struct power_supply {
+ char *name;
+ uint32_t hash;
+ int found;
+
+ struct capacity *capacity;
+
+ struct ps_property *property_root;
+
+ struct power_supply *next;
+};
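+// Note: these structures model the sysfs hierarchy one-to-one. For a
+// hypothetical battery BAT0, the collector ends up reading files such as:
+//
+//     /sys/class/power_supply/BAT0/capacity            -> struct capacity
+//     /sys/class/power_supply/BAT0/charge_now          -> property "charge",
+//     /sys/class/power_supply/BAT0/charge_full            dimensions "now"/"full"/...
+//     /sys/class/power_supply/BAT0/voltage_min_design
+//
+// i.e. one "<property>_<dimension>" file per entry of the
+// ps_property_names x ps_property_dim_names tables above.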
+
+static struct power_supply *power_supply_root = NULL;
+static int files_num = 0;
+
+void power_supply_free(struct power_supply *ps) {
+ if(likely(ps)) {
+
+ // free capacity structure
+ if(likely(ps->capacity)) {
+ if(likely(ps->capacity->st)) rrdset_is_obsolete(ps->capacity->st);
+ freez(ps->capacity->filename);
+ if(likely(ps->capacity->fd != -1)) close(ps->capacity->fd);
+ files_num--;
+ freez(ps->capacity);
+ }
+ freez(ps->name);
+
+ struct ps_property *pr = ps->property_root;
+ while(likely(pr)) {
+
+ // free dimensions
+ struct ps_property_dim *pd = pr->property_dim_root;
+ while(likely(pd)) {
+ freez(pd->name);
+ freez(pd->filename);
+ if(likely(pd->fd != -1)) close(pd->fd);
+ files_num--;
+ struct ps_property_dim *d = pd;
+ pd = pd->next;
+ freez(d);
+ }
+
+ // free properties
+ if(likely(pr->st)) rrdset_is_obsolete(pr->st);
+ freez(pr->name);
+ freez(pr->title);
+ freez(pr->units);
+ struct ps_property *p = pr;
+ pr = pr->next;
+ freez(p);
+ }
+
+ // remove power supply from linked list
+ if(likely(ps == power_supply_root)) {
+ power_supply_root = ps->next;
+ }
+ else {
+ struct power_supply *last;
+ for(last = power_supply_root; last && last->next != ps; last = last->next);
+ if(likely(last)) last->next = ps->next;
+ }
+
+ freez(ps);
+ }
+}
+
+int do_sys_class_power_supply(int update_every, usec_t dt) {
+ (void)dt;
+ static int do_capacity = -1, do_property[3] = {-1};
+ static int keep_fds_open = CONFIG_BOOLEAN_NO, keep_fds_open_config = -1;
+ static char *dirname = NULL;
+
+ if(unlikely(do_capacity == -1)) {
+ do_capacity = config_get_boolean("plugin:proc:/sys/class/power_supply", "battery capacity", CONFIG_BOOLEAN_YES);
+ do_property[0] = config_get_boolean("plugin:proc:/sys/class/power_supply", "battery charge", CONFIG_BOOLEAN_NO);
+ do_property[1] = config_get_boolean("plugin:proc:/sys/class/power_supply", "battery energy", CONFIG_BOOLEAN_NO);
+ do_property[2] = config_get_boolean("plugin:proc:/sys/class/power_supply", "power supply voltage", CONFIG_BOOLEAN_NO);
+
+ keep_fds_open_config = config_get_boolean_ondemand("plugin:proc:/sys/class/power_supply", "keep files open", CONFIG_BOOLEAN_AUTO);
+
+ char filename[FILENAME_MAX + 1];
+ snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/class/power_supply");
+ dirname = config_get("plugin:proc:/sys/class/power_supply", "directory to monitor", filename);
+ }
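+ // Note: the corresponding netdata.conf section, with the defaults used
+ // above:
+ //
+ //     [plugin:proc:/sys/class/power_supply]
+ //         battery capacity = yes
+ //         battery charge = no
+ //         battery energy = no
+ //         power supply voltage = no
+ //         keep files open = auto
+ //         directory to monitor = /sys/class/power_supply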
+
+ DIR *dir = opendir(dirname);
+ if(unlikely(!dir)) {
+ error("Cannot read directory '%s'", dirname);
+ return 1;
+ }
+
+ struct dirent *de = NULL;
+ while(likely(de = readdir(dir))) {
+ if(likely(de->d_type == DT_DIR
+ && (
+ (de->d_name[0] == '.' && de->d_name[1] == '\0')
+ || (de->d_name[0] == '.' && de->d_name[1] == '.' && de->d_name[2] == '\0')
+ )))
+ continue;
+
+ if(likely(de->d_type == DT_LNK || de->d_type == DT_DIR)) {
+ uint32_t hash = simple_hash(de->d_name);
+
+ struct power_supply *ps;
+ for(ps = power_supply_root; ps; ps = ps->next) {
+ if(unlikely(ps->hash == hash && !strcmp(ps->name, de->d_name))) {
+ ps->found = 1;
+ break;
+ }
+ }
+
+ // allocate memory for power supply and initialize it
+ if(unlikely(!ps)) {
+ ps = callocz(sizeof(struct power_supply), 1);
+ ps->name = strdupz(de->d_name);
+ ps->hash = simple_hash(de->d_name);
+ ps->found = 1;
+ ps->next = power_supply_root;
+ power_supply_root = ps;
+
+ struct stat stbuf;
+ if(likely(do_capacity != CONFIG_BOOLEAN_NO)) {
+ char filename[FILENAME_MAX + 1];
+ snprintfz(filename, FILENAME_MAX, "%s/%s/%s", dirname, de->d_name, "capacity");
+ if (stat(filename, &stbuf) == 0) {
+ ps->capacity = callocz(sizeof(struct capacity), 1);
+ ps->capacity->filename = strdupz(filename);
+ ps->capacity->fd = -1;
+ files_num++;
+ }
+ }
+
+ // allocate memory and initialize structures for every property and file found
+ size_t pr_idx, pd_idx;
+ size_t prev_idx = 3; // there is no property with this index
+
+ for(pr_idx = 0; pr_idx < 3; pr_idx++) {
+ if(unlikely(do_property[pr_idx] != CONFIG_BOOLEAN_NO)) {
+ struct ps_property *pr = NULL;
+ int min_value_found = 0, max_value_found = 0;
+
+ for(pd_idx = pr_idx * 5; pd_idx < pr_idx * 5 + 5; pd_idx++) {
+
+ // check if file exists
+ char filename[FILENAME_MAX + 1];
+ snprintfz(filename, FILENAME_MAX, "%s/%s/%s_%s", dirname, de->d_name,
+ ps_property_names[pr_idx], ps_property_dim_names[pd_idx]);
+ if (stat(filename, &stbuf) == 0) {
+
+ if(unlikely(pd_idx == pr_idx * 5 + 1))
+ min_value_found = 1;
+ if(unlikely(pd_idx == pr_idx * 5 + 3))
+ max_value_found = 1;
+
+ // add chart
+ if(unlikely(prev_idx != pr_idx)) {
+ pr = callocz(sizeof(struct ps_property), 1);
+ pr->name = strdupz(ps_property_names[pr_idx]);
+ pr->title = strdupz(ps_property_titles[pr_idx]);
+ pr->units = strdupz(ps_property_units[pr_idx]);
+ prev_idx = pr_idx;
+ pr->next = ps->property_root;
+ ps->property_root = pr;
+ }
+
+ // add dimension
+ struct ps_property_dim *pd = callocz(sizeof(struct ps_property_dim), 1);
+ pd->name = strdupz(ps_property_dim_names[pd_idx]);
+ pd->filename = strdupz(filename);
+ pd->fd = -1;
+ files_num++;
+ pd->next = pr->property_dim_root;
+ pr->property_dim_root = pd;
+ }
+ }
+
+ // create a synthetic, always-zero empty/min dimension
+ if(unlikely(max_value_found && !min_value_found)) {
+ struct ps_property_dim *pd = callocz(sizeof(struct ps_property_dim), 1);
+ pd->name = strdupz(ps_property_dim_names[pr_idx * 5 + 1]);
+ pd->always_zero = 1;
+ pd->next = pr->property_dim_root;
+ pr->property_dim_root = pd;
+ }
+ }
+ }
+ }
+
+ // read capacity file
+ if(likely(ps->capacity)) {
+ char buffer[30 + 1];
+
+ if(unlikely(ps->capacity->fd == -1)) {
+ ps->capacity->fd = open(ps->capacity->filename, O_RDONLY, 0666);
+ if(unlikely(ps->capacity->fd == -1)) {
+ error("Cannot open file '%s'", ps->capacity->filename);
+ power_supply_free(ps);
+ ps = NULL;
+ }
+ }
+
+ if (ps)
+ {
+ ssize_t r = read(ps->capacity->fd, buffer, 30);
+ if(unlikely(r < 1)) {
+ error("Cannot read file '%s'", ps->capacity->filename);
+ power_supply_free(ps);
+ ps = NULL;
+ }
+ else {
+ buffer[r] = '\0';
+ ps->capacity->value = str2ull(buffer);
+
+ if(unlikely(!keep_fds_open)) {
+ close(ps->capacity->fd);
+ ps->capacity->fd = -1;
+ }
+ else if(unlikely(lseek(ps->capacity->fd, 0, SEEK_SET) == -1)) {
+ error("Cannot seek in file '%s'", ps->capacity->filename);
+ close(ps->capacity->fd);
+ ps->capacity->fd = -1;
+ }
+ }
+ }
+ }
+
+ // read property files
+ int read_error = 0;
+ struct ps_property *pr;
+ if (ps)
+ {
+ for(pr = ps->property_root; pr && !read_error; pr = pr->next) {
+ struct ps_property_dim *pd;
+ for(pd = pr->property_dim_root; pd; pd = pd->next) {
+ if(likely(!pd->always_zero)) {
+ char buffer[30 + 1];
+
+ if(unlikely(pd->fd == -1)) {
+ pd->fd = open(pd->filename, O_RDONLY, 0666);
+ if(unlikely(pd->fd == -1)) {
+ error("Cannot open file '%s'", pd->filename);
+ read_error = 1;
+ power_supply_free(ps);
+ break;
+ }
+ }
+
+ ssize_t r = read(pd->fd, buffer, 30);
+ if(unlikely(r < 1)) {
+ error("Cannot read file '%s'", pd->filename);
+ read_error = 1;
+ power_supply_free(ps);
+ break;
+ }
+ buffer[r] = '\0';
+ pd->value = str2ull(buffer);
+
+ if(unlikely(!keep_fds_open)) {
+ close(pd->fd);
+ pd->fd = -1;
+ }
+ else if(unlikely(lseek(pd->fd, 0, SEEK_SET) == -1)) {
+ error("Cannot seek in file '%s'", pd->filename);
+ close(pd->fd);
+ pd->fd = -1;
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+
+ closedir(dir);
+
+ keep_fds_open = keep_fds_open_config;
+ if(likely(keep_fds_open_config == CONFIG_BOOLEAN_AUTO)) {
+ if(unlikely(files_num > 32))
+ keep_fds_open = CONFIG_BOOLEAN_NO;
+ else
+ keep_fds_open = CONFIG_BOOLEAN_YES;
+ }
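+ // Note: keeping the descriptors open trades file-descriptor usage for one
+ // open()/close() pair saved per file per iteration; the lseek() back to
+ // offset 0 after each read is what makes a cached descriptor reusable.
+ // With "auto", the 32-file threshold caps how many descriptors this
+ // module may keep pinned open.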
+
+ // --------------------------------------------------------------------
+
+ struct power_supply *ps = power_supply_root;
+ while(unlikely(ps)) {
+ if(unlikely(!ps->found)) {
+ struct power_supply *f = ps;
+ ps = ps->next;
+ power_supply_free(f);
+ continue;
+ }
+
+ if(likely(ps->capacity)) {
+ if(unlikely(!ps->capacity->st)) {
+ ps->capacity->st = rrdset_create_localhost(
+ "powersupply_capacity"
+ , ps->name
+ , NULL
+ , ps->name
+ , "powersupply.capacity"
+ , "Battery capacity"
+ , "percentage"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_POWER_SUPPLY_NAME
+ , NETDATA_CHART_PRIO_POWER_SUPPLY_CAPACITY
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+ }
+ else
+ rrdset_next(ps->capacity->st);
+
+ if(unlikely(!ps->capacity->rd)) ps->capacity->rd = rrddim_add(ps->capacity->st, "capacity", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ rrddim_set_by_pointer(ps->capacity->st, ps->capacity->rd, ps->capacity->value);
+
+ rrdset_done(ps->capacity->st);
+ }
+
+ struct ps_property *pr;
+ for(pr = ps->property_root; pr; pr = pr->next) {
+ if(unlikely(!pr->st)) {
+ char id[RRD_ID_LENGTH_MAX + 1], context[RRD_ID_LENGTH_MAX + 1];
+ snprintfz(id, RRD_ID_LENGTH_MAX, "powersupply_%s", pr->name);
+ snprintfz(context, RRD_ID_LENGTH_MAX, "powersupply.%s", pr->name);
+
+ pr->st = rrdset_create_localhost(
+ id
+ , ps->name
+ , NULL
+ , ps->name
+ , context
+ , pr->title
+ , pr->units
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_POWER_SUPPLY_NAME
+ , NETDATA_CHART_PRIO_POWER_SUPPLY_CAPACITY
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+ }
+ else
+ rrdset_next(pr->st);
+
+ struct ps_property_dim *pd;
+ for(pd = pr->property_dim_root; pd; pd = pd->next) {
+ if(unlikely(!pd->rd)) pd->rd = rrddim_add(pr->st, pd->name, NULL, 1, 1000000, RRD_ALGORITHM_ABSOLUTE);
+ rrddim_set_by_pointer(pr->st, pd->rd, pd->value);
+ }
+
+ rrdset_done(pr->st);
+ }
+
+ ps->found = 0;
+ ps = ps->next;
+ }
+
+ return 0;
+}
diff --git a/collectors/proc.plugin/sys_devices_system_edac_mc.c b/collectors/proc.plugin/sys_devices_system_edac_mc.c
new file mode 100644
index 0000000..b111483
--- /dev/null
+++ b/collectors/proc.plugin/sys_devices_system_edac_mc.c
@@ -0,0 +1,208 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "plugin_proc.h"
+
+struct mc {
+ char *name;
+ char ce_updated;
+ char ue_updated;
+
+ char *ce_count_filename;
+ char *ue_count_filename;
+
+ procfile *ce_ff;
+ procfile *ue_ff;
+
+ collected_number ce_count;
+ collected_number ue_count;
+
+ RRDDIM *ce_rd;
+ RRDDIM *ue_rd;
+
+ struct mc *next;
+};
+static struct mc *mc_root = NULL;
+
+static void find_all_mc() {
+ char name[FILENAME_MAX + 1];
+ snprintfz(name, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/devices/system/edac/mc");
+ char *dirname = config_get("plugin:proc:/sys/devices/system/edac/mc", "directory to monitor", name);
+
+ DIR *dir = opendir(dirname);
+ if(unlikely(!dir)) {
+ error("Cannot read ECC memory errors directory '%s'", dirname);
+ return;
+ }
+
+ struct dirent *de = NULL;
+ while((de = readdir(dir))) {
+ if(de->d_type == DT_DIR && de->d_name[0] == 'm' && de->d_name[1] == 'c' && isdigit(de->d_name[2])) {
+ struct mc *m = callocz(1, sizeof(struct mc));
+ m->name = strdupz(de->d_name);
+
+ struct stat st;
+
+ snprintfz(name, FILENAME_MAX, "%s/%s/ce_count", dirname, de->d_name);
+ if(stat(name, &st) != -1)
+ m->ce_count_filename = strdupz(name);
+
+ snprintfz(name, FILENAME_MAX, "%s/%s/ue_count", dirname, de->d_name);
+ if(stat(name, &st) != -1)
+ m->ue_count_filename = strdupz(name);
+
+ if(!m->ce_count_filename && !m->ue_count_filename) {
+ freez(m->name);
+ freez(m);
+ }
+ else {
+ m->next = mc_root;
+ mc_root = m;
+ }
+ }
+ }
+
+ closedir(dir);
+}
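+// Note: on a host with EDAC-capable memory controllers, the directory
+// scanned above looks like this (illustrative):
+//
+//     /sys/devices/system/edac/mc/mc0/ce_count    correctable errors so far
+//     /sys/devices/system/edac/mc/mc0/ue_count    uncorrectable errors so far
+//     /sys/devices/system/edac/mc/mc1/...
+//
+// Controllers exposing neither file are skipped by the check above.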
+
+int do_proc_sys_devices_system_edac_mc(int update_every, usec_t dt) {
+ (void)dt;
+
+ if(unlikely(mc_root == NULL)) {
+ find_all_mc();
+ if(unlikely(mc_root == NULL))
+ return 1;
+ }
+
+ static int do_ce = -1, do_ue = -1;
+ calculated_number ce_sum = 0, ue_sum = 0;
+ struct mc *m;
+
+ if(unlikely(do_ce == -1)) {
+ do_ce = config_get_boolean_ondemand("plugin:proc:/sys/devices/system/edac/mc", "enable ECC memory correctable errors", CONFIG_BOOLEAN_AUTO);
+ do_ue = config_get_boolean_ondemand("plugin:proc:/sys/devices/system/edac/mc", "enable ECC memory uncorrectable errors", CONFIG_BOOLEAN_AUTO);
+ }
+
+ if(do_ce != CONFIG_BOOLEAN_NO) {
+ for(m = mc_root; m; m = m->next) {
+ if(m->ce_count_filename) {
+ m->ce_updated = 0;
+
+ if(unlikely(!m->ce_ff)) {
+ m->ce_ff = procfile_open(m->ce_count_filename, " \t", PROCFILE_FLAG_DEFAULT);
+ if(unlikely(!m->ce_ff))
+ continue;
+ }
+
+ m->ce_ff = procfile_readall(m->ce_ff);
+ if(unlikely(!m->ce_ff || procfile_lines(m->ce_ff) < 1 || procfile_linewords(m->ce_ff, 0) < 1))
+ continue;
+
+ m->ce_count = str2ull(procfile_lineword(m->ce_ff, 0, 0));
+ ce_sum += m->ce_count;
+ m->ce_updated = 1;
+ }
+ }
+ }
+
+ if(do_ue != CONFIG_BOOLEAN_NO) {
+ for(m = mc_root; m; m = m->next) {
+ if(m->ue_count_filename) {
+ m->ue_updated = 0;
+
+ if(unlikely(!m->ue_ff)) {
+ m->ue_ff = procfile_open(m->ue_count_filename, " \t", PROCFILE_FLAG_DEFAULT);
+ if(unlikely(!m->ue_ff))
+ continue;
+ }
+
+ m->ue_ff = procfile_readall(m->ue_ff);
+ if(unlikely(!m->ue_ff || procfile_lines(m->ue_ff) < 1 || procfile_linewords(m->ue_ff, 0) < 1))
+ continue;
+
+ m->ue_count = str2ull(procfile_lineword(m->ue_ff, 0, 0));
+ ue_sum += m->ue_count;
+ m->ue_updated = 1;
+ }
+ }
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_ce == CONFIG_BOOLEAN_YES || (do_ce == CONFIG_BOOLEAN_AUTO &&
+ (ce_sum > 0 || netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
+ do_ce = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *ce_st = NULL;
+
+ if(unlikely(!ce_st)) {
+ ce_st = rrdset_create_localhost(
+ "mem"
+ , "ecc_ce"
+ , NULL
+ , "ecc"
+ , NULL
+ , "ECC Memory Correctable Errors"
+ , "errors"
+ , PLUGIN_PROC_NAME
+ , "/sys/devices/system/edac/mc"
+ , NETDATA_CHART_PRIO_MEM_HW_ECC_CE
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+ }
+ else
+ rrdset_next(ce_st);
+
+ for(m = mc_root; m; m = m->next) {
+ if (m->ce_count_filename && m->ce_updated) {
+ if(unlikely(!m->ce_rd))
+ m->ce_rd = rrddim_add(ce_st, m->name, NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+
+ rrddim_set_by_pointer(ce_st, m->ce_rd, m->ce_count);
+ }
+ }
+
+ rrdset_done(ce_st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_ue == CONFIG_BOOLEAN_YES || (do_ue == CONFIG_BOOLEAN_AUTO &&
+ (ue_sum > 0 || netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
+ do_ue = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *ue_st = NULL;
+
+ if(unlikely(!ue_st)) {
+ ue_st = rrdset_create_localhost(
+ "mem"
+ , "ecc_ue"
+ , NULL
+ , "ecc"
+ , NULL
+ , "ECC Memory Uncorrectable Errors"
+ , "errors"
+ , PLUGIN_PROC_NAME
+ , "/sys/devices/system/edac/mc"
+ , NETDATA_CHART_PRIO_MEM_HW_ECC_UE
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+ }
+ else
+ rrdset_next(ue_st);
+
+ for(m = mc_root; m; m = m->next) {
+ if (m->ue_count_filename && m->ue_updated) {
+ if(unlikely(!m->ue_rd))
+ m->ue_rd = rrddim_add(ue_st, m->name, NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+
+ rrddim_set_by_pointer(ue_st, m->ue_rd, m->ue_count);
+ }
+ }
+
+ rrdset_done(ue_st);
+ }
+
+ return 0;
+}
diff --git a/collectors/proc.plugin/sys_devices_system_node.c b/collectors/proc.plugin/sys_devices_system_node.c
new file mode 100644
index 0000000..ff408ed
--- /dev/null
+++ b/collectors/proc.plugin/sys_devices_system_node.c
@@ -0,0 +1,164 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "plugin_proc.h"
+
+struct node {
+ char *name;
+ char *numastat_filename;
+ procfile *numastat_ff;
+ RRDSET *numastat_st;
+ struct node *next;
+};
+static struct node *numa_root = NULL;
+
+static int find_all_nodes() {
+ int numa_node_count = 0;
+ char name[FILENAME_MAX + 1];
+ snprintfz(name, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/devices/system/node");
+ char *dirname = config_get("plugin:proc:/sys/devices/system/node", "directory to monitor", name);
+
+ DIR *dir = opendir(dirname);
+ if(!dir) {
+ error("Cannot read NUMA node directory '%s'", dirname);
+ return 0;
+ }
+
+ struct dirent *de = NULL;
+ while((de = readdir(dir))) {
+ if(de->d_type != DT_DIR)
+ continue;
+
+ if(strncmp(de->d_name, "node", 4) != 0)
+ continue;
+
+ if(!isdigit(de->d_name[4]))
+ continue;
+
+ numa_node_count++;
+
+ struct node *m = callocz(1, sizeof(struct node));
+ m->name = strdupz(de->d_name);
+
+ struct stat st;
+
+ snprintfz(name, FILENAME_MAX, "%s/%s/numastat", dirname, de->d_name);
+ if(stat(name, &st) == -1) {
+ freez(m->name);
+ freez(m);
+ continue;
+ }
+
+ m->numastat_filename = strdupz(name);
+
+ m->next = numa_root;
+ numa_root = m;
+ }
+
+ closedir(dir);
+
+ return numa_node_count;
+}
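+// Note: each numastat file read below is a list of "name value" pairs, e.g.
+// (illustrative values):
+//
+//     numa_hit 126831543
+//     numa_miss 0
+//     numa_foreign 0
+//     interleave_hit 19265
+//     local_node 126822188
+//     other_node 9355
+//
+// which is why the parser below expects two words per line.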
+
+int do_proc_sys_devices_system_node(int update_every, usec_t dt) {
+ (void)dt;
+
+ static uint32_t hash_local_node = 0, hash_numa_foreign = 0, hash_interleave_hit = 0, hash_other_node = 0, hash_numa_hit = 0, hash_numa_miss = 0;
+ static int do_numastat = -1, numa_node_count = 0;
+ struct node *m;
+
+ if(unlikely(numa_root == NULL)) {
+ numa_node_count = find_all_nodes();
+ if(unlikely(numa_root == NULL))
+ return 1;
+ }
+
+ if(unlikely(do_numastat == -1)) {
+ do_numastat = config_get_boolean_ondemand("plugin:proc:/sys/devices/system/node", "enable per-node numa metrics", CONFIG_BOOLEAN_AUTO);
+
+ hash_local_node = simple_hash("local_node");
+ hash_numa_foreign = simple_hash("numa_foreign");
+ hash_interleave_hit = simple_hash("interleave_hit");
+ hash_other_node = simple_hash("other_node");
+ hash_numa_hit = simple_hash("numa_hit");
+ hash_numa_miss = simple_hash("numa_miss");
+ }
+
+ if(do_numastat == CONFIG_BOOLEAN_YES || (do_numastat == CONFIG_BOOLEAN_AUTO &&
+ (numa_node_count >= 2 || netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
+ for(m = numa_root; m; m = m->next) {
+ if(m->numastat_filename) {
+
+ if(unlikely(!m->numastat_ff)) {
+ m->numastat_ff = procfile_open(m->numastat_filename, " ", PROCFILE_FLAG_DEFAULT);
+
+ if(unlikely(!m->numastat_ff))
+ continue;
+ }
+
+ m->numastat_ff = procfile_readall(m->numastat_ff);
+ if(unlikely(!m->numastat_ff || procfile_lines(m->numastat_ff) < 1 || procfile_linewords(m->numastat_ff, 0) < 1))
+ continue;
+
+ if(unlikely(!m->numastat_st)) {
+ m->numastat_st = rrdset_create_localhost(
+ "mem"
+ , m->name
+ , NULL
+ , "numa"
+ , NULL
+ , "NUMA events"
+ , "events/s"
+ , PLUGIN_PROC_NAME
+ , "/sys/devices/system/node"
+ , NETDATA_CHART_PRIO_MEM_NUMA_NODES
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rrdset_flag_set(m->numastat_st, RRDSET_FLAG_DETAIL);
+
+ rrddim_add(m->numastat_st, "numa_hit", "hit", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(m->numastat_st, "numa_miss", "miss", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(m->numastat_st, "local_node", "local", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(m->numastat_st, "numa_foreign", "foreign", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(m->numastat_st, "interleave_hit", "interleave", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(m->numastat_st, "other_node", "other", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+
+ }
+ else rrdset_next(m->numastat_st);
+
+ size_t lines = procfile_lines(m->numastat_ff), l;
+ for(l = 0; l < lines; l++) {
+ size_t words = procfile_linewords(m->numastat_ff, l);
+
+ if(unlikely(words < 2)) {
+ if(unlikely(words))
+ error("Cannot read %s numastat line %zu. Expected 2 params, read %zu.", m->name, l, words);
+ continue;
+ }
+
+ char *name = procfile_lineword(m->numastat_ff, l, 0);
+ char *value = procfile_lineword(m->numastat_ff, l, 1);
+
+ if (unlikely(!name || !*name || !value || !*value))
+ continue;
+
+ uint32_t hash = simple_hash(name);
+ if(likely(
+ (hash == hash_numa_hit && !strcmp(name, "numa_hit"))
+ || (hash == hash_numa_miss && !strcmp(name, "numa_miss"))
+ || (hash == hash_local_node && !strcmp(name, "local_node"))
+ || (hash == hash_numa_foreign && !strcmp(name, "numa_foreign"))
+ || (hash == hash_interleave_hit && !strcmp(name, "interleave_hit"))
+ || (hash == hash_other_node && !strcmp(name, "other_node"))
+ ))
+ rrddim_set(m->numastat_st, name, (collected_number)str2kernel_uint_t(value));
+ }
+
+ rrdset_done(m->numastat_st);
+ }
+ }
+ }
+
+ return 0;
+}
diff --git a/collectors/proc.plugin/sys_fs_btrfs.c b/collectors/proc.plugin/sys_fs_btrfs.c
new file mode 100644
index 0000000..4e58a1a
--- /dev/null
+++ b/collectors/proc.plugin/sys_fs_btrfs.c
@@ -0,0 +1,734 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "plugin_proc.h"
+
+#define PLUGIN_PROC_MODULE_BTRFS_NAME "/sys/fs/btrfs"
+
+typedef struct btrfs_disk {
+ char *name;
+ uint32_t hash;
+ int exists;
+
+ char *size_filename;
+ char *hw_sector_size_filename;
+ unsigned long long size;
+ unsigned long long hw_sector_size;
+
+ struct btrfs_disk *next;
+} BTRFS_DISK;
+
+typedef struct btrfs_node {
+ int exists;
+ int logged_error;
+
+ char *id;
+ uint32_t hash;
+
+ char *label;
+
+ // unsigned long long int sectorsize;
+ // unsigned long long int nodesize;
+ // unsigned long long int quota_override;
+
+ #define declare_btrfs_allocation_section_field(SECTION, FIELD) \
+ char *allocation_ ## SECTION ## _ ## FIELD ## _filename; \
+ unsigned long long int allocation_ ## SECTION ## _ ## FIELD;
+
+ #define declare_btrfs_allocation_field(FIELD) \
+ char *allocation_ ## FIELD ## _filename; \
+ unsigned long long int allocation_ ## FIELD;
+
+ RRDSET *st_allocation_disks;
+ RRDDIM *rd_allocation_disks_unallocated;
+ RRDDIM *rd_allocation_disks_data_used;
+ RRDDIM *rd_allocation_disks_data_free;
+ RRDDIM *rd_allocation_disks_metadata_used;
+ RRDDIM *rd_allocation_disks_metadata_free;
+ RRDDIM *rd_allocation_disks_system_used;
+ RRDDIM *rd_allocation_disks_system_free;
+ unsigned long long all_disks_total;
+
+ RRDSET *st_allocation_data;
+ RRDDIM *rd_allocation_data_free;
+ RRDDIM *rd_allocation_data_used;
+ declare_btrfs_allocation_section_field(data, total_bytes)
+ declare_btrfs_allocation_section_field(data, bytes_used)
+ declare_btrfs_allocation_section_field(data, disk_total)
+ declare_btrfs_allocation_section_field(data, disk_used)
+
+ RRDSET *st_allocation_metadata;
+ RRDDIM *rd_allocation_metadata_free;
+ RRDDIM *rd_allocation_metadata_used;
+ RRDDIM *rd_allocation_metadata_reserved;
+ declare_btrfs_allocation_section_field(metadata, total_bytes)
+ declare_btrfs_allocation_section_field(metadata, bytes_used)
+ declare_btrfs_allocation_section_field(metadata, disk_total)
+ declare_btrfs_allocation_section_field(metadata, disk_used)
+ //declare_btrfs_allocation_field(global_rsv_reserved)
+ declare_btrfs_allocation_field(global_rsv_size)
+
+ RRDSET *st_allocation_system;
+ RRDDIM *rd_allocation_system_free;
+ RRDDIM *rd_allocation_system_used;
+ declare_btrfs_allocation_section_field(system, total_bytes)
+ declare_btrfs_allocation_section_field(system, bytes_used)
+ declare_btrfs_allocation_section_field(system, disk_total)
+ declare_btrfs_allocation_section_field(system, disk_used)
+
+ BTRFS_DISK *disks;
+
+ struct btrfs_node *next;
+} BTRFS_NODE;
+
+static BTRFS_NODE *nodes = NULL;
+
+static inline void btrfs_free_disk(BTRFS_DISK *d) {
+ freez(d->name);
+ freez(d->size_filename);
+ freez(d->hw_sector_size_filename);
+ freez(d);
+}
+
+static inline void btrfs_free_node(BTRFS_NODE *node) {
+ // info("BTRFS: destroying '%s'", node->id);
+
+ if(node->st_allocation_disks)
+ rrdset_is_obsolete(node->st_allocation_disks);
+
+ if(node->st_allocation_data)
+ rrdset_is_obsolete(node->st_allocation_data);
+
+ if(node->st_allocation_metadata)
+ rrdset_is_obsolete(node->st_allocation_metadata);
+
+ if(node->st_allocation_system)
+ rrdset_is_obsolete(node->st_allocation_system);
+
+ freez(node->allocation_data_bytes_used_filename);
+ freez(node->allocation_data_total_bytes_filename);
+
+ freez(node->allocation_metadata_bytes_used_filename);
+ freez(node->allocation_metadata_total_bytes_filename);
+
+ freez(node->allocation_system_bytes_used_filename);
+ freez(node->allocation_system_total_bytes_filename);
+
+ while(node->disks) {
+ BTRFS_DISK *d = node->disks;
+ node->disks = node->disks->next;
+ btrfs_free_disk(d);
+ }
+
+ freez(node->label);
+ freez(node->id);
+ freez(node);
+}
+
+static inline int find_btrfs_disks(BTRFS_NODE *node, const char *path) {
+ char filename[FILENAME_MAX + 1];
+
+ node->all_disks_total = 0;
+
+ BTRFS_DISK *d;
+ for(d = node->disks ; d ; d = d->next)
+ d->exists = 0;
+
+ DIR *dir = opendir(path);
+ if (!dir) {
+ if(!node->logged_error) {
+ error("BTRFS: Cannot open directory '%s'.", path);
+ node->logged_error = 1;
+ }
+ return 1;
+ }
+ node->logged_error = 0;
+
+ struct dirent *de = NULL;
+ while ((de = readdir(dir))) {
+ if (de->d_type != DT_LNK
+ || !strcmp(de->d_name, ".")
+ || !strcmp(de->d_name, "..")
+ ) {
+ // info("BTRFS: ignoring '%s'", de->d_name);
+ continue;
+ }
+
+ uint32_t hash = simple_hash(de->d_name);
+
+ // --------------------------------------------------------------------
+ // search for it
+
+ for(d = node->disks ; d ; d = d->next) {
+ if(hash == d->hash && !strcmp(de->d_name, d->name))
+ break;
+ }
+
+ // --------------------------------------------------------------------
+ // did we find it?
+
+ if(!d) {
+ d = callocz(sizeof(BTRFS_DISK), 1);
+
+ d->name = strdupz(de->d_name);
+ d->hash = simple_hash(d->name);
+
+ snprintfz(filename, FILENAME_MAX, "%s/%s/size", path, de->d_name);
+ d->size_filename = strdupz(filename);
+
+ // for bcache
+ snprintfz(filename, FILENAME_MAX, "%s/%s/bcache/../queue/hw_sector_size", path, de->d_name);
+ struct stat sb;
+ if(stat(filename, &sb) == -1) {
+ // for disks
+ snprintfz(filename, FILENAME_MAX, "%s/%s/queue/hw_sector_size", path, de->d_name);
+ if(stat(filename, &sb) == -1)
+ // for partitions
+ snprintfz(filename, FILENAME_MAX, "%s/%s/../queue/hw_sector_size", path, de->d_name);
+ }
+
+ d->hw_sector_size_filename = strdupz(filename);
+
+ // link it
+ d->next = node->disks;
+ node->disks = d;
+ }
+
+ d->exists = 1;
+
+
+ // --------------------------------------------------------------------
+ // update the values
+
+ if(read_single_number_file(d->size_filename, &d->size) != 0) {
+ error("BTRFS: failed to read '%s'", d->size_filename);
+ d->exists = 0;
+ continue;
+ }
+
+ if(read_single_number_file(d->hw_sector_size_filename, &d->hw_sector_size) != 0) {
+ error("BTRFS: failed to read '%s'", d->hw_sector_size_filename);
+ d->exists = 0;
+ continue;
+ }
+
+ node->all_disks_total += d->size * d->hw_sector_size;
+ }
+ closedir(dir);
+
+ // ------------------------------------------------------------------------
+ // cleanup
+
+ BTRFS_DISK *last = NULL;
+ d = node->disks;
+
+ while(d) {
+ if(unlikely(!d->exists)) {
+ if(unlikely(node->disks == d)) {
+ node->disks = d->next;
+ btrfs_free_disk(d);
+ d = node->disks;
+ last = NULL;
+ }
+ else {
+ last->next = d->next;
+ btrfs_free_disk(d);
+ d = last->next;
+ }
+
+ continue;
+ }
+
+ last = d;
+ d = d->next;
+ }
+
+ return 0;
+}
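+// Note: the directory walked above is /sys/fs/btrfs/<UUID>/devices, which
+// holds one symlink per member block device (hence the DT_LNK check). For
+// each device, "size" (in sectors) is multiplied by the hw_sector_size
+// located via the bcache/disk/partition fallbacks to estimate the pool's
+// total raw capacity in bytes.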
+
+
+static inline int find_all_btrfs_pools(const char *path) {
+ static int logged_error = 0;
+ char filename[FILENAME_MAX + 1];
+
+ BTRFS_NODE *node;
+ for(node = nodes ; node ; node = node->next)
+ node->exists = 0;
+
+ DIR *dir = opendir(path);
+ if (!dir) {
+ if(!logged_error) {
+ error("BTRFS: Cannot open directory '%s'.", path);
+ logged_error = 1;
+ }
+ return 1;
+ }
+ logged_error = 0;
+
+ struct dirent *de = NULL;
+ while ((de = readdir(dir))) {
+ if(de->d_type != DT_DIR
+ || !strcmp(de->d_name, ".")
+ || !strcmp(de->d_name, "..")
+ || !strcmp(de->d_name, "features")
+ ) {
+ // info("BTRFS: ignoring '%s'", de->d_name);
+ continue;
+ }
+
+ uint32_t hash = simple_hash(de->d_name);
+
+ // search for it
+ for(node = nodes ; node ; node = node->next) {
+ if(hash == node->hash && !strcmp(de->d_name, node->id))
+ break;
+ }
+
+ // did we find it?
+ if(node) {
+ // info("BTRFS: already exists '%s'", de->d_name);
+ node->exists = 1;
+
+ // update the disk sizes
+ snprintfz(filename, FILENAME_MAX, "%s/%s/devices", path, de->d_name);
+ find_btrfs_disks(node, filename);
+
+ continue;
+ }
+
+ // info("BTRFS: adding '%s'", de->d_name);
+
+ // not found, create it
+ node = callocz(sizeof(BTRFS_NODE), 1);
+
+ node->id = strdupz(de->d_name);
+ node->hash = simple_hash(node->id);
+ node->exists = 1;
+
+ {
+ char label[FILENAME_MAX + 1] = "";
+
+ snprintfz(filename, FILENAME_MAX, "%s/%s/label", path, de->d_name);
+ if(read_file(filename, label, FILENAME_MAX) != 0) {
+ error("BTRFS: failed to read '%s'", filename);
+ btrfs_free_node(node);
+ continue;
+ }
+
+ char *s = label;
+ if (s[0])
+ s = trim(label);
+
+ if(s && s[0])
+ node->label = strdupz(s);
+ else
+ node->label = strdupz(node->id);
+ }
+
+ //snprintfz(filename, FILENAME_MAX, "%s/%s/sectorsize", path, de->d_name);
+ //if(read_single_number_file(filename, &node->sectorsize) != 0) {
+ // error("BTRFS: failed to read '%s'", filename);
+ // btrfs_free_node(node);
+ // continue;
+ //}
+
+ //snprintfz(filename, FILENAME_MAX, "%s/%s/nodesize", path, de->d_name);
+ //if(read_single_number_file(filename, &node->nodesize) != 0) {
+ // error("BTRFS: failed to read '%s'", filename);
+ // btrfs_free_node(node);
+ // continue;
+ //}
+
+ //snprintfz(filename, FILENAME_MAX, "%s/%s/quota_override", path, de->d_name);
+ //if(read_single_number_file(filename, &node->quota_override) != 0) {
+ // error("BTRFS: failed to read '%s'", filename);
+ // btrfs_free_node(node);
+ // continue;
+ //}
+
+ // --------------------------------------------------------------------
+ // macros to simplify our life
+
+ #define init_btrfs_allocation_field(FIELD) {\
+ snprintfz(filename, FILENAME_MAX, "%s/%s/allocation/" #FIELD, path, de->d_name); \
+ if(read_single_number_file(filename, &node->allocation_ ## FIELD) != 0) {\
+ error("BTRFS: failed to read '%s'", filename);\
+ btrfs_free_node(node);\
+ continue;\
+ }\
+ if(!node->allocation_ ## FIELD ## _filename)\
+ node->allocation_ ## FIELD ## _filename = strdupz(filename);\
+ }
+
+ #define init_btrfs_allocation_section_field(SECTION, FIELD) {\
+ snprintfz(filename, FILENAME_MAX, "%s/%s/allocation/" #SECTION "/" #FIELD, path, de->d_name); \
+ if(read_single_number_file(filename, &node->allocation_ ## SECTION ## _ ## FIELD) != 0) {\
+ error("BTRFS: failed to read '%s'", filename);\
+ btrfs_free_node(node);\
+ continue;\
+ }\
+ if(!node->allocation_ ## SECTION ## _ ## FIELD ## _filename)\
+ node->allocation_ ## SECTION ## _ ## FIELD ## _filename = strdupz(filename);\
+ }
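+ // For example, init_btrfs_allocation_section_field(data, total_bytes)
+ // expands (roughly) to reading
+ // "<path>/<id>/allocation/data/total_bytes"
+ // into node->allocation_data_total_bytes and caching the filename in
+ // node->allocation_data_total_bytes_filename for later re-reads.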
+
+ // --------------------------------------------------------------------
+ // allocation/data
+
+ init_btrfs_allocation_section_field(data, total_bytes);
+ init_btrfs_allocation_section_field(data, bytes_used);
+ init_btrfs_allocation_section_field(data, disk_total);
+ init_btrfs_allocation_section_field(data, disk_used);
+
+
+ // --------------------------------------------------------------------
+ // allocation/metadata
+
+ init_btrfs_allocation_section_field(metadata, total_bytes);
+ init_btrfs_allocation_section_field(metadata, bytes_used);
+ init_btrfs_allocation_section_field(metadata, disk_total);
+ init_btrfs_allocation_section_field(metadata, disk_used);
+
+ init_btrfs_allocation_field(global_rsv_size);
+ // init_btrfs_allocation_field(global_rsv_reserved);
+
+
+ // --------------------------------------------------------------------
+ // allocation/system
+
+ init_btrfs_allocation_section_field(system, total_bytes);
+ init_btrfs_allocation_section_field(system, bytes_used);
+ init_btrfs_allocation_section_field(system, disk_total);
+ init_btrfs_allocation_section_field(system, disk_used);
+
+
+ // --------------------------------------------------------------------
+ // find all disks related to this node
+ // and collect their sizes
+
+ snprintfz(filename, FILENAME_MAX, "%s/%s/devices", path, de->d_name);
+ find_btrfs_disks(node, filename);
+
+
+ // --------------------------------------------------------------------
+ // link it
+
+ // info("BTRFS: linking '%s'", node->id);
+ node->next = nodes;
+ nodes = node;
+ }
+ closedir(dir);
+
+
+ // ------------------------------------------------------------------------
+ // cleanup
+
+ BTRFS_NODE *last = NULL;
+ node = nodes;
+
+ while(node) {
+ if(unlikely(!node->exists)) {
+ if(unlikely(nodes == node)) {
+ nodes = node->next;
+ btrfs_free_node(node);
+ node = nodes;
+ last = NULL;
+ }
+ else {
+ last->next = node->next;
+ btrfs_free_node(node);
+ node = last->next;
+ }
+
+ continue;
+ }
+
+ last = node;
+ node = node->next;
+ }
+
+ return 0;
+}
+
+int do_sys_fs_btrfs(int update_every, usec_t dt) {
+ static int initialized = 0
+ , do_allocation_disks = CONFIG_BOOLEAN_AUTO
+ , do_allocation_system = CONFIG_BOOLEAN_AUTO
+ , do_allocation_data = CONFIG_BOOLEAN_AUTO
+ , do_allocation_metadata = CONFIG_BOOLEAN_AUTO;
+
+ static usec_t refresh_delta = 0, refresh_every = 60 * USEC_PER_SEC;
+ static char *btrfs_path = NULL;
+
+ (void)dt;
+
+ if(unlikely(!initialized)) {
+ initialized = 1;
+
+ char filename[FILENAME_MAX + 1];
+ snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/fs/btrfs");
+ btrfs_path = config_get("plugin:proc:/sys/fs/btrfs", "path to monitor", filename);
+
+ refresh_every = config_get_number("plugin:proc:/sys/fs/btrfs", "check for btrfs changes every", refresh_every / USEC_PER_SEC) * USEC_PER_SEC;
+ refresh_delta = refresh_every;
+
+ do_allocation_disks = config_get_boolean_ondemand("plugin:proc:/sys/fs/btrfs", "physical disks allocation", do_allocation_disks);
+ do_allocation_data = config_get_boolean_ondemand("plugin:proc:/sys/fs/btrfs", "data allocation", do_allocation_data);
+ do_allocation_metadata = config_get_boolean_ondemand("plugin:proc:/sys/fs/btrfs", "metadata allocation", do_allocation_metadata);
+ do_allocation_system = config_get_boolean_ondemand("plugin:proc:/sys/fs/btrfs", "system allocation", do_allocation_system);
+ }
+
+ refresh_delta += dt;
+ if(refresh_delta >= refresh_every) {
+ refresh_delta = 0;
+ find_all_btrfs_pools(btrfs_path);
+ }
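+ // Note: refresh_delta was initialized to refresh_every above, so the very
+ // first call discovers the pools immediately; afterwards the comparatively
+ // expensive rediscovery runs only once per refresh_every (60s by default),
+ // while the counters are re-read on every iteration below.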
+
+ BTRFS_NODE *node;
+ for(node = nodes; node ; node = node->next) {
+ // --------------------------------------------------------------------
+ // allocation/system
+
+ #define collect_btrfs_allocation_field(FIELD) \
+ read_single_number_file(node->allocation_ ## FIELD ## _filename, &node->allocation_ ## FIELD)
+
+ #define collect_btrfs_allocation_section_field(SECTION, FIELD) \
+ read_single_number_file(node->allocation_ ## SECTION ## _ ## FIELD ## _filename, &node->allocation_ ## SECTION ## _ ## FIELD)
+
+ if(do_allocation_disks != CONFIG_BOOLEAN_NO) {
+ if( collect_btrfs_allocation_section_field(data, disk_total) != 0
+ || collect_btrfs_allocation_section_field(data, disk_used) != 0
+ || collect_btrfs_allocation_section_field(metadata, disk_total) != 0
+ || collect_btrfs_allocation_section_field(metadata, disk_used) != 0
+ || collect_btrfs_allocation_section_field(system, disk_total) != 0
+ || collect_btrfs_allocation_section_field(system, disk_used) != 0) {
+ error("BTRFS: failed to collect physical disks allocation for '%s'", node->id);
+ // make it refresh btrfs at the next iteration
+ refresh_delta = refresh_every;
+ continue;
+ }
+ }
+
+ if(do_allocation_data != CONFIG_BOOLEAN_NO) {
+ if (collect_btrfs_allocation_section_field(data, total_bytes) != 0
+ || collect_btrfs_allocation_section_field(data, bytes_used) != 0) {
+ error("BTRFS: failed to collect allocation/data for '%s'", node->id);
+ // make it refresh btrfs at the next iteration
+ refresh_delta = refresh_every;
+ continue;
+ }
+ }
+
+ if(do_allocation_metadata != CONFIG_BOOLEAN_NO) {
+ if (collect_btrfs_allocation_section_field(metadata, total_bytes) != 0
+ || collect_btrfs_allocation_section_field(metadata, bytes_used) != 0
+ || collect_btrfs_allocation_field(global_rsv_size) != 0
+ ) {
+ error("BTRFS: failed to collect allocation/metadata for '%s'", node->id);
+ // make it refresh btrfs at the next iteration
+ refresh_delta = refresh_every;
+ continue;
+ }
+ }
+
+ if(do_allocation_system != CONFIG_BOOLEAN_NO) {
+ if (collect_btrfs_allocation_section_field(system, total_bytes) != 0
+ || collect_btrfs_allocation_section_field(system, bytes_used) != 0) {
+ error("BTRFS: failed to collect allocation/system for '%s'", node->id);
+ // make it refresh btrfs at the next iteration
+ refresh_delta = refresh_every;
+ continue;
+ }
+ }
+
+ // --------------------------------------------------------------------
+ // allocation/disks
+
+ if(do_allocation_disks == CONFIG_BOOLEAN_YES || (do_allocation_disks == CONFIG_BOOLEAN_AUTO &&
+ ((node->all_disks_total && node->allocation_data_disk_total) ||
+ netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
+ do_allocation_disks = CONFIG_BOOLEAN_YES;
+
+ if(unlikely(!node->st_allocation_disks)) {
+ char id[RRD_ID_LENGTH_MAX + 1], name[RRD_ID_LENGTH_MAX + 1], title[200 + 1];
+
+ snprintf(id, RRD_ID_LENGTH_MAX, "disk_%s", node->id);
+ snprintf(name, RRD_ID_LENGTH_MAX, "disk_%s", node->label);
+ snprintf(title, 200, "BTRFS Physical Disk Allocation for %s", node->label);
+
+ netdata_fix_chart_id(id);
+ netdata_fix_chart_name(name);
+
+ node->st_allocation_disks = rrdset_create_localhost(
+ "btrfs"
+ , id
+ , name
+ , node->label
+ , "btrfs.disk"
+ , title
+ , "MiB"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_BTRFS_NAME
+ , NETDATA_CHART_PRIO_BTRFS_DISK
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+
+ node->rd_allocation_disks_unallocated = rrddim_add(node->st_allocation_disks, "unallocated", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+ node->rd_allocation_disks_data_free = rrddim_add(node->st_allocation_disks, "data_free", "data free", 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+ node->rd_allocation_disks_data_used = rrddim_add(node->st_allocation_disks, "data_used", "data used", 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+ node->rd_allocation_disks_metadata_free = rrddim_add(node->st_allocation_disks, "meta_free", "meta free", 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+ node->rd_allocation_disks_metadata_used = rrddim_add(node->st_allocation_disks, "meta_used", "meta used", 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+ node->rd_allocation_disks_system_free = rrddim_add(node->st_allocation_disks, "sys_free", "sys free", 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+ node->rd_allocation_disks_system_used = rrddim_add(node->st_allocation_disks, "sys_used", "sys used", 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else rrdset_next(node->st_allocation_disks);
+
+ // unsigned long long disk_used = node->allocation_data_disk_used + node->allocation_metadata_disk_used + node->allocation_system_disk_used;
+ unsigned long long disk_total = node->allocation_data_disk_total + node->allocation_metadata_disk_total + node->allocation_system_disk_total;
+ unsigned long long disk_unallocated = node->all_disks_total - disk_total;
+
+ rrddim_set_by_pointer(node->st_allocation_disks, node->rd_allocation_disks_unallocated, disk_unallocated);
+ rrddim_set_by_pointer(node->st_allocation_disks, node->rd_allocation_disks_data_used, node->allocation_data_disk_used);
+ rrddim_set_by_pointer(node->st_allocation_disks, node->rd_allocation_disks_data_free, node->allocation_data_disk_total - node->allocation_data_disk_used);
+ rrddim_set_by_pointer(node->st_allocation_disks, node->rd_allocation_disks_metadata_used, node->allocation_metadata_disk_used);
+ rrddim_set_by_pointer(node->st_allocation_disks, node->rd_allocation_disks_metadata_free, node->allocation_metadata_disk_total - node->allocation_metadata_disk_used);
+ rrddim_set_by_pointer(node->st_allocation_disks, node->rd_allocation_disks_system_used, node->allocation_system_disk_used);
+ rrddim_set_by_pointer(node->st_allocation_disks, node->rd_allocation_disks_system_free, node->allocation_system_disk_total - node->allocation_system_disk_used);
+ rrdset_done(node->st_allocation_disks);
+ }
+
+
+ // --------------------------------------------------------------------
+ // allocation/data
+
+ if(do_allocation_data == CONFIG_BOOLEAN_YES || (do_allocation_data == CONFIG_BOOLEAN_AUTO &&
+ (node->allocation_data_total_bytes ||
+ netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
+ do_allocation_data = CONFIG_BOOLEAN_YES;
+
+ if(unlikely(!node->st_allocation_data)) {
+ char id[RRD_ID_LENGTH_MAX + 1], name[RRD_ID_LENGTH_MAX + 1], title[200 + 1];
+
+ snprintf(id, RRD_ID_LENGTH_MAX, "data_%s", node->id);
+ snprintf(name, RRD_ID_LENGTH_MAX, "data_%s", node->label);
+ snprintf(title, 200, "BTRFS Data Allocation for %s", node->label);
+
+ netdata_fix_chart_id(id);
+ netdata_fix_chart_name(name);
+
+ node->st_allocation_data = rrdset_create_localhost(
+ "btrfs"
+ , id
+ , name
+ , node->label
+ , "btrfs.data"
+ , title
+ , "MiB"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_BTRFS_NAME
+ , NETDATA_CHART_PRIO_BTRFS_DATA
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+
+ node->rd_allocation_data_free = rrddim_add(node->st_allocation_data, "free", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+ node->rd_allocation_data_used = rrddim_add(node->st_allocation_data, "used", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else rrdset_next(node->st_allocation_data);
+
+ rrddim_set_by_pointer(node->st_allocation_data, node->rd_allocation_data_free, node->allocation_data_total_bytes - node->allocation_data_bytes_used);
+ rrddim_set_by_pointer(node->st_allocation_data, node->rd_allocation_data_used, node->allocation_data_bytes_used);
+ rrdset_done(node->st_allocation_data);
+ }
+
+ // --------------------------------------------------------------------
+ // allocation/metadata
+
+ if(do_allocation_metadata == CONFIG_BOOLEAN_YES || (do_allocation_metadata == CONFIG_BOOLEAN_AUTO &&
+ (node->allocation_metadata_total_bytes ||
+ netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
+ do_allocation_metadata = CONFIG_BOOLEAN_YES;
+
+ if(unlikely(!node->st_allocation_metadata)) {
+ char id[RRD_ID_LENGTH_MAX + 1], name[RRD_ID_LENGTH_MAX + 1], title[200 + 1];
+
+ snprintf(id, RRD_ID_LENGTH_MAX, "metadata_%s", node->id);
+ snprintf(name, RRD_ID_LENGTH_MAX, "metadata_%s", node->label);
+ snprintf(title, 200, "BTRFS Metadata Allocation for %s", node->label);
+
+ netdata_fix_chart_id(id);
+ netdata_fix_chart_name(name);
+
+ node->st_allocation_metadata = rrdset_create_localhost(
+ "btrfs"
+ , id
+ , name
+ , node->label
+ , "btrfs.metadata"
+ , title
+ , "MiB"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_BTRFS_NAME
+ , NETDATA_CHART_PRIO_BTRFS_METADATA
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+
+ node->rd_allocation_metadata_free = rrddim_add(node->st_allocation_metadata, "free", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+ node->rd_allocation_metadata_used = rrddim_add(node->st_allocation_metadata, "used", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+ node->rd_allocation_metadata_reserved = rrddim_add(node->st_allocation_metadata, "reserved", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else rrdset_next(node->st_allocation_metadata);
+
+ rrddim_set_by_pointer(node->st_allocation_metadata, node->rd_allocation_metadata_free, node->allocation_metadata_total_bytes - node->allocation_metadata_bytes_used - node->allocation_global_rsv_size);
+ rrddim_set_by_pointer(node->st_allocation_metadata, node->rd_allocation_metadata_used, node->allocation_metadata_bytes_used);
+ rrddim_set_by_pointer(node->st_allocation_metadata, node->rd_allocation_metadata_reserved, node->allocation_global_rsv_size);
+ rrdset_done(node->st_allocation_metadata);
+ }
+
+ // --------------------------------------------------------------------
+ // allocation/system
+
+ if(do_allocation_system == CONFIG_BOOLEAN_YES || (do_allocation_system == CONFIG_BOOLEAN_AUTO &&
+ (node->allocation_system_total_bytes ||
+ netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
+ do_allocation_system = CONFIG_BOOLEAN_YES;
+
+ if(unlikely(!node->st_allocation_system)) {
+ char id[RRD_ID_LENGTH_MAX + 1], name[RRD_ID_LENGTH_MAX + 1], title[200 + 1];
+
+ snprintf(id, RRD_ID_LENGTH_MAX, "system_%s", node->id);
+ snprintf(name, RRD_ID_LENGTH_MAX, "system_%s", node->label);
+ snprintf(title, 200, "BTRFS System Allocation for %s", node->label);
+
+ netdata_fix_chart_id(id);
+ netdata_fix_chart_name(name);
+
+ node->st_allocation_system = rrdset_create_localhost(
+ "btrfs"
+ , id
+ , name
+ , node->label
+ , "btrfs.system"
+ , title
+ , "MiB"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_BTRFS_NAME
+ , NETDATA_CHART_PRIO_BTRFS_SYSTEM
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+
+ node->rd_allocation_system_free = rrddim_add(node->st_allocation_system, "free", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+ node->rd_allocation_system_used = rrddim_add(node->st_allocation_system, "used", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else rrdset_next(node->st_allocation_system);
+
+ rrddim_set_by_pointer(node->st_allocation_system, node->rd_allocation_system_free, node->allocation_system_total_bytes - node->allocation_system_bytes_used);
+ rrddim_set_by_pointer(node->st_allocation_system, node->rd_allocation_system_used, node->allocation_system_bytes_used);
+ rrdset_done(node->st_allocation_system);
+ }
+ }
+
+ return 0;
+}
+
diff --git a/collectors/proc.plugin/sys_kernel_mm_ksm.c b/collectors/proc.plugin/sys_kernel_mm_ksm.c
new file mode 100644
index 0000000..a0e5690
--- /dev/null
+++ b/collectors/proc.plugin/sys_kernel_mm_ksm.c
@@ -0,0 +1,201 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "plugin_proc.h"
+
+#define PLUGIN_PROC_MODULE_KSM_NAME "/sys/kernel/mm/ksm"
+
+typedef struct ksm_name_value {
+ char filename[FILENAME_MAX + 1];
+ unsigned long long value;
+} KSM_NAME_VALUE;
+
+#define PAGES_SHARED 0
+#define PAGES_SHARING 1
+#define PAGES_UNSHARED 2
+#define PAGES_VOLATILE 3
+#define PAGES_TO_SCAN 4
+
+KSM_NAME_VALUE values[] = {
+ [PAGES_SHARED] = { "/sys/kernel/mm/ksm/pages_shared", 0ULL },
+ [PAGES_SHARING] = { "/sys/kernel/mm/ksm/pages_sharing", 0ULL },
+ [PAGES_UNSHARED] = { "/sys/kernel/mm/ksm/pages_unshared", 0ULL },
+ [PAGES_VOLATILE] = { "/sys/kernel/mm/ksm/pages_volatile", 0ULL },
+ // [PAGES_TO_SCAN] = { "/sys/kernel/mm/ksm/pages_to_scan", 0ULL },
+};
+
+int do_sys_kernel_mm_ksm(int update_every, usec_t dt) {
+ (void)dt;
+ static procfile *ff_pages_shared = NULL, *ff_pages_sharing = NULL, *ff_pages_unshared = NULL, *ff_pages_volatile = NULL/*, *ff_pages_to_scan = NULL*/;
+ static unsigned long page_size = 0;
+
+ if(unlikely(page_size == 0))
+ page_size = (unsigned long)sysconf(_SC_PAGESIZE);
+
+ if(unlikely(!ff_pages_shared)) {
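+ // build the default path (honoring any configured host prefix) first, then let the configuration override it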
+ snprintfz(values[PAGES_SHARED].filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/kernel/mm/ksm/pages_shared");
+ snprintfz(values[PAGES_SHARED].filename, FILENAME_MAX, "%s", config_get("plugin:proc:/sys/kernel/mm/ksm", "/sys/kernel/mm/ksm/pages_shared", values[PAGES_SHARED].filename));
+ ff_pages_shared = procfile_open(values[PAGES_SHARED].filename, " \t:", PROCFILE_FLAG_DEFAULT);
+ }
+
+ if(unlikely(!ff_pages_sharing)) {
+ snprintfz(values[PAGES_SHARING].filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/kernel/mm/ksm/pages_sharing");
+ snprintfz(values[PAGES_SHARING].filename, FILENAME_MAX, "%s", config_get("plugin:proc:/sys/kernel/mm/ksm", "/sys/kernel/mm/ksm/pages_sharing", values[PAGES_SHARING].filename));
+ ff_pages_sharing = procfile_open(values[PAGES_SHARING].filename, " \t:", PROCFILE_FLAG_DEFAULT);
+ }
+
+ if(unlikely(!ff_pages_unshared)) {
+ snprintfz(values[PAGES_UNSHARED].filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/kernel/mm/ksm/pages_unshared");
+ snprintfz(values[PAGES_UNSHARED].filename, FILENAME_MAX, "%s", config_get("plugin:proc:/sys/kernel/mm/ksm", "/sys/kernel/mm/ksm/pages_unshared", values[PAGES_UNSHARED].filename));
+ ff_pages_unshared = procfile_open(values[PAGES_UNSHARED].filename, " \t:", PROCFILE_FLAG_DEFAULT);
+ }
+
+ if(unlikely(!ff_pages_volatile)) {
+ snprintfz(values[PAGES_VOLATILE].filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/kernel/mm/ksm/pages_volatile");
+ snprintfz(values[PAGES_VOLATILE].filename, FILENAME_MAX, "%s", config_get("plugin:proc:/sys/kernel/mm/ksm", "/sys/kernel/mm/ksm/pages_volatile", values[PAGES_VOLATILE].filename));
+ ff_pages_volatile = procfile_open(values[PAGES_VOLATILE].filename, " \t:", PROCFILE_FLAG_DEFAULT);
+ }
+
+ //if(unlikely(!ff_pages_to_scan)) {
+ // snprintfz(values[PAGES_TO_SCAN].filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/kernel/mm/ksm/pages_to_scan");
+ // snprintfz(values[PAGES_TO_SCAN].filename, FILENAME_MAX, "%s", config_get("plugin:proc:/sys/kernel/mm/ksm", "/sys/kernel/mm/ksm/pages_to_scan", values[PAGES_TO_SCAN].filename));
+ // ff_pages_to_scan = procfile_open(values[PAGES_TO_SCAN].filename, " \t:", PROCFILE_FLAG_DEFAULT);
+ //}
+
+ if(unlikely(!ff_pages_shared || !ff_pages_sharing || !ff_pages_unshared || !ff_pages_volatile /*|| !ff_pages_to_scan */))
+ return 1;
+
+ unsigned long long pages_shared = 0, pages_sharing = 0, pages_unshared = 0, pages_volatile = 0, /*pages_to_scan = 0,*/ offered = 0, saved = 0;
+
+ ff_pages_shared = procfile_readall(ff_pages_shared);
+ if(unlikely(!ff_pages_shared)) return 0; // we return 0, so that we will retry opening it next time
+ pages_shared = str2ull(procfile_lineword(ff_pages_shared, 0, 0));
+
+ ff_pages_sharing = procfile_readall(ff_pages_sharing);
+ if(unlikely(!ff_pages_sharing)) return 0; // we return 0, so that we will retry opening it next time
+ pages_sharing = str2ull(procfile_lineword(ff_pages_sharing, 0, 0));
+
+ ff_pages_unshared = procfile_readall(ff_pages_unshared);
+ if(unlikely(!ff_pages_unshared)) return 0; // we return 0, so that we will retry opening it next time
+ pages_unshared = str2ull(procfile_lineword(ff_pages_unshared, 0, 0));
+
+ ff_pages_volatile = procfile_readall(ff_pages_volatile);
+ if(unlikely(!ff_pages_volatile)) return 0; // we return 0, so that we will retry opening it next time
+ pages_volatile = str2ull(procfile_lineword(ff_pages_volatile, 0, 0));
+
+ //ff_pages_to_scan = procfile_readall(ff_pages_to_scan);
+ //if(unlikely(!ff_pages_to_scan)) return 0; // we return 0, so that we will retry to open it next time
+ //pages_to_scan = str2ull(procfile_lineword(ff_pages_to_scan, 0, 0));
+
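+ // all pages participating in KSM are "offered"; pages_sharing is the amount of memory actually saved by merging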
+ offered = pages_sharing + pages_shared + pages_unshared + pages_volatile;
+ saved = pages_sharing;
+
+ if(unlikely(!offered /*|| !pages_to_scan*/ && netdata_zero_metrics_enabled == CONFIG_BOOLEAN_NO)) return 0;
+
+ // --------------------------------------------------------------------
+
+ {
+ static RRDSET *st_mem_ksm = NULL;
+ static RRDDIM *rd_shared = NULL, *rd_unshared = NULL, *rd_sharing = NULL, *rd_volatile = NULL/*, *rd_to_scan = NULL*/;
+
+ if (unlikely(!st_mem_ksm)) {
+ st_mem_ksm = rrdset_create_localhost(
+ "mem"
+ , "ksm"
+ , NULL
+ , "ksm"
+ , NULL
+ , "Kernel Same Page Merging"
+ , "MiB"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_KSM_NAME
+ , NETDATA_CHART_PRIO_MEM_KSM
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+
+ rd_shared = rrddim_add(st_mem_ksm, "shared", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+ rd_unshared = rrddim_add(st_mem_ksm, "unshared", NULL, -1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+ rd_sharing = rrddim_add(st_mem_ksm, "sharing", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+ rd_volatile = rrddim_add(st_mem_ksm, "volatile", NULL, -1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+ //rd_to_scan = rrddim_add(st_mem_ksm, "to_scan", "to scan", -1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else
+ rrdset_next(st_mem_ksm);
+
+ rrddim_set_by_pointer(st_mem_ksm, rd_shared, pages_shared * page_size);
+ rrddim_set_by_pointer(st_mem_ksm, rd_unshared, pages_unshared * page_size);
+ rrddim_set_by_pointer(st_mem_ksm, rd_sharing, pages_sharing * page_size);
+ rrddim_set_by_pointer(st_mem_ksm, rd_volatile, pages_volatile * page_size);
+ //rrddim_set_by_pointer(st_mem_ksm, rd_to_scan, pages_to_scan * page_size);
+
+ rrdset_done(st_mem_ksm);
+ }
+
+ // --------------------------------------------------------------------
+
+ {
+ static RRDSET *st_mem_ksm_savings = NULL;
+ static RRDDIM *rd_savings = NULL, *rd_offered = NULL;
+
+ if (unlikely(!st_mem_ksm_savings)) {
+ st_mem_ksm_savings = rrdset_create_localhost(
+ "mem"
+ , "ksm_savings"
+ , NULL
+ , "ksm"
+ , NULL
+ , "Kernel Same Page Merging Savings"
+ , "MiB"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_KSM_NAME
+ , NETDATA_CHART_PRIO_MEM_KSM_SAVINGS
+ , update_every
+ , RRDSET_TYPE_AREA
+ );
+
+ rd_savings = rrddim_add(st_mem_ksm_savings, "savings", NULL, -1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+ rd_offered = rrddim_add(st_mem_ksm_savings, "offered", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else
+ rrdset_next(st_mem_ksm_savings);
+
+ rrddim_set_by_pointer(st_mem_ksm_savings, rd_savings, saved * page_size);
+ rrddim_set_by_pointer(st_mem_ksm_savings, rd_offered, offered * page_size);
+
+ rrdset_done(st_mem_ksm_savings);
+ }
+
+ // --------------------------------------------------------------------
+
+ {
+ static RRDSET *st_mem_ksm_ratios = NULL;
+ static RRDDIM *rd_savings = NULL;
+
+ if (unlikely(!st_mem_ksm_ratios)) {
+ st_mem_ksm_ratios = rrdset_create_localhost(
+ "mem"
+ , "ksm_ratios"
+ , NULL
+ , "ksm"
+ , NULL
+ , "Kernel Same Page Merging Effectiveness"
+ , "percentage"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_KSM_NAME
+ , NETDATA_CHART_PRIO_MEM_KSM_RATIOS
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rd_savings = rrddim_add(st_mem_ksm_ratios, "savings", NULL, 1, 10000, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else
+ rrdset_next(st_mem_ksm_ratios);
+
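+ // raw value is saved/offered scaled by 1,000,000; with the dimension divisor of 10000 it is rendered as a percentage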
+ rrddim_set_by_pointer(st_mem_ksm_ratios, rd_savings, offered ? (saved * 1000000) / offered : 0);
+
+ rrdset_done(st_mem_ksm_ratios);
+ }
+
+ return 0;
+}
diff --git a/collectors/proc.plugin/zfs_common.c b/collectors/proc.plugin/zfs_common.c
new file mode 100644
index 0000000..330bcf1
--- /dev/null
+++ b/collectors/proc.plugin/zfs_common.c
@@ -0,0 +1,772 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "zfs_common.h"
+
+struct arcstats arcstats = { 0 };
+
+void generate_charts_arcstats(const char *plugin, const char *module, int show_zero_charts, int update_every) {
+ static int do_arc_size = -1, do_l2_size = -1, do_reads = -1, do_l2bytes = -1, do_ahits = -1, do_dhits = -1, \
+ do_phits = -1, do_mhits = -1, do_l2hits = -1, do_list_hits = -1;
+
+ if(unlikely(do_arc_size == -1))
+ do_arc_size = do_l2_size = do_reads = do_l2bytes = do_ahits = do_dhits = do_phits = do_mhits \
+ = do_l2hits = do_list_hits = show_zero_charts;
+
+ // ARC reads
+ unsigned long long aread = arcstats.hits + arcstats.misses;
+
+ // Demand reads
+ unsigned long long dhit = arcstats.demand_data_hits + arcstats.demand_metadata_hits;
+ unsigned long long dmiss = arcstats.demand_data_misses + arcstats.demand_metadata_misses;
+ unsigned long long dread = dhit + dmiss;
+
+ // Prefetch reads
+ unsigned long long phit = arcstats.prefetch_data_hits + arcstats.prefetch_metadata_hits;
+ unsigned long long pmiss = arcstats.prefetch_data_misses + arcstats.prefetch_metadata_misses;
+ unsigned long long pread = phit + pmiss;
+
+ // Metadata reads
+ unsigned long long mhit = arcstats.prefetch_metadata_hits + arcstats.demand_metadata_hits;
+ unsigned long long mmiss = arcstats.prefetch_metadata_misses + arcstats.demand_metadata_misses;
+ unsigned long long mread = mhit + mmiss;
+
+ // l2 reads
+ unsigned long long l2hit = arcstats.l2_hits;
+ unsigned long long l2miss = arcstats.l2_misses;
+ unsigned long long l2read = l2hit + l2miss;
+
+ // --------------------------------------------------------------------
+
+ if(do_arc_size == CONFIG_BOOLEAN_YES || arcstats.size || arcstats.c || arcstats.c_min || arcstats.c_max) {
+ do_arc_size = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *st_arc_size = NULL;
+ static RRDDIM *rd_arc_size = NULL;
+ static RRDDIM *rd_arc_target_size = NULL;
+ static RRDDIM *rd_arc_target_min_size = NULL;
+ static RRDDIM *rd_arc_target_max_size = NULL;
+
+ if (unlikely(!st_arc_size)) {
+ st_arc_size = rrdset_create_localhost(
+ "zfs"
+ , "arc_size"
+ , NULL
+ , ZFS_FAMILY_SIZE
+ , NULL
+ , "ZFS ARC Size"
+ , "MiB"
+ , plugin
+ , module
+ , NETDATA_CHART_PRIO_ZFS_ARC_SIZE
+ , update_every
+ , RRDSET_TYPE_AREA
+ );
+
+ rd_arc_size = rrddim_add(st_arc_size, "size", "arcsz", 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+ rd_arc_target_size = rrddim_add(st_arc_size, "target", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+ rd_arc_target_min_size = rrddim_add(st_arc_size, "min", "min (hard limit)", 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+ rd_arc_target_max_size = rrddim_add(st_arc_size, "max", "max (high water)", 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else
+ rrdset_next(st_arc_size);
+
+ rrddim_set_by_pointer(st_arc_size, rd_arc_size, arcstats.size);
+ rrddim_set_by_pointer(st_arc_size, rd_arc_target_size, arcstats.c);
+ rrddim_set_by_pointer(st_arc_size, rd_arc_target_min_size, arcstats.c_min);
+ rrddim_set_by_pointer(st_arc_size, rd_arc_target_max_size, arcstats.c_max);
+ rrdset_done(st_arc_size);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(likely(arcstats.l2exist) && (do_l2_size == CONFIG_BOOLEAN_YES || arcstats.l2_size || arcstats.l2_asize)) {
+ do_l2_size = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *st_l2_size = NULL;
+ static RRDDIM *rd_l2_size = NULL;
+ static RRDDIM *rd_l2_asize = NULL;
+
+ if (unlikely(!st_l2_size)) {
+ st_l2_size = rrdset_create_localhost(
+ "zfs"
+ , "l2_size"
+ , NULL
+ , ZFS_FAMILY_SIZE
+ , NULL
+ , "ZFS L2 ARC Size"
+ , "MiB"
+ , plugin
+ , module
+ , NETDATA_CHART_PRIO_ZFS_L2_SIZE
+ , update_every
+ , RRDSET_TYPE_AREA
+ );
+
+ rd_l2_asize = rrddim_add(st_l2_size, "actual", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+ rd_l2_size = rrddim_add(st_l2_size, "size", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else
+ rrdset_next(st_l2_size);
+
+ rrddim_set_by_pointer(st_l2_size, rd_l2_size, arcstats.l2_size);
+ rrddim_set_by_pointer(st_l2_size, rd_l2_asize, arcstats.l2_asize);
+ rrdset_done(st_l2_size);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(likely(do_reads == CONFIG_BOOLEAN_YES || aread || dread || pread || mread || l2read)) {
+ do_reads = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *st_reads = NULL;
+ static RRDDIM *rd_aread = NULL;
+ static RRDDIM *rd_dread = NULL;
+ static RRDDIM *rd_pread = NULL;
+ static RRDDIM *rd_mread = NULL;
+ static RRDDIM *rd_l2read = NULL;
+
+ if (unlikely(!st_reads)) {
+ st_reads = rrdset_create_localhost(
+ "zfs"
+ , "reads"
+ , NULL
+ , ZFS_FAMILY_ACCESSES
+ , NULL
+ , "ZFS Reads"
+ , "reads/s"
+ , plugin
+ , module
+ , NETDATA_CHART_PRIO_ZFS_READS
+ , update_every
+ , RRDSET_TYPE_AREA
+ );
+
+ rd_aread = rrddim_add(st_reads, "areads", "arc", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_dread = rrddim_add(st_reads, "dreads", "demand", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_pread = rrddim_add(st_reads, "preads", "prefetch", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_mread = rrddim_add(st_reads, "mreads", "metadata", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+
+ if(arcstats.l2exist)
+ rd_l2read = rrddim_add(st_reads, "l2reads", "l2", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else
+ rrdset_next(st_reads);
+
+ rrddim_set_by_pointer(st_reads, rd_aread, aread);
+ rrddim_set_by_pointer(st_reads, rd_dread, dread);
+ rrddim_set_by_pointer(st_reads, rd_pread, pread);
+ rrddim_set_by_pointer(st_reads, rd_mread, mread);
+
+ if(arcstats.l2exist)
+ rrddim_set_by_pointer(st_reads, rd_l2read, l2read);
+
+ rrdset_done(st_reads);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(likely(arcstats.l2exist && (do_l2bytes == CONFIG_BOOLEAN_YES || arcstats.l2_read_bytes || arcstats.l2_write_bytes))) {
+ do_l2bytes = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *st_l2bytes = NULL;
+ static RRDDIM *rd_l2_read_bytes = NULL;
+ static RRDDIM *rd_l2_write_bytes = NULL;
+
+ if (unlikely(!st_l2bytes)) {
+ st_l2bytes = rrdset_create_localhost(
+ "zfs"
+ , "bytes"
+ , NULL
+ , ZFS_FAMILY_ACCESSES
+ , NULL
+ , "ZFS ARC L2 Read/Write Rate"
+ , "KiB/s"
+ , plugin
+ , module
+ , NETDATA_CHART_PRIO_ZFS_IO
+ , update_every
+ , RRDSET_TYPE_AREA
+ );
+
+ rd_l2_read_bytes = rrddim_add(st_l2bytes, "read", NULL, 1, 1024, RRD_ALGORITHM_INCREMENTAL);
+ rd_l2_write_bytes = rrddim_add(st_l2bytes, "write", NULL, -1, 1024, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else
+ rrdset_next(st_l2bytes);
+
+ rrddim_set_by_pointer(st_l2bytes, rd_l2_read_bytes, arcstats.l2_read_bytes);
+ rrddim_set_by_pointer(st_l2bytes, rd_l2_write_bytes, arcstats.l2_write_bytes);
+ rrdset_done(st_l2bytes);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(likely(do_ahits == CONFIG_BOOLEAN_YES || arcstats.hits || arcstats.misses)) {
+ do_ahits = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *st_ahits = NULL;
+ static RRDDIM *rd_ahits = NULL;
+ static RRDDIM *rd_amisses = NULL;
+
+ if (unlikely(!st_ahits)) {
+ st_ahits = rrdset_create_localhost(
+ "zfs"
+ , "hits"
+ , NULL
+ , ZFS_FAMILY_EFFICIENCY
+ , NULL
+ , "ZFS ARC Hits"
+ , "percentage"
+ , plugin
+ , module
+ , NETDATA_CHART_PRIO_ZFS_HITS
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+
+ rd_ahits = rrddim_add(st_ahits, "hits", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
+ rd_amisses = rrddim_add(st_ahits, "misses", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
+ }
+ else
+ rrdset_next(st_ahits);
+
+ rrddim_set_by_pointer(st_ahits, rd_ahits, arcstats.hits);
+ rrddim_set_by_pointer(st_ahits, rd_amisses, arcstats.misses);
+ rrdset_done(st_ahits);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(likely(do_dhits == CONFIG_BOOLEAN_YES || dhit || dmiss)) {
+ do_dhits = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *st_dhits = NULL;
+ static RRDDIM *rd_dhits = NULL;
+ static RRDDIM *rd_dmisses = NULL;
+
+ if (unlikely(!st_dhits)) {
+ st_dhits = rrdset_create_localhost(
+ "zfs"
+ , "dhits"
+ , NULL
+ , ZFS_FAMILY_EFFICIENCY
+ , NULL
+ , "ZFS Demand Hits"
+ , "percentage"
+ , plugin
+ , module
+ , NETDATA_CHART_PRIO_ZFS_DHITS
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+
+ rd_dhits = rrddim_add(st_dhits, "hits", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
+ rd_dmisses = rrddim_add(st_dhits, "misses", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
+ }
+ else
+ rrdset_next(st_dhits);
+
+ rrddim_set_by_pointer(st_dhits, rd_dhits, dhit);
+ rrddim_set_by_pointer(st_dhits, rd_dmisses, dmiss);
+ rrdset_done(st_dhits);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(likely(do_phits == CONFIG_BOOLEAN_YES || phit || pmiss)) {
+ do_phits = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *st_phits = NULL;
+ static RRDDIM *rd_phits = NULL;
+ static RRDDIM *rd_pmisses = NULL;
+
+ if (unlikely(!st_phits)) {
+ st_phits = rrdset_create_localhost(
+ "zfs"
+ , "phits"
+ , NULL
+ , ZFS_FAMILY_EFFICIENCY
+ , NULL
+ , "ZFS Prefetch Hits"
+ , "percentage"
+ , plugin
+ , module
+ , NETDATA_CHART_PRIO_ZFS_PHITS
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+
+ rd_phits = rrddim_add(st_phits, "hits", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
+ rd_pmisses = rrddim_add(st_phits, "misses", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
+ }
+ else
+ rrdset_next(st_phits);
+
+ rrddim_set_by_pointer(st_phits, rd_phits, phit);
+ rrddim_set_by_pointer(st_phits, rd_pmisses, pmiss);
+ rrdset_done(st_phits);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(likely(do_mhits == CONFIG_BOOLEAN_YES || mhit || mmiss)) {
+ do_mhits = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *st_mhits = NULL;
+ static RRDDIM *rd_mhits = NULL;
+ static RRDDIM *rd_mmisses = NULL;
+
+ if (unlikely(!st_mhits)) {
+ st_mhits = rrdset_create_localhost(
+ "zfs"
+ , "mhits"
+ , NULL
+ , ZFS_FAMILY_EFFICIENCY
+ , NULL
+ , "ZFS Metadata Hits"
+ , "percentage"
+ , plugin
+ , module
+ , NETDATA_CHART_PRIO_ZFS_MHITS
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+
+ rd_mhits = rrddim_add(st_mhits, "hits", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
+ rd_mmisses = rrddim_add(st_mhits, "misses", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
+ }
+ else
+ rrdset_next(st_mhits);
+
+ rrddim_set_by_pointer(st_mhits, rd_mhits, mhit);
+ rrddim_set_by_pointer(st_mhits, rd_mmisses, mmiss);
+ rrdset_done(st_mhits);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(likely(arcstats.l2exist && (do_l2hits == CONFIG_BOOLEAN_YES || l2hit || l2miss))) {
+ do_l2hits = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *st_l2hits = NULL;
+ static RRDDIM *rd_l2hits = NULL;
+ static RRDDIM *rd_l2misses = NULL;
+
+ if (unlikely(!st_l2hits)) {
+ st_l2hits = rrdset_create_localhost(
+ "zfs"
+ , "l2hits"
+ , NULL
+ , ZFS_FAMILY_EFFICIENCY
+ , NULL
+ , "ZFS L2 Hits"
+ , "percentage"
+ , plugin
+ , module
+ , NETDATA_CHART_PRIO_ZFS_L2HITS
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+
+ rd_l2hits = rrddim_add(st_l2hits, "hits", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
+ rd_l2misses = rrddim_add(st_l2hits, "misses", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
+ }
+ else
+ rrdset_next(st_l2hits);
+
+ rrddim_set_by_pointer(st_l2hits, rd_l2hits, l2hit);
+ rrddim_set_by_pointer(st_l2hits, rd_l2misses, l2miss);
+ rrdset_done(st_l2hits);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(likely(do_list_hits == CONFIG_BOOLEAN_YES || arcstats.mfu_hits \
+ || arcstats.mru_hits \
+ || arcstats.mfu_ghost_hits \
+ || arcstats.mru_ghost_hits)) {
+ do_list_hits = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *st_list_hits = NULL;
+ static RRDDIM *rd_mfu = NULL;
+ static RRDDIM *rd_mru = NULL;
+ static RRDDIM *rd_mfug = NULL;
+ static RRDDIM *rd_mrug = NULL;
+
+ if (unlikely(!st_list_hits)) {
+ st_list_hits = rrdset_create_localhost(
+ "zfs"
+ , "list_hits"
+ , NULL
+ , ZFS_FAMILY_EFFICIENCY
+ , NULL
+ , "ZFS List Hits"
+ , "hits/s"
+ , plugin
+ , module
+ , NETDATA_CHART_PRIO_ZFS_LIST_HITS
+ , update_every
+ , RRDSET_TYPE_AREA
+ );
+
+ rd_mfu = rrddim_add(st_list_hits, "mfu", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_mfug = rrddim_add(st_list_hits, "mfug", "mfu ghost", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_mru = rrddim_add(st_list_hits, "mru", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_mrug = rrddim_add(st_list_hits, "mrug", "mru ghost", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else
+ rrdset_next(st_list_hits);
+
+ rrddim_set_by_pointer(st_list_hits, rd_mfu, arcstats.mfu_hits);
+ rrddim_set_by_pointer(st_list_hits, rd_mru, arcstats.mru_hits);
+ rrddim_set_by_pointer(st_list_hits, rd_mfug, arcstats.mfu_ghost_hits);
+ rrddim_set_by_pointer(st_list_hits, rd_mrug, arcstats.mru_ghost_hits);
+ rrdset_done(st_list_hits);
+ }
+}
+
+void generate_charts_arc_summary(const char *plugin, const char *module, int show_zero_charts, int update_every) {
+ static int do_arc_size_breakdown = -1, do_memory = -1, do_important_ops = -1, do_actual_hits = -1, \
+ do_demand_data_hits = -1, do_prefetch_data_hits = -1, do_hash_elements = -1, do_hash_chains = -1;
+
+ if(unlikely(do_arc_size_breakdown == -1))
+ do_arc_size_breakdown = do_memory = do_important_ops = do_actual_hits = do_demand_data_hits \
+ = do_prefetch_data_hits = do_hash_elements = do_hash_chains = show_zero_charts;
+
+ unsigned long long arc_accesses_total = arcstats.hits + arcstats.misses;
+ unsigned long long real_hits = arcstats.mfu_hits + arcstats.mru_hits;
+ unsigned long long real_misses = arc_accesses_total - real_hits;
+
+ //unsigned long long anon_hits = arcstats.hits - (arcstats.mfu_hits + arcstats.mru_hits + arcstats.mfu_ghost_hits + arcstats.mru_ghost_hits);
+
+ unsigned long long arc_size = arcstats.size;
+ unsigned long long mru_size = arcstats.p;
+ //unsigned long long target_min_size = arcstats.c_min;
+ //unsigned long long target_max_size = arcstats.c_max;
+ unsigned long long target_size = arcstats.c;
+ //unsigned long long target_size_ratio = (target_max_size / target_min_size);
+
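+ // estimate the MFU portion as whatever exceeds the MRU target (arcstats.p), based on the larger of the actual and target ARC size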
+ unsigned long long mfu_size;
+ if(arc_size > target_size)
+ mfu_size = arc_size - mru_size;
+ else
+ mfu_size = target_size - mru_size;
+
+ // --------------------------------------------------------------------
+
+ if(likely(do_arc_size_breakdown == CONFIG_BOOLEAN_YES || mru_size || mfu_size)) {
+ do_arc_size_breakdown = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *st_arc_size_breakdown = NULL;
+ static RRDDIM *rd_most_recent = NULL;
+ static RRDDIM *rd_most_frequent = NULL;
+
+ if (unlikely(!st_arc_size_breakdown)) {
+ st_arc_size_breakdown = rrdset_create_localhost(
+ "zfs"
+ , "arc_size_breakdown"
+ , NULL
+ , ZFS_FAMILY_EFFICIENCY
+ , NULL
+ , "ZFS ARC Size Breakdown"
+ , "percentage"
+ , plugin
+ , module
+ , NETDATA_CHART_PRIO_ZFS_ARC_SIZE_BREAKDOWN
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+
+ rd_most_recent = rrddim_add(st_arc_size_breakdown, "recent", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_ROW_TOTAL);
+ rd_most_frequent = rrddim_add(st_arc_size_breakdown, "frequent", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_ROW_TOTAL);
+ }
+ else
+ rrdset_next(st_arc_size_breakdown);
+
+ rrddim_set_by_pointer(st_arc_size_breakdown, rd_most_recent, mru_size);
+ rrddim_set_by_pointer(st_arc_size_breakdown, rd_most_frequent, mfu_size);
+ rrdset_done(st_arc_size_breakdown);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(likely(do_memory == CONFIG_BOOLEAN_YES || arcstats.memory_direct_count \
+ || arcstats.memory_throttle_count \
+ || arcstats.memory_indirect_count)) {
+ do_memory = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *st_memory = NULL;
+#ifndef __FreeBSD__
+ static RRDDIM *rd_direct = NULL;
+#endif
+ static RRDDIM *rd_throttled = NULL;
+#ifndef __FreeBSD__
+ static RRDDIM *rd_indirect = NULL;
+#endif
+
+ if (unlikely(!st_memory)) {
+ st_memory = rrdset_create_localhost(
+ "zfs"
+ , "memory_ops"
+ , NULL
+ , ZFS_FAMILY_OPERATIONS
+ , NULL
+ , "ZFS Memory Operations"
+ , "operations/s"
+ , plugin
+ , module
+ , NETDATA_CHART_PRIO_ZFS_MEMORY_OPS
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+#ifndef __FreeBSD__
+ rd_direct = rrddim_add(st_memory, "direct", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+#endif
+ rd_throttled = rrddim_add(st_memory, "throttled", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+#ifndef __FreeBSD__
+ rd_indirect = rrddim_add(st_memory, "indirect", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+#endif
+ }
+ else
+ rrdset_next(st_memory);
+
+#ifndef __FreeBSD__
+ rrddim_set_by_pointer(st_memory, rd_direct, arcstats.memory_direct_count);
+#endif
+ rrddim_set_by_pointer(st_memory, rd_throttled, arcstats.memory_throttle_count);
+#ifndef __FreeBSD__
+ rrddim_set_by_pointer(st_memory, rd_indirect, arcstats.memory_indirect_count);
+#endif
+ rrdset_done(st_memory);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(likely(do_important_ops == CONFIG_BOOLEAN_YES || arcstats.deleted \
+ || arcstats.evict_skip \
+ || arcstats.mutex_miss \
+ || arcstats.hash_collisions)) {
+ do_important_ops = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *st_important_ops = NULL;
+ static RRDDIM *rd_deleted = NULL;
+ static RRDDIM *rd_mutex_misses = NULL;
+ static RRDDIM *rd_evict_skips = NULL;
+ static RRDDIM *rd_hash_collisions = NULL;
+
+ if (unlikely(!st_important_ops)) {
+ st_important_ops = rrdset_create_localhost(
+ "zfs"
+ , "important_ops"
+ , NULL
+ , ZFS_FAMILY_OPERATIONS
+ , NULL
+ , "ZFS Important Operations"
+ , "operations/s"
+ , plugin
+ , module
+ , NETDATA_CHART_PRIO_ZFS_IMPORTANT_OPS
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rd_evict_skips = rrddim_add(st_important_ops, "eskip", "evict skip", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_deleted = rrddim_add(st_important_ops, "deleted", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_mutex_misses = rrddim_add(st_important_ops, "mtxmis", "mutex miss", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_hash_collisions = rrddim_add(st_important_ops, "hash_collisions", "hash collisions", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else
+ rrdset_next(st_important_ops);
+
+ rrddim_set_by_pointer(st_important_ops, rd_deleted, arcstats.deleted);
+ rrddim_set_by_pointer(st_important_ops, rd_evict_skips, arcstats.evict_skip);
+ rrddim_set_by_pointer(st_important_ops, rd_mutex_misses, arcstats.mutex_miss);
+ rrddim_set_by_pointer(st_important_ops, rd_hash_collisions, arcstats.hash_collisions);
+ rrdset_done(st_important_ops);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(likely(do_actual_hits == CONFIG_BOOLEAN_YES || real_hits || real_misses)) {
+ do_actual_hits = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *st_actual_hits = NULL;
+ static RRDDIM *rd_actual_hits = NULL;
+ static RRDDIM *rd_actual_misses = NULL;
+
+ if (unlikely(!st_actual_hits)) {
+ st_actual_hits = rrdset_create_localhost(
+ "zfs"
+ , "actual_hits"
+ , NULL
+ , ZFS_FAMILY_EFFICIENCY
+ , NULL
+ , "ZFS Actual Cache Hits"
+ , "percentage"
+ , plugin
+ , module
+ , NETDATA_CHART_PRIO_ZFS_ACTUAL_HITS
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+
+ rd_actual_hits = rrddim_add(st_actual_hits, "hits", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
+ rd_actual_misses = rrddim_add(st_actual_hits, "misses", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
+ }
+ else
+ rrdset_next(st_actual_hits);
+
+ rrddim_set_by_pointer(st_actual_hits, rd_actual_hits, real_hits);
+ rrddim_set_by_pointer(st_actual_hits, rd_actual_misses, real_misses);
+ rrdset_done(st_actual_hits);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(likely(do_demand_data_hits == CONFIG_BOOLEAN_YES || arcstats.demand_data_hits || arcstats.demand_data_misses)) {
+ do_demand_data_hits = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *st_demand_data_hits = NULL;
+ static RRDDIM *rd_demand_data_hits = NULL;
+ static RRDDIM *rd_demand_data_misses = NULL;
+
+ if (unlikely(!st_demand_data_hits)) {
+ st_demand_data_hits = rrdset_create_localhost(
+ "zfs"
+ , "demand_data_hits"
+ , NULL
+ , ZFS_FAMILY_EFFICIENCY
+ , NULL
+ , "ZFS Data Demand Efficiency"
+ , "percentage"
+ , plugin
+ , module
+ , NETDATA_CHART_PRIO_ZFS_DEMAND_DATA_HITS
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+
+ rd_demand_data_hits = rrddim_add(st_demand_data_hits, "hits", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
+ rd_demand_data_misses = rrddim_add(st_demand_data_hits, "misses", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
+ }
+ else
+ rrdset_next(st_demand_data_hits);
+
+ rrddim_set_by_pointer(st_demand_data_hits, rd_demand_data_hits, arcstats.demand_data_hits);
+ rrddim_set_by_pointer(st_demand_data_hits, rd_demand_data_misses, arcstats.demand_data_misses);
+ rrdset_done(st_demand_data_hits);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(likely(do_prefetch_data_hits == CONFIG_BOOLEAN_YES || arcstats.prefetch_data_hits \
+ || arcstats.prefetch_data_misses)) {
+ do_prefetch_data_hits = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *st_prefetch_data_hits = NULL;
+ static RRDDIM *rd_prefetch_data_hits = NULL;
+ static RRDDIM *rd_prefetch_data_misses = NULL;
+
+ if (unlikely(!st_prefetch_data_hits)) {
+ st_prefetch_data_hits = rrdset_create_localhost(
+ "zfs"
+ , "prefetch_data_hits"
+ , NULL
+ , ZFS_FAMILY_EFFICIENCY
+ , NULL
+ , "ZFS Data Prefetch Efficiency"
+ , "percentage"
+ , plugin
+ , module
+ , NETDATA_CHART_PRIO_ZFS_PREFETCH_DATA_HITS
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+
+ rd_prefetch_data_hits = rrddim_add(st_prefetch_data_hits, "hits", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
+ rd_prefetch_data_misses = rrddim_add(st_prefetch_data_hits, "misses", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
+ }
+ else
+ rrdset_next(st_prefetch_data_hits);
+
+ rrddim_set_by_pointer(st_prefetch_data_hits, rd_prefetch_data_hits, arcstats.prefetch_data_hits);
+ rrddim_set_by_pointer(st_prefetch_data_hits, rd_prefetch_data_misses, arcstats.prefetch_data_misses);
+ rrdset_done(st_prefetch_data_hits);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(likely(do_hash_elements == CONFIG_BOOLEAN_YES || arcstats.hash_elements || arcstats.hash_elements_max)) {
+ do_hash_elements = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *st_hash_elements = NULL;
+ static RRDDIM *rd_hash_elements_current = NULL;
+ static RRDDIM *rd_hash_elements_max = NULL;
+
+ if (unlikely(!st_hash_elements)) {
+ st_hash_elements = rrdset_create_localhost(
+ "zfs"
+ , "hash_elements"
+ , NULL
+ , ZFS_FAMILY_HASH
+ , NULL
+ , "ZFS ARC Hash Elements"
+ , "elements"
+ , plugin
+ , module
+ , NETDATA_CHART_PRIO_ZFS_HASH_ELEMENTS
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rd_hash_elements_current = rrddim_add(st_hash_elements, "current", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ rd_hash_elements_max = rrddim_add(st_hash_elements, "max", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else
+ rrdset_next(st_hash_elements);
+
+ rrddim_set_by_pointer(st_hash_elements, rd_hash_elements_current, arcstats.hash_elements);
+ rrddim_set_by_pointer(st_hash_elements, rd_hash_elements_max, arcstats.hash_elements_max);
+ rrdset_done(st_hash_elements);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(likely(do_hash_chains == CONFIG_BOOLEAN_YES || arcstats.hash_chains || arcstats.hash_chain_max)) {
+ do_hash_chains = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *st_hash_chains = NULL;
+ static RRDDIM *rd_hash_chains_current = NULL;
+ static RRDDIM *rd_hash_chains_max = NULL;
+
+ if (unlikely(!st_hash_chains)) {
+ st_hash_chains = rrdset_create_localhost(
+ "zfs"
+ , "hash_chains"
+ , NULL
+ , ZFS_FAMILY_HASH
+ , NULL
+ , "ZFS ARC Hash Chains"
+ , "chains"
+ , plugin
+ , module
+ , NETDATA_CHART_PRIO_ZFS_HASH_CHAINS
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rd_hash_chains_current = rrddim_add(st_hash_chains, "current", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ rd_hash_chains_max = rrddim_add(st_hash_chains, "max", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else
+ rrdset_next(st_hash_chains);
+
+ rrddim_set_by_pointer(st_hash_chains, rd_hash_chains_current, arcstats.hash_chains);
+ rrddim_set_by_pointer(st_hash_chains, rd_hash_chains_max, arcstats.hash_chain_max);
+ rrdset_done(st_hash_chains);
+ }
+
+ // --------------------------------------------------------------------
+
+} \ No newline at end of file
diff --git a/collectors/proc.plugin/zfs_common.h b/collectors/proc.plugin/zfs_common.h
new file mode 100644
index 0000000..148f9e4
--- /dev/null
+++ b/collectors/proc.plugin/zfs_common.h
@@ -0,0 +1,115 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_ZFS_COMMON_H
+#define NETDATA_ZFS_COMMON_H 1
+
+#include "../../daemon/common.h"
+
+#define ZFS_FAMILY_SIZE "size"
+#define ZFS_FAMILY_EFFICIENCY "efficiency"
+#define ZFS_FAMILY_ACCESSES "accesses"
+#define ZFS_FAMILY_OPERATIONS "operations"
+#define ZFS_FAMILY_HASH "hashes"
+
+struct arcstats {
+ // values
+ unsigned long long hits;
+ unsigned long long misses;
+ unsigned long long demand_data_hits;
+ unsigned long long demand_data_misses;
+ unsigned long long demand_metadata_hits;
+ unsigned long long demand_metadata_misses;
+ unsigned long long prefetch_data_hits;
+ unsigned long long prefetch_data_misses;
+ unsigned long long prefetch_metadata_hits;
+ unsigned long long prefetch_metadata_misses;
+ unsigned long long mru_hits;
+ unsigned long long mru_ghost_hits;
+ unsigned long long mfu_hits;
+ unsigned long long mfu_ghost_hits;
+ unsigned long long deleted;
+ unsigned long long mutex_miss;
+ unsigned long long evict_skip;
+ unsigned long long evict_not_enough;
+ unsigned long long evict_l2_cached;
+ unsigned long long evict_l2_eligible;
+ unsigned long long evict_l2_ineligible;
+ unsigned long long evict_l2_skip;
+ unsigned long long hash_elements;
+ unsigned long long hash_elements_max;
+ unsigned long long hash_collisions;
+ unsigned long long hash_chains;
+ unsigned long long hash_chain_max;
+ unsigned long long p;
+ unsigned long long c;
+ unsigned long long c_min;
+ unsigned long long c_max;
+ unsigned long long size;
+ unsigned long long hdr_size;
+ unsigned long long data_size;
+ unsigned long long metadata_size;
+ unsigned long long other_size;
+ unsigned long long anon_size;
+ unsigned long long anon_evictable_data;
+ unsigned long long anon_evictable_metadata;
+ unsigned long long mru_size;
+ unsigned long long mru_evictable_data;
+ unsigned long long mru_evictable_metadata;
+ unsigned long long mru_ghost_size;
+ unsigned long long mru_ghost_evictable_data;
+ unsigned long long mru_ghost_evictable_metadata;
+ unsigned long long mfu_size;
+ unsigned long long mfu_evictable_data;
+ unsigned long long mfu_evictable_metadata;
+ unsigned long long mfu_ghost_size;
+ unsigned long long mfu_ghost_evictable_data;
+ unsigned long long mfu_ghost_evictable_metadata;
+ unsigned long long l2_hits;
+ unsigned long long l2_misses;
+ unsigned long long l2_feeds;
+ unsigned long long l2_rw_clash;
+ unsigned long long l2_read_bytes;
+ unsigned long long l2_write_bytes;
+ unsigned long long l2_writes_sent;
+ unsigned long long l2_writes_done;
+ unsigned long long l2_writes_error;
+ unsigned long long l2_writes_lock_retry;
+ unsigned long long l2_evict_lock_retry;
+ unsigned long long l2_evict_reading;
+ unsigned long long l2_evict_l1cached;
+ unsigned long long l2_free_on_write;
+ unsigned long long l2_cdata_free_on_write;
+ unsigned long long l2_abort_lowmem;
+ unsigned long long l2_cksum_bad;
+ unsigned long long l2_io_error;
+ unsigned long long l2_size;
+ unsigned long long l2_asize;
+ unsigned long long l2_hdr_size;
+ unsigned long long l2_compress_successes;
+ unsigned long long l2_compress_zeros;
+ unsigned long long l2_compress_failures;
+ unsigned long long memory_throttle_count;
+ unsigned long long duplicate_buffers;
+ unsigned long long duplicate_buffers_size;
+ unsigned long long duplicate_reads;
+ unsigned long long memory_direct_count;
+ unsigned long long memory_indirect_count;
+ unsigned long long arc_no_grow;
+ unsigned long long arc_tempreserve;
+ unsigned long long arc_loaned_bytes;
+ unsigned long long arc_prune;
+ unsigned long long arc_meta_used;
+ unsigned long long arc_meta_limit;
+ unsigned long long arc_meta_max;
+ unsigned long long arc_meta_min;
+ unsigned long long arc_need_free;
+ unsigned long long arc_sys_free;
+
+ // flags
+ int l2exist;
+};
+
+void generate_charts_arcstats(const char *plugin, const char *module, int show_zero_charts, int update_every);
+void generate_charts_arc_summary(const char *plugin, const char *module, int show_zero_charts, int update_every);
+
+#endif //NETDATA_ZFS_COMMON_H
diff --git a/collectors/python.d.plugin/Makefile.am b/collectors/python.d.plugin/Makefile.am
new file mode 100644
index 0000000..1de2d1d
--- /dev/null
+++ b/collectors/python.d.plugin/Makefile.am
@@ -0,0 +1,253 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+CLEANFILES = \
+ python.d.plugin \
+ $(NULL)
+
+include $(top_srcdir)/build/subst.inc
+SUFFIXES = .in
+
+dist_libconfig_DATA = \
+ python.d.conf \
+ $(NULL)
+
+dist_plugins_SCRIPTS = \
+ python.d.plugin \
+ $(NULL)
+
+dist_noinst_DATA = \
+ python.d.plugin.in \
+ README.md \
+ $(NULL)
+
+dist_python_SCRIPTS = \
+ $(NULL)
+
+dist_python_DATA = \
+ $(NULL)
+
+userpythonconfigdir=$(configdir)/python.d
+dist_userpythonconfig_DATA = \
+ $(NULL)
+
+# Explicitly install directories to avoid permission issues due to umask
+install-exec-local:
+ $(INSTALL) -d $(DESTDIR)$(userpythonconfigdir)
+
+pythonconfigdir=$(libconfigdir)/python.d
+dist_pythonconfig_DATA = \
+ $(NULL)
+
+include adaptec_raid/Makefile.inc
+include alarms/Makefile.inc
+include am2320/Makefile.inc
+include anomalies/Makefile.inc
+include apache/Makefile.inc
+include beanstalk/Makefile.inc
+include bind_rndc/Makefile.inc
+include boinc/Makefile.inc
+include ceph/Makefile.inc
+include chrony/Makefile.inc
+include couchdb/Makefile.inc
+include dnsdist/Makefile.inc
+include dns_query_time/Makefile.inc
+include dockerd/Makefile.inc
+include dovecot/Makefile.inc
+include elasticsearch/Makefile.inc
+include energid/Makefile.inc
+include example/Makefile.inc
+include exim/Makefile.inc
+include fail2ban/Makefile.inc
+include freeradius/Makefile.inc
+include gearman/Makefile.inc
+include go_expvar/Makefile.inc
+include haproxy/Makefile.inc
+include hddtemp/Makefile.inc
+include httpcheck/Makefile.inc
+include hpssa/Makefile.inc
+include icecast/Makefile.inc
+include ipfs/Makefile.inc
+include isc_dhcpd/Makefile.inc
+include litespeed/Makefile.inc
+include logind/Makefile.inc
+include megacli/Makefile.inc
+include memcached/Makefile.inc
+include mongodb/Makefile.inc
+include monit/Makefile.inc
+include mysql/Makefile.inc
+include nginx/Makefile.inc
+include nginx_plus/Makefile.inc
+include nvidia_smi/Makefile.inc
+include nsd/Makefile.inc
+include ntpd/Makefile.inc
+include ovpn_status_log/Makefile.inc
+include openldap/Makefile.inc
+include oracledb/Makefile.inc
+include phpfpm/Makefile.inc
+include portcheck/Makefile.inc
+include postfix/Makefile.inc
+include postgres/Makefile.inc
+include powerdns/Makefile.inc
+include proxysql/Makefile.inc
+include puppet/Makefile.inc
+include rabbitmq/Makefile.inc
+include redis/Makefile.inc
+include rethinkdbs/Makefile.inc
+include retroshare/Makefile.inc
+include riakkv/Makefile.inc
+include samba/Makefile.inc
+include sensors/Makefile.inc
+include smartd_log/Makefile.inc
+include spigotmc/Makefile.inc
+include springboot/Makefile.inc
+include squid/Makefile.inc
+include tomcat/Makefile.inc
+include tor/Makefile.inc
+include traefik/Makefile.inc
+include uwsgi/Makefile.inc
+include varnish/Makefile.inc
+include w1sensor/Makefile.inc
+include web_log/Makefile.inc
+
+pythonmodulesdir=$(pythondir)/python_modules
+dist_pythonmodules_DATA = \
+ python_modules/__init__.py \
+ $(NULL)
+
+basesdir=$(pythonmodulesdir)/bases
+dist_bases_DATA = \
+ python_modules/bases/__init__.py \
+ python_modules/bases/charts.py \
+ python_modules/bases/collection.py \
+ python_modules/bases/loaders.py \
+ python_modules/bases/loggers.py \
+ $(NULL)
+
+bases_framework_servicesdir=$(basesdir)/FrameworkServices
+dist_bases_framework_services_DATA = \
+ python_modules/bases/FrameworkServices/__init__.py \
+ python_modules/bases/FrameworkServices/ExecutableService.py \
+ python_modules/bases/FrameworkServices/LogService.py \
+ python_modules/bases/FrameworkServices/MySQLService.py \
+ python_modules/bases/FrameworkServices/SimpleService.py \
+ python_modules/bases/FrameworkServices/SocketService.py \
+ python_modules/bases/FrameworkServices/UrlService.py \
+ $(NULL)
+
+third_partydir=$(pythonmodulesdir)/third_party
+dist_third_party_DATA = \
+ python_modules/third_party/__init__.py \
+ python_modules/third_party/ordereddict.py \
+ python_modules/third_party/lm_sensors.py \
+ python_modules/third_party/mcrcon.py \
+ python_modules/third_party/boinc_client.py \
+ python_modules/third_party/monotonic.py \
+ python_modules/third_party/filelock.py \
+ $(NULL)
+
+pythonyaml2dir=$(pythonmodulesdir)/pyyaml2
+dist_pythonyaml2_DATA = \
+ python_modules/pyyaml2/__init__.py \
+ python_modules/pyyaml2/composer.py \
+ python_modules/pyyaml2/constructor.py \
+ python_modules/pyyaml2/cyaml.py \
+ python_modules/pyyaml2/dumper.py \
+ python_modules/pyyaml2/emitter.py \
+ python_modules/pyyaml2/error.py \
+ python_modules/pyyaml2/events.py \
+ python_modules/pyyaml2/loader.py \
+ python_modules/pyyaml2/nodes.py \
+ python_modules/pyyaml2/parser.py \
+ python_modules/pyyaml2/reader.py \
+ python_modules/pyyaml2/representer.py \
+ python_modules/pyyaml2/resolver.py \
+ python_modules/pyyaml2/scanner.py \
+ python_modules/pyyaml2/serializer.py \
+ python_modules/pyyaml2/tokens.py \
+ $(NULL)
+
+pythonyaml3dir=$(pythonmodulesdir)/pyyaml3
+dist_pythonyaml3_DATA = \
+ python_modules/pyyaml3/__init__.py \
+ python_modules/pyyaml3/composer.py \
+ python_modules/pyyaml3/constructor.py \
+ python_modules/pyyaml3/cyaml.py \
+ python_modules/pyyaml3/dumper.py \
+ python_modules/pyyaml3/emitter.py \
+ python_modules/pyyaml3/error.py \
+ python_modules/pyyaml3/events.py \
+ python_modules/pyyaml3/loader.py \
+ python_modules/pyyaml3/nodes.py \
+ python_modules/pyyaml3/parser.py \
+ python_modules/pyyaml3/reader.py \
+ python_modules/pyyaml3/representer.py \
+ python_modules/pyyaml3/resolver.py \
+ python_modules/pyyaml3/scanner.py \
+ python_modules/pyyaml3/serializer.py \
+ python_modules/pyyaml3/tokens.py \
+ $(NULL)
+
+python_urllib3dir=$(pythonmodulesdir)/urllib3
+dist_python_urllib3_DATA = \
+ python_modules/urllib3/__init__.py \
+ python_modules/urllib3/_collections.py \
+ python_modules/urllib3/connection.py \
+ python_modules/urllib3/connectionpool.py \
+ python_modules/urllib3/exceptions.py \
+ python_modules/urllib3/fields.py \
+ python_modules/urllib3/filepost.py \
+ python_modules/urllib3/response.py \
+ python_modules/urllib3/poolmanager.py \
+ python_modules/urllib3/request.py \
+ $(NULL)
+
+python_urllib3_utildir=$(python_urllib3dir)/util
+dist_python_urllib3_util_DATA = \
+ python_modules/urllib3/util/__init__.py \
+ python_modules/urllib3/util/connection.py \
+ python_modules/urllib3/util/request.py \
+ python_modules/urllib3/util/response.py \
+ python_modules/urllib3/util/retry.py \
+ python_modules/urllib3/util/selectors.py \
+ python_modules/urllib3/util/ssl_.py \
+ python_modules/urllib3/util/timeout.py \
+ python_modules/urllib3/util/url.py \
+ python_modules/urllib3/util/wait.py \
+ $(NULL)
+
+python_urllib3_packagesdir=$(python_urllib3dir)/packages
+dist_python_urllib3_packages_DATA = \
+ python_modules/urllib3/packages/__init__.py \
+ python_modules/urllib3/packages/ordered_dict.py \
+ python_modules/urllib3/packages/six.py \
+ $(NULL)
+
+python_urllib3_backportsdir=$(python_urllib3_packagesdir)/backports
+dist_python_urllib3_backports_DATA = \
+ python_modules/urllib3/packages/backports/__init__.py \
+ python_modules/urllib3/packages/backports/makefile.py \
+ $(NULL)
+
+python_urllib3_ssl_match_hostnamedir=$(python_urllib3_packagesdir)/ssl_match_hostname
+dist_python_urllib3_ssl_match_hostname_DATA = \
+ python_modules/urllib3/packages/ssl_match_hostname/__init__.py \
+ python_modules/urllib3/packages/ssl_match_hostname/_implementation.py \
+ $(NULL)
+
+python_urllib3_contribdir=$(python_urllib3dir)/contrib
+dist_python_urllib3_contrib_DATA = \
+ python_modules/urllib3/contrib/__init__.py \
+ python_modules/urllib3/contrib/appengine.py \
+ python_modules/urllib3/contrib/ntlmpool.py \
+ python_modules/urllib3/contrib/pyopenssl.py \
+ python_modules/urllib3/contrib/securetransport.py \
+ python_modules/urllib3/contrib/socks.py \
+ $(NULL)
+
+python_urllib3_securetransportdir=$(python_urllib3_contribdir)/_securetransport
+dist_python_urllib3_securetransport_DATA = \
+ python_modules/urllib3/contrib/_securetransport/__init__.py \
+ python_modules/urllib3/contrib/_securetransport/bindings.py \
+ python_modules/urllib3/contrib/_securetransport/low_level.py \
+ $(NULL)
diff --git a/collectors/python.d.plugin/README.md b/collectors/python.d.plugin/README.md
new file mode 100644
index 0000000..a05bc81
--- /dev/null
+++ b/collectors/python.d.plugin/README.md
@@ -0,0 +1,268 @@
+<!--
+title: "python.d.plugin"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/README.md
+-->
+
+# python.d.plugin
+
+`python.d.plugin` is a Netdata external plugin. It is an **orchestrator** for data collection modules written in `python`.
+
+1. It runs as an independent process (`ps fax` shows it)
+2. It is started and stopped automatically by Netdata
+3. It communicates with Netdata via a unidirectional pipe (sending data to the `netdata` daemon)
+4. It supports any number of data collection **modules**
+5. It allows each **module** to have one or more data collection **jobs**
+6. Each **job** collects one or more metrics from a single data source
+
+## Disclaimer
+
+Every module should be compatible with Python 2 and Python 3.
+All third-party libraries should be installed system-wide or in the `python_modules` directory.
+Module configurations are written in YAML and **pyYAML is required**.
+
+Every configuration file must have one of two formats:
+
+- Configuration for only one job:
+
+```yaml
+update_every : 2 # update frequency
+priority : 20000 # where it is shown on dashboard
+
+other_var1 : bla # variables passed to module
+other_var2 : alb
+```
+
+- Configuration for many jobs (e.g. mysql):
+
+```yaml
+# module defaults:
+update_every : 2
+priority : 20000
+
+local: # job name
+ update_every : 5 # job update frequency
+ other_var1 : some_val # module specific variable
+
+other_job:
+ priority : 5 # job position on dashboard
+ other_var2 : val # module specific variable
+```
+
+`update_every` and `priority` are always optional.
+
+## How to debug a python module
+
+```
+# become user netdata
+sudo su -s /bin/bash netdata
+```
+
+Depending on where Netdata was installed, execute one of the following commands to trace the execution of a python module:
+
+```
+# execute the plugin in debug mode, for a specific module
+/opt/netdata/usr/libexec/netdata/plugins.d/python.d.plugin <module> debug trace
+/usr/libexec/netdata/plugins.d/python.d.plugin <module> debug trace
+```
+
+Where `<module>` is the directory name under <https://github.com/netdata/netdata/tree/master/collectors/python.d.plugin>
+
+**Note**: If you would like to execute a collector in debug mode while it is still being run by Netdata, you can pass the `nolock` CLI option to the above commands.
+
+## How to write a new module
+
+Writing a new python module is simple. You just need to remember to include 5 major things:
+
+- **ORDER** global list
+- **CHARTS** global dictionary
+- **Service** class
+- **\_get_data** method
+- all code needs to be compatible with Python 2 (**≥ 2.7**) *and* 3 (**≥ 3.1**)
+
+If you plan to submit the module in a PR, make sure to go through the [PR checklist for new modules](#pull-request-checklist-for-python-plugins) beforehand to make sure you have updated all the files you need to.
+
+For a quick start, you can look at the [example
+plugin](https://raw.githubusercontent.com/netdata/netdata/master/collectors/python.d.plugin/example/example.chart.py).
+
+**Note**: If you are working 'locally' on a new collector and would like to run it in an already installed and running Netdata (as opposed to installing Netdata from source again with your new changes), you can copy the relevant file to where Netdata expects it and then either run `sudo service netdata restart` to have it picked up and used by Netdata, or run the updated collector in debug mode by following a process like the one below (this assumes you have [installed Netdata from a GitHub fork](https://learn.netdata.cloud/docs/agent/packaging/installer/methods/manual) you have made to do your development on).
+
+```bash
+# clone your fork (done once at the start but shown here for clarity)
+#git clone --branch my-example-collector https://github.com/mygithubusername/netdata.git --depth=100
+# go into your netdata source folder
+cd netdata
+# git pull your latest changes (assuming you built from a fork you are using to develop on)
+git pull
+# instead of running the installer we can just copy over the updated collector files
+#sudo ./netdata-installer.sh --dont-wait
+# copy over the file you have updated locally (pretending we are working on the 'example' collector)
+sudo cp collectors/python.d.plugin/example/example.chart.py /usr/libexec/netdata/python.d/
+# become user netdata
+sudo su -s /bin/bash netdata
+# run your updated collector in debug mode to see if it works without having to reinstall netdata
+/usr/libexec/netdata/plugins.d/python.d.plugin example debug trace nolock
+```
+
+### Global variables `ORDER` and `CHARTS`
+
+The `ORDER` list should contain the chart ids in the order the charts should appear. Example:
+
+```py
+ORDER = ['first_chart', 'second_chart', 'third_chart']
+```
+
+The `CHARTS` dictionary is a little bit trickier. It should contain the chart definition in the following format:
+
+```py
+CHARTS = {
+    id: {
+        'options': [name, title, units, family, context, charttype],
+        'lines': [
+            [unique_dimension_name, name, algorithm, multiplier, divisor]
+        ]
+    }
+}
+```
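+
+For example, a minimal concrete definition for a single chart with one dimension (the names here are illustrative, not taken from a real module) could be:
+
+```py
+CHARTS = {
+    'connections': {
+        'options': [None, 'Active Connections', 'connections', 'connections',
+                    'example.connections', 'line'],
+        'lines': [
+            ['active', None, 'absolute', 1, 1]
+        ]
+    }
+}
+```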
+
+All names are better explained in the [External Plugins](../) section.
+Parameters like `priority` and `update_every` are handled by `python.d.plugin`.
+
+### `Service` class
+
+Every module needs to implement its own `Service` class. This class should inherit from one of the framework classes:
+
+- `SimpleService`
+- `UrlService`
+- `SocketService`
+- `LogService`
+- `ExecutableService`
+
+It also needs to invoke the parent class constructor in a specific way, as well as assign the global variables to class variables.
+
+Simple example:
+
+```py
+from bases.FrameworkServices.UrlService import UrlService
+class Service(UrlService):
+ def __init__(self, configuration=None, name=None):
+ UrlService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
+```
+
+### `_get_data` collector/parser
+
+This method should grab raw data from `_get_raw_data`, parse it, and return a dictionary where the keys are unique dimension names, or `None` if no data is collected.
+
+Example:
+
+```py
+def _get_data(self):
+ try:
+ raw = self._get_raw_data().split(" ")
+ return {'active': int(raw[2])}
+ except (ValueError, AttributeError):
+ return None
+```
+
+## More about framework classes
+
+Every framework class has some user-configurable variables which are specific to this particular class. Those variables should have default values initialized in the child class constructor.
+
+If a module needs an additional user-configurable variable, it can be accessed from the `self.configuration` dictionary and assigned in the constructor or in a custom `check` method. Example:
+
+```py
+def __init__(self, configuration=None, name=None):
+ UrlService.__init__(self, configuration=configuration, name=name)
+ try:
+ self.baseurl = str(self.configuration['baseurl'])
+ except (KeyError, TypeError):
+ self.baseurl = "http://localhost:5001"
+```
+
+Classes implement `_get_raw_data` which should be used to grab raw data. This method usually returns a list of strings.
+
+### `SimpleService`
+
+_This is the last-resort class: if a new module cannot be written using any other framework class, this one can be used._
+
+_Example: `ceph`, `sensors`_
+
+It is the lowest-level class, which implements most of the module logic, like:
+
+- threading
+- handling run times
+- chart formatting
+- logging
+- chart creation and updating
+
+### `LogService`
+
+_Examples: `apache_cache`, `nginx_log`_
+
+_Variable from config file_: `log_path`.
+
+An object created from this class reads new lines from the file specified in the `log_path` variable. It checks that the file exists and is readable. `_get_raw_data` returns a list of strings, where each string is one line from the file specified in `log_path`.
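+
+A minimal sketch of a `LogService`-based module (the chart, dimension and parsing logic here are illustrative, not taken from a real module) might look like this:
+
+```py
+from bases.FrameworkServices.LogService import LogService
+
+ORDER = ['events']
+
+CHARTS = {
+    'events': {
+        'options': [None, 'Log Events', 'events/s', 'events', 'example.events', 'line'],
+        'lines': [
+            ['errors', None, 'incremental']
+        ]
+    }
+}
+
+
+class Service(LogService):
+    def __init__(self, configuration=None, name=None):
+        LogService.__init__(self, configuration=configuration, name=name)
+        self.order = ORDER
+        self.definitions = CHARTS
+        self.error_total = 0
+
+    def _get_data(self):
+        raw = self._get_raw_data()
+        if raw is None:
+            return None
+        # keep a running counter, so the 'incremental' algorithm can derive a rate
+        self.error_total += sum(1 for line in raw if 'error' in line)
+        return {'errors': self.error_total}
+```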
+
+### `ExecutableService`
+
+_Examples: `exim`, `postfix`_
+
+_Variable from config file_: `command`.
+
+This class allows executing a shell command in a secure way. It checks the `command` variable for invalid characters and won't proceed if it contains any of:
+
+- '&'
+- '|'
+- ';'
+- '>'
+- '\<'
+
+For additional security it uses python's `subprocess.Popen` (without the `shell=True` option) to execute the command. The command can be specified with an absolute or relative name. When a relative name is used, it will try to find `command` in the `PATH` environment variable, as well as in `/sbin` and `/usr/sbin`.
+
+`_get_raw_data` returns a list of decoded lines returned by `command`.
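+
+As a sketch, a module built on this class (assuming a hypothetical `example-tool stats` command that prints a single number) could look like:
+
+```py
+from bases.FrameworkServices.ExecutableService import ExecutableService
+
+ORDER = ['queue']
+
+CHARTS = {
+    'queue': {
+        'options': [None, 'Queue Length', 'items', 'queue', 'example.queue', 'line'],
+        'lines': [
+            ['queued', None, 'absolute']
+        ]
+    }
+}
+
+
+class Service(ExecutableService):
+    def __init__(self, configuration=None, name=None):
+        ExecutableService.__init__(self, configuration=configuration, name=name)
+        self.order = ORDER
+        self.definitions = CHARTS
+        self.command = 'example-tool stats'  # hypothetical command; real modules take it from the config
+
+    def _get_data(self):
+        try:
+            # _get_raw_data() returns the decoded output lines of self.command
+            return {'queued': int(self._get_raw_data()[0])}
+        except (TypeError, IndexError, ValueError):
+            return None
+```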
+
+### `UrlService`
+
+_Examples: `apache`, `nginx`, `tomcat`_
+
+_Multiple endpoints (urls) examples: [`rabbitmq`](/collectors/python.d.plugin/rabbitmq/README.md) (simpler),
+[`elasticsearch`](/collectors/python.d.plugin/elasticsearch/README.md) (threaded)_
+
+_Variables from config file_: `url`, `user`, `pass`.
+
+If data is grabbed by accessing a service over HTTP, this class can be used. It handles HTTP Basic Auth when `user` and `pass` credentials are specified.
+
+Please note that the config file can use different variables according to the specification of each module.
+
+`_get_raw_data` returns a list of UTF-8 decoded strings (lines).
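+
+A sketch of a constructor wiring these up (the URL is a placeholder; `UrlService` reads `url`, `user` and `pass` from the job configuration):
+
+```py
+from bases.FrameworkServices.UrlService import UrlService
+
+
+class Service(UrlService):
+    def __init__(self, configuration=None, name=None):
+        UrlService.__init__(self, configuration=configuration, name=name)
+        self.order = ORDER          # defined as shown earlier
+        self.definitions = CHARTS   # defined as shown earlier
+        # fall back to a local endpoint when no 'url' is configured (illustrative default)
+        self.url = self.configuration.get('url', 'http://localhost:8080/status')
+```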
+
+### `SocketService`
+
+_Examples: `dovecot`, `redis`_
+
+_Variables from config file_: `unix_socket`, `host`, `port`, `request`.
+
+The object will try to execute `request` using either `unix_socket` or a TCP/IP socket built from the combination of `host` and `port`. It can access unix sockets with the SOCK_STREAM or SOCK_DGRAM protocols, and IPv4/IPv6 TCP/IP sockets with the SOCK_STREAM setting.
+
+Sockets are accessed in non-blocking mode with a 15-second timeout.
+
+After every execution of `_get_raw_data` the socket is closed. To prevent this, the module needs to set the `_keep_alive` variable to `True` and implement a custom `_check_raw_data` method.
+
+`_check_raw_data` should take the raw data and return `True` if all data has been received, otherwise `False`. It should also do this in a fast and efficient way.
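+
+A sketch of these two pieces, assuming a hypothetical protocol that terminates a complete response with `END\n`:
+
+```py
+from bases.FrameworkServices.SocketService import SocketService
+
+
+class Service(SocketService):
+    def __init__(self, configuration=None, name=None):
+        SocketService.__init__(self, configuration=configuration, name=name)
+        self.order = ORDER          # defined as shown earlier
+        self.definitions = CHARTS   # defined as shown earlier
+        self.request = 'stats\n'    # hypothetical request string
+        self._keep_alive = True     # do not close the socket after every _get_raw_data()
+
+    def _check_raw_data(self, data):
+        # a response is complete once the terminator arrives; keep this check cheap
+        return data.endswith('END\n')
+```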
+
+## Pull Request Checklist for Python Plugins
+
+This is a generic checklist for submitting a new Python plugin for Netdata. It is by no means comprehensive.
+
+At minimum, to be buildable and testable, the PR needs to include:
+
+- The module itself, following proper naming conventions: `collectors/python.d.plugin/<module_dir>/<module_name>.chart.py`
+- A README.md file for the plugin under `collectors/python.d.plugin/<module_dir>`.
+- The configuration file for the module: `collectors/python.d.plugin/<module_dir>/<module_name>.conf`. Python config files are in YAML format and should include comments describing the available options. These instructions should also appear in the configuration section of the README.md.
+- A basic configuration for the plugin in the appropriate global config file: `collectors/python.d.plugin/python.d.conf`, which is also in YAML format. Either add a line that reads `# <module_name>: yes` if the module is to be enabled by default, or one that reads `<module_name>: no` if it is to be disabled by default.
+- A makefile for the plugin at `collectors/python.d.plugin/<module_dir>/Makefile.inc`. Check an existing plugin for what this should look like.
+- A line in `collectors/python.d.plugin/Makefile.am` including the above-mentioned makefile. Place it with the other plugin includes (please keep the includes sorted alphabetically).
+- Optionally, chart information in `web/gui/dashboard_info.js`. This generally involves specifying a name and icon for the section, and may include descriptions for the section or individual charts.
+- Optionally, some default alarm configurations for your collector in `health/health.d/<module_name>.conf` and a line adding `<module_name>.conf` in `health/Makefile.am`.
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/python.d.plugin/adaptec_raid/Makefile.inc b/collectors/python.d.plugin/adaptec_raid/Makefile.inc
new file mode 100644
index 0000000..716cdb2
--- /dev/null
+++ b/collectors/python.d.plugin/adaptec_raid/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += adaptec_raid/adaptec_raid.chart.py
+dist_pythonconfig_DATA += adaptec_raid/adaptec_raid.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += adaptec_raid/README.md adaptec_raid/Makefile.inc
+
diff --git a/collectors/python.d.plugin/adaptec_raid/README.md b/collectors/python.d.plugin/adaptec_raid/README.md
new file mode 100644
index 0000000..b14e8f9
--- /dev/null
+++ b/collectors/python.d.plugin/adaptec_raid/README.md
@@ -0,0 +1,80 @@
+<!--
+title: "Adaptec RAID controller monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/adaptec_raid/README.md
+sidebar_label: "Adaptec RAID"
+-->
+
+# Adaptec RAID controller monitoring with Netdata
+
+Collects logical and physical devices metrics using `arcconf` command-line utility.
+
+Executed commands:
+
+- `sudo -n arcconf GETCONFIG 1 LD`
+- `sudo -n arcconf GETCONFIG 1 PD`
+
+## Requirements
+
+The module uses `arcconf`, which can only be executed by `root`. It uses
+`sudo` and assumes that it is configured such that the `netdata` user can execute `arcconf` as root without a password.
+
+- Add the following to your `/etc/sudoers` file (`which arcconf` shows the full path to the binary):
+
+```bash
+netdata ALL=(root) NOPASSWD: /path/to/arcconf
+```
+
+- Reset Netdata's systemd
+ unit [CapabilityBoundingSet](https://www.freedesktop.org/software/systemd/man/systemd.exec.html#Capabilities) (Linux
+ distributions with systemd)
+
+The default CapabilityBoundingSet doesn't allow using `sudo`, and is quite strict in general. Resetting it is not optimal, but it is the next-best solution, since `arcconf` cannot otherwise be executed via `sudo`.
+
+As the `root` user, do the following:
+
+```bash
+mkdir /etc/systemd/system/netdata.service.d
+echo -e '[Service]\nCapabilityBoundingSet=~' | tee /etc/systemd/system/netdata.service.d/unset-capability-bounding-set.conf
+systemctl daemon-reload
+systemctl restart netdata.service
+```
+
+## Charts
+
+- Logical Device Status
+- Physical Device State
+- Physical Device S.M.A.R.T. warnings
+- Physical Device Temperature
+
+## Enable the collector
+
+The `adaptec_raid` collector is disabled by default. To enable it, use `edit-config` from the
+Netdata [config directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`, to edit the `python.d.conf`
+file.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d.conf
+```
+
+Change the value of the `adaptec_raid` setting to `yes`. Save the file and restart the Netdata Agent
+with `sudo systemctl restart netdata`, or the appropriate method for your system.
+
+## Configuration
+
+Edit the `python.d/adaptec_raid.conf` configuration file using `edit-config` from the
+Netdata [config directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/adaptec_raid.conf
+```
+
+![image](https://user-images.githubusercontent.com/22274335/47278133-6d306680-d601-11e8-87c2-cc9c0f42d686.png)
+
+---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fadaptec_raid%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/python.d.plugin/adaptec_raid/adaptec_raid.chart.py b/collectors/python.d.plugin/adaptec_raid/adaptec_raid.chart.py
new file mode 100644
index 0000000..564c2ce
--- /dev/null
+++ b/collectors/python.d.plugin/adaptec_raid/adaptec_raid.chart.py
@@ -0,0 +1,245 @@
+# -*- coding: utf-8 -*-
+# Description: adaptec_raid netdata python.d module
+# Author: Ilya Mashchenko (ilyam8)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+
+import re
+from copy import deepcopy
+
+from bases.FrameworkServices.ExecutableService import ExecutableService
+from bases.collection import find_binary
+
+disabled_by_default = True
+
+update_every = 5
+
+ORDER = [
+ 'ld_status',
+ 'pd_state',
+ 'pd_smart_warnings',
+ 'pd_temperature',
+]
+
+CHARTS = {
+ 'ld_status': {
+ 'options': [None, 'Status Is Not OK', 'bool', 'logical devices', 'adapter_raid.ld_status', 'line'],
+ 'lines': []
+ },
+ 'pd_state': {
+ 'options': [None, 'State Is Not OK', 'bool', 'physical devices', 'adapter_raid.pd_state', 'line'],
+ 'lines': []
+ },
+ 'pd_smart_warnings': {
+ 'options': [None, 'S.M.A.R.T warnings', 'count', 'physical devices',
+ 'adapter_raid.smart_warnings', 'line'],
+ 'lines': []
+ },
+ 'pd_temperature': {
+ 'options': [None, 'Temperature', 'celsius', 'physical devices', 'adapter_raid.temperature', 'line'],
+ 'lines': []
+ },
+}
+
+SUDO = 'sudo'
+ARCCONF = 'arcconf'
+
+BAD_LD_STATUS = (
+ 'Degraded',
+ 'Failed',
+)
+
+GOOD_PD_STATUS = (
+ 'Online',
+)
+
+RE_LD = re.compile(
+ r'Logical [dD]evice number\s+([0-9]+).*?'
+ r'Status of [lL]ogical [dD]evice\s+: ([a-zA-Z]+)'
+)
+
+
+def find_lds(d):
+ d = ' '.join(v.strip() for v in d)
+ return [LD(*v) for v in RE_LD.findall(d)]
+
+
+def find_pds(d):
+ pds = list()
+ pd = PD()
+
+ for row in d:
+ row = row.strip()
+ if row.startswith('Device #'):
+ pd = PD()
+ pd.id = row.split('#')[-1]
+ elif not pd.id:
+ continue
+
+ if row.startswith('State'):
+ v = row.split()[-1]
+ pd.state = v
+ elif row.startswith('S.M.A.R.T. warnings'):
+ v = row.split()[-1]
+ pd.smart_warnings = v
+ elif row.startswith('Temperature'):
+ v = row.split(':')[-1].split()[0]
+ pd.temperature = v
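+ # 'NCQ status' is the last attribute printed per device, so the completed PD can be flushed here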
+ elif row.startswith('NCQ status'):
+ if pd.id and pd.state and pd.smart_warnings:
+ pds.append(pd)
+ pd = PD()
+
+ return pds
+
+
+class LD:
+ def __init__(self, ld_id, status):
+ self.id = ld_id
+ self.status = status
+
+ def data(self):
+ return {
+ 'ld_{0}_status'.format(self.id): int(self.status in BAD_LD_STATUS)
+ }
+
+
+class PD:
+ def __init__(self):
+ self.id = None
+ self.state = None
+ self.smart_warnings = None
+ self.temperature = None
+
+ def data(self):
+ data = {
+ 'pd_{0}_state'.format(self.id): int(self.state not in GOOD_PD_STATUS),
+ 'pd_{0}_smart_warnings'.format(self.id): self.smart_warnings,
+ }
+ if self.temperature and self.temperature.isdigit():
+ data['pd_{0}_temperature'.format(self.id)] = self.temperature
+
+ return data
+
+
+class Arcconf:
+ def __init__(self, arcconf):
+ self.arcconf = arcconf
+
+ def ld_info(self):
+ return [self.arcconf, 'GETCONFIG', '1', 'LD']
+
+ def pd_info(self):
+ return [self.arcconf, 'GETCONFIG', '1', 'PD']
+
+
+# TODO: hardcoded sudo...
+class SudoArcconf:
+ def __init__(self, arcconf, sudo):
+ self.arcconf = Arcconf(arcconf)
+ self.sudo = sudo
+
+ def ld_info(self):
+ return [self.sudo, '-n'] + self.arcconf.ld_info()
+
+ def pd_info(self):
+ return [self.sudo, '-n'] + self.arcconf.pd_info()
+
+
+class Service(ExecutableService):
+ def __init__(self, configuration=None, name=None):
+ ExecutableService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = deepcopy(CHARTS)
+ self.use_sudo = self.configuration.get('use_sudo', True)
+ self.arcconf = None
+
+ def execute(self, command, stderr=False):
+ return self._get_raw_data(command=command, stderr=stderr)
+
+ def check(self):
+ arcconf = find_binary(ARCCONF)
+ if not arcconf:
+ self.error('can\'t locate "{0}" binary'.format(ARCCONF))
+ return False
+
+ sudo = find_binary(SUDO)
+ if self.use_sudo:
+ if not sudo:
+ self.error('can\'t locate "{0}" binary'.format(SUDO))
+ return False
+ err = self.execute([sudo, '-n', '-v'], True)
+ if err:
+ self.error(' '.join(err))
+ return False
+
+ if self.use_sudo:
+ self.arcconf = SudoArcconf(arcconf, sudo)
+ else:
+ self.arcconf = Arcconf(arcconf)
+
+ lds = self.get_lds()
+ if not lds:
+ return False
+
+ self.debug('discovered logical devices ids: {0}'.format([ld.id for ld in lds]))
+
+ pds = self.get_pds()
+ if not pds:
+ return False
+
+ self.debug('discovered physical devices ids: {0}'.format([pd.id for pd in pds]))
+
+ self.update_charts(lds, pds)
+ return True
+
+ def get_data(self):
+ data = dict()
+
+ for ld in self.get_lds():
+ data.update(ld.data())
+
+ for pd in self.get_pds():
+ data.update(pd.data())
+
+ return data
+
+ def get_lds(self):
+ raw_lds = self.execute(self.arcconf.ld_info())
+ if not raw_lds:
+ return None
+
+ lds = find_lds(raw_lds)
+ if not lds:
+ self.error('failed to parse "{0}" output'.format(' '.join(self.arcconf.ld_info())))
+ self.debug('output: {0}'.format(raw_lds))
+ return None
+ return lds
+
+ def get_pds(self):
+ raw_pds = self.execute(self.arcconf.pd_info())
+ if not raw_pds:
+ return None
+
+ pds = find_pds(raw_pds)
+ if not pds:
+ self.error('failed to parse "{0}" output'.format(' '.join(self.arcconf.pd_info())))
+ self.debug('output: {0}'.format(raw_pds))
+ return None
+ return pds
+
+ def update_charts(self, lds, pds):
+ charts = self.definitions
+ for ld in lds:
+ dim = ['ld_{0}_status'.format(ld.id), 'ld {0}'.format(ld.id)]
+ charts['ld_status']['lines'].append(dim)
+
+ for pd in pds:
+ dim = ['pd_{0}_state'.format(pd.id), 'pd {0}'.format(pd.id)]
+ charts['pd_state']['lines'].append(dim)
+
+ dim = ['pd_{0}_smart_warnings'.format(pd.id), 'pd {0}'.format(pd.id)]
+ charts['pd_smart_warnings']['lines'].append(dim)
+
+ dim = ['pd_{0}_temperature'.format(pd.id), 'pd {0}'.format(pd.id)]
+ charts['pd_temperature']['lines'].append(dim)
diff --git a/collectors/python.d.plugin/adaptec_raid/adaptec_raid.conf b/collectors/python.d.plugin/adaptec_raid/adaptec_raid.conf
new file mode 100644
index 0000000..fa462ec
--- /dev/null
+++ b/collectors/python.d.plugin/adaptec_raid/adaptec_raid.conf
@@ -0,0 +1,53 @@
+# netdata python.d.plugin configuration for adaptec raid
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+# ----------------------------------------------------------------------
diff --git a/collectors/python.d.plugin/alarms/Makefile.inc b/collectors/python.d.plugin/alarms/Makefile.inc
new file mode 100644
index 0000000..c2de117
--- /dev/null
+++ b/collectors/python.d.plugin/alarms/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += alarms/alarms.chart.py
+dist_pythonconfig_DATA += alarms/alarms.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += alarms/README.md alarms/Makefile.inc
+
diff --git a/collectors/python.d.plugin/alarms/README.md b/collectors/python.d.plugin/alarms/README.md
new file mode 100644
index 0000000..ea96061
--- /dev/null
+++ b/collectors/python.d.plugin/alarms/README.md
@@ -0,0 +1,58 @@
+<!--
+title: "Alarms"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/alarms/README.md
+-->
+
+# Alarms - graphing Netdata alarm states over time
+
+This collector creates an 'Alarms' menu with one line plot showing alarm states over time. Alarm states are mapped to integer values according to the below default mapping. Any alarm status types not in this mapping will be ignored (Note: This mapping can be changed by editing the `status_map` in the `alarms.conf` file). If you would like to learn more about the different alarm statuses check out the docs [here](https://learn.netdata.cloud/docs/agent/health/reference#alarm-statuses).
+
+```py
+{
+ 'CLEAR': 0,
+ 'WARNING': 1,
+ 'CRITICAL': 2
+}
+```
+
+## Charts
+
+Below is an example of the chart produced when running `stress-ng --all 2` for a few minutes. You can see the various warning and critical alarms raised.
+
+![alarms collector](https://user-images.githubusercontent.com/1153921/101641493-0b086a80-39ef-11eb-9f55-0713e5dfb19f.png)
+
+## Configuration
+
+Enable the collector and restart Netdata.
+
+```bash
+cd /etc/netdata/
+sudo ./edit-config python.d.conf
+# Set `alarms: no` to `alarms: yes`
+sudo systemctl restart netdata
+```
+
+If needed, edit the `python.d/alarms.conf` configuration file using `edit-config` from your agent's [config
+directory](/docs/configure/nodes.md), which is usually at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/alarms.conf
+```
+
+The `alarms` specific part of the `alarms.conf` file should look like this:
+
+```yaml
+# what url to pull data from
+local:
+ url: 'http://127.0.0.1:19999/api/v1/alarms?all'
+ # define how to map alarm status to numbers for the chart
+ status_map:
+ CLEAR: 0
+ WARNING: 1
+ CRITICAL: 2
+```
+
+It defaults to pulling all alarms at each time step from the Netdata REST API at `http://127.0.0.1:19999/api/v1/alarms?all`.
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Falarms%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/alarms/alarms.chart.py b/collectors/python.d.plugin/alarms/alarms.chart.py
new file mode 100644
index 0000000..973a1f3
--- /dev/null
+++ b/collectors/python.d.plugin/alarms/alarms.chart.py
@@ -0,0 +1,71 @@
+# -*- coding: utf-8 -*-
+# Description: alarms netdata python.d module
+# Author: andrewm4894
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from json import loads
+
+from bases.FrameworkServices.UrlService import UrlService
+
+update_every = 10
+disabled_by_default = True
+
+
+def charts_template(sm):
+ order = [
+ 'alarms',
+ ]
+
+ mappings = ', '.join(['{0}={1}'.format(k, v) for k, v in sm.items()])
+ charts = {
+ 'alarms': {
+ 'options': [None, 'Alarms ({0})'.format(mappings), 'status', 'alarms', 'alarms.status', 'line'],
+ 'lines': [],
+ 'variables': [
+ ['alarms_num'],
+ ]
+ }
+ }
+ return order, charts
+
+
+DEFAULT_STATUS_MAP = {'CLEAR': 0, 'WARNING': 1, 'CRITICAL': 2}
+
+DEFAULT_URL = 'http://127.0.0.1:19999/api/v1/alarms?all'
+
+
+class Service(UrlService):
+ def __init__(self, configuration=None, name=None):
+ UrlService.__init__(self, configuration=configuration, name=name)
+ self.sm = self.configuration.get('status_map', DEFAULT_STATUS_MAP)
+ self.order, self.definitions = charts_template(self.sm)
+ self.url = self.configuration.get('url', DEFAULT_URL)
+ self.collected_alarms = set()
+
+ def _get_data(self):
+ raw_data = self._get_raw_data()
+ if raw_data is None:
+ return None
+
+ raw_data = loads(raw_data)
+ alarms = raw_data.get('alarms', {})
+
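+ # map each alarm's status string to its configured numeric value; statuses missing from status_map are skipped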
+ data = {a: self.sm[alarms[a]['status']] for a in alarms if alarms[a]['status'] in self.sm}
+ self.update_charts(alarms, data)
+ data['alarms_num'] = len(data)
+
+ return data
+
+ def update_charts(self, alarms, data):
+ if not self.charts:
+ return
+
+ for a in data:
+ if a not in self.collected_alarms:
+ self.collected_alarms.add(a)
+ self.charts['alarms'].add_dimension([a, a, 'absolute', '1', '1'])
+
+ for a in list(self.collected_alarms):
+ if a not in alarms:
+ self.collected_alarms.remove(a)
+ self.charts['alarms'].del_dimension(a, hide=False)
diff --git a/collectors/python.d.plugin/alarms/alarms.conf b/collectors/python.d.plugin/alarms/alarms.conf
new file mode 100644
index 0000000..fd7780c
--- /dev/null
+++ b/collectors/python.d.plugin/alarms/alarms.conf
@@ -0,0 +1,50 @@
+# netdata python.d.plugin configuration for the alarms collector
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 10
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+
+# what url to pull data from
+local:
+ url: 'http://127.0.0.1:19999/api/v1/alarms?all'
+ # define how to map alarm status to numbers for the chart
+ status_map:
+ CLEAR: 0
+ WARNING: 1
+ CRITICAL: 2
diff --git a/collectors/python.d.plugin/am2320/Makefile.inc b/collectors/python.d.plugin/am2320/Makefile.inc
new file mode 100644
index 0000000..48e5a88
--- /dev/null
+++ b/collectors/python.d.plugin/am2320/Makefile.inc
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# install these files
+dist_python_DATA += am2320/am2320.chart.py
+dist_pythonconfig_DATA += am2320/am2320.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += am2320/README.md am2320/Makefile.inc
diff --git a/collectors/python.d.plugin/am2320/README.md b/collectors/python.d.plugin/am2320/README.md
new file mode 100644
index 0000000..14ddaa7
--- /dev/null
+++ b/collectors/python.d.plugin/am2320/README.md
@@ -0,0 +1,54 @@
+<!--
+title: "AM2320 sensor monitoring with netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/am2320/README.md
+sidebar_label: "AM2320"
+-->
+
+# AM2320 sensor monitoring with netdata
+
+Displays a graph of the temperature and humidity from an AM2320 sensor.
+
+## Requirements
+
+- Adafruit Circuit Python AM2320 library
+- Adafruit AM2320 I2C sensor
+- Python 3 (Adafruit libraries are not Python 2.x compatible)
+
+It produces the following charts:
+
+1. **Temperature**
+2. **Humidity**
+
+## Configuration
+
+Edit the `python.d/am2320.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/am2320.conf
+```
+
+Raspberry Pi instructions:
+
+Hardware install:
+
+Connect the AM2320 to the Raspberry Pi I2C pins.
+
+Raspberry Pi 3B/4 Pins:
+
+- Board 3.3V (pin 1) to sensor VIN (pin 1)
+- Board SDA (pin 3) to sensor SDA (pin 2)
+- Board GND (pin 6) to sensor GND (pin 3)
+- Board SCL (pin 5) to sensor SCL (pin 4)
+
+You may also need to add two I2C pullup resistors if your board does not already have them. The Raspberry Pi does have internal pullup resistors, but it doesn't hurt to add external ones anyway. You can use anything from 2.2K to 10K; we will just use 10K. One resistor goes from VDD to SCL and another from VDD to SDA.
+
+Software install:
+
+- `sudo pip3 install adafruit-circuitpython-am2320`
+- edit `/etc/netdata/netdata.conf`
+- find `[plugin:python.d]`
+- add `command options = -ppython3` (see the example below)
+- save the file
+- restart the netdata service
+- check the dashboard
+
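+After these steps, the relevant section of `netdata.conf` should look like this:
+
+```
+[plugin:python.d]
+    command options = -ppython3
+```
+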
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fam2320%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/am2320/am2320.chart.py b/collectors/python.d.plugin/am2320/am2320.chart.py
new file mode 100644
index 0000000..8e66544
--- /dev/null
+++ b/collectors/python.d.plugin/am2320/am2320.chart.py
@@ -0,0 +1,68 @@
+# _*_ coding: utf-8 _*_
+# Description: AM2320 netdata module
+# Author: tommybuck
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+try:
+ import board
+ import busio
+ import adafruit_am2320
+
+ HAS_AM2320 = True
+except ImportError:
+ HAS_AM2320 = False
+
+from bases.FrameworkServices.SimpleService import SimpleService
+
+ORDER = [
+ 'temperature',
+ 'humidity',
+]
+
+CHARTS = {
+ 'temperature': {
+ 'options': [None, 'Temperature', 'celsius', 'temperature', 'am2320.temperature', 'line'],
+ 'lines': [
+ ['temperature']
+ ]
+ },
+ 'humidity': {
+ 'options': [None, 'Relative Humidity', 'percentage', 'humidity', 'am2320.humidity', 'line'],
+ 'lines': [
+ ['humidity']
+ ]
+ }
+}
+
+
+class Service(SimpleService):
+ def __init__(self, configuration=None, name=None):
+ SimpleService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
+ self.am = None
+
+ def check(self):
+ if not HAS_AM2320:
+ self.error("Could not find the adafruit-circuitpython-am2320 package.")
+ return False
+
+ try:
+ i2c = busio.I2C(board.SCL, board.SDA)
+ self.am = adafruit_am2320.AM2320(i2c)
+ except ValueError as error:
+ self.error("error on creating I2C shared bus : {0}".format(error))
+ return False
+
+ return True
+
+ def get_data(self):
+ try:
+ return {
+ 'temperature': self.am.temperature,
+ 'humidity': self.am.relative_humidity,
+ }
+
+ except (OSError, RuntimeError) as error:
+ self.error(error)
+ return None
diff --git a/collectors/python.d.plugin/am2320/am2320.conf b/collectors/python.d.plugin/am2320/am2320.conf
new file mode 100644
index 0000000..c6b9885
--- /dev/null
+++ b/collectors/python.d.plugin/am2320/am2320.conf
@@ -0,0 +1,68 @@
+# netdata python.d.plugin configuration for am2320 temperature/humidity sensor
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# Additionally to the above, am2320 also supports the following:
+#
+# - none
+#
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
diff --git a/collectors/python.d.plugin/anomalies/Makefile.inc b/collectors/python.d.plugin/anomalies/Makefile.inc
new file mode 100644
index 0000000..94937b3
--- /dev/null
+++ b/collectors/python.d.plugin/anomalies/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += anomalies/anomalies.chart.py
+dist_pythonconfig_DATA += anomalies/anomalies.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += anomalies/README.md anomalies/Makefile.inc
+
diff --git a/collectors/python.d.plugin/anomalies/README.md b/collectors/python.d.plugin/anomalies/README.md
new file mode 100644
index 0000000..862f4f3
--- /dev/null
+++ b/collectors/python.d.plugin/anomalies/README.md
@@ -0,0 +1,231 @@
+<!--
+title: "Anomaly detection with Netdata"
+description: "Use ML-driven anomaly detection to narrow your focus to only affected metrics and services/processes on your node to shorten root cause analysis."
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/anomalies/README.md
+sidebar_label: "Anomalies"
+-->
+
+# Anomaly detection with Netdata
+
+This collector uses the Python [PyOD](https://pyod.readthedocs.io/en/latest/index.html) library to perform unsupervised [anomaly detection](https://en.wikipedia.org/wiki/Anomaly_detection) on your Netdata charts and/or dimensions.
+
+Instead of this collector just _collecting_ data, it also does some computation on the data it collects to return an anomaly probability and anomaly flag for each chart or custom model you define. This computation consists of a **train** function that runs every `train_n_secs` to train the ML models to learn what 'normal' typically looks like on your node. At each iteration there is also a **predict** function that uses the latest trained models and most recent metrics to produce an anomaly probability and anomaly flag for each chart or custom model you define.
+
+> As this is a somewhat unique collector and involves often subjective concepts like anomalies and anomaly probabilities, we would love to hear any feedback on it from the community. Please let us know on the [community forum](https://community.netdata.cloud/t/anomalies-collector-feedback-megathread/767) or drop us a note at [analytics-ml-team@netdata.cloud](mailto:analytics-ml-team@netdata.cloud) for any and all feedback, both positive and negative. This sort of feedback is priceless to help us make complex features more useful.
+
+## Charts
+
+Two charts are produced:
+
+- **Anomaly Probability** (`anomalies.probability`): This chart shows the probability that the latest observed data is anomalous based on the trained model for that chart (using the [`predict_proba()`](https://pyod.readthedocs.io/en/latest/api_cc.html#pyod.models.base.BaseDetector.predict_proba) method of the trained PyOD model).
+- **Anomaly** (`anomalies.anomaly`): This chart shows `1` or `0` predictions of if the latest observed data is considered anomalous or not based on the trained model (using the [`predict()`](https://pyod.readthedocs.io/en/latest/api_cc.html#pyod.models.base.BaseDetector.predict) method of the trained PyOD model).
+
+Below is an example of the charts produced by this collector and how they might look when things are 'normal' on the node. The anomaly probabilities tend to bounce randomly around a typically low probability range; one or two might randomly jump or drift outside of this range every now and then and show up as anomalies on the anomaly chart.
+
+![netdata-anomalies-collector-normal](https://user-images.githubusercontent.com/2178292/100663699-99755000-334e-11eb-922f-0c41a0176484.jpg)
+
+If we then go onto the system and run a command like `stress-ng --all 2` to create some [stress](https://wiki.ubuntu.com/Kernel/Reference/stress-ng), we see some charts begin to have anomaly probabilities that jump outside the typical range. When the anomaly probabilities change enough, we will start seeing anomalies being flagged on the `anomalies.anomaly` chart. The idea is that these charts are the most anomalous right now so could be a good place to start your troubleshooting.
+
+![netdata-anomalies-collector-abnormal](https://user-images.githubusercontent.com/2178292/100663710-9bd7aa00-334e-11eb-9d14-76fda73bc309.jpg)
+
+Then, as the issue passes, the anomaly probabilities should settle back down into their 'normal' range again.
+
+![netdata-anomalies-collector-normal-again](https://user-images.githubusercontent.com/2178292/100666681-481a9000-3351-11eb-9979-64728ee2dfb6.jpg)
+
+## Requirements
+
+- This collector will only work with Python 3 and requires the packages below be installed.
+
+```bash
+# become netdata user
+sudo su -s /bin/bash netdata
+# install required packages for the netdata user
+pip3 install --user netdata-pandas==0.0.32 numba==0.50.1 scikit-learn==0.23.2 pyod==0.8.3
+```
+
+## Configuration
+
+Install the Python requirements above, enable the collector and restart Netdata.
+
+```bash
+cd /etc/netdata/
+sudo ./edit-config python.d.conf
+# Set `anomalies: no` to `anomalies: yes`
+sudo systemctl restart netdata
+```
+
+The configuration for the anomalies collector defines how it will behave on your system, and it might take some experimentation over time to set it optimally for your node. Out of the box, the config comes with some [sane defaults](https://www.netdata.cloud/blog/redefining-monitoring-netdata/) to get you started that try to balance the flexibility and power of the ML models with the goal of being as cheap as possible in terms of cost on the node's resources.
+
+_**Note**: If you are unsure about any of the below configuration options then it's best to just ignore all this and leave the `anomalies.conf` file alone to begin with. Then you can return to it later if you would like to tune things a bit more once the collector is running for a while and you have a feeling for its performance on your node._
+
+Edit the `python.d/anomalies.conf` configuration file using `edit-config` from your agent's [config
+directory](/docs/configure/nodes.md), which is usually at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/anomalies.conf
+```
+
+The default configuration should look something like this. Here you can see each parameter (with sane defaults) and some information about each one and what it does.
+
+```yaml
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+
+# Pull data from local Netdata node.
+local:
+ name: 'local'
+
+ # Host to pull data from.
+ host: '127.0.0.1:19999'
+
+ # Username and Password for Netdata if using basic auth.
+ # username: '???'
+ # password: '???'
+
+ # Use http or https to pull data
+ protocol: 'http'
+
+ # What charts to pull data for - A regex like 'system\..*|' or 'system\..*|apps.cpu|apps.mem' etc.
+ charts_regex: 'system\..*'
+
+ # Charts to exclude, useful if you would like to exclude some specific charts.
+ # Note: should be a ',' separated string like 'chart.name,chart.name'.
+ charts_to_exclude: 'system.uptime,system.entropy'
+
+ # What model to use - can be one of 'pca', 'hbos', 'iforest', 'cblof', 'loda', 'copod' or 'feature_bagging'.
+ # More details here: https://pyod.readthedocs.io/en/latest/pyod.models.html.
+ model: 'pca'
+
+ # Max number of observations to train on, to help cap compute cost of training model if you set a very large train_n_secs.
+ train_max_n: 100000
+
+ # How often to re-train the model (assuming update_every=1 then train_every_n=1800 represents (re)training every 30 minutes).
+ # Note: If you want to turn off re-training set train_every_n=0 and after initial training the models will not be retrained.
+ train_every_n: 1800
+
+ # The length of the window of data to train on (14400 = last 4 hours).
+ train_n_secs: 14400
+
+ # How many prediction steps after a train event to just use previous prediction value for.
+ # Used to reduce possibility of the training step itself appearing as an anomaly on the charts.
+ train_no_prediction_n: 10
+
+ # If you would like to train the model for the first time on a specific window then you can define it using the below two variables.
+ # Start of training data for initial model.
+ # initial_train_data_after: 1604578857
+
+ # End of training data for initial model.
+ # initial_train_data_before: 1604593257
+
+ # If you would like to ignore recent data in training then you can offset it by offset_n_secs.
+ offset_n_secs: 0
+
+ # How many lagged values of each dimension to include in the 'feature vector' each model is trained on.
+ lags_n: 5
+
+ # How much smoothing to apply to each dimension in the 'feature vector' each model is trained on.
+ smooth_n: 3
+
+ # How many differences to take in preprocessing your data.
+ # More info on differencing here: https://en.wikipedia.org/wiki/Autoregressive_integrated_moving_average#Differencing
+ # diffs_n=0 would mean training models on the raw values of each dimension.
+ # diffs_n=1 means everything is done in terms of differences.
+ diffs_n: 1
+
+ # What is the typical proportion of anomalies in your data on average?
+ # This parameter can control the sensitivity of your models to anomalies.
+ # Some discussion here: https://github.com/yzhao062/pyod/issues/144
+ contamination: 0.001
+
+ # Set to true to include an "average_prob" dimension on anomalies probability chart which is
+ # just the average of all anomaly probabilities at each time step
+ include_average_prob: true
+
+ # Define any custom models you would like to create anomaly probabilities for, some examples below to show how.
+ # For example, the configuration below creates two custom models: one to run anomaly detection on user and system cpu for our demo servers,
+ # and one on the cpu and mem apps metrics for the python.d.plugin.
+ # custom_models:
+ # - name: 'demos_cpu'
+ # dimensions: 'london.my-netdata.io::system.cpu|user,london.my-netdata.io::system.cpu|system,newyork.my-netdata.io::system.cpu|user,newyork.my-netdata.io::system.cpu|system'
+ # - name: 'apps_python_d_plugin'
+ # dimensions: 'apps.cpu|python.d.plugin,apps.mem|python.d.plugin'
+
+ # Set to true to normalize, using min-max standardization, features used for the custom models.
+ # Useful if your custom models contain dimensions on very different scales and the model you use does
+ # not internally do its own normalization. Usually best to leave as false.
+ # custom_models_normalize: false
+```
+
+## Custom models
+
+In the `anomalies.conf` file you can also define some "custom models" which you can use to group one or more metrics into a single model much like is done by default for the charts you specify. This is useful if you have a handful of metrics that exist in different charts but perhaps are related to the same underlying thing you would like to perform anomaly detection on, for example a specific app or user.
+
+To define a custom model you would include configuration like below in `anomalies.conf`. By default there should already be some commented out examples in there.
+
+`name` is a name you give your custom model, this is what will appear alongside any other specified charts in the `anomalies.probability` and `anomalies.anomaly` charts. `dimensions` is a string of metrics you want to include in your custom model. By default the [netdata-pandas](https://github.com/netdata/netdata-pandas) library used to pull the data from Netdata uses a "chart.a|dim.1" type of naming convention in the pandas columns it returns, hence the `dimensions` string should look like "chart.name|dimension.name,chart.name|dimension.name". The examples below hopefully make this clear.
+
+```yaml
+custom_models:
+ # a model for anomaly detection on the netdata user in terms of cpu, mem, threads, processes and sockets.
+ - name: 'user_netdata'
+ dimensions: 'users.cpu|netdata,users.mem|netdata,users.threads|netdata,users.processes|netdata,users.sockets|netdata'
+ # a model for anomaly detection on the netdata python.d.plugin app in terms of cpu, mem, threads, processes and sockets.
+ - name: 'apps_python_d_plugin'
+ dimensions: 'apps.cpu|python.d.plugin,apps.mem|python.d.plugin,apps.threads|python.d.plugin,apps.processes|python.d.plugin,apps.sockets|python.d.plugin'
+
+custom_models_normalize: false
+```
+
+## Troubleshooting
+
+To see any relevant log messages you can use a command like below.
+
+```bash
+grep 'anomalies' /var/log/netdata/error.log
+```
+
+If you would like to see more detail, you can log in as the `netdata` user and run the collector in debug mode:
+
+```bash
+# become netdata user
+sudo su -s /bin/bash netdata
+# run collector in debug using `nolock` option if netdata is already running the collector itself.
+/usr/libexec/netdata/plugins.d/python.d.plugin anomalies debug trace nolock
+```
+
+## Deepdive tutorial
+
+If you would like to go deeper on what exactly the anomalies collector is doing under the hood then check out this [deepdive tutorial](https://github.com/netdata/community/blob/main/netdata-agent-api/netdata-pandas/anomalies_collector_deepdive.ipynb) in our community repo, where you can play around with some data from our demo servers (or your own if it's accessible to you) and work through the calculations step by step.
+
+(Note: as it's a Jupyter Notebook, it might render a little prettier on [nbviewer](https://nbviewer.jupyter.org/github/netdata/community/blob/main/netdata-agent-api/netdata-pandas/anomalies_collector_deepdive.ipynb))
+
+## Notes
+
+- Python 3 is required as the [`netdata-pandas`](https://github.com/netdata/netdata-pandas) package uses Python async libraries ([asks](https://pypi.org/project/asks/) and [trio](https://pypi.org/project/trio/)) to make asynchronous calls to the [Netdata REST API](https://learn.netdata.cloud/docs/agent/web/api) to get the required data for each chart.
+- Python 3 is also required for the underlying ML libraries of [numba](https://pypi.org/project/numba/), [scikit-learn](https://pypi.org/project/scikit-learn/), and [PyOD](https://pypi.org/project/pyod/).
+- It may take a few hours or so (depending on your choice of `train_n_secs`) for the collector to 'settle' into its typical behaviour in terms of the trained models and probabilities you will see in the normal running of your node.
+- As this collector does most of the work in Python itself, with [PyOD](https://pyod.readthedocs.io/en/latest/) leveraging [numba](https://numba.pydata.org/) under the hood, you may want to try it out first on a test or development system to get a sense of its performance characteristics on a node similar to where you would like to use it.
+- `lags_n`, `smooth_n`, and `diffs_n` together define the preprocessing done to the raw data before models are trained and before each prediction. This essentially creates a [feature vector](https://en.wikipedia.org/wiki/Feature_(machine_learning)#:~:text=In%20pattern%20recognition%20and%20machine,features%20that%20represent%20some%20object.&text=Feature%20vectors%20are%20often%20combined,score%20for%20making%20a%20prediction.) for each chart model (or each custom model). The default settings for these parameters aim to create a rolling matrix of recent smoothed [differenced](https://en.wikipedia.org/wiki/Autoregressive_integrated_moving_average#Differencing) values for each chart. The aim of the model then is to score how unusual this 'matrix' of features is for each chart based on what it has learned as 'normal' from the training data. So as opposed to just looking at the single most recent value of a dimension and considering how strange it is, this approach looks at a recent smoothed window of all dimensions for a chart (or dimensions in a custom model) and asks how unusual the data as a whole looks. This should be more flexible in capturing a wider range of [anomaly types](https://andrewm4894.com/2020/10/19/different-types-of-time-series-anomalies/) and be somewhat more robust to temporary 'spikes' in the data that tend to always be happening somewhere in your metrics but often are not the most important type of anomaly (this is all covered in a lot more detail in the [deepdive tutorial](https://nbviewer.jupyter.org/github/netdata/community/blob/main/netdata-agent-api/netdata-pandas/anomalies_collector_deepdive.ipynb)).
+- You can see how long model training is taking by looking in the logs for the collector `grep 'anomalies' /var/log/netdata/error.log | grep 'training'` and you should see lines like `2020-12-01 22:02:14: python.d INFO: anomalies[local] : training complete in 2.81 seconds (runs_counter=2700, model=pca, train_n_secs=14400, models=26, n_fit_success=26, n_fit_fails=0, after=1606845731, before=1606860131).`.
+ - This also gives counts of the number of models, if any, that failed to fit and so had to default back to the DefaultModel (which is currently [HBOS](https://pyod.readthedocs.io/en/latest/_modules/pyod/models/hbos.html)).
+ - `after` and `before` here refer to the start and end of the training data used to train the models.
+- On a development n1-standard-2 (2 vCPUs, 7.5 GB memory) vm running Ubuntu 18.04 LTS and not doing any work some of the typical performance characteristics we saw from running this collector (with defaults) were:
+ - A runtime (`netdata.runtime_anomalies`) of ~80ms when doing scoring and ~3 seconds when training or retraining the models.
+ - Typically ~3%-3.5% additional cpu usage from scoring, jumping to ~60% for a couple of seconds during model training.
+ - About ~150mb of ram (`apps.mem`) being continually used by the `python.d.plugin`.
+- If you activate this collector on a fresh node, it might take a little while to build up enough data to calculate a realistic and useful model.
+- Some models like `iforest` can be comparatively expensive (on same n1-standard-2 system above ~2s runtime during predict, ~40s training time, ~50% cpu on both train and predict) so if you would like to use it you might be advised to set a relatively high `update_every` maybe 10, 15 or 30 in `anomalies.conf`.
+- Setting a higher `train_every_n` and `update_every` is an easy way to devote fewer resources on the node to anomaly detection. Specifying fewer charts and a lower `train_n_secs` will also help reduce resource usage, at the expense of covering fewer charts and maybe a noisier model if you set `train_n_secs` too small for how your node tends to behave.
+
+## Useful links and further reading
+
+- [PyOD documentation](https://pyod.readthedocs.io/en/latest/), [PyOD Github](https://github.com/yzhao062/pyod).
+- [Anomaly Detection](https://en.wikipedia.org/wiki/Anomaly_detection) wikipedia page.
+- [Anomaly Detection YouTube playlist](https://www.youtube.com/playlist?list=PL6Zhl9mK2r0KxA6rB87oi4kWzoqGd5vp0) maintained by [andrewm4894](https://github.com/andrewm4894/) from Netdata.
+- [awesome-TS-anomaly-detection](https://github.com/rob-med/awesome-TS-anomaly-detection) Github list of useful tools, libraries and resources.
+- [Mendeley public group](https://www.mendeley.com/community/interesting-anomaly-detection-papers/) with some interesting anomaly detection papers we have been reading.
+- Good [blog post](https://www.anodot.com/blog/what-is-anomaly-detection/) from Anodot on time series anomaly detection. Anodot also have some great whitepapers in this space too that some may find useful.
+- Novelty and outlier detection in the [scikit-learn documentation](https://scikit-learn.org/stable/modules/outlier_detection.html).
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fanomalies%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/anomalies/anomalies.chart.py b/collectors/python.d.plugin/anomalies/anomalies.chart.py
new file mode 100644
index 0000000..97dbb1d
--- /dev/null
+++ b/collectors/python.d.plugin/anomalies/anomalies.chart.py
@@ -0,0 +1,349 @@
+# -*- coding: utf-8 -*-
+# Description: anomalies netdata python.d module
+# Author: andrewm4894
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import time
+from datetime import datetime
+import re
+import warnings
+
+import requests
+import numpy as np
+import pandas as pd
+from netdata_pandas.data import get_data, get_allmetrics_async
+from pyod.models.hbos import HBOS
+from pyod.models.pca import PCA
+from pyod.models.loda import LODA
+from pyod.models.iforest import IForest
+from pyod.models.cblof import CBLOF
+from pyod.models.feature_bagging import FeatureBagging
+from pyod.models.copod import COPOD
+from sklearn.preprocessing import MinMaxScaler
+
+from bases.FrameworkServices.SimpleService import SimpleService
+
+# ignore some sklearn/numpy warnings that are ok
+warnings.filterwarnings('ignore', r'All-NaN slice encountered')
+warnings.filterwarnings('ignore', r'invalid value encountered in true_divide')
+warnings.filterwarnings('ignore', r'divide by zero encountered in true_divide')
+warnings.filterwarnings('ignore', r'invalid value encountered in subtract')
+
+disabled_by_default = True
+
+ORDER = ['probability', 'anomaly']
+
+CHARTS = {
+ 'probability': {
+ 'options': ['probability', 'Anomaly Probability', 'probability', 'anomalies', 'anomalies.probability', 'line'],
+ 'lines': []
+ },
+ 'anomaly': {
+ 'options': ['anomaly', 'Anomaly', 'count', 'anomalies', 'anomalies.anomaly', 'stacked'],
+ 'lines': []
+ },
+}
+
+
+class Service(SimpleService):
+ def __init__(self, configuration=None, name=None):
+ SimpleService.__init__(self, configuration=configuration, name=name)
+ self.basic_init()
+ self.charts_init()
+ self.custom_models_init()
+ self.model_params_init()
+ self.models_init()
+
+ def check(self):
+ _ = get_allmetrics_async(
+ host_charts_dict=self.host_charts_dict, host_prefix=True, host_sep='::', wide=True, sort_cols=True,
+ protocol=self.protocol, numeric_only=True, float_size='float32', user=self.username, pwd=self.password
+ )
+ return True
+
+ def basic_init(self):
+ """Perform some basic initialization.
+ """
+ self.order = ORDER
+ self.definitions = CHARTS
+ self.protocol = self.configuration.get('protocol', 'http')
+ self.host = self.configuration.get('host', '127.0.0.1:19999')
+ self.username = self.configuration.get('username', None)
+ self.password = self.configuration.get('password', None)
+ self.fitted_at = {}
+ self.df_allmetrics = pd.DataFrame()
+ self.data_latest = {}
+ self.last_train_at = 0
+ self.include_average_prob = bool(self.configuration.get('include_average_prob', True))
+
+ def charts_init(self):
+ """Do some initialisation of charts in scope related variables.
+ """
+ self.charts_regex = re.compile(self.configuration.get('charts_regex', 'None'))
+ self.charts_available = [c for c in list(requests.get(f'{self.protocol}://{self.host}/api/v1/charts').json().get('charts', {}).keys())]
+ self.charts_in_scope = list(filter(self.charts_regex.match, self.charts_available))
+ self.charts_to_exclude = self.configuration.get('charts_to_exclude', '').split(',')
+ if len(self.charts_to_exclude) > 0:
+ self.charts_in_scope = [c for c in self.charts_in_scope if c not in self.charts_to_exclude]
+
+ def custom_models_init(self):
+ """Perform initialization steps related to custom models.
+ """
+ self.custom_models = self.configuration.get('custom_models', None)
+ self.custom_models_normalize = bool(self.configuration.get('custom_models_normalize', False))
+ if self.custom_models:
+ self.custom_models_names = [model['name'] for model in self.custom_models]
+ self.custom_models_dims = [i for s in [model['dimensions'].split(',') for model in self.custom_models] for i in s]
+ self.custom_models_dims = [dim if '::' in dim else f'{self.host}::{dim}' for dim in self.custom_models_dims]
+ self.custom_models_charts = list(set([dim.split('|')[0].split('::')[1] for dim in self.custom_models_dims]))
+ self.custom_models_hosts = list(set([dim.split('::')[0] for dim in self.custom_models_dims]))
+ self.custom_models_host_charts_dict = {}
+ for host in self.custom_models_hosts:
+ self.custom_models_host_charts_dict[host] = list(set([dim.split('::')[1].split('|')[0] for dim in self.custom_models_dims if dim.startswith(host)]))
+ self.custom_models_dims_renamed = [f"{model['name']}|{dim}" for model in self.custom_models for dim in model['dimensions'].split(',')]
+ self.models_in_scope = list(set([f'{self.host}::{c}' for c in self.charts_in_scope] + self.custom_models_names))
+ self.charts_in_scope = list(set(self.charts_in_scope + self.custom_models_charts))
+ self.host_charts_dict = {self.host: self.charts_in_scope}
+ for host in self.custom_models_host_charts_dict:
+ if host not in self.host_charts_dict:
+ self.host_charts_dict[host] = self.custom_models_host_charts_dict[host]
+ else:
+ for chart in self.custom_models_host_charts_dict[host]:
+ if chart not in self.host_charts_dict[host]:
+ self.host_charts_dict[host].append(chart)
+ else:
+ self.models_in_scope = [f'{self.host}::{c}' for c in self.charts_in_scope]
+ self.host_charts_dict = {self.host: self.charts_in_scope}
+ self.model_display_names = {model: model.split('::')[1] if '::' in model else model for model in self.models_in_scope}
+
+ def model_params_init(self):
+ """Model parameters initialisation.
+ """
+ self.train_max_n = self.configuration.get('train_max_n', 100000)
+ self.train_n_secs = self.configuration.get('train_n_secs', 14400)
+ self.offset_n_secs = self.configuration.get('offset_n_secs', 0)
+ self.train_every_n = self.configuration.get('train_every_n', 1800)
+ self.train_no_prediction_n = self.configuration.get('train_no_prediction_n', 10)
+ self.initial_train_data_after = self.configuration.get('initial_train_data_after', 0)
+ self.initial_train_data_before = self.configuration.get('initial_train_data_before', 0)
+ self.contamination = self.configuration.get('contamination', 0.001)
+ self.lags_n = {model: self.configuration.get('lags_n', 5) for model in self.models_in_scope}
+ self.smooth_n = {model: self.configuration.get('smooth_n', 5) for model in self.models_in_scope}
+ self.diffs_n = {model: self.configuration.get('diffs_n', 5) for model in self.models_in_scope}
+
+ def models_init(self):
+ """Models initialisation.
+ """
+ self.model = self.configuration.get('model', 'pca')
+ if self.model == 'pca':
+ self.models = {model: PCA(contamination=self.contamination) for model in self.models_in_scope}
+ elif self.model == 'loda':
+ self.models = {model: LODA(contamination=self.contamination) for model in self.models_in_scope}
+ elif self.model == 'iforest':
+ self.models = {model: IForest(n_estimators=50, bootstrap=True, behaviour='new', contamination=self.contamination) for model in self.models_in_scope}
+ elif self.model == 'cblof':
+ self.models = {model: CBLOF(n_clusters=3, contamination=self.contamination) for model in self.models_in_scope}
+ elif self.model == 'feature_bagging':
+ self.models = {model: FeatureBagging(base_estimator=PCA(contamination=self.contamination), contamination=self.contamination) for model in self.models_in_scope}
+ elif self.model == 'copod':
+ self.models = {model: COPOD(contamination=self.contamination) for model in self.models_in_scope}
+ elif self.model == 'hbos':
+ self.models = {model: HBOS(contamination=self.contamination) for model in self.models_in_scope}
+ else:
+ self.models = {model: HBOS(contamination=self.contamination) for model in self.models_in_scope}
+ self.custom_model_scalers = {model: MinMaxScaler() for model in self.models_in_scope}
+
+ def validate_charts(self, name, data, algorithm='absolute', multiplier=1, divisor=1):
+ """If dimension not in chart then add it.
+ """
+ for dim in data:
+ if dim not in self.charts[name]:
+ self.charts[name].add_dimension([dim, dim, algorithm, multiplier, divisor])
+
+ def add_custom_models_dims(self, df):
+ """Given a df, select columns used by custom models, add custom model name as prefix, and append to df.
+
+ :param df <pd.DataFrame>: dataframe to append new renamed columns to.
+ :return: <pd.DataFrame> dataframe with additional columns added relating to the specified custom models.
+ """
+ df_custom = df[self.custom_models_dims].copy()
+ df_custom.columns = self.custom_models_dims_renamed
+ df = df.join(df_custom)
+
+ return df
+
+ def make_features(self, arr, train=False, model=None):
+ """Take in numpy array and preprocess accordingly by taking diffs, smoothing and adding lags.
+
+ :param arr <np.ndarray>: numpy array we want to make features from.
+ :param train <bool>: True if making features for training, in which case need to fit_transform scaler and maybe sample train_max_n.
+ :param model <str>: model to make features for.
+ :return: <np.ndarray> transformed numpy array.
+ """
+
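+ # helper: shift the array down by n rows, padding the first n rows with NaN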
+ def lag(arr, n):
+ res = np.empty_like(arr)
+ res[:n] = np.nan
+ res[n:] = arr[:-n]
+
+ return res
+
+ arr = np.nan_to_num(arr)
+
+ diffs_n = self.diffs_n[model]
+ smooth_n = self.smooth_n[model]
+ lags_n = self.lags_n[model]
+
+ if self.custom_models_normalize and model in self.custom_models_names:
+ if train:
+ arr = self.custom_model_scalers[model].fit_transform(arr)
+ else:
+ arr = self.custom_model_scalers[model].transform(arr)
+
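+ # difference the series diffs_n times to remove level/trend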
+ if diffs_n > 0:
+ arr = np.diff(arr, diffs_n, axis=0)
+ arr = arr[~np.isnan(arr).any(axis=1)]
+
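+ # smooth with a rolling mean of window smooth_n, computed via cumulative sums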
+ if smooth_n > 1:
+ arr = np.cumsum(arr, axis=0, dtype=float)
+ arr[smooth_n:] = arr[smooth_n:] - arr[:-smooth_n]
+ arr = arr[smooth_n - 1:] / smooth_n
+ arr = arr[~np.isnan(arr).any(axis=1)]
+
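+ # append lagged copies of each column so every row carries recent history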
+ if lags_n > 0:
+ arr_orig = np.copy(arr)
+ for lag_n in range(1, lags_n + 1):
+ arr = np.concatenate((arr, lag(arr_orig, lag_n)), axis=1)
+ arr = arr[~np.isnan(arr).any(axis=1)]
+
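+ # cap the training set at train_max_n rows by random sampling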
+ if train:
+ if len(arr) > self.train_max_n:
+ arr = arr[np.random.randint(arr.shape[0], size=self.train_max_n), :]
+
+ arr = np.nan_to_num(arr)
+
+ return arr
+
+ def train(self, models_to_train=None, train_data_after=0, train_data_before=0):
+ """Pull required training data and train a model for each specified model.
+
+ :param models_to_train <list>: list of models to train on.
+ :param train_data_after <int>: integer timestamp for start of train data.
+ :param train_data_before <int>: integer timestamp for end of train data.
+ """
+ now = datetime.now().timestamp()
+ if train_data_after > 0 and train_data_before > 0:
+ before = train_data_before
+ after = train_data_after
+ else:
+ before = int(now) - self.offset_n_secs
+ after = before - self.train_n_secs
+
+ # get training data
+ df_train = get_data(
+ host_charts_dict=self.host_charts_dict, host_prefix=True, host_sep='::', after=after, before=before,
+ sort_cols=True, numeric_only=True, protocol=self.protocol, float_size='float32', user=self.username, pwd=self.password
+ ).ffill()
+ if self.custom_models:
+ df_train = self.add_custom_models_dims(df_train)
+
+ # train model
+ self.try_fit(df_train, models_to_train=models_to_train)
+ self.info(f'training complete in {round(time.time() - now, 2)} seconds (runs_counter={self.runs_counter}, model={self.model}, train_n_secs={self.train_n_secs}, models={len(self.fitted_at)}, n_fit_success={self.n_fit_success}, n_fit_fails={self.n_fit_fail}, after={after}, before={before}).')
+ self.last_train_at = self.runs_counter
+
+ def try_fit(self, df_train, models_to_train=None):
+ """Try fit each model and try to fallback to a default model if fit fails for any reason.
+
+ :param df_train <pd.DataFrame>: data to train on.
+ :param models_to_train <list>: list of models to train.
+ """
+ if models_to_train is None:
+ models_to_train = list(self.models.keys())
+ self.n_fit_fail, self.n_fit_success = 0, 0
+ for model in models_to_train:
+ X_train = self.make_features(
+ df_train[df_train.columns[df_train.columns.str.startswith(f'{model}|')]].values,
+ train=True, model=model)
+ try:
+ self.models[model].fit(X_train)
+ self.n_fit_success += 1
+ except Exception as e:
+ self.n_fit_fail += 1
+ self.info(e)
+ self.info(f'training failed for {model} at run_counter {self.runs_counter}, defaulting to hbos model.')
+ self.models[model] = HBOS(contamination=self.contamination)
+ self.models[model].fit(X_train)
+ self.fitted_at[model] = self.runs_counter
+
+ def predict(self):
+ """Get latest data, make it into a feature vector, and get predictions for each available model.
+
+ :return: (<dict>,<dict>) tuple of dictionaries, one for probability scores and the other for anomaly predictions.
+ """
+ # get recent data to predict on
+ df_allmetrics = get_allmetrics_async(
+ host_charts_dict=self.host_charts_dict, host_prefix=True, host_sep='::', wide=True, sort_cols=True,
+ protocol=self.protocol, numeric_only=True, float_size='float32', user=self.username, pwd=self.password
+ )
+ if self.custom_models:
+ df_allmetrics = self.add_custom_models_dims(df_allmetrics)
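+ # keep only as many recent rows as the lags/smoothing/diffs preprocessing needs (with headroom)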
+ self.df_allmetrics = self.df_allmetrics.append(df_allmetrics).ffill().tail((max(self.lags_n.values()) + max(self.smooth_n.values()) + max(self.diffs_n.values())) * 2)
+
+ # get predictions
+ data_probability, data_anomaly = self.try_predict()
+
+ return data_probability, data_anomaly
+
+ def try_predict(self):
+ """Try make prediction and fall back to last known prediction if fails.
+
+ :return: (<dict>,<dict>) tuple of dictionaries, one for probability scores and the other for anomaly predictions.
+ """
+ data_probability, data_anomaly = {}, {}
+ for model in self.fitted_at.keys():
+ model_display_name = self.model_display_names[model]
+ X_model = np.nan_to_num(self.make_features(
+ self.df_allmetrics[self.df_allmetrics.columns[self.df_allmetrics.columns.str.startswith(f'{model}|')]].values,
+ model=model)[-1,:].reshape(1, -1))
+ try:
+ data_probability[model_display_name + '_prob'] = np.nan_to_num(self.models[model].predict_proba(X_model)[-1][1]) * 10000
+ data_anomaly[model_display_name + '_anomaly'] = self.models[model].predict(X_model)[-1]
+ except Exception:
+ #self.info(e)
+ if model_display_name + '_prob' in self.data_latest:
+ #self.info(f'prediction failed for {model} at run_counter {self.runs_counter}, using last prediction instead.')
+ data_probability[model_display_name + '_prob'] = self.data_latest[model_display_name + '_prob']
+ data_anomaly[model_display_name + '_anomaly'] = self.data_latest[model_display_name + '_anomaly']
+ else:
+ #self.info(f'prediction failed for {model} at run_counter {self.runs_counter}, skipping as no previous prediction.')
+ continue
+
+ return data_probability, data_anomaly
+
+ def get_data(self):
+
+ # if not all models have been trained then train those we need to
+ if len(self.fitted_at) < len(self.models):
+ self.train(
+ models_to_train=[m for m in self.models if m not in self.fitted_at],
+ train_data_after=self.initial_train_data_after,
+ train_data_before=self.initial_train_data_before)
+ # retrain all models as per schedule from config
+ elif self.train_every_n > 0 and self.runs_counter % self.train_every_n == 0:
+ self.train()
+
+ # roll forward previous predictions around a training step to avoid the possibility of having the training itself trigger an anomaly
+ if (self.runs_counter - self.last_train_at) <= self.train_no_prediction_n:
+ data = self.data_latest
+ else:
+ data_probability, data_anomaly = self.predict()
+ if self.include_average_prob:
+ data_probability['average_prob'] = np.mean(list(data_probability.values()))
+ data = {**data_probability, **data_anomaly}
+ self.validate_charts('probability', data_probability, divisor=100)
+ self.validate_charts('anomaly', data_anomaly)
+
+ self.data_latest = data
+
+ return data
diff --git a/collectors/python.d.plugin/anomalies/anomalies.conf b/collectors/python.d.plugin/anomalies/anomalies.conf
new file mode 100644
index 0000000..9950534
--- /dev/null
+++ b/collectors/python.d.plugin/anomalies/anomalies.conf
@@ -0,0 +1,181 @@
+# netdata python.d.plugin configuration for anomalies
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 2
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+
+# Pull data from local Netdata node.
+local:
+ name: 'local'
+
+ # Host to pull data from.
+ host: '127.0.0.1:19999'
+
+ # Username and Password for Netdata if using basic auth.
+ # username: '???'
+ # password: '???'
+
+ # Use http or https to pull data
+ protocol: 'http'
+
+ # What charts to pull data for - A regex like 'system\..*' or 'system\..*|apps.cpu|apps.mem' etc.
+ charts_regex: 'system\..*'
+
+ # Charts to exclude, useful if there are specific charts you want to skip.
+ # Note: should be a ',' separated string like 'chart.name,chart.name'.
+ charts_to_exclude: 'system.uptime,system.entropy'
+
+ # What model to use - can be one of 'pca', 'hbos', 'iforest', 'cblof', 'loda', 'copod' or 'feature_bagging'.
+ # More details here: https://pyod.readthedocs.io/en/latest/pyod.models.html.
+ model: 'pca'
+
+ # Max number of observations to train on, to help cap compute cost of training model if you set a very large train_n_secs.
+ train_max_n: 100000
+
+ # How often to re-train the model (assuming update_every=1, train_every_n=1800 means (re)training every 30 minutes).
+ # Note: If you want to turn off re-training set train_every_n=0 and after initial training the models will not be retrained.
+ train_every_n: 1800
+
+ # The length of the window of data to train on (14400 = last 4 hours).
+ train_n_secs: 14400
+
+ # How many prediction steps after a training event to just reuse the previous prediction value.
+ # Used to reduce possibility of the training step itself appearing as an anomaly on the charts.
+ train_no_prediction_n: 10
+
+ # If you would like to train the model for the first time on a specific window then you can define it using the below two variables.
+ # Start of training data for initial model.
+ # initial_train_data_after: 1604578857
+
+ # End of training data for initial model.
+ # initial_train_data_before: 1604593257
+
+ # If you would like to ignore recent data in training then you can offset it by offset_n_secs.
+ offset_n_secs: 0
+
+ # How many lagged values of each dimension to include in the 'feature vector' each model is trained on.
+ lags_n: 5
+
+ # How much smoothing to apply to each dimension in the 'feature vector' each model is trained on.
+ smooth_n: 3
+
+ # How many differences to take in preprocessing your data.
+ # More info on differencing here: https://en.wikipedia.org/wiki/Autoregressive_integrated_moving_average#Differencing
+ # diffs_n=0 would mean training models on the raw values of each dimension.
+ # diffs_n=1 means everything is done in terms of differences.
+ diffs_n: 1
+
+ # What is the typical proportion of anomalies in your data on average?
+ # This parameter can control the sensitivity of your models to anomalies.
+ # Some discussion here: https://github.com/yzhao062/pyod/issues/144
+ contamination: 0.001
+
+ # Set to true to include an "average_prob" dimension on the anomaly probability chart, which is
+ # just the average of all anomaly probabilities at each time step.
+ include_average_prob: true
+
+ # Define any custom models you would like to create anomaly probabilities for; some examples below show how.
+ # The example below creates two custom models: one that runs anomaly detection on user and system cpu for our demo servers,
+ # and one on the cpu and mem apps metrics for the python.d.plugin.
+ # custom_models:
+ # - name: 'demos_cpu'
+ # dimensions: 'london.my-netdata.io::system.cpu|user,london.my-netdata.io::system.cpu|system,newyork.my-netdata.io::system.cpu|user,newyork.my-netdata.io::system.cpu|system'
+ # - name: 'apps_python_d_plugin'
+ # dimensions: 'apps.cpu|python.d.plugin,apps.mem|python.d.plugin'
+
+ # Set to true to normalize, using min-max standardization, features used for the custom models.
+ # Useful if your custom models contain dimensions on very different scales and the model you use does
+ # not do its own normalization internally. Usually best to leave as false.
+ # custom_models_normalize: false
+
+# Standalone Custom models example as an additional collector job.
+# custom:
+# name: 'custom'
+# host: '127.0.0.1:19999'
+# protocol: 'http'
+# charts_regex: 'None'
+# charts_to_exclude: 'None'
+# model: 'pca'
+# train_max_n: 100000
+# train_every_n: 1800
+# train_n_secs: 14400
+# offset_n_secs: 0
+# lags_n: 5
+# smooth_n: 3
+# diffs_n: 1
+# contamination: 0.001
+# custom_models:
+# - name: 'user_netdata'
+# dimensions: 'users.cpu|netdata,users.mem|netdata,users.threads|netdata,users.processes|netdata,users.sockets|netdata'
+# - name: 'apps_python_d_plugin'
+# dimensions: 'apps.cpu|python.d.plugin,apps.mem|python.d.plugin,apps.threads|python.d.plugin,apps.processes|python.d.plugin,apps.sockets|python.d.plugin'
+
+# Pull data from some demo nodes for cross node custom models.
+# demos:
+# name: 'demos'
+# host: '127.0.0.1:19999'
+# protocol: 'http'
+# charts_regex: 'None'
+# charts_to_exclude: 'None'
+# model: 'pca'
+# train_max_n: 100000
+# train_every_n: 1800
+# train_n_secs: 14400
+# offset_n_secs: 0
+# lags_n: 5
+# smooth_n: 3
+# diffs_n: 1
+# contamination: 0.001
+# custom_models:
+# - name: 'system.cpu'
+# dimensions: 'london.my-netdata.io::system.cpu|user,london.my-netdata.io::system.cpu|system,newyork.my-netdata.io::system.cpu|user,newyork.my-netdata.io::system.cpu|system'
+# - name: 'system.ip'
+# dimensions: 'london.my-netdata.io::system.ip|received,london.my-netdata.io::system.ip|sent,newyork.my-netdata.io::system.ip|received,newyork.my-netdata.io::system.ip|sent'
+# - name: 'system.net'
+# dimensions: 'london.my-netdata.io::system.net|received,london.my-netdata.io::system.net|sent,newyork.my-netdata.io::system.net|received,newyork.my-netdata.io::system.net|sent'
+# - name: 'system.io'
+# dimensions: 'london.my-netdata.io::system.io|in,london.my-netdata.io::system.io|out,newyork.my-netdata.io::system.io|in,newyork.my-netdata.io::system.io|out'
+
+# Example additional job if you want to also pull data from a child streaming to your
+# local parent or even a remote node so long as the Netdata REST API is accessible.
+# mychildnode1:
+# name: 'mychildnode1'
+# host: '127.0.0.1:19999/host/mychildnode1'
+# protocol: 'http'
+# charts_regex: 'system\..*'
+# charts_to_exclude: 'None'
+# model: 'pca'
+# train_max_n: 100000
+# train_every_n: 1800
+# train_n_secs: 14400
+# offset_n_secs: 0
+# lags_n: 5
+# smooth_n: 3
+# diffs_n: 1
+# contamination: 0.001
diff --git a/collectors/python.d.plugin/apache/Makefile.inc b/collectors/python.d.plugin/apache/Makefile.inc
new file mode 100644
index 0000000..70a4215
--- /dev/null
+++ b/collectors/python.d.plugin/apache/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += apache/apache.chart.py
+dist_pythonconfig_DATA += apache/apache.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += apache/README.md apache/Makefile.inc
+
diff --git a/collectors/python.d.plugin/apache/README.md b/collectors/python.d.plugin/apache/README.md
new file mode 100644
index 0000000..d275250
--- /dev/null
+++ b/collectors/python.d.plugin/apache/README.md
@@ -0,0 +1,82 @@
+<!--
+title: "Apache monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/apache/README.md
+sidebar_label: "Apache"
+-->
+
+# Apache monitoring with Netdata
+
+Monitors one or more Apache servers depending on configuration.
+
+## Requirements
+
+- Apache with `mod_status` enabled
+
+It produces the following charts:
+
+1. **Requests** in requests/s
+
+ - requests
+
+2. **Connections**
+
+ - connections
+
+3. **Async Connections**
+
+ - keepalive
+ - closing
+ - writing
+
+4. **Bandwidth** in kilobits/s
+
+ - sent
+
+5. **Workers**
+
+ - idle
+ - busy
+
+6. **Lifetime Avg. Requests/s** in requests/s
+
+ - requests_sec
+
+7. **Lifetime Avg. Bandwidth/s** in kilobits/s
+
+ - size_sec
+
+8. **Lifetime Avg. Request Size** in KiB
+
+ - size_req
+
+## Configuration
+
+Edit the `python.d/apache.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/apache.conf
+```
+
+The module only needs the `url` to the server's `server-status?auto` page.
+
+Example for two servers:
+
+```yaml
+update_every : 10
+priority : 90100
+
+local:
+ url : 'http://localhost/server-status?auto'
+
+remote:
+ url : 'http://www.apache.org/server-status?auto'
+ update_every : 5
+```
+
+Without configuration, the module attempts to connect to `http://localhost/server-status?auto`.
+
+---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fapache%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/python.d.plugin/apache/apache.chart.py b/collectors/python.d.plugin/apache/apache.chart.py
new file mode 100644
index 0000000..ceac9ec
--- /dev/null
+++ b/collectors/python.d.plugin/apache/apache.chart.py
@@ -0,0 +1,159 @@
+# -*- coding: utf-8 -*-
+# Description: apache netdata python.d module
+# Author: Pawel Krupa (paulfantom)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from bases.FrameworkServices.UrlService import UrlService
+
+ORDER = [
+ 'requests',
+ 'connections',
+ 'conns_async',
+ 'net',
+ 'workers',
+ 'reqpersec',
+ 'bytespersec',
+ 'bytesperreq',
+]
+
+CHARTS = {
+ 'bytesperreq': {
+ 'options': [None, 'Lifetime Avg. Request Size', 'KiB',
+ 'statistics', 'apache.bytesperreq', 'area'],
+ 'lines': [
+ ['size_req', 'size', 'absolute', 1, 1024 * 100000]
+ ]},
+ 'workers': {
+ 'options': [None, 'Workers', 'workers', 'workers', 'apache.workers', 'stacked'],
+ 'lines': [
+ ['idle'],
+ ['busy'],
+ ]},
+ 'reqpersec': {
+ 'options': [None, 'Lifetime Avg. Requests/s', 'requests/s', 'statistics',
+ 'apache.reqpersec', 'area'],
+ 'lines': [
+ ['requests_sec', 'requests', 'absolute', 1, 100000]
+ ]},
+ 'bytespersec': {
+ 'options': [None, 'Lifetime Avg. Bandwidth/s', 'kilobits/s', 'statistics',
+ 'apache.bytespersec', 'area'],
+ 'lines': [
+ ['size_sec', None, 'absolute', 8, 1000 * 100000]
+ ]},
+ 'requests': {
+ 'options': [None, 'Requests', 'requests/s', 'requests', 'apache.requests', 'line'],
+ 'lines': [
+ ['requests', None, 'incremental']
+ ]},
+ 'net': {
+ 'options': [None, 'Bandwidth', 'kilobits/s', 'bandwidth', 'apache.net', 'area'],
+ 'lines': [
+ ['sent', None, 'incremental', 8, 1]
+ ]},
+ 'connections': {
+ 'options': [None, 'Connections', 'connections', 'connections', 'apache.connections', 'line'],
+ 'lines': [
+ ['connections']
+ ]},
+ 'conns_async': {
+ 'options': [None, 'Async Connections', 'connections', 'connections', 'apache.conns_async', 'stacked'],
+ 'lines': [
+ ['keepalive'],
+ ['closing'],
+ ['writing']
+ ]}
+}
+
+ASSIGNMENT = {
+ 'BytesPerReq': 'size_req',
+ 'IdleWorkers': 'idle',
+ 'IdleServers': 'idle_servers',
+ 'BusyWorkers': 'busy',
+ 'BusyServers': 'busy_servers',
+ 'ReqPerSec': 'requests_sec',
+ 'BytesPerSec': 'size_sec',
+ 'Total Accesses': 'requests',
+ 'Total kBytes': 'sent',
+ 'ConnsTotal': 'connections',
+ 'ConnsAsyncKeepAlive': 'keepalive',
+ 'ConnsAsyncClosing': 'closing',
+ 'ConnsAsyncWriting': 'writing'
+}
+
+FLOAT_VALUES = [
+ 'BytesPerReq',
+ 'ReqPerSec',
+ 'BytesPerSec',
+]
+
+LIGHTTPD_MARKER = 'idle_servers'
+
+
+class Service(UrlService):
+ def __init__(self, configuration=None, name=None):
+ UrlService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
+ self.url = self.configuration.get('url', 'http://localhost/server-status?auto')
+
+ def check(self):
+ self._manager = self._build_manager()
+
+ data = self._get_data()
+
+ if not data:
+ return None
+
+ if LIGHTTPD_MARKER in data:
+ self.turn_into_lighttpd()
+
+ return True
+
+ def _get_data(self):
+ """
+ Format data received from http request
+ :return: dict
+ """
+ raw_data = self._get_raw_data()
+
+ if not raw_data:
+ return None
+
+ data = dict()
+
+ for line in raw_data.split('\n'):
+ try:
+ parse_line(line, data)
+ except ValueError:
+ continue
+
+ return data or None
+
+ def turn_into_lighttpd(self):
+ self.module_name = 'lighttpd'
+ for chart in self.definitions:
+ if chart == 'workers':
+ lines = self.definitions[chart]['lines']
+ lines[0] = ['idle_servers', 'idle']
+ lines[1] = ['busy_servers', 'busy']
+ opts = self.definitions[chart]['options']
+ opts[1] = opts[1].replace('apache', 'lighttpd')
+ opts[4] = opts[4].replace('apache', 'lighttpd')
+
+
+def parse_line(line, data):
+ parts = line.split(':')
+
+ if len(parts) != 2:
+ return
+
+ key, value = parts[0], parts[1]
+
+ if key not in ASSIGNMENT:
+ return
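+ # floats are stored as integers scaled by 100000; the chart divisors undo the scaling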
+
+ if key in FLOAT_VALUES:
+ data[ASSIGNMENT[key]] = int(float(value) * 100000)
+ else:
+ data[ASSIGNMENT[key]] = int(value)
diff --git a/collectors/python.d.plugin/apache/apache.conf b/collectors/python.d.plugin/apache/apache.conf
new file mode 100644
index 0000000..84e12a5
--- /dev/null
+++ b/collectors/python.d.plugin/apache/apache.conf
@@ -0,0 +1,85 @@
+# netdata python.d.plugin configuration for apache
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# Additionally to the above, apache also supports the following:
+#
+# url: 'URL' # the URL to fetch apache's mod_status stats
+#
+# if the URL is password protected, the following are supported:
+#
+# user: 'username'
+# pass: 'password'
+
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+
+localhost:
+ name : 'local'
+ url : 'http://localhost/server-status?auto'
+
+localipv4:
+ name : 'local'
+ url : 'http://127.0.0.1/server-status?auto'
+
+localipv6:
+ name : 'local'
+ url : 'http://[::1]/server-status?auto'
diff --git a/collectors/python.d.plugin/beanstalk/Makefile.inc b/collectors/python.d.plugin/beanstalk/Makefile.inc
new file mode 100644
index 0000000..4bbb708
--- /dev/null
+++ b/collectors/python.d.plugin/beanstalk/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += beanstalk/beanstalk.chart.py
+dist_pythonconfig_DATA += beanstalk/beanstalk.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += beanstalk/README.md beanstalk/Makefile.inc
+
diff --git a/collectors/python.d.plugin/beanstalk/README.md b/collectors/python.d.plugin/beanstalk/README.md
new file mode 100644
index 0000000..24315ad
--- /dev/null
+++ b/collectors/python.d.plugin/beanstalk/README.md
@@ -0,0 +1,133 @@
+<!--
+title: "Beanstalk monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/beanstalk/README.md
+sidebar_label: "Beanstalk"
+-->
+
+# Beanstalk monitoring with Netdata
+
+Provides server and tube-level statistics.
+
+## Requirements
+
+- `python-beanstalkc`
+
+**Server statistics:**
+
+1. **Cpu usage** in cpu time
+
+ - user
+ - system
+
+2. **Jobs rate** in jobs/s
+
+ - total
+ - timeouts
+
+3. **Connections rate** in connections/s
+
+ - connections
+
+4. **Commands rate** in commands/s
+
+ - put
+ - peek
+ - peek-ready
+ - peek-delayed
+ - peek-buried
+ - reserve
+ - use
+ - watch
+ - ignore
+ - delete
+ - release
+ - bury
+ - kick
+ - stats
+ - stats-job
+ - stats-tube
+ - list-tubes
+ - list-tube-used
+ - list-tubes-watched
+ - pause-tube
+
+5. **Current tubes** in tubes
+
+ - tubes
+
+6. **Current jobs** in jobs
+
+ - urgent
+ - ready
+ - reserved
+ - delayed
+ - buried
+
+7. **Current connections** in connections
+
+ - written
+ - producers
+ - workers
+ - waiting
+
+8. **Binlog** in records/s
+
+ - written
+ - migrated
+
+9. **Uptime** in seconds
+
+ - uptime
+
+**Per tube statistics:**
+
+1. **Jobs rate** in jobs/s
+
+ - jobs
+
+2. **Jobs** in jobs
+
+ - using
+ - ready
+ - reserved
+ - delayed
+ - buried
+
+3. **Connections** in connections
+
+ - using
+ - waiting
+ - watching
+
+4. **Commands** in commands/s
+
+ - deletes
+ - pauses
+
+5. **Pause** in seconds
+
+ - since
+ - left
+
+## Configuration
+
+Edit the `python.d/beanstalk.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/beanstalk.conf
+```
+
+Sample:
+
+```yaml
+host : '127.0.0.1'
+port : 11300
+```
+
+If no configuration is given, the module will attempt to connect to beanstalkd at `127.0.0.1:11300`.
+
+---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fbeanstalk%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/python.d.plugin/beanstalk/beanstalk.chart.py b/collectors/python.d.plugin/beanstalk/beanstalk.chart.py
new file mode 100644
index 0000000..396543e
--- /dev/null
+++ b/collectors/python.d.plugin/beanstalk/beanstalk.chart.py
@@ -0,0 +1,252 @@
+# -*- coding: utf-8 -*-
+# Description: beanstalk netdata python.d module
+# Author: ilyam8
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+try:
+ import beanstalkc
+
+ BEANSTALKC = True
+except ImportError:
+ BEANSTALKC = False
+
+from bases.FrameworkServices.SimpleService import SimpleService
+from bases.loaders import load_yaml
+
+ORDER = [
+ 'cpu_usage',
+ 'jobs_rate',
+ 'connections_rate',
+ 'commands_rate',
+ 'current_tubes',
+ 'current_jobs',
+ 'current_connections',
+ 'binlog',
+ 'uptime',
+]
+
+CHARTS = {
+ 'cpu_usage': {
+ 'options': [None, 'Cpu Usage', 'cpu time', 'server statistics', 'beanstalk.cpu_usage', 'area'],
+ 'lines': [
+ ['rusage-utime', 'user', 'incremental'],
+ ['rusage-stime', 'system', 'incremental']
+ ]
+ },
+ 'jobs_rate': {
+ 'options': [None, 'Jobs Rate', 'jobs/s', 'server statistics', 'beanstalk.jobs_rate', 'line'],
+ 'lines': [
+ ['total-jobs', 'total', 'incremental'],
+ ['job-timeouts', 'timeouts', 'incremental']
+ ]
+ },
+ 'connections_rate': {
+ 'options': [None, 'Connections Rate', 'connections/s', 'server statistics', 'beanstalk.connections_rate',
+ 'area'],
+ 'lines': [
+ ['total-connections', 'connections', 'incremental']
+ ]
+ },
+ 'commands_rate': {
+ 'options': [None, 'Commands Rate', 'commands/s', 'server statistics', 'beanstalk.commands_rate', 'stacked'],
+ 'lines': [
+ ['cmd-put', 'put', 'incremental'],
+ ['cmd-peek', 'peek', 'incremental'],
+ ['cmd-peek-ready', 'peek-ready', 'incremental'],
+ ['cmd-peek-delayed', 'peek-delayed', 'incremental'],
+ ['cmd-peek-buried', 'peek-buried', 'incremental'],
+ ['cmd-reserve', 'reserve', 'incremental'],
+ ['cmd-use', 'use', 'incremental'],
+ ['cmd-watch', 'watch', 'incremental'],
+ ['cmd-ignore', 'ignore', 'incremental'],
+ ['cmd-delete', 'delete', 'incremental'],
+ ['cmd-release', 'release', 'incremental'],
+ ['cmd-bury', 'bury', 'incremental'],
+ ['cmd-kick', 'kick', 'incremental'],
+ ['cmd-stats', 'stats', 'incremental'],
+ ['cmd-stats-job', 'stats-job', 'incremental'],
+ ['cmd-stats-tube', 'stats-tube', 'incremental'],
+ ['cmd-list-tubes', 'list-tubes', 'incremental'],
+ ['cmd-list-tube-used', 'list-tube-used', 'incremental'],
+ ['cmd-list-tubes-watched', 'list-tubes-watched', 'incremental'],
+ ['cmd-pause-tube', 'pause-tube', 'incremental']
+ ]
+ },
+ 'current_tubes': {
+ 'options': [None, 'Current Tubes', 'tubes', 'server statistics', 'beanstalk.current_tubes', 'area'],
+ 'lines': [
+ ['current-tubes', 'tubes']
+ ]
+ },
+ 'current_jobs': {
+ 'options': [None, 'Current Jobs', 'jobs', 'server statistics', 'beanstalk.current_jobs', 'stacked'],
+ 'lines': [
+ ['current-jobs-urgent', 'urgent'],
+ ['current-jobs-ready', 'ready'],
+ ['current-jobs-reserved', 'reserved'],
+ ['current-jobs-delayed', 'delayed'],
+ ['current-jobs-buried', 'buried']
+ ]
+ },
+ 'current_connections': {
+ 'options': [None, 'Current Connections', 'connections', 'server statistics',
+ 'beanstalk.current_connections', 'line'],
+ 'lines': [
+ ['current-connections', 'written'],
+ ['current-producers', 'producers'],
+ ['current-workers', 'workers'],
+ ['current-waiting', 'waiting']
+ ]
+ },
+ 'binlog': {
+ 'options': [None, 'Binlog', 'records/s', 'server statistics', 'beanstalk.binlog', 'line'],
+ 'lines': [
+ ['binlog-records-written', 'written', 'incremental'],
+ ['binlog-records-migrated', 'migrated', 'incremental']
+ ]
+ },
+ 'uptime': {
+ 'options': [None, 'Uptime', 'seconds', 'server statistics', 'beanstalk.uptime', 'line'],
+ 'lines': [
+ ['uptime'],
+ ]
+ }
+}
+
+
+def tube_chart_template(name):
+ order = [
+ '{0}_jobs_rate'.format(name),
+ '{0}_jobs'.format(name),
+ '{0}_connections'.format(name),
+ '{0}_commands'.format(name),
+ '{0}_pause'.format(name)
+ ]
+ family = 'tube {0}'.format(name)
+
+ charts = {
+ order[0]: {
+ 'options': [None, 'Job Rate', 'jobs/s', family, 'beanstalk.jobs_rate', 'area'],
+ 'lines': [
+ ['_'.join([name, 'total-jobs']), 'jobs', 'incremental']
+ ]
+ },
+ order[1]: {
+ 'options': [None, 'Jobs', 'jobs', family, 'beanstalk.jobs', 'stacked'],
+ 'lines': [
+ ['_'.join([name, 'current-jobs-urgent']), 'urgent'],
+ ['_'.join([name, 'current-jobs-ready']), 'ready'],
+ ['_'.join([name, 'current-jobs-reserved']), 'reserved'],
+ ['_'.join([name, 'current-jobs-delayed']), 'delayed'],
+ ['_'.join([name, 'current-jobs-buried']), 'buried']
+ ]
+ },
+ order[2]: {
+ 'options': [None, 'Connections', 'connections', family, 'beanstalk.connections', 'stacked'],
+ 'lines': [
+ ['_'.join([name, 'current-using']), 'using'],
+ ['_'.join([name, 'current-waiting']), 'waiting'],
+ ['_'.join([name, 'current-watching']), 'watching']
+ ]
+ },
+ order[3]: {
+ 'options': [None, 'Commands', 'commands/s', family, 'beanstalk.commands', 'stacked'],
+ 'lines': [
+ ['_'.join([name, 'cmd-delete']), 'deletes', 'incremental'],
+ ['_'.join([name, 'cmd-pause-tube']), 'pauses', 'incremental']
+ ]
+ },
+ order[4]: {
+ 'options': [None, 'Pause', 'seconds', family, 'beanstalk.pause', 'stacked'],
+ 'lines': [
+ ['_'.join([name, 'pause']), 'since'],
+ ['_'.join([name, 'pause-time-left']), 'left']
+ ]
+ }
+ }
+
+ return order, charts
+
+
+class Service(SimpleService):
+ def __init__(self, configuration=None, name=None):
+ SimpleService.__init__(self, configuration=configuration, name=name)
+ self.configuration = configuration
+ self.order = list(ORDER)
+ self.definitions = dict(CHARTS)
+ self.conn = None
+ self.alive = True
+
+ def check(self):
+ if not BEANSTALKC:
+ self.error("'beanstalkc' module is needed to use beanstalk.chart.py")
+ return False
+
+ self.conn = self.connect()
+
+ return bool(self.conn)
+
+ def get_data(self):
+ """
+ :return: dict
+ """
+ if not self.is_alive():
+ return None
+
+ active_charts = self.charts.active_charts()
+ data = dict()
+
+ try:
+ data.update(self.conn.stats())
+
+ for tube in self.conn.tubes():
+ stats = self.conn.stats_tube(tube)
+
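+ # create charts for a tube on the fly the first time it is seen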
+ if tube + '_jobs_rate' not in active_charts:
+ self.create_new_tube_charts(tube)
+
+ for stat in stats:
+ data['_'.join([tube, stat])] = stats[stat]
+
+ except beanstalkc.SocketError:
+ self.alive = False
+ return None
+
+ return data or None
+
+ def create_new_tube_charts(self, tube):
+ order, charts = tube_chart_template(tube)
+
+ for chart_name in order:
+ params = [chart_name] + charts[chart_name]['options']
+ dimensions = charts[chart_name]['lines']
+
+ new_chart = self.charts.add_chart(params)
+ for dimension in dimensions:
+ new_chart.add_dimension(dimension)
+
+ def connect(self):
+ host = self.configuration.get('host', '127.0.0.1')
+ port = self.configuration.get('port', 11300)
+ timeout = self.configuration.get('timeout', 1)
+ try:
+ return beanstalkc.Connection(host=host,
+ port=port,
+ connect_timeout=timeout,
+ parse_yaml=load_yaml)
+ except beanstalkc.SocketError as error:
+ self.error('Connection to {0}:{1} failed: {2}'.format(host, port, error))
+ return None
+
+ def reconnect(self):
+ try:
+ self.conn.reconnect()
+ self.alive = True
+ return True
+ except beanstalkc.SocketError:
+ return False
+
+ def is_alive(self):
+ if not self.alive:
+ return self.reconnect()
+ return True
diff --git a/collectors/python.d.plugin/beanstalk/beanstalk.conf b/collectors/python.d.plugin/beanstalk/beanstalk.conf
new file mode 100644
index 0000000..7586ad2
--- /dev/null
+++ b/collectors/python.d.plugin/beanstalk/beanstalk.conf
@@ -0,0 +1,78 @@
+# netdata python.d.plugin configuration for beanstalk
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# chart_cleanup sets the default chart cleanup interval in iterations.
+# A chart is marked as obsolete if it has not been updated
+# 'chart_cleanup' iterations in a row.
+# When a plugin sends the obsolete flag, the charts are not deleted
+# from netdata immediately.
+# They will be hidden immediately (not offered to dashboard viewer,
+# streamed upstream and archived to backends) and deleted one hour
+# later (configurable from netdata.conf).
+# chart_cleanup: 10
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+# chart_cleanup: 10 # the JOB's chart cleanup interval in iterations
+#
+# Additionally to the above, beanstalk also supports the following:
+#
+# host: 'host' # Server ip address or hostname. Default: 127.0.0.1
+# port: port # Beanstalkd port. Default: 11300
+#
+# ----------------------------------------------------------------------
diff --git a/collectors/python.d.plugin/bind_rndc/Makefile.inc b/collectors/python.d.plugin/bind_rndc/Makefile.inc
new file mode 100644
index 0000000..72f3914
--- /dev/null
+++ b/collectors/python.d.plugin/bind_rndc/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += bind_rndc/bind_rndc.chart.py
+dist_pythonconfig_DATA += bind_rndc/bind_rndc.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += bind_rndc/README.md bind_rndc/Makefile.inc
+
diff --git a/collectors/python.d.plugin/bind_rndc/README.md b/collectors/python.d.plugin/bind_rndc/README.md
new file mode 100644
index 0000000..2832575
--- /dev/null
+++ b/collectors/python.d.plugin/bind_rndc/README.md
@@ -0,0 +1,79 @@
+<!--
+title: "ISC Bind monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/bind_rndc/README.md
+sidebar_label: "ISC Bind"
+-->
+
+# ISC Bind monitoring with Netdata
+
+Collects name server summary performance statistics using the `rndc` tool.
+
+## Requirements
+
+- BIND version must be 9.6 or later
+- Netdata must have permissions to run `rndc stats`
+
+It produces:
+
+1. **Name server statistics**
+
+ - requests
+ - responses
+ - success
+ - auth_answer
+ - nonauth_answer
+ - nxrrset
+ - failure
+ - nxdomain
+ - recursion
+ - duplicate
+ - rejections
+
+2. **Incoming queries**
+
+ - RESERVED0
+ - A
+ - NS
+ - CNAME
+ - SOA
+ - PTR
+ - MX
+ - TXT
+ - X25
+ - AAAA
+ - SRV
+ - NAPTR
+ - A6
+ - DS
+ - RRSIG
+ - DNSKEY
+ - SPF
+ - ANY
+ - DLV
+
+3. **Outgoing queries**
+
+- Same as Incoming queries
+
+## Configuration
+
+Edit the `python.d/bind_rndc.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/bind_rndc.conf
+```
+
+Sample:
+
+```yaml
+local:
+ named_stats_path : '/var/log/bind/named.stats'
+```
+
+If no configuration is given, the module will attempt to read the `named.stats` file at `/var/log/bind/named.stats`.
+
+---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fbind_rndc%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/python.d.plugin/bind_rndc/bind_rndc.chart.py b/collectors/python.d.plugin/bind_rndc/bind_rndc.chart.py
new file mode 100644
index 0000000..9d6c9fe
--- /dev/null
+++ b/collectors/python.d.plugin/bind_rndc/bind_rndc.chart.py
@@ -0,0 +1,252 @@
+# -*- coding: utf-8 -*-
+# Description: bind rndc netdata python.d module
+# Author: ilyam8
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import os
+from collections import defaultdict
+from subprocess import Popen
+
+from bases.FrameworkServices.SimpleService import SimpleService
+from bases.collection import find_binary
+
+update_every = 30
+
+ORDER = [
+ 'name_server_statistics',
+ 'incoming_queries',
+ 'outgoing_queries',
+ 'named_stats_size',
+]
+
+CHARTS = {
+ 'name_server_statistics': {
+ 'options': [None, 'Name Server Statistics', 'stats', 'name server statistics',
+ 'bind_rndc.name_server_statistics', 'line'],
+ 'lines': [
+ ['nms_requests', 'requests', 'incremental'],
+ ['nms_rejected_queries', 'rejected_queries', 'incremental'],
+ ['nms_success', 'success', 'incremental'],
+ ['nms_failure', 'failure', 'incremental'],
+ ['nms_responses', 'responses', 'incremental'],
+ ['nms_duplicate', 'duplicate', 'incremental'],
+ ['nms_recursion', 'recursion', 'incremental'],
+ ['nms_nxrrset', 'nxrrset', 'incremental'],
+ ['nms_nxdomain', 'nxdomain', 'incremental'],
+ ['nms_non_auth_answer', 'non_auth_answer', 'incremental'],
+ ['nms_auth_answer', 'auth_answer', 'incremental'],
+ ['nms_dropped_queries', 'dropped_queries', 'incremental'],
+ ]},
+ 'incoming_queries': {
+ 'options': [None, 'Incoming Queries', 'queries', 'incoming queries', 'bind_rndc.incoming_queries', 'line'],
+ 'lines': [
+ ]},
+ 'outgoing_queries': {
+ 'options': [None, 'Outgoing Queries', 'queries', 'outgoing queries', 'bind_rndc.outgoing_queries', 'line'],
+ 'lines': [
+ ]},
+ 'named_stats_size': {
+ 'options': [None, 'Named Stats File Size', 'MiB', 'file size', 'bind_rndc.stats_size', 'line'],
+ 'lines': [
+ ['stats_size', None, 'absolute', 1, 1 << 20]
+ ]
+ }
+}
+
+NMS = {
+ 'nms_requests': [
+ 'IPv4 requests received',
+ 'IPv6 requests received',
+ 'TCP requests received',
+ 'requests with EDNS(0) receive'
+ ],
+ 'nms_responses': [
+ 'responses sent',
+ 'truncated responses sent',
+ 'responses with EDNS(0) sent',
+ 'requests with unsupported EDNS version received'
+ ],
+ 'nms_failure': [
+ 'other query failures',
+ 'queries resulted in SERVFAIL'
+ ],
+ 'nms_auth_answer': ['queries resulted in authoritative answer'],
+ 'nms_non_auth_answer': ['queries resulted in non authoritative answer'],
+ 'nms_nxrrset': ['queries resulted in nxrrset'],
+ 'nms_success': ['queries resulted in successful answer'],
+ 'nms_nxdomain': ['queries resulted in NXDOMAIN'],
+ 'nms_recursion': ['queries caused recursion'],
+ 'nms_duplicate': ['duplicate queries received'],
+ 'nms_rejected_queries': [
+ 'auth queries rejected',
+ 'recursive queries rejected'
+ ],
+ 'nms_dropped_queries': ['queries dropped']
+}
+
+STATS = ['Name Server Statistics', 'Incoming Queries', 'Outgoing Queries']
+
+
+class Service(SimpleService):
+ def __init__(self, configuration=None, name=None):
+ SimpleService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
+ self.named_stats_path = self.configuration.get('named_stats_path', '/var/log/bind/named.stats')
+ self.rndc = find_binary('rndc')
+ self.data = dict(
+ nms_requests=0,
+ nms_responses=0,
+ nms_failure=0,
+ nms_auth_answer=0,
+ nms_non_auth_answer=0,
+ nms_nxrrset=0,
+ nms_success=0,
+ nms_nxdomain=0,
+ nms_recursion=0,
+ nms_duplicate=0,
+ nms_rejected_queries=0,
+ nms_dropped_queries=0,
+ )
+
+ def check(self):
+ if not self.rndc:
+ self.error('Can\'t locate "rndc" binary or binary is not executable by netdata')
+ return False
+
+ if not (os.path.isfile(self.named_stats_path) and os.access(self.named_stats_path, os.R_OK)):
+ self.error('Cannot access file %s' % self.named_stats_path)
+ return False
+
+ run_rndc = Popen([self.rndc, 'stats'], shell=False)
+ run_rndc.wait()
+
+ if not run_rndc.returncode:
+ return True
+ self.error('Not enough permissions to run "%s stats"' % self.rndc)
+ return False
+
+ def _get_raw_data(self):
+ """
+ Run 'rndc stats' and read last dump from named.stats
+ :return: dict
+ """
+ result = dict()
+ try:
+ current_size = os.path.getsize(self.named_stats_path)
+ run_rndc = Popen([self.rndc, 'stats'], shell=False)
+ run_rndc.wait()
+
+ if run_rndc.returncode:
+ return None
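+ # seek past the previous file size so only the stats appended by this 'rndc stats' run are read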
+ with open(self.named_stats_path) as named_stats:
+ named_stats.seek(current_size)
+ result['stats'] = named_stats.readlines()
+ result['size'] = current_size
+ return result
+ except (OSError, IOError):
+ return None
+
+ def _get_data(self):
+ """
+ Parse data from _get_raw_data()
+ :return: dict
+ """
+
+ raw_data = self._get_raw_data()
+
+ if raw_data is None:
+ return None
+ parsed = dict()
+ for stat in STATS:
+ parsed[stat] = parse_stats(field=stat,
+ named_stats=raw_data['stats'])
+
+ self.data.update(nms_mapper(data=parsed['Name Server Statistics']))
+
+ for elem in zip(['Incoming Queries', 'Outgoing Queries'], ['incoming_queries', 'outgoing_queries']):
+ parsed_key, chart_name = elem[0], elem[1]
+ for dimension_id, value in queries_mapper(data=parsed[parsed_key],
+ add=chart_name[:9]).items():
+
+ if dimension_id not in self.data:
+ dimension = dimension_id.replace(chart_name[:9], '')
+ if dimension_id not in self.charts[chart_name]:
+ self.charts[chart_name].add_dimension([dimension_id, dimension, 'incremental'])
+
+ self.data[dimension_id] = value
+
+ self.data['stats_size'] = raw_data['size']
+ return self.data
+
+
+def parse_stats(field, named_stats):
+ """
+ :param field: str:
+ :param named_stats: list:
+ :return: dict
+
+ Example:
+ field: 'Incoming Queries'
+ named_stats (list of lines):
+ ++ Incoming Requests ++
+ 1405660 QUERY
+ 3 NOTIFY
+ ++ Incoming Queries ++
+ 1214961 A
+ 75 NS
+ 2 CNAME
+ 2897 SOA
+ 35544 PTR
+ 14 MX
+ 5822 TXT
+ 145974 AAAA
+ 371 SRV
+ ++ Outgoing Queries ++
+ ...
+
+ result:
+ {'A': 1214961, 'NS': 75, 'CNAME': 2, 'SOA': 2897, ...}
+ """
+ data = dict()
+ ns = iter(named_stats)
+ for line in ns:
+ if field not in line:
+ continue
+ while True:
+ try:
+ line = next(ns)
+ except StopIteration:
+ break
+ if '++' not in line:
+ if '[' in line:
+ continue
+ v, k = line.strip().split(' ', 1)
+ if k not in data:
+ data[k] = 0
+ data[k] += int(v)
+ continue
+ break
+ break
+ return data
+
+
+def nms_mapper(data):
+ """
+ :param data: dict
+ :return: dict(defaultdict)
+ """
+ result = defaultdict(int)
+ for k, v in NMS.items():
+ for elem in v:
+ result[k] += data.get(elem, 0)
+ return result
+
+
+def queries_mapper(data, add):
+ """
+ :param data: dict
+ :param add: str
+ :return: dict
+ """
+ return dict([(add + k, v) for k, v in data.items()])
diff --git a/collectors/python.d.plugin/bind_rndc/bind_rndc.conf b/collectors/python.d.plugin/bind_rndc/bind_rndc.conf
new file mode 100644
index 0000000..3b7e9a2
--- /dev/null
+++ b/collectors/python.d.plugin/bind_rndc/bind_rndc.conf
@@ -0,0 +1,110 @@
+# netdata python.d.plugin configuration for bind_rndc
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# Additionally to the above, bind_rndc also supports the following:
+#
+# named_stats_path: 'path to named.stats' # Default: '/var/log/bind/named.stats'
+#------------------------------------------------------------------------------------------------------------------
+# IMPORTANT Information
+#
+# BIND APPENDS to the stats file on EVERY RUN. It is NOT RECOMMENDED to set update_every below 30 sec.
+# It is STRONGLY RECOMMENDED to create a bind-rndc conf file for logrotate.
+#
+# To set up your BIND to dump stats do the following:
+#
+# 1. add to 'named.conf.options' options {}:
+# statistics-file "/var/log/bind/named.stats";
+#
+# 2. Create bind/ directory in /var/log
+# cd /var/log/ && mkdir bind
+#
+# 3. Change owner of directory to 'bind' user
+# chown bind bind/
+#
+# 4. RELOAD (NOT restart) BIND
+# systemctl reload bind9.service
+#
+# 5. Run 'rndc stats' as root to dump the stats (BIND will create named.stats in the new directory)
+#
+#
+# To ALLOW NETDATA TO RUN 'rndc stats' change '/etc/bind/rndc.key' group to netdata
+# chown :netdata rndc.key
+#
+# Last BUT NOT least, create bind-rndc.conf in logrotate.d/
+# A working example:
+# /var/log/bind/named.stats {
+#
+# daily
+# rotate 4
+# compress
+# delaycompress
+# create 0644 bind bind
+# missingok
+# postrotate
+# rndc reload > /dev/null
+# endscript
+# }
+#
+# To test your logrotate conf file run as root:
+#
+# logrotate /etc/logrotate.d/bind-rndc -d (debug dry-run mode)
+#
+# ----------------------------------------------------------------------
diff --git a/collectors/python.d.plugin/boinc/Makefile.inc b/collectors/python.d.plugin/boinc/Makefile.inc
new file mode 100644
index 0000000..319e19c
--- /dev/null
+++ b/collectors/python.d.plugin/boinc/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += boinc/boinc.chart.py
+dist_pythonconfig_DATA += boinc/boinc.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += boinc/README.md boinc/Makefile.inc
+
diff --git a/collectors/python.d.plugin/boinc/README.md b/collectors/python.d.plugin/boinc/README.md
new file mode 100644
index 0000000..bd509c9
--- /dev/null
+++ b/collectors/python.d.plugin/boinc/README.md
@@ -0,0 +1,41 @@
+<!--
+title: "BOINC monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/boinc/README.md
+sidebar_label: "BOINC"
+-->
+
+# BOINC monitoring with Netdata
+
+Monitors task counts for the Berkeley Open Infrastructure for Network Computing (BOINC) distributed computing client, using the same RPC interface as the BOINC monitoring GUI.
+
+It provides charts tracking the total number of tasks and active tasks, as well as charts tracking each of the possible task states.
+
+## Configuration
+
+Edit the `python.d/boinc.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/boinc.conf
+```
+
+BOINC requires the use of a password to access its RPC interface. You can
+find this password in the `gui_rpc_auth.cfg` file in your BOINC directory.
+
+By default, the module will try to auto-detect the password by looking
+in `/var/lib/boinc` for this file (this is the location most Linux
+distributions use for a system-wide BOINC installation), so things may
+just work without needing configuration for the local system.
+
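+If auto-detection does not work for your setup, you can set the password
+explicitly for the local job; a minimal sketch (the password value is a
+placeholder, copy yours from `gui_rpc_auth.cfg`):
+
+```yaml
+local:
+ host: localhost
+ password: some-password
+```
+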
+You can monitor remote systems as well:
+
+```yaml
+remote:
+ host: some-host
+ password: some-password
+```
+
+---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fboinc%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/python.d.plugin/boinc/boinc.chart.py b/collectors/python.d.plugin/boinc/boinc.chart.py
new file mode 100644
index 0000000..a31eda1
--- /dev/null
+++ b/collectors/python.d.plugin/boinc/boinc.chart.py
@@ -0,0 +1,168 @@
+# -*- coding: utf-8 -*-
+# Description: BOINC netdata python.d module
+# Author: Austin S. Hemmelgarn (Ferroin)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import socket
+
+from bases.FrameworkServices.SimpleService import SimpleService
+from third_party import boinc_client
+
+ORDER = [
+ 'tasks',
+ 'states',
+ 'sched_states',
+ 'process_states',
+]
+
+CHARTS = {
+ 'tasks': {
+ 'options': [None, 'Overall Tasks', 'tasks', 'boinc', 'boinc.tasks', 'line'],
+ 'lines': [
+ ['total', 'Total', 'absolute', 1, 1],
+ ['active', 'Active', 'absolute', 1, 1]
+ ]
+ },
+ 'states': {
+ 'options': [None, 'Tasks per State', 'tasks', 'boinc', 'boinc.states', 'line'],
+ 'lines': [
+ ['new', 'New', 'absolute', 1, 1],
+ ['downloading', 'Downloading', 'absolute', 1, 1],
+ ['downloaded', 'Ready to Run', 'absolute', 1, 1],
+ ['comperror', 'Compute Errors', 'absolute', 1, 1],
+ ['uploading', 'Uploading', 'absolute', 1, 1],
+ ['uploaded', 'Uploaded', 'absolute', 1, 1],
+ ['aborted', 'Aborted', 'absolute', 1, 1],
+ ['upload_failed', 'Failed Uploads', 'absolute', 1, 1]
+ ]
+ },
+ 'sched_states': {
+ 'options': [None, 'Tasks per Scheduler State', 'tasks', 'boinc', 'boinc.sched', 'line'],
+ 'lines': [
+ ['uninit_sched', 'Uninitialized', 'absolute', 1, 1],
+ ['preempted', 'Preempted', 'absolute', 1, 1],
+ ['scheduled', 'Scheduled', 'absolute', 1, 1]
+ ]
+ },
+ 'process_states': {
+ 'options': [None, 'Tasks per Process State', 'tasks', 'boinc', 'boinc.process', 'line'],
+ 'lines': [
+ ['uninit_proc', 'Uninitialized', 'absolute', 1, 1],
+ ['executing', 'Executing', 'absolute', 1, 1],
+ ['suspended', 'Suspended', 'absolute', 1, 1],
+ ['aborting', 'Aborted', 'absolute', 1, 1],
+ ['quit', 'Quit', 'absolute', 1, 1],
+ ['copy_pending', 'Copy Pending', 'absolute', 1, 1]
+ ]
+ }
+}
+
+# A simple template used for pre-loading the return dictionary to make
+# the _get_data() method simpler.
+_DATA_TEMPLATE = {
+ 'total': 0,
+ 'active': 0,
+ 'new': 0,
+ 'downloading': 0,
+ 'downloaded': 0,
+ 'comperror': 0,
+ 'uploading': 0,
+ 'uploaded': 0,
+ 'aborted': 0,
+ 'upload_failed': 0,
+ 'uninit_sched': 0,
+ 'preempted': 0,
+ 'scheduled': 0,
+ 'uninit_proc': 0,
+ 'executing': 0,
+ 'suspended': 0,
+ 'aborting': 0,
+ 'quit': 0,
+ 'copy_pending': 0
+}
+
+# Map task states to dimensions
+_TASK_MAP = {
+ boinc_client.ResultState.NEW: 'new',
+ boinc_client.ResultState.FILES_DOWNLOADING: 'downloading',
+ boinc_client.ResultState.FILES_DOWNLOADED: 'downloaded',
+ boinc_client.ResultState.COMPUTE_ERROR: 'comperror',
+ boinc_client.ResultState.FILES_UPLOADING: 'uploading',
+ boinc_client.ResultState.FILES_UPLOADED: 'uploaded',
+ boinc_client.ResultState.ABORTED: 'aborted',
+ boinc_client.ResultState.UPLOAD_FAILED: 'upload_failed'
+}
+
+# Map scheduler states to dimensions
+_SCHED_MAP = {
+ boinc_client.CpuSched.UNINITIALIZED: 'uninit_sched',
+ boinc_client.CpuSched.PREEMPTED: 'preempted',
+ boinc_client.CpuSched.SCHEDULED: 'scheduled',
+}
+
+# Maps process states to dimensions
+_PROC_MAP = {
+ boinc_client.Process.UNINITIALIZED: 'uninit_proc',
+ boinc_client.Process.EXECUTING: 'executing',
+ boinc_client.Process.SUSPENDED: 'suspended',
+ boinc_client.Process.ABORT_PENDING: 'aborted',
+ boinc_client.Process.QUIT_PENDING: 'quit',
+ boinc_client.Process.COPY_PENDING: 'copy_pending'
+}
+
+
+class Service(SimpleService):
+ def __init__(self, configuration=None, name=None):
+ SimpleService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
+        # the shipped boinc.conf documents this option as 'hostname',
+        # so accept both keys
+        self.host = self.configuration.get('hostname', self.configuration.get('host', 'localhost'))
+ self.port = self.configuration.get('port', 0)
+ self.password = self.configuration.get('password', '')
+ self.client = boinc_client.BoincClient(host=self.host, port=self.port, passwd=self.password)
+ self.alive = False
+
+ def check(self):
+ return self.connect()
+
+ def connect(self):
+ self.client.connect()
+ self.alive = self.client.connected and self.client.authorized
+ return self.alive
+
+ def reconnect(self):
+ # The client class itself actually disconnects existing
+ # connections when it is told to connect, so we don't need to
+ # explicitly disconnect when we're just trying to reconnect.
+ return self.connect()
+
+ def is_alive(self):
+ if not self.alive:
+ return self.reconnect()
+ return True
+
+ def _get_data(self):
+ if not self.is_alive():
+ return None
+
+ data = dict(_DATA_TEMPLATE)
+
+ try:
+ results = self.client.get_tasks()
+ except socket.error:
+ self.error('Connection is dead')
+ self.alive = False
+ return None
+
+ for task in results:
+ data['total'] += 1
+ data[_TASK_MAP[task.state]] += 1
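+            # tasks that are not currently running lack the active_task
+            # attributes; treat the resulting AttributeError as "not active"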
+ try:
+ if task.active_task:
+ data['active'] += 1
+ data[_SCHED_MAP[task.scheduler_state]] += 1
+ data[_PROC_MAP[task.active_task_state]] += 1
+ except AttributeError:
+ pass
+
+ return data or None
diff --git a/collectors/python.d.plugin/boinc/boinc.conf b/collectors/python.d.plugin/boinc/boinc.conf
new file mode 100644
index 0000000..16edf55
--- /dev/null
+++ b/collectors/python.d.plugin/boinc/boinc.conf
@@ -0,0 +1,66 @@
+# netdata python.d.plugin configuration for boinc
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# In addition to the above, boinc also supports the following:
+#
+# hostname: localhost # The host running the BOINC client
+# port: 31416 # The remote GUI RPC port for BOINC
+# password: '' # The remote GUI RPC password
diff --git a/collectors/python.d.plugin/ceph/Makefile.inc b/collectors/python.d.plugin/ceph/Makefile.inc
new file mode 100644
index 0000000..15b039e
--- /dev/null
+++ b/collectors/python.d.plugin/ceph/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += ceph/ceph.chart.py
+dist_pythonconfig_DATA += ceph/ceph.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += ceph/README.md ceph/Makefile.inc
+
diff --git a/collectors/python.d.plugin/ceph/README.md b/collectors/python.d.plugin/ceph/README.md
new file mode 100644
index 0000000..5d671f2
--- /dev/null
+++ b/collectors/python.d.plugin/ceph/README.md
@@ -0,0 +1,48 @@
+<!--
+title: "CEPH monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/ceph/README.md
+sidebar_label: "CEPH"
+-->
+
+# CEPH monitoring with Netdata
+
+Monitors usage and consumption statistics of a Ceph cluster, and produces:
+
+- Cluster statistics (usage, available, latency, objects, read/write rate)
+- OSD usage
+- OSD latency
+- Pool usage
+- Pool read/write operations
+- Pool read/write rate
+- Number of objects per pool
+
+## Requirements
+
+- `rados` python module
+- Read permission on the keyring file for the `ceph` group, for example:
+
+```shell
+# chmod 640 /etc/ceph/ceph.client.admin.keyring
+```
+
+## Configuration
+
+Edit the `python.d/ceph.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/ceph.conf
+```
+
+Sample:
+
+```yaml
+local:
+ config_file: '/etc/ceph/ceph.conf'
+ keyring_file: '/etc/ceph/ceph.client.admin.keyring'
+```
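+
+For reference, the module talks to the cluster through `librados`; a
+minimal sketch of the connection it establishes, using the paths from
+the sample above:
+
+```python
+import rados  # provided by the 'rados' python module
+
+cluster = rados.Rados(conffile='/etc/ceph/ceph.conf',
+                      conf={'keyring': '/etc/ceph/ceph.client.admin.keyring'},
+                      rados_id='admin')
+cluster.connect()
+print(cluster.get_cluster_stats())  # kb_used, kb_avail, num_objects, ...
+cluster.shutdown()
+```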
+
+---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fceph%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/python.d.plugin/ceph/ceph.chart.py b/collectors/python.d.plugin/ceph/ceph.chart.py
new file mode 100644
index 0000000..494eef4
--- /dev/null
+++ b/collectors/python.d.plugin/ceph/ceph.chart.py
@@ -0,0 +1,374 @@
+# -*- coding: utf-8 -*-
+# Description: ceph netdata python.d module
+# Author: Luis Eduardo (lets00)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+try:
+ import rados
+
+ CEPH = True
+except ImportError:
+ CEPH = False
+
+import json
+import os
+
+from bases.FrameworkServices.SimpleService import SimpleService
+
+# default module values (can be overridden per job in `config`)
+update_every = 10
+
+ORDER = [
+ 'general_usage',
+ 'general_objects',
+ 'general_bytes',
+ 'general_operations',
+ 'general_latency',
+ 'pool_usage',
+ 'pool_objects',
+ 'pool_read_bytes',
+ 'pool_write_bytes',
+ 'pool_read_operations',
+ 'pool_write_operations',
+ 'osd_usage',
+ 'osd_size',
+ 'osd_apply_latency',
+ 'osd_commit_latency'
+]
+
+CHARTS = {
+ 'general_usage': {
+ 'options': [None, 'Ceph General Space', 'KiB', 'general', 'ceph.general_usage', 'stacked'],
+ 'lines': [
+ ['general_available', 'avail', 'absolute'],
+ ['general_usage', 'used', 'absolute']
+ ]
+ },
+ 'general_objects': {
+ 'options': [None, 'Ceph General Objects', 'objects', 'general', 'ceph.general_objects', 'area'],
+ 'lines': [
+ ['general_objects', 'cluster', 'absolute']
+ ]
+ },
+ 'general_bytes': {
+ 'options': [None, 'Ceph General Read/Write Data/s', 'KiB/s', 'general', 'ceph.general_bytes',
+ 'area'],
+ 'lines': [
+ ['general_read_bytes', 'read', 'absolute', 1, 1024],
+ ['general_write_bytes', 'write', 'absolute', -1, 1024]
+ ]
+ },
+ 'general_operations': {
+ 'options': [None, 'Ceph General Read/Write Operations/s', 'operations', 'general', 'ceph.general_operations',
+ 'area'],
+ 'lines': [
+ ['general_read_operations', 'read', 'absolute', 1],
+ ['general_write_operations', 'write', 'absolute', -1]
+ ]
+ },
+ 'general_latency': {
+ 'options': [None, 'Ceph General Apply/Commit latency', 'milliseconds', 'general', 'ceph.general_latency',
+ 'area'],
+ 'lines': [
+ ['general_apply_latency', 'apply', 'absolute'],
+ ['general_commit_latency', 'commit', 'absolute']
+ ]
+ },
+ 'pool_usage': {
+ 'options': [None, 'Ceph Pools', 'KiB', 'pool', 'ceph.pool_usage', 'line'],
+ 'lines': []
+ },
+ 'pool_objects': {
+ 'options': [None, 'Ceph Pools', 'objects', 'pool', 'ceph.pool_objects', 'line'],
+ 'lines': []
+ },
+ 'pool_read_bytes': {
+ 'options': [None, 'Ceph Read Pool Data/s', 'KiB/s', 'pool', 'ceph.pool_read_bytes', 'area'],
+ 'lines': []
+ },
+ 'pool_write_bytes': {
+ 'options': [None, 'Ceph Write Pool Data/s', 'KiB/s', 'pool', 'ceph.pool_write_bytes', 'area'],
+ 'lines': []
+ },
+ 'pool_read_operations': {
+ 'options': [None, 'Ceph Read Pool Operations/s', 'operations', 'pool', 'ceph.pool_read_operations', 'area'],
+ 'lines': []
+ },
+ 'pool_write_operations': {
+ 'options': [None, 'Ceph Write Pool Operations/s', 'operations', 'pool', 'ceph.pool_write_operations', 'area'],
+ 'lines': []
+ },
+ 'osd_usage': {
+ 'options': [None, 'Ceph OSDs', 'KiB', 'osd', 'ceph.osd_usage', 'line'],
+ 'lines': []
+ },
+ 'osd_size': {
+ 'options': [None, 'Ceph OSDs size', 'KiB', 'osd', 'ceph.osd_size', 'line'],
+ 'lines': []
+ },
+ 'osd_apply_latency': {
+ 'options': [None, 'Ceph OSDs apply latency', 'milliseconds', 'osd', 'ceph.apply_latency', 'line'],
+ 'lines': []
+ },
+ 'osd_commit_latency': {
+ 'options': [None, 'Ceph OSDs commit latency', 'milliseconds', 'osd', 'ceph.commit_latency', 'line'],
+ 'lines': []
+ }
+
+}
+
+
+class Service(SimpleService):
+ def __init__(self, configuration=None, name=None):
+ SimpleService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
+ self.config_file = self.configuration.get('config_file')
+ self.keyring_file = self.configuration.get('keyring_file')
+ self.rados_id = self.configuration.get('rados_id', 'admin')
+
+ def check(self):
+ """
+ Checks module
+ :return:
+ """
+ if not CEPH:
+ self.error('rados module is needed to use ceph.chart.py')
+ return False
+ if not (self.config_file and self.keyring_file):
+ self.error('config_file and/or keyring_file is not defined')
+ return False
+
+ # Verify files and permissions
+ if not (os.access(self.config_file, os.F_OK)):
+ self.error('{0} does not exist'.format(self.config_file))
+ return False
+ if not (os.access(self.keyring_file, os.F_OK)):
+ self.error('{0} does not exist'.format(self.keyring_file))
+ return False
+ if not (os.access(self.config_file, os.R_OK)):
+ self.error('Ceph plugin does not read {0}, define read permission.'.format(self.config_file))
+ return False
+ if not (os.access(self.keyring_file, os.R_OK)):
+ self.error('Ceph plugin does not read {0}, define read permission.'.format(self.keyring_file))
+ return False
+ try:
+ self.cluster = rados.Rados(conffile=self.config_file,
+ conf=dict(keyring=self.keyring_file),
+ rados_id=self.rados_id)
+ self.cluster.connect()
+ except rados.Error as error:
+ self.error(error)
+ return False
+ self.create_definitions()
+ return True
+
+ def create_definitions(self):
+ """
+ Create dynamically charts options
+ :return: None
+ """
+ # Pool lines
+ for pool in sorted(self._get_df()['pools'], key=lambda x: sorted(x.keys())):
+ self.definitions['pool_usage']['lines'].append([pool['name'],
+ pool['name'],
+ 'absolute'])
+ self.definitions['pool_objects']['lines'].append(["obj_{0}".format(pool['name']),
+ pool['name'],
+ 'absolute'])
+ self.definitions['pool_read_bytes']['lines'].append(['read_{0}'.format(pool['name']),
+ pool['name'],
+ 'absolute', 1, 1024])
+ self.definitions['pool_write_bytes']['lines'].append(['write_{0}'.format(pool['name']),
+ pool['name'],
+ 'absolute', 1, 1024])
+ self.definitions['pool_read_operations']['lines'].append(['read_operations_{0}'.format(pool['name']),
+ pool['name'],
+ 'absolute'])
+ self.definitions['pool_write_operations']['lines'].append(['write_operations_{0}'.format(pool['name']),
+ pool['name'],
+ 'absolute'])
+
+ # OSD lines
+ for osd in sorted(self._get_osd_df()['nodes'], key=lambda x: sorted(x.keys())):
+ self.definitions['osd_usage']['lines'].append([osd['name'],
+ osd['name'],
+ 'absolute'])
+ self.definitions['osd_size']['lines'].append(['size_{0}'.format(osd['name']),
+ osd['name'],
+ 'absolute'])
+ self.definitions['osd_apply_latency']['lines'].append(['apply_latency_{0}'.format(osd['name']),
+ osd['name'],
+ 'absolute'])
+ self.definitions['osd_commit_latency']['lines'].append(['commit_latency_{0}'.format(osd['name']),
+ osd['name'],
+ 'absolute'])
+
+ def get_data(self):
+ """
+ Catch all ceph data
+ :return: dict
+ """
+ try:
+ data = {}
+ df = self._get_df()
+ osd_df = self._get_osd_df()
+ osd_perf = self._get_osd_perf()
+ osd_perf_infos = get_osd_perf_infos(osd_perf)
+ pool_stats = self._get_osd_pool_stats()
+
+ data.update(self._get_general(osd_perf_infos, pool_stats))
+ for pool in df['pools']:
+ data.update(self._get_pool_usage(pool))
+ data.update(self._get_pool_objects(pool))
+ for pool_io in pool_stats:
+ data.update(self._get_pool_rw(pool_io))
+ for osd in osd_df['nodes']:
+ data.update(self._get_osd_usage(osd))
+ data.update(self._get_osd_size(osd))
+ for osd_apply_commit in osd_perf_infos:
+ data.update(self._get_osd_latency(osd_apply_commit))
+ return data
+ except (ValueError, AttributeError) as error:
+ self.error(error)
+ return None
+
+ def _get_general(self, osd_perf_infos, pool_stats):
+ """
+ Get ceph's general usage
+ :return: dict
+ """
+ status = self.cluster.get_cluster_stats()
+ read_bytes_sec = 0
+ write_bytes_sec = 0
+ read_op_per_sec = 0
+ write_op_per_sec = 0
+ apply_latency = 0
+ commit_latency = 0
+
+ for pool_rw_io_b in pool_stats:
+ read_bytes_sec += pool_rw_io_b['client_io_rate'].get('read_bytes_sec', 0)
+ write_bytes_sec += pool_rw_io_b['client_io_rate'].get('write_bytes_sec', 0)
+ read_op_per_sec += pool_rw_io_b['client_io_rate'].get('read_op_per_sec', 0)
+ write_op_per_sec += pool_rw_io_b['client_io_rate'].get('write_op_per_sec', 0)
+ for perf in osd_perf_infos:
+ apply_latency += perf['perf_stats']['apply_latency_ms']
+ commit_latency += perf['perf_stats']['commit_latency_ms']
+
+ return {
+ 'general_usage': int(status['kb_used']),
+ 'general_available': int(status['kb_avail']),
+ 'general_objects': int(status['num_objects']),
+ 'general_read_bytes': read_bytes_sec,
+ 'general_write_bytes': write_bytes_sec,
+ 'general_read_operations': read_op_per_sec,
+ 'general_write_operations': write_op_per_sec,
+ 'general_apply_latency': apply_latency,
+ 'general_commit_latency': commit_latency
+ }
+
+ @staticmethod
+ def _get_pool_usage(pool):
+ """
+ Process raw data into pool usage dict information
+ :return: A pool dict with pool name's key and usage bytes' value
+ """
+ return {pool['name']: pool['stats']['kb_used']}
+
+ @staticmethod
+ def _get_pool_objects(pool):
+ """
+ Process raw data into pool usage dict information
+ :return: A pool dict with pool name's key and object numbers
+ """
+ return {'obj_{0}'.format(pool['name']): pool['stats']['objects']}
+
+ @staticmethod
+ def _get_pool_rw(pool):
+ """
+ Get read/write kb and operations in a pool
+ :return: A pool dict with both read/write bytes and operations.
+ """
+ return {
+ 'read_{0}'.format(pool['pool_name']): int(pool['client_io_rate'].get('read_bytes_sec', 0)),
+ 'write_{0}'.format(pool['pool_name']): int(pool['client_io_rate'].get('write_bytes_sec', 0)),
+ 'read_operations_{0}'.format(pool['pool_name']): int(pool['client_io_rate'].get('read_op_per_sec', 0)),
+ 'write_operations_{0}'.format(pool['pool_name']): int(pool['client_io_rate'].get('write_op_per_sec', 0))
+ }
+
+ @staticmethod
+ def _get_osd_usage(osd):
+ """
+ Process raw data into osd dict information to get osd usage
+ :return: A osd dict with osd name's key and usage bytes' value
+ """
+ return {osd['name']: float(osd['kb_used'])}
+
+ @staticmethod
+ def _get_osd_size(osd):
+ """
+ Process raw data into osd dict information to get osd size (kb)
+ :return: A osd dict with osd name's key and size bytes' value
+ """
+ return {'size_{0}'.format(osd['name']): float(osd['kb'])}
+
+ @staticmethod
+ def _get_osd_latency(osd):
+ """
+ Get ceph osd apply and commit latency
+ :return: A osd dict with osd name's key with both apply and commit latency values
+ """
+ return {
+ 'apply_latency_osd.{0}'.format(osd['id']): osd['perf_stats']['apply_latency_ms'],
+ 'commit_latency_osd.{0}'.format(osd['id']): osd['perf_stats']['commit_latency_ms']
+ }
+
+ def _get_df(self):
+ """
+ Get ceph df output
+ :return: ceph df --format json
+ """
+ return json.loads(self.cluster.mon_command(json.dumps({
+ 'prefix': 'df',
+ 'format': 'json'
+ }), '')[1].decode('utf-8'))
+
+ def _get_osd_df(self):
+ """
+ Get ceph osd df output
+ :return: ceph osd df --format json
+ """
+ return json.loads(self.cluster.mon_command(json.dumps({
+ 'prefix': 'osd df',
+ 'format': 'json'
+ }), '')[1].decode('utf-8').replace('-nan', '"-nan"'))
+
+ def _get_osd_perf(self):
+ """
+ Get ceph osd performance
+ :return: ceph osd perf --format json
+ """
+ return json.loads(self.cluster.mon_command(json.dumps({
+ 'prefix': 'osd perf',
+ 'format': 'json'
+ }), '')[1].decode('utf-8'))
+
+ def _get_osd_pool_stats(self):
+ """
+ Get ceph osd pool status.
+ This command is used to get information about both
+ read/write operation and bytes per second on each pool
+ :return: ceph osd pool stats --format json
+ """
+ return json.loads(self.cluster.mon_command(json.dumps({
+ 'prefix': 'osd pool stats',
+ 'format': 'json'
+ }), '')[1].decode('utf-8'))
+
+
+def get_osd_perf_infos(osd_perf):
+ # https://github.com/netdata/netdata/issues/8247
+ # module uses 'osd_perf_infos' data, its been moved under 'osdstats` since Ceph v14.2
+ if 'osd_perf_infos' in osd_perf:
+ return osd_perf['osd_perf_infos']
+ return osd_perf['osdstats']['osd_perf_infos']
diff --git a/collectors/python.d.plugin/ceph/ceph.conf b/collectors/python.d.plugin/ceph/ceph.conf
new file mode 100644
index 0000000..81788e8
--- /dev/null
+++ b/collectors/python.d.plugin/ceph/ceph.conf
@@ -0,0 +1,75 @@
+# netdata python.d.plugin configuration for ceph stats
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 10
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 10 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# In addition to the above, the ceph plugin also supports the following:
+#
+# config_file: 'config_file' # Ceph config file.
+# keyring_file: 'keyring_file' # Ceph keyring file. The netdata user must be added to the ceph group,
+# # and the keyring file must be group-readable.
+# rados_id: 'rados username' # ID used to connect to the ceph cluster. Allows using
+# # a read-only key for pulling data instead of the admin key.
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+#
+config_file: '/etc/ceph/ceph.conf'
+keyring_file: '/etc/ceph/ceph.client.admin.keyring'
+rados_id: 'admin'
diff --git a/collectors/python.d.plugin/chrony/Makefile.inc b/collectors/python.d.plugin/chrony/Makefile.inc
new file mode 100644
index 0000000..18a805b
--- /dev/null
+++ b/collectors/python.d.plugin/chrony/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += chrony/chrony.chart.py
+dist_pythonconfig_DATA += chrony/chrony.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += chrony/README.md chrony/Makefile.inc
+
diff --git a/collectors/python.d.plugin/chrony/README.md b/collectors/python.d.plugin/chrony/README.md
new file mode 100644
index 0000000..b1e7ec3
--- /dev/null
+++ b/collectors/python.d.plugin/chrony/README.md
@@ -0,0 +1,61 @@
+<!--
+title: "Chrony monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/chrony/README.md
+sidebar_label: "Chrony"
+-->
+
+# Chrony monitoring with Netdata
+
+Monitors the precision and statistics of a local chronyd server, and produces:
+
+- frequency
+- last offset
+- RMS offset
+- residual freq
+- root delay
+- root dispersion
+- skew
+- system time
+
+## Requirements
+
+Verify that the `netdata` user can execute `chronyc tracking`. If necessary, update the `cmdallow` directive in `/etc/chrony.conf`.
+
+## Enable the collector
+
+The `chrony` collector is disabled by default. To enable it, use `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`, to edit the `python.d.conf` file.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d.conf
+```
+
+Change the value of the `chrony` setting to `yes`. Save the file and restart the Netdata Agent with `sudo systemctl
+restart netdata`, or the appropriate method for your system, to finish enabling the `chrony` collector.
+
+## Configuration
+
+Edit the `python.d/chrony.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/chrony.conf
+```
+
+Sample:
+
+```yaml
+# data collection frequency:
+update_every: 1
+
+# chrony query command:
+local:
+ command: 'chronyc -n tracking'
+```
+
+Save the file and restart the Netdata Agent with `sudo systemctl restart netdata`, or the appropriate method for your
+system, to finish configuring the `chrony` collector.
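+
+For reference, the collector keeps the first numeric token of each
+`Field : value` line that `chronyc tracking` prints; a minimal sketch
+of that parsing, using a made-up sample line:
+
+```python
+line = 'System time     : 0.000020390 seconds fast of NTP time'
+
+# split on the first colon only, then keep the leading numeric token
+key, value = (part.strip() for part in line.split(':', 1))
+print('%s = %s' % (key, value.split()[0]))  # System time = 0.000020390
+```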
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fchrony%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/python.d.plugin/chrony/chrony.chart.py b/collectors/python.d.plugin/chrony/chrony.chart.py
new file mode 100644
index 0000000..91f7250
--- /dev/null
+++ b/collectors/python.d.plugin/chrony/chrony.chart.py
@@ -0,0 +1,118 @@
+# -*- coding: utf-8 -*-
+# Description: chrony netdata python.d module
+# Author: Dominik Schloesser (domschl)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from bases.FrameworkServices.ExecutableService import ExecutableService
+
+# default module values (can be overridden per job in `config`)
+update_every = 5
+
+CHRONY_COMMAND = 'chronyc -n tracking'
+
+# charts order (can be overridden if you want less charts, or different order)
+ORDER = [
+ 'system',
+ 'offsets',
+ 'stratum',
+ 'root',
+ 'frequency',
+ 'residualfreq',
+ 'skew',
+]
+
+CHARTS = {
+ 'system': {
+ 'options': [None, 'Chrony System Time Deltas', 'microseconds', 'system', 'chrony.system', 'area'],
+ 'lines': [
+ ['timediff', 'system time', 'absolute', 1, 1000]
+ ]
+ },
+ 'offsets': {
+ 'options': [None, 'Chrony System Time Offsets', 'microseconds', 'system', 'chrony.offsets', 'area'],
+ 'lines': [
+ ['lastoffset', 'last offset', 'absolute', 1, 1000],
+ ['rmsoffset', 'RMS offset', 'absolute', 1, 1000]
+ ]
+ },
+ 'stratum': {
+ 'options': [None, 'Chrony Stratum', 'stratum', 'root', 'chrony.stratum', 'line'],
+ 'lines': [
+ ['stratum', None, 'absolute', 1, 1]
+ ]
+ },
+ 'root': {
+ 'options': [None, 'Chrony Root Delays', 'milliseconds', 'root', 'chrony.root', 'line'],
+ 'lines': [
+ ['rootdelay', 'delay', 'absolute', 1, 1000000],
+ ['rootdispersion', 'dispersion', 'absolute', 1, 1000000]
+ ]
+ },
+ 'frequency': {
+ 'options': [None, 'Chrony Frequency', 'ppm', 'frequencies', 'chrony.frequency', 'area'],
+ 'lines': [
+ ['frequency', None, 'absolute', 1, 1000]
+ ]
+ },
+ 'residualfreq': {
+ 'options': [None, 'Chrony Residual frequency', 'ppm', 'frequencies', 'chrony.residualfreq', 'area'],
+ 'lines': [
+ ['residualfreq', 'residual frequency', 'absolute', 1, 1000]
+ ]
+ },
+ 'skew': {
+ 'options': [None, 'Chrony Skew, error bound on frequency', 'ppm', 'frequencies', 'chrony.skew', 'area'],
+ 'lines': [
+ ['skew', None, 'absolute', 1, 1000]
+ ]
+ }
+}
+
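+# (chronyc field label, dimension id, multiplier used to convert the
+# parsed value into the integer units the charts expect)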
+CHRONY = [
+ ('Frequency', 'frequency', 1e3),
+ ('Last offset', 'lastoffset', 1e9),
+ ('RMS offset', 'rmsoffset', 1e9),
+ ('Residual freq', 'residualfreq', 1e3),
+ ('Root delay', 'rootdelay', 1e9),
+ ('Root dispersion', 'rootdispersion', 1e9),
+ ('Skew', 'skew', 1e3),
+ ('Stratum', 'stratum', 1),
+ ('System time', 'timediff', 1e9)
+]
+
+
+class Service(ExecutableService):
+ def __init__(self, configuration=None, name=None):
+ ExecutableService.__init__(
+ self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
+ self.command = CHRONY_COMMAND
+
+ def _get_data(self):
+ """
+ Format data received from shell command
+ :return: dict
+ """
+ raw_data = self._get_raw_data()
+ if not raw_data:
+ return None
+
+ raw_data = (line.split(':', 1) for line in raw_data)
+ parsed, data = dict(), dict()
+
+ for line in raw_data:
+ try:
+ key, value = (l.strip() for l in line)
+ except ValueError:
+ continue
+ if value:
+ parsed[key] = value.split()[0]
+
+ for key, dim_id, multiplier in CHRONY:
+ try:
+ data[dim_id] = int(float(parsed[key]) * multiplier)
+ except (KeyError, ValueError):
+ continue
+
+ return data or None
diff --git a/collectors/python.d.plugin/chrony/chrony.conf b/collectors/python.d.plugin/chrony/chrony.conf
new file mode 100644
index 0000000..fd95519
--- /dev/null
+++ b/collectors/python.d.plugin/chrony/chrony.conf
@@ -0,0 +1,77 @@
+# netdata python.d.plugin configuration for chrony
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+update_every: 5
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# In addition to the above, chrony also supports the following:
+#
+# command: 'chronyc tracking' # the command to run
+#
+
+# ----------------------------------------------------------------------
+# REQUIRED chrony CONFIGURATION
+#
+# netdata will query chrony as user netdata.
+# verify that user netdata is allowed to call 'chronyc tracking'
+# Check cmdallow in chrony.conf
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+
+local:
+ command: 'chronyc -n tracking'
diff --git a/collectors/python.d.plugin/couchdb/Makefile.inc b/collectors/python.d.plugin/couchdb/Makefile.inc
new file mode 100644
index 0000000..89dfb51
--- /dev/null
+++ b/collectors/python.d.plugin/couchdb/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += couchdb/couchdb.chart.py
+dist_pythonconfig_DATA += couchdb/couchdb.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += couchdb/README.md couchdb/Makefile.inc
+
diff --git a/collectors/python.d.plugin/couchdb/README.md b/collectors/python.d.plugin/couchdb/README.md
new file mode 100644
index 0000000..896bbdd
--- /dev/null
+++ b/collectors/python.d.plugin/couchdb/README.md
@@ -0,0 +1,53 @@
+<!--
+title: "Apache CouchDB monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/couchdb/README.md
+sidebar_label: "CouchDB"
+-->
+
+# Apache CouchDB monitoring with Netdata
+
+Monitors vital statistics of a local Apache CouchDB 2.x server, including:
+
+- Overall server reads/writes
+- HTTP traffic breakdown
+ - Request methods (`GET`, `PUT`, `POST`, etc.)
+ - Response status codes (`200`, `201`, `4xx`, etc.)
+- Active server tasks
+- Replication status (CouchDB 2.1 and up only)
+- Erlang VM stats
+- Optional per-database statistics: sizes, # of docs, # of deleted docs
+
+## Configuration
+
+Edit the `python.d/couchdb.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/couchdb.conf
+```
+
+Sample for a local server running on port 5984:
+
+```yaml
+local:
+ user: 'admin'
+ pass: 'password'
+ node: 'couchdb@127.0.0.1'
+```
+
+Be sure to specify a correct admin-level username and password.
+
+You may also need to change the `node` name; this should match the value of `-name NODENAME` in your CouchDB's `etc/vm.args` file. Typically this is of the form `couchdb@fully.qualified.domain.name` in a cluster, or `couchdb@127.0.0.1` / `couchdb@localhost` for a single-node server.
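+
+For reference, the collector reads most of its metrics from CouchDB's
+HTTP API; a minimal sketch of the main request it issues (host, port
+and node from the sample above, assuming the stats endpoint is
+reachable without authentication):
+
+```python
+import json
+
+try:
+    from urllib.request import urlopen  # Python 3
+except ImportError:
+    from urllib2 import urlopen  # Python 2
+
+url = 'http://127.0.0.1:5984/_node/couchdb@127.0.0.1/_stats'
+stats = json.loads(urlopen(url).read().decode('utf-8'))
+print(stats['couchdb']['database_reads']['value'])
+```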
+
+If you want per-database statistics, these need to be added to the configuration, separated by spaces:
+
+```yaml
+local:
+ ...
+ databases: 'db1 db2 db3 ...'
+```
+
+---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fcouchdb%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/python.d.plugin/couchdb/couchdb.chart.py b/collectors/python.d.plugin/couchdb/couchdb.chart.py
new file mode 100644
index 0000000..a395f35
--- /dev/null
+++ b/collectors/python.d.plugin/couchdb/couchdb.chart.py
@@ -0,0 +1,398 @@
+# -*- coding: utf-8 -*-
+# Description: couchdb netdata python.d module
+# Author: wohali <wohali@apache.org>
+# Thanks to ilyam8 for good examples :)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from collections import namedtuple, defaultdict
+from json import loads
+from socket import gethostbyname, gaierror
+from threading import Thread
+
+try:
+ from queue import Queue
+except ImportError:
+ from Queue import Queue
+
+from bases.FrameworkServices.UrlService import UrlService
+
+update_every = 1
+
+METHODS = namedtuple('METHODS', ['get_data', 'url', 'stats'])
+
+OVERVIEW_STATS = [
+ 'couchdb.database_reads.value',
+ 'couchdb.database_writes.value',
+ 'couchdb.httpd.view_reads.value',
+ 'couchdb.httpd_request_methods.COPY.value',
+ 'couchdb.httpd_request_methods.DELETE.value',
+ 'couchdb.httpd_request_methods.GET.value',
+ 'couchdb.httpd_request_methods.HEAD.value',
+ 'couchdb.httpd_request_methods.OPTIONS.value',
+ 'couchdb.httpd_request_methods.POST.value',
+ 'couchdb.httpd_request_methods.PUT.value',
+ 'couchdb.httpd_status_codes.200.value',
+ 'couchdb.httpd_status_codes.201.value',
+ 'couchdb.httpd_status_codes.202.value',
+ 'couchdb.httpd_status_codes.204.value',
+ 'couchdb.httpd_status_codes.206.value',
+ 'couchdb.httpd_status_codes.301.value',
+ 'couchdb.httpd_status_codes.302.value',
+ 'couchdb.httpd_status_codes.304.value',
+ 'couchdb.httpd_status_codes.400.value',
+ 'couchdb.httpd_status_codes.401.value',
+ 'couchdb.httpd_status_codes.403.value',
+ 'couchdb.httpd_status_codes.404.value',
+ 'couchdb.httpd_status_codes.405.value',
+ 'couchdb.httpd_status_codes.406.value',
+ 'couchdb.httpd_status_codes.409.value',
+ 'couchdb.httpd_status_codes.412.value',
+ 'couchdb.httpd_status_codes.413.value',
+ 'couchdb.httpd_status_codes.414.value',
+ 'couchdb.httpd_status_codes.415.value',
+ 'couchdb.httpd_status_codes.416.value',
+ 'couchdb.httpd_status_codes.417.value',
+ 'couchdb.httpd_status_codes.500.value',
+ 'couchdb.httpd_status_codes.501.value',
+ 'couchdb.open_os_files.value',
+ 'couch_replicator.jobs.running.value',
+ 'couch_replicator.jobs.pending.value',
+ 'couch_replicator.jobs.crashed.value',
+]
+
+SYSTEM_STATS = [
+ 'context_switches',
+ 'run_queue',
+ 'ets_table_count',
+ 'reductions',
+ 'memory.atom',
+ 'memory.atom_used',
+ 'memory.binary',
+ 'memory.code',
+ 'memory.ets',
+ 'memory.other',
+ 'memory.processes',
+ 'io_input',
+ 'io_output',
+ 'os_proc_count',
+ 'process_count',
+ 'internal_replication_jobs'
+]
+
+DB_STATS = [
+ 'doc_count',
+ 'doc_del_count',
+ 'sizes.file',
+ 'sizes.external',
+ 'sizes.active'
+]
+
+ORDER = [
+ 'activity',
+ 'request_methods',
+ 'response_codes',
+ 'active_tasks',
+ 'replicator_jobs',
+ 'open_files',
+ 'db_sizes_file',
+ 'db_sizes_external',
+ 'db_sizes_active',
+ 'db_doc_counts',
+ 'db_doc_del_counts',
+ 'erlang_memory',
+ 'erlang_proc_counts',
+ 'erlang_peak_msg_queue',
+ 'erlang_reductions'
+]
+
+CHARTS = {
+ 'activity': {
+ 'options': [None, 'Overall Activity', 'requests/s',
+ 'dbactivity', 'couchdb.activity', 'stacked'],
+ 'lines': [
+ ['couchdb_database_reads', 'DB reads', 'incremental'],
+ ['couchdb_database_writes', 'DB writes', 'incremental'],
+ ['couchdb_httpd_view_reads', 'View reads', 'incremental']
+ ]
+ },
+ 'request_methods': {
+ 'options': [None, 'HTTP request methods', 'requests/s',
+ 'httptraffic', 'couchdb.request_methods',
+ 'stacked'],
+ 'lines': [
+ ['couchdb_httpd_request_methods_COPY', 'COPY', 'incremental'],
+ ['couchdb_httpd_request_methods_DELETE', 'DELETE', 'incremental'],
+ ['couchdb_httpd_request_methods_GET', 'GET', 'incremental'],
+ ['couchdb_httpd_request_methods_HEAD', 'HEAD', 'incremental'],
+ ['couchdb_httpd_request_methods_OPTIONS', 'OPTIONS',
+ 'incremental'],
+ ['couchdb_httpd_request_methods_POST', 'POST', 'incremental'],
+ ['couchdb_httpd_request_methods_PUT', 'PUT', 'incremental']
+ ]
+ },
+ 'response_codes': {
+ 'options': [None, 'HTTP response status codes', 'responses/s',
+ 'httptraffic', 'couchdb.response_codes',
+ 'stacked'],
+ 'lines': [
+ ['couchdb_httpd_status_codes_200', '200 OK', 'incremental'],
+ ['couchdb_httpd_status_codes_201', '201 Created', 'incremental'],
+ ['couchdb_httpd_status_codes_202', '202 Accepted', 'incremental'],
+ ['couchdb_httpd_status_codes_2xx', 'Other 2xx Success',
+ 'incremental'],
+ ['couchdb_httpd_status_codes_3xx', '3xx Redirection',
+ 'incremental'],
+ ['couchdb_httpd_status_codes_4xx', '4xx Client error',
+ 'incremental'],
+ ['couchdb_httpd_status_codes_5xx', '5xx Server error',
+ 'incremental']
+ ]
+ },
+ 'open_files': {
+ 'options': [None, 'Open files', 'files', 'ops', 'couchdb.open_files', 'line'],
+ 'lines': [
+ ['couchdb_open_os_files', '# files', 'absolute']
+ ]
+ },
+ 'active_tasks': {
+ 'options': [None, 'Active task breakdown', 'tasks', 'ops', 'couchdb.active_tasks', 'stacked'],
+ 'lines': [
+ ['activetasks_indexer', 'Indexer', 'absolute'],
+ ['activetasks_database_compaction', 'DB Compaction', 'absolute'],
+ ['activetasks_replication', 'Replication', 'absolute'],
+ ['activetasks_view_compaction', 'View Compaction', 'absolute']
+ ]
+ },
+ 'replicator_jobs': {
+ 'options': [None, 'Replicator job breakdown', 'jobs', 'ops', 'couchdb.replicator_jobs', 'stacked'],
+ 'lines': [
+ ['couch_replicator_jobs_running', 'Running', 'absolute'],
+ ['couch_replicator_jobs_pending', 'Pending', 'absolute'],
+ ['couch_replicator_jobs_crashed', 'Crashed', 'absolute'],
+ ['internal_replication_jobs', 'Internal replication jobs',
+ 'absolute']
+ ]
+ },
+ 'erlang_memory': {
+ 'options': [None, 'Erlang VM memory usage', 'B', 'erlang', 'couchdb.erlang_vm_memory', 'stacked'],
+ 'lines': [
+ ['memory_atom', 'atom', 'absolute'],
+ ['memory_binary', 'binaries', 'absolute'],
+ ['memory_code', 'code', 'absolute'],
+ ['memory_ets', 'ets', 'absolute'],
+ ['memory_processes', 'procs', 'absolute'],
+ ['memory_other', 'other', 'absolute']
+ ]
+ },
+ 'erlang_reductions': {
+ 'options': [None, 'Erlang reductions', 'count', 'erlang', 'couchdb.reductions', 'line'],
+ 'lines': [
+ ['reductions', 'reductions', 'incremental']
+ ]
+ },
+ 'erlang_proc_counts': {
+ 'options': [None, 'Process counts', 'count', 'erlang', 'couchdb.proccounts', 'line'],
+ 'lines': [
+ ['os_proc_count', 'OS procs', 'absolute'],
+ ['process_count', 'erl procs', 'absolute']
+ ]
+ },
+ 'erlang_peak_msg_queue': {
+ 'options': [None, 'Peak message queue size', 'count', 'erlang', 'couchdb.peakmsgqueue',
+ 'line'],
+ 'lines': [
+ ['peak_msg_queue', 'peak size', 'absolute']
+ ]
+ },
+ # Lines for the following are added as part of check()
+ 'db_sizes_file': {
+ 'options': [None, 'Database sizes (file)', 'KiB', 'perdbstats', 'couchdb.db_sizes_file', 'line'],
+ 'lines': []
+ },
+ 'db_sizes_external': {
+ 'options': [None, 'Database sizes (external)', 'KiB', 'perdbstats', 'couchdb.db_sizes_external', 'line'],
+ 'lines': []
+ },
+ 'db_sizes_active': {
+ 'options': [None, 'Database sizes (active)', 'KiB', 'perdbstats', 'couchdb.db_sizes_active', 'line'],
+ 'lines': []
+ },
+ 'db_doc_counts': {
+ 'options': [None, 'Database # of docs', 'docs',
+ 'perdbstats', 'couchdb_db_doc_count', 'line'],
+ 'lines': []
+ },
+ 'db_doc_del_counts': {
+ 'options': [None, 'Database # of deleted docs', 'docs', 'perdbstats', 'couchdb_db_doc_del_count', 'line'],
+ 'lines': []
+ }
+}
+
+
+class Service(UrlService):
+ def __init__(self, configuration=None, name=None):
+ UrlService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
+ self.host = self.configuration.get('host', '127.0.0.1')
+ self.port = self.configuration.get('port', 5984)
+ self.node = self.configuration.get('node', 'couchdb@127.0.0.1')
+ self.scheme = self.configuration.get('scheme', 'http')
+ self.user = self.configuration.get('user')
+ self.password = self.configuration.get('pass')
+ try:
+ self.dbs = self.configuration.get('databases').split(' ')
+ except (KeyError, AttributeError):
+ self.dbs = list()
+
+ def check(self):
+ if not (self.host and self.port):
+ self.error('Host is not defined in the module configuration file')
+ return False
+ try:
+ self.host = gethostbyname(self.host)
+ except gaierror as error:
+ self.error(str(error))
+ return False
+ self.url = '{scheme}://{host}:{port}'.format(scheme=self.scheme,
+ host=self.host,
+ port=self.port)
+ stats = self.url + '/_node/{node}/_stats'.format(node=self.node)
+ active_tasks = self.url + '/_active_tasks'
+ system = self.url + '/_node/{node}/_system'.format(node=self.node)
+ self.methods = [METHODS(get_data=self._get_overview_stats,
+ url=stats,
+ stats=OVERVIEW_STATS),
+ METHODS(get_data=self._get_active_tasks_stats,
+ url=active_tasks,
+ stats=None),
+ METHODS(get_data=self._get_overview_stats,
+ url=system,
+ stats=SYSTEM_STATS),
+ METHODS(get_data=self._get_dbs_stats,
+ url=self.url,
+ stats=DB_STATS)]
+ # must initialise manager before using _get_raw_data
+ self._manager = self._build_manager()
+ self.dbs = [db for db in self.dbs
+ if self._get_raw_data(self.url + '/' + db)]
+ for db in self.dbs:
+ self.definitions['db_sizes_file']['lines'].append(
+ ['db_' + db + '_sizes_file', db, 'absolute', 1, 1000]
+ )
+ self.definitions['db_sizes_external']['lines'].append(
+ ['db_' + db + '_sizes_external', db, 'absolute', 1, 1000]
+ )
+ self.definitions['db_sizes_active']['lines'].append(
+ ['db_' + db + '_sizes_active', db, 'absolute', 1, 1000]
+ )
+ self.definitions['db_doc_counts']['lines'].append(
+ ['db_' + db + '_doc_count', db, 'absolute']
+ )
+ self.definitions['db_doc_del_counts']['lines'].append(
+ ['db_' + db + '_doc_del_count', db, 'absolute']
+ )
+ return UrlService.check(self)
+
+ def _get_data(self):
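+        # fan out one thread per endpoint, then merge each thread's
+        # result dict from the shared queue as the threads finish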
+ threads = list()
+ queue = Queue()
+ result = dict()
+
+ for method in self.methods:
+ th = Thread(target=method.get_data,
+ args=(queue, method.url, method.stats))
+ th.start()
+ threads.append(th)
+
+ for thread in threads:
+ thread.join()
+ result.update(queue.get())
+
+ # self.info('couchdb result = ' + str(result))
+ return result or None
+
+ def _get_overview_stats(self, queue, url, stats):
+ raw_data = self._get_raw_data(url)
+ if not raw_data:
+ return queue.put(dict())
+ data = loads(raw_data)
+ to_netdata = self._fetch_data(raw_data=data, metrics=stats)
+ if 'message_queues' in data:
+ to_netdata['peak_msg_queue'] = get_peak_msg_queue(data)
+ return queue.put(to_netdata)
+
+ def _get_active_tasks_stats(self, queue, url, _):
+ taskdict = defaultdict(int)
+ taskdict["activetasks_indexer"] = 0
+ taskdict["activetasks_database_compaction"] = 0
+ taskdict["activetasks_replication"] = 0
+ taskdict["activetasks_view_compaction"] = 0
+ raw_data = self._get_raw_data(url)
+ if not raw_data:
+ return queue.put(dict())
+ data = loads(raw_data)
+ for task in data:
+ taskdict["activetasks_" + task["type"]] += 1
+ return queue.put(dict(taskdict))
+
+ def _get_dbs_stats(self, queue, url, stats):
+ to_netdata = {}
+ for db in self.dbs:
+ raw_data = self._get_raw_data(url + '/' + db)
+ if not raw_data:
+ continue
+ data = loads(raw_data)
+ for metric in stats:
+ value = data
+ metrics_list = metric.split('.')
+ try:
+ for m in metrics_list:
+ value = value[m]
+ except (KeyError, TypeError) as e:
+ self.debug('cannot process ' + metric + ' for ' + db
+ + ": " + str(e))
+ continue
+ metric_name = 'db_{0}_{1}'.format(db, '_'.join(metrics_list))
+ to_netdata[metric_name] = value
+ return queue.put(to_netdata)
+
+ def _fetch_data(self, raw_data, metrics):
+ data = dict()
+ for metric in metrics:
+ value = raw_data
+ metrics_list = metric.split('.')
+ try:
+ for m in metrics_list:
+ value = value[m]
+ except (KeyError, TypeError) as e:
+ self.debug('cannot process ' + metric + ': ' + str(e))
+ continue
+ # strip off .value from end of stat
+ if metrics_list[-1] == 'value':
+ metrics_list = metrics_list[:-1]
+ # sum up 3xx/4xx/5xx
+ if metrics_list[0:2] == ['couchdb', 'httpd_status_codes'] and \
+ int(metrics_list[2]) > 202:
+ metrics_list[2] = '{0}xx'.format(int(metrics_list[2]) // 100)
+ if '_'.join(metrics_list) in data:
+ data['_'.join(metrics_list)] += value
+ else:
+ data['_'.join(metrics_list)] = value
+ else:
+ data['_'.join(metrics_list)] = value
+ return data
+
+
+def get_peak_msg_queue(data):
+ maxsize = 0
+ queues = data['message_queues']
+ for queue in iter(queues.values()):
+ if isinstance(queue, dict) and 'count' in queue:
+ value = queue['count']
+ elif isinstance(queue, int):
+ value = queue
+ else:
+ continue
+ maxsize = max(maxsize, value)
+ return maxsize
diff --git a/collectors/python.d.plugin/couchdb/couchdb.conf b/collectors/python.d.plugin/couchdb/couchdb.conf
new file mode 100644
index 0000000..9c68be7
--- /dev/null
+++ b/collectors/python.d.plugin/couchdb/couchdb.conf
@@ -0,0 +1,89 @@
+# netdata python.d.plugin configuration for couchdb
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# By default, CouchDB only updates its stats every 10 seconds.
+update_every: 10
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# In addition to the above, the couchdb plugin also supports the following:
+#
+# host: 'ipaddress' # Server ip address or hostname. Default: 127.0.0.1
+# port: 'port' # CouchDB port. Default: 5984
+# scheme: 'scheme' # http or https. Default: http
+# node: 'couchdb@127.0.0.1' # CouchDB node name. Same as the -name argument in vm.args.
+#
+# if the URL is password protected, the following are supported:
+#
+# user: 'username'
+# pass: 'password'
+#
+# if db-specific stats are desired, place their names in databases:
+# databases: 'npm-registry animaldb'
+#
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+#
+localhost:
+ name: 'local'
+ host: '127.0.0.1'
+ port: '5984'
+ node: 'couchdb@127.0.0.1'
+ scheme: 'http'
+# user: 'admin'
+# pass: 'password'
diff --git a/collectors/python.d.plugin/dns_query_time/Makefile.inc b/collectors/python.d.plugin/dns_query_time/Makefile.inc
new file mode 100644
index 0000000..7eca3e0
--- /dev/null
+++ b/collectors/python.d.plugin/dns_query_time/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += dns_query_time/dns_query_time.chart.py
+dist_pythonconfig_DATA += dns_query_time/dns_query_time.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += dns_query_time/README.md dns_query_time/Makefile.inc
+
diff --git a/collectors/python.d.plugin/dns_query_time/README.md b/collectors/python.d.plugin/dns_query_time/README.md
new file mode 100644
index 0000000..e1fde74
--- /dev/null
+++ b/collectors/python.d.plugin/dns_query_time/README.md
@@ -0,0 +1,29 @@
+<!--
+title: "DNS query RTT monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/dns_query_time/README.md
+sidebar_label: "DNS query RTT"
+-->
+
+# DNS query RTT monitoring with Netdata
+
+Measures DNS query round trip time.
+
+**Requirement:**
+
+- `python-dnspython` package
+
+It produces one aggregate chart or one chart per DNS server, showing the query time.
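+
+A minimal sketch of the timed query the module performs for each
+server (the server address and domain here are examples):
+
+```python
+import dns.message
+import dns.query
+import dns.rdatatype
+
+request = dns.message.make_query('example.com', dns.rdatatype.A)
+response = dns.query.udp(request, '8.8.8.8', timeout=4)
+print(response.time * 1000)  # round-trip time in milliseconds
+```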
+
+## Configuration
+
+Edit the `python.d/dns_query_time.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/dns_query_time.conf
+```
+
+---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fdns_query_time%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/python.d.plugin/dns_query_time/dns_query_time.chart.py b/collectors/python.d.plugin/dns_query_time/dns_query_time.chart.py
new file mode 100644
index 0000000..7e1cb32
--- /dev/null
+++ b/collectors/python.d.plugin/dns_query_time/dns_query_time.chart.py
@@ -0,0 +1,149 @@
+# -*- coding: utf-8 -*-
+# Description: dns_query_time netdata python.d module
+# Author: ilyam8
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from random import choice
+from socket import getaddrinfo, gaierror
+from threading import Thread
+
+try:
+    # import every dnspython submodule the module references, rather
+    # than relying on transitive imports
+    import dns.exception
+    import dns.message
+    import dns.name
+    import dns.query
+    import dns.rcode
+    import dns.rdatatype
+
+    DNS_PYTHON = True
+except ImportError:
+    DNS_PYTHON = False
+
+try:
+ from queue import Queue
+except ImportError:
+ from Queue import Queue
+
+from bases.FrameworkServices.SimpleService import SimpleService
+
+update_every = 5
+
+
+class Service(SimpleService):
+ def __init__(self, configuration=None, name=None):
+ SimpleService.__init__(self, configuration=configuration, name=name)
+ self.order = list()
+ self.definitions = dict()
+ self.timeout = self.configuration.get('response_timeout', 4)
+ self.aggregate = self.configuration.get('aggregate', True)
+ self.domains = self.configuration.get('domains')
+ self.server_list = self.configuration.get('dns_servers')
+
+ def check(self):
+ if not DNS_PYTHON:
+ self.error("'python-dnspython' package is needed to use dns_query_time.chart.py")
+ return False
+
+ self.timeout = self.timeout if isinstance(self.timeout, int) else 4
+
+ if not all([self.domains, self.server_list,
+ isinstance(self.server_list, str), isinstance(self.domains, str)]):
+ self.error("server_list and domain_list can't be empty")
+ return False
+ else:
+ self.domains, self.server_list = self.domains.split(), self.server_list.split()
+
+        # iterate over a copy: removing entries from the list being
+        # iterated over would skip the element after each removal
+        for ns in self.server_list[:]:
+            if not check_ns(ns):
+                self.info('Bad NS: %s' % ns)
+                self.server_list.remove(ns)
+ if not self.server_list:
+ return False
+
+ data = self._get_data(timeout=1)
+
+ down_servers = [s for s in data if data[s] == -100]
+ for down in down_servers:
+ down = down[3:].replace('_', '.')
+ self.info('Removed due to non response %s' % down)
+ self.server_list.remove(down)
+ if not self.server_list:
+ return False
+
+ self.order, self.definitions = create_charts(aggregate=self.aggregate, server_list=self.server_list)
+ return True
+
+ def _get_data(self, timeout=None):
+ return dns_request(self.server_list, timeout or self.timeout, self.domains)
+
+
+def dns_request(server_list, timeout, domains):
+ threads = list()
+ que = Queue()
+ result = dict()
+
+ def dns_req(ns, t, q):
+ domain = dns.name.from_text(choice(domains))
+ request = dns.message.make_query(domain, dns.rdatatype.A)
+
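+        # -100 is a sentinel for "no usable answer"; check() drops
+        # servers that report it during the initial probe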
+ try:
+ resp = dns.query.udp(request, ns, timeout=t)
+            if resp.rcode() == dns.rcode.NOERROR and resp.answer:
+ query_time = resp.time * 1000
+ else:
+ query_time = -100
+ except dns.exception.Timeout:
+ query_time = -100
+ finally:
+ q.put({'_'.join(['ns', ns.replace('.', '_')]): query_time})
+
+ for server in server_list:
+ th = Thread(target=dns_req, args=(server, timeout, que))
+ th.start()
+ threads.append(th)
+
+ for th in threads:
+ th.join()
+ result.update(que.get())
+
+ return result
+
+
+def check_ns(ns):
+ try:
+ return getaddrinfo(ns, 'domain')[0][4][0]
+ except gaierror:
+ return False
+
+
+def create_charts(aggregate, server_list):
+ if aggregate:
+ order = ['dns_group']
+ definitions = {
+ 'dns_group': {
+ 'options': [None, 'DNS Response Time', 'ms', 'name servers', 'dns_query_time.response_time', 'line'],
+ 'lines': []
+ }
+ }
+ for ns in server_list:
+ dim = [
+ '_'.join(['ns', ns.replace('.', '_')]),
+ ns,
+ 'absolute',
+ ]
+ definitions['dns_group']['lines'].append(dim)
+
+ return order, definitions
+ else:
+ order = [''.join(['dns_', ns.replace('.', '_')]) for ns in server_list]
+ definitions = dict()
+
+ for ns in server_list:
+ definitions[''.join(['dns_', ns.replace('.', '_')])] = {
+ 'options': [None, 'DNS Response Time', 'ms', ns, 'dns_query_time.response_time', 'area'],
+ 'lines': [
+ [
+ '_'.join(['ns', ns.replace('.', '_')]),
+ ns,
+ 'absolute',
+ ]
+ ]
+ }
+ return order, definitions
diff --git a/collectors/python.d.plugin/dns_query_time/dns_query_time.conf b/collectors/python.d.plugin/dns_query_time/dns_query_time.conf
new file mode 100644
index 0000000..9c0838e
--- /dev/null
+++ b/collectors/python.d.plugin/dns_query_time/dns_query_time.conf
@@ -0,0 +1,69 @@
+# netdata python.d.plugin configuration for dns_query_time
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# In addition to the above, dns_query_time also supports the following:
+#
+# dns_servers: 'dns servers' # List of dns servers to query
+# domains: 'domains' # List of domains
+# aggregate: yes/no # Aggregate all servers in one chart or not
+# response_timeout: 4        # DNS query response timeout (a query is reported as -100 if its response time exceeds response_timeout)
+#
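+# A hypothetical job, shown here for illustration only (server and domain
+# lists are space-separated strings; adjust them to your environment):
+#
+# public_dns:
+#   name: 'public'
+#   dns_servers: '8.8.8.8 1.1.1.1'
+#   domains: 'example.com example.org'
+#   aggregate: yes
+#   response_timeout: 4
+#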
+# ----------------------------------------------------------------------
\ No newline at end of file
diff --git a/collectors/python.d.plugin/dnsdist/Makefile.inc b/collectors/python.d.plugin/dnsdist/Makefile.inc
new file mode 100644
index 0000000..a53f518
--- /dev/null
+++ b/collectors/python.d.plugin/dnsdist/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += dnsdist/dnsdist.chart.py
+dist_pythonconfig_DATA += dnsdist/dnsdist.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += dnsdist/README.md dnsdist/Makefile.inc
+
diff --git a/collectors/python.d.plugin/dnsdist/README.md b/collectors/python.d.plugin/dnsdist/README.md
new file mode 100644
index 0000000..7c279ef
--- /dev/null
+++ b/collectors/python.d.plugin/dnsdist/README.md
@@ -0,0 +1,72 @@
+<!--
+title: "PowerDNS dnsdist monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/dnsdist/README.md
+sidebar_label: "PowerDNS dnsdist"
+-->
+
+# PowerDNS dnsdist monitoring with Netdata
+
+Collects load-balancer performance and health metrics, and draws the following charts:
+
+1. **Response latency**
+
+ - latency-slow
+ - latency100-1000
+ - latency50-100
+ - latency10-50
+ - latency1-10
+ - latency0-1
+
+2. **Cache performance**
+
+ - cache-hits
+ - cache-misses
+
+3. **ACL events**
+
+ - acl-drops
+ - rule-drop
+ - rule-nxdomain
+ - rule-refused
+
+4. **Noncompliant data**
+
+ - empty-queries
+ - no-policy
+ - noncompliant-queries
+ - noncompliant-responses
+
+5. **Queries**
+
+ - queries
+ - rdqueries
+  - empty-queries
+
+6. **Health**
+
+ - downstream-send-errors
+ - downstream-timeouts
+ - servfail-responses
+ - trunc-failures
+
+## Configuration
+
+Edit the `python.d/dnsdist.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/dnsdist.conf
+```
+
+Sample:
+
+```yaml
+localhost:
+ name : 'local'
+ url : 'http://127.0.0.1:5053/jsonstat?command=stats'
+ user : 'username'
+ pass : 'password'
+ header:
+ X-API-Key: 'dnsdist-api-key'
+```
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fdnsdist%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/python.d.plugin/dnsdist/dnsdist.chart.py b/collectors/python.d.plugin/dnsdist/dnsdist.chart.py
new file mode 100644
index 0000000..7e94792
--- /dev/null
+++ b/collectors/python.d.plugin/dnsdist/dnsdist.chart.py
@@ -0,0 +1,131 @@
+# -*- coding: utf-8 -*-
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from json import loads
+
+from bases.FrameworkServices.UrlService import UrlService
+
+ORDER = [
+ 'queries',
+ 'queries_dropped',
+ 'packets_dropped',
+ 'answers',
+ 'backend_responses',
+ 'backend_commerrors',
+ 'backend_errors',
+ 'cache',
+ 'servercpu',
+ 'servermem',
+ 'query_latency',
+ 'query_latency_avg'
+]
+
+CHARTS = {
+ 'queries': {
+ 'options': [None, 'Client queries received', 'queries/s', 'queries', 'dnsdist.queries', 'line'],
+ 'lines': [
+ ['queries', 'all', 'incremental'],
+ ['rdqueries', 'recursive', 'incremental'],
+ ['empty-queries', 'empty', 'incremental']
+ ]
+ },
+ 'queries_dropped': {
+ 'options': [None, 'Client queries dropped', 'queries/s', 'queries', 'dnsdist.queries_dropped', 'line'],
+ 'lines': [
+ ['rule-drop', 'rule drop', 'incremental'],
+ ['dyn-blocked', 'dynamic block', 'incremental'],
+ ['no-policy', 'no policy', 'incremental'],
+ ['noncompliant-queries', 'non compliant', 'incremental']
+ ]
+ },
+ 'packets_dropped': {
+ 'options': [None, 'Packets dropped', 'packets/s', 'packets', 'dnsdist.packets_dropped', 'line'],
+ 'lines': [
+ ['acl-drops', 'acl', 'incremental']
+ ]
+ },
+ 'answers': {
+ 'options': [None, 'Answers statistics', 'answers/s', 'answers', 'dnsdist.answers', 'line'],
+ 'lines': [
+ ['self-answered', 'self answered', 'incremental'],
+ ['rule-nxdomain', 'nxdomain', 'incremental', -1],
+ ['rule-refused', 'refused', 'incremental', -1],
+ ['trunc-failures', 'trunc failures', 'incremental', -1]
+ ]
+ },
+ 'backend_responses': {
+ 'options': [None, 'Backend responses', 'responses/s', 'backends', 'dnsdist.backend_responses', 'line'],
+ 'lines': [
+ ['responses', 'responses', 'incremental']
+ ]
+ },
+ 'backend_commerrors': {
+ 'options': [None, 'Backend Communication Errors', 'errors/s', 'backends', 'dnsdist.backend_commerrors', 'line'],
+ 'lines': [
+ ['downstream-send-errors', 'send errors', 'incremental']
+ ]
+ },
+ 'backend_errors': {
+ 'options': [None, 'Backend error responses', 'responses/s', 'backends', 'dnsdist.backend_errors', 'line'],
+ 'lines': [
+ ['downstream-timeouts', 'timeout', 'incremental'],
+ ['servfail-responses', 'servfail', 'incremental'],
+ ['noncompliant-responses', 'non compliant', 'incremental']
+ ]
+ },
+ 'cache': {
+ 'options': [None, 'Cache performance', 'answers/s', 'cache', 'dnsdist.cache', 'area'],
+ 'lines': [
+ ['cache-hits', 'hits', 'incremental'],
+ ['cache-misses', 'misses', 'incremental', -1]
+ ]
+ },
+ 'servercpu': {
+ 'options': [None, 'DNSDIST server CPU utilization', 'ms/s', 'server', 'dnsdist.servercpu', 'stacked'],
+ 'lines': [
+ ['cpu-sys-msec', 'system state', 'incremental'],
+ ['cpu-user-msec', 'user state', 'incremental']
+ ]
+ },
+ 'servermem': {
+ 'options': [None, 'DNSDIST server memory utilization', 'MiB', 'server', 'dnsdist.servermem', 'area'],
+ 'lines': [
+ ['real-memory-usage', 'memory usage', 'absolute', 1, 1 << 20]
+ ]
+ },
+ 'query_latency': {
+ 'options': [None, 'Query latency', 'queries/s', 'latency', 'dnsdist.query_latency', 'stacked'],
+ 'lines': [
+ ['latency0-1', '1ms', 'incremental'],
+ ['latency1-10', '10ms', 'incremental'],
+ ['latency10-50', '50ms', 'incremental'],
+ ['latency50-100', '100ms', 'incremental'],
+ ['latency100-1000', '1sec', 'incremental'],
+ ['latency-slow', 'slow', 'incremental']
+ ]
+ },
+ 'query_latency_avg': {
+ 'options': [None, 'Average latency for the last N queries', 'microseconds', 'latency',
+ 'dnsdist.query_latency_avg', 'line'],
+ 'lines': [
+ ['latency-avg100', '100', 'absolute'],
+ ['latency-avg1000', '1k', 'absolute'],
+ ['latency-avg10000', '10k', 'absolute'],
+ ['latency-avg1000000', '1000k', 'absolute']
+ ]
+ }
+}
+
+
+class Service(UrlService):
+ def __init__(self, configuration=None, name=None):
+ UrlService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
+
+ def _get_data(self):
+ data = self._get_raw_data()
+ if not data:
+ return None
+
+ return loads(data)
diff --git a/collectors/python.d.plugin/dnsdist/dnsdist.conf b/collectors/python.d.plugin/dnsdist/dnsdist.conf
new file mode 100644
index 0000000..324d65a
--- /dev/null
+++ b/collectors/python.d.plugin/dnsdist/dnsdist.conf
@@ -0,0 +1,83 @@
+# netdata python.d.plugin configuration for dnsdist
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+#
+# In addition to the above, dnsdist also supports the following:
+#
+# url: 'URL' # the URL to fetch dnsdist performance statistics
+# user: 'username' # username for basic auth
+# pass: 'password' # password for basic auth
+# header:
+# X-API-Key: 'Key' # API key
+#
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+
+# localhost:
+# name : 'local'
+# url : 'http://127.0.0.1:5053/jsonstat?command=stats'
+# user : 'username'
+# pass : 'password'
+# header:
+# X-API-Key: 'dnsdist-api-key'
+
+
diff --git a/collectors/python.d.plugin/dockerd/Makefile.inc b/collectors/python.d.plugin/dockerd/Makefile.inc
new file mode 100644
index 0000000..b100bc6
--- /dev/null
+++ b/collectors/python.d.plugin/dockerd/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += dockerd/dockerd.chart.py
+dist_pythonconfig_DATA += dockerd/dockerd.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += dockerd/README.md dockerd/Makefile.inc
+
diff --git a/collectors/python.d.plugin/dockerd/README.md b/collectors/python.d.plugin/dockerd/README.md
new file mode 100644
index 0000000..178bae2
--- /dev/null
+++ b/collectors/python.d.plugin/dockerd/README.md
@@ -0,0 +1,46 @@
+<!--
+title: "Docker Engine monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/dockerd/README.md
+sidebar_label: "Docker Engine"
+-->
+
+# Docker Engine monitoring with Netdata
+
+Collects docker container health metrics.
+
+**Requirement:**
+
+- `docker` package, required version 3.2.0+
+
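+A quick way to confirm the installed SDK satisfies this requirement is a
+one-off check like the sketch below (assuming the `docker` package is
+importable by the interpreter that runs `python.d.plugin`):
+
+```python
+import docker
+from distutils.version import StrictVersion
+
+# dockerd.chart.py refuses to start below 3.2.0
+print(StrictVersion(docker.__version__) >= StrictVersion('3.2.0'))
+```
+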
+The following charts are drawn:
+
+1. **running containers**
+
+ - count
+
+2. **healthy containers**
+
+ - count
+
+3. **unhealthy containers**
+
+ - count
+
+## Configuration
+
+Edit the `python.d/dockerd.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/dockerd.conf
+```
+
+Sample:
+
+```yaml
+update_every : 1
+priority : 60000
+```
+
+---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fdockerd%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/python.d.plugin/dockerd/dockerd.chart.py b/collectors/python.d.plugin/dockerd/dockerd.chart.py
new file mode 100644
index 0000000..bd9640b
--- /dev/null
+++ b/collectors/python.d.plugin/dockerd/dockerd.chart.py
@@ -0,0 +1,86 @@
+# -*- coding: utf-8 -*-
+# Description: docker netdata python.d module
+# Author: Kévin Darcel (@tuxity)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+try:
+ import docker
+
+ HAS_DOCKER = True
+except ImportError:
+ HAS_DOCKER = False
+
+from distutils.version import StrictVersion
+
+from bases.FrameworkServices.SimpleService import SimpleService
+
+# charts order (can be overridden if you want less charts, or different order)
+ORDER = [
+ 'running_containers',
+ 'healthy_containers',
+ 'unhealthy_containers'
+]
+
+CHARTS = {
+ 'running_containers': {
+ 'options': [None, 'Number of running containers', 'containers', 'running containers',
+ 'docker.running_containers', 'line'],
+ 'lines': [
+ ['running_containers', 'running']
+ ]
+ },
+ 'healthy_containers': {
+ 'options': [None, 'Number of healthy containers', 'containers', 'healthy containers',
+ 'docker.healthy_containers', 'line'],
+ 'lines': [
+ ['healthy_containers', 'healthy']
+ ]
+ },
+ 'unhealthy_containers': {
+ 'options': [None, 'Number of unhealthy containers', 'containers', 'unhealthy containers',
+ 'docker.unhealthy_containers', 'line'],
+ 'lines': [
+ ['unhealthy_containers', 'unhealthy']
+ ]
+ }
+}
+
+MIN_REQUIRED_VERSION = '3.2.0'
+
+
+class Service(SimpleService):
+ def __init__(self, configuration=None, name=None):
+ SimpleService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
+ self.client = None
+
+ def check(self):
+ if not HAS_DOCKER:
+ self.error("'docker' package is needed to use dockerd module")
+ return False
+
+ if StrictVersion(docker.__version__) < StrictVersion(MIN_REQUIRED_VERSION):
+ self.error("installed 'docker' package version {0}, minimum required version {1}, please upgrade".format(
+ docker.__version__,
+ MIN_REQUIRED_VERSION,
+ ))
+ return False
+
+ self.client = docker.DockerClient(base_url=self.configuration.get('url', 'unix://var/run/docker.sock'))
+
+ try:
+ self.client.ping()
+ except docker.errors.APIError as error:
+ self.error(error)
+ return False
+
+ return True
+
+ def get_data(self):
+ data = dict()
+
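+        # sparse=True lists containers without an extra inspect call per
+        # container; the attributes returned by the list endpoint suffice here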
+ data['running_containers'] = len(self.client.containers.list(sparse=True))
+ data['healthy_containers'] = len(self.client.containers.list(filters={'health': 'healthy'}, sparse=True))
+ data['unhealthy_containers'] = len(self.client.containers.list(filters={'health': 'unhealthy'}, sparse=True))
+
+ return data or None
diff --git a/collectors/python.d.plugin/dockerd/dockerd.conf b/collectors/python.d.plugin/dockerd/dockerd.conf
new file mode 100644
index 0000000..96c8ee0
--- /dev/null
+++ b/collectors/python.d.plugin/dockerd/dockerd.conf
@@ -0,0 +1,77 @@
+# netdata python.d.plugin configuration for dockerd
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# In addition to the above, dockerd also supports the following:
+#
+# url: 'unix://var/run/docker.sock'   # the URL used to connect to the Docker daemon
+#                                     # (a TCP URL such as tcp://127.0.0.1:2375 also works)
+#
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+#
+local:
+ url: 'unix://var/run/docker.sock'
diff --git a/collectors/python.d.plugin/dovecot/Makefile.inc b/collectors/python.d.plugin/dovecot/Makefile.inc
new file mode 100644
index 0000000..fd7d13b
--- /dev/null
+++ b/collectors/python.d.plugin/dovecot/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += dovecot/dovecot.chart.py
+dist_pythonconfig_DATA += dovecot/dovecot.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += dovecot/README.md dovecot/Makefile.inc
+
diff --git a/collectors/python.d.plugin/dovecot/README.md b/collectors/python.d.plugin/dovecot/README.md
new file mode 100644
index 0000000..730b642
--- /dev/null
+++ b/collectors/python.d.plugin/dovecot/README.md
@@ -0,0 +1,105 @@
+<!--
+title: "Dovecot monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/dovecot/README.md
+sidebar_label: "Dovecot"
+-->
+
+# Dovecot monitoring with Netdata
+
+Provides statistics from the Dovecot server.
+
+Statistics are taken from the Dovecot socket by executing the `EXPORT global` command.
+More information about Dovecot stats can be found on the [project wiki page](http://wiki2.dovecot.org/Statistics).
+
+The module isn't compatible with the new statistics API (v2.3), but you can still use it with Dovecot v2.3
+by following the [upgrading steps](https://wiki2.dovecot.org/Upgrading/2.3).
+
+**Requirement:**
+Dovecot UNIX socket with R/W permissions for user `netdata` or Dovecot with configured TCP/IP socket.
+
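+For reference, this is roughly what the module does under the hood (a
+minimal sketch, assuming the default socket path and the v2.2 stats
+protocol):
+
+```python
+import socket
+
+sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+sock.connect('/var/run/dovecot/stats')
+sock.sendall(b'EXPORT\tglobal\r\n')
+raw = sock.recv(65536).decode()
+sock.close()
+
+# the reply is two tab-separated lines: field names, then their values
+names, values = raw.split('\n')[:2]
+stats = dict(zip(names.split('\t'), values.split('\t')))
+print(stats.get('num_logins'))
+```
+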
+The module provides the following charts:
+
+1. **sessions**
+
+ - active sessions
+
+2. **logins**
+
+ - logins
+
+3. **commands** - number of IMAP commands
+
+ - commands
+
+4. **Faults**
+
+ - minor
+ - major
+
+5. **Context Switches**
+
+ - voluntary
+ - involuntary
+
+6. **disk** in bytes/s
+
+ - read
+ - write
+
+7. **bytes** in bytes/s
+
+ - read
+ - write
+
+8. **number of syscalls** in syscalls/s
+
+ - read
+ - write
+
+9. **lookups** - number of lookups per second
+
+ - path
+ - attr
+
+10. **hits** - number of cache hits
+
+ - hits
+
+11. **attempts** - authorization attempts
+
+ - success
+ - failure
+
+12. **cache** - cached authorization hits
+
+ - hit
+ - miss
+
+## Configuration
+
+Edit the `python.d/dovecot.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/dovecot.conf
+```
+
+Sample:
+
+```yaml
+localtcpip:
+ name : 'local'
+ host : '127.0.0.1'
+ port : 24242
+
+localsocket:
+ name : 'local'
+ socket : '/var/run/dovecot/stats'
+```
+
+If no configuration is given, the module will attempt to connect to Dovecot using the UNIX socket located at `/var/run/dovecot/stats`.
+
+---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fdovecot%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/python.d.plugin/dovecot/dovecot.chart.py b/collectors/python.d.plugin/dovecot/dovecot.chart.py
new file mode 100644
index 0000000..dfaef28
--- /dev/null
+++ b/collectors/python.d.plugin/dovecot/dovecot.chart.py
@@ -0,0 +1,143 @@
+# -*- coding: utf-8 -*-
+# Description: dovecot netdata python.d module
+# Author: Pawel Krupa (paulfantom)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from bases.FrameworkServices.SocketService import SocketService
+
+UNIX_SOCKET = '/var/run/dovecot/stats'
+
+ORDER = [
+ 'sessions',
+ 'logins',
+ 'commands',
+ 'faults',
+ 'context_switches',
+ 'io',
+ 'net',
+ 'syscalls',
+ 'lookup',
+ 'cache',
+ 'auth',
+ 'auth_cache'
+]
+
+CHARTS = {
+ 'sessions': {
+ 'options': [None, 'Dovecot Active Sessions', 'number', 'sessions', 'dovecot.sessions', 'line'],
+ 'lines': [
+ ['num_connected_sessions', 'active sessions', 'absolute']
+ ]
+ },
+ 'logins': {
+ 'options': [None, 'Dovecot Logins', 'number', 'logins', 'dovecot.logins', 'line'],
+ 'lines': [
+ ['num_logins', 'logins', 'absolute']
+ ]
+ },
+ 'commands': {
+ 'options': [None, 'Dovecot Commands', 'commands', 'commands', 'dovecot.commands', 'line'],
+ 'lines': [
+ ['num_cmds', 'commands', 'absolute']
+ ]
+ },
+ 'faults': {
+ 'options': [None, 'Dovecot Page Faults', 'faults', 'page faults', 'dovecot.faults', 'line'],
+ 'lines': [
+ ['min_faults', 'minor', 'absolute'],
+ ['maj_faults', 'major', 'absolute']
+ ]
+ },
+ 'context_switches': {
+ 'options': [None, 'Dovecot Context Switches', 'switches', 'context switches', 'dovecot.context_switches',
+ 'line'],
+ 'lines': [
+ ['vol_cs', 'voluntary', 'absolute'],
+ ['invol_cs', 'involuntary', 'absolute']
+ ]
+ },
+ 'io': {
+ 'options': [None, 'Dovecot Disk I/O', 'KiB/s', 'disk', 'dovecot.io', 'area'],
+ 'lines': [
+ ['disk_input', 'read', 'incremental', 1, 1024],
+ ['disk_output', 'write', 'incremental', -1, 1024]
+ ]
+ },
+ 'net': {
+ 'options': [None, 'Dovecot Network Bandwidth', 'kilobits/s', 'network', 'dovecot.net', 'area'],
+ 'lines': [
+ ['read_bytes', 'read', 'incremental', 8, 1000],
+ ['write_bytes', 'write', 'incremental', -8, 1000]
+ ]
+ },
+ 'syscalls': {
+ 'options': [None, 'Dovecot Number of SysCalls', 'syscalls/s', 'system', 'dovecot.syscalls', 'line'],
+ 'lines': [
+ ['read_count', 'read', 'incremental'],
+ ['write_count', 'write', 'incremental']
+ ]
+ },
+ 'lookup': {
+ 'options': [None, 'Dovecot Lookups', 'number/s', 'lookups', 'dovecot.lookup', 'stacked'],
+ 'lines': [
+ ['mail_lookup_path', 'path', 'incremental'],
+ ['mail_lookup_attr', 'attr', 'incremental']
+ ]
+ },
+ 'cache': {
+ 'options': [None, 'Dovecot Cache Hits', 'hits/s', 'cache', 'dovecot.cache', 'line'],
+ 'lines': [
+ ['mail_cache_hits', 'hits', 'incremental']
+ ]
+ },
+ 'auth': {
+ 'options': [None, 'Dovecot Authentications', 'attempts', 'logins', 'dovecot.auth', 'stacked'],
+ 'lines': [
+ ['auth_successes', 'ok', 'absolute'],
+ ['auth_failures', 'failed', 'absolute']
+ ]
+ },
+ 'auth_cache': {
+ 'options': [None, 'Dovecot Authentication Cache', 'number', 'cache', 'dovecot.auth_cache', 'stacked'],
+ 'lines': [
+ ['auth_cache_hits', 'hit', 'absolute'],
+ ['auth_cache_misses', 'miss', 'absolute']
+ ]
+ }
+}
+
+
+class Service(SocketService):
+ def __init__(self, configuration=None, name=None):
+ SocketService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
+ self.host = None # localhost
+ self.port = None # 24242
+ self.unix_socket = UNIX_SOCKET
+ self.request = 'EXPORT\tglobal\r\n'
+
+ def _get_data(self):
+ """
+ Format data received from socket
+ :return: dict
+ """
+ try:
+ raw = self._get_raw_data()
+ except (ValueError, AttributeError):
+ return None
+
+ if raw is None:
+ self.debug('dovecot returned no data')
+ return None
+
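+        # the v2.2 stats reply is TSV: the first line carries field names,
+        # the second the matching values for the global scope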
+        lines = raw.split('\n')
+        if len(lines) < 2:
+            self.debug('unexpected reply from dovecot: %r' % raw)
+            return None
+        desc = lines[0].split('\t')
+        vals = lines[1].split('\t')
+ ret = dict()
+ for i, _ in enumerate(desc):
+ try:
+ ret[str(desc[i])] = int(vals[i])
+ except ValueError:
+ continue
+ return ret or None
diff --git a/collectors/python.d.plugin/dovecot/dovecot.conf b/collectors/python.d.plugin/dovecot/dovecot.conf
new file mode 100644
index 0000000..451dbc9
--- /dev/null
+++ b/collectors/python.d.plugin/dovecot/dovecot.conf
@@ -0,0 +1,98 @@
+# netdata python.d.plugin configuration for dovecot
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# In addition to the above, dovecot also supports the following:
+#
+# socket: 'path/to/dovecot/stats'
+#
+# or
+# host: 'IP or HOSTNAME' # the host to connect to
+# port: PORT # the port to connect to
+#
+#
+
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+
+localhost:
+ name : 'local'
+ host : 'localhost'
+ port : 24242
+
+localipv4:
+ name : 'local'
+ host : '127.0.0.1'
+ port : 24242
+
+localipv6:
+ name : 'local'
+ host : '::1'
+ port : 24242
+
+localsocket:
+ name : 'local'
+ socket : '/var/run/dovecot/stats'
+
+localsocket_old:
+ name : 'local'
+ socket : '/var/run/dovecot/old-stats'
+
diff --git a/collectors/python.d.plugin/elasticsearch/Makefile.inc b/collectors/python.d.plugin/elasticsearch/Makefile.inc
new file mode 100644
index 0000000..15c63c2
--- /dev/null
+++ b/collectors/python.d.plugin/elasticsearch/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += elasticsearch/elasticsearch.chart.py
+dist_pythonconfig_DATA += elasticsearch/elasticsearch.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += elasticsearch/README.md elasticsearch/Makefile.inc
+
diff --git a/collectors/python.d.plugin/elasticsearch/README.md b/collectors/python.d.plugin/elasticsearch/README.md
new file mode 100644
index 0000000..cf1834c
--- /dev/null
+++ b/collectors/python.d.plugin/elasticsearch/README.md
@@ -0,0 +1,94 @@
+<!--
+title: "Elasticsearch monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/elasticsearch/README.md
+sidebar_label: "Elasticsearch"
+-->
+
+# Elasticsearch monitoring with Netdata
+
+Monitors [Elasticsearch](https://www.elastic.co/products/elasticsearch) performance and health metrics.
+
+It produces:
+
+1. **Search performance** charts:
+
+ - Number of queries, fetches
+ - Time spent on queries, fetches
+ - Query and fetch latency
+
+2. **Indexing performance** charts:
+
+ - Number of documents indexed, index refreshes, flushes
+ - Time spent on indexing, refreshing, flushing
+ - Indexing and flushing latency
+
+3. **Memory usage and garbage collection** charts:
+
+ - JVM heap currently in use, committed
+ - Count of garbage collections
+ - Time spent on garbage collections
+
+4. **Host metrics** charts:
+
+ - Available file descriptors in percent
+ - Opened HTTP connections
+ - Cluster communication transport metrics
+
+5. **Queues and rejections** charts:
+
+ - Number of queued/rejected threads in thread pool
+
+6. **Fielddata cache** charts:
+
+ - Fielddata cache size
+ - Fielddata evictions and circuit breaker tripped count
+
+7. **Cluster health API** charts:
+
+ - Cluster status
+ - Nodes and tasks statistics
+ - Shards statistics
+
+8. **Cluster stats API** charts:
+
+ - Nodes statistics
+ - Query cache statistics
+ - Docs statistics
+ - Store statistics
+ - Indices and shards statistics
+
+9. **Indices** charts (per index statistics, disabled by default):
+
+ - Docs count
+ - Store size
+ - Num of replicas
+ - Health status
+
+## Configuration
+
+Edit the `python.d/elasticsearch.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/elasticsearch.conf
+```
+
+Sample:
+
+```yaml
+local:
+ host : 'ipaddress' # Elasticsearch server ip address or hostname.
+ port : 'port' # Port on which elasticsearch listens.
+ scheme : 'http' # URL scheme. Use 'https' if your elasticsearch uses TLS.
+ node_status : yes/no # Get metrics from "/_nodes/_local/stats". Enabled by default.
+ cluster_health : yes/no # Get metrics from "/_cluster/health". Enabled by default.
+  cluster_stats : yes/no   # Get metrics from "/_cluster/stats". Enabled by default.
+ indices_stats : yes/no # Get metrics from "/_cat/indices". Disabled by default.
+```
+
+If no configuration is given, the module will try to connect to `http://127.0.0.1:9200`.
+
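+To sanity-check that the endpoint the module polls is reachable, something
+like the following works (a sketch, assuming the default address; adjust it
+to match your `host`/`port` settings):
+
+```python
+import json
+
+try:
+    from urllib.request import urlopen  # Python 3
+except ImportError:
+    from urllib2 import urlopen  # Python 2
+
+health = json.load(urlopen('http://127.0.0.1:9200/_cluster/health'))
+print(health['status'], health['number_of_nodes'])
+```
+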
+---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Felasticsearch%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/python.d.plugin/elasticsearch/elasticsearch.chart.py b/collectors/python.d.plugin/elasticsearch/elasticsearch.chart.py
new file mode 100644
index 0000000..dddf50b
--- /dev/null
+++ b/collectors/python.d.plugin/elasticsearch/elasticsearch.chart.py
@@ -0,0 +1,774 @@
+# -*- coding: utf-8 -*-
+# Description: elastic search node stats netdata python.d module
+# Author: ilyam8
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import json
+import threading
+
+from collections import namedtuple
+from socket import gethostbyname, gaierror
+
+try:
+ from queue import Queue
+except ImportError:
+ from Queue import Queue
+
+from bases.FrameworkServices.UrlService import UrlService
+
+# default module values (can be overridden per job in `config`)
+update_every = 5
+
+METHODS = namedtuple('METHODS', ['get_data', 'url', 'run'])
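+# each entry bundles a parser callback, the endpoint it polls and a flag
+# saying whether the job's configuration enables it (see Service.check())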
+
+NODE_STATS = [
+ 'indices.search.fetch_current',
+ 'indices.search.fetch_total',
+ 'indices.search.query_current',
+ 'indices.search.query_total',
+ 'indices.search.query_time_in_millis',
+ 'indices.search.fetch_time_in_millis',
+ 'indices.indexing.index_total',
+ 'indices.indexing.index_current',
+ 'indices.indexing.index_time_in_millis',
+ 'indices.refresh.total',
+ 'indices.refresh.total_time_in_millis',
+ 'indices.flush.total',
+ 'indices.flush.total_time_in_millis',
+ 'indices.translog.operations',
+ 'indices.translog.size_in_bytes',
+ 'indices.translog.uncommitted_operations',
+ 'indices.translog.uncommitted_size_in_bytes',
+ 'indices.segments.count',
+ 'indices.segments.terms_memory_in_bytes',
+ 'indices.segments.stored_fields_memory_in_bytes',
+ 'indices.segments.term_vectors_memory_in_bytes',
+ 'indices.segments.norms_memory_in_bytes',
+ 'indices.segments.points_memory_in_bytes',
+ 'indices.segments.doc_values_memory_in_bytes',
+ 'indices.segments.index_writer_memory_in_bytes',
+ 'indices.segments.version_map_memory_in_bytes',
+ 'indices.segments.fixed_bit_set_memory_in_bytes',
+ 'jvm.gc.collectors.young.collection_count',
+ 'jvm.gc.collectors.old.collection_count',
+ 'jvm.gc.collectors.young.collection_time_in_millis',
+ 'jvm.gc.collectors.old.collection_time_in_millis',
+ 'jvm.mem.heap_used_percent',
+ 'jvm.mem.heap_used_in_bytes',
+ 'jvm.mem.heap_committed_in_bytes',
+ 'jvm.buffer_pools.direct.count',
+ 'jvm.buffer_pools.direct.used_in_bytes',
+ 'jvm.buffer_pools.direct.total_capacity_in_bytes',
+ 'jvm.buffer_pools.mapped.count',
+ 'jvm.buffer_pools.mapped.used_in_bytes',
+ 'jvm.buffer_pools.mapped.total_capacity_in_bytes',
+ 'thread_pool.bulk.queue',
+ 'thread_pool.bulk.rejected',
+ 'thread_pool.write.queue',
+ 'thread_pool.write.rejected',
+ 'thread_pool.index.queue',
+ 'thread_pool.index.rejected',
+ 'thread_pool.search.queue',
+ 'thread_pool.search.rejected',
+ 'thread_pool.merge.queue',
+ 'thread_pool.merge.rejected',
+ 'indices.fielddata.memory_size_in_bytes',
+ 'indices.fielddata.evictions',
+ 'breakers.fielddata.tripped',
+ 'http.current_open',
+ 'transport.rx_size_in_bytes',
+ 'transport.tx_size_in_bytes',
+ 'process.max_file_descriptors',
+ 'process.open_file_descriptors'
+]
+
+CLUSTER_STATS = [
+ 'nodes.count.data',
+ 'nodes.count.master',
+ 'nodes.count.total',
+ 'nodes.count.coordinating_only',
+ 'nodes.count.ingest',
+ 'indices.docs.count',
+ 'indices.query_cache.hit_count',
+ 'indices.query_cache.miss_count',
+ 'indices.store.size_in_bytes',
+ 'indices.count',
+ 'indices.shards.total'
+]
+
+HEALTH_STATS = [
+ 'number_of_nodes',
+ 'number_of_data_nodes',
+ 'number_of_pending_tasks',
+ 'number_of_in_flight_fetch',
+ 'active_shards',
+ 'relocating_shards',
+ 'unassigned_shards',
+ 'delayed_unassigned_shards',
+ 'initializing_shards',
+ 'active_shards_percent_as_number'
+]
+
+LATENCY = {
+ 'query_latency': {
+ 'total': 'indices_search_query_total',
+ 'spent_time': 'indices_search_query_time_in_millis'
+ },
+ 'fetch_latency': {
+ 'total': 'indices_search_fetch_total',
+ 'spent_time': 'indices_search_fetch_time_in_millis'
+ },
+ 'indexing_latency': {
+ 'total': 'indices_indexing_index_total',
+ 'spent_time': 'indices_indexing_index_time_in_millis'
+ },
+ 'flushing_latency': {
+ 'total': 'indices_flush_total',
+ 'spent_time': 'indices_flush_total_time_in_millis'
+ }
+}
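+
+# Latencies are derived from pairs of ever-growing counters: e.g. if
+# query_time_in_millis grows by 200 while query_total grows by 50 between
+# two polls, the average query latency over that interval is 200 / 50 = 4 ms
+# (find_avg() returns 4000; the chart divisor of 1000 scales it back to ms).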
+
+# charts order (can be overridden if you want less charts, or different order)
+ORDER = [
+ 'search_performance_total',
+ 'search_performance_current',
+ 'search_performance_time',
+ 'search_latency',
+ 'index_performance_total',
+ 'index_performance_current',
+ 'index_performance_time',
+ 'index_latency',
+ 'index_translog_operations',
+ 'index_translog_size',
+ 'index_segments_count',
+ 'index_segments_memory_writer',
+ 'index_segments_memory',
+ 'jvm_mem_heap',
+ 'jvm_mem_heap_bytes',
+ 'jvm_buffer_pool_count',
+ 'jvm_direct_buffers_memory',
+ 'jvm_mapped_buffers_memory',
+ 'jvm_gc_count',
+ 'jvm_gc_time',
+ 'host_metrics_file_descriptors',
+ 'host_metrics_http',
+ 'host_metrics_transport',
+ 'thread_pool_queued',
+ 'thread_pool_rejected',
+ 'fielddata_cache',
+ 'fielddata_evictions_tripped',
+ 'cluster_health_status',
+ 'cluster_health_nodes',
+ 'cluster_health_pending_tasks',
+ 'cluster_health_flight_fetch',
+ 'cluster_health_shards',
+ 'cluster_stats_nodes',
+ 'cluster_stats_query_cache',
+ 'cluster_stats_docs',
+ 'cluster_stats_store',
+ 'cluster_stats_indices',
+ 'cluster_stats_shards_total',
+ 'index_docs_count',
+ 'index_store_size',
+ 'index_replica',
+ 'index_health',
+]
+
+CHARTS = {
+ 'search_performance_total': {
+ 'options': [None, 'Queries And Fetches', 'events/s', 'search performance',
+ 'elastic.search_performance_total', 'stacked'],
+ 'lines': [
+ ['indices_search_query_total', 'queries', 'incremental'],
+ ['indices_search_fetch_total', 'fetches', 'incremental']
+ ]
+ },
+ 'search_performance_current': {
+ 'options': [None, 'Queries and Fetches In Progress', 'events', 'search performance',
+ 'elastic.search_performance_current', 'stacked'],
+ 'lines': [
+ ['indices_search_query_current', 'queries', 'absolute'],
+ ['indices_search_fetch_current', 'fetches', 'absolute']
+ ]
+ },
+ 'search_performance_time': {
+ 'options': [None, 'Time Spent On Queries And Fetches', 'seconds', 'search performance',
+ 'elastic.search_performance_time', 'stacked'],
+ 'lines': [
+ ['indices_search_query_time_in_millis', 'query', 'incremental', 1, 1000],
+ ['indices_search_fetch_time_in_millis', 'fetch', 'incremental', 1, 1000]
+ ]
+ },
+ 'search_latency': {
+ 'options': [None, 'Query And Fetch Latency', 'milliseconds', 'search performance', 'elastic.search_latency',
+ 'stacked'],
+ 'lines': [
+ ['query_latency', 'query', 'absolute', 1, 1000],
+ ['fetch_latency', 'fetch', 'absolute', 1, 1000]
+ ]
+ },
+ 'index_performance_total': {
+ 'options': [None, 'Indexed Documents, Index Refreshes, Index Flushes To Disk', 'events/s',
+ 'indexing performance', 'elastic.index_performance_total', 'stacked'],
+ 'lines': [
+ ['indices_indexing_index_total', 'indexed', 'incremental'],
+ ['indices_refresh_total', 'refreshes', 'incremental'],
+ ['indices_flush_total', 'flushes', 'incremental']
+ ]
+ },
+ 'index_performance_current': {
+ 'options': [None, 'Number Of Documents Currently Being Indexed', 'currently indexed',
+ 'indexing performance', 'elastic.index_performance_current', 'stacked'],
+ 'lines': [
+ ['indices_indexing_index_current', 'documents', 'absolute']
+ ]
+ },
+ 'index_performance_time': {
+ 'options': [None, 'Time Spent On Indexing, Refreshing, Flushing', 'seconds', 'indexing performance',
+ 'elastic.index_performance_time', 'stacked'],
+ 'lines': [
+ ['indices_indexing_index_time_in_millis', 'indexing', 'incremental', 1, 1000],
+ ['indices_refresh_total_time_in_millis', 'refreshing', 'incremental', 1, 1000],
+ ['indices_flush_total_time_in_millis', 'flushing', 'incremental', 1, 1000]
+ ]
+ },
+ 'index_latency': {
+ 'options': [None, 'Indexing And Flushing Latency', 'milliseconds', 'indexing performance',
+ 'elastic.index_latency', 'stacked'],
+ 'lines': [
+ ['indexing_latency', 'indexing', 'absolute', 1, 1000],
+ ['flushing_latency', 'flushing', 'absolute', 1, 1000]
+ ]
+ },
+ 'index_translog_operations': {
+ 'options': [None, 'Translog Operations', 'operations', 'translog',
+ 'elastic.index_translog_operations', 'area'],
+ 'lines': [
+ ['indices_translog_operations', 'total', 'absolute'],
+ ['indices_translog_uncommitted_operations', 'uncommitted', 'absolute']
+ ]
+ },
+ 'index_translog_size': {
+ 'options': [None, 'Translog Size', 'MiB', 'translog',
+ 'elastic.index_translog_size', 'area'],
+ 'lines': [
+            ['indices_translog_size_in_bytes', 'total', 'absolute', 1, 1048576],
+            ['indices_translog_uncommitted_size_in_bytes', 'uncommitted', 'absolute', 1, 1048576]
+ ]
+ },
+ 'index_segments_count': {
+ 'options': [None, 'Total Number Of Indices Segments', 'segments', 'indices segments',
+ 'elastic.index_segments_count', 'line'],
+ 'lines': [
+ ['indices_segments_count', 'segments', 'absolute']
+ ]
+ },
+ 'index_segments_memory_writer': {
+ 'options': [None, 'Index Writer Memory Usage', 'MiB', 'indices segments',
+ 'elastic.index_segments_memory_writer', 'area'],
+ 'lines': [
+            ['indices_segments_index_writer_memory_in_bytes', 'total', 'absolute', 1, 1048576]
+ ]
+ },
+ 'index_segments_memory': {
+ 'options': [None, 'Indices Segments Memory Usage', 'MiB', 'indices segments',
+ 'elastic.index_segments_memory', 'stacked'],
+ 'lines': [
+            ['indices_segments_terms_memory_in_bytes', 'terms', 'absolute', 1, 1048576],
+            ['indices_segments_stored_fields_memory_in_bytes', 'stored fields', 'absolute', 1, 1048576],
+            ['indices_segments_term_vectors_memory_in_bytes', 'term vectors', 'absolute', 1, 1048576],
+            ['indices_segments_norms_memory_in_bytes', 'norms', 'absolute', 1, 1048576],
+            ['indices_segments_points_memory_in_bytes', 'points', 'absolute', 1, 1048576],
+            ['indices_segments_doc_values_memory_in_bytes', 'doc values', 'absolute', 1, 1048576],
+            ['indices_segments_version_map_memory_in_bytes', 'version map', 'absolute', 1, 1048576],
+            ['indices_segments_fixed_bit_set_memory_in_bytes', 'fixed bit set', 'absolute', 1, 1048576]
+ ]
+ },
+ 'jvm_mem_heap': {
+ 'options': [None, 'JVM Heap Percentage Currently in Use', 'percentage', 'memory usage and gc',
+ 'elastic.jvm_heap', 'area'],
+ 'lines': [
+ ['jvm_mem_heap_used_percent', 'inuse', 'absolute']
+ ]
+ },
+ 'jvm_mem_heap_bytes': {
+ 'options': [None, 'JVM Heap Commit And Usage', 'MiB', 'memory usage and gc',
+ 'elastic.jvm_heap_bytes', 'area'],
+ 'lines': [
+ ['jvm_mem_heap_committed_in_bytes', 'committed', 'absolute', 1, 1048576],
+ ['jvm_mem_heap_used_in_bytes', 'used', 'absolute', 1, 1048576]
+ ]
+ },
+ 'jvm_buffer_pool_count': {
+ 'options': [None, 'JVM Buffers', 'pools', 'memory usage and gc',
+ 'elastic.jvm_buffer_pool_count', 'line'],
+ 'lines': [
+ ['jvm_buffer_pools_direct_count', 'direct', 'absolute'],
+ ['jvm_buffer_pools_mapped_count', 'mapped', 'absolute']
+ ]
+ },
+ 'jvm_direct_buffers_memory': {
+ 'options': [None, 'JVM Direct Buffers Memory', 'MiB', 'memory usage and gc',
+ 'elastic.jvm_direct_buffers_memory', 'area'],
+ 'lines': [
+            ['jvm_buffer_pools_direct_used_in_bytes', 'used', 'absolute', 1, 1048576],
+            ['jvm_buffer_pools_direct_total_capacity_in_bytes', 'total capacity', 'absolute', 1, 1048576]
+ ]
+ },
+ 'jvm_mapped_buffers_memory': {
+ 'options': [None, 'JVM Mapped Buffers Memory', 'MiB', 'memory usage and gc',
+ 'elastic.jvm_mapped_buffers_memory', 'area'],
+ 'lines': [
+            ['jvm_buffer_pools_mapped_used_in_bytes', 'used', 'absolute', 1, 1048576],
+            ['jvm_buffer_pools_mapped_total_capacity_in_bytes', 'total capacity', 'absolute', 1, 1048576]
+ ]
+ },
+ 'jvm_gc_count': {
+ 'options': [None, 'Garbage Collections', 'events/s', 'memory usage and gc', 'elastic.gc_count', 'stacked'],
+ 'lines': [
+ ['jvm_gc_collectors_young_collection_count', 'young', 'incremental'],
+ ['jvm_gc_collectors_old_collection_count', 'old', 'incremental']
+ ]
+ },
+ 'jvm_gc_time': {
+ 'options': [None, 'Time Spent On Garbage Collections', 'milliseconds', 'memory usage and gc',
+ 'elastic.gc_time', 'stacked'],
+ 'lines': [
+ ['jvm_gc_collectors_young_collection_time_in_millis', 'young', 'incremental'],
+ ['jvm_gc_collectors_old_collection_time_in_millis', 'old', 'incremental']
+ ]
+ },
+ 'thread_pool_queued': {
+ 'options': [None, 'Number Of Queued Threads In Thread Pool', 'queued threads', 'queues and rejections',
+ 'elastic.thread_pool_queued', 'stacked'],
+ 'lines': [
+ ['thread_pool_bulk_queue', 'bulk', 'absolute'],
+ ['thread_pool_write_queue', 'write', 'absolute'],
+ ['thread_pool_index_queue', 'index', 'absolute'],
+ ['thread_pool_search_queue', 'search', 'absolute'],
+ ['thread_pool_merge_queue', 'merge', 'absolute']
+ ]
+ },
+ 'thread_pool_rejected': {
+ 'options': [None, 'Rejected Threads In Thread Pool', 'rejected threads', 'queues and rejections',
+ 'elastic.thread_pool_rejected', 'stacked'],
+ 'lines': [
+ ['thread_pool_bulk_rejected', 'bulk', 'absolute'],
+ ['thread_pool_write_rejected', 'write', 'absolute'],
+ ['thread_pool_index_rejected', 'index', 'absolute'],
+ ['thread_pool_search_rejected', 'search', 'absolute'],
+ ['thread_pool_merge_rejected', 'merge', 'absolute']
+ ]
+ },
+ 'fielddata_cache': {
+ 'options': [None, 'Fielddata Cache', 'MiB', 'fielddata cache', 'elastic.fielddata_cache', 'line'],
+ 'lines': [
+ ['indices_fielddata_memory_size_in_bytes', 'cache', 'absolute', 1, 1048576]
+ ]
+ },
+ 'fielddata_evictions_tripped': {
+ 'options': [None, 'Fielddata Evictions And Circuit Breaker Tripped Count', 'events/s',
+ 'fielddata cache', 'elastic.fielddata_evictions_tripped', 'line'],
+ 'lines': [
+ ['indices_fielddata_evictions', 'evictions', 'incremental'],
+ ['indices_fielddata_tripped', 'tripped', 'incremental']
+ ]
+ },
+ 'cluster_health_nodes': {
+ 'options': [None, 'Nodes Statistics', 'nodes', 'cluster health API',
+ 'elastic.cluster_health_nodes', 'area'],
+ 'lines': [
+ ['number_of_nodes', 'nodes', 'absolute'],
+ ['number_of_data_nodes', 'data_nodes', 'absolute'],
+ ]
+ },
+ 'cluster_health_pending_tasks': {
+ 'options': [None, 'Tasks Statistics', 'tasks', 'cluster health API',
+ 'elastic.cluster_health_pending_tasks', 'line'],
+ 'lines': [
+ ['number_of_pending_tasks', 'pending_tasks', 'absolute'],
+ ]
+ },
+ 'cluster_health_flight_fetch': {
+ 'options': [None, 'In Flight Fetches Statistics', 'fetches', 'cluster health API',
+ 'elastic.cluster_health_flight_fetch', 'line'],
+ 'lines': [
+ ['number_of_in_flight_fetch', 'in_flight_fetch', 'absolute']
+ ]
+ },
+ 'cluster_health_status': {
+ 'options': [None, 'Cluster Status', 'status', 'cluster health API',
+ 'elastic.cluster_health_status', 'area'],
+ 'lines': [
+ ['status_green', 'green', 'absolute'],
+ ['status_red', 'red', 'absolute'],
+ ['status_yellow', 'yellow', 'absolute']
+ ]
+ },
+ 'cluster_health_shards': {
+ 'options': [None, 'Shards Statistics', 'shards', 'cluster health API',
+ 'elastic.cluster_health_shards', 'stacked'],
+ 'lines': [
+ ['active_shards', 'active_shards', 'absolute'],
+ ['relocating_shards', 'relocating_shards', 'absolute'],
+ ['unassigned_shards', 'unassigned', 'absolute'],
+ ['delayed_unassigned_shards', 'delayed_unassigned', 'absolute'],
+ ['initializing_shards', 'initializing', 'absolute'],
+ ['active_shards_percent_as_number', 'active_percent', 'absolute']
+ ]
+ },
+ 'cluster_stats_nodes': {
+ 'options': [None, 'Nodes Statistics', 'nodes', 'cluster stats API',
+ 'elastic.cluster_nodes', 'area'],
+ 'lines': [
+ ['nodes_count_data', 'data', 'absolute'],
+ ['nodes_count_master', 'master', 'absolute'],
+ ['nodes_count_total', 'total', 'absolute'],
+ ['nodes_count_ingest', 'ingest', 'absolute'],
+ ['nodes_count_coordinating_only', 'coordinating_only', 'absolute']
+ ]
+ },
+ 'cluster_stats_query_cache': {
+ 'options': [None, 'Query Cache Statistics', 'queries', 'cluster stats API',
+ 'elastic.cluster_query_cache', 'stacked'],
+ 'lines': [
+ ['indices_query_cache_hit_count', 'hit', 'incremental'],
+ ['indices_query_cache_miss_count', 'miss', 'incremental']
+ ]
+ },
+ 'cluster_stats_docs': {
+ 'options': [None, 'Docs Statistics', 'docs', 'cluster stats API',
+ 'elastic.cluster_docs', 'line'],
+ 'lines': [
+ ['indices_docs_count', 'docs', 'absolute']
+ ]
+ },
+ 'cluster_stats_store': {
+ 'options': [None, 'Store Statistics', 'MiB', 'cluster stats API',
+ 'elastic.cluster_store', 'line'],
+ 'lines': [
+            ['indices_store_size_in_bytes', 'size', 'absolute', 1, 1048576]
+ ]
+ },
+ 'cluster_stats_indices': {
+ 'options': [None, 'Indices Statistics', 'indices', 'cluster stats API',
+ 'elastic.cluster_indices', 'line'],
+ 'lines': [
+ ['indices_count', 'indices', 'absolute'],
+ ]
+ },
+ 'cluster_stats_shards_total': {
+ 'options': [None, 'Total Shards Statistics', 'shards', 'cluster stats API',
+ 'elastic.cluster_shards_total', 'line'],
+ 'lines': [
+ ['indices_shards_total', 'shards', 'absolute']
+ ]
+ },
+ 'host_metrics_transport': {
+ 'options': [None, 'Cluster Communication Transport Metrics', 'kilobit/s', 'host metrics',
+ 'elastic.host_transport', 'area'],
+ 'lines': [
+ ['transport_rx_size_in_bytes', 'in', 'incremental', 8, 1000],
+ ['transport_tx_size_in_bytes', 'out', 'incremental', -8, 1000]
+ ]
+ },
+ 'host_metrics_file_descriptors': {
+ 'options': [None, 'Available File Descriptors In Percent', 'percentage', 'host metrics',
+ 'elastic.host_descriptors', 'area'],
+ 'lines': [
+ ['file_descriptors_used', 'used', 'absolute', 1, 10]
+ ]
+ },
+ 'host_metrics_http': {
+ 'options': [None, 'Opened HTTP Connections', 'connections', 'host metrics',
+ 'elastic.host_http_connections', 'line'],
+ 'lines': [
+ ['http_current_open', 'opened', 'absolute', 1, 1]
+ ]
+ },
+ 'index_docs_count': {
+ 'options': [None, 'Docs Count', 'count', 'indices', 'elastic.index_docs', 'line'],
+ 'lines': []
+ },
+ 'index_store_size': {
+ 'options': [None, 'Store Size', 'bytes', 'indices', 'elastic.index_store_size', 'line'],
+ 'lines': []
+ },
+ 'index_replica': {
+ 'options': [None, 'Replica', 'count', 'indices', 'elastic.index_replica', 'line'],
+ 'lines': []
+ },
+ 'index_health': {
+ 'options': [None, 'Health', 'status', 'indices', 'elastic.index_health', 'line'],
+ 'lines': []
+ },
+}
+
+
+def convert_index_store_size_to_bytes(size):
+ # can be b, kb, mb, gb
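+    # e.g. '650b' -> 650, '1.5kb' -> 1536, '2mb' -> 2097152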
+ if size.endswith('kb'):
+ return round(float(size[:-2]) * 1024)
+ elif size.endswith('mb'):
+ return round(float(size[:-2]) * 1024 * 1024)
+ elif size.endswith('gb'):
+ return round(float(size[:-2]) * 1024 * 1024 * 1024)
+ elif size.endswith('tb'):
+ return round(float(size[:-2]) * 1024 * 1024 * 1024 * 1024)
+ elif size.endswith('b'):
+ return round(float(size[:-1]))
+ return -1
+
+
+def convert_index_health(health):
+ if health == 'green':
+ return 0
+ elif health == 'yellow':
+ return 1
+    elif health == 'red':
+ return 2
+ return -1
+
+
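+# decorator: stops a failing endpoint from killing the whole update;
+# the error is logged and an empty dict is queued instead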
+def get_survive_any(method):
+ def w(*args):
+ try:
+ method(*args)
+ except Exception as error:
+ self, queue, url = args[0], args[1], args[2]
+ self.error("error during '{0}' : {1}".format(url, error))
+ queue.put(dict())
+
+ return w
+
+
+class Service(UrlService):
+ def __init__(self, configuration=None, name=None):
+ UrlService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
+ self.host = self.configuration.get('host')
+ self.port = self.configuration.get('port', 9200)
+ self.url = '{scheme}://{host}:{port}'.format(
+ scheme=self.configuration.get('scheme', 'http'),
+ host=self.host,
+ port=self.port,
+ )
+ self.latency = dict()
+ self.methods = list()
+ self.collected_indices = set()
+
+ def check(self):
+ if not self.host:
+ self.error('Host is not defined in the module configuration file')
+ return False
+
+ try:
+ self.host = gethostbyname(self.host)
+ except gaierror as error:
+ self.error(repr(error))
+ return False
+
+ self.methods = [
+ METHODS(
+ get_data=self._get_node_stats,
+ url=self.url + '/_nodes/_local/stats',
+ run=self.configuration.get('node_stats', True),
+ ),
+ METHODS(
+ get_data=self._get_cluster_health,
+ url=self.url + '/_cluster/health',
+ run=self.configuration.get('cluster_health', True)
+ ),
+ METHODS(
+ get_data=self._get_cluster_stats,
+ url=self.url + '/_cluster/stats',
+ run=self.configuration.get('cluster_stats', True),
+ ),
+ METHODS(
+ get_data=self._get_indices,
+ url=self.url + '/_cat/indices?format=json',
+ run=self.configuration.get('indices_stats', False),
+ ),
+ ]
+ return UrlService.check(self)
+
+ def _get_data(self):
+ threads = list()
+ queue = Queue()
+ result = dict()
+
+ for method in self.methods:
+ if not method.run:
+ continue
+ th = threading.Thread(
+ target=method.get_data,
+ args=(queue, method.url),
+ )
+ th.daemon = True
+ th.start()
+ threads.append(th)
+
+ for thread in threads:
+ thread.join()
+ result.update(queue.get())
+
+ return result or None
+
+ def add_index_to_charts(self, idx_name):
+ for name in ('index_docs_count', 'index_store_size', 'index_replica', 'index_health'):
+ chart = self.charts[name]
+ dim = ['{0}_{1}'.format(idx_name, name), idx_name]
+ chart.add_dimension(dim)
+
+ @get_survive_any
+ def _get_indices(self, queue, url):
+ # [
+ # {
+ # "pri.store.size": "650b",
+ # "health": "yellow",
+ # "status": "open",
+ # "index": "twitter",
+ # "pri": "5",
+ # "rep": "1",
+ # "docs.count": "10",
+ # "docs.deleted": "3",
+ # "store.size": "650b"
+ # }
+ # ]
+ raw_data = self._get_raw_data(url)
+ if not raw_data:
+ return queue.put(dict())
+
+ indices = self.json_parse(raw_data)
+ if not indices:
+ return queue.put(dict())
+
+ charts_initialized = len(self.charts) != 0
+ data = dict()
+ for idx in indices:
+ try:
+ name = idx['index']
+ is_system_index = name.startswith('.')
+ if is_system_index:
+ continue
+
+ v = {
+ '{0}_index_docs_count'.format(name): idx['docs.count'],
+ '{0}_index_replica'.format(name): idx['rep'],
+ '{0}_index_health'.format(name): convert_index_health(idx['health']),
+ }
+ size = convert_index_store_size_to_bytes(idx['store.size'])
+ if size != -1:
+ v['{0}_index_store_size'.format(name)] = size
+ except KeyError as error:
+ self.debug("error on parsing index : {0}".format(repr(error)))
+ continue
+
+ data.update(v)
+ if name not in self.collected_indices and charts_initialized:
+ self.collected_indices.add(name)
+ self.add_index_to_charts(name)
+
+ return queue.put(data)
+
+ @get_survive_any
+ def _get_cluster_health(self, queue, url):
+ raw = self._get_raw_data(url)
+ if not raw:
+ return queue.put(dict())
+
+ parsed = self.json_parse(raw)
+ if not parsed:
+ return queue.put(dict())
+
+ data = fetch_data(raw_data=parsed, metrics=HEALTH_STATS)
+ dummy = {
+ 'status_green': 0,
+ 'status_red': 0,
+ 'status_yellow': 0,
+ }
+ data.update(dummy)
+ current_status = 'status_' + parsed['status']
+ data[current_status] = 1
+
+ return queue.put(data)
+
+ @get_survive_any
+ def _get_cluster_stats(self, queue, url):
+ raw = self._get_raw_data(url)
+ if not raw:
+ return queue.put(dict())
+
+ parsed = self.json_parse(raw)
+ if not parsed:
+ return queue.put(dict())
+
+ data = fetch_data(raw_data=parsed, metrics=CLUSTER_STATS)
+
+ return queue.put(data)
+
+ @get_survive_any
+ def _get_node_stats(self, queue, url):
+ raw = self._get_raw_data(url)
+ if not raw:
+ return queue.put(dict())
+
+ parsed = self.json_parse(raw)
+ if not parsed:
+ return queue.put(dict())
+
+ node = list(parsed['nodes'].keys())[0]
+ data = fetch_data(raw_data=parsed['nodes'][node], metrics=NODE_STATS)
+
+ # Search, index, flush, fetch performance latency
+ for key in LATENCY:
+ try:
+ data[key] = self.find_avg(
+ total=data[LATENCY[key]['total']],
+ spent_time=data[LATENCY[key]['spent_time']],
+ key=key)
+ except KeyError:
+ continue
+ if 'process_open_file_descriptors' in data and 'process_max_file_descriptors' in data:
+ v = float(data['process_open_file_descriptors']) / data['process_max_file_descriptors'] * 1000
+ data['file_descriptors_used'] = round(v)
+
+ return queue.put(data)
+
+ def json_parse(self, reply):
+ try:
+ return json.loads(reply)
+ except ValueError as err:
+ self.error(err)
+ return None
+
+ def find_avg(self, total, spent_time, key):
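+        # Average time spent per operation over the last interval:
+        # delta(spent_time) / delta(total) * 1000. The first call for a key
+        # only seeds the cache and returns 0.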
+ if key not in self.latency:
+ self.latency[key] = dict(total=total, spent_time=spent_time)
+ return 0
+
+ if self.latency[key]['total'] != total:
+ spent_diff = spent_time - self.latency[key]['spent_time']
+ total_diff = total - self.latency[key]['total']
+ latency = float(spent_diff) / float(total_diff) * 1000
+ self.latency[key]['total'] = total
+ self.latency[key]['spent_time'] = spent_time
+ return latency
+
+ self.latency[key]['spent_time'] = spent_time
+ return 0
+
+
+def fetch_data(raw_data, metrics):
+ data = dict()
+ for metric in metrics:
+ value = raw_data
+ metrics_list = metric.split('.')
+ try:
+ for m in metrics_list:
+ value = value[m]
+ except (KeyError, TypeError):
+ continue
+ data['_'.join(metrics_list)] = value
+ return data
diff --git a/collectors/python.d.plugin/elasticsearch/elasticsearch.conf b/collectors/python.d.plugin/elasticsearch/elasticsearch.conf
new file mode 100644
index 0000000..4058deb
--- /dev/null
+++ b/collectors/python.d.plugin/elasticsearch/elasticsearch.conf
@@ -0,0 +1,83 @@
+# netdata python.d.plugin configuration for elasticsearch stats
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# Additionally to the above, elasticsearch plugin also supports the following:
+#
+# host : 'ipaddress' # Elasticsearch server ip address or hostname.
+# port : 'port' # Port on which elasticsearch listens.
+# node_stats : yes/no # Get metrics from "/_nodes/_local/stats". Enabled by default.
+# cluster_health : yes/no # Get metrics from "/_cluster/health". Enabled by default.
+# cluster_stats : yes/no # Get metrics from "/_cluster/stats". Enabled by default.
+# indices_stats : yes/no # Get metrics from "/_cat/indices". Disabled by default.
+#
+#
+# if the URL is password protected, the following are supported:
+#
+# user: 'username'
+# pass: 'password'
+#
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+#
+local:
+ host: '127.0.0.1'
+ port: '9200'
diff --git a/collectors/python.d.plugin/energid/Makefile.inc b/collectors/python.d.plugin/energid/Makefile.inc
new file mode 100644
index 0000000..44a209d
--- /dev/null
+++ b/collectors/python.d.plugin/energid/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += energid/energid.chart.py
+dist_pythonconfig_DATA += energid/energid.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += energid/README.md energid/Makefile.inc
+
diff --git a/collectors/python.d.plugin/energid/README.md b/collectors/python.d.plugin/energid/README.md
new file mode 100644
index 0000000..60c829f
--- /dev/null
+++ b/collectors/python.d.plugin/energid/README.md
@@ -0,0 +1,77 @@
+<!--
+title: "Energi Core node monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/energid/README.md
+sidebar_label: "Energi Core"
+-->
+
+# Energi Core node monitoring with Netdata
+
+Monitors blockchain, memory, network and unspent transactions statistics.
+
+
+As [Energi Core](https://github.com/energicryptocurrency/energi) Gen 1 & 2 are based on the original Bitcoin code and
+support a very similar JSON-RPC API, there is a good chance the module also works
+with many other forks, including bitcoind itself.
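+
+For illustration, the module's data collection boils down to batched JSON-RPC
+calls; a single-method sketch (the endpoint and credentials below are
+placeholders, adjust them for your node) might look like:
+
+```python
+import base64
+import json
+from urllib.request import Request, urlopen
+
+url = 'http://127.0.0.1:9796'  # hypothetical local RPC endpoint
+auth = base64.b64encode(b'energi:energi').decode()  # hypothetical credentials
+payload = json.dumps({'version': '1.1', 'id': 0, 'method': 'getblockchaininfo', 'params': []})
+request = Request(url, data=payload.encode(), headers={
+    'Content-Type': 'application/json',
+    'Authorization': 'Basic ' + auth,
+})
+reply = json.loads(urlopen(request).read().decode())
+print(reply['result']['blocks'], reply['result']['headers'])
+```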
+
+Introduces several new charts:
+
+1. **Blockchain Index**
+ - blocks
+ - headers
+
+2. **Blockchain Difficulty**
+ - diff
+
+3. **MemPool** in MiB
+ - Max
+ - Usage
+ - TX Size
+
+4. **Secure Memory** in KiB
+ - Total
+ - Locked
+ - Used
+
+5. **Network**
+ - Connections
+
+6. **UTXO** (Unspent Transaction Output)
+ - UTXO
+ - Xfers (related transactions)
+
+Configuration is needed in most secure deployments to supply RPC
+credentials; the Energi, Bitcoin and Dash daemons are, however, probed on
+startup by default.
+
+For production use, it may be desirable to increase the retry count, since
+daemon startup can take a long time.
+
+## Configuration
+
+Edit the `python.d/energid.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/energid.conf
+```
+
+Sample:
+
+```yaml
+energi:
+ host: '127.0.0.1'
+ port: 9796
+ user: energi
+ pass: energi
+
+bitcoin:
+ host: '127.0.0.1'
+ port: 8332
+ user: bitcoin
+ pass: bitcoin
+```
+
+---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fenergid%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/python.d.plugin/energid/energid.chart.py b/collectors/python.d.plugin/energid/energid.chart.py
new file mode 100644
index 0000000..079c32d
--- /dev/null
+++ b/collectors/python.d.plugin/energid/energid.chart.py
@@ -0,0 +1,163 @@
+# -*- coding: utf-8 -*-
+# Description: Energi Core / Bitcoin netdata python.d module
+# Author: Andrey Galkin <andrey@futoin.org> (andvgal)
+# SPDX-License-Identifier: GPL-3.0-or-later
+#
+# This module is designed for energid, but it should work with many other Bitcoin forks
+# which support more or less standard JSON-RPC.
+#
+
+import json
+
+from bases.FrameworkServices.UrlService import UrlService
+
+update_every = 5
+
+ORDER = [
+ 'blockindex',
+ 'difficulty',
+ 'mempool',
+ 'secmem',
+ 'network',
+ 'timeoffset',
+ 'utxo',
+ 'xfers',
+]
+
+CHARTS = {
+ 'blockindex': {
+ 'options': [None, 'Blockchain Index', 'count', 'blockchain', 'energi.blockindex', 'area'],
+ 'lines': [
+ ['blockchain_blocks', 'blocks', 'absolute'],
+ ['blockchain_headers', 'headers', 'absolute'],
+ ]
+ },
+ 'difficulty': {
+ 'options': [None, 'Blockchain Difficulty', 'difficulty', 'blockchain', 'energi.difficulty', 'line'],
+ 'lines': [
+ ['blockchain_difficulty', 'Diff', 'absolute'],
+ ],
+ },
+ 'mempool': {
+ 'options': [None, 'MemPool', 'MiB', 'memory', 'energid.mempool', 'area'],
+ 'lines': [
+ ['mempool_max', 'Max', 'absolute', None, 1024 * 1024],
+ ['mempool_current', 'Usage', 'absolute', None, 1024 * 1024],
+ ['mempool_txsize', 'TX Size', 'absolute', None, 1024 * 1024],
+ ],
+ },
+ 'secmem': {
+ 'options': [None, 'Secure Memory', 'KiB', 'memory', 'energid.secmem', 'area'],
+ 'lines': [
+ ['secmem_total', 'Total', 'absolute', None, 1024],
+ ['secmem_locked', 'Locked', 'absolute', None, 1024],
+ ['secmem_used', 'Used', 'absolute', None, 1024],
+ ],
+ },
+ 'network': {
+ 'options': [None, 'Network', 'count', 'network', 'energid.network', 'line'],
+ 'lines': [
+ ['network_connections', 'Connections', 'absolute'],
+ ],
+ },
+ 'timeoffset': {
+        'options': [None, 'Time Offset', 'seconds', 'network', 'energid.timeoffset', 'line'],
+ 'lines': [
+            ['network_timeoffset', 'offset', 'absolute'],
+ ],
+ },
+ 'utxo': {
+ 'options': [None, 'UTXO', 'count', 'UTXO', 'energid.utxo', 'line'],
+ 'lines': [
+ ['utxo_count', 'UTXO', 'absolute'],
+ ],
+ },
+ 'xfers': {
+ 'options': [None, 'UTXO', 'count', 'UTXO', 'energid.xfers', 'line'],
+ 'lines': [
+ ['utxo_xfers', 'Xfers', 'absolute'],
+ ],
+ },
+}
+
+METHODS = {
+ 'getblockchaininfo': lambda r: {
+ 'blockchain_blocks': r['blocks'],
+ 'blockchain_headers': r['headers'],
+ 'blockchain_difficulty': r['difficulty'],
+ },
+ 'getmempoolinfo': lambda r: {
+ 'mempool_txcount': r['size'],
+ 'mempool_txsize': r['bytes'],
+ 'mempool_current': r['usage'],
+ 'mempool_max': r['maxmempool'],
+ },
+ 'getmemoryinfo': lambda r: dict([
+ ('secmem_' + k, v) for (k, v) in r['locked'].items()
+ ]),
+ 'getnetworkinfo': lambda r: {
+ 'network_timeoffset': r['timeoffset'],
+ 'network_connections': r['connections'],
+ },
+ 'gettxoutsetinfo': lambda r: {
+ 'utxo_count': r['txouts'],
+ 'utxo_xfers': r['transactions'],
+ 'utxo_size': r['disk_size'],
+ 'utxo_amount': r['total_amount'],
+ },
+}
+
+JSON_RPC_VERSION = '1.1'
+
+
+class Service(UrlService):
+ def __init__(self, configuration=None, name=None):
+ UrlService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
+ self.host = self.configuration.get('host', '127.0.0.1')
+ self.port = self.configuration.get('port', 9796)
+ self.url = '{scheme}://{host}:{port}'.format(
+ scheme=self.configuration.get('scheme', 'http'),
+ host=self.host,
+ port=self.port,
+ )
+ self.method = 'POST'
+ self.header = {
+ 'Content-Type': 'application/json',
+ }
+
+ def _get_data(self):
+ #
+        # The Bitcoin family speaks JSON-RPC version 1.0 for maximum compatibility,
+        # but uses JSON-RPC 1.1/2.0 conventions for the parts of the 1.0 standard
+        # that were left unspecified (HTTP errors and the contents of 'error').
+ #
+ # 1.0 spec: https://www.jsonrpc.org/specification_v1
+ # 2.0 spec: https://www.jsonrpc.org/specification
+ #
+ # The call documentation: https://github.com/energicryptocurrency/core-api-documentation
+ #
+ batch = []
+
+ for i, method in enumerate(METHODS):
+ batch.append({
+ 'version': JSON_RPC_VERSION,
+ 'id': i,
+ 'method': method,
+ 'params': [],
+ })
+
+ result = self._get_raw_data(body=json.dumps(batch))
+
+ if not result:
+ return None
+
+ result = json.loads(result.decode('utf-8'))
+ data = dict()
+
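+        # Responses are consumed positionally; the 'id' field assigned in the
+        # batch above would allow re-matching should a server reorder replies.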
+ for i, (_, handler) in enumerate(METHODS.items()):
+ r = result[i]
+ data.update(handler(r['result']))
+
+ return data
diff --git a/collectors/python.d.plugin/energid/energid.conf b/collectors/python.d.plugin/energid/energid.conf
new file mode 100644
index 0000000..3b13841
--- /dev/null
+++ b/collectors/python.d.plugin/energid/energid.conf
@@ -0,0 +1,90 @@
+# netdata python.d.plugin configuration for energid
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# Additionally to the above, energid also supports the following:
+#
+# host: 'IP or HOSTNAME' # type <str> the RPC host to connect to
+# port: PORT # type <int> the RPC port to connect to
+# user: 'RPC username' # type <str> the RPC username to use
+# pass: 'RPC password' # type <str> the RPC password to use
+#
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+#
+
+# Defaults:
+# host: '127.0.0.1'
+# user:
+# pass:
+#
+
+# Energi mainnet
+energi:
+ port: 9796
+
+# Most likely supported Bitcoin mainnet
+bitcoin:
+ port: 8332
+
+# Most likely supported Dash mainnet
+dash:
+ port: 9998
diff --git a/collectors/python.d.plugin/example/Makefile.inc b/collectors/python.d.plugin/example/Makefile.inc
new file mode 100644
index 0000000..1b027d5
--- /dev/null
+++ b/collectors/python.d.plugin/example/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += example/example.chart.py
+dist_pythonconfig_DATA += example/example.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += example/README.md example/Makefile.inc
+
diff --git a/collectors/python.d.plugin/example/README.md b/collectors/python.d.plugin/example/README.md
new file mode 100644
index 0000000..561ea62
--- /dev/null
+++ b/collectors/python.d.plugin/example/README.md
@@ -0,0 +1,11 @@
+<!--
+title: "Example"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/example/README.md
+-->
+
+# Example
+
+An example python data collection module.
+You can use this example to help you [write a new Python module](../#how-to-write-a-new-module).
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fexample%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/python.d.plugin/example/example.chart.py b/collectors/python.d.plugin/example/example.chart.py
new file mode 100644
index 0000000..61ae47f
--- /dev/null
+++ b/collectors/python.d.plugin/example/example.chart.py
@@ -0,0 +1,48 @@
+# -*- coding: utf-8 -*-
+# Description: example netdata python.d module
+# Author: Put your name here (your github login)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from random import SystemRandom
+
+from bases.FrameworkServices.SimpleService import SimpleService
+
+priority = 90000
+
+ORDER = [
+ 'random',
+]
+
+CHARTS = {
+ 'random': {
+ 'options': [None, 'A random number', 'random number', 'random', 'random', 'line'],
+ 'lines': [
+ ['random1']
+ ]
+ }
+}
+
+
+class Service(SimpleService):
+ def __init__(self, configuration=None, name=None):
+ SimpleService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
+ self.random = SystemRandom()
+
+ @staticmethod
+ def check():
+ return True
+
+ def get_data(self):
+ data = dict()
+
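+        # Dimensions are created on the fly: each new random<i> series is
+        # added to the chart the first time it is seen.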
+ for i in range(1, 4):
+ dimension_id = ''.join(['random', str(i)])
+
+ if dimension_id not in self.charts['random']:
+ self.charts['random'].add_dimension([dimension_id])
+
+ data[dimension_id] = self.random.randint(0, 100)
+
+ return data
diff --git a/collectors/python.d.plugin/example/example.conf b/collectors/python.d.plugin/example/example.conf
new file mode 100644
index 0000000..3d84351
--- /dev/null
+++ b/collectors/python.d.plugin/example/example.conf
@@ -0,0 +1,68 @@
+# netdata python.d.plugin configuration for example
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# Additionally to the above, example also supports the following:
+#
+# - none
+#
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
diff --git a/collectors/python.d.plugin/exim/Makefile.inc b/collectors/python.d.plugin/exim/Makefile.inc
new file mode 100644
index 0000000..36ffa56
--- /dev/null
+++ b/collectors/python.d.plugin/exim/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += exim/exim.chart.py
+dist_pythonconfig_DATA += exim/exim.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += exim/README.md exim/Makefile.inc
+
diff --git a/collectors/python.d.plugin/exim/README.md b/collectors/python.d.plugin/exim/README.md
new file mode 100644
index 0000000..240aa7b
--- /dev/null
+++ b/collectors/python.d.plugin/exim/README.md
@@ -0,0 +1,41 @@
+<!--
+title: "Exim monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/exim/README.md
+sidebar_label: "Exim"
+-->
+
+# Exim monitoring with Netdata
+
+A simple module that executes `exim -bpc` to grab the exim queue size.
+This command can take a long time to complete, so running it every second is not recommended.
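+
+Conceptually, the collection step reduces to the following sketch (it assumes
+the netdata user is allowed to run `exim`, see the requirements below):
+
+```python
+import subprocess
+
+# 'exim -bpc' prints the number of queued emails as a single integer
+out = subprocess.check_output(['exim', '-bpc']).decode()
+print({'emails': int(out.strip())})
+```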
+
+## Requirements
+
+The module uses the `exim` binary, which by default can only be executed as root. To allow other users to run it, set the `queue_list_requires_admin` option in the exim configuration to `false` (it defaults to `true`). On many Linux distributions, the exim configuration is located at `/etc/exim.conf`.
+
+1. Edit the `exim` configuration with your preferred editor and add:
+`queue_list_requires_admin = false`
+2. Restart `exim` and Netdata
+
+*WHM (cPanel) server*
+
+On a WHM server, you can reconfigure `exim` over the WHM interface with the following steps.
+
+1. Log in to WHM
+2. Navigate to Service Configuration --> Exim Configuration Manager --> tab Advanced Editor
+3. Scroll down to the button **Add additional configuration setting** and click on it.
+4. In the new dropdown that appears above, find `queue_list_requires_admin` and set it to `false`.
+5. Scroll to the end and click the **Save** button.
+5. Scroll to the end and click the **Save** button.
+
+It produces only one chart:
+
+1. **Exim Queue Emails**
+
+ - emails
+
+Configuration is not needed.
+
+---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fexim%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/python.d.plugin/exim/exim.chart.py b/collectors/python.d.plugin/exim/exim.chart.py
new file mode 100644
index 0000000..7238a1b
--- /dev/null
+++ b/collectors/python.d.plugin/exim/exim.chart.py
@@ -0,0 +1,39 @@
+# -*- coding: utf-8 -*-
+# Description: exim netdata python.d module
+# Author: Pawel Krupa (paulfantom)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from bases.FrameworkServices.ExecutableService import ExecutableService
+
+EXIM_COMMAND = 'exim -bpc'
+
+ORDER = [
+ 'qemails',
+]
+
+CHARTS = {
+ 'qemails': {
+ 'options': [None, 'Exim Queue Emails', 'emails', 'queue', 'exim.qemails', 'line'],
+ 'lines': [
+ ['emails', None, 'absolute']
+ ]
+ }
+}
+
+
+class Service(ExecutableService):
+ def __init__(self, configuration=None, name=None):
+ ExecutableService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
+ self.command = EXIM_COMMAND
+
+ def _get_data(self):
+ """
+ Format data received from shell command
+ :return: dict
+ """
+ try:
+ return {'emails': int(self._get_raw_data()[0])}
+ except (ValueError, AttributeError):
+ return None
diff --git a/collectors/python.d.plugin/exim/exim.conf b/collectors/python.d.plugin/exim/exim.conf
new file mode 100644
index 0000000..3b7e659
--- /dev/null
+++ b/collectors/python.d.plugin/exim/exim.conf
@@ -0,0 +1,91 @@
+# netdata python.d.plugin configuration for exim
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# exim is slow, so once every 10 seconds
+update_every: 10
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# Additionally to the above, exim also supports the following:
+#
+# command: 'exim -bpc' # the command to run
+#
+
+# ----------------------------------------------------------------------
+# REQUIRED exim CONFIGURATION
+#
+# netdata will query exim as user netdata.
+# By default exim will refuse to respond.
+#
+# To allow querying exim as non-admin user, please set the following
+# to your exim configuration:
+#
+# queue_list_requires_admin = false
+#
+# Your exim configuration should be in
+#
+# /etc/exim/exim4.conf
+# or
+# /etc/exim4/conf.d/main/000_local_options
+#
+# Please consult your distribution information to find the exact file.
+
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+
+local:
+ command: 'exim -bpc'
diff --git a/collectors/python.d.plugin/fail2ban/Makefile.inc b/collectors/python.d.plugin/fail2ban/Makefile.inc
new file mode 100644
index 0000000..31e117e
--- /dev/null
+++ b/collectors/python.d.plugin/fail2ban/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += fail2ban/fail2ban.chart.py
+dist_pythonconfig_DATA += fail2ban/fail2ban.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += fail2ban/README.md fail2ban/Makefile.inc
+
diff --git a/collectors/python.d.plugin/fail2ban/README.md b/collectors/python.d.plugin/fail2ban/README.md
new file mode 100644
index 0000000..c1ad994
--- /dev/null
+++ b/collectors/python.d.plugin/fail2ban/README.md
@@ -0,0 +1,41 @@
+<!--
+title: "Fail2ban monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/fail2ban/README.md
+sidebar_label: "Fail2ban"
+-->
+
+# Fail2ban monitoring with Netdata
+
+Monitors the fail2ban log file to show all bans for all active jails.
+
+## Requirements
+
+- The `fail2ban.log` file MUST be readable by Netdata (a good idea is to add **create 0640 root netdata** to the fail2ban configuration in logrotate.d)
+
+It produces two charts with one line per jail: the ban rate and the number of currently banned IPs.
+
+## Configuration
+
+Edit the `python.d/fail2ban.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/fail2ban.conf
+```
+
+Sample:
+
+```yaml
+local:
+ log_path: '/var/log/fail2ban.log'
+ conf_path: '/etc/fail2ban/jail.local'
+ exclude: 'dropbear apache'
+```
+
+If no configuration is given, the module attempts to read the log file at `/var/log/fail2ban.log` and the conf file at `/etc/fail2ban/jail.local`.
+If the conf file is not found, the default jail is `ssh`.
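+
+For reference, the log parsing the module performs amounts to the following
+sketch (the regular expression and the sample line are taken from the module):
+
+```python
+import re
+
+RE_DATA = re.compile(r'\[(?P<jail>[A-Za-z-_0-9]+)\] (?P<action>Unban|Ban|Restore Ban) (?P<ip>[a-f0-9.:]+)')
+
+line = '2018-09-12 11:45:58,727 fail2ban.actions[25029]: WARNING [ssh] Ban 217.59.246.27'
+match = RE_DATA.search(line)
+print(match.groupdict())  # {'jail': 'ssh', 'action': 'Ban', 'ip': '217.59.246.27'}
+```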
+
+---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Ffail2ban%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/python.d.plugin/fail2ban/fail2ban.chart.py b/collectors/python.d.plugin/fail2ban/fail2ban.chart.py
new file mode 100644
index 0000000..99dbf79
--- /dev/null
+++ b/collectors/python.d.plugin/fail2ban/fail2ban.chart.py
@@ -0,0 +1,204 @@
+# -*- coding: utf-8 -*-
+# Description: fail2ban log netdata python.d module
+# Author: ilyam8
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import os
+import re
+from collections import defaultdict
+from glob import glob
+
+from bases.FrameworkServices.LogService import LogService
+
+ORDER = [
+ 'jails_bans',
+ 'jails_in_jail',
+]
+
+
+def charts(jails):
+ """
+    Create chart definitions for the given jails.
+ """
+
+ ch = {
+ ORDER[0]: {
+ 'options': [None, 'Jails Ban Rate', 'bans/s', 'bans', 'jail.bans', 'line'],
+ 'lines': []
+ },
+ ORDER[1]: {
+ 'options': [None, 'Banned IPs (since the last restart of netdata)', 'IPs', 'in jail',
+ 'jail.in_jail', 'line'],
+ 'lines': []
+ },
+ }
+ for jail in jails:
+ dim = [
+ jail,
+ jail,
+ 'incremental',
+ ]
+ ch[ORDER[0]]['lines'].append(dim)
+
+ dim = [
+ '{0}_in_jail'.format(jail),
+ jail,
+ 'absolute',
+ ]
+ ch[ORDER[1]]['lines'].append(dim)
+
+ return ch
+
+
+RE_JAILS = re.compile(r'\[([a-zA-Z0-9_-]+)\][^\[\]]+?enabled\s+= +(true|yes|false|no)')
+
+# Example:
+# 2018-09-12 11:45:53,715 fail2ban.actions[25029]: WARNING [ssh] Unban 195.201.88.33
+# 2018-09-12 11:45:58,727 fail2ban.actions[25029]: WARNING [ssh] Ban 217.59.246.27
+# 2018-09-12 11:45:58,727 fail2ban.actions[25029]: WARNING [ssh] Restore Ban 217.59.246.27
+RE_DATA = re.compile(r'\[(?P<jail>[A-Za-z-_0-9]+)\] (?P<action>Unban|Ban|Restore Ban) (?P<ip>[a-f0-9.:]+)')
+
+DEFAULT_JAILS = [
+ 'ssh',
+]
+
+
+class Service(LogService):
+ def __init__(self, configuration=None, name=None):
+ LogService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = dict()
+ self.log_path = self.configuration.get('log_path', '/var/log/fail2ban.log')
+ self.conf_path = self.configuration.get('conf_path', '/etc/fail2ban/jail.local')
+ self.conf_dir = self.configuration.get('conf_dir', '/etc/fail2ban/jail.d/')
+ self.exclude = self.configuration.get('exclude', str())
+ self.monitoring_jails = list()
+ self.banned_ips = defaultdict(set)
+ self.data = dict()
+
+ def check(self):
+ """
+ :return: bool
+ """
+ if not self.conf_path.endswith(('.conf', '.local')):
+ self.error('{0} is a wrong conf path name, must be *.conf or *.local'.format(self.conf_path))
+ return False
+
+ if not os.access(self.log_path, os.R_OK):
+ self.error('{0} is not readable'.format(self.log_path))
+ return False
+
+ if os.path.getsize(self.log_path) == 0:
+ self.error('{0} is empty'.format(self.log_path))
+ return False
+
+ self.monitoring_jails = self.jails_auto_detection()
+ for jail in self.monitoring_jails:
+ self.data[jail] = 0
+ self.data['{0}_in_jail'.format(jail)] = 0
+
+ self.definitions = charts(self.monitoring_jails)
+ self.info('monitoring jails: {0}'.format(self.monitoring_jails))
+
+ return True
+
+ def get_data(self):
+ """
+ :return: dict
+ """
+ raw = self._get_raw_data()
+
+ if not raw:
+ return None if raw is None else self.data
+
+ for row in raw:
+ match = RE_DATA.search(row)
+
+ if not match:
+ continue
+
+ match = match.groupdict()
+
+ if match['jail'] not in self.monitoring_jails:
+ continue
+
+ jail, action, ip = match['jail'], match['action'], match['ip']
+
+ if action == 'Ban' or action == 'Restore Ban':
+ self.data[jail] += 1
+ if ip not in self.banned_ips[jail]:
+ self.banned_ips[jail].add(ip)
+ self.data['{0}_in_jail'.format(jail)] += 1
+ else:
+ if ip in self.banned_ips[jail]:
+ self.banned_ips[jail].remove(ip)
+ self.data['{0}_in_jail'.format(jail)] -= 1
+
+ return self.data
+
+ def get_files_from_dir(self, dir_path, suffix):
+ """
+ :return: list
+ """
+ if not os.path.isdir(dir_path):
+ self.error('{0} is not a directory'.format(dir_path))
+ return list()
+
+        return glob('{0}/*.{1}'.format(dir_path, suffix))
+
+ def get_jails_from_file(self, file_path):
+ """
+ :return: list
+ """
+ if not os.access(file_path, os.R_OK):
+            self.error('{0} is not readable or does not exist'.format(file_path))
+ return list()
+
+ with open(file_path, 'rt') as f:
+ lines = f.readlines()
+ raw = ' '.join(line for line in lines if line.startswith(('[', 'enabled')))
+
+ match = RE_JAILS.findall(raw)
+ # Result: [('ssh', 'true'), ('dropbear', 'true'), ('pam-generic', 'true'), ...]
+
+ if not match:
+ self.debug('{0} parse failed'.format(file_path))
+ return list()
+
+ return match
+
+ def jails_auto_detection(self):
+ """
+ :return: list
+
+ Parses jail configuration files. Returns list of enabled jails.
+        According to man jail.conf, the parse order must be:
+ * jail.conf
+ * jail.d/*.conf (in alphabetical order)
+ * jail.local
+ * jail.d/*.local (in alphabetical order)
+ """
+ jails_files, all_jails, active_jails = list(), list(), list()
+
+ jails_files.append('{0}.conf'.format(self.conf_path.rsplit('.')[0]))
+ jails_files.extend(self.get_files_from_dir(self.conf_dir, 'conf'))
+ jails_files.append('{0}.local'.format(self.conf_path.rsplit('.')[0]))
+ jails_files.extend(self.get_files_from_dir(self.conf_dir, 'local'))
+
+ self.debug('config files to parse: {0}'.format(jails_files))
+
+ for f in jails_files:
+ all_jails.extend(self.get_jails_from_file(f))
+
+ exclude = self.exclude.split()
+
+ for name, status in all_jails:
+ if name in exclude:
+ continue
+
+            if status in ('true', 'yes') and name not in active_jails:
+                active_jails.append(name)
+            elif status in ('false', 'no') and name in active_jails:
+                active_jails.remove(name)
+
+ return active_jails or DEFAULT_JAILS
diff --git a/collectors/python.d.plugin/fail2ban/fail2ban.conf b/collectors/python.d.plugin/fail2ban/fail2ban.conf
new file mode 100644
index 0000000..a36436b
--- /dev/null
+++ b/collectors/python.d.plugin/fail2ban/fail2ban.conf
@@ -0,0 +1,68 @@
+# netdata python.d.plugin configuration for fail2ban
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# Additionally to the above, fail2ban also supports the following:
+#
+# log_path: 'path to fail2ban.log' # Default: '/var/log/fail2ban.log'
+# conf_path: 'path to jail.local/jail.conf' # Default: '/etc/fail2ban/jail.local'
+# conf_dir: 'path to jail.d/' # Default: '/etc/fail2ban/jail.d/'
+# exclude: 'jails you want to exclude from autodetection' # Default: none
+#------------------------------------------------------------------------------------------------------------------
diff --git a/collectors/python.d.plugin/freeradius/Makefile.inc b/collectors/python.d.plugin/freeradius/Makefile.inc
new file mode 100644
index 0000000..54aa649
--- /dev/null
+++ b/collectors/python.d.plugin/freeradius/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += freeradius/freeradius.chart.py
+dist_pythonconfig_DATA += freeradius/freeradius.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += freeradius/README.md freeradius/Makefile.inc
+
diff --git a/collectors/python.d.plugin/freeradius/README.md b/collectors/python.d.plugin/freeradius/README.md
new file mode 100644
index 0000000..2993c89
--- /dev/null
+++ b/collectors/python.d.plugin/freeradius/README.md
@@ -0,0 +1,90 @@
+<!--
+title: "FreeRADIUS monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/freeradius/README.md
+sidebar_label: "FreeRADIUS"
+-->
+
+# FreeRADIUS monitoring with Netdata
+
+Uses the `radclient` command to collect FreeRADIUS statistics. Running it every second is not recommended.
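+
+Under the hood the query is equivalent to piping a status request into
+`radclient`; a sketch (host, port and secret below are placeholders) might
+look like:
+
+```python
+from subprocess import PIPE, Popen
+
+MSG = ('Message-Authenticator = 0x00, FreeRADIUS-Statistics-Type = 15, '
+       'Response-Packet-Type = Access-Accept')
+
+echo = Popen(['echo', MSG], stdout=PIPE)
+rad = Popen('radclient -r 1 -t 1 -x 127.0.0.1:18121 status adminsecret'.split(),
+            stdin=echo.stdout, stdout=PIPE)
+echo.stdout.close()
+print(rad.communicate()[0].decode())
+```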
+
+It produces:
+
+1. **Authentication counters:**
+
+ - access-accepts
+ - access-rejects
+ - auth-dropped-requests
+ - auth-duplicate-requests
+ - auth-invalid-requests
+ - auth-malformed-requests
+ - auth-unknown-types
+
+2. **Accounting counters:** [optional]
+
+ - accounting-requests
+ - accounting-responses
+ - acct-dropped-requests
+ - acct-duplicate-requests
+ - acct-invalid-requests
+ - acct-malformed-requests
+ - acct-unknown-types
+
+3. **Proxy authentication counters:** [optional]
+
+ - proxy-access-accepts
+ - proxy-access-rejects
+ - proxy-auth-dropped-requests
+ - proxy-auth-duplicate-requests
+ - proxy-auth-invalid-requests
+ - proxy-auth-malformed-requests
+ - proxy-auth-unknown-types
+
+4. **Proxy accounting counters:** [optional]
+
+ - proxy-accounting-requests
+ - proxy-accounting-responses
+ - proxy-acct-dropped-requests
+ - proxy-acct-duplicate-requests
+ - proxy-acct-invalid-requests
+ - proxy-acct-malformed-requests
+    - proxy-acct-unknown-types
+
+## Configuration
+
+Edit the `python.d/freeradius.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/freeradius.conf
+```
+
+Sample:
+
+```yaml
+local:
+ host : 'localhost'
+ port : '18121'
+ secret : 'adminsecret'
+ acct : False # Freeradius accounting statistics.
+ proxy_auth : False # Freeradius proxy authentication statistics.
+ proxy_acct : False # Freeradius proxy accounting statistics.
+```
+
+**FreeRADIUS server configuration:**
+
+The configuration for the status server is automatically created in the sites-available directory.
+By default, the server is enabled and can be queried from every client.
+FreeRADIUS will only respond to status-server messages if the status-server virtual server has been enabled.
+
+To do this, create a link from the sites-enabled directory to the status file in the sites-available directory:
+
+- `cd sites-enabled`
+- `ln -s ../sites-available/status status`
+
+and restart/reload your FreeRADIUS server.
+
+---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Ffreeradius%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/python.d.plugin/freeradius/freeradius.chart.py b/collectors/python.d.plugin/freeradius/freeradius.chart.py
new file mode 100644
index 0000000..161d57e
--- /dev/null
+++ b/collectors/python.d.plugin/freeradius/freeradius.chart.py
@@ -0,0 +1,177 @@
+# -*- coding: utf-8 -*-
+# Description: freeradius netdata python.d module
+# Author: ilyam8
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import re
+from subprocess import Popen, PIPE
+
+from bases.FrameworkServices.SimpleService import SimpleService
+from bases.collection import find_binary
+
+update_every = 15
+
+PARSER = re.compile(r'((?<=-)[AP][a-zA-Z-]+) = (\d+)')
+
+RADIUS_MSG = 'Message-Authenticator = 0x00, FreeRADIUS-Statistics-Type = 15, Response-Packet-Type = Access-Accept'
+
+RADCLIENT_RETRIES = 1
+RADCLIENT_TIMEOUT = 1
+
+DEFAULT_HOST = 'localhost'
+DEFAULT_PORT = 18121
+DEFAULT_DO_ACCT = False
+DEFAULT_DO_PROXY_AUTH = False
+DEFAULT_DO_PROXY_ACCT = False
+
+ORDER = [
+ 'authentication',
+ 'accounting',
+ 'proxy-auth',
+ 'proxy-acct',
+]
+
+CHARTS = {
+ 'authentication': {
+ 'options': [None, 'Authentication', 'packets/s', 'authentication', 'freerad.auth', 'line'],
+ 'lines': [
+ ['access-accepts', None, 'incremental'],
+ ['access-rejects', None, 'incremental'],
+ ['auth-dropped-requests', 'dropped-requests', 'incremental'],
+ ['auth-duplicate-requests', 'duplicate-requests', 'incremental'],
+ ['auth-invalid-requests', 'invalid-requests', 'incremental'],
+ ['auth-malformed-requests', 'malformed-requests', 'incremental'],
+ ['auth-unknown-types', 'unknown-types', 'incremental']
+ ]
+ },
+ 'accounting': {
+ 'options': [None, 'Accounting', 'packets/s', 'accounting', 'freerad.acct', 'line'],
+ 'lines': [
+ ['accounting-requests', 'requests', 'incremental'],
+ ['accounting-responses', 'responses', 'incremental'],
+ ['acct-dropped-requests', 'dropped-requests', 'incremental'],
+ ['acct-duplicate-requests', 'duplicate-requests', 'incremental'],
+ ['acct-invalid-requests', 'invalid-requests', 'incremental'],
+ ['acct-malformed-requests', 'malformed-requests', 'incremental'],
+ ['acct-unknown-types', 'unknown-types', 'incremental']
+ ]
+ },
+ 'proxy-auth': {
+ 'options': [None, 'Proxy Authentication', 'packets/s', 'authentication', 'freerad.proxy.auth', 'line'],
+ 'lines': [
+ ['proxy-access-accepts', 'access-accepts', 'incremental'],
+ ['proxy-access-rejects', 'access-rejects', 'incremental'],
+ ['proxy-auth-dropped-requests', 'dropped-requests', 'incremental'],
+ ['proxy-auth-duplicate-requests', 'duplicate-requests', 'incremental'],
+ ['proxy-auth-invalid-requests', 'invalid-requests', 'incremental'],
+ ['proxy-auth-malformed-requests', 'malformed-requests', 'incremental'],
+ ['proxy-auth-unknown-types', 'unknown-types', 'incremental']
+ ]
+ },
+ 'proxy-acct': {
+ 'options': [None, 'Proxy Accounting', 'packets/s', 'accounting', 'freerad.proxy.acct', 'line'],
+ 'lines': [
+ ['proxy-accounting-requests', 'requests', 'incremental'],
+ ['proxy-accounting-responses', 'responses', 'incremental'],
+ ['proxy-acct-dropped-requests', 'dropped-requests', 'incremental'],
+ ['proxy-acct-duplicate-requests', 'duplicate-requests', 'incremental'],
+ ['proxy-acct-invalid-requests', 'invalid-requests', 'incremental'],
+ ['proxy-acct-malformed-requests', 'malformed-requests', 'incremental'],
+ ['proxy-acct-unknown-types', 'unknown-types', 'incremental']
+ ]
+ }
+}
+
+
+def radclient_status(radclient, retries, timeout, host, port, secret):
+ # radclient -r 1 -t 1 -x 127.0.0.1:18121 status secret
+
+ return '{radclient} -r {num_retries} -t {timeout} -x {host}:{port} status {secret}'.format(
+ radclient=radclient,
+ num_retries=retries,
+ timeout=timeout,
+ host=host,
+ port=port,
+ secret=secret,
+ ).split()
+
+
+class Service(SimpleService):
+ def __init__(self, configuration=None, name=None):
+ SimpleService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
+ self.host = self.configuration.get('host', DEFAULT_HOST)
+ self.port = self.configuration.get('port', DEFAULT_PORT)
+ self.secret = self.configuration.get('secret')
+ self.do_acct = self.configuration.get('acct', DEFAULT_DO_ACCT)
+ self.do_proxy_auth = self.configuration.get('proxy_auth', DEFAULT_DO_PROXY_AUTH)
+ self.do_proxy_acct = self.configuration.get('proxy_acct', DEFAULT_DO_PROXY_ACCT)
+ self.echo = find_binary('echo')
+ self.radclient = find_binary('radclient')
+ self.sub_echo = [self.echo, RADIUS_MSG]
+ self.sub_radclient = radclient_status(
+ self.radclient, RADCLIENT_RETRIES, RADCLIENT_TIMEOUT, self.host, self.port, self.secret,
+ )
+
+ def check(self):
+ if not self.radclient:
+ self.error("Can't locate 'radclient' binary or binary is not executable by netdata user")
+ return False
+
+ if not self.echo:
+ self.error("Can't locate 'echo' binary or binary is not executable by netdata user")
+            return False
+
+ if not self.secret:
+ self.error("'secret' isn't set")
+            return False
+
+ if not self.get_raw_data():
+ self.error('Request returned no data. Is server alive?')
+ return False
+
+ if not self.do_acct:
+ self.order.remove('accounting')
+
+ if not self.do_proxy_auth:
+ self.order.remove('proxy-auth')
+
+ if not self.do_proxy_acct:
+ self.order.remove('proxy-acct')
+
+ return True
+
+ def get_data(self):
+ """
+ Format data received from shell command
+ :return: dict
+ """
+ result = self.get_raw_data()
+
+ if not result:
+ return None
+
+ return dict(
+ (key.lower(), value) for key, value in PARSER.findall(result)
+ )
+
+ def get_raw_data(self):
+ """
+ The following code is equivalent to
+ 'echo "Message-Authenticator = 0x00, FreeRADIUS-Statistics-Type = 15, Response-Packet-Type = Access-Accept"
+ | radclient -t 1 -r 1 host:port status secret'
+ :return: str
+ """
+ try:
+ process_echo = Popen(self.sub_echo, stdout=PIPE, stderr=PIPE, shell=False)
+ process_rad = Popen(self.sub_radclient, stdin=process_echo.stdout, stdout=PIPE, stderr=PIPE, shell=False)
+ process_echo.stdout.close()
+ raw_result = process_rad.communicate()[0]
+ except OSError:
+ return None
+
+        if process_rad.returncode == 0:
+ return raw_result.decode()
+
+ return None
diff --git a/collectors/python.d.plugin/freeradius/freeradius.conf b/collectors/python.d.plugin/freeradius/freeradius.conf
new file mode 100644
index 0000000..74b2737
--- /dev/null
+++ b/collectors/python.d.plugin/freeradius/freeradius.conf
@@ -0,0 +1,80 @@
+# netdata python.d.plugin configuration for freeradius
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# Additionally to the above, freeradius also supports the following:
+#
+# host: 'host' # Default: 'localhost'. Server ip address or hostname.
+# port: 'port' # Default: '18121'. Port on which freeradius server listen (type = status).
+# secret: 'secret' # Default: 'adminsecret'.
+# acct: yes/no # Default: no. Freeradius accounting statistics.
+# proxy_auth: yes/no # Default: no. Freeradius proxy authentication statistics.
+# proxy_acct: yes/no # Default: no. Freeradius proxy accounting statistics.
+#
+# ------------------------------------------------------------------------------------------------------------------
+# Freeradius server configuration:
+# The configuration for the status server is automatically created in the sites-available directory.
+# By default, server is enabled and can be queried from every client.
+# FreeRADIUS will only respond to status-server messages, if the status-server virtual server has been enabled.
+# To do this, create a link from the sites-enabled directory to the status file in the sites-available directory:
+# cd sites-enabled
+# ln -s ../sites-available/status status
+# and restart/reload your FREERADIUS server.
+# ------------------------------------------------------------------------------------------------------------------
diff --git a/collectors/python.d.plugin/gearman/Makefile.inc b/collectors/python.d.plugin/gearman/Makefile.inc
new file mode 100644
index 0000000..275adf1
--- /dev/null
+++ b/collectors/python.d.plugin/gearman/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += gearman/gearman.chart.py
+dist_pythonconfig_DATA += gearman/gearman.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += gearman/README.md gearman/Makefile.inc
+
diff --git a/collectors/python.d.plugin/gearman/README.md b/collectors/python.d.plugin/gearman/README.md
new file mode 100644
index 0000000..b9fc914
--- /dev/null
+++ b/collectors/python.d.plugin/gearman/README.md
@@ -0,0 +1,51 @@
+<!--
+title: "Gearman monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/gearman/README.md
+sidebar_label: "Gearman"
+-->
+
+# Gearman monitoring with Netdata
+
+Monitors Gearman worker statistics. A chart is shown for each job as well as one showing a summary of all workers.
+
+Note: Charts may show as a line graph rather than an area
+graph if you load Netdata with no jobs running. To change
+this, go to "Settings" > "Which dimensions to show?" and
+select "All".
+
+The plugin can obtain data from a TCP socket **or** a Unix socket.
+
+**Requirement:**
+The socket MUST be readable by the netdata user.
+
+It produces:
+
+ * Workers queued
+ * Workers idle
+ * Workers running
+
+## Configuration
+
+Edit the `python.d/gearman.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/gearman.conf
+```
+
+```yaml
+localhost:
+ name : 'local'
+ host : 'localhost'
+ port : 4730
+
+ # TLS information can be provided as well
+ tls : no
+ cert : /path/to/cert
+ key : /path/to/key
+```
+
+When no configuration file is found, the module tries to connect to the TCP/IP socket at `localhost:4730`.
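+
+To verify that the Gearman server is reachable and returning worker statistics, you can query it directly with the
+`gearadmin` tool that ships with gearmand (host and port shown assume the defaults):
+
+```bash
+# prints one line per registered job: name, jobs queued, jobs running, workers available
+gearadmin --status --host 127.0.0.1 --port 4730
+```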
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fgearman%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/gearman/gearman.chart.py b/collectors/python.d.plugin/gearman/gearman.chart.py
new file mode 100644
index 0000000..5e280a4
--- /dev/null
+++ b/collectors/python.d.plugin/gearman/gearman.chart.py
@@ -0,0 +1,243 @@
+# Description: gearman netdata python.d module
+# Author: Kyle Agronick (agronick)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# Gearman Netdata Plugin
+
+from copy import deepcopy
+
+from bases.FrameworkServices.SocketService import SocketService
+
+CHARTS = {
+ 'total_workers': {
+ 'options': [None, 'Total Jobs', 'Jobs', 'Total Jobs', 'gearman.total_jobs', 'line'],
+ 'lines': [
+ ['total_pending', 'Pending', 'absolute'],
+ ['total_running', 'Running', 'absolute'],
+ ]
+ },
+}
+
+
+def job_chart_template(job_name):
+ return {
+ 'options': [None, job_name, 'Jobs', 'Activity by Job', 'gearman.single_job', 'stacked'],
+ 'lines': [
+ ['{0}_pending'.format(job_name), 'Pending', 'absolute'],
+ ['{0}_idle'.format(job_name), 'Idle', 'absolute'],
+ ['{0}_running'.format(job_name), 'Running', 'absolute'],
+ ]
+ }
+
+
+def build_result_dict(job):
+ """
+ Get the status for each job
+ :return: dict
+ """
+
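+ # 'metrics' carries the three numeric columns that Gearman reports per job
+ # (see get_worker_data below): jobs queued in total, jobs running, workers available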
+ total, running, available = job['metrics']
+
+ idle = available - running
+ pending = total - running
+
+ return {
+ '{0}_pending'.format(job['job_name']): pending,
+ '{0}_idle'.format(job['job_name']): idle,
+ '{0}_running'.format(job['job_name']): running,
+ }
+
+
+def parse_worker_data(job):
+ job_name = job[0]
+ job_metrics = job[1:]
+
+ return {
+ 'job_name': job_name,
+ 'metrics': job_metrics,
+ }
+
+
+class GearmanReadException(Exception):
+ pass
+
+
+class Service(SocketService):
+ def __init__(self, configuration=None, name=None):
+ super(Service, self).__init__(configuration=configuration, name=name)
+ self.request = "status\n"
+ self._keep_alive = True
+
+ self.host = self.configuration.get('host', 'localhost')
+ self.port = self.configuration.get('port', 4730)
+
+ self.tls = self.configuration.get('tls', False)
+ self.cert = self.configuration.get('cert', None)
+ self.key = self.configuration.get('key', None)
+
+ self.active_jobs = set()
+ self.definitions = deepcopy(CHARTS)
+ self.order = ['total_workers']
+
+ def _get_data(self):
+ """
+ Format data received from socket
+ :return: dict
+ """
+
+ try:
+ active_jobs = self.get_active_jobs()
+ except GearmanReadException:
+ return None
+
+ found_jobs, job_data = self.process_jobs(active_jobs)
+ self.remove_stale_jobs(found_jobs)
+ return job_data
+
+ def get_active_jobs(self):
+ active_jobs = []
+
+ for job in self.get_worker_data():
+ parsed_job = parse_worker_data(job)
+
+ # Gearman does not clean up old jobs
+ # We only care about jobs that have
+ # some relevant data
+ if not any(parsed_job['metrics']):
+ continue
+
+ active_jobs.append(parsed_job)
+
+ return active_jobs
+
+ def get_worker_data(self):
+ """
+ Split the data returned from Gearman
+ into a list of lists
+
+ This returns the same output that you
+ would get from a gearadmin --status
+ command.
+
+ Example output returned from
+ _get_raw_data():
+ prefix generic_worker4 78 78 500
+ generic_worker2 78 78 500
+ generic_worker3 0 0 760
+ generic_worker1 0 0 500
+
+ :return: list
+ """
+
+ try:
+ raw = self._get_raw_data()
+ except (ValueError, AttributeError):
+ raise GearmanReadException()
+
+ if raw is None:
+ self.debug("Gearman returned no data")
+ raise GearmanReadException()
+
+ workers = list()
+
+ for line in raw.splitlines()[:-1]:
+ parts = line.split()
+ if not parts:
+ continue
+
+ name = '_'.join(parts[:-3])
+ try:
+ values = [int(w) for w in parts[-3:]]
+ except ValueError:
+ continue
+
+ w = [name]
+ w.extend(values)
+ workers.append(w)
+
+ return workers
+
+ def process_jobs(self, active_jobs):
+
+ output = {
+ 'total_pending': 0,
+ 'total_idle': 0,
+ 'total_running': 0,
+ }
+ found_jobs = set()
+
+ for parsed_job in active_jobs:
+
+ job_name = self.add_job(parsed_job)
+ found_jobs.add(job_name)
+ job_data = build_result_dict(parsed_job)
+
+ for sum_value in ('pending', 'running', 'idle'):
+ output['total_{0}'.format(sum_value)] += job_data['{0}_{1}'.format(job_name, sum_value)]
+
+ output.update(job_data)
+
+ return found_jobs, output
+
+ def remove_stale_jobs(self, active_job_list):
+ """
+ Removes jobs that have no workers, pending jobs,
+ or running jobs
+ :param active_job_list: The latest list of active jobs
+ :type active_job_list: iterable
+ :return: None
+ """
+
+ for to_remove in self.active_jobs - active_job_list:
+ self.remove_job(to_remove)
+
+ def add_job(self, parsed_job):
+ """
+ Adds a job to the list of active jobs
+ :param parsed_job: A parsed job dict
+ :type parsed_job: dict
+ :return: None
+ """
+
+ def add_chart(job_name):
+ """
+ Adds a new job chart
+ :param job_name: The name of the job to add
+ :type job_name: string
+ :return: None
+ """
+
+ job_key = 'job_{0}'.format(job_name)
+ template = job_chart_template(job_name)
+ new_chart = self.charts.add_chart([job_key] + template['options'])
+ for dimension in template['lines']:
+ new_chart.add_dimension(dimension)
+
+ if parsed_job['job_name'] not in self.active_jobs:
+ add_chart(parsed_job['job_name'])
+ self.active_jobs.add(parsed_job['job_name'])
+
+ return parsed_job['job_name']
+
+ def remove_job(self, job_name):
+ """
+ Removes a job from the list of active jobs
+ :param job_name: The name of the job to remove
+ :type job_name: string
+ :return: None
+ """
+
+ def remove_chart(job_name):
+ """
+ Removes a job chart
+ :param job_name: The name of the job to remove
+ :type job_name: string
+ :return: None
+ """
+
+ job_key = 'job_{0}'.format(job_name)
+ self.charts[job_key].obsolete()
+ del self.charts[job_key]
+
+ remove_chart(job_name)
+ self.active_jobs.remove(job_name)
diff --git a/collectors/python.d.plugin/gearman/gearman.conf b/collectors/python.d.plugin/gearman/gearman.conf
new file mode 100644
index 0000000..c41fd9f
--- /dev/null
+++ b/collectors/python.d.plugin/gearman/gearman.conf
@@ -0,0 +1,72 @@
+# netdata python.d.plugin configuration for gearman
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# In addition to the above, gearman also supports the following:
+#
+# host: localhost # The host running the Gearman server
+# port: 4730 # Port of the Gearman server
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOB
+
+localhost:
+ name : 'local'
+ host : 'localhost'
+ port : 4730
\ No newline at end of file
diff --git a/collectors/python.d.plugin/go_expvar/Makefile.inc b/collectors/python.d.plugin/go_expvar/Makefile.inc
new file mode 100644
index 0000000..74f50d7
--- /dev/null
+++ b/collectors/python.d.plugin/go_expvar/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += go_expvar/go_expvar.chart.py
+dist_pythonconfig_DATA += go_expvar/go_expvar.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += go_expvar/README.md go_expvar/Makefile.inc
+
diff --git a/collectors/python.d.plugin/go_expvar/README.md b/collectors/python.d.plugin/go_expvar/README.md
new file mode 100644
index 0000000..a73610e
--- /dev/null
+++ b/collectors/python.d.plugin/go_expvar/README.md
@@ -0,0 +1,319 @@
+<!--
+title: "Go applications monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/go_expvar/README.md
+sidebar_label: "Go applications"
+-->
+
+# Go applications monitoring with Netdata
+
+Monitors Go applications that expose their metrics with the use of the `expvar` package from the Go standard library. The module produces charts for Go runtime memory statistics and, optionally, any number of custom charts.
+
+The `go_expvar` module produces the following charts:
+
+1. **Heap allocations** in kB
+
+ - alloc: size of objects allocated on the heap
+ - inuse: size of allocated heap spans
+
+2. **Stack allocations** in kB
+
+ - inuse: size of allocated stack spans
+
+3. **MSpan allocations** in kB
+
+ - inuse: size of allocated mspan structures
+
+4. **MCache allocations** in kB
+
+ - inuse: size of allocated mcache structures
+
+5. **Virtual memory** in kB
+
+ - sys: size of reserved virtual address space
+
+6. **Live objects**
+
+ - live: number of live objects in memory
+
+7. **GC pauses average** in ns
+
+ - avg: average duration of all GC stop-the-world pauses
+
+## Monitoring Go applications
+
+Netdata can be used to monitor running Go applications that expose their metrics with
+the use of the [expvar package](https://golang.org/pkg/expvar/) included in Go standard library.
+
+The `expvar` package exposes these metrics over HTTP and is very easy to use.
+Consider this minimal sample below:
+
+```go
+package main
+
+import (
+ _ "expvar"
+ "net/http"
+)
+
+func main() {
+ http.ListenAndServe("127.0.0.1:8080", nil)
+}
+```
+
+When imported this way, the `expvar` package registers an HTTP handler at `/debug/vars` that
+exposes Go runtime's memory statistics in JSON format. You can inspect the output by opening
+the URL in your browser (or by using `wget` or `curl`).
+
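+For example, using the sample program above:
+
+```bash
+curl http://127.0.0.1:8080/debug/vars
+```
+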
+Sample output:
+
+```json
+{
+"cmdline": ["./expvar-demo-binary"],
+"memstats": {"Alloc":630856,"TotalAlloc":630856,"Sys":3346432,"Lookups":27, <omitted for brevity>}
+}
+```
+
+You can of course expose and monitor your own variables as well.
+Here is a sample Go application that exposes a few custom variables:
+
+```go
+package main
+
+import (
+ "expvar"
+ "net/http"
+ "runtime"
+ "time"
+)
+
+func main() {
+
+ tick := time.NewTicker(1 * time.Second)
+ num_go := expvar.NewInt("runtime.goroutines")
+ counters := expvar.NewMap("counters")
+ counters.Set("cnt1", new(expvar.Int))
+ counters.Set("cnt2", new(expvar.Float))
+
+ go http.ListenAndServe(":8080", nil)
+
+ for {
+ select {
+ case <- tick.C:
+ num_go.Set(int64(runtime.NumGoroutine()))
+ counters.Add("cnt1", 1)
+ counters.AddFloat("cnt2", 1.452)
+ }
+ }
+}
+```
+
+Apart from the runtime memory stats, this application publishes two counters and the
+number of currently running goroutines, and it updates these stats every second.
+
+In the next section, we will cover how to monitor and chart these exposed stats with
+the use of `netdata`'s `go_expvar` module.
+
+### Using Netdata go_expvar module
+
+The `go_expvar` module is disabled by default. To enable it, edit `python.d.conf` (to edit it on your system run
+`/etc/netdata/edit-config python.d.conf`), and change the `go_expvar` variable to `yes`:
+
+```
+# Enable / Disable python.d.plugin modules
+#default_run: yes
+#
+# If "default_run" = "yes" the default for all modules is enabled (yes).
+# Setting any of these to "no" will disable it.
+#
+# If "default_run" = "no" the default for all modules is disabled (no).
+# Setting any of these to "yes" will enable it.
+...
+go_expvar: yes
+...
+```
+
+Next, we need to edit the module configuration file (found at `/etc/netdata/python.d/go_expvar.conf` by default; to
+edit it on your system, run `/etc/netdata/edit-config python.d/go_expvar.conf`). The module configuration consists of
+jobs, where each job can be used to monitor a separate Go application. Let's see a sample job configuration:
+
+```
+# /etc/netdata/python.d/go_expvar.conf
+
+app1:
+ name : 'app1'
+ url : 'http://127.0.0.1:8080/debug/vars'
+ collect_memstats: true
+ extra_charts: {}
+```
+
+Let's go over each of the defined options:
+
+```
+name: 'app1'
+```
+
+This is the job name that will appear at the Netdata dashboard.
+If not defined, the job_name (top level key) will be used.
+
+```
+url: 'http://127.0.0.1:8080/debug/vars'
+```
+
+This is the URL of the expvar endpoint. As the expvar handler can be installed
+in a custom path, the whole URL has to be specified. This value is mandatory.
+
+```
+collect_memstats: true
+```
+
+Whether to enable collecting stats about Go runtime's memory. You can find more
+information about the exposed values at the [runtime package docs](https://golang.org/pkg/runtime/#MemStats).
+
+```
+extra_charts: {}
+```
+
+Enables the user to specify custom expvars to monitor and chart.
+Will be explained in more detail below.
+
+**Note: if `collect_memstats` is disabled and no `extra_charts` are defined, the plugin will
+disable itself, as there will be no data to collect!**
+
+Apart from these options, each job supports options inherited from Netdata's `python.d.plugin`
+and its base `UrlService` class. These are:
+
+```
+update_every: 1 # the job's data collection frequency
+priority: 60000 # the job's order on the dashboard
+user: admin # use when the expvar endpoint is protected by HTTP Basic Auth
+password: sekret # use when the expvar endpoint is protected by HTTP Basic Auth
+```
+
+### Monitoring custom vars with go_expvar
+
+Now, memory stats might be useful, but what if you want Netdata to monitor some custom values
+that your Go application exposes? The `go_expvar` module can do that as well with the use of
+the `extra_charts` configuration variable.
+
+The `extra_charts` variable is a YaML list of Netdata chart definitions.
+Each chart definition has the following keys:
+
+```
+id: Netdata chart ID
+options: a key-value mapping of chart options
+lines: a list of line definitions
+```
+
+**Note: please do not use dots in the chart or line ID field.
+See [this issue](https://github.com/netdata/netdata/pull/1902#issuecomment-284494195) for explanation.**
+
+Please see these two links to the official Netdata documentation for more information about the values:
+
+- [External plugins - charts](/collectors/plugins.d/README.md#chart)
+- [Chart variables](/collectors/python.d.plugin/README.md#global-variables-order-and-chart)
+
+**Line definitions**
+
+Each chart can define multiple lines (dimensions).
+A line definition is a key-value mapping of line options.
+Each line can have the following options:
+
+```
+# mandatory
+expvar_key: the name of the expvar as present in the JSON output of /debug/vars endpoint
+expvar_type: value type; supported are "float" or "int"
+id: the id of this line/dimension in Netdata
+
+# optional - Netdata defaults are used if these options are not defined
+name: ''
+algorithm: absolute
+multiplier: 1
+divisor: 100 if expvar_type == float, 1 if expvar_type == int
+hidden: False
+```
+
+Please see the following link for more information about the options and their default values:
+[External plugins - dimensions](/collectors/plugins.d/README.md#dimension)
+
+Apart from top-level expvars, this plugin can also parse expvars stored in a multi-level map.
+All dicts in the resulting JSON document are then flattened to one level,
+and expvar names are joined together with '.' when flattening.
+
+Example:
+
+```
+{
+ "counters": {"cnt1": 1042, "cnt2": 1512.9839999999983},
+ "runtime.goroutines": 5
+}
+```
+
+In the above case, the exported variables will be available under `runtime.goroutines`,
+`counters.cnt1` and `counters.cnt2` expvar_keys. If the flattening results in a key collision,
+the first defined key wins and all subsequent keys with the same name are ignored.
+
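+For reference, the flattening behaves like this helper (a simplified sketch of the `flatten`
+function in `go_expvar.chart.py`):
+
+```python
+def flatten(d, top='', sep='.'):
+    items = []
+    for key, val in d.items():
+        nkey = top + sep + key if top else key
+        if isinstance(val, dict):
+            # recurse into nested dicts, joining key segments with the separator
+            items.extend(flatten(val, nkey, sep=sep).items())
+        else:
+            items.append((nkey, val))
+    return dict(items)
+
+# flatten({'counters': {'cnt1': 1042}, 'runtime.goroutines': 5})
+# returns {'counters.cnt1': 1042, 'runtime.goroutines': 5}
+```
+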
+## Enable the collector
+
+The `go_expvar` collector is disabled by default. To enable it, use `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`, to edit the `python.d.conf` file.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d.conf
+```
+
+Change the value of the `go_expvar` setting to `yes`. Save the file and restart the Netdata Agent with `sudo systemctl
+restart netdata`, or the appropriate method for your system, to finish enabling the `go_expvar` collector.
+
+## Configuration
+
+Edit the `python.d/go_expvar.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/go_expvar.conf
+```
+
+The configuration below matches the second Go application described above.
+Netdata will monitor and chart memory stats for the application, as well as a custom chart of
+running goroutines and two dummy counters.
+
+```
+app1:
+ name : 'app1'
+ url : 'http://127.0.0.1:8080/debug/vars'
+ collect_memstats: true
+ extra_charts:
+ - id: "runtime_goroutines"
+ options:
+ name: num_goroutines
+ title: "runtime: number of goroutines"
+ units: goroutines
+ family: runtime
+ context: expvar.runtime.goroutines
+ chart_type: line
+ lines:
+ - {expvar_key: 'runtime.goroutines', expvar_type: int, id: runtime_goroutines}
+ - id: "foo_counters"
+ options:
+ name: counters
+ title: "some random counters"
+ units: awesomeness
+ family: counters
+ context: expvar.foo.counters
+ chart_type: line
+ lines:
+ - {expvar_key: 'counters.cnt1', expvar_type: int, id: counters_cnt1}
+ - {expvar_key: 'counters.cnt2', expvar_type: float, id: counters_cnt2}
+```
+
+**Netdata charts example**
+
+The images below show how the final charts look in Netdata.
+
+![Memory stats charts](https://cloud.githubusercontent.com/assets/15180106/26762052/62b4af58-493b-11e7-9e69-146705acfc2c.png)
+
+![Custom charts](https://cloud.githubusercontent.com/assets/15180106/26762051/62ae915e-493b-11e7-8518-bd25a3886650.png)
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fgo_expvar%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/python.d.plugin/go_expvar/go_expvar.chart.py b/collectors/python.d.plugin/go_expvar/go_expvar.chart.py
new file mode 100644
index 0000000..f9bbdc1
--- /dev/null
+++ b/collectors/python.d.plugin/go_expvar/go_expvar.chart.py
@@ -0,0 +1,253 @@
+# -*- coding: utf-8 -*-
+# Description: go_expvar netdata python.d module
+# Author: Jan Kral (kralewitz)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import division
+
+import json
+from collections import namedtuple
+
+from bases.FrameworkServices.UrlService import UrlService
+
+MEMSTATS_ORDER = [
+ 'memstats_heap',
+ 'memstats_stack',
+ 'memstats_mspan',
+ 'memstats_mcache',
+ 'memstats_sys',
+ 'memstats_live_objects',
+ 'memstats_gc_pauses',
+]
+
+MEMSTATS_CHARTS = {
+ 'memstats_heap': {
+ 'options': ['heap', 'memory: size of heap memory structures', 'KiB', 'memstats',
+ 'expvar.memstats.heap', 'line'],
+ 'lines': [
+ ['memstats_heap_alloc', 'alloc', 'absolute', 1, 1024],
+ ['memstats_heap_inuse', 'inuse', 'absolute', 1, 1024]
+ ]
+ },
+ 'memstats_stack': {
+ 'options': ['stack', 'memory: size of stack memory structures', 'KiB', 'memstats',
+ 'expvar.memstats.stack', 'line'],
+ 'lines': [
+ ['memstats_stack_inuse', 'inuse', 'absolute', 1, 1024]
+ ]
+ },
+ 'memstats_mspan': {
+ 'options': ['mspan', 'memory: size of mspan memory structures', 'KiB', 'memstats',
+ 'expvar.memstats.mspan', 'line'],
+ 'lines': [
+ ['memstats_mspan_inuse', 'inuse', 'absolute', 1, 1024]
+ ]
+ },
+ 'memstats_mcache': {
+ 'options': ['mcache', 'memory: size of mcache memory structures', 'KiB', 'memstats',
+ 'expvar.memstats.mcache', 'line'],
+ 'lines': [
+ ['memstats_mcache_inuse', 'inuse', 'absolute', 1, 1024]
+ ]
+ },
+ 'memstats_live_objects': {
+ 'options': ['live_objects', 'memory: number of live objects', 'objects', 'memstats',
+ 'expvar.memstats.live_objects', 'line'],
+ 'lines': [
+ ['memstats_live_objects', 'live']
+ ]
+ },
+ 'memstats_sys': {
+ 'options': ['sys', 'memory: size of reserved virtual address space', 'KiB', 'memstats',
+ 'expvar.memstats.sys', 'line'],
+ 'lines': [
+ ['memstats_sys', 'sys', 'absolute', 1, 1024]
+ ]
+ },
+ 'memstats_gc_pauses': {
+ 'options': ['gc_pauses', 'memory: average duration of GC pauses', 'ns', 'memstats',
+ 'expvar.memstats.gc_pauses', 'line'],
+ 'lines': [
+ ['memstats_gc_pauses', 'avg']
+ ]
+ }
+}
+
+EXPVAR = namedtuple(
+ "EXPVAR",
+ [
+ "key",
+ "type",
+ "id",
+ ]
+)
+
+
+def flatten(d, top='', sep='.'):
+ items = []
+ for key, val in d.items():
+ nkey = top + sep + key if top else key
+ if isinstance(val, dict):
+ items.extend(flatten(val, nkey, sep=sep).items())
+ else:
+ items.append((nkey, val))
+ return dict(items)
+
+
+class Service(UrlService):
+ def __init__(self, configuration=None, name=None):
+ UrlService.__init__(self, configuration=configuration, name=name)
+ # if memstats collection is enabled, add the charts and their order
+ if self.configuration.get('collect_memstats'):
+ self.definitions = dict(MEMSTATS_CHARTS)
+ self.order = list(MEMSTATS_ORDER)
+ else:
+ self.definitions = dict()
+ self.order = list()
+
+ # if extra charts are defined, parse their config
+ extra_charts = self.configuration.get('extra_charts')
+ if extra_charts:
+ self._parse_extra_charts_config(extra_charts)
+
+ def check(self):
+ """
+ Check if the module can collect data:
+ 1) At least one JOB configuration has to be specified
+ 2) The JOB configuration needs to define the URL and either collect_memstats must be enabled or at least one
+ extra_chart must be defined.
+
+ The configuration and URL check is provided by the UrlService class.
+ """
+
+ if not (self.configuration.get('extra_charts') or self.configuration.get('collect_memstats')):
+ self.error('Memstats collection is disabled and no extra_charts are defined, disabling module.')
+ return False
+
+ return UrlService.check(self)
+
+ def _parse_extra_charts_config(self, extra_charts_config):
+
+ # a place to store the expvar keys and their types
+ self.expvars = list()
+
+ for chart in extra_charts_config:
+
+ chart_dict = dict()
+ chart_id = chart.get('id')
+ chart_lines = chart.get('lines')
+ chart_opts = chart.get('options', dict())
+
+ if not all([chart_id, chart_lines]):
+ self.info('Chart {0} has no ID or no lines defined, skipping'.format(chart))
+ continue
+
+ chart_dict['options'] = [
+ chart_opts.get('name', ''),
+ chart_opts.get('title', ''),
+ chart_opts.get('units', ''),
+ chart_opts.get('family', ''),
+ chart_opts.get('context', ''),
+ chart_opts.get('chart_type', 'line')
+ ]
+ chart_dict['lines'] = list()
+
+ # add the lines to the chart
+ for line in chart_lines:
+
+ ev_key = line.get('expvar_key')
+ ev_type = line.get('expvar_type')
+ line_id = line.get('id')
+
+ if not all([ev_key, ev_type, line_id]):
+ self.info('Line missing expvar_key, expvar_type, or line_id, skipping: {0}'.format(line))
+ continue
+
+ if ev_type not in ['int', 'float']:
+ self.info('Unsupported expvar_type "{0}". Must be "int" or "float"'.format(ev_type))
+ continue
+
+ # self.expvars[ev_key] = (ev_type, line_id)
+ self.expvars.append(EXPVAR(ev_key, ev_type, line_id))
+
+ chart_dict['lines'].append(
+ [
+ line.get('id', ''),
+ line.get('name', ''),
+ line.get('algorithm', ''),
+ line.get('multiplier', 1),
+ line.get('divisor', 100 if ev_type == 'float' else 1),
+ line.get('hidden', False)
+ ]
+ )
+
+ self.order.append(chart_id)
+ self.definitions[chart_id] = chart_dict
+
+ def _get_data(self):
+ """
+ Format data received from http request
+ :return: dict
+ """
+
+ raw_data = self._get_raw_data()
+ if not raw_data:
+ return None
+
+ data = json.loads(raw_data)
+
+ expvars = dict()
+ if self.configuration.get('collect_memstats'):
+ expvars.update(self._parse_memstats(data))
+
+ if self.configuration.get('extra_charts'):
+ # the memstats part of the data has been already parsed, so we remove it before flattening and checking
+ # the rest of the data, thus avoiding needless iterating over the multiply nested memstats dict.
+ del (data['memstats'])
+ flattened = flatten(data)
+
+ for ev in self.expvars:
+ v = flattened.get(ev.key)
+
+ if v is None:
+ continue
+
+ try:
+ if ev.type == 'int':
+ expvars[ev.id] = int(v)
+ elif ev.type == 'float':
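+ # floats are scaled by 100 here; the default float divisor of 100 on the chart line restores the value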
+ expvars[ev.id] = float(v) * 100
+ except ValueError:
+ self.info('Failed to parse value for key {0} as {1}, ignoring key.'.format(ev.key, ev.type))
+ continue
+
+ return expvars
+
+ @staticmethod
+ def _parse_memstats(data):
+
+ memstats = data['memstats']
+
+ # calculate the number of live objects in memory
+ live_objs = int(memstats['Mallocs']) - int(memstats['Frees'])
+
+ # calculate GC pause times average
+ # the Go runtime keeps the last 256 GC pause durations in a circular buffer,
+ # so we need to filter out the 0 values before the buffer is filled
+ gc_pauses = memstats['PauseNs']
+ try:
+ gc_pause_avg = sum(gc_pauses) / len([x for x in gc_pauses if x > 0])
+ # no GC cycles have occurred yet
+ except ZeroDivisionError:
+ gc_pause_avg = 0
+
+ return {
+ 'memstats_heap_alloc': memstats['HeapAlloc'],
+ 'memstats_heap_inuse': memstats['HeapInuse'],
+ 'memstats_stack_inuse': memstats['StackInuse'],
+ 'memstats_mspan_inuse': memstats['MSpanInuse'],
+ 'memstats_mcache_inuse': memstats['MCacheInuse'],
+ 'memstats_sys': memstats['Sys'],
+ 'memstats_live_objects': live_objs,
+ 'memstats_gc_pauses': gc_pause_avg,
+ }
diff --git a/collectors/python.d.plugin/go_expvar/go_expvar.conf b/collectors/python.d.plugin/go_expvar/go_expvar.conf
new file mode 100644
index 0000000..4b821cd
--- /dev/null
+++ b/collectors/python.d.plugin/go_expvar/go_expvar.conf
@@ -0,0 +1,108 @@
+# netdata python.d.plugin configuration for go_expvar
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# In addition to the above, this plugin also supports the following:
+#
+# url: 'http://127.0.0.1/debug/vars' # the URL of the expvar endpoint
+#
+# As the plugin cannot possibly know the port your application listens on, there is no default value. Please include
+# the whole path of the endpoint, as the expvar handler can be installed in a non-standard location.
+#
+# if the URL is password protected, the following are supported:
+#
+# user: 'username'
+# pass: 'password'
+#
+# collect_memstats: true # enables charts for Go runtime's memory statistics
+# extra_charts: {} # defines extra data/charts to monitor, please see the example below
+#
+# If collect_memstats is disabled and no extra charts are defined, this module will disable itself, as it has no data to
+# collect.
+#
+# Please visit the module wiki page for more information on how to use the extra_charts variable:
+#
+# https://github.com/netdata/netdata/tree/master/collectors/python.d.plugin/go_expvar
+#
+# Configuration example
+# ---------------------
+
+#app1:
+# name : 'app1'
+# url : 'http://127.0.0.1:8080/debug/vars'
+# collect_memstats: true
+# extra_charts:
+# - id: "runtime_goroutines"
+# options:
+# name: num_goroutines
+# title: "runtime: number of goroutines"
+# units: goroutines
+# family: runtime
+# context: expvar.runtime.goroutines
+# chart_type: line
+# lines:
+# - {expvar_key: 'runtime.goroutines', expvar_type: int, id: runtime_goroutines}
+# - id: "foo_counters"
+# options:
+# name: counters
+# title: "some random counters"
+# units: awesomeness
+# family: counters
+# context: expvar.foo.counters
+# chart_type: line
+# lines:
+# - {expvar_key: 'counters.cnt1', expvar_type: int, id: counters_cnt1}
+# - {expvar_key: 'counters.cnt2', expvar_type: float, id: counters_cnt2}
+
diff --git a/collectors/python.d.plugin/haproxy/Makefile.inc b/collectors/python.d.plugin/haproxy/Makefile.inc
new file mode 100644
index 0000000..ad24dea
--- /dev/null
+++ b/collectors/python.d.plugin/haproxy/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += haproxy/haproxy.chart.py
+dist_pythonconfig_DATA += haproxy/haproxy.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += haproxy/README.md haproxy/Makefile.inc
+
diff --git a/collectors/python.d.plugin/haproxy/README.md b/collectors/python.d.plugin/haproxy/README.md
new file mode 100644
index 0000000..33d34f1
--- /dev/null
+++ b/collectors/python.d.plugin/haproxy/README.md
@@ -0,0 +1,67 @@
+<!--
+title: "HAProxy monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/haproxy/README.md
+sidebar_label: "HAProxy"
+-->
+
+# HAProxy monitoring with Netdata
+
+Monitors frontend and backend metrics such as bytes in, bytes out, current sessions, and current sessions in queue,
+as well as health metrics such as backend server status (a server health check must be configured).
+
+The plugin can obtain data from a URL **OR** a Unix socket.
+
+**Requirement:**
+The socket MUST be readable AND writable by the `netdata` user.
+
+It produces:
+
+1. **Frontend** family charts
+
+ - Kilobytes in/s
+ - Kilobytes out/s
+ - Sessions current
+ - Sessions in queue current
+
+2. **Backend** family charts
+
+ - Kilobytes in/s
+ - Kilobytes out/s
+ - Sessions current
+ - Sessions in queue current
+
+3. **Health** chart
+
+ - number of failed servers for every backend (in DOWN state)
+
+## Configuration
+
+Edit the `python.d/haproxy.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/haproxy.conf
+```
+
+Sample:
+
+```yaml
+via_url:
+ user : 'username' # ONLY IF stats auth is used
+ pass : 'password' # ONLY IF stats auth is used
+ url : 'http://ip.address:port/url;csv;norefresh'
+```
+
+OR
+
+```yaml
+via_socket:
+ socket : 'path/to/haproxy/sock'
+```
+
+If no configuration is given, the module will fail to run.
+
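+For reference, a minimal HAProxy configuration exposing both endpoints might look like the sketch below
+(address, port, and socket path are illustrative):
+
+```
+global
+    # admin-level stats socket; make sure the netdata user can read and write it
+    stats socket /var/run/haproxy/admin.sock mode 660 level admin
+
+listen stats
+    bind 127.0.0.1:7000
+    stats enable
+    stats uri /haproxy_stats
+```
+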
+---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fhaproxy%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/python.d.plugin/haproxy/haproxy.chart.py b/collectors/python.d.plugin/haproxy/haproxy.chart.py
new file mode 100644
index 0000000..6f94c9a
--- /dev/null
+++ b/collectors/python.d.plugin/haproxy/haproxy.chart.py
@@ -0,0 +1,360 @@
+# -*- coding: utf-8 -*-
+# Description: haproxy netdata python.d module
+# Author: ilyam8, ktarasz
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from collections import defaultdict
+from re import compile as re_compile
+
+try:
+ from urlparse import urlparse
+except ImportError:
+ from urllib.parse import urlparse
+
+from bases.FrameworkServices.SocketService import SocketService
+from bases.FrameworkServices.UrlService import UrlService
+
+# charts order (can be overridden if you want less charts, or different order)
+ORDER = [
+ 'fbin',
+ 'fbout',
+ 'fscur',
+ 'fqcur',
+ 'fhrsp_1xx',
+ 'fhrsp_2xx',
+ 'fhrsp_3xx',
+ 'fhrsp_4xx',
+ 'fhrsp_5xx',
+ 'fhrsp_other',
+ 'fhrsp_total',
+ 'bbin',
+ 'bbout',
+ 'bscur',
+ 'bqcur',
+ 'bhrsp_1xx',
+ 'bhrsp_2xx',
+ 'bhrsp_3xx',
+ 'bhrsp_4xx',
+ 'bhrsp_5xx',
+ 'bhrsp_other',
+ 'bhrsp_total',
+ 'bqtime',
+ 'bttime',
+ 'brtime',
+ 'bctime',
+ 'health_sup',
+ 'health_sdown',
+ 'health_bdown',
+ 'health_idle'
+]
+
+CHARTS = {
+ 'fbin': {
+ 'options': [None, 'Kilobytes In', 'KiB/s', 'frontend', 'haproxy_f.bin', 'line'],
+ 'lines': []
+ },
+ 'fbout': {
+ 'options': [None, 'Kilobytes Out', 'KiB/s', 'frontend', 'haproxy_f.bout', 'line'],
+ 'lines': []
+ },
+ 'fscur': {
+ 'options': [None, 'Sessions Active', 'sessions', 'frontend', 'haproxy_f.scur', 'line'],
+ 'lines': []
+ },
+ 'fqcur': {
+ 'options': [None, 'Session In Queue', 'sessions', 'frontend', 'haproxy_f.qcur', 'line'],
+ 'lines': []
+ },
+ 'fhrsp_1xx': {
+ 'options': [None, 'HTTP responses with 1xx code', 'responses/s', 'frontend', 'haproxy_f.hrsp_1xx', 'line'],
+ 'lines': []
+ },
+ 'fhrsp_2xx': {
+ 'options': [None, 'HTTP responses with 2xx code', 'responses/s', 'frontend', 'haproxy_f.hrsp_2xx', 'line'],
+ 'lines': []
+ },
+ 'fhrsp_3xx': {
+ 'options': [None, 'HTTP responses with 3xx code', 'responses/s', 'frontend', 'haproxy_f.hrsp_3xx', 'line'],
+ 'lines': []
+ },
+ 'fhrsp_4xx': {
+ 'options': [None, 'HTTP responses with 4xx code', 'responses/s', 'frontend', 'haproxy_f.hrsp_4xx', 'line'],
+ 'lines': []
+ },
+ 'fhrsp_5xx': {
+ 'options': [None, 'HTTP responses with 5xx code', 'responses/s', 'frontend', 'haproxy_f.hrsp_5xx', 'line'],
+ 'lines': []
+ },
+ 'fhrsp_other': {
+ 'options': [None, 'HTTP responses with other codes (protocol error)', 'responses/s', 'frontend',
+ 'haproxy_f.hrsp_other', 'line'],
+ 'lines': []
+ },
+ 'fhrsp_total': {
+ 'options': [None, 'HTTP responses', 'responses', 'frontend', 'haproxy_f.hrsp_total', 'line'],
+ 'lines': []
+ },
+ 'bbin': {
+ 'options': [None, 'Kilobytes In', 'KiB/s', 'backend', 'haproxy_b.bin', 'line'],
+ 'lines': []
+ },
+ 'bbout': {
+ 'options': [None, 'Kilobytes Out', 'KiB/s', 'backend', 'haproxy_b.bout', 'line'],
+ 'lines': []
+ },
+ 'bscur': {
+ 'options': [None, 'Sessions Active', 'sessions', 'backend', 'haproxy_b.scur', 'line'],
+ 'lines': []
+ },
+ 'bqcur': {
+ 'options': [None, 'Sessions In Queue', 'sessions', 'backend', 'haproxy_b.qcur', 'line'],
+ 'lines': []
+ },
+ 'bhrsp_1xx': {
+ 'options': [None, 'HTTP responses with 1xx code', 'responses/s', 'backend', 'haproxy_b.hrsp_1xx', 'line'],
+ 'lines': []
+ },
+ 'bhrsp_2xx': {
+ 'options': [None, 'HTTP responses with 2xx code', 'responses/s', 'backend', 'haproxy_b.hrsp_2xx', 'line'],
+ 'lines': []
+ },
+ 'bhrsp_3xx': {
+ 'options': [None, 'HTTP responses with 3xx code', 'responses/s', 'backend', 'haproxy_b.hrsp_3xx', 'line'],
+ 'lines': []
+ },
+ 'bhrsp_4xx': {
+ 'options': [None, 'HTTP responses with 4xx code', 'responses/s', 'backend', 'haproxy_b.hrsp_4xx', 'line'],
+ 'lines': []
+ },
+ 'bhrsp_5xx': {
+ 'options': [None, 'HTTP responses with 5xx code', 'responses/s', 'backend', 'haproxy_b.hrsp_5xx', 'line'],
+ 'lines': []
+ },
+ 'bhrsp_other': {
+ 'options': [None, 'HTTP responses with other codes (protocol error)', 'responses/s', 'backend',
+ 'haproxy_b.hrsp_other', 'line'],
+ 'lines': []
+ },
+ 'bhrsp_total': {
+ 'options': [None, 'HTTP responses (total)', 'responses/s', 'backend', 'haproxy_b.hrsp_total', 'line'],
+ 'lines': []
+ },
+ 'bqtime': {
+ 'options': [None, 'The average queue time over the 1024 last requests', 'milliseconds', 'backend',
+ 'haproxy_b.qtime', 'line'],
+ 'lines': []
+ },
+ 'bctime': {
+ 'options': [None, 'The average connect time over the 1024 last requests', 'milliseconds', 'backend',
+ 'haproxy_b.ctime', 'line'],
+ 'lines': []
+ },
+ 'brtime': {
+ 'options': [None, 'The average response time over the 1024 last requests', 'milliseconds', 'backend',
+ 'haproxy_b.rtime', 'line'],
+ 'lines': []
+ },
+ 'bttime': {
+ 'options': [None, 'The average total session time over the 1024 last requests', 'milliseconds', 'backend',
+ 'haproxy_b.ttime', 'line'],
+ 'lines': []
+ },
+ 'health_sdown': {
+ 'options': [None, 'Backend Servers In DOWN State', 'failed servers', 'health', 'haproxy_hs.down', 'line'],
+ 'lines': []
+ },
+ 'health_sup': {
+ 'options': [None, 'Backend Servers In UP State', 'health servers', 'health', 'haproxy_hs.up', 'line'],
+ 'lines': []
+ },
+ 'health_bdown': {
+ 'options': [None, 'Is Backend Failed?', 'boolean', 'health', 'haproxy_hb.down', 'line'],
+ 'lines': []
+ },
+ 'health_idle': {
+ 'options': [None, 'The Ratio Of Polling Time Vs Total Time', 'percentage', 'health', 'haproxy.idle', 'line'],
+ 'lines': [
+ ['idle', None, 'absolute']
+ ]
+ }
+}
+
+METRICS = {
+ 'bin': {'algorithm': 'incremental', 'divisor': 1024},
+ 'bout': {'algorithm': 'incremental', 'divisor': 1024},
+ 'scur': {'algorithm': 'absolute', 'divisor': 1},
+ 'qcur': {'algorithm': 'absolute', 'divisor': 1},
+ 'hrsp_1xx': {'algorithm': 'incremental', 'divisor': 1},
+ 'hrsp_2xx': {'algorithm': 'incremental', 'divisor': 1},
+ 'hrsp_3xx': {'algorithm': 'incremental', 'divisor': 1},
+ 'hrsp_4xx': {'algorithm': 'incremental', 'divisor': 1},
+ 'hrsp_5xx': {'algorithm': 'incremental', 'divisor': 1},
+ 'hrsp_other': {'algorithm': 'incremental', 'divisor': 1}
+}
+
+BACKEND_METRICS = {
+ 'qtime': {'algorithm': 'absolute', 'divisor': 1},
+ 'ctime': {'algorithm': 'absolute', 'divisor': 1},
+ 'rtime': {'algorithm': 'absolute', 'divisor': 1},
+ 'ttime': {'algorithm': 'absolute', 'divisor': 1}
+}
+
+REGEX = dict(url=re_compile(r'idle = (?P<idle>[0-9]+)'),
+ socket=re_compile(r'Idle_pct: (?P<idle>[0-9]+)'))
+
+
+# TODO: the code is unreadable
+class Service(UrlService, SocketService):
+ def __init__(self, configuration=None, name=None):
+ if 'socket' in configuration:
+ SocketService.__init__(self, configuration=configuration, name=name)
+ self.poll = SocketService
+ self.options_ = dict(regex=REGEX['socket'],
+ stat='show stat\n'.encode(),
+ info='show info\n'.encode())
+ else:
+ UrlService.__init__(self, configuration=configuration, name=name)
+ self.poll = UrlService
+ self.options_ = dict(regex=REGEX['url'],
+ stat=self.url,
+ info=url_remove_params(self.url))
+ self.order = ORDER
+ self.definitions = CHARTS
+
+ def check(self):
+ if self.poll.check(self):
+ self.create_charts()
+ self.info('We are using %s.' % self.poll.__name__)
+ return True
+ return False
+
+ def _get_data(self):
+ to_netdata = dict()
+ self.request, self.url = self.options_['stat'], self.options_['stat']
+ stat_data = self._get_stat_data()
+ self.request, self.url = self.options_['info'], self.options_['info']
+ info_data = self._get_info_data(regex=self.options_['regex'])
+
+ to_netdata.update(stat_data)
+ to_netdata.update(info_data)
+ return to_netdata or None
+
+ def _get_stat_data(self):
+ """
+ :return: dict
+ """
+ raw_data = self.poll._get_raw_data(self)
+
+ if not raw_data:
+ return dict()
+
+ raw_data = raw_data.splitlines()
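+ # the first line is the CSV header (it starts with "# pxname"); zip it with every
+ # subsequent line to build one dict per proxy/server entry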
+ self.data = parse_data_([dict(zip(raw_data[0].split(','), raw_data[_].split(',')))
+ for _ in range(1, len(raw_data))])
+ if not self.data:
+ return dict()
+
+ stat_data = dict()
+
+ for frontend in self.data['frontend']:
+ for metric in METRICS:
+ idx = frontend['# pxname'].replace('.', '_')
+ stat_data['_'.join(['frontend', metric, idx])] = frontend.get(metric) or 0
+
+ for backend in self.data['backend']:
+ name, idx = backend['# pxname'], backend['# pxname'].replace('.', '_')
+ stat_data['hsup_' + idx] = len([server for server in self.data['servers']
+ if server_status(server, name, 'UP')])
+ stat_data['hsdown_' + idx] = len([server for server in self.data['servers']
+ if server_status(server, name, 'DOWN')])
+ stat_data['hbdown_' + idx] = 1 if backend.get('status') == 'DOWN' else 0
+ for metric in BACKEND_METRICS:
+ stat_data['_'.join(['backend', metric, idx])] = backend.get(metric) or 0
+ hrsp_total = 0
+ for metric in METRICS:
+ stat_data['_'.join(['backend', metric, idx])] = backend.get(metric) or 0
+ if metric.startswith('hrsp_'):
+ hrsp_total += int(backend.get(metric) or 0)
+ stat_data['_'.join(['backend', 'hrsp_total', idx])] = hrsp_total
+ return stat_data
+
+ def _get_info_data(self, regex):
+ """
+ :return: dict
+ """
+ raw_data = self.poll._get_raw_data(self)
+ if not raw_data:
+ return dict()
+
+ match = regex.search(raw_data)
+ return match.groupdict() if match else dict()
+
+ @staticmethod
+ def _check_raw_data(data):
+ """
+ Check if all data has been gathered from socket
+ :param data: str
+ :return: boolean
+ """
+ return not bool(data)
+
+ def create_charts(self):
+ for front in self.data['frontend']:
+ name, idx = front['# pxname'], front['# pxname'].replace('.', '_')
+ for metric in METRICS:
+ self.definitions['f' + metric]['lines'].append(['_'.join(['frontend', metric, idx]),
+ name, METRICS[metric]['algorithm'], 1,
+ METRICS[metric]['divisor']])
+ self.definitions['fhrsp_total']['lines'].append(['_'.join(['frontend', 'hrsp_total', idx]),
+ name, 'incremental', 1, 1])
+ for back in self.data['backend']:
+ name, idx = back['# pxname'], back['# pxname'].replace('.', '_')
+ for metric in METRICS:
+ self.definitions['b' + metric]['lines'].append(['_'.join(['backend', metric, idx]),
+ name, METRICS[metric]['algorithm'], 1,
+ METRICS[metric]['divisor']])
+ self.definitions['bhrsp_total']['lines'].append(['_'.join(['backend', 'hrsp_total', idx]),
+ name, 'incremental', 1, 1])
+ for metric in BACKEND_METRICS:
+ self.definitions['b' + metric]['lines'].append(['_'.join(['backend', metric, idx]),
+ name, BACKEND_METRICS[metric]['algorithm'], 1,
+ BACKEND_METRICS[metric]['divisor']])
+ self.definitions['health_sup']['lines'].append(['hsup_' + idx, name, 'absolute'])
+ self.definitions['health_sdown']['lines'].append(['hsdown_' + idx, name, 'absolute'])
+ self.definitions['health_bdown']['lines'].append(['hbdown_' + idx, name, 'absolute'])
+
+
+def parse_data_(data):
+ def is_backend(backend):
+ return backend.get('svname') == 'BACKEND' and backend.get('# pxname') != 'stats'
+
+ def is_frontend(frontend):
+ return frontend.get('svname') == 'FRONTEND' and frontend.get('# pxname') != 'stats'
+
+ def is_server(server):
+ return not server.get('svname', '').startswith(('FRONTEND', 'BACKEND'))
+
+ if not data:
+ return None
+
+ result = defaultdict(list)
+ for elem in data:
+ if is_backend(elem):
+ result['backend'].append(elem)
+ continue
+ elif is_frontend(elem):
+ result['frontend'].append(elem)
+ continue
+ elif is_server(elem):
+ result['servers'].append(elem)
+
+ return result or None
+
+
+def server_status(server, backend_name, status='DOWN'):
+ return server.get('# pxname') == backend_name and server.get('status') == status
+
+
+def url_remove_params(url):
+ parsed = urlparse(url or str())
+ return '{scheme}://{netloc}{path}'.format(scheme=parsed.scheme, netloc=parsed.netloc, path=parsed.path)
diff --git a/collectors/python.d.plugin/haproxy/haproxy.conf b/collectors/python.d.plugin/haproxy/haproxy.conf
new file mode 100644
index 0000000..10a0df3
--- /dev/null
+++ b/collectors/python.d.plugin/haproxy/haproxy.conf
@@ -0,0 +1,83 @@
+# netdata python.d.plugin configuration for haproxy
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# In addition to the above, haproxy also supports the following:
+#
+# IMPORTANT: socket MUST BE readable AND writable by netdata user
+#
+# socket: 'path/to/haproxy/sock'
+#
+# OR
+# url: 'http://<ip.address>:<port>/<url>;csv;norefresh'
+# [user: USERNAME] only if stats auth is used
+# [pass: PASSWORD] only if stats auth is used
+
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+
+#via_url:
+# user : 'admin'
+# pass : 'password'
+# url : 'http://127.0.0.1:7000/haproxy_stats;csv;norefresh'
+
+#via_socket:
+# socket: '/var/run/haproxy/admin.sock'
diff --git a/collectors/python.d.plugin/hddtemp/Makefile.inc b/collectors/python.d.plugin/hddtemp/Makefile.inc
new file mode 100644
index 0000000..22852b6
--- /dev/null
+++ b/collectors/python.d.plugin/hddtemp/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += hddtemp/hddtemp.chart.py
+dist_pythonconfig_DATA += hddtemp/hddtemp.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += hddtemp/README.md hddtemp/Makefile.inc
+
diff --git a/collectors/python.d.plugin/hddtemp/README.md b/collectors/python.d.plugin/hddtemp/README.md
new file mode 100644
index 0000000..aaaf214
--- /dev/null
+++ b/collectors/python.d.plugin/hddtemp/README.md
@@ -0,0 +1,38 @@
+<!--
+title: "Hard drive temperature monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/hddtemp/README.md
+sidebar_label: "Hard drive temperature"
+-->
+
+# Hard drive temperature monitoring with Netdata
+
+Monitors disk temperatures from one or more `hddtemp` daemons.
+
+**Requirement:**
+A running `hddtemp` daemon, accessible over a TCP port.
+
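+A quick way to satisfy this is to start the daemon yourself (the device list is illustrative; `hddtemp`
+listens on TCP port 7634 by default):
+
+```bash
+sudo hddtemp -d -p 7634 /dev/sda /dev/sdb
+```
+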
+It produces one chart (**Disks Temperatures**) with a dynamic number of dimensions (one per disk).
+
+## Configuration
+
+Edit the `python.d/hddtemp.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/hddtemp.conf
+```
+
+Sample:
+
+```yaml
+update_every: 3
+host: "127.0.0.1"
+port: 7634
+```
+
+If no configuration is given, the module will attempt to connect to the hddtemp daemon at `127.0.0.1:7634`.
+
+---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fhddtemp%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/python.d.plugin/hddtemp/hddtemp.chart.py b/collectors/python.d.plugin/hddtemp/hddtemp.chart.py
new file mode 100644
index 0000000..6427aa1
--- /dev/null
+++ b/collectors/python.d.plugin/hddtemp/hddtemp.chart.py
@@ -0,0 +1,99 @@
+# -*- coding: utf-8 -*-
+# Description: hddtemp netdata python.d module
+# Author: Pawel Krupa (paulfantom)
+# Author: Ilya Mashchenko (ilyam8)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+
+import re
+from copy import deepcopy
+
+from bases.FrameworkServices.SocketService import SocketService
+
+ORDER = [
+ 'temperatures',
+]
+
+CHARTS = {
+ 'temperatures': {
+ 'options': ['disks_temp', 'Disks Temperatures', 'Celsius', 'temperatures', 'hddtemp.temperatures', 'line'],
+ 'lines': [
+ # lines are created dynamically in `check()` method
+ ]}}
+
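+# example daemon output, one "|"-separated record per disk (model names are illustrative):
+# |/dev/sda|ST1000DM003-1CH162|35|C||/dev/sdb|WDC WD10EZEX-08M|36|C|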
+RE = re.compile(r'\/dev\/([^|]+)\|([^|]+)\|([0-9]+|SLP|UNK)\|')
+
+
+class Disk:
+ def __init__(self, id_, name, temp):
+ self.id = id_.split('/')[-1]
+ self.name = name.replace(' ', '_')
+ self.temp = temp if temp.isdigit() else None
+
+ def __repr__(self):
+ return self.id
+
+
+class Service(SocketService):
+ def __init__(self, configuration=None, name=None):
+ SocketService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = deepcopy(CHARTS)
+ self.do_only = self.configuration.get('devices')
+ self._keep_alive = False
+ self.request = ""
+ self.host = "127.0.0.1"
+ self.port = 7634
+
+ def get_disks(self):
+ r = self._get_raw_data()
+
+ if not r:
+ return None
+
+ m = RE.findall(r)
+
+ if not m:
+ self.error("received data doesn't have needed records")
+ return None
+
+ rv = [Disk(*d) for d in m]
+ self.debug('available disks: {0}'.format(rv))
+
+ if self.do_only:
+ return [v for v in rv if v.id in self.do_only]
+ return rv
+
+ def get_data(self):
+ """
+ Get data from TCP/IP socket
+ :return: dict
+ """
+
+ disks = self.get_disks()
+
+ if not disks:
+ return None
+
+ return dict((d.id, d.temp) for d in disks)
+
+ def check(self):
+ """
+ Parse configuration, check if hddtemp is available, and dynamically create chart lines data
+ :return: boolean
+ """
+ self._parse_config()
+ disks = self.get_disks()
+
+ if not disks:
+ return False
+
+ for d in disks:
+ dim = [d.id]
+ self.definitions['temperatures']['lines'].append(dim)
+
+ return True
+
+ @staticmethod
+ def _check_raw_data(data):
+ return not bool(data)
diff --git a/collectors/python.d.plugin/hddtemp/hddtemp.conf b/collectors/python.d.plugin/hddtemp/hddtemp.conf
new file mode 100644
index 0000000..b2d7aef
--- /dev/null
+++ b/collectors/python.d.plugin/hddtemp/hddtemp.conf
@@ -0,0 +1,95 @@
+# netdata python.d.plugin configuration for hddtemp
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# In addition to the above, hddtemp also supports the following:
+#
+# host: 'IP or HOSTNAME' # the host to connect to
+# port: PORT # the port to connect to
+#
+
+# By default this module will try to autodetect disks
+# (autodetection works only for disks whose names start with "sd").
+# However, this can be overridden by setting the `devices` variable to
+# an array of desired disks. Example for two disks:
+#
+# devices:
+# - sda
+# - sdb
+#
+
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+
+localhost:
+ name: 'local'
+ host: 'localhost'
+ port: 7634
+
+localipv4:
+ name: 'local'
+ host: '127.0.0.1'
+ port: 7634
+
+localipv6:
+ name: 'local'
+ host: '::1'
+ port: 7634
diff --git a/collectors/python.d.plugin/hpssa/Makefile.inc b/collectors/python.d.plugin/hpssa/Makefile.inc
new file mode 100644
index 0000000..1c04aa4
--- /dev/null
+++ b/collectors/python.d.plugin/hpssa/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += hpssa/hpssa.chart.py
+dist_pythonconfig_DATA += hpssa/hpssa.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += hpssa/README.md hpssa/Makefile.inc
+
diff --git a/collectors/python.d.plugin/hpssa/README.md b/collectors/python.d.plugin/hpssa/README.md
new file mode 100644
index 0000000..af8c437
--- /dev/null
+++ b/collectors/python.d.plugin/hpssa/README.md
@@ -0,0 +1,81 @@
+<!--
+title: "HP Smart Storage Arrays monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/hpssa/README.md
+sidebar_label: "HP Smart Storage Arrays"
+-->
+
+# HP Smart Storage Arrays monitoring with Netdata
+
+Monitors controller, cache module, logical and physical drive state and temperature using the `ssacli` tool.
+
+Executed commands:
+
+- `sudo -n ssacli ctrl all show config detail`
+
+## Requirements
+
+This module uses `ssacli`, which can only be executed by root. It uses
+`sudo` and assumes that it is configured such that the `netdata` user can execute `ssacli` as root without a password.
+
+- Add the following line to your `/etc/sudoers` file, replacing
+  `/path/to/ssacli` with the output of `which ssacli`:
+
+```bash
+netdata ALL=(root) NOPASSWD: /path/to/ssacli
+```
+
+- Reset Netdata's systemd
+ unit [CapabilityBoundingSet](https://www.freedesktop.org/software/systemd/man/systemd.exec.html#Capabilities) (Linux
+ distributions with systemd)
+
+The default CapabilityBoundingSet doesn't allow using `sudo`, and is quite strict in general. Resetting it is not optimal, but it is the next-best solution, since without it the plugin cannot execute `ssacli` via `sudo`.
+
+As the `root` user, do the following:
+
+```bash
+mkdir /etc/systemd/system/netdata.service.d
+echo -e '[Service]\nCapabilityBoundingSet=~' | tee /etc/systemd/system/netdata.service.d/unset-capability-bounding-set.conf
+systemctl daemon-reload
+systemctl restart netdata.service
+```
+
+## Charts
+
+- Controller status
+- Controller temperature
+- Logical drive status
+- Physical drive status
+- Physical drive temperature
+
+## Enable the collector
+
+The `hpssa` collector is disabled by default. To enable it, use `edit-config` from the
+Netdata [config directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`, to edit the `python.d.conf`
+file.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d.conf
+```
+
+Change the value of the `hpssa` setting to `yes`. Save the file and restart the Netdata Agent
+with `sudo systemctl restart netdata`, or the appropriate method for your system.
+
+## Configuration
+
+Edit the `python.d/hpssa.conf` configuration file using `edit-config` from the
+Netdata [config directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/hpssa.conf
+```
+
+If `ssacli` cannot be found in the `PATH`, configure it in `hpssa.conf`.
+
+```yaml
+ssacli_path: /usr/sbin/ssacli
+```
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fhpssa%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/hpssa/hpssa.chart.py b/collectors/python.d.plugin/hpssa/hpssa.chart.py
new file mode 100644
index 0000000..ce1b430
--- /dev/null
+++ b/collectors/python.d.plugin/hpssa/hpssa.chart.py
@@ -0,0 +1,395 @@
+# -*- coding: utf-8 -*-
+# Description: hpssa netdata python.d module
+# Author: Peter Gnodde (gnoddep)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import os
+import re
+from copy import deepcopy
+
+from bases.FrameworkServices.ExecutableService import ExecutableService
+from bases.collection import find_binary
+
+disabled_by_default = True
+update_every = 5
+
+ORDER = [
+ 'ctrl_status',
+ 'ctrl_temperature',
+ 'ld_status',
+ 'pd_status',
+ 'pd_temperature',
+]
+
+CHARTS = {
+ 'ctrl_status': {
+ 'options': [
+ None,
+ 'Status 1 is OK, Status 0 is not OK',
+ 'Status',
+ 'Controller',
+ 'hpssa.ctrl_status',
+ 'line'
+ ],
+ 'lines': []
+ },
+ 'ctrl_temperature': {
+ 'options': [
+ None,
+ 'Temperature',
+ 'Celsius',
+ 'Controller',
+ 'hpssa.ctrl_temperature',
+ 'line'
+ ],
+ 'lines': []
+ },
+ 'ld_status': {
+ 'options': [
+ None,
+ 'Status 1 is OK, Status 0 is not OK',
+ 'Status',
+ 'Logical drives',
+ 'hpssa.ld_status',
+ 'line'
+ ],
+ 'lines': []
+ },
+ 'pd_status': {
+ 'options': [
+ None,
+ 'Status 1 is OK, Status 0 is not OK',
+ 'Status',
+ 'Physical drives',
+ 'hpssa.pd_status',
+ 'line'
+ ],
+ 'lines': []
+ },
+ 'pd_temperature': {
+ 'options': [
+ None,
+ 'Temperature',
+ 'Celsius',
+ 'Physical drives',
+ 'hpssa.pd_temperature',
+ 'line'
+ ],
+ 'lines': []
+ }
+}
+
+adapter_regex = re.compile(r'^(?P<adapter_type>.+) in Slot (?P<slot>\d+)')
+ignored_sections_regex = re.compile(
+ r'''
+ ^
+ Physical[ ]Drives
+ | None[ ]attached
+ | (?:Expander|Enclosure|SEP|Port[ ]Name:)[ ].+
+ | .+[ ]at[ ]Port[ ]\S+,[ ]Box[ ]\d+,[ ].+
+ | Mirror[ ]Group[ ]\d+:
+ $
+ ''',
+ re.X
+)
+mirror_group_regex = re.compile(r'^Mirror Group \d+:$')
+array_regex = re.compile(r'^Array: (?P<id>[A-Z]+)$')
+drive_regex = re.compile(
+ r'''
+ ^
+ Logical[ ]Drive:[ ](?P<logical_drive_id>\d+)
+ | physicaldrive[ ](?P<fqn>[^:]+:\d+:\d+)
+ $
+ ''',
+ re.X
+)
+key_value_regex = re.compile(r'^(?P<key>[^:]+): ?(?P<value>.*)$')
+ld_status_regex = re.compile(r'^Status: (?P<status>[^,]+)(?:, (?P<percentage>[0-9.]+)% complete)?$')
+error_match = re.compile(r'Error:')
+
+
+class HPSSAException(Exception):
+ pass
+
+
+class HPSSA(object):
+ def __init__(self, lines):
+ self.lines = [line.strip() for line in lines if line.strip()]
+ self.current_line = 0
+ self.adapters = []
+ self.parse()
+
+ def __iter__(self):
+ return self
+
+ def __next__(self):
+ if self.current_line == len(self.lines):
+ raise StopIteration
+
+ line = self.lines[self.current_line]
+ self.current_line += 1
+
+ return line
+
+ def next(self):
+ """
+ This is for Python 2.7 compatibility
+ """
+ return self.__next__()
+
+ def rewind(self):
+ self.current_line = max(self.current_line - 1, 0)
+
+ @staticmethod
+ def match_any(line, *regexes):
+ return any([regex.match(line) for regex in regexes])
+
+ def parse(self):
+ for line in self:
+ match = adapter_regex.match(line)
+ if match:
+ self.adapters.append(self.parse_adapter(**match.groupdict()))
+
+ def parse_adapter(self, slot, adapter_type):
+ adapter = {
+ 'slot': int(slot),
+ 'type': adapter_type,
+
+ 'controller': {
+ 'status': None,
+ 'temperature': None,
+ },
+ 'cache': {
+ 'present': False,
+ 'status': None,
+ 'temperature': None,
+ },
+ 'battery': {
+ 'status': None,
+ 'count': 0,
+ },
+
+ 'logical_drives': [],
+ 'physical_drives': [],
+ }
+
+ for line in self:
+ if error_match.match(line):
+ raise HPSSAException('Error: {}'.format(line))
+ elif adapter_regex.match(line):
+ self.rewind()
+ break
+ elif array_regex.match(line):
+ self.parse_array(adapter)
+ elif line == 'Unassigned' or line == 'HBA Drives':
+ self.parse_physical_drives(adapter)
+ elif ignored_sections_regex.match(line):
+ self.parse_ignored_section()
+ else:
+ match = key_value_regex.match(line)
+ if match:
+ key, value = match.group('key', 'value')
+ if key == 'Controller Status':
+ adapter['controller']['status'] = value == 'OK'
+ elif key == 'Controller Temperature (C)':
+ adapter['controller']['temperature'] = int(value)
+ elif key == 'Cache Board Present':
+ adapter['cache']['present'] = value == 'True'
+ elif key == 'Cache Status':
+ adapter['cache']['status'] = value == 'OK'
+ elif key == 'Cache Module Temperature (C)':
+ adapter['cache']['temperature'] = int(value)
+ elif key == 'Battery/Capacitor Count':
+ adapter['battery']['count'] = int(value)
+ elif key == 'Battery/Capacitor Status':
+ adapter['battery']['status'] = value == 'OK'
+ else:
+ raise HPSSAException('Cannot parse line: {}'.format(line))
+
+ return adapter
+
+ def parse_array(self, adapter):
+ for line in self:
+ if HPSSA.match_any(line, adapter_regex, array_regex, ignored_sections_regex):
+ self.rewind()
+ break
+
+ match = drive_regex.match(line)
+ if match:
+ data = match.groupdict()
+ if data['logical_drive_id']:
+ self.parse_logical_drive(adapter, int(data['logical_drive_id']))
+ else:
+ self.parse_physical_drive(adapter, data['fqn'])
+ elif not key_value_regex.match(line):
+ self.rewind()
+ break
+
+ def parse_physical_drives(self, adapter):
+ for line in self:
+ match = drive_regex.match(line)
+ if match:
+ self.parse_physical_drive(adapter, match.group('fqn'))
+ else:
+ self.rewind()
+ break
+
+ def parse_logical_drive(self, adapter, logical_drive_id):
+ ld = {
+ 'id': logical_drive_id,
+ 'status': None,
+ 'status_complete': None,
+ }
+
+ for line in self:
+ if mirror_group_regex.match(line):
+ self.parse_ignored_section()
+ continue
+
+ match = ld_status_regex.match(line)
+ if match:
+ ld['status'] = match.group('status') == 'OK'
+
+ if match.group('percentage'):
+ ld['status_complete'] = float(match.group('percentage')) / 100
+ elif HPSSA.match_any(line, adapter_regex, array_regex, drive_regex, ignored_sections_regex) \
+ or not key_value_regex.match(line):
+ self.rewind()
+ break
+
+ adapter['logical_drives'].append(ld)
+
+ def parse_physical_drive(self, adapter, fqn):
+ pd = {
+ 'fqn': fqn,
+ 'status': None,
+ 'temperature': None,
+ }
+
+ for line in self:
+ if HPSSA.match_any(line, adapter_regex, array_regex, drive_regex, ignored_sections_regex):
+ self.rewind()
+ break
+
+ match = key_value_regex.match(line)
+ if match:
+ key, value = match.group('key', 'value')
+ if key == 'Status':
+ pd['status'] = value == 'OK'
+ elif key == 'Current Temperature (C)':
+ pd['temperature'] = int(value)
+ else:
+ self.rewind()
+ break
+
+ adapter['physical_drives'].append(pd)
+
+ def parse_ignored_section(self):
+ for line in self:
+ if HPSSA.match_any(line, adapter_regex, array_regex, drive_regex, ignored_sections_regex) \
+ or not key_value_regex.match(line):
+ self.rewind()
+ break
+
+
+class Service(ExecutableService):
+ def __init__(self, configuration=None, name=None):
+ super(Service, self).__init__(configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = deepcopy(CHARTS)
+ self.ssacli_path = self.configuration.get('ssacli_path', 'ssacli')
+ self.use_sudo = self.configuration.get('use_sudo', True)
+ self.cmd = []
+
+ def get_adapters(self):
+ try:
+ adapters = HPSSA(self._get_raw_data(command=self.cmd)).adapters
+ if not adapters:
+ # If no adapters are returned, run the command again but capture stderr
+ err = self._get_raw_data(command=self.cmd, stderr=True)
+ if err:
+ raise HPSSAException('Error executing cmd {}: {}'.format(' '.join(self.cmd), '\n'.join(err)))
+ return adapters
+ except HPSSAException as ex:
+ self.error(ex)
+ return []
+
+ def check(self):
+ if not os.path.isfile(self.ssacli_path):
+ ssacli_path = find_binary(self.ssacli_path)
+ if ssacli_path:
+ self.ssacli_path = ssacli_path
+ else:
+ self.error('Cannot locate "{}" binary'.format(self.ssacli_path))
+ return False
+
+ if self.use_sudo:
+ sudo = find_binary('sudo')
+ if not sudo:
+ self.error('Cannot locate "{}" binary'.format('sudo'))
+ return False
+
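+ # 'sudo -n -l <command>' prints the command's path if the netdata user may run it without a password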
+ allowed = self._get_raw_data(command=[sudo, '-n', '-l', self.ssacli_path])
+ if not allowed or allowed[0].strip() != os.path.realpath(self.ssacli_path):
+ self.error('Not allowed to run sudo for command {}'.format(self.ssacli_path))
+ return False
+
+ self.cmd = [sudo, '-n']
+
+ self.cmd.extend([self.ssacli_path, 'ctrl', 'all', 'show', 'config', 'detail'])
+ self.info('Command: {}'.format(self.cmd))
+
+ adapters = self.get_adapters()
+
+ self.info('Discovered adapters: {}'.format([adapter['type'] for adapter in adapters]))
+ if not adapters:
+ self.error('No adapters discovered')
+ return False
+
+ return True
+
+ def get_data(self):
+ netdata = {}
+
+ for adapter in self.get_adapters():
+ status_key = '{}_status'.format(adapter['slot'])
+ temperature_key = '{}_temperature'.format(adapter['slot'])
+ ld_key = 'ld_{}_'.format(adapter['slot'])
+
+ data = {
+ 'ctrl_status': {
+ 'ctrl_' + status_key: adapter['controller']['status'],
+ 'cache_' + status_key: adapter['cache']['present'] and adapter['cache']['status'],
+ 'battery_' + status_key:
+ adapter['battery']['status'] if adapter['battery']['count'] > 0 else None
+ },
+
+ 'ctrl_temperature': {
+ 'ctrl_' + temperature_key: adapter['controller']['temperature'],
+ 'cache_' + temperature_key: adapter['cache']['temperature'],
+ },
+
+ 'ld_status': {
+ ld_key + '{}_status'.format(ld['id']): ld['status'] for ld in adapter['logical_drives']
+ },
+
+ 'pd_status': {},
+ 'pd_temperature': {},
+ }
+
+ for pd in adapter['physical_drives']:
+ pd_key = 'pd_{}_{}'.format(adapter['slot'], pd['fqn'])
+ data['pd_status'][pd_key + '_status'] = pd['status']
+ data['pd_temperature'][pd_key + '_temperature'] = pd['temperature']
+
+ for chart, dimension_data in data.items():
+ for dimension_id, value in dimension_data.items():
+ if value is None:
+ continue
+
+ if dimension_id not in self.charts[chart]:
+ self.charts[chart].add_dimension([dimension_id])
+
+ netdata[dimension_id] = value
+
+ return netdata
diff --git a/collectors/python.d.plugin/hpssa/hpssa.conf b/collectors/python.d.plugin/hpssa/hpssa.conf
new file mode 100644
index 0000000..cc50c98
--- /dev/null
+++ b/collectors/python.d.plugin/hpssa/hpssa.conf
@@ -0,0 +1,61 @@
+# netdata python.d.plugin configuration for hpssa
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 5
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 5 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# Additionally to the above, hpssa also supports the following:
+#
+# ssacli_path: /usr/sbin/ssacli # The path to the ssacli executable
+# use_sudo: True # Whether to use sudo or not
+# ----------------------------------------------------------------------
+
+# ssacli_path: /usr/sbin/ssacli
+# use_sudo: True
diff --git a/collectors/python.d.plugin/httpcheck/Makefile.inc b/collectors/python.d.plugin/httpcheck/Makefile.inc
new file mode 100644
index 0000000..4a5bd85
--- /dev/null
+++ b/collectors/python.d.plugin/httpcheck/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += httpcheck/httpcheck.chart.py
+dist_pythonconfig_DATA += httpcheck/httpcheck.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += httpcheck/README.md httpcheck/Makefile.inc
+
diff --git a/collectors/python.d.plugin/httpcheck/README.md b/collectors/python.d.plugin/httpcheck/README.md
new file mode 100644
index 0000000..55aad52
--- /dev/null
+++ b/collectors/python.d.plugin/httpcheck/README.md
@@ -0,0 +1,59 @@
+<!--
+title: "HTTP endpoint monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/httpcheck/README.md
+sidebar_label: "HTTP endpoints"
+-->
+
+# HTTP endpoint monitoring with Netdata
+
+Monitors a remote HTTP server for availability and response time.
+
+The following charts are drawn per job:
+
+1. **Response time** ms
+
+ - The time the server takes to respond, at 0.1 ms resolution.
+ If the connection failed, the value is missing.
+
+2. **Status** boolean
+
+ - Connection successful
+ - Unexpected content: no regex match found in the response
+ - Unexpected status code: the response code is not in `status_accepted` (e.g. a 500 error)
+ - Connection failed: port not listening or blocked
+ - Connection timed out: host or port unreachable
+
+## Configuration
+
+Edit the `python.d/httpcheck.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/httpcheck.conf
+```
+
+A sample configuration with its default values:
+
+```yaml
+server:
+ url: 'http://host:port/path' # required
+ status_accepted: # optional
+ - 200
+ timeout: 1 # optional, supports decimals (e.g. 0.2)
+ update_every: 3 # optional
+ regex: 'REGULAR_EXPRESSION' # optional, see https://docs.python.org/3/howto/regex.html
+ redirect: yes # optional
+```
+
+### Notes
+
+- The status chart is primarily intended for alarms, badges or for access via API.
+- A system/service/firewall might block Netdata's access if a portscan or
+ similar is detected.
+- This plugin is meant for simple use cases. Currently, the accuracy of the
+ response time is low, so it should be used as a reference only.
+
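+For reference, a minimal sketch of the content check (hypothetical values; the
+collector applies the configured regex with Python's `re.search`, so the
+pattern may match anywhere in the response body):
+
+```python
+import re
+
+pattern = re.compile(r'<title>My cool website!</title>')  # the configured 'regex'
+body = '<html><head><title>My cool website!</title></head></html>'  # response content
+
+# mirrors process_response(): accepted status code + regex match -> success
+print('success' if pattern.search(body) else 'bad_content')
+```
+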
+---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fhttpcheck%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/python.d.plugin/httpcheck/httpcheck.chart.py b/collectors/python.d.plugin/httpcheck/httpcheck.chart.py
new file mode 100644
index 0000000..75718bb
--- /dev/null
+++ b/collectors/python.d.plugin/httpcheck/httpcheck.chart.py
@@ -0,0 +1,125 @@
+# -*- coding: utf-8 -*-
+# Description: http check netdata python.d module
+# Original Author: ccremer (github.com/ccremer)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import re
+
+import urllib3
+
+try:
+ from time import monotonic as time
+except ImportError:
+ from time import time
+
+from bases.FrameworkServices.UrlService import UrlService
+
+# default module values (can be overridden per job in `config`)
+update_every = 3
+priority = 60000
+
+# Response
+HTTP_RESPONSE_TIME = 'time'
+HTTP_RESPONSE_LENGTH = 'length'
+
+# Status dimensions
+HTTP_SUCCESS = 'success'
+HTTP_BAD_CONTENT = 'bad_content'
+HTTP_BAD_STATUS = 'bad_status'
+HTTP_TIMEOUT = 'timeout'
+HTTP_NO_CONNECTION = 'no_connection'
+
+ORDER = [
+ 'response_time',
+ 'response_length',
+ 'status',
+]
+
+CHARTS = {
+ 'response_time': {
+ 'options': [None, 'HTTP response time', 'milliseconds', 'response', 'httpcheck.responsetime', 'line'],
+ 'lines': [
+ [HTTP_RESPONSE_TIME, 'time', 'absolute', 100, 1000]
+ ]
+ },
+ 'response_length': {
+ 'options': [None, 'HTTP response body length', 'characters', 'response', 'httpcheck.responselength', 'line'],
+ 'lines': [
+ [HTTP_RESPONSE_LENGTH, 'length', 'absolute']
+ ]
+ },
+ 'status': {
+ 'options': [None, 'HTTP status', 'boolean', 'status', 'httpcheck.status', 'line'],
+ 'lines': [
+ [HTTP_SUCCESS, 'success', 'absolute'],
+ [HTTP_BAD_CONTENT, 'bad content', 'absolute'],
+ [HTTP_BAD_STATUS, 'bad status', 'absolute'],
+ [HTTP_TIMEOUT, 'timeout', 'absolute'],
+ [HTTP_NO_CONNECTION, 'no connection', 'absolute']
+ ]
+ }
+}
+
+
+class Service(UrlService):
+ def __init__(self, configuration=None, name=None):
+ UrlService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
+ pattern = self.configuration.get('regex')
+ self.regex = re.compile(pattern) if pattern else None
+ self.status_codes_accepted = self.configuration.get('status_accepted', [200])
+ self.follow_redirect = self.configuration.get('redirect', True)
+
+ def _get_data(self):
+ """
+ Format data received from http request
+ :return: dict
+ """
+ data = dict()
+ data[HTTP_SUCCESS] = 0
+ data[HTTP_BAD_CONTENT] = 0
+ data[HTTP_BAD_STATUS] = 0
+ data[HTTP_TIMEOUT] = 0
+ data[HTTP_NO_CONNECTION] = 0
+ url = self.url
+ try:
+ start = time()
+ status, content = self._get_raw_data_with_status(retries=1 if self.follow_redirect else False,
+ redirect=self.follow_redirect)
+ diff = time() - start
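+ # store the elapsed time in units of 0.1 ms; the chart's
+ # multiplier/divisor pair (100/1000) renders it as milliseconds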
+ data[HTTP_RESPONSE_TIME] = max(round(diff * 10000), 0)
+ self.debug('Url: {url}. Host responded with status code {code} in {diff} s'.format(
+ url=url, code=status, diff=diff
+ ))
+ self.process_response(content, data, status)
+
+ except urllib3.exceptions.NewConnectionError as error:
+ self.debug('Connection failed: {url}. Error: {error}'.format(url=url, error=error))
+ data[HTTP_NO_CONNECTION] = 1
+
+ except (urllib3.exceptions.TimeoutError, urllib3.exceptions.PoolError) as error:
+ self.debug('Connection timed out: {url}. Error: {error}'.format(url=url, error=error))
+ data[HTTP_TIMEOUT] = 1
+
+ except urllib3.exceptions.HTTPError as error:
+ self.debug('Connection failed: {url}. Error: {error}'.format(url=url, error=error))
+ data[HTTP_NO_CONNECTION] = 1
+
+ except (TypeError, AttributeError) as error:
+ self.error('Url: {url}. Error: {error}'.format(url=url, error=error))
+ return None
+
+ return data
+
+ def process_response(self, content, data, status):
+ data[HTTP_RESPONSE_LENGTH] = len(content)
+ self.debug('Content: \n\n{content}\n'.format(content=content))
+ if status in self.status_codes_accepted:
+ if self.regex and self.regex.search(content) is None:
+ self.debug('No match for regex "{regex}" found'.format(regex=self.regex.pattern))
+ data[HTTP_BAD_CONTENT] = 1
+ else:
+ data[HTTP_SUCCESS] = 1
+ else:
+ data[HTTP_BAD_STATUS] = 1
diff --git a/collectors/python.d.plugin/httpcheck/httpcheck.conf b/collectors/python.d.plugin/httpcheck/httpcheck.conf
new file mode 100644
index 0000000..95adba2
--- /dev/null
+++ b/collectors/python.d.plugin/httpcheck/httpcheck.conf
@@ -0,0 +1,107 @@
+# netdata python.d.plugin configuration for httpcheck
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the httpcheck default is used, which is 3 seconds.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# chart_cleanup sets the default chart cleanup interval in iterations.
+# A chart is marked as obsolete if it has not been updated
+# 'chart_cleanup' iterations in a row.
+# They will be hidden immediately (not offered to dashboard viewer,
+# streamed upstream and archived to backends) and deleted one hour
+# later (configurable from netdata.conf).
+# -- For this plugin, cleanup MUST be disabled, otherwise we lose response
+# time charts
+chart_cleanup: 0
+
+# Autodetection and retries do not work for this plugin
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# -------------------------------
+# ATTENTION: Any valid configuration will be accepted, even if initial connection fails!
+# -------------------------------
+#
+# There is intentionally no default config, e.g. for 'localhost'
+
+# job_name:
+# name: myname # [optional] the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 3 # [optional] the JOB's data collection frequency
+# priority: 60000 # [optional] the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# timeout: 1 # [optional] the timeout when connecting, supports decimals (e.g. 0.5)
+# url: 'http[s]://host-ip-or-dns[:port][path]'
+# # [required] the remote host url to connect to. If [:port] is missing, it defaults to 80
+# # for HTTP and 443 for HTTPS. [path] is optional too, defaults to /
+# header: {'Content-Type': 'application/json'}
+# # [optional] the HTTP header sent with the request.
+# method: GET # [optional] the HTTP request method (POST, PUT, DELETE, HEAD etc.)
+# redirect: yes # [optional] If the remote host returns 3xx status codes, the redirection url will be
+# # followed (default).
+# body: {'key': 'value'} # [optional] the body sent with the request (e.g. POST, PUT, PATCH).
+# status_accepted: # [optional] By default, 200 is accepted. Anything else will result in 'bad status' in the
+# # status chart, however: The response time will still be > 0, since the
+# # host responded with something.
+# # If redirect is enabled, the accepted status will be checked against the redirected page.
+# - 200 # Multiple status codes are possible. If you specify 'status_accepted', you would still
+# # need to add '200'. E.g. 'status_accepted: [301]' will trigger an error in 'bad status'
+# # if code is 200. Do specify numerical entries such as 200, not 'OK'.
+# regex: None # [optional] If the status code is accepted, the content of the response will be searched for this
+# # regex (if defined). Be aware that you may need to escape the regex string. If redirect is enabled,
+# # the regex will be matched to the redirected page, not the initial 3xx response.
+
+# Simple example:
+#
+# jira:
+# url: 'https://jira.localdomain/'
+
+
+# Complex example:
+#
+# cool_website:
+# url: 'http://cool.website:8080/home'
+# status_accepted:
+# - 200
+# - 204
+# regex: <title>My cool website!<\/title>
+# timeout: 2
+
+# This plugin is intended for simple cases. Currently, the accuracy of the response time is low, so it should be used as a reference only.
+
diff --git a/collectors/python.d.plugin/icecast/Makefile.inc b/collectors/python.d.plugin/icecast/Makefile.inc
new file mode 100644
index 0000000..cb7c6fa
--- /dev/null
+++ b/collectors/python.d.plugin/icecast/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += icecast/icecast.chart.py
+dist_pythonconfig_DATA += icecast/icecast.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += icecast/README.md icecast/Makefile.inc
+
diff --git a/collectors/python.d.plugin/icecast/README.md b/collectors/python.d.plugin/icecast/README.md
new file mode 100644
index 0000000..90cdaa5
--- /dev/null
+++ b/collectors/python.d.plugin/icecast/README.md
@@ -0,0 +1,44 @@
+<!--
+title: "Icecast monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/icecast/README.md
+sidebar_label: "Icecast"
+-->
+
+# Icecast monitoring with Netdata
+
+Monitors the number of listeners for active sources.
+
+## Requirements
+
+- icecast version >= 2.4.0
+
+It produces the following charts:
+
+1. **Listeners** in listeners
+
+- one dimension per active source
+
+## Configuration
+
+Edit the `python.d/icecast.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/icecast.conf
+```
+
+The module needs only the `url` of the server's `/status-json.xsl` page.
+
+Here is an example for a remote server:
+
+```yaml
+remote:
+ url : 'http://1.2.3.4:8443/status-json.xsl'
+```
+
+Without configuration, the module attempts to connect to `http://localhost:8443/status-json.xsl`.
+
+---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Ficecast%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/python.d.plugin/icecast/icecast.chart.py b/collectors/python.d.plugin/icecast/icecast.chart.py
new file mode 100644
index 0000000..a967d17
--- /dev/null
+++ b/collectors/python.d.plugin/icecast/icecast.chart.py
@@ -0,0 +1,94 @@
+# -*- coding: utf-8 -*-
+# Description: icecast netdata python.d module
+# Author: Ilya Mashchenko (ilyam8)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import json
+
+from bases.FrameworkServices.UrlService import UrlService
+
+ORDER = [
+ 'listeners',
+]
+
+CHARTS = {
+ 'listeners': {
+ 'options': [None, 'Number Of Listeners', 'listeners', 'listeners', 'icecast.listeners', 'line'],
+ 'lines': [
+ ]
+ }
+}
+
+
+class Source:
+ def __init__(self, idx, data):
+ self.name = 'source_{0}'.format(idx)
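+ # a source is considered active if it reports both a stream start time and a server name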
+ self.is_active = data.get('stream_start') and data.get('server_name')
+ self.listeners = data['listeners']
+
+
+class Service(UrlService):
+ def __init__(self, configuration=None, name=None):
+ UrlService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
+ self.url = self.configuration.get('url')
+ self._manager = self._build_manager()
+
+ def check(self):
+ """
+ Add active sources to the "listeners" chart
+ :return: bool
+ """
+ sources = self.get_sources()
+ if not sources:
+ return None
+
+ active_sources = 0
+ for idx, raw_source in enumerate(sources):
+ if Source(idx, raw_source).is_active:
+ active_sources += 1
+ dim_id = 'source_{0}'.format(idx)
+ dim = 'source {0}'.format(idx)
+ self.definitions['listeners']['lines'].append([dim_id, dim])
+
+ return bool(active_sources)
+
+ def _get_data(self):
+ """
+ Get number of listeners for every source
+ :return: dict
+ """
+ sources = self.get_sources()
+ if not sources:
+ return None
+
+ data = dict()
+
+ for idx, raw_source in enumerate(sources):
+ source = Source(idx, raw_source)
+ data[source.name] = source.listeners
+
+ return data
+
+ def get_sources(self):
+ """
+ Format data received from http request and return list of sources
+ :return: list
+ """
+
+ raw_data = self._get_raw_data()
+ if not raw_data:
+ return None
+
+ try:
+ data = json.loads(raw_data)
+ except ValueError as error:
+ self.error('JSON decode error:', error)
+ return None
+
+ sources = data['icestats'].get('source')
+ if not sources:
+ return None
+
+ return sources if isinstance(sources, list) else [sources]
diff --git a/collectors/python.d.plugin/icecast/icecast.conf b/collectors/python.d.plugin/icecast/icecast.conf
new file mode 100644
index 0000000..a33074a
--- /dev/null
+++ b/collectors/python.d.plugin/icecast/icecast.conf
@@ -0,0 +1,81 @@
+# netdata python.d.plugin configuration for icecast
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# Additionally to the above, icecast also supports the following:
+#
+# url: 'URL' # the URL to fetch icecast's stats
+#
+# if the URL is password protected, the following are supported:
+#
+# user: 'username'
+# pass: 'password'
+
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+
+localhost:
+ name : 'local'
+ url : 'http://localhost:8443/status-json.xsl'
+
+localipv4:
+ name : 'local'
+ url : 'http://127.0.0.1:8443/status-json.xsl' \ No newline at end of file
diff --git a/collectors/python.d.plugin/ipfs/Makefile.inc b/collectors/python.d.plugin/ipfs/Makefile.inc
new file mode 100644
index 0000000..68458cb
--- /dev/null
+++ b/collectors/python.d.plugin/ipfs/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += ipfs/ipfs.chart.py
+dist_pythonconfig_DATA += ipfs/ipfs.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += ipfs/README.md ipfs/Makefile.inc
+
diff --git a/collectors/python.d.plugin/ipfs/README.md b/collectors/python.d.plugin/ipfs/README.md
new file mode 100644
index 0000000..4d3b0ec
--- /dev/null
+++ b/collectors/python.d.plugin/ipfs/README.md
@@ -0,0 +1,51 @@
+<!--
+title: "IPFS monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/ipfs/README.md
+sidebar_label: "IPFS"
+-->
+
+# IPFS monitoring with Netdata
+
+Collects basic [`IPFS`](https://ipfs.io) information such as file system bandwidth, peers, and repo metrics.
+
+## Charts
+
+It produces the following charts:
+
+- Bandwidth in `kilobits/s`
+- Peers in `peers`
+- Repo Size in `GiB`
+- Repo Objects in `objects`
+
+## Configuration
+
+Edit the `python.d/ipfs.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/ipfs.conf
+```
+
+---
+
+Calls to the following endpoints are disabled due to `IPFS` bugs:
+
+- `/api/v0/stats/repo` (https://github.com/ipfs/go-ipfs/issues/3874)
+- `/api/v0/pin/ls` (https://github.com/ipfs/go-ipfs/issues/7528)
+
+They can be enabled in the collector configuration file.
+
+The configuration needs only the `url` of the `IPFS` server. Here is an example for two `IPFS` instances:
+
+```yaml
+localhost:
+ url: 'http://localhost:5001'
+
+remote:
+ url: 'http://203.0.113.10:5001'
+```
+
+---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fipfs%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/python.d.plugin/ipfs/ipfs.chart.py b/collectors/python.d.plugin/ipfs/ipfs.chart.py
new file mode 100644
index 0000000..abfc9c4
--- /dev/null
+++ b/collectors/python.d.plugin/ipfs/ipfs.chart.py
@@ -0,0 +1,149 @@
+# -*- coding: utf-8 -*-
+# Description: IPFS netdata python.d module
+# Authors: davidak
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import json
+
+from bases.FrameworkServices.UrlService import UrlService
+
+ORDER = [
+ 'bandwidth',
+ 'peers',
+ 'repo_size',
+ 'repo_objects',
+]
+
+CHARTS = {
+ 'bandwidth': {
+ 'options': [None, 'IPFS Bandwidth', 'kilobits/s', 'Bandwidth', 'ipfs.bandwidth', 'line'],
+ 'lines': [
+ ['in', None, 'absolute', 8, 1000],
+ ['out', None, 'absolute', -8, 1000]
+ ]
+ },
+ 'peers': {
+ 'options': [None, 'IPFS Peers', 'peers', 'Peers', 'ipfs.peers', 'line'],
+ 'lines': [
+ ['peers', None, 'absolute']
+ ]
+ },
+ 'repo_size': {
+ 'options': [None, 'IPFS Repo Size', 'GiB', 'Size', 'ipfs.repo_size', 'area'],
+ 'lines': [
+ ['avail', None, 'absolute', 1, 1 << 30],
+ ['size', None, 'absolute', 1, 1 << 30],
+ ]
+ },
+ 'repo_objects': {
+ 'options': [None, 'IPFS Repo Objects', 'objects', 'Objects', 'ipfs.repo_objects', 'line'],
+ 'lines': [
+ ['objects', None, 'absolute', 1, 1],
+ ['pinned', None, 'absolute', 1, 1],
+ ['recursive_pins', None, 'absolute', 1, 1]
+ ]
+ }
+}
+
+SI_zeroes = {
+ 'k': 3,
+ 'm': 6,
+ 'g': 9,
+ 't': 12,
+ 'p': 15,
+ 'e': 18,
+ 'z': 21,
+ 'y': 24
+}
+
+
+class Service(UrlService):
+ def __init__(self, configuration=None, name=None):
+ UrlService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
+ self.baseurl = self.configuration.get('url', 'http://localhost:5001')
+ self.method = "POST"
+ self.do_pinapi = self.configuration.get('pinapi')
+ self.do_repoapi = self.configuration.get('repoapi')
+ self.__storage_max = None
+
+ def _get_json(self, sub_url):
+ """
+ :return: json decoding of the specified url
+ """
+ self.url = self.baseurl + sub_url
+ try:
+ return json.loads(self._get_raw_data())
+ except (TypeError, ValueError):
+ return dict()
+
+ @staticmethod
+ def _recursive_pins(keys):
+ # json.loads() yields str values, so compare against a str (a bytes
+ # literal here would never match under Python 3)
+ return sum(1 for k in keys if keys[k]['Type'] == 'recursive')
+
+ @staticmethod
+ def _dehumanize(store_max):
+ # convert from '10Gb' to 10000000000
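+ # e.g. '10gb' -> val '10', units 'g' -> '10' + nine zeroes -> 10000000000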
+ if not isinstance(store_max, int):
+ store_max = store_max.lower()
+ if store_max.endswith('b'):
+ val, units = store_max[:-2], store_max[-2]
+ if units in SI_zeroes:
+ val += '0' * SI_zeroes[units]
+ store_max = val
+ try:
+ store_max = int(store_max)
+ except (TypeError, ValueError):
+ store_max = None
+ return store_max
+
+ def _storagemax(self, store_cfg):
+ if self.__storage_max is None:
+ self.__storage_max = self._dehumanize(store_cfg)
+ return self.__storage_max
+
+ def _get_data(self):
+ """
+ Get data from API
+ :return: dict
+ """
+ # suburl : List of (result-key, original-key, transform-func)
+ cfg = {
+ '/api/v0/stats/bw':
+ [
+ ('in', 'RateIn', int),
+ ('out', 'RateOut', int),
+ ],
+ '/api/v0/swarm/peers':
+ [
+ ('peers', 'Peers', len),
+ ],
+ }
+ if self.do_repoapi:
+ cfg.update({
+ '/api/v0/stats/repo':
+ [
+ ('size', 'RepoSize', int),
+ ('objects', 'NumObjects', int),
+ ('avail', 'StorageMax', self._storagemax),
+ ],
+ })
+
+ if self.do_pinapi:
+ cfg.update({
+ '/api/v0/pin/ls':
+ [
+ ('pinned', 'Keys', len),
+ ('recursive_pins', 'Keys', self._recursive_pins),
+ ]
+ })
+ r = dict()
+ for suburl in cfg:
+ in_json = self._get_json(suburl)
+ for new_key, orig_key, xmute in cfg[suburl]:
+ try:
+ r[new_key] = xmute(in_json[orig_key])
+ except Exception as error:
+ self.debug(error)
+ return r or None
diff --git a/collectors/python.d.plugin/ipfs/ipfs.conf b/collectors/python.d.plugin/ipfs/ipfs.conf
new file mode 100644
index 0000000..8b167b3
--- /dev/null
+++ b/collectors/python.d.plugin/ipfs/ipfs.conf
@@ -0,0 +1,82 @@
+# netdata python.d.plugin configuration for ipfs
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# Additionally to the above, ipfs also supports the following:
+#
+# url: 'URL' # URL to the IPFS API
+# repoapi: no # Collect repo metrics
+# # Currently defaults to disabled due to IPFS Bug
+# # https://github.com/ipfs/go-ipfs/issues/7528
+# # resulting in very high CPU Usage
+# pinapi: no # Set status of IPFS pinned object polling
+# # Currently defaults to disabled due to IPFS Bug
+# # https://github.com/ipfs/go-ipfs/issues/3874
+# # resulting in very high CPU Usage
+#
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+
+localhost:
+ name: 'local'
+ url: 'http://localhost:5001'
+ repoapi: no
+ pinapi: no
diff --git a/collectors/python.d.plugin/isc_dhcpd/Makefile.inc b/collectors/python.d.plugin/isc_dhcpd/Makefile.inc
new file mode 100644
index 0000000..44343fc
--- /dev/null
+++ b/collectors/python.d.plugin/isc_dhcpd/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += isc_dhcpd/isc_dhcpd.chart.py
+dist_pythonconfig_DATA += isc_dhcpd/isc_dhcpd.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += isc_dhcpd/README.md isc_dhcpd/Makefile.inc
+
diff --git a/collectors/python.d.plugin/isc_dhcpd/README.md b/collectors/python.d.plugin/isc_dhcpd/README.md
new file mode 100644
index 0000000..5830bd6
--- /dev/null
+++ b/collectors/python.d.plugin/isc_dhcpd/README.md
@@ -0,0 +1,57 @@
+<!--
+title: "ISC DHCP monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/isc_dhcpd/README.md
+sidebar_label: "ISC DHCP"
+-->
+
+# ISC DHCP monitoring with Netdata
+
+Monitors the leases database to show all active leases for given pools.
+
+## Requirements
+
+- the dhcpd leases file MUST BE readable by Netdata
+- pools MUST BE in CIDR or IP range format
+- the `python-ipaddress` package is needed on Python 2
+
+It produces:
+
+1. **Pools utilization** Aggregate chart for all pools.
+
+ - utilization in percent
+
+2. **Total leases**
+
+ - leases (overall number of leases for all pools)
+
+3. **Active leases** for every pool
+
+ - leases (number of active leases in pool)
+
+## Configuration
+
+Edit the `python.d/isc_dhcpd.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/isc_dhcpd.conf
+```
+
+Sample:
+
+```yaml
+local:
+ leases_path: '/var/lib/dhcp/dhcpd.leases'
+ pools:
+ office: '192.168.2.0/24' # name(dimension): pool in CIDR format
+ wifi: '192.168.3.10-192.168.3.20' # name(dimension): pool in IP Range format
+ 192.168.4.0/24: '192.168.4.0/24' # name(dimension): pool in CIDR format
+ wifi-guest: '192.168.5.0/24 192.168.6.10-192.168.6.20' # name(dimension): pool in CIDR + IP Range format
+```
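+
+For reference, a minimal sketch of how pool utilization is derived (hypothetical
+addresses; it mirrors the logic in `isc_dhcpd.chart.py`):
+
+```python
+import ipaddress
+
+pool = ipaddress.ip_network(u'192.168.2.0/24')
+leases = [ipaddress.ip_address(u'192.168.2.10'), ipaddress.ip_address(u'192.168.2.11')]
+
+count = sum(1 for ip in leases if ip in pool)  # active leases inside the pool
+usable = pool.num_addresses - 2                # exclude network and broadcast addresses
+print('%.2f%%' % (float(count) / usable * 100))  # -> 0.79%
+```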
+
+The module will not work if no configuration is given.
+
+---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fisc_dhcpd%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/python.d.plugin/isc_dhcpd/isc_dhcpd.chart.py b/collectors/python.d.plugin/isc_dhcpd/isc_dhcpd.chart.py
new file mode 100644
index 0000000..099c7d4
--- /dev/null
+++ b/collectors/python.d.plugin/isc_dhcpd/isc_dhcpd.chart.py
@@ -0,0 +1,269 @@
+# -*- coding: utf-8 -*-
+# Description: isc dhcpd lease netdata python.d module
+# Author: ilyam8
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import os
+import re
+import time
+
+try:
+ import ipaddress
+
+ HAVE_IP_ADDRESS = True
+except ImportError:
+ HAVE_IP_ADDRESS = False
+
+from collections import defaultdict
+from copy import deepcopy
+
+from bases.FrameworkServices.SimpleService import SimpleService
+
+ORDER = [
+ 'pools_utilization',
+ 'pools_active_leases',
+ 'leases_total',
+]
+
+CHARTS = {
+ 'pools_utilization': {
+ 'options': [None, 'Pools Utilization', 'percentage', 'utilization', 'isc_dhcpd.utilization', 'line'],
+ 'lines': []
+ },
+ 'pools_active_leases': {
+ 'options': [None, 'Active Leases Per Pool', 'leases', 'active leases', 'isc_dhcpd.active_leases', 'line'],
+ 'lines': []
+ },
+ 'leases_total': {
+ 'options': [None, 'All Active Leases', 'leases', 'active leases', 'isc_dhcpd.leases_total', 'line'],
+ 'lines': [
+ ['leases_total', 'leases', 'absolute']
+ ],
+ 'variables': [
+ ['leases_size']
+ ]
+ }
+}
+
+POOL_CIDR = "CIDR"
+POOL_IP_RANGE = "IP_RANGE"
+POOL_UNKNOWN = "UNKNOWN"
+
+def detect_ip_type(ip):
+ ip_type = ip.split("-")
+ if len(ip_type) == 1:
+ return POOL_CIDR
+ elif len(ip_type) == 2:
+ return POOL_IP_RANGE
+ else:
+ return POOL_UNKNOWN
+
+
+class DhcpdLeasesFile:
+ def __init__(self, path):
+ self.path = path
+ self.mod_time = 0
+ self.size = 0
+
+ def is_valid(self):
+ return os.path.isfile(self.path) and os.access(self.path, os.R_OK)
+
+ def is_changed(self):
+ mod_time = os.path.getmtime(self.path)
+ if mod_time != self.mod_time:
+ self.mod_time = mod_time
+ self.size = int(os.path.getsize(self.path) / 1024)
+ return True
+ return False
+
+ def get_data(self):
+ try:
+ with open(self.path) as leases:
+ result = defaultdict(dict)
+ for row in leases:
+ row = row.strip()
+ if row.startswith('lease'):
+ address = row[6:-2]
+ elif row.startswith('iaaddr'):
+ address = row[7:-2]
+ elif row.startswith('ends'):
+ result[address]['ends'] = row[5:-1]
+ elif row.startswith('binding state'):
+ result[address]['state'] = row[14:-1]
+ return dict((k, v) for k, v in result.items() if len(v) == 2)
+ except (OSError, IOError):
+ return None
+
+
+class Pool:
+ def __init__(self, name, network):
+ self.id = re.sub(r'[:/.-]+', '_', name)
+ self.name = name
+
+ self.networks = list()
+ for network in network.split(" "):
+ if not network:
+ continue
+
+ ip_type = detect_ip_type(ip=network)
+ if ip_type == POOL_CIDR:
+ self.networks.append(PoolCIDR(network=network))
+ elif ip_type == POOL_IP_RANGE:
+ self.networks.append(PoolIPRange(ip_range=network))
+ else:
+ raise ValueError('Network ({0}) incorrect syntax, expect CIDR or IPRange format.'.format(network))
+
+ def num_hosts(self):
+ return sum([network.num_hosts() for network in self.networks])
+
+ def __contains__(self, item):
+ for network in self.networks:
+ if item in network:
+ return True
+ return False
+
+
+class PoolCIDR:
+ def __init__(self, network):
+ self.network = ipaddress.ip_network(address=u'%s' % network)
+
+ def num_hosts(self):
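+ # exclude the network and broadcast addresses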
+ return self.network.num_addresses - 2
+
+ def __contains__(self, item):
+ return item.address in self.network
+
+
+class PoolIPRange:
+ def __init__(self, ip_range):
+ ip_range = ip_range.split("-")
+ self.networks = list(self._summarize_address_range(ip_range[0], ip_range[1]))
+
+ @staticmethod
+ def ip_address(ip):
+ return ipaddress.ip_address(u'%s' % ip)
+
+ def _summarize_address_range(self, first, last):
+ address_first = self.ip_address(first)
+ address_last = self.ip_address(last)
+ return ipaddress.summarize_address_range(address_first, address_last)
+
+ def num_hosts(self):
+ return sum([network.num_addresses for network in self.networks])
+
+ def __contains__(self, item):
+ for network in self.networks:
+ if item.address in network:
+ return True
+ return False
+
+
+class Lease:
+ def __init__(self, address, ends, state):
+ self.address = ipaddress.ip_address(address=u'%s' % address)
+ self.ends = ends
+ self.state = state
+
+ def is_active(self, current_time):
+ # lease_end_time might be epoch
+ if self.ends.startswith('epoch'):
+ epoch = int(self.ends.split()[1].replace(';', ''))
+ return epoch - current_time > 0
+ # max. int for lease-time causes lease to expire in year 2038.
+ # dhcpd puts 'never' in the ends section of active lease
+ elif self.ends == 'never':
+ return True
+ return time.mktime(time.strptime(self.ends, '%w %Y/%m/%d %H:%M:%S')) - current_time > 0
+
+ def is_valid(self):
+ return self.state == 'active'
+
+
+class Service(SimpleService):
+ def __init__(self, configuration=None, name=None):
+ SimpleService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = deepcopy(CHARTS)
+ lease_path = self.configuration.get('leases_path', '/var/lib/dhcp/dhcpd.leases')
+ self.dhcpd_leases = DhcpdLeasesFile(path=lease_path)
+ self.pools = list()
+ self.data = dict()
+
+ # Will work only with 'default' db-time-format (weekday year/month/day hour:minute:second)
+ # TODO: update algorithm to parse correctly 'local' db-time-format
+
+ def check(self):
+ if not HAVE_IP_ADDRESS:
+ self.error("'python-ipaddress' package is needed")
+ return False
+
+ if not self.dhcpd_leases.is_valid():
+ self.error("Make sure '{path}' is exist and readable by netdata".format(path=self.dhcpd_leases.path))
+ return False
+
+ pools = self.configuration.get('pools')
+ if not pools:
+ self.error('Pools are not defined')
+ return False
+
+ for pool in pools:
+ try:
+ new_pool = Pool(name=pool, network=pools[pool])
+ except ValueError as error:
+ self.error("'{pool}' was removed, error: {error}".format(pool=pools[pool], error=error))
+ else:
+ self.pools.append(new_pool)
+
+ self.create_charts()
+ return bool(self.pools)
+
+ def get_data(self):
+ """
+ :return: dict
+ """
+ if not self.dhcpd_leases.is_changed():
+ return self.data
+
+ raw_leases = self.dhcpd_leases.get_data()
+ if not raw_leases:
+ self.data = dict()
+ return None
+
+ active_leases = list()
+ current_time = time.mktime(time.gmtime())
+
+ for address in raw_leases:
+ try:
+ new_lease = Lease(address, **raw_leases[address])
+ except ValueError:
+ continue
+ else:
+ if new_lease.is_active(current_time) and new_lease.is_valid():
+ active_leases.append(new_lease)
+
+ for pool in self.pools:
+ count = len([ip for ip in active_leases if ip in pool])
+ self.data[pool.id + '_active_leases'] = count
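+ # stored as percent * 100; the chart dimension's divisor of 100 (see create_charts) restores percent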
+ self.data[pool.id + '_utilization'] = float(count) / pool.num_hosts() * 10000
+
+ self.data['leases_size'] = self.dhcpd_leases.size
+ self.data['leases_total'] = len(active_leases)
+
+ return self.data
+
+ def create_charts(self):
+ for pool in self.pools:
+ dim = [
+ pool.id + '_utilization',
+ pool.name,
+ 'absolute',
+ 1,
+ 100,
+ ]
+ self.definitions['pools_utilization']['lines'].append(dim)
+
+ dim = [
+ pool.id + '_active_leases',
+ pool.name,
+ ]
+ self.definitions['pools_active_leases']['lines'].append(dim)
diff --git a/collectors/python.d.plugin/isc_dhcpd/isc_dhcpd.conf b/collectors/python.d.plugin/isc_dhcpd/isc_dhcpd.conf
new file mode 100644
index 0000000..c700947
--- /dev/null
+++ b/collectors/python.d.plugin/isc_dhcpd/isc_dhcpd.conf
@@ -0,0 +1,80 @@
+# netdata python.d.plugin configuration for isc dhcpd leases
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# In addition to the above, isc_dhcpd supports the following:
+#
+# leases_path: 'PATH' # the path to dhcpd.leases file
+# pools:
+# office: '192.168.2.0/24' # name(dimension): pool in CIDR format
+# wifi: '192.168.3.10-192.168.3.20' # name(dimension): pool in IP Range format
+# 192.168.4.0/24: '192.168.4.0/24' # name(dimension): pool in CIDR format
+# wifi-guest: '192.168.5.0/24 192.168.6.10-192.168.6.20' # name(dimension): pool in CIDR + IP Range format
+#
+#-----------------------------------------------------------------------
+# IMPORTANT notes
+#
+# 1. Make sure leases file is readable by netdata.
+# 2. Current implementation works only with 'default' db-time-format
+# (weekday year/month/day hour:minute:second).
+# This is the default, so it will work in most cases.
+# 3. Pools can be defined in CIDR or IP range format (see the examples above).
+#
+# ----------------------------------------------------------------------
diff --git a/collectors/python.d.plugin/litespeed/Makefile.inc b/collectors/python.d.plugin/litespeed/Makefile.inc
new file mode 100644
index 0000000..5dd6450
--- /dev/null
+++ b/collectors/python.d.plugin/litespeed/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += litespeed/litespeed.chart.py
+dist_pythonconfig_DATA += litespeed/litespeed.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += litespeed/README.md litespeed/Makefile.inc
+
diff --git a/collectors/python.d.plugin/litespeed/README.md b/collectors/python.d.plugin/litespeed/README.md
new file mode 100644
index 0000000..2225773
--- /dev/null
+++ b/collectors/python.d.plugin/litespeed/README.md
@@ -0,0 +1,72 @@
+<!--
+title: "LiteSpeed monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/litespeed/README.md
+sidebar_label: "LiteSpeed"
+-->
+
+# LiteSpeed monitoring with Netdata
+
+Collects web server performance metrics for network, connections, requests, and cache.
+
+It produces:
+
+1. **Network Throughput HTTP** in kilobits/s
+
+ - in
+ - out
+
+2. **Network Throughput HTTPS** in kilobits/s
+
+ - in
+ - out
+
+3. **Connections HTTP** in connections
+
+ - free
+ - used
+
+4. **Connections HTTPS** in connections
+
+ - free
+ - used
+
+5. **Requests** in requests/s
+
+ - requests
+
+6. **Requests In Processing** in requests
+
+ - processing
+
+7. **Public Cache Hits** in hits/s
+
+ - hits
+
+8. **Private Cache Hits** in hits/s
+
+ - hits
+
+9. **Static Hits** in hits/s
+
+ - hits
+
+## Configuration
+
+Edit the `python.d/litespeed.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/litespeed.conf
+```
+
+```yaml
+local:
+ path : 'PATH'
+```
+
+If no configuration is given, the module uses `/tmp/lshttpd/`.
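+
+Internally, the module scans that directory for `.rtreport*` files and sums the counters they contain. The following sketch (not the collector itself) shows how a single report line is parsed; the sample line is illustrative, not captured from a real server.
+
+```python
+import re
+
+# matches 'KEY: value' pairs such as 'BPS_IN: 3'
+RE = re.compile(r'([A-Z_]+): ([0-9.]+)')
+
+line = 'BPS_IN: 3, BPS_OUT: 8, SSL_BPS_IN: 0, SSL_BPS_OUT: 0'
+metrics = {key: float(value) for key, value in RE.findall(line)}
+print(metrics['BPS_IN'])  # -> 3.0
+```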
+
+---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Flitespeed%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/python.d.plugin/litespeed/litespeed.chart.py b/collectors/python.d.plugin/litespeed/litespeed.chart.py
new file mode 100644
index 0000000..7ef8189
--- /dev/null
+++ b/collectors/python.d.plugin/litespeed/litespeed.chart.py
@@ -0,0 +1,188 @@
+# -*- coding: utf-8 -*-
+# Description: litespeed netdata python.d module
+# Author: Ilya Mashchenko (ilyam8)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import glob
+import os
+import re
+from collections import namedtuple
+
+from bases.FrameworkServices.SimpleService import SimpleService
+
+update_every = 10
+
+# charts order (can be overridden if you want less charts, or different order)
+ORDER = [
+ 'net_throughput_http', # net throughput
+ 'net_throughput_https', # net throughput
+ 'connections_http', # connections
+ 'connections_https', # connections
+ 'requests', # requests
+ 'requests_processing', # requests
+ 'pub_cache_hits', # cache
+ 'private_cache_hits', # cache
+ 'static_hits', # static
+]
+
+CHARTS = {
+ 'net_throughput_http': {
+ 'options': [None, 'Network Throughput HTTP', 'kilobits/s', 'net throughput',
+ 'litespeed.net_throughput', 'area'],
+ 'lines': [
+ ['bps_in', 'in', 'absolute'],
+ ['bps_out', 'out', 'absolute', -1]
+ ]
+ },
+ 'net_throughput_https': {
+ 'options': [None, 'Network Throughput HTTPS', 'kilobits/s', 'net throughput',
+ 'litespeed.net_throughput', 'area'],
+ 'lines': [
+ ['ssl_bps_in', 'in', 'absolute'],
+ ['ssl_bps_out', 'out', 'absolute', -1]
+ ]
+ },
+ 'connections_http': {
+ 'options': [None, 'Connections HTTP', 'conns', 'connections', 'litespeed.connections', 'stacked'],
+ 'lines': [
+ ['conn_free', 'free', 'absolute'],
+ ['conn_used', 'used', 'absolute']
+ ]
+ },
+ 'connections_https': {
+ 'options': [None, 'Connections HTTPS', 'conns', 'connections', 'litespeed.connections', 'stacked'],
+ 'lines': [
+ ['ssl_conn_free', 'free', 'absolute'],
+ ['ssl_conn_used', 'used', 'absolute']
+ ]
+ },
+ 'requests': {
+ 'options': [None, 'Requests', 'requests/s', 'requests', 'litespeed.requests', 'line'],
+ 'lines': [
+ ['requests', None, 'absolute', 1, 100]
+ ]
+ },
+ 'requests_processing': {
+ 'options': [None, 'Requests In Processing', 'requests', 'requests', 'litespeed.requests_processing', 'line'],
+ 'lines': [
+ ['requests_processing', 'processing', 'absolute']
+ ]
+ },
+ 'pub_cache_hits': {
+ 'options': [None, 'Public Cache Hits', 'hits/s', 'cache', 'litespeed.cache', 'line'],
+ 'lines': [
+ ['pub_cache_hits', 'hits', 'absolute', 1, 100]
+ ]
+ },
+ 'private_cache_hits': {
+ 'options': [None, 'Private Cache Hits', 'hits/s', 'cache', 'litespeed.cache', 'line'],
+ 'lines': [
+ ['private_cache_hits', 'hits', 'absolute', 1, 100]
+ ]
+ },
+ 'static_hits': {
+ 'options': [None, 'Static Hits', 'hits/s', 'static', 'litespeed.static', 'line'],
+ 'lines': [
+ ['static_hits', 'hits', 'absolute', 1, 100]
+ ]
+ }
+}
+
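+# Each entry maps a report key to a chart dimension id with a multiplier:
+# the x8 factors convert byte-based throughput values to bits, and the x100
+# factors store per-second rates as fixed-point integers (the matching
+# charts divide by 100).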
+t = namedtuple('T', ['key', 'id', 'mul'])
+
+T = [
+ t('BPS_IN', 'bps_in', 8),
+ t('BPS_OUT', 'bps_out', 8),
+ t('SSL_BPS_IN', 'ssl_bps_in', 8),
+ t('SSL_BPS_OUT', 'ssl_bps_out', 8),
+ t('REQ_PER_SEC', 'requests', 100),
+ t('REQ_PROCESSING', 'requests_processing', 1),
+ t('PUB_CACHE_HITS_PER_SEC', 'pub_cache_hits', 100),
+ t('PRIVATE_CACHE_HITS_PER_SEC', 'private_cache_hits', 100),
+ t('STATIC_HITS_PER_SEC', 'static_hits', 100),
+ t('PLAINCONN', 'conn_used', 1),
+ t('AVAILCONN', 'conn_free', 1),
+ t('SSLCONN', 'ssl_conn_used', 1),
+ t('AVAILSSL', 'ssl_conn_free', 1),
+]
+
+RE = re.compile(r'([A-Z_]+): ([0-9.]+)')
+
+ZERO_DATA = {
+ 'bps_in': 0,
+ 'bps_out': 0,
+ 'ssl_bps_in': 0,
+ 'ssl_bps_out': 0,
+ 'requests': 0,
+ 'requests_processing': 0,
+ 'pub_cache_hits': 0,
+ 'private_cache_hits': 0,
+ 'static_hits': 0,
+ 'conn_used': 0,
+ 'conn_free': 0,
+ 'ssl_conn_used': 0,
+ 'ssl_conn_free': 0,
+}
+
+
+class Service(SimpleService):
+ def __init__(self, configuration=None, name=None):
+ SimpleService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
+ self.path = self.configuration.get('path', '/tmp/lshttpd/')
+ self.files = list()
+
+ def check(self):
+ if not self.path:
+ self.error('"path" not specified')
+ return False
+
+ fs = glob.glob(os.path.join(self.path, '.rtreport*'))
+
+ if not fs:
+ self.error('"{0}" has no "rtreport" files or dir is not readable'.format(self.path))
+ return None
+
+ self.debug('stats files:', fs)
+
+ for f in fs:
+ if not is_readable_file(f):
+ self.error('{0} is not readable'.format(f))
+ continue
+ self.files.append(f)
+
+ return bool(self.files)
+
+ def get_data(self):
+ """
+ Format data received from http request
+ :return: dict
+ """
+ data = dict(ZERO_DATA)
+
+ for f in self.files:
+ try:
+ with open(f) as b:
+ lines = b.readlines()
+ except (OSError, IOError) as err:
+ self.error(err)
+ return None
+ else:
+ parse_file(data, lines)
+
+ return data
+
+
+def parse_file(data, lines):
+ for line in lines:
+ if not line.startswith(('BPS_IN:', 'MAXCONN:', 'PLAINCONN:', 'REQ_RATE []:')):
+ continue
+ m = dict(RE.findall(line))
+ for v in T:
+ if v.key in m:
+ data[v.id] += float(m[v.key]) * v.mul
+
+
+def is_readable_file(v):
+ return os.path.isfile(v) and os.access(v, os.R_OK)
diff --git a/collectors/python.d.plugin/litespeed/litespeed.conf b/collectors/python.d.plugin/litespeed/litespeed.conf
new file mode 100644
index 0000000..a326e18
--- /dev/null
+++ b/collectors/python.d.plugin/litespeed/litespeed.conf
@@ -0,0 +1,72 @@
+# netdata python.d.plugin configuration for litespeed
+#
+# This file is in YAML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# In addition to the above, litespeed also supports the following:
+#
+# path: 'PATH' # path to litespeed stats files directory
+#
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+
+localhost:
+ name : 'local'
+ path : '/tmp/lshttpd/'
diff --git a/collectors/python.d.plugin/logind/Makefile.inc b/collectors/python.d.plugin/logind/Makefile.inc
new file mode 100644
index 0000000..adadab1
--- /dev/null
+++ b/collectors/python.d.plugin/logind/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += logind/logind.chart.py
+dist_pythonconfig_DATA += logind/logind.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += logind/README.md logind/Makefile.inc
+
diff --git a/collectors/python.d.plugin/logind/README.md b/collectors/python.d.plugin/logind/README.md
new file mode 100644
index 0000000..3e2d4c1
--- /dev/null
+++ b/collectors/python.d.plugin/logind/README.md
@@ -0,0 +1,86 @@
+<!--
+title: "systemd-logind monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/logind/README.md
+sidebar_label: "systemd-logind"
+-->
+
+# systemd-logind monitoring with Netdata
+
+Monitors active sessions, users, and seats tracked by `systemd-logind` or `elogind`.
+
+It provides the following charts:
+
+1. **Sessions** Tracks the total number of sessions.
+
+ - Graphical: Local graphical sessions (running X11, Wayland, or another graphical environment).
+ - Console: Local console sessions.
+ - Remote: Remote sessions.
+
+2. **Users** Tracks the total number of unique user logins of each type.
+
+ - Graphical
+ - Console
+ - Remote
+
+3. **Seats** Tracks the total number of seats in use.
+
+ - Seats
+
+## Enable the collector
+
+The `logind` collector is disabled by default. To enable it, use `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`, to edit the `python.d.conf` file.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d.conf
+```
+
+Change the value of the `logind` setting to `yes`. Save the file and restart the Netdata Agent with `sudo systemctl
+restart netdata`, or the appropriate method for your system, to finish enabling the `logind` collector.
+
+## Configuration
+
+This module needs no configuration. Just make sure the `netdata` user
+can run the `loginctl` command and get a session list without having to
+specify a path.
+
+This will work with any command that can output data in the _exact_
+same format as `loginctl list-sessions --no-legend`. If you have some
+other command you want to use that outputs data in this format, you can
+specify it using the `command` key like so:
+
+```yaml
+command: '/path/to/other/command'
+```
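+
+For reference, the collector classifies each line of that output by its
+field count: 3 fields indicate a remote session, 4 a graphical one, and 5 a
+console one. A minimal sketch of that heuristic (the sample line and the
+column names in the comments are illustrative, not captured from a real
+system):
+
+```python
+def classify(line):
+    fields = line.split()
+    if len(fields) == 3:  # SESSION UID USER
+        return 'remote'
+    if len(fields) == 4:  # SESSION UID USER SEAT
+        return 'graphical'
+    if len(fields) == 5:  # SESSION UID USER SEAT TTY
+        return 'console'
+    return 'unknown'
+
+
+print(classify('2 1000 alice seat0 tty2'))  # -> console
+```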
+
+Edit the `python.d/logind.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/logind.conf
+```
+
+## Notes
+
+- This module's ability to track logins depends on which PAM services
+ are configured to register sessions with logind. In particular, for
+ most systems, it will only track TTY logins, local desktop logins,
+ and logins through remote shell connections.
+
+- The users chart counts _usernames_, not UIDs. This is potentially
+ important in configurations where multiple users have the same UID.
+
+- The users chart counts any given user name up to once for _each_ type
+ of login. So if the same user has a graphical and a console login on a
+ system, they will show up once in the graphical count, and once in the
+ console count.
+
+- Because the data collection process is rather expensive, this plugin
+ is currently disabled by default, and needs to be explicitly enabled in
+ `/etc/netdata/python.d.conf` before it will run.
+
+---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Flogind%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/python.d.plugin/logind/logind.chart.py b/collectors/python.d.plugin/logind/logind.chart.py
new file mode 100644
index 0000000..7086686
--- /dev/null
+++ b/collectors/python.d.plugin/logind/logind.chart.py
@@ -0,0 +1,85 @@
+# -*- coding: utf-8 -*-
+# Description: logind netdata python.d module
+# Author: Austin S. Hemmelgarn (Ferroin)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from bases.FrameworkServices.ExecutableService import ExecutableService
+
+priority = 59999
+disabled_by_default = True
+
+LOGINCTL_COMMAND = 'loginctl list-sessions --no-legend'
+
+ORDER = [
+ 'sessions',
+ 'users',
+ 'seats',
+]
+
+CHARTS = {
+ 'sessions': {
+ 'options': [None, 'Logind Sessions', 'sessions', 'sessions', 'logind.sessions', 'stacked'],
+ 'lines': [
+ ['sessions_graphical', 'Graphical', 'absolute', 1, 1],
+ ['sessions_console', 'Console', 'absolute', 1, 1],
+ ['sessions_remote', 'Remote', 'absolute', 1, 1]
+ ]
+ },
+ 'users': {
+ 'options': [None, 'Logind Users', 'users', 'users', 'logind.users', 'stacked'],
+ 'lines': [
+ ['users_graphical', 'Graphical', 'absolute', 1, 1],
+ ['users_console', 'Console', 'absolute', 1, 1],
+ ['users_remote', 'Remote', 'absolute', 1, 1]
+ ]
+ },
+ 'seats': {
+ 'options': [None, 'Logind Seats', 'seats', 'seats', 'logind.seats', 'line'],
+ 'lines': [
+ ['seats', 'Active Seats', 'absolute', 1, 1]
+ ]
+ }
+}
+
+
+class Service(ExecutableService):
+ def __init__(self, configuration=None, name=None):
+ ExecutableService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
+ self.command = LOGINCTL_COMMAND
+
+ def _get_data(self):
+ ret = {
+ 'sessions_graphical': 0,
+ 'sessions_console': 0,
+ 'sessions_remote': 0,
+ }
+ users = {
+ 'graphical': list(),
+ 'console': list(),
+ 'remote': list()
+ }
+ seats = list()
+ data = self._get_raw_data()
+
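+ # classify each session line by its field count: 3 fields indicate a
+ # remote session, 4 a graphical one (with a seat), 5 a console one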
+ for item in data:
+ fields = item.split()
+ if len(fields) == 3:
+ users['remote'].append(fields[2])
+ ret['sessions_remote'] += 1
+ elif len(fields) == 4:
+ users['graphical'].append(fields[2])
+ ret['sessions_graphical'] += 1
+ seats.append(fields[3])
+ elif len(fields) == 5:
+ users['console'].append(fields[2])
+ ret['sessions_console'] += 1
+ seats.append(fields[3])
+
+ ret['users_graphical'] = len(set(users['graphical']))
+ ret['users_console'] = len(set(users['console']))
+ ret['users_remote'] = len(set(users['remote']))
+ ret['seats'] = len(set(seats))
+
+ return ret
diff --git a/collectors/python.d.plugin/logind/logind.conf b/collectors/python.d.plugin/logind/logind.conf
new file mode 100644
index 0000000..01a859d
--- /dev/null
+++ b/collectors/python.d.plugin/logind/logind.conf
@@ -0,0 +1,60 @@
+# netdata python.d.plugin configuration for logind
+#
+# This file is in YAML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
diff --git a/collectors/python.d.plugin/megacli/Makefile.inc b/collectors/python.d.plugin/megacli/Makefile.inc
new file mode 100644
index 0000000..83680d7
--- /dev/null
+++ b/collectors/python.d.plugin/megacli/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += megacli/megacli.chart.py
+dist_pythonconfig_DATA += megacli/megacli.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += megacli/README.md megacli/Makefile.inc
+
diff --git a/collectors/python.d.plugin/megacli/README.md b/collectors/python.d.plugin/megacli/README.md
new file mode 100644
index 0000000..4fb7eb1
--- /dev/null
+++ b/collectors/python.d.plugin/megacli/README.md
@@ -0,0 +1,85 @@
+<!--
+title: "MegaRAID controller monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/megacli/README.md
+sidebar_label: "MegaRAID controllers"
+-->
+
+# MegaRAID controller monitoring with Netdata
+
+Collects adapter, physical drive, and battery statistics using the `megacli` command-line tool.
+
+Executed commands:
+
+- `sudo -n megacli -LDPDInfo -aAll`
+- `sudo -n megacli -AdpBbuCmd -a0`
+
+## Requirements
+
+The module uses `megacli`, which can only be executed by `root`. It uses
+`sudo` and assumes that it is configured such that the `netdata` user can execute `megacli` as root without a password.
+
+- Add the following line to your `/etc/sudoers` file, replacing
+ `/path/to/megacli` with the output of `which megacli` (a quick way to
+ verify the setup is shown in the sketch after this list):
+
+```bash
+netdata ALL=(root) NOPASSWD: /path/to/megacli
+```
+
+- Reset Netdata's systemd
+ unit [CapabilityBoundingSet](https://www.freedesktop.org/software/systemd/man/systemd.exec.html#Capabilities) (Linux
+ distributions with systemd)
+
+The default CapabilityBoundingSet doesn't allow using `sudo`, and is quite strict in general. Resetting it is not optimal, but it is the next-best solution, as `megacli` cannot otherwise be executed via `sudo`.
+
+
+As the `root` user, do the following:
+
+```bash
+mkdir /etc/systemd/system/netdata.service.d
+echo -e '[Service]\nCapabilityBoundingSet=~' | tee /etc/systemd/system/netdata.service.d/unset-capability-bounding-set.conf
+systemctl daemon-reload
+systemctl restart netdata.service
+```
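+
+To verify the setup, the sketch below mirrors the collector's own sudo
+check; run it as the `netdata` user (for example via `su -s /bin/sh netdata`).
+Any stderr output means passwordless sudo is not configured correctly.
+
+```python
+import subprocess
+
+# 'sudo -n -l' lists allowed commands without prompting for a password
+p = subprocess.run(['sudo', '-n', '-l'], capture_output=True, text=True)
+print('OK' if p.returncode == 0 and not p.stderr else p.stderr)
+```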
+
+## Charts
+
+- Adapter State
+- Physical Drives Media Errors
+- Physical Drives Predictive Failures
+- Battery Relative State of Charge
+- Battery Cycle Count
+
+## Enable the collector
+
+The `megacli` collector is disabled by default. To enable it, use `edit-config` from the
+Netdata [config directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`, to edit the `python.d.conf`
+file.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d.conf
+```
+
+Change the value of the `megacli` setting to `yes`. Save the file and restart the Netdata Agent
+with `sudo systemctl restart netdata`, or the appropriate method for your system.
+
+## Configuration
+
+Edit the `python.d/megacli.conf` configuration file using `edit-config` from the
+Netdata [config directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/megacli.conf
+```
+
+Battery stats are disabled by default. To enable them, set the following in `megacli.conf`:
+
+```yaml
+do_battery: yes
+```
+
+---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fmegacli%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/python.d.plugin/megacli/megacli.chart.py b/collectors/python.d.plugin/megacli/megacli.chart.py
new file mode 100644
index 0000000..ef35ff6
--- /dev/null
+++ b/collectors/python.d.plugin/megacli/megacli.chart.py
@@ -0,0 +1,278 @@
+# -*- coding: utf-8 -*-
+# Description: megacli netdata python.d module
+# Author: Ilya Mashchenko (ilyam8)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+
+import re
+
+from bases.FrameworkServices.ExecutableService import ExecutableService
+from bases.collection import find_binary
+
+disabled_by_default = True
+
+update_every = 5
+
+
+def adapter_charts(ads):
+ order = [
+ 'adapter_degraded',
+ ]
+
+ def dims(ad):
+ return [['adapter_{0}_degraded'.format(a.id), 'adapter {0}'.format(a.id)] for a in ad]
+
+ charts = {
+ 'adapter_degraded': {
+ 'options': [None, 'Adapter State', 'is degraded', 'adapter', 'megacli.adapter_degraded', 'line'],
+ 'lines': dims(ads)
+ },
+ }
+
+ return order, charts
+
+
+def pd_charts(pds):
+ order = [
+ 'pd_media_error',
+ 'pd_predictive_failure',
+ ]
+
+ def dims(k, pd):
+ return [['slot_{0}_{1}'.format(p.id, k), 'slot {0}'.format(p.id), 'incremental'] for p in pd]
+
+ charts = {
+ 'pd_media_error': {
+ 'options': [None, 'Physical Drives Media Errors', 'errors/s', 'pd', 'megacli.pd_media_error', 'line'],
+ 'lines': dims('media_error', pds)
+ },
+ 'pd_predictive_failure': {
+ 'options': [None, 'Physical Drives Predictive Failures', 'failures/s', 'pd',
+ 'megacli.pd_predictive_failure', 'line'],
+ 'lines': dims('predictive_failure', pds)
+ }
+ }
+
+ return order, charts
+
+
+def battery_charts(bats):
+ order = list()
+ charts = dict()
+
+ for b in bats:
+ order.append('bbu_{0}_relative_charge'.format(b.id))
+ charts.update(
+ {
+ 'bbu_{0}_relative_charge'.format(b.id): {
+ 'options': [None, 'Relative State of Charge', 'percentage', 'battery',
+ 'megacli.bbu_relative_charge', 'line'],
+ 'lines': [
+ ['bbu_{0}_relative_charge'.format(b.id), 'adapter {0}'.format(b.id)],
+ ]
+ }
+ }
+ )
+
+ for b in bats:
+ order.append('bbu_{0}_cycle_count'.format(b.id))
+ charts.update(
+ {
+ 'bbu_{0}_cycle_count'.format(b.id): {
+ 'options': [None, 'Cycle Count', 'cycle count', 'battery', 'megacli.bbu_cycle_count', 'line'],
+ 'lines': [
+ ['bbu_{0}_cycle_count'.format(b.id), 'adapter {0}'.format(b.id)],
+ ]
+ }
+ }
+ )
+
+ return order, charts
+
+
+RE_ADAPTER = re.compile(
+ r'Adapter #([0-9]+) State(?:\s+)?: ([a-zA-Z]+)'
+)
+
+RE_VD = re.compile(
+ r'Slot Number: ([0-9]+) Media Error Count: ([0-9]+) Predictive Failure Count: ([0-9]+)'
+)
+
+RE_BATTERY = re.compile(
+ r'BBU Capacity Info for Adapter: ([0-9]+) Relative State of Charge: ([0-9]+) % Cycle Count: ([0-9]+)'
+)
+
+
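+# megacli output spans many lines; each find_* helper keeps only the lines
+# carrying the relevant keys and joins them into one string, so a single
+# regex can extract every record's fields in one pass.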
+def find_adapters(d):
+ keys = ('Adapter #', 'State')
+ d = ' '.join(v.strip() for v in d if v.startswith(keys))
+ return [Adapter(*v) for v in RE_ADAPTER.findall(d)]
+
+
+def find_pds(d):
+ keys = ('Slot Number', 'Media Error Count', 'Predictive Failure Count')
+ d = ' '.join(v.strip() for v in d if v.startswith(keys))
+ return [PD(*v) for v in RE_VD.findall(d)]
+
+
+def find_batteries(d):
+ keys = ('BBU Capacity Info for Adapter', 'Relative State of Charge', 'Cycle Count')
+ d = ' '.join(v.strip() for v in d if v.strip().startswith(keys))
+ return [Battery(*v) for v in RE_BATTERY.findall(d)]
+
+
+class Adapter:
+ def __init__(self, n, state):
+ self.id = n
+ self.state = int(state == 'Degraded')
+
+ def data(self):
+ return {
+ 'adapter_{0}_degraded'.format(self.id): self.state,
+ }
+
+
+class PD:
+ def __init__(self, n, media_err, predict_fail):
+ self.id = n
+ self.media_err = media_err
+ self.predict_fail = predict_fail
+
+ def data(self):
+ return {
+ 'slot_{0}_media_error'.format(self.id): self.media_err,
+ 'slot_{0}_predictive_failure'.format(self.id): self.predict_fail,
+ }
+
+
+class Battery:
+ def __init__(self, adapt_id, rel_charge, cycle_count):
+ self.id = adapt_id
+ self.rel_charge = rel_charge
+ self.cycle_count = cycle_count
+
+ def data(self):
+ return {
+ 'bbu_{0}_relative_charge'.format(self.id): self.rel_charge,
+ 'bbu_{0}_cycle_count'.format(self.id): self.cycle_count,
+ }
+
+
+# TODO: hardcoded sudo...
+class Megacli:
+ def __init__(self):
+ self.s = find_binary('sudo')
+ self.m = find_binary('megacli') or find_binary('MegaCli') # Binary on FreeBSD is MegaCli
+ self.sudo_check = [self.s, '-n', '-l']
+ self.disk_info = [self.s, '-n', self.m, '-LDPDInfo', '-aAll', '-NoLog']
+ self.battery_info = [self.s, '-n', self.m, '-AdpBbuCmd', '-a0', '-NoLog']
+
+ def __bool__(self):
+ return bool(self.s and self.m)
+
+ def __nonzero__(self):
+ return self.__bool__()
+
+
+class Service(ExecutableService):
+ def __init__(self, configuration=None, name=None):
+ ExecutableService.__init__(self, configuration=configuration, name=name)
+ self.order = list()
+ self.definitions = dict()
+ self.do_battery = self.configuration.get('do_battery')
+ self.megacli = Megacli()
+
+ def check_sudo(self):
+ err = self._get_raw_data(command=self.megacli.sudo_check, stderr=True)
+ if err:
+ self.error(''.join(err))
+ return False
+ return True
+
+ def check_disk_info(self):
+ d = self._get_raw_data(command=self.megacli.disk_info)
+ if not d:
+ return False
+
+ ads = find_adapters(d)
+ pds = find_pds(d)
+
+ if not (ads and pds):
+ self.error('failed to parse "{0}" output'.format(' '.join(self.megacli.disk_info)))
+ return False
+
+ o, c = adapter_charts(ads)
+ self.order.extend(o)
+ self.definitions.update(c)
+
+ o, c = pd_charts(pds)
+ self.order.extend(o)
+ self.definitions.update(c)
+
+ return True
+
+ def check_battery(self):
+ d = self._get_raw_data(command=self.megacli.battery_info)
+ if not d:
+ return False
+
+ bats = find_batteries(d)
+
+ if not bats:
+ self.error('failed to parse "{0}" output'.format(' '.join(self.megacli.battery_info)))
+ return False
+
+ o, c = battery_charts(bats)
+ self.order.extend(o)
+ self.definitions.update(c)
+ return True
+
+ def check(self):
+ if not self.megacli:
+ self.error('can\'t locate "sudo" or "megacli" binary')
+ return False
+
+ if not (self.check_sudo() and self.check_disk_info()):
+ return False
+
+ if self.do_battery:
+ self.do_battery = self.check_battery()
+
+ return True
+
+ def get_data(self):
+ data = dict()
+
+ data.update(self.get_adapter_pd_data())
+
+ if self.do_battery:
+ data.update(self.get_battery_data())
+
+ return data or None
+
+ def get_adapter_pd_data(self):
+ raw = self._get_raw_data(command=self.megacli.disk_info)
+ data = dict()
+
+ if not raw:
+ return data
+
+ for a in find_adapters(raw):
+ data.update(a.data())
+
+ for p in find_pds(raw):
+ data.update(p.data())
+
+ return data
+
+ def get_battery_data(self):
+ raw = self._get_raw_data(command=self.megacli.battery_info)
+ data = dict()
+
+ if not raw:
+ return data
+
+ for b in find_batteries(raw):
+ data.update(b.data())
+
+ return data
diff --git a/collectors/python.d.plugin/megacli/megacli.conf b/collectors/python.d.plugin/megacli/megacli.conf
new file mode 100644
index 0000000..1af4292
--- /dev/null
+++ b/collectors/python.d.plugin/megacli/megacli.conf
@@ -0,0 +1,60 @@
+# netdata python.d.plugin configuration for megacli
+#
+# This file is in YAML format. Generally the format is:
+#
+# name: value
+#
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# In addition to the above, megacli also supports the following:
+#
+# do_battery: yes/no # default is no. Battery stats (adds additional call to megacli `megacli -AdpBbuCmd -a0`).
+#
+# ----------------------------------------------------------------------
+# uncomment the line below to collect battery statistics
+# do_battery: yes
diff --git a/collectors/python.d.plugin/memcached/Makefile.inc b/collectors/python.d.plugin/memcached/Makefile.inc
new file mode 100644
index 0000000..e603571
--- /dev/null
+++ b/collectors/python.d.plugin/memcached/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += memcached/memcached.chart.py
+dist_pythonconfig_DATA += memcached/memcached.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += memcached/README.md memcached/Makefile.inc
+
diff --git a/collectors/python.d.plugin/memcached/README.md b/collectors/python.d.plugin/memcached/README.md
new file mode 100644
index 0000000..abd93fd
--- /dev/null
+++ b/collectors/python.d.plugin/memcached/README.md
@@ -0,0 +1,99 @@
+<!--
+title: "Memcached monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/memcached/README.md
+sidebar_label: "Memcached"
+-->
+
+# Memcached monitoring with Netdata
+
+Collects memory-caching system performance metrics. It reads the server's response to the `stats` command ([stats interface](https://github.com/memcached/memcached/wiki/Commands#stats)).
+
+
+1. **Network** in kilobits/s
+
+ - read
+ - written
+
+2. **Connections** per second
+
+ - current
+ - rejected
+ - total
+
+3. **Items** in cluster
+
+ - current
+ - total
+
+4. **Evicted and Reclaimed** items
+
+ - evicted
+ - reclaimed
+
+5. **GET** requests/s
+
+ - hits
+ - misses
+
+6. **GET rate** in requests/s
+
+ - rate
+
+7. **SET rate** in requests/s
+
+ - rate
+
+8. **DELETE** requests/s
+
+ - hits
+ - misses
+
+9. **CAS** requests/s
+
+ - hits
+ - misses
+ - bad value
+
+10. **Increment** requests/s
+
+ - hits
+ - misses
+
+11. **Decrement** requests/s
+
+ - hits
+ - misses
+
+12. **Touch** requests/s
+
+ - hits
+ - misses
+
+13. **Touch rate** in requests/s
+
+ - rate
+
+## Configuration
+
+Edit the `python.d/memcached.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/memcached.conf
+```
+
+Sample:
+
+```yaml
+localtcpip:
+ name : 'local'
+ host : '127.0.0.1'
+ port : 24242
+```
+
+If no configuration is given, the module will attempt to connect to a memcached instance at `127.0.0.1:11211`.
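+
+For reference, the exchange the module performs is simple: it sends `stats`
+over the socket and parses the `STAT <name> <value>` lines of the reply. A
+minimal sketch (not the collector itself), assuming memcached listens on the
+default port:
+
+```python
+import socket
+
+s = socket.create_connection(('127.0.0.1', 11211), timeout=1)
+s.sendall(b'stats\r\n')
+raw = b''
+while not raw.endswith(b'END\r\n'):  # the reply is terminated by END
+    raw += s.recv(4096)
+s.close()
+
+stats = dict(
+    line.split()[1:3]  # 'STAT <name> <value>'
+    for line in raw.decode().splitlines()
+    if line.startswith('STAT')
+)
+print(stats.get('curr_connections'))
+```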
+
+---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fmemcached%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/python.d.plugin/memcached/memcached.chart.py b/collectors/python.d.plugin/memcached/memcached.chart.py
new file mode 100644
index 0000000..bb656a2
--- /dev/null
+++ b/collectors/python.d.plugin/memcached/memcached.chart.py
@@ -0,0 +1,197 @@
+# -*- coding: utf-8 -*-
+# Description: memcached netdata python.d module
+# Author: Pawel Krupa (paulfantom)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from bases.FrameworkServices.SocketService import SocketService
+
+ORDER = [
+ 'cache',
+ 'net',
+ 'connections',
+ 'items',
+ 'evicted_reclaimed',
+ 'get',
+ 'get_rate',
+ 'set_rate',
+ 'cas',
+ 'delete',
+ 'increment',
+ 'decrement',
+ 'touch',
+ 'touch_rate',
+]
+
+CHARTS = {
+ 'cache': {
+ 'options': [None, 'Cache Size', 'MiB', 'cache', 'memcached.cache', 'stacked'],
+ 'lines': [
+ ['avail', 'available', 'absolute', 1, 1 << 20],
+ ['used', 'used', 'absolute', 1, 1 << 20]
+ ]
+ },
+ 'net': {
+ 'options': [None, 'Network', 'kilobits/s', 'network', 'memcached.net', 'area'],
+ 'lines': [
+ ['bytes_read', 'in', 'incremental', 8, 1000],
+ ['bytes_written', 'out', 'incremental', -8, 1000],
+ ]
+ },
+ 'connections': {
+ 'options': [None, 'Connections', 'connections/s', 'connections', 'memcached.connections', 'line'],
+ 'lines': [
+ ['curr_connections', 'current', 'incremental'],
+ ['rejected_connections', 'rejected', 'incremental'],
+ ['total_connections', 'total', 'incremental']
+ ]
+ },
+ 'items': {
+ 'options': [None, 'Items', 'items', 'items', 'memcached.items', 'line'],
+ 'lines': [
+ ['curr_items', 'current', 'absolute'],
+ ['total_items', 'total', 'absolute']
+ ]
+ },
+ 'evicted_reclaimed': {
+ 'options': [None, 'Evicted and Reclaimed Items', 'items', 'items', 'memcached.evicted_reclaimed', 'line'],
+ 'lines': [
+ ['reclaimed', 'reclaimed', 'absolute'],
+ ['evictions', 'evicted', 'absolute']
+ ]
+ },
+ 'get': {
+ 'options': [None, 'Requests', 'requests', 'get ops', 'memcached.get', 'stacked'],
+ 'lines': [
+ ['get_hits', 'hits', 'percent-of-absolute-row'],
+ ['get_misses', 'misses', 'percent-of-absolute-row']
+ ]
+ },
+ 'get_rate': {
+ 'options': [None, 'Rate', 'requests/s', 'get ops', 'memcached.get_rate', 'line'],
+ 'lines': [
+ ['cmd_get', 'rate', 'incremental']
+ ]
+ },
+ 'set_rate': {
+ 'options': [None, 'Rate', 'requests/s', 'set ops', 'memcached.set_rate', 'line'],
+ 'lines': [
+ ['cmd_set', 'rate', 'incremental']
+ ]
+ },
+ 'delete': {
+ 'options': [None, 'Requests', 'requests', 'delete ops', 'memcached.delete', 'stacked'],
+ 'lines': [
+ ['delete_hits', 'hits', 'percent-of-absolute-row'],
+ ['delete_misses', 'misses', 'percent-of-absolute-row'],
+ ]
+ },
+ 'cas': {
+ 'options': [None, 'Requests', 'requests', 'check and set ops', 'memcached.cas', 'stacked'],
+ 'lines': [
+ ['cas_hits', 'hits', 'percent-of-absolute-row'],
+ ['cas_misses', 'misses', 'percent-of-absolute-row'],
+ ['cas_badval', 'bad value', 'percent-of-absolute-row']
+ ]
+ },
+ 'increment': {
+ 'options': [None, 'Requests', 'requests', 'increment ops', 'memcached.increment', 'stacked'],
+ 'lines': [
+ ['incr_hits', 'hits', 'percent-of-absolute-row'],
+ ['incr_misses', 'misses', 'percent-of-absolute-row']
+ ]
+ },
+ 'decrement': {
+ 'options': [None, 'Requests', 'requests', 'decrement ops', 'memcached.decrement', 'stacked'],
+ 'lines': [
+ ['decr_hits', 'hits', 'percent-of-absolute-row'],
+ ['decr_misses', 'misses', 'percent-of-absolute-row']
+ ]
+ },
+ 'touch': {
+ 'options': [None, 'Requests', 'requests', 'touch ops', 'memcached.touch', 'stacked'],
+ 'lines': [
+ ['touch_hits', 'hits', 'percent-of-absolute-row'],
+ ['touch_misses', 'misses', 'percent-of-absolute-row']
+ ]
+ },
+ 'touch_rate': {
+ 'options': [None, 'Rate', 'requests/s', 'touch ops', 'memcached.touch_rate', 'line'],
+ 'lines': [
+ ['cmd_touch', 'rate', 'incremental']
+ ]
+ }
+}
+
+
+class Service(SocketService):
+ def __init__(self, configuration=None, name=None):
+ SocketService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
+ self.request = 'stats\r\n'
+ self.host = 'localhost'
+ self.port = 11211
+ self._keep_alive = True
+ self.unix_socket = None
+
+ def _get_data(self):
+ """
+ Get data from socket
+ :return: dict
+ """
+ response = self._get_raw_data()
+ if response is None:
+ # error has already been logged
+ return None
+
+ if response.startswith('ERROR'):
+ self.error('received ERROR')
+ return None
+
+ try:
+ parsed = response.split('\n')
+ except AttributeError:
+ self.error('response is invalid/empty')
+ return None
+
+ # split the response
+ data = {}
+ for line in parsed:
+ if line.startswith('STAT'):
+ try:
+ t = line[5:].split(' ')
+ data[t[0]] = t[1]
+ except (IndexError, ValueError):
+ self.debug('invalid line received: ' + str(line))
+
+ if not data:
+ self.error("received data doesn't have any records")
+ return None
+
+ # custom calculations
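+ # 'avail' is the configured memory limit (limit_maxbytes) minus the bytes currently in use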
+ try:
+ data['avail'] = int(data['limit_maxbytes']) - int(data['bytes'])
+ data['used'] = int(data['bytes'])
+ except (KeyError, ValueError, TypeError):
+ pass
+
+ return data
+
+ def _check_raw_data(self, data):
+ if data.endswith('END\r\n'):
+ self.debug('received full response from memcached')
+ return True
+
+ self.debug('waiting more data from memcached')
+ return False
+
+ def check(self):
+ """
+ Parse configuration, check if memcached is available
+ :return: boolean
+ """
+ self._parse_config()
+ data = self._get_data()
+ if data is None:
+ return False
+ return True
diff --git a/collectors/python.d.plugin/memcached/memcached.conf b/collectors/python.d.plugin/memcached/memcached.conf
new file mode 100644
index 0000000..3286b46
--- /dev/null
+++ b/collectors/python.d.plugin/memcached/memcached.conf
@@ -0,0 +1,90 @@
+# netdata python.d.plugin configuration for memcached
+#
+# This file is in YAML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# In addition to the above, memcached also supports the following:
+#
+# socket: 'path/to/memcached.sock'
+#
+# or
+# host: 'IP or HOSTNAME' # the host to connect to
+# port: PORT # the port to connect to
+#
+#
+
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+
+localhost:
+ name : 'local'
+ host : 'localhost'
+ port : 11211
+
+localipv4:
+ name : 'local'
+ host : '127.0.0.1'
+ port : 11211
+
+localipv6:
+ name : 'local'
+ host : '::1'
+ port : 11211
+
diff --git a/collectors/python.d.plugin/mongodb/Makefile.inc b/collectors/python.d.plugin/mongodb/Makefile.inc
new file mode 100644
index 0000000..784945a
--- /dev/null
+++ b/collectors/python.d.plugin/mongodb/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += mongodb/mongodb.chart.py
+dist_pythonconfig_DATA += mongodb/mongodb.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += mongodb/README.md mongodb/Makefile.inc
+
diff --git a/collectors/python.d.plugin/mongodb/README.md b/collectors/python.d.plugin/mongodb/README.md
new file mode 100644
index 0000000..c0df123
--- /dev/null
+++ b/collectors/python.d.plugin/mongodb/README.md
@@ -0,0 +1,210 @@
+<!--
+title: "MongoDB monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/mongodb/README.md
+sidebar_label: "MongoDB"
+-->
+
+# MongoDB monitoring with Netdata
+
+Monitors performance and health metrics of MongoDB.
+
+## Requirements
+
+- `python-pymongo` package v2.4+.
+
+You need to install it manually (e.g. `pip install pymongo`).
+
+The number of charts depends on the MongoDB version, storage engine, and other features (replication):
+
+1. **Read requests**:
+
+ - query
+ - getmore (operation the cursor executes to get additional data from query)
+
+2. **Write requests**:
+
+ - insert
+ - delete
+ - update
+
+3. **Active clients**:
+
+ - readers (number of clients with read operations in progress or queued)
+ - writers (number of clients with write operations in progress or queued)
+
+4. **Journal transactions**:
+
+ - commits (count of transactions that have been written to the journal)
+
+5. **Data written to the journal**:
+
+ - volume (volume of data)
+
+6. **Background flush** (MMAPv1):
+
+ - average ms (average time taken by flushes to execute)
+ - last ms (time taken by the last flush)
+
+7. **Read tickets** (WiredTiger):
+
+ - in use (number of read tickets in use)
+ - available (number of available read tickets remaining)
+
+8. **Write tickets** (WiredTiger):
+
+ - in use (number of write tickets in use)
+ - available (number of available write tickets remaining)
+
+9. **Cursors**:
+
+ - opened (number of cursors currently opened by MongoDB for clients)
+ - timedOut (number of cursors that have timed out)
+ - noTimeout (number of open cursors with timeout disabled)
+
+10. **Connections**:
+
+ - connected (number of clients currently connected to the database server)
+ - unused (number of unused connections available for new clients)
+
+11. **Memory usage metrics**:
+
+ - virtual
+ - resident (amount of memory used by the database process)
+ - mapped
+ - non mapped
+
+12. **Page faults**:
+
+ - page faults (number of times MongoDB had to read from disk)
+
+13. **Cache metrics** (WiredTiger):
+
+ - percentage of bytes currently in the cache (amount of space taken by cached data)
+ - percentage of tracked dirty bytes in the cache (amount of space taken by dirty data)
+
+14. **Pages evicted from cache** (WiredTiger):
+
+ - modified
+ - unmodified
+
+15. **Queued requests**:
+
+ - readers (number of read requests currently queued)
+ - writers (number of write requests currently queued)
+
+16. **Errors**:
+
+ - msg (number of message assertions raised)
+ - warning (number of warning assertions raised)
+ - regular (number of regular assertions raised)
+ - user (number of assertions corresponding to errors generated by users)
+
+17. **Storage metrics** (one chart for every database)
+
+ - dataSize (size of all documents + padding in the database)
+ - indexSize (size of all indexes in the database)
+ - storageSize (size of all extents in the database)
+
+18. **Documents in the database** (one chart for all databases)
+
+ - documents (number of objects in the database among all the collections)
+
+19. **tcmalloc metrics**
+
+ - central cache free
+ - current total thread cache
+ - pageheap free
+ - pageheap unmapped
+ - thread cache free
+ - transfer cache free
+ - heap size
+
+20. **Commands total/failed rate**
+
+ - count
+ - createIndexes
+ - delete
+ - eval
+ - findAndModify
+ - insert
+
+21. **Locks metrics** (acquireCount metrics - number of times the lock was acquired in the specified mode)
+
+ - Global lock
+ - Database lock
+ - Collection lock
+ - Metadata lock
+ - oplog lock
+
+22. **Replica set members state**
+
+ - state
+
+23. **Oplog window**
+
+ - window (interval of time between the oldest and the latest entries in the oplog)
+
+24. **Replication lag**
+
+ - member (time when last entry from the oplog was applied for every member)
+
+25. **Replication set member heartbeat latency**
+
+ - member (time when last heartbeat was received from replica set member)
+
+## Prerequisites
+
+Create a read-only user for Netdata in the admin database.
+
+1. Authenticate as the admin user.
+
+```
+use admin
+db.auth("admin", "<MONGODB_ADMIN_PASSWORD>")
+```
+
+2. Create a user.
+
+```
+# MongoDB 2.x.
+db.addUser("netdata", "<UNIQUE_PASSWORD>", true)
+
+# MongoDB 3.x or higher.
+db.createUser({
+ "user":"netdata",
+ "pwd": "<UNIQUE_PASSWORD>",
+ "roles" : [
+ {role: 'read', db: 'admin' },
+ {role: 'clusterMonitor', db: 'admin'},
+ {role: 'read', db: 'local' }
+ ]
+})
+```
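+
+To verify that the new user can read server metrics, a quick check with
+`pymongo` (not part of the collector; replace the placeholder password):
+
+```python
+from pymongo import MongoClient
+
+client = MongoClient('mongodb://netdata:<UNIQUE_PASSWORD>@127.0.0.1:27017/admin')
+# serverStatus is the source of most of the charted metrics
+print(client.admin.command('serverStatus')['uptime'])
+```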
+
+## Configuration
+
+Edit the `python.d/mongodb.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/mongodb.conf
+```
+
+Sample:
+
+```yaml
+local:
+ name : 'local'
+ authdb: 'admin'
+ host : '127.0.0.1'
+ port : 27017
+ user : 'netdata'
+ pass : 'netdata'
+```
+
+If no configuration is given, the module will attempt to connect to a MongoDB daemon at `127.0.0.1:27017`.
+
+---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fmongodb%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/python.d.plugin/mongodb/mongodb.chart.py b/collectors/python.d.plugin/mongodb/mongodb.chart.py
new file mode 100644
index 0000000..2e6fb22
--- /dev/null
+++ b/collectors/python.d.plugin/mongodb/mongodb.chart.py
@@ -0,0 +1,786 @@
+# -*- coding: utf-8 -*-
+# Description: mongodb netdata python.d module
+# Author: ilyam8
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import ssl
+
+from copy import deepcopy
+from datetime import datetime
+from sys import exc_info
+
+try:
+ from pymongo import MongoClient, ASCENDING, DESCENDING
+ from pymongo.errors import PyMongoError
+
+ PYMONGO = True
+except ImportError:
+ PYMONGO = False
+
+from bases.FrameworkServices.SimpleService import SimpleService
+
+REPL_SET_STATES = [
+ ('1', 'primary'),
+ ('8', 'down'),
+ ('2', 'secondary'),
+ ('3', 'recovering'),
+ ('5', 'startup2'),
+ ('4', 'fatal'),
+ ('7', 'arbiter'),
+ ('6', 'unknown'),
+ ('9', 'rollback'),
+ ('10', 'removed'),
+ ('0', 'startup')
+]
+
+
+def multiply_by_100(value):
+ return value * 100
+
+
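+# Each metric is a (dotted path into the server status output, optional dimension rename, optional transform function) tuple.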
+DEFAULT_METRICS = [
+ ('opcounters.delete', None, None),
+ ('opcounters.update', None, None),
+ ('opcounters.insert', None, None),
+ ('opcounters.query', None, None),
+ ('opcounters.getmore', None, None),
+ ('globalLock.activeClients.readers', 'activeClients_readers', None),
+ ('globalLock.activeClients.writers', 'activeClients_writers', None),
+ ('connections.available', 'connections_available', None),
+ ('connections.current', 'connections_current', None),
+ ('mem.mapped', None, None),
+ ('mem.resident', None, None),
+ ('mem.virtual', None, None),
+ ('globalLock.currentQueue.readers', 'currentQueue_readers', None),
+ ('globalLock.currentQueue.writers', 'currentQueue_writers', None),
+ ('asserts.msg', None, None),
+ ('asserts.regular', None, None),
+ ('asserts.user', None, None),
+ ('asserts.warning', None, None),
+ ('extra_info.page_faults', None, None),
+ ('metrics.record.moves', None, None),
+ ('backgroundFlushing.average_ms', None, multiply_by_100),
+ ('backgroundFlushing.last_ms', None, multiply_by_100),
+ ('backgroundFlushing.flushes', None, multiply_by_100),
+ ('metrics.cursor.timedOut', None, None),
+ ('metrics.cursor.open.total', 'cursor_total', None),
+ ('metrics.cursor.open.noTimeout', None, None),
+ ('cursors.timedOut', None, None),
+ ('cursors.totalOpen', 'cursor_total', None)
+]
+
+DUR = [
+ ('dur.commits', None, None),
+ ('dur.journaledMB', None, multiply_by_100)
+]
+
+WIREDTIGER = [
+ ('wiredTiger.concurrentTransactions.read.available', 'wiredTigerRead_available', None),
+ ('wiredTiger.concurrentTransactions.read.out', 'wiredTigerRead_out', None),
+ ('wiredTiger.concurrentTransactions.write.available', 'wiredTigerWrite_available', None),
+ ('wiredTiger.concurrentTransactions.write.out', 'wiredTigerWrite_out', None),
+ ('wiredTiger.cache.bytes currently in the cache', None, None),
+ ('wiredTiger.cache.tracked dirty bytes in the cache', None, None),
+ ('wiredTiger.cache.maximum bytes configured', None, None),
+ ('wiredTiger.cache.unmodified pages evicted', 'unmodified', None),
+ ('wiredTiger.cache.modified pages evicted', 'modified', None)
+]
+
+TCMALLOC = [
+ ('tcmalloc.generic.current_allocated_bytes', None, None),
+ ('tcmalloc.generic.heap_size', None, None),
+ ('tcmalloc.tcmalloc.central_cache_free_bytes', None, None),
+ ('tcmalloc.tcmalloc.current_total_thread_cache_bytes', None, None),
+ ('tcmalloc.tcmalloc.pageheap_free_bytes', None, None),
+ ('tcmalloc.tcmalloc.pageheap_unmapped_bytes', None, None),
+ ('tcmalloc.tcmalloc.thread_cache_free_bytes', None, None),
+ ('tcmalloc.tcmalloc.transfer_cache_free_bytes', None, None)
+]
+
+COMMANDS = [
+ ('metrics.commands.count.total', 'count_total', None),
+ ('metrics.commands.createIndexes.total', 'createIndexes_total', None),
+ ('metrics.commands.delete.total', 'delete_total', None),
+ ('metrics.commands.eval.total', 'eval_total', None),
+ ('metrics.commands.findAndModify.total', 'findAndModify_total', None),
+ ('metrics.commands.insert.total', 'insert_total', None),
+    ('metrics.commands.update.total', 'update_total', None),
+ ('metrics.commands.count.failed', 'count_failed', None),
+ ('metrics.commands.createIndexes.failed', 'createIndexes_failed', None),
+ ('metrics.commands.delete.failed', 'delete_failed', None),
+ ('metrics.commands.eval.failed', 'eval_failed', None),
+ ('metrics.commands.findAndModify.failed', 'findAndModify_failed', None),
+ ('metrics.commands.insert.failed', 'insert_failed', None),
+    ('metrics.commands.update.failed', 'update_failed', None)
+]
+
+LOCKS = [
+ ('locks.Collection.acquireCount.R', 'Collection_R', None),
+ ('locks.Collection.acquireCount.r', 'Collection_r', None),
+ ('locks.Collection.acquireCount.W', 'Collection_W', None),
+ ('locks.Collection.acquireCount.w', 'Collection_w', None),
+ ('locks.Database.acquireCount.R', 'Database_R', None),
+ ('locks.Database.acquireCount.r', 'Database_r', None),
+ ('locks.Database.acquireCount.W', 'Database_W', None),
+ ('locks.Database.acquireCount.w', 'Database_w', None),
+ ('locks.Global.acquireCount.R', 'Global_R', None),
+ ('locks.Global.acquireCount.r', 'Global_r', None),
+ ('locks.Global.acquireCount.W', 'Global_W', None),
+ ('locks.Global.acquireCount.w', 'Global_w', None),
+ ('locks.Metadata.acquireCount.R', 'Metadata_R', None),
+ ('locks.Metadata.acquireCount.w', 'Metadata_w', None),
+ ('locks.oplog.acquireCount.r', 'oplog_r', None),
+ ('locks.oplog.acquireCount.w', 'oplog_w', None)
+]
+
+DBSTATS = [
+ 'dataSize',
+ 'indexSize',
+ 'storageSize',
+ 'objects'
+]
+
+# charts order (can be overridden if you want fewer charts, or a different order)
+ORDER = [
+ 'read_operations',
+ 'write_operations',
+ 'active_clients',
+ 'journaling_transactions',
+ 'journaling_volume',
+ 'background_flush_average',
+ 'background_flush_last',
+ 'background_flush_rate',
+ 'wiredtiger_read',
+ 'wiredtiger_write',
+ 'cursors',
+ 'connections',
+ 'memory',
+ 'page_faults',
+ 'queued_requests',
+ 'record_moves',
+ 'wiredtiger_cache',
+ 'wiredtiger_pages_evicted',
+ 'asserts',
+ 'locks_collection',
+ 'locks_database',
+ 'locks_global',
+ 'locks_metadata',
+ 'locks_oplog',
+ 'dbstats_objects',
+ 'tcmalloc_generic',
+ 'tcmalloc_metrics',
+ 'command_total_rate',
+ 'command_failed_rate'
+]
+
+CHARTS = {
+ 'read_operations': {
+ 'options': [None, 'Received read requests', 'requests/s', 'throughput metrics',
+ 'mongodb.read_operations', 'line'],
+ 'lines': [
+ ['query', None, 'incremental'],
+ ['getmore', None, 'incremental']
+ ]
+ },
+ 'write_operations': {
+ 'options': [None, 'Received write requests', 'requests/s', 'throughput metrics',
+ 'mongodb.write_operations', 'line'],
+ 'lines': [
+ ['insert', None, 'incremental'],
+ ['update', None, 'incremental'],
+ ['delete', None, 'incremental']
+ ]
+ },
+ 'active_clients': {
+ 'options': [None, 'Clients with read or write operations in progress or queued', 'clients',
+ 'throughput metrics', 'mongodb.active_clients', 'line'],
+ 'lines': [
+ ['activeClients_readers', 'readers', 'absolute'],
+ ['activeClients_writers', 'writers', 'absolute']
+ ]
+ },
+ 'journaling_transactions': {
+ 'options': [None, 'Transactions that have been written to the journal', 'commits',
+ 'database performance', 'mongodb.journaling_transactions', 'line'],
+ 'lines': [
+ ['commits', None, 'absolute']
+ ]
+ },
+ 'journaling_volume': {
+ 'options': [None, 'Volume of data written to the journal', 'MiB', 'database performance',
+ 'mongodb.journaling_volume', 'line'],
+ 'lines': [
+ ['journaledMB', 'volume', 'absolute', 1, 100]
+ ]
+ },
+ 'background_flush_average': {
+ 'options': [None, 'Average time taken by flushes to execute', 'milliseconds', 'database performance',
+ 'mongodb.background_flush_average', 'line'],
+ 'lines': [
+ ['average_ms', 'time', 'absolute', 1, 100]
+ ]
+ },
+ 'background_flush_last': {
+ 'options': [None, 'Time taken by the last flush operation to execute', 'milliseconds', 'database performance',
+ 'mongodb.background_flush_last', 'line'],
+ 'lines': [
+ ['last_ms', 'time', 'absolute', 1, 100]
+ ]
+ },
+ 'background_flush_rate': {
+ 'options': [None, 'Flushes rate', 'flushes', 'database performance', 'mongodb.background_flush_rate', 'line'],
+ 'lines': [
+ ['flushes', 'flushes', 'incremental', 1, 1]
+ ]
+ },
+ 'wiredtiger_read': {
+ 'options': [None, 'Read tickets in use and remaining', 'tickets', 'database performance',
+ 'mongodb.wiredtiger_read', 'stacked'],
+ 'lines': [
+ ['wiredTigerRead_available', 'available', 'absolute', 1, 1],
+ ['wiredTigerRead_out', 'inuse', 'absolute', 1, 1]
+ ]
+ },
+ 'wiredtiger_write': {
+ 'options': [None, 'Write tickets in use and remaining', 'tickets', 'database performance',
+ 'mongodb.wiredtiger_write', 'stacked'],
+ 'lines': [
+ ['wiredTigerWrite_available', 'available', 'absolute', 1, 1],
+ ['wiredTigerWrite_out', 'inuse', 'absolute', 1, 1]
+ ]
+ },
+ 'cursors': {
+        'options': [None, 'Currently open cursors, cursors with timeout disabled and timed out cursors',
+ 'cursors', 'database performance', 'mongodb.cursors', 'stacked'],
+ 'lines': [
+            ['cursor_total', 'open', 'absolute', 1, 1],
+ ['noTimeout', None, 'absolute', 1, 1],
+ ['timedOut', None, 'incremental', 1, 1]
+ ]
+ },
+ 'connections': {
+ 'options': [None, 'Currently connected clients and unused connections', 'connections',
+ 'resource utilization', 'mongodb.connections', 'stacked'],
+ 'lines': [
+ ['connections_available', 'unused', 'absolute', 1, 1],
+ ['connections_current', 'connected', 'absolute', 1, 1]
+ ]
+ },
+ 'memory': {
+ 'options': [None, 'Memory metrics', 'MiB', 'resource utilization', 'mongodb.memory', 'stacked'],
+ 'lines': [
+ ['virtual', None, 'absolute', 1, 1],
+ ['resident', None, 'absolute', 1, 1],
+ ['nonmapped', None, 'absolute', 1, 1],
+ ['mapped', None, 'absolute', 1, 1]
+ ]
+ },
+ 'page_faults': {
+ 'options': [None, 'Number of times MongoDB had to fetch data from disk', 'request/s',
+ 'resource utilization', 'mongodb.page_faults', 'line'],
+ 'lines': [
+ ['page_faults', None, 'incremental', 1, 1]
+ ]
+ },
+ 'queued_requests': {
+ 'options': [None, 'Currently queued read and write requests', 'requests', 'resource saturation',
+ 'mongodb.queued_requests', 'line'],
+ 'lines': [
+ ['currentQueue_readers', 'readers', 'absolute', 1, 1],
+ ['currentQueue_writers', 'writers', 'absolute', 1, 1]
+ ]
+ },
+ 'record_moves': {
+ 'options': [None, 'Number of times documents had to be moved on-disk', 'number',
+ 'resource saturation', 'mongodb.record_moves', 'line'],
+ 'lines': [
+ ['moves', None, 'incremental', 1, 1]
+ ]
+ },
+ 'asserts': {
+ 'options': [
+ None,
+            'Number of raised assertions: message, warning, regular and user (errors generated by users)',
+ 'number', 'errors (asserts)', 'mongodb.asserts', 'line'],
+ 'lines': [
+ ['msg', None, 'incremental', 1, 1],
+ ['warning', None, 'incremental', 1, 1],
+ ['regular', None, 'incremental', 1, 1],
+ ['user', None, 'incremental', 1, 1]
+ ]
+ },
+ 'wiredtiger_cache': {
+        'options': [None, 'Percentage of the WiredTiger cache in use and percentage of dirty bytes in the cache',
+ 'percentage', 'resource utilization', 'mongodb.wiredtiger_cache', 'stacked'],
+ 'lines': [
+ ['wiredTiger_percent_clean', 'inuse', 'absolute', 1, 1000],
+ ['wiredTiger_percent_dirty', 'dirty', 'absolute', 1, 1000]
+ ]
+ },
+ 'wiredtiger_pages_evicted': {
+ 'options': [None, 'Pages evicted from the cache',
+ 'pages', 'resource utilization', 'mongodb.wiredtiger_pages_evicted', 'stacked'],
+ 'lines': [
+ ['unmodified', None, 'absolute', 1, 1],
+ ['modified', None, 'absolute', 1, 1]
+ ]
+ },
+ 'dbstats_objects': {
+        'options': [None, 'Number of documents in the database across all collections', 'documents',
+ 'storage size metrics', 'mongodb.dbstats_objects', 'stacked'],
+ 'lines': []
+ },
+ 'tcmalloc_generic': {
+ 'options': [None, 'Tcmalloc generic metrics', 'MiB', 'tcmalloc', 'mongodb.tcmalloc_generic', 'stacked'],
+ 'lines': [
+ ['current_allocated_bytes', 'allocated', 'absolute', 1, 1 << 20],
+ ['heap_size', 'heap_size', 'absolute', 1, 1 << 20]
+ ]
+ },
+ 'tcmalloc_metrics': {
+ 'options': [None, 'Tcmalloc metrics', 'KiB', 'tcmalloc', 'mongodb.tcmalloc_metrics', 'stacked'],
+ 'lines': [
+ ['central_cache_free_bytes', 'central_cache_free', 'absolute', 1, 1024],
+ ['current_total_thread_cache_bytes', 'current_total_thread_cache', 'absolute', 1, 1024],
+ ['pageheap_free_bytes', 'pageheap_free', 'absolute', 1, 1024],
+ ['pageheap_unmapped_bytes', 'pageheap_unmapped', 'absolute', 1, 1024],
+ ['thread_cache_free_bytes', 'thread_cache_free', 'absolute', 1, 1024],
+ ['transfer_cache_free_bytes', 'transfer_cache_free', 'absolute', 1, 1024]
+ ]
+ },
+ 'command_total_rate': {
+ 'options': [None, 'Commands total rate', 'commands/s', 'commands', 'mongodb.command_total_rate', 'stacked'],
+ 'lines': [
+ ['count_total', 'count', 'incremental', 1, 1],
+ ['createIndexes_total', 'createIndexes', 'incremental', 1, 1],
+ ['delete_total', 'delete', 'incremental', 1, 1],
+ ['eval_total', 'eval', 'incremental', 1, 1],
+ ['findAndModify_total', 'findAndModify', 'incremental', 1, 1],
+ ['insert_total', 'insert', 'incremental', 1, 1],
+ ['update_total', 'update', 'incremental', 1, 1]
+ ]
+ },
+ 'command_failed_rate': {
+ 'options': [None, 'Commands failed rate', 'commands/s', 'commands', 'mongodb.command_failed_rate', 'stacked'],
+ 'lines': [
+ ['count_failed', 'count', 'incremental', 1, 1],
+ ['createIndexes_failed', 'createIndexes', 'incremental', 1, 1],
+ ['delete_failed', 'delete', 'incremental', 1, 1],
+ ['eval_failed', 'eval', 'incremental', 1, 1],
+ ['findAndModify_failed', 'findAndModify', 'incremental', 1, 1],
+ ['insert_failed', 'insert', 'incremental', 1, 1],
+ ['update_failed', 'update', 'incremental', 1, 1]
+ ]
+ },
+ 'locks_collection': {
+ 'options': [None, 'Collection lock. Number of times the lock was acquired in the specified mode',
+ 'locks', 'locks metrics', 'mongodb.locks_collection', 'stacked'],
+ 'lines': [
+ ['Collection_R', 'shared', 'incremental'],
+ ['Collection_W', 'exclusive', 'incremental'],
+ ['Collection_r', 'intent_shared', 'incremental'],
+ ['Collection_w', 'intent_exclusive', 'incremental']
+ ]
+ },
+ 'locks_database': {
+ 'options': [None, 'Database lock. Number of times the lock was acquired in the specified mode',
+ 'locks', 'locks metrics', 'mongodb.locks_database', 'stacked'],
+ 'lines': [
+ ['Database_R', 'shared', 'incremental'],
+ ['Database_W', 'exclusive', 'incremental'],
+ ['Database_r', 'intent_shared', 'incremental'],
+ ['Database_w', 'intent_exclusive', 'incremental']
+ ]
+ },
+ 'locks_global': {
+ 'options': [None, 'Global lock. Number of times the lock was acquired in the specified mode',
+ 'locks', 'locks metrics', 'mongodb.locks_global', 'stacked'],
+ 'lines': [
+ ['Global_R', 'shared', 'incremental'],
+ ['Global_W', 'exclusive', 'incremental'],
+ ['Global_r', 'intent_shared', 'incremental'],
+ ['Global_w', 'intent_exclusive', 'incremental']
+ ]
+ },
+ 'locks_metadata': {
+ 'options': [None, 'Metadata lock. Number of times the lock was acquired in the specified mode',
+ 'locks', 'locks metrics', 'mongodb.locks_metadata', 'stacked'],
+ 'lines': [
+ ['Metadata_R', 'shared', 'incremental'],
+ ['Metadata_w', 'intent_exclusive', 'incremental']
+ ]
+ },
+ 'locks_oplog': {
+ 'options': [None, 'Lock on the oplog. Number of times the lock was acquired in the specified mode',
+ 'locks', 'locks metrics', 'mongodb.locks_oplog', 'stacked'],
+ 'lines': [
+ ['oplog_r', 'intent_shared', 'incremental'],
+ ['oplog_w', 'intent_exclusive', 'incremental']
+ ]
+ }
+}
+
+DEFAULT_HOST = '127.0.0.1'
+DEFAULT_PORT = 27017
+DEFAULT_TIMEOUT = 100
+DEFAULT_AUTHDB = 'admin'
+
+CONN_PARAM_HOST = 'host'
+CONN_PARAM_PORT = 'port'
+CONN_PARAM_SERVER_SELECTION_TIMEOUT_MS = 'serverselectiontimeoutms'
+CONN_PARAM_SSL_SSL = 'ssl'
+CONN_PARAM_SSL_CERT_REQS = 'ssl_cert_reqs'
+CONN_PARAM_SSL_CA_CERTS = 'ssl_ca_certs'
+CONN_PARAM_SSL_CRL_FILE = 'ssl_crlfile'
+CONN_PARAM_SSL_CERT_FILE = 'ssl_certfile'
+CONN_PARAM_SSL_KEY_FILE = 'ssl_keyfile'
+CONN_PARAM_SSL_PEM_PASSPHRASE = 'ssl_pem_passphrase'
+
+
+class Service(SimpleService):
+ def __init__(self, configuration=None, name=None):
+ SimpleService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER[:]
+ self.definitions = deepcopy(CHARTS)
+ self.authdb = self.configuration.get('authdb', DEFAULT_AUTHDB)
+ self.user = self.configuration.get('user')
+ self.password = self.configuration.get('pass')
+ self.metrics_to_collect = deepcopy(DEFAULT_METRICS)
+ self.connection = None
+ self.do_replica = None
+ self.databases = list()
+
+ def check(self):
+ if not PYMONGO:
+ self.error('Pymongo package v2.4+ is needed to use mongodb.chart.py')
+ return False
+ self.connection, server_status, error = self._create_connection()
+ if error:
+ self.error(error)
+ return False
+
+ self.build_metrics_to_collect_(server_status)
+
+ try:
+ data = self._get_data()
+ except (LookupError, SyntaxError, AttributeError):
+ self.error('Type: %s, error: %s' % (str(exc_info()[0]), str(exc_info()[1])))
+ return False
+ if isinstance(data, dict) and data:
+ self._data_from_check = data
+ self.create_charts_(server_status)
+ return True
+ self.error('_get_data() returned no data or type is not <dict>')
+ return False
+
+ def build_metrics_to_collect_(self, server_status):
+
+ self.do_replica = 'repl' in server_status
+ if 'dur' in server_status:
+ self.metrics_to_collect.extend(DUR)
+ if 'tcmalloc' in server_status:
+ self.metrics_to_collect.extend(TCMALLOC)
+ if 'commands' in server_status['metrics']:
+ self.metrics_to_collect.extend(COMMANDS)
+ if 'wiredTiger' in server_status:
+ self.metrics_to_collect.extend(WIREDTIGER)
+ has_locks = 'locks' in server_status
+ if has_locks and 'Collection' in server_status['locks']:
+ self.metrics_to_collect.extend(LOCKS)
+
+ def create_charts_(self, server_status):
+
+ if 'dur' not in server_status:
+ self.order.remove('journaling_transactions')
+ self.order.remove('journaling_volume')
+
+ if 'backgroundFlushing' not in server_status:
+ self.order.remove('background_flush_average')
+ self.order.remove('background_flush_last')
+ self.order.remove('background_flush_rate')
+
+ if 'wiredTiger' not in server_status:
+ self.order.remove('wiredtiger_write')
+ self.order.remove('wiredtiger_read')
+ self.order.remove('wiredtiger_cache')
+
+ if 'tcmalloc' not in server_status:
+ self.order.remove('tcmalloc_generic')
+ self.order.remove('tcmalloc_metrics')
+
+ if 'commands' not in server_status['metrics']:
+ self.order.remove('command_total_rate')
+ self.order.remove('command_failed_rate')
+
+ has_no_locks = 'locks' not in server_status
+ if has_no_locks or 'Collection' not in server_status['locks']:
+ self.order.remove('locks_collection')
+ self.order.remove('locks_database')
+ self.order.remove('locks_global')
+ self.order.remove('locks_metadata')
+
+ if has_no_locks or 'oplog' not in server_status['locks']:
+ self.order.remove('locks_oplog')
+
+ for dbase in self.databases:
+ self.order.append('_'.join([dbase, 'dbstats']))
+ self.definitions['_'.join([dbase, 'dbstats'])] = {
+                'options': [None, '%s: size of all documents, indexes, extents' % dbase, 'KiB',
+ 'storage size metrics', 'mongodb.dbstats', 'line'],
+ 'lines': [
+ ['_'.join([dbase, 'dataSize']), 'documents', 'absolute', 1, 1024],
+ ['_'.join([dbase, 'indexSize']), 'indexes', 'absolute', 1, 1024],
+ ['_'.join([dbase, 'storageSize']), 'extents', 'absolute', 1, 1024]
+ ]}
+ self.definitions['dbstats_objects']['lines'].append(['_'.join([dbase, 'objects']), dbase, 'absolute'])
+
+ if self.do_replica:
+ def create_lines(hosts, string):
+ lines = list()
+ for host in hosts:
+ dim_id = '_'.join([host, string])
+ lines.append([dim_id, host, 'absolute', 1, 1000])
+ return lines
+
+            def create_state_lines(states, host):
+                # host is passed explicitly rather than captured at call time
+                # from the enclosing loop, which makes the data flow obvious
+                lines = list()
+                for state, description in states:
+                    dim_id = '_'.join([host, 'state', state])
+                    lines.append([dim_id, description, 'absolute', 1, 1])
+                return lines
+
+ all_hosts = server_status['repl']['hosts'] + server_status['repl'].get('arbiters', list())
+ this_host = server_status['repl']['me']
+ other_hosts = [host for host in all_hosts if host != this_host]
+
+ if 'local' in self.databases:
+ self.order.append('oplog_window')
+ self.definitions['oplog_window'] = {
+ 'options': [None, 'Interval of time between the oldest and the latest entries in the oplog',
+ 'seconds', 'replication and oplog', 'mongodb.oplog_window', 'line'],
+ 'lines': [['timeDiff', 'window', 'absolute', 1, 1000]]}
+ # Create "heartbeat delay" chart
+ self.order.append('heartbeat_delay')
+ self.definitions['heartbeat_delay'] = {
+ 'options': [
+ None,
+                    'Time since the last heartbeat was received from each replica set member (lastHeartbeatRecv)',
+ 'seconds ago', 'replication and oplog', 'mongodb.replication_heartbeat_delay', 'stacked'],
+ 'lines': create_lines(other_hosts, 'heartbeat_lag')}
+ # Create "optimedate delay" chart
+ self.order.append('optimedate_delay')
+ self.definitions['optimedate_delay'] = {
+                'options': [None, 'Time since the last entry from the oplog was applied (optimeDate)',
+ 'seconds ago', 'replication and oplog', 'mongodb.replication_optimedate_delay', 'stacked'],
+ 'lines': create_lines(all_hosts, 'optimedate')}
+ # Create "replica set members state" chart
+ for host in all_hosts:
+ chart_name = '_'.join([host, 'state'])
+ self.order.append(chart_name)
+ self.definitions[chart_name] = {
+ 'options': [None, 'Replica set member (%s) current state' % host, 'state',
+ 'replication and oplog', 'mongodb.replication_state', 'line'],
+                    'lines': create_state_lines(REPL_SET_STATES, host)}
+
+ def _get_raw_data(self):
+ raw_data = dict()
+
+ raw_data.update(self.get_server_status() or dict())
+ raw_data.update(self.get_db_stats() or dict())
+ raw_data.update(self.get_repl_set_get_status() or dict())
+ raw_data.update(self.get_get_replication_info() or dict())
+
+ return raw_data or None
+
+ def get_server_status(self):
+ raw_data = dict()
+ try:
+ raw_data['serverStatus'] = self.connection.admin.command('serverStatus')
+ except PyMongoError:
+ return None
+ else:
+ return raw_data
+
+ def get_db_stats(self):
+ if not self.databases:
+ return None
+
+ raw_data = dict()
+ raw_data['dbStats'] = dict()
+ try:
+ for dbase in self.databases:
+ raw_data['dbStats'][dbase] = self.connection[dbase].command('dbStats')
+ return raw_data
+ except PyMongoError:
+ return None
+
+ def get_repl_set_get_status(self):
+ if not self.do_replica:
+ return None
+
+ raw_data = dict()
+ try:
+ raw_data['replSetGetStatus'] = self.connection.admin.command('replSetGetStatus')
+ return raw_data
+ except PyMongoError:
+ return None
+
+ def get_get_replication_info(self):
+ if not (self.do_replica and 'local' in self.databases):
+ return None
+
+ raw_data = dict()
+ raw_data['getReplicationInfo'] = dict()
+ try:
+ raw_data['getReplicationInfo']['ASCENDING'] = self.connection.local.oplog.rs.find().sort(
+ '$natural', ASCENDING).limit(1)[0]
+ raw_data['getReplicationInfo']['DESCENDING'] = self.connection.local.oplog.rs.find().sort(
+ '$natural', DESCENDING).limit(1)[0]
+ return raw_data
+ except PyMongoError:
+ return None
+
+ def _get_data(self):
+ """
+ :return: dict
+ """
+ raw_data = self._get_raw_data()
+
+ if not raw_data:
+ return None
+
+ data = dict()
+ serverStatus = raw_data['serverStatus']
+ dbStats = raw_data.get('dbStats')
+ replSetGetStatus = raw_data.get('replSetGetStatus')
+ getReplicationInfo = raw_data.get('getReplicationInfo')
+ utc_now = datetime.utcnow()
+
+ # serverStatus
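+        # walk each dotted metric path (e.g. 'opcounters.insert') into the
+        # serverStatus document; the walk stops at the first missing key and
+        # only leaf (non-dict) values are kept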
+ for metric, new_name, func in self.metrics_to_collect:
+ value = serverStatus
+ for key in metric.split('.'):
+ try:
+ value = value[key]
+ except KeyError:
+ break
+
+ if not isinstance(value, dict) and key:
+ data[new_name or key] = value if not func else func(value)
+
+ if 'mapped' in serverStatus['mem']:
+ data['nonmapped'] = data['virtual'] - serverStatus['mem'].get('mappedWithJournal', data['mapped'])
+
+        if data.get('maximum bytes configured'):
+            maximum = data['maximum bytes configured']
+            # multiply before dividing so the extra x1000 precision survives
+            # integer division on python2 (the chart divides by 1000 again)
+            data['wiredTiger_percent_clean'] = int(data['bytes currently in the cache'] * 100 * 1000 / maximum)
+            data['wiredTiger_percent_dirty'] = int(data['tracked dirty bytes in the cache'] * 100 * 1000 / maximum)
+
+ # dbStats
+ if dbStats:
+ for dbase in dbStats:
+ for metric in DBSTATS:
+ key = '_'.join([dbase, metric])
+ data[key] = dbStats[dbase][metric]
+
+ # replSetGetStatus
+ if replSetGetStatus:
+ other_hosts = list()
+ members = replSetGetStatus['members']
+ unix_epoch = datetime(1970, 1, 1, 0, 0)
+
+ for member in members:
+ if not member.get('self'):
+ other_hosts.append(member)
+
+ # Replica set time diff between current time and time when last entry from the oplog was applied
+ if member.get('optimeDate', unix_epoch) != unix_epoch:
+ member_optimedate = member['name'] + '_optimedate'
+ delta = utc_now - member['optimeDate']
+ data[member_optimedate] = int(delta_calculation(delta=delta, multiplier=1000))
+
+ # Replica set members state
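+                # zero every known state first, then set the dimension matching
+                # the member's current state to its numeric state value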
+ member_state = member['name'] + '_state'
+ for elem in REPL_SET_STATES:
+ state = elem[0]
+ data.update({'_'.join([member_state, state]): 0})
+ data.update({'_'.join([member_state, str(member['state'])]): member['state']})
+
+ # Heartbeat lag calculation
+ for other in other_hosts:
+ if other['lastHeartbeatRecv'] != unix_epoch:
+ node = other['name'] + '_heartbeat_lag'
+ delta = utc_now - other['lastHeartbeatRecv']
+ data[node] = int(delta_calculation(delta=delta, multiplier=1000))
+
+ if getReplicationInfo:
+ first_event = getReplicationInfo['ASCENDING']['ts'].as_datetime()
+ last_event = getReplicationInfo['DESCENDING']['ts'].as_datetime()
+ data['timeDiff'] = int(delta_calculation(delta=last_event - first_event, multiplier=1000))
+
+ return data
+
+ def build_ssl_connection_params(self):
+ conf = self.configuration
+
+ def cert_req(v):
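+            # None -> option not set; falsy -> ssl.CERT_NONE; truthy -> ssl.CERT_REQUIRED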
+ if v is None:
+ return None
+ if not v:
+ return ssl.CERT_NONE
+ return ssl.CERT_REQUIRED
+
+ ssl_params = {
+ CONN_PARAM_SSL_SSL: conf.get(CONN_PARAM_SSL_SSL),
+ CONN_PARAM_SSL_CERT_REQS: cert_req(conf.get(CONN_PARAM_SSL_CERT_REQS)),
+ CONN_PARAM_SSL_CA_CERTS: conf.get(CONN_PARAM_SSL_CA_CERTS),
+ CONN_PARAM_SSL_CRL_FILE: conf.get(CONN_PARAM_SSL_CRL_FILE),
+ CONN_PARAM_SSL_CERT_FILE: conf.get(CONN_PARAM_SSL_CERT_FILE),
+ CONN_PARAM_SSL_KEY_FILE: conf.get(CONN_PARAM_SSL_KEY_FILE),
+ CONN_PARAM_SSL_PEM_PASSPHRASE: conf.get(CONN_PARAM_SSL_PEM_PASSPHRASE),
+ }
+
+ ssl_params = dict((k, v) for k, v in ssl_params.items() if v is not None)
+
+ return ssl_params
+
+ def build_connection_params(self):
+ conf = self.configuration
+ params = {
+ CONN_PARAM_HOST: conf.get(CONN_PARAM_HOST, DEFAULT_HOST),
+ CONN_PARAM_PORT: conf.get(CONN_PARAM_PORT, DEFAULT_PORT),
+ }
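+        # server_selection_timeout (and the serverselectiontimeoutms parameter)
+        # exists only in pymongo 3.x and later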
+ if hasattr(MongoClient, 'server_selection_timeout'):
+ params[CONN_PARAM_SERVER_SELECTION_TIMEOUT_MS] = conf.get('timeout', DEFAULT_TIMEOUT)
+
+ params.update(self.build_ssl_connection_params())
+ return params
+
+ def _create_connection(self):
+ params = self.build_connection_params()
+ self.debug('creating connection, connection params: {0}'.format(sorted(params)))
+
+ try:
+ connection = MongoClient(**params)
+ if self.user and self.password:
+                # mask the password so it does not leak into the debug log
+                self.debug('authenticating, user: {0}, password: {1}'.format(self.user, '***'))
+ getattr(connection, self.authdb).authenticate(name=self.user, password=self.password)
+ else:
+ self.debug('skip authenticating, user and password are not set')
+ # elif self.user:
+ # connection.admin.authenticate(name=self.user, mechanism='MONGODB-X509')
+ server_status = connection.admin.command('serverStatus')
+ except PyMongoError as error:
+ return None, None, str(error)
+ else:
+ try:
+ self.databases = connection.database_names()
+ except PyMongoError as error:
+ self.info('Can\'t collect databases: %s' % str(error))
+ return connection, server_status, None
+
+
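+# timedelta.total_seconds() is not available on python < 2.7, hence the manual
+# microseconds/seconds/days fallback below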
+def delta_calculation(delta, multiplier=1):
+ if hasattr(delta, 'total_seconds'):
+ return delta.total_seconds() * multiplier
+ return (delta.microseconds + (delta.seconds + delta.days * 24 * 3600) * 10 ** 6) / 10.0 ** 6 * multiplier
diff --git a/collectors/python.d.plugin/mongodb/mongodb.conf b/collectors/python.d.plugin/mongodb/mongodb.conf
new file mode 100644
index 0000000..9f660f5
--- /dev/null
+++ b/collectors/python.d.plugin/mongodb/mongodb.conf
@@ -0,0 +1,102 @@
+# netdata python.d.plugin configuration for mongodb
+#
+# This file is in YAML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# In addition to the above, mongodb also supports the following:
+#
+# host: 'IP or HOSTNAME' # type <str> the host to connect to
+# port: PORT # type <int> the port to connect to
+#
+# in all cases, the following can also be set:
+#
+# authdb: 'dbname' # database to authenticate the user against,
+# # defaults to "admin".
+# user: 'username' # the mongodb username to use
+# pass: 'password' # the mongodb password to use
+#
+# SSL connection parameters (https://api.mongodb.com/python/current/examples/tls.html):
+#
+# ssl: yes # connect to the server using TLS
+# ssl_cert_reqs: yes # require a certificate from the server when TLS is enabled
+# ssl_ca_certs: '/path/to/ca.pem' # use a specific set of CA certificates
+#  ssl_crlfile: '/path/to/crl.pem'     # use a certificate revocation list
+# ssl_certfile: '/path/to/client.pem' # use a client certificate
+# ssl_keyfile: '/path/to/key.pem' # use a specific client certificate key
+# ssl_pem_passphrase: 'passphrase' # use a passphrase to decrypt encrypted private keys
+#
+
+# ----------------------------------------------------------------------
+# to connect to the mongodb on localhost, without a password:
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+
+local:
+ name : 'local'
+ host : '127.0.0.1'
+ port : 27017
+
+# authsample:
+# name : 'secure'
+# host : 'mongodb.example.com'
+# port : 27017
+# authdb : 'admin'
+# user : 'monitor'
+# pass : 'supersecret'
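+
+# an example of a TLS-enabled job (a sketch only: the host and file paths are
+# placeholders, adjust them to your deployment; the parameters are the SSL
+# options documented above):
+#
+# tlssample:
+#     name         : 'tls'
+#     host         : 'mongodb.example.com'
+#     port         : 27017
+#     ssl          : yes
+#     ssl_ca_certs : '/path/to/ca.pem'
+#     ssl_certfile : '/path/to/client.pem'
+#     ssl_keyfile  : '/path/to/key.pem'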
diff --git a/collectors/python.d.plugin/monit/Makefile.inc b/collectors/python.d.plugin/monit/Makefile.inc
new file mode 100644
index 0000000..4a3673f
--- /dev/null
+++ b/collectors/python.d.plugin/monit/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += monit/monit.chart.py
+dist_pythonconfig_DATA += monit/monit.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += monit/README.md monit/Makefile.inc
+
diff --git a/collectors/python.d.plugin/monit/README.md b/collectors/python.d.plugin/monit/README.md
new file mode 100644
index 0000000..fe13896
--- /dev/null
+++ b/collectors/python.d.plugin/monit/README.md
@@ -0,0 +1,52 @@
+<!--
+title: "Monit monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/monit/README.md
+sidebar_label: "Monit"
+-->
+
+# Monit monitoring with Netdata
+
+Monit monitoring module. Data is grabbed from the stats XML interface, which has existed for a long time but is not mentioned in the official documentation. This module mostly shows the statuses of Monit targets, i.e. [statuses of specified checks](https://mmonit.com/monit/documentation/monit.html#Service-checks). You can fetch the same XML yourself; see the example after the list below.
+
+1. **Filesystems**
+
+ - Filesystems
+ - Directories
+ - Files
+ - Pipes
+
+2. **Applications**
+
+   - Processes (+threads/children)
+ - Programs
+
+3. **Network**
+
+ - Hosts (+latency)
+ - Network interfaces
+
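+You can check what data the module will see by fetching the same XML status
+endpoint it uses (this is exactly the URL the module builds internally):
+
+```bash
+curl 'http://localhost:2812/_status?format=xml&level=full'
+```
+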
+## Configuration
+
+Edit the `python.d/monit.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/monit.conf
+```
+
+Sample:
+
+```yaml
+local:
+ name : 'local'
+ url : 'http://localhost:2812'
+  user : 'admin'
+  pass : 'monit'
+```
+
+If no configuration is given, the module will attempt to connect to Monit at `http://localhost:2812`.
+
+---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fmonit%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/python.d.plugin/monit/monit.chart.py b/collectors/python.d.plugin/monit/monit.chart.py
new file mode 100644
index 0000000..bfc1823
--- /dev/null
+++ b/collectors/python.d.plugin/monit/monit.chart.py
@@ -0,0 +1,360 @@
+# -*- coding: utf-8 -*-
+# Description: monit netdata python.d module
+# Author: Evgeniy K. (n0guest)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import xml.etree.ElementTree as ET
+from collections import namedtuple
+
+from bases.FrameworkServices.UrlService import UrlService
+
+MonitType = namedtuple('MonitType', ('index', 'name'))
+
+# see enum Service_Type from monit.h (https://bitbucket.org/tildeslash/monit/src/master/src/monit.h)
+# typedef enum {
+# Service_Filesystem = 0,
+# Service_Directory,
+# Service_File,
+# Service_Process,
+# Service_Host,
+# Service_System,
+# Service_Fifo,
+# Service_Program,
+# Service_Net,
+# Service_Last = Service_Net
+# } __attribute__((__packed__)) Service_Type;
+
+TYPE_FILESYSTEM = MonitType(0, 'filesystem')
+TYPE_DIRECTORY = MonitType(1, 'directory')
+TYPE_FILE = MonitType(2, 'file')
+TYPE_PROCESS = MonitType(3, 'process')
+TYPE_HOST = MonitType(4, 'host')
+TYPE_SYSTEM = MonitType(5, 'system')
+TYPE_FIFO = MonitType(6, 'fifo')
+TYPE_PROGRAM = MonitType(7, 'program')
+TYPE_NET = MonitType(8, 'net')
+
+TYPES = (
+ TYPE_FILESYSTEM,
+ TYPE_DIRECTORY,
+ TYPE_FILE,
+ TYPE_PROCESS,
+ TYPE_HOST,
+ TYPE_SYSTEM,
+ TYPE_FIFO,
+ TYPE_PROGRAM,
+ TYPE_NET,
+)
+
+# charts order (can be overridden if you want fewer charts, or a different order)
+ORDER = [
+ 'filesystem',
+ 'directory',
+ 'file',
+ 'process',
+ 'process_uptime',
+ 'process_threads',
+ 'process_children',
+ 'host',
+ 'host_latency',
+ 'system',
+ 'fifo',
+ 'program',
+ 'net'
+]
+
+CHARTS = {
+ 'filesystem': {
+ 'options': ['filesystems', 'Filesystems', 'filesystems', 'filesystem', 'monit.filesystems', 'line'],
+ 'lines': []
+ },
+ 'directory': {
+ 'options': ['directories', 'Directories', 'directories', 'filesystem', 'monit.directories', 'line'],
+ 'lines': []
+ },
+ 'file': {
+ 'options': ['files', 'Files', 'files', 'filesystem', 'monit.files', 'line'],
+ 'lines': []
+ },
+ 'fifo': {
+ 'options': ['fifos', 'Pipes (fifo)', 'pipes', 'filesystem', 'monit.fifos', 'line'],
+ 'lines': []
+ },
+ 'program': {
+        'options': ['programs', 'Program statuses', 'programs', 'applications', 'monit.programs', 'line'],
+ 'lines': []
+ },
+ 'process': {
+        'options': ['processes', 'Process statuses', 'processes', 'applications', 'monit.services', 'line'],
+ 'lines': []
+ },
+ 'process_uptime': {
+ 'options': ['processes uptime', 'Processes uptime', 'seconds', 'applications',
+ 'monit.process_uptime', 'line', 'hidden'],
+ 'lines': []
+ },
+ 'process_threads': {
+ 'options': ['processes threads', 'Processes threads', 'threads', 'applications',
+ 'monit.process_threads', 'line'],
+ 'lines': []
+ },
+ 'process_children': {
+        'options': ['processes childrens', 'Child processes', 'children', 'applications',
+ 'monit.process_childrens', 'line'],
+ 'lines': []
+ },
+ 'host': {
+ 'options': ['hosts', 'Hosts', 'hosts', 'network', 'monit.hosts', 'line'],
+ 'lines': []
+ },
+ 'host_latency': {
+ 'options': ['hosts latency', 'Hosts latency', 'milliseconds', 'network', 'monit.host_latency', 'line'],
+ 'lines': []
+ },
+ 'net': {
+ 'options': ['interfaces', 'Network interfaces and addresses', 'interfaces', 'network',
+ 'monit.networks', 'line'],
+ 'lines': []
+ },
+}
+
+
+class BaseMonitService(object):
+ def __init__(self, typ, name, status, monitor):
+ self.type = typ
+ self.name = name
+ self.status = status
+ self.monitor = monitor
+
+ def __repr__(self):
+ return 'MonitService({0}:{1})'.format(self.type.name, self.name)
+
+ def __eq__(self, other):
+ if not isinstance(other, BaseMonitService):
+ return False
+ return self.type == other.type and self.name == other.name
+
+ def __ne__(self, other):
+ return not self == other
+
+ def __hash__(self):
+ return hash(repr(self))
+
+ def is_running(self):
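+        # in monit's XML status output, status '0' means the check reported no
+        # error and monitor '1' means the service is actively monitored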
+ return self.status == '0' and self.monitor == '1'
+
+ def key(self):
+ return '{0}_{1}'.format(self.type.name, self.name)
+
+ def data(self):
+ return {self.key(): int(self.is_running())}
+
+
+class ProcessMonitService(BaseMonitService):
+ def __init__(self, typ, name, status, monitor):
+ super(ProcessMonitService, self).__init__(typ, name, status, monitor)
+ self.uptime = None
+ self.threads = None
+ self.children = None
+
+ def __eq__(self, other):
+ return super(ProcessMonitService, self).__eq__(other)
+
+ def __ne__(self, other):
+ return super(ProcessMonitService, self).__ne__(other)
+
+ def __hash__(self):
+ return super(ProcessMonitService, self).__hash__()
+
+ def uptime_key(self):
+ return 'process_uptime_{0}'.format(self.name)
+
+ def threads_key(self):
+ return 'process_threads_{0}'.format(self.name)
+
+ def children_key(self):
+ return 'process_children_{0}'.format(self.name)
+
+ def data(self):
+ base_data = super(ProcessMonitService, self).data()
+ # skipping bugged metrics with negative uptime (monit before v5.16)
+ uptime = self.uptime if self.uptime and int(self.uptime) >= 0 else None
+ data = {
+ self.uptime_key(): uptime,
+ self.threads_key(): self.threads,
+ self.children_key(): self.children,
+ }
+ data.update(base_data)
+
+ return data
+
+
+class HostMonitService(BaseMonitService):
+ def __init__(self, typ, name, status, monitor):
+ super(HostMonitService, self).__init__(typ, name, status, monitor)
+ self.latency = None
+
+ def __eq__(self, other):
+ return super(HostMonitService, self).__eq__(other)
+
+ def __ne__(self, other):
+ return super(HostMonitService, self).__ne__(other)
+
+ def __hash__(self):
+ return super(HostMonitService, self).__hash__()
+
+ def latency_key(self):
+ return 'host_latency_{0}'.format(self.name)
+
+ def data(self):
+ base_data = super(HostMonitService, self).data()
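+        # monit reports icmp responsetime in seconds; convert to microseconds,
+        # the chart dimension (multiplier 1000, divisor 1000000) shows it as ms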
+ latency = float(self.latency) * 1000000 if self.latency else None
+ data = {self.latency_key(): latency}
+ data.update(base_data)
+
+ return data
+
+
+class Service(UrlService):
+ def __init__(self, configuration=None, name=None):
+ UrlService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
+        base_url = self.configuration.get('url', 'http://localhost:2812')
+ self.url = '{0}/_status?format=xml&level=full'.format(base_url)
+ self.active_services = list()
+
+ def parse(self, raw):
+ try:
+ root = ET.fromstring(raw)
+ except ET.ParseError:
+ self.error("URL {0} didn't return a valid XML page. Please check your settings.".format(self.url))
+ return None
+ return root
+
+ def _get_data(self):
+ raw = self._get_raw_data()
+ if not raw:
+ return None
+
+ root = self.parse(raw)
+ if root is None:
+ return None
+
+ services = self.get_services(root)
+ if not services:
+ return None
+
+ if len(self.charts) > 0:
+ self.update_charts(services)
+
+ data = dict()
+
+ for svc in services:
+ data.update(svc.data())
+
+ return data
+
+ def get_services(self, root):
+ services = list()
+
+ for typ in TYPES:
+ if typ == TYPE_SYSTEM:
+ self.debug("skipping service from '{0}' category, it's useless in graphs".format(TYPE_SYSTEM.name))
+ continue
+
+ xpath_query = "./service[@type='{0}']".format(typ.index)
+ self.debug('Searching for {0} as {1}'.format(typ.name, xpath_query))
+
+ for svc_root in root.findall(xpath_query):
+ svc = create_service(svc_root, typ)
+ self.debug('=> found {0} with type={1}, status={2}, monitoring={3}'.format(
+ svc.name, svc.type.name, svc.status, svc.monitor))
+
+ services.append(svc)
+
+ return services
+
+ def update_charts(self, services):
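+        # diff the currently reported services against the previously seen
+        # ones, so chart dimensions follow monit targets as they come and go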
+ remove = [svc for svc in self.active_services if svc not in services]
+ add = [svc for svc in services if svc not in self.active_services]
+
+ self.remove_services_from_charts(remove)
+ self.add_services_to_charts(add)
+
+ self.active_services = services
+
+ def add_services_to_charts(self, services):
+ for svc in services:
+ if svc.type == TYPE_HOST:
+ self.charts['host_latency'].add_dimension([svc.latency_key(), svc.name, 'absolute', 1000, 1000000])
+ if svc.type == TYPE_PROCESS:
+ self.charts['process_uptime'].add_dimension([svc.uptime_key(), svc.name])
+ self.charts['process_threads'].add_dimension([svc.threads_key(), svc.name])
+ self.charts['process_children'].add_dimension([svc.children_key(), svc.name])
+ self.charts[svc.type.name].add_dimension([svc.key(), svc.name])
+
+ def remove_services_from_charts(self, services):
+ for svc in services:
+ if svc.type == TYPE_HOST:
+ self.charts['host_latency'].del_dimension(svc.latency_key(), False)
+ if svc.type == TYPE_PROCESS:
+ self.charts['process_uptime'].del_dimension(svc.uptime_key(), False)
+ self.charts['process_threads'].del_dimension(svc.threads_key(), False)
+ self.charts['process_children'].del_dimension(svc.children_key(), False)
+ self.charts[svc.type.name].del_dimension(svc.key(), False)
+
+
+def create_service(root, typ):
+ if typ == TYPE_HOST:
+ return create_host_service(root)
+ elif typ == TYPE_PROCESS:
+ return create_process_service(root)
+ return create_base_service(root, typ)
+
+
+def create_host_service(root):
+ svc = HostMonitService(
+ TYPE_HOST,
+ root.find('name').text,
+ root.find('status').text,
+ root.find('monitor').text,
+ )
+
+ latency = root.find('./icmp/responsetime')
+ if latency is not None:
+ svc.latency = latency.text
+
+ return svc
+
+
+def create_process_service(root):
+ svc = ProcessMonitService(
+ TYPE_PROCESS,
+ root.find('name').text,
+ root.find('status').text,
+ root.find('monitor').text,
+ )
+
+ uptime = root.find('uptime')
+ if uptime is not None:
+ svc.uptime = uptime.text
+
+ threads = root.find('threads')
+ if threads is not None:
+ svc.threads = threads.text
+
+ children = root.find('children')
+ if children is not None:
+ svc.children = children.text
+
+ return svc
+
+
+def create_base_service(root, typ):
+ return BaseMonitService(
+ typ,
+ root.find('name').text,
+ root.find('status').text,
+ root.find('monitor').text,
+ )
diff --git a/collectors/python.d.plugin/monit/monit.conf b/collectors/python.d.plugin/monit/monit.conf
new file mode 100644
index 0000000..9a3fb69
--- /dev/null
+++ b/collectors/python.d.plugin/monit/monit.conf
@@ -0,0 +1,86 @@
+# netdata python.d.plugin configuration for monit
+#
+# This file is in YAML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# In addition to the above, this plugin also supports the following:
+#
+# url: 'URL' # the URL to fetch monit's status stats
+#
+# if the URL is password protected, the following are supported:
+#
+# user: 'username'
+# pass: 'password'
+#
+# Example
+#
+# local:
+# name : 'Local Monit'
+# url : 'http://localhost:2812'
+#
+# "local" will show up in Netdata logs. "Reverse Proxy" will show up in the menu
+# in the monit section.
+
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+
+localhost:
+ name : 'local'
+ url : 'http://localhost:2812'
diff --git a/collectors/python.d.plugin/mysql/Makefile.inc b/collectors/python.d.plugin/mysql/Makefile.inc
new file mode 100644
index 0000000..03e8b65
--- /dev/null
+++ b/collectors/python.d.plugin/mysql/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += mysql/mysql.chart.py
+dist_pythonconfig_DATA += mysql/mysql.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += mysql/README.md mysql/Makefile.inc
+
diff --git a/collectors/python.d.plugin/mysql/README.md b/collectors/python.d.plugin/mysql/README.md
new file mode 100644
index 0000000..d8d3c1d
--- /dev/null
+++ b/collectors/python.d.plugin/mysql/README.md
@@ -0,0 +1,396 @@
+<!--
+title: "MySQL monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/mysql/README.md
+sidebar_label: "MySQL"
+-->
+
+# MySQL monitoring with Netdata
+
+Monitors one or more MySQL servers.
+
+## Requirements
+
+- python library [MySQLdb](https://github.com/PyMySQL/mysqlclient-python) (faster) or [PyMySQL](https://github.com/PyMySQL/PyMySQL) (slower)
+- `netdata` local user to connect to the MySQL server.
+
+To create the `netdata` user, execute the following in the MySQL shell:
+
+```sh
+create user 'netdata'@'localhost';
+grant usage on *.* to 'netdata'@'localhost';
+flush privileges;
+```
+The `netdata` user will have the ability to connect to the MySQL server on `localhost` without a password.
+It will only be able to gather MySQL statistics without being able to alter or affect MySQL operations in any way.
+
+This module will produce the following charts (if data is available):
+
+1. **Bandwidth** in kilobits/s
+
+ - in
+ - out
+
+2. **Queries** in queries/sec
+
+ - queries
+ - questions
+ - slow queries
+
+3. **Queries By Type** in queries/s
+
+ - select
+ - delete
+ - update
+ - insert
+ - cache hits
+ - replace
+
+4. **Handlers** in handlers/s
+
+ - commit
+ - delete
+ - prepare
+ - read first
+ - read key
+ - read next
+ - read prev
+ - read rnd
+ - read rnd next
+ - rollback
+ - savepoint
+ - savepoint rollback
+ - update
+ - write
+
+5. **Table Locks** in locks/s
+
+ - immediate
+ - waited
+
+6. **Table Select Join Issues** in joins/s
+
+ - full join
+ - full range join
+ - range
+ - range check
+ - scan
+
+7. **Table Sort Issues** in issues/s
+
+ - merge passes
+ - range
+ - scan
+
+8. **Tmp Operations** in created/s
+
+ - disk tables
+ - files
+ - tables
+
+9. **Connections** in connections/s
+
+ - all
+ - aborted
+
+10. **Connections Active** in connections/s
+
+ - active
+ - limit
+ - max active
+
+11. **Binlog Cache** in threads
+
+ - disk
+ - all
+
+12. **Threads** in transactions/s
+
+ - connected
+ - cached
+ - running
+
+13. **Threads Creation Rate** in threads/s
+
+ - created
+
+14. **Threads Cache Misses** in misses
+
+ - misses
+
+15. **InnoDB I/O Bandwidth** in KiB/s
+
+ - read
+ - write
+
+16. **InnoDB I/O Operations** in operations/s
+
+ - reads
+ - writes
+ - fsyncs
+
+17. **InnoDB Pending I/O Operations** in operations/s
+
+ - reads
+ - writes
+ - fsyncs
+
+18. **InnoDB Log Operations** in operations/s
+
+ - waits
+ - write requests
+ - writes
+
+19. **InnoDB OS Log Pending Operations** in operations
+
+ - fsyncs
+ - writes
+
+20. **InnoDB OS Log Operations** in operations/s
+
+ - fsyncs
+
+21. **InnoDB OS Log Bandwidth** in KiB/s
+
+ - write
+
+22. **InnoDB Current Row Locks** in operations
+
+ - current waits
+
+23. **InnoDB Row Operations** in operations/s
+
+ - inserted
+ - read
+ - updated
+ - deleted
+
+24. **InnoDB Buffer Pool Pages** in pages
+
+ - data
+ - dirty
+ - free
+ - misc
+ - total
+
+25. **InnoDB Buffer Pool Flush Pages Requests** in requests/s
+
+ - flush pages
+
+26. **InnoDB Buffer Pool Bytes** in MiB
+
+ - data
+ - dirty
+
+27. **InnoDB Buffer Pool Operations** in operations/s
+
+ - disk reads
+ - wait free
+
+28. **QCache Operations** in queries/s
+
+ - hits
+ - lowmem prunes
+ - inserts
+ - no caches
+
+29. **QCache Queries in Cache** in queries
+
+ - queries
+
+30. **QCache Free Memory** in MiB
+
+ - free
+
+31. **QCache Memory Blocks** in blocks
+
+ - free
+ - total
+
+32. **MyISAM Key Cache Blocks** in blocks
+
+ - unused
+ - used
+ - not flushed
+
+33. **MyISAM Key Cache Requests** in requests/s
+
+ - reads
+ - writes
+
+34. **MyISAM Key Cache Disk Operations** in operations/s
+
+ - reads
+ - writes
+
+35. **Open Files** in files
+
+ - files
+
+36. **Opened Files Rate** in files/s
+
+ - files
+
+37. **Binlog Statement Cache** in statements/s
+
+ - disk
+ - all
+
+38. **Connection Errors** in errors/s
+
+ - accept
+ - internal
+ - max
+ - peer addr
+ - select
+ - tcpwrap
+
+39. **Slave Behind Seconds** in seconds
+
+ - time
+
+40. **I/O / SQL Thread Running State** in bool
+
+ - sql
+ - io
+
+41. **Galera Replicated Writesets** in writesets/s
+
+ - rx
+ - tx
+
+42. **Galera Replicated Bytes** in KiB/s
+
+ - rx
+ - tx
+
+43. **Galera Queue** in writesets
+
+ - rx
+ - tx
+
+44. **Galera Replication Conflicts** in transactions
+
+ - bf aborts
+ - cert fails
+
+45. **Galera Flow Control** in ms
+
+ - paused
+
+46. **Galera Cluster Status** in status
+
+ - status
+
+47. **Galera Cluster State** in state
+
+ - state
+
+48. **Galera Number of Nodes in the Cluster** in num
+
+ - nodes
+
+49. **Galera Total Weight of the Current Members in the Cluster** in weight
+
+ - weight
+
+50. **Galera Whether the Node is Connected to the Cluster** in boolean
+
+ - connected
+
+51. **Galera Whether the Node is Ready to Accept Queries** in boolean
+
+ - ready
+
+52. **Galera Open Transactions** in num
+
+ - open transactions
+
+53. **Galera Total Number of WSRep (applier/rollbacker) Threads** in num
+
+ - threads
+
+54. **Users CPU time** in percentage
+
+ - users
+
+**Per user statistics:**
+
+1. **Rows Operations** in operations/s
+
+ - read
+ - send
+ - updated
+ - inserted
+ - deleted
+
+2. **Commands** in commands/s
+
+ - select
+ - update
+ - other
+
+## Configuration
+
+Edit the `python.d/mysql.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/mysql.conf
+```
+
+You can provide, per server, the following:
+
+1. username which has access to the database (defaults to 'root')
+2. password (defaults to none)
+3. mysql my.cnf configuration file
+4. mysql socket (optional)
+5. mysql host (ip or hostname)
+6. mysql port (defaults to 3306)
+7. ssl connection parameters (see the TLS example below)
+
+ - key: the path name of the client private key file.
+ - cert: the path name of the client public key certificate file.
+ - ca: the path name of the Certificate Authority (CA) certificate file. This option, if used, must specify the
+ same certificate used by the server.
+ - capath: the path name of the directory that contains trusted SSL CA certificate files.
+ - cipher: the list of permitted ciphers for SSL encryption.
+
+Here is an example for 3 servers:
+
+```yaml
+update_every : 10
+priority : 90100
+
+local:
+ 'my.cnf' : '/etc/mysql/my.cnf'
+ priority : 90000
+
+local_2:
+ user : 'root'
+ pass : 'blablablabla'
+ socket : '/var/run/mysqld/mysqld.sock'
+ update_every : 1
+
+remote:
+ user : 'admin'
+ pass : 'bla'
+ host : 'example.org'
+ port : 9000
+```
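+
+If your server requires TLS, the ssl parameters from the list above go under
+their own `ssl` key. A minimal sketch, assuming the module passes the `ssl`
+mapping straight through to the MySQL client library (the file paths are
+placeholders you need to adapt):
+
+```yaml
+local_tls:
+    user : 'netdata'
+    ssl  :
+        ca   : '/path/to/ca.pem'
+        cert : '/path/to/client-cert.pem'
+        key  : '/path/to/key.pem'
+```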
+
+If no configuration is given, the module will attempt to connect to the MySQL server via a unix socket at
+`/var/run/mysqld/mysqld.sock` without a password, using the username `root` or `netdata` (permissions for the `netdata` user were granted in the Requirements section of this document).
+
+The `userstats` charts work only if you enable the plugin in the MariaDB server and set the proper MySQL privileges (SUPER or
+PROCESS). For more details, please check the [MariaDB User Statistics
+page](https://mariadb.com/kb/en/library/user-statistics/).
+
+---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fmysql%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/python.d.plugin/mysql/mysql.chart.py b/collectors/python.d.plugin/mysql/mysql.chart.py
new file mode 100644
index 0000000..1737e16
--- /dev/null
+++ b/collectors/python.d.plugin/mysql/mysql.chart.py
@@ -0,0 +1,976 @@
+# -*- coding: utf-8 -*-
+# Description: MySQL netdata python.d module
+# Author: Pawel Krupa (paulfantom)
+# Author: Ilya Mashchenko (ilyam8)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from bases.FrameworkServices.MySQLService import MySQLService
+
+# queries executed on the MySQL server
+QUERY_GLOBAL = 'SHOW GLOBAL STATUS;'
+QUERY_SLAVE = 'SHOW SLAVE STATUS;'
+QUERY_VARIABLES = 'SHOW GLOBAL VARIABLES LIKE \'max_connections\';'
+QUERY_USER_STATISTICS = 'SHOW USER_STATISTICS;'
+
+GLOBAL_STATS = [
+ 'Bytes_received',
+ 'Bytes_sent',
+ 'Queries',
+ 'Questions',
+ 'Slow_queries',
+ 'Handler_commit',
+ 'Handler_delete',
+ 'Handler_prepare',
+ 'Handler_read_first',
+ 'Handler_read_key',
+ 'Handler_read_next',
+ 'Handler_read_prev',
+ 'Handler_read_rnd',
+ 'Handler_read_rnd_next',
+ 'Handler_rollback',
+ 'Handler_savepoint',
+ 'Handler_savepoint_rollback',
+ 'Handler_update',
+ 'Handler_write',
+ 'Table_locks_immediate',
+ 'Table_locks_waited',
+ 'Select_full_join',
+ 'Select_full_range_join',
+ 'Select_range',
+ 'Select_range_check',
+ 'Select_scan',
+ 'Sort_merge_passes',
+ 'Sort_range',
+ 'Sort_scan',
+ 'Created_tmp_disk_tables',
+ 'Created_tmp_files',
+ 'Created_tmp_tables',
+ 'Connections',
+ 'Aborted_connects',
+ 'Max_used_connections',
+ 'Binlog_cache_disk_use',
+ 'Binlog_cache_use',
+ 'Threads_connected',
+ 'Threads_created',
+ 'Threads_cached',
+ 'Threads_running',
+ 'Thread_cache_misses',
+ 'Innodb_data_read',
+ 'Innodb_data_written',
+ 'Innodb_data_reads',
+ 'Innodb_data_writes',
+ 'Innodb_data_fsyncs',
+ 'Innodb_data_pending_reads',
+ 'Innodb_data_pending_writes',
+ 'Innodb_data_pending_fsyncs',
+ 'Innodb_log_waits',
+ 'Innodb_log_write_requests',
+ 'Innodb_log_writes',
+ 'Innodb_os_log_fsyncs',
+ 'Innodb_os_log_pending_fsyncs',
+ 'Innodb_os_log_pending_writes',
+ 'Innodb_os_log_written',
+ 'Innodb_row_lock_current_waits',
+ 'Innodb_rows_inserted',
+ 'Innodb_rows_read',
+ 'Innodb_rows_updated',
+ 'Innodb_rows_deleted',
+ 'Innodb_buffer_pool_pages_data',
+ 'Innodb_buffer_pool_pages_dirty',
+ 'Innodb_buffer_pool_pages_free',
+ 'Innodb_buffer_pool_pages_flushed',
+ 'Innodb_buffer_pool_pages_misc',
+ 'Innodb_buffer_pool_pages_total',
+ 'Innodb_buffer_pool_bytes_data',
+ 'Innodb_buffer_pool_bytes_dirty',
+ 'Innodb_buffer_pool_read_ahead',
+ 'Innodb_buffer_pool_read_ahead_evicted',
+ 'Innodb_buffer_pool_read_ahead_rnd',
+ 'Innodb_buffer_pool_read_requests',
+ 'Innodb_buffer_pool_write_requests',
+ 'Innodb_buffer_pool_reads',
+ 'Innodb_buffer_pool_wait_free',
+ 'Innodb_deadlocks',
+ 'Qcache_hits',
+ 'Qcache_lowmem_prunes',
+ 'Qcache_inserts',
+ 'Qcache_not_cached',
+ 'Qcache_queries_in_cache',
+ 'Qcache_free_memory',
+ 'Qcache_free_blocks',
+ 'Qcache_total_blocks',
+ 'Key_blocks_unused',
+ 'Key_blocks_used',
+ 'Key_blocks_not_flushed',
+ 'Key_read_requests',
+ 'Key_write_requests',
+ 'Key_reads',
+ 'Key_writes',
+ 'Open_files',
+ 'Opened_files',
+ 'Binlog_stmt_cache_disk_use',
+ 'Binlog_stmt_cache_use',
+ 'Connection_errors_accept',
+ 'Connection_errors_internal',
+ 'Connection_errors_max_connections',
+ 'Connection_errors_peer_address',
+ 'Connection_errors_select',
+ 'Connection_errors_tcpwrap',
+ 'Com_delete',
+ 'Com_insert',
+ 'Com_select',
+ 'Com_update',
+ 'Com_replace'
+]
+
+GALERA_STATS = [
+ 'wsrep_local_recv_queue',
+ 'wsrep_local_send_queue',
+ 'wsrep_received',
+ 'wsrep_replicated',
+ 'wsrep_received_bytes',
+ 'wsrep_replicated_bytes',
+ 'wsrep_local_bf_aborts',
+ 'wsrep_local_cert_failures',
+ 'wsrep_flow_control_paused_ns',
+ 'wsrep_cluster_weight',
+ 'wsrep_cluster_size',
+ 'wsrep_cluster_status',
+ 'wsrep_local_state',
+ 'wsrep_open_transactions',
+ 'wsrep_connected',
+ 'wsrep_ready',
+ 'wsrep_thread_count'
+]
+
+
+def slave_seconds(value):
+ try:
+ return int(value)
+ except (TypeError, ValueError):
+ return -1
+
+
+def slave_running(value):
+ return 1 if value == 'Yes' else -1
+
+
+SLAVE_STATS = [
+ ('Seconds_Behind_Master', slave_seconds),
+ ('Slave_SQL_Running', slave_running),
+ ('Slave_IO_Running', slave_running)
+]
+
+USER_STATISTICS = [
+ 'Select_commands',
+ 'Update_commands',
+ 'Other_commands',
+ 'Cpu_time',
+ 'Rows_read',
+ 'Rows_sent',
+ 'Rows_deleted',
+ 'Rows_inserted',
+ 'Rows_updated'
+]
+
+VARIABLES = [
+ 'max_connections'
+]
+
+ORDER = [
+ 'net',
+ 'queries',
+ 'queries_type',
+ 'handlers',
+ 'table_locks',
+ 'join_issues',
+ 'sort_issues',
+ 'tmp',
+ 'connections',
+ 'connections_active',
+ 'connection_errors',
+ 'binlog_cache',
+ 'binlog_stmt_cache',
+ 'threads',
+ 'threads_creation_rate',
+ 'thread_cache_misses',
+ 'innodb_io',
+ 'innodb_io_ops',
+ 'innodb_io_pending_ops',
+ 'innodb_log',
+ 'innodb_os_log',
+ 'innodb_os_log_fsync_writes',
+ 'innodb_os_log_io',
+ 'innodb_cur_row_lock',
+ 'innodb_deadlocks',
+ 'innodb_rows',
+ 'innodb_buffer_pool_pages',
+ 'innodb_buffer_pool_flush_pages_requests',
+ 'innodb_buffer_pool_bytes',
+ 'innodb_buffer_pool_read_ahead',
+ 'innodb_buffer_pool_reqs',
+ 'innodb_buffer_pool_ops',
+ 'qcache_ops',
+ 'qcache',
+ 'qcache_freemem',
+ 'qcache_memblocks',
+ 'key_blocks',
+ 'key_requests',
+ 'key_disk_ops',
+ 'files',
+ 'files_rate',
+ 'slave_behind',
+ 'slave_status',
+ 'galera_writesets',
+ 'galera_bytes',
+ 'galera_queue',
+ 'galera_conflicts',
+ 'galera_flow_control',
+ 'galera_cluster_status',
+ 'galera_cluster_state',
+ 'galera_cluster_size',
+ 'galera_cluster_weight',
+ 'galera_connected',
+ 'galera_ready',
+ 'galera_open_transactions',
+ 'galera_thread_count',
+ 'userstats_cpu',
+]
+
+CHARTS = {
+ 'net': {
+ 'options': [None, 'Bandwidth', 'kilobits/s', 'bandwidth', 'mysql.net', 'area'],
+ 'lines': [
+ ['Bytes_received', 'in', 'incremental', 8, 1000],
+ ['Bytes_sent', 'out', 'incremental', -8, 1000]
+ ]
+ },
+ 'queries': {
+ 'options': [None, 'Queries', 'queries/s', 'queries', 'mysql.queries', 'line'],
+ 'lines': [
+ ['Queries', 'queries', 'incremental'],
+ ['Questions', 'questions', 'incremental'],
+ ['Slow_queries', 'slow_queries', 'incremental']
+ ]
+ },
+ 'queries_type': {
+ 'options': [None, 'Query Type', 'queries/s', 'query_types', 'mysql.queries_type', 'stacked'],
+ 'lines': [
+ ['Com_select', 'select', 'incremental'],
+ ['Com_delete', 'delete', 'incremental'],
+ ['Com_update', 'update', 'incremental'],
+ ['Com_insert', 'insert', 'incremental'],
+ ['Qcache_hits', 'cache_hits', 'incremental'],
+ ['Com_replace', 'replace', 'incremental']
+ ]
+ },
+ 'handlers': {
+ 'options': [None, 'Handlers', 'handlers/s', 'handlers', 'mysql.handlers', 'line'],
+ 'lines': [
+ ['Handler_commit', 'commit', 'incremental'],
+ ['Handler_delete', 'delete', 'incremental'],
+ ['Handler_prepare', 'prepare', 'incremental'],
+ ['Handler_read_first', 'read_first', 'incremental'],
+ ['Handler_read_key', 'read_key', 'incremental'],
+ ['Handler_read_next', 'read_next', 'incremental'],
+ ['Handler_read_prev', 'read_prev', 'incremental'],
+ ['Handler_read_rnd', 'read_rnd', 'incremental'],
+ ['Handler_read_rnd_next', 'read_rnd_next', 'incremental'],
+ ['Handler_rollback', 'rollback', 'incremental'],
+ ['Handler_savepoint', 'savepoint', 'incremental'],
+ ['Handler_savepoint_rollback', 'savepoint_rollback', 'incremental'],
+ ['Handler_update', 'update', 'incremental'],
+ ['Handler_write', 'write', 'incremental']
+ ]
+ },
+ 'table_locks': {
+        'options': [None, 'Table Locks', 'locks/s', 'locks', 'mysql.table_locks', 'line'],
+ 'lines': [
+ ['Table_locks_immediate', 'immediate', 'incremental'],
+ ['Table_locks_waited', 'waited', 'incremental', -1, 1]
+ ]
+ },
+ 'join_issues': {
+ 'options': [None, 'Select Join Issues', 'joins/s', 'issues', 'mysql.join_issues', 'line'],
+ 'lines': [
+ ['Select_full_join', 'full_join', 'incremental'],
+ ['Select_full_range_join', 'full_range_join', 'incremental'],
+ ['Select_range', 'range', 'incremental'],
+ ['Select_range_check', 'range_check', 'incremental'],
+ ['Select_scan', 'scan', 'incremental']
+ ]
+ },
+ 'sort_issues': {
+ 'options': [None, 'Sort Issues', 'issues/s', 'issues', 'mysql.sort_issues', 'line'],
+ 'lines': [
+ ['Sort_merge_passes', 'merge_passes', 'incremental'],
+ ['Sort_range', 'range', 'incremental'],
+ ['Sort_scan', 'scan', 'incremental']
+ ]
+ },
+ 'tmp': {
+ 'options': [None, 'Tmp Operations', 'counter', 'temporaries', 'mysql.tmp', 'line'],
+ 'lines': [
+ ['Created_tmp_disk_tables', 'disk_tables', 'incremental'],
+ ['Created_tmp_files', 'files', 'incremental'],
+ ['Created_tmp_tables', 'tables', 'incremental']
+ ]
+ },
+ 'connections': {
+ 'options': [None, 'Connections', 'connections/s', 'connections', 'mysql.connections', 'line'],
+ 'lines': [
+ ['Connections', 'all', 'incremental'],
+ ['Aborted_connects', 'aborted', 'incremental']
+ ]
+ },
+ 'connections_active': {
+ 'options': [None, 'Connections Active', 'connections', 'connections', 'mysql.connections_active', 'line'],
+ 'lines': [
+ ['Threads_connected', 'active', 'absolute'],
+ ['max_connections', 'limit', 'absolute'],
+ ['Max_used_connections', 'max_active', 'absolute']
+ ]
+ },
+ 'binlog_cache': {
+ 'options': [None, 'Binlog Cache', 'transactions/s', 'binlog', 'mysql.binlog_cache', 'line'],
+ 'lines': [
+ ['Binlog_cache_disk_use', 'disk', 'incremental'],
+ ['Binlog_cache_use', 'all', 'incremental']
+ ]
+ },
+ 'threads': {
+ 'options': [None, 'Threads', 'threads', 'threads', 'mysql.threads', 'line'],
+ 'lines': [
+ ['Threads_connected', 'connected', 'absolute'],
+ ['Threads_cached', 'cached', 'absolute', -1, 1],
+ ['Threads_running', 'running', 'absolute'],
+ ]
+ },
+ 'threads_creation_rate': {
+ 'options': [None, 'Threads Creation Rate', 'threads/s', 'threads', 'mysql.threads_creation_rate', 'line'],
+ 'lines': [
+ ['Threads_created', 'created', 'incremental'],
+ ]
+ },
+ 'thread_cache_misses': {
+        'options': [None, 'Threads Cache Misses', 'misses', 'threads', 'mysql.thread_cache_misses', 'area'],
+ 'lines': [
+ ['Thread_cache_misses', 'misses', 'absolute', 1, 100]
+ ]
+ },
+ 'innodb_io': {
+ 'options': [None, 'InnoDB I/O Bandwidth', 'KiB/s', 'innodb', 'mysql.innodb_io', 'area'],
+ 'lines': [
+ ['Innodb_data_read', 'read', 'incremental', 1, 1024],
+ ['Innodb_data_written', 'write', 'incremental', -1, 1024]
+ ]
+ },
+ 'innodb_io_ops': {
+ 'options': [None, 'InnoDB I/O Operations', 'operations/s', 'innodb', 'mysql.innodb_io_ops', 'line'],
+ 'lines': [
+ ['Innodb_data_reads', 'reads', 'incremental'],
+ ['Innodb_data_writes', 'writes', 'incremental', -1, 1],
+ ['Innodb_data_fsyncs', 'fsyncs', 'incremental']
+ ]
+ },
+ 'innodb_io_pending_ops': {
+ 'options': [None, 'InnoDB Pending I/O Operations', 'operations', 'innodb',
+ 'mysql.innodb_io_pending_ops', 'line'],
+ 'lines': [
+ ['Innodb_data_pending_reads', 'reads', 'absolute'],
+ ['Innodb_data_pending_writes', 'writes', 'absolute', -1, 1],
+ ['Innodb_data_pending_fsyncs', 'fsyncs', 'absolute']
+ ]
+ },
+ 'innodb_log': {
+ 'options': [None, 'InnoDB Log Operations', 'operations/s', 'innodb', 'mysql.innodb_log', 'line'],
+ 'lines': [
+ ['Innodb_log_waits', 'waits', 'incremental'],
+ ['Innodb_log_write_requests', 'write_requests', 'incremental', -1, 1],
+ ['Innodb_log_writes', 'writes', 'incremental', -1, 1],
+ ]
+ },
+ 'innodb_os_log': {
+ 'options': [None, 'InnoDB OS Log Pending Operations', 'operations', 'innodb', 'mysql.innodb_os_log', 'line'],
+ 'lines': [
+ ['Innodb_os_log_pending_fsyncs', 'fsyncs', 'absolute'],
+ ['Innodb_os_log_pending_writes', 'writes', 'absolute', -1, 1],
+ ]
+ },
+ 'innodb_os_log_fsync_writes': {
+ 'options': [None, 'InnoDB OS Log Operations', 'operations/s', 'innodb', 'mysql.innodb_os_log', 'line'],
+ 'lines': [
+ ['Innodb_os_log_fsyncs', 'fsyncs', 'incremental'],
+ ]
+ },
+ 'innodb_os_log_io': {
+ 'options': [None, 'InnoDB OS Log Bandwidth', 'KiB/s', 'innodb', 'mysql.innodb_os_log_io', 'area'],
+ 'lines': [
+ ['Innodb_os_log_written', 'write', 'incremental', -1, 1024],
+ ]
+ },
+ 'innodb_cur_row_lock': {
+ 'options': [None, 'InnoDB Current Row Locks', 'operations', 'innodb',
+ 'mysql.innodb_cur_row_lock', 'area'],
+ 'lines': [
+ ['Innodb_row_lock_current_waits', 'current_waits', 'absolute']
+ ]
+ },
+ 'innodb_deadlocks': {
+ 'options': [None, 'InnoDB Deadlocks', 'operations/s', 'innodb',
+ 'mysql.innodb_deadlocks', 'area'],
+ 'lines': [
+ ['Innodb_deadlocks', 'deadlocks', 'incremental']
+ ]
+ },
+ 'innodb_rows': {
+ 'options': [None, 'InnoDB Row Operations', 'operations/s', 'innodb', 'mysql.innodb_rows', 'area'],
+ 'lines': [
+ ['Innodb_rows_inserted', 'inserted', 'incremental'],
+ ['Innodb_rows_read', 'read', 'incremental', 1, 1],
+ ['Innodb_rows_updated', 'updated', 'incremental', 1, 1],
+ ['Innodb_rows_deleted', 'deleted', 'incremental', -1, 1],
+ ]
+ },
+ 'innodb_buffer_pool_pages': {
+ 'options': [None, 'InnoDB Buffer Pool Pages', 'pages', 'innodb',
+ 'mysql.innodb_buffer_pool_pages', 'line'],
+ 'lines': [
+ ['Innodb_buffer_pool_pages_data', 'data', 'absolute'],
+ ['Innodb_buffer_pool_pages_dirty', 'dirty', 'absolute', -1, 1],
+ ['Innodb_buffer_pool_pages_free', 'free', 'absolute'],
+ ['Innodb_buffer_pool_pages_misc', 'misc', 'absolute', -1, 1],
+ ['Innodb_buffer_pool_pages_total', 'total', 'absolute']
+ ]
+ },
+ 'innodb_buffer_pool_flush_pages_requests': {
+ 'options': [None, 'InnoDB Buffer Pool Flush Pages Requests', 'requests/s', 'innodb',
+ 'mysql.innodb_buffer_pool_pages', 'line'],
+ 'lines': [
+ ['Innodb_buffer_pool_pages_flushed', 'flush pages', 'incremental'],
+ ]
+ },
+ 'innodb_buffer_pool_bytes': {
+ 'options': [None, 'InnoDB Buffer Pool Bytes', 'MiB', 'innodb', 'mysql.innodb_buffer_pool_bytes', 'area'],
+ 'lines': [
+ ['Innodb_buffer_pool_bytes_data', 'data', 'absolute', 1, 1024 * 1024],
+ ['Innodb_buffer_pool_bytes_dirty', 'dirty', 'absolute', -1, 1024 * 1024]
+ ]
+ },
+ 'innodb_buffer_pool_read_ahead': {
+        'options': [None, 'InnoDB Buffer Pool Read Ahead', 'operations/s', 'innodb',
+ 'mysql.innodb_buffer_pool_read_ahead', 'area'],
+ 'lines': [
+ ['Innodb_buffer_pool_read_ahead', 'all', 'incremental'],
+ ['Innodb_buffer_pool_read_ahead_evicted', 'evicted', 'incremental', -1, 1],
+ ['Innodb_buffer_pool_read_ahead_rnd', 'random', 'incremental']
+ ]
+ },
+ 'innodb_buffer_pool_reqs': {
+ 'options': [None, 'InnoDB Buffer Pool Requests', 'requests/s', 'innodb',
+ 'mysql.innodb_buffer_pool_reqs', 'area'],
+ 'lines': [
+ ['Innodb_buffer_pool_read_requests', 'reads', 'incremental'],
+ ['Innodb_buffer_pool_write_requests', 'writes', 'incremental', -1, 1]
+ ]
+ },
+ 'innodb_buffer_pool_ops': {
+ 'options': [None, 'InnoDB Buffer Pool Operations', 'operations/s', 'innodb',
+ 'mysql.innodb_buffer_pool_ops', 'area'],
+ 'lines': [
+ ['Innodb_buffer_pool_reads', 'disk reads', 'incremental'],
+ ['Innodb_buffer_pool_wait_free', 'wait free', 'incremental', -1, 1]
+ ]
+ },
+ 'qcache_ops': {
+ 'options': [None, 'QCache Operations', 'queries/s', 'qcache', 'mysql.qcache_ops', 'line'],
+ 'lines': [
+ ['Qcache_hits', 'hits', 'incremental'],
+ ['Qcache_lowmem_prunes', 'lowmem prunes', 'incremental', -1, 1],
+ ['Qcache_inserts', 'inserts', 'incremental'],
+ ['Qcache_not_cached', 'not cached', 'incremental', -1, 1]
+ ]
+ },
+ 'qcache': {
+ 'options': [None, 'QCache Queries in Cache', 'queries', 'qcache', 'mysql.qcache', 'line'],
+ 'lines': [
+ ['Qcache_queries_in_cache', 'queries', 'absolute']
+ ]
+ },
+ 'qcache_freemem': {
+ 'options': [None, 'QCache Free Memory', 'MiB', 'qcache', 'mysql.qcache_freemem', 'area'],
+ 'lines': [
+ ['Qcache_free_memory', 'free', 'absolute', 1, 1024 * 1024]
+ ]
+ },
+ 'qcache_memblocks': {
+ 'options': [None, 'QCache Memory Blocks', 'blocks', 'qcache', 'mysql.qcache_memblocks', 'line'],
+ 'lines': [
+ ['Qcache_free_blocks', 'free', 'absolute'],
+ ['Qcache_total_blocks', 'total', 'absolute']
+ ]
+ },
+ 'key_blocks': {
+ 'options': [None, 'MyISAM Key Cache Blocks', 'blocks', 'myisam', 'mysql.key_blocks', 'line'],
+ 'lines': [
+ ['Key_blocks_unused', 'unused', 'absolute'],
+ ['Key_blocks_used', 'used', 'absolute', -1, 1],
+ ['Key_blocks_not_flushed', 'not flushed', 'absolute']
+ ]
+ },
+ 'key_requests': {
+ 'options': [None, 'MyISAM Key Cache Requests', 'requests/s', 'myisam', 'mysql.key_requests', 'area'],
+ 'lines': [
+ ['Key_read_requests', 'reads', 'incremental'],
+ ['Key_write_requests', 'writes', 'incremental', -1, 1]
+ ]
+ },
+ 'key_disk_ops': {
+ 'options': [None, 'MyISAM Key Cache Disk Operations', 'operations/s',
+ 'myisam', 'mysql.key_disk_ops', 'area'],
+ 'lines': [
+ ['Key_reads', 'reads', 'incremental'],
+ ['Key_writes', 'writes', 'incremental', -1, 1]
+ ]
+ },
+ 'files': {
+ 'options': [None, 'Open Files', 'files', 'files', 'mysql.files', 'line'],
+ 'lines': [
+ ['Open_files', 'files', 'absolute']
+ ]
+ },
+ 'files_rate': {
+ 'options': [None, 'Opened Files Rate', 'files/s', 'files', 'mysql.files_rate', 'line'],
+ 'lines': [
+ ['Opened_files', 'files', 'incremental']
+ ]
+ },
+ 'binlog_stmt_cache': {
+ 'options': [None, 'Binlog Statement Cache', 'statements/s', 'binlog',
+ 'mysql.binlog_stmt_cache', 'line'],
+ 'lines': [
+ ['Binlog_stmt_cache_disk_use', 'disk', 'incremental'],
+ ['Binlog_stmt_cache_use', 'all', 'incremental']
+ ]
+ },
+ 'connection_errors': {
+ 'options': [None, 'Connection Errors', 'connections/s', 'connections',
+ 'mysql.connection_errors', 'line'],
+ 'lines': [
+ ['Connection_errors_accept', 'accept', 'incremental'],
+ ['Connection_errors_internal', 'internal', 'incremental'],
+ ['Connection_errors_max_connections', 'max', 'incremental'],
+ ['Connection_errors_peer_address', 'peer_addr', 'incremental'],
+ ['Connection_errors_select', 'select', 'incremental'],
+ ['Connection_errors_tcpwrap', 'tcpwrap', 'incremental']
+ ]
+ },
+ 'slave_behind': {
+ 'options': [None, 'Slave Behind Seconds', 'seconds', 'slave', 'mysql.slave_behind', 'line'],
+ 'lines': [
+ ['Seconds_Behind_Master', 'seconds', 'absolute']
+ ]
+ },
+ 'slave_status': {
+ 'options': [None, 'Slave Status', 'status', 'slave', 'mysql.slave_status', 'line'],
+ 'lines': [
+ ['Slave_SQL_Running', 'sql_running', 'absolute'],
+ ['Slave_IO_Running', 'io_running', 'absolute']
+ ]
+ },
+ 'galera_writesets': {
+ 'options': [None, 'Replicated Writesets', 'writesets/s', 'galera', 'mysql.galera_writesets', 'line'],
+ 'lines': [
+ ['wsrep_received', 'rx', 'incremental'],
+ ['wsrep_replicated', 'tx', 'incremental', -1, 1],
+ ]
+ },
+ 'galera_bytes': {
+ 'options': [None, 'Replicated Bytes', 'KiB/s', 'galera', 'mysql.galera_bytes', 'area'],
+ 'lines': [
+ ['wsrep_received_bytes', 'rx', 'incremental', 1, 1024],
+ ['wsrep_replicated_bytes', 'tx', 'incremental', -1, 1024],
+ ]
+ },
+ 'galera_queue': {
+ 'options': [None, 'Galera Queue', 'writesets', 'galera', 'mysql.galera_queue', 'line'],
+ 'lines': [
+ ['wsrep_local_recv_queue', 'rx', 'absolute'],
+ ['wsrep_local_send_queue', 'tx', 'absolute', -1, 1],
+ ]
+ },
+ 'galera_conflicts': {
+ 'options': [None, 'Replication Conflicts', 'transactions', 'galera', 'mysql.galera_conflicts', 'area'],
+ 'lines': [
+ ['wsrep_local_bf_aborts', 'bf_aborts', 'incremental'],
+ ['wsrep_local_cert_failures', 'cert_fails', 'incremental', -1, 1],
+ ]
+ },
+ 'galera_flow_control': {
+ 'options': [None, 'Flow Control', 'millisec', 'galera', 'mysql.galera_flow_control', 'area'],
+ 'lines': [
+ ['wsrep_flow_control_paused_ns', 'paused', 'incremental', 1, 1000000],
+ ]
+ },
+ 'galera_cluster_status': {
+ 'options': [None, 'Cluster Component Status', 'status', 'galera', 'mysql.galera_cluster_status', 'line'],
+ 'lines': [
+ ['wsrep_cluster_status', 'status', 'absolute'],
+ ]
+ },
+ 'galera_cluster_state': {
+ 'options': [None, 'Cluster Component State', 'state', 'galera', 'mysql.galera_cluster_state', 'line'],
+ 'lines': [
+ ['wsrep_local_state', 'state', 'absolute'],
+ ]
+ },
+ 'galera_cluster_size': {
+ 'options': [None, 'Number of Nodes in the Cluster', 'num', 'galera', 'mysql.galera_cluster_size', 'line'],
+ 'lines': [
+ ['wsrep_cluster_size', 'nodes', 'absolute'],
+ ]
+ },
+ 'galera_cluster_weight': {
+ 'options': [None, 'The Total Weight of the Current Members in the Cluster', 'weight', 'galera',
+ 'mysql.galera_cluster_weight', 'line'],
+ 'lines': [
+ ['wsrep_cluster_weight', 'weight', 'absolute'],
+ ]
+ },
+ 'galera_connected': {
+ 'options': [None, 'Whether the Node is Connected to the Cluster', 'boolean', 'galera',
+ 'mysql.galera_connected', 'line'],
+ 'lines': [
+ ['wsrep_connected', 'connected', 'absolute'],
+ ]
+ },
+ 'galera_ready': {
+ 'options': [None, 'Whether the Node is Ready to Accept Queries', 'boolean', 'galera',
+ 'mysql.galera_ready', 'line'],
+ 'lines': [
+ ['wsrep_ready', 'ready', 'absolute'],
+ ]
+ },
+ 'galera_open_transactions': {
+ 'options': [None, 'Open Transactions', 'num', 'galera', 'mysql.galera_open_transactions', 'line'],
+ 'lines': [
+ ['wsrep_open_transactions', 'open transactions', 'absolute'],
+ ]
+ },
+ 'galera_thread_count': {
+ 'options': [None, 'Total Number of WSRep (applier/rollbacker) Threads', 'num', 'galera',
+ 'mysql.galera_thread_count', 'line'],
+ 'lines': [
+ ['wsrep_thread_count', 'threads', 'absolute'],
+ ]
+ },
+ 'userstats_cpu': {
+ 'options': [None, 'Users CPU time', 'percentage', 'userstats', 'mysql.userstats_cpu', 'stacked'],
+ 'lines': []
+ }
+}
+
+
+def slave_status_chart_template(channel_name):
+ order = [
+ 'slave_behind_{0}'.format(channel_name),
+ 'slave_status_{0}'.format(channel_name)
+ ]
+
+ charts = {
+ order[0]: {
+ 'options': [None, 'Slave Behind Seconds Channel {0}'.format(channel_name),
+ 'seconds', 'slave', 'mysql.slave_behind', 'line'],
+ 'lines': [
+ ['Seconds_Behind_Master_{0}'.format(channel_name), 'seconds', 'absolute']
+ ]
+ },
+ order[1]: {
+ 'options': [None, 'Slave Status Channel {0}'.format(channel_name),
+ 'status', 'slave', 'mysql.slave_status', 'line'],
+ 'lines': [
+ ['Slave_SQL_Running_{0}'.format(channel_name), 'sql_running', 'absolute'],
+ ['Slave_IO_Running_{0}'.format(channel_name), 'io_running', 'absolute']
+ ]
+ },
+ }
+
+ return order, charts
+
+
+def userstats_chart_template(name):
+ order = [
+ 'userstats_rows_{0}'.format(name),
+ 'userstats_commands_{0}'.format(name)
+ ]
+ family = 'userstats {0}'.format(name)
+
+ charts = {
+ order[0]: {
+ 'options': [None, 'Rows Operations', 'operations/s', family, 'mysql.userstats_rows', 'stacked'],
+ 'lines': [
+ ['userstats_{0}_Rows_read'.format(name), 'read', 'incremental'],
+            ['userstats_{0}_Rows_sent'.format(name), 'sent', 'incremental'],
+ ['userstats_{0}_Rows_updated'.format(name), 'updated', 'incremental'],
+ ['userstats_{0}_Rows_inserted'.format(name), 'inserted', 'incremental'],
+ ['userstats_{0}_Rows_deleted'.format(name), 'deleted', 'incremental']
+ ]
+ },
+ order[1]: {
+ 'options': [None, 'Commands', 'commands/s', family, 'mysql.userstats_commands', 'stacked'],
+ 'lines': [
+ ['userstats_{0}_Select_commands'.format(name), 'select', 'incremental'],
+ ['userstats_{0}_Update_commands'.format(name), 'update', 'incremental'],
+ ['userstats_{0}_Other_commands'.format(name), 'other', 'incremental']
+ ]
+ }
+ }
+
+ return order, charts
+
+
+# https://dev.mysql.com/doc/refman/8.0/en/replication-channels.html
+DEFAULT_REPL_CHANNEL = ''
+
+
+# Write Set REPlication
+# https://galeracluster.com/library/documentation/galera-status-variables.html
+# https://www.percona.com/doc/percona-xtradb-cluster/LATEST/wsrep-status-index.html
+class WSRepDataConverter:
+ unknown_value = -1
+
+ def convert(self, key, value):
+ if key == 'wsrep_connected':
+ return self.convert_connected(value)
+ elif key == 'wsrep_ready':
+ return self.convert_ready(value)
+ elif key == 'wsrep_cluster_status':
+ return self.convert_cluster_status(value)
+ return value
+
+ def convert_connected(self, value):
+ # https://www.percona.com/doc/percona-xtradb-cluster/LATEST/wsrep-status-index.html#wsrep_connected
+ if value == 'OFF':
+ return 0
+ if value == 'ON':
+ return 1
+ return self.unknown_value
+
+ def convert_ready(self, value):
+ # https://www.percona.com/doc/percona-xtradb-cluster/LATEST/wsrep-status-index.html#wsrep_ready
+ if value == 'OFF':
+ return 0
+ if value == 'ON':
+ return 1
+ return self.unknown_value
+
+ def convert_cluster_status(self, value):
+ # https://www.percona.com/doc/percona-xtradb-cluster/LATEST/wsrep-status-index.html#wsrep_cluster_status
+ # https://github.com/codership/wsrep-API/blob/eab2d5d5a31672c0b7d116ef1629ff18392fd7d0/wsrep_api.h
+ # typedef enum wsrep_view_status {
+ # WSREP_VIEW_PRIMARY, //!< primary group configuration (quorum present)
+ # WSREP_VIEW_NON_PRIMARY, //!< non-primary group configuration (quorum lost)
+ # WSREP_VIEW_DISCONNECTED, //!< not connected to group, retrying.
+ # WSREP_VIEW_MAX
+ # } wsrep_view_status_t;
+ value = value.lower()
+ if value == 'primary':
+ return 0
+ elif value == 'non-primary':
+ return 1
+ elif value == 'disconnected':
+ return 2
+ return self.unknown_value
+
+
+wsrep_converter = WSRepDataConverter()
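+# Illustrative: wsrep_converter.convert('wsrep_cluster_status', 'Primary') -> 0,
+# wsrep_converter.convert('wsrep_ready', 'ON') -> 1; unrecognized values map to -1.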
+
+
+class Service(MySQLService):
+ def __init__(self, configuration=None, name=None):
+ MySQLService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
+ self.queries = dict(
+ global_status=QUERY_GLOBAL,
+ slave_status=QUERY_SLAVE,
+ variables=QUERY_VARIABLES,
+ user_statistics=QUERY_USER_STATISTICS,
+ )
+ self.repl_channels = [DEFAULT_REPL_CHANNEL]
+
+ def _get_data(self):
+
+ raw_data = self._get_raw_data(description=True)
+
+ if not raw_data:
+ return None
+
+ data = dict()
+
+ if 'global_status' in raw_data:
+ global_status = self.get_global_status(raw_data['global_status'])
+ if global_status:
+ data.update(global_status)
+
+ if 'slave_status' in raw_data:
+ status = self.get_slave_status(raw_data['slave_status'])
+ if status:
+ data.update(status)
+
+ if 'user_statistics' in raw_data:
+ if raw_data['user_statistics'][0]:
+ data.update(self.get_userstats(raw_data))
+ else:
+ self.queries.pop('user_statistics')
+
+ if 'variables' in raw_data:
+ variables = dict(raw_data['variables'][0])
+ for key in VARIABLES:
+ if key in variables:
+ data[key] = variables[key]
+
+ return data or None
+
+ @staticmethod
+ def convert_wsrep(key, value):
+ return wsrep_converter.convert(key, value)
+
+ def get_global_status(self, raw_global_status):
+ # (
+ # (
+ # ('Aborted_clients', '18'),
+ # ('Aborted_connects', '33'),
+ # ('Access_denied_errors', '80'),
+ # ('Acl_column_grants', '0'),
+ # ('Acl_database_grants', '0'),
+ # ('Acl_function_grants', '0'),
+ # ('wsrep_ready', 'OFF'),
+ # ('wsrep_rollbacker_thread_count', '0'),
+ # ('wsrep_thread_count', '0')
+ # ),
+ # (
+ # ('Variable_name', 253, 60, 64, 64, 0, 0),
+ # ('Value', 253, 48, 2048, 2048, 0, 0),
+ # )
+ # )
+ rows = raw_global_status[0]
+ if not rows:
+ return
+
+ global_status = dict(rows)
+ data = dict()
+
+ for key in GLOBAL_STATS:
+ if key not in global_status:
+ continue
+ value = global_status[key]
+ data[key] = value
+
+ for key in GALERA_STATS:
+ if key not in global_status:
+ continue
+ value = global_status[key]
+ value = self.convert_wsrep(key, value)
+ data[key] = value
+
+ if 'Threads_created' in data and 'Connections' in data:
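+            # ratio scaled by 10000; the thread_cache_misses chart divides by 100,
+            # rendering a percentage with two decimal places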
+ data['Thread_cache_misses'] = round(int(data['Threads_created']) / float(data['Connections']) * 10000)
+ return data
+
+ def get_slave_status(self, slave_status_data):
+ rows, description = slave_status_data[0], slave_status_data[1]
+ description_keys = [v[0] for v in description]
+ if not rows:
+ return
+
+ data = dict()
+ for row in rows:
+ slave_data = dict(zip(description_keys, row))
+ channel_name = slave_data.get('Channel_Name', DEFAULT_REPL_CHANNEL)
+
+ if channel_name not in self.repl_channels and len(self.charts) > 0:
+ self.add_repl_channel_charts(channel_name)
+ self.repl_channels.append(channel_name)
+
+ for key, func in SLAVE_STATS:
+ if key not in slave_data:
+ continue
+
+ value = slave_data[key]
+ if channel_name:
+ key = '{0}_{1}'.format(key, channel_name)
+ data[key] = func(value)
+
+ return data
+
+ def add_repl_channel_charts(self, name):
+ self.add_new_charts(slave_status_chart_template, name)
+
+ def get_userstats(self, raw_data):
+ # (
+ # (
+ # ('netdata', 1L, 0L, 60L, 0.15842499999999984, 0.15767439999999996, 5206L, 963957L, 0L, 0L,
+ # 61L, 0L, 0L, 0L, 0L, 0L, 62L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L),
+ # ),
+ # (
+ # ('User', 253, 7, 128, 128, 0, 0),
+ # ('Total_connections', 3, 2, 11, 11, 0, 0),
+ # ('Concurrent_connections', 3, 1, 11, 11, 0, 0),
+ # ('Connected_time', 3, 2, 11, 11, 0, 0),
+ # ('Busy_time', 5, 20, 21, 21, 31, 0),
+ # ('Cpu_time', 5, 20, 21, 21, 31, 0),
+ # ('Bytes_received', 8, 4, 21, 21, 0, 0),
+ # ('Bytes_sent', 8, 6, 21, 21, 0, 0),
+ # ('Binlog_bytes_written', 8, 1, 21, 21, 0, 0),
+ # ('Rows_read', 8, 1, 21, 21, 0, 0),
+ # ('Rows_sent', 8, 2, 21, 21, 0, 0),
+ # ('Rows_deleted', 8, 1, 21, 21, 0, 0),
+ # ('Rows_inserted', 8, 1, 21, 21, 0, 0),
+ # ('Rows_updated', 8, 1, 21, 21, 0, 0),
+ # ('Select_commands', 8, 2, 21, 21, 0, 0),
+ # ('Update_commands', 8, 1, 21, 21, 0, 0),
+ # ('Other_commands', 8, 2, 21, 21, 0, 0),
+ # ('Commit_transactions', 8, 1, 21, 21, 0, 0),
+ # ('Rollback_transactions', 8, 1, 21, 21, 0, 0),
+ # ('Denied_connections', 8, 1, 21, 21, 0, 0),
+ # ('Lost_connections', 8, 1, 21, 21, 0, 0),
+ # ('Access_denied', 8, 1, 21, 21, 0, 0),
+ # ('Empty_queries', 8, 2, 21, 21, 0, 0),
+ # ('Total_ssl_connections', 8, 1, 21, 21, 0, 0),
+ # ('Max_statement_time_exceeded', 8, 1, 21, 21, 0, 0)
+ # )
+ # )
+ data = dict()
+ userstats_vars = [e[0] for e in raw_data['user_statistics'][1]]
+ for i, _ in enumerate(raw_data['user_statistics'][0]):
+ user_name = raw_data['user_statistics'][0][i][0]
+ userstats = dict(zip(userstats_vars, raw_data['user_statistics'][0][i]))
+
+ if len(self.charts) > 0:
+ if ('userstats_{0}_Cpu_time'.format(user_name)) not in self.charts['userstats_cpu']:
+ self.add_userstats_dimensions(user_name)
+ self.create_new_userstats_charts(user_name)
+
+ for key in USER_STATISTICS:
+ if key in userstats:
+ data['userstats_{0}_{1}'.format(user_name, key)] = userstats[key]
+
+ return data
+
+ def add_userstats_dimensions(self, name):
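+        # Cpu_time is cumulative CPU seconds; charted as an incremental rate it is
+        # CPU-seconds per second, so the 100 multiplier renders it as a percentage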
+ self.charts['userstats_cpu'].add_dimension(['userstats_{0}_Cpu_time'.format(name), name, 'incremental', 100, 1])
+
+ def create_new_userstats_charts(self, tube):
+ self.add_new_charts(userstats_chart_template, tube)
+
+ def add_new_charts(self, template, *params):
+ order, charts = template(*params)
+
+ for chart_name in order:
+ params = [chart_name] + charts[chart_name]['options']
+ dimensions = charts[chart_name]['lines']
+
+ new_chart = self.charts.add_chart(params)
+ for dimension in dimensions:
+ new_chart.add_dimension(dimension)
diff --git a/collectors/python.d.plugin/mysql/mysql.conf b/collectors/python.d.plugin/mysql/mysql.conf
new file mode 100644
index 0000000..31bfe9c
--- /dev/null
+++ b/collectors/python.d.plugin/mysql/mysql.conf
@@ -0,0 +1,293 @@
+# netdata python.d.plugin configuration for mysql
+#
+# This file is in YAML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# In addition to the above, mysql also supports the following:
+#
+# socket: 'path/to/mysql.sock'
+#
+# or
+# host: 'IP or HOSTNAME' # the host to connect to
+# port: PORT # the port to connect to
+#
+# in all cases, the following can also be set:
+#
+# user: 'username' # the mysql username to use
+# pass: 'password' # the mysql password to use
+#
+# ssl connection parameters
+#
+# ssl:
+# key: 'key' # the path name of the client private key file.
+# cert: 'cert' # the path name of the client public key certificate file.
+# ca: 'ca' # the path name of the Certificate Authority (CA) certificate file. This option, if used, must specify the same certificate used by the server.
+# capath: 'capath' # the path name of the directory that contains trusted SSL CA certificate files.
+# cipher: [ciphers] # the list of permitted ciphers for SSL encryption.
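+#
+# A hypothetical job combining these options (the host, credentials and
+# certificate paths below are placeholders, not defaults):
+#
+# remote_ssl:
+#   name : 'remote'
+#   host : '203.0.113.10'
+#   port : '3306'
+#   user : 'netdata'
+#   pass : 'password'
+#   ssl:
+#     ca   : '/etc/ssl/mysql/ca.pem'
+#     cert : '/etc/ssl/mysql/client-cert.pem'
+#     key  : '/etc/ssl/mysql/client-key.pem'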
+
+# ----------------------------------------------------------------------
+# mySQL CONFIGURATION
+#
+# netdata does not need any privilege - only the ability to connect
+# to the mysql server (netdata will not be able to see any data).
+#
+# Execute these commands to give the local user 'netdata' the ability
+# to connect to the mysql server on localhost, without a password:
+#
+# > create user 'netdata'@'localhost';
+# > grant usage on *.* to 'netdata'@'localhost';
+# > flush privileges;
+#
+# with the above statements, netdata will be able to gather mysql
+# statistics, without the ability to see or alter any data or affect
+# mysql operation in any way. No change is required below.
+#
+# If you need to monitor mysql replication too, use this instead:
+#
+# > create user 'netdata'@'localhost';
+# > grant replication client on *.* to 'netdata'@'localhost';
+# > flush privileges;
+#
+
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+
+mycnf1:
+ name : 'local'
+ 'my.cnf' : '/etc/my.cnf'
+
+mycnf2:
+ name : 'local'
+ 'my.cnf' : '/etc/mysql/my.cnf'
+
+debiancnf:
+ name : 'local'
+ 'my.cnf' : '/etc/mysql/debian.cnf'
+
+socket1:
+ name : 'local'
+ # user : ''
+ # pass : ''
+ socket : '/var/run/mysqld/mysqld.sock'
+
+socket2:
+ name : 'local'
+ # user : ''
+ # pass : ''
+ socket : '/var/run/mysqld/mysql.sock'
+
+socket3:
+ name : 'local'
+ # user : ''
+ # pass : ''
+ socket : '/var/lib/mysql/mysql.sock'
+
+socket4:
+ name : 'local'
+ # user : ''
+ # pass : ''
+ socket : '/tmp/mysql.sock'
+
+tcp:
+ name : 'local'
+ # user : ''
+ # pass : ''
+ host : 'localhost'
+ port : '3306'
+ # keep in mind port might be ignored by mysql, if host = 'localhost'
+ # http://serverfault.com/questions/337818/how-to-force-mysql-to-connect-by-tcp-instead-of-a-unix-socket/337844#337844
+
+tcpipv4:
+ name : 'local'
+ # user : ''
+ # pass : ''
+ host : '127.0.0.1'
+ port : '3306'
+
+tcpipv6:
+ name : 'local'
+ # user : ''
+ # pass : ''
+ host : '::1'
+ port : '3306'
+
+
+# Now we try the same as above with user: root
+# A few systems configure mysql to accept passwordless
+# root access.
+
+mycnf1_root:
+ name : 'local'
+ user : 'root'
+ 'my.cnf' : '/etc/my.cnf'
+
+mycnf2_root:
+ name : 'local'
+ user : 'root'
+ 'my.cnf' : '/etc/mysql/my.cnf'
+
+socket1_root:
+ name : 'local'
+ user : 'root'
+ # pass : ''
+ socket : '/var/run/mysqld/mysqld.sock'
+
+socket2_root:
+ name : 'local'
+ user : 'root'
+ # pass : ''
+ socket : '/var/run/mysqld/mysql.sock'
+
+socket3_root:
+ name : 'local'
+ user : 'root'
+ # pass : ''
+ socket : '/var/lib/mysql/mysql.sock'
+
+socket4_root:
+ name : 'local'
+ user : 'root'
+ # pass : ''
+ socket : '/tmp/mysql.sock'
+
+tcp_root:
+ name : 'local'
+ user : 'root'
+ # pass : ''
+ host : 'localhost'
+ port : '3306'
+ # keep in mind port might be ignored by mysql, if host = 'localhost'
+ # http://serverfault.com/questions/337818/how-to-force-mysql-to-connect-by-tcp-instead-of-a-unix-socket/337844#337844
+
+tcpipv4_root:
+ name : 'local'
+ user : 'root'
+ # pass : ''
+ host : '127.0.0.1'
+ port : '3306'
+
+tcpipv6_root:
+ name : 'local'
+ user : 'root'
+ # pass : ''
+ host : '::1'
+ port : '3306'
+
+
+# Now we try the same as above with user: netdata
+
+mycnf1_netdata:
+ name : 'local'
+ user : 'netdata'
+ 'my.cnf' : '/etc/my.cnf'
+
+mycnf2_netdata:
+ name : 'local'
+ user : 'netdata'
+ 'my.cnf' : '/etc/mysql/my.cnf'
+
+socket1_netdata:
+ name : 'local'
+ user : 'netdata'
+ # pass : ''
+ socket : '/var/run/mysqld/mysqld.sock'
+
+socket2_netdata:
+ name : 'local'
+ user : 'netdata'
+ # pass : ''
+ socket : '/var/run/mysqld/mysql.sock'
+
+socket3_netdata:
+ name : 'local'
+ user : 'netdata'
+ # pass : ''
+ socket : '/var/lib/mysql/mysql.sock'
+
+socket4_netdata:
+ name : 'local'
+ user : 'netdata'
+ # pass : ''
+ socket : '/tmp/mysql.sock'
+
+tcp_netdata:
+ name : 'local'
+ user : 'netdata'
+ # pass : ''
+ host : 'localhost'
+ port : '3306'
+ # keep in mind port might be ignored by mysql, if host = 'localhost'
+ # http://serverfault.com/questions/337818/how-to-force-mysql-to-connect-by-tcp-instead-of-a-unix-socket/337844#337844
+
+tcpipv4_netdata:
+ name : 'local'
+ user : 'netdata'
+ # pass : ''
+ host : '127.0.0.1'
+ port : '3306'
+
+tcpipv6_netdata:
+ name : 'local'
+ user : 'netdata'
+ # pass : ''
+ host : '::1'
+ port : '3306'
+
diff --git a/collectors/python.d.plugin/nginx/Makefile.inc b/collectors/python.d.plugin/nginx/Makefile.inc
new file mode 100644
index 0000000..4636aa8
--- /dev/null
+++ b/collectors/python.d.plugin/nginx/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += nginx/nginx.chart.py
+dist_pythonconfig_DATA += nginx/nginx.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += nginx/README.md nginx/Makefile.inc
+
diff --git a/collectors/python.d.plugin/nginx/README.md b/collectors/python.d.plugin/nginx/README.md
new file mode 100644
index 0000000..b55b01e
--- /dev/null
+++ b/collectors/python.d.plugin/nginx/README.md
@@ -0,0 +1,65 @@
+<!--
+title: "NGINX monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/nginx/README.md
+sidebar_label: "NGINX"
+-->
+
+# NGINX monitoring with Netdata
+
+Monitors one or more NGINX servers depending on configuration. Servers can be either local or remote.
+
+## Requirements
+
+- nginx with the `ngx_http_stub_status_module` configured
+- a `location /stub_status` block that exposes it (see the sketch below)
+
+An example nginx configuration can be found in `python.d/nginx.conf`.
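+
+A minimal sketch of such a block (the `allow` address is illustrative - use
+the address of the host running Netdata):
+
+```text
+location /stub_status {
+    stub_status;
+    allow 192.168.1.200;
+    deny all;
+}
+```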
+
+It produces the following charts:
+
+1. **Active Connections**
+
+ - active
+
+2. **Requests** in requests/s
+
+ - requests
+
+3. **Active Connections by Status**
+
+ - reading
+ - writing
+ - waiting
+
+4. **Connections Rate** in connections/s
+
+ - accepts
+ - handled
+
+## Configuration
+
+Edit the `python.d/nginx.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/nginx.conf
+```
+
+It needs only the `url` of the server's `stub_status` page.
+
+Here is an example for a local server:
+
+```yaml
+update_every : 10
+priority : 90100
+
+local:
+ url : 'http://localhost/stub_status'
+```
+
+Without configuration, the module attempts to connect to `http://localhost/stub_status`.
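+
+A remote server can be monitored the same way; the host below is a
+placeholder:
+
+```yaml
+remote:
+  name : 'remote_web_server'
+  url  : 'http://203.0.113.10/stub_status'
+```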
+
+---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fnginx%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/python.d.plugin/nginx/nginx.chart.py b/collectors/python.d.plugin/nginx/nginx.chart.py
new file mode 100644
index 0000000..7548d6a
--- /dev/null
+++ b/collectors/python.d.plugin/nginx/nginx.chart.py
@@ -0,0 +1,71 @@
+# -*- coding: utf-8 -*-
+# Description: nginx netdata python.d module
+# Author: Pawel Krupa (paulfantom)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from bases.FrameworkServices.UrlService import UrlService
+
+ORDER = [
+ 'connections',
+ 'requests',
+ 'connection_status',
+ 'connect_rate',
+]
+
+CHARTS = {
+ 'connections': {
+ 'options': [None, 'Active Connections', 'connections', 'active connections',
+ 'nginx.connections', 'line'],
+ 'lines': [
+ ['active']
+ ]
+ },
+ 'requests': {
+ 'options': [None, 'Requests', 'requests/s', 'requests', 'nginx.requests', 'line'],
+ 'lines': [
+ ['requests', None, 'incremental']
+ ]
+ },
+ 'connection_status': {
+ 'options': [None, 'Active Connections by Status', 'connections', 'status',
+ 'nginx.connection_status', 'line'],
+ 'lines': [
+ ['reading'],
+ ['writing'],
+ ['waiting', 'idle']
+ ]
+ },
+ 'connect_rate': {
+ 'options': [None, 'Connections Rate', 'connections/s', 'connections rate',
+ 'nginx.connect_rate', 'line'],
+ 'lines': [
+ ['accepts', 'accepted', 'incremental'],
+ ['handled', None, 'incremental']
+ ]
+ }
+}
+
+
+class Service(UrlService):
+ def __init__(self, configuration=None, name=None):
+ UrlService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
+ self.url = self.configuration.get('url', 'http://localhost/stub_status')
+
+ def _get_data(self):
+ """
+ Format data received from http request
+ :return: dict
+ """
+ try:
+ raw = self._get_raw_data().split(" ")
+ return {'active': int(raw[2]),
+ 'requests': int(raw[9]),
+ 'reading': int(raw[11]),
+ 'writing': int(raw[13]),
+ 'waiting': int(raw[15]),
+ 'accepts': int(raw[7]),
+ 'handled': int(raw[8])}
+ except (ValueError, AttributeError):
+ return None
diff --git a/collectors/python.d.plugin/nginx/nginx.conf b/collectors/python.d.plugin/nginx/nginx.conf
new file mode 100644
index 0000000..4001b4b
--- /dev/null
+++ b/collectors/python.d.plugin/nginx/nginx.conf
@@ -0,0 +1,107 @@
+# netdata python.d.plugin configuration for nginx
+#
+# You must have ngx_http_stub_status_module configured on your nginx server for this
+# plugin to work. The following is an example config.
+# It must be located inside a server { } block.
+#
+# location /stub_status {
+# stub_status;
+# # Security: Only allow access from the IP below.
+# allow 192.168.1.200;
+# # Deny anyone else
+# deny all;
+# }
+#
+# This file is in YAML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# In addition to the above, this plugin also supports the following:
+#
+# url: 'URL' # the URL to fetch nginx's status stats
+#
+# if the URL is password protected, the following are supported:
+#
+# user: 'username'
+# pass: 'password'
+#
+# Example
+#
+# RemoteNginx:
+# name : 'Reverse_Proxy'
+# url : 'http://yourdomain.com/stub_status'
+#
+# "RemoteNginx" will show up in Netdata logs. "Reverse Proxy" will show up in the menu
+# in the nginx section.
+
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+
+localhost:
+ name : 'local'
+ url : 'http://localhost/stub_status'
+
+localipv4:
+ name : 'local'
+ url : 'http://127.0.0.1/stub_status'
+
+localipv6:
+ name : 'local'
+ url : 'http://[::1]/stub_status'
+
diff --git a/collectors/python.d.plugin/nginx_plus/Makefile.inc b/collectors/python.d.plugin/nginx_plus/Makefile.inc
new file mode 100644
index 0000000..d3fdeaf
--- /dev/null
+++ b/collectors/python.d.plugin/nginx_plus/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += nginx_plus/nginx_plus.chart.py
+dist_pythonconfig_DATA += nginx_plus/nginx_plus.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += nginx_plus/README.md nginx_plus/Makefile.inc
+
diff --git a/collectors/python.d.plugin/nginx_plus/README.md b/collectors/python.d.plugin/nginx_plus/README.md
new file mode 100644
index 0000000..2580740
--- /dev/null
+++ b/collectors/python.d.plugin/nginx_plus/README.md
@@ -0,0 +1,165 @@
+<!--
+title: "NGINX Plus monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/nginx_plus/README.md
+sidebar_label: "NGINX Plus"
+-->
+
+# NGINX Plus monitoring with Netdata
+
+Monitors one or more NGINX Plus servers depending on configuration. Servers can be either local or remote.
+
+An example nginx_plus configuration can be found in `python.d/nginx_plus.conf`.
+
+It produces the following charts:
+
+1. **Requests total** in requests/s
+
+ - total
+
+2. **Requests current** in requests
+
+ - current
+
+3. **Connection Statistics** in connections/s
+
+ - accepted
+ - dropped
+
+4. **Workers Statistics** in workers
+
+ - idle
+ - active
+
+5. **SSL Handshakes** in handshakes/s
+
+ - successful
+ - failed
+
+6. **SSL Session Reuses** in sessions/s
+
+ - reused
+
+7. **SSL Memory Usage** in percent
+
+ - usage
+
+8. **Processes** in processes
+
+ - respawned
+
+For every server zone:
+
+1. **Processing** in requests
+
+   - processing
+
+2. **Requests** in requests/s
+
+ - requests
+
+3. **Responses** in requests/s
+
+ - 1xx
+ - 2xx
+ - 3xx
+ - 4xx
+ - 5xx
+
+4. **Traffic** in kilobits/s
+
+ - received
+ - sent
+
+For every upstream:
+
+1. **Peers Requests** in requests/s
+
+ - peer name (dimension per peer)
+
+2. **All Peers Responses** in responses/s
+
+ - 1xx
+ - 2xx
+ - 3xx
+ - 4xx
+ - 5xx
+
+3. **Peer Responses** in requests/s (for every peer)
+
+ - 1xx
+ - 2xx
+ - 3xx
+ - 4xx
+ - 5xx
+
+4. **Peers Connections** in active
+
+ - peer name (dimension per peer)
+
+5. **Peers Connections Usage** in percent
+
+ - peer name (dimension per peer)
+
+6. **All Peers Traffic** in kilobits/s
+
+ - received
+ - sent
+
+7. **Peer Traffic** in kilobits/s (for every peer)
+
+ - received
+ - sent
+
+8. **Peer Timings** in ms (for every peer)
+
+ - header
+ - response
+
+9. **Memory Usage** in percent
+
+ - usage
+
+10. **Peers Status** in state
+
+ - peer name (dimension per peer)
+
+11. **Peers Total Downtime** in seconds
+
+ - peer name (dimension per peer)
+
+For every cache:
+
+1. **Traffic** in KiB
+
+ - served
+ - written
+ - bypass
+
+2. **Memory Usage** in percent
+
+ - usage
+
+## Configuration
+
+Edit the `python.d/nginx_plus.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/nginx_plus.conf
+```
+
+It needs only the `url` of the server's `status` page.
+
+Here is an example for a local server:
+
+```yaml
+local:
+ url : 'http://localhost/status'
+```
+
+Without configuration, the module fails to start.
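+
+For a remote, password-protected status page, `user` and `pass` (supported by
+UrlService-based modules in general) can be added; every value below is a
+placeholder:
+
+```yaml
+remote:
+  name : 'remote'
+  url  : 'http://203.0.113.10/status'
+  user : 'username'
+  pass : 'password'
+```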
+
+---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fnginx_plus%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/python.d.plugin/nginx_plus/nginx_plus.chart.py b/collectors/python.d.plugin/nginx_plus/nginx_plus.chart.py
new file mode 100644
index 0000000..a6c035f
--- /dev/null
+++ b/collectors/python.d.plugin/nginx_plus/nginx_plus.chart.py
@@ -0,0 +1,487 @@
+# -*- coding: utf-8 -*-
+# Description: nginx_plus netdata python.d module
+# Author: Ilya Mashchenko (ilyam8)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import re
+
+from collections import defaultdict
+from copy import deepcopy
+from json import loads
+
+try:
+ from collections import OrderedDict
+except ImportError:
+ from third_party.ordereddict import OrderedDict
+
+from bases.FrameworkServices.UrlService import UrlService
+
+ORDER = [
+ 'requests_total',
+ 'requests_current',
+ 'connections_statistics',
+ 'connections_workers',
+ 'ssl_handshakes',
+ 'ssl_session_reuses',
+ 'ssl_memory_usage',
+ 'processes'
+]
+
+CHARTS = {
+ 'requests_total': {
+ 'options': [None, 'Requests Total', 'requests/s', 'requests', 'nginx_plus.requests_total', 'line'],
+ 'lines': [
+ ['requests_total', 'total', 'incremental']
+ ]
+ },
+ 'requests_current': {
+ 'options': [None, 'Requests Current', 'requests', 'requests', 'nginx_plus.requests_current', 'line'],
+ 'lines': [
+ ['requests_current', 'current']
+ ]
+ },
+ 'connections_statistics': {
+ 'options': [None, 'Connections Statistics', 'connections/s',
+ 'connections', 'nginx_plus.connections_statistics', 'stacked'],
+ 'lines': [
+ ['connections_accepted', 'accepted', 'incremental'],
+ ['connections_dropped', 'dropped', 'incremental']
+ ]
+ },
+ 'connections_workers': {
+ 'options': [None, 'Workers Statistics', 'workers',
+ 'connections', 'nginx_plus.connections_workers', 'stacked'],
+ 'lines': [
+ ['connections_idle', 'idle'],
+ ['connections_active', 'active']
+ ]
+ },
+ 'ssl_handshakes': {
+ 'options': [None, 'SSL Handshakes', 'handshakes/s', 'ssl', 'nginx_plus.ssl_handshakes', 'stacked'],
+ 'lines': [
+ ['ssl_handshakes', 'successful', 'incremental'],
+ ['ssl_handshakes_failed', 'failed', 'incremental']
+ ]
+ },
+ 'ssl_session_reuses': {
+ 'options': [None, 'Session Reuses', 'sessions/s', 'ssl', 'nginx_plus.ssl_session_reuses', 'line'],
+ 'lines': [
+ ['ssl_session_reuses', 'reused', 'incremental']
+ ]
+ },
+ 'ssl_memory_usage': {
+ 'options': [None, 'Memory Usage', 'percentage', 'ssl', 'nginx_plus.ssl_memory_usage', 'area'],
+ 'lines': [
+ ['ssl_memory_usage', 'usage', 'absolute', 1, 100]
+ ]
+ },
+ 'processes': {
+ 'options': [None, 'Processes', 'processes', 'processes', 'nginx_plus.processes', 'line'],
+ 'lines': [
+ ['processes_respawned', 'respawned']
+ ]
+ }
+}
+
+
+def cache_charts(cache):
+ family = 'cache {0}'.format(cache.real_name)
+ charts = OrderedDict()
+
+ charts['{0}_traffic'.format(cache.name)] = {
+ 'options': [None, 'Traffic', 'KiB', family, 'nginx_plus.cache_traffic', 'stacked'],
+ 'lines': [
+ ['_'.join([cache.name, 'hit_bytes']), 'served', 'absolute', 1, 1024],
+ ['_'.join([cache.name, 'miss_bytes_written']), 'written', 'absolute', 1, 1024],
+ ['_'.join([cache.name, 'miss_bytes']), 'bypass', 'absolute', 1, 1024]
+ ]
+ }
+ charts['{0}_memory_usage'.format(cache.name)] = {
+ 'options': [None, 'Memory Usage', 'percentage', family, 'nginx_plus.cache_memory_usage', 'area'],
+ 'lines': [
+ ['_'.join([cache.name, 'memory_usage']), 'usage', 'absolute', 1, 100],
+ ]
+ }
+ return charts
+
+
+def web_zone_charts(wz):
+ charts = OrderedDict()
+ family = 'web zone {name}'.format(name=wz.real_name)
+
+ # Processing
+ charts['zone_{name}_processing'.format(name=wz.name)] = {
+ 'options': [None, 'Zone "{name}" Processing'.format(name=wz.name), 'requests', family,
+ 'nginx_plus.web_zone_processing', 'line'],
+ 'lines': [
+ ['_'.join([wz.name, 'processing']), 'processing']
+ ]
+ }
+ # Requests
+ charts['zone_{name}_requests'.format(name=wz.name)] = {
+ 'options': [None, 'Zone "{name}" Requests'.format(name=wz.name), 'requests/s', family,
+ 'nginx_plus.web_zone_requests', 'line'],
+ 'lines': [
+ ['_'.join([wz.name, 'requests']), 'requests', 'incremental']
+ ]
+ }
+ # Response Codes
+ charts['zone_{name}_responses'.format(name=wz.name)] = {
+ 'options': [None, 'Zone "{name}" Responses'.format(name=wz.name), 'requests/s', family,
+ 'nginx_plus.web_zone_responses', 'stacked'],
+ 'lines': [
+ ['_'.join([wz.name, 'responses_2xx']), '2xx', 'incremental'],
+ ['_'.join([wz.name, 'responses_5xx']), '5xx', 'incremental'],
+ ['_'.join([wz.name, 'responses_3xx']), '3xx', 'incremental'],
+ ['_'.join([wz.name, 'responses_4xx']), '4xx', 'incremental'],
+ ['_'.join([wz.name, 'responses_1xx']), '1xx', 'incremental']
+ ]
+ }
+ # Traffic
+ charts['zone_{name}_net'.format(name=wz.name)] = {
+ 'options': [None, 'Zone "{name}" Traffic'.format(name=wz.name), 'kilobits/s', family,
+ 'nginx_plus.zone_net', 'area'],
+ 'lines': [
+ ['_'.join([wz.name, 'received']), 'received', 'incremental', 1, 1000],
+ ['_'.join([wz.name, 'sent']), 'sent', 'incremental', -1, 1000]
+ ]
+ }
+ return charts
+
+
+def web_upstream_charts(wu):
+ def dimensions(value, a='absolute', m=1, d=1):
+ dims = list()
+ for p in wu:
+ dims.append(['_'.join([wu.name, p.server, value]), p.real_server, a, m, d])
+ return dims
+
+ charts = OrderedDict()
+ family = 'web upstream {name}'.format(name=wu.real_name)
+
+ # Requests
+ charts['web_upstream_{name}_requests'.format(name=wu.name)] = {
+ 'options': [None, 'Peers Requests', 'requests/s', family, 'nginx_plus.web_upstream_requests', 'line'],
+ 'lines': dimensions('requests', 'incremental')
+ }
+ # Responses Codes
+ charts['web_upstream_{name}_all_responses'.format(name=wu.name)] = {
+ 'options': [None, 'All Peers Responses', 'responses/s', family,
+ 'nginx_plus.web_upstream_all_responses', 'stacked'],
+ 'lines': [
+ ['_'.join([wu.name, 'responses_2xx']), '2xx', 'incremental'],
+ ['_'.join([wu.name, 'responses_5xx']), '5xx', 'incremental'],
+ ['_'.join([wu.name, 'responses_3xx']), '3xx', 'incremental'],
+ ['_'.join([wu.name, 'responses_4xx']), '4xx', 'incremental'],
+ ['_'.join([wu.name, 'responses_1xx']), '1xx', 'incremental'],
+ ]
+ }
+ for peer in wu:
+ charts['web_upstream_{0}_{1}_responses'.format(wu.name, peer.server)] = {
+ 'options': [None, 'Peer "{0}" Responses'.format(peer.real_server), 'responses/s', family,
+ 'nginx_plus.web_upstream_peer_responses', 'stacked'],
+ 'lines': [
+ ['_'.join([wu.name, peer.server, 'responses_2xx']), '2xx', 'incremental'],
+ ['_'.join([wu.name, peer.server, 'responses_5xx']), '5xx', 'incremental'],
+ ['_'.join([wu.name, peer.server, 'responses_3xx']), '3xx', 'incremental'],
+ ['_'.join([wu.name, peer.server, 'responses_4xx']), '4xx', 'incremental'],
+ ['_'.join([wu.name, peer.server, 'responses_1xx']), '1xx', 'incremental']
+ ]
+ }
+ # Connections
+ charts['web_upstream_{name}_connections'.format(name=wu.name)] = {
+ 'options': [None, 'Peers Connections', 'active', family, 'nginx_plus.web_upstream_connections', 'line'],
+ 'lines': dimensions('active')
+ }
+ charts['web_upstream_{name}_connections_usage'.format(name=wu.name)] = {
+ 'options': [None, 'Peers Connections Usage', 'percentage', family,
+ 'nginx_plus.web_upstream_connections_usage', 'line'],
+ 'lines': dimensions('connections_usage', d=100)
+ }
+ # Traffic
+ charts['web_upstream_{0}_all_net'.format(wu.name)] = {
+ 'options': [None, 'All Peers Traffic', 'kilobits/s', family, 'nginx_plus.web_upstream_all_net', 'area'],
+ 'lines': [
+ ['{0}_received'.format(wu.name), 'received', 'incremental', 1, 1000],
+ ['{0}_sent'.format(wu.name), 'sent', 'incremental', -1, 1000]
+ ]
+ }
+ for peer in wu:
+ charts['web_upstream_{0}_{1}_net'.format(wu.name, peer.server)] = {
+ 'options': [None, 'Peer "{0}" Traffic'.format(peer.real_server), 'kilobits/s', family,
+ 'nginx_plus.web_upstream_peer_traffic', 'area'],
+ 'lines': [
+ ['{0}_{1}_received'.format(wu.name, peer.server), 'received', 'incremental', 1, 1000],
+ ['{0}_{1}_sent'.format(wu.name, peer.server), 'sent', 'incremental', -1, 1000]
+ ]
+ }
+ # Response Time
+ for peer in wu:
+ charts['web_upstream_{0}_{1}_timings'.format(wu.name, peer.server)] = {
+ 'options': [None, 'Peer "{0}" Timings'.format(peer.real_server), 'milliseconds', family,
+ 'nginx_plus.web_upstream_peer_timings', 'line'],
+ 'lines': [
+ ['_'.join([wu.name, peer.server, 'header_time']), 'header'],
+ ['_'.join([wu.name, peer.server, 'response_time']), 'response']
+ ]
+ }
+ # Memory Usage
+ charts['web_upstream_{name}_memory_usage'.format(name=wu.name)] = {
+ 'options': [None, 'Memory Usage', 'percentage', family, 'nginx_plus.web_upstream_memory_usage', 'area'],
+ 'lines': [
+ ['_'.join([wu.name, 'memory_usage']), 'usage', 'absolute', 1, 100]
+ ]
+ }
+ # State
+ charts['web_upstream_{name}_status'.format(name=wu.name)] = {
+ 'options': [None, 'Peers Status', 'state', family, 'nginx_plus.web_upstream_status', 'line'],
+ 'lines': dimensions('state')
+ }
+ # Downtime
+ charts['web_upstream_{name}_downtime'.format(name=wu.name)] = {
+ 'options': [None, 'Peers Downtime', 'seconds', family, 'nginx_plus.web_upstream_peer_downtime', 'line'],
+ 'lines': dimensions('downtime', d=1000)
+ }
+
+ return charts
+
+
+METRICS = {
+ 'SERVER': [
+ 'processes.respawned',
+ 'connections.accepted',
+ 'connections.dropped',
+ 'connections.active',
+ 'connections.idle',
+ 'ssl.handshakes',
+ 'ssl.handshakes_failed',
+ 'ssl.session_reuses',
+ 'requests.total',
+ 'requests.current',
+ 'slabs.SSL.pages.free',
+ 'slabs.SSL.pages.used'
+ ],
+ 'WEB_ZONE': [
+ 'processing',
+ 'requests',
+ 'responses.1xx',
+ 'responses.2xx',
+ 'responses.3xx',
+ 'responses.4xx',
+ 'responses.5xx',
+ 'discarded',
+ 'received',
+ 'sent'
+ ],
+ 'WEB_UPSTREAM_PEER': [
+ 'id',
+ 'server',
+ 'name',
+ 'state',
+ 'active',
+ 'max_conns',
+ 'requests',
+ 'header_time', # alive only
+ 'response_time', # alive only
+ 'responses.1xx',
+ 'responses.2xx',
+ 'responses.3xx',
+ 'responses.4xx',
+ 'responses.5xx',
+ 'sent',
+ 'received',
+ 'downtime'
+ ],
+ 'WEB_UPSTREAM_SUMMARY': [
+ 'responses.1xx',
+ 'responses.2xx',
+ 'responses.3xx',
+ 'responses.4xx',
+ 'responses.5xx',
+ 'sent',
+ 'received'
+ ],
+ 'CACHE': [
+ 'hit.bytes', # served
+ 'miss.bytes_written', # written
+ 'miss.bytes' # bypass
+
+ ]
+}
+
+BAD_SYMBOLS = re.compile(r'[:/.-]+')
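+# e.g. BAD_SYMBOLS.sub('_', '10.0.0.1:80') -> '10_0_0_1_80'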
+
+
+class Cache:
+ key = 'caches'
+ charts = cache_charts
+
+ def __init__(self, **kw):
+ self.real_name = kw['name']
+ self.name = BAD_SYMBOLS.sub('_', self.real_name)
+
+ def memory_usage(self, data):
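+        # used/(used+free) scaled by 1e4; the chart divides by 100 to show a
+        # percentage with two decimal places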
+ used = data['slabs'][self.real_name]['pages']['used']
+ free = data['slabs'][self.real_name]['pages']['free']
+ return used / float(free + used) * 1e4
+
+ def get_data(self, raw_data):
+ zone_data = raw_data['caches'][self.real_name]
+ data = parse_json(zone_data, METRICS['CACHE'])
+ data['memory_usage'] = self.memory_usage(raw_data)
+ return dict(('_'.join([self.name, k]), v) for k, v in data.items())
+
+
+class WebZone:
+ key = 'server_zones'
+ charts = web_zone_charts
+
+ def __init__(self, **kw):
+ self.real_name = kw['name']
+ self.name = BAD_SYMBOLS.sub('_', self.real_name)
+
+ def get_data(self, raw_data):
+ zone_data = raw_data['server_zones'][self.real_name]
+ data = parse_json(zone_data, METRICS['WEB_ZONE'])
+ return dict(('_'.join([self.name, k]), v) for k, v in data.items())
+
+
+class WebUpstream:
+ key = 'upstreams'
+ charts = web_upstream_charts
+
+ def __init__(self, **kw):
+ self.real_name = kw['name']
+ self.name = BAD_SYMBOLS.sub('_', self.real_name)
+ self.peers = OrderedDict()
+
+ peers = kw['response']['upstreams'][self.real_name]['peers']
+ for peer in peers:
+ self.add_peer(peer['id'], peer['server'])
+
+ def __iter__(self):
+ return iter(self.peers.values())
+
+ def add_peer(self, idx, server):
+ peer = WebUpstreamPeer(idx, server)
+ self.peers[peer.real_server] = peer
+ return peer
+
+ def peers_stats(self, peers):
+ peers = {int(peer['id']): peer for peer in peers}
+ data = dict()
+ for peer in self.peers.values():
+ if not peer.active:
+ continue
+ try:
+ data.update(peer.get_data(peers[peer.id]))
+ except KeyError:
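+                # the peer is no longer reported by the API; stop collecting it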
+ peer.active = False
+ return data
+
+ def memory_usage(self, data):
+ used = data['slabs'][self.real_name]['pages']['used']
+ free = data['slabs'][self.real_name]['pages']['free']
+ return used / float(free + used) * 1e4
+
+    def summary_stats(self, data):
+        rv = defaultdict(int)
+        for metric in METRICS['WEB_UPSTREAM_SUMMARY']:
+            metric = '_'.join(metric.split('.'))
+            for peer in self.peers.values():
+                if peer.active:
+                    rv[metric] += data['_'.join([peer.server, metric])]
+        return rv
+
+ def get_data(self, raw_data):
+ data = dict()
+ peers = raw_data['upstreams'][self.real_name]['peers']
+ data.update(self.peers_stats(peers))
+ data.update(self.summary_stats(data))
+ data['memory_usage'] = self.memory_usage(raw_data)
+ return dict(('_'.join([self.name, k]), v) for k, v in data.items())
+
+
+class WebUpstreamPeer:
+ def __init__(self, idx, server):
+ self.id = idx
+ self.real_server = server
+ self.server = BAD_SYMBOLS.sub('_', self.real_server)
+ self.active = True
+
+ def get_data(self, raw):
+ data = dict(header_time=0, response_time=0, max_conns=0)
+ data.update(parse_json(raw, METRICS['WEB_UPSTREAM_PEER']))
+ data['connections_usage'] = 0 if not data['max_conns'] else data['active'] / float(data['max_conns']) * 1e4
+ data['state'] = int(data['state'] == 'up')
+ return dict(('_'.join([self.server, k]), v) for k, v in data.items())
+
+
+class Service(UrlService):
+ def __init__(self, configuration=None, name=None):
+ UrlService.__init__(self, configuration=configuration, name=name)
+ self.order = list(ORDER)
+ self.definitions = deepcopy(CHARTS)
+ self.objects = dict()
+
+ def check(self):
+ if not self.url:
+ self.error('URL is not defined')
+ return None
+
+ self._manager = self._build_manager()
+ if not self._manager:
+ return None
+
+ raw_data = self._get_raw_data()
+ if not raw_data:
+ return None
+
+ try:
+ response = loads(raw_data)
+ except ValueError:
+ return None
+
+ for obj_cls in [WebZone, WebUpstream, Cache]:
+ for obj_name in response.get(obj_cls.key, list()):
+ obj = obj_cls(name=obj_name, response=response)
+ self.objects[obj.real_name] = obj
+ charts = obj_cls.charts(obj)
+ for chart in charts:
+ self.order.append(chart)
+ self.definitions[chart] = charts[chart]
+
+ return bool(self.objects)
+
+ def _get_data(self):
+ """
+        Format data received from the HTTP request
+ :return: dict
+ """
+ raw_data = self._get_raw_data()
+ if not raw_data:
+ return None
+ response = loads(raw_data)
+
+ data = parse_json(response, METRICS['SERVER'])
+ data['ssl_memory_usage'] = data['slabs_SSL_pages_used'] / float(data['slabs_SSL_pages_free']) * 1e4
+
+ for obj in self.objects.values():
+ if obj.real_name in response[obj.key]:
+ data.update(obj.get_data(response))
+
+ return data
+
+
+def parse_json(raw_data, metrics):
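+    """
+    Flatten selected dotted-path metrics from a nested JSON document.
+
+    Illustrative example (values are assumptions):
+        raw_data = {'responses': {'1xx': 4, '2xx': 110}}
+        metrics = ['responses.1xx', 'responses.5xx']
+        returns {'responses_1xx': 4}; missing paths are silently skipped
+    """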
+ data = dict()
+ for metric in metrics:
+ value = raw_data
+ metrics_list = metric.split('.')
+ try:
+ for m in metrics_list:
+ value = value[m]
+ except KeyError:
+ continue
+ data['_'.join(metrics_list)] = value
+ return data
diff --git a/collectors/python.d.plugin/nginx_plus/nginx_plus.conf b/collectors/python.d.plugin/nginx_plus/nginx_plus.conf
new file mode 100644
index 0000000..201eb0e
--- /dev/null
+++ b/collectors/python.d.plugin/nginx_plus/nginx_plus.conf
@@ -0,0 +1,85 @@
+# netdata python.d.plugin configuration for nginx_plus
+#
+# This file is in YAML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed to run at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# In addition to the above, nginx_plus also supports the following:
+#
+# url: 'URL' # the URL to fetch nginx_plus's stats
+#
+# if the URL is password protected, the following are supported:
+#
+# user: 'username'
+# pass: 'password'
+
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+
+localhost:
+ name : 'local'
+ url : 'http://localhost/status'
+
+localipv4:
+ name : 'local'
+ url : 'http://127.0.0.1/status'
+
+localipv6:
+ name : 'local'
+ url : 'http://[::1]/status'
diff --git a/collectors/python.d.plugin/nsd/Makefile.inc b/collectors/python.d.plugin/nsd/Makefile.inc
new file mode 100644
index 0000000..58e9fd6
--- /dev/null
+++ b/collectors/python.d.plugin/nsd/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += nsd/nsd.chart.py
+dist_pythonconfig_DATA += nsd/nsd.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += nsd/README.md nsd/Makefile.inc
+
diff --git a/collectors/python.d.plugin/nsd/README.md b/collectors/python.d.plugin/nsd/README.md
new file mode 100644
index 0000000..1e7b240
--- /dev/null
+++ b/collectors/python.d.plugin/nsd/README.md
@@ -0,0 +1,68 @@
+<!--
+title: "NSD monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/nsd/README.md
+sidebar_label: "NSD"
+-->
+
+# NSD monitoring with Netdata
+
+Uses the `nsd-control stats_noreset` command to provide `nsd` statistics.
+
+## Requirements
+
+- Version of `nsd` must be 4.0+
+- Netdata must have permissions to run `nsd-control stats_noreset`
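+
+The module parses the plain `key=value` lines printed by `nsd-control stats_noreset`, for example (illustrative values):
+
+```
+num.queries=3539
+num.udp=3516
+num.tcp=23
+num.rcode.NOERROR=3490
+```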
+
+It produces:
+
+1. **Queries**
+
+ - queries
+
+2. **Zones**
+
+ - master
+ - slave
+
+3. **Protocol**
+
+ - udp
+ - udp6
+ - tcp
+ - tcp6
+
+4. **Query Type**
+
+ - A
+ - NS
+ - CNAME
+ - SOA
+ - PTR
+ - HINFO
+ - MX
+ - NAPTR
+ - TXT
+ - AAAA
+ - SRV
+ - ANY
+
+5. **Transfer**
+
+ - NOTIFY
+ - AXFR
+
+6. **Return Code**
+
+ - NOERROR
+ - FORMERR
+ - SERVFAIL
+ - NXDOMAIN
+ - NOTIMP
+ - REFUSED
+ - YXDOMAIN
+
+Configuration is not needed.
+
+---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fnsd%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/python.d.plugin/nsd/nsd.chart.py b/collectors/python.d.plugin/nsd/nsd.chart.py
new file mode 100644
index 0000000..6f9b2ce
--- /dev/null
+++ b/collectors/python.d.plugin/nsd/nsd.chart.py
@@ -0,0 +1,105 @@
+# -*- coding: utf-8 -*-
+# Description: NSD `nsd-control stats_noreset` netdata python.d module
+# Author: <383c57 at gmail.com>
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import re
+
+from bases.FrameworkServices.ExecutableService import ExecutableService
+
+update_every = 30
+
+NSD_CONTROL_COMMAND = 'nsd-control stats_noreset'
+REGEX = re.compile(r'([A-Za-z0-9.]+)=(\d+)')
+
+ORDER = [
+ 'queries',
+ 'zones',
+ 'protocol',
+ 'type',
+ 'transfer',
+ 'rcode',
+]
+
+CHARTS = {
+ 'queries': {
+ 'options': [None, 'queries', 'queries/s', 'queries', 'nsd.queries', 'line'],
+ 'lines': [
+ ['num_queries', 'queries', 'incremental']
+ ]
+ },
+ 'zones': {
+ 'options': [None, 'zones', 'zones', 'zones', 'nsd.zones', 'stacked'],
+ 'lines': [
+ ['zone_master', 'master', 'absolute'],
+ ['zone_slave', 'slave', 'absolute']
+ ]
+ },
+ 'protocol': {
+ 'options': [None, 'protocol', 'queries/s', 'protocol', 'nsd.protocols', 'stacked'],
+ 'lines': [
+ ['num_udp', 'udp', 'incremental'],
+ ['num_udp6', 'udp6', 'incremental'],
+ ['num_tcp', 'tcp', 'incremental'],
+ ['num_tcp6', 'tcp6', 'incremental']
+ ]
+ },
+ 'type': {
+ 'options': [None, 'query type', 'queries/s', 'query type', 'nsd.type', 'stacked'],
+ 'lines': [
+ ['num_type_A', 'A', 'incremental'],
+ ['num_type_NS', 'NS', 'incremental'],
+ ['num_type_CNAME', 'CNAME', 'incremental'],
+ ['num_type_SOA', 'SOA', 'incremental'],
+ ['num_type_PTR', 'PTR', 'incremental'],
+ ['num_type_HINFO', 'HINFO', 'incremental'],
+ ['num_type_MX', 'MX', 'incremental'],
+ ['num_type_NAPTR', 'NAPTR', 'incremental'],
+ ['num_type_TXT', 'TXT', 'incremental'],
+ ['num_type_AAAA', 'AAAA', 'incremental'],
+ ['num_type_SRV', 'SRV', 'incremental'],
+ ['num_type_TYPE255', 'ANY', 'incremental']
+ ]
+ },
+ 'transfer': {
+ 'options': [None, 'transfer', 'queries/s', 'transfer', 'nsd.transfer', 'stacked'],
+ 'lines': [
+ ['num_opcode_NOTIFY', 'NOTIFY', 'incremental'],
+ ['num_type_TYPE252', 'AXFR', 'incremental']
+ ]
+ },
+ 'rcode': {
+ 'options': [None, 'return code', 'queries/s', 'return code', 'nsd.rcode', 'stacked'],
+ 'lines': [
+ ['num_rcode_NOERROR', 'NOERROR', 'incremental'],
+ ['num_rcode_FORMERR', 'FORMERR', 'incremental'],
+ ['num_rcode_SERVFAIL', 'SERVFAIL', 'incremental'],
+ ['num_rcode_NXDOMAIN', 'NXDOMAIN', 'incremental'],
+ ['num_rcode_NOTIMP', 'NOTIMP', 'incremental'],
+ ['num_rcode_REFUSED', 'REFUSED', 'incremental'],
+ ['num_rcode_YXDOMAIN', 'YXDOMAIN', 'incremental']
+ ]
+ }
+}
+
+
+class Service(ExecutableService):
+ def __init__(self, configuration=None, name=None):
+ ExecutableService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
+ self.command = NSD_CONTROL_COMMAND
+
+ def _get_data(self):
+ lines = self._get_raw_data()
+ if not lines:
+ return None
+
+ stats = dict(
+ (k.replace('.', '_'), int(v)) for k, v in REGEX.findall(''.join(lines))
+ )
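+        # these counters are absent from the output until the first matching query/transfer is seen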
+ stats.setdefault('num_opcode_NOTIFY', 0)
+ stats.setdefault('num_type_TYPE252', 0)
+ stats.setdefault('num_type_TYPE255', 0)
+
+ return stats
diff --git a/collectors/python.d.plugin/nsd/nsd.conf b/collectors/python.d.plugin/nsd/nsd.conf
new file mode 100644
index 0000000..77a8a31
--- /dev/null
+++ b/collectors/python.d.plugin/nsd/nsd.conf
@@ -0,0 +1,91 @@
+# netdata python.d.plugin configuration for nsd
+#
+# This file is in YAML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# nsd-control is slow, so once every 30 seconds
+# update_every: 30
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed to run at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# In addition to the above, nsd also supports the following:
+#
+# command: 'nsd-control stats_noreset' # the command to run
+#
+
+# ----------------------------------------------------------------------
+# IMPORTANT Information
+#
+# Netdata must have permissions to run `nsd-control stats_noreset` command
+#
+# - Example-1 (use "sudo")
+# 1. sudoers (e.g. visudo -f /etc/sudoers.d/netdata)
+# Defaults:netdata !requiretty
+# netdata ALL=(ALL) NOPASSWD: /usr/sbin/nsd-control stats_noreset
+#  2. /etc/netdata/python.d/nsd.conf
+# local:
+# update_every: 30
+# command: 'sudo /usr/sbin/nsd-control stats_noreset'
+#
+# - Example-2 (add "netdata" user to "nsd" group)
+# usermod -aG nsd netdata
+#
+
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+
+local:
+ update_every: 30
+ command: 'nsd-control stats_noreset'
diff --git a/collectors/python.d.plugin/ntpd/Makefile.inc b/collectors/python.d.plugin/ntpd/Makefile.inc
new file mode 100644
index 0000000..81210eb
--- /dev/null
+++ b/collectors/python.d.plugin/ntpd/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += ntpd/ntpd.chart.py
+dist_pythonconfig_DATA += ntpd/ntpd.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += ntpd/README.md ntpd/Makefile.inc
+
diff --git a/collectors/python.d.plugin/ntpd/README.md b/collectors/python.d.plugin/ntpd/README.md
new file mode 100644
index 0000000..0b08f12
--- /dev/null
+++ b/collectors/python.d.plugin/ntpd/README.md
@@ -0,0 +1,90 @@
+<!--
+title: "NTP daemon monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/ntpd/README.md
+sidebar_label: "NTP daemon"
+-->
+
+# NTP daemon monitoring with Netdata
+
+Monitors the system variables of the local `ntpd` daemon (and, optionally, the variables of its polled peers) using the NTP Control Message Protocol over a UDP socket, similar to `ntpq`, the [standard NTP query program](http://doc.ntp.org/current-stable/ntpq.html).
+
+## Requirements
+
+- Version: `NTPv4`
+- Local interrogation allowed in `/etc/ntp.conf` (default):
+
+```
+# Local users may interrogate the ntp server more closely.
+restrict 127.0.0.1
+restrict ::1
+```
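+
+For reference, a minimal sketch of the kind of query this module sends, mirroring the module's own packet layout (the host and port here are assumptions):
+
+```python
+import socket
+import struct
+
+# NTP mode 6 (control message), opcode 2 (readvar), association id 0 = system variables
+header = struct.pack('!BBHHHHH', (2 << 3) | 6, 2, 1, 0, 0, 0, 0)
+sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
+sock.settimeout(1)
+sock.sendto(header, ('127.0.0.1', 123))
+print(sock.recv(1024)[12:].decode(errors='replace'))  # payload after the 12-byte header
+```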
+
+It produces:
+
+1. **system**
+
+ - offset
+ - jitter
+ - frequency
+ - delay
+ - dispersion
+ - stratum
+ - tc
+ - precision
+
+2. **peers**
+
+ - offset
+ - delay
+ - dispersion
+ - jitter
+ - rootdelay
+ - rootdispersion
+ - stratum
+ - hmode
+ - pmode
+ - hpoll
+ - ppoll
+ - precision
+
+## Configuration
+
+Edit the `python.d/ntpd.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/ntpd.conf
+```
+
+Sample:
+
+```yaml
+update_every: 10
+
+host: 'localhost'
+port: '123'
+show_peers: yes
+# hide peers with source address in ranges 127.0.0.0/8 and 192.168.0.0/16
+peer_filter: '(127\..*)|(192\.168\..*)'
+# check for new/changed peers every 60 updates
+peer_rescan: 60
+```
+
+Sample (multiple jobs):
+
+Note: `ntp.conf` on the host `otherhost` must be configured to allow queries from our local host by including a line like `restrict <IP> nomodify notrap nopeer`.
+
+```yaml
+local:
+ host: 'localhost'
+
+otherhost:
+ host: 'otherhost'
+```
+
+If no configuration is given, the module attempts to connect to `ntpd` on `::1:123` or `127.0.0.1:123` and shows charts for the system variables. Use `show_peers: yes` to also show charts for configured peers. Local peers in the range `127.0.0.0/8` are hidden by default; use `peer_filter: ''` to show all peers.
+
+---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fntpd%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/python.d.plugin/ntpd/ntpd.chart.py b/collectors/python.d.plugin/ntpd/ntpd.chart.py
new file mode 100644
index 0000000..275d227
--- /dev/null
+++ b/collectors/python.d.plugin/ntpd/ntpd.chart.py
@@ -0,0 +1,385 @@
+# -*- coding: utf-8 -*-
+# Description: ntpd netdata python.d module
+# Author: Sven Mäder (rda0)
+# Author: Ilya Mashchenko (ilyam8)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import re
+import struct
+
+from bases.FrameworkServices.SocketService import SocketService
+
+# NTP Control Message Protocol constants
+MODE = 6
+HEADER_FORMAT = '!BBHHHHH'
+HEADER_LEN = 12
+OPCODES = {
+ 'readstat': 1,
+ 'readvar': 2
+}
+
+# Maximal dimension precision
+PRECISION = 1000000
+
+# Static charts
+ORDER = [
+ 'sys_offset',
+ 'sys_jitter',
+ 'sys_frequency',
+ 'sys_wander',
+ 'sys_rootdelay',
+ 'sys_rootdisp',
+ 'sys_stratum',
+ 'sys_tc',
+ 'sys_precision',
+ 'peer_offset',
+ 'peer_delay',
+ 'peer_dispersion',
+ 'peer_jitter',
+ 'peer_xleave',
+ 'peer_rootdelay',
+ 'peer_rootdisp',
+ 'peer_stratum',
+ 'peer_hmode',
+ 'peer_pmode',
+ 'peer_hpoll',
+ 'peer_ppoll',
+ 'peer_precision'
+]
+
+CHARTS = {
+ 'sys_offset': {
+ 'options': [None, 'Combined offset of server relative to this host', 'milliseconds',
+ 'system', 'ntpd.sys_offset', 'area'],
+ 'lines': [
+ ['offset', 'offset', 'absolute', 1, PRECISION]
+ ]
+ },
+ 'sys_jitter': {
+ 'options': [None, 'Combined system jitter and clock jitter', 'milliseconds',
+ 'system', 'ntpd.sys_jitter', 'line'],
+ 'lines': [
+ ['sys_jitter', 'system', 'absolute', 1, PRECISION],
+ ['clk_jitter', 'clock', 'absolute', 1, PRECISION]
+ ]
+ },
+ 'sys_frequency': {
+ 'options': [None, 'Frequency offset relative to hardware clock', 'ppm', 'system', 'ntpd.sys_frequency', 'area'],
+ 'lines': [
+ ['frequency', 'frequency', 'absolute', 1, PRECISION]
+ ]
+ },
+ 'sys_wander': {
+ 'options': [None, 'Clock frequency wander', 'ppm', 'system', 'ntpd.sys_wander', 'area'],
+ 'lines': [
+ ['clk_wander', 'clock', 'absolute', 1, PRECISION]
+ ]
+ },
+ 'sys_rootdelay': {
+ 'options': [None, 'Total roundtrip delay to the primary reference clock', 'milliseconds', 'system',
+ 'ntpd.sys_rootdelay', 'area'],
+ 'lines': [
+ ['rootdelay', 'delay', 'absolute', 1, PRECISION]
+ ]
+ },
+ 'sys_rootdisp': {
+ 'options': [None, 'Total root dispersion to the primary reference clock', 'milliseconds', 'system',
+ 'ntpd.sys_rootdisp', 'area'],
+ 'lines': [
+ ['rootdisp', 'dispersion', 'absolute', 1, PRECISION]
+ ]
+ },
+ 'sys_stratum': {
+ 'options': [None, 'Stratum (1-15)', 'stratum', 'system', 'ntpd.sys_stratum', 'line'],
+ 'lines': [
+ ['stratum', 'stratum', 'absolute', 1, PRECISION]
+ ]
+ },
+ 'sys_tc': {
+ 'options': [None, 'Time constant and poll exponent (3-17)', 'log2 s', 'system', 'ntpd.sys_tc', 'line'],
+ 'lines': [
+ ['tc', 'current', 'absolute', 1, PRECISION],
+ ['mintc', 'minimum', 'absolute', 1, PRECISION]
+ ]
+ },
+ 'sys_precision': {
+ 'options': [None, 'Precision', 'log2 s', 'system', 'ntpd.sys_precision', 'line'],
+ 'lines': [
+ ['precision', 'precision', 'absolute', 1, PRECISION]
+ ]
+ }
+}
+
+PEER_CHARTS = {
+ 'peer_offset': {
+ 'options': [None, 'Filter offset', 'milliseconds', 'peers', 'ntpd.peer_offset', 'line'],
+ 'lines': []
+ },
+ 'peer_delay': {
+ 'options': [None, 'Filter delay', 'milliseconds', 'peers', 'ntpd.peer_delay', 'line'],
+ 'lines': []
+ },
+ 'peer_dispersion': {
+ 'options': [None, 'Filter dispersion', 'milliseconds', 'peers', 'ntpd.peer_dispersion', 'line'],
+ 'lines': []
+ },
+ 'peer_jitter': {
+ 'options': [None, 'Filter jitter', 'milliseconds', 'peers', 'ntpd.peer_jitter', 'line'],
+ 'lines': []
+ },
+ 'peer_xleave': {
+ 'options': [None, 'Interleave delay', 'milliseconds', 'peers', 'ntpd.peer_xleave', 'line'],
+ 'lines': []
+ },
+ 'peer_rootdelay': {
+ 'options': [None, 'Total roundtrip delay to the primary reference clock', 'milliseconds', 'peers',
+ 'ntpd.peer_rootdelay', 'line'],
+ 'lines': []
+ },
+ 'peer_rootdisp': {
+        'options': [None, 'Total root dispersion to the primary reference clock', 'milliseconds', 'peers',
+ 'ntpd.peer_rootdisp', 'line'],
+ 'lines': []
+ },
+ 'peer_stratum': {
+ 'options': [None, 'Stratum (1-15)', 'stratum', 'peers', 'ntpd.peer_stratum', 'line'],
+ 'lines': []
+ },
+ 'peer_hmode': {
+ 'options': [None, 'Host mode (1-6)', 'hmode', 'peers', 'ntpd.peer_hmode', 'line'],
+ 'lines': []
+ },
+ 'peer_pmode': {
+ 'options': [None, 'Peer mode (1-5)', 'pmode', 'peers', 'ntpd.peer_pmode', 'line'],
+ 'lines': []
+ },
+ 'peer_hpoll': {
+ 'options': [None, 'Host poll exponent', 'log2 s', 'peers', 'ntpd.peer_hpoll', 'line'],
+ 'lines': []
+ },
+ 'peer_ppoll': {
+ 'options': [None, 'Peer poll exponent', 'log2 s', 'peers', 'ntpd.peer_ppoll', 'line'],
+ 'lines': []
+ },
+ 'peer_precision': {
+ 'options': [None, 'Precision', 'log2 s', 'peers', 'ntpd.peer_precision', 'line'],
+ 'lines': []
+ }
+}
+
+
+class Base:
+ regex = re.compile(r'([a-z_]+)=((?:-)?[0-9]+(?:\.[0-9]+)?)')
+
+ @staticmethod
+ def get_header(associd=0, operation='readvar'):
+ """
+ Constructs the NTP Control Message header:
+ 0 1 2 3
+ 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ |LI | VN |Mode |R|E|M| OpCode | Sequence Number |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Status | Association ID |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Offset | Count |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ """
+ version = 2
+ sequence = 1
+ status = 0
+ offset = 0
+ count = 0
+ header = struct.pack(HEADER_FORMAT, (version << 3 | MODE), OPCODES[operation],
+ sequence, status, associd, offset, count)
+ return header
+
+
+class System(Base):
+ def __init__(self):
+ self.request = self.get_header()
+
+ def get_data(self, raw):
+ """
+ Extracts key=value pairs with float/integer from ntp response packet data.
+ """
+ data = dict()
+ for key, value in self.regex.findall(raw):
+ data[key] = float(value) * PRECISION
+ return data
+
+
+class Peer(Base):
+ def __init__(self, idx, name):
+ self.id = idx
+ self.real_name = name
+ self.name = name.replace('.', '_')
+ self.request = self.get_header(self.id)
+
+ def get_data(self, raw):
+ """
+ Extracts key=value pairs with float/integer from ntp response packet data.
+ """
+ data = dict()
+ for key, value in self.regex.findall(raw):
+ dimension = '_'.join([self.name, key])
+ data[dimension] = float(value) * PRECISION
+ return data
+
+
+class Service(SocketService):
+ def __init__(self, configuration=None, name=None):
+ SocketService.__init__(self, configuration=configuration, name=name)
+ self.order = list(ORDER)
+ self.definitions = dict(CHARTS)
+ self.port = 'ntp'
+ self.dgram_socket = True
+ self.system = System()
+ self.peers = dict()
+ self.request = str()
+ self.retries = 0
+ self.show_peers = self.configuration.get('show_peers', False)
+ self.peer_rescan = self.configuration.get('peer_rescan', 60)
+ if self.show_peers:
+ self.definitions.update(PEER_CHARTS)
+
+ def check(self):
+ """
+ Checks if we can get valid systemvars.
+ If not, returns None to disable module.
+ """
+ self._parse_config()
+
+ peer_filter = self.configuration.get('peer_filter', r'127\..*')
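+        # 0.0.0.0 (unresolved peers) is always excluded, in addition to the user-supplied pattern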
+ try:
+ self.peer_filter = re.compile(r'^((0\.0\.0\.0)|({0}))$'.format(peer_filter))
+ except re.error as error:
+ self.error('Compile pattern error (peer_filter) : {0}'.format(error))
+ return None
+
+ self.request = self.system.request
+ raw_systemvars = self._get_raw_data()
+
+ if not self.system.get_data(raw_systemvars):
+ return None
+
+ return True
+
+ def get_data(self):
+ """
+ Gets systemvars data on each update.
+ Gets peervars data for all peers on each update.
+ """
+ data = dict()
+
+ self.request = self.system.request
+ raw = self._get_raw_data()
+ if not raw:
+ return None
+
+ data.update(self.system.get_data(raw))
+
+ if not self.show_peers:
+ return data
+
+ if not self.peers or self.runs_counter % self.peer_rescan == 0 or self.retries > 8:
+ self.find_new_peers()
+
+ for peer in self.peers.values():
+ self.request = peer.request
+ peer_data = peer.get_data(self._get_raw_data())
+ if peer_data:
+ data.update(peer_data)
+ else:
+ self.retries += 1
+
+ return data
+
+ def find_new_peers(self):
+ new_peers = dict((p.real_name, p) for p in self.get_peers())
+ if new_peers:
+
+ peers_to_remove = set(self.peers) - set(new_peers)
+ peers_to_add = set(new_peers) - set(self.peers)
+
+ for peer_name in peers_to_remove:
+ self.hide_old_peer_from_charts(self.peers[peer_name])
+ del self.peers[peer_name]
+
+ for peer_name in peers_to_add:
+ self.add_new_peer_to_charts(new_peers[peer_name])
+
+ self.peers.update(new_peers)
+ self.retries = 0
+
+ def add_new_peer_to_charts(self, peer):
+ for chart_id in set(self.charts.charts) & set(PEER_CHARTS):
+ dim_id = peer.name + chart_id[4:]
+ if dim_id not in self.charts[chart_id]:
+ self.charts[chart_id].add_dimension([dim_id, peer.real_name, 'absolute', 1, PRECISION])
+ else:
+ self.charts[chart_id].hide_dimension(dim_id, reverse=True)
+
+ def hide_old_peer_from_charts(self, peer):
+ for chart_id in set(self.charts.charts) & set(PEER_CHARTS):
+ dim_id = peer.name + chart_id[4:]
+ self.charts[chart_id].hide_dimension(dim_id)
+
+ def get_peers(self):
+ self.request = Base.get_header(operation='readstat')
+
+ raw_data = self._get_raw_data(raw=True)
+ if not raw_data:
+ return list()
+
+ peer_ids = self.get_peer_ids(raw_data)
+ if not peer_ids:
+ return list()
+
+ new_peers = list()
+ for peer_id in peer_ids:
+ self.request = Base.get_header(peer_id)
+ raw_peer_data = self._get_raw_data()
+ if not raw_peer_data:
+ continue
+ srcadr = re.search(r'(srcadr)=([^,]+)', raw_peer_data)
+ if not srcadr:
+ continue
+ srcadr = srcadr.group(2)
+ if self.peer_filter.search(srcadr):
+ continue
+ stratum = re.search(r'(stratum)=([^,]+)', raw_peer_data)
+ if not stratum:
+ continue
+ if int(stratum.group(2)) > 15:
+ continue
+
+ new_peer = Peer(idx=peer_id, name=srcadr)
+ new_peers.append(new_peer)
+ return new_peers
+
+ def get_peer_ids(self, res):
+ """
+ Unpack the NTP Control Message header
+ Get data length from header
+ Get list of association ids returned in the readstat response
+ """
+
+ try:
+ count = struct.unpack(HEADER_FORMAT, res[:HEADER_LEN])[6]
+ except struct.error as error:
+ self.error('error unpacking header: {0}'.format(error))
+ return None
+ if not count:
+ self.error('empty data field in NTP control packet')
+ return None
+
+ data_end = HEADER_LEN + count
+ data = res[HEADER_LEN:data_end]
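+        # readstat data is a sequence of 16-bit (association id, peer status) pairs; keep only the ids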
+ data_format = ''.join(['!', 'H' * int(count / 2)])
+ try:
+ peer_ids = list(struct.unpack(data_format, data))[::2]
+ except struct.error as error:
+ self.error('error unpacking data: {0}'.format(error))
+ return None
+ return peer_ids
diff --git a/collectors/python.d.plugin/ntpd/ntpd.conf b/collectors/python.d.plugin/ntpd/ntpd.conf
new file mode 100644
index 0000000..80bd468
--- /dev/null
+++ b/collectors/python.d.plugin/ntpd/ntpd.conf
@@ -0,0 +1,89 @@
+# netdata python.d.plugin configuration for ntpd
+#
+# This file is in YAML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed to run at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+#
+# In addition to the above, ntpd also supports the following:
+#
+# host: 'localhost' # the host to query
+# port: '123' # the UDP port where `ntpd` listens
+# show_peers: no          # use `yes` to show peer charts. Enabling this
+#                         # option is recommended only for debugging, as
+#                         # it may leak memory if the peers change
+#                         # frequently.
+# peer_filter: '127\..*' # regex to exclude peers
+# # by default local peers are hidden
+# # use `''` to show all peers.
+# peer_rescan: 60 # interval (>0) to check for new/changed peers
+# # use `1` to check on every update
+#
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+
+localhost:
+ name: 'local'
+ host: 'localhost'
+ port: '123'
+ show_peers: no
+
+localhost_ipv4:
+ name: 'local'
+ host: '127.0.0.1'
+ port: '123'
+ show_peers: no
+
+localhost_ipv6:
+ name: 'local'
+ host: '::1'
+ port: '123'
+ show_peers: no
diff --git a/collectors/python.d.plugin/nvidia_smi/Makefile.inc b/collectors/python.d.plugin/nvidia_smi/Makefile.inc
new file mode 100644
index 0000000..52fb25a
--- /dev/null
+++ b/collectors/python.d.plugin/nvidia_smi/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += nvidia_smi/nvidia_smi.chart.py
+dist_pythonconfig_DATA += nvidia_smi/nvidia_smi.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += nvidia_smi/README.md nvidia_smi/Makefile.inc
+
diff --git a/collectors/python.d.plugin/nvidia_smi/README.md b/collectors/python.d.plugin/nvidia_smi/README.md
new file mode 100644
index 0000000..9bfb209
--- /dev/null
+++ b/collectors/python.d.plugin/nvidia_smi/README.md
@@ -0,0 +1,58 @@
+<!--
+title: "Nvidia GPU monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/nvidia_smi/README.md
+sidebar_label: "Nvidia GPUs"
+-->
+
+# Nvidia GPU monitoring with Netdata
+
+Monitors performance metrics (memory usage, fan speed, PCIe bandwidth utilization, temperature, etc.) using the `nvidia-smi` CLI tool.
+
+## Requirements and Notes
+
+- You must have the `nvidia-smi` tool installed, and your NVIDIA GPU(s) must support it (mostly newer high-end models used for AI/ML and crypto, or the Pro range). Read more about [nvidia_smi](https://developer.nvidia.com/nvidia-system-management-interface).
+- You must enable this plugin, as it is disabled by default due to minor performance issues.
+- On some systems, the `nvidia-smi` tool unloads when the GPU is idle, adding latency when it is next queried. If your GPUs run under constant workload, this is unlikely to be an issue.
+- Currently the `nvidia-smi` tool is queried via the CLI. Updating the plugin to use the NVIDIA C/C++ API directly should resolve this issue. See the discussion here: <https://github.com/netdata/netdata/pull/4357>
+- Contributions are welcome.
+- Make sure the `netdata` user can execute `/usr/bin/nvidia-smi`, or wherever your binary is installed.
+- If the `nvidia-smi` process [is not killed after a netdata restart](https://github.com/netdata/netdata/issues/7143), you need to turn off `loop_mode`.
+- `poll_seconds` is an integer controlling how often, in seconds, the tool is polled.
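+
+For reference, a minimal sketch of the one-shot query the collector performs (assumes `nvidia-smi` is in `PATH`; loop mode additionally passes `-l <seconds>` and keeps the process running):
+
+```python
+import subprocess
+import xml.etree.ElementTree as et
+
+# query all GPUs once, as XML
+xml_out = subprocess.check_output(['nvidia-smi', '-x', '-q'])
+root = et.fromstring(xml_out)
+for idx, gpu in enumerate(root.findall('gpu')):
+    print(idx, gpu.find('product_name').text,
+          gpu.find('utilization').find('gpu_util').text)
+```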
+
+## Charts
+
+It produces the following charts:
+
+- PCI Express Bandwidth Utilization in `KiB/s`
+- Fan Speed in `percentage`
+- GPU Utilization in `percentage`
+- Memory Bandwidth Utilization in `percentage`
+- Encoder/Decoder Utilization in `percentage`
+- Memory Usage in `MiB`
+- Temperature in `celsius`
+- Clock Frequencies in `MHz`
+- Power Utilization in `Watts`
+- Memory Used by Each Process in `MiB`
+- Memory Used by Each User in `MiB`
+- Number of Users on GPU in `num`
+
+## Configuration
+
+Edit the `python.d/nvidia_smi.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/nvidia_smi.conf
+```
+
+Sample:
+
+```yaml
+loop_mode : yes
+poll_seconds : 1
+exclude_zero_memory_users : yes
+```
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fnvidia_smi%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/python.d.plugin/nvidia_smi/nvidia_smi.chart.py b/collectors/python.d.plugin/nvidia_smi/nvidia_smi.chart.py
new file mode 100644
index 0000000..9c69586
--- /dev/null
+++ b/collectors/python.d.plugin/nvidia_smi/nvidia_smi.chart.py
@@ -0,0 +1,550 @@
+# -*- coding: utf-8 -*-
+# Description: nvidia-smi netdata python.d module
+# Original Author: Steven Noonan (tycho)
+# Author: Ilya Mashchenko (ilyam8)
+# User Memory Stat Author: Guido Scatena (scatenag)
+
+import subprocess
+import threading
+import os
+import pwd
+
+import xml.etree.ElementTree as et
+
+from bases.FrameworkServices.SimpleService import SimpleService
+from bases.collection import find_binary
+
+disabled_by_default = True
+
+NVIDIA_SMI = 'nvidia-smi'
+
+EMPTY_ROW = ''
+EMPTY_ROW_LIMIT = 500
+POLLER_BREAK_ROW = '</nvidia_smi_log>'
+
+PCI_BANDWIDTH = 'pci_bandwidth'
+FAN_SPEED = 'fan_speed'
+GPU_UTIL = 'gpu_utilization'
+MEM_UTIL = 'mem_utilization'
+ENCODER_UTIL = 'encoder_utilization'
+MEM_USAGE = 'mem_usage'
+TEMPERATURE = 'temperature'
+CLOCKS = 'clocks'
+POWER = 'power'
+PROCESSES_MEM = 'processes_mem'
+USER_MEM = 'user_mem'
+USER_NUM = 'user_num'
+
+ORDER = [
+ PCI_BANDWIDTH,
+ FAN_SPEED,
+ GPU_UTIL,
+ MEM_UTIL,
+ ENCODER_UTIL,
+ MEM_USAGE,
+ TEMPERATURE,
+ CLOCKS,
+ POWER,
+ PROCESSES_MEM,
+ USER_MEM,
+ USER_NUM,
+]
+
+
+def gpu_charts(gpu):
+ fam = gpu.full_name()
+
+ charts = {
+ PCI_BANDWIDTH: {
+ 'options': [None, 'PCI Express Bandwidth Utilization', 'KiB/s', fam, 'nvidia_smi.pci_bandwidth', 'area'],
+ 'lines': [
+ ['rx_util', 'rx', 'absolute', 1, 1],
+ ['tx_util', 'tx', 'absolute', 1, -1],
+ ]
+ },
+ FAN_SPEED: {
+ 'options': [None, 'Fan Speed', 'percentage', fam, 'nvidia_smi.fan_speed', 'line'],
+ 'lines': [
+ ['fan_speed', 'speed'],
+ ]
+ },
+ GPU_UTIL: {
+ 'options': [None, 'GPU Utilization', 'percentage', fam, 'nvidia_smi.gpu_utilization', 'line'],
+ 'lines': [
+ ['gpu_util', 'utilization'],
+ ]
+ },
+ MEM_UTIL: {
+ 'options': [None, 'Memory Bandwidth Utilization', 'percentage', fam, 'nvidia_smi.mem_utilization', 'line'],
+ 'lines': [
+ ['memory_util', 'utilization'],
+ ]
+ },
+ ENCODER_UTIL: {
+ 'options': [None, 'Encoder/Decoder Utilization', 'percentage', fam, 'nvidia_smi.encoder_utilization',
+ 'line'],
+ 'lines': [
+ ['encoder_util', 'encoder'],
+ ['decoder_util', 'decoder'],
+ ]
+ },
+ MEM_USAGE: {
+ 'options': [None, 'Memory Usage', 'MiB', fam, 'nvidia_smi.memory_allocated', 'stacked'],
+ 'lines': [
+ ['fb_memory_free', 'free'],
+ ['fb_memory_used', 'used'],
+ ]
+ },
+ TEMPERATURE: {
+ 'options': [None, 'Temperature', 'celsius', fam, 'nvidia_smi.temperature', 'line'],
+ 'lines': [
+ ['gpu_temp', 'temp'],
+ ]
+ },
+ CLOCKS: {
+ 'options': [None, 'Clock Frequencies', 'MHz', fam, 'nvidia_smi.clocks', 'line'],
+ 'lines': [
+ ['graphics_clock', 'graphics'],
+ ['video_clock', 'video'],
+ ['sm_clock', 'sm'],
+ ['mem_clock', 'mem'],
+ ]
+ },
+ POWER: {
+ 'options': [None, 'Power Utilization', 'Watts', fam, 'nvidia_smi.power', 'line'],
+ 'lines': [
+ ['power_draw', 'power', 'absolute', 1, 100],
+ ]
+ },
+ PROCESSES_MEM: {
+ 'options': [None, 'Memory Used by Each Process', 'MiB', fam, 'nvidia_smi.processes_mem', 'stacked'],
+ 'lines': []
+ },
+ USER_MEM: {
+ 'options': [None, 'Memory Used by Each User', 'MiB', fam, 'nvidia_smi.user_mem', 'stacked'],
+ 'lines': []
+ },
+ USER_NUM: {
+            'options': [None, 'Number of Users on GPU', 'num', fam, 'nvidia_smi.user_num', 'line'],
+ 'lines': [
+ ['user_num', 'users'],
+ ]
+ },
+ }
+
+ idx = gpu.num
+
+ order = ['gpu{0}_{1}'.format(idx, v) for v in ORDER]
+ charts = dict(('gpu{0}_{1}'.format(idx, k), v) for k, v in charts.items())
+
+ for chart in charts.values():
+ for line in chart['lines']:
+ line[0] = 'gpu{0}_{1}'.format(idx, line[0])
+
+ return order, charts
+
+
+class NvidiaSMI:
+ def __init__(self):
+ self.command = find_binary(NVIDIA_SMI)
+ self.active_proc = None
+
+ def run_once(self):
+ proc = subprocess.Popen([self.command, '-x', '-q'], stdout=subprocess.PIPE)
+ stdout, _ = proc.communicate()
+ return stdout
+
+ def run_loop(self, interval):
+ if self.active_proc:
+ self.kill()
+ proc = subprocess.Popen([self.command, '-x', '-q', '-l', str(interval)], stdout=subprocess.PIPE)
+ self.active_proc = proc
+ return proc.stdout
+
+ def kill(self):
+ if self.active_proc:
+ self.active_proc.kill()
+ self.active_proc = None
+
+
+class NvidiaSMIPoller(threading.Thread):
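+    """Runs `nvidia-smi -x -q -l <interval>` in a background thread, keeping the most recent complete XML document."""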
+ def __init__(self, poll_interval):
+ threading.Thread.__init__(self)
+ self.daemon = True
+
+ self.smi = NvidiaSMI()
+ self.interval = poll_interval
+
+ self.lock = threading.RLock()
+ self.last_data = str()
+ self.exit = False
+ self.empty_rows = 0
+ self.rows = list()
+
+ def has_smi(self):
+ return bool(self.smi.command)
+
+ def run_once(self):
+ return self.smi.run_once()
+
+ def run(self):
+ out = self.smi.run_loop(self.interval)
+
+ for row in out:
+ if self.exit or self.empty_rows > EMPTY_ROW_LIMIT:
+ break
+ self.process_row(row)
+ self.smi.kill()
+
+ def process_row(self, row):
+ row = row.decode()
+ self.empty_rows += (row == EMPTY_ROW)
+ self.rows.append(row)
+
+ if POLLER_BREAK_ROW in row:
+ self.lock.acquire()
+ self.last_data = '\n'.join(self.rows)
+ self.lock.release()
+
+ self.rows = list()
+ self.empty_rows = 0
+
+ def is_started(self):
+ return self.ident is not None
+
+ def shutdown(self):
+ self.exit = True
+
+ def data(self):
+ self.lock.acquire()
+ data = self.last_data
+ self.lock.release()
+ return data
+
+
+def handle_attr_error(method):
+ def on_call(*args, **kwargs):
+ try:
+ return method(*args, **kwargs)
+ except AttributeError:
+ return None
+
+ return on_call
+
+
+def handle_value_error(method):
+ def on_call(*args, **kwargs):
+ try:
+ return method(*args, **kwargs)
+ except ValueError:
+ return None
+
+ return on_call
+
+
+HOST_PREFIX = os.getenv('NETDATA_HOST_PREFIX')
+ETC_PASSWD_PATH = '/etc/passwd'
+PROC_PATH = '/proc'
+
+IS_INSIDE_DOCKER = False
+
+if HOST_PREFIX:
+ ETC_PASSWD_PATH = os.path.join(HOST_PREFIX, ETC_PASSWD_PATH[1:])
+ PROC_PATH = os.path.join(HOST_PREFIX, PROC_PATH[1:])
+ IS_INSIDE_DOCKER = True
+
+
+def read_passwd_file():
+ data = dict()
+ with open(ETC_PASSWD_PATH, 'r') as f:
+ for line in f:
+ line = line.strip()
+ if line.startswith("#"):
+ continue
+ fields = line.split(":")
+ # name, passwd, uid, gid, comment, home_dir, shell
+ if len(fields) != 7:
+ continue
+ # uid, guid
+ fields[2], fields[3] = int(fields[2]), int(fields[3])
+ data[fields[2]] = fields
+ return data
+
+
+def read_passwd_file_safe():
+ try:
+ if IS_INSIDE_DOCKER:
+ return read_passwd_file()
+ return dict((k[2], k) for k in pwd.getpwall())
+ except (OSError, IOError):
+ return dict()
+
+
+def get_username_by_pid_safe(pid, passwd_file):
+ path = os.path.join(PROC_PATH, pid)
+ try:
+ uid = os.stat(path).st_uid
+ except (OSError, IOError):
+ return ''
+
+ try:
+ return passwd_file[uid][0]
+ except KeyError:
+ return str(uid)
+
+
+class GPU:
+ def __init__(self, num, root, exclude_zero_memory_users=False):
+ self.num = num
+ self.root = root
+ self.exclude_zero_memory_users = exclude_zero_memory_users
+
+ def id(self):
+ return self.root.get('id')
+
+ def name(self):
+ return self.root.find('product_name').text
+
+ def full_name(self):
+ return 'gpu{0} {1}'.format(self.num, self.name())
+
+ @handle_attr_error
+ def rx_util(self):
+ return self.root.find('pci').find('rx_util').text.split()[0]
+
+ @handle_attr_error
+ def tx_util(self):
+ return self.root.find('pci').find('tx_util').text.split()[0]
+
+ @handle_attr_error
+ def fan_speed(self):
+ return self.root.find('fan_speed').text.split()[0]
+
+ @handle_attr_error
+ def gpu_util(self):
+ return self.root.find('utilization').find('gpu_util').text.split()[0]
+
+ @handle_attr_error
+ def memory_util(self):
+ return self.root.find('utilization').find('memory_util').text.split()[0]
+
+ @handle_attr_error
+ def encoder_util(self):
+ return self.root.find('utilization').find('encoder_util').text.split()[0]
+
+ @handle_attr_error
+ def decoder_util(self):
+ return self.root.find('utilization').find('decoder_util').text.split()[0]
+
+ @handle_attr_error
+ def fb_memory_used(self):
+ return self.root.find('fb_memory_usage').find('used').text.split()[0]
+
+ @handle_attr_error
+ def fb_memory_free(self):
+ return self.root.find('fb_memory_usage').find('free').text.split()[0]
+
+ @handle_attr_error
+ def temperature(self):
+ return self.root.find('temperature').find('gpu_temp').text.split()[0]
+
+ @handle_attr_error
+ def graphics_clock(self):
+ return self.root.find('clocks').find('graphics_clock').text.split()[0]
+
+ @handle_attr_error
+ def video_clock(self):
+ return self.root.find('clocks').find('video_clock').text.split()[0]
+
+ @handle_attr_error
+ def sm_clock(self):
+ return self.root.find('clocks').find('sm_clock').text.split()[0]
+
+ @handle_attr_error
+ def mem_clock(self):
+ return self.root.find('clocks').find('mem_clock').text.split()[0]
+
+ @handle_value_error
+ @handle_attr_error
+ def power_draw(self):
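+        # reported in watts with fractional precision; scaled by 100 here, the chart divides it back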
+ return float(self.root.find('power_readings').find('power_draw').text.split()[0]) * 100
+
+ @handle_attr_error
+ def processes(self):
+ processes_info = self.root.find('processes').findall('process_info')
+ if not processes_info:
+ return list()
+
+ passwd_file = read_passwd_file_safe()
+ processes = list()
+
+ for info in processes_info:
+ pid = info.find('pid').text
+ processes.append({
+ 'pid': int(pid),
+ 'process_name': info.find('process_name').text,
+ 'used_memory': int(info.find('used_memory').text.split()[0]),
+ 'username': get_username_by_pid_safe(pid, passwd_file),
+ })
+ return processes
+
+ def data(self):
+ data = {
+ 'rx_util': self.rx_util(),
+ 'tx_util': self.tx_util(),
+ 'fan_speed': self.fan_speed(),
+ 'gpu_util': self.gpu_util(),
+ 'memory_util': self.memory_util(),
+ 'encoder_util': self.encoder_util(),
+ 'decoder_util': self.decoder_util(),
+ 'fb_memory_used': self.fb_memory_used(),
+ 'fb_memory_free': self.fb_memory_free(),
+ 'gpu_temp': self.temperature(),
+ 'graphics_clock': self.graphics_clock(),
+ 'video_clock': self.video_clock(),
+ 'sm_clock': self.sm_clock(),
+ 'mem_clock': self.mem_clock(),
+ 'power_draw': self.power_draw(),
+ }
+ processes = self.processes() or []
+ users = set()
+ for p in processes:
+ data['process_mem_{0}'.format(p['pid'])] = p['used_memory']
+ if p['username']:
+ if self.exclude_zero_memory_users and p['used_memory'] == 0:
+ continue
+ users.add(p['username'])
+ key = 'user_mem_{0}'.format(p['username'])
+ if key in data:
+ data[key] += p['used_memory']
+ else:
+ data[key] = p['used_memory']
+ data['user_num'] = len(users)
+
+ return dict(('gpu{0}_{1}'.format(self.num, k), v) for k, v in data.items())
+
+
+class Service(SimpleService):
+ def __init__(self, configuration=None, name=None):
+ super(Service, self).__init__(configuration=configuration, name=name)
+ self.order = list()
+ self.definitions = dict()
+ self.loop_mode = configuration.get('loop_mode', True)
+ poll = int(configuration.get('poll_seconds', 1))
+ self.exclude_zero_memory_users = configuration.get('exclude_zero_memory_users', False)
+ self.poller = NvidiaSMIPoller(poll)
+
+ def get_data_loop_mode(self):
+ if not self.poller.is_started():
+ self.poller.start()
+
+ if not self.poller.is_alive():
+ self.debug('poller is off')
+ return None
+
+ return self.poller.data()
+
+ def get_data_normal_mode(self):
+ return self.poller.run_once()
+
+ def get_data(self):
+ if self.loop_mode:
+ last_data = self.get_data_loop_mode()
+ else:
+ last_data = self.get_data_normal_mode()
+
+ if not last_data:
+ return None
+
+ parsed = self.parse_xml(last_data)
+ if parsed is None:
+ return None
+
+ data = dict()
+ for idx, root in enumerate(parsed.findall('gpu')):
+ gpu = GPU(idx, root, self.exclude_zero_memory_users)
+ gpu_data = gpu.data()
+ # self.debug(gpu_data)
+ gpu_data = dict((k, v) for k, v in gpu_data.items() if is_gpu_data_value_valid(v))
+ data.update(gpu_data)
+ self.update_processes_mem_chart(gpu)
+ self.update_processes_user_mem_chart(gpu)
+
+ return data or None
+
+ def update_processes_mem_chart(self, gpu):
+ ps = gpu.processes()
+ if not ps:
+ return
+ chart = self.charts['gpu{0}_{1}'.format(gpu.num, PROCESSES_MEM)]
+ active_dim_ids = []
+ for p in ps:
+ dim_id = 'gpu{0}_process_mem_{1}'.format(gpu.num, p['pid'])
+ active_dim_ids.append(dim_id)
+ if dim_id not in chart:
+ chart.add_dimension([dim_id, '{0} {1}'.format(p['pid'], p['process_name'])])
+ for dim in chart:
+ if dim.id not in active_dim_ids:
+ chart.del_dimension(dim.id, hide=False)
+
+ def update_processes_user_mem_chart(self, gpu):
+ ps = gpu.processes()
+ if not ps:
+ return
+ chart = self.charts['gpu{0}_{1}'.format(gpu.num, USER_MEM)]
+ active_dim_ids = []
+ for p in ps:
+ if not p.get('username'):
+ continue
+ dim_id = 'gpu{0}_user_mem_{1}'.format(gpu.num, p['username'])
+ active_dim_ids.append(dim_id)
+ if dim_id not in chart:
+ chart.add_dimension([dim_id, '{0}'.format(p['username'])])
+
+ for dim in chart:
+ if dim.id not in active_dim_ids:
+ chart.del_dimension(dim.id, hide=False)
+
+ def check(self):
+ if not self.poller.has_smi():
+ self.error("couldn't find '{0}' binary".format(NVIDIA_SMI))
+ return False
+
+ raw_data = self.poller.run_once()
+ if not raw_data:
+ self.error("failed to invoke '{0}' binary".format(NVIDIA_SMI))
+ return False
+
+ parsed = self.parse_xml(raw_data)
+ if parsed is None:
+ return False
+
+ gpus = parsed.findall('gpu')
+ if not gpus:
+ return False
+
+ self.create_charts(gpus)
+
+ return True
+
+ def parse_xml(self, data):
+ try:
+ return et.fromstring(data)
+ except et.ParseError as error:
+ self.error('xml parse failed: "{0}", error: {1}'.format(data, error))
+
+ return None
+
+ def create_charts(self, gpus):
+ for idx, root in enumerate(gpus):
+ order, charts = gpu_charts(GPU(idx, root))
+ self.order.extend(order)
+ self.definitions.update(charts)
+
+
+def is_gpu_data_value_valid(value):
+ try:
+ int(value)
+ except (TypeError, ValueError):
+ return False
+ return True
diff --git a/collectors/python.d.plugin/nvidia_smi/nvidia_smi.conf b/collectors/python.d.plugin/nvidia_smi/nvidia_smi.conf
new file mode 100644
index 0000000..3d2a30d
--- /dev/null
+++ b/collectors/python.d.plugin/nvidia_smi/nvidia_smi.conf
@@ -0,0 +1,68 @@
+# netdata python.d.plugin configuration for nvidia_smi
+#
+# This file is in YAML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed to run at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# In addition to the above, nvidia_smi also supports the following:
+#
+# loop_mode: yes/no                  # default is yes. If set to yes, `nvidia-smi` is executed in a separate thread using the `-l` option.
+# poll_seconds: SECONDS              # default is 1. Sets how often, in seconds, the nvidia-smi tool is polled in loop mode.
+# exclude_zero_memory_users: yes/no  # default is no. Whether to collect metrics for users with 0 MiB of memory allocated.
+#
+# ----------------------------------------------------------------------
diff --git a/collectors/python.d.plugin/openldap/Makefile.inc b/collectors/python.d.plugin/openldap/Makefile.inc
new file mode 100644
index 0000000..dc947e2
--- /dev/null
+++ b/collectors/python.d.plugin/openldap/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += openldap/openldap.chart.py
+dist_pythonconfig_DATA += openldap/openldap.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += openldap/README.md openldap/Makefile.inc
+
diff --git a/collectors/python.d.plugin/openldap/README.md b/collectors/python.d.plugin/openldap/README.md
new file mode 100644
index 0000000..4942d0f
--- /dev/null
+++ b/collectors/python.d.plugin/openldap/README.md
@@ -0,0 +1,79 @@
+<!--
+title: "OpenLDAP monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/openldap/README.md
+sidebar_label: "OpenLDAP"
+-->
+
+# OpenLDAP monitoring with Netdata
+
+Provides statistics from an OpenLDAP (`slapd`) server.
+Statistics are taken from the LDAP monitoring interface; see the slapd-monitor(5) manual page for details.
+
+**Requirements:**
+
+- Follow the instructions at <https://www.openldap.org/doc/admin24/monitoringslapd.html> to activate the monitoring interface.
+- Install the Python `ldap` module (`pip install python-ldap` or `yum install python-ldap`).
+- Modify `openldap.conf` with your credentials.
+
+It produces the following charts:
+
+1. **Connections**
+
+    - total number of connections
+
+2. **Bytes**
+
+ - sent
+
+3. **Operations**
+
+ - completed
+ - initiated
+
+4. **Referrals**
+
+ - sent
+
+5. **Entries**
+
+ - sent
+
+6. **LDAP Operations**
+
+ - bind
+ - search
+ - unbind
+ - add
+ - delete
+ - modify
+ - compare
+
+7. **Waiters**
+
+ - read
+ - write
+
+## Configuration
+
+Edit the `python.d/openldap.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/openldap.conf
+```
+
+Sample:
+
+```yaml
+openldap:
+ name : 'local'
+ username : "cn=monitor,dc=superb,dc=eu"
+ password : "testpass"
+ server : 'localhost'
+ port : 389
+```
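+
+To verify the monitoring interface and credentials outside Netdata, a quick check with the same Python `ldap` module the collector uses (the DN and attribute come from the module's internal list; the credentials are the sample values above):
+
+```python
+import ldap
+
+conn = ldap.initialize('ldap://localhost:389')
+conn.simple_bind_s('cn=monitor,dc=superb,dc=eu', 'testpass')
+# read one monitored counter, e.g. total connections
+print(conn.search_s('cn=Total,cn=Connections,cn=Monitor',
+                    ldap.SCOPE_BASE, attrlist=['monitorCounter']))
+```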
+
+---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fopenldap%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/python.d.plugin/openldap/openldap.chart.py b/collectors/python.d.plugin/openldap/openldap.chart.py
new file mode 100644
index 0000000..aba1439
--- /dev/null
+++ b/collectors/python.d.plugin/openldap/openldap.chart.py
@@ -0,0 +1,216 @@
+# -*- coding: utf-8 -*-
+# Description: openldap netdata python.d module
+# Author: Manolis Kartsonakis (ekartsonakis)
+# SPDX-License-Identifier: GPL-3.0+
+
+try:
+ import ldap
+
+ HAS_LDAP = True
+except ImportError:
+ HAS_LDAP = False
+
+from bases.FrameworkServices.SimpleService import SimpleService
+
+DEFAULT_SERVER = 'localhost'
+DEFAULT_PORT = '389'
+DEFAULT_TLS = False
+DEFAULT_CERT_CHECK = True
+DEFAULT_TIMEOUT = 1
+DEFAULT_START_TLS = False
+
+ORDER = [
+ 'total_connections',
+ 'bytes_sent',
+ 'operations',
+ 'referrals_sent',
+ 'entries_sent',
+ 'ldap_operations',
+ 'waiters'
+]
+
+CHARTS = {
+ 'total_connections': {
+ 'options': [None, 'Total Connections', 'connections/s', 'ldap', 'openldap.total_connections', 'line'],
+ 'lines': [
+ ['total_connections', 'connections', 'incremental']
+ ]
+ },
+ 'bytes_sent': {
+ 'options': [None, 'Traffic', 'KiB/s', 'ldap', 'openldap.traffic_stats', 'line'],
+ 'lines': [
+ ['bytes_sent', 'sent', 'incremental', 1, 1024]
+ ]
+ },
+ 'operations': {
+ 'options': [None, 'Operations Status', 'ops/s', 'ldap', 'openldap.operations_status', 'line'],
+ 'lines': [
+ ['completed_operations', 'completed', 'incremental'],
+ ['initiated_operations', 'initiated', 'incremental']
+ ]
+ },
+ 'referrals_sent': {
+ 'options': [None, 'Referrals', 'referrals/s', 'ldap', 'openldap.referrals', 'line'],
+ 'lines': [
+ ['referrals_sent', 'sent', 'incremental']
+ ]
+ },
+ 'entries_sent': {
+ 'options': [None, 'Entries', 'entries/s', 'ldap', 'openldap.entries', 'line'],
+ 'lines': [
+ ['entries_sent', 'sent', 'incremental']
+ ]
+ },
+ 'ldap_operations': {
+ 'options': [None, 'Operations', 'ops/s', 'ldap', 'openldap.ldap_operations', 'line'],
+ 'lines': [
+ ['bind_operations', 'bind', 'incremental'],
+ ['search_operations', 'search', 'incremental'],
+ ['unbind_operations', 'unbind', 'incremental'],
+ ['add_operations', 'add', 'incremental'],
+ ['delete_operations', 'delete', 'incremental'],
+ ['modify_operations', 'modify', 'incremental'],
+ ['compare_operations', 'compare', 'incremental']
+ ]
+ },
+ 'waiters': {
+ 'options': [None, 'Waiters', 'waiters/s', 'ldap', 'openldap.waiters', 'line'],
+ 'lines': [
+ ['write_waiters', 'write', 'incremental'],
+ ['read_waiters', 'read', 'incremental']
+ ]
+ },
+}
+
+# Metrics to gather: (DN, attribute) pairs to query from the monitor backend
+SEARCH_LIST = {
+ 'total_connections': (
+ 'cn=Total,cn=Connections,cn=Monitor', 'monitorCounter',
+ ),
+ 'bytes_sent': (
+ 'cn=Bytes,cn=Statistics,cn=Monitor', 'monitorCounter',
+ ),
+ 'completed_operations': (
+ 'cn=Operations,cn=Monitor', 'monitorOpCompleted',
+ ),
+ 'initiated_operations': (
+ 'cn=Operations,cn=Monitor', 'monitorOpInitiated',
+ ),
+ 'referrals_sent': (
+ 'cn=Referrals,cn=Statistics,cn=Monitor', 'monitorCounter',
+ ),
+ 'entries_sent': (
+ 'cn=Entries,cn=Statistics,cn=Monitor', 'monitorCounter',
+ ),
+ 'bind_operations': (
+ 'cn=Bind,cn=Operations,cn=Monitor', 'monitorOpCompleted',
+ ),
+ 'unbind_operations': (
+ 'cn=Unbind,cn=Operations,cn=Monitor', 'monitorOpCompleted',
+ ),
+ 'add_operations': (
+ 'cn=Add,cn=Operations,cn=Monitor', 'monitorOpInitiated',
+ ),
+ 'delete_operations': (
+ 'cn=Delete,cn=Operations,cn=Monitor', 'monitorOpCompleted',
+ ),
+ 'modify_operations': (
+ 'cn=Modify,cn=Operations,cn=Monitor', 'monitorOpCompleted',
+ ),
+ 'compare_operations': (
+ 'cn=Compare,cn=Operations,cn=Monitor', 'monitorOpCompleted',
+ ),
+ 'search_operations': (
+ 'cn=Search,cn=Operations,cn=Monitor', 'monitorOpCompleted',
+ ),
+ 'write_waiters': (
+ 'cn=Write,cn=Waiters,cn=Monitor', 'monitorCounter',
+ ),
+ 'read_waiters': (
+ 'cn=Read,cn=Waiters,cn=Monitor', 'monitorCounter',
+ ),
+}
+
+
+class Service(SimpleService):
+ def __init__(self, configuration=None, name=None):
+ SimpleService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
+ self.server = configuration.get('server', DEFAULT_SERVER)
+ self.port = configuration.get('port', DEFAULT_PORT)
+ self.username = configuration.get('username')
+ self.password = configuration.get('password')
+ self.timeout = configuration.get('timeout', DEFAULT_TIMEOUT)
+ self.use_tls = configuration.get('use_tls', DEFAULT_TLS)
+ self.cert_check = configuration.get('cert_check', DEFAULT_CERT_CHECK)
+ self.use_start_tls = configuration.get('use_start_tls', DEFAULT_START_TLS)
+ self.alive = False
+ self.conn = None
+
+ def disconnect(self):
+ if self.conn:
+ self.conn.unbind()
+ self.conn = None
+ self.alive = False
+
+ def connect(self):
+ try:
+ if self.use_tls:
+ self.conn = ldap.initialize('ldaps://%s:%s' % (self.server, self.port))
+ else:
+ self.conn = ldap.initialize('ldap://%s:%s' % (self.server, self.port))
+ self.conn.set_option(ldap.OPT_NETWORK_TIMEOUT, self.timeout)
+ if (self.use_tls or self.use_start_tls) and not self.cert_check:
+ self.conn.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_NEVER)
+ if self.use_start_tls or self.use_tls:
+ self.conn.set_option(ldap.OPT_X_TLS_NEWCTX, 0)
+ if self.use_start_tls:
+ self.conn.protocol_version = ldap.VERSION3
+ self.conn.start_tls_s()
+ if self.username and self.password:
+ self.conn.simple_bind(self.username, self.password)
+ except ldap.LDAPError as error:
+ self.error(error)
+ return False
+
+ self.alive = True
+ return True
+
+ def reconnect(self):
+ self.disconnect()
+ return self.connect()
+
+ def check(self):
+ if not HAS_LDAP:
+ self.error("'python-ldap' package is needed")
+ return None
+
+ return self.connect() and self.get_data()
+
+ def get_data(self):
+ if not self.alive and not self.reconnect():
+ return None
+
+ data = dict()
+ for key in SEARCH_LIST:
+ dn = SEARCH_LIST[key][0]
+ attr = SEARCH_LIST[key][1]
+ try:
+ num = self.conn.search(dn, ldap.SCOPE_BASE, 'objectClass=*', [attr, ])
+ result_type, result_data = self.conn.result(num, 1)
+ except ldap.LDAPError as error:
+ self.error("Empty result. Check bind username/password. Message: ", error)
+ self.alive = False
+ return None
+
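+            # 101 is ldap.RES_SEARCH_RESULT, the final message of a completed search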
+ if result_type != 101:
+ continue
+
+ try:
+ data[key] = int(list(result_data[0][1].values())[0][0])
+ except (ValueError, IndexError) as error:
+ self.debug(error)
+ continue
+
+ return data
diff --git a/collectors/python.d.plugin/openldap/openldap.conf b/collectors/python.d.plugin/openldap/openldap.conf
new file mode 100644
index 0000000..5fd99a5
--- /dev/null
+++ b/collectors/python.d.plugin/openldap/openldap.conf
@@ -0,0 +1,75 @@
+# netdata python.d.plugin configuration for openldap
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# openldap is slow, so collect once every 10 seconds
+update_every: 10
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# ----------------------------------------------------------------------
+# OPENLDAP EXTRA PARAMETERS
+
+# Set here your LDAP connection settings
+
+#username : "cn=admin,dc=example,dc=com" # The bind user with the right to access monitor statistics
+#password : "yourpass" # The password for the bind user
+#server : 'localhost' # The listening address of the LDAP server. For TLS, use the hostname the certificate is issued for.
+#port : 389 # The listening port of the LDAP server. Change to 636 for TLS connections.
+#use_tls : False # Set to True if a TLS connection is used over ldaps://
+#use_start_tls: False # Set to True if a TLS connection is used over ldap://
+#cert_check : True # Set to False to skip certificate verification
+#timeout : 1 # Seconds to wait before timing out if no connection exists
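+
+# Example job (all values are placeholders - uncomment and adjust to your setup):
+#
+#openldap:
+#  name: 'local'
+#  username: 'cn=monitor,dc=superb,dc=eu'
+#  password: 'testpass'
+#  server: 'localhost'
+#  port: 389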
diff --git a/collectors/python.d.plugin/oracledb/Makefile.inc b/collectors/python.d.plugin/oracledb/Makefile.inc
new file mode 100644
index 0000000..ea3a824
--- /dev/null
+++ b/collectors/python.d.plugin/oracledb/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += oracledb/oracledb.chart.py
+dist_pythonconfig_DATA += oracledb/oracledb.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += oracledb/README.md oracledb/Makefile.inc
+
diff --git a/collectors/python.d.plugin/oracledb/README.md b/collectors/python.d.plugin/oracledb/README.md
new file mode 100644
index 0000000..d61c7d2
--- /dev/null
+++ b/collectors/python.d.plugin/oracledb/README.md
@@ -0,0 +1,97 @@
+<!--
+title: "OracleDB monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/oracledb/README.md
+sidebar_label: "OracleDB"
+-->
+
+# OracleDB monitoring with Netdata
+
+Monitors performance and health metrics of an Oracle database.
+
+## Requirements
+
+- `cx_Oracle` package.
+- Oracle Client (using `cx_Oracle` requires Oracle Client libraries to be installed).
+
+It produces the following charts:
+
+- session activity
+ - Session Count
+ - Session Limit Usage
+ - Logons
+- disk activity
+ - Physical Disk Reads/Writes
+ - Sorts On Disk
+ - Full Table Scans
+- database and buffer activity
+ - Database Wait Time Ratio
+ - Shared Pool Free Memory
+ - In-Memory Sorts Ratio
+ - SQL Service Response Time
+ - User Rollbacks
+ - Enqueue Timeouts
+- cache
+ - Cache Hit Ratio
+ - Global Cache Blocks Events
+- activities
+ - Activities
+- wait time
+ - Wait Time
+- tablespace
+ - Size
+ - Usage
+ - Usage In Percent
+- allocated space
+ - Size
+ - Usage
+ - Usage In Percent
+
+## Prerequisites
+
+To use the Oracle module, do the following:
+
+1. Install the `cx_Oracle` package ([link](https://cx-oracle.readthedocs.io/en/latest/user_guide/installation.html)).
+
+2. Install Oracle Client libraries
+ ([link](https://cx-oracle.readthedocs.io/en/latest/user_guide/installation.html#install-oracle-client)).
+
+3. Create a read-only `netdata` user with proper access to your Oracle Database Server.
+
+Connect to your Oracle database with an administrative user and execute:
+
+```
+ALTER SESSION SET "_ORACLE_SCRIPT"=true;
+
+CREATE USER netdata IDENTIFIED BY <PASSWORD>;
+
+GRANT CONNECT TO netdata;
+GRANT SELECT_CATALOG_ROLE TO netdata;
+```
+
+## Configuration
+
+Edit the `python.d/oracledb.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/oracledb.conf
+```
+
+```yaml
+local:
+ user: 'netdata'
+ password: 'secret'
+ server: 'localhost:1521'
+ service: 'XE'
+
+remote:
+ user: 'netdata'
+ password: 'secret'
+ server: '10.0.0.1:1521'
+ service: 'XE'
+```
+
+All parameters are required. Without them, the module will fail to start.
+
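+To confirm that the credentials and service name work before starting the collector, you can test the connection with `cx_Oracle` directly (a minimal sketch reusing the connect-string format and the placeholder credentials from the example above):
+
+```python
+import cx_Oracle
+
+# Placeholders - match the values from python.d/oracledb.conf
+USER = 'netdata'
+PASSWORD = 'secret'
+SERVER = 'localhost:1521'
+SERVICE = 'XE'
+
+# Same 'user/password@//server/service' format the module builds internally
+conn = cx_Oracle.connect('{0}/{1}@//{2}/{3}'.format(USER, PASSWORD, SERVER, SERVICE))
+with conn.cursor() as cursor:
+    cursor.execute('SELECT metric_name, value FROM gv$sysmetric WHERE ROWNUM <= 5')
+    for name, value in cursor.fetchall():
+        print(name, value)
+conn.close()
+```
+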
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Foracledb%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/python.d.plugin/oracledb/oracledb.chart.py b/collectors/python.d.plugin/oracledb/oracledb.chart.py
new file mode 100644
index 0000000..28ef8db
--- /dev/null
+++ b/collectors/python.d.plugin/oracledb/oracledb.chart.py
@@ -0,0 +1,831 @@
+# -*- coding: utf-8 -*-
+# Description: oracledb netdata python.d module
+# Author: ilyam8 (Ilya Mashchenko)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from copy import deepcopy
+
+from bases.FrameworkServices.SimpleService import SimpleService
+
+try:
+ import cx_Oracle
+
+ HAS_ORACLE = True
+except ImportError:
+ HAS_ORACLE = False
+
+ORDER = [
+ 'session_count',
+ 'session_limit_usage',
+ 'logons',
+ 'physical_disk_read_write',
+ 'sorts_on_disk',
+ 'full_table_scans',
+ 'database_wait_time_ratio',
+ 'shared_pool_free_memory',
+ 'in_memory_sorts_ratio',
+ 'sql_service_response_time',
+ 'user_rollbacks',
+ 'enqueue_timeouts',
+ 'cache_hit_ratio',
+ 'global_cache_blocks',
+ 'activity',
+ 'wait_time',
+ 'tablespace_size',
+ 'tablespace_usage',
+ 'tablespace_usage_in_percent',
+ 'allocated_size',
+ 'allocated_usage',
+ 'allocated_usage_in_percent',
+]
+
+CHARTS = {
+ 'session_count': {
+ 'options': [None, 'Session Count', 'sessions', 'session activity', 'oracledb.session_count', 'line'],
+ 'lines': [
+ ['session_count', 'total', 'absolute', 1, 1000],
+ ['average_active_sessions', 'active', 'absolute', 1, 1000],
+ ]
+ },
+ 'session_limit_usage': {
+ 'options': [None, 'Session Limit Usage', '%', 'session activity', 'oracledb.session_limit_usage', 'area'],
+ 'lines': [
+ ['session_limit_percent', 'usage', 'absolute', 1, 1000],
+ ]
+ },
+ 'logons': {
+ 'options': [None, 'Logons', 'events/s', 'session activity', 'oracledb.logons', 'area'],
+ 'lines': [
+ ['logons_per_sec', 'logons', 'absolute', 1, 1000],
+ ]
+ },
+ 'physical_disk_read_write': {
+ 'options': [None, 'Physical Disk Reads/Writes', 'events/s', 'disk activity',
+ 'oracledb.physical_disk_read_writes', 'area'],
+ 'lines': [
+ ['physical_reads_per_sec', 'reads', 'absolute', 1, 1000],
+ ['physical_writes_per_sec', 'writes', 'absolute', -1, 1000],
+ ]
+ },
+ 'sorts_on_disk': {
+ 'options': [None, 'Sorts On Disk', 'events/s', 'disk activity', 'oracledb.sorts_on_disks', 'line'],
+ 'lines': [
+ ['disk_sort_per_sec', 'sorts', 'absolute', 1, 1000],
+ ]
+ },
+ 'full_table_scans': {
+ 'options': [None, 'Full Table Scans', 'events/s', 'disk activity', 'oracledb.full_table_scans', 'line'],
+ 'lines': [
+ ['long_table_scans_per_sec', 'full table scans', 'absolute', 1, 1000],
+ ]
+ },
+ 'database_wait_time_ratio': {
+ 'options': [None, 'Database Wait Time Ratio', '%', 'database and buffer activity',
+ 'oracledb.database_wait_time_ratio', 'line'],
+ 'lines': [
+ ['database_wait_time_ratio', 'wait time ratio', 'absolute', 1, 1000],
+ ]
+ },
+ 'shared_pool_free_memory': {
+ 'options': [None, 'Shared Pool Free Memory', '%', 'database and buffer activity',
+ 'oracledb.shared_pool_free_memory', 'line'],
+ 'lines': [
+ ['shared_pool_free_percent', 'free memory', 'absolute', 1, 1000],
+ ]
+ },
+ 'in_memory_sorts_ratio': {
+ 'options': [None, 'In-Memory Sorts Ratio', '%', 'database and buffer activity',
+ 'oracledb.in_memory_sorts_ratio', 'line'],
+ 'lines': [
+ ['memory_sorts_ratio', 'in-memory sorts', 'absolute', 1, 1000],
+ ]
+ },
+ 'sql_service_response_time': {
+ 'options': [None, 'SQL Service Response Time', 'seconds', 'database and buffer activity',
+ 'oracledb.sql_service_response_time', 'line'],
+ 'lines': [
+ ['sql_service_response_time', 'time', 'absolute', 1, 1000],
+ ]
+ },
+ 'user_rollbacks': {
+ 'options': [None, 'User Rollbacks', 'events/s', 'database and buffer activity',
+ 'oracledb.user_rollbacks', 'line'],
+ 'lines': [
+ ['user_rollbacks_per_sec', 'rollbacks', 'absolute', 1, 1000],
+ ]
+ },
+ 'enqueue_timeouts': {
+ 'options': [None, 'Enqueue Timeouts', 'events/s', 'database and buffer activity',
+ 'oracledb.enqueue_timeouts', 'line'],
+ 'lines': [
+ ['enqueue_timeouts_per_sec', 'enqueue timeouts', 'absolute', 1, 1000],
+ ]
+ },
+ 'cache_hit_ratio': {
+ 'options': [None, 'Cache Hit Ratio', '%', 'cache', 'oracledb.cache_hit_ration', 'stacked'],
+ 'lines': [
+ ['buffer_cache_hit_ratio', 'buffer', 'absolute', 1, 1000],
+ ['cursor_cache_hit_ratio', 'cursor', 'absolute', 1, 1000],
+ ['library_cache_hit_ratio', 'library', 'absolute', 1, 1000],
+ ['row_cache_hit_ratio', 'row', 'absolute', 1, 1000],
+ ]
+ },
+ 'global_cache_blocks': {
+ 'options': [None, 'Global Cache Blocks Events', 'events/s', 'cache', 'oracledb.global_cache_blocks', 'area'],
+ 'lines': [
+ ['global_cache_blocks_corrupted', 'corrupted', 'incremental', 1, 1000],
+ ['global_cache_blocks_lost', 'lost', 'incremental', 1, 1000],
+ ]
+ },
+ 'activity': {
+ 'options': [None, 'Activities', 'events/s', 'activities', 'oracledb.activity', 'stacked'],
+ 'lines': [
+ ['activity_parse_count_total', 'parse count', 'incremental', 1, 1000],
+ ['activity_execute_count', 'execute count', 'incremental', 1, 1000],
+ ['activity_user_commits', 'user commits', 'incremental', 1, 1000],
+ ['activity_user_rollbacks', 'user rollbacks', 'incremental', 1, 1000],
+ ]
+ },
+ 'wait_time': {
+ 'options': [None, 'Wait Time', 'ms', 'wait time', 'oracledb.wait_time', 'stacked'],
+ 'lines': [
+ ['wait_time_application', 'application', 'absolute', 1, 1000],
+ ['wait_time_configuration', 'configuration', 'absolute', 1, 1000],
+ ['wait_time_administrative', 'administrative', 'absolute', 1, 1000],
+ ['wait_time_concurrency', 'concurrency', 'absolute', 1, 1000],
+ ['wait_time_commit', 'commit', 'absolute', 1, 1000],
+ ['wait_time_network', 'network', 'absolute', 1, 1000],
+ ['wait_time_user_io', 'user I/O', 'absolute', 1, 1000],
+ ['wait_time_system_io', 'system I/O', 'absolute', 1, 1000],
+ ['wait_time_scheduler', 'scheduler', 'absolute', 1, 1000],
+ ['wait_time_other', 'other', 'absolute', 1, 1000],
+ ]
+ },
+ 'tablespace_size': {
+ 'options': [None, 'Size', 'KiB', 'tablespace', 'oracledb.tablespace_size', 'line'],
+ 'lines': [],
+ },
+ 'tablespace_usage': {
+ 'options': [None, 'Usage', 'KiB', 'tablespace', 'oracledb.tablespace_usage', 'line'],
+ 'lines': [],
+ },
+ 'tablespace_usage_in_percent': {
+ 'options': [None, 'Usage', '%', 'tablespace', 'oracledb.tablespace_usage_in_percent', 'line'],
+ 'lines': [],
+ },
+ 'allocated_size': {
+ 'options': [None, 'Size', 'B', 'tablespace', 'oracledb.allocated_size', 'line'],
+ 'lines': [],
+ },
+ 'allocated_usage': {
+ 'options': [None, 'Usage', 'B', 'tablespace', 'oracledb.allocated_usage', 'line'],
+ 'lines': [],
+ },
+ 'allocated_usage_in_percent': {
+ 'options': [None, 'Usage', '%', 'tablespace', 'oracledb.allocated_usage_in_percent', 'line'],
+ 'lines': [],
+ },
+}
+
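+# cx_Oracle connect string format: user/password@//server/service (e.g. netdata/secret@//localhost:1521/XE)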
+CX_CONNECT_STRING = "{0}/{1}@//{2}/{3}"
+
+QUERY_SYSTEM = '''
+SELECT
+ metric_name,
+ value
+FROM
+ gv$sysmetric
+ORDER BY
+ begin_time
+'''
+QUERY_TABLESPACE = '''
+SELECT
+ m.tablespace_name,
+ m.used_space * t.block_size AS used_bytes,
+ m.tablespace_size * t.block_size AS max_bytes,
+ m.used_percent
+FROM
+ dba_tablespace_usage_metrics m
+ JOIN dba_tablespaces t ON m.tablespace_name = t.tablespace_name
+'''
+QUERY_ALLOCATED = '''
+SELECT
+ nvl(b.tablespace_name,nvl(a.tablespace_name,'UNKNOWN')) tablespace_name,
+ bytes_alloc used_bytes,
+ bytes_alloc-nvl(bytes_free,0) max_bytes,
+ ((bytes_alloc-nvl(bytes_free,0))/ bytes_alloc)*100 used_percent
+FROM
+ (SELECT
+ sum(bytes) bytes_free,
+ tablespace_name
+ FROM sys.dba_free_space
+ GROUP BY tablespace_name
+ ) a,
+ (SELECT
+ sum(bytes) bytes_alloc,
+ tablespace_name
+ FROM sys.dba_data_files
+ GROUP BY tablespace_name
+ ) b
+WHERE a.tablespace_name (+) = b.tablespace_name
+'''
+QUERY_ACTIVITIES_COUNT = '''
+SELECT
+ name,
+ value
+FROM
+ v$sysstat
+WHERE
+ name IN (
+ 'parse count (total)',
+ 'execute count',
+ 'user commits',
+ 'user rollbacks'
+ )
+'''
+QUERY_WAIT_TIME = '''
+SELECT
+ n.wait_class,
+ round(m.time_waited / m.INTSIZE_CSEC, 3)
+FROM
+ v$waitclassmetric m,
+ v$system_wait_class n
+WHERE
+ m.wait_class_id = n.wait_class_id
+ AND n.wait_class != 'Idle'
+'''
+# QUERY_SESSION_COUNT = '''
+# SELECT
+# status,
+# type
+# FROM
+# v$session
+# GROUP BY
+# status,
+# type
+# '''
+# QUERY_PROCESSES_COUNT = '''
+# SELECT
+# COUNT(*)
+# FROM
+# v$process
+# '''
+# QUERY_PROCESS = '''
+# SELECT
+# program,
+# pga_used_mem,
+# pga_alloc_mem,
+# pga_freeable_mem,
+# pga_max_mem
+# FROM
+# gv$process
+# '''
+
+# PROCESS_METRICS = [
+# 'pga_used_memory',
+# 'pga_allocated_memory',
+# 'pga_freeable_memory',
+# 'pga_maximum_memory',
+# ]
+
+
+SYS_METRICS = {
+ 'Average Active Sessions': 'average_active_sessions',
+ 'Session Count': 'session_count',
+ 'Session Limit %': 'session_limit_percent',
+ 'Logons Per Sec': 'logons_per_sec',
+ 'Physical Reads Per Sec': 'physical_reads_per_sec',
+ 'Physical Writes Per Sec': 'physical_writes_per_sec',
+ 'Disk Sort Per Sec': 'disk_sort_per_sec',
+ 'Long Table Scans Per Sec': 'long_table_scans_per_sec',
+ 'Database Wait Time Ratio': 'database_wait_time_ratio',
+ 'Shared Pool Free %': 'shared_pool_free_percent',
+ 'Memory Sorts Ratio': 'memory_sorts_ratio',
+ 'SQL Service Response Time': 'sql_service_response_time',
+ 'User Rollbacks Per Sec': 'user_rollbacks_per_sec',
+ 'Enqueue Timeouts Per Sec': 'enqueue_timeouts_per_sec',
+ 'Buffer Cache Hit Ratio': 'buffer_cache_hit_ratio',
+ 'Cursor Cache Hit Ratio': 'cursor_cache_hit_ratio',
+ 'Library Cache Hit Ratio': 'library_cache_hit_ratio',
+ 'Row Cache Hit Ratio': 'row_cache_hit_ratio',
+ 'Global Cache Blocks Corrupted': 'global_cache_blocks_corrupted',
+ 'Global Cache Blocks Lost': 'global_cache_blocks_lost',
+}
+
+
+class Service(SimpleService):
+ def __init__(self, configuration=None, name=None):
+ SimpleService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = deepcopy(CHARTS)
+ self.user = configuration.get('user')
+ self.password = configuration.get('password')
+ self.server = configuration.get('server')
+ self.service = configuration.get('service')
+ self.alive = False
+ self.conn = None
+ self.active_tablespaces = set()
+
+ def connect(self):
+ if self.conn:
+ self.conn.close()
+ self.conn = None
+
+ try:
+ self.conn = cx_Oracle.connect(
+ CX_CONNECT_STRING.format(
+ self.user,
+ self.password,
+ self.server,
+ self.service,
+ ))
+ except cx_Oracle.DatabaseError as error:
+ self.error(error)
+ return False
+
+ self.alive = True
+ return True
+
+ def reconnect(self):
+ return self.connect()
+
+ def check(self):
+ if not HAS_ORACLE:
+ self.error("'cx_Oracle' package is needed to use oracledb module")
+ return False
+
+ if not all([
+ self.user,
+ self.password,
+ self.server,
+ self.service,
+ ]):
+ self.error("one of these parameters is not specified: user, password, server, service")
+ return False
+
+ if not self.connect():
+ return False
+
+ return bool(self.get_data())
+
+ def get_data(self):
+ if not self.alive and not self.reconnect():
+ return None
+
+ data = dict()
+
+ # SYSTEM
+ try:
+ rv = self.gather_system_metrics()
+ except cx_Oracle.Error as error:
+ self.error(error)
+ self.alive = False
+ return None
+ else:
+ for name, value in rv:
+ if name not in SYS_METRICS:
+ continue
+ data[SYS_METRICS[name]] = int(float(value) * 1000)
+
+ # ACTIVITIES COUNT
+ try:
+ rv = self.gather_activities_count()
+ except cx_Oracle.Error as error:
+ self.error(error)
+ self.alive = False
+ return None
+ else:
+ for name, amount in rv:
+ cleaned = name.replace(' ', '_').replace('(', '').replace(')', '')
+ new_name = 'activity_{0}'.format(cleaned)
+ data[new_name] = int(float(amount) * 1000)
+
+ # WAIT TIME
+ try:
+ rv = self.gather_wait_time_metrics()
+ except cx_Oracle.Error as error:
+ self.error(error)
+ self.alive = False
+ return None
+ else:
+ for name, amount in rv:
+ cleaned = name.replace(' ', '_').replace('/', '').lower()
+ new_name = 'wait_time_{0}'.format(cleaned)
+ data[new_name] = amount
+
+ # TABLESPACE
+ try:
+ rv = self.gather_tablespace_metrics()
+ except cx_Oracle.Error as error:
+ self.error(error)
+ self.alive = False
+ return None
+ else:
+ for name, offline, size, used, used_in_percent in rv:
+ # TODO: skip offline?
+ if not (not offline and self.charts):
+ continue
+ # TODO: remove inactive?
+ if name not in self.active_tablespaces:
+ self.active_tablespaces.add(name)
+ self.add_tablespace_to_charts(name)
+ data['{0}_tablespace_size'.format(name)] = int(size * 1000)
+ data['{0}_tablespace_used'.format(name)] = int(used * 1000)
+ data['{0}_tablespace_used_in_percent'.format(name)] = int(used_in_percent * 1000)
+
+ # ALLOCATED SPACE
+ try:
+ rv = self.gather_allocated_metrics()
+ except cx_Oracle.Error as error:
+ self.error(error)
+ self.alive = False
+ return None
+ else:
+ for name, offline, size, used, used_in_percent in rv:
+ # TODO: skip offline?
+ if not (not offline and self.charts):
+ continue
+ # TODO: remove inactive?
+ if name not in self.active_tablespaces:
+ self.active_tablespaces.add(name)
+ self.add_tablespace_to_charts(name)
+ data['{0}_allocated_size'.format(name)] = int(size * 1000)
+ data['{0}_allocated_used'.format(name)] = int(used * 1000)
+ data['{0}_allocated_used_in_percent'.format(name)] = int(used_in_percent * 1000)
+
+ return data or None
+
+ def gather_system_metrics(self):
+
+ """
+ :return:
+
+ [['Buffer Cache Hit Ratio', 100],
+ ['Memory Sorts Ratio', 100],
+ ['Redo Allocation Hit Ratio', 100],
+ ['User Transaction Per Sec', 0],
+ ['Physical Reads Per Sec', 0],
+ ['Physical Reads Per Txn', 0],
+ ['Physical Writes Per Sec', 0],
+ ['Physical Writes Per Txn', 0],
+ ['Physical Reads Direct Per Sec', 0],
+ ['Physical Reads Direct Per Txn', 0],
+ ['Physical Writes Direct Per Sec', 0],
+ ['Physical Writes Direct Per Txn', 0],
+ ['Physical Reads Direct Lobs Per Sec', 0],
+ ['Physical Reads Direct Lobs Per Txn', 0],
+ ['Physical Writes Direct Lobs Per Sec', 0],
+ ['Physical Writes Direct Lobs Per Txn', 0],
+ ['Redo Generated Per Sec', Decimal('4.66666666666667')],
+ ['Redo Generated Per Txn', 280],
+ ['Logons Per Sec', Decimal('0.0166666666666667')],
+ ['Logons Per Txn', 1],
+ ['Open Cursors Per Sec', 0.35],
+ ['Open Cursors Per Txn', 21],
+ ['User Commits Per Sec', 0],
+ ['User Commits Percentage', 0],
+ ['User Rollbacks Per Sec', 0],
+ ['User Rollbacks Percentage', 0],
+ ['User Calls Per Sec', Decimal('0.0333333333333333')],
+ ['User Calls Per Txn', 2],
+ ['Recursive Calls Per Sec', 14.15],
+ ['Recursive Calls Per Txn', 849],
+ ['Logical Reads Per Sec', Decimal('0.683333333333333')],
+ ['Logical Reads Per Txn', 41],
+ ['DBWR Checkpoints Per Sec', 0],
+ ['Background Checkpoints Per Sec', 0],
+ ['Redo Writes Per Sec', Decimal('0.0333333333333333')],
+ ['Redo Writes Per Txn', 2],
+ ['Long Table Scans Per Sec', 0],
+ ['Long Table Scans Per Txn', 0],
+ ['Total Table Scans Per Sec', Decimal('0.0166666666666667')],
+ ['Total Table Scans Per Txn', 1],
+ ['Full Index Scans Per Sec', 0],
+ ['Full Index Scans Per Txn', 0],
+ ['Total Index Scans Per Sec', Decimal('0.216666666666667')],
+ ['Total Index Scans Per Txn', 13],
+ ['Total Parse Count Per Sec', 0.35],
+ ['Total Parse Count Per Txn', 21],
+ ['Hard Parse Count Per Sec', 0],
+ ['Hard Parse Count Per Txn', 0],
+ ['Parse Failure Count Per Sec', 0],
+ ['Parse Failure Count Per Txn', 0],
+ ['Cursor Cache Hit Ratio', Decimal('52.3809523809524')],
+ ['Disk Sort Per Sec', 0],
+ ['Disk Sort Per Txn', 0],
+ ['Rows Per Sort', 8.6],
+ ['Execute Without Parse Ratio', Decimal('27.5862068965517')],
+ ['Soft Parse Ratio', 100],
+ ['User Calls Ratio', Decimal('0.235017626321974')],
+ ['Host CPU Utilization (%)', Decimal('0.124311845142959')],
+ ['Network Traffic Volume Per Sec', 0],
+ ['Enqueue Timeouts Per Sec', 0],
+ ['Enqueue Timeouts Per Txn', 0],
+ ['Enqueue Waits Per Sec', 0],
+ ['Enqueue Waits Per Txn', 0],
+ ['Enqueue Deadlocks Per Sec', 0],
+ ['Enqueue Deadlocks Per Txn', 0],
+ ['Enqueue Requests Per Sec', Decimal('216.683333333333')],
+ ['Enqueue Requests Per Txn', 13001],
+ ['DB Block Gets Per Sec', 0],
+ ['DB Block Gets Per Txn', 0],
+ ['Consistent Read Gets Per Sec', Decimal('0.683333333333333')],
+ ['Consistent Read Gets Per Txn', 41],
+ ['DB Block Changes Per Sec', 0],
+ ['DB Block Changes Per Txn', 0],
+ ['Consistent Read Changes Per Sec', 0],
+ ['Consistent Read Changes Per Txn', 0],
+ ['CPU Usage Per Sec', 0],
+ ['CPU Usage Per Txn', 0],
+ ['CR Blocks Created Per Sec', 0],
+ ['CR Blocks Created Per Txn', 0],
+ ['CR Undo Records Applied Per Sec', 0],
+ ['CR Undo Records Applied Per Txn', 0],
+ ['User Rollback UndoRec Applied Per Sec', 0],
+ ['User Rollback Undo Records Applied Per Txn', 0],
+ ['Leaf Node Splits Per Sec', 0],
+ ['Leaf Node Splits Per Txn', 0],
+ ['Branch Node Splits Per Sec', 0],
+ ['Branch Node Splits Per Txn', 0],
+ ['PX downgraded 1 to 25% Per Sec', 0],
+ ['PX downgraded 25 to 50% Per Sec', 0],
+ ['PX downgraded 50 to 75% Per Sec', 0],
+ ['PX downgraded 75 to 99% Per Sec', 0],
+ ['PX downgraded to serial Per Sec', 0],
+ ['Physical Read Total IO Requests Per Sec', Decimal('2.16666666666667')],
+ ['Physical Read Total Bytes Per Sec', Decimal('35498.6666666667')],
+ ['GC CR Block Received Per Second', 0],
+ ['GC CR Block Received Per Txn', 0],
+ ['GC Current Block Received Per Second', 0],
+ ['GC Current Block Received Per Txn', 0],
+ ['Global Cache Average CR Get Time', 0],
+ ['Global Cache Average Current Get Time', 0],
+ ['Physical Write Total IO Requests Per Sec', Decimal('0.966666666666667')],
+ ['Global Cache Blocks Corrupted', 0],
+ ['Global Cache Blocks Lost', 0],
+ ['Current Logons Count', 49],
+ ['Current Open Cursors Count', 64],
+ ['User Limit %', Decimal('0.00000114087015416959')],
+ ['SQL Service Response Time', 0],
+ ['Database Wait Time Ratio', 0],
+ ['Database CPU Time Ratio', 0],
+ ['Response Time Per Txn', 0],
+ ['Row Cache Hit Ratio', 100],
+ ['Row Cache Miss Ratio', 0],
+ ['Library Cache Hit Ratio', 100],
+ ['Library Cache Miss Ratio', 0],
+ ['Shared Pool Free %', Decimal('7.82380268491548')],
+ ['PGA Cache Hit %', Decimal('98.0399767109115')],
+ ['Process Limit %', Decimal('17.6666666666667')],
+ ['Session Limit %', Decimal('15.2542372881356')],
+ ['Executions Per Txn', 29],
+ ['Executions Per Sec', Decimal('0.483333333333333')],
+ ['Txns Per Logon', 0],
+ ['Database Time Per Sec', 0],
+ ['Physical Write Total Bytes Per Sec', 15308.8],
+ ['Physical Read IO Requests Per Sec', 0],
+ ['Physical Read Bytes Per Sec', 0],
+ ['Physical Write IO Requests Per Sec', 0],
+ ['Physical Write Bytes Per Sec', 0],
+ ['DB Block Changes Per User Call', 0],
+ ['DB Block Gets Per User Call', 0],
+ ['Executions Per User Call', 14.5],
+ ['Logical Reads Per User Call', 20.5],
+ ['Total Sorts Per User Call', 2.5],
+ ['Total Table Scans Per User Call', 0.5],
+ ['Current OS Load', 0.0390625],
+ ['Streams Pool Usage Percentage', 0],
+ ['PQ QC Session Count', 0],
+ ['PQ Slave Session Count', 0],
+ ['Queries parallelized Per Sec', 0],
+ ['DML statements parallelized Per Sec', 0],
+ ['DDL statements parallelized Per Sec', 0],
+ ['PX operations not downgraded Per Sec', 0],
+ ['Session Count', 72],
+ ['Average Synchronous Single-Block Read Latency', 0],
+ ['I/O Megabytes per Second', 0.05],
+ ['I/O Requests per Second', Decimal('3.13333333333333')],
+ ['Average Active Sessions', 0],
+ ['Active Serial Sessions', 1],
+ ['Active Parallel Sessions', 0],
+ ['Captured user calls', 0],
+ ['Replayed user calls', 0],
+ ['Workload Capture and Replay status', 0],
+ ['Background CPU Usage Per Sec', Decimal('1.22578833333333')],
+ ['Background Time Per Sec', 0.0147551],
+ ['Host CPU Usage Per Sec', Decimal('0.116666666666667')],
+ ['Cell Physical IO Interconnect Bytes', 3048448],
+ ['Temp Space Used', 0],
+ ['Total PGA Allocated', 200657920],
+ ['Total PGA Used by SQL Workareas', 0],
+ ['Run Queue Per Sec', 0],
+ ['VM in bytes Per Sec', 0],
+ ['VM out bytes Per Sec', 0]]
+ """
+
+ metrics = list()
+ with self.conn.cursor() as cursor:
+ cursor.execute(QUERY_SYSTEM)
+ for metric_name, value in cursor.fetchall():
+ metrics.append([metric_name, value])
+ return metrics
+
+ def gather_tablespace_metrics(self):
+ """
+ :return:
+
+ [['SYSTEM', 874250240.0, 3233169408.0, 27.040038107400033, 0],
+ ['SYSAUX', 498860032.0, 3233169408.0, 15.429443033997678, 0],
+ ['TEMP', 0.0, 3233177600.0, 0.0, 0],
+ ['USERS', 1048576.0, 3233169408.0, 0.03243182981397305, 0]]
+ """
+ metrics = list()
+ with self.conn.cursor() as cursor:
+ cursor.execute(QUERY_TABLESPACE)
+ for tablespace_name, used_bytes, max_bytes, used_percent in cursor.fetchall():
+ if used_bytes is None:
+ offline = True
+ used = 0
+ else:
+ offline = False
+ used = float(used_bytes)
+ if max_bytes is None:
+ size = 0
+ else:
+ size = float(max_bytes)
+ if used_percent is None:
+ used_percent = 0
+ else:
+ used_percent = float(used_percent)
+ metrics.append(
+ [
+ tablespace_name,
+ offline,
+ size,
+ used,
+ used_percent,
+ ]
+ )
+ return metrics
+
+ def gather_allocated_metrics(self):
+ """
+ :return:
+
+ [['SYSTEM', 874250240.0, 3233169408.0, 27.040038107400033, 0],
+ ['SYSAUX', 498860032.0, 3233169408.0, 15.429443033997678, 0],
+ ['TEMP', 0.0, 3233177600.0, 0.0, 0],
+ ['USERS', 1048576.0, 3233169408.0, 0.03243182981397305, 0]]
+ """
+ metrics = list()
+ with self.conn.cursor() as cursor:
+ cursor.execute(QUERY_ALLOCATED)
+ for tablespace_name, used_bytes, max_bytes, used_percent in cursor.fetchall():
+ if used_bytes is None:
+ offline = True
+ used = 0
+ else:
+ offline = False
+ used = float(used_bytes)
+ if max_bytes is None:
+ size = 0
+ else:
+ size = float(max_bytes)
+ if used_percent is None:
+ used_percent = 0
+ else:
+ used_percent = float(used_percent)
+ metrics.append(
+ [
+ tablespace_name,
+ offline,
+ size,
+ used,
+ used_percent,
+ ]
+ )
+ return metrics
+
+ def gather_wait_time_metrics(self):
+ """
+ :return:
+
+ [['Other', 0],
+ ['Application', 0],
+ ['Configuration', 0],
+ ['Administrative', 0],
+ ['Concurrency', 0],
+ ['Commit', 0],
+ ['Network', 0],
+ ['User I/O', 0],
+ ['System I/O', 0.002],
+ ['Scheduler', 0]]
+ """
+ metrics = list()
+ with self.conn.cursor() as cursor:
+ cursor.execute(QUERY_WAIT_TIME)
+ for wait_class_name, value in cursor.fetchall():
+ metrics.append([wait_class_name, value])
+ return metrics
+
+ def gather_activities_count(self):
+ """
+ :return:
+
+ [('user commits', 9104),
+ ('user rollbacks', 17),
+ ('parse count (total)', 483695),
+ ('execute count', 2020356)]
+ """
+ with self.conn.cursor() as cursor:
+ cursor.execute(QUERY_ACTIVITIES_COUNT)
+ return cursor.fetchall()
+
+ # def gather_process_metrics(self):
+ # """
+ # :return:
+ #
+ # [['PSEUDO', 'pga_used_memory', 0],
+ # ['PSEUDO', 'pga_allocated_memory', 0],
+ # ['PSEUDO', 'pga_freeable_memory', 0],
+ # ['PSEUDO', 'pga_maximum_memory', 0],
+ # ['oracle@localhost.localdomain (PMON)', 'pga_used_memory', 1793827],
+ # ['oracle@localhost.localdomain (PMON)', 'pga_allocated_memory', 1888651],
+ # ['oracle@localhost.localdomain (PMON)', 'pga_freeable_memory', 0],
+ # ['oracle@localhost.localdomain (PMON)', 'pga_maximum_memory', 1888651],
+ # ...
+ # ...
+ # """
+ #
+ # metrics = list()
+ # with self.conn.cursor() as cursor:
+ # cursor.execute(QUERY_PROCESS)
+ # for row in cursor.fetchall():
+ # for i, name in enumerate(PROCESS_METRICS, 1):
+ # metrics.append([row[0], name, row[i]])
+ # return metrics
+
+ # def gather_processes_count(self):
+ # with self.conn.cursor() as cursor:
+ # cursor.execute(QUERY_PROCESSES_COUNT)
+ # return cursor.fetchone()[0] # 53
+
+ # def gather_sessions_count(self):
+ # with self.conn.cursor() as cursor:
+ # cursor.execute(QUERY_SESSION_COUNT)
+ # total, active, inactive = 0, 0, 0
+ # for status, _ in cursor.fetchall():
+ # total += 1
+ # active += status == 'ACTIVE'
+ # inactive += status == 'INACTIVE'
+ # return [total, active, inactive]
+
+ def add_tablespace_to_charts(self, name):
+ self.charts['tablespace_size'].add_dimension(
+ [
+ '{0}_tablespace_size'.format(name),
+ name,
+ 'absolute',
+ 1,
+ 1024 * 1000,
+ ])
+ self.charts['tablespace_usage'].add_dimension(
+ [
+ '{0}_tablespace_used'.format(name),
+ name,
+ 'absolute',
+ 1,
+ 1024 * 1000,
+ ])
+ self.charts['tablespace_usage_in_percent'].add_dimension(
+ [
+ '{0}_tablespace_used_in_percent'.format(name),
+ name,
+ 'absolute',
+ 1,
+ 1000,
+ ])
+ self.charts['allocated_size'].add_dimension(
+ [
+ '{0}_allocated_size'.format(name),
+ name,
+ 'absolute',
+ 1,
+ 1000,
+ ])
+ self.charts['allocated_usage'].add_dimension(
+ [
+ '{0}_allocated_used'.format(name),
+ name,
+ 'absolute',
+ 1,
+ 1000,
+ ])
+ self.charts['allocated_usage_in_percent'].add_dimension(
+ [
+ '{0}_allocated_used_in_percent'.format(name),
+ name,
+ 'absolute',
+ 1,
+ 1000,
+ ])
diff --git a/collectors/python.d.plugin/oracledb/oracledb.conf b/collectors/python.d.plugin/oracledb/oracledb.conf
new file mode 100644
index 0000000..6257172
--- /dev/null
+++ b/collectors/python.d.plugin/oracledb/oracledb.conf
@@ -0,0 +1,84 @@
+# netdata python.d.plugin configuration for oracledb
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# Additionally to the above, oracledb also supports the following:
+#
+# user: username # the username for the user account. Required.
+# password: password # the password for the user account. Required.
+# server: localhost:1521 # the IP address or hostname of the Oracle Database Server. Required.
+# service: XE # the Oracle Database service name. Required. To view the services available on your server,
+# run this query: `SELECT value FROM v$parameter WHERE name='service_names'`.
+#
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+
+#local:
+# user: 'netdata'
+# password: 'secret'
+# server: 'localhost:1521'
+# service: 'XE'
+
+#remote:
+# user: 'netdata'
+# password: 'secret'
+# server: '10.0.0.1:1521'
+# service: 'XE'
\ No newline at end of file
diff --git a/collectors/python.d.plugin/ovpn_status_log/Makefile.inc b/collectors/python.d.plugin/ovpn_status_log/Makefile.inc
new file mode 100644
index 0000000..1fbc506
--- /dev/null
+++ b/collectors/python.d.plugin/ovpn_status_log/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += ovpn_status_log/ovpn_status_log.chart.py
+dist_pythonconfig_DATA += ovpn_status_log/ovpn_status_log.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += ovpn_status_log/README.md ovpn_status_log/Makefile.inc
+
diff --git a/collectors/python.d.plugin/ovpn_status_log/README.md b/collectors/python.d.plugin/ovpn_status_log/README.md
new file mode 100644
index 0000000..8fa8cb8
--- /dev/null
+++ b/collectors/python.d.plugin/ovpn_status_log/README.md
@@ -0,0 +1,50 @@
+<!--
+title: "OpenVPN monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/ovpn_status_log/README.md
+sidebar_label: "OpenVPN"
+-->
+
+# OpenVPN monitoring with Netdata
+
+Parses OpenVPN status log files and provides summary (clients, traffic) metrics.
+
+## Requirements
+
+- If you are running multiple OpenVPN instances out of the same directory, make sure to edit the directives which create output files
+  so that multiple instances do not overwrite each other's output files.
+
+- Make sure the netdata user can read `openvpn-status.log`.
+
+- The `update_every` interval must match the interval at which OpenVPN writes the operational status to the log file.
+
+It produces:
+
+1. **Users** OpenVPN active users
+
+ - users
+
+2. **Traffic** OpenVPN overall bandwidth usage in KiB/s
+
+ - in
+ - out
+
+## Configuration
+
+Edit the `python.d/ovpn_status_log.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/ovpn_status_log.conf
+```
+
+Sample:
+
+```yaml
+default:
+ log_path : '/var/log/openvpn-status.log'
+```
+
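+For reference, this is essentially the parsing the module performs on a TLS-mode status file (a minimal sketch; the sample log row is illustrative):
+
+```python
+import re
+
+# The same pattern the module uses for TLS-mode status files
+TLS_REGEX = re.compile(
+    r'(?:[0-9a-f]+:[0-9a-f:]+|(?:\d{1,3}(?:\.\d{1,3}){3}(?::\d+)?)) (?P<bytes_in>\d+) (?P<bytes_out>\d+)'
+)
+
+# Illustrative CLIENT_LIST row from an openvpn-status.log file
+row = 'client1,203.0.113.5:42132,919463,3728058,Sun Dec 20 20:20:20 2020'
+match = TLS_REGEX.search(' '.join(row.split(',')))
+if match:
+    print(match.groupdict())  # {'bytes_in': '919463', 'bytes_out': '3728058'}
+```
+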
+---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fovpn_status_log%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/python.d.plugin/ovpn_status_log/ovpn_status_log.chart.py b/collectors/python.d.plugin/ovpn_status_log/ovpn_status_log.chart.py
new file mode 100644
index 0000000..cfc87be
--- /dev/null
+++ b/collectors/python.d.plugin/ovpn_status_log/ovpn_status_log.chart.py
@@ -0,0 +1,136 @@
+# -*- coding: utf-8 -*-
+# Description: openvpn status log netdata python.d module
+# Author: ilyam8
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import re
+
+from bases.FrameworkServices.SimpleService import SimpleService
+
+update_every = 10
+
+ORDER = [
+ 'users',
+ 'traffic',
+]
+
+CHARTS = {
+ 'users': {
+ 'options': [None, 'OpenVPN Active Users', 'active users', 'users', 'openvpn_status.users', 'line'],
+ 'lines': [
+ ['users', None, 'absolute'],
+ ]
+ },
+ 'traffic': {
+ 'options': [None, 'OpenVPN Traffic', 'KiB/s', 'traffic', 'openvpn_status.traffic', 'area'],
+ 'lines': [
+ ['bytes_in', 'in', 'incremental', 1, 1 << 10],
+ ['bytes_out', 'out', 'incremental', -1, 1 << 10]
+ ]
+ }
+}
+
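+# Matches an IPv6 address, or an IPv4 address with optional port, followed by the bytes_in and bytes_out counters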
+TLS_REGEX = re.compile(
+ r'(?:[0-9a-f]+:[0-9a-f:]+|(?:\d{1,3}(?:\.\d{1,3}){3}(?::\d+)?)) (?P<bytes_in>\d+) (?P<bytes_out>\d+)'
+)
+STATIC_KEY_REGEX = re.compile(
+ r'TCP/[A-Z]+ (?P<direction>(?:read|write)) bytes,(?P<bytes>\d+)'
+)
+
+
+class Service(SimpleService):
+ def __init__(self, configuration=None, name=None):
+ SimpleService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
+ self.log_path = self.configuration.get('log_path')
+ self.regex = {
+ 'tls': TLS_REGEX,
+ 'static_key': STATIC_KEY_REGEX
+ }
+
+ def check(self):
+ if not (self.log_path and isinstance(self.log_path, str)):
+ self.error("'log_path' is not defined")
+ return False
+
+ data = self._get_raw_data()
+ if not data:
+ self.error('Make sure that the openvpn status log file exists and netdata has permission to read it')
+ return None
+
+ found = None
+ for row in data:
+ if 'ROUTING' in row:
+ self.get_data = self.get_data_tls
+ found = True
+ break
+ elif 'STATISTICS' in row:
+ self.get_data = self.get_data_static_key
+ found = True
+ break
+ if found:
+ return True
+ self.error('Failed to parse openvpn log file')
+ return False
+
+ def _get_raw_data(self):
+ """
+        Read the log file
+        :return: list of str or None
+ """
+
+ try:
+ with open(self.log_path) as log:
+ raw_data = log.readlines() or None
+ except OSError:
+ return None
+ else:
+ return raw_data
+
+ def get_data_static_key(self):
+ """
+ Parse openvpn-status log file.
+ """
+
+ raw_data = self._get_raw_data()
+ if not raw_data:
+ return None
+
+ data = dict(bytes_in=0, bytes_out=0)
+
+ for row in raw_data:
+ match = self.regex['static_key'].search(row)
+ if match:
+ match = match.groupdict()
+ if match['direction'] == 'read':
+ data['bytes_in'] += int(match['bytes'])
+ else:
+ data['bytes_out'] += int(match['bytes'])
+
+ return data or None
+
+ def get_data_tls(self):
+ """
+ Parse openvpn-status log file.
+ """
+
+ raw_data = self._get_raw_data()
+ if not raw_data:
+ return None
+
+ data = dict(users=0, bytes_in=0, bytes_out=0)
+ for row in raw_data:
+ columns = row.split(',') if ',' in row else row.split()
+ if 'UNDEF' in columns:
+ # see https://openvpn.net/archive/openvpn-users/2004-08/msg00116.html
+ continue
+
+ match = self.regex['tls'].search(' '.join(columns))
+ if match:
+ match = match.groupdict()
+ data['users'] += 1
+ data['bytes_in'] += int(match['bytes_in'])
+ data['bytes_out'] += int(match['bytes_out'])
+
+ return data or None
diff --git a/collectors/python.d.plugin/ovpn_status_log/ovpn_status_log.conf b/collectors/python.d.plugin/ovpn_status_log/ovpn_status_log.conf
new file mode 100644
index 0000000..1d71f6b
--- /dev/null
+++ b/collectors/python.d.plugin/ovpn_status_log/ovpn_status_log.conf
@@ -0,0 +1,97 @@
+# netdata python.d.plugin configuration for openvpn status log
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# Additionally to the above, openvpn status log also supports the following:
+#
+# log_path: 'PATH' # the path to openvpn status log file
+#
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+#
+# IMPORTANT information
+#
+# 1. If you are running multiple OpenVPN instances out of the same directory, MAKE SURE TO EDIT DIRECTIVES which create output files
+# so that multiple instances do not overwrite each other's output files.
+# 2. Make sure NETDATA USER CAN READ openvpn-status.log
+#
+# * cd into directory with openvpn-status.log and run the following commands as root
+# * #chown :netdata openvpn-status.log && chmod 640 openvpn-status.log
+# * To check permission and group membership run
+# * #ls -l openvpn-status.log
+# -rw-r----- 1 root netdata 359 dec 21 21:22 openvpn-status.log
+#
+# 3. The update_every interval MUST MATCH the interval at which OpenVPN writes the operational status to the log file.
+#    If it does not, the traffic chart WILL DISPLAY WRONG values.
+#
+#    The default OpenVPN update interval is 10 seconds on Debian 8.
+# # ps -C openvpn -o command=
+# /usr/sbin/openvpn --daemon ovpn-server --status /run/openvpn/server.status 10 --cd /etc/openvpn --config /etc/openvpn/server.conf
+#
+# 4. Confirm status is configured in your OpenVPN configuration.
+# * Open OpenVPN config in an editor (e.g. sudo nano /etc/openvpn/default.conf)
+# * Confirm status is enabled with below:
+# status /var/log/openvpn-status.log
+#
+#default:
+# log_path: '/var/log/openvpn-status.log'
+#
+# ----------------------------------------------------------------------
diff --git a/collectors/python.d.plugin/phpfpm/Makefile.inc b/collectors/python.d.plugin/phpfpm/Makefile.inc
new file mode 100644
index 0000000..ff312fe
--- /dev/null
+++ b/collectors/python.d.plugin/phpfpm/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += phpfpm/phpfpm.chart.py
+dist_pythonconfig_DATA += phpfpm/phpfpm.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += phpfpm/README.md phpfpm/Makefile.inc
+
diff --git a/collectors/python.d.plugin/phpfpm/README.md b/collectors/python.d.plugin/phpfpm/README.md
new file mode 100644
index 0000000..9d0dbb5
--- /dev/null
+++ b/collectors/python.d.plugin/phpfpm/README.md
@@ -0,0 +1,51 @@
+<!--
+title: "PHP-FPM monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/phpfpm/README.md
+sidebar_label: "PHP-FPM"
+-->
+
+# PHP-FPM monitoring with Netdata
+
+Monitors one or more PHP-FPM instances depending on configuration.
+
+## Requirements
+
+- `PHP-FPM` with [enabled `status` page](https://easyengine.io/tutorials/php/fpm-status-page/)
+- access to `status` page via web server
+
+## Charts
+
+It produces the following charts:
+
+- Active Connections in `connections`
+- Requests in `requests/s`
+- Performance in `status`
+- Requests Duration Among All Idle Processes in `milliseconds`
+- Last Request CPU Usage Among All Idle Processes in `percentage`
+- Last Request Memory Usage Among All Idle Processes in `KB`
+
+## Configuration
+
+Edit the `python.d/phpfpm.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/phpfpm.conf
+```
+
+It needs only the `url` of the server's `status` page. Here is an example for local and remote instances:
+
+```yaml
+local:
+ url : 'http://localhost/status?full&json'
+
+remote:
+ url : 'http://203.0.113.10/status?full&json'
+```
+
+Without configuration, the module attempts to connect to `http://localhost/status`.
+
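+To see exactly what the collector will receive, you can fetch the status endpoint yourself (a minimal sketch; the URL is the same placeholder used in the example above):
+
+```python
+import json
+from urllib.request import urlopen
+
+# Placeholder URL - same as the 'local' job above
+URL = 'http://localhost/status?full&json'
+
+with urlopen(URL) as response:
+    status = json.loads(response.read().decode('utf-8'))
+
+# Pool-level counters the module charts
+for key in ('active processes', 'idle processes', 'accepted conn', 'slow requests'):
+    print(key, status.get(key))
+```
+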
+---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fphpfpm%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/python.d.plugin/phpfpm/phpfpm.chart.py b/collectors/python.d.plugin/phpfpm/phpfpm.chart.py
new file mode 100644
index 0000000..226df99
--- /dev/null
+++ b/collectors/python.d.plugin/phpfpm/phpfpm.chart.py
@@ -0,0 +1,174 @@
+# -*- coding: utf-8 -*-
+# Description: PHP-FPM netdata python.d module
+# Author: Pawel Krupa (paulfantom)
+# Author: Ilya Mashchenko (ilyam8)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import json
+import re
+
+from bases.FrameworkServices.UrlService import UrlService
+
+REGEX = re.compile(r'([a-z][a-z ]+): ([\d.]+)')
+
+POOL_INFO = [
+ ('active processes', 'active'),
+ ('max active processes', 'maxActive'),
+ ('idle processes', 'idle'),
+ ('accepted conn', 'requests'),
+ ('max children reached', 'reached'),
+ ('slow requests', 'slow')
+]
+
+PER_PROCESS_INFO = [
+ ('request duration', 'ReqDur'),
+ ('last request cpu', 'ReqCpu'),
+ ('last request memory', 'ReqMem')
+]
+
+
+def average(collection):
+ return sum(collection, 0.0) / max(len(collection), 1)
+
+
+CALC = [
+ ('min', min),
+ ('max', max),
+ ('avg', average)
+]
+
+ORDER = [
+ 'connections',
+ 'requests',
+ 'performance',
+ 'request_duration',
+ 'request_cpu',
+ 'request_mem',
+]
+
+CHARTS = {
+ 'connections': {
+ 'options': [None, 'PHP-FPM Active Connections', 'connections', 'active connections', 'phpfpm.connections',
+ 'line'],
+ 'lines': [
+ ['active'],
+ ['maxActive', 'max active'],
+ ['idle']
+ ]
+ },
+ 'requests': {
+ 'options': [None, 'PHP-FPM Requests', 'requests/s', 'requests', 'phpfpm.requests', 'line'],
+ 'lines': [
+ ['requests', None, 'incremental']
+ ]
+ },
+ 'performance': {
+ 'options': [None, 'PHP-FPM Performance', 'status', 'performance', 'phpfpm.performance', 'line'],
+ 'lines': [
+ ['reached', 'max children reached'],
+ ['slow', 'slow requests']
+ ]
+ },
+ 'request_duration': {
+ 'options': [None, 'PHP-FPM Requests Duration Among All Idle Processes', 'milliseconds', 'request duration',
+ 'phpfpm.request_duration',
+ 'line'],
+ 'lines': [
+ ['minReqDur', 'min', 'absolute', 1, 1000],
+ ['maxReqDur', 'max', 'absolute', 1, 1000],
+ ['avgReqDur', 'avg', 'absolute', 1, 1000]
+ ]
+ },
+ 'request_cpu': {
+ 'options': [None, 'PHP-FPM Last Request CPU Usage Among All Idle Processes', 'percentage', 'request CPU',
+ 'phpfpm.request_cpu', 'line'],
+ 'lines': [
+ ['minReqCpu', 'min'],
+ ['maxReqCpu', 'max'],
+ ['avgReqCpu', 'avg']
+ ]
+ },
+ 'request_mem': {
+ 'options': [None, 'PHP-FPM Last Request Memory Usage Among All Idle Processes', 'KB', 'request memory',
+ 'phpfpm.request_mem', 'line'],
+ 'lines': [
+ ['minReqMem', 'min', 'absolute', 1, 1024],
+ ['maxReqMem', 'max', 'absolute', 1, 1024],
+ ['avgReqMem', 'avg', 'absolute', 1, 1024]
+ ]
+ }
+}
+
+
+class Service(UrlService):
+ def __init__(self, configuration=None, name=None):
+ UrlService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
+ self.url = self.configuration.get('url', 'http://localhost/status?full&json')
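+        # the status page returns JSON only when the url carries a 'json' query argument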
+ self.json = '&json' in self.url or '?json' in self.url
+ self.json_full = self.url.endswith(('?full&json', '?json&full'))
+ self.if_all_processes_running = dict(
+ [(c_name + p_name, 0) for c_name, func in CALC for metric, p_name in PER_PROCESS_INFO]
+ )
+
+ def _get_data(self):
+ """
+ Format data received from http request
+ :return: dict
+ """
+ raw = self._get_raw_data()
+ if not raw:
+ return None
+
+ raw_json = parse_raw_data_(is_json=self.json, raw_data=raw)
+
+ # Per Pool info: active connections, requests and performance charts
+ to_netdata = fetch_data_(raw_data=raw_json, metrics_list=POOL_INFO)
+
+ # Per Process Info: duration, cpu and memory charts (min, max, avg)
+ if self.json_full:
+ p_info = dict()
+ to_netdata.update(self.if_all_processes_running) # If all processes are in running state
+            # Per-process metrics are only meaningful for processes in the Idle state,
+            # because PHP-FPM updates them only after request processing has finished
+ for process in [p for p in raw_json['processes'] if p['state'] == 'Idle']:
+ p_info.update(fetch_data_(raw_data=process, metrics_list=PER_PROCESS_INFO, pid=str(process['pid'])))
+
+ if p_info:
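+                # aggregate per-process values into keys like
+                # 'minReqDur', 'maxReqCpu', 'avgReqMem'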
+ for new_name in PER_PROCESS_INFO:
+ for name, func in CALC:
+ to_netdata[name + new_name[1]] = func([p_info[k] for k in p_info if new_name[1] in k])
+
+ return to_netdata or None
+
+
+def fetch_data_(raw_data, metrics_list, pid=''):
+ """
+ :param raw_data: dict
+ :param metrics_list: list
+ :param pid: str
+ :return: dict
+ """
+ result = dict()
+ for metric, new_name in metrics_list:
+ if metric in raw_data:
+ result[new_name + pid] = float(raw_data[metric])
+ return result
+
+
+def parse_raw_data_(is_json, raw_data):
+ """
+ :param is_json: bool
+    :param raw_data: str
+ :return: dict
+ """
+ if is_json:
+ try:
+ return json.loads(raw_data)
+ except ValueError:
+ return dict()
+ else:
+ raw_data = ' '.join(raw_data.split())
+ return dict(REGEX.findall(raw_data))
diff --git a/collectors/python.d.plugin/phpfpm/phpfpm.conf b/collectors/python.d.plugin/phpfpm/phpfpm.conf
new file mode 100644
index 0000000..d318539
--- /dev/null
+++ b/collectors/python.d.plugin/phpfpm/phpfpm.conf
@@ -0,0 +1,88 @@
+# netdata python.d.plugin configuration for PHP-FPM
+#
+# This file is in YAML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# Additionally to the above, PHP-FPM also supports the following:
+#
+# url: 'URL' # the URL to fetch PHP-FPM's status stats
+# # Be sure to include ?full&json at the end of the url
+#
+# if the URL is password protected, the following are supported:
+#
+# user: 'username'
+# pass: 'password'
+#
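+# Example of a job for a password-protected status page (hypothetical values):
+#
+# remote:
+#   name: 'remote'
+#   url: 'http://203.0.113.10/status?full&json'
+#   user: 'username'
+#   pass: 'password'
+#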
+
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+
+localhost:
+ name : 'local'
+ url : "http://localhost/status?full&json"
+
+localipv4:
+ name : 'local'
+ url : "http://127.0.0.1/status?full&json"
+
+localipv6:
+ name : 'local'
+ url : "http://[::1]/status?full&json"
+
diff --git a/collectors/python.d.plugin/portcheck/Makefile.inc b/collectors/python.d.plugin/portcheck/Makefile.inc
new file mode 100644
index 0000000..76763f0
--- /dev/null
+++ b/collectors/python.d.plugin/portcheck/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += portcheck/portcheck.chart.py
+dist_pythonconfig_DATA += portcheck/portcheck.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += portcheck/README.md portcheck/Makefile.inc
+
diff --git a/collectors/python.d.plugin/portcheck/README.md b/collectors/python.d.plugin/portcheck/README.md
new file mode 100644
index 0000000..35521b2
--- /dev/null
+++ b/collectors/python.d.plugin/portcheck/README.md
@@ -0,0 +1,52 @@
+<!--
+title: "TCP endpoint monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/portcheck/README.md
+sidebar_label: "TCP endpoints"
+-->
+
+# TCP endpoint monitoring with Netdata
+
+Monitors TCP endpoint availability and response time.
+
+The following charts are drawn per host:
+
+1. **Latency** ms
+
+ - Time required to connect to a TCP port.
+ Displays latency in 0.1 ms resolution. If the connection failed, the value is missing.
+
+2. **Status** boolean
+
+ - Connection successful
+ - Could not create socket: possible DNS problems
+ - Connection refused: port not listening or blocked
+ - Connection timed out: host or port unreachable
+
+## Configuration
+
+Edit the `python.d/portcheck.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/portcheck.conf
+```
+
+```yaml
+server:
+ host: 'dns or ip' # required
+ port: 22 # required
+ timeout: 1 # optional
+ update_every: 1 # optional
+```
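+
+Any number of endpoints can be checked by defining one job per endpoint; the
+job names below are only illustrative:
+
+```yaml
+web:
+  host: 'example.com'
+  port: 443
+
+ssh:
+  host: '192.0.2.10'
+  port: 22
+```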
+
+### Notes
+
+- The error chart is intended for alarms, badges, or access via the API.
+- A system, service, or firewall might block Netdata's access if a portscan or
+  similar is detected.
+- Currently, the latency measurement has low accuracy and should be used as a reference only.
+
diff --git a/collectors/python.d.plugin/portcheck/portcheck.chart.py b/collectors/python.d.plugin/portcheck/portcheck.chart.py
new file mode 100644
index 0000000..818ac76
--- /dev/null
+++ b/collectors/python.d.plugin/portcheck/portcheck.chart.py
@@ -0,0 +1,157 @@
+# -*- coding: utf-8 -*-
+# Description: simple port check netdata python.d module
+# Original Author: ccremer (github.com/ccremer)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import socket
+
+try:
+ from time import monotonic as time
+except ImportError:
+ from time import time
+
+from bases.FrameworkServices.SimpleService import SimpleService
+
+PORT_LATENCY = 'connect'
+
+PORT_SUCCESS = 'success'
+PORT_TIMEOUT = 'timeout'
+PORT_FAILED = 'no_connection'
+
+ORDER = ['latency', 'status']
+
+CHARTS = {
+ 'latency': {
+ 'options': [None, 'TCP connect latency', 'milliseconds', 'latency', 'portcheck.latency', 'line'],
+ 'lines': [
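+            # the value is collected in 0.1 ms units; the multiplier/divisor
+            # pair (100/1000) renders it as milliseconds on the chart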
+ [PORT_LATENCY, 'connect', 'absolute', 100, 1000]
+ ]
+ },
+ 'status': {
+ 'options': [None, 'Portcheck status', 'boolean', 'status', 'portcheck.status', 'line'],
+ 'lines': [
+ [PORT_SUCCESS, 'success', 'absolute'],
+ [PORT_TIMEOUT, 'timeout', 'absolute'],
+ [PORT_FAILED, 'no connection', 'absolute']
+ ]
+ }
+}
+
+
+# Not deriving from SocketService; too much is different
+class Service(SimpleService):
+ def __init__(self, configuration=None, name=None):
+ SimpleService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
+ self.host = self.configuration.get('host')
+ self.port = self.configuration.get('port')
+ self.timeout = self.configuration.get('timeout', 1)
+
+ def check(self):
+ """
+ Parse configuration, check if configuration is available, and dynamically create chart lines data
+ :return: boolean
+ """
+ if self.host is None or self.port is None:
+ self.error('Host or port missing')
+ return False
+ if not isinstance(self.port, int):
+            self.error('"port" is not an integer. Specify a numerical value, not a service name.')
+ return False
+
+ self.debug('Enabled portcheck: {host}:{port}, update every {update}s, timeout: {timeout}s'.format(
+ host=self.host, port=self.port, update=self.update_every, timeout=self.timeout
+ ))
+ # We will accept any (valid-ish) configuration, even if initial connection fails (a service might be down from
+ # the beginning)
+ return True
+
+ def _get_data(self):
+ """
+ Get data from socket
+ :return: dict
+ """
+ data = dict()
+ data[PORT_SUCCESS] = 0
+ data[PORT_TIMEOUT] = 0
+ data[PORT_FAILED] = 0
+
+ success = False
+ try:
+ for socket_config in socket.getaddrinfo(self.host, self.port, socket.AF_UNSPEC, socket.SOCK_STREAM):
+ # use first working socket
+ sock = self._create_socket(socket_config)
+ if sock is not None:
+ self._connect2socket(data, socket_config, sock)
+ self._disconnect(sock)
+ success = True
+ break
+ except socket.gaierror as error:
+ self.debug('Failed to connect to "{host}:{port}", error: {error}'.format(
+ host=self.host, port=self.port, error=error
+ ))
+
+ # We could not connect
+ if not success:
+ data[PORT_FAILED] = 1
+
+ return data
+
+ def _create_socket(self, socket_config):
+ af, sock_type, proto, _, sa = socket_config
+ try:
+ self.debug('Creating socket to "{address}", port {port}'.format(address=sa[0], port=sa[1]))
+ sock = socket.socket(af, sock_type, proto)
+ sock.settimeout(self.timeout)
+ return sock
+ except socket.error as error:
+ self.debug('Failed to create socket "{address}", port {port}, error: {error}'.format(
+ address=sa[0], port=sa[1], error=error
+ ))
+ return None
+
+ def _connect2socket(self, data, socket_config, sock):
+ """
+ Connect to a socket, passing the result of getaddrinfo()
+ :return: dict
+ """
+
+ _, _, _, _, sa = socket_config
+ port = str(sa[1])
+ try:
+ self.debug('Connecting socket to "{address}", port {port}'.format(address=sa[0], port=port))
+ start = time()
+ sock.connect(sa)
+ diff = time() - start
+ self.debug('Connected to "{address}", port {port}, latency {latency}'.format(
+ address=sa[0], port=port, latency=diff
+ ))
+            # report latency in 0.1 ms units, clamped to >= 0; 0.0 would mean a failed connection (handy for 3rd-party APIs)
+ data[PORT_LATENCY] = max(round(diff * 10000), 0)
+ data[PORT_SUCCESS] = 1
+
+ except socket.timeout as error:
+ self.debug('Socket timed out on "{address}", port {port}, error: {error}'.format(
+ address=sa[0], port=port, error=error
+ ))
+ data[PORT_TIMEOUT] = 1
+
+ except socket.error as error:
+ self.debug('Failed to connect to "{address}", port {port}, error: {error}'.format(
+ address=sa[0], port=port, error=error
+ ))
+ data[PORT_FAILED] = 1
+
+ def _disconnect(self, sock):
+ """
+ Close socket connection
+ :return:
+ """
+ if sock is not None:
+ try:
+ self.debug('Closing socket')
+                sock.shutdown(socket.SHUT_RDWR)  # stop both reads and writes
+ sock.close()
+ except socket.error:
+ pass
diff --git a/collectors/python.d.plugin/portcheck/portcheck.conf b/collectors/python.d.plugin/portcheck/portcheck.conf
new file mode 100644
index 0000000..df67824
--- /dev/null
+++ b/collectors/python.d.plugin/portcheck/portcheck.conf
@@ -0,0 +1,74 @@
+# netdata python.d.plugin configuration for portcheck
+#
+# This file is in YAML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# chart_cleanup sets the default chart cleanup interval in iterations.
+# A chart is marked as obsolete if it has not been updated
+# 'chart_cleanup' iterations in a row.
+# Obsolete charts are hidden immediately (not offered to dashboard viewers,
+# not streamed upstream, and not archived to backends) and deleted one hour
+# later (configurable from netdata.conf).
+# -- For this plugin, cleanup MUST be disabled, otherwise we lose the latency chart
+chart_cleanup: 0
+
+# Autodetection and retries do not work for this plugin
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# -------------------------------
+# ATTENTION: Any valid configuration will be accepted, even if initial connection fails!
+# -------------------------------
+#
+# There is intentionally no default config for 'localhost'
+
+# job_name:
+# name: myname # [optional] the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # [optional] the JOB's data collection frequency
+# priority: 60000 # [optional] the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# timeout: 1 # [optional] the socket timeout when connecting
+# host: 'dns or ip' # [required] the remote host address as an IPv4 or IPv6 address, or a DNS name.
+# port: 22 # [required] the port number to check. Specify an integer, not a service name.
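+
+# Example of a working job (hypothetical endpoint):
+#
+# ssh_example:
+#   host: '192.0.2.10'
+#   port: 22
+#   timeout: 1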
+
+# You have just been warned about possible portscan blocking. The portcheck plugin is meant for simple use cases.
+# Currently, the latency measurement has low accuracy and should be used as a reference only.
+
diff --git a/collectors/python.d.plugin/postfix/Makefile.inc b/collectors/python.d.plugin/postfix/Makefile.inc
new file mode 100644
index 0000000..f4091b2
--- /dev/null
+++ b/collectors/python.d.plugin/postfix/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += postfix/postfix.chart.py
+dist_pythonconfig_DATA += postfix/postfix.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += postfix/README.md postfix/Makefile.inc
+
diff --git a/collectors/python.d.plugin/postfix/README.md b/collectors/python.d.plugin/postfix/README.md
new file mode 100644
index 0000000..53073ea
--- /dev/null
+++ b/collectors/python.d.plugin/postfix/README.md
@@ -0,0 +1,27 @@
+<!--
+title: "Postfix monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/postfix/README.md
+sidebar_label: "Postfix"
+-->
+
+# Postfix monitoring with Netdata
+
+Monitors MTA email queue statistics using the `postqueue` tool.
+
+It executes `postqueue -p` to read the Postfix queue.
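+
+The module parses the summary line at the end of the `postqueue -p` output,
+which looks similar to the following (illustrative):
+
+```
+-- 4 Kbytes in 2 Requests.
+```
+
+When the queue is empty, the output ends with `Mail queue is empty` instead,
+and the module reports zeros for both charts.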
+
+It produces only two charts:
+
+1. **Postfix Queue Emails**
+
+ - emails
+
+2. **Postfix Queue Emails Size** in KiB
+
+ - size
+
+Configuration is not needed.
+
diff --git a/collectors/python.d.plugin/postfix/postfix.chart.py b/collectors/python.d.plugin/postfix/postfix.chart.py
new file mode 100644
index 0000000..b650514
--- /dev/null
+++ b/collectors/python.d.plugin/postfix/postfix.chart.py
@@ -0,0 +1,52 @@
+# -*- coding: utf-8 -*-
+# Description: postfix netdata python.d module
+# Author: Pawel Krupa (paulfantom)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from bases.FrameworkServices.ExecutableService import ExecutableService
+
+POSTQUEUE_COMMAND = 'postqueue -p'
+
+ORDER = [
+ 'qemails',
+ 'qsize',
+]
+
+CHARTS = {
+ 'qemails': {
+ 'options': [None, 'Postfix Queue Emails', 'emails', 'queue', 'postfix.qemails', 'line'],
+ 'lines': [
+ ['emails', None, 'absolute']
+ ]
+ },
+ 'qsize': {
+ 'options': [None, 'Postfix Queue Emails Size', 'KiB', 'queue', 'postfix.qsize', 'area'],
+ 'lines': [
+ ['size', None, 'absolute']
+ ]
+ }
+}
+
+
+class Service(ExecutableService):
+ def __init__(self, configuration=None, name=None):
+ ExecutableService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
+ self.command = POSTQUEUE_COMMAND
+
+ def _get_data(self):
+ """
+ Format data received from shell command
+ :return: dict
+ """
+ try:
+ raw = self._get_raw_data()[-1].split(' ')
+ if raw[0] == 'Mail' and raw[1] == 'queue':
+ return {'emails': 0,
+ 'size': 0}
+
+ return {'emails': raw[4],
+ 'size': raw[1]}
+        except (ValueError, AttributeError, TypeError, IndexError):
+            # also covers _get_raw_data() returning None (TypeError)
+            # and a summary line that is too short (IndexError)
+            return None
diff --git a/collectors/python.d.plugin/postfix/postfix.conf b/collectors/python.d.plugin/postfix/postfix.conf
new file mode 100644
index 0000000..a4d2472
--- /dev/null
+++ b/collectors/python.d.plugin/postfix/postfix.conf
@@ -0,0 +1,72 @@
+# netdata python.d.plugin configuration for postfix
+#
+# This file is in YAML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# postfix is slow, so once every 10 seconds
+update_every: 10
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# Additionally to the above, postfix also supports the following:
+#
+# command: 'postqueue -p' # the command to run
+#
+
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+
+local:
+ command: 'postqueue -p'
diff --git a/collectors/python.d.plugin/postgres/Makefile.inc b/collectors/python.d.plugin/postgres/Makefile.inc
new file mode 100644
index 0000000..91a185c
--- /dev/null
+++ b/collectors/python.d.plugin/postgres/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += postgres/postgres.chart.py
+dist_pythonconfig_DATA += postgres/postgres.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += postgres/README.md postgres/Makefile.inc
+
diff --git a/collectors/python.d.plugin/postgres/README.md b/collectors/python.d.plugin/postgres/README.md
new file mode 100644
index 0000000..dc9b184
--- /dev/null
+++ b/collectors/python.d.plugin/postgres/README.md
@@ -0,0 +1,99 @@
+<!--
+title: "PostgreSQL monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/postgres/README.md
+sidebar_label: "PostgreSQL"
+-->
+
+# PostgreSQL monitoring with Netdata
+
+Collects database health and performance metrics.
+
+## Requirements
+
+- `python-psycopg2` package. You have to install it manually and make sure that it is available to the `netdata` user, either using `pip`, the package manager of your Linux distribution, or any other method you prefer.
+
+The following charts are drawn:
+
+1. **Database size** MiB
+
+ - size
+
+2. **Current Backend Processes** processes

+ - active
+ - idle
+
+3. **Current Backend Process Usage** percentage
+
+ - used
+ - available
+
+4. **Write-Ahead Logging Statistics** files/s
+
+ - total
+ - ready
+ - done
+
+5. **Checkpoints** writes/s
+
+ - scheduled
+ - requested
+
+6. **Current connections to db** count
+
+ - connections
+
+7. **Tuples returned from db** tuples/s
+
+ - sequential
+ - bitmap
+
+8. **Disk block reads from db** reads/s
+
+ - disk
+ - cache
+
+9. **Transactions on db** transactions/s
+
+ - committed
+ - rolled back
+
+10. **Tuples written to db** writes/s
+
+ - inserted
+ - updated
+ - deleted
+ - conflicts
+
+11. **Locks on db** count per type
+
+ - locks
+
+## Configuration
+
+Edit the `python.d/postgres.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/postgres.conf
+```
+
+When no configuration file is found, the module tries to connect to the TCP/IP socket at `localhost:5432`.
+
+```yaml
+socket:
+ name : 'socket'
+ user : 'postgres'
+ database : 'postgres'
+
+tcp:
+ name : 'tcp'
+ user : 'postgres'
+ database : 'postgres'
+ host : 'localhost'
+ port : 5432
+```
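+
+The module also honors two optional per-job booleans, `table_stats` and
+`index_stats` (both disabled by default), which enable the table and index
+size/count charts:
+
+```yaml
+tcp:
+  name : 'tcp'
+  user : 'postgres'
+  database : 'postgres'
+  host : 'localhost'
+  port : 5432
+  table_stats : yes
+  index_stats : yes
+```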
+
diff --git a/collectors/python.d.plugin/postgres/postgres.chart.py b/collectors/python.d.plugin/postgres/postgres.chart.py
new file mode 100644
index 0000000..bd28dd9
--- /dev/null
+++ b/collectors/python.d.plugin/postgres/postgres.chart.py
@@ -0,0 +1,1235 @@
+# -*- coding: utf-8 -*-
+# Description: postgres netdata python.d module
+# Authors: facetoe, dangtranhoang
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from copy import deepcopy
+
+try:
+ import psycopg2
+ from psycopg2 import extensions
+ from psycopg2.extras import DictCursor
+ from psycopg2 import OperationalError
+
+ PSYCOPG2 = True
+except ImportError:
+ PSYCOPG2 = False
+
+from bases.FrameworkServices.SimpleService import SimpleService
+
+DEFAULT_PORT = 5432
+DEFAULT_USER = 'postgres'
+DEFAULT_CONNECT_TIMEOUT = 2 # seconds
+DEFAULT_STATEMENT_TIMEOUT = 5000 # ms
+
+CONN_PARAM_DSN = 'dsn'
+CONN_PARAM_HOST = 'host'
+CONN_PARAM_PORT = 'port'
+CONN_PARAM_DATABASE = 'database'
+CONN_PARAM_USER = 'user'
+CONN_PARAM_PASSWORD = 'password'
+CONN_PARAM_CONN_TIMEOUT = 'connect_timeout'
+CONN_PARAM_STATEMENT_TIMEOUT = 'statement_timeout'
+CONN_PARAM_SSL_MODE = 'sslmode'
+CONN_PARAM_SSL_ROOT_CERT = 'sslrootcert'
+CONN_PARAM_SSL_CRL = 'sslcrl'
+CONN_PARAM_SSL_CERT = 'sslcert'
+CONN_PARAM_SSL_KEY = 'sslkey'
+
+QUERY_NAME_WAL = 'WAL'
+QUERY_NAME_ARCHIVE = 'ARCHIVE'
+QUERY_NAME_BACKENDS = 'BACKENDS'
+QUERY_NAME_BACKEND_USAGE = 'BACKEND_USAGE'
+QUERY_NAME_TABLE_STATS = 'TABLE_STATS'
+QUERY_NAME_INDEX_STATS = 'INDEX_STATS'
+QUERY_NAME_DATABASE = 'DATABASE'
+QUERY_NAME_BGWRITER = 'BGWRITER'
+QUERY_NAME_LOCKS = 'LOCKS'
+QUERY_NAME_DATABASES = 'DATABASES'
+QUERY_NAME_STANDBY = 'STANDBY'
+QUERY_NAME_REPLICATION_SLOT = 'REPLICATION_SLOT'
+QUERY_NAME_STANDBY_DELTA = 'STANDBY_DELTA'
+QUERY_NAME_REPSLOT_FILES = 'REPSLOT_FILES'
+QUERY_NAME_IF_SUPERUSER = 'IF_SUPERUSER'
+QUERY_NAME_SERVER_VERSION = 'SERVER_VERSION'
+QUERY_NAME_AUTOVACUUM = 'AUTOVACUUM'
+QUERY_NAME_DIFF_LSN = 'DIFF_LSN'
+QUERY_NAME_WAL_WRITES = 'WAL_WRITES'
+
+METRICS = {
+ QUERY_NAME_DATABASE: [
+ 'connections',
+ 'xact_commit',
+ 'xact_rollback',
+ 'blks_read',
+ 'blks_hit',
+ 'tup_returned',
+ 'tup_fetched',
+ 'tup_inserted',
+ 'tup_updated',
+ 'tup_deleted',
+ 'conflicts',
+ 'temp_files',
+ 'temp_bytes',
+ 'size'
+ ],
+ QUERY_NAME_BACKENDS: [
+ 'backends_active',
+ 'backends_idle'
+ ],
+ QUERY_NAME_BACKEND_USAGE: [
+ 'available',
+ 'used'
+ ],
+ QUERY_NAME_INDEX_STATS: [
+ 'index_count',
+ 'index_size'
+ ],
+ QUERY_NAME_TABLE_STATS: [
+ 'table_size',
+ 'table_count'
+ ],
+ QUERY_NAME_WAL: [
+ 'written_wal',
+ 'recycled_wal',
+ 'total_wal'
+ ],
+ QUERY_NAME_WAL_WRITES: [
+ 'wal_writes'
+ ],
+ QUERY_NAME_ARCHIVE: [
+ 'ready_count',
+ 'done_count',
+ 'file_count'
+ ],
+ QUERY_NAME_BGWRITER: [
+ 'checkpoint_scheduled',
+ 'checkpoint_requested',
+ 'buffers_checkpoint',
+ 'buffers_clean',
+ 'maxwritten_clean',
+ 'buffers_backend',
+ 'buffers_alloc',
+ 'buffers_backend_fsync'
+ ],
+ QUERY_NAME_LOCKS: [
+ 'ExclusiveLock',
+ 'RowShareLock',
+ 'SIReadLock',
+ 'ShareUpdateExclusiveLock',
+ 'AccessExclusiveLock',
+ 'AccessShareLock',
+ 'ShareRowExclusiveLock',
+ 'ShareLock',
+ 'RowExclusiveLock'
+ ],
+ QUERY_NAME_AUTOVACUUM: [
+ 'analyze',
+ 'vacuum_analyze',
+ 'vacuum',
+ 'vacuum_freeze',
+ 'brin_summarize'
+ ],
+ QUERY_NAME_STANDBY_DELTA: [
+ 'sent_delta',
+ 'write_delta',
+ 'flush_delta',
+ 'replay_delta'
+ ],
+ QUERY_NAME_REPSLOT_FILES: [
+ 'replslot_wal_keep',
+ 'replslot_files'
+ ]
+}
+
+NO_VERSION = 0
+DEFAULT = 'DEFAULT'
+V72 = 'V72'
+V82 = 'V82'
+V91 = 'V91'
+V92 = 'V92'
+V96 = 'V96'
+V10 = 'V10'
+V11 = 'V11'
+
+QUERY_WAL = {
+ DEFAULT: """
+SELECT
+ count(*) as total_wal,
+ count(*) FILTER (WHERE type = 'recycled') AS recycled_wal,
+ count(*) FILTER (WHERE type = 'written') AS written_wal
+FROM
+ (SELECT
+ wal.name,
+ pg_walfile_name(
+ CASE pg_is_in_recovery()
+ WHEN true THEN NULL
+ ELSE pg_current_wal_lsn()
+ END ),
+ CASE
+ WHEN wal.name > pg_walfile_name(
+ CASE pg_is_in_recovery()
+ WHEN true THEN NULL
+ ELSE pg_current_wal_lsn()
+ END ) THEN 'recycled'
+ ELSE 'written'
+ END AS type
+ FROM pg_catalog.pg_ls_dir('pg_wal') AS wal(name)
+ WHERE name ~ '^[0-9A-F]{24}$'
+ ORDER BY
+ (pg_stat_file('pg_wal/'||name)).modification,
+ wal.name DESC) sub;
+""",
+ V96: """
+SELECT
+ count(*) as total_wal,
+ count(*) FILTER (WHERE type = 'recycled') AS recycled_wal,
+ count(*) FILTER (WHERE type = 'written') AS written_wal
+FROM
+ (SELECT
+ wal.name,
+ pg_xlogfile_name(
+ CASE pg_is_in_recovery()
+ WHEN true THEN NULL
+ ELSE pg_current_xlog_location()
+ END ),
+ CASE
+ WHEN wal.name > pg_xlogfile_name(
+ CASE pg_is_in_recovery()
+ WHEN true THEN NULL
+ ELSE pg_current_xlog_location()
+ END ) THEN 'recycled'
+ ELSE 'written'
+ END AS type
+ FROM pg_catalog.pg_ls_dir('pg_xlog') AS wal(name)
+ WHERE name ~ '^[0-9A-F]{24}$'
+ ORDER BY
+ (pg_stat_file('pg_xlog/'||name)).modification,
+ wal.name DESC) sub;
+""",
+}
+
+QUERY_ARCHIVE = {
+ DEFAULT: """
+SELECT
+ CAST(COUNT(*) AS INT) AS file_count,
+ CAST(COALESCE(SUM(CAST(archive_file ~ $r$\.ready$$r$ as INT)),0) AS INT) AS ready_count,
+ CAST(COALESCE(SUM(CAST(archive_file ~ $r$\.done$$r$ AS INT)),0) AS INT) AS done_count
+FROM
+ pg_catalog.pg_ls_dir('pg_wal/archive_status') AS archive_files (archive_file);
+""",
+ V96: """
+SELECT
+ CAST(COUNT(*) AS INT) AS file_count,
+ CAST(COALESCE(SUM(CAST(archive_file ~ $r$\.ready$$r$ as INT)),0) AS INT) AS ready_count,
+ CAST(COALESCE(SUM(CAST(archive_file ~ $r$\.done$$r$ AS INT)),0) AS INT) AS done_count
+FROM
+ pg_catalog.pg_ls_dir('pg_xlog/archive_status') AS archive_files (archive_file);
+""",
+}
+
+QUERY_BACKEND = {
+ DEFAULT: """
+SELECT
+ count(*) - (SELECT count(*)
+ FROM pg_stat_activity
+ WHERE state = 'idle')
+ AS backends_active,
+ (SELECT count(*)
+ FROM pg_stat_activity
+ WHERE state = 'idle')
+ AS backends_idle
+FROM pg_stat_activity;
+""",
+}
+
+QUERY_BACKEND_USAGE = {
+ DEFAULT: """
+SELECT
+ COUNT(1) as used,
+ current_setting('max_connections')::int - current_setting('superuser_reserved_connections')::int
+ - COUNT(1) AS available
+FROM pg_catalog.pg_stat_activity
+WHERE backend_type IN ('client backend', 'background worker');
+""",
+ V10: """
+SELECT
+ SUM(s.conn) as used,
+ current_setting('max_connections')::int - current_setting('superuser_reserved_connections')::int
+ - SUM(s.conn) AS available
+FROM (
+ SELECT 's' as type, COUNT(1) as conn
+ FROM pg_catalog.pg_stat_activity
+ WHERE backend_type IN ('client backend', 'background worker')
+ UNION ALL
+ SELECT 'r', COUNT(1)
+ FROM pg_catalog.pg_stat_replication
+) as s;
+""",
+ V92: """
+SELECT
+ SUM(s.conn) as used,
+ current_setting('max_connections')::int - current_setting('superuser_reserved_connections')::int
+ - SUM(s.conn) AS available
+FROM (
+ SELECT 's' as type, COUNT(1) as conn
+ FROM pg_catalog.pg_stat_activity
+ WHERE query NOT LIKE 'autovacuum: %%'
+ UNION ALL
+ SELECT 'r', COUNT(1)
+ FROM pg_catalog.pg_stat_replication
+) as s;
+""",
+ V91: """
+SELECT
+ SUM(s.conn) as used,
+ current_setting('max_connections')::int - current_setting('superuser_reserved_connections')::int
+ - SUM(s.conn) AS available
+FROM (
+ SELECT 's' as type, COUNT(1) as conn
+ FROM pg_catalog.pg_stat_activity
+ WHERE current_query NOT LIKE 'autovacuum: %%'
+ UNION ALL
+ SELECT 'r', COUNT(1)
+ FROM pg_catalog.pg_stat_replication
+) as s;
+""",
+ V82: """
+SELECT
+ COUNT(1) as used,
+ current_setting('max_connections')::int - current_setting('superuser_reserved_connections')::int
+ - COUNT(1) AS available
+FROM pg_catalog.pg_stat_activity
+WHERE current_query NOT LIKE 'autovacuum: %%';
+""",
+ V72: """
+SELECT
+ COUNT(1) as used,
+ current_setting('max_connections')::int - current_setting('superuser_reserved_connections')::int
+ - COUNT(1) AS available
+FROM pg_catalog.pg_stat_activity s
+JOIN pg_catalog.pg_database d ON d.oid = s.datid
+WHERE d.datallowconn;
+""",
+}
+
+QUERY_TABLE_STATS = {
+ DEFAULT: """
+SELECT
+ ((sum(relpages) * 8) * 1024) AS table_size,
+ count(1) AS table_count
+FROM pg_class
+WHERE relkind IN ('r', 't');
+""",
+}
+
+QUERY_INDEX_STATS = {
+ DEFAULT: """
+SELECT
+ ((sum(relpages) * 8) * 1024) AS index_size,
+ count(1) AS index_count
+FROM pg_class
+WHERE relkind = 'i';
+""",
+}
+
+QUERY_DATABASE = {
+ DEFAULT: """
+SELECT
+ datname AS database_name,
+ numbackends AS connections,
+ xact_commit AS xact_commit,
+ xact_rollback AS xact_rollback,
+ blks_read AS blks_read,
+ blks_hit AS blks_hit,
+ tup_returned AS tup_returned,
+ tup_fetched AS tup_fetched,
+ tup_inserted AS tup_inserted,
+ tup_updated AS tup_updated,
+ tup_deleted AS tup_deleted,
+ conflicts AS conflicts,
+ pg_database_size(datname) AS size,
+ temp_files AS temp_files,
+ temp_bytes AS temp_bytes
+FROM pg_stat_database
+WHERE datname IN %(databases)s ;
+""",
+}
+
+QUERY_BGWRITER = {
+ DEFAULT: """
+SELECT
+ checkpoints_timed AS checkpoint_scheduled,
+ checkpoints_req AS checkpoint_requested,
+ buffers_checkpoint * current_setting('block_size')::numeric buffers_checkpoint,
+ buffers_clean * current_setting('block_size')::numeric buffers_clean,
+ maxwritten_clean,
+ buffers_backend * current_setting('block_size')::numeric buffers_backend,
+ buffers_alloc * current_setting('block_size')::numeric buffers_alloc,
+ buffers_backend_fsync
+FROM pg_stat_bgwriter;
+""",
+}
+
+QUERY_LOCKS = {
+ DEFAULT: """
+SELECT
+ pg_database.datname as database_name,
+ mode,
+ count(mode) AS locks_count
+FROM pg_locks
+INNER JOIN pg_database
+ ON pg_database.oid = pg_locks.database
+GROUP BY datname, mode
+ORDER BY datname, mode;
+""",
+}
+
+QUERY_DATABASES = {
+ DEFAULT: """
+SELECT
+ datname
+FROM pg_stat_database
+WHERE
+ has_database_privilege(
+ (SELECT current_user), datname, 'connect')
+ AND NOT datname ~* '^template\d';
+""",
+}
+
+QUERY_STANDBY = {
+ DEFAULT: """
+SELECT
+ application_name
+FROM pg_stat_replication
+WHERE application_name IS NOT NULL
+GROUP BY application_name;
+""",
+}
+
+QUERY_REPLICATION_SLOT = {
+ DEFAULT: """
+SELECT slot_name
+FROM pg_replication_slots;
+"""
+}
+
+QUERY_STANDBY_DELTA = {
+ DEFAULT: """
+SELECT
+ application_name,
+ pg_wal_lsn_diff(
+ CASE pg_is_in_recovery()
+ WHEN true THEN pg_last_wal_receive_lsn()
+ ELSE pg_current_wal_lsn()
+ END,
+ sent_lsn) AS sent_delta,
+ pg_wal_lsn_diff(
+ CASE pg_is_in_recovery()
+ WHEN true THEN pg_last_wal_receive_lsn()
+ ELSE pg_current_wal_lsn()
+ END,
+ write_lsn) AS write_delta,
+ pg_wal_lsn_diff(
+ CASE pg_is_in_recovery()
+ WHEN true THEN pg_last_wal_receive_lsn()
+ ELSE pg_current_wal_lsn()
+ END,
+ flush_lsn) AS flush_delta,
+ pg_wal_lsn_diff(
+ CASE pg_is_in_recovery()
+ WHEN true THEN pg_last_wal_receive_lsn()
+ ELSE pg_current_wal_lsn()
+ END,
+ replay_lsn) AS replay_delta
+FROM pg_stat_replication
+WHERE application_name IS NOT NULL;
+""",
+ V96: """
+SELECT
+ application_name,
+ pg_xlog_location_diff(
+ CASE pg_is_in_recovery()
+ WHEN true THEN pg_last_xlog_receive_location()
+ ELSE pg_current_xlog_location()
+ END,
+ sent_location) AS sent_delta,
+ pg_xlog_location_diff(
+ CASE pg_is_in_recovery()
+ WHEN true THEN pg_last_xlog_receive_location()
+ ELSE pg_current_xlog_location()
+ END,
+ write_location) AS write_delta,
+ pg_xlog_location_diff(
+ CASE pg_is_in_recovery()
+ WHEN true THEN pg_last_xlog_receive_location()
+ ELSE pg_current_xlog_location()
+ END,
+ flush_location) AS flush_delta,
+ pg_xlog_location_diff(
+ CASE pg_is_in_recovery()
+ WHEN true THEN pg_last_xlog_receive_location()
+ ELSE pg_current_xlog_location()
+ END,
+ replay_location) AS replay_delta
+FROM pg_stat_replication
+WHERE application_name IS NOT NULL;
+""",
+}
+
+QUERY_REPSLOT_FILES = {
+ DEFAULT: """
+WITH wal_size AS (
+ SELECT
+ setting::int AS val
+ FROM pg_settings
+ WHERE name = 'wal_segment_size'
+ )
+SELECT
+ slot_name,
+ slot_type,
+ replslot_wal_keep,
+ count(slot_file) AS replslot_files
+FROM
+ (SELECT
+ slot.slot_name,
+ CASE
+ WHEN slot_file <> 'state' THEN 1
+ END AS slot_file ,
+ slot_type,
+ COALESCE (
+ floor(
+ (pg_wal_lsn_diff(pg_current_wal_lsn (),slot.restart_lsn)
+ - (pg_walfile_name_offset (restart_lsn)).file_offset) / (s.val)
+ ),0) AS replslot_wal_keep
+ FROM pg_replication_slots slot
+ LEFT JOIN (
+ SELECT
+ slot2.slot_name,
+ pg_ls_dir('pg_replslot/' || slot2.slot_name) AS slot_file
+ FROM pg_replication_slots slot2
+ ) files (slot_name, slot_file)
+ ON slot.slot_name = files.slot_name
+ CROSS JOIN wal_size s
+ ) AS d
+GROUP BY
+ slot_name,
+ slot_type,
+ replslot_wal_keep;
+""",
+ V10: """
+WITH wal_size AS (
+ SELECT
+ current_setting('wal_block_size')::INT * setting::INT AS val
+ FROM pg_settings
+ WHERE name = 'wal_segment_size'
+ )
+SELECT
+ slot_name,
+ slot_type,
+ replslot_wal_keep,
+ count(slot_file) AS replslot_files
+FROM
+ (SELECT
+ slot.slot_name,
+ CASE
+ WHEN slot_file <> 'state' THEN 1
+ END AS slot_file ,
+ slot_type,
+ COALESCE (
+ floor(
+ (pg_wal_lsn_diff(pg_current_wal_lsn (),slot.restart_lsn)
+ - (pg_walfile_name_offset (restart_lsn)).file_offset) / (s.val)
+ ),0) AS replslot_wal_keep
+ FROM pg_replication_slots slot
+ LEFT JOIN (
+ SELECT
+ slot2.slot_name,
+ pg_ls_dir('pg_replslot/' || slot2.slot_name) AS slot_file
+ FROM pg_replication_slots slot2
+ ) files (slot_name, slot_file)
+ ON slot.slot_name = files.slot_name
+ CROSS JOIN wal_size s
+ ) AS d
+GROUP BY
+ slot_name,
+ slot_type,
+ replslot_wal_keep;
+""",
+}
+
+QUERY_SUPERUSER = {
+ DEFAULT: """
+SELECT current_setting('is_superuser') = 'on' AS is_superuser;
+""",
+}
+
+QUERY_SHOW_VERSION = {
+ DEFAULT: """
+SHOW server_version_num;
+""",
+}
+
+QUERY_AUTOVACUUM = {
+ DEFAULT: """
+SELECT
+ count(*) FILTER (WHERE query LIKE 'autovacuum: ANALYZE%%') AS analyze,
+ count(*) FILTER (WHERE query LIKE 'autovacuum: VACUUM ANALYZE%%') AS vacuum_analyze,
+ count(*) FILTER (WHERE query LIKE 'autovacuum: VACUUM%%'
+ AND query NOT LIKE 'autovacuum: VACUUM ANALYZE%%'
+ AND query NOT LIKE '%%to prevent wraparound%%') AS vacuum,
+ count(*) FILTER (WHERE query LIKE '%%to prevent wraparound%%') AS vacuum_freeze,
+ count(*) FILTER (WHERE query LIKE 'autovacuum: BRIN summarize%%') AS brin_summarize
+FROM pg_stat_activity
+WHERE query NOT LIKE '%%pg_stat_activity%%';
+""",
+}
+
+QUERY_DIFF_LSN = {
+ DEFAULT: """
+SELECT
+ pg_wal_lsn_diff(
+ CASE pg_is_in_recovery()
+ WHEN true THEN pg_last_wal_receive_lsn()
+ ELSE pg_current_wal_lsn()
+ END,
+ '0/0') as wal_writes ;
+""",
+ V96: """
+SELECT
+ pg_xlog_location_diff(
+ CASE pg_is_in_recovery()
+ WHEN true THEN pg_last_xlog_receive_location()
+ ELSE pg_current_xlog_location()
+ END,
+ '0/0') as wal_writes ;
+""",
+}
+
+
+def query_factory(name, version=NO_VERSION):
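+    # 'version' is the integer server version as returned by
+    # 'SHOW server_version_num' (e.g. 90600 for 9.6, 100000 for 10.0);
+    # it selects the query variant matching the server's SQL dialect.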
+ if name == QUERY_NAME_BACKENDS:
+ return QUERY_BACKEND[DEFAULT]
+ elif name == QUERY_NAME_BACKEND_USAGE:
+ if version < 80200:
+ return QUERY_BACKEND_USAGE[V72]
+ if version < 90100:
+ return QUERY_BACKEND_USAGE[V82]
+ if version < 90200:
+ return QUERY_BACKEND_USAGE[V91]
+ if version < 100000:
+ return QUERY_BACKEND_USAGE[V92]
+ elif version < 120000:
+ return QUERY_BACKEND_USAGE[V10]
+ return QUERY_BACKEND_USAGE[DEFAULT]
+ elif name == QUERY_NAME_TABLE_STATS:
+ return QUERY_TABLE_STATS[DEFAULT]
+ elif name == QUERY_NAME_INDEX_STATS:
+ return QUERY_INDEX_STATS[DEFAULT]
+ elif name == QUERY_NAME_DATABASE:
+ return QUERY_DATABASE[DEFAULT]
+ elif name == QUERY_NAME_BGWRITER:
+ return QUERY_BGWRITER[DEFAULT]
+ elif name == QUERY_NAME_LOCKS:
+ return QUERY_LOCKS[DEFAULT]
+ elif name == QUERY_NAME_DATABASES:
+ return QUERY_DATABASES[DEFAULT]
+ elif name == QUERY_NAME_STANDBY:
+ return QUERY_STANDBY[DEFAULT]
+ elif name == QUERY_NAME_REPLICATION_SLOT:
+ return QUERY_REPLICATION_SLOT[DEFAULT]
+ elif name == QUERY_NAME_IF_SUPERUSER:
+ return QUERY_SUPERUSER[DEFAULT]
+ elif name == QUERY_NAME_SERVER_VERSION:
+ return QUERY_SHOW_VERSION[DEFAULT]
+ elif name == QUERY_NAME_AUTOVACUUM:
+ return QUERY_AUTOVACUUM[DEFAULT]
+ elif name == QUERY_NAME_WAL:
+ if version < 100000:
+ return QUERY_WAL[V96]
+ return QUERY_WAL[DEFAULT]
+ elif name == QUERY_NAME_ARCHIVE:
+ if version < 100000:
+ return QUERY_ARCHIVE[V96]
+ return QUERY_ARCHIVE[DEFAULT]
+ elif name == QUERY_NAME_STANDBY_DELTA:
+ if version < 100000:
+ return QUERY_STANDBY_DELTA[V96]
+ return QUERY_STANDBY_DELTA[DEFAULT]
+ elif name == QUERY_NAME_REPSLOT_FILES:
+ if version < 110000:
+ return QUERY_REPSLOT_FILES[V10]
+ return QUERY_REPSLOT_FILES[DEFAULT]
+ elif name == QUERY_NAME_DIFF_LSN:
+ if version < 100000:
+ return QUERY_DIFF_LSN[V96]
+ return QUERY_DIFF_LSN[DEFAULT]
+
+    raise ValueError('unknown query: {0}'.format(name))
+
+
+ORDER = [
+ 'db_stat_temp_files',
+ 'db_stat_temp_bytes',
+ 'db_stat_blks',
+ 'db_stat_tuple_returned',
+ 'db_stat_tuple_write',
+ 'db_stat_transactions',
+ 'db_stat_connections',
+ 'database_size',
+ 'backend_process',
+ 'backend_usage',
+ 'index_count',
+ 'index_size',
+ 'table_count',
+ 'table_size',
+ 'wal',
+ 'wal_writes',
+ 'archive_wal',
+ 'checkpointer',
+ 'stat_bgwriter_alloc',
+ 'stat_bgwriter_checkpoint',
+ 'stat_bgwriter_backend',
+ 'stat_bgwriter_backend_fsync',
+ 'stat_bgwriter_bgwriter',
+ 'stat_bgwriter_maxwritten',
+ 'replication_slot',
+ 'standby_delta',
+ 'autovacuum'
+]
+
+CHARTS = {
+ 'db_stat_transactions': {
+ 'options': [None, 'Transactions on db', 'transactions/s', 'db statistics', 'postgres.db_stat_transactions',
+ 'line'],
+ 'lines': [
+ ['xact_commit', 'committed', 'incremental'],
+ ['xact_rollback', 'rolled back', 'incremental']
+ ]
+ },
+ 'db_stat_connections': {
+ 'options': [None, 'Current connections to db', 'count', 'db statistics', 'postgres.db_stat_connections',
+ 'line'],
+ 'lines': [
+ ['connections', 'connections', 'absolute']
+ ]
+ },
+ 'db_stat_blks': {
+        'options': [None, 'Disk block reads from db', 'reads/s', 'db statistics', 'postgres.db_stat_blks', 'line'],
+ 'lines': [
+ ['blks_read', 'disk', 'incremental'],
+ ['blks_hit', 'cache', 'incremental']
+ ]
+ },
+ 'db_stat_tuple_returned': {
+ 'options': [None, 'Tuples returned from db', 'tuples/s', 'db statistics', 'postgres.db_stat_tuple_returned',
+ 'line'],
+ 'lines': [
+ ['tup_returned', 'sequential', 'incremental'],
+ ['tup_fetched', 'bitmap', 'incremental']
+ ]
+ },
+ 'db_stat_tuple_write': {
+ 'options': [None, 'Tuples written to db', 'writes/s', 'db statistics', 'postgres.db_stat_tuple_write', 'line'],
+ 'lines': [
+ ['tup_inserted', 'inserted', 'incremental'],
+ ['tup_updated', 'updated', 'incremental'],
+ ['tup_deleted', 'deleted', 'incremental'],
+ ['conflicts', 'conflicts', 'incremental']
+ ]
+ },
+ 'db_stat_temp_bytes': {
+ 'options': [None, 'Temp files written to disk', 'KiB/s', 'db statistics', 'postgres.db_stat_temp_bytes',
+ 'line'],
+ 'lines': [
+ ['temp_bytes', 'size', 'incremental', 1, 1024]
+ ]
+ },
+ 'db_stat_temp_files': {
+ 'options': [None, 'Temp files written to disk', 'files', 'db statistics', 'postgres.db_stat_temp_files',
+ 'line'],
+ 'lines': [
+ ['temp_files', 'files', 'incremental']
+ ]
+ },
+ 'database_size': {
+ 'options': [None, 'Database size', 'MiB', 'database size', 'postgres.db_size', 'stacked'],
+ 'lines': [
+ ]
+ },
+ 'backend_process': {
+ 'options': [None, 'Current Backend Processes', 'processes', 'backend processes', 'postgres.backend_process',
+ 'line'],
+ 'lines': [
+ ['backends_active', 'active', 'absolute'],
+ ['backends_idle', 'idle', 'absolute']
+ ]
+ },
+ 'backend_usage': {
+ 'options': [None, '% of Connections in use', 'percentage', 'backend processes', 'postgres.backend_usage', 'stacked'],
+ 'lines': [
+ ['available', 'available', 'percentage-of-absolute-row'],
+ ['used', 'used', 'percentage-of-absolute-row']
+ ]
+ },
+ 'index_count': {
+ 'options': [None, 'Total indexes', 'index', 'indexes', 'postgres.index_count', 'line'],
+ 'lines': [
+ ['index_count', 'total', 'absolute']
+ ]
+ },
+ 'index_size': {
+ 'options': [None, 'Indexes size', 'MiB', 'indexes', 'postgres.index_size', 'line'],
+ 'lines': [
+ ['index_size', 'size', 'absolute', 1, 1024 * 1024]
+ ]
+ },
+ 'table_count': {
+ 'options': [None, 'Total Tables', 'tables', 'tables', 'postgres.table_count', 'line'],
+ 'lines': [
+ ['table_count', 'total', 'absolute']
+ ]
+ },
+ 'table_size': {
+ 'options': [None, 'Tables size', 'MiB', 'tables', 'postgres.table_size', 'line'],
+ 'lines': [
+ ['table_size', 'size', 'absolute', 1, 1024 * 1024]
+ ]
+ },
+ 'wal': {
+ 'options': [None, 'Write-Ahead Logs', 'files', 'wal', 'postgres.wal', 'line'],
+ 'lines': [
+ ['written_wal', 'written', 'absolute'],
+ ['recycled_wal', 'recycled', 'absolute'],
+ ['total_wal', 'total', 'absolute']
+ ]
+ },
+ 'wal_writes': {
+ 'options': [None, 'Write-Ahead Logs', 'KiB/s', 'wal_writes', 'postgres.wal_writes', 'line'],
+ 'lines': [
+ ['wal_writes', 'writes', 'incremental', 1, 1024]
+ ]
+ },
+ 'archive_wal': {
+ 'options': [None, 'Archive Write-Ahead Logs', 'files/s', 'archive wal', 'postgres.archive_wal', 'line'],
+ 'lines': [
+ ['file_count', 'total', 'incremental'],
+ ['ready_count', 'ready', 'incremental'],
+ ['done_count', 'done', 'incremental']
+ ]
+ },
+ 'checkpointer': {
+ 'options': [None, 'Checkpoints', 'writes', 'checkpointer', 'postgres.checkpointer', 'line'],
+ 'lines': [
+ ['checkpoint_scheduled', 'scheduled', 'incremental'],
+ ['checkpoint_requested', 'requested', 'incremental']
+ ]
+ },
+ 'stat_bgwriter_alloc': {
+ 'options': [None, 'Buffers allocated', 'KiB/s', 'bgwriter', 'postgres.stat_bgwriter_alloc', 'line'],
+ 'lines': [
+ ['buffers_alloc', 'alloc', 'incremental', 1, 1024]
+ ]
+ },
+ 'stat_bgwriter_checkpoint': {
+ 'options': [None, 'Buffers written during checkpoints', 'KiB/s', 'bgwriter',
+ 'postgres.stat_bgwriter_checkpoint', 'line'],
+ 'lines': [
+ ['buffers_checkpoint', 'checkpoint', 'incremental', 1, 1024]
+ ]
+ },
+ 'stat_bgwriter_backend': {
+ 'options': [None, 'Buffers written directly by a backend', 'KiB/s', 'bgwriter',
+ 'postgres.stat_bgwriter_backend', 'line'],
+ 'lines': [
+ ['buffers_backend', 'backend', 'incremental', 1, 1024]
+ ]
+ },
+ 'stat_bgwriter_backend_fsync': {
+ 'options': [None, 'Fsync by backend', 'times', 'bgwriter', 'postgres.stat_bgwriter_backend_fsync', 'line'],
+ 'lines': [
+ ['buffers_backend_fsync', 'backend fsync', 'incremental']
+ ]
+ },
+ 'stat_bgwriter_bgwriter': {
+ 'options': [None, 'Buffers written by the background writer', 'KiB/s', 'bgwriter',
+ 'postgres.bgwriter_bgwriter', 'line'],
+ 'lines': [
+ ['buffers_clean', 'clean', 'incremental', 1, 1024]
+ ]
+ },
+ 'stat_bgwriter_maxwritten': {
+ 'options': [None, 'Too many buffers written', 'times', 'bgwriter', 'postgres.stat_bgwriter_maxwritten',
+ 'line'],
+ 'lines': [
+ ['maxwritten_clean', 'maxwritten', 'incremental']
+ ]
+ },
+ 'autovacuum': {
+ 'options': [None, 'Autovacuum workers', 'workers', 'autovacuum', 'postgres.autovacuum', 'line'],
+ 'lines': [
+ ['analyze', 'analyze', 'absolute'],
+ ['vacuum', 'vacuum', 'absolute'],
+ ['vacuum_analyze', 'vacuum analyze', 'absolute'],
+ ['vacuum_freeze', 'vacuum freeze', 'absolute'],
+ ['brin_summarize', 'brin summarize', 'absolute']
+ ]
+ },
+ 'standby_delta': {
+ 'options': [None, 'Standby delta', 'KiB', 'replication delta', 'postgres.standby_delta', 'line'],
+ 'lines': [
+ ['sent_delta', 'sent delta', 'absolute', 1, 1024],
+ ['write_delta', 'write delta', 'absolute', 1, 1024],
+ ['flush_delta', 'flush delta', 'absolute', 1, 1024],
+ ['replay_delta', 'replay delta', 'absolute', 1, 1024]
+ ]
+ },
+ 'replication_slot': {
+ 'options': [None, 'Replication slot files', 'files', 'replication slot', 'postgres.replication_slot', 'line'],
+ 'lines': [
+            ['replslot_wal_keep', 'wal kept', 'absolute'],
+ ['replslot_files', 'pg_replslot files', 'absolute']
+ ]
+ }
+}
+
+
+class Service(SimpleService):
+ def __init__(self, configuration=None, name=None):
+ SimpleService.__init__(self, configuration=configuration, name=name)
+ self.order = list(ORDER)
+ self.definitions = deepcopy(CHARTS)
+ self.do_table_stats = configuration.pop('table_stats', False)
+ self.do_index_stats = configuration.pop('index_stats', False)
+ self.databases_to_poll = configuration.pop('database_poll', None)
+ self.configuration = configuration
+ self.conn = None
+ self.conn_params = dict()
+ self.server_version = None
+ self.is_superuser = False
+ self.alive = False
+ self.databases = list()
+ self.secondaries = list()
+ self.replication_slots = list()
+ self.queries = dict()
+ self.data = dict()
+
+ def reconnect(self):
+ return self.connect()
+
+ def build_conn_params(self):
+ conf = self.configuration
+
+ # connection URIs: https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING
+ if conf.get(CONN_PARAM_DSN):
+ return {'dsn': conf[CONN_PARAM_DSN]}
+
+ params = {
+ CONN_PARAM_HOST: conf.get(CONN_PARAM_HOST),
+ CONN_PARAM_PORT: conf.get(CONN_PARAM_PORT, DEFAULT_PORT),
+ CONN_PARAM_DATABASE: conf.get(CONN_PARAM_DATABASE),
+ CONN_PARAM_USER: conf.get(CONN_PARAM_USER, DEFAULT_USER),
+ CONN_PARAM_PASSWORD: conf.get(CONN_PARAM_PASSWORD),
+ CONN_PARAM_CONN_TIMEOUT: conf.get(CONN_PARAM_CONN_TIMEOUT, DEFAULT_CONNECT_TIMEOUT),
+ 'options': '-c statement_timeout={0}'.format(
+ conf.get(CONN_PARAM_STATEMENT_TIMEOUT, DEFAULT_STATEMENT_TIMEOUT)),
+ }
+
+ # https://www.postgresql.org/docs/current/libpq-ssl.html
+ ssl_params = dict(
+ (k, v) for k, v in {
+ CONN_PARAM_SSL_MODE: conf.get(CONN_PARAM_SSL_MODE),
+ CONN_PARAM_SSL_ROOT_CERT: conf.get(CONN_PARAM_SSL_ROOT_CERT),
+ CONN_PARAM_SSL_CRL: conf.get(CONN_PARAM_SSL_CRL),
+ CONN_PARAM_SSL_CERT: conf.get(CONN_PARAM_SSL_CERT),
+ CONN_PARAM_SSL_KEY: conf.get(CONN_PARAM_SSL_KEY),
+ }.items() if v)
+
+ if CONN_PARAM_SSL_MODE not in ssl_params and len(ssl_params) > 0:
+            raise ValueError("mandatory 'sslmode' param is missing, please set it")
+
+ params.update(ssl_params)
+
+ return params
+
+ def connect(self):
+ if self.conn:
+ self.conn.close()
+ self.conn = None
+
+ try:
+ self.conn = psycopg2.connect(**self.conn_params)
+ self.conn.set_isolation_level(extensions.ISOLATION_LEVEL_AUTOCOMMIT)
+ self.conn.set_session(readonly=True)
+ except OperationalError as error:
+ self.error(error)
+ self.alive = False
+ else:
+ self.alive = True
+
+ return self.alive
+
+ def check(self):
+ if not PSYCOPG2:
+ self.error("'python-psycopg2' package is needed to use postgres module")
+ return False
+
+ try:
+ self.conn_params = self.build_conn_params()
+ except ValueError as error:
+            self.error('error on creating connection params : {0}'.format(error))
+ return False
+
+ if not self.connect():
+ self.error('failed to connect to {0}'.format(hide_password(self.conn_params)))
+ return False
+
+ try:
+ self.check_queries()
+ except Exception as error:
+ self.error(error)
+ return False
+
+ self.populate_queries()
+ self.create_dynamic_charts()
+
+ return True
+
+ def get_data(self):
+ if not self.alive and not self.reconnect():
+ return None
+
+ self.data = dict()
+ try:
+ cursor = self.conn.cursor(cursor_factory=DictCursor)
+
+ self.data.update(zero_lock_types(self.databases))
+
+ for query, metrics in self.queries.items():
+ self.query_stats(cursor, query, metrics)
+
+ except OperationalError:
+ self.alive = False
+ return None
+
+ cursor.close()
+
+ return self.data
+
+ def query_stats(self, cursor, query, metrics):
+ cursor.execute(query, dict(databases=tuple(self.databases)))
+
+ for row in cursor:
+ for metric in metrics:
+ # databases
+ if 'database_name' in row:
+ dimension_id = '_'.join([row['database_name'], metric])
+ # secondaries
+ elif 'application_name' in row:
+ dimension_id = '_'.join([row['application_name'], metric])
+ # replication slots
+ elif 'slot_name' in row:
+ dimension_id = '_'.join([row['slot_name'], metric])
+ # other
+ else:
+ dimension_id = metric
+
+ if metric in row:
+ if row[metric] is not None:
+ self.data[dimension_id] = int(row[metric])
+ elif 'locks_count' in row:
+ if metric == row['mode']:
+ self.data[dimension_id] = row['locks_count']
+
+ def check_queries(self):
+ cursor = self.conn.cursor()
+
+ self.server_version = detect_server_version(cursor, query_factory(QUERY_NAME_SERVER_VERSION))
+ self.debug('server version: {0}'.format(self.server_version))
+
+ self.is_superuser = check_if_superuser(cursor, query_factory(QUERY_NAME_IF_SUPERUSER))
+ self.debug('superuser: {0}'.format(self.is_superuser))
+
+ self.databases = discover(cursor, query_factory(QUERY_NAME_DATABASES))
+ self.debug('discovered databases {0}'.format(self.databases))
+ if self.databases_to_poll:
+ to_poll = self.databases_to_poll.split()
+ self.databases = [db for db in self.databases if db in to_poll] or self.databases
+
+ self.secondaries = discover(cursor, query_factory(QUERY_NAME_STANDBY))
+ self.debug('discovered secondaries: {0}'.format(self.secondaries))
+
+        if self.server_version >= 90400:  # replication slots were introduced in PostgreSQL 9.4
+ self.replication_slots = discover(cursor, query_factory(QUERY_NAME_REPLICATION_SLOT))
+ self.debug('discovered replication slots: {0}'.format(self.replication_slots))
+
+ cursor.close()
+
+ def populate_queries(self):
+ self.queries[query_factory(QUERY_NAME_DATABASE)] = METRICS[QUERY_NAME_DATABASE]
+ self.queries[query_factory(QUERY_NAME_BACKENDS)] = METRICS[QUERY_NAME_BACKENDS]
+ self.queries[query_factory(QUERY_NAME_BACKEND_USAGE, self.server_version)] = METRICS[QUERY_NAME_BACKEND_USAGE]
+ self.queries[query_factory(QUERY_NAME_LOCKS)] = METRICS[QUERY_NAME_LOCKS]
+ self.queries[query_factory(QUERY_NAME_BGWRITER)] = METRICS[QUERY_NAME_BGWRITER]
+ self.queries[query_factory(QUERY_NAME_DIFF_LSN, self.server_version)] = METRICS[QUERY_NAME_WAL_WRITES]
+ self.queries[query_factory(QUERY_NAME_STANDBY_DELTA, self.server_version)] = METRICS[QUERY_NAME_STANDBY_DELTA]
+
+ if self.do_index_stats:
+ self.queries[query_factory(QUERY_NAME_INDEX_STATS)] = METRICS[QUERY_NAME_INDEX_STATS]
+ if self.do_table_stats:
+ self.queries[query_factory(QUERY_NAME_TABLE_STATS)] = METRICS[QUERY_NAME_TABLE_STATS]
+
+ if self.is_superuser:
+ self.queries[query_factory(QUERY_NAME_ARCHIVE, self.server_version)] = METRICS[QUERY_NAME_ARCHIVE]
+
+ if self.server_version >= 90400:
+ self.queries[query_factory(QUERY_NAME_WAL, self.server_version)] = METRICS[QUERY_NAME_WAL]
+
+ if self.server_version >= 100000:
+ v = METRICS[QUERY_NAME_REPSLOT_FILES]
+ self.queries[query_factory(QUERY_NAME_REPSLOT_FILES, self.server_version)] = v
+
+ if self.server_version >= 90400:
+ self.queries[query_factory(QUERY_NAME_AUTOVACUUM)] = METRICS[QUERY_NAME_AUTOVACUUM]
+
+ def create_dynamic_charts(self):
+ for database_name in self.databases[::-1]:
+ dim = [
+ database_name + '_size',
+ database_name,
+ 'absolute',
+ 1,
+ 1024 * 1024,
+ ]
+ self.definitions['database_size']['lines'].append(dim)
+ for chart_name in [name for name in self.order if name.startswith('db_stat')]:
+ add_database_stat_chart(
+ order=self.order,
+ definitions=self.definitions,
+ name=chart_name,
+ database_name=database_name,
+ )
+ add_database_lock_chart(
+ order=self.order,
+ definitions=self.definitions,
+ database_name=database_name,
+ )
+
+ for application_name in self.secondaries[::-1]:
+ add_replication_delta_chart(
+ order=self.order,
+ definitions=self.definitions,
+ name='standby_delta',
+ application_name=application_name,
+ )
+
+ for slot_name in self.replication_slots[::-1]:
+ add_replication_slot_chart(
+ order=self.order,
+ definitions=self.definitions,
+ name='replication_slot',
+ slot_name=slot_name,
+ )
+
+
+def discover(cursor, query):
+ cursor.execute(query)
+ result = list()
+ for v in [value[0] for value in cursor]:
+ if v not in result:
+ result.append(v)
+ return result
+
+
+def check_if_superuser(cursor, query):
+ cursor.execute(query)
+ return cursor.fetchone()[0]
+
+
+def detect_server_version(cursor, query):
+ cursor.execute(query)
+ return int(cursor.fetchone()[0])
+
+
+def zero_lock_types(databases):
+ result = dict()
+ for database in databases:
+ for lock_type in METRICS['LOCKS']:
+ key = '_'.join([database, lock_type])
+ result[key] = 0
+
+ return result
+
+
+def hide_password(config):
+ return dict((k, v if k != 'password' or not v else '*****') for k, v in config.items())
+
+
+def add_database_lock_chart(order, definitions, database_name):
+ def create_lines(database):
+ result = list()
+ for lock_type in METRICS['LOCKS']:
+ dimension_id = '_'.join([database, lock_type])
+ result.append([dimension_id, lock_type, 'absolute'])
+ return result
+
+ chart_name = database_name + '_locks'
+ order.insert(-1, chart_name)
+ definitions[chart_name] = {
+ 'options':
+ [None, 'Locks on db: ' + database_name, 'locks', 'db ' + database_name, 'postgres.db_locks', 'line'],
+ 'lines': create_lines(database_name)
+ }
+
+
+def add_database_stat_chart(order, definitions, name, database_name):
+ def create_lines(database, lines):
+ result = list()
+ for line in lines:
+ new_line = ['_'.join([database, line[0]])] + line[1:]
+ result.append(new_line)
+ return result
+
+ chart_template = CHARTS[name]
+ chart_name = '_'.join([database_name, name])
+ order.insert(0, chart_name)
+ name, title, units, _, context, chart_type = chart_template['options']
+ definitions[chart_name] = {
+ 'options': [name, title + ': ' + database_name, units, 'db ' + database_name, context, chart_type],
+ 'lines': create_lines(database_name, chart_template['lines'])}
+
+
+def add_replication_delta_chart(order, definitions, name, application_name):
+ def create_lines(standby, lines):
+ result = list()
+ for line in lines:
+ new_line = ['_'.join([standby, line[0]])] + line[1:]
+ result.append(new_line)
+ return result
+
+ chart_template = CHARTS[name]
+ chart_name = '_'.join([application_name, name])
+ position = order.index('database_size')
+ order.insert(position, chart_name)
+ name, title, units, _, context, chart_type = chart_template['options']
+ definitions[chart_name] = {
+ 'options': [name, title + ': ' + application_name, units, 'replication delta', context, chart_type],
+ 'lines': create_lines(application_name, chart_template['lines'])}
+
+
+def add_replication_slot_chart(order, definitions, name, slot_name):
+ def create_lines(slot, lines):
+ result = list()
+ for line in lines:
+ new_line = ['_'.join([slot, line[0]])] + line[1:]
+ result.append(new_line)
+ return result
+
+ chart_template = CHARTS[name]
+ chart_name = '_'.join([slot_name, name])
+ position = order.index('database_size')
+ order.insert(position, chart_name)
+ name, title, units, _, context, chart_type = chart_template['options']
+ definitions[chart_name] = {
+ 'options': [name, title + ': ' + slot_name, units, 'replication slot files', context, chart_type],
+ 'lines': create_lines(slot_name, chart_template['lines'])}
diff --git a/collectors/python.d.plugin/postgres/postgres.conf b/collectors/python.d.plugin/postgres/postgres.conf
new file mode 100644
index 0000000..1970a7a
--- /dev/null
+++ b/collectors/python.d.plugin/postgres/postgres.conf
@@ -0,0 +1,141 @@
+# netdata python.d.plugin configuration for postgresql
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts on the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply a penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them is allowed to run at any
+# given time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear on the
+# # dashboard (by default this is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# A single connection is required in order to pull statistics.
+#
+# Connections can be configured with the following options:
+#
+# dsn : 'connection URI' # see https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING
+#
+# OR
+#
+# database : 'example_db_name'
+# user : 'example_user'
+# password : 'example_pass'
+# host : 'localhost'
+# port : 5432
+# connect_timeout : 2 # in seconds, default is 2
+# statement_timeout : 2000 # in ms, default is 2000
+# sslmode : mode # one of [disable, allow, prefer, require, verify-ca, verify-full]
+# sslrootcert : path/to/rootcert # the location of the root certificate file
+# sslcrl : path/to/crl # the location of the CRL file
+# sslcert : path/to/cert # the location of the client certificate file
+# sslkey : path/to/key # the location of the client key file
+#
+# SSL connection parameters description: https://www.postgresql.org/docs/current/libpq-ssl.html
+#
+# Additionally, the following options allow selective disabling of charts
+#
+# table_stats : false
+# index_stats : false
+# database_poll : 'dbase_name1 dbase_name2' # poll only specified databases (all others will be excluded from charts)
+#
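+# As an illustration only (the job name and credentials below are hypothetical),
+# a job using a connection URI with table/index charts disabled might look like:
+#
+# dsn_example:
+#   name        : 'dsn_example'
+#   dsn         : 'postgresql://netdata:secret@127.0.0.1:5432/postgres?connect_timeout=2'
+#   table_stats : false
+#   index_stats : false
+#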
+# Postgres permissions are configured in its pg_hba.conf file. You can
+# "trust" local clients to allow netdata to connect, or you can create
+# a postgres user for netdata and add its password below to allow
+# netdata to connect.
+#
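+# For example (hypothetical credentials), such a user could be created with:
+#
+#   CREATE USER netdata WITH PASSWORD 'netdata';
+#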
+# Please note that when Postgres runs inside a container, the client
+# (Netdata) is not considered local unless it runs inside the same
+# container.
+#
+# Supported Postgres versions are:
+# - 9.3 (without autovacuum)
+# - 9.4
+# - 9.5
+# - 9.6
+# - 10
+#
+# Superuser access is needed for these charts:
+# Write-Ahead Logs
+# Archive Write-Ahead Logs
+#
+# Autovacuum charts are available since Postgres 9.4
+# ----------------------------------------------------------------------
+
+socket:
+ name : 'local'
+ user : 'postgres'
+ database : 'postgres'
+
+tcp:
+ name : 'local'
+ database : 'postgres'
+ user : 'postgres'
+ password : 'postgres'
+ host : 'localhost'
+ port : 5432
+
+tcpipv4:
+ name : 'local'
+ database : 'postgres'
+ user : 'postgres'
+ password : 'postgres'
+ host : '127.0.0.1'
+ port : 5432
+
+tcpipv6:
+ name : 'local'
+ database : 'postgres'
+ user : 'postgres'
+ password : 'postgres'
+ host : '::1'
+ port : 5432
diff --git a/collectors/python.d.plugin/powerdns/Makefile.inc b/collectors/python.d.plugin/powerdns/Makefile.inc
new file mode 100644
index 0000000..256d32a
--- /dev/null
+++ b/collectors/python.d.plugin/powerdns/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += powerdns/powerdns.chart.py
+dist_pythonconfig_DATA += powerdns/powerdns.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += powerdns/README.md powerdns/Makefile.inc
+
diff --git a/collectors/python.d.plugin/powerdns/README.md b/collectors/python.d.plugin/powerdns/README.md
new file mode 100644
index 0000000..610a665
--- /dev/null
+++ b/collectors/python.d.plugin/powerdns/README.md
@@ -0,0 +1,104 @@
+<!--
+title: "PowerDNS monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/powerdns/README.md
+sidebar_label: "PowerDNS"
+-->
+
+# PowerDNS monitoring with Netdata
+
+Monitors authoritative server and recursor statistics.
+
+PowerDNS Authoritative Server charts:
+
+1. **Queries and Answers**
+
+ - udp-queries
+ - udp-answers
+ - tcp-queries
+ - tcp-answers
+
+2. **Cache Usage**
+
+ - query-cache-hit
+ - query-cache-miss
+ - packetcache-hit
+ - packetcache-miss
+
+3. **Cache Size**
+
+ - query-cache-size
+ - packetcache-size
+ - key-cache-size
+ - meta-cache-size
+
+4. **Latency**
+
+ - latency
+
+PowerDNS Recursor charts:
+
+1. **Questions In**
+
+ - questions
+ - ipv6-questions
+ - tcp-queries
+
+2. **Questions Out**
+
+ - all-outqueries
+ - ipv6-outqueries
+ - tcp-outqueries
+ - throttled-outqueries
+
+3. **Answer Times**
+
+ - answers-slow
+ - answers0-1
+ - answers1-10
+ - answers10-100
+ - answers100-1000
+
+4. **Timeouts**
+
+ - outgoing-timeouts
+ - outgoing4-timeouts
+ - outgoing6-timeouts
+
+5. **Drops**
+
+ - over-capacity-drops
+
+6. **Cache Usage**
+
+ - cache-hits
+ - cache-misses
+ - packetcache-hits
+ - packetcache-misses
+
+7. **Cache Size**
+
+ - cache-entries
+ - packetcache-entries
+ - negcache-entries
+
+## Configuration
+
+Edit the `python.d/powerdns.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/powerdns.conf
+```
+
+```yaml
+local:
+ name : 'local'
+ url : 'http://127.0.0.1:8081/api/v1/servers/localhost/statistics'
+ header :
+ X-API-Key: 'change_me'
+```
+
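+To quickly verify that the statistics endpoint is reachable (a manual check,
+assuming the URL and API key from the example above), you can run:
+
+```bash
+curl -H 'X-API-Key: change_me' 'http://127.0.0.1:8081/api/v1/servers/localhost/statistics'
+```
+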
+---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fpowerdns%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/python.d.plugin/powerdns/powerdns.chart.py b/collectors/python.d.plugin/powerdns/powerdns.chart.py
new file mode 100644
index 0000000..b951e0c
--- /dev/null
+++ b/collectors/python.d.plugin/powerdns/powerdns.chart.py
@@ -0,0 +1,153 @@
+# -*- coding: utf-8 -*-
+# Description: powerdns netdata python.d module
+# Author: Ilya Mashchenko (ilyam8)
+# Author: Luke Whitworth
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from json import loads
+
+from bases.FrameworkServices.UrlService import UrlService
+
+ORDER = [
+ 'questions',
+ 'cache_usage',
+ 'cache_size',
+ 'latency',
+]
+
+CHARTS = {
+ 'questions': {
+ 'options': [None, 'PowerDNS Queries and Answers', 'count', 'questions', 'powerdns.questions', 'line'],
+ 'lines': [
+ ['udp-queries', None, 'incremental'],
+ ['udp-answers', None, 'incremental'],
+ ['tcp-queries', None, 'incremental'],
+ ['tcp-answers', None, 'incremental']
+ ]
+ },
+ 'cache_usage': {
+ 'options': [None, 'PowerDNS Cache Usage', 'count', 'cache', 'powerdns.cache_usage', 'line'],
+ 'lines': [
+ ['query-cache-hit', None, 'incremental'],
+ ['query-cache-miss', None, 'incremental'],
+ ['packetcache-hit', 'packet-cache-hit', 'incremental'],
+ ['packetcache-miss', 'packet-cache-miss', 'incremental']
+ ]
+ },
+ 'cache_size': {
+ 'options': [None, 'PowerDNS Cache Size', 'count', 'cache', 'powerdns.cache_size', 'line'],
+ 'lines': [
+ ['query-cache-size', None, 'absolute'],
+ ['packetcache-size', 'packet-cache-size', 'absolute'],
+ ['key-cache-size', None, 'absolute'],
+ ['meta-cache-size', None, 'absolute']
+ ]
+ },
+ 'latency': {
+ 'options': [None, 'PowerDNS Latency', 'microseconds', 'latency', 'powerdns.latency', 'line'],
+ 'lines': [
+ ['latency', None, 'absolute']
+ ]
+ }
+}
+
+RECURSOR_ORDER = ['questions-in', 'questions-out', 'answer-times', 'timeouts', 'drops', 'cache_usage', 'cache_size']
+
+RECURSOR_CHARTS = {
+ 'questions-in': {
+ 'options': [None, 'PowerDNS Recursor Questions In', 'count', 'questions', 'powerdns_recursor.questions-in',
+ 'line'],
+ 'lines': [
+ ['questions', None, 'incremental'],
+ ['ipv6-questions', None, 'incremental'],
+ ['tcp-questions', None, 'incremental']
+ ]
+ },
+ 'questions-out': {
+ 'options': [None, 'PowerDNS Recursor Questions Out', 'count', 'questions', 'powerdns_recursor.questions-out',
+ 'line'],
+ 'lines': [
+ ['all-outqueries', None, 'incremental'],
+ ['ipv6-outqueries', None, 'incremental'],
+ ['tcp-outqueries', None, 'incremental'],
+ ['throttled-outqueries', None, 'incremental']
+ ]
+ },
+ 'answer-times': {
+ 'options': [None, 'PowerDNS Recursor Answer Times', 'count', 'performance', 'powerdns_recursor.answer-times',
+ 'line'],
+ 'lines': [
+ ['answers-slow', None, 'incremental'],
+ ['answers0-1', None, 'incremental'],
+ ['answers1-10', None, 'incremental'],
+ ['answers10-100', None, 'incremental'],
+ ['answers100-1000', None, 'incremental']
+ ]
+ },
+ 'timeouts': {
+ 'options': [None, 'PowerDNS Recursor Questions Time', 'count', 'performance', 'powerdns_recursor.timeouts',
+ 'line'],
+ 'lines': [
+ ['outgoing-timeouts', None, 'incremental'],
+ ['outgoing4-timeouts', None, 'incremental'],
+ ['outgoing6-timeouts', None, 'incremental']
+ ]
+ },
+ 'drops': {
+ 'options': [None, 'PowerDNS Recursor Drops', 'count', 'performance', 'powerdns_recursor.drops', 'line'],
+ 'lines': [
+ ['over-capacity-drops', None, 'incremental']
+ ]
+ },
+ 'cache_usage': {
+ 'options': [None, 'PowerDNS Recursor Cache Usage', 'count', 'cache', 'powerdns_recursor.cache_usage', 'line'],
+ 'lines': [
+ ['cache-hits', None, 'incremental'],
+ ['cache-misses', None, 'incremental'],
+ ['packetcache-hits', 'packet-cache-hit', 'incremental'],
+ ['packetcache-misses', 'packet-cache-miss', 'incremental']
+ ]
+ },
+ 'cache_size': {
+ 'options': [None, 'PowerDNS Recursor Cache Size', 'count', 'cache', 'powerdns_recursor.cache_size', 'line'],
+ 'lines': [
+ ['cache-entries', None, 'absolute'],
+ ['packetcache-entries', None, 'absolute'],
+ ['negcache-entries', None, 'absolute']
+ ]
+ }
+}
+
+
+class Service(UrlService):
+ def __init__(self, configuration=None, name=None):
+ UrlService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
+ self.url = configuration.get('url', 'http://127.0.0.1:8081/api/v1/servers/localhost/statistics')
+
+ def check(self):
+ self._manager = self._build_manager()
+ if not self._manager:
+ return None
+
+ d = self._get_data()
+ if not d:
+ return False
+
+ if is_recursor(d):
+ self.order = RECURSOR_ORDER
+ self.definitions = RECURSOR_CHARTS
+ self.module_name = 'powerdns_recursor'
+
+ return True
+
+ def _get_data(self):
+ data = self._get_raw_data()
+ if not data:
+ return None
+ return dict((d['name'], d['value']) for d in loads(data))
+
+
+def is_recursor(d):
+ return 'over-capacity-drops' in d and 'tcp-questions' in d
diff --git a/collectors/python.d.plugin/powerdns/powerdns.conf b/collectors/python.d.plugin/powerdns/powerdns.conf
new file mode 100644
index 0000000..559bf17
--- /dev/null
+++ b/collectors/python.d.plugin/powerdns/powerdns.conf
@@ -0,0 +1,76 @@
+# netdata python.d.plugin configuration for powerdns
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts on the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply a penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them is allowed to run at any
+# given time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear on the
+# # dashboard (by default this is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# In addition to the above, powerdns also supports the following:
+#
+# url: 'URL' # the URL to fetch powerdns performance statistics
+# header:
+# X-API-Key: 'Key' # API key
+#
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+
+# localhost:
+# name : 'local'
+# url : 'http://127.0.0.1:8081/api/v1/servers/localhost/statistics'
+# header:
+# X-API-Key: 'change_me'
diff --git a/collectors/python.d.plugin/proxysql/Makefile.inc b/collectors/python.d.plugin/proxysql/Makefile.inc
new file mode 100644
index 0000000..66be372
--- /dev/null
+++ b/collectors/python.d.plugin/proxysql/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += proxysql/proxysql.chart.py
+dist_pythonconfig_DATA += proxysql/proxysql.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += proxysql/README.md proxysql/Makefile.inc
+
diff --git a/collectors/python.d.plugin/proxysql/README.md b/collectors/python.d.plugin/proxysql/README.md
new file mode 100644
index 0000000..f1b369a
--- /dev/null
+++ b/collectors/python.d.plugin/proxysql/README.md
@@ -0,0 +1,106 @@
+<!--
+title: "ProxySQL monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/proxysql/README.md
+sidebar_label: "ProxySQL"
+-->
+
+# ProxySQL monitoring with Netdata
+
+Monitors database backend and frontend performance metrics.
+
+## Requirements
+
+- python library [MySQLdb](https://github.com/PyMySQL/mysqlclient-python) (faster) or [PyMySQL](https://github.com/PyMySQL/PyMySQL) (slower)
+- a `netdata` local user to connect to the ProxySQL server.
+
+To create the `netdata` user, follow [the documentation](https://github.com/sysown/proxysql/wiki/Users-configuration#creating-a-new-user).
+
+## Charts
+
+It produces:
+
+1. **Connections (frontend)**
+
+ - connected: number of frontend connections currently connected
+ - aborted: number of frontend connections aborted due to invalid credentials or because max_connections was reached
+ - non_idle: number of frontend connections that are not currently idle
+ - created: number of frontend connections created
+
+2. **Questions (frontend)**
+
+ - questions: total number of queries sent from frontends
+ - slow_queries: number of queries that ran for longer than the threshold (in milliseconds) defined by the global variable `mysql-long_query_time`
+
+3. **Overall Bandwidth (backends)**
+
+ - in
+ - out
+
+4. **Status (backends)**
+
+ - Backends
+ - `1=ONLINE`: backend server is fully operational
+ - `2=SHUNNED`: backend server is temporarily taken out of use, either because of too many connection errors in a short time or because replication lag exceeded the allowed threshold
+ - `3=OFFLINE_SOFT`: when a server is put into OFFLINE_SOFT mode, new incoming connections aren't accepted anymore, while the existing connections are kept until they become inactive. In other words, connections are kept in use until the current transaction is completed. This allows a backend to be detached gracefully
+ - `4=OFFLINE_HARD`: when a server is put into OFFLINE_HARD mode, the existing connections are dropped and new incoming connections aren't accepted either. This is equivalent to deleting the server from a hostgroup, or temporarily taking it out of the hostgroup for maintenance work
+ - `-1`: Unknown status
+
+5. **Bandwidth (backends)**
+
+ - Backends
+ - in
+ - out
+
+6. **Queries (backends)**
+
+ - Backends
+ - queries
+
+7. **Latency (backends)**
+
+ - Backends
+ - ping time
+
+8. **Pool connections (backends)**
+
+ - Backends
+ - Used: The number of connections currently used by ProxySQL for sending queries to the backend server.
+ - Free: The number of connections currently free.
+ - Established/OK: The number of connections that were established successfully.
+ - Error: The number of connections that failed to establish.
+
+9. **Commands**
+
+ - Commands
+ - Count
+ - Duration (Total duration for each command)
+
+10. **Commands Histogram**
+
+ - Commands
+ - 100us, 500us, ..., 10s, inf: the total number of commands of the given type that completed within the given time bound and above the previous one (e.g. `500us` counts commands that took between 100us and 500us).
+
+## Configuration
+
+Edit the `python.d/proxysql.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/proxysql.conf
+```
+
+```yaml
+tcpipv4:
+ name : 'local'
+ user : 'stats'
+ pass : 'stats'
+ host : '127.0.0.1'
+ port : '6032'
+```
+
+If no configuration is given, the module will fail to run.
+
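+To verify the stats interface credentials manually (assuming the values from
+the example above), you can connect with any MySQL client, e.g.:
+
+```bash
+mysql -u stats -pstats -h 127.0.0.1 -P 6032 -e 'SELECT * FROM stats_mysql_global LIMIT 5;'
+```
+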
+---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fproxysql%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/python.d.plugin/proxysql/proxysql.chart.py b/collectors/python.d.plugin/proxysql/proxysql.chart.py
new file mode 100644
index 0000000..982c28e
--- /dev/null
+++ b/collectors/python.d.plugin/proxysql/proxysql.chart.py
@@ -0,0 +1,352 @@
+# -*- coding: utf-8 -*-
+# Description: Proxysql netdata python.d module
+# Author: Ali Borhani (alibo)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from bases.FrameworkServices.MySQLService import MySQLService
+
+
+def query(table, *params):
+ return 'SELECT {params} FROM {table}'.format(table=table, params=', '.join(params))
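+# e.g. query("stats_mysql_global", "Variable_Name", "Variable_Value")
+# -> 'SELECT Variable_Name, Variable_Value FROM stats_mysql_global'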
+
+
+# https://github.com/sysown/proxysql/blob/master/doc/admin_tables.md#stats_mysql_global
+QUERY_GLOBAL = query(
+ "stats_mysql_global",
+ "Variable_Name",
+ "Variable_Value"
+)
+
+# https://github.com/sysown/proxysql/blob/master/doc/admin_tables.md#stats_mysql_connection_pool
+QUERY_CONNECTION_POOL = query(
+ "stats_mysql_connection_pool",
+ "hostgroup",
+ "srv_host",
+ "srv_port",
+ "status",
+ "ConnUsed",
+ "ConnFree",
+ "ConnOK",
+ "ConnERR",
+ "Queries",
+ "Bytes_data_sent",
+ "Bytes_data_recv",
+ "Latency_us"
+)
+
+# https://github.com/sysown/proxysql/blob/master/doc/admin_tables.md#stats_mysql_commands_counters
+QUERY_COMMANDS = query(
+ "stats_mysql_commands_counters",
+ "Command",
+ "Total_Time_us",
+ "Total_cnt",
+ "cnt_100us",
+ "cnt_500us",
+ "cnt_1ms",
+ "cnt_5ms",
+ "cnt_10ms",
+ "cnt_50ms",
+ "cnt_100ms",
+ "cnt_500ms",
+ "cnt_1s",
+ "cnt_5s",
+ "cnt_10s",
+ "cnt_INFs"
+)
+
+GLOBAL_STATS = [
+ 'client_connections_aborted',
+ 'client_connections_connected',
+ 'client_connections_created',
+ 'client_connections_non_idle',
+ 'proxysql_uptime',
+ 'questions',
+ 'slow_queries'
+]
+
+CONNECTION_POOL_STATS = [
+ 'status',
+ 'connused',
+ 'connfree',
+ 'connok',
+ 'connerr',
+ 'queries',
+ 'bytes_data_sent',
+ 'bytes_data_recv',
+ 'latency_us'
+]
+
+ORDER = [
+ 'connections',
+ 'active_transactions',
+ 'questions',
+ 'pool_overall_net',
+ 'commands_count',
+ 'commands_duration',
+ 'pool_status',
+ 'pool_net',
+ 'pool_queries',
+ 'pool_latency',
+ 'pool_connection_used',
+ 'pool_connection_free',
+ 'pool_connection_ok',
+ 'pool_connection_error'
+]
+
+HISTOGRAM_ORDER = [
+ '100us',
+ '500us',
+ '1ms',
+ '5ms',
+ '10ms',
+ '50ms',
+ '100ms',
+ '500ms',
+ '1s',
+ '5s',
+ '10s',
+ 'inf'
+]
+
+STATUS = {
+ "ONLINE": 1,
+ "SHUNNED": 2,
+ "OFFLINE_SOFT": 3,
+ "OFFLINE_HARD": 4
+}
+
+CHARTS = {
+ 'pool_status': {
+ 'options': [None, 'ProxySQL Backend Status', 'status', 'status', 'proxysql.pool_status', 'line'],
+ 'lines': []
+ },
+ 'pool_net': {
+ 'options': [None, 'ProxySQL Backend Bandwidth', 'kilobits/s', 'bandwidth', 'proxysql.pool_net', 'area'],
+ 'lines': []
+ },
+ 'pool_overall_net': {
+ 'options': [None, 'ProxySQL Backend Overall Bandwidth', 'kilobits/s', 'overall_bandwidth',
+ 'proxysql.pool_overall_net', 'area'],
+ 'lines': [
+ ['bytes_data_recv', 'in', 'incremental', 8, 1000],
+ ['bytes_data_sent', 'out', 'incremental', -8, 1000]
+ ]
+ },
+ 'questions': {
+ 'options': [None, 'ProxySQL Frontend Questions', 'questions/s', 'questions', 'proxysql.questions', 'line'],
+ 'lines': [
+ ['questions', 'questions', 'incremental'],
+ ['slow_queries', 'slow_queries', 'incremental']
+ ]
+ },
+ 'pool_queries': {
+ 'options': [None, 'ProxySQL Backend Queries', 'queries/s', 'queries', 'proxysql.queries', 'line'],
+ 'lines': []
+ },
+ 'active_transactions': {
+ 'options': [None, 'ProxySQL Frontend Active Transactions', 'transactions/s', 'active_transactions',
+ 'proxysql.active_transactions', 'line'],
+ 'lines': [
+ ['active_transactions', 'active_transactions', 'absolute']
+ ]
+ },
+ 'pool_latency': {
+ 'options': [None, 'ProxySQL Backend Latency', 'milliseconds', 'latency', 'proxysql.latency', 'line'],
+ 'lines': []
+ },
+ 'connections': {
+ 'options': [None, 'ProxySQL Frontend Connections', 'connections/s', 'connections', 'proxysql.connections',
+ 'line'],
+ 'lines': [
+ ['client_connections_connected', 'connected', 'absolute'],
+ ['client_connections_created', 'created', 'incremental'],
+ ['client_connections_aborted', 'aborted', 'incremental'],
+ ['client_connections_non_idle', 'non_idle', 'absolute']
+ ]
+ },
+ 'pool_connection_used': {
+ 'options': [None, 'ProxySQL Used Connections', 'connections', 'pool_connections',
+ 'proxysql.pool_used_connections', 'line'],
+ 'lines': []
+ },
+ 'pool_connection_free': {
+ 'options': [None, 'ProxySQL Free Connections', 'connections', 'pool_connections',
+ 'proxysql.pool_free_connections', 'line'],
+ 'lines': []
+ },
+ 'pool_connection_ok': {
+ 'options': [None, 'ProxySQL Established Connections', 'connections', 'pool_connections',
+ 'proxysql.pool_ok_connections', 'line'],
+ 'lines': []
+ },
+ 'pool_connection_error': {
+ 'options': [None, 'ProxySQL Error Connections', 'connections', 'pool_connections',
+ 'proxysql.pool_error_connections', 'line'],
+ 'lines': []
+ },
+ 'commands_count': {
+ 'options': [None, 'ProxySQL Commands', 'commands', 'commands', 'proxysql.commands_count', 'line'],
+ 'lines': []
+ },
+ 'commands_duration': {
+ 'options': [None, 'ProxySQL Commands Duration', 'milliseconds', 'commands', 'proxysql.commands_duration',
+ 'line'],
+ 'lines': []
+ }
+}
+
+
+class Service(MySQLService):
+ def __init__(self, configuration=None, name=None):
+ MySQLService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
+ self.queries = dict(
+ global_status=QUERY_GLOBAL,
+ connection_pool_status=QUERY_CONNECTION_POOL,
+ commands_status=QUERY_COMMANDS
+ )
+
+ def _get_data(self):
+ raw_data = self._get_raw_data(description=True)
+
+ if not raw_data:
+ return None
+
+ to_netdata = dict()
+
+ if 'global_status' in raw_data:
+ global_status = dict(raw_data['global_status'][0])
+ for key in global_status:
+ if key.lower() in GLOBAL_STATS:
+ to_netdata[key.lower()] = global_status[key]
+
+ if 'connection_pool_status' in raw_data:
+
+ to_netdata['bytes_data_recv'] = 0
+ to_netdata['bytes_data_sent'] = 0
+
+ for record in raw_data['connection_pool_status'][0]:
+ backend = self.generate_backend(record)
+ name = self.generate_backend_name(backend)
+
+ for key in backend:
+ if key in CONNECTION_POOL_STATS:
+ if key == 'status':
+ backend[key] = self.convert_status(backend[key])
+
+ if len(self.charts) > 0:
+ if (name + '_status') not in self.charts['pool_status']:
+ self.add_backend_dimensions(name)
+
+ to_netdata["{0}_{1}".format(name, key)] = backend[key]
+
+ if key == 'bytes_data_recv':
+ to_netdata['bytes_data_recv'] += int(backend[key])
+
+ if key == 'bytes_data_sent':
+ to_netdata['bytes_data_sent'] += int(backend[key])
+
+ if 'commands_status' in raw_data:
+ for record in raw_data['commands_status'][0]:
+ cmd = self.generate_command_stats(record)
+ name = cmd['name']
+
+ if len(self.charts) > 0:
+ if (name + '_count') not in self.charts['commands_count']:
+ self.add_command_dimensions(name)
+ self.add_histogram_chart(cmd)
+
+ to_netdata[name + '_count'] = cmd['count']
+ to_netdata[name + '_duration'] = cmd['duration']
+ for histogram in cmd['histogram']:
+ dimId = 'commands_histogram_{0}_{1}'.format(name, histogram)
+ to_netdata[dimId] = cmd['histogram'][histogram]
+
+ return to_netdata or None
+
+ def add_backend_dimensions(self, name):
+ self.charts['pool_status'].add_dimension([name + '_status', name, 'absolute'])
+ self.charts['pool_net'].add_dimension([name + '_bytes_data_recv', 'from_' + name, 'incremental', 8, 1024])
+ self.charts['pool_net'].add_dimension([name + '_bytes_data_sent', 'to_' + name, 'incremental', -8, 1024])
+ self.charts['pool_queries'].add_dimension([name + '_queries', name, 'incremental'])
+ self.charts['pool_latency'].add_dimension([name + '_latency_us', name, 'absolute', 1, 1000])
+ self.charts['pool_connection_used'].add_dimension([name + '_connused', name, 'absolute'])
+ self.charts['pool_connection_free'].add_dimension([name + '_connfree', name, 'absolute'])
+ self.charts['pool_connection_ok'].add_dimension([name + '_connok', name, 'incremental'])
+ self.charts['pool_connection_error'].add_dimension([name + '_connerr', name, 'incremental'])
+
+ def add_command_dimensions(self, cmd):
+ self.charts['commands_count'].add_dimension([cmd + '_count', cmd, 'incremental'])
+ self.charts['commands_duration'].add_dimension([cmd + '_duration', cmd, 'incremental', 1, 1000])
+
+ def add_histogram_chart(self, cmd):
+ chart = self.charts.add_chart(self.histogram_chart(cmd))
+
+ for histogram in HISTOGRAM_ORDER:
+ dimId = 'commands_histogram_{0}_{1}'.format(cmd['name'], histogram)
+ chart.add_dimension([dimId, histogram, 'incremental'])
+
+ @staticmethod
+ def histogram_chart(cmd):
+ return [
+ 'commands_histogram_' + cmd['name'],
+ None,
+ 'ProxySQL {0} Command Histogram'.format(cmd['name'].title()),
+ 'commands',
+ 'commands_histogram',
+ 'proxysql.commands_histogram_' + cmd['name'],
+ 'stacked'
+ ]
+
+ @staticmethod
+ def generate_backend(data):
+ return {
+ 'hostgroup': data[0],
+ 'srv_host': data[1],
+ 'srv_port': data[2],
+ 'status': data[3],
+ 'connused': data[4],
+ 'connfree': data[5],
+ 'connok': data[6],
+ 'connerr': data[7],
+ 'queries': data[8],
+ 'bytes_data_sent': data[9],
+ 'bytes_data_recv': data[10],
+ 'latency_us': data[11]
+ }
+
+ @staticmethod
+ def generate_command_stats(data):
+ return {
+ 'name': data[0].lower(),
+ 'duration': data[1],
+ 'count': data[2],
+ 'histogram': {
+ '100us': data[3],
+ '500us': data[4],
+ '1ms': data[5],
+ '5ms': data[6],
+ '10ms': data[7],
+ '50ms': data[8],
+ '100ms': data[9],
+ '500ms': data[10],
+ '1s': data[11],
+ '5s': data[12],
+ '10s': data[13],
+ 'inf': data[14]
+ }
+ }
+
+ @staticmethod
+ def generate_backend_name(backend):
+ hostgroup = backend['hostgroup'].replace(' ', '_').lower()
+ host = backend['srv_host'].replace('.', '_')
+
+ return "{0}_{1}_{2}".format(hostgroup, host, backend['srv_port'])
+
+ @staticmethod
+ def convert_status(status):
+ if status in STATUS:
+ return STATUS[status]
+ return -1
diff --git a/collectors/python.d.plugin/proxysql/proxysql.conf b/collectors/python.d.plugin/proxysql/proxysql.conf
new file mode 100644
index 0000000..3c503a8
--- /dev/null
+++ b/collectors/python.d.plugin/proxysql/proxysql.conf
@@ -0,0 +1,116 @@
+# netdata python.d.plugin configuration for ProxySQL
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts on the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply a penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them is allowed to run at any
+# given time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear on the
+# # dashboard (by default this is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# In addition to the above, proxysql also supports the following:
+#
+# host: 'IP or HOSTNAME' # the host to connect to
+# port: PORT # the port to connect to
+#
+# in all cases, the following can also be set:
+#
+# user: 'username' # the proxysql username to use
+# pass: 'password' # the proxysql password to use
+#
+
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+
+tcp:
+ name : 'local'
+ user : 'stats'
+ pass : 'stats'
+ host : 'localhost'
+ port : '6032'
+
+tcpipv4:
+ name : 'local'
+ user : 'stats'
+ pass : 'stats'
+ host : '127.0.0.1'
+ port : '6032'
+
+tcpipv6:
+ name : 'local'
+ user : 'stats'
+ pass : 'stats'
+ host : '::1'
+ port : '6032'
+
+tcp_admin:
+ name : 'local'
+ user : 'admin'
+ pass : 'admin'
+ host : 'localhost'
+ port : '6032'
+
+tcpipv4_admin:
+ name : 'local'
+ user : 'admin'
+ pass : 'admin'
+ host : '127.0.0.1'
+ port : '6032'
+
+tcpipv6_admin:
+ name : 'local'
+ user : 'admin'
+ pass : 'admin'
+ host : '::1'
+ port : '6032'
diff --git a/collectors/python.d.plugin/puppet/Makefile.inc b/collectors/python.d.plugin/puppet/Makefile.inc
new file mode 100644
index 0000000..fe94b92
--- /dev/null
+++ b/collectors/python.d.plugin/puppet/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += puppet/puppet.chart.py
+dist_pythonconfig_DATA += puppet/puppet.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += puppet/README.md puppet/Makefile.inc
+
diff --git a/collectors/python.d.plugin/puppet/README.md b/collectors/python.d.plugin/puppet/README.md
new file mode 100644
index 0000000..9b7c0a2
--- /dev/null
+++ b/collectors/python.d.plugin/puppet/README.md
@@ -0,0 +1,67 @@
+<!--
+title: "Puppet monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/puppet/README.md
+sidebar_label: "Puppet"
+-->
+
+# Puppet monitoring with Netdata
+
+Monitors the status of Puppet Server and PuppetDB.
+
+The following charts are drawn:
+
+1. **JVM Heap**
+
+ - committed (allocated from OS)
+ - used (actual use)
+
+2. **JVM Non-Heap**
+
+ - committed (allocated from OS)
+ - used (actual use)
+
+3. **CPU Usage**
+
+ - execution
+ - GC (taken by garbage collection)
+
+4. **File Descriptors**
+
+ - max
+ - used
+
+## Configuration
+
+Edit the `python.d/puppet.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/puppet.conf
+```
+
+```yaml
+puppetdb:
+ url: 'https://fqdn.example.com:8081'
+ tls_cert_file: /path/to/client.crt
+ tls_key_file: /path/to/client.key
+ autodetection_retry: 1
+
+puppetserver:
+ url: 'https://fqdn.example.com:8140'
+ autodetection_retry: 1
+```
+
+When no configuration is given, the module uses `https://fqdn.example.com:8140`.
+
+### Notes
+
+- The exact fully qualified domain name (FQDN) of the node should be used.
+- Puppet Server/DB startup time is usually VERY long, so a reasonably
+  high retry count should be used.
+- A secure PuppetDB configuration may require a client certificate. This
+  does not apply to the default PuppetDB configuration though.
+
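+To manually check the status endpoint this module relies on (assuming the
+example server above; `-k` skips certificate verification), you can run:
+
+```bash
+curl -k 'https://fqdn.example.com:8140/status/v1/services?level=debug'
+```
+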
+---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fpuppet%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/python.d.plugin/puppet/puppet.chart.py b/collectors/python.d.plugin/puppet/puppet.chart.py
new file mode 100644
index 0000000..f8adf60
--- /dev/null
+++ b/collectors/python.d.plugin/puppet/puppet.chart.py
@@ -0,0 +1,121 @@
+# -*- coding: utf-8 -*-
+# Description: puppet netdata python.d module
+# Author: Andrey Galkin <andrey@futoin.org> (andvgal)
+# SPDX-License-Identifier: GPL-3.0-or-later
+#
+# This module should work both with OpenSource and PE versions
+# of PuppetServer and PuppetDB.
+#
+# NOTE: PuppetDB may be configured to require proper TLS
+# client certificate for security reasons. Use tls_key_file
+# and tls_cert_file options then.
+#
+
+import socket
+from json import loads
+
+from bases.FrameworkServices.UrlService import UrlService
+
+update_every = 5
+
+MiB = 1 << 20
+CPU_SCALE = 1000
+
+ORDER = [
+ 'jvm_heap',
+ 'jvm_nonheap',
+ 'cpu',
+ 'fd_open',
+]
+
+CHARTS = {
+ 'jvm_heap': {
+ 'options': [None, 'JVM Heap', 'MiB', 'resources', 'puppet.jvm', 'area'],
+ 'lines': [
+ ['jvm_heap_committed', 'committed', 'absolute', 1, MiB],
+ ['jvm_heap_used', 'used', 'absolute', 1, MiB],
+ ],
+ 'variables': [
+ ['jvm_heap_max'],
+ ['jvm_heap_init'],
+ ],
+ },
+ 'jvm_nonheap': {
+ 'options': [None, 'JVM Non-Heap', 'MiB', 'resources', 'puppet.jvm', 'area'],
+ 'lines': [
+ ['jvm_nonheap_committed', 'committed', 'absolute', 1, MiB],
+ ['jvm_nonheap_used', 'used', 'absolute', 1, MiB],
+ ],
+ 'variables': [
+ ['jvm_nonheap_max'],
+ ['jvm_nonheap_init'],
+ ],
+ },
+ 'cpu': {
+ 'options': [None, 'CPU usage', 'percentage', 'resources', 'puppet.cpu', 'stacked'],
+ 'lines': [
+ ['cpu_time', 'execution', 'absolute', 1, CPU_SCALE],
+ ['gc_time', 'GC', 'absolute', 1, CPU_SCALE],
+ ]
+ },
+ 'fd_open': {
+ 'options': [None, 'File Descriptors', 'descriptors', 'resources', 'puppet.fdopen', 'line'],
+ 'lines': [
+ ['fd_used', 'used', 'absolute'],
+ ],
+ 'variables': [
+ ['fd_max'],
+ ],
+ },
+}
+
+
+class Service(UrlService):
+ def __init__(self, configuration=None, name=None):
+ UrlService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
+ self.url = 'https://{0}:8140'.format(socket.getfqdn())
+
+ def _get_data(self):
+ # NOTE: there are several ways to retrieve data
+ # 1. Only PE versions:
+ # https://puppet.com/docs/pe/2018.1/api_status/status_api_metrics_endpoints.html
+ # 2. Individual Metrics API (JMX):
+ # https://puppet.com/docs/pe/2018.1/api_status/metrics_api.html
+ # 3. Extended status at debug level:
+ # https://puppet.com/docs/pe/2018.1/api_status/status_api_json_endpoints.html
+ #
+ # For the sake of simplicity and efficiency, the status endpoint is used.
+
+ raw_data = self._get_raw_data(self.url + '/status/v1/services?level=debug')
+
+ if raw_data is None:
+ return None
+
+ raw_data = loads(raw_data)
+ data = {}
+
+ try:
+ try:
+ jvm_metrics = raw_data['status-service']['status']['experimental']['jvm-metrics']
+ except KeyError:
+ jvm_metrics = raw_data['status-service']['status']['jvm-metrics']
+
+ heap_mem = jvm_metrics['heap-memory']
+ non_heap_mem = jvm_metrics['non-heap-memory']
+
+ for k in ['max', 'committed', 'used', 'init']:
+ data['jvm_heap_' + k] = heap_mem[k]
+ data['jvm_nonheap_' + k] = non_heap_mem[k]
+
+ fd_open = jvm_metrics['file-descriptors']
+ data['fd_max'] = fd_open['max']
+ data['fd_used'] = fd_open['used']
+
+ data['cpu_time'] = int(jvm_metrics['cpu-usage'] * CPU_SCALE)
+ data['gc_time'] = int(jvm_metrics['gc-cpu-usage'] * CPU_SCALE)
+ except KeyError:
+ pass
+
+ return data or None
diff --git a/collectors/python.d.plugin/puppet/puppet.conf b/collectors/python.d.plugin/puppet/puppet.conf
new file mode 100644
index 0000000..ff5c3d0
--- /dev/null
+++ b/collectors/python.d.plugin/puppet/puppet.conf
@@ -0,0 +1,94 @@
+# netdata python.d.plugin configuration for Puppet Server and Puppet DB
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts on the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply a penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them is allowed to run at any
+# given time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear on the
+# # dashboard (by default this is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# These configuration options come from the UrlService base:
+# url: # HTTP or HTTPS URL
+# tls_verify: False # Control HTTPS server certificate verification
+# tls_ca_file: # Optional CA (bundle) file to use
+# tls_cert_file: # Optional client certificate file
+# tls_key_file: # Optional client key file
+#
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+# puppet:
+# url: 'https://<FQDN>:8140'
+#
+
+#
+# Production configuration should look like below.
+#
+# NOTE: Puppet Server/DB startup time is usually VERY long, so a
+# reasonably high retry count should be used.
+#
+# NOTE: a secure PuppetDB config may require a client certificate.
+# This does not apply to the default PuppetDB configuration though.
+#
+# puppetdb:
+# url: 'https://fqdn.example.com:8081'
+# tls_cert_file: /path/to/client.crt
+# tls_key_file: /path/to/client.key
+# autodetection_retry: 1
+#
+# puppetserver:
+# url: 'https://fqdn.example.com:8140'
+# autodetection_retry: 1
+#
diff --git a/collectors/python.d.plugin/python.d.conf b/collectors/python.d.plugin/python.d.conf
new file mode 100644
index 0000000..61cfd60
--- /dev/null
+++ b/collectors/python.d.plugin/python.d.conf
@@ -0,0 +1,109 @@
+# netdata python.d.plugin configuration
+#
+# This file is in YaML format.
+# Generally the format is:
+#
+# name: value
+#
+
+# Enable / disable the whole python.d.plugin (all its modules)
+enabled: yes
+
+# ----------------------------------------------------------------------
+# Enable / Disable python.d.plugin modules
+#default_run: yes
+#
+# If "default_run" = "yes" the default for all modules is enabled (yes).
+# Setting any of these to "no" will disable it.
+#
+# If "default_run" = "no" the default for all modules is disabled (no).
+# Setting any of these to "yes" will enable it.
+
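+# For example (illustration only, not a shipped default), to run only the
+# postgres module you could set:
+#
+# default_run: no
+# postgres: yes
+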
+# Enable / Disable explicit garbage collection (full collection run). Default is enabled.
+gc_run: yes
+
+# Garbage collection interval in seconds. Default is 300.
+gc_interval: 300
+
+# apache: yes
+
+# apache_cache has been replaced by web_log
+# adaptec_raid: yes
+# alarms: yes
+# am2320: yes
+# anomalies: no
+apache_cache: no
+# beanstalk: yes
+# bind_rndc: yes
+# boinc: yes
+# ceph: yes
+chrony: no
+# couchdb: yes
+# dns_query_time: yes
+# dnsdist: yes
+# dockerd: yes
+# dovecot: yes
+# elasticsearch: yes
+# energid: yes
+
+# this is just an example
+example: no
+
+# exim: yes
+# fail2ban: yes
+# freeradius: yes
+# gearman: yes
+go_expvar: no
+
+# gunicorn_log has been replaced by web_log
+gunicorn_log: no
+# haproxy: yes
+# hddtemp: yes
+# httpcheck: yes
+hpssa: no
+# icecast: yes
+# ipfs: yes
+# isc_dhcpd: yes
+# litespeed: yes
+logind: no
+# megacli: yes
+# memcached: yes
+# mongodb: yes
+# monit: yes
+# mysql: yes
+# nginx: yes
+# nginx_plus: yes
+# nvidia_smi: yes
+
+# nginx_log has been replaced by web_log
+nginx_log: no
+# nsd: yes
+# ntpd: yes
+# openldap: yes
+# oracledb: yes
+# ovpn_status_log: yes
+# phpfpm: yes
+# portcheck: yes
+# postfix: yes
+# postgres: yes
+# powerdns: yes
+# proxysql: yes
+# puppet: yes
+# rabbitmq: yes
+# redis: yes
+# rethinkdbs: yes
+# retroshare: yes
+# riakkv: yes
+# samba: yes
+# sensors: yes
+# smartd_log: yes
+# spigotmc: yes
+# springboot: yes
+# squid: yes
+# traefik: yes
+# tomcat: yes
+# tor: yes
+# uwsgi: yes
+# varnish: yes
+# w1sensor: yes
+# web_log: yes
diff --git a/collectors/python.d.plugin/python.d.plugin.in b/collectors/python.d.plugin/python.d.plugin.in
new file mode 100644
index 0000000..9d575d8
--- /dev/null
+++ b/collectors/python.d.plugin/python.d.plugin.in
@@ -0,0 +1,852 @@
+#!/usr/bin/env bash
+'''':;
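+# The '''':; line above makes this file a valid shell script AND a valid
+# Python file at the same time: the shell parses it as the no-op ':' command
+# and runs the lines below to pick a Python interpreter, while Python sees
+# everything up to the closing ''' on the exec line as a string literal.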
+pybinary=$(which python || which python3 || which python2)
+filtered=()
+for arg in "$@"
+do
+ case $arg in
+ -p*) pybinary=${arg:2}
+ shift 1 ;;
+ *) filtered+=("$arg") ;;
+ esac
+done
+if [ "$pybinary" = "" ]
+then
+ echo "ERROR python IS NOT AVAILABLE IN THIS SYSTEM"
+ exit 1
+fi
+exec "$pybinary" "$0" "${filtered[@]}" # '''
+
+# -*- coding: utf-8 -*-
+# Description:
+# Author: Pawel Krupa (paulfantom)
+# Author: Ilya Mashchenko (l2isbad)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import collections
+import copy
+import gc
+import json
+import os
+import pprint
+import re
+import sys
+import time
+import threading
+import types
+
+try:
+ from queue import Queue
+except ImportError:
+ from Queue import Queue
+
+PY_VERSION = sys.version_info[:2] # (major, minor)
+
+if PY_VERSION > (3, 1):
+ from importlib.machinery import SourceFileLoader
+else:
+ from imp import load_source as SourceFileLoader
+
+ENV_NETDATA_USER_CONFIG_DIR = 'NETDATA_USER_CONFIG_DIR'
+ENV_NETDATA_STOCK_CONFIG_DIR = 'NETDATA_STOCK_CONFIG_DIR'
+ENV_NETDATA_PLUGINS_DIR = 'NETDATA_PLUGINS_DIR'
+ENV_NETDATA_LIB_DIR = 'NETDATA_LIB_DIR'
+ENV_NETDATA_UPDATE_EVERY = 'NETDATA_UPDATE_EVERY'
+ENV_NETDATA_LOCK_DIR = 'NETDATA_LOCK_DIR'
+
+
+def add_pythond_packages():
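+ # make the bundled python_modules directory (with bases/ and third_party/) importable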
+ pluginsd = os.getenv(ENV_NETDATA_PLUGINS_DIR, os.path.dirname(__file__))
+ pythond = os.path.abspath(pluginsd + '/../python.d')
+ packages = os.path.join(pythond, 'python_modules')
+ sys.path.append(packages)
+
+
+add_pythond_packages()
+
+from bases.collection import safe_print
+from bases.loggers import PythonDLogger
+from bases.loaders import load_config
+from third_party import filelock
+
+try:
+ from collections import OrderedDict
+except ImportError:
+ from third_party.ordereddict import OrderedDict
+
+
+def dirs():
+ var_lib = os.getenv(
+ ENV_NETDATA_LIB_DIR,
+ '@varlibdir_POST@',
+ )
+ plugin_user_config = os.getenv(
+ ENV_NETDATA_USER_CONFIG_DIR,
+ '@configdir_POST@',
+ )
+ plugin_stock_config = os.getenv(
+ ENV_NETDATA_STOCK_CONFIG_DIR,
+ '@libconfigdir_POST@',
+ )
+ pluginsd = os.getenv(
+ ENV_NETDATA_PLUGINS_DIR,
+ os.path.dirname(__file__),
+ )
+ locks = os.getenv(
+ ENV_NETDATA_LOCK_DIR,
+ os.path.join('@varlibdir_POST@', 'lock')
+ )
+ modules_user_config = os.path.join(plugin_user_config, 'python.d')
+ modules_stock_config = os.path.join(plugin_stock_config, 'python.d')
+ modules = os.path.abspath(pluginsd + '/../python.d')
+
+ Dirs = collections.namedtuple(
+ 'Dirs',
+ [
+ 'plugin_user_config',
+ 'plugin_stock_config',
+ 'modules_user_config',
+ 'modules_stock_config',
+ 'modules',
+ 'var_lib',
+ 'locks',
+ ]
+ )
+ return Dirs(
+ plugin_user_config,
+ plugin_stock_config,
+ modules_user_config,
+ modules_stock_config,
+ modules,
+ var_lib,
+ locks,
+ )
+
+
+DIRS = dirs()
+
+IS_ATTY = sys.stdout.isatty()
+
+MODULE_SUFFIX = '.chart.py'
+
+
+def available_modules():
+ obsolete = (
+ 'apache_cache', # replaced by web_log
+ 'cpuidle', # rewritten in C
+ 'cpufreq', # rewritten in C
+ 'gunicorn_log', # replaced by web_log
+ 'linux_power_supply', # rewritten in C
+ 'nginx_log', # replaced by web_log
+ 'mdstat', # rewritten in C
+ 'sslcheck', # rewritten in Go, memory leak bug https://github.com/netdata/netdata/issues/5624
+ 'unbound', # rewritten in Go
+ )
+
+ files = sorted(os.listdir(DIRS.modules))
+ modules = [m[:-len(MODULE_SUFFIX)] for m in files if m.endswith(MODULE_SUFFIX)]
+ avail = [m for m in modules if m not in obsolete]
+ return tuple(avail)
+
+
+AVAILABLE_MODULES = available_modules()
+
+JOB_BASE_CONF = {
+ 'update_every': int(os.getenv(ENV_NETDATA_UPDATE_EVERY, 1)),
+ 'priority': 60000,
+ 'autodetection_retry': 0,
+ 'chart_cleanup': 10,
+ 'penalty': True,
+ 'name': str(),
+}
+
+PLUGIN_BASE_CONF = {
+ 'enabled': True,
+ 'default_run': True,
+ 'gc_run': True,
+ 'gc_interval': 300,
+}
+
+
+def multi_path_find(name, *paths):
+ for path in paths:
+ abs_name = os.path.join(path, name)
+ if os.path.isfile(abs_name):
+ return abs_name
+ return str()
+
+
+def load_module(name):
+ abs_path = os.path.join(DIRS.modules, '{0}{1}'.format(name, MODULE_SUFFIX))
+ module = SourceFileLoader('pythond_' + name, abs_path)
+ if isinstance(module, types.ModuleType):
+ return module
+ return module.load_module()
+
+
+class ModuleConfig:
+ def __init__(self, name, config=None):
+ self.name = name
+ self.config = config or OrderedDict()
+
+ def load(self, abs_path):
+ self.config.update(load_config(abs_path) or dict())
+
+ def defaults(self):
+ keys = (
+ 'update_every',
+ 'priority',
+ 'autodetection_retry',
+ 'chart_cleanup',
+ 'penalty',
+ )
+ return dict((k, self.config[k]) for k in keys if k in self.config)
+
+ def create_job(self, job_name, job_config=None):
+ job_config = job_config or dict()
+
+ config = OrderedDict()
+ config.update(job_config)
+ config['job_name'] = job_name
+ for k, v in self.defaults().items():
+ config.setdefault(k, v)
+
+ return config
+
+ def job_names(self):
+ return [v for v in self.config if isinstance(self.config.get(v), dict)]
+
+ def single_job(self):
+ return [self.create_job(self.name, self.config)]
+
+ def multi_job(self):
+ return [self.create_job(n, self.config[n]) for n in self.job_names()]
+
+ def create_jobs(self):
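+ # a config with job sub-sections yields one job per section; otherwise the whole config is one job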
+ return self.multi_job() or self.single_job()
+
+
+class JobsConfigsBuilder:
+ def __init__(self, config_dirs):
+ self.config_dirs = config_dirs
+ self.log = PythonDLogger()
+ self.job_defaults = None
+ self.module_defaults = None
+ self.min_update_every = None
+
+ def load_module_config(self, module_name):
+ name = '{0}.conf'.format(module_name)
+ self.log.debug("[{0}] looking for '{1}' in {2}".format(module_name, name, self.config_dirs))
+ config = ModuleConfig(module_name)
+
+ abs_path = multi_path_find(name, *self.config_dirs)
+ if not abs_path:
+ self.log.warning("[{0}] '{1}' was not found".format(module_name, name))
+ return config
+
+ self.log.debug("[{0}] loading '{1}'".format(module_name, abs_path))
+ try:
+ config.load(abs_path)
+ except Exception as error:
+ self.log.error("[{0}] error on loading '{1}' : {2}".format(module_name, abs_path, repr(error)))
+ return None
+
+ self.log.debug("[{0}] '{1}' is loaded".format(module_name, abs_path))
+ return config
+
+ @staticmethod
+ def apply_defaults(jobs, defaults):
+ if defaults is None:
+ return
+ for k, v in defaults.items():
+ for job in jobs:
+ job.setdefault(k, v)
+
+ def set_min_update_every(self, jobs, min_update_every):
+ if min_update_every is None:
+ return
+ for job in jobs:
+ if 'update_every' in job and job['update_every'] < min_update_every:
+ job['update_every'] = min_update_every
+
+ def build(self, module_name):
+ config = self.load_module_config(module_name)
+ if config is None:
+ return None
+
+ configs = config.create_jobs()
+ self.log.info("[{0}] built {1} job(s) configs".format(module_name, len(configs)))
+
+ self.apply_defaults(configs, self.module_defaults)
+ self.apply_defaults(configs, self.job_defaults)
+ self.set_min_update_every(configs, self.min_update_every)
+
+ return configs
+
+
+JOB_STATUS_ACTIVE = 'active'
+JOB_STATUS_RECOVERING = 'recovering'
+JOB_STATUS_DROPPED = 'dropped'
+JOB_STATUS_INIT = 'initial'
+
+
+class Job(threading.Thread):
+ inf = -1
+
+ def __init__(self, service, module_name, config):
+ threading.Thread.__init__(self)
+ self.daemon = True
+ self.service = service
+ self.module_name = module_name
+ self.config = config
+ self.real_name = config['job_name']
+ self.actual_name = config['override_name'] or self.real_name
+ self.autodetection_retry = config['autodetection_retry']
+ self.checks = self.inf
+ self.job = None
+ self.status = JOB_STATUS_INIT
+
+ def is_inited(self):
+ return self.job is not None
+
+ def init(self):
+ self.job = self.service(configuration=copy.deepcopy(self.config))
+
+ def full_name(self):
+ return self.job.name
+
+ def check(self):
+ ok = self.job.check()
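+ # a failed check consumes one recheck attempt, unless the counter is unlimited (inf)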
+ self.checks -= self.checks != self.inf and not ok
+ return ok
+
+ def create(self):
+ self.job.create()
+
+ def need_to_recheck(self):
+ return self.autodetection_retry != 0 and self.checks != 0
+
+ def run(self):
+ self.job.run()
+
+
+class ModuleSrc:
+ def __init__(self, name):
+ self.name = name
+ self.src = None
+
+ def load(self):
+ self.src = load_module(self.name)
+
+ def get(self, key):
+ return getattr(self.src, key, None)
+
+ def service(self):
+ return self.get('Service')
+
+ def defaults(self):
+ keys = (
+ 'update_every',
+ 'priority',
+ 'autodetection_retry',
+ 'chart_cleanup',
+ 'penalty',
+ )
+ return dict((k, self.get(k)) for k in keys if self.get(k) is not None)
+
+ def is_disabled_by_default(self):
+ return bool(self.get('disabled_by_default'))
+
+
+class JobsStatuses:
+ def __init__(self):
+ self.items = OrderedDict()
+
+ def dump(self):
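+        # e.g. '{"web_log": {"apache": "active"}}' (module -> job -> status); names here are illustrative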
+ return json.dumps(self.items, indent=2)
+
+ def get(self, module_name, job_name):
+ if module_name not in self.items:
+ return None
+ return self.items[module_name].get(job_name)
+
+ def has(self, module_name, job_name):
+ return self.get(module_name, job_name) is not None
+
+ def from_file(self, path):
+ with open(path) as f:
+ data = json.load(f)
+ return self.from_json(data)
+
+ @staticmethod
+ def from_json(items):
+ if not isinstance(items, dict):
+ raise Exception('items obj has wrong type : {0}'.format(type(items)))
+ if not items:
+ return JobsStatuses()
+
+ v = OrderedDict()
+ for mod_name in sorted(items):
+ if not items[mod_name]:
+ continue
+ v[mod_name] = OrderedDict()
+ for job_name in sorted(items[mod_name]):
+ v[mod_name][job_name] = items[mod_name][job_name]
+
+ rv = JobsStatuses()
+ rv.items = v
+ return rv
+
+ @staticmethod
+ def from_jobs(jobs):
+ v = OrderedDict()
+ for job in jobs:
+ status = job.status
+ if status not in (JOB_STATUS_ACTIVE, JOB_STATUS_RECOVERING):
+ continue
+ if job.module_name not in v:
+ v[job.module_name] = OrderedDict()
+ v[job.module_name][job.real_name] = status
+
+ rv = JobsStatuses()
+ rv.items = v
+ return rv
+
+
+class StdoutSaver:
+ @staticmethod
+ def save(dump):
+ print(dump)
+
+
+class CachedFileSaver:
+ def __init__(self, path):
+ self.last_save_success = False
+ self.last_saved_dump = str()
+ self.path = path
+
+ def save(self, dump):
+ if self.last_save_success and self.last_saved_dump == dump:
+ return
+ try:
+ with open(self.path, 'w') as out:
+ out.write(dump)
+ except Exception:
+ self.last_save_success = False
+ raise
+ self.last_saved_dump = dump
+ self.last_save_success = True
+
+
+class PluginConfig(dict):
+ def __init__(self, *args):
+ dict.__init__(self, *args)
+
+ def is_module_explicitly_enabled(self, module_name):
+ return self._is_module_enabled(module_name, True)
+
+ def is_module_enabled(self, module_name):
+ return self._is_module_enabled(module_name, False)
+
+ def _is_module_enabled(self, module_name, explicit):
+ if module_name in self:
+ return self[module_name]
+ if explicit:
+ return False
+ return self['default_run']
+
+
+class FileLockRegistry:
+ def __init__(self, path):
+ self.path = path
+ self.locks = dict()
+
+ def register(self, name):
+ if name in self.locks:
+ return
+ file = os.path.join(self.path, '{0}.collector.lock'.format(name))
+ lock = filelock.FileLock(file)
+ lock.acquire(timeout=0)
+ self.locks[name] = lock
+
+ def unregister(self, name):
+ if name not in self.locks:
+ return
+ lock = self.locks[name]
+ lock.release()
+ del self.locks[name]
+
+
+class DummyRegistry:
+ def register(self, name):
+ pass
+
+ def unregister(self, name):
+ pass
+
+
+class Plugin:
+ config_name = 'python.d.conf'
+ jobs_status_dump_name = 'pythond-jobs-statuses.json'
+
+ def __init__(self, modules_to_run, min_update_every, registry):
+ self.modules_to_run = modules_to_run
+ self.min_update_every = min_update_every
+ self.config = PluginConfig(PLUGIN_BASE_CONF)
+ self.log = PythonDLogger()
+ self.registry = registry
+ self.started_jobs = collections.defaultdict(dict)
+ self.jobs = list()
+ self.saver = None
+ self.runs = 0
+
+ def load_config(self):
+ paths = [
+ DIRS.plugin_user_config,
+ DIRS.plugin_stock_config,
+ ]
+ self.log.debug("looking for '{0}' in {1}".format(self.config_name, paths))
+ abs_path = multi_path_find(self.config_name, *paths)
+ if not abs_path:
+ self.log.warning("'{0}' was not found, using defaults".format(self.config_name))
+ return True
+
+ self.log.debug("loading '{0}'".format(abs_path))
+ try:
+ config = load_config(abs_path)
+ except Exception as error:
+ self.log.error("error on loading '{0}' : {1}".format(abs_path, repr(error)))
+ return False
+
+ self.log.debug("'{0}' is loaded".format(abs_path))
+ self.config.update(config)
+ return True
+
+ def load_job_statuses(self):
+ self.log.debug("looking for '{0}' in {1}".format(self.jobs_status_dump_name, DIRS.var_lib))
+ abs_path = multi_path_find(self.jobs_status_dump_name, DIRS.var_lib)
+ if not abs_path:
+ self.log.warning("'{0}' was not found".format(self.jobs_status_dump_name))
+ return
+
+ self.log.debug("loading '{0}'".format(abs_path))
+ try:
+ statuses = JobsStatuses().from_file(abs_path)
+ except Exception as error:
+ self.log.warning("error on loading '{0}' : {1}".format(abs_path, repr(error)))
+ return None
+ self.log.debug("'{0}' is loaded".format(abs_path))
+ return statuses
+
+ def create_jobs(self, job_statuses=None):
+ paths = [
+ DIRS.modules_user_config,
+ DIRS.modules_stock_config,
+ ]
+
+ builder = JobsConfigsBuilder(paths)
+ builder.job_defaults = JOB_BASE_CONF
+ builder.min_update_every = self.min_update_every
+
+ jobs = list()
+ for mod_name in self.modules_to_run:
+ if not self.config.is_module_enabled(mod_name):
+ self.log.info("[{0}] is disabled in the configuration file, skipping it".format(mod_name))
+ continue
+
+ src = ModuleSrc(mod_name)
+ try:
+ src.load()
+ except Exception as error:
+ self.log.warning("[{0}] error on loading source : {1}, skipping it".format(mod_name, repr(error)))
+ continue
+
+ if not (src.service() and callable(src.service())):
+ self.log.warning("[{0}] has no callable Service object, skipping it".format(mod_name))
+ continue
+
+ if src.is_disabled_by_default() and not self.config.is_module_explicitly_enabled(mod_name):
+ self.log.info("[{0}] is disabled by default, skipping it".format(mod_name))
+ continue
+
+ builder.module_defaults = src.defaults()
+ configs = builder.build(mod_name)
+ if not configs:
+ self.log.info("[{0}] has no job configs, skipping it".format(mod_name))
+ continue
+
+ for config in configs:
+ config['job_name'] = re.sub(r'\s+', '_', config['job_name'])
+ config['override_name'] = re.sub(r'\s+', '_', config.pop('name'))
+
+ job = Job(src.service(), mod_name, config)
+
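+                # jobs that were active before a plugin restart get a recovery window: up to 11 rechecks, one roughly every 30 seconds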
+ was_previously_active = job_statuses and job_statuses.has(job.module_name, job.real_name)
+ if was_previously_active and job.autodetection_retry == 0:
+ self.log.debug('{0}[{1}] was previously active, applying recovering settings'.format(
+ job.module_name, job.real_name))
+ job.checks = 11
+ job.autodetection_retry = 30
+
+ jobs.append(job)
+
+ return jobs
+
+ def setup(self):
+ if not self.load_config():
+ return False
+
+ if not self.config['enabled']:
+ self.log.info('disabled in the configuration file')
+ return False
+
+ statuses = self.load_job_statuses()
+
+ self.jobs = self.create_jobs(statuses)
+ if not self.jobs:
+ self.log.info('no jobs to run')
+ return False
+
+ if not IS_ATTY:
+ abs_path = os.path.join(DIRS.var_lib, self.jobs_status_dump_name)
+ self.saver = CachedFileSaver(abs_path)
+ return True
+
+ def start_jobs(self, *jobs):
+ for job in jobs:
+ if job.status not in (JOB_STATUS_INIT, JOB_STATUS_RECOVERING):
+ continue
+
+ if job.actual_name in self.started_jobs[job.module_name]:
+ self.log.info('{0}[{1}] : already served by another job, skipping it'.format(
+ job.module_name, job.real_name))
+ job.status = JOB_STATUS_DROPPED
+ continue
+
+ if not job.is_inited():
+ try:
+ job.init()
+ except Exception as error:
+ self.log.warning("{0}[{1}] : unhandled exception on init : {2}, skipping the job".format(
+ job.module_name, job.real_name, repr(error)))
+ job.status = JOB_STATUS_DROPPED
+ continue
+
+ try:
+ ok = job.check()
+ except Exception as error:
+ self.log.warning("{0}[{1}] : unhandled exception on check : {2}, skipping the job".format(
+ job.module_name, job.real_name, repr(error)))
+ job.status = JOB_STATUS_DROPPED
+ continue
+ if not ok:
+ self.log.info('{0}[{1}] : check failed'.format(job.module_name, job.real_name))
+ job.status = JOB_STATUS_RECOVERING if job.need_to_recheck() else JOB_STATUS_DROPPED
+ continue
+            self.log.info('{0}[{1}] : check succeeded'.format(job.module_name, job.real_name))
+
+ try:
+ self.registry.register(job.full_name())
+ except filelock.Timeout as error:
+ self.log.info('{0}[{1}] : already registered by another process, skipping the job ({2})'.format(
+ job.module_name, job.real_name, error))
+ job.status = JOB_STATUS_DROPPED
+ continue
+ except Exception as error:
+ self.log.warning('{0}[{1}] : registration failed: {2}, skipping the job'.format(
+ job.module_name, job.real_name, error))
+ job.status = JOB_STATUS_DROPPED
+ continue
+
+ try:
+ job.create()
+ except Exception as error:
+ self.log.warning("{0}[{1}] : unhandled exception on create : {2}, skipping the job".format(
+ job.module_name, job.real_name, repr(error)))
+ job.status = JOB_STATUS_DROPPED
+ try:
+ self.registry.unregister(job.full_name())
+ except Exception as error:
+ self.log.warning('{0}[{1}] : deregistration failed: {2}'.format(
+ job.module_name, job.real_name, error))
+ continue
+
+            self.started_jobs[job.module_name][job.actual_name] = True
+ job.status = JOB_STATUS_ACTIVE
+ job.start()
+
+ @staticmethod
+ def keep_alive():
+ if not IS_ATTY:
+ safe_print('\n')
+
+ def garbage_collection(self):
+ if self.config['gc_run'] and self.runs % self.config['gc_interval'] == 0:
+ v = gc.collect()
+ self.log.debug('GC collection run result: {0}'.format(v))
+
+ def restart_recovering_jobs(self):
+ for job in self.jobs:
+ if job.status != JOB_STATUS_RECOVERING:
+ continue
+ if self.runs % job.autodetection_retry != 0:
+ continue
+ self.start_jobs(job)
+
+ def cleanup_jobs(self):
+ self.jobs = [j for j in self.jobs if j.status != JOB_STATUS_DROPPED]
+
+    def have_alive_jobs(self):
+        return any(
+            job.status in (JOB_STATUS_RECOVERING, JOB_STATUS_ACTIVE)
+            for job in self.jobs
+        )
+
+ def save_job_statuses(self):
+ if self.saver is None:
+ return
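+        # persist the statuses at most once every 10 runs (roughly every 10 seconds)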
+ if self.runs % 10 != 0:
+ return
+ dump = JobsStatuses().from_jobs(self.jobs).dump()
+ try:
+ self.saver.save(dump)
+ except Exception as error:
+ self.log.error("error on saving jobs statuses dump : {0}".format(repr(error)))
+
+ def serve_once(self):
+ if not self.have_alive_jobs():
+ self.log.info('no jobs to serve')
+ return False
+
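+        # the plugin ticks once per second; jobs collect in their own daemon threads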
+ time.sleep(1)
+ self.runs += 1
+
+ self.keep_alive()
+ self.garbage_collection()
+ self.cleanup_jobs()
+ self.restart_recovering_jobs()
+ self.save_job_statuses()
+ return True
+
+ def serve(self):
+ while self.serve_once():
+ pass
+
+ def run(self):
+ self.start_jobs(*self.jobs)
+ self.serve()
+
+
+def parse_command_line():
+    opts = sys.argv[1:]
+
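+    # recognized arguments, in any order: a positive integer (update interval), 'debug', 'trace', 'nolock', and module names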
+ debug = False
+ trace = False
+ nolock = False
+ update_every = 1
+ modules_to_run = list()
+
+ def find_first_positive_int(values):
+ return next((v for v in values if v.isdigit() and int(v) >= 1), None)
+
+ u = find_first_positive_int(opts)
+ if u is not None:
+ update_every = int(u)
+ opts.remove(u)
+ if 'debug' in opts:
+ debug = True
+ opts.remove('debug')
+ if 'trace' in opts:
+ trace = True
+ opts.remove('trace')
+ if 'nolock' in opts:
+ nolock = True
+ opts.remove('nolock')
+ if opts:
+ modules_to_run = list(opts)
+
+ cmd = collections.namedtuple(
+ 'CMD',
+ [
+ 'update_every',
+ 'debug',
+ 'trace',
+ 'nolock',
+ 'modules_to_run',
+ ])
+ return cmd(
+ update_every,
+ debug,
+ trace,
+ nolock,
+ modules_to_run,
+ )
+
+
+def guess_module(modules, *names):
+ def guess(n):
+ found = None
+ for i, _ in enumerate(n):
+            cur = [x for x in modules if x.startswith(n[:i + 1])]
+ if not cur:
+ return found
+ found = cur
+ return found
+
+ guessed = list()
+ for name in names:
+ name = name.lower()
+ m = guess(name)
+ if m:
+ guessed.extend(m)
+ return sorted(set(guessed))
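+
+
+# Illustrative example (hypothetical module names):
+#   guess_module(['mysql', 'memcached'], 'my') -> ['mysql']
+#   guess_module(['mysql', 'memcached'], 'm') -> ['memcached', 'mysql']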
+
+
+def disable():
+ if not IS_ATTY:
+ safe_print('DISABLE')
+    sys.exit(0)
+
+
+def main():
+ cmd = parse_command_line()
+ log = PythonDLogger()
+
+ if cmd.debug:
+ log.logger.severity = 'DEBUG'
+ if cmd.trace:
+ log.log_traceback = True
+
+ log.info('using python v{0}'.format(PY_VERSION[0]))
+
+ unknown = set(cmd.modules_to_run) - set(AVAILABLE_MODULES)
+ if unknown:
+        log.error('unknown modules : {0}'.format(sorted(unknown)))
+ guessed = guess_module(AVAILABLE_MODULES, *cmd.modules_to_run)
+ if guessed:
+            log.info('perhaps you meant:\n{0}'.format(pprint.pformat(guessed, width=1)))
+ return
+
+ if DIRS.locks and not cmd.nolock:
+ registry = FileLockRegistry(DIRS.locks)
+ else:
+ registry = DummyRegistry()
+
+ p = Plugin(
+ cmd.modules_to_run or AVAILABLE_MODULES,
+ cmd.update_every,
+ registry,
+ )
+
+ try:
+ if not p.setup():
+ return
+ p.run()
+ except KeyboardInterrupt:
+ pass
+ log.info('exiting from main...')
+
+
+if __name__ == "__main__":
+ main()
+ disable()
diff --git a/collectors/python.d.plugin/python_modules/__init__.py b/collectors/python.d.plugin/python_modules/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/__init__.py
diff --git a/collectors/python.d.plugin/python_modules/bases/FrameworkServices/ExecutableService.py b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/ExecutableService.py
new file mode 100644
index 0000000..dea50ee
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/ExecutableService.py
@@ -0,0 +1,91 @@
+# -*- coding: utf-8 -*-
+# Description:
+# Author: Pawel Krupa (paulfantom)
+# Author: Ilya Mashchenko (ilyam8)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import os
+
+from subprocess import Popen, PIPE
+
+from bases.FrameworkServices.SimpleService import SimpleService
+from bases.collection import find_binary
+
+
+class ExecutableService(SimpleService):
+ def __init__(self, configuration=None, name=None):
+ SimpleService.__init__(self, configuration=configuration, name=name)
+ self.command = None
+
+ def _get_raw_data(self, stderr=False, command=None):
+ """
+ Get raw data from executed command
+ :return: <list>
+ """
+ command = command or self.command
+ self.debug("Executing command '{0}'".format(' '.join(command)))
+ try:
+ p = Popen(command, stdout=PIPE, stderr=PIPE)
+ except Exception as error:
+ self.error('Executing command {0} resulted in error: {1}'.format(command, error))
+ return None
+
+ data = list()
+ std = p.stderr if stderr else p.stdout
+ for line in std:
+ try:
+ data.append(line.decode('utf-8'))
+ except TypeError:
+ continue
+
+ return data
+
+ def check(self):
+ """
+ Parse basic configuration, check if command is whitelisted and is returning values
+ :return: <boolean>
+ """
+ # Preference: 1. "command" from configuration file 2. "command" from plugin (if specified)
+ if 'command' in self.configuration:
+ self.command = self.configuration['command']
+
+        # "command" must be: 1. not None 2. of type <str>
+ if not (self.command and isinstance(self.command, str)):
+ self.error('Command is not defined or command type is not <str>')
+ return False
+
+ # Split "command" into: 1. command <str> 2. options <list>
+ command, opts = self.command.split()[0], self.command.split()[1:]
+
+ # Check for "bad" symbols in options. No pipes, redirects etc.
+ opts_list = ['&', '|', ';', '>', '<']
+ bad_opts = set(''.join(opts)) & set(opts_list)
+ if bad_opts:
+ self.error("Bad command argument(s): {opts}".format(opts=bad_opts))
+ return False
+
+ # Find absolute path ('echo' => '/bin/echo')
+ if '/' not in command:
+ command = find_binary(command)
+ if not command:
+ self.error('Can\'t locate "{command}" binary'.format(command=self.command))
+ return False
+        # Check that the binary exists and is executable
+ else:
+ if not os.access(command, os.X_OK):
+ self.error('"{binary}" is not executable'.format(binary=command))
+ return False
+
+ self.command = [command] + opts if opts else [command]
+
+ try:
+ data = self._get_data()
+ except Exception as error:
+ self.error('_get_data() failed. Command: {command}. Error: {error}'.format(command=self.command,
+ error=error))
+ return False
+
+ if isinstance(data, dict) and data:
+ return True
+ self.error('Command "{command}" returned no data'.format(command=self.command))
+ return False
diff --git a/collectors/python.d.plugin/python_modules/bases/FrameworkServices/LogService.py b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/LogService.py
new file mode 100644
index 0000000..a55e33f
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/LogService.py
@@ -0,0 +1,83 @@
+# -*- coding: utf-8 -*-
+# Description:
+# Author: Pawel Krupa (paulfantom)
+# Author: Ilya Mashchenko (ilyam8)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from glob import glob
+import sys
+import os
+
+from bases.FrameworkServices.SimpleService import SimpleService
+
+
+class LogService(SimpleService):
+ def __init__(self, configuration=None, name=None):
+ SimpleService.__init__(self, configuration=configuration, name=name)
+ self.log_path = self.configuration.get('path')
+ self.__glob_path = self.log_path
+ self._last_position = 0
+ self.__re_find = dict(current=0, run=0, maximum=60)
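+        # re-glob for the newest matching file on the first poll, then again only after 'maximum' consecutive polls without new data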
+ self.__open_args = {'errors': 'replace'} if sys.version_info[0] > 2 else {}
+
+ def _get_raw_data(self):
+ """
+ Get log lines since last poll
+        :return: list of new lines (empty if unchanged), or None on error
+ """
+ lines = list()
+ try:
+ if self.__re_find['current'] == self.__re_find['run']:
+ self._find_recent_log_file()
+ size = os.path.getsize(self.log_path)
+ if size == self._last_position:
+ self.__re_find['current'] += 1
+ return list() # return empty list if nothing has changed
+ elif size < self._last_position:
+ self._last_position = 0 # read from beginning if file has shrunk
+
+ with open(self.log_path, **self.__open_args) as fp:
+ fp.seek(self._last_position)
+ for line in fp:
+ lines.append(line)
+ self._last_position = fp.tell()
+ self.__re_find['current'] = 0
+ except (OSError, IOError) as error:
+ self.__re_find['current'] += 1
+ self.error(str(error))
+
+ return lines or None
+
+ def _find_recent_log_file(self):
+ """
+ :return:
+ """
+ self.__re_find['run'] = self.__re_find['maximum']
+ self.__re_find['current'] = 0
+ self.__glob_path = self.__glob_path or self.log_path # workaround for modules w/o config files
+ path_list = glob(self.__glob_path)
+ if path_list:
+ self.log_path = max(path_list)
+ return True
+ return False
+
+ def check(self):
+ """
+ Parse basic configuration and check if log file exists
+ :return: boolean
+ """
+ if not self.log_path:
+ self.error('No path to log specified')
+            return False
+
+ if self._find_recent_log_file() and os.access(self.log_path, os.R_OK) and os.path.isfile(self.log_path):
+ return True
+ self.error('Cannot access {0}'.format(self.log_path))
+ return False
+
+ def create(self):
+ # set cursor at last byte of log file
+ self._last_position = os.path.getsize(self.log_path)
+ status = SimpleService.create(self)
+ return status
diff --git a/collectors/python.d.plugin/python_modules/bases/FrameworkServices/MySQLService.py b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/MySQLService.py
new file mode 100644
index 0000000..7f5c7d2
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/MySQLService.py
@@ -0,0 +1,165 @@
+# -*- coding: utf-8 -*-
+# Description:
+# Author: Ilya Mashchenko (ilyam8)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from sys import exc_info
+
+try:
+ import MySQLdb
+
+ PY_MYSQL = True
+except ImportError:
+ try:
+ import pymysql as MySQLdb
+
+ PY_MYSQL = True
+ except ImportError:
+ PY_MYSQL = False
+
+from bases.FrameworkServices.SimpleService import SimpleService
+
+
+class MySQLService(SimpleService):
+ def __init__(self, configuration=None, name=None):
+ SimpleService.__init__(self, configuration=configuration, name=name)
+ self.__connection = None
+ self.__conn_properties = dict()
+ self.extra_conn_properties = dict()
+ self.__queries = self.configuration.get('queries', dict())
+ self.queries = dict()
+
+ def __connect(self):
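+        # the connect timeout is bound to the job's update interval, keeping a slow server from stalling the run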
+ try:
+ connection = MySQLdb.connect(connect_timeout=self.update_every, **self.__conn_properties)
+ except (MySQLdb.MySQLError, TypeError, AttributeError) as error:
+ return None, str(error)
+ else:
+ return connection, None
+
+ def check(self):
+ def get_connection_properties(conf, extra_conf):
+ properties = dict()
+ if conf.get('user'):
+ properties['user'] = conf['user']
+ if conf.get('pass'):
+ properties['passwd'] = conf['pass']
+
+ if conf.get('socket'):
+ properties['unix_socket'] = conf['socket']
+ elif conf.get('host'):
+ properties['host'] = conf['host']
+ properties['port'] = int(conf.get('port', 3306))
+ elif conf.get('my.cnf'):
+ properties['read_default_file'] = conf['my.cnf']
+
+ if conf.get('ssl'):
+ properties['ssl'] = conf['ssl']
+
+ if isinstance(extra_conf, dict) and extra_conf:
+ properties.update(extra_conf)
+
+ return properties or None
+
+ def is_valid_queries_dict(raw_queries, log_error):
+ """
+ :param raw_queries: dict:
+ :param log_error: function:
+ :return: dict or None
+
+ raw_queries is valid when: type <dict> and not empty after is_valid_query(for all queries)
+ """
+
+ def is_valid_query(query):
+ return all([isinstance(query, str),
+ query.startswith(('SELECT', 'select', 'SHOW', 'show'))])
+
+ if hasattr(raw_queries, 'keys') and raw_queries:
+ valid_queries = dict([(n, q) for n, q in raw_queries.items() if is_valid_query(q)])
+ bad_queries = set(raw_queries) - set(valid_queries)
+
+ if bad_queries:
+ log_error('Removed query(s): {queries}'.format(queries=bad_queries))
+ return valid_queries
+ else:
+                log_error('Unsupported "queries" format. Must be a non-empty <dict>')
+ return None
+
+ if not PY_MYSQL:
+ self.error('MySQLdb or PyMySQL module is needed to use mysql.chart.py plugin')
+ return False
+
+ # Preference: 1. "queries" from the configuration file 2. "queries" from the module
+ self.queries = self.__queries or self.queries
+ # Check if "self.queries" exist, not empty and all queries are in valid format
+ self.queries = is_valid_queries_dict(self.queries, self.error)
+ if not self.queries:
+            return False
+
+ # Get connection properties
+ self.__conn_properties = get_connection_properties(self.configuration, self.extra_conn_properties)
+ if not self.__conn_properties:
+ self.error('Connection properties are missing')
+ return False
+
+ # Create connection to the database
+ self.__connection, error = self.__connect()
+ if error:
+ self.error('Can\'t establish connection to MySQL: {error}'.format(error=error))
+ return False
+
+ try:
+ data = self._get_data()
+ except Exception as error:
+ self.error('_get_data() failed. Error: {error}'.format(error=error))
+ return False
+
+ if isinstance(data, dict) and data:
+ return True
+ self.error("_get_data() returned no data or type is not <dict>")
+ return False
+
+ def _get_raw_data(self, description=None):
+ """
+ Get raw data from MySQL server
+ :return: dict: fetchall() or (fetchall(), description)
+ """
+
+ if not self.__connection:
+ self.__connection, error = self.__connect()
+ if error:
+ return None
+
+ raw_data = dict()
+ queries = dict(self.queries)
+ try:
+ cursor = self.__connection.cursor()
+ for name, query in queries.items():
+ try:
+ cursor.execute(query)
+ except (MySQLdb.ProgrammingError, MySQLdb.OperationalError) as error:
+ if self.__is_error_critical(err_class=exc_info()[0], err_text=str(error)):
+ cursor.close()
+ raise RuntimeError
+                    self.error('Removed query: {name}[{query}]. Error: {error}'.format(name=name,
+                                                                                       query=query,
+                                                                                       error=error))
+ self.queries.pop(name)
+ continue
+ else:
+ raw_data[name] = (cursor.fetchall(), cursor.description) if description else cursor.fetchall()
+ cursor.close()
+ self.__connection.commit()
+ except (MySQLdb.MySQLError, RuntimeError, TypeError, AttributeError):
+ self.__connection.close()
+ self.__connection = None
+ return None
+ else:
+ return raw_data or None
+
+ @staticmethod
+ def __is_error_critical(err_class, err_text):
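+        # OperationalErrors drop the connection (critical) unless they are access-denied or unknown-column errors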
+ return err_class == MySQLdb.OperationalError and all(['denied' not in err_text,
+ 'Unknown column' not in err_text])
diff --git a/collectors/python.d.plugin/python_modules/bases/FrameworkServices/SimpleService.py b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/SimpleService.py
new file mode 100644
index 0000000..c304cce
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/SimpleService.py
@@ -0,0 +1,259 @@
+# -*- coding: utf-8 -*-
+# Description:
+# Author: Pawel Krupa (paulfantom)
+# Author: Ilya Mashchenko (ilyam8)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+
+from time import sleep, time
+
+from third_party.monotonic import monotonic
+
+from bases.charts import Charts, ChartError, create_runtime_chart
+from bases.collection import safe_print
+from bases.loggers import PythonDLimitedLogger
+
+RUNTIME_CHART_UPDATE = 'BEGIN netdata.runtime_{job_name} {since_last}\n' \
+ 'SET run_time = {elapsed}\n' \
+ 'END\n'
+
+PENALTY_EVERY = 5
+MAX_PENALTY = 10 * 60 # 10 minutes
+
+
+class RuntimeCounters:
+ def __init__(self, configuration):
+ """
+ :param configuration: <dict>
+ """
+ self.update_every = int(configuration.pop('update_every'))
+ self.do_penalty = configuration.pop('penalty')
+
+ self.start_mono = 0
+ self.start_real = 0
+ self.retries = 0
+ self.penalty = 0
+ self.elapsed = 0
+ self.prev_update = 0
+
+ self.runs = 1
+
+ def calc_next(self):
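+        # align the next run to an update_every boundary, then add any accumulated penalty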
+ self.start_mono = monotonic()
+ return self.start_mono - (self.start_mono % self.update_every) + self.update_every + self.penalty
+
+ def sleep_until_next(self):
+ next_time = self.calc_next()
+ while self.start_mono < next_time:
+ sleep(next_time - self.start_mono)
+ self.start_mono = monotonic()
+ self.start_real = time()
+
+ def handle_retries(self):
+ self.retries += 1
+ if self.do_penalty and self.retries % PENALTY_EVERY == 0:
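+            # back off after every PENALTY_EVERY consecutive failures; the delay grows with retries, capped at MAX_PENALTY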
+ self.penalty = round(min(self.retries * self.update_every / 2, MAX_PENALTY))
+
+
+def clean_module_name(name):
+ if name.startswith('pythond_'):
+ return name[8:]
+ return name
+
+
+class SimpleService(PythonDLimitedLogger, object):
+ """
+    Prototype of a Service class.
+    Implements the basic functionality needed to run jobs via `python.d.plugin`.
+ """
+
+ def __init__(self, configuration, name=''):
+ """
+ :param configuration: <dict>
+ :param name: <str>
+ """
+ PythonDLimitedLogger.__init__(self)
+ self.configuration = configuration
+ self.order = list()
+ self.definitions = dict()
+
+ self.module_name = clean_module_name(self.__module__)
+ self.job_name = configuration.pop('job_name')
+ self.override_name = configuration.pop('override_name')
+ self.fake_name = None
+
+ self._runtime_counters = RuntimeCounters(configuration=configuration)
+ self.charts = Charts(job_name=self.actual_name,
+ priority=configuration.pop('priority'),
+ cleanup=configuration.pop('chart_cleanup'),
+ get_update_every=self.get_update_every,
+ module_name=self.module_name)
+
+ def __repr__(self):
+ return '<{cls_bases}: {name}>'.format(cls_bases=', '.join(c.__name__ for c in self.__class__.__bases__),
+ name=self.name)
+
+ @property
+ def name(self):
+ if self.job_name and self.job_name != self.module_name:
+ return '_'.join([self.module_name, self.override_name or self.job_name])
+ return self.module_name
+
+ def actual_name(self):
+ return self.fake_name or self.name
+
+ @property
+ def runs_counter(self):
+ return self._runtime_counters.runs
+
+ @property
+ def update_every(self):
+ return self._runtime_counters.update_every
+
+ @update_every.setter
+ def update_every(self, value):
+ """
+ :param value: <int>
+ :return:
+ """
+ self._runtime_counters.update_every = value
+
+ def get_update_every(self):
+ return self.update_every
+
+ def check(self):
+ """
+ check() prototype
+ :return: boolean
+ """
+ self.debug("job doesn't implement check() method. Using default which simply invokes get_data().")
+ data = self.get_data()
+ if data and isinstance(data, dict):
+ return True
+ self.debug('returned value is wrong: {0}'.format(data))
+ return False
+
+ @create_runtime_chart
+ def create(self):
+ for chart_name in self.order:
+ chart_config = self.definitions.get(chart_name)
+
+ if not chart_config:
+ self.debug("create() => [NOT ADDED] chart '{chart_name}' not in definitions. "
+ "Skipping it.".format(chart_name=chart_name))
+ continue
+
+ # create chart
+ chart_params = [chart_name] + chart_config['options']
+ try:
+ self.charts.add_chart(params=chart_params)
+ except ChartError as error:
+ self.error("create() => [NOT ADDED] (chart '{chart}': {error})".format(chart=chart_name,
+ error=error))
+ continue
+
+ # add dimensions to chart
+ for dimension in chart_config['lines']:
+ try:
+ self.charts[chart_name].add_dimension(dimension)
+ except ChartError as error:
+ self.error("create() => [NOT ADDED] (dimension '{dimension}': {error})".format(dimension=dimension,
+ error=error))
+ continue
+
+ # add variables to chart
+ if 'variables' in chart_config:
+ for variable in chart_config['variables']:
+ try:
+ self.charts[chart_name].add_variable(variable)
+ except ChartError as error:
+ self.error("create() => [NOT ADDED] (variable '{var}': {error})".format(var=variable,
+ error=error))
+ continue
+
+ del self.order
+ del self.definitions
+
+ # True if job has at least 1 chart else False
+ return bool(self.charts)
+
+ def run(self):
+ """
+ Runs job in thread. Handles retries.
+ Exits when job failed or timed out.
+ :return: None
+ """
+ job = self._runtime_counters
+ self.debug('started, update frequency: {freq}'.format(freq=job.update_every))
+
+ while True:
+ job.sleep_until_next()
+
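+            # microseconds since the previous successful update (0 on the very first update)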
+ since = 0
+ if job.prev_update:
+ since = int((job.start_real - job.prev_update) * 1e6)
+
+ try:
+ updated = self.update(interval=since)
+ except Exception as error:
+ self.error('update() unhandled exception: {error}'.format(error=error))
+ updated = False
+
+ job.runs += 1
+
+ if not updated:
+ job.handle_retries()
+ else:
+ job.elapsed = int((monotonic() - job.start_mono) * 1e3)
+ job.prev_update = job.start_real
+ job.retries, job.penalty = 0, 0
+ safe_print(RUNTIME_CHART_UPDATE.format(job_name=self.name,
+ since_last=since,
+ elapsed=job.elapsed))
+ self.debug('update => [{status}] (elapsed time: {elapsed}, failed retries in a row: {retries})'.format(
+ status='OK' if updated else 'FAILED',
+ elapsed=job.elapsed if updated else '-',
+ retries=job.retries))
+
+ def update(self, interval):
+ """
+ :return:
+ """
+ data = self.get_data()
+ if not data:
+ self.debug('get_data() returned no data')
+ return False
+ elif not isinstance(data, dict):
+ self.debug('get_data() returned incorrect type data')
+ return False
+
+ updated = False
+
+ for chart in self.charts:
+ if chart.flags.obsoleted:
+ if chart.can_be_updated(data):
+ chart.refresh()
+ else:
+ continue
+ elif self.charts.cleanup and chart.penalty >= self.charts.cleanup:
+ chart.obsolete()
+ self.info("chart '{0}' was suppressed due to non updating".format(chart.name))
+ continue
+
+ ok = chart.update(data, interval)
+ if ok:
+ updated = True
+
+ if not updated:
+ self.debug('none of the charts has been updated')
+
+ return updated
+
+ def get_data(self):
+ return self._get_data()
+
+ def _get_data(self):
+ raise NotImplementedError
diff --git a/collectors/python.d.plugin/python_modules/bases/FrameworkServices/SocketService.py b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/SocketService.py
new file mode 100644
index 0000000..d6c7550
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/SocketService.py
@@ -0,0 +1,336 @@
+# -*- coding: utf-8 -*-
+# Description:
+# Author: Pawel Krupa (paulfantom)
+# Author: Ilya Mashchenko (ilyam8)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import errno
+import socket
+
+try:
+ import ssl
+except ImportError:
+ _TLS_SUPPORT = False
+else:
+ _TLS_SUPPORT = True
+
+if _TLS_SUPPORT:
+ try:
+ PROTOCOL_TLS = ssl.PROTOCOL_TLS
+ except AttributeError:
+ PROTOCOL_TLS = ssl.PROTOCOL_SSLv23
+
+from bases.FrameworkServices.SimpleService import SimpleService
+
+
+DEFAULT_CONNECT_TIMEOUT = 2.0
+DEFAULT_READ_TIMEOUT = 2.0
+DEFAULT_WRITE_TIMEOUT = 2.0
+
+
+class SocketService(SimpleService):
+ def __init__(self, configuration=None, name=None):
+ self._sock = None
+ self._keep_alive = False
+ self.host = 'localhost'
+ self.port = None
+ self.unix_socket = None
+ self.dgram_socket = False
+ self.request = ''
+ self.tls = False
+ self.cert = None
+ self.key = None
+ self.__socket_config = None
+ self.__empty_request = "".encode()
+ SimpleService.__init__(self, configuration=configuration, name=name)
+ self.connect_timeout = configuration.get('connect_timeout', DEFAULT_CONNECT_TIMEOUT)
+ self.read_timeout = configuration.get('read_timeout', DEFAULT_READ_TIMEOUT)
+ self.write_timeout = configuration.get('write_timeout', DEFAULT_WRITE_TIMEOUT)
+
+ def _socket_error(self, message=None):
+ if self.unix_socket is not None:
+ self.error('unix socket "{socket}": {message}'.format(socket=self.unix_socket,
+ message=message))
+ else:
+ if self.__socket_config is not None:
+ _, _, _, _, sa = self.__socket_config
+ self.error('socket to "{address}" port {port}: {message}'.format(address=sa[0],
+ port=sa[1],
+ message=message))
+ else:
+ self.error('unknown socket: {0}'.format(message))
+
+ def _connect2socket(self, res=None):
+ """
+ Connect to a socket, passing the result of getaddrinfo()
+ :return: boolean
+ """
+ if res is None:
+ res = self.__socket_config
+ if res is None:
+            self.error("Cannot create socket: no address configuration available")
+ return False
+
+ af, sock_type, proto, _, sa = res
+ try:
+ self.debug('Creating socket to "{address}", port {port}'.format(address=sa[0], port=sa[1]))
+ self._sock = socket.socket(af, sock_type, proto)
+ except socket.error as error:
+ self.error('Failed to create socket "{address}", port {port}, error: {error}'.format(address=sa[0],
+ port=sa[1],
+ error=error))
+ self._sock = None
+ self.__socket_config = None
+ return False
+
+ if self.tls:
+ try:
+ self.debug('Encapsulating socket with TLS')
+ self.debug('Using keyfile: {0}, certfile: {1}, cert_reqs: {2}, ssl_version: {3}'.format(
+ self.key, self.cert, ssl.CERT_NONE, PROTOCOL_TLS
+ ))
+ self._sock = ssl.wrap_socket(self._sock,
+ keyfile=self.key,
+ certfile=self.cert,
+ server_side=False,
+ cert_reqs=ssl.CERT_NONE,
+ ssl_version=PROTOCOL_TLS,
+ )
+ except (socket.error, ssl.SSLError, IOError, OSError) as error:
+ self.error('failed to wrap socket : {0}'.format(repr(error)))
+ self._disconnect()
+ self.__socket_config = None
+ return False
+
+ try:
+ self.debug('connecting socket to "{address}", port {port}'.format(address=sa[0], port=sa[1]))
+ self._sock.settimeout(self.connect_timeout)
+ self.debug('set socket connect timeout to: {0}'.format(self._sock.gettimeout()))
+ self._sock.connect(sa)
+ except (socket.error, ssl.SSLError) as error:
+ self.error('Failed to connect to "{address}", port {port}, error: {error}'.format(address=sa[0],
+ port=sa[1],
+ error=error))
+ self._disconnect()
+ self.__socket_config = None
+ return False
+
+ self.debug('connected to "{address}", port {port}'.format(address=sa[0], port=sa[1]))
+ self.__socket_config = res
+ return True
+
+ def _connect2unixsocket(self):
+ """
+ Connect to a unix socket, given its filename
+ :return: boolean
+ """
+ if self.unix_socket is None:
+ self.error("cannot connect to unix socket 'None'")
+ return False
+
+ try:
+ self.debug('attempting DGRAM unix socket "{0}"'.format(self.unix_socket))
+ self._sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
+ self._sock.settimeout(self.connect_timeout)
+ self.debug('set socket connect timeout to: {0}'.format(self._sock.gettimeout()))
+ self._sock.connect(self.unix_socket)
+ self.debug('connected DGRAM unix socket "{0}"'.format(self.unix_socket))
+ return True
+ except socket.error as error:
+ self.debug('Failed to connect DGRAM unix socket "{socket}": {error}'.format(socket=self.unix_socket,
+ error=error))
+
+ try:
+ self.debug('attempting STREAM unix socket "{0}"'.format(self.unix_socket))
+ self._sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+ self._sock.settimeout(self.connect_timeout)
+ self.debug('set socket connect timeout to: {0}'.format(self._sock.gettimeout()))
+ self._sock.connect(self.unix_socket)
+ self.debug('connected STREAM unix socket "{0}"'.format(self.unix_socket))
+ return True
+ except socket.error as error:
+ self.debug('Failed to connect STREAM unix socket "{socket}": {error}'.format(socket=self.unix_socket,
+ error=error))
+ self._sock = None
+ return False
+
+ def _connect(self):
+ """
+ Recreate socket and connect to it since sockets cannot be reused after closing
+ Available configurations are IPv6, IPv4 or UNIX socket
+ :return:
+ """
+ try:
+ if self.unix_socket is not None:
+ self._connect2unixsocket()
+
+ else:
+ if self.__socket_config is not None:
+ self._connect2socket()
+ else:
+ if self.dgram_socket:
+ sock_type = socket.SOCK_DGRAM
+ else:
+ sock_type = socket.SOCK_STREAM
+ for res in socket.getaddrinfo(self.host, self.port, socket.AF_UNSPEC, sock_type):
+ if self._connect2socket(res):
+ break
+
+ except Exception as error:
+ self.error('unhandled exception during connect : {0}'.format(repr(error)))
+ self._sock = None
+ self.__socket_config = None
+
+ def _disconnect(self):
+ """
+ Close socket connection
+ :return:
+ """
+ if self._sock is not None:
+ try:
+ self.debug('closing socket')
+ self._sock.shutdown(2) # 0 - read, 1 - write, 2 - all
+ self._sock.close()
+ except Exception as error:
+ if not (hasattr(error, 'errno') and error.errno == errno.ENOTCONN):
+ self.error(error)
+ self._sock = None
+
+ def _send(self, request=None):
+ """
+ Send request.
+ :return: boolean
+ """
+ # Send request if it is needed
+ if self.request != self.__empty_request:
+ try:
+                self._sock.settimeout(self.write_timeout)
+                self.debug('set socket write timeout to: {0}'.format(self._sock.gettimeout()))
+ self.debug('sending request: {0}'.format(request or self.request))
+ self._sock.send(request or self.request)
+ except Exception as error:
+ self._socket_error('error sending request: {0}'.format(error))
+ self._disconnect()
+ return False
+ return True
+
+ def _receive(self, raw=False):
+ """
+ Receive data from socket
+ :param raw: set `True` to return bytes
+ :type raw: bool
+ :return: decoded str or raw bytes
+ :rtype: str/bytes
+ """
+ data = "" if not raw else b""
+ while True:
+ self.debug('receiving response')
+ try:
+                self._sock.settimeout(self.read_timeout)
+                self.debug('set socket read timeout to: {0}'.format(self._sock.gettimeout()))
+ buf = self._sock.recv(4096)
+ except Exception as error:
+ self._socket_error('failed to receive response: {0}'.format(error))
+ self._disconnect()
+ break
+
+ if buf is None or len(buf) == 0: # handle server disconnect
+ if data == "" or data == b"":
+ self._socket_error('unexpectedly disconnected')
+ else:
+ self.debug('server closed the connection')
+ self._disconnect()
+ break
+
+ self.debug('received data')
+ data += buf.decode('utf-8', 'ignore') if not raw else buf
+ if self._check_raw_data(data):
+ break
+
+ self.debug(u'final response: {0}'.format(data if not raw else u'binary data'))
+ return data
+
+ def _get_raw_data(self, raw=False, request=None):
+ """
+ Get raw data with low-level "socket" module.
+ :param raw: set `True` to return bytes
+ :type raw: bool
+ :return: decoded data (str) or raw data (bytes)
+ :rtype: str/bytes
+ """
+ if self._sock is None:
+ self._connect()
+ if self._sock is None:
+ return None
+
+ # Send request if it is needed
+ if not self._send(request):
+ return None
+
+ data = self._receive(raw)
+
+ if not self._keep_alive:
+ self._disconnect()
+
+ return data
+
+ @staticmethod
+ def _check_raw_data(data):
+ """
+ Check if all data has been gathered from socket
+ :param data: str
+ :return: boolean
+ """
+ return bool(data)
+
+ def _parse_config(self):
+ """
+ Parse configuration data
+ :return: boolean
+ """
+ try:
+ self.unix_socket = str(self.configuration['socket'])
+ except (KeyError, TypeError):
+ self.debug('No unix socket specified. Trying TCP/IP socket.')
+ self.unix_socket = None
+ try:
+ self.host = str(self.configuration['host'])
+ except (KeyError, TypeError):
+ self.debug('No host specified. Using: "{0}"'.format(self.host))
+ try:
+ self.port = int(self.configuration['port'])
+ except (KeyError, TypeError):
+ self.debug('No port specified. Using: "{0}"'.format(self.port))
+
+ self.tls = bool(self.configuration.get('tls', self.tls))
+ if self.tls and not _TLS_SUPPORT:
+ self.warning('TLS requested but no TLS module found, disabling TLS support.')
+ self.tls = False
+ if _TLS_SUPPORT and not self.tls:
+ self.debug('No TLS preference specified, not using TLS.')
+
+ if self.tls and _TLS_SUPPORT:
+ self.key = self.configuration.get('tls_key_file')
+ self.cert = self.configuration.get('tls_cert_file')
+ if not self.cert:
+ # If there's not a valid certificate, clear the key too.
+ self.debug('No valid TLS client certificate configuration found.')
+ self.key = None
+ self.cert = None
+ elif not self.key:
+ # If a key isn't listed, the config may still be
+ # valid, because there may be a key attached to the
+ # certificate.
+ self.info('No TLS client key specified, assuming it\'s attached to the certificate.')
+ self.key = None
+
+ try:
+ self.request = str(self.configuration['request'])
+ except (KeyError, TypeError):
+ self.debug('No request specified. Using: "{0}"'.format(self.request))
+
+ self.request = self.request.encode()
+
+ def check(self):
+ self._parse_config()
+ return SimpleService.check(self)
diff --git a/collectors/python.d.plugin/python_modules/bases/FrameworkServices/UrlService.py b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/UrlService.py
new file mode 100644
index 0000000..1faf036
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/UrlService.py
@@ -0,0 +1,207 @@
+# -*- coding: utf-8 -*-
+# Description:
+# Author: Pawel Krupa (paulfantom)
+# Author: Ilya Mashchenko (ilyam8)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import urllib3
+
+from distutils.version import StrictVersion as version
+
+from bases.FrameworkServices.SimpleService import SimpleService
+
+try:
+ urllib3.disable_warnings()
+except AttributeError:
+ pass
+
+# https://github.com/urllib3/urllib3/blob/master/CHANGES.rst#19-2014-07-04
+# New retry logic and urllib3.util.retry.Retry configuration object. (Issue https://github.com/urllib3/urllib3/pull/326)
+URLLIB3_MIN_REQUIRED_VERSION = '1.9'
+URLLIB3_VERSION = urllib3.__version__
+URLLIB3 = 'urllib3'
+
+
+def version_check():
+ if version(URLLIB3_VERSION) >= version(URLLIB3_MIN_REQUIRED_VERSION):
+ return
+
+ err = '{0} version: {1}, minimum required version: {2}, please upgrade'.format(
+ URLLIB3,
+ URLLIB3_VERSION,
+ URLLIB3_MIN_REQUIRED_VERSION,
+ )
+ raise Exception(err)
+
+
+class UrlService(SimpleService):
+ def __init__(self, configuration=None, name=None):
+ version_check()
+ SimpleService.__init__(self, configuration=configuration, name=name)
+ self.debug("{0} version: {1}".format(URLLIB3, URLLIB3_VERSION))
+ self.url = self.configuration.get('url')
+ self.user = self.configuration.get('user')
+ self.password = self.configuration.get('pass')
+ self.proxy_user = self.configuration.get('proxy_user')
+ self.proxy_password = self.configuration.get('proxy_pass')
+ self.proxy_url = self.configuration.get('proxy_url')
+ self.method = self.configuration.get('method', 'GET')
+ self.header = self.configuration.get('header')
+ self.body = self.configuration.get('body')
+ self.request_timeout = self.configuration.get('timeout', 1)
+ self.respect_retry_after_header = self.configuration.get('respect_retry_after_header')
+ self.tls_verify = self.configuration.get('tls_verify')
+ self.tls_ca_file = self.configuration.get('tls_ca_file')
+ self.tls_key_file = self.configuration.get('tls_key_file')
+ self.tls_cert_file = self.configuration.get('tls_cert_file')
+ self._manager = None
+
+ def __make_headers(self, **header_kw):
+ user = header_kw.get('user') or self.user
+ password = header_kw.get('pass') or self.password
+ proxy_user = header_kw.get('proxy_user') or self.proxy_user
+ proxy_password = header_kw.get('proxy_pass') or self.proxy_password
+ custom_header = header_kw.get('header') or self.header
+ header_params = dict(keep_alive=True)
+ proxy_header_params = dict()
+ if user and password:
+ header_params['basic_auth'] = '{user}:{password}'.format(user=user,
+ password=password)
+ if proxy_user and proxy_password:
+ proxy_header_params['proxy_basic_auth'] = '{user}:{password}'.format(user=proxy_user,
+ password=proxy_password)
+ try:
+ header, proxy_header = urllib3.make_headers(**header_params), urllib3.make_headers(**proxy_header_params)
+ except TypeError as error:
+ self.error('build_header() error: {error}'.format(error=error))
+ return None, None
+ else:
+ header.update(custom_header or dict())
+ return header, proxy_header
+
+ def _build_manager(self, **header_kw):
+ header, proxy_header = self.__make_headers(**header_kw)
+ if header is None or proxy_header is None:
+ return None
+ proxy_url = header_kw.get('proxy_url') or self.proxy_url
+ if proxy_url:
+ manager = urllib3.ProxyManager
+ params = dict(proxy_url=proxy_url, headers=header, proxy_headers=proxy_header)
+ else:
+ manager = urllib3.PoolManager
+ params = dict(headers=header)
+ tls_cert_file = self.tls_cert_file
+ if tls_cert_file:
+ params['cert_file'] = tls_cert_file
+ # NOTE: key_file is useless without cert_file, but
+ # cert_file may include the key as well.
+ tls_key_file = self.tls_key_file
+ if tls_key_file:
+ params['key_file'] = tls_key_file
+ tls_ca_file = self.tls_ca_file
+ if tls_ca_file:
+ params['ca_certs'] = tls_ca_file
+ try:
+ url = header_kw.get('url') or self.url
+ is_https = url.startswith('https')
+ if skip_tls_verify(is_https, self.tls_verify, tls_ca_file):
+ params['ca_certs'] = None
+ params['cert_reqs'] = 'CERT_NONE'
+ if is_https:
+ params['assert_hostname'] = False
+ return manager(**params)
+ except (urllib3.exceptions.ProxySchemeUnknown, TypeError) as error:
+ self.error('build_manager() error:', str(error))
+ return None
+
+ def _get_raw_data(self, url=None, manager=None, **kwargs):
+ """
+ Get raw data from http request
+ :return: str
+ """
+ try:
+ response = self._do_request(url, manager, **kwargs)
+ except Exception as error:
+ self.error('Url: {url}. Error: {error}'.format(url=url or self.url, error=error))
+ return None
+
+ if response.status == 200:
+ if isinstance(response.data, str):
+ return response.data
+ return response.data.decode(errors='ignore')
+ else:
+ self.debug('Url: {url}. Http response status code: {code}'.format(url=url or self.url, code=response.status))
+ return None
+
+ def _get_raw_data_with_status(self, url=None, manager=None, retries=1, redirect=True, **kwargs):
+ """
+ Get status and response body content from http request. Does not catch exceptions
+ :return: int, str
+ """
+ response = self._do_request(url, manager, retries, redirect, **kwargs)
+
+ if isinstance(response.data, str):
+ return response.status, response.data
+ return response.status, response.data.decode(errors='ignore')
+
+ def _do_request(self, url=None, manager=None, retries=1, redirect=True, **kwargs):
+ """
+ Get response from http request. Does not catch exceptions
+ :return: HTTPResponse
+ """
+ url = url or self.url
+ manager = manager or self._manager
+ retry = urllib3.Retry(retries)
+ if hasattr(retry, 'respect_retry_after_header'):
+ retry.respect_retry_after_header = bool(self.respect_retry_after_header)
+
+ if self.body:
+ kwargs['body'] = self.body
+
+ response = manager.request(
+ method=self.method,
+ url=url,
+ timeout=self.request_timeout,
+ retries=retry,
+ headers=manager.headers,
+ redirect=redirect,
+ **kwargs
+ )
+ return response
+
+ def check(self):
+ """
+ Format configuration data and try to connect to server
+ :return: boolean
+ """
+ if not (self.url and isinstance(self.url, str)):
+ self.error('URL is not defined or type is not <str>')
+ return False
+
+ self._manager = self._build_manager()
+ if not self._manager:
+ return False
+
+ try:
+ data = self._get_data()
+ except Exception as error:
+ self.error('_get_data() failed. Url: {url}. Error: {error}'.format(url=self.url, error=error))
+ return False
+
+ if isinstance(data, dict) and data:
+ return True
+ self.error('_get_data() returned no data or type is not <dict>')
+ return False
+
+
+def skip_tls_verify(is_https, tls_verify, tls_ca_file):
+ # default 'tls_verify' value is None
+ # logic is:
+ # - never skip if there is 'tls_ca_file' file
+ # - skip by default for https
+ # - do not skip by default for http
+ if tls_ca_file:
+ return False
+ if is_https and not tls_verify:
+ return True
+ return tls_verify is False
diff --git a/collectors/python.d.plugin/python_modules/bases/FrameworkServices/__init__.py b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/__init__.py
diff --git a/collectors/python.d.plugin/python_modules/bases/__init__.py b/collectors/python.d.plugin/python_modules/bases/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/bases/__init__.py
diff --git a/collectors/python.d.plugin/python_modules/bases/charts.py b/collectors/python.d.plugin/python_modules/bases/charts.py
new file mode 100644
index 0000000..93be43d
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/bases/charts.py
@@ -0,0 +1,417 @@
+# -*- coding: utf-8 -*-
+# Description:
+# Author: Ilya Mashchenko (ilyam8)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from bases.collection import safe_print
+
+CHART_PARAMS = ['type', 'id', 'name', 'title', 'units', 'family', 'context', 'chart_type', 'hidden']
+DIMENSION_PARAMS = ['id', 'name', 'algorithm', 'multiplier', 'divisor', 'hidden']
+VARIABLE_PARAMS = ['id', 'value']
+
+CHART_TYPES = ['line', 'area', 'stacked']
+DIMENSION_ALGORITHMS = ['absolute', 'incremental', 'percentage-of-absolute-row', 'percentage-of-incremental-row']
+
+CHART_BEGIN = 'BEGIN {type}.{id} {since_last}\n'
+CHART_CREATE = "CHART {type}.{id} '{name}' '{title}' '{units}' '{family}' '{context}' " \
+ "{chart_type} {priority} {update_every} '{hidden}' 'python.d.plugin' '{module_name}'\n"
+CHART_OBSOLETE = "CHART {type}.{id} '{name}' '{title}' '{units}' '{family}' '{context}' " \
+ "{chart_type} {priority} {update_every} '{hidden} obsolete'\n"
+
+DIMENSION_CREATE = "DIMENSION '{id}' '{name}' {algorithm} {multiplier} {divisor} '{hidden} {obsolete}'\n"
+DIMENSION_SET = "SET '{id}' = {value}\n"
+
+CHART_VARIABLE_SET = "VARIABLE CHART '{id}' = {value}\n"
+
+RUNTIME_CHART_CREATE = "CHART netdata.runtime_{job_name} '' 'Execution time for {job_name}' 'ms' 'python.d' " \
+ "netdata.pythond_runtime line 145000 {update_every}\n" \
+ "DIMENSION run_time 'run time' absolute 1 1\n"
+
+
+def create_runtime_chart(func):
+ """
+ Calls a wrapped function, then prints runtime chart to stdout.
+
+ Used as a decorator for SimpleService.create() method.
+    The whole point of implementing the 'create runtime chart' functionality as a decorator is
+    to help users who re-implement create() in their own classes.
+
+ :param func: class method
+ :return:
+ """
+
+ def wrapper(*args, **kwargs):
+ self = args[0]
+ chart = RUNTIME_CHART_CREATE.format(
+ job_name=self.name,
+ update_every=self._runtime_counters.update_every,
+ )
+ safe_print(chart)
+ ok = func(*args, **kwargs)
+ return ok
+
+ return wrapper
+
+
+class ChartError(Exception):
+ """Base-class for all exceptions raised by this module"""
+
+
+class DuplicateItemError(ChartError):
+ """Occurs when user re-adds a chart or a dimension that has already been added"""
+
+
+class ItemTypeError(ChartError):
+ """Occurs when user passes value of wrong type to Chart, Dimension or ChartVariable class"""
+
+
+class ItemValueError(ChartError):
+ """Occurs when user passes inappropriate value to Chart, Dimension or ChartVariable class"""
+
+
+class Charts:
+    """Represent a collection of charts
+
+    All charts are stored in a dict.
+    Each chart is an instance of the Chart class.
+    Charts must be added via the Charts.add_chart() method only"""
+
+ def __init__(self, job_name, priority, cleanup, get_update_every, module_name):
+        """
+        :param job_name: <bound method>
+        :param priority: <int>
+        :param cleanup: <int>
+        :param get_update_every: <bound method>
+        :param module_name: <str>
+        """
+ self.job_name = job_name
+ self.priority = priority
+ self.cleanup = cleanup
+ self.get_update_every = get_update_every
+ self.module_name = module_name
+ self.charts = dict()
+
+ def __len__(self):
+ return len(self.charts)
+
+ def __iter__(self):
+ return iter(self.charts.values())
+
+ def __repr__(self):
+ return 'Charts({0})'.format(self)
+
+ def __str__(self):
+ return str([chart for chart in self.charts])
+
+ def __contains__(self, item):
+ return item in self.charts
+
+ def __getitem__(self, item):
+ return self.charts[item]
+
+ def __delitem__(self, key):
+ del self.charts[key]
+
+ def __bool__(self):
+ return bool(self.charts)
+
+ def __nonzero__(self):
+ return self.__bool__()
+
+ def add_chart(self, params):
+ """
+ Create Chart instance and add it to the dict
+
+ Manually adds job name, priority and update_every to params.
+ :param params: <list>
+ :return:
+ """
+ params = [self.job_name()] + params
+ new_chart = Chart(params)
+
+ new_chart.params['update_every'] = self.get_update_every()
+ new_chart.params['priority'] = self.priority
+ new_chart.params['module_name'] = self.module_name
+
+ self.priority += 1
+ self.charts[new_chart.id] = new_chart
+
+ return new_chart
+
+ def active_charts(self):
+ return [chart.id for chart in self if not chart.flags.obsoleted]
+
+
+class Chart:
+ """Represent a chart"""
+
+ def __init__(self, params):
+ """
+ :param params: <list>
+ """
+ if not isinstance(params, list):
+ raise ItemTypeError("'chart' must be a list type")
+        if len(params) < 8:
+ raise ItemValueError("invalid value for 'chart', must be {0}".format(CHART_PARAMS))
+
+ self.params = dict(zip(CHART_PARAMS, (p or str() for p in params)))
+ self.name = '{type}.{id}'.format(type=self.params['type'],
+ id=self.params['id'])
+ if self.params.get('chart_type') not in CHART_TYPES:
+            self.params['chart_type'] = 'line'
+ hidden = str(self.params.get('hidden', ''))
+ self.params['hidden'] = 'hidden' if hidden == 'hidden' else ''
+
+ self.dimensions = list()
+ self.variables = set()
+ self.flags = ChartFlags()
+ self.penalty = 0
+
+ def __getattr__(self, item):
+ try:
+ return self.params[item]
+ except KeyError:
+ raise AttributeError("'{instance}' has no attribute '{attr}'".format(instance=repr(self),
+ attr=item))
+
+ def __repr__(self):
+ return 'Chart({0})'.format(self.id)
+
+ def __str__(self):
+ return self.id
+
+ def __iter__(self):
+ return iter(self.dimensions)
+
+ def __contains__(self, item):
+ return item in [dimension.id for dimension in self.dimensions]
+
+ def add_variable(self, variable):
+ """
+ :param variable: <list>
+ :return:
+ """
+ self.variables.add(ChartVariable(variable))
+
+ def add_dimension(self, dimension):
+ """
+ :param dimension: <list>
+ :return:
+ """
+ dim = Dimension(dimension)
+
+ if dim.id in self:
+ raise DuplicateItemError("'{dimension}' already in '{chart}' dimensions".format(dimension=dim.id,
+ chart=self.name))
+ self.refresh()
+ self.dimensions.append(dim)
+ return dim
+
+ def del_dimension(self, dimension_id, hide=True):
+ if dimension_id not in self:
+ return
+ idx = self.dimensions.index(dimension_id)
+ dimension = self.dimensions[idx]
+ if hide:
+ dimension.params['hidden'] = 'hidden'
+ dimension.params['obsolete'] = 'obsolete'
+ self.create()
+ self.dimensions.remove(dimension)
+
+ def hide_dimension(self, dimension_id, reverse=False):
+ if dimension_id not in self:
+ return
+ idx = self.dimensions.index(dimension_id)
+ dimension = self.dimensions[idx]
+ dimension.params['hidden'] = 'hidden' if not reverse else str()
+ self.refresh()
+
+ def create(self):
+ """
+ :return:
+ """
+ chart = CHART_CREATE.format(**self.params)
+ dimensions = ''.join([dimension.create() for dimension in self.dimensions])
+ variables = ''.join([var.set(var.value) for var in self.variables if var])
+
+ self.flags.push = False
+ self.flags.created = True
+
+ safe_print(chart + dimensions + variables)
+
+ def can_be_updated(self, data):
+ for dim in self.dimensions:
+ if dim.get_value(data) is not None:
+ return True
+ return False
+
+ def update(self, data, interval):
+ updated_dimensions, updated_variables = str(), str()
+
+ for dim in self.dimensions:
+ value = dim.get_value(data)
+ if value is not None:
+ updated_dimensions += dim.set(value)
+
+ for var in self.variables:
+ value = var.get_value(data)
+ if value is not None:
+ updated_variables += var.set(value)
+
+ if updated_dimensions:
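+            # send 0 as the interval on the first update (or after a failed one) so netdata does not interpolate over the gap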
+ since_last = interval if self.flags.updated else 0
+
+ if self.flags.push:
+ self.create()
+
+ chart_begin = CHART_BEGIN.format(type=self.type, id=self.id, since_last=since_last)
+ safe_print(chart_begin, updated_dimensions, updated_variables, 'END\n')
+
+ self.flags.updated = True
+ self.penalty = 0
+ else:
+ self.penalty += 1
+ self.flags.updated = False
+
+ return bool(updated_dimensions)
+
+ def obsolete(self):
+ self.flags.obsoleted = True
+ if self.flags.created:
+ safe_print(CHART_OBSOLETE.format(**self.params))
+
+ def refresh(self):
+ self.penalty = 0
+ self.flags.push = True
+ self.flags.obsoleted = False
+
+
+class Dimension:
+ """Represent a dimension"""
+
+ def __init__(self, params):
+ """
+ :param params: <list>
+ """
+ if not isinstance(params, list):
+ raise ItemTypeError("'dimension' must be a list type")
+ if not params:
+ raise ItemValueError("invalid value for 'dimension', must be {0}".format(DIMENSION_PARAMS))
+
+ self.params = dict(zip(DIMENSION_PARAMS, (p or str() for p in params)))
+ self.params['name'] = self.params.get('name') or self.params['id']
+
+ if self.params.get('algorithm') not in DIMENSION_ALGORITHMS:
+ self.params['algorithm'] = 'absolute'
+ if not isinstance(self.params.get('multiplier'), int):
+ self.params['multiplier'] = 1
+ if not isinstance(self.params.get('divisor'), int):
+ self.params['divisor'] = 1
+ self.params.setdefault('hidden', '')
+ self.params.setdefault('obsolete', '')
+
+ def __getattr__(self, item):
+ try:
+ return self.params[item]
+ except KeyError:
+ raise AttributeError("'{instance}' has no attribute '{attr}'".format(instance=repr(self),
+ attr=item))
+
+ def __repr__(self):
+ return 'Dimension({0})'.format(self.id)
+
+ def __str__(self):
+ return self.id
+
+ def __eq__(self, other):
+ if not isinstance(other, Dimension):
+ return self.id == other
+ return self.id == other.id
+
+ def __ne__(self, other):
+ return not self == other
+
+ def __hash__(self):
+ return hash(repr(self))
+
+ def create(self):
+ return DIMENSION_CREATE.format(**self.params)
+
+ def set(self, value):
+ """
+ :param value: <int> or <str>: numeric value
+ :return:
+ """
+ return DIMENSION_SET.format(id=self.id,
+ value=value)
+
+ def get_value(self, data):
+ try:
+ return int(data[self.id])
+ except (KeyError, TypeError):
+ return None
+
+
+class ChartVariable:
+ """Represent a chart variable"""
+
+ def __init__(self, params):
+ """
+ :param params: <list>
+ """
+ if not isinstance(params, list):
+ raise ItemTypeError("'variable' must be a list type")
+ if not params:
+ raise ItemValueError("invalid value for 'variable' must be: {0}".format(VARIABLE_PARAMS))
+
+ self.params = dict(zip(VARIABLE_PARAMS, params))
+ self.params.setdefault('value', None)
+
+ def __getattr__(self, item):
+ try:
+ return self.params[item]
+ except KeyError:
+ raise AttributeError("'{instance}' has no attribute '{attr}'".format(instance=repr(self),
+ attr=item))
+
+ def __bool__(self):
+ return self.value is not None
+
+ def __nonzero__(self):
+ return self.__bool__()
+
+ def __repr__(self):
+ return 'ChartVariable({0})'.format(self.id)
+
+ def __str__(self):
+ return self.id
+
+ def __eq__(self, other):
+ if isinstance(other, ChartVariable):
+ return self.id == other.id
+ return False
+
+ def __ne__(self, other):
+ return not self == other
+
+ def __hash__(self):
+ return hash(repr(self))
+
+ def set(self, value):
+ return CHART_VARIABLE_SET.format(id=self.id,
+ value=value)
+
+ def get_value(self, data):
+ try:
+ return int(data[self.id])
+ except (KeyError, TypeError):
+ return None
+
+
+class ChartFlags:
+ def __init__(self):
+ self.push = True
+ self.created = False
+ self.updated = False
+ self.obsoleted = False
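The classes above emit netdata's external plugin text protocol (CHART, DIMENSION, BEGIN/SET/END and VARIABLE lines) from plain parameter lists. A minimal lifecycle sketch, assuming the CHART_PARAMS order defined earlier in this file ('type', 'id', 'name', 'title', 'units', 'family', 'context', 'chart_type', 'hidden'):

    from bases.charts import Chart

    chart = Chart(['example', 'random', None, 'Random Numbers', 'value',
                   'random', 'example.random', 'line', None])
    chart.add_dimension(['random1'])           # name defaults to id, algorithm to 'absolute'
    chart.create()                             # prints the CHART and DIMENSION definitions
    chart.update({'random1': 42}, interval=1)  # prints one BEGIN/SET/END block
    chart.hide_dimension('random1')            # marks it hidden; re-sent on the next update
    chart.obsolete()                           # tells netdata the chart is gone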
diff --git a/collectors/python.d.plugin/python_modules/bases/collection.py b/collectors/python.d.plugin/python_modules/bases/collection.py
new file mode 100644
index 0000000..93bf8cf
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/bases/collection.py
@@ -0,0 +1,117 @@
+# -*- coding: utf-8 -*-
+# Description:
+# Author: Ilya Mashchenko (ilyam8)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import os
+
+from threading import Lock
+
+PATH = os.getenv('PATH', '/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin').split(':')
+
+CHART_BEGIN = 'BEGIN {0} {1}\n'
+CHART_CREATE = "CHART {0} '{1}' '{2}' '{3}' '{4}' '{5}' {6} {7} {8}\n"
+DIMENSION_CREATE = "DIMENSION '{0}' '{1}' {2} {3} {4} '{5}'\n"
+DIMENSION_SET = "SET '{0}' = {1}\n"
+
+print_lock = Lock()
+
+
+def setdefault_values(config, base_dict):
+ for key, value in base_dict.items():
+ config.setdefault(key, value)
+ return config
+
+
+def run_and_exit(func):
+ def wrapper(*args, **kwargs):
+ func(*args, **kwargs)
+ exit(1)
+
+ return wrapper
+
+
+def on_try_except_finally(on_except=(None,), on_finally=(None,)):
+ except_func = on_except[0]
+ finally_func = on_finally[0]
+
+ def decorator(func):
+ def wrapper(*args, **kwargs):
+ try:
+ func(*args, **kwargs)
+ except Exception:
+ if except_func:
+ except_func(*on_except[1:])
+ finally:
+ if finally_func:
+ finally_func(*on_finally[1:])
+
+ return wrapper
+
+ return decorator
+
+
+def static_vars(**kwargs):
+ def decorate(func):
+ for k in kwargs:
+ setattr(func, k, kwargs[k])
+ return func
+
+ return decorate
+
+
+@on_try_except_finally(on_except=(exit, 1))
+def safe_print(*msg):
+ """
+ :param msg:
+ :return:
+ """
+ print_lock.acquire()
+ print(''.join(msg))
+ print_lock.release()
+
+
+def find_binary(binary):
+ """
+ :param binary: <str>
+ :return:
+ """
+ for directory in PATH:
+ binary_name = os.path.join(directory, binary)
+ if os.path.isfile(binary_name) and os.access(binary_name, os.X_OK):
+ return binary_name
+ return None
+
+
+def read_last_line(f):
+ with open(f, 'rb') as opened:
+ opened.seek(-2, 2)
+ while opened.read(1) != b'\n':
+ opened.seek(-2, 1)
+ if opened.tell() == 0:
+ break
+ result = opened.readline()
+ return result.decode()
+
+
+def unicode_str(arg):
+ """Return the argument as a unicode string.
+
+ The `unicode` built-in was removed in Python 3, where `str` takes its
+ place. This helper tries Python 2's `unicode` first and, if it does not
+ exist, assumes Python 3 and falls back to `str`.
+
+ :param arg:
+ :return: <str>
+ """
+ # TODO: fix
+ try:
+ # https://github.com/netdata/netdata/issues/7613
+ if isinstance(arg, unicode):
+ return arg
+ return unicode(arg, errors='ignore')
+ # https://github.com/netdata/netdata/issues/7642
+ except TypeError:
+ return unicode(arg)
+ except NameError:
+ return str(arg)
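These helpers are the shared toolbox for collectors: find_binary resolves an executable on PATH, read_last_line seeks backwards to fetch only the final line of a file, and on_try_except_finally attaches optional callbacks around a call (note it swallows the exception itself). A short usage sketch; the binary name is only illustrative:

    from bases.collection import find_binary, on_try_except_finally

    smartctl = find_binary('smartctl')   # absolute path, or None if not on PATH

    def cleanup():
        pass  # e.g. release a resource

    @on_try_except_finally(on_finally=(cleanup,))
    def collect():
        pass  # cleanup() runs whether or not collect() raises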
diff --git a/collectors/python.d.plugin/python_modules/bases/loaders.py b/collectors/python.d.plugin/python_modules/bases/loaders.py
new file mode 100644
index 0000000..095f3a3
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/bases/loaders.py
@@ -0,0 +1,46 @@
+# -*- coding: utf-8 -*-
+# Description:
+# Author: Ilya Mashchenko (ilyam8)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+
+from sys import version_info
+
+PY_VERSION = version_info[:2]
+
+try:
+ if PY_VERSION > (3, 1):
+ from pyyaml3 import SafeLoader as YamlSafeLoader
+ else:
+ from pyyaml2 import SafeLoader as YamlSafeLoader
+except ImportError:
+ from yaml import SafeLoader as YamlSafeLoader
+
+
+try:
+ from collections import OrderedDict
+except ImportError:
+ from third_party.ordereddict import OrderedDict
+
+
+DEFAULT_MAPPING_TAG = 'tag:yaml.org,2002:map' if PY_VERSION > (3, 1) else u'tag:yaml.org,2002:map'
+
+
+def dict_constructor(loader, node):
+ return OrderedDict(loader.construct_pairs(node))
+
+
+YamlSafeLoader.add_constructor(DEFAULT_MAPPING_TAG, dict_constructor)
+
+
+def load_yaml(stream):
+ loader = YamlSafeLoader(stream)
+ try:
+ return loader.get_single_data()
+ finally:
+ loader.dispose()
+
+
+def load_config(file_name):
+ with open(file_name, 'r') as stream:
+ return load_yaml(stream)
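load_config is the single entry point modules use for their YAML configuration. Because dict_constructor is registered on the mapping tag, every mapping comes back as an OrderedDict, so jobs run in the order they appear in the file. A sketch with a hypothetical path:

    from bases.loaders import load_config

    config = load_config('/etc/netdata/python.d/example.conf')  # hypothetical path
    for job_name, job_config in config.items():                 # file order preserved
        print(job_name, job_config)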
diff --git a/collectors/python.d.plugin/python_modules/bases/loggers.py b/collectors/python.d.plugin/python_modules/bases/loggers.py
new file mode 100644
index 0000000..47f196a
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/bases/loggers.py
@@ -0,0 +1,206 @@
+# -*- coding: utf-8 -*-
+# Description:
+# Author: Ilya Mashchenko (ilyam8)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import logging
+import traceback
+
+from sys import exc_info
+
+try:
+ from time import monotonic as time
+except ImportError:
+ from time import time
+
+from bases.collection import on_try_except_finally, unicode_str
+
+
+LOGGING_LEVELS = {'CRITICAL': 50,
+ 'ERROR': 40,
+ 'WARNING': 30,
+ 'INFO': 20,
+ 'DEBUG': 10,
+ 'NOTSET': 0}
+
+DEFAULT_LOG_LINE_FORMAT = '%(asctime)s: %(name)s %(levelname)s : %(message)s'
+DEFAULT_LOG_TIME_FORMAT = '%Y-%m-%d %H:%M:%S'
+
+PYTHON_D_LOG_LINE_FORMAT = '%(asctime)s: %(name)s %(levelname)s: %(module_name)s[%(job_name)s] : %(message)s'
+PYTHON_D_LOG_NAME = 'python.d'
+
+
+def limiter(log_max_count=30, allowed_in_seconds=60):
+ def on_decorator(func):
+
+ def on_call(*args):
+ current_time = args[0]._runtime_counters.start_mono
+ lc = args[0]._logger_counters
+
+ if lc.logged and lc.logged % log_max_count == 0:
+ if current_time - lc.time_to_compare <= allowed_in_seconds:
+ lc.dropped += 1
+ return
+ lc.time_to_compare = current_time
+
+ lc.logged += 1
+ func(*args)
+
+ return on_call
+ return on_decorator
+
+
+def add_traceback(func):
+ def on_call(*args):
+ self = args[0]
+
+ if not self.log_traceback:
+ func(*args)
+ else:
+ if exc_info()[0]:
+ func(*args)
+ func(self, traceback.format_exc())
+ else:
+ func(*args)
+
+ return on_call
+
+
+class LoggerCounters:
+ def __init__(self):
+ self.logged = 0
+ self.dropped = 0
+ self.time_to_compare = time()
+
+ def __repr__(self):
+ return 'LoggerCounters(logged: {logged}, dropped: {dropped})'.format(logged=self.logged,
+ dropped=self.dropped)
+
+
+class BaseLogger(object):
+ def __init__(self, logger_name, log_fmt=DEFAULT_LOG_LINE_FORMAT, date_fmt=DEFAULT_LOG_TIME_FORMAT,
+ handler=logging.StreamHandler):
+ """
+ :param logger_name: <str>
+ :param log_fmt: <str>
+ :param date_fmt: <str>
+ :param handler: <logging handler>
+ """
+ self.logger = logging.getLogger(logger_name)
+ if not self.has_handlers():
+ self.severity = 'INFO'
+ self.logger.addHandler(handler())
+ self.set_formatter(fmt=log_fmt, date_fmt=date_fmt)
+
+ def __repr__(self):
+ return '<Logger: {name}>'.format(name=self.logger.name)
+
+ def set_formatter(self, fmt, date_fmt=DEFAULT_LOG_TIME_FORMAT):
+ """
+ :param fmt: <str>
+ :param date_fmt: <str>
+ :return:
+ """
+ if self.has_handlers():
+ self.logger.handlers[0].setFormatter(logging.Formatter(fmt=fmt, datefmt=date_fmt))
+
+ def has_handlers(self):
+ return self.logger.handlers
+
+ @property
+ def severity(self):
+ return self.logger.getEffectiveLevel()
+
+ @severity.setter
+ def severity(self, level):
+ """
+ :param level: <str> or <int>
+ :return:
+ """
+ if level in LOGGING_LEVELS:
+ self.logger.setLevel(LOGGING_LEVELS[level])
+
+ def debug(self, *msg, **kwargs):
+ self.logger.debug(' '.join(map(unicode_str, msg)), **kwargs)
+
+ def info(self, *msg, **kwargs):
+ self.logger.info(' '.join(map(unicode_str, msg)), **kwargs)
+
+ def warning(self, *msg, **kwargs):
+ self.logger.warning(' '.join(map(unicode_str, msg)), **kwargs)
+
+ def error(self, *msg, **kwargs):
+ self.logger.error(' '.join(map(unicode_str, msg)), **kwargs)
+
+ def alert(self, *msg, **kwargs):
+ self.logger.critical(' '.join(map(unicode_str, msg)), **kwargs)
+
+ @on_try_except_finally(on_finally=(exit, 1))
+ def fatal(self, *msg, **kwargs):
+ self.logger.critical(' '.join(map(unicode_str, msg)), **kwargs)
+
+
+class PythonDLogger(object):
+ def __init__(self, logger_name=PYTHON_D_LOG_NAME, log_fmt=PYTHON_D_LOG_LINE_FORMAT):
+ """
+ :param logger_name: <str>
+ :param log_fmt: <str>
+ """
+ self.logger = BaseLogger(logger_name, log_fmt=log_fmt)
+ self.module_name = 'plugin'
+ self.job_name = 'main'
+ self._logger_counters = LoggerCounters()
+
+ _LOG_TRACEBACK = False
+
+ @property
+ def log_traceback(self):
+ return PythonDLogger._LOG_TRACEBACK
+
+ @log_traceback.setter
+ def log_traceback(self, value):
+ PythonDLogger._LOG_TRACEBACK = value
+
+ def debug(self, *msg):
+ self.logger.debug(*msg, extra={'module_name': self.module_name,
+ 'job_name': self.job_name or self.module_name})
+
+ def info(self, *msg):
+ self.logger.info(*msg, extra={'module_name': self.module_name,
+ 'job_name': self.job_name or self.module_name})
+
+ def warning(self, *msg):
+ self.logger.warning(*msg, extra={'module_name': self.module_name,
+ 'job_name': self.job_name or self.module_name})
+
+ @add_traceback
+ def error(self, *msg):
+ self.logger.error(*msg, extra={'module_name': self.module_name,
+ 'job_name': self.job_name or self.module_name})
+
+ @add_traceback
+ def alert(self, *msg):
+ self.logger.alert(*msg, extra={'module_name': self.module_name,
+ 'job_name': self.job_name or self.module_name})
+
+ def fatal(self, *msg):
+ self.logger.fatal(*msg, extra={'module_name': self.module_name,
+ 'job_name': self.job_name or self.module_name})
+
+
+class PythonDLimitedLogger(PythonDLogger):
+ @limiter()
+ def info(self, *msg):
+ PythonDLogger.info(self, *msg)
+
+ @limiter()
+ def warning(self, *msg):
+ PythonDLogger.warning(self, *msg)
+
+ @limiter()
+ def error(self, *msg):
+ PythonDLogger.error(self, *msg)
+
+ @limiter()
+ def alert(self, *msg):
+ PythonDLogger.alert(self, *msg)
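Jobs normally log through these classes; the limiter drops messages once 30 have been logged inside a 60-second window. Note that its on_call reads self._runtime_counters.start_mono, so PythonDLimitedLogger only works on objects that also carry the framework's runtime counters. The plain PythonDLogger is self-contained; a sketch:

    from bases.loggers import PythonDLogger

    log = PythonDLogger()
    log.module_name = 'example'
    log.job_name = 'job1'
    log.info('collector started')   # python.d INFO: example[job1] : collector started

    log.log_traceback = True
    try:
        1 / 0
    except ZeroDivisionError:
        log.error('update failed')  # add_traceback appends the formatted traceback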
diff --git a/collectors/python.d.plugin/python_modules/pyyaml2/__init__.py b/collectors/python.d.plugin/python_modules/pyyaml2/__init__.py
new file mode 100644
index 0000000..4d560e4
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/pyyaml2/__init__.py
@@ -0,0 +1,316 @@
+# SPDX-License-Identifier: MIT
+
+from error import *
+
+from tokens import *
+from events import *
+from nodes import *
+
+from loader import *
+from dumper import *
+
+__version__ = '3.11'
+
+try:
+ from cyaml import *
+ __with_libyaml__ = True
+except ImportError:
+ __with_libyaml__ = False
+
+def scan(stream, Loader=Loader):
+ """
+ Scan a YAML stream and produce scanning tokens.
+ """
+ loader = Loader(stream)
+ try:
+ while loader.check_token():
+ yield loader.get_token()
+ finally:
+ loader.dispose()
+
+def parse(stream, Loader=Loader):
+ """
+ Parse a YAML stream and produce parsing events.
+ """
+ loader = Loader(stream)
+ try:
+ while loader.check_event():
+ yield loader.get_event()
+ finally:
+ loader.dispose()
+
+def compose(stream, Loader=Loader):
+ """
+ Parse the first YAML document in a stream
+ and produce the corresponding representation tree.
+ """
+ loader = Loader(stream)
+ try:
+ return loader.get_single_node()
+ finally:
+ loader.dispose()
+
+def compose_all(stream, Loader=Loader):
+ """
+ Parse all YAML documents in a stream
+ and produce corresponding representation trees.
+ """
+ loader = Loader(stream)
+ try:
+ while loader.check_node():
+ yield loader.get_node()
+ finally:
+ loader.dispose()
+
+def load(stream, Loader=Loader):
+ """
+ Parse the first YAML document in a stream
+ and produce the corresponding Python object.
+ """
+ loader = Loader(stream)
+ try:
+ return loader.get_single_data()
+ finally:
+ loader.dispose()
+
+def load_all(stream, Loader=Loader):
+ """
+ Parse all YAML documents in a stream
+ and produce corresponding Python objects.
+ """
+ loader = Loader(stream)
+ try:
+ while loader.check_data():
+ yield loader.get_data()
+ finally:
+ loader.dispose()
+
+def safe_load(stream):
+ """
+ Parse the first YAML document in a stream
+ and produce the corresponding Python object.
+ Resolve only basic YAML tags.
+ """
+ return load(stream, SafeLoader)
+
+def safe_load_all(stream):
+ """
+ Parse all YAML documents in a stream
+ and produce corresponding Python objects.
+ Resolve only basic YAML tags.
+ """
+ return load_all(stream, SafeLoader)
+
+def emit(events, stream=None, Dumper=Dumper,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None):
+ """
+ Emit YAML parsing events into a stream.
+ If stream is None, return the produced string instead.
+ """
+ getvalue = None
+ if stream is None:
+ from StringIO import StringIO
+ stream = StringIO()
+ getvalue = stream.getvalue
+ dumper = Dumper(stream, canonical=canonical, indent=indent, width=width,
+ allow_unicode=allow_unicode, line_break=line_break)
+ try:
+ for event in events:
+ dumper.emit(event)
+ finally:
+ dumper.dispose()
+ if getvalue:
+ return getvalue()
+
+def serialize_all(nodes, stream=None, Dumper=Dumper,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding='utf-8', explicit_start=None, explicit_end=None,
+ version=None, tags=None):
+ """
+ Serialize a sequence of representation trees into a YAML stream.
+ If stream is None, return the produced string instead.
+ """
+ getvalue = None
+ if stream is None:
+ if encoding is None:
+ from StringIO import StringIO
+ else:
+ from cStringIO import StringIO
+ stream = StringIO()
+ getvalue = stream.getvalue
+ dumper = Dumper(stream, canonical=canonical, indent=indent, width=width,
+ allow_unicode=allow_unicode, line_break=line_break,
+ encoding=encoding, version=version, tags=tags,
+ explicit_start=explicit_start, explicit_end=explicit_end)
+ try:
+ dumper.open()
+ for node in nodes:
+ dumper.serialize(node)
+ dumper.close()
+ finally:
+ dumper.dispose()
+ if getvalue:
+ return getvalue()
+
+def serialize(node, stream=None, Dumper=Dumper, **kwds):
+ """
+ Serialize a representation tree into a YAML stream.
+ If stream is None, return the produced string instead.
+ """
+ return serialize_all([node], stream, Dumper=Dumper, **kwds)
+
+def dump_all(documents, stream=None, Dumper=Dumper,
+ default_style=None, default_flow_style=None,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding='utf-8', explicit_start=None, explicit_end=None,
+ version=None, tags=None):
+ """
+ Serialize a sequence of Python objects into a YAML stream.
+ If stream is None, return the produced string instead.
+ """
+ getvalue = None
+ if stream is None:
+ if encoding is None:
+ from StringIO import StringIO
+ else:
+ from cStringIO import StringIO
+ stream = StringIO()
+ getvalue = stream.getvalue
+ dumper = Dumper(stream, default_style=default_style,
+ default_flow_style=default_flow_style,
+ canonical=canonical, indent=indent, width=width,
+ allow_unicode=allow_unicode, line_break=line_break,
+ encoding=encoding, version=version, tags=tags,
+ explicit_start=explicit_start, explicit_end=explicit_end)
+ try:
+ dumper.open()
+ for data in documents:
+ dumper.represent(data)
+ dumper.close()
+ finally:
+ dumper.dispose()
+ if getvalue:
+ return getvalue()
+
+def dump(data, stream=None, Dumper=Dumper, **kwds):
+ """
+ Serialize a Python object into a YAML stream.
+ If stream is None, return the produced string instead.
+ """
+ return dump_all([data], stream, Dumper=Dumper, **kwds)
+
+def safe_dump_all(documents, stream=None, **kwds):
+ """
+ Serialize a sequence of Python objects into a YAML stream.
+ Produce only basic YAML tags.
+ If stream is None, return the produced string instead.
+ """
+ return dump_all(documents, stream, Dumper=SafeDumper, **kwds)
+
+def safe_dump(data, stream=None, **kwds):
+ """
+ Serialize a Python object into a YAML stream.
+ Produce only basic YAML tags.
+ If stream is None, return the produced string instead.
+ """
+ return dump_all([data], stream, Dumper=SafeDumper, **kwds)
+
+def add_implicit_resolver(tag, regexp, first=None,
+ Loader=Loader, Dumper=Dumper):
+ """
+ Add an implicit scalar detector.
+ If an implicit scalar value matches the given regexp,
+ the corresponding tag is assigned to the scalar.
+ first is a sequence of possible initial characters or None.
+ """
+ Loader.add_implicit_resolver(tag, regexp, first)
+ Dumper.add_implicit_resolver(tag, regexp, first)
+
+def add_path_resolver(tag, path, kind=None, Loader=Loader, Dumper=Dumper):
+ """
+ Add a path based resolver for the given tag.
+ A path is a list of keys that forms a path
+ to a node in the representation tree.
+ Keys can be string values, integers, or None.
+ """
+ Loader.add_path_resolver(tag, path, kind)
+ Dumper.add_path_resolver(tag, path, kind)
+
+def add_constructor(tag, constructor, Loader=Loader):
+ """
+ Add a constructor for the given tag.
+ Constructor is a function that accepts a Loader instance
+ and a node object and produces the corresponding Python object.
+ """
+ Loader.add_constructor(tag, constructor)
+
+def add_multi_constructor(tag_prefix, multi_constructor, Loader=Loader):
+ """
+ Add a multi-constructor for the given tag prefix.
+ Multi-constructor is called for a node if its tag starts with tag_prefix.
+ Multi-constructor accepts a Loader instance, a tag suffix,
+ and a node object and produces the corresponding Python object.
+ """
+ Loader.add_multi_constructor(tag_prefix, multi_constructor)
+
+def add_representer(data_type, representer, Dumper=Dumper):
+ """
+ Add a representer for the given type.
+ Representer is a function accepting a Dumper instance
+ and an instance of the given data type
+ and producing the corresponding representation node.
+ """
+ Dumper.add_representer(data_type, representer)
+
+def add_multi_representer(data_type, multi_representer, Dumper=Dumper):
+ """
+ Add a representer for the given type.
+ Multi-representer is a function accepting a Dumper instance
+ and an instance of the given data type or subtype
+ and producing the corresponding representation node.
+ """
+ Dumper.add_multi_representer(data_type, multi_representer)
+
+class YAMLObjectMetaclass(type):
+ """
+ The metaclass for YAMLObject.
+ """
+ def __init__(cls, name, bases, kwds):
+ super(YAMLObjectMetaclass, cls).__init__(name, bases, kwds)
+ if 'yaml_tag' in kwds and kwds['yaml_tag'] is not None:
+ cls.yaml_loader.add_constructor(cls.yaml_tag, cls.from_yaml)
+ cls.yaml_dumper.add_representer(cls, cls.to_yaml)
+
+class YAMLObject(object):
+ """
+ An object that can dump itself to a YAML stream
+ and load itself from a YAML stream.
+ """
+
+ __metaclass__ = YAMLObjectMetaclass
+ __slots__ = () # no direct instantiation, so allow immutable subclasses
+
+ yaml_loader = Loader
+ yaml_dumper = Dumper
+
+ yaml_tag = None
+ yaml_flow_style = None
+
+ def from_yaml(cls, loader, node):
+ """
+ Convert a representation node to a Python object.
+ """
+ return loader.construct_yaml_object(node, cls)
+ from_yaml = classmethod(from_yaml)
+
+ def to_yaml(cls, dumper, data):
+ """
+ Convert a Python object to a representation node.
+ """
+ return dumper.represent_yaml_object(cls.yaml_tag, data, cls,
+ flow_style=cls.yaml_flow_style)
+ to_yaml = classmethod(to_yaml)
+
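This tree is a bundled copy of PyYAML 3.11 for Python 2 (the pyyaml3 tree carries the Python 3 port); the implicit relative imports above only work on Python 2. Typical round-trip use of the public API, written in the same Python 2 dialect:

    import pyyaml2 as yaml   # upstream code would simply 'import yaml'

    data = yaml.safe_load("a: 1\nb: [2, 3]\n")             # {'a': 1, 'b': [2, 3]}
    text = yaml.safe_dump(data, default_flow_style=False)  # block-style output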
diff --git a/collectors/python.d.plugin/python_modules/pyyaml2/composer.py b/collectors/python.d.plugin/python_modules/pyyaml2/composer.py
new file mode 100644
index 0000000..6b41b80
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/pyyaml2/composer.py
@@ -0,0 +1,140 @@
+# SPDX-License-Identifier: MIT
+
+__all__ = ['Composer', 'ComposerError']
+
+from error import MarkedYAMLError
+from events import *
+from nodes import *
+
+class ComposerError(MarkedYAMLError):
+ pass
+
+class Composer(object):
+
+ def __init__(self):
+ self.anchors = {}
+
+ def check_node(self):
+ # Drop the STREAM-START event.
+ if self.check_event(StreamStartEvent):
+ self.get_event()
+
+ # Are there more documents available?
+ return not self.check_event(StreamEndEvent)
+
+ def get_node(self):
+ # Get the root node of the next document.
+ if not self.check_event(StreamEndEvent):
+ return self.compose_document()
+
+ def get_single_node(self):
+ # Drop the STREAM-START event.
+ self.get_event()
+
+ # Compose a document if the stream is not empty.
+ document = None
+ if not self.check_event(StreamEndEvent):
+ document = self.compose_document()
+
+ # Ensure that the stream contains no more documents.
+ if not self.check_event(StreamEndEvent):
+ event = self.get_event()
+ raise ComposerError("expected a single document in the stream",
+ document.start_mark, "but found another document",
+ event.start_mark)
+
+ # Drop the STREAM-END event.
+ self.get_event()
+
+ return document
+
+ def compose_document(self):
+ # Drop the DOCUMENT-START event.
+ self.get_event()
+
+ # Compose the root node.
+ node = self.compose_node(None, None)
+
+ # Drop the DOCUMENT-END event.
+ self.get_event()
+
+ self.anchors = {}
+ return node
+
+ def compose_node(self, parent, index):
+ if self.check_event(AliasEvent):
+ event = self.get_event()
+ anchor = event.anchor
+ if anchor not in self.anchors:
+ raise ComposerError(None, None, "found undefined alias %r"
+ % anchor.encode('utf-8'), event.start_mark)
+ return self.anchors[anchor]
+ event = self.peek_event()
+ anchor = event.anchor
+ if anchor is not None:
+ if anchor in self.anchors:
+ raise ComposerError("found duplicate anchor %r; first occurence"
+ % anchor.encode('utf-8'), self.anchors[anchor].start_mark,
+ "second occurence", event.start_mark)
+ self.descend_resolver(parent, index)
+ if self.check_event(ScalarEvent):
+ node = self.compose_scalar_node(anchor)
+ elif self.check_event(SequenceStartEvent):
+ node = self.compose_sequence_node(anchor)
+ elif self.check_event(MappingStartEvent):
+ node = self.compose_mapping_node(anchor)
+ self.ascend_resolver()
+ return node
+
+ def compose_scalar_node(self, anchor):
+ event = self.get_event()
+ tag = event.tag
+ if tag is None or tag == u'!':
+ tag = self.resolve(ScalarNode, event.value, event.implicit)
+ node = ScalarNode(tag, event.value,
+ event.start_mark, event.end_mark, style=event.style)
+ if anchor is not None:
+ self.anchors[anchor] = node
+ return node
+
+ def compose_sequence_node(self, anchor):
+ start_event = self.get_event()
+ tag = start_event.tag
+ if tag is None or tag == u'!':
+ tag = self.resolve(SequenceNode, None, start_event.implicit)
+ node = SequenceNode(tag, [],
+ start_event.start_mark, None,
+ flow_style=start_event.flow_style)
+ if anchor is not None:
+ self.anchors[anchor] = node
+ index = 0
+ while not self.check_event(SequenceEndEvent):
+ node.value.append(self.compose_node(node, index))
+ index += 1
+ end_event = self.get_event()
+ node.end_mark = end_event.end_mark
+ return node
+
+ def compose_mapping_node(self, anchor):
+ start_event = self.get_event()
+ tag = start_event.tag
+ if tag is None or tag == u'!':
+ tag = self.resolve(MappingNode, None, start_event.implicit)
+ node = MappingNode(tag, [],
+ start_event.start_mark, None,
+ flow_style=start_event.flow_style)
+ if anchor is not None:
+ self.anchors[anchor] = node
+ while not self.check_event(MappingEndEvent):
+ #key_event = self.peek_event()
+ item_key = self.compose_node(node, None)
+ #if item_key in node.value:
+ # raise ComposerError("while composing a mapping", start_event.start_mark,
+ # "found duplicate key", key_event.start_mark)
+ item_value = self.compose_node(node, item_key)
+ #node.value[item_key] = item_value
+ node.value.append((item_key, item_value))
+ end_event = self.get_event()
+ node.end_mark = end_event.end_mark
+ return node
+
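The Composer consumes parser events and builds the node tree, resolving anchors and aliases as it goes; an alias does not copy the anchored node, it returns the very same object, which is how YAML expresses shared (even recursive) structure. A sketch at the node level:

    import pyyaml2 as yaml

    node = yaml.compose("base: &b {x: 1}\ncopy: *b\n")
    # node.value holds two (key, value) pairs whose values are the *same*
    # MappingNode instance: compose_node returned self.anchors['b'] for *b.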
diff --git a/collectors/python.d.plugin/python_modules/pyyaml2/constructor.py b/collectors/python.d.plugin/python_modules/pyyaml2/constructor.py
new file mode 100644
index 0000000..8ad1b90
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/pyyaml2/constructor.py
@@ -0,0 +1,676 @@
+# SPDX-License-Identifier: MIT
+
+__all__ = ['BaseConstructor', 'SafeConstructor', 'Constructor',
+ 'ConstructorError']
+
+from error import *
+from nodes import *
+
+import datetime
+
+import binascii, re, sys, types
+
+class ConstructorError(MarkedYAMLError):
+ pass
+
+class BaseConstructor(object):
+
+ yaml_constructors = {}
+ yaml_multi_constructors = {}
+
+ def __init__(self):
+ self.constructed_objects = {}
+ self.recursive_objects = {}
+ self.state_generators = []
+ self.deep_construct = False
+
+ def check_data(self):
+ # Are there more documents available?
+ return self.check_node()
+
+ def get_data(self):
+ # Construct and return the next document.
+ if self.check_node():
+ return self.construct_document(self.get_node())
+
+ def get_single_data(self):
+ # Ensure that the stream contains a single document and construct it.
+ node = self.get_single_node()
+ if node is not None:
+ return self.construct_document(node)
+ return None
+
+ def construct_document(self, node):
+ data = self.construct_object(node)
+ while self.state_generators:
+ state_generators = self.state_generators
+ self.state_generators = []
+ for generator in state_generators:
+ for dummy in generator:
+ pass
+ self.constructed_objects = {}
+ self.recursive_objects = {}
+ self.deep_construct = False
+ return data
+
+ def construct_object(self, node, deep=False):
+ if node in self.constructed_objects:
+ return self.constructed_objects[node]
+ if deep:
+ old_deep = self.deep_construct
+ self.deep_construct = True
+ if node in self.recursive_objects:
+ raise ConstructorError(None, None,
+ "found unconstructable recursive node", node.start_mark)
+ self.recursive_objects[node] = None
+ constructor = None
+ tag_suffix = None
+ if node.tag in self.yaml_constructors:
+ constructor = self.yaml_constructors[node.tag]
+ else:
+ for tag_prefix in self.yaml_multi_constructors:
+ if node.tag.startswith(tag_prefix):
+ tag_suffix = node.tag[len(tag_prefix):]
+ constructor = self.yaml_multi_constructors[tag_prefix]
+ break
+ else:
+ if None in self.yaml_multi_constructors:
+ tag_suffix = node.tag
+ constructor = self.yaml_multi_constructors[None]
+ elif None in self.yaml_constructors:
+ constructor = self.yaml_constructors[None]
+ elif isinstance(node, ScalarNode):
+ constructor = self.__class__.construct_scalar
+ elif isinstance(node, SequenceNode):
+ constructor = self.__class__.construct_sequence
+ elif isinstance(node, MappingNode):
+ constructor = self.__class__.construct_mapping
+ if tag_suffix is None:
+ data = constructor(self, node)
+ else:
+ data = constructor(self, tag_suffix, node)
+ if isinstance(data, types.GeneratorType):
+ generator = data
+ data = generator.next()
+ if self.deep_construct:
+ for dummy in generator:
+ pass
+ else:
+ self.state_generators.append(generator)
+ self.constructed_objects[node] = data
+ del self.recursive_objects[node]
+ if deep:
+ self.deep_construct = old_deep
+ return data
+
+ def construct_scalar(self, node):
+ if not isinstance(node, ScalarNode):
+ raise ConstructorError(None, None,
+ "expected a scalar node, but found %s" % node.id,
+ node.start_mark)
+ return node.value
+
+ def construct_sequence(self, node, deep=False):
+ if not isinstance(node, SequenceNode):
+ raise ConstructorError(None, None,
+ "expected a sequence node, but found %s" % node.id,
+ node.start_mark)
+ return [self.construct_object(child, deep=deep)
+ for child in node.value]
+
+ def construct_mapping(self, node, deep=False):
+ if not isinstance(node, MappingNode):
+ raise ConstructorError(None, None,
+ "expected a mapping node, but found %s" % node.id,
+ node.start_mark)
+ mapping = {}
+ for key_node, value_node in node.value:
+ key = self.construct_object(key_node, deep=deep)
+ try:
+ hash(key)
+ except TypeError, exc:
+ raise ConstructorError("while constructing a mapping", node.start_mark,
+ "found unacceptable key (%s)" % exc, key_node.start_mark)
+ value = self.construct_object(value_node, deep=deep)
+ mapping[key] = value
+ return mapping
+
+ def construct_pairs(self, node, deep=False):
+ if not isinstance(node, MappingNode):
+ raise ConstructorError(None, None,
+ "expected a mapping node, but found %s" % node.id,
+ node.start_mark)
+ pairs = []
+ for key_node, value_node in node.value:
+ key = self.construct_object(key_node, deep=deep)
+ value = self.construct_object(value_node, deep=deep)
+ pairs.append((key, value))
+ return pairs
+
+ def add_constructor(cls, tag, constructor):
+ if 'yaml_constructors' not in cls.__dict__:
+ cls.yaml_constructors = cls.yaml_constructors.copy()
+ cls.yaml_constructors[tag] = constructor
+ add_constructor = classmethod(add_constructor)
+
+ def add_multi_constructor(cls, tag_prefix, multi_constructor):
+ if 'yaml_multi_constructors' not in cls.__dict__:
+ cls.yaml_multi_constructors = cls.yaml_multi_constructors.copy()
+ cls.yaml_multi_constructors[tag_prefix] = multi_constructor
+ add_multi_constructor = classmethod(add_multi_constructor)
+
+class SafeConstructor(BaseConstructor):
+
+ def construct_scalar(self, node):
+ if isinstance(node, MappingNode):
+ for key_node, value_node in node.value:
+ if key_node.tag == u'tag:yaml.org,2002:value':
+ return self.construct_scalar(value_node)
+ return BaseConstructor.construct_scalar(self, node)
+
+ def flatten_mapping(self, node):
+ merge = []
+ index = 0
+ while index < len(node.value):
+ key_node, value_node = node.value[index]
+ if key_node.tag == u'tag:yaml.org,2002:merge':
+ del node.value[index]
+ if isinstance(value_node, MappingNode):
+ self.flatten_mapping(value_node)
+ merge.extend(value_node.value)
+ elif isinstance(value_node, SequenceNode):
+ submerge = []
+ for subnode in value_node.value:
+ if not isinstance(subnode, MappingNode):
+ raise ConstructorError("while constructing a mapping",
+ node.start_mark,
+ "expected a mapping for merging, but found %s"
+ % subnode.id, subnode.start_mark)
+ self.flatten_mapping(subnode)
+ submerge.append(subnode.value)
+ submerge.reverse()
+ for value in submerge:
+ merge.extend(value)
+ else:
+ raise ConstructorError("while constructing a mapping", node.start_mark,
+ "expected a mapping or list of mappings for merging, but found %s"
+ % value_node.id, value_node.start_mark)
+ elif key_node.tag == u'tag:yaml.org,2002:value':
+ key_node.tag = u'tag:yaml.org,2002:str'
+ index += 1
+ else:
+ index += 1
+ if merge:
+ node.value = merge + node.value
+
+ def construct_mapping(self, node, deep=False):
+ if isinstance(node, MappingNode):
+ self.flatten_mapping(node)
+ return BaseConstructor.construct_mapping(self, node, deep=deep)
+
+ def construct_yaml_null(self, node):
+ self.construct_scalar(node)
+ return None
+
+ bool_values = {
+ u'yes': True,
+ u'no': False,
+ u'true': True,
+ u'false': False,
+ u'on': True,
+ u'off': False,
+ }
+
+ def construct_yaml_bool(self, node):
+ value = self.construct_scalar(node)
+ return self.bool_values[value.lower()]
+
+ def construct_yaml_int(self, node):
+ value = str(self.construct_scalar(node))
+ value = value.replace('_', '')
+ sign = +1
+ if value[0] == '-':
+ sign = -1
+ if value[0] in '+-':
+ value = value[1:]
+ if value == '0':
+ return 0
+ elif value.startswith('0b'):
+ return sign*int(value[2:], 2)
+ elif value.startswith('0x'):
+ return sign*int(value[2:], 16)
+ elif value[0] == '0':
+ return sign*int(value, 8)
+ elif ':' in value:
+ digits = [int(part) for part in value.split(':')]
+ digits.reverse()
+ base = 1
+ value = 0
+ for digit in digits:
+ value += digit*base
+ base *= 60
+ return sign*value
+ else:
+ return sign*int(value)
+
+ inf_value = 1e300
+ while inf_value != inf_value*inf_value:
+ inf_value *= inf_value
+ nan_value = -inf_value/inf_value # Trying to make a quiet NaN (like C99).
+
+ def construct_yaml_float(self, node):
+ value = str(self.construct_scalar(node))
+ value = value.replace('_', '').lower()
+ sign = +1
+ if value[0] == '-':
+ sign = -1
+ if value[0] in '+-':
+ value = value[1:]
+ if value == '.inf':
+ return sign*self.inf_value
+ elif value == '.nan':
+ return self.nan_value
+ elif ':' in value:
+ digits = [float(part) for part in value.split(':')]
+ digits.reverse()
+ base = 1
+ value = 0.0
+ for digit in digits:
+ value += digit*base
+ base *= 60
+ return sign*value
+ else:
+ return sign*float(value)
+
+ def construct_yaml_binary(self, node):
+ value = self.construct_scalar(node)
+ try:
+ return str(value).decode('base64')
+ except (binascii.Error, UnicodeEncodeError), exc:
+ raise ConstructorError(None, None,
+ "failed to decode base64 data: %s" % exc, node.start_mark)
+
+ timestamp_regexp = re.compile(
+ ur'''^(?P<year>[0-9][0-9][0-9][0-9])
+ -(?P<month>[0-9][0-9]?)
+ -(?P<day>[0-9][0-9]?)
+ (?:(?:[Tt]|[ \t]+)
+ (?P<hour>[0-9][0-9]?)
+ :(?P<minute>[0-9][0-9])
+ :(?P<second>[0-9][0-9])
+ (?:\.(?P<fraction>[0-9]*))?
+ (?:[ \t]*(?P<tz>Z|(?P<tz_sign>[-+])(?P<tz_hour>[0-9][0-9]?)
+ (?::(?P<tz_minute>[0-9][0-9]))?))?)?$''', re.X)
+
+ def construct_yaml_timestamp(self, node):
+ value = self.construct_scalar(node)
+ match = self.timestamp_regexp.match(node.value)
+ values = match.groupdict()
+ year = int(values['year'])
+ month = int(values['month'])
+ day = int(values['day'])
+ if not values['hour']:
+ return datetime.date(year, month, day)
+ hour = int(values['hour'])
+ minute = int(values['minute'])
+ second = int(values['second'])
+ fraction = 0
+ if values['fraction']:
+ fraction = values['fraction'][:6]
+ while len(fraction) < 6:
+ fraction += '0'
+ fraction = int(fraction)
+ delta = None
+ if values['tz_sign']:
+ tz_hour = int(values['tz_hour'])
+ tz_minute = int(values['tz_minute'] or 0)
+ delta = datetime.timedelta(hours=tz_hour, minutes=tz_minute)
+ if values['tz_sign'] == '-':
+ delta = -delta
+ data = datetime.datetime(year, month, day, hour, minute, second, fraction)
+ if delta:
+ data -= delta
+ return data
+
+ def construct_yaml_omap(self, node):
+ # Note: we do not check for duplicate keys, because it's too
+ # CPU-expensive.
+ omap = []
+ yield omap
+ if not isinstance(node, SequenceNode):
+ raise ConstructorError("while constructing an ordered map", node.start_mark,
+ "expected a sequence, but found %s" % node.id, node.start_mark)
+ for subnode in node.value:
+ if not isinstance(subnode, MappingNode):
+ raise ConstructorError("while constructing an ordered map", node.start_mark,
+ "expected a mapping of length 1, but found %s" % subnode.id,
+ subnode.start_mark)
+ if len(subnode.value) != 1:
+ raise ConstructorError("while constructing an ordered map", node.start_mark,
+ "expected a single mapping item, but found %d items" % len(subnode.value),
+ subnode.start_mark)
+ key_node, value_node = subnode.value[0]
+ key = self.construct_object(key_node)
+ value = self.construct_object(value_node)
+ omap.append((key, value))
+
+ def construct_yaml_pairs(self, node):
+ # Note: the same code as `construct_yaml_omap`.
+ pairs = []
+ yield pairs
+ if not isinstance(node, SequenceNode):
+ raise ConstructorError("while constructing pairs", node.start_mark,
+ "expected a sequence, but found %s" % node.id, node.start_mark)
+ for subnode in node.value:
+ if not isinstance(subnode, MappingNode):
+ raise ConstructorError("while constructing pairs", node.start_mark,
+ "expected a mapping of length 1, but found %s" % subnode.id,
+ subnode.start_mark)
+ if len(subnode.value) != 1:
+ raise ConstructorError("while constructing pairs", node.start_mark,
+ "expected a single mapping item, but found %d items" % len(subnode.value),
+ subnode.start_mark)
+ key_node, value_node = subnode.value[0]
+ key = self.construct_object(key_node)
+ value = self.construct_object(value_node)
+ pairs.append((key, value))
+
+ def construct_yaml_set(self, node):
+ data = set()
+ yield data
+ value = self.construct_mapping(node)
+ data.update(value)
+
+ def construct_yaml_str(self, node):
+ value = self.construct_scalar(node)
+ try:
+ return value.encode('ascii')
+ except UnicodeEncodeError:
+ return value
+
+ def construct_yaml_seq(self, node):
+ data = []
+ yield data
+ data.extend(self.construct_sequence(node))
+
+ def construct_yaml_map(self, node):
+ data = {}
+ yield data
+ value = self.construct_mapping(node)
+ data.update(value)
+
+ def construct_yaml_object(self, node, cls):
+ data = cls.__new__(cls)
+ yield data
+ if hasattr(data, '__setstate__'):
+ state = self.construct_mapping(node, deep=True)
+ data.__setstate__(state)
+ else:
+ state = self.construct_mapping(node)
+ data.__dict__.update(state)
+
+ def construct_undefined(self, node):
+ raise ConstructorError(None, None,
+ "could not determine a constructor for the tag %r" % node.tag.encode('utf-8'),
+ node.start_mark)
+
+SafeConstructor.add_constructor(
+ u'tag:yaml.org,2002:null',
+ SafeConstructor.construct_yaml_null)
+
+SafeConstructor.add_constructor(
+ u'tag:yaml.org,2002:bool',
+ SafeConstructor.construct_yaml_bool)
+
+SafeConstructor.add_constructor(
+ u'tag:yaml.org,2002:int',
+ SafeConstructor.construct_yaml_int)
+
+SafeConstructor.add_constructor(
+ u'tag:yaml.org,2002:float',
+ SafeConstructor.construct_yaml_float)
+
+SafeConstructor.add_constructor(
+ u'tag:yaml.org,2002:binary',
+ SafeConstructor.construct_yaml_binary)
+
+SafeConstructor.add_constructor(
+ u'tag:yaml.org,2002:timestamp',
+ SafeConstructor.construct_yaml_timestamp)
+
+SafeConstructor.add_constructor(
+ u'tag:yaml.org,2002:omap',
+ SafeConstructor.construct_yaml_omap)
+
+SafeConstructor.add_constructor(
+ u'tag:yaml.org,2002:pairs',
+ SafeConstructor.construct_yaml_pairs)
+
+SafeConstructor.add_constructor(
+ u'tag:yaml.org,2002:set',
+ SafeConstructor.construct_yaml_set)
+
+SafeConstructor.add_constructor(
+ u'tag:yaml.org,2002:str',
+ SafeConstructor.construct_yaml_str)
+
+SafeConstructor.add_constructor(
+ u'tag:yaml.org,2002:seq',
+ SafeConstructor.construct_yaml_seq)
+
+SafeConstructor.add_constructor(
+ u'tag:yaml.org,2002:map',
+ SafeConstructor.construct_yaml_map)
+
+SafeConstructor.add_constructor(None,
+ SafeConstructor.construct_undefined)
+
+class Constructor(SafeConstructor):
+
+ def construct_python_str(self, node):
+ return self.construct_scalar(node).encode('utf-8')
+
+ def construct_python_unicode(self, node):
+ return self.construct_scalar(node)
+
+ def construct_python_long(self, node):
+ return long(self.construct_yaml_int(node))
+
+ def construct_python_complex(self, node):
+ return complex(self.construct_scalar(node))
+
+ def construct_python_tuple(self, node):
+ return tuple(self.construct_sequence(node))
+
+ def find_python_module(self, name, mark):
+ if not name:
+ raise ConstructorError("while constructing a Python module", mark,
+ "expected non-empty name appended to the tag", mark)
+ try:
+ __import__(name)
+ except ImportError, exc:
+ raise ConstructorError("while constructing a Python module", mark,
+ "cannot find module %r (%s)" % (name.encode('utf-8'), exc), mark)
+ return sys.modules[name]
+
+ def find_python_name(self, name, mark):
+ if not name:
+ raise ConstructorError("while constructing a Python object", mark,
+ "expected non-empty name appended to the tag", mark)
+ if u'.' in name:
+ module_name, object_name = name.rsplit('.', 1)
+ else:
+ module_name = '__builtin__'
+ object_name = name
+ try:
+ __import__(module_name)
+ except ImportError, exc:
+ raise ConstructorError("while constructing a Python object", mark,
+ "cannot find module %r (%s)" % (module_name.encode('utf-8'), exc), mark)
+ module = sys.modules[module_name]
+ if not hasattr(module, object_name):
+ raise ConstructorError("while constructing a Python object", mark,
+ "cannot find %r in the module %r" % (object_name.encode('utf-8'),
+ module.__name__), mark)
+ return getattr(module, object_name)
+
+ def construct_python_name(self, suffix, node):
+ value = self.construct_scalar(node)
+ if value:
+ raise ConstructorError("while constructing a Python name", node.start_mark,
+ "expected the empty value, but found %r" % value.encode('utf-8'),
+ node.start_mark)
+ return self.find_python_name(suffix, node.start_mark)
+
+ def construct_python_module(self, suffix, node):
+ value = self.construct_scalar(node)
+ if value:
+ raise ConstructorError("while constructing a Python module", node.start_mark,
+ "expected the empty value, but found %r" % value.encode('utf-8'),
+ node.start_mark)
+ return self.find_python_module(suffix, node.start_mark)
+
+ class classobj: pass
+
+ def make_python_instance(self, suffix, node,
+ args=None, kwds=None, newobj=False):
+ if not args:
+ args = []
+ if not kwds:
+ kwds = {}
+ cls = self.find_python_name(suffix, node.start_mark)
+ if newobj and isinstance(cls, type(self.classobj)) \
+ and not args and not kwds:
+ instance = self.classobj()
+ instance.__class__ = cls
+ return instance
+ elif newobj and isinstance(cls, type):
+ return cls.__new__(cls, *args, **kwds)
+ else:
+ return cls(*args, **kwds)
+
+ def set_python_instance_state(self, instance, state):
+ if hasattr(instance, '__setstate__'):
+ instance.__setstate__(state)
+ else:
+ slotstate = {}
+ if isinstance(state, tuple) and len(state) == 2:
+ state, slotstate = state
+ if hasattr(instance, '__dict__'):
+ instance.__dict__.update(state)
+ elif state:
+ slotstate.update(state)
+ for key, value in slotstate.items():
+ setattr(instance, key, value)
+
+ def construct_python_object(self, suffix, node):
+ # Format:
+ # !!python/object:module.name { ... state ... }
+ instance = self.make_python_instance(suffix, node, newobj=True)
+ yield instance
+ deep = hasattr(instance, '__setstate__')
+ state = self.construct_mapping(node, deep=deep)
+ self.set_python_instance_state(instance, state)
+
+ def construct_python_object_apply(self, suffix, node, newobj=False):
+ # Format:
+ # !!python/object/apply # (or !!python/object/new)
+ # args: [ ... arguments ... ]
+ # kwds: { ... keywords ... }
+ # state: ... state ...
+ # listitems: [ ... listitems ... ]
+ # dictitems: { ... dictitems ... }
+ # or short format:
+ # !!python/object/apply [ ... arguments ... ]
+ # The difference between !!python/object/apply and !!python/object/new
+ # is how an object is created, check make_python_instance for details.
+ if isinstance(node, SequenceNode):
+ args = self.construct_sequence(node, deep=True)
+ kwds = {}
+ state = {}
+ listitems = []
+ dictitems = {}
+ else:
+ value = self.construct_mapping(node, deep=True)
+ args = value.get('args', [])
+ kwds = value.get('kwds', {})
+ state = value.get('state', {})
+ listitems = value.get('listitems', [])
+ dictitems = value.get('dictitems', {})
+ instance = self.make_python_instance(suffix, node, args, kwds, newobj)
+ if state:
+ self.set_python_instance_state(instance, state)
+ if listitems:
+ instance.extend(listitems)
+ if dictitems:
+ for key in dictitems:
+ instance[key] = dictitems[key]
+ return instance
+
+ def construct_python_object_new(self, suffix, node):
+ return self.construct_python_object_apply(suffix, node, newobj=True)
+
+Constructor.add_constructor(
+ u'tag:yaml.org,2002:python/none',
+ Constructor.construct_yaml_null)
+
+Constructor.add_constructor(
+ u'tag:yaml.org,2002:python/bool',
+ Constructor.construct_yaml_bool)
+
+Constructor.add_constructor(
+ u'tag:yaml.org,2002:python/str',
+ Constructor.construct_python_str)
+
+Constructor.add_constructor(
+ u'tag:yaml.org,2002:python/unicode',
+ Constructor.construct_python_unicode)
+
+Constructor.add_constructor(
+ u'tag:yaml.org,2002:python/int',
+ Constructor.construct_yaml_int)
+
+Constructor.add_constructor(
+ u'tag:yaml.org,2002:python/long',
+ Constructor.construct_python_long)
+
+Constructor.add_constructor(
+ u'tag:yaml.org,2002:python/float',
+ Constructor.construct_yaml_float)
+
+Constructor.add_constructor(
+ u'tag:yaml.org,2002:python/complex',
+ Constructor.construct_python_complex)
+
+Constructor.add_constructor(
+ u'tag:yaml.org,2002:python/list',
+ Constructor.construct_yaml_seq)
+
+Constructor.add_constructor(
+ u'tag:yaml.org,2002:python/tuple',
+ Constructor.construct_python_tuple)
+
+Constructor.add_constructor(
+ u'tag:yaml.org,2002:python/dict',
+ Constructor.construct_yaml_map)
+
+Constructor.add_multi_constructor(
+ u'tag:yaml.org,2002:python/name:',
+ Constructor.construct_python_name)
+
+Constructor.add_multi_constructor(
+ u'tag:yaml.org,2002:python/module:',
+ Constructor.construct_python_module)
+
+Constructor.add_multi_constructor(
+ u'tag:yaml.org,2002:python/object:',
+ Constructor.construct_python_object)
+
+Constructor.add_multi_constructor(
+ u'tag:yaml.org,2002:python/object/apply:',
+ Constructor.construct_python_object_apply)
+
+Constructor.add_multi_constructor(
+ u'tag:yaml.org,2002:python/object/new:',
+ Constructor.construct_python_object_new)
+
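SafeConstructor implements the full YAML 1.1 scalar rules, including the surprising ones: underscores in numbers are stripped, 0b/0x/leading-zero integers are binary/hex/octal, colon-separated digits are sexagesimal (base 60), and yes/no/on/off resolve to booleans. A few illustrative conversions:

    import pyyaml2 as yaml

    assert yaml.safe_load("v: 0x1F")['v'] == 31    # hex
    assert yaml.safe_load("v: 010")['v'] == 8      # leading zero -> octal
    assert yaml.safe_load("v: 1:30")['v'] == 90    # sexagesimal: 1*60 + 30
    assert yaml.safe_load("v: yes")['v'] is True   # bool_values table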
diff --git a/collectors/python.d.plugin/python_modules/pyyaml2/cyaml.py b/collectors/python.d.plugin/python_modules/pyyaml2/cyaml.py
new file mode 100644
index 0000000..2858ab4
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/pyyaml2/cyaml.py
@@ -0,0 +1,86 @@
+# SPDX-License-Identifier: MIT
+
+__all__ = ['CBaseLoader', 'CSafeLoader', 'CLoader',
+ 'CBaseDumper', 'CSafeDumper', 'CDumper']
+
+from _yaml import CParser, CEmitter
+
+from constructor import *
+
+from serializer import *
+from representer import *
+
+from resolver import *
+
+class CBaseLoader(CParser, BaseConstructor, BaseResolver):
+
+ def __init__(self, stream):
+ CParser.__init__(self, stream)
+ BaseConstructor.__init__(self)
+ BaseResolver.__init__(self)
+
+class CSafeLoader(CParser, SafeConstructor, Resolver):
+
+ def __init__(self, stream):
+ CParser.__init__(self, stream)
+ SafeConstructor.__init__(self)
+ Resolver.__init__(self)
+
+class CLoader(CParser, Constructor, Resolver):
+
+ def __init__(self, stream):
+ CParser.__init__(self, stream)
+ Constructor.__init__(self)
+ Resolver.__init__(self)
+
+class CBaseDumper(CEmitter, BaseRepresenter, BaseResolver):
+
+ def __init__(self, stream,
+ default_style=None, default_flow_style=None,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding=None, explicit_start=None, explicit_end=None,
+ version=None, tags=None):
+ CEmitter.__init__(self, stream, canonical=canonical,
+ indent=indent, width=width, encoding=encoding,
+ allow_unicode=allow_unicode, line_break=line_break,
+ explicit_start=explicit_start, explicit_end=explicit_end,
+ version=version, tags=tags)
+ Representer.__init__(self, default_style=default_style,
+ default_flow_style=default_flow_style)
+ Resolver.__init__(self)
+
+class CSafeDumper(CEmitter, SafeRepresenter, Resolver):
+
+ def __init__(self, stream,
+ default_style=None, default_flow_style=None,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding=None, explicit_start=None, explicit_end=None,
+ version=None, tags=None):
+ CEmitter.__init__(self, stream, canonical=canonical,
+ indent=indent, width=width, encoding=encoding,
+ allow_unicode=allow_unicode, line_break=line_break,
+ explicit_start=explicit_start, explicit_end=explicit_end,
+ version=version, tags=tags)
+ SafeRepresenter.__init__(self, default_style=default_style,
+ default_flow_style=default_flow_style)
+ Resolver.__init__(self)
+
+class CDumper(CEmitter, Serializer, Representer, Resolver):
+
+ def __init__(self, stream,
+ default_style=None, default_flow_style=None,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding=None, explicit_start=None, explicit_end=None,
+ version=None, tags=None):
+ CEmitter.__init__(self, stream, canonical=canonical,
+ indent=indent, width=width, encoding=encoding,
+ allow_unicode=allow_unicode, line_break=line_break,
+ explicit_start=explicit_start, explicit_end=explicit_end,
+ version=version, tags=tags)
+ Representer.__init__(self, default_style=default_style,
+ default_flow_style=default_flow_style)
+ Resolver.__init__(self)
+
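These classes are the libyaml-backed variants: CParser and CEmitter from the _yaml C extension replace the pure-Python scanner/parser and emitter, while the constructor, representer and resolver layers stay in Python. __init__.py imports this module inside try/except and sets __with_libyaml__, so callers can opt in when the extension is present:

    import pyyaml2 as yaml

    Loader = yaml.CSafeLoader if yaml.__with_libyaml__ else yaml.SafeLoader
    data = yaml.load("a: 1\n", Loader=Loader)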
diff --git a/collectors/python.d.plugin/python_modules/pyyaml2/dumper.py b/collectors/python.d.plugin/python_modules/pyyaml2/dumper.py
new file mode 100644
index 0000000..3685cbe
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/pyyaml2/dumper.py
@@ -0,0 +1,63 @@
+# SPDX-License-Identifier: MIT
+
+__all__ = ['BaseDumper', 'SafeDumper', 'Dumper']
+
+from emitter import *
+from serializer import *
+from representer import *
+from resolver import *
+
+class BaseDumper(Emitter, Serializer, BaseRepresenter, BaseResolver):
+
+ def __init__(self, stream,
+ default_style=None, default_flow_style=None,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding=None, explicit_start=None, explicit_end=None,
+ version=None, tags=None):
+ Emitter.__init__(self, stream, canonical=canonical,
+ indent=indent, width=width,
+ allow_unicode=allow_unicode, line_break=line_break)
+ Serializer.__init__(self, encoding=encoding,
+ explicit_start=explicit_start, explicit_end=explicit_end,
+ version=version, tags=tags)
+ Representer.__init__(self, default_style=default_style,
+ default_flow_style=default_flow_style)
+ Resolver.__init__(self)
+
+class SafeDumper(Emitter, Serializer, SafeRepresenter, Resolver):
+
+ def __init__(self, stream,
+ default_style=None, default_flow_style=None,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding=None, explicit_start=None, explicit_end=None,
+ version=None, tags=None):
+ Emitter.__init__(self, stream, canonical=canonical,
+ indent=indent, width=width,
+ allow_unicode=allow_unicode, line_break=line_break)
+ Serializer.__init__(self, encoding=encoding,
+ explicit_start=explicit_start, explicit_end=explicit_end,
+ version=version, tags=tags)
+ SafeRepresenter.__init__(self, default_style=default_style,
+ default_flow_style=default_flow_style)
+ Resolver.__init__(self)
+
+class Dumper(Emitter, Serializer, Representer, Resolver):
+
+ def __init__(self, stream,
+ default_style=None, default_flow_style=None,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding=None, explicit_start=None, explicit_end=None,
+ version=None, tags=None):
+ Emitter.__init__(self, stream, canonical=canonical,
+ indent=indent, width=width,
+ allow_unicode=allow_unicode, line_break=line_break)
+ Serializer.__init__(self, encoding=encoding,
+ explicit_start=explicit_start, explicit_end=explicit_end,
+ version=version, tags=tags)
+ Representer.__init__(self, default_style=default_style,
+ default_flow_style=default_flow_style)
+ Resolver.__init__(self)
+
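Each dumper is plain multiple inheritance over four cooperating roles: Emitter writes characters, Serializer walks node trees, a Representer maps Python objects to nodes, and a Resolver picks implicit tags. SafeDumper substitutes SafeRepresenter so only standard YAML tags can be produced, which is what safe_dump in __init__.py relies on:

    import pyyaml2 as yaml

    text = yaml.dump({'who': 'netdata'}, Dumper=yaml.SafeDumper,
                     default_flow_style=False)
    # equivalent to yaml.safe_dump({'who': 'netdata'}, default_flow_style=False)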
diff --git a/collectors/python.d.plugin/python_modules/pyyaml2/emitter.py b/collectors/python.d.plugin/python_modules/pyyaml2/emitter.py
new file mode 100644
index 0000000..9a460a0
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/pyyaml2/emitter.py
@@ -0,0 +1,1141 @@
+# SPDX-License-Identifier: MIT
+
+# Emitter expects events obeying the following grammar:
+# stream ::= STREAM-START document* STREAM-END
+# document ::= DOCUMENT-START node DOCUMENT-END
+# node ::= SCALAR | sequence | mapping
+# sequence ::= SEQUENCE-START node* SEQUENCE-END
+# mapping ::= MAPPING-START (node node)* MAPPING-END
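+#
+# For example, the document "a: [1, 2]" corresponds to the event sequence
+# (sketch; anchors and tags omitted):
+#   STREAM-START DOCUMENT-START MAPPING-START
+#     SCALAR(a) SEQUENCE-START SCALAR(1) SCALAR(2) SEQUENCE-END
+#   MAPPING-END DOCUMENT-END STREAM-END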
+
+__all__ = ['Emitter', 'EmitterError']
+
+from error import YAMLError
+from events import *
+
+class EmitterError(YAMLError):
+ pass
+
+class ScalarAnalysis(object):
+ def __init__(self, scalar, empty, multiline,
+ allow_flow_plain, allow_block_plain,
+ allow_single_quoted, allow_double_quoted,
+ allow_block):
+ self.scalar = scalar
+ self.empty = empty
+ self.multiline = multiline
+ self.allow_flow_plain = allow_flow_plain
+ self.allow_block_plain = allow_block_plain
+ self.allow_single_quoted = allow_single_quoted
+ self.allow_double_quoted = allow_double_quoted
+ self.allow_block = allow_block
+
+class Emitter(object):
+
+ DEFAULT_TAG_PREFIXES = {
+ u'!' : u'!',
+ u'tag:yaml.org,2002:' : u'!!',
+ }
+
+ def __init__(self, stream, canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None):
+
+ # The stream should have the methods `write` and possibly `flush`.
+ self.stream = stream
+
+ # Encoding can be overridden by STREAM-START.
+ self.encoding = None
+
+ # Emitter is a state machine with a stack of states to handle nested
+ # structures.
+ self.states = []
+ self.state = self.expect_stream_start
+
+ # Current event and the event queue.
+ self.events = []
+ self.event = None
+
+ # The current indentation level and the stack of previous indents.
+ self.indents = []
+ self.indent = None
+
+ # Flow level.
+ self.flow_level = 0
+
+ # Contexts.
+ self.root_context = False
+ self.sequence_context = False
+ self.mapping_context = False
+ self.simple_key_context = False
+
+ # Characteristics of the last emitted character:
+ # - current position.
+ # - is it a whitespace?
+ # - is it an indention character
+ # (indentation space, '-', '?', or ':')?
+ self.line = 0
+ self.column = 0
+ self.whitespace = True
+ self.indention = True
+
+ # Whether the document requires an explicit document indicator
+ self.open_ended = False
+
+ # Formatting details.
+ self.canonical = canonical
+ self.allow_unicode = allow_unicode
+ self.best_indent = 2
+ if indent and 1 < indent < 10:
+ self.best_indent = indent
+ self.best_width = 80
+ if width and width > self.best_indent*2:
+ self.best_width = width
+ self.best_line_break = u'\n'
+ if line_break in [u'\r', u'\n', u'\r\n']:
+ self.best_line_break = line_break
+
+ # Tag prefixes.
+ self.tag_prefixes = None
+
+ # Prepared anchor and tag.
+ self.prepared_anchor = None
+ self.prepared_tag = None
+
+ # Scalar analysis and style.
+ self.analysis = None
+ self.style = None
+
+ def dispose(self):
+ # Reset the state attributes (to clear self-references)
+ self.states = []
+ self.state = None
+
+ def emit(self, event):
+ self.events.append(event)
+ while not self.need_more_events():
+ self.event = self.events.pop(0)
+ self.state()
+ self.event = None
+
+ # In some cases, we wait for the next few events before emitting.
+
+ def need_more_events(self):
+ if not self.events:
+ return True
+ event = self.events[0]
+ if isinstance(event, DocumentStartEvent):
+ return self.need_events(1)
+ elif isinstance(event, SequenceStartEvent):
+ return self.need_events(2)
+ elif isinstance(event, MappingStartEvent):
+ return self.need_events(3)
+ else:
+ return False
+
+ def need_events(self, count):
+ level = 0
+ for event in self.events[1:]:
+ if isinstance(event, (DocumentStartEvent, CollectionStartEvent)):
+ level += 1
+ elif isinstance(event, (DocumentEndEvent, CollectionEndEvent)):
+ level -= 1
+ elif isinstance(event, StreamEndEvent):
+ level = -1
+ if level < 0:
+ return False
+ return (len(self.events) < count+1)
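+
+    # For example, after a MappingStartEvent the emitter buffers up to three
+    # more events so that checkers such as check_empty_mapping() and
+    # check_simple_key() can look ahead at the first key before any indicator
+    # is written; the counts in need_more_events() above encode this lookahead.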
+
+ def increase_indent(self, flow=False, indentless=False):
+ self.indents.append(self.indent)
+ if self.indent is None:
+ if flow:
+ self.indent = self.best_indent
+ else:
+ self.indent = 0
+ elif not indentless:
+ self.indent += self.best_indent
+
+ # States.
+
+ # Stream handlers.
+
+ def expect_stream_start(self):
+ if isinstance(self.event, StreamStartEvent):
+ if self.event.encoding and not getattr(self.stream, 'encoding', None):
+ self.encoding = self.event.encoding
+ self.write_stream_start()
+ self.state = self.expect_first_document_start
+ else:
+ raise EmitterError("expected StreamStartEvent, but got %s"
+ % self.event)
+
+ def expect_nothing(self):
+ raise EmitterError("expected nothing, but got %s" % self.event)
+
+ # Document handlers.
+
+ def expect_first_document_start(self):
+ return self.expect_document_start(first=True)
+
+ def expect_document_start(self, first=False):
+ if isinstance(self.event, DocumentStartEvent):
+ if (self.event.version or self.event.tags) and self.open_ended:
+ self.write_indicator(u'...', True)
+ self.write_indent()
+ if self.event.version:
+ version_text = self.prepare_version(self.event.version)
+ self.write_version_directive(version_text)
+ self.tag_prefixes = self.DEFAULT_TAG_PREFIXES.copy()
+ if self.event.tags:
+ handles = self.event.tags.keys()
+ handles.sort()
+ for handle in handles:
+ prefix = self.event.tags[handle]
+ self.tag_prefixes[prefix] = handle
+ handle_text = self.prepare_tag_handle(handle)
+ prefix_text = self.prepare_tag_prefix(prefix)
+ self.write_tag_directive(handle_text, prefix_text)
+ implicit = (first and not self.event.explicit and not self.canonical
+ and not self.event.version and not self.event.tags
+ and not self.check_empty_document())
+ if not implicit:
+ self.write_indent()
+ self.write_indicator(u'---', True)
+ if self.canonical:
+ self.write_indent()
+ self.state = self.expect_document_root
+ elif isinstance(self.event, StreamEndEvent):
+ if self.open_ended:
+ self.write_indicator(u'...', True)
+ self.write_indent()
+ self.write_stream_end()
+ self.state = self.expect_nothing
+ else:
+ raise EmitterError("expected DocumentStartEvent, but got %s"
+ % self.event)
+
+ def expect_document_end(self):
+ if isinstance(self.event, DocumentEndEvent):
+ self.write_indent()
+ if self.event.explicit:
+ self.write_indicator(u'...', True)
+ self.write_indent()
+ self.flush_stream()
+ self.state = self.expect_document_start
+ else:
+ raise EmitterError("expected DocumentEndEvent, but got %s"
+ % self.event)
+
+ def expect_document_root(self):
+ self.states.append(self.expect_document_end)
+ self.expect_node(root=True)
+
+ # Node handlers.
+
+ def expect_node(self, root=False, sequence=False, mapping=False,
+ simple_key=False):
+ self.root_context = root
+ self.sequence_context = sequence
+ self.mapping_context = mapping
+ self.simple_key_context = simple_key
+ if isinstance(self.event, AliasEvent):
+ self.expect_alias()
+ elif isinstance(self.event, (ScalarEvent, CollectionStartEvent)):
+ self.process_anchor(u'&')
+ self.process_tag()
+ if isinstance(self.event, ScalarEvent):
+ self.expect_scalar()
+ elif isinstance(self.event, SequenceStartEvent):
+ if self.flow_level or self.canonical or self.event.flow_style \
+ or self.check_empty_sequence():
+ self.expect_flow_sequence()
+ else:
+ self.expect_block_sequence()
+ elif isinstance(self.event, MappingStartEvent):
+ if self.flow_level or self.canonical or self.event.flow_style \
+ or self.check_empty_mapping():
+ self.expect_flow_mapping()
+ else:
+ self.expect_block_mapping()
+ else:
+ raise EmitterError("expected NodeEvent, but got %s" % self.event)
+
+ def expect_alias(self):
+ if self.event.anchor is None:
+ raise EmitterError("anchor is not specified for alias")
+ self.process_anchor(u'*')
+ self.state = self.states.pop()
+
+ def expect_scalar(self):
+ self.increase_indent(flow=True)
+ self.process_scalar()
+ self.indent = self.indents.pop()
+ self.state = self.states.pop()
+
+ # Flow sequence handlers.
+
+ def expect_flow_sequence(self):
+ self.write_indicator(u'[', True, whitespace=True)
+ self.flow_level += 1
+ self.increase_indent(flow=True)
+ self.state = self.expect_first_flow_sequence_item
+
+ def expect_first_flow_sequence_item(self):
+ if isinstance(self.event, SequenceEndEvent):
+ self.indent = self.indents.pop()
+ self.flow_level -= 1
+ self.write_indicator(u']', False)
+ self.state = self.states.pop()
+ else:
+ if self.canonical or self.column > self.best_width:
+ self.write_indent()
+ self.states.append(self.expect_flow_sequence_item)
+ self.expect_node(sequence=True)
+
+ def expect_flow_sequence_item(self):
+ if isinstance(self.event, SequenceEndEvent):
+ self.indent = self.indents.pop()
+ self.flow_level -= 1
+ if self.canonical:
+ self.write_indicator(u',', False)
+ self.write_indent()
+ self.write_indicator(u']', False)
+ self.state = self.states.pop()
+ else:
+ self.write_indicator(u',', False)
+ if self.canonical or self.column > self.best_width:
+ self.write_indent()
+ self.states.append(self.expect_flow_sequence_item)
+ self.expect_node(sequence=True)
+
+ # Flow mapping handlers.
+
+ def expect_flow_mapping(self):
+ self.write_indicator(u'{', True, whitespace=True)
+ self.flow_level += 1
+ self.increase_indent(flow=True)
+ self.state = self.expect_first_flow_mapping_key
+
+ def expect_first_flow_mapping_key(self):
+ if isinstance(self.event, MappingEndEvent):
+ self.indent = self.indents.pop()
+ self.flow_level -= 1
+ self.write_indicator(u'}', False)
+ self.state = self.states.pop()
+ else:
+ if self.canonical or self.column > self.best_width:
+ self.write_indent()
+ if not self.canonical and self.check_simple_key():
+ self.states.append(self.expect_flow_mapping_simple_value)
+ self.expect_node(mapping=True, simple_key=True)
+ else:
+ self.write_indicator(u'?', True)
+ self.states.append(self.expect_flow_mapping_value)
+ self.expect_node(mapping=True)
+
+ def expect_flow_mapping_key(self):
+ if isinstance(self.event, MappingEndEvent):
+ self.indent = self.indents.pop()
+ self.flow_level -= 1
+ if self.canonical:
+ self.write_indicator(u',', False)
+ self.write_indent()
+ self.write_indicator(u'}', False)
+ self.state = self.states.pop()
+ else:
+ self.write_indicator(u',', False)
+ if self.canonical or self.column > self.best_width:
+ self.write_indent()
+ if not self.canonical and self.check_simple_key():
+ self.states.append(self.expect_flow_mapping_simple_value)
+ self.expect_node(mapping=True, simple_key=True)
+ else:
+ self.write_indicator(u'?', True)
+ self.states.append(self.expect_flow_mapping_value)
+ self.expect_node(mapping=True)
+
+ def expect_flow_mapping_simple_value(self):
+ self.write_indicator(u':', False)
+ self.states.append(self.expect_flow_mapping_key)
+ self.expect_node(mapping=True)
+
+ def expect_flow_mapping_value(self):
+ if self.canonical or self.column > self.best_width:
+ self.write_indent()
+ self.write_indicator(u':', True)
+ self.states.append(self.expect_flow_mapping_key)
+ self.expect_node(mapping=True)
+
+ # Block sequence handlers.
+
+ def expect_block_sequence(self):
+ indentless = (self.mapping_context and not self.indention)
+ self.increase_indent(flow=False, indentless=indentless)
+ self.state = self.expect_first_block_sequence_item
+
+ def expect_first_block_sequence_item(self):
+ return self.expect_block_sequence_item(first=True)
+
+ def expect_block_sequence_item(self, first=False):
+ if not first and isinstance(self.event, SequenceEndEvent):
+ self.indent = self.indents.pop()
+ self.state = self.states.pop()
+ else:
+ self.write_indent()
+ self.write_indicator(u'-', True, indention=True)
+ self.states.append(self.expect_block_sequence_item)
+ self.expect_node(sequence=True)
+
+ # Block mapping handlers.
+
+ def expect_block_mapping(self):
+ self.increase_indent(flow=False)
+ self.state = self.expect_first_block_mapping_key
+
+ def expect_first_block_mapping_key(self):
+ return self.expect_block_mapping_key(first=True)
+
+ def expect_block_mapping_key(self, first=False):
+ if not first and isinstance(self.event, MappingEndEvent):
+ self.indent = self.indents.pop()
+ self.state = self.states.pop()
+ else:
+ self.write_indent()
+ if self.check_simple_key():
+ self.states.append(self.expect_block_mapping_simple_value)
+ self.expect_node(mapping=True, simple_key=True)
+ else:
+ self.write_indicator(u'?', True, indention=True)
+ self.states.append(self.expect_block_mapping_value)
+ self.expect_node(mapping=True)
+
+ def expect_block_mapping_simple_value(self):
+ self.write_indicator(u':', False)
+ self.states.append(self.expect_block_mapping_key)
+ self.expect_node(mapping=True)
+
+ def expect_block_mapping_value(self):
+ self.write_indent()
+ self.write_indicator(u':', True, indention=True)
+ self.states.append(self.expect_block_mapping_key)
+ self.expect_node(mapping=True)
+
+ # Checkers.
+
+ def check_empty_sequence(self):
+ return (isinstance(self.event, SequenceStartEvent) and self.events
+ and isinstance(self.events[0], SequenceEndEvent))
+
+ def check_empty_mapping(self):
+ return (isinstance(self.event, MappingStartEvent) and self.events
+ and isinstance(self.events[0], MappingEndEvent))
+
+ def check_empty_document(self):
+ if not isinstance(self.event, DocumentStartEvent) or not self.events:
+ return False
+ event = self.events[0]
+ return (isinstance(event, ScalarEvent) and event.anchor is None
+ and event.tag is None and event.implicit and event.value == u'')
+
+ def check_simple_key(self):
+ length = 0
+ if isinstance(self.event, NodeEvent) and self.event.anchor is not None:
+ if self.prepared_anchor is None:
+ self.prepared_anchor = self.prepare_anchor(self.event.anchor)
+ length += len(self.prepared_anchor)
+ if isinstance(self.event, (ScalarEvent, CollectionStartEvent)) \
+ and self.event.tag is not None:
+ if self.prepared_tag is None:
+ self.prepared_tag = self.prepare_tag(self.event.tag)
+ length += len(self.prepared_tag)
+ if isinstance(self.event, ScalarEvent):
+ if self.analysis is None:
+ self.analysis = self.analyze_scalar(self.event.value)
+ length += len(self.analysis.scalar)
+ return (length < 128 and (isinstance(self.event, AliasEvent)
+ or (isinstance(self.event, ScalarEvent)
+ and not self.analysis.empty and not self.analysis.multiline)
+ or self.check_empty_sequence() or self.check_empty_mapping()))
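+
+    # For example, a key whose prepared anchor, tag and scalar text together
+    # reach 128 characters or more fails the test above and is emitted in the
+    # explicit form:
+    #   ? <long key>
+    #   : value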
+
+ # Anchor, Tag, and Scalar processors.
+
+ def process_anchor(self, indicator):
+ if self.event.anchor is None:
+ self.prepared_anchor = None
+ return
+ if self.prepared_anchor is None:
+ self.prepared_anchor = self.prepare_anchor(self.event.anchor)
+ if self.prepared_anchor:
+ self.write_indicator(indicator+self.prepared_anchor, True)
+ self.prepared_anchor = None
+
+ def process_tag(self):
+ tag = self.event.tag
+ if isinstance(self.event, ScalarEvent):
+ if self.style is None:
+ self.style = self.choose_scalar_style()
+ if ((not self.canonical or tag is None) and
+ ((self.style == '' and self.event.implicit[0])
+ or (self.style != '' and self.event.implicit[1]))):
+ self.prepared_tag = None
+ return
+ if self.event.implicit[0] and tag is None:
+ tag = u'!'
+ self.prepared_tag = None
+ else:
+ if (not self.canonical or tag is None) and self.event.implicit:
+ self.prepared_tag = None
+ return
+ if tag is None:
+ raise EmitterError("tag is not specified")
+ if self.prepared_tag is None:
+ self.prepared_tag = self.prepare_tag(tag)
+ if self.prepared_tag:
+ self.write_indicator(self.prepared_tag, True)
+ self.prepared_tag = None
+
+ def choose_scalar_style(self):
+ if self.analysis is None:
+ self.analysis = self.analyze_scalar(self.event.value)
+ if self.event.style == '"' or self.canonical:
+ return '"'
+ if not self.event.style and self.event.implicit[0]:
+ if (not (self.simple_key_context and
+ (self.analysis.empty or self.analysis.multiline))
+ and (self.flow_level and self.analysis.allow_flow_plain
+ or (not self.flow_level and self.analysis.allow_block_plain))):
+ return ''
+ if self.event.style and self.event.style in '|>':
+ if (not self.flow_level and not self.simple_key_context
+ and self.analysis.allow_block):
+ return self.event.style
+ if not self.event.style or self.event.style == '\'':
+ if (self.analysis.allow_single_quoted and
+ not (self.simple_key_context and self.analysis.multiline)):
+ return '\''
+ return '"'
+
+ def process_scalar(self):
+ if self.analysis is None:
+ self.analysis = self.analyze_scalar(self.event.value)
+ if self.style is None:
+ self.style = self.choose_scalar_style()
+ split = (not self.simple_key_context)
+ #if self.analysis.multiline and split \
+ # and (not self.style or self.style in '\'\"'):
+ # self.write_indent()
+ if self.style == '"':
+ self.write_double_quoted(self.analysis.scalar, split)
+ elif self.style == '\'':
+ self.write_single_quoted(self.analysis.scalar, split)
+ elif self.style == '>':
+ self.write_folded(self.analysis.scalar)
+ elif self.style == '|':
+ self.write_literal(self.analysis.scalar)
+ else:
+ self.write_plain(self.analysis.scalar, split)
+ self.analysis = None
+ self.style = None
+
+ # Analyzers.
+
+ def prepare_version(self, version):
+ major, minor = version
+ if major != 1:
+ raise EmitterError("unsupported YAML version: %d.%d" % (major, minor))
+ return u'%d.%d' % (major, minor)
+
+ def prepare_tag_handle(self, handle):
+ if not handle:
+ raise EmitterError("tag handle must not be empty")
+ if handle[0] != u'!' or handle[-1] != u'!':
+ raise EmitterError("tag handle must start and end with '!': %r"
+ % (handle.encode('utf-8')))
+ for ch in handle[1:-1]:
+ if not (u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
+ or ch in u'-_'):
+ raise EmitterError("invalid character %r in the tag handle: %r"
+ % (ch.encode('utf-8'), handle.encode('utf-8')))
+ return handle
+
+ def prepare_tag_prefix(self, prefix):
+ if not prefix:
+ raise EmitterError("tag prefix must not be empty")
+ chunks = []
+ start = end = 0
+ if prefix[0] == u'!':
+ end = 1
+ while end < len(prefix):
+ ch = prefix[end]
+ if u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
+ or ch in u'-;/?!:@&=+$,_.~*\'()[]':
+ end += 1
+ else:
+ if start < end:
+ chunks.append(prefix[start:end])
+ start = end = end+1
+ data = ch.encode('utf-8')
+ for ch in data:
+ chunks.append(u'%%%02X' % ord(ch))
+ if start < end:
+ chunks.append(prefix[start:end])
+ return u''.join(chunks)
+
+ def prepare_tag(self, tag):
+ if not tag:
+ raise EmitterError("tag must not be empty")
+ if tag == u'!':
+ return tag
+ handle = None
+ suffix = tag
+ prefixes = self.tag_prefixes.keys()
+ prefixes.sort()
+ for prefix in prefixes:
+ if tag.startswith(prefix) \
+ and (prefix == u'!' or len(prefix) < len(tag)):
+ handle = self.tag_prefixes[prefix]
+ suffix = tag[len(prefix):]
+ chunks = []
+ start = end = 0
+ while end < len(suffix):
+ ch = suffix[end]
+ if u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
+ or ch in u'-;/?:@&=+$,_.~*\'()[]' \
+ or (ch == u'!' and handle != u'!'):
+ end += 1
+ else:
+ if start < end:
+ chunks.append(suffix[start:end])
+ start = end = end+1
+ data = ch.encode('utf-8')
+ for ch in data:
+ chunks.append(u'%%%02X' % ord(ch))
+ if start < end:
+ chunks.append(suffix[start:end])
+ suffix_text = u''.join(chunks)
+ if handle:
+ return u'%s%s' % (handle, suffix_text)
+ else:
+ return u'!<%s>' % suffix_text
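+
+    # For example, with the default prefixes u'tag:yaml.org,2002:str' is
+    # written as u'!!str', while an unmatched tag such as
+    # u'tag:example.com,2000:app/foo' is written verbatim as
+    # u'!<tag:example.com,2000:app/foo>'.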
+
+ def prepare_anchor(self, anchor):
+ if not anchor:
+ raise EmitterError("anchor must not be empty")
+ for ch in anchor:
+ if not (u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
+ or ch in u'-_'):
+ raise EmitterError("invalid character %r in the anchor: %r"
+ % (ch.encode('utf-8'), anchor.encode('utf-8')))
+ return anchor
+
+ def analyze_scalar(self, scalar):
+
+ # Empty scalar is a special case.
+ if not scalar:
+ return ScalarAnalysis(scalar=scalar, empty=True, multiline=False,
+ allow_flow_plain=False, allow_block_plain=True,
+ allow_single_quoted=True, allow_double_quoted=True,
+ allow_block=False)
+
+ # Indicators and special characters.
+ block_indicators = False
+ flow_indicators = False
+ line_breaks = False
+ special_characters = False
+
+ # Important whitespace combinations.
+ leading_space = False
+ leading_break = False
+ trailing_space = False
+ trailing_break = False
+ break_space = False
+ space_break = False
+
+ # Check document indicators.
+ if scalar.startswith(u'---') or scalar.startswith(u'...'):
+ block_indicators = True
+ flow_indicators = True
+
+ # First character or preceded by a whitespace.
+ preceeded_by_whitespace = True
+
+ # Last character or followed by a whitespace.
+ followed_by_whitespace = (len(scalar) == 1 or
+ scalar[1] in u'\0 \t\r\n\x85\u2028\u2029')
+
+ # The previous character is a space.
+ previous_space = False
+
+ # The previous character is a break.
+ previous_break = False
+
+ index = 0
+ while index < len(scalar):
+ ch = scalar[index]
+
+ # Check for indicators.
+ if index == 0:
+ # Leading indicators are special characters.
+ if ch in u'#,[]{}&*!|>\'\"%@`':
+ flow_indicators = True
+ block_indicators = True
+ if ch in u'?:':
+ flow_indicators = True
+ if followed_by_whitespace:
+ block_indicators = True
+ if ch == u'-' and followed_by_whitespace:
+ flow_indicators = True
+ block_indicators = True
+ else:
+                # Some indicators cannot appear within a scalar either.
+ if ch in u',?[]{}':
+ flow_indicators = True
+ if ch == u':':
+ flow_indicators = True
+ if followed_by_whitespace:
+ block_indicators = True
+ if ch == u'#' and preceeded_by_whitespace:
+ flow_indicators = True
+ block_indicators = True
+
+ # Check for line breaks, special, and unicode characters.
+ if ch in u'\n\x85\u2028\u2029':
+ line_breaks = True
+ if not (ch == u'\n' or u'\x20' <= ch <= u'\x7E'):
+ if (ch == u'\x85' or u'\xA0' <= ch <= u'\uD7FF'
+ or u'\uE000' <= ch <= u'\uFFFD') and ch != u'\uFEFF':
+ unicode_characters = True
+ if not self.allow_unicode:
+ special_characters = True
+ else:
+ special_characters = True
+
+ # Detect important whitespace combinations.
+ if ch == u' ':
+ if index == 0:
+ leading_space = True
+ if index == len(scalar)-1:
+ trailing_space = True
+ if previous_break:
+ break_space = True
+ previous_space = True
+ previous_break = False
+ elif ch in u'\n\x85\u2028\u2029':
+ if index == 0:
+ leading_break = True
+ if index == len(scalar)-1:
+ trailing_break = True
+ if previous_space:
+ space_break = True
+ previous_space = False
+ previous_break = True
+ else:
+ previous_space = False
+ previous_break = False
+
+ # Prepare for the next character.
+ index += 1
+ preceeded_by_whitespace = (ch in u'\0 \t\r\n\x85\u2028\u2029')
+ followed_by_whitespace = (index+1 >= len(scalar) or
+ scalar[index+1] in u'\0 \t\r\n\x85\u2028\u2029')
+
+ # Let's decide what styles are allowed.
+ allow_flow_plain = True
+ allow_block_plain = True
+ allow_single_quoted = True
+ allow_double_quoted = True
+ allow_block = True
+
+ # Leading and trailing whitespaces are bad for plain scalars.
+ if (leading_space or leading_break
+ or trailing_space or trailing_break):
+ allow_flow_plain = allow_block_plain = False
+
+ # We do not permit trailing spaces for block scalars.
+ if trailing_space:
+ allow_block = False
+
+ # Spaces at the beginning of a new line are only acceptable for block
+ # scalars.
+ if break_space:
+ allow_flow_plain = allow_block_plain = allow_single_quoted = False
+
+        # Spaces followed by breaks, as well as special characters, are only
+        # allowed for double quoted scalars.
+ if space_break or special_characters:
+ allow_flow_plain = allow_block_plain = \
+ allow_single_quoted = allow_block = False
+
+ # Although the plain scalar writer supports breaks, we never emit
+ # multiline plain scalars.
+ if line_breaks:
+ allow_flow_plain = allow_block_plain = False
+
+ # Flow indicators are forbidden for flow plain scalars.
+ if flow_indicators:
+ allow_flow_plain = False
+
+ # Block indicators are forbidden for block plain scalars.
+ if block_indicators:
+ allow_block_plain = False
+
+ return ScalarAnalysis(scalar=scalar,
+ empty=False, multiline=line_breaks,
+ allow_flow_plain=allow_flow_plain,
+ allow_block_plain=allow_block_plain,
+ allow_single_quoted=allow_single_quoted,
+ allow_double_quoted=allow_double_quoted,
+ allow_block=allow_block)
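+
+    # For example, analyze_scalar(u'hello world') allows every style, while
+    # analyze_scalar(u'foo: bar') clears allow_flow_plain and
+    # allow_block_plain because ': ' is an indicator in both contexts.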
+
+ # Writers.
+
+ def flush_stream(self):
+ if hasattr(self.stream, 'flush'):
+ self.stream.flush()
+
+ def write_stream_start(self):
+ # Write BOM if needed.
+ if self.encoding and self.encoding.startswith('utf-16'):
+ self.stream.write(u'\uFEFF'.encode(self.encoding))
+
+ def write_stream_end(self):
+ self.flush_stream()
+
+ def write_indicator(self, indicator, need_whitespace,
+ whitespace=False, indention=False):
+ if self.whitespace or not need_whitespace:
+ data = indicator
+ else:
+ data = u' '+indicator
+ self.whitespace = whitespace
+ self.indention = self.indention and indention
+ self.column += len(data)
+ self.open_ended = False
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+
+ def write_indent(self):
+ indent = self.indent or 0
+ if not self.indention or self.column > indent \
+ or (self.column == indent and not self.whitespace):
+ self.write_line_break()
+ if self.column < indent:
+ self.whitespace = True
+ data = u' '*(indent-self.column)
+ self.column = indent
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+
+ def write_line_break(self, data=None):
+ if data is None:
+ data = self.best_line_break
+ self.whitespace = True
+ self.indention = True
+ self.line += 1
+ self.column = 0
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+
+ def write_version_directive(self, version_text):
+ data = u'%%YAML %s' % version_text
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ self.write_line_break()
+
+ def write_tag_directive(self, handle_text, prefix_text):
+ data = u'%%TAG %s %s' % (handle_text, prefix_text)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ self.write_line_break()
+
+ # Scalar streams.
+
+ def write_single_quoted(self, text, split=True):
+ self.write_indicator(u'\'', True)
+ spaces = False
+ breaks = False
+ start = end = 0
+ while end <= len(text):
+ ch = None
+ if end < len(text):
+ ch = text[end]
+ if spaces:
+ if ch is None or ch != u' ':
+ if start+1 == end and self.column > self.best_width and split \
+ and start != 0 and end != len(text):
+ self.write_indent()
+ else:
+ data = text[start:end]
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end
+ elif breaks:
+ if ch is None or ch not in u'\n\x85\u2028\u2029':
+ if text[start] == u'\n':
+ self.write_line_break()
+ for br in text[start:end]:
+ if br == u'\n':
+ self.write_line_break()
+ else:
+ self.write_line_break(br)
+ self.write_indent()
+ start = end
+ else:
+ if ch is None or ch in u' \n\x85\u2028\u2029' or ch == u'\'':
+ if start < end:
+ data = text[start:end]
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end
+ if ch == u'\'':
+ data = u'\'\''
+ self.column += 2
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end + 1
+ if ch is not None:
+ spaces = (ch == u' ')
+ breaks = (ch in u'\n\x85\u2028\u2029')
+ end += 1
+ self.write_indicator(u'\'', False)
+
+ ESCAPE_REPLACEMENTS = {
+ u'\0': u'0',
+ u'\x07': u'a',
+ u'\x08': u'b',
+ u'\x09': u't',
+ u'\x0A': u'n',
+ u'\x0B': u'v',
+ u'\x0C': u'f',
+ u'\x0D': u'r',
+ u'\x1B': u'e',
+ u'\"': u'\"',
+ u'\\': u'\\',
+ u'\x85': u'N',
+ u'\xA0': u'_',
+ u'\u2028': u'L',
+ u'\u2029': u'P',
+ }
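+
+    # For example, write_double_quoted(u'a\tb\u2028') below emits "a\tb\L";
+    # characters without an entry in this table fall back to the \xXX, \uXXXX
+    # or \UXXXXXXXX escape forms.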
+
+ def write_double_quoted(self, text, split=True):
+ self.write_indicator(u'"', True)
+ start = end = 0
+ while end <= len(text):
+ ch = None
+ if end < len(text):
+ ch = text[end]
+ if ch is None or ch in u'"\\\x85\u2028\u2029\uFEFF' \
+ or not (u'\x20' <= ch <= u'\x7E'
+ or (self.allow_unicode
+ and (u'\xA0' <= ch <= u'\uD7FF'
+ or u'\uE000' <= ch <= u'\uFFFD'))):
+ if start < end:
+ data = text[start:end]
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end
+ if ch is not None:
+ if ch in self.ESCAPE_REPLACEMENTS:
+ data = u'\\'+self.ESCAPE_REPLACEMENTS[ch]
+ elif ch <= u'\xFF':
+ data = u'\\x%02X' % ord(ch)
+ elif ch <= u'\uFFFF':
+ data = u'\\u%04X' % ord(ch)
+ else:
+ data = u'\\U%08X' % ord(ch)
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end+1
+ if 0 < end < len(text)-1 and (ch == u' ' or start >= end) \
+ and self.column+(end-start) > self.best_width and split:
+ data = text[start:end]+u'\\'
+ if start < end:
+ start = end
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ self.write_indent()
+ self.whitespace = False
+ self.indention = False
+ if text[start] == u' ':
+ data = u'\\'
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ end += 1
+ self.write_indicator(u'"', False)
+
+ def determine_block_hints(self, text):
+ hints = u''
+ if text:
+ if text[0] in u' \n\x85\u2028\u2029':
+ hints += unicode(self.best_indent)
+ if text[-1] not in u'\n\x85\u2028\u2029':
+ hints += u'-'
+ elif len(text) == 1 or text[-2] in u'\n\x85\u2028\u2029':
+ hints += u'+'
+ return hints
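+
+    # For example, u'  text\n' yields hints u'2' (explicit indentation because
+    # of the leading space), u'text' yields u'-' (strip the missing final
+    # break), and u'text\n\n' yields u'+' (keep the trailing breaks).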
+
+ def write_folded(self, text):
+ hints = self.determine_block_hints(text)
+ self.write_indicator(u'>'+hints, True)
+ if hints[-1:] == u'+':
+ self.open_ended = True
+ self.write_line_break()
+ leading_space = True
+ spaces = False
+ breaks = True
+ start = end = 0
+ while end <= len(text):
+ ch = None
+ if end < len(text):
+ ch = text[end]
+ if breaks:
+ if ch is None or ch not in u'\n\x85\u2028\u2029':
+ if not leading_space and ch is not None and ch != u' ' \
+ and text[start] == u'\n':
+ self.write_line_break()
+ leading_space = (ch == u' ')
+ for br in text[start:end]:
+ if br == u'\n':
+ self.write_line_break()
+ else:
+ self.write_line_break(br)
+ if ch is not None:
+ self.write_indent()
+ start = end
+ elif spaces:
+ if ch != u' ':
+ if start+1 == end and self.column > self.best_width:
+ self.write_indent()
+ else:
+ data = text[start:end]
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end
+ else:
+ if ch is None or ch in u' \n\x85\u2028\u2029':
+ data = text[start:end]
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ if ch is None:
+ self.write_line_break()
+ start = end
+ if ch is not None:
+ breaks = (ch in u'\n\x85\u2028\u2029')
+ spaces = (ch == u' ')
+ end += 1
+
+ def write_literal(self, text):
+ hints = self.determine_block_hints(text)
+ self.write_indicator(u'|'+hints, True)
+ if hints[-1:] == u'+':
+ self.open_ended = True
+ self.write_line_break()
+ breaks = True
+ start = end = 0
+ while end <= len(text):
+ ch = None
+ if end < len(text):
+ ch = text[end]
+ if breaks:
+ if ch is None or ch not in u'\n\x85\u2028\u2029':
+ for br in text[start:end]:
+ if br == u'\n':
+ self.write_line_break()
+ else:
+ self.write_line_break(br)
+ if ch is not None:
+ self.write_indent()
+ start = end
+ else:
+ if ch is None or ch in u'\n\x85\u2028\u2029':
+ data = text[start:end]
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ if ch is None:
+ self.write_line_break()
+ start = end
+ if ch is not None:
+ breaks = (ch in u'\n\x85\u2028\u2029')
+ end += 1
+
+ def write_plain(self, text, split=True):
+ if self.root_context:
+ self.open_ended = True
+ if not text:
+ return
+ if not self.whitespace:
+ data = u' '
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ self.whitespace = False
+ self.indention = False
+ spaces = False
+ breaks = False
+ start = end = 0
+ while end <= len(text):
+ ch = None
+ if end < len(text):
+ ch = text[end]
+ if spaces:
+ if ch != u' ':
+ if start+1 == end and self.column > self.best_width and split:
+ self.write_indent()
+ self.whitespace = False
+ self.indention = False
+ else:
+ data = text[start:end]
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end
+ elif breaks:
+ if ch not in u'\n\x85\u2028\u2029':
+ if text[start] == u'\n':
+ self.write_line_break()
+ for br in text[start:end]:
+ if br == u'\n':
+ self.write_line_break()
+ else:
+ self.write_line_break(br)
+ self.write_indent()
+ self.whitespace = False
+ self.indention = False
+ start = end
+ else:
+ if ch is None or ch in u' \n\x85\u2028\u2029':
+ data = text[start:end]
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end
+ if ch is not None:
+ spaces = (ch == u' ')
+ breaks = (ch in u'\n\x85\u2028\u2029')
+ end += 1
+
diff --git a/collectors/python.d.plugin/python_modules/pyyaml2/error.py b/collectors/python.d.plugin/python_modules/pyyaml2/error.py
new file mode 100644
index 0000000..5466be7
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/pyyaml2/error.py
@@ -0,0 +1,76 @@
+# SPDX-License-Identifier: MIT
+
+__all__ = ['Mark', 'YAMLError', 'MarkedYAMLError']
+
+class Mark(object):
+
+ def __init__(self, name, index, line, column, buffer, pointer):
+ self.name = name
+ self.index = index
+ self.line = line
+ self.column = column
+ self.buffer = buffer
+ self.pointer = pointer
+
+ def get_snippet(self, indent=4, max_length=75):
+ if self.buffer is None:
+ return None
+ head = ''
+ start = self.pointer
+ while start > 0 and self.buffer[start-1] not in u'\0\r\n\x85\u2028\u2029':
+ start -= 1
+ if self.pointer-start > max_length/2-1:
+ head = ' ... '
+ start += 5
+ break
+ tail = ''
+ end = self.pointer
+ while end < len(self.buffer) and self.buffer[end] not in u'\0\r\n\x85\u2028\u2029':
+ end += 1
+ if end-self.pointer > max_length/2-1:
+ tail = ' ... '
+ end -= 5
+ break
+ snippet = self.buffer[start:end].encode('utf-8')
+ return ' '*indent + head + snippet + tail + '\n' \
+ + ' '*(indent+self.pointer-start+len(head)) + '^'
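+
+    # For example, with buffer u'key: [1, 2' and the pointer on '[',
+    # get_snippet() returns roughly:
+    #     key: [1, 2
+    #          ^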
+
+ def __str__(self):
+ snippet = self.get_snippet()
+ where = " in \"%s\", line %d, column %d" \
+ % (self.name, self.line+1, self.column+1)
+ if snippet is not None:
+ where += ":\n"+snippet
+ return where
+
+class YAMLError(Exception):
+ pass
+
+class MarkedYAMLError(YAMLError):
+
+ def __init__(self, context=None, context_mark=None,
+ problem=None, problem_mark=None, note=None):
+ self.context = context
+ self.context_mark = context_mark
+ self.problem = problem
+ self.problem_mark = problem_mark
+ self.note = note
+
+ def __str__(self):
+ lines = []
+ if self.context is not None:
+ lines.append(self.context)
+ if self.context_mark is not None \
+ and (self.problem is None or self.problem_mark is None
+ or self.context_mark.name != self.problem_mark.name
+ or self.context_mark.line != self.problem_mark.line
+ or self.context_mark.column != self.problem_mark.column):
+ lines.append(str(self.context_mark))
+ if self.problem is not None:
+ lines.append(self.problem)
+ if self.problem_mark is not None:
+ lines.append(str(self.problem_mark))
+ if self.note is not None:
+ lines.append(self.note)
+ return '\n'.join(lines)
+
diff --git a/collectors/python.d.plugin/python_modules/pyyaml2/events.py b/collectors/python.d.plugin/python_modules/pyyaml2/events.py
new file mode 100644
index 0000000..283452a
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/pyyaml2/events.py
@@ -0,0 +1,87 @@
+# SPDX-License-Identifier: MIT
+
+# Abstract classes.
+
+class Event(object):
+ def __init__(self, start_mark=None, end_mark=None):
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ def __repr__(self):
+ attributes = [key for key in ['anchor', 'tag', 'implicit', 'value']
+ if hasattr(self, key)]
+ arguments = ', '.join(['%s=%r' % (key, getattr(self, key))
+ for key in attributes])
+ return '%s(%s)' % (self.__class__.__name__, arguments)
+
+class NodeEvent(Event):
+ def __init__(self, anchor, start_mark=None, end_mark=None):
+ self.anchor = anchor
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+
+class CollectionStartEvent(NodeEvent):
+ def __init__(self, anchor, tag, implicit, start_mark=None, end_mark=None,
+ flow_style=None):
+ self.anchor = anchor
+ self.tag = tag
+ self.implicit = implicit
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.flow_style = flow_style
+
+class CollectionEndEvent(Event):
+ pass
+
+# Implementations.
+
+class StreamStartEvent(Event):
+ def __init__(self, start_mark=None, end_mark=None, encoding=None):
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.encoding = encoding
+
+class StreamEndEvent(Event):
+ pass
+
+class DocumentStartEvent(Event):
+ def __init__(self, start_mark=None, end_mark=None,
+ explicit=None, version=None, tags=None):
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.explicit = explicit
+ self.version = version
+ self.tags = tags
+
+class DocumentEndEvent(Event):
+ def __init__(self, start_mark=None, end_mark=None,
+ explicit=None):
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.explicit = explicit
+
+class AliasEvent(NodeEvent):
+ pass
+
+class ScalarEvent(NodeEvent):
+ def __init__(self, anchor, tag, implicit, value,
+ start_mark=None, end_mark=None, style=None):
+ self.anchor = anchor
+ self.tag = tag
+ self.implicit = implicit
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.style = style
+
+class SequenceStartEvent(CollectionStartEvent):
+ pass
+
+class SequenceEndEvent(CollectionEndEvent):
+ pass
+
+class MappingStartEvent(CollectionStartEvent):
+ pass
+
+class MappingEndEvent(CollectionEndEvent):
+ pass
+
diff --git a/collectors/python.d.plugin/python_modules/pyyaml2/loader.py b/collectors/python.d.plugin/python_modules/pyyaml2/loader.py
new file mode 100644
index 0000000..1c19553
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/pyyaml2/loader.py
@@ -0,0 +1,41 @@
+# SPDX-License-Identifier: MIT
+
+__all__ = ['BaseLoader', 'SafeLoader', 'Loader']
+
+from reader import *
+from scanner import *
+from parser import *
+from composer import *
+from constructor import *
+from resolver import *
+
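+# Each loader below is assembled by multiple inheritance: Reader feeds
+# characters to Scanner, Scanner feeds tokens to Parser, Parser feeds events
+# to Composer, and the Constructor/Resolver pair turns composed nodes into
+# Python objects. SafeLoader differs from Loader only in using
+# SafeConstructor, which restricts construction to standard YAML types.
+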
+class BaseLoader(Reader, Scanner, Parser, Composer, BaseConstructor, BaseResolver):
+
+ def __init__(self, stream):
+ Reader.__init__(self, stream)
+ Scanner.__init__(self)
+ Parser.__init__(self)
+ Composer.__init__(self)
+ BaseConstructor.__init__(self)
+ BaseResolver.__init__(self)
+
+class SafeLoader(Reader, Scanner, Parser, Composer, SafeConstructor, Resolver):
+
+ def __init__(self, stream):
+ Reader.__init__(self, stream)
+ Scanner.__init__(self)
+ Parser.__init__(self)
+ Composer.__init__(self)
+ SafeConstructor.__init__(self)
+ Resolver.__init__(self)
+
+class Loader(Reader, Scanner, Parser, Composer, Constructor, Resolver):
+
+ def __init__(self, stream):
+ Reader.__init__(self, stream)
+ Scanner.__init__(self)
+ Parser.__init__(self)
+ Composer.__init__(self)
+ Constructor.__init__(self)
+ Resolver.__init__(self)
+
diff --git a/collectors/python.d.plugin/python_modules/pyyaml2/nodes.py b/collectors/python.d.plugin/python_modules/pyyaml2/nodes.py
new file mode 100644
index 0000000..ed2a1b4
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/pyyaml2/nodes.py
@@ -0,0 +1,50 @@
+# SPDX-License-Identifier: MIT
+
+class Node(object):
+ def __init__(self, tag, value, start_mark, end_mark):
+ self.tag = tag
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ def __repr__(self):
+ value = self.value
+ #if isinstance(value, list):
+ # if len(value) == 0:
+ # value = '<empty>'
+ # elif len(value) == 1:
+ # value = '<1 item>'
+ # else:
+ # value = '<%d items>' % len(value)
+ #else:
+ # if len(value) > 75:
+ # value = repr(value[:70]+u' ... ')
+ # else:
+ # value = repr(value)
+ value = repr(value)
+ return '%s(tag=%r, value=%s)' % (self.__class__.__name__, self.tag, value)
+
+class ScalarNode(Node):
+ id = 'scalar'
+ def __init__(self, tag, value,
+ start_mark=None, end_mark=None, style=None):
+ self.tag = tag
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.style = style
+
+class CollectionNode(Node):
+ def __init__(self, tag, value,
+ start_mark=None, end_mark=None, flow_style=None):
+ self.tag = tag
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.flow_style = flow_style
+
+class SequenceNode(CollectionNode):
+ id = 'sequence'
+
+class MappingNode(CollectionNode):
+ id = 'mapping'
+
diff --git a/collectors/python.d.plugin/python_modules/pyyaml2/parser.py b/collectors/python.d.plugin/python_modules/pyyaml2/parser.py
new file mode 100644
index 0000000..97ba083
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/pyyaml2/parser.py
@@ -0,0 +1,590 @@
+# SPDX-License-Identifier: MIT
+
+# The following YAML grammar is LL(1) and is parsed by a recursive descent
+# parser.
+#
+# stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
+# implicit_document ::= block_node DOCUMENT-END*
+# explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+# block_node_or_indentless_sequence ::=
+# ALIAS
+# | properties (block_content | indentless_block_sequence)?
+# | block_content
+# | indentless_block_sequence
+# block_node ::= ALIAS
+# | properties block_content?
+# | block_content
+# flow_node ::= ALIAS
+# | properties flow_content?
+# | flow_content
+# properties ::= TAG ANCHOR? | ANCHOR TAG?
+# block_content ::= block_collection | flow_collection | SCALAR
+# flow_content ::= flow_collection | SCALAR
+# block_collection ::= block_sequence | block_mapping
+# flow_collection ::= flow_sequence | flow_mapping
+# block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
+# indentless_sequence ::= (BLOCK-ENTRY block_node?)+
+# block_mapping ::= BLOCK-MAPPING_START
+# ((KEY block_node_or_indentless_sequence?)?
+# (VALUE block_node_or_indentless_sequence?)?)*
+# BLOCK-END
+# flow_sequence ::= FLOW-SEQUENCE-START
+# (flow_sequence_entry FLOW-ENTRY)*
+# flow_sequence_entry?
+# FLOW-SEQUENCE-END
+# flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+# flow_mapping ::= FLOW-MAPPING-START
+# (flow_mapping_entry FLOW-ENTRY)*
+# flow_mapping_entry?
+# FLOW-MAPPING-END
+# flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+#
+# FIRST sets:
+#
+# stream: { STREAM-START }
+# explicit_document: { DIRECTIVE DOCUMENT-START }
+# implicit_document: FIRST(block_node)
+# block_node: { ALIAS TAG ANCHOR SCALAR BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START }
+# flow_node: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START }
+# block_content: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR }
+# flow_content: { FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR }
+# block_collection: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START }
+# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START }
+# block_sequence: { BLOCK-SEQUENCE-START }
+# block_mapping: { BLOCK-MAPPING-START }
+# block_node_or_indentless_sequence: { ALIAS ANCHOR TAG SCALAR BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START BLOCK-ENTRY }
+# indentless_sequence: { BLOCK-ENTRY }
+# flow_sequence: { FLOW-SEQUENCE-START }
+# flow_mapping: { FLOW-MAPPING-START }
+# flow_sequence_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START KEY }
+# flow_mapping_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START KEY }
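+#
+# For example, the two-line stream
+#   - a
+#   ...
+# is parsed as: stream -> implicit_document, where the block sequence `- a`
+# is the block_node (via block_content and block_collection) and the trailing
+# `...` is a DOCUMENT-END marker.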
+
+__all__ = ['Parser', 'ParserError']
+
+from error import MarkedYAMLError
+from tokens import *
+from events import *
+from scanner import *
+
+class ParserError(MarkedYAMLError):
+ pass
+
+class Parser(object):
+    # Since writing a recursive descent parser is a straightforward task, we
+    # do not provide many comments here.
+
+ DEFAULT_TAGS = {
+ u'!': u'!',
+ u'!!': u'tag:yaml.org,2002:',
+ }
+
+ def __init__(self):
+ self.current_event = None
+ self.yaml_version = None
+ self.tag_handles = {}
+ self.states = []
+ self.marks = []
+ self.state = self.parse_stream_start
+
+ def dispose(self):
+ # Reset the state attributes (to clear self-references)
+ self.states = []
+ self.state = None
+
+ def check_event(self, *choices):
+ # Check the type of the next event.
+ if self.current_event is None:
+ if self.state:
+ self.current_event = self.state()
+ if self.current_event is not None:
+ if not choices:
+ return True
+ for choice in choices:
+ if isinstance(self.current_event, choice):
+ return True
+ return False
+
+ def peek_event(self):
+ # Get the next event.
+ if self.current_event is None:
+ if self.state:
+ self.current_event = self.state()
+ return self.current_event
+
+ def get_event(self):
+ # Get the next event and proceed further.
+ if self.current_event is None:
+ if self.state:
+ self.current_event = self.state()
+ value = self.current_event
+ self.current_event = None
+ return value
+
+ # stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
+ # implicit_document ::= block_node DOCUMENT-END*
+ # explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+
+ def parse_stream_start(self):
+
+ # Parse the stream start.
+ token = self.get_token()
+ event = StreamStartEvent(token.start_mark, token.end_mark,
+ encoding=token.encoding)
+
+ # Prepare the next state.
+ self.state = self.parse_implicit_document_start
+
+ return event
+
+ def parse_implicit_document_start(self):
+
+ # Parse an implicit document.
+ if not self.check_token(DirectiveToken, DocumentStartToken,
+ StreamEndToken):
+ self.tag_handles = self.DEFAULT_TAGS
+ token = self.peek_token()
+ start_mark = end_mark = token.start_mark
+ event = DocumentStartEvent(start_mark, end_mark,
+ explicit=False)
+
+ # Prepare the next state.
+ self.states.append(self.parse_document_end)
+ self.state = self.parse_block_node
+
+ return event
+
+ else:
+ return self.parse_document_start()
+
+ def parse_document_start(self):
+
+ # Parse any extra document end indicators.
+ while self.check_token(DocumentEndToken):
+ self.get_token()
+
+ # Parse an explicit document.
+ if not self.check_token(StreamEndToken):
+ token = self.peek_token()
+ start_mark = token.start_mark
+ version, tags = self.process_directives()
+ if not self.check_token(DocumentStartToken):
+ raise ParserError(None, None,
+ "expected '<document start>', but found %r"
+ % self.peek_token().id,
+ self.peek_token().start_mark)
+ token = self.get_token()
+ end_mark = token.end_mark
+ event = DocumentStartEvent(start_mark, end_mark,
+ explicit=True, version=version, tags=tags)
+ self.states.append(self.parse_document_end)
+ self.state = self.parse_document_content
+ else:
+ # Parse the end of the stream.
+ token = self.get_token()
+ event = StreamEndEvent(token.start_mark, token.end_mark)
+ assert not self.states
+ assert not self.marks
+ self.state = None
+ return event
+
+ def parse_document_end(self):
+
+ # Parse the document end.
+ token = self.peek_token()
+ start_mark = end_mark = token.start_mark
+ explicit = False
+ if self.check_token(DocumentEndToken):
+ token = self.get_token()
+ end_mark = token.end_mark
+ explicit = True
+ event = DocumentEndEvent(start_mark, end_mark,
+ explicit=explicit)
+
+ # Prepare the next state.
+ self.state = self.parse_document_start
+
+ return event
+
+ def parse_document_content(self):
+ if self.check_token(DirectiveToken,
+ DocumentStartToken, DocumentEndToken, StreamEndToken):
+ event = self.process_empty_scalar(self.peek_token().start_mark)
+ self.state = self.states.pop()
+ return event
+ else:
+ return self.parse_block_node()
+
+ def process_directives(self):
+ self.yaml_version = None
+ self.tag_handles = {}
+ while self.check_token(DirectiveToken):
+ token = self.get_token()
+ if token.name == u'YAML':
+ if self.yaml_version is not None:
+ raise ParserError(None, None,
+ "found duplicate YAML directive", token.start_mark)
+ major, minor = token.value
+ if major != 1:
+ raise ParserError(None, None,
+ "found incompatible YAML document (version 1.* is required)",
+ token.start_mark)
+ self.yaml_version = token.value
+ elif token.name == u'TAG':
+ handle, prefix = token.value
+ if handle in self.tag_handles:
+ raise ParserError(None, None,
+ "duplicate tag handle %r" % handle.encode('utf-8'),
+ token.start_mark)
+ self.tag_handles[handle] = prefix
+ if self.tag_handles:
+ value = self.yaml_version, self.tag_handles.copy()
+ else:
+ value = self.yaml_version, None
+ for key in self.DEFAULT_TAGS:
+ if key not in self.tag_handles:
+ self.tag_handles[key] = self.DEFAULT_TAGS[key]
+ return value
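+
+    # For example, the directives
+    #   %YAML 1.1
+    #   %TAG !e! tag:example.com,2000:app/
+    # make this method return ((1, 1), {u'!e!': u'tag:example.com,2000:app/'});
+    # the default u'!' and u'!!' handles are then added to self.tag_handles
+    # only, not to the returned copy.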
+
+ # block_node_or_indentless_sequence ::= ALIAS
+ # | properties (block_content | indentless_block_sequence)?
+ # | block_content
+ # | indentless_block_sequence
+ # block_node ::= ALIAS
+ # | properties block_content?
+ # | block_content
+ # flow_node ::= ALIAS
+ # | properties flow_content?
+ # | flow_content
+ # properties ::= TAG ANCHOR? | ANCHOR TAG?
+ # block_content ::= block_collection | flow_collection | SCALAR
+ # flow_content ::= flow_collection | SCALAR
+ # block_collection ::= block_sequence | block_mapping
+ # flow_collection ::= flow_sequence | flow_mapping
+
+ def parse_block_node(self):
+ return self.parse_node(block=True)
+
+ def parse_flow_node(self):
+ return self.parse_node()
+
+ def parse_block_node_or_indentless_sequence(self):
+ return self.parse_node(block=True, indentless_sequence=True)
+
+ def parse_node(self, block=False, indentless_sequence=False):
+ if self.check_token(AliasToken):
+ token = self.get_token()
+ event = AliasEvent(token.value, token.start_mark, token.end_mark)
+ self.state = self.states.pop()
+ else:
+ anchor = None
+ tag = None
+ start_mark = end_mark = tag_mark = None
+ if self.check_token(AnchorToken):
+ token = self.get_token()
+ start_mark = token.start_mark
+ end_mark = token.end_mark
+ anchor = token.value
+ if self.check_token(TagToken):
+ token = self.get_token()
+ tag_mark = token.start_mark
+ end_mark = token.end_mark
+ tag = token.value
+ elif self.check_token(TagToken):
+ token = self.get_token()
+ start_mark = tag_mark = token.start_mark
+ end_mark = token.end_mark
+ tag = token.value
+ if self.check_token(AnchorToken):
+ token = self.get_token()
+ end_mark = token.end_mark
+ anchor = token.value
+ if tag is not None:
+ handle, suffix = tag
+ if handle is not None:
+ if handle not in self.tag_handles:
+ raise ParserError("while parsing a node", start_mark,
+ "found undefined tag handle %r" % handle.encode('utf-8'),
+ tag_mark)
+ tag = self.tag_handles[handle]+suffix
+ else:
+ tag = suffix
+ #if tag == u'!':
+ # raise ParserError("while parsing a node", start_mark,
+ # "found non-specific tag '!'", tag_mark,
+ # "Please check 'http://pyyaml.org/wiki/YAMLNonSpecificTag' and share your opinion.")
+ if start_mark is None:
+ start_mark = end_mark = self.peek_token().start_mark
+ event = None
+ implicit = (tag is None or tag == u'!')
+ if indentless_sequence and self.check_token(BlockEntryToken):
+ end_mark = self.peek_token().end_mark
+ event = SequenceStartEvent(anchor, tag, implicit,
+ start_mark, end_mark)
+ self.state = self.parse_indentless_sequence_entry
+ else:
+ if self.check_token(ScalarToken):
+ token = self.get_token()
+ end_mark = token.end_mark
+ if (token.plain and tag is None) or tag == u'!':
+ implicit = (True, False)
+ elif tag is None:
+ implicit = (False, True)
+ else:
+ implicit = (False, False)
+ event = ScalarEvent(anchor, tag, implicit, token.value,
+ start_mark, end_mark, style=token.style)
+ self.state = self.states.pop()
+ elif self.check_token(FlowSequenceStartToken):
+ end_mark = self.peek_token().end_mark
+ event = SequenceStartEvent(anchor, tag, implicit,
+ start_mark, end_mark, flow_style=True)
+ self.state = self.parse_flow_sequence_first_entry
+ elif self.check_token(FlowMappingStartToken):
+ end_mark = self.peek_token().end_mark
+ event = MappingStartEvent(anchor, tag, implicit,
+ start_mark, end_mark, flow_style=True)
+ self.state = self.parse_flow_mapping_first_key
+ elif block and self.check_token(BlockSequenceStartToken):
+ end_mark = self.peek_token().start_mark
+ event = SequenceStartEvent(anchor, tag, implicit,
+ start_mark, end_mark, flow_style=False)
+ self.state = self.parse_block_sequence_first_entry
+ elif block and self.check_token(BlockMappingStartToken):
+ end_mark = self.peek_token().start_mark
+ event = MappingStartEvent(anchor, tag, implicit,
+ start_mark, end_mark, flow_style=False)
+ self.state = self.parse_block_mapping_first_key
+ elif anchor is not None or tag is not None:
+ # Empty scalars are allowed even if a tag or an anchor is
+ # specified.
+ event = ScalarEvent(anchor, tag, (implicit, False), u'',
+ start_mark, end_mark)
+ self.state = self.states.pop()
+ else:
+ if block:
+ node = 'block'
+ else:
+ node = 'flow'
+ token = self.peek_token()
+ raise ParserError("while parsing a %s node" % node, start_mark,
+ "expected the node content, but found %r" % token.id,
+ token.start_mark)
+ return event
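+
+    # For example, the node `&a !!str foo` produces a ScalarEvent with
+    # anchor=u'a', tag=u'tag:yaml.org,2002:str' and implicit=(False, False),
+    # since an explicit tag rules out both implicit resolutions.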
+
+ # block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
+
+ def parse_block_sequence_first_entry(self):
+ token = self.get_token()
+ self.marks.append(token.start_mark)
+ return self.parse_block_sequence_entry()
+
+ def parse_block_sequence_entry(self):
+ if self.check_token(BlockEntryToken):
+ token = self.get_token()
+ if not self.check_token(BlockEntryToken, BlockEndToken):
+ self.states.append(self.parse_block_sequence_entry)
+ return self.parse_block_node()
+ else:
+ self.state = self.parse_block_sequence_entry
+ return self.process_empty_scalar(token.end_mark)
+ if not self.check_token(BlockEndToken):
+ token = self.peek_token()
+ raise ParserError("while parsing a block collection", self.marks[-1],
+ "expected <block end>, but found %r" % token.id, token.start_mark)
+ token = self.get_token()
+ event = SequenceEndEvent(token.start_mark, token.end_mark)
+ self.state = self.states.pop()
+ self.marks.pop()
+ return event
+
+ # indentless_sequence ::= (BLOCK-ENTRY block_node?)+
+
+ def parse_indentless_sequence_entry(self):
+ if self.check_token(BlockEntryToken):
+ token = self.get_token()
+ if not self.check_token(BlockEntryToken,
+ KeyToken, ValueToken, BlockEndToken):
+ self.states.append(self.parse_indentless_sequence_entry)
+ return self.parse_block_node()
+ else:
+ self.state = self.parse_indentless_sequence_entry
+ return self.process_empty_scalar(token.end_mark)
+ token = self.peek_token()
+ event = SequenceEndEvent(token.start_mark, token.start_mark)
+ self.state = self.states.pop()
+ return event
+
+ # block_mapping ::= BLOCK-MAPPING_START
+ # ((KEY block_node_or_indentless_sequence?)?
+ # (VALUE block_node_or_indentless_sequence?)?)*
+ # BLOCK-END
+
+ def parse_block_mapping_first_key(self):
+ token = self.get_token()
+ self.marks.append(token.start_mark)
+ return self.parse_block_mapping_key()
+
+ def parse_block_mapping_key(self):
+ if self.check_token(KeyToken):
+ token = self.get_token()
+ if not self.check_token(KeyToken, ValueToken, BlockEndToken):
+ self.states.append(self.parse_block_mapping_value)
+ return self.parse_block_node_or_indentless_sequence()
+ else:
+ self.state = self.parse_block_mapping_value
+ return self.process_empty_scalar(token.end_mark)
+ if not self.check_token(BlockEndToken):
+ token = self.peek_token()
+ raise ParserError("while parsing a block mapping", self.marks[-1],
+ "expected <block end>, but found %r" % token.id, token.start_mark)
+ token = self.get_token()
+ event = MappingEndEvent(token.start_mark, token.end_mark)
+ self.state = self.states.pop()
+ self.marks.pop()
+ return event
+
+ def parse_block_mapping_value(self):
+ if self.check_token(ValueToken):
+ token = self.get_token()
+ if not self.check_token(KeyToken, ValueToken, BlockEndToken):
+ self.states.append(self.parse_block_mapping_key)
+ return self.parse_block_node_or_indentless_sequence()
+ else:
+ self.state = self.parse_block_mapping_key
+ return self.process_empty_scalar(token.end_mark)
+ else:
+ self.state = self.parse_block_mapping_key
+ token = self.peek_token()
+ return self.process_empty_scalar(token.start_mark)
+
+ # flow_sequence ::= FLOW-SEQUENCE-START
+ # (flow_sequence_entry FLOW-ENTRY)*
+ # flow_sequence_entry?
+ # FLOW-SEQUENCE-END
+ # flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+ #
+    # Note that while the production rules for flow_sequence_entry and
+    # flow_mapping_entry are identical, their interpretations are different.
+    # For `flow_sequence_entry`, the part `KEY flow_node? (VALUE flow_node?)?`
+    # generates an inline mapping (set syntax).
+
+ def parse_flow_sequence_first_entry(self):
+ token = self.get_token()
+ self.marks.append(token.start_mark)
+ return self.parse_flow_sequence_entry(first=True)
+
+ def parse_flow_sequence_entry(self, first=False):
+ if not self.check_token(FlowSequenceEndToken):
+ if not first:
+ if self.check_token(FlowEntryToken):
+ self.get_token()
+ else:
+ token = self.peek_token()
+ raise ParserError("while parsing a flow sequence", self.marks[-1],
+ "expected ',' or ']', but got %r" % token.id, token.start_mark)
+
+ if self.check_token(KeyToken):
+ token = self.peek_token()
+ event = MappingStartEvent(None, None, True,
+ token.start_mark, token.end_mark,
+ flow_style=True)
+ self.state = self.parse_flow_sequence_entry_mapping_key
+ return event
+ elif not self.check_token(FlowSequenceEndToken):
+ self.states.append(self.parse_flow_sequence_entry)
+ return self.parse_flow_node()
+ token = self.get_token()
+ event = SequenceEndEvent(token.start_mark, token.end_mark)
+ self.state = self.states.pop()
+ self.marks.pop()
+ return event
+
+ def parse_flow_sequence_entry_mapping_key(self):
+ token = self.get_token()
+ if not self.check_token(ValueToken,
+ FlowEntryToken, FlowSequenceEndToken):
+ self.states.append(self.parse_flow_sequence_entry_mapping_value)
+ return self.parse_flow_node()
+ else:
+ self.state = self.parse_flow_sequence_entry_mapping_value
+ return self.process_empty_scalar(token.end_mark)
+
+ def parse_flow_sequence_entry_mapping_value(self):
+ if self.check_token(ValueToken):
+ token = self.get_token()
+ if not self.check_token(FlowEntryToken, FlowSequenceEndToken):
+ self.states.append(self.parse_flow_sequence_entry_mapping_end)
+ return self.parse_flow_node()
+ else:
+ self.state = self.parse_flow_sequence_entry_mapping_end
+ return self.process_empty_scalar(token.end_mark)
+ else:
+ self.state = self.parse_flow_sequence_entry_mapping_end
+ token = self.peek_token()
+ return self.process_empty_scalar(token.start_mark)
+
+ def parse_flow_sequence_entry_mapping_end(self):
+ self.state = self.parse_flow_sequence_entry
+ token = self.peek_token()
+ return MappingEndEvent(token.start_mark, token.start_mark)
+
+ # flow_mapping ::= FLOW-MAPPING-START
+ # (flow_mapping_entry FLOW-ENTRY)*
+ # flow_mapping_entry?
+ # FLOW-MAPPING-END
+ # flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+
+ def parse_flow_mapping_first_key(self):
+ token = self.get_token()
+ self.marks.append(token.start_mark)
+ return self.parse_flow_mapping_key(first=True)
+
+ def parse_flow_mapping_key(self, first=False):
+ if not self.check_token(FlowMappingEndToken):
+ if not first:
+ if self.check_token(FlowEntryToken):
+ self.get_token()
+ else:
+ token = self.peek_token()
+ raise ParserError("while parsing a flow mapping", self.marks[-1],
+ "expected ',' or '}', but got %r" % token.id, token.start_mark)
+ if self.check_token(KeyToken):
+ token = self.get_token()
+ if not self.check_token(ValueToken,
+ FlowEntryToken, FlowMappingEndToken):
+ self.states.append(self.parse_flow_mapping_value)
+ return self.parse_flow_node()
+ else:
+ self.state = self.parse_flow_mapping_value
+ return self.process_empty_scalar(token.end_mark)
+ elif not self.check_token(FlowMappingEndToken):
+ self.states.append(self.parse_flow_mapping_empty_value)
+ return self.parse_flow_node()
+ token = self.get_token()
+ event = MappingEndEvent(token.start_mark, token.end_mark)
+ self.state = self.states.pop()
+ self.marks.pop()
+ return event
+
+ def parse_flow_mapping_value(self):
+ if self.check_token(ValueToken):
+ token = self.get_token()
+ if not self.check_token(FlowEntryToken, FlowMappingEndToken):
+ self.states.append(self.parse_flow_mapping_key)
+ return self.parse_flow_node()
+ else:
+ self.state = self.parse_flow_mapping_key
+ return self.process_empty_scalar(token.end_mark)
+ else:
+ self.state = self.parse_flow_mapping_key
+ token = self.peek_token()
+ return self.process_empty_scalar(token.start_mark)
+
+ def parse_flow_mapping_empty_value(self):
+ self.state = self.parse_flow_mapping_key
+ return self.process_empty_scalar(self.peek_token().start_mark)
+
+ def process_empty_scalar(self, mark):
+ return ScalarEvent(None, None, (True, False), u'', mark, mark)
+
diff --git a/collectors/python.d.plugin/python_modules/pyyaml2/reader.py b/collectors/python.d.plugin/python_modules/pyyaml2/reader.py
new file mode 100644
index 0000000..8d42295
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/pyyaml2/reader.py
@@ -0,0 +1,191 @@
+# SPDX-License-Identifier: MIT
+# This module contains abstractions for the input stream. You don't have to
+# look further; there is no pretty code here.
+#
+# We define two classes here.
+#
+# Mark(source, line, column)
+# It's just a record and its only use is producing nice error messages.
+# Parser does not use it for any other purposes.
+#
+# Reader(source, data)
+# Reader determines the encoding of `data` and converts it to unicode.
+# Reader provides the following methods and attributes:
+# reader.peek(index=0) - return the character `index` positions ahead.
+# reader.forward(length=1) - move the current position `length` characters forward.
+# reader.index - the number of the current character.
+# reader.line, reader.column - the line and the column of the current character.
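+#
+# A short usage sketch (illustrative, not part of the module):
+# reader = Reader(u"hello: world\n")
+# reader.peek() # -> u'h' (does not advance)
+# reader.prefix(5) # -> u'hello'
+# reader.forward(5) # advance; reader.column is now 5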
+
+__all__ = ['Reader', 'ReaderError']
+
+from error import YAMLError, Mark
+
+import codecs, re
+
+class ReaderError(YAMLError):
+
+ def __init__(self, name, position, character, encoding, reason):
+ self.name = name
+ self.character = character
+ self.position = position
+ self.encoding = encoding
+ self.reason = reason
+
+ def __str__(self):
+ if isinstance(self.character, str):
+ return "'%s' codec can't decode byte #x%02x: %s\n" \
+ " in \"%s\", position %d" \
+ % (self.encoding, ord(self.character), self.reason,
+ self.name, self.position)
+ else:
+ return "unacceptable character #x%04x: %s\n" \
+ " in \"%s\", position %d" \
+ % (self.character, self.reason,
+ self.name, self.position)
+
+class Reader(object):
+ # Reader:
+ # - determines the data encoding and converts it to unicode,
+ # - checks if characters are in allowed range,
+ # - adds '\0' to the end.
+
+ # Reader accepts
+ # - a `str` object,
+ # - a `unicode` object,
+ # - a file-like object with its `read` method returning `str`,
+ # - a file-like object with its `read` method returning `unicode`.
+
+ # Yeah, it's ugly and slow.
+
+ def __init__(self, stream):
+ self.name = None
+ self.stream = None
+ self.stream_pointer = 0
+ self.eof = True
+ self.buffer = u''
+ self.pointer = 0
+ self.raw_buffer = None
+ self.raw_decode = None
+ self.encoding = None
+ self.index = 0
+ self.line = 0
+ self.column = 0
+ if isinstance(stream, unicode):
+ self.name = "<unicode string>"
+ self.check_printable(stream)
+ self.buffer = stream+u'\0'
+ elif isinstance(stream, str):
+ self.name = "<string>"
+ self.raw_buffer = stream
+ self.determine_encoding()
+ else:
+ self.stream = stream
+ self.name = getattr(stream, 'name', "<file>")
+ self.eof = False
+ self.raw_buffer = ''
+ self.determine_encoding()
+
+ def peek(self, index=0):
+ try:
+ return self.buffer[self.pointer+index]
+ except IndexError:
+ self.update(index+1)
+ return self.buffer[self.pointer+index]
+
+ def prefix(self, length=1):
+ if self.pointer+length >= len(self.buffer):
+ self.update(length)
+ return self.buffer[self.pointer:self.pointer+length]
+
+ def forward(self, length=1):
+ if self.pointer+length+1 >= len(self.buffer):
+ self.update(length+1)
+ while length:
+ ch = self.buffer[self.pointer]
+ self.pointer += 1
+ self.index += 1
+ if ch in u'\n\x85\u2028\u2029' \
+ or (ch == u'\r' and self.buffer[self.pointer] != u'\n'):
+ self.line += 1
+ self.column = 0
+ elif ch != u'\uFEFF':
+ self.column += 1
+ length -= 1
+
+ def get_mark(self):
+ if self.stream is None:
+ return Mark(self.name, self.index, self.line, self.column,
+ self.buffer, self.pointer)
+ else:
+ return Mark(self.name, self.index, self.line, self.column,
+ None, None)
+
+ def determine_encoding(self):
+ while not self.eof and len(self.raw_buffer) < 2:
+ self.update_raw()
+ if not isinstance(self.raw_buffer, unicode):
+ if self.raw_buffer.startswith(codecs.BOM_UTF16_LE):
+ self.raw_decode = codecs.utf_16_le_decode
+ self.encoding = 'utf-16-le'
+ elif self.raw_buffer.startswith(codecs.BOM_UTF16_BE):
+ self.raw_decode = codecs.utf_16_be_decode
+ self.encoding = 'utf-16-be'
+ else:
+ self.raw_decode = codecs.utf_8_decode
+ self.encoding = 'utf-8'
+ self.update(1)
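+
+ # For example (illustrative): a raw buffer that starts with
+ # codecs.BOM_UTF16_LE ('\xff\xfe') selects the 'utf-16-le'
+ # decoder; without a UTF-16 BOM we fall back to 'utf-8'.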
+
+ NON_PRINTABLE = re.compile(u'[^\x09\x0A\x0D\x20-\x7E\x85\xA0-\uD7FF\uE000-\uFFFD]')
+ def check_printable(self, data):
+ match = self.NON_PRINTABLE.search(data)
+ if match:
+ character = match.group()
+ position = self.index+(len(self.buffer)-self.pointer)+match.start()
+ raise ReaderError(self.name, position, ord(character),
+ 'unicode', "special characters are not allowed")
+
+ def update(self, length):
+ if self.raw_buffer is None:
+ return
+ self.buffer = self.buffer[self.pointer:]
+ self.pointer = 0
+ while len(self.buffer) < length:
+ if not self.eof:
+ self.update_raw()
+ if self.raw_decode is not None:
+ try:
+ data, converted = self.raw_decode(self.raw_buffer,
+ 'strict', self.eof)
+ except UnicodeDecodeError, exc:
+ character = exc.object[exc.start]
+ if self.stream is not None:
+ position = self.stream_pointer-len(self.raw_buffer)+exc.start
+ else:
+ position = exc.start
+ raise ReaderError(self.name, position, character,
+ exc.encoding, exc.reason)
+ else:
+ data = self.raw_buffer
+ converted = len(data)
+ self.check_printable(data)
+ self.buffer += data
+ self.raw_buffer = self.raw_buffer[converted:]
+ if self.eof:
+ self.buffer += u'\0'
+ self.raw_buffer = None
+ break
+
+ def update_raw(self, size=1024):
+ data = self.stream.read(size)
+ if data:
+ self.raw_buffer += data
+ self.stream_pointer += len(data)
+ else:
+ self.eof = True
+
+#try:
+# import psyco
+# psyco.bind(Reader)
+#except ImportError:
+# pass
+
diff --git a/collectors/python.d.plugin/python_modules/pyyaml2/representer.py b/collectors/python.d.plugin/python_modules/pyyaml2/representer.py
new file mode 100644
index 0000000..0a1404e
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/pyyaml2/representer.py
@@ -0,0 +1,485 @@
+# SPDX-License-Identifier: MIT
+
+__all__ = ['BaseRepresenter', 'SafeRepresenter', 'Representer',
+ 'RepresenterError']
+
+from error import *
+from nodes import *
+
+import datetime
+
+import sys, copy_reg, types
+
+class RepresenterError(YAMLError):
+ pass
+
+class BaseRepresenter(object):
+
+ yaml_representers = {}
+ yaml_multi_representers = {}
+
+ def __init__(self, default_style=None, default_flow_style=None):
+ self.default_style = default_style
+ self.default_flow_style = default_flow_style
+ self.represented_objects = {}
+ self.object_keeper = []
+ self.alias_key = None
+
+ def represent(self, data):
+ node = self.represent_data(data)
+ self.serialize(node)
+ self.represented_objects = {}
+ self.object_keeper = []
+ self.alias_key = None
+
+ def get_classobj_bases(self, cls):
+ bases = [cls]
+ for base in cls.__bases__:
+ bases.extend(self.get_classobj_bases(base))
+ return bases
+
+ def represent_data(self, data):
+ if self.ignore_aliases(data):
+ self.alias_key = None
+ else:
+ self.alias_key = id(data)
+ if self.alias_key is not None:
+ if self.alias_key in self.represented_objects:
+ node = self.represented_objects[self.alias_key]
+ #if node is None:
+ # raise RepresenterError("recursive objects are not allowed: %r" % data)
+ return node
+ #self.represented_objects[alias_key] = None
+ self.object_keeper.append(data)
+ data_types = type(data).__mro__
+ if type(data) is types.InstanceType:
+ data_types = self.get_classobj_bases(data.__class__)+list(data_types)
+ if data_types[0] in self.yaml_representers:
+ node = self.yaml_representers[data_types[0]](self, data)
+ else:
+ for data_type in data_types:
+ if data_type in self.yaml_multi_representers:
+ node = self.yaml_multi_representers[data_type](self, data)
+ break
+ else:
+ if None in self.yaml_multi_representers:
+ node = self.yaml_multi_representers[None](self, data)
+ elif None in self.yaml_representers:
+ node = self.yaml_representers[None](self, data)
+ else:
+ node = ScalarNode(None, unicode(data))
+ #if alias_key is not None:
+ # self.represented_objects[alias_key] = node
+ return node
+
+ def add_representer(cls, data_type, representer):
+ if not 'yaml_representers' in cls.__dict__:
+ cls.yaml_representers = cls.yaml_representers.copy()
+ cls.yaml_representers[data_type] = representer
+ add_representer = classmethod(add_representer)
+
+ def add_multi_representer(cls, data_type, representer):
+ if not 'yaml_multi_representers' in cls.__dict__:
+ cls.yaml_multi_representers = cls.yaml_multi_representers.copy()
+ cls.yaml_multi_representers[data_type] = representer
+ add_multi_representer = classmethod(add_multi_representer)
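+
+ # Usage sketch (illustrative): registering a representer for a
+ # hypothetical Point class so it is dumped as a plain mapping:
+ # def point_representer(dumper, data):
+ # return dumper.represent_mapping(
+ # u'tag:yaml.org,2002:map', {'x': data.x, 'y': data.y})
+ # SafeRepresenter.add_representer(Point, point_representer)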
+
+ def represent_scalar(self, tag, value, style=None):
+ if style is None:
+ style = self.default_style
+ node = ScalarNode(tag, value, style=style)
+ if self.alias_key is not None:
+ self.represented_objects[self.alias_key] = node
+ return node
+
+ def represent_sequence(self, tag, sequence, flow_style=None):
+ value = []
+ node = SequenceNode(tag, value, flow_style=flow_style)
+ if self.alias_key is not None:
+ self.represented_objects[self.alias_key] = node
+ best_style = True
+ for item in sequence:
+ node_item = self.represent_data(item)
+ if not (isinstance(node_item, ScalarNode) and not node_item.style):
+ best_style = False
+ value.append(node_item)
+ if flow_style is None:
+ if self.default_flow_style is not None:
+ node.flow_style = self.default_flow_style
+ else:
+ node.flow_style = best_style
+ return node
+
+ def represent_mapping(self, tag, mapping, flow_style=None):
+ value = []
+ node = MappingNode(tag, value, flow_style=flow_style)
+ if self.alias_key is not None:
+ self.represented_objects[self.alias_key] = node
+ best_style = True
+ if hasattr(mapping, 'items'):
+ mapping = mapping.items()
+ mapping.sort()
+ for item_key, item_value in mapping:
+ node_key = self.represent_data(item_key)
+ node_value = self.represent_data(item_value)
+ if not (isinstance(node_key, ScalarNode) and not node_key.style):
+ best_style = False
+ if not (isinstance(node_value, ScalarNode) and not node_value.style):
+ best_style = False
+ value.append((node_key, node_value))
+ if flow_style is None:
+ if self.default_flow_style is not None:
+ node.flow_style = self.default_flow_style
+ else:
+ node.flow_style = best_style
+ return node
+
+ def ignore_aliases(self, data):
+ return False
+
+class SafeRepresenter(BaseRepresenter):
+
+ def ignore_aliases(self, data):
+ if data in [None, ()]:
+ return True
+ if isinstance(data, (str, unicode, bool, int, float)):
+ return True
+
+ def represent_none(self, data):
+ return self.represent_scalar(u'tag:yaml.org,2002:null',
+ u'null')
+
+ def represent_str(self, data):
+ tag = None
+ style = None
+ try:
+ data = unicode(data, 'ascii')
+ tag = u'tag:yaml.org,2002:str'
+ except UnicodeDecodeError:
+ try:
+ data = unicode(data, 'utf-8')
+ tag = u'tag:yaml.org,2002:str'
+ except UnicodeDecodeError:
+ data = data.encode('base64')
+ tag = u'tag:yaml.org,2002:binary'
+ style = '|'
+ return self.represent_scalar(tag, data, style=style)
+
+ def represent_unicode(self, data):
+ return self.represent_scalar(u'tag:yaml.org,2002:str', data)
+
+ def represent_bool(self, data):
+ if data:
+ value = u'true'
+ else:
+ value = u'false'
+ return self.represent_scalar(u'tag:yaml.org,2002:bool', value)
+
+ def represent_int(self, data):
+ return self.represent_scalar(u'tag:yaml.org,2002:int', unicode(data))
+
+ def represent_long(self, data):
+ return self.represent_scalar(u'tag:yaml.org,2002:int', unicode(data))
+
+ inf_value = 1e300
+ while repr(inf_value) != repr(inf_value*inf_value):
+ inf_value *= inf_value
+
+ def represent_float(self, data):
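+ # `data != data` is the usual NaN test; the second clause
+ # presumably guards platforms where comparisons with NaN
+ # misbehave.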
+ if data != data or (data == 0.0 and data == 1.0):
+ value = u'.nan'
+ elif data == self.inf_value:
+ value = u'.inf'
+ elif data == -self.inf_value:
+ value = u'-.inf'
+ else:
+ value = unicode(repr(data)).lower()
+ # Note that in some cases `repr(data)` represents a float number
+ # without the decimal parts. For instance:
+ # >>> repr(1e17)
+ # '1e17'
+ # Unfortunately, this is not a valid float representation according
+ # to the definition of the `!!float` tag. We fix this by adding
+ # '.0' before the 'e' symbol.
+ if u'.' not in value and u'e' in value:
+ value = value.replace(u'e', u'.0e', 1)
+ return self.represent_scalar(u'tag:yaml.org,2002:float', value)
+
+ def represent_list(self, data):
+ #pairs = (len(data) > 0 and isinstance(data, list))
+ #if pairs:
+ # for item in data:
+ # if not isinstance(item, tuple) or len(item) != 2:
+ # pairs = False
+ # break
+ #if not pairs:
+ return self.represent_sequence(u'tag:yaml.org,2002:seq', data)
+ #value = []
+ #for item_key, item_value in data:
+ # value.append(self.represent_mapping(u'tag:yaml.org,2002:map',
+ # [(item_key, item_value)]))
+ #return SequenceNode(u'tag:yaml.org,2002:pairs', value)
+
+ def represent_dict(self, data):
+ return self.represent_mapping(u'tag:yaml.org,2002:map', data)
+
+ def represent_set(self, data):
+ value = {}
+ for key in data:
+ value[key] = None
+ return self.represent_mapping(u'tag:yaml.org,2002:set', value)
+
+ def represent_date(self, data):
+ value = unicode(data.isoformat())
+ return self.represent_scalar(u'tag:yaml.org,2002:timestamp', value)
+
+ def represent_datetime(self, data):
+ value = unicode(data.isoformat(' '))
+ return self.represent_scalar(u'tag:yaml.org,2002:timestamp', value)
+
+ def represent_yaml_object(self, tag, data, cls, flow_style=None):
+ if hasattr(data, '__getstate__'):
+ state = data.__getstate__()
+ else:
+ state = data.__dict__.copy()
+ return self.represent_mapping(tag, state, flow_style=flow_style)
+
+ def represent_undefined(self, data):
+ raise RepresenterError("cannot represent an object: %s" % data)
+
+SafeRepresenter.add_representer(type(None),
+ SafeRepresenter.represent_none)
+
+SafeRepresenter.add_representer(str,
+ SafeRepresenter.represent_str)
+
+SafeRepresenter.add_representer(unicode,
+ SafeRepresenter.represent_unicode)
+
+SafeRepresenter.add_representer(bool,
+ SafeRepresenter.represent_bool)
+
+SafeRepresenter.add_representer(int,
+ SafeRepresenter.represent_int)
+
+SafeRepresenter.add_representer(long,
+ SafeRepresenter.represent_long)
+
+SafeRepresenter.add_representer(float,
+ SafeRepresenter.represent_float)
+
+SafeRepresenter.add_representer(list,
+ SafeRepresenter.represent_list)
+
+SafeRepresenter.add_representer(tuple,
+ SafeRepresenter.represent_list)
+
+SafeRepresenter.add_representer(dict,
+ SafeRepresenter.represent_dict)
+
+SafeRepresenter.add_representer(set,
+ SafeRepresenter.represent_set)
+
+SafeRepresenter.add_representer(datetime.date,
+ SafeRepresenter.represent_date)
+
+SafeRepresenter.add_representer(datetime.datetime,
+ SafeRepresenter.represent_datetime)
+
+SafeRepresenter.add_representer(None,
+ SafeRepresenter.represent_undefined)
+
+class Representer(SafeRepresenter):
+
+ def represent_str(self, data):
+ tag = None
+ style = None
+ try:
+ data = unicode(data, 'ascii')
+ tag = u'tag:yaml.org,2002:str'
+ except UnicodeDecodeError:
+ try:
+ data = unicode(data, 'utf-8')
+ tag = u'tag:yaml.org,2002:python/str'
+ except UnicodeDecodeError:
+ data = data.encode('base64')
+ tag = u'tag:yaml.org,2002:binary'
+ style = '|'
+ return self.represent_scalar(tag, data, style=style)
+
+ def represent_unicode(self, data):
+ tag = None
+ try:
+ data.encode('ascii')
+ tag = u'tag:yaml.org,2002:python/unicode'
+ except UnicodeEncodeError:
+ tag = u'tag:yaml.org,2002:str'
+ return self.represent_scalar(tag, data)
+
+ def represent_long(self, data):
+ tag = u'tag:yaml.org,2002:int'
+ if int(data) is not data:
+ tag = u'tag:yaml.org,2002:python/long'
+ return self.represent_scalar(tag, unicode(data))
+
+ def represent_complex(self, data):
+ if data.imag == 0.0:
+ data = u'%r' % data.real
+ elif data.real == 0.0:
+ data = u'%rj' % data.imag
+ elif data.imag > 0:
+ data = u'%r+%rj' % (data.real, data.imag)
+ else:
+ data = u'%r%rj' % (data.real, data.imag)
+ return self.represent_scalar(u'tag:yaml.org,2002:python/complex', data)
+
+ def represent_tuple(self, data):
+ return self.represent_sequence(u'tag:yaml.org,2002:python/tuple', data)
+
+ def represent_name(self, data):
+ name = u'%s.%s' % (data.__module__, data.__name__)
+ return self.represent_scalar(u'tag:yaml.org,2002:python/name:'+name, u'')
+
+ def represent_module(self, data):
+ return self.represent_scalar(
+ u'tag:yaml.org,2002:python/module:'+data.__name__, u'')
+
+ def represent_instance(self, data):
+ # For instances of classic classes, we use __getinitargs__ and
+ # __getstate__ to serialize the data.
+
+ # If data.__getinitargs__ exists, the object must be reconstructed by
+ # calling cls(**args), where args is a tuple returned by
+ # __getinitargs__. Otherwise, the cls.__init__ method should never be
+ # called and the class instance is created by instantiating a trivial
+ # class and assigning to the instance's __class__ variable.
+
+ # If data.__getstate__ exists, it returns the state of the object.
+ # Otherwise, the state of the object is data.__dict__.
+
+ # We produce either a !!python/object or !!python/object/new node.
+ # If data.__getinitargs__ does not exist and state is a dictionary, we
+ # produce a !!python/object node. Otherwise we produce a
+ # !!python/object/new node.
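+
+ # For example (illustrative), a classic instance with state
+ # {'x': 1} and no __getinitargs__ is represented as
+ # !!python/object:module.Class {x: 1}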
+
+ cls = data.__class__
+ class_name = u'%s.%s' % (cls.__module__, cls.__name__)
+ args = None
+ state = None
+ if hasattr(data, '__getinitargs__'):
+ args = list(data.__getinitargs__())
+ if hasattr(data, '__getstate__'):
+ state = data.__getstate__()
+ else:
+ state = data.__dict__
+ if args is None and isinstance(state, dict):
+ return self.represent_mapping(
+ u'tag:yaml.org,2002:python/object:'+class_name, state)
+ if isinstance(state, dict) and not state:
+ return self.represent_sequence(
+ u'tag:yaml.org,2002:python/object/new:'+class_name, args)
+ value = {}
+ if args:
+ value['args'] = args
+ value['state'] = state
+ return self.represent_mapping(
+ u'tag:yaml.org,2002:python/object/new:'+class_name, value)
+
+ def represent_object(self, data):
+ # We use __reduce__ API to save the data. data.__reduce__ returns
+ # a tuple of length 2-5:
+ # (function, args, state, listitems, dictitems)
+
+ # For reconstructing, we call function(*args), then set its state,
+ # listitems, and dictitems if they are not None.
+
+ # A special case is when function.__name__ == '__newobj__'. In this
+ # case we create the object with args[0].__new__(*args).
+
+ # Another special case is when __reduce__ returns a string - we don't
+ # support it.
+
+ # We produce a !!python/object, !!python/object/new or
+ # !!python/object/apply node.
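+
+ # For example (illustrative), a reduce result of
+ # (func, (1, 2)) with no state is represented as
+ # !!python/object/apply:module.func [1, 2]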
+
+ cls = type(data)
+ if cls in copy_reg.dispatch_table:
+ reduce = copy_reg.dispatch_table[cls](data)
+ elif hasattr(data, '__reduce_ex__'):
+ reduce = data.__reduce_ex__(2)
+ elif hasattr(data, '__reduce__'):
+ reduce = data.__reduce__()
+ else:
+ raise RepresenterError("cannot represent object: %r" % data)
+ reduce = (list(reduce)+[None]*5)[:5]
+ function, args, state, listitems, dictitems = reduce
+ args = list(args)
+ if state is None:
+ state = {}
+ if listitems is not None:
+ listitems = list(listitems)
+ if dictitems is not None:
+ dictitems = dict(dictitems)
+ if function.__name__ == '__newobj__':
+ function = args[0]
+ args = args[1:]
+ tag = u'tag:yaml.org,2002:python/object/new:'
+ newobj = True
+ else:
+ tag = u'tag:yaml.org,2002:python/object/apply:'
+ newobj = False
+ function_name = u'%s.%s' % (function.__module__, function.__name__)
+ if not args and not listitems and not dictitems \
+ and isinstance(state, dict) and newobj:
+ return self.represent_mapping(
+ u'tag:yaml.org,2002:python/object:'+function_name, state)
+ if not listitems and not dictitems \
+ and isinstance(state, dict) and not state:
+ return self.represent_sequence(tag+function_name, args)
+ value = {}
+ if args:
+ value['args'] = args
+ if state or not isinstance(state, dict):
+ value['state'] = state
+ if listitems:
+ value['listitems'] = listitems
+ if dictitems:
+ value['dictitems'] = dictitems
+ return self.represent_mapping(tag+function_name, value)
+
+Representer.add_representer(str,
+ Representer.represent_str)
+
+Representer.add_representer(unicode,
+ Representer.represent_unicode)
+
+Representer.add_representer(long,
+ Representer.represent_long)
+
+Representer.add_representer(complex,
+ Representer.represent_complex)
+
+Representer.add_representer(tuple,
+ Representer.represent_tuple)
+
+Representer.add_representer(type,
+ Representer.represent_name)
+
+Representer.add_representer(types.ClassType,
+ Representer.represent_name)
+
+Representer.add_representer(types.FunctionType,
+ Representer.represent_name)
+
+Representer.add_representer(types.BuiltinFunctionType,
+ Representer.represent_name)
+
+Representer.add_representer(types.ModuleType,
+ Representer.represent_module)
+
+Representer.add_multi_representer(types.InstanceType,
+ Representer.represent_instance)
+
+Representer.add_multi_representer(object,
+ Representer.represent_object)
+
diff --git a/collectors/python.d.plugin/python_modules/pyyaml2/resolver.py b/collectors/python.d.plugin/python_modules/pyyaml2/resolver.py
new file mode 100644
index 0000000..49922de
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/pyyaml2/resolver.py
@@ -0,0 +1,225 @@
+# SPDX-License-Identifier: MIT
+
+__all__ = ['BaseResolver', 'Resolver']
+
+from error import *
+from nodes import *
+
+import re
+
+class ResolverError(YAMLError):
+ pass
+
+class BaseResolver(object):
+
+ DEFAULT_SCALAR_TAG = u'tag:yaml.org,2002:str'
+ DEFAULT_SEQUENCE_TAG = u'tag:yaml.org,2002:seq'
+ DEFAULT_MAPPING_TAG = u'tag:yaml.org,2002:map'
+
+ yaml_implicit_resolvers = {}
+ yaml_path_resolvers = {}
+
+ def __init__(self):
+ self.resolver_exact_paths = []
+ self.resolver_prefix_paths = []
+
+ def add_implicit_resolver(cls, tag, regexp, first):
+ if not 'yaml_implicit_resolvers' in cls.__dict__:
+ cls.yaml_implicit_resolvers = cls.yaml_implicit_resolvers.copy()
+ if first is None:
+ first = [None]
+ for ch in first:
+ cls.yaml_implicit_resolvers.setdefault(ch, []).append((tag, regexp))
+ add_implicit_resolver = classmethod(add_implicit_resolver)
+
+ def add_path_resolver(cls, tag, path, kind=None):
+ # Note: `add_path_resolver` is experimental. The API could be changed.
+ # `path` is a pattern that is matched against the path from the
+ # root to the node that is being considered. `path` elements are
+ # tuples `(node_check, index_check)`. `node_check` is a node class:
+ # `ScalarNode`, `SequenceNode`, `MappingNode` or `None`. `None`
+ # matches any kind of a node. `index_check` could be `None`, a boolean
+ # value, a string value, or a number. `None` and `False` match against
+ # any _value_ of sequence and mapping nodes. `True` matches against
+ # any _key_ of a mapping node. A string `index_check` matches against
+ # a mapping value that corresponds to a scalar key whose content is
+ # equal to the `index_check` value. An integer `index_check` matches
+ # against a sequence value with the index equal to `index_check`.
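+ # Usage sketch (illustrative; `!version` is a hypothetical tag):
+ # BaseResolver.add_path_resolver(u'!version', [u'version'], str)
+ # resolves the scalar value of a top-level 'version' key.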
+ if not 'yaml_path_resolvers' in cls.__dict__:
+ cls.yaml_path_resolvers = cls.yaml_path_resolvers.copy()
+ new_path = []
+ for element in path:
+ if isinstance(element, (list, tuple)):
+ if len(element) == 2:
+ node_check, index_check = element
+ elif len(element) == 1:
+ node_check = element[0]
+ index_check = True
+ else:
+ raise ResolverError("Invalid path element: %s" % element)
+ else:
+ node_check = None
+ index_check = element
+ if node_check is str:
+ node_check = ScalarNode
+ elif node_check is list:
+ node_check = SequenceNode
+ elif node_check is dict:
+ node_check = MappingNode
+ elif node_check not in [ScalarNode, SequenceNode, MappingNode] \
+ and not isinstance(node_check, basestring) \
+ and node_check is not None:
+ raise ResolverError("Invalid node checker: %s" % node_check)
+ if not isinstance(index_check, (basestring, int)) \
+ and index_check is not None:
+ raise ResolverError("Invalid index checker: %s" % index_check)
+ new_path.append((node_check, index_check))
+ if kind is str:
+ kind = ScalarNode
+ elif kind is list:
+ kind = SequenceNode
+ elif kind is dict:
+ kind = MappingNode
+ elif kind not in [ScalarNode, SequenceNode, MappingNode] \
+ and kind is not None:
+ raise ResolverError("Invalid node kind: %s" % kind)
+ cls.yaml_path_resolvers[tuple(new_path), kind] = tag
+ add_path_resolver = classmethod(add_path_resolver)
+
+ def descend_resolver(self, current_node, current_index):
+ if not self.yaml_path_resolvers:
+ return
+ exact_paths = {}
+ prefix_paths = []
+ if current_node:
+ depth = len(self.resolver_prefix_paths)
+ for path, kind in self.resolver_prefix_paths[-1]:
+ if self.check_resolver_prefix(depth, path, kind,
+ current_node, current_index):
+ if len(path) > depth:
+ prefix_paths.append((path, kind))
+ else:
+ exact_paths[kind] = self.yaml_path_resolvers[path, kind]
+ else:
+ for path, kind in self.yaml_path_resolvers:
+ if not path:
+ exact_paths[kind] = self.yaml_path_resolvers[path, kind]
+ else:
+ prefix_paths.append((path, kind))
+ self.resolver_exact_paths.append(exact_paths)
+ self.resolver_prefix_paths.append(prefix_paths)
+
+ def ascend_resolver(self):
+ if not self.yaml_path_resolvers:
+ return
+ self.resolver_exact_paths.pop()
+ self.resolver_prefix_paths.pop()
+
+ def check_resolver_prefix(self, depth, path, kind,
+ current_node, current_index):
+ node_check, index_check = path[depth-1]
+ if isinstance(node_check, basestring):
+ if current_node.tag != node_check:
+ return
+ elif node_check is not None:
+ if not isinstance(current_node, node_check):
+ return
+ if index_check is True and current_index is not None:
+ return
+ if (index_check is False or index_check is None) \
+ and current_index is None:
+ return
+ if isinstance(index_check, basestring):
+ if not (isinstance(current_index, ScalarNode)
+ and index_check == current_index.value):
+ return
+ elif isinstance(index_check, int) and not isinstance(index_check, bool):
+ if index_check != current_index:
+ return
+ return True
+
+ def resolve(self, kind, value, implicit):
+ if kind is ScalarNode and implicit[0]:
+ if value == u'':
+ resolvers = self.yaml_implicit_resolvers.get(u'', [])
+ else:
+ resolvers = self.yaml_implicit_resolvers.get(value[0], [])
+ resolvers += self.yaml_implicit_resolvers.get(None, [])
+ for tag, regexp in resolvers:
+ if regexp.match(value):
+ return tag
+ implicit = implicit[1]
+ if self.yaml_path_resolvers:
+ exact_paths = self.resolver_exact_paths[-1]
+ if kind in exact_paths:
+ return exact_paths[kind]
+ if None in exact_paths:
+ return exact_paths[None]
+ if kind is ScalarNode:
+ return self.DEFAULT_SCALAR_TAG
+ elif kind is SequenceNode:
+ return self.DEFAULT_SEQUENCE_TAG
+ elif kind is MappingNode:
+ return self.DEFAULT_MAPPING_TAG
+
+class Resolver(BaseResolver):
+ pass
+
+Resolver.add_implicit_resolver(
+ u'tag:yaml.org,2002:bool',
+ re.compile(ur'''^(?:yes|Yes|YES|no|No|NO
+ |true|True|TRUE|false|False|FALSE
+ |on|On|ON|off|Off|OFF)$''', re.X),
+ list(u'yYnNtTfFoO'))
+
+Resolver.add_implicit_resolver(
+ u'tag:yaml.org,2002:float',
+ re.compile(ur'''^(?:[-+]?(?:[0-9][0-9_]*)\.[0-9_]*(?:[eE][-+][0-9]+)?
+ |\.[0-9_]+(?:[eE][-+][0-9]+)?
+ |[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+\.[0-9_]*
+ |[-+]?\.(?:inf|Inf|INF)
+ |\.(?:nan|NaN|NAN))$''', re.X),
+ list(u'-+0123456789.'))
+
+Resolver.add_implicit_resolver(
+ u'tag:yaml.org,2002:int',
+ re.compile(ur'''^(?:[-+]?0b[0-1_]+
+ |[-+]?0[0-7_]+
+ |[-+]?(?:0|[1-9][0-9_]*)
+ |[-+]?0x[0-9a-fA-F_]+
+ |[-+]?[1-9][0-9_]*(?::[0-5]?[0-9])+)$''', re.X),
+ list(u'-+0123456789'))
+
+Resolver.add_implicit_resolver(
+ u'tag:yaml.org,2002:merge',
+ re.compile(ur'^(?:<<)$'),
+ [u'<'])
+
+Resolver.add_implicit_resolver(
+ u'tag:yaml.org,2002:null',
+ re.compile(ur'''^(?: ~
+ |null|Null|NULL
+ | )$''', re.X),
+ [u'~', u'n', u'N', u''])
+
+Resolver.add_implicit_resolver(
+ u'tag:yaml.org,2002:timestamp',
+ re.compile(ur'''^(?:[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9]
+ |[0-9][0-9][0-9][0-9] -[0-9][0-9]? -[0-9][0-9]?
+ (?:[Tt]|[ \t]+)[0-9][0-9]?
+ :[0-9][0-9] :[0-9][0-9] (?:\.[0-9]*)?
+ (?:[ \t]*(?:Z|[-+][0-9][0-9]?(?::[0-9][0-9])?))?)$''', re.X),
+ list(u'0123456789'))
+
+Resolver.add_implicit_resolver(
+ u'tag:yaml.org,2002:value',
+ re.compile(ur'^(?:=)$'),
+ [u'='])
+
+# The following resolver is only for documentation purposes. It cannot work
+# because plain scalars cannot start with '!', '&', or '*'.
+Resolver.add_implicit_resolver(
+ u'tag:yaml.org,2002:yaml',
+ re.compile(ur'^(?:!|&|\*)$'),
+ list(u'!&*'))
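+
+# Illustrative examples of implicit resolution for plain scalars:
+# 'true' -> tag:yaml.org,2002:bool
+# '12_34' -> tag:yaml.org,2002:int
+# '.inf' -> tag:yaml.org,2002:float
+# '2002-12-14' -> tag:yaml.org,2002:timestamp
+# anything unmatched falls back to the default scalar tag (!!str).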
+
diff --git a/collectors/python.d.plugin/python_modules/pyyaml2/scanner.py b/collectors/python.d.plugin/python_modules/pyyaml2/scanner.py
new file mode 100644
index 0000000..971da61
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/pyyaml2/scanner.py
@@ -0,0 +1,1458 @@
+# SPDX-License-Identifier: MIT
+
+# Scanner produces tokens of the following types:
+# STREAM-START
+# STREAM-END
+# DIRECTIVE(name, value)
+# DOCUMENT-START
+# DOCUMENT-END
+# BLOCK-SEQUENCE-START
+# BLOCK-MAPPING-START
+# BLOCK-END
+# FLOW-SEQUENCE-START
+# FLOW-MAPPING-START
+# FLOW-SEQUENCE-END
+# FLOW-MAPPING-END
+# BLOCK-ENTRY
+# FLOW-ENTRY
+# KEY
+# VALUE
+# ALIAS(value)
+# ANCHOR(value)
+# TAG(value)
+# SCALAR(value, plain, style)
+#
+# Read comments in the Scanner code for more details.
+#
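+# For example (illustrative), the document "a: 1" is scanned as:
+# STREAM-START, BLOCK-MAPPING-START, KEY, SCALAR(a),
+# VALUE, SCALAR(1), BLOCK-END, STREAM-END.
+#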
+
+__all__ = ['Scanner', 'ScannerError']
+
+from error import MarkedYAMLError
+from tokens import *
+
+class ScannerError(MarkedYAMLError):
+ pass
+
+class SimpleKey(object):
+ # See below simple keys treatment.
+
+ def __init__(self, token_number, required, index, line, column, mark):
+ self.token_number = token_number
+ self.required = required
+ self.index = index
+ self.line = line
+ self.column = column
+ self.mark = mark
+
+class Scanner(object):
+
+ def __init__(self):
+ """Initialize the scanner."""
+ # It is assumed that Scanner and Reader will have a common descendant.
+ # Reader does the dirty work of checking for BOM and converting the
+ # input data to Unicode. It also adds NUL to the end.
+ #
+ # Reader supports the following methods
+ # self.peek(i=0) # peek the character at offset i
+ # self.prefix(l=1) # peek the next l characters
+ # self.forward(l=1) # read the next l characters and move the pointer.
+
+ # Have we reached the end of the stream?
+ self.done = False
+
+ # The number of unclosed '{' and '['. `flow_level == 0` means block
+ # context.
+ self.flow_level = 0
+
+ # List of processed tokens that are not yet emitted.
+ self.tokens = []
+
+ # Add the STREAM-START token.
+ self.fetch_stream_start()
+
+ # Number of tokens that were emitted through the `get_token` method.
+ self.tokens_taken = 0
+
+ # The current indentation level.
+ self.indent = -1
+
+ # Past indentation levels.
+ self.indents = []
+
+ # Variables related to simple keys treatment.
+
+ # A simple key is a key that is not denoted by the '?' indicator.
+ # Example of simple keys:
+ # ---
+ # block simple key: value
+ # ? not a simple key:
+ # : { flow simple key: value }
+ # We emit the KEY token before all keys, so when we find a potential
+ # simple key, we try to locate the corresponding ':' indicator.
+ # Simple keys should be limited to a single line and 1024 characters.
+
+ # Can a simple key start at the current position? A simple key may
+ # start:
+ # - at the beginning of the line, not counting indentation spaces
+ # (in block context),
+ # - after '{', '[', ',' (in the flow context),
+ # - after '?', ':', '-' (in the block context).
+ # In the block context, this flag also signifies if a block collection
+ # may start at the current position.
+ self.allow_simple_key = True
+
+ # Keep track of possible simple keys. This is a dictionary. The key
+ # is `flow_level`; there can be no more than one possible simple key
+ # for each level. The value is a SimpleKey record:
+ # (token_number, required, index, line, column, mark)
+ # A simple key may start with ALIAS, ANCHOR, TAG, SCALAR(flow),
+ # '[', or '{' tokens.
+ self.possible_simple_keys = {}
+
+ # Public methods.
+
+ def check_token(self, *choices):
+ # Check if the next token is one of the given types.
+ while self.need_more_tokens():
+ self.fetch_more_tokens()
+ if self.tokens:
+ if not choices:
+ return True
+ for choice in choices:
+ if isinstance(self.tokens[0], choice):
+ return True
+ return False
+
+ def peek_token(self):
+ # Return the next token, but do not delete it from the queue.
+ while self.need_more_tokens():
+ self.fetch_more_tokens()
+ if self.tokens:
+ return self.tokens[0]
+
+ def get_token(self):
+ # Return the next token.
+ while self.need_more_tokens():
+ self.fetch_more_tokens()
+ if self.tokens:
+ self.tokens_taken += 1
+ return self.tokens.pop(0)
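+
+ # Usage sketch (illustrative): draining the token queue.
+ # while not scanner.check_token(StreamEndToken):
+ # token = scanner.get_token()
+ # scanner.get_token() # finally consume STREAM-END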
+
+ # Private methods.
+
+ def need_more_tokens(self):
+ if self.done:
+ return False
+ if not self.tokens:
+ return True
+ # The current token may be a potential simple key, so we
+ # need to look further.
+ self.stale_possible_simple_keys()
+ if self.next_possible_simple_key() == self.tokens_taken:
+ return True
+
+ def fetch_more_tokens(self):
+
+ # Eat whitespaces and comments until we reach the next token.
+ self.scan_to_next_token()
+
+ # Remove obsolete possible simple keys.
+ self.stale_possible_simple_keys()
+
+ # Compare the current indentation and column. It may add some tokens
+ # and decrease the current indentation level.
+ self.unwind_indent(self.column)
+
+ # Peek the next character.
+ ch = self.peek()
+
+ # Is it the end of stream?
+ if ch == u'\0':
+ return self.fetch_stream_end()
+
+ # Is it a directive?
+ if ch == u'%' and self.check_directive():
+ return self.fetch_directive()
+
+ # Is it the document start?
+ if ch == u'-' and self.check_document_start():
+ return self.fetch_document_start()
+
+ # Is it the document end?
+ if ch == u'.' and self.check_document_end():
+ return self.fetch_document_end()
+
+ # TODO: support for BOM within a stream.
+ #if ch == u'\uFEFF':
+ # return self.fetch_bom() <-- issue BOMToken
+
+ # Note: the order of the following checks is NOT significant.
+
+ # Is it the flow sequence start indicator?
+ if ch == u'[':
+ return self.fetch_flow_sequence_start()
+
+ # Is it the flow mapping start indicator?
+ if ch == u'{':
+ return self.fetch_flow_mapping_start()
+
+ # Is it the flow sequence end indicator?
+ if ch == u']':
+ return self.fetch_flow_sequence_end()
+
+ # Is it the flow mapping end indicator?
+ if ch == u'}':
+ return self.fetch_flow_mapping_end()
+
+ # Is it the flow entry indicator?
+ if ch == u',':
+ return self.fetch_flow_entry()
+
+ # Is it the block entry indicator?
+ if ch == u'-' and self.check_block_entry():
+ return self.fetch_block_entry()
+
+ # Is it the key indicator?
+ if ch == u'?' and self.check_key():
+ return self.fetch_key()
+
+ # Is it the value indicator?
+ if ch == u':' and self.check_value():
+ return self.fetch_value()
+
+ # Is it an alias?
+ if ch == u'*':
+ return self.fetch_alias()
+
+ # Is it an anchor?
+ if ch == u'&':
+ return self.fetch_anchor()
+
+ # Is it a tag?
+ if ch == u'!':
+ return self.fetch_tag()
+
+ # Is it a literal scalar?
+ if ch == u'|' and not self.flow_level:
+ return self.fetch_literal()
+
+ # Is it a folded scalar?
+ if ch == u'>' and not self.flow_level:
+ return self.fetch_folded()
+
+ # Is it a single quoted scalar?
+ if ch == u'\'':
+ return self.fetch_single()
+
+ # Is it a double quoted scalar?
+ if ch == u'\"':
+ return self.fetch_double()
+
+ # It must be a plain scalar then.
+ if self.check_plain():
+ return self.fetch_plain()
+
+ # No? It's an error. Let's produce a nice error message.
+ raise ScannerError("while scanning for the next token", None,
+ "found character %r that cannot start any token"
+ % ch.encode('utf-8'), self.get_mark())
+
+ # Simple keys treatment.
+
+ def next_possible_simple_key(self):
+ # Return the number of the nearest possible simple key. Actually we
+ # don't need to loop through the whole dictionary. We may replace it
+ # with the following code:
+ # if not self.possible_simple_keys:
+ # return None
+ # return self.possible_simple_keys[
+ # min(self.possible_simple_keys.keys())].token_number
+ min_token_number = None
+ for level in self.possible_simple_keys:
+ key = self.possible_simple_keys[level]
+ if min_token_number is None or key.token_number < min_token_number:
+ min_token_number = key.token_number
+ return min_token_number
+
+ def stale_possible_simple_keys(self):
+ # Remove entries that are no longer possible simple keys. According to
+ # the YAML specification, simple keys
+ # - should be limited to a single line,
+ # - should be no longer than 1024 characters.
+ # Disabling this procedure will allow simple keys of any length and
+ # height (may cause problems if indentation is broken though).
+ for level in self.possible_simple_keys.keys():
+ key = self.possible_simple_keys[level]
+ if key.line != self.line \
+ or self.index-key.index > 1024:
+ if key.required:
+ raise ScannerError("while scanning a simple key", key.mark,
+ "could not found expected ':'", self.get_mark())
+ del self.possible_simple_keys[level]
+
+ def save_possible_simple_key(self):
+ # The next token may start a simple key. We check if it's possible
+ # and save its position. This function is called for
+ # ALIAS, ANCHOR, TAG, SCALAR(flow), '[', and '{'.
+
+ # Check if a simple key is required at the current position.
+ required = not self.flow_level and self.indent == self.column
+
+ # A simple key is required only if it is the first token in the current
+ # line. Therefore it is always allowed.
+ assert self.allow_simple_key or not required
+
+ # The next token might be a simple key. Let's save its number and
+ # position.
+ if self.allow_simple_key:
+ self.remove_possible_simple_key()
+ token_number = self.tokens_taken+len(self.tokens)
+ key = SimpleKey(token_number, required,
+ self.index, self.line, self.column, self.get_mark())
+ self.possible_simple_keys[self.flow_level] = key
+
+ def remove_possible_simple_key(self):
+ # Remove the saved possible key position at the current flow level.
+ if self.flow_level in self.possible_simple_keys:
+ key = self.possible_simple_keys[self.flow_level]
+
+ if key.required:
+ raise ScannerError("while scanning a simple key", key.mark,
+ "could not found expected ':'", self.get_mark())
+
+ del self.possible_simple_keys[self.flow_level]
+
+ # Indentation functions.
+
+ def unwind_indent(self, column):
+
+ ## In flow context, tokens should respect indentation.
+ ## Actually the condition should be `self.indent >= column` according to
+ ## the spec. But this condition will prohibit intuitively correct
+ ## constructions such as
+ ## key : {
+ ## }
+ #if self.flow_level and self.indent > column:
+ # raise ScannerError(None, None,
+ # "invalid intendation or unclosed '[' or '{'",
+ # self.get_mark())
+
+ # In the flow context, indentation is ignored. We make the scanner less
+ # restrictive than the specification requires.
+ if self.flow_level:
+ return
+
+ # In block context, we may need to issue the BLOCK-END tokens.
+ while self.indent > column:
+ mark = self.get_mark()
+ self.indent = self.indents.pop()
+ self.tokens.append(BlockEndToken(mark, mark))
+
+ def add_indent(self, column):
+ # Check if we need to increase indentation.
+ if self.indent < column:
+ self.indents.append(self.indent)
+ self.indent = column
+ return True
+ return False
+
+ # Fetchers.
+
+ def fetch_stream_start(self):
+ # We always add STREAM-START as the first token and STREAM-END as the
+ # last token.
+
+ # Read the token.
+ mark = self.get_mark()
+
+ # Add STREAM-START.
+ self.tokens.append(StreamStartToken(mark, mark,
+ encoding=self.encoding))
+
+
+ def fetch_stream_end(self):
+
+ # Set the current indentation to -1.
+ self.unwind_indent(-1)
+
+ # Reset simple keys.
+ self.remove_possible_simple_key()
+ self.allow_simple_key = False
+ self.possible_simple_keys = {}
+
+ # Read the token.
+ mark = self.get_mark()
+
+ # Add STREAM-END.
+ self.tokens.append(StreamEndToken(mark, mark))
+
+ # The stream is finished.
+ self.done = True
+
+ def fetch_directive(self):
+
+ # Set the current indentation to -1.
+ self.unwind_indent(-1)
+
+ # Reset simple keys.
+ self.remove_possible_simple_key()
+ self.allow_simple_key = False
+
+ # Scan and add DIRECTIVE.
+ self.tokens.append(self.scan_directive())
+
+ def fetch_document_start(self):
+ self.fetch_document_indicator(DocumentStartToken)
+
+ def fetch_document_end(self):
+ self.fetch_document_indicator(DocumentEndToken)
+
+ def fetch_document_indicator(self, TokenClass):
+
+ # Set the current indentation to -1.
+ self.unwind_indent(-1)
+
+ # Reset simple keys. Note that there could not be a block collection
+ # after '---'.
+ self.remove_possible_simple_key()
+ self.allow_simple_key = False
+
+ # Add DOCUMENT-START or DOCUMENT-END.
+ start_mark = self.get_mark()
+ self.forward(3)
+ end_mark = self.get_mark()
+ self.tokens.append(TokenClass(start_mark, end_mark))
+
+ def fetch_flow_sequence_start(self):
+ self.fetch_flow_collection_start(FlowSequenceStartToken)
+
+ def fetch_flow_mapping_start(self):
+ self.fetch_flow_collection_start(FlowMappingStartToken)
+
+ def fetch_flow_collection_start(self, TokenClass):
+
+ # '[' and '{' may start a simple key.
+ self.save_possible_simple_key()
+
+ # Increase the flow level.
+ self.flow_level += 1
+
+ # Simple keys are allowed after '[' and '{'.
+ self.allow_simple_key = True
+
+ # Add FLOW-SEQUENCE-START or FLOW-MAPPING-START.
+ start_mark = self.get_mark()
+ self.forward()
+ end_mark = self.get_mark()
+ self.tokens.append(TokenClass(start_mark, end_mark))
+
+ def fetch_flow_sequence_end(self):
+ self.fetch_flow_collection_end(FlowSequenceEndToken)
+
+ def fetch_flow_mapping_end(self):
+ self.fetch_flow_collection_end(FlowMappingEndToken)
+
+ def fetch_flow_collection_end(self, TokenClass):
+
+ # Reset possible simple key on the current level.
+ self.remove_possible_simple_key()
+
+ # Decrease the flow level.
+ self.flow_level -= 1
+
+ # No simple keys after ']' or '}'.
+ self.allow_simple_key = False
+
+ # Add FLOW-SEQUENCE-END or FLOW-MAPPING-END.
+ start_mark = self.get_mark()
+ self.forward()
+ end_mark = self.get_mark()
+ self.tokens.append(TokenClass(start_mark, end_mark))
+
+ def fetch_flow_entry(self):
+
+ # Simple keys are allowed after ','.
+ self.allow_simple_key = True
+
+ # Reset possible simple key on the current level.
+ self.remove_possible_simple_key()
+
+ # Add FLOW-ENTRY.
+ start_mark = self.get_mark()
+ self.forward()
+ end_mark = self.get_mark()
+ self.tokens.append(FlowEntryToken(start_mark, end_mark))
+
+ def fetch_block_entry(self):
+
+ # Block context needs additional checks.
+ if not self.flow_level:
+
+ # Are we allowed to start a new entry?
+ if not self.allow_simple_key:
+ raise ScannerError(None, None,
+ "sequence entries are not allowed here",
+ self.get_mark())
+
+ # We may need to add BLOCK-SEQUENCE-START.
+ if self.add_indent(self.column):
+ mark = self.get_mark()
+ self.tokens.append(BlockSequenceStartToken(mark, mark))
+
+ # It's an error for the block entry to occur in the flow context,
+ # but we let the parser detect this.
+ else:
+ pass
+
+ # Simple keys are allowed after '-'.
+ self.allow_simple_key = True
+
+ # Reset possible simple key on the current level.
+ self.remove_possible_simple_key()
+
+ # Add BLOCK-ENTRY.
+ start_mark = self.get_mark()
+ self.forward()
+ end_mark = self.get_mark()
+ self.tokens.append(BlockEntryToken(start_mark, end_mark))
+
+ def fetch_key(self):
+
+ # Block context needs additional checks.
+ if not self.flow_level:
+
+ # Are we allowed to start a key (not necessarily a simple one)?
+ if not self.allow_simple_key:
+ raise ScannerError(None, None,
+ "mapping keys are not allowed here",
+ self.get_mark())
+
+ # We may need to add BLOCK-MAPPING-START.
+ if self.add_indent(self.column):
+ mark = self.get_mark()
+ self.tokens.append(BlockMappingStartToken(mark, mark))
+
+ # Simple keys are allowed after '?' in the block context.
+ self.allow_simple_key = not self.flow_level
+
+ # Reset possible simple key on the current level.
+ self.remove_possible_simple_key()
+
+ # Add KEY.
+ start_mark = self.get_mark()
+ self.forward()
+ end_mark = self.get_mark()
+ self.tokens.append(KeyToken(start_mark, end_mark))
+
+ def fetch_value(self):
+
+ # Do we determine a simple key?
+ if self.flow_level in self.possible_simple_keys:
+
+ # Add KEY.
+ key = self.possible_simple_keys[self.flow_level]
+ del self.possible_simple_keys[self.flow_level]
+ self.tokens.insert(key.token_number-self.tokens_taken,
+ KeyToken(key.mark, key.mark))
+
+ # If this key starts a new block mapping, we need to add
+ # BLOCK-MAPPING-START.
+ if not self.flow_level:
+ if self.add_indent(key.column):
+ self.tokens.insert(key.token_number-self.tokens_taken,
+ BlockMappingStartToken(key.mark, key.mark))
+
+ # There cannot be two simple keys one after another.
+ self.allow_simple_key = False
+
+ # It must be a part of a complex key.
+ else:
+
+ # Block context needs additional checks.
+ # (Do we really need them? They will be caught by the parser
+ # anyway.)
+ if not self.flow_level:
+
+ # We are allowed to start a complex value if and only if
+ # we can start a simple key.
+ if not self.allow_simple_key:
+ raise ScannerError(None, None,
+ "mapping values are not allowed here",
+ self.get_mark())
+
+ # If this value starts a new block mapping, we need to add
+ # BLOCK-MAPPING-START. It will be detected as an error later by
+ # the parser.
+ if not self.flow_level:
+ if self.add_indent(self.column):
+ mark = self.get_mark()
+ self.tokens.append(BlockMappingStartToken(mark, mark))
+
+ # Simple keys are allowed after ':' in the block context.
+ self.allow_simple_key = not self.flow_level
+
+ # Reset possible simple key on the current level.
+ self.remove_possible_simple_key()
+
+ # Add VALUE.
+ start_mark = self.get_mark()
+ self.forward()
+ end_mark = self.get_mark()
+ self.tokens.append(ValueToken(start_mark, end_mark))
+
+ def fetch_alias(self):
+
+ # ALIAS could be a simple key.
+ self.save_possible_simple_key()
+
+ # No simple keys after ALIAS.
+ self.allow_simple_key = False
+
+ # Scan and add ALIAS.
+ self.tokens.append(self.scan_anchor(AliasToken))
+
+ def fetch_anchor(self):
+
+ # ANCHOR could start a simple key.
+ self.save_possible_simple_key()
+
+ # No simple keys after ANCHOR.
+ self.allow_simple_key = False
+
+ # Scan and add ANCHOR.
+ self.tokens.append(self.scan_anchor(AnchorToken))
+
+ def fetch_tag(self):
+
+ # TAG could start a simple key.
+ self.save_possible_simple_key()
+
+ # No simple keys after TAG.
+ self.allow_simple_key = False
+
+ # Scan and add TAG.
+ self.tokens.append(self.scan_tag())
+
+ def fetch_literal(self):
+ self.fetch_block_scalar(style='|')
+
+ def fetch_folded(self):
+ self.fetch_block_scalar(style='>')
+
+ def fetch_block_scalar(self, style):
+
+ # A simple key may follow a block scalar.
+ self.allow_simple_key = True
+
+ # Reset possible simple key on the current level.
+ self.remove_possible_simple_key()
+
+ # Scan and add SCALAR.
+ self.tokens.append(self.scan_block_scalar(style))
+
+ def fetch_single(self):
+ self.fetch_flow_scalar(style='\'')
+
+ def fetch_double(self):
+ self.fetch_flow_scalar(style='"')
+
+ def fetch_flow_scalar(self, style):
+
+ # A flow scalar could be a simple key.
+ self.save_possible_simple_key()
+
+ # No simple keys after flow scalars.
+ self.allow_simple_key = False
+
+ # Scan and add SCALAR.
+ self.tokens.append(self.scan_flow_scalar(style))
+
+ def fetch_plain(self):
+
+ # A plain scalar could be a simple key.
+ self.save_possible_simple_key()
+
+ # No simple keys after plain scalars. But note that `scan_plain` will
+ # change this flag if the scan is finished at the beginning of the
+ # line.
+ self.allow_simple_key = False
+
+ # Scan and add SCALAR. May change `allow_simple_key`.
+ self.tokens.append(self.scan_plain())
+
+ # Checkers.
+
+ def check_directive(self):
+
+ # DIRECTIVE: ^ '%' ...
+ # The '%' indicator is already checked.
+ if self.column == 0:
+ return True
+
+ def check_document_start(self):
+
+ # DOCUMENT-START: ^ '---' (' '|'\n')
+ if self.column == 0:
+ if self.prefix(3) == u'---' \
+ and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029':
+ return True
+
+ def check_document_end(self):
+
+ # DOCUMENT-END: ^ '...' (' '|'\n')
+ if self.column == 0:
+ if self.prefix(3) == u'...' \
+ and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029':
+ return True
+
+ def check_block_entry(self):
+
+ # BLOCK-ENTRY: '-' (' '|'\n')
+ return self.peek(1) in u'\0 \t\r\n\x85\u2028\u2029'
+
+ def check_key(self):
+
+ # KEY(flow context): '?'
+ if self.flow_level:
+ return True
+
+ # KEY(block context): '?' (' '|'\n')
+ else:
+ return self.peek(1) in u'\0 \t\r\n\x85\u2028\u2029'
+
+ def check_value(self):
+
+ # VALUE(flow context): ':'
+ if self.flow_level:
+ return True
+
+ # VALUE(block context): ':' (' '|'\n')
+ else:
+ return self.peek(1) in u'\0 \t\r\n\x85\u2028\u2029'
+
+ def check_plain(self):
+
+ # A plain scalar may start with any non-space character except:
+ # '-', '?', ':', ',', '[', ']', '{', '}',
+ # '#', '&', '*', '!', '|', '>', '\'', '\"',
+ # '%', '@', '`'.
+ #
+ # It may also start with
+ # '-', '?', ':'
+ # if it is followed by a non-space character.
+ #
+ # Note that we limit the last rule to the block context (except the
+ # '-' character) because we want the flow context to be space
+ # independent.
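+ #
+ # For example (illustrative): '-foo' can start a plain scalar
+ # ('-' followed by a non-space), while '- foo' starts a block entry.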
+ ch = self.peek()
+ return ch not in u'\0 \t\r\n\x85\u2028\u2029-?:,[]{}#&*!|>\'\"%@`' \
+ or (self.peek(1) not in u'\0 \t\r\n\x85\u2028\u2029'
+ and (ch == u'-' or (not self.flow_level and ch in u'?:')))
+
+ # Scanners.
+
+ def scan_to_next_token(self):
+ # We ignore spaces, line breaks and comments.
+ # If we find a line break in the block context, we set the flag
+ # `allow_simple_key` on.
+ # The byte order mark is stripped if it's the first character in the
+ # stream. We do not yet support BOM inside the stream as the
+ # specification requires. Any such mark will be considered as a part
+ # of the document.
+ #
+ # TODO: We need to make tab handling rules more sane. A good rule is
+ # Tabs cannot precede tokens
+ # BLOCK-SEQUENCE-START, BLOCK-MAPPING-START, BLOCK-END,
+ # KEY(block), VALUE(block), BLOCK-ENTRY
+ # So the checking code is
+ # if <TAB>:
+ # self.allow_simple_keys = False
+ # We also need to add the check for `allow_simple_keys == True` to
+ # `unwind_indent` before issuing BLOCK-END.
+ # Scanners for block, flow, and plain scalars need to be modified.
+
+ if self.index == 0 and self.peek() == u'\uFEFF':
+ self.forward()
+ found = False
+ while not found:
+ while self.peek() == u' ':
+ self.forward()
+ if self.peek() == u'#':
+ while self.peek() not in u'\0\r\n\x85\u2028\u2029':
+ self.forward()
+ if self.scan_line_break():
+ if not self.flow_level:
+ self.allow_simple_key = True
+ else:
+ found = True
+
+ def scan_directive(self):
+ # See the specification for details.
+ start_mark = self.get_mark()
+ self.forward()
+ name = self.scan_directive_name(start_mark)
+ value = None
+ if name == u'YAML':
+ value = self.scan_yaml_directive_value(start_mark)
+ end_mark = self.get_mark()
+ elif name == u'TAG':
+ value = self.scan_tag_directive_value(start_mark)
+ end_mark = self.get_mark()
+ else:
+ end_mark = self.get_mark()
+ while self.peek() not in u'\0\r\n\x85\u2028\u2029':
+ self.forward()
+ self.scan_directive_ignored_line(start_mark)
+ return DirectiveToken(name, value, start_mark, end_mark)
+
+ def scan_directive_name(self, start_mark):
+ # See the specification for details.
+ length = 0
+ ch = self.peek(length)
+ while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
+ or ch in u'-_':
+ length += 1
+ ch = self.peek(length)
+ if not length:
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected alphabetic or numeric character, but found %r"
+ % ch.encode('utf-8'), self.get_mark())
+ value = self.prefix(length)
+ self.forward(length)
+ ch = self.peek()
+ if ch not in u'\0 \r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected alphabetic or numeric character, but found %r"
+ % ch.encode('utf-8'), self.get_mark())
+ return value
+
+ def scan_yaml_directive_value(self, start_mark):
+ # See the specification for details.
+ while self.peek() == u' ':
+ self.forward()
+ major = self.scan_yaml_directive_number(start_mark)
+ if self.peek() != '.':
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected a digit or '.', but found %r"
+ % self.peek().encode('utf-8'),
+ self.get_mark())
+ self.forward()
+ minor = self.scan_yaml_directive_number(start_mark)
+ if self.peek() not in u'\0 \r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected a digit or ' ', but found %r"
+ % self.peek().encode('utf-8'),
+ self.get_mark())
+ return (major, minor)
+
+ def scan_yaml_directive_number(self, start_mark):
+ # See the specification for details.
+ ch = self.peek()
+ if not (u'0' <= ch <= u'9'):
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected a digit, but found %r" % ch.encode('utf-8'),
+ self.get_mark())
+ length = 0
+ while u'0' <= self.peek(length) <= u'9':
+ length += 1
+ value = int(self.prefix(length))
+ self.forward(length)
+ return value
+
+ def scan_tag_directive_value(self, start_mark):
+ # See the specification for details.
+ while self.peek() == u' ':
+ self.forward()
+ handle = self.scan_tag_directive_handle(start_mark)
+ while self.peek() == u' ':
+ self.forward()
+ prefix = self.scan_tag_directive_prefix(start_mark)
+ return (handle, prefix)
+
+ def scan_tag_directive_handle(self, start_mark):
+ # See the specification for details.
+ value = self.scan_tag_handle('directive', start_mark)
+ ch = self.peek()
+ if ch != u' ':
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected ' ', but found %r" % ch.encode('utf-8'),
+ self.get_mark())
+ return value
+
+ def scan_tag_directive_prefix(self, start_mark):
+ # See the specification for details.
+ value = self.scan_tag_uri('directive', start_mark)
+ ch = self.peek()
+ if ch not in u'\0 \r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected ' ', but found %r" % ch.encode('utf-8'),
+ self.get_mark())
+ return value
+
+ def scan_directive_ignored_line(self, start_mark):
+ # See the specification for details.
+ while self.peek() == u' ':
+ self.forward()
+ if self.peek() == u'#':
+ while self.peek() not in u'\0\r\n\x85\u2028\u2029':
+ self.forward()
+ ch = self.peek()
+ if ch not in u'\0\r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected a comment or a line break, but found %r"
+ % ch.encode('utf-8'), self.get_mark())
+ self.scan_line_break()
+
+ def scan_anchor(self, TokenClass):
+ # The specification does not restrict characters for anchors and
+ # aliases. This may lead to problems, for instance, the document:
+ # [ *alias, value ]
+ # can be interpreted in two ways, as
+ # [ "value" ]
+ # and
+ # [ *alias , "value" ]
+ # Therefore we restrict aliases to numbers and ASCII letters.
+ start_mark = self.get_mark()
+ indicator = self.peek()
+ if indicator == u'*':
+ name = 'alias'
+ else:
+ name = 'anchor'
+ self.forward()
+ length = 0
+ ch = self.peek(length)
+ while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
+ or ch in u'-_':
+ length += 1
+ ch = self.peek(length)
+ if not length:
+ raise ScannerError("while scanning an %s" % name, start_mark,
+ "expected alphabetic or numeric character, but found %r"
+ % ch.encode('utf-8'), self.get_mark())
+ value = self.prefix(length)
+ self.forward(length)
+ ch = self.peek()
+ if ch not in u'\0 \t\r\n\x85\u2028\u2029?:,]}%@`':
+ raise ScannerError("while scanning an %s" % name, start_mark,
+ "expected alphabetic or numeric character, but found %r"
+ % ch.encode('utf-8'), self.get_mark())
+ end_mark = self.get_mark()
+ return TokenClass(value, start_mark, end_mark)
+
+ def scan_tag(self):
+ # See the specification for details.
+ start_mark = self.get_mark()
+ ch = self.peek(1)
+ if ch == u'<':
+ handle = None
+ self.forward(2)
+ suffix = self.scan_tag_uri('tag', start_mark)
+ if self.peek() != u'>':
+ raise ScannerError("while parsing a tag", start_mark,
+ "expected '>', but found %r" % self.peek().encode('utf-8'),
+ self.get_mark())
+ self.forward()
+ elif ch in u'\0 \t\r\n\x85\u2028\u2029':
+ handle = None
+ suffix = u'!'
+ self.forward()
+ else:
+ length = 1
+ use_handle = False
+ while ch not in u'\0 \r\n\x85\u2028\u2029':
+ if ch == u'!':
+ use_handle = True
+ break
+ length += 1
+ ch = self.peek(length)
+ handle = u'!'
+ if use_handle:
+ handle = self.scan_tag_handle('tag', start_mark)
+ else:
+ handle = u'!'
+ self.forward()
+ suffix = self.scan_tag_uri('tag', start_mark)
+ ch = self.peek()
+ if ch not in u'\0 \r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a tag", start_mark,
+ "expected ' ', but found %r" % ch.encode('utf-8'),
+ self.get_mark())
+ value = (handle, suffix)
+ end_mark = self.get_mark()
+ return TagToken(value, start_mark, end_mark)
+
+ def scan_block_scalar(self, style):
+ # See the specification for details.
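+ # 'style' is '|' (literal, line breaks kept) or '>' (folded, single
+ # line breaks become spaces).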
+
+ if style == '>':
+ folded = True
+ else:
+ folded = False
+
+ chunks = []
+ start_mark = self.get_mark()
+
+ # Scan the header.
+ self.forward()
+ chomping, increment = self.scan_block_scalar_indicators(start_mark)
+ self.scan_block_scalar_ignored_line(start_mark)
+
+ # Determine the indentation level and go to the first non-empty line.
+ min_indent = self.indent+1
+ if min_indent < 1:
+ min_indent = 1
+ if increment is None:
+ breaks, max_indent, end_mark = self.scan_block_scalar_indentation()
+ indent = max(min_indent, max_indent)
+ else:
+ indent = min_indent+increment-1
+ breaks, end_mark = self.scan_block_scalar_breaks(indent)
+ line_break = u''
+
+ # Scan the inner part of the block scalar.
+ while self.column == indent and self.peek() != u'\0':
+ chunks.extend(breaks)
+ leading_non_space = self.peek() not in u' \t'
+ length = 0
+ while self.peek(length) not in u'\0\r\n\x85\u2028\u2029':
+ length += 1
+ chunks.append(self.prefix(length))
+ self.forward(length)
+ line_break = self.scan_line_break()
+ breaks, end_mark = self.scan_block_scalar_breaks(indent)
+ if self.column == indent and self.peek() != u'\0':
+
+ # Unfortunately, folding rules are ambiguous.
+ #
+ # This is the folding according to the specification:
+
+ if folded and line_break == u'\n' \
+ and leading_non_space and self.peek() not in u' \t':
+ if not breaks:
+ chunks.append(u' ')
+ else:
+ chunks.append(line_break)
+
+ # This is Clark Evans's interpretation (also in the spec
+ # examples):
+ #
+ #if folded and line_break == u'\n':
+ # if not breaks:
+ # if self.peek() not in ' \t':
+ # chunks.append(u' ')
+ # else:
+ # chunks.append(line_break)
+ #else:
+ # chunks.append(line_break)
+ else:
+ break
+
+ # Chomp the tail.
+ if chomping is not False:
+ chunks.append(line_break)
+ if chomping is True:
+ chunks.extend(breaks)
+
+ # We are done.
+ return ScalarToken(u''.join(chunks), False, start_mark, end_mark,
+ style)
+
+ def scan_block_scalar_indicators(self, start_mark):
+ # See the specification for details.
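+ # A block scalar header may carry a chomping indicator ('-' strip,
+ # '+' keep) and an indentation indicator (1-9), in either order,
+ # e.g. '|2-' or '|-2'.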
+ chomping = None
+ increment = None
+ ch = self.peek()
+ if ch in u'+-':
+ if ch == '+':
+ chomping = True
+ else:
+ chomping = False
+ self.forward()
+ ch = self.peek()
+ if ch in u'0123456789':
+ increment = int(ch)
+ if increment == 0:
+ raise ScannerError("while scanning a block scalar", start_mark,
+ "expected indentation indicator in the range 1-9, but found 0",
+ self.get_mark())
+ self.forward()
+ elif ch in u'0123456789':
+ increment = int(ch)
+ if increment == 0:
+ raise ScannerError("while scanning a block scalar", start_mark,
+ "expected indentation indicator in the range 1-9, but found 0",
+ self.get_mark())
+ self.forward()
+ ch = self.peek()
+ if ch in u'+-':
+ if ch == '+':
+ chomping = True
+ else:
+ chomping = False
+ self.forward()
+ ch = self.peek()
+ if ch not in u'\0 \r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a block scalar", start_mark,
+ "expected chomping or indentation indicators, but found %r"
+ % ch.encode('utf-8'), self.get_mark())
+ return chomping, increment
+
+ def scan_block_scalar_ignored_line(self, start_mark):
+ # See the specification for details.
+ while self.peek() == u' ':
+ self.forward()
+ if self.peek() == u'#':
+ while self.peek() not in u'\0\r\n\x85\u2028\u2029':
+ self.forward()
+ ch = self.peek()
+ if ch not in u'\0\r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a block scalar", start_mark,
+ "expected a comment or a line break, but found %r"
+ % ch.encode('utf-8'), self.get_mark())
+ self.scan_line_break()
+
+ def scan_block_scalar_indentation(self):
+ # See the specification for details.
+ chunks = []
+ max_indent = 0
+ end_mark = self.get_mark()
+ while self.peek() in u' \r\n\x85\u2028\u2029':
+ if self.peek() != u' ':
+ chunks.append(self.scan_line_break())
+ end_mark = self.get_mark()
+ else:
+ self.forward()
+ if self.column > max_indent:
+ max_indent = self.column
+ return chunks, max_indent, end_mark
+
+ def scan_block_scalar_breaks(self, indent):
+ # See the specification for details.
+ chunks = []
+ end_mark = self.get_mark()
+ while self.column < indent and self.peek() == u' ':
+ self.forward()
+ while self.peek() in u'\r\n\x85\u2028\u2029':
+ chunks.append(self.scan_line_break())
+ end_mark = self.get_mark()
+ while self.column < indent and self.peek() == u' ':
+ self.forward()
+ return chunks, end_mark
+
+ def scan_flow_scalar(self, style):
+ # See the specification for details.
+ # Note that we relax the indentation rules for quoted scalars. Quoted
+ # scalars don't need to adhere to indentation rules because " and '
+ # clearly mark their beginning and end. Therefore we are less
+ # restrictive than the specification requires. We only need to check
+ # that document separators are not included in scalars.
+ if style == '"':
+ double = True
+ else:
+ double = False
+ chunks = []
+ start_mark = self.get_mark()
+ quote = self.peek()
+ self.forward()
+ chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark))
+ while self.peek() != quote:
+ chunks.extend(self.scan_flow_scalar_spaces(double, start_mark))
+ chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark))
+ self.forward()
+ end_mark = self.get_mark()
+ return ScalarToken(u''.join(chunks), False, start_mark, end_mark,
+ style)
+
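+ # Single-character escape sequences recognized in double-quoted
+ # scalars; e.g. the input "a\tb" is scanned into u'a\x09b'.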
+ ESCAPE_REPLACEMENTS = {
+ u'0': u'\0',
+ u'a': u'\x07',
+ u'b': u'\x08',
+ u't': u'\x09',
+ u'\t': u'\x09',
+ u'n': u'\x0A',
+ u'v': u'\x0B',
+ u'f': u'\x0C',
+ u'r': u'\x0D',
+ u'e': u'\x1B',
+ u' ': u'\x20',
+ u'\"': u'\"',
+ u'\\': u'\\',
+ u'N': u'\x85',
+ u'_': u'\xA0',
+ u'L': u'\u2028',
+ u'P': u'\u2029',
+ }
+
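+ # Numeric escapes and the number of hex digits each consumes:
+ # \xNN, \uNNNN and \UNNNNNNNN.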
+ ESCAPE_CODES = {
+ u'x': 2,
+ u'u': 4,
+ u'U': 8,
+ }
+
+ def scan_flow_scalar_non_spaces(self, double, start_mark):
+ # See the specification for details.
+ chunks = []
+ while True:
+ length = 0
+ while self.peek(length) not in u'\'\"\\\0 \t\r\n\x85\u2028\u2029':
+ length += 1
+ if length:
+ chunks.append(self.prefix(length))
+ self.forward(length)
+ ch = self.peek()
+ if not double and ch == u'\'' and self.peek(1) == u'\'':
+ chunks.append(u'\'')
+ self.forward(2)
+ elif (double and ch == u'\'') or (not double and ch in u'\"\\'):
+ chunks.append(ch)
+ self.forward()
+ elif double and ch == u'\\':
+ self.forward()
+ ch = self.peek()
+ if ch in self.ESCAPE_REPLACEMENTS:
+ chunks.append(self.ESCAPE_REPLACEMENTS[ch])
+ self.forward()
+ elif ch in self.ESCAPE_CODES:
+ length = self.ESCAPE_CODES[ch]
+ self.forward()
+ for k in range(length):
+ if self.peek(k) not in u'0123456789ABCDEFabcdef':
+ raise ScannerError("while scanning a double-quoted scalar", start_mark,
+ "expected escape sequence of %d hexdecimal numbers, but found %r" %
+ (length, self.peek(k).encode('utf-8')), self.get_mark())
+ code = int(self.prefix(length), 16)
+ chunks.append(unichr(code))
+ self.forward(length)
+ elif ch in u'\r\n\x85\u2028\u2029':
+ self.scan_line_break()
+ chunks.extend(self.scan_flow_scalar_breaks(double, start_mark))
+ else:
+ raise ScannerError("while scanning a double-quoted scalar", start_mark,
+ "found unknown escape character %r" % ch.encode('utf-8'), self.get_mark())
+ else:
+ return chunks
+
+ def scan_flow_scalar_spaces(self, double, start_mark):
+ # See the specification for details.
+ chunks = []
+ length = 0
+ while self.peek(length) in u' \t':
+ length += 1
+ whitespaces = self.prefix(length)
+ self.forward(length)
+ ch = self.peek()
+ if ch == u'\0':
+ raise ScannerError("while scanning a quoted scalar", start_mark,
+ "found unexpected end of stream", self.get_mark())
+ elif ch in u'\r\n\x85\u2028\u2029':
+ line_break = self.scan_line_break()
+ breaks = self.scan_flow_scalar_breaks(double, start_mark)
+ if line_break != u'\n':
+ chunks.append(line_break)
+ elif not breaks:
+ chunks.append(u' ')
+ chunks.extend(breaks)
+ else:
+ chunks.append(whitespaces)
+ return chunks
+
+ def scan_flow_scalar_breaks(self, double, start_mark):
+ # See the specification for details.
+ chunks = []
+ while True:
+ # Instead of checking indentation, we check for document
+ # separators.
+ prefix = self.prefix(3)
+ if (prefix == u'---' or prefix == u'...') \
+ and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a quoted scalar", start_mark,
+ "found unexpected document separator", self.get_mark())
+ while self.peek() in u' \t':
+ self.forward()
+ if self.peek() in u'\r\n\x85\u2028\u2029':
+ chunks.append(self.scan_line_break())
+ else:
+ return chunks
+
+ def scan_plain(self):
+ # See the specification for details.
+ # We add an additional restriction for the flow context:
+ # plain scalars in the flow context cannot contain ',', ':' or '?'.
+ # We also keep track of the `allow_simple_key` flag here.
+ # Indentation rules are loosened for the flow context.
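+ # A plain scalar ends at a comment, at ':' followed by whitespace in
+ # block context, or at any of ',:?[]{}' in flow context.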
+ chunks = []
+ start_mark = self.get_mark()
+ end_mark = start_mark
+ indent = self.indent+1
+ # We allow zero indentation for scalars, but then we need to check for
+ # document separators at the beginning of the line.
+ #if indent == 0:
+ # indent = 1
+ spaces = []
+ while True:
+ length = 0
+ if self.peek() == u'#':
+ break
+ while True:
+ ch = self.peek(length)
+ if ch in u'\0 \t\r\n\x85\u2028\u2029' \
+ or (not self.flow_level and ch == u':' and
+ self.peek(length+1) in u'\0 \t\r\n\x85\u2028\u2029') \
+ or (self.flow_level and ch in u',:?[]{}'):
+ break
+ length += 1
+ # It's not clear what we should do with ':' in the flow context.
+ if (self.flow_level and ch == u':'
+ and self.peek(length+1) not in u'\0 \t\r\n\x85\u2028\u2029,[]{}'):
+ self.forward(length)
+ raise ScannerError("while scanning a plain scalar", start_mark,
+ "found unexpected ':'", self.get_mark(),
+ "Please check http://pyyaml.org/wiki/YAMLColonInFlowContext for details.")
+ if length == 0:
+ break
+ self.allow_simple_key = False
+ chunks.extend(spaces)
+ chunks.append(self.prefix(length))
+ self.forward(length)
+ end_mark = self.get_mark()
+ spaces = self.scan_plain_spaces(indent, start_mark)
+ if not spaces or self.peek() == u'#' \
+ or (not self.flow_level and self.column < indent):
+ break
+ return ScalarToken(u''.join(chunks), True, start_mark, end_mark)
+
+ def scan_plain_spaces(self, indent, start_mark):
+ # See the specification for details.
+ # The specification is really confusing about tabs in plain scalars.
+ # We just forbid them completely. Do not use tabs in YAML!
+ chunks = []
+ length = 0
+ while self.peek(length) in u' ':
+ length += 1
+ whitespaces = self.prefix(length)
+ self.forward(length)
+ ch = self.peek()
+ if ch in u'\r\n\x85\u2028\u2029':
+ line_break = self.scan_line_break()
+ self.allow_simple_key = True
+ prefix = self.prefix(3)
+ if (prefix == u'---' or prefix == u'...') \
+ and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029':
+ return
+ breaks = []
+ while self.peek() in u' \r\n\x85\u2028\u2029':
+ if self.peek() == ' ':
+ self.forward()
+ else:
+ breaks.append(self.scan_line_break())
+ prefix = self.prefix(3)
+ if (prefix == u'---' or prefix == u'...') \
+ and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029':
+ return
+ if line_break != u'\n':
+ chunks.append(line_break)
+ elif not breaks:
+ chunks.append(u' ')
+ chunks.extend(breaks)
+ elif whitespaces:
+ chunks.append(whitespaces)
+ return chunks
+
+ def scan_tag_handle(self, name, start_mark):
+ # See the specification for details.
+ # For some strange reason, the specification does not allow '_' in
+ # tag handles. I have allowed it anyway.
+ ch = self.peek()
+ if ch != u'!':
+ raise ScannerError("while scanning a %s" % name, start_mark,
+ "expected '!', but found %r" % ch.encode('utf-8'),
+ self.get_mark())
+ length = 1
+ ch = self.peek(length)
+ if ch != u' ':
+ while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
+ or ch in u'-_':
+ length += 1
+ ch = self.peek(length)
+ if ch != u'!':
+ self.forward(length)
+ raise ScannerError("while scanning a %s" % name, start_mark,
+ "expected '!', but found %r" % ch.encode('utf-8'),
+ self.get_mark())
+ length += 1
+ value = self.prefix(length)
+ self.forward(length)
+ return value
+
+ def scan_tag_uri(self, name, start_mark):
+ # See the specification for details.
+ # Note: we do not check whether the URI is well-formed.
+ chunks = []
+ length = 0
+ ch = self.peek(length)
+ while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
+ or ch in u'-;/?:@&=+$,_.!~*\'()[]%':
+ if ch == u'%':
+ chunks.append(self.prefix(length))
+ self.forward(length)
+ length = 0
+ chunks.append(self.scan_uri_escapes(name, start_mark))
+ else:
+ length += 1
+ ch = self.peek(length)
+ if length:
+ chunks.append(self.prefix(length))
+ self.forward(length)
+ length = 0
+ if not chunks:
+ raise ScannerError("while parsing a %s" % name, start_mark,
+ "expected URI, but found %r" % ch.encode('utf-8'),
+ self.get_mark())
+ return u''.join(chunks)
+
+ def scan_uri_escapes(self, name, start_mark):
+ # See the specification for details.
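+ # Decodes %-escaped UTF-8 byte sequences, e.g. '%C3%A9' -> u'\xe9'.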
+ bytes = []
+ mark = self.get_mark()
+ while self.peek() == u'%':
+ self.forward()
+ for k in range(2):
+ if self.peek(k) not in u'0123456789ABCDEFabcdef':
+ raise ScannerError("while scanning a %s" % name, start_mark,
+ "expected URI escape sequence of 2 hexdecimal numbers, but found %r" %
+ (self.peek(k).encode('utf-8')), self.get_mark())
+ bytes.append(chr(int(self.prefix(2), 16)))
+ self.forward(2)
+ try:
+ value = unicode(''.join(bytes), 'utf-8')
+ except UnicodeDecodeError, exc:
+ raise ScannerError("while scanning a %s" % name, start_mark, str(exc), mark)
+ return value
+
+ def scan_line_break(self):
+ # Transforms:
+ # '\r\n' : '\n'
+ # '\r' : '\n'
+ # '\n' : '\n'
+ # '\x85' : '\n'
+ # '\u2028' : '\u2028'
+ # '\u2029' : '\u2029'
+ # default : ''
+ ch = self.peek()
+ if ch in u'\r\n\x85':
+ if self.prefix(2) == u'\r\n':
+ self.forward(2)
+ else:
+ self.forward()
+ return u'\n'
+ elif ch in u'\u2028\u2029':
+ self.forward()
+ return ch
+ return u''
+
+#try:
+# import psyco
+# psyco.bind(Scanner)
+#except ImportError:
+# pass
+
diff --git a/collectors/python.d.plugin/python_modules/pyyaml2/serializer.py b/collectors/python.d.plugin/python_modules/pyyaml2/serializer.py
new file mode 100644
index 0000000..15fdbb0
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/pyyaml2/serializer.py
@@ -0,0 +1,112 @@
+# SPDX-License-Identifier: MIT
+
+__all__ = ['Serializer', 'SerializerError']
+
+from error import YAMLError
+from events import *
+from nodes import *
+
+class SerializerError(YAMLError):
+ pass
+
+class Serializer(object):
+
+ ANCHOR_TEMPLATE = u'id%03d'
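+ # Auto-generated anchors look like 'id001', 'id002', ...; they are
+ # assigned only to nodes that occur more than once.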
+
+ def __init__(self, encoding=None,
+ explicit_start=None, explicit_end=None, version=None, tags=None):
+ self.use_encoding = encoding
+ self.use_explicit_start = explicit_start
+ self.use_explicit_end = explicit_end
+ self.use_version = version
+ self.use_tags = tags
+ self.serialized_nodes = {}
+ self.anchors = {}
+ self.last_anchor_id = 0
+ self.closed = None
+
+ def open(self):
+ if self.closed is None:
+ self.emit(StreamStartEvent(encoding=self.use_encoding))
+ self.closed = False
+ elif self.closed:
+ raise SerializerError("serializer is closed")
+ else:
+ raise SerializerError("serializer is already opened")
+
+ def close(self):
+ if self.closed is None:
+ raise SerializerError("serializer is not opened")
+ elif not self.closed:
+ self.emit(StreamEndEvent())
+ self.closed = True
+
+ #def __del__(self):
+ # self.close()
+
+ def serialize(self, node):
+ if self.closed is None:
+ raise SerializerError("serializer is not opened")
+ elif self.closed:
+ raise SerializerError("serializer is closed")
+ self.emit(DocumentStartEvent(explicit=self.use_explicit_start,
+ version=self.use_version, tags=self.use_tags))
+ self.anchor_node(node)
+ self.serialize_node(node, None, None)
+ self.emit(DocumentEndEvent(explicit=self.use_explicit_end))
+ self.serialized_nodes = {}
+ self.anchors = {}
+ self.last_anchor_id = 0
+
+ def anchor_node(self, node):
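+ # First pass over the tree: the first visit records a node with no
+ # anchor; a second visit means the node is shared, so it gets one.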
+ if node in self.anchors:
+ if self.anchors[node] is None:
+ self.anchors[node] = self.generate_anchor(node)
+ else:
+ self.anchors[node] = None
+ if isinstance(node, SequenceNode):
+ for item in node.value:
+ self.anchor_node(item)
+ elif isinstance(node, MappingNode):
+ for key, value in node.value:
+ self.anchor_node(key)
+ self.anchor_node(value)
+
+ def generate_anchor(self, node):
+ self.last_anchor_id += 1
+ return self.ANCHOR_TEMPLATE % self.last_anchor_id
+
+ def serialize_node(self, node, parent, index):
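+ # Second pass: emit events, turning already-serialized nodes into
+ # alias events that reference their anchors.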
+ alias = self.anchors[node]
+ if node in self.serialized_nodes:
+ self.emit(AliasEvent(alias))
+ else:
+ self.serialized_nodes[node] = True
+ self.descend_resolver(parent, index)
+ if isinstance(node, ScalarNode):
+ detected_tag = self.resolve(ScalarNode, node.value, (True, False))
+ default_tag = self.resolve(ScalarNode, node.value, (False, True))
+ implicit = (node.tag == detected_tag), (node.tag == default_tag)
+ self.emit(ScalarEvent(alias, node.tag, implicit, node.value,
+ style=node.style))
+ elif isinstance(node, SequenceNode):
+ implicit = (node.tag
+ == self.resolve(SequenceNode, node.value, True))
+ self.emit(SequenceStartEvent(alias, node.tag, implicit,
+ flow_style=node.flow_style))
+ index = 0
+ for item in node.value:
+ self.serialize_node(item, node, index)
+ index += 1
+ self.emit(SequenceEndEvent())
+ elif isinstance(node, MappingNode):
+ implicit = (node.tag
+ == self.resolve(MappingNode, node.value, True))
+ self.emit(MappingStartEvent(alias, node.tag, implicit,
+ flow_style=node.flow_style))
+ for key, value in node.value:
+ self.serialize_node(key, node, None)
+ self.serialize_node(value, node, key)
+ self.emit(MappingEndEvent())
+ self.ascend_resolver()
+
diff --git a/collectors/python.d.plugin/python_modules/pyyaml2/tokens.py b/collectors/python.d.plugin/python_modules/pyyaml2/tokens.py
new file mode 100644
index 0000000..c5c4fb1
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/pyyaml2/tokens.py
@@ -0,0 +1,105 @@
+# SPDX-License-Identifier: MIT
+
+class Token(object):
+ def __init__(self, start_mark, end_mark):
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ def __repr__(self):
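+ # e.g. ScalarToken(plain=True, style=None, value='x'); marks omitted.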
+ attributes = [key for key in self.__dict__
+ if not key.endswith('_mark')]
+ attributes.sort()
+ arguments = ', '.join(['%s=%r' % (key, getattr(self, key))
+ for key in attributes])
+ return '%s(%s)' % (self.__class__.__name__, arguments)
+
+#class BOMToken(Token):
+# id = '<byte order mark>'
+
+class DirectiveToken(Token):
+ id = '<directive>'
+ def __init__(self, name, value, start_mark, end_mark):
+ self.name = name
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+
+class DocumentStartToken(Token):
+ id = '<document start>'
+
+class DocumentEndToken(Token):
+ id = '<document end>'
+
+class StreamStartToken(Token):
+ id = '<stream start>'
+ def __init__(self, start_mark=None, end_mark=None,
+ encoding=None):
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.encoding = encoding
+
+class StreamEndToken(Token):
+ id = '<stream end>'
+
+class BlockSequenceStartToken(Token):
+ id = '<block sequence start>'
+
+class BlockMappingStartToken(Token):
+ id = '<block mapping start>'
+
+class BlockEndToken(Token):
+ id = '<block end>'
+
+class FlowSequenceStartToken(Token):
+ id = '['
+
+class FlowMappingStartToken(Token):
+ id = '{'
+
+class FlowSequenceEndToken(Token):
+ id = ']'
+
+class FlowMappingEndToken(Token):
+ id = '}'
+
+class KeyToken(Token):
+ id = '?'
+
+class ValueToken(Token):
+ id = ':'
+
+class BlockEntryToken(Token):
+ id = '-'
+
+class FlowEntryToken(Token):
+ id = ','
+
+class AliasToken(Token):
+ id = '<alias>'
+ def __init__(self, value, start_mark, end_mark):
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+
+class AnchorToken(Token):
+ id = '<anchor>'
+ def __init__(self, value, start_mark, end_mark):
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+
+class TagToken(Token):
+ id = '<tag>'
+ def __init__(self, value, start_mark, end_mark):
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+
+class ScalarToken(Token):
+ id = '<scalar>'
+ def __init__(self, value, plain, start_mark, end_mark, style=None):
+ self.value = value
+ self.plain = plain
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.style = style
+
diff --git a/collectors/python.d.plugin/python_modules/pyyaml3/__init__.py b/collectors/python.d.plugin/python_modules/pyyaml3/__init__.py
new file mode 100644
index 0000000..a884b33
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/pyyaml3/__init__.py
@@ -0,0 +1,313 @@
+# SPDX-License-Identifier: MIT
+
+from .error import *
+
+from .tokens import *
+from .events import *
+from .nodes import *
+
+from .loader import *
+from .dumper import *
+
+__version__ = '3.11'
+try:
+ from .cyaml import *
+ __with_libyaml__ = True
+except ImportError:
+ __with_libyaml__ = False
+
+import io
+
+def scan(stream, Loader=Loader):
+ """
+ Scan a YAML stream and produce scanning tokens.
+ """
+ loader = Loader(stream)
+ try:
+ while loader.check_token():
+ yield loader.get_token()
+ finally:
+ loader.dispose()
+
+def parse(stream, Loader=Loader):
+ """
+ Parse a YAML stream and produce parsing events.
+ """
+ loader = Loader(stream)
+ try:
+ while loader.check_event():
+ yield loader.get_event()
+ finally:
+ loader.dispose()
+
+def compose(stream, Loader=Loader):
+ """
+ Parse the first YAML document in a stream
+ and produce the corresponding representation tree.
+ """
+ loader = Loader(stream)
+ try:
+ return loader.get_single_node()
+ finally:
+ loader.dispose()
+
+def compose_all(stream, Loader=Loader):
+ """
+ Parse all YAML documents in a stream
+ and produce corresponding representation trees.
+ """
+ loader = Loader(stream)
+ try:
+ while loader.check_node():
+ yield loader.get_node()
+ finally:
+ loader.dispose()
+
+def load(stream, Loader=Loader):
+ """
+ Parse the first YAML document in a stream
+ and produce the corresponding Python object.
+ """
+ loader = Loader(stream)
+ try:
+ return loader.get_single_data()
+ finally:
+ loader.dispose()
+
+def load_all(stream, Loader=Loader):
+ """
+ Parse all YAML documents in a stream
+ and produce corresponding Python objects.
+ """
+ loader = Loader(stream)
+ try:
+ while loader.check_data():
+ yield loader.get_data()
+ finally:
+ loader.dispose()
+
+def safe_load(stream):
+ """
+ Parse the first YAML document in a stream
+ and produce the corresponding Python object.
+ Resolve only basic YAML tags.
+ """
+ return load(stream, SafeLoader)
+
+def safe_load_all(stream):
+ """
+ Parse all YAML documents in a stream
+ and produce corresponding Python objects.
+ Resolve only basic YAML tags.
+ """
+ return load_all(stream, SafeLoader)
+
+def emit(events, stream=None, Dumper=Dumper,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None):
+ """
+ Emit YAML parsing events into a stream.
+ If stream is None, return the produced string instead.
+ """
+ getvalue = None
+ if stream is None:
+ stream = io.StringIO()
+ getvalue = stream.getvalue
+ dumper = Dumper(stream, canonical=canonical, indent=indent, width=width,
+ allow_unicode=allow_unicode, line_break=line_break)
+ try:
+ for event in events:
+ dumper.emit(event)
+ finally:
+ dumper.dispose()
+ if getvalue:
+ return getvalue()
+
+def serialize_all(nodes, stream=None, Dumper=Dumper,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding=None, explicit_start=None, explicit_end=None,
+ version=None, tags=None):
+ """
+ Serialize a sequence of representation trees into a YAML stream.
+ If stream is None, return the produced string instead.
+ """
+ getvalue = None
+ if stream is None:
+ if encoding is None:
+ stream = io.StringIO()
+ else:
+ stream = io.BytesIO()
+ getvalue = stream.getvalue
+ dumper = Dumper(stream, canonical=canonical, indent=indent, width=width,
+ allow_unicode=allow_unicode, line_break=line_break,
+ encoding=encoding, version=version, tags=tags,
+ explicit_start=explicit_start, explicit_end=explicit_end)
+ try:
+ dumper.open()
+ for node in nodes:
+ dumper.serialize(node)
+ dumper.close()
+ finally:
+ dumper.dispose()
+ if getvalue:
+ return getvalue()
+
+def serialize(node, stream=None, Dumper=Dumper, **kwds):
+ """
+ Serialize a representation tree into a YAML stream.
+ If stream is None, return the produced string instead.
+ """
+ return serialize_all([node], stream, Dumper=Dumper, **kwds)
+
+def dump_all(documents, stream=None, Dumper=Dumper,
+ default_style=None, default_flow_style=None,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding=None, explicit_start=None, explicit_end=None,
+ version=None, tags=None):
+ """
+ Serialize a sequence of Python objects into a YAML stream.
+ If stream is None, return the produced string instead.
+ """
+ getvalue = None
+ if stream is None:
+ if encoding is None:
+ stream = io.StringIO()
+ else:
+ stream = io.BytesIO()
+ getvalue = stream.getvalue
+ dumper = Dumper(stream, default_style=default_style,
+ default_flow_style=default_flow_style,
+ canonical=canonical, indent=indent, width=width,
+ allow_unicode=allow_unicode, line_break=line_break,
+ encoding=encoding, version=version, tags=tags,
+ explicit_start=explicit_start, explicit_end=explicit_end)
+ try:
+ dumper.open()
+ for data in documents:
+ dumper.represent(data)
+ dumper.close()
+ finally:
+ dumper.dispose()
+ if getvalue:
+ return getvalue()
+
+def dump(data, stream=None, Dumper=Dumper, **kwds):
+ """
+ Serialize a Python object into a YAML stream.
+ If stream is None, return the produced string instead.
+ """
+ return dump_all([data], stream, Dumper=Dumper, **kwds)
+
+def safe_dump_all(documents, stream=None, **kwds):
+ """
+ Serialize a sequence of Python objects into a YAML stream.
+ Produce only basic YAML tags.
+ If stream is None, return the produced string instead.
+ """
+ return dump_all(documents, stream, Dumper=SafeDumper, **kwds)
+
+def safe_dump(data, stream=None, **kwds):
+ """
+ Serialize a Python object into a YAML stream.
+ Produce only basic YAML tags.
+ If stream is None, return the produced string instead.
+ """
+ return dump_all([data], stream, Dumper=SafeDumper, **kwds)
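+# A minimal usage sketch: safe_load("a: 1\nb: [2, 3]") returns
+# {'a': 1, 'b': [2, 3]}, and safe_dump({'a': 1}) returns 'a: 1\n'.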
+
+def add_implicit_resolver(tag, regexp, first=None,
+ Loader=Loader, Dumper=Dumper):
+ """
+ Add an implicit scalar detector.
+ If an implicit scalar value matches the given regexp,
+ the corresponding tag is assigned to the scalar.
+ first is a sequence of possible initial characters or None.
+ """
+ Loader.add_implicit_resolver(tag, regexp, first)
+ Dumper.add_implicit_resolver(tag, regexp, first)
+
+def add_path_resolver(tag, path, kind=None, Loader=Loader, Dumper=Dumper):
+ """
+ Add a path based resolver for the given tag.
+ A path is a list of keys that forms a path
+ to a node in the representation tree.
+ Keys can be string values, integers, or None.
+ """
+ Loader.add_path_resolver(tag, path, kind)
+ Dumper.add_path_resolver(tag, path, kind)
+
+def add_constructor(tag, constructor, Loader=Loader):
+ """
+ Add a constructor for the given tag.
+ Constructor is a function that accepts a Loader instance
+ and a node object and produces the corresponding Python object.
+ """
+ Loader.add_constructor(tag, constructor)
+
+def add_multi_constructor(tag_prefix, multi_constructor, Loader=Loader):
+ """
+ Add a multi-constructor for the given tag prefix.
+ Multi-constructor is called for a node if its tag starts with tag_prefix.
+ Multi-constructor accepts a Loader instance, a tag suffix,
+ and a node object and produces the corresponding Python object.
+ """
+ Loader.add_multi_constructor(tag_prefix, multi_constructor)
+
+def add_representer(data_type, representer, Dumper=Dumper):
+ """
+ Add a representer for the given type.
+ Representer is a function accepting a Dumper instance
+ and an instance of the given data type
+ and producing the corresponding representation node.
+ """
+ Dumper.add_representer(data_type, representer)
+
+def add_multi_representer(data_type, multi_representer, Dumper=Dumper):
+ """
+    Add a multi-representer for the given type.
+ Multi-representer is a function accepting a Dumper instance
+ and an instance of the given data type or subtype
+ and producing the corresponding representation node.
+ """
+ Dumper.add_multi_representer(data_type, multi_representer)
+
+class YAMLObjectMetaclass(type):
+ """
+ The metaclass for YAMLObject.
+ """
+ def __init__(cls, name, bases, kwds):
+ super(YAMLObjectMetaclass, cls).__init__(name, bases, kwds)
+ if 'yaml_tag' in kwds and kwds['yaml_tag'] is not None:
+ cls.yaml_loader.add_constructor(cls.yaml_tag, cls.from_yaml)
+ cls.yaml_dumper.add_representer(cls, cls.to_yaml)
+
+class YAMLObject(metaclass=YAMLObjectMetaclass):
+ """
+ An object that can dump itself to a YAML stream
+ and load itself from a YAML stream.
+ """
+
+ __slots__ = () # no direct instantiation, so allow immutable subclasses
+
+ yaml_loader = Loader
+ yaml_dumper = Dumper
+
+ yaml_tag = None
+ yaml_flow_style = None
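+ # A subclass typically just sets yaml_tag (e.g. u'!Point'); the
+ # metaclass then registers it with yaml_loader and yaml_dumper.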
+
+ @classmethod
+ def from_yaml(cls, loader, node):
+ """
+ Convert a representation node to a Python object.
+ """
+ return loader.construct_yaml_object(node, cls)
+
+ @classmethod
+ def to_yaml(cls, dumper, data):
+ """
+ Convert a Python object to a representation node.
+ """
+ return dumper.represent_yaml_object(cls.yaml_tag, data, cls,
+ flow_style=cls.yaml_flow_style)
+
diff --git a/collectors/python.d.plugin/python_modules/pyyaml3/composer.py b/collectors/python.d.plugin/python_modules/pyyaml3/composer.py
new file mode 100644
index 0000000..c418bba
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/pyyaml3/composer.py
@@ -0,0 +1,140 @@
+# SPDX-License-Identifier: MIT
+
+__all__ = ['Composer', 'ComposerError']
+
+from .error import MarkedYAMLError
+from .events import *
+from .nodes import *
+
+class ComposerError(MarkedYAMLError):
+ pass
+
+class Composer:
+
+ def __init__(self):
+ self.anchors = {}
+
+ def check_node(self):
+ # Drop the STREAM-START event.
+ if self.check_event(StreamStartEvent):
+ self.get_event()
+
+ # Check whether there are more documents available.
+ return not self.check_event(StreamEndEvent)
+
+ def get_node(self):
+ # Get the root node of the next document.
+ if not self.check_event(StreamEndEvent):
+ return self.compose_document()
+
+ def get_single_node(self):
+ # Drop the STREAM-START event.
+ self.get_event()
+
+ # Compose a document if the stream is not empty.
+ document = None
+ if not self.check_event(StreamEndEvent):
+ document = self.compose_document()
+
+ # Ensure that the stream contains no more documents.
+ if not self.check_event(StreamEndEvent):
+ event = self.get_event()
+ raise ComposerError("expected a single document in the stream",
+ document.start_mark, "but found another document",
+ event.start_mark)
+
+ # Drop the STREAM-END event.
+ self.get_event()
+
+ return document
+
+ def compose_document(self):
+ # Drop the DOCUMENT-START event.
+ self.get_event()
+
+ # Compose the root node.
+ node = self.compose_node(None, None)
+
+ # Drop the DOCUMENT-END event.
+ self.get_event()
+
+ self.anchors = {}
+ return node
+
+ def compose_node(self, parent, index):
+ if self.check_event(AliasEvent):
+ event = self.get_event()
+ anchor = event.anchor
+ if anchor not in self.anchors:
+ raise ComposerError(None, None, "found undefined alias %r"
+ % anchor, event.start_mark)
+ return self.anchors[anchor]
+ event = self.peek_event()
+ anchor = event.anchor
+ if anchor is not None:
+ if anchor in self.anchors:
+ raise ComposerError("found duplicate anchor %r; first occurence"
+ % anchor, self.anchors[anchor].start_mark,
+ "second occurence", event.start_mark)
+ self.descend_resolver(parent, index)
+ if self.check_event(ScalarEvent):
+ node = self.compose_scalar_node(anchor)
+ elif self.check_event(SequenceStartEvent):
+ node = self.compose_sequence_node(anchor)
+ elif self.check_event(MappingStartEvent):
+ node = self.compose_mapping_node(anchor)
+ self.ascend_resolver()
+ return node
+
+ def compose_scalar_node(self, anchor):
+ event = self.get_event()
+ tag = event.tag
+ if tag is None or tag == '!':
+ tag = self.resolve(ScalarNode, event.value, event.implicit)
+ node = ScalarNode(tag, event.value,
+ event.start_mark, event.end_mark, style=event.style)
+ if anchor is not None:
+ self.anchors[anchor] = node
+ return node
+
+ def compose_sequence_node(self, anchor):
+ start_event = self.get_event()
+ tag = start_event.tag
+ if tag is None or tag == '!':
+ tag = self.resolve(SequenceNode, None, start_event.implicit)
+ node = SequenceNode(tag, [],
+ start_event.start_mark, None,
+ flow_style=start_event.flow_style)
+ if anchor is not None:
+ self.anchors[anchor] = node
+ index = 0
+ while not self.check_event(SequenceEndEvent):
+ node.value.append(self.compose_node(node, index))
+ index += 1
+ end_event = self.get_event()
+ node.end_mark = end_event.end_mark
+ return node
+
+ def compose_mapping_node(self, anchor):
+ start_event = self.get_event()
+ tag = start_event.tag
+ if tag is None or tag == '!':
+ tag = self.resolve(MappingNode, None, start_event.implicit)
+ node = MappingNode(tag, [],
+ start_event.start_mark, None,
+ flow_style=start_event.flow_style)
+ if anchor is not None:
+ self.anchors[anchor] = node
+ while not self.check_event(MappingEndEvent):
+ #key_event = self.peek_event()
+ item_key = self.compose_node(node, None)
+ #if item_key in node.value:
+ # raise ComposerError("while composing a mapping", start_event.start_mark,
+ # "found duplicate key", key_event.start_mark)
+ item_value = self.compose_node(node, item_key)
+ #node.value[item_key] = item_value
+ node.value.append((item_key, item_value))
+ end_event = self.get_event()
+ node.end_mark = end_event.end_mark
+ return node
+
diff --git a/collectors/python.d.plugin/python_modules/pyyaml3/constructor.py b/collectors/python.d.plugin/python_modules/pyyaml3/constructor.py
new file mode 100644
index 0000000..ee09a7a
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/pyyaml3/constructor.py
@@ -0,0 +1,687 @@
+# SPDX-License-Identifier: MIT
+
+__all__ = ['BaseConstructor', 'SafeConstructor', 'Constructor',
+ 'ConstructorError']
+
+from .error import *
+from .nodes import *
+
+import collections.abc, datetime, base64, binascii, re, sys, types
+
+class ConstructorError(MarkedYAMLError):
+ pass
+
+class BaseConstructor:
+
+ yaml_constructors = {}
+ yaml_multi_constructors = {}
+
+ def __init__(self):
+ self.constructed_objects = {}
+ self.recursive_objects = {}
+ self.state_generators = []
+ self.deep_construct = False
+
+ def check_data(self):
+ # Check whether there are more documents available.
+ return self.check_node()
+
+ def get_data(self):
+ # Construct and return the next document.
+ if self.check_node():
+ return self.construct_document(self.get_node())
+
+ def get_single_data(self):
+ # Ensure that the stream contains a single document and construct it.
+ node = self.get_single_node()
+ if node is not None:
+ return self.construct_document(node)
+ return None
+
+ def construct_document(self, node):
+ data = self.construct_object(node)
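+ # Drain pending generator-based constructors so recursive structures
+ # are fully populated before the document is returned.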
+ while self.state_generators:
+ state_generators = self.state_generators
+ self.state_generators = []
+ for generator in state_generators:
+ for dummy in generator:
+ pass
+ self.constructed_objects = {}
+ self.recursive_objects = {}
+ self.deep_construct = False
+ return data
+
+ def construct_object(self, node, deep=False):
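+ # Constructor lookup order: exact tag, then a registered tag prefix,
+ # then the catch-all None entries, then by node kind.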
+ if node in self.constructed_objects:
+ return self.constructed_objects[node]
+ if deep:
+ old_deep = self.deep_construct
+ self.deep_construct = True
+ if node in self.recursive_objects:
+ raise ConstructorError(None, None,
+ "found unconstructable recursive node", node.start_mark)
+ self.recursive_objects[node] = None
+ constructor = None
+ tag_suffix = None
+ if node.tag in self.yaml_constructors:
+ constructor = self.yaml_constructors[node.tag]
+ else:
+ for tag_prefix in self.yaml_multi_constructors:
+ if node.tag.startswith(tag_prefix):
+ tag_suffix = node.tag[len(tag_prefix):]
+ constructor = self.yaml_multi_constructors[tag_prefix]
+ break
+ else:
+ if None in self.yaml_multi_constructors:
+ tag_suffix = node.tag
+ constructor = self.yaml_multi_constructors[None]
+ elif None in self.yaml_constructors:
+ constructor = self.yaml_constructors[None]
+ elif isinstance(node, ScalarNode):
+ constructor = self.__class__.construct_scalar
+ elif isinstance(node, SequenceNode):
+ constructor = self.__class__.construct_sequence
+ elif isinstance(node, MappingNode):
+ constructor = self.__class__.construct_mapping
+ if tag_suffix is None:
+ data = constructor(self, node)
+ else:
+ data = constructor(self, tag_suffix, node)
+ if isinstance(data, types.GeneratorType):
+ generator = data
+ data = next(generator)
+ if self.deep_construct:
+ for dummy in generator:
+ pass
+ else:
+ self.state_generators.append(generator)
+ self.constructed_objects[node] = data
+ del self.recursive_objects[node]
+ if deep:
+ self.deep_construct = old_deep
+ return data
+
+ def construct_scalar(self, node):
+ if not isinstance(node, ScalarNode):
+ raise ConstructorError(None, None,
+ "expected a scalar node, but found %s" % node.id,
+ node.start_mark)
+ return node.value
+
+ def construct_sequence(self, node, deep=False):
+ if not isinstance(node, SequenceNode):
+ raise ConstructorError(None, None,
+ "expected a sequence node, but found %s" % node.id,
+ node.start_mark)
+ return [self.construct_object(child, deep=deep)
+ for child in node.value]
+
+ def construct_mapping(self, node, deep=False):
+ if not isinstance(node, MappingNode):
+ raise ConstructorError(None, None,
+ "expected a mapping node, but found %s" % node.id,
+ node.start_mark)
+ mapping = {}
+ for key_node, value_node in node.value:
+ key = self.construct_object(key_node, deep=deep)
+ if not isinstance(key, collections.abc.Hashable):
+ raise ConstructorError("while constructing a mapping", node.start_mark,
+ "found unhashable key", key_node.start_mark)
+ value = self.construct_object(value_node, deep=deep)
+ mapping[key] = value
+ return mapping
+
+ def construct_pairs(self, node, deep=False):
+ if not isinstance(node, MappingNode):
+ raise ConstructorError(None, None,
+ "expected a mapping node, but found %s" % node.id,
+ node.start_mark)
+ pairs = []
+ for key_node, value_node in node.value:
+ key = self.construct_object(key_node, deep=deep)
+ value = self.construct_object(value_node, deep=deep)
+ pairs.append((key, value))
+ return pairs
+
+ @classmethod
+ def add_constructor(cls, tag, constructor):
+ if not 'yaml_constructors' in cls.__dict__:
+ cls.yaml_constructors = cls.yaml_constructors.copy()
+ cls.yaml_constructors[tag] = constructor
+
+ @classmethod
+ def add_multi_constructor(cls, tag_prefix, multi_constructor):
+ if not 'yaml_multi_constructors' in cls.__dict__:
+ cls.yaml_multi_constructors = cls.yaml_multi_constructors.copy()
+ cls.yaml_multi_constructors[tag_prefix] = multi_constructor
+
+class SafeConstructor(BaseConstructor):
+
+ def construct_scalar(self, node):
+ if isinstance(node, MappingNode):
+ for key_node, value_node in node.value:
+ if key_node.tag == 'tag:yaml.org,2002:value':
+ return self.construct_scalar(value_node)
+ return super().construct_scalar(node)
+
+ def flatten_mapping(self, node):
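+ # Implements the '<<' merge key: merged mappings (or sequences of
+ # mappings) are spliced in ahead of the node's own key/value pairs.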
+ merge = []
+ index = 0
+ while index < len(node.value):
+ key_node, value_node = node.value[index]
+ if key_node.tag == 'tag:yaml.org,2002:merge':
+ del node.value[index]
+ if isinstance(value_node, MappingNode):
+ self.flatten_mapping(value_node)
+ merge.extend(value_node.value)
+ elif isinstance(value_node, SequenceNode):
+ submerge = []
+ for subnode in value_node.value:
+ if not isinstance(subnode, MappingNode):
+ raise ConstructorError("while constructing a mapping",
+ node.start_mark,
+ "expected a mapping for merging, but found %s"
+ % subnode.id, subnode.start_mark)
+ self.flatten_mapping(subnode)
+ submerge.append(subnode.value)
+ submerge.reverse()
+ for value in submerge:
+ merge.extend(value)
+ else:
+ raise ConstructorError("while constructing a mapping", node.start_mark,
+ "expected a mapping or list of mappings for merging, but found %s"
+ % value_node.id, value_node.start_mark)
+ elif key_node.tag == 'tag:yaml.org,2002:value':
+ key_node.tag = 'tag:yaml.org,2002:str'
+ index += 1
+ else:
+ index += 1
+ if merge:
+ node.value = merge + node.value
+
+ def construct_mapping(self, node, deep=False):
+ if isinstance(node, MappingNode):
+ self.flatten_mapping(node)
+ return super().construct_mapping(node, deep=deep)
+
+ def construct_yaml_null(self, node):
+ self.construct_scalar(node)
+ return None
+
+ bool_values = {
+ 'yes': True,
+ 'no': False,
+ 'true': True,
+ 'false': False,
+ 'on': True,
+ 'off': False,
+ }
+
+ def construct_yaml_bool(self, node):
+ value = self.construct_scalar(node)
+ return self.bool_values[value.lower()]
+
+ def construct_yaml_int(self, node):
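+ # Handles YAML 1.1 integer forms: 0b binary, 0x hex, leading-zero
+ # octal, base-60 sexagesimal such as '1:30' (= 90), and decimal.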
+ value = self.construct_scalar(node)
+ value = value.replace('_', '')
+ sign = +1
+ if value[0] == '-':
+ sign = -1
+ if value[0] in '+-':
+ value = value[1:]
+ if value == '0':
+ return 0
+ elif value.startswith('0b'):
+ return sign*int(value[2:], 2)
+ elif value.startswith('0x'):
+ return sign*int(value[2:], 16)
+ elif value[0] == '0':
+ return sign*int(value, 8)
+ elif ':' in value:
+ digits = [int(part) for part in value.split(':')]
+ digits.reverse()
+ base = 1
+ value = 0
+ for digit in digits:
+ value += digit*base
+ base *= 60
+ return sign*value
+ else:
+ return sign*int(value)
+
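+ # Compute +inf portably by squaring a huge float until it stops growing.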
+ inf_value = 1e300
+ while inf_value != inf_value*inf_value:
+ inf_value *= inf_value
+ nan_value = -inf_value/inf_value # Trying to make a quiet NaN (like C99).
+
+ def construct_yaml_float(self, node):
+ value = self.construct_scalar(node)
+ value = value.replace('_', '').lower()
+ sign = +1
+ if value[0] == '-':
+ sign = -1
+ if value[0] in '+-':
+ value = value[1:]
+ if value == '.inf':
+ return sign*self.inf_value
+ elif value == '.nan':
+ return self.nan_value
+ elif ':' in value:
+ digits = [float(part) for part in value.split(':')]
+ digits.reverse()
+ base = 1
+ value = 0.0
+ for digit in digits:
+ value += digit*base
+ base *= 60
+ return sign*value
+ else:
+ return sign*float(value)
+
+ def construct_yaml_binary(self, node):
+ try:
+ value = self.construct_scalar(node).encode('ascii')
+ except UnicodeEncodeError as exc:
+ raise ConstructorError(None, None,
+ "failed to convert base64 data into ascii: %s" % exc,
+ node.start_mark)
+ try:
+ if hasattr(base64, 'decodebytes'):
+ return base64.decodebytes(value)
+ else:
+ return base64.decodestring(value)
+ except binascii.Error as exc:
+ raise ConstructorError(None, None,
+ "failed to decode base64 data: %s" % exc, node.start_mark)
+
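+ # YAML 1.1 timestamps, e.g. '2001-12-14 21:59:43.10 -5' or the
+ # date-only form '2001-12-14'.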
+ timestamp_regexp = re.compile(
+ r'''^(?P<year>[0-9][0-9][0-9][0-9])
+ -(?P<month>[0-9][0-9]?)
+ -(?P<day>[0-9][0-9]?)
+ (?:(?:[Tt]|[ \t]+)
+ (?P<hour>[0-9][0-9]?)
+ :(?P<minute>[0-9][0-9])
+ :(?P<second>[0-9][0-9])
+ (?:\.(?P<fraction>[0-9]*))?
+ (?:[ \t]*(?P<tz>Z|(?P<tz_sign>[-+])(?P<tz_hour>[0-9][0-9]?)
+ (?::(?P<tz_minute>[0-9][0-9]))?))?)?$''', re.X)
+
+ def construct_yaml_timestamp(self, node):
+ value = self.construct_scalar(node)
+ match = self.timestamp_regexp.match(node.value)
+ values = match.groupdict()
+ year = int(values['year'])
+ month = int(values['month'])
+ day = int(values['day'])
+ if not values['hour']:
+ return datetime.date(year, month, day)
+ hour = int(values['hour'])
+ minute = int(values['minute'])
+ second = int(values['second'])
+ fraction = 0
+ if values['fraction']:
+ fraction = values['fraction'][:6]
+ while len(fraction) < 6:
+ fraction += '0'
+ fraction = int(fraction)
+ delta = None
+ if values['tz_sign']:
+ tz_hour = int(values['tz_hour'])
+ tz_minute = int(values['tz_minute'] or 0)
+ delta = datetime.timedelta(hours=tz_hour, minutes=tz_minute)
+ if values['tz_sign'] == '-':
+ delta = -delta
+ data = datetime.datetime(year, month, day, hour, minute, second, fraction)
+ if delta:
+ data -= delta
+ return data
+
+ def construct_yaml_omap(self, node):
+ # Note: we do not check for duplicate keys, because it's too
+ # CPU-expensive.
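+ # The empty result is yielded first so aliases to this node resolve
+ # during construction; it is filled in afterwards.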
+ omap = []
+ yield omap
+ if not isinstance(node, SequenceNode):
+ raise ConstructorError("while constructing an ordered map", node.start_mark,
+ "expected a sequence, but found %s" % node.id, node.start_mark)
+ for subnode in node.value:
+ if not isinstance(subnode, MappingNode):
+ raise ConstructorError("while constructing an ordered map", node.start_mark,
+ "expected a mapping of length 1, but found %s" % subnode.id,
+ subnode.start_mark)
+ if len(subnode.value) != 1:
+ raise ConstructorError("while constructing an ordered map", node.start_mark,
+ "expected a single mapping item, but found %d items" % len(subnode.value),
+ subnode.start_mark)
+ key_node, value_node = subnode.value[0]
+ key = self.construct_object(key_node)
+ value = self.construct_object(value_node)
+ omap.append((key, value))
+
+ def construct_yaml_pairs(self, node):
+ # Note: the same code as `construct_yaml_omap`.
+ pairs = []
+ yield pairs
+ if not isinstance(node, SequenceNode):
+ raise ConstructorError("while constructing pairs", node.start_mark,
+ "expected a sequence, but found %s" % node.id, node.start_mark)
+ for subnode in node.value:
+ if not isinstance(subnode, MappingNode):
+ raise ConstructorError("while constructing pairs", node.start_mark,
+ "expected a mapping of length 1, but found %s" % subnode.id,
+ subnode.start_mark)
+ if len(subnode.value) != 1:
+ raise ConstructorError("while constructing pairs", node.start_mark,
+ "expected a single mapping item, but found %d items" % len(subnode.value),
+ subnode.start_mark)
+ key_node, value_node = subnode.value[0]
+ key = self.construct_object(key_node)
+ value = self.construct_object(value_node)
+ pairs.append((key, value))
+
+ def construct_yaml_set(self, node):
+ data = set()
+ yield data
+ value = self.construct_mapping(node)
+ data.update(value)
+
+ def construct_yaml_str(self, node):
+ return self.construct_scalar(node)
+
+ def construct_yaml_seq(self, node):
+ data = []
+ yield data
+ data.extend(self.construct_sequence(node))
+
+ def construct_yaml_map(self, node):
+ data = {}
+ yield data
+ value = self.construct_mapping(node)
+ data.update(value)
+
+ def construct_yaml_object(self, node, cls):
+ data = cls.__new__(cls)
+ yield data
+ if hasattr(data, '__setstate__'):
+ state = self.construct_mapping(node, deep=True)
+ data.__setstate__(state)
+ else:
+ state = self.construct_mapping(node)
+ data.__dict__.update(state)
+
+ def construct_undefined(self, node):
+ raise ConstructorError(None, None,
+ "could not determine a constructor for the tag %r" % node.tag,
+ node.start_mark)
+
+SafeConstructor.add_constructor(
+ 'tag:yaml.org,2002:null',
+ SafeConstructor.construct_yaml_null)
+
+SafeConstructor.add_constructor(
+ 'tag:yaml.org,2002:bool',
+ SafeConstructor.construct_yaml_bool)
+
+SafeConstructor.add_constructor(
+ 'tag:yaml.org,2002:int',
+ SafeConstructor.construct_yaml_int)
+
+SafeConstructor.add_constructor(
+ 'tag:yaml.org,2002:float',
+ SafeConstructor.construct_yaml_float)
+
+SafeConstructor.add_constructor(
+ 'tag:yaml.org,2002:binary',
+ SafeConstructor.construct_yaml_binary)
+
+SafeConstructor.add_constructor(
+ 'tag:yaml.org,2002:timestamp',
+ SafeConstructor.construct_yaml_timestamp)
+
+SafeConstructor.add_constructor(
+ 'tag:yaml.org,2002:omap',
+ SafeConstructor.construct_yaml_omap)
+
+SafeConstructor.add_constructor(
+ 'tag:yaml.org,2002:pairs',
+ SafeConstructor.construct_yaml_pairs)
+
+SafeConstructor.add_constructor(
+ 'tag:yaml.org,2002:set',
+ SafeConstructor.construct_yaml_set)
+
+SafeConstructor.add_constructor(
+ 'tag:yaml.org,2002:str',
+ SafeConstructor.construct_yaml_str)
+
+SafeConstructor.add_constructor(
+ 'tag:yaml.org,2002:seq',
+ SafeConstructor.construct_yaml_seq)
+
+SafeConstructor.add_constructor(
+ 'tag:yaml.org,2002:map',
+ SafeConstructor.construct_yaml_map)
+
+SafeConstructor.add_constructor(None,
+ SafeConstructor.construct_undefined)
+
+class Constructor(SafeConstructor):
+
+ def construct_python_str(self, node):
+ return self.construct_scalar(node)
+
+ def construct_python_unicode(self, node):
+ return self.construct_scalar(node)
+
+ def construct_python_bytes(self, node):
+ try:
+ value = self.construct_scalar(node).encode('ascii')
+ except UnicodeEncodeError as exc:
+ raise ConstructorError(None, None,
+ "failed to convert base64 data into ascii: %s" % exc,
+ node.start_mark)
+ try:
+ if hasattr(base64, 'decodebytes'):
+ return base64.decodebytes(value)
+ else:
+ return base64.decodestring(value)
+ except binascii.Error as exc:
+ raise ConstructorError(None, None,
+ "failed to decode base64 data: %s" % exc, node.start_mark)
+
+ def construct_python_long(self, node):
+ return self.construct_yaml_int(node)
+
+ def construct_python_complex(self, node):
+ return complex(self.construct_scalar(node))
+
+ def construct_python_tuple(self, node):
+ return tuple(self.construct_sequence(node))
+
+ def find_python_module(self, name, mark):
+ if not name:
+ raise ConstructorError("while constructing a Python module", mark,
+ "expected non-empty name appended to the tag", mark)
+ try:
+ __import__(name)
+ except ImportError as exc:
+ raise ConstructorError("while constructing a Python module", mark,
+ "cannot find module %r (%s)" % (name, exc), mark)
+ return sys.modules[name]
+
+ def find_python_name(self, name, mark):
+ if not name:
+ raise ConstructorError("while constructing a Python object", mark,
+ "expected non-empty name appended to the tag", mark)
+ if '.' in name:
+ module_name, object_name = name.rsplit('.', 1)
+ else:
+ module_name = 'builtins'
+ object_name = name
+ try:
+ __import__(module_name)
+ except ImportError as exc:
+ raise ConstructorError("while constructing a Python object", mark,
+ "cannot find module %r (%s)" % (module_name, exc), mark)
+ module = sys.modules[module_name]
+ if not hasattr(module, object_name):
+ raise ConstructorError("while constructing a Python object", mark,
+ "cannot find %r in the module %r"
+ % (object_name, module.__name__), mark)
+ return getattr(module, object_name)
+
+ def construct_python_name(self, suffix, node):
+ value = self.construct_scalar(node)
+ if value:
+ raise ConstructorError("while constructing a Python name", node.start_mark,
+ "expected the empty value, but found %r" % value, node.start_mark)
+ return self.find_python_name(suffix, node.start_mark)
+
+ def construct_python_module(self, suffix, node):
+ value = self.construct_scalar(node)
+ if value:
+ raise ConstructorError("while constructing a Python module", node.start_mark,
+ "expected the empty value, but found %r" % value, node.start_mark)
+ return self.find_python_module(suffix, node.start_mark)
+
+ def make_python_instance(self, suffix, node,
+ args=None, kwds=None, newobj=False):
+ if not args:
+ args = []
+ if not kwds:
+ kwds = {}
+ cls = self.find_python_name(suffix, node.start_mark)
+ if newobj and isinstance(cls, type):
+ return cls.__new__(cls, *args, **kwds)
+ else:
+ return cls(*args, **kwds)
+
+ def set_python_instance_state(self, instance, state):
+ if hasattr(instance, '__setstate__'):
+ instance.__setstate__(state)
+ else:
+ slotstate = {}
+ if isinstance(state, tuple) and len(state) == 2:
+ state, slotstate = state
+ if hasattr(instance, '__dict__'):
+ instance.__dict__.update(state)
+ elif state:
+ slotstate.update(state)
+ for key, value in slotstate.items():
+ setattr(instance, key, value)
+
+ def construct_python_object(self, suffix, node):
+ # Format:
+ # !!python/object:module.name { ... state ... }
+ instance = self.make_python_instance(suffix, node, newobj=True)
+ yield instance
+ deep = hasattr(instance, '__setstate__')
+ state = self.construct_mapping(node, deep=deep)
+ self.set_python_instance_state(instance, state)
+
+ def construct_python_object_apply(self, suffix, node, newobj=False):
+ # Format:
+ # !!python/object/apply # (or !!python/object/new)
+ # args: [ ... arguments ... ]
+ # kwds: { ... keywords ... }
+ # state: ... state ...
+ # listitems: [ ... listitems ... ]
+ # dictitems: { ... dictitems ... }
+ # or short format:
+ # !!python/object/apply [ ... arguments ... ]
+ # The difference between !!python/object/apply and !!python/object/new
+ # is how an object is created, check make_python_instance for details.
+ if isinstance(node, SequenceNode):
+ args = self.construct_sequence(node, deep=True)
+ kwds = {}
+ state = {}
+ listitems = []
+ dictitems = {}
+ else:
+ value = self.construct_mapping(node, deep=True)
+ args = value.get('args', [])
+ kwds = value.get('kwds', {})
+ state = value.get('state', {})
+ listitems = value.get('listitems', [])
+ dictitems = value.get('dictitems', {})
+ instance = self.make_python_instance(suffix, node, args, kwds, newobj)
+ if state:
+ self.set_python_instance_state(instance, state)
+ if listitems:
+ instance.extend(listitems)
+ if dictitems:
+ for key in dictitems:
+ instance[key] = dictitems[key]
+ return instance
+
+ def construct_python_object_new(self, suffix, node):
+ return self.construct_python_object_apply(suffix, node, newobj=True)
+
+Constructor.add_constructor(
+ 'tag:yaml.org,2002:python/none',
+ Constructor.construct_yaml_null)
+
+Constructor.add_constructor(
+ 'tag:yaml.org,2002:python/bool',
+ Constructor.construct_yaml_bool)
+
+Constructor.add_constructor(
+ 'tag:yaml.org,2002:python/str',
+ Constructor.construct_python_str)
+
+Constructor.add_constructor(
+ 'tag:yaml.org,2002:python/unicode',
+ Constructor.construct_python_unicode)
+
+Constructor.add_constructor(
+ 'tag:yaml.org,2002:python/bytes',
+ Constructor.construct_python_bytes)
+
+Constructor.add_constructor(
+ 'tag:yaml.org,2002:python/int',
+ Constructor.construct_yaml_int)
+
+Constructor.add_constructor(
+ 'tag:yaml.org,2002:python/long',
+ Constructor.construct_python_long)
+
+Constructor.add_constructor(
+ 'tag:yaml.org,2002:python/float',
+ Constructor.construct_yaml_float)
+
+Constructor.add_constructor(
+ 'tag:yaml.org,2002:python/complex',
+ Constructor.construct_python_complex)
+
+Constructor.add_constructor(
+ 'tag:yaml.org,2002:python/list',
+ Constructor.construct_yaml_seq)
+
+Constructor.add_constructor(
+ 'tag:yaml.org,2002:python/tuple',
+ Constructor.construct_python_tuple)
+
+Constructor.add_constructor(
+ 'tag:yaml.org,2002:python/dict',
+ Constructor.construct_yaml_map)
+
+Constructor.add_multi_constructor(
+ 'tag:yaml.org,2002:python/name:',
+ Constructor.construct_python_name)
+
+Constructor.add_multi_constructor(
+ 'tag:yaml.org,2002:python/module:',
+ Constructor.construct_python_module)
+
+Constructor.add_multi_constructor(
+ 'tag:yaml.org,2002:python/object:',
+ Constructor.construct_python_object)
+
+Constructor.add_multi_constructor(
+ 'tag:yaml.org,2002:python/object/apply:',
+ Constructor.construct_python_object_apply)
+
+Constructor.add_multi_constructor(
+ 'tag:yaml.org,2002:python/object/new:',
+ Constructor.construct_python_object_new)
+
diff --git a/collectors/python.d.plugin/python_modules/pyyaml3/cyaml.py b/collectors/python.d.plugin/python_modules/pyyaml3/cyaml.py
new file mode 100644
index 0000000..e6c16d8
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/pyyaml3/cyaml.py
@@ -0,0 +1,86 @@
+# SPDX-License-Identifier: MIT
+
+__all__ = ['CBaseLoader', 'CSafeLoader', 'CLoader',
+ 'CBaseDumper', 'CSafeDumper', 'CDumper']
+
+from _yaml import CParser, CEmitter
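+# LibYAML-backed loaders/dumpers; the import above fails when the _yaml
+# C extension is absent, and pyyaml3/__init__ catches that ImportError.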
+
+from .constructor import *
+
+from .serializer import *
+from .representer import *
+
+from .resolver import *
+
+class CBaseLoader(CParser, BaseConstructor, BaseResolver):
+
+ def __init__(self, stream):
+ CParser.__init__(self, stream)
+ BaseConstructor.__init__(self)
+ BaseResolver.__init__(self)
+
+class CSafeLoader(CParser, SafeConstructor, Resolver):
+
+ def __init__(self, stream):
+ CParser.__init__(self, stream)
+ SafeConstructor.__init__(self)
+ Resolver.__init__(self)
+
+class CLoader(CParser, Constructor, Resolver):
+
+ def __init__(self, stream):
+ CParser.__init__(self, stream)
+ Constructor.__init__(self)
+ Resolver.__init__(self)
+
+class CBaseDumper(CEmitter, BaseRepresenter, BaseResolver):
+
+ def __init__(self, stream,
+ default_style=None, default_flow_style=None,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding=None, explicit_start=None, explicit_end=None,
+ version=None, tags=None):
+ CEmitter.__init__(self, stream, canonical=canonical,
+ indent=indent, width=width, encoding=encoding,
+ allow_unicode=allow_unicode, line_break=line_break,
+ explicit_start=explicit_start, explicit_end=explicit_end,
+ version=version, tags=tags)
+ Representer.__init__(self, default_style=default_style,
+ default_flow_style=default_flow_style)
+ Resolver.__init__(self)
+
+class CSafeDumper(CEmitter, SafeRepresenter, Resolver):
+
+ def __init__(self, stream,
+ default_style=None, default_flow_style=None,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding=None, explicit_start=None, explicit_end=None,
+ version=None, tags=None):
+ CEmitter.__init__(self, stream, canonical=canonical,
+ indent=indent, width=width, encoding=encoding,
+ allow_unicode=allow_unicode, line_break=line_break,
+ explicit_start=explicit_start, explicit_end=explicit_end,
+ version=version, tags=tags)
+ SafeRepresenter.__init__(self, default_style=default_style,
+ default_flow_style=default_flow_style)
+ Resolver.__init__(self)
+
+class CDumper(CEmitter, Serializer, Representer, Resolver):
+
+ def __init__(self, stream,
+ default_style=None, default_flow_style=None,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding=None, explicit_start=None, explicit_end=None,
+ version=None, tags=None):
+ CEmitter.__init__(self, stream, canonical=canonical,
+ indent=indent, width=width, encoding=encoding,
+ allow_unicode=allow_unicode, line_break=line_break,
+ explicit_start=explicit_start, explicit_end=explicit_end,
+ version=version, tags=tags)
+ Representer.__init__(self, default_style=default_style,
+ default_flow_style=default_flow_style)
+ Resolver.__init__(self)
+
diff --git a/collectors/python.d.plugin/python_modules/pyyaml3/dumper.py b/collectors/python.d.plugin/python_modules/pyyaml3/dumper.py
new file mode 100644
index 0000000..ba590c6
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/pyyaml3/dumper.py
@@ -0,0 +1,63 @@
+# SPDX-License-Identifier: MIT
+
+__all__ = ['BaseDumper', 'SafeDumper', 'Dumper']
+
+from .emitter import *
+from .serializer import *
+from .representer import *
+from .resolver import *
+
+class BaseDumper(Emitter, Serializer, BaseRepresenter, BaseResolver):
+
+ def __init__(self, stream,
+ default_style=None, default_flow_style=None,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding=None, explicit_start=None, explicit_end=None,
+ version=None, tags=None):
+ Emitter.__init__(self, stream, canonical=canonical,
+ indent=indent, width=width,
+ allow_unicode=allow_unicode, line_break=line_break)
+ Serializer.__init__(self, encoding=encoding,
+ explicit_start=explicit_start, explicit_end=explicit_end,
+ version=version, tags=tags)
+ Representer.__init__(self, default_style=default_style,
+ default_flow_style=default_flow_style)
+ Resolver.__init__(self)
+
+class SafeDumper(Emitter, Serializer, SafeRepresenter, Resolver):
+
+ def __init__(self, stream,
+ default_style=None, default_flow_style=None,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding=None, explicit_start=None, explicit_end=None,
+ version=None, tags=None):
+ Emitter.__init__(self, stream, canonical=canonical,
+ indent=indent, width=width,
+ allow_unicode=allow_unicode, line_break=line_break)
+ Serializer.__init__(self, encoding=encoding,
+ explicit_start=explicit_start, explicit_end=explicit_end,
+ version=version, tags=tags)
+ SafeRepresenter.__init__(self, default_style=default_style,
+ default_flow_style=default_flow_style)
+ Resolver.__init__(self)
+
+class Dumper(Emitter, Serializer, Representer, Resolver):
+
+ def __init__(self, stream,
+ default_style=None, default_flow_style=None,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding=None, explicit_start=None, explicit_end=None,
+ version=None, tags=None):
+ Emitter.__init__(self, stream, canonical=canonical,
+ indent=indent, width=width,
+ allow_unicode=allow_unicode, line_break=line_break)
+ Serializer.__init__(self, encoding=encoding,
+ explicit_start=explicit_start, explicit_end=explicit_end,
+ version=version, tags=tags)
+ Representer.__init__(self, default_style=default_style,
+ default_flow_style=default_flow_style)
+ Resolver.__init__(self)
+
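+# Usage sketch (illustrative, not part of the upstream source; it assumes
+# Serializer provides open()/serialize()/close() and BaseRepresenter provides
+# represent(), which serializes the representation of one object):
+#
+#   dumper = SafeDumper(stream)
+#   dumper.open()
+#   dumper.represent({'hosts': ['a', 'b']})
+#   dumper.close()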
diff --git a/collectors/python.d.plugin/python_modules/pyyaml3/emitter.py b/collectors/python.d.plugin/python_modules/pyyaml3/emitter.py
new file mode 100644
index 0000000..d4be65a
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/pyyaml3/emitter.py
@@ -0,0 +1,1138 @@
+# SPDX-License-Identifier: MIT
+
+# Emitter expects events obeying the following grammar:
+# stream ::= STREAM-START document* STREAM-END
+# document ::= DOCUMENT-START node DOCUMENT-END
+# node ::= SCALAR | sequence | mapping
+# sequence ::= SEQUENCE-START node* SEQUENCE-END
+# mapping ::= MAPPING-START (node node)* MAPPING-END
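+#
+# For example (an illustrative sketch, not part of the upstream source), the
+# document `{a: 1}` is emitted from the event sequence:
+#   STREAM-START DOCUMENT-START MAPPING-START SCALAR(a) SCALAR(1)
+#   MAPPING-END DOCUMENT-END STREAM-END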
+
+__all__ = ['Emitter', 'EmitterError']
+
+from .error import YAMLError
+from .events import *
+
+class EmitterError(YAMLError):
+ pass
+
+class ScalarAnalysis:
+ def __init__(self, scalar, empty, multiline,
+ allow_flow_plain, allow_block_plain,
+ allow_single_quoted, allow_double_quoted,
+ allow_block):
+ self.scalar = scalar
+ self.empty = empty
+ self.multiline = multiline
+ self.allow_flow_plain = allow_flow_plain
+ self.allow_block_plain = allow_block_plain
+ self.allow_single_quoted = allow_single_quoted
+ self.allow_double_quoted = allow_double_quoted
+ self.allow_block = allow_block
+
+class Emitter:
+
+ DEFAULT_TAG_PREFIXES = {
+ '!' : '!',
+ 'tag:yaml.org,2002:' : '!!',
+ }
+
+ def __init__(self, stream, canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None):
+
+ # The stream should have the methods `write` and possibly `flush`.
+ self.stream = stream
+
+ # Encoding can be overridden by STREAM-START.
+ self.encoding = None
+
+ # Emitter is a state machine with a stack of states to handle nested
+ # structures.
+ self.states = []
+ self.state = self.expect_stream_start
+
+ # Current event and the event queue.
+ self.events = []
+ self.event = None
+
+ # The current indentation level and the stack of previous indents.
+ self.indents = []
+ self.indent = None
+
+ # Flow level.
+ self.flow_level = 0
+
+ # Contexts.
+ self.root_context = False
+ self.sequence_context = False
+ self.mapping_context = False
+ self.simple_key_context = False
+
+ # Characteristics of the last emitted character:
+ # - current position.
+ # - is it a whitespace?
+ # - is it an indention character
+ # (indentation space, '-', '?', or ':')?
+ self.line = 0
+ self.column = 0
+ self.whitespace = True
+ self.indention = True
+
+ # Whether the document requires an explicit document indicator.
+ self.open_ended = False
+
+ # Formatting details.
+ self.canonical = canonical
+ self.allow_unicode = allow_unicode
+ self.best_indent = 2
+ if indent and 1 < indent < 10:
+ self.best_indent = indent
+ self.best_width = 80
+ if width and width > self.best_indent*2:
+ self.best_width = width
+ self.best_line_break = '\n'
+ if line_break in ['\r', '\n', '\r\n']:
+ self.best_line_break = line_break
+
+ # Tag prefixes.
+ self.tag_prefixes = None
+
+ # Prepared anchor and tag.
+ self.prepared_anchor = None
+ self.prepared_tag = None
+
+ # Scalar analysis and style.
+ self.analysis = None
+ self.style = None
+
+ def dispose(self):
+ # Reset the state attributes (to clear self-references)
+ self.states = []
+ self.state = None
+
+ def emit(self, event):
+ self.events.append(event)
+ while not self.need_more_events():
+ self.event = self.events.pop(0)
+ self.state()
+ self.event = None
+
+ # In some cases, we wait for a few more events before emitting: a
+ # document or collection start is held back until enough following
+ # events arrive to decide its formatting (see the check_empty_*
+ # helpers below, which peek at the queued events).
+
+ def need_more_events(self):
+ if not self.events:
+ return True
+ event = self.events[0]
+ if isinstance(event, DocumentStartEvent):
+ return self.need_events(1)
+ elif isinstance(event, SequenceStartEvent):
+ return self.need_events(2)
+ elif isinstance(event, MappingStartEvent):
+ return self.need_events(3)
+ else:
+ return False
+
+ def need_events(self, count):
+ level = 0
+ for event in self.events[1:]:
+ if isinstance(event, (DocumentStartEvent, CollectionStartEvent)):
+ level += 1
+ elif isinstance(event, (DocumentEndEvent, CollectionEndEvent)):
+ level -= 1
+ elif isinstance(event, StreamEndEvent):
+ level = -1
+ if level < 0:
+ return False
+ return (len(self.events) < count+1)
+
+ def increase_indent(self, flow=False, indentless=False):
+ self.indents.append(self.indent)
+ if self.indent is None:
+ if flow:
+ self.indent = self.best_indent
+ else:
+ self.indent = 0
+ elif not indentless:
+ self.indent += self.best_indent
+
+ # States.
+
+ # Stream handlers.
+
+ def expect_stream_start(self):
+ if isinstance(self.event, StreamStartEvent):
+ if self.event.encoding and not hasattr(self.stream, 'encoding'):
+ self.encoding = self.event.encoding
+ self.write_stream_start()
+ self.state = self.expect_first_document_start
+ else:
+ raise EmitterError("expected StreamStartEvent, but got %s"
+ % self.event)
+
+ def expect_nothing(self):
+ raise EmitterError("expected nothing, but got %s" % self.event)
+
+ # Document handlers.
+
+ def expect_first_document_start(self):
+ return self.expect_document_start(first=True)
+
+ def expect_document_start(self, first=False):
+ if isinstance(self.event, DocumentStartEvent):
+ if (self.event.version or self.event.tags) and self.open_ended:
+ self.write_indicator('...', True)
+ self.write_indent()
+ if self.event.version:
+ version_text = self.prepare_version(self.event.version)
+ self.write_version_directive(version_text)
+ self.tag_prefixes = self.DEFAULT_TAG_PREFIXES.copy()
+ if self.event.tags:
+ handles = sorted(self.event.tags.keys())
+ for handle in handles:
+ prefix = self.event.tags[handle]
+ self.tag_prefixes[prefix] = handle
+ handle_text = self.prepare_tag_handle(handle)
+ prefix_text = self.prepare_tag_prefix(prefix)
+ self.write_tag_directive(handle_text, prefix_text)
+ implicit = (first and not self.event.explicit and not self.canonical
+ and not self.event.version and not self.event.tags
+ and not self.check_empty_document())
+ if not implicit:
+ self.write_indent()
+ self.write_indicator('---', True)
+ if self.canonical:
+ self.write_indent()
+ self.state = self.expect_document_root
+ elif isinstance(self.event, StreamEndEvent):
+ if self.open_ended:
+ self.write_indicator('...', True)
+ self.write_indent()
+ self.write_stream_end()
+ self.state = self.expect_nothing
+ else:
+ raise EmitterError("expected DocumentStartEvent, but got %s"
+ % self.event)
+
+ def expect_document_end(self):
+ if isinstance(self.event, DocumentEndEvent):
+ self.write_indent()
+ if self.event.explicit:
+ self.write_indicator('...', True)
+ self.write_indent()
+ self.flush_stream()
+ self.state = self.expect_document_start
+ else:
+ raise EmitterError("expected DocumentEndEvent, but got %s"
+ % self.event)
+
+ def expect_document_root(self):
+ self.states.append(self.expect_document_end)
+ self.expect_node(root=True)
+
+ # Node handlers.
+
+ def expect_node(self, root=False, sequence=False, mapping=False,
+ simple_key=False):
+ self.root_context = root
+ self.sequence_context = sequence
+ self.mapping_context = mapping
+ self.simple_key_context = simple_key
+ if isinstance(self.event, AliasEvent):
+ self.expect_alias()
+ elif isinstance(self.event, (ScalarEvent, CollectionStartEvent)):
+ self.process_anchor('&')
+ self.process_tag()
+ if isinstance(self.event, ScalarEvent):
+ self.expect_scalar()
+ elif isinstance(self.event, SequenceStartEvent):
+ if self.flow_level or self.canonical or self.event.flow_style \
+ or self.check_empty_sequence():
+ self.expect_flow_sequence()
+ else:
+ self.expect_block_sequence()
+ elif isinstance(self.event, MappingStartEvent):
+ if self.flow_level or self.canonical or self.event.flow_style \
+ or self.check_empty_mapping():
+ self.expect_flow_mapping()
+ else:
+ self.expect_block_mapping()
+ else:
+ raise EmitterError("expected NodeEvent, but got %s" % self.event)
+
+ def expect_alias(self):
+ if self.event.anchor is None:
+ raise EmitterError("anchor is not specified for alias")
+ self.process_anchor('*')
+ self.state = self.states.pop()
+
+ def expect_scalar(self):
+ self.increase_indent(flow=True)
+ self.process_scalar()
+ self.indent = self.indents.pop()
+ self.state = self.states.pop()
+
+ # Flow sequence handlers.
+
+ def expect_flow_sequence(self):
+ self.write_indicator('[', True, whitespace=True)
+ self.flow_level += 1
+ self.increase_indent(flow=True)
+ self.state = self.expect_first_flow_sequence_item
+
+ def expect_first_flow_sequence_item(self):
+ if isinstance(self.event, SequenceEndEvent):
+ self.indent = self.indents.pop()
+ self.flow_level -= 1
+ self.write_indicator(']', False)
+ self.state = self.states.pop()
+ else:
+ if self.canonical or self.column > self.best_width:
+ self.write_indent()
+ self.states.append(self.expect_flow_sequence_item)
+ self.expect_node(sequence=True)
+
+ def expect_flow_sequence_item(self):
+ if isinstance(self.event, SequenceEndEvent):
+ self.indent = self.indents.pop()
+ self.flow_level -= 1
+ if self.canonical:
+ self.write_indicator(',', False)
+ self.write_indent()
+ self.write_indicator(']', False)
+ self.state = self.states.pop()
+ else:
+ self.write_indicator(',', False)
+ if self.canonical or self.column > self.best_width:
+ self.write_indent()
+ self.states.append(self.expect_flow_sequence_item)
+ self.expect_node(sequence=True)
+
+ # Flow mapping handlers.
+
+ def expect_flow_mapping(self):
+ self.write_indicator('{', True, whitespace=True)
+ self.flow_level += 1
+ self.increase_indent(flow=True)
+ self.state = self.expect_first_flow_mapping_key
+
+ def expect_first_flow_mapping_key(self):
+ if isinstance(self.event, MappingEndEvent):
+ self.indent = self.indents.pop()
+ self.flow_level -= 1
+ self.write_indicator('}', False)
+ self.state = self.states.pop()
+ else:
+ if self.canonical or self.column > self.best_width:
+ self.write_indent()
+ if not self.canonical and self.check_simple_key():
+ self.states.append(self.expect_flow_mapping_simple_value)
+ self.expect_node(mapping=True, simple_key=True)
+ else:
+ self.write_indicator('?', True)
+ self.states.append(self.expect_flow_mapping_value)
+ self.expect_node(mapping=True)
+
+ def expect_flow_mapping_key(self):
+ if isinstance(self.event, MappingEndEvent):
+ self.indent = self.indents.pop()
+ self.flow_level -= 1
+ if self.canonical:
+ self.write_indicator(',', False)
+ self.write_indent()
+ self.write_indicator('}', False)
+ self.state = self.states.pop()
+ else:
+ self.write_indicator(',', False)
+ if self.canonical or self.column > self.best_width:
+ self.write_indent()
+ if not self.canonical and self.check_simple_key():
+ self.states.append(self.expect_flow_mapping_simple_value)
+ self.expect_node(mapping=True, simple_key=True)
+ else:
+ self.write_indicator('?', True)
+ self.states.append(self.expect_flow_mapping_value)
+ self.expect_node(mapping=True)
+
+ def expect_flow_mapping_simple_value(self):
+ self.write_indicator(':', False)
+ self.states.append(self.expect_flow_mapping_key)
+ self.expect_node(mapping=True)
+
+ def expect_flow_mapping_value(self):
+ if self.canonical or self.column > self.best_width:
+ self.write_indent()
+ self.write_indicator(':', True)
+ self.states.append(self.expect_flow_mapping_key)
+ self.expect_node(mapping=True)
+
+ # Block sequence handlers.
+
+ def expect_block_sequence(self):
+ indentless = (self.mapping_context and not self.indention)
+ self.increase_indent(flow=False, indentless=indentless)
+ self.state = self.expect_first_block_sequence_item
+
+ def expect_first_block_sequence_item(self):
+ return self.expect_block_sequence_item(first=True)
+
+ def expect_block_sequence_item(self, first=False):
+ if not first and isinstance(self.event, SequenceEndEvent):
+ self.indent = self.indents.pop()
+ self.state = self.states.pop()
+ else:
+ self.write_indent()
+ self.write_indicator('-', True, indention=True)
+ self.states.append(self.expect_block_sequence_item)
+ self.expect_node(sequence=True)
+
+ # Block mapping handlers.
+
+ def expect_block_mapping(self):
+ self.increase_indent(flow=False)
+ self.state = self.expect_first_block_mapping_key
+
+ def expect_first_block_mapping_key(self):
+ return self.expect_block_mapping_key(first=True)
+
+ def expect_block_mapping_key(self, first=False):
+ if not first and isinstance(self.event, MappingEndEvent):
+ self.indent = self.indents.pop()
+ self.state = self.states.pop()
+ else:
+ self.write_indent()
+ if self.check_simple_key():
+ self.states.append(self.expect_block_mapping_simple_value)
+ self.expect_node(mapping=True, simple_key=True)
+ else:
+ self.write_indicator('?', True, indention=True)
+ self.states.append(self.expect_block_mapping_value)
+ self.expect_node(mapping=True)
+
+ def expect_block_mapping_simple_value(self):
+ self.write_indicator(':', False)
+ self.states.append(self.expect_block_mapping_key)
+ self.expect_node(mapping=True)
+
+ def expect_block_mapping_value(self):
+ self.write_indent()
+ self.write_indicator(':', True, indention=True)
+ self.states.append(self.expect_block_mapping_key)
+ self.expect_node(mapping=True)
+
+ # Checkers.
+
+ def check_empty_sequence(self):
+ return (isinstance(self.event, SequenceStartEvent) and self.events
+ and isinstance(self.events[0], SequenceEndEvent))
+
+ def check_empty_mapping(self):
+ return (isinstance(self.event, MappingStartEvent) and self.events
+ and isinstance(self.events[0], MappingEndEvent))
+
+ def check_empty_document(self):
+ if not isinstance(self.event, DocumentStartEvent) or not self.events:
+ return False
+ event = self.events[0]
+ return (isinstance(event, ScalarEvent) and event.anchor is None
+ and event.tag is None and event.implicit and event.value == '')
+
+ def check_simple_key(self):
+ length = 0
+ if isinstance(self.event, NodeEvent) and self.event.anchor is not None:
+ if self.prepared_anchor is None:
+ self.prepared_anchor = self.prepare_anchor(self.event.anchor)
+ length += len(self.prepared_anchor)
+ if isinstance(self.event, (ScalarEvent, CollectionStartEvent)) \
+ and self.event.tag is not None:
+ if self.prepared_tag is None:
+ self.prepared_tag = self.prepare_tag(self.event.tag)
+ length += len(self.prepared_tag)
+ if isinstance(self.event, ScalarEvent):
+ if self.analysis is None:
+ self.analysis = self.analyze_scalar(self.event.value)
+ length += len(self.analysis.scalar)
+ return (length < 128 and (isinstance(self.event, AliasEvent)
+ or (isinstance(self.event, ScalarEvent)
+ and not self.analysis.empty and not self.analysis.multiline)
+ or self.check_empty_sequence() or self.check_empty_mapping()))
+
+ # Anchor, Tag, and Scalar processors.
+
+ def process_anchor(self, indicator):
+ if self.event.anchor is None:
+ self.prepared_anchor = None
+ return
+ if self.prepared_anchor is None:
+ self.prepared_anchor = self.prepare_anchor(self.event.anchor)
+ if self.prepared_anchor:
+ self.write_indicator(indicator+self.prepared_anchor, True)
+ self.prepared_anchor = None
+
+ def process_tag(self):
+ tag = self.event.tag
+ if isinstance(self.event, ScalarEvent):
+ if self.style is None:
+ self.style = self.choose_scalar_style()
+ if ((not self.canonical or tag is None) and
+ ((self.style == '' and self.event.implicit[0])
+ or (self.style != '' and self.event.implicit[1]))):
+ self.prepared_tag = None
+ return
+ if self.event.implicit[0] and tag is None:
+ tag = '!'
+ self.prepared_tag = None
+ else:
+ if (not self.canonical or tag is None) and self.event.implicit:
+ self.prepared_tag = None
+ return
+ if tag is None:
+ raise EmitterError("tag is not specified")
+ if self.prepared_tag is None:
+ self.prepared_tag = self.prepare_tag(tag)
+ if self.prepared_tag:
+ self.write_indicator(self.prepared_tag, True)
+ self.prepared_tag = None
+
+ def choose_scalar_style(self):
+ if self.analysis is None:
+ self.analysis = self.analyze_scalar(self.event.value)
+ if self.event.style == '"' or self.canonical:
+ return '"'
+ if not self.event.style and self.event.implicit[0]:
+ if (not (self.simple_key_context and
+ (self.analysis.empty or self.analysis.multiline))
+ and (self.flow_level and self.analysis.allow_flow_plain
+ or (not self.flow_level and self.analysis.allow_block_plain))):
+ return ''
+ if self.event.style and self.event.style in '|>':
+ if (not self.flow_level and not self.simple_key_context
+ and self.analysis.allow_block):
+ return self.event.style
+ if not self.event.style or self.event.style == '\'':
+ if (self.analysis.allow_single_quoted and
+ not (self.simple_key_context and self.analysis.multiline)):
+ return '\''
+ return '"'
+
+ def process_scalar(self):
+ if self.analysis is None:
+ self.analysis = self.analyze_scalar(self.event.value)
+ if self.style is None:
+ self.style = self.choose_scalar_style()
+ split = (not self.simple_key_context)
+ #if self.analysis.multiline and split \
+ # and (not self.style or self.style in '\'\"'):
+ # self.write_indent()
+ if self.style == '"':
+ self.write_double_quoted(self.analysis.scalar, split)
+ elif self.style == '\'':
+ self.write_single_quoted(self.analysis.scalar, split)
+ elif self.style == '>':
+ self.write_folded(self.analysis.scalar)
+ elif self.style == '|':
+ self.write_literal(self.analysis.scalar)
+ else:
+ self.write_plain(self.analysis.scalar, split)
+ self.analysis = None
+ self.style = None
+
+ # Analyzers.
+
+ def prepare_version(self, version):
+ major, minor = version
+ if major != 1:
+ raise EmitterError("unsupported YAML version: %d.%d" % (major, minor))
+ return '%d.%d' % (major, minor)
+
+ def prepare_tag_handle(self, handle):
+ if not handle:
+ raise EmitterError("tag handle must not be empty")
+ if handle[0] != '!' or handle[-1] != '!':
+ raise EmitterError("tag handle must start and end with '!': %r" % handle)
+ for ch in handle[1:-1]:
+ if not ('0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \
+ or ch in '-_'):
+ raise EmitterError("invalid character %r in the tag handle: %r"
+ % (ch, handle))
+ return handle
+
+ def prepare_tag_prefix(self, prefix):
+ if not prefix:
+ raise EmitterError("tag prefix must not be empty")
+ chunks = []
+ start = end = 0
+ if prefix[0] == '!':
+ end = 1
+ while end < len(prefix):
+ ch = prefix[end]
+ if '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \
+ or ch in '-;/?!:@&=+$,_.~*\'()[]':
+ end += 1
+ else:
+ if start < end:
+ chunks.append(prefix[start:end])
+ start = end = end+1
+ data = ch.encode('utf-8')
+ for ch in data:
+ chunks.append('%%%02X' % ch)  # iterating bytes yields ints in Python 3
+ if start < end:
+ chunks.append(prefix[start:end])
+ return ''.join(chunks)
+
+ def prepare_tag(self, tag):
+ if not tag:
+ raise EmitterError("tag must not be empty")
+ if tag == '!':
+ return tag
+ handle = None
+ suffix = tag
+ prefixes = sorted(self.tag_prefixes.keys())
+ for prefix in prefixes:
+ if tag.startswith(prefix) \
+ and (prefix == '!' or len(prefix) < len(tag)):
+ handle = self.tag_prefixes[prefix]
+ suffix = tag[len(prefix):]
+ chunks = []
+ start = end = 0
+ while end < len(suffix):
+ ch = suffix[end]
+ if '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \
+ or ch in '-;/?:@&=+$,_.~*\'()[]' \
+ or (ch == '!' and handle != '!'):
+ end += 1
+ else:
+ if start < end:
+ chunks.append(suffix[start:end])
+ start = end = end+1
+ data = ch.encode('utf-8')
+ for ch in data:
+ chunks.append('%%%02X' % ch)  # iterating bytes yields ints in Python 3
+ if start < end:
+ chunks.append(suffix[start:end])
+ suffix_text = ''.join(chunks)
+ if handle:
+ return '%s%s' % (handle, suffix_text)
+ else:
+ return '!<%s>' % suffix_text
+
+ def prepare_anchor(self, anchor):
+ if not anchor:
+ raise EmitterError("anchor must not be empty")
+ for ch in anchor:
+ if not ('0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \
+ or ch in '-_'):
+ raise EmitterError("invalid character %r in the anchor: %r"
+ % (ch, anchor))
+ return anchor
+
+ def analyze_scalar(self, scalar):
+
+ # Empty scalar is a special case.
+ if not scalar:
+ return ScalarAnalysis(scalar=scalar, empty=True, multiline=False,
+ allow_flow_plain=False, allow_block_plain=True,
+ allow_single_quoted=True, allow_double_quoted=True,
+ allow_block=False)
+
+ # Indicators and special characters.
+ block_indicators = False
+ flow_indicators = False
+ line_breaks = False
+ special_characters = False
+
+ # Important whitespace combinations.
+ leading_space = False
+ leading_break = False
+ trailing_space = False
+ trailing_break = False
+ break_space = False
+ space_break = False
+
+ # Check document indicators.
+ if scalar.startswith('---') or scalar.startswith('...'):
+ block_indicators = True
+ flow_indicators = True
+
+ # First character or preceded by a whitespace.
+ preceded_by_whitespace = True
+
+ # Last character or followed by a whitespace.
+ followed_by_whitespace = (len(scalar) == 1 or
+ scalar[1] in '\0 \t\r\n\x85\u2028\u2029')
+
+ # The previous character is a space.
+ previous_space = False
+
+ # The previous character is a break.
+ previous_break = False
+
+ index = 0
+ while index < len(scalar):
+ ch = scalar[index]
+
+ # Check for indicators.
+ if index == 0:
+ # Leading indicators are special characters.
+ if ch in '#,[]{}&*!|>\'\"%@`':
+ flow_indicators = True
+ block_indicators = True
+ if ch in '?:':
+ flow_indicators = True
+ if followed_by_whitespace:
+ block_indicators = True
+ if ch == '-' and followed_by_whitespace:
+ flow_indicators = True
+ block_indicators = True
+ else:
+ # Some indicators also cannot appear within a scalar.
+ if ch in ',?[]{}':
+ flow_indicators = True
+ if ch == ':':
+ flow_indicators = True
+ if followed_by_whitespace:
+ block_indicators = True
+ if ch == '#' and preceded_by_whitespace:
+ flow_indicators = True
+ block_indicators = True
+
+ # Check for line breaks, special, and unicode characters.
+ if ch in '\n\x85\u2028\u2029':
+ line_breaks = True
+ if not (ch == '\n' or '\x20' <= ch <= '\x7E'):
+ if (ch == '\x85' or '\xA0' <= ch <= '\uD7FF'
+ or '\uE000' <= ch <= '\uFFFD') and ch != '\uFEFF':
+ unicode_characters = True
+ if not self.allow_unicode:
+ special_characters = True
+ else:
+ special_characters = True
+
+ # Detect important whitespace combinations.
+ if ch == ' ':
+ if index == 0:
+ leading_space = True
+ if index == len(scalar)-1:
+ trailing_space = True
+ if previous_break:
+ break_space = True
+ previous_space = True
+ previous_break = False
+ elif ch in '\n\x85\u2028\u2029':
+ if index == 0:
+ leading_break = True
+ if index == len(scalar)-1:
+ trailing_break = True
+ if previous_space:
+ space_break = True
+ previous_space = False
+ previous_break = True
+ else:
+ previous_space = False
+ previous_break = False
+
+ # Prepare for the next character.
+ index += 1
+ preceded_by_whitespace = (ch in '\0 \t\r\n\x85\u2028\u2029')
+ followed_by_whitespace = (index+1 >= len(scalar) or
+ scalar[index+1] in '\0 \t\r\n\x85\u2028\u2029')
+
+ # Let's decide what styles are allowed.
+ allow_flow_plain = True
+ allow_block_plain = True
+ allow_single_quoted = True
+ allow_double_quoted = True
+ allow_block = True
+
+ # Leading and trailing whitespace is bad for plain scalars.
+ if (leading_space or leading_break
+ or trailing_space or trailing_break):
+ allow_flow_plain = allow_block_plain = False
+
+ # We do not permit trailing spaces for block scalars.
+ if trailing_space:
+ allow_block = False
+
+ # Spaces at the beginning of a new line are only acceptable for block
+ # scalars.
+ if break_space:
+ allow_flow_plain = allow_block_plain = allow_single_quoted = False
+
+ # Spaces followed by breaks, as well as special characters, are only
+ # allowed for double-quoted scalars.
+ if space_break or special_characters:
+ allow_flow_plain = allow_block_plain = \
+ allow_single_quoted = allow_block = False
+
+ # Although the plain scalar writer supports breaks, we never emit
+ # multiline plain scalars.
+ if line_breaks:
+ allow_flow_plain = allow_block_plain = False
+
+ # Flow indicators are forbidden for flow plain scalars.
+ if flow_indicators:
+ allow_flow_plain = False
+
+ # Block indicators are forbidden for block plain scalars.
+ if block_indicators:
+ allow_block_plain = False
+
+ return ScalarAnalysis(scalar=scalar,
+ empty=False, multiline=line_breaks,
+ allow_flow_plain=allow_flow_plain,
+ allow_block_plain=allow_block_plain,
+ allow_single_quoted=allow_single_quoted,
+ allow_double_quoted=allow_double_quoted,
+ allow_block=allow_block)
+
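+ # Illustratively (a sketch, not part of the upstream source):
+ # analyze_scalar('a: b') finds ':' followed by a space, which sets both
+ # the flow and block indicators, so both plain styles are disallowed and
+ # choose_scalar_style() falls back to a quoted style.
+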
+ # Writers.
+
+ def flush_stream(self):
+ if hasattr(self.stream, 'flush'):
+ self.stream.flush()
+
+ def write_stream_start(self):
+ # Write BOM if needed.
+ if self.encoding and self.encoding.startswith('utf-16'):
+ self.stream.write('\uFEFF'.encode(self.encoding))
+
+ def write_stream_end(self):
+ self.flush_stream()
+
+ def write_indicator(self, indicator, need_whitespace,
+ whitespace=False, indention=False):
+ if self.whitespace or not need_whitespace:
+ data = indicator
+ else:
+ data = ' '+indicator
+ self.whitespace = whitespace
+ self.indention = self.indention and indention
+ self.column += len(data)
+ self.open_ended = False
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+
+ def write_indent(self):
+ indent = self.indent or 0
+ if not self.indention or self.column > indent \
+ or (self.column == indent and not self.whitespace):
+ self.write_line_break()
+ if self.column < indent:
+ self.whitespace = True
+ data = ' '*(indent-self.column)
+ self.column = indent
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+
+ def write_line_break(self, data=None):
+ if data is None:
+ data = self.best_line_break
+ self.whitespace = True
+ self.indention = True
+ self.line += 1
+ self.column = 0
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+
+ def write_version_directive(self, version_text):
+ data = '%%YAML %s' % version_text
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ self.write_line_break()
+
+ def write_tag_directive(self, handle_text, prefix_text):
+ data = '%%TAG %s %s' % (handle_text, prefix_text)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ self.write_line_break()
+
+ # Scalar streams.
+
+ def write_single_quoted(self, text, split=True):
+ self.write_indicator('\'', True)
+ spaces = False
+ breaks = False
+ start = end = 0
+ while end <= len(text):
+ ch = None
+ if end < len(text):
+ ch = text[end]
+ if spaces:
+ if ch is None or ch != ' ':
+ if start+1 == end and self.column > self.best_width and split \
+ and start != 0 and end != len(text):
+ self.write_indent()
+ else:
+ data = text[start:end]
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end
+ elif breaks:
+ if ch is None or ch not in '\n\x85\u2028\u2029':
+ if text[start] == '\n':
+ self.write_line_break()
+ for br in text[start:end]:
+ if br == '\n':
+ self.write_line_break()
+ else:
+ self.write_line_break(br)
+ self.write_indent()
+ start = end
+ else:
+ if ch is None or ch in ' \n\x85\u2028\u2029' or ch == '\'':
+ if start < end:
+ data = text[start:end]
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end
+ if ch == '\'':
+ data = '\'\''
+ self.column += 2
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end + 1
+ if ch is not None:
+ spaces = (ch == ' ')
+ breaks = (ch in '\n\x85\u2028\u2029')
+ end += 1
+ self.write_indicator('\'', False)
+
+ ESCAPE_REPLACEMENTS = {
+ '\0': '0',
+ '\x07': 'a',
+ '\x08': 'b',
+ '\x09': 't',
+ '\x0A': 'n',
+ '\x0B': 'v',
+ '\x0C': 'f',
+ '\x0D': 'r',
+ '\x1B': 'e',
+ '\"': '\"',
+ '\\': '\\',
+ '\x85': 'N',
+ '\xA0': '_',
+ '\u2028': 'L',
+ '\u2029': 'P',
+ }
+
+ def write_double_quoted(self, text, split=True):
+ self.write_indicator('"', True)
+ start = end = 0
+ while end <= len(text):
+ ch = None
+ if end < len(text):
+ ch = text[end]
+ if ch is None or ch in '"\\\x85\u2028\u2029\uFEFF' \
+ or not ('\x20' <= ch <= '\x7E'
+ or (self.allow_unicode
+ and ('\xA0' <= ch <= '\uD7FF'
+ or '\uE000' <= ch <= '\uFFFD'))):
+ if start < end:
+ data = text[start:end]
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end
+ if ch is not None:
+ if ch in self.ESCAPE_REPLACEMENTS:
+ data = '\\'+self.ESCAPE_REPLACEMENTS[ch]
+ elif ch <= '\xFF':
+ data = '\\x%02X' % ord(ch)
+ elif ch <= '\uFFFF':
+ data = '\\u%04X' % ord(ch)
+ else:
+ data = '\\U%08X' % ord(ch)
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end+1
+ if 0 < end < len(text)-1 and (ch == ' ' or start >= end) \
+ and self.column+(end-start) > self.best_width and split:
+ data = text[start:end]+'\\'
+ if start < end:
+ start = end
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ self.write_indent()
+ self.whitespace = False
+ self.indention = False
+ if text[start] == ' ':
+ data = '\\'
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ end += 1
+ self.write_indicator('"', False)
+
+ def determine_block_hints(self, text):
+ hints = ''
+ if text:
+ if text[0] in ' \n\x85\u2028\u2029':
+ hints += str(self.best_indent)
+ if text[-1] not in '\n\x85\u2028\u2029':
+ hints += '-'
+ elif len(text) == 1 or text[-2] in '\n\x85\u2028\u2029':
+ hints += '+'
+ return hints
+
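+ # Illustrative hint values (a sketch assuming best_indent == 2, not part
+ # of the upstream source):
+ #   determine_block_hints('x')     -> '-'  (no trailing break: strip)
+ #   determine_block_hints('x\n\n') -> '+'  (extra trailing breaks: keep)
+ #   determine_block_hints(' x\n')  -> '2'  (leading space needs an explicit
+ #                                           indentation indicator)
+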
+ def write_folded(self, text):
+ hints = self.determine_block_hints(text)
+ self.write_indicator('>'+hints, True)
+ if hints[-1:] == '+':
+ self.open_ended = True
+ self.write_line_break()
+ leading_space = True
+ spaces = False
+ breaks = True
+ start = end = 0
+ while end <= len(text):
+ ch = None
+ if end < len(text):
+ ch = text[end]
+ if breaks:
+ if ch is None or ch not in '\n\x85\u2028\u2029':
+ if not leading_space and ch is not None and ch != ' ' \
+ and text[start] == '\n':
+ self.write_line_break()
+ leading_space = (ch == ' ')
+ for br in text[start:end]:
+ if br == '\n':
+ self.write_line_break()
+ else:
+ self.write_line_break(br)
+ if ch is not None:
+ self.write_indent()
+ start = end
+ elif spaces:
+ if ch != ' ':
+ if start+1 == end and self.column > self.best_width:
+ self.write_indent()
+ else:
+ data = text[start:end]
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end
+ else:
+ if ch is None or ch in ' \n\x85\u2028\u2029':
+ data = text[start:end]
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ if ch is None:
+ self.write_line_break()
+ start = end
+ if ch is not None:
+ breaks = (ch in '\n\x85\u2028\u2029')
+ spaces = (ch == ' ')
+ end += 1
+
+ def write_literal(self, text):
+ hints = self.determine_block_hints(text)
+ self.write_indicator('|'+hints, True)
+ if hints[-1:] == '+':
+ self.open_ended = True
+ self.write_line_break()
+ breaks = True
+ start = end = 0
+ while end <= len(text):
+ ch = None
+ if end < len(text):
+ ch = text[end]
+ if breaks:
+ if ch is None or ch not in '\n\x85\u2028\u2029':
+ for br in text[start:end]:
+ if br == '\n':
+ self.write_line_break()
+ else:
+ self.write_line_break(br)
+ if ch is not None:
+ self.write_indent()
+ start = end
+ else:
+ if ch is None or ch in '\n\x85\u2028\u2029':
+ data = text[start:end]
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ if ch is None:
+ self.write_line_break()
+ start = end
+ if ch is not None:
+ breaks = (ch in '\n\x85\u2028\u2029')
+ end += 1
+
+ def write_plain(self, text, split=True):
+ if self.root_context:
+ self.open_ended = True
+ if not text:
+ return
+ if not self.whitespace:
+ data = ' '
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ self.whitespace = False
+ self.indention = False
+ spaces = False
+ breaks = False
+ start = end = 0
+ while end <= len(text):
+ ch = None
+ if end < len(text):
+ ch = text[end]
+ if spaces:
+ if ch != ' ':
+ if start+1 == end and self.column > self.best_width and split:
+ self.write_indent()
+ self.whitespace = False
+ self.indention = False
+ else:
+ data = text[start:end]
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end
+ elif breaks:
+ if ch not in '\n\x85\u2028\u2029':
+ if text[start] == '\n':
+ self.write_line_break()
+ for br in text[start:end]:
+ if br == '\n':
+ self.write_line_break()
+ else:
+ self.write_line_break(br)
+ self.write_indent()
+ self.whitespace = False
+ self.indention = False
+ start = end
+ else:
+ if ch is None or ch in ' \n\x85\u2028\u2029':
+ data = text[start:end]
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end
+ if ch is not None:
+ spaces = (ch == ' ')
+ breaks = (ch in '\n\x85\u2028\u2029')
+ end += 1
+
diff --git a/collectors/python.d.plugin/python_modules/pyyaml3/error.py b/collectors/python.d.plugin/python_modules/pyyaml3/error.py
new file mode 100644
index 0000000..5fec7d4
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/pyyaml3/error.py
@@ -0,0 +1,76 @@
+# SPDX-License-Identifier: MIT
+
+__all__ = ['Mark', 'YAMLError', 'MarkedYAMLError']
+
+class Mark:
+
+ def __init__(self, name, index, line, column, buffer, pointer):
+ self.name = name
+ self.index = index
+ self.line = line
+ self.column = column
+ self.buffer = buffer
+ self.pointer = pointer
+
+ def get_snippet(self, indent=4, max_length=75):
+ if self.buffer is None:
+ return None
+ head = ''
+ start = self.pointer
+ while start > 0 and self.buffer[start-1] not in '\0\r\n\x85\u2028\u2029':
+ start -= 1
+ if self.pointer-start > max_length/2-1:
+ head = ' ... '
+ start += 5
+ break
+ tail = ''
+ end = self.pointer
+ while end < len(self.buffer) and self.buffer[end] not in '\0\r\n\x85\u2028\u2029':
+ end += 1
+ if end-self.pointer > max_length/2-1:
+ tail = ' ... '
+ end -= 5
+ break
+ snippet = self.buffer[start:end]
+ return ' '*indent + head + snippet + tail + '\n' \
+ + ' '*(indent+self.pointer-start+len(head)) + '^'
+
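+ # Illustratively (a sketch, not part of the upstream source), get_snippet()
+ # returns the offending buffer line with a caret under the mark, e.g.:
+ #         key: [1, 2
+ #              ^
+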
+ def __str__(self):
+ snippet = self.get_snippet()
+ where = " in \"%s\", line %d, column %d" \
+ % (self.name, self.line+1, self.column+1)
+ if snippet is not None:
+ where += ":\n"+snippet
+ return where
+
+class YAMLError(Exception):
+ pass
+
+class MarkedYAMLError(YAMLError):
+
+ def __init__(self, context=None, context_mark=None,
+ problem=None, problem_mark=None, note=None):
+ self.context = context
+ self.context_mark = context_mark
+ self.problem = problem
+ self.problem_mark = problem_mark
+ self.note = note
+
+ def __str__(self):
+ lines = []
+ if self.context is not None:
+ lines.append(self.context)
+ if self.context_mark is not None \
+ and (self.problem is None or self.problem_mark is None
+ or self.context_mark.name != self.problem_mark.name
+ or self.context_mark.line != self.problem_mark.line
+ or self.context_mark.column != self.problem_mark.column):
+ lines.append(str(self.context_mark))
+ if self.problem is not None:
+ lines.append(self.problem)
+ if self.problem_mark is not None:
+ lines.append(str(self.problem_mark))
+ if self.note is not None:
+ lines.append(self.note)
+ return '\n'.join(lines)
+
diff --git a/collectors/python.d.plugin/python_modules/pyyaml3/events.py b/collectors/python.d.plugin/python_modules/pyyaml3/events.py
new file mode 100644
index 0000000..283452a
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/pyyaml3/events.py
@@ -0,0 +1,87 @@
+# SPDX-License-Identifier: MIT
+
+# Abstract classes.
+
+class Event(object):
+ def __init__(self, start_mark=None, end_mark=None):
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ def __repr__(self):
+ attributes = [key for key in ['anchor', 'tag', 'implicit', 'value']
+ if hasattr(self, key)]
+ arguments = ', '.join(['%s=%r' % (key, getattr(self, key))
+ for key in attributes])
+ return '%s(%s)' % (self.__class__.__name__, arguments)
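+ # Illustratively (not part of the upstream source), repr() yields e.g.:
+ #   ScalarEvent(anchor=None, tag=None, implicit=(True, False), value='1')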
+
+class NodeEvent(Event):
+ def __init__(self, anchor, start_mark=None, end_mark=None):
+ self.anchor = anchor
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+
+class CollectionStartEvent(NodeEvent):
+ def __init__(self, anchor, tag, implicit, start_mark=None, end_mark=None,
+ flow_style=None):
+ self.anchor = anchor
+ self.tag = tag
+ self.implicit = implicit
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.flow_style = flow_style
+
+class CollectionEndEvent(Event):
+ pass
+
+# Implementations.
+
+class StreamStartEvent(Event):
+ def __init__(self, start_mark=None, end_mark=None, encoding=None):
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.encoding = encoding
+
+class StreamEndEvent(Event):
+ pass
+
+class DocumentStartEvent(Event):
+ def __init__(self, start_mark=None, end_mark=None,
+ explicit=None, version=None, tags=None):
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.explicit = explicit
+ self.version = version
+ self.tags = tags
+
+class DocumentEndEvent(Event):
+ def __init__(self, start_mark=None, end_mark=None,
+ explicit=None):
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.explicit = explicit
+
+class AliasEvent(NodeEvent):
+ pass
+
+class ScalarEvent(NodeEvent):
+ def __init__(self, anchor, tag, implicit, value,
+ start_mark=None, end_mark=None, style=None):
+ self.anchor = anchor
+ self.tag = tag
+ self.implicit = implicit
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.style = style
+
+class SequenceStartEvent(CollectionStartEvent):
+ pass
+
+class SequenceEndEvent(CollectionEndEvent):
+ pass
+
+class MappingStartEvent(CollectionStartEvent):
+ pass
+
+class MappingEndEvent(CollectionEndEvent):
+ pass
+
diff --git a/collectors/python.d.plugin/python_modules/pyyaml3/loader.py b/collectors/python.d.plugin/python_modules/pyyaml3/loader.py
new file mode 100644
index 0000000..7ef6cf8
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/pyyaml3/loader.py
@@ -0,0 +1,41 @@
+# SPDX-License-Identifier: MIT
+
+__all__ = ['BaseLoader', 'SafeLoader', 'Loader']
+
+from .reader import *
+from .scanner import *
+from .parser import *
+from .composer import *
+from .constructor import *
+from .resolver import *
+
+class BaseLoader(Reader, Scanner, Parser, Composer, BaseConstructor, BaseResolver):
+
+ def __init__(self, stream):
+ Reader.__init__(self, stream)
+ Scanner.__init__(self)
+ Parser.__init__(self)
+ Composer.__init__(self)
+ BaseConstructor.__init__(self)
+ BaseResolver.__init__(self)
+
+class SafeLoader(Reader, Scanner, Parser, Composer, SafeConstructor, Resolver):
+
+ def __init__(self, stream):
+ Reader.__init__(self, stream)
+ Scanner.__init__(self)
+ Parser.__init__(self)
+ Composer.__init__(self)
+ SafeConstructor.__init__(self)
+ Resolver.__init__(self)
+
+class Loader(Reader, Scanner, Parser, Composer, Constructor, Resolver):
+
+ def __init__(self, stream):
+ Reader.__init__(self, stream)
+ Scanner.__init__(self)
+ Parser.__init__(self)
+ Composer.__init__(self)
+ Constructor.__init__(self)
+ Resolver.__init__(self)
+
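+# Usage sketch (illustrative, not part of the upstream source; it assumes
+# get_single_data() comes from BaseConstructor and dispose() from Parser):
+#
+#   loader = SafeLoader(stream)
+#   try:
+#       data = loader.get_single_data()
+#   finally:
+#       loader.dispose()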
diff --git a/collectors/python.d.plugin/python_modules/pyyaml3/nodes.py b/collectors/python.d.plugin/python_modules/pyyaml3/nodes.py
new file mode 100644
index 0000000..ed2a1b4
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/pyyaml3/nodes.py
@@ -0,0 +1,50 @@
+# SPDX-License-Identifier: MIT
+
+class Node(object):
+ def __init__(self, tag, value, start_mark, end_mark):
+ self.tag = tag
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ def __repr__(self):
+ value = self.value
+ #if isinstance(value, list):
+ # if len(value) == 0:
+ # value = '<empty>'
+ # elif len(value) == 1:
+ # value = '<1 item>'
+ # else:
+ # value = '<%d items>' % len(value)
+ #else:
+ # if len(value) > 75:
+ # value = repr(value[:70]+u' ... ')
+ # else:
+ # value = repr(value)
+ value = repr(value)
+ return '%s(tag=%r, value=%s)' % (self.__class__.__name__, self.tag, value)
+
+class ScalarNode(Node):
+ id = 'scalar'
+ def __init__(self, tag, value,
+ start_mark=None, end_mark=None, style=None):
+ self.tag = tag
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.style = style
+
+class CollectionNode(Node):
+ def __init__(self, tag, value,
+ start_mark=None, end_mark=None, flow_style=None):
+ self.tag = tag
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.flow_style = flow_style
+
+class SequenceNode(CollectionNode):
+ id = 'sequence'
+
+class MappingNode(CollectionNode):
+ id = 'mapping'
+
diff --git a/collectors/python.d.plugin/python_modules/pyyaml3/parser.py b/collectors/python.d.plugin/python_modules/pyyaml3/parser.py
new file mode 100644
index 0000000..bcec7f9
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/pyyaml3/parser.py
@@ -0,0 +1,590 @@
+# SPDX-License-Identifier: MIT
+
+# The following YAML grammar is LL(1) and is parsed by a recursive descent
+# parser.
+#
+# stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
+# implicit_document ::= block_node DOCUMENT-END*
+# explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+# block_node_or_indentless_sequence ::=
+# ALIAS
+# | properties (block_content | indentless_block_sequence)?
+# | block_content
+# | indentless_block_sequence
+# block_node ::= ALIAS
+# | properties block_content?
+# | block_content
+# flow_node ::= ALIAS
+# | properties flow_content?
+# | flow_content
+# properties ::= TAG ANCHOR? | ANCHOR TAG?
+# block_content ::= block_collection | flow_collection | SCALAR
+# flow_content ::= flow_collection | SCALAR
+# block_collection ::= block_sequence | block_mapping
+# flow_collection ::= flow_sequence | flow_mapping
+# block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
+# indentless_sequence ::= (BLOCK-ENTRY block_node?)+
+# block_mapping ::= BLOCK-MAPPING_START
+# ((KEY block_node_or_indentless_sequence?)?
+# (VALUE block_node_or_indentless_sequence?)?)*
+# BLOCK-END
+# flow_sequence ::= FLOW-SEQUENCE-START
+# (flow_sequence_entry FLOW-ENTRY)*
+# flow_sequence_entry?
+# FLOW-SEQUENCE-END
+# flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+# flow_mapping ::= FLOW-MAPPING-START
+# (flow_mapping_entry FLOW-ENTRY)*
+# flow_mapping_entry?
+# FLOW-MAPPING-END
+# flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+#
+# FIRST sets:
+#
+# stream: { STREAM-START }
+# explicit_document: { DIRECTIVE DOCUMENT-START }
+# implicit_document: FIRST(block_node)
+# block_node: { ALIAS TAG ANCHOR SCALAR BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START }
+# flow_node: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START }
+# block_content: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR }
+# flow_content: { FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR }
+# block_collection: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START }
+# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START }
+# block_sequence: { BLOCK-SEQUENCE-START }
+# block_mapping: { BLOCK-MAPPING-START }
+# block_node_or_indentless_sequence: { ALIAS ANCHOR TAG SCALAR BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START BLOCK-ENTRY }
+# indentless_sequence: { ENTRY }
+# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START }
+# flow_sequence: { FLOW-SEQUENCE-START }
+# flow_mapping: { FLOW-MAPPING-START }
+# flow_sequence_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START KEY }
+# flow_mapping_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START KEY }
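+#
+# For example (an illustrative sketch, not part of the upstream source), the
+# document
+#   - a: 1
+# is an implicit_document whose block_node is a block_sequence with a single
+# BLOCK-ENTRY; that entry's block_node is a block_mapping holding one
+# KEY/VALUE pair (scalars 'a' and '1').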
+
+__all__ = ['Parser', 'ParserError']
+
+from .error import MarkedYAMLError
+from .tokens import *
+from .events import *
+from .scanner import *
+
+class ParserError(MarkedYAMLError):
+ pass
+
+class Parser:
+ # Since writing a recursive descent parser is a straightforward task, we
+ # do not give many comments here.
+
+ DEFAULT_TAGS = {
+ '!': '!',
+ '!!': 'tag:yaml.org,2002:',
+ }
+
+ def __init__(self):
+ self.current_event = None
+ self.yaml_version = None
+ self.tag_handles = {}
+ self.states = []
+ self.marks = []
+ self.state = self.parse_stream_start
+
+ def dispose(self):
+ # Reset the state attributes (to clear self-references)
+ self.states = []
+ self.state = None
+
+ def check_event(self, *choices):
+ # Check the type of the next event.
+ if self.current_event is None:
+ if self.state:
+ self.current_event = self.state()
+ if self.current_event is not None:
+ if not choices:
+ return True
+ for choice in choices:
+ if isinstance(self.current_event, choice):
+ return True
+ return False
+
+ def peek_event(self):
+ # Get the next event.
+ if self.current_event is None:
+ if self.state:
+ self.current_event = self.state()
+ return self.current_event
+
+ def get_event(self):
+ # Get the next event and proceed further.
+ if self.current_event is None:
+ if self.state:
+ self.current_event = self.state()
+ value = self.current_event
+ self.current_event = None
+ return value
+
+ # stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
+ # implicit_document ::= block_node DOCUMENT-END*
+ # explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+
+ def parse_stream_start(self):
+
+ # Parse the stream start.
+ token = self.get_token()
+ event = StreamStartEvent(token.start_mark, token.end_mark,
+ encoding=token.encoding)
+
+ # Prepare the next state.
+ self.state = self.parse_implicit_document_start
+
+ return event
+
+ def parse_implicit_document_start(self):
+
+ # Parse an implicit document.
+ if not self.check_token(DirectiveToken, DocumentStartToken,
+ StreamEndToken):
+ self.tag_handles = self.DEFAULT_TAGS
+ token = self.peek_token()
+ start_mark = end_mark = token.start_mark
+ event = DocumentStartEvent(start_mark, end_mark,
+ explicit=False)
+
+ # Prepare the next state.
+ self.states.append(self.parse_document_end)
+ self.state = self.parse_block_node
+
+ return event
+
+ else:
+ return self.parse_document_start()
+
+ def parse_document_start(self):
+
+ # Parse any extra document end indicators.
+ while self.check_token(DocumentEndToken):
+ self.get_token()
+
+ # Parse an explicit document.
+ if not self.check_token(StreamEndToken):
+ token = self.peek_token()
+ start_mark = token.start_mark
+ version, tags = self.process_directives()
+ if not self.check_token(DocumentStartToken):
+ raise ParserError(None, None,
+ "expected '<document start>', but found %r"
+ % self.peek_token().id,
+ self.peek_token().start_mark)
+ token = self.get_token()
+ end_mark = token.end_mark
+ event = DocumentStartEvent(start_mark, end_mark,
+ explicit=True, version=version, tags=tags)
+ self.states.append(self.parse_document_end)
+ self.state = self.parse_document_content
+ else:
+ # Parse the end of the stream.
+ token = self.get_token()
+ event = StreamEndEvent(token.start_mark, token.end_mark)
+ assert not self.states
+ assert not self.marks
+ self.state = None
+ return event
+
+ def parse_document_end(self):
+
+ # Parse the document end.
+ token = self.peek_token()
+ start_mark = end_mark = token.start_mark
+ explicit = False
+ if self.check_token(DocumentEndToken):
+ token = self.get_token()
+ end_mark = token.end_mark
+ explicit = True
+ event = DocumentEndEvent(start_mark, end_mark,
+ explicit=explicit)
+
+ # Prepare the next state.
+ self.state = self.parse_document_start
+
+ return event
+
+ def parse_document_content(self):
+ if self.check_token(DirectiveToken,
+ DocumentStartToken, DocumentEndToken, StreamEndToken):
+ event = self.process_empty_scalar(self.peek_token().start_mark)
+ self.state = self.states.pop()
+ return event
+ else:
+ return self.parse_block_node()
+
+ def process_directives(self):
+ self.yaml_version = None
+ self.tag_handles = {}
+ while self.check_token(DirectiveToken):
+ token = self.get_token()
+ if token.name == 'YAML':
+ if self.yaml_version is not None:
+ raise ParserError(None, None,
+ "found duplicate YAML directive", token.start_mark)
+ major, minor = token.value
+ if major != 1:
+ raise ParserError(None, None,
+ "found incompatible YAML document (version 1.* is required)",
+ token.start_mark)
+ self.yaml_version = token.value
+ elif token.name == 'TAG':
+ handle, prefix = token.value
+ if handle in self.tag_handles:
+ raise ParserError(None, None,
+ "duplicate tag handle %r" % handle,
+ token.start_mark)
+ self.tag_handles[handle] = prefix
+ if self.tag_handles:
+ value = self.yaml_version, self.tag_handles.copy()
+ else:
+ value = self.yaml_version, None
+ for key in self.DEFAULT_TAGS:
+ if key not in self.tag_handles:
+ self.tag_handles[key] = self.DEFAULT_TAGS[key]
+ return value
+
+ # block_node_or_indentless_sequence ::= ALIAS
+ # | properties (block_content | indentless_block_sequence)?
+ # | block_content
+ # | indentless_block_sequence
+ # block_node ::= ALIAS
+ # | properties block_content?
+ # | block_content
+ # flow_node ::= ALIAS
+ # | properties flow_content?
+ # | flow_content
+ # properties ::= TAG ANCHOR? | ANCHOR TAG?
+ # block_content ::= block_collection | flow_collection | SCALAR
+ # flow_content ::= flow_collection | SCALAR
+ # block_collection ::= block_sequence | block_mapping
+ # flow_collection ::= flow_sequence | flow_mapping
+
+ def parse_block_node(self):
+ return self.parse_node(block=True)
+
+ def parse_flow_node(self):
+ return self.parse_node()
+
+ def parse_block_node_or_indentless_sequence(self):
+ return self.parse_node(block=True, indentless_sequence=True)
+
+ def parse_node(self, block=False, indentless_sequence=False):
+ if self.check_token(AliasToken):
+ token = self.get_token()
+ event = AliasEvent(token.value, token.start_mark, token.end_mark)
+ self.state = self.states.pop()
+ else:
+ anchor = None
+ tag = None
+ start_mark = end_mark = tag_mark = None
+ if self.check_token(AnchorToken):
+ token = self.get_token()
+ start_mark = token.start_mark
+ end_mark = token.end_mark
+ anchor = token.value
+ if self.check_token(TagToken):
+ token = self.get_token()
+ tag_mark = token.start_mark
+ end_mark = token.end_mark
+ tag = token.value
+ elif self.check_token(TagToken):
+ token = self.get_token()
+ start_mark = tag_mark = token.start_mark
+ end_mark = token.end_mark
+ tag = token.value
+ if self.check_token(AnchorToken):
+ token = self.get_token()
+ end_mark = token.end_mark
+ anchor = token.value
+ if tag is not None:
+ handle, suffix = tag
+ if handle is not None:
+ if handle not in self.tag_handles:
+ raise ParserError("while parsing a node", start_mark,
+ "found undefined tag handle %r" % handle,
+ tag_mark)
+ tag = self.tag_handles[handle]+suffix
+ else:
+ tag = suffix
+ #if tag == '!':
+ # raise ParserError("while parsing a node", start_mark,
+ # "found non-specific tag '!'", tag_mark,
+ # "Please check 'http://pyyaml.org/wiki/YAMLNonSpecificTag' and share your opinion.")
+ if start_mark is None:
+ start_mark = end_mark = self.peek_token().start_mark
+ event = None
+ implicit = (tag is None or tag == '!')
+ if indentless_sequence and self.check_token(BlockEntryToken):
+ end_mark = self.peek_token().end_mark
+ event = SequenceStartEvent(anchor, tag, implicit,
+ start_mark, end_mark)
+ self.state = self.parse_indentless_sequence_entry
+ else:
+ if self.check_token(ScalarToken):
+ token = self.get_token()
+ end_mark = token.end_mark
+ if (token.plain and tag is None) or tag == '!':
+ implicit = (True, False)
+ elif tag is None:
+ implicit = (False, True)
+ else:
+ implicit = (False, False)
+ event = ScalarEvent(anchor, tag, implicit, token.value,
+ start_mark, end_mark, style=token.style)
+ self.state = self.states.pop()
+ elif self.check_token(FlowSequenceStartToken):
+ end_mark = self.peek_token().end_mark
+ event = SequenceStartEvent(anchor, tag, implicit,
+ start_mark, end_mark, flow_style=True)
+ self.state = self.parse_flow_sequence_first_entry
+ elif self.check_token(FlowMappingStartToken):
+ end_mark = self.peek_token().end_mark
+ event = MappingStartEvent(anchor, tag, implicit,
+ start_mark, end_mark, flow_style=True)
+ self.state = self.parse_flow_mapping_first_key
+ elif block and self.check_token(BlockSequenceStartToken):
+ end_mark = self.peek_token().start_mark
+ event = SequenceStartEvent(anchor, tag, implicit,
+ start_mark, end_mark, flow_style=False)
+ self.state = self.parse_block_sequence_first_entry
+ elif block and self.check_token(BlockMappingStartToken):
+ end_mark = self.peek_token().start_mark
+ event = MappingStartEvent(anchor, tag, implicit,
+ start_mark, end_mark, flow_style=False)
+ self.state = self.parse_block_mapping_first_key
+ elif anchor is not None or tag is not None:
+ # Empty scalars are allowed even if a tag or an anchor is
+ # specified.
+ event = ScalarEvent(anchor, tag, (implicit, False), '',
+ start_mark, end_mark)
+ self.state = self.states.pop()
+ else:
+ if block:
+ node = 'block'
+ else:
+ node = 'flow'
+ token = self.peek_token()
+ raise ParserError("while parsing a %s node" % node, start_mark,
+ "expected the node content, but found %r" % token.id,
+ token.start_mark)
+ return event
+
+ # block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
+
+ def parse_block_sequence_first_entry(self):
+ token = self.get_token()
+ self.marks.append(token.start_mark)
+ return self.parse_block_sequence_entry()
+
+ def parse_block_sequence_entry(self):
+ if self.check_token(BlockEntryToken):
+ token = self.get_token()
+ if not self.check_token(BlockEntryToken, BlockEndToken):
+ self.states.append(self.parse_block_sequence_entry)
+ return self.parse_block_node()
+ else:
+ self.state = self.parse_block_sequence_entry
+ return self.process_empty_scalar(token.end_mark)
+ if not self.check_token(BlockEndToken):
+ token = self.peek_token()
+ raise ParserError("while parsing a block collection", self.marks[-1],
+ "expected <block end>, but found %r" % token.id, token.start_mark)
+ token = self.get_token()
+ event = SequenceEndEvent(token.start_mark, token.end_mark)
+ self.state = self.states.pop()
+ self.marks.pop()
+ return event
+
+ # indentless_sequence ::= (BLOCK-ENTRY block_node?)+
+
+ def parse_indentless_sequence_entry(self):
+ if self.check_token(BlockEntryToken):
+ token = self.get_token()
+ if not self.check_token(BlockEntryToken,
+ KeyToken, ValueToken, BlockEndToken):
+ self.states.append(self.parse_indentless_sequence_entry)
+ return self.parse_block_node()
+ else:
+ self.state = self.parse_indentless_sequence_entry
+ return self.process_empty_scalar(token.end_mark)
+ token = self.peek_token()
+ event = SequenceEndEvent(token.start_mark, token.start_mark)
+ self.state = self.states.pop()
+ return event
+
+ # block_mapping ::= BLOCK-MAPPING_START
+ # ((KEY block_node_or_indentless_sequence?)?
+ # (VALUE block_node_or_indentless_sequence?)?)*
+ # BLOCK-END
+
+ def parse_block_mapping_first_key(self):
+ token = self.get_token()
+ self.marks.append(token.start_mark)
+ return self.parse_block_mapping_key()
+
+ def parse_block_mapping_key(self):
+ if self.check_token(KeyToken):
+ token = self.get_token()
+ if not self.check_token(KeyToken, ValueToken, BlockEndToken):
+ self.states.append(self.parse_block_mapping_value)
+ return self.parse_block_node_or_indentless_sequence()
+ else:
+ self.state = self.parse_block_mapping_value
+ return self.process_empty_scalar(token.end_mark)
+ if not self.check_token(BlockEndToken):
+ token = self.peek_token()
+ raise ParserError("while parsing a block mapping", self.marks[-1],
+ "expected <block end>, but found %r" % token.id, token.start_mark)
+ token = self.get_token()
+ event = MappingEndEvent(token.start_mark, token.end_mark)
+ self.state = self.states.pop()
+ self.marks.pop()
+ return event
+
+ def parse_block_mapping_value(self):
+ if self.check_token(ValueToken):
+ token = self.get_token()
+ if not self.check_token(KeyToken, ValueToken, BlockEndToken):
+ self.states.append(self.parse_block_mapping_key)
+ return self.parse_block_node_or_indentless_sequence()
+ else:
+ self.state = self.parse_block_mapping_key
+ return self.process_empty_scalar(token.end_mark)
+ else:
+ self.state = self.parse_block_mapping_key
+ token = self.peek_token()
+ return self.process_empty_scalar(token.start_mark)
+
+ # flow_sequence ::= FLOW-SEQUENCE-START
+ # (flow_sequence_entry FLOW-ENTRY)*
+ # flow_sequence_entry?
+ # FLOW-SEQUENCE-END
+ # flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+ #
+ # Note that while the production rules for flow_sequence_entry and
+ # flow_mapping_entry are the same, their interpretations differ.
+ # For `flow_sequence_entry`, the part `KEY flow_node? (VALUE flow_node?)?`
+ # generates an inline mapping (set syntax).
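+ # Illustrative example (editor's addition, not upstream code): in the
+ # flow sequence [ a, b: c ], the `b: c` entry goes through the KEY
+ # branch below and is emitted as a single-pair inline mapping, i.e.
+ # the same events as for [ a, {b: c} ].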
+
+ def parse_flow_sequence_first_entry(self):
+ token = self.get_token()
+ self.marks.append(token.start_mark)
+ return self.parse_flow_sequence_entry(first=True)
+
+ def parse_flow_sequence_entry(self, first=False):
+ if not self.check_token(FlowSequenceEndToken):
+ if not first:
+ if self.check_token(FlowEntryToken):
+ self.get_token()
+ else:
+ token = self.peek_token()
+ raise ParserError("while parsing a flow sequence", self.marks[-1],
+ "expected ',' or ']', but got %r" % token.id, token.start_mark)
+
+ if self.check_token(KeyToken):
+ token = self.peek_token()
+ event = MappingStartEvent(None, None, True,
+ token.start_mark, token.end_mark,
+ flow_style=True)
+ self.state = self.parse_flow_sequence_entry_mapping_key
+ return event
+ elif not self.check_token(FlowSequenceEndToken):
+ self.states.append(self.parse_flow_sequence_entry)
+ return self.parse_flow_node()
+ token = self.get_token()
+ event = SequenceEndEvent(token.start_mark, token.end_mark)
+ self.state = self.states.pop()
+ self.marks.pop()
+ return event
+
+ def parse_flow_sequence_entry_mapping_key(self):
+ token = self.get_token()
+ if not self.check_token(ValueToken,
+ FlowEntryToken, FlowSequenceEndToken):
+ self.states.append(self.parse_flow_sequence_entry_mapping_value)
+ return self.parse_flow_node()
+ else:
+ self.state = self.parse_flow_sequence_entry_mapping_value
+ return self.process_empty_scalar(token.end_mark)
+
+ def parse_flow_sequence_entry_mapping_value(self):
+ if self.check_token(ValueToken):
+ token = self.get_token()
+ if not self.check_token(FlowEntryToken, FlowSequenceEndToken):
+ self.states.append(self.parse_flow_sequence_entry_mapping_end)
+ return self.parse_flow_node()
+ else:
+ self.state = self.parse_flow_sequence_entry_mapping_end
+ return self.process_empty_scalar(token.end_mark)
+ else:
+ self.state = self.parse_flow_sequence_entry_mapping_end
+ token = self.peek_token()
+ return self.process_empty_scalar(token.start_mark)
+
+ def parse_flow_sequence_entry_mapping_end(self):
+ self.state = self.parse_flow_sequence_entry
+ token = self.peek_token()
+ return MappingEndEvent(token.start_mark, token.start_mark)
+
+ # flow_mapping ::= FLOW-MAPPING-START
+ # (flow_mapping_entry FLOW-ENTRY)*
+ # flow_mapping_entry?
+ # FLOW-MAPPING-END
+ # flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+
+ def parse_flow_mapping_first_key(self):
+ token = self.get_token()
+ self.marks.append(token.start_mark)
+ return self.parse_flow_mapping_key(first=True)
+
+ def parse_flow_mapping_key(self, first=False):
+ if not self.check_token(FlowMappingEndToken):
+ if not first:
+ if self.check_token(FlowEntryToken):
+ self.get_token()
+ else:
+ token = self.peek_token()
+ raise ParserError("while parsing a flow mapping", self.marks[-1],
+ "expected ',' or '}', but got %r" % token.id, token.start_mark)
+ if self.check_token(KeyToken):
+ token = self.get_token()
+ if not self.check_token(ValueToken,
+ FlowEntryToken, FlowMappingEndToken):
+ self.states.append(self.parse_flow_mapping_value)
+ return self.parse_flow_node()
+ else:
+ self.state = self.parse_flow_mapping_value
+ return self.process_empty_scalar(token.end_mark)
+ elif not self.check_token(FlowMappingEndToken):
+ self.states.append(self.parse_flow_mapping_empty_value)
+ return self.parse_flow_node()
+ token = self.get_token()
+ event = MappingEndEvent(token.start_mark, token.end_mark)
+ self.state = self.states.pop()
+ self.marks.pop()
+ return event
+
+ def parse_flow_mapping_value(self):
+ if self.check_token(ValueToken):
+ token = self.get_token()
+ if not self.check_token(FlowEntryToken, FlowMappingEndToken):
+ self.states.append(self.parse_flow_mapping_key)
+ return self.parse_flow_node()
+ else:
+ self.state = self.parse_flow_mapping_key
+ return self.process_empty_scalar(token.end_mark)
+ else:
+ self.state = self.parse_flow_mapping_key
+ token = self.peek_token()
+ return self.process_empty_scalar(token.start_mark)
+
+ def parse_flow_mapping_empty_value(self):
+ self.state = self.parse_flow_mapping_key
+ return self.process_empty_scalar(self.peek_token().start_mark)
+
+ def process_empty_scalar(self, mark):
+ return ScalarEvent(None, None, (True, False), '', mark, mark)
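+ # Illustrative sketch (editor's addition, not upstream code): for the
+ # one-line document "a: 1" this parser yields, in order, StreamStart,
+ # DocumentStart, MappingStart, Scalar('a'), Scalar('1'), MappingEnd,
+ # DocumentEnd and StreamEnd events.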
+
diff --git a/collectors/python.d.plugin/python_modules/pyyaml3/reader.py b/collectors/python.d.plugin/python_modules/pyyaml3/reader.py
new file mode 100644
index 0000000..0a515fd
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/pyyaml3/reader.py
@@ -0,0 +1,200 @@
+# SPDX-License-Identifier: MIT
+# This module contains abstractions for the input stream. You don't have to
+# look further; there is no pretty code.
+#
+# We define two classes here.
+#
+# Mark(source, line, column)
+# It's just a record and its only use is producing nice error messages.
+# Parser does not use it for any other purposes.
+#
+# Reader(source, data)
+# Reader determines the encoding of `data` and converts it to unicode.
+# Reader provides the following methods and attributes:
+# reader.peek(index=0) - return the character `index` positions ahead of the pointer.
+# reader.forward(length=1) - move the current position forward by `length` characters.
+# reader.index - the number of the current character.
+# reader.line, reader.column - the line and the column of the current character.
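+# Illustrative usage sketch (editor's addition, not upstream code):
+# r = Reader('ab')        # buffer becomes 'ab\0'
+# r.peek()                # -> 'a'; the pointer does not move
+# r.forward(); r.peek()   # -> 'b'; index/line/column track the position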
+
+__all__ = ['Reader', 'ReaderError']
+
+from .error import YAMLError, Mark
+
+import codecs, re
+
+class ReaderError(YAMLError):
+
+ def __init__(self, name, position, character, encoding, reason):
+ self.name = name
+ self.character = character
+ self.position = position
+ self.encoding = encoding
+ self.reason = reason
+
+ def __str__(self):
+ if isinstance(self.character, bytes):
+ return "'%s' codec can't decode byte #x%02x: %s\n" \
+ " in \"%s\", position %d" \
+ % (self.encoding, ord(self.character), self.reason,
+ self.name, self.position)
+ else:
+ return "unacceptable character #x%04x: %s\n" \
+ " in \"%s\", position %d" \
+ % (self.character, self.reason,
+ self.name, self.position)
+
+class Reader(object):
+ # Reader:
+ # - determines the data encoding and converts it to a unicode string,
+ # - checks if characters are in allowed range,
+ # - adds '\0' to the end.
+
+ # Reader accepts
+ # - a `bytes` object,
+ # - a `str` object,
+ # - a file-like object with its `read` method returning `str`,
+ # - a file-like object with its `read` method returning `bytes`.
+
+ # Yeah, it's ugly and slow.
+
+ def __init__(self, stream):
+ self.name = None
+ self.stream = None
+ self.stream_pointer = 0
+ self.eof = True
+ self.buffer = ''
+ self.pointer = 0
+ self.raw_buffer = None
+ self.raw_decode = None
+ self.encoding = None
+ self.index = 0
+ self.line = 0
+ self.column = 0
+ if isinstance(stream, str):
+ self.name = "<unicode string>"
+ self.check_printable(stream)
+ self.buffer = stream+'\0'
+ elif isinstance(stream, bytes):
+ self.name = "<byte string>"
+ self.raw_buffer = stream
+ self.determine_encoding()
+ else:
+ self.stream = stream
+ self.name = getattr(stream, 'name', "<file>")
+ self.eof = False
+ self.raw_buffer = None
+ self.determine_encoding()
+
+ def peek(self, index=0):
+ try:
+ return self.buffer[self.pointer+index]
+ except IndexError:
+ self.update(index+1)
+ return self.buffer[self.pointer+index]
+
+ def prefix(self, length=1):
+ if self.pointer+length >= len(self.buffer):
+ self.update(length)
+ return self.buffer[self.pointer:self.pointer+length]
+
+ def forward(self, length=1):
+ if self.pointer+length+1 >= len(self.buffer):
+ self.update(length+1)
+ while length:
+ ch = self.buffer[self.pointer]
+ self.pointer += 1
+ self.index += 1
+ if ch in '\n\x85\u2028\u2029' \
+ or (ch == '\r' and self.buffer[self.pointer] != '\n'):
+ self.line += 1
+ self.column = 0
+ elif ch != '\uFEFF':
+ self.column += 1
+ length -= 1
+
+ def get_mark(self):
+ if self.stream is None:
+ return Mark(self.name, self.index, self.line, self.column,
+ self.buffer, self.pointer)
+ else:
+ return Mark(self.name, self.index, self.line, self.column,
+ None, None)
+
+ def determine_encoding(self):
+ while not self.eof and (self.raw_buffer is None or len(self.raw_buffer) < 2):
+ self.update_raw()
+ if isinstance(self.raw_buffer, bytes):
+ if self.raw_buffer.startswith(codecs.BOM_UTF16_LE):
+ self.raw_decode = codecs.utf_16_le_decode
+ self.encoding = 'utf-16-le'
+ elif self.raw_buffer.startswith(codecs.BOM_UTF16_BE):
+ self.raw_decode = codecs.utf_16_be_decode
+ self.encoding = 'utf-16-be'
+ else:
+ self.raw_decode = codecs.utf_8_decode
+ self.encoding = 'utf-8'
+ self.update(1)
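+ # Illustrative note (editor's addition, not upstream code): only the
+ # UTF-16 BOMs are sniffed above; everything else is decoded as UTF-8,
+ # so a UTF-8 BOM survives as '\uFEFF' and is skipped by the scanner.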
+
+ NON_PRINTABLE = re.compile('[^\x09\x0A\x0D\x20-\x7E\x85\xA0-\uD7FF\uE000-\uFFFD]')
+ def check_printable(self, data):
+ match = self.NON_PRINTABLE.search(data)
+ if match:
+ character = match.group()
+ position = self.index+(len(self.buffer)-self.pointer)+match.start()
+ raise ReaderError(self.name, position, ord(character),
+ 'unicode', "special characters are not allowed")
+
+ def update(self, length):
+ if self.raw_buffer is None:
+ return
+ self.buffer = self.buffer[self.pointer:]
+ self.pointer = 0
+ while len(self.buffer) < length:
+ if not self.eof:
+ self.update_raw()
+ if self.raw_decode is not None:
+ try:
+ data, converted = self.raw_decode(self.raw_buffer,
+ 'strict', self.eof)
+ except UnicodeDecodeError as exc:
+ character = self.raw_buffer[exc.start]
+ if self.stream is not None:
+ position = self.stream_pointer-len(self.raw_buffer)+exc.start
+ else:
+ position = exc.start
+ raise ReaderError(self.name, position, character,
+ exc.encoding, exc.reason)
+ else:
+ data = self.raw_buffer
+ converted = len(data)
+ self.check_printable(data)
+ self.buffer += data
+ self.raw_buffer = self.raw_buffer[converted:]
+ if self.eof:
+ self.buffer += '\0'
+ self.raw_buffer = None
+ break
+
+ def update_raw(self, size=4096):
+ data = self.stream.read(size)
+ if self.raw_buffer is None:
+ self.raw_buffer = data
+ else:
+ self.raw_buffer += data
+ self.stream_pointer += len(data)
+ if not data:
+ self.eof = True
+
+#try:
+# import psyco
+# psyco.bind(Reader)
+#except ImportError:
+# pass
+
diff --git a/collectors/python.d.plugin/python_modules/pyyaml3/representer.py b/collectors/python.d.plugin/python_modules/pyyaml3/representer.py
new file mode 100644
index 0000000..756a18d
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/pyyaml3/representer.py
@@ -0,0 +1,386 @@
+# SPDX-License-Identifier: MIT
+
+__all__ = ['BaseRepresenter', 'SafeRepresenter', 'Representer',
+ 'RepresenterError']
+
+from .error import *
+from .nodes import *
+
+import datetime, sys, copyreg, types, base64
+
+class RepresenterError(YAMLError):
+ pass
+
+class BaseRepresenter:
+
+ yaml_representers = {}
+ yaml_multi_representers = {}
+
+ def __init__(self, default_style=None, default_flow_style=None):
+ self.default_style = default_style
+ self.default_flow_style = default_flow_style
+ self.represented_objects = {}
+ self.object_keeper = []
+ self.alias_key = None
+
+ def represent(self, data):
+ node = self.represent_data(data)
+ self.serialize(node)
+ self.represented_objects = {}
+ self.object_keeper = []
+ self.alias_key = None
+
+ def represent_data(self, data):
+ if self.ignore_aliases(data):
+ self.alias_key = None
+ else:
+ self.alias_key = id(data)
+ if self.alias_key is not None:
+ if self.alias_key in self.represented_objects:
+ node = self.represented_objects[self.alias_key]
+ #if node is None:
+ # raise RepresenterError("recursive objects are not allowed: %r" % data)
+ return node
+ #self.represented_objects[alias_key] = None
+ self.object_keeper.append(data)
+ data_types = type(data).__mro__
+ if data_types[0] in self.yaml_representers:
+ node = self.yaml_representers[data_types[0]](self, data)
+ else:
+ for data_type in data_types:
+ if data_type in self.yaml_multi_representers:
+ node = self.yaml_multi_representers[data_type](self, data)
+ break
+ else:
+ if None in self.yaml_multi_representers:
+ node = self.yaml_multi_representers[None](self, data)
+ elif None in self.yaml_representers:
+ node = self.yaml_representers[None](self, data)
+ else:
+ node = ScalarNode(None, str(data))
+ #if alias_key is not None:
+ # self.represented_objects[alias_key] = node
+ return node
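+ # Illustrative note (editor's addition, not upstream code): dispatch
+ # above tries an exact-type representer first, then walks
+ # type(data).__mro__ looking for a multi-representer; Representer
+ # registers one for `object`, so arbitrary instances reach represent_object.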
+
+ @classmethod
+ def add_representer(cls, data_type, representer):
+ if 'yaml_representers' not in cls.__dict__:
+ cls.yaml_representers = cls.yaml_representers.copy()
+ cls.yaml_representers[data_type] = representer
+
+ @classmethod
+ def add_multi_representer(cls, data_type, representer):
+ if 'yaml_multi_representers' not in cls.__dict__:
+ cls.yaml_multi_representers = cls.yaml_multi_representers.copy()
+ cls.yaml_multi_representers[data_type] = representer
+
+ def represent_scalar(self, tag, value, style=None):
+ if style is None:
+ style = self.default_style
+ node = ScalarNode(tag, value, style=style)
+ if self.alias_key is not None:
+ self.represented_objects[self.alias_key] = node
+ return node
+
+ def represent_sequence(self, tag, sequence, flow_style=None):
+ value = []
+ node = SequenceNode(tag, value, flow_style=flow_style)
+ if self.alias_key is not None:
+ self.represented_objects[self.alias_key] = node
+ best_style = True
+ for item in sequence:
+ node_item = self.represent_data(item)
+ if not (isinstance(node_item, ScalarNode) and not node_item.style):
+ best_style = False
+ value.append(node_item)
+ if flow_style is None:
+ if self.default_flow_style is not None:
+ node.flow_style = self.default_flow_style
+ else:
+ node.flow_style = best_style
+ return node
+
+ def represent_mapping(self, tag, mapping, flow_style=None):
+ value = []
+ node = MappingNode(tag, value, flow_style=flow_style)
+ if self.alias_key is not None:
+ self.represented_objects[self.alias_key] = node
+ best_style = True
+ if hasattr(mapping, 'items'):
+ mapping = list(mapping.items())
+ try:
+ mapping = sorted(mapping)
+ except TypeError:
+ pass
+ for item_key, item_value in mapping:
+ node_key = self.represent_data(item_key)
+ node_value = self.represent_data(item_value)
+ if not (isinstance(node_key, ScalarNode) and not node_key.style):
+ best_style = False
+ if not (isinstance(node_value, ScalarNode) and not node_value.style):
+ best_style = False
+ value.append((node_key, node_value))
+ if flow_style is None:
+ if self.default_flow_style is not None:
+ node.flow_style = self.default_flow_style
+ else:
+ node.flow_style = best_style
+ return node
+
+ def ignore_aliases(self, data):
+ return False
+
+class SafeRepresenter(BaseRepresenter):
+
+ def ignore_aliases(self, data):
+ if data in [None, ()]:
+ return True
+ if isinstance(data, (str, bytes, bool, int, float)):
+ return True
+
+ def represent_none(self, data):
+ return self.represent_scalar('tag:yaml.org,2002:null', 'null')
+
+ def represent_str(self, data):
+ return self.represent_scalar('tag:yaml.org,2002:str', data)
+
+ def represent_binary(self, data):
+ if hasattr(base64, 'encodebytes'):
+ data = base64.encodebytes(data).decode('ascii')
+ else:
+ data = base64.encodestring(data).decode('ascii')
+ return self.represent_scalar('tag:yaml.org,2002:binary', data, style='|')
+
+ def represent_bool(self, data):
+ if data:
+ value = 'true'
+ else:
+ value = 'false'
+ return self.represent_scalar('tag:yaml.org,2002:bool', value)
+
+ def represent_int(self, data):
+ return self.represent_scalar('tag:yaml.org,2002:int', str(data))
+
+ inf_value = 1e300
+ while repr(inf_value) != repr(inf_value*inf_value):
+ inf_value *= inf_value
+
+ def represent_float(self, data):
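+ # Note (editor's addition, not upstream code): `data != data` is the
+ # usual NaN test; the second clause catches platforms where NaN
+ # incorrectly compares equal to every float.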
+ if data != data or (data == 0.0 and data == 1.0):
+ value = '.nan'
+ elif data == self.inf_value:
+ value = '.inf'
+ elif data == -self.inf_value:
+ value = '-.inf'
+ else:
+ value = repr(data).lower()
+ # Note that in some cases `repr(data)` represents a float number
+ # without the decimal part. For instance:
+ # >>> repr(1e17)
+ # '1e17'
+ # Unfortunately, this is not a valid float representation according
+ # to the definition of the `!!float` tag. We fix this by adding
+ # '.0' before the 'e' symbol.
+ if '.' not in value and 'e' in value:
+ value = value.replace('e', '.0e', 1)
+ return self.represent_scalar('tag:yaml.org,2002:float', value)
+
+ def represent_list(self, data):
+ #pairs = (len(data) > 0 and isinstance(data, list))
+ #if pairs:
+ # for item in data:
+ # if not isinstance(item, tuple) or len(item) != 2:
+ # pairs = False
+ # break
+ #if not pairs:
+ return self.represent_sequence('tag:yaml.org,2002:seq', data)
+ #value = []
+ #for item_key, item_value in data:
+ # value.append(self.represent_mapping(u'tag:yaml.org,2002:map',
+ # [(item_key, item_value)]))
+ #return SequenceNode(u'tag:yaml.org,2002:pairs', value)
+
+ def represent_dict(self, data):
+ return self.represent_mapping('tag:yaml.org,2002:map', data)
+
+ def represent_set(self, data):
+ value = {}
+ for key in data:
+ value[key] = None
+ return self.represent_mapping('tag:yaml.org,2002:set', value)
+
+ def represent_date(self, data):
+ value = data.isoformat()
+ return self.represent_scalar('tag:yaml.org,2002:timestamp', value)
+
+ def represent_datetime(self, data):
+ value = data.isoformat(' ')
+ return self.represent_scalar('tag:yaml.org,2002:timestamp', value)
+
+ def represent_yaml_object(self, tag, data, cls, flow_style=None):
+ if hasattr(data, '__getstate__'):
+ state = data.__getstate__()
+ else:
+ state = data.__dict__.copy()
+ return self.represent_mapping(tag, state, flow_style=flow_style)
+
+ def represent_undefined(self, data):
+ raise RepresenterError("cannot represent an object: %s" % data)
+
+SafeRepresenter.add_representer(type(None),
+ SafeRepresenter.represent_none)
+
+SafeRepresenter.add_representer(str,
+ SafeRepresenter.represent_str)
+
+SafeRepresenter.add_representer(bytes,
+ SafeRepresenter.represent_binary)
+
+SafeRepresenter.add_representer(bool,
+ SafeRepresenter.represent_bool)
+
+SafeRepresenter.add_representer(int,
+ SafeRepresenter.represent_int)
+
+SafeRepresenter.add_representer(float,
+ SafeRepresenter.represent_float)
+
+SafeRepresenter.add_representer(list,
+ SafeRepresenter.represent_list)
+
+SafeRepresenter.add_representer(tuple,
+ SafeRepresenter.represent_list)
+
+SafeRepresenter.add_representer(dict,
+ SafeRepresenter.represent_dict)
+
+SafeRepresenter.add_representer(set,
+ SafeRepresenter.represent_set)
+
+SafeRepresenter.add_representer(datetime.date,
+ SafeRepresenter.represent_date)
+
+SafeRepresenter.add_representer(datetime.datetime,
+ SafeRepresenter.represent_datetime)
+
+SafeRepresenter.add_representer(None,
+ SafeRepresenter.represent_undefined)
+
+class Representer(SafeRepresenter):
+
+ def represent_complex(self, data):
+ if data.imag == 0.0:
+ data = '%r' % data.real
+ elif data.real == 0.0:
+ data = '%rj' % data.imag
+ elif data.imag > 0:
+ data = '%r+%rj' % (data.real, data.imag)
+ else:
+ data = '%r%rj' % (data.real, data.imag)
+ return self.represent_scalar('tag:yaml.org,2002:python/complex', data)
+
+ def represent_tuple(self, data):
+ return self.represent_sequence('tag:yaml.org,2002:python/tuple', data)
+
+ def represent_name(self, data):
+ name = '%s.%s' % (data.__module__, data.__name__)
+ return self.represent_scalar('tag:yaml.org,2002:python/name:'+name, '')
+
+ def represent_module(self, data):
+ return self.represent_scalar(
+ 'tag:yaml.org,2002:python/module:'+data.__name__, '')
+
+ def represent_object(self, data):
+ # We use the __reduce__ API to save the data. data.__reduce__ returns
+ # a tuple of length 2-5:
+ # (function, args, state, listitems, dictitems)
+
+ # For reconstruction, we call function(*args), then set its state,
+ # listitems, and dictitems if they are not None.
+
+ # A special case is when function.__name__ == '__newobj__'. In this
+ # case we create the object with args[0].__new__(*args).
+
+ # Another special case is when __reduce__ returns a string - we don't
+ # support it.
+
+ # We produce a !!python/object, !!python/object/new or
+ # !!python/object/apply node.
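+ # Illustrative sketch (editor's addition, not upstream code): for a
+ # plain new-style instance, __reduce_ex__(2) typically yields
+ # (copyreg.__newobj__, (cls,), state), so the code below emits a
+ # !!python/object:module.Class mapping node built from `state`.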
+
+ cls = type(data)
+ if cls in copyreg.dispatch_table:
+ reduce = copyreg.dispatch_table[cls](data)
+ elif hasattr(data, '__reduce_ex__'):
+ reduce = data.__reduce_ex__(2)
+ elif hasattr(data, '__reduce__'):
+ reduce = data.__reduce__()
+ else:
+ raise RepresenterError("cannot represent object: %r" % data)
+ reduce = (list(reduce)+[None]*5)[:5]
+ function, args, state, listitems, dictitems = reduce
+ args = list(args)
+ if state is None:
+ state = {}
+ if listitems is not None:
+ listitems = list(listitems)
+ if dictitems is not None:
+ dictitems = dict(dictitems)
+ if function.__name__ == '__newobj__':
+ function = args[0]
+ args = args[1:]
+ tag = 'tag:yaml.org,2002:python/object/new:'
+ newobj = True
+ else:
+ tag = 'tag:yaml.org,2002:python/object/apply:'
+ newobj = False
+ function_name = '%s.%s' % (function.__module__, function.__name__)
+ if not args and not listitems and not dictitems \
+ and isinstance(state, dict) and newobj:
+ return self.represent_mapping(
+ 'tag:yaml.org,2002:python/object:'+function_name, state)
+ if not listitems and not dictitems \
+ and isinstance(state, dict) and not state:
+ return self.represent_sequence(tag+function_name, args)
+ value = {}
+ if args:
+ value['args'] = args
+ if state or not isinstance(state, dict):
+ value['state'] = state
+ if listitems:
+ value['listitems'] = listitems
+ if dictitems:
+ value['dictitems'] = dictitems
+ return self.represent_mapping(tag+function_name, value)
+
+Representer.add_representer(complex,
+ Representer.represent_complex)
+
+Representer.add_representer(tuple,
+ Representer.represent_tuple)
+
+Representer.add_representer(type,
+ Representer.represent_name)
+
+Representer.add_representer(types.FunctionType,
+ Representer.represent_name)
+
+Representer.add_representer(types.BuiltinFunctionType,
+ Representer.represent_name)
+
+Representer.add_representer(types.ModuleType,
+ Representer.represent_module)
+
+Representer.add_multi_representer(object,
+ Representer.represent_object)
+
diff --git a/collectors/python.d.plugin/python_modules/pyyaml3/resolver.py b/collectors/python.d.plugin/python_modules/pyyaml3/resolver.py
new file mode 100644
index 0000000..50945e0
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/pyyaml3/resolver.py
@@ -0,0 +1,233 @@
+# SPDX-License-Identifier: MIT
+
+__all__ = ['BaseResolver', 'Resolver']
+
+from .error import *
+from .nodes import *
+
+import re
+
+class ResolverError(YAMLError):
+ pass
+
+class BaseResolver:
+
+ DEFAULT_SCALAR_TAG = 'tag:yaml.org,2002:str'
+ DEFAULT_SEQUENCE_TAG = 'tag:yaml.org,2002:seq'
+ DEFAULT_MAPPING_TAG = 'tag:yaml.org,2002:map'
+
+ yaml_implicit_resolvers = {}
+ yaml_path_resolvers = {}
+
+ def __init__(self):
+ self.resolver_exact_paths = []
+ self.resolver_prefix_paths = []
+
+ @classmethod
+ def add_implicit_resolver(cls, tag, regexp, first):
+ if 'yaml_implicit_resolvers' not in cls.__dict__:
+ cls.yaml_implicit_resolvers = cls.yaml_implicit_resolvers.copy()
+ if first is None:
+ first = [None]
+ for ch in first:
+ cls.yaml_implicit_resolvers.setdefault(ch, []).append((tag, regexp))
+
+ @classmethod
+ def add_path_resolver(cls, tag, path, kind=None):
+ # Note: `add_path_resolver` is experimental. The API could be changed.
+ # `path` is a pattern that is matched against the path from the
+ # root to the node that is being considered. `path` elements are
+ # tuples `(node_check, index_check)`. `node_check` is a node class:
+ # `ScalarNode`, `SequenceNode`, `MappingNode` or `None`. `None`
+ # matches any kind of node. `index_check` could be `None`, a boolean
+ # value, a string value, or a number. `None` and `False` match against
+ # any _value_ of sequence and mapping nodes. `True` matches against
+ # any _key_ of a mapping node. A string `index_check` matches against
+ # a mapping value that corresponds to a scalar key whose content is
+ # equal to the `index_check` value. An integer `index_check` matches
+ # against a sequence value with an index equal to `index_check`.
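+ # Illustrative usage sketch (editor's addition, not upstream code):
+ # Resolver.add_path_resolver('tag:yaml.org,2002:int',
+ # [(MappingNode, 'port')], str)
+ # would tag the scalar value of any top-level 'port' key as !!int.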
+ if 'yaml_path_resolvers' not in cls.__dict__:
+ cls.yaml_path_resolvers = cls.yaml_path_resolvers.copy()
+ new_path = []
+ for element in path:
+ if isinstance(element, (list, tuple)):
+ if len(element) == 2:
+ node_check, index_check = element
+ elif len(element) == 1:
+ node_check = element[0]
+ index_check = True
+ else:
+ raise ResolverError("Invalid path element: %s" % element)
+ else:
+ node_check = None
+ index_check = element
+ if node_check is str:
+ node_check = ScalarNode
+ elif node_check is list:
+ node_check = SequenceNode
+ elif node_check is dict:
+ node_check = MappingNode
+ elif node_check not in [ScalarNode, SequenceNode, MappingNode] \
+ and not isinstance(node_check, str) \
+ and node_check is not None:
+ raise ResolverError("Invalid node checker: %s" % node_check)
+ if not isinstance(index_check, (str, int)) \
+ and index_check is not None:
+ raise ResolverError("Invalid index checker: %s" % index_check)
+ new_path.append((node_check, index_check))
+ if kind is str:
+ kind = ScalarNode
+ elif kind is list:
+ kind = SequenceNode
+ elif kind is dict:
+ kind = MappingNode
+ elif kind not in [ScalarNode, SequenceNode, MappingNode] \
+ and kind is not None:
+ raise ResolverError("Invalid node kind: %s" % kind)
+ cls.yaml_path_resolvers[tuple(new_path), kind] = tag
+
+ def descend_resolver(self, current_node, current_index):
+ if not self.yaml_path_resolvers:
+ return
+ exact_paths = {}
+ prefix_paths = []
+ if current_node:
+ depth = len(self.resolver_prefix_paths)
+ for path, kind in self.resolver_prefix_paths[-1]:
+ if self.check_resolver_prefix(depth, path, kind,
+ current_node, current_index):
+ if len(path) > depth:
+ prefix_paths.append((path, kind))
+ else:
+ exact_paths[kind] = self.yaml_path_resolvers[path, kind]
+ else:
+ for path, kind in self.yaml_path_resolvers:
+ if not path:
+ exact_paths[kind] = self.yaml_path_resolvers[path, kind]
+ else:
+ prefix_paths.append((path, kind))
+ self.resolver_exact_paths.append(exact_paths)
+ self.resolver_prefix_paths.append(prefix_paths)
+
+ def ascend_resolver(self):
+ if not self.yaml_path_resolvers:
+ return
+ self.resolver_exact_paths.pop()
+ self.resolver_prefix_paths.pop()
+
+ def check_resolver_prefix(self, depth, path, kind,
+ current_node, current_index):
+ node_check, index_check = path[depth-1]
+ if isinstance(node_check, str):
+ if current_node.tag != node_check:
+ return
+ elif node_check is not None:
+ if not isinstance(current_node, node_check):
+ return
+ if index_check is True and current_index is not None:
+ return
+ if (index_check is False or index_check is None) \
+ and current_index is None:
+ return
+ if isinstance(index_check, str):
+ if not (isinstance(current_index, ScalarNode)
+ and index_check == current_index.value):
+ return
+ elif isinstance(index_check, int) and not isinstance(index_check, bool):
+ if index_check != current_index:
+ return
+ return True
+
+ def resolve(self, kind, value, implicit):
+ if kind is ScalarNode and implicit[0]:
+ if value == '':
+ resolvers = self.yaml_implicit_resolvers.get('', [])
+ else:
+ resolvers = self.yaml_implicit_resolvers.get(value[0], [])
+ resolvers += self.yaml_implicit_resolvers.get(None, [])
+ for tag, regexp in resolvers:
+ if regexp.match(value):
+ return tag
+ implicit = implicit[1]
+ if self.yaml_path_resolvers:
+ exact_paths = self.resolver_exact_paths[-1]
+ if kind in exact_paths:
+ return exact_paths[kind]
+ if None in exact_paths:
+ return exact_paths[None]
+ if kind is ScalarNode:
+ return self.DEFAULT_SCALAR_TAG
+ elif kind is SequenceNode:
+ return self.DEFAULT_SEQUENCE_TAG
+ elif kind is MappingNode:
+ return self.DEFAULT_MAPPING_TAG
+
+class Resolver(BaseResolver):
+ pass
+
+Resolver.add_implicit_resolver(
+ 'tag:yaml.org,2002:bool',
+ re.compile(r'''^(?:yes|Yes|YES|no|No|NO
+ |true|True|TRUE|false|False|FALSE
+ |on|On|ON|off|Off|OFF)$''', re.X),
+ list('yYnNtTfFoO'))
+
+Resolver.add_implicit_resolver(
+ 'tag:yaml.org,2002:float',
+ re.compile(r'''^(?:[-+]?(?:[0-9][0-9_]*)\.[0-9_]*(?:[eE][-+][0-9]+)?
+ |\.[0-9_]+(?:[eE][-+][0-9]+)?
+ |[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+\.[0-9_]*
+ |[-+]?\.(?:inf|Inf|INF)
+ |\.(?:nan|NaN|NAN))$''', re.X),
+ list('-+0123456789.'))
+
+Resolver.add_implicit_resolver(
+ 'tag:yaml.org,2002:int',
+ re.compile(r'''^(?:[-+]?0b[0-1_]+
+ |[-+]?0[0-7_]+
+ |[-+]?(?:0|[1-9][0-9_]*)
+ |[-+]?0x[0-9a-fA-F_]+
+ |[-+]?[1-9][0-9_]*(?::[0-5]?[0-9])+)$''', re.X),
+ list('-+0123456789'))
+
+Resolver.add_implicit_resolver(
+ 'tag:yaml.org,2002:merge',
+ re.compile(r'^(?:<<)$'),
+ ['<'])
+
+Resolver.add_implicit_resolver(
+ 'tag:yaml.org,2002:null',
+ re.compile(r'''^(?: ~
+ |null|Null|NULL
+ | )$''', re.X),
+ ['~', 'n', 'N', ''])
+
+Resolver.add_implicit_resolver(
+ 'tag:yaml.org,2002:timestamp',
+ re.compile(r'''^(?:[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9]
+ |[0-9][0-9][0-9][0-9] -[0-9][0-9]? -[0-9][0-9]?
+ (?:[Tt]|[ \t]+)[0-9][0-9]?
+ :[0-9][0-9] :[0-9][0-9] (?:\.[0-9]*)?
+ (?:[ \t]*(?:Z|[-+][0-9][0-9]?(?::[0-9][0-9])?))?)$''', re.X),
+ list('0123456789'))
+
+Resolver.add_implicit_resolver(
+ 'tag:yaml.org,2002:value',
+ re.compile(r'^(?:=)$'),
+ ['='])
+
+# The following resolver is only for documentation purposes. It cannot work
+# because plain scalars cannot start with '!', '&', or '*'.
+Resolver.add_implicit_resolver(
+ 'tag:yaml.org,2002:yaml',
+ re.compile(r'^(?:!|&|\*)$'),
+ list('!&*'))
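+# Illustrative note (editor's addition, not upstream code): with the
+# resolvers above, the plain scalars '42', '4.2', 'yes' and '~' resolve
+# to the int, float, bool and null tags respectively; a plain scalar
+# matching no regexp falls back to DEFAULT_SCALAR_TAG, i.e. !!str.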
+
diff --git a/collectors/python.d.plugin/python_modules/pyyaml3/scanner.py b/collectors/python.d.plugin/python_modules/pyyaml3/scanner.py
new file mode 100644
index 0000000..b55854e
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/pyyaml3/scanner.py
@@ -0,0 +1,1460 @@
+# SPDX-License-Identifier: MIT
+
+# Scanner produces tokens of the following types:
+# STREAM-START
+# STREAM-END
+# DIRECTIVE(name, value)
+# DOCUMENT-START
+# DOCUMENT-END
+# BLOCK-SEQUENCE-START
+# BLOCK-MAPPING-START
+# BLOCK-END
+# FLOW-SEQUENCE-START
+# FLOW-MAPPING-START
+# FLOW-SEQUENCE-END
+# FLOW-MAPPING-END
+# BLOCK-ENTRY
+# FLOW-ENTRY
+# KEY
+# VALUE
+# ALIAS(value)
+# ANCHOR(value)
+# TAG(value)
+# SCALAR(value, plain, style)
+#
+# Read comments in the Scanner code for more details.
+#
+
+__all__ = ['Scanner', 'ScannerError']
+
+from .error import MarkedYAMLError
+from .tokens import *
+
+class ScannerError(MarkedYAMLError):
+ pass
+
+class SimpleKey:
+ # See the simple keys treatment below.
+
+ def __init__(self, token_number, required, index, line, column, mark):
+ self.token_number = token_number
+ self.required = required
+ self.index = index
+ self.line = line
+ self.column = column
+ self.mark = mark
+
+class Scanner:
+
+ def __init__(self):
+ """Initialize the scanner."""
+ # It is assumed that Scanner and Reader will have a common descendant.
+ # Reader does the dirty work of checking for the BOM and converting the
+ # input data to Unicode. It also adds NUL to the end.
+ #
+ # Reader supports the following methods
+ # self.peek(i=0) # peek the next i-th character
+ # self.prefix(l=1) # peek the next l characters
+ # self.forward(l=1) # read the next l characters and move the pointer.
+
+ # Have we reached the end of the stream?
+ self.done = False
+
+ # The number of unclosed '{' and '['. `flow_level == 0` means block
+ # context.
+ self.flow_level = 0
+
+ # List of processed tokens that are not yet emitted.
+ self.tokens = []
+
+ # Add the STREAM-START token.
+ self.fetch_stream_start()
+
+ # Number of tokens that were emitted through the `get_token` method.
+ self.tokens_taken = 0
+
+ # The current indentation level.
+ self.indent = -1
+
+ # Past indentation levels.
+ self.indents = []
+
+ # Variables related to simple keys treatment.
+
+ # A simple key is a key that is not denoted by the '?' indicator.
+ # Examples of simple keys:
+ # ---
+ # block simple key: value
+ # ? not a simple key:
+ # : { flow simple key: value }
+ # We emit the KEY token before all keys, so when we find a potential
+ # simple key, we try to locate the corresponding ':' indicator.
+ # Simple keys should be limited to a single line and 1024 characters.
+
+ # Can a simple key start at the current position? A simple key may
+ # start:
+ # - at the beginning of the line, not counting indentation spaces
+ # (in block context),
+ # - after '{', '[', ',' (in the flow context),
+ # - after '?', ':', '-' (in the block context).
+ # In the block context, this flag also signifies if a block collection
+ # may start at the current position.
+ self.allow_simple_key = True
+
+ # Keep track of possible simple keys. This is a dictionary. The key
+ # is `flow_level`; there can be no more than one possible simple key
+ # for each level. The value is a SimpleKey record:
+ # (token_number, required, index, line, column, mark)
+ # A simple key may start with ALIAS, ANCHOR, TAG, SCALAR(flow),
+ # '[', or '{' tokens.
+ self.possible_simple_keys = {}
+
+ # Public methods.
+
+ def check_token(self, *choices):
+ # Check if the next token is one of the given types.
+ while self.need_more_tokens():
+ self.fetch_more_tokens()
+ if self.tokens:
+ if not choices:
+ return True
+ for choice in choices:
+ if isinstance(self.tokens[0], choice):
+ return True
+ return False
+
+ def peek_token(self):
+ # Return the next token, but do not remove it from the queue.
+ while self.need_more_tokens():
+ self.fetch_more_tokens()
+ if self.tokens:
+ return self.tokens[0]
+
+ def get_token(self):
+ # Return the next token.
+ while self.need_more_tokens():
+ self.fetch_more_tokens()
+ if self.tokens:
+ self.tokens_taken += 1
+ return self.tokens.pop(0)
+
+ # Private methods.
+
+ def need_more_tokens(self):
+ if self.done:
+ return False
+ if not self.tokens:
+ return True
+ # The current token may be a potential simple key, so we
+ # need to look further.
+ self.stale_possible_simple_keys()
+ if self.next_possible_simple_key() == self.tokens_taken:
+ return True
+
+ def fetch_more_tokens(self):
+
+ # Eat whitespaces and comments until we reach the next token.
+ self.scan_to_next_token()
+
+ # Remove obsolete possible simple keys.
+ self.stale_possible_simple_keys()
+
+ # Compare the current indentation and column. It may add some tokens
+ # and decrease the current indentation level.
+ self.unwind_indent(self.column)
+
+ # Peek the next character.
+ ch = self.peek()
+
+ # Is it the end of stream?
+ if ch == '\0':
+ return self.fetch_stream_end()
+
+ # Is it a directive?
+ if ch == '%' and self.check_directive():
+ return self.fetch_directive()
+
+ # Is it the document start?
+ if ch == '-' and self.check_document_start():
+ return self.fetch_document_start()
+
+ # Is it the document end?
+ if ch == '.' and self.check_document_end():
+ return self.fetch_document_end()
+
+ # TODO: support for BOM within a stream.
+ #if ch == '\uFEFF':
+ # return self.fetch_bom() <-- issue BOMToken
+
+ # Note: the order of the following checks is NOT significant.
+
+ # Is it the flow sequence start indicator?
+ if ch == '[':
+ return self.fetch_flow_sequence_start()
+
+ # Is it the flow mapping start indicator?
+ if ch == '{':
+ return self.fetch_flow_mapping_start()
+
+ # Is it the flow sequence end indicator?
+ if ch == ']':
+ return self.fetch_flow_sequence_end()
+
+ # Is it the flow mapping end indicator?
+ if ch == '}':
+ return self.fetch_flow_mapping_end()
+
+ # Is it the flow entry indicator?
+ if ch == ',':
+ return self.fetch_flow_entry()
+
+ # Is it the block entry indicator?
+ if ch == '-' and self.check_block_entry():
+ return self.fetch_block_entry()
+
+ # Is it the key indicator?
+ if ch == '?' and self.check_key():
+ return self.fetch_key()
+
+ # Is it the value indicator?
+ if ch == ':' and self.check_value():
+ return self.fetch_value()
+
+ # Is it an alias?
+ if ch == '*':
+ return self.fetch_alias()
+
+ # Is it an anchor?
+ if ch == '&':
+ return self.fetch_anchor()
+
+ # Is it a tag?
+ if ch == '!':
+ return self.fetch_tag()
+
+ # Is it a literal scalar?
+ if ch == '|' and not self.flow_level:
+ return self.fetch_literal()
+
+ # Is it a folded scalar?
+ if ch == '>' and not self.flow_level:
+ return self.fetch_folded()
+
+ # Is it a single quoted scalar?
+ if ch == '\'':
+ return self.fetch_single()
+
+ # Is it a double quoted scalar?
+ if ch == '\"':
+ return self.fetch_double()
+
+ # It must be a plain scalar then.
+ if self.check_plain():
+ return self.fetch_plain()
+
+ # No? It's an error. Let's produce a nice error message.
+ raise ScannerError("while scanning for the next token", None,
+ "found character %r that cannot start any token" % ch,
+ self.get_mark())
+
+ # Simple keys treatment.
+
+ def next_possible_simple_key(self):
+ # Return the number of the nearest possible simple key. Actually we
+ # don't need to loop through the whole dictionary. We may replace it
+ # with the following code:
+ # if not self.possible_simple_keys:
+ # return None
+ # return self.possible_simple_keys[
+ # min(self.possible_simple_keys.keys())].token_number
+ min_token_number = None
+ for level in self.possible_simple_keys:
+ key = self.possible_simple_keys[level]
+ if min_token_number is None or key.token_number < min_token_number:
+ min_token_number = key.token_number
+ return min_token_number
+
+ def stale_possible_simple_keys(self):
+ # Remove entries that are no longer possible simple keys. According to
+ # the YAML specification, simple keys
+ # - should be limited to a single line,
+ # - should be no longer than 1024 characters.
+ # Disabling this procedure will allow simple keys of any length and
+ # height (may cause problems if indentation is broken though).
+ for level in list(self.possible_simple_keys):
+ key = self.possible_simple_keys[level]
+ if key.line != self.line \
+ or self.index-key.index > 1024:
+ if key.required:
+ raise ScannerError("while scanning a simple key", key.mark,
+ "could not found expected ':'", self.get_mark())
+ del self.possible_simple_keys[level]
+
+ def save_possible_simple_key(self):
+ # The next token may start a simple key. We check if it's possible
+ # and save its position. This function is called for
+ # ALIAS, ANCHOR, TAG, SCALAR(flow), '[', and '{'.
+
+ # Check if a simple key is required at the current position.
+ required = not self.flow_level and self.indent == self.column
+
+ # A simple key is required only if it is the first token in the current
+ # line. Therefore it is always allowed.
+ assert self.allow_simple_key or not required
+
+ # The next token might be a simple key. Let's save its number and
+ # position.
+ if self.allow_simple_key:
+ self.remove_possible_simple_key()
+ token_number = self.tokens_taken+len(self.tokens)
+ key = SimpleKey(token_number, required,
+ self.index, self.line, self.column, self.get_mark())
+ self.possible_simple_keys[self.flow_level] = key
+
+ def remove_possible_simple_key(self):
+ # Remove the saved possible key position at the current flow level.
+ if self.flow_level in self.possible_simple_keys:
+ key = self.possible_simple_keys[self.flow_level]
+
+ if key.required:
+ raise ScannerError("while scanning a simple key", key.mark,
+ "could not found expected ':'", self.get_mark())
+
+ del self.possible_simple_keys[self.flow_level]
+
+ # Indentation functions.
+
+ def unwind_indent(self, column):
+
+ ## In flow context, tokens should respect indentation.
+ ## Actually the condition should be `self.indent >= column` according to
+ ## the spec. But this condition will prohibit intuitively correct
+ ## constructions such as
+ ## key : {
+ ## }
+ #if self.flow_level and self.indent > column:
+ # raise ScannerError(None, None,
+ # "invalid intendation or unclosed '[' or '{'",
+ # self.get_mark())
+
+ # In the flow context, indentation is ignored. We make the scanner less
+ # restrictive than the specification requires.
+ if self.flow_level:
+ return
+
+ # In block context, we may need to issue the BLOCK-END tokens.
+ while self.indent > column:
+ mark = self.get_mark()
+ self.indent = self.indents.pop()
+ self.tokens.append(BlockEndToken(mark, mark))
+
+ def add_indent(self, column):
+ # Check if we need to increase indentation.
+ if self.indent < column:
+ self.indents.append(self.indent)
+ self.indent = column
+ return True
+ return False
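+ # Illustrative note (editor's addition, not upstream code): for a block
+ # mapping nested at columns 0 and 2, add_indent pushes -1 and then 0
+ # onto self.indents; a later unwind_indent(0) pops back to column 0,
+ # emitting one BLOCK-END token per level closed.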
+
+ # Fetchers.
+
+ def fetch_stream_start(self):
+ # We always add STREAM-START as the first token and STREAM-END as the
+ # last token.
+
+ # Read the token.
+ mark = self.get_mark()
+
+ # Add STREAM-START.
+ self.tokens.append(StreamStartToken(mark, mark,
+ encoding=self.encoding))
+
+
+ def fetch_stream_end(self):
+
+ # Set the current indentation to -1.
+ self.unwind_indent(-1)
+
+ # Reset simple keys.
+ self.remove_possible_simple_key()
+ self.allow_simple_key = False
+ self.possible_simple_keys = {}
+
+ # Read the token.
+ mark = self.get_mark()
+
+ # Add STREAM-END.
+ self.tokens.append(StreamEndToken(mark, mark))
+
+ # The stream is finished.
+ self.done = True
+
+ def fetch_directive(self):
+
+ # Set the current indentation to -1.
+ self.unwind_indent(-1)
+
+ # Reset simple keys.
+ self.remove_possible_simple_key()
+ self.allow_simple_key = False
+
+ # Scan and add DIRECTIVE.
+ self.tokens.append(self.scan_directive())
+
+ def fetch_document_start(self):
+ self.fetch_document_indicator(DocumentStartToken)
+
+ def fetch_document_end(self):
+ self.fetch_document_indicator(DocumentEndToken)
+
+ def fetch_document_indicator(self, TokenClass):
+
+ # Set the current indentation to -1.
+ self.unwind_indent(-1)
+
+ # Reset simple keys. Note that there cannot be a block collection
+ # after '---'.
+ self.remove_possible_simple_key()
+ self.allow_simple_key = False
+
+ # Add DOCUMENT-START or DOCUMENT-END.
+ start_mark = self.get_mark()
+ self.forward(3)
+ end_mark = self.get_mark()
+ self.tokens.append(TokenClass(start_mark, end_mark))
+
+ def fetch_flow_sequence_start(self):
+ self.fetch_flow_collection_start(FlowSequenceStartToken)
+
+ def fetch_flow_mapping_start(self):
+ self.fetch_flow_collection_start(FlowMappingStartToken)
+
+ def fetch_flow_collection_start(self, TokenClass):
+
+ # '[' and '{' may start a simple key.
+ self.save_possible_simple_key()
+
+ # Increase the flow level.
+ self.flow_level += 1
+
+ # Simple keys are allowed after '[' and '{'.
+ self.allow_simple_key = True
+
+ # Add FLOW-SEQUENCE-START or FLOW-MAPPING-START.
+ start_mark = self.get_mark()
+ self.forward()
+ end_mark = self.get_mark()
+ self.tokens.append(TokenClass(start_mark, end_mark))
+
+ def fetch_flow_sequence_end(self):
+ self.fetch_flow_collection_end(FlowSequenceEndToken)
+
+ def fetch_flow_mapping_end(self):
+ self.fetch_flow_collection_end(FlowMappingEndToken)
+
+ def fetch_flow_collection_end(self, TokenClass):
+
+ # Reset possible simple key on the current level.
+ self.remove_possible_simple_key()
+
+ # Decrease the flow level.
+ self.flow_level -= 1
+
+ # No simple keys after ']' or '}'.
+ self.allow_simple_key = False
+
+ # Add FLOW-SEQUENCE-END or FLOW-MAPPING-END.
+ start_mark = self.get_mark()
+ self.forward()
+ end_mark = self.get_mark()
+ self.tokens.append(TokenClass(start_mark, end_mark))
+
+ def fetch_flow_entry(self):
+
+ # Simple keys are allowed after ','.
+ self.allow_simple_key = True
+
+ # Reset possible simple key on the current level.
+ self.remove_possible_simple_key()
+
+ # Add FLOW-ENTRY.
+ start_mark = self.get_mark()
+ self.forward()
+ end_mark = self.get_mark()
+ self.tokens.append(FlowEntryToken(start_mark, end_mark))
+
+ def fetch_block_entry(self):
+
+ # Block context needs additional checks.
+ if not self.flow_level:
+
+ # Are we allowed to start a new entry?
+ if not self.allow_simple_key:
+ raise ScannerError(None, None,
+ "sequence entries are not allowed here",
+ self.get_mark())
+
+ # We may need to add BLOCK-SEQUENCE-START.
+ if self.add_indent(self.column):
+ mark = self.get_mark()
+ self.tokens.append(BlockSequenceStartToken(mark, mark))
+
+ # It's an error for the block entry to occur in the flow context,
+ # but we let the parser detect this.
+ else:
+ pass
+
+ # Simple keys are allowed after '-'.
+ self.allow_simple_key = True
+
+ # Reset possible simple key on the current level.
+ self.remove_possible_simple_key()
+
+ # Add BLOCK-ENTRY.
+ start_mark = self.get_mark()
+ self.forward()
+ end_mark = self.get_mark()
+ self.tokens.append(BlockEntryToken(start_mark, end_mark))
+
+ def fetch_key(self):
+
+ # Block context needs additional checks.
+ if not self.flow_level:
+
+ # Are we allowed to start a key (not necessarily a simple one)?
+ if not self.allow_simple_key:
+ raise ScannerError(None, None,
+ "mapping keys are not allowed here",
+ self.get_mark())
+
+ # We may need to add BLOCK-MAPPING-START.
+ if self.add_indent(self.column):
+ mark = self.get_mark()
+ self.tokens.append(BlockMappingStartToken(mark, mark))
+
+ # Simple keys are allowed after '?' in the block context.
+ self.allow_simple_key = not self.flow_level
+
+ # Reset possible simple key on the current level.
+ self.remove_possible_simple_key()
+
+ # Add KEY.
+ start_mark = self.get_mark()
+ self.forward()
+ end_mark = self.get_mark()
+ self.tokens.append(KeyToken(start_mark, end_mark))
+
+ def fetch_value(self):
+
+ # Does this ':' complete a pending simple key?
+ if self.flow_level in self.possible_simple_keys:
+
+ # Add KEY.
+ key = self.possible_simple_keys[self.flow_level]
+ del self.possible_simple_keys[self.flow_level]
+ self.tokens.insert(key.token_number-self.tokens_taken,
+ KeyToken(key.mark, key.mark))
+
+ # If this key starts a new block mapping, we need to add
+ # BLOCK-MAPPING-START.
+ if not self.flow_level:
+ if self.add_indent(key.column):
+ self.tokens.insert(key.token_number-self.tokens_taken,
+ BlockMappingStartToken(key.mark, key.mark))
+
+ # There cannot be two simple keys one after another.
+ self.allow_simple_key = False
+
+ # It must be a part of a complex key.
+ else:
+
+ # Block context needs additional checks.
+ # (Do we really need them? They will be caught by the parser
+ # anyway.)
+ if not self.flow_level:
+
+ # We are allowed to start a complex value if and only if
+ # we can start a simple key.
+ if not self.allow_simple_key:
+ raise ScannerError(None, None,
+ "mapping values are not allowed here",
+ self.get_mark())
+
+ # If this value starts a new block mapping, we need to add
+ # BLOCK-MAPPING-START. It will be detected as an error later by
+ # the parser.
+ if not self.flow_level:
+ if self.add_indent(self.column):
+ mark = self.get_mark()
+ self.tokens.append(BlockMappingStartToken(mark, mark))
+
+ # Simple keys are allowed after ':' in the block context.
+ self.allow_simple_key = not self.flow_level
+
+ # Reset possible simple key on the current level.
+ self.remove_possible_simple_key()
+
+ # Add VALUE.
+ start_mark = self.get_mark()
+ self.forward()
+ end_mark = self.get_mark()
+ self.tokens.append(ValueToken(start_mark, end_mark))
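+ # Illustrative note (editor's addition, not upstream code): for the
+ # input "a: 1", fetch_plain() first records 'a' as a possible simple
+ # key; when ':' arrives, the branch above retroactively inserts KEY
+ # (and BLOCK-MAPPING-START) before the already-queued SCALAR token.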
+
+ def fetch_alias(self):
+
+ # ALIAS could be a simple key.
+ self.save_possible_simple_key()
+
+ # No simple keys after ALIAS.
+ self.allow_simple_key = False
+
+ # Scan and add ALIAS.
+ self.tokens.append(self.scan_anchor(AliasToken))
+
+ def fetch_anchor(self):
+
+ # ANCHOR could start a simple key.
+ self.save_possible_simple_key()
+
+ # No simple keys after ANCHOR.
+ self.allow_simple_key = False
+
+ # Scan and add ANCHOR.
+ self.tokens.append(self.scan_anchor(AnchorToken))
+
+ def fetch_tag(self):
+
+ # TAG could start a simple key.
+ self.save_possible_simple_key()
+
+ # No simple keys after TAG.
+ self.allow_simple_key = False
+
+ # Scan and add TAG.
+ self.tokens.append(self.scan_tag())
+
+ def fetch_literal(self):
+ self.fetch_block_scalar(style='|')
+
+ def fetch_folded(self):
+ self.fetch_block_scalar(style='>')
+
+ def fetch_block_scalar(self, style):
+
+ # A simple key may follow a block scalar.
+ self.allow_simple_key = True
+
+ # Reset possible simple key on the current level.
+ self.remove_possible_simple_key()
+
+ # Scan and add SCALAR.
+ self.tokens.append(self.scan_block_scalar(style))
+
+ def fetch_single(self):
+ self.fetch_flow_scalar(style='\'')
+
+ def fetch_double(self):
+ self.fetch_flow_scalar(style='"')
+
+ def fetch_flow_scalar(self, style):
+
+ # A flow scalar could be a simple key.
+ self.save_possible_simple_key()
+
+ # No simple keys after flow scalars.
+ self.allow_simple_key = False
+
+ # Scan and add SCALAR.
+ self.tokens.append(self.scan_flow_scalar(style))
+
+ def fetch_plain(self):
+
+ # A plain scalar could be a simple key.
+ self.save_possible_simple_key()
+
+ # No simple keys after plain scalars. But note that `scan_plain` will
+ # change this flag if the scan is finished at the beginning of the
+ # line.
+ self.allow_simple_key = False
+
+ # Scan and add SCALAR. May change `allow_simple_key`.
+ self.tokens.append(self.scan_plain())
+
+ # Checkers.
+
+ def check_directive(self):
+
+ # DIRECTIVE: ^ '%' ...
+ # The '%' indicator is already checked.
+ if self.column == 0:
+ return True
+
+ def check_document_start(self):
+
+ # DOCUMENT-START: ^ '---' (' '|'\n')
+ if self.column == 0:
+ if self.prefix(3) == '---' \
+ and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029':
+ return True
+
+ def check_document_end(self):
+
+ # DOCUMENT-END: ^ '...' (' '|'\n')
+ if self.column == 0:
+ if self.prefix(3) == '...' \
+ and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029':
+ return True
+
+ def check_block_entry(self):
+
+ # BLOCK-ENTRY: '-' (' '|'\n')
+ return self.peek(1) in '\0 \t\r\n\x85\u2028\u2029'
+
+ def check_key(self):
+
+ # KEY(flow context): '?'
+ if self.flow_level:
+ return True
+
+ # KEY(block context): '?' (' '|'\n')
+ else:
+ return self.peek(1) in '\0 \t\r\n\x85\u2028\u2029'
+
+ def check_value(self):
+
+ # VALUE(flow context): ':'
+ if self.flow_level:
+ return True
+
+ # VALUE(block context): ':' (' '|'\n')
+ else:
+ return self.peek(1) in '\0 \t\r\n\x85\u2028\u2029'
+
+ def check_plain(self):
+
+ # A plain scalar may start with any non-space character except:
+ # '-', '?', ':', ',', '[', ']', '{', '}',
+ # '#', '&', '*', '!', '|', '>', '\'', '\"',
+ # '%', '@', '`'.
+ #
+ # It may also start with
+ # '-', '?', ':'
+ # if it is followed by a non-space character.
+ #
+ # Note that we limit the last rule to the block context (except the
+ # '-' character) because we want the flow context to be space
+ # independent.
+ ch = self.peek()
+ return ch not in '\0 \t\r\n\x85\u2028\u2029-?:,[]{}#&*!|>\'\"%@`' \
+ or (self.peek(1) not in '\0 \t\r\n\x85\u2028\u2029'
+ and (ch == '-' or (not self.flow_level and ch in '?:')))
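+ # Illustrative note (editor's addition, not upstream code): by the rule
+ # above, '-foo' (and, in block context, ':foo') starts a plain scalar,
+ # while '- foo' is a block entry and ': foo' a value indicator.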
+
+ # Scanners.
+
+ def scan_to_next_token(self):
+ # We ignore spaces, line breaks and comments.
+ # If we find a line break in the block context, we set the flag
+ # `allow_simple_key` on.
+ # The byte order mark is stripped if it's the first character in the
+ # stream. We do not yet support BOM inside the stream as the
+ # specification requires. Any such mark will be considered part
+ # of the document.
+ #
+ # TODO: We need to make tab handling rules more sane. A good rule is
+ # Tabs cannot precede tokens
+ # BLOCK-SEQUENCE-START, BLOCK-MAPPING-START, BLOCK-END,
+ # KEY(block), VALUE(block), BLOCK-ENTRY
+ # So the checking code is
+ # if <TAB>:
+ # self.allow_simple_keys = False
+ # We also need to add the check for `allow_simple_keys == True` to
+ # `unwind_indent` before issuing BLOCK-END.
+ # Scanners for block, flow, and plain scalars need to be modified.
+
+ if self.index == 0 and self.peek() == '\uFEFF':
+ self.forward()
+ found = False
+ while not found:
+ while self.peek() == ' ':
+ self.forward()
+ if self.peek() == '#':
+ while self.peek() not in '\0\r\n\x85\u2028\u2029':
+ self.forward()
+ if self.scan_line_break():
+ if not self.flow_level:
+ self.allow_simple_key = True
+ else:
+ found = True
+
+ def scan_directive(self):
+ # See the specification for details.
+ start_mark = self.get_mark()
+ self.forward()
+ name = self.scan_directive_name(start_mark)
+ value = None
+ if name == 'YAML':
+ value = self.scan_yaml_directive_value(start_mark)
+ end_mark = self.get_mark()
+ elif name == 'TAG':
+ value = self.scan_tag_directive_value(start_mark)
+ end_mark = self.get_mark()
+ else:
+ end_mark = self.get_mark()
+ while self.peek() not in '\0\r\n\x85\u2028\u2029':
+ self.forward()
+ self.scan_directive_ignored_line(start_mark)
+ return DirectiveToken(name, value, start_mark, end_mark)
+
+ def scan_directive_name(self, start_mark):
+ # See the specification for details.
+ length = 0
+ ch = self.peek(length)
+ while '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \
+ or ch in '-_':
+ length += 1
+ ch = self.peek(length)
+ if not length:
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected alphabetic or numeric character, but found %r"
+ % ch, self.get_mark())
+ value = self.prefix(length)
+ self.forward(length)
+ ch = self.peek()
+ if ch not in '\0 \r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected alphabetic or numeric character, but found %r"
+ % ch, self.get_mark())
+ return value
+
+ def scan_yaml_directive_value(self, start_mark):
+ # See the specification for details.
+ while self.peek() == ' ':
+ self.forward()
+ major = self.scan_yaml_directive_number(start_mark)
+ if self.peek() != '.':
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected a digit or '.', but found %r" % self.peek(),
+ self.get_mark())
+ self.forward()
+ minor = self.scan_yaml_directive_number(start_mark)
+ if self.peek() not in '\0 \r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected a digit or ' ', but found %r" % self.peek(),
+ self.get_mark())
+ return (major, minor)
+
+ def scan_yaml_directive_number(self, start_mark):
+ # See the specification for details.
+ ch = self.peek()
+ if not ('0' <= ch <= '9'):
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected a digit, but found %r" % ch, self.get_mark())
+ length = 0
+ while '0' <= self.peek(length) <= '9':
+ length += 1
+ value = int(self.prefix(length))
+ self.forward(length)
+ return value
+
+ def scan_tag_directive_value(self, start_mark):
+ # See the specification for details.
+ while self.peek() == ' ':
+ self.forward()
+ handle = self.scan_tag_directive_handle(start_mark)
+ while self.peek() == ' ':
+ self.forward()
+ prefix = self.scan_tag_directive_prefix(start_mark)
+ return (handle, prefix)
+
+ def scan_tag_directive_handle(self, start_mark):
+ # See the specification for details.
+ value = self.scan_tag_handle('directive', start_mark)
+ ch = self.peek()
+ if ch != ' ':
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected ' ', but found %r" % ch, self.get_mark())
+ return value
+
+ def scan_tag_directive_prefix(self, start_mark):
+ # See the specification for details.
+ value = self.scan_tag_uri('directive', start_mark)
+ ch = self.peek()
+ if ch not in '\0 \r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected ' ', but found %r" % ch, self.get_mark())
+ return value
+
+ def scan_directive_ignored_line(self, start_mark):
+ # See the specification for details.
+ while self.peek() == ' ':
+ self.forward()
+ if self.peek() == '#':
+ while self.peek() not in '\0\r\n\x85\u2028\u2029':
+ self.forward()
+ ch = self.peek()
+ if ch not in '\0\r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected a comment or a line break, but found %r"
+ % ch, self.get_mark())
+ self.scan_line_break()
+
+ def scan_anchor(self, TokenClass):
+ # The specification does not restrict characters for anchors and
+ # aliases. This may lead to problems, for instance, the document:
+ # [ *alias, value ]
+        # can be interpreted in two ways, as
+ # [ "value" ]
+ # and
+ # [ *alias , "value" ]
+ # Therefore we restrict aliases to numbers and ASCII letters.
+ start_mark = self.get_mark()
+ indicator = self.peek()
+ if indicator == '*':
+ name = 'alias'
+ else:
+ name = 'anchor'
+ self.forward()
+ length = 0
+ ch = self.peek(length)
+ while '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \
+ or ch in '-_':
+ length += 1
+ ch = self.peek(length)
+ if not length:
+ raise ScannerError("while scanning an %s" % name, start_mark,
+ "expected alphabetic or numeric character, but found %r"
+ % ch, self.get_mark())
+ value = self.prefix(length)
+ self.forward(length)
+ ch = self.peek()
+ if ch not in '\0 \t\r\n\x85\u2028\u2029?:,]}%@`':
+ raise ScannerError("while scanning an %s" % name, start_mark,
+ "expected alphabetic or numeric character, but found %r"
+ % ch, self.get_mark())
+ end_mark = self.get_mark()
+ return TokenClass(value, start_mark, end_mark)
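+
+    # For example, '&a1' is scanned into AnchorToken('a1', ...) and '*a1'
+    # into AliasToken('a1', ...), depending on the TokenClass passed in.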
+
+ def scan_tag(self):
+ # See the specification for details.
+ start_mark = self.get_mark()
+ ch = self.peek(1)
+ if ch == '<':
+ handle = None
+ self.forward(2)
+ suffix = self.scan_tag_uri('tag', start_mark)
+ if self.peek() != '>':
+ raise ScannerError("while parsing a tag", start_mark,
+ "expected '>', but found %r" % self.peek(),
+ self.get_mark())
+ self.forward()
+ elif ch in '\0 \t\r\n\x85\u2028\u2029':
+ handle = None
+ suffix = '!'
+ self.forward()
+ else:
+ length = 1
+ use_handle = False
+ while ch not in '\0 \r\n\x85\u2028\u2029':
+ if ch == '!':
+ use_handle = True
+ break
+ length += 1
+ ch = self.peek(length)
+ if use_handle:
+ handle = self.scan_tag_handle('tag', start_mark)
+ else:
+ handle = '!'
+ self.forward()
+ suffix = self.scan_tag_uri('tag', start_mark)
+ ch = self.peek()
+ if ch not in '\0 \r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a tag", start_mark,
+ "expected ' ', but found %r" % ch, self.get_mark())
+ value = (handle, suffix)
+ end_mark = self.get_mark()
+ return TagToken(value, start_mark, end_mark)
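+
+    # For example, '!!str' is scanned into TagToken(('!!', 'str'), ...),
+    # while the verbatim form '!<tag:yaml.org,2002:str>' becomes
+    # TagToken((None, 'tag:yaml.org,2002:str'), ...).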
+
+ def scan_block_scalar(self, style):
+ # See the specification for details.
+
+ if style == '>':
+ folded = True
+ else:
+ folded = False
+
+ chunks = []
+ start_mark = self.get_mark()
+
+ # Scan the header.
+ self.forward()
+ chomping, increment = self.scan_block_scalar_indicators(start_mark)
+ self.scan_block_scalar_ignored_line(start_mark)
+
+ # Determine the indentation level and go to the first non-empty line.
+ min_indent = self.indent+1
+ if min_indent < 1:
+ min_indent = 1
+ if increment is None:
+ breaks, max_indent, end_mark = self.scan_block_scalar_indentation()
+ indent = max(min_indent, max_indent)
+ else:
+ indent = min_indent+increment-1
+ breaks, end_mark = self.scan_block_scalar_breaks(indent)
+ line_break = ''
+
+ # Scan the inner part of the block scalar.
+ while self.column == indent and self.peek() != '\0':
+ chunks.extend(breaks)
+ leading_non_space = self.peek() not in ' \t'
+ length = 0
+ while self.peek(length) not in '\0\r\n\x85\u2028\u2029':
+ length += 1
+ chunks.append(self.prefix(length))
+ self.forward(length)
+ line_break = self.scan_line_break()
+ breaks, end_mark = self.scan_block_scalar_breaks(indent)
+ if self.column == indent and self.peek() != '\0':
+
+ # Unfortunately, folding rules are ambiguous.
+ #
+ # This is the folding according to the specification:
+
+ if folded and line_break == '\n' \
+ and leading_non_space and self.peek() not in ' \t':
+ if not breaks:
+ chunks.append(' ')
+ else:
+ chunks.append(line_break)
+
+ # This is Clark Evans's interpretation (also in the spec
+ # examples):
+ #
+ #if folded and line_break == '\n':
+ # if not breaks:
+ # if self.peek() not in ' \t':
+ # chunks.append(' ')
+ # else:
+ # chunks.append(line_break)
+ #else:
+ # chunks.append(line_break)
+ else:
+ break
+
+ # Chomp the tail.
+ if chomping is not False:
+ chunks.append(line_break)
+ if chomping is True:
+ chunks.extend(breaks)
+
+ # We are done.
+ return ScalarToken(''.join(chunks), False, start_mark, end_mark,
+ style)
+
+ def scan_block_scalar_indicators(self, start_mark):
+ # See the specification for details.
+ chomping = None
+ increment = None
+ ch = self.peek()
+ if ch in '+-':
+ if ch == '+':
+ chomping = True
+ else:
+ chomping = False
+ self.forward()
+ ch = self.peek()
+ if ch in '0123456789':
+ increment = int(ch)
+ if increment == 0:
+ raise ScannerError("while scanning a block scalar", start_mark,
+ "expected indentation indicator in the range 1-9, but found 0",
+ self.get_mark())
+ self.forward()
+ elif ch in '0123456789':
+ increment = int(ch)
+ if increment == 0:
+ raise ScannerError("while scanning a block scalar", start_mark,
+ "expected indentation indicator in the range 1-9, but found 0",
+ self.get_mark())
+ self.forward()
+ ch = self.peek()
+ if ch in '+-':
+ if ch == '+':
+ chomping = True
+ else:
+ chomping = False
+ self.forward()
+ ch = self.peek()
+ if ch not in '\0 \r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a block scalar", start_mark,
+ "expected chomping or indentation indicators, but found %r"
+ % ch, self.get_mark())
+ return chomping, increment
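+
+    # For example, after the '|' of a '|2-' header has been consumed, this
+    # method returns (chomping=False, increment=2); a bare '>' header
+    # returns (None, None).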
+
+ def scan_block_scalar_ignored_line(self, start_mark):
+ # See the specification for details.
+ while self.peek() == ' ':
+ self.forward()
+ if self.peek() == '#':
+ while self.peek() not in '\0\r\n\x85\u2028\u2029':
+ self.forward()
+ ch = self.peek()
+ if ch not in '\0\r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a block scalar", start_mark,
+ "expected a comment or a line break, but found %r" % ch,
+ self.get_mark())
+ self.scan_line_break()
+
+ def scan_block_scalar_indentation(self):
+ # See the specification for details.
+ chunks = []
+ max_indent = 0
+ end_mark = self.get_mark()
+ while self.peek() in ' \r\n\x85\u2028\u2029':
+ if self.peek() != ' ':
+ chunks.append(self.scan_line_break())
+ end_mark = self.get_mark()
+ else:
+ self.forward()
+ if self.column > max_indent:
+ max_indent = self.column
+ return chunks, max_indent, end_mark
+
+ def scan_block_scalar_breaks(self, indent):
+ # See the specification for details.
+ chunks = []
+ end_mark = self.get_mark()
+ while self.column < indent and self.peek() == ' ':
+ self.forward()
+ while self.peek() in '\r\n\x85\u2028\u2029':
+ chunks.append(self.scan_line_break())
+ end_mark = self.get_mark()
+ while self.column < indent and self.peek() == ' ':
+ self.forward()
+ return chunks, end_mark
+
+ def scan_flow_scalar(self, style):
+ # See the specification for details.
+        # Note that we loosen the indentation rules for quoted scalars: quoted
+        # scalars don't need to adhere to indentation because " and ' clearly
+        # mark where they begin and end. Therefore we are less restrictive
+        # than the specification requires. We only need to check that
+        # document separators are not included in scalars.
+ if style == '"':
+ double = True
+ else:
+ double = False
+ chunks = []
+ start_mark = self.get_mark()
+ quote = self.peek()
+ self.forward()
+ chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark))
+ while self.peek() != quote:
+ chunks.extend(self.scan_flow_scalar_spaces(double, start_mark))
+ chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark))
+ self.forward()
+ end_mark = self.get_mark()
+ return ScalarToken(''.join(chunks), False, start_mark, end_mark,
+ style)
+
+ ESCAPE_REPLACEMENTS = {
+ '0': '\0',
+ 'a': '\x07',
+ 'b': '\x08',
+ 't': '\x09',
+ '\t': '\x09',
+ 'n': '\x0A',
+ 'v': '\x0B',
+ 'f': '\x0C',
+ 'r': '\x0D',
+ 'e': '\x1B',
+ ' ': '\x20',
+ '\"': '\"',
+ '\\': '\\',
+ 'N': '\x85',
+ '_': '\xA0',
+ 'L': '\u2028',
+ 'P': '\u2029',
+ }
+
+ ESCAPE_CODES = {
+ 'x': 2,
+ 'u': 4,
+ 'U': 8,
+ }
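+
+    # Together these tables drive escape decoding in
+    # scan_flow_scalar_non_spaces() below: ESCAPE_REPLACEMENTS maps
+    # single-character escapes, and ESCAPE_CODES gives the number of hex
+    # digits to read. For example, '\n' in a double-quoted scalar becomes
+    # '\x0A', and '\x41' is decoded as chr(int('41', 16)) == 'A'.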
+
+ def scan_flow_scalar_non_spaces(self, double, start_mark):
+ # See the specification for details.
+ chunks = []
+ while True:
+ length = 0
+ while self.peek(length) not in '\'\"\\\0 \t\r\n\x85\u2028\u2029':
+ length += 1
+ if length:
+ chunks.append(self.prefix(length))
+ self.forward(length)
+ ch = self.peek()
+ if not double and ch == '\'' and self.peek(1) == '\'':
+ chunks.append('\'')
+ self.forward(2)
+ elif (double and ch == '\'') or (not double and ch in '\"\\'):
+ chunks.append(ch)
+ self.forward()
+ elif double and ch == '\\':
+ self.forward()
+ ch = self.peek()
+ if ch in self.ESCAPE_REPLACEMENTS:
+ chunks.append(self.ESCAPE_REPLACEMENTS[ch])
+ self.forward()
+ elif ch in self.ESCAPE_CODES:
+ length = self.ESCAPE_CODES[ch]
+ self.forward()
+ for k in range(length):
+ if self.peek(k) not in '0123456789ABCDEFabcdef':
+ raise ScannerError("while scanning a double-quoted scalar", start_mark,
+                                    "expected escape sequence of %d hexadecimal digits, but found %r" %
+ (length, self.peek(k)), self.get_mark())
+ code = int(self.prefix(length), 16)
+ chunks.append(chr(code))
+ self.forward(length)
+ elif ch in '\r\n\x85\u2028\u2029':
+ self.scan_line_break()
+ chunks.extend(self.scan_flow_scalar_breaks(double, start_mark))
+ else:
+ raise ScannerError("while scanning a double-quoted scalar", start_mark,
+ "found unknown escape character %r" % ch, self.get_mark())
+ else:
+ return chunks
+
+ def scan_flow_scalar_spaces(self, double, start_mark):
+ # See the specification for details.
+ chunks = []
+ length = 0
+ while self.peek(length) in ' \t':
+ length += 1
+ whitespaces = self.prefix(length)
+ self.forward(length)
+ ch = self.peek()
+ if ch == '\0':
+ raise ScannerError("while scanning a quoted scalar", start_mark,
+ "found unexpected end of stream", self.get_mark())
+ elif ch in '\r\n\x85\u2028\u2029':
+ line_break = self.scan_line_break()
+ breaks = self.scan_flow_scalar_breaks(double, start_mark)
+ if line_break != '\n':
+ chunks.append(line_break)
+ elif not breaks:
+ chunks.append(' ')
+ chunks.extend(breaks)
+ else:
+ chunks.append(whitespaces)
+ return chunks
+
+ def scan_flow_scalar_breaks(self, double, start_mark):
+ # See the specification for details.
+ chunks = []
+ while True:
+ # Instead of checking indentation, we check for document
+ # separators.
+ prefix = self.prefix(3)
+ if (prefix == '---' or prefix == '...') \
+ and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a quoted scalar", start_mark,
+ "found unexpected document separator", self.get_mark())
+ while self.peek() in ' \t':
+ self.forward()
+ if self.peek() in '\r\n\x85\u2028\u2029':
+ chunks.append(self.scan_line_break())
+ else:
+ return chunks
+
+ def scan_plain(self):
+ # See the specification for details.
+ # We add an additional restriction for the flow context:
+ # plain scalars in the flow context cannot contain ',', ':' and '?'.
+ # We also keep track of the `allow_simple_key` flag here.
+        # Indentation rules are loosened for the flow context.
+ chunks = []
+ start_mark = self.get_mark()
+ end_mark = start_mark
+ indent = self.indent+1
+ # We allow zero indentation for scalars, but then we need to check for
+ # document separators at the beginning of the line.
+ #if indent == 0:
+ # indent = 1
+ spaces = []
+ while True:
+ length = 0
+ if self.peek() == '#':
+ break
+ while True:
+ ch = self.peek(length)
+ if ch in '\0 \t\r\n\x85\u2028\u2029' \
+ or (not self.flow_level and ch == ':' and
+ self.peek(length+1) in '\0 \t\r\n\x85\u2028\u2029') \
+ or (self.flow_level and ch in ',:?[]{}'):
+ break
+ length += 1
+ # It's not clear what we should do with ':' in the flow context.
+ if (self.flow_level and ch == ':'
+ and self.peek(length+1) not in '\0 \t\r\n\x85\u2028\u2029,[]{}'):
+ self.forward(length)
+ raise ScannerError("while scanning a plain scalar", start_mark,
+ "found unexpected ':'", self.get_mark(),
+ "Please check http://pyyaml.org/wiki/YAMLColonInFlowContext for details.")
+ if length == 0:
+ break
+ self.allow_simple_key = False
+ chunks.extend(spaces)
+ chunks.append(self.prefix(length))
+ self.forward(length)
+ end_mark = self.get_mark()
+ spaces = self.scan_plain_spaces(indent, start_mark)
+ if not spaces or self.peek() == '#' \
+ or (not self.flow_level and self.column < indent):
+ break
+ return ScalarToken(''.join(chunks), True, start_mark, end_mark)
+
+ def scan_plain_spaces(self, indent, start_mark):
+ # See the specification for details.
+ # The specification is really confusing about tabs in plain scalars.
+ # We just forbid them completely. Do not use tabs in YAML!
+ chunks = []
+ length = 0
+ while self.peek(length) in ' ':
+ length += 1
+ whitespaces = self.prefix(length)
+ self.forward(length)
+ ch = self.peek()
+ if ch in '\r\n\x85\u2028\u2029':
+ line_break = self.scan_line_break()
+ self.allow_simple_key = True
+ prefix = self.prefix(3)
+ if (prefix == '---' or prefix == '...') \
+ and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029':
+ return
+ breaks = []
+ while self.peek() in ' \r\n\x85\u2028\u2029':
+ if self.peek() == ' ':
+ self.forward()
+ else:
+ breaks.append(self.scan_line_break())
+ prefix = self.prefix(3)
+ if (prefix == '---' or prefix == '...') \
+ and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029':
+ return
+ if line_break != '\n':
+ chunks.append(line_break)
+ elif not breaks:
+ chunks.append(' ')
+ chunks.extend(breaks)
+ elif whitespaces:
+ chunks.append(whitespaces)
+ return chunks
+
+ def scan_tag_handle(self, name, start_mark):
+ # See the specification for details.
+        # For some strange reason, the specification does not allow '_' in
+        # tag handles. I have allowed it anyway.
+ ch = self.peek()
+ if ch != '!':
+ raise ScannerError("while scanning a %s" % name, start_mark,
+ "expected '!', but found %r" % ch, self.get_mark())
+ length = 1
+ ch = self.peek(length)
+ if ch != ' ':
+ while '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \
+ or ch in '-_':
+ length += 1
+ ch = self.peek(length)
+ if ch != '!':
+ self.forward(length)
+ raise ScannerError("while scanning a %s" % name, start_mark,
+ "expected '!', but found %r" % ch, self.get_mark())
+ length += 1
+ value = self.prefix(length)
+ self.forward(length)
+ return value
+
+ def scan_tag_uri(self, name, start_mark):
+ # See the specification for details.
+ # Note: we do not check if URI is well-formed.
+ chunks = []
+ length = 0
+ ch = self.peek(length)
+ while '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \
+ or ch in '-;/?:@&=+$,_.!~*\'()[]%':
+ if ch == '%':
+ chunks.append(self.prefix(length))
+ self.forward(length)
+ length = 0
+ chunks.append(self.scan_uri_escapes(name, start_mark))
+ else:
+ length += 1
+ ch = self.peek(length)
+ if length:
+ chunks.append(self.prefix(length))
+ self.forward(length)
+ length = 0
+ if not chunks:
+ raise ScannerError("while parsing a %s" % name, start_mark,
+ "expected URI, but found %r" % ch, self.get_mark())
+ return ''.join(chunks)
+
+ def scan_uri_escapes(self, name, start_mark):
+ # See the specification for details.
+ codes = []
+ mark = self.get_mark()
+ while self.peek() == '%':
+ self.forward()
+ for k in range(2):
+ if self.peek(k) not in '0123456789ABCDEFabcdef':
+ raise ScannerError("while scanning a %s" % name, start_mark,
+                            "expected URI escape sequence of 2 hexadecimal digits, but found %r"
+ % self.peek(k), self.get_mark())
+ codes.append(int(self.prefix(2), 16))
+ self.forward(2)
+ try:
+ value = bytes(codes).decode('utf-8')
+ except UnicodeDecodeError as exc:
+ raise ScannerError("while scanning a %s" % name, start_mark, str(exc), mark)
+ return value
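+
+    # For example, the URI text '%21' is decoded here to '!', and a
+    # multi-byte sequence such as '%C3%A9' to 'é' via UTF-8.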
+
+ def scan_line_break(self):
+ # Transforms:
+ # '\r\n' : '\n'
+ # '\r' : '\n'
+ # '\n' : '\n'
+ # '\x85' : '\n'
+ # '\u2028' : '\u2028'
+ # '\u2029 : '\u2029'
+ # default : ''
+ ch = self.peek()
+ if ch in '\r\n\x85':
+ if self.prefix(2) == '\r\n':
+ self.forward(2)
+ else:
+ self.forward()
+ return '\n'
+ elif ch in '\u2028\u2029':
+ self.forward()
+ return ch
+ return ''
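+
+    # For example, a position on '\r\n' consumes both characters and yields
+    # a single '\n', while '\u2028' is consumed and returned unchanged.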
+
+#try:
+# import psyco
+# psyco.bind(Scanner)
+#except ImportError:
+# pass
+
diff --git a/collectors/python.d.plugin/python_modules/pyyaml3/serializer.py b/collectors/python.d.plugin/python_modules/pyyaml3/serializer.py
new file mode 100644
index 0000000..1ba2f7f
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/pyyaml3/serializer.py
@@ -0,0 +1,112 @@
+# SPDX-License-Identifier: MIT
+
+__all__ = ['Serializer', 'SerializerError']
+
+from .error import YAMLError
+from .events import *
+from .nodes import *
+
+class SerializerError(YAMLError):
+ pass
+
+class Serializer:
+
+ ANCHOR_TEMPLATE = 'id%03d'
+
+ def __init__(self, encoding=None,
+ explicit_start=None, explicit_end=None, version=None, tags=None):
+ self.use_encoding = encoding
+ self.use_explicit_start = explicit_start
+ self.use_explicit_end = explicit_end
+ self.use_version = version
+ self.use_tags = tags
+ self.serialized_nodes = {}
+ self.anchors = {}
+ self.last_anchor_id = 0
+ self.closed = None
+
+ def open(self):
+ if self.closed is None:
+ self.emit(StreamStartEvent(encoding=self.use_encoding))
+ self.closed = False
+ elif self.closed:
+ raise SerializerError("serializer is closed")
+ else:
+ raise SerializerError("serializer is already opened")
+
+ def close(self):
+ if self.closed is None:
+ raise SerializerError("serializer is not opened")
+ elif not self.closed:
+ self.emit(StreamEndEvent())
+ self.closed = True
+
+ #def __del__(self):
+ # self.close()
+
+ def serialize(self, node):
+ if self.closed is None:
+ raise SerializerError("serializer is not opened")
+ elif self.closed:
+ raise SerializerError("serializer is closed")
+ self.emit(DocumentStartEvent(explicit=self.use_explicit_start,
+ version=self.use_version, tags=self.use_tags))
+ self.anchor_node(node)
+ self.serialize_node(node, None, None)
+ self.emit(DocumentEndEvent(explicit=self.use_explicit_end))
+ self.serialized_nodes = {}
+ self.anchors = {}
+ self.last_anchor_id = 0
+
+ def anchor_node(self, node):
+ if node in self.anchors:
+ if self.anchors[node] is None:
+ self.anchors[node] = self.generate_anchor(node)
+ else:
+ self.anchors[node] = None
+ if isinstance(node, SequenceNode):
+ for item in node.value:
+ self.anchor_node(item)
+ elif isinstance(node, MappingNode):
+ for key, value in node.value:
+ self.anchor_node(key)
+ self.anchor_node(value)
+
+ def generate_anchor(self, node):
+ self.last_anchor_id += 1
+ return self.ANCHOR_TEMPLATE % self.last_anchor_id
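+
+    # With ANCHOR_TEMPLATE = 'id%03d', repeated nodes are anchored as
+    # 'id001', 'id002', 'id003', and so on.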
+
+ def serialize_node(self, node, parent, index):
+ alias = self.anchors[node]
+ if node in self.serialized_nodes:
+ self.emit(AliasEvent(alias))
+ else:
+ self.serialized_nodes[node] = True
+ self.descend_resolver(parent, index)
+ if isinstance(node, ScalarNode):
+ detected_tag = self.resolve(ScalarNode, node.value, (True, False))
+ default_tag = self.resolve(ScalarNode, node.value, (False, True))
+ implicit = (node.tag == detected_tag), (node.tag == default_tag)
+ self.emit(ScalarEvent(alias, node.tag, implicit, node.value,
+ style=node.style))
+ elif isinstance(node, SequenceNode):
+ implicit = (node.tag
+ == self.resolve(SequenceNode, node.value, True))
+ self.emit(SequenceStartEvent(alias, node.tag, implicit,
+ flow_style=node.flow_style))
+ index = 0
+ for item in node.value:
+ self.serialize_node(item, node, index)
+ index += 1
+ self.emit(SequenceEndEvent())
+ elif isinstance(node, MappingNode):
+ implicit = (node.tag
+ == self.resolve(MappingNode, node.value, True))
+ self.emit(MappingStartEvent(alias, node.tag, implicit,
+ flow_style=node.flow_style))
+ for key, value in node.value:
+ self.serialize_node(key, node, None)
+ self.serialize_node(value, node, key)
+ self.emit(MappingEndEvent())
+ self.ascend_resolver()
+
diff --git a/collectors/python.d.plugin/python_modules/pyyaml3/tokens.py b/collectors/python.d.plugin/python_modules/pyyaml3/tokens.py
new file mode 100644
index 0000000..c5c4fb1
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/pyyaml3/tokens.py
@@ -0,0 +1,105 @@
+# SPDX-License-Identifier: MIT
+
+class Token(object):
+ def __init__(self, start_mark, end_mark):
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ def __repr__(self):
+ attributes = [key for key in self.__dict__
+ if not key.endswith('_mark')]
+ attributes.sort()
+ arguments = ', '.join(['%s=%r' % (key, getattr(self, key))
+ for key in attributes])
+ return '%s(%s)' % (self.__class__.__name__, arguments)
+
+#class BOMToken(Token):
+# id = '<byte order mark>'
+
+class DirectiveToken(Token):
+ id = '<directive>'
+ def __init__(self, name, value, start_mark, end_mark):
+ self.name = name
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+
+class DocumentStartToken(Token):
+ id = '<document start>'
+
+class DocumentEndToken(Token):
+ id = '<document end>'
+
+class StreamStartToken(Token):
+ id = '<stream start>'
+ def __init__(self, start_mark=None, end_mark=None,
+ encoding=None):
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.encoding = encoding
+
+class StreamEndToken(Token):
+ id = '<stream end>'
+
+class BlockSequenceStartToken(Token):
+ id = '<block sequence start>'
+
+class BlockMappingStartToken(Token):
+ id = '<block mapping start>'
+
+class BlockEndToken(Token):
+ id = '<block end>'
+
+class FlowSequenceStartToken(Token):
+ id = '['
+
+class FlowMappingStartToken(Token):
+ id = '{'
+
+class FlowSequenceEndToken(Token):
+ id = ']'
+
+class FlowMappingEndToken(Token):
+ id = '}'
+
+class KeyToken(Token):
+ id = '?'
+
+class ValueToken(Token):
+ id = ':'
+
+class BlockEntryToken(Token):
+ id = '-'
+
+class FlowEntryToken(Token):
+ id = ','
+
+class AliasToken(Token):
+ id = '<alias>'
+ def __init__(self, value, start_mark, end_mark):
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+
+class AnchorToken(Token):
+ id = '<anchor>'
+ def __init__(self, value, start_mark, end_mark):
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+
+class TagToken(Token):
+ id = '<tag>'
+ def __init__(self, value, start_mark, end_mark):
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+
+class ScalarToken(Token):
+ id = '<scalar>'
+ def __init__(self, value, plain, start_mark, end_mark, style=None):
+ self.value = value
+ self.plain = plain
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.style = style
+
diff --git a/collectors/python.d.plugin/python_modules/third_party/__init__.py b/collectors/python.d.plugin/python_modules/third_party/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/third_party/__init__.py
diff --git a/collectors/python.d.plugin/python_modules/third_party/boinc_client.py b/collectors/python.d.plugin/python_modules/third_party/boinc_client.py
new file mode 100644
index 0000000..ec21779
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/third_party/boinc_client.py
@@ -0,0 +1,515 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+#
+# client.py - Somewhat higher-level GUI_RPC API for BOINC core client
+#
+# Copyright (C) 2013 Rodrigo Silva (MestreLion) <linux@rodrigosilva.com>
+# Copyright (C) 2017 Austin S. Hemmelgarn
+#
+# SPDX-License-Identifier: GPL-3.0
+
+# Based on client/boinc_cmd.cpp
+
+import hashlib
+import socket
+import sys
+import time
+from functools import total_ordering
+from xml.etree import ElementTree
+
+GUI_RPC_PASSWD_FILE = "/var/lib/boinc/gui_rpc_auth.cfg"
+
+GUI_RPC_HOSTNAME = None # localhost
+GUI_RPC_PORT = 31416
+GUI_RPC_TIMEOUT = 1
+
+class Rpc(object):
+ ''' Class to perform GUI RPC calls to a BOINC core client.
+ Usage in a context manager ('with' block) is recommended to ensure
+ disconnect() is called. Using the same instance for all calls is also
+        recommended, so that it reuses the same socket connection.
+ '''
+ def __init__(self, hostname="", port=0, timeout=0, text_output=False):
+ self.hostname = hostname
+ self.port = port
+ self.timeout = timeout
+ self.sock = None
+ self.text_output = text_output
+
+ @property
+ def sockargs(self):
+ return (self.hostname, self.port, self.timeout)
+
+ def __enter__(self): self.connect(*self.sockargs); return self
+ def __exit__(self, *args): self.disconnect()
+
+ def connect(self, hostname="", port=0, timeout=0):
+ ''' Connect to (hostname, port) with timeout in seconds.
+ Hostname defaults to None (localhost), and port to 31416
+ Calling multiple times will disconnect previous connection (if any),
+ and (re-)connect to host.
+ '''
+ if self.sock:
+ self.disconnect()
+
+ self.hostname = hostname or GUI_RPC_HOSTNAME
+ self.port = port or GUI_RPC_PORT
+ self.timeout = timeout or GUI_RPC_TIMEOUT
+
+ self.sock = socket.create_connection(self.sockargs[0:2], self.sockargs[2])
+
+ def disconnect(self):
+ ''' Disconnect from host. Calling multiple times is OK (idempotent)
+ '''
+ if self.sock:
+ self.sock.close()
+ self.sock = None
+
+ def call(self, request, text_output=None):
+ ''' Do an RPC call. Pack and send the XML request and return the
+            unpacked reply. request can be either plain XML text or an
+            xml.etree.ElementTree.Element object. Return an ElementTree.Element
+            or XML text according to the text_output flag.
+ Will auto-connect if not connected.
+ '''
+ if text_output is None:
+ text_output = self.text_output
+
+ if not self.sock:
+ self.connect(*self.sockargs)
+
+ if not isinstance(request, ElementTree.Element):
+ request = ElementTree.fromstring(request)
+
+ # pack request
+ end = '\003'
+ if sys.version_info[0] < 3:
+ req = "<boinc_gui_rpc_request>\n{0}\n</boinc_gui_rpc_request>\n{1}".format(ElementTree.tostring(request).replace(' />', '/>'), end)
+ else:
+ req = "<boinc_gui_rpc_request>\n{0}\n</boinc_gui_rpc_request>\n{1}".format(ElementTree.tostring(request, encoding='unicode').replace(' />', '/>'), end).encode()
+
+ try:
+ self.sock.sendall(req)
+ except (socket.error, socket.herror, socket.gaierror, socket.timeout):
+ raise
+
+ req = ""
+ while True:
+ try:
+ buf = self.sock.recv(8192)
+ if not buf:
+ raise socket.error("No data from socket")
+ if sys.version_info[0] >= 3:
+ buf = buf.decode()
+ except socket.error:
+ raise
+ n = buf.find(end)
+            if n != -1: break
+ req += buf
+ req += buf[:n]
+
+        # unpack reply (remove the root tag, i.e. the first and last lines)
+ req = '\n'.join(req.strip().rsplit('\n')[1:-1])
+
+ if text_output:
+ return req
+ else:
+ return ElementTree.fromstring(req)
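+
+    # A minimal usage sketch (assumes a BOINC client listening on the
+    # default localhost:31416; '<exchange_versions/>' is one of the GUI RPC
+    # requests used later in this module):
+    #
+    #     with Rpc() as rpc:
+    #         reply = rpc.call('<exchange_versions/>')  # ElementTree.Element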
+
+def setattrs_from_xml(obj, xml, attrfuncdict={}):
+ ''' Helper to set values for attributes of a class instance by mapping
+        matching tags from an XML file.
+        attrfuncdict is a dict of functions to customize the value data type
+        of each attribute. It falls back to simple int/float/bool/str
+        detection based on the values defined in __init__(). This would not
+        be needed if BOINC used a standard RPC protocol that includes the
+        data type in the XML.
+ '''
+ if not isinstance(xml, ElementTree.Element):
+ xml = ElementTree.fromstring(xml)
+ for e in list(xml):
+ if hasattr(obj, e.tag):
+ attr = getattr(obj, e.tag)
+ attrfunc = attrfuncdict.get(e.tag, None)
+ if attrfunc is None:
+ if isinstance(attr, bool): attrfunc = parse_bool
+ elif isinstance(attr, int): attrfunc = parse_int
+ elif isinstance(attr, float): attrfunc = parse_float
+ elif isinstance(attr, str): attrfunc = parse_str
+ elif isinstance(attr, list): attrfunc = parse_list
+ else: attrfunc = lambda x: x
+ setattr(obj, e.tag, attrfunc(e))
+ else:
+ pass
+ #print "class missing attribute '%s': %r" % (e.tag, obj)
+ return obj
+
+
+def parse_bool(e):
+ ''' Helper to convert ElementTree.Element.text to boolean.
+ Treat '<foo/>' (and '<foo>[[:blank:]]</foo>') as True
+ Treat '0' and 'false' as False
+ '''
+ if e.text is None:
+ return True
+ else:
+ return bool(e.text) and not e.text.strip().lower() in ('0', 'false')
+
+
+def parse_int(e):
+ ''' Helper to convert ElementTree.Element.text to integer.
+ Treat '<foo/>' (and '<foo></foo>') as 0
+ '''
+ # int(float()) allows casting to int a value expressed as float in XML
+ return 0 if e.text is None else int(float(e.text.strip()))
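+
+# For example (illustrative doctest):
+#
+#     >>> parse_bool(ElementTree.fromstring('<foo/>'))
+#     True
+#     >>> parse_bool(ElementTree.fromstring('<foo>0</foo>'))
+#     False
+#     >>> parse_int(ElementTree.fromstring('<n>2.0</n>'))
+#     2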
+
+
+def parse_float(e):
+ ''' Helper to convert ElementTree.Element.text to float. '''
+ return 0.0 if e.text is None else float(e.text.strip())
+
+
+def parse_str(e):
+ ''' Helper to convert ElementTree.Element.text to string. '''
+ return "" if e.text is None else e.text.strip()
+
+
+def parse_list(e):
+ ''' Helper to convert ElementTree.Element to list. For now, simply return
+        the list of the root element's children.
+ '''
+ return list(e)
+
+
+class Enum(object):
+ UNKNOWN = -1 # Not in original API
+
+ @classmethod
+ def name(cls, value):
+ ''' Quick-and-dirty fallback for getting the "name" of an enum item '''
+
+ # value as string, if it matches an enum attribute.
+ # Allows short usage as Enum.name("VALUE") besides Enum.name(Enum.VALUE)
+ if hasattr(cls, str(value)):
+ return cls.name(getattr(cls, value, None))
+
+ # value not handled in subclass name()
+ for k, v in cls.__dict__.items():
+ if v == value:
+ return k.lower().replace('_', ' ')
+
+ # value not found
+ return cls.name(Enum.UNKNOWN)
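+
+    # For example, with the CpuSched subclass defined below, both
+    # CpuSched.name(CpuSched.SCHEDULED) and CpuSched.name("SCHEDULED")
+    # return 'scheduled'.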
+
+
+class CpuSched(Enum):
+ ''' values of ACTIVE_TASK::scheduler_state and ACTIVE_TASK::next_scheduler_state
+ "SCHEDULED" is synonymous with "executing" except when CPU throttling
+ is in use.
+ '''
+ UNINITIALIZED = 0
+ PREEMPTED = 1
+ SCHEDULED = 2
+
+
+class ResultState(Enum):
+ ''' Values of RESULT::state in client.
+ THESE MUST BE IN NUMERICAL ORDER
+ (because of the > comparison in RESULT::computing_done())
+ see html/inc/common_defs.inc
+ '''
+ NEW = 0
+ #// New result
+ FILES_DOWNLOADING = 1
+ #// Input files for result (WU, app version) are being downloaded
+ FILES_DOWNLOADED = 2
+ #// Files are downloaded, result can be (or is being) computed
+ COMPUTE_ERROR = 3
+ #// computation failed; no file upload
+ FILES_UPLOADING = 4
+ #// Output files for result are being uploaded
+ FILES_UPLOADED = 5
+ #// Files are uploaded, notify scheduling server at some point
+ ABORTED = 6
+ #// result was aborted
+ UPLOAD_FAILED = 7
+ #// some output file permanent failure
+
+
+class Process(Enum):
+ ''' values of ACTIVE_TASK::task_state '''
+ UNINITIALIZED = 0
+ #// process doesn't exist yet
+ EXECUTING = 1
+ #// process is running, as far as we know
+ SUSPENDED = 9
+ #// we've sent it a "suspend" message
+ ABORT_PENDING = 5
+ #// process exceeded limits; send "abort" message, waiting to exit
+ QUIT_PENDING = 8
+ #// we've sent it a "quit" message, waiting to exit
+ COPY_PENDING = 10
+ #// waiting for async file copies to finish
+
+
+class _Struct(object):
+ ''' base helper class with common methods for all classes derived from
+ BOINC's C++ structs
+ '''
+ @classmethod
+ def parse(cls, xml):
+ return setattrs_from_xml(cls(), xml)
+
+ def __str__(self, indent=0):
+ buf = '{0}{1}:\n'.format('\t' * indent, self.__class__.__name__)
+ for attr in self.__dict__:
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ buf += '{0}\t{1} [\n'.format('\t' * indent, attr)
+ for v in value: buf += '\t\t{0}\t\t,\n'.format(v)
+ buf += '\t]\n'
+ else:
+ buf += '{0}\t{1}\t{2}\n'.format('\t' * indent,
+ attr,
+ value.__str__(indent+2)
+ if isinstance(value, _Struct)
+ else repr(value))
+ return buf
+
+
+@total_ordering
+class VersionInfo(_Struct):
+ def __init__(self, major=0, minor=0, release=0):
+ self.major = major
+ self.minor = minor
+ self.release = release
+
+ @property
+ def _tuple(self):
+ return (self.major, self.minor, self.release)
+
+ def __eq__(self, other):
+ return isinstance(other, self.__class__) and self._tuple == other._tuple
+
+ def __ne__(self, other):
+ return not self.__eq__(other)
+
+ def __gt__(self, other):
+ if not isinstance(other, self.__class__):
+ return NotImplemented
+ return self._tuple > other._tuple
+
+ def __str__(self):
+ return "{0}.{1}.{2}".format(self.major, self.minor, self.release)
+
+ def __repr__(self):
+ return "{0}{1}".format(self.__class__.__name__, self._tuple)
+
+
+class Result(_Struct):
+ ''' Also called "task" in some contexts '''
+ def __init__(self):
+ # Names and values follow lib/gui_rpc_client.h @ RESULT
+ # Order too, except when grouping contradicts client/result.cpp
+ # RESULT::write_gui(), then XML order is used.
+
+ self.name = ""
+ self.wu_name = ""
+ self.version_num = 0
+ #// identifies the app used
+ self.plan_class = ""
+ self.project_url = "" # from PROJECT.master_url
+ self.report_deadline = 0.0 # seconds since epoch
+ self.received_time = 0.0 # seconds since epoch
+ #// when we got this from server
+ self.ready_to_report = False
+ #// we're ready to report this result to the server;
+ #// either computation is done and all the files have been uploaded
+ #// or there was an error
+ self.got_server_ack = False
+ #// we've received the ack for this result from the server
+ self.final_cpu_time = 0.0
+ self.final_elapsed_time = 0.0
+ self.state = ResultState.NEW
+ self.estimated_cpu_time_remaining = 0.0
+ #// actually, estimated elapsed time remaining
+ self.exit_status = 0
+ #// return value from the application
+ self.suspended_via_gui = False
+ self.project_suspended_via_gui = False
+ self.edf_scheduled = False
+ #// temporary used to tell GUI that this result is deadline-scheduled
+ self.coproc_missing = False
+ #// a coproc needed by this job is missing
+ #// (e.g. because user removed their GPU board).
+ self.scheduler_wait = False
+ self.scheduler_wait_reason = ""
+ self.network_wait = False
+ self.resources = ""
+ #// textual description of resources used
+
+ #// the following defined if active
+ # XML is generated in client/app.cpp ACTIVE_TASK::write_gui()
+ self.active_task = False
+ self.active_task_state = Process.UNINITIALIZED
+ self.app_version_num = 0
+ self.slot = -1
+ self.pid = 0
+ self.scheduler_state = CpuSched.UNINITIALIZED
+ self.checkpoint_cpu_time = 0.0
+ self.current_cpu_time = 0.0
+ self.fraction_done = 0.0
+ self.elapsed_time = 0.0
+ self.swap_size = 0
+ self.working_set_size_smoothed = 0.0
+ self.too_large = False
+ self.needs_shmem = False
+ self.graphics_exec_path = ""
+ self.web_graphics_url = ""
+ self.remote_desktop_addr = ""
+ self.slot_path = ""
+ #// only present if graphics_exec_path is
+
+ # The following are not in original API, but are present in RPC XML reply
+ self.completed_time = 0.0
+ #// time when ready_to_report was set
+ self.report_immediately = False
+ self.working_set_size = 0
+ self.page_fault_rate = 0.0
+ #// derived by higher-level code
+
+ # The following are in API, but are NEVER in RPC XML reply. Go figure
+ self.signal = 0
+
+ self.app = None # APP*
+ self.wup = None # WORKUNIT*
+ self.project = None # PROJECT*
+ self.avp = None # APP_VERSION*
+
+ @classmethod
+ def parse(cls, xml):
+ if not isinstance(xml, ElementTree.Element):
+ xml = ElementTree.fromstring(xml)
+
+ # parse main XML
+ result = super(Result, cls).parse(xml)
+
+ # parse '<active_task>' children
+ active_task = xml.find('active_task')
+ if active_task is None:
+ result.active_task = False # already the default after __init__()
+ else:
+ result.active_task = True # already the default after main parse
+ result = setattrs_from_xml(result, active_task)
+
+ #// if CPU time is nonzero but elapsed time is zero,
+ #// we must be talking to an old client.
+ #// Set elapsed = CPU
+ #// (easier to deal with this here than in the manager)
+ if result.current_cpu_time != 0 and result.elapsed_time == 0:
+ result.elapsed_time = result.current_cpu_time
+
+ if result.final_cpu_time != 0 and result.final_elapsed_time == 0:
+ result.final_elapsed_time = result.final_cpu_time
+
+ return result
+
+ def __str__(self):
+ buf = '{0}:\n'.format(self.__class__.__name__)
+ for attr in self.__dict__:
+ value = getattr(self, attr)
+ if attr in ['received_time', 'report_deadline']:
+ value = time.ctime(value)
+ buf += '\t{0}\t{1}\n'.format(attr, value)
+ return buf
+
+
+class BoincClient(object):
+
+ def __init__(self, host="", port=0, passwd=None):
+ self.hostname = host
+ self.port = port
+ self.passwd = passwd
+ self.rpc = Rpc(text_output=False)
+ self.version = None
+ self.authorized = False
+
+ # Informative, not authoritative. Records status of *last* RPC call,
+ # but does not infer success about the *next* one.
+ # Thus, it should be read *after* an RPC call, not prior to one
+ self.connected = False
+
+ def __enter__(self): self.connect(); return self
+ def __exit__(self, *args): self.disconnect()
+
+ def connect(self):
+ try:
+ self.rpc.connect(self.hostname, self.port)
+ self.connected = True
+ except socket.error:
+ self.connected = False
+ return
+ self.authorized = self.authorize(self.passwd)
+ self.version = self.exchange_versions()
+
+ def disconnect(self):
+ self.rpc.disconnect()
+
+ def authorize(self, password):
+ ''' Request authorization. If password is None and we are connecting
+ to localhost, try to read password from the local config file
+            GUI_RPC_PASSWD_FILE. If the file can't be read (not found or no
+ permission to read), try to authorize with a blank password.
+ If authorization is requested and fails, all subsequent calls
+ will be refused with socket.error 'Connection reset by peer' (104).
+            Since most local calls do not require authorization, do not attempt
+ it if you're not sure about the password.
+ '''
+ if password is None and not self.hostname:
+ password = read_gui_rpc_password() or ""
+ nonce = self.rpc.call('<auth1/>').text
+ authhash = hashlib.md5('{0}{1}'.format(nonce, password).encode()).hexdigest().lower()
+ reply = self.rpc.call('<auth2><nonce_hash>{0}</nonce_hash></auth2>'.format(authhash))
+
+ if reply.tag == 'authorized':
+ return True
+ else:
+ return False
+
+ def exchange_versions(self):
+ ''' Return VersionInfo instance with core client version info '''
+ return VersionInfo.parse(self.rpc.call('<exchange_versions/>'))
+
+ def get_tasks(self):
+ ''' Same as get_results(active_only=False) '''
+ return self.get_results(False)
+
+ def get_results(self, active_only=False):
+ ''' Get a list of results.
+ Those that are in progress will have information such as CPU time
+ and fraction done. Each result includes a name;
+            use CC_STATE::lookup_result() to find this result in the current static state;
+ if it's not there, call get_state() again.
+ '''
+ reply = self.rpc.call("<get_results><active_only>{0}</active_only></get_results>".format(1 if active_only else 0))
+ if not reply.tag == 'results':
+ return []
+
+ results = []
+ for item in list(reply):
+ results.append(Result.parse(item))
+
+ return results
+
+
+def read_gui_rpc_password():
+    ''' Read the password string from GUI_RPC_PASSWD_FILE, trim the trailing
+        newline (if any), and return it.
+ '''
+ try:
+ with open(GUI_RPC_PASSWD_FILE, 'r') as f:
+ buf = f.read()
+            if buf.endswith('\n'): return buf[:-1]  # trim trailing newline
+ else: return buf
+ except IOError:
+ # Permission denied or File not found.
+ pass
diff --git a/collectors/python.d.plugin/python_modules/third_party/filelock.py b/collectors/python.d.plugin/python_modules/third_party/filelock.py
new file mode 100644
index 0000000..4c98167
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/third_party/filelock.py
@@ -0,0 +1,451 @@
+# This is free and unencumbered software released into the public domain.
+#
+# Anyone is free to copy, modify, publish, use, compile, sell, or
+# distribute this software, either in source code form or as a compiled
+# binary, for any purpose, commercial or non-commercial, and by any
+# means.
+#
+# In jurisdictions that recognize copyright laws, the author or authors
+# of this software dedicate any and all copyright interest in the
+# software to the public domain. We make this dedication for the benefit
+# of the public at large and to the detriment of our heirs and
+# successors. We intend this dedication to be an overt act of
+# relinquishment in perpetuity of all present and future rights to this
+# software under copyright law.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+# For more information, please refer to <http://unlicense.org>
+
+"""
+A platform independent file lock that supports the with-statement.
+"""
+
+
+# Modules
+# ------------------------------------------------
+import logging
+import os
+import threading
+import time
+try:
+ import warnings
+except ImportError:
+ warnings = None
+
+try:
+ import msvcrt
+except ImportError:
+ msvcrt = None
+
+try:
+ import fcntl
+except ImportError:
+ fcntl = None
+
+
+# Backward compatibility
+# ------------------------------------------------
+try:
+ TimeoutError
+except NameError:
+ TimeoutError = OSError
+
+
+# Data
+# ------------------------------------------------
+__all__ = [
+ "Timeout",
+ "BaseFileLock",
+ "WindowsFileLock",
+ "UnixFileLock",
+ "SoftFileLock",
+ "FileLock"
+]
+
+__version__ = "3.0.12"
+
+
+_logger = None
+def logger():
+ """Returns the logger instance used in this module."""
+ global _logger
+ _logger = _logger or logging.getLogger(__name__)
+ return _logger
+
+
+# Exceptions
+# ------------------------------------------------
+class Timeout(TimeoutError):
+ """
+ Raised when the lock could not be acquired in *timeout*
+ seconds.
+ """
+
+ def __init__(self, lock_file):
+ """
+ """
+ #: The path of the file lock.
+ self.lock_file = lock_file
+ return None
+
+ def __str__(self):
+ temp = "The file lock '{}' could not be acquired."\
+ .format(self.lock_file)
+ return temp
+
+
+# Classes
+# ------------------------------------------------
+
+# This is a helper class which is returned by :meth:`BaseFileLock.acquire`
+# and wraps the lock to make sure __enter__ is not called twice when entering
+# the with statement.
+# If we simply returned *self*, the lock would be acquired again in the
+# *__enter__* method of the BaseFileLock, but not released automatically.
+#
+# :seealso: issue #37 (memory leak)
+class _Acquire_ReturnProxy(object):
+
+ def __init__(self, lock):
+ self.lock = lock
+ return None
+
+ def __enter__(self):
+ return self.lock
+
+ def __exit__(self, exc_type, exc_value, traceback):
+ self.lock.release()
+ return None
+
+
+class BaseFileLock(object):
+ """
+ Implements the base class of a file lock.
+ """
+
+ def __init__(self, lock_file, timeout = -1):
+ """
+ """
+ # The path to the lock file.
+ self._lock_file = lock_file
+
+ # The file descriptor for the *_lock_file* as it is returned by the
+ # os.open() function.
+        # This file descriptor is not None only if the object currently
+        # holds the lock.
+ self._lock_file_fd = None
+
+ # The default timeout value.
+ self.timeout = timeout
+
+ # We use this lock primarily for the lock counter.
+ self._thread_lock = threading.Lock()
+
+ # The lock counter is used for implementing the nested locking
+ # mechanism. Whenever the lock is acquired, the counter is increased and
+ # the lock is only released, when this value is 0 again.
+ self._lock_counter = 0
+ return None
+
+ @property
+ def lock_file(self):
+ """
+ The path to the lock file.
+ """
+ return self._lock_file
+
+ @property
+ def timeout(self):
+ """
+        You can set a default timeout for the filelock. It is used as the
+        fallback value in the acquire method if no timeout value (*None*) is
+        given.
+
+ If you want to disable the timeout, set it to a negative value.
+
+        A timeout of 0 means that there is exactly one attempt to acquire the
+ file lock.
+
+ .. versionadded:: 2.0.0
+ """
+ return self._timeout
+
+ @timeout.setter
+ def timeout(self, value):
+ """
+ """
+ self._timeout = float(value)
+ return None
+
+ # Platform dependent locking
+ # --------------------------------------------
+
+ def _acquire(self):
+ """
+ Platform dependent. If the file lock could be
+ acquired, self._lock_file_fd holds the file descriptor
+ of the lock file.
+ """
+ raise NotImplementedError()
+
+ def _release(self):
+ """
+ Releases the lock and sets self._lock_file_fd to None.
+ """
+ raise NotImplementedError()
+
+ # Platform independent methods
+ # --------------------------------------------
+
+ @property
+ def is_locked(self):
+ """
+ True, if the object holds the file lock.
+
+ .. versionchanged:: 2.0.0
+
+ This was previously a method and is now a property.
+ """
+ return self._lock_file_fd is not None
+
+ def acquire(self, timeout=None, poll_intervall=0.05):
+ """
+ Acquires the file lock or fails with a :exc:`Timeout` error.
+
+ .. code-block:: python
+
+ # You can use this method in the context manager (recommended)
+ with lock.acquire():
+ pass
+
+ # Or use an equivalent try-finally construct:
+ lock.acquire()
+ try:
+ pass
+ finally:
+ lock.release()
+
+ :arg float timeout:
+            The maximum time to wait for the file lock.
+ If ``timeout < 0``, there is no timeout and this method will
+ block until the lock could be acquired.
+ If ``timeout`` is None, the default :attr:`~timeout` is used.
+
+ :arg float poll_intervall:
+ We check once in *poll_intervall* seconds if we can acquire the
+ file lock.
+
+ :raises Timeout:
+ if the lock could not be acquired in *timeout* seconds.
+
+ .. versionchanged:: 2.0.0
+
+            This method now returns a *proxy* object instead of *self*,
+ so that it can be used in a with statement without side effects.
+ """
+ # Use the default timeout, if no timeout is provided.
+ if timeout is None:
+ timeout = self.timeout
+
+ # Increment the number right at the beginning.
+ # We can still undo it, if something fails.
+ with self._thread_lock:
+ self._lock_counter += 1
+
+ lock_id = id(self)
+ lock_filename = self._lock_file
+ start_time = time.time()
+ try:
+ while True:
+ with self._thread_lock:
+ if not self.is_locked:
+ logger().debug('Attempting to acquire lock %s on %s', lock_id, lock_filename)
+ self._acquire()
+
+ if self.is_locked:
+ logger().info('Lock %s acquired on %s', lock_id, lock_filename)
+ break
+ elif timeout >= 0 and time.time() - start_time > timeout:
+ logger().debug('Timeout on acquiring lock %s on %s', lock_id, lock_filename)
+ raise Timeout(self._lock_file)
+ else:
+ logger().debug(
+ 'Lock %s not acquired on %s, waiting %s seconds ...',
+ lock_id, lock_filename, poll_intervall
+ )
+ time.sleep(poll_intervall)
+ except:
+ # Something did go wrong, so decrement the counter.
+ with self._thread_lock:
+ self._lock_counter = max(0, self._lock_counter - 1)
+
+ raise
+ return _Acquire_ReturnProxy(lock = self)
+
+ def release(self, force = False):
+ """
+ Releases the file lock.
+
+        Please note that the lock is only completely released if the lock
+        counter is 0.
+
+        Also note that the lock file itself is not automatically deleted.
+
+ :arg bool force:
+ If true, the lock counter is ignored and the lock is released in
+ every case.
+ """
+ with self._thread_lock:
+
+ if self.is_locked:
+ self._lock_counter -= 1
+
+ if self._lock_counter == 0 or force:
+ lock_id = id(self)
+ lock_filename = self._lock_file
+
+ logger().debug('Attempting to release lock %s on %s', lock_id, lock_filename)
+ self._release()
+ self._lock_counter = 0
+ logger().info('Lock %s released on %s', lock_id, lock_filename)
+
+ return None
+
+ def __enter__(self):
+ self.acquire()
+ return self
+
+ def __exit__(self, exc_type, exc_value, traceback):
+ self.release()
+ return None
+
+ def __del__(self):
+ self.release(force = True)
+ return None
+
+
+# Windows locking mechanism
+# ~~~~~~~~~~~~~~~~~~~~~~~~~
+
+class WindowsFileLock(BaseFileLock):
+ """
+ Uses the :func:`msvcrt.locking` function to hard lock the lock file on
+    Windows systems.
+ """
+
+ def _acquire(self):
+ open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
+
+ try:
+ fd = os.open(self._lock_file, open_mode)
+ except OSError:
+ pass
+ else:
+ try:
+ msvcrt.locking(fd, msvcrt.LK_NBLCK, 1)
+ except (IOError, OSError):
+ os.close(fd)
+ else:
+ self._lock_file_fd = fd
+ return None
+
+ def _release(self):
+ fd = self._lock_file_fd
+ self._lock_file_fd = None
+ msvcrt.locking(fd, msvcrt.LK_UNLCK, 1)
+ os.close(fd)
+
+ try:
+ os.remove(self._lock_file)
+ # Probably another instance of the application
+ # that acquired the file lock.
+ except OSError:
+ pass
+ return None
+
+# Unix locking mechanism
+# ~~~~~~~~~~~~~~~~~~~~~~
+
+class UnixFileLock(BaseFileLock):
+ """
+    Uses :func:`fcntl.flock` to hard lock the lock file on Unix systems.
+ """
+
+ def _acquire(self):
+ open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
+ fd = os.open(self._lock_file, open_mode)
+
+ try:
+ fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
+ except (IOError, OSError):
+ os.close(fd)
+ else:
+ self._lock_file_fd = fd
+ return None
+
+ def _release(self):
+ # Do not remove the lockfile:
+ #
+ # https://github.com/benediktschmitt/py-filelock/issues/31
+ # https://stackoverflow.com/questions/17708885/flock-removing-locked-file-without-race-condition
+ fd = self._lock_file_fd
+ self._lock_file_fd = None
+ fcntl.flock(fd, fcntl.LOCK_UN)
+ os.close(fd)
+ return None
+
+# Soft lock
+# ~~~~~~~~~
+
+class SoftFileLock(BaseFileLock):
+ """
+ Simply watches the existence of the lock file.
+ """
+
+ def _acquire(self):
+ open_mode = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
+ try:
+ fd = os.open(self._lock_file, open_mode)
+ except (IOError, OSError):
+ pass
+ else:
+ self._lock_file_fd = fd
+ return None
+
+ def _release(self):
+ os.close(self._lock_file_fd)
+ self._lock_file_fd = None
+
+ try:
+ os.remove(self._lock_file)
+ # The file is already deleted and that's what we want.
+ except OSError:
+ pass
+ return None
+
+
+# Platform filelock
+# ~~~~~~~~~~~~~~~~~
+
+#: Alias for the lock, which should be used for the current platform. On
+#: Windows, this is an alias for :class:`WindowsFileLock`, on Unix for
+#: :class:`UnixFileLock` and otherwise for :class:`SoftFileLock`.
+FileLock = None
+
+if msvcrt:
+ FileLock = WindowsFileLock
+elif fcntl:
+ FileLock = UnixFileLock
+else:
+ FileLock = SoftFileLock
+
+ if warnings is not None:
+ warnings.warn("only soft file lock is available")
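+
+
+# A short usage sketch (illustrative; 'example.lock' is an arbitrary path):
+#
+#     lock = FileLock("example.lock", timeout=5)
+#     try:
+#         with lock.acquire():
+#             pass  # protected section; the lock file is held here
+#     except Timeout:
+#         pass  # another process held the lock for more than 5 seconds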
diff --git a/collectors/python.d.plugin/python_modules/third_party/lm_sensors.py b/collectors/python.d.plugin/python_modules/third_party/lm_sensors.py
new file mode 100644
index 0000000..f873eac
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/third_party/lm_sensors.py
@@ -0,0 +1,327 @@
+# SPDX-License-Identifier: LGPL-2.1
+"""
+@package sensors.py
+Python Bindings for libsensors3
+
+Use the documentation of libsensors for the low-level API.
+See example.py for high-level API usage.
+
+@author: Pavel Rojtberg (http://www.rojtberg.net)
+@see: https://github.com/paroj/sensors.py
+@copyright: LGPLv2 (same as libsensors) <http://opensource.org/licenses/LGPL-2.1>
+"""
+
+from ctypes import *
+import ctypes.util
+
+_libc = cdll.LoadLibrary(ctypes.util.find_library("c"))
+# see https://github.com/paroj/sensors.py/issues/1
+_libc.free.argtypes = [c_void_p]
+
+_hdl = cdll.LoadLibrary(ctypes.util.find_library("sensors"))
+
+version = c_char_p.in_dll(_hdl, "libsensors_version").value.decode("ascii")
+
+
+class SensorsError(Exception):
+ pass
+
+
+class ErrorWildcards(SensorsError):
+ pass
+
+
+class ErrorNoEntry(SensorsError):
+ pass
+
+
+class ErrorAccessRead(SensorsError, OSError):
+ pass
+
+
+class ErrorKernel(SensorsError, OSError):
+ pass
+
+
+class ErrorDivZero(SensorsError, ZeroDivisionError):
+ pass
+
+
+class ErrorChipName(SensorsError):
+ pass
+
+
+class ErrorBusName(SensorsError):
+ pass
+
+
+class ErrorParse(SensorsError):
+ pass
+
+
+class ErrorAccessWrite(SensorsError, OSError):
+ pass
+
+
+class ErrorIO(SensorsError, IOError):
+ pass
+
+
+class ErrorRecursion(SensorsError):
+ pass
+
+
+_ERR_MAP = {
+ 1: ErrorWildcards,
+ 2: ErrorNoEntry,
+ 3: ErrorAccessRead,
+ 4: ErrorKernel,
+ 5: ErrorDivZero,
+ 6: ErrorChipName,
+ 7: ErrorBusName,
+ 8: ErrorParse,
+ 9: ErrorAccessWrite,
+ 10: ErrorIO,
+ 11: ErrorRecursion
+}
+
+
+def raise_sensor_error(errno, message=''):
+ raise _ERR_MAP[abs(errno)](message)
+
+
+class bus_id(Structure):
+ _fields_ = [("type", c_short),
+ ("nr", c_short)]
+
+
+class chip_name(Structure):
+ _fields_ = [("prefix", c_char_p),
+ ("bus", bus_id),
+ ("addr", c_int),
+ ("path", c_char_p)]
+
+
+class feature(Structure):
+ _fields_ = [("name", c_char_p),
+ ("number", c_int),
+ ("type", c_int)]
+
+ # sensors_feature_type
+ IN = 0x00
+ FAN = 0x01
+ TEMP = 0x02
+ POWER = 0x03
+ ENERGY = 0x04
+ CURR = 0x05
+ HUMIDITY = 0x06
+ MAX_MAIN = 0x7
+ VID = 0x10
+ INTRUSION = 0x11
+ MAX_OTHER = 0x12
+ BEEP_ENABLE = 0x18
+
+
+class subfeature(Structure):
+ _fields_ = [("name", c_char_p),
+ ("number", c_int),
+ ("type", c_int),
+ ("mapping", c_int),
+ ("flags", c_uint)]
+
+
+_hdl.sensors_get_detected_chips.restype = POINTER(chip_name)
+_hdl.sensors_get_features.restype = POINTER(feature)
+_hdl.sensors_get_all_subfeatures.restype = POINTER(subfeature)
+_hdl.sensors_get_label.restype = c_void_p # return pointer instead of str so we can free it
+_hdl.sensors_get_adapter_name.restype = c_char_p # docs do not say whether to free this or not
+_hdl.sensors_strerror.restype = c_char_p
+
+### RAW API ###
+MODE_R = 1
+MODE_W = 2
+COMPUTE_MAPPING = 4
+
+
+def init(cfg_file=None):
+ file = _libc.fopen(cfg_file.encode("utf-8"), "r") if cfg_file is not None else None
+
+ result = _hdl.sensors_init(file)
+ if result != 0:
+ raise_sensor_error(result, "sensors_init failed")
+
+ if file is not None:
+ _libc.fclose(file)
+
+
+def cleanup():
+ _hdl.sensors_cleanup()
+
+
+def parse_chip_name(orig_name):
+ ret = chip_name()
+ err = _hdl.sensors_parse_chip_name(orig_name.encode("utf-8"), byref(ret))
+
+ if err < 0:
+ raise_sensor_error(err, strerror(err))
+
+ return ret
+
+
+def strerror(errnum):
+ return _hdl.sensors_strerror(errnum).decode("utf-8")
+
+
+def free_chip_name(chip):
+ _hdl.sensors_free_chip_name(byref(chip))
+
+
+def get_detected_chips(match, nr):
+ """
+ @return: (chip, next nr to query)
+ """
+ _nr = c_int(nr)
+
+ if match is not None:
+ match = byref(match)
+
+ chip = _hdl.sensors_get_detected_chips(match, byref(_nr))
+ chip = chip.contents if bool(chip) else None
+ return chip, _nr.value
+
+
+def chip_snprintf_name(chip, buffer_size=200):
+ """
+ @param buffer_size defaults to the size used in the sensors utility
+ """
+ ret = create_string_buffer(buffer_size)
+ err = _hdl.sensors_snprintf_chip_name(ret, buffer_size, byref(chip))
+
+ if err < 0:
+ raise_sensor_error(err, strerror(err))
+
+ return ret.value.decode("utf-8")
+
+
+def do_chip_sets(chip):
+ """
+ @attention this function was not tested
+ """
+ err = _hdl.sensors_do_chip_sets(byref(chip))
+ if err < 0:
+ raise_sensor_error(err, strerror(err))
+
+
+def get_adapter_name(bus):
+ return _hdl.sensors_get_adapter_name(byref(bus)).decode("utf-8")
+
+
+def get_features(chip, nr):
+ """
+ @return: (feature, next nr to query)
+ """
+ _nr = c_int(nr)
+ feature = _hdl.sensors_get_features(byref(chip), byref(_nr))
+ feature = feature.contents if bool(feature) else None
+ return feature, _nr.value
+
+
+def get_label(chip, feature):
+ ptr = _hdl.sensors_get_label(byref(chip), byref(feature))
+ val = cast(ptr, c_char_p).value.decode("utf-8")
+ _libc.free(ptr)
+ return val
+
+
+def get_all_subfeatures(chip, feature, nr):
+ """
+ @return: (subfeature, next nr to query)
+ """
+ _nr = c_int(nr)
+ subfeature = _hdl.sensors_get_all_subfeatures(byref(chip), byref(feature), byref(_nr))
+ subfeature = subfeature.contents if bool(subfeature) else None
+ return subfeature, _nr.value
+
+
+def get_value(chip, subfeature_nr):
+ val = c_double()
+ err = _hdl.sensors_get_value(byref(chip), subfeature_nr, byref(val))
+ if err < 0:
+ raise_sensor_error(err, strerror(err))
+ return val.value
+
+
+def set_value(chip, subfeature_nr, value):
+ """
+ @attention this function was not tested
+ """
+ val = c_double(value)
+ err = _hdl.sensors_set_value(byref(chip), subfeature_nr, byref(val))
+ if err < 0:
+ raise_sensor_error(err, strerror(err))
+
+
+### Convenience API ###
+class ChipIterator:
+ def __init__(self, match=None):
+ self.match = parse_chip_name(match) if match is not None else None
+ self.nr = 0
+
+ def __iter__(self):
+ return self
+
+ def __next__(self):
+ chip, self.nr = get_detected_chips(self.match, self.nr)
+
+ if chip is None:
+ raise StopIteration
+
+ return chip
+
+ def __del__(self):
+ if self.match is not None:
+ free_chip_name(self.match)
+
+ def next(self): # python2 compatibility
+ return self.__next__()
+
+
+class FeatureIterator:
+ def __init__(self, chip):
+ self.chip = chip
+ self.nr = 0
+
+ def __iter__(self):
+ return self
+
+ def __next__(self):
+ feature, self.nr = get_features(self.chip, self.nr)
+
+ if feature is None:
+ raise StopIteration
+
+ return feature
+
+ def next(self): # python2 compatibility
+ return self.__next__()
+
+
+class SubFeatureIterator:
+ def __init__(self, chip, feature):
+ self.chip = chip
+ self.feature = feature
+ self.nr = 0
+
+ def __iter__(self):
+ return self
+
+ def __next__(self):
+ subfeature, self.nr = get_all_subfeatures(self.chip, self.feature, self.nr)
+
+ if subfeature is None:
+ raise StopIteration
+
+ return subfeature
+
+ def next(self): # python2 compatibility
+ return self.__next__()
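+
+
+# Illustrative usage sketch (comment only): walking every detected chip,
+# feature and subfeature with the iterators above. Assumes init() succeeded
+# and the host actually exposes lm-sensors chips.
+#
+#     init()
+#     try:
+#         for chip in ChipIterator():
+#             for feat in FeatureIterator(chip):
+#                 for sub in SubFeatureIterator(chip, feat):
+#                     print(chip_snprintf_name(chip),
+#                           get_label(chip, feat),
+#                           get_value(chip, sub.number))
+#     finally:
+#         cleanup()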
diff --git a/collectors/python.d.plugin/python_modules/third_party/mcrcon.py b/collectors/python.d.plugin/python_modules/third_party/mcrcon.py
new file mode 100644
index 0000000..a65a304
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/third_party/mcrcon.py
@@ -0,0 +1,74 @@
+# Minecraft Remote Console module.
+#
+# Copyright (C) 2015 Barnaby Gale
+#
+# SPDX-License-Identifier: MIT
+
+import socket
+import select
+import struct
+import time
+
+
+class MCRconException(Exception):
+ pass
+
+
+class MCRcon(object):
+ socket = None
+
+ def connect(self, host, port, password):
+ if self.socket is not None:
+ raise MCRconException("Already connected")
+ self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ self.socket.settimeout(0.9)
+ self.socket.connect((host, port))
+ self.send(3, password)
+
+ def disconnect(self):
+ if self.socket is None:
+ raise MCRconException("Already disconnected")
+ self.socket.close()
+ self.socket = None
+
+ def read(self, length):
+ data = b""
+ while len(data) < length:
+ chunk = self.socket.recv(length - len(data))
+ if not chunk: # recv returns b"" once the server closes the socket
+ raise MCRconException("Connection closed by server")
+ data += chunk
+ return data
+
+ def send(self, out_type, out_data):
+ if self.socket is None:
+ raise MCRconException("Must connect before sending data")
+
+ # Send a request packet
+ out_payload = struct.pack('<ii', 0, out_type) + out_data.encode('utf8') + b'\x00\x00'
+ out_length = struct.pack('<i', len(out_payload))
+ self.socket.send(out_length + out_payload)
+
+ # Read response packets
+ in_data = ""
+ while True:
+ # Read a packet
+ in_length, = struct.unpack('<i', self.read(4))
+ in_payload = self.read(in_length)
+ in_id, in_type = struct.unpack('<ii', in_payload[:8])
+ in_data_partial, in_padding = in_payload[8:-2], in_payload[-2:]
+
+ # Sanity checks
+ if in_padding != b'\x00\x00':
+ raise MCRconException("Incorrect padding")
+ if in_id == -1:
+ raise MCRconException("Login failed")
+
+ # Record the response
+ in_data += in_data_partial.decode('utf8')
+
+ # If there's nothing more to receive, return the response
+ if len(select.select([self.socket], [], [], 0)[0]) == 0:
+ return in_data
+
+ def command(self, command):
+ result = self.send(2, command)
+ time.sleep(0.003) # MC-72390 workaround
+ return result
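+
+
+# Illustrative usage sketch (comment only; host, port and password are
+# placeholders, not part of this module):
+#
+#     rcon = MCRcon()
+#     rcon.connect('127.0.0.1', 25575, 'secret')
+#     print(rcon.command('list'))
+#     rcon.disconnect()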
diff --git a/collectors/python.d.plugin/python_modules/third_party/monotonic.py b/collectors/python.d.plugin/python_modules/third_party/monotonic.py
new file mode 100644
index 0000000..4ebd556
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/third_party/monotonic.py
@@ -0,0 +1,201 @@
+# -*- coding: utf-8 -*-
+#
+# SPDX-License-Identifier: Apache-2.0
+"""
+ monotonic
+ ~~~~~~~~~
+
+ This module provides a ``monotonic()`` function which returns the
+ value (in fractional seconds) of a clock which never goes backwards.
+
+ On Python 3.3 or newer, ``monotonic`` will be an alias of
+ ``time.monotonic`` from the standard library. On older versions,
+ it will fall back to an equivalent implementation:
+
+ +-------------+----------------------------------------+
+ | Linux, BSD | ``clock_gettime(3)`` |
+ +-------------+----------------------------------------+
+ | Windows | ``GetTickCount`` or ``GetTickCount64`` |
+ +-------------+----------------------------------------+
+ | OS X | ``mach_absolute_time`` |
+ +-------------+----------------------------------------+
+
+ If no suitable implementation exists for the current platform,
+ attempting to import this module (or to import from it) will
+ cause a ``RuntimeError`` exception to be raised.
+
+
+ Copyright 2014, 2015, 2016 Ori Livneh <ori@wikimedia.org>
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+"""
+import time
+
+
+__all__ = ('monotonic',)
+
+
+try:
+ monotonic = time.monotonic
+except AttributeError:
+ import ctypes
+ import ctypes.util
+ import os
+ import sys
+ import threading
+
+
+ def clock_clock_gettime_c_library():
+ return ctypes.CDLL(ctypes.util.find_library('c'), use_errno=True).clock_gettime
+
+
+ def clock_clock_gettime_rt_library():
+ return ctypes.CDLL(ctypes.util.find_library('rt'), use_errno=True).clock_gettime
+
+
+ def clock_clock_gettime_c_library_synology6():
+ return ctypes.CDLL('/usr/lib/libc.so.6', use_errno=True).clock_gettime
+
+
+ def clock_clock_gettime_rt_library_synology6():
+ return ctypes.CDLL('/usr/lib/librt.so.1', use_errno=True).clock_gettime
+
+
+ def clock_gettime_linux():
+ # see https://github.com/netdata/netdata/issues/7976
+ order = [
+ clock_clock_gettime_c_library,
+ clock_clock_gettime_rt_library,
+ clock_clock_gettime_c_library_synology6,
+ clock_clock_gettime_rt_library_synology6,
+ ]
+
+ for gettime in order:
+ try:
+ return gettime()
+ except (RuntimeError, AttributeError, OSError):
+ continue
+ raise RuntimeError('cannot find the c and rt libraries')
+
+
+ try:
+ if sys.platform == 'darwin': # OS X, iOS
+ # See Technical Q&A QA1398 of the Mac Developer Library:
+ # <https://developer.apple.com/library/mac/qa/qa1398/>
+ libc = ctypes.CDLL('/usr/lib/libc.dylib', use_errno=True)
+
+ class mach_timebase_info_data_t(ctypes.Structure):
+ """System timebase info. Defined in <mach/mach_time.h>."""
+ _fields_ = (('numer', ctypes.c_uint32),
+ ('denom', ctypes.c_uint32))
+
+ mach_absolute_time = libc.mach_absolute_time
+ mach_absolute_time.restype = ctypes.c_uint64
+
+ timebase = mach_timebase_info_data_t()
+ libc.mach_timebase_info(ctypes.byref(timebase))
+ ticks_per_second = timebase.numer / timebase.denom * 1.0e9
+
+ def monotonic():
+ """Monotonic clock, cannot go backward."""
+ return mach_absolute_time() / ticks_per_second
+
+ elif sys.platform.startswith('win32') or sys.platform.startswith('cygwin'):
+ if sys.platform.startswith('cygwin'):
+ # Note: cygwin implements clock_gettime (CLOCK_MONOTONIC = 4) since
+ # version 1.7.6. Using raw WinAPI for maximum version compatibility.
+
+ # Ugly hack using the wrong calling convention (in 32-bit mode)
+ # because ctypes has no windll under cygwin (and it also seems that
+ # the code letting you select stdcall in _ctypes doesn't exist under
+ # the preprocessor definitions relevant to cygwin).
+ # This is 'safe' because:
+ # 1. The ABI of GetTickCount and GetTickCount64 is identical for
+ # both calling conventions because they both have no parameters.
+ # 2. libffi masks the problem because after making the call it doesn't
+ # touch anything through esp and epilogue code restores a correct
+ # esp from ebp afterwards.
+ try:
+ kernel32 = ctypes.cdll.kernel32
+ except OSError: # 'No such file or directory'
+ kernel32 = ctypes.cdll.LoadLibrary('kernel32.dll')
+ else:
+ kernel32 = ctypes.windll.kernel32
+
+ GetTickCount64 = getattr(kernel32, 'GetTickCount64', None)
+ if GetTickCount64:
+ # Windows Vista / Windows Server 2008 or newer.
+ GetTickCount64.restype = ctypes.c_ulonglong
+
+ def monotonic():
+ """Monotonic clock, cannot go backward."""
+ return GetTickCount64() / 1000.0
+
+ else:
+ # Before Windows Vista.
+ GetTickCount = kernel32.GetTickCount
+ GetTickCount.restype = ctypes.c_uint32
+
+ get_tick_count_lock = threading.Lock()
+ get_tick_count_last_sample = 0
+ get_tick_count_wraparounds = 0
+
+ def monotonic():
+ """Monotonic clock, cannot go backward."""
+ global get_tick_count_last_sample
+ global get_tick_count_wraparounds
+
+ with get_tick_count_lock:
+ current_sample = GetTickCount()
+ if current_sample < get_tick_count_last_sample:
+ get_tick_count_wraparounds += 1
+ get_tick_count_last_sample = current_sample
+
+ final_milliseconds = get_tick_count_wraparounds << 32
+ final_milliseconds += get_tick_count_last_sample
+ return final_milliseconds / 1000.0
+
+ else:
+ clock_gettime = clock_gettime_linux()
+
+ class timespec(ctypes.Structure):
+ """Time specification, as described in clock_gettime(3)."""
+ _fields_ = (('tv_sec', ctypes.c_long),
+ ('tv_nsec', ctypes.c_long))
+
+ if sys.platform.startswith('linux'):
+ CLOCK_MONOTONIC = 1
+ elif sys.platform.startswith('freebsd'):
+ CLOCK_MONOTONIC = 4
+ elif sys.platform.startswith('sunos5'):
+ CLOCK_MONOTONIC = 4
+ elif 'bsd' in sys.platform:
+ CLOCK_MONOTONIC = 3
+ elif sys.platform.startswith('aix'):
+ CLOCK_MONOTONIC = ctypes.c_longlong(10)
+
+ def monotonic():
+ """Monotonic clock, cannot go backward."""
+ ts = timespec()
+ if clock_gettime(CLOCK_MONOTONIC, ctypes.pointer(ts)):
+ errno = ctypes.get_errno()
+ raise OSError(errno, os.strerror(errno))
+ return ts.tv_sec + ts.tv_nsec / 1.0e9
+
+ # Perform a sanity-check.
+ if monotonic() - monotonic() > 0:
+ raise ValueError('monotonic() is not monotonic!')
+
+ except Exception as e:
+ raise RuntimeError('no suitable implementation for this system: ' + repr(e))
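+
+
+# Illustrative usage sketch (comment only): timing an interval with whichever
+# clock implementation was selected above; do_work() is a placeholder.
+#
+#     start = monotonic()
+#     do_work()
+#     elapsed = monotonic() - start  # seconds; immune to wall-clock jumps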
diff --git a/collectors/python.d.plugin/python_modules/third_party/ordereddict.py b/collectors/python.d.plugin/python_modules/third_party/ordereddict.py
new file mode 100644
index 0000000..589401b
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/third_party/ordereddict.py
@@ -0,0 +1,110 @@
+# Copyright (c) 2009 Raymond Hettinger
+#
+# SPDX-License-Identifier: MIT
+
+from UserDict import DictMixin
+
+
+class OrderedDict(dict, DictMixin):
+
+ def __init__(self, *args, **kwds):
+ if len(args) > 1:
+ raise TypeError('expected at most 1 argument, got %d' % len(args))
+ try:
+ self.__end
+ except AttributeError:
+ self.clear()
+ self.update(*args, **kwds)
+
+ def clear(self):
+ self.__end = end = []
+ end += [None, end, end] # sentinel node for doubly linked list
+ self.__map = {} # key --> [key, prev, next]
+ dict.clear(self)
+
+ def __setitem__(self, key, value):
+ if key not in self:
+ end = self.__end
+ curr = end[1]
+ curr[2] = end[1] = self.__map[key] = [key, curr, end]
+ dict.__setitem__(self, key, value)
+
+ def __delitem__(self, key):
+ dict.__delitem__(self, key)
+ key, prev, next = self.__map.pop(key)
+ prev[2] = next
+ next[1] = prev
+
+ def __iter__(self):
+ end = self.__end
+ curr = end[2]
+ while curr is not end:
+ yield curr[0]
+ curr = curr[2]
+
+ def __reversed__(self):
+ end = self.__end
+ curr = end[1]
+ while curr is not end:
+ yield curr[0]
+ curr = curr[1]
+
+ def popitem(self, last=True):
+ if not self:
+ raise KeyError('dictionary is empty')
+ if last:
+ key = reversed(self).next()
+ else:
+ key = iter(self).next()
+ value = self.pop(key)
+ return key, value
+
+ def __reduce__(self):
+ items = [[k, self[k]] for k in self]
+ tmp = self.__map, self.__end
+ del self.__map, self.__end
+ inst_dict = vars(self).copy()
+ self.__map, self.__end = tmp
+ if inst_dict:
+ return self.__class__, (items,), inst_dict
+ return self.__class__, (items,)
+
+ def keys(self):
+ return list(self)
+
+ setdefault = DictMixin.setdefault
+ update = DictMixin.update
+ pop = DictMixin.pop
+ values = DictMixin.values
+ items = DictMixin.items
+ iterkeys = DictMixin.iterkeys
+ itervalues = DictMixin.itervalues
+ iteritems = DictMixin.iteritems
+
+ def __repr__(self):
+ if not self:
+ return '%s()' % (self.__class__.__name__,)
+ return '%s(%r)' % (self.__class__.__name__, self.items())
+
+ def copy(self):
+ return self.__class__(self)
+
+ @classmethod
+ def fromkeys(cls, iterable, value=None):
+ d = cls()
+ for key in iterable:
+ d[key] = value
+ return d
+
+ def __eq__(self, other):
+ if isinstance(other, OrderedDict):
+ if len(self) != len(other):
+ return False
+ for p, q in zip(self.items(), other.items()):
+ if p != q:
+ return False
+ return True
+ return dict.__eq__(self, other)
+
+ def __ne__(self, other):
+ return not self == other
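+
+
+# Illustrative usage sketch (comment only): insertion order is preserved,
+# unlike plain dict on the old interpreters this backport targets.
+#
+#     d = OrderedDict([('b', 1), ('a', 2)])
+#     d['c'] = 3
+#     assert d.keys() == ['b', 'a', 'c']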
diff --git a/collectors/python.d.plugin/python_modules/urllib3/__init__.py b/collectors/python.d.plugin/python_modules/urllib3/__init__.py
new file mode 100644
index 0000000..3add848
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/urllib3/__init__.py
@@ -0,0 +1,98 @@
+# SPDX-License-Identifier: MIT
+"""
+urllib3 - Thread-safe connection pooling and re-using.
+"""
+
+from __future__ import absolute_import
+import warnings
+
+from .connectionpool import (
+ HTTPConnectionPool,
+ HTTPSConnectionPool,
+ connection_from_url
+)
+
+from . import exceptions
+from .filepost import encode_multipart_formdata
+from .poolmanager import PoolManager, ProxyManager, proxy_from_url
+from .response import HTTPResponse
+from .util.request import make_headers
+from .util.url import get_host
+from .util.timeout import Timeout
+from .util.retry import Retry
+
+
+# Set default logging handler to avoid "No handler found" warnings.
+import logging
+try: # Python 2.7+
+ from logging import NullHandler
+except ImportError:
+ class NullHandler(logging.Handler):
+ def emit(self, record):
+ pass
+
+__author__ = 'Andrey Petrov (andrey.petrov@shazow.net)'
+__license__ = 'MIT'
+__version__ = '1.21.1'
+
+__all__ = (
+ 'HTTPConnectionPool',
+ 'HTTPSConnectionPool',
+ 'PoolManager',
+ 'ProxyManager',
+ 'HTTPResponse',
+ 'Retry',
+ 'Timeout',
+ 'add_stderr_logger',
+ 'connection_from_url',
+ 'disable_warnings',
+ 'encode_multipart_formdata',
+ 'get_host',
+ 'make_headers',
+ 'proxy_from_url',
+)
+
+logging.getLogger(__name__).addHandler(NullHandler())
+
+
+def add_stderr_logger(level=logging.DEBUG):
+ """
+ Helper for quickly adding a StreamHandler to the logger. Useful for
+ debugging.
+
+ Returns the handler after adding it.
+ """
+ # This method needs to be in this __init__.py to get the __name__ correct
+ # even if urllib3 is vendored within another package.
+ logger = logging.getLogger(__name__)
+ handler = logging.StreamHandler()
+ handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s %(message)s'))
+ logger.addHandler(handler)
+ logger.setLevel(level)
+ logger.debug('Added a stderr logging handler to logger: %s', __name__)
+ return handler
+
+
+# ... Clean up.
+del NullHandler
+
+
+# All warning filters *must* be appended unless you're really certain that they
+# shouldn't be: otherwise, it's very hard for users to use most Python
+# mechanisms to silence them.
+# SecurityWarning's always go off by default.
+warnings.simplefilter('always', exceptions.SecurityWarning, append=True)
+# SubjectAltNameWarning's should go off once per host
+warnings.simplefilter('default', exceptions.SubjectAltNameWarning, append=True)
+# InsecurePlatformWarning's don't vary between requests, so we keep it default.
+warnings.simplefilter('default', exceptions.InsecurePlatformWarning,
+ append=True)
+# SNIMissingWarnings should go off only once.
+warnings.simplefilter('default', exceptions.SNIMissingWarning, append=True)
+
+
+def disable_warnings(category=exceptions.HTTPWarning):
+ """
+ Helper for quickly disabling all urllib3 warnings.
+ """
+ warnings.simplefilter('ignore', category)
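+
+
+# Illustrative usage sketch (comment only, assuming this vendored copy is
+# importable as `urllib3`):
+#
+#     import urllib3
+#     urllib3.add_stderr_logger()   # verbose debug logging to stderr
+#     urllib3.disable_warnings()    # silence HTTPWarning and its subclasses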
diff --git a/collectors/python.d.plugin/python_modules/urllib3/_collections.py b/collectors/python.d.plugin/python_modules/urllib3/_collections.py
new file mode 100644
index 0000000..c1d2fad
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/urllib3/_collections.py
@@ -0,0 +1,315 @@
+# SPDX-License-Identifier: MIT
+from __future__ import absolute_import
+from collections import Mapping, MutableMapping
+try:
+ from threading import RLock
+except ImportError: # Platform-specific: No threads available
+ class RLock:
+ def __enter__(self):
+ pass
+
+ def __exit__(self, exc_type, exc_value, traceback):
+ pass
+
+
+try: # Python 2.7+
+ from collections import OrderedDict
+except ImportError:
+ from .packages.ordered_dict import OrderedDict
+from .packages.six import iterkeys, itervalues, PY3
+
+
+__all__ = ['RecentlyUsedContainer', 'HTTPHeaderDict']
+
+
+_Null = object()
+
+
+class RecentlyUsedContainer(MutableMapping):
+ """
+ Provides a thread-safe dict-like container which maintains up to
+ ``maxsize`` keys while throwing away the least-recently-used keys beyond
+ ``maxsize``.
+
+ :param maxsize:
+ Maximum number of recent elements to retain.
+
+ :param dispose_func:
+ Every time an item is evicted from the container,
+ ``dispose_func(value)`` is called.
+ """
+
+ ContainerCls = OrderedDict
+
+ def __init__(self, maxsize=10, dispose_func=None):
+ self._maxsize = maxsize
+ self.dispose_func = dispose_func
+
+ self._container = self.ContainerCls()
+ self.lock = RLock()
+
+ def __getitem__(self, key):
+ # Re-insert the item, moving it to the end of the eviction line.
+ with self.lock:
+ item = self._container.pop(key)
+ self._container[key] = item
+ return item
+
+ def __setitem__(self, key, value):
+ evicted_value = _Null
+ with self.lock:
+ # Possibly evict the existing value of 'key'
+ evicted_value = self._container.get(key, _Null)
+ self._container[key] = value
+
+ # If we didn't evict an existing value, we might have to evict the
+ # least recently used item from the beginning of the container.
+ if len(self._container) > self._maxsize:
+ _key, evicted_value = self._container.popitem(last=False)
+
+ if self.dispose_func and evicted_value is not _Null:
+ self.dispose_func(evicted_value)
+
+ def __delitem__(self, key):
+ with self.lock:
+ value = self._container.pop(key)
+
+ if self.dispose_func:
+ self.dispose_func(value)
+
+ def __len__(self):
+ with self.lock:
+ return len(self._container)
+
+ def __iter__(self):
+ raise NotImplementedError('Iteration over this class is unlikely to be threadsafe.')
+
+ def clear(self):
+ with self.lock:
+ # Copy pointers to all values, then wipe the mapping
+ values = list(itervalues(self._container))
+ self._container.clear()
+
+ if self.dispose_func:
+ for value in values:
+ self.dispose_func(value)
+
+ def keys(self):
+ with self.lock:
+ return list(iterkeys(self._container))
+
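+# Illustrative sketch (comment only): an LRU container that disposes of
+# evicted values, e.g. closing stale connections; `maxsize` and the lambda
+# are hypothetical.
+#
+#     lru = RecentlyUsedContainer(maxsize=8, dispose_func=lambda v: v.close())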
+
+class HTTPHeaderDict(MutableMapping):
+ """
+ :param headers:
+ An iterable of field-value pairs. Must not contain multiple field names
+ when compared case-insensitively.
+
+ :param kwargs:
+ Additional field-value pairs to pass in to ``dict.update``.
+
+ A ``dict`` like container for storing HTTP Headers.
+
+ Field names are stored and compared case-insensitively in compliance with
+ RFC 7230. Iteration provides the first case-sensitive key seen for each
+ case-insensitive pair.
+
+ Using ``__setitem__`` syntax overwrites fields that compare equal
+ case-insensitively in order to maintain ``dict``'s api. For fields that
+ compare equal, instead create a new ``HTTPHeaderDict`` and use ``.add``
+ in a loop.
+
+ If multiple fields that are equal case-insensitively are passed to the
+ constructor or ``.update``, the behavior is undefined and some will be
+ lost.
+
+ >>> headers = HTTPHeaderDict()
+ >>> headers.add('Set-Cookie', 'foo=bar')
+ >>> headers.add('set-cookie', 'baz=quxx')
+ >>> headers['content-length'] = '7'
+ >>> headers['SET-cookie']
+ 'foo=bar, baz=quxx'
+ >>> headers['Content-Length']
+ '7'
+ """
+
+ def __init__(self, headers=None, **kwargs):
+ super(HTTPHeaderDict, self).__init__()
+ self._container = OrderedDict()
+ if headers is not None:
+ if isinstance(headers, HTTPHeaderDict):
+ self._copy_from(headers)
+ else:
+ self.extend(headers)
+ if kwargs:
+ self.extend(kwargs)
+
+ def __setitem__(self, key, val):
+ self._container[key.lower()] = [key, val]
+ return self._container[key.lower()]
+
+ def __getitem__(self, key):
+ val = self._container[key.lower()]
+ return ', '.join(val[1:])
+
+ def __delitem__(self, key):
+ del self._container[key.lower()]
+
+ def __contains__(self, key):
+ return key.lower() in self._container
+
+ def __eq__(self, other):
+ if not isinstance(other, Mapping) and not hasattr(other, 'keys'):
+ return False
+ if not isinstance(other, type(self)):
+ other = type(self)(other)
+ return (dict((k.lower(), v) for k, v in self.itermerged()) ==
+ dict((k.lower(), v) for k, v in other.itermerged()))
+
+ def __ne__(self, other):
+ return not self.__eq__(other)
+
+ if not PY3: # Python 2
+ iterkeys = MutableMapping.iterkeys
+ itervalues = MutableMapping.itervalues
+
+ __marker = object()
+
+ def __len__(self):
+ return len(self._container)
+
+ def __iter__(self):
+ # Only provide the originally cased names
+ for vals in self._container.values():
+ yield vals[0]
+
+ def pop(self, key, default=__marker):
+ '''D.pop(k[,d]) -> v, remove specified key and return the corresponding value.
+ If key is not found, d is returned if given, otherwise KeyError is raised.
+ '''
+ # Using the MutableMapping function directly fails due to the private marker.
+ # Using ordinary dict.pop would expose the internal structures.
+ # So let's reinvent the wheel.
+ try:
+ value = self[key]
+ except KeyError:
+ if default is self.__marker:
+ raise
+ return default
+ else:
+ del self[key]
+ return value
+
+ def discard(self, key):
+ try:
+ del self[key]
+ except KeyError:
+ pass
+
+ def add(self, key, val):
+ """Adds a (name, value) pair, doesn't overwrite the value if it already
+ exists.
+
+ >>> headers = HTTPHeaderDict(foo='bar')
+ >>> headers.add('Foo', 'baz')
+ >>> headers['foo']
+ 'bar, baz'
+ """
+ key_lower = key.lower()
+ new_vals = [key, val]
+ # Keep the common case aka no item present as fast as possible
+ vals = self._container.setdefault(key_lower, new_vals)
+ if new_vals is not vals:
+ vals.append(val)
+
+ def extend(self, *args, **kwargs):
+ """Generic import function for any type of header-like object.
+ Adapted version of MutableMapping.update in order to insert items
+ with self.add instead of self.__setitem__
+ """
+ if len(args) > 1:
+ raise TypeError("extend() takes at most 1 positional "
+ "arguments ({0} given)".format(len(args)))
+ other = args[0] if len(args) >= 1 else ()
+
+ if isinstance(other, HTTPHeaderDict):
+ for key, val in other.iteritems():
+ self.add(key, val)
+ elif isinstance(other, Mapping):
+ for key in other:
+ self.add(key, other[key])
+ elif hasattr(other, "keys"):
+ for key in other.keys():
+ self.add(key, other[key])
+ else:
+ for key, value in other:
+ self.add(key, value)
+
+ for key, value in kwargs.items():
+ self.add(key, value)
+
+ def getlist(self, key):
+ """Returns a list of all the values for the named field. Returns an
+ empty list if the key doesn't exist."""
+ try:
+ vals = self._container[key.lower()]
+ except KeyError:
+ return []
+ else:
+ return vals[1:]
+
+ # Backwards compatibility for httplib
+ getheaders = getlist
+ getallmatchingheaders = getlist
+ iget = getlist
+
+ def __repr__(self):
+ return "%s(%s)" % (type(self).__name__, dict(self.itermerged()))
+
+ def _copy_from(self, other):
+ for key in other:
+ val = other.getlist(key)
+ if isinstance(val, list):
+ # Don't need to convert tuples
+ val = list(val)
+ self._container[key.lower()] = [key] + val
+
+ def copy(self):
+ clone = type(self)()
+ clone._copy_from(self)
+ return clone
+
+ def iteritems(self):
+ """Iterate over all header lines, including duplicate ones."""
+ for key in self:
+ vals = self._container[key.lower()]
+ for val in vals[1:]:
+ yield vals[0], val
+
+ def itermerged(self):
+ """Iterate over all headers, merging duplicate ones together."""
+ for key in self:
+ val = self._container[key.lower()]
+ yield val[0], ', '.join(val[1:])
+
+ def items(self):
+ return list(self.iteritems())
+
+ @classmethod
+ def from_httplib(cls, message): # Python 2
+ """Read headers from a Python 2 httplib message object."""
+ # python2.7 does not expose a proper API for exporting multiheaders
+ # efficiently. This function re-reads raw lines from the message
+ # object and extracts the multiheaders properly.
+ headers = []
+
+ for line in message.headers:
+ if line.startswith((' ', '\t')):
+ key, value = headers[-1]
+ headers[-1] = (key, value + '\r\n' + line.rstrip())
+ continue
+
+ key, value = line.split(':', 1)
+ headers.append((key, value.strip()))
+
+ return cls(headers)
diff --git a/collectors/python.d.plugin/python_modules/urllib3/connection.py b/collectors/python.d.plugin/python_modules/urllib3/connection.py
new file mode 100644
index 0000000..f757493
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/urllib3/connection.py
@@ -0,0 +1,374 @@
+# SPDX-License-Identifier: MIT
+from __future__ import absolute_import
+import datetime
+import logging
+import os
+import sys
+import socket
+from socket import error as SocketError, timeout as SocketTimeout
+import warnings
+from .packages import six
+from .packages.six.moves.http_client import HTTPConnection as _HTTPConnection
+from .packages.six.moves.http_client import HTTPException # noqa: F401
+
+try: # Compiled with SSL?
+ import ssl
+ BaseSSLError = ssl.SSLError
+except (ImportError, AttributeError): # Platform-specific: No SSL.
+ ssl = None
+
+ class BaseSSLError(BaseException):
+ pass
+
+
+try: # Python 3:
+ # Not a no-op, we're adding this to the namespace so it can be imported.
+ ConnectionError = ConnectionError
+except NameError: # Python 2:
+ class ConnectionError(Exception):
+ pass
+
+
+from .exceptions import (
+ NewConnectionError,
+ ConnectTimeoutError,
+ SubjectAltNameWarning,
+ SystemTimeWarning,
+)
+from .packages.ssl_match_hostname import match_hostname, CertificateError
+
+from .util.ssl_ import (
+ resolve_cert_reqs,
+ resolve_ssl_version,
+ assert_fingerprint,
+ create_urllib3_context,
+ ssl_wrap_socket
+)
+
+
+from .util import connection
+
+from ._collections import HTTPHeaderDict
+
+log = logging.getLogger(__name__)
+
+port_by_scheme = {
+ 'http': 80,
+ 'https': 443,
+}
+
+# When updating RECENT_DATE, move it to
+# within two years of the current date, and no
+# earlier than 6 months ago.
+RECENT_DATE = datetime.date(2016, 1, 1)
+
+
+class DummyConnection(object):
+ """Used to detect a failed ConnectionCls import."""
+ pass
+
+
+class HTTPConnection(_HTTPConnection, object):
+ """
+ Based on httplib.HTTPConnection but provides an extra constructor
+ backwards-compatibility layer between older and newer Pythons.
+
+ Additional keyword parameters are used to configure attributes of the connection.
+ Accepted parameters include:
+
+ - ``strict``: See the documentation on :class:`urllib3.connectionpool.HTTPConnectionPool`
+ - ``source_address``: Set the source address for the current connection.
+
+ .. note:: This is ignored for Python 2.6. It is only applied for 2.7 and 3.x
+
+ - ``socket_options``: Set specific options on the underlying socket. If not specified, then
+ defaults are loaded from ``HTTPConnection.default_socket_options`` which includes disabling
+ Nagle's algorithm (sets TCP_NODELAY to 1) unless the connection is behind a proxy.
+
+ For example, if you wish to enable TCP Keep Alive in addition to the defaults,
+ you might pass::
+
+ HTTPConnection.default_socket_options + [
+ (socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1),
+ ]
+
+ Or you may want to disable the defaults by passing an empty list (e.g., ``[]``).
+ """
+
+ default_port = port_by_scheme['http']
+
+ #: Disable Nagle's algorithm by default.
+ #: ``[(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)]``
+ default_socket_options = [(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)]
+
+ #: Whether this connection verifies the host's certificate.
+ is_verified = False
+
+ def __init__(self, *args, **kw):
+ if six.PY3: # Python 3
+ kw.pop('strict', None)
+
+ # Pre-set source_address in case we have an older Python like 2.6.
+ self.source_address = kw.get('source_address')
+
+ if sys.version_info < (2, 7): # Python 2.6
+ # _HTTPConnection on Python 2.6 will balk at this keyword arg, but
+ # not newer versions. We can still use it when creating a
+ # connection though, so we pop it *after* we have saved it as
+ # self.source_address.
+ kw.pop('source_address', None)
+
+ #: The socket options provided by the user. If no options are
+ #: provided, we use the default options.
+ self.socket_options = kw.pop('socket_options', self.default_socket_options)
+
+ # Superclass also sets self.source_address in Python 2.7+.
+ _HTTPConnection.__init__(self, *args, **kw)
+
+ def _new_conn(self):
+ """ Establish a socket connection and set nodelay settings on it.
+
+ :return: New socket connection.
+ """
+ extra_kw = {}
+ if self.source_address:
+ extra_kw['source_address'] = self.source_address
+
+ if self.socket_options:
+ extra_kw['socket_options'] = self.socket_options
+
+ try:
+ conn = connection.create_connection(
+ (self.host, self.port), self.timeout, **extra_kw)
+
+ except SocketTimeout as e:
+ raise ConnectTimeoutError(
+ self, "Connection to %s timed out. (connect timeout=%s)" %
+ (self.host, self.timeout))
+
+ except SocketError as e:
+ raise NewConnectionError(
+ self, "Failed to establish a new connection: %s" % e)
+
+ return conn
+
+ def _prepare_conn(self, conn):
+ self.sock = conn
+ # the _tunnel_host attribute was added in python 2.6.3 (via
+ # http://hg.python.org/cpython/rev/0f57b30a152f) so pythons 2.6(0-2) do
+ # not have them.
+ if getattr(self, '_tunnel_host', None):
+ # TODO: Fix tunnel so it doesn't depend on self.sock state.
+ self._tunnel()
+ # Mark this connection as not reusable
+ self.auto_open = 0
+
+ def connect(self):
+ conn = self._new_conn()
+ self._prepare_conn(conn)
+
+ def request_chunked(self, method, url, body=None, headers=None):
+ """
+ Alternative to the common request method, which sends the
+ body with chunked encoding and not as one block
+ """
+ headers = HTTPHeaderDict(headers if headers is not None else {})
+ skip_accept_encoding = 'accept-encoding' in headers
+ skip_host = 'host' in headers
+ self.putrequest(
+ method,
+ url,
+ skip_accept_encoding=skip_accept_encoding,
+ skip_host=skip_host
+ )
+ for header, value in headers.items():
+ self.putheader(header, value)
+ if 'transfer-encoding' not in headers:
+ self.putheader('Transfer-Encoding', 'chunked')
+ self.endheaders()
+
+ if body is not None:
+ stringish_types = six.string_types + (six.binary_type,)
+ if isinstance(body, stringish_types):
+ body = (body,)
+ for chunk in body:
+ if not chunk:
+ continue
+ if not isinstance(chunk, six.binary_type):
+ chunk = chunk.encode('utf8')
+ len_str = hex(len(chunk))[2:]
+ self.send(len_str.encode('utf-8'))
+ self.send(b'\r\n')
+ self.send(chunk)
+ self.send(b'\r\n')
+
+ # After the if clause, to always have a closed body
+ self.send(b'0\r\n\r\n')
+
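+# Illustrative sketch (comment only): streaming a request body with
+# request_chunked(); the host, path and chunks are placeholders.
+#
+#     conn = HTTPConnection('example.com')
+#     conn.request_chunked('POST', '/upload', body=(b'part1', b'part2'))
+#     resp = conn.getresponse()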
+
+class HTTPSConnection(HTTPConnection):
+ default_port = port_by_scheme['https']
+
+ ssl_version = None
+
+ def __init__(self, host, port=None, key_file=None, cert_file=None,
+ strict=None, timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
+ ssl_context=None, **kw):
+
+ HTTPConnection.__init__(self, host, port, strict=strict,
+ timeout=timeout, **kw)
+
+ self.key_file = key_file
+ self.cert_file = cert_file
+ self.ssl_context = ssl_context
+
+ # Required property for Google AppEngine 1.9.0 which otherwise causes
+ # HTTPS requests to go out as HTTP. (See Issue #356)
+ self._protocol = 'https'
+
+ def connect(self):
+ conn = self._new_conn()
+ self._prepare_conn(conn)
+
+ if self.ssl_context is None:
+ self.ssl_context = create_urllib3_context(
+ ssl_version=resolve_ssl_version(None),
+ cert_reqs=resolve_cert_reqs(None),
+ )
+
+ self.sock = ssl_wrap_socket(
+ sock=conn,
+ keyfile=self.key_file,
+ certfile=self.cert_file,
+ ssl_context=self.ssl_context,
+ )
+
+
+class VerifiedHTTPSConnection(HTTPSConnection):
+ """
+ Based on httplib.HTTPSConnection but wraps the socket with
+ SSL certification.
+ """
+ cert_reqs = None
+ ca_certs = None
+ ca_cert_dir = None
+ ssl_version = None
+ assert_fingerprint = None
+
+ def set_cert(self, key_file=None, cert_file=None,
+ cert_reqs=None, ca_certs=None,
+ assert_hostname=None, assert_fingerprint=None,
+ ca_cert_dir=None):
+ """
+ This method should only be called once, before the connection is used.
+ """
+ # If cert_reqs is not provided, we can try to guess. If the user gave
+ # us a cert database, we assume they want to use it: otherwise, if
+ # they gave us an SSL Context object we should use whatever is set for
+ # it.
+ if cert_reqs is None:
+ if ca_certs or ca_cert_dir:
+ cert_reqs = 'CERT_REQUIRED'
+ elif self.ssl_context is not None:
+ cert_reqs = self.ssl_context.verify_mode
+
+ self.key_file = key_file
+ self.cert_file = cert_file
+ self.cert_reqs = cert_reqs
+ self.assert_hostname = assert_hostname
+ self.assert_fingerprint = assert_fingerprint
+ self.ca_certs = ca_certs and os.path.expanduser(ca_certs)
+ self.ca_cert_dir = ca_cert_dir and os.path.expanduser(ca_cert_dir)
+
+ def connect(self):
+ # Add certificate verification
+ conn = self._new_conn()
+
+ hostname = self.host
+ if getattr(self, '_tunnel_host', None):
+ # _tunnel_host was added in Python 2.6.3
+ # (See: http://hg.python.org/cpython/rev/0f57b30a152f)
+
+ self.sock = conn
+ # Calls self._set_hostport(), so self.host is
+ # self._tunnel_host below.
+ self._tunnel()
+ # Mark this connection as not reusable
+ self.auto_open = 0
+
+ # Override the host with the one we're requesting data from.
+ hostname = self._tunnel_host
+
+ is_time_off = datetime.date.today() < RECENT_DATE
+ if is_time_off:
+ warnings.warn((
+ 'System time is way off (before {0}). This will probably '
+ 'lead to SSL verification errors').format(RECENT_DATE),
+ SystemTimeWarning
+ )
+
+ # Wrap socket using verification with the root certs in
+ # trusted_root_certs
+ if self.ssl_context is None:
+ self.ssl_context = create_urllib3_context(
+ ssl_version=resolve_ssl_version(self.ssl_version),
+ cert_reqs=resolve_cert_reqs(self.cert_reqs),
+ )
+
+ context = self.ssl_context
+ context.verify_mode = resolve_cert_reqs(self.cert_reqs)
+ self.sock = ssl_wrap_socket(
+ sock=conn,
+ keyfile=self.key_file,
+ certfile=self.cert_file,
+ ca_certs=self.ca_certs,
+ ca_cert_dir=self.ca_cert_dir,
+ server_hostname=hostname,
+ ssl_context=context)
+
+ if self.assert_fingerprint:
+ assert_fingerprint(self.sock.getpeercert(binary_form=True),
+ self.assert_fingerprint)
+ elif context.verify_mode != ssl.CERT_NONE \
+ and not getattr(context, 'check_hostname', False) \
+ and self.assert_hostname is not False:
+ # While urllib3 attempts to always turn off hostname matching from
+ # the TLS library, this cannot always be done. So we check whether
+ # the TLS Library still thinks it's matching hostnames.
+ cert = self.sock.getpeercert()
+ if not cert.get('subjectAltName', ()):
+ warnings.warn((
+ 'Certificate for {0} has no `subjectAltName`, falling back to check for a '
+ '`commonName` for now. This feature is being removed by major browsers and '
+ 'deprecated by RFC 2818. (See https://github.com/shazow/urllib3/issues/497 '
+ 'for details.)'.format(hostname)),
+ SubjectAltNameWarning
+ )
+ _match_hostname(cert, self.assert_hostname or hostname)
+
+ self.is_verified = (
+ context.verify_mode == ssl.CERT_REQUIRED or
+ self.assert_fingerprint is not None
+ )
+
+
+def _match_hostname(cert, asserted_hostname):
+ try:
+ match_hostname(cert, asserted_hostname)
+ except CertificateError as e:
+ log.error(
+ 'Certificate did not match expected hostname: %s. '
+ 'Certificate: %s', asserted_hostname, cert
+ )
+ # Add cert to exception and reraise so client code can inspect
+ # the cert when catching the exception, if they want to
+ e._peer_cert = cert
+ raise
+
+
+if ssl:
+ # Make a copy for testing.
+ UnverifiedHTTPSConnection = HTTPSConnection
+ HTTPSConnection = VerifiedHTTPSConnection
+else:
+ HTTPSConnection = DummyConnection
diff --git a/collectors/python.d.plugin/python_modules/urllib3/connectionpool.py b/collectors/python.d.plugin/python_modules/urllib3/connectionpool.py
new file mode 100644
index 0000000..90e4c86
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/urllib3/connectionpool.py
@@ -0,0 +1,900 @@
+# SPDX-License-Identifier: MIT
+from __future__ import absolute_import
+import errno
+import logging
+import sys
+import warnings
+
+from socket import error as SocketError, timeout as SocketTimeout
+import socket
+
+
+from .exceptions import (
+ ClosedPoolError,
+ ProtocolError,
+ EmptyPoolError,
+ HeaderParsingError,
+ HostChangedError,
+ LocationValueError,
+ MaxRetryError,
+ ProxyError,
+ ReadTimeoutError,
+ SSLError,
+ TimeoutError,
+ InsecureRequestWarning,
+ NewConnectionError,
+)
+from .packages.ssl_match_hostname import CertificateError
+from .packages import six
+from .packages.six.moves import queue
+from .connection import (
+ port_by_scheme,
+ DummyConnection,
+ HTTPConnection, HTTPSConnection, VerifiedHTTPSConnection,
+ HTTPException, BaseSSLError,
+)
+from .request import RequestMethods
+from .response import HTTPResponse
+
+from .util.connection import is_connection_dropped
+from .util.request import set_file_position
+from .util.response import assert_header_parsing
+from .util.retry import Retry
+from .util.timeout import Timeout
+from .util.url import get_host, Url
+
+
+if six.PY2:
+ # Queue is imported for side effects on MS Windows
+ import Queue as _unused_module_Queue # noqa: F401
+
+xrange = six.moves.xrange
+
+log = logging.getLogger(__name__)
+
+_Default = object()
+
+
+# Pool objects
+class ConnectionPool(object):
+ """
+ Base class for all connection pools, such as
+ :class:`.HTTPConnectionPool` and :class:`.HTTPSConnectionPool`.
+ """
+
+ scheme = None
+ QueueCls = queue.LifoQueue
+
+ def __init__(self, host, port=None):
+ if not host:
+ raise LocationValueError("No host specified.")
+
+ self.host = _ipv6_host(host).lower()
+ self.port = port
+
+ def __str__(self):
+ return '%s(host=%r, port=%r)' % (type(self).__name__,
+ self.host, self.port)
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ self.close()
+ # Return False to re-raise any potential exceptions
+ return False
+
+ def close(self):
+ """
+ Close all pooled connections and disable the pool.
+ """
+ pass
+
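+# Illustrative sketch (comment only): pools support the context-manager
+# protocol defined above; 'example.com' is a placeholder host.
+#
+#     with HTTPConnectionPool('example.com', maxsize=2) as pool:
+#         r = pool.urlopen('GET', '/')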
+
+# This is taken from http://hg.python.org/cpython/file/7aaba721ebc0/Lib/socket.py#l252
+_blocking_errnos = set([errno.EAGAIN, errno.EWOULDBLOCK])
+
+
+class HTTPConnectionPool(ConnectionPool, RequestMethods):
+ """
+ Thread-safe connection pool for one host.
+
+ :param host:
+ Host used for this HTTP Connection (e.g. "localhost"), passed into
+ :class:`httplib.HTTPConnection`.
+
+ :param port:
+ Port used for this HTTP Connection (None is equivalent to 80), passed
+ into :class:`httplib.HTTPConnection`.
+
+ :param strict:
+ Causes BadStatusLine to be raised if the status line can't be parsed
+ as a valid HTTP/1.0 or 1.1 status line, passed into
+ :class:`httplib.HTTPConnection`.
+
+ .. note::
+ Only works in Python 2. This parameter is ignored in Python 3.
+
+ :param timeout:
+ Socket timeout in seconds for each individual connection. This can
+ be a float or integer, which sets the timeout for the HTTP request,
+ or an instance of :class:`urllib3.util.Timeout` which gives you more
+ fine-grained control over request timeouts. After the constructor has
+ run, this is always a `urllib3.util.Timeout` object.
+
+ :param maxsize:
+ Number of connections to save that can be reused. More than 1 is useful
+ in multithreaded situations. If ``block`` is set to False, more
+ connections will be created but they will not be saved once they've
+ been used.
+
+ :param block:
+ If set to True, no more than ``maxsize`` connections will be used at
+ a time. When no free connections are available, the call will block
+ until a connection has been released. This is a useful side effect for
+ particular multithreaded situations where one does not want to use more
+ than maxsize connections per host to prevent flooding.
+
+ :param headers:
+ Headers to include with all requests, unless other headers are given
+ explicitly.
+
+ :param retries:
+ Retry configuration to use by default with requests in this pool.
+
+ :param _proxy:
+ Parsed proxy URL, should not be used directly, instead, see
+ :class:`urllib3.connectionpool.ProxyManager`
+
+ :param _proxy_headers:
+ A dictionary with proxy headers, should not be used directly,
+ instead, see :class:`urllib3.connectionpool.ProxyManager`
+
+ :param \\**conn_kw:
+ Additional parameters are used to create fresh :class:`urllib3.connection.HTTPConnection`,
+ :class:`urllib3.connection.HTTPSConnection` instances.
+ """
+
+ scheme = 'http'
+ ConnectionCls = HTTPConnection
+ ResponseCls = HTTPResponse
+
+ def __init__(self, host, port=None, strict=False,
+ timeout=Timeout.DEFAULT_TIMEOUT, maxsize=1, block=False,
+ headers=None, retries=None,
+ _proxy=None, _proxy_headers=None,
+ **conn_kw):
+ ConnectionPool.__init__(self, host, port)
+ RequestMethods.__init__(self, headers)
+
+ self.strict = strict
+
+ if not isinstance(timeout, Timeout):
+ timeout = Timeout.from_float(timeout)
+
+ if retries is None:
+ retries = Retry.DEFAULT
+
+ self.timeout = timeout
+ self.retries = retries
+
+ self.pool = self.QueueCls(maxsize)
+ self.block = block
+
+ self.proxy = _proxy
+ self.proxy_headers = _proxy_headers or {}
+
+ # Fill the queue up so that doing get() on it will block properly
+ for _ in xrange(maxsize):
+ self.pool.put(None)
+
+ # These are mostly for testing and debugging purposes.
+ self.num_connections = 0
+ self.num_requests = 0
+ self.conn_kw = conn_kw
+
+ if self.proxy:
+ # Enable Nagle's algorithm for proxies, to avoid packet fragmentation.
+ # We cannot know if the user has added default socket options, so we cannot replace the
+ # list.
+ self.conn_kw.setdefault('socket_options', [])
+
+ def _new_conn(self):
+ """
+ Return a fresh :class:`HTTPConnection`.
+ """
+ self.num_connections += 1
+ log.debug("Starting new HTTP connection (%d): %s",
+ self.num_connections, self.host)
+
+ conn = self.ConnectionCls(host=self.host, port=self.port,
+ timeout=self.timeout.connect_timeout,
+ strict=self.strict, **self.conn_kw)
+ return conn
+
+ def _get_conn(self, timeout=None):
+ """
+ Get a connection. Will return a pooled connection if one is available.
+
+ If no connections are available and :prop:`.block` is ``False``, then a
+ fresh connection is returned.
+
+ :param timeout:
+ Seconds to wait before giving up and raising
+ :class:`urllib3.exceptions.EmptyPoolError` if the pool is empty and
+ :prop:`.block` is ``True``.
+ """
+ conn = None
+ try:
+ conn = self.pool.get(block=self.block, timeout=timeout)
+
+ except AttributeError: # self.pool is None
+ raise ClosedPoolError(self, "Pool is closed.")
+
+ except queue.Empty:
+ if self.block:
+ raise EmptyPoolError(self,
+ "Pool reached maximum size and no more "
+ "connections are allowed.")
+ pass # Oh well, we'll create a new connection then
+
+ # If this is a persistent connection, check if it got disconnected
+ if conn and is_connection_dropped(conn):
+ log.debug("Resetting dropped connection: %s", self.host)
+ conn.close()
+ if getattr(conn, 'auto_open', 1) == 0:
+ # This is a proxied connection that has been mutated by
+ # httplib._tunnel() and cannot be reused (since it would
+ # attempt to bypass the proxy)
+ conn = None
+
+ return conn or self._new_conn()
+
+ def _put_conn(self, conn):
+ """
+ Put a connection back into the pool.
+
+ :param conn:
+ Connection object for the current host and port as returned by
+ :meth:`._new_conn` or :meth:`._get_conn`.
+
+ If the pool is already full, the connection is closed and discarded
+ because we exceeded maxsize. If connections are discarded frequently,
+ then maxsize should be increased.
+
+ If the pool is closed, then the connection will be closed and discarded.
+ """
+ try:
+ self.pool.put(conn, block=False)
+ return # Everything is dandy, done.
+ except AttributeError:
+ # self.pool is None.
+ pass
+ except queue.Full:
+ # This should never happen if self.block == True
+ log.warning(
+ "Connection pool is full, discarding connection: %s",
+ self.host)
+
+ # Connection never got put back into the pool, close it.
+ if conn:
+ conn.close()
+
+ def _validate_conn(self, conn):
+ """
+ Called right before a request is made, after the socket is created.
+ """
+ pass
+
+ def _prepare_proxy(self, conn):
+ # Nothing to do for HTTP connections.
+ pass
+
+ def _get_timeout(self, timeout):
+ """ Helper that always returns a :class:`urllib3.util.Timeout` """
+ if timeout is _Default:
+ return self.timeout.clone()
+
+ if isinstance(timeout, Timeout):
+ return timeout.clone()
+ else:
+ # User passed us an int/float. This is for backwards compatibility,
+ # can be removed later
+ return Timeout.from_float(timeout)
+
+ def _raise_timeout(self, err, url, timeout_value):
+ """Is the error actually a timeout? Will raise a ReadTimeout or pass"""
+
+ if isinstance(err, SocketTimeout):
+ raise ReadTimeoutError(self, url, "Read timed out. (read timeout=%s)" % timeout_value)
+
+ # See the above comment about EAGAIN in Python 3. In Python 2 we have
+ # to specifically catch it and throw the timeout error
+ if hasattr(err, 'errno') and err.errno in _blocking_errnos:
+ raise ReadTimeoutError(self, url, "Read timed out. (read timeout=%s)" % timeout_value)
+
+ # Catch possible read timeouts thrown as SSL errors. If not the
+ # case, rethrow the original. We need to do this because of:
+ # http://bugs.python.org/issue10272
+ if 'timed out' in str(err) or 'did not complete (read)' in str(err): # Python 2.6
+ raise ReadTimeoutError(self, url, "Read timed out. (read timeout=%s)" % timeout_value)
+
+ def _make_request(self, conn, method, url, timeout=_Default, chunked=False,
+ **httplib_request_kw):
+ """
+ Perform a request on a given urllib connection object taken from our
+ pool.
+
+ :param conn:
+ a connection from one of our connection pools
+
+ :param timeout:
+ Socket timeout in seconds for the request. This can be a
+ float or integer, which will set the same timeout value for
+ the socket connect and the socket read, or an instance of
+ :class:`urllib3.util.Timeout`, which gives you more fine-grained
+ control over your timeouts.
+ """
+ self.num_requests += 1
+
+ timeout_obj = self._get_timeout(timeout)
+ timeout_obj.start_connect()
+ conn.timeout = timeout_obj.connect_timeout
+
+ # Trigger any extra validation we need to do.
+ try:
+ self._validate_conn(conn)
+ except (SocketTimeout, BaseSSLError) as e:
+ # Py2 raises this as a BaseSSLError, Py3 raises it as socket timeout.
+ self._raise_timeout(err=e, url=url, timeout_value=conn.timeout)
+ raise
+
+ # conn.request() calls httplib.*.request, not the method in
+ # urllib3.request. It also calls makefile (recv) on the socket.
+ if chunked:
+ conn.request_chunked(method, url, **httplib_request_kw)
+ else:
+ conn.request(method, url, **httplib_request_kw)
+
+ # Reset the timeout for the recv() on the socket
+ read_timeout = timeout_obj.read_timeout
+
+ # App Engine doesn't have a sock attr
+ if getattr(conn, 'sock', None):
+ # In Python 3 socket.py will catch EAGAIN and return None when you
+ # try and read into the file pointer created by http.client, which
+ # instead raises a BadStatusLine exception. Instead of catching
+ # the exception and assuming all BadStatusLine exceptions are read
+ # timeouts, check for a zero timeout before making the request.
+ if read_timeout == 0:
+ raise ReadTimeoutError(
+ self, url, "Read timed out. (read timeout=%s)" % read_timeout)
+ if read_timeout is Timeout.DEFAULT_TIMEOUT:
+ conn.sock.settimeout(socket.getdefaulttimeout())
+ else: # None or a value
+ conn.sock.settimeout(read_timeout)
+
+ # Receive the response from the server
+ try:
+ try: # Python 2.7, use buffering of HTTP responses
+ httplib_response = conn.getresponse(buffering=True)
+ except TypeError: # Python 2.6 and older, Python 3
+ try:
+ httplib_response = conn.getresponse()
+ except Exception as e:
+ # Remove the TypeError from the exception chain in Python 3;
+ # otherwise it looks like a programming error was the cause.
+ six.raise_from(e, None)
+ except (SocketTimeout, BaseSSLError, SocketError) as e:
+ self._raise_timeout(err=e, url=url, timeout_value=read_timeout)
+ raise
+
+ # AppEngine doesn't have a version attr.
+ http_version = getattr(conn, '_http_vsn_str', 'HTTP/?')
+ log.debug("%s://%s:%s \"%s %s %s\" %s %s", self.scheme, self.host, self.port,
+ method, url, http_version, httplib_response.status,
+ httplib_response.length)
+
+ try:
+ assert_header_parsing(httplib_response.msg)
+ except HeaderParsingError as hpe: # Platform-specific: Python 3
+ log.warning(
+ 'Failed to parse headers (url=%s): %s',
+ self._absolute_url(url), hpe, exc_info=True)
+
+ return httplib_response
+
+ def _absolute_url(self, path):
+ return Url(scheme=self.scheme, host=self.host, port=self.port, path=path).url
+
+ def close(self):
+ """
+ Close all pooled connections and disable the pool.
+ """
+ # Disable access to the pool
+ old_pool, self.pool = self.pool, None
+
+ try:
+ while True:
+ conn = old_pool.get(block=False)
+ if conn:
+ conn.close()
+
+ except queue.Empty:
+ pass # Done.
+
+ def is_same_host(self, url):
+ """
+ Check if the given ``url`` is a member of the same host as this
+ connection pool.
+ """
+ if url.startswith('/'):
+ return True
+
+ # TODO: Add optional support for socket.gethostbyname checking.
+ scheme, host, port = get_host(url)
+
+ host = _ipv6_host(host).lower()
+
+ # Use explicit default port for comparison when none is given
+ if self.port and not port:
+ port = port_by_scheme.get(scheme)
+ elif not self.port and port == port_by_scheme.get(scheme):
+ port = None
+
+ return (scheme, host, port) == (self.scheme, self.host, self.port)
+
+ def urlopen(self, method, url, body=None, headers=None, retries=None,
+ redirect=True, assert_same_host=True, timeout=_Default,
+ pool_timeout=None, release_conn=None, chunked=False,
+ body_pos=None, **response_kw):
+ """
+ Get a connection from the pool and perform an HTTP request. This is the
+ lowest level call for making a request, so you'll need to specify all
+ the raw details.
+
+ .. note::
+
+ More commonly, it's appropriate to use a convenience method provided
+ by :class:`.RequestMethods`, such as :meth:`request`.
+
+ .. note::
+
+ `release_conn` will only behave as expected if
+ `preload_content=False` because we want to make
+ `preload_content=False` the default behaviour someday soon without
+ breaking backwards compatibility.
+
+ :param method:
+ HTTP request method (such as GET, POST, PUT, etc.)
+
+ :param body:
+ Data to send in the request body (useful for creating
+ POST requests, see HTTPConnectionPool.post_url for
+ more convenience).
+
+ :param headers:
+ Dictionary of custom headers to send, such as User-Agent,
+ If-None-Match, etc. If None, pool headers are used. If provided,
+ these headers completely replace any pool-specific headers.
+
+ :param retries:
+ Configure the number of retries to allow before raising a
+ :class:`~urllib3.exceptions.MaxRetryError` exception.
+
+ Pass ``None`` to retry until you receive a response. Pass a
+ :class:`~urllib3.util.retry.Retry` object for fine-grained control
+ over different types of retries.
+ Pass an integer number to retry connection errors that many times,
+ but no other types of errors. Pass zero to never retry.
+
+ If ``False``, then retries are disabled and any exception is raised
+ immediately. Also, instead of raising a MaxRetryError on redirects,
+ the redirect response will be returned.
+
+ :type retries: :class:`~urllib3.util.retry.Retry`, False, or an int.
+
+ :param redirect:
+ If True, automatically handle redirects (status codes 301, 302,
+ 303, 307, 308). Each redirect counts as a retry. Disabling retries
+ will disable redirect, too.
+
+ :param assert_same_host:
+ If ``True``, will make sure that the host of the pool requests is
+ consistent else will raise HostChangedError. When False, you can
+ use the pool on an HTTP proxy and request foreign hosts.
+
+ :param timeout:
+ If specified, overrides the default timeout for this one
+ request. It may be a float (in seconds) or an instance of
+ :class:`urllib3.util.Timeout`.
+
+ :param pool_timeout:
+ If set and the pool is set to block=True, then this method will
+ block for ``pool_timeout`` seconds and raise EmptyPoolError if no
+ connection is available within the time period.
+
+ :param release_conn:
+ If False, then the urlopen call will not release the connection
+ back into the pool once a response is received (but will release if
+ you read the entire contents of the response such as when
+ `preload_content=True`). This is useful if you're not preloading
+ the response's content immediately. You will need to call
+ ``r.release_conn()`` on the response ``r`` to return the connection
+ back into the pool. If None, it takes the value of
+ ``response_kw.get('preload_content', True)``.
+
+ :param chunked:
+ If True, urllib3 will send the body using chunked transfer
+ encoding. Otherwise, urllib3 will send the body using the standard
+ content-length form. Defaults to False.
+
+ :param int body_pos:
+ Position to seek to in file-like body in the event of a retry or
+ redirect. Typically this won't need to be set because urllib3 will
+ auto-populate the value when needed.
+
+ :param \\**response_kw:
+ Additional parameters are passed to
+ :meth:`urllib3.response.HTTPResponse.from_httplib`
+ """
+ if headers is None:
+ headers = self.headers
+
+ if not isinstance(retries, Retry):
+ retries = Retry.from_int(retries, redirect=redirect, default=self.retries)
+
+ if release_conn is None:
+ release_conn = response_kw.get('preload_content', True)
+
+ # Check host
+ if assert_same_host and not self.is_same_host(url):
+ raise HostChangedError(self, url, retries)
+
+ conn = None
+
+ # Track whether `conn` needs to be released before
+ # returning/raising/recursing. Update this variable if necessary, and
+ # leave `release_conn` constant throughout the function. That way, if
+ # the function recurses, the original value of `release_conn` will be
+ # passed down into the recursive call, and its value will be respected.
+ #
+ # See issue #651 [1] for details.
+ #
+ # [1] <https://github.com/shazow/urllib3/issues/651>
+ release_this_conn = release_conn
+
+ # Merge the proxy headers. Only do this in HTTP. We have to copy the
+ # headers dict so we can safely change it without those changes being
+ # reflected in anyone else's copy.
+ if self.scheme == 'http':
+ headers = headers.copy()
+ headers.update(self.proxy_headers)
+
+ # Must keep the exception bound to a separate variable or else Python 3
+ # complains about UnboundLocalError.
+ err = None
+
+ # Keep track of whether we cleanly exited the except block. This
+ # ensures we do proper cleanup in finally.
+ clean_exit = False
+
+ # Rewind body position, if needed. Record current position
+ # for future rewinds in the event of a redirect/retry.
+ body_pos = set_file_position(body, body_pos)
+
+ try:
+ # Request a connection from the queue.
+ timeout_obj = self._get_timeout(timeout)
+ conn = self._get_conn(timeout=pool_timeout)
+
+ conn.timeout = timeout_obj.connect_timeout
+
+ is_new_proxy_conn = self.proxy is not None and not getattr(conn, 'sock', None)
+ if is_new_proxy_conn:
+ self._prepare_proxy(conn)
+
+ # Make the request on the httplib connection object.
+ httplib_response = self._make_request(conn, method, url,
+ timeout=timeout_obj,
+ body=body, headers=headers,
+ chunked=chunked)
+
+ # If we're going to release the connection in ``finally:``, then
+ # the response doesn't need to know about the connection. Otherwise
+ # it will also try to release it and we'll have a double-release
+ # mess.
+ response_conn = conn if not release_conn else None
+
+ # Pass method to Response for length checking
+ response_kw['request_method'] = method
+
+ # Import httplib's response into our own wrapper object
+ response = self.ResponseCls.from_httplib(httplib_response,
+ pool=self,
+ connection=response_conn,
+ retries=retries,
+ **response_kw)
+
+ # Everything went great!
+ clean_exit = True
+
+ except queue.Empty:
+ # Timed out by queue.
+ raise EmptyPoolError(self, "No pool connections are available.")
+
+ except (BaseSSLError, CertificateError) as e:
+ # Close the connection. If a connection is reused on which there
+ # was a Certificate error, the next request will certainly raise
+ # another Certificate error.
+ clean_exit = False
+ raise SSLError(e)
+
+ except SSLError:
+ # Treat SSLError separately from BaseSSLError to preserve
+ # traceback.
+ clean_exit = False
+ raise
+
+ except (TimeoutError, HTTPException, SocketError, ProtocolError) as e:
+ # Discard the connection for these exceptions. It will be
+ # replaced during the next _get_conn() call.
+ clean_exit = False
+
+ if isinstance(e, (SocketError, NewConnectionError)) and self.proxy:
+ e = ProxyError('Cannot connect to proxy.', e)
+ elif isinstance(e, (SocketError, HTTPException)):
+ e = ProtocolError('Connection aborted.', e)
+
+ retries = retries.increment(method, url, error=e, _pool=self,
+ _stacktrace=sys.exc_info()[2])
+ retries.sleep()
+
+ # Keep track of the error for the retry warning.
+ err = e
+
+ finally:
+ if not clean_exit:
+ # We hit some kind of exception, handled or otherwise. We need
+ # to throw the connection away unless explicitly told not to.
+ # Close the connection, set the variable to None, and make sure
+ # we put the None back in the pool to avoid leaking it.
+ conn = conn and conn.close()
+ release_this_conn = True
+
+ if release_this_conn:
+ # Put the connection back to be reused. If the connection is
+ # expired then it will be None, which will get replaced with a
+ # fresh connection during _get_conn.
+ self._put_conn(conn)
+
+ if not conn:
+ # Try again
+ log.warning("Retrying (%r) after connection "
+ "broken by '%r': %s", retries, err, url)
+ return self.urlopen(method, url, body, headers, retries,
+ redirect, assert_same_host,
+ timeout=timeout, pool_timeout=pool_timeout,
+ release_conn=release_conn, body_pos=body_pos,
+ **response_kw)
+
+ # Handle redirect?
+ redirect_location = redirect and response.get_redirect_location()
+ if redirect_location:
+ if response.status == 303:
+ method = 'GET'
+
+ try:
+ retries = retries.increment(method, url, response=response, _pool=self)
+ except MaxRetryError:
+ if retries.raise_on_redirect:
+ # Release the connection for this response, since we're not
+ # returning it to be released manually.
+ response.release_conn()
+ raise
+ return response
+
+ retries.sleep_for_retry(response)
+ log.debug("Redirecting %s -> %s", url, redirect_location)
+ return self.urlopen(
+ method, redirect_location, body, headers,
+ retries=retries, redirect=redirect,
+ assert_same_host=assert_same_host,
+ timeout=timeout, pool_timeout=pool_timeout,
+ release_conn=release_conn, body_pos=body_pos,
+ **response_kw)
+
+ # Check if we should retry the HTTP response.
+ has_retry_after = bool(response.getheader('Retry-After'))
+ if retries.is_retry(method, response.status, has_retry_after):
+ try:
+ retries = retries.increment(method, url, response=response, _pool=self)
+ except MaxRetryError:
+ if retries.raise_on_status:
+ # Release the connection for this response, since we're not
+ # returning it to be released manually.
+ response.release_conn()
+ raise
+ return response
+ retries.sleep(response)
+ log.debug("Retry: %s", url)
+ return self.urlopen(
+ method, url, body, headers,
+ retries=retries, redirect=redirect,
+ assert_same_host=assert_same_host,
+ timeout=timeout, pool_timeout=pool_timeout,
+ release_conn=release_conn,
+ body_pos=body_pos, **response_kw)
+
+ return response
+
+
+class HTTPSConnectionPool(HTTPConnectionPool):
+ """
+ Same as :class:`.HTTPConnectionPool`, but HTTPS.
+
+ When Python is compiled with the :mod:`ssl` module, then
+ :class:`.VerifiedHTTPSConnection` is used, which *can* verify certificates,
+ instead of :class:`.HTTPSConnection`.
+
+ :class:`.VerifiedHTTPSConnection` uses one of ``assert_fingerprint``,
+ ``assert_hostname`` and ``host`` in this order to verify connections.
+ If ``assert_hostname`` is False, no verification is done.
+
+ The ``key_file``, ``cert_file``, ``cert_reqs``, ``ca_certs``,
+ ``ca_cert_dir``, and ``ssl_version`` are only used if :mod:`ssl` is
+ available and are fed into :meth:`urllib3.util.ssl_wrap_socket` to upgrade
+ the connection socket into an SSL socket.
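+
+ Example (an illustrative sketch; the host and CA bundle path are
+ placeholders)::
+
+ >>> pool = HTTPSConnectionPool('example.com', port=443,
+ ... cert_reqs='CERT_REQUIRED',
+ ... ca_certs='/path/to/ca_bundle.pem')
+ >>> r = pool.request('GET', '/')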
+ """
+
+ scheme = 'https'
+ ConnectionCls = HTTPSConnection
+
+ def __init__(self, host, port=None,
+ strict=False, timeout=Timeout.DEFAULT_TIMEOUT, maxsize=1,
+ block=False, headers=None, retries=None,
+ _proxy=None, _proxy_headers=None,
+ key_file=None, cert_file=None, cert_reqs=None,
+ ca_certs=None, ssl_version=None,
+ assert_hostname=None, assert_fingerprint=None,
+ ca_cert_dir=None, **conn_kw):
+
+ HTTPConnectionPool.__init__(self, host, port, strict, timeout, maxsize,
+ block, headers, retries, _proxy, _proxy_headers,
+ **conn_kw)
+
+ if ca_certs and cert_reqs is None:
+ cert_reqs = 'CERT_REQUIRED'
+
+ self.key_file = key_file
+ self.cert_file = cert_file
+ self.cert_reqs = cert_reqs
+ self.ca_certs = ca_certs
+ self.ca_cert_dir = ca_cert_dir
+ self.ssl_version = ssl_version
+ self.assert_hostname = assert_hostname
+ self.assert_fingerprint = assert_fingerprint
+
+ def _prepare_conn(self, conn):
+ """
+ Prepare the ``connection`` for :meth:`urllib3.util.ssl_wrap_socket`
+ and establish the tunnel if proxy is used.
+ """
+
+ if isinstance(conn, VerifiedHTTPSConnection):
+ conn.set_cert(key_file=self.key_file,
+ cert_file=self.cert_file,
+ cert_reqs=self.cert_reqs,
+ ca_certs=self.ca_certs,
+ ca_cert_dir=self.ca_cert_dir,
+ assert_hostname=self.assert_hostname,
+ assert_fingerprint=self.assert_fingerprint)
+ conn.ssl_version = self.ssl_version
+ return conn
+
+ def _prepare_proxy(self, conn):
+ """
+ Establish the tunnel connection early, because otherwise httplib
+ would improperly set the Host: header to the proxy's IP:port.
+ """
+ # Python 2.7+
+ try:
+ set_tunnel = conn.set_tunnel
+ except AttributeError: # Platform-specific: Python 2.6
+ set_tunnel = conn._set_tunnel
+
+ if sys.version_info <= (2, 6, 4) and not self.proxy_headers: # Python 2.6.4 and older
+ set_tunnel(self.host, self.port)
+ else:
+ set_tunnel(self.host, self.port, self.proxy_headers)
+
+ conn.connect()
+
+ def _new_conn(self):
+ """
+ Return a fresh :class:`httplib.HTTPSConnection`.
+ """
+ self.num_connections += 1
+ log.debug("Starting new HTTPS connection (%d): %s",
+ self.num_connections, self.host)
+
+ if not self.ConnectionCls or self.ConnectionCls is DummyConnection:
+ raise SSLError("Can't connect to HTTPS URL because the SSL "
+ "module is not available.")
+
+ actual_host = self.host
+ actual_port = self.port
+ if self.proxy is not None:
+ actual_host = self.proxy.host
+ actual_port = self.proxy.port
+
+ conn = self.ConnectionCls(host=actual_host, port=actual_port,
+ timeout=self.timeout.connect_timeout,
+ strict=self.strict, **self.conn_kw)
+
+ return self._prepare_conn(conn)
+
+ def _validate_conn(self, conn):
+ """
+ Called right before a request is made, after the socket is created.
+ """
+ super(HTTPSConnectionPool, self)._validate_conn(conn)
+
+ # Force connect early to allow us to validate the connection.
+ if not getattr(conn, 'sock', None): # AppEngine might not have `.sock`
+ conn.connect()
+
+ if not conn.is_verified:
+ warnings.warn((
+ 'Unverified HTTPS request is being made. '
+ 'Adding certificate verification is strongly advised. See: '
+ 'https://urllib3.readthedocs.io/en/latest/advanced-usage.html'
+ '#ssl-warnings'),
+ InsecureRequestWarning)
+
+
+def connection_from_url(url, **kw):
+ """
+ Given a url, return an :class:`.ConnectionPool` instance of its host.
+
+ This is a shortcut for not having to parse out the scheme, host, and port
+ of the url before creating an :class:`.ConnectionPool` instance.
+
+ :param url:
+ Absolute URL string that must include the scheme. Port is optional.
+
+ :param \\**kw:
+ Passes additional parameters to the constructor of the appropriate
+ :class:`.ConnectionPool`. Useful for specifying things like
+ timeout, maxsize, headers, etc.
+
+ Example::
+
+ >>> conn = connection_from_url('http://google.com/')
+ >>> r = conn.request('GET', '/')
+ """
+ scheme, host, port = get_host(url)
+ port = port or port_by_scheme.get(scheme, 80)
+ if scheme == 'https':
+ return HTTPSConnectionPool(host, port=port, **kw)
+ else:
+ return HTTPConnectionPool(host, port=port, **kw)
+
+
+def _ipv6_host(host):
+ """
+ Process IPv6 address literals
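+
+ For example (illustrative)::
+
+ >>> _ipv6_host('[2001:db8::1]')
+ '2001:db8::1'
+ >>> _ipv6_host('[fe80::1%25eth0]')
+ 'fe80::1%eth0'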
+ """
+
+ # httplib doesn't like it when we include brackets in IPv6 addresses
+ # Specifically, if we include brackets but also pass the port then
+ # httplib crazily doubles up the square brackets on the Host header.
+ # Instead, we need to make sure we never pass ``None`` as the port.
+ # However, for backward compatibility reasons we can't actually
+ # *assert* that. See http://bugs.python.org/issue28539
+ #
+ # Also, if an IPv6 address literal has a zone identifier, the
+ # percent sign might be URI-encoded; convert it back into ASCII.
+ if host.startswith('[') and host.endswith(']'):
+ host = host.replace('%25', '%').strip('[]')
+ return host
diff --git a/collectors/python.d.plugin/python_modules/urllib3/contrib/__init__.py b/collectors/python.d.plugin/python_modules/urllib3/contrib/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/urllib3/contrib/__init__.py
diff --git a/collectors/python.d.plugin/python_modules/urllib3/contrib/_securetransport/__init__.py b/collectors/python.d.plugin/python_modules/urllib3/contrib/_securetransport/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/urllib3/contrib/_securetransport/__init__.py
diff --git a/collectors/python.d.plugin/python_modules/urllib3/contrib/_securetransport/bindings.py b/collectors/python.d.plugin/python_modules/urllib3/contrib/_securetransport/bindings.py
new file mode 100644
index 0000000..bb82667
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/urllib3/contrib/_securetransport/bindings.py
@@ -0,0 +1,591 @@
+# SPDX-License-Identifier: MIT
+"""
+This module uses ctypes to bind a whole bunch of functions and constants from
+SecureTransport. The goal here is to provide the low-level API to
+SecureTransport. These are essentially the C-level functions and constants, and
+they're pretty gross to work with.
+
+This code is a bastardised version of the code found in Will Bond's oscrypto
+library. An enormous debt is owed to him for blazing this trail for us. For
+that reason, this code should be considered to be covered both by urllib3's
+license and by oscrypto's:
+
+ Copyright (c) 2015-2016 Will Bond <will@wbond.net>
+
+ Permission is hereby granted, free of charge, to any person obtaining a
+ copy of this software and associated documentation files (the "Software"),
+ to deal in the Software without restriction, including without limitation
+ the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ and/or sell copies of the Software, and to permit persons to whom the
+ Software is furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in
+ all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ DEALINGS IN THE SOFTWARE.
+"""
+from __future__ import absolute_import
+
+import platform
+from ctypes.util import find_library
+from ctypes import (
+ c_void_p, c_int32, c_char_p, c_size_t, c_byte, c_uint32, c_ulong, c_long,
+ c_bool
+)
+from ctypes import CDLL, POINTER, CFUNCTYPE
+
+
+security_path = find_library('Security')
+if not security_path:
+ raise ImportError('The library Security could not be found')
+
+
+core_foundation_path = find_library('CoreFoundation')
+if not core_foundation_path:
+ raise ImportError('The library CoreFoundation could not be found')
+
+
+version = platform.mac_ver()[0]
+version_info = tuple(map(int, version.split('.')))
+if version_info < (10, 8):
+ raise OSError(
+ 'Only OS X 10.8 and newer are supported, not %s.%s' % (
+ version_info[0], version_info[1]
+ )
+ )
+
+Security = CDLL(security_path, use_errno=True)
+CoreFoundation = CDLL(core_foundation_path, use_errno=True)
+
+Boolean = c_bool
+CFIndex = c_long
+CFStringEncoding = c_uint32
+CFData = c_void_p
+CFString = c_void_p
+CFArray = c_void_p
+CFMutableArray = c_void_p
+CFDictionary = c_void_p
+CFError = c_void_p
+CFType = c_void_p
+CFTypeID = c_ulong
+
+CFTypeRef = POINTER(CFType)
+CFAllocatorRef = c_void_p
+
+OSStatus = c_int32
+
+CFDataRef = POINTER(CFData)
+CFStringRef = POINTER(CFString)
+CFArrayRef = POINTER(CFArray)
+CFMutableArrayRef = POINTER(CFMutableArray)
+CFDictionaryRef = POINTER(CFDictionary)
+CFArrayCallBacks = c_void_p
+CFDictionaryKeyCallBacks = c_void_p
+CFDictionaryValueCallBacks = c_void_p
+
+SecCertificateRef = POINTER(c_void_p)
+SecExternalFormat = c_uint32
+SecExternalItemType = c_uint32
+SecIdentityRef = POINTER(c_void_p)
+SecItemImportExportFlags = c_uint32
+SecItemImportExportKeyParameters = c_void_p
+SecKeychainRef = POINTER(c_void_p)
+SSLProtocol = c_uint32
+SSLCipherSuite = c_uint32
+SSLContextRef = POINTER(c_void_p)
+SecTrustRef = POINTER(c_void_p)
+SSLConnectionRef = c_uint32
+SecTrustResultType = c_uint32
+SecTrustOptionFlags = c_uint32
+SSLProtocolSide = c_uint32
+SSLConnectionType = c_uint32
+SSLSessionOption = c_uint32
+
+
+try:
+ Security.SecItemImport.argtypes = [
+ CFDataRef,
+ CFStringRef,
+ POINTER(SecExternalFormat),
+ POINTER(SecExternalItemType),
+ SecItemImportExportFlags,
+ POINTER(SecItemImportExportKeyParameters),
+ SecKeychainRef,
+ POINTER(CFArrayRef),
+ ]
+ Security.SecItemImport.restype = OSStatus
+
+ Security.SecCertificateGetTypeID.argtypes = []
+ Security.SecCertificateGetTypeID.restype = CFTypeID
+
+ Security.SecIdentityGetTypeID.argtypes = []
+ Security.SecIdentityGetTypeID.restype = CFTypeID
+
+ Security.SecKeyGetTypeID.argtypes = []
+ Security.SecKeyGetTypeID.restype = CFTypeID
+
+ Security.SecCertificateCreateWithData.argtypes = [
+ CFAllocatorRef,
+ CFDataRef
+ ]
+ Security.SecCertificateCreateWithData.restype = SecCertificateRef
+
+ Security.SecCertificateCopyData.argtypes = [
+ SecCertificateRef
+ ]
+ Security.SecCertificateCopyData.restype = CFDataRef
+
+ Security.SecCopyErrorMessageString.argtypes = [
+ OSStatus,
+ c_void_p
+ ]
+ Security.SecCopyErrorMessageString.restype = CFStringRef
+
+ Security.SecIdentityCreateWithCertificate.argtypes = [
+ CFTypeRef,
+ SecCertificateRef,
+ POINTER(SecIdentityRef)
+ ]
+ Security.SecIdentityCreateWithCertificate.restype = OSStatus
+
+ Security.SecKeychainCreate.argtypes = [
+ c_char_p,
+ c_uint32,
+ c_void_p,
+ Boolean,
+ c_void_p,
+ POINTER(SecKeychainRef)
+ ]
+ Security.SecKeychainCreate.restype = OSStatus
+
+ Security.SecKeychainDelete.argtypes = [
+ SecKeychainRef
+ ]
+ Security.SecKeychainDelete.restype = OSStatus
+
+ Security.SecPKCS12Import.argtypes = [
+ CFDataRef,
+ CFDictionaryRef,
+ POINTER(CFArrayRef)
+ ]
+ Security.SecPKCS12Import.restype = OSStatus
+
+ SSLReadFunc = CFUNCTYPE(OSStatus, SSLConnectionRef, c_void_p, POINTER(c_size_t))
+ SSLWriteFunc = CFUNCTYPE(OSStatus, SSLConnectionRef, POINTER(c_byte), POINTER(c_size_t))
+
+ Security.SSLSetIOFuncs.argtypes = [
+ SSLContextRef,
+ SSLReadFunc,
+ SSLWriteFunc
+ ]
+ Security.SSLSetIOFuncs.restype = OSStatus
+
+ Security.SSLSetPeerID.argtypes = [
+ SSLContextRef,
+ c_char_p,
+ c_size_t
+ ]
+ Security.SSLSetPeerID.restype = OSStatus
+
+ Security.SSLSetCertificate.argtypes = [
+ SSLContextRef,
+ CFArrayRef
+ ]
+ Security.SSLSetCertificate.restype = OSStatus
+
+ Security.SSLSetCertificateAuthorities.argtypes = [
+ SSLContextRef,
+ CFTypeRef,
+ Boolean
+ ]
+ Security.SSLSetCertificateAuthorities.restype = OSStatus
+
+ Security.SSLSetConnection.argtypes = [
+ SSLContextRef,
+ SSLConnectionRef
+ ]
+ Security.SSLSetConnection.restype = OSStatus
+
+ Security.SSLSetPeerDomainName.argtypes = [
+ SSLContextRef,
+ c_char_p,
+ c_size_t
+ ]
+ Security.SSLSetPeerDomainName.restype = OSStatus
+
+ Security.SSLHandshake.argtypes = [
+ SSLContextRef
+ ]
+ Security.SSLHandshake.restype = OSStatus
+
+ Security.SSLRead.argtypes = [
+ SSLContextRef,
+ c_char_p,
+ c_size_t,
+ POINTER(c_size_t)
+ ]
+ Security.SSLRead.restype = OSStatus
+
+ Security.SSLWrite.argtypes = [
+ SSLContextRef,
+ c_char_p,
+ c_size_t,
+ POINTER(c_size_t)
+ ]
+ Security.SSLWrite.restype = OSStatus
+
+ Security.SSLClose.argtypes = [
+ SSLContextRef
+ ]
+ Security.SSLClose.restype = OSStatus
+
+ Security.SSLGetNumberSupportedCiphers.argtypes = [
+ SSLContextRef,
+ POINTER(c_size_t)
+ ]
+ Security.SSLGetNumberSupportedCiphers.restype = OSStatus
+
+ Security.SSLGetSupportedCiphers.argtypes = [
+ SSLContextRef,
+ POINTER(SSLCipherSuite),
+ POINTER(c_size_t)
+ ]
+ Security.SSLGetSupportedCiphers.restype = OSStatus
+
+ Security.SSLSetEnabledCiphers.argtypes = [
+ SSLContextRef,
+ POINTER(SSLCipherSuite),
+ c_size_t
+ ]
+ Security.SSLSetEnabledCiphers.restype = OSStatus
+
+ Security.SSLGetNumberEnabledCiphers.argtypes = [
+ SSLContextRef,
+ POINTER(c_size_t)
+ ]
+ Security.SSLGetNumberEnabledCiphers.restype = OSStatus
+
+ Security.SSLGetEnabledCiphers.argtypes = [
+ SSLContextRef,
+ POINTER(SSLCipherSuite),
+ POINTER(c_size_t)
+ ]
+ Security.SSLGetEnabledCiphers.restype = OSStatus
+
+ Security.SSLGetNegotiatedCipher.argtypes = [
+ SSLContextRef,
+ POINTER(SSLCipherSuite)
+ ]
+ Security.SSLGetNegotiatedCipher.restype = OSStatus
+
+ Security.SSLGetNegotiatedProtocolVersion.argtypes = [
+ SSLContextRef,
+ POINTER(SSLProtocol)
+ ]
+ Security.SSLGetNegotiatedProtocolVersion.restype = OSStatus
+
+ Security.SSLCopyPeerTrust.argtypes = [
+ SSLContextRef,
+ POINTER(SecTrustRef)
+ ]
+ Security.SSLCopyPeerTrust.restype = OSStatus
+
+ Security.SecTrustSetAnchorCertificates.argtypes = [
+ SecTrustRef,
+ CFArrayRef
+ ]
+ Security.SecTrustSetAnchorCertificates.restype = OSStatus
+
+ Security.SecTrustSetAnchorCertificatesOnly.argtypes = [
+ SecTrustRef,
+ Boolean
+ ]
+ Security.SecTrustSetAnchorCertificatesOnly.restype = OSStatus
+
+ Security.SecTrustEvaluate.argtypes = [
+ SecTrustRef,
+ POINTER(SecTrustResultType)
+ ]
+ Security.SecTrustEvaluate.restype = OSStatus
+
+ Security.SecTrustGetCertificateCount.argtypes = [
+ SecTrustRef
+ ]
+ Security.SecTrustGetCertificateCount.restype = CFIndex
+
+ Security.SecTrustGetCertificateAtIndex.argtypes = [
+ SecTrustRef,
+ CFIndex
+ ]
+ Security.SecTrustGetCertificateAtIndex.restype = SecCertificateRef
+
+ Security.SSLCreateContext.argtypes = [
+ CFAllocatorRef,
+ SSLProtocolSide,
+ SSLConnectionType
+ ]
+ Security.SSLCreateContext.restype = SSLContextRef
+
+ Security.SSLSetSessionOption.argtypes = [
+ SSLContextRef,
+ SSLSessionOption,
+ Boolean
+ ]
+ Security.SSLSetSessionOption.restype = OSStatus
+
+ Security.SSLSetProtocolVersionMin.argtypes = [
+ SSLContextRef,
+ SSLProtocol
+ ]
+ Security.SSLSetProtocolVersionMin.restype = OSStatus
+
+ Security.SSLSetProtocolVersionMax.argtypes = [
+ SSLContextRef,
+ SSLProtocol
+ ]
+ Security.SSLSetProtocolVersionMax.restype = OSStatus
+
+ Security.SecCopyErrorMessageString.argtypes = [
+ OSStatus,
+ c_void_p
+ ]
+ Security.SecCopyErrorMessageString.restype = CFStringRef
+
+ Security.SSLReadFunc = SSLReadFunc
+ Security.SSLWriteFunc = SSLWriteFunc
+ Security.SSLContextRef = SSLContextRef
+ Security.SSLProtocol = SSLProtocol
+ Security.SSLCipherSuite = SSLCipherSuite
+ Security.SecIdentityRef = SecIdentityRef
+ Security.SecKeychainRef = SecKeychainRef
+ Security.SecTrustRef = SecTrustRef
+ Security.SecTrustResultType = SecTrustResultType
+ Security.SecExternalFormat = SecExternalFormat
+ Security.OSStatus = OSStatus
+
+ Security.kSecImportExportPassphrase = CFStringRef.in_dll(
+ Security, 'kSecImportExportPassphrase'
+ )
+ Security.kSecImportItemIdentity = CFStringRef.in_dll(
+ Security, 'kSecImportItemIdentity'
+ )
+
+ # CoreFoundation time!
+ CoreFoundation.CFRetain.argtypes = [
+ CFTypeRef
+ ]
+ CoreFoundation.CFRetain.restype = CFTypeRef
+
+ CoreFoundation.CFRelease.argtypes = [
+ CFTypeRef
+ ]
+ CoreFoundation.CFRelease.restype = None
+
+ CoreFoundation.CFGetTypeID.argtypes = [
+ CFTypeRef
+ ]
+ CoreFoundation.CFGetTypeID.restype = CFTypeID
+
+ CoreFoundation.CFStringCreateWithCString.argtypes = [
+ CFAllocatorRef,
+ c_char_p,
+ CFStringEncoding
+ ]
+ CoreFoundation.CFStringCreateWithCString.restype = CFStringRef
+
+ CoreFoundation.CFStringGetCStringPtr.argtypes = [
+ CFStringRef,
+ CFStringEncoding
+ ]
+ CoreFoundation.CFStringGetCStringPtr.restype = c_char_p
+
+ CoreFoundation.CFStringGetCString.argtypes = [
+ CFStringRef,
+ c_char_p,
+ CFIndex,
+ CFStringEncoding
+ ]
+ CoreFoundation.CFStringGetCString.restype = c_bool
+
+ CoreFoundation.CFDataCreate.argtypes = [
+ CFAllocatorRef,
+ c_char_p,
+ CFIndex
+ ]
+ CoreFoundation.CFDataCreate.restype = CFDataRef
+
+ CoreFoundation.CFDataGetLength.argtypes = [
+ CFDataRef
+ ]
+ CoreFoundation.CFDataGetLength.restype = CFIndex
+
+ CoreFoundation.CFDataGetBytePtr.argtypes = [
+ CFDataRef
+ ]
+ CoreFoundation.CFDataGetBytePtr.restype = c_void_p
+
+ CoreFoundation.CFDictionaryCreate.argtypes = [
+ CFAllocatorRef,
+ POINTER(CFTypeRef),
+ POINTER(CFTypeRef),
+ CFIndex,
+ CFDictionaryKeyCallBacks,
+ CFDictionaryValueCallBacks
+ ]
+ CoreFoundation.CFDictionaryCreate.restype = CFDictionaryRef
+
+ CoreFoundation.CFDictionaryGetValue.argtypes = [
+ CFDictionaryRef,
+ CFTypeRef
+ ]
+ CoreFoundation.CFDictionaryGetValue.restype = CFTypeRef
+
+ CoreFoundation.CFArrayCreate.argtypes = [
+ CFAllocatorRef,
+ POINTER(CFTypeRef),
+ CFIndex,
+ CFArrayCallBacks,
+ ]
+ CoreFoundation.CFArrayCreate.restype = CFArrayRef
+
+ CoreFoundation.CFArrayCreateMutable.argtypes = [
+ CFAllocatorRef,
+ CFIndex,
+ CFArrayCallBacks
+ ]
+ CoreFoundation.CFArrayCreateMutable.restype = CFMutableArrayRef
+
+ CoreFoundation.CFArrayAppendValue.argtypes = [
+ CFMutableArrayRef,
+ c_void_p
+ ]
+ CoreFoundation.CFArrayAppendValue.restype = None
+
+ CoreFoundation.CFArrayGetCount.argtypes = [
+ CFArrayRef
+ ]
+ CoreFoundation.CFArrayGetCount.restype = CFIndex
+
+ CoreFoundation.CFArrayGetValueAtIndex.argtypes = [
+ CFArrayRef,
+ CFIndex
+ ]
+ CoreFoundation.CFArrayGetValueAtIndex.restype = c_void_p
+
+ CoreFoundation.kCFAllocatorDefault = CFAllocatorRef.in_dll(
+ CoreFoundation, 'kCFAllocatorDefault'
+ )
+ CoreFoundation.kCFTypeArrayCallBacks = c_void_p.in_dll(CoreFoundation, 'kCFTypeArrayCallBacks')
+ CoreFoundation.kCFTypeDictionaryKeyCallBacks = c_void_p.in_dll(
+ CoreFoundation, 'kCFTypeDictionaryKeyCallBacks'
+ )
+ CoreFoundation.kCFTypeDictionaryValueCallBacks = c_void_p.in_dll(
+ CoreFoundation, 'kCFTypeDictionaryValueCallBacks'
+ )
+
+ CoreFoundation.CFTypeRef = CFTypeRef
+ CoreFoundation.CFArrayRef = CFArrayRef
+ CoreFoundation.CFStringRef = CFStringRef
+ CoreFoundation.CFDictionaryRef = CFDictionaryRef
+
+except AttributeError:
+ raise ImportError('Error initializing ctypes')
+
+
+class CFConst(object):
+ """
+ A class object that acts as essentially a namespace for CoreFoundation
+ constants.
+ """
+ kCFStringEncodingUTF8 = CFStringEncoding(0x08000100)
+
+
+class SecurityConst(object):
+ """
+ A class object that acts as essentially a namespace for Security constants.
+ """
+ kSSLSessionOptionBreakOnServerAuth = 0
+
+ kSSLProtocol2 = 1
+ kSSLProtocol3 = 2
+ kTLSProtocol1 = 4
+ kTLSProtocol11 = 7
+ kTLSProtocol12 = 8
+
+ kSSLClientSide = 1
+ kSSLStreamType = 0
+
+ kSecFormatPEMSequence = 10
+
+ kSecTrustResultInvalid = 0
+ kSecTrustResultProceed = 1
+ # This gap is present on purpose: this was kSecTrustResultConfirm, which
+ # is deprecated.
+ kSecTrustResultDeny = 3
+ kSecTrustResultUnspecified = 4
+ kSecTrustResultRecoverableTrustFailure = 5
+ kSecTrustResultFatalTrustFailure = 6
+ kSecTrustResultOtherError = 7
+
+ errSSLProtocol = -9800
+ errSSLWouldBlock = -9803
+ errSSLClosedGraceful = -9805
+ errSSLClosedNoNotify = -9816
+ errSSLClosedAbort = -9806
+
+ errSSLXCertChainInvalid = -9807
+ errSSLCrypto = -9809
+ errSSLInternal = -9810
+ errSSLCertExpired = -9814
+ errSSLCertNotYetValid = -9815
+ errSSLUnknownRootCert = -9812
+ errSSLNoRootCert = -9813
+ errSSLHostNameMismatch = -9843
+ errSSLPeerHandshakeFail = -9824
+ errSSLPeerUserCancelled = -9839
+ errSSLWeakPeerEphemeralDHKey = -9850
+ errSSLServerAuthCompleted = -9841
+ errSSLRecordOverflow = -9847
+
+ errSecVerifyFailed = -67808
+ errSecNoTrustSettings = -25263
+ errSecItemNotFound = -25300
+ errSecInvalidTrustSettings = -25262
+
+ # Cipher suites. We only pick the ones our default cipher string allows.
+ TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 = 0xC02C
+ TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 = 0xC030
+ TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 = 0xC02B
+ TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 = 0xC02F
+ TLS_DHE_DSS_WITH_AES_256_GCM_SHA384 = 0x00A3
+ TLS_DHE_RSA_WITH_AES_256_GCM_SHA384 = 0x009F
+ TLS_DHE_DSS_WITH_AES_128_GCM_SHA256 = 0x00A2
+ TLS_DHE_RSA_WITH_AES_128_GCM_SHA256 = 0x009E
+ TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384 = 0xC024
+ TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384 = 0xC028
+ TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA = 0xC00A
+ TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA = 0xC014
+ TLS_DHE_RSA_WITH_AES_256_CBC_SHA256 = 0x006B
+ TLS_DHE_DSS_WITH_AES_256_CBC_SHA256 = 0x006A
+ TLS_DHE_RSA_WITH_AES_256_CBC_SHA = 0x0039
+ TLS_DHE_DSS_WITH_AES_256_CBC_SHA = 0x0038
+ TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256 = 0xC023
+ TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 = 0xC027
+ TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA = 0xC009
+ TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA = 0xC013
+ TLS_DHE_RSA_WITH_AES_128_CBC_SHA256 = 0x0067
+ TLS_DHE_DSS_WITH_AES_128_CBC_SHA256 = 0x0040
+ TLS_DHE_RSA_WITH_AES_128_CBC_SHA = 0x0033
+ TLS_DHE_DSS_WITH_AES_128_CBC_SHA = 0x0032
+ TLS_RSA_WITH_AES_256_GCM_SHA384 = 0x009D
+ TLS_RSA_WITH_AES_128_GCM_SHA256 = 0x009C
+ TLS_RSA_WITH_AES_256_CBC_SHA256 = 0x003D
+ TLS_RSA_WITH_AES_128_CBC_SHA256 = 0x003C
+ TLS_RSA_WITH_AES_256_CBC_SHA = 0x0035
+ TLS_RSA_WITH_AES_128_CBC_SHA = 0x002F
diff --git a/collectors/python.d.plugin/python_modules/urllib3/contrib/_securetransport/low_level.py b/collectors/python.d.plugin/python_modules/urllib3/contrib/_securetransport/low_level.py
new file mode 100644
index 0000000..0f79a13
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/urllib3/contrib/_securetransport/low_level.py
@@ -0,0 +1,344 @@
+# SPDX-License-Identifier: MIT
+"""
+Low-level helpers for the SecureTransport bindings.
+
+These are Python functions that are not directly related to the high-level APIs
+but are necessary to get them to work. They include a whole bunch of low-level
+CoreFoundation messing about and memory management. The concerns in this module
+are almost entirely about trying to avoid memory leaks and providing
+appropriate and useful assistance to the higher-level code.
+"""
+import base64
+import ctypes
+import itertools
+import re
+import os
+import ssl
+import tempfile
+
+from .bindings import Security, CoreFoundation, CFConst
+
+
+# This regular expression is used to grab PEM data out of a PEM bundle.
+_PEM_CERTS_RE = re.compile(
+ b"-----BEGIN CERTIFICATE-----\n(.*?)\n-----END CERTIFICATE-----", re.DOTALL
+)
+
+
+def _cf_data_from_bytes(bytestring):
+ """
+ Given a bytestring, create a CFData object from it. This CFData object must
+ be CFReleased by the caller.
+ """
+ return CoreFoundation.CFDataCreate(
+ CoreFoundation.kCFAllocatorDefault, bytestring, len(bytestring)
+ )
+
+
+def _cf_dictionary_from_tuples(tuples):
+ """
+ Given a list of Python tuples, create an associated CFDictionary.
+ """
+ dictionary_size = len(tuples)
+
+ # We need to get the dictionary keys and values out in the same order.
+ keys = (t[0] for t in tuples)
+ values = (t[1] for t in tuples)
+ cf_keys = (CoreFoundation.CFTypeRef * dictionary_size)(*keys)
+ cf_values = (CoreFoundation.CFTypeRef * dictionary_size)(*values)
+
+ return CoreFoundation.CFDictionaryCreate(
+ CoreFoundation.kCFAllocatorDefault,
+ cf_keys,
+ cf_values,
+ dictionary_size,
+ CoreFoundation.kCFTypeDictionaryKeyCallBacks,
+ CoreFoundation.kCFTypeDictionaryValueCallBacks,
+ )
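+
+# Illustrative usage sketch (hypothetical `key_ref`/`value_ref` stand in for
+# CF objects created elsewhere in this module, e.g. via
+# CFStringCreateWithCString; the caller owns and must CFRelease the result):
+#
+#     cf_dict = _cf_dictionary_from_tuples([(key_ref, value_ref)])
+#     ...  # use cf_dict, then:
+#     CoreFoundation.CFRelease(cf_dict)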
+
+
+def _cf_string_to_unicode(value):
+ """
+ Creates a Unicode string from a CFString object. Used entirely for error
+ reporting.
+
+ Yes, it annoys me quite a lot that this function is this complex.
+ """
+ value_as_void_p = ctypes.cast(value, ctypes.POINTER(ctypes.c_void_p))
+
+ string = CoreFoundation.CFStringGetCStringPtr(
+ value_as_void_p,
+ CFConst.kCFStringEncodingUTF8
+ )
+ if string is None:
+ buffer = ctypes.create_string_buffer(1024)
+ result = CoreFoundation.CFStringGetCString(
+ value_as_void_p,
+ buffer,
+ 1024,
+ CFConst.kCFStringEncodingUTF8
+ )
+ if not result:
+ raise OSError('Error copying C string from CFStringRef')
+ string = buffer.value
+ if string is not None:
+ string = string.decode('utf-8')
+ return string
+
+
+def _assert_no_error(error, exception_class=None):
+ """
+ Checks the return code and raises an exception if there is an error to
+ report.
+ """
+ if error == 0:
+ return
+
+ cf_error_string = Security.SecCopyErrorMessageString(error, None)
+ output = _cf_string_to_unicode(cf_error_string)
+ CoreFoundation.CFRelease(cf_error_string)
+
+ if output is None or output == u'':
+ output = u'OSStatus %s' % error
+
+ if exception_class is None:
+ exception_class = ssl.SSLError
+
+ raise exception_class(output)
+
+
+def _cert_array_from_pem(pem_bundle):
+ """
+ Given a bundle of certs in PEM format, turns them into a CFArray of certs
+ that can be used to validate a cert chain.
+ """
+ der_certs = [
+ base64.b64decode(match.group(1))
+ for match in _PEM_CERTS_RE.finditer(pem_bundle)
+ ]
+ if not der_certs:
+ raise ssl.SSLError("No root certificates specified")
+
+ cert_array = CoreFoundation.CFArrayCreateMutable(
+ CoreFoundation.kCFAllocatorDefault,
+ 0,
+ ctypes.byref(CoreFoundation.kCFTypeArrayCallBacks)
+ )
+ if not cert_array:
+ raise ssl.SSLError("Unable to allocate memory!")
+
+ try:
+ for der_bytes in der_certs:
+ certdata = _cf_data_from_bytes(der_bytes)
+ if not certdata:
+ raise ssl.SSLError("Unable to allocate memory!")
+ cert = Security.SecCertificateCreateWithData(
+ CoreFoundation.kCFAllocatorDefault, certdata
+ )
+ CoreFoundation.CFRelease(certdata)
+ if not cert:
+ raise ssl.SSLError("Unable to build cert object!")
+
+ CoreFoundation.CFArrayAppendValue(cert_array, cert)
+ CoreFoundation.CFRelease(cert)
+ except Exception:
+ # We need to free the array before the exception bubbles further.
+ # We only want to do that if an error occurs: otherwise, the caller
+ # should free.
+ CoreFoundation.CFRelease(cert_array)
+ raise
+
+ return cert_array
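+
+# Illustrative usage sketch (the bundle path is a placeholder; the caller
+# owns the returned array and must CFRelease it when done):
+#
+#     with open('/path/to/ca_bundle.pem', 'rb') as f:
+#         trust_anchors = _cert_array_from_pem(f.read())
+#     # ... hand trust_anchors to Security.SecTrustSetAnchorCertificates ...
+#     CoreFoundation.CFRelease(trust_anchors)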
+
+
+def _is_cert(item):
+ """
+ Returns True if a given CFTypeRef is a certificate.
+ """
+ expected = Security.SecCertificateGetTypeID()
+ return CoreFoundation.CFGetTypeID(item) == expected
+
+
+def _is_identity(item):
+ """
+ Returns True if a given CFTypeRef is an identity.
+ """
+ expected = Security.SecIdentityGetTypeID()
+ return CoreFoundation.CFGetTypeID(item) == expected
+
+
+def _temporary_keychain():
+ """
+ This function creates a temporary Mac keychain that we can use to work with
+ credentials. This keychain uses a one-time password and a temporary file to
+ store the data. We expect to have one keychain per socket. The returned
+ SecKeychainRef must be freed by the caller, including calling
+ SecKeychainDelete.
+
+ Returns a tuple of the SecKeychainRef and the path to the temporary
+ directory that contains it.
+ """
+ # Unfortunately, SecKeychainCreate requires a path to a keychain. This
+ # means we cannot use mkstemp to use a generic temporary file. Instead,
+ # we're going to create a temporary directory and a filename to use there.
+ # This filename will be 8 random bytes expanded into base64. We also need
+ # some random bytes to password-protect the keychain we're creating, so we
+ # ask for 40 random bytes.
+ random_bytes = os.urandom(40)
+ filename = base64.b64encode(random_bytes[:8]).decode('utf-8')
+ password = base64.b64encode(random_bytes[8:]) # Must be valid UTF-8
+ tempdirectory = tempfile.mkdtemp()
+
+ keychain_path = os.path.join(tempdirectory, filename).encode('utf-8')
+
+ # We now want to create the keychain itself.
+ keychain = Security.SecKeychainRef()
+ status = Security.SecKeychainCreate(
+ keychain_path,
+ len(password),
+ password,
+ False,
+ None,
+ ctypes.byref(keychain)
+ )
+ _assert_no_error(status)
+
+ # Having created the keychain, we want to pass it off to the caller.
+ return keychain, tempdirectory
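+
+# Illustrative cleanup sketch for the keychain created above (assumes the
+# caller imports shutil; mirrors the SecKeychainDelete requirement stated in
+# the docstring):
+#
+#     keychain, tempdir = _temporary_keychain()
+#     try:
+#         pass  # ... use the keychain ...
+#     finally:
+#         Security.SecKeychainDelete(keychain)
+#         CoreFoundation.CFRelease(keychain)
+#         shutil.rmtree(tempdir)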
+
+
+def _load_items_from_file(keychain, path):
+ """
+ Given a single file, loads all the trust objects from it into arrays and
+ the keychain.
+ Returns a tuple of lists: the first list is a list of identities, the
+ second a list of certs.
+ """
+ certificates = []
+ identities = []
+ result_array = None
+
+ with open(path, 'rb') as f:
+ raw_filedata = f.read()
+
+ try:
+ filedata = CoreFoundation.CFDataCreate(
+ CoreFoundation.kCFAllocatorDefault,
+ raw_filedata,
+ len(raw_filedata)
+ )
+ result_array = CoreFoundation.CFArrayRef()
+ result = Security.SecItemImport(
+ filedata, # cert data
+ None, # Filename, leaving it out for now
+ None, # What the type of the file is, we don't care
+ None, # what's in the file, we don't care
+ 0, # import flags
+ None, # key params, can include passphrase in the future
+ keychain, # The keychain to insert into
+ ctypes.byref(result_array) # Results
+ )
+ _assert_no_error(result)
+
+ # A CFArray is not very useful to us as an intermediary
+ # representation, so we are going to extract the objects we want
+ # and then free the array. We don't need to keep hold of keys: the
+ # keychain already has them!
+ result_count = CoreFoundation.CFArrayGetCount(result_array)
+ for index in range(result_count):
+ item = CoreFoundation.CFArrayGetValueAtIndex(
+ result_array, index
+ )
+ item = ctypes.cast(item, CoreFoundation.CFTypeRef)
+
+ if _is_cert(item):
+ CoreFoundation.CFRetain(item)
+ certificates.append(item)
+ elif _is_identity(item):
+ CoreFoundation.CFRetain(item)
+ identities.append(item)
+ finally:
+ if result_array:
+ CoreFoundation.CFRelease(result_array)
+
+ CoreFoundation.CFRelease(filedata)
+
+ return (identities, certificates)
+
+
+def _load_client_cert_chain(keychain, *paths):
+ """
+ Load certificates and maybe keys from a number of files. Has the end goal
+ of returning a CFArray containing one SecIdentityRef, and then zero or more
+ SecCertificateRef objects, suitable for use as a client certificate trust
+ chain.
+ """
+ # Ok, the strategy.
+ #
+ # This relies on knowing that macOS will not give you a SecIdentityRef
+ # unless you have imported a key into a keychain. This is a somewhat
+ # artificial limitation of macOS (for example, it doesn't necessarily
+ # affect iOS), but there is nothing inside Security.framework that lets you
+ # get a SecIdentityRef without having a key in a keychain.
+ #
+ # So the policy here is we take all the files and iterate them in order.
+ # Each one will use SecItemImport to have one or more objects loaded from
+ # it. We will also point at a keychain that macOS can use to work with the
+ # private key.
+ #
+ # Once we have all the objects, we'll check what we actually have. If we
+ # already have a SecIdentityRef in hand, fab: we'll use that. Otherwise,
+ # we'll take the first certificate (which we assume to be our leaf) and
+ # ask the keychain to give us a SecIdentityRef with that cert's associated
+ # key.
+ #
+ # We'll then return a CFArray containing the trust chain: one
+ # SecIdentityRef and then zero-or-more SecCertificateRef objects. The
+ # responsibility for freeing this CFArray will be with the caller. This
+ # CFArray must remain alive for the entire connection, so in practice it
+ # will be stored with a single SSLSocket, along with the reference to the
+ # keychain.
+ certificates = []
+ identities = []
+
+ # Filter out bad paths.
+ paths = (path for path in paths if path)
+
+ try:
+ for file_path in paths:
+ new_identities, new_certs = _load_items_from_file(
+ keychain, file_path
+ )
+ identities.extend(new_identities)
+ certificates.extend(new_certs)
+
+ # Ok, we have everything. The question is: do we have an identity? If
+ # not, we want to grab one from the first cert we have.
+ if not identities:
+ new_identity = Security.SecIdentityRef()
+ status = Security.SecIdentityCreateWithCertificate(
+ keychain,
+ certificates[0],
+ ctypes.byref(new_identity)
+ )
+ _assert_no_error(status)
+ identities.append(new_identity)
+
+ # We now want to release the original certificate, as we no longer
+ # need it.
+ CoreFoundation.CFRelease(certificates.pop(0))
+
+ # We now need to build a new CFArray that holds the trust chain.
+ trust_chain = CoreFoundation.CFArrayCreateMutable(
+ CoreFoundation.kCFAllocatorDefault,
+ 0,
+ ctypes.byref(CoreFoundation.kCFTypeArrayCallBacks),
+ )
+ for item in itertools.chain(identities, certificates):
+ # ArrayAppendValue does a CFRetain on the item. That's fine,
+ # because the finally block will release our other refs to them.
+ CoreFoundation.CFArrayAppendValue(trust_chain, item)
+
+ return trust_chain
+ finally:
+ for obj in itertools.chain(identities, certificates):
+ CoreFoundation.CFRelease(obj)
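+
+
+# Illustrative usage sketch (paths are placeholders; the caller must
+# CFRelease the returned trust chain once the connection is finished with it):
+#
+#     keychain, tempdir = _temporary_keychain()
+#     trust_chain = _load_client_cert_chain(keychain, '/path/to/cert.pem',
+#                                           '/path/to/key.pem')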
diff --git a/collectors/python.d.plugin/python_modules/urllib3/contrib/appengine.py b/collectors/python.d.plugin/python_modules/urllib3/contrib/appengine.py
new file mode 100644
index 0000000..e74589f
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/urllib3/contrib/appengine.py
@@ -0,0 +1,297 @@
+# SPDX-License-Identifier: MIT
+"""
+This module provides a pool manager that uses Google App Engine's
+`URLFetch Service <https://cloud.google.com/appengine/docs/python/urlfetch>`_.
+
+Example usage::
+
+ from urllib3 import PoolManager
+ from urllib3.contrib.appengine import AppEngineManager, is_appengine_sandbox
+
+ if is_appengine_sandbox():
+ # AppEngineManager uses AppEngine's URLFetch API behind the scenes
+ http = AppEngineManager()
+ else:
+ # PoolManager uses a socket-level API behind the scenes
+ http = PoolManager()
+
+ r = http.request('GET', 'https://google.com/')
+
+There are `limitations <https://cloud.google.com/appengine/docs/python/\
+urlfetch/#Python_Quotas_and_limits>`_ to the URLFetch service and it may not be
+the best choice for your application. There are three options for using
+urllib3 on Google App Engine:
+
+1. You can use :class:`AppEngineManager` with URLFetch. URLFetch is
+ cost-effective in many circumstances as long as your usage is within the
+ limitations.
+2. You can use a normal :class:`~urllib3.PoolManager` by enabling sockets.
+ Sockets also have `limitations and restrictions
+ <https://cloud.google.com/appengine/docs/python/sockets/\
+ #limitations-and-restrictions>`_ and have a lower free quota than URLFetch.
+ To use sockets, be sure to specify the following in your ``app.yaml``::
+
+ env_variables:
+ GAE_USE_SOCKETS_HTTPLIB : 'true'
+
+3. If you are using `App Engine Flexible
+ <https://cloud.google.com/appengine/docs/flexible/>`_, you can use the
+ standard :class:`PoolManager` without any configuration or special
+ environment variables.
+"""
+
+from __future__ import absolute_import
+import logging
+import os
+import warnings
+from ..packages.six.moves.urllib.parse import urljoin
+
+from ..exceptions import (
+ HTTPError,
+ HTTPWarning,
+ MaxRetryError,
+ ProtocolError,
+ TimeoutError,
+ SSLError
+)
+
+from ..packages.six import BytesIO
+from ..request import RequestMethods
+from ..response import HTTPResponse
+from ..util.timeout import Timeout
+from ..util.retry import Retry
+
+try:
+ from google.appengine.api import urlfetch
+except ImportError:
+ urlfetch = None
+
+
+log = logging.getLogger(__name__)
+
+
+class AppEnginePlatformWarning(HTTPWarning):
+ pass
+
+
+class AppEnginePlatformError(HTTPError):
+ pass
+
+
+class AppEngineManager(RequestMethods):
+ """
+ Connection manager for Google App Engine sandbox applications.
+
+ This manager uses the URLFetch service directly instead of using the
+ emulated httplib, and is subject to URLFetch limitations as described in
+ the App Engine documentation `here
+ <https://cloud.google.com/appengine/docs/python/urlfetch>`_.
+
+ Notably it will raise an :class:`AppEnginePlatformError` if:
+ * URLFetch is not available.
+ * You attempt to use this on App Engine Flexible, as full socket
+ support is available there.
+ * A request size is more than 10 megabytes.
+ * A response size is more than 32 megabytes.
+ * You use an unsupported request method such as OPTIONS.
+
+ Beyond those cases, it will raise normal urllib3 errors.
+ """
+
+ def __init__(self, headers=None, retries=None, validate_certificate=True,
+ urlfetch_retries=True):
+ if not urlfetch:
+ raise AppEnginePlatformError(
+ "URLFetch is not available in this environment.")
+
+ if is_prod_appengine_mvms():
+ raise AppEnginePlatformError(
+ "Use normal urllib3.PoolManager instead of AppEngineManager"
+ "on Managed VMs, as using URLFetch is not necessary in "
+ "this environment.")
+
+ warnings.warn(
+ "urllib3 is using URLFetch on Google App Engine sandbox instead "
+ "of sockets. To use sockets directly instead of URLFetch see "
+ "https://urllib3.readthedocs.io/en/latest/reference/urllib3.contrib.html.",
+ AppEnginePlatformWarning)
+
+ RequestMethods.__init__(self, headers)
+ self.validate_certificate = validate_certificate
+ self.urlfetch_retries = urlfetch_retries
+
+ self.retries = retries or Retry.DEFAULT
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ # Return False to re-raise any potential exceptions
+ return False
+
+ def urlopen(self, method, url, body=None, headers=None,
+ retries=None, redirect=True, timeout=Timeout.DEFAULT_TIMEOUT,
+ **response_kw):
+
+ retries = self._get_retries(retries, redirect)
+
+ try:
+ follow_redirects = (
+ redirect and
+ retries.redirect != 0 and
+ retries.total)
+ response = urlfetch.fetch(
+ url,
+ payload=body,
+ method=method,
+ headers=headers or {},
+ allow_truncated=False,
+ follow_redirects=self.urlfetch_retries and follow_redirects,
+ deadline=self._get_absolute_timeout(timeout),
+ validate_certificate=self.validate_certificate,
+ )
+ except urlfetch.DeadlineExceededError as e:
+ raise TimeoutError(self, e)
+
+ except urlfetch.InvalidURLError as e:
+ if 'too large' in str(e):
+ raise AppEnginePlatformError(
+ "URLFetch request too large, URLFetch only "
+ "supports requests up to 10mb in size.", e)
+ raise ProtocolError(e)
+
+ except urlfetch.DownloadError as e:
+ if 'Too many redirects' in str(e):
+ raise MaxRetryError(self, url, reason=e)
+ raise ProtocolError(e)
+
+ except urlfetch.ResponseTooLargeError as e:
+ raise AppEnginePlatformError(
+ "URLFetch response too large, URLFetch only supports"
+ "responses up to 32mb in size.", e)
+
+ except urlfetch.SSLCertificateError as e:
+ raise SSLError(e)
+
+ except urlfetch.InvalidMethodError as e:
+ raise AppEnginePlatformError(
+ "URLFetch does not support method: %s" % method, e)
+
+ http_response = self._urlfetch_response_to_http_response(
+ response, retries=retries, **response_kw)
+
+ # Handle redirect?
+ redirect_location = redirect and http_response.get_redirect_location()
+ if redirect_location:
+ # Check for redirect response
+ if (self.urlfetch_retries and retries.raise_on_redirect):
+ raise MaxRetryError(self, url, "too many redirects")
+ else:
+ if http_response.status == 303:
+ method = 'GET'
+
+ try:
+ retries = retries.increment(method, url, response=http_response, _pool=self)
+ except MaxRetryError:
+ if retries.raise_on_redirect:
+ raise MaxRetryError(self, url, "too many redirects")
+ return http_response
+
+ retries.sleep_for_retry(http_response)
+ log.debug("Redirecting %s -> %s", url, redirect_location)
+ redirect_url = urljoin(url, redirect_location)
+ return self.urlopen(
+ method, redirect_url, body, headers,
+ retries=retries, redirect=redirect,
+ timeout=timeout, **response_kw)
+
+ # Check if we should retry the HTTP response.
+ has_retry_after = bool(http_response.getheader('Retry-After'))
+ if retries.is_retry(method, http_response.status, has_retry_after):
+ retries = retries.increment(
+ method, url, response=http_response, _pool=self)
+ log.debug("Retry: %s", url)
+ retries.sleep(http_response)
+ return self.urlopen(
+ method, url,
+ body=body, headers=headers,
+ retries=retries, redirect=redirect,
+ timeout=timeout, **response_kw)
+
+ return http_response
+
+ def _urlfetch_response_to_http_response(self, urlfetch_resp, **response_kw):
+
+ if is_prod_appengine():
+ # Production GAE handles deflate encoding automatically, but does
+ # not remove the encoding header.
+ content_encoding = urlfetch_resp.headers.get('content-encoding')
+
+ if content_encoding == 'deflate':
+ del urlfetch_resp.headers['content-encoding']
+
+ transfer_encoding = urlfetch_resp.headers.get('transfer-encoding')
+ # We have a full response's content,
+ # so let's make sure we don't report ourselves as chunked data.
+ if transfer_encoding == 'chunked':
+ encodings = transfer_encoding.split(",")
+ encodings.remove('chunked')
+ urlfetch_resp.headers['transfer-encoding'] = ','.join(encodings)
+
+ return HTTPResponse(
+ # In order for decoding to work, we must present the content as
+ # a file-like object.
+ body=BytesIO(urlfetch_resp.content),
+ headers=urlfetch_resp.headers,
+ status=urlfetch_resp.status_code,
+ **response_kw
+ )
+
+ def _get_absolute_timeout(self, timeout):
+ if timeout is Timeout.DEFAULT_TIMEOUT:
+ return None # Defer to URLFetch's default.
+ if isinstance(timeout, Timeout):
+ if timeout._read is not None or timeout._connect is not None:
+ warnings.warn(
+ "URLFetch does not support granular timeout settings, "
+ "reverting to total or default URLFetch timeout.",
+ AppEnginePlatformWarning)
+ return timeout.total
+ return timeout
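+
+ # Illustrative mapping sketch: Timeout(total=10) becomes a 10-second
+ # URLFetch deadline, while Timeout(connect=2, read=5) triggers the
+ # warning above and falls back to its `total` value (None here, i.e.
+ # the URLFetch default).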
+
+ def _get_retries(self, retries, redirect):
+ if not isinstance(retries, Retry):
+ retries = Retry.from_int(
+ retries, redirect=redirect, default=self.retries)
+
+ if retries.connect or retries.read or retries.redirect:
+ warnings.warn(
+ "URLFetch only supports total retries and does not "
+ "recognize connect, read, or redirect retry parameters.",
+ AppEnginePlatformWarning)
+
+ return retries
+
+
+def is_appengine():
+ return (is_local_appengine() or
+ is_prod_appengine() or
+ is_prod_appengine_mvms())
+
+
+def is_appengine_sandbox():
+ return is_appengine() and not is_prod_appengine_mvms()
+
+
+def is_local_appengine():
+ return ('APPENGINE_RUNTIME' in os.environ and
+ 'Development/' in os.environ['SERVER_SOFTWARE'])
+
+
+def is_prod_appengine():
+ return ('APPENGINE_RUNTIME' in os.environ and
+ 'Google App Engine/' in os.environ['SERVER_SOFTWARE'] and
+ not is_prod_appengine_mvms())
+
+
+def is_prod_appengine_mvms():
+ return os.environ.get('GAE_VM', False) == 'true'
diff --git a/collectors/python.d.plugin/python_modules/urllib3/contrib/ntlmpool.py b/collectors/python.d.plugin/python_modules/urllib3/contrib/ntlmpool.py
new file mode 100644
index 0000000..3f8c9eb
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/urllib3/contrib/ntlmpool.py
@@ -0,0 +1,113 @@
+# SPDX-License-Identifier: MIT
+"""
+NTLM authenticating pool, contributed by erikcederstran
+
+Issue #10, see: http://code.google.com/p/urllib3/issues/detail?id=10
+"""
+from __future__ import absolute_import
+
+from logging import getLogger
+from ntlm import ntlm
+
+from .. import HTTPSConnectionPool
+from ..packages.six.moves.http_client import HTTPSConnection
+
+
+log = getLogger(__name__)
+
+
+class NTLMConnectionPool(HTTPSConnectionPool):
+ """
+ Implements an NTLM authentication version of an urllib3 connection pool
+ """
+
+ scheme = 'https'
+
+ def __init__(self, user, pw, authurl, *args, **kwargs):
+ """
+ authurl is an arbitrary URL on the server that is protected by NTLM.
+ user is the Windows user, probably in the DOMAIN\\username format.
+ pw is the password for the user.
+ """
+ super(NTLMConnectionPool, self).__init__(*args, **kwargs)
+ self.authurl = authurl
+ self.rawuser = user
+ user_parts = user.split('\\', 1)
+ self.domain = user_parts[0].upper()
+ self.user = user_parts[1]
+ self.pw = pw
+
+ def _new_conn(self):
+ # Performs the NTLM handshake that secures the connection. The socket
+ # must be kept open while requests are performed.
+ self.num_connections += 1
+ log.debug('Starting NTLM HTTPS connection no. %d: https://%s%s',
+ self.num_connections, self.host, self.authurl)
+
+ headers = {}
+ headers['Connection'] = 'Keep-Alive'
+ req_header = 'Authorization'
+ resp_header = 'www-authenticate'
+
+ conn = HTTPSConnection(host=self.host, port=self.port)
+
+ # Send negotiation message
+ headers[req_header] = (
+ 'NTLM %s' % ntlm.create_NTLM_NEGOTIATE_MESSAGE(self.rawuser))
+ log.debug('Request headers: %s', headers)
+ conn.request('GET', self.authurl, None, headers)
+ res = conn.getresponse()
+ reshdr = dict(res.getheaders())
+ log.debug('Response status: %s %s', res.status, res.reason)
+ log.debug('Response headers: %s', reshdr)
+ log.debug('Response data: %s [...]', res.read(100))
+
+ # Remove the reference to the socket, so that it can not be closed by
+ # the response object (we want to keep the socket open)
+ res.fp = None
+
+ # Server should respond with a challenge message
+ auth_header_values = reshdr[resp_header].split(', ')
+ auth_header_value = None
+ for s in auth_header_values:
+ if s[:5] == 'NTLM ':
+ auth_header_value = s[5:]
+ if auth_header_value is None:
+ raise Exception('Unexpected %s response header: %s' %
+ (resp_header, reshdr[resp_header]))
+
+ # Send authentication message
+ ServerChallenge, NegotiateFlags = \
+ ntlm.parse_NTLM_CHALLENGE_MESSAGE(auth_header_value)
+ auth_msg = ntlm.create_NTLM_AUTHENTICATE_MESSAGE(ServerChallenge,
+ self.user,
+ self.domain,
+ self.pw,
+ NegotiateFlags)
+ headers[req_header] = 'NTLM %s' % auth_msg
+ log.debug('Request headers: %s', headers)
+ conn.request('GET', self.authurl, None, headers)
+ res = conn.getresponse()
+ log.debug('Response status: %s %s', res.status, res.reason)
+ log.debug('Response headers: %s', dict(res.getheaders()))
+ log.debug('Response data: %s [...]', res.read()[:100])
+ if res.status != 200:
+ if res.status == 401:
+ raise Exception('Server rejected request: wrong '
+ 'username or password')
+ raise Exception('Wrong server response: %s %s' %
+ (res.status, res.reason))
+
+ res.fp = None
+ log.debug('Connection established')
+ return conn
+
+ def urlopen(self, method, url, body=None, headers=None, retries=3,
+ redirect=True, assert_same_host=True):
+ if headers is None:
+ headers = {}
+ headers['Connection'] = 'Keep-Alive'
+ return super(NTLMConnectionPool, self).urlopen(method, url, body,
+ headers, retries,
+ redirect,
+ assert_same_host)
diff --git a/collectors/python.d.plugin/python_modules/urllib3/contrib/pyopenssl.py b/collectors/python.d.plugin/python_modules/urllib3/contrib/pyopenssl.py
new file mode 100644
index 0000000..8d37350
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/urllib3/contrib/pyopenssl.py
@@ -0,0 +1,458 @@
+# SPDX-License-Identifier: MIT
+"""
+SSL with SNI_-support for Python 2. Follow these instructions if you would
+like to verify SSL certificates in Python 2. Note, the default libraries do
+*not* do certificate checking; you need to do additional work to validate
+certificates yourself.
+
+This needs the following packages installed:
+
+* pyOpenSSL (tested with 16.0.0)
+* cryptography (minimum 1.3.4, from pyopenssl)
+* idna (minimum 2.0, from cryptography)
+
+However, pyopenssl depends on cryptography, which in turn depends on idna, so
+while we use all three directly here we end up with relatively few required
+packages.
+
+You can install them with the following command:
+
+ pip install pyopenssl cryptography idna
+
+To activate certificate checking, call
+:func:`~urllib3.contrib.pyopenssl.inject_into_urllib3` from your Python code
+before you begin making HTTP requests. This can be done in a ``sitecustomize``
+module, or at any other time before your application begins using ``urllib3``,
+like this::
+
+ try:
+ import urllib3.contrib.pyopenssl
+ urllib3.contrib.pyopenssl.inject_into_urllib3()
+ except ImportError:
+ pass
+
+Now you can use :mod:`urllib3` as you normally would, and it will support SNI
+when the required modules are installed.
+
+Activating this module also has the positive side effect of disabling SSL/TLS
+compression in Python 2 (see `CRIME attack`_).
+
+If you want to configure the default list of supported cipher suites, you can
+set the ``urllib3.contrib.pyopenssl.DEFAULT_SSL_CIPHER_LIST`` variable.
+
+.. _sni: https://en.wikipedia.org/wiki/Server_Name_Indication
+.. _crime attack: https://en.wikipedia.org/wiki/CRIME_(security_exploit)
+"""
+from __future__ import absolute_import
+
+import OpenSSL.SSL
+from cryptography import x509
+from cryptography.hazmat.backends.openssl import backend as openssl_backend
+from cryptography.hazmat.backends.openssl.x509 import _Certificate
+
+from socket import timeout, error as SocketError
+from io import BytesIO
+
+try: # Platform-specific: Python 2
+ from socket import _fileobject
+except ImportError: # Platform-specific: Python 3
+ _fileobject = None
+ from ..packages.backports.makefile import backport_makefile
+
+import logging
+import ssl
+
+try:
+ import six
+except ImportError:
+ from ..packages import six
+
+import sys
+
+from .. import util
+
+__all__ = ['inject_into_urllib3', 'extract_from_urllib3']
+
+# SNI always works.
+HAS_SNI = True
+
+# Map from urllib3 to PyOpenSSL compatible parameter-values.
+_openssl_versions = {
+ ssl.PROTOCOL_SSLv23: OpenSSL.SSL.SSLv23_METHOD,
+ ssl.PROTOCOL_TLSv1: OpenSSL.SSL.TLSv1_METHOD,
+}
+
+if hasattr(ssl, 'PROTOCOL_TLSv1_1') and hasattr(OpenSSL.SSL, 'TLSv1_1_METHOD'):
+ _openssl_versions[ssl.PROTOCOL_TLSv1_1] = OpenSSL.SSL.TLSv1_1_METHOD
+
+if hasattr(ssl, 'PROTOCOL_TLSv1_2') and hasattr(OpenSSL.SSL, 'TLSv1_2_METHOD'):
+ _openssl_versions[ssl.PROTOCOL_TLSv1_2] = OpenSSL.SSL.TLSv1_2_METHOD
+
+try:
+ _openssl_versions.update({ssl.PROTOCOL_SSLv3: OpenSSL.SSL.SSLv3_METHOD})
+except AttributeError:
+ pass
+
+_stdlib_to_openssl_verify = {
+ ssl.CERT_NONE: OpenSSL.SSL.VERIFY_NONE,
+ ssl.CERT_OPTIONAL: OpenSSL.SSL.VERIFY_PEER,
+ ssl.CERT_REQUIRED:
+ OpenSSL.SSL.VERIFY_PEER + OpenSSL.SSL.VERIFY_FAIL_IF_NO_PEER_CERT,
+}
+_openssl_to_stdlib_verify = dict(
+ (v, k) for k, v in _stdlib_to_openssl_verify.items()
+)
+
+# OpenSSL will only write 16K at a time
+SSL_WRITE_BLOCKSIZE = 16384
+
+orig_util_HAS_SNI = util.HAS_SNI
+orig_util_SSLContext = util.ssl_.SSLContext
+
+
+log = logging.getLogger(__name__)
+
+
+def inject_into_urllib3():
+ 'Monkey-patch urllib3 with PyOpenSSL-backed SSL-support.'
+
+ _validate_dependencies_met()
+
+ util.ssl_.SSLContext = PyOpenSSLContext
+ util.HAS_SNI = HAS_SNI
+ util.ssl_.HAS_SNI = HAS_SNI
+ util.IS_PYOPENSSL = True
+ util.ssl_.IS_PYOPENSSL = True
+
+
+def extract_from_urllib3():
+ 'Undo monkey-patching by :func:`inject_into_urllib3`.'
+
+ util.ssl_.SSLContext = orig_util_SSLContext
+ util.HAS_SNI = orig_util_HAS_SNI
+ util.ssl_.HAS_SNI = orig_util_HAS_SNI
+ util.IS_PYOPENSSL = False
+ util.ssl_.IS_PYOPENSSL = False
+
+
+def _validate_dependencies_met():
+ """
+ Verifies that PyOpenSSL's package-level dependencies have been met.
+ Throws `ImportError` if they are not met.
+ """
+ # Method added in `cryptography==1.1`; not available in older versions
+ from cryptography.x509.extensions import Extensions
+ if getattr(Extensions, "get_extension_for_class", None) is None:
+ raise ImportError("'cryptography' module missing required functionality. "
+ "Try upgrading to v1.3.4 or newer.")
+
+ # pyOpenSSL 0.14 and above use cryptography for OpenSSL bindings. The _x509
+ # attribute is only present on those versions.
+ from OpenSSL.crypto import X509
+ x509 = X509()
+ if getattr(x509, "_x509", None) is None:
+ raise ImportError("'pyOpenSSL' module missing required functionality. "
+ "Try upgrading to v0.14 or newer.")
+
+
+def _dnsname_to_stdlib(name):
+ """
+ Converts a dNSName SubjectAlternativeName field to the form used by the
+ standard library on the given Python version.
+
+ Cryptography produces a dNSName as a unicode string that was idna-decoded
+ from ASCII bytes. We need to idna-encode that string to get it back, and
+ then on Python 3 we also need to convert to unicode via UTF-8 (the stdlib
+ uses PyUnicode_FromStringAndSize on it, which decodes via UTF-8).
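+
+ For example (illustrative): on Python 3, ``u'*.example.com'`` comes back as
+ ``'*.example.com'`` -- the wildcard prefix is preserved while the rest of
+ the name is IDNA-encoded and then UTF-8 decoded.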
+ """
+ def idna_encode(name):
+ """
+ Borrowed wholesale from the Python Cryptography Project. It turns out
+ that we can't just safely call `idna.encode`: it can explode for
+ wildcard names. This avoids that problem.
+ """
+ import idna
+
+ for prefix in [u'*.', u'.']:
+ if name.startswith(prefix):
+ name = name[len(prefix):]
+ return prefix.encode('ascii') + idna.encode(name)
+ return idna.encode(name)
+
+ name = idna_encode(name)
+ if sys.version_info >= (3, 0):
+ name = name.decode('utf-8')
+ return name
+
+
+def get_subj_alt_name(peer_cert):
+ """
+ Given a PyOpenSSL certificate, provides all the subject alternative names.
+ """
+ # Pass the cert to cryptography, which has much better APIs for this.
+ # This is technically using private APIs, but should work across all
+ # relevant versions until PyOpenSSL gets something proper for this.
+ cert = _Certificate(openssl_backend, peer_cert._x509)
+
+ # We want to find the SAN extension. Ask Cryptography to locate it (it's
+ # faster than looping in Python)
+ try:
+ ext = cert.extensions.get_extension_for_class(
+ x509.SubjectAlternativeName
+ ).value
+ except x509.ExtensionNotFound:
+ # No such extension, return the empty list.
+ return []
+ except (x509.DuplicateExtension, x509.UnsupportedExtension,
+ x509.UnsupportedGeneralNameType, UnicodeError) as e:
+ # A problem has been found with the quality of the certificate. Assume
+ # no SAN field is present.
+ log.warning(
+ "A problem was encountered with the certificate that prevented "
+ "urllib3 from finding the SubjectAlternativeName field. This can "
+ "affect certificate validation. The error was %s",
+ e,
+ )
+ return []
+
+ # We want to return dNSName and iPAddress fields. We need to cast the IPs
+ # back to strings because the match_hostname function wants them as
+ # strings.
+ # Sadly the DNS names need to be idna encoded and then, on Python 3, UTF-8
+ # decoded. This is pretty frustrating, but that's what the standard library
+ # does with certificates, and so we need to attempt to do the same.
+ names = [
+ ('DNS', _dnsname_to_stdlib(name))
+ for name in ext.get_values_for_type(x509.DNSName)
+ ]
+ names.extend(
+ ('IP Address', str(name))
+ for name in ext.get_values_for_type(x509.IPAddress)
+ )
+
+ return names
+
+
+class WrappedSocket(object):
+ '''API-compatibility wrapper for PyOpenSSL's ``Connection`` class.
+
+ Note: _makefile_refs, _drop() and _reuse() are needed for the garbage
+ collector of PyPy.
+ '''
+
+ def __init__(self, connection, socket, suppress_ragged_eofs=True):
+ self.connection = connection
+ self.socket = socket
+ self.suppress_ragged_eofs = suppress_ragged_eofs
+ self._makefile_refs = 0
+ self._closed = False
+
+ def fileno(self):
+ return self.socket.fileno()
+
+ # Copy-pasted from Python 3.5 source code
+ def _decref_socketios(self):
+ if self._makefile_refs > 0:
+ self._makefile_refs -= 1
+ if self._closed:
+ self.close()
+
+ def recv(self, *args, **kwargs):
+ try:
+ data = self.connection.recv(*args, **kwargs)
+ except OpenSSL.SSL.SysCallError as e:
+ if self.suppress_ragged_eofs and e.args == (-1, 'Unexpected EOF'):
+ return b''
+ else:
+ raise SocketError(str(e))
+ except OpenSSL.SSL.ZeroReturnError as e:
+ if self.connection.get_shutdown() == OpenSSL.SSL.RECEIVED_SHUTDOWN:
+ return b''
+ else:
+ raise
+ except OpenSSL.SSL.WantReadError:
+ rd = util.wait_for_read(self.socket, self.socket.gettimeout())
+ if not rd:
+ raise timeout('The read operation timed out')
+ else:
+ return self.recv(*args, **kwargs)
+ else:
+ return data
+
+ def recv_into(self, *args, **kwargs):
+ try:
+ return self.connection.recv_into(*args, **kwargs)
+ except OpenSSL.SSL.SysCallError as e:
+ if self.suppress_ragged_eofs and e.args == (-1, 'Unexpected EOF'):
+ return 0
+ else:
+ raise SocketError(str(e))
+ except OpenSSL.SSL.ZeroReturnError as e:
+ if self.connection.get_shutdown() == OpenSSL.SSL.RECEIVED_SHUTDOWN:
+ return 0
+ else:
+ raise
+ except OpenSSL.SSL.WantReadError:
+ rd = util.wait_for_read(self.socket, self.socket.gettimeout())
+ if not rd:
+ raise timeout('The read operation timed out')
+ else:
+ return self.recv_into(*args, **kwargs)
+
+ def settimeout(self, timeout):
+ return self.socket.settimeout(timeout)
+
+ def _send_until_done(self, data):
+ while True:
+ try:
+ return self.connection.send(data)
+ except OpenSSL.SSL.WantWriteError:
+ wr = util.wait_for_write(self.socket, self.socket.gettimeout())
+ if not wr:
+ raise timeout()
+ continue
+ except OpenSSL.SSL.SysCallError as e:
+ raise SocketError(str(e))
+
+ def sendall(self, data):
+ total_sent = 0
+ while total_sent < len(data):
+ sent = self._send_until_done(data[total_sent:total_sent + SSL_WRITE_BLOCKSIZE])
+ total_sent += sent
+
+ def shutdown(self):
+ # FIXME rethrow compatible exceptions should we ever use this
+ self.connection.shutdown()
+
+ def close(self):
+ if self._makefile_refs < 1:
+ try:
+ self._closed = True
+ return self.connection.close()
+ except OpenSSL.SSL.Error:
+ return
+ else:
+ self._makefile_refs -= 1
+
+ def getpeercert(self, binary_form=False):
+ x509 = self.connection.get_peer_certificate()
+
+ if not x509:
+ return x509
+
+ if binary_form:
+ return OpenSSL.crypto.dump_certificate(
+ OpenSSL.crypto.FILETYPE_ASN1,
+ x509)
+
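+ # Shape of the dict returned below (values illustrative):
+ #     {'subject': ((('commonName', 'example.com'),),),
+ #      'subjectAltName': [('DNS', 'example.com'), ('IP Address', '93.184.216.34')]}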
+ return {
+ 'subject': (
+ (('commonName', x509.get_subject().CN),),
+ ),
+ 'subjectAltName': get_subj_alt_name(x509)
+ }
+
+ def _reuse(self):
+ self._makefile_refs += 1
+
+ def _drop(self):
+ if self._makefile_refs < 1:
+ self.close()
+ else:
+ self._makefile_refs -= 1
+
+
+if _fileobject: # Platform-specific: Python 2
+ def makefile(self, mode, bufsize=-1):
+ self._makefile_refs += 1
+ return _fileobject(self, mode, bufsize, close=True)
+else: # Platform-specific: Python 3
+ makefile = backport_makefile
+
+WrappedSocket.makefile = makefile
+
+
+class PyOpenSSLContext(object):
+ """
+ I am a wrapper class for the PyOpenSSL ``Context`` object. I am responsible
+ for translating the interface of the standard library ``SSLContext`` object
+ to calls into PyOpenSSL.
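+
+ A stdlib-flavoured sketch of how I am used (the CA path and host name are
+ illustrative; ``sock`` is assumed to be a connected TCP socket)::
+
+ ctx = PyOpenSSLContext(ssl.PROTOCOL_SSLv23)
+ ctx.verify_mode = ssl.CERT_REQUIRED
+ ctx.load_verify_locations(cafile='/etc/ssl/certs/ca-bundle.crt')
+ tls_sock = ctx.wrap_socket(sock, server_hostname='example.com')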
+ """
+ def __init__(self, protocol):
+ self.protocol = _openssl_versions[protocol]
+ self._ctx = OpenSSL.SSL.Context(self.protocol)
+ self._options = 0
+ self.check_hostname = False
+
+ @property
+ def options(self):
+ return self._options
+
+ @options.setter
+ def options(self, value):
+ self._options = value
+ self._ctx.set_options(value)
+
+ @property
+ def verify_mode(self):
+ return _openssl_to_stdlib_verify[self._ctx.get_verify_mode()]
+
+ @verify_mode.setter
+ def verify_mode(self, value):
+ self._ctx.set_verify(
+ _stdlib_to_openssl_verify[value],
+ _verify_callback
+ )
+
+ def set_default_verify_paths(self):
+ self._ctx.set_default_verify_paths()
+
+ def set_ciphers(self, ciphers):
+ if isinstance(ciphers, six.text_type):
+ ciphers = ciphers.encode('utf-8')
+ self._ctx.set_cipher_list(ciphers)
+
+ def load_verify_locations(self, cafile=None, capath=None, cadata=None):
+ if cafile is not None:
+ cafile = cafile.encode('utf-8')
+ if capath is not None:
+ capath = capath.encode('utf-8')
+ self._ctx.load_verify_locations(cafile, capath)
+ if cadata is not None:
+ self._ctx.load_verify_locations(BytesIO(cadata))
+
+ def load_cert_chain(self, certfile, keyfile=None, password=None):
+ self._ctx.use_certificate_file(certfile)
+ if password is not None:
+ self._ctx.set_passwd_cb(lambda max_length, prompt_twice, userdata: password)
+ self._ctx.use_privatekey_file(keyfile or certfile)
+
+ def wrap_socket(self, sock, server_side=False,
+ do_handshake_on_connect=True, suppress_ragged_eofs=True,
+ server_hostname=None):
+ cnx = OpenSSL.SSL.Connection(self._ctx, sock)
+
+ if isinstance(server_hostname, six.text_type): # Platform-specific: Python 3
+ server_hostname = server_hostname.encode('utf-8')
+
+ if server_hostname is not None:
+ cnx.set_tlsext_host_name(server_hostname)
+
+ cnx.set_connect_state()
+
+ while True:
+ try:
+ cnx.do_handshake()
+ except OpenSSL.SSL.WantReadError:
+ rd = util.wait_for_read(sock, sock.gettimeout())
+ if not rd:
+ raise timeout('select timed out')
+ continue
+ except OpenSSL.SSL.Error as e:
+ raise ssl.SSLError('bad handshake: %r' % e)
+ break
+
+ return WrappedSocket(cnx, sock)
+
+
+def _verify_callback(cnx, x509, err_no, err_depth, return_code):
+ return err_no == 0
diff --git a/collectors/python.d.plugin/python_modules/urllib3/contrib/securetransport.py b/collectors/python.d.plugin/python_modules/urllib3/contrib/securetransport.py
new file mode 100644
index 0000000..fcc3011
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/urllib3/contrib/securetransport.py
@@ -0,0 +1,808 @@
+# SPDX-License-Identifier: MIT
+"""
+SecureTransport support for urllib3 via ctypes.
+
+This makes platform-native TLS available to urllib3 users on macOS without the
+use of a compiler. This is an important feature because the Python Package
+Index is moving to become a TLSv1.2-or-higher server, and the default OpenSSL
+that ships with macOS is not capable of doing TLSv1.2. The only way to resolve
+this is to give macOS users an alternative solution to the problem, and that
+solution is to use SecureTransport.
+
+We use ctypes here because this solution must not require a compiler. That's
+because pip is not allowed to require a compiler either.
+
+This is not intended to be a seriously long-term solution to this problem.
+The hope is that PEP 543 will eventually solve this issue for us, at which
+point we can retire this contrib module. But in the short term, we need to
+solve the impending tire fire that is Python on Mac without this kind of
+contrib module. So...here we are.
+
+To use this module, simply import and inject it::
+
+ import urllib3.contrib.securetransport
+ urllib3.contrib.securetransport.inject_into_urllib3()
+
+Happy TLSing!
+"""
+from __future__ import absolute_import
+
+import contextlib
+import ctypes
+import errno
+import os.path
+import shutil
+import socket
+import ssl
+import threading
+import weakref
+
+from .. import util
+from ._securetransport.bindings import (
+ Security, SecurityConst, CoreFoundation
+)
+from ._securetransport.low_level import (
+ _assert_no_error, _cert_array_from_pem, _temporary_keychain,
+ _load_client_cert_chain
+)
+
+try: # Platform-specific: Python 2
+ from socket import _fileobject
+except ImportError: # Platform-specific: Python 3
+ _fileobject = None
+ from ..packages.backports.makefile import backport_makefile
+
+try:
+ memoryview(b'')
+except NameError:
+ raise ImportError("SecureTransport only works on Pythons with memoryview")
+
+__all__ = ['inject_into_urllib3', 'extract_from_urllib3']
+
+# SNI always works
+HAS_SNI = True
+
+orig_util_HAS_SNI = util.HAS_SNI
+orig_util_SSLContext = util.ssl_.SSLContext
+
+# This dictionary is used by the read callback to obtain a handle to the
+# calling wrapped socket. This is a pretty silly approach, but for now it'll
+# do. I feel like I should be able to smuggle a handle to the wrapped socket
+# directly in the SSLConnectionRef, but for now this approach will work I
+# guess.
+#
+# We need to lock around this structure for inserts, but we don't do it for
+# reads/writes in the callbacks. The reasoning here goes as follows:
+#
+# 1. It is not possible to call into the callbacks before the dictionary is
+# populated, so once in the callback the id must be in the dictionary.
+# 2. The callbacks don't mutate the dictionary, they only read from it, and
+# so cannot conflict with any of the insertions.
+#
+# This is good: if we had to lock in the callbacks we'd drastically slow down
+# the performance of this code.
+_connection_refs = weakref.WeakValueDictionary()
+_connection_ref_lock = threading.Lock()
+
+# Limit writes to 16kB. This is OpenSSL's limit, but we'll cargo-cult it over
+# for no better reason than we need *a* limit, and this one is right there.
+SSL_WRITE_BLOCKSIZE = 16384
+
+# This is our equivalent of util.ssl_.DEFAULT_CIPHERS, but expanded out to
+# individual cipher suites. We need to do this because this is how
+# SecureTransport wants them.
+CIPHER_SUITES = [
+ SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
+ SecurityConst.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
+ SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
+ SecurityConst.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
+ SecurityConst.TLS_DHE_DSS_WITH_AES_256_GCM_SHA384,
+ SecurityConst.TLS_DHE_RSA_WITH_AES_256_GCM_SHA384,
+ SecurityConst.TLS_DHE_DSS_WITH_AES_128_GCM_SHA256,
+ SecurityConst.TLS_DHE_RSA_WITH_AES_128_GCM_SHA256,
+ SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384,
+ SecurityConst.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384,
+ SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
+ SecurityConst.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
+ SecurityConst.TLS_DHE_RSA_WITH_AES_256_CBC_SHA256,
+ SecurityConst.TLS_DHE_DSS_WITH_AES_256_CBC_SHA256,
+ SecurityConst.TLS_DHE_RSA_WITH_AES_256_CBC_SHA,
+ SecurityConst.TLS_DHE_DSS_WITH_AES_256_CBC_SHA,
+ SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256,
+ SecurityConst.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256,
+ SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
+ SecurityConst.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
+ SecurityConst.TLS_DHE_RSA_WITH_AES_128_CBC_SHA256,
+ SecurityConst.TLS_DHE_DSS_WITH_AES_128_CBC_SHA256,
+ SecurityConst.TLS_DHE_RSA_WITH_AES_128_CBC_SHA,
+ SecurityConst.TLS_DHE_DSS_WITH_AES_128_CBC_SHA,
+ SecurityConst.TLS_RSA_WITH_AES_256_GCM_SHA384,
+ SecurityConst.TLS_RSA_WITH_AES_128_GCM_SHA256,
+ SecurityConst.TLS_RSA_WITH_AES_256_CBC_SHA256,
+ SecurityConst.TLS_RSA_WITH_AES_128_CBC_SHA256,
+ SecurityConst.TLS_RSA_WITH_AES_256_CBC_SHA,
+ SecurityConst.TLS_RSA_WITH_AES_128_CBC_SHA,
+]
+
+# Basically this is simple: for PROTOCOL_SSLv23 we turn it into a low of
+# TLSv1 and a high of TLSv1.2. For everything else, we pin to that version.
+_protocol_to_min_max = {
+ ssl.PROTOCOL_SSLv23: (SecurityConst.kTLSProtocol1, SecurityConst.kTLSProtocol12),
+}
+
+if hasattr(ssl, "PROTOCOL_SSLv2"):
+ _protocol_to_min_max[ssl.PROTOCOL_SSLv2] = (
+ SecurityConst.kSSLProtocol2, SecurityConst.kSSLProtocol2
+ )
+if hasattr(ssl, "PROTOCOL_SSLv3"):
+ _protocol_to_min_max[ssl.PROTOCOL_SSLv3] = (
+ SecurityConst.kSSLProtocol3, SecurityConst.kSSLProtocol3
+ )
+if hasattr(ssl, "PROTOCOL_TLSv1"):
+ _protocol_to_min_max[ssl.PROTOCOL_TLSv1] = (
+ SecurityConst.kTLSProtocol1, SecurityConst.kTLSProtocol1
+ )
+if hasattr(ssl, "PROTOCOL_TLSv1_1"):
+ _protocol_to_min_max[ssl.PROTOCOL_TLSv1_1] = (
+ SecurityConst.kTLSProtocol11, SecurityConst.kTLSProtocol11
+ )
+if hasattr(ssl, "PROTOCOL_TLSv1_2"):
+ _protocol_to_min_max[ssl.PROTOCOL_TLSv1_2] = (
+ SecurityConst.kTLSProtocol12, SecurityConst.kTLSProtocol12
+ )
+if hasattr(ssl, "PROTOCOL_TLS"):
+ _protocol_to_min_max[ssl.PROTOCOL_TLS] = _protocol_to_min_max[ssl.PROTOCOL_SSLv23]
+
+
+def inject_into_urllib3():
+ """
+ Monkey-patch urllib3 with SecureTransport-backed SSL-support.
+ """
+ util.ssl_.SSLContext = SecureTransportContext
+ util.HAS_SNI = HAS_SNI
+ util.ssl_.HAS_SNI = HAS_SNI
+ util.IS_SECURETRANSPORT = True
+ util.ssl_.IS_SECURETRANSPORT = True
+
+
+def extract_from_urllib3():
+ """
+ Undo monkey-patching by :func:`inject_into_urllib3`.
+ """
+ util.ssl_.SSLContext = orig_util_SSLContext
+ util.HAS_SNI = orig_util_HAS_SNI
+ util.ssl_.HAS_SNI = orig_util_HAS_SNI
+ util.IS_SECURETRANSPORT = False
+ util.ssl_.IS_SECURETRANSPORT = False
+
+
+def _read_callback(connection_id, data_buffer, data_length_pointer):
+ """
+ SecureTransport read callback. This is called by ST to request that data
+ be returned from the socket.
+ """
+ wrapped_socket = None
+ try:
+ wrapped_socket = _connection_refs.get(connection_id)
+ if wrapped_socket is None:
+ return SecurityConst.errSSLInternal
+ base_socket = wrapped_socket.socket
+
+ requested_length = data_length_pointer[0]
+
+ timeout = wrapped_socket.gettimeout()
+ error = None
+ read_count = 0
+ buffer = (ctypes.c_char * requested_length).from_address(data_buffer)
+ buffer_view = memoryview(buffer)
+
+ try:
+ while read_count < requested_length:
+ if timeout is None or timeout >= 0:
+ readables = util.wait_for_read([base_socket], timeout)
+ if not readables:
+ raise socket.error(errno.EAGAIN, 'timed out')
+
+ # We need to tell ctypes that we have a buffer that can be
+ # written to. Upsettingly, we do that like this:
+ chunk_size = base_socket.recv_into(
+ buffer_view[read_count:requested_length]
+ )
+ read_count += chunk_size
+ if not chunk_size:
+ if not read_count:
+ return SecurityConst.errSSLClosedGraceful
+ break
+ except (socket.error) as e:
+ error = e.errno
+
+ if error is not None and error != errno.EAGAIN:
+ if error == errno.ECONNRESET:
+ return SecurityConst.errSSLClosedAbort
+ raise
+
+ data_length_pointer[0] = read_count
+
+ if read_count != requested_length:
+ return SecurityConst.errSSLWouldBlock
+
+ return 0
+ except Exception as e:
+ if wrapped_socket is not None:
+ wrapped_socket._exception = e
+ return SecurityConst.errSSLInternal
+
+
+def _write_callback(connection_id, data_buffer, data_length_pointer):
+ """
+ SecureTransport write callback. This is called by ST to request that data
+ actually be sent on the network.
+ """
+ wrapped_socket = None
+ try:
+ wrapped_socket = _connection_refs.get(connection_id)
+ if wrapped_socket is None:
+ return SecurityConst.errSSLInternal
+ base_socket = wrapped_socket.socket
+
+ bytes_to_write = data_length_pointer[0]
+ data = ctypes.string_at(data_buffer, bytes_to_write)
+
+ timeout = wrapped_socket.gettimeout()
+ error = None
+ sent = 0
+
+ try:
+ while sent < bytes_to_write:
+ if timeout is None or timeout >= 0:
+ writables = util.wait_for_write([base_socket], timeout)
+ if not writables:
+ raise socket.error(errno.EAGAIN, 'timed out')
+ chunk_sent = base_socket.send(data)
+ sent += chunk_sent
+
+ # This has some needless copying here, but I'm not sure there's
+ # much value in optimising this data path.
+ data = data[chunk_sent:]
+ except (socket.error) as e:
+ error = e.errno
+
+ if error is not None and error != errno.EAGAIN:
+ if error == errno.ECONNRESET:
+ return SecurityConst.errSSLClosedAbort
+ raise
+
+ data_length_pointer[0] = sent
+ if sent != bytes_to_write:
+ return SecurityConst.errSSLWouldBlock
+
+ return 0
+ except Exception as e:
+ if wrapped_socket is not None:
+ wrapped_socket._exception = e
+ return SecurityConst.errSSLInternal
+
+
+# We need to keep these two objects references alive: if they get GC'd while
+# in use then SecureTransport could attempt to call a function that is in freed
+# memory. That would be...uh...bad. Yeah, that's the word. Bad.
+_read_callback_pointer = Security.SSLReadFunc(_read_callback)
+_write_callback_pointer = Security.SSLWriteFunc(_write_callback)
+
+
+class WrappedSocket(object):
+ """
+ API-compatibility wrapper exposing the stdlib wrapped-socket interface on
+ top of SecureTransport.
+
+ Note: _makefile_refs, _drop(), and _reuse() are needed for the garbage
+ collector of PyPy.
+ """
+ def __init__(self, socket):
+ self.socket = socket
+ self.context = None
+ self._makefile_refs = 0
+ self._closed = False
+ self._exception = None
+ self._keychain = None
+ self._keychain_dir = None
+ self._client_cert_chain = None
+
+ # We save off the previously-configured timeout and then set it to
+ # zero. This is done because we use select and friends to handle the
+ # timeouts, but if we leave the timeout set on the lower socket then
+ # Python will "kindly" call select on that socket again for us. Avoid
+ # that by forcing the timeout to zero.
+ self._timeout = self.socket.gettimeout()
+ self.socket.settimeout(0)
+
+ @contextlib.contextmanager
+ def _raise_on_error(self):
+ """
+ A context manager that can be used to wrap calls that do I/O from
+ SecureTransport. If any of the I/O callbacks hit an exception, this
+ context manager will correctly propagate the exception after the fact.
+ This avoids silently swallowing those exceptions.
+
+ It also correctly forces the socket closed.
+ """
+ self._exception = None
+
+ # We explicitly don't catch around this yield because in the unlikely
+ # event that an exception was hit in the block we don't want to swallow
+ # it.
+ yield
+ if self._exception is not None:
+ exception, self._exception = self._exception, None
+ self.close()
+ raise exception
+
+ def _set_ciphers(self):
+ """
+ Sets up the allowed ciphers. By default this matches the set in
+ util.ssl_.DEFAULT_CIPHERS, at least as supported by macOS. This is done
+ custom and doesn't allow changing at this time, mostly because parsing
+ OpenSSL cipher strings is going to be a freaking nightmare.
+ """
+ ciphers = (Security.SSLCipherSuite * len(CIPHER_SUITES))(*CIPHER_SUITES)
+ result = Security.SSLSetEnabledCiphers(
+ self.context, ciphers, len(CIPHER_SUITES)
+ )
+ _assert_no_error(result)
+
+ def _custom_validate(self, verify, trust_bundle):
+ """
+ Called when we have set custom validation. We do this in two cases:
+ first, when cert validation is entirely disabled; and second, when
+ using a custom trust DB.
+ """
+ # If we disabled cert validation, just say: cool.
+ if not verify:
+ return
+
+ # We want data in memory, so load it up.
+ if os.path.isfile(trust_bundle):
+ with open(trust_bundle, 'rb') as f:
+ trust_bundle = f.read()
+
+ cert_array = None
+ trust = Security.SecTrustRef()
+
+ try:
+ # Get a CFArray that contains the certs we want.
+ cert_array = _cert_array_from_pem(trust_bundle)
+
+ # Ok, now the hard part. We want to get the SecTrustRef that ST has
+ # created for this connection, shove our CAs into it, tell ST to
+ # ignore everything else it knows, and then ask if it can build a
+ # chain. This is a buuuunch of code.
+ result = Security.SSLCopyPeerTrust(
+ self.context, ctypes.byref(trust)
+ )
+ _assert_no_error(result)
+ if not trust:
+ raise ssl.SSLError("Failed to copy trust reference")
+
+ result = Security.SecTrustSetAnchorCertificates(trust, cert_array)
+ _assert_no_error(result)
+
+ result = Security.SecTrustSetAnchorCertificatesOnly(trust, True)
+ _assert_no_error(result)
+
+ trust_result = Security.SecTrustResultType()
+ result = Security.SecTrustEvaluate(
+ trust, ctypes.byref(trust_result)
+ )
+ _assert_no_error(result)
+ finally:
+ if trust:
+ CoreFoundation.CFRelease(trust)
+
+ if cert_array is not None:
+ CoreFoundation.CFRelease(cert_array)
+
+ # Ok, now we can look at what the result was.
+ successes = (
+ SecurityConst.kSecTrustResultUnspecified,
+ SecurityConst.kSecTrustResultProceed
+ )
+ if trust_result.value not in successes:
+ raise ssl.SSLError(
+ "certificate verify failed, error code: %d" %
+ trust_result.value
+ )
+
+ def handshake(self,
+ server_hostname,
+ verify,
+ trust_bundle,
+ min_version,
+ max_version,
+ client_cert,
+ client_key,
+ client_key_passphrase):
+ """
+ Actually performs the TLS handshake. This is run automatically by
+ wrapped socket, and shouldn't be needed in user code.
+ """
+ # First, we do the initial bits of connection setup. We need to create
+ # a context, set its I/O funcs, and set the connection reference.
+ self.context = Security.SSLCreateContext(
+ None, SecurityConst.kSSLClientSide, SecurityConst.kSSLStreamType
+ )
+ result = Security.SSLSetIOFuncs(
+ self.context, _read_callback_pointer, _write_callback_pointer
+ )
+ _assert_no_error(result)
+
+ # Here we need to compute the handle to use. We do this by taking the
+ # id of self modulo 2**31 - 1. If this is already in the dictionary, we
+ # just keep incrementing by one until we find a free space.
+ with _connection_ref_lock:
+ handle = id(self) % 2147483647
+ while handle in _connection_refs:
+ handle = (handle + 1) % 2147483647
+ _connection_refs[handle] = self
+
+ result = Security.SSLSetConnection(self.context, handle)
+ _assert_no_error(result)
+
+ # If we have a server hostname, we should set that too.
+ if server_hostname:
+ if not isinstance(server_hostname, bytes):
+ server_hostname = server_hostname.encode('utf-8')
+
+ result = Security.SSLSetPeerDomainName(
+ self.context, server_hostname, len(server_hostname)
+ )
+ _assert_no_error(result)
+
+ # Set up the ciphers.
+ self._set_ciphers()
+
+ # Set the minimum and maximum TLS versions.
+ result = Security.SSLSetProtocolVersionMin(self.context, min_version)
+ _assert_no_error(result)
+ result = Security.SSLSetProtocolVersionMax(self.context, max_version)
+ _assert_no_error(result)
+
+ # If there's a trust DB, we need to use it. We do that by telling
+ # SecureTransport to break on server auth. We also do that if we don't
+ # want to validate the certs at all: we just won't actually do any
+ # authing in that case.
+ if not verify or trust_bundle is not None:
+ result = Security.SSLSetSessionOption(
+ self.context,
+ SecurityConst.kSSLSessionOptionBreakOnServerAuth,
+ True
+ )
+ _assert_no_error(result)
+
+ # If there's a client cert, we need to use it.
+ if client_cert:
+ self._keychain, self._keychain_dir = _temporary_keychain()
+ self._client_cert_chain = _load_client_cert_chain(
+ self._keychain, client_cert, client_key
+ )
+ result = Security.SSLSetCertificate(
+ self.context, self._client_cert_chain
+ )
+ _assert_no_error(result)
+
+ while True:
+ with self._raise_on_error():
+ result = Security.SSLHandshake(self.context)
+
+ if result == SecurityConst.errSSLWouldBlock:
+ raise socket.timeout("handshake timed out")
+ elif result == SecurityConst.errSSLServerAuthCompleted:
+ self._custom_validate(verify, trust_bundle)
+ continue
+ else:
+ _assert_no_error(result)
+ break
+
+ def fileno(self):
+ return self.socket.fileno()
+
+ # Copy-pasted from Python 3.5 source code
+ def _decref_socketios(self):
+ if self._makefile_refs > 0:
+ self._makefile_refs -= 1
+ if self._closed:
+ self.close()
+
+ def recv(self, bufsiz):
+ buffer = ctypes.create_string_buffer(bufsiz)
+ bytes_read = self.recv_into(buffer, bufsiz)
+ data = buffer[:bytes_read]
+ return data
+
+ def recv_into(self, buffer, nbytes=None):
+ # Read short on EOF.
+ if self._closed:
+ return 0
+
+ if nbytes is None:
+ nbytes = len(buffer)
+
+ buffer = (ctypes.c_char * nbytes).from_buffer(buffer)
+ processed_bytes = ctypes.c_size_t(0)
+
+ with self._raise_on_error():
+ result = Security.SSLRead(
+ self.context, buffer, nbytes, ctypes.byref(processed_bytes)
+ )
+
+ # There are some result codes that we want to treat as "not always
+ # errors". Specifically, those are errSSLWouldBlock,
+ # errSSLClosedGraceful, and errSSLClosedNoNotify.
+ if (result == SecurityConst.errSSLWouldBlock):
+ # If we didn't process any bytes, then this was just a timeout.
+ # However, we can get errSSLWouldBlock in situations when we *did*
+ # read some data, and in those cases we should just read "short"
+ # and return.
+ if processed_bytes.value == 0:
+ # Timed out, no data read.
+ raise socket.timeout("recv timed out")
+ elif result in (SecurityConst.errSSLClosedGraceful, SecurityConst.errSSLClosedNoNotify):
+ # The remote peer has closed this connection. We should do so as
+ # well. Note that we don't actually return here because in
+ # principle this could actually be fired along with return data.
+ # It's unlikely though.
+ self.close()
+ else:
+ _assert_no_error(result)
+
+ # Ok, we read and probably succeeded. We should return whatever data
+ # was actually read.
+ return processed_bytes.value
+
+ def settimeout(self, timeout):
+ self._timeout = timeout
+
+ def gettimeout(self):
+ return self._timeout
+
+ def send(self, data):
+ processed_bytes = ctypes.c_size_t(0)
+
+ with self._raise_on_error():
+ result = Security.SSLWrite(
+ self.context, data, len(data), ctypes.byref(processed_bytes)
+ )
+
+ if result == SecurityConst.errSSLWouldBlock and processed_bytes.value == 0:
+ # Timed out
+ raise socket.timeout("send timed out")
+ else:
+ _assert_no_error(result)
+
+ # We sent, and probably succeeded. Tell them how much we sent.
+ return processed_bytes.value
+
+ def sendall(self, data):
+ total_sent = 0
+ while total_sent < len(data):
+ sent = self.send(data[total_sent:total_sent + SSL_WRITE_BLOCKSIZE])
+ total_sent += sent
+
+ def shutdown(self):
+ with self._raise_on_error():
+ Security.SSLClose(self.context)
+
+ def close(self):
+ # TODO: should I do clean shutdown here? Do I have to?
+ if self._makefile_refs < 1:
+ self._closed = True
+ if self.context:
+ CoreFoundation.CFRelease(self.context)
+ self.context = None
+ if self._client_cert_chain:
+ CoreFoundation.CFRelease(self._client_cert_chain)
+ self._client_cert_chain = None
+ if self._keychain:
+ Security.SecKeychainDelete(self._keychain)
+ CoreFoundation.CFRelease(self._keychain)
+ shutil.rmtree(self._keychain_dir)
+ self._keychain = self._keychain_dir = None
+ return self.socket.close()
+ else:
+ self._makefile_refs -= 1
+
+ def getpeercert(self, binary_form=False):
+ # Urgh, annoying.
+ #
+ # Here's how we do this:
+ #
+ # 1. Call SSLCopyPeerTrust to get hold of the trust object for this
+ # connection.
+ # 2. Call SecTrustGetCertificateAtIndex for index 0 to get the leaf.
+ # 3. To get the CN, call SecCertificateCopyCommonName and process that
+ # string so that it's of the appropriate type.
+ # 4. To get the SAN, we need to do something a bit more complex:
+ # a. Call SecCertificateCopyValues to get the data, requesting
+ # kSecOIDSubjectAltName.
+ # b. Mess about with this dictionary to try to get the SANs out.
+ #
+ # This is gross. Really gross. It's going to be a few hundred LoC extra
+ # just to repeat something that SecureTransport can *already do*. So my
+ # operating assumption at this time is that what we want to do is
+ # instead to just flag to urllib3 that it shouldn't do its own hostname
+ # validation when using SecureTransport.
+ if not binary_form:
+ raise ValueError(
+ "SecureTransport only supports dumping binary certs"
+ )
+ trust = Security.SecTrustRef()
+ certdata = None
+ der_bytes = None
+
+ try:
+ # Grab the trust store.
+ result = Security.SSLCopyPeerTrust(
+ self.context, ctypes.byref(trust)
+ )
+ _assert_no_error(result)
+ if not trust:
+ # Probably we haven't done the handshake yet. No biggie.
+ return None
+
+ cert_count = Security.SecTrustGetCertificateCount(trust)
+ if not cert_count:
+ # Also a case that might happen if we haven't handshaked.
+ # Handshook? Handshaken?
+ return None
+
+ leaf = Security.SecTrustGetCertificateAtIndex(trust, 0)
+ assert leaf
+
+ # Ok, now we want the DER bytes.
+ certdata = Security.SecCertificateCopyData(leaf)
+ assert certdata
+
+ data_length = CoreFoundation.CFDataGetLength(certdata)
+ data_buffer = CoreFoundation.CFDataGetBytePtr(certdata)
+ der_bytes = ctypes.string_at(data_buffer, data_length)
+ finally:
+ if certdata:
+ CoreFoundation.CFRelease(certdata)
+ if trust:
+ CoreFoundation.CFRelease(trust)
+
+ return der_bytes
+
+ def _reuse(self):
+ self._makefile_refs += 1
+
+ def _drop(self):
+ if self._makefile_refs < 1:
+ self.close()
+ else:
+ self._makefile_refs -= 1
+
+
+if _fileobject: # Platform-specific: Python 2
+ def makefile(self, mode, bufsize=-1):
+ self._makefile_refs += 1
+ return _fileobject(self, mode, bufsize, close=True)
+else: # Platform-specific: Python 3
+ def makefile(self, mode="r", buffering=None, *args, **kwargs):
+ # We disable buffering with SecureTransport because it conflicts with
+ # the buffering that ST does internally (see issue #1153 for more).
+ buffering = 0
+ return backport_makefile(self, mode, buffering, *args, **kwargs)
+
+WrappedSocket.makefile = makefile
+
+
+class SecureTransportContext(object):
+ """
+ I am a wrapper class for the SecureTransport library, to translate the
+ interface of the standard library ``SSLContext`` object to calls into
+ SecureTransport.
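+
+ A stdlib-flavoured sketch (the CA bundle path and host name are
+ illustrative; ``sock`` is assumed to be a connected TCP socket)::
+
+ ctx = SecureTransportContext(ssl.PROTOCOL_SSLv23)
+ ctx.verify_mode = ssl.CERT_REQUIRED
+ ctx.load_verify_locations(cafile='/usr/local/etc/ca-bundle.pem')
+ tls_sock = ctx.wrap_socket(sock, server_hostname='example.com')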
+ """
+ def __init__(self, protocol):
+ self._min_version, self._max_version = _protocol_to_min_max[protocol]
+ self._options = 0
+ self._verify = False
+ self._trust_bundle = None
+ self._client_cert = None
+ self._client_key = None
+ self._client_key_passphrase = None
+
+ @property
+ def check_hostname(self):
+ """
+ SecureTransport cannot have its hostname checking disabled. For more,
+ see the comment on getpeercert() in this file.
+ """
+ return True
+
+ @check_hostname.setter
+ def check_hostname(self, value):
+ """
+ SecureTransport cannot have its hostname checking disabled. For more,
+ see the comment on getpeercert() in this file.
+ """
+ pass
+
+ @property
+ def options(self):
+ # TODO: Well, crap.
+ #
+ # So this is the bit of the code that is the most likely to cause us
+ # trouble. Essentially we need to enumerate all of the SSL options that
+ # users might want to use and try to see if we can sensibly translate
+ # them, or whether we should just ignore them.
+ return self._options
+
+ @options.setter
+ def options(self, value):
+ # TODO: Update in line with above.
+ self._options = value
+
+ @property
+ def verify_mode(self):
+ return ssl.CERT_REQUIRED if self._verify else ssl.CERT_NONE
+
+ @verify_mode.setter
+ def verify_mode(self, value):
+ self._verify = True if value == ssl.CERT_REQUIRED else False
+
+ def set_default_verify_paths(self):
+ # So, this has to do something a bit weird. Specifically, what it does
+ # is nothing.
+ #
+ # This means that, if we had previously had load_verify_locations
+ # called, this does not undo that. We need to do that because it turns
+ # out that the rest of the urllib3 code will attempt to load the
+ # default verify paths if it hasn't been told about any paths, even if
+ # the context itself was configured sometime earlier. We resolve that by
+ # just ignoring it.
+ pass
+
+ def load_default_certs(self):
+ return self.set_default_verify_paths()
+
+ def set_ciphers(self, ciphers):
+ # For now, we just require the default cipher string.
+ if ciphers != util.ssl_.DEFAULT_CIPHERS:
+ raise ValueError(
+ "SecureTransport doesn't support custom cipher strings"
+ )
+
+ def load_verify_locations(self, cafile=None, capath=None, cadata=None):
+ # OK, we only really support cadata and cafile.
+ if capath is not None:
+ raise ValueError(
+ "SecureTransport does not support cert directories"
+ )
+
+ self._trust_bundle = cafile or cadata
+
+ def load_cert_chain(self, certfile, keyfile=None, password=None):
+ self._client_cert = certfile
+ self._client_key = keyfile
+ self._client_key_passphrase = password
+
+ def wrap_socket(self, sock, server_side=False,
+ do_handshake_on_connect=True, suppress_ragged_eofs=True,
+ server_hostname=None):
+ # So, what do we do here? Firstly, we assert some properties. This is a
+ # stripped down shim, so there is some functionality we don't support.
+ # See PEP 543 for the real deal.
+ assert not server_side
+ assert do_handshake_on_connect
+ assert suppress_ragged_eofs
+
+ # Ok, we're good to go. Now we want to create the wrapped socket object
+ # and store it in the appropriate place.
+ wrapped_socket = WrappedSocket(sock)
+
+ # Now we can handshake
+ wrapped_socket.handshake(
+ server_hostname, self._verify, self._trust_bundle,
+ self._min_version, self._max_version, self._client_cert,
+ self._client_key, self._client_key_passphrase
+ )
+ return wrapped_socket
diff --git a/collectors/python.d.plugin/python_modules/urllib3/contrib/socks.py b/collectors/python.d.plugin/python_modules/urllib3/contrib/socks.py
new file mode 100644
index 0000000..1cb7928
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/urllib3/contrib/socks.py
@@ -0,0 +1,189 @@
+# -*- coding: utf-8 -*-
+# SPDX-License-Identifier: MIT
+"""
+This module contains provisional support for SOCKS proxies from within
+urllib3. This module supports SOCKS4 (specifically the SOCKS4A variant) and
+SOCKS5. To enable its functionality, either install PySocks or install this
+module with the ``socks`` extra.
+
+The SOCKS implementation supports the full range of urllib3 features. It also
+supports the following SOCKS features:
+
+- SOCKS4
+- SOCKS4a
+- SOCKS5
+- Usernames and passwords for the SOCKS proxy
+
+Known Limitations:
+
+- Currently PySocks does not support contacting remote websites via literal
+ IPv6 addresses. Any such connection attempt will fail. You must use a domain
+ name.
+- Currently PySocks does not support IPv6 connections to the SOCKS proxy. Any
+ such connection attempt will fail.
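+
+A minimal usage sketch (the proxy address and target URL are illustrative)::
+
+ from urllib3.contrib.socks import SOCKSProxyManager
+
+ proxy = SOCKSProxyManager('socks5h://127.0.0.1:1080/')
+ resp = proxy.request('GET', 'https://example.com/')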
+"""
+from __future__ import absolute_import
+
+try:
+ import socks
+except ImportError:
+ import warnings
+ from ..exceptions import DependencyWarning
+
+ warnings.warn((
+ 'SOCKS support in urllib3 requires the installation of optional '
+ 'dependencies: specifically, PySocks. For more information, see '
+ 'https://urllib3.readthedocs.io/en/latest/contrib.html#socks-proxies'
+ ),
+ DependencyWarning
+ )
+ raise
+
+from socket import error as SocketError, timeout as SocketTimeout
+
+from ..connection import (
+ HTTPConnection, HTTPSConnection
+)
+from ..connectionpool import (
+ HTTPConnectionPool, HTTPSConnectionPool
+)
+from ..exceptions import ConnectTimeoutError, NewConnectionError
+from ..poolmanager import PoolManager
+from ..util.url import parse_url
+
+try:
+ import ssl
+except ImportError:
+ ssl = None
+
+
+class SOCKSConnection(HTTPConnection):
+ """
+ A plain-text HTTP connection that connects via a SOCKS proxy.
+ """
+ def __init__(self, *args, **kwargs):
+ self._socks_options = kwargs.pop('_socks_options')
+ super(SOCKSConnection, self).__init__(*args, **kwargs)
+
+ def _new_conn(self):
+ """
+ Establish a new connection via the SOCKS proxy.
+ """
+ extra_kw = {}
+ if self.source_address:
+ extra_kw['source_address'] = self.source_address
+
+ if self.socket_options:
+ extra_kw['socket_options'] = self.socket_options
+
+ try:
+ conn = socks.create_connection(
+ (self.host, self.port),
+ proxy_type=self._socks_options['socks_version'],
+ proxy_addr=self._socks_options['proxy_host'],
+ proxy_port=self._socks_options['proxy_port'],
+ proxy_username=self._socks_options['username'],
+ proxy_password=self._socks_options['password'],
+ proxy_rdns=self._socks_options['rdns'],
+ timeout=self.timeout,
+ **extra_kw
+ )
+
+ except SocketTimeout as e:
+ raise ConnectTimeoutError(
+ self, "Connection to %s timed out. (connect timeout=%s)" %
+ (self.host, self.timeout))
+
+ except socks.ProxyError as e:
+ # This is fragile as hell, but it seems to be the only way to raise
+ # useful errors here.
+ if e.socket_err:
+ error = e.socket_err
+ if isinstance(error, SocketTimeout):
+ raise ConnectTimeoutError(
+ self,
+ "Connection to %s timed out. (connect timeout=%s)" %
+ (self.host, self.timeout)
+ )
+ else:
+ raise NewConnectionError(
+ self,
+ "Failed to establish a new connection: %s" % error
+ )
+ else:
+ raise NewConnectionError(
+ self,
+ "Failed to establish a new connection: %s" % e
+ )
+
+ except SocketError as e: # Defensive: PySocks should catch all these.
+ raise NewConnectionError(
+ self, "Failed to establish a new connection: %s" % e)
+
+ return conn
+
+
+# We don't need to duplicate the Verified/Unverified distinction from
+# urllib3/connection.py here because the HTTPSConnection will already have been
+# correctly set to either the Verified or Unverified form by that module. This
+# means the SOCKSHTTPSConnection will automatically be the correct type.
+class SOCKSHTTPSConnection(SOCKSConnection, HTTPSConnection):
+ pass
+
+
+class SOCKSHTTPConnectionPool(HTTPConnectionPool):
+ ConnectionCls = SOCKSConnection
+
+
+class SOCKSHTTPSConnectionPool(HTTPSConnectionPool):
+ ConnectionCls = SOCKSHTTPSConnection
+
+
+class SOCKSProxyManager(PoolManager):
+ """
+ A version of the urllib3 ProxyManager that routes connections via the
+ defined SOCKS proxy.
+ """
+ pool_classes_by_scheme = {
+ 'http': SOCKSHTTPConnectionPool,
+ 'https': SOCKSHTTPSConnectionPool,
+ }
+
+ def __init__(self, proxy_url, username=None, password=None,
+ num_pools=10, headers=None, **connection_pool_kw):
+ parsed = parse_url(proxy_url)
+
+ if parsed.scheme == 'socks5':
+ socks_version = socks.PROXY_TYPE_SOCKS5
+ rdns = False
+ elif parsed.scheme == 'socks5h':
+ socks_version = socks.PROXY_TYPE_SOCKS5
+ rdns = True
+ elif parsed.scheme == 'socks4':
+ socks_version = socks.PROXY_TYPE_SOCKS4
+ rdns = False
+ elif parsed.scheme == 'socks4a':
+ socks_version = socks.PROXY_TYPE_SOCKS4
+ rdns = True
+ else:
+ raise ValueError(
+ "Unable to determine SOCKS version from %s" % proxy_url
+ )
+
+ self.proxy_url = proxy_url
+
+ socks_options = {
+ 'socks_version': socks_version,
+ 'proxy_host': parsed.host,
+ 'proxy_port': parsed.port,
+ 'username': username,
+ 'password': password,
+ 'rdns': rdns
+ }
+ connection_pool_kw['_socks_options'] = socks_options
+
+ super(SOCKSProxyManager, self).__init__(
+ num_pools, headers, **connection_pool_kw
+ )
+
+ self.pool_classes_by_scheme = SOCKSProxyManager.pool_classes_by_scheme
diff --git a/collectors/python.d.plugin/python_modules/urllib3/exceptions.py b/collectors/python.d.plugin/python_modules/urllib3/exceptions.py
new file mode 100644
index 0000000..a71cabe
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/urllib3/exceptions.py
@@ -0,0 +1,247 @@
+# SPDX-License-Identifier: MIT
+from __future__ import absolute_import
+from .packages.six.moves.http_client import (
+ IncompleteRead as httplib_IncompleteRead
+)
+# Base Exceptions
+
+
+class HTTPError(Exception):
+ "Base exception used by this module."
+ pass
+
+
+class HTTPWarning(Warning):
+ "Base warning used by this module."
+ pass
+
+
+class PoolError(HTTPError):
+ "Base exception for errors caused within a pool."
+ def __init__(self, pool, message):
+ self.pool = pool
+ HTTPError.__init__(self, "%s: %s" % (pool, message))
+
+ def __reduce__(self):
+ # For pickling purposes.
+ return self.__class__, (None, None)
+
+
+class RequestError(PoolError):
+ "Base exception for PoolErrors that have associated URLs."
+ def __init__(self, pool, url, message):
+ self.url = url
+ PoolError.__init__(self, pool, message)
+
+ def __reduce__(self):
+ # For pickling purposes.
+ return self.__class__, (None, self.url, None)
+
+
+class SSLError(HTTPError):
+ "Raised when SSL certificate fails in an HTTPS connection."
+ pass
+
+
+class ProxyError(HTTPError):
+ "Raised when the connection to a proxy fails."
+ pass
+
+
+class DecodeError(HTTPError):
+ "Raised when automatic decoding based on Content-Type fails."
+ pass
+
+
+class ProtocolError(HTTPError):
+ "Raised when something unexpected happens mid-request/response."
+ pass
+
+
+#: Renamed to ProtocolError but aliased for backwards compatibility.
+ConnectionError = ProtocolError
+
+
+# Leaf Exceptions
+
+class MaxRetryError(RequestError):
+ """Raised when the maximum number of retries is exceeded.
+
+ :param pool: The connection pool
+ :type pool: :class:`~urllib3.connectionpool.HTTPConnectionPool`
+ :param string url: The requested Url
+ :param exceptions.Exception reason: The underlying error
+
+ """
+
+ def __init__(self, pool, url, reason=None):
+ self.reason = reason
+
+ message = "Max retries exceeded with url: %s (Caused by %r)" % (
+ url, reason)
+
+ RequestError.__init__(self, pool, url, message)
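+
+ # Handling sketch (PoolManager/request are standard urllib3 API; the URL
+ # and retry count are illustrative):
+ #
+ #     import urllib3
+ #
+ #     try:
+ #         urllib3.PoolManager(retries=2).request('GET', 'https://example.com/')
+ #     except MaxRetryError as err:
+ #         print('gave up:', err.reason)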
+
+
+class HostChangedError(RequestError):
+ "Raised when an existing pool gets a request for a foreign host."
+
+ def __init__(self, pool, url, retries=3):
+ message = "Tried to open a foreign host with url: %s" % url
+ RequestError.__init__(self, pool, url, message)
+ self.retries = retries
+
+
+class TimeoutStateError(HTTPError):
+ """ Raised when passing an invalid state to a timeout """
+ pass
+
+
+class TimeoutError(HTTPError):
+ """ Raised when a socket timeout error occurs.
+
+ Catching this error will catch both :exc:`ReadTimeoutErrors
+ <ReadTimeoutError>` and :exc:`ConnectTimeoutErrors <ConnectTimeoutError>`.
+ """
+ pass
+
+
+class ReadTimeoutError(TimeoutError, RequestError):
+ "Raised when a socket timeout occurs while receiving data from a server"
+ pass
+
+
+# This timeout error does not have a URL attached and needs to inherit from the
+# base HTTPError
+class ConnectTimeoutError(TimeoutError):
+ "Raised when a socket timeout occurs while connecting to a server"
+ pass
+
+
+class NewConnectionError(ConnectTimeoutError, PoolError):
+ "Raised when we fail to establish a new connection. Usually ECONNREFUSED."
+ pass
+
+
+class EmptyPoolError(PoolError):
+ "Raised when a pool runs out of connections and no more are allowed."
+ pass
+
+
+class ClosedPoolError(PoolError):
+ "Raised when a request enters a pool after the pool has been closed."
+ pass
+
+
+class LocationValueError(ValueError, HTTPError):
+ "Raised when there is something wrong with a given URL input."
+ pass
+
+
+class LocationParseError(LocationValueError):
+ "Raised when get_host or similar fails to parse the URL input."
+
+ def __init__(self, location):
+ message = "Failed to parse: %s" % location
+ HTTPError.__init__(self, message)
+
+ self.location = location
+
+
+class ResponseError(HTTPError):
+ "Used as a container for an error reason supplied in a MaxRetryError."
+ GENERIC_ERROR = 'too many error responses'
+ SPECIFIC_ERROR = 'too many {status_code} error responses'
+
+
+class SecurityWarning(HTTPWarning):
+ "Warned when perfoming security reducing actions"
+ pass
+
+
+class SubjectAltNameWarning(SecurityWarning):
+ "Warned when connecting to a host with a certificate missing a SAN."
+ pass
+
+
+class InsecureRequestWarning(SecurityWarning):
+ "Warned when making an unverified HTTPS request."
+ pass
+
+
+class SystemTimeWarning(SecurityWarning):
+ "Warned when system time is suspected to be wrong"
+ pass
+
+
+class InsecurePlatformWarning(SecurityWarning):
+ "Warned when certain SSL configuration is not available on a platform."
+ pass
+
+
+class SNIMissingWarning(HTTPWarning):
+ "Warned when making a HTTPS request without SNI available."
+ pass
+
+
+class DependencyWarning(HTTPWarning):
+ """
+ Warned when an attempt is made to import a module with missing optional
+ dependencies.
+ """
+ pass
+
+
+class ResponseNotChunked(ProtocolError, ValueError):
+ "Response needs to be chunked in order to read it as chunks."
+ pass
+
+
+class BodyNotHttplibCompatible(HTTPError):
+ """
+ Body should be httplib.HTTPResponse like (have an fp attribute which
+ returns raw chunks) for read_chunked().
+ """
+ pass
+
+
+class IncompleteRead(HTTPError, httplib_IncompleteRead):
+ """
+ Response length doesn't match expected Content-Length
+
+ Subclass of http_client.IncompleteRead to allow int value
+ for `partial` to avoid creating large objects on streamed
+ reads.
+ """
+ def __init__(self, partial, expected):
+ super(IncompleteRead, self).__init__(partial, expected)
+
+ def __repr__(self):
+ return ('IncompleteRead(%i bytes read, '
+ '%i more expected)' % (self.partial, self.expected))
+
+
+class InvalidHeader(HTTPError):
+ "The header provided was somehow invalid."
+ pass
+
+
+class ProxySchemeUnknown(AssertionError, ValueError):
+ "ProxyManager does not support the supplied scheme"
+ # TODO(t-8ch): Stop inheriting from AssertionError in v2.0.
+
+ def __init__(self, scheme):
+ message = "Not supported proxy scheme %s" % scheme
+ super(ProxySchemeUnknown, self).__init__(message)
+
+
+class HeaderParsingError(HTTPError):
+ "Raised by assert_header_parsing, but we convert it to a log.warning statement."
+ def __init__(self, defects, unparsed_data):
+ message = '%s, unparsed data: %r' % (defects or 'Unknown', unparsed_data)
+ super(HeaderParsingError, self).__init__(message)
+
+
+class UnrewindableBodyError(HTTPError):
+ "urllib3 encountered an error when trying to rewind a body"
+ pass
diff --git a/collectors/python.d.plugin/python_modules/urllib3/fields.py b/collectors/python.d.plugin/python_modules/urllib3/fields.py
new file mode 100644
index 0000000..de7577b
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/urllib3/fields.py
@@ -0,0 +1,179 @@
+# SPDX-License-Identifier: MIT
+from __future__ import absolute_import
+import email.utils
+import mimetypes
+
+from .packages import six
+
+
+def guess_content_type(filename, default='application/octet-stream'):
+ """
+ Guess the "Content-Type" of a file.
+
+ :param filename:
+ The filename to guess the "Content-Type" of using :mod:`mimetypes`.
+ :param default:
+ If no "Content-Type" can be guessed, default to `default`.
+ """
+ if filename:
+ return mimetypes.guess_type(filename)[0] or default
+ return default
+
+
+def format_header_param(name, value):
+ """
+ Helper function to format and quote a single header parameter.
+
+ Particularly useful for header parameters which might contain
+ non-ASCII values, like file names. This follows RFC 2231, as
+ suggested by RFC 2388 Section 4.4.
+
+ :param name:
+ The name of the parameter, a string expected to be ASCII only.
+ :param value:
+ The value of the parameter, provided as a unicode string.
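+
+ Illustrative results (exact output may differ slightly by Python version)::
+
+ format_header_param('filename', u'report.csv')
+ # -> 'filename="report.csv"'
+ format_header_param('filename', u'r\xe9sum\xe9.txt')
+ # -> "filename*=utf-8''r%C3%A9sum%C3%A9.txt"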
+ """
+ if not any(ch in value for ch in '"\\\r\n'):
+ result = '%s="%s"' % (name, value)
+ try:
+ result.encode('ascii')
+ except (UnicodeEncodeError, UnicodeDecodeError):
+ pass
+ else:
+ return result
+ if not six.PY3 and isinstance(value, six.text_type): # Python 2:
+ value = value.encode('utf-8')
+ value = email.utils.encode_rfc2231(value, 'utf-8')
+ value = '%s*=%s' % (name, value)
+ return value
+
+
+class RequestField(object):
+ """
+ A data container for request body parameters.
+
+ :param name:
+ The name of this request field.
+ :param data:
+ The data/value body.
+ :param filename:
+ An optional filename of the request field.
+ :param headers:
+ An optional dict-like object of headers to initially use for the field.
+ """
+ def __init__(self, name, data, filename=None, headers=None):
+ self._name = name
+ self._filename = filename
+ self.data = data
+ self.headers = {}
+ if headers:
+ self.headers = dict(headers)
+
+ @classmethod
+ def from_tuples(cls, fieldname, value):
+ """
+ A :class:`~urllib3.fields.RequestField` factory from old-style tuple parameters.
+
+ Supports constructing :class:`~urllib3.fields.RequestField` from
+ parameter of key/value strings AND key/filetuple. A filetuple is a
+ (filename, data, MIME type) tuple where the MIME type is optional.
+ For example::
+
+ 'foo': 'bar',
+ 'fakefile': ('foofile.txt', 'contents of foofile'),
+ 'realfile': ('barfile.txt', open('realfile').read()),
+ 'typedfile': ('bazfile.bin', open('bazfile').read(), 'image/jpeg'),
+ 'nonamefile': 'contents of nonamefile field',
+
+ Field names and filenames must be unicode.
+ """
+ if isinstance(value, tuple):
+ if len(value) == 3:
+ filename, data, content_type = value
+ else:
+ filename, data = value
+ content_type = guess_content_type(filename)
+ else:
+ filename = None
+ content_type = None
+ data = value
+
+ request_param = cls(fieldname, data, filename=filename)
+ request_param.make_multipart(content_type=content_type)
+
+ return request_param
+
+ def _render_part(self, name, value):
+ """
+ Overridable helper function to format a single header parameter.
+
+ :param name:
+ The name of the parameter, a string expected to be ASCII only.
+ :param value:
+ The value of the parameter, provided as a unicode string.
+ """
+ return format_header_param(name, value)
+
+ def _render_parts(self, header_parts):
+ """
+ Helper function to format and quote a single header.
+
+ Useful for single headers that are composed of multiple items. E.g.,
+ 'Content-Disposition' fields.
+
+ :param header_parts:
+ A sequence of (k, v) tuples or a :class:`dict` of (k, v) to format
+ as `k1="v1"; k2="v2"; ...`.
+ """
+ parts = []
+ iterable = header_parts
+ if isinstance(header_parts, dict):
+ iterable = header_parts.items()
+
+ for name, value in iterable:
+ if value is not None:
+ parts.append(self._render_part(name, value))
+
+ return '; '.join(parts)
+
+ def render_headers(self):
+ """
+ Renders the headers for this request field.
+ """
+ lines = []
+
+ sort_keys = ['Content-Disposition', 'Content-Type', 'Content-Location']
+ for sort_key in sort_keys:
+ if self.headers.get(sort_key, False):
+ lines.append('%s: %s' % (sort_key, self.headers[sort_key]))
+
+ for header_name, header_value in self.headers.items():
+ if header_name not in sort_keys:
+ if header_value:
+ lines.append('%s: %s' % (header_name, header_value))
+
+ lines.append('\r\n')
+ return '\r\n'.join(lines)
+
+ def make_multipart(self, content_disposition=None, content_type=None,
+ content_location=None):
+ """
+ Makes this request field into a multipart request field.
+
+ This method sets the "Content-Disposition", "Content-Type" and
+ "Content-Location" headers on the request parameter.
+
+ :param content_type:
+ The 'Content-Type' of the request body.
+ :param content_location:
+ The 'Content-Location' of the request body.
+
+ """
+ self.headers['Content-Disposition'] = content_disposition or 'form-data'
+ self.headers['Content-Disposition'] += '; '.join([
+ '', self._render_parts(
+ (('name', self._name), ('filename', self._filename))
+ )
+ ])
+ self.headers['Content-Type'] = content_type
+ self.headers['Content-Location'] = content_location
diff --git a/collectors/python.d.plugin/python_modules/urllib3/filepost.py b/collectors/python.d.plugin/python_modules/urllib3/filepost.py
new file mode 100644
index 0000000..3febc9c
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/urllib3/filepost.py
@@ -0,0 +1,95 @@
+# SPDX-License-Identifier: MIT
+from __future__ import absolute_import
+import codecs
+
+from uuid import uuid4
+from io import BytesIO
+
+from .packages import six
+from .packages.six import b
+from .fields import RequestField
+
+writer = codecs.lookup('utf-8')[3]
+
+
+def choose_boundary():
+ """
+ Our embarrassingly-simple replacement for mimetools.choose_boundary.
+ """
+ return uuid4().hex
+
+
+def iter_field_objects(fields):
+ """
+ Iterate over fields.
+
+ Supports list of (k, v) tuples and dicts, and lists of
+ :class:`~urllib3.fields.RequestField`.
+
+ """
+ if isinstance(fields, dict):
+ i = six.iteritems(fields)
+ else:
+ i = iter(fields)
+
+ for field in i:
+ if isinstance(field, RequestField):
+ yield field
+ else:
+ yield RequestField.from_tuples(*field)
+
+
+def iter_fields(fields):
+ """
+ .. deprecated:: 1.6
+
+ Iterate over fields.
+
+ The addition of :class:`~urllib3.fields.RequestField` makes this function
+ obsolete. Instead, use :func:`iter_field_objects`, which returns
+ :class:`~urllib3.fields.RequestField` objects.
+
+ Supports list of (k, v) tuples and dicts.
+ """
+ if isinstance(fields, dict):
+ return ((k, v) for k, v in six.iteritems(fields))
+
+ return ((k, v) for k, v in fields)
+
+
+def encode_multipart_formdata(fields, boundary=None):
+ """
+ Encode a dictionary of ``fields`` using the multipart/form-data MIME format.
+
+ :param fields:
+ Dictionary of fields or list of (key, :class:`~urllib3.fields.RequestField`).
+
+ :param boundary:
+ If not specified, then a random boundary will be generated using
+ :func:`choose_boundary`.
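+
+ A minimal sketch (field names and file contents are illustrative)::
+
+ body, content_type = encode_multipart_formdata(
+ {'field': 'value', 'file': ('name.txt', 'file contents')})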
+ """
+ body = BytesIO()
+ if boundary is None:
+ boundary = choose_boundary()
+
+ for field in iter_field_objects(fields):
+ body.write(b('--%s\r\n' % (boundary)))
+
+ writer(body).write(field.render_headers())
+ data = field.data
+
+ if isinstance(data, int):
+ data = str(data) # Backwards compatibility
+
+ if isinstance(data, six.text_type):
+ writer(body).write(data)
+ else:
+ body.write(data)
+
+ body.write(b'\r\n')
+
+ body.write(b('--%s--\r\n' % (boundary)))
+
+ content_type = str('multipart/form-data; boundary=%s' % boundary)
+
+ return body.getvalue(), content_type
diff --git a/collectors/python.d.plugin/python_modules/urllib3/packages/__init__.py b/collectors/python.d.plugin/python_modules/urllib3/packages/__init__.py
new file mode 100644
index 0000000..170e974
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/urllib3/packages/__init__.py
@@ -0,0 +1,5 @@
+from __future__ import absolute_import
+
+from . import ssl_match_hostname
+
+__all__ = ('ssl_match_hostname', )
diff --git a/collectors/python.d.plugin/python_modules/urllib3/packages/backports/__init__.py b/collectors/python.d.plugin/python_modules/urllib3/packages/backports/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/urllib3/packages/backports/__init__.py
diff --git a/collectors/python.d.plugin/python_modules/urllib3/packages/backports/makefile.py b/collectors/python.d.plugin/python_modules/urllib3/packages/backports/makefile.py
new file mode 100644
index 0000000..8ab122f
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/urllib3/packages/backports/makefile.py
@@ -0,0 +1,54 @@
+# -*- coding: utf-8 -*-
+# SPDX-License-Identifier: MIT
+"""
+backports.makefile
+~~~~~~~~~~~~~~~~~~
+
+Backports the Python 3 ``socket.makefile`` method for use with anything that
+wants to create a "fake" socket object.
+"""
+import io
+
+from socket import SocketIO
+
+
+def backport_makefile(self, mode="r", buffering=None, encoding=None,
+ errors=None, newline=None):
+ """
+ Backport of ``socket.makefile`` from Python 3.5.
+ """
+ if not set(mode) <= set(["r", "w", "b"]):
+ raise ValueError(
+ "invalid mode %r (only r, w, b allowed)" % (mode,)
+ )
+ writing = "w" in mode
+ reading = "r" in mode or not writing
+ assert reading or writing
+ binary = "b" in mode
+ rawmode = ""
+ if reading:
+ rawmode += "r"
+ if writing:
+ rawmode += "w"
+ raw = SocketIO(self, rawmode)
+ self._makefile_refs += 1
+ if buffering is None:
+ buffering = -1
+ if buffering < 0:
+ buffering = io.DEFAULT_BUFFER_SIZE
+ if buffering == 0:
+ if not binary:
+ raise ValueError("unbuffered streams must be binary")
+ return raw
+ if reading and writing:
+ buffer = io.BufferedRWPair(raw, raw, buffering)
+ elif reading:
+ buffer = io.BufferedReader(raw, buffering)
+ else:
+ assert writing
+ buffer = io.BufferedWriter(raw, buffering)
+ if binary:
+ return buffer
+ text = io.TextIOWrapper(buffer, encoding, errors, newline)
+ text.mode = mode
+ return text
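The in-tree consumer of this backport is urllib3's pyOpenSSL wrapper, which attaches backport_makefile() as the makefile method of its socket-like wrapper. A hedged sketch of that wiring; CountingSocket is a hypothetical stand-in, and the only contract the function needs is a ``_makefile_refs`` counter plus the socket methods that ``socket.SocketIO`` expects:

    import socket
    from urllib3.packages.backports.makefile import backport_makefile

    class CountingSocket(socket.socket):
        # backport_makefile() bumps this counter on every call
        _makefile_refs = 0

    CountingSocket.makefile = backport_makefile

    s = CountingSocket(socket.AF_INET, socket.SOCK_STREAM)
    f = s.makefile('rb')               # io.BufferedReader over socket.SocketIO
    print(type(f).__name__, s._makefile_refs)  # BufferedReader 1
    s.close()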
diff --git a/collectors/python.d.plugin/python_modules/urllib3/packages/ordered_dict.py b/collectors/python.d.plugin/python_modules/urllib3/packages/ordered_dict.py
new file mode 100644
index 0000000..9f7c0e6
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/urllib3/packages/ordered_dict.py
@@ -0,0 +1,260 @@
+# Backport of OrderedDict() class that runs on Python 2.4, 2.5, 2.6, 2.7 and pypy.
+# Passes Python2.7's test suite and incorporates all the latest updates.
+# Copyright 2009 Raymond Hettinger, released under the MIT License.
+# http://code.activestate.com/recipes/576693/
+# SPDX-License-Identifier: MIT
+try:
+ from thread import get_ident as _get_ident
+except ImportError:
+ from dummy_thread import get_ident as _get_ident
+
+try:
+ from _abcoll import KeysView, ValuesView, ItemsView
+except ImportError:
+ pass
+
+
+class OrderedDict(dict):
+ 'Dictionary that remembers insertion order'
+ # An inherited dict maps keys to values.
+ # The inherited dict provides __getitem__, __len__, __contains__, and get.
+ # The remaining methods are order-aware.
+ # Big-O running times for all methods are the same as for regular dictionaries.
+
+ # The internal self.__map dictionary maps keys to links in a doubly linked list.
+ # The circular doubly linked list starts and ends with a sentinel element.
+ # The sentinel element never gets deleted (this simplifies the algorithm).
+ # Each link is stored as a list of length three: [PREV, NEXT, KEY].
+
+ def __init__(self, *args, **kwds):
+ '''Initialize an ordered dictionary. Signature is the same as for
+ regular dictionaries, but keyword arguments are not recommended
+ because their insertion order is arbitrary.
+
+ '''
+ if len(args) > 1:
+ raise TypeError('expected at most 1 arguments, got %d' % len(args))
+ try:
+ self.__root
+ except AttributeError:
+ self.__root = root = [] # sentinel node
+ root[:] = [root, root, None]
+ self.__map = {}
+ self.__update(*args, **kwds)
+
+ def __setitem__(self, key, value, dict_setitem=dict.__setitem__):
+ 'od.__setitem__(i, y) <==> od[i]=y'
+ # Setting a new item creates a new link which goes at the end of the linked
+ # list, and the inherited dictionary is updated with the new key/value pair.
+ if key not in self:
+ root = self.__root
+ last = root[0]
+ last[1] = root[0] = self.__map[key] = [last, root, key]
+ dict_setitem(self, key, value)
+
+ def __delitem__(self, key, dict_delitem=dict.__delitem__):
+ 'od.__delitem__(y) <==> del od[y]'
+ # Deleting an existing item uses self.__map to find the link which is
+ # then removed by updating the links in the predecessor and successor nodes.
+ dict_delitem(self, key)
+ link_prev, link_next, key = self.__map.pop(key)
+ link_prev[1] = link_next
+ link_next[0] = link_prev
+
+ def __iter__(self):
+ 'od.__iter__() <==> iter(od)'
+ root = self.__root
+ curr = root[1]
+ while curr is not root:
+ yield curr[2]
+ curr = curr[1]
+
+ def __reversed__(self):
+ 'od.__reversed__() <==> reversed(od)'
+ root = self.__root
+ curr = root[0]
+ while curr is not root:
+ yield curr[2]
+ curr = curr[0]
+
+ def clear(self):
+ 'od.clear() -> None. Remove all items from od.'
+ try:
+ for node in self.__map.itervalues():
+ del node[:]
+ root = self.__root
+ root[:] = [root, root, None]
+ self.__map.clear()
+ except AttributeError:
+ pass
+ dict.clear(self)
+
+ def popitem(self, last=True):
+ '''od.popitem() -> (k, v), return and remove a (key, value) pair.
+ Pairs are returned in LIFO order if last is true or FIFO order if false.
+
+ '''
+ if not self:
+ raise KeyError('dictionary is empty')
+ root = self.__root
+ if last:
+ link = root[0]
+ link_prev = link[0]
+ link_prev[1] = root
+ root[0] = link_prev
+ else:
+ link = root[1]
+ link_next = link[1]
+ root[1] = link_next
+ link_next[0] = root
+ key = link[2]
+ del self.__map[key]
+ value = dict.pop(self, key)
+ return key, value
+
+ # -- the following methods do not depend on the internal structure --
+
+ def keys(self):
+ 'od.keys() -> list of keys in od'
+ return list(self)
+
+ def values(self):
+ 'od.values() -> list of values in od'
+ return [self[key] for key in self]
+
+ def items(self):
+ 'od.items() -> list of (key, value) pairs in od'
+ return [(key, self[key]) for key in self]
+
+ def iterkeys(self):
+ 'od.iterkeys() -> an iterator over the keys in od'
+ return iter(self)
+
+ def itervalues(self):
+ 'od.itervalues() -> an iterator over the values in od'
+ for k in self:
+ yield self[k]
+
+ def iteritems(self):
+ 'od.iteritems() -> an iterator over the (key, value) items in od'
+ for k in self:
+ yield (k, self[k])
+
+ def update(*args, **kwds):
+ '''od.update(E, **F) -> None. Update od from dict/iterable E and F.
+
+ If E is a dict instance, does: for k in E: od[k] = E[k]
+ If E has a .keys() method, does: for k in E.keys(): od[k] = E[k]
+ Or if E is an iterable of items, does: for k, v in E: od[k] = v
+ In either case, this is followed by: for k, v in F.items(): od[k] = v
+
+ '''
+ if len(args) > 2:
+ raise TypeError('update() takes at most 2 positional '
+ 'arguments (%d given)' % (len(args),))
+ elif not args:
+ raise TypeError('update() takes at least 1 argument (0 given)')
+ self = args[0]
+ # Make progressively weaker assumptions about "other"
+ other = ()
+ if len(args) == 2:
+ other = args[1]
+ if isinstance(other, dict):
+ for key in other:
+ self[key] = other[key]
+ elif hasattr(other, 'keys'):
+ for key in other.keys():
+ self[key] = other[key]
+ else:
+ for key, value in other:
+ self[key] = value
+ for key, value in kwds.items():
+ self[key] = value
+
+ __update = update # let subclasses override update without breaking __init__
+
+ __marker = object()
+
+ def pop(self, key, default=__marker):
+ '''od.pop(k[,d]) -> v, remove specified key and return the corresponding value.
+ If key is not found, d is returned if given, otherwise KeyError is raised.
+
+ '''
+ if key in self:
+ result = self[key]
+ del self[key]
+ return result
+ if default is self.__marker:
+ raise KeyError(key)
+ return default
+
+ def setdefault(self, key, default=None):
+ 'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od'
+ if key in self:
+ return self[key]
+ self[key] = default
+ return default
+
+ def __repr__(self, _repr_running={}):
+ 'od.__repr__() <==> repr(od)'
+ call_key = id(self), _get_ident()
+ if call_key in _repr_running:
+ return '...'
+ _repr_running[call_key] = 1
+ try:
+ if not self:
+ return '%s()' % (self.__class__.__name__,)
+ return '%s(%r)' % (self.__class__.__name__, self.items())
+ finally:
+ del _repr_running[call_key]
+
+ def __reduce__(self):
+ 'Return state information for pickling'
+ items = [[k, self[k]] for k in self]
+ inst_dict = vars(self).copy()
+ for k in vars(OrderedDict()):
+ inst_dict.pop(k, None)
+ if inst_dict:
+ return (self.__class__, (items,), inst_dict)
+ return self.__class__, (items,)
+
+ def copy(self):
+ 'od.copy() -> a shallow copy of od'
+ return self.__class__(self)
+
+ @classmethod
+ def fromkeys(cls, iterable, value=None):
+ '''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S
+ and values equal to v (which defaults to None).
+
+ '''
+ d = cls()
+ for key in iterable:
+ d[key] = value
+ return d
+
+ def __eq__(self, other):
+ '''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive
+ while comparison to a regular mapping is order-insensitive.
+
+ '''
+ if isinstance(other, OrderedDict):
+ return len(self)==len(other) and self.items() == other.items()
+ return dict.__eq__(self, other)
+
+ def __ne__(self, other):
+ return not self == other
+
+ # -- the following methods are only used in Python 2.7 --
+
+ def viewkeys(self):
+ "od.viewkeys() -> a set-like object providing a view on od's keys"
+ return KeysView(self)
+
+ def viewvalues(self):
+ "od.viewvalues() -> an object providing a view on od's values"
+ return ValuesView(self)
+
+ def viewitems(self):
+ "od.viewitems() -> a set-like object providing a view on od's items"
+ return ItemsView(self)
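The backport mirrors the stdlib collections.OrderedDict, so a quick illustration of the order-aware pieces is enough. This runs on Python 2 only, since the module imports the Python 2 ``thread``/``dummy_thread`` names at load time (the import path assumes the vendored tree is on sys.path):

    from urllib3.packages.ordered_dict import OrderedDict

    od = OrderedDict()
    od['b'] = 1
    od['a'] = 2
    od['c'] = 3
    print(od.keys())               # ['b', 'a', 'c'], insertion order kept
    print(od.popitem())            # ('c', 3), LIFO by default
    print(od.popitem(last=False))  # ('b', 1), FIFO when last=False

    # Equality is order-sensitive against another OrderedDict...
    print(OrderedDict([('a', 1), ('b', 2)]) ==
          OrderedDict([('b', 2), ('a', 1)]))    # False
    # ...but order-insensitive against a plain dict.
    print(OrderedDict([('a', 1)]) == {'a': 1})  # True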
diff --git a/collectors/python.d.plugin/python_modules/urllib3/packages/six.py b/collectors/python.d.plugin/python_modules/urllib3/packages/six.py
new file mode 100644
index 0000000..31df501
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/urllib3/packages/six.py
@@ -0,0 +1,852 @@
+"""Utilities for writing code that runs on Python 2 and 3"""
+
+# Copyright (c) 2010-2015 Benjamin Peterson
+#
+# SPDX-License-Identifier: MIT
+
+from __future__ import absolute_import
+
+import functools
+import itertools
+import operator
+import sys
+import types
+
+__author__ = "Benjamin Peterson <benjamin@python.org>"
+__version__ = "1.10.0"
+
+
+# Useful for very coarse version differentiation.
+PY2 = sys.version_info[0] == 2
+PY3 = sys.version_info[0] == 3
+PY34 = sys.version_info[0:2] >= (3, 4)
+
+if PY3:
+ string_types = str,
+ integer_types = int,
+ class_types = type,
+ text_type = str
+ binary_type = bytes
+
+ MAXSIZE = sys.maxsize
+else:
+ string_types = basestring,
+ integer_types = (int, long)
+ class_types = (type, types.ClassType)
+ text_type = unicode
+ binary_type = str
+
+ if sys.platform.startswith("java"):
+ # Jython always uses 32 bits.
+ MAXSIZE = int((1 << 31) - 1)
+ else:
+ # It's possible to have sizeof(long) != sizeof(Py_ssize_t).
+ class X(object):
+
+ def __len__(self):
+ return 1 << 31
+ try:
+ len(X())
+ except OverflowError:
+ # 32-bit
+ MAXSIZE = int((1 << 31) - 1)
+ else:
+ # 64-bit
+ MAXSIZE = int((1 << 63) - 1)
+ del X
+
+
+def _add_doc(func, doc):
+ """Add documentation to a function."""
+ func.__doc__ = doc
+
+
+def _import_module(name):
+ """Import module, returning the module after the last dot."""
+ __import__(name)
+ return sys.modules[name]
+
+
+class _LazyDescr(object):
+
+ def __init__(self, name):
+ self.name = name
+
+ def __get__(self, obj, tp):
+ result = self._resolve()
+ setattr(obj, self.name, result) # Invokes __set__.
+ try:
+ # This is a bit ugly, but it avoids running this again by
+ # removing this descriptor.
+ delattr(obj.__class__, self.name)
+ except AttributeError:
+ pass
+ return result
+
+
+class MovedModule(_LazyDescr):
+
+ def __init__(self, name, old, new=None):
+ super(MovedModule, self).__init__(name)
+ if PY3:
+ if new is None:
+ new = name
+ self.mod = new
+ else:
+ self.mod = old
+
+ def _resolve(self):
+ return _import_module(self.mod)
+
+ def __getattr__(self, attr):
+ _module = self._resolve()
+ value = getattr(_module, attr)
+ setattr(self, attr, value)
+ return value
+
+
+class _LazyModule(types.ModuleType):
+
+ def __init__(self, name):
+ super(_LazyModule, self).__init__(name)
+ self.__doc__ = self.__class__.__doc__
+
+ def __dir__(self):
+ attrs = ["__doc__", "__name__"]
+ attrs += [attr.name for attr in self._moved_attributes]
+ return attrs
+
+ # Subclasses should override this
+ _moved_attributes = []
+
+
+class MovedAttribute(_LazyDescr):
+
+ def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None):
+ super(MovedAttribute, self).__init__(name)
+ if PY3:
+ if new_mod is None:
+ new_mod = name
+ self.mod = new_mod
+ if new_attr is None:
+ if old_attr is None:
+ new_attr = name
+ else:
+ new_attr = old_attr
+ self.attr = new_attr
+ else:
+ self.mod = old_mod
+ if old_attr is None:
+ old_attr = name
+ self.attr = old_attr
+
+ def _resolve(self):
+ module = _import_module(self.mod)
+ return getattr(module, self.attr)
+
+
+class _SixMetaPathImporter(object):
+
+ """
+ A meta path importer to import six.moves and its submodules.
+
+ This class implements a PEP302 finder and loader. It should be compatible
+ with Python 2.5 and all existing versions of Python 3.
+ """
+
+ def __init__(self, six_module_name):
+ self.name = six_module_name
+ self.known_modules = {}
+
+ def _add_module(self, mod, *fullnames):
+ for fullname in fullnames:
+ self.known_modules[self.name + "." + fullname] = mod
+
+ def _get_module(self, fullname):
+ return self.known_modules[self.name + "." + fullname]
+
+ def find_module(self, fullname, path=None):
+ if fullname in self.known_modules:
+ return self
+ return None
+
+ def __get_module(self, fullname):
+ try:
+ return self.known_modules[fullname]
+ except KeyError:
+ raise ImportError("This loader does not know module " + fullname)
+
+ def load_module(self, fullname):
+ try:
+ # in case of a reload
+ return sys.modules[fullname]
+ except KeyError:
+ pass
+ mod = self.__get_module(fullname)
+ if isinstance(mod, MovedModule):
+ mod = mod._resolve()
+ else:
+ mod.__loader__ = self
+ sys.modules[fullname] = mod
+ return mod
+
+ def is_package(self, fullname):
+ """
+ Return true if the named module is a package.
+
+ We need this method to get correct spec objects with
+ Python 3.4 (see PEP451)
+ """
+ return hasattr(self.__get_module(fullname), "__path__")
+
+ def get_code(self, fullname):
+ """Return None
+
+ Required if is_package is implemented."""
+ self.__get_module(fullname) # eventually raises ImportError
+ return None
+ get_source = get_code # same as get_code
+
+_importer = _SixMetaPathImporter(__name__)
+
+
+class _MovedItems(_LazyModule):
+
+ """Lazy loading of moved objects"""
+ __path__ = [] # mark as package
+
+
+_moved_attributes = [
+ MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"),
+ MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"),
+ MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"),
+ MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"),
+ MovedAttribute("intern", "__builtin__", "sys"),
+ MovedAttribute("map", "itertools", "builtins", "imap", "map"),
+ MovedAttribute("getcwd", "os", "os", "getcwdu", "getcwd"),
+ MovedAttribute("getcwdb", "os", "os", "getcwd", "getcwdb"),
+ MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"),
+ MovedAttribute("reload_module", "__builtin__", "importlib" if PY34 else "imp", "reload"),
+ MovedAttribute("reduce", "__builtin__", "functools"),
+ MovedAttribute("shlex_quote", "pipes", "shlex", "quote"),
+ MovedAttribute("StringIO", "StringIO", "io"),
+ MovedAttribute("UserDict", "UserDict", "collections"),
+ MovedAttribute("UserList", "UserList", "collections"),
+ MovedAttribute("UserString", "UserString", "collections"),
+ MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"),
+ MovedAttribute("zip", "itertools", "builtins", "izip", "zip"),
+ MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"),
+ MovedModule("builtins", "__builtin__"),
+ MovedModule("configparser", "ConfigParser"),
+ MovedModule("copyreg", "copy_reg"),
+ MovedModule("dbm_gnu", "gdbm", "dbm.gnu"),
+ MovedModule("_dummy_thread", "dummy_thread", "_dummy_thread"),
+ MovedModule("http_cookiejar", "cookielib", "http.cookiejar"),
+ MovedModule("http_cookies", "Cookie", "http.cookies"),
+ MovedModule("html_entities", "htmlentitydefs", "html.entities"),
+ MovedModule("html_parser", "HTMLParser", "html.parser"),
+ MovedModule("http_client", "httplib", "http.client"),
+ MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"),
+ MovedModule("email_mime_nonmultipart", "email.MIMENonMultipart", "email.mime.nonmultipart"),
+ MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"),
+ MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"),
+ MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"),
+ MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"),
+ MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"),
+ MovedModule("cPickle", "cPickle", "pickle"),
+ MovedModule("queue", "Queue"),
+ MovedModule("reprlib", "repr"),
+ MovedModule("socketserver", "SocketServer"),
+ MovedModule("_thread", "thread", "_thread"),
+ MovedModule("tkinter", "Tkinter"),
+ MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"),
+ MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"),
+ MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"),
+ MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"),
+ MovedModule("tkinter_tix", "Tix", "tkinter.tix"),
+ MovedModule("tkinter_ttk", "ttk", "tkinter.ttk"),
+ MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"),
+ MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"),
+ MovedModule("tkinter_colorchooser", "tkColorChooser",
+ "tkinter.colorchooser"),
+ MovedModule("tkinter_commondialog", "tkCommonDialog",
+ "tkinter.commondialog"),
+ MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"),
+ MovedModule("tkinter_font", "tkFont", "tkinter.font"),
+ MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"),
+ MovedModule("tkinter_tksimpledialog", "tkSimpleDialog",
+ "tkinter.simpledialog"),
+ MovedModule("urllib_parse", __name__ + ".moves.urllib_parse", "urllib.parse"),
+ MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"),
+ MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"),
+ MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"),
+ MovedModule("xmlrpc_client", "xmlrpclib", "xmlrpc.client"),
+ MovedModule("xmlrpc_server", "SimpleXMLRPCServer", "xmlrpc.server"),
+]
+# Add Windows-specific modules.
+if sys.platform == "win32":
+ _moved_attributes += [
+ MovedModule("winreg", "_winreg"),
+ ]
+
+for attr in _moved_attributes:
+ setattr(_MovedItems, attr.name, attr)
+ if isinstance(attr, MovedModule):
+ _importer._add_module(attr, "moves." + attr.name)
+del attr
+
+_MovedItems._moved_attributes = _moved_attributes
+
+moves = _MovedItems(__name__ + ".moves")
+_importer._add_module(moves, "moves")
+
+
+class Module_six_moves_urllib_parse(_LazyModule):
+
+ """Lazy loading of moved objects in six.moves.urllib_parse"""
+
+
+_urllib_parse_moved_attributes = [
+ MovedAttribute("ParseResult", "urlparse", "urllib.parse"),
+ MovedAttribute("SplitResult", "urlparse", "urllib.parse"),
+ MovedAttribute("parse_qs", "urlparse", "urllib.parse"),
+ MovedAttribute("parse_qsl", "urlparse", "urllib.parse"),
+ MovedAttribute("urldefrag", "urlparse", "urllib.parse"),
+ MovedAttribute("urljoin", "urlparse", "urllib.parse"),
+ MovedAttribute("urlparse", "urlparse", "urllib.parse"),
+ MovedAttribute("urlsplit", "urlparse", "urllib.parse"),
+ MovedAttribute("urlunparse", "urlparse", "urllib.parse"),
+ MovedAttribute("urlunsplit", "urlparse", "urllib.parse"),
+ MovedAttribute("quote", "urllib", "urllib.parse"),
+ MovedAttribute("quote_plus", "urllib", "urllib.parse"),
+ MovedAttribute("unquote", "urllib", "urllib.parse"),
+ MovedAttribute("unquote_plus", "urllib", "urllib.parse"),
+ MovedAttribute("urlencode", "urllib", "urllib.parse"),
+ MovedAttribute("splitquery", "urllib", "urllib.parse"),
+ MovedAttribute("splittag", "urllib", "urllib.parse"),
+ MovedAttribute("splituser", "urllib", "urllib.parse"),
+ MovedAttribute("uses_fragment", "urlparse", "urllib.parse"),
+ MovedAttribute("uses_netloc", "urlparse", "urllib.parse"),
+ MovedAttribute("uses_params", "urlparse", "urllib.parse"),
+ MovedAttribute("uses_query", "urlparse", "urllib.parse"),
+ MovedAttribute("uses_relative", "urlparse", "urllib.parse"),
+]
+for attr in _urllib_parse_moved_attributes:
+ setattr(Module_six_moves_urllib_parse, attr.name, attr)
+del attr
+
+Module_six_moves_urllib_parse._moved_attributes = _urllib_parse_moved_attributes
+
+_importer._add_module(Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse"),
+ "moves.urllib_parse", "moves.urllib.parse")
+
+
+class Module_six_moves_urllib_error(_LazyModule):
+
+ """Lazy loading of moved objects in six.moves.urllib_error"""
+
+
+_urllib_error_moved_attributes = [
+ MovedAttribute("URLError", "urllib2", "urllib.error"),
+ MovedAttribute("HTTPError", "urllib2", "urllib.error"),
+ MovedAttribute("ContentTooShortError", "urllib", "urllib.error"),
+]
+for attr in _urllib_error_moved_attributes:
+ setattr(Module_six_moves_urllib_error, attr.name, attr)
+del attr
+
+Module_six_moves_urllib_error._moved_attributes = _urllib_error_moved_attributes
+
+_importer._add_module(Module_six_moves_urllib_error(__name__ + ".moves.urllib.error"),
+ "moves.urllib_error", "moves.urllib.error")
+
+
+class Module_six_moves_urllib_request(_LazyModule):
+
+ """Lazy loading of moved objects in six.moves.urllib_request"""
+
+
+_urllib_request_moved_attributes = [
+ MovedAttribute("urlopen", "urllib2", "urllib.request"),
+ MovedAttribute("install_opener", "urllib2", "urllib.request"),
+ MovedAttribute("build_opener", "urllib2", "urllib.request"),
+ MovedAttribute("pathname2url", "urllib", "urllib.request"),
+ MovedAttribute("url2pathname", "urllib", "urllib.request"),
+ MovedAttribute("getproxies", "urllib", "urllib.request"),
+ MovedAttribute("Request", "urllib2", "urllib.request"),
+ MovedAttribute("OpenerDirector", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPDefaultErrorHandler", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPCookieProcessor", "urllib2", "urllib.request"),
+ MovedAttribute("ProxyHandler", "urllib2", "urllib.request"),
+ MovedAttribute("BaseHandler", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"),
+ MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"),
+ MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"),
+ MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"),
+ MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPHandler", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"),
+ MovedAttribute("FileHandler", "urllib2", "urllib.request"),
+ MovedAttribute("FTPHandler", "urllib2", "urllib.request"),
+ MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"),
+ MovedAttribute("UnknownHandler", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"),
+ MovedAttribute("urlretrieve", "urllib", "urllib.request"),
+ MovedAttribute("urlcleanup", "urllib", "urllib.request"),
+ MovedAttribute("URLopener", "urllib", "urllib.request"),
+ MovedAttribute("FancyURLopener", "urllib", "urllib.request"),
+ MovedAttribute("proxy_bypass", "urllib", "urllib.request"),
+]
+for attr in _urllib_request_moved_attributes:
+ setattr(Module_six_moves_urllib_request, attr.name, attr)
+del attr
+
+Module_six_moves_urllib_request._moved_attributes = _urllib_request_moved_attributes
+
+_importer._add_module(Module_six_moves_urllib_request(__name__ + ".moves.urllib.request"),
+ "moves.urllib_request", "moves.urllib.request")
+
+
+class Module_six_moves_urllib_response(_LazyModule):
+
+ """Lazy loading of moved objects in six.moves.urllib_response"""
+
+
+_urllib_response_moved_attributes = [
+ MovedAttribute("addbase", "urllib", "urllib.response"),
+ MovedAttribute("addclosehook", "urllib", "urllib.response"),
+ MovedAttribute("addinfo", "urllib", "urllib.response"),
+ MovedAttribute("addinfourl", "urllib", "urllib.response"),
+]
+for attr in _urllib_response_moved_attributes:
+ setattr(Module_six_moves_urllib_response, attr.name, attr)
+del attr
+
+Module_six_moves_urllib_response._moved_attributes = _urllib_response_moved_attributes
+
+_importer._add_module(Module_six_moves_urllib_response(__name__ + ".moves.urllib.response"),
+ "moves.urllib_response", "moves.urllib.response")
+
+
+class Module_six_moves_urllib_robotparser(_LazyModule):
+
+ """Lazy loading of moved objects in six.moves.urllib_robotparser"""
+
+
+_urllib_robotparser_moved_attributes = [
+ MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser"),
+]
+for attr in _urllib_robotparser_moved_attributes:
+ setattr(Module_six_moves_urllib_robotparser, attr.name, attr)
+del attr
+
+Module_six_moves_urllib_robotparser._moved_attributes = _urllib_robotparser_moved_attributes
+
+_importer._add_module(Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser"),
+ "moves.urllib_robotparser", "moves.urllib.robotparser")
+
+
+class Module_six_moves_urllib(types.ModuleType):
+
+ """Create a six.moves.urllib namespace that resembles the Python 3 namespace"""
+ __path__ = [] # mark as package
+ parse = _importer._get_module("moves.urllib_parse")
+ error = _importer._get_module("moves.urllib_error")
+ request = _importer._get_module("moves.urllib_request")
+ response = _importer._get_module("moves.urllib_response")
+ robotparser = _importer._get_module("moves.urllib_robotparser")
+
+ def __dir__(self):
+ return ['parse', 'error', 'request', 'response', 'robotparser']
+
+_importer._add_module(Module_six_moves_urllib(__name__ + ".moves.urllib"),
+ "moves.urllib")
+
+
+def add_move(move):
+ """Add an item to six.moves."""
+ setattr(_MovedItems, move.name, move)
+
+
+def remove_move(name):
+ """Remove item from six.moves."""
+ try:
+ delattr(_MovedItems, name)
+ except AttributeError:
+ try:
+ del moves.__dict__[name]
+ except KeyError:
+ raise AttributeError("no such move, %r" % (name,))
+
+
+if PY3:
+ _meth_func = "__func__"
+ _meth_self = "__self__"
+
+ _func_closure = "__closure__"
+ _func_code = "__code__"
+ _func_defaults = "__defaults__"
+ _func_globals = "__globals__"
+else:
+ _meth_func = "im_func"
+ _meth_self = "im_self"
+
+ _func_closure = "func_closure"
+ _func_code = "func_code"
+ _func_defaults = "func_defaults"
+ _func_globals = "func_globals"
+
+
+try:
+ advance_iterator = next
+except NameError:
+ def advance_iterator(it):
+ return it.next()
+next = advance_iterator
+
+
+try:
+ callable = callable
+except NameError:
+ def callable(obj):
+ return any("__call__" in klass.__dict__ for klass in type(obj).__mro__)
+
+
+if PY3:
+ def get_unbound_function(unbound):
+ return unbound
+
+ create_bound_method = types.MethodType
+
+ def create_unbound_method(func, cls):
+ return func
+
+ Iterator = object
+else:
+ def get_unbound_function(unbound):
+ return unbound.im_func
+
+ def create_bound_method(func, obj):
+ return types.MethodType(func, obj, obj.__class__)
+
+ def create_unbound_method(func, cls):
+ return types.MethodType(func, None, cls)
+
+ class Iterator(object):
+
+ def next(self):
+ return type(self).__next__(self)
+
+ callable = callable
+_add_doc(get_unbound_function,
+ """Get the function out of a possibly unbound function""")
+
+
+get_method_function = operator.attrgetter(_meth_func)
+get_method_self = operator.attrgetter(_meth_self)
+get_function_closure = operator.attrgetter(_func_closure)
+get_function_code = operator.attrgetter(_func_code)
+get_function_defaults = operator.attrgetter(_func_defaults)
+get_function_globals = operator.attrgetter(_func_globals)
+
+
+if PY3:
+ def iterkeys(d, **kw):
+ return iter(d.keys(**kw))
+
+ def itervalues(d, **kw):
+ return iter(d.values(**kw))
+
+ def iteritems(d, **kw):
+ return iter(d.items(**kw))
+
+ def iterlists(d, **kw):
+ return iter(d.lists(**kw))
+
+ viewkeys = operator.methodcaller("keys")
+
+ viewvalues = operator.methodcaller("values")
+
+ viewitems = operator.methodcaller("items")
+else:
+ def iterkeys(d, **kw):
+ return d.iterkeys(**kw)
+
+ def itervalues(d, **kw):
+ return d.itervalues(**kw)
+
+ def iteritems(d, **kw):
+ return d.iteritems(**kw)
+
+ def iterlists(d, **kw):
+ return d.iterlists(**kw)
+
+ viewkeys = operator.methodcaller("viewkeys")
+
+ viewvalues = operator.methodcaller("viewvalues")
+
+ viewitems = operator.methodcaller("viewitems")
+
+_add_doc(iterkeys, "Return an iterator over the keys of a dictionary.")
+_add_doc(itervalues, "Return an iterator over the values of a dictionary.")
+_add_doc(iteritems,
+ "Return an iterator over the (key, value) pairs of a dictionary.")
+_add_doc(iterlists,
+ "Return an iterator over the (key, [values]) pairs of a dictionary.")
+
+
+if PY3:
+ def b(s):
+ return s.encode("latin-1")
+
+ def u(s):
+ return s
+ unichr = chr
+ import struct
+ int2byte = struct.Struct(">B").pack
+ del struct
+ byte2int = operator.itemgetter(0)
+ indexbytes = operator.getitem
+ iterbytes = iter
+ import io
+ StringIO = io.StringIO
+ BytesIO = io.BytesIO
+ _assertCountEqual = "assertCountEqual"
+ if sys.version_info[1] <= 1:
+ _assertRaisesRegex = "assertRaisesRegexp"
+ _assertRegex = "assertRegexpMatches"
+ else:
+ _assertRaisesRegex = "assertRaisesRegex"
+ _assertRegex = "assertRegex"
+else:
+ def b(s):
+ return s
+ # Workaround for standalone backslash
+
+ def u(s):
+ return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape")
+ unichr = unichr
+ int2byte = chr
+
+ def byte2int(bs):
+ return ord(bs[0])
+
+ def indexbytes(buf, i):
+ return ord(buf[i])
+ iterbytes = functools.partial(itertools.imap, ord)
+ import StringIO
+ StringIO = BytesIO = StringIO.StringIO
+ _assertCountEqual = "assertItemsEqual"
+ _assertRaisesRegex = "assertRaisesRegexp"
+ _assertRegex = "assertRegexpMatches"
+_add_doc(b, """Byte literal""")
+_add_doc(u, """Text literal""")
+
+
+def assertCountEqual(self, *args, **kwargs):
+ return getattr(self, _assertCountEqual)(*args, **kwargs)
+
+
+def assertRaisesRegex(self, *args, **kwargs):
+ return getattr(self, _assertRaisesRegex)(*args, **kwargs)
+
+
+def assertRegex(self, *args, **kwargs):
+ return getattr(self, _assertRegex)(*args, **kwargs)
+
+
+if PY3:
+ exec_ = getattr(moves.builtins, "exec")
+
+ def reraise(tp, value, tb=None):
+ if value is None:
+ value = tp()
+ if value.__traceback__ is not tb:
+ raise value.with_traceback(tb)
+ raise value
+
+else:
+ def exec_(_code_, _globs_=None, _locs_=None):
+ """Execute code in a namespace."""
+ if _globs_ is None:
+ frame = sys._getframe(1)
+ _globs_ = frame.f_globals
+ if _locs_ is None:
+ _locs_ = frame.f_locals
+ del frame
+ elif _locs_ is None:
+ _locs_ = _globs_
+ exec("""exec _code_ in _globs_, _locs_""")
+
+ exec_("""def reraise(tp, value, tb=None):
+ raise tp, value, tb
+""")
+
+
+if sys.version_info[:2] == (3, 2):
+ exec_("""def raise_from(value, from_value):
+ if from_value is None:
+ raise value
+ raise value from from_value
+""")
+elif sys.version_info[:2] > (3, 2):
+ exec_("""def raise_from(value, from_value):
+ raise value from from_value
+""")
+else:
+ def raise_from(value, from_value):
+ raise value
+
+
+print_ = getattr(moves.builtins, "print", None)
+if print_ is None:
+ def print_(*args, **kwargs):
+ """The new-style print function for Python 2.4 and 2.5."""
+ fp = kwargs.pop("file", sys.stdout)
+ if fp is None:
+ return
+
+ def write(data):
+ if not isinstance(data, basestring):
+ data = str(data)
+ # If the file has an encoding, encode unicode with it.
+ if (isinstance(fp, file) and
+ isinstance(data, unicode) and
+ fp.encoding is not None):
+ errors = getattr(fp, "errors", None)
+ if errors is None:
+ errors = "strict"
+ data = data.encode(fp.encoding, errors)
+ fp.write(data)
+ want_unicode = False
+ sep = kwargs.pop("sep", None)
+ if sep is not None:
+ if isinstance(sep, unicode):
+ want_unicode = True
+ elif not isinstance(sep, str):
+ raise TypeError("sep must be None or a string")
+ end = kwargs.pop("end", None)
+ if end is not None:
+ if isinstance(end, unicode):
+ want_unicode = True
+ elif not isinstance(end, str):
+ raise TypeError("end must be None or a string")
+ if kwargs:
+ raise TypeError("invalid keyword arguments to print()")
+ if not want_unicode:
+ for arg in args:
+ if isinstance(arg, unicode):
+ want_unicode = True
+ break
+ if want_unicode:
+ newline = unicode("\n")
+ space = unicode(" ")
+ else:
+ newline = "\n"
+ space = " "
+ if sep is None:
+ sep = space
+ if end is None:
+ end = newline
+ for i, arg in enumerate(args):
+ if i:
+ write(sep)
+ write(arg)
+ write(end)
+if sys.version_info[:2] < (3, 3):
+ _print = print_
+
+ def print_(*args, **kwargs):
+ fp = kwargs.get("file", sys.stdout)
+ flush = kwargs.pop("flush", False)
+ _print(*args, **kwargs)
+ if flush and fp is not None:
+ fp.flush()
+
+_add_doc(reraise, """Reraise an exception.""")
+
+if sys.version_info[0:2] < (3, 4):
+ def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS,
+ updated=functools.WRAPPER_UPDATES):
+ def wrapper(f):
+ f = functools.wraps(wrapped, assigned, updated)(f)
+ f.__wrapped__ = wrapped
+ return f
+ return wrapper
+else:
+ wraps = functools.wraps
+
+
+def with_metaclass(meta, *bases):
+ """Create a base class with a metaclass."""
+ # This requires a bit of explanation: the basic idea is to make a dummy
+ # metaclass for one level of class instantiation that replaces itself with
+ # the actual metaclass.
+ class metaclass(meta):
+
+ def __new__(cls, name, this_bases, d):
+ return meta(name, bases, d)
+ return type.__new__(metaclass, 'temporary_class', (), {})
+
+
+def add_metaclass(metaclass):
+ """Class decorator for creating a class with a metaclass."""
+ def wrapper(cls):
+ orig_vars = cls.__dict__.copy()
+ slots = orig_vars.get('__slots__')
+ if slots is not None:
+ if isinstance(slots, str):
+ slots = [slots]
+ for slots_var in slots:
+ orig_vars.pop(slots_var)
+ orig_vars.pop('__dict__', None)
+ orig_vars.pop('__weakref__', None)
+ return metaclass(cls.__name__, cls.__bases__, orig_vars)
+ return wrapper
+
+
+def python_2_unicode_compatible(klass):
+ """
+ A decorator that defines __unicode__ and __str__ methods under Python 2.
+ Under Python 3 it does nothing.
+
+ To support Python 2 and 3 with a single code base, define a __str__ method
+ returning text and apply this decorator to the class.
+ """
+ if PY2:
+ if '__str__' not in klass.__dict__:
+ raise ValueError("@python_2_unicode_compatible cannot be applied "
+ "to %s because it doesn't define __str__()." %
+ klass.__name__)
+ klass.__unicode__ = klass.__str__
+ klass.__str__ = lambda self: self.__unicode__().encode('utf-8')
+ return klass
+
+
+# Complete the moves implementation.
+# This code is at the end of this module to speed up module loading.
+# Turn this module into a package.
+__path__ = [] # required for PEP 302 and PEP 451
+__package__ = __name__ # see PEP 366 @ReservedAssignment
+if globals().get("__spec__") is not None:
+ __spec__.submodule_search_locations = [] # PEP 451 @UndefinedVariable
+# Remove other six meta path importers, since they cause problems. This can
+# happen if six is removed from sys.modules and then reloaded. (Setuptools does
+# this for some reason.)
+if sys.meta_path:
+ for i, importer in enumerate(sys.meta_path):
+ # Here's some real nastiness: Another "instance" of the six module might
+ # be floating around. Therefore, we can't use isinstance() to check for
+ # the six meta path importer, since the other six instance will have
+ # inserted an importer with different class.
+ if (type(importer).__name__ == "_SixMetaPathImporter" and
+ importer.name == __name__):
+ del sys.meta_path[i]
+ break
+ del i, importer
+# Finally, add the importer to the meta path import hook.
+sys.meta_path.append(_importer)
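In practice the collectors lean on only a handful of these helpers. A short sketch of the most common ones, assuming the vendored copy imports as ``urllib3.packages.six`` (it mirrors stock six 1.10.0); the ``moves`` import works because the meta path importer registered above resolves it lazily:

    from urllib3.packages import six
    from urllib3.packages.six.moves.urllib.parse import urlencode

    class Meta(type):
        pass

    # with_metaclass() routes class creation through a one-shot temporary
    # metaclass, so the same statement works on Python 2 and Python 3.
    class Widget(six.with_metaclass(Meta, object)):
        pass

    assert type(Widget) is Meta
    assert isinstance(six.b('raw'), six.binary_type)
    assert isinstance(six.u('text'), six.text_type)
    for key, value in six.iteritems({'answer': 42}):
        print(key, value)
    print(urlencode({'q': 'netdata'}))  # q=netdata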
diff --git a/collectors/python.d.plugin/python_modules/urllib3/packages/ssl_match_hostname/__init__.py b/collectors/python.d.plugin/python_modules/urllib3/packages/ssl_match_hostname/__init__.py
new file mode 100644
index 0000000..2aeeeff
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/urllib3/packages/ssl_match_hostname/__init__.py
@@ -0,0 +1,20 @@
+# SPDX-License-Identifier: MIT
+import sys
+
+try:
+ # Our match_hostname function is the same as 3.5's, so we only want to
+ # import the match_hostname function if it's at least that good.
+ if sys.version_info < (3, 5):
+ raise ImportError("Fallback to vendored code")
+
+ from ssl import CertificateError, match_hostname
+except ImportError:
+ try:
+ # Backport of the function from a pypi module
+ from backports.ssl_match_hostname import CertificateError, match_hostname
+ except ImportError:
+ # Our vendored copy
+ from ._implementation import CertificateError, match_hostname
+
+# Not needed, but documenting what we provide.
+__all__ = ('CertificateError', 'match_hostname')
diff --git a/collectors/python.d.plugin/python_modules/urllib3/packages/ssl_match_hostname/_implementation.py b/collectors/python.d.plugin/python_modules/urllib3/packages/ssl_match_hostname/_implementation.py
new file mode 100644
index 0000000..647e081
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/urllib3/packages/ssl_match_hostname/_implementation.py
@@ -0,0 +1,156 @@
+"""The match_hostname() function from Python 3.3.3, essential when using SSL."""
+
+# SPDX-License-Identifier: Python-2.0
+
+import re
+import sys
+
+# ipaddress has been backported to 2.6+ in pypi. If it is installed on the
+# system, use it to handle IPAddress ServerAltnames (this was added in
+# python-3.5) otherwise only do DNS matching. This allows
+# backports.ssl_match_hostname to continue to be used all the way back to
+# python-2.4.
+try:
+ import ipaddress
+except ImportError:
+ ipaddress = None
+
+__version__ = '3.5.0.1'
+
+
+class CertificateError(ValueError):
+ pass
+
+
+def _dnsname_match(dn, hostname, max_wildcards=1):
+ """Matching according to RFC 6125, section 6.4.3
+
+ http://tools.ietf.org/html/rfc6125#section-6.4.3
+ """
+ pats = []
+ if not dn:
+ return False
+
+ # Ported from python3-syntax:
+ # leftmost, *remainder = dn.split(r'.')
+ parts = dn.split(r'.')
+ leftmost = parts[0]
+ remainder = parts[1:]
+
+ wildcards = leftmost.count('*')
+ if wildcards > max_wildcards:
+ # Issue #17980: avoid denials of service by refusing more
+ # than one wildcard per fragment. A survey of established
+ # policy among SSL implementations showed it to be a
+ # reasonable choice.
+ raise CertificateError(
+ "too many wildcards in certificate DNS name: " + repr(dn))
+
+ # speed up common case w/o wildcards
+ if not wildcards:
+ return dn.lower() == hostname.lower()
+
+ # RFC 6125, section 6.4.3, subitem 1.
+ # The client SHOULD NOT attempt to match a presented identifier in which
+ # the wildcard character comprises a label other than the left-most label.
+ if leftmost == '*':
+ # When '*' is a fragment by itself, it matches a non-empty dotless
+ # fragment.
+ pats.append('[^.]+')
+ elif leftmost.startswith('xn--') or hostname.startswith('xn--'):
+ # RFC 6125, section 6.4.3, subitem 3.
+ # The client SHOULD NOT attempt to match a presented identifier
+ # where the wildcard character is embedded within an A-label or
+ # U-label of an internationalized domain name.
+ pats.append(re.escape(leftmost))
+ else:
+ # Otherwise, '*' matches any dotless string, e.g. www*
+ pats.append(re.escape(leftmost).replace(r'\*', '[^.]*'))
+
+ # add the remaining fragments, ignore any wildcards
+ for frag in remainder:
+ pats.append(re.escape(frag))
+
+ pat = re.compile(r'\A' + r'\.'.join(pats) + r'\Z', re.IGNORECASE)
+ return pat.match(hostname)
+
+
+def _to_unicode(obj):
+ if isinstance(obj, str) and sys.version_info < (3,):
+ obj = unicode(obj, encoding='ascii', errors='strict')
+ return obj
+
+def _ipaddress_match(ipname, host_ip):
+ """Exact matching of IP addresses.
+
+ RFC 6125 explicitly doesn't define an algorithm for this
+ (section 1.7.2 - "Out of Scope").
+ """
+ # OpenSSL may add a trailing newline to a subjectAltName's IP address
+ # Divergence from upstream: ipaddress can't handle byte str
+ ip = ipaddress.ip_address(_to_unicode(ipname).rstrip())
+ return ip == host_ip
+
+
+def match_hostname(cert, hostname):
+ """Verify that *cert* (in decoded format as returned by
+ SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 and RFC 6125
+ rules are followed, but IP addresses are not accepted for *hostname*.
+
+ CertificateError is raised on failure. On success, the function
+ returns nothing.
+ """
+ if not cert:
+ raise ValueError("empty or no certificate, match_hostname needs a "
+ "SSL socket or SSL context with either "
+ "CERT_OPTIONAL or CERT_REQUIRED")
+ try:
+ # Divergence from upstream: ipaddress can't handle byte str
+ host_ip = ipaddress.ip_address(_to_unicode(hostname))
+ except ValueError:
+ # Not an IP address (common case)
+ host_ip = None
+ except UnicodeError:
+ # Divergence from upstream: Have to deal with ipaddress not taking
+ # byte strings. addresses should be all ascii, so we consider it not
+ # an ipaddress in this case
+ host_ip = None
+ except AttributeError:
+ # Divergence from upstream: Make ipaddress library optional
+ if ipaddress is None:
+ host_ip = None
+ else:
+ raise
+ dnsnames = []
+ san = cert.get('subjectAltName', ())
+ for key, value in san:
+ if key == 'DNS':
+ if host_ip is None and _dnsname_match(value, hostname):
+ return
+ dnsnames.append(value)
+ elif key == 'IP Address':
+ if host_ip is not None and _ipaddress_match(value, host_ip):
+ return
+ dnsnames.append(value)
+ if not dnsnames:
+ # The subject is only checked when there is no dNSName entry
+ # in subjectAltName
+ for sub in cert.get('subject', ()):
+ for key, value in sub:
+ # XXX according to RFC 2818, the most specific Common Name
+ # must be used.
+ if key == 'commonName':
+ if _dnsname_match(value, hostname):
+ return
+ dnsnames.append(value)
+ if len(dnsnames) > 1:
+ raise CertificateError("hostname %r "
+ "doesn't match either of %s"
+ % (hostname, ', '.join(map(repr, dnsnames))))
+ elif len(dnsnames) == 1:
+ raise CertificateError("hostname %r "
+ "doesn't match %r"
+ % (hostname, dnsnames[0]))
+ else:
+ raise CertificateError("no appropriate commonName or "
+ "subjectAltName fields were found")
diff --git a/collectors/python.d.plugin/python_modules/urllib3/poolmanager.py b/collectors/python.d.plugin/python_modules/urllib3/poolmanager.py
new file mode 100644
index 0000000..adea9bc
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/urllib3/poolmanager.py
@@ -0,0 +1,441 @@
+# SPDX-License-Identifier: MIT
+from __future__ import absolute_import
+import collections
+import functools
+import logging
+
+from ._collections import RecentlyUsedContainer
+from .connectionpool import HTTPConnectionPool, HTTPSConnectionPool
+from .connectionpool import port_by_scheme
+from .exceptions import LocationValueError, MaxRetryError, ProxySchemeUnknown
+from .packages.six.moves.urllib.parse import urljoin
+from .request import RequestMethods
+from .util.url import parse_url
+from .util.retry import Retry
+
+
+__all__ = ['PoolManager', 'ProxyManager', 'proxy_from_url']
+
+
+log = logging.getLogger(__name__)
+
+SSL_KEYWORDS = ('key_file', 'cert_file', 'cert_reqs', 'ca_certs',
+ 'ssl_version', 'ca_cert_dir', 'ssl_context')
+
+# All known keyword arguments that could be provided to the pool manager, its
+# pools, or the underlying connections. This is used to construct a pool key.
+_key_fields = (
+ 'key_scheme', # str
+ 'key_host', # str
+ 'key_port', # int
+ 'key_timeout', # int or float or Timeout
+ 'key_retries', # int or Retry
+ 'key_strict', # bool
+ 'key_block', # bool
+ 'key_source_address', # str
+ 'key_key_file', # str
+ 'key_cert_file', # str
+ 'key_cert_reqs', # str
+ 'key_ca_certs', # str
+ 'key_ssl_version', # str
+ 'key_ca_cert_dir', # str
+ 'key_ssl_context', # instance of ssl.SSLContext or urllib3.util.ssl_.SSLContext
+ 'key_maxsize', # int
+ 'key_headers', # dict
+ 'key__proxy', # parsed proxy url
+ 'key__proxy_headers', # dict
+ 'key_socket_options', # list of (level (int), optname (int), value (int or str)) tuples
+ 'key__socks_options', # dict
+ 'key_assert_hostname', # bool or string
+ 'key_assert_fingerprint', # str
+)
+
+#: The namedtuple class used to construct keys for the connection pool.
+#: All custom key schemes should include the fields in this key at a minimum.
+PoolKey = collections.namedtuple('PoolKey', _key_fields)
+
+
+def _default_key_normalizer(key_class, request_context):
+ """
+ Create a pool key out of a request context dictionary.
+
+ According to RFC 3986, both the scheme and host are case-insensitive.
+ Therefore, this function normalizes both before constructing the pool
+ key for an HTTPS request. If you wish to change this behaviour, provide
+ alternate callables to ``key_fn_by_scheme``.
+
+ :param key_class:
+ The class to use when constructing the key. This should be a namedtuple
+ with the ``scheme`` and ``host`` keys at a minimum.
+ :type key_class: namedtuple
+ :param request_context:
+ A dictionary-like object that contains the context for a request.
+ :type request_context: dict
+
+ :return: A namedtuple that can be used as a connection pool key.
+ :rtype: PoolKey
+ """
+ # Since we mutate the dictionary, make a copy first
+ context = request_context.copy()
+ context['scheme'] = context['scheme'].lower()
+ context['host'] = context['host'].lower()
+
+ # These are both dictionaries and need to be transformed into frozensets
+ for key in ('headers', '_proxy_headers', '_socks_options'):
+ if key in context and context[key] is not None:
+ context[key] = frozenset(context[key].items())
+
+ # The socket_options key may be a list and needs to be transformed into a
+ # tuple.
+ socket_opts = context.get('socket_options')
+ if socket_opts is not None:
+ context['socket_options'] = tuple(socket_opts)
+
+ # Map the kwargs to the names in the namedtuple - this is necessary since
+ # namedtuples can't have fields starting with '_'.
+ for key in list(context.keys()):
+ context['key_' + key] = context.pop(key)
+
+ # Default to ``None`` for keys missing from the context
+ for field in key_class._fields:
+ if field not in context:
+ context[field] = None
+
+ return key_class(**context)
+
+
+#: A dictionary that maps a scheme to a callable that creates a pool key.
+#: This can be used to alter the way pool keys are constructed, if desired.
+#: Each PoolManager makes a copy of this dictionary so they can be configured
+#: globally here, or individually on the instance.
+key_fn_by_scheme = {
+ 'http': functools.partial(_default_key_normalizer, PoolKey),
+ 'https': functools.partial(_default_key_normalizer, PoolKey),
+}
+
+pool_classes_by_scheme = {
+ 'http': HTTPConnectionPool,
+ 'https': HTTPSConnectionPool,
+}
+
+
+class PoolManager(RequestMethods):
+ """
+ Allows for arbitrary requests while transparently keeping track of
+ necessary connection pools for you.
+
+ :param num_pools:
+ Number of connection pools to cache before discarding the least
+ recently used pool.
+
+ :param headers:
+ Headers to include with all requests, unless other headers are given
+ explicitly.
+
+ :param \\**connection_pool_kw:
+ Additional parameters are used to create fresh
+ :class:`urllib3.connectionpool.ConnectionPool` instances.
+
+ Example::
+
+ >>> manager = PoolManager(num_pools=2)
+ >>> r = manager.request('GET', 'http://google.com/')
+ >>> r = manager.request('GET', 'http://google.com/mail')
+ >>> r = manager.request('GET', 'http://yahoo.com/')
+ >>> len(manager.pools)
+ 2
+
+ """
+
+ proxy = None
+
+ def __init__(self, num_pools=10, headers=None, **connection_pool_kw):
+ RequestMethods.__init__(self, headers)
+ self.connection_pool_kw = connection_pool_kw
+ self.pools = RecentlyUsedContainer(num_pools,
+ dispose_func=lambda p: p.close())
+
+ # Locally set the pool classes and keys so other PoolManagers can
+ # override them.
+ self.pool_classes_by_scheme = pool_classes_by_scheme
+ self.key_fn_by_scheme = key_fn_by_scheme.copy()
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ self.clear()
+ # Return False to re-raise any potential exceptions
+ return False
+
+ def _new_pool(self, scheme, host, port, request_context=None):
+ """
+ Create a new :class:`ConnectionPool` based on host, port, scheme, and
+ any additional pool keyword arguments.
+
+ If ``request_context`` is provided, it is provided as keyword arguments
+ to the pool class used. This method is used to actually create the
+ connection pools handed out by :meth:`connection_from_url` and
+ companion methods. It is intended to be overridden for customization.
+ """
+ pool_cls = self.pool_classes_by_scheme[scheme]
+ if request_context is None:
+ request_context = self.connection_pool_kw.copy()
+
+ # Although the context has everything necessary to create the pool,
+ # this function has historically only used the scheme, host, and port
+ # in the positional args. When an API change is acceptable these can
+ # be removed.
+ for key in ('scheme', 'host', 'port'):
+ request_context.pop(key, None)
+
+ if scheme == 'http':
+ for kw in SSL_KEYWORDS:
+ request_context.pop(kw, None)
+
+ return pool_cls(host, port, **request_context)
+
+ def clear(self):
+ """
+ Empty our store of pools and direct them all to close.
+
+ This will not affect in-flight connections, but they will not be
+ re-used after completion.
+ """
+ self.pools.clear()
+
+ def connection_from_host(self, host, port=None, scheme='http', pool_kwargs=None):
+ """
+ Get a :class:`ConnectionPool` based on the host, port, and scheme.
+
+ If ``port`` isn't given, it will be derived from the ``scheme`` using
+ ``urllib3.connectionpool.port_by_scheme``. If ``pool_kwargs`` is
+ provided, it is merged with the instance's ``connection_pool_kw``
+ variable and used to create the new connection pool, if one is
+ needed.
+ """
+
+ if not host:
+ raise LocationValueError("No host specified.")
+
+ request_context = self._merge_pool_kwargs(pool_kwargs)
+ request_context['scheme'] = scheme or 'http'
+ if not port:
+ port = port_by_scheme.get(request_context['scheme'].lower(), 80)
+ request_context['port'] = port
+ request_context['host'] = host
+
+ return self.connection_from_context(request_context)
+
+ def connection_from_context(self, request_context):
+ """
+ Get a :class:`ConnectionPool` based on the request context.
+
+ ``request_context`` must at least contain the ``scheme`` key and its
+ value must be a key in ``key_fn_by_scheme`` instance variable.
+ """
+ scheme = request_context['scheme'].lower()
+ pool_key_constructor = self.key_fn_by_scheme[scheme]
+ pool_key = pool_key_constructor(request_context)
+
+ return self.connection_from_pool_key(pool_key, request_context=request_context)
+
+ def connection_from_pool_key(self, pool_key, request_context=None):
+ """
+ Get a :class:`ConnectionPool` based on the provided pool key.
+
+ ``pool_key`` should be a namedtuple that only contains immutable
+ objects. At a minimum it must have the ``scheme``, ``host``, and
+ ``port`` fields.
+ """
+ with self.pools.lock:
+ # If the scheme, host, or port doesn't match existing open
+ # connections, open a new ConnectionPool.
+ pool = self.pools.get(pool_key)
+ if pool:
+ return pool
+
+ # Make a fresh ConnectionPool of the desired type
+ scheme = request_context['scheme']
+ host = request_context['host']
+ port = request_context['port']
+ pool = self._new_pool(scheme, host, port, request_context=request_context)
+ self.pools[pool_key] = pool
+
+ return pool
+
+ def connection_from_url(self, url, pool_kwargs=None):
+ """
+ Similar to :func:`urllib3.connectionpool.connection_from_url`.
+
+ If ``pool_kwargs`` is not provided and a new pool needs to be
+ constructed, ``self.connection_pool_kw`` is used to initialize
+ the :class:`urllib3.connectionpool.ConnectionPool`. If ``pool_kwargs``
+ is provided, it is used instead. Note that if a new pool does not
+ need to be created for the request, the provided ``pool_kwargs`` are
+ not used.
+ """
+ u = parse_url(url)
+ return self.connection_from_host(u.host, port=u.port, scheme=u.scheme,
+ pool_kwargs=pool_kwargs)
+
+ def _merge_pool_kwargs(self, override):
+ """
+ Merge a dictionary of override values for self.connection_pool_kw.
+
+ This does not modify self.connection_pool_kw and returns a new dict.
+ Any keys in the override dictionary with a value of ``None`` are
+ removed from the merged dictionary.
+ """
+ base_pool_kwargs = self.connection_pool_kw.copy()
+ if override:
+ for key, value in override.items():
+ if value is None:
+ try:
+ del base_pool_kwargs[key]
+ except KeyError:
+ pass
+ else:
+ base_pool_kwargs[key] = value
+ return base_pool_kwargs
+
+ def urlopen(self, method, url, redirect=True, **kw):
+ """
+ Same as :meth:`urllib3.connectionpool.HTTPConnectionPool.urlopen`, but
+ with custom cross-host redirect logic; only the request-uri portion of
+ the ``url`` is sent.
+
+ The given ``url`` parameter must be absolute, such that an appropriate
+ :class:`urllib3.connectionpool.ConnectionPool` can be chosen for it.
+ """
+ u = parse_url(url)
+ conn = self.connection_from_host(u.host, port=u.port, scheme=u.scheme)
+
+ kw['assert_same_host'] = False
+ kw['redirect'] = False
+ if 'headers' not in kw:
+ kw['headers'] = self.headers
+
+ if self.proxy is not None and u.scheme == "http":
+ response = conn.urlopen(method, url, **kw)
+ else:
+ response = conn.urlopen(method, u.request_uri, **kw)
+
+ redirect_location = redirect and response.get_redirect_location()
+ if not redirect_location:
+ return response
+
+ # Support relative URLs for redirecting.
+ redirect_location = urljoin(url, redirect_location)
+
+ # RFC 7231, Section 6.4.4
+ if response.status == 303:
+ method = 'GET'
+
+ retries = kw.get('retries')
+ if not isinstance(retries, Retry):
+ retries = Retry.from_int(retries, redirect=redirect)
+
+ try:
+ retries = retries.increment(method, url, response=response, _pool=conn)
+ except MaxRetryError:
+ if retries.raise_on_redirect:
+ raise
+ return response
+
+ kw['retries'] = retries
+ kw['redirect'] = redirect
+
+ log.info("Redirecting %s -> %s", url, redirect_location)
+ return self.urlopen(method, redirect_location, **kw)
+
+
+class ProxyManager(PoolManager):
+ """
+ Behaves just like :class:`PoolManager`, but sends all requests through
+ the defined proxy, using the CONNECT method for HTTPS URLs.
+
+ :param proxy_url:
+ The URL of the proxy to be used.
+
+ :param proxy_headers:
+ A dictionary containing headers that will be sent to the proxy. In the
+ HTTP case they are sent with each request, while in the HTTPS/CONNECT
+ case they are sent only once. This can be used for proxy
+ authentication.
+
+ Example:
+ >>> proxy = urllib3.ProxyManager('http://localhost:3128/')
+ >>> r1 = proxy.request('GET', 'http://google.com/')
+ >>> r2 = proxy.request('GET', 'http://httpbin.org/')
+ >>> len(proxy.pools)
+ 1
+ >>> r3 = proxy.request('GET', 'https://httpbin.org/')
+ >>> r4 = proxy.request('GET', 'https://twitter.com/')
+ >>> len(proxy.pools)
+ 3
+
+ """
+
+ def __init__(self, proxy_url, num_pools=10, headers=None,
+ proxy_headers=None, **connection_pool_kw):
+
+ if isinstance(proxy_url, HTTPConnectionPool):
+ proxy_url = '%s://%s:%i' % (proxy_url.scheme, proxy_url.host,
+ proxy_url.port)
+ proxy = parse_url(proxy_url)
+ if not proxy.port:
+ port = port_by_scheme.get(proxy.scheme, 80)
+ proxy = proxy._replace(port=port)
+
+ if proxy.scheme not in ("http", "https"):
+ raise ProxySchemeUnknown(proxy.scheme)
+
+ self.proxy = proxy
+ self.proxy_headers = proxy_headers or {}
+
+ connection_pool_kw['_proxy'] = self.proxy
+ connection_pool_kw['_proxy_headers'] = self.proxy_headers
+
+ super(ProxyManager, self).__init__(
+ num_pools, headers, **connection_pool_kw)
+
+ def connection_from_host(self, host, port=None, scheme='http', pool_kwargs=None):
+ if scheme == "https":
+ return super(ProxyManager, self).connection_from_host(
+ host, port, scheme, pool_kwargs=pool_kwargs)
+
+ return super(ProxyManager, self).connection_from_host(
+ self.proxy.host, self.proxy.port, self.proxy.scheme, pool_kwargs=pool_kwargs)
+
+ def _set_proxy_headers(self, url, headers=None):
+ """
+ Sets headers needed by proxies: specifically, the Accept and Host
+ headers. Only sets headers not provided by the user.
+ """
+ headers_ = {'Accept': '*/*'}
+
+ netloc = parse_url(url).netloc
+ if netloc:
+ headers_['Host'] = netloc
+
+ if headers:
+ headers_.update(headers)
+ return headers_
+
+ def urlopen(self, method, url, redirect=True, **kw):
+ "Same as HTTP(S)ConnectionPool.urlopen, ``url`` must be absolute."
+ u = parse_url(url)
+
+ if u.scheme == "http":
+ # For proxied HTTPS requests, httplib sets the necessary headers
+ # on the CONNECT to the proxy. For HTTP, we'll definitely
+ # need to set 'Host' at the very least.
+ headers = kw.get('headers', self.headers)
+ kw['headers'] = self._set_proxy_headers(url, headers)
+
+ return super(ProxyManager, self).urlopen(method, url, redirect=redirect, **kw)
+
+
+def proxy_from_url(url, **kw):
+ return ProxyManager(proxy_url=url, **kw)
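+
+
+# Minimal usage sketch (illustrative; the proxy address is hypothetical):
+#
+# >>> proxy = proxy_from_url('http://localhost:3128/')
+# >>> r = proxy.request('GET', 'http://example.com/')
+#
+# This is shorthand for constructing ProxyManager directly, so all of its
+# keyword arguments (num_pools, proxy_headers, ...) apply here as well.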
diff --git a/collectors/python.d.plugin/python_modules/urllib3/request.py b/collectors/python.d.plugin/python_modules/urllib3/request.py
new file mode 100644
index 0000000..f783319
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/urllib3/request.py
@@ -0,0 +1,149 @@
+# SPDX-License-Identifier: MIT
+from __future__ import absolute_import
+
+from .filepost import encode_multipart_formdata
+from .packages.six.moves.urllib.parse import urlencode
+
+
+__all__ = ['RequestMethods']
+
+
+class RequestMethods(object):
+ """
+ Convenience mixin for classes that implement a :meth:`urlopen` method, such
+ as :class:`~urllib3.connectionpool.HTTPConnectionPool` and
+ :class:`~urllib3.poolmanager.PoolManager`.
+
+ Provides behavior for making common types of HTTP request methods and
+ decides which type of request field encoding to use.
+
+ Specifically,
+
+ :meth:`.request_encode_url` is for sending requests whose fields are
+ encoded in the URL (such as GET, HEAD, DELETE).
+
+ :meth:`.request_encode_body` is for sending requests whose fields are
+ encoded in the *body* of the request using multipart or www-form-urlencoded
+ (such as for POST, PUT, PATCH).
+
+ :meth:`.request` is for making any kind of request; it will look up the
+ appropriate encoding format and use one of the above two methods to make
+ the request.
+
+ Initializer parameters:
+
+ :param headers:
+ Headers to include with all requests, unless other headers are given
+ explicitly.
+ """
+
+ _encode_url_methods = set(['DELETE', 'GET', 'HEAD', 'OPTIONS'])
+
+ def __init__(self, headers=None):
+ self.headers = headers or {}
+
+ def urlopen(self, method, url, body=None, headers=None,
+ encode_multipart=True, multipart_boundary=None,
+ **kw): # Abstract
+ raise NotImplementedError("Classes extending RequestMethods must implement "
+ "their own ``urlopen`` method.")
+
+ def request(self, method, url, fields=None, headers=None, **urlopen_kw):
+ """
+ Make a request using :meth:`urlopen` with the appropriate encoding of
+ ``fields`` based on the ``method`` used.
+
+ This is a convenience method that requires the least amount of manual
+ effort. It can be used in most situations, while still having the
+ option to drop down to more specific methods when necessary, such as
+ :meth:`request_encode_url`, :meth:`request_encode_body`,
+ or even the lowest level :meth:`urlopen`.
+ """
+ method = method.upper()
+
+ if method in self._encode_url_methods:
+ return self.request_encode_url(method, url, fields=fields,
+ headers=headers,
+ **urlopen_kw)
+ else:
+ return self.request_encode_body(method, url, fields=fields,
+ headers=headers,
+ **urlopen_kw)
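+
+ # Dispatch sketch (illustrative; assumes a concrete subclass such as
+ # PoolManager bound to a hypothetical ``http``):
+ #
+ # >>> http.request('GET', 'http://example.com/', fields={'q': 'netdata'})
+ # routes through request_encode_url (fields become the query string);
+ # >>> http.request('POST', 'http://example.com/', fields={'q': 'netdata'})
+ # routes through request_encode_body (fields become the request body).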
+
+ def request_encode_url(self, method, url, fields=None, headers=None,
+ **urlopen_kw):
+ """
+ Make a request using :meth:`urlopen` with the ``fields`` encoded in
+ the URL. This is useful for request methods like GET, HEAD, DELETE, etc.
+ """
+ if headers is None:
+ headers = self.headers
+
+ extra_kw = {'headers': headers}
+ extra_kw.update(urlopen_kw)
+
+ if fields:
+ url += '?' + urlencode(fields)
+
+ return self.urlopen(method, url, **extra_kw)
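+
+ # For example (illustrative values): fields={'a': '1', 'b': '2'} turns
+ # 'http://example.com/' into 'http://example.com/?a=1&b=2' before the
+ # call is handed to urlopen().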
+
+ def request_encode_body(self, method, url, fields=None, headers=None,
+ encode_multipart=True, multipart_boundary=None,
+ **urlopen_kw):
+ """
+ Make a request using :meth:`urlopen` with the ``fields`` encoded in
+ the body. This is useful for request methods like POST, PUT, PATCH, etc.
+
+ When ``encode_multipart=True`` (default), then
+ :meth:`urllib3.filepost.encode_multipart_formdata` is used to encode
+ the payload with the appropriate content type. Otherwise
+ :func:`urllib.parse.urlencode` is used with the
+ 'application/x-www-form-urlencoded' content type.
+
+ Multipart encoding must be used when posting files, and it's reasonably
+ safe to use it at other times too. However, it may break request
+ signing, such as with OAuth.
+
+ Supports an optional ``fields`` parameter of key/value strings AND
+ key/filetuple. A filetuple is a (filename, data, MIME type) tuple where
+ the MIME type is optional. For example::
+
+ fields = {
+ 'foo': 'bar',
+ 'fakefile': ('foofile.txt', 'contents of foofile'),
+ 'realfile': ('barfile.txt', open('realfile').read()),
+ 'typedfile': ('bazfile.bin', open('bazfile').read(),
+ 'image/jpeg'),
+ 'nonamefile': 'contents of nonamefile field',
+ }
+
+ When uploading a file, providing a filename (the first parameter of the
+ tuple) is optional but recommended to best mimic the behavior of browsers.
+
+ Note that if ``headers`` are supplied, the 'Content-Type' header will
+ be overwritten because it depends on the dynamic random boundary string
+ which is used to compose the body of the request. The random boundary
+ string can be explicitly set with the ``multipart_boundary`` parameter.
+ """
+ if headers is None:
+ headers = self.headers
+
+ extra_kw = {'headers': {}}
+
+ if fields:
+ if 'body' in urlopen_kw:
+ raise TypeError(
+ "request got values for both 'fields' and 'body', can only specify one.")
+
+ if encode_multipart:
+ body, content_type = encode_multipart_formdata(fields, boundary=multipart_boundary)
+ else:
+ body, content_type = urlencode(fields), 'application/x-www-form-urlencoded'
+
+ extra_kw['body'] = body
+ extra_kw['headers'] = {'Content-Type': content_type}
+
+ extra_kw['headers'].update(headers)
+ extra_kw.update(urlopen_kw)
+
+ return self.urlopen(method, url, **extra_kw)
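+
+ # Usage sketch (illustrative; URL and field names are hypothetical):
+ # posting a form field together with an in-memory file in one
+ # multipart request.
+ #
+ # >>> http = urllib3.PoolManager()
+ # >>> r = http.request_encode_body(
+ # ... 'POST', 'http://example.com/upload',
+ # ... fields={'name': 'netdata',
+ # ... 'file': ('data.txt', 'payload', 'text/plain')})
+ #
+ # The Content-Type header (with its random boundary) is generated by
+ # encode_multipart_formdata and overrides any user-supplied value.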
diff --git a/collectors/python.d.plugin/python_modules/urllib3/response.py b/collectors/python.d.plugin/python_modules/urllib3/response.py
new file mode 100644
index 0000000..cf14a30
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/urllib3/response.py
@@ -0,0 +1,623 @@
+# SPDX-License-Identifier: MIT
+from __future__ import absolute_import
+from contextlib import contextmanager
+import zlib
+import io
+import logging
+from socket import timeout as SocketTimeout
+from socket import error as SocketError
+
+from ._collections import HTTPHeaderDict
+from .exceptions import (
+ BodyNotHttplibCompatible, ProtocolError, DecodeError, ReadTimeoutError,
+ ResponseNotChunked, IncompleteRead, InvalidHeader
+)
+from .packages.six import string_types as basestring, binary_type, PY3
+from .packages.six.moves import http_client as httplib
+from .connection import HTTPException, BaseSSLError
+from .util.response import is_fp_closed, is_response_to_head
+
+log = logging.getLogger(__name__)
+
+
+class DeflateDecoder(object):
+
+ def __init__(self):
+ self._first_try = True
+ self._data = binary_type()
+ self._obj = zlib.decompressobj()
+
+ def __getattr__(self, name):
+ return getattr(self._obj, name)
+
+ def decompress(self, data):
+ if not data:
+ return data
+
+ if not self._first_try:
+ return self._obj.decompress(data)
+
+ self._data += data
+ try:
+ decompressed = self._obj.decompress(data)
+ if decompressed:
+ self._first_try = False
+ self._data = None
+ return decompressed
+ except zlib.error:
+ self._first_try = False
+ self._obj = zlib.decompressobj(-zlib.MAX_WBITS)
+ try:
+ return self.decompress(self._data)
+ finally:
+ self._data = None
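+
+ # Illustrative check of the fallback (hypothetical values): stripping
+ # the 2-byte zlib header and 4-byte checksum from a zlib stream leaves
+ # raw DEFLATE, which the second attempt with -zlib.MAX_WBITS decodes.
+ #
+ # >>> raw = zlib.compress(b'payload')[2:-4]
+ # >>> DeflateDecoder().decompress(raw)
+ # b'payload'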
+
+
+class GzipDecoder(object):
+
+ def __init__(self):
+ self._obj = zlib.decompressobj(16 + zlib.MAX_WBITS)
+
+ def __getattr__(self, name):
+ return getattr(self._obj, name)
+
+ def decompress(self, data):
+ if not data:
+ return data
+ return self._obj.decompress(data)
+
+
+def _get_decoder(mode):
+ if mode == 'gzip':
+ return GzipDecoder()
+
+ return DeflateDecoder()
+
+
+class HTTPResponse(io.IOBase):
+ """
+ HTTP Response container.
+
+ Backwards-compatible with httplib's HTTPResponse, but the response ``body`` is
+ loaded and decoded on-demand when the ``data`` property is accessed. This
+ class is also compatible with the Python standard library's :mod:`io`
+ module, and can hence be treated as a readable object in the context of that
+ framework.
+
+ Extra parameters for behaviour not present in httplib.HTTPResponse:
+
+ :param preload_content:
+ If True, the response's body will be preloaded during construction.
+
+ :param decode_content:
+ If True, attempts to decode the body based on the
+ 'content-encoding' header (e.g. 'gzip' and 'deflate'); if False,
+ the raw body is returned instead.
+
+ :param original_response:
+ When this HTTPResponse wrapper is generated from an httplib.HTTPResponse
+ object, it's convenient to include the original for debug purposes. It's
+ otherwise unused.
+
+ :param retries:
+ The retries contains the last :class:`~urllib3.util.retry.Retry` that
+ was used during the request.
+
+ :param enforce_content_length:
+ Enforce content length checking. Body returned by server must match
+ value of Content-Length header, if present. Otherwise, raise error.
+ """
+
+ CONTENT_DECODERS = ['gzip', 'deflate']
+ REDIRECT_STATUSES = [301, 302, 303, 307, 308]
+
+ def __init__(self, body='', headers=None, status=0, version=0, reason=None,
+ strict=0, preload_content=True, decode_content=True,
+ original_response=None, pool=None, connection=None,
+ retries=None, enforce_content_length=False, request_method=None):
+
+ if isinstance(headers, HTTPHeaderDict):
+ self.headers = headers
+ else:
+ self.headers = HTTPHeaderDict(headers)
+ self.status = status
+ self.version = version
+ self.reason = reason
+ self.strict = strict
+ self.decode_content = decode_content
+ self.retries = retries
+ self.enforce_content_length = enforce_content_length
+
+ self._decoder = None
+ self._body = None
+ self._fp = None
+ self._original_response = original_response
+ self._fp_bytes_read = 0
+
+ if body and isinstance(body, (basestring, binary_type)):
+ self._body = body
+
+ self._pool = pool
+ self._connection = connection
+
+ if hasattr(body, 'read'):
+ self._fp = body
+
+ # Are we using the chunked-style of transfer encoding?
+ self.chunked = False
+ self.chunk_left = None
+ tr_enc = self.headers.get('transfer-encoding', '').lower()
+ # Don't incur the penalty of creating a list and then discarding it
+ encodings = (enc.strip() for enc in tr_enc.split(","))
+ if "chunked" in encodings:
+ self.chunked = True
+
+ # Determine length of response
+ self.length_remaining = self._init_length(request_method)
+
+ # If requested, preload the body.
+ if preload_content and not self._body:
+ self._body = self.read(decode_content=decode_content)
+
+ def get_redirect_location(self):
+ """
+ Should we redirect and where to?
+
+ :returns: Truthy redirect location string if we got a redirect status
+ code and valid location. ``None`` if redirect status and no
+ location. ``False`` if not a redirect status code.
+ """
+ if self.status in self.REDIRECT_STATUSES:
+ return self.headers.get('location')
+
+ return False
+
+ def release_conn(self):
+ if not self._pool or not self._connection:
+ return
+
+ self._pool._put_conn(self._connection)
+ self._connection = None
+
+ @property
+ def data(self):
+ # For backwards-compat with urllib3 0.4 and earlier.
+ if self._body:
+ return self._body
+
+ if self._fp:
+ return self.read(cache_content=True)
+
+ @property
+ def connection(self):
+ return self._connection
+
+ def tell(self):
+ """
+ Obtain the number of bytes pulled over the wire so far. May differ from
+ the amount of content returned by :meth:`HTTPResponse.read` if bytes
+ are encoded on the wire (e.g, compressed).
+ """
+ return self._fp_bytes_read
+
+ def _init_length(self, request_method):
+ """
+ Set initial length value for Response content if available.
+ """
+ length = self.headers.get('content-length')
+
+ if length is not None and self.chunked:
+ # This Response will fail with an IncompleteRead if it can't be
+ # received as chunked. This method falls back to attempt reading
+ # the response before raising an exception.
+ log.warning("Received response with both Content-Length and "
+ "Transfer-Encoding set. This is expressly forbidden "
+ "by RFC 7230 sec 3.3.2. Ignoring Content-Length and "
+ "attempting to process response as Transfer-Encoding: "
+ "chunked.")
+ return None
+
+ elif length is not None:
+ try:
+ # RFC 7230 section 3.3.2 specifies multiple content lengths can
+ # be sent in a single Content-Length header
+ # (e.g. Content-Length: 42, 42). This line ensures the values
+ # are all valid ints and that as long as the `set` length is 1,
+ # all values are the same. Otherwise, the header is invalid.
+ lengths = set([int(val) for val in length.split(',')])
+ if len(lengths) > 1:
+ raise InvalidHeader("Content-Length contained multiple "
+ "unmatching values (%s)" % length)
+ length = lengths.pop()
+ except ValueError:
+ length = None
+ else:
+ if length < 0:
+ length = None
+
+ # Convert status to int for comparison
+ # In some cases, httplib returns a status of "_UNKNOWN"
+ try:
+ status = int(self.status)
+ except ValueError:
+ status = 0
+
+ # Check for responses that shouldn't include a body
+ if status in (204, 304) or 100 <= status < 200 or request_method == 'HEAD':
+ length = 0
+
+ return length
+
+ def _init_decoder(self):
+ """
+ Set-up the _decoder attribute if necessary.
+ """
+ # Note: content-encoding value should be case-insensitive, per RFC 7230
+ # Section 3.2
+ content_encoding = self.headers.get('content-encoding', '').lower()
+ if self._decoder is None and content_encoding in self.CONTENT_DECODERS:
+ self._decoder = _get_decoder(content_encoding)
+
+ def _decode(self, data, decode_content, flush_decoder):
+ """
+ Decode the data passed in and potentially flush the decoder.
+ """
+ try:
+ if decode_content and self._decoder:
+ data = self._decoder.decompress(data)
+ except (IOError, zlib.error) as e:
+ content_encoding = self.headers.get('content-encoding', '').lower()
+ raise DecodeError(
+ "Received response with content-encoding: %s, but "
+ "failed to decode it." % content_encoding, e)
+
+ if flush_decoder and decode_content:
+ data += self._flush_decoder()
+
+ return data
+
+ def _flush_decoder(self):
+ """
+ Flushes the decoder. Should only be called if the decoder is actually
+ being used.
+ """
+ if self._decoder:
+ buf = self._decoder.decompress(b'')
+ return buf + self._decoder.flush()
+
+ return b''
+
+ @contextmanager
+ def _error_catcher(self):
+ """
+ Catch low-level Python exceptions and re-raise them as urllib3
+ variants, so that low-level exceptions are not leaked in the
+ high-level API.
+
+ On exit, release the connection back to the pool.
+ """
+ clean_exit = False
+
+ try:
+ try:
+ yield
+
+ except SocketTimeout:
+ # FIXME: Ideally we'd like to include the url in the ReadTimeoutError but
+ # there is yet no clean way to get at it from this context.
+ raise ReadTimeoutError(self._pool, None, 'Read timed out.')
+
+ except BaseSSLError as e:
+ # FIXME: Is there a better way to differentiate between SSLErrors?
+ if 'read operation timed out' not in str(e): # Defensive:
+ # This shouldn't happen but just in case we're missing an edge
+ # case, let's avoid swallowing SSL errors.
+ raise
+
+ raise ReadTimeoutError(self._pool, None, 'Read timed out.')
+
+ except (HTTPException, SocketError) as e:
+ # This includes IncompleteRead.
+ raise ProtocolError('Connection broken: %r' % e, e)
+
+ # If no exception is thrown, we should avoid cleaning up
+ # unnecessarily.
+ clean_exit = True
+ finally:
+ # If we didn't terminate cleanly, we need to throw away our
+ # connection.
+ if not clean_exit:
+ # The response may not be closed but we're not going to use it
+ # anymore so close it now to ensure that the connection is
+ # released back to the pool.
+ if self._original_response:
+ self._original_response.close()
+
+ # Closing the response may not actually be sufficient to close
+ # everything, so if we have a hold of the connection close that
+ # too.
+ if self._connection:
+ self._connection.close()
+
+ # If we hold the original response but it's closed now, we should
+ # return the connection back to the pool.
+ if self._original_response and self._original_response.isclosed():
+ self.release_conn()
+
+ def read(self, amt=None, decode_content=None, cache_content=False):
+ """
+ Similar to :meth:`httplib.HTTPResponse.read`, but with two additional
+ parameters: ``decode_content`` and ``cache_content``.
+
+ :param amt:
+ How much of the content to read. If specified, caching is skipped
+ because it doesn't make sense to cache partial content as the full
+ response.
+
+ :param decode_content:
+ If True, will attempt to decode the body based on the
+ 'content-encoding' header.
+
+ :param cache_content:
+ If True, will save the returned data such that the same result is
+ returned regardless of the state of the underlying file object. This
+ is useful if you want the ``.data`` property to continue working
+ after having ``.read()`` the file object. (Overridden if ``amt`` is
+ set.)
+ """
+ self._init_decoder()
+ if decode_content is None:
+ decode_content = self.decode_content
+
+ if self._fp is None:
+ return
+
+ flush_decoder = False
+ data = None
+
+ with self._error_catcher():
+ if amt is None:
+ # cStringIO doesn't like amt=None
+ data = self._fp.read()
+ flush_decoder = True
+ else:
+ cache_content = False
+ data = self._fp.read(amt)
+ if amt != 0 and not data: # Platform-specific: Buggy versions of Python.
+ # Close the connection when no data is returned
+ #
+ # This is redundant to what httplib/http.client _should_
+ # already do. However, versions of python released before
+ # December 15, 2012 (http://bugs.python.org/issue16298) do
+ # not properly close the connection in all cases. There is
+ # no harm in redundantly calling close.
+ self._fp.close()
+ flush_decoder = True
+ if self.enforce_content_length and self.length_remaining not in (0, None):
+ # This is an edge case that httplib failed to cover due
+ # to concerns of backward compatibility. We're
+ # addressing it here to make sure IncompleteRead is
+ # raised during streaming, so all calls with incorrect
+ # Content-Length are caught.
+ raise IncompleteRead(self._fp_bytes_read, self.length_remaining)
+
+ if data:
+ self._fp_bytes_read += len(data)
+ if self.length_remaining is not None:
+ self.length_remaining -= len(data)
+
+ data = self._decode(data, decode_content, flush_decoder)
+
+ if cache_content:
+ self._body = data
+
+ return data
+
+ def stream(self, amt=2**16, decode_content=None):
+ """
+ A generator wrapper for the read() method. A call will block until
+ ``amt`` bytes have been read from the connection or until the
+ connection is closed.
+
+ :param amt:
+ How much of the content to read. The generator will return up to
+ this much data per iteration, but may return less. This is particularly
+ likely when using compressed data. However, the empty string will
+ never be returned.
+
+ :param decode_content:
+ If True, will attempt to decode the body based on the
+ 'content-encoding' header.
+ """
+ if self.chunked and self.supports_chunked_reads():
+ for line in self.read_chunked(amt, decode_content=decode_content):
+ yield line
+ else:
+ while not is_fp_closed(self._fp):
+ data = self.read(amt=amt, decode_content=decode_content)
+
+ if data:
+ yield data
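+
+ # Usage sketch (illustrative; ``url`` and ``handle`` are hypothetical):
+ # stream a response without preloading it into memory.
+ #
+ # >>> r = http.request('GET', url, preload_content=False)
+ # >>> for chunk in r.stream(2 ** 10):
+ # ... handle(chunk)
+ # >>> r.release_conn()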
+
+ @classmethod
+ def from_httplib(ResponseCls, r, **response_kw):
+ """
+ Given an :class:`httplib.HTTPResponse` instance ``r``, return a
+ corresponding :class:`urllib3.response.HTTPResponse` object.
+
+ Remaining parameters are passed to the HTTPResponse constructor, along
+ with ``original_response=r``.
+ """
+ headers = r.msg
+
+ if not isinstance(headers, HTTPHeaderDict):
+ if PY3: # Python 3
+ headers = HTTPHeaderDict(headers.items())
+ else: # Python 2
+ headers = HTTPHeaderDict.from_httplib(headers)
+
+ # HTTPResponse objects in Python 3 don't have a .strict attribute
+ strict = getattr(r, 'strict', 0)
+ resp = ResponseCls(body=r,
+ headers=headers,
+ status=r.status,
+ version=r.version,
+ reason=r.reason,
+ strict=strict,
+ original_response=r,
+ **response_kw)
+ return resp
+
+ # Backwards-compatibility methods for httplib.HTTPResponse
+ def getheaders(self):
+ return self.headers
+
+ def getheader(self, name, default=None):
+ return self.headers.get(name, default)
+
+ # Overrides from io.IOBase
+ def close(self):
+ if not self.closed:
+ self._fp.close()
+
+ if self._connection:
+ self._connection.close()
+
+ @property
+ def closed(self):
+ if self._fp is None:
+ return True
+ elif hasattr(self._fp, 'isclosed'):
+ return self._fp.isclosed()
+ elif hasattr(self._fp, 'closed'):
+ return self._fp.closed
+ else:
+ return True
+
+ def fileno(self):
+ if self._fp is None:
+ raise IOError("HTTPResponse has no file to get a fileno from")
+ elif hasattr(self._fp, "fileno"):
+ return self._fp.fileno()
+ else:
+ raise IOError("The file-like object this HTTPResponse is wrapped "
+ "around has no file descriptor")
+
+ def flush(self):
+ if self._fp is not None and hasattr(self._fp, 'flush'):
+ return self._fp.flush()
+
+ def readable(self):
+ # This method is required for `io` module compatibility.
+ return True
+
+ def readinto(self, b):
+ # This method is required for `io` module compatibility.
+ temp = self.read(len(b))
+ if len(temp) == 0:
+ return 0
+ else:
+ b[:len(temp)] = temp
+ return len(temp)
+
+ def supports_chunked_reads(self):
+ """
+ Checks if the underlying file-like object looks like an
+ httplib.HTTPResponse object. We do this by testing for the fp
+ attribute. If it is present we assume it returns raw chunks as
+ processed by read_chunked().
+ """
+ return hasattr(self._fp, 'fp')
+
+ def _update_chunk_length(self):
+ # First, we'll figure out length of a chunk and then
+ # we'll try to read it from socket.
+ if self.chunk_left is not None:
+ return
+ line = self._fp.fp.readline()
+ line = line.split(b';', 1)[0]
+ try:
+ self.chunk_left = int(line, 16)
+ except ValueError:
+ # Invalid chunked protocol response, abort.
+ self.close()
+ raise httplib.IncompleteRead(line)
+
+ def _handle_chunk(self, amt):
+ returned_chunk = None
+ if amt is None:
+ chunk = self._fp._safe_read(self.chunk_left)
+ returned_chunk = chunk
+ self._fp._safe_read(2) # Toss the CRLF at the end of the chunk.
+ self.chunk_left = None
+ elif amt < self.chunk_left:
+ value = self._fp._safe_read(amt)
+ self.chunk_left = self.chunk_left - amt
+ returned_chunk = value
+ elif amt == self.chunk_left:
+ value = self._fp._safe_read(amt)
+ self._fp._safe_read(2) # Toss the CRLF at the end of the chunk.
+ self.chunk_left = None
+ returned_chunk = value
+ else: # amt > self.chunk_left
+ returned_chunk = self._fp._safe_read(self.chunk_left)
+ self._fp._safe_read(2) # Toss the CRLF at the end of the chunk.
+ self.chunk_left = None
+ return returned_chunk
+
+ def read_chunked(self, amt=None, decode_content=None):
+ """
+ Similar to :meth:`HTTPResponse.read`, but with an additional
+ parameter: ``decode_content``.
+
+ :param decode_content:
+ If True, will attempt to decode the body based on the
+ 'content-encoding' header.
+ """
+ self._init_decoder()
+ # FIXME: Rewrite this method and make it a class with a better structured logic.
+ if not self.chunked:
+ raise ResponseNotChunked(
+ "Response is not chunked. "
+ "Header 'transfer-encoding: chunked' is missing.")
+ if not self.supports_chunked_reads():
+ raise BodyNotHttplibCompatible(
+ "Body should be httplib.HTTPResponse like. "
+ "It should have have an fp attribute which returns raw chunks.")
+
+ # Don't bother reading the body of a HEAD request.
+ if self._original_response and is_response_to_head(self._original_response):
+ self._original_response.close()
+ return
+
+ with self._error_catcher():
+ while True:
+ self._update_chunk_length()
+ if self.chunk_left == 0:
+ break
+ chunk = self._handle_chunk(amt)
+ decoded = self._decode(chunk, decode_content=decode_content,
+ flush_decoder=False)
+ if decoded:
+ yield decoded
+
+ if decode_content:
+ # On CPython and PyPy, we should never need to flush the
+ # decoder. However, on Jython we *might* need to, so
+ # let's defensively do it anyway.
+ decoded = self._flush_decoder()
+ if decoded: # Platform-specific: Jython.
+ yield decoded
+
+ # Chunk content ends with \r\n: discard it.
+ while True:
+ line = self._fp.fp.readline()
+ if not line:
+ # Some sites may not end with '\r\n'.
+ break
+ if line == b'\r\n':
+ break
+
+ # We read everything; close the "file".
+ if self._original_response:
+ self._original_response.close()
diff --git a/collectors/python.d.plugin/python_modules/urllib3/util/__init__.py b/collectors/python.d.plugin/python_modules/urllib3/util/__init__.py
new file mode 100644
index 0000000..bba628d
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/urllib3/util/__init__.py
@@ -0,0 +1,55 @@
+# SPDX-License-Identifier: MIT
+from __future__ import absolute_import
+# For backwards compatibility, provide imports that used to be here.
+from .connection import is_connection_dropped
+from .request import make_headers
+from .response import is_fp_closed
+from .ssl_ import (
+ SSLContext,
+ HAS_SNI,
+ IS_PYOPENSSL,
+ IS_SECURETRANSPORT,
+ assert_fingerprint,
+ resolve_cert_reqs,
+ resolve_ssl_version,
+ ssl_wrap_socket,
+)
+from .timeout import (
+ current_time,
+ Timeout,
+)
+
+from .retry import Retry
+from .url import (
+ get_host,
+ parse_url,
+ split_first,
+ Url,
+)
+from .wait import (
+ wait_for_read,
+ wait_for_write
+)
+
+__all__ = (
+ 'HAS_SNI',
+ 'IS_PYOPENSSL',
+ 'IS_SECURETRANSPORT',
+ 'SSLContext',
+ 'Retry',
+ 'Timeout',
+ 'Url',
+ 'assert_fingerprint',
+ 'current_time',
+ 'is_connection_dropped',
+ 'is_fp_closed',
+ 'get_host',
+ 'parse_url',
+ 'make_headers',
+ 'resolve_cert_reqs',
+ 'resolve_ssl_version',
+ 'split_first',
+ 'ssl_wrap_socket',
+ 'wait_for_read',
+ 'wait_for_write'
+)
diff --git a/collectors/python.d.plugin/python_modules/urllib3/util/connection.py b/collectors/python.d.plugin/python_modules/urllib3/util/connection.py
new file mode 100644
index 0000000..3bd69e8
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/urllib3/util/connection.py
@@ -0,0 +1,131 @@
+# SPDX-License-Identifier: MIT
+from __future__ import absolute_import
+import socket
+from .wait import wait_for_read
+from .selectors import HAS_SELECT, SelectorError
+
+
+def is_connection_dropped(conn): # Platform-specific
+ """
+ Returns True if the connection is dropped and should be closed.
+
+ :param conn:
+ :class:`httplib.HTTPConnection` object.
+
+ Note: For platforms like AppEngine, this will always return ``False`` to
+ let the platform handle connection recycling transparently for us.
+ """
+ sock = getattr(conn, 'sock', False)
+ if sock is False: # Platform-specific: AppEngine
+ return False
+ if sock is None: # Connection already closed (such as by httplib).
+ return True
+
+ if not HAS_SELECT:
+ return False
+
+ try:
+ return bool(wait_for_read(sock, timeout=0.0))
+ except SelectorError:
+ return True
+
+
+# This function is copied from socket.py in the Python 2.7 standard
+# library test suite. The only addition to its signature is `socket_options`.
+# One additional modification is that we avoid binding to IPv6 servers
+# discovered in DNS if the system doesn't have IPv6 functionality.
+def create_connection(address, timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
+ source_address=None, socket_options=None):
+ """Connect to *address* and return the socket object.
+
+ Convenience function. Connect to *address* (a 2-tuple ``(host,
+ port)``) and return the socket object. Passing the optional
+ *timeout* parameter will set the timeout on the socket instance
+ before attempting to connect. If no *timeout* is supplied, the
+ global default timeout setting returned by :func:`getdefaulttimeout`
+ is used. If *source_address* is set it must be a tuple of (host, port)
+ for the socket to bind as a source address before making the connection.
+ A host of '' or port 0 tells the OS to use the default.
+ """
+
+ host, port = address
+ if host.startswith('['):
+ host = host.strip('[]')
+ err = None
+
+ # Using the value from allowed_gai_family() in the context of getaddrinfo lets
+ # us select whether to work with IPv4 DNS records, IPv6 records, or both.
+ # The original create_connection function always returns all records.
+ family = allowed_gai_family()
+
+ for res in socket.getaddrinfo(host, port, family, socket.SOCK_STREAM):
+ af, socktype, proto, canonname, sa = res
+ sock = None
+ try:
+ sock = socket.socket(af, socktype, proto)
+
+ # If provided, set socket level options before connecting.
+ _set_socket_options(sock, socket_options)
+
+ if timeout is not socket._GLOBAL_DEFAULT_TIMEOUT:
+ sock.settimeout(timeout)
+ if source_address:
+ sock.bind(source_address)
+ sock.connect(sa)
+ return sock
+
+ except socket.error as e:
+ err = e
+ if sock is not None:
+ sock.close()
+ sock = None
+
+ if err is not None:
+ raise err
+
+ raise socket.error("getaddrinfo returns an empty list")
+
+
+def _set_socket_options(sock, options):
+ if options is None:
+ return
+
+ for opt in options:
+ sock.setsockopt(*opt)
+
+
+def allowed_gai_family():
+ """This function is designed to work in the context of
+ getaddrinfo, where family=socket.AF_UNSPEC is the default and
+ will perform a DNS search for both IPv6 and IPv4 records."""
+
+ family = socket.AF_INET
+ if HAS_IPV6:
+ family = socket.AF_UNSPEC
+ return family
+
+
+def _has_ipv6(host):
+ """ Returns True if the system can bind an IPv6 address. """
+ sock = None
+ has_ipv6 = False
+
+ if socket.has_ipv6:
+ # has_ipv6 returns true if cPython was compiled with IPv6 support.
+ # It does not tell us if the system has IPv6 support enabled. To
+ # determine that we must bind to an IPv6 address.
+ # https://github.com/shazow/urllib3/pull/611
+ # https://bugs.python.org/issue658327
+ try:
+ sock = socket.socket(socket.AF_INET6)
+ sock.bind((host, 0))
+ has_ipv6 = True
+ except Exception:
+ pass
+
+ if sock:
+ sock.close()
+ return has_ipv6
+
+
+HAS_IPV6 = _has_ipv6('::1')
diff --git a/collectors/python.d.plugin/python_modules/urllib3/util/request.py b/collectors/python.d.plugin/python_modules/urllib3/util/request.py
new file mode 100644
index 0000000..18f27b0
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/urllib3/util/request.py
@@ -0,0 +1,119 @@
+# SPDX-License-Identifier: MIT
+from __future__ import absolute_import
+from base64 import b64encode
+
+from ..packages.six import b, integer_types
+from ..exceptions import UnrewindableBodyError
+
+ACCEPT_ENCODING = 'gzip,deflate'
+_FAILEDTELL = object()
+
+
+def make_headers(keep_alive=None, accept_encoding=None, user_agent=None,
+ basic_auth=None, proxy_basic_auth=None, disable_cache=None):
+ """
+ Shortcuts for generating request headers.
+
+ :param keep_alive:
+ If ``True``, adds 'connection: keep-alive' header.
+
+ :param accept_encoding:
+ Can be a boolean, list, or string.
+ ``True`` translates to 'gzip,deflate'.
+ List will get joined by comma.
+ String will be used as provided.
+
+ :param user_agent:
+ String representing the user-agent you want, such as
+ "python-urllib3/0.6"
+
+ :param basic_auth:
+ Colon-separated username:password string for 'authorization: basic ...'
+ auth header.
+
+ :param proxy_basic_auth:
+ Colon-separated username:password string for 'proxy-authorization: basic ...'
+ auth header.
+
+ :param disable_cache:
+ If ``True``, adds 'cache-control: no-cache' header.
+
+ Example::
+
+ >>> make_headers(keep_alive=True, user_agent="Batman/1.0")
+ {'connection': 'keep-alive', 'user-agent': 'Batman/1.0'}
+ >>> make_headers(accept_encoding=True)
+ {'accept-encoding': 'gzip,deflate'}
+ """
+ headers = {}
+ if accept_encoding:
+ if isinstance(accept_encoding, str):
+ pass
+ elif isinstance(accept_encoding, list):
+ accept_encoding = ','.join(accept_encoding)
+ else:
+ accept_encoding = ACCEPT_ENCODING
+ headers['accept-encoding'] = accept_encoding
+
+ if user_agent:
+ headers['user-agent'] = user_agent
+
+ if keep_alive:
+ headers['connection'] = 'keep-alive'
+
+ if basic_auth:
+ headers['authorization'] = 'Basic ' + \
+ b64encode(b(basic_auth)).decode('utf-8')
+
+ if proxy_basic_auth:
+ headers['proxy-authorization'] = 'Basic ' + \
+ b64encode(b(proxy_basic_auth)).decode('utf-8')
+
+ if disable_cache:
+ headers['cache-control'] = 'no-cache'
+
+ return headers
+
+
+def set_file_position(body, pos):
+ """
+ If a position is provided, move file to that point.
+ Otherwise, we'll attempt to record a position for future use.
+ """
+ if pos is not None:
+ rewind_body(body, pos)
+ elif getattr(body, 'tell', None) is not None:
+ try:
+ pos = body.tell()
+ except (IOError, OSError):
+ # This differentiates from None, allowing us to catch
+ # a failed `tell()` later when trying to rewind the body.
+ pos = _FAILEDTELL
+
+ return pos
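+
+
+# Illustrative flow (the file name is hypothetical): record the position
+# before the first send so a redirect or retry can replay the same body.
+#
+# body = open('payload.bin', 'rb')
+# pos = set_file_position(body, None) # records body.tell()
+# ... first attempt redirects or fails ...
+# rewind_body(body, pos) # seek back before re-sending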
+
+
+def rewind_body(body, body_pos):
+ """
+ Attempt to rewind body to a certain position.
+ Primarily used for request redirects and retries.
+
+ :param body:
+ File-like object that supports seek.
+
+ :param int body_pos:
+ Position to seek to in the file.
+ """
+ body_seek = getattr(body, 'seek', None)
+ if body_seek is not None and isinstance(body_pos, integer_types):
+ try:
+ body_seek(body_pos)
+ except (IOError, OSError):
+ raise UnrewindableBodyError("An error occurred when rewinding request "
+ "body for redirect/retry.")
+ elif body_pos is _FAILEDTELL:
+ raise UnrewindableBodyError("Unable to record file position for rewinding "
+ "request body during a redirect/retry.")
+ else:
+ raise ValueError("body_pos must be of type integer, "
+ "instead it was %s." % type(body_pos))
diff --git a/collectors/python.d.plugin/python_modules/urllib3/util/response.py b/collectors/python.d.plugin/python_modules/urllib3/util/response.py
new file mode 100644
index 0000000..e4cda93
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/urllib3/util/response.py
@@ -0,0 +1,82 @@
+# SPDX-License-Identifier: MIT
+from __future__ import absolute_import
+from ..packages.six.moves import http_client as httplib
+
+from ..exceptions import HeaderParsingError
+
+
+def is_fp_closed(obj):
+ """
+ Checks whether a given file-like object is closed.
+
+ :param obj:
+ The file-like object to check.
+ """
+
+ try:
+ # Check `isclosed()` first, in case Python 3 doesn't set `closed`.
+ # GH Issue #928
+ return obj.isclosed()
+ except AttributeError:
+ pass
+
+ try:
+ # Check via the official file-like-object way.
+ return obj.closed
+ except AttributeError:
+ pass
+
+ try:
+ # Check if the object is a container for another file-like object that
+ # gets released on exhaustion (e.g. HTTPResponse).
+ return obj.fp is None
+ except AttributeError:
+ pass
+
+ raise ValueError("Unable to determine whether fp is closed.")
+
+
+def assert_header_parsing(headers):
+ """
+ Asserts whether all headers have been successfully parsed.
+ Extracts encountered errors from the result of parsing headers.
+
+ Only works on Python 3.
+
+ :param headers: Headers to verify.
+ :type headers: `httplib.HTTPMessage`.
+
+ :raises urllib3.exceptions.HeaderParsingError:
+ If parsing errors are found.
+ """
+
+ # This will fail silently if we pass in the wrong kind of parameter.
+ # To make debugging easier add an explicit check.
+ if not isinstance(headers, httplib.HTTPMessage):
+ raise TypeError('expected httplib.HTTPMessage, got {0}.'.format(
+ type(headers)))
+
+ defects = getattr(headers, 'defects', None)
+ get_payload = getattr(headers, 'get_payload', None)
+
+ unparsed_data = None
+ if get_payload: # Platform-specific: Python 3.
+ unparsed_data = get_payload()
+
+ if defects or unparsed_data:
+ raise HeaderParsingError(defects=defects, unparsed_data=unparsed_data)
+
+
+def is_response_to_head(response):
+ """
+ Checks whether the request of a response was a HEAD request.
+ Handles the quirks of AppEngine.
+
+ :param response:
+ :type response: :class:`httplib.HTTPResponse`
+ """
+ # FIXME: Can we do this somehow without accessing private httplib _method?
+ method = response._method
+ if isinstance(method, int): # Platform-specific: Appengine
+ return method == 3
+ return method.upper() == 'HEAD'
diff --git a/collectors/python.d.plugin/python_modules/urllib3/util/retry.py b/collectors/python.d.plugin/python_modules/urllib3/util/retry.py
new file mode 100644
index 0000000..61e63af
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/urllib3/util/retry.py
@@ -0,0 +1,402 @@
+# SPDX-License-Identifier: MIT
+from __future__ import absolute_import
+import time
+import logging
+from collections import namedtuple
+from itertools import takewhile
+import email
+import re
+
+from ..exceptions import (
+ ConnectTimeoutError,
+ MaxRetryError,
+ ProtocolError,
+ ReadTimeoutError,
+ ResponseError,
+ InvalidHeader,
+)
+from ..packages import six
+
+
+log = logging.getLogger(__name__)
+
+# Data structure for representing the metadata of requests that result in a retry.
+RequestHistory = namedtuple('RequestHistory', ["method", "url", "error",
+ "status", "redirect_location"])
+
+
+class Retry(object):
+ """ Retry configuration.
+
+ Each retry attempt will create a new Retry object with updated values, so
+ they can be safely reused.
+
+ Retries can be defined as a default for a pool::
+
+ retries = Retry(connect=5, read=2, redirect=5)
+ http = PoolManager(retries=retries)
+ response = http.request('GET', 'http://example.com/')
+
+ Or per-request (which overrides the default for the pool)::
+
+ response = http.request('GET', 'http://example.com/', retries=Retry(10))
+
+ Retries can be disabled by passing ``False``::
+
+ response = http.request('GET', 'http://example.com/', retries=False)
+
+ Errors will be wrapped in :class:`~urllib3.exceptions.MaxRetryError` unless
+ retries are disabled, in which case the causing exception will be raised.
+
+ :param int total:
+ Total number of retries to allow. Takes precedence over other counts.
+
+ Set to ``None`` to remove this constraint and fall back on other
+ counts. It's a good idea to set this to some sensibly-high value to
+ account for unexpected edge cases and avoid infinite retry loops.
+
+ Set to ``0`` to fail on the first retry.
+
+ Set to ``False`` to disable and imply ``raise_on_redirect=False``.
+
+ :param int connect:
+ How many connection-related errors to retry on.
+
+ These are errors raised before the request is sent to the remote server,
+ which we assume has not triggered the server to process the request.
+
+ Set to ``0`` to fail on the first retry of this type.
+
+ :param int read:
+ How many times to retry on read errors.
+
+ These errors are raised after the request was sent to the server, so the
+ request may have side-effects.
+
+ Set to ``0`` to fail on the first retry of this type.
+
+ :param int redirect:
+ How many redirects to perform. Limit this to avoid infinite redirect
+ loops.
+
+ A redirect is an HTTP response with a status code of 301, 302, 303,
+ 307 or 308.
+
+ Set to ``0`` to fail on the first retry of this type.
+
+ Set to ``False`` to disable and imply ``raise_on_redirect=False``.
+
+ :param int status:
+ How many times to retry on bad status codes.
+
+ These are retries made on responses, where status code matches
+ ``status_forcelist``.
+
+ Set to ``0`` to fail on the first retry of this type.
+
+ :param iterable method_whitelist:
+ Set of uppercased HTTP method verbs that we should retry on.
+
+ By default, we only retry on methods which are considered to be
+ idempotent (multiple requests with the same parameters end with the
+ same state). See :attr:`Retry.DEFAULT_METHOD_WHITELIST`.
+
+ Set to a ``False`` value to retry on any verb.
+
+ :param iterable status_forcelist:
+ A set of integer HTTP status codes that we should force a retry on.
+ A retry is initiated if the request method is in ``method_whitelist``
+ and the response status code is in ``status_forcelist``.
+
+ By default, this is disabled with ``None``.
+
+ :param float backoff_factor:
+ A backoff factor to apply between attempts after the second try
+ (most errors are resolved immediately by a second try without a
+ delay). urllib3 will sleep for::
+
+ {backoff factor} * (2 ^ ({number of total retries} - 1))
+
+ seconds. If the backoff_factor is 0.1, then :func:`.sleep` will sleep
+ for [0.0s, 0.2s, 0.4s, ...] between retries. It will never be longer
+ than :attr:`Retry.BACKOFF_MAX`.
+
+ By default, backoff is disabled (set to 0).
+
+ :param bool raise_on_redirect: Whether, if the number of redirects is
+ exhausted, to raise a MaxRetryError, or to return a response with a
+ response code in the 3xx range.
+
+ :param bool raise_on_status: Similar meaning to ``raise_on_redirect``:
+ whether we should raise an exception, or return a response,
+ if status falls in ``status_forcelist`` range and retries have
+ been exhausted.
+
+ :param tuple history: The history of the request encountered during
+ each call to :meth:`~Retry.increment`. The list is in the order
+ the requests occurred. Each list item is of class :class:`RequestHistory`.
+
+ :param bool respect_retry_after_header:
+ Whether to respect Retry-After header on status codes defined as
+ :attr:`Retry.RETRY_AFTER_STATUS_CODES` or not.
+
+ """
+
+ DEFAULT_METHOD_WHITELIST = frozenset([
+ 'HEAD', 'GET', 'PUT', 'DELETE', 'OPTIONS', 'TRACE'])
+
+ RETRY_AFTER_STATUS_CODES = frozenset([413, 429, 503])
+
+ #: Maximum backoff time.
+ BACKOFF_MAX = 120
+
+ def __init__(self, total=10, connect=None, read=None, redirect=None, status=None,
+ method_whitelist=DEFAULT_METHOD_WHITELIST, status_forcelist=None,
+ backoff_factor=0, raise_on_redirect=True, raise_on_status=True,
+ history=None, respect_retry_after_header=True):
+
+ self.total = total
+ self.connect = connect
+ self.read = read
+ self.status = status
+
+ if redirect is False or total is False:
+ redirect = 0
+ raise_on_redirect = False
+
+ self.redirect = redirect
+ self.status_forcelist = status_forcelist or set()
+ self.method_whitelist = method_whitelist
+ self.backoff_factor = backoff_factor
+ self.raise_on_redirect = raise_on_redirect
+ self.raise_on_status = raise_on_status
+ self.history = history or tuple()
+ self.respect_retry_after_header = respect_retry_after_header
+
+ def new(self, **kw):
+ params = dict(
+ total=self.total,
+ connect=self.connect, read=self.read, redirect=self.redirect, status=self.status,
+ method_whitelist=self.method_whitelist,
+ status_forcelist=self.status_forcelist,
+ backoff_factor=self.backoff_factor,
+ raise_on_redirect=self.raise_on_redirect,
+ raise_on_status=self.raise_on_status,
+ history=self.history,
+ )
+ params.update(kw)
+ return type(self)(**params)
+
+ @classmethod
+ def from_int(cls, retries, redirect=True, default=None):
+ """ Backwards-compatibility for the old retries format."""
+ if retries is None:
+ retries = default if default is not None else cls.DEFAULT
+
+ if isinstance(retries, Retry):
+ return retries
+
+ # A truthy ``redirect`` means "no explicit limit" (None); a falsy one
+ # disables redirect retries entirely (False).
+ redirect = bool(redirect) and None
+ new_retries = cls(retries, redirect=redirect)
+ log.debug("Converted retries value: %r -> %r", retries, new_retries)
+ return new_retries
+
+ def get_backoff_time(self):
+ """ Formula for computing the current backoff
+
+ :rtype: float
+ """
+ # We want to consider only the last consecutive errors sequence (Ignore redirects).
+ consecutive_errors_len = len(list(takewhile(lambda x: x.redirect_location is None,
+ reversed(self.history))))
+ if consecutive_errors_len <= 1:
+ return 0
+
+ backoff_value = self.backoff_factor * (2 ** (consecutive_errors_len - 1))
+ return min(self.BACKOFF_MAX, backoff_value)
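+
+ # Worked example (illustrative): with backoff_factor=0.5 and three
+ # consecutive non-redirect errors in the history, the sleep is
+ # 0.5 * 2 ** (3 - 1) = 2.0 seconds, never exceeding BACKOFF_MAX.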
+
+ def parse_retry_after(self, retry_after):
+ # Whitespace: https://tools.ietf.org/html/rfc7230#section-3.2.4
+ if re.match(r"^\s*[0-9]+\s*$", retry_after):
+ seconds = int(retry_after)
+ else:
+ retry_date_tuple = email.utils.parsedate(retry_after)
+ if retry_date_tuple is None:
+ raise InvalidHeader("Invalid Retry-After header: %s" % retry_after)
+ retry_date = time.mktime(retry_date_tuple)
+ seconds = retry_date - time.time()
+
+ if seconds < 0:
+ seconds = 0
+
+ return seconds
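+
+ # Both header forms are handled (illustrative):
+ #
+ # >>> Retry().parse_retry_after('120')
+ # 120
+ # >>> Retry().parse_retry_after('Fri, 31 Dec 1999 23:59:59 GMT')
+ # yields the seconds remaining until that date (0 once it has passed).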
+
+ def get_retry_after(self, response):
+ """ Get the value of Retry-After in seconds. """
+
+ retry_after = response.getheader("Retry-After")
+
+ if retry_after is None:
+ return None
+
+ return self.parse_retry_after(retry_after)
+
+ def sleep_for_retry(self, response=None):
+ retry_after = self.get_retry_after(response)
+ if retry_after:
+ time.sleep(retry_after)
+ return True
+
+ return False
+
+ def _sleep_backoff(self):
+ backoff = self.get_backoff_time()
+ if backoff <= 0:
+ return
+ time.sleep(backoff)
+
+ def sleep(self, response=None):
+ """ Sleep between retry attempts.
+
+ This method will respect a server's ``Retry-After`` response header
+ and sleep the duration of the time requested. If that is not present, it
+ will use an exponential backoff. By default, the backoff factor is 0 and
+ this method will return immediately.
+ """
+
+ if response:
+ slept = self.sleep_for_retry(response)
+ if slept:
+ return
+
+ self._sleep_backoff()
+
+ def _is_connection_error(self, err):
+ """ Errors when we're fairly sure that the server did not receive the
+ request, so it should be safe to retry.
+ """
+ return isinstance(err, ConnectTimeoutError)
+
+ def _is_read_error(self, err):
+ """ Errors that occur after the request has been started, so we should
+ assume that the server began processing it.
+ """
+ return isinstance(err, (ReadTimeoutError, ProtocolError))
+
+ def _is_method_retryable(self, method):
+ """ Checks if a given HTTP method should be retried upon, depending if
+ it is included on the method whitelist.
+ """
+ if self.method_whitelist and method.upper() not in self.method_whitelist:
+ return False
+
+ return True
+
+ def is_retry(self, method, status_code, has_retry_after=False):
+ """ Is this method/status code retryable? (Based on whitelists and control
+ variables such as the number of total retries to allow, whether to
+ respect the Retry-After header, whether this header is present, and
+ whether the returned status code is in the list of status codes to
+ be retried upon when that header is present.)
+ """
+ if not self._is_method_retryable(method):
+ return False
+
+ if self.status_forcelist and status_code in self.status_forcelist:
+ return True
+
+ return (self.total and self.respect_retry_after_header and
+ has_retry_after and (status_code in self.RETRY_AFTER_STATUS_CODES))
+
+ def is_exhausted(self):
+ """ Are we out of retries? """
+ retry_counts = (self.total, self.connect, self.read, self.redirect, self.status)
+ retry_counts = list(filter(None, retry_counts))
+ if not retry_counts:
+ return False
+
+ return min(retry_counts) < 0
+
+ def increment(self, method=None, url=None, response=None, error=None,
+ _pool=None, _stacktrace=None):
+ """ Return a new Retry object with incremented retry counters.
+
+ :param response: A response object, or None, if the server did not
+ return a response.
+ :type response: :class:`~urllib3.response.HTTPResponse`
+ :param Exception error: An error encountered during the request, or
+ None if the response was received successfully.
+
+ :return: A new ``Retry`` object.
+ """
+ if self.total is False and error:
+ # Disabled, indicate to re-raise the error.
+ raise six.reraise(type(error), error, _stacktrace)
+
+ total = self.total
+ if total is not None:
+ total -= 1
+
+ connect = self.connect
+ read = self.read
+ redirect = self.redirect
+ status_count = self.status
+ cause = 'unknown'
+ status = None
+ redirect_location = None
+
+ if error and self._is_connection_error(error):
+ # Connect retry?
+ if connect is False:
+ raise six.reraise(type(error), error, _stacktrace)
+ elif connect is not None:
+ connect -= 1
+
+ elif error and self._is_read_error(error):
+ # Read retry?
+ if read is False or not self._is_method_retryable(method):
+ raise six.reraise(type(error), error, _stacktrace)
+ elif read is not None:
+ read -= 1
+
+ elif response and response.get_redirect_location():
+ # Redirect retry?
+ if redirect is not None:
+ redirect -= 1
+ cause = 'too many redirects'
+ redirect_location = response.get_redirect_location()
+ status = response.status
+
+ else:
+ # Incrementing because of a server error like a 500 in
+ # status_forcelist, when the given method is in the whitelist
+ cause = ResponseError.GENERIC_ERROR
+ if response and response.status:
+ if status_count is not None:
+ status_count -= 1
+ cause = ResponseError.SPECIFIC_ERROR.format(
+ status_code=response.status)
+ status = response.status
+
+ history = self.history + (RequestHistory(method, url, error, status, redirect_location),)
+
+ new_retry = self.new(
+ total=total,
+ connect=connect, read=read, redirect=redirect, status=status_count,
+ history=history)
+
+ if new_retry.is_exhausted():
+ raise MaxRetryError(_pool, url, error or ResponseError(cause))
+
+ log.debug("Incremented Retry for (url='%s'): %r", url, new_retry)
+
+ return new_retry
+
+ def __repr__(self):
+ return ('{cls.__name__}(total={self.total}, connect={self.connect}, '
+ 'read={self.read}, redirect={self.redirect}, status={self.status})').format(
+ cls=type(self), self=self)
+
+
+# For backwards compatibility (equivalent to pre-v1.9):
+Retry.DEFAULT = Retry(3)
diff --git a/collectors/python.d.plugin/python_modules/urllib3/util/selectors.py b/collectors/python.d.plugin/python_modules/urllib3/util/selectors.py
new file mode 100644
index 0000000..c0997b1
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/urllib3/util/selectors.py
@@ -0,0 +1,582 @@
+# SPDX-License-Identifier: MIT
+# Backport of selectors.py from Python 3.5+ to support Python < 3.4
+# Also has the behavior specified in PEP 475 which is to retry syscalls
+# in the case of an EINTR error. This module is required because selectors34
+# does not follow this behavior and instead reports that no file descriptor
+# events have occurred rather than retrying the syscall. The decision to drop
+# support for select.devpoll is made to maintain 100% test coverage.
+
+import errno
+import math
+import select
+import socket
+import sys
+import time
+from collections import namedtuple, Mapping
+
+try:
+ monotonic = time.monotonic
+except (AttributeError, ImportError): # Python < 3.3
+ monotonic = time.time
+
+EVENT_READ = (1 << 0)
+EVENT_WRITE = (1 << 1)
+
+HAS_SELECT = True # Variable that shows whether the platform has a selector.
+_SYSCALL_SENTINEL = object() # Sentinel in case a system call returns None.
+_DEFAULT_SELECTOR = None
+
+
+class SelectorError(Exception):
+ def __init__(self, errcode):
+ super(SelectorError, self).__init__()
+ self.errno = errcode
+
+ def __repr__(self):
+ return "<SelectorError errno={0}>".format(self.errno)
+
+ def __str__(self):
+ return self.__repr__()
+
+
+def _fileobj_to_fd(fileobj):
+ """ Return a file descriptor from a file object. If
+ given an integer, simply return that integer back. """
+ if isinstance(fileobj, int):
+ fd = fileobj
+ else:
+ try:
+ fd = int(fileobj.fileno())
+ except (AttributeError, TypeError, ValueError):
+ raise ValueError("Invalid file object: {0!r}".format(fileobj))
+ if fd < 0:
+ raise ValueError("Invalid file descriptor: {0}".format(fd))
+ return fd
+
+
+# Determine which function to use to wrap system calls because Python 3.5+
+# already handles the case when system calls are interrupted.
+if sys.version_info >= (3, 5):
+ def _syscall_wrapper(func, _, *args, **kwargs):
+ """ This is the short-circuit version of the below logic
+ because in Python 3.5+ all system calls automatically restart
+ and recalculate their timeouts. """
+ try:
+ return func(*args, **kwargs)
+ except (OSError, IOError, select.error) as e:
+ errcode = None
+ if hasattr(e, "errno"):
+ errcode = e.errno
+ raise SelectorError(errcode)
+else:
+ def _syscall_wrapper(func, recalc_timeout, *args, **kwargs):
+ """ Wrapper function for syscalls that could fail due to EINTR.
+ All functions should be retried if there is time left in the timeout
+ in accordance with PEP 475. """
+ timeout = kwargs.get("timeout", None)
+ if timeout is None:
+ expires = None
+ recalc_timeout = False
+ else:
+ timeout = float(timeout)
+ if timeout < 0.0: # Timeout less than 0 treated as no timeout.
+ expires = None
+ else:
+ expires = monotonic() + timeout
+
+ args = list(args)
+ if recalc_timeout and "timeout" not in kwargs:
+ raise ValueError(
+ "Timeout must be in args or kwargs to be recalculated")
+
+ result = _SYSCALL_SENTINEL
+ while result is _SYSCALL_SENTINEL:
+ try:
+ result = func(*args, **kwargs)
+ # OSError is thrown by select.select
+ # IOError is thrown by select.epoll.poll
+ # select.error is thrown by select.poll.poll
+ # Aren't we thankful for Python 3.x rework for exceptions?
+ except (OSError, IOError, select.error) as e:
+ # select.error wasn't a subclass of OSError in the past.
+ errcode = None
+ if hasattr(e, "errno"):
+ errcode = e.errno
+ elif hasattr(e, "args"):
+ errcode = e.args[0]
+
+ # Also test for the Windows equivalent of EINTR.
+ is_interrupt = (errcode == errno.EINTR or (hasattr(errno, "WSAEINTR") and
+ errcode == errno.WSAEINTR))
+
+ if is_interrupt:
+ if expires is not None:
+ current_time = monotonic()
+ if current_time > expires:
+ raise OSError(errno.ETIMEDOUT, "Connection timed out")
+ if recalc_timeout:
+ if "timeout" in kwargs:
+ kwargs["timeout"] = expires - current_time
+ continue
+ if errcode:
+ raise SelectorError(errcode)
+ else:
+ raise
+ return result
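+
+
+# Illustrative call (hypothetical rlist/wlist; both branches above share
+# this signature):
+#
+# r, w, x = _syscall_wrapper(select.select, False, rlist, wlist, [], 1.0)
+#
+# On Python < 3.5 the call is retried after EINTR; other failures surface
+# as SelectorError carrying the underlying errno.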
+
+
+SelectorKey = namedtuple('SelectorKey', ['fileobj', 'fd', 'events', 'data'])
+
+
+class _SelectorMapping(Mapping):
+ """ Mapping of file objects to selector keys """
+
+ def __init__(self, selector):
+ self._selector = selector
+
+ def __len__(self):
+ return len(self._selector._fd_to_key)
+
+ def __getitem__(self, fileobj):
+ try:
+ fd = self._selector._fileobj_lookup(fileobj)
+ return self._selector._fd_to_key[fd]
+ except KeyError:
+ raise KeyError("{0!r} is not registered.".format(fileobj))
+
+ def __iter__(self):
+ return iter(self._selector._fd_to_key)
+
+
+class BaseSelector(object):
+ """ Abstract Selector class
+
+ A selector supports registering file objects to be monitored
+ for specific I/O events.
+
+ A file object is a file descriptor or any object with a
+ `fileno()` method. An arbitrary object can be attached to the
+ file object which can be used for example to store context info,
+ a callback, etc.
+
+ A selector can use various implementations (select(), poll(), epoll(),
+ and kqueue()) depending on the platform. The 'DefaultSelector' class uses
+ the most efficient implementation for the current platform.
+ """
+ def __init__(self):
+ # Maps file descriptors to keys.
+ self._fd_to_key = {}
+
+ # Read-only mapping returned by get_map()
+ self._map = _SelectorMapping(self)
+
+ def _fileobj_lookup(self, fileobj):
+ """ Return a file descriptor from a file object.
+ This wraps _fileobj_to_fd() to do an exhaustive
+ search in case the object is invalid but we still
+ have it in our map. Used by unregister() so we can
+ unregister an object that was previously registered
+ even if it is closed. It is also used by _SelectorMapping.
+ """
+ try:
+ return _fileobj_to_fd(fileobj)
+ except ValueError:
+
+ # Search through all our mapped keys.
+ for key in self._fd_to_key.values():
+ if key.fileobj is fileobj:
+ return key.fd
+
+ # Raise ValueError after all.
+ raise
+
+ def register(self, fileobj, events, data=None):
+ """ Register a file object for a set of events to monitor. """
+ if (not events) or (events & ~(EVENT_READ | EVENT_WRITE)):
+ raise ValueError("Invalid events: {0!r}".format(events))
+
+ key = SelectorKey(fileobj, self._fileobj_lookup(fileobj), events, data)
+
+ if key.fd in self._fd_to_key:
+ raise KeyError("{0!r} (FD {1}) is already registered"
+ .format(fileobj, key.fd))
+
+ self._fd_to_key[key.fd] = key
+ return key
+
+ def unregister(self, fileobj):
+ """ Unregister a file object from being monitored. """
+ try:
+ key = self._fd_to_key.pop(self._fileobj_lookup(fileobj))
+ except KeyError:
+ raise KeyError("{0!r} is not registered".format(fileobj))
+
+ # Getting the fileno of a closed socket on Windows errors with EBADF.
+ except socket.error as e: # Platform-specific: Windows.
+ if e.errno != errno.EBADF:
+ raise
+ else:
+ for key in self._fd_to_key.values():
+ if key.fileobj is fileobj:
+ self._fd_to_key.pop(key.fd)
+ break
+ else:
+ raise KeyError("{0!r} is not registered".format(fileobj))
+ return key
+
+ def modify(self, fileobj, events, data=None):
+ """ Change a registered file object monitored events and data. """
+ # NOTE: Some subclasses optimize this operation even further.
+ try:
+ key = self._fd_to_key[self._fileobj_lookup(fileobj)]
+ except KeyError:
+ raise KeyError("{0!r} is not registered".format(fileobj))
+
+ if events != key.events:
+ self.unregister(fileobj)
+ key = self.register(fileobj, events, data)
+
+ elif data != key.data:
+ # Use a shortcut to update the data.
+ key = key._replace(data=data)
+ self._fd_to_key[key.fd] = key
+
+ return key
+
+ def select(self, timeout=None):
+ """ Perform the actual selection until some monitored file objects
+ are ready or the timeout expires. """
+ raise NotImplementedError()
+
+ def close(self):
+ """ Close the selector. This must be called to ensure that all
+ underlying resources are freed. """
+ self._fd_to_key.clear()
+ self._map = None
+
+ def get_key(self, fileobj):
+ """ Return the key associated with a registered file object. """
+ mapping = self.get_map()
+ if mapping is None:
+ raise RuntimeError("Selector is closed")
+ try:
+ return mapping[fileobj]
+ except KeyError:
+ raise KeyError("{0!r} is not registered".format(fileobj))
+
+ def get_map(self):
+ """ Return a mapping of file objects to selector keys """
+ return self._map
+
+ def _key_from_fd(self, fd):
+ """ Return the key associated to a given file descriptor
+ Return None if it is not found. """
+ try:
+ return self._fd_to_key[fd]
+ except KeyError:
+ return None
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, *args):
+ self.close()
+
+
+# Almost all platforms have select.select()
+if hasattr(select, "select"):
+ class SelectSelector(BaseSelector):
+ """ Select-based selector. """
+ def __init__(self):
+ super(SelectSelector, self).__init__()
+ self._readers = set()
+ self._writers = set()
+
+ def register(self, fileobj, events, data=None):
+ key = super(SelectSelector, self).register(fileobj, events, data)
+ if events & EVENT_READ:
+ self._readers.add(key.fd)
+ if events & EVENT_WRITE:
+ self._writers.add(key.fd)
+ return key
+
+ def unregister(self, fileobj):
+ key = super(SelectSelector, self).unregister(fileobj)
+ self._readers.discard(key.fd)
+ self._writers.discard(key.fd)
+ return key
+
+ def _select(self, r, w, timeout=None):
+ """ Wrapper for select.select because timeout is a positional arg """
+ return select.select(r, w, [], timeout)
+
+ def select(self, timeout=None):
+ # Selecting on empty lists on Windows errors out.
+ if not len(self._readers) and not len(self._writers):
+ return []
+
+ timeout = None if timeout is None else max(timeout, 0.0)
+ ready = []
+ r, w, _ = _syscall_wrapper(self._select, True, self._readers,
+ self._writers, timeout)
+ r = set(r)
+ w = set(w)
+ for fd in r | w:
+ events = 0
+ if fd in r:
+ events |= EVENT_READ
+ if fd in w:
+ events |= EVENT_WRITE
+
+ key = self._key_from_fd(fd)
+ if key:
+ ready.append((key, events & key.events))
+ return ready
+
+
+if hasattr(select, "poll"):
+ class PollSelector(BaseSelector):
+ """ Poll-based selector """
+ def __init__(self):
+ super(PollSelector, self).__init__()
+ self._poll = select.poll()
+
+ def register(self, fileobj, events, data=None):
+ key = super(PollSelector, self).register(fileobj, events, data)
+ event_mask = 0
+ if events & EVENT_READ:
+ event_mask |= select.POLLIN
+ if events & EVENT_WRITE:
+ event_mask |= select.POLLOUT
+ self._poll.register(key.fd, event_mask)
+ return key
+
+ def unregister(self, fileobj):
+ key = super(PollSelector, self).unregister(fileobj)
+ self._poll.unregister(key.fd)
+ return key
+
+ def _wrap_poll(self, timeout=None):
+ """ Wrapper function for select.poll.poll() so that
+ _syscall_wrapper can work with only seconds. """
+ if timeout is not None:
+ if timeout <= 0:
+ timeout = 0
+ else:
+ # select.poll.poll() has a resolution of 1 millisecond,
+ # round away from zero to wait *at least* timeout seconds.
+ timeout = math.ceil(timeout * 1e3)
+
+ result = self._poll.poll(timeout)
+ return result
+
+ def select(self, timeout=None):
+ ready = []
+ fd_events = _syscall_wrapper(self._wrap_poll, True, timeout=timeout)
+ for fd, event_mask in fd_events:
+ events = 0
+ if event_mask & ~select.POLLIN:
+ events |= EVENT_WRITE
+ if event_mask & ~select.POLLOUT:
+ events |= EVENT_READ
+
+ key = self._key_from_fd(fd)
+ if key:
+ ready.append((key, events & key.events))
+
+ return ready
+
+
+if hasattr(select, "epoll"):
+ class EpollSelector(BaseSelector):
+ """ Epoll-based selector """
+ def __init__(self):
+ super(EpollSelector, self).__init__()
+ self._epoll = select.epoll()
+
+ def fileno(self):
+ return self._epoll.fileno()
+
+ def register(self, fileobj, events, data=None):
+ key = super(EpollSelector, self).register(fileobj, events, data)
+ events_mask = 0
+ if events & EVENT_READ:
+ events_mask |= select.EPOLLIN
+ if events & EVENT_WRITE:
+ events_mask |= select.EPOLLOUT
+ _syscall_wrapper(self._epoll.register, False, key.fd, events_mask)
+ return key
+
+ def unregister(self, fileobj):
+ key = super(EpollSelector, self).unregister(fileobj)
+ try:
+ _syscall_wrapper(self._epoll.unregister, False, key.fd)
+ except SelectorError:
+ # This can occur if the fd was closed after it was registered.
+ pass
+ return key
+
+ def select(self, timeout=None):
+ if timeout is not None:
+ if timeout <= 0:
+ timeout = 0.0
+ else:
+ # select.epoll.poll() has a resolution of 1 millisecond
+ # but conveniently takes seconds, so unlike PollSelector we
+ # need no wrapper; we only round up to the next millisecond.
+ timeout = math.ceil(timeout * 1e3) * 1e-3
+ timeout = float(timeout)
+ else:
+ timeout = -1.0 # epoll.poll() must have a float.
+
+ # We always want at least 1 so that select can be called even
+ # with no file descriptors registered; otherwise it will fail.
+ max_events = max(len(self._fd_to_key), 1)
+
+ ready = []
+ fd_events = _syscall_wrapper(self._epoll.poll, True,
+ timeout=timeout,
+ maxevents=max_events)
+ for fd, event_mask in fd_events:
+ events = 0
+ if event_mask & ~select.EPOLLIN:
+ events |= EVENT_WRITE
+ if event_mask & ~select.EPOLLOUT:
+ events |= EVENT_READ
+
+ key = self._key_from_fd(fd)
+ if key:
+ ready.append((key, events & key.events))
+ return ready
+
+ def close(self):
+ self._epoll.close()
+ super(EpollSelector, self).close()
+
+
+if hasattr(select, "kqueue"):
+ class KqueueSelector(BaseSelector):
+ """ Kqueue / Kevent-based selector """
+ def __init__(self):
+ super(KqueueSelector, self).__init__()
+ self._kqueue = select.kqueue()
+
+ def fileno(self):
+ return self._kqueue.fileno()
+
+ def register(self, fileobj, events, data=None):
+ key = super(KqueueSelector, self).register(fileobj, events, data)
+ if events & EVENT_READ:
+ kevent = select.kevent(key.fd,
+ select.KQ_FILTER_READ,
+ select.KQ_EV_ADD)
+
+ _syscall_wrapper(self._kqueue.control, False, [kevent], 0, 0)
+
+ if events & EVENT_WRITE:
+ kevent = select.kevent(key.fd,
+ select.KQ_FILTER_WRITE,
+ select.KQ_EV_ADD)
+
+ _syscall_wrapper(self._kqueue.control, False, [kevent], 0, 0)
+
+ return key
+
+ def unregister(self, fileobj):
+ key = super(KqueueSelector, self).unregister(fileobj)
+ if key.events & EVENT_READ:
+ kevent = select.kevent(key.fd,
+ select.KQ_FILTER_READ,
+ select.KQ_EV_DELETE)
+ try:
+ _syscall_wrapper(self._kqueue.control, False, [kevent], 0, 0)
+ except SelectorError:
+ pass
+ if key.events & EVENT_WRITE:
+ kevent = select.kevent(key.fd,
+ select.KQ_FILTER_WRITE,
+ select.KQ_EV_DELETE)
+ try:
+ _syscall_wrapper(self._kqueue.control, False, [kevent], 0, 0)
+ except SelectorError:
+ pass
+
+ return key
+
+ def select(self, timeout=None):
+ if timeout is not None:
+ timeout = max(timeout, 0)
+
+ max_events = len(self._fd_to_key) * 2
+ ready_fds = {}
+
+ kevent_list = _syscall_wrapper(self._kqueue.control, True,
+ None, max_events, timeout)
+
+ for kevent in kevent_list:
+ fd = kevent.ident
+ event_mask = kevent.filter
+ events = 0
+ if event_mask == select.KQ_FILTER_READ:
+ events |= EVENT_READ
+ if event_mask == select.KQ_FILTER_WRITE:
+ events |= EVENT_WRITE
+
+ key = self._key_from_fd(fd)
+ if key:
+ if key.fd not in ready_fds:
+ ready_fds[key.fd] = (key, events & key.events)
+ else:
+ old_events = ready_fds[key.fd][1]
+ ready_fds[key.fd] = (key, (events | old_events) & key.events)
+
+ return list(ready_fds.values())
+
+ def close(self):
+ self._kqueue.close()
+ super(KqueueSelector, self).close()
+
+
+if not hasattr(select, 'select'): # Platform-specific: AppEngine
+ HAS_SELECT = False
+
+
+def _can_allocate(struct):
+ """ Checks that select structs can be allocated by the underlying
+ operating system, not just advertised by the select module. We don't
+ check select() because we are hopeful that most platforms that
+ don't have it available will not advertise it. (i.e. GAE) """
+ try:
+ # select.poll() objects won't fail until used.
+ if struct == 'poll':
+ p = select.poll()
+ p.poll(0)
+
+ # All others will fail on allocation.
+ else:
+ getattr(select, struct)().close()
+ return True
+ except (OSError, AttributeError):
+ return False
+
+
+# Choose the best implementation, roughly:
+# kqueue == epoll > poll > select. Devpoll not supported. (See above)
+# select() also can't accept a FD > FD_SETSIZE (usually around 1024)
+def DefaultSelector():
+ """ This function serves as a first call for DefaultSelector to
+ detect if the select module is being monkey-patched incorrectly
+ by eventlet or greenlet, and to preserve proper behavior. """
+ global _DEFAULT_SELECTOR
+ if _DEFAULT_SELECTOR is None:
+ if _can_allocate('kqueue'):
+ _DEFAULT_SELECTOR = KqueueSelector
+ elif _can_allocate('epoll'):
+ _DEFAULT_SELECTOR = EpollSelector
+ elif _can_allocate('poll'):
+ _DEFAULT_SELECTOR = PollSelector
+ elif hasattr(select, 'select'):
+ _DEFAULT_SELECTOR = SelectSelector
+ else: # Platform-specific: AppEngine
+ raise ValueError('Platform does not have a selector')
+ return _DEFAULT_SELECTOR()
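
For orientation, here is a minimal sketch of how this backport is typically driven. It is not part of the vendored module; the import path assumes the module's location in this tree, and the host and payload are illustrative.

```python
# Sketch only: exercising the selectors backport above.
import socket

from urllib3.util.selectors import DefaultSelector, EVENT_READ

sock = socket.create_connection(("example.com", 80))
sock.sendall(b"GET / HTTP/1.0\r\nHost: example.com\r\n\r\n")

# DefaultSelector() lazily picks kqueue/epoll/poll/select for the platform.
with DefaultSelector() as selector:
    selector.register(sock, EVENT_READ, data="response")
    # select() returns [(SelectorKey, event_mask), ...] for ready objects.
    for key, events in selector.select(timeout=5.0):
        if events & EVENT_READ:
            print(key.fileobj.recv(1024))

sock.close()
```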
diff --git a/collectors/python.d.plugin/python_modules/urllib3/util/ssl_.py b/collectors/python.d.plugin/python_modules/urllib3/util/ssl_.py
new file mode 100644
index 0000000..ece3ec3
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/urllib3/util/ssl_.py
@@ -0,0 +1,338 @@
+# SPDX-License-Identifier: MIT
+from __future__ import absolute_import
+import errno
+import warnings
+import hmac
+
+from binascii import hexlify, unhexlify
+from hashlib import md5, sha1, sha256
+
+from ..exceptions import SSLError, InsecurePlatformWarning, SNIMissingWarning
+
+
+SSLContext = None
+HAS_SNI = False
+IS_PYOPENSSL = False
+IS_SECURETRANSPORT = False
+
+# Maps the length of a digest to a possible hash function producing this digest
+HASHFUNC_MAP = {
+ 32: md5,
+ 40: sha1,
+ 64: sha256,
+}
+
+
+def _const_compare_digest_backport(a, b):
+ """
+ Compare two digests of equal length in constant time.
+
+ The digests must be of type str/bytes.
+ Returns True if the digests match, and False otherwise.
+ """
+ result = abs(len(a) - len(b))
+ for l, r in zip(bytearray(a), bytearray(b)):
+ result |= l ^ r
+ return result == 0
+
+
+_const_compare_digest = getattr(hmac, 'compare_digest',
+ _const_compare_digest_backport)
+
+
+try: # Test for SSL features
+ import ssl
+ from ssl import wrap_socket, CERT_NONE, PROTOCOL_SSLv23
+ from ssl import HAS_SNI # Has SNI?
+except ImportError:
+ pass
+
+
+try:
+ from ssl import OP_NO_SSLv2, OP_NO_SSLv3, OP_NO_COMPRESSION
+except ImportError:
+ OP_NO_SSLv2, OP_NO_SSLv3 = 0x1000000, 0x2000000
+ OP_NO_COMPRESSION = 0x20000
+
+# A secure default.
+# Sources for more information on TLS ciphers:
+#
+# - https://wiki.mozilla.org/Security/Server_Side_TLS
+# - https://www.ssllabs.com/projects/best-practices/index.html
+# - https://hynek.me/articles/hardening-your-web-servers-ssl-ciphers/
+#
+# The general intent is:
+# - Prefer cipher suites that offer perfect forward secrecy (DHE/ECDHE),
+# - prefer ECDHE over DHE for better performance,
+# - prefer any AES-GCM and ChaCha20 over any AES-CBC for better performance and
+# security,
+# - prefer AES-GCM over ChaCha20 because hardware-accelerated AES is common,
+# - disable NULL authentication, MD5 MACs and DSS for security reasons.
+DEFAULT_CIPHERS = ':'.join([
+ 'ECDH+AESGCM',
+ 'ECDH+CHACHA20',
+ 'DH+AESGCM',
+ 'DH+CHACHA20',
+ 'ECDH+AES256',
+ 'DH+AES256',
+ 'ECDH+AES128',
+ 'DH+AES',
+ 'RSA+AESGCM',
+ 'RSA+AES',
+ '!aNULL',
+ '!eNULL',
+ '!MD5',
+])
+
+try:
+ from ssl import SSLContext # Modern SSL?
+except ImportError:
+ import sys
+
+ class SSLContext(object): # Platform-specific: Python 2 & 3.1
+ supports_set_ciphers = ((2, 7) <= sys.version_info < (3,) or
+ (3, 2) <= sys.version_info)
+
+ def __init__(self, protocol_version):
+ self.protocol = protocol_version
+ # Use default values from a real SSLContext
+ self.check_hostname = False
+ self.verify_mode = ssl.CERT_NONE
+ self.ca_certs = None
+ self.options = 0
+ self.certfile = None
+ self.keyfile = None
+ self.ciphers = None
+
+ def load_cert_chain(self, certfile, keyfile):
+ self.certfile = certfile
+ self.keyfile = keyfile
+
+ def load_verify_locations(self, cafile=None, capath=None):
+ self.ca_certs = cafile
+
+ if capath is not None:
+ raise SSLError("CA directories not supported in older Pythons")
+
+ def set_ciphers(self, cipher_suite):
+ if not self.supports_set_ciphers:
+ raise TypeError(
+ 'Your version of Python does not support setting '
+ 'a custom cipher suite. Please upgrade to Python '
+ '2.7, 3.2, or later if you need this functionality.'
+ )
+ self.ciphers = cipher_suite
+
+ def wrap_socket(self, socket, server_hostname=None, server_side=False):
+ warnings.warn(
+ 'A true SSLContext object is not available. This prevents '
+ 'urllib3 from configuring SSL appropriately and may cause '
+ 'certain SSL connections to fail. You can upgrade to a newer '
+ 'version of Python to solve this. For more information, see '
+ 'https://urllib3.readthedocs.io/en/latest/advanced-usage.html'
+ '#ssl-warnings',
+ InsecurePlatformWarning
+ )
+ kwargs = {
+ 'keyfile': self.keyfile,
+ 'certfile': self.certfile,
+ 'ca_certs': self.ca_certs,
+ 'cert_reqs': self.verify_mode,
+ 'ssl_version': self.protocol,
+ 'server_side': server_side,
+ }
+ if self.supports_set_ciphers: # Platform-specific: Python 2.7+
+ return wrap_socket(socket, ciphers=self.ciphers, **kwargs)
+ else: # Platform-specific: Python 2.6
+ return wrap_socket(socket, **kwargs)
+
+
+def assert_fingerprint(cert, fingerprint):
+ """
+ Checks if given fingerprint matches the supplied certificate.
+
+ :param cert:
+ Certificate as bytes object.
+ :param fingerprint:
+ Fingerprint as string of hexdigits, can be interspersed by colons.
+ """
+
+ fingerprint = fingerprint.replace(':', '').lower()
+ digest_length = len(fingerprint)
+ hashfunc = HASHFUNC_MAP.get(digest_length)
+ if not hashfunc:
+ raise SSLError(
+ 'Fingerprint of invalid length: {0}'.format(fingerprint))
+
+ # We need encode() here for py32; works on py2 and py3.
+ fingerprint_bytes = unhexlify(fingerprint.encode())
+
+ cert_digest = hashfunc(cert).digest()
+
+ if not _const_compare_digest(cert_digest, fingerprint_bytes):
+ raise SSLError('Fingerprints did not match. Expected "{0}", got "{1}".'
+ .format(fingerprint, hexlify(cert_digest)))
+
+
+def resolve_cert_reqs(candidate):
+ """
+ Resolves the argument to a numeric constant, which can be passed to
+ the wrap_socket function/method from the ssl module.
+ Defaults to :data:`ssl.CERT_NONE`.
+ If given a string it is assumed to be the name of the constant in the
+ :mod:`ssl` module or its abbreviation.
+ (So you can specify `REQUIRED` instead of `CERT_REQUIRED`.)
+ If it's neither `None` nor a string we assume it is already the numeric
+ constant which can directly be passed to wrap_socket.
+ """
+ if candidate is None:
+ return CERT_NONE
+
+ if isinstance(candidate, str):
+ res = getattr(ssl, candidate, None)
+ if res is None:
+ res = getattr(ssl, 'CERT_' + candidate)
+ return res
+
+ return candidate
+
+
+def resolve_ssl_version(candidate):
+ """
+ Like :func:`resolve_cert_reqs`, but resolves an SSL protocol version.
+ """
+ if candidate is None:
+ return PROTOCOL_SSLv23
+
+ if isinstance(candidate, str):
+ res = getattr(ssl, candidate, None)
+ if res is None:
+ res = getattr(ssl, 'PROTOCOL_' + candidate)
+ return res
+
+ return candidate
+
+
+def create_urllib3_context(ssl_version=None, cert_reqs=None,
+ options=None, ciphers=None):
+ """All arguments have the same meaning as ``ssl_wrap_socket``.
+
+ By default, this function does a lot of the same work that
+ ``ssl.create_default_context`` does on Python 3.4+. It:
+
+ - Disables SSLv2, SSLv3, and compression
+ - Sets a restricted set of server ciphers
+
+ If you wish to enable SSLv3, you can do::
+
+ from urllib3.util import ssl_
+ context = ssl_.create_urllib3_context()
+ context.options &= ~ssl_.OP_NO_SSLv3
+
+ You can do the same to enable compression (substituting ``COMPRESSION``
+ for ``SSLv3`` in the last line above).
+
+ :param ssl_version:
+ The desired protocol version to use. This will default to
+ PROTOCOL_SSLv23 which will negotiate the highest protocol that both
+ the server and your installation of OpenSSL support.
+ :param cert_reqs:
+ Whether to require the certificate verification. This defaults to
+ ``ssl.CERT_REQUIRED``.
+ :param options:
+ Specific OpenSSL options. These default to ``ssl.OP_NO_SSLv2``,
+ ``ssl.OP_NO_SSLv3``, ``ssl.OP_NO_COMPRESSION``.
+ :param ciphers:
+ Which cipher suites to allow the server to select.
+ :returns:
+ Constructed SSLContext object with specified options
+ :rtype: SSLContext
+ """
+ context = SSLContext(ssl_version or ssl.PROTOCOL_SSLv23)
+
+ # Setting the default here, as we may have no ssl module on import
+ cert_reqs = ssl.CERT_REQUIRED if cert_reqs is None else cert_reqs
+
+ if options is None:
+ options = 0
+ # SSLv2 is easily broken and is considered harmful and dangerous
+ options |= OP_NO_SSLv2
+ # SSLv3 has several problems and is now dangerous
+ options |= OP_NO_SSLv3
+ # Disable compression to prevent CRIME attacks for OpenSSL 1.0+
+ # (issue #309)
+ options |= OP_NO_COMPRESSION
+
+ context.options |= options
+
+ if getattr(context, 'supports_set_ciphers', True): # Platform-specific: Python 2.6
+ context.set_ciphers(ciphers or DEFAULT_CIPHERS)
+
+ context.verify_mode = cert_reqs
+ if getattr(context, 'check_hostname', None) is not None: # Platform-specific: Python 3.2
+ # We do our own verification, including fingerprints and alternative
+ # hostnames. So disable it here
+ context.check_hostname = False
+ return context
+
+
+def ssl_wrap_socket(sock, keyfile=None, certfile=None, cert_reqs=None,
+ ca_certs=None, server_hostname=None,
+ ssl_version=None, ciphers=None, ssl_context=None,
+ ca_cert_dir=None):
+ """
+ All arguments except for server_hostname, ssl_context, and ca_cert_dir have
+ the same meaning as they do when using :func:`ssl.wrap_socket`.
+
+ :param server_hostname:
+ When SNI is supported, the expected hostname of the certificate
+ :param ssl_context:
+ A pre-made :class:`SSLContext` object. If none is provided, one will
+ be created using :func:`create_urllib3_context`.
+ :param ciphers:
+ A string of ciphers we wish the client to support. This is not
+ supported on Python 2.6 as the ssl module does not support it.
+ :param ca_cert_dir:
+ A directory containing CA certificates in multiple separate files, as
+ supported by OpenSSL's -CApath flag or the capath argument to
+ SSLContext.load_verify_locations().
+ """
+ context = ssl_context
+ if context is None:
+ # Note: This branch of code and all the variables in it are no longer
+ # used by urllib3 itself. We should consider deprecating and removing
+ # this code.
+ context = create_urllib3_context(ssl_version, cert_reqs,
+ ciphers=ciphers)
+
+ if ca_certs or ca_cert_dir:
+ try:
+ context.load_verify_locations(ca_certs, ca_cert_dir)
+ except IOError as e: # Platform-specific: Python 2.6, 2.7, 3.2
+ raise SSLError(e)
+ # Py33 raises FileNotFoundError which subclasses OSError
+ # These are not equivalent unless we check the errno attribute
+ except OSError as e: # Platform-specific: Python 3.3 and beyond
+ if e.errno == errno.ENOENT:
+ raise SSLError(e)
+ raise
+ elif getattr(context, 'load_default_certs', None) is not None:
+ # try to load OS default certs; works well on Windows (requires Python 3.4+)
+ context.load_default_certs()
+
+ if certfile:
+ context.load_cert_chain(certfile, keyfile)
+ if HAS_SNI: # Platform-specific: OpenSSL with enabled SNI
+ return context.wrap_socket(sock, server_hostname=server_hostname)
+
+ warnings.warn(
+ 'An HTTPS request has been made, but the SNI (Subject Name '
+ 'Indication) extension to TLS is not available on this platform. '
+ 'This may cause the server to present an incorrect TLS '
+ 'certificate, which can cause validation failures. You can upgrade to '
+ 'a newer version of Python to solve this. For more information, see '
+ 'https://urllib3.readthedocs.io/en/latest/advanced-usage.html'
+ '#ssl-warnings',
+ SNIMissingWarning
+ )
+ return context.wrap_socket(sock)
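
A short sketch of how the pieces above fit together. The hostname is illustrative, and `load_default_certs` only exists on a real `ssl.SSLContext` (Python 3.4+); on the fallback class above, `ssl_wrap_socket` would rely on `ca_certs` instead.

```python
# Sketch only: wrapping a socket with the helpers defined above.
import socket

from urllib3.util.ssl_ import (
    create_urllib3_context,
    resolve_cert_reqs,
    ssl_wrap_socket,
)

# Hardened context: SSLv2/SSLv3/compression disabled, DEFAULT_CIPHERS set.
ctx = create_urllib3_context(cert_reqs=resolve_cert_reqs('REQUIRED'))
ctx.load_default_certs()  # real ssl.SSLContext only (Python 3.4+)

raw = socket.create_connection(('example.com', 443))
# server_hostname is used for SNI when HAS_SNI is true on this platform.
tls = ssl_wrap_socket(raw, server_hostname='example.com', ssl_context=ctx)
print(tls.cipher())
tls.close()
```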
diff --git a/collectors/python.d.plugin/python_modules/urllib3/util/timeout.py b/collectors/python.d.plugin/python_modules/urllib3/util/timeout.py
new file mode 100644
index 0000000..4041cf9
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/urllib3/util/timeout.py
@@ -0,0 +1,243 @@
+# SPDX-License-Identifier: MIT
+from __future__ import absolute_import
+# The default socket timeout, used by httplib to indicate that no timeout was
+# specified by the user
+from socket import _GLOBAL_DEFAULT_TIMEOUT
+import time
+
+from ..exceptions import TimeoutStateError
+
+# A sentinel value to indicate that no timeout was specified by the user in
+# urllib3
+_Default = object()
+
+
+# Use time.monotonic if available.
+current_time = getattr(time, "monotonic", time.time)
+
+
+class Timeout(object):
+ """ Timeout configuration.
+
+ Timeouts can be defined as a default for a pool::
+
+ timeout = Timeout(connect=2.0, read=7.0)
+ http = PoolManager(timeout=timeout)
+ response = http.request('GET', 'http://example.com/')
+
+ Or per-request (which overrides the default for the pool)::
+
+ response = http.request('GET', 'http://example.com/', timeout=Timeout(10))
+
+ Timeouts can be disabled by setting all the parameters to ``None``::
+
+ no_timeout = Timeout(connect=None, read=None)
+ response = http.request('GET', 'http://example.com/', timeout=no_timeout)
+
+
+ :param total:
+ This combines the connect and read timeouts into one; the read timeout
+ will be set to the time leftover from the connect attempt. In the
+ event that both a connect timeout and a total are specified, or a read
+ timeout and a total are specified, the shorter timeout will be applied.
+
+ Defaults to None.
+
+ :type total: integer, float, or None
+
+ :param connect:
+ The maximum amount of time to wait for a connection attempt to a server
+ to succeed. Omitting the parameter will default the connect timeout to
+ the system default, probably `the global default timeout in socket.py
+ <http://hg.python.org/cpython/file/603b4d593758/Lib/socket.py#l535>`_.
+ None will set an infinite timeout for connection attempts.
+
+ :type connect: integer, float, or None
+
+ :param read:
+ The maximum amount of time to wait between consecutive
+ read operations for a response from the server. Omitting
+ the parameter will default the read timeout to the system
+ default, probably `the global default timeout in socket.py
+ <http://hg.python.org/cpython/file/603b4d593758/Lib/socket.py#l535>`_.
+ None will set an infinite timeout.
+
+ :type read: integer, float, or None
+
+ .. note::
+
+ Many factors can affect the total amount of time for urllib3 to return
+ an HTTP response.
+
+ For example, Python's DNS resolver does not obey the timeout specified
+ on the socket. Other factors that can affect total request time include
+ high CPU load, high swap, the program running at a low priority level,
+ or other behaviors.
+
+ In addition, the read and total timeouts only measure the time between
+ read operations on the socket connecting the client and the server,
+ not the total amount of time for the request to return a complete
+ response. For most requests, the timeout is raised because the server
+ has not sent the first byte in the specified time. This is not always
+ the case; if a server streams one byte every fifteen seconds, a timeout
+ of 20 seconds will not trigger, even though the request will take
+ several minutes to complete.
+
+ If your goal is to cut off any request after a set amount of wall clock
+ time, consider having a second "watcher" thread to cut off a slow
+ request.
+ """
+
+ #: A sentinel object representing the default timeout value
+ DEFAULT_TIMEOUT = _GLOBAL_DEFAULT_TIMEOUT
+
+ def __init__(self, total=None, connect=_Default, read=_Default):
+ self._connect = self._validate_timeout(connect, 'connect')
+ self._read = self._validate_timeout(read, 'read')
+ self.total = self._validate_timeout(total, 'total')
+ self._start_connect = None
+
+ def __str__(self):
+ return '%s(connect=%r, read=%r, total=%r)' % (
+ type(self).__name__, self._connect, self._read, self.total)
+
+ @classmethod
+ def _validate_timeout(cls, value, name):
+ """ Check that a timeout attribute is valid.
+
+ :param value: The timeout value to validate
+ :param name: The name of the timeout attribute to validate. This is
+ used to specify in error messages.
+ :return: The validated and casted version of the given value.
+ :raises ValueError: If it is a numeric value less than or equal to
+ zero, or the type is not an integer, float, or None.
+ """
+ if value is _Default:
+ return cls.DEFAULT_TIMEOUT
+
+ if value is None or value is cls.DEFAULT_TIMEOUT:
+ return value
+
+ if isinstance(value, bool):
+ raise ValueError("Timeout cannot be a boolean value. It must "
+ "be an int, float or None.")
+ try:
+ float(value)
+ except (TypeError, ValueError):
+ raise ValueError("Timeout value %s was %s, but it must be an "
+ "int, float or None." % (name, value))
+
+ try:
+ if value <= 0:
+ raise ValueError("Attempted to set %s timeout to %s, but the "
+ "timeout cannot be set to a value less "
+ "than or equal to 0." % (name, value))
+ except TypeError: # Python 3
+ raise ValueError("Timeout value %s was %s, but it must be an "
+ "int, float or None." % (name, value))
+
+ return value
+
+ @classmethod
+ def from_float(cls, timeout):
+ """ Create a new Timeout from a legacy timeout value.
+
+ The timeout value used by httplib.py sets the same timeout on the
+ connect() and recv() socket requests. This creates a :class:`Timeout`
+ object that sets the individual timeouts to the ``timeout`` value
+ passed to this function.
+
+ :param timeout: The legacy timeout value.
+ :type timeout: integer, float, sentinel default object, or None
+ :return: Timeout object
+ :rtype: :class:`Timeout`
+ """
+ return Timeout(read=timeout, connect=timeout)
+
+ def clone(self):
+ """ Create a copy of the timeout object
+
+ Timeout properties are stored per-pool but each request needs a fresh
+ Timeout object to ensure each one has its own start/stop configured.
+
+ :return: a copy of the timeout object
+ :rtype: :class:`Timeout`
+ """
+ # We can't use copy.deepcopy because that will also create a new object
+ # for _GLOBAL_DEFAULT_TIMEOUT, which socket.py uses as a sentinel to
+ # detect the user default.
+ return Timeout(connect=self._connect, read=self._read,
+ total=self.total)
+
+ def start_connect(self):
+ """ Start the timeout clock, used during a connect() attempt
+
+ :raises urllib3.exceptions.TimeoutStateError: if you attempt
+ to start a timer that has been started already.
+ """
+ if self._start_connect is not None:
+ raise TimeoutStateError("Timeout timer has already been started.")
+ self._start_connect = current_time()
+ return self._start_connect
+
+ def get_connect_duration(self):
+ """ Gets the time elapsed since the call to :meth:`start_connect`.
+
+ :return: Elapsed time.
+ :rtype: float
+ :raises urllib3.exceptions.TimeoutStateError: if you attempt
+ to get duration for a timer that hasn't been started.
+ """
+ if self._start_connect is None:
+ raise TimeoutStateError("Can't get connect duration for timer "
+ "that has not started.")
+ return current_time() - self._start_connect
+
+ @property
+ def connect_timeout(self):
+ """ Get the value to use when setting a connection timeout.
+
+ This will be a positive float or integer, the value None
+ (never timeout), or the default system timeout.
+
+ :return: Connect timeout.
+ :rtype: int, float, :attr:`Timeout.DEFAULT_TIMEOUT` or None
+ """
+ if self.total is None:
+ return self._connect
+
+ if self._connect is None or self._connect is self.DEFAULT_TIMEOUT:
+ return self.total
+
+ return min(self._connect, self.total)
+
+ @property
+ def read_timeout(self):
+ """ Get the value for the read timeout.
+
+ This assumes some time has elapsed in the connection timeout and
+ computes the read timeout appropriately.
+
+ If self.total is set, the read timeout is dependent on the amount of
+ time taken by the connect timeout. If the connection time has not been
+ established, a :exc:`~urllib3.exceptions.TimeoutStateError` will be
+ raised.
+
+ :return: Value to use for the read timeout.
+ :rtype: int, float, :attr:`Timeout.DEFAULT_TIMEOUT` or None
+ :raises urllib3.exceptions.TimeoutStateError: If :meth:`start_connect`
+ has not yet been called on this object.
+ """
+ if (self.total is not None and
+ self.total is not self.DEFAULT_TIMEOUT and
+ self._read is not None and
+ self._read is not self.DEFAULT_TIMEOUT):
+ # In case the connect timeout has not yet been established.
+ if self._start_connect is None:
+ return self._read
+ return max(0, min(self.total - self.get_connect_duration(),
+ self._read))
+ elif self.total is not None and self.total is not self.DEFAULT_TIMEOUT:
+ return max(0, self.total - self.get_connect_duration())
+ else:
+ return self._read
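
To make the lifecycle above concrete, a small sketch (values are arbitrary):

```python
# Sketch only: how a Timeout object is consumed during one request.
from urllib3.util.timeout import Timeout

pool_timeout = Timeout(total=10.0, connect=3.0, read=7.0)

t = pool_timeout.clone()   # each request needs a fresh copy
print(t.connect_timeout)   # min(connect, total) -> 3.0

t.start_connect()          # start the clock before connect()
# ... connection is established here ...
print(t.get_connect_duration())  # seconds since start_connect()
print(t.read_timeout)            # max(0, min(total - elapsed, read))
```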
diff --git a/collectors/python.d.plugin/python_modules/urllib3/util/url.py b/collectors/python.d.plugin/python_modules/urllib3/util/url.py
new file mode 100644
index 0000000..99fd653
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/urllib3/util/url.py
@@ -0,0 +1,231 @@
+# SPDX-License-Identifier: MIT
+from __future__ import absolute_import
+from collections import namedtuple
+
+from ..exceptions import LocationParseError
+
+
+url_attrs = ['scheme', 'auth', 'host', 'port', 'path', 'query', 'fragment']
+
+# We only want to normalize urls with an HTTP(S) scheme.
+# urllib3 infers URLs without a scheme (None) to be http.
+NORMALIZABLE_SCHEMES = ('http', 'https', None)
+
+
+class Url(namedtuple('Url', url_attrs)):
+ """
+ Datastructure for representing an HTTP URL. Used as a return value for
+ :func:`parse_url`. Both the scheme and host are normalized as they are
+ both case-insensitive according to RFC 3986.
+ """
+ __slots__ = ()
+
+ def __new__(cls, scheme=None, auth=None, host=None, port=None, path=None,
+ query=None, fragment=None):
+ if path and not path.startswith('/'):
+ path = '/' + path
+ if scheme:
+ scheme = scheme.lower()
+ if host and scheme in NORMALIZABLE_SCHEMES:
+ host = host.lower()
+ return super(Url, cls).__new__(cls, scheme, auth, host, port, path,
+ query, fragment)
+
+ @property
+ def hostname(self):
+ """For backwards-compatibility with urlparse. We're nice like that."""
+ return self.host
+
+ @property
+ def request_uri(self):
+ """Absolute path including the query string."""
+ uri = self.path or '/'
+
+ if self.query is not None:
+ uri += '?' + self.query
+
+ return uri
+
+ @property
+ def netloc(self):
+ """Network location including host and port"""
+ if self.port:
+ return '%s:%d' % (self.host, self.port)
+ return self.host
+
+ @property
+ def url(self):
+ """
+ Convert self into a url
+
+ This function should more or less round-trip with :func:`.parse_url`. The
+ returned url may not be exactly the same as the url passed to
+ :func:`.parse_url`, but it should be equivalent by the RFC (e.g., urls
+ with a blank port will have : removed).
+
+ Example: ::
+
+ >>> U = parse_url('http://google.com/mail/')
+ >>> U.url
+ 'http://google.com/mail/'
+ >>> Url('http', 'username:password', 'host.com', 80,
+ ... '/path', 'query', 'fragment').url
+ 'http://username:password@host.com:80/path?query#fragment'
+ """
+ scheme, auth, host, port, path, query, fragment = self
+ url = ''
+
+ # We use "is not None" we want things to happen with empty strings (or 0 port)
+ if scheme is not None:
+ url += scheme + '://'
+ if auth is not None:
+ url += auth + '@'
+ if host is not None:
+ url += host
+ if port is not None:
+ url += ':' + str(port)
+ if path is not None:
+ url += path
+ if query is not None:
+ url += '?' + query
+ if fragment is not None:
+ url += '#' + fragment
+
+ return url
+
+ def __str__(self):
+ return self.url
+
+
+def split_first(s, delims):
+ """
+ Given a string and an iterable of delimiters, split on the first found
+ delimiter. Return two split parts and the matched delimiter.
+
+ If not found, then the first part is the full input string.
+
+ Example::
+
+ >>> split_first('foo/bar?baz', '?/=')
+ ('foo', 'bar?baz', '/')
+ >>> split_first('foo/bar?baz', '123')
+ ('foo/bar?baz', '', None)
+
+ Scales linearly with the number of delims; not ideal for a large number of them.
+ """
+ min_idx = None
+ min_delim = None
+ for d in delims:
+ idx = s.find(d)
+ if idx < 0:
+ continue
+
+ if min_idx is None or idx < min_idx:
+ min_idx = idx
+ min_delim = d
+
+ if min_idx is None or min_idx < 0:
+ return s, '', None
+
+ return s[:min_idx], s[min_idx + 1:], min_delim
+
+
+def parse_url(url):
+ """
+ Given a url, return a parsed :class:`.Url` namedtuple. Best-effort is
+ performed to parse incomplete urls. Fields not provided will be None.
+
+ Partly backwards-compatible with :mod:`urlparse`.
+
+ Example::
+
+ >>> parse_url('http://google.com/mail/')
+ Url(scheme='http', host='google.com', port=None, path='/mail/', ...)
+ >>> parse_url('google.com:80')
+ Url(scheme=None, host='google.com', port=80, path=None, ...)
+ >>> parse_url('/foo?bar')
+ Url(scheme=None, host=None, port=None, path='/foo', query='bar', ...)
+ """
+
+ # While this code has overlap with stdlib's urlparse, it is much
+ # simplified for our needs and less annoying.
+ # Additionally, this implementation does silly things to be optimal
+ # on CPython.
+
+ if not url:
+ # Empty
+ return Url()
+
+ scheme = None
+ auth = None
+ host = None
+ port = None
+ path = None
+ fragment = None
+ query = None
+
+ # Scheme
+ if '://' in url:
+ scheme, url = url.split('://', 1)
+
+ # Find the earliest Authority Terminator
+ # (http://tools.ietf.org/html/rfc3986#section-3.2)
+ url, path_, delim = split_first(url, ['/', '?', '#'])
+
+ if delim:
+ # Reassemble the path
+ path = delim + path_
+
+ # Auth
+ if '@' in url:
+ # Last '@' denotes end of auth part
+ auth, url = url.rsplit('@', 1)
+
+ # IPv6
+ if url and url[0] == '[':
+ host, url = url.split(']', 1)
+ host += ']'
+
+ # Port
+ if ':' in url:
+ _host, port = url.split(':', 1)
+
+ if not host:
+ host = _host
+
+ if port:
+ # If given, ports must be integers. No whitespace, no plus or
+ # minus prefixes, no non-integer digits such as ^2 (superscript).
+ if not port.isdigit():
+ raise LocationParseError(url)
+ try:
+ port = int(port)
+ except ValueError:
+ raise LocationParseError(url)
+ else:
+ # Blank ports are cool, too. (rfc3986#section-3.2.3)
+ port = None
+
+ elif not host and url:
+ host = url
+
+ if not path:
+ return Url(scheme, auth, host, port, path, query, fragment)
+
+ # Fragment
+ if '#' in path:
+ path, fragment = path.split('#', 1)
+
+ # Query
+ if '?' in path:
+ path, query = path.split('?', 1)
+
+ return Url(scheme, auth, host, port, path, query, fragment)
+
+
+def get_host(url):
+ """
+ Deprecated. Use :func:`parse_url` instead.
+ """
+ p = parse_url(url)
+ return p.scheme or 'http', p.hostname, p.port
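
A quick illustration of the parser above (the URL is made up):

```python
# Sketch only: parse_url()/Url round-tripping.
from urllib3.util.url import parse_url

u = parse_url('https://user:pw@example.com:8443/a/b?x=1#frag')
print(u.scheme, u.auth, u.host, u.port)  # https user:pw example.com 8443
print(u.request_uri)                     # /a/b?x=1
print(u.netloc)                          # example.com:8443
print(u.url)                             # round-trips to the input URL

# Missing fields parse as None; a bare host:port still works.
print(parse_url('example.com:80'))  # Url(scheme=None, ..., port=80, ...)
```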
diff --git a/collectors/python.d.plugin/python_modules/urllib3/util/wait.py b/collectors/python.d.plugin/python_modules/urllib3/util/wait.py
new file mode 100644
index 0000000..21e7297
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/urllib3/util/wait.py
@@ -0,0 +1,41 @@
+# SPDX-License-Identifier: MIT
+from .selectors import (
+ HAS_SELECT,
+ DefaultSelector,
+ EVENT_READ,
+ EVENT_WRITE
+)
+
+
+def _wait_for_io_events(socks, events, timeout=None):
+ """ Waits for IO events to be available from a list of sockets
+ or optionally a single socket if passed in. Returns a list of
+ sockets that can be interacted with immediately. """
+ if not HAS_SELECT:
+ raise ValueError('Platform does not have a selector')
+ if not isinstance(socks, list):
+ # Probably just a single socket.
+ if hasattr(socks, "fileno"):
+ socks = [socks]
+ # Otherwise it might be a non-list iterable.
+ else:
+ socks = list(socks)
+ with DefaultSelector() as selector:
+ for sock in socks:
+ selector.register(sock, events)
+ return [key[0].fileobj for key in
+ selector.select(timeout) if key[1] & events]
+
+
+def wait_for_read(socks, timeout=None):
+ """ Waits for reading to be available from a list of sockets
+ or optionally a single socket if passed in. Returns a list of
+ sockets that can be read from immediately. """
+ return _wait_for_io_events(socks, EVENT_READ, timeout)
+
+
+def wait_for_write(socks, timeout=None):
+ """ Waits for writing to be available from a list of sockets
+ or optionally a single socket if passed in. Returns a list of
+ sockets that can be written to immediately. """
+ return _wait_for_io_events(socks, EVENT_WRITE, timeout)
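
A POSIX-oriented sketch of these helpers (`socket.socketpair` is not available everywhere):

```python
# Sketch only: waiting on two sockets with wait_for_read().
import socket

from urllib3.util.wait import wait_for_read

a, b = socket.socketpair()
b.sendall(b'ping')

# Returns the subset of sockets that are readable right now.
readable = wait_for_read([a, b], timeout=1.0)
assert a in readable  # 'a' has 'ping' queued, 'b' does not

a.close()
b.close()
```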
diff --git a/collectors/python.d.plugin/rabbitmq/Makefile.inc b/collectors/python.d.plugin/rabbitmq/Makefile.inc
new file mode 100644
index 0000000..7e67ef5
--- /dev/null
+++ b/collectors/python.d.plugin/rabbitmq/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += rabbitmq/rabbitmq.chart.py
+dist_pythonconfig_DATA += rabbitmq/rabbitmq.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += rabbitmq/README.md rabbitmq/Makefile.inc
+
diff --git a/collectors/python.d.plugin/rabbitmq/README.md b/collectors/python.d.plugin/rabbitmq/README.md
new file mode 100644
index 0000000..2130a7b
--- /dev/null
+++ b/collectors/python.d.plugin/rabbitmq/README.md
@@ -0,0 +1,116 @@
+<!--
+title: "RabbitMQ monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/rabbitmq/README.md
+sidebar_label: "RabbitMQ"
+-->
+
+# RabbitMQ monitoring with Netdata
+
+Collects message broker global and per-virtual-host metrics.
+
+The following charts are drawn:
+
+1. **Queued Messages**
+
+ - ready
+ - unacknowledged
+
+2. **Message Rates**
+
+ - ack
+ - redelivered
+ - deliver
+ - publish
+
+3. **Global Counts**
+
+ - channels
+ - consumers
+ - connections
+ - queues
+ - exchanges
+
+4. **File Descriptors**
+
+ - used descriptors
+
+5. **Socket Descriptors**
+
+ - used descriptors
+
+6. **Erlang processes**
+
+ - used processes
+
+7. **Erlang run queue**
+
+ - Erlang run queue
+
+8. **Memory**
+
+ - free memory in megabytes
+
+9. **Disk Space**
+
+ - free disk space in gigabytes
+
+
+Per Vhost charts:
+
+1. **Vhost Messages**
+
+ - ack
+ - confirm
+ - deliver
+ - get
+ - get_no_ack
+ - publish
+ - redeliver
+ - return_unroutable
+
+2. Per Queue charts:
+
+ 1. **Queued Messages**
+
+ - messages
+ - paged_out
+ - persistent
+ - ready
+ - unacknowledged
+
+ 2. **Queue Messages stats**
+
+ - ack
+ - confirm
+ - deliver
+ - get
+ - get_no_ack
+ - publish
+ - redeliver
+ - return_unroutable
+
+## Configuration
+
+Edit the `python.d/rabbitmq.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/rabbitmq.conf
+```
+
+When no configuration file is found, the module tries to connect to `localhost:15672`.
+
+```yaml
+socket:
+ name : 'local'
+ host : '127.0.0.1'
+ port : 15672
+ user : 'guest'
+ pass : 'guest'
+```
+
+---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Frabbitmq%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
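
The module can also collect per-vhost, per-queue metrics through the `collect_queues_metrics` option handled in `rabbitmq.chart.py` (see the conf file below). A sketch of a job enabling it, with illustrative credentials:

```yaml
local:
  host: '127.0.0.1'
  port: 15672
  user: 'guest'
  pass: 'guest'
  collect_queues_metrics: yes
```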
diff --git a/collectors/python.d.plugin/rabbitmq/rabbitmq.chart.py b/collectors/python.d.plugin/rabbitmq/rabbitmq.chart.py
new file mode 100644
index 0000000..866b777
--- /dev/null
+++ b/collectors/python.d.plugin/rabbitmq/rabbitmq.chart.py
@@ -0,0 +1,443 @@
+# -*- coding: utf-8 -*-
+# Description: rabbitmq netdata python.d module
+# Author: ilyam8
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from json import loads
+
+from bases.FrameworkServices.UrlService import UrlService
+
+API_NODE = 'api/nodes'
+API_OVERVIEW = 'api/overview'
+API_QUEUES = 'api/queues'
+API_VHOSTS = 'api/vhosts'
+
+NODE_STATS = [
+ 'fd_used',
+ 'mem_used',
+ 'sockets_used',
+ 'proc_used',
+ 'disk_free',
+ 'run_queue'
+]
+
+OVERVIEW_STATS = [
+ 'object_totals.channels',
+ 'object_totals.consumers',
+ 'object_totals.connections',
+ 'object_totals.queues',
+ 'object_totals.exchanges',
+ 'queue_totals.messages_ready',
+ 'queue_totals.messages_unacknowledged',
+ 'message_stats.ack',
+ 'message_stats.redeliver',
+ 'message_stats.deliver',
+ 'message_stats.publish',
+ 'churn_rates.connection_created_details.rate',
+ 'churn_rates.connection_closed_details.rate',
+ 'churn_rates.channel_created_details.rate',
+ 'churn_rates.channel_closed_details.rate',
+ 'churn_rates.queue_created_details.rate',
+ 'churn_rates.queue_declared_details.rate',
+ 'churn_rates.queue_deleted_details.rate'
+]
+
+QUEUE_STATS = [
+ 'messages',
+ 'messages_paged_out',
+ 'messages_persistent',
+ 'messages_ready',
+ 'messages_unacknowledged',
+ 'message_stats.ack',
+ 'message_stats.confirm',
+ 'message_stats.deliver',
+ 'message_stats.get',
+ 'message_stats.get_no_ack',
+ 'message_stats.publish',
+ 'message_stats.redeliver',
+ 'message_stats.return_unroutable',
+]
+
+VHOST_MESSAGE_STATS = [
+ 'message_stats.ack',
+ 'message_stats.confirm',
+ 'message_stats.deliver',
+ 'message_stats.get',
+ 'message_stats.get_no_ack',
+ 'message_stats.publish',
+ 'message_stats.redeliver',
+ 'message_stats.return_unroutable',
+]
+
+ORDER = [
+ 'queued_messages',
+ 'connection_churn_rates',
+ 'channel_churn_rates',
+ 'queue_churn_rates',
+ 'message_rates',
+ 'global_counts',
+ 'file_descriptors',
+ 'socket_descriptors',
+ 'erlang_processes',
+ 'erlang_run_queue',
+ 'memory',
+ 'disk_space'
+]
+
+CHARTS = {
+ 'file_descriptors': {
+ 'options': [None, 'File Descriptors', 'descriptors', 'overview', 'rabbitmq.file_descriptors', 'line'],
+ 'lines': [
+ ['fd_used', 'used', 'absolute']
+ ]
+ },
+ 'memory': {
+ 'options': [None, 'Memory', 'MiB', 'overview', 'rabbitmq.memory', 'area'],
+ 'lines': [
+ ['mem_used', 'used', 'absolute', 1, 1 << 20]
+ ]
+ },
+ 'disk_space': {
+ 'options': [None, 'Disk Space', 'GiB', 'overview', 'rabbitmq.disk_space', 'area'],
+ 'lines': [
+ ['disk_free', 'free', 'absolute', 1, 1 << 30]
+ ]
+ },
+ 'socket_descriptors': {
+ 'options': [None, 'Socket Descriptors', 'descriptors', 'overview', 'rabbitmq.sockets', 'line'],
+ 'lines': [
+ ['sockets_used', 'used', 'absolute']
+ ]
+ },
+ 'erlang_processes': {
+ 'options': [None, 'Erlang Processes', 'processes', 'overview', 'rabbitmq.processes', 'line'],
+ 'lines': [
+ ['proc_used', 'used', 'absolute']
+ ]
+ },
+ 'erlang_run_queue': {
+ 'options': [None, 'Erlang Run Queue', 'processes', 'overview', 'rabbitmq.erlang_run_queue', 'line'],
+ 'lines': [
+ ['run_queue', 'length', 'absolute']
+ ]
+ },
+ 'global_counts': {
+ 'options': [None, 'Global Counts', 'counts', 'overview', 'rabbitmq.global_counts', 'line'],
+ 'lines': [
+ ['object_totals_channels', 'channels', 'absolute'],
+ ['object_totals_consumers', 'consumers', 'absolute'],
+ ['object_totals_connections', 'connections', 'absolute'],
+ ['object_totals_queues', 'queues', 'absolute'],
+ ['object_totals_exchanges', 'exchanges', 'absolute']
+ ]
+ },
+ 'connection_churn_rates': {
+ 'options': [None, 'Connection Churn Rates', 'operations/s', 'overview', 'rabbitmq.connection_churn_rates', 'line'],
+ 'lines': [
+ ['churn_rates_connection_created_details_rate', 'created', 'absolute'],
+ ['churn_rates_connection_closed_details_rate', 'closed', 'absolute']
+ ]
+ },
+ 'channel_churn_rates': {
+ 'options': [None, 'Channel Churn Rates', 'operations/s', 'overview', 'rabbitmq.channel_churn_rates', 'line'],
+ 'lines': [
+ ['churn_rates_channel_created_details_rate', 'created', 'absolute'],
+ ['churn_rates_channel_closed_details_rate', 'closed', 'absolute']
+ ]
+ },
+ 'queue_churn_rates': {
+ 'options': [None, 'Queue Churn Rates', 'operations/s', 'overview', 'rabbitmq.queue_churn_rates', 'line'],
+ 'lines': [
+ ['churn_rates_queue_created_details_rate', 'created', 'absolute'],
+ ['churn_rates_queue_declared_details_rate', 'declared', 'absolute'],
+ ['churn_rates_queue_deleted_details_rate', 'deleted', 'absolute']
+ ]
+ },
+ 'queued_messages': {
+ 'options': [None, 'Queued Messages', 'messages', 'overview', 'rabbitmq.queued_messages', 'stacked'],
+ 'lines': [
+ ['queue_totals_messages_ready', 'ready', 'absolute'],
+ ['queue_totals_messages_unacknowledged', 'unacknowledged', 'absolute']
+ ]
+ },
+ 'message_rates': {
+ 'options': [None, 'Message Rates', 'messages/s', 'overview', 'rabbitmq.message_rates', 'line'],
+ 'lines': [
+ ['message_stats_ack', 'ack', 'incremental'],
+ ['message_stats_redeliver', 'redeliver', 'incremental'],
+ ['message_stats_deliver', 'deliver', 'incremental'],
+ ['message_stats_publish', 'publish', 'incremental']
+ ]
+ }
+}
+
+
+def vhost_chart_template(name):
+ order = [
+ 'vhost_{0}_message_stats'.format(name),
+ ]
+ family = 'vhost {0}'.format(name)
+
+ charts = {
+ order[0]: {
+ 'options': [
+ None, 'Vhost "{0}" Messages'.format(name), 'messages/s', family, 'rabbitmq.vhost_messages', 'stacked'],
+ 'lines': [
+ ['vhost_{0}_message_stats_ack'.format(name), 'ack', 'incremental'],
+ ['vhost_{0}_message_stats_confirm'.format(name), 'confirm', 'incremental'],
+ ['vhost_{0}_message_stats_deliver'.format(name), 'deliver', 'incremental'],
+ ['vhost_{0}_message_stats_get'.format(name), 'get', 'incremental'],
+ ['vhost_{0}_message_stats_get_no_ack'.format(name), 'get_no_ack', 'incremental'],
+ ['vhost_{0}_message_stats_publish'.format(name), 'publish', 'incremental'],
+ ['vhost_{0}_message_stats_redeliver'.format(name), 'redeliver', 'incremental'],
+ ['vhost_{0}_message_stats_return_unroutable'.format(name), 'return_unroutable', 'incremental'],
+ ]
+ },
+ }
+
+ return order, charts
+
+def queue_chart_template(queue_id):
+ vhost, name = queue_id
+ order = [
+ 'vhost_{0}_queue_{1}_queued_message'.format(vhost, name),
+ 'vhost_{0}_queue_{1}_messages_stats'.format(vhost, name),
+ ]
+ family = 'vhost {0}'.format(vhost)
+
+ charts = {
+ order[0]: {
+ 'options': [
+ None, 'Queue "{0}" in "{1}" queued messages'.format(name, vhost), 'messages', family, 'rabbitmq.queue_messages', 'line'],
+ 'lines': [
+ ['vhost_{0}_queue_{1}_messages'.format(vhost, name), 'messages', 'absolute'],
+ ['vhost_{0}_queue_{1}_messages_paged_out'.format(vhost, name), 'paged_out', 'absolute'],
+ ['vhost_{0}_queue_{1}_messages_persistent'.format(vhost, name), 'persistent', 'absolute'],
+ ['vhost_{0}_queue_{1}_messages_ready'.format(vhost, name), 'ready', 'absolute'],
+ ['vhost_{0}_queue_{1}_messages_unacknowledged'.format(vhost, name), 'unack', 'absolute'],
+ ]
+ },
+ order[1]: {
+ 'options': [
+ None, 'Queue "{0}" in "{1}" messages stats'.format(name, vhost), 'messages/s', family, 'rabbitmq.queue_messages_stats', 'line'],
+ 'lines': [
+ ['vhost_{0}_queue_{1}_message_stats_ack'.format(vhost, name), 'ack', 'incremental'],
+ ['vhost_{0}_queue_{1}_message_stats_confirm'.format(vhost, name), 'confirm', 'incremental'],
+ ['vhost_{0}_queue_{1}_message_stats_deliver'.format(vhost, name), 'deliver', 'incremental'],
+ ['vhost_{0}_queue_{1}_message_stats_get'.format(vhost, name), 'get', 'incremental'],
+ ['vhost_{0}_queue_{1}_message_stats_get_no_ack'.format(vhost, name), 'get_no_ack', 'incremental'],
+ ['vhost_{0}_queue_{1}_message_stats_publish'.format(vhost, name), 'publish', 'incremental'],
+ ['vhost_{0}_queue_{1}_message_stats_redeliver'.format(vhost, name), 'redeliver', 'incremental'],
+ ['vhost_{0}_queue_{1}_message_stats_return_unroutable'.format(vhost, name), 'return_unroutable', 'incremental'],
+ ]
+ },
+ }
+
+ return order, charts
+
+
+class VhostStatsBuilder:
+ def __init__(self):
+ self.stats = None
+
+ def set(self, raw_stats):
+ self.stats = raw_stats
+
+ def name(self):
+ return self.stats['name']
+
+ def has_msg_stats(self):
+ return bool(self.stats.get('message_stats'))
+
+ def msg_stats(self):
+ name = self.name()
+ stats = fetch_data(raw_data=self.stats, metrics=VHOST_MESSAGE_STATS)
+ return dict(('vhost_{0}_{1}'.format(name, k), v) for k, v in stats.items())
+
+class QueueStatsBuilder:
+ def __init__(self):
+ self.stats = None
+
+ def set(self, raw_stats):
+ self.stats = raw_stats
+
+ def id(self):
+ return self.stats['vhost'], self.stats['name']
+
+ def queue_stats(self):
+ vhost, name = self.id()
+ stats = fetch_data(raw_data=self.stats, metrics=QUEUE_STATS)
+ return dict(('vhost_{0}_queue_{1}_{2}'.format(vhost, name, k), v) for k, v in stats.items())
+
+
+class Service(UrlService):
+ def __init__(self, configuration=None, name=None):
+ UrlService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
+ self.url = '{0}://{1}:{2}'.format(
+ configuration.get('scheme', 'http'),
+ configuration.get('host', '127.0.0.1'),
+ configuration.get('port', 15672),
+ )
+ self.node_name = str()
+ self.vhost = VhostStatsBuilder()
+ self.collected_vhosts = set()
+ self.collect_queues_metrics = configuration.get('collect_queues_metrics', False)
+ self.debug("collect_queues_metrics is {0}".format("enabled" if self.collect_queues_metrics else "disabled"))
+ if self.collect_queues_metrics:
+ self.queue = QueueStatsBuilder()
+ self.collected_queues = set()
+
+ def _get_data(self):
+ data = dict()
+
+ stats = self.get_overview_stats()
+ if not stats:
+ return None
+
+ data.update(stats)
+
+ stats = self.get_nodes_stats()
+ if not stats:
+ return None
+
+ data.update(stats)
+
+ stats = self.get_vhosts_stats()
+ if stats:
+ data.update(stats)
+
+ if self.collect_queues_metrics:
+ stats = self.get_queues_stats()
+ if stats:
+ data.update(stats)
+
+ return data or None
+
+ def get_overview_stats(self):
+ url = '{0}/{1}'.format(self.url, API_OVERVIEW)
+ self.debug("doing http request to '{0}'".format(url))
+ raw = self._get_raw_data(url)
+ if not raw:
+ return None
+
+ data = loads(raw)
+ self.node_name = data['node']
+ self.debug("found node name: '{0}'".format(self.node_name))
+
+ stats = fetch_data(raw_data=data, metrics=OVERVIEW_STATS)
+ self.debug("number of metrics: {0}".format(len(stats)))
+ return stats
+
+ def get_nodes_stats(self):
+ if self.node_name == "":
+ self.error("trying to get node stats, but node name is not set")
+ return None
+
+ url = '{0}/{1}/{2}'.format(self.url, API_NODE, self.node_name)
+ self.debug("doing http request to '{0}'".format(url))
+ raw = self._get_raw_data(url)
+ if not raw:
+ return None
+
+ data = loads(raw)
+ stats = fetch_data(raw_data=data, metrics=NODE_STATS)
+ handle_disabled_disk_monitoring(stats)
+ self.debug("number of metrics: {0}".format(len(stats)))
+ return stats
+
+ def get_vhosts_stats(self):
+ url = '{0}/{1}'.format(self.url, API_VHOSTS)
+ self.debug("doing http request to '{0}'".format(url))
+ raw = self._get_raw_data(url)
+ if not raw:
+ return None
+
+ data = dict()
+ vhosts = loads(raw)
+ charts_initialized = len(self.charts) > 0
+
+ for vhost in vhosts:
+ self.vhost.set(vhost)
+ if not self.vhost.has_msg_stats():
+ continue
+
+ if charts_initialized and self.vhost.name() not in self.collected_vhosts:
+ self.collected_vhosts.add(self.vhost.name())
+ self.add_vhost_charts(self.vhost.name())
+
+ data.update(self.vhost.msg_stats())
+
+ self.debug("number of vhosts: {0}, metrics: {1}".format(len(vhosts), len(data)))
+ return data
+
+ def get_queues_stats(self):
+ url = '{0}/{1}'.format(self.url, API_QUEUES)
+ self.debug("doing http request to '{0}'".format(url))
+ raw = self._get_raw_data(url)
+ if not raw:
+ return None
+
+ data = dict()
+ queues = loads(raw)
+ charts_initialized = len(self.charts) > 0
+
+ for queue in queues:
+ self.queue.set(queue)
+ if self.queue.id()[0] not in self.collected_vhosts:
+ continue
+
+ if charts_initialized and self.queue.id() not in self.collected_queues:
+ self.collected_queues.add(self.queue.id())
+ self.add_queue_charts(self.queue.id())
+
+ data.update(self.queue.queue_stats())
+
+ self.debug("number of queues: {0}, metrics: {1}".format(len(queues), len(data)))
+ return data
+
+ def add_vhost_charts(self, vhost_name):
+ order, charts = vhost_chart_template(vhost_name)
+
+ for chart_name in order:
+ params = [chart_name] + charts[chart_name]['options']
+ dimensions = charts[chart_name]['lines']
+
+ new_chart = self.charts.add_chart(params)
+ for dimension in dimensions:
+ new_chart.add_dimension(dimension)
+
+ def add_queue_charts(self, queue_id):
+ order, charts = queue_chart_template(queue_id)
+
+ for chart_name in order:
+ params = [chart_name] + charts[chart_name]['options']
+ dimensions = charts[chart_name]['lines']
+
+ new_chart = self.charts.add_chart(params)
+ for dimension in dimensions:
+ new_chart.add_dimension(dimension)
+
+
+def fetch_data(raw_data, metrics):
+ data = dict()
+ for metric in metrics:
+ value = raw_data
+ metrics_list = metric.split('.')
+ try:
+ for m in metrics_list:
+ value = value[m]
+ except (KeyError, TypeError):
+ continue
+ data['_'.join(metrics_list)] = value
+
+ return data
+
+
+def handle_disabled_disk_monitoring(node_stats):
+ # https://github.com/netdata/netdata/issues/7218
+ # can be "disk_free": "disk_free_monitoring_disabled"
+ v = node_stats.get('disk_free')
+ if v and not isinstance(v, int):
+ del node_stats['disk_free']
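
To make the metric flattening above concrete, a tiny sketch of `fetch_data()` (the input dict is made up, and the function is assumed to be in scope from the module above):

```python
# Sketch only: fetch_data() walks dotted paths and flattens the keys.
raw = {
    'messages': 42,
    'message_stats': {'ack': 5, 'publish': 9},
}
metrics = ['messages', 'message_stats.ack', 'missing.key']
print(fetch_data(raw_data=raw, metrics=metrics))
# -> {'messages': 42, 'message_stats_ack': 5} (missing paths are skipped)
```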
diff --git a/collectors/python.d.plugin/rabbitmq/rabbitmq.conf b/collectors/python.d.plugin/rabbitmq/rabbitmq.conf
new file mode 100644
index 0000000..47d47a1
--- /dev/null
+++ b/collectors/python.d.plugin/rabbitmq/rabbitmq.conf
@@ -0,0 +1,86 @@
+# netdata python.d.plugin configuration for rabbitmq
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# Additionally to the above, rabbitmq plugin also supports the following:
+#
+# host: 'ipaddress' # Server ip address or hostname. Default: 127.0.0.1
+# port: 'port' # Rabbitmq port. Default: 15672
+# scheme: 'scheme' # http or https. Default: http
+#
+# if the URL is password protected, the following are supported:
+#
+# user: 'username'
+# pass: 'password'
+#
+# The rabbitmq plugin can also collect per-vhost, per-queue stats, which is
+# disabled by default. Please note that enabling this can induce serious
+# overhead on both netdata and rabbitmq if a lot of queues are configured and used.
+#
+# collect_queues_metrics: 'yes/no'
+#
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+#
+local:
+ host: '127.0.0.1'
+ user: 'guest'
+ pass: 'guest'
diff --git a/collectors/python.d.plugin/redis/Makefile.inc b/collectors/python.d.plugin/redis/Makefile.inc
new file mode 100644
index 0000000..6aab089
--- /dev/null
+++ b/collectors/python.d.plugin/redis/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += redis/redis.chart.py
+dist_pythonconfig_DATA += redis/redis.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += redis/README.md redis/Makefile.inc
+
diff --git a/collectors/python.d.plugin/redis/README.md b/collectors/python.d.plugin/redis/README.md
new file mode 100644
index 0000000..9fab56c
--- /dev/null
+++ b/collectors/python.d.plugin/redis/README.md
@@ -0,0 +1,64 @@
+<!--
+title: "Redis monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/redis/README.md
+sidebar_label: "Redis"
+-->
+
+# Redis monitoring with Netdata
+
+Monitors database status. It reads the server's response to the `INFO` command
+(a short parsing sketch follows the chart list below).
+
+The following charts are drawn:
+
+1. **Operations** per second
+
+ - operations
+
+2. **Hit rate** in percent
+
+ - rate
+
+3. **Memory utilization** in kilobytes
+
+ - total
+ - lua
+
+4. **Database keys**
+
+   - lines are created dynamically, based on how many databases there are
+
+5. **Clients**
+
+ - connected
+ - blocked
+
+6. **Slaves**
+
+ - connected
+
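+The module builds these charts by splitting the `INFO` reply into key/value
+pairs. Below is a minimal sketch of that step, reusing the collector's own
+regular expression on an illustrative (not real) reply:
+
+```python
+import re
+
+# The same pattern the collector uses to split the INFO reply.
+RE = re.compile(r'\n([a-z_0-9 ]+):(?:keys=)?([^,\r]+)')
+
+sample = '\nkeyspace_hits:100\r\nkeyspace_misses:25\r\ndb0:keys=2,expires=0,avg_ttl=0\r\n'
+
+data = dict((k.replace(' ', '_'), v) for k, v in RE.findall(sample))
+hits, misses = int(data['keyspace_hits']), int(data['keyspace_misses'])
+print(data['db0'], hits * 100 / (hits + misses))  # prints: 2 80.0
+```
+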
+## Configuration
+
+Edit the `python.d/redis.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/redis.conf
+```
+
+```yaml
+socket:
+ name : 'local'
+ socket : '/var/lib/redis/redis.sock'
+
+localhost:
+ name : 'local'
+ host : 'localhost'
+ port : 6379
+```
+
+When no configuration file is found, the module tries to connect to the TCP/IP socket `localhost:6379`.
+
+---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fredis%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/python.d.plugin/redis/redis.chart.py b/collectors/python.d.plugin/redis/redis.chart.py
new file mode 100644
index 0000000..e09916d
--- /dev/null
+++ b/collectors/python.d.plugin/redis/redis.chart.py
@@ -0,0 +1,268 @@
+# -*- coding: utf-8 -*-
+# Description: redis netdata python.d module
+# Author: Pawel Krupa (paulfantom)
+# Author: Ilya Mashchenko (ilyam8)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import re
+from copy import deepcopy
+
+from bases.FrameworkServices.SocketService import SocketService
+
+REDIS_ORDER = [
+ 'operations',
+ 'hit_rate',
+ 'memory',
+ 'keys_redis',
+ 'eviction',
+ 'net',
+ 'connections',
+ 'clients',
+ 'slaves',
+ 'persistence',
+ 'bgsave_now',
+ 'bgsave_health',
+ 'uptime',
+]
+
+PIKA_ORDER = [
+ 'operations',
+ 'hit_rate',
+ 'memory',
+ 'keys_pika',
+ 'connections',
+ 'clients',
+ 'slaves',
+ 'uptime',
+]
+
+CHARTS = {
+ 'operations': {
+ 'options': [None, 'Operations', 'operations/s', 'operations', 'redis.operations', 'line'],
+ 'lines': [
+ ['total_commands_processed', 'commands', 'incremental'],
+ ['instantaneous_ops_per_sec', 'operations', 'absolute']
+ ]
+ },
+ 'hit_rate': {
+ 'options': [None, 'Hit rate', 'percentage', 'hits', 'redis.hit_rate', 'line'],
+ 'lines': [
+ ['hit_rate', 'rate', 'absolute']
+ ]
+ },
+ 'memory': {
+ 'options': [None, 'Memory utilization', 'KiB', 'memory', 'redis.memory', 'area'],
+ 'lines': [
+ ['maxmemory', 'max', 'absolute', 1, 1024],
+ ['used_memory', 'total', 'absolute', 1, 1024],
+ ['used_memory_lua', 'lua', 'absolute', 1, 1024]
+ ]
+ },
+ 'net': {
+ 'options': [None, 'Bandwidth', 'kilobits/s', 'network', 'redis.net', 'area'],
+ 'lines': [
+ ['total_net_input_bytes', 'in', 'incremental', 8, 1000],
+ ['total_net_output_bytes', 'out', 'incremental', -8, 1000]
+ ]
+ },
+ 'keys_redis': {
+ 'options': [None, 'Keys per Database', 'keys', 'keys', 'redis.keys', 'line'],
+ 'lines': []
+ },
+ 'keys_pika': {
+ 'options': [None, 'Keys', 'keys', 'keys', 'redis.keys', 'line'],
+ 'lines': [
+ ['kv_keys', 'kv', 'absolute'],
+ ['hash_keys', 'hash', 'absolute'],
+ ['list_keys', 'list', 'absolute'],
+ ['zset_keys', 'zset', 'absolute'],
+ ['set_keys', 'set', 'absolute']
+ ]
+ },
+ 'eviction': {
+ 'options': [None, 'Evicted Keys', 'keys', 'keys', 'redis.eviction', 'line'],
+ 'lines': [
+ ['evicted_keys', 'evicted', 'absolute']
+ ]
+ },
+ 'connections': {
+ 'options': [None, 'Connections', 'connections/s', 'connections', 'redis.connections', 'line'],
+ 'lines': [
+ ['total_connections_received', 'received', 'incremental', 1],
+ ['rejected_connections', 'rejected', 'incremental', -1]
+ ]
+ },
+ 'clients': {
+ 'options': [None, 'Clients', 'clients', 'connections', 'redis.clients', 'line'],
+ 'lines': [
+ ['connected_clients', 'connected', 'absolute', 1],
+ ['blocked_clients', 'blocked', 'absolute', -1]
+ ]
+ },
+ 'slaves': {
+ 'options': [None, 'Slaves', 'slaves', 'replication', 'redis.slaves', 'line'],
+ 'lines': [
+ ['connected_slaves', 'connected', 'absolute']
+ ]
+ },
+ 'persistence': {
+ 'options': [None, 'Persistence Changes Since Last Save', 'changes', 'persistence',
+ 'redis.rdb_changes', 'line'],
+ 'lines': [
+ ['rdb_changes_since_last_save', 'changes', 'absolute']
+ ]
+ },
+ 'bgsave_now': {
+ 'options': [None, 'Duration of the RDB Save Operation', 'seconds', 'persistence',
+ 'redis.bgsave_now', 'absolute'],
+ 'lines': [
+ ['rdb_bgsave_in_progress', 'rdb save', 'absolute']
+ ]
+ },
+ 'bgsave_health': {
+ 'options': [None, 'Status of the Last RDB Save Operation', 'status', 'persistence',
+ 'redis.bgsave_health', 'line'],
+ 'lines': [
+ ['rdb_last_bgsave_status', 'rdb save', 'absolute']
+ ]
+ },
+ 'uptime': {
+ 'options': [None, 'Uptime', 'seconds', 'uptime', 'redis.uptime', 'line'],
+ 'lines': [
+ ['uptime_in_seconds', 'uptime', 'absolute']
+ ]
+ }
+}
+
+
+def copy_chart(name):
+ return {name: deepcopy(CHARTS[name])}
+
+
+RE = re.compile(r'\n([a-z_0-9 ]+):(?:keys=)?([^,\r]+)')
+
+
+class Service(SocketService):
+ def __init__(self, configuration=None, name=None):
+ SocketService.__init__(self, configuration=configuration, name=name)
+ self.order = list()
+ self.definitions = dict()
+ self._keep_alive = True
+ self.host = self.configuration.get('host', 'localhost')
+ self.port = self.configuration.get('port', 6379)
+ self.unix_socket = self.configuration.get('socket')
+ p = self.configuration.get('pass')
+ self.auth_request = 'AUTH {0} \r\n'.format(p).encode() if p else None
+ self.request = 'INFO\r\n'.encode()
+ self.bgsave_time = 0
+ self.keyspace_dbs = set()
+
+ def do_auth(self):
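+        # A successful AUTH returns the RESP simple string '+OK' (checked below).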
+ resp = self._get_raw_data(request=self.auth_request)
+ if not resp:
+ return False
+ if resp.strip() != '+OK':
+ self.error('invalid password')
+ return False
+ return True
+
+ def get_raw_and_parse(self):
+ if self.auth_request and not self.do_auth():
+ return None
+
+ resp = self._get_raw_data()
+
+ if not resp:
+ return None
+
+ parsed = RE.findall(resp)
+
+ if not parsed:
+ self.error('response is invalid/empty')
+ return None
+
+ return dict((k.replace(' ', '_'), v) for k, v in parsed)
+
+ def get_data(self):
+ """
+ Get data from socket
+ :return: dict
+ """
+ data = self.get_raw_and_parse()
+ if not data:
+ return None
+
+ self.calc_hit_rate(data)
+ self.calc_redis_keys(data)
+ self.calc_redis_rdb_save_operations(data)
+ return data
+
+ @staticmethod
+ def calc_hit_rate(data):
+ try:
+ hits = int(data['keyspace_hits'])
+ misses = int(data['keyspace_misses'])
+ data['hit_rate'] = hits * 100 / (hits + misses)
+ except (KeyError, ZeroDivisionError):
+ data['hit_rate'] = 0
+
+ def calc_redis_keys(self, data):
+ if not data.get('redis_version'):
+ return
+ # db0:keys=2,expires=0,avg_ttl=0
+ new_keyspace_dbs = [k for k in data if k.startswith('db') and k not in self.keyspace_dbs]
+ for db in new_keyspace_dbs:
+ self.keyspace_dbs.add(db)
+ self.charts['keys_redis'].add_dimension([db, None, 'absolute'])
+ for db in self.keyspace_dbs:
+ if db not in data:
+ data[db] = 0
+
+ def calc_redis_rdb_save_operations(self, data):
+ if not (data.get('redis_version') and data.get('rdb_bgsave_in_progress')):
+ return
+ if data['rdb_bgsave_in_progress'] != '0':
+ self.bgsave_time += self.update_every
+ else:
+ self.bgsave_time = 0
+
+ data['rdb_last_bgsave_status'] = 0 if data['rdb_last_bgsave_status'] == 'ok' else 1
+ data['rdb_bgsave_in_progress'] = self.bgsave_time
+
+ def check(self):
+ """
+ Parse configuration, check if redis is available, and dynamically create chart lines data
+ :return: boolean
+ """
+ data = self.get_raw_and_parse()
+
+ if not data:
+ return False
+
+ self.order = PIKA_ORDER if data.get('pika_version') else REDIS_ORDER
+
+ for n in self.order:
+ self.definitions.update(copy_chart(n))
+
+ return True
+
+ def _check_raw_data(self, data):
+ """
+ Check if all data has been gathered from socket.
+ Parse first line containing message length and check against received message
+ :param data: str
+ :return: boolean
+ """
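+        # INFO replies use the RESP bulk-string framing '$<length>\r\n<payload>\r\n';
+        # the first line declares the payload length that is checked below.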
+ length = len(data)
+ supposed = data.split('\n')[0][1:-1]
+        offset = len(supposed) + 4  # 1 dollar sign, 1 newline character + 1 ending sequence '\r\n'
+ if not supposed.isdigit():
+ return True
+ supposed = int(supposed)
+
+ if length - offset >= supposed:
+ self.debug('received full response from redis')
+ return True
+
+ self.debug('waiting more data from redis')
+ return False
diff --git a/collectors/python.d.plugin/redis/redis.conf b/collectors/python.d.plugin/redis/redis.conf
new file mode 100644
index 0000000..b456d75
--- /dev/null
+++ b/collectors/python.d.plugin/redis/redis.conf
@@ -0,0 +1,110 @@
+# netdata python.d.plugin configuration for redis
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if the check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# In addition to the above, redis also supports the following:
+#
+# socket: 'path/to/redis.sock'
+#
+# or
+# host: 'IP or HOSTNAME' # the host to connect to
+# port: PORT # the port to connect to
+#
+# and
+# pass: 'password' # the redis password to use for AUTH command
+#
+
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+
+socket1:
+ name : 'local'
+ socket : '/tmp/redis.sock'
+ # pass : ''
+
+socket2:
+ name : 'local'
+ socket : '/var/run/redis/redis.sock'
+ # pass : ''
+
+socket3:
+ name : 'local'
+ socket : '/var/lib/redis/redis.sock'
+ # pass : ''
+
+localhost:
+ name : 'local'
+ host : 'localhost'
+ port : 6379
+ # pass : ''
+
+localipv4:
+ name : 'local'
+ host : '127.0.0.1'
+ port : 6379
+ # pass : ''
+
+localipv6:
+ name : 'local'
+ host : '::1'
+ port : 6379
+ # pass : ''
+
diff --git a/collectors/python.d.plugin/rethinkdbs/Makefile.inc b/collectors/python.d.plugin/rethinkdbs/Makefile.inc
new file mode 100644
index 0000000..dec6044
--- /dev/null
+++ b/collectors/python.d.plugin/rethinkdbs/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += rethinkdbs/rethinkdbs.chart.py
+dist_pythonconfig_DATA += rethinkdbs/rethinkdbs.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += rethinkdbs/README.md rethinkdbs/Makefile.inc
+
diff --git a/collectors/python.d.plugin/rethinkdbs/README.md b/collectors/python.d.plugin/rethinkdbs/README.md
new file mode 100644
index 0000000..85cebd9
--- /dev/null
+++ b/collectors/python.d.plugin/rethinkdbs/README.md
@@ -0,0 +1,53 @@
+<!--
+title: "RethinkDB monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/rethinkdbs/README.md
+sidebar_label: "RethinkDB"
+-->
+
+# RethinkDB monitoring with Netdata
+
+Collects database server and cluster statistics.
+
+The following charts are drawn:
+
+1. **Connected Servers**
+
+ - connected
+ - missing
+
+2. **Active Clients**
+
+ - active
+
+3. **Queries** per second
+
+ - queries
+
+4. **Documents** per second
+
+ - documents
+
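+Internally, the collector reads RethinkDB's own `stats` system table. Below is
+a minimal sketch of that query, assuming the `rethinkdb` Python driver is
+installed and the server accepts the default `admin` user:
+
+```python
+import rethinkdb as rdb
+
+# Newer drivers expose the API via RethinkDB(); older ones use the module directly.
+r = rdb.RethinkDB() if hasattr(rdb, 'RethinkDB') else rdb
+
+conn = r.connect(host='127.0.0.1', port=28015, user='admin', timeout=2)
+
+# Row ids look like ['cluster'] or ['server', '<uuid>'].
+for row in r.db('rethinkdb').table('stats').run(conn):
+    print(row['id'], row.get('query_engine', {}))
+```
+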
+## Configuration
+
+Edit the `python.d/rethinkdbs.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/rethinkdbs.conf
+```
+
+```yaml
+localhost:
+ name : 'local'
+ host : '127.0.0.1'
+ port : 28015
+ user : "user"
+ password : "pass"
+```
+
+When no configuration file is found, the module tries to connect to `127.0.0.1:28015`.
+
+---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Frethinkdbs%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/python.d.plugin/rethinkdbs/rethinkdbs.chart.py b/collectors/python.d.plugin/rethinkdbs/rethinkdbs.chart.py
new file mode 100644
index 0000000..e3fbc36
--- /dev/null
+++ b/collectors/python.d.plugin/rethinkdbs/rethinkdbs.chart.py
@@ -0,0 +1,247 @@
+# -*- coding: utf-8 -*-
+# Description: rethinkdb netdata python.d module
+# Author: Ilya Mashchenko (ilyam8)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+try:
+ import rethinkdb as rdb
+
+ HAS_RETHINKDB = True
+except ImportError:
+ HAS_RETHINKDB = False
+
+from bases.FrameworkServices.SimpleService import SimpleService
+
+ORDER = [
+ 'cluster_connected_servers',
+ 'cluster_clients_active',
+ 'cluster_queries',
+ 'cluster_documents',
+]
+
+
+def cluster_charts():
+ return {
+ 'cluster_connected_servers': {
+ 'options': [None, 'Connected Servers', 'servers', 'cluster', 'rethinkdb.cluster_connected_servers',
+ 'stacked'],
+ 'lines': [
+ ['cluster_servers_connected', 'connected'],
+ ['cluster_servers_missing', 'missing'],
+ ]
+ },
+ 'cluster_clients_active': {
+ 'options': [None, 'Active Clients', 'clients', 'cluster', 'rethinkdb.cluster_clients_active',
+ 'line'],
+ 'lines': [
+ ['cluster_clients_active', 'active'],
+ ]
+ },
+ 'cluster_queries': {
+ 'options': [None, 'Queries', 'queries/s', 'cluster', 'rethinkdb.cluster_queries', 'line'],
+ 'lines': [
+ ['cluster_queries_per_sec', 'queries'],
+ ]
+ },
+ 'cluster_documents': {
+ 'options': [None, 'Documents', 'documents/s', 'cluster', 'rethinkdb.cluster_documents', 'line'],
+ 'lines': [
+ ['cluster_read_docs_per_sec', 'reads'],
+ ['cluster_written_docs_per_sec', 'writes'],
+ ]
+ },
+ }
+
+
+def server_charts(n):
+ o = [
+ '{0}_client_connections'.format(n),
+ '{0}_clients_active'.format(n),
+ '{0}_queries'.format(n),
+ '{0}_documents'.format(n),
+ ]
+ f = 'server {0}'.format(n)
+
+ c = {
+ o[0]: {
+ 'options': [None, 'Client Connections', 'connections', f, 'rethinkdb.client_connections', 'line'],
+ 'lines': [
+ ['{0}_client_connections'.format(n), 'connections'],
+ ]
+ },
+ o[1]: {
+ 'options': [None, 'Active Clients', 'clients', f, 'rethinkdb.clients_active', 'line'],
+ 'lines': [
+ ['{0}_clients_active'.format(n), 'active'],
+ ]
+ },
+ o[2]: {
+ 'options': [None, 'Queries', 'queries/s', f, 'rethinkdb.queries', 'line'],
+ 'lines': [
+ ['{0}_queries_total'.format(n), 'queries', 'incremental'],
+ ]
+ },
+ o[3]: {
+ 'options': [None, 'Documents', 'documents/s', f, 'rethinkdb.documents', 'line'],
+ 'lines': [
+ ['{0}_read_docs_total'.format(n), 'reads', 'incremental'],
+ ['{0}_written_docs_total'.format(n), 'writes', 'incremental'],
+ ]
+ },
+ }
+
+ return o, c
+
+
+class Cluster:
+ def __init__(self, raw):
+ self.raw = raw
+
+ def data(self):
+ qe = self.raw['query_engine']
+
+ return {
+ 'cluster_clients_active': qe['clients_active'],
+ 'cluster_queries_per_sec': qe['queries_per_sec'],
+ 'cluster_read_docs_per_sec': qe['read_docs_per_sec'],
+ 'cluster_written_docs_per_sec': qe['written_docs_per_sec'],
+ 'cluster_servers_connected': 0,
+ 'cluster_servers_missing': 0,
+ }
+
+
+class Server:
+ def __init__(self, raw):
+ self.name = raw['server']
+ self.raw = raw
+
+ def error(self):
+ return self.raw.get('error')
+
+ def data(self):
+ qe = self.raw['query_engine']
+
+ d = {
+ 'client_connections': qe['client_connections'],
+ 'clients_active': qe['clients_active'],
+ 'queries_total': qe['queries_total'],
+ 'read_docs_total': qe['read_docs_total'],
+ 'written_docs_total': qe['written_docs_total'],
+ }
+
+ return dict(('{0}_{1}'.format(self.name, k), d[k]) for k in d)
+
+
+# https://pypi.org/project/rethinkdb/2.4.0/
+# rdb.RethinkDB() can be used as a drop-in replacement for rdb.
+# https://github.com/rethinkdb/rethinkdb-python#quickstart
+def get_rethinkdb():
+ if hasattr(rdb, 'RethinkDB'):
+ return rdb.RethinkDB()
+ return rdb
+
+
+class Service(SimpleService):
+ def __init__(self, configuration=None, name=None):
+ SimpleService.__init__(self, configuration=configuration, name=name)
+ self.order = list(ORDER)
+ self.definitions = cluster_charts()
+ self.host = self.configuration.get('host', '127.0.0.1')
+ self.port = self.configuration.get('port', 28015)
+ self.user = self.configuration.get('user', 'admin')
+ self.password = self.configuration.get('password')
+ self.timeout = self.configuration.get('timeout', 2)
+ self.rdb = None
+ self.conn = None
+ self.alive = True
+
+ def check(self):
+ if not HAS_RETHINKDB:
+ self.error('"rethinkdb" module is needed to use rethinkdbs.py')
+ return False
+
+ self.debug("rethinkdb driver version {0}".format(rdb.__version__))
+ self.rdb = get_rethinkdb()
+
+ if not self.connect():
+ return None
+
+ stats = self.get_stats()
+
+ if not stats:
+ return None
+
+ for v in stats[1:]:
+ if get_id(v) == 'server':
+ o, c = server_charts(v['server'])
+ self.order.extend(o)
+ self.definitions.update(c)
+
+ return True
+
+ def get_data(self):
+ if not self.is_alive():
+ return None
+
+ stats = self.get_stats()
+
+ if not stats:
+ return None
+
+ data = dict()
+
+ # cluster
+ data.update(Cluster(stats[0]).data())
+
+ # servers
+ for v in stats[1:]:
+ if get_id(v) != 'server':
+ continue
+
+ s = Server(v)
+
+ if s.error():
+ data['cluster_servers_missing'] += 1
+ else:
+ data['cluster_servers_connected'] += 1
+ data.update(s.data())
+
+ return data
+
+ def get_stats(self):
+ try:
+ return list(self.rdb.db('rethinkdb').table('stats').run(self.conn).items)
+ except rdb.errors.ReqlError:
+ self.alive = False
+ return None
+
+ def connect(self):
+ try:
+ self.conn = self.rdb.connect(
+ host=self.host,
+ port=self.port,
+ user=self.user,
+ password=self.password,
+ timeout=self.timeout,
+ )
+ self.alive = True
+ return True
+ except rdb.errors.ReqlError as error:
+ self.error('Connection to {0}:{1} failed: {2}'.format(self.host, self.port, error))
+ return False
+
+ def reconnect(self):
+ # The connection is already closed after rdb.errors.ReqlError,
+ # so we do not need to call conn.close()
+ if self.connect():
+ return True
+ return False
+
+ def is_alive(self):
+ if not self.alive:
+ return self.reconnect()
+ return True
+
+
+def get_id(v):
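+    # In the 'stats' system table, primary keys are arrays such as ['cluster']
+    # or ['server', '<uuid>'], so the first element identifies the entry kind.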
+ return v['id'][0]
diff --git a/collectors/python.d.plugin/rethinkdbs/rethinkdbs.conf b/collectors/python.d.plugin/rethinkdbs/rethinkdbs.conf
new file mode 100644
index 0000000..d671acb
--- /dev/null
+++ b/collectors/python.d.plugin/rethinkdbs/rethinkdbs.conf
@@ -0,0 +1,76 @@
+# netdata python.d.plugin configuration for rethinkdb
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if the check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# In addition to the above, rethinkdb also supports the following:
+#
+# host: IP or HOSTNAME # default is 'localhost'
+# port: PORT # default is 28015
+# user: USERNAME # default is 'admin'
+# password: PASSWORD # not set by default
+# timeout: TIMEOUT # default is 2
+
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+
+local:
+ name: 'local'
+ host: 'localhost'
diff --git a/collectors/python.d.plugin/retroshare/Makefile.inc b/collectors/python.d.plugin/retroshare/Makefile.inc
new file mode 100644
index 0000000..891193e
--- /dev/null
+++ b/collectors/python.d.plugin/retroshare/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += retroshare/retroshare.chart.py
+dist_pythonconfig_DATA += retroshare/retroshare.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += retroshare/README.md retroshare/Makefile.inc
+
diff --git a/collectors/python.d.plugin/retroshare/README.md b/collectors/python.d.plugin/retroshare/README.md
new file mode 100644
index 0000000..d8bd3a9
--- /dev/null
+++ b/collectors/python.d.plugin/retroshare/README.md
@@ -0,0 +1,47 @@
+<!--
+title: "RetroShare monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/retroshare/README.md
+sidebar_label: "RetroShare"
+-->
+
+# RetroShare monitoring with Netdata
+
+Monitors application bandwidth, peers and DHT metrics.
+
+This module will monitor one or more `RetroShare` applications, depending on your configuration.
+
+## Charts
+
+This module produces the following charts:
+
+- Bandwidth in `kilobits/s`
+- Peers in `peers`
+- DHT in `peers`
+
+
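+Under the hood, the module issues a plain HTTP GET against the WebUI's
+`/api/v2/stats` endpoint and only uses the payload when its `returncode` is
+`ok`. Below is a minimal standalone sketch of the same request (authentication
+omitted for brevity; field names taken from the chart definitions):
+
+```python
+import json
+from urllib.request import urlopen  # Python 3
+
+parsed = json.loads(urlopen('http://localhost:9090/api/v2/stats').read())
+
+if str(parsed['returncode']) == 'ok':
+    stats = parsed['data'][0]
+    print(stats['peers_connected'], stats['bandwidth_up_kb'])
+```
+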
+## Configuration
+
+Edit the `python.d/retroshare.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/retroshare.conf
+```
+
+Here is an example for 2 servers:
+
+```yaml
+localhost:
+ url : 'http://localhost:9090'
+ user : "user"
+ password : "pass"
+
+remote:
+ url : 'http://203.0.113.1:9090'
+ user : "user"
+ password : "pass"
+```
+
+---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fretroshare%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/python.d.plugin/retroshare/retroshare.chart.py b/collectors/python.d.plugin/retroshare/retroshare.chart.py
new file mode 100644
index 0000000..3f9593e
--- /dev/null
+++ b/collectors/python.d.plugin/retroshare/retroshare.chart.py
@@ -0,0 +1,78 @@
+# -*- coding: utf-8 -*-
+# Description: RetroShare netdata python.d module
+# Authors: sehraf
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import json
+
+from bases.FrameworkServices.UrlService import UrlService
+
+ORDER = [
+ 'bandwidth',
+ 'peers',
+ 'dht',
+]
+
+CHARTS = {
+ 'bandwidth': {
+ 'options': [None, 'RetroShare Bandwidth', 'kilobits/s', 'RetroShare', 'retroshare.bandwidth', 'area'],
+ 'lines': [
+ ['bandwidth_up_kb', 'Upload'],
+ ['bandwidth_down_kb', 'Download']
+ ]
+ },
+ 'peers': {
+ 'options': [None, 'RetroShare Peers', 'peers', 'RetroShare', 'retroshare.peers', 'line'],
+ 'lines': [
+ ['peers_all', 'All friends'],
+ ['peers_connected', 'Connected friends']
+ ]
+ },
+ 'dht': {
+ 'options': [None, 'Retroshare DHT', 'peers', 'RetroShare', 'retroshare.dht', 'line'],
+ 'lines': [
+ ['dht_size_all', 'DHT nodes estimated'],
+ ['dht_size_rs', 'RS nodes estimated']
+ ]
+ }
+}
+
+
+class Service(UrlService):
+ def __init__(self, configuration=None, name=None):
+ UrlService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
+ self.baseurl = self.configuration.get('url', 'http://localhost:9090')
+
+ def _get_stats(self):
+ """
+ Format data received from http request
+ :return: dict
+ """
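+        # The WebUI replies with JSON shaped like {"returncode": "ok", "data": [{...}]}
+        # (shape inferred from the checks below).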
+ try:
+ raw = self._get_raw_data()
+ parsed = json.loads(raw)
+ if str(parsed['returncode']) != 'ok':
+ return None
+ except (TypeError, ValueError):
+ return None
+
+ return parsed['data'][0]
+
+ def _get_data(self):
+ """
+ Get data from API
+ :return: dict
+ """
+ self.url = self.baseurl + '/api/v2/stats'
+ data = self._get_stats()
+ if data is None:
+ return None
+
+ data['bandwidth_up_kb'] = data['bandwidth_up_kb'] * -1
+ if data['dht_active'] is False:
+ data['dht_size_all'] = None
+ data['dht_size_rs'] = None
+
+ return data
diff --git a/collectors/python.d.plugin/retroshare/retroshare.conf b/collectors/python.d.plugin/retroshare/retroshare.conf
new file mode 100644
index 0000000..3d0af53
--- /dev/null
+++ b/collectors/python.d.plugin/retroshare/retroshare.conf
@@ -0,0 +1,72 @@
+# netdata python.d.plugin configuration for RetroShare
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if the check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# In addition to the above, RetroShare also supports the following:
+#
+# - url: 'url' # the URL to the WebUI
+#
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+
+localhost:
+ name: 'local'
+ url: 'http://localhost:9090'
diff --git a/collectors/python.d.plugin/riakkv/Makefile.inc b/collectors/python.d.plugin/riakkv/Makefile.inc
new file mode 100644
index 0000000..87d29f8
--- /dev/null
+++ b/collectors/python.d.plugin/riakkv/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += riakkv/riakkv.chart.py
+dist_pythonconfig_DATA += riakkv/riakkv.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += riakkv/README.md riakkv/Makefile.inc
+
diff --git a/collectors/python.d.plugin/riakkv/README.md b/collectors/python.d.plugin/riakkv/README.md
new file mode 100644
index 0000000..d0ea9a1
--- /dev/null
+++ b/collectors/python.d.plugin/riakkv/README.md
@@ -0,0 +1,128 @@
+<!--
+title: "Riak KV monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/riakkv/README.md
+sidebar_label: "Riak KV"
+-->
+
+# Riak KV monitoring with Netdata
+
+Collects database stats from the `/stats` endpoint.
+
+## Requirements
+
+- An accessible `/stats` endpoint. See [the Riak KV configuration reference documentation](https://docs.riak.com/riak/kv/2.2.3/configuring/reference/#client-interfaces)
+ for how to enable this.
+
+The following charts are included, which are mostly derived from the metrics
+listed
+[here](https://docs.riak.com/riak/kv/latest/using/reference/statistics-monitoring/index.html#riak-metrics-to-graph).
+
+1. **Throughput** in operations/s
+
+- **KV operations**
+ - gets
+ - puts
+
+- **Data type updates**
+ - counters
+ - sets
+ - maps
+
+- **Search queries**
+ - queries
+
+- **Search documents**
+ - indexed
+
+- **Strong consistency operations**
+ - gets
+ - puts
+
+2. **Latency** in milliseconds
+
+- **KV latency** of the past minute
+ - get (mean, median, 95th / 99th / 100th percentile)
+ - put (mean, median, 95th / 99th / 100th percentile)
+
+- **Data type latency** of the past minute
+ - counter_merge (mean, median, 95th / 99th / 100th percentile)
+ - set_merge (mean, median, 95th / 99th / 100th percentile)
+ - map_merge (mean, median, 95th / 99th / 100th percentile)
+
+- **Search latency** of the past minute
+ - query (median, min, max, 95th / 99th percentile)
+ - index (median, min, max, 95th / 99th percentile)
+
+- **Strong consistency latency** of the past minute
+ - get (mean, median, 95th / 99th / 100th percentile)
+ - put (mean, median, 95th / 99th / 100th percentile)
+
+3. **Erlang VM metrics**
+
+- **System counters**
+ - processes
+
+- **Memory allocation** in MB
+ - processes.allocated
+ - processes.used
+
+4. **General load / health metrics**
+
+- **Siblings encountered in KV operations** during the past minute
+ - get (mean, median, 95th / 99th / 100th percentile)
+
+- **Object size in KV operations** during the past minute in KB
+ - get (mean, median, 95th / 99th / 100th percentile)
+
+- **Message queue length** in unprocessed messages
+ - vnodeq_size (mean, median, 95th / 99th / 100th percentile)
+
+- **Index operations** encountered by Search
+ - errors
+
+- **Protocol buffer connections**
+ - active
+
+- **Repair operations coordinated by this node**
+ - read
+
+- **Active finite state machines by kind**
+ - get
+ - put
+ - secondary_index
+ - list_keys
+
+- **Rejected finite state machines**
+ - get
+ - put
+
+- **Number of writes to Search failed due to bad data format by reason**
+ - bad_entry
+ - extract_fail
+
+## Configuration
+
+Edit the `python.d/riakkv.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/riakkv.conf
+```
+
+The module needs to be passed the full URL to Riak's stats endpoint.
+For example:
+
+```yaml
+myriak:
+ url: http://myriak.example.com:8098/stats
+```
+
+With no explicit configuration given, the module will attempt to connect to
+`http://localhost:8098/stats`.
+
+The default update frequency for the plugin is set to 2 seconds because Riak
+updates its metrics internally every second; polling at the same 1-second rate
+would produce odd jitter in the resulting graph.
+
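+For reference, the collector simply fetches that URL and decodes the JSON
+document; every chart dimension maps to a top-level key in it. Below is a
+minimal sketch (key names taken from the chart definitions; latencies are
+reported by Riak in microseconds):
+
+```python
+import json
+from urllib.request import urlopen  # Python 3
+
+stats = json.loads(urlopen('http://localhost:8098/stats').read())
+
+# node_gets_total is a counter the collector samples incrementally;
+# node_get_fsm_time_median is converted from microseconds to milliseconds.
+print(stats['node_gets_total'], stats['node_get_fsm_time_median'] / 1000.0)
+```
+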
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Friakkv%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/riakkv/riakkv.chart.py b/collectors/python.d.plugin/riakkv/riakkv.chart.py
new file mode 100644
index 0000000..c390c8b
--- /dev/null
+++ b/collectors/python.d.plugin/riakkv/riakkv.chart.py
@@ -0,0 +1,334 @@
+# -*- coding: utf-8 -*-
+# Description: riak netdata python.d module
+#
+# See also:
+# https://docs.riak.com/riak/kv/latest/using/reference/statistics-monitoring/index.html
+
+from json import loads
+
+from bases.FrameworkServices.UrlService import UrlService
+
+# Riak updates the metrics at the /stats endpoint every 1 second.
+# If we use `update_every = 1` here, that means we might get weird jitter in the graph,
+# so the default is set to 2 seconds to prevent it.
+update_every = 2
+
+# charts order (can be overridden if you want less charts, or different order)
+ORDER = [
+ # Throughput metrics
+ # https://docs.riak.com/riak/kv/latest/using/reference/statistics-monitoring/index.html#throughput-metrics
+ # Collected in totals.
+ "kv.node_operations", # K/V node operations.
+ "dt.vnode_updates", # Data type vnode updates.
+ "search.queries", # Search queries on the node.
+ "search.documents", # Documents indexed by Search.
+ "consistent.operations", # Consistent node operations.
+
+ # Latency metrics
+    # https://docs.riak.com/riak/kv/latest/using/reference/statistics-monitoring/index.html#latency-metrics
+ # Collected for the past minute in milliseconds,
+ # returned from riak in microseconds.
+ "kv.latency.get", # K/V GET FSM traversal latency.
+ "kv.latency.put", # K/V PUT FSM traversal latency.
+ "dt.latency.counter", # Update Counter Data type latency.
+ "dt.latency.set", # Update Set Data type latency.
+ "dt.latency.map", # Update Map Data type latency.
+ "search.latency.query", # Search query latency.
+ "search.latency.index", # Time it takes for search to index a new document.
+ "consistent.latency.get", # Strong consistent read latency.
+ "consistent.latency.put", # Strong consistent write latency.
+
+ # Erlang resource usage metrics
+ # https://docs.riak.com/riak/kv/latest/using/reference/statistics-monitoring/index.html#erlang-resource-usage-metrics
+ # Processes collected as a gauge,
+ # memory collected as Megabytes, returned as bytes from Riak.
+ "vm.processes", # Number of processes currently running in the Erlang VM.
+ "vm.memory.processes", # Total amount of memory allocated & used for Erlang processes.
+
+ # General Riak Load / Health metrics
+ # https://docs.riak.com/riak/kv/latest/using/reference/statistics-monitoring/index.html#general-riak-load-health-metrics
+ # The following are collected by Riak over the past minute:
+ "kv.siblings_encountered.get", # Siblings encountered during GET operations by this node.
+ "kv.objsize.get", # Object size encountered by this node.
+ "search.vnodeq_size", # Number of unprocessed messages in the vnode message queues (Search).
+ # The following are calculated in total, or as gauges:
+ "search.index_errors", # Errors of the search subsystem while indexing documents.
+ "core.pbc", # Number of currently active protocol buffer connections.
+ "core.repairs", # Total read repair operations coordinated by this node.
+ "core.fsm_active", # Active finite state machines by kind.
+ "core.fsm_rejected", # Rejected finite state machines by kind.
+
+ # General Riak Search Load / Health metrics
+ # https://docs.riak.com/riak/kv/latest/using/reference/statistics-monitoring/index.html#general-riak-search-load-health-metrics
+ # Reported as counters.
+ "search.errors", # Write and read errors of the Search subsystem.
+]
+
+CHARTS = {
+ # Throughput metrics
+ "kv.node_operations": {
+ "options": [None, "Reads & writes coordinated by this node", "operations/s", "throughput", "riak.kv.throughput",
+ "line"],
+ "lines": [
+ ["node_gets_total", "gets", "incremental"],
+ ["node_puts_total", "puts", "incremental"]
+ ]
+ },
+ "dt.vnode_updates": {
+ "options": [None, "Update operations coordinated by local vnodes by data type", "operations/s", "throughput",
+ "riak.dt.vnode_updates", "line"],
+ "lines": [
+ ["vnode_counter_update_total", "counters", "incremental"],
+ ["vnode_set_update_total", "sets", "incremental"],
+ ["vnode_map_update_total", "maps", "incremental"],
+ ]
+ },
+ "search.queries": {
+ "options": [None, "Search queries on the node", "queries/s", "throughput", "riak.search", "line"],
+ "lines": [
+ ["search_query_throughput_count", "queries", "incremental"]
+ ]
+ },
+ "search.documents": {
+ "options": [None, "Documents indexed by search", "documents/s", "throughput", "riak.search.documents", "line"],
+ "lines": [
+ ["search_index_throughput_count", "indexed", "incremental"]
+ ]
+ },
+ "consistent.operations": {
+ "options": [None, "Consistent node operations", "operations/s", "throughput", "riak.consistent.operations",
+ "line"],
+ "lines": [
+ ["consistent_gets_total", "gets", "incremental"],
+ ["consistent_puts_total", "puts", "incremental"],
+ ]
+ },
+
+ # Latency metrics
+ "kv.latency.get": {
+ "options": [None, "Time between reception of a client GET request and subsequent response to client", "ms",
+ "latency", "riak.kv.latency.get", "line"],
+ "lines": [
+ ["node_get_fsm_time_mean", "mean", "absolute", 1, 1000],
+ ["node_get_fsm_time_median", "median", "absolute", 1, 1000],
+ ["node_get_fsm_time_95", "95", "absolute", 1, 1000],
+ ["node_get_fsm_time_99", "99", "absolute", 1, 1000],
+ ["node_get_fsm_time_100", "100", "absolute", 1, 1000],
+ ]
+ },
+ "kv.latency.put": {
+ "options": [None, "Time between reception of a client PUT request and subsequent response to client", "ms",
+ "latency", "riak.kv.latency.put", "line"],
+ "lines": [
+ ["node_put_fsm_time_mean", "mean", "absolute", 1, 1000],
+ ["node_put_fsm_time_median", "median", "absolute", 1, 1000],
+ ["node_put_fsm_time_95", "95", "absolute", 1, 1000],
+ ["node_put_fsm_time_99", "99", "absolute", 1, 1000],
+ ["node_put_fsm_time_100", "100", "absolute", 1, 1000],
+ ]
+ },
+ "dt.latency.counter": {
+ "options": [None, "Time it takes to perform an Update Counter operation", "ms", "latency",
+ "riak.dt.latency.counter_merge", "line"],
+ "lines": [
+ ["object_counter_merge_time_mean", "mean", "absolute", 1, 1000],
+ ["object_counter_merge_time_median", "median", "absolute", 1, 1000],
+ ["object_counter_merge_time_95", "95", "absolute", 1, 1000],
+ ["object_counter_merge_time_99", "99", "absolute", 1, 1000],
+ ["object_counter_merge_time_100", "100", "absolute", 1, 1000],
+ ]
+ },
+ "dt.latency.set": {
+ "options": [None, "Time it takes to perform an Update Set operation", "ms", "latency",
+ "riak.dt.latency.set_merge", "line"],
+ "lines": [
+ ["object_set_merge_time_mean", "mean", "absolute", 1, 1000],
+ ["object_set_merge_time_median", "median", "absolute", 1, 1000],
+ ["object_set_merge_time_95", "95", "absolute", 1, 1000],
+ ["object_set_merge_time_99", "99", "absolute", 1, 1000],
+ ["object_set_merge_time_100", "100", "absolute", 1, 1000],
+ ]
+ },
+ "dt.latency.map": {
+ "options": [None, "Time it takes to perform an Update Map operation", "ms", "latency",
+ "riak.dt.latency.map_merge", "line"],
+ "lines": [
+ ["object_map_merge_time_mean", "mean", "absolute", 1, 1000],
+ ["object_map_merge_time_median", "median", "absolute", 1, 1000],
+ ["object_map_merge_time_95", "95", "absolute", 1, 1000],
+ ["object_map_merge_time_99", "99", "absolute", 1, 1000],
+ ["object_map_merge_time_100", "100", "absolute", 1, 1000],
+ ]
+ },
+ "search.latency.query": {
+ "options": [None, "Search query latency", "ms", "latency", "riak.search.latency.query", "line"],
+ "lines": [
+ ["search_query_latency_median", "median", "absolute", 1, 1000],
+ ["search_query_latency_min", "min", "absolute", 1, 1000],
+ ["search_query_latency_95", "95", "absolute", 1, 1000],
+ ["search_query_latency_99", "99", "absolute", 1, 1000],
+ ["search_query_latency_999", "999", "absolute", 1, 1000],
+ ["search_query_latency_max", "max", "absolute", 1, 1000],
+ ]
+ },
+ "search.latency.index": {
+ "options": [None, "Time it takes Search to index a new document", "ms", "latency", "riak.search.latency.index",
+ "line"],
+ "lines": [
+ ["search_index_latency_median", "median", "absolute", 1, 1000],
+ ["search_index_latency_min", "min", "absolute", 1, 1000],
+ ["search_index_latency_95", "95", "absolute", 1, 1000],
+ ["search_index_latency_99", "99", "absolute", 1, 1000],
+ ["search_index_latency_999", "999", "absolute", 1, 1000],
+ ["search_index_latency_max", "max", "absolute", 1, 1000],
+ ]
+ },
+
+ # Riak Strong Consistency metrics
+ "consistent.latency.get": {
+ "options": [None, "Strongly consistent read latency", "ms", "latency", "riak.consistent.latency.get", "line"],
+ "lines": [
+ ["consistent_get_time_mean", "mean", "absolute", 1, 1000],
+ ["consistent_get_time_median", "median", "absolute", 1, 1000],
+ ["consistent_get_time_95", "95", "absolute", 1, 1000],
+ ["consistent_get_time_99", "99", "absolute", 1, 1000],
+ ["consistent_get_time_100", "100", "absolute", 1, 1000],
+ ]
+ },
+ "consistent.latency.put": {
+ "options": [None, "Strongly consistent write latency", "ms", "latency", "riak.consistent.latency.put", "line"],
+ "lines": [
+ ["consistent_put_time_mean", "mean", "absolute", 1, 1000],
+ ["consistent_put_time_median", "median", "absolute", 1, 1000],
+ ["consistent_put_time_95", "95", "absolute", 1, 1000],
+ ["consistent_put_time_99", "99", "absolute", 1, 1000],
+ ["consistent_put_time_100", "100", "absolute", 1, 1000],
+ ]
+ },
+
+ # BEAM metrics
+ "vm.processes": {
+ "options": [None, "Total processes running in the Erlang VM", "total", "vm", "riak.vm", "line"],
+ "lines": [
+ ["sys_process_count", "processes", "absolute"],
+ ]
+ },
+ "vm.memory.processes": {
+ "options": [None, "Memory allocated & used by Erlang processes", "MB", "vm", "riak.vm.memory.processes",
+ "line"],
+ "lines": [
+ ["memory_processes", "allocated", "absolute", 1, 1024 * 1024],
+ ["memory_processes_used", "used", "absolute", 1, 1024 * 1024]
+ ]
+ },
+
+ # General Riak Load/Health metrics
+ "kv.siblings_encountered.get": {
+ "options": [None, "Number of siblings encountered during GET operations by this node during the past minute",
+ "siblings", "load", "riak.kv.siblings_encountered.get", "line"],
+ "lines": [
+ ["node_get_fsm_siblings_mean", "mean", "absolute"],
+ ["node_get_fsm_siblings_median", "median", "absolute"],
+ ["node_get_fsm_siblings_95", "95", "absolute"],
+ ["node_get_fsm_siblings_99", "99", "absolute"],
+ ["node_get_fsm_siblings_100", "100", "absolute"],
+ ]
+ },
+ "kv.objsize.get": {
+ "options": [None, "Object size encountered by this node during the past minute", "KB", "load",
+ "riak.kv.objsize.get", "line"],
+ "lines": [
+ ["node_get_fsm_objsize_mean", "mean", "absolute", 1, 1024],
+ ["node_get_fsm_objsize_median", "median", "absolute", 1, 1024],
+ ["node_get_fsm_objsize_95", "95", "absolute", 1, 1024],
+ ["node_get_fsm_objsize_99", "99", "absolute", 1, 1024],
+ ["node_get_fsm_objsize_100", "100", "absolute", 1, 1024],
+ ]
+ },
+ "search.vnodeq_size": {
+ "options": [None,
+ "Number of unprocessed messages in the vnode message queues of Search on this node in the past minute",
+ "messages", "load", "riak.search.vnodeq_size", "line"],
+ "lines": [
+ ["riak_search_vnodeq_mean", "mean", "absolute"],
+ ["riak_search_vnodeq_median", "median", "absolute"],
+ ["riak_search_vnodeq_95", "95", "absolute"],
+ ["riak_search_vnodeq_99", "99", "absolute"],
+ ["riak_search_vnodeq_100", "100", "absolute"],
+ ]
+ },
+ "search.index_errors": {
+ "options": [None, "Number of document index errors encountered by Search", "errors", "load",
+ "riak.search.index", "line"],
+ "lines": [
+ ["search_index_fail_count", "errors", "absolute"]
+ ]
+ },
+ "core.pbc": {
+ "options": [None, "Protocol buffer connections by status", "connections", "load",
+ "riak.core.protobuf_connections", "line"],
+ "lines": [
+ ["pbc_active", "active", "absolute"],
+ # ["pbc_connects", "established_pastmin", "absolute"]
+ ]
+ },
+ "core.repairs": {
+ "options": [None, "Number of repair operations this node has coordinated", "repairs", "load",
+ "riak.core.repairs", "line"],
+ "lines": [
+ ["read_repairs", "read", "absolute"]
+ ]
+ },
+ "core.fsm_active": {
+ "options": [None, "Active finite state machines by kind", "fsms", "load", "riak.core.fsm_active", "line"],
+ "lines": [
+ ["node_get_fsm_active", "get", "absolute"],
+ ["node_put_fsm_active", "put", "absolute"],
+ ["index_fsm_active", "secondary index", "absolute"],
+ ["list_fsm_active", "list keys", "absolute"]
+ ]
+ },
+ "core.fsm_rejected": {
+        # Writing "Sidejob's" here seems to cause some weird issues: it results in this chart being rendered in
+        # its own context and, additionally, moves the entire Riak graph all the way up to the top of the Netdata
+        # dashboard for some reason.
+ "options": [None, "Finite state machines being rejected by Sidejobs overload protection", "fsms", "load",
+ "riak.core.fsm_rejected", "line"],
+ "lines": [
+ ["node_get_fsm_rejected", "get", "absolute"],
+ ["node_put_fsm_rejected", "put", "absolute"]
+ ]
+ },
+
+ # General Riak Search Load / Health metrics
+ "search.errors": {
+ "options": [None, "Number of writes to Search failed due to bad data format by reason", "writes", "load",
+ "riak.search.index", "line"],
+ "lines": [
+ ["search_index_bad_entry_count", "bad_entry", "absolute"],
+ ["search_index_extract_fail_count", "extract_fail", "absolute"],
+ ]
+ }
+}
+
+
+class Service(UrlService):
+ def __init__(self, configuration=None, name=None):
+ UrlService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
+
+ def _get_data(self):
+ """
+ Format data received from http request
+ :return: dict
+ """
+ raw = self._get_raw_data()
+ if not raw:
+ return None
+
+ try:
+ return loads(raw)
+ except (TypeError, ValueError) as err:
+ self.error(err)
+ return None
diff --git a/collectors/python.d.plugin/riakkv/riakkv.conf b/collectors/python.d.plugin/riakkv/riakkv.conf
new file mode 100644
index 0000000..be01c48
--- /dev/null
+++ b/collectors/python.d.plugin/riakkv/riakkv.conf
@@ -0,0 +1,68 @@
+# netdata python.d.plugin configuration for riak
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if the check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+#
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+
+local:
+ url : 'http://localhost:8098/stats'
diff --git a/collectors/python.d.plugin/samba/Makefile.inc b/collectors/python.d.plugin/samba/Makefile.inc
new file mode 100644
index 0000000..230a8ba
--- /dev/null
+++ b/collectors/python.d.plugin/samba/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += samba/samba.chart.py
+dist_pythonconfig_DATA += samba/samba.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += samba/README.md samba/Makefile.inc
+
diff --git a/collectors/python.d.plugin/samba/README.md b/collectors/python.d.plugin/samba/README.md
new file mode 100644
index 0000000..a512651
--- /dev/null
+++ b/collectors/python.d.plugin/samba/README.md
@@ -0,0 +1,121 @@
+<!--
+title: "Samba monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/samba/README.md
+sidebar_label: "Samba"
+-->
+
+# Samba monitoring with Netdata
+
+Monitors the performance metrics of Samba file sharing using the `smbstatus` command-line tool.
+
+Executed commands:
+
+- `sudo -n smbstatus -P`
+
+## Requirements
+
+- `smbstatus` program
+- `sudo` program
+- `smbd` must be compiled with profiling enabled
+- `smbd` must be started either with the `-P 1` option or with `smbd profiling level` set in `smb.conf`
+
+The module uses `smbstatus`, which can only be executed by `root`. It uses
+`sudo` and assumes that it is configured such that the `netdata` user can execute `smbstatus` as root without a
+password.
+
+- Add the following to your `/etc/sudoers` file, replacing `/path/to/smbstatus`
+  with the full path to the binary (`which smbstatus` shows it):
+
+```bash
+netdata ALL=(root) NOPASSWD: /path/to/smbstatus
+```
+
+- Reset Netdata's systemd
+ unit [CapabilityBoundingSet](https://www.freedesktop.org/software/systemd/man/systemd.exec.html#Capabilities) (Linux
+ distributions with systemd)
+
+The default CapabilityBoundingSet doesn't allow using `sudo`, and is quite
+strict in general. Resetting it is not optimal, but it is the next-best
+solution, since without it the `netdata` user cannot run `smbstatus` via `sudo`.
+
+As the `root` user, do the following:
+
+```bash
+mkdir /etc/systemd/system/netdata.service.d
+echo -e '[Service]\nCapabilityBoundingSet=~' | tee /etc/systemd/system/netdata.service.d/unset-capability-bounding-set.conf
+systemctl daemon-reload
+systemctl restart netdata.service
+```
+
+## Charts
+
+1. **Syscall R/Ws** in kilobytes/s
+
+ - sendfile
+ - recvfile
+
+2. **Smb2 R/Ws** in kilobytes/s
+
+ - readout
+ - writein
+ - readin
+ - writeout
+
+3. **Smb2 Create/Close** in operations/s
+
+ - create
+ - close
+
+4. **Smb2 Info** in operations/s
+
+ - getinfo
+ - setinfo
+
+5. **Smb2 Find** in operations/s
+
+ - find
+
+6. **Smb2 Notify** in operations/s
+
+ - notify
+
+7. **Smb2 Lesser Ops** as counters
+
+ - tcon
+ - negprot
+ - tdis
+ - cancel
+ - logoff
+ - flush
+ - lock
+ - keepalive
+ - break
+ - sessetup
+
+## Enable the collector
+
+The `samba` collector is disabled by default. To enable it, use `edit-config` from the
+Netdata [config directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`, to edit the `python.d.conf`
+file.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d.conf
+```
+
+Change the value of the `samba` setting to `yes`. Save the file and restart the Netdata Agent
+with `sudo systemctl restart netdata`, or the appropriate method for your system.
+
+## Configuration
+
+Edit the `python.d/samba.conf` configuration file using `edit-config` from the
+Netdata [config directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/samba.conf
+```
+
+---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fsamba%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/python.d.plugin/samba/samba.chart.py b/collectors/python.d.plugin/samba/samba.chart.py
new file mode 100644
index 0000000..8eebcd6
--- /dev/null
+++ b/collectors/python.d.plugin/samba/samba.chart.py
@@ -0,0 +1,144 @@
+# -*- coding: utf-8 -*-
+# Description: samba netdata python.d module
+# Author: Christopher Cox <chris_cox@endlessnow.com>
+# SPDX-License-Identifier: GPL-3.0-or-later
+#
+# The netdata user needs to be able to run the smbstatus program via sudo
+# without a password:
+# netdata ALL=(ALL) NOPASSWD: /usr/bin/smbstatus -P
+#
+# This makes calls to smbstatus -P
+#
+# This just looks at a couple of values out of syscall, and some from smb2.
+#
+# The Lesser Ops chart is merely a display of current counter values. They
+# didn't seem to change much to me. However, if you notice something changing
+# a lot there, bring one or more out into its own chart and make it incremental
+# (like find and notify... good examples).
+
+import os
+import re
+
+from bases.FrameworkServices.ExecutableService import ExecutableService
+from bases.collection import find_binary
+
+disabled_by_default = True
+
+update_every = 5
+
+ORDER = [
+ 'syscall_rw',
+ 'smb2_rw',
+ 'smb2_create_close',
+ 'smb2_info',
+ 'smb2_find',
+ 'smb2_notify',
+ 'smb2_sm_count'
+]
+
+CHARTS = {
+ 'syscall_rw': {
+ 'options': [None, 'R/Ws', 'KiB/s', 'syscall', 'syscall.rw', 'area'],
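+        # divisor 1024 converts bytes/s to KiB/s; the -1 multiplier draws recvfile below the zero axis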
+ 'lines': [
+ ['syscall_sendfile_bytes', 'sendfile', 'incremental', 1, 1024],
+ ['syscall_recvfile_bytes', 'recvfile', 'incremental', -1, 1024]
+ ]
+ },
+ 'smb2_rw': {
+ 'options': [None, 'R/Ws', 'KiB/s', 'smb2', 'smb2.rw', 'area'],
+ 'lines': [
+ ['smb2_read_outbytes', 'readout', 'incremental', 1, 1024],
+ ['smb2_write_inbytes', 'writein', 'incremental', -1, 1024],
+ ['smb2_read_inbytes', 'readin', 'incremental', 1, 1024],
+ ['smb2_write_outbytes', 'writeout', 'incremental', -1, 1024]
+ ]
+ },
+ 'smb2_create_close': {
+ 'options': [None, 'Create/Close', 'operations/s', 'smb2', 'smb2.create_close', 'line'],
+ 'lines': [
+ ['smb2_create_count', 'create', 'incremental', 1, 1],
+ ['smb2_close_count', 'close', 'incremental', -1, 1]
+ ]
+ },
+ 'smb2_info': {
+ 'options': [None, 'Info', 'operations/s', 'smb2', 'smb2.get_set_info', 'line'],
+ 'lines': [
+ ['smb2_getinfo_count', 'getinfo', 'incremental', 1, 1],
+ ['smb2_setinfo_count', 'setinfo', 'incremental', -1, 1]
+ ]
+ },
+ 'smb2_find': {
+ 'options': [None, 'Find', 'operations/s', 'smb2', 'smb2.find', 'line'],
+ 'lines': [
+ ['smb2_find_count', 'find', 'incremental', 1, 1]
+ ]
+ },
+ 'smb2_notify': {
+ 'options': [None, 'Notify', 'operations/s', 'smb2', 'smb2.notify', 'line'],
+ 'lines': [
+ ['smb2_notify_count', 'notify', 'incremental', 1, 1]
+ ]
+ },
+ 'smb2_sm_count': {
+ 'options': [None, 'Lesser Ops', 'count', 'smb2', 'smb2.sm_counters', 'stacked'],
+ 'lines': [
+ ['smb2_tcon_count', 'tcon', 'absolute', 1, 1],
+ ['smb2_negprot_count', 'negprot', 'absolute', 1, 1],
+ ['smb2_tdis_count', 'tdis', 'absolute', 1, 1],
+ ['smb2_cancel_count', 'cancel', 'absolute', 1, 1],
+ ['smb2_logoff_count', 'logoff', 'absolute', 1, 1],
+ ['smb2_flush_count', 'flush', 'absolute', 1, 1],
+ ['smb2_lock_count', 'lock', 'absolute', 1, 1],
+ ['smb2_keepalive_count', 'keepalive', 'absolute', 1, 1],
+ ['smb2_break_count', 'break', 'absolute', 1, 1],
+ ['smb2_sessetup_count', 'sessetup', 'absolute', 1, 1]
+ ]
+ }
+}
+
+SUDO = 'sudo'
+SMBSTATUS = 'smbstatus'
+
+
+class Service(ExecutableService):
+ def __init__(self, configuration=None, name=None):
+ ExecutableService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
+ self.rgx_smb2 = re.compile(r'(smb2_[^:]+|syscall_.*file_bytes):\s+(\d+)')
+
+ def check(self):
+ smbstatus_binary = find_binary(SMBSTATUS)
+ if not smbstatus_binary:
+ self.error("can't locate '{0}' binary".format(SMBSTATUS))
+ return False
+
+ if os.getuid() == 0:
+ self.command = ' '.join([smbstatus_binary, '-P'])
+ return ExecutableService.check(self)
+
+ sudo_binary = find_binary(SUDO)
+ if not sudo_binary:
+ self.error("can't locate '{0}' binary".format(SUDO))
+ return False
+ command = [sudo_binary, '-n', '-l', smbstatus_binary, '-P']
+ smbstatus = '{0} -P'.format(smbstatus_binary)
+ allowed = self._get_raw_data(command=command)
+ if not (allowed and allowed[0].strip() == smbstatus):
+ self.error("not allowed to run sudo for command '{0}'".format(smbstatus))
+ return False
+ self.command = ' '.join([sudo_binary, '-n', smbstatus_binary, '-P'])
+ return ExecutableService.check(self)
+
+ def _get_data(self):
+ """
+ Format data received from shell command
+ :return: dict
+ """
+ raw_data = self._get_raw_data()
+ if not raw_data:
+ return None
+
+ parsed = self.rgx_smb2.findall(' '.join(raw_data))
+
+ return dict(parsed) or None
diff --git a/collectors/python.d.plugin/samba/samba.conf b/collectors/python.d.plugin/samba/samba.conf
new file mode 100644
index 0000000..db15d4e
--- /dev/null
+++ b/collectors/python.d.plugin/samba/samba.conf
@@ -0,0 +1,60 @@
+# netdata python.d.plugin configuration for samba
+#
+# This file is in YAML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+update_every: 5
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
\ No newline at end of file
diff --git a/collectors/python.d.plugin/sensors/Makefile.inc b/collectors/python.d.plugin/sensors/Makefile.inc
new file mode 100644
index 0000000..5fb26e1
--- /dev/null
+++ b/collectors/python.d.plugin/sensors/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += sensors/sensors.chart.py
+dist_pythonconfig_DATA += sensors/sensors.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += sensors/README.md sensors/Makefile.inc
+
diff --git a/collectors/python.d.plugin/sensors/README.md b/collectors/python.d.plugin/sensors/README.md
new file mode 100644
index 0000000..5d29348
--- /dev/null
+++ b/collectors/python.d.plugin/sensors/README.md
@@ -0,0 +1,31 @@
+<!--
+title: "Linux machine sensors monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/sensors/README.md
+sidebar_label: "Linux machine sensors"
+-->
+
+# Linux machine sensors monitoring with Netdata
+
+Reads system sensors information (temperature, voltage, electric current, power, etc.).
+
+Charts are created dynamically.
+
+## Configuration
+
+Edit the `python.d/sensors.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/sensors.conf
+```
+
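+The optional `chips` setting (see `sensors.conf`) limits collection to chips whose names start with one of the
+configured strings. The sketch below mirrors the `startswith()` check the module applies; the chip name is a
+hypothetical example, so use the names printed by the `sensors` command:
+
+```python
+# Mirrors the prefix check in sensors.chart.py (create_definitions).
+chips = ['coretemp', 'i8k']          # values as listed under `chips:`
+chip_name = 'coretemp-isa-0000'      # hypothetical name from `sensors`
+print(any(chip_name.startswith(prefix) for prefix in chips))  # True
+```
+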
+### Possible issues
+
+There have been reports from users that on certain servers, ACPI ring buffer errors are printed by the kernel (`dmesg`) when ACPI sensors are being accessed.
+We are tracking such cases in issue [#827](https://github.com/netdata/netdata/issues/827).
+Please join this discussion for help.
+
+---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fsensors%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/python.d.plugin/sensors/sensors.chart.py b/collectors/python.d.plugin/sensors/sensors.chart.py
new file mode 100644
index 0000000..8c0cde6
--- /dev/null
+++ b/collectors/python.d.plugin/sensors/sensors.chart.py
@@ -0,0 +1,163 @@
+# -*- coding: utf-8 -*-
+# Description: sensors netdata python.d plugin
+# Author: Pawel Krupa (paulfantom)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from bases.FrameworkServices.SimpleService import SimpleService
+from third_party import lm_sensors as sensors
+
+ORDER = [
+ 'temperature',
+ 'fan',
+ 'voltage',
+ 'current',
+ 'power',
+ 'energy',
+ 'humidity',
+]
+
+# This is a prototype of chart definition which is used to dynamically create self.definitions
+CHARTS = {
+ 'temperature': {
+ 'options': [None, ' temperature', 'Celsius', 'temperature', 'sensors.temperature', 'line'],
+ 'lines': [
+ [None, None, 'absolute', 1, 1000]
+ ]
+ },
+ 'voltage': {
+ 'options': [None, ' voltage', 'Volts', 'voltage', 'sensors.voltage', 'line'],
+ 'lines': [
+ [None, None, 'absolute', 1, 1000]
+ ]
+ },
+ 'current': {
+ 'options': [None, ' current', 'Ampere', 'current', 'sensors.current', 'line'],
+ 'lines': [
+ [None, None, 'absolute', 1, 1000]
+ ]
+ },
+ 'power': {
+ 'options': [None, ' power', 'Watt', 'power', 'sensors.power', 'line'],
+ 'lines': [
+ [None, None, 'absolute', 1, 1000]
+ ]
+ },
+ 'fan': {
+ 'options': [None, ' fans speed', 'Rotations/min', 'fans', 'sensors.fan', 'line'],
+ 'lines': [
+ [None, None, 'absolute', 1, 1000]
+ ]
+ },
+ 'energy': {
+ 'options': [None, ' energy', 'Joule', 'energy', 'sensors.energy', 'line'],
+ 'lines': [
+ [None, None, 'incremental', 1, 1000]
+ ]
+ },
+ 'humidity': {
+ 'options': [None, ' humidity', 'Percent', 'humidity', 'sensors.humidity', 'line'],
+ 'lines': [
+ [None, None, 'absolute', 1, 1000]
+ ]
+ }
+}
+
+LIMITS = {
+ 'temperature': [-127, 1000],
+ 'voltage': [-127, 127],
+ 'current': [-127, 127],
+ 'fan': [0, 65535]
+}
+
+TYPE_MAP = {
+ 0: 'voltage',
+ 1: 'fan',
+ 2: 'temperature',
+ 3: 'power',
+ 4: 'energy',
+ 5: 'current',
+ 6: 'humidity',
+ 7: 'max_main',
+ 16: 'vid',
+ 17: 'intrusion',
+ 18: 'max_other',
+ 24: 'beep_enable'
+}
+
+
+class Service(SimpleService):
+ def __init__(self, configuration=None, name=None):
+ SimpleService.__init__(self, configuration=configuration, name=name)
+ self.order = list()
+ self.definitions = dict()
+ self.chips = configuration.get('chips')
+
+ def get_data(self):
+ data = dict()
+ try:
+ for chip in sensors.ChipIterator():
+ prefix = sensors.chip_snprintf_name(chip)
+ for feature in sensors.FeatureIterator(chip):
+ sfi = sensors.SubFeatureIterator(chip, feature)
+ val = None
+ for sf in sfi:
+ try:
+ val = sensors.get_value(chip, sf.number)
+ break
+ except sensors.SensorsError:
+ continue
+ if val is None:
+ continue
+ type_name = TYPE_MAP[feature.type]
+ if type_name in LIMITS:
+ limit = LIMITS[type_name]
+ if val < limit[0] or val > limit[1]:
+ continue
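+                # readings are floats; store them scaled by 1000 (the chart divisors scale them back)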
+ data[prefix + '_' + str(feature.name.decode())] = int(val * 1000)
+ except sensors.SensorsError as error:
+ self.error(error)
+ return None
+
+ return data or None
+
+ def create_definitions(self):
+ for sensor in ORDER:
+ for chip in sensors.ChipIterator():
+ chip_name = sensors.chip_snprintf_name(chip)
+ if self.chips and not any([chip_name.startswith(ex) for ex in self.chips]):
+ continue
+ for feature in sensors.FeatureIterator(chip):
+ sfi = sensors.SubFeatureIterator(chip, feature)
+ vals = list()
+ for sf in sfi:
+ try:
+ vals.append(sensors.get_value(chip, sf.number))
+ except sensors.SensorsError as error:
+ self.error('{0}: {1}'.format(sf.name, error))
+ continue
+ if not vals or (vals[0] == 0 and feature.type != 1):
+ continue
+ if TYPE_MAP[feature.type] == sensor:
+ # create chart
+ name = chip_name + '_' + TYPE_MAP[feature.type]
+ if name not in self.order:
+ self.order.append(name)
+ chart_def = list(CHARTS[sensor]['options'])
+ chart_def[1] = chip_name + chart_def[1]
+ self.definitions[name] = {'options': chart_def}
+ self.definitions[name]['lines'] = []
+ line = list(CHARTS[sensor]['lines'][0])
+ line[0] = chip_name + '_' + str(feature.name.decode())
+ line[1] = sensors.get_label(chip, feature)
+ self.definitions[name]['lines'].append(line)
+
+ def check(self):
+ try:
+ sensors.init()
+ except sensors.SensorsError as error:
+ self.error(error)
+ return False
+
+ self.create_definitions()
+
+ return bool(self.get_data())
diff --git a/collectors/python.d.plugin/sensors/sensors.conf b/collectors/python.d.plugin/sensors/sensors.conf
new file mode 100644
index 0000000..d3369ba
--- /dev/null
+++ b/collectors/python.d.plugin/sensors/sensors.conf
@@ -0,0 +1,61 @@
+# netdata python.d.plugin configuration for sensors
+#
+# This file is in YAML format. Generally the format is:
+#
+# name: value
+#
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# Limit the sensor types to collect.
+# Comment out the ones you want to disable.
+# Also, re-arranging this list controls the order of the charts at the
+# netdata dashboard.
+
+types:
+ - temperature
+ - fan
+ - voltage
+ - current
+ - power
+ - energy
+ - humidity
+
+# ----------------------------------------------------------------------
+# Limit the sensor chips to collect.
+# Uncomment the first line (chips:) and add chip names below it.
+# Chip names are matched by prefix (anything that starts with the given
+# string is matched). You can find the chip names using the `sensors`
+# shell command.
+
+#chips:
+# - i8k
+# - coretemp
+#
+#----------------------------------------------------------------------
+
diff --git a/collectors/python.d.plugin/smartd_log/Makefile.inc b/collectors/python.d.plugin/smartd_log/Makefile.inc
new file mode 100644
index 0000000..dc1d0f3
--- /dev/null
+++ b/collectors/python.d.plugin/smartd_log/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += smartd_log/smartd_log.chart.py
+dist_pythonconfig_DATA += smartd_log/smartd_log.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += smartd_log/README.md smartd_log/Makefile.inc
+
diff --git a/collectors/python.d.plugin/smartd_log/README.md b/collectors/python.d.plugin/smartd_log/README.md
new file mode 100644
index 0000000..a1b41f4
--- /dev/null
+++ b/collectors/python.d.plugin/smartd_log/README.md
@@ -0,0 +1,125 @@
+<!--
+title: "Storage devices monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/smartd_log/README.md
+sidebar_label: "S.M.A.R.T. attributes"
+-->
+
+# Storage devices monitoring with Netdata
+
+Monitors `smartd` log files to collect HDD/SSD S.M.A.R.T. attributes.
+
+## Requirements
+
+- `smartmontools`
+
+It produces the following charts for SCSI devices:
+
+1. **Read Error Corrected**
+
+2. **Read Error Uncorrected**
+
+3. **Write Error Corrected**
+
+4. **Write Error Uncorrected**
+
+5. **Verify Error Corrected**
+
+6. **Verify Error Uncorrected**
+
+7. **Temperature**
+
+For ATA devices:
+
+1. **Read Error Rate**
+
+2. **Seek Error Rate**
+
+3. **Soft Read Error Rate**
+
+4. **Write Error Rate**
+
+5. **SATA Interface Downshift**
+
+6. **UDMA CRC Error Count**
+
+7. **Throughput Performance**
+
+8. **Seek Time Performance**
+
+9. **Start/Stop Count**
+
+10. **Power-On Hours Count**
+
+11. **Power Cycle Count**
+
+12. **Unexpected Power Loss**
+
+13. **Spin-Up Time**
+
+14. **Spin-up Retries**
+
+15. **Calibration Retries**
+
+16. **Temperature**
+
+17. **Reallocated Sectors Count**
+
+18. **Reserved Block Count**
+
+19. **Program Fail Count**
+
+20. **Erase Fail Count**
+
+21. **Wear Leveller Worst Case Erase Count**
+
+22. **Unused Reserved NAND Blocks**
+
+23. **Reallocation Event Count**
+
+24. **Current Pending Sector Count**
+
+25. **Offline Uncorrectable Sector Count**
+
+26. **Percent Lifetime Used**
+
+## Prerequisites
+
+`smartd` must be running with `-A` option to write smartd attribute information to files.
+
+For this you need to set `smartd_opts` (or `SMARTD_ARGS`, check _smartd.service_ content) in `/etc/default/smartmontools`:
+
+```sh
+# dump smartd attrs info every 600 seconds
+smartd_opts="-A /var/log/smartd/ -i 600"
+```
+
+You may need to create the smartd directory before smartd will write to it:
+
+```sh
+mkdir -p /var/log/smartd
+```
+
+Otherwise, all the smartd `.csv` files may get written to `/var/lib/smartmontools` (the default location). See also <https://linux.die.net/man/8/smartd> for more info on the `-A`/`--attributelog=PREFIX` option.
+
+`smartd` appends logs at every run. It's strongly recommended to use `logrotate` for smartd files.
+
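+Each `.csv` file holds one semicolon-separated record per `smartd` run; for ATA devices the fields after the
+timestamp are `attribute-id;normalized-value;raw-value` triplets. The sketch below extracts them with the same
+pattern the collector uses (the sample line is hypothetical, apart from the `3;151;38684000679` triplet, which is
+quoted in the module's comments):
+
+```python
+import re
+
+# Same ATA pattern as smartd_log.chart.py: id, normalized value, raw value.
+RE_ATA = re.compile(r'(\d+);(\d+);(\d+)')
+
+line = '2021-01-01 00:00:00;\t3;151;38684000679;\t9;98;1234;'
+print(RE_ATA.findall(line))
+# [('3', '151', '38684000679'), ('9', '98', '1234')]
+```
+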
+## Configuration
+
+Edit the `python.d/smartd_log.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/smartd_log.conf
+```
+
+```yaml
+local:
+ log_path : '/var/log/smartd/'
+```
+
+If no configuration is given, the module will attempt to read log files from the `/var/log/smartd/` directory.
+
+---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fsmartd_log%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/python.d.plugin/smartd_log/smartd_log.chart.py b/collectors/python.d.plugin/smartd_log/smartd_log.chart.py
new file mode 100644
index 0000000..8f10a53
--- /dev/null
+++ b/collectors/python.d.plugin/smartd_log/smartd_log.chart.py
@@ -0,0 +1,752 @@
+# -*- coding: utf-8 -*-
+# Description: smartd_log netdata python.d module
+# Author: ilyam8, vorph1
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import os
+import re
+from copy import deepcopy
+from time import time
+
+from bases.FrameworkServices.SimpleService import SimpleService
+from bases.collection import read_last_line
+
+INCREMENTAL = 'incremental'
+ABSOLUTE = 'absolute'
+
+ATA = 'ata'
+SCSI = 'scsi'
+CSV = '.csv'
+
+DEF_RESCAN_INTERVAL = 60
+DEF_AGE = 30
+DEF_PATH = '/var/log/smartd'
+
+ATTR1 = '1'
+ATTR2 = '2'
+ATTR3 = '3'
+ATTR4 = '4'
+ATTR5 = '5'
+ATTR7 = '7'
+ATTR8 = '8'
+ATTR9 = '9'
+ATTR10 = '10'
+ATTR11 = '11'
+ATTR12 = '12'
+ATTR13 = '13'
+ATTR170 = '170'
+ATTR171 = '171'
+ATTR172 = '172'
+ATTR173 = '173'
+ATTR174 = '174'
+ATTR180 = '180'
+ATTR183 = '183'
+ATTR190 = '190'
+ATTR194 = '194'
+ATTR196 = '196'
+ATTR197 = '197'
+ATTR198 = '198'
+ATTR199 = '199'
+ATTR202 = '202'
+ATTR206 = '206'
+ATTR_READ_ERR_COR = 'read-total-err-corrected'
+ATTR_READ_ERR_UNC = 'read-total-unc-errors'
+ATTR_WRITE_ERR_COR = 'write-total-err-corrected'
+ATTR_WRITE_ERR_UNC = 'write-total-unc-errors'
+ATTR_VERIFY_ERR_COR = 'verify-total-err-corrected'
+ATTR_VERIFY_ERR_UNC = 'verify-total-unc-errors'
+ATTR_TEMPERATURE = 'temperature'
+
+RE_ATA = re.compile(
+    r'(\d+);'  # attribute
+    r'(\d+);'  # normalized value
+    r'(\d+)',  # raw value
+    re.X
+)
+
+RE_SCSI = re.compile(
+    r'([a-z-]+);'  # attribute
+    r'([0-9.]+)',  # raw value
+    re.X
+)
+
+ORDER = [
+ # errors
+ 'read_error_rate',
+ 'seek_error_rate',
+ 'soft_read_error_rate',
+ 'write_error_rate',
+ 'read_total_err_corrected',
+ 'read_total_unc_errors',
+ 'write_total_err_corrected',
+ 'write_total_unc_errors',
+ 'verify_total_err_corrected',
+ 'verify_total_unc_errors',
+ # external failure
+ 'sata_interface_downshift',
+ 'udma_crc_error_count',
+ # performance
+ 'throughput_performance',
+ 'seek_time_performance',
+ # power
+ 'start_stop_count',
+ 'power_on_hours_count',
+ 'power_cycle_count',
+ 'unexpected_power_loss',
+ # spin
+ 'spin_up_time',
+ 'spin_up_retries',
+ 'calibration_retries',
+ # temperature
+ 'airflow_temperature_celsius',
+ 'temperature_celsius',
+ # wear
+ 'reallocated_sectors_count',
+ 'reserved_block_count',
+ 'program_fail_count',
+ 'erase_fail_count',
+ 'wear_leveller_worst_case_erase_count',
+ 'unused_reserved_nand_blocks',
+ 'reallocation_event_count',
+ 'current_pending_sector_count',
+ 'offline_uncorrectable_sector_count',
+ 'percent_lifetime_used',
+]
+
+CHARTS = {
+ 'read_error_rate': {
+ 'options': [None, 'Read Error Rate', 'value', 'errors', 'smartd_log.read_error_rate', 'line'],
+ 'lines': [],
+ 'attrs': [ATTR1],
+ 'algo': ABSOLUTE,
+ },
+ 'seek_error_rate': {
+ 'options': [None, 'Seek Error Rate', 'value', 'errors', 'smartd_log.seek_error_rate', 'line'],
+ 'lines': [],
+ 'attrs': [ATTR7],
+ 'algo': ABSOLUTE,
+ },
+ 'soft_read_error_rate': {
+ 'options': [None, 'Soft Read Error Rate', 'errors', 'errors', 'smartd_log.soft_read_error_rate', 'line'],
+ 'lines': [],
+ 'attrs': [ATTR13],
+ 'algo': INCREMENTAL,
+ },
+ 'write_error_rate': {
+ 'options': [None, 'Write Error Rate', 'value', 'errors', 'smartd_log.write_error_rate', 'line'],
+ 'lines': [],
+ 'attrs': [ATTR206],
+ 'algo': ABSOLUTE,
+ },
+ 'read_total_err_corrected': {
+ 'options': [None, 'Read Error Corrected', 'errors', 'errors', 'smartd_log.read_total_err_corrected', 'line'],
+ 'lines': [],
+ 'attrs': [ATTR_READ_ERR_COR],
+ 'algo': INCREMENTAL,
+ },
+ 'read_total_unc_errors': {
+ 'options': [None, 'Read Error Uncorrected', 'errors', 'errors', 'smartd_log.read_total_unc_errors', 'line'],
+ 'lines': [],
+ 'attrs': [ATTR_READ_ERR_UNC],
+ 'algo': INCREMENTAL,
+ },
+ 'write_total_err_corrected': {
+ 'options': [None, 'Write Error Corrected', 'errors', 'errors', 'smartd_log.read_total_err_corrected', 'line'],
+ 'lines': [],
+ 'attrs': [ATTR_WRITE_ERR_COR],
+ 'algo': INCREMENTAL,
+ },
+ 'write_total_unc_errors': {
+ 'options': [None, 'Write Error Uncorrected', 'errors', 'errors', 'smartd_log.write_total_unc_errors', 'line'],
+ 'lines': [],
+ 'attrs': [ATTR_WRITE_ERR_UNC],
+ 'algo': INCREMENTAL,
+ },
+ 'verify_total_err_corrected': {
+ 'options': [None, 'Verify Error Corrected', 'errors', 'errors', 'smartd_log.verify_total_err_corrected',
+ 'line'],
+ 'lines': [],
+ 'attrs': [ATTR_VERIFY_ERR_COR],
+ 'algo': INCREMENTAL,
+ },
+ 'verify_total_unc_errors': {
+ 'options': [None, 'Verify Error Uncorrected', 'errors', 'errors', 'smartd_log.verify_total_unc_errors', 'line'],
+ 'lines': [],
+ 'attrs': [ATTR_VERIFY_ERR_UNC],
+ 'algo': INCREMENTAL,
+ },
+ 'sata_interface_downshift': {
+ 'options': [None, 'SATA Interface Downshift', 'events', 'external failure',
+ 'smartd_log.sata_interface_downshift', 'line'],
+ 'lines': [],
+ 'attrs': [ATTR183],
+ 'algo': INCREMENTAL,
+ },
+ 'udma_crc_error_count': {
+ 'options': [None, 'UDMA CRC Error Count', 'errors', 'external failure', 'smartd_log.udma_crc_error_count',
+ 'line'],
+ 'lines': [],
+ 'attrs': [ATTR199],
+ 'algo': INCREMENTAL,
+ },
+ 'throughput_performance': {
+ 'options': [None, 'Throughput Performance', 'value', 'performance', 'smartd_log.throughput_performance',
+ 'line'],
+ 'lines': [],
+ 'attrs': [ATTR2],
+ 'algo': ABSOLUTE,
+ },
+ 'seek_time_performance': {
+ 'options': [None, 'Seek Time Performance', 'value', 'performance', 'smartd_log.seek_time_performance', 'line'],
+ 'lines': [],
+ 'attrs': [ATTR8],
+ 'algo': ABSOLUTE,
+ },
+ 'start_stop_count': {
+ 'options': [None, 'Start/Stop Count', 'events', 'power', 'smartd_log.start_stop_count', 'line'],
+ 'lines': [],
+ 'attrs': [ATTR4],
+ 'algo': ABSOLUTE,
+ },
+ 'power_on_hours_count': {
+ 'options': [None, 'Power-On Hours Count', 'hours', 'power', 'smartd_log.power_on_hours_count', 'line'],
+ 'lines': [],
+ 'attrs': [ATTR9],
+ 'algo': ABSOLUTE,
+ },
+ 'power_cycle_count': {
+ 'options': [None, 'Power Cycle Count', 'events', 'power', 'smartd_log.power_cycle_count', 'line'],
+ 'lines': [],
+ 'attrs': [ATTR12],
+ 'algo': ABSOLUTE,
+ },
+ 'unexpected_power_loss': {
+ 'options': [None, 'Unexpected Power Loss', 'events', 'power', 'smartd_log.unexpected_power_loss', 'line'],
+ 'lines': [],
+ 'attrs': [ATTR174],
+ 'algo': ABSOLUTE,
+ },
+ 'spin_up_time': {
+ 'options': [None, 'Spin-Up Time', 'ms', 'spin', 'smartd_log.spin_up_time', 'line'],
+ 'lines': [],
+ 'attrs': [ATTR3],
+ 'algo': ABSOLUTE,
+ },
+ 'spin_up_retries': {
+ 'options': [None, 'Spin-up Retries', 'retries', 'spin', 'smartd_log.spin_up_retries', 'line'],
+ 'lines': [],
+ 'attrs': [ATTR10],
+ 'algo': INCREMENTAL,
+ },
+ 'calibration_retries': {
+ 'options': [None, 'Calibration Retries', 'retries', 'spin', 'smartd_log.calibration_retries', 'line'],
+ 'lines': [],
+ 'attrs': [ATTR11],
+ 'algo': INCREMENTAL,
+ },
+ 'airflow_temperature_celsius': {
+ 'options': [None, 'Airflow Temperature Celsius', 'celsius', 'temperature',
+ 'smartd_log.airflow_temperature_celsius', 'line'],
+ 'lines': [],
+ 'attrs': [ATTR190],
+ 'algo': ABSOLUTE,
+ },
+ 'temperature_celsius': {
+ 'options': [None, 'Temperature', 'celsius', 'temperature', 'smartd_log.temperature_celsius', 'line'],
+ 'lines': [],
+ 'attrs': [ATTR194, ATTR_TEMPERATURE],
+ 'algo': ABSOLUTE,
+ },
+ 'reallocated_sectors_count': {
+ 'options': [None, 'Reallocated Sectors Count', 'sectors', 'wear', 'smartd_log.reallocated_sectors_count',
+ 'line'],
+ 'lines': [],
+ 'attrs': [ATTR5],
+ 'algo': ABSOLUTE,
+ },
+ 'reserved_block_count': {
+ 'options': [None, 'Reserved Block Count', 'percentage', 'wear', 'smartd_log.reserved_block_count', 'line'],
+ 'lines': [],
+ 'attrs': [ATTR170],
+ 'algo': ABSOLUTE,
+ },
+ 'program_fail_count': {
+ 'options': [None, 'Program Fail Count', 'errors', 'wear', 'smartd_log.program_fail_count', 'line'],
+ 'lines': [],
+ 'attrs': [ATTR171],
+ 'algo': INCREMENTAL,
+ },
+ 'erase_fail_count': {
+ 'options': [None, 'Erase Fail Count', 'failures', 'wear', 'smartd_log.erase_fail_count', 'line'],
+ 'lines': [],
+ 'attrs': [ATTR172],
+ 'algo': INCREMENTAL,
+ },
+ 'wear_leveller_worst_case_erase_count': {
+ 'options': [None, 'Wear Leveller Worst Case Erase Count', 'erases', 'wear',
+ 'smartd_log.wear_leveller_worst_case_erase_count', 'line'],
+ 'lines': [],
+ 'attrs': [ATTR173],
+ 'algo': ABSOLUTE,
+ },
+ 'unused_reserved_nand_blocks': {
+ 'options': [None, 'Unused Reserved NAND Blocks', 'blocks', 'wear', 'smartd_log.unused_reserved_nand_blocks',
+ 'line'],
+ 'lines': [],
+ 'attrs': [ATTR180],
+ 'algo': ABSOLUTE,
+ },
+ 'reallocation_event_count': {
+ 'options': [None, 'Reallocation Event Count', 'events', 'wear', 'smartd_log.reallocation_event_count', 'line'],
+ 'lines': [],
+ 'attrs': [ATTR196],
+ 'algo': INCREMENTAL,
+ },
+ 'current_pending_sector_count': {
+ 'options': [None, 'Current Pending Sector Count', 'sectors', 'wear', 'smartd_log.current_pending_sector_count',
+ 'line'],
+ 'lines': [],
+ 'attrs': [ATTR197],
+ 'algo': ABSOLUTE,
+ },
+ 'offline_uncorrectable_sector_count': {
+ 'options': [None, 'Offline Uncorrectable Sector Count', 'sectors', 'wear',
+ 'smartd_log.offline_uncorrectable_sector_count', 'line'],
+ 'lines': [],
+ 'attrs': [ATTR198],
+ 'algo': ABSOLUTE,
+ },
+ 'percent_lifetime_used': {
+ 'options': [None, 'Percent Lifetime Used', 'percentage', 'wear', 'smartd_log.percent_lifetime_used', 'line'],
+ 'lines': [],
+ 'attrs': [ATTR202],
+ 'algo': ABSOLUTE,
+ }
+}
+
+# NOTE: 'parse_temp' decodes ATA 194 raw value. Not heavily tested. Written by @Ferroin
+# C code:
+# https://github.com/smartmontools/smartmontools/blob/master/smartmontools/atacmds.cpp#L2051
+#
+# Calling 'parse_temp' on the raw value will return a 4-tuple, containing
+# * temperature
+# * minimum
+# * maximum
+# * over-temperature count
+# substituting None for values it can't decode.
+#
+# Example:
+# >>> parse_temp(42952491042)
+# >>> (34, 10, 43, None)
+#
+#
+# def check_temp_word(i):
+# if i <= 0x7F:
+# return 0x11
+# elif i <= 0xFF:
+# return 0x01
+# elif 0xFF80 <= i:
+# return 0x10
+# return 0x00
+#
+#
+# def check_temp_range(t, b0, b1):
+# if b0 > b1:
+# t0, t1 = b1, b0
+# else:
+# t0, t1 = b0, b1
+#
+# if all([
+# -60 <= t0,
+# t0 <= t,
+# t <= t1,
+# t1 <= 120,
+# not (t0 == -1 and t1 <= 0)
+# ]):
+# return t0, t1
+# return None, None
+#
+#
+# def parse_temp(raw):
+# byte = list()
+# word = list()
+# for i in range(0, 6):
+# byte.append(0xFF & (raw >> (i * 8)))
+# for i in range(0, 3):
+# word.append(0xFFFF & (raw >> (i * 16)))
+#
+# ctwd = check_temp_word(word[0])
+#
+# if not word[2]:
+# if ctwd and not word[1]:
+# # byte[0] is temp, no other data
+# return byte[0], None, None, None
+#
+# if ctwd and all(check_temp_range(byte[0], byte[2], byte[3])):
+# # byte[0] is temp, byte[2] is max or min, byte[3] is min or max
+# trange = check_temp_range(byte[0], byte[2], byte[3])
+# return byte[0], trange[0], trange[1], None
+#
+# if ctwd and all(check_temp_range(byte[0], byte[1], byte[2])):
+# # byte[0] is temp, byte[1] is max or min, byte[2] is min or max
+# trange = check_temp_range(byte[0], byte[1], byte[2])
+# return byte[0], trange[0], trange[1], None
+#
+# return None, None, None, None
+#
+# if ctwd:
+# if all(
+# [
+# ctwd & check_temp_word(word[1]) & check_temp_word(word[2]) != 0x00,
+# all(check_temp_range(byte[0], byte[2], byte[4])),
+# ]
+# ):
+# # byte[0] is temp, byte[2] is max or min, byte[4] is min or max
+# trange = check_temp_range(byte[0], byte[2], byte[4])
+# return byte[0], trange[0], trange[1], None
+# else:
+# trange = check_temp_range(byte[0], byte[2], byte[3])
+# if word[2] < 0x7FFF and all(trange) and trange[1] >= 40:
+# # byte[0] is temp, byte[2] is max or min, byte[3] is min or max, word[2] is overtemp count
+# return byte[0], trange[0], trange[1], word[2]
+# # no data
+# return None, None, None, None
+
+
+CHARTED_ATTRS = dict((attr, k) for k, v in CHARTS.items() for attr in v['attrs'])
+
+
+class BaseAtaSmartAttribute:
+ def __init__(self, name, normalized_value, raw_value):
+ self.name = name
+ self.normalized_value = normalized_value
+ self.raw_value = raw_value
+
+ def value(self):
+ raise NotImplementedError
+
+
+class AtaRaw(BaseAtaSmartAttribute):
+ def value(self):
+ return self.raw_value
+
+
+class AtaNormalized(BaseAtaSmartAttribute):
+ def value(self):
+ return self.normalized_value
+
+
+class Ata3(BaseAtaSmartAttribute):
+ def value(self):
+ value = int(self.raw_value)
+ # https://github.com/netdata/netdata/issues/5919
+ #
+ # 3;151;38684000679;
+ # 423 (Average 447)
+ # 38684000679 & 0xFFF -> 423
+ # (38684000679 & 0xFFF0000) >> 16 -> 447
+ if value > 1e6:
+ return value & 0xFFF
+ return value
+
+
+class Ata9(BaseAtaSmartAttribute):
+ def value(self):
+ value = int(self.raw_value)
+ if value > 1e6:
+ return value & 0xFFFF
+ return value
+
+
+class Ata190(BaseAtaSmartAttribute):
+ def value(self):
+ return 100 - int(self.normalized_value)
+
+
+class Ata194(BaseAtaSmartAttribute):
+ # https://github.com/netdata/netdata/issues/3041
+ # https://github.com/netdata/netdata/issues/5919
+ #
+ # The low byte is the current temperature, the third lowest is the maximum, and the fifth lowest is the minimum
+ def value(self):
+ value = int(self.raw_value)
+ if value > 1e6:
+ return value & 0xFF
+ return min(int(self.normalized_value), int(self.raw_value))
+
+
+class BaseSCSISmartAttribute:
+ def __init__(self, name, raw_value):
+ self.name = name
+ self.raw_value = raw_value
+
+ def value(self):
+ raise NotImplementedError
+
+
+class SCSIRaw(BaseSCSISmartAttribute):
+ def value(self):
+ return self.raw_value
+
+
+def ata_attribute_factory(value):
+ name = value[0]
+
+ if name == ATTR3:
+ return Ata3(*value)
+ elif name == ATTR9:
+ return Ata9(*value)
+ elif name == ATTR190:
+ return Ata190(*value)
+ elif name == ATTR194:
+ return Ata194(*value)
+ elif name in [
+ ATTR1,
+ ATTR7,
+ ATTR202,
+ ATTR206,
+ ]:
+ return AtaNormalized(*value)
+
+ return AtaRaw(*value)
+
+
+def scsi_attribute_factory(value):
+ return SCSIRaw(*value)
+
+
+def attribute_factory(value):
+ name = value[0]
+ if name.isdigit():
+ return ata_attribute_factory(value)
+ return scsi_attribute_factory(value)
+
+
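+# Return a decorator that runs the wrapped method and swallows the given
+# exception types, returning None instead (used to tolerate log files that
+# disappear or change between reads).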
+def handle_error(*errors):
+ def on_method(method):
+ def on_call(*args):
+ try:
+ return method(*args)
+ except errors:
+ return None
+
+ return on_call
+
+ return on_method
+
+
+class DiskLogFile:
+ def __init__(self, full_path):
+ self.path = full_path
+ self.size = os.path.getsize(full_path)
+
+ @handle_error(OSError)
+ def is_changed(self):
+ return self.size != os.path.getsize(self.path)
+
+ @handle_error(OSError)
+ def is_active(self, current_time, limit):
+ return (current_time - os.path.getmtime(self.path)) / 60 < limit
+
+ @handle_error(OSError)
+ def read(self):
+ self.size = os.path.getsize(self.path)
+ return read_last_line(self.path)
+
+
+class BaseDisk:
+ def __init__(self, name, log_file):
+ self.raw_name = name
+ self.name = re.sub(r'_+', '_', name)
+ self.log_file = log_file
+ self.attrs = list()
+ self.alive = True
+ self.charted = False
+
+ def __eq__(self, other):
+ if isinstance(other, BaseDisk):
+ return self.raw_name == other.raw_name
+ return self.raw_name == other
+
+ def __ne__(self, other):
+ return not self == other
+
+ def __hash__(self):
+ return hash(repr(self))
+
+ def parser(self, data):
+ raise NotImplementedError
+
+ @handle_error(TypeError)
+ def populate_attrs(self):
+ self.attrs = list()
+ line = self.log_file.read()
+ for value in self.parser(line):
+ self.attrs.append(attribute_factory(value))
+
+ return len(self.attrs)
+
+ def data(self):
+ data = dict()
+ for attr in self.attrs:
+ data['{0}_{1}'.format(self.name, attr.name)] = attr.value()
+ return data
+
+
+class ATADisk(BaseDisk):
+ def parser(self, data):
+ return RE_ATA.findall(data)
+
+
+class SCSIDisk(BaseDisk):
+ def parser(self, data):
+ return RE_SCSI.findall(data)
+
+
+class Service(SimpleService):
+ def __init__(self, configuration=None, name=None):
+ SimpleService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = deepcopy(CHARTS)
+ self.log_path = configuration.get('log_path', DEF_PATH)
+ self.age = configuration.get('age', DEF_AGE)
+ self.exclude = configuration.get('exclude_disks', str()).split()
+ self.disks = list()
+ self.runs = 0
+
+ def check(self):
+ return self.scan() > 0
+
+ def get_data(self):
+ self.runs += 1
+
+ if self.runs % DEF_RESCAN_INTERVAL == 0:
+ self.cleanup()
+ self.scan()
+
+ data = dict()
+
+ for disk in self.disks:
+ if not disk.alive:
+ continue
+
+ if not disk.charted:
+ self.add_disk_to_charts(disk)
+
+ changed = disk.log_file.is_changed()
+
+ if changed is None:
+ disk.alive = False
+ continue
+
+ if changed and disk.populate_attrs() is None:
+ disk.alive = False
+ continue
+
+ data.update(disk.data())
+
+ return data
+
+ def cleanup(self):
+ current_time = time()
+ for disk in self.disks[:]:
+ if any(
+ [
+ not disk.alive,
+ not disk.log_file.is_active(current_time, self.age),
+ ]
+ ):
+ self.disks.remove(disk.raw_name)
+ self.remove_disk_from_charts(disk)
+
+ def scan(self):
+ self.debug('scanning {0}'.format(self.log_path))
+ current_time = time()
+
+ for full_name in os.listdir(self.log_path):
+ disk = self.create_disk_from_file(full_name, current_time)
+ if not disk:
+ continue
+ self.disks.append(disk)
+
+ return len(self.disks)
+
+ def create_disk_from_file(self, full_name, current_time):
+ if not full_name.endswith(CSV):
+ self.debug('skipping {0}: not a csv file'.format(full_name))
+ return None
+
+ name = os.path.basename(full_name).split('.')[-3]
+ path = os.path.join(self.log_path, full_name)
+
+ if name in self.disks:
+ self.debug('skipping {0}: already in disks'.format(full_name))
+ return None
+
+ if [p for p in self.exclude if p in name]:
+ self.debug('skipping {0}: filtered by `exclude` option'.format(full_name))
+ return None
+
+ if not os.access(path, os.R_OK):
+ self.debug('skipping {0}: not readable'.format(full_name))
+ return None
+
+ if os.path.getsize(path) == 0:
+ self.debug('skipping {0}: zero size'.format(full_name))
+ return None
+
+ if (current_time - os.path.getmtime(path)) / 60 > self.age:
+            self.debug('skipping {0}: has not been updated in the last {1} minutes'.format(full_name, self.age))
+ return None
+
+ if ATA in full_name:
+ disk = ATADisk(name, DiskLogFile(path))
+ elif SCSI in full_name:
+ disk = SCSIDisk(name, DiskLogFile(path))
+ else:
+ self.debug('skipping {0}: unknown type'.format(full_name))
+ return None
+
+ disk.populate_attrs()
+ if not disk.attrs:
+ self.error('skipping {0}: parsing failed'.format(full_name))
+ return None
+
+ self.debug('added {0}'.format(full_name))
+ return disk
+
+ def add_disk_to_charts(self, disk):
+ if len(self.charts) == 0 or disk.charted:
+ return
+ disk.charted = True
+
+ for attr in disk.attrs:
+ chart_id = CHARTED_ATTRS.get(attr.name)
+
+ if not chart_id or chart_id not in self.charts:
+ continue
+
+ chart = self.charts[chart_id]
+ dim = [
+ '{0}_{1}'.format(disk.name, attr.name),
+ disk.name,
+ CHARTS[chart_id]['algo'],
+ ]
+
+ if dim[0] in self.charts[chart_id].dimensions:
+ chart.hide_dimension(dim[0], reverse=True)
+ else:
+ chart.add_dimension(dim)
+
+ def remove_disk_from_charts(self, disk):
+ if len(self.charts) == 0 or not disk.charted:
+ return
+
+ for attr in disk.attrs:
+ chart_id = CHARTED_ATTRS.get(attr.name)
+
+ if not chart_id or chart_id not in self.charts:
+ continue
+
+ self.charts[chart_id].del_dimension('{0}_{1}'.format(disk.name, attr.name))
diff --git a/collectors/python.d.plugin/smartd_log/smartd_log.conf b/collectors/python.d.plugin/smartd_log/smartd_log.conf
new file mode 100644
index 0000000..4f138d1
--- /dev/null
+++ b/collectors/python.d.plugin/smartd_log/smartd_log.conf
@@ -0,0 +1,67 @@
+# netdata python.d.plugin configuration for smartd log
+#
+# This file is in YAML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# In addition to the above, smartd_log also supports the following:
+#
+# log_path: '/path/to/smartd_logs' # path to smartd log files. Default is /var/log/smartd
+# exclude_disks: 'PATTERN1 PATTERN2' # space separated patterns. If the pattern is in the drive name, the module will not collect data for it.
+#
+# ----------------------------------------------------------------------
diff --git a/collectors/python.d.plugin/spigotmc/Makefile.inc b/collectors/python.d.plugin/spigotmc/Makefile.inc
new file mode 100644
index 0000000..f9fa8b6
--- /dev/null
+++ b/collectors/python.d.plugin/spigotmc/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += spigotmc/spigotmc.chart.py
+dist_pythonconfig_DATA += spigotmc/spigotmc.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += spigotmc/README.md spigotmc/Makefile.inc
+
diff --git a/collectors/python.d.plugin/spigotmc/README.md b/collectors/python.d.plugin/spigotmc/README.md
new file mode 100644
index 0000000..9b297f6
--- /dev/null
+++ b/collectors/python.d.plugin/spigotmc/README.md
@@ -0,0 +1,38 @@
+<!--
+title: "SpigotMC monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/spigotmc/README.md
+sidebar_label: "SpigotMC"
+-->
+
+# SpigotMC monitoring with Netdata
+
+Performs basic monitoring for Spigot Minecraft servers.
+
+It provides two charts, one tracking server-side ticks-per-second in
+1, 5 and 15 minute averages, and one tracking the number of currently
+active users.
+
+This is not compatible with Spigot plugins which change the format of
+the data returned by the `tps` or `list` console commands.
+
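+Both commands are parsed with regular expressions. The sketch below runs the module's user-count pattern against
+one of the sample `list` lines quoted in `spigotmc.chart.py`:
+
+```python
+import re
+
+# Same pattern as _LIST_REGEX in spigotmc.chart.py.
+LIST_RE = re.compile(r'[^§](\d+)(?:.*?(?=/).*?[^§](\d+))?', re.X)
+
+raw = 'There are 4 of a max 50 players online: player1, player2'
+match = LIST_RE.search(raw)
+print(match.group(1))  # 4 (group 2, when present, counts hidden users)
+```
+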
+## Configuration
+
+Edit the `python.d/spigotmc.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/spigotmc.conf
+```
+
+```yaml
+host: localhost
+port: 25575
+password: pass
+```
+
+By default, a connection to port 25575 on the local system is attempted with an empty password.
+
+---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fspigotmc%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/python.d.plugin/spigotmc/spigotmc.chart.py b/collectors/python.d.plugin/spigotmc/spigotmc.chart.py
new file mode 100644
index 0000000..f334113
--- /dev/null
+++ b/collectors/python.d.plugin/spigotmc/spigotmc.chart.py
@@ -0,0 +1,167 @@
+# -*- coding: utf-8 -*-
+# Description: spigotmc netdata python.d module
+# Author: Austin S. Hemmelgarn (Ferroin)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import platform
+import re
+import socket
+
+from bases.FrameworkServices.SimpleService import SimpleService
+from third_party import mcrcon
+
+# Update only every 5 seconds because collection takes in excess of
+# 100ms sometimes, and most people won't care about second-by-second data.
+update_every = 5
+
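+# TPS floats are stored as integers scaled by PRECISION; the chart line
+# divisors (see CHARTS) scale them back for display.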
+PRECISION = 100
+
+COMMAND_TPS = 'tps'
+COMMAND_LIST = 'list'
+COMMAND_ONLINE = 'online'
+
+ORDER = [
+ 'tps',
+ 'users',
+]
+
+CHARTS = {
+ 'tps': {
+ 'options': [None, 'Spigot Ticks Per Second', 'ticks', 'spigotmc', 'spigotmc.tps', 'line'],
+ 'lines': [
+ ['tps1', '1 Minute Average', 'absolute', 1, PRECISION],
+ ['tps5', '5 Minute Average', 'absolute', 1, PRECISION],
+ ['tps15', '15 Minute Average', 'absolute', 1, PRECISION]
+ ]
+ },
+ 'users': {
+ 'options': [None, 'Minecraft Users', 'users', 'spigotmc', 'spigotmc.users', 'area'],
+ 'lines': [
+ ['users', 'Users', 'absolute', 1, 1]
+ ]
+ }
+}
+
+_TPS_REGEX = re.compile(
+ r'^.*: .*?' # Message lead-in
+    r'(\d{1,2}\.\d+), .*?'  # 1-minute TPS value
+    r'(\d{1,2}\.\d+), .*?'  # 5-minute TPS value
+ r'(\d{1,2}\.\d+).*$', # 15-minute TPS value
+ re.X
+)
+_LIST_REGEX = re.compile(
+ # Examples:
+ # There are 4 of a max 50 players online: player1, player2, player3, player4
+ # §6There are §c4§6 out of maximum §c50§6 players online.
+ # §6There are §c3§6/§c1§6 out of maximum §c50§6 players online.
+ # §6当前有 §c4§6 个玩家在线,最大在线人数为 §c50§6 个玩家.
+ # §c4§6 人のプレイヤーが接続中です。最大接続可能人数\:§c 50
+ r'[^§](\d+)(?:.*?(?=/).*?[^§](\d+))?', # Current user count.
+ re.X
+)
+
+
+class Service(SimpleService):
+ def __init__(self, configuration=None, name=None):
+ SimpleService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
+ self.host = self.configuration.get('host', 'localhost')
+ self.port = self.configuration.get('port', 25575)
+ self.password = self.configuration.get('password', '')
+ self.console = mcrcon.MCRcon()
+ self.alive = True
+
+ def check(self):
+ if platform.system() != 'Linux':
+ self.error('Only supported on Linux.')
+ return False
+ try:
+ self.connect()
+ except (mcrcon.MCRconException, socket.error) as err:
+ self.error('Error connecting.')
+ self.error(repr(err))
+ return False
+
+ return self._get_data()
+
+ def connect(self):
+ self.console.connect(self.host, self.port, self.password)
+
+ def reconnect(self):
+ self.error('try reconnect.')
+ try:
+ try:
+ self.console.disconnect()
+ except mcrcon.MCRconException:
+ pass
+ self.console.connect(self.host, self.port, self.password)
+ self.alive = True
+ except (mcrcon.MCRconException, socket.error) as err:
+ self.error('Error connecting.')
+ self.error(repr(err))
+ return False
+ return True
+
+ def is_alive(self):
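+        # On Linux, the first byte of TCP_INFO holds the TCP state; 1 means ESTABLISHED.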
+ if any(
+ [
+ not self.alive,
+ self.console.socket.getsockopt(socket.IPPROTO_TCP, socket.TCP_INFO, 0) != 1
+ ]
+ ):
+ return self.reconnect()
+ return True
+
+ def _get_data(self):
+ if not self.is_alive():
+ return None
+
+ data = {}
+
+ try:
+ raw = self.console.command(COMMAND_TPS)
+ match = _TPS_REGEX.match(raw)
+ if match:
+ data['tps1'] = int(float(match.group(1)) * PRECISION)
+ data['tps5'] = int(float(match.group(2)) * PRECISION)
+ data['tps15'] = int(float(match.group(3)) * PRECISION)
+ else:
+ self.error('Unable to process TPS values.')
+ if not raw:
+ self.error(
+ "'{0}' command returned no value, make sure you set correct password".format(COMMAND_TPS))
+ except mcrcon.MCRconException:
+ self.error('Unable to fetch TPS values.')
+ except socket.error:
+ self.error('Connection is dead.')
+ self.alive = False
+ return None
+
+ try:
+ raw = self.console.command(COMMAND_LIST)
+ match = _LIST_REGEX.search(raw)
+ if not match:
+ raw = self.console.command(COMMAND_ONLINE)
+ match = _LIST_REGEX.search(raw)
+ if match:
+ users = int(match.group(1))
+ hidden_users = match.group(2)
+ if hidden_users:
+ hidden_users = int(hidden_users)
+ else:
+ hidden_users = 0
+ data['users'] = users + hidden_users
+ else:
+ if not raw:
+ self.error("'{0}' and '{1}' commands returned no value, make sure you set correct password".format(
+ COMMAND_LIST, COMMAND_ONLINE))
+ self.error('Unable to process user counts.')
+ except mcrcon.MCRconException:
+ self.error('Unable to fetch user counts.')
+ except socket.error:
+ self.error('Connection is dead.')
+ self.alive = False
+ return None
+
+ return data
diff --git a/collectors/python.d.plugin/spigotmc/spigotmc.conf b/collectors/python.d.plugin/spigotmc/spigotmc.conf
new file mode 100644
index 0000000..f0064ea
--- /dev/null
+++ b/collectors/python.d.plugin/spigotmc/spigotmc.conf
@@ -0,0 +1,66 @@
+# netdata python.d.plugin configuration for spigotmc
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# In addition to the above, spigotmc supports the following:
+#
+# host: localhost # The host to connect to. Defaults to the local system.
+# port: 25575 # The port the remote console is listening on.
+# password: ''     # The remote console password. Must be set correctly.
diff --git a/collectors/python.d.plugin/springboot/Makefile.inc b/collectors/python.d.plugin/springboot/Makefile.inc
new file mode 100644
index 0000000..06775f9
--- /dev/null
+++ b/collectors/python.d.plugin/springboot/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += springboot/springboot.chart.py
+dist_pythonconfig_DATA += springboot/springboot.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += springboot/README.md springboot/Makefile.inc
+
diff --git a/collectors/python.d.plugin/springboot/README.md b/collectors/python.d.plugin/springboot/README.md
new file mode 100644
index 0000000..f38e8bf
--- /dev/null
+++ b/collectors/python.d.plugin/springboot/README.md
@@ -0,0 +1,145 @@
+<!--
+title: "Java Spring Boot 2 application monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/springboot/README.md
+sidebar_label: "Java Spring Boot 2 applications"
+-->
+
+# Java Spring Boot 2 application monitoring with Netdata
+
+Monitors one or more Java Spring Boot applications, depending on configuration.
+Netdata can monitor running Java [Spring Boot](https://spring.io/) applications that expose their metrics with the use of the **Spring Boot Actuator** included in the Spring Boot library.
+
+## Configuration
+
+The Spring Boot Actuator exposes these metrics over HTTP and is very easy to use:
+
+- add `org.springframework.boot:spring-boot-starter-actuator` to your application dependencies
+- set `endpoints.metrics.sensitive=false` in your `application.properties`
+
+You can create custom metrics by adding and injecting a `PublicMetrics` bean into your application.
+Here is an example of adding custom metrics:
+
+```java
+package com.example;
+
+import org.springframework.boot.actuate.endpoint.PublicMetrics;
+import org.springframework.boot.actuate.metrics.Metric;
+import org.springframework.stereotype.Service;
+
+import java.lang.management.ManagementFactory;
+import java.lang.management.MemoryPoolMXBean;
+import java.util.ArrayList;
+import java.util.Collection;
+
+@Service
+public class HeapPoolMetrics implements PublicMetrics {
+
+ private static final String PREFIX = "mempool.";
+ private static final String KEY_EDEN = PREFIX + "eden";
+ private static final String KEY_SURVIVOR = PREFIX + "survivor";
+ private static final String KEY_TENURED = PREFIX + "tenured";
+
+ @Override
+ public Collection<Metric<?>> metrics() {
+ Collection<Metric<?>> result = new ArrayList<>(4);
+ for (MemoryPoolMXBean mem : ManagementFactory.getMemoryPoolMXBeans()) {
+ String poolName = mem.getName();
+ String name = null;
+ if (poolName.indexOf("Eden Space") != -1) {
+ name = KEY_EDEN;
+ } else if (poolName.indexOf("Survivor Space") != -1) {
+ name = KEY_SURVIVOR;
+ } else if (poolName.indexOf("Tenured Gen") != -1 || poolName.indexOf("Old Gen") != -1) {
+ name = KEY_TENURED;
+ }
+
+ if (name != null) {
+ result.add(newMemoryMetric(name, mem.getUsage().getMax()));
+ result.add(newMemoryMetric(name + ".init", mem.getUsage().getInit()));
+ result.add(newMemoryMetric(name + ".committed", mem.getUsage().getCommitted()));
+ result.add(newMemoryMetric(name + ".used", mem.getUsage().getUsed()));
+ }
+ }
+ return result;
+ }
+
+ private Metric<Long> newMemoryMetric(String name, long bytes) {
+ return new Metric<>(name, bytes / 1024);
+ }
+}
+```
+
+Please refer to [Spring Boot Actuator: Production-ready Features](https://docs.spring.io/spring-boot/docs/current/reference/html/production-ready-features.html#production-ready) and [81. Actuator - Part IX. ‘How-to’ guides](https://docs.spring.io/spring-boot/docs/current/reference/html/howto.html#howto-actuator) for more information.
+
+## Charts
+
+1. **Response Codes** in requests/s
+
+ - 1xx
+ - 2xx
+ - 3xx
+ - 4xx
+ - 5xx
+ - others
+
+2. **Threads**
+
+ - daemon
+ - total
+
+3. **GC Time** in milliseconds and **GC Operations** in operations/s
+
+ - Copy
+ - MarkSweep
+ - ...
+
+4. **Heap Memory Usage** in KB
+
+ - used
+ - committed
+
+## Usage
+
+Edit the `python.d/springboot.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/springboot.conf
+```
+
+This module defines some common charts, and you can add custom charts by changing the configuration.
+
+The configuration format is as follows:
+
+```yaml
+<id>:
+ name: '<name>'
+ url: '<metrics endpoint>' # ex. http://localhost:8080/metrics
+ user: '<username>' # optional
+ pass: '<password>' # optional
+ defaults:
+ [<chart-id>]: true|false
+ extras:
+ - id: '<chart-id>'
+ options:
+ title: '***'
+ units: '***'
+ family: '***'
+ context: 'springboot.***'
+ charttype: 'stacked' | 'area' | 'line'
+ lines:
+ - { dimension: 'myapp_ok', name: 'ok', algorithm: 'absolute', multiplier: 1, divisor: 1} # it shows "myapp.ok" metrics
+ - { dimension: 'myapp_ng', name: 'ng', algorithm: 'absolute', multiplier: 1, divisor: 1} # it shows "myapp.ng" metrics
+```
+
+By default, it creates the `response_code`, `threads`, `gc_time`, `gc_ope` and `heap` charts.
+You can disable any default chart by setting `defaults.<chart-id>: false`.
+
+In the dimension names of extra charts, replace each `.` in the metric name with `_`.
+
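+For example, here is a sketch that disables the default `heap` chart and adds a custom chart, assuming your
+application publishes a hypothetical `gauge.queue.depth` metric:
+
+```yaml
+myapp:
+  url: 'http://localhost:8080/metrics'
+  defaults:
+    heap: false                # disable one of the default charts
+  extras:
+    - id: 'queue_depth'
+      options:
+        title: 'Queue Depth'
+        units: 'items'
+        family: 'queue'
+        context: 'springboot.queue_depth'
+        charttype: 'line'
+      lines:
+        # the 'gauge.queue.depth' metric is reported as dimension 'gauge_queue_depth'
+        - { dimension: 'gauge_queue_depth', name: 'depth' }
+```
+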
+Please check
+[springboot.conf](https://raw.githubusercontent.com/netdata/netdata/master/collectors/python.d.plugin/springboot/springboot.conf)
+for more examples.
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fspringboot%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/python.d.plugin/springboot/springboot.chart.py b/collectors/python.d.plugin/springboot/springboot.chart.py
new file mode 100644
index 0000000..dbe11d6
--- /dev/null
+++ b/collectors/python.d.plugin/springboot/springboot.chart.py
@@ -0,0 +1,160 @@
+# -*- coding: utf-8 -*-
+# Description: springboot netdata python.d module
+# Author: Wing924
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import json
+
+from bases.FrameworkServices.UrlService import UrlService
+
+DEFAULT_ORDER = [
+ 'response_code',
+ 'threads',
+ 'gc_time',
+ 'gc_ope',
+ 'heap',
+]
+
+DEFAULT_CHARTS = {
+ 'response_code': {
+ 'options': [None, "Response Codes", "requests/s", "response", "springboot.response_code", "stacked"],
+ 'lines': [
+ ["resp_other", 'Other', 'incremental'],
+ ["resp_1xx", '1xx', 'incremental'],
+ ["resp_2xx", '2xx', 'incremental'],
+ ["resp_3xx", '3xx', 'incremental'],
+ ["resp_4xx", '4xx', 'incremental'],
+ ["resp_5xx", '5xx', 'incremental'],
+ ]
+ },
+ 'threads': {
+ 'options': [None, "Threads", "current threads", "threads", "springboot.threads", "area"],
+ 'lines': [
+ ["threads_daemon", 'daemon', 'absolute'],
+ ["threads", 'total', 'absolute'],
+ ]
+ },
+ 'gc_time': {
+ 'options': [None, "GC Time", "milliseconds", "garbage collection", "springboot.gc_time", "stacked"],
+ 'lines': [
+ ["gc_copy_time", 'Copy', 'incremental'],
+ ["gc_marksweepcompact_time", 'MarkSweepCompact', 'incremental'],
+ ["gc_parnew_time", 'ParNew', 'incremental'],
+ ["gc_concurrentmarksweep_time", 'ConcurrentMarkSweep', 'incremental'],
+ ["gc_ps_scavenge_time", 'PS Scavenge', 'incremental'],
+ ["gc_ps_marksweep_time", 'PS MarkSweep', 'incremental'],
+ ["gc_g1_young_generation_time", 'G1 Young Generation', 'incremental'],
+ ["gc_g1_old_generation_time", 'G1 Old Generation', 'incremental'],
+ ]
+ },
+ 'gc_ope': {
+ 'options': [None, "GC Operations", "operations/s", "garbage collection", "springboot.gc_ope", "stacked"],
+ 'lines': [
+ ["gc_copy_count", 'Copy', 'incremental'],
+ ["gc_marksweepcompact_count", 'MarkSweepCompact', 'incremental'],
+ ["gc_parnew_count", 'ParNew', 'incremental'],
+ ["gc_concurrentmarksweep_count", 'ConcurrentMarkSweep', 'incremental'],
+ ["gc_ps_scavenge_count", 'PS Scavenge', 'incremental'],
+ ["gc_ps_marksweep_count", 'PS MarkSweep', 'incremental'],
+ ["gc_g1_young_generation_count", 'G1 Young Generation', 'incremental'],
+ ["gc_g1_old_generation_count", 'G1 Old Generation', 'incremental'],
+ ]
+ },
+ 'heap': {
+ 'options': [None, "Heap Memory Usage", "KiB", "heap memory", "springboot.heap", "area"],
+ 'lines': [
+ ["heap_committed", 'committed', "absolute"],
+ ["heap_used", 'used', "absolute"],
+ ]
+ }
+}
+
+
+class ExtraChartError(ValueError):
+ pass
+
+
+class Service(UrlService):
+ def __init__(self, configuration=None, name=None):
+ UrlService.__init__(self, configuration=configuration, name=name)
+ self.url = self.configuration.get('url', "http://localhost:8080/metrics")
+ self._setup_charts()
+
+ def _get_data(self):
+ """
+ Format data received from http request
+ :return: dict
+ """
+ raw_data = self._get_raw_data()
+ if not raw_data:
+ return None
+
+ try:
+ data = json.loads(raw_data)
+ except ValueError:
+ self.debug('%s is not a valid JSON page' % self.url)
+ return None
+
+ result = {
+ 'resp_1xx': 0,
+ 'resp_2xx': 0,
+ 'resp_3xx': 0,
+ 'resp_4xx': 0,
+ 'resp_5xx': 0,
+ 'resp_other': 0,
+ }
+
+        for key, value in data.items():
+ if 'counter.status.' in key:
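+                # len('counter.status.') == 15, so key[15:16] is the first digit of the HTTP status code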
+ status_type = key[15:16] + 'xx'
+ if status_type[0] not in '12345':
+ status_type = 'other'
+ result['resp_' + status_type] += value
+ else:
+ result[key.replace('.', '_')] = value
+
+ return result or None
+
+ def _setup_charts(self):
+ self.order = []
+ self.definitions = {}
+ defaults = self.configuration.get('defaults', {})
+
+ for chart in DEFAULT_ORDER:
+ if defaults.get(chart, True):
+ self.order.append(chart)
+ self.definitions[chart] = DEFAULT_CHARTS[chart]
+
+ for extra in self.configuration.get('extras', []):
+ self._add_extra_chart(extra)
+ self.order.append(extra['id'])
+
+ def _add_extra_chart(self, chart):
+ chart_id = chart.get('id', None) or self.die('id is not defined in extra chart')
+        options = chart.get('options', None) or self.die('options is not defined in extra chart: %s' % chart_id)
+ lines = chart.get('lines', None) or self.die('lines is not defined in extra chart: %s' % chart_id)
+
+ title = options.get('title', None) or self.die('title is missing: %s' % chart_id)
+ units = options.get('units', None) or self.die('units is missing: %s' % chart_id)
+ family = options.get('family', title)
+ context = options.get('context', 'springboot.' + title)
+ charttype = options.get('charttype', 'line')
+
+ result = {
+ 'options': [None, title, units, family, context, charttype],
+ 'lines': [],
+ }
+
+ for line in lines:
+ dimension = line.get('dimension', None) or self.die('dimension is missing: %s' % chart_id)
+ name = line.get('name', dimension)
+ algorithm = line.get('algorithm', 'absolute')
+ multiplier = line.get('multiplier', 1)
+ divisor = line.get('divisor', 1)
+ result['lines'].append([dimension, name, algorithm, multiplier, divisor])
+
+ self.definitions[chart_id] = result
+
+ @staticmethod
+ def die(error_message):
+ raise ExtraChartError(error_message)
diff --git a/collectors/python.d.plugin/springboot/springboot.conf b/collectors/python.d.plugin/springboot/springboot.conf
new file mode 100644
index 0000000..0cb369c
--- /dev/null
+++ b/collectors/python.d.plugin/springboot/springboot.conf
@@ -0,0 +1,118 @@
+# netdata python.d.plugin configuration for springboot
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# Additionally to the above, this plugin also supports the following:
+#
+# url: 'http://127.0.0.1/metrics' # the URL of the spring boot actuator metrics
+#
+# if the URL is password protected, the following are supported:
+#
+# user: 'username'
+# pass: 'password'
+#
+# defaults:
+# [chart_id]: true | false # enables/disables default charts, defaults true.
+# extras: [] # defines extra charts to monitor, please see the example below
+# - id: [chart_id]
+# options: {}
+# lines: []
+#
+# If all default charts are disabled and no extra charts are defined, this module will disable itself, as it has no
+# data to collect.
+#
+# Configuration example
+# ---------------------
+# example:
+# name: 'example'
+# url: 'http://localhost:8080/metrics'
+# defaults:
+# response_code: true
+# threads: true
+# gc_time: true
+# gc_ope: true
+# heap: false
+# extras:
+# - id: 'heap'
+# options: { title: 'Heap Memory Usage', units: 'KB', family: 'heap memory', context: 'springboot.heap', charttype: 'stacked' }
+# lines:
+# - { dimension: 'mem_free', name: 'free'}
+# - { dimension: 'mempool_eden_used', name: 'eden', algorithm: 'absolute', multiplier: 1, divisor: 1}
+# - { dimension: 'mempool_survivor_used', name: 'survivor', algorithm: 'absolute', multiplier: 1, divisor: 1}
+# - { dimension: 'mempool_tenured_used', name: 'tenured', algorithm: 'absolute', multiplier: 1, divisor: 1}
+# - id: 'heap_eden'
+# options: { title: 'Eden Memory Usage', units: 'KB', family: 'heap memory', context: 'springboot.heap_eden', charttype: 'area' }
+# lines:
+# - { dimension: 'mempool_eden_used', name: 'used'}
+# - { dimension: 'mempool_eden_committed', name: 'committed'}
+# - id: 'heap_survivor'
+# options: { title: 'Survivor Memory Usage', units: 'KB', family: 'heap memory', context: 'springboot.heap_survivor', charttype: 'area' }
+# lines:
+# - { dimension: 'mempool_survivor_used', name: 'used'}
+# - { dimension: 'mempool_survivor_committed', name: 'committed'}
+# - id: 'heap_tenured'
+# options: { title: 'Tenured Memory Usage', units: 'KB', family: 'heap memory', context: 'springboot.heap_tenured', charttype: 'area' }
+# lines:
+# - { dimension: 'mempool_tenured_used', name: 'used'}
+# - { dimension: 'mempool_tenured_committed', name: 'committed'}
+
+
+local:
+ name: 'local'
+ url: 'http://localhost:8080/metrics'
+
+local_ip:
+ name: 'local'
+ url: 'http://127.0.0.1:8080/metrics'
diff --git a/collectors/python.d.plugin/squid/Makefile.inc b/collectors/python.d.plugin/squid/Makefile.inc
new file mode 100644
index 0000000..76ecff8
--- /dev/null
+++ b/collectors/python.d.plugin/squid/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += squid/squid.chart.py
+dist_pythonconfig_DATA += squid/squid.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += squid/README.md squid/Makefile.inc
+
diff --git a/collectors/python.d.plugin/squid/README.md b/collectors/python.d.plugin/squid/README.md
new file mode 100644
index 0000000..e3ed4e0
--- /dev/null
+++ b/collectors/python.d.plugin/squid/README.md
@@ -0,0 +1,58 @@
+<!--
+title: "Squid monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/squid/README.md
+sidebar_label: "Squid"
+-->
+
+# Squid monitoring with Netdata
+
+Monitors one or more Squid instances, depending on configuration.
+
+It produces the following charts:
+
+1. **Client Bandwidth** in kilobits/s
+
+ - in
+ - out
+ - hits
+
+2. **Client Requests** in requests/s
+
+ - requests
+ - hits
+ - errors
+
+3. **Server Bandwidth** in kilobits/s
+
+ - in
+ - out
+
+4. **Server Requests** in requests/s
+
+ - requests
+ - errors
+
+## Configuration
+
+Edit the `python.d/squid.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/squid.conf
+```
+
+```yaml
+priority : 50000
+
+local:
+ request : 'cache_object://localhost:3128/counters'
+ host : 'localhost'
+ port : 3128
+```
+
+Without any configuration, the module will try to autodetect where squid presents its `counters` data.
+
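+Both the classic `cache_object://` manager URL and the newer `/squid-internal-mgr/` path are probed by the
+bundled auto-detection jobs (see `squid.conf`). A sketch for monitoring a remote proxy, assuming it is
+reachable at the hypothetical address `192.0.2.10` and allows the cache manager from this host:
+
+```yaml
+remote:
+  name    : 'remote'
+  host    : '192.0.2.10'
+  port    : 3128
+  request : '/squid-internal-mgr/counters'
+```
+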
+---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fsquid%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/python.d.plugin/squid/squid.chart.py b/collectors/python.d.plugin/squid/squid.chart.py
new file mode 100644
index 0000000..bcae2d8
--- /dev/null
+++ b/collectors/python.d.plugin/squid/squid.chart.py
@@ -0,0 +1,123 @@
+# -*- coding: utf-8 -*-
+# Description: squid netdata python.d module
+# Author: Pawel Krupa (paulfantom)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from bases.FrameworkServices.SocketService import SocketService
+
+ORDER = [
+ 'clients_net',
+ 'clients_requests',
+ 'servers_net',
+ 'servers_requests',
+]
+
+CHARTS = {
+ 'clients_net': {
+ 'options': [None, 'Squid Client Bandwidth', 'kilobits/s', 'clients', 'squid.clients_net', 'area'],
+ 'lines': [
+ ['client_http_kbytes_in', 'in', 'incremental', 8, 1],
+ ['client_http_kbytes_out', 'out', 'incremental', -8, 1],
+ ['client_http_hit_kbytes_out', 'hits', 'incremental', -8, 1]
+ ]
+ },
+ 'clients_requests': {
+ 'options': [None, 'Squid Client Requests', 'requests/s', 'clients', 'squid.clients_requests', 'line'],
+ 'lines': [
+ ['client_http_requests', 'requests', 'incremental'],
+ ['client_http_hits', 'hits', 'incremental'],
+ ['client_http_errors', 'errors', 'incremental', -1, 1]
+ ]
+ },
+ 'servers_net': {
+ 'options': [None, 'Squid Server Bandwidth', 'kilobits/s', 'servers', 'squid.servers_net', 'area'],
+ 'lines': [
+ ['server_all_kbytes_in', 'in', 'incremental', 8, 1],
+ ['server_all_kbytes_out', 'out', 'incremental', -8, 1]
+ ]
+ },
+ 'servers_requests': {
+ 'options': [None, 'Squid Server Requests', 'requests/s', 'servers', 'squid.servers_requests', 'line'],
+ 'lines': [
+ ['server_all_requests', 'requests', 'incremental'],
+ ['server_all_errors', 'errors', 'incremental', -1, 1]
+ ]
+ }
+}
+
+
+class Service(SocketService):
+ def __init__(self, configuration=None, name=None):
+ SocketService.__init__(self, configuration=configuration, name=name)
+ self._keep_alive = True
+ self.request = ''
+ self.host = 'localhost'
+ self.port = 3128
+ self.order = ORDER
+ self.definitions = CHARTS
+
+ def _get_data(self):
+ """
+ Get data via http request
+ :return: dict
+ """
+ response = self._get_raw_data()
+
+ data = dict()
+ try:
+ raw = ''
+ for tmp in response.split('\r\n'):
+ if tmp.startswith('sample_time'):
+ raw = tmp
+ break
+
+ if raw.startswith('<'):
+ self.error('invalid data received')
+ return None
+
+ for row in raw.split('\n'):
+ if row.startswith(('client', 'server.all')):
+ tmp = row.split('=')
+ data[tmp[0].replace('.', '_').strip(' ')] = int(tmp[1])
+
+ except (ValueError, AttributeError, TypeError):
+ self.error('invalid data received')
+ return None
+
+ if not data:
+ self.error('no data received')
+ return None
+ return data
+
+ def _check_raw_data(self, data):
+ header = data[:1024].lower()
+
+ if 'connection: keep-alive' in header:
+ self._keep_alive = True
+ else:
+ self._keep_alive = False
+
+ if data[-7:] == '\r\n0\r\n\r\n' and 'transfer-encoding: chunked' in header: # HTTP/1.1 response
+ self.debug('received full response from squid')
+ return True
+
+ self.debug('waiting more data from squid')
+ return False
+
+ def check(self):
+ """
+ Parse essential configuration, autodetect squid configuration (if needed), and check if data is available
+ :return: boolean
+ """
+ self._parse_config()
+ # format request
+ req = self.request.decode()
+ if not req.startswith('GET'):
+ req = 'GET ' + req
+ if not req.endswith(' HTTP/1.1\r\n\r\n'):
+ req += ' HTTP/1.1\r\n\r\n'
+ self.request = req.encode()
+ if self._get_data() is not None:
+ return True
+ else:
+ return False
diff --git a/collectors/python.d.plugin/squid/squid.conf b/collectors/python.d.plugin/squid/squid.conf
new file mode 100644
index 0000000..b90a52c
--- /dev/null
+++ b/collectors/python.d.plugin/squid/squid.conf
@@ -0,0 +1,167 @@
+# netdata python.d.plugin configuration for squid
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# Additionally to the above, squid also supports the following:
+#
+# host : 'IP or HOSTNAME' # the host to connect to
+# port : PORT # the port to connect to
+# request: 'URL' # the URL to request from squid
+#
+
+# ----------------------------------------------------------------------
+# SQUID CONFIGURATION
+#
+# See:
+# http://wiki.squid-cache.org/Features/CacheManager
+#
+# In short, add to your squid configuration these:
+#
+# http_access allow localhost manager
+# http_access deny manager
+#
+# To remotely monitor a squid:
+#
+# acl managerAdmin src 192.0.2.1
+# http_access allow localhost manager
+# http_access allow managerAdmin manager
+# http_access deny manager
+#
+
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+
+tcp3128old:
+ name : 'local'
+ host : 'localhost'
+ port : 3128
+ request : 'cache_object://localhost:3128/counters'
+
+tcp8080old:
+ name : 'local'
+ host : 'localhost'
+ port : 8080
+ request : 'cache_object://localhost:3128/counters'
+
+tcp3128new:
+ name : 'local'
+ host : 'localhost'
+ port : 3128
+ request : '/squid-internal-mgr/counters'
+
+tcp8080new:
+ name : 'local'
+ host : 'localhost'
+ port : 8080
+ request : '/squid-internal-mgr/counters'
+
+# IPv4
+
+tcp3128oldipv4:
+ name : 'local'
+ host : '127.0.0.1'
+ port : 3128
+ request : 'cache_object://127.0.0.1:3128/counters'
+
+tcp8080oldipv4:
+ name : 'local'
+ host : '127.0.0.1'
+ port : 8080
+ request : 'cache_object://127.0.0.1:3128/counters'
+
+tcp3128newipv4:
+ name : 'local'
+ host : '127.0.0.1'
+ port : 3128
+ request : '/squid-internal-mgr/counters'
+
+tcp8080newipv4:
+ name : 'local'
+ host : '127.0.0.1'
+ port : 8080
+ request : '/squid-internal-mgr/counters'
+
+# IPv6
+
+tcp3128oldipv6:
+ name : 'local'
+ host : '::1'
+ port : 3128
+ request : 'cache_object://[::1]:3128/counters'
+
+tcp8080oldipv6:
+ name : 'local'
+ host : '::1'
+ port : 8080
+ request : 'cache_object://[::1]:3128/counters'
+
+tcp3128newipv6:
+ name : 'local'
+ host : '::1'
+ port : 3128
+ request : '/squid-internal-mgr/counters'
+
+tcp8080newipv6:
+ name : 'local'
+ host : '::1'
+ port : 8080
+ request : '/squid-internal-mgr/counters'
+
diff --git a/collectors/python.d.plugin/tomcat/Makefile.inc b/collectors/python.d.plugin/tomcat/Makefile.inc
new file mode 100644
index 0000000..940a783
--- /dev/null
+++ b/collectors/python.d.plugin/tomcat/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += tomcat/tomcat.chart.py
+dist_pythonconfig_DATA += tomcat/tomcat.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += tomcat/README.md tomcat/Makefile.inc
+
diff --git a/collectors/python.d.plugin/tomcat/README.md b/collectors/python.d.plugin/tomcat/README.md
new file mode 100644
index 0000000..f9f2ffe
--- /dev/null
+++ b/collectors/python.d.plugin/tomcat/README.md
@@ -0,0 +1,53 @@
+<!--
+title: "Apache Tomcat monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/tomcat/README.md
+sidebar_label: "Tomcat"
+-->
+
+# Apache Tomcat monitoring with Netdata
+
+Presents request, bandwidth, thread, and JVM memory metrics of Tomcat servlet containers.
+
+Charts:
+
+1. **Requests** per second
+
+ - accesses
+
+2. **Volume** in KB/s
+
+ - volume
+
+3. **Threads**
+
+ - current
+ - busy
+
+4. **JVM Free Memory** in MB
+
+ - jvm
+
+## Configuration
+
+Edit the `python.d/tomcat.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/tomcat.conf
+```
+
+```yaml
+localhost:
+ name : 'local'
+ url : 'http://127.0.0.1:8080/manager/status?XML=true'
+ user : 'tomcat_username'
+ pass : 'secret_tomcat_password'
+```
+
+Without configuration, the module attempts to connect to `http://localhost:8080/manager/status?XML=true` without any
+credentials, so it will most likely fail.
+
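+On the Tomcat side, the status page requires a user with a manager role. A minimal sketch for
+`conf/tomcat-users.xml`, assuming the `manager-status` role name from the Tomcat manager documentation:
+
+```xml
+<role rolename="manager-status"/>
+<user username="tomcat_username" password="secret_tomcat_password" roles="manager-status"/>
+```
+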
+---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Ftomcat%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/python.d.plugin/tomcat/tomcat.chart.py b/collectors/python.d.plugin/tomcat/tomcat.chart.py
new file mode 100644
index 0000000..90315f8
--- /dev/null
+++ b/collectors/python.d.plugin/tomcat/tomcat.chart.py
@@ -0,0 +1,199 @@
+# -*- coding: utf-8 -*-
+# Description: tomcat netdata python.d module
+# Author: Pawel Krupa (paulfantom)
+# Author: Wei He (Wing924)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import re
+import xml.etree.ElementTree as ET
+
+from bases.FrameworkServices.UrlService import UrlService
+
+MiB = 1 << 20
+
+# Regex fix for Tomcat single quote XML attributes
+# affecting Tomcat < 8.5.24 & 9.0.2 running with Java > 9
+# cf. https://bz.apache.org/bugzilla/show_bug.cgi?id=61603
+single_quote_regex = re.compile(r"='([^']+)'([^']+)''")
+
+ORDER = [
+ 'accesses',
+ 'bandwidth',
+ 'processing_time',
+ 'threads',
+ 'jvm',
+ 'jvm_eden',
+ 'jvm_survivor',
+ 'jvm_tenured',
+]
+
+CHARTS = {
+ 'accesses': {
+ 'options': [None, 'Requests', 'requests/s', 'statistics', 'tomcat.accesses', 'area'],
+ 'lines': [
+ ['requestCount', 'accesses', 'incremental'],
+ ['errorCount', 'errors', 'incremental'],
+ ]
+ },
+ 'bandwidth': {
+ 'options': [None, 'Bandwidth', 'KiB/s', 'statistics', 'tomcat.bandwidth', 'area'],
+ 'lines': [
+ ['bytesSent', 'sent', 'incremental', 1, 1024],
+ ['bytesReceived', 'received', 'incremental', 1, 1024],
+ ]
+ },
+ 'processing_time': {
+ 'options': [None, 'processing time', 'seconds', 'statistics', 'tomcat.processing_time', 'area'],
+ 'lines': [
+ ['processingTime', 'processing time', 'incremental', 1, 1000]
+ ]
+ },
+ 'threads': {
+ 'options': [None, 'Threads', 'current threads', 'statistics', 'tomcat.threads', 'area'],
+ 'lines': [
+ ['currentThreadCount', 'current', 'absolute'],
+ ['currentThreadsBusy', 'busy', 'absolute']
+ ]
+ },
+ 'jvm': {
+ 'options': [None, 'JVM Memory Pool Usage', 'MiB', 'memory', 'tomcat.jvm', 'stacked'],
+ 'lines': [
+ ['free', 'free', 'absolute', 1, MiB],
+ ['eden_used', 'eden', 'absolute', 1, MiB],
+ ['survivor_used', 'survivor', 'absolute', 1, MiB],
+ ['tenured_used', 'tenured', 'absolute', 1, MiB],
+ ['code_cache_used', 'code cache', 'absolute', 1, MiB],
+ ['compressed_used', 'compressed', 'absolute', 1, MiB],
+ ['metaspace_used', 'metaspace', 'absolute', 1, MiB],
+ ]
+ },
+ 'jvm_eden': {
+ 'options': [None, 'Eden Memory Usage', 'MiB', 'memory', 'tomcat.jvm_eden', 'area'],
+ 'lines': [
+ ['eden_used', 'used', 'absolute', 1, MiB],
+ ['eden_committed', 'committed', 'absolute', 1, MiB],
+ ['eden_max', 'max', 'absolute', 1, MiB]
+ ]
+ },
+ 'jvm_survivor': {
+ 'options': [None, 'Survivor Memory Usage', 'MiB', 'memory', 'tomcat.jvm_survivor', 'area'],
+ 'lines': [
+ ['survivor_used', 'used', 'absolute', 1, MiB],
+ ['survivor_committed', 'committed', 'absolute', 1, MiB],
+ ['survivor_max', 'max', 'absolute', 1, MiB],
+ ]
+ },
+ 'jvm_tenured': {
+ 'options': [None, 'Tenured Memory Usage', 'MiB', 'memory', 'tomcat.jvm_tenured', 'area'],
+ 'lines': [
+ ['tenured_used', 'used', 'absolute', 1, MiB],
+ ['tenured_committed', 'committed', 'absolute', 1, MiB],
+ ['tenured_max', 'max', 'absolute', 1, MiB]
+ ]
+ }
+}
+
+
+class Service(UrlService):
+ def __init__(self, configuration=None, name=None):
+ UrlService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
+ self.url = self.configuration.get('url', 'http://127.0.0.1:8080/manager/status?XML=true')
+ self.connector_name = self.configuration.get('connector_name', None)
+ self.parse = self.xml_parse
+
+ def xml_parse(self, data):
+ try:
+ return ET.fromstring(data)
+ except ET.ParseError:
+ self.debug('%s is not a valid XML page. Please add "?XML=true" to tomcat status page.' % self.url)
+ return None
+
+ def xml_single_quote_fix_parse(self, data):
+ data = single_quote_regex.sub(r"='\g<1>\g<2>'", data)
+ return self.xml_parse(data)
+
+ def check(self):
+ self._manager = self._build_manager()
+
+ raw_data = self._get_raw_data()
+ if not raw_data:
+ return False
+
+ if single_quote_regex.search(raw_data):
+ self.warning('Tomcat status page is returning invalid single quote XML, please consider upgrading '
+ 'your Tomcat installation. See https://bz.apache.org/bugzilla/show_bug.cgi?id=61603')
+ self.parse = self.xml_single_quote_fix_parse
+
+ return self.parse(raw_data) is not None
+
+ def _get_data(self):
+ """
+ Format data received from http request
+ :return: dict
+ """
+ data = None
+ raw_data = self._get_raw_data()
+ if raw_data:
+ xml = self.parse(raw_data)
+ if xml is None:
+ return None
+
+ data = {}
+
+ jvm = xml.find('jvm')
+
+ connector = None
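+            # use the connector whose name contains connector_name, if configured; otherwise fall back to the first one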
+ if self.connector_name:
+ for conn in xml.findall('connector'):
+ if self.connector_name in conn.get('name'):
+ connector = conn
+ break
+ else:
+ connector = xml.find('connector')
+
+ memory = jvm.find('memory')
+ data['free'] = memory.get('free')
+ data['total'] = memory.get('total')
+
+ for pool in jvm.findall('memorypool'):
+ name = pool.get('name')
+ if 'Eden Space' in name:
+ data['eden_used'] = pool.get('usageUsed')
+ data['eden_committed'] = pool.get('usageCommitted')
+ data['eden_max'] = pool.get('usageMax')
+ elif 'Survivor Space' in name:
+ data['survivor_used'] = pool.get('usageUsed')
+ data['survivor_committed'] = pool.get('usageCommitted')
+ data['survivor_max'] = pool.get('usageMax')
+ elif 'Tenured Gen' in name or 'Old Gen' in name:
+ data['tenured_used'] = pool.get('usageUsed')
+ data['tenured_committed'] = pool.get('usageCommitted')
+ data['tenured_max'] = pool.get('usageMax')
+ elif name == 'Code Cache':
+ data['code_cache_used'] = pool.get('usageUsed')
+ data['code_cache_committed'] = pool.get('usageCommitted')
+ data['code_cache_max'] = pool.get('usageMax')
+ elif name == 'Compressed':
+ data['compressed_used'] = pool.get('usageUsed')
+ data['compressed_committed'] = pool.get('usageCommitted')
+ data['compressed_max'] = pool.get('usageMax')
+ elif name == 'Metaspace':
+ data['metaspace_used'] = pool.get('usageUsed')
+ data['metaspace_committed'] = pool.get('usageCommitted')
+ data['metaspace_max'] = pool.get('usageMax')
+
+ if connector is not None:
+ thread_info = connector.find('threadInfo')
+ data['currentThreadsBusy'] = thread_info.get('currentThreadsBusy')
+ data['currentThreadCount'] = thread_info.get('currentThreadCount')
+
+ request_info = connector.find('requestInfo')
+ data['processingTime'] = request_info.get('processingTime')
+ data['requestCount'] = request_info.get('requestCount')
+ data['errorCount'] = request_info.get('errorCount')
+ data['bytesReceived'] = request_info.get('bytesReceived')
+ data['bytesSent'] = request_info.get('bytesSent')
+
+ return data or None
diff --git a/collectors/python.d.plugin/tomcat/tomcat.conf b/collectors/python.d.plugin/tomcat/tomcat.conf
new file mode 100644
index 0000000..009591b
--- /dev/null
+++ b/collectors/python.d.plugin/tomcat/tomcat.conf
@@ -0,0 +1,89 @@
+# netdata python.d.plugin configuration for tomcat
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# Additionally to the above, tomcat also supports the following:
+#
+# url: 'URL' # the URL to fetch tomcat's status stats
+#
+# if the URL is password protected, the following are supported:
+#
+# user: 'username'
+# pass: 'password'
+#
+# if you have multiple connectors, the following are supported:
+#
+# connector_name: 'ajp-bio-8009' # default is null, which uses the first connector in the status XML
+#
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+
+localhost:
+ name : 'local'
+ url : 'http://localhost:8080/manager/status?XML=true'
+
+localipv4:
+ name : 'local'
+ url : 'http://127.0.0.1:8080/manager/status?XML=true'
+
+localipv6:
+ name : 'local'
+ url : 'http://[::1]:8080/manager/status?XML=true'
diff --git a/collectors/python.d.plugin/tor/Makefile.inc b/collectors/python.d.plugin/tor/Makefile.inc
new file mode 100644
index 0000000..5a45f9b
--- /dev/null
+++ b/collectors/python.d.plugin/tor/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += tor/tor.chart.py
+dist_pythonconfig_DATA += tor/tor.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += tor/README.md tor/Makefile.inc
+
diff --git a/collectors/python.d.plugin/tor/README.md b/collectors/python.d.plugin/tor/README.md
new file mode 100644
index 0000000..192a86a
--- /dev/null
+++ b/collectors/python.d.plugin/tor/README.md
@@ -0,0 +1,64 @@
+<!--
+title: "Tor monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/tor/README.md
+sidebar_label: "Tor"
+-->
+
+# Tor monitoring with Netdata
+
+Connects to the Tor control port to collect traffic statistics.
+
+## Requirements
+
+- `tor` program
+- `stem` python package
+
+It produces only one chart:
+
+1. **Traffic**
+
+ - read
+ - write
+
+## Configuration
+
+Edit the `python.d/tor.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/tor.conf
+```
+
+Only `control_port` is required.
+
+Here is an example for a local server:
+
+```yaml
+update_every : 1
+priority : 60000
+
+local_tcp:
+ name: 'local'
+ control_port: 9051
+
+local_socket:
+ name: 'local'
+ control_port: '/var/run/tor/control'
+```
+
+### Prerequisites
+
+Add to `/etc/tor/torrc`:
+
+```
+ControlPort 9051
+```
+
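+If the control port is password-protected, also set `password` in `python.d/tor.conf` and put the matching
+hash in `torrc`. A sketch, assuming a hypothetical password `netdata_pass`:
+
+```
+# generate the hash with: tor --hash-password netdata_pass
+ControlPort 9051
+HashedControlPassword 16:...
+```
+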
+For more options please read the manual.
+
+Without configuration, the module attempts to connect to `127.0.0.1:9051`.
+
+---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Ftor%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/python.d.plugin/tor/tor.chart.py b/collectors/python.d.plugin/tor/tor.chart.py
new file mode 100644
index 0000000..8dc021a
--- /dev/null
+++ b/collectors/python.d.plugin/tor/tor.chart.py
@@ -0,0 +1,107 @@
+# -*- coding: utf-8 -*-
+# Description: tor netdata python.d module
+# Author: Federico Ceratto <federico.ceratto@gmail.com>
+# Author: Ilya Mashchenko (ilyam8)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+
+from bases.FrameworkServices.SimpleService import SimpleService
+
+try:
+ import stem
+ import stem.connection
+ import stem.control
+
+ STEM_AVAILABLE = True
+except ImportError:
+ STEM_AVAILABLE = False
+
+DEF_PORT = 'default'
+
+ORDER = [
+ 'traffic',
+]
+
+CHARTS = {
+ 'traffic': {
+ 'options': [None, 'Tor Traffic', 'KiB/s', 'traffic', 'tor.traffic', 'area'],
+ 'lines': [
+ ['read', 'read', 'incremental', 1, 1024],
+ ['write', 'write', 'incremental', 1, -1024],
+ ]
+ }
+}
+
+
+class Service(SimpleService):
+ """Provide netdata service for Tor"""
+
+ def __init__(self, configuration=None, name=None):
+ super(Service, self).__init__(configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
+ self.port = self.configuration.get('control_port', DEF_PORT)
+ self.password = self.configuration.get('password')
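+        # a control_port given as a non-numeric string (e.g. '/var/run/tor/control') is treated as a unix socket path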
+ self.use_socket = isinstance(self.port, str) and self.port != DEF_PORT and not self.port.isdigit()
+ self.conn = None
+ self.alive = False
+
+ def check(self):
+ if not STEM_AVAILABLE:
+ self.error('the stem library is missing')
+ return False
+
+ return self.connect()
+
+ def get_data(self):
+ if not self.alive and not self.reconnect():
+ return None
+
+ data = dict()
+
+ try:
+ data['read'] = self.conn.get_info('traffic/read')
+ data['write'] = self.conn.get_info('traffic/written')
+ except stem.ControllerError as error:
+ self.debug(error)
+ self.alive = False
+
+ return data or None
+
+ def authenticate(self):
+ try:
+ self.conn.authenticate(password=self.password)
+ except stem.connection.AuthenticationFailure as error:
+ self.error('authentication error: {0}'.format(error))
+ return False
+ return True
+
+ def connect_via_port(self):
+ try:
+ self.conn = stem.control.Controller.from_port(port=self.port)
+ except (stem.SocketError, ValueError) as error:
+ self.error(error)
+
+ def connect_via_socket(self):
+ try:
+ self.conn = stem.control.Controller.from_socket_file(path=self.port)
+ except (stem.SocketError, ValueError) as error:
+ self.error(error)
+
+ def connect(self):
+ if self.conn:
+ self.conn.close()
+ self.conn = None
+
+ if self.use_socket:
+ self.connect_via_socket()
+ else:
+ self.connect_via_port()
+
+ if self.conn and self.authenticate():
+ self.alive = True
+
+ return self.alive
+
+ def reconnect(self):
+ return self.connect()
diff --git a/collectors/python.d.plugin/tor/tor.conf b/collectors/python.d.plugin/tor/tor.conf
new file mode 100644
index 0000000..91b517a
--- /dev/null
+++ b/collectors/python.d.plugin/tor/tor.conf
@@ -0,0 +1,77 @@
+# netdata python.d.plugin configuration for tor
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# Additionally to the above, tor plugin also supports the following:
+#
+# control_port: 'port' # tor control port
+# password: 'password' # tor control password
+#
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+#
+# local_tcp:
+# name: 'local'
+# control_port: 9051
+#
+# local_socket:
+# name: 'local'
+# control_port: '/var/run/tor/control'
diff --git a/collectors/python.d.plugin/traefik/Makefile.inc b/collectors/python.d.plugin/traefik/Makefile.inc
new file mode 100644
index 0000000..926d56d
--- /dev/null
+++ b/collectors/python.d.plugin/traefik/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += traefik/traefik.chart.py
+dist_pythonconfig_DATA += traefik/traefik.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += traefik/README.md traefik/Makefile.inc
+
diff --git a/collectors/python.d.plugin/traefik/README.md b/collectors/python.d.plugin/traefik/README.md
new file mode 100644
index 0000000..2a1dd77
--- /dev/null
+++ b/collectors/python.d.plugin/traefik/README.md
@@ -0,0 +1,74 @@
+<!--
+title: "Traefik monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/traefik/README.md
+sidebar_label: "Traefik"
+-->
+
+# Traefik monitoring with Netdata
+
+Uses the `health` API to provide statistics.
+
+It produces:
+
+1. **Responses** by statuses
+
+ - success (1xx, 2xx, 304)
+ - error (5xx)
+ - redirect (3xx except 304)
+ - bad (4xx)
+ - other (all other responses)
+
+2. **Responses** by codes
+
+ - 2xx (successful)
+ - 5xx (internal server errors)
+ - 3xx (redirect)
+ - 4xx (bad)
+ - 1xx (informational)
+ - other (non-standard responses)
+
+3. **Detailed Response Codes** requests/s (number of responses for each response code individually)
+
+4. **Requests**/s
+
+ - request statistics
+
+5. **Total response time**
+
+ - sum of all response times
+
+6. **Average response time**
+
+7. **Average response time per iteration**
+
+8. **Uptime**
+
+ - Traefik server uptime
+
+## Configuration
+
+Edit the `python.d/traefik.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/traefik.conf
+```
+
+Only the `url` to the server's `health` endpoint is required.
+
+Here is an example for a local server:
+
+```yaml
+update_every : 1
+priority : 60000
+
+local:
+ url : 'http://localhost:8080/health'
+```
+
+Without configuration, the module attempts to connect to `http://localhost:8080/health`.
+
+---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Ftraefik%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/python.d.plugin/traefik/traefik.chart.py b/collectors/python.d.plugin/traefik/traefik.chart.py
new file mode 100644
index 0000000..5a49846
--- /dev/null
+++ b/collectors/python.d.plugin/traefik/traefik.chart.py
@@ -0,0 +1,198 @@
+# -*- coding: utf-8 -*-
+# Description: traefik netdata python.d module
+# Author: Alexandre Menezes (@ale_menezes)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from collections import defaultdict
+from json import loads
+
+from bases.FrameworkServices.UrlService import UrlService
+
+ORDER = [
+ 'response_statuses',
+ 'response_codes',
+ 'detailed_response_codes',
+ 'requests',
+ 'total_response_time',
+ 'average_response_time',
+ 'average_response_time_per_iteration',
+ 'uptime'
+]
+
+CHARTS = {
+ 'response_statuses': {
+ 'options': [None, 'Response statuses', 'requests/s', 'responses', 'traefik.response_statuses', 'stacked'],
+ 'lines': [
+ ['successful_requests', 'success', 'incremental'],
+ ['server_errors', 'error', 'incremental'],
+ ['redirects', 'redirect', 'incremental'],
+ ['bad_requests', 'bad', 'incremental'],
+ ['other_requests', 'other', 'incremental']
+ ]
+ },
+ 'response_codes': {
+ 'options': [None, 'Responses by codes', 'requests/s', 'responses', 'traefik.response_codes', 'stacked'],
+ 'lines': [
+ ['2xx', None, 'incremental'],
+ ['5xx', None, 'incremental'],
+ ['3xx', None, 'incremental'],
+ ['4xx', None, 'incremental'],
+ ['1xx', None, 'incremental'],
+ ['other', None, 'incremental']
+ ]
+ },
+ 'detailed_response_codes': {
+ 'options': [None, 'Detailed response codes', 'requests/s', 'responses', 'traefik.detailed_response_codes',
+ 'stacked'],
+ 'lines': []
+ },
+ 'requests': {
+ 'options': [None, 'Requests', 'requests/s', 'requests', 'traefik.requests', 'line'],
+ 'lines': [
+ ['total_count', 'requests', 'incremental']
+ ]
+ },
+ 'total_response_time': {
+ 'options': [None, 'Total response time', 'seconds', 'timings', 'traefik.total_response_time', 'line'],
+ 'lines': [
+ ['total_response_time_sec', 'response', 'absolute', 1, 10000]
+ ]
+ },
+ 'average_response_time': {
+ 'options': [None, 'Average response time', 'milliseconds', 'timings', 'traefik.average_response_time', 'line'],
+ 'lines': [
+ ['average_response_time_sec', 'response', 'absolute', 1, 1000]
+ ]
+ },
+ 'average_response_time_per_iteration': {
+ 'options': [None, 'Average response time per iteration', 'milliseconds', 'timings',
+ 'traefik.average_response_time_per_iteration', 'line'],
+ 'lines': [
+ ['average_response_time_per_iteration_sec', 'response', 'incremental', 1, 10000]
+ ]
+ },
+ 'uptime': {
+ 'options': [None, 'Uptime', 'seconds', 'uptime', 'traefik.uptime', 'line'],
+ 'lines': [
+ ['uptime_sec', 'uptime', 'absolute']
+ ]
+ }
+}
+
+HEALTH_STATS = [
+ 'uptime_sec',
+ 'average_response_time_sec',
+ 'total_response_time_sec',
+ 'total_count',
+ 'total_status_code_count'
+]
+
+
+class Service(UrlService):
+ def __init__(self, configuration=None, name=None):
+ UrlService.__init__(self, configuration=configuration, name=name)
+ self.url = self.configuration.get('url', 'http://localhost:8080/health')
+ self.order = ORDER
+ self.definitions = CHARTS
+ self.last_total_response_time = 0
+ self.last_total_count = 0
+ self.data = {
+ 'successful_requests': 0,
+ 'redirects': 0,
+ 'bad_requests': 0,
+ 'server_errors': 0,
+ 'other_requests': 0,
+ '1xx': 0,
+ '2xx': 0,
+ '3xx': 0,
+ '4xx': 0,
+ '5xx': 0,
+ 'other': 0,
+ 'average_response_time_per_iteration_sec': 0,
+ }
+
+ def _get_data(self):
+ data = self._get_raw_data()
+
+ if not data:
+ return None
+
+ data = loads(data)
+
+ self.get_data_per_code_status(raw_data=data)
+
+ self.get_data_per_code_family(raw_data=data)
+
+ self.get_data_per_code(raw_data=data)
+
+ self.data.update(fetch_data_(raw_data=data, metrics=HEALTH_STATS))
+
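+        # scale the stored values so they line up with the divisors declared in CHARTS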
+ self.data['average_response_time_sec'] *= 1000000
+ self.data['total_response_time_sec'] *= 10000
+ if data['total_count'] != self.last_total_count:
+ self.data['average_response_time_per_iteration_sec'] = \
+ (data['total_response_time_sec'] - self.last_total_response_time) * \
+ 1000000 / (data['total_count'] - self.last_total_count)
+ else:
+ self.data['average_response_time_per_iteration_sec'] = 0
+ self.last_total_response_time = data['total_response_time_sec']
+ self.last_total_count = data['total_count']
+
+ return self.data or None
+
+ def get_data_per_code_status(self, raw_data):
+ data = defaultdict(int)
+ for code, value in raw_data['total_status_code_count'].items():
+ code_prefix = code[0]
+ if code_prefix == '1' or code_prefix == '2' or code == '304':
+ data['successful_requests'] += value
+ elif code_prefix == '3':
+ data['redirects'] += value
+ elif code_prefix == '4':
+ data['bad_requests'] += value
+ elif code_prefix == '5':
+ data['server_errors'] += value
+ else:
+ data['other_requests'] += value
+ self.data.update(data)
+
+ def get_data_per_code_family(self, raw_data):
+ data = defaultdict(int)
+ for code, value in raw_data['total_status_code_count'].items():
+ code_prefix = code[0]
+ if code_prefix == '1':
+ data['1xx'] += value
+ elif code_prefix == '2':
+ data['2xx'] += value
+ elif code_prefix == '3':
+ data['3xx'] += value
+ elif code_prefix == '4':
+ data['4xx'] += value
+ elif code_prefix == '5':
+ data['5xx'] += value
+ else:
+ data['other'] += value
+ self.data.update(data)
+
+ def get_data_per_code(self, raw_data):
+ for code, value in raw_data['total_status_code_count'].items():
+ if self.charts:
+ if code not in self.data:
+ self.charts['detailed_response_codes'].add_dimension([code, code, 'incremental'])
+ self.data[code] = value
+
+
+def fetch_data_(raw_data, metrics):
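+    # each entry in `metrics` may name a dotted path into the JSON document;
+    # metrics missing from the response are skipped instead of raising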
+ data = dict()
+
+ for metric in metrics:
+ value = raw_data
+ metrics_list = metric.split('.')
+ try:
+ for m in metrics_list:
+ value = value[m]
+ except KeyError:
+ continue
+ data['_'.join(metrics_list)] = value
+
+ return data
diff --git a/collectors/python.d.plugin/traefik/traefik.conf b/collectors/python.d.plugin/traefik/traefik.conf
new file mode 100644
index 0000000..e3f182d
--- /dev/null
+++ b/collectors/python.d.plugin/traefik/traefik.conf
@@ -0,0 +1,77 @@
+# netdata python.d.plugin configuration for traefik health data API
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# Additionally to the above, traefik plugin also supports the following:
+#
+# url: '<scheme>://<host>:<port>/<health_page_api>'
+# # http://localhost:8080/health
+#
+# if the URL is password protected, the following are supported:
+#
+# user: 'username'
+# pass: 'password'
+#
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+#
+local:
+ url: 'http://localhost:8080/health'
diff --git a/collectors/python.d.plugin/uwsgi/Makefile.inc b/collectors/python.d.plugin/uwsgi/Makefile.inc
new file mode 100644
index 0000000..75d96de
--- /dev/null
+++ b/collectors/python.d.plugin/uwsgi/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += uwsgi/uwsgi.chart.py
+dist_pythonconfig_DATA += uwsgi/uwsgi.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += uwsgi/README.md uwsgi/Makefile.inc
+
diff --git a/collectors/python.d.plugin/uwsgi/README.md b/collectors/python.d.plugin/uwsgi/README.md
new file mode 100644
index 0000000..f564821
--- /dev/null
+++ b/collectors/python.d.plugin/uwsgi/README.md
@@ -0,0 +1,52 @@
+<!--
+title: "uWSGI monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/uwsgi/README.md
+sidebar_label: "uWSGI"
+-->
+
+# uWSGI monitoring with Netdata
+
+Monitors performance metrics exposed by the uWSGI [`Stats Server`](https://uwsgi-docs.readthedocs.io/en/latest/StatsServer.html).
+
+The following charts are drawn:
+
+1. **Requests**
+
+ - requests per second
+ - transmitted data
+ - average request time
+
+2. **Memory**
+
+ - rss
+ - vsz
+
+3. **Exceptions**
+4. **Harakiris**
+5. **Respawns**
+
+## Configuration
+
+Edit the `python.d/uwsgi.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/uwsgi.conf
+```
+
+```yaml
+socket:
+ name : 'local'
+ socket : '/tmp/stats.socket'
+
+localhost:
+ name : 'local'
+ host : 'localhost'
+ port : 1717
+```
+
+When no configuration file is found, the module tries to connect to the TCP/IP socket `localhost:1717`.
+
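+The stats server must be enabled on the uWSGI side. A sketch of a uWSGI ini configuration, following the
+linked Stats Server documentation:
+
+```
+[uwsgi]
+stats = 127.0.0.1:1717
+```
+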
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fuwsgi%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/python.d.plugin/uwsgi/uwsgi.chart.py b/collectors/python.d.plugin/uwsgi/uwsgi.chart.py
new file mode 100644
index 0000000..e4d9000
--- /dev/null
+++ b/collectors/python.d.plugin/uwsgi/uwsgi.chart.py
@@ -0,0 +1,177 @@
+# -*- coding: utf-8 -*-
+# Description: uwsgi netdata python.d module
+# Author: Robbert Segeren (robbert-ef)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import json
+from copy import deepcopy
+
+from bases.FrameworkServices.SocketService import SocketService
+
+ORDER = [
+ 'requests',
+ 'tx',
+ 'avg_rt',
+ 'memory_rss',
+ 'memory_vsz',
+ 'exceptions',
+ 'harakiri',
+ 'respawn',
+]
+
+DYNAMIC_CHARTS = [
+ 'requests',
+ 'tx',
+ 'avg_rt',
+ 'memory_rss',
+ 'memory_vsz',
+]
+
+# NOTE: lines are created dynamically in `check()` method
+CHARTS = {
+ 'requests': {
+ 'options': [None, 'Requests', 'requests/s', 'requests', 'uwsgi.requests', 'stacked'],
+ 'lines': [
+ ['requests', 'requests', 'incremental']
+ ]
+ },
+ 'tx': {
+ 'options': [None, 'Transmitted data', 'KiB/s', 'requests', 'uwsgi.tx', 'stacked'],
+ 'lines': [
+ ['tx', 'tx', 'incremental']
+ ]
+ },
+ 'avg_rt': {
+ 'options': [None, 'Average request time', 'milliseconds', 'requests', 'uwsgi.avg_rt', 'line'],
+ 'lines': [
+ ['avg_rt', 'avg_rt', 'absolute']
+ ]
+ },
+ 'memory_rss': {
+ 'options': [None, 'RSS (Resident Set Size)', 'MiB', 'memory', 'uwsgi.memory_rss', 'stacked'],
+ 'lines': [
+ ['memory_rss', 'memory_rss', 'absolute', 1, 1 << 20]
+ ]
+ },
+ 'memory_vsz': {
+ 'options': [None, 'VSZ (Virtual Memory Size)', 'MiB', 'memory', 'uwsgi.memory_vsz', 'stacked'],
+ 'lines': [
+ ['memory_vsz', 'memory_vsz', 'absolute', 1, 1 << 20]
+ ]
+ },
+ 'exceptions': {
+ 'options': [None, 'Exceptions', 'exceptions', 'exceptions', 'uwsgi.exceptions', 'line'],
+ 'lines': [
+ ['exceptions', 'exceptions', 'incremental']
+ ]
+ },
+ 'harakiri': {
+ 'options': [None, 'Harakiris', 'harakiris', 'harakiris', 'uwsgi.harakiris', 'line'],
+ 'lines': [
+ ['harakiri_count', 'harakiris', 'incremental']
+ ]
+ },
+ 'respawn': {
+ 'options': [None, 'Respawns', 'respawns', 'respawns', 'uwsgi.respawns', 'line'],
+ 'lines': [
+ ['respawn_count', 'respawns', 'incremental']
+ ]
+ },
+}
+
+
+class Service(SocketService):
+ def __init__(self, configuration=None, name=None):
+ super(Service, self).__init__(configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = deepcopy(CHARTS)
+ self.url = self.configuration.get('host', 'localhost')
+ self.port = self.configuration.get('port', 1717)
+ # Clear dynamic dimensions, these are added during `_get_data()` to allow adding workers at run-time
+ for chart in DYNAMIC_CHARTS:
+ self.definitions[chart]['lines'] = []
+ self.last_result = {}
+ self.workers = []
+
+ def read_data(self):
+ """
+ Read data from socket and parse as JSON.
+ :return: (dict) stats
+ """
+ raw_data = self._get_raw_data()
+ if not raw_data:
+ return None
+ try:
+ return json.loads(raw_data)
+ except ValueError as err:
+ self.error(err)
+ return None
+
+ def check(self):
+ """
+ Parse configuration and check if we can read data.
+ :return: boolean
+ """
+ self._parse_config()
+ return bool(self.read_data())
+
+ def add_worker_dimensions(self, key):
+ """
+ Helper to add dimensions for a worker.
+ :param key: (int or str) worker identifier
+ :return:
+ """
+ for chart in DYNAMIC_CHARTS:
+ for line in CHARTS[chart]['lines']:
+ dimension_id = '{}_{}'.format(line[0], key)
+ dimension_name = str(key)
+
+ dimension = [dimension_id, dimension_name] + line[2:]
+ self.charts[chart].add_dimension(dimension)
+
+ @staticmethod
+ def _check_raw_data(data):
+ # The server will close the connection when it's done sending
+ # data, so just keep looping until that happens.
+ return False
+
+ def _get_data(self):
+ """
+ Read data from socket
+ :return: dict
+ """
+ stats = self.read_data()
+ if not stats:
+ return None
+
+ result = {
+ 'exceptions': 0,
+ 'harakiri_count': 0,
+ 'respawn_count': 0,
+ }
+
+ for worker in stats['workers']:
+ key = worker['pid']
+
+ # Add dimensions for new workers
+ if key not in self.workers:
+ self.add_worker_dimensions(key)
+ self.workers.append(key)
+
+ result['requests_{}'.format(key)] = worker['requests']
+ result['tx_{}'.format(key)] = worker['tx']
+ result['avg_rt_{}'.format(key)] = worker['avg_rt']
+
+ # avg_rt is not reset by uwsgi, so reset here
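+            # (an unchanged request counter means the worker served nothing since the
+            # previous poll, so the stale average would otherwise be re-reported)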
+ if self.last_result.get('requests_{}'.format(key)) == worker['requests']:
+ result['avg_rt_{}'.format(key)] = 0
+
+ result['memory_rss_{}'.format(key)] = worker['rss']
+ result['memory_vsz_{}'.format(key)] = worker['vsz']
+
+ result['exceptions'] += worker['exceptions']
+ result['harakiri_count'] += worker['harakiri_count']
+ result['respawn_count'] += worker['respawn_count']
+
+ self.last_result = result
+ return result
diff --git a/collectors/python.d.plugin/uwsgi/uwsgi.conf b/collectors/python.d.plugin/uwsgi/uwsgi.conf
new file mode 100644
index 0000000..7d09e73
--- /dev/null
+++ b/collectors/python.d.plugin/uwsgi/uwsgi.conf
@@ -0,0 +1,92 @@
+# netdata python.d.plugin configuration for uwsgi
+#
+# This file is in YAML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# In addition to the above, uwsgi also supports the following:
+#
+# socket: 'path/to/uwsgistats.sock'
+#
+# or
+# host: 'IP or HOSTNAME' # the host to connect to
+# port: PORT # the port to connect to
+#
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+#
+
+socket:
+ name : 'local'
+ socket : '/tmp/stats.socket'
+
+localhost:
+ name : 'local'
+ host : 'localhost'
+ port : 1717
+
+localipv4:
+ name : 'local'
+ host : '127.0.0.1'
+ port : 1717
+
+localipv6:
+ name : 'local'
+ host : '::1'
+ port : 1717
diff --git a/collectors/python.d.plugin/varnish/Makefile.inc b/collectors/python.d.plugin/varnish/Makefile.inc
new file mode 100644
index 0000000..2469b05
--- /dev/null
+++ b/collectors/python.d.plugin/varnish/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += varnish/varnish.chart.py
+dist_pythonconfig_DATA += varnish/varnish.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += varnish/README.md varnish/Makefile.inc
+
diff --git a/collectors/python.d.plugin/varnish/README.md b/collectors/python.d.plugin/varnish/README.md
new file mode 100644
index 0000000..cb29738
--- /dev/null
+++ b/collectors/python.d.plugin/varnish/README.md
@@ -0,0 +1,65 @@
+<!--
+title: "Varnish Cache monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/varnish/README.md
+sidebar_label: "Varnish Cache"
+-->
+
+# Varnish Cache monitoring with Netdata
+
+Provides global HTTP accelerator, backend (VBE), and storage (SMF, SMA, MSE) statistics using the `varnishstat` tool.
+
+Note that both Varnish-Cache (free and open source) and Varnish-Plus (the commercial/enterprise version) are supported.
+
+## Requirements
+
+- `netdata` user must be a member of the `varnish` group
+
+## Charts
+
+This module produces the following charts:
+
+- Connections Statistics in `connections/s`
+- Client Requests in `requests/s`
+- All History Hit Rate Ratio in `percent`
+- Current Poll Hit Rate Ratio in `percent`
+- Expired Objects in `expired/s`
+- Least Recently Used Nuked Objects in `nuked/s`
+- Number Of Threads In All Pools in `pools`
+- Threads Statistics in `threads/s`
+- Current Queue Length in `requests`
+- Backend Connections Statistics in `connections/s`
+- Requests To The Backend in `requests/s`
+- ESI Statistics in `problems/s`
+- Memory Usage in `MiB`
+- Uptime in `seconds`
+
+For every backend (VBE):
+
+- Backend Response Statistics in `kilobits/s`
+
+For every storage (SMF, SMA, or MSE):
+
+- Storage Usage in `KiB`
+- Storage Allocated Objects
+
+## Configuration
+
+Edit the `python.d/varnish.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/varnish.conf
+```
+
+Only one parameter is supported:
+
+```yaml
+instance_name: 'name'
+```
+
+The name of the `varnishd` instance to get stats from. If not specified, the host name is used.
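+
+Under the hood, the module runs `varnishstat -1` (adding `-n <instance_name>` when
+configured) and parses the counters from its output. A rough standalone sketch of
+that approach (the pattern below is illustrative, not the module's exact one):
+
+```python
+import re
+import subprocess
+
+# One-shot, machine-readable dump of all counters.
+output = subprocess.check_output(['varnishstat', '-1'], text=True)
+
+# Illustrative pattern: optional 'MAIN.'-style prefix, counter name, value.
+stat = re.compile(r'^([A-Z]+\.)?([\w.]+)\s+(\d+)', re.MULTILINE)
+counters = {name: int(value) for _, name, value in stat.findall(output)}
+
+print(counters.get('client_req'), counters.get('cache_hit'))
+```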
+
+---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fvarnish%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/python.d.plugin/varnish/varnish.chart.py b/collectors/python.d.plugin/varnish/varnish.chart.py
new file mode 100644
index 0000000..534d709
--- /dev/null
+++ b/collectors/python.d.plugin/varnish/varnish.chart.py
@@ -0,0 +1,385 @@
+# -*- coding: utf-8 -*-
+# Description: varnish netdata python.d module
+# Author: ilyam8
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import re
+
+from bases.FrameworkServices.ExecutableService import ExecutableService
+from bases.collection import find_binary
+
+ORDER = [
+ 'session_connections',
+ 'client_requests',
+ 'all_time_hit_rate',
+ 'current_poll_hit_rate',
+ 'cached_objects_expired',
+ 'cached_objects_nuked',
+ 'threads_total',
+ 'threads_statistics',
+ 'threads_queue_len',
+ 'backend_connections',
+ 'backend_requests',
+ 'esi_statistics',
+ 'memory_usage',
+ 'uptime'
+]
+
+CHARTS = {
+ 'session_connections': {
+ 'options': [None, 'Connections Statistics', 'connections/s',
+ 'client metrics', 'varnish.session_connection', 'line'],
+ 'lines': [
+ ['sess_conn', 'accepted', 'incremental'],
+ ['sess_dropped', 'dropped', 'incremental']
+ ]
+ },
+ 'client_requests': {
+ 'options': [None, 'Client Requests', 'requests/s',
+ 'client metrics', 'varnish.client_requests', 'line'],
+ 'lines': [
+ ['client_req', 'received', 'incremental']
+ ]
+ },
+ 'all_time_hit_rate': {
+ 'options': [None, 'All History Hit Rate Ratio', 'percentage', 'cache performance',
+ 'varnish.all_time_hit_rate', 'stacked'],
+ 'lines': [
+ ['cache_hit', 'hit', 'percentage-of-absolute-row'],
+ ['cache_miss', 'miss', 'percentage-of-absolute-row'],
+ ['cache_hitpass', 'hitpass', 'percentage-of-absolute-row']]
+ },
+ 'current_poll_hit_rate': {
+ 'options': [None, 'Current Poll Hit Rate Ratio', 'percentage', 'cache performance',
+ 'varnish.current_poll_hit_rate', 'stacked'],
+ 'lines': [
+ ['cache_hit', 'hit', 'percentage-of-incremental-row'],
+ ['cache_miss', 'miss', 'percentage-of-incremental-row'],
+ ['cache_hitpass', 'hitpass', 'percentage-of-incremental-row']
+ ]
+ },
+ 'cached_objects_expired': {
+ 'options': [None, 'Expired Objects', 'expired/s', 'cache performance',
+ 'varnish.cached_objects_expired', 'line'],
+ 'lines': [
+ ['n_expired', 'objects', 'incremental']
+ ]
+ },
+ 'cached_objects_nuked': {
+ 'options': [None, 'Least Recently Used Nuked Objects', 'nuked/s', 'cache performance',
+ 'varnish.cached_objects_nuked', 'line'],
+ 'lines': [
+ ['n_lru_nuked', 'objects', 'incremental']
+ ]
+ },
+ 'threads_total': {
+ 'options': [None, 'Number Of Threads In All Pools', 'number', 'thread related metrics',
+ 'varnish.threads_total', 'line'],
+ 'lines': [
+ ['threads', None, 'absolute']
+ ]
+ },
+ 'threads_statistics': {
+ 'options': [None, 'Threads Statistics', 'threads/s', 'thread related metrics',
+ 'varnish.threads_statistics', 'line'],
+ 'lines': [
+ ['threads_created', 'created', 'incremental'],
+ ['threads_failed', 'failed', 'incremental'],
+ ['threads_limited', 'limited', 'incremental']
+ ]
+ },
+ 'threads_queue_len': {
+ 'options': [None, 'Current Queue Length', 'requests', 'thread related metrics',
+ 'varnish.threads_queue_len', 'line'],
+ 'lines': [
+ ['thread_queue_len', 'in queue']
+ ]
+ },
+ 'backend_connections': {
+ 'options': [None, 'Backend Connections Statistics', 'connections/s', 'backend metrics',
+ 'varnish.backend_connections', 'line'],
+ 'lines': [
+ ['backend_conn', 'successful', 'incremental'],
+ ['backend_unhealthy', 'unhealthy', 'incremental'],
+ ['backend_reuse', 'reused', 'incremental'],
+ ['backend_toolate', 'closed', 'incremental'],
+ ['backend_recycle', 'recycled', 'incremental'],
+ ['backend_fail', 'failed', 'incremental']
+ ]
+ },
+ 'backend_requests': {
+ 'options': [None, 'Requests To The Backend', 'requests/s', 'backend metrics',
+ 'varnish.backend_requests', 'line'],
+ 'lines': [
+ ['backend_req', 'sent', 'incremental']
+ ]
+ },
+ 'esi_statistics': {
+ 'options': [None, 'ESI Statistics', 'problems/s', 'esi related metrics', 'varnish.esi_statistics', 'line'],
+ 'lines': [
+ ['esi_errors', 'errors', 'incremental'],
+ ['esi_warnings', 'warnings', 'incremental']
+ ]
+ },
+ 'memory_usage': {
+ 'options': [None, 'Memory Usage', 'MiB', 'memory usage', 'varnish.memory_usage', 'stacked'],
+ 'lines': [
+ ['memory_free', 'free', 'absolute', 1, 1 << 20],
+ ['memory_allocated', 'allocated', 'absolute', 1, 1 << 20]]
+ },
+ 'uptime': {
+ 'lines': [
+ ['uptime', None, 'absolute']
+ ],
+ 'options': [None, 'Uptime', 'seconds', 'uptime', 'varnish.uptime', 'line']
+ }
+}
+
+
+def backend_charts_template(name):
+ order = [
+ '{0}_response_statistics'.format(name),
+ ]
+
+ charts = {
+ order[0]: {
+ 'options': [None, 'Backend "{0}"'.format(name), 'kilobits/s', 'backend response statistics',
+ 'varnish.backend', 'area'],
+ 'lines': [
+ ['{0}_beresp_hdrbytes'.format(name), 'header', 'incremental', 8, 1000],
+ ['{0}_beresp_bodybytes'.format(name), 'body', 'incremental', -8, 1000]
+ ]
+ },
+ }
+
+ return order, charts
+
+
+def storage_charts_template(name):
+ order = [
+ 'storage_{0}_usage'.format(name),
+ 'storage_{0}_alloc_objs'.format(name)
+ ]
+
+ charts = {
+ order[0]: {
+ 'options': [None, 'Storage "{0}" Usage'.format(name), 'KiB', 'storage usage', 'varnish.storage_usage', 'stacked'],
+ 'lines': [
+ ['{0}.g_space'.format(name), 'free', 'absolute', 1, 1 << 10],
+ ['{0}.g_bytes'.format(name), 'allocated', 'absolute', 1, 1 << 10]
+ ]
+ },
+ order[1]: {
+ 'options': [None, 'Storage "{0}" Allocated Objects'.format(name), 'objects', 'storage usage', 'varnish.storage_alloc_objs', 'line'],
+ 'lines': [
+ ['{0}.g_alloc'.format(name), 'allocated', 'absolute']
+ ]
+ }
+ }
+
+ return order, charts
+
+
+VARNISHSTAT = 'varnishstat'
+
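+# Matches the version in 'varnishstat -V' output,
+# e.g. 'varnishstat (varnish-6.0.0 revision ...)' or a 'varnish-plus-4.1.10' build.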
+re_version = re.compile(r'varnish-(?:plus-)?(?P<major>\d+)\.(?P<minor>\d+)\.(?P<patch>\d+)')
+
+
+class VarnishVersion:
+ def __init__(self, major, minor, patch):
+ self.major = major
+ self.minor = minor
+ self.patch = patch
+
+ def __str__(self):
+ return '{0}.{1}.{2}'.format(self.major, self.minor, self.patch)
+
+
+class Parser:
+ _backend_new = re.compile(r'VBE.([\d\w_.]+)\(.*?\).(beresp[\w_]+)\s+(\d+)')
+ _backend_old = re.compile(r'VBE\.[\d\w-]+\.([\w\d_]+).(beresp[\w_]+)\s+(\d+)')
+ _default = re.compile(r'([A-Z]+\.)?([\d\w_.]+)\s+(\d+)')
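+    # Lines these patterns match (illustrative):
+    #   _backend_new: 'VBE.default(127.0.0.1,,8080).beresp_hdrbytes 123'
+    #   _backend_old: 'VBE.boot.default.beresp_hdrbytes 123'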
+
+ def __init__(self):
+ self.re_default = None
+ self.re_backend = None
+
+ def init(self, data):
+ data = ''.join(data)
+ parsed_main = Parser._default.findall(data)
+ if parsed_main:
+ self.re_default = Parser._default
+
+ parsed_backend = Parser._backend_new.findall(data)
+ if parsed_backend:
+ self.re_backend = Parser._backend_new
+ else:
+ parsed_backend = Parser._backend_old.findall(data)
+ if parsed_backend:
+ self.re_backend = Parser._backend_old
+
+ def server_stats(self, data):
+ return self.re_default.findall(''.join(data))
+
+ def backend_stats(self, data):
+ return self.re_backend.findall(''.join(data))
+
+
+class Service(ExecutableService):
+ def __init__(self, configuration=None, name=None):
+ ExecutableService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
+ self.instance_name = configuration.get('instance_name')
+ self.parser = Parser()
+ self.command = None
+ self.collected_vbe = set()
+ self.collected_storages = set()
+
+ def create_command(self):
+ varnishstat = find_binary(VARNISHSTAT)
+
+ if not varnishstat:
+ self.error("can't locate '{0}' binary or binary is not executable by user netdata".format(VARNISHSTAT))
+ return False
+
+ command = [varnishstat, '-V']
+ reply = self._get_raw_data(stderr=True, command=command)
+ if not reply:
+ self.error(
+                "no output from '{0}'. Is varnish running? Not enough privileges?".format(' '.join(command)))
+ return False
+
+ ver = parse_varnish_version(reply)
+ if not ver:
+            self.error("failed to parse reply from '{0}', used regex: '{1}', reply: {2}".format(
+                ' '.join(command), re_version.pattern, reply))
+ return False
+
+ if self.instance_name:
+ self.command = [varnishstat, '-1', '-n', self.instance_name]
+ else:
+ self.command = [varnishstat, '-1']
+
+ if ver.major > 4:
+ self.command.extend(['-t', '1'])
+
+ self.info("varnish version: {0}, will use command: '{1}'".format(ver, ' '.join(self.command)))
+
+ return True
+
+ def check(self):
+ if not self.create_command():
+ return False
+
+ # STDOUT is not empty
+ reply = self._get_raw_data()
+ if not reply:
+ self.error("no output from '{0}'. Is it running? Not enough privileges?".format(' '.join(self.command)))
+ return False
+
+ self.parser.init(reply)
+
+ # Output is parsable
+ if not self.parser.re_default:
+            self.error("can't parse the output...")
+ return False
+
+ return True
+
+ def get_data(self):
+ """
+ Format data received from shell command
+ :return: dict
+ """
+ raw = self._get_raw_data()
+ if not raw:
+ return None
+
+ data = dict()
+ server_stats = self.parser.server_stats(raw)
+ if not server_stats:
+ return None
+
+ stats = dict((param, value) for _, param, value in server_stats)
+ data.update(stats)
+
+ self.get_vbe_backends(data, raw)
+ self.get_storages(server_stats)
+
+ # varnish 5 uses default.g_bytes and default.g_space
+ data['memory_allocated'] = data.get('s0.g_bytes') or data.get('default.g_bytes')
+ data['memory_free'] = data.get('s0.g_space') or data.get('default.g_space')
+
+ return data
+
+ def get_vbe_backends(self, data, raw):
+ if not self.parser.re_backend:
+ return
+ stats = self.parser.backend_stats(raw)
+ if not stats:
+ return
+
+ for (name, param, value) in stats:
+ data['_'.join([name, param])] = value
+ if name in self.collected_vbe:
+ continue
+ self.collected_vbe.add(name)
+ self.add_backend_charts(name)
+
+ def get_storages(self, server_stats):
+ # Storage types:
+ # - SMF: File Storage
+ # - SMA: Malloc Storage
+ # - MSE: Massive Storage Engine (Varnish-Plus only)
+ #
+ # Stats example:
+ # [('SMF.', 'ssdStorage.c_req', '47686'),
+ # ('SMF.', 'ssdStorage.c_fail', '0'),
+ # ('SMF.', 'ssdStorage.c_bytes', '668102656'),
+ # ('SMF.', 'ssdStorage.c_freed', '140980224'),
+ # ('SMF.', 'ssdStorage.g_alloc', '39753'),
+ # ('SMF.', 'ssdStorage.g_bytes', '527122432'),
+ # ('SMF.', 'ssdStorage.g_space', '53159968768'),
+ # ('SMF.', 'ssdStorage.g_smf', '40130'),
+ # ('SMF.', 'ssdStorage.g_smf_frag', '311'),
+ # ('SMF.', 'ssdStorage.g_smf_large', '66')]
+ storages = [name for typ, name, _ in server_stats if typ.startswith(('SMF', 'SMA', 'MSE')) and name.endswith('g_space')]
+ if not storages:
+ return
+ for storage in storages:
+ storage = storage.split('.')[0]
+ if storage in self.collected_storages:
+ continue
+ self.collected_storages.add(storage)
+ self.add_storage_charts(storage)
+
+ def add_backend_charts(self, backend_name):
+ self.add_charts(backend_name, backend_charts_template)
+
+ def add_storage_charts(self, storage_name):
+ self.add_charts(storage_name, storage_charts_template)
+
+ def add_charts(self, name, charts_template):
+ order, charts = charts_template(name)
+
+ for chart_name in order:
+ params = [chart_name] + charts[chart_name]['options']
+ dimensions = charts[chart_name]['lines']
+
+ new_chart = self.charts.add_chart(params)
+ for dimension in dimensions:
+ new_chart.add_dimension(dimension)
+
+
+def parse_varnish_version(lines):
+ m = re_version.search(lines[0])
+ if not m:
+ return None
+
+ m = m.groupdict()
+ return VarnishVersion(
+ int(m['major']),
+ int(m['minor']),
+ int(m['patch']),
+ )
diff --git a/collectors/python.d.plugin/varnish/varnish.conf b/collectors/python.d.plugin/varnish/varnish.conf
new file mode 100644
index 0000000..54bfe4d
--- /dev/null
+++ b/collectors/python.d.plugin/varnish/varnish.conf
@@ -0,0 +1,66 @@
+# netdata python.d.plugin configuration for varnish
+#
+# This file is in YAML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# In addition to the above, varnish also supports the following:
+#
+# instance_name: 'name' # the name of the varnishd instance to get stats from. If not specified, the host name is used.
+#
+# ----------------------------------------------------------------------
diff --git a/collectors/python.d.plugin/w1sensor/Makefile.inc b/collectors/python.d.plugin/w1sensor/Makefile.inc
new file mode 100644
index 0000000..bddf146
--- /dev/null
+++ b/collectors/python.d.plugin/w1sensor/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += w1sensor/w1sensor.chart.py
+dist_pythonconfig_DATA += w1sensor/w1sensor.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += w1sensor/README.md w1sensor/Makefile.inc
+
diff --git a/collectors/python.d.plugin/w1sensor/README.md b/collectors/python.d.plugin/w1sensor/README.md
new file mode 100644
index 0000000..31facef
--- /dev/null
+++ b/collectors/python.d.plugin/w1sensor/README.md
@@ -0,0 +1,28 @@
+<!--
+title: "1-Wire Sensors monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/w1sensor/README.md
+sidebar_label: "1-Wire sensors"
+-->
+
+# 1-Wire Sensors monitoring with Netdata
+
+Monitors the temperature of connected 1-Wire sensors.
+
+On Linux, these are supported by the `wire`, `w1_gpio`, and `w1_therm` kernel modules.
+Currently only temperature sensors are supported; they are detected automatically.
+
+Charts are created dynamically based on the number of detected sensors.
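+
+Reading a sensor by hand shows exactly what the module collects. A minimal sketch
+(the device id `28-000005e2fdc3` below is a made-up example):
+
+```python
+import re
+
+# Each detected sensor exposes its reading under /sys/bus/w1/devices/<id>/w1_slave
+DEVICE = '/sys/bus/w1/devices/28-000005e2fdc3/w1_slave'
+
+with open(DEVICE) as f:
+    for line in f:
+        match = re.search(r' t=(-?\d+)', line)  # temperature in milli-degrees Celsius
+        if match:
+            print(int(match.group(1)) / 1000.0)  # e.g. 21.437
+```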
+
+## Configuration
+
+Edit the `python.d/w1sensor.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/w1sensor.conf
+```
+
+---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fw1sensor%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/python.d.plugin/w1sensor/w1sensor.chart.py b/collectors/python.d.plugin/w1sensor/w1sensor.chart.py
new file mode 100644
index 0000000..c4f847b
--- /dev/null
+++ b/collectors/python.d.plugin/w1sensor/w1sensor.chart.py
@@ -0,0 +1,97 @@
+# -*- coding: utf-8 -*-
+# Description: 1-wire temperature monitor netdata python.d module
+# Author: Diomidis Spinellis <http://www.spinellis.gr>
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import os
+import re
+
+from bases.FrameworkServices.SimpleService import SimpleService
+
+# default module values (can be overridden per job in `config`)
+update_every = 5
+
+# Location where 1-Wire devices can be found
+W1_DIR = '/sys/bus/w1/devices/'
+
+# Lines matching the following regular expression contain a temperature value
+RE_TEMP = re.compile(r' t=(-?\d+)')  # '-?' allows sub-zero readings
+
+ORDER = [
+ 'temp',
+]
+
+CHARTS = {
+ 'temp': {
+ 'options': [None, '1-Wire Temperature Sensor', 'Celsius', 'Temperature', 'w1sensor.temp', 'line'],
+ 'lines': []
+ }
+}
+
+# Known and supported family members
+# Based on linux/drivers/w1/w1_family.h and w1/slaves/w1_therm.c
+THERM_FAMILY = {
+ '10': 'W1_THERM_DS18S20',
+ '22': 'W1_THERM_DS1822',
+ '28': 'W1_THERM_DS18B20',
+ '3b': 'W1_THERM_DS1825',
+ '42': 'W1_THERM_DS28EA00',
+}
+
+
+class Service(SimpleService):
+ """Provide netdata service for 1-Wire sensors"""
+
+ def __init__(self, configuration=None, name=None):
+ SimpleService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
+ self.probes = []
+
+ def check(self):
+ """Auto-detect available 1-Wire sensors, setting line definitions
+ and probes to be monitored."""
+ try:
+ file_names = os.listdir(W1_DIR)
+ except OSError as err:
+ self.error(err)
+ return False
+
+ lines = []
+ for file_name in file_names:
+ if file_name[2] != '-':
+ continue
+            if file_name[0:2] not in THERM_FAMILY:
+ continue
+
+ self.probes.append(file_name)
+ identifier = file_name[3:]
+ name = identifier
+ config_name = self.configuration.get('name_' + identifier)
+ if config_name:
+ name = config_name
+ lines.append(['w1sensor_temp_' + identifier, name, 'absolute',
+ 1, 10])
+ self.definitions['temp']['lines'] = lines
+ return len(self.probes) > 0
+
+ def get_data(self):
+ """Return data read from sensors."""
+ data = dict()
+
+ for file_name in self.probes:
+ file_path = W1_DIR + file_name + '/w1_slave'
+ identifier = file_name[3:]
+ try:
+ with open(file_path, 'r') as device_file:
+ for line in device_file:
+ matched = RE_TEMP.search(line)
+ if matched:
+ # Round to one decimal digit to filter-out noise
+ value = round(int(matched.group(1)) / 1000., 1)
+ value = int(value * 10)
+ data['w1sensor_temp_' + identifier] = value
+ except (OSError, IOError) as err:
+ self.error(err)
+ continue
+ return data or None
diff --git a/collectors/python.d.plugin/w1sensor/w1sensor.conf b/collectors/python.d.plugin/w1sensor/w1sensor.conf
new file mode 100644
index 0000000..1727100
--- /dev/null
+++ b/collectors/python.d.plugin/w1sensor/w1sensor.conf
@@ -0,0 +1,72 @@
+# netdata python.d.plugin configuration for w1sensor
+#
+# This file is in YAML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 5
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 5 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# In addition to the above, w1sensor also supports the following:
+#
+# name_<1-Wire id>: '<human readable name>'
+# This allows associating a human readable name with a sensor's 1-Wire
+# identifier. Example:
+# name_00000022276e: 'Machine room'
+# name_00000022298f: 'Rack 12'
+#
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
diff --git a/collectors/python.d.plugin/web_log/Makefile.inc b/collectors/python.d.plugin/web_log/Makefile.inc
new file mode 100644
index 0000000..8931159
--- /dev/null
+++ b/collectors/python.d.plugin/web_log/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += web_log/web_log.chart.py
+dist_pythonconfig_DATA += web_log/web_log.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += web_log/README.md web_log/Makefile.inc
+
diff --git a/collectors/python.d.plugin/web_log/README.md b/collectors/python.d.plugin/web_log/README.md
new file mode 100644
index 0000000..2cf60ed
--- /dev/null
+++ b/collectors/python.d.plugin/web_log/README.md
@@ -0,0 +1,219 @@
+<!--
+title: "Web server log (Apache, NGINX, Squid) monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/web_log/README.md
+sidebar_label: "Web server logs (Apache, NGINX, Squid)"
+-->
+
+# Web server log (Apache, NGINX, Squid) monitoring with Netdata
+
+Tails the access log file and collects web server / caching proxy metrics.
+
+## Motivation
+
+Web server log files have existed for more than 20 years. All web servers of all kinds, from all vendors, [since the time NCSA httpd was powering the web](https://en.wikipedia.org/wiki/NCSA_HTTPd), produce log files, recording in real-time all accesses to web sites and APIs.
+
+Yet, after the appearance of Google Analytics and similar services, and the recent rise of APM (Application Performance Monitoring) with sophisticated time-series databases that collect and analyze metrics at the application level, all these web server log files are mostly just filling our disks, rotated every night without any use whatsoever.
+
+Netdata turns this "useless" log file into a powerful performance and health monitoring tool, capable of detecting, **in real-time**, the most common web server problems, such as:
+
+- too many redirects (i.e. **oops!** *this should not redirect clients to itself*)
+- too many bad requests (i.e. **oops!** *a few files were not uploaded*)
+- too many internal server errors (i.e. **oops!** *this release crashes too much*)
+- unreasonably many requests (i.e. **oops!** *we are under attack*)
+- unreasonably few requests (i.e. **oops!** *call the network guys*)
+- unreasonably slow responses (i.e. **oops!** *the database is slow again*)
+- too few successful responses (i.e. **oops!** *help us God!*)
+
+## Usage
+
+If Netdata is installed on a system running a web server, it will detect it and automatically present a series of charts, with information obtained from the web server API, like these (*these do not come from the web server log file*):
+
+![image](https://cloud.githubusercontent.com/assets/2662304/22900686/e283f636-f237-11e6-93d2-cbdf63de150c.png)
+*[**netdata**](https://my-netdata.io/) charts based on metrics collected by querying the `nginx` API (i.e. `/stub_status`).*
+
+> [**netdata**](https://my-netdata.io/) supports `apache`, `nginx`, `lighttpd` and `tomcat`. To obtain real-time information from a web server API, the web server needs to expose it. For directions on configuring your web server, check the config files for each web server. There is a directory with a config file for each web server under [`/etc/netdata/python.d/`](../).
+
+## Configuration
+
+Edit the `python.d/web_log.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/web_log.conf
+```
+
+[**netdata**](https://my-netdata.io/) has a powerful `web_log` plugin, capable of incrementally parsing any number of web server log files. This plugin is automatically started with [**netdata**](https://my-netdata.io/) and comes pre-configured for finding web server log files on popular distributions. Its configuration is at `/etc/netdata/python.d/web_log.conf`, like this:
+
+```yaml
+nginx_log:
+ name : 'nginx_log'
+ path : '/var/log/nginx/access.log'
+
+apache_log:
+ name : 'apache_log'
+ path : '/var/log/apache/other_vhosts_access.log'
+ categories:
+ cacti : 'cacti.*'
+ observium : 'observium'
+```
+
+The module has preconfigured jobs for nginx, apache and gunicorn on various distros.
+You can add one such section for each of your web server log files.
+
+> **Important**<br/>Keep in mind [**netdata**](https://my-netdata.io/) runs as user `netdata`. So, make sure user `netdata` has access to the logs directory and can read the log file.
+
+## Charts
+
+Once you have all log files configured and [**netdata**](https://my-netdata.io/) restarted, **for each log file** you will get a section at the [**netdata**](https://my-netdata.io/) dashboard, with the following charts.
+
+### Responses by status
+
+In this chart we tried to provide a meaningful status for all responses. So:
+
+- `success` counts all the valid responses (i.e. `1xx` informational, `2xx` successful and `304` not modified).
+- `error` are `5xx` internal server errors. These are very bad, they mean your web site or API is facing difficulties.
+- `redirect` are `3xx` responses, except `304`. All `3xx` are redirects, but `304` means "not modified" - it tells the browsers the content they already have is still valid and can be used as-is. So, we decided to account it as a successful response.
+- `bad` are bad requests that cannot be served.
+- `other` counts all other, non-standard, types of responses.
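+
+In code terms, the mapping boils down to a handful of range checks. A minimal
+sketch, assuming an integer status code:
+
+```python
+def categorize(status):
+    if status == 304 or 100 <= status < 300:
+        return 'success'   # informational, successful, and "not modified"
+    if 300 <= status < 400:
+        return 'redirect'
+    if 400 <= status < 500:
+        return 'bad'
+    if 500 <= status < 600:
+        return 'error'
+    return 'other'         # non-standard response codes
+```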
+
+![image](https://cloud.githubusercontent.com/assets/2662304/22902194/ea0affc6-f23c-11e6-85f1-a4951dd4bb40.png)
+
+### Responses by type
+
+Then, we group all responses by type, without interpreting their meaning.
+**Response by type** requests/s
+
+- success (1xx, 2xx, 304)
+- error (5xx)
+- redirect (3xx except 304)
+- bad (4xx)
+- other (all other responses)
+
+![image](https://cloud.githubusercontent.com/assets/2662304/22901883/dea7d33a-f23b-11e6-960d-00a913b58936.png)
+
+### Responses by code family
+
+Here we show all the response codes in detail.
+
+**Response by code family** requests/s
+
+- 1xx (informational)
+- 2xx (successful)
+- 3xx (redirect)
+- 4xx (bad)
+- 5xx (internal server errors)
+- other (non-standard responses)
+- unmatched (the lines in the log file that are not matched)
+
+![image](https://cloud.githubusercontent.com/assets/2662304/22901965/1a5d84ba-f23c-11e6-9d38-3deebcc8b879.png)
+
+> **Important**<br/>If your application is using hundreds of non-standard response codes, your browser may become slow while viewing this chart, so we have added a configuration [option to disable this chart](https://github.com/netdata/netdata/blob/419cd0a237275e5eeef3f92dcded84e735ee6c58/conf.d/python.d/web_log.conf#L63).
+
+### Detailed Response Codes
+
+Number of responses for each response code individually (requests/s).
+
+### Bandwidth
+
+This is a nice view of the traffic the web server is receiving and sending.
+
+What is important to know for this chart is that the bandwidth used for each request and response is accounted at the time the log is written. Since [**netdata**](https://my-netdata.io/) refreshes this chart every single second, you may see unrealistic spikes if the size of the requests or responses is too big. The reason is simple: a response may have needed 1 minute to be completed, but all the bandwidth used during that minute for the specific response will be accounted at the second the log line is written.
+
+As the legend on the chart suggests, you can use FireQoS to set up QoS on the web server ports and IPs to accurately measure the bandwidth the web server is using. Actually, [there may be a few more reasons to install QoS on your servers](/collectors/tc.plugin/README.md#tcplugin)...
+
+**Bandwidth** KB/s
+
+- received (bandwidth of requests)
+- sent (bandwidth of responses)
+
+![image](https://cloud.githubusercontent.com/assets/2662304/22902266/245141d6-f23d-11e6-90f9-98729733e0da.png)
+
+> **Important**<br/>Most web servers do not log the request size by default.<br/>So, [unless you have configured your web server to log the size of requests](https://github.com/netdata/netdata/blob/419cd0a237275e5eeef3f92dcded84e735ee6c58/conf.d/python.d/web_log.conf#L76-L89), the `received` dimension will be always zero.
+
+### Timings
+
+[**netdata**](https://my-netdata.io/) will also render the `minimum`, `average` and `maximum` time the web server needed to respond to requests.
+
+Keep in mind that for most web servers, timings start at the reception of the full request and end at the dispatch of the last byte of the response. So, they include network latencies of responses, but they do not include network latencies of requests.
+
+**Timings** ms (request processing time)
+
+- min (the minimum time needed to respond to a request)
+- max (the maximum time needed to respond to a request)
+- average (the average time needed to respond to a request)
+
+![image](https://cloud.githubusercontent.com/assets/2662304/22902283/369e3f92-f23d-11e6-9359-53e5d4ecb18e.png)
+
+> **Important**<br/>Most web servers do not log timing information by default.<br/>So, [unless you have configured your web server to also log timings](https://github.com/netdata/netdata/blob/419cd0a237275e5eeef3f92dcded84e735ee6c58/conf.d/python.d/web_log.conf#L76-L89), this chart will not exist.
+
+### URL patterns
+
+This is a very interesting chart. It is configured entirely by you.
+
+[**netdata**](https://my-netdata.io/) can map the URLs found in the log file into categories. You can define these categories, by providing names and regular expressions in `web_log.conf`.
+
+So, this configuration:
+
+```yaml
+nginx_netdata: # name the charts
+ path: '/var/log/nginx/access.log' # web server log file
+ categories:
+ badges : '^/api/v1/badge\.svg'
+ charts : '^/api/v1/(data|chart|charts)'
+ registry : '^/api/v1/registry'
+ alarms : '^/api/v1/alarm'
+ allmetrics : '^/api/v1/allmetrics'
+ api_other : '^/api/'
+ netdata_conf: '^/netdata.conf'
+ api_old : '^/(data|datasource|graph|list|all\.json)'
+```
+
+Produces the following chart. The `categories` section is matched in the order given. So, pay attention to the order you give your patterns.
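+
+A sketch of that ordered matching (first pattern wins; the names and patterns are
+taken from the example above):
+
+```python
+import re
+
+CATEGORIES = [  # order matters: the first matching pattern wins
+    ('badges', re.compile(r'^/api/v1/badge\.svg')),
+    ('charts', re.compile(r'^/api/v1/(data|chart|charts)')),
+    ('api_other', re.compile(r'^/api/')),
+]
+
+
+def categorize_url(url):
+    for name, pattern in CATEGORIES:
+        if pattern.search(url):
+            return name
+    return 'other'
+
+
+print(categorize_url('/api/v1/badge.svg'))  # -> 'badges'
+print(categorize_url('/api/v1/registry'))   # -> 'api_other' (trimmed list above)
+```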
+
+![image](https://cloud.githubusercontent.com/assets/2662304/22902302/4d25bf06-f23d-11e6-844d-18c0876bdc3d.png)
+
+### HTTP methods
+
+This chart breaks down requests by HTTP method used.
+
+![image](https://cloud.githubusercontent.com/assets/2662304/22902323/5ee376d4-f23d-11e6-8457-157d3f438843.png)
+
+### IP versions
+
+This one provides requests per IP version used by the clients (`IPv4`, `IPv6`).
+
+![image](https://cloud.githubusercontent.com/assets/2662304/22902370/7091a770-f23d-11e6-8cd2-74e9a67b1397.png)
+
+### Unique clients
+
+The last charts are about the unique IPs accessing your web server.
+
+**Current Poll Unique Client IPs** unique ips/s. This one counts the unique IPs for each data collection iteration (i.e. **unique clients per second**).
+
+![image](https://cloud.githubusercontent.com/assets/2662304/22902384/835aa168-f23d-11e6-914f-cfc3f06eaff8.png)
+
+**All Time Unique Client IPs** unique ips. Counts the unique IPs seen since the last [**netdata**](https://my-netdata.io/) restart.
+
+![image](https://cloud.githubusercontent.com/assets/2662304/22902407/92dd27e6-f23d-11e6-900d-eede7bc08e64.png)
+
+> **Important**<br/>To provide this information, the `web_log` plugin keeps in memory all the IPs seen by the web server. Although this does not require much memory, if you have a web server with several million unique client IPs, we suggest [disabling this chart](https://github.com/netdata/netdata/blob/419cd0a237275e5eeef3f92dcded84e735ee6c58/conf.d/python.d/web_log.conf#L64).
+
+## Alarms
+
+The magic of [**netdata**](https://my-netdata.io/) is that all metrics are collected per second, and all metrics can be used or correlated to provide real-time alarms. Out of the box, [**netdata**](https://my-netdata.io/) automatically attaches the following alarms to all `web_log` charts (i.e. to all log files configured, individually):
+
+| alarm|description|minimum<br/>requests|warning|critical|
+|:----|-----------|:------------------:|:-----:|:------:|
+| `1m_redirects`|The ratio of HTTP redirects (3xx except 304) over all the requests, during the last minute.<br/> <br/>*Detects if the site or the web API is suffering from too many or circular redirects.*<br/> <br/>(i.e. **oops!** *this should not redirect clients to itself*)|120/min|> 20%|> 30%|
+| `1m_bad_requests`|The ratio of HTTP bad requests (4xx) over all the requests, during the last minute.<br/> <br/>*Detects if the site or the web API is receiving too many bad requests, including `404`, not found.*<br/> <br/>(i.e. **oops!** *a few files were not uploaded*)|120/min|> 30%|> 50%|
+| `1m_internal_errors`|The ratio of HTTP internal server errors (5xx), over all the requests, during the last minute.<br/> <br/>*Detects if the site is facing difficulties to serve requests.*<br/> <br/>(i.e. **oops!** *this release crashes too much*)|120/min|> 2%|> 5%|
+| `5m_requests_ratio`|The percentage of successful web requests of the last 5 minutes, compared with the previous 5 minutes.<br/> <br/>*Detects if the site or the web API is suddenly getting too many or too few requests.*<br/> <br/>(i.e. too many = **oops!** *we are under attack*)<br/>(i.e. too few = **oops!** *call the network guys*)|120/5min|> double or \< half|> 4x or \< 1/4x|
+| `web_slow`|The average time to respond to requests, over the last 1 minute, compared to the average of last 10 minutes.<br/> <br/>*Detects if the site or the web API is suddenly a lot slower.*<br/> <br/>(i.e. **oops!** *the database is slow again*)|120/min|> 2x|> 4x|
+| `1m_successful`|The ratio of successful HTTP responses (1xx, 2xx, 304) over all the requests, during the last minute.<br/> <br/>*Detects if the site or the web API is performing within limits.*<br/> <br/>(i.e. **oops!** *help us God!*)|120/min|\< 85%|\< 75%|
+
+The column `minimum requests` states the minimum number of requests required for the alarm to be evaluated. We found that when the site is receiving requests above this rate, these alarms are pretty accurate (i.e. no false-positives).
+
+Netdata alarms are user-configurable. Sample config files can be found under directory `health/health.d` of the [Netdata GitHub repository](https://github.com/netdata/netdata/).
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fweb_log%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/python.d.plugin/web_log/web_log.chart.py b/collectors/python.d.plugin/web_log/web_log.chart.py
new file mode 100644
index 0000000..04ecade
--- /dev/null
+++ b/collectors/python.d.plugin/web_log/web_log.chart.py
@@ -0,0 +1,1194 @@
+# -*- coding: utf-8 -*-
+# Description: web log netdata python.d module
+# Author: ilyam8
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import bisect
+import os
+import re
+from collections import namedtuple, defaultdict
+from copy import deepcopy
+
+try:
+ from itertools import filterfalse
+except ImportError:
+ from itertools import ifilter as filter
+ from itertools import ifilterfalse as filterfalse
+
+try:
+ from sys import maxint
+except ImportError:
+ from sys import maxsize as maxint
+
+from bases.collection import read_last_line
+from bases.FrameworkServices.LogService import LogService
+
+ORDER_APACHE_CACHE = [
+ 'apache_cache',
+]
+
+ORDER_WEB = [
+ 'response_statuses',
+ 'response_codes',
+ 'bandwidth',
+ 'response_time',
+ 'response_time_hist',
+ 'response_time_upstream',
+ 'response_time_upstream_hist',
+ 'requests_per_url',
+ 'requests_per_user_defined',
+ 'http_method',
+ 'vhost',
+ 'port',
+ 'http_version',
+ 'requests_per_ipproto',
+ 'clients',
+ 'clients_all'
+]
+
+ORDER_SQUID = [
+ 'squid_response_statuses',
+ 'squid_response_codes',
+ 'squid_detailed_response_codes',
+ 'squid_method',
+ 'squid_mime_type',
+ 'squid_hier_code',
+ 'squid_transport_methods',
+ 'squid_transport_errors',
+ 'squid_code',
+ 'squid_handling_opts',
+ 'squid_object_types',
+ 'squid_cache_events',
+ 'squid_bytes',
+ 'squid_duration',
+ 'squid_clients',
+ 'squid_clients_all'
+]
+
+CHARTS_WEB = {
+ 'response_codes': {
+ 'options': [None, 'Response Codes', 'requests/s', 'responses', 'web_log.response_codes', 'stacked'],
+ 'lines': [
+ ['2xx', None, 'incremental'],
+ ['5xx', None, 'incremental'],
+ ['3xx', None, 'incremental'],
+ ['4xx', None, 'incremental'],
+ ['1xx', None, 'incremental'],
+ ['0xx', 'other', 'incremental'],
+ ['unmatched', None, 'incremental']
+ ]
+ },
+ 'bandwidth': {
+ 'options': [None, 'Bandwidth', 'kilobits/s', 'bandwidth', 'web_log.bandwidth', 'area'],
+ 'lines': [
+ ['resp_length', 'received', 'incremental', 8, 1000],
+ ['bytes_sent', 'sent', 'incremental', -8, 1000]
+ ]
+ },
+ 'response_time': {
+ 'options': [None, 'Processing Time', 'milliseconds', 'timings', 'web_log.response_time', 'area'],
+ 'lines': [
+ ['resp_time_min', 'min', 'incremental', 1, 1000],
+ ['resp_time_max', 'max', 'incremental', 1, 1000],
+ ['resp_time_avg', 'avg', 'incremental', 1, 1000]
+ ]
+ },
+ 'response_time_hist': {
+ 'options': [None, 'Processing Time Histogram', 'requests/s', 'timings', 'web_log.response_time_hist', 'line'],
+ 'lines': []
+ },
+ 'response_time_upstream': {
+ 'options': [None, 'Processing Time Upstream', 'milliseconds', 'timings',
+ 'web_log.response_time_upstream', 'area'],
+ 'lines': [
+ ['resp_time_upstream_min', 'min', 'incremental', 1, 1000],
+ ['resp_time_upstream_max', 'max', 'incremental', 1, 1000],
+ ['resp_time_upstream_avg', 'avg', 'incremental', 1, 1000]
+ ]
+ },
+ 'response_time_upstream_hist': {
+ 'options': [None, 'Processing Time Histogram', 'requests/s', 'timings',
+ 'web_log.response_time_upstream_hist', 'line'],
+ 'lines': []
+ },
+ 'clients': {
+ 'options': [None, 'Current Poll Unique Client IPs', 'unique ips', 'clients', 'web_log.clients', 'stacked'],
+ 'lines': [
+ ['unique_cur_ipv4', 'ipv4', 'incremental', 1, 1],
+ ['unique_cur_ipv6', 'ipv6', 'incremental', 1, 1]
+ ]
+ },
+ 'clients_all': {
+ 'options': [None, 'All Time Unique Client IPs', 'unique ips', 'clients', 'web_log.clients_all', 'stacked'],
+ 'lines': [
+ ['unique_tot_ipv4', 'ipv4', 'absolute', 1, 1],
+ ['unique_tot_ipv6', 'ipv6', 'absolute', 1, 1]
+ ]
+ },
+ 'http_method': {
+ 'options': [None, 'Requests Per HTTP Method', 'requests/s', 'http methods', 'web_log.http_method', 'stacked'],
+ 'lines': [
+ ['GET', 'GET', 'incremental', 1, 1]
+ ]
+ },
+ 'http_version': {
+ 'options': [None, 'Requests Per HTTP Version', 'requests/s', 'http versions',
+ 'web_log.http_version', 'stacked'],
+ 'lines': []
+ },
+ 'requests_per_ipproto': {
+ 'options': [None, 'Requests Per IP Protocol', 'requests/s', 'ip protocols', 'web_log.requests_per_ipproto',
+ 'stacked'],
+ 'lines': [
+ ['req_ipv4', 'ipv4', 'incremental', 1, 1],
+ ['req_ipv6', 'ipv6', 'incremental', 1, 1]
+ ]
+ },
+ 'response_statuses': {
+ 'options': [None, 'Response Statuses', 'requests/s', 'responses', 'web_log.response_statuses', 'stacked'],
+ 'lines': [
+ ['successful_requests', 'success', 'incremental', 1, 1],
+ ['server_errors', 'error', 'incremental', 1, 1],
+ ['redirects', 'redirect', 'incremental', 1, 1],
+ ['bad_requests', 'bad', 'incremental', 1, 1],
+ ['other_requests', 'other', 'incremental', 1, 1]
+ ]
+ },
+ 'requests_per_url': {
+ 'options': [None, 'Requests Per Url', 'requests/s', 'urls', 'web_log.requests_per_url', 'stacked'],
+ 'lines': [
+ ['url_pattern_other', 'other', 'incremental', 1, 1]
+ ]
+ },
+ 'requests_per_user_defined': {
+ 'options': [None, 'Requests Per User Defined Pattern', 'requests/s', 'user defined',
+ 'web_log.requests_per_user_defined', 'stacked'],
+ 'lines': [
+ ['user_pattern_other', 'other', 'incremental', 1, 1]
+ ]
+ },
+ 'port': {
+ 'options': [None, 'Requests Per Port', 'requests/s', 'port', 'web_log.port', 'stacked'],
+ 'lines': [
+ ['port_80', 'http', 'incremental', 1, 1],
+ ['port_443', 'https', 'incremental', 1, 1]
+ ]
+ },
+ 'vhost': {
+ 'options': [None, 'Requests Per Vhost', 'requests/s', 'vhost', 'web_log.vhost', 'stacked'],
+ 'lines': []
+ }
+}
+
+CHARTS_APACHE_CACHE = {
+ 'apache_cache': {
+ 'options': [None, 'Apache Cached Responses', 'percentage', 'cached', 'web_log.apache_cache_cache',
+ 'stacked'],
+ 'lines': [
+ ['hit', 'cache', 'percentage-of-absolute-row'],
+ ['miss', None, 'percentage-of-absolute-row'],
+ ['other', None, 'percentage-of-absolute-row']
+ ]
+ }
+}
+
+CHARTS_SQUID = {
+ 'squid_duration': {
+ 'options': [None, 'Elapsed Time The Transaction Busied The Cache',
+ 'milliseconds', 'squid_timings', 'web_log.squid_duration', 'area'],
+ 'lines': [
+ ['duration_min', 'min', 'incremental', 1, 1000],
+ ['duration_max', 'max', 'incremental', 1, 1000],
+ ['duration_avg', 'avg', 'incremental', 1, 1000]
+ ]
+ },
+ 'squid_bytes': {
+ 'options': [None, 'Amount Of Data Delivered To The Clients',
+ 'kilobits/s', 'squid_bandwidth', 'web_log.squid_bytes', 'area'],
+ 'lines': [
+ ['bytes', 'sent', 'incremental', 8, 1000]
+ ]
+ },
+ 'squid_response_statuses': {
+ 'options': [None, 'Response Statuses', 'responses/s', 'squid_responses', 'web_log.squid_response_statuses',
+ 'stacked'],
+ 'lines': [
+ ['successful_requests', 'success', 'incremental', 1, 1],
+ ['server_errors', 'error', 'incremental', 1, 1],
+ ['redirects', 'redirect', 'incremental', 1, 1],
+ ['bad_requests', 'bad', 'incremental', 1, 1],
+ ['other_requests', 'other', 'incremental', 1, 1]
+ ]
+ },
+ 'squid_response_codes': {
+ 'options': [None, 'Response Codes', 'responses/s', 'squid_responses',
+ 'web_log.squid_response_codes', 'stacked'],
+ 'lines': [
+ ['2xx', None, 'incremental'],
+ ['5xx', None, 'incremental'],
+ ['3xx', None, 'incremental'],
+ ['4xx', None, 'incremental'],
+ ['1xx', None, 'incremental'],
+ ['0xx', None, 'incremental'],
+ ['other', None, 'incremental'],
+ ['unmatched', None, 'incremental']
+ ]
+ },
+ 'squid_code': {
+ 'options': [None, 'Responses Per Cache Result Of The Request',
+ 'requests/s', 'squid_squid_cache', 'web_log.squid_code', 'stacked'],
+ 'lines': []
+ },
+ 'squid_detailed_response_codes': {
+ 'options': [None, 'Detailed Response Codes',
+ 'responses/s', 'squid_responses', 'web_log.squid_detailed_response_codes', 'stacked'],
+ 'lines': []
+ },
+ 'squid_hier_code': {
+ 'options': [None, 'Responses Per Hierarchy Code',
+ 'requests/s', 'squid_hierarchy', 'web_log.squid_hier_code', 'stacked'],
+ 'lines': []
+ },
+ 'squid_method': {
+ 'options': [None, 'Requests Per Method',
+ 'requests/s', 'squid_requests', 'web_log.squid_method', 'stacked'],
+ 'lines': []
+ },
+ 'squid_mime_type': {
+ 'options': [None, 'Requests Per MIME Type',
+ 'requests/s', 'squid_requests', 'web_log.squid_mime_type', 'stacked'],
+ 'lines': []
+ },
+ 'squid_clients': {
+ 'options': [None, 'Current Poll Unique Client IPs', 'unique ips', 'squid_clients',
+ 'web_log.squid_clients', 'stacked'],
+ 'lines': [
+ ['unique_ipv4', 'ipv4', 'incremental'],
+ ['unique_ipv6', 'ipv6', 'incremental']
+ ]
+ },
+ 'squid_clients_all': {
+ 'options': [None, 'All Time Unique Client IPs', 'unique ips', 'squid_clients',
+ 'web_log.squid_clients_all', 'stacked'],
+ 'lines': [
+ ['unique_tot_ipv4', 'ipv4', 'absolute'],
+ ['unique_tot_ipv6', 'ipv6', 'absolute']
+ ]
+ },
+ 'squid_transport_methods': {
+ 'options': [None, 'Transport Methods', 'requests/s', 'squid_squid_transport',
+ 'web_log.squid_transport_methods', 'stacked'],
+ 'lines': []
+ },
+ 'squid_transport_errors': {
+ 'options': [None, 'Transport Errors', 'requests/s', 'squid_squid_transport',
+ 'web_log.squid_transport_errors', 'stacked'],
+ 'lines': []
+ },
+ 'squid_handling_opts': {
+ 'options': [None, 'Handling Opts', 'requests/s', 'squid_squid_cache',
+ 'web_log.squid_handling_opts', 'stacked'],
+ 'lines': []
+ },
+ 'squid_object_types': {
+ 'options': [None, 'Object Types', 'objects/s', 'squid_squid_cache',
+ 'web_log.squid_object_types', 'stacked'],
+ 'lines': []
+ },
+ 'squid_cache_events': {
+ 'options': [None, 'Cache Events', 'events/s', 'squid_squid_cache',
+ 'web_log.squid_cache_events', 'stacked'],
+ 'lines': []
+ }
+}
+
+NAMED_PATTERN = namedtuple('PATTERN', ['description', 'func'])
+
+DET_RESP_AGGR = ['', '_1xx', '_2xx', '_3xx', '_4xx', '_5xx', '_Other']
+
+SQUID_CODES = {
+ 'TCP': 'squid_transport_methods',
+ 'UDP': 'squid_transport_methods',
+ 'NONE': 'squid_transport_methods',
+ 'CLIENT': 'squid_handling_opts',
+ 'IMS': 'squid_handling_opts',
+ 'ASYNC': 'squid_handling_opts',
+ 'SWAPFAIL': 'squid_handling_opts',
+ 'REFRESH': 'squid_handling_opts',
+ 'SHARED': 'squid_handling_opts',
+ 'REPLY': 'squid_handling_opts',
+ 'NEGATIVE': 'squid_object_types',
+ 'STALE': 'squid_object_types',
+ 'OFFLINE': 'squid_object_types',
+ 'INVALID': 'squid_object_types',
+ 'FAIL': 'squid_object_types',
+ 'MODIFIED': 'squid_object_types',
+ 'UNMODIFIED': 'squid_object_types',
+ 'REDIRECT': 'squid_object_types',
+ 'HIT': 'squid_cache_events',
+ 'MEM': 'squid_cache_events',
+ 'MISS': 'squid_cache_events',
+ 'DENIED': 'squid_cache_events',
+ 'NOFETCH': 'squid_cache_events',
+ 'TUNNEL': 'squid_cache_events',
+ 'ABORTED': 'squid_transport_errors',
+ 'TIMEOUT': 'squid_transport_errors'
+}
+
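+# Example request field parsed by REQUEST_REGEX (illustrative):
+#   'GET /index.html HTTP/1.1' -> method='GET', url='/index.html', http_version='1.1'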
+REQUEST_REGEX = re.compile(r'(?P<method>[A-Z]+) (?P<url>[^ ]+) [A-Z]+/(?P<http_version>\d(?:\.\d)?)')
+
+MIME_TYPES = ['application', 'audio', 'example', 'font', 'image', 'message', 'model', 'multipart', 'text', 'video']
+
+
+class Service(LogService):
+ def __init__(self, configuration=None, name=None):
+ """
+ :param configuration:
+ :param name:
+ """
+ LogService.__init__(self, configuration=configuration, name=name)
+ self.configuration = configuration
+ self.log_path = self.configuration.get('path')
+ self.job = None
+
+ def check(self):
+ """
+ :return: bool
+
+ 1. "log_path" is specified in the module configuration file
+ 2. "log_path" must be readable by netdata user and must exist
+ 3. "log_path' must not be empty. We need at least 1 line to find appropriate pattern to parse
+ 4. other checks depends on log "type"
+ """
+
+ log_type = self.configuration.get('type', 'web')
+ log_types = dict(web=Web, apache_cache=ApacheCache, squid=Squid)
+
+ if log_type not in log_types:
+ self.error('bad log type {log_type}. Supported types: {types}'.format(log_type=log_type,
+ types=log_types.keys()))
+ return False
+
+ if not self.log_path:
+ self.error('log path is not specified')
+ return False
+
+ if not (self._find_recent_log_file() and os.access(self.log_path, os.R_OK)):
+ self.error('{log_file} does not exist or is not readable'.format(log_file=self.log_path))
+ return False
+
+ if not os.path.getsize(self.log_path):
+ self.error('{log_file} is empty'.format(log_file=self.log_path))
+ return False
+
+ self.job = log_types[log_type](self)
+ if self.job.check():
+ self.order = self.job.order
+ self.definitions = self.job.definitions
+ return True
+ return False
+
+ def _get_data(self):
+ return self.job.get_data(self._get_raw_data())
+
+
+class Web:
+ def __init__(self, service):
+ self.service = service
+ self.order = ORDER_WEB[:]
+ self.definitions = deepcopy(CHARTS_WEB)
+ self.pre_filter = check_patterns('filter', self.configuration.get('filter'))
+ self.storage = dict()
+ self.data = {
+ 'bytes_sent': 0,
+ 'resp_length': 0,
+ 'resp_time_min': 0,
+ 'resp_time_max': 0,
+ 'resp_time_avg': 0,
+ 'resp_time_upstream_min': 0,
+ 'resp_time_upstream_max': 0,
+ 'resp_time_upstream_avg': 0,
+ 'unique_cur_ipv4': 0,
+ 'unique_cur_ipv6': 0,
+ '2xx': 0,
+ '5xx': 0,
+ '3xx': 0,
+ '4xx': 0,
+ '1xx': 0,
+ '0xx': 0,
+ 'unmatched': 0,
+ 'req_ipv4': 0,
+ 'req_ipv6': 0,
+ 'unique_tot_ipv4': 0,
+ 'unique_tot_ipv6': 0,
+ 'successful_requests': 0,
+ 'redirects': 0,
+ 'bad_requests': 0,
+ 'server_errors': 0,
+ 'other_requests': 0,
+ 'GET': 0
+ }
+
+ def __getattr__(self, item):
+ return getattr(self.service, item)
+
+ def check(self):
+ last_line = read_last_line(self.log_path)
+ if not last_line:
+ return False
+ # Custom_log_format or predefined log format.
+ if self.configuration.get('custom_log_format'):
+ match_dict, error = self.find_regex_custom(last_line)
+ else:
+ match_dict, error = self.find_regex(last_line)
+
+ # "match_dict" is None if there are any problems
+ if match_dict is None:
+ self.error(error)
+ return False
+
+ self.storage['unique_all_time'] = list()
+ self.storage['url_pattern'] = check_patterns('url_pattern', self.configuration.get('categories'))
+ self.storage['user_pattern'] = check_patterns('user_pattern', self.configuration.get('user_defined'))
+
+ self.create_web_charts(match_dict) # Create charts
+ self.info('Collected data: %s' % list(match_dict.keys()))
+ return True
+
+ def create_web_charts(self, match_dict):
+ """
+ :param match_dict: dict: regex.search.groupdict(). Ex. {'address': '127.0.0.1', 'code': '200', 'method': 'GET'}
+ :return:
+ Create/remove additional charts depending on the 'match_dict' keys and configuration file options
+ """
+ if 'resp_time' not in match_dict:
+ self.order.remove('response_time')
+ self.order.remove('response_time_hist')
+ if 'resp_time_upstream' not in match_dict:
+ self.order.remove('response_time_upstream')
+ self.order.remove('response_time_upstream_hist')
+
+ # Add 'response_time_hist' and 'response_time_upstream_hist' charts if a histogram is specified in the configuration
+ histogram = self.configuration.get('histogram', None)
+ if isinstance(histogram, list):
+ self.storage['bucket_index'] = histogram[:]
+ self.storage['bucket_index'].append(maxint)
+ self.storage['buckets'] = [0] * (len(histogram) + 1)
+ self.storage['upstream_buckets'] = [0] * (len(histogram) + 1)
+ hist_lines = self.definitions['response_time_hist']['lines']
+ upstream_hist_lines = self.definitions['response_time_upstream_hist']['lines']
+ for i, le in enumerate(histogram):
+ hist_key = 'response_time_hist_%d' % i
+ upstream_hist_key = 'response_time_upstream_hist_%d' % i
+ hist_lines.append([hist_key, str(le), 'incremental', 1, 1])
+ upstream_hist_lines.append([upstream_hist_key, str(le), 'incremental', 1, 1])
+
+ hist_lines.append(['response_time_hist_%d' % len(histogram), '+Inf', 'incremental', 1, 1])
+ upstream_hist_lines.append(['response_time_upstream_hist_%d' % len(histogram), '+Inf', 'incremental', 1, 1])
+ elif histogram is not None:
+ self.error('expected histogram to be a list, but got {0}'.format(type(histogram)))
+
+ if not self.configuration.get('all_time', True):
+ self.order.remove('clients_all')
+
+ # Add 'detailed_response_codes' chart if specified in the configuration
+ if self.configuration.get('detailed_response_codes', True):
+ if self.configuration.get('detailed_response_aggregate', True):
+ codes = DET_RESP_AGGR[:1]
+ else:
+ codes = DET_RESP_AGGR[1:]
+
+ for code in codes:
+ self.order.append('detailed_response_codes%s' % code)
+ self.definitions['detailed_response_codes%s' % code] = {
+ 'options': [None, 'Detailed Response Codes %s' % code[1:], 'requests/s', 'responses',
+ 'web_log.detailed_response_codes%s' % code, 'stacked'],
+ 'lines': []
+ }
+
+ # Add 'requests_per_url' chart if specified in the configuration
+ if self.storage['url_pattern']:
+ for elem in self.storage['url_pattern']:
+ dim = [elem.description, elem.description[12:], 'incremental']
+ self.definitions['requests_per_url']['lines'].append(dim)
+ self.data[elem.description] = 0
+ self.data['url_pattern_other'] = 0
+ else:
+ self.order.remove('requests_per_url')
+
+ # Add 'requests_per_user_defined' chart if specified in the configuration
+ if self.storage['user_pattern'] and 'user_defined' in match_dict:
+ for elem in self.storage['user_pattern']:
+ dim = [elem.description, elem.description[13:], 'incremental']
+ self.definitions['requests_per_user_defined']['lines'].append(dim)
+ self.data[elem.description] = 0
+ self.data['user_pattern_other'] = 0
+ else:
+ self.order.remove('requests_per_user_defined')
+
+ def get_data(self, raw_data=None):
+ """
+ Parses new log lines
+ :return: dict OR None
+ None if _get_raw_data method fails.
+ In all other cases - dict.
+ """
+ if not raw_data:
+ return None if raw_data is None else self.data
+
+ filtered_data = filter_data(raw_data=raw_data, pre_filter=self.pre_filter)
+
+ unique_current = set()
+ timings = defaultdict(lambda: dict(minimum=None, maximum=0, summary=0, count=0))
+
+ for line in filtered_data:
+ match = self.storage['regex'].search(line)
+ if match:
+ match_dict = match.groupdict()
+ try:
+ code = match_dict['code'][0] + 'xx'
+ self.data[code] += 1
+ except KeyError:
+ self.data['0xx'] += 1
+ # detailed response code
+ if self.configuration.get('detailed_response_codes', True):
+ self.get_data_per_response_codes_detailed(code=match_dict['code'])
+ # response statuses
+ self.get_data_per_statuses(code=match_dict['code'])
+ # requests per user defined pattern
+ if self.storage['user_pattern'] and 'user_defined' in match_dict:
+ self.get_data_per_pattern(row=match_dict['user_defined'],
+ other='user_pattern_other',
+ pattern=self.storage['user_pattern'])
+ # method, url, http version
+ self.get_data_from_request_field(match_dict=match_dict)
+ # bandwidth sent
+ bytes_sent = match_dict['bytes_sent'] if '-' not in match_dict['bytes_sent'] else 0
+ self.data['bytes_sent'] += int(bytes_sent)
+ # request processing time and bandwidth received
+ if 'resp_length' in match_dict:
+ resp_length = match_dict['resp_length'] if '-' not in match_dict['resp_length'] else 0
+ self.data['resp_length'] += int(resp_length)
+ if 'resp_time' in match_dict:
+ resp_time = self.storage['func_resp_time'](float(match_dict['resp_time']))
+ get_timings(timings=timings['resp_time'], time=resp_time)
+ if 'bucket_index' in self.storage:
+ get_hist(self.storage['bucket_index'], self.storage['buckets'], resp_time / 1000)
+ if 'resp_time_upstream' in match_dict and match_dict['resp_time_upstream'] != '-':
+ resp_time_upstream = self.storage['func_resp_time'](float(match_dict['resp_time_upstream']))
+ get_timings(timings=timings['resp_time_upstream'], time=resp_time_upstream)
+ if 'bucket_index' in self.storage:
+ get_hist(self.storage['bucket_index'], self.storage['upstream_buckets'], resp_time_upstream / 1000)
+ # requests per ip proto
+ proto = 'ipv6' if ':' in match_dict['address'] else 'ipv4'
+ self.data['req_' + proto] += 1
+ # unique clients ips
+ if self.configuration.get('all_time', True):
+ if address_not_in_pool(pool=self.storage['unique_all_time'],
+ address=match_dict['address'],
+ pool_size=self.data['unique_tot_ipv4'] + self.data['unique_tot_ipv6']):
+ self.data['unique_tot_' + proto] += 1
+ if match_dict['address'] not in unique_current:
+ self.data['unique_cur_' + proto] += 1
+ unique_current.add(match_dict['address'])
+ else:
+ self.data['unmatched'] += 1
+
+ # timings
+ for elem in timings:
+ self.data[elem + '_min'] += timings[elem]['minimum']
+ self.data[elem + '_avg'] += timings[elem]['summary'] / timings[elem]['count']
+ self.data[elem + '_max'] += timings[elem]['maximum']
+
+ # histogram
+ if 'bucket_index' in self.storage:
+ buckets = self.storage['buckets']
+ upstream_buckets = self.storage['upstream_buckets']
+ for i in range(0, len(self.storage['bucket_index'])):
+ hist_key = 'response_time_hist_%d' % i
+ upstream_hist_key = 'response_time_upstream_hist_%d' % i
+ self.data[hist_key] = buckets[i]
+ self.data[upstream_hist_key] = upstream_buckets[i]
+
+ return self.data
+
+ def find_regex(self, last_line):
+ """
+ :param last_line: str: literally last line from log file
+ :return: tuple where:
+ [0]: dict or None: match_dict or None
+ [1]: str: error description
+ We need to find an appropriate pattern for the current log file.
+ The logic is to run a regex search through the string for all predefined patterns
+ until we find a match or fail.
+ """
+ # REGEX: 1.IPv4 address 2.HTTP method 3. URL 4. Response code
+ # 5. Bytes sent 6. Response length 7. Response process time
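+ # Example line matched by "default" (illustrative):
+ #   127.0.0.1 - - [10/Oct/2020:13:55:36 +0000] "GET /index.html HTTP/1.1" 200 1024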
+ default = re.compile(r'(?P<address>[\da-f.:]+|localhost)'
+ r' -.*?"(?P<request>[^"]*)"'
+ r' (?P<code>[1-9]\d{2})'
+ r' (?P<bytes_sent>\d+|-)')
+
+ apache_ext_insert = re.compile(r'(?P<address>[\da-f.:]+|localhost)'
+ r' -.*?"(?P<request>[^"]*)"'
+ r' (?P<code>[1-9]\d{2})'
+ r' (?P<bytes_sent>\d+|-)'
+ r' (?P<resp_length>\d+|-)'
+ r' (?P<resp_time>\d+) ')
+
+ apache_ext_append = re.compile(r'(?P<address>[\da-f.:]+|localhost)'
+ r' -.*?"(?P<request>[^"]*)"'
+ r' (?P<code>[1-9]\d{2})'
+ r' (?P<bytes_sent>\d+|-)'
+ r' .*?'
+ r' (?P<resp_length>\d+|-)'
+ r' (?P<resp_time>\d+)'
+ r'(?: |$)')
+
+ nginx_ext_insert = re.compile(r'(?P<address>[\da-f.:]+)'
+ r' -.*?"(?P<request>[^"]*)"'
+ r' (?P<code>[1-9]\d{2})'
+ r' (?P<bytes_sent>\d+)'
+ r' (?P<resp_length>\d+)'
+ r' (?P<resp_time>\d+\.\d+) ')
+
+ nginx_ext2_insert = re.compile(r'(?P<address>[\da-f.:]+)'
+ r' -.*?"(?P<request>[^"]*)"'
+ r' (?P<code>[1-9]\d{2})'
+ r' (?P<bytes_sent>\d+)'
+ r' (?P<resp_length>\d+)'
+ r' (?P<resp_time>\d+\.\d+)'
+ r' (?P<resp_time_upstream>[\d.-]+)')
+
+ nginx_ext_append = re.compile(r'(?P<address>[\da-f.:]+)'
+ r' -.*?"(?P<request>[^"]*)"'
+ r' (?P<code>[1-9]\d{2})'
+ r' (?P<bytes_sent>\d+)'
+ r' .*?'
+ r' (?P<resp_length>\d+)'
+ r' (?P<resp_time>\d+\.\d+)')
+
+ def func_usec(time):
+ return time
+
+ def func_sec(time):
+ return time * 1000000
+
+ r_regex = [apache_ext_insert, apache_ext_append,
+ nginx_ext2_insert, nginx_ext_insert, nginx_ext_append,
+ default]
+ r_function = [func_usec, func_usec, func_sec, func_sec, func_sec, func_usec]
+ regex_function = zip(r_regex, r_function)
+
+ match_dict = dict()
+ for regex, func in regex_function:
+ match = regex.search(last_line)
+ if match:
+ self.storage['regex'] = regex
+ self.storage['func_resp_time'] = func
+ match_dict = match.groupdict()
+ break
+
+ return find_regex_return(match_dict=match_dict or None,
+ msg='Unknown log format. You need to use "custom_log_format" feature.')
+
+ def find_regex_custom(self, last_line):
+ """
+ :param last_line: str: literally last line from log file
+ :return: tuple where:
+ [0]: dict or None: match_dict or None
+ [1]: str: error description
+
+ We are here only if "custom_log_format" is in logs. We need to make sure:
+ 1. "custom_log_format" is a dict
+ 2. "pattern" in "custom_log_format" and pattern is <str> instance
+ 3. if "time_multiplier" is in "custom_log_format" it must be <int> or <float> instance
+
+ If all parameters are ok we need to make sure:
+ 1. Pattern search is success
+ 2. Pattern search contains named subgroups (?P<subgroup_name>) (= "match_dict")
+
+ If pattern search is success we need to make sure:
+ 1. All mandatory keys ['address', 'code', 'bytes_sent'] are in "match_dict"
+
+ If this is True we need to make sure:
+ 1. All mandatory key values from "match_dict" have the correct format
+ ("code" is integer, "method" is uppercase word, etc)
+
+ If non-mandatory keys are in "match_dict" we need to make sure:
+ 1. All non mandatory key values from match_dict ['resp_length', 'resp_time'] have the correct format
+ ("resp_length" is integer or "-", "resp_time" is integer or float)
+
+ """
+ if not hasattr(self.configuration.get('custom_log_format'), 'keys'):
+ return find_regex_return(msg='Custom log: "custom_log_format" is not a <dict>')
+
+ pattern = self.configuration.get('custom_log_format', dict()).get('pattern')
+ if not (pattern and isinstance(pattern, str)):
+ return find_regex_return(msg='Custom log: "pattern" option is not specified or type is not <str>')
+
+ resp_time_func = self.configuration.get('custom_log_format', dict()).get('time_multiplier') or 0
+
+ if not isinstance(resp_time_func, (int, float)):
+ return find_regex_return(msg='Custom log: "time_multiplier" is not an integer or a float')
+
+ try:
+ regex = re.compile(pattern)
+ except re.error as error:
+ return find_regex_return(msg='Pattern compile error: %s' % str(error))
+
+ match = regex.search(last_line)
+ if not match:
+ return find_regex_return(msg='Custom log: pattern search FAILED')
+
+ match_dict = match.groupdict() or None
+ if match_dict is None:
+ return find_regex_return(msg='Custom log: search OK but contains no named subgroups'
+ ' (you need to use ?P<subgroup_name>)')
+ mandatory_dict = {'address': r'[\w.:-]+',
+ 'code': r'[1-9]\d{2}',
+ 'bytes_sent': r'\d+|-'}
+ optional_dict = {'resp_length': r'\d+|-',
+ 'resp_time': r'[\d.]+',
+ 'resp_time_upstream': r'[\d.-]+',
+ 'method': r'[A-Z]+',
+ 'http_version': r'\d(?:\.\d)?'}
+
+ mandatory_values = set(mandatory_dict) - set(match_dict)
+ if mandatory_values:
+ return find_regex_return(msg='Custom log: search OK but some mandatory keys (%s) are missing'
+ % list(mandatory_values))
+ for key in mandatory_dict:
+ if not re.search(mandatory_dict[key], match_dict[key]):
+ return find_regex_return(msg='Custom log: can\'t parse "%s": %s'
+ % (key, match_dict[key]))
+
+ optional_values = set(optional_dict) & set(match_dict)
+ for key in optional_values:
+ if not re.search(optional_dict[key], match_dict[key]):
+ return find_regex_return(msg='Custom log: can\'t parse "%s": %s'
+ % (key, match_dict[key]))
+
+ dot_in_time = '.' in match_dict.get('resp_time', '')
+ if dot_in_time:
+ self.storage['func_resp_time'] = lambda time: time * (resp_time_func or 1000000)
+ else:
+ self.storage['func_resp_time'] = lambda time: time * (resp_time_func or 1)
+
+ self.storage['regex'] = regex
+ return find_regex_return(match_dict=match_dict)
+
+ def get_data_from_request_field(self, match_dict):
+ if match_dict.get('request'):
+ match_dict = REQUEST_REGEX.search(match_dict['request'])
+ if match_dict:
+ match_dict = match_dict.groupdict()
+ else:
+ return
+ # requests per url
+ if match_dict.get('url') and self.storage['url_pattern']:
+ self.get_data_per_pattern(row=match_dict['url'],
+ other='url_pattern_other',
+ pattern=self.storage['url_pattern'])
+ # requests per http method
+ if match_dict.get('method'):
+ if match_dict['method'] not in self.data:
+ self.charts['http_method'].add_dimension([match_dict['method'],
+ match_dict['method'],
+ 'incremental'])
+ self.data[match_dict['method']] = 0
+ self.data[match_dict['method']] += 1
+ # requests per http version
+ if match_dict.get('http_version'):
+ dim_id = match_dict['http_version'].replace('.', '_')
+ if dim_id not in self.data:
+ self.charts['http_version'].add_dimension([dim_id,
+ match_dict['http_version'],
+ 'incremental'])
+ self.data[dim_id] = 0
+ self.data[dim_id] += 1
+ # requests per port number
+ if match_dict.get('port'):
+ if match_dict['port'] not in self.data:
+ self.charts['port'].add_dimension([match_dict['port'],
+ match_dict['port'],
+ 'incremental'])
+ self.data[match_dict['port']] = 0
+ self.data[match_dict['port']] += 1
+ # requests per vhost
+ if match_dict.get('vhost'):
+ dim_id = match_dict['vhost'].replace('.', '_')
+ if dim_id not in self.data:
+ self.charts['vhost'].add_dimension([dim_id,
+ match_dict['vhost'],
+ 'incremental'])
+ self.data[dim_id] = 0
+ self.data[dim_id] += 1
+
+ def get_data_per_response_codes_detailed(self, code):
+ """
+ :param code: str: CODE from parsed line. Ex.: '202', '499'
+ :return:
+ Adds a new chart dimension if the value is seen for the first time
+ """
+ if code not in self.data:
+ if self.configuration.get('detailed_response_aggregate', True):
+ self.charts['detailed_response_codes'].add_dimension([code, code, 'incremental'])
+ self.data[code] = 0
+ else:
+ code_index = int(code[0]) if int(code[0]) < 6 else 6
+ chart_key = 'detailed_response_codes' + DET_RESP_AGGR[code_index]
+ self.charts[chart_key].add_dimension([code, code, 'incremental'])
+ self.data[code] = 0
+ self.data[code] += 1
+
+ def get_data_per_pattern(self, row, other, pattern):
+ """
+ :param row: str:
+ :param other: str:
+ :param pattern: named tuple: (['pattern_description', 'regular expression'])
+ :return:
+ Try each user-defined pattern against the row and count the first one that matches.
+ If no pattern matches, the row is counted under the 'other' dimension
+ """
+ match = None
+ for elem in pattern:
+ if elem.func(row):
+ self.data[elem.description] += 1
+ match = True
+ break
+ if not match:
+ self.data[other] += 1
+
+ def get_data_per_statuses(self, code):
+ """
+ :param code: str: response status code. Ex.: '202', '499'
+ :return:
+ """
+ code_class = code[0]
+ if code_class == '2' or code == '304' or code_class == '1' or code == '401':
+ self.data['successful_requests'] += 1
+ elif code_class == '3':
+ self.data['redirects'] += 1
+ elif code_class == '4':
+ self.data['bad_requests'] += 1
+ elif code_class == '5':
+ self.data['server_errors'] += 1
+ else:
+ self.data['other_requests'] += 1
+
+
+class ApacheCache:
+ def __init__(self, service):
+ self.service = service
+ self.order = ORDER_APACHE_CACHE
+ self.definitions = CHARTS_APACHE_CACHE
+
+ @staticmethod
+ def check():
+ return True
+
+ @staticmethod
+ def get_data(raw_data=None):
+ data = dict(hit=0, miss=0, other=0)
+ if not raw_data:
+ return None if raw_data is None else data
+
+ for line in raw_data:
+ if 'cache hit' in line:
+ data['hit'] += 1
+ elif 'cache miss' in line:
+ data['miss'] += 1
+ else:
+ data['other'] += 1
+ return data
+
+
+class Squid:
+ def __init__(self, service):
+ self.service = service
+ self.order = ORDER_SQUID
+ self.definitions = CHARTS_SQUID
+ self.pre_filter = check_patterns('filter', self.configuration.get('filter'))
+ self.storage = dict()
+ self.data = {
+ 'duration_max': 0,
+ 'duration_avg': 0,
+ 'duration_min': 0,
+ 'bytes': 0,
+ '0xx': 0,
+ '1xx': 0,
+ '2xx': 0,
+ '3xx': 0,
+ '4xx': 0,
+ '5xx': 0,
+ 'other': 0,
+ 'unmatched': 0,
+ 'unique_ipv4': 0,
+ 'unique_ipv6': 0,
+ 'unique_tot_ipv4': 0,
+ 'unique_tot_ipv6': 0,
+ 'successful_requests': 0,
+ 'redirects': 0,
+ 'bad_requests': 0,
+ 'server_errors': 0,
+ 'other_requests': 0
+ }
+
+ def __getattr__(self, item):
+ return getattr(self.service, item)
+
+ def check(self):
+ last_line = read_last_line(self.log_path)
+ if not last_line:
+ return False
+ self.storage['unique_all_time'] = list()
+ self.storage['regex'] = re.compile(r'[0-9.]+\s+(?P<duration>[0-9]+)'
+ r' (?P<client_address>[\da-f.:]+)'
+ r' (?P<squid_code>[A-Z_]+)/'
+ r'(?P<http_code>[0-9]+)'
+ r' (?P<bytes>[0-9]+)'
+ r' (?P<method>[A-Z_]+)'
+ r' (?P<url>[^ ]+)'
+ r' (?P<user>[^ ]+)'
+ r' (?P<hier_code>[A-Z_]+)/[\da-z.:-]+'
+ r' (?P<mime_type>[A-Za-z-]*)')
+
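+ # Example native access.log line matched by the regex above (illustrative):
+ #   1588867539.483    125 192.168.0.10 TCP_MISS/200 1024 GET http://example.com/ - HIER_DIRECT/93.184.216.34 text/html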
+ match = self.storage['regex'].search(last_line)
+ if not match:
+ self.error('Regex does not match (%s)' % self.storage['regex'].pattern)
+ return False
+ self.storage['dynamic'] = {
+ 'http_code': {
+ 'chart': 'squid_detailed_response_codes',
+ 'func_dim_id': None,
+ 'func_dim': None
+ },
+ 'hier_code': {
+ 'chart': 'squid_hier_code',
+ 'func_dim_id': None,
+ 'func_dim': lambda v: v.replace('HIER_', '')
+ },
+ 'method': {
+ 'chart': 'squid_method',
+ 'func_dim_id': None,
+ 'func_dim': None
+ },
+ 'mime_type': {
+ 'chart': 'squid_mime_type',
+ 'func_dim_id': lambda v: str.lower(v) if str.lower(v) in MIME_TYPES else 'unknown',
+ 'func_dim': None
+ }
+ }
+ if not self.configuration.get('all_time', True):
+ self.order.remove('squid_clients_all')
+ return True
+
+ def get_data(self, raw_data=None):
+ if not raw_data:
+ return None if raw_data is None else self.data
+
+ filtered_data = filter_data(raw_data=raw_data, pre_filter=self.pre_filter)
+
+ unique_ip = set()
+ timings = defaultdict(lambda: dict(minimum=None, maximum=0, summary=0, count=0))
+
+ for row in filtered_data:
+ match = self.storage['regex'].search(row)
+ if match:
+ match = match.groupdict()
+ if match['duration'] != '0':
+ get_timings(timings=timings['duration'], time=float(match['duration']) * 1000)
+ try:
+ self.data[match['http_code'][0] + 'xx'] += 1
+ except KeyError:
+ self.data['other'] += 1
+
+ self.get_data_per_statuses(match['http_code'])
+
+ self.get_data_per_squid_code(match['squid_code'])
+
+ self.data['bytes'] += int(match['bytes'])
+
+ proto = 'ipv4' if '.' in match['client_address'] else 'ipv6'
+ # unique clients ips
+ if self.configuration.get('all_time', True):
+ if address_not_in_pool(pool=self.storage['unique_all_time'],
+ address=match['client_address'],
+ pool_size=self.data['unique_tot_ipv4'] + self.data['unique_tot_ipv6']):
+ self.data['unique_tot_' + proto] += 1
+
+ if match['client_address'] not in unique_ip:
+ self.data['unique_' + proto] += 1
+ unique_ip.add(match['client_address'])
+
+ for key, values in self.storage['dynamic'].items():
+ if match[key] == '-':
+ continue
+ dimension_id = values['func_dim_id'](match[key]) if values['func_dim_id'] else match[key]
+ if dimension_id not in self.data:
+ dimension = values['func_dim'](match[key]) if values['func_dim'] else dimension_id
+ self.charts[values['chart']].add_dimension([dimension_id,
+ dimension,
+ 'incremental'])
+ self.data[dimension_id] = 0
+ self.data[dimension_id] += 1
+ else:
+ self.data['unmatched'] += 1
+
+ for elem in timings:
+ self.data[elem + '_min'] += timings[elem]['minimum']
+ self.data[elem + '_avg'] += timings[elem]['summary'] / timings[elem]['count']
+ self.data[elem + '_max'] += timings[elem]['maximum']
+ return self.data
+
+ def get_data_per_statuses(self, code):
+ """
+ :param code: str: response status code. Ex.: '202', '499'
+ :return:
+ """
+ code_class = code[0]
+ if code_class == '2' or code == '304' or code_class == '1' or code == '000':
+ self.data['successful_requests'] += 1
+ elif code_class == '3':
+ self.data['redirects'] += 1
+ elif code_class == '4':
+ self.data['bad_requests'] += 1
+ elif code_class == '5' or code_class == '6':
+ self.data['server_errors'] += 1
+ else:
+ self.data['other_requests'] += 1
+
+ def get_data_per_squid_code(self, code):
+ """
+ :param code: str: squid response code. Ex.: 'TCP_MISS', 'TCP_MISS_ABORTED'
+ :return:
+ """
+ if code not in self.data:
+ self.charts['squid_code'].add_dimension([code, code, 'incremental'])
+ self.data[code] = 0
+ self.data[code] += 1
+
+ for tag in code.split('_'):
+ try:
+ chart_key = SQUID_CODES[tag]
+ except KeyError:
+ continue
+ dimension_id = '_'.join(['code_detailed', tag])
+ if dimension_id not in self.data:
+ self.charts[chart_key].add_dimension([dimension_id, tag, 'incremental'])
+ self.data[dimension_id] = 0
+ self.data[dimension_id] += 1
+
+
+def get_timings(timings, time):
+ """
+ :param timings: dict with 'minimum', 'maximum', 'summary' and 'count' keys
+ :param time: time value to add to the aggregates
+ :return: None
+ """
+ if timings['minimum'] is None:
+ timings['minimum'] = time
+ if time > timings['maximum']:
+ timings['maximum'] = time
+ elif time < timings['minimum']:
+ timings['minimum'] = time
+ timings['summary'] += time
+ timings['count'] += 1
+
+
+def get_hist(index, buckets, time):
+ """
+ :param index: histogram index (Ex. [10, 50, 100, 150, ...])
+ :param buckets: histogram buckets
+ :param time: time
+ :return: None
+ """
+ for i in range(len(index) - 1, -1, -1):
+ if time <= index[i]:
+ buckets[i] += 1
+ else:
+ break
+
+
+def address_not_in_pool(pool, address, pool_size):
+ """
+ :param pool: list of ip addresses
+ :param address: ip address
+ :param pool_size: current pool size
+ :return: True if address not in pool. False otherwise.
+ """
+ index = bisect.bisect_left(pool, address)
+ if index < pool_size and pool[index] == address:
+ return False
+ bisect.insort_left(pool, address)
+ return True
+
+
+def find_regex_return(match_dict=None, msg='Generic error message'):
+ """
+ :param match_dict: dict: re.search.groupdict() or None
+ :param msg: str: error description
+ :return: tuple:
+ """
+ return match_dict, msg
+
+
+def check_patterns(string, dimension_regex_dict):
+ """
+ :param string: str:
+ :param dimension_regex_dict: dict: ex. {'dim1': '<pattern1>', 'dim2': '<pattern2>'}
+ :return: list of named tuples or None:
+ We need to make sure all patterns are valid regular expressions
+ """
+ if not hasattr(dimension_regex_dict, 'keys'):
+ return None
+
+ result = list()
+
+ def valid_pattern(pattern):
+ """
+ :param pattern: str
+ :return: re.compile(pattern) or None
+ """
+ if not isinstance(pattern, str):
+ return False
+ try:
+ return re.compile(pattern)
+ except re.error:
+ return False
+
+ def func_search(pattern):
+ def closure(v):
+ return pattern.search(v)
+
+ return closure
+
+ for dimension, regex in dimension_regex_dict.items():
+ valid = valid_pattern(regex)
+ if isinstance(dimension, str) and valid:
+ func = func_search(valid)
+ result.append(NAMED_PATTERN(description='_'.join([string, dimension]),
+ func=func))
+ return result or None
+
+
+def filter_data(raw_data, pre_filter):
+ """
+ :param raw_data: iterable of log lines
+ :param pre_filter: list of NAMED_PATTERN filters ('filter_include'/'filter_exclude') or None
+ :return: lines that passed the include/exclude filters
+ """
+
+ if not pre_filter:
+ return raw_data
+ filtered = raw_data
+ for elem in pre_filter:
+ if elem.description == 'filter_include':
+ filtered = filter(elem.func, filtered)
+ elif elem.description == 'filter_exclude':
+ filtered = filterfalse(elem.func, filtered)
+ return filtered
diff --git a/collectors/python.d.plugin/web_log/web_log.conf b/collectors/python.d.plugin/web_log/web_log.conf
new file mode 100644
index 0000000..220b7c2
--- /dev/null
+++ b/collectors/python.d.plugin/web_log/web_log.conf
@@ -0,0 +1,219 @@
+# netdata python.d.plugin configuration for web log
+#
+# This file is in YAML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+
+# ----------------------------------------------------------------------
+# PLUGIN CONFIGURATION
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# Additionally to the above, web_log also supports the following:
+#
+# path: 'PATH' # the path to web server log file
+# path: 'PATH[0-9]*[0-9]' # log files with date suffix are also supported
+# detailed_response_codes: yes/no # default: yes. Additional chart where response codes are not grouped
+# detailed_response_aggregate: yes/no # default: yes. If no, detailed response codes are split into one chart per status class
+# all_time: yes/no # default: yes. All time unique client IPs chart (50000 addresses ~ 400KB)
+# filter: # filter with regex
+# include: 'REGEX' # only rows that match the regex
+# exclude: 'REGEX' # all rows except those that match the regex
+# categories: # requests per url chart configuration
+# cacti: 'cacti.*' # name(dimension): REGEX to match
+# observium: 'observium.*' # name(dimension): REGEX to match
+# stub_status: 'stub_status' # name(dimension): REGEX to match
+# user_defined: # requests per pattern in <user_defined> field (custom_log_format)
+# cacti: 'cacti.*' # name(dimension): REGEX to match
+# observium: 'observium.*' # name(dimension): REGEX to match
+# stub_status: 'stub_status' # name(dimension): REGEX to match
+# custom_log_format: # define a custom log format
+# pattern: '(?P<address>[\da-f.:]+) -.*?"(?P<method>[A-Z]+) (?P<url>.*?)" (?P<code>[1-9]\d{2}) (?P<bytes_sent>\d+) (?P<resp_length>\d+) (?P<resp_time>\d+\.\d+) '
+# time_multiplier: 1000000 # type <int>/<float> - convert time to microseconds
+# histogram: [1,3,10,30,100, ...] # type list of int - Cumulative histogram of response time in milliseconds
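+#
+# Example job combining these options (name, path and patterns are illustrative):
+#
+# nginx_api:
+#   name: 'nginx_api'
+#   path: '/var/log/nginx/api.access.log'
+#   filter:
+#     exclude: '.*\.(css|js|png)$'
+#   categories:
+#     api_v1: '^/api/v1/'
+#     static: '^/static/'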
+
+# ----------------------------------------------------------------------
+# WEB SERVER CONFIGURATION
+#
+# Make sure the web server log directory and the web server log files
+# can be read by user 'netdata'.
+#
+# To enable the timings chart and the requests size dimension, the
+# web server needs to log them. This is how to add them:
+#
+# nginx:
+# log_format netdata '$remote_addr - $remote_user [$time_local] '
+# '"$request" $status $body_bytes_sent '
+# '$request_length $request_time $upstream_response_time '
+# '"$http_referer" "$http_user_agent"';
+# access_log /var/log/nginx/access.log netdata;
+#
+# apache (you need mod_logio enabled):
+# LogFormat "%h %l %u %t \"%r\" %>s %O %I %D \"%{Referer}i\" \"%{User-Agent}i\"" vhost_netdata
+# LogFormat "%h %l %u %t \"%r\" %>s %O %I %D \"%{Referer}i\" \"%{User-Agent}i\"" netdata
+# CustomLog "/var/log/apache2/access.log" netdata
+
+# ----------------------------------------------------------------------
+# VHOST AND PORT
+# if you want to graph the requests/sec per virtual host and per port (to compare the number of requests over http vs https)
+
+# in apache : (%v gives the hostname, %p the port number)
+# LogFormat "%v %p %h %t \"%r\" %>s %O %I %D \"%{Referer}i\" \"%{User-Agent}i\"" vhost_netdata
+#
+# and in this file in apache_vhosts_log section, add :
+# custom_log_format:
+# pattern: '(?P<vhost>[a-zA-Z\d.-_]+) (?P<port>\d+) (?P<address>[\da-f.:]+) \[.*\] "(?P<method>[A-Z]+)[^"]*" (?P<code>[1-9]\d{2}) (?P<bytes_sent>\d+) (?P<resp_length>\d+) (?P<resp_time>\d+)'
+
+# in nginx: ($host or $http_host give the hostname, $server_port the port number)
+# log_format netdatavhost '$host $server_port $remote_addr - $remote_user [$time_local] '
+# '"$request" $status $body_bytes_sent '
+# '$request_length $request_time $upstream_response_time '
+# '"$http_referer" "$http_user_agent"';
+#
+# access_log /var/log/nginx/access.log netdatavhost;
+#
+# be aware that an access_log directive in a server{} block overrides the one in http{}. If your vhosts have individual log
+# files, you have to specify the general netdata log in each vhost as a second access_log statement.
+#
+# and in this file in nginx_log section, add :
+# custom_log_format:
+# pattern: '(?P<vhost>[a-zA-Z\d.-_\[\]]+) (?P<port>\d+) (?P<address>[\da-f.:]+) .* "(?P<method>[A-Z]+)[^"]*" (?P<code>[1-9]\d{2}) (?P<bytes_sent>\d+) (?P<resp_length>\d+) (?P<resp_time>\d+)'
+
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them per web server will run (when they have the same name)
+
+
+# -------------------------------------------
+# nginx log on various distros
+
+# debian, arch
+nginx_log:
+ name: 'nginx'
+ path: '/var/log/nginx/access.log'
+
+# gentoo
+nginx_log2:
+ name: 'nginx'
+ path: '/var/log/nginx/localhost.access_log'
+
+
+# -------------------------------------------
+# apache log on various distros
+
+# debian
+apache_log:
+ name: 'apache'
+ path: '/var/log/apache2/access.log'
+
+# gentoo
+apache_log2:
+ name: 'apache'
+ path: '/var/log/apache2/access_log'
+
+# arch
+apache_log3:
+ name: 'apache'
+ path: '/var/log/httpd/access_log'
+
+# debian
+apache_vhosts_log:
+ name: 'apache_vhosts'
+ path: '/var/log/apache2/other_vhosts_access.log'
+
+
+# -------------------------------------------
+# gunicorn log on various distros
+
+gunicorn_log:
+ name: 'gunicorn'
+ path: '/var/log/gunicorn/access.log'
+
+gunicorn_log2:
+ name: 'gunicorn'
+ path: '/var/log/gunicorn/gunicorn-access.log'
+
+# -------------------------------------------
+# Apache Cache
+apache_cache:
+ name: 'apache_cache'
+ type: 'apache_cache'
+ path: '/var/log/apache/cache.log'
+
+apache2_cache:
+ name: 'apache_cache'
+ type: 'apache_cache'
+ path: '/var/log/apache2/cache.log'
+
+httpd_cache:
+ name: 'apache_cache'
+ type: 'apache_cache'
+ path: '/var/log/httpd/cache.log'
+
+# -------------------------------------------
+# Squid
+
+# debian/ubuntu
+squid_log1:
+ name: 'squid'
+ type: 'squid'
+ path: '/var/log/squid3/access.log'
+
+#gentoo
+squid_log2:
+ name: 'squid'
+ type: 'squid'
+ path: '/var/log/squid/access.log'
diff --git a/collectors/slabinfo.plugin/Makefile.am b/collectors/slabinfo.plugin/Makefile.am
new file mode 100644
index 0000000..07796ea
--- /dev/null
+++ b/collectors/slabinfo.plugin/Makefile.am
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+CLEANFILES = \
+ slabinfo.plugin \
+ $(NULL)
+
+include $(top_srcdir)/build/subst.inc
+SUFFIXES = .in
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/collectors/slabinfo.plugin/README.md b/collectors/slabinfo.plugin/README.md
new file mode 100644
index 0000000..21d83c9
--- /dev/null
+++ b/collectors/slabinfo.plugin/README.md
@@ -0,0 +1,29 @@
+<!--
+title: "slabinfo.plugin"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/slabinfo.plugin/README.md
+-->
+
+# slabinfo.plugin
+
+SLAB is a cache mechanism used by the Kernel to avoid fragmentation.
+
+Each internal structure (process, file descriptor, inode...) is stored within a SLAB.
+
+## Configuring Netdata for slabinfo
+
+The plugin is disabled by default because it collects and displays a huge number of metrics.
+To enable it set `slabinfo = yes` in the `plugins` section of the `netdata.conf` configuration file.
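+
+For example, in `netdata.conf`:
+
+```
+[plugins]
+  slabinfo = yes
+```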
+
+There is currently no configuration needed for the plugin itself.
+
+As `/proc/slabinfo` is only readable by root, this plugin is setuid root.
+
+## What it is useful for
+
+The slabinfo details give clues about actions performed on your system.
+In the following screenshot, you can clearly see a `find` run on an ext4 filesystem (the counts of `ext4_inode_cache` & `dentry` are rising fast), and a few seconds later, an admin issued `echo 3 > /proc/sys/vm/drop_caches`, as their count dropped.
+
+![netdata_slabinfo](https://user-images.githubusercontent.com/9157986/64433811-7f06e500-d0bf-11e9-8e1e-087497e61033.png)
+
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fslabinfo.plugin%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/slabinfo.plugin/slabinfo.c b/collectors/slabinfo.plugin/slabinfo.c
new file mode 100644
index 0000000..00e0d39
--- /dev/null
+++ b/collectors/slabinfo.plugin/slabinfo.c
@@ -0,0 +1,418 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "libnetdata/libnetdata.h"
+#include "daemon/common.h"
+
+#define PLUGIN_SLABINFO_NAME "slabinfo.plugin"
+#define PLUGIN_SLABINFO_PROCFILE "/proc/slabinfo"
+
+#define CHART_TYPE "mem"
+#define CHART_FAMILY "slab"
+#define CHART_PRIO 3000
+
+// #define slabdebug(...) if (debug) { fprintf(stderr, __VA_ARGS__); }
+#define slabdebug(args...) if (debug) { \
+ fprintf(stderr, "slabinfo.plugin DEBUG (%04d@%-10.10s:%-15.15s)::", __LINE__, __FILE__, __FUNCTION__); \
+ fprintf(stderr, ##args); \
+ fprintf(stderr, "\n"); }
+
+
+// ----------------------------------------------------------------------------
+
+// callback required by fatal()
+void netdata_cleanup_and_exit(int ret) {
+ exit(ret);
+}
+
+void send_statistics(const char *action, const char *action_result, const char *action_data) {
+ (void) action;
+ (void) action_result;
+ (void) action_data;
+ return;
+}
+
+// callbacks required by popen()
+void signals_block(void) {};
+void signals_unblock(void) {};
+void signals_reset(void) {};
+
+// callback required by eval()
+int health_variable_lookup(const char *variable, uint32_t hash, struct rrdcalc *rc, calculated_number *result) {
+ (void)variable;
+ (void)hash;
+ (void)rc;
+ (void)result;
+ return 0;
+};
+
+// required by get_system_cpus()
+char *netdata_configured_host_prefix = "";
+
+
+int running = 1;
+int debug = 0;
+
+// ----------------------------------------------------------------------------
+
+// Slabinfo format :
+// format 2.1 Was provided by 57ed3eda977a215f054102b460ab0eb5d8d112e6 (2.6.24-rc6) as:
+// seq_puts(m, "# name <active_objs> <num_objs> <objsize> <objperslab> <pagesperslab>");
+// seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
+// seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
+//
+// With max values:
+// seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d",
+// cache_name(s), sinfo.active_objs, sinfo.num_objs, s->size, sinfo.objects_per_slab, (1 << sinfo.cache_order));
+// seq_printf(m, " : tunables %4u %4u %4u",
+// sinfo.limit, sinfo.batchcount, sinfo.shared);
+// seq_printf(m, " : slabdata %6lu %6lu %6lu",
+// sinfo.active_slabs, sinfo.num_slabs, sinfo.shared_avail);
+//
+// If CONFIG_DEBUG_SLAB is set, it will also add columns from slabinfo_show_stats (for SLAB only):
+// seq_printf(m, " : globalstat %7lu %6lu %5lu %4lu %4lu %4lu %4lu %4lu %4lu",
+// allocs, high, grown, reaped, errors, max_freeable, node_allocs, node_frees, overflows);
+// seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
+// allochit, allocmiss, freehit, freemiss);
+//
+// Implementation choices:
+// - Iterates through a linked list of kmem_cache.
+// - Name is a char* from struct kmem_cache (mm/slab.h).
+// - max name size found is 24:
+// grep -roP 'kmem_cache_create\(".+"'| awk '{split($0,a,"\""); print a[2],length(a[2]); }' | sort -k2 -n
+// - Using uint64 everywhere, as the types fit and allow the use of standard helpers
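+//
+// Example /proc/slabinfo data line (illustrative values):
+//   kmalloc-64  12800  13056   64   64    1 : tunables  0  0  0 : slabdata  204  204  0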
+
+struct slabinfo {
+ // procfile fields
+ const char *name;
+ uint64_t active_objs;
+ uint64_t num_objs;
+ uint64_t obj_size;
+ uint64_t obj_per_slab;
+ uint64_t pages_per_slab;
+ uint64_t tune_limit;
+ uint64_t tune_batchcnt;
+ uint64_t tune_shared_factor;
+ uint64_t data_active_slabs;
+ uint64_t data_num_slabs;
+ uint64_t data_shared_avail;
+
+ // Calculated fields
+ uint64_t mem_usage;
+ uint64_t mem_waste;
+ uint8_t obj_filling;
+
+ uint32_t hash;
+ struct slabinfo *next;
+} *slabinfo_root = NULL, *slabinfo_next = NULL, *slabinfo_last_used = NULL;
+
+// The code is heavily inspired by "proc_net_dev.c" and "perf_plugin.c"
+
+// Get the existing object, or create a new one
+static struct slabinfo *get_slabstruct(const char *name) {
+ struct slabinfo *s;
+
+ slabdebug("--> Requested slabstruct %s", name);
+
+ uint32_t hash = simple_hash(name);
+
+ // Search it, from the next to the end
+ for (s = slabinfo_next; s; s = s->next) {
+ if (hash == s->hash && !strcmp(name, s->name)) {
+ slabdebug("<-- Found existing slabstruct after %s", slabinfo_last_used->name);
+ // Prepare the next run
+ slabinfo_next = s->next;
+ slabinfo_last_used = s;
+ return s;
+ }
+ }
+
+ // Search it from the beginning to the last position we used
+ for (s = slabinfo_root; s != slabinfo_last_used; s = s->next) {
+ if (hash == s->hash && !strcmp(name, s->name)) {
+ slabdebug("<-- Found existing slabstruct after root %s", slabinfo_root->name);
+ slabinfo_next = s->next;
+ slabinfo_last_used = s;
+ return s;
+ }
+ }
+
+ // Create a new one
+ s = callocz(1, sizeof(struct slabinfo));
+ s->name = strdupz(name);
+ s->hash = hash;
+
+ // Add it to the current position
+ if (slabinfo_root) {
+ slabdebug("<-- Creating new slabstruct after %s", slabinfo_last_used->name);
+ s->next = slabinfo_last_used->next;
+ slabinfo_last_used->next = s;
+ slabinfo_last_used = s;
+ }
+ else {
+ slabdebug("<-- Creating new slabstruct as root");
+ slabinfo_root = slabinfo_last_used = s;
+ }
+
+ return s;
+}
+
+
+// Read a full pass of slabinfo to update the structs
+struct slabinfo *read_file_slabinfo() {
+
+ slabdebug("-> Reading procfile %s", PLUGIN_SLABINFO_PROCFILE);
+
+ static procfile *ff = NULL;
+ static long slab_pagesize = 0;
+
+ if (unlikely(!slab_pagesize)) {
+ slab_pagesize = sysconf(_SC_PAGESIZE);
+ slabdebug(" Discovered pagesize: %ld", slab_pagesize);
+ }
+
+ if(unlikely(!ff)) {
+ ff = procfile_reopen(ff, PLUGIN_SLABINFO_PROCFILE, " ,:" , PROCFILE_FLAG_DEFAULT);
+ if(unlikely(!ff)) {
+ error("<- Cannot open file '%s", PLUGIN_SLABINFO_PROCFILE);
+ exit(1);
+ }
+ }
+
+ ff = procfile_readall(ff);
+ if(unlikely(!ff)) {
+ error("<- Cannot read file '%s'", PLUGIN_SLABINFO_PROCFILE);
+ exit(0);
+ }
+
+
+ // Iterate on all lines to populate / update the slabinfo struct
+ size_t lines = procfile_lines(ff), l;
+
+ slabdebug(" Read %lu lines from procfile", (unsigned long)lines);
+ for(l = 2; l < lines; l++) {
+ if (unlikely(procfile_linewords(ff, l) < 14)) {
+ slabdebug(" Line %zu has only %zu words, skipping", l, procfile_linewords(ff,l));
+ continue;
+ }
+
+ char *name = procfile_lineword(ff, l, 0);
+ struct slabinfo *s = get_slabstruct(name);
+
+ s->active_objs = str2uint64_t(procfile_lineword(ff, l, 1));
+ s->num_objs = str2uint64_t(procfile_lineword(ff, l, 2));
+ s->obj_size = str2uint64_t(procfile_lineword(ff, l, 3));
+ s->obj_per_slab = str2uint64_t(procfile_lineword(ff, l, 4));
+ s->pages_per_slab = str2uint64_t(procfile_lineword(ff, l, 5));
+
+ s->tune_limit = str2uint64_t(procfile_lineword(ff, l, 7));
+ s->tune_batchcnt = str2uint64_t(procfile_lineword(ff, l, 8));
+ s->tune_shared_factor = str2uint64_t(procfile_lineword(ff, l, 9));
+
+ s->data_active_slabs = str2uint64_t(procfile_lineword(ff, l, 11));
+ s->data_num_slabs = str2uint64_t(procfile_lineword(ff, l, 12));
+ s->data_shared_avail = str2uint64_t(procfile_lineword(ff, l, 13));
+
+ uint32_t memperslab = s->pages_per_slab * slab_pagesize;
+ // Internal fragmentation: loss per slab, due to objects not being a multiple of pagesize
+ //uint32_t lossperslab = memperslab - s->obj_per_slab * s->obj_size;
+
+ // Total usage = slabs * pages per slab * page size
+ s->mem_usage = (uint64_t)(s->data_num_slabs * memperslab);
+
+ // Wasted memory (filling): slabs allocated but not filled: sum total slab - sum total objects
+ s->mem_waste = s->mem_usage - (uint64_t)(s->active_objs * s->obj_size);
+ //if (s->data_num_slabs > 1)
+ // s->mem_waste += s->data_num_slabs * lossperslab;
+
+
+ // Slab filling efficiency
+ if (s->num_objs > 0)
+ s->obj_filling = 100 * s->active_objs / s->num_objs;
+ else
+ s->obj_filling = 0;
+
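+ // Worked example with the illustrative line above (4 KiB pages):
+ //   mem_usage = 204 slabs * 1 page/slab * 4096 B = 835584 B
+ //   mem_waste = 835584 - 12800 * 64 = 16384 B
+ //   obj_filling = 100 * 12800 / 13056 = 98%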
+ slabdebug(" Updated slab %s: %"PRIu64" %"PRIu64" %"PRIu64" %"PRIu64" %"PRIu64" / %"PRIu64" %"PRIu64" %"PRIu64" / %"PRIu64" %"PRIu64" %"PRIu64" / %"PRIu64" %"PRIu64" %hhu",
+ name, s->active_objs, s->num_objs, s->obj_size, s->obj_per_slab, s->pages_per_slab,
+ s->tune_limit, s->tune_batchcnt, s->tune_shared_factor,
+ s->data_active_slabs, s->data_num_slabs, s->data_shared_avail,
+ s->mem_usage, s->mem_waste, s->obj_filling);
+ }
+
+ return slabinfo_root;
+}
+
+
+
+unsigned int do_slab_stats(int update_every) {
+
+ static unsigned int loops = 0;
+ struct slabinfo *sactive = NULL, *s = NULL;
+
+ // Main processing loop
+ while (running) {
+
+ sactive = read_file_slabinfo();
+
+ // Init Charts
+ if (unlikely(loops == 0)) {
+ // Memory Usage
+ printf("CHART %s.%s '' 'Memory Usage' 'B' '%s' '' line %d %d %s\n"
+ , CHART_TYPE
+ , "slabmemory"
+ , CHART_FAMILY
+ , CHART_PRIO
+ , update_every
+ , PLUGIN_SLABINFO_NAME
+ );
+ for (s = sactive; s; s = s->next) {
+ printf("DIMENSION %s '' absolute 1 1\n", s->name);
+ }
+
+ // Slab active usage (filling)
+ printf("CHART %s.%s '' 'Object Filling' '%%' '%s' '' line %d %d %s\n"
+ , CHART_TYPE
+ , "slabfilling"
+ , CHART_FAMILY
+ , CHART_PRIO + 1
+ , update_every
+ , PLUGIN_SLABINFO_NAME
+ );
+ for (s = sactive; s; s = s->next) {
+ printf("DIMENSION %s '' absolute 1 1\n", s->name);
+ }
+
+ // Memory waste
+ printf("CHART %s.%s '' 'Memory waste' 'B' '%s' '' line %d %d %s\n"
+ , CHART_TYPE
+ , "slabwaste"
+ , CHART_FAMILY
+ , CHART_PRIO + 2
+ , update_every
+ , PLUGIN_SLABINFO_NAME
+ );
+ for (s = sactive; s; s = s->next) {
+ printf("DIMENSION %s '' absolute 1 1\n", s->name);
+ }
+ }
+
+
+ //
+ // Memory usage
+ //
+ printf("BEGIN %s.%s\n"
+ , CHART_TYPE
+ , "slabmemory"
+ );
+ for (s = sactive; s; s = s->next) {
+ printf("SET %s = %"PRIu64"\n"
+ , s->name
+ , s->mem_usage
+ );
+ }
+ printf("END\n");
+
+ //
+ // Slab active usage
+ //
+ printf("BEGIN %s.%s\n"
+ , CHART_TYPE
+ , "slabfilling"
+ );
+ for (s = sactive; s; s = s->next) {
+ printf("SET %s = %u\n"
+ , s->name
+ , s->obj_filling
+ );
+ }
+ printf("END\n");
+
+ //
+ // Memory waste
+ //
+ printf("BEGIN %s.%s\n"
+ , CHART_TYPE
+ , "slabwaste"
+ );
+ for (s = sactive; s; s = s->next) {
+ printf("SET %s = %"PRIu64"\n"
+ , s->name
+ , s->mem_waste
+ );
+ }
+ printf("END\n");
+
+
+ loops++;
+
+ sleep(update_every);
+ }
+
+ return loops;
+}
+
+
+
+
+// ----------------------------------------------------------------------------
+// main
+
+void usage(void) {
+ fprintf(stderr, "%s\n", program_name);
+ exit(1);
+}
+
+int main(int argc, char **argv) {
+
+ program_name = argv[0];
+ program_version = "0.1";
+ error_log_syslog = 0;
+
+ int update_every = 1, i, n, freq = 0;
+
+ for (i = 1; i < argc; i++) {
+ // Frequency parsing
+ if(isdigit(*argv[i]) && !freq) {
+ n = (int) str2l(argv[i]);
+ if (n > 0) {
+ if (n >= UPDATE_EVERY_MAX) {
+ error("Invalid interval value: %s", argv[i]);
+ exit(1);
+ }
+ freq = n;
+ }
+ }
+ else if (strcmp("debug", argv[i]) == 0) {
+ debug = 1;
+ continue;
+ }
+ else {
+ fprintf(stderr,
+ "netdata slabinfo.plugin %s\n"
+ "This program is a data collector plugin for netdata.\n"
+ "\n"
+ "Available command line options:\n"
+ "\n"
+ " COLLECTION_FREQUENCY data collection frequency in seconds\n"
+ " minimum: %d\n"
+ "\n"
+ " debug enable verbose output\n"
+ " default: disabled\n"
+ "\n",
+ program_version,
+ update_every
+ );
+ exit(1);
+ }
+ }
+
+ if(freq >= update_every)
+ update_every = freq;
+ else if(freq)
+ error("update frequency %d seconds is too small for slabinfo. Using %d.", freq, update_every);
+
+
+ // Call the main function. Time drift to be added
+ do_slab_stats(update_every);
+
+ return 0;
+}
diff --git a/collectors/statsd.plugin/Makefile.am b/collectors/statsd.plugin/Makefile.am
new file mode 100644
index 0000000..b01302d
--- /dev/null
+++ b/collectors/statsd.plugin/Makefile.am
@@ -0,0 +1,21 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
+
+statsdconfigdir=$(libconfigdir)/statsd.d
+dist_statsdconfig_DATA = \
+ example.conf \
+ $(NULL)
+
+userstatsdconfigdir=$(configdir)/statsd.d
+dist_userstatsdconfig_DATA = \
+ $(NULL)
+
+# Explicitly install directories to avoid permission issues due to umask
+install-exec-local:
+ $(INSTALL) -d $(DESTDIR)$(userstatsdconfigdir)
diff --git a/collectors/statsd.plugin/README.md b/collectors/statsd.plugin/README.md
new file mode 100644
index 0000000..070bfc5
--- /dev/null
+++ b/collectors/statsd.plugin/README.md
@@ -0,0 +1,605 @@
+<!--
+title: "statsd.plugin"
+description: "The Netdata Agent is a fully-featured statsd server that collects metrics from any custom application and visualizes them in real-time."
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/statsd.plugin/README.md
+-->
+
+# statsd.plugin
+
+StatsD is a system to collect data from any application. Applications send metrics to it, usually via non-blocking UDP communication, and StatsD servers collect these metrics, perform a few simple calculations on them and push them to backend time-series databases.
+
+If you want to learn more about the StatsD protocol, we have written a [blog post](https://www.netdata.cloud/blog/introduction-to-statsd/) about it!
+
+There is a [plethora of client libraries](https://github.com/etsy/statsd/wiki#client-implementations) for embedding statsd metrics to any application framework. This makes statsd quite popular for custom application metrics.
+
+Netdata is a fully featured statsd server. It can collect statsd formatted metrics, visualize them on its dashboards and store them in its database for long-term retention.
+
+Netdata statsd is inside Netdata (an internal plugin, running inside the Netdata daemon). It is configured via `netdata.conf` and by default listens on the standard statsd port 8125. Netdata supports both TCP and UDP packets at the same time.
+
+Since statsd is embedded in Netdata, you now have a statsd server on all your servers.
+
+Netdata statsd is fast. It can collect more than **1,200,000 metrics per second** on modern hardware, more than **200Mbps of sustained statsd traffic**, using 1 CPU core. The implementation uses two threads: one thread collects metrics, another one updates the charts from the collected data.
+
+## Metrics supported by Netdata
+
+Netdata fully supports the statsd protocol. All statsd client libraries can be used with Netdata too.
+
+- **Gauges**
+
+ The application sends `name:value|g`, where `value` is any **decimal/fractional** number; statsd reports the latest value collected and the number of times it was updated (events).
+
+ The application may increment or decrement a previous value by setting the first character of the value to `+` or `-` (so the only way to set a gauge to an absolute negative value is to first set it to zero). For example, sending `mygauge:10|g` and then `mygauge:+2|g` leaves the gauge at `12`.
+
+ [Sampling rate](#sampling-rates) is supported.
+
+ When a gauge is not collected and the setting is not to show gaps on the charts (the default), the last value will be shown, until a data collection event changes it.
+
+- **Counters** and **Meters**
+
+ The application sends `name:value|c`, `name:value|C` or `name:value|m`, where `value` is a positive or negative **integer** number of events that occurred; statsd reports the **rate** and the number of times it was updated (events).
+
+ `:value` can be omitted and statsd will assume it is `1`. `|c`, `|C` and `|m` can be omitted and statsd will assume it is `|m`. So, the application may send just `name` and statsd will parse it as `name:1|m`.
+
+ - Counters use `|c` (etsy/statsd compatible) or `|C` (brubeck compatible)
+ - Meters use `|m`
+
+ [Sampling rate](#sampling-rates) is supported.
+
+ When a counter or meter is not collected, Netdata **defaults** to showing a zero value, until a data collection event changes the value.
+
+- **Timers** and **Histograms**
+
+ The application sends `name:value|ms` or `name:value|h`, where `value` is any **decimal/fractional** number; statsd reports **min**, **max**, **average**, **sum**, **95th percentile**, **median** and **standard deviation** and the total number of times it was updated (events).
+
+ - Timers use `|ms`
+ - Histograms use `|h`
+
+ The only difference between the two is the `units` of the charts, as timers report *milliseconds*.
+
+ [Sampling rate](#sampling-rates) is supported.
+
+ When a timer or histogram is not collected, Netdata **defaults** to showing a zero value, until a data collection event changes the value.
+
+- **Sets**
+
+ The application sends `name:value|s`, where `value` is anything (**number or text**, leading and trailing spaces are removed); statsd reports the number of unique values sent and the number of times it was updated (events).
+
+ Sampling rate is **not** supported for Sets. `value` is always considered text.
+
+ When a set is not collected, Netdata **defaults** to showing a zero value, until a data collection event changes the value.
+
+#### Sampling Rates
+
+The application may append `|@sampling_rate`, where `sampling_rate` is a number from `0.0` to `1.0`, in order for StatsD to extrapolate the value and predict the total for the entire period. If the application reports to StatsD a value for 1/10th of the time, it can append `|@0.1` to the metrics it sends; for example, a counter received as `requests:1|c|@0.1` is accounted as 10 events.
+
+#### Overlapping metrics
+
+Netdata's StatsD server maintains different indexes for each of the types supported. This means the same metric `name` may exist under different types concurrently.
+
+#### Multiple metrics per packet
+
+Netdata accepts multiple metrics per packet if each is terminated with `\n`.
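+
+For example, a minimal Python sketch that sends three metrics in one UDP packet (metric names are illustrative; 8125 is the default statsd port):
+
+```
+import socket
+
+# Each metric in the packet is terminated with a newline.
+payload = 'myapp.mem.used:1024|g\nmyapp.requests:1|c\nmyapp.response.time:150|ms\n'
+
+sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
+sock.sendto(payload.encode(), ('localhost', 8125))
+sock.close()
+```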
+
+#### TCP packets
+
+Netdata listens for both TCP and UDP packets. For TCP, it is important to always append `\n` on each metric, as Netdata will use the newline character to detect if a metric is split into multiple TCP packets.
+
+On disconnect, Netdata will process the entire buffer, even if it is not terminated with a `\n`.
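+
+For example, the following sends two newline-terminated metrics (hypothetical names) over TCP, assuming Netdata's StatsD listens on `localhost:8125` and your `nc` variant supports `--send-only`:
+
+```sh
+printf "requests:1|c\nresponse.time:3.5|ms\n" | nc --send-only localhost 8125
+```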
+
+#### UDP packets
+
+When sending multiple metrics in a single UDP packet, it is important not to exceed the network MTU, which is usually about 1500 bytes.
+
+Netdata will accept UDP packets up to 9000 bytes, but the underlying network will not deliver packets that exceed the MTU.
+
+> You can read more about the network maximum transmission unit (MTU) in this Cloudflare [article](https://www.cloudflare.com/en-gb/learning/network-layer/what-is-mtu/).
+
+## Configuration
+
+You can find the configuration at `/etc/netdata/netdata.conf`:
+
+```
+[statsd]
+ # enabled = yes
+ # decimal detail = 1000
+ # update every (flushInterval) = 1
+ # udp messages to process at once = 10
+ # create private charts for metrics matching = *
+ # max private charts allowed = 200
+ # max private charts hard limit = 1000
+ # private charts memory mode = save
+ # private charts history = 3996
+ # histograms and timers percentile (percentThreshold) = 95.00000
+ # add dimension for number of events received = yes
+ # gaps on gauges (deleteGauges) = no
+ # gaps on counters (deleteCounters) = no
+ # gaps on meters (deleteMeters) = no
+ # gaps on sets (deleteSets) = no
+ # gaps on histograms (deleteHistograms) = no
+ # gaps on timers (deleteTimers) = no
+ # listen backlog = 4096
+ # default port = 8125
+ # bind to = udp:localhost:8125 tcp:localhost:8125
+```
+
+### StatsD main config options
+
+- `enabled = yes|no`
+
+ controls whether StatsD is enabled for this Netdata Agent. The default is enabled.
+
+- `default port = 8125`
+
+ controls the default port StatsD will use if no port is defined in the following setting.
+
+- `bind to = udp:localhost tcp:localhost`
+
+ is a space-separated list of IPs and ports to listen to. The format is `PROTOCOL:IP:PORT`; if `PORT` is omitted, the `default port` will be used. If `IP` is IPv6, it needs to be enclosed in `[]`. `IP` can also be `*` (to listen on all IPs) or even a hostname.
+
+- `update every (flushInterval) = 1` seconds, controls how often StatsD pushes the collected metrics to Netdata charts.
+
+- `decimal detail = 1000` controls the number of fractional digits in gauges and histograms. Netdata collects metrics using signed 64-bit integers, and their fractional detail is controlled using multipliers and divisors. This setting is used as the multiplier for all collected values, and also as the divisor, so that the final data is a floating point number with this fractional detail (1000 = X.0 - X.999, 10000 = X.0 - X.9999, etc.). An example override follows this list.
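+
+A minimal sketch of such an override in `netdata.conf` (the values here are arbitrary):
+
+```
+[statsd]
+    bind to = udp:*:8125 tcp:*:8125
+    update every (flushInterval) = 1
+    decimal detail = 10000
+```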
+
+The rest of the settings are discussed below.
+
+## StatsD charts
+
+Netdata can visualize StatsD collected metrics in 2 ways:
+
+1. Each metric gets its own **private chart**. This is the default and does not require any configuration. You can adjust the default parameters.
+
+2. **Synthetic charts** can be created, combining multiple metrics, independently of their metric types. For this type of chart, special configuration is required to define the chart title, type, units, its dimensions, etc.
+
+### Private metric charts
+
+Private charts are controlled with `create private charts for metrics matching = *`. This setting accepts a space-separated list of [simple patterns](/libnetdata/simple_pattern/README.md). Netdata will create private charts for all metrics **by default**.
+
+For example, to render charts for all `myapp.*` metrics, except `myapp.*.badmetric`, use:
+
+```
+create private charts for metrics matching = !myapp.*.badmetric myapp.*
+```
+
+You can configure Netdata's StatsD to use a different `memory mode` than the rest of the Netdata Agent. You can read more about `memory mode` in the [documentation](/database/README.md).
+
+The default behavior is to use the same settings as the rest of the Netdata Agent. If you wish to change them, edit the following settings (an example follows the list):
+- `private charts memory mode`
+- `private charts history`
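+
+A minimal sketch in `netdata.conf` (the values here are arbitrary):
+
+```
+[statsd]
+    private charts memory mode = ram
+    private charts history = 7200
+```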
+
+### Optimise private metric charts visualization and storage
+
+If you have thousands of metrics, each with its own private chart, you may notice that your web browser becomes slow when you view the Netdata dashboard (this is a web browser issue we need to address at the Netdata UI). So, Netdata has a protection that stops creating charts when `max private charts allowed = 200` (soft limit) is reached.
+
+The metrics above this soft limit are still processed by Netdata and will be available to be sent to backend time-series databases, up to `max private charts hard limit = 1000`. So, between 200 and 1000 charts, Netdata will still generate charts, but they will automatically be created with `memory mode = none` (Netdata will not maintain a database for them). These metrics will be sent to backend time series databases, if the backend configuration is set to `as collected`.
+
+Metrics above the hard limit are still collected, but they can only be used in synthetic charts (once a metric is added to a chart, it will be sent to backend servers too).
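+
+For example, to raise both limits, set in `netdata.conf` (the values here are arbitrary):
+
+```
+[statsd]
+    max private charts allowed = 500
+    max private charts hard limit = 2000
+```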
+
+Example private charts (automatically generated without any configuration):
+
+#### Counters
+
+- Scope: **count the events of something** (e.g. number of file downloads)
+- Format: `name:INTEGER|c` or `name:INTEGER|C` or `name|c`
+- StatsD increments the counter by the `INTEGER` number supplied (positive, or negative).
+
+![image](https://cloud.githubusercontent.com/assets/2662304/26131553/4a26d19c-3aa3-11e7-94e8-c53b5ed6ebc3.png)
+
+#### Gauges
+
+- Scope: **report the value of something** (e.g. cache memory used by the application server)
+- Format: `name:FLOAT|g`
+- StatsD remembers the last value supplied, and can increment or decrement the latest value if `FLOAT` begins with `+` or `-`.
+
+![image](https://cloud.githubusercontent.com/assets/2662304/26131575/5d54e6f0-3aa3-11e7-9099-bc4440cd4592.png)
+
+#### Histograms
+
+- Scope: **statistics on the size of events** (e.g. statistics on the sizes of files downloaded)
+- Format: `name:FLOAT|h`
+- StatsD maintains a list of all the values supplied and provides statistics on them.
+
+![image](https://cloud.githubusercontent.com/assets/2662304/26131587/704de72a-3aa3-11e7-9ea9-0d2bb778c150.png)
+
+The same chart with `sum` unselected, to show the detail of the dimensions supported:
+![image](https://cloud.githubusercontent.com/assets/2662304/26131598/8076443a-3aa3-11e7-9ffa-ea535aee9c9f.png)
+
+#### Meters
+
+This is identical to `counter`.
+
+- Scope: **count the events of something** (e.g. number of file downloads)
+- Format: `name:INTEGER|m` or `name|m` or just `name`
+- StatsD increments the counter by the `INTEGER` number supplied (positive, or negative).
+
+![image](https://cloud.githubusercontent.com/assets/2662304/26131605/8fdf5a06-3aa3-11e7-963f-7ecf207d1dbc.png)
+
+#### Sets
+
+- Scope: **count the unique occurrences of something** (e.g. unique filenames downloaded, or unique users that downloaded files)
+- Format: `name:TEXT|s`
+- StatsD maintains a unique index of all values supplied, and reports the unique entries in it.
+
+![image](https://cloud.githubusercontent.com/assets/2662304/26131612/9eaa7b1a-3aa3-11e7-903b-d881e9a35be2.png)
+
+#### Timers
+
+- Scope: **statistics on the duration of events** (e.g. statistics for the duration of file downloads)
+- Format: `name:FLOAT|ms`
+- StatsD maintains a list of all the values supplied and provides statistics on them.
+
+![image](https://cloud.githubusercontent.com/assets/2662304/26131620/acbea6a4-3aa3-11e7-8bdd-4a8996847767.png)
+
+The same chart with the `sum` unselected:
+![image](https://cloud.githubusercontent.com/assets/2662304/26131629/bc34f2d2-3aa3-11e7-8a07-f2fc94ba4352.png)
+
+### Synthetic StatsD charts
+
+Use synthetic charts to create dedicated sections on the dashboard to render your StatsD charts.
+
+Synthetic charts are organized in:
+
+- **applications**, aka sections in the Netdata Dashboard.
+- **charts for each application**, aka families in the Netdata Dashboard.
+- **StatsD metrics for each chart**, aka charts and contexts in the Netdata Dashboard.
+
+> You can read more about how the Netdata Agent organizes information in the relevant [documentation](/web/README.md)
+
+For each application you need to create a `.conf` file in `/etc/netdata/statsd.d`.
+
+For example, if you want to monitor the application `myapp` using StatsD and Netdata, create the file `/etc/netdata/statsd.d/myapp.conf`, with this content:
+
+```
+[app]
+ name = myapp
+ metrics = myapp.*
+ private charts = no
+ gaps when not collected = no
+ memory mode = ram
+ history = 60
+
+[dictionary]
+ m1 = metric1
+ m2 = metric2
+
+# replace 'mychart' with the chart id
+# the chart will be named: myapp.mychart
+[mychart]
+ name = mychart
+ title = my chart title
+ family = my family
+ context = chart.context
+ units = tests/s
+ priority = 91000
+ type = area
+ dimension = myapp.metric1 m1
+ dimension = myapp.metric2 m2
+```
+
+Using the above configuration, `myapp` gets its own section on the dashboard, with one chart that has 2 dimensions.
+
+`[app]` starts a new application definition. The supported settings in this section are:
+
+- `name` defines the name of the app.
+- `metrics` is a Netdata [simple pattern](/libnetdata/simple_pattern/README.md). This pattern should match all the possible StatsD metrics that will be participating in the application `myapp`.
+- `private charts = yes|no`, enables or disables private charts for the metrics matched.
+- `gaps when not collected = yes|no`, enables or disables gaps on the charts of the application, in case no metrics are collected.
+- `memory mode` sets the memory mode for all charts of the application. The default is the global default for Netdata (not the global default for StatsD private charts).
+- `history` sets the size of the round robin database for this application. The default is the global default for Netdata (not the global default for StatsD private charts). This is only relevant if you use `memory mode = save`. Read more on our guide: [longer metrics storage](https://learn.netdata.cloud/guides/longer-metrics-storage).
+
+`[dictionary]` defines name-value associations. These are used to rename metrics when they are added to synthetic charts. Metric names are also defined at each `dimension` line. However, using the dictionary, dimension names can be declared globally for each app, and it is the only way to rename dimensions when using patterns. Of course, the dictionary can be empty or missing.
+
+Then, add any number of charts. Each chart should start with `[id]`. The chart will be called `app_name.id`. `family` controls the submenu on the dashboard. `context` controls the alarm templates. `priority` controls the ordering of the charts on the dashboard. The rest of the settings are informational.
+
+Add any number of metrics to a chart, using `dimension` lines. These lines accept 6 space-separated parameters:
+
+1. the metric name, as it is collected (it has to be matched by the `metrics =` pattern of the app)
+2. the dimension name, as it should be shown on the chart
+3. an optional selector (type) of the value to be shown (see below)
+4. an optional multiplier
+5. an optional divider
+6. optional flags, space separated and enclosed in quotes. All the external plugins `DIMENSION` flags can be used. Currently the only usable flag is `hidden`, to add the dimension but not show it on the dashboard. This is usually needed to make the values available for percentage calculations, or to use them in alarms.
+
+So, the format is this:
+
+```
+dimension = [pattern] METRIC NAME TYPE MULTIPLIER DIVIDER OPTIONS
+```
+
+`pattern` is a keyword. When set, `METRIC` is expected to be a Netdata [simple pattern](/libnetdata/simple_pattern/README.md) that will be used to match all the StatsD metrics to be added to the chart. So, `pattern` automatically matches any number of StatsD metrics, all of which will be added as separate chart dimensions.
+
+`TYPE`, `MULTIPLIER`, `DIVIDER` and `OPTIONS` are optional.
+
+`TYPE` can be:
+
+- `events` to show the number of events received by StatsD for this metric
+- `last` to show the last value, as calculated at the flush interval of the metric (the default)
+
+Then, for histograms and timers, the following types are also supported (an example follows the list):
+
+- `min`, show the minimum value
+- `max`, show the maximum value
+- `sum`, show the sum of all values
+- `average` (same as `last`)
+- `percentile`, show the 95th percentile (or any other percentile, as configured at StatsD global config)
+- `median`, show the median of all values (i.e. sort all values and get the middle value)
+- `stddev`, show the standard deviation of the values
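+
+For example, a minimal sketch of a chart rendering several of these selectors for one hypothetical timer metric, `myapp.response_time`:
+
+```
+[response_times]
+    ...
+    units = milliseconds
+    dimension = myapp.response_time avg average 1 1
+    dimension = myapp.response_time p95 percentile 1 1
+    dimension = myapp.response_time max max 1 1 'hidden'
+```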
+
+#### Example synthetic charts
+
+StatsD metrics: `foo` and `bar`.
+
+Contents of file `/etc/netdata/statsd.d/foobar.conf`:
+
+```
+[app]
+ name = foobarapp
+ metrics = foo bar
+ private charts = yes
+
+[foobar_chart1]
+ title = Hey, foo and bar together
+ family = foobar_family
+ context = foobarapp.foobars
+ units = foobars
+ type = area
+ dimension = foo 'foo me' last 1 1
+ dimension = bar 'bar me' last 1 1
+```
+
+Metrics sent to statsd: `foo:10|g` and `bar:20|g`.
+
+Private charts:
+
+![screenshot from 2017-08-03 23-28-19](https://user-images.githubusercontent.com/2662304/28942295-7c3a73a8-78a3-11e7-88e5-a9a006bb7465.png)
+
+Synthetic chart:
+
+![screenshot from 2017-08-03 23-29-14](https://user-images.githubusercontent.com/2662304/28942317-958a2c68-78a3-11e7-853f-32850141dd36.png)
+
+#### Renaming StatsD metrics
+
+You can define a dictionary to rename metrics sent by StatsD clients. This enables you to send the response code `"200"` and have Netdata visualize it as `successful connection`.
+
+The `[dictionary]` section accepts any number of `name = value` pairs.
+
+Netdata uses this dictionary as follows:
+
+1. When a `dimension` has a non-empty `NAME`, that name is looked up in the dictionary.
+
+2. If the above lookup finds nothing, or the `dimension` has an empty `NAME`, the original StatsD metric name is looked up in the dictionary.
+
+3. If either of the above succeeds, Netdata uses the `value` of the dictionary entry to set the name of the dimension. The dimension will have the original StatsD metric name as its ID, and the dictionary value as its name.
+
+Use the dictionary in 2 ways:
+
+1. set `dimension = myapp.metric1 ''` and have `myapp.metric1 = metric1 name` in the dictionary
+2. set `dimension = myapp.metric1 'm1'` and have `m1 = metric1 name` in the dictionary
+
+In both cases, the dimension will be added with ID `myapp.metric1` and will be named `metric1 name`. So, in alarms, you can use either `${myapp.metric1}` or `${metric1 name}`.
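+
+Putting it together, a minimal sketch using the second way (the metric name is hypothetical):
+
+```
+[dictionary]
+    m1 = metric1 name
+
+[mychart]
+    ...
+    dimension = myapp.metric1 'm1' last 1 1
+```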
+
+> Keep in mind that if you add the same StatsD metric to a chart multiple times, Netdata will append `TYPE` to the dimension ID, so `myapp.metric1` will be added as `myapp.metric1_last` or `myapp.metric1_events`, etc. If you add the same metric with the same `TYPE` to a chart multiple times, Netdata will also append an incremental counter to the dimension ID, i.e. `myapp.metric1_last1`, `myapp.metric1_last2`, etc.
+
+#### Dimension patterns
+
+Netdata allows adding multiple dimensions to a chart, by matching the StatsD metrics with a Netdata simple pattern.
+
+Assume we have an API that provides StatsD metrics for each response code per method it supports, like these:
+
+```
+myapp.api.get.200
+myapp.api.get.400
+myapp.api.get.500
+myapp.api.del.200
+myapp.api.del.400
+myapp.api.del.500
+myapp.api.post.200
+myapp.api.post.400
+myapp.api.post.500
+myapp.api.all.200
+myapp.api.all.400
+myapp.api.all.500
+```
+
+In order to add all the response codes of `myapp.api.get` to a chart, we simply make the following configuration:
+
+```
+[api_get_responses]
+ ...
+ dimension = pattern 'myapp.api.get.*' '' last 1 1
+```
+
+The above will add dimensions named `200`, `400` and `500`. Netdata extracts the wildcard part of the metric name, so the dimensions will be named with whatever the `*` matched.
+
+You can rename the dimensions with this:
+
+```
+[dictionary]
+ get.200 = 200 ok
+ get.400 = 400 bad request
+ get.500 = 500 cannot connect to db
+
+[api_get_responses]
+ ...
+ dimension = pattern 'myapp.api.get.*' 'get.' last 1 1
+```
+
+Note that we added a `NAME` to the dimension line, with value `get.`. This is prefixed to the wildcarded part of the metric name to compose the key for looking up the dictionary. So `500` became `get.500`, which was looked up in the dictionary to find the value `500 cannot connect to db`. This way we can have different dimension names for each of the API methods (i.e. `get.500 = 500 cannot connect to db`, while `post.500 = 500 cannot write to disk`).
+
+To add all API methods to a chart, you can do this:
+
+```
+[ok_by_method]
+ ...
+ dimension = pattern 'myapp.api.*.200' '' last 1 1
+```
+
+The above will add `get`, `post`, `del` and `all` to the chart.
+
+If `all` is not wanted (a `stacked` chart does not need the `all` dimension, since the sum of the dimensions provides the total), the line should be:
+
+```
+[ok_by_method]
+ ...
+ dimension = pattern '!myapp.api.all.* myapp.api.*.200' '' last 1 1
+```
+
+With the above, all methods except `all` will be added to the chart.
+
+To automatically rename the methods, you can use this:
+
+```
+[dictionary]
+ method.get = GET
+ method.post = ADD
+ method.del = DELETE
+
+[ok_by_method]
+ ...
+ dimension = pattern '!myapp.api.all.* myapp.api.*.200' 'method.' last 1 1
+```
+
+Using the above, the dimensions will be added as `GET`, `ADD` and `DELETE`.
+
+## StatsD examples
+
+### Python
+
+It's really easy to instrument your Python application with StatsD, for example using [jsocol/pystatsd](https://github.com/jsocol/pystatsd).
+
+```python
+import statsd
+c = statsd.StatsClient('localhost', 8125)
+c.incr('foo') # Increment the 'foo' counter.
+for i in range(100000000):
+ c.incr('bar')
+ c.incr('foo')
+ if i % 3:
+ c.decr('bar')
+ c.timing('stats.timed', 320) # Record a 320ms 'stats.timed'.
+```
+
+You can find detailed documentation on their [documentation page](https://statsd.readthedocs.io/en/v3.3/).
+
+### Javascript and Node.js
+
+Using the client library by [sivy/node-statsd](https://github.com/sivy/node-statsd), you can easily embed StatsD into your Node.js project.
+
+```javascript
+ var StatsD = require('node-statsd'),
+ client = new StatsD();
+
+ // Timing: sends a timing command with the specified milliseconds
+ client.timing('response_time', 42);
+
+ // Increment: Increments a stat by a value (default is 1)
+ client.increment('my_counter');
+
+ // Decrement: Decrements a stat by a value (default is -1)
+ client.decrement('my_counter');
+
+ // Using the callback
+ client.set(['foo', 'bar'], 42, function(error, bytes){
+ //this only gets called once after all messages have been sent
+ if(error){
+ console.error('Oh noes! There was an error:', error);
+ } else {
+ console.log('Successfully sent', bytes, 'bytes');
+ }
+ });
+
+ // Sampling, tags and callback are optional and could be used in any combination
+ client.histogram('my_histogram', 42, 0.25); // 25% Sample Rate
+ client.histogram('my_histogram', 42, ['tag']); // User-defined tag
+ client.histogram('my_histogram', 42, next); // Callback
+ client.histogram('my_histogram', 42, 0.25, ['tag']);
+ client.histogram('my_histogram', 42, 0.25, next);
+ client.histogram('my_histogram', 42, ['tag'], next);
+ client.histogram('my_histogram', 42, 0.25, ['tag'], next);
+```
+
+### Other languages
+
+You can also use StatsD with:
+- Golang, thanks to [alexcesaro/statsd](https://github.com/alexcesaro/statsd)
+- Ruby, thanks to [reinh/statsd](https://github.com/reinh/statsd)
+- Java, thanks to [DataDog/java-dogstatsd-client](https://github.com/DataDog/java-dogstatsd-client)
+
+### Shell
+
+Getting proper client support for a programming language is not always easy, but a shell is available on most UNIX systems. You can use shell and `nc` to easily instrument your systems and send metric data to Netdata's StatsD. Here is how:
+
+The command you need to run is:
+
+```sh
+echo "NAME:VALUE|TYPE" | nc -u --send-only localhost 8125
+```
+
+Where:
+
+- `NAME` is the metric name
+- `VALUE` is the value for that metric (**gauges** `|g`, **timers** `|ms` and **histograms** `|h` accept decimal/fractional numbers, **counters** `|c` and **meters** `|m` accept integers, **sets** `|s` accept anything)
+- `TYPE` is one of `g`, `ms`, `h`, `c`, `m`, `s` to select the metric type.
+
+So, to set `metric1` as gauge to value `10`, use:
+
+```sh
+echo "metric1:10|g" | nc -u --send-only localhost 8125
+```
+
+To increment `metric2` by `10`, as a counter, use:
+
+```sh
+echo "metric2:10|c" | nc -u --send-only localhost 8125
+```
+
+You can send multiple metrics like this:
+
+```sh
+# send multiple metrics via UDP
+printf "metric1:10|g\nmetric2:10|c\n" | nc -u --send-only localhost 8125
+```
+
+Remember, for UDP communication each packet should not exceed the MTU. So, if you plan to push too many metrics at once, prefer TCP communication:
+
+```sh
+# send multiple metrics via TCP
+printf "metric1:10|g\nmetric2:10|c\n" | nc --send-only localhost 8125
+```
+
+You can also use this little function to take care of all the details:
+
+```sh
+#!/usr/bin/env bash
+
+STATSD_HOST="localhost"
+STATSD_PORT="8125"
+statsd() {
+ local udp="-u" all="${*}"
+
+ # if the string length of all parameters given is above 1000, use TCP
+ [ "${#all}" -gt 1000 ] && udp=
+
+ while [ -n "${1}" ]
+ do
+ printf "%s\n" "${1}"
+ shift
+ done | nc ${udp} --send-only ${STATSD_HOST} ${STATSD_PORT} || return 1
+
+ return 0
+}
+```
+
+You can use it like this:
+
+```sh
+# first, source it in your script
+source statsd.sh
+
+# then, at any point:
+statsd "metric1:10|g" "metric2:10|c" ...
+```
+
+The function is smart enough to call `nc` just once and pass all the metrics to it. It will also automatically switch to TCP if the metrics to send exceed 1000 bytes.
+
+If you have gotten this far, make sure to check out our [Community Forums](https://community.netdata.cloud) to share your experience using Netdata with StatsD.
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fstatsd.plugin%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/statsd.plugin/example.conf b/collectors/statsd.plugin/example.conf
new file mode 100644
index 0000000..2c7de6c
--- /dev/null
+++ b/collectors/statsd.plugin/example.conf
@@ -0,0 +1,64 @@
+# statsd synthetic charts configuration
+
+# You can add many .conf files in /etc/netdata/statsd.d/,
+# one for each of your apps.
+
+# start a new app - you can add many apps in the same file
+[app]
+ # give a name for this app
+ # this controls the main menu on the dashboard
+ # and will be the prefix for all charts of the app
+ name = myexampleapp
+
+ # match all the metrics of the app
+ metrics = myexampleapp.*
+
+ # shall private charts of these metrics be created?
+ private charts = no
+
+ # shall gaps be shown when metrics are not collected?
+ gaps when not collected = no
+
+ # the memory mode for the charts of this app: none|ram|map|save
+ # the default is to use the global memory mode
+ #memory mode = ram
+
+ # the history size for the charts of this app, in seconds
+ # the default is to use the global history
+ #history = 3600
+
+# create a chart
+# this is its id - the chart will be named myexampleapp.myexamplechart
+[myexamplechart]
+ # a name for the chart, similar to the id (2 names for each chart)
+ name = myexamplechart
+
+ # the chart title
+ title = my chart title
+
+ # the submenu of the dashboard
+ family = my family
+
+ # the context for alarm templates
+ context = chart.context
+
+ # the units of the chart
+ units = tests/s
+
+ # the sorting priority of the chart on the dashboard
+ priority = 91000
+
+ # the type of chart to create: line | area | stacked
+ type = area
+
+ # one or more dimensions for the chart
+ # type = events | last | min | max | sum | average | percentile | median | stddev
+ # events = the number of events for this metric
+ # last = the last value collected
+ # all the others are only valid for histograms and timers
+ dimension = myexampleapp.metric1 avg average 1 1
+ dimension = myexampleapp.metric1 lower min 1 1
+ dimension = myexampleapp.metric1 upper max 1 1
+ dimension = myexampleapp.metric2 other last 1 1
+
+# You can add as many charts as needed
diff --git a/collectors/statsd.plugin/statsd.c b/collectors/statsd.plugin/statsd.c
new file mode 100644
index 0000000..a8f9413
--- /dev/null
+++ b/collectors/statsd.plugin/statsd.c
@@ -0,0 +1,2560 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "statsd.h"
+
+#define STATSD_CHART_PREFIX "statsd"
+
+#define PLUGIN_STATSD_NAME "statsd.plugin"
+
+// --------------------------------------------------------------------------------------
+
+// #define STATSD_MULTITHREADED 1
+
+#ifdef STATSD_MULTITHREADED
+// DO NOT ENABLE MULTITHREADING - IT IS NOT WELL TESTED
+#define STATSD_AVL_TREE avl_tree_lock
+#define STATSD_AVL_INSERT avl_insert_lock
+#define STATSD_AVL_SEARCH avl_search_lock
+#define STATSD_AVL_INDEX_INIT { .avl_tree = { NULL, statsd_metric_compare }, .rwlock = AVL_LOCK_INITIALIZER }
+#define STATSD_FIRST_PTR_MUTEX netdata_mutex_t first_mutex
+#define STATSD_FIRST_PTR_MUTEX_INIT .first_mutex = NETDATA_MUTEX_INITIALIZER
+#define STATSD_FIRST_PTR_MUTEX_LOCK(index) netdata_mutex_lock(&((index)->first_mutex))
+#define STATSD_FIRST_PTR_MUTEX_UNLOCK(index) netdata_mutex_unlock(&((index)->first_mutex))
+#define STATSD_DICTIONARY_OPTIONS DICTIONARY_FLAG_DEFAULT
+#else
+#define STATSD_AVL_TREE avl_tree_type
+#define STATSD_AVL_INSERT avl_insert
+#define STATSD_AVL_SEARCH avl_search
+#define STATSD_AVL_INDEX_INIT { .root = NULL, .compar = statsd_metric_compare }
+#define STATSD_FIRST_PTR_MUTEX
+#define STATSD_FIRST_PTR_MUTEX_INIT
+#define STATSD_FIRST_PTR_MUTEX_LOCK(index)
+#define STATSD_FIRST_PTR_MUTEX_UNLOCK(index)
+#define STATSD_DICTIONARY_OPTIONS DICTIONARY_FLAG_SINGLE_THREADED
+#endif
+
+#define STATSD_DECIMAL_DETAIL 1000 // floating point values get multiplied by this, with the same divisor
+
+// --------------------------------------------------------------------------------------------------------------------
+// data specific to each metric type
+
+typedef struct statsd_metric_gauge {
+ LONG_DOUBLE value;
+} STATSD_METRIC_GAUGE;
+
+typedef struct statsd_metric_counter { // counter and meter
+ long long value;
+} STATSD_METRIC_COUNTER;
+
+typedef struct statsd_histogram_extensions {
+ netdata_mutex_t mutex;
+
+ // average is stored in metric->last
+ collected_number last_min;
+ collected_number last_max;
+ collected_number last_percentile;
+ collected_number last_median;
+ collected_number last_stddev;
+ collected_number last_sum;
+
+ int zeroed;
+
+ RRDDIM *rd_min;
+ RRDDIM *rd_max;
+ RRDDIM *rd_percentile;
+ RRDDIM *rd_median;
+ RRDDIM *rd_stddev;
+ RRDDIM *rd_sum;
+
+ size_t size;
+ size_t used;
+ LONG_DOUBLE *values; // dynamic array of values collected
+} STATSD_METRIC_HISTOGRAM_EXTENSIONS;
+
+typedef struct statsd_metric_histogram { // histogram and timer
+ STATSD_METRIC_HISTOGRAM_EXTENSIONS *ext;
+} STATSD_METRIC_HISTOGRAM;
+
+typedef struct statsd_metric_set {
+ DICTIONARY *dict;
+ size_t unique;
+} STATSD_METRIC_SET;
+
+
+// --------------------------------------------------------------------------------------------------------------------
+// this is a metric - for all types of metrics
+
+typedef enum statsd_metric_options {
+ STATSD_METRIC_OPTION_NONE = 0x00000000, // no options set
+ STATSD_METRIC_OPTION_SHOW_GAPS_WHEN_NOT_COLLECTED = 0x00000001, // do not update the chart dimension, when this metric is not collected
+ STATSD_METRIC_OPTION_PRIVATE_CHART_ENABLED = 0x00000002, // render a private chart for this metric
+ STATSD_METRIC_OPTION_PRIVATE_CHART_CHECKED = 0x00000004, // the metric has been checked if it should get private chart or not
+ STATSD_METRIC_OPTION_CHART_DIMENSION_COUNT = 0x00000008, // show the count of events for this private chart
+ STATSD_METRIC_OPTION_CHECKED_IN_APPS = 0x00000010, // set when this metric has been checked against apps
+ STATSD_METRIC_OPTION_USED_IN_APPS = 0x00000020, // set when this metric is used in apps
+ STATSD_METRIC_OPTION_CHECKED = 0x00000040, // set when the charting thread checks this metric for use in charts (its usefulness)
+ STATSD_METRIC_OPTION_USEFUL = 0x00000080, // set when the charting thread finds the metric useful (i.e. used in a chart)
+} STATS_METRIC_OPTIONS;
+
+typedef enum statsd_metric_type {
+ STATSD_METRIC_TYPE_GAUGE,
+ STATSD_METRIC_TYPE_COUNTER,
+ STATSD_METRIC_TYPE_METER,
+ STATSD_METRIC_TYPE_TIMER,
+ STATSD_METRIC_TYPE_HISTOGRAM,
+ STATSD_METRIC_TYPE_SET
+} STATSD_METRIC_TYPE;
+
+
+typedef struct statsd_metric {
+ avl avl; // indexing - has to be first
+
+ const char *name; // the name of the metric
+ uint32_t hash; // hash of the name
+
+ STATSD_METRIC_TYPE type;
+
+ // metadata about data collection
+ collected_number events; // the number of times this metric has been collected (never resets)
+ size_t count; // the number of times this metric has been collected since the last flush
+
+ // the actual collected data
+ union {
+ STATSD_METRIC_GAUGE gauge;
+ STATSD_METRIC_COUNTER counter;
+ STATSD_METRIC_HISTOGRAM histogram;
+ STATSD_METRIC_SET set;
+ };
+
+ // chart related members
+ STATS_METRIC_OPTIONS options; // STATSD_METRIC_OPTION_* (bitfield)
+ char reset; // set to 1 by the charting thread to instruct the collector thread(s) to reset this metric
+ collected_number last; // the last value sent to netdata
+ RRDSET *st; // the private chart of this metric
+ RRDDIM *rd_value; // the dimension of this metric value
+ RRDDIM *rd_count; // the dimension for the number of events received
+
+ // linking, used for walking through all metrics
+ struct statsd_metric *next;
+ struct statsd_metric *next_useful;
+} STATSD_METRIC;
+
+
+// --------------------------------------------------------------------------------------------------------------------
+// each type of metric has its own index
+
+typedef struct statsd_index {
+ char *name; // the name of the index of metrics
+ size_t events; // the number of events processed for this index
+ size_t metrics; // the number of metrics in this index
+ size_t useful; // the number of useful metrics in this index
+
+ STATSD_AVL_TREE index; // the AVL tree
+
+ STATSD_METRIC *first; // the linked list of metrics (new metrics are added in front)
+ STATSD_METRIC *first_useful; // the linked list of useful metrics (new metrics are added in front)
+ STATSD_FIRST_PTR_MUTEX; // when multi-threading is enabled, a lock to protect the linked list
+
+ STATS_METRIC_OPTIONS default_options; // default options for all metrics in this index
+} STATSD_INDEX;
+
+static int statsd_metric_compare(void* a, void* b);
+
+// --------------------------------------------------------------------------------------------------------------------
+// synthetic charts
+
+typedef enum statsd_app_chart_dimension_value_type {
+ STATSD_APP_CHART_DIM_VALUE_TYPE_EVENTS,
+ STATSD_APP_CHART_DIM_VALUE_TYPE_LAST,
+ STATSD_APP_CHART_DIM_VALUE_TYPE_AVERAGE,
+ STATSD_APP_CHART_DIM_VALUE_TYPE_SUM,
+ STATSD_APP_CHART_DIM_VALUE_TYPE_MIN,
+ STATSD_APP_CHART_DIM_VALUE_TYPE_MAX,
+ STATSD_APP_CHART_DIM_VALUE_TYPE_PERCENTILE,
+ STATSD_APP_CHART_DIM_VALUE_TYPE_MEDIAN,
+ STATSD_APP_CHART_DIM_VALUE_TYPE_STDDEV
+} STATSD_APP_CHART_DIM_VALUE_TYPE;
+
+typedef struct statsd_app_chart_dimension {
+ const char *name; // the name of this dimension
+ const char *metric; // the source metric name of this dimension
+ uint32_t metric_hash; // hash for fast string comparisons
+
+ SIMPLE_PATTERN *metric_pattern; // set when the 'metric' is a simple pattern
+
+ collected_number multiplier; // the multiplier of the dimension
+ collected_number divisor; // the divisor of the dimension
+ RRDDIM_FLAGS flags; // the RRDDIM flags for this dimension
+
+ STATSD_APP_CHART_DIM_VALUE_TYPE value_type; // which value to use of the source metric
+
+ RRDDIM *rd; // a pointer to the RRDDIM that has been created for this dimension
+ collected_number *value_ptr; // a pointer to the source metric value
+ RRD_ALGORITHM algorithm; // the algorithm of this dimension
+
+ struct statsd_app_chart_dimension *next; // the next dimension for this chart
+} STATSD_APP_CHART_DIM;
+
+typedef struct statsd_app_chart {
+ const char *source;
+ const char *id;
+ const char *name;
+ const char *title;
+ const char *family;
+ const char *context;
+ const char *units;
+ long priority;
+ RRDSET_TYPE chart_type;
+ STATSD_APP_CHART_DIM *dimensions;
+ size_t dimensions_count;
+ size_t dimensions_linked_count;
+
+ RRDSET *st;
+ struct statsd_app_chart *next;
+} STATSD_APP_CHART;
+
+typedef struct statsd_app {
+ const char *name;
+ SIMPLE_PATTERN *metrics;
+ STATS_METRIC_OPTIONS default_options;
+ RRD_MEMORY_MODE rrd_memory_mode;
+ DICTIONARY *dict;
+ long rrd_history_entries;
+
+ const char *source;
+ STATSD_APP_CHART *charts;
+ struct statsd_app *next;
+} STATSD_APP;
+
+// --------------------------------------------------------------------------------------------------------------------
+// global statsd data
+
+struct collection_thread_status {
+ int status;
+ size_t max_sockets;
+
+ netdata_thread_t thread;
+ struct rusage rusage;
+ RRDSET *st_cpu;
+ RRDDIM *rd_user;
+ RRDDIM *rd_system;
+};
+
+static struct statsd {
+ STATSD_INDEX gauges;
+ STATSD_INDEX counters;
+ STATSD_INDEX timers;
+ STATSD_INDEX histograms;
+ STATSD_INDEX meters;
+ STATSD_INDEX sets;
+ size_t unknown_types;
+ size_t socket_errors;
+ size_t tcp_socket_connects;
+ size_t tcp_socket_disconnects;
+ size_t tcp_socket_connected;
+ size_t tcp_socket_reads;
+ size_t tcp_packets_received;
+ size_t tcp_bytes_read;
+ size_t udp_socket_reads;
+ size_t udp_packets_received;
+ size_t udp_bytes_read;
+
+ int enabled;
+ int update_every;
+ SIMPLE_PATTERN *charts_for;
+
+ size_t tcp_idle_timeout;
+ collected_number decimal_detail;
+ size_t private_charts;
+ size_t max_private_charts;
+ size_t max_private_charts_hard;
+ RRD_MEMORY_MODE private_charts_memory_mode;
+ long private_charts_rrd_history_entries;
+ unsigned int private_charts_hidden:1;
+
+ STATSD_APP *apps;
+ size_t recvmmsg_size;
+ size_t histogram_increase_step;
+ double histogram_percentile;
+ char *histogram_percentile_str;
+
+ int threads;
+ struct collection_thread_status *collection_threads_status;
+
+ LISTEN_SOCKETS sockets;
+} statsd = {
+ .enabled = 1,
+ .max_private_charts = 200,
+ .max_private_charts_hard = 1000,
+ .private_charts_hidden = 0,
+ .recvmmsg_size = 10,
+ .decimal_detail = STATSD_DECIMAL_DETAIL,
+
+ .gauges = {
+ .name = "gauge",
+ .events = 0,
+ .metrics = 0,
+ .index = STATSD_AVL_INDEX_INIT,
+ .default_options = STATSD_METRIC_OPTION_NONE,
+ .first = NULL,
+ STATSD_FIRST_PTR_MUTEX_INIT
+ },
+ .counters = {
+ .name = "counter",
+ .events = 0,
+ .metrics = 0,
+ .index = STATSD_AVL_INDEX_INIT,
+ .default_options = STATSD_METRIC_OPTION_NONE,
+ .first = NULL,
+ STATSD_FIRST_PTR_MUTEX_INIT
+ },
+ .timers = {
+ .name = "timer",
+ .events = 0,
+ .metrics = 0,
+ .index = STATSD_AVL_INDEX_INIT,
+ .default_options = STATSD_METRIC_OPTION_NONE,
+ .first = NULL,
+ STATSD_FIRST_PTR_MUTEX_INIT
+ },
+ .histograms = {
+ .name = "histogram",
+ .events = 0,
+ .metrics = 0,
+ .index = STATSD_AVL_INDEX_INIT,
+ .default_options = STATSD_METRIC_OPTION_NONE,
+ .first = NULL,
+ STATSD_FIRST_PTR_MUTEX_INIT
+ },
+ .meters = {
+ .name = "meter",
+ .events = 0,
+ .metrics = 0,
+ .index = STATSD_AVL_INDEX_INIT,
+ .default_options = STATSD_METRIC_OPTION_NONE,
+ .first = NULL,
+ STATSD_FIRST_PTR_MUTEX_INIT
+ },
+ .sets = {
+ .name = "set",
+ .events = 0,
+ .metrics = 0,
+ .index = STATSD_AVL_INDEX_INIT,
+ .default_options = STATSD_METRIC_OPTION_NONE,
+ .first = NULL,
+ STATSD_FIRST_PTR_MUTEX_INIT
+ },
+
+ .tcp_idle_timeout = 600,
+
+ .apps = NULL,
+ .histogram_percentile = 95.0,
+ .histogram_increase_step = 10,
+ .threads = 0,
+ .collection_threads_status = NULL,
+ .sockets = {
+ .config = &netdata_config,
+ .config_section = CONFIG_SECTION_STATSD,
+ .default_bind_to = "udp:localhost tcp:localhost",
+ .default_port = STATSD_LISTEN_PORT,
+ .backlog = STATSD_LISTEN_BACKLOG
+ },
+};
+
+
+// --------------------------------------------------------------------------------------------------------------------
+// statsd index management - add/find metrics
+
+static int statsd_metric_compare(void* a, void* b) {
+ if(((STATSD_METRIC *)a)->hash < ((STATSD_METRIC *)b)->hash) return -1;
+ else if(((STATSD_METRIC *)a)->hash > ((STATSD_METRIC *)b)->hash) return 1;
+ else return strcmp(((STATSD_METRIC *)a)->name, ((STATSD_METRIC *)b)->name);
+}
+
+static inline STATSD_METRIC *statsd_metric_index_find(STATSD_INDEX *index, const char *name, uint32_t hash) {
+ STATSD_METRIC tmp;
+ tmp.name = name;
+ tmp.hash = (hash)?hash:simple_hash(tmp.name);
+
+ return (STATSD_METRIC *)STATSD_AVL_SEARCH(&index->index, (avl *)&tmp);
+}
+
+static inline STATSD_METRIC *statsd_find_or_add_metric(STATSD_INDEX *index, const char *name, STATSD_METRIC_TYPE type) {
+ debug(D_STATSD, "searching for metric '%s' under '%s'", name, index->name);
+
+ uint32_t hash = simple_hash(name);
+
+ STATSD_METRIC *m = statsd_metric_index_find(index, name, hash);
+ if(unlikely(!m)) {
+ debug(D_STATSD, "Creating new %s metric '%s'", index->name, name);
+
+ m = (STATSD_METRIC *)callocz(sizeof(STATSD_METRIC), 1);
+ m->name = strdupz(name);
+ m->hash = hash;
+ m->type = type;
+ m->options = index->default_options;
+
+ if(type == STATSD_METRIC_TYPE_HISTOGRAM || type == STATSD_METRIC_TYPE_TIMER) {
+ m->histogram.ext = callocz(sizeof(STATSD_METRIC_HISTOGRAM_EXTENSIONS), 1);
+ netdata_mutex_init(&m->histogram.ext->mutex);
+ }
+ STATSD_METRIC *n = (STATSD_METRIC *)STATSD_AVL_INSERT(&index->index, (avl *)m);
+ if(unlikely(n != m)) {
+ freez((void *)m->histogram.ext);
+ freez((void *)m->name);
+ freez((void *)m);
+ m = n;
+ }
+ else {
+ STATSD_FIRST_PTR_MUTEX_LOCK(index);
+ index->metrics++;
+ m->next = index->first;
+ index->first = m;
+ STATSD_FIRST_PTR_MUTEX_UNLOCK(index);
+ }
+ }
+
+ index->events++;
+ return m;
+}
+
+
+// --------------------------------------------------------------------------------------------------------------------
+// statsd parsing numbers
+
+static inline LONG_DOUBLE statsd_parse_float(const char *v, LONG_DOUBLE def) {
+ LONG_DOUBLE value;
+
+ if(likely(v && *v)) {
+ char *e = NULL;
+ value = str2ld(v, &e);
+ if(unlikely(e && *e))
+ error("STATSD: excess data '%s' after value '%s'", e, v);
+ }
+ else
+ value = def;
+
+ return value;
+}
+
+static inline LONG_DOUBLE statsd_parse_sampling_rate(const char *v) {
+ LONG_DOUBLE sampling_rate = statsd_parse_float(v, 1.0);
+ if(unlikely(isless(sampling_rate, 0.001))) sampling_rate = 0.001;
+ if(unlikely(isgreater(sampling_rate, 1.0))) sampling_rate = 1.0;
+ return sampling_rate;
+}
+
+static inline long long statsd_parse_int(const char *v, long long def) {
+ long long value;
+
+ if(likely(v && *v)) {
+ char *e = NULL;
+ value = str2ll(v, &e);
+ if(unlikely(e && *e))
+ error("STATSD: excess data '%s' after value '%s'", e, v);
+ }
+ else
+ value = def;
+
+ return value;
+}
+
+
+// --------------------------------------------------------------------------------------------------------------------
+// statsd processors per metric type
+
+static inline void statsd_reset_metric(STATSD_METRIC *m) {
+ m->reset = 0;
+ m->count = 0;
+}
+
+static inline int value_is_zinit(const char *value) {
+ return (value && *value == 'z' && *++value == 'i' && *++value == 'n' && *++value == 'i' && *++value == 't' && *++value == '\0');
+}
+
+#define is_metric_checked(m) ((m)->options & STATSD_METRIC_OPTION_CHECKED)
+#define is_metric_useful_for_collection(m) (!is_metric_checked(m) || ((m)->options & STATSD_METRIC_OPTION_USEFUL))
+
+static inline void statsd_process_gauge(STATSD_METRIC *m, const char *value, const char *sampling) {
+ if(!is_metric_useful_for_collection(m)) return;
+
+ if(unlikely(!value || !*value)) {
+ error("STATSD: metric '%s' of type gauge, with empty value is ignored.", m->name);
+ return;
+ }
+
+ if(unlikely(m->reset)) {
+ // no need to reset anything specific for gauges
+ statsd_reset_metric(m);
+ }
+
+ if(unlikely(value_is_zinit(value))) {
+ // magic loading of metric, without affecting anything
+ }
+ else {
+ if (unlikely(*value == '+' || *value == '-'))
+ m->gauge.value += statsd_parse_float(value, 1.0) / statsd_parse_sampling_rate(sampling);
+ else
+ m->gauge.value = statsd_parse_float(value, 1.0);
+
+ m->events++;
+ m->count++;
+ }
+}
+
+static inline void statsd_process_counter_or_meter(STATSD_METRIC *m, const char *value, const char *sampling) {
+ if(!is_metric_useful_for_collection(m)) return;
+
+ // we accept empty values for counters
+
+ if(unlikely(m->reset)) statsd_reset_metric(m);
+
+ if(unlikely(value_is_zinit(value))) {
+ // magic loading of metric, without affecting anything
+ }
+ else {
+ m->counter.value += llrintl((LONG_DOUBLE) statsd_parse_int(value, 1) / statsd_parse_sampling_rate(sampling));
+
+ m->events++;
+ m->count++;
+ }
+}
+
+#define statsd_process_counter(m, value, sampling) statsd_process_counter_or_meter(m, value, sampling)
+#define statsd_process_meter(m, value, sampling) statsd_process_counter_or_meter(m, value, sampling)
+
+static inline void statsd_process_histogram_or_timer(STATSD_METRIC *m, const char *value, const char *sampling, const char *type) {
+ if(!is_metric_useful_for_collection(m)) return;
+
+ if(unlikely(!value || !*value)) {
+ error("STATSD: metric of type %s, with empty value is ignored.", type);
+ return;
+ }
+
+ if(unlikely(m->reset)) {
+ m->histogram.ext->used = 0;
+ statsd_reset_metric(m);
+ }
+
+ if(unlikely(value_is_zinit(value))) {
+ // magic loading of metric, without affecting anything
+ }
+ else {
+ LONG_DOUBLE v = statsd_parse_float(value, 1.0);
+ LONG_DOUBLE sampling_rate = statsd_parse_sampling_rate(sampling);
+ if(unlikely(isless(sampling_rate, 0.01))) sampling_rate = 0.01;
+ if(unlikely(isgreater(sampling_rate, 1.0))) sampling_rate = 1.0;
+
+ long long samples = llrintl(1.0 / sampling_rate);
+ while(samples-- > 0) {
+
+ if(unlikely(m->histogram.ext->used == m->histogram.ext->size)) {
+ netdata_mutex_lock(&m->histogram.ext->mutex);
+ m->histogram.ext->size += statsd.histogram_increase_step;
+ m->histogram.ext->values = reallocz(m->histogram.ext->values, sizeof(LONG_DOUBLE) * m->histogram.ext->size);
+ netdata_mutex_unlock(&m->histogram.ext->mutex);
+ }
+
+ m->histogram.ext->values[m->histogram.ext->used++] = v;
+ }
+
+ m->events++;
+ m->count++;
+ }
+}
+
+#define statsd_process_timer(m, value, sampling) statsd_process_histogram_or_timer(m, value, sampling, "timer")
+#define statsd_process_histogram(m, value, sampling) statsd_process_histogram_or_timer(m, value, sampling, "histogram")
+
+static inline void statsd_process_set(STATSD_METRIC *m, const char *value) {
+ if(!is_metric_useful_for_collection(m)) return;
+
+ if(unlikely(!value || !*value)) {
+ error("STATSD: metric of type set, with empty value is ignored.");
+ return;
+ }
+
+ if(unlikely(m->reset)) {
+ if(likely(m->set.dict)) {
+ dictionary_destroy(m->set.dict);
+ m->set.dict = NULL;
+ }
+ statsd_reset_metric(m);
+ }
+
+ if (unlikely(!m->set.dict)) {
+ m->set.dict = dictionary_create(STATSD_DICTIONARY_OPTIONS | DICTIONARY_FLAG_VALUE_LINK_DONT_CLONE);
+ m->set.unique = 0;
+ }
+
+ if(unlikely(value_is_zinit(value))) {
+ // magic loading of metric, without affecting anything
+ }
+ else {
+ void *t = dictionary_get(m->set.dict, value);
+ if (unlikely(!t)) {
+ dictionary_set(m->set.dict, value, NULL, 1);
+ m->set.unique++;
+ }
+
+ m->events++;
+ m->count++;
+ }
+}
+
+
+// --------------------------------------------------------------------------------------------------------------------
+// statsd parsing
+
+static void statsd_process_metric(const char *name, const char *value, const char *type, const char *sampling, const char *tags) {
+ (void)tags;
+
+ debug(D_STATSD, "STATSD: raw metric '%s', value '%s', type '%s', sampling '%s', tags '%s'", name?name:"(null)", value?value:"(null)", type?type:"(null)", sampling?sampling:"(null)", tags?tags:"(null)");
+
+ if(unlikely(!name || !*name)) return;
+ if(unlikely(!type || !*type)) type = "m";
+
+ char t0 = type[0], t1 = type[1];
+
+ if(unlikely(t0 == 'g' && t1 == '\0')) {
+ statsd_process_gauge(
+ statsd_find_or_add_metric(&statsd.gauges, name, STATSD_METRIC_TYPE_GAUGE),
+ value, sampling);
+ }
+ else if(unlikely((t0 == 'c' || t0 == 'C') && t1 == '\0')) {
+ // etsy/statsd uses 'c'
+ // brubeck uses 'C'
+ statsd_process_counter(
+ statsd_find_or_add_metric(&statsd.counters, name, STATSD_METRIC_TYPE_COUNTER),
+ value, sampling);
+ }
+ else if(unlikely(t0 == 'm' && t1 == '\0')) {
+ statsd_process_meter(
+ statsd_find_or_add_metric(&statsd.meters, name, STATSD_METRIC_TYPE_METER),
+ value, sampling);
+ }
+ else if(unlikely(t0 == 'h' && t1 == '\0')) {
+ statsd_process_histogram(
+ statsd_find_or_add_metric(&statsd.histograms, name, STATSD_METRIC_TYPE_HISTOGRAM),
+ value, sampling);
+ }
+ else if(unlikely(t0 == 's' && t1 == '\0')) {
+ statsd_process_set(
+ statsd_find_or_add_metric(&statsd.sets, name, STATSD_METRIC_TYPE_SET),
+ value);
+ }
+ else if(unlikely(t0 == 'm' && t1 == 's' && type[2] == '\0')) {
+ statsd_process_timer(
+ statsd_find_or_add_metric(&statsd.timers, name, STATSD_METRIC_TYPE_TIMER),
+ value, sampling);
+ }
+ else {
+ statsd.unknown_types++;
+ error("STATSD: metric '%s' with value '%s' is sent with unknown metric type '%s'", name, value?value:"", type);
+ }
+}
+
+static inline const char *statsd_parse_skip_up_to(const char *s, char d1, char d2) {
+ char c;
+
+ for(c = *s; c && c != d1 && c != d2 && c != '\r' && c != '\n'; c = *++s) ;
+
+ return s;
+}
+
+const char *statsd_parse_skip_spaces(const char *s) {
+ char c;
+
+ for(c = *s; c && ( c == ' ' || c == '\t' || c == '\r' || c == '\n' ); c = *++s) ;
+
+ return s;
+}
+
+static inline const char *statsd_parse_field_trim(const char *start, char *end) {
+ if(unlikely(!start)) {
+ start = end;
+ return start;
+ }
+
+ while(start <= end && (*start == ' ' || *start == '\t'))
+ start++;
+
+ *end = '\0';
+ end--;
+ while(end >= start && (*end == ' ' || *end == '\t'))
+ *end-- = '\0';
+
+ return start;
+}
+
+static inline size_t statsd_process(char *buffer, size_t size, int require_newlines) {
+ buffer[size] = '\0';
+ debug(D_STATSD, "RECEIVED: %zu bytes: '%s'", size, buffer);
+
+ const char *s = buffer;
+ while(*s) {
+ const char *name = NULL, *value = NULL, *type = NULL, *sampling = NULL, *tags = NULL;
+ char *name_end = NULL, *value_end = NULL, *type_end = NULL, *sampling_end = NULL, *tags_end = NULL;
+
+ s = name_end = (char *)statsd_parse_skip_up_to(name = s, ':', '|');
+ if(name == name_end) {
+ if (*s) {
+ s++;
+ s = statsd_parse_skip_spaces(s);
+ }
+ continue;
+ }
+
+ if(likely(*s == ':'))
+ s = value_end = (char *) statsd_parse_skip_up_to(value = ++s, '|', '|');
+
+ if(likely(*s == '|'))
+ s = type_end = (char *) statsd_parse_skip_up_to(type = ++s, '|', '@');
+
+ if(likely(*s == '|' || *s == '@')) {
+ s = sampling_end = (char *) statsd_parse_skip_up_to(sampling = ++s, '|', '#');
+ if(*sampling == '@') sampling++;
+ }
+
+ if(likely(*s == '|' || *s == '#')) {
+ s = tags_end = (char *) statsd_parse_skip_up_to(tags = ++s, '|', '|');
+ if(*tags == '#') tags++;
+ }
+
+ // skip everything until the end of the line
+ while(*s && *s != '\n') s++;
+
+ if(unlikely(require_newlines && *s != '\n' && s > buffer)) {
+ // move the remaining data to the beginning
+ size -= (name - buffer);
+ memmove(buffer, name, size);
+ return size;
+ }
+ else
+ s = statsd_parse_skip_spaces(s);
+
+ statsd_process_metric(
+ statsd_parse_field_trim(name, name_end)
+ , statsd_parse_field_trim(value, value_end)
+ , statsd_parse_field_trim(type, type_end)
+ , statsd_parse_field_trim(sampling, sampling_end)
+ , statsd_parse_field_trim(tags, tags_end)
+ );
+ }
+
+ return 0;
+}
+
+
+// --------------------------------------------------------------------------------------------------------------------
+// statsd pollfd interface
+
+#define STATSD_TCP_BUFFER_SIZE 65536 // minimize tcp reads
+#define STATSD_UDP_BUFFER_SIZE 9000 // this should be up to MTU
+
+typedef enum {
+ STATSD_SOCKET_DATA_TYPE_TCP,
+ STATSD_SOCKET_DATA_TYPE_UDP
+} STATSD_SOCKET_DATA_TYPE;
+
+struct statsd_tcp {
+ STATSD_SOCKET_DATA_TYPE type;
+ size_t size;
+ size_t len;
+ char buffer[];
+};
+
+#ifdef HAVE_RECVMMSG
+struct statsd_udp {
+ int *running;
+ STATSD_SOCKET_DATA_TYPE type;
+ size_t size;
+ struct iovec *iovecs;
+ struct mmsghdr *msgs;
+};
+#else
+struct statsd_udp {
+ int *running;
+ STATSD_SOCKET_DATA_TYPE type;
+ char buffer[STATSD_UDP_BUFFER_SIZE];
+};
+#endif
+
+// new TCP client connected
+static void *statsd_add_callback(POLLINFO *pi, short int *events, void *data) {
+ (void)pi;
+ (void)data;
+
+ *events = POLLIN;
+
+ struct statsd_tcp *t = (struct statsd_tcp *)callocz(sizeof(struct statsd_tcp) + STATSD_TCP_BUFFER_SIZE, 1);
+ t->type = STATSD_SOCKET_DATA_TYPE_TCP;
+ t->size = STATSD_TCP_BUFFER_SIZE - 1;
+ statsd.tcp_socket_connects++;
+ statsd.tcp_socket_connected++;
+
+ return t;
+}
+
+// TCP client disconnected
+static void statsd_del_callback(POLLINFO *pi) {
+ struct statsd_tcp *t = pi->data;
+
+ if(likely(t)) {
+ if(t->type == STATSD_SOCKET_DATA_TYPE_TCP) {
+ if(t->len != 0) {
+ statsd.socket_errors++;
+ error("STATSD: client is probably sending unterminated metrics. Closed socket left with '%s'. Trying to process it.", t->buffer);
+ statsd_process(t->buffer, t->len, 0);
+ }
+ statsd.tcp_socket_disconnects++;
+ statsd.tcp_socket_connected--;
+ }
+ else
+ error("STATSD: internal error: received socket data type is %d, but expected %d", (int)t->type, (int)STATSD_SOCKET_DATA_TYPE_TCP);
+
+ freez(t);
+ }
+}
+
+// Receive data
+static int statsd_rcv_callback(POLLINFO *pi, short int *events) {
+ *events = POLLIN;
+
+ int fd = pi->fd;
+
+ switch(pi->socktype) {
+ case SOCK_STREAM: {
+ struct statsd_tcp *d = (struct statsd_tcp *)pi->data;
+ if(unlikely(!d)) {
+ error("STATSD: internal error: expected TCP data pointer is NULL");
+ statsd.socket_errors++;
+ return -1;
+ }
+
+#ifdef NETDATA_INTERNAL_CHECKS
+ if(unlikely(d->type != STATSD_SOCKET_DATA_TYPE_TCP)) {
+ error("STATSD: internal error: socket data type should be %d, but it is %d", (int)STATSD_SOCKET_DATA_TYPE_TCP, (int)d->type);
+ statsd.socket_errors++;
+ return -1;
+ }
+#endif
+
+ int ret = 0;
+ ssize_t rc;
+ do {
+ rc = recv(fd, &d->buffer[d->len], d->size - d->len, MSG_DONTWAIT);
+ if (rc < 0) {
+ // read failed
+ if (errno != EWOULDBLOCK && errno != EAGAIN && errno != EINTR) {
+ error("STATSD: recv() on TCP socket %d failed.", fd);
+ statsd.socket_errors++;
+ ret = -1;
+ }
+ }
+ else if (!rc) {
+ // connection closed
+ debug(D_STATSD, "STATSD: client disconnected.");
+ ret = -1;
+ }
+ else {
+ // data received
+ d->len += rc;
+ statsd.tcp_socket_reads++;
+ statsd.tcp_bytes_read += rc;
+ }
+
+ if(likely(d->len > 0)) {
+ statsd.tcp_packets_received++;
+ d->len = statsd_process(d->buffer, d->len, 1);
+ }
+
+ if(unlikely(ret == -1))
+ return -1;
+
+ } while (rc != -1);
+ break;
+ }
+
+ case SOCK_DGRAM: {
+ struct statsd_udp *d = (struct statsd_udp *)pi->data;
+ if(unlikely(!d)) {
+ error("STATSD: internal error: expected UDP data pointer is NULL");
+ statsd.socket_errors++;
+ return -1;
+ }
+
+#ifdef NETDATA_INTERNAL_CHECKS
+ if(unlikely(d->type != STATSD_SOCKET_DATA_TYPE_UDP)) {
+ error("STATSD: internal error: socket data should be %d, but it is %d", (int)d->type, (int)STATSD_SOCKET_DATA_TYPE_UDP);
+ statsd.socket_errors++;
+ return -1;
+ }
+#endif
+
+#ifdef HAVE_RECVMMSG
+ ssize_t rc;
+ do {
+ rc = recvmmsg(fd, d->msgs, (unsigned int)d->size, MSG_DONTWAIT, NULL);
+ if (rc < 0) {
+ // read failed
+ if (errno != EWOULDBLOCK && errno != EAGAIN && errno != EINTR) {
+ error("STATSD: recvmmsg() on UDP socket %d failed.", fd);
+ statsd.socket_errors++;
+ return -1;
+ }
+ } else if (rc) {
+ // data received
+ statsd.udp_socket_reads++;
+ statsd.udp_packets_received += rc;
+
+ size_t i;
+ for (i = 0; i < (size_t)rc; ++i) {
+ size_t len = (size_t)d->msgs[i].msg_len;
+ statsd.udp_bytes_read += len;
+ statsd_process(d->msgs[i].msg_hdr.msg_iov->iov_base, len, 0);
+ }
+ }
+ } while (rc != -1);
+
+#else // !HAVE_RECVMMSG
+ ssize_t rc;
+ do {
+ rc = recv(fd, d->buffer, STATSD_UDP_BUFFER_SIZE - 1, MSG_DONTWAIT);
+ if (rc < 0) {
+ // read failed
+ if (errno != EWOULDBLOCK && errno != EAGAIN && errno != EINTR) {
+ error("STATSD: recv() on UDP socket %d failed.", fd);
+ statsd.socket_errors++;
+ return -1;
+ }
+ } else if (rc) {
+ // data received
+ statsd.udp_socket_reads++;
+ statsd.udp_packets_received++;
+ statsd.udp_bytes_read += rc;
+ statsd_process(d->buffer, (size_t) rc, 0);
+ }
+ } while (rc != -1);
+#endif
+
+ break;
+ }
+
+ default: {
+ error("STATSD: internal error: unknown socktype %d on socket %d", pi->socktype, fd);
+ statsd.socket_errors++;
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+static int statsd_snd_callback(POLLINFO *pi, short int *events) {
+ (void)pi;
+ (void)events;
+
+ error("STATSD: snd_callback() called, but we never requested to send data to statsd clients.");
+ return -1;
+}
+
+static void statsd_timer_callback(void *timer_data) {
+ struct collection_thread_status *status = timer_data;
+ getrusage(RUSAGE_THREAD, &status->rusage);
+}
+
+// --------------------------------------------------------------------------------------------------------------------
+// statsd child thread to collect metrics from network
+
+void statsd_collector_thread_cleanup(void *data) {
+ struct statsd_udp *d = data;
+ *d->running = 0;
+
+ info("cleaning up...");
+
+#ifdef HAVE_RECVMMSG
+ size_t i;
+ for (i = 0; i < d->size; i++)
+ freez(d->iovecs[i].iov_base);
+
+ freez(d->iovecs);
+ freez(d->msgs);
+#endif
+
+ freez(d);
+}
+
+void *statsd_collector_thread(void *ptr) {
+ struct collection_thread_status *status = ptr;
+ status->status = 1;
+
+ info("STATSD collector thread started with taskid %d", gettid());
+
+ struct statsd_udp *d = callocz(sizeof(struct statsd_udp), 1);
+ d->running = &status->status;
+
+ netdata_thread_cleanup_push(statsd_collector_thread_cleanup, d);
+
+#ifdef HAVE_RECVMMSG
+ d->type = STATSD_SOCKET_DATA_TYPE_UDP;
+ d->size = statsd.recvmmsg_size;
+ d->iovecs = callocz(sizeof(struct iovec), d->size);
+ d->msgs = callocz(sizeof(struct mmsghdr), d->size);
+
+ size_t i;
+ for (i = 0; i < d->size; i++) {
+ d->iovecs[i].iov_base = mallocz(STATSD_UDP_BUFFER_SIZE);
+ d->iovecs[i].iov_len = STATSD_UDP_BUFFER_SIZE - 1;
+ d->msgs[i].msg_hdr.msg_iov = &d->iovecs[i];
+ d->msgs[i].msg_hdr.msg_iovlen = 1;
+ }
+#endif
+
+ poll_events(&statsd.sockets
+ , statsd_add_callback
+ , statsd_del_callback
+ , statsd_rcv_callback
+ , statsd_snd_callback
+ , statsd_timer_callback
+ , NULL // No access control pattern
+ , 0 // No dns lookups for access control pattern
+ , (void *)d
+ , 0 // tcp request timeout, 0 = disabled
+ , statsd.tcp_idle_timeout // tcp idle timeout, 0 = disabled
+ , statsd.update_every * 1000
+ , ptr // timer_data
+ , status->max_sockets
+ );
+
+ netdata_thread_cleanup_pop(1);
+ return NULL;
+}
+
+
+// --------------------------------------------------------------------------------------------------------------------
+// statsd applications configuration files parsing
+
+#define STATSD_CONF_LINE_MAX 8192
+
+static STATSD_APP_CHART_DIM_VALUE_TYPE string2valuetype(const char *type, size_t line, const char *filename) {
+ if(!type || !*type) type = "last";
+
+ if(!strcmp(type, "events")) return STATSD_APP_CHART_DIM_VALUE_TYPE_EVENTS;
+ else if(!strcmp(type, "last")) return STATSD_APP_CHART_DIM_VALUE_TYPE_LAST;
+ else if(!strcmp(type, "min")) return STATSD_APP_CHART_DIM_VALUE_TYPE_MIN;
+ else if(!strcmp(type, "max")) return STATSD_APP_CHART_DIM_VALUE_TYPE_MAX;
+ else if(!strcmp(type, "sum")) return STATSD_APP_CHART_DIM_VALUE_TYPE_SUM;
+ else if(!strcmp(type, "average")) return STATSD_APP_CHART_DIM_VALUE_TYPE_AVERAGE;
+ else if(!strcmp(type, "median")) return STATSD_APP_CHART_DIM_VALUE_TYPE_MEDIAN;
+ else if(!strcmp(type, "stddev")) return STATSD_APP_CHART_DIM_VALUE_TYPE_STDDEV;
+ else if(!strcmp(type, "percentile")) return STATSD_APP_CHART_DIM_VALUE_TYPE_PERCENTILE;
+
+ error("STATSD: invalid type '%s' at line %zu of file '%s'. Using 'last'.", type, line, filename);
+ return STATSD_APP_CHART_DIM_VALUE_TYPE_LAST;
+}
+
+static const char *valuetype2string(STATSD_APP_CHART_DIM_VALUE_TYPE type) {
+ switch(type) {
+ case STATSD_APP_CHART_DIM_VALUE_TYPE_EVENTS: return "events";
+ case STATSD_APP_CHART_DIM_VALUE_TYPE_LAST: return "last";
+ case STATSD_APP_CHART_DIM_VALUE_TYPE_MIN: return "min";
+ case STATSD_APP_CHART_DIM_VALUE_TYPE_MAX: return "max";
+ case STATSD_APP_CHART_DIM_VALUE_TYPE_SUM: return "sum";
+ case STATSD_APP_CHART_DIM_VALUE_TYPE_AVERAGE: return "average";
+ case STATSD_APP_CHART_DIM_VALUE_TYPE_MEDIAN: return "median";
+ case STATSD_APP_CHART_DIM_VALUE_TYPE_STDDEV: return "stddev";
+ case STATSD_APP_CHART_DIM_VALUE_TYPE_PERCENTILE: return "percentile";
+ }
+
+ return "unknown";
+}
+
+static STATSD_APP_CHART_DIM *add_dimension_to_app_chart(
+ STATSD_APP *app __maybe_unused
+ , STATSD_APP_CHART *chart
+ , const char *metric_name
+ , const char *dim_name
+ , collected_number multiplier
+ , collected_number divisor
+ , RRDDIM_FLAGS flags
+ , STATSD_APP_CHART_DIM_VALUE_TYPE value_type
+) {
+ STATSD_APP_CHART_DIM *dim = callocz(sizeof(STATSD_APP_CHART_DIM), 1);
+
+ dim->metric = strdupz(metric_name);
+ dim->metric_hash = simple_hash(dim->metric);
+
+ dim->name = strdupz((dim_name)?dim_name:"");
+ dim->multiplier = multiplier;
+ dim->divisor = divisor;
+ dim->value_type = value_type;
+ dim->flags = flags;
+
+ if(!dim->multiplier)
+ dim->multiplier = 1;
+
+ if(!dim->divisor)
+ dim->divisor = 1;
+
+ // append it to the chart's list of dimensions
+ STATSD_APP_CHART_DIM *tdim;
+ for(tdim = chart->dimensions; tdim && tdim->next ; tdim = tdim->next) ;
+ if(!tdim) {
+ dim->next = chart->dimensions;
+ chart->dimensions = dim;
+ }
+ else {
+ dim->next = tdim->next;
+ tdim->next = dim;
+ }
+ chart->dimensions_count++;
+
+ debug(D_STATSD, "Added dimension '%s' to chart '%s' of app '%s', for metric '%s', with type %u, multiplier " COLLECTED_NUMBER_FORMAT ", divisor " COLLECTED_NUMBER_FORMAT,
+ dim->name, chart->id, app->name, dim->metric, dim->value_type, dim->multiplier, dim->divisor);
+
+ return dim;
+}
+
+static int statsd_readfile(const char *filename, STATSD_APP *app, STATSD_APP_CHART *chart, DICTIONARY *dict) {
+ debug(D_STATSD, "STATSD configuration reading file '%s'", filename);
+
+ char *buffer = mallocz(STATSD_CONF_LINE_MAX + 1);
+
+ FILE *fp = fopen(filename, "r");
+ if(!fp) {
+ error("STATSD: cannot open file '%s'.", filename);
+ freez(buffer);
+ return -1;
+ }
+
+ size_t line = 0;
+ char *s;
+ while(fgets(buffer, STATSD_CONF_LINE_MAX, fp) != NULL) {
+ buffer[STATSD_CONF_LINE_MAX] = '\0';
+ line++;
+
+ s = trim(buffer);
+ if (!s || *s == '#') {
+ debug(D_STATSD, "STATSD: ignoring line %zu of file '%s', it is empty.", line, filename);
+ continue;
+ }
+
+ debug(D_STATSD, "STATSD: processing line %zu of file '%s': %s", line, filename, buffer);
+
+ if(*s == 'i' && strncmp(s, "include", 7) == 0) {
+ s = trim(&s[7]);
+ if(s && *s) {
+ char *tmp;
+ if(*s == '/')
+ tmp = strdupz(s);
+ else {
+ // the file to be included is relative to current file
+ // find the directory name from the file we already read
+ char *filename2 = strdupz(filename); // copy filename, since dirname() will change it
+ char *dir = dirname(filename2); // find the directory part of the filename
+ tmp = strdupz_path_subpath(dir, s); // compose the new filename to read;
+ freez(filename2); // free the filename we copied
+ }
+ statsd_readfile(tmp, app, chart, dict);
+ freez(tmp);
+ }
+ else
+ error("STATSD: ignoring line %zu of file '%s', include filename is empty", line, filename);
+
+ continue;
+ }
+
+ int len = (int) strlen(s);
+ if (*s == '[' && s[len - 1] == ']') {
+ // new section
+ s[len - 1] = '\0';
+ s++;
+
+ if (!strcmp(s, "app")) {
+ // a new app
+ app = callocz(sizeof(STATSD_APP), 1);
+ app->name = strdupz("unnamed");
+ app->rrd_memory_mode = localhost->rrd_memory_mode;
+ app->rrd_history_entries = localhost->rrd_history_entries;
+
+ app->next = statsd.apps;
+ statsd.apps = app;
+ chart = NULL;
+ dict = NULL;
+
+ {
+ char lineandfile[FILENAME_MAX + 1];
+ snprintfz(lineandfile, FILENAME_MAX, "%zu@%s", line, filename);
+ app->source = strdupz(lineandfile);
+ }
+ }
+ else if(app) {
+ if(!strcmp(s, "dictionary")) {
+ if(!app->dict)
+ app->dict = dictionary_create(DICTIONARY_FLAG_SINGLE_THREADED);
+
+ dict = app->dict;
+ }
+ else {
+ dict = NULL;
+
+ // a new chart
+ chart = callocz(sizeof(STATSD_APP_CHART), 1);
+ netdata_fix_chart_id(s);
+ chart->id = strdupz(s);
+ chart->name = strdupz(s);
+ chart->title = strdupz("Statsd chart");
+ chart->context = strdupz(s);
+ chart->family = strdupz("overview");
+ chart->units = strdupz("value");
+ chart->priority = NETDATA_CHART_PRIO_STATSD_PRIVATE;
+ chart->chart_type = RRDSET_TYPE_LINE;
+
+ chart->next = app->charts;
+ app->charts = chart;
+
+ {
+ char lineandfile[FILENAME_MAX + 1];
+ snprintfz(lineandfile, FILENAME_MAX, "%zu@%s", line, filename);
+ chart->source = strdupz(lineandfile);
+ }
+ }
+ }
+ else
+ error("STATSD: ignoring line %zu ('%s') of file '%s', [app] is not defined.", line, s, filename);
+
+ continue;
+ }
+
+ if(!app) {
+ error("STATSD: ignoring line %zu ('%s') of file '%s', it is outside all sections.", line, s, filename);
+ continue;
+ }
+
+ char *name = s;
+ char *value = strchr(s, '=');
+ if(!value) {
+ error("STATSD: ignoring line %zu ('%s') of file '%s', there is no = in it.", line, s, filename);
+ continue;
+ }
+ *value = '\0';
+ value++;
+
+ name = trim(name);
+ value = trim(value);
+
+ if(!name || *name == '#') {
+ error("STATSD: ignoring line %zu of file '%s', name is empty.", line, filename);
+ continue;
+ }
+ if(!value) {
+ debug(D_CONFIG, "STATSD: ignoring line %zu of file '%s', value is empty.", line, filename);
+ continue;
+ }
+
+ if(unlikely(dict)) {
+ // parse [dictionary] members
+
+ dictionary_set(dict, name, value, strlen(value) + 1);
+ }
+ else if(!chart) {
+ // parse [app] members
+
+ if(!strcmp(name, "name")) {
+ freez((void *)app->name);
+ netdata_fix_chart_name(value);
+ app->name = strdupz(value);
+ }
+ else if (!strcmp(name, "metrics")) {
+ simple_pattern_free(app->metrics);
+ app->metrics = simple_pattern_create(value, NULL, SIMPLE_PATTERN_EXACT);
+ }
+ else if (!strcmp(name, "private charts")) {
+ if (!strcmp(value, "yes") || !strcmp(value, "on"))
+ app->default_options |= STATSD_METRIC_OPTION_PRIVATE_CHART_ENABLED;
+ else
+ app->default_options &= ~STATSD_METRIC_OPTION_PRIVATE_CHART_ENABLED;
+ }
+ else if (!strcmp(name, "gaps when not collected")) {
+ if (!strcmp(value, "yes") || !strcmp(value, "on"))
+ app->default_options |= STATSD_METRIC_OPTION_SHOW_GAPS_WHEN_NOT_COLLECTED;
+ }
+ else if (!strcmp(name, "memory mode")) {
+ app->rrd_memory_mode = rrd_memory_mode_id(value);
+ }
+ else if (!strcmp(name, "history")) {
+ app->rrd_history_entries = atol(value);
+ if (app->rrd_history_entries < 5)
+ app->rrd_history_entries = 5;
+ }
+ else {
+ error("STATSD: ignoring line %zu ('%s') of file '%s'. Unknown keyword for the [app] section.", line, name, filename);
+ continue;
+ }
+ }
+ else {
+ // parse [chart] members
+
+ if(!strcmp(name, "name")) {
+ freez((void *)chart->name);
+ netdata_fix_chart_id(value);
+ chart->name = strdupz(value);
+ }
+ else if(!strcmp(name, "title")) {
+ freez((void *)chart->title);
+ chart->title = strdupz(value);
+ }
+ else if (!strcmp(name, "family")) {
+ freez((void *)chart->family);
+ chart->family = strdupz(value);
+ }
+ else if (!strcmp(name, "context")) {
+ freez((void *)chart->context);
+ netdata_fix_chart_id(value);
+ chart->context = strdupz(value);
+ }
+ else if (!strcmp(name, "units")) {
+ freez((void *)chart->units);
+ chart->units = strdupz(value);
+ }
+ else if (!strcmp(name, "priority")) {
+ chart->priority = atol(value);
+ }
+ else if (!strcmp(name, "type")) {
+ chart->chart_type = rrdset_type_id(value);
+ }
+ else if (!strcmp(name, "dimension")) {
+ // metric [name [type [multiplier [divisor [options]]]]]
+ char *words[10];
+ pluginsd_split_words(value, words, 10, NULL, NULL, 0);
+
+ int pattern = 0;
+ size_t i = 0;
+ char *metric_name = words[i++];
+
+ if(strcmp(metric_name, "pattern") == 0) {
+ metric_name = words[i++];
+ pattern = 1;
+ }
+
+ char *dim_name = words[i++];
+ char *type = words[i++];
+ char *multiplier = words[i++];
+ char *divisor = words[i++];
+ char *options = words[i++];
+
+ RRDDIM_FLAGS flags = RRDDIM_FLAG_NONE;
+ if(options && *options) {
+ if(strstr(options, "hidden") != NULL) flags |= RRDDIM_FLAG_HIDDEN;
+ if(strstr(options, "noreset") != NULL) flags |= RRDDIM_FLAG_DONT_DETECT_RESETS_OR_OVERFLOWS;
+ if(strstr(options, "nooverflow") != NULL) flags |= RRDDIM_FLAG_DONT_DETECT_RESETS_OR_OVERFLOWS;
+ }
+
+ if(!pattern) {
+ if(app->dict) {
+ if(dim_name && *dim_name) {
+ char *n = dictionary_get(app->dict, dim_name);
+ if(n) dim_name = n;
+ }
+ else {
+ dim_name = dictionary_get(app->dict, metric_name);
+ }
+ }
+
+ if(!dim_name || !*dim_name)
+ dim_name = metric_name;
+ }
+
+ STATSD_APP_CHART_DIM *dim = add_dimension_to_app_chart(
+ app
+ , chart
+ , metric_name
+ , dim_name
+ , (multiplier && *multiplier)?str2l(multiplier):1
+ , (divisor && *divisor)?str2l(divisor):1
+ , flags
+ , string2valuetype(type, line, filename)
+ );
+
+ if(pattern)
+ dim->metric_pattern = simple_pattern_create(dim->metric, NULL, SIMPLE_PATTERN_EXACT);
+ }
+ else {
+ error("STATSD: ignoring line %zu ('%s') of file '%s'. Unknown keyword for the [%s] section.", line, name, filename, chart->id);
+ continue;
+ }
+ }
+ }
+
+ freez(buffer);
+ fclose(fp);
+ return 0;
+}
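+
+// Illustrative app definition accepted by statsd_readfile() above; the app,
+// metric and chart names are hypothetical, the syntax mirrors the parser:
+//
+//     [app]
+//     name = myapp
+//     metrics = myapp.*
+//     private charts = no
+//
+//     [dictionary]
+//     myapp.requests = requests
+//
+//     # one section per chart; a 'dimension' line is:
+//     # metric [name [type [multiplier [divisor [options]]]]]
+//     [requests]
+//     name = requests
+//     title = My app requests
+//     units = requests/s
+//     dimension = myapp.requests requests last 1 1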
+
+static int statsd_file_callback(const char *filename, void *data) {
+ (void)data;
+ return statsd_readfile(filename, NULL, NULL, NULL);
+}
+
+static inline void statsd_readdir(const char *user_path, const char *stock_path, const char *subpath) {
+ recursive_config_double_dir_load(user_path, stock_path, subpath, statsd_file_callback, NULL, 0);
+}
+
+// --------------------------------------------------------------------------------------------------------------------
+// send metrics to netdata - in private charts - called from the main thread
+
+// extract chart type and chart id from metric name
+static inline void statsd_get_metric_type_and_id(STATSD_METRIC *m, char *type, char *id, const char *defid, size_t len) {
+ char *s;
+
+ snprintfz(type, len, "%s_%s_%s", STATSD_CHART_PREFIX, defid, m->name);
+ for(s = type; *s ;s++)
+ if(unlikely(*s == '.')) break;
+
+ if(*s == '.') {
+ *s++ = '\0';
+ strncpyz(id, s, len);
+ }
+ else {
+ strncpyz(id, defid, len);
+ }
+
+ netdata_fix_chart_id(type);
+ netdata_fix_chart_id(id);
+}
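+
+// For example (hypothetical names, assuming STATSD_CHART_PREFIX is "statsd"):
+// a gauge named "myapp.memory" yields type "statsd_gauge_myapp" and id "memory",
+// while a dot-less name "memory" yields type "statsd_gauge_memory" and id "gauge".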
+
+static inline RRDSET *statsd_private_rrdset_create(
+ STATSD_METRIC *m
+ , const char *type
+ , const char *id
+ , const char *name
+ , const char *family
+ , const char *context
+ , const char *title
+ , const char *units
+ , long priority
+ , int update_every
+ , RRDSET_TYPE chart_type
+) {
+ RRD_MEMORY_MODE memory_mode = statsd.private_charts_memory_mode;
+ long history = statsd.private_charts_rrd_history_entries;
+
+ if(unlikely(statsd.private_charts >= statsd.max_private_charts)) {
+ debug(D_STATSD, "STATSD: metric '%s' will be charted with memory mode = none, because the maximum number of charts has been reached.", m->name);
+ info("STATSD: metric '%s' will be charted with memory mode = none, because the maximum number of charts (%zu) has been reached. Increase the number of charts by editing netdata.conf, [statsd] section.", m->name, statsd.max_private_charts);
+ memory_mode = RRD_MEMORY_MODE_NONE;
+ history = 5;
+ }
+
+ statsd.private_charts++;
+ RRDSET *st = rrdset_create_custom(
+ localhost // host
+ , type // type
+ , id // id
+ , name // name
+ , family // family
+ , context // context
+ , title // title
+ , units // units
+ , PLUGIN_STATSD_NAME // plugin
+ , "private_chart" // module
+ , priority // priority
+ , update_every // update every
+ , chart_type // chart type
+ , memory_mode // memory mode
+ , history // history
+ );
+ rrdset_flag_set(st, RRDSET_FLAG_STORE_FIRST);
+
+ if(statsd.private_charts_hidden)
+ rrdset_flag_set(st, RRDSET_FLAG_HIDDEN);
+
+ // rrdset_flag_set(st, RRDSET_FLAG_DEBUG);
+ return st;
+}
+
+static inline void statsd_private_chart_gauge(STATSD_METRIC *m) {
+ debug(D_STATSD, "updating private chart for gauge metric '%s'", m->name);
+
+ if(unlikely(!m->st)) {
+ char type[RRD_ID_LENGTH_MAX + 1], id[RRD_ID_LENGTH_MAX + 1];
+ statsd_get_metric_type_and_id(m, type, id, "gauge", RRD_ID_LENGTH_MAX);
+
+ char context[RRD_ID_LENGTH_MAX + 1];
+ snprintfz(context, RRD_ID_LENGTH_MAX, "statsd_gauge.%s", m->name);
+
+ char title[RRD_ID_LENGTH_MAX + 1];
+ snprintfz(title, RRD_ID_LENGTH_MAX, "statsd private chart for gauge %s", m->name);
+
+ m->st = statsd_private_rrdset_create(
+ m
+ , type
+ , id
+ , NULL // name
+ , "gauges" // family (submenu)
+ , context // context
+ , title // title
+ , "value" // units
+ , NETDATA_CHART_PRIO_STATSD_PRIVATE
+ , statsd.update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ m->rd_value = rrddim_add(m->st, "gauge", NULL, 1, statsd.decimal_detail, RRD_ALGORITHM_ABSOLUTE);
+
+ if(m->options & STATSD_METRIC_OPTION_CHART_DIMENSION_COUNT)
+ m->rd_count = rrddim_add(m->st, "events", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(m->st);
+
+ rrddim_set_by_pointer(m->st, m->rd_value, m->last);
+
+ if(m->rd_count)
+ rrddim_set_by_pointer(m->st, m->rd_count, m->events);
+
+ rrdset_done(m->st);
+}
+
+static inline void statsd_private_chart_counter_or_meter(STATSD_METRIC *m, const char *dim, const char *family) {
+ debug(D_STATSD, "updating private chart for %s metric '%s'", dim, m->name);
+
+ if(unlikely(!m->st)) {
+ char type[RRD_ID_LENGTH_MAX + 1], id[RRD_ID_LENGTH_MAX + 1];
+ statsd_get_metric_type_and_id(m, type, id, dim, RRD_ID_LENGTH_MAX);
+
+ char context[RRD_ID_LENGTH_MAX + 1];
+ snprintfz(context, RRD_ID_LENGTH_MAX, "statsd_%s.%s", dim, m->name);
+
+ char title[RRD_ID_LENGTH_MAX + 1];
+ snprintfz(title, RRD_ID_LENGTH_MAX, "statsd private chart for %s %s", dim, m->name);
+
+ m->st = statsd_private_rrdset_create(
+ m
+ , type
+ , id
+ , NULL // name
+ , family // family (submenu)
+ , context // context
+ , title // title
+ , "events/s" // units
+ , NETDATA_CHART_PRIO_STATSD_PRIVATE
+ , statsd.update_every
+ , RRDSET_TYPE_AREA
+ );
+
+ m->rd_value = rrddim_add(m->st, dim, NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+
+ if(m->options & STATSD_METRIC_OPTION_CHART_DIMENSION_COUNT)
+ m->rd_count = rrddim_add(m->st, "events", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(m->st);
+
+ rrddim_set_by_pointer(m->st, m->rd_value, m->last);
+
+ if(m->rd_count)
+ rrddim_set_by_pointer(m->st, m->rd_count, m->events);
+
+ rrdset_done(m->st);
+}
+
+static inline void statsd_private_chart_set(STATSD_METRIC *m) {
+ debug(D_STATSD, "updating private chart for set metric '%s'", m->name);
+
+ if(unlikely(!m->st)) {
+ char type[RRD_ID_LENGTH_MAX + 1], id[RRD_ID_LENGTH_MAX + 1];
+ statsd_get_metric_type_and_id(m, type, id, "set", RRD_ID_LENGTH_MAX);
+
+ char context[RRD_ID_LENGTH_MAX + 1];
+ snprintfz(context, RRD_ID_LENGTH_MAX, "statsd_set.%s", m->name);
+
+ char title[RRD_ID_LENGTH_MAX + 1];
+ snprintfz(title, RRD_ID_LENGTH_MAX, "statsd private chart for set %s", m->name);
+
+ m->st = statsd_private_rrdset_create(
+ m
+ , type
+ , id
+ , NULL // name
+ , "sets" // family (submenu)
+ , context // context
+ , title // title
+ , "entries" // units
+ , NETDATA_CHART_PRIO_STATSD_PRIVATE
+ , statsd.update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ m->rd_value = rrddim_add(m->st, "set", "set size", 1, 1, RRD_ALGORITHM_ABSOLUTE);
+
+ if(m->options & STATSD_METRIC_OPTION_CHART_DIMENSION_COUNT)
+ m->rd_count = rrddim_add(m->st, "events", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(m->st);
+
+ rrddim_set_by_pointer(m->st, m->rd_value, m->last);
+
+ if(m->rd_count)
+ rrddim_set_by_pointer(m->st, m->rd_count, m->events);
+
+ rrdset_done(m->st);
+}
+
+static inline void statsd_private_chart_timer_or_histogram(STATSD_METRIC *m, const char *dim, const char *family, const char *units) {
+ debug(D_STATSD, "updating private chart for %s metric '%s'", dim, m->name);
+
+ if(unlikely(!m->st)) {
+ char type[RRD_ID_LENGTH_MAX + 1], id[RRD_ID_LENGTH_MAX + 1];
+ statsd_get_metric_type_and_id(m, type, id, dim, RRD_ID_LENGTH_MAX);
+
+ char context[RRD_ID_LENGTH_MAX + 1];
+ snprintfz(context, RRD_ID_LENGTH_MAX, "statsd_%s.%s", dim, m->name);
+
+ char title[RRD_ID_LENGTH_MAX + 1];
+ snprintfz(title, RRD_ID_LENGTH_MAX, "statsd private chart for %s %s", dim, m->name);
+
+ m->st = statsd_private_rrdset_create(
+ m
+ , type
+ , id
+ , NULL // name
+ , family // family (submenu)
+ , context // context
+ , title // title
+ , units // units
+ , NETDATA_CHART_PRIO_STATSD_PRIVATE
+ , statsd.update_every
+ , RRDSET_TYPE_AREA
+ );
+
+ m->histogram.ext->rd_min = rrddim_add(m->st, "min", NULL, 1, statsd.decimal_detail, RRD_ALGORITHM_ABSOLUTE);
+ m->histogram.ext->rd_max = rrddim_add(m->st, "max", NULL, 1, statsd.decimal_detail, RRD_ALGORITHM_ABSOLUTE);
+ m->rd_value = rrddim_add(m->st, "average", NULL, 1, statsd.decimal_detail, RRD_ALGORITHM_ABSOLUTE);
+ m->histogram.ext->rd_percentile = rrddim_add(m->st, statsd.histogram_percentile_str, NULL, 1, statsd.decimal_detail, RRD_ALGORITHM_ABSOLUTE);
+ m->histogram.ext->rd_median = rrddim_add(m->st, "median", NULL, 1, statsd.decimal_detail, RRD_ALGORITHM_ABSOLUTE);
+ m->histogram.ext->rd_stddev = rrddim_add(m->st, "stddev", NULL, 1, statsd.decimal_detail, RRD_ALGORITHM_ABSOLUTE);
+ m->histogram.ext->rd_sum = rrddim_add(m->st, "sum", NULL, 1, statsd.decimal_detail, RRD_ALGORITHM_ABSOLUTE);
+
+ if(m->options & STATSD_METRIC_OPTION_CHART_DIMENSION_COUNT)
+ m->rd_count = rrddim_add(m->st, "events", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(m->st);
+
+ rrddim_set_by_pointer(m->st, m->histogram.ext->rd_min, m->histogram.ext->last_min);
+ rrddim_set_by_pointer(m->st, m->histogram.ext->rd_max, m->histogram.ext->last_max);
+ rrddim_set_by_pointer(m->st, m->histogram.ext->rd_percentile, m->histogram.ext->last_percentile);
+ rrddim_set_by_pointer(m->st, m->histogram.ext->rd_median, m->histogram.ext->last_median);
+ rrddim_set_by_pointer(m->st, m->histogram.ext->rd_stddev, m->histogram.ext->last_stddev);
+ rrddim_set_by_pointer(m->st, m->histogram.ext->rd_sum, m->histogram.ext->last_sum);
+ rrddim_set_by_pointer(m->st, m->rd_value, m->last);
+
+ if(m->rd_count)
+ rrddim_set_by_pointer(m->st, m->rd_count, m->events);
+
+ rrdset_done(m->st);
+}
+
+// --------------------------------------------------------------------------------------------------------------------
+// statsd flush metrics
+
+static inline void statsd_flush_gauge(STATSD_METRIC *m) {
+ debug(D_STATSD, "flushing gauge metric '%s'", m->name);
+
+ int updated = 0;
+ if(unlikely(!m->reset && m->count)) {
+ m->last = (collected_number) (m->gauge.value * statsd.decimal_detail);
+
+ m->reset = 1;
+ updated = 1;
+ }
+
+ if(unlikely(m->options & STATSD_METRIC_OPTION_PRIVATE_CHART_ENABLED && (updated || !(m->options & STATSD_METRIC_OPTION_SHOW_GAPS_WHEN_NOT_COLLECTED))))
+ statsd_private_chart_gauge(m);
+}
+
+static inline void statsd_flush_counter_or_meter(STATSD_METRIC *m, const char *dim, const char *family) {
+ debug(D_STATSD, "flushing %s metric '%s'", dim, m->name);
+
+ int updated = 0;
+ if(unlikely(!m->reset && m->count)) {
+ m->last = m->counter.value;
+
+ m->reset = 1;
+ updated = 1;
+ }
+
+ if(unlikely(m->options & STATSD_METRIC_OPTION_PRIVATE_CHART_ENABLED && (updated || !(m->options & STATSD_METRIC_OPTION_SHOW_GAPS_WHEN_NOT_COLLECTED))))
+ statsd_private_chart_counter_or_meter(m, dim, family);
+}
+
+static inline void statsd_flush_counter(STATSD_METRIC *m) {
+ statsd_flush_counter_or_meter(m, "counter", "counters");
+}
+
+static inline void statsd_flush_meter(STATSD_METRIC *m) {
+ statsd_flush_counter_or_meter(m, "meter", "meters");
+}
+
+static inline void statsd_flush_set(STATSD_METRIC *m) {
+ debug(D_STATSD, "flushing set metric '%s'", m->name);
+
+ int updated = 0;
+ if(unlikely(!m->reset && m->count)) {
+ m->last = (collected_number)m->set.unique;
+
+ m->reset = 1;
+ updated = 1;
+ }
+ else {
+ m->last = 0;
+ }
+
+ if(unlikely(m->options & STATSD_METRIC_OPTION_PRIVATE_CHART_ENABLED && (updated || !(m->options & STATSD_METRIC_OPTION_SHOW_GAPS_WHEN_NOT_COLLECTED))))
+ statsd_private_chart_set(m);
+}
+
+static inline void statsd_flush_timer_or_histogram(STATSD_METRIC *m, const char *dim, const char *family, const char *units) {
+ debug(D_STATSD, "flushing %s metric '%s'", dim, m->name);
+
+ int updated = 0;
+ if(unlikely(!m->reset && m->count && m->histogram.ext->used > 0)) {
+ netdata_mutex_lock(&m->histogram.ext->mutex);
+
+ size_t len = m->histogram.ext->used;
+ LONG_DOUBLE *series = m->histogram.ext->values;
+ sort_series(series, len);
+
+ m->histogram.ext->last_min = (collected_number)roundl(series[0] * statsd.decimal_detail);
+ m->histogram.ext->last_max = (collected_number)roundl(series[len - 1] * statsd.decimal_detail);
+ m->last = (collected_number)roundl(average(series, len) * statsd.decimal_detail);
+ m->histogram.ext->last_median = (collected_number)roundl(median_on_sorted_series(series, len) * statsd.decimal_detail);
+ m->histogram.ext->last_stddev = (collected_number)roundl(standard_deviation(series, len) * statsd.decimal_detail);
+ m->histogram.ext->last_sum = (collected_number)roundl(sum(series, len) * statsd.decimal_detail);
+
+ size_t pct_len = (size_t)floor((double)len * statsd.histogram_percentile / 100.0);
+ if(pct_len < 1)
+ m->histogram.ext->last_percentile = (collected_number)(series[0] * statsd.decimal_detail);
+ else
+ m->histogram.ext->last_percentile = (collected_number)roundl(series[pct_len - 1] * statsd.decimal_detail);
+
+ netdata_mutex_unlock(&m->histogram.ext->mutex);
+
+ debug(D_STATSD, "STATSD %s metric %s: min " COLLECTED_NUMBER_FORMAT ", max " COLLECTED_NUMBER_FORMAT ", last " COLLECTED_NUMBER_FORMAT ", pcent " COLLECTED_NUMBER_FORMAT ", median " COLLECTED_NUMBER_FORMAT ", stddev " COLLECTED_NUMBER_FORMAT ", sum " COLLECTED_NUMBER_FORMAT,
+ dim, m->name, m->histogram.ext->last_min, m->histogram.ext->last_max, m->last, m->histogram.ext->last_percentile, m->histogram.ext->last_median, m->histogram.ext->last_stddev, m->histogram.ext->last_sum);
+
+ m->histogram.ext->zeroed = 0;
+ m->reset = 1;
+ updated = 1;
+ }
+ else if(unlikely(!m->histogram.ext->zeroed)) {
+ // reset the metrics
+ // if we collected anything, they will be updated below
+ // this ensures that we report zeros if nothing is collected
+
+ m->histogram.ext->last_min = 0;
+ m->histogram.ext->last_max = 0;
+ m->last = 0;
+ m->histogram.ext->last_median = 0;
+ m->histogram.ext->last_stddev = 0;
+ m->histogram.ext->last_sum = 0;
+ m->histogram.ext->last_percentile = 0;
+
+ m->histogram.ext->zeroed = 1;
+ }
+
+ if(unlikely(m->options & STATSD_METRIC_OPTION_PRIVATE_CHART_ENABLED && (updated || !(m->options & STATSD_METRIC_OPTION_SHOW_GAPS_WHEN_NOT_COLLECTED))))
+ statsd_private_chart_timer_or_histogram(m, dim, family, units);
+}
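+
+// Illustrative percentile arithmetic (the values are examples): with 10 sorted
+// samples and the default 95th percentile, pct_len = floor(10 * 95 / 100) = 9,
+// so last_percentile is taken from series[8], the 9th smallest sample.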
+
+static inline void statsd_flush_timer(STATSD_METRIC *m) {
+ statsd_flush_timer_or_histogram(m, "timer", "timers", "milliseconds");
+}
+
+static inline void statsd_flush_histogram(STATSD_METRIC *m) {
+ statsd_flush_timer_or_histogram(m, "histogram", "histograms", "value");
+}
+
+static inline RRD_ALGORITHM statsd_algorithm_for_metric(STATSD_METRIC *m) {
+ switch(m->type) {
+ default:
+ case STATSD_METRIC_TYPE_GAUGE:
+ case STATSD_METRIC_TYPE_SET:
+ case STATSD_METRIC_TYPE_TIMER:
+ case STATSD_METRIC_TYPE_HISTOGRAM:
+ return RRD_ALGORITHM_ABSOLUTE;
+
+ case STATSD_METRIC_TYPE_METER:
+ case STATSD_METRIC_TYPE_COUNTER:
+ return RRD_ALGORITHM_INCREMENTAL;
+ }
+}
+
+static inline void link_metric_to_app_dimension(STATSD_APP *app, STATSD_METRIC *m, STATSD_APP_CHART *chart, STATSD_APP_CHART_DIM *dim) {
+ if(dim->value_type == STATSD_APP_CHART_DIM_VALUE_TYPE_EVENTS) {
+ dim->value_ptr = &m->events;
+ dim->algorithm = RRD_ALGORITHM_INCREMENTAL;
+ }
+ else if(m->type == STATSD_METRIC_TYPE_HISTOGRAM || m->type == STATSD_METRIC_TYPE_TIMER) {
+ dim->algorithm = RRD_ALGORITHM_ABSOLUTE;
+ dim->divisor *= statsd.decimal_detail;
+
+ switch(dim->value_type) {
+ case STATSD_APP_CHART_DIM_VALUE_TYPE_EVENTS:
+ // will never match - added to avoid warning
+ break;
+
+ case STATSD_APP_CHART_DIM_VALUE_TYPE_LAST:
+ case STATSD_APP_CHART_DIM_VALUE_TYPE_AVERAGE:
+ dim->value_ptr = &m->last;
+ break;
+
+ case STATSD_APP_CHART_DIM_VALUE_TYPE_SUM:
+ dim->value_ptr = &m->histogram.ext->last_sum;
+ break;
+
+ case STATSD_APP_CHART_DIM_VALUE_TYPE_MIN:
+ dim->value_ptr = &m->histogram.ext->last_min;
+ break;
+
+ case STATSD_APP_CHART_DIM_VALUE_TYPE_MAX:
+ dim->value_ptr = &m->histogram.ext->last_max;
+ break;
+
+ case STATSD_APP_CHART_DIM_VALUE_TYPE_MEDIAN:
+ dim->value_ptr = &m->histogram.ext->last_median;
+ break;
+
+ case STATSD_APP_CHART_DIM_VALUE_TYPE_PERCENTILE:
+ dim->value_ptr = &m->histogram.ext->last_percentile;
+ break;
+
+ case STATSD_APP_CHART_DIM_VALUE_TYPE_STDDEV:
+ dim->value_ptr = &m->histogram.ext->last_stddev;
+ break;
+ }
+ }
+ else {
+ if (dim->value_type != STATSD_APP_CHART_DIM_VALUE_TYPE_LAST)
+ error("STATSD: unsupported value type for dimension '%s' of chart '%s' of app '%s' on metric '%s'", dim->name, chart->id, app->name, m->name);
+
+ dim->value_ptr = &m->last;
+ dim->algorithm = statsd_algorithm_for_metric(m);
+
+ if(m->type == STATSD_METRIC_TYPE_GAUGE)
+ dim->divisor *= statsd.decimal_detail;
+ }
+
+ if(unlikely(chart->st && dim->rd)) {
+ rrddim_set_algorithm(chart->st, dim->rd, dim->algorithm);
+ rrddim_set_multiplier(chart->st, dim->rd, dim->multiplier);
+ rrddim_set_divisor(chart->st, dim->rd, dim->divisor);
+ }
+
+ chart->dimensions_linked_count++;
+ m->options |= STATSD_METRIC_OPTION_USED_IN_APPS;
+ debug(D_STATSD, "metric '%s' of type %u linked with app '%s', chart '%s', dimension '%s', algorithm '%s'", m->name, m->type, app->name, chart->id, dim->name, rrd_algorithm_name(dim->algorithm));
+}
+
+static inline void check_if_metric_is_for_app(STATSD_INDEX *index, STATSD_METRIC *m) {
+ (void)index;
+
+ STATSD_APP *app;
+ for(app = statsd.apps; app ;app = app->next) {
+ if(unlikely(simple_pattern_matches(app->metrics, m->name))) {
+ debug(D_STATSD, "metric '%s' matches app '%s'", m->name, app->name);
+
+ // the metric should get the options from the app
+
+ if(app->default_options & STATSD_METRIC_OPTION_PRIVATE_CHART_ENABLED)
+ m->options |= STATSD_METRIC_OPTION_PRIVATE_CHART_ENABLED;
+ else
+ m->options &= ~STATSD_METRIC_OPTION_PRIVATE_CHART_ENABLED;
+
+ if(app->default_options & STATSD_METRIC_OPTION_SHOW_GAPS_WHEN_NOT_COLLECTED)
+ m->options |= STATSD_METRIC_OPTION_SHOW_GAPS_WHEN_NOT_COLLECTED;
+ else
+ m->options &= ~STATSD_METRIC_OPTION_SHOW_GAPS_WHEN_NOT_COLLECTED;
+
+ m->options |= STATSD_METRIC_OPTION_PRIVATE_CHART_CHECKED;
+
+ // check if there is a chart in this app willing to get this metric
+ STATSD_APP_CHART *chart;
+ for(chart = app->charts; chart; chart = chart->next) {
+
+ STATSD_APP_CHART_DIM *dim;
+ for(dim = chart->dimensions; dim ; dim = dim->next) {
+ if(unlikely(dim->metric_pattern)) {
+ size_t dim_name_len = strlen(dim->name);
+ size_t wildcarded_len = dim_name_len + strlen(m->name) + 1;
+ char wildcarded[wildcarded_len];
+
+ strcpy(wildcarded, dim->name);
+ char *ws = &wildcarded[dim_name_len];
+
+ if(simple_pattern_matches_extract(dim->metric_pattern, m->name, ws, wildcarded_len - dim_name_len)) {
+
+ char *final_name = NULL;
+
+ if(app->dict) {
+ if(likely(*wildcarded)) {
+ // use the name of the wildcarded string
+ final_name = dictionary_get(app->dict, wildcarded);
+ }
+
+ if(unlikely(!final_name)) {
+ // use the name of the metric
+ final_name = dictionary_get(app->dict, m->name);
+ }
+ }
+
+ if(unlikely(!final_name))
+ final_name = wildcarded;
+
+ add_dimension_to_app_chart(
+ app
+ , chart
+ , m->name
+ , final_name
+ , dim->multiplier
+ , dim->divisor
+ , dim->flags
+ , dim->value_type
+ );
+
+ // the new dimension is appended to the list
+ // so, it will be matched and linked later too
+ }
+ }
+ else if(!dim->value_ptr && dim->metric_hash == m->hash && !strcmp(dim->metric, m->name)) {
+ // we have a match - this metric should be linked to this dimension
+ link_metric_to_app_dimension(app, m, chart, dim);
+ }
+ }
+
+ }
+ }
+ }
+}
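+
+// Illustrative wildcard matching (hypothetical names): given
+//     dimension = pattern 'requests.*' req_ last
+// a metric "requests.get" matches the pattern, the wildcarded part ("get") is
+// appended to the dimension name prefix, and the dimension becomes "req_get"
+// (or its [dictionary] translation, if one exists).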
+
+static inline RRDDIM *statsd_add_dim_to_app_chart(STATSD_APP *app, STATSD_APP_CHART *chart, STATSD_APP_CHART_DIM *dim) {
+ (void)app;
+
+ // allow the same statsd metric to be added multiple times to the same chart
+
+ STATSD_APP_CHART_DIM *tdim;
+ size_t count_same_metric = 0, count_same_metric_value_type = 0;
+ size_t pos_same_metric_value_type = 0;
+
+ for (tdim = chart->dimensions; tdim; tdim = tdim->next) { // visit every dimension, including the last one
+ if (dim->metric_hash == tdim->metric_hash && !strcmp(dim->metric, tdim->metric)) {
+ count_same_metric++;
+
+ if(dim->value_type == tdim->value_type) {
+ count_same_metric_value_type++;
+ if (tdim == dim)
+ pos_same_metric_value_type = count_same_metric_value_type;
+ }
+ }
+ }
+
+ if(count_same_metric > 1) {
+ // the same metric is found multiple times
+
+ size_t len = strlen(dim->metric) + 100;
+ char metric[ len + 1 ];
+
+ if(count_same_metric_value_type > 1) {
+ // the same metric, with the same value type, is added multiple times
+ snprintfz(metric, len, "%s_%s%zu", dim->metric, valuetype2string(dim->value_type), pos_same_metric_value_type);
+ }
+ else {
+ // the same metric, with different value type is added
+ snprintfz(metric, len, "%s_%s", dim->metric, valuetype2string(dim->value_type));
+ }
+
+ dim->rd = rrddim_add(chart->st, metric, dim->name, dim->multiplier, dim->divisor, dim->algorithm);
+ if(dim->flags != RRDDIM_FLAG_NONE) dim->rd->flags |= dim->flags;
+ return dim->rd;
+ }
+
+ dim->rd = rrddim_add(chart->st, dim->metric, dim->name, dim->multiplier, dim->divisor, dim->algorithm);
+ if(dim->flags != RRDDIM_FLAG_NONE) dim->rd->flags |= dim->flags;
+ return dim->rd;
+}
+
+static inline void statsd_update_app_chart(STATSD_APP *app, STATSD_APP_CHART *chart) {
+ debug(D_STATSD, "updating chart '%s' for app '%s'", chart->id, app->name);
+
+ if(!chart->st) {
+ chart->st = rrdset_create_custom(
+ localhost // host
+ , app->name // type
+ , chart->id // id
+ , chart->name // name
+ , chart->family // family
+ , chart->context // context
+ , chart->title // title
+ , chart->units // units
+ , PLUGIN_STATSD_NAME // plugin
+ , chart->source // module
+ , chart->priority // priority
+ , statsd.update_every // update every
+ , chart->chart_type // chart type
+ , app->rrd_memory_mode // memory mode
+ , app->rrd_history_entries // history
+ );
+
+ rrdset_flag_set(chart->st, RRDSET_FLAG_STORE_FIRST);
+ // rrdset_flag_set(chart->st, RRDSET_FLAG_DEBUG);
+ }
+ else rrdset_next(chart->st);
+
+ STATSD_APP_CHART_DIM *dim;
+ for(dim = chart->dimensions; dim ;dim = dim->next) {
+ if(likely(!dim->metric_pattern)) {
+ if (unlikely(!dim->rd))
+ statsd_add_dim_to_app_chart(app, chart, dim);
+
+ if (unlikely(dim->value_ptr)) {
+ debug(D_STATSD, "updating dimension '%s' (%s) of chart '%s' (%s) for app '%s' with value " COLLECTED_NUMBER_FORMAT, dim->name, dim->rd->id, chart->id, chart->st->id, app->name, *dim->value_ptr);
+ rrddim_set_by_pointer(chart->st, dim->rd, *dim->value_ptr);
+ }
+ }
+ }
+
+ rrdset_done(chart->st);
+ debug(D_STATSD, "completed update of chart '%s' for app '%s'", chart->id, app->name);
+}
+
+static inline void statsd_update_all_app_charts(void) {
+ // debug(D_STATSD, "updating app charts");
+
+ STATSD_APP *app;
+ for(app = statsd.apps; app ;app = app->next) {
+ // debug(D_STATSD, "updating charts for app '%s'", app->name);
+
+ STATSD_APP_CHART *chart;
+ for(chart = app->charts; chart ;chart = chart->next) {
+ if(unlikely(chart->dimensions_linked_count)) {
+ statsd_update_app_chart(app, chart);
+ }
+ }
+ }
+
+ // debug(D_STATSD, "completed update of app charts");
+}
+
+const char *statsd_metric_type_string(STATSD_METRIC_TYPE type) {
+ switch(type) {
+ case STATSD_METRIC_TYPE_COUNTER: return "counter";
+ case STATSD_METRIC_TYPE_GAUGE: return "gauge";
+ case STATSD_METRIC_TYPE_HISTOGRAM: return "histogram";
+ case STATSD_METRIC_TYPE_METER: return "meter";
+ case STATSD_METRIC_TYPE_SET: return "set";
+ case STATSD_METRIC_TYPE_TIMER: return "timer";
+ default: return "unknown";
+ }
+}
+
+static inline void statsd_flush_index_metrics(STATSD_INDEX *index, void (*flush_metric)(STATSD_METRIC *)) {
+ STATSD_METRIC *m;
+
+ // find the useful metrics (incremental = each time we are called, we check the new metrics only)
+ for(m = index->first; m ; m = m->next) {
+ // since we add new metrics at the beginning
+ // check for useful charts, until the point we last checked
+ if(unlikely(is_metric_checked(m))) break;
+
+ if(unlikely(!(m->options & STATSD_METRIC_OPTION_CHECKED_IN_APPS))) {
+ log_access("NEW STATSD METRIC '%s': '%s'", statsd_metric_type_string(m->type), m->name);
+ check_if_metric_is_for_app(index, m);
+ m->options |= STATSD_METRIC_OPTION_CHECKED_IN_APPS;
+ }
+
+ if(unlikely(!(m->options & STATSD_METRIC_OPTION_PRIVATE_CHART_CHECKED))) {
+ if(unlikely(statsd.private_charts >= statsd.max_private_charts_hard)) {
+ debug(D_STATSD, "STATSD: metric '%s' will not be charted, because the hard limit of the maximum number of charts has been reached.", m->name);
+ info("STATSD: metric '%s' will not be charted, because the hard limit of the maximum number of charts (%zu) has been reached. Increase the number of charts by editing netdata.conf, [statsd] section.", m->name, statsd.max_private_charts);
+ m->options &= ~STATSD_METRIC_OPTION_PRIVATE_CHART_ENABLED;
+ }
+ else {
+ if (simple_pattern_matches(statsd.charts_for, m->name)) {
+ debug(D_STATSD, "STATSD: metric '%s' will be charted.", m->name);
+ m->options |= STATSD_METRIC_OPTION_PRIVATE_CHART_ENABLED;
+ } else {
+ debug(D_STATSD, "STATSD: metric '%s' will not be charted.", m->name);
+ m->options &= ~STATSD_METRIC_OPTION_PRIVATE_CHART_ENABLED;
+ }
+ }
+
+ m->options |= STATSD_METRIC_OPTION_PRIVATE_CHART_CHECKED;
+ }
+
+ // mark it as checked
+ m->options |= STATSD_METRIC_OPTION_CHECKED;
+
+ // check if it is used in charts
+ if((m->options & (STATSD_METRIC_OPTION_PRIVATE_CHART_ENABLED|STATSD_METRIC_OPTION_USED_IN_APPS)) && !(m->options & STATSD_METRIC_OPTION_USEFUL)) {
+ m->options |= STATSD_METRIC_OPTION_USEFUL;
+ index->useful++;
+ m->next_useful = index->first_useful;
+ index->first_useful = m;
+ }
+ }
+
+ // flush all the useful metrics
+ for(m = index->first_useful; m ; m = m->next_useful) {
+ flush_metric(m);
+ }
+}
+
+
+// --------------------------------------------------------------------------------------
+// statsd main thread
+
+static int statsd_listen_sockets_setup(void) {
+ return listen_sockets_setup(&statsd.sockets);
+}
+
+static void statsd_main_cleanup(void *data) {
+ struct netdata_static_thread *static_thread = (struct netdata_static_thread *)data;
+ static_thread->enabled = NETDATA_MAIN_THREAD_EXITING;
+ info("cleaning up...");
+
+ if (statsd.collection_threads_status) {
+ int i;
+ for (i = 0; i < statsd.threads; i++) {
+ if(statsd.collection_threads_status[i].status) {
+ info("STATSD: stopping data collection thread %d...", i + 1);
+ netdata_thread_cancel(statsd.collection_threads_status[i].thread);
+ }
+ else {
+ info("STATSD: data collection thread %d found stopped.", i + 1);
+ }
+ }
+ }
+
+ info("STATSD: closing sockets...");
+ listen_sockets_close(&statsd.sockets);
+
+ info("STATSD: cleanup completed.");
+ static_thread->enabled = NETDATA_MAIN_THREAD_EXITED;
+}
+
+void *statsd_main(void *ptr) {
+ netdata_thread_cleanup_push(statsd_main_cleanup, ptr);
+
+ // ----------------------------------------------------------------------------------------------------------------
+ // statsd configuration
+
+ statsd.enabled = config_get_boolean(CONFIG_SECTION_STATSD, "enabled", statsd.enabled);
+
+ statsd.update_every = default_rrd_update_every;
+ statsd.update_every = (int)config_get_number(CONFIG_SECTION_STATSD, "update every (flushInterval)", statsd.update_every);
+ if(statsd.update_every < default_rrd_update_every) {
+ error("STATSD: minimum flush interval %d given, but the minimum is the update every of netdata. Using %d", statsd.update_every, default_rrd_update_every);
+ statsd.update_every = default_rrd_update_every;
+ }
+
+#ifdef HAVE_RECVMMSG
+ statsd.recvmmsg_size = (size_t)config_get_number(CONFIG_SECTION_STATSD, "udp messages to process at once", (long long)statsd.recvmmsg_size);
+#endif
+
+ statsd.charts_for = simple_pattern_create(config_get(CONFIG_SECTION_STATSD, "create private charts for metrics matching", "*"), NULL, SIMPLE_PATTERN_EXACT);
+ statsd.max_private_charts = (size_t)config_get_number(CONFIG_SECTION_STATSD, "max private charts allowed", (long long)statsd.max_private_charts);
+ statsd.max_private_charts_hard = (size_t)config_get_number(CONFIG_SECTION_STATSD, "max private charts hard limit", (long long)statsd.max_private_charts * 5);
+ statsd.private_charts_memory_mode = rrd_memory_mode_id(config_get(CONFIG_SECTION_STATSD, "private charts memory mode", rrd_memory_mode_name(default_rrd_memory_mode)));
+ statsd.private_charts_rrd_history_entries = (int)config_get_number(CONFIG_SECTION_STATSD, "private charts history", default_rrd_history_entries);
+ statsd.decimal_detail = (collected_number)config_get_number(CONFIG_SECTION_STATSD, "decimal detail", (long long int)statsd.decimal_detail);
+ statsd.tcp_idle_timeout = (size_t) config_get_number(CONFIG_SECTION_STATSD, "disconnect idle tcp clients after seconds", (long long int)statsd.tcp_idle_timeout);
+ statsd.private_charts_hidden = (unsigned int)config_get_boolean(CONFIG_SECTION_STATSD, "private charts hidden", statsd.private_charts_hidden);
+
+ statsd.histogram_percentile = (double)config_get_float(CONFIG_SECTION_STATSD, "histograms and timers percentile (percentThreshold)", statsd.histogram_percentile);
+ if(isless(statsd.histogram_percentile, 0) || isgreater(statsd.histogram_percentile, 100)) {
+ error("STATSD: invalid histograms and timers percentile %0.5f given", statsd.histogram_percentile);
+ statsd.histogram_percentile = 95.0;
+ }
+ {
+ char buffer[100 + 1];
+ snprintf(buffer, 100, "%0.1f%%", statsd.histogram_percentile);
+ statsd.histogram_percentile_str = strdupz(buffer);
+ }
+
+ if(config_get_boolean(CONFIG_SECTION_STATSD, "add dimension for number of events received", 1)) {
+ statsd.gauges.default_options |= STATSD_METRIC_OPTION_CHART_DIMENSION_COUNT;
+ statsd.counters.default_options |= STATSD_METRIC_OPTION_CHART_DIMENSION_COUNT;
+ statsd.meters.default_options |= STATSD_METRIC_OPTION_CHART_DIMENSION_COUNT;
+ statsd.sets.default_options |= STATSD_METRIC_OPTION_CHART_DIMENSION_COUNT;
+ statsd.histograms.default_options |= STATSD_METRIC_OPTION_CHART_DIMENSION_COUNT;
+ statsd.timers.default_options |= STATSD_METRIC_OPTION_CHART_DIMENSION_COUNT;
+ }
+
+ if(config_get_boolean(CONFIG_SECTION_STATSD, "gaps on gauges (deleteGauges)", 0))
+ statsd.gauges.default_options |= STATSD_METRIC_OPTION_SHOW_GAPS_WHEN_NOT_COLLECTED;
+
+ if(config_get_boolean(CONFIG_SECTION_STATSD, "gaps on counters (deleteCounters)", 0))
+ statsd.counters.default_options |= STATSD_METRIC_OPTION_SHOW_GAPS_WHEN_NOT_COLLECTED;
+
+ if(config_get_boolean(CONFIG_SECTION_STATSD, "gaps on meters (deleteMeters)", 0))
+ statsd.meters.default_options |= STATSD_METRIC_OPTION_SHOW_GAPS_WHEN_NOT_COLLECTED;
+
+ if(config_get_boolean(CONFIG_SECTION_STATSD, "gaps on sets (deleteSets)", 0))
+ statsd.sets.default_options |= STATSD_METRIC_OPTION_SHOW_GAPS_WHEN_NOT_COLLECTED;
+
+ if(config_get_boolean(CONFIG_SECTION_STATSD, "gaps on histograms (deleteHistograms)", 0))
+ statsd.histograms.default_options |= STATSD_METRIC_OPTION_SHOW_GAPS_WHEN_NOT_COLLECTED;
+
+ if(config_get_boolean(CONFIG_SECTION_STATSD, "gaps on timers (deleteTimers)", 0))
+ statsd.timers.default_options |= STATSD_METRIC_OPTION_SHOW_GAPS_WHEN_NOT_COLLECTED;
+
+ size_t max_sockets = (size_t)config_get_number(CONFIG_SECTION_STATSD, "statsd server max TCP sockets", (long long int)(rlimit_nofile.rlim_cur / 4));
+
+#ifdef STATSD_MULTITHREADED
+ statsd.threads = (int)config_get_number(CONFIG_SECTION_STATSD, "threads", processors);
+ if(statsd.threads < 1) {
+ error("STATSD: Invalid number of threads %d, using %d", statsd.threads, processors);
+ statsd.threads = processors;
+ config_set_number(CONFIG_SECTION_STATSD, "collector threads", statsd.threads);
+ }
+#else
+ statsd.threads = 1;
+#endif
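+
+ // Illustrative netdata.conf snippet covering the options read above
+ // (the values shown are examples, not recommendations):
+ //
+ //     [statsd]
+ //         enabled = yes
+ //         update every (flushInterval) = 1
+ //         create private charts for metrics matching = *
+ //         max private charts allowed = 200
+ //         histograms and timers percentile (percentThreshold) = 95.0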
+
+ // read custom application definitions
+ statsd_readdir(netdata_configured_user_config_dir, netdata_configured_stock_config_dir, "statsd.d");
+
+ // ----------------------------------------------------------------------------------------------------------------
+ // statsd setup
+
+ if(!statsd.enabled) goto cleanup;
+
+ statsd_listen_sockets_setup();
+ if(!statsd.sockets.opened) {
+ error("STATSD: No statsd sockets to listen to. statsd will be disabled.");
+ goto cleanup;
+ }
+
+ statsd.collection_threads_status = callocz((size_t)statsd.threads, sizeof(struct collection_thread_status));
+
+ int i;
+ for(i = 0; i < statsd.threads ;i++) {
+ statsd.collection_threads_status[i].max_sockets = max_sockets / statsd.threads;
+ char tag[NETDATA_THREAD_TAG_MAX + 1];
+ snprintfz(tag, NETDATA_THREAD_TAG_MAX, "STATSD_COLLECTOR[%d]", i + 1);
+ netdata_thread_create(&statsd.collection_threads_status[i].thread, tag, NETDATA_THREAD_OPTION_DEFAULT, statsd_collector_thread, &statsd.collection_threads_status[i]);
+ }
+
+ // ----------------------------------------------------------------------------------------------------------------
+ // statsd monitoring charts
+
+ RRDSET *st_metrics = rrdset_create_localhost(
+ "netdata"
+ , "statsd_metrics"
+ , NULL
+ , "statsd"
+ , NULL
+ , "Metrics in the netdata statsd database"
+ , "metrics"
+ , PLUGIN_STATSD_NAME
+ , "stats"
+ , 132010
+ , statsd.update_every
+ , RRDSET_TYPE_STACKED
+ );
+ RRDDIM *rd_metrics_gauge = rrddim_add(st_metrics, "gauges", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ RRDDIM *rd_metrics_counter = rrddim_add(st_metrics, "counters", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ RRDDIM *rd_metrics_timer = rrddim_add(st_metrics, "timers", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ RRDDIM *rd_metrics_meter = rrddim_add(st_metrics, "meters", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ RRDDIM *rd_metrics_histogram = rrddim_add(st_metrics, "histograms", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ RRDDIM *rd_metrics_set = rrddim_add(st_metrics, "sets", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+
+ RRDSET *st_useful_metrics = rrdset_create_localhost(
+ "netdata"
+ , "statsd_useful_metrics"
+ , NULL
+ , "statsd"
+ , NULL
+ , "Useful metrics in the netdata statsd database"
+ , "metrics"
+ , PLUGIN_STATSD_NAME
+ , "stats"
+ , 132010
+ , statsd.update_every
+ , RRDSET_TYPE_STACKED
+ );
+ RRDDIM *rd_useful_metrics_gauge = rrddim_add(st_useful_metrics, "gauges", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ RRDDIM *rd_useful_metrics_counter = rrddim_add(st_useful_metrics, "counters", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ RRDDIM *rd_useful_metrics_timer = rrddim_add(st_useful_metrics, "timers", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ RRDDIM *rd_useful_metrics_meter = rrddim_add(st_useful_metrics, "meters", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ RRDDIM *rd_useful_metrics_histogram = rrddim_add(st_useful_metrics, "histograms", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ RRDDIM *rd_useful_metrics_set = rrddim_add(st_useful_metrics, "sets", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+
+ RRDSET *st_events = rrdset_create_localhost(
+ "netdata"
+ , "statsd_events"
+ , NULL
+ , "statsd"
+ , NULL
+ , "Events processed by the netdata statsd server"
+ , "events/s"
+ , PLUGIN_STATSD_NAME
+ , "stats"
+ , 132011
+ , statsd.update_every
+ , RRDSET_TYPE_STACKED
+ );
+ RRDDIM *rd_events_gauge = rrddim_add(st_events, "gauges", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ RRDDIM *rd_events_counter = rrddim_add(st_events, "counters", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ RRDDIM *rd_events_timer = rrddim_add(st_events, "timers", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ RRDDIM *rd_events_meter = rrddim_add(st_events, "meters", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ RRDDIM *rd_events_histogram = rrddim_add(st_events, "histograms", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ RRDDIM *rd_events_set = rrddim_add(st_events, "sets", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ RRDDIM *rd_events_unknown = rrddim_add(st_events, "unknown", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ RRDDIM *rd_events_errors = rrddim_add(st_events, "errors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+
+ RRDSET *st_reads = rrdset_create_localhost(
+ "netdata"
+ , "statsd_reads"
+ , NULL
+ , "statsd"
+ , NULL
+ , "Read operations made by the netdata statsd server"
+ , "reads/s"
+ , PLUGIN_STATSD_NAME
+ , "stats"
+ , 132012
+ , statsd.update_every
+ , RRDSET_TYPE_STACKED
+ );
+ RRDDIM *rd_reads_tcp = rrddim_add(st_reads, "tcp", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ RRDDIM *rd_reads_udp = rrddim_add(st_reads, "udp", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+
+ RRDSET *st_bytes = rrdset_create_localhost(
+ "netdata"
+ , "statsd_bytes"
+ , NULL
+ , "statsd"
+ , NULL
+ , "Bytes read by the netdata statsd server"
+ , "kilobits/s"
+ , PLUGIN_STATSD_NAME
+ , "stats"
+ , 132013
+ , statsd.update_every
+ , RRDSET_TYPE_STACKED
+ );
+ RRDDIM *rd_bytes_tcp = rrddim_add(st_bytes, "tcp", NULL, 8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
+ RRDDIM *rd_bytes_udp = rrddim_add(st_bytes, "udp", NULL, 8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
+
+ RRDSET *st_packets = rrdset_create_localhost(
+ "netdata"
+ , "statsd_packets"
+ , NULL
+ , "statsd"
+ , NULL
+ , "Network packets processed by the netdata statsd server"
+ , "packets/s"
+ , PLUGIN_STATSD_NAME
+ , "stats"
+ , 132014
+ , statsd.update_every
+ , RRDSET_TYPE_STACKED
+ );
+ RRDDIM *rd_packets_tcp = rrddim_add(st_packets, "tcp", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ RRDDIM *rd_packets_udp = rrddim_add(st_packets, "udp", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+
+ RRDSET *st_tcp_connects = rrdset_create_localhost(
+ "netdata"
+ , "tcp_connects"
+ , NULL
+ , "statsd"
+ , NULL
+ , "statsd server TCP connects and disconnects"
+ , "events"
+ , PLUGIN_STATSD_NAME
+ , "stats"
+ , 132015
+ , statsd.update_every
+ , RRDSET_TYPE_LINE
+ );
+ RRDDIM *rd_tcp_connects = rrddim_add(st_tcp_connects, "connects", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ RRDDIM *rd_tcp_disconnects = rrddim_add(st_tcp_connects, "disconnects", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+
+ RRDSET *st_tcp_connected = rrdset_create_localhost(
+ "netdata"
+ , "tcp_connected"
+ , NULL
+ , "statsd"
+ , NULL
+ , "statsd server TCP connected sockets"
+ , "sockets"
+ , PLUGIN_STATSD_NAME
+ , "stats"
+ , 132016
+ , statsd.update_every
+ , RRDSET_TYPE_LINE
+ );
+ RRDDIM *rd_tcp_connected = rrddim_add(st_tcp_connected, "connected", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+
+ RRDSET *st_pcharts = rrdset_create_localhost(
+ "netdata"
+ , "private_charts"
+ , NULL
+ , "statsd"
+ , NULL
+ , "Private metric charts created by the netdata statsd server"
+ , "charts"
+ , PLUGIN_STATSD_NAME
+ , "stats"
+ , 132020
+ , statsd.update_every
+ , RRDSET_TYPE_AREA
+ );
+ RRDDIM *rd_pcharts = rrddim_add(st_pcharts, "charts", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+
+ RRDSET *stcpu_thread = rrdset_create_localhost(
+ "netdata"
+ , "plugin_statsd_charting_cpu"
+ , NULL
+ , "statsd"
+ , "netdata.statsd_cpu"
+ , "NetData statsd charting thread CPU usage"
+ , "milliseconds/s"
+ , PLUGIN_STATSD_NAME
+ , "stats"
+ , 132001
+ , statsd.update_every
+ , RRDSET_TYPE_STACKED
+ );
+
+ RRDDIM *rd_user = rrddim_add(stcpu_thread, "user", NULL, 1, 1000, RRD_ALGORITHM_INCREMENTAL);
+ RRDDIM *rd_system = rrddim_add(stcpu_thread, "system", NULL, 1, 1000, RRD_ALGORITHM_INCREMENTAL);
+ struct rusage thread;
+
+ for(i = 0; i < statsd.threads ;i++) {
+ char id[100 + 1];
+ char title[100 + 1];
+
+ snprintfz(id, 100, "plugin_statsd_collector%d_cpu", i + 1);
+ snprintfz(title, 100, "NetData statsd collector thread No %d CPU usage", i + 1);
+
+ statsd.collection_threads_status[i].st_cpu = rrdset_create_localhost(
+ "netdata"
+ , id
+ , NULL
+ , "statsd"
+ , "netdata.statsd_cpu"
+ , title
+ , "milliseconds/s"
+ , PLUGIN_STATSD_NAME
+ , "stats"
+ , 132002 + i
+ , statsd.update_every
+ , RRDSET_TYPE_STACKED
+ );
+
+ statsd.collection_threads_status[i].rd_user = rrddim_add(statsd.collection_threads_status[i].st_cpu, "user", NULL, 1, 1000, RRD_ALGORITHM_INCREMENTAL);
+ statsd.collection_threads_status[i].rd_system = rrddim_add(statsd.collection_threads_status[i].st_cpu, "system", NULL, 1, 1000, RRD_ALGORITHM_INCREMENTAL);
+ }
+
+ // ----------------------------------------------------------------------------------------------------------------
+ // statsd thread to turn metrics into charts
+
+ usec_t step = statsd.update_every * USEC_PER_SEC;
+ heartbeat_t hb;
+ heartbeat_init(&hb);
+ while(!netdata_exit) {
+ usec_t hb_dt = heartbeat_next(&hb, step);
+
+ statsd_flush_index_metrics(&statsd.gauges, statsd_flush_gauge);
+ statsd_flush_index_metrics(&statsd.counters, statsd_flush_counter);
+ statsd_flush_index_metrics(&statsd.meters, statsd_flush_meter);
+ statsd_flush_index_metrics(&statsd.timers, statsd_flush_timer);
+ statsd_flush_index_metrics(&statsd.histograms, statsd_flush_histogram);
+ statsd_flush_index_metrics(&statsd.sets, statsd_flush_set);
+
+ statsd_update_all_app_charts();
+
+ getrusage(RUSAGE_THREAD, &thread);
+
+ if(unlikely(netdata_exit))
+ break;
+
+ if(likely(hb_dt)) {
+ rrdset_next(st_metrics);
+ rrdset_next(st_useful_metrics);
+ rrdset_next(st_events);
+ rrdset_next(st_reads);
+ rrdset_next(st_bytes);
+ rrdset_next(st_packets);
+ rrdset_next(st_tcp_connects);
+ rrdset_next(st_tcp_connected);
+ rrdset_next(st_pcharts);
+ rrdset_next(stcpu_thread);
+ for(i = 0; i < statsd.threads ;i++)
+ rrdset_next(statsd.collection_threads_status[i].st_cpu);
+ }
+
+ rrddim_set_by_pointer(st_metrics, rd_metrics_gauge, (collected_number)statsd.gauges.metrics);
+ rrddim_set_by_pointer(st_metrics, rd_metrics_counter, (collected_number)statsd.counters.metrics);
+ rrddim_set_by_pointer(st_metrics, rd_metrics_timer, (collected_number)statsd.timers.metrics);
+ rrddim_set_by_pointer(st_metrics, rd_metrics_meter, (collected_number)statsd.meters.metrics);
+ rrddim_set_by_pointer(st_metrics, rd_metrics_histogram, (collected_number)statsd.histograms.metrics);
+ rrddim_set_by_pointer(st_metrics, rd_metrics_set, (collected_number)statsd.sets.metrics);
+ rrdset_done(st_metrics);
+
+ rrddim_set_by_pointer(st_useful_metrics, rd_useful_metrics_gauge, (collected_number)statsd.gauges.useful);
+ rrddim_set_by_pointer(st_useful_metrics, rd_useful_metrics_counter, (collected_number)statsd.counters.useful);
+ rrddim_set_by_pointer(st_useful_metrics, rd_useful_metrics_timer, (collected_number)statsd.timers.useful);
+ rrddim_set_by_pointer(st_useful_metrics, rd_useful_metrics_meter, (collected_number)statsd.meters.useful);
+ rrddim_set_by_pointer(st_useful_metrics, rd_useful_metrics_histogram, (collected_number)statsd.histograms.useful);
+ rrddim_set_by_pointer(st_useful_metrics, rd_useful_metrics_set, (collected_number)statsd.sets.useful);
+ rrdset_done(st_useful_metrics);
+
+ rrddim_set_by_pointer(st_events, rd_events_gauge, (collected_number)statsd.gauges.events);
+ rrddim_set_by_pointer(st_events, rd_events_counter, (collected_number)statsd.counters.events);
+ rrddim_set_by_pointer(st_events, rd_events_timer, (collected_number)statsd.timers.events);
+ rrddim_set_by_pointer(st_events, rd_events_meter, (collected_number)statsd.meters.events);
+ rrddim_set_by_pointer(st_events, rd_events_histogram, (collected_number)statsd.histograms.events);
+ rrddim_set_by_pointer(st_events, rd_events_set, (collected_number)statsd.sets.events);
+ rrddim_set_by_pointer(st_events, rd_events_unknown, (collected_number)statsd.unknown_types);
+ rrddim_set_by_pointer(st_events, rd_events_errors, (collected_number)statsd.socket_errors);
+ rrdset_done(st_events);
+
+ rrddim_set_by_pointer(st_reads, rd_reads_tcp, (collected_number)statsd.tcp_socket_reads);
+ rrddim_set_by_pointer(st_reads, rd_reads_udp, (collected_number)statsd.udp_socket_reads);
+ rrdset_done(st_reads);
+
+ rrddim_set_by_pointer(st_bytes, rd_bytes_tcp, (collected_number)statsd.tcp_bytes_read);
+ rrddim_set_by_pointer(st_bytes, rd_bytes_udp, (collected_number)statsd.udp_bytes_read);
+ rrdset_done(st_bytes);
+
+ rrddim_set_by_pointer(st_packets, rd_packets_tcp, (collected_number)statsd.tcp_packets_received);
+ rrddim_set_by_pointer(st_packets, rd_packets_udp, (collected_number)statsd.udp_packets_received);
+ rrdset_done(st_packets);
+
+ rrddim_set_by_pointer(st_tcp_connects, rd_tcp_connects, (collected_number)statsd.tcp_socket_connects);
+ rrddim_set_by_pointer(st_tcp_connects, rd_tcp_disconnects, (collected_number)statsd.tcp_socket_disconnects);
+ rrdset_done(st_tcp_connects);
+
+ rrddim_set_by_pointer(st_tcp_connected, rd_tcp_connected, (collected_number)statsd.tcp_socket_connected);
+ rrdset_done(st_tcp_connected);
+
+ rrddim_set_by_pointer(st_pcharts, rd_pcharts, (collected_number)statsd.private_charts);
+ rrdset_done(st_pcharts);
+
+ rrddim_set_by_pointer(stcpu_thread, rd_user, thread.ru_utime.tv_sec * 1000000ULL + thread.ru_utime.tv_usec);
+ rrddim_set_by_pointer(stcpu_thread, rd_system, thread.ru_stime.tv_sec * 1000000ULL + thread.ru_stime.tv_usec);
+ rrdset_done(stcpu_thread);
+
+ for(i = 0; i < statsd.threads ;i++) {
+ rrddim_set_by_pointer(statsd.collection_threads_status[i].st_cpu, statsd.collection_threads_status[i].rd_user, statsd.collection_threads_status[i].rusage.ru_utime.tv_sec * 1000000ULL + statsd.collection_threads_status[i].rusage.ru_utime.tv_usec);
+ rrddim_set_by_pointer(statsd.collection_threads_status[i].st_cpu, statsd.collection_threads_status[i].rd_system, statsd.collection_threads_status[i].rusage.ru_stime.tv_sec * 1000000ULL + statsd.collection_threads_status[i].rusage.ru_stime.tv_usec);
+ rrdset_done(statsd.collection_threads_status[i].st_cpu);
+ }
+ }
+
+cleanup: ; // added semi-colon to prevent older gcc error: label at end of compound statement
+ netdata_thread_cleanup_pop(1);
+ return NULL;
+}
diff --git a/collectors/statsd.plugin/statsd.h b/collectors/statsd.plugin/statsd.h
new file mode 100644
index 0000000..b741be7
--- /dev/null
+++ b/collectors/statsd.plugin/statsd.h
@@ -0,0 +1,25 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_STATSD_H
+#define NETDATA_STATSD_H 1
+
+#include "../../daemon/common.h"
+
+#define STATSD_LISTEN_PORT 8125
+#define STATSD_LISTEN_BACKLOG 4096
+
+#define NETDATA_PLUGIN_HOOK_STATSD \
+ { \
+ .name = "STATSD", \
+ .config_section = NULL, \
+ .config_name = NULL, \
+ .enabled = 1, \
+ .thread = NULL, \
+ .init_routine = NULL, \
+ .start_routine = statsd_main \
+ },
+
+
+extern void *statsd_main(void *ptr);
+
+#endif //NETDATA_STATSD_H
diff --git a/collectors/tc.plugin/Makefile.am b/collectors/tc.plugin/Makefile.am
new file mode 100644
index 0000000..9bc6cf2
--- /dev/null
+++ b/collectors/tc.plugin/Makefile.am
@@ -0,0 +1,20 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+CLEANFILES = \
+ tc-qos-helper.sh \
+ $(NULL)
+
+include $(top_srcdir)/build/subst.inc
+SUFFIXES = .in
+
+dist_plugins_SCRIPTS = \
+ tc-qos-helper.sh \
+ $(NULL)
+
+dist_noinst_DATA = \
+ tc-qos-helper.sh.in \
+ README.md \
+ $(NULL)
diff --git a/collectors/tc.plugin/README.md b/collectors/tc.plugin/README.md
new file mode 100644
index 0000000..4800760
--- /dev/null
+++ b/collectors/tc.plugin/README.md
@@ -0,0 +1,205 @@
+<!--
+title: "tc.plugin"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/tc.plugin/README.md
+-->
+
+# tc.plugin
+
+Live demo - **[see it in action here](https://registry.my-netdata.io/#menu_tc)**!
+
+![qos](https://cloud.githubusercontent.com/assets/2662304/14439411/b7f36254-0033-11e6-93f0-c739bb6a1c3a.gif)
+
+Netdata monitors `tc` QoS classes for all interfaces.
+
+If you also use [FireQOS](http://firehol.org/tutorial/fireqos-new-user/), Netdata will also collect interface and class names.
+
+There is a [shell helper](https://raw.githubusercontent.com/netdata/netdata/master/collectors/tc.plugin/tc-qos-helper.sh.in) for this (all parsing is done by the plugin in `C` code - this shell script is just a configuration for the command to run to get `tc` output).
+
+The source of the tc plugin is [here](https://raw.githubusercontent.com/netdata/netdata/master/collectors/tc.plugin/plugin_tc.c). It is somewhat complex, because a state machine was needed to keep track of all the `tc` classes, including the pseudo classes tc dynamically creates.
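+
+Under the hood, the helper periodically runs a command like `tc -s class show dev eth0` for each monitored interface (the exact command and options are configurable), and the plugin parses the output.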
+
+## Motivation
+
+One category of metrics missing from Linux monitoring is bandwidth consumption per open socket (inbound and outbound traffic). As a result, you cannot tell how much bandwidth your web server, your database server, your backups, or your ssh sessions are using.
+
+To solve this problem, the most *adventurous* Linux monitoring tools install kernel modules to capture all traffic, analyze it, and provide reports per application. This is a lot of work, it is CPU intensive, and it carries a great degree of risk, since the kernel modules involved can affect the stability of the whole system. Such solutions are probably also better suited to a core Linux router in your network.
+
+Others use NFACCT, the netfilter accounting module which is already part of the Linux firewall. However, this would require configuring a firewall on every system on which you want to measure bandwidth (just FYI, I do install a firewall on every server - and I strongly advise you to do so too - but configuring traffic accounting on all servers seems overkill when you don't need it for billing purposes).
+
+**There is however a much simpler approach**.
+
+## QoS
+
+One of the features the Linux kernel has, but which is rarely used, is its ability to **apply QoS to traffic**. Even more interesting is that it can apply QoS to **both inbound and outbound traffic**.
+
+QoS is about two features:
+
+1. **Classify traffic**
+
+ Classification is the process of organizing traffic in groups, called **classes**. Classification can evaluate every aspect of network packets, like source and destination ports, source and destination IPs, netfilter marks, etc.
+
+   When you classify traffic, you just assign a label to it. Of course classes have some properties of their own (like queuing mechanisms), but let's keep it simple: **a label**. For example, **I call `web server` traffic all the traffic from and to my server's tcp/80 and tcp/443, while I call `web surfing` all other tcp/80 and tcp/443 traffic**. You can use any combination you like. There is no limit.
+
+2. **Apply traffic shaping rules to these classes**
+
+ Traffic shaping is used to control how network interface bandwidth should be shared among the classes. Normally, you need to do this, when there is not enough bandwidth to satisfy all the demand, or when you want to control the supply of bandwidth to certain services. Of course classification is sufficient for monitoring traffic, but traffic shaping is also quite important, as we will explain in the next section.
+
+## Why you want QoS
+
+1. **Monitoring the bandwidth used by services**
+
+ Netdata provides wonderful real-time charts, like this one (wait to see the orange `rsync` part):
+
+ ![qos3](https://cloud.githubusercontent.com/assets/2662304/14474189/713ede84-0104-11e6-8c9c-8dca5c2abd63.gif)
+
+2. **Ensure sensitive administrative tasks will not starve for bandwidth**
+
+ Have you tried to ssh to a server when the network is congested? If you have, you already know it does not work very well. QoS can guarantee that services like ssh, dns, ntp, etc will always have a small supply of bandwidth. So, no matter what happens, you will be able to ssh to your server and DNS will always work.
+
+3. **Ensure administrative tasks will not monopolize all the bandwidth**
+
+   Services like backups, file copies, database dumps, etc. can easily monopolize all the available bandwidth. It is common, for example, for a nightly backup or a huge file transfer to negatively influence the end-user experience. QoS can fix that.
+
+4. **Ensure each end-user connection will get a fair cut of the available bandwidth.**
+
+   Several QoS queuing disciplines in Linux do this automatically, without any configuration from you. The result is that new sockets are favored over older ones, so users get a snappier experience while others are transferring large amounts of data.
+
+5. **Protect the servers from DDoS attacks.**
+
+   When your system is under a DDoS attack, it receives a lot more traffic than it can handle, and your applications will probably crash. Setting a limit on the inbound traffic using QoS will protect your servers (by throttling the requests) and, depending on the size of the attack, may allow your legitimate users to access the server while the attack is taking place.
+
+   Using QoS together with a [SYNPROXY](/collectors/proc.plugin/README.md) will provide a great degree of protection against most DDoS attacks. Actually, when I wrote that article, a few folks tried to DDoS the Netdata demo site to see the SYNPROXY operation in real-time. They did not do it right, but a great number of requests still reached the Netdata server. What saved Netdata was QoS. The Netdata demo server has QoS installed, so the requests were throttled and the server did not even reach the point of resource starvation. Read about it [here](/collectors/proc.plugin/README.md).
+
+On top of all these, QoS is extremely light. You will configure it once, and this is it. It will not bother you again and it will not use any noticeable CPU resources, especially on application and database servers.
+
+Once **traffic classification** is applied, we can use **[netdata](https://github.com/netdata/netdata)** to visualize the bandwidth consumption per class in real-time (no configuration is needed for Netdata - it will figure it out).
+
+This is QoS from a home Linux router. Check these features:
+
+1. It is real-time (per second updates)
+2. QoS really works in Linux - check that the `background` traffic is squeezed when `surfing` needs it.
+
+![test2](https://cloud.githubusercontent.com/assets/2662304/14093004/68966020-f553-11e5-98fe-ffee2086fafd.gif)
+
+---
+
+## QoS in Linux?
+
+Of course, `tc` is probably **the most undocumented, complicated and unfriendly** command in Linux.
+
+For example, do you know that for matching a simple port range in `tc`, e.g. all the high ports, from 1025 to 65535 inclusive, you have to match these:
+
+```
+1025/0xffff
+1026/0xfffe
+1028/0xfffc
+1032/0xfff8
+1040/0xfff0
+1056/0xffe0
+1088/0xffc0
+1152/0xff80
+1280/0xff00
+1536/0xfe00
+2048/0xf800
+4096/0xf000
+8192/0xe000
+16384/0xc000
+32768/0x8000
+```
+
+To do it the hard way, you can go through the [tc configuration steps](#qos-configuration-with-tc). An easier way is to use **[FireQOS](https://firehol.org/tutorial/fireqos-new-user/)**, a tool that simplifies QoS management in Linux.
+
+## QoS Configuration with FireHOL
+
+The **[FireHOL](https://firehol.org/)** package already distributes **[FireQOS](https://firehol.org/tutorial/fireqos-new-user/)**. Check the **[FireQOS tutorial](https://firehol.org/tutorial/fireqos-new-user/)** to learn how to write your own QoS configuration.
+
+With **[FireQOS](https://firehol.org/tutorial/fireqos-new-user/)**, it is **really simple for everyone to use QoS in Linux**. Just install the package `firehol`. It should already be available for your distribution. If not, check the **[FireHOL Installation Guide](https://firehol.org/installing/)**. After that, you will have the `fireqos` command which uses a configuration like the following `/etc/firehol/fireqos.conf`, used at the Netdata demo site:
+
+```sh
+ # configure the Netdata ports
+ server_netdata_ports="tcp/19999"
+
+ interface eth0 world bidirectional ethernet balanced rate 50Mbit
+ class arp
+ match arp
+
+ class icmp
+ match icmp
+
+ class dns commit 1Mbit
+ server dns
+ client dns
+
+ class ntp
+ server ntp
+ client ntp
+
+ class ssh commit 2Mbit
+ server ssh
+ client ssh
+
+ class rsync commit 2Mbit max 10Mbit
+ server rsync
+ client rsync
+
+ class web_server commit 40Mbit
+ server http
+ server netdata
+
+ class client
+ client surfing
+
+ class nms commit 1Mbit
+ match input src 10.2.3.5
+```
+
+Nothing more is needed. You just run `fireqos start` to apply this configuration, restart Netdata, and you have real-time visualization of the bandwidth consumption of your applications. FireQOS is not a daemon: it just converts the configuration to `tc` commands, runs them, and exits.
+
+**IMPORTANT**: If you copy this configuration to apply it to your system, please adapt the speeds - experiment in non-production environments to learn the tool, before applying it on your servers.
+
+And this is what you are going to get:
+
+![image](https://cloud.githubusercontent.com/assets/2662304/14436322/c91d90a4-0024-11e6-9fb1-57cdef1580df.png)
+
+## QoS Configuration with tc
+
+First, set up the tc rules in `rc.local`, using commands that assign different QoS markings to different classids. You can see one such example in [github issue #4563](https://github.com/netdata/netdata/issues/4563#issuecomment-455711973).
+
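+For orientation, here is a minimal sketch of what such `rc.local` commands could look like. The device, the rates, and the filter are illustrative assumptions - adapt them to your interfaces and traffic. The classids `2:1` and `2:46` match the example mapping below:
+
+```sh
+# create an HTB qdisc, sending unclassified traffic to class 2:1
+tc qdisc add dev eth0 root handle 2: htb default 1
+
+# define two classes with different guarantees and ceilings
+tc class add dev eth0 parent 2: classid 2:1 htb rate 45mbit ceil 50mbit
+tc class add dev eth0 parent 2: classid 2:46 htb rate 2mbit ceil 10mbit
+
+# classify SIP telephony traffic (udp/5060) into class 2:46
+tc filter add dev eth0 parent 2: protocol ip u32 \
+    match ip protocol 17 0xff match ip dport 5060 0xffff flowid 2:46
+```
+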
+Then, map the classids to names by creating `/etc/iproute2/tc_cls`. For example:
+
+```
+2:1 Standard
+2:8 LowPriorityData
+2:10 HighThroughputData
+2:16 OAM
+2:18 LowLatencyData
+2:24 BroadcastVideo
+2:26 MultimediaStreaming
+2:32 RealTimeInteractive
+2:34 MultimediaConferencing
+2:40 Signalling
+2:46 Telephony
+2:48 NetworkControl
+```
+
+Add the following configuration option to `/etc/netdata/netdata.conf`:
+
+```
+[plugin:tc]
+    enable show all classes and qdiscs for all interfaces = yes
+```
+
+Finally, create `/etc/netdata/tc-qos-helper.conf` with this content:
+
+```
+tc_show="class"
+```
+
+Please note that, by default, Netdata will enable a chart's metrics only when they are not zero. If they are constantly zero, they are ignored. Metrics that start having values after Netdata is started will be detected, and their charts will be added to the dashboard automatically (a refresh of the dashboard is needed for them to appear, though). Set `yes` for a chart instead of `auto` to enable it permanently. You can also set the `enable zero metrics` option to `yes` in the `[global]` section, which enables charts with zero metrics for all internal Netdata plugins.
+
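+For example, to enable charts with zero metrics for all internal plugins, the `[global]` section of `netdata.conf` would contain (a minimal sketch using the option named above):
+
+```
+[global]
+    enable zero metrics = yes
+```
+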
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Ftc.plugin%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/tc.plugin/plugin_tc.c b/collectors/tc.plugin/plugin_tc.c
new file mode 100644
index 0000000..b92450e
--- /dev/null
+++ b/collectors/tc.plugin/plugin_tc.c
@@ -0,0 +1,1163 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "plugin_tc.h"
+
+#define RRD_TYPE_TC "tc"
+#define PLUGIN_TC_NAME "tc.plugin"
+
+// ----------------------------------------------------------------------------
+// /sbin/tc processor
+// this requires the script plugins.d/tc-qos-helper.sh
+
+#define TC_LINE_MAX 1024
+
+struct tc_class {
+ avl avl;
+
+ char *id;
+ uint32_t hash;
+
+ char *name;
+
+ char *leafid;
+ uint32_t leaf_hash;
+
+ char *parentid;
+ uint32_t parent_hash;
+
+ char hasparent;
+ char isleaf;
+ char isqdisc;
+ char render;
+
+ unsigned long long bytes;
+ unsigned long long packets;
+ unsigned long long dropped;
+ unsigned long long overlimits;
+ unsigned long long requeues;
+ unsigned long long lended;
+ unsigned long long borrowed;
+ unsigned long long giants;
+ unsigned long long tokens;
+ unsigned long long ctokens;
+
+ RRDDIM *rd_bytes;
+ RRDDIM *rd_packets;
+ RRDDIM *rd_dropped;
+ RRDDIM *rd_tokens;
+ RRDDIM *rd_ctokens;
+
+ char name_updated;
+    char updated;   // set when this class has been updated in the current run
+    int unupdated;  // the number of times this has been found un-updated
+
+ struct tc_class *next;
+ struct tc_class *prev;
+};
+
+struct tc_device {
+ avl avl;
+
+ char *id;
+ uint32_t hash;
+
+ char *name;
+ char *family;
+
+ char name_updated;
+ char family_updated;
+
+ char enabled;
+ char enabled_bytes;
+ char enabled_packets;
+ char enabled_dropped;
+ char enabled_tokens;
+ char enabled_ctokens;
+ char enabled_all_classes_qdiscs;
+
+ RRDSET *st_bytes;
+ RRDSET *st_packets;
+ RRDSET *st_dropped;
+ RRDSET *st_tokens;
+ RRDSET *st_ctokens;
+
+ avl_tree_type classes_index;
+
+ struct tc_class *classes;
+ struct tc_class *last_class;
+
+ struct tc_device *next;
+ struct tc_device *prev;
+};
+
+
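+// the doubly-linked list of all tc devices the plugin tracks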
+struct tc_device *tc_device_root = NULL;
+
+// ----------------------------------------------------------------------------
+// tc_device index
+
+static int tc_device_compare(void* a, void* b) {
+ if(((struct tc_device *)a)->hash < ((struct tc_device *)b)->hash) return -1;
+ else if(((struct tc_device *)a)->hash > ((struct tc_device *)b)->hash) return 1;
+ else return strcmp(((struct tc_device *)a)->id, ((struct tc_device *)b)->id);
+}
+
+avl_tree_type tc_device_root_index = {
+ NULL,
+ tc_device_compare
+};
+
+#define tc_device_index_add(st) (struct tc_device *)avl_insert(&tc_device_root_index, (avl *)(st))
+#define tc_device_index_del(st) (struct tc_device *)avl_remove(&tc_device_root_index, (avl *)(st))
+
+static inline struct tc_device *tc_device_index_find(const char *id, uint32_t hash) {
+ struct tc_device tmp;
+ tmp.id = (char *)id;
+ tmp.hash = (hash)?hash:simple_hash(tmp.id);
+
+ return (struct tc_device *)avl_search(&(tc_device_root_index), (avl *)&tmp);
+}
+
+
+// ----------------------------------------------------------------------------
+// tc_class index
+
+static int tc_class_compare(void* a, void* b) {
+ if(((struct tc_class *)a)->hash < ((struct tc_class *)b)->hash) return -1;
+ else if(((struct tc_class *)a)->hash > ((struct tc_class *)b)->hash) return 1;
+ else return strcmp(((struct tc_class *)a)->id, ((struct tc_class *)b)->id);
+}
+
+#define tc_class_index_add(st, rd) (struct tc_class *)avl_insert(&((st)->classes_index), (avl *)(rd))
+#define tc_class_index_del(st, rd) (struct tc_class *)avl_remove(&((st)->classes_index), (avl *)(rd))
+
+static inline struct tc_class *tc_class_index_find(struct tc_device *st, const char *id, uint32_t hash) {
+ struct tc_class tmp;
+ tmp.id = (char *)id;
+ tmp.hash = (hash)?hash:simple_hash(tmp.id);
+
+ return (struct tc_class *)avl_search(&(st->classes_index), (avl *) &tmp);
+}
+
+// ----------------------------------------------------------------------------
+
+static inline void tc_class_free(struct tc_device *n, struct tc_class *c) {
+ if(c == n->classes) {
+ if(likely(c->next))
+ n->classes = c->next;
+ else
+ n->classes = c->prev;
+ }
+
+ if(c == n->last_class) {
+ if(unlikely(c->next))
+ n->last_class = c->next;
+ else
+ n->last_class = c->prev;
+ }
+
+ if(c->next) c->next->prev = c->prev;
+ if(c->prev) c->prev->next = c->next;
+
+ debug(D_TC_LOOP, "Removing from device '%s' class '%s', parentid '%s', leafid '%s', unused=%d", n->id, c->id, c->parentid?c->parentid:"", c->leafid?c->leafid:"", c->unupdated);
+
+ if(unlikely(tc_class_index_del(n, c) != c))
+ error("plugin_tc: INTERNAL ERROR: attempt remove class '%s' from device '%s': removed a different calls", c->id, n->id);
+
+ freez(c->id);
+ freez(c->name);
+ freez(c->leafid);
+ freez(c->parentid);
+ freez(c);
+}
+
+static inline void tc_device_classes_cleanup(struct tc_device *d) {
+ static int cleanup_every = 999;
+
+ if(unlikely(cleanup_every > 0)) {
+ cleanup_every = (int) config_get_number("plugin:tc", "cleanup unused classes every", 120);
+ if(cleanup_every < 0) cleanup_every = -cleanup_every;
+ }
+
+ d->name_updated = 0;
+ d->family_updated = 0;
+
+ struct tc_class *c = d->classes;
+ while(c) {
+ if(unlikely(cleanup_every && c->unupdated >= cleanup_every)) {
+ struct tc_class *nc = c->next;
+ tc_class_free(d, c);
+ c = nc;
+ }
+ else {
+ c->updated = 0;
+ c->name_updated = 0;
+
+ c = c->next;
+ }
+ }
+}
+
+static inline void tc_device_commit(struct tc_device *d) {
+ static int enable_new_interfaces = -1, enable_bytes = -1, enable_packets = -1, enable_dropped = -1, enable_tokens = -1, enable_ctokens = -1, enabled_all_classes_qdiscs = -1;
+
+ if(unlikely(enable_new_interfaces == -1)) {
+ enable_new_interfaces = config_get_boolean_ondemand("plugin:tc", "enable new interfaces detected at runtime", CONFIG_BOOLEAN_YES);
+ enable_bytes = config_get_boolean_ondemand("plugin:tc", "enable traffic charts for all interfaces", CONFIG_BOOLEAN_AUTO);
+ enable_packets = config_get_boolean_ondemand("plugin:tc", "enable packets charts for all interfaces", CONFIG_BOOLEAN_AUTO);
+ enable_dropped = config_get_boolean_ondemand("plugin:tc", "enable dropped charts for all interfaces", CONFIG_BOOLEAN_AUTO);
+ enable_tokens = config_get_boolean_ondemand("plugin:tc", "enable tokens charts for all interfaces", CONFIG_BOOLEAN_NO);
+ enable_ctokens = config_get_boolean_ondemand("plugin:tc", "enable ctokens charts for all interfaces", CONFIG_BOOLEAN_NO);
+ enabled_all_classes_qdiscs = config_get_boolean_ondemand("plugin:tc", "enable show all classes and qdiscs for all interfaces", CONFIG_BOOLEAN_NO);
+ }
+
+ if(unlikely(d->enabled == (char)-1)) {
+ char var_name[CONFIG_MAX_NAME + 1];
+ snprintfz(var_name, CONFIG_MAX_NAME, "qos for %s", d->id);
+
+ d->enabled = (char)config_get_boolean_ondemand("plugin:tc", var_name, enable_new_interfaces);
+
+ snprintfz(var_name, CONFIG_MAX_NAME, "traffic chart for %s", d->id);
+ d->enabled_bytes = (char)config_get_boolean_ondemand("plugin:tc", var_name, enable_bytes);
+
+ snprintfz(var_name, CONFIG_MAX_NAME, "packets chart for %s", d->id);
+ d->enabled_packets = (char)config_get_boolean_ondemand("plugin:tc", var_name, enable_packets);
+
+ snprintfz(var_name, CONFIG_MAX_NAME, "dropped packets chart for %s", d->id);
+ d->enabled_dropped = (char)config_get_boolean_ondemand("plugin:tc", var_name, enable_dropped);
+
+ snprintfz(var_name, CONFIG_MAX_NAME, "tokens chart for %s", d->id);
+ d->enabled_tokens = (char)config_get_boolean_ondemand("plugin:tc", var_name, enable_tokens);
+
+ snprintfz(var_name, CONFIG_MAX_NAME, "ctokens chart for %s", d->id);
+ d->enabled_ctokens = (char)config_get_boolean_ondemand("plugin:tc", var_name, enable_ctokens);
+
+ snprintfz(var_name, CONFIG_MAX_NAME, "show all classes for %s", d->id);
+ d->enabled_all_classes_qdiscs = (char)config_get_boolean_ondemand("plugin:tc", var_name, enabled_all_classes_qdiscs);
+ }
+
+ // we only need to add leaf classes
+ struct tc_class *c, *x /*, *root = NULL */;
+ unsigned long long bytes_sum = 0, packets_sum = 0, dropped_sum = 0, tokens_sum = 0, ctokens_sum = 0;
+ int active_nodes = 0, updated_classes = 0, updated_qdiscs = 0;
+
+ // prepare all classes
+ // we set reasonable defaults for the rest of the code below
+
+ for(c = d->classes ; c ; c = c->next) {
+ c->render = 0; // do not render this class
+
+ c->isleaf = 1; // this is a leaf class
+ c->hasparent = 0; // without a parent
+
+ if(unlikely(!c->updated))
+ c->unupdated++; // increase its unupdated counter
+ else {
+ c->unupdated = 0; // reset its unupdated counter
+
+ // count how many of each kind
+ if(c->isqdisc)
+ updated_qdiscs++;
+ else
+ updated_classes++;
+ }
+ }
+
+ if(unlikely(!d->enabled || (!updated_classes && !updated_qdiscs))) {
+ debug(D_TC_LOOP, "TC: Ignoring TC device '%s'. It is not enabled/updated.", d->name?d->name:d->id);
+ tc_device_classes_cleanup(d);
+ return;
+ }
+
+ if(unlikely(updated_classes && updated_qdiscs)) {
+ error("TC: device '%s' has active both classes (%d) and qdiscs (%d). Will render only qdiscs.", d->id, updated_classes, updated_qdiscs);
+
+ // set all classes to !updated
+ for(c = d->classes ; c ; c = c->next)
+ if(unlikely(!c->isqdisc && c->updated))
+ c->updated = 0;
+
+ updated_classes = 0;
+ }
+
+ // mark the classes as leafs and parents
+ //
+ // TC is hierarchical:
+ // - classes can have other classes in them
+ // - the same is true for qdiscs (i.e. qdiscs have classes, that have other qdiscs)
+ //
+ // we need to present a chart with leaf nodes only, so that the sum
+ // of all dimensions of the chart, will be the total utilization
+ // of the interface.
+ //
+ // here we try to find the ones we need to report
+ // by default all nodes are marked with: isleaf = 1 (see above)
+ //
+    // so, here we remove the isleaf flag from nodes in the middle
+    // and we set the hasparent flag on the leaf nodes whose parent we found
+ if(likely(!d->enabled_all_classes_qdiscs)) {
+ for(c = d->classes; c; c = c->next) {
+ if(unlikely(!c->updated)) continue;
+
+ //debug(D_TC_LOOP, "TC: In device '%s', %s '%s' has leafid: '%s' and parentid '%s'.",
+ // d->id,
+ // c->isqdisc?"qdisc":"class",
+ // c->id,
+ // c->leafid?c->leafid:"NULL",
+ // c->parentid?c->parentid:"NULL");
+
+ // find if c is leaf or not
+ for(x = d->classes; x; x = x->next) {
+ if(unlikely(!x->updated || c == x || !x->parentid)) continue;
+
+ // classes have both parentid and leafid
+ // qdiscs have only parentid
+ // the following works for both (it is an OR)
+
+ if((c->hash == x->parent_hash && strcmp(c->id, x->parentid) == 0) ||
+ (c->leafid && c->leaf_hash == x->parent_hash && strcmp(c->leafid, x->parentid) == 0)) {
+ // debug(D_TC_LOOP, "TC: In device '%s', %s '%s' (leafid: '%s') has as leaf %s '%s' (parentid: '%s').", d->name?d->name:d->id, c->isqdisc?"qdisc":"class", c->name?c->name:c->id, c->leafid?c->leafid:c->id, x->isqdisc?"qdisc":"class", x->name?x->name:x->id, x->parentid?x->parentid:x->id);
+ c->isleaf = 0;
+ x->hasparent = 1;
+ }
+ }
+ }
+ }
+
+ for(c = d->classes ; c ; c = c->next) {
+ if(unlikely(!c->updated)) continue;
+
+ // debug(D_TC_LOOP, "TC: device '%s', %s '%s' isleaf=%d, hasparent=%d", d->id, (c->isqdisc)?"qdisc":"class", c->id, c->isleaf, c->hasparent);
+
+ if(unlikely((c->isleaf && c->hasparent) || d->enabled_all_classes_qdiscs)) {
+ c->render = 1;
+ active_nodes++;
+ bytes_sum += c->bytes;
+ packets_sum += c->packets;
+ dropped_sum += c->dropped;
+ tokens_sum += c->tokens;
+ ctokens_sum += c->ctokens;
+ }
+
+ //if(unlikely(!c->hasparent)) {
+ // if(root) error("TC: multiple root class/qdisc for device '%s' (old: '%s', new: '%s')", d->id, root->id, c->id);
+ // root = c;
+ // debug(D_TC_LOOP, "TC: found root class/qdisc '%s'", root->id);
+ //}
+ }
+
+#ifdef NETDATA_INTERNAL_CHECKS
+ // dump all the list to see what we know
+
+ if(unlikely(debug_flags & D_TC_LOOP)) {
+ for(c = d->classes ; c ; c = c->next) {
+ if(c->render) debug(D_TC_LOOP, "TC: final nodes dump for '%s': class %s, OK", d->name, c->id);
+ else debug(D_TC_LOOP, "TC: final nodes dump for '%s': class %s, IGNORE (updated: %d, isleaf: %d, hasparent: %d, parent: %s)", d->name?d->name:d->id, c->id, c->updated, c->isleaf, c->hasparent, c->parentid?c->parentid:"(unset)");
+ }
+ }
+#endif
+
+ if(unlikely(!active_nodes)) {
+ debug(D_TC_LOOP, "TC: Ignoring TC device '%s'. No useful classes/qdiscs.", d->name?d->name:d->id);
+ tc_device_classes_cleanup(d);
+ return;
+ }
+
+ debug(D_TC_LOOP, "TC: evaluating TC device '%s'. enabled = %d/%d (bytes: %d/%d, packets: %d/%d, dropped: %d/%d, tokens: %d/%d, ctokens: %d/%d, all_classes_qdiscs: %d/%d), classes: (bytes = %llu, packets = %llu, dropped = %llu, tokens = %llu, ctokens = %llu).",
+ d->name?d->name:d->id,
+ d->enabled, enable_new_interfaces,
+ d->enabled_bytes, enable_bytes,
+ d->enabled_packets, enable_packets,
+ d->enabled_dropped, enable_dropped,
+ d->enabled_tokens, enable_tokens,
+ d->enabled_ctokens, enable_ctokens,
+ d->enabled_all_classes_qdiscs, enabled_all_classes_qdiscs,
+ bytes_sum,
+ packets_sum,
+ dropped_sum,
+ tokens_sum,
+ ctokens_sum
+ );
+
+ // --------------------------------------------------------------------
+ // bytes
+
+ if(d->enabled_bytes == CONFIG_BOOLEAN_YES || (d->enabled_bytes == CONFIG_BOOLEAN_AUTO &&
+ (bytes_sum ||
+ netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
+ d->enabled_bytes = CONFIG_BOOLEAN_YES;
+
+        if(unlikely(!d->st_bytes)) {
+ d->st_bytes = rrdset_create_localhost(
+ RRD_TYPE_TC
+ , d->id
+ , d->name ? d->name : d->id
+ , d->family ? d->family : d->id
+ , RRD_TYPE_TC ".qos"
+ , "Class Usage"
+ , "kilobits/s"
+ , PLUGIN_TC_NAME
+ , NULL
+ , NETDATA_CHART_PRIO_TC_QOS
+ , localhost->rrd_update_every
+ , d->enabled_all_classes_qdiscs ? RRDSET_TYPE_LINE : RRDSET_TYPE_STACKED
+ );
+        }
+        else {
+ rrdset_next(d->st_bytes);
+ if(unlikely(d->name_updated)) rrdset_set_name(d->st_bytes, d->name);
+
+ // TODO
+ // update the family
+ }
+
+ for(c = d->classes ; c ; c = c->next) {
+ if(unlikely(!c->render)) continue;
+
+ if(unlikely(!c->rd_bytes))
+ c->rd_bytes = rrddim_add(d->st_bytes, c->id, c->name?c->name:c->id, 8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
+ else if(unlikely(c->name_updated))
+ rrddim_set_name(d->st_bytes, c->rd_bytes, c->name);
+
+ rrddim_set_by_pointer(d->st_bytes, c->rd_bytes, c->bytes);
+ }
+ rrdset_done(d->st_bytes);
+ }
+
+ // --------------------------------------------------------------------
+ // packets
+
+ if(d->enabled_packets == CONFIG_BOOLEAN_YES || (d->enabled_packets == CONFIG_BOOLEAN_AUTO &&
+ (packets_sum ||
+ netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
+ d->enabled_packets = CONFIG_BOOLEAN_YES;
+
+ if(unlikely(!d->st_packets)) {
+ char id[RRD_ID_LENGTH_MAX + 1];
+ char name[RRD_ID_LENGTH_MAX + 1];
+ snprintfz(id, RRD_ID_LENGTH_MAX, "%s_packets", d->id);
+ snprintfz(name, RRD_ID_LENGTH_MAX, "%s_packets", d->name?d->name:d->id);
+
+ d->st_packets = rrdset_create_localhost(
+ RRD_TYPE_TC
+ , id
+ , name
+ , d->family ? d->family : d->id
+ , RRD_TYPE_TC ".qos_packets"
+ , "Class Packets"
+ , "packets/s"
+ , PLUGIN_TC_NAME
+ , NULL
+ , NETDATA_CHART_PRIO_TC_QOS_PACKETS
+ , localhost->rrd_update_every
+ , d->enabled_all_classes_qdiscs ? RRDSET_TYPE_LINE : RRDSET_TYPE_STACKED
+ );
+ }
+ else {
+ rrdset_next(d->st_packets);
+
+ if(unlikely(d->name_updated)) {
+ char name[RRD_ID_LENGTH_MAX + 1];
+ snprintfz(name, RRD_ID_LENGTH_MAX, "%s_packets", d->name?d->name:d->id);
+ rrdset_set_name(d->st_packets, name);
+ }
+
+ // TODO
+ // update the family
+ }
+
+ for(c = d->classes ; c ; c = c->next) {
+ if(unlikely(!c->render)) continue;
+
+ if(unlikely(!c->rd_packets))
+ c->rd_packets = rrddim_add(d->st_packets, c->id, c->name?c->name:c->id, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ else if(unlikely(c->name_updated))
+ rrddim_set_name(d->st_packets, c->rd_packets, c->name);
+
+ rrddim_set_by_pointer(d->st_packets, c->rd_packets, c->packets);
+ }
+ rrdset_done(d->st_packets);
+ }
+
+ // --------------------------------------------------------------------
+ // dropped
+
+ if(d->enabled_dropped == CONFIG_BOOLEAN_YES || (d->enabled_dropped == CONFIG_BOOLEAN_AUTO &&
+ (dropped_sum ||
+ netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
+ d->enabled_dropped = CONFIG_BOOLEAN_YES;
+
+ if(unlikely(!d->st_dropped)) {
+ char id[RRD_ID_LENGTH_MAX + 1];
+ char name[RRD_ID_LENGTH_MAX + 1];
+ snprintfz(id, RRD_ID_LENGTH_MAX, "%s_dropped", d->id);
+ snprintfz(name, RRD_ID_LENGTH_MAX, "%s_dropped", d->name?d->name:d->id);
+
+ d->st_dropped = rrdset_create_localhost(
+ RRD_TYPE_TC
+ , id
+ , name
+ , d->family ? d->family : d->id
+ , RRD_TYPE_TC ".qos_dropped"
+ , "Class Dropped Packets"
+ , "packets/s"
+ , PLUGIN_TC_NAME
+ , NULL
+ , NETDATA_CHART_PRIO_TC_QOS_DROPPED
+ , localhost->rrd_update_every
+ , d->enabled_all_classes_qdiscs ? RRDSET_TYPE_LINE : RRDSET_TYPE_STACKED
+ );
+ }
+ else {
+ rrdset_next(d->st_dropped);
+
+ if(unlikely(d->name_updated)) {
+ char name[RRD_ID_LENGTH_MAX + 1];
+ snprintfz(name, RRD_ID_LENGTH_MAX, "%s_dropped", d->name?d->name:d->id);
+ rrdset_set_name(d->st_dropped, name);
+ }
+
+ // TODO
+ // update the family
+ }
+
+ for(c = d->classes ; c ; c = c->next) {
+ if(unlikely(!c->render)) continue;
+
+ if(unlikely(!c->rd_dropped))
+ c->rd_dropped = rrddim_add(d->st_dropped, c->id, c->name?c->name:c->id, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ else if(unlikely(c->name_updated))
+ rrddim_set_name(d->st_dropped, c->rd_dropped, c->name);
+
+ rrddim_set_by_pointer(d->st_dropped, c->rd_dropped, c->dropped);
+ }
+ rrdset_done(d->st_dropped);
+ }
+
+ // --------------------------------------------------------------------
+ // tokens
+
+ if(d->enabled_tokens == CONFIG_BOOLEAN_YES || (d->enabled_tokens == CONFIG_BOOLEAN_AUTO &&
+ (tokens_sum ||
+ netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
+ d->enabled_tokens = CONFIG_BOOLEAN_YES;
+
+ if(unlikely(!d->st_tokens)) {
+ char id[RRD_ID_LENGTH_MAX + 1];
+ char name[RRD_ID_LENGTH_MAX + 1];
+ snprintfz(id, RRD_ID_LENGTH_MAX, "%s_tokens", d->id);
+ snprintfz(name, RRD_ID_LENGTH_MAX, "%s_tokens", d->name?d->name:d->id);
+
+ d->st_tokens = rrdset_create_localhost(
+ RRD_TYPE_TC
+ , id
+ , name
+ , d->family ? d->family : d->id
+ , RRD_TYPE_TC ".qos_tokens"
+ , "Class Tokens"
+ , "tokens"
+ , PLUGIN_TC_NAME
+ , NULL
+ , NETDATA_CHART_PRIO_TC_QOS_TOCKENS
+ , localhost->rrd_update_every
+ , RRDSET_TYPE_LINE
+ );
+ }
+ else {
+ rrdset_next(d->st_tokens);
+
+ if(unlikely(d->name_updated)) {
+ char name[RRD_ID_LENGTH_MAX + 1];
+ snprintfz(name, RRD_ID_LENGTH_MAX, "%s_tokens", d->name?d->name:d->id);
+ rrdset_set_name(d->st_tokens, name);
+ }
+
+ // TODO
+ // update the family
+ }
+
+ for(c = d->classes ; c ; c = c->next) {
+ if(unlikely(!c->render)) continue;
+
+ if(unlikely(!c->rd_tokens)) {
+ c->rd_tokens = rrddim_add(d->st_tokens, c->id, c->name?c->name:c->id, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else if(unlikely(c->name_updated))
+ rrddim_set_name(d->st_tokens, c->rd_tokens, c->name);
+
+ rrddim_set_by_pointer(d->st_tokens, c->rd_tokens, c->tokens);
+ }
+ rrdset_done(d->st_tokens);
+ }
+
+ // --------------------------------------------------------------------
+ // ctokens
+
+ if(d->enabled_ctokens == CONFIG_BOOLEAN_YES || (d->enabled_ctokens == CONFIG_BOOLEAN_AUTO &&
+ (ctokens_sum ||
+ netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
+ d->enabled_ctokens = CONFIG_BOOLEAN_YES;
+
+ if(unlikely(!d->st_ctokens)) {
+ char id[RRD_ID_LENGTH_MAX + 1];
+ char name[RRD_ID_LENGTH_MAX + 1];
+ snprintfz(id, RRD_ID_LENGTH_MAX, "%s_ctokens", d->id);
+ snprintfz(name, RRD_ID_LENGTH_MAX, "%s_ctokens", d->name?d->name:d->id);
+
+ d->st_ctokens = rrdset_create_localhost(
+ RRD_TYPE_TC
+ , id
+ , name
+ , d->family ? d->family : d->id
+ , RRD_TYPE_TC ".qos_ctokens"
+ , "Class cTokens"
+ , "ctokens"
+ , PLUGIN_TC_NAME
+ , NULL
+ , NETDATA_CHART_PRIO_TC_QOS_CTOCKENS
+ , localhost->rrd_update_every
+ , RRDSET_TYPE_LINE
+ );
+ }
+ else {
+ debug(D_TC_LOOP, "TC: Updating _ctokens chart for device '%s'", d->name?d->name:d->id);
+ rrdset_next(d->st_ctokens);
+
+ if(unlikely(d->name_updated)) {
+ char name[RRD_ID_LENGTH_MAX + 1];
+ snprintfz(name, RRD_ID_LENGTH_MAX, "%s_ctokens", d->name?d->name:d->id);
+ rrdset_set_name(d->st_ctokens, name);
+ }
+
+ // TODO
+ // update the family
+ }
+
+ for(c = d->classes ; c ; c = c->next) {
+ if(unlikely(!c->render)) continue;
+
+ if(unlikely(!c->rd_ctokens))
+ c->rd_ctokens = rrddim_add(d->st_ctokens, c->id, c->name?c->name:c->id, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ else if(unlikely(c->name_updated))
+ rrddim_set_name(d->st_ctokens, c->rd_ctokens, c->name);
+
+ rrddim_set_by_pointer(d->st_ctokens, c->rd_ctokens, c->ctokens);
+ }
+ rrdset_done(d->st_ctokens);
+ }
+
+ tc_device_classes_cleanup(d);
+}
+
+static inline void tc_device_set_class_name(struct tc_device *d, char *id, char *name) {
+ if(unlikely(!name || !*name)) return;
+
+ struct tc_class *c = tc_class_index_find(d, id, 0);
+ if(likely(c)) {
+ if(likely(c->name)) {
+ if(!strcmp(c->name, name)) return;
+ freez(c->name);
+ c->name = NULL;
+ }
+
+ if(likely(name && *name && strcmp(c->id, name) != 0)) {
+ debug(D_TC_LOOP, "TC: Setting device '%s', class '%s' name to '%s'", d->id, id, name);
+ c->name = strdupz(name);
+ c->name_updated = 1;
+ }
+ }
+}
+
+static inline void tc_device_set_device_name(struct tc_device *d, char *name) {
+ if(unlikely(!name || !*name)) return;
+
+ if(d->name) {
+ if(!strcmp(d->name, name)) return;
+ freez(d->name);
+ d->name = NULL;
+ }
+
+ if(likely(name && *name && strcmp(d->id, name) != 0)) {
+ debug(D_TC_LOOP, "TC: Setting device '%s' name to '%s'", d->id, name);
+ d->name = strdupz(name);
+ d->name_updated = 1;
+ }
+}
+
+static inline void tc_device_set_device_family(struct tc_device *d, char *family) {
+ freez(d->family);
+ d->family = NULL;
+
+ if(likely(family && *family && strcmp(d->id, family) != 0)) {
+ debug(D_TC_LOOP, "TC: Setting device '%s' family to '%s'", d->id, family);
+ d->family = strdupz(family);
+ d->family_updated = 1;
+ }
+ // no need for null termination - it is already null
+}
+
+static inline struct tc_device *tc_device_create(char *id)
+{
+ struct tc_device *d = tc_device_index_find(id, 0);
+
+ if(!d) {
+ debug(D_TC_LOOP, "TC: Creating device '%s'", id);
+
+ d = callocz(1, sizeof(struct tc_device));
+
+ d->id = strdupz(id);
+ d->hash = simple_hash(d->id);
+ d->enabled = (char)-1;
+
+ avl_init(&d->classes_index, tc_class_compare);
+ if(unlikely(tc_device_index_add(d) != d))
+ error("plugin_tc: INTERNAL ERROR: removing device '%s' removed a different device.", d->id);
+
+ if(!tc_device_root) {
+ tc_device_root = d;
+ }
+ else {
+ d->next = tc_device_root;
+ tc_device_root->prev = d;
+ tc_device_root = d;
+ }
+ }
+
+ return(d);
+}
+
+static inline struct tc_class *tc_class_add(struct tc_device *n, char *id, char qdisc, char *parentid, char *leafid)
+{
+ struct tc_class *c = tc_class_index_find(n, id, 0);
+
+ if(!c) {
+ debug(D_TC_LOOP, "TC: Creating in device '%s', class id '%s', parentid '%s', leafid '%s'", n->id, id, parentid?parentid:"", leafid?leafid:"");
+
+ c = callocz(1, sizeof(struct tc_class));
+
+ if(unlikely(!n->classes))
+ n->classes = c;
+
+ else if(likely(n->last_class)) {
+ n->last_class->next = c;
+ c->prev = n->last_class;
+ }
+
+ n->last_class = c;
+
+ c->id = strdupz(id);
+ c->hash = simple_hash(c->id);
+
+ c->isqdisc = qdisc;
+ if(parentid && *parentid) {
+ c->parentid = strdupz(parentid);
+ c->parent_hash = simple_hash(c->parentid);
+ }
+
+ if(leafid && *leafid) {
+ c->leafid = strdupz(leafid);
+ c->leaf_hash = simple_hash(c->leafid);
+ }
+
+ if(unlikely(tc_class_index_add(n, c) != c))
+ error("plugin_tc: INTERNAL ERROR: attempt index class '%s' on device '%s': already exists", c->id, n->id);
+ }
+ return(c);
+}
+
+static inline void tc_device_free(struct tc_device *n)
+{
+ if(n->next) n->next->prev = n->prev;
+ if(n->prev) n->prev->next = n->next;
+ if(tc_device_root == n) {
+ if(n->next) tc_device_root = n->next;
+ else tc_device_root = n->prev;
+ }
+
+ if(unlikely(tc_device_index_del(n) != n))
+ error("plugin_tc: INTERNAL ERROR: removing device '%s' removed a different device.", n->id);
+
+ while(n->classes) tc_class_free(n, n->classes);
+
+ freez(n->id);
+ freez(n->name);
+ freez(n->family);
+ freez(n);
+}
+
+static inline void tc_device_free_all()
+{
+ while(tc_device_root)
+ tc_device_free(tc_device_root);
+}
+
+#define PLUGINSD_MAX_WORDS 20
+
+static inline int tc_space(char c) {
+ switch(c) {
+ case ' ':
+ case '\t':
+ case '\r':
+ case '\n':
+ return 1;
+
+ default:
+ return 0;
+ }
+}
+
+static inline void tc_split_words(char *str, char **words, int max_words) {
+ char *s = str;
+ int i = 0;
+
+ // skip all white space
+ while(tc_space(*s)) s++;
+
+ // store the first word
+ words[i++] = s;
+
+ // while we have something
+ while(*s) {
+ // if it is a space
+ if(unlikely(tc_space(*s))) {
+
+ // terminate the word
+ *s++ = '\0';
+
+ // skip all white space
+ while(tc_space(*s)) s++;
+
+ // if we reached the end, stop
+ if(!*s) break;
+
+ // store the next word
+ if(i < max_words) words[i++] = s;
+ else break;
+ }
+ else s++;
+ }
+
+ // terminate the words
+ while(i < max_words) words[i++] = NULL;
+}
+
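+// the pid of the tc-qos-helper.sh child process,
+// kept so that the cleanup handler can terminate it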
+static pid_t tc_child_pid = 0;
+
+static void tc_main_cleanup(void *ptr) {
+ struct netdata_static_thread *static_thread = (struct netdata_static_thread *)ptr;
+ static_thread->enabled = NETDATA_MAIN_THREAD_EXITING;
+
+ info("cleaning up...");
+
+ if(tc_child_pid) {
+ info("TC: killing with SIGTERM tc-qos-helper process %d", tc_child_pid);
+ if(killpid(tc_child_pid) != -1) {
+ siginfo_t info;
+
+ info("TC: waiting for tc plugin child process pid %d to exit...", tc_child_pid);
+ waitid(P_PID, (id_t) tc_child_pid, &info, WEXITED);
+ }
+
+ tc_child_pid = 0;
+ }
+
+ static_thread->enabled = NETDATA_MAIN_THREAD_EXITED;
+}
+
+void *tc_main(void *ptr) {
+ netdata_thread_cleanup_push(tc_main_cleanup, ptr);
+
+ struct rusage thread;
+
+ char command[FILENAME_MAX + 1];
+ char *words[PLUGINSD_MAX_WORDS] = { NULL };
+
+ uint32_t BEGIN_HASH = simple_hash("BEGIN");
+ uint32_t END_HASH = simple_hash("END");
+ uint32_t QDISC_HASH = simple_hash("qdisc");
+ uint32_t CLASS_HASH = simple_hash("class");
+ uint32_t SENT_HASH = simple_hash("Sent");
+ uint32_t LENDED_HASH = simple_hash("lended:");
+ uint32_t TOKENS_HASH = simple_hash("tokens:");
+ uint32_t SETDEVICENAME_HASH = simple_hash("SETDEVICENAME");
+ uint32_t SETDEVICEGROUP_HASH = simple_hash("SETDEVICEGROUP");
+ uint32_t SETCLASSNAME_HASH = simple_hash("SETCLASSNAME");
+ uint32_t WORKTIME_HASH = simple_hash("WORKTIME");
+ uint32_t first_hash;
+
+ snprintfz(command, TC_LINE_MAX, "%s/tc-qos-helper.sh", netdata_configured_primary_plugins_dir);
+ char *tc_script = config_get("plugin:tc", "script to run to get tc values", command);
+
+ while(!netdata_exit) {
+ FILE *fp;
+ struct tc_device *device = NULL;
+ struct tc_class *class = NULL;
+
+ snprintfz(command, TC_LINE_MAX, "exec %s %d", tc_script, localhost->rrd_update_every);
+ debug(D_TC_LOOP, "executing '%s'", command);
+
+ fp = mypopen(command, (pid_t *)&tc_child_pid);
+ if(unlikely(!fp)) {
+ error("TC: Cannot popen(\"%s\", \"r\").", command);
+ goto cleanup;
+ }
+
+ char buffer[TC_LINE_MAX+1] = "";
+ while(fgets(buffer, TC_LINE_MAX, fp) != NULL) {
+ if(unlikely(netdata_exit)) break;
+
+ buffer[TC_LINE_MAX] = '\0';
+ // debug(D_TC_LOOP, "TC: read '%s'", buffer);
+
+ tc_split_words(buffer, words, PLUGINSD_MAX_WORDS);
+
+ if(unlikely(!words[0] || !*words[0])) {
+ // debug(D_TC_LOOP, "empty line");
+ continue;
+ }
+ // else debug(D_TC_LOOP, "First word is '%s'", words[0]);
+
+ first_hash = simple_hash(words[0]);
+
+ if(unlikely(device && ((first_hash == CLASS_HASH && strcmp(words[0], "class") == 0) || (first_hash == QDISC_HASH && strcmp(words[0], "qdisc") == 0)))) {
+ // debug(D_TC_LOOP, "CLASS line on class id='%s', parent='%s', parentid='%s', leaf='%s', leafid='%s'", words[2], words[3], words[4], words[5], words[6]);
+
+ char *type = words[1]; // the class/qdisc type: htb, fq_codel, etc
+ char *id = words[2]; // the class/qdisc major:minor
+ char *parent = words[3]; // the word 'parent' or 'root'
+ char *parentid = words[4]; // parentid
+ char *leaf = words[5]; // the word 'leaf'
+ char *leafid = words[6]; // leafid
+
+ int parent_is_root = 0;
+ int parent_is_parent = 0;
+ if(likely(parent)) {
+ parent_is_parent = !strcmp(parent, "parent");
+
+ if(!parent_is_parent)
+ parent_is_root = !strcmp(parent, "root");
+ }
+
+ if(likely(type && id && (parent_is_root || parent_is_parent))) {
+ char qdisc = 0;
+
+ if(first_hash == QDISC_HASH) {
+ qdisc = 1;
+
+ if(!strcmp(type, "ingress")) {
+ // we don't want to get the ingress qdisc
+ // there should be an IFB interface for this
+
+ class = NULL;
+ continue;
+ }
+
+ if(parent_is_parent && parentid) {
+ // eliminate the minor number from parentid
+ // why: parentid is the id of the parent class
+ // but major: is also the id of the parent qdisc
+
+ char *s = parentid;
+ while(*s && *s != ':') s++;
+ if(*s == ':') s[1] = '\0';
+ }
+ }
+
+ if(parent_is_root) {
+ parentid = NULL;
+ leafid = NULL;
+ }
+ else if(!leaf || strcmp(leaf, "leaf") != 0)
+ leafid = NULL;
+
+ char leafbuf[20 + 1] = "";
+ if(leafid && leafid[strlen(leafid) - 1] == ':') {
+ strncpyz(leafbuf, leafid, 20 - 1);
+ strcat(leafbuf, "1");
+ leafid = leafbuf;
+ }
+
+ class = tc_class_add(device, id, qdisc, parentid, leafid);
+ }
+ else {
+ // clear the last class
+ class = NULL;
+ }
+ }
+ else if(unlikely(first_hash == END_HASH && strcmp(words[0], "END") == 0)) {
+ // debug(D_TC_LOOP, "END line");
+
+ if(likely(device)) {
+ netdata_thread_disable_cancelability();
+ tc_device_commit(device);
+ // tc_device_free(device);
+ netdata_thread_enable_cancelability();
+ }
+
+ device = NULL;
+ class = NULL;
+ }
+ else if(unlikely(first_hash == BEGIN_HASH && strcmp(words[0], "BEGIN") == 0)) {
+ // debug(D_TC_LOOP, "BEGIN line on device '%s'", words[1]);
+
+ if(likely(words[1] && *words[1])) {
+ device = tc_device_create(words[1]);
+ }
+ else {
+ // tc_device_free(device);
+ device = NULL;
+ }
+
+ class = NULL;
+ }
+ else if(unlikely(device && class && first_hash == SENT_HASH && strcmp(words[0], "Sent") == 0)) {
+ // debug(D_TC_LOOP, "SENT line '%s'", words[1]);
+ if(likely(words[1] && *words[1])) {
+ class->bytes = str2ull(words[1]);
+ class->updated = 1;
+ }
+ else {
+ class->updated = 0;
+ }
+
+ if(likely(words[3] && *words[3]))
+ class->packets = str2ull(words[3]);
+
+ if(likely(words[6] && *words[6]))
+ class->dropped = str2ull(words[6]);
+
+ if(likely(words[8] && *words[8]))
+ class->overlimits = str2ull(words[8]);
+
+ if(likely(words[10] && *words[10]))
+                        class->requeues = str2ull(words[10]);
+ }
+ else if(unlikely(device && class && class->updated && first_hash == LENDED_HASH && strcmp(words[0], "lended:") == 0)) {
+ // debug(D_TC_LOOP, "LENDED line '%s'", words[1]);
+ if(likely(words[1] && *words[1]))
+ class->lended = str2ull(words[1]);
+
+ if(likely(words[3] && *words[3]))
+ class->borrowed = str2ull(words[3]);
+
+ if(likely(words[5] && *words[5]))
+ class->giants = str2ull(words[5]);
+ }
+ else if(unlikely(device && class && class->updated && first_hash == TOKENS_HASH && strcmp(words[0], "tokens:") == 0)) {
+ // debug(D_TC_LOOP, "TOKENS line '%s'", words[1]);
+ if(likely(words[1] && *words[1]))
+ class->tokens = str2ull(words[1]);
+
+ if(likely(words[3] && *words[3]))
+ class->ctokens = str2ull(words[3]);
+ }
+ else if(unlikely(device && first_hash == SETDEVICENAME_HASH && strcmp(words[0], "SETDEVICENAME") == 0)) {
+ // debug(D_TC_LOOP, "SETDEVICENAME line '%s'", words[1]);
+ if(likely(words[1] && *words[1]))
+ tc_device_set_device_name(device, words[1]);
+ }
+ else if(unlikely(device && first_hash == SETDEVICEGROUP_HASH && strcmp(words[0], "SETDEVICEGROUP") == 0)) {
+ // debug(D_TC_LOOP, "SETDEVICEGROUP line '%s'", words[1]);
+ if(likely(words[1] && *words[1]))
+ tc_device_set_device_family(device, words[1]);
+ }
+ else if(unlikely(device && first_hash == SETCLASSNAME_HASH && strcmp(words[0], "SETCLASSNAME") == 0)) {
+ // debug(D_TC_LOOP, "SETCLASSNAME line '%s' '%s'", words[1], words[2]);
+ char *id = words[1];
+ char *path = words[2];
+ if(likely(id && *id && path && *path))
+ tc_device_set_class_name(device, id, path);
+ }
+ else if(unlikely(first_hash == WORKTIME_HASH && strcmp(words[0], "WORKTIME") == 0)) {
+ // debug(D_TC_LOOP, "WORKTIME line '%s' '%s'", words[1], words[2]);
+ getrusage(RUSAGE_THREAD, &thread);
+
+ static RRDSET *stcpu = NULL;
+ static RRDDIM *rd_user = NULL, *rd_system = NULL;
+
+ if(unlikely(!stcpu)) {
+ stcpu = rrdset_create_localhost(
+ "netdata"
+ , "plugin_tc_cpu"
+ , NULL
+ , "tc.helper"
+ , NULL
+ , "NetData TC CPU usage"
+ , "milliseconds/s"
+ , PLUGIN_TC_NAME
+ , NULL
+ , NETDATA_CHART_PRIO_NETDATA_TC_CPU
+ , localhost->rrd_update_every
+ , RRDSET_TYPE_STACKED
+ );
+ rd_user = rrddim_add(stcpu, "user", NULL, 1, 1000, RRD_ALGORITHM_INCREMENTAL);
+ rd_system = rrddim_add(stcpu, "system", NULL, 1, 1000, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(stcpu);
+
+ rrddim_set_by_pointer(stcpu, rd_user , thread.ru_utime.tv_sec * 1000000ULL + thread.ru_utime.tv_usec);
+ rrddim_set_by_pointer(stcpu, rd_system, thread.ru_stime.tv_sec * 1000000ULL + thread.ru_stime.tv_usec);
+ rrdset_done(stcpu);
+
+ static RRDSET *sttime = NULL;
+ static RRDDIM *rd_run_time = NULL;
+
+ if(unlikely(!sttime)) {
+ sttime = rrdset_create_localhost(
+ "netdata"
+ , "plugin_tc_time"
+ , NULL
+ , "tc.helper"
+ , NULL
+ , "NetData TC script execution"
+ , "milliseconds/run"
+ , PLUGIN_TC_NAME
+ , NULL
+ , NETDATA_CHART_PRIO_NETDATA_TC_TIME
+ , localhost->rrd_update_every
+ , RRDSET_TYPE_AREA
+ );
+ rd_run_time = rrddim_add(sttime, "run_time", "run time", 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else rrdset_next(sttime);
+
+ rrddim_set_by_pointer(sttime, rd_run_time, str2ll(words[1], NULL));
+ rrdset_done(sttime);
+
+ }
+ //else {
+ // debug(D_TC_LOOP, "IGNORED line");
+ //}
+ }
+
+ // fgets() failed or loop broke
+ int code = mypclose(fp, (pid_t)tc_child_pid);
+ tc_child_pid = 0;
+
+ if(unlikely(device)) {
+ // tc_device_free(device);
+ device = NULL;
+ class = NULL;
+ }
+
+ if(unlikely(netdata_exit)) {
+ tc_device_free_all();
+ goto cleanup;
+ }
+
+ if(code == 1 || code == 127) {
+ // 1 = DISABLE
+ // 127 = cannot even run it
+ error("TC: tc-qos-helper.sh exited with code %d. Disabling it.", code);
+
+ tc_device_free_all();
+ goto cleanup;
+ }
+
+ sleep((unsigned int) localhost->rrd_update_every);
+ }
+
+cleanup: ; // added semi-colon to prevent older gcc error: label at end of compound statement
+ netdata_thread_cleanup_pop(1);
+ return NULL;
+}
diff --git a/collectors/tc.plugin/plugin_tc.h b/collectors/tc.plugin/plugin_tc.h
new file mode 100644
index 0000000..c646584
--- /dev/null
+++ b/collectors/tc.plugin/plugin_tc.h
@@ -0,0 +1,31 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_PLUGIN_TC_H
+#define NETDATA_PLUGIN_TC_H 1
+
+#include "../../daemon/common.h"
+
+#if (TARGET_OS == OS_LINUX)
+
+#define NETDATA_PLUGIN_HOOK_LINUX_TC \
+ { \
+ .name = "PLUGIN[tc]", \
+ .config_section = CONFIG_SECTION_PLUGINS, \
+ .config_name = "tc", \
+ .enabled = 1, \
+ .thread = NULL, \
+ .init_routine = NULL, \
+ .start_routine = tc_main \
+ },
+
+extern void *tc_main(void *ptr);
+
+#else // (TARGET_OS == OS_LINUX)
+
+#define NETDATA_PLUGIN_HOOK_LINUX_TC
+
+#endif // (TARGET_OS == OS_LINUX)
+
+
+#endif /* NETDATA_PLUGIN_TC_H */
+
diff --git a/collectors/tc.plugin/tc-qos-helper.sh.in b/collectors/tc.plugin/tc-qos-helper.sh.in
new file mode 100755
index 0000000..65d3315
--- /dev/null
+++ b/collectors/tc.plugin/tc-qos-helper.sh.in
@@ -0,0 +1,297 @@
+#!/usr/bin/env bash
+
+# netdata
+# real-time performance and health monitoring, done right!
+# (C) 2017 Costa Tsaousis <costa@tsaousis.gr>
+# SPDX-License-Identifier: GPL-3.0-or-later
+#
+# This script is a helper that allows netdata to collect tc data.
+# tc output parsing has been implemented in C, inside netdata.
+# This script allows setting names for the dimensions.
+
+export PATH="${PATH}:/sbin:/usr/sbin:/usr/local/sbin"
+export LC_ALL=C
+
+# -----------------------------------------------------------------------------
+# logging functions
+
+PROGRAM_NAME="$(basename "$0")"
+PROGRAM_NAME="${PROGRAM_NAME/.plugin/}"
+
+logdate() {
+ date "+%Y-%m-%d %H:%M:%S"
+}
+
+log() {
+ local status="${1}"
+ shift
+
+ echo >&2 "$(logdate): ${PROGRAM_NAME}: ${status}: ${*}"
+
+}
+
+warning() {
+ log WARNING "${@}"
+}
+
+error() {
+ log ERROR "${@}"
+}
+
+info() {
+ log INFO "${@}"
+}
+
+fatal() {
+ log FATAL "${@}"
+ exit 1
+}
+
+debug=0
+debug() {
+ [ $debug -eq 1 ] && log DEBUG "${@}"
+}
+
+# -----------------------------------------------------------------------------
+# find /var/run/fireqos
+
+# the default
+fireqos_run_dir="/var/run/fireqos"
+
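+# resolve a path through any chain of symlinks and
+# print the directory that ultimately contains the target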
+function realdir() {
+ local r
+ local t
+ r="$1"
+ t="$(readlink "$r")"
+
+ while [ "$t" ]; do
+ r=$(cd "$(dirname "$r")" && cd "$(dirname "$t")" && pwd -P)/$(basename "$t")
+ t=$(readlink "$r")
+ done
+
+ dirname "$r"
+}
+
+if [ ! -d "${fireqos_run_dir}" ]; then
+
+ # the fireqos executable - we will use it to find its config
+ fireqos="$(command -v fireqos 2>/dev/null)"
+
+ if [ -n "${fireqos}" ]; then
+
+ fireqos_exec_dir="$(realdir "${fireqos}")"
+
+ if [ -n "${fireqos_exec_dir}" ] && [ "${fireqos_exec_dir}" != "." ] && [ -f "${fireqos_exec_dir}/install.config" ]; then
+ LOCALSTATEDIR=
+ #shellcheck source=/dev/null
+ source "${fireqos_exec_dir}/install.config"
+
+ if [ -d "${LOCALSTATEDIR}/run/fireqos" ]; then
+ fireqos_run_dir="${LOCALSTATEDIR}/run/fireqos"
+ else
+ warning "FireQoS is installed as '${fireqos}', its installation config at '${fireqos_exec_dir}/install.config' specifies local state data at '${LOCALSTATEDIR}/run/fireqos', but this directory is not found or is not readable (check the permissions of its parents)."
+ fi
+ else
+ warning "Although FireQoS is installed on this system as '${fireqos}', I cannot find/read its installation configuration at '${fireqos_exec_dir}/install.config'."
+ fi
+ else
+ warning "FireQoS is not installed on this system. Use FireQoS to apply traffic QoS and expose the class names to netdata. Check https://github.com/netdata/netdata/tree/master/collectors/tc.plugin#tcplugin"
+ fi
+fi
+
+# -----------------------------------------------------------------------------
+
+[ -z "${NETDATA_PLUGINS_DIR}" ] && NETDATA_PLUGINS_DIR="$(dirname "${0}")"
+[ -z "${NETDATA_USER_CONFIG_DIR}" ] && NETDATA_USER_CONFIG_DIR="@configdir_POST@"
+[ -z "${NETDATA_STOCK_CONFIG_DIR}" ] && NETDATA_STOCK_CONFIG_DIR="@libconfigdir_POST@"
+
+plugins_dir="${NETDATA_PLUGINS_DIR}"
+tc="$(command -v tc 2>/dev/null)"
+
+# -----------------------------------------------------------------------------
+# user configuration
+
+# time in seconds to refresh QoS class/qdisc names
+qos_get_class_names_every=120
+
+# time in seconds to exit - netdata will restart the script
+qos_exit_every=3600
+
+# what to use? classes or qdiscs?
+tc_show="qdisc" # can also be "class"
+
+# -----------------------------------------------------------------------------
+# check if we have a valid number for interval
+
+t=${1}
+update_every=$((t))
+[ $((update_every)) -lt 1 ] && update_every=${NETDATA_UPDATE_EVERY}
+[ $((update_every)) -lt 1 ] && update_every=1
+
+# -----------------------------------------------------------------------------
+# allow the user to override our defaults
+
+for CONFIG in "${NETDATA_STOCK_CONFIG_DIR}/tc-qos-helper.conf" "${NETDATA_USER_CONFIG_DIR}/tc-qos-helper.conf"; do
+ if [ -f "${CONFIG}" ]; then
+ info "Loading config file '${CONFIG}'..."
+ #shellcheck source=/dev/null
+ source "${CONFIG}" || error "Failed to load config file '${CONFIG}'."
+ else
+ warning "Cannot find file '${CONFIG}'."
+ fi
+done
+
+case "${tc_show}" in
+qdisc | class) ;;
+
+*)
+ error "tc_show variable can be either 'qdisc' or 'class' but is set to '${tc_show}'. Assuming it is 'qdisc'."
+ tc_show="qdisc"
+ ;;
+esac
+
+# -----------------------------------------------------------------------------
+# default sleep function
+
+LOOPSLEEPMS_LASTWORK=0
+loopsleepms() {
+ sleep "$1"
+}
+
+# if found and included, this file overwrites loopsleepms()
+# with a high resolution timer function for precise looping.
+#shellcheck source=/dev/null
+. "${plugins_dir}/loopsleepms.sh.inc"
+
+# -----------------------------------------------------------------------------
+# final checks we can run
+
+if [ -z "${tc}" ] || [ ! -x "${tc}" ]; then
+ fatal "cannot find command 'tc' in this system."
+fi
+
+tc_devices=
+fix_names=
+
+# -----------------------------------------------------------------------------
+
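+# emit a SETCLASSNAME line for netdata;
+# $2 is the class name, $3 the class id and $4 the leaf qdisc id -
+# which one is reported depends on whether qdiscs or classes are shown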
+setclassname() {
+ if [ "${tc_show}" = "qdisc" ]; then
+ echo "SETCLASSNAME $4 $2"
+ else
+ echo "SETCLASSNAME $3 $2"
+ fi
+}
+
+show_tc_cls() {
+ [ "${tc_show}" = "qdisc" ] && return 1
+
+ local x="${1}"
+
+ if [ -f /etc/iproute2/tc_cls ]; then
+ local classid name rest
+ while read -r classid name rest; do
+ if [ -z "${classid}" ] ||
+ [ -z "${name}" ] ||
+ [ "${classid}" = "#" ] ||
+ [ "${name}" = "#" ] ||
+ [ "${classid:0:1}" = "#" ] ||
+ [ "${name:0:1}" = "#" ]; then
+ continue
+ fi
+ setclassname "" "${name}" "${classid}"
+ done </etc/iproute2/tc_cls
+ return 0
+ fi
+ return 1
+}
+
+show_fireqos_names() {
+ local x="${1}" name n interface_dev interface_classes_monitor
+
+ if [ -f "${fireqos_run_dir}/ifaces/${x}" ]; then
+ name="$(<"${fireqos_run_dir}/ifaces/${x}")"
+ echo "SETDEVICENAME ${name}" || exit
+
+ #shellcheck source=/dev/null
+ source "${fireqos_run_dir}/${name}.conf"
+ for n in ${interface_classes_monitor}; do
+ # shellcheck disable=SC2086
+ setclassname ${n//|/ }
+ done
+ [ -n "${interface_dev}" ] && echo "SETDEVICEGROUP ${interface_dev}" || exit
+
+ return 0
+ fi
+
+ return 1
+}
+
+show_tc() {
+ local x="${1}"
+
+ echo "BEGIN ${x}" || exit
+
+ # netdata can parse the output of tc
+ ${tc} -s ${tc_show} show dev "${x}"
+
+ # check FireQOS names for classes
+ if [ -n "${fix_names}" ]; then
+ show_fireqos_names "${x}" || show_tc_cls "${x}"
+ fi
+
+ echo "END ${x}" || exit
+}
+
+find_tc_devices() {
+ local count=0 devs dev rest l
+
+ # find all the devices in the system
+ # without forking
+ while IFS=":| " read -r dev rest; do
+ count=$((count + 1))
+ [ ${count} -le 2 ] && continue
+ devs="${devs} ${dev}"
+ done </proc/net/dev
+
+ # from all the devices find the ones
+ # that have QoS defined
+ # unfortunately, one fork per device cannot be avoided
+ tc_devices=
+ for dev in ${devs}; do
+ l="$(${tc} class show dev "${dev}" 2>/dev/null)"
+ [ -n "${l}" ] && tc_devices="${tc_devices} ${dev}"
+ done
+}
+
+# update devices and class names
+# once every 2 minutes
+names_every=$((qos_get_class_names_every / update_every))
+
+# exit this script every hour
+# it will be restarted automatically
+exit_after=$((qos_exit_every / update_every))
+
+c=0
+gc=0
+while true; do
+ fix_names=
+ c=$((c + 1))
+ gc=$((gc + 1))
+
+ if [ ${c} -le 1 ] || [ ${c} -ge ${names_every} ]; then
+ c=1
+ fix_names="YES"
+ find_tc_devices
+ fi
+
+ for d in ${tc_devices}; do
+ show_tc "${d}"
+ done
+
+ echo "WORKTIME ${LOOPSLEEPMS_LASTWORK}" || exit
+
+ loopsleepms ${update_every}
+
+ [ ${gc} -gt ${exit_after} ] && exit 0
+done
diff --git a/collectors/xenstat.plugin/Makefile.am b/collectors/xenstat.plugin/Makefile.am
new file mode 100644
index 0000000..161784b
--- /dev/null
+++ b/collectors/xenstat.plugin/Makefile.am
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/collectors/xenstat.plugin/README.md b/collectors/xenstat.plugin/README.md
new file mode 100644
index 0000000..61be271
--- /dev/null
+++ b/collectors/xenstat.plugin/README.md
@@ -0,0 +1,53 @@
+<!--
+title: "xenstat.plugin"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/xenstat.plugin/README.md
+-->
+
+# xenstat.plugin
+
+`xenstat.plugin` collects XenServer and XCP-ng statistics.
+
+## Prerequisites
+
+1. install `xen-dom0-libs-devel` and `yajl-devel` using the package manager of your system.
+   Note: On CentOS systems you will need the `centos-release-xen` repository, and the required package for xen is `xen-devel` (see the example commands after this list).
+
+2. re-install Netdata from source. The installer will detect that the required libraries are now available and will also build xenstat.plugin.
+
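+As an illustration, on a CentOS system the commands could look like this (package names as noted above; other distributions will differ):
+
+```
+yum install centos-release-xen
+yum install xen-devel yajl-devel
+```
+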
+Keep in mind that `libxenstat` requires root access, so the plugin is setuid to root.
+
+## Charts
+
+The plugin provides XenServer and XCP-ng host and domain statistics:
+
+Host:
+
+1. Number of domains.
+
+Domain:
+
+1. CPU.
+2. Memory.
+3. Networks.
+4. VBDs.
+
+## Configuration
+
+If you need to disable xenstat for Netdata, edit `/etc/netdata/netdata.conf` and set:
+
+```
+[plugins]
+ xenstat = no
+```
+
+## Debugging
+
+You can run the plugin by hand:
+
+```
+sudo /usr/libexec/netdata/plugins.d/xenstat.plugin 1 debug
+```
+
+You will get verbose output on what the plugin does.
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fxenstat.plugin%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/xenstat.plugin/xenstat_plugin.c b/collectors/xenstat.plugin/xenstat_plugin.c
new file mode 100644
index 0000000..647ac1d
--- /dev/null
+++ b/collectors/xenstat.plugin/xenstat_plugin.c
@@ -0,0 +1,1106 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "../../libnetdata/libnetdata.h"
+
+#define PLUGIN_XENSTAT_NAME "xenstat.plugin"
+
+#define NETDATA_CHART_PRIO_XENSTAT_NODE_CPUS 30001
+#define NETDATA_CHART_PRIO_XENSTAT_NODE_CPU_FREQ 30002
+#define NETDATA_CHART_PRIO_XENSTAT_NODE_MEM 30003
+#define NETDATA_CHART_PRIO_XENSTAT_NODE_TMEM 30004
+#define NETDATA_CHART_PRIO_XENSTAT_NODE_DOMAINS 30005
+
+#define NETDATA_CHART_PRIO_XENSTAT_DOMAIN_STATES 30101
+#define NETDATA_CHART_PRIO_XENSTAT_DOMAIN_CPU 30102
+#define NETDATA_CHART_PRIO_XENSTAT_DOMAIN_VCPU 30103
+#define NETDATA_CHART_PRIO_XENSTAT_DOMAIN_MEM 30104
+
+#define NETDATA_CHART_PRIO_XENSTAT_DOMAIN_TMEM_PAGES 30104
+#define NETDATA_CHART_PRIO_XENSTAT_DOMAIN_TMEM_OPERATIONS 30105
+
+#define NETDATA_CHART_PRIO_XENSTAT_DOMAIN_VBD_OO_REQ 30200
+#define NETDATA_CHART_PRIO_XENSTAT_DOMAIN_VBD_REQUESTS 30300
+#define NETDATA_CHART_PRIO_XENSTAT_DOMAIN_VBD_SECTORS 30400
+
+#define NETDATA_CHART_PRIO_XENSTAT_DOMAIN_NET_BYTES 30500
+#define NETDATA_CHART_PRIO_XENSTAT_DOMAIN_NET_PACKETS 30600
+#define NETDATA_CHART_PRIO_XENSTAT_DOMAIN_NET_ERRORS 30700
+#define NETDATA_CHART_PRIO_XENSTAT_DOMAIN_NET_DROPS 30800
+
+#define TYPE_LENGTH_MAX 200
+
+#define CHART_IS_OBSOLETE 1
+#define CHART_IS_NOT_OBSOLETE 0
+
+// callback required by fatal()
+void netdata_cleanup_and_exit(int ret) {
+ exit(ret);
+}
+
+void send_statistics( const char *action, const char *action_result, const char *action_data) {
+ (void) action;
+ (void) action_result;
+ (void) action_data;
+ return;
+}
+
+// callbacks required by popen()
+void signals_block(void) {};
+void signals_unblock(void) {};
+void signals_reset(void) {};
+
+// callback required by eval()
+int health_variable_lookup(const char *variable, uint32_t hash, struct rrdcalc *rc, calculated_number *result) {
+ (void)variable;
+ (void)hash;
+ (void)rc;
+ (void)result;
+ return 0;
+}
+
+// required by get_system_cpus()
+char *netdata_configured_host_prefix = "";
+
+// Variables
+
+static int debug = 0;
+
+static int netdata_update_every = 1;
+
+#ifdef HAVE_LIBXENSTAT
+#include <xenstat.h>
+#include <libxl.h>
+
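+// Per-VCPU, per-VBD and per-network metrics are kept in singly-linked lists
+// hanging off each domain. The 'updated' flag marks entries seen during the
+// current collection cycle, so charts of entities that disappeared can be
+// marked obsolete.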
+struct vcpu_metrics {
+ unsigned int id;
+
+ unsigned int online;
+ unsigned long long ns;
+
+ int chart_generated;
+ int updated;
+
+ struct vcpu_metrics *next;
+};
+
+struct vbd_metrics {
+ unsigned int id;
+
+ unsigned int error;
+ unsigned long long oo_reqs;
+ unsigned long long rd_reqs;
+ unsigned long long wr_reqs;
+ unsigned long long rd_sects;
+ unsigned long long wr_sects;
+
+ int oo_req_chart_generated;
+ int requests_chart_generated;
+ int sectors_chart_generated;
+ int updated;
+
+ struct vbd_metrics *next;
+};
+
+struct network_metrics {
+ unsigned int id;
+
+ unsigned long long rbytes;
+ unsigned long long rpackets;
+ unsigned long long rerrs;
+ unsigned long long rdrops;
+
+ unsigned long long tbytes;
+ unsigned long long tpackets;
+ unsigned long long terrs;
+ unsigned long long tdrops;
+
+ int bytes_chart_generated;
+ int packets_chart_generated;
+ int errors_chart_generated;
+ int drops_chart_generated;
+ int updated;
+
+ struct network_metrics *next;
+};
+
+struct domain_metrics {
+ char *uuid;
+ uint32_t hash;
+
+ unsigned int id;
+ char *name;
+
+ // states
+ unsigned int running;
+ unsigned int blocked;
+ unsigned int paused;
+ unsigned int shutdown;
+ unsigned int crashed;
+ unsigned int dying;
+ unsigned int cur_vcpus;
+
+ unsigned long long cpu_ns;
+ unsigned long long cur_mem;
+ unsigned long long max_mem;
+
+ struct vcpu_metrics *vcpu_root;
+ struct vbd_metrics *vbd_root;
+ struct network_metrics *network_root;
+
+ int states_chart_generated;
+ int cpu_chart_generated;
+ int vcpu_chart_generated;
+ int num_vcpus_changed;
+ int mem_chart_generated;
+ int updated;
+
+ struct domain_metrics *next;
+};
+
+struct node_metrics{
+ unsigned long long tot_mem;
+ unsigned long long free_mem;
+ int num_domains;
+ unsigned int num_cpus;
+ unsigned long long node_cpu_hz;
+
+ struct domain_metrics *domain_root;
+};
+
+static struct node_metrics node_metrics = {
+ .domain_root = NULL
+};
+
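+// look up a domain by UUID (the pre-computed hash is used as a fast path);
+// if it is not found, allocate a new entry and append it to the list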
+static inline struct domain_metrics *domain_metrics_get(const char *uuid, uint32_t hash) {
+ struct domain_metrics *d = NULL, *last = NULL;
+ for(d = node_metrics.domain_root; d ; last = d, d = d->next) {
+ if(unlikely(d->hash == hash && !strcmp(d->uuid, uuid)))
+ return d;
+ }
+
+ if(unlikely(debug)) fprintf(stderr, "xenstat.plugin: allocating memory for domain with uuid %s\n", uuid);
+
+ d = callocz(1, sizeof(struct domain_metrics));
+ d->uuid = strdupz(uuid);
+ d->hash = hash;
+
+ if(unlikely(!last)) {
+ d->next = node_metrics.domain_root;
+ node_metrics.domain_root = d;
+ }
+ else {
+ d->next = last->next;
+ last->next = d;
+ }
+
+ return d;
+}
+
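+// unlink a domain from the list and free it together with its VCPU, VBD and
+// network sub-lists; returns the node before the freed one, or NULL when the
+// freed domain was the head, so callers can keep walking the list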
+static struct domain_metrics *domain_metrics_free(struct domain_metrics *d) {
+ struct domain_metrics *cur = NULL, *last = NULL;
+ struct vcpu_metrics *vcpu, *vcpu_f;
+ struct vbd_metrics *vbd, *vbd_f;
+ struct network_metrics *network, *network_f;
+
+ if(unlikely(debug)) fprintf(stderr, "xenstat.plugin: freeing memory for domain '%s' id %d, uuid %s\n", d->name, d->id, d->uuid);
+
+ for(cur = node_metrics.domain_root; cur ; last = cur, cur = cur->next) {
+ if(unlikely(cur->hash == d->hash && !strcmp(cur->uuid, d->uuid))) break;
+ }
+
+ if(unlikely(!cur)) {
+ error("XENSTAT: failed to free domain metrics.");
+ return NULL;
+ }
+
+    if(likely(last))
+        last->next = cur->next;
+    else
+        node_metrics.domain_root = cur->next; // the freed domain was the head; keep the rest of the list
+
+ freez(cur->uuid);
+ freez(cur->name);
+
+ vcpu = cur->vcpu_root;
+ while(vcpu) {
+ vcpu_f = vcpu;
+ vcpu = vcpu->next;
+ freez(vcpu_f);
+ }
+
+ vbd = cur->vbd_root;
+ while(vbd) {
+ vbd_f = vbd;
+ vbd = vbd->next;
+ freez(vbd_f);
+ }
+
+ network = cur->network_root;
+ while(network) {
+ network_f = network;
+ network = network->next;
+ freez(network_f);
+ }
+
+ freez(cur);
+
+    return last; // NULL when the freed domain was the list head
+}
+
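+// refresh the per-VCPU metrics of a domain, allocating list nodes on demand,
+// and detect changes in the number of online VCPUs so the chart can be redefined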
+static int vcpu_metrics_collect(struct domain_metrics *d, xenstat_domain *domain) {
+ unsigned int num_vcpus = 0;
+ xenstat_vcpu *vcpu = NULL;
+ struct vcpu_metrics *vcpu_m = NULL, *last_vcpu_m = NULL;
+
+ num_vcpus = xenstat_domain_num_vcpus(domain);
+
+ for(vcpu_m = d->vcpu_root; vcpu_m ; vcpu_m = vcpu_m->next)
+ vcpu_m->updated = 0;
+
+ vcpu_m = d->vcpu_root;
+
+ unsigned int i, num_online_vcpus=0;
+ for(i = 0; i < num_vcpus; i++) {
+ if(unlikely(!vcpu_m)) {
+ vcpu_m = callocz(1, sizeof(struct vcpu_metrics));
+
+ if(unlikely(i == 0)) d->vcpu_root = vcpu_m;
+ else last_vcpu_m->next = vcpu_m;
+ }
+
+ vcpu_m->id = i;
+
+ vcpu = xenstat_domain_vcpu(domain, i);
+
+ if(unlikely(!vcpu)) {
+ error("XENSTAT: cannot get VCPU statistics.");
+ return 1;
+ }
+
+ vcpu_m->online = xenstat_vcpu_online(vcpu);
+ if(likely(vcpu_m->online)) { num_online_vcpus++; }
+ vcpu_m->ns = xenstat_vcpu_ns(vcpu);
+
+ vcpu_m->updated = 1;
+
+ last_vcpu_m = vcpu_m;
+ vcpu_m = vcpu_m->next;
+ }
+
+ if(unlikely(num_online_vcpus != d->cur_vcpus)) {
+ d->num_vcpus_changed = 1;
+ d->cur_vcpus = num_online_vcpus;
+ }
+
+ return 0;
+}
+
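+// refresh the per-VBD (virtual block device) metrics of a domain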
+static int vbd_metrics_collect(struct domain_metrics *d, xenstat_domain *domain) {
+ unsigned int num_vbds = xenstat_domain_num_vbds(domain);
+ xenstat_vbd *vbd = NULL;
+ struct vbd_metrics *vbd_m = NULL, *last_vbd_m = NULL;
+
+ for(vbd_m = d->vbd_root; vbd_m ; vbd_m = vbd_m->next)
+ vbd_m->updated = 0;
+
+ vbd_m = d->vbd_root;
+
+ unsigned int i;
+ for(i = 0; i < num_vbds; i++) {
+ if(unlikely(!vbd_m)) {
+ vbd_m = callocz(1, sizeof(struct vbd_metrics));
+
+ if(unlikely(i == 0)) d->vbd_root = vbd_m;
+ else last_vbd_m->next = vbd_m;
+ }
+
+ vbd_m->id = i;
+
+ vbd = xenstat_domain_vbd(domain, i);
+
+ if(unlikely(!vbd)) {
+ error("XENSTAT: cannot get VBD statistics.");
+ return 1;
+ }
+
+#ifdef HAVE_XENSTAT_VBD_ERROR
+ vbd_m->error = xenstat_vbd_error(vbd);
+#else
+ vbd_m->error = 0;
+#endif
+ vbd_m->oo_reqs = xenstat_vbd_oo_reqs(vbd);
+ vbd_m->rd_reqs = xenstat_vbd_rd_reqs(vbd);
+ vbd_m->wr_reqs = xenstat_vbd_wr_reqs(vbd);
+ vbd_m->rd_sects = xenstat_vbd_rd_sects(vbd);
+ vbd_m->wr_sects = xenstat_vbd_wr_sects(vbd);
+
+ vbd_m->updated = 1;
+
+ last_vbd_m = vbd_m;
+ vbd_m = vbd_m->next;
+ }
+
+ return 0;
+}
+
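+// refresh the per-interface network metrics of a domain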
+static int network_metrics_collect(struct domain_metrics *d, xenstat_domain *domain) {
+ unsigned int num_networks = xenstat_domain_num_networks(domain);
+ xenstat_network *network = NULL;
+ struct network_metrics *network_m = NULL, *last_network_m = NULL;
+
+ for(network_m = d->network_root; network_m ; network_m = network_m->next)
+ network_m->updated = 0;
+
+ network_m = d->network_root;
+
+ unsigned int i;
+ for(i = 0; i < num_networks; i++) {
+ if(unlikely(!network_m)) {
+ network_m = callocz(1, sizeof(struct network_metrics));
+
+ if(unlikely(i == 0)) d->network_root = network_m;
+ else last_network_m->next = network_m;
+ }
+
+ network_m->id = i;
+
+ network = xenstat_domain_network(domain, i);
+
+ if(unlikely(!network)) {
+ error("XENSTAT: cannot get network statistics.");
+ return 1;
+ }
+
+ network_m->rbytes = xenstat_network_rbytes(network);
+ network_m->rpackets = xenstat_network_rpackets(network);
+ network_m->rerrs = xenstat_network_rerrs(network);
+ network_m->rdrops = xenstat_network_rdrop(network);
+
+ network_m->tbytes = xenstat_network_tbytes(network);
+ network_m->tpackets = xenstat_network_tpackets(network);
+ network_m->terrs = xenstat_network_terrs(network);
+ network_m->tdrops = xenstat_network_tdrop(network);
+
+ network_m->updated = 1;
+
+ last_network_m = network_m;
+ network_m = network_m->next;
+ }
+
+ return 0;
+}
+
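+// collect everything for one iteration: node-wide metrics first, then every
+// domain with its VCPUs, VBDs and networks. Domains are tracked by UUID
+// rather than by numeric ID; entries not seen in this pass keep updated == 0
+// and are obsoleted by the sender functions below.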
+static int xenstat_collect(xenstat_handle *xhandle, libxl_ctx *ctx, libxl_dominfo *info) {
+
+ // mark all old metrics as not-updated
+ struct domain_metrics *d;
+ for(d = node_metrics.domain_root; d ; d = d->next)
+ d->updated = 0;
+
+ xenstat_node *node = xenstat_get_node(xhandle, XENSTAT_ALL);
+ if (unlikely(!node)) {
+ error("XENSTAT: failed to retrieve statistics from libxenstat.");
+ return 1;
+ }
+
+ node_metrics.tot_mem = xenstat_node_tot_mem(node);
+ node_metrics.free_mem = xenstat_node_free_mem(node);
+ node_metrics.num_domains = xenstat_node_num_domains(node);
+ node_metrics.num_cpus = xenstat_node_num_cpus(node);
+ node_metrics.node_cpu_hz = xenstat_node_cpu_hz(node);
+
+ int i;
+ for(i = 0; i < node_metrics.num_domains; i++) {
+ xenstat_domain *domain = NULL;
+ char uuid[LIBXL_UUID_FMTLEN + 1];
+
+ domain = xenstat_node_domain_by_index(node, i);
+
+ // get domain UUID
+ unsigned int id = xenstat_domain_id(domain);
+        if(unlikely(libxl_domain_info(ctx, info, id))) {
+            error("XENSTAT: cannot get domain info.");
+            continue; // skip this domain: without its info, uuid would be used uninitialized below
+        }
+        snprintfz(uuid, LIBXL_UUID_FMTLEN, LIBXL_UUID_FMT, LIBXL_UUID_BYTES(info->uuid));
+
+ uint32_t hash = simple_hash(uuid);
+ d = domain_metrics_get(uuid, hash);
+
+ d->id = id;
+ if(unlikely(!d->name)) {
+ d->name = strdupz(xenstat_domain_name(domain));
+ netdata_fix_chart_id(d->name);
+ if(unlikely(debug)) fprintf(stderr, "xenstat.plugin: domain id %d, uuid %s has name '%s'\n", d->id, d->uuid, d->name);
+ }
+
+ d->running = xenstat_domain_running(domain);
+ d->blocked = xenstat_domain_blocked(domain);
+ d->paused = xenstat_domain_paused(domain);
+ d->shutdown = xenstat_domain_shutdown(domain);
+ d->crashed = xenstat_domain_crashed(domain);
+ d->dying = xenstat_domain_dying(domain);
+
+ d->cpu_ns = xenstat_domain_cpu_ns(domain);
+ d->cur_mem = xenstat_domain_cur_mem(domain);
+ d->max_mem = xenstat_domain_max_mem(domain);
+
+ if(unlikely(vcpu_metrics_collect(d, domain) || vbd_metrics_collect(d, domain) || network_metrics_collect(d, domain))) {
+ xenstat_free_node(node);
+ return 1;
+ }
+
+ d->updated = 1;
+ }
+
+ xenstat_free_node(node);
+
+ return 0;
+}
+
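+// send node-wide metrics to netdata over stdout using the external plugins
+// API: CHART/DIMENSION definitions are printed once (guarded by the static
+// flags), then a BEGIN/SET/END block per chart on every iteration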
+static void xenstat_send_node_metrics() {
+ static int mem_chart_generated = 0, domains_chart_generated = 0, cpus_chart_generated = 0, cpu_freq_chart_generated = 0;
+
+ // ----------------------------------------------------------------
+
+ if(unlikely(!mem_chart_generated)) {
+ printf("CHART xenstat.mem '' 'Memory Usage' 'MiB' 'memory' '' stacked %d %d '' %s\n"
+ , NETDATA_CHART_PRIO_XENSTAT_NODE_MEM
+ , netdata_update_every
+ , PLUGIN_XENSTAT_NAME
+ );
+ printf("DIMENSION %s '' absolute 1 %d\n", "free", netdata_update_every * 1024 * 1024);
+ printf("DIMENSION %s '' absolute 1 %d\n", "used", netdata_update_every * 1024 * 1024);
+ mem_chart_generated = 1;
+ }
+
+ printf(
+ "BEGIN xenstat.mem\n"
+ "SET free = %lld\n"
+ "SET used = %lld\n"
+ "END\n"
+ , (collected_number) node_metrics.free_mem
+ , (collected_number) (node_metrics.tot_mem - node_metrics.free_mem)
+ );
+
+ // ----------------------------------------------------------------
+
+ if(unlikely(!domains_chart_generated)) {
+ printf("CHART xenstat.domains '' 'Number of Domains' 'domains' 'domains' '' line %d %d '' %s\n"
+ , NETDATA_CHART_PRIO_XENSTAT_NODE_DOMAINS
+ , netdata_update_every
+ , PLUGIN_XENSTAT_NAME
+ );
+ printf("DIMENSION %s '' absolute 1 %d\n", "domains", netdata_update_every);
+ domains_chart_generated = 1;
+ }
+
+ printf(
+ "BEGIN xenstat.domains\n"
+ "SET domains = %lld\n"
+ "END\n"
+ , (collected_number) node_metrics.num_domains
+ );
+
+ // ----------------------------------------------------------------
+
+ if(unlikely(!cpus_chart_generated)) {
+ printf("CHART xenstat.cpus '' 'Number of CPUs' 'cpus' 'cpu' '' line %d %d '' %s\n"
+ , NETDATA_CHART_PRIO_XENSTAT_NODE_CPUS
+ , netdata_update_every
+ , PLUGIN_XENSTAT_NAME
+ );
+ printf("DIMENSION %s '' absolute 1 %d\n", "cpus", netdata_update_every);
+ cpus_chart_generated = 1;
+ }
+
+ printf(
+ "BEGIN xenstat.cpus\n"
+ "SET cpus = %lld\n"
+ "END\n"
+ , (collected_number) node_metrics.num_cpus
+ );
+
+ // ----------------------------------------------------------------
+
+ if(unlikely(!cpu_freq_chart_generated)) {
+ printf("CHART xenstat.cpu_freq '' 'CPU Frequency' 'MHz' 'cpu' '' line %d %d '' %s\n"
+ , NETDATA_CHART_PRIO_XENSTAT_NODE_CPU_FREQ
+ , netdata_update_every
+ , PLUGIN_XENSTAT_NAME
+ );
+ printf("DIMENSION %s '' absolute 1 %d\n", "frequency", netdata_update_every * 1024 * 1024);
+ cpu_freq_chart_generated = 1;
+ }
+
+ printf(
+ "BEGIN xenstat.cpu_freq\n"
+ "SET frequency = %lld\n"
+ "END\n"
+ , (collected_number) node_metrics.node_cpu_hz
+ );
+}
+
+static void print_domain_states_chart_definition(char *type, int obsolete_flag) {
+ printf("CHART %s.states '' 'Domain States' 'boolean' 'states' 'xendomain.states' line %d %d %s %s\n"
+ , type
+ , NETDATA_CHART_PRIO_XENSTAT_DOMAIN_STATES
+ , netdata_update_every
+ , obsolete_flag ? "obsolete": "''"
+ , PLUGIN_XENSTAT_NAME
+ );
+ printf("DIMENSION running '' absolute 1 %d\n", netdata_update_every);
+ printf("DIMENSION blocked '' absolute 1 %d\n", netdata_update_every);
+ printf("DIMENSION paused '' absolute 1 %d\n", netdata_update_every);
+ printf("DIMENSION shutdown '' absolute 1 %d\n", netdata_update_every);
+ printf("DIMENSION crashed '' absolute 1 %d\n", netdata_update_every);
+ printf("DIMENSION dying '' absolute 1 %d\n", netdata_update_every);
+}
+
+static void print_domain_cpu_chart_definition(char *type, int obsolete_flag) {
+ printf("CHART %s.cpu '' 'CPU Usage (100%% = 1 core)' 'percentage' 'cpu' 'xendomain.cpu' line %d %d %s %s\n"
+ , type
+ , NETDATA_CHART_PRIO_XENSTAT_DOMAIN_CPU
+ , netdata_update_every
+ , obsolete_flag ? "obsolete": "''"
+ , PLUGIN_XENSTAT_NAME
+ );
+ printf("DIMENSION used '' incremental 100 %d\n", netdata_update_every * 1000000000);
+}
+
+static void print_domain_mem_chart_definition(char *type, int obsolete_flag) {
+ printf("CHART %s.mem '' 'Memory Reservation' 'MiB' 'memory' 'xendomain.mem' line %d %d %s %s\n"
+ , type
+ , NETDATA_CHART_PRIO_XENSTAT_DOMAIN_MEM
+ , netdata_update_every
+ , obsolete_flag ? "obsolete": "''"
+ , PLUGIN_XENSTAT_NAME
+ );
+ printf("DIMENSION maximum '' absolute 1 %d\n", netdata_update_every * 1024 * 1024);
+ printf("DIMENSION current '' absolute 1 %d\n", netdata_update_every * 1024 * 1024);
+}
+
+static void print_domain_vcpu_chart_definition(char *type, struct domain_metrics *d, int obsolete_flag) {
+ struct vcpu_metrics *vcpu_m;
+
+ printf("CHART %s.vcpu '' 'CPU Usage per VCPU' 'percentage' 'cpu' 'xendomain.vcpu' line %d %d %s %s\n"
+ , type
+ , NETDATA_CHART_PRIO_XENSTAT_DOMAIN_VCPU
+ , netdata_update_every
+ , obsolete_flag ? "obsolete": "''"
+ , PLUGIN_XENSTAT_NAME
+ );
+
+ for(vcpu_m = d->vcpu_root; vcpu_m; vcpu_m = vcpu_m->next) {
+ if(likely(vcpu_m->updated && vcpu_m->online)) {
+ printf("DIMENSION vcpu%u '' incremental 100 %d\n", vcpu_m->id, netdata_update_every * 1000000000);
+ }
+ }
+}
+
+static void print_domain_vbd_oo_chart_definition(char *type, unsigned int vbd, int obsolete_flag) {
+ printf("CHART %s.oo_req_vbd%u '' 'VBD%u \"Out Of\" Requests' 'requests/s' 'vbd' 'xendomain.oo_req_vbd' line %d %d %s %s\n"
+ , type
+ , vbd
+ , vbd
+ , NETDATA_CHART_PRIO_XENSTAT_DOMAIN_VBD_OO_REQ + vbd
+ , netdata_update_every
+ , obsolete_flag ? "obsolete": "''"
+ , PLUGIN_XENSTAT_NAME
+ );
+ printf("DIMENSION requests '' incremental 1 %d\n", netdata_update_every);
+}
+
+static void print_domain_vbd_requests_chart_definition(char *type, unsigned int vbd, int obsolete_flag) {
+ printf("CHART %s.requests_vbd%u '' 'VBD%u Requests' 'requests/s' 'vbd' 'xendomain.requests_vbd' line %d %d %s %s\n"
+ , type
+ , vbd
+ , vbd
+ , NETDATA_CHART_PRIO_XENSTAT_DOMAIN_VBD_REQUESTS + vbd
+ , netdata_update_every
+ , obsolete_flag ? "obsolete": "''"
+ , PLUGIN_XENSTAT_NAME
+ );
+ printf("DIMENSION read '' incremental 1 %d\n", netdata_update_every);
+ printf("DIMENSION write '' incremental -1 %d\n", netdata_update_every);
+}
+
+static void print_domain_vbd_sectors_chart_definition(char *type, unsigned int vbd, int obsolete_flag) {
+ printf("CHART %s.sectors_vbd%u '' 'VBD%u Read/Written Sectors' 'sectors/s' 'vbd' 'xendomain.sectors_vbd' line %d %d %s %s\n"
+ , type
+ , vbd
+ , vbd
+ , NETDATA_CHART_PRIO_XENSTAT_DOMAIN_VBD_SECTORS + vbd
+ , netdata_update_every
+ , obsolete_flag ? "obsolete": "''"
+ , PLUGIN_XENSTAT_NAME
+ );
+ printf("DIMENSION read '' incremental 1 %d\n", netdata_update_every);
+ printf("DIMENSION write '' incremental -1 %d\n", netdata_update_every);
+}
+
+static void print_domain_network_bytes_chart_definition(char *type, unsigned int network, int obsolete_flag) {
+ printf("CHART %s.bytes_network%u '' 'Network%u Received/Sent Bytes' 'kilobits/s' 'network' 'xendomain.bytes_network' line %d %d %s %s\n"
+ , type
+ , network
+ , network
+ , NETDATA_CHART_PRIO_XENSTAT_DOMAIN_NET_BYTES + network
+ , netdata_update_every
+ , obsolete_flag ? "obsolete": "''"
+ , PLUGIN_XENSTAT_NAME
+ );
+ printf("DIMENSION received '' incremental 8 %d\n", netdata_update_every * 1000);
+ printf("DIMENSION sent '' incremental -8 %d\n", netdata_update_every * 1000);
+}
+
+static void print_domain_network_packets_chart_definition(char *type, unsigned int network, int obsolete_flag) {
+ printf("CHART %s.packets_network%u '' 'Network%u Received/Sent Packets' 'packets/s' 'network' 'xendomain.packets_network' line %d %d %s %s\n"
+ , type
+ , network
+ , network
+ , NETDATA_CHART_PRIO_XENSTAT_DOMAIN_NET_PACKETS + network
+ , netdata_update_every
+ , obsolete_flag ? "obsolete": "''"
+ , PLUGIN_XENSTAT_NAME
+ );
+ printf("DIMENSION received '' incremental 1 %d\n", netdata_update_every);
+ printf("DIMENSION sent '' incremental -1 %d\n", netdata_update_every);
+}
+
+static void print_domain_network_errors_chart_definition(char *type, unsigned int network, int obsolete_flag) {
+ printf("CHART %s.errors_network%u '' 'Network%u Receive/Transmit Errors' 'errors/s' 'network' 'xendomain.errors_network' line %d %d %s %s\n"
+ , type
+ , network
+ , network
+            , NETDATA_CHART_PRIO_XENSTAT_DOMAIN_NET_ERRORS + network
+ , netdata_update_every
+ , obsolete_flag ? "obsolete": "''"
+ , PLUGIN_XENSTAT_NAME
+ );
+ printf("DIMENSION received '' incremental 1 %d\n", netdata_update_every);
+ printf("DIMENSION sent '' incremental -1 %d\n", netdata_update_every);
+}
+
+static void print_domain_network_drops_chart_definition(char *type, unsigned int network, int obsolete_flag) {
+ printf("CHART %s.drops_network%u '' 'Network%u Receive/Transmit Drops' 'drops/s' 'network' 'xendomain.drops_network' line %d %d %s %s\n"
+ , type
+ , network
+ , network
+            , NETDATA_CHART_PRIO_XENSTAT_DOMAIN_NET_DROPS + network
+ , netdata_update_every
+ , obsolete_flag ? "obsolete": "''"
+ , PLUGIN_XENSTAT_NAME
+ );
+ printf("DIMENSION received '' incremental 1 %d\n", netdata_update_every);
+ printf("DIMENSION sent '' incremental -1 %d\n", netdata_update_every);
+}
+
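+// send per-domain metrics; charts of entities that disappeared since the last
+// iteration are re-printed with the 'obsolete' flag so netdata hides them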
+static void xenstat_send_domain_metrics() {
+
+ if(unlikely(!node_metrics.domain_root)) return;
+ struct domain_metrics *d;
+
+ for(d = node_metrics.domain_root; d; d = d->next) {
+ char type[TYPE_LENGTH_MAX + 1];
+ snprintfz(type, TYPE_LENGTH_MAX, "xendomain_%s_%s", d->name, d->uuid);
+
+ if(likely(d->updated)) {
+
+ // ----------------------------------------------------------------
+
+ if(unlikely(!d->states_chart_generated)) {
+ print_domain_states_chart_definition(type, CHART_IS_NOT_OBSOLETE);
+ d->states_chart_generated = 1;
+ }
+ printf(
+ "BEGIN %s.states\n"
+ "SET running = %lld\n"
+ "SET blocked = %lld\n"
+ "SET paused = %lld\n"
+ "SET shutdown = %lld\n"
+ "SET crashed = %lld\n"
+ "SET dying = %lld\n"
+ "END\n"
+ , type
+ , (collected_number)d->running
+ , (collected_number)d->blocked
+ , (collected_number)d->paused
+ , (collected_number)d->shutdown
+ , (collected_number)d->crashed
+ , (collected_number)d->dying
+ );
+
+ // ----------------------------------------------------------------
+
+ if(unlikely(!d->cpu_chart_generated)) {
+ print_domain_cpu_chart_definition(type, CHART_IS_NOT_OBSOLETE);
+ d->cpu_chart_generated = 1;
+ }
+ printf(
+ "BEGIN %s.cpu\n"
+ "SET used = %lld\n"
+ "END\n"
+ , type
+ , (collected_number)d->cpu_ns
+ );
+
+ // ----------------------------------------------------------------
+
+ struct vcpu_metrics *vcpu_m;
+
+ if(unlikely(!d->vcpu_chart_generated || d->num_vcpus_changed)) {
+ print_domain_vcpu_chart_definition(type, d, CHART_IS_NOT_OBSOLETE);
+ d->num_vcpus_changed = 0;
+ d->vcpu_chart_generated = 1;
+ }
+
+ printf("BEGIN %s.vcpu\n", type);
+ for(vcpu_m = d->vcpu_root; vcpu_m; vcpu_m = vcpu_m->next) {
+ if(likely(vcpu_m->updated && vcpu_m->online)) {
+ printf(
+ "SET vcpu%u = %lld\n"
+ , vcpu_m->id
+ , (collected_number)vcpu_m->ns
+ );
+ }
+ }
+ printf("END\n");
+
+ // ----------------------------------------------------------------
+
+ if(unlikely(!d->mem_chart_generated)) {
+ print_domain_mem_chart_definition(type, CHART_IS_NOT_OBSOLETE);
+ d->mem_chart_generated = 1;
+ }
+ printf(
+ "BEGIN %s.mem\n"
+ "SET maximum = %lld\n"
+ "SET current = %lld\n"
+ "END\n"
+ , type
+ , (collected_number)d->max_mem
+ , (collected_number)d->cur_mem
+ );
+
+ // ----------------------------------------------------------------
+
+ struct vbd_metrics *vbd_m;
+ for(vbd_m = d->vbd_root; vbd_m; vbd_m = vbd_m->next) {
+ if(likely(vbd_m->updated && !vbd_m->error)) {
+ if(unlikely(!vbd_m->oo_req_chart_generated)) {
+ print_domain_vbd_oo_chart_definition(type, vbd_m->id, CHART_IS_NOT_OBSOLETE);
+ vbd_m->oo_req_chart_generated = 1;
+ }
+ printf(
+ "BEGIN %s.oo_req_vbd%u\n"
+ "SET requests = %lld\n"
+ "END\n"
+ , type
+ , vbd_m->id
+ , (collected_number)vbd_m->oo_reqs
+ );
+
+ // ----------------------------------------------------------------
+
+ if(unlikely(!vbd_m->requests_chart_generated)) {
+ print_domain_vbd_requests_chart_definition(type, vbd_m->id, CHART_IS_NOT_OBSOLETE);
+ vbd_m->requests_chart_generated = 1;
+ }
+ printf(
+ "BEGIN %s.requests_vbd%u\n"
+ "SET read = %lld\n"
+ "SET write = %lld\n"
+ "END\n"
+ , type
+ , vbd_m->id
+ , (collected_number)vbd_m->rd_reqs
+ , (collected_number)vbd_m->wr_reqs
+ );
+
+ // ----------------------------------------------------------------
+
+ if(unlikely(!vbd_m->sectors_chart_generated)) {
+ print_domain_vbd_sectors_chart_definition(type, vbd_m->id, CHART_IS_NOT_OBSOLETE);
+ vbd_m->sectors_chart_generated = 1;
+ }
+ printf(
+ "BEGIN %s.sectors_vbd%u\n"
+ "SET read = %lld\n"
+ "SET write = %lld\n"
+ "END\n"
+ , type
+ , vbd_m->id
+ , (collected_number)vbd_m->rd_sects
+ , (collected_number)vbd_m->wr_sects
+ );
+ }
+ else {
+ if(unlikely(vbd_m->oo_req_chart_generated
+ || vbd_m->requests_chart_generated
+ || vbd_m->sectors_chart_generated)) {
+ if(unlikely(debug)) fprintf(stderr, "xenstat.plugin: mark charts as obsolete for vbd %d, domain '%s', id %d, uuid %s\n", vbd_m->id, d->name, d->id, d->uuid);
+ print_domain_vbd_oo_chart_definition(type, vbd_m->id, CHART_IS_OBSOLETE);
+ print_domain_vbd_requests_chart_definition(type, vbd_m->id, CHART_IS_OBSOLETE);
+ print_domain_vbd_sectors_chart_definition(type, vbd_m->id, CHART_IS_OBSOLETE);
+ vbd_m->oo_req_chart_generated = 0;
+ vbd_m->requests_chart_generated = 0;
+ vbd_m->sectors_chart_generated = 0;
+ }
+ }
+ }
+
+ // ----------------------------------------------------------------
+
+ struct network_metrics *network_m;
+ for(network_m = d->network_root; network_m; network_m = network_m->next) {
+ if(likely(network_m->updated)) {
+ if(unlikely(!network_m->bytes_chart_generated)) {
+ print_domain_network_bytes_chart_definition(type, network_m->id, CHART_IS_NOT_OBSOLETE);
+ network_m->bytes_chart_generated = 1;
+ }
+ printf(
+ "BEGIN %s.bytes_network%u\n"
+ "SET received = %lld\n"
+ "SET sent = %lld\n"
+ "END\n"
+ , type
+ , network_m->id
+ , (collected_number)network_m->rbytes
+ , (collected_number)network_m->tbytes
+ );
+
+ // ----------------------------------------------------------------
+
+ if(unlikely(!network_m->packets_chart_generated)) {
+ print_domain_network_packets_chart_definition(type, network_m->id, CHART_IS_NOT_OBSOLETE);
+ network_m->packets_chart_generated = 1;
+ }
+ printf(
+ "BEGIN %s.packets_network%u\n"
+ "SET received = %lld\n"
+ "SET sent = %lld\n"
+ "END\n"
+ , type
+ , network_m->id
+ , (collected_number)network_m->rpackets
+ , (collected_number)network_m->tpackets
+ );
+
+ // ----------------------------------------------------------------
+
+ if(unlikely(!network_m->errors_chart_generated)) {
+ print_domain_network_errors_chart_definition(type, network_m->id, CHART_IS_NOT_OBSOLETE);
+ network_m->errors_chart_generated = 1;
+ }
+ printf(
+ "BEGIN %s.errors_network%u\n"
+ "SET received = %lld\n"
+ "SET sent = %lld\n"
+ "END\n"
+ , type
+ , network_m->id
+ , (collected_number)network_m->rerrs
+ , (collected_number)network_m->terrs
+ );
+
+ // ----------------------------------------------------------------
+
+ if(unlikely(!network_m->drops_chart_generated)) {
+ print_domain_network_drops_chart_definition(type, network_m->id, CHART_IS_NOT_OBSOLETE);
+ network_m->drops_chart_generated = 1;
+ }
+ printf(
+ "BEGIN %s.drops_network%u\n"
+ "SET received = %lld\n"
+ "SET sent = %lld\n"
+ "END\n"
+ , type
+ , network_m->id
+ , (collected_number)network_m->rdrops
+ , (collected_number)network_m->tdrops
+ );
+ }
+            else {
+                if(unlikely(network_m->bytes_chart_generated
+                            || network_m->packets_chart_generated
+                            || network_m->errors_chart_generated
+                            || network_m->drops_chart_generated)) {
+                    if(unlikely(debug)) fprintf(stderr, "xenstat.plugin: mark charts as obsolete for network %d, domain '%s', id %d, uuid %s\n", network_m->id, d->name, d->id, d->uuid);
+                    print_domain_network_bytes_chart_definition(type, network_m->id, CHART_IS_OBSOLETE);
+                    print_domain_network_packets_chart_definition(type, network_m->id, CHART_IS_OBSOLETE);
+                    print_domain_network_errors_chart_definition(type, network_m->id, CHART_IS_OBSOLETE);
+                    print_domain_network_drops_chart_definition(type, network_m->id, CHART_IS_OBSOLETE);
+                    network_m->bytes_chart_generated = 0;
+                    network_m->packets_chart_generated = 0;
+                    network_m->errors_chart_generated = 0;
+                    network_m->drops_chart_generated = 0;
+                }
+            }
+ }
+ }
+        else {
+            if(unlikely(debug)) fprintf(stderr, "xenstat.plugin: mark charts as obsolete for domain '%s', id %d, uuid %s\n", d->name, d->id, d->uuid);
+            print_domain_states_chart_definition(type, CHART_IS_OBSOLETE);
+            print_domain_cpu_chart_definition(type, CHART_IS_OBSOLETE);
+            print_domain_vcpu_chart_definition(type, d, CHART_IS_OBSOLETE);
+            print_domain_mem_chart_definition(type, CHART_IS_OBSOLETE);
+
+            d = domain_metrics_free(d);
+            if(unlikely(!d)) break; // the freed domain was the head; the rest of the list is handled on the next cycle
+        }
+ }
+}
+
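+// plugin entry point: parse the command line, initialize the xenstat and
+// libxl handles, then loop forever collecting and sending metrics every
+// netdata_update_every seconds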
+int main(int argc, char **argv) {
+
+ // ------------------------------------------------------------------------
+ // initialization of netdata plugin
+
+ program_name = "xenstat.plugin";
+
+ // disable syslog
+ error_log_syslog = 0;
+
+ // set errors flood protection to 100 logs per hour
+ error_log_errors_per_period = 100;
+ error_log_throttle_period = 3600;
+
+ // ------------------------------------------------------------------------
+ // parse command line parameters
+
+ int i, freq = 0;
+ for(i = 1; i < argc ; i++) {
+ if(isdigit(*argv[i]) && !freq) {
+ int n = str2i(argv[i]);
+ if(n > 0 && n < 86400) {
+ freq = n;
+ continue;
+ }
+ }
+ else if(strcmp("version", argv[i]) == 0 || strcmp("-version", argv[i]) == 0 || strcmp("--version", argv[i]) == 0 || strcmp("-v", argv[i]) == 0 || strcmp("-V", argv[i]) == 0) {
+ printf("xenstat.plugin %s\n", VERSION);
+ exit(0);
+ }
+ else if(strcmp("debug", argv[i]) == 0) {
+ debug = 1;
+ continue;
+ }
+ else if(strcmp("-h", argv[i]) == 0 || strcmp("--help", argv[i]) == 0) {
+ fprintf(stderr,
+ "\n"
+ " netdata xenstat.plugin %s\n"
+ " Copyright (C) 2019 Netdata Inc.\n"
+ " Released under GNU General Public License v3 or later.\n"
+ " All rights reserved.\n"
+ "\n"
+ " This program is a data collector plugin for netdata.\n"
+ "\n"
+ " Available command line options:\n"
+ "\n"
+ " COLLECTION_FREQUENCY data collection frequency in seconds\n"
+ " minimum: %d\n"
+ "\n"
+ " debug enable verbose output\n"
+ " default: disabled\n"
+ "\n"
+ " -v\n"
+ " -V\n"
+ " --version print version and exit\n"
+ "\n"
+ " -h\n"
+ " --help print this message and exit\n"
+ "\n"
+ " For more information:\n"
+ " https://github.com/netdata/netdata/tree/master/collectors/xenstat.plugin\n"
+ "\n"
+ , VERSION
+ , netdata_update_every
+ );
+ exit(1);
+ }
+
+ error("xenstat.plugin: ignoring parameter '%s'", argv[i]);
+ }
+
+ errno = 0;
+
+ if(freq >= netdata_update_every)
+ netdata_update_every = freq;
+ else if(freq)
+ error("update frequency %d seconds is too small for XENSTAT. Using %d.", freq, netdata_update_every);
+
+ // ------------------------------------------------------------------------
+ // initialize xen API handles
+ xenstat_handle *xhandle = NULL;
+ libxl_ctx *ctx = NULL;
+ libxl_dominfo info;
+
+ if(unlikely(debug)) fprintf(stderr, "xenstat.plugin: calling xenstat_init()\n");
+ xhandle = xenstat_init();
+ if (xhandle == NULL)
+ error("XENSTAT: failed to initialize xenstat library.");
+
+ if(unlikely(debug)) fprintf(stderr, "xenstat.plugin: calling libxl_ctx_alloc()\n");
+ if (libxl_ctx_alloc(&ctx, LIBXL_VERSION, 0, NULL)) {
+ error("XENSTAT: failed to initialize xl context.");
+ }
+ libxl_dominfo_init(&info);
+
+ // ------------------------------------------------------------------------
+ // the main loop
+
+ if(unlikely(debug)) fprintf(stderr, "xenstat.plugin: starting data collection\n");
+
+ time_t started_t = now_monotonic_sec();
+
+ size_t iteration;
+ usec_t step = netdata_update_every * USEC_PER_SEC;
+
+ heartbeat_t hb;
+ heartbeat_init(&hb);
+ for(iteration = 0; 1; iteration++) {
+ usec_t dt = heartbeat_next(&hb, step);
+
+ if(unlikely(netdata_exit)) break;
+
+ if(unlikely(debug && iteration))
+ fprintf(stderr, "xenstat.plugin: iteration %zu, dt %llu usec\n"
+ , iteration
+ , dt
+ );
+
+ if(likely(xhandle)) {
+ if(unlikely(debug)) fprintf(stderr, "xenstat.plugin: calling xenstat_collect()\n");
+ int ret = xenstat_collect(xhandle, ctx, &info);
+
+ if(likely(!ret)) {
+ if(unlikely(debug)) fprintf(stderr, "xenstat.plugin: calling xenstat_send_node_metrics()\n");
+ xenstat_send_node_metrics();
+ if(unlikely(debug)) fprintf(stderr, "xenstat.plugin: calling xenstat_send_domain_metrics()\n");
+ xenstat_send_domain_metrics();
+ }
+ else {
+ if(unlikely(debug)) fprintf(stderr, "xenstat.plugin: can't collect data\n");
+ }
+ }
+
+ fflush(stdout);
+
+        // exit after 4 hours (14400 seconds); netdata restarts external plugins automatically
+ if(unlikely(now_monotonic_sec() - started_t > 14400)) break;
+ }
+
+ libxl_ctx_free(ctx);
+ xenstat_uninit(xhandle);
+ info("XENSTAT process exiting");
+}
+
+#else // !HAVE_LIBXENSTAT
+
+int main(int argc, char **argv) {
+ (void)argc;
+ (void)argv;
+
+ fatal("xenstat.plugin is not compiled.");
+}
+
+#endif // !HAVE_LIBXENSTAT
diff --git a/configs.signatures b/configs.signatures
new file mode 100644
index 0000000..994b62a
--- /dev/null
+++ b/configs.signatures
@@ -0,0 +1,764 @@
+declare -A configs_signatures=(
+ ['00049600c2f9237e6c46b8b0f703c13c']='health.d/bcache.conf'
+ ['00403e687213f3b7db9bf4563a5a92cc']='python.d/isc_dhcpd.conf'
+ ['0056936ce99788ed9ae1c611c87aa6d8']='apps_groups.conf'
+ ['007fc019fb32e952b509d455c016a002']='health.d/tcp_resets.conf'
+ ['0083884a6cec6a48ee61665fbe131142']='charts.d/sensors.conf'
+ ['0102351817595a85d01ebd54a5f2f36b']='python.d/ovpn_status_log.conf'
+ ['01302e01162d465614276de43fad7546']='python.d.conf'
+ ['0147c7e8f8f57e37c5dade4e8aacacf9']='python.d/example.conf'
+ ['017036c1dc32c9312b2704b839bd078f']='python.d/haproxy.conf'
+ ['01c54057e0ca55b5bb49df1662d6b8c3']='python.d/web_log.conf'
+ ['024f4a6a431bcbc6acdb4184aa9661f3']='python.d/httpcheck.conf'
+ ['02fa10fa85ab88e9723998de48d1aca0']='health.d/disks.conf'
+ ['0314f0f1f88773c0ed9e9a908335e7ca']='health.d/tcp_mem.conf'
+ ['032ee2b3b6cb200bfdf1a0698c2457d6']='health.d/boinc.conf'
+ ['03510c8b3a2a6e8535320cfb9ebef06a']='python.d/httpcheck.conf'
+ ['036dc300bd7b0e0ef229b9822686d63e']='python.d/isc_dhcpd.conf'
+ ['0388b873d0d7e47c19005b7241db77d8']='python.d/tomcat.conf'
+ ['04138a3d8e907c75329fe60ce2e27c1c']='health.d/tcp_resets.conf'
+ ['0433d98a19d3b08e6f13884e46d39b47']='health.d/disks.conf'
+ ['043f0a35dde85837fabeb85b990a41c1']='health.d/swap.conf'
+ ['044496086420b531487d3c57600ca673']='apps_groups.conf'
+ ['0529b679d3c0e7e6332753c7f6484731']='health.d/net.conf'
+ ['054a2eece27ee2f5928b8167f5989b65']='python.d/dockerd.conf'
+ ['057d12aaff0467e64529e839a258806b']='health.d/entropy.conf'
+ ['05809c6662ba39f19cbec90234433d62']='health_alarm_notify.conf'
+ ['059d98d0c562e1c81653d1e64673deab']='python.d/web_log.conf'
+ ['05a8f39f134850c1e8d6267dbe706273']='health.d/web_log.conf'
+ ['061c45b0e34170d357e47883166ecf40']='python.d/nginx.conf'
+ ['06f055543a6b4d038d92c226952d777f']='health.d/isc_dhcpd.conf'
+ ['074d618a7e9c72f9bdfda7611e01e0ca']='python.d/redis.conf'
+ ['074df527cc70b5f38c0714f08f20e57c']='health.d/apache.conf'
+ ['0787e67357804b934d2866f1b7c60b14']='health.d/ipc.conf'
+ ['08042325ab27256b938575deafee8ecf']='python.d/nginx.conf'
+ ['0847d54a7a0c7e0381c52e9d4d3fa7db']='health.d/mdstat.conf'
+ ['084ee72d64760f2641b0720e79c922f3']='health.d/cpu.conf'
+ ['0856124b1eecf01681b4fdf4e21efb3f']='health.d/net.conf'
+ ['0862e7cf3d32ef48795702c6aefd27e0']='python.d/fail2ban.conf'
+ ['08de9ae0765a4161abe24c09e47b3454']='python.d/go_expvar.conf'
+ ['08ff5218f938fc48e09e718821169d14']='health.d/redis.conf'
+ ['091572888425bc3b8b559c3c53367ec7']='apps_groups.conf'
+ ['09225283977a6584f8063016091cc4f2']='health.d/tcp_resets.conf'
+ ['09264cec953ae1c4c2985e6446abb386']='health.d/mysql.conf'
+ ['093540fdc2a228e976ce5d48a3adf9fc']='health.d/disks.conf'
+ ['09e030d26be08a13fa3560e47fa27825']='apps_groups.conf'
+ ['0a5bc649295ba08f7e22e175901c1380']='python.d/unbound.conf'
+ ['0a7039ecc7a86b480d9d499b12b02763']='python.d/freeradius.conf'
+ ['0ad10fa896346202aee99384b0ec968d']='health.d/cpu.conf'
+ ['0b6903f981cdb018c17802ac4e295609']='health.d/btrfs.conf'
+ ['0bd66be0e8d99abc3a1d816036343f0a']='health_alarm_notify.conf'
+ ['0c5e0fa364d7bdf7c16e8459a0544572']='health.d/netfilter.conf'
+ ['0cd4e1fb57497e4d4c2451a9e58f724d']='python.d/redis.conf'
+ ['0d29fe9919a2975107db1f2583344e7a']='health.d/mdstat.conf'
+ ['0dd38dcd2473ddb9f8b1b41147432d10']='health_alarm_notify.conf'
+ ['0e59bc11d0a869ea0247c04c08c8d72e']='python.d/ipfs.conf'
+ ['0ee63c201892b21abc8bf6c712e815e3']='python.d/mysql.conf'
+ ['0ef8af1f358741afa7fd5d0ffabefaac']='charts.d/mysql.conf'
+ ['0f65b08edebedd06e376274021196a6b']='health.d/lighttpd.conf'
+ ['0ff6d725acde27a53de4646d69e9e3d1']='health.d/net.conf'
+ ['107de0cfeafcb6ab22fe7dd4a25d200d']='health.d/udp_errors.conf'
+ ['107e6ac69b30fb9837ac64c35f891ec7']='health.d/tcp_resets.conf'
+ ['10ac8106a109fdabdcc0405e9f43dbe1']='charts.d/mem_apps.conf'
+ ['10c3b525850a1cb9de760a8ee96fbc6e']='charts.d/opensips.conf'
+ ['1112c848ef91ebb9c622020d09712d67']='health.d/net.conf'
+ ['111401c47f94015cd12ad0b8a4393c4f']='health.d/softnet.conf'
+ ['111ead4b350593dd69b6f7ac0307b49b']='python.d/httpcheck.conf'
+ ['12a4c7803ae79506a14ea784fea60dce']='health.d/net.conf'
+ ['12d27b9f4d1696c2d49a77ed71d68e6f']='python.d/w1sensor.conf'
+ ['12e57bea1127933a4fe49ce2e9674f4d']='statsd.d/example.conf'
+ ['13141998a5d71308d9c119834c27bfd3']='python.d.conf'
+ ['13ccf65fd879795f0fcea89ade27c2d0']='health.d/swap.conf'
+ ['13e861a3d2f3075de883994ab54df658']='health.d/megacli.conf'
+ ['1423e4f8c25a66c316be37da0d5c9c54']='health.d/btrfs.conf'
+ ['142a5b693d34b0308bb0b8aec71fad79']='python.d/postfix.conf'
+ ['14783e051650442ec9e2ed38d81d667e']='charts.d/exim.conf'
+ ['156fe032bfd9da822060d0f515b326d9']='health.d/isc_dhcpd.conf'
+ ['15d8401b56a74120f9f832873ec9c578']='health.d/postgres.conf'
+ ['15e32114994b92be7853b88091e7c6fb']='python.d/exim.conf'
+ ['167a2ce21035ac6b5a8720c7c2b4413c']='health.d/web_log.conf'
+ ['174c21a6ce5de97bda83d502aa47a9f8']='health.d/apache.conf'
+ ['17555c7418c801ceb6c93adbe485d6f9']='apps_groups.conf'
+ ['178281aa2241d4a3e6b798bb9c4ae577']='python.d/haproxy.conf'
+ ['17dc745a76ab4c37ee31a3224f644fc1']='charts.d/postfix.conf'
+ ['18710ef6523cef8630d644ab270bfe02']='health.d/varnish.conf'
+ ['18c46f55d45a17d60c72877807d9a3d2']='health.d/udp_errors.conf'
+ ['18ee1c6197a4381b1c1631ef6129824f']='apps_groups.conf'
+ ['1972e48345e6c3f0d65f94a03317622b']='health_alarm_notify.conf'
+ ['1bc518219377499d1d05d6ee770f1a87']='python.d/go_expvar.conf'
+ ['1be2d92f2934601e18e6d709590569b7']='charts.d/apcupsd.conf'
+ ['1c12b678ab65f271a96da1bbd0a1ab1c']='health.d/softnet.conf'
+ ['1c3168c95b53e999df3d45162b3f50b8']='health.d/fping.conf'
+ ['1c71a8792c5c0ed035dd97af93a04838']='health_alarm_notify.conf'
+ ['1d6efba856acaaaf3b50bc6d66611b92']='python.d/web_log.conf'
+ ['1e09f326178acf07d361c08a44d8b1f3']='python.d/rabbitmq.conf'
+ ['1e0bc6a0ff701d16225383e5de76585b']='python.d/spigotmc.conf'
+ ['1ea8e8ef1fa8a3a0fcdfba236f4cb195']='python.d/mysql.conf'
+ ['1eb0bc80934a3166fcde4d153c476d14']='health.d/fping.conf'
+ ['1ef0fd38e7969c023bc3fa6d89eaf6d6']='python.d/mdstat.conf'
+ ['1f43a9a820c02e0525de594299b55b15']='python.d.conf'
+ ['1f5545b3ff52b3eb75ee05401f67a9bc']='fping.conf'
+ ['1fa47f32ab52a22f8e7087cae85dd25e']='health.d/net.conf'
+ ['203678a5be09d65993dcb3a9d475d187']='health.d/ipfs.conf'
+ ['20be73f473e59bc7de1fe61d53466aba']='health.d/ram.conf'
+ ['21913b96f540333094a972614f5da8f1']='charts.d/tomcat.conf'
+ ['21924a6ab8008d16ffac340f226ebad9']='python.d/nginx.conf'
+ ['219c5bb81965fa17d4940d4aa343c282']='health.d/mysql.conf'
+ ['225792e33ddeea72992ffa5ab36d505f']='python.d/ntpd.conf'
+ ['22952dbf42647c583b005054b23b545f']='health.d/disks.conf'
+ ['22ceb822983134a7ca67343241f30341']='health.d/disks.conf'
+ ['2320314191d0f8e7548b9273b77ac5e3']='apps_groups.conf'
+ ['2385e5d35b440619621c4af62492d91b']='health.d/disks.conf'
+ ['23989e04f9c7694b7eab646f8949cd52']='python.d/portcheck.conf'
+ ['23a5afe5260a7ad388e447709cb009df']='python.d/web_log.conf'
+ ['23ae815aefa221b1929f96752a1f7556']='health.d/squid.conf'
+ ['243503ceee1d5b4e1e55a28768a116ae']='health.d/net.conf'
+ ['2472e49550326f7142e2c425ccbca005']='health.d/softnet.conf'
+ ['24d02e4086fd60943c45d8de2e52a4fb']='python.d/springboot.conf'
+ ['254de8ec49602bea2da3631676d7cfec']='health.d/cpu.conf'
+ ['256a7f06f7e579a61752fc64418cffe5']='charts.d/nut.conf'
+ ['25a35a7c3c6092a839865e9be250c024']='health.d/ram.conf'
+ ['262f98b3d88b98978cb08d566ce85a9d']='charts.d/squid.conf'
+ ['27a1dbd43abc7394dcd72efe797ee9af']='python.d.conf'
+ ['2827de41cf34a91b7a8e4d8724f59668']='health.d/net.conf'
+ ['28df44a90e8ea4c6156314c03e88bf44']='health.d/softnet.conf'
+ ['292c6cbbb5c819bb91f87c02a45890c1']='health.d/swap.conf'
+ ['29485dc362202095d3d80e4f744d0538']='health_alarm_notify.conf'
+ ['297160ae7ee01a547ed14f857b4f2c8d']='health.d/memcached.conf'
+ ['298504f331c55dff4055321ff3a7b5cc']='health.d/web_log.conf'
+ ['29c37d59e8801dffd18617738c1b4b71']='python.d.conf'
+ ['29f97e10b92333790fbe0d2a3617b736']='health_alarm_notify.conf'
+ ['2a0794fd43eadf30a51805bc9ba2c64d']='python.d/hddtemp.conf'
+ ['2acae80dbdbe536a160f5b216bac84bc']='python.d/samba.conf'
+ ['2ad55a5d1e885cf142849a78d4b00401']='health.d/net.conf'
+ ['2b0106e89ce622da2869cb0d201246d1']='python.d/unbound.conf'
+ ['2bbbebf52f84fd27fbefecd2a8a8076f']='health.d/memcached.conf'
+ ['2c2b7e8df922b2bf121fb7db32bbc3bd']='health.d/udp_errors.conf'
+ ['2d1d7498c72f4245cf32902c2b7e71e0']='health.d/entropy.conf'
+ ['2da0a2e7117292ece11d69723a294bd7']='python.d/mongodb.conf'
+ ['2ee5df033fe9c65a45566b6760b856e3']='python.d/web_log.conf'
+ ['2f05e09b69ea20cda56d8f8b6fd3e86d']='health.d/couchdb.conf'
+ ['2f13a6b7d11eda826ff26569b2a77080']='health.d/apcupsd.conf'
+ ['2f3a8e33df83f14e0af8ca2465697215']='python.d/exim.conf'
+ ['2f4a85fedecce1bf425fa1039f6b021e']='apps_groups.conf'
+ ['2fa8fb929fd597f2ab97b6efc540a043']='health_alarm_notify.conf'
+ ['307ac41f6c67fcf007d6f7135fac314c']='stream.conf'
+ ['312b4b8e2805e19cf9be554b319567d6']='health.d/softnet.conf'
+ ['31471ee7eb6cfbb412587a837ffcfe6f']='python.d.conf'
+ ['3161290af7c1909768253e714ea2c3de']='python.d/ceph.conf'
+ ['318bb45755726a25120bb33413d4b582']='health.d/net.conf'
+ ['318db50a701442890c269ab547041e97']='health.d/tcp_orphans.conf'
+ ['31e4058cfe0a01dd9ce4ae425fd7b4f1']='python.d/web_log.conf'
+ ['322ec5e7095912221110623c9d7130cf']='health_alarm_notify.conf'
+ ['325617412a628e3bc776e3fbb777a2a6']='health.d/redis.conf'
+ ['326e1477131e0f73304711135f70a2a5']='health.d/memcached.conf'
+ ['32fde0057c790964f2c743cb3c9aad29']='health.d/nginx.conf'
+ ['33486497112127badc4c47ed2008969c']='python.d/freeradius.conf'
+ ['33b135e28aeaef2b8224ba69a0fde245']='health.d/cpu.conf'
+ ['343bc919a2fbc93f687f9d1339ec5f79']='health.d/net.conf'
+ ['34f6cf10f8da18ddd7edd2a28a4fe5f9']='python.d/sensors.conf'
+ ['35024ebd94542484c0866e6ee6b950cb']='health.d/net.conf'
+ ['35ac63a2f08b2c6dd901c542629ae5df']='python.d/postgres.conf'
+ ['35eb9785c844afd43fa7931915e2d566']='python.d/elasticsearch.conf'
+ ['3634d5eddc46fb0d50cf47f370670c2c']='health.d/redis.conf'
+ ['364b6e0081b116c9ec073b4d329a6dcc']='health_alarm_notify.conf'
+ ['367d1463e520eb9dc89223bab161c6d1']='python.d/postgres.conf'
+ ['36fdd55665cf10b0db164c2a0cca5e57']='health.d/qos.conf'
+ ['373160658e7d5f1a129de397b9347365']='health.d/entropy.conf'
+ ['373c1276dc9e65884ff2b26e1f08afe7']='health.d/named.conf'
+ ['3798445a7faaf45c7a8047908678e690']='python.d/varnish.conf'
+ ['37a5218f42e0ffd1becfb7db14cae568']='health.d/fronius.conf'
+ ['37bc2b50ade9f334da4775dfea59f785']='python.d.conf'
+ ['3807c37ac57046ae867e34dcfe6dbfd9']='health.d/httpcheck.conf'
+ ['3848172053221b95279ba9bf789cd4e0']='health.d/apache.conf'
+ ['3866efafd38e161136428d0f818cac43']='health.d/net.conf'
+ ['38d1bf04fe9901481dd6febcc0404a86']='python.d.conf'
+ ['392ab65af875a8daf0041113b1b40c2f']='python.d.conf'
+ ['39304b2570611c3acb35b72762b46778']='charts.d/sensors.conf'
+ ['394b7e91c97b7adb776460d309b335ff']='python.d/nginx.conf'
+ ['39571e9fad9b759200c5d5b2ee13feb4']='python.d/redis.conf'
+ ['39b65042cafdd9b849a44ec81aa66cac']='health_alarm_notify.conf'
+ ['39f9422b0f0c3eec11a31aff79d89514']='health.d/retroshare.conf'
+ ['3a04a3bc66c49d0c24f65a44fd9caa80']='python.d/postgres.conf'
+ ['3a0f1f988d2111ba003199deca722d9b']='health_alarm_notify.conf'
+ ['3a278ef6c66c122a407a0236c251119d']='python.d/nginx_plus.conf'
+ ['3af522d65b50a5e447607ffb28c81ff5']='apps_groups.conf'
+ ['3b1bfa40a4ff6a200bb2fc00bc51a664']='apps_groups.conf'
+ ['3b535da82cf2ff53e03d735d02fb2357']='python.d/squid.conf'
+ ['3bc2776623889744a98178bad6fb3b79']='health.d/disks.conf'
+ ['3bc2c4423b19779d49ee7935b2ea1431']='health.d/stiebeleltron.conf'
+ ['3bc65e997ab59b9de390fdf63d77f5e1']='python.d/postgres.conf'
+ ['3c60691eb05d4d5bf78a41ed46303bb6']='python.d.conf'
+ ['3c9c47163e9d4dbcb0079b6232398f2f']='apps_groups.conf'
+ ['3ca696189911fb38a0319ddd71e9a395']='python.d/phpfpm.conf'
+ ['3cc6255457d4cba881ae0554ae5d9190']='health.d/squid.conf'
+ ['3d974ac9fdaa44d4527d6503bec35e34']='stream.conf'
+ ['3d9b33da0f40c2ceecd006ddfd44fd14']='python.d.conf'
+ ['3f170e3343cd784983b019163393f5af']='health.d/nginx.conf'
+ ['3f7b669fde5c63bd55cb6dd88866d306']='python.d/ceph.conf'
+ ['3fbe85671efd5d07e51584ab8262b48b']='health.d/tcp_listen.conf'
+ ['3fc45cc18e884c22482524dff6d27833']='python.d/hddtemp.conf'
+ ['3fcc3c449ce8e0388f9c23ca07cab608']='health.d/backend.conf'
+ ['40225ee41bc3a85ce9b7f7af4d90e3e9']='charts.d/cpu_apps.conf'
+ ['4063a01bffb43b0423425d1ba3004967']='health.d/tcp_resets.conf'
+ ['41fa6bb109763561be59d7bcd07bbe82']='python.d/dnsdist.conf'
+ ['421d5dc6c2fce22d0816b6e6363bea57']='python.d/hddtemp.conf'
+ ['42ad0e70b1365b6e7244cc305dbaa529']='health_alarm_notify.conf'
+ ['42bf1c7c64ed77038a0aa094d792a9e2']='python.d/mysql.conf'
+ ['4332dee96e4f38fc73c962df3494ab7c']='health_alarm_notify.conf'
+ ['43739017b6195a6abec14a70fe0df224']='python.d/rethinkdbs.conf'
+ ['43ebb7f224c3b232d8ad044d7e9508b6']='health.d/net.conf'
+ ['43ef8c1e77054f53f9be9f381eb6cd67']='python.d/portcheck.conf'
+ ['4401f0c6a101d35d2cb833e7b0aeb421']='health.d/qos.conf'
+ ['444e20cf75e2cd019e8d412d5d1f4a7f']='charts.d/cpu_apps.conf'
+ ['4461bfacf9a3da47770fb3ca31f4c91f']='health.d/net.conf'
+ ['450667c552ab7a7d8d4a2c214fdacca5']='health.d/entropy.conf'
+ ['459e57e6acb389f4243f695a1e53ab2b']='health.d/boinc.conf'
+ ['45a77ac36ba9f1898144b902de17204b']='health.d/memcached.conf'
+ ['46798cda21e1a5faa769abf4e5d27c48']='health.d/disks.conf'
+ ['46dfa2b6a7e7c76532e00c1344d5d171']='python.d/logind.conf'
+ ['46ef6c1b638e40a7dfd62defdc5f99a3']='health.d/retroshare.conf'
+ ['47180421d580baeaedf8c0ef3d647fb5']='python.d/hddtemp.conf'
+ ['48195c5c8c0476a49b714b4c76bdb570']='python.d/squid.conf'
+ ['48eef63bcf744bae114b502b6dacb4a1']='charts.d/phpfpm.conf'
+ ['4960852f8951b54ca2fe10065752143e']='python.d.conf'
+ ['4a448831776de8acf2e0bdc4cc994cb4']='apps_groups.conf'
+ ['4aba3b6a28ccd75faf5326aca997ee0d']='health_alarm_notify.conf'
+ ['4b775fb31342f1478b3773d041a72911']='python.d.conf'
+ ['4ccb06fff1ce06dc5bc80e0a9f568f6e']='charts.d.conf'
+ ['4cd585f5dfdacaf287413ad037b4e60a']='apps_groups.conf'
+ ['4d13684cadfa90e73ab465409bf7263b']='health.d/mysql.conf'
+ ['4d91ee6fe4c887ea3865ef36ac63da3c']='health.d/mysql.conf'
+ ['4da1c0f009d87995ed66d84fae07f09a']='health.d/memory.conf'
+ ['4dee2390e0bc89938dafa34a390dcf36']='charts.d/squid.conf'
+ ['4e07ea46dd54eb0bbb4f1c0982a71973']='python.d/cpuidle.conf'
+ ['4e37502fdf1944d094dd8be1e1f5e9e6']='health.d/cpu.conf'
+ ['4e59e91d800059183028bbb44cf5afd2']='health.d/httpcheck.conf'
+ ['4e995acb0d6fd77403a2a9dca984b55b']='charts.d.conf'
+ ['4f6a5b47a13f5912cc89e9286701dd08']='health.d/redis.conf'
+ ['4f6f4d39c19d7d954f769d3f9d3b4da5']='health.d/memcached.conf'
+ ['4fc3fa3dc89b789c8820ce109ea6e385']='python.d/httpcheck.conf'
+ ['4fdf72784296326e0b46cb526a5d77a1']='python.d.conf'
+ ['4fef19afccd9a591165b72f0b1a2ac2e']='python.d/freeradius.conf'
+ ['501eb2484b459b410b3f792c2dbaa955']='health.d/swap.conf'
+ ['5050b5963599f13ad5dc0263fa39a906']='python.d/fail2ban.conf'
+ ['508771d8e4611a058991a1bc11039dea']='health.d/disks.conf'
+ ['5120492fa26be3749192607f62dc05f8']='health.d/mdstat.conf'
+ ['5271cf9fc0fd10915a9759add70f7d78']='health.d/swap.conf'
+ ['5278ebbae19c60db600f0a119cb3664e']='python.d/apache.conf'
+ ['52d230aff57850a5aacc4e0420fcd8f5']='python.d.conf'
+ ['52d4131cf9df84e2550b1a5d899ec61d']='health.d/swap.conf'
+ ['5306b64a1e6baacd9de721e1f56961a8']='health_alarm_notify.conf'
+ ['53160707fdc6ce46c195b1b55bb0bcb1']='health.d/swap.conf'
+ ['535e5113b07b0fc6f3abd59546c276f6']='charts.d.conf'
+ ['5379cdc26d7725e2b0d688d785816cef']='python.d/mysql.conf'
+ ['5452eccad2f220d1191411737f6f4b2b']='python.d/isc_dhcpd.conf'
+ ['54614490a14e1a4b7b3d9fecb6b4cfa5']='python.d/exim.conf'
+ ['547779cdc460a926980de1590294b96b']='health.d/softnet.conf'
+ ['54c3934a03453453b8d7d0e8b84a7cd8']='health_alarm_notify.conf'
+ ['5523c092be7d667b228c70aeda6f44eb']='stream.conf'
+ ['55608bdd908a3806df1468f6ee318b2b']='health.d/qos.conf'
+ ['5598b83e915e31f68027afe324a427cd']='apps_groups.conf'
+ ['55cc7e3fe365a77f8e92d01d7a428276']='health.d/ram.conf'
+ ['56324751bae48308f155c079ee7ed43f']='python.d/megacli.conf'
+ ['565f11c38ae6bd5cc9d3c2adb542bc1b']='health.d/softnet.conf'
+ ['5664a814f9351b55da76edd472169a73']='health_alarm_notify.conf'
+ ['56b689031cdcf138064825f31474b37d']='apps_groups.conf'
+ ['56d072c4756b898d0c91f143b89e366b']='python.d.conf'
+ ['573398335c0c71c075fa57f702bce287']='health.d/disks.conf'
+ ['579c7c41c756745f57afd11c94a879d7']='python.d.conf'
+ ['57be306944cb09b7f024079728fd04b9']='apps_groups.conf'
+ ['5829812db29598db5857c9f433e96fef']='python.d/apache.conf'
+ ['58439d9c1253e33c74b399e6853143ab']='health.d/apcupsd.conf'
+ ['5855dd70d71c8497e5591b0690162c9c']='health.d/tcp_resets.conf'
+ ['58660dfcc260f77deec94b328b3838e8']='health_alarm_notify.conf'
+ ['58e835b7176865ec5a6f59f7aba832bf']='health.d/named.conf'
+ ['598f9814966a9e2fe48e8218151d3fa6']='stream.conf'
+ ['59dded33e3adfe622f36c557a4f4bed7']='health.d/net.conf'
+ ['59dea5e3872e5fe4e6c535b216c516b4']='health.d/disks.conf'
+ ['5b5588b00d6829908c2c5ea3220cfa1c']='health.d/load.conf'
+ ['5b917d894bb6a755d59264e9d48e9d56']='fping.conf'
+ ['5bbef0708f5eff4d4a53aaf35fc48a62']='health.d/disks.conf'
+ ['5bf51bb24fb41db9b1e448bd060d3f8c']='apps_groups.conf'
+ ['5c1694184557813a6948db0872556bf0']='charts.d/libreswan.conf'
+ ['5da15d6e17a15213a720749045e5d419']='health.d/disks.conf'
+ ['5dddb6c9670f4aa605abe4b0d901acc4']='python.d/bind_rndc.conf'
+ ['5e6fd588ef6934cf04ddb5e662aa02ea']='health.d/postgres.conf'
+ ['5eb670b6fe39da5fec2523d910b0dd1e']='health.d/cpu.conf'
+ ['5f05d4b248ab2637ada319b4e8c4e4c3']='python.d/varnish.conf'
+ ['5f109df927d5f20409c81f4bfca0c83e']='python.d/web_log.conf'
+ ['5ff1bcaa58695754e2f6980bfe19f579']='health.d/entropy.conf'
+ ['609c6c57605033da96ea65e50c90201c']='charts.d/apache.conf'
+ ['60a13375b3072300bd7552cb5ee9762b']='health.d/netfilter.conf'
+ ['611130db85bad90f966b52055147c81e']='python.d/httpcheck.conf'
+ ['61b7ed36f35e7bd930f5f7f91694a112']='charts.d/postfix.conf'
+ ['621f10b257a11add5ff5aff41e9662e3']='health.d/memcached.conf'
+ ['623771eecb3c277fc728b5304793f93b']='health.d/cpu.conf'
+ ['6265b7465e38839c3543190e638156aa']='python.d/ntpd.conf'
+ ['6319e4ae3810e9eabb61e852e1305785']='python.d.conf'
+ ['632c28d714c87a4969d11cf36a5edaa8']='health.d/web_log.conf'
+ ['636d032928ea0f4741eab264fb49c099']='apps_groups.conf'
+ ['6398ef37a15cb6a0bc921f58948d2b39']='health.d/softnet.conf'
+ ['63c626bc64b3d7bc46a72fbccf9b1926']='health.d/net.conf'
+ ['64070d856ab1b47a18ec871e49bbc13b']='python.d/squid.conf'
+ ['647361e99b5f4e0d73470c569bb9461c']='apps_groups.conf'
+ ['64ac37868097a462e5ee6905c350267e']='python.d/postgres.conf'
+ ['64c48f9726ab987baec9c617a9fef7a6']='health.d/nginx.conf'
+ ['64ffc1b6878c81b87564b0f48642c790']='health.d/elasticsearch.conf'
+ ['650b5fc9da23b25ee7ee1481e4aa2851']='health_alarm_notify.conf'
+ ['653e0c014c8fcfb4db6cd3351d87d720']='python.d.conf'
+ ['6546909d10cc5efcef9dd873bea85956']='python.d/mysql.conf'
+ ['65a59d96c039d0180603ffd945a8968c']='apps_groups.conf'
+ ['65c6933a17fb6b7f8e6baeab73431c17']='charts.d/apcupsd.conf'
+ ['6608c6546b3c6bde084fc1d34b1163c1']='health.d/retroshare.conf'
+ ['66628e70f70c6e991f4fe641b8e9bdde']='python.d/nginx_plus.conf'
+ ['669ebef43ee341f6889d382e86d0e200']='health.d/named.conf'
+ ['66c068eaa3672fbe4e2448e330b3511c']='python.d/web_log.conf'
+ ['66dfe138058ca26a31a118007eb31f35']='health.d/nginx.conf'
+ ['6814b9bc84483db428f6a479ba221855']='python.d/mysql.conf'
+ ['6848e78a5a1c349c6c42d6245d6530ad']='python.d/boinc.conf'
+ ['68607aef1802ed3dc0cd593bf6073beb']='python.d/postfix.conf'
+ ['6a18f61a595c0d48c3363bcc0dbfa6b9']='health_alarm_notify.conf'
+ ['6a47af861ad3dd112124c37fbf09672b']='apps_groups.conf'
+ ['6aa4507f86657383917a0407f2a9cc0d']='python.d.conf'
+ ['6acad8ce5c33e642742825db0eb9bb56']='python.d/web_log.conf'
+ ['6b39de5d85db45115db236347a6896d4']='health.d/named.conf'
+ ['6b598533309e08d71023e46801d45d7e']='apps_groups.conf'
+ ['6bb278bd9e171c4cb5c0fe639231288b']='python.d/web_log.conf'
+ ['6bf0de6e3b251b765b10a71d8c5c319d']='python.d/apache.conf'
+ ['6c9f2f0abe49a6f1a69db052ebcef1bf']='python.d/elasticsearch.conf'
+ ['6ca08ea2a238cad26578b8b85edae160']='health.d/udp_errors.conf'
+ ['6d02c2dd0863e09ad9dbba53e3b58116']='health.d/mysql.conf'
+ ['6df13c6ad582ef339a2a93901b6f0196']='health_alarm_notify.conf'
+ ['6e8366993709652fe7fc00e5d6a0a136']='charts.d/mysql.conf'
+ ['6ea958ca521e0514af57c08b518d8c5c']='health.d/backend.conf'
+ ['6f303ccfdc21c7b122758cea8c15e249']='python.d.conf'
+ ['6f54474c885234af0c792d135644d230']='python.d.conf'
+ ['7005feb3eb5d06416d07cdf7e7c54425']='python.d/ntpd.conf'
+ ['70105b1744a8e13f49083d7f1981aea2']='python.d/ipfs.conf'
+ ['707a63f53f4b32e01d134ae90ba94aad']='health_alarm_notify.conf'
+ ['707a63f53f4b32e01d134ae90ba94aad']='health_email_recipients.conf'
+ ['70d82dabecb09a1da4684f293abef0c9']='health_alarm_notify.conf'
+ ['7117b7067ac2b712aa4c9e92a6cdbf5a']='python.d/couchdb.conf'
+ ['7120cba2f55b1c0a97a0e10d4f6ef751']='health.d/ipmi.conf'
+ ['72246c32511197d87b004e67e4c8da36']='python.d/portcheck.conf'
+ ['729b3e24a72f7d566fd429617d51a21b']='health.d/web_log.conf'
+ ['72ea87f658483f47c38994291af488e8']='health_alarm_notify.conf'
+ ['73125ae64d5c6e9361944cd9bd14844e']='python.d/exim.conf'
+ ['731a1fcfe9b2da1b9d685056a59541b8']='python.d/hddtemp.conf'
+ ['73a8e10dfe4183aca751e9e2a80dabe3']='node.d.conf'
+ ['7454ed74511d7b9819dfe173f9020786']='python.d/redis.conf'
+ ['749fe31362969d75f1ea66d15231d98d']='python.d/retroshare.conf'
+ ['74e5e8d3a4b324f1770f61f78ee4b0e6']='health.d/beanstalkd.conf'
+ ['7502c931aa9acbb92f54c67978d75983']='stream.conf'
+ ['751f15371d0987018abc4d4ad60819f5']='apps_groups.conf'
+ ['7596ae54d46ce199ac599429ef753caf']='health.d/cpu.conf'
+ ['75a9c4b0b1c73956df55585eb0619f6c']='charts.d/ap.conf'
+ ['75ddb2b9bc38a5306bceb5acb0422fe3']='python.d/icecast.conf'
+ ['76205037196767f6877392862eb00d7b']='health.d/ram.conf'
+ ['763e24621c63f5aa05fd6dddf0c855ba']='health.d/nginx_plus.conf'
+ ['7673ea6afe0a286a77a390b9d042c191']='python.d/httpcheck.conf'
+ ['769aa4cdcdc3d78d0328d1f9e4edcdf9']='python.d/mysql.conf'
+ ['76a0c1b21e49850442a43efddb15a81e']='health.d/tcp_orphans.conf'
+ ['76a31091f42f2be1fab3bb56bb7ea400']='health_alarm_notify.conf'
+ ['76edb4cc11935aadaff53129c63457aa']='python.d.conf'
+ ['777f4da70f461ef675bde07fb3644312']='python.d/redis.conf'
+ ['777f55a95c5c25cf6176fece1ebbf4b8']='apps_groups.conf'
+ ['77b256144293ebfabad31779a5326948']='python.d/phpfpm.conf'
+ ['7808ba2ca26bd0642270740cf6a8ee59']='charts.d/mem_apps.conf'
+ ['7830066c46a7e5f9682b8d3f4566b4e5']='python.d/cpufreq.conf'
+ ['78bb08809dffcb62e9bc493840f9c039']='python.d/squid.conf'
+ ['78e0065738394f5bf15023f41d66ed4b']='python.d/squid.conf'
+ ['79a37756869d9b4629285922572d6b9b']='apps_groups.conf'
+ ['7a21ccc76be2968ce5d0b52ec1166788']='python.d.conf'
+ ['7a985528cc9176564640001aa73e3492']='health.d/nginx.conf'
+ ['7aa209fa287c95b3ca04c23681b40770']='health.d/disks.conf'
+ ['7ad46e684775d186251eb71b1e9be530']='charts.d/ap.conf'
+ ['7b3281b6dbdbbc0b48c53fd76033e0db']='health.d/disks.conf'
+ ['7bac18d8d5ff8f117be8d489a21c0c65']='python.d/mysql.conf'
+ ['7cf6402b51e5070f2be3ad6fe059ff89']='charts.d.conf'
+ ['7d8bd884ec26cb35d16c4fc05f969799']='python.d/squid.conf'
+ ['7deb236ec68a512b9bdd18e6a51d76f7']='python.d/mysql.conf'
+ ['7e5fc1644aa7a54f9dbb1bd102521b09']='health.d/memcached.conf'
+ ['7f13631183fbdf79c21c8e5a171e9b34']='health.d/zfs.conf'
+ ['82f1dc0a477a175ae31d7b815411e44e']='health.d/dbengine.conf'
+ ['7fb8184d56a27040e73261ed9c6fc76f']='health_alarm_notify.conf'
+ ['80266bddd3df374923c750a6de91d120']='health.d/apache.conf'
+ ['803a7f9dcb942eeac0fd764b9e3e38ca']='fping.conf'
+ ['80d242d619eb7e91cebfdbf58d79b0f8']='health.d/disks.conf'
+ ['80df37b89e852d585209b8c02bb94312']='python.d/bind_rndc.conf'
+ ['80f109ff293ac94222bf3959432751bd']='health.d/qos.conf'
+ ['81255035f6d53534938085df72cdef23']='health.d/nginx.conf'
+ ['8170ba3ae507cf9322bd60350348552e']='health.d/net.conf'
+ ['81af92c7050873c7de2fb42e0c3f04f4']='python.d/tomcat.conf'
+ ['81f7c857a9a7bcf12f500166bd6c7499']='health.d/linux_power_supply.conf'
+ ['81fd16f29d5f3d422fe1cee82dc8ed9d']='health.d/cpu.conf'
+ ['8213d921b6a8382e27052fb42d81db3d']='python.d/freeradius.conf'
+ ['8214bb8f4b005aa4691fcd38f7331e8f']='health.d/swap.conf'
+ ['830c4e7307b58a4c4bb5034f091e008d']='charts.d/nginx.conf'
+ ['8320bf7600afcaa3d419d268d5563133']='python.d/web_log.conf'
+ ['837480f77ba1a85677a36747fbc2cd2e']='python.d/sensors.conf'
+ ['8422e71761d22e817e3cfcb1befc6080']='health.d/mongodb.conf'
+ ['8425a60ea3d28ed40bb0bac4c3f182e8']='python.d/sensors.conf'
+ ['842b1ad5b89bfa5f421d9c5b72e001a4']='health.d/apache.conf'
+ ['845023f9b4a526aa0e6493756dbe6034']='health.d/squid.conf'
+ ['846ce94bfeeb90c0dc6a89e8d25f1a68']='health.d/named.conf'
+ ['846f6039460aa317f165d91a54cd8b07']='health.d/stiebeleltron.conf'
+ ['8490f690d97adacc4e2096df82e7e8a5']='charts.d/cpufreq.conf'
+ ['86a0d8d5619b5134e2d050805b45d6c3']='python.d/unbound.conf'
+ ['87155bea7383028b0c1846c802cfdd81']='python.d/mdstat.conf'
+ ['871bbeea33b83ea9755600b6d574919c']='python.d/web_log.conf'
+ ['87224d2f2b87646f3c0d38cc1eb30112']='python.d/nsd.conf'
+ ['87615ae5ac2412d853c717383fa53781']='python.d/chrony.conf'
+ ['87642c568093daf3b2c30c5beffe2225']='python.d/elasticsearch.conf'
+ ['8810140ce9c09af1d18b9602c4003904']='health_alarm_notify.conf'
+ ['886975ecb9a4e856151dc71024b122e6']='health.d/apcupsd.conf'
+ ['8891fb423f6b987281d7913bb6c1c024']='health.d/ipc.conf'
+ ['88e3b51b6b3fe8f317df82a2d4fbb990']='python.d.conf'
+ ['88f77865f75c9fb61c97d700bd4561ee']='python.d/mysql.conf'
+ ['8989b5e2f4ef9cd278ef58be0fae4074']='health.d/disks.conf'
+ ['899bcb0b3f4375b0a1280296be930201']='health.d/named.conf'
+ ['89fb3cbb223be4fa0cb676cfa3b07055']='health.d/backend.conf'
+ ['8a1b95d375992d7b11330a0ac46f369c']='health.d/disks.conf'
+ ['8a66a3085ad8892a002ff39b18b2cb07']='python.d/fail2ban.conf'
+ ['8abc7f66746b201b5b0af45c419d53bc']='health.d/bind_rndc.conf'
+ ['8b834a0f343a8e620dbb639270a84cce']='health.d/mdstat.conf'
+ ['8c0f037f8ad506c41acdbc4f9f6cead6']='health_alarm_notify.conf'
+ ['8c1d41e2c88aeca78bc319ed74c8748c']='python.d/phpfpm.conf'
+ ['8d0552371a7c9725a04196fa560813d1']='health.d/cpu.conf'
+ ['8d24873bb25c195026918f15626310ea']='health.d/softnet.conf'
+ ['8d736f551571675244d853bb2f53b3da']='health.d/load.conf'
+ ['8dc0bd0a70b5117454bd5f5b98f91c2c']='health.d/disks.conf'
+ ['8dc6a32b8e2995cbdd527c621a72c4fb']='health.d/ram.conf'
+ ['8ec636a4f96158044d2cec0fd1ff8452']='python.d/rabbitmq.conf'
+ ['8ed596c4f6f85b24a890cfe95f10ce9a']='python.d/ntpd.conf'
+ ['8f4f925c1e97dd164007495ec5135ffc']='health.d/fping.conf'
+ ['8f520e787d995943e61a777c826bddf7']='python.d/litespeed.conf'
+ ['8f7b734ea0f89abf8acbb47c50234477']='health.d/web_log.conf'
+ ['8fd472a854b0996327e8ed3562161182']='health_alarm_notify.conf'
+ ['919911d13901d60a7580f5dfd7fc87bb']='health.d/ram.conf'
+ ['91c377e7d26a1120cfbbd488332f0398']='python.d/dns_query_time.conf'
+ ['91c757ef6be3abdb86906d9dbb9c217a']='fping.conf'
+ ['91cf3b3d42cac969b8b3fd4f531ecfb3']='python.d/squid.conf'
+ ['91e1a9703debbdc64edf124419fdc14b']='python.d/elasticsearch.conf'
+ ['91f0a626c19f76241cadf9dbf28fb5a7']='health.d/beanstalkd.conf'
+ ['92024bbe088e55251665fb666305ff66']='python.d/mysql.conf'
+ ['920574fcfe56d5c9c11a583905e9db62']='health.d/tcp_conn.conf'
+ ['9347bcce0b3574ac5193d43248d2e3cc']='python.d/chrony.conf'
+ ['93c7c00103f63ea3b4eea1951dd16c95']='health_alarm_notify.conf'
+ ['94bb961f83ec724cf86239328f73a3db']='health.d/redis.conf'
+ ['94e567bbefd37db0c55d880ff61188a6']='health_alarm_notify.conf'
+ ['9542f80def48ba105190f6cdaa18248e']='health.d/mysql.conf'
+ ['95a27691df972832a5e7626ae59b0af6']='python.d/portcheck.conf'
+ ['96997c8bf3a65b9eac848cafa8c127d2']='python.d/portcheck.conf'
+ ['978daf0777ffe774e5a9576d33972e97']='python.d/smartd_log.conf'
+ ['97eee7a30e6419df4537242e9d4a719d']='health.d/mysql.conf'
+ ['97f337eb96213f3ede05e522e3743a6c']='python.d/memcached.conf'
+ ['98e4dd6ba71bf76767bc59c63a51b617']='apps_groups.conf'
+ ['98f6f917138949228b9fb88c61e5aea8']='charts.d/cpufreq.conf'
+ ['9962e20641036566279ac800861b7963']='python.d.conf'
+ ['99a3de85d1e7826ed64a5f8576712e5d']='python.d.conf'
+ ['99b06e68f1da5917ae4cf60e901439f6']='health.d/ram.conf'
+ ['99b6030ce25c8fee4598179c0f95fb0b']='health.d/redis.conf'
+ ['99c1617448abbdc493976ab9bda5ce02']='apps_groups.conf'
+ ['9a525125e705ca5a3146b3399be4510a']='python.d/nginx_plus.conf'
+ ['9a89bbdf5d9732b9a29a0aa82714059b']='health.d/dockerd.conf'
+ ['9a8a459a3841b78d4c6ef07428ad2fe1']='health.d/entropy.conf'
+ ['9b6eee7f2febb29efac2b7ea9fcab9be']='charts.d/nut.conf'
+ ['9c0185ceff15415bc59b2ce2c1f04367']='apps_groups.conf'
+ ['9c457056c9ee0d50f9717da647bbd444']='health_alarm_notify.conf'
+ ['9c8ddfa810d83ae58c8614ee5229e66b']='health.d/disks.conf'
+ ['9c981c75bdf4b1637f7113e7e45eb2bf']='health.d/memcached.conf'
+ ['9d304e41e32721224a743f25534263d9']='python.d/retroshare.conf'
+ ['9e0553ebdc21b64295873fc104cfa79d']='python.d.conf'
+ ['9e07d51bb83a38dcc37a39ca92fe4865']='charts.d/opensips.conf'
+ ['9e33f51e56d258e7f4336048edde2f5c']='health.d/httpcheck.conf'
+ ['9eb3326ae2ee9badeaad31d8dd2eaa2b']='python.d/isc_dhcpd.conf'
+ ['a02d14124b19c635c1426cee2e98bac5']='charts.d.conf'
+ ['a03f3e38378385bf87d4c0f81eb1f108']='health.d/tcp_resets.conf'
+ ['a09714b5942cf25a89ec3da1dbc18063']='health.d/ram.conf'
+ ['a0b3a12389c9c56dfe35964b20b59836']='health.d/bind_rndc.conf'
+ ['a0c0ef7ca9671f4b5e797d4276e5c0dd']='health.d/disks.conf'
+ ['a0ee8f351f213c0e8af9eb7a4a09cb95']='apps_groups.conf'
+ ['a1b53a225f225911cd8ac892bba8118b']='python.d/powerdns.conf'
+ ['a1b6dfe312b896b0b1ba471e8ac07f95']='python.d/isc_dhcpd.conf'
+ ['a1bb5823c4926b65ef4b2dae467fc847']='python.d/couchdb.conf'
+ ['a250e12f1ab4c18796fdaff5b0ba8968']='python.d/varnish.conf'
+ ['a2944a309f8ce1a3195451856478d6ae']='python.d.conf'
+ ['a2a647dc492dc2d6ed1f5c0fdc97a96e']='python.d/mongodb.conf'
+ ['a305b400378d6492efd15f9940c2779b']='health.d/softnet.conf'
+ ['a41885acf112563e3446f9d937362c9b']='python.d/chrony.conf'
+ ['a4407787e4beb23a701a8a614dca461d']='health.d/disks.conf'
+ ['a44899a5795bed2863c1d11aa3e85586']='health.d/swap.conf'
+ ['a4a8660728c6afcb528cc6b378897d6b']='health.d/squid.conf'
+ ['a4be524cc5b7192878c292a17c767c28']='health.d/redis.conf'
+ ['a4e8c35f8973049f4db5c8900e9a2354']='health_alarm_notify.conf'
+ ['a5114d5b0d3816dba75024b9444f4b40']='health.d/disks.conf'
+ ['a5134d7cfbe27f5791e788c2add51abb']='apps_groups.conf'
+ ['a55133f1b0be0a4255057849dd451b09']='health_alarm_notify.conf'
+ ['a6d5ce2572bf7a1dce9e545fcd29273e']='health.d/apache.conf'
+ ['a70e14bda17b076d2486232355652ae6']='apps_groups.conf'
+ ['a71d9082410200bf92e823675d78121c']='python.d/retroshare.conf'
+ ['a731b7b164f42717c1c9a778ee637ff3']='health.d/memcached.conf'
+ ['a7320c6f26191b9599ec3bc4be007a93']='health.d/swap.conf'
+ ['a752e51d923e15add4a11fa8f3be935a']='health_email_recipients.conf'
+ ['a78d59c2ad14a17b9b8c7fa5d796b427']='python.d.conf'
+ ['a7cceeafb1e6ef1ead503ab65f687902']='apps_groups.conf'
+ ['a8167dafeac0b66696a1d9b08e815cda']='health.d/disks.conf'
+ ['a837986be634fd7648bcdf939019424a']='apps_groups.conf'
+ ['a89c516a1144435a88decf25509318ac']='health_alarm_notify.conf'
+ ['a8bb4e1d0525f59692778ad8f675a77a']='python.d/example.conf'
+ ['a8feb36776005bf419c90278787a1be8']='health.d/entropy.conf'
+ ['a9150a0c61e1b360cf8c265ea2413d02']='python.d/couchdb.conf'
+ ['a94af1c808aafdf00537d85ff2197ec8']='python.d/exim.conf'
+ ['a9827518560ae7e811ef74e08cd4d3a6']='charts.d/load_average.conf'
+ ['a9ab68845db2fb695b7060273a6ac68e']='health_alarm_notify.conf'
+ ['a9cd91675467c5426f5b51c47602c889']='apps_groups.conf'
+ ['aa4bee249bfc0c4a88ac8c2ffb97aa0d']='health.d/squid.conf'
+ ['aa620b7017c8b864d80aa6c8acab01cf']='python.d/smartd_log.conf'
+ ['aa6c4a270e6276f2deddf127ee1a24f6']='statsd.d/example.conf'
+ ['aa8b57a733c2035917acf81a8ebdfbe7']='health.d/haproxy.conf'
+ ['aac44691a1cf95fa8f8990a79bab4ce1']='python.d/web_log.conf'
+ ['ab3902bf769ed35219691c95a3954ebb']='python.d/portcheck.conf'
+ ['abaf2e021f9f6ee5d1c4e4726f47348e']='health.d/ipc.conf'
+ ['abe1a80ac6d6f97bd324e72f31e8256e']='health.d/ram.conf'
+ ['ac8a91f0297bf7ebb8970f8cae4b3477']='health.d/ipc.conf'
+ ['acaa6731a272f6d251afb357e99b518f']='apps_groups.conf'
+ ['ad15b251b93f8b16bb33ec508f44a598']='health.d/netfilter.conf'
+ ['ade389c1b6efe0cff47c33e662731f0a']='python.d/squid.conf'
+ ['adf69efd83cb5079d0a5746e3568032f']='charts.d/exim.conf'
+ ['ae5ac0a3521e50aa6f6eda2a330b4075']='python.d/example.conf'
+ ['aee501b7f9b122b962521c45893371bb']='python.d/smartd_log.conf'
+ ['af12051cf57dd4e484ef8e64502b7549']='health.d/net.conf'
+ ['af14667ee7993acea810f6d50923bdc9']='health.d/web_log.conf'
+ ['af44cc53aa2bc5cc8935667119567522']='python.d.conf'
+ ['afdae4646c755ff2d117527fbf761c8e']='health.d/disks.conf'
+ ['b06d1063bc2200bb2d864021fa1a9cbd']='python.d.conf'
+ ['b07eebc6f58d19721ac069171b911d2a']='health_alarm_notify.conf'
+ ['b0c59b2bd7a10f6a3f2be6b4b27857db']='health.d/haproxy.conf'
+ ['b0f0a0ac415e4b1a82187b80d211e83b']='python.d/mysql.conf'
+ ['b181dcca01a258d9792ad703583baed2']='statsd.d/example.conf'
+ ['b185914d4f795e1732273dc4c7a35845']='health.d/memory.conf'
+ ['b210982cac9accfe43173cef5f46b361']='health.d/beanstalkd.conf'
+ ['b27f10a38a95edbbec20f44a4728b7c4']='python.d.conf'
+ ['b28c77dceeb398ca4ceec44c646f5431']='stream.conf'
+ ['b32164929eda7449a9677044e11151bf']='python.d.conf'
+ ['b3d48935ab7f44a57d40ad349df0033d']='python.d/postgres.conf'
+ ['b3fc4749b132e55ac0d3a0f92859237e']='health.d/tcp_resets.conf'
+ ['b44e33ba5c7a7306b467ac9c9b698895']='health.d/bcache.conf'
+ ['b4825f731cc7eb03b374eade14a453c1']='health.d/net.conf'
+ ['b5246eed059e33e0903a819fa5460ce0']='python.d/ipfs.conf'
+ ['b544e5934ac79a9548b5af6756c042a6']='apps_groups.conf'
+ ['b5b5a8d6d991fb1cef8d80afa23ba114']='python.d/cpufreq.conf'
+ ['b636e5e603f9d93e52c7577ac8c6bf0c']='health.d/entropy.conf'
+ ['b68706bb8101ef85192db92f865a5d80']='health_alarm_notify.conf'
+ ['b6ee82968de8fbf974c0d35b55fe6fae']='python.d/web_log.conf'
+ ['b735732fbe993d8191d6b3317082efa2']='health.d/qos.conf'
+ ['b75e2d3e69c1fe89c2f900bc201f7390']='health_alarm_notify.conf'
+ ['b7d769ce86a7aebba01315da5c0799e6']='health.d/ram.conf'
+ ['b81b8f331161b0d48e03f6fbf6b6d062']='health.d/memcached.conf'
+ ['b846ca1f99fa6a65303b58186f47d7a4']='python.d/squid.conf'
+ ['b854fcb711ee4d052741de5fc888682e']='health.d/backend.conf'
+ ['b8969be5b3ceb4a99477937119bd4323']='python.d.conf'
+ ['b8aff60806fb6829a4e72a824e655375']='health.d/beanstalkd.conf'
+ ['b8b87574fd496a66ede884c5336493bd']='python.d/phpfpm.conf'
+ ['b8ca1449d142b7f1cd202d875d400882']='health.d/apcupsd.conf'
+ ['b915126262d08aa9da81de539a58a3fb']='python.d/redis.conf'
+ ['ba11ea2d2f632b2de4b1224bcdc54f07']='python.d/smartd_log.conf'
+ ['bb51112d01ff20053196a57632df8962']='apps_groups.conf'
+ ['bba2f3886587f137ea08a6e63dd3d376']='python.d.conf'
+ ['bcaba2347951b301127fd502a219b26a']='python.d/apache.conf'
+ ['bcd94c4fa2f89c710ff807de061ab11c']='health.d/net.conf'
+ ['bd12233b529e3066d5b4a78da20c495e']='python.d/ntpd.conf'
+ ['bda5517ea01640cfdfa0a27549619d6a']='health.d/memcached.conf'
+ ['bdec19a255367f22b6fb652d0bef6bad']='python.d/httpcheck.conf'
+ ['bf66f113b2dd8d8fb444cbd5650f284c']='health_alarm_notify.conf'
+ ['bfa2f469e83cf2961963841e143049e6']='health.d/tcp_listen.conf'
+ ['bfd35a87c77c3a1dbe218fd02b529208']='charts.d/example.conf'
+ ['bff38dfe6c879f93ac49b77990fce1cc']='python.d/ipfs.conf'
+ ['c004430f55310ae9ed489c4905ed02cb']='charts.d/apache.conf'
+ ['c0385cdf9e87aca01f5dee2a5d89c467']='health_alarm_notify.conf'
+ ['c080e006f544c949baca33cc24a9c126']='health_alarm_notify.conf'
+ ['c0c4c63384ef408f0715331e7615aa60']='python.d/ceph.conf'
+ ['c132d2e257fc4df2925be7ad75100d5b']='health.d/entropy.conf'
+ ['c1a7e634b5b8aad523a0d115a93379cd']='health.d/memcached.conf'
+ ['c1d014ffaebfa0952968aeaf330e5337']='python.d.conf'
+ ['c30ee008173ba9f77adfcacbf138143e']='python.d/ovpn_status_log.conf'
+ ['c3296c08260bcd556e74711c820817be']='health.d/cpu.conf'
+ ['c3661b68232e06de90bb5e63e725b8b6']='health_alarm_notify.conf'
+ ['c45ab106725e94615bccf8be4b136d0f']='python.d.conf'
+ ['c482676558420c4b47162651a24b8baf']='python.d/httpcheck.conf'
+ ['c4f203b4b12c40640dd578af16a49bb1']='health.d/portcheck.conf'
+ ['c61948101e0e6846679682794ee48c5b']='python.d/nginx.conf'
+ ['c6403d8b1bcfa52d3abb941be155fc03']='python.d.conf'
+ ['c6b9f31e14adca433f82054f62388c47']='python.d/web_log.conf'
+ ['c84fd3292710091802e443c8e688dee1']='health_alarm_notify.conf'
+ ['c878060687b85c46006e9041f3632d88']='health_alarm_notify.conf'
+ ['c88fb430f35b7d8f08775d84debffbd2']='python.d/phpfpm.conf'
+ ['c8e339491a83df22decbdf5f1f8a037f']='python.d.conf'
+ ['c94cb4f4eeaa13c1dcee6248deb01829']='python.d/postgres.conf'
+ ['c9a16df512b4a9ce7fa65f5a69bda20a']='python.d/web_log.conf'
+ ['c9b792755de59d842ba95f8c315d94c8']='health.d/swap.conf'
+ ['c9d102c49da0cb57886c42d1016fa163']='python.d/httpcheck.conf'
+ ['ca026d7c779f0a7cb7787713c5be5c47']='charts.d.conf'
+ ['ca08a9b18d38ae0a0f5081a7cdc96863']='health.d/swap.conf'
+ ['ca0eb92bdd3de67582ea6db37462895f']='health.d/tcp_resets.conf'
+ ['ca249db7a0637d55abb938d969f9b486']='python.d/postfix.conf'
+ ['ca761cbf8a28317abe526ab3c2428472']='health.d/portcheck.conf'
+ ['ca9e52b3ee3c71d3d042dc531753a1fd']='apps_groups.conf'
+ ['cad263c67f779029a663b620d6c34704']='charts.d/libreswan.conf'
+ ['cb178b15427274d7def5b14bc4c09441']='health.d/net.conf'
+ ['cb60badf376d246ad8ec9d3f524db430']='health.d/disks.conf'
+ ['cb7f80cd2768c649d7448e01f8aa6579']='python.d.conf'
+ ['cc4d31a0d1ff9c339892c1f8a0c5fcd3']='charts.d/load_average.conf'
+ ['cca26b4d2384043f1737e0ed4a995600']='python.d/bind_rndc.conf'
+ ['ccde91d209aeb02c4a6be0e43a8d92b3']='health.d/apache.conf'
+ ['cce5176664d29d137fa7575b77de01e4']='health.d/tcp_resets.conf'
+ ['cd08e5534c94bf1f2cd28396c76b8bbc']='health.d/ram.conf'
+ ['cd15a9a77a46d66ca0beb55f2acb7538']='health.d/mysql.conf'
+ ['cd9a7de356d6424c4a71d87053726c86']='python.d/bind_rndc.conf'
+ ['cdd504812ff93073c57d02209d4d0f69']='health.d/cpu.conf'
+ ['cde652b15742e377e98e79fb9eb2acab']='health_alarm_notify.conf'
+ ['ce0fa3485a0d8d3aa80b25ab0c70cc5a']='charts.d/apcupsd.conf'
+ ['ce2e8768964a936f58c4c2144aee8a01']='health_alarm_notify.conf'
+ ['ce3b65eac6c472b21905f7f72104f4c9']='python.d/nginx.conf'
+ ['ce937f8b9ab7820b61ce9fcde6b946e8']='charts.d/nut.conf'
+ ['cf2c9096b3a8c506a3ec76fa52574395']='charts.d/phpfpm.conf'
+ ['cf46545065f7698c4d529fdc77955274']='python.d/puppet.conf'
+ ['cf48dfd828af70bea04db7a809f94358']='health.d/haproxy.conf'
+ ['cf8b87ede2d3233b6f55f4690af7fb08']='python.d/smartd_log.conf'
+ ['cfecf298bdafaa7e0a3a263548e82132']='python.d/sensors.conf'
+ ['d11711b3647bc2bdd0292dd7deebbeb1']='health.d/net.conf'
+ ['d1596fe068c8674efade49a4a8e22b5d']='health.d/isc_dhcpd.conf'
+ ['d162b7465a56151312e60151c1d74fba']='health.d/squid.conf'
+ ['d1e79707cd9b51a14288e8dd40694fcc']='fping.conf'
+ ['d297104e43ce2b544003271181e26ff6']='python.d/cpufreq.conf'
+ ['d29c5fa5faf74b86d01c2270a79388d8']='health.d/disks.conf'
+ ['d2b2ad30e277a69d8713e620dabc18bc']='python.d/phpfpm.conf'
+ ['d3bccdfe06c099673592d5375994c329']='charts.d/hddtemp.conf'
+ ['d3f397ead7f2ac8f88a99d7c5b8cff1d']='python.d/dovecot.conf'
+ ['d41d8cd98f00b204e9800998ecf8427e']='python.d/portcheck.conf'
+ ['d4adcebadc4c86332df247922b85aadc']='python.d/freeradius.conf'
+ ['d54f9652d6510d04339682d97cd7b6e4']='python.d/httpcheck.conf'
+ ['d55bdb83b9ff606852f6a97c1430258c']='health.d/ram.conf'
+ ['d55be5bb5e108da1e7645da007c53cd4']='python.d.conf'
+ ['d56c28ece8354850011f213d94d02fe0']='python.d/hddtemp.conf'
+ ['d5dab509d8792f795bece27de39dd476']='health.d/mysql.conf'
+ ['d69eba15d3e968187a938a7b98e22dda']='python.d.conf'
+ ['d6cd34c96e47a8a63732a6f1512f5c39']='python.d/ovpn_status_log.conf'
+ ['d712df81b17971884443a4a9bc996c9e']='health_alarm_notify.conf'
+ ['d74dc63fbe631dab9a2ff1b0f5d71719']='python.d/hddtemp.conf'
+ ['d7e0bd12d4a60a761dcab3531a841711']='python.d/phpfpm.conf'
+ ['d86e0502e394e0a16c0ca574db462653']='health.d/megacli.conf'
+ ['d8dc489e32f7114c6298fce94e86a8ef']='health.d/entropy.conf'
+ ['d9036091e2232fc2b8bfa8c7484dea28']='apps_groups.conf'
+ ['d9258e671d0d0b6498af1ce16ef030d2']='apps_groups.conf'
+ ['d9fa0290cdfe4153188bb52dd31191df']='apps_groups.conf'
+ ['da29d2ab1ab7b8fda189960c840e5144']='health.d/swap.conf'
+ ['dad303c5cca7a69345811a01a74f5892']='health.d/net.conf'
+ ['db305937e884c9f871bb076d5ed2946f']='health_alarm_notify.conf'
+ ['dc0d2b96378f290eec3fcf98b89ad824']='python.d/cpufreq.conf'
+ ['dc9c2a66778623a759706c14c3d91983']='health.d/net.conf'
+ ['dd220677c42c487549952048ee1f7750']='python.d/postgres.conf'
+ ['dd221c29dfb7c5586fc906748aa7c831']='health.d/tcp_listen.conf'
+ ['dd7764507804a2296bfd091a58ad4ad7']='health.d/memcached.conf'
+ ['dd8254ef74509a3e38cb2838e30f7e63']='health.d/disks.conf'
+ ['ddda2bb1c88be03b637d3285406f7910']='health.d/named.conf'
+ ['dddc4f93e6187fe4220eb6bf5e20f095']='health.d/ram.conf'
+ ['de02f899a61f21b86adb646940f0bcae']='health.d/net.conf'
+ ['de581daa11e267a583776bd8b8179884']='python.d/postgres.conf'
+ ['de5fe159e14b481d6bd69856eaddd242']='health_alarm_notify.conf'
+ ['def883f35986c9d25de63b1a8e7d0f46']='health.d/entropy.conf'
+ ['df381f3a7ca9fb2b4b43ae7cb7a4c492']='python.d/mysql.conf'
+ ['df7e8044902b5e155fad8430c2ddcfa8']='health.d/fping.conf'
+ ['dfd5431b11cf2f3852a40d390c1d5a92']='python.d/varnish.conf'
+ ['e0242003fd2e3f9ac1b9314e802ada79']='python.d/hddtemp.conf'
+ ['e0ba3bc216ffc9933b4741dbb6b1f8c8']='health.d/web_log.conf'
+ ['e0ffc0c34424b35666fddf7f61e05def']='health.d/tcp_resets.conf'
+ ['e100d98f3ed1eff59678f035b3b8daf2']='python.d/beanstalk.conf'
+ ['e12ab150198467aaa56b1091ba219587']='charts.d/nut.conf'
+ ['e1822c48067954e26649f7ad5fdb71f5']='health.d/softnet.conf'
+ ['e1a8bf99d36683c10225100f207a2b59']='python.d/web_log.conf'
+ ['e22f30680148a29d9738bd4bfe8b252c']='health_alarm_notify.conf'
+ ['e2e7adf66a28b8277f55e246b007f25a']='python.d/ntpd.conf'
+ ['e2f3388c06726154c10ec22bad5bc7ec']='fping.conf'
+ ['e3023092e3b2bbb5351e0fe6682f4fe9']='health_alarm_notify.conf'
+ ['e3112d8e06fa77888aab02e8fcd22e25']='apps_groups.conf'
+ ['e3996f70a4b09315b4a64e3df7d34d43']='python.d/rabbitmq.conf'
+ ['e3d100c2d0347c08efbf6245e05620c6']='python.d/fail2ban.conf'
+ ['e3e0c742427c9609ce923e845a0c8532']='health.d/ceph.conf'
+ ['e3e5bc57335c489f01b8559f5c70e112']='python.d/squid.conf'
+ ['e40947d22f7ed5359f12fc89e3512963']='python.d/dovecot.conf'
+ ['e445de5a4d6953bddec36d85b1b2771e']='python.d/linux_power_supply.conf'
+ ['e449e5582279742496550df14b6fca95']='health.d/entropy.conf'
+ ['e4ed13f996434ac17b40a2228c96283b']='python.d/tomcat.conf'
+ ['e5f32f54d6d6728f21f9ac26f37d6573']='python.d/example.conf'
+ ['e707ad89a146004ae281d66a4e01e5c1']='health.d/load.conf'
+ ['e70a7ee4999f30c6ceb75f31088a3a34']='python.d/powerdns.conf'
+ ['e734c5951a8764d4d9de046dd7cf7407']='health.d/softnet.conf'
+ ['e7ae3f2b00b9e5178acfe4f5e46228b7']='health.d/tcp_resets.conf'
+ ['e7bc22a1942cffbd2b1b0cfd119ee328']='health.d/ipfs.conf'
+ ['e8656d72dbd3b6fe603048ded751499a']='python.d/memcached.conf'
+ ['e8ec8046c7007af6ca3e8c51e62c99f8']='health.d/disks.conf'
+ ['ea031c1c0c36edee3bd08fae559c4203']='health_alarm_notify.conf'
+ ['ea1a96c42ad464c354fb250e3408c3e8']='stream.conf'
+ ['eaa7beb935cae9c48a40fb934eb105a7']='health.d/web_log.conf'
+ ['eb5168f0b516bc982aac45e59da6e52e']='health.d/nginx.conf'
+ ['eb748d6fb69d11b0d29c5794657e206c']='health.d/qos.conf'
+ ['eb9fedc3c1dface77312d9bf48f673a8']='stream.conf'
+ ['ebd0612ccc5807524ebb2b647e3e56c9']='apps_groups.conf'
+ ['eca875b2e4402ee07972589bad003e01']='python.d/traefik.conf'
+ ['ecb6c01fae255d369748406945a50435']='apps_groups.conf'
+ ['ecd3aa97e2581f88eb466d6612690ef2']='charts.d/nginx.conf'
+ ['ed43efac299c31f8fd5e2abccff30071']='python.d/samba.conf'
+ ['ed80e6b2cfc8b08adea7027fc03daa68']='python.d.conf'
+ ['edb48efc8f446624001e07d04f6cad1a']='apps_groups.conf'
+ ['ee5343881744e6a97e6ee5cdd329cfb8']='health.d/retroshare.conf'
+ ['eee974cea7534aeed2d38bcf0edf3f9e']='python.d/springboot.conf'
+ ['ef067629c7456cb934f110ce15200131']='stream.conf'
+ ['ef1861bf5725d91e773cbdba05687597']='python.d.conf'
+ ['ef9916ea144878a9f37cbb6b1b29da10']='health.d/squid.conf'
+ ['f075be84c5bfac7e34de2a091841360c']='statsd.d/example.conf'
+ ['f0a86c5bae3c4b32b266dacbf74ca4a3']='python.d/web_log.conf'
+ ['f1446cb3f1a905ee06defa2aa15ee806']='python.d/web_log.conf'
+ ['f1682835e3414f60284c13bf1662e50f']='health.d/web_log.conf'
+ ['f1f114647ed185c4812c361b1d870b44']='python.d/sensors.conf'
+ ['f2622abcee86b514976a053b528553d4']='python.d/web_log.conf'
+ ['f2f1b8656f5011e965ac45b818cf668d']='apps_groups.conf'
+ ['f3c56a769ffdf811ec13467d8f7cd3c0']='python.d/apache.conf'
+ ['f42389e5497a28205ba6fef4f716db4f']='python.d/nginx.conf'
+ ['f42df9f13abfae2426519c6728b34882']='charts.d/example.conf'
+ ['f4609cbd5e748f41ad4f1ea3d2dcfde0']='python.d.conf'
+ ['f4c5d88c34d3fb853498124177cc77f1']='python.d.conf'
+ ['f5736e0b2945182cb659cb0713eff923']='apps_groups.conf'
+ ['f66e5236ba1245bb2e5fd99191f114c6']='charts.d/hddtemp.conf'
+ ['f68ac0fca6b4ffc96097779344cabac6']='health.d/tcp_listen.conf'
+ ['f6c6656f900ff52d159dca12d624016a']='python.d/postgres.conf'
+ ['f72e44a305567c9b21a244ebd6da6800']='health_alarm_notify.conf'
+ ['f7401a6e7c7d4fe2e0e2be7f7f523275']='health.d/web_log.conf'
+ ['f7a99e94231beda85c6254912d8d31c1']='python.d/tomcat.conf'
+ ['f82924563e41d99cdae5431f0af69155']='python.d.conf'
+ ['f8c30f22df92765e2c0fab3c8174e2fc']='health.d/memcached.conf'
+ ['f8dade4484f1b6a48655388502df7d5a']='health_alarm_notify.conf'
+ ['f8e7d23a83fc8ee58f403da7bdbe7f8a']='python.d/phpfpm.conf'
+ ['f96acba4b14b0c1b50d0187a04416151']='health_alarm_notify.conf'
+ ['f9be549a849d023595d19d5d74263e0f']='health.d/tcp_resets.conf'
+ ['fa4396513b358d6ec6a7f5bfb08439b8']='health.d/net.conf'
+ ['fb1c75159f855f4b4671835f5bca1ef6']='python.d.conf'
+ ['fbdb6f5d3906d3d8ea4e28f6ba6965a6']='python.d/go_expvar.conf'
+ ['fc11bd9255ac382f442f31c1f1a32532']='health_alarm_notify.conf'
+ ['fc40b83f173bc4676d686867a8369a62']='python.d/dns_query_time.conf'
+ ['fc64f44eb19a8b5a88ba8dc28de355f6']='python.d/puppet.conf'
+ ['fc987459f82e251e31c41d822e5e8202']='python.d/nsd.conf'
+ ['fd3164e6e8cb6726706267eae49aa082']='health_alarm_notify.conf'
+ ['fdd11640ba626cc2064c2fe3ea3eee4c']='health.d/cpu.conf'
+ ['fde44f62c8d7e52f09705cd273fae6b1']='charts.d/tomcat.conf'
+ ['fdea185e0e52b459b48852aa37f20e0f']='apps_groups.conf'
+ ['fe069e4d6579ecdda7f36ac2318ffefc']='python.d/exim.conf'
+ ['fe2b15369de13b83b18e2ff5c7594a57']='python.d/monit.conf'
+ ['fe478efe2e721724edb1fe2ef1addf93']='health_alarm_notify.conf'
+ ['feb8bcf828aa2529a7ee4a140feeb12d']='health.d/net.conf'
+ ['ff1b3d8ae8b2149c711d8da9b7a9c4bd']='health_alarm_notify.conf'
+ ['ff3f0a9b1bf488a5075850cc16de3c26']='python.d/monit.conf'
+ ['ff940c5396f16d05deb5c5859832ee48']='health.d/swap.conf'
+)
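+# End of the md5sum-to-stock-filename map: the installer compares the md5sum of
+# an already installed config file against these signatures, and a match means
+# the file is an unmodified stock copy that is safe to replace on upgrade.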
diff --git a/configure.ac b/configure.ac
new file mode 100644
index 0000000..252cd3d
--- /dev/null
+++ b/configure.ac
@@ -0,0 +1,1625 @@
+#
+# Copyright (C) 2015 Alon Bar-Lev <alon.barlev@gmail.com>
+# SPDX-License-Identifier: GPL-3.0-or-later
+#
+AC_PREREQ(2.60)
+
+# We do not use m4_esyscmd_s, so that older autoconf versions remain supported.
+define([VERSION_STRING], m4_esyscmd([git describe 2>/dev/null | tr -d '\n']))
+define([VERSION_FROM_FILE], m4_esyscmd([cat packaging/version | tr -d '\n']))
+m4_ifval(VERSION_STRING, [], [define([VERSION_STRING], VERSION_FROM_FILE)])
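+# Example: a git checkout yields something like "v1.29.3-42-gabc1234" from
+# `git describe`, while a release tarball (which has no git metadata) falls
+# back to the contents of packaging/version.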
+
+AC_INIT([netdata], VERSION_STRING[])
+
+AM_MAINTAINER_MODE([disable])
+if test x"$USE_MAINTAINER_MODE" = xyes; then
+AC_MSG_NOTICE(***************** MAINTAINER MODE *****************)
+fi
+
+PACKAGE_RPM_VERSION="VERSION_STRING"
+AC_SUBST([PACKAGE_RPM_VERSION])
+
+# -----------------------------------------------------------------------------
+# autoconf initialization
+
+AC_CONFIG_AUX_DIR([.])
+AC_CONFIG_HEADERS([config.h])
+AC_CONFIG_MACRO_DIR([build/m4])
+AC_CONFIG_SRCDIR([daemon/main.c])
+define([AUTOMAKE_INIT_OPTIONS], [tar-pax subdir-objects])
+m4_ifdef([AM_SILENT_RULES], [
+ define([AUTOMAKE_INIT_OPTIONS], [tar-pax silent-rules subdir-objects])
+ ])
+AM_INIT_AUTOMAKE(AUTOMAKE_INIT_OPTIONS)
+m4_ifdef([AM_SILENT_RULES], [
+ AM_SILENT_RULES([yes])
+ ])
+AC_CANONICAL_HOST
+AC_PROG_CC
+AC_PROG_CC_C99
+AM_PROG_CC_C_O
+AC_PROG_CXX
+AC_PROG_INSTALL
+PKG_PROG_PKG_CONFIG
+AC_USE_SYSTEM_EXTENSIONS
+
+# -----------------------------------------------------------------------------
+# configurable options
+
+AC_ARG_ENABLE(
+ [plugin-nfacct],
+ [AS_HELP_STRING([--enable-plugin-nfacct], [enable nfacct plugin @<:@default autodetect@:>@])],
+ ,
+ [enable_plugin_nfacct="detect"]
+)
+AC_ARG_ENABLE(
+ [plugin-freeipmi],
+ [AS_HELP_STRING([--enable-plugin-freeipmi], [enable freeipmi plugin @<:@default autodetect@:>@])],
+ ,
+ [enable_plugin_freeipmi="detect"]
+)
+AC_ARG_ENABLE(
+ [plugin-cups],
+ [AS_HELP_STRING([--enable-plugin-cups], [enable cups plugin @<:@default autodetect@:>@])],
+ ,
+ [enable_plugin_cups="detect"]
+)
+AC_ARG_ENABLE(
+ [plugin-xenstat],
+ [AS_HELP_STRING([--enable-plugin-xenstat], [enable xenstat plugin @<:@default autodetect@:>@])],
+ ,
+ [enable_plugin_xenstat="detect"]
+)
+AC_ARG_ENABLE(
+ [backend-kinesis],
+ [AS_HELP_STRING([--enable-backend-kinesis], [enable kinesis backend @<:@default autodetect@:>@])],
+ ,
+ [enable_backend_kinesis="detect"]
+)
+AC_ARG_ENABLE(
+ [exporting-pubsub],
+ [AS_HELP_STRING([--enable-exporting-pubsub], [enable pubsub exporting connector @<:@default autodetect@:>@])],
+ ,
+ [enable_exporting_pubsub="detect"]
+)
+AC_ARG_ENABLE(
+ [backend-prometheus-remote-write],
+ [AS_HELP_STRING([--enable-backend-prometheus-remote-write], [enable prometheus remote write backend @<:@default autodetect@:>@])],
+ ,
+ [enable_backend_prometheus_remote_write="detect"]
+)
+AC_ARG_ENABLE(
+ [backend-mongodb],
+ [AS_HELP_STRING([--enable-backend-mongodb], [enable mongodb backend @<:@default autodetect@:>@])],
+ ,
+ [enable_backend_mongodb="detect"]
+)
+AC_ARG_ENABLE(
+ [pedantic],
+ [AS_HELP_STRING([--enable-pedantic], [enable pedantic compiler warnings @<:@default disabled@:>@])],
+ ,
+ [enable_pedantic="no"]
+)
+AC_ARG_ENABLE(
+ [accept4],
+ [AS_HELP_STRING([--disable-accept4], [System does not have accept4 @<:@default autodetect@:>@])],
+ ,
+ [enable_accept4="detect"]
+)
+AC_ARG_WITH(
+ [webdir],
+ [AS_HELP_STRING([--with-webdir], [location of webdir @<:@PKGDATADIR/web@:>@])],
+ [webdir="${withval}"],
+ [webdir="\$(pkgdatadir)/web"]
+)
+AC_ARG_WITH(
+ [libcap],
+ [AS_HELP_STRING([--with-libcap], [build with libcap @<:@default autodetect@:>@])],
+ ,
+ [with_libcap="detect"]
+)
+AC_ARG_WITH(
+ [zlib],
+ [AS_HELP_STRING([--without-zlib], [build without zlib @<:@default enabled@:>@])],
+ ,
+ [with_zlib="yes"]
+)
+AC_ARG_WITH(
+ [math],
+ [AS_HELP_STRING([--without-math], [build without math @<:@default enabled@:>@])],
+ ,
+ [with_math="yes"]
+)
+AC_ARG_WITH(
+ [user],
+ [AS_HELP_STRING([--with-user], [use this user to drop privilege @<:@default nobody@:>@])],
+ ,
+ [with_user="nobody"]
+)
+AC_ARG_ENABLE(
+ [x86-sse],
+ [AS_HELP_STRING([--disable-x86-sse], [SSE/SSE2 optimizations on x86 @<:@default enabled@:>@])],
+ ,
+ [enable_x86_sse="yes"]
+)
+AC_ARG_ENABLE(
+ [lto],
+ [AS_HELP_STRING([--disable-lto], [Link Time Optimizations @<:@default autodetect@:>@])],
+ ,
+ [enable_lto="detect"]
+)
+AC_ARG_ENABLE(
+ [https],
+ [AS_HELP_STRING([--enable-https], [Enable SSL support @<:@default autodetect@:>@])],
+ ,
+ [enable_https="detect"]
+)
+AC_ARG_ENABLE(
+ [dbengine],
+ [AS_HELP_STRING([--disable-dbengine], [disable netdata dbengine @<:@default autodetect@:>@])],
+ ,
+ [enable_dbengine="detect"]
+)
+AC_ARG_ENABLE(
+ [jsonc],
+ [AS_HELP_STRING([--enable-jsonc], [Enable JSON-C support @<:@default autodetect@:>@])],
+ ,
+ [enable_jsonc="detect"]
+)
+AC_ARG_ENABLE(
+ [ebpf],
+ [AS_HELP_STRING([--disable-ebpf], [Disable eBPF support @<:@default autodetect@:>@])],
+ ,
+ [enable_ebpf="detect"]
+)
+AC_ARG_WITH(
+ [bundled-lws],
+ [AS_HELP_STRING([--with-bundled-lws=DIR], [Use a specific Libwebsockets static library @<:@default use system library@:>@])],
+ [
+ with_bundled_lws="yes"
+ bundled_lws_dir="${withval}"
+ ],
+ [with_bundled_lws="no"]
+)
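+# Illustrative invocation: every tri-state option above defaults to "detect"
+# (build the feature only when its dependencies are found); forcing a feature
+# on makes a missing dependency a hard configure error, e.g.:
+#   ./configure --enable-plugin-freeipmi --disable-cloud --with-user=netdata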
+
+# -----------------------------------------------------------------------------
+# Enforce building with C99, bail early if we can't.
+test "${ac_cv_prog_cc_c99}" = "no" && AC_MSG_ERROR([Netdata rquires a compiler that supports C99 to build])
+
+# -----------------------------------------------------------------------------
+# Check if cloud is enabled and if the functionality is available
+
+AC_ARG_ENABLE(
+ [cloud],
+ [AS_HELP_STRING([--disable-cloud],
+ [Disables all cloud functionality])],
+ [ enable_cloud="$enableval" ],
+ [ enable_cloud="detect" ]
+)
+
+if test "${enable_cloud}" = "no"; then
+ AC_DEFINE([DISABLE_CLOUD], [1], [disable netdata cloud functionality])
+fi
+
+# -----------------------------------------------------------------------------
+# netdata required checks
+
+# fails on CentOS 6
+#AX_CHECK_ENABLE_DEBUG()
+
+AX_GCC_FUNC_ATTRIBUTE([returns_nonnull])
+AX_GCC_FUNC_ATTRIBUTE([malloc])
+AX_GCC_FUNC_ATTRIBUTE([noreturn])
+AX_GCC_FUNC_ATTRIBUTE([noinline])
+AX_GCC_FUNC_ATTRIBUTE([format])
+AX_GCC_FUNC_ATTRIBUTE([warn_unused_result])
+
+AC_CHECK_TYPES([struct timespec, clockid_t], [], [], [[#include <time.h>]])
+AC_SEARCH_LIBS([clock_gettime], [rt posix4])
+AC_CHECK_FUNCS([clock_gettime])
+AC_CHECK_FUNCS([sched_setscheduler sched_getscheduler sched_getparam sched_get_priority_min sched_get_priority_max getpriority setpriority nice])
+AC_CHECK_FUNCS([recvmmsg])
+
+AC_TYPE_INT8_T
+AC_TYPE_INT16_T
+AC_TYPE_INT32_T
+AC_TYPE_INT64_T
+AC_TYPE_UINT8_T
+AC_TYPE_UINT16_T
+AC_TYPE_UINT32_T
+AC_TYPE_UINT64_T
+AC_C_INLINE
+AC_FUNC_STRERROR_R
+AC_C__GENERIC
+AC_C___ATOMIC
+# AC_C_STMT_EXPR
+AC_CHECK_SIZEOF([void *])
+AC_CANONICAL_HOST
+AC_HEADER_MAJOR
+AC_HEADER_RESOLV
+
+AC_CHECK_HEADERS_ONCE([sys/prctl.h])
+AC_CHECK_HEADERS_ONCE([sys/vfs.h])
+AC_CHECK_HEADERS_ONCE([sys/statfs.h])
+AC_CHECK_HEADERS_ONCE([sys/statvfs.h])
+AC_CHECK_HEADERS_ONCE([sys/mount.h])
+
+if test "${enable_accept4}" != "no"; then
+ AC_CHECK_FUNCS_ONCE(accept4)
+fi
+
+# -----------------------------------------------------------------------------
+# operating system detection
+
+AC_MSG_CHECKING([operating system])
+case "$host_os" in
+freebsd*)
+ build_target=freebsd
+ build_target_id=2
+ CFLAGS="${CFLAGS} -I/usr/local/include -L/usr/local/lib"
+ ;;
+darwin*)
+ build_target=macos
+ build_target_id=3
+ LDFLAGS="${LDFLAGS} -framework CoreFoundation -framework IOKit"
+ ;;
+*)
+ build_target=linux
+ build_target_id=1
+ ;;
+esac
+
+AM_CONDITIONAL([FREEBSD], [test "${build_target}" = "freebsd"])
+AM_CONDITIONAL([MACOS], [test "${build_target}" = "macos"])
+AM_CONDITIONAL([LINUX], [test "${build_target}" = "linux"])
+AC_MSG_RESULT([${build_target} with id ${build_target_id}])
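+# Exactly one of the FREEBSD/MACOS/LINUX conditionals ends up true; any host
+# not matched above falls through to the linux target.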
+
+
+# -----------------------------------------------------------------------------
+# pthreads
+
+ACX_PTHREAD(, [AC_MSG_ERROR([Cannot initialize pthread environment])])
+LIBS="${PTHREAD_LIBS} ${LIBS}"
+CFLAGS="${CFLAGS} ${PTHREAD_CFLAGS}"
+CC="${PTHREAD_CC}"
+
+AC_CHECK_LIB(
+[pthread],
+[pthread_getname_np],
+[AC_DEFINE([HAVE_PTHREAD_GETNAME_NP], [1], [Is set if pthread_getname_np is available])]
+)
+
+
+# -----------------------------------------------------------------------------
+# libm
+
+AC_ARG_VAR([MATH_CFLAGS], [C compiler flags for math])
+AC_ARG_VAR([MATH_LIBS], [linker flags for math])
+if test -z "${MATH_LIBS}"; then
+ AC_CHECK_LIB(
+ [m],
+ [sin],
+ [MATH_LIBS="-lm"]
+ )
+fi
+test "${with_math}" = "yes" -a -z "${MATH_LIBS}" && AC_MSG_ERROR([math required but not found])
+
+AC_MSG_CHECKING([if libm should be used])
+if test "${with_math}" != "no" -a ! -z "${MATH_LIBS}"; then
+ with_math="yes"
+ AC_DEFINE([STORAGE_WITH_MATH], [1], [math usability])
+ OPTIONAL_MATH_CFLAGS="${MATH_CFLAGS}"
+ OPTIONAL_MATH_LIBS="${MATH_LIBS}"
+else
+ with_math="no"
+fi
+AC_MSG_RESULT([${with_math}])
+
+
+# -----------------------------------------------------------------------------
+# libuv multi-platform support library with a focus on asynchronous I/O
+# TODO: check version, uv_fs_scandir_next only available in version >= 1.0
+
+AC_CHECK_LIB(
+ [uv],
+ [uv_fs_scandir_next],
+ [UV_LIBS="-luv"]
+)
+test -z "${UV_LIBS}" && \
+ AC_MSG_ERROR([libuv required but not found. Try installing 'libuv1-dev' or 'libuv-devel'.])
+OPTIONAL_UV_CFLAGS="${UV_CFLAGS}"
+OPTIONAL_UV_LIBS="${UV_LIBS}"
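+# Probing for uv_fs_scandir_next() doubles as a minimum-version check, since
+# the symbol only exists in libuv >= 1.0 (see the TODO above).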
+
+
+# -----------------------------------------------------------------------------
+# lz4 Extremely Fast Compression algorithm
+
+AC_CHECK_LIB(
+ [lz4],
+ [LZ4_compress_default],
+ [LZ4_LIBS="-llz4"]
+)
+
+
+# -----------------------------------------------------------------------------
+# zlib
+
+PKG_CHECK_MODULES(
+ [ZLIB],
+ [zlib],
+ [have_zlib=yes],
+ [have_zlib=no]
+)
+test "${with_zlib}" = "yes" -a "${have_zlib}" != "yes" && AC_MSG_ERROR([zlib required but not found. Try installing 'zlib1g-dev' or 'zlib-devel'.])
+
+AC_MSG_CHECKING([if zlib should be used])
+if test "${with_zlib}" != "no" -a "${have_zlib}" = "yes"; then
+ with_zlib="yes"
+ AC_DEFINE([NETDATA_WITH_ZLIB], [1], [zlib usability])
+ OPTIONAL_ZLIB_CFLAGS="${ZLIB_CFLAGS}"
+ OPTIONAL_ZLIB_LIBS="${ZLIB_LIBS}"
+else
+ with_zlib="no"
+fi
+AC_MSG_RESULT([${with_zlib}])
+
+
+# -----------------------------------------------------------------------------
+# libuuid
+
+PKG_CHECK_MODULES(
+ [UUID],
+ [uuid],
+ [have_uuid=yes],
+ [AC_MSG_ERROR([libuuid required but not found. Try installing 'uuid-dev' or 'libuuid-devel'.])]
+)
+AC_DEFINE([NETDATA_WITH_UUID], [1], [uuid usability])
+OPTIONAL_UUID_CFLAGS="${UUID_CFLAGS}"
+OPTIONAL_UUID_LIBS="${UUID_LIBS}"
+
+# -----------------------------------------------------------------------------
+# OpenSSL Cryptography and SSL/TLS Toolkit
+
+AC_CHECK_LIB(
+ [crypto],
+ [SHA256_Init],
+ [SSL_LIBS="-lcrypto -lssl"]
+)
+
+AC_CHECK_LIB(
+ [crypto],
+ [X509_VERIFY_PARAM_set1_host],
+ [ssl_host_validation="yes"],
+ [ssl_host_validation="no"]
+)
+
+test -z "${SSL_LIBS}" || \
+ AC_DEFINE([HAVE_CRYPTO], [1], [libcrypto availability])
+
+if test "${ssl_host_validation}" = "no"; then
+ AC_DEFINE([HAVE_X509_VERIFY_PARAM_set1_host], [0], [ssl host validation])
+ AC_MSG_WARN([DISABLING SSL HOSTNAME VALIDATION BECAUSE IT IS NOT AVAILABLE ON THIS SYSTEM.])
+else
+ AC_DEFINE([HAVE_X509_VERIFY_PARAM_set1_host], [1], [ssl host validation])
+fi
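+# Note: X509_VERIFY_PARAM_set1_host() was introduced in OpenSSL 1.0.2, so
+# hostname validation is compiled out (with the warning above) on older builds.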
+
+# -----------------------------------------------------------------------------
+# JSON-C library
+
+PKG_CHECK_MODULES([JSON],[json-c],AC_CHECK_LIB(
+ [json-c],
+ [json_object_get_type],
+ [JSONC_LIBS="-ljson-c"]),AC_CHECK_LIB(
+ [json],
+ [json_object_get_type],
+ [JSONC_LIBS="-ljson"])
+ )
+
+OPTIONAL_JSONC_LIBS="${JSONC_LIBS}"
+
+# -----------------------------------------------------------------------------
+# DB engine and HTTPS
+test "${enable_dbengine}" = "yes" -a -z "${LZ4_LIBS}" && \
+ AC_MSG_ERROR([liblz4 required but not found. Try installing 'liblz4-dev' or 'lz4-devel'.])
+
+
+AC_ARG_WITH([libJudy],
+ [AS_HELP_STRING([--with-libJudy=PREFIX],[Use a specific Judy library (default is the system library)])],
+ [
+ libJudy_dir="$withval"
+ AC_MSG_CHECKING(for libJudy in $withval)
+ if test -f "${libJudy_dir}/libJudy.a" -a -f "${libJudy_dir}/Judy.h"; then
+ LIBS_BACKUP="${LIBS}"
+ LIBS="${libJudy_dir}/libJudy.a"
+ AC_LINK_IFELSE([AC_LANG_SOURCE([[#include "${libJudy_dir}/Judy.h"
+ int main (int argc, char **argv) {
+ Pvoid_t PJLArray = (Pvoid_t) NULL;
+ Word_t * PValue;
+ Word_t Index;
+ JLI(PValue, PJLArray, Index);
+ }]])],
+ [HAVE_libJudy_a="yes"],
+ [HAVE_libJudy_a="no"])
+ LIBS="${LIBS_BACKUP}"
+ JUDY_LIBS="${libJudy_dir}/libJudy.a"
+ JUDY_CFLAGS="-I${libJudy_dir}"
+ AC_MSG_RESULT([$HAVE_libJudy_a])
+ else
+ libJudy_dir=""
+ HAVE_libJudy_a="no"
+ AC_MSG_RESULT([$HAVE_libJudy_a])
+ fi
+ ],
+ [HAVE_libJudy_a="no"])
+
+if test "${HAVE_libJudy_a}" = "no"; then
+ AC_CHECK_LIB(
+ [Judy],
+ [JudyLIns],
+ [JUDY_LIBS="-lJudy"]
+ )
+fi
+
+test "${enable_dbengine}" = "yes" -a -z "${JUDY_LIBS}" && \
+ AC_MSG_ERROR([libJudy required but not found. Try installing 'libjudy-dev' or 'Judy-devel'.])
+
+test "${enable_https}" = "yes" -a -z "${SSL_LIBS}" && \
+ AC_MSG_ERROR([OpenSSL required for HTTPS but not found. Try installing 'libssl-dev' or 'openssl-devel'.])
+
+test "${enable_dbengine}" = "yes" -a -z "${SSL_LIBS}" && \
+ AC_MSG_ERROR([OpenSSL required for DBENGINE but not found. Try installing 'libssl-dev' or 'openssl-devel'.])
+
+AC_MSG_CHECKING([if netdata dbengine should be used])
+if test "${enable_dbengine}" != "no" -a "${UV_LIBS}" -a "${LZ4_LIBS}" -a "${JUDY_LIBS}" -a "${SSL_LIBS}"; then
+ enable_dbengine="yes"
+ AC_DEFINE([ENABLE_DBENGINE], [1], [netdata dbengine usability])
+ OPTIONAL_LZ4_CFLAGS="${LZ4_CFLAGS}"
+ OPTIONAL_LZ4_LIBS="${LZ4_LIBS}"
+ OPTIONAL_JUDY_CFLAGS="${JUDY_CFLAGS}"
+ OPTIONAL_JUDY_LIBS="${JUDY_LIBS}"
+ OPTIONAL_SSL_CFLAGS="${SSL_CFLAGS}"
+ OPTIONAL_SSL_LIBS="${SSL_LIBS}"
+else
+ enable_dbengine="no"
+fi
+AC_MSG_RESULT([${enable_dbengine}])
+AM_CONDITIONAL([ENABLE_DBENGINE], [test "${enable_dbengine}" = "yes"])
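+# The dbengine needs all four of libuv, liblz4, libJudy and OpenSSL; under the
+# default "detect", a single missing library quietly disables it rather than
+# failing the build.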
+
+AC_MSG_CHECKING([if netdata https should be used])
+if test "${enable_https}" != "no" -a "${SSL_LIBS}"; then
+ enable_https="yes"
+ AC_DEFINE([ENABLE_HTTPS], [1], [netdata HTTPS usability])
+ OPTIONAL_SSL_CFLAGS="${SSL_CFLAGS}"
+ OPTIONAL_SSL_LIBS="${SSL_LIBS}"
+else
+ enable_https="no"
+fi
+AC_MSG_RESULT([${enable_https}])
+AM_CONDITIONAL([ENABLE_HTTPS], [test "${enable_https}" = "yes"])
+
+# -----------------------------------------------------------------------------
+# JSON-C
+
+if test "${enable_jsonc}" != "no" -a -z "${JSONC_LIBS}"; then
+ # Try to detect a manually built static json-c (as produced by netdata-installer.sh)
+ AC_MSG_CHECKING([if statically built json-c is present])
+ HAVE_libjson_c_a="no"
+ if test -f "externaldeps/jsonc/libjson-c.a"; then
+ LIBS_BKP="${LIBS}"
+ LIBS="externaldeps/jsonc/libjson-c.a"
+ AC_LINK_IFELSE([AC_LANG_SOURCE([[#include "externaldeps/jsonc/json-c/json.h"
+ int main (int argc, char **argv) {
+ struct json_object *jobj;
+ char *str = "{ \"msg-type\": \"random\" }";
+ jobj = json_tokener_parse(str);
+ json_object_get_type(jobj);
+ }]])],
+ [HAVE_libjson_c_a="yes"],
+ [HAVE_libjson_c_a="no"])
+ LIBS="${LIBS_BKP}"
+ fi
+
+ if test "${HAVE_libjson_c_a}" = "yes"; then
+ AC_DEFINE([LINK_STATIC_JSONC], [1], [static json-c should be used])
+ JSONC_LIBS="static"
+ OPTIONAL_JSONC_STATIC_CFLAGS="-I externaldeps/jsonc"
+ fi
+ AC_MSG_RESULT([${HAVE_libjson_c_a}])
+fi
+AM_CONDITIONAL([LINK_STATIC_JSONC], [test "${JSONC_LIBS}" = "static"])
+
+test "${enable_jsonc}" = "yes" -a -z "${JSONC_LIBS}" && \
+ AC_MSG_ERROR([JSON-C required but not found. Try installing 'libjson-c-dev' or 'json-c'.])
+
+AC_MSG_CHECKING([if json-c should be used])
+if test "${enable_jsonc}" != "no" -a "${JSONC_LIBS}"; then
+ enable_jsonc="yes"
+ AC_DEFINE([ENABLE_JSONC], [1], [netdata json-c usability])
+else
+ enable_jsonc="no"
+fi
+AC_MSG_RESULT([${enable_jsonc}])
+AM_CONDITIONAL([ENABLE_JSONC], [test "${enable_jsonc}" = "yes"])
+
+# -----------------------------------------------------------------------------
+# compiler options
+
+AC_ARG_VAR([SSE_CANDIDATE], [C compiler flags for SSE])
+AS_CASE([$host_cpu],
+ [i?86], [SSE_CANDIDATE="yes"]
+)
+AC_SUBST([SSE_CANDIDATE])
+if test "${SSE_CANDIDATE}" = "yes" -a "${enable_x86_sse}" = "yes"; then
+ opt="-msse2 -mfpmath=sse"
+ AX_CHECK_COMPILE_FLAG(${opt}, [CFLAGS="${CFLAGS} ${opt}"], [])
+fi
+
+if test "${GCC}" = "yes"; then
+ AC_DEFINE_UNQUOTED([likely(x)], [__builtin_expect(!!(x), 1)], [gcc branch optimization])
+ AC_DEFINE_UNQUOTED([unlikely(x)], [__builtin_expect(!!(x), 0)], [gcc branch optimization])
+else
+ AC_DEFINE_UNQUOTED([likely(x)], [(x)], [gcc branch optimization])
+ AC_DEFINE_UNQUOTED([unlikely(x)], [(x)], [gcc branch optimization])
+fi
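+# Illustrative use in the C sources:
+#   if(unlikely(fd == -1)) goto error;
+# Under gcc this expands to __builtin_expect(!!(fd == -1), 0), hinting the
+# compiler about the cold path; other compilers get transparent no-ops.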
+
+if test "${GCC}" = "yes"; then
+ AC_DEFINE([__always_unused], [__attribute__((unused))], [gcc unused attribute])
+ AC_DEFINE([__maybe_unused], [__attribute__((unused))], [gcc unused attribute])
+else
+ AC_DEFINE([__always_unused], [], [dummy unused attribute])
+ AC_DEFINE([__maybe_unused], [], [dummy unused attribute])
+fi
+
+if test "${enable_pedantic}" = "yes"; then
+ enable_strict="yes"
+ CFLAGS="${CFLAGS} -pedantic -Wall -Wextra -Wno-long-long"
+fi
+
+
+# -----------------------------------------------------------------------------
+# memory allocation library
+
+AC_MSG_CHECKING([for memory allocator])
+TS_CHECK_JEMALLOC
+if test "$has_jemalloc" = "1"; then
+ AC_DEFINE([ENABLE_JEMALLOC], [1], [compile and link with jemalloc])
+ AC_MSG_RESULT([jemalloc])
+else
+ TS_CHECK_TCMALLOC
+ if test "$has_tcmalloc" = "1"; then
+ AC_DEFINE([ENABLE_TCMALLOC], [1], [compile and link with tcmalloc])
+ AC_MSG_RESULT([tcmalloc])
+ else
+ AC_MSG_RESULT([system])
+ AC_C_MALLOPT
+ AC_C_MALLINFO
+ fi
+fi
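+# Allocator preference: jemalloc if found, then tcmalloc, otherwise the system
+# allocator (for which mallopt()/mallinfo() availability is probed instead).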
+
+
+# -----------------------------------------------------------------------------
+# libcap
+
+PKG_CHECK_MODULES(
+ [LIBCAP],
+ [libcap],
+ [AC_CHECK_LIB([cap], [cap_get_proc, cap_set_proc],
+ [AC_CHECK_HEADER(
+ [sys/capability.h],
+ [have_libcap=yes],
+ [have_libcap=no]
+ )],
+ [have_libcap=no]
+ )],
+ [have_libcap=no]
+)
+test "${with_libcap}" = "yes" -a "${have_libcap}" != "yes" && AC_MSG_ERROR([libcap required but not found.])
+
+AC_MSG_CHECKING([if libcap should be used])
+if test "${with_libcap}" != "no" -a "${have_libcap}" = "yes"; then
+ with_libcap="yes"
+ AC_DEFINE([HAVE_CAPABILITY], [1], [libcap usability])
+ OPTIONAL_LIBCAP_CFLAGS="${LIBCAP_CFLAGS}"
+ OPTIONAL_LIBCAP_LIBS="${LIBCAP_LIBS}"
+else
+ with_libcap="no"
+fi
+AC_MSG_RESULT([${with_libcap}])
+AM_CONDITIONAL([ENABLE_CAPABILITY], [test "${with_libcap}" = "yes"])
+
+# -----------------------------------------------------------------------------
+# ACLK
+
+AC_MSG_CHECKING([if cloud functionality should be enabled])
+AC_MSG_RESULT([${enable_cloud}])
+if test "$enable_cloud" != "no"; then
+ # Keep all messages that can fail the ACLK build in one place,
+ # so it is easier to see why it cannot be built.
+ if test -n "${SSL_LIBS}"; then
+ OPTIONAL_SSL_CFLAGS="${SSL_CFLAGS}"
+ OPTIONAL_SSL_LIBS="${SSL_LIBS}"
+ else
+ AC_MSG_WARN([OpenSSL required for agent-cloud-link but not found. Try installing 'libssl-dev' or 'openssl-devel'.])
+ fi
+
+ AC_MSG_CHECKING([if libmosquitto static lib is present (and builds)])
+ if test -f "externaldeps/mosquitto/libmosquitto.a"; then
+ LIBS_BKP="${LIBS}"
+ LIBS="externaldeps/mosquitto/libmosquitto.a ${OPTIONAL_SSL_LIBS} ${LIBS_BKP}"
+ AC_LINK_IFELSE([AC_LANG_SOURCE([[#include "externaldeps/mosquitto/mosquitto.h"
+ int main (int argc, char **argv) {
+ int m,mm,r;
+ mosquitto_lib_version(&m, &mm, &r);
+ }]])],
+ [HAVE_libmosquitto_a="yes"],
+ [HAVE_libmosquitto_a="no"])
+ LIBS="${LIBS_BKP}"
+ else
+ HAVE_libmosquitto_a="no"
+ AC_DEFINE([ACLK_NO_LIBMOSQ], [1], [Libmosquitto.a was not found during build.])
+ fi
+ AC_MSG_RESULT([${HAVE_libmosquitto_a}])
+
+ if test "${with_bundled_lws}" = "yes"; then
+ AC_MSG_CHECKING([if libwebsockets static lib is present])
+ if test -f "${bundled_lws_dir}/libwebsockets.a"; then
+ LWS_CFLAGS="-I ${bundled_lws_dir}/include"
+ OPTIONAL_LWS_LIBS="${bundled_lws_dir}/libwebsockets.a"
+ AC_MSG_RESULT([yes])
+ AC_DEFINE([BUNDLED_LWS], [1], [using statically linked libwebsockets])
+ else
+ AC_DEFINE([ACLK_NO_LWS], [1], [libwebsockets.a was not found during build.])
+ # This should become an error if the installer ever makes the system
+ # library the default; while the bundled build is the default, we prefer
+ # building netdata without ACLK instead of failing hard.
+ AC_MSG_RESULT([no])
+ AC_MSG_WARN([You requested the bundled static libwebsockets but it cannot be used. Disabling ACLK])
+ fi
+ else
+ AC_CHECK_LIB([websockets],
+ [lws_get_vhost_by_name],
+ [OPTIONAL_LWS_LIBS="-lwebsockets"],
+ [AC_DEFINE([ACLK_NO_LWS], [1], [usable system libwebsockets was not found during build.])])
+ fi
+
+ if test "${build_target}" = "linux" -a "${enable_cloud}" != "no"; then
+ if test "${have_libcap}" = "yes" -a "${with_libcap}" = "no"; then
+ AC_MSG_ERROR([agent-cloud-link cannot be built without libcap. Disable cloud with --disable-cloud or enable libcap])
+ fi
+ if test "${with_libcap}" = "yes"; then
+ LWS_CFLAGS+=" ${LIBCAP_CFLAGS}"
+ fi
+ fi
+
+ # The next two lines just surface ACLK dependency info in one common place
+ AC_MSG_CHECKING([if json-c available for ACLK])
+ AC_MSG_RESULT([${enable_jsonc}])
+
+ test "${enable_cloud}" = "yes" -a "${enable_jsonc}" = "no" && \
+ AC_MSG_ERROR([You have asked for ACLK to be built but no json-c available. ACLK requires json-c])
+
+ AC_MSG_CHECKING([if netdata agent-cloud-link can be enabled])
+ if test "${HAVE_libmosquitto_a}" = "yes" -a -n "${OPTIONAL_LWS_LIBS}" -a -n "${SSL_LIBS}" -a "${enable_jsonc}" = "yes"; then
+ can_enable_aclk="yes"
+ else
+ can_enable_aclk="no"
+ fi
+ AC_MSG_RESULT([${can_enable_aclk}])
+
+ test "${enable_cloud}" = "yes" -a "${can_enable_aclk}" = "no" && \
+ AC_MSG_ERROR([agent-cloud-link was requested but it cannot be built!])
+
+ AC_MSG_CHECKING([if netdata agent-cloud-link should/will be enabled])
+ if test "${enable_cloud}" = "detect"; then
+ enable_aclk=$can_enable_aclk
+ else
+ enable_aclk=$enable_cloud
+ fi
+ AC_SUBST([can_enable_aclk])
+
+ if test "${enable_aclk}" = "yes"; then
+ AC_DEFINE([ENABLE_ACLK], [1], [netdata ACLK])
+ fi
+
+ AC_MSG_RESULT([${enable_aclk}])
+fi
+AC_SUBST([enable_cloud])
+AM_CONDITIONAL([ENABLE_ACLK], [test "${enable_aclk}" = "yes"])
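+# In short: ACLK requires the bundled static libmosquitto, a usable
+# libwebsockets, OpenSSL and json-c all at once. With --enable-cloud a missing
+# piece is fatal; with the default "detect" the agent is built without ACLK.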
+
+# -----------------------------------------------------------------------------
+# apps.plugin
+
+AC_MSG_CHECKING([if apps.plugin should be enabled])
+if test "${build_target}" != "macos"; then
+ AC_DEFINE([ENABLE_APPS_PLUGIN], [1], [apps.plugin])
+ enable_plugin_apps="yes"
+else
+ enable_plugin_apps="no"
+fi
+AC_MSG_RESULT([${enable_plugin_apps}])
+AM_CONDITIONAL([ENABLE_PLUGIN_APPS], [test "${enable_plugin_apps}" = "yes"])
+
+
+# -----------------------------------------------------------------------------
+# freeipmi.plugin - libipmimonitoring
+
+PKG_CHECK_MODULES(
+ [IPMIMONITORING],
+ [libipmimonitoring],
+ [AC_CHECK_LIB([ipmimonitoring], [
+ ipmi_monitoring_sensor_readings_by_record_id,
+ ipmi_monitoring_sensor_readings_by_sensor_type,
+ ipmi_monitoring_sensor_read_sensor_number,
+ ipmi_monitoring_sensor_read_sensor_name,
+ ipmi_monitoring_sensor_read_sensor_state,
+ ipmi_monitoring_sensor_read_sensor_units,
+ ipmi_monitoring_sensor_iterator_next,
+ ipmi_monitoring_ctx_sensor_config_file,
+ ipmi_monitoring_ctx_sdr_cache_directory,
+ ipmi_monitoring_ctx_errormsg,
+ ipmi_monitoring_ctx_create
+ ],
+ [AC_CHECK_HEADER(
+ [ipmi_monitoring.h],
+ [AC_CHECK_HEADER(
+ [ipmi_monitoring_bitmasks.h],
+ [have_ipmimonitoring=yes],
+ [have_ipmimonitoring=no]
+ )],
+ [have_ipmimonitoring=no]
+ )],
+ [have_ipmimonitoring=no]
+ )],
+ [have_ipmimonitoring=no]
+)
+test "${enable_plugin_freeipmi}" = "yes" -a "${have_ipmimonitoring}" != "yes" && \
+ AC_MSG_ERROR([ipmimonitoring required but not found. Try installing 'libipmimonitoring-dev' or 'libipmimonitoring-devel'])
+
+AC_MSG_CHECKING([if freeipmi.plugin should be enabled])
+if test "${enable_plugin_freeipmi}" != "no" -a "${have_ipmimonitoring}" = "yes"; then
+ enable_plugin_freeipmi="yes"
+ AC_DEFINE([HAVE_FREEIPMI], [1], [ipmimonitoring usability])
+ OPTIONAL_IPMIMONITORING_CFLAGS="${IPMIMONITORING_CFLAGS}"
+ OPTIONAL_IPMIMONITORING_LIBS="${IPMIMONITORING_LIBS}"
+else
+ enable_plugin_freeipmi="no"
+fi
+AC_MSG_RESULT([${enable_plugin_freeipmi}])
+AM_CONDITIONAL([ENABLE_PLUGIN_FREEIPMI], [test "${enable_plugin_freeipmi}" = "yes"])
+
+
+# -----------------------------------------------------------------------------
+# cups.plugin - libcups
+
+# Only check for the most recently added CUPS API call (httpConnect2)
+AC_CHECK_LIB([cups], [httpConnect2],
+ [AC_CHECK_HEADER(
+ [cups/cups.h],
+ [have_cups=yes],
+ [have_cups=no]
+ )],
+ [have_cups=no]
+)
+
+test "${enable_plugin_cups}" = "yes" -a "${have_cups}" != "yes" && \
+ AC_MSG_ERROR([cups required but not found. Try installing 'cups'])
+
+AC_ARG_WITH([cups-config],
+ [AS_HELP_STRING([--with-cups-config=path], [Specify path to cups-config executable.])],
+ [with_cups_config="$withval"],
+ [with_cups_config=system]
+ )
+
+AS_IF([test "x$with_cups_config" != "xsystem"], [
+ CUPSCONFIG=$with_cups_config
+], [
+ AC_PATH_TOOL(CUPSCONFIG, [cups-config])
+ AS_IF([test -z "$CUPSCONFIG"], [
+ have_cups=no
+ ])
+])
+
+AC_MSG_CHECKING([if cups.plugin should be enabled])
+if test "${enable_plugin_cups}" != "no" -a "${have_cups}" = "yes"; then
+ enable_plugin_cups="yes"
+ AC_DEFINE([HAVE_CUPS], [1], [cups usability])
+
+ CUPS_CFLAGS="${CUPS_CFLAGS} `$CUPSCONFIG --cflags`"
+ CUPS_LIBS="${CUPS_LIBS} `$CUPSCONFIG --libs`"
+
+ OPTIONAL_CUPS_CFLAGS="${CUPS_CFLAGS}"
+ OPTIONAL_CUPS_LIBS="${CUPS_LIBS}"
+else
+ enable_plugin_cups="no"
+fi
+AC_MSG_RESULT([${enable_plugin_cups}])
+AM_CONDITIONAL([ENABLE_PLUGIN_CUPS], [test "${enable_plugin_cups}" = "yes"])
+
+
+# -----------------------------------------------------------------------------
+# nfacct.plugin - libmnl, libnetfilter_acct
+
+AC_CHECK_HEADER(
+ [linux/netfilter/nfnetlink_conntrack.h],
+ [AC_CHECK_DECL(
+ [CTA_STATS_MAX],
+ [have_nfnetlink_conntrack=yes],
+ [have_nfnetlink_conntrack=no],
+ [#include <linux/netfilter/nfnetlink_conntrack.h>]
+ )],
+ [have_nfnetlink_conntrack=no]
+)
+
+PKG_CHECK_MODULES(
+ [NFACCT],
+ [libnetfilter_acct],
+ [AC_CHECK_LIB(
+ [netfilter_acct],
+ [nfacct_alloc],
+ [have_libnetfilter_acct=yes],
+ [have_libnetfilter_acct=no]
+ )],
+ [have_libnetfilter_acct=no]
+)
+
+PKG_CHECK_MODULES(
+ [LIBMNL],
+ [libmnl],
+ [AC_CHECK_LIB(
+ [mnl],
+ [mnl_socket_open],
+ [have_libmnl=yes],
+ [have_libmnl=no]
+ )],
+ [have_libmnl=no]
+)
+
+test "${enable_plugin_nfacct}" = "yes" -a "${have_nfnetlink_conntrack}" != "yes" && \
+ AC_MSG_ERROR([nfnetlink_conntrack.h required but not found or too old])
+
+test "${enable_plugin_nfacct}" = "yes" -a "${have_libnetfilter_acct}" != "yes" && \
+ AC_MSG_ERROR([netfilter_acct required but not found])
+
+test "${enable_plugin_nfacct}" = "yes" -a "${have_libmnl}" != "yes" && \
+ AC_MSG_ERROR([libmnl required but not found. Try installing 'libmnl-dev' or 'libmnl-devel'])
+
+AC_MSG_CHECKING([if nfacct.plugin should be enabled])
+if test "${enable_plugin_nfacct}" != "no" -a "${have_libnetfilter_acct}" = "yes" \
+ -a "${have_libmnl}" = "yes" \
+ -a "${have_nfnetlink_conntrack}" = "yes"; then
+ enable_plugin_nfacct="yes"
+ AC_DEFINE([HAVE_LIBMNL], [1], [libmnl usability])
+ AC_DEFINE([HAVE_LIBNETFILTER_ACCT], [1], [libnetfilter_acct usability])
+ AC_DEFINE([HAVE_LINUX_NETFILTER_NFNETLINK_CONNTRACK_H], [1], [libnetfilter_nfnetlink_conntrack header usability])
+ OPTIONAL_NFACCT_CFLAGS="${NFACCT_CFLAGS} ${LIBMNL_CFLAGS}"
+ OPTIONAL_NFACCT_LIBS="${NFACCT_LIBS} ${LIBMNL_LIBS}"
+else
+ enable_plugin_nfacct="no"
+fi
+AC_MSG_RESULT([${enable_plugin_nfacct}])
+AM_CONDITIONAL([ENABLE_PLUGIN_NFACCT], [test "${enable_plugin_nfacct}" = "yes"])
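+# nfacct.plugin needs the nfnetlink_conntrack header (with CTA_STATS_MAX),
+# libnetfilter_acct and libmnl together; any one missing disables the plugin
+# under the default "detect".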
+
+
+# -----------------------------------------------------------------------------
+# xenstat.plugin - libxenstat
+
+PKG_CHECK_MODULES(
+ [YAJL],
+ [yajl],
+ [AC_CHECK_LIB(
+ [yajl],
+ [yajl_tree_get],
+ [have_libyajl=yes],
+ [have_libyajl=no]
+ )],
+ [have_libyajl=no]
+)
+
+AC_CHECK_LIB(
+ [xenstat],
+ [xenstat_init],
+ [AC_CHECK_HEADER(
+ [xenstat.h],
+ [have_libxenstat=yes],
+ [have_libxenstat=no]
+ )],
+ [have_libxenstat=no],
+ [-lyajl]
+)
+
+PKG_CHECK_MODULES(
+ [XENLIGHT],
+ [xenlight],
+ [AC_CHECK_LIB(
+ [xenlight],
+ [libxl_domain_info],
+ [AC_CHECK_HEADER(
+ [libxl.h],
+ [have_libxenlight=yes],
+ [have_libxenlight=no]
+ )],
+ [have_libxenlight=no]
+ )],
+ [have_libxenlight=no]
+)
+
+test "${enable_plugin_xenstat}" = "yes" -a "${have_libxenstat}" != "yes" && \
+ AC_MSG_ERROR([libxenstat required but not found. try installing 'xen-dom0-libs-devel'])
+
+test "${enable_plugin_xenstat}" = "yes" -a "${have_libxenlight}" != "yes" && \
+ AC_MSG_ERROR([libxenlight required but not found. try installing 'xen-dom0-libs-devel'])
+
+test "${enable_plugin_xenstat}" = "yes" -a "${have_libyajl}" != "yes" && \
+ AC_MSG_ERROR([libyajl required but not found. Try installing 'yajl-devel'])
+
+AC_MSG_CHECKING([if xenstat.plugin should be enabled])
+if test "${enable_plugin_xenstat}" != "no" -a "${have_libxenstat}" = "yes" -a "${have_libxenlight}" = "yes" -a "${have_libyajl}" = "yes"; then
+ enable_plugin_xenstat="yes"
+ AC_DEFINE([HAVE_LIBXENSTAT], [1], [libxenstat usability])
+ AC_DEFINE([HAVE_LIBXENLIGHT], [1], [libxenlight usability])
+ AC_DEFINE([HAVE_LIBYAJL], [1], [libyajl usability])
+ OPTIONAL_XENSTAT_CFLAGS="${XENLIGHT_CFLAGS} ${YAJL_CFLAGS}"
+ OPTIONAL_XENSTAT_LIBS="-lxenstat ${XENLIGHT_LIBS} ${YAJL_LIBS}"
+else
+ enable_plugin_xenstat="no"
+fi
+AC_MSG_RESULT([${enable_plugin_xenstat}])
+AM_CONDITIONAL([ENABLE_PLUGIN_XENSTAT], [test "${enable_plugin_xenstat}" = "yes"])
+
+if test "${enable_plugin_xenstat}" == "yes"; then
+ AC_MSG_CHECKING([for xenstat_vbd_error in -lxenstat])
+ AC_TRY_LINK(
+ [ #include <xenstat.h> ],
+ [
+ xenstat_vbd * vbd;
+ int out = xenstat_vbd_error(vbd);
+ ],
+ [
+ have_xenstat_vbd_error=yes
+ AC_DEFINE([HAVE_XENSTAT_VBD_ERROR], [1], [xenstat_vbd_error usability])
+ ],
+ [ have_xenstat_vbd_error=no ]
+ )
+ AC_MSG_RESULT([${have_xenstat_vbd_error}])
+fi
+
+# -----------------------------------------------------------------------------
+# perf.plugin
+
+AC_CHECK_HEADER(
+ [linux/perf_event.h],
+ [AC_CHECK_DECL(
+ [PERF_COUNT_HW_REF_CPU_CYCLES],
+ [have_perf_event=yes],
+ [have_perf_event=no],
+ [#include <linux/perf_event.h>]
+ )],
+ [have_perf_event=no]
+)
+
+AC_MSG_CHECKING([if perf.plugin should be enabled])
+if test "${build_target}" == "linux" -a "${have_perf_event}" = "yes"; then
+ AC_DEFINE([ENABLE_PERF_PLUGIN], [1], [perf.plugin])
+ enable_plugin_perf="yes"
+else
+ enable_plugin_perf="no"
+fi
+AC_MSG_RESULT([${enable_plugin_perf}])
+AM_CONDITIONAL([ENABLE_PLUGIN_PERF], [test "${enable_plugin_perf}" = "yes"])
+
+# -----------------------------------------------------------------------------
+# ebpf.plugin
+
+if test "${build_target}" = "linux" -a "${enable_ebpf}" != "no"; then
+ PKG_CHECK_MODULES(
+ [LIBELF],
+ [libelf],
+ [have_libelf=yes],
+ [have_libelf=no]
+ )
+
+ AC_CHECK_TYPE(
+ [struct bpf_prog_info],
+ [have_bpf=yes],
+ [have_bpf=no],
+ [#include <linux/bpf.h>]
+ )
+
+ AC_CHECK_FILE(
+ externaldeps/libbpf/libbpf.a,
+ [have_libbpf=yes],
+ [have_libbpf=no]
+ )
+
+ AC_MSG_CHECKING([if ebpf.plugin should be enabled])
+ if test "${have_libelf}" = "yes" -a \
+ "${have_bpf}" = "yes" -a \
+ "${have_libbpf}" = "yes"; then
+ OPTIONAL_BPF_CFLAGS="${LIBELF_CFLAGS} -I externaldeps/libbpf/include"
+ OPTIONAL_BPF_LIBS="externaldeps/libbpf/libbpf.a ${LIBELF_LIBS}"
+ AC_DEFINE([HAVE_LIBBPF], [1], [libbpf usability])
+ enable_ebpf="yes"
+ else
+ enable_ebpf="no"
+ fi
+else
+ enable_ebpf="no"
+fi
+AC_MSG_RESULT([${enable_ebpf}])
+AM_CONDITIONAL([ENABLE_PLUGIN_EBPF], [test "${enable_ebpf}" = "yes"])
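+# ebpf.plugin is Linux-only and additionally needs libelf, kernel headers new
+# enough to define struct bpf_prog_info, and a pre-built static libbpf under
+# externaldeps/libbpf/ (normally fetched there by the build scripts).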
+
+# -----------------------------------------------------------------------------
+# slabinfo.plugin
+
+AC_MSG_CHECKING([if slabinfo.plugin should be enabled])
+if test "${build_target}" == "linux"; then
+ AC_DEFINE([ENABLE_SLABINFO], [1], [slabinfo plugin])
+ enable_plugin_slabinfo="yes"
+else
+ enable_plugin_slabinfo="no"
+fi
+AC_MSG_RESULT([${enable_plugin_slabinfo}])
+AM_CONDITIONAL([ENABLE_PLUGIN_SLABINFO], [test "${enable_plugin_slabinfo}" = "yes"])
+
+
+# -----------------------------------------------------------------------------
+# AWS Kinesis backend - libaws-cpp-sdk-kinesis, libaws-cpp-sdk-core, libssl, libcrypto, libcurl
+
+PKG_CHECK_MODULES(
+ [LIBCRYPTO],
+ [libcrypto],
+ [AC_CHECK_LIB(
+ [crypto],
+ [CRYPTO_new_ex_data],
+ [have_libcrypto=yes],
+ [have_libcrypto=no]
+ )],
+ [have_libcrypto=no]
+)
+
+PKG_CHECK_MODULES(
+ [LIBSSL],
+ [libssl],
+ [AC_CHECK_LIB(
+ [ssl],
+ [SSL_connect],
+ [have_libssl=yes],
+ [have_libssl=no]
+ )],
+ [have_libssl=no]
+)
+
+PKG_CHECK_MODULES(
+ [LIBCURL],
+ [libcurl],
+ [AC_CHECK_LIB(
+ [curl],
+ [curl_easy_init],
+ [have_libcurl=yes],
+ [have_libcurl=no]
+ )],
+ [have_libcurl=no]
+)
+
+PKG_CHECK_MODULES(
+ [AWS_CPP_SDK_CORE],
+ [aws-cpp-sdk-core],
+ [AC_CHECK_LIB(
+ [aws-cpp-sdk-core],
+ [cJSON_free],
+ [have_libaws_cpp_sdk_core=yes],
+ [have_libaws_cpp_sdk_core=no]
+ )],
+ [have_libaws_cpp_sdk_core=no]
+)
+
+PKG_CHECK_MODULES(
+ [AWS_CPP_SDK_KINESIS],
+ [aws-cpp-sdk-kinesis],
+ [have_libaws_cpp_sdk_kinesis=yes],
+ [have_libaws_cpp_sdk_kinesis=no]
+)
+
+AC_CHECK_LIB(
+ [aws-checksums],
+ [aws_checksums_crc32],
+ [have_libaws_checksums=yes],
+ [have_libaws_checksums=no]
+)
+
+AC_CHECK_LIB(
+ [aws-c-common],
+ [aws_default_allocator],
+ [have_libaws_c_common=yes],
+ [have_libaws_c_common=no]
+)
+
+AC_CHECK_LIB(
+ [aws-c-event-stream],
+ [aws_event_stream_library_init],
+ [have_libaws_c_event_stream=yes],
+ [have_libaws_c_event_stream=no]
+)
+
+test "${enable_backend_kinesis}" = "yes" -a "${have_libaws_cpp_sdk_kinesis}" != "yes" && \
+ AC_MSG_ERROR([libaws-cpp-sdk-kinesis required but not found. try installing AWS C++ SDK])
+
+test "${enable_backend_kinesis}" = "yes" -a "${have_libaws_cpp_sdk_core}" != "yes" && \
+ AC_MSG_ERROR([libaws-cpp-sdk-core required but not found. try installing AWS C++ SDK])
+
+test "${enable_backend_kinesis}" = "yes" -a "${have_libcurl}" != "yes" && \
+ AC_MSG_ERROR([libcurl required but not found])
+
+test "${enable_backend_kinesis}" = "yes" -a "${have_libssl}" != "yes" && \
+ AC_MSG_ERROR([libssl required but not found])
+
+test "${enable_backend_kinesis}" = "yes" -a "${have_libcrypto}" != "yes" && \
+ AC_MSG_ERROR([libcrypto required but not found])
+
+test "${enable_backend_kinesis}" = "yes" -a "${have_libaws_checksums}" != "yes" \
+ -a "${have_libaws_c_common}" != "yes" \
+ -a "${have_libaws_c_event_stream}" != "yes" && \
+ AC_MSG_ERROR([AWS SKD third party dependencies required but not found])
+
+AC_MSG_CHECKING([if kinesis backend should be enabled])
+if test "${enable_backend_kinesis}" != "no" -a "${have_libaws_cpp_sdk_kinesis}" = "yes" \
+ -a "${have_libaws_cpp_sdk_core}" = "yes" \
+ -a "${have_libaws_checksums}" = "yes" \
+ -a "${have_libaws_c_common}" = "yes" \
+ -a "${have_libaws_c_event_stream}" = "yes" \
+ -a "${have_libcurl}" = "yes" \
+ -a "${have_libssl}" = "yes" \
+ -a "${have_libcrypto}" = "yes"; then
+ enable_backend_kinesis="yes"
+ AC_DEFINE([HAVE_KINESIS], [1], [libaws-cpp-sdk-kinesis usability])
+ OPTIONAL_KINESIS_CFLAGS="${LIBCRYPTO_CFLAGS} ${LIBSSL_CFLAGS} ${LIBCURL_CFLAGS}"
+ CXX11FLAG="${AWS_CPP_SDK_KINESIS_CFLAGS} ${AWS_CPP_SDK_CORE_CFLAGS}"
+ OPTIONAL_KINESIS_LIBS="${AWS_CPP_SDK_KINESIS_LIBS} ${AWS_CPP_SDK_CORE_LIBS} \
+ ${LIBCRYPTO_LIBS} ${LIBSSL_LIBS} ${LIBCURL_LIBS}"
+else
+ enable_backend_kinesis="no"
+fi
+
+AC_MSG_RESULT([${enable_backend_kinesis}])
+AM_CONDITIONAL([ENABLE_BACKEND_KINESIS], [test "${enable_backend_kinesis}" = "yes"])
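+# The kinesis backend pulls in the full AWS C++ SDK dependency chain: the
+# kinesis and core SDK libraries, the aws-checksums / aws-c-common /
+# aws-c-event-stream helpers, plus libcurl, libssl and libcrypto.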
+
+
+# -----------------------------------------------------------------------------
+# Pub/Sub exporting connector - googleapis
+
+PKG_CHECK_MODULES(
+ [GRPC],
+ [grpc],
+ [have_libgrpc=yes],
+ [have_libgrpc=no]
+)
+
+PKG_CHECK_MODULES(
+ [PUBSUB],
+ [googleapis_cpp_pubsub_protos],
+ [have_pubsub_protos=yes],
+ [have_pubsub_protos=no]
+)
+
+AC_PATH_PROG([CXX_BINARY], [${CXX}], [no])
+AS_IF(
+ [test x"${CXX_BINARY}" == x"no"],
+ [have_CXX_compiler=no],
+ [have_CXX_compiler=yes]
+)
+
+test "${enable_pubsub}" = "yes" -a "${have_grpc}" != "yes" && \
+ AC_MSG_ERROR([libgrpc required but not found. try installing grpc])
+
+test "${enable_pubsub}" = "yes" -a "${have_pubsub_protos}" != "yes" && \
+ AC_MSG_ERROR([libgoogleapis_cpp_pubsub_protos required but not found. try installing googleapis])
+
+test "${enable_backend_prometheus_remote_write}" = "yes" -a "${have_CXX_compiler}" != "yes" && \
+ AC_MSG_ERROR([C++ compiler required but not found. try installing g++])
+
+AC_MSG_CHECKING([if pubsub exporting connector should be enabled])
+if test "${enable_exporting_pubsub}" != "no" -a "${have_pubsub_protos}" = "yes" -a "${have_CXX_compiler}" = "yes"; then
+ enable_exporting_pubsub="yes"
+ AC_DEFINE([ENABLE_EXPORTING_PUBSUB], [1], [Pub/Sub API usability])
+ OPTIONAL_PUBSUB_CFLAGS="${GRPC_CFLAGS} ${PUBSUB_CFLAGS}"
+ CXX11FLAG="-std=c++11"
+ OPTIONAL_PUBSUB_LIBS="${GRPC_LIBS} ${PUBSUB_LIBS}"
+else
+ enable_exporting_pubsub="no"
+fi
+
+AC_MSG_RESULT([${enable_exporting_pubsub}])
+AM_CONDITIONAL([ENABLE_EXPORTING_PUBSUB], [test "${enable_exporting_pubsub}" = "yes"])
+
+
+# -----------------------------------------------------------------------------
+# Prometheus remote write backend - libprotobuf, libsnappy, protoc
+
+PKG_CHECK_MODULES(
+ [PROTOBUF],
+ [protobuf >= 3],
+ [have_libprotobuf=yes],
+ [have_libprotobuf=no]
+)
+
+AC_MSG_CHECKING([for snappy::RawCompress in -lsnappy])
+
+ AC_LANG_SAVE
+ AC_LANG_CPLUSPLUS
+ save_LIBS="${LIBS}"
+ LIBS="-lsnappy"
+ save_CXXFLAGS="${CXXFLAGS}"
+ CXXFLAGS="${CXXFLAGS} -std=c++11"
+
+ AC_TRY_LINK(
+ [
+ #include <stdlib.h>
+ #include <snappy.h>
+ ],
+ [
+ const char *input = "test";
+ size_t compressed_length;
+ char *buffer = (char *)malloc(5 * sizeof(char));
+ snappy::RawCompress(input, 4, buffer, &compressed_length);
+ free(buffer);
+ ],
+ [
+ have_libsnappy=yes
+ SNAPPY_CFLAGS=""
+ SNAPPY_LIBS="-lsnappy"
+ ],
+ [have_libsnappy=no]
+ )
+
+ LIBS="${save_LIBS}"
+ CXXFLAGS="${save_CXXFLAGS}"
+ AC_LANG_RESTORE
+
+AC_MSG_RESULT([${have_libsnappy}])
+
+AC_PATH_PROG([PROTOC], [protoc], [no])
+AS_IF(
+ [test x"${PROTOC}" == x"no"],
+ [have_protoc=no],
+ [have_protoc=yes]
+)
+
+AC_PATH_PROG([CXX_BINARY], [${CXX}], [no])
+AS_IF(
+ [test x"${CXX_BINARY}" == x"no"],
+ [have_CXX_compiler=no],
+ [have_CXX_compiler=yes]
+)
+
+test "${enable_backend_prometheus_remote_write}" = "yes" -a "${have_libprotobuf}" != "yes" && \
+ AC_MSG_ERROR([libprotobuf required but not found. try installing protobuf])
+
+test "${enable_backend_prometheus_remote_write}" = "yes" -a "${have_libsnappy}" != "yes" && \
+ AC_MSG_ERROR([libsnappy required but not found. try installing snappy])
+
+test "${enable_backend_prometheus_remote_write}" = "yes" -a "${have_protoc}" != "yes" && \
+ AC_MSG_ERROR([protoc compiler required but not found. try installing protobuf])
+
+test "${enable_backend_prometheus_remote_write}" = "yes" -a "${have_CXX_compiler}" != "yes" && \
+ AC_MSG_ERROR([C++ compiler required but not found. try installing g++])
+
+AC_MSG_CHECKING([if prometheus remote write backend should be enabled])
+if test "${enable_backend_prometheus_remote_write}" != "no" -a "${have_libprotobuf}" = "yes" -a "${have_libsnappy}" = "yes" \
+ -a "${have_protoc}" = "yes" -a "${have_CXX_compiler}" = "yes"; then
+ enable_backend_prometheus_remote_write="yes"
+ AC_DEFINE([ENABLE_PROMETHEUS_REMOTE_WRITE], [1], [Prometheus remote write API usability])
+ OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS="${PROTOBUF_CFLAGS} ${SNAPPY_CFLAGS} -Iexporting/prometheus/remote_write"
+ CXX11FLAG="-std=c++11"
+ OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS="${PROTOBUF_LIBS} ${SNAPPY_LIBS}"
+else
+ enable_backend_prometheus_remote_write="no"
+fi
+
+AC_MSG_RESULT([${enable_backend_prometheus_remote_write}])
+AM_CONDITIONAL([ENABLE_BACKEND_PROMETHEUS_REMOTE_WRITE], [test "${enable_backend_prometheus_remote_write}" = "yes"])
+
+
+# -----------------------------------------------------------------------------
+# MongoDB backend - libmongoc
+
+PKG_CHECK_MODULES(
+ [LIBMONGOC],
+ [libmongoc-1.0 >= 1.7],
+ [have_libmongoc=yes],
+ [have_libmongoc=no]
+)
+
+test "${enable_backend_mongodb}" = "yes" -a "${have_libmongoc}" != "yes" && \
+ AC_MSG_ERROR([libmongoc required but not found. try installing mongoc])
+
+AC_MSG_CHECKING([if mongodb backend should be enabled])
+if test "${enable_backend_mongodb}" != "no" -a "${have_libmongoc}" = "yes"; then
+ enable_backend_mongodb="yes"
+ AC_DEFINE([HAVE_MONGOC], [1], [libmongoc usability])
+ OPTIONAL_MONGOC_CFLAGS="${LIBMONGOC_CFLAGS}"
+ OPTIONAL_MONGOC_LIBS="${LIBMONGOC_LIBS}"
+else
+ enable_backend_mongodb="no"
+fi
+
+AC_MSG_RESULT([${enable_backend_mongodb}])
+AM_CONDITIONAL([ENABLE_BACKEND_MONGODB], [test "${enable_backend_mongodb}" = "yes"])
+
+
+# -----------------------------------------------------------------------------
+# check for setns() - cgroup-network
+
+AC_CHECK_FUNC([setns])
+AC_MSG_CHECKING([if cgroup-network can be enabled])
+if test "$ac_cv_func_setns" = "yes" ; then
+ have_setns="yes"
+ AC_DEFINE([HAVE_SETNS], [1], [Define 1 if you have setns() function])
+else
+ have_setns="no"
+fi
+AC_MSG_RESULT([${have_setns}])
+AM_CONDITIONAL([ENABLE_PLUGIN_CGROUP_NETWORK], [test "${have_setns}" = "yes"])
+
+
+# -----------------------------------------------------------------------------
+# Link-Time-Optimization
+
+if test "${enable_lto}" != "no"; then
+ opt="-flto"
+ AX_CHECK_COMPILE_FLAG(${opt}, [have_lto=yes], [have_lto=no])
+fi
+if test "${have_lto}" = "yes"; then
+ oCFLAGS="${CFLAGS}"
+ CFLAGS="${CFLAGS} -flto"
+ ac_cv_c_lto_cross_compile="${enable_lto}"
+ test "${ac_cv_c_lto_cross_compile}" != "yes" && ac_cv_c_lto_cross_compile="no"
+ AC_C_LTO
+ CFLAGS="${oCFLAGS}"
+ test "${ac_cv_c_lto}" != "yes" && have_lto="no"
+fi
+test "${enable_lto}" = "yes" -a "${have_lto}" != "yes" && \
+ AC_MSG_ERROR([LTO is required but is not available.])
+AC_MSG_CHECKING([if LTO should be enabled])
+if test "${enable_lto}" != "no" -a "${have_lto}" = "yes"; then
+ enable_lto="yes"
+ CFLAGS="${CFLAGS} -flto"
+else
+ enable_lto="no"
+fi
+AC_MSG_RESULT([${enable_lto}])
+
+
+# -----------------------------------------------------------------------------
+
+AM_CONDITIONAL([ENABLE_CXX_LINKER], [test "${enable_backend_kinesis}" = "yes" \
+ -o "${enable_exporting_pubsub}" = "yes" \
+ -o "${enable_backend_prometheus_remote_write}" = "yes"])
+
+AC_DEFINE_UNQUOTED([NETDATA_USER], ["${with_user}"], [use this user to drop privileges])
+
+varlibdir="${localstatedir}/lib/netdata"
+registrydir="${localstatedir}/lib/netdata/registry"
+cachedir="${localstatedir}/cache/netdata"
+chartsdir="${libexecdir}/netdata/charts.d"
+nodedir="${libexecdir}/netdata/node.d"
+pythondir="${libexecdir}/netdata/python.d"
+configdir="${sysconfdir}/netdata"
+libconfigdir="${libdir}/netdata/conf.d"
+logdir="${localstatedir}/log/netdata"
+pluginsdir="${libexecdir}/netdata/plugins.d"
+
+AC_SUBST([build_target])
+AC_SUBST([varlibdir])
+AC_SUBST([registrydir])
+AC_SUBST([cachedir])
+AC_SUBST([chartsdir])
+AC_SUBST([nodedir])
+AC_SUBST([pythondir])
+AC_SUBST([configdir])
+AC_SUBST([libconfigdir])
+AC_SUBST([logdir])
+AC_SUBST([pluginsdir])
+AC_SUBST([webdir])
+
+CFLAGS="${CFLAGS} ${OPTIONAL_MATH_CFLAGS} ${OPTIONAL_NFACCT_CFLAGS} ${OPTIONAL_ZLIB_CFLAGS} ${OPTIONAL_UUID_CFLAGS} \
+ ${OPTIONAL_LIBCAP_CFLAGS} ${OPTIONAL_IPMIMONITORING_CFLAGS} ${OPTIONAL_CUPS_CFLAGS} ${OPTIONAL_XENSTAT_CFLAGS} \
+ ${OPTIONAL_KINESIS_CFLAGS} ${OPTIONAL_PUBSUB_CFLAGS} ${OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS} \
+ ${OPTIONAL_MONGOC_CFLAGS} ${LWS_CFLAGS} ${OPTIONAL_JSONC_STATIC_CFLAGS} ${OPTIONAL_BPF_CFLAGS} ${OPTIONAL_JUDY_CFLAGS}"
+
+CXXFLAGS="${CFLAGS} ${CXX11FLAG}"
+
+CPPFLAGS="\
+ -DTARGET_OS=${build_target_id} \
+ -DVARLIB_DIR=\"\\\"${varlibdir}\\\"\" \
+ -DCACHE_DIR=\"\\\"${cachedir}\\\"\" \
+ -DCONFIG_DIR=\"\\\"${configdir}\\\"\" \
+ -DLIBCONFIG_DIR=\"\\\"${libconfigdir}\\\"\" \
+ -DLOG_DIR=\"\\\"${logdir}\\\"\" \
+ -DPLUGINS_DIR=\"\\\"${pluginsdir}\\\"\" \
+ -DRUN_DIR=\"\\\"${localstatedir}/run/netdata\\\"\" \
+ -DWEB_DIR=\"\\\"${webdir}\\\"\" \
+"
+
+AC_SUBST([OPTIONAL_MATH_CFLAGS])
+AC_SUBST([OPTIONAL_MATH_LIBS])
+AC_SUBST([OPTIONAL_UV_LIBS])
+AC_SUBST([OPTIONAL_LZ4_LIBS])
+AC_SUBST([OPTIONAL_JUDY_CFLAGS])
+AC_SUBST([OPTIONAL_JUDY_LIBS])
+AC_SUBST([OPTIONAL_SSL_LIBS])
+AC_SUBST([OPTIONAL_JSONC_LIBS])
+AC_SUBST([OPTIONAL_NFACCT_CFLAGS])
+AC_SUBST([OPTIONAL_NFACCT_LIBS])
+AC_SUBST([OPTIONAL_ZLIB_CFLAGS])
+AC_SUBST([OPTIONAL_ZLIB_LIBS])
+AC_SUBST([OPTIONAL_UUID_CFLAGS])
+AC_SUBST([OPTIONAL_UUID_LIBS])
+AC_SUBST([OPTIONAL_BPF_CFLAGS])
+AC_SUBST([OPTIONAL_BPF_LIBS])
+AC_SUBST([OPTIONAL_MQTT_LIBS])
+AC_SUBST([OPTIONAL_LIBCAP_CFLAGS])
+AC_SUBST([OPTIONAL_LIBCAP_LIBS])
+AC_SUBST([OPTIONAL_IPMIMONITORING_CFLAGS])
+AC_SUBST([OPTIONAL_IPMIMONITORING_LIBS])
+AC_SUBST([OPTIONAL_CUPS_CFLAGS])
+AC_SUBST([OPTIONAL_CUPS_LIBS])
+AC_SUBST([OPTIONAL_XENSTAT_CFLAGS])
+AC_SUBST([OPTIONAL_XENSTAT_LIBS])
+AC_SUBST([OPTIONAL_KINESIS_CFLAGS])
+AC_SUBST([OPTIONAL_KINESIS_LIBS])
+AC_SUBST([OPTIONAL_PUBSUB_CFLAGS])
+AC_SUBST([OPTIONAL_PUBSUB_LIBS])
+AC_SUBST([OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS])
+AC_SUBST([OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS])
+AC_SUBST([OPTIONAL_MONGOC_CFLAGS])
+AC_SUBST([OPTIONAL_MONGOC_LIBS])
+AC_SUBST([OPTIONAL_LWS_LIBS])
+
+# -----------------------------------------------------------------------------
+# Check if cmocka is available - needed for unit testing
+
+AC_ARG_ENABLE(
+ [unit-tests],
+ [AS_HELP_STRING([--disable-unit-tests],
+ [Disables building and running the unit tests suite])],
+ [],
+ [enable_unit_tests="yes"]
+)
+
+
+PKG_CHECK_MODULES(
+ [CMOCKA],
+ [cmocka],
+ [have_cmocka="yes"],
+ [AC_MSG_NOTICE([CMocka not found on the system. Unit tests disabled])]
+)
+AM_CONDITIONAL([ENABLE_UNITTESTS], [test "${enable_unit_tests}" = "yes" -a "${have_cmocka}" = "yes" ])
+AC_SUBST([ENABLE_UNITTESTS])
+
+TEST_CFLAGS="${CFLAGS} ${CMOCKA_CFLAGS}"
+TEST_LIBS="${CMOCKA_LIBS}"
+
+AC_SUBST([TEST_CFLAGS])
+AC_SUBST([TEST_LIBS])
+
+# -----------------------------------------------------------------------------
+# save configure options for build info
+AC_DEFINE_UNQUOTED(
+ [CONFIGURE_COMMAND],
+ ["$ac_configure_args"],
+ [options passed to configure script]
+)
+
+AC_CONFIG_FILES([
+ Makefile
+ netdata.spec
+ backends/graphite/Makefile
+ backends/json/Makefile
+ backends/Makefile
+ backends/opentsdb/Makefile
+ backends/prometheus/Makefile
+ backends/prometheus/remote_write/Makefile
+ backends/aws_kinesis/Makefile
+ backends/mongodb/Makefile
+ collectors/Makefile
+ collectors/apps.plugin/Makefile
+ collectors/cgroups.plugin/Makefile
+ collectors/charts.d.plugin/Makefile
+ collectors/checks.plugin/Makefile
+ collectors/diskspace.plugin/Makefile
+ collectors/fping.plugin/Makefile
+ collectors/ioping.plugin/Makefile
+ collectors/freebsd.plugin/Makefile
+ collectors/freeipmi.plugin/Makefile
+ collectors/cups.plugin/Makefile
+ collectors/idlejitter.plugin/Makefile
+ collectors/macos.plugin/Makefile
+ collectors/nfacct.plugin/Makefile
+ collectors/node.d.plugin/Makefile
+ collectors/plugins.d/Makefile
+ collectors/proc.plugin/Makefile
+ collectors/python.d.plugin/Makefile
+ collectors/slabinfo.plugin/Makefile
+ collectors/statsd.plugin/Makefile
+ collectors/ebpf.plugin/Makefile
+ collectors/tc.plugin/Makefile
+ collectors/xenstat.plugin/Makefile
+ collectors/perf.plugin/Makefile
+ daemon/Makefile
+ database/Makefile
+ database/engine/Makefile
+ database/engine/metadata_log/Makefile
+ diagrams/Makefile
+ exporting/Makefile
+ exporting/graphite/Makefile
+ exporting/json/Makefile
+ exporting/opentsdb/Makefile
+ exporting/prometheus/Makefile
+ exporting/prometheus/remote_write/Makefile
+ exporting/aws_kinesis/Makefile
+ exporting/pubsub/Makefile
+ exporting/mongodb/Makefile
+ exporting/tests/Makefile
+ health/Makefile
+ health/notifications/Makefile
+ libnetdata/Makefile
+ libnetdata/tests/Makefile
+ libnetdata/adaptive_resortable_list/Makefile
+ libnetdata/avl/Makefile
+ libnetdata/buffer/Makefile
+ libnetdata/clocks/Makefile
+ libnetdata/config/Makefile
+ libnetdata/dictionary/Makefile
+ libnetdata/ebpf/Makefile
+ libnetdata/eval/Makefile
+ libnetdata/locks/Makefile
+ libnetdata/log/Makefile
+ libnetdata/popen/Makefile
+ libnetdata/procfile/Makefile
+ libnetdata/simple_pattern/Makefile
+ libnetdata/socket/Makefile
+ libnetdata/statistical/Makefile
+ libnetdata/storage_number/Makefile
+ libnetdata/storage_number/tests/Makefile
+ libnetdata/threads/Makefile
+ libnetdata/url/Makefile
+ libnetdata/json/Makefile
+ libnetdata/health/Makefile
+ registry/Makefile
+ streaming/Makefile
+ system/Makefile
+ tests/Makefile
+ web/Makefile
+ web/api/Makefile
+ web/api/badges/Makefile
+ web/api/exporters/Makefile
+ web/api/exporters/shell/Makefile
+ web/api/exporters/prometheus/Makefile
+ web/api/formatters/Makefile
+ web/api/formatters/csv/Makefile
+ web/api/formatters/json/Makefile
+ web/api/formatters/ssv/Makefile
+ web/api/formatters/value/Makefile
+ web/api/queries/Makefile
+ web/api/queries/average/Makefile
+ web/api/queries/des/Makefile
+ web/api/queries/incremental_sum/Makefile
+ web/api/queries/max/Makefile
+ web/api/queries/median/Makefile
+ web/api/queries/min/Makefile
+ web/api/queries/ses/Makefile
+ web/api/queries/stddev/Makefile
+ web/api/queries/sum/Makefile
+ web/api/health/Makefile
+ web/gui/Makefile
+ web/server/Makefile
+ web/server/static/Makefile
+ claim/Makefile
+ aclk/legacy/Makefile
+ spawn/Makefile
+ parser/Makefile
+])
+AC_OUTPUT
+
+test "${with_math}" != "yes" && AC_MSG_WARN([You are building without math. math allows accurate calculations. It should be enabled.]) || :
+test "${with_zlib}" != "yes" && AC_MSG_WARN([You are building without zlib. zlib allows netdata to transfer a lot less data with web clients. It should be enabled.]) || :
diff --git a/contrib/README.md b/contrib/README.md
new file mode 100644
index 0000000..e253efa
--- /dev/null
+++ b/contrib/README.md
@@ -0,0 +1,61 @@
+<!--
+title: "Netdata contrib"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/contrib/README.md
+-->
+
+# Netdata contrib
+
+## Building .deb packages
+
+The `contrib/debian/` directory contains basic rules to build a
+Debian package. It has been tested on Debian Jessie and Wheezy,
+but should also work, possibly with minor changes, on other
+dpkg-based systems such as Ubuntu or Mint.
+
+To build Netdata for a Debian Jessie system, the debian directory
+has to be available in the root of the Netdata source tree. The
+easiest way to do this is to create a symlink from the top-level
+directory:
+
+```sh
+ln -s contrib/debian
+```
+
+Edit the `debian/changelog` file to reflect the package version and
+the build time:
+
+```sh
+netdata (1.21.0) unstable; urgency=medium
+
+ * Initial Release
+
+ -- Netdata Builder <bot@netdata.cloud> Tue, 12 May 2020 10:36:52 +0200
+```
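+
+If you prefer not to edit the file by hand: `date -R` prints a timestamp
+in the RFC 2822 format the changelog trailer expects, and the `dch` tool
+from the `devscripts` package can update the entry for you:
+
+```sh
+date -R                                    # e.g. Tue, 12 May 2020 10:36:52 +0200
+dch --newversion 1.21.0 "Initial Release"  # updates debian/changelog in place
+```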
+
+Then build the debian package:
+
+```sh
+dpkg-buildpackage -us -uc -rfakeroot
+```
+
+This creates the packages in the parent directory, from where you
+can install them manually with dpkg.
+
+```sh
+ls -1 ../*.deb
+../netdata_1.21.0_amd64.deb
+../netdata-dbgsym_1.21.0_amd64.deb
+../netdata-plugin-cups_1.21.0_amd64.deb
+../netdata-plugin-cups-dbgsym_1.21.0_amd64.deb
+../netdata-plugin-freeipmi_1.21.0_amd64.deb
+../netdata-plugin-freeipmi-dbgsym_1.21.0_amd64.deb
+sudo dpkg -i ../netdata_1.21.0_amd64.deb
+```
+
+### Reinstalling Netdata
+
+The recommended way to upgrade Netdata packages built from this
+source is to remove the current package from your system, then
+install the new package. Upgrading in place is known not to work
+cleanly on Wheezy; Jessie should behave as expected.
+
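+For example, using the package name and version shown above (both are
+just the values from the earlier example):
+
+```sh
+sudo dpkg -r netdata                      # remove the currently installed package
+sudo dpkg -i ../netdata_1.21.0_amd64.deb  # install the newly built one
+```
+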
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcontrib%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/contrib/debian/changelog b/contrib/debian/changelog
new file mode 100644
index 0000000..5ac1edb
--- /dev/null
+++ b/contrib/debian/changelog
@@ -0,0 +1,6 @@
+netdata (PREVIOUS_PACKAGE_VERSION) unstable; urgency=medium
+
+ * Initial Release
+
+ -- Netdata Builder <bot@netdata.cloud> PREVIOUS_PACKAGE_DATE
+
diff --git a/contrib/debian/compat b/contrib/debian/compat
new file mode 100644
index 0000000..ec63514
--- /dev/null
+++ b/contrib/debian/compat
@@ -0,0 +1 @@
+9
diff --git a/contrib/debian/control b/contrib/debian/control
new file mode 100644
index 0000000..2659c38
--- /dev/null
+++ b/contrib/debian/control
@@ -0,0 +1,62 @@
+Source: netdata
+Build-Depends: debhelper (>= 9),
+ dh-autoreconf,
+ dh-systemd (>= 1.5),
+ dpkg-dev (>= 1.13.19),
+ zlib1g-dev,
+ uuid-dev,
+ libelf-dev,
+ libuv1-dev,
+ liblz4-dev,
+ libjudy-dev,
+ libssl-dev,
+ libmnl-dev,
+ libjson-c-dev,
+ libcups2-dev,
+ libipmimonitoring-dev,
+ libnetfilter-acct-dev,
+ libsnappy-dev,
+ libprotobuf-dev,
+ libprotoc-dev,
+ cmake,
+ autogen,
+ autoconf,
+ automake,
+ pkg-config,
+ curl,
+ protobuf-compiler
+Section: net
+Priority: optional
+Maintainer: Netdata Builder <bot@netdata.cloud>
+Standards-Version: 3.9.6
+Homepage: https://netdata.cloud
+
+Package: netdata
+Architecture: any
+Depends: adduser,
+ libcap2-bin (>= 1:2.0),
+ lsb-base (>= 3.1-23.2),
+ openssl,
+ ${misc:Depends},
+ ${shlibs:Depends}
+Pre-Depends: dpkg (>= 1.17.14)
+Description: real-time charts for system monitoring
+ Netdata is a daemon that collects data in realtime (per second)
+ and presents a web site to view and analyze them. The presentation
+ is also real-time and full of interactive charts that precisely
+ render all collected values.
+
+Package: netdata-plugin-cups
+Architecture: any
+Depends: cups,
+ netdata (>= ${source:Version})
+Description: The Common Unix Printing System plugin for metrics collection from cupsd
+
+Package: netdata-plugin-freeipmi
+Architecture: any
+Depends: freeipmi,
+ netdata (= ${source:Version})
+Description: FreeIPMI - The Intelligent Platform Management System.
+ The IPMI specification defines a set of interfaces for platform management.
+ It is implemented by a number vendors for system management. The features of IPMI that most users will be interested in
+ are sensor monitoring, system event monitoring, power control, and serial-over-LAN (SOL).
diff --git a/contrib/debian/copyright b/contrib/debian/copyright
new file mode 100644
index 0000000..085580e
--- /dev/null
+++ b/contrib/debian/copyright
@@ -0,0 +1,10 @@
+Format: http://www.debian.org/doc/packaging-manuals/copyright-format/1.0/
+Upstream-Name: Netdata
+Upstream-Contact: Costa Tsaousis <costa@tsaousis.gr>
+Source: https://github.com/netdata/netdata
+
+Files: *
+Copyright: 2014-2016, Costa Tsaousis
+License: GPL-3+
+ On Debian systems, the complete text of the GNU General Public
+ License version 3 can be found in /usr/share/common-licenses/GPL-3.
diff --git a/contrib/debian/install_go.sh b/contrib/debian/install_go.sh
new file mode 100755
index 0000000..22ff691
--- /dev/null
+++ b/contrib/debian/install_go.sh
@@ -0,0 +1,98 @@
+#!/usr/bin/env bash
+
+GO_PACKAGE_VERSION="$1"
+LIB_DIR="$2"
+LIBEXEC_DIR="$3"
+
+# ############################################################
+# Package Go within netdata (TBD: Package it separately)
+safe_sha256sum() {
+ # Within the context of the installer, we only use the -c option that is common between the two commands.
+ # We will have to reconsider if we start using non-common options.
+ if command -v sha256sum >/dev/null 2>&1; then
+ sha256sum "$@"
+ elif command -v shasum >/dev/null 2>&1; then
+ shasum -a 256 "$@"
+ else
+ fatal "I could not find a suitable checksum binary to use"
+ fi
+}
+
+download_go() {
+ url="${1}"
+ dest="${2}"
+
+ if command -v curl >/dev/null 2>&1; then
+ curl -sSL --connect-timeout 10 --retry 3 "${url}" > "${dest}"
+ elif command -v wget >/dev/null 2>&1; then
+ wget -T 15 -O - "${url}" > "${dest}"
+ else
+ echo >&2
+ echo >&2 "Downloading go.d plugin from '${url}' failed because of missing mandatory packages."
+ echo >&2 "Either add packages or disable it by issuing '--disable-go' in the installer"
+ echo >&2
+ exit 1
+ fi
+}
+
+install_go() {
+ ARCH_MAP=(
+ 'i386::386'
+ 'i686::386'
+ 'x86_64::amd64'
+ 'aarch64::arm64'
+ 'armv64::arm64'
+ 'armv6l::arm'
+ 'armv7l::arm'
+ 'armv5tel::arm'
+ )
+
+ if [ -z "${NETDATA_DISABLE_GO+x}" ]; then
+ echo >&2 "Install go.d.plugin"
+ ARCH=$(uname -m)
+ OS=$(uname -s | tr '[:upper:]' '[:lower:]')
+
+ for index in "${ARCH_MAP[@]}" ; do
+ KEY="${index%%::*}"
+ VALUE="${index##*::}"
+ if [ "$KEY" = "$ARCH" ]; then
+ ARCH="${VALUE}"
+ break
+ fi
+ done
+ tmp=$(mktemp -d /tmp/netdata-go-XXXXXX)
+ GO_PACKAGE_BASENAME="go.d.plugin-${GO_PACKAGE_VERSION}.${OS}-${ARCH}.tar.gz"
+ download_go "https://github.com/netdata/go.d.plugin/releases/download/${GO_PACKAGE_VERSION}/${GO_PACKAGE_BASENAME}" "${tmp}/${GO_PACKAGE_BASENAME}"
+ download_go "https://github.com/netdata/go.d.plugin/releases/download/${GO_PACKAGE_VERSION}/config.tar.gz" "${tmp}/config.tar.gz"
+
+ if [ ! -f "${tmp}/${GO_PACKAGE_BASENAME}" ] || [ ! -f "${tmp}/config.tar.gz" ] || [ ! -s "${tmp}/config.tar.gz" ] || [ ! -s "${tmp}/${GO_PACKAGE_BASENAME}" ]; then
+ echo >&2 "Either check the error or consider disabling it by issuing '--disable-go' in the installer"
+ echo >&2
+ return 1
+ fi
+
+ grep "${GO_PACKAGE_BASENAME}\$" "packaging/go.d.checksums" > "${tmp}/sha256sums.txt" 2>/dev/null
+ grep "config.tar.gz" "packaging/go.d.checksums" >> "${tmp}/sha256sums.txt" 2>/dev/null
+
+ # Checksum validation
+ if ! (cd "${tmp}" && safe_sha256sum -c "sha256sums.txt"); then
+
+ echo >&2 "go.d plugin checksum validation failure."
+ echo >&2 "Either check the error or consider disabling it by issuing '--disable-go' in the installer"
+ echo >&2
+
+ echo "go.d.plugin package files checksum validation failed."
+ exit 1
+ fi
+
+ # Install files
+ tar -xf "${tmp}/config.tar.gz" -C "${LIB_DIR}/conf.d/"
+ tar xf "${tmp}/${GO_PACKAGE_BASENAME}"
+ mv "${GO_PACKAGE_BASENAME/\.tar\.gz/}" "${LIBEXEC_DIR}/plugins.d/go.d.plugin"
+
+ rm -rf "${tmp}"
+ fi
+ return 0
+}
+
+install_go
diff --git a/contrib/debian/netdata.default b/contrib/debian/netdata.default
new file mode 100644
index 0000000..1d9a9c4
--- /dev/null
+++ b/contrib/debian/netdata.default
@@ -0,0 +1,7 @@
+# Extra arguments to pass to netdata
+#
+#EXTRA_OPTS=""
+#
+# Location of pidfile. Only used on SysV init based systems.
+#
+#PIDFILE="/var/run/netdata/netdata.pid"
diff --git a/contrib/debian/netdata.docs b/contrib/debian/netdata.docs
new file mode 100644
index 0000000..1b763b1
--- /dev/null
+++ b/contrib/debian/netdata.docs
@@ -0,0 +1 @@
+CHANGELOG.md
diff --git a/contrib/debian/netdata.init b/contrib/debian/netdata.init
new file mode 100755
index 0000000..c2706ca
--- /dev/null
+++ b/contrib/debian/netdata.init
@@ -0,0 +1,56 @@
+#!/bin/sh
+# Start/stop the netdata daemon.
+#
+### BEGIN INIT INFO
+# Provides: netdata
+# Required-Start: $remote_fs
+# Required-Stop: $remote_fs
+# Should-Start: $network
+# Should-Stop: $network
+# Default-Start: 2 3 4 5
+# Default-Stop:
+# Short-Description: Real-time charts for system monitoring
+# Description: Netdata is a daemon that collects data in realtime (per second)
+# and presents a web site to view and analyze them. The presentation
+# is also real-time and full of interactive charts that precisely
+# render all collected values.
+### END INIT INFO
+
+PATH=/bin:/usr/bin:/sbin:/usr/sbin
+DESC="netdata daemon"
+NAME=netdata
+DAEMON=/usr/sbin/netdata
+PIDFILE=/var/run/netdata/netdata.pid
+SCRIPTNAME=/etc/init.d/"$NAME"
+
+test -f $DAEMON || exit 0
+
+. /lib/lsb/init-functions
+
+[ -r /etc/default/netdata ] && . /etc/default/netdata
+
+case "$1" in
+start) log_daemon_msg "Starting real-time system monitoring" "netdata"
+ start_daemon -p $PIDFILE $DAEMON -P $PIDFILE $EXTRA_OPTS
+ log_end_msg $?
+ ;;
+stop) log_daemon_msg "Stopping real-time system monitoring" "netdata"
+ killproc -p $PIDFILE $DAEMON
+ RETVAL=$?
+ [ $RETVAL -eq 0 ] && [ -e "$PIDFILE" ] && rm -f $PIDFILE
+ log_end_msg $RETVAL
+ # wait for plugins to exit
+ sleep 1
+ ;;
+restart|force-reload) log_daemon_msg "Restarting real-time system monitoring" "netdata"
+ $0 stop
+ $0 start
+ ;;
+status)
+ status_of_proc -p $PIDFILE $DAEMON $NAME && exit 0 || exit $?
+ ;;
+*) log_action_msg "Usage: $SCRIPTNAME {start|stop|status|restart|force-reload}"
+ exit 2
+ ;;
+esac
+exit 0
diff --git a/contrib/debian/netdata.install b/contrib/debian/netdata.install
new file mode 100644
index 0000000..45d42b6
--- /dev/null
+++ b/contrib/debian/netdata.install
@@ -0,0 +1 @@
+debian/netdata.conf /etc/netdata/
diff --git a/contrib/debian/netdata.lintian-overrides b/contrib/debian/netdata.lintian-overrides
new file mode 100644
index 0000000..45b2d86
--- /dev/null
+++ b/contrib/debian/netdata.lintian-overrides
@@ -0,0 +1,16 @@
+
+# See Debian policy 10.9. apps.plugin has extra capabilities, so don't let
+# normal users run it.
+netdata: non-standard-executable-perm usr/lib/*/netdata/plugins.d/apps.plugin 0754 != 0755
+
+
+# FontAwesome is at least in the fonts-font-awesome package, but this is
+# not available in wheezy. glyphicons-halflings-regular isn't currently in
+# a Debian package. Therefore don't complain about shipping them with netdata
+# for the time being.
+netdata: duplicate-font-file usr/share/netdata/fonts/*
+netdata: font-in-non-font-package usr/share/netdata/fonts/*
+
+# Files here are marked as conffiles so that local updates to the html files
+# aren't clobbered on upgrade.
+netdata: non-etc-file-marked-as-conffile var/lib/netdata/www/*
diff --git a/contrib/debian/netdata.postinst b/contrib/debian/netdata.postinst
new file mode 100644
index 0000000..eb9104b
--- /dev/null
+++ b/contrib/debian/netdata.postinst
@@ -0,0 +1,74 @@
+#! /bin/sh
+
+set -e
+
+dpkg-maintscript-helper dir_to_symlink \
+ /var/lib/netdata/www/.well-known /usr/share/netdata/www/.well-known 1.18.1~ netdata -- "$@"
+dpkg-maintscript-helper dir_to_symlink \
+ /var/lib/netdata/www/css /usr/share/netdata/www/css 1.18.1~ netdata -- "$@"
+dpkg-maintscript-helper dir_to_symlink \
+ /var/lib/netdata/www/fonts /usr/share/netdata/www/fonts 1.18.1~ netdata -- "$@"
+dpkg-maintscript-helper dir_to_symlink \
+ /var/lib/netdata/www/images /usr/share/netdata/www/images 1.18.1~ netdata -- "$@"
+dpkg-maintscript-helper dir_to_symlink \
+ /var/lib/netdata/www/lib /usr/share/netdata/www/lib 1.18.1~ netdata -- "$@"
+dpkg-maintscript-helper dir_to_symlink \
+ /var/lib/netdata/www/static /usr/share/netdata/www/static 1.18.1~ netdata -- "$@"
+
+case "$1" in
+ configure)
+ if [ -z "$2" ]; then
+ if ! getent group netdata > /dev/null; then
+ addgroup --quiet --system netdata
+ fi
+
+ if ! getent passwd netdata > /dev/null; then
+ adduser --quiet --system --ingroup netdata --home /var/lib/netdata --no-create-home netdata
+ fi
+
+ if ! dpkg-statoverride --list /var/lib/netdata > /dev/null 2>&1; then
+ dpkg-statoverride --update --add netdata netdata 0755 /var/lib/netdata
+ fi
+
+ if ! dpkg-statoverride --list /var/lib/netdata/www > /dev/null 2>&1; then
+ dpkg-statoverride --update --add root netdata 0755 /var/lib/netdata/www
+ fi
+
+ if ! dpkg-statoverride --list /var/cache/netdata > /dev/null 2>&1; then
+ dpkg-statoverride --update --add netdata netdata 0755 /var/cache/netdata
+ fi
+
+ if ! dpkg-statoverride --list /var/run/netdata > /dev/null 2>&1; then
+ dpkg-statoverride --update --add netdata netdata 0755 /var/run/netdata
+ fi
+
+ if ! dpkg-statoverride --list /var/log/netdata > /dev/null 2>&1; then
+ dpkg-statoverride --update --add netdata adm 02750 /var/log/netdata
+ fi
+
+ fi
+
+ dpkg-statoverride --force --update --add root netdata 0775 /var/lib/netdata/registry > /dev/null 2>&1
+
+ chown -R root:netdata /usr/share/netdata
+ chown -R root:netdata /usr/libexec/netdata/plugins.d
+ chown -R root:netdata /var/lib/netdata/www
+ setcap cap_dac_read_search,cap_sys_ptrace+ep /usr/libexec/netdata/plugins.d/apps.plugin
+
+ chmod 4750 /usr/libexec/netdata/plugins.d/perf.plugin
+ chmod 4750 /usr/libexec/netdata/plugins.d/slabinfo.plugin
+ chmod 4750 /usr/libexec/netdata/plugins.d/cgroup-network
+ chmod 4750 /usr/libexec/netdata/plugins.d/nfacct.plugin
+
+ # Workaround if system does not have ebpf.plugin
+ chmod -f 4750 /usr/libexec/netdata/plugins.d/ebpf.plugin || true
+
+ # Workaround for other plugins not installed directly by this package
+ chmod -f 4750 /usr/libexec/netdata/plugins.d/freeipmi.plugin || true
+
+ ;;
+esac
+
+#DEBHELPER#
+
+exit 0
diff --git a/contrib/debian/netdata.postrm b/contrib/debian/netdata.postrm
new file mode 100644
index 0000000..5644e22
--- /dev/null
+++ b/contrib/debian/netdata.postrm
@@ -0,0 +1,53 @@
+#!/bin/sh
+
+set -e
+
+dpkg-maintscript-helper dir_to_symlink \
+ /var/lib/netdata/www/.well-known /usr/share/netdata/www/.well-known 1.18.1~ netdata -- "$@"
+dpkg-maintscript-helper dir_to_symlink \
+ /var/lib/netdata/www/css /usr/share/netdata/www/css 1.18.1~ netdata -- "$@"
+dpkg-maintscript-helper dir_to_symlink \
+ /var/lib/netdata/www/fonts /usr/share/netdata/www/fonts 1.18.1~ netdata -- "$@"
+dpkg-maintscript-helper dir_to_symlink \
+ /var/lib/netdata/www/images /usr/share/netdata/www/images 1.18.1~ netdata -- "$@"
+dpkg-maintscript-helper dir_to_symlink \
+ /var/lib/netdata/www/lib /usr/share/netdata/www/lib 1.18.1~ netdata -- "$@"
+dpkg-maintscript-helper dir_to_symlink \
+ /var/lib/netdata/www/static /usr/share/netdata/www/static 1.18.1~ netdata -- "$@"
+
+case "$1" in
+ remove) ;;
+
+ purge)
+ if dpkg-statoverride --list | grep -qw /var/cache/netdata; then
+ dpkg-statoverride --remove /var/cache/netdata
+ fi
+
+ if dpkg-statoverride --list | grep -qw /var/lib/netdata/www; then
+ dpkg-statoverride --remove /var/lib/netdata/www
+ fi
+
+ if dpkg-statoverride --list | grep -qw /var/lib/netdata/registry; then
+ dpkg-statoverride --remove /var/lib/netdata/registry
+ fi
+
+ if dpkg-statoverride --list | grep -qw /var/lib/netdata; then
+ dpkg-statoverride --remove /var/lib/netdata
+ fi
+
+ if dpkg-statoverride --list | grep -qw /var/run/netdata; then
+ dpkg-statoverride --remove /var/run/netdata
+ fi
+
+ if dpkg-statoverride --list | grep -qw /var/log/netdata; then
+ dpkg-statoverride --remove /var/log/netdata
+ fi
+ ;;
+
+ *) ;;
+
+esac
+
+#DEBHELPER#
+
+exit 0
diff --git a/contrib/debian/netdata.preinst b/contrib/debian/netdata.preinst
new file mode 100644
index 0000000..3bbdea0
--- /dev/null
+++ b/contrib/debian/netdata.preinst
@@ -0,0 +1,18 @@
+#!/bin/sh
+
+set -e
+
+dpkg-maintscript-helper dir_to_symlink \
+ /var/lib/netdata/www/.well-known /usr/share/netdata/www/.well-known 1.18.1~ netdata -- "$@"
+dpkg-maintscript-helper dir_to_symlink \
+ /var/lib/netdata/www/css /usr/share/netdata/www/css 1.18.1~ netdata -- "$@"
+dpkg-maintscript-helper dir_to_symlink \
+ /var/lib/netdata/www/fonts /usr/share/netdata/www/fonts 1.18.1~ netdata -- "$@"
+dpkg-maintscript-helper dir_to_symlink \
+ /var/lib/netdata/www/images /usr/share/netdata/www/images 1.18.1~ netdata -- "$@"
+dpkg-maintscript-helper dir_to_symlink \
+ /var/lib/netdata/www/lib /usr/share/netdata/www/lib 1.18.1~ netdata -- "$@"
+dpkg-maintscript-helper dir_to_symlink \
+ /var/lib/netdata/www/static /usr/share/netdata/www/static 1.18.1~ netdata -- "$@"
+
+#DEBHELPER#
diff --git a/contrib/debian/rules b/contrib/debian/rules
new file mode 100755
index 0000000..9bfd057
--- /dev/null
+++ b/contrib/debian/rules
@@ -0,0 +1,127 @@
+#!/usr/bin/make -f
+
+# Top-level staging directories used while assembling the packages;
+# the plugin location under /usr/lib is set at configure time via --libdir.
+TOP = $(CURDIR)/debian/netdata
+TEMPTOP = $(CURDIR)/debian/tmp
+
+BASE_CONFIG = system/netdata.conf
+
+SYSTEMD_VERSION = $(shell /bin/sh -c "systemd --version 2>&1 | head -n 1 | cut -f 2 -d ' '")
+
+ifeq ($(shell test $(SYSTEMD_VERSION) -ge 235 && echo "1"), 1)
+SYSTEMD_UNIT = system/netdata.service.v235
+else
+SYSTEMD_UNIT = system/netdata.service
+endif
+
+%:
+ # For jessie and beyond
+ #
+ dh $@ --with autoreconf,systemd
+
+ # For wheezy or other non-systemd distributions use the following. You
+ # should also see contrib/README.md which gives details of updates to
+ # make to debian/control.
+ #
+ #dh $@ --with autoreconf
+
+override_dh_installinit:
+ echo "SystemD Version: $(SYSTEMD_VERSION)"
+ echo "Using SystemD Unit: $(SYSTEMD_UNIT)"
+ cp -v $(SYSTEMD_UNIT) debian/netdata.service
+
+ dh_installinit
+
+override_dh_auto_configure:
+ packaging/bundle-mosquitto.sh .
+ packaging/bundle-lws.sh .
+ packaging/bundle-libbpf.sh .
+ autoreconf -ivf
+ dh_auto_configure -- --prefix=/usr --sysconfdir=/etc --localstatedir=/var --libdir=/usr/lib \
+ --libexecdir=/usr/libexec --with-user=netdata --with-math --with-zlib --with-webdir=/var/lib/netdata/www \
+ --with-bundled-lws=externaldeps/libwebsockets
+
+override_dh_install:
+ cp -v $(BASE_CONFIG) debian/netdata.conf
+
+ dh_install
+
+ # Set the CUPS plugin install rule
+ #
+ mkdir -p $(TOP)-plugin-cups/usr/libexec/netdata/plugins.d
+ mv -f $(TEMPTOP)/usr/libexec/netdata/plugins.d/cups.plugin \
+ $(TOP)-plugin-cups/usr/libexec/netdata/plugins.d/cups.plugin
+
+ # Add free IPMI plugin install rules
+ #
+ mkdir -p $(TOP)-plugin-freeipmi/usr/libexec/netdata/plugins.d
+ mv -f $(TEMPTOP)/usr/libexec/netdata/plugins.d/freeipmi.plugin \
+ $(TOP)-plugin-freeipmi/usr/libexec/netdata/plugins.d/freeipmi.plugin
+
+ # Set the rest of the software in the main package
+ #
+ cp -rp $(TEMPTOP)/usr $(TOP)
+ cp -rp $(TEMPTOP)/var $(TOP)
+ cp -rp $(TEMPTOP)/etc $(TOP)
+ mkdir -p "$(TOP)/var/log/netdata"
+ mkdir -p "$(TOP)/var/cache/netdata"
+ mkdir -p "$(TOP)/var/run/netdata"
+
+ # Move files that the local user shouldn't be editing to /usr/share/netdata
+ #
+ packaging/bundle-dashboard.sh . ${TOP}/var/lib/netdata/www
+ mkdir -p "$(TOP)/usr/share/netdata/www"
+ for D in $$(find "$(TOP)/var/lib/netdata/www/" -maxdepth 1 -type d -printf '%f '); do \
+ echo Relocating $$D; \
+ mv "$(TOP)/var/lib/netdata/www/$$D" "$(TOP)/usr/share/netdata/www/$$D"; \
+ ln -s "/usr/share/netdata/www/$$D" "$(TOP)/var/lib/netdata/www/$$D"; \
+ done
+
+ packaging/bundle-ebpf.sh . ${TOP}/usr/libexec/netdata/plugins.d
+
+ # Install go
+ #
+ debian/install_go.sh $$(cat ${CURDIR}/packaging/go.d.version) $(TOP)/usr/lib/netdata $(TOP)/usr/libexec/netdata
+
+override_dh_installdocs:
+ dh_installdocs
+
+ find . \
+ -name README.md \
+ -not -path './.travis/*' \
+ -not -path './debian/*' \
+ -not -path './contrib/*' \
+ -exec cp \
+ --parents \
+ --target $(TOP)/usr/share/doc/netdata/ \
+ {} \;
+
+override_dh_fixperms:
+ dh_fixperms
+
+ # apps.plugin should only be runnable by the netdata user. It will be
+ # given extra capabilities in the postinst script.
+ #
+ chmod 0754 $(TOP)/usr/libexec/netdata/plugins.d/apps.plugin
+ chmod 0754 $(TOP)/usr/libexec/netdata/plugins.d/perf.plugin
+ chmod 0754 $(TOP)/usr/libexec/netdata/plugins.d/slabinfo.plugin
+ chmod 0750 $(TOP)/usr/libexec/netdata/plugins.d/go.d.plugin
+
+ # CUPS plugin package
+ chmod 0750 $(TOP)-plugin-cups/usr/libexec/netdata/plugins.d/cups.plugin
+
+ # freeIPMI plugin package
+ chmod 4750 $(TOP)-plugin-freeipmi/usr/libexec/netdata/plugins.d/freeipmi.plugin
+
+override_dh_installlogrotate:
+ cp system/netdata.logrotate debian/netdata.logrotate
+ dh_installlogrotate
+
+override_dh_clean:
+ dh_clean
+
+ # Tidy up copied/generated files
+ #
+ -[ -r $(CURDIR)/debian/netdata.logrotate ] && rm $(CURDIR)/debian/netdata.logrotate
+ -[ -r $(CURDIR)/debian/netdata.conffiles ] && rm $(CURDIR)/debian/netdata.conffiles
diff --git a/contrib/debian/source/format b/contrib/debian/source/format
new file mode 100644
index 0000000..89ae9db
--- /dev/null
+++ b/contrib/debian/source/format
@@ -0,0 +1 @@
+3.0 (native)
diff --git a/contrib/rhel/build-netdata-rpm.sh b/contrib/rhel/build-netdata-rpm.sh
new file mode 100755
index 0000000..9ce3863
--- /dev/null
+++ b/contrib/rhel/build-netdata-rpm.sh
@@ -0,0 +1,56 @@
+#!/usr/bin/env bash
+
+# docker run -it --rm centos:6.9 /bin/sh
+# yum -y install rpm-build redhat-rpm-config yum-utils autoconf automake curl gcc git libmnl-devel libuuid-devel make pkgconfig zlib-devel
+
+cd "$(dirname "$0")/../../" || exit 1
+# shellcheck disable=SC1091
+source "packaging/installer/functions.sh" || exit 1
+
+set -e
+
+run autoreconf -ivf
+run ./configure --enable-maintainer-mode
+run make dist
+
+typeset version="$(grep PACKAGE_VERSION < config.h | cut -d '"' -f 2)"
+if [[ -z "${version}" ]]; then
+ run_failed "Cannot find netdata version."
+ exit 1
+fi
+
+if [[ "${version//-/}" != "$version" ]]; then
+ # Remove all -* and _* suffixes to be as close as possible to the netdata release
+ typeset versionfix="${version%%-*}"; versionfix="${versionfix%%_*}"
+ # Append the current datetime for a 'unique' build
+ versionfix+="_$(date '+%m%d%H%M%S')"
+ # And issue hints & details on why this failed, and how to fix it
+ run_failed "Current version contains '-' which is forbidden by rpm. You must create a git annotated tag and rerun this script. Example:"
+ run_failed " git tag -a $versionfix -m 'Release by $(id -nu) on $(uname -n)' && $0"
+ exit 1
+fi
+
+
+typeset tgz="netdata-${version}.tar.gz"
+if [[ ! -f "${tgz}" ]]; then
+ run_failed "Cannot find the generated tar.gz file '${tgz}'"
+ exit 1
+fi
+
+typeset srpm="$(run rpmbuild -ts "${tgz}" | cut -d ' ' -f 2)"
+if [[ -z "${srpm}" ]] || ! [[ -f "${srpm}" ]]; then
+ run_failed "Cannot find the generated SRPM file '${srpm}'"
+ exit 1
+fi
+
+#if which yum-builddep 2>/dev/null
+#then
+# run yum-builddep "${srpm}"
+#elif which dnf 2>/dev/null
+#then
+# [ "${UID}" = 0 ] && run dnf builddep "${srpm}"
+#fi
+
+run rpmbuild --rebuild "${srpm}"
+
+run_ok "All done! Packages created in '$(rpm -E '%_rpmdir/%_arch')'"
diff --git a/contrib/sles11/README.md b/contrib/sles11/README.md
new file mode 100644
index 0000000..4cdf7e9
--- /dev/null
+++ b/contrib/sles11/README.md
@@ -0,0 +1,17 @@
+<!--
+title: "Spec to build Netdata RPM for sles 11"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/contrib/sles11/README.md
+-->
+
+# Spec to build Netdata RPM for sles 11
+
+Based on [openSUSE rpm spec](https://build.opensuse.org/package/show/network/netdata) with some
+changes and additions for sles 11 backport, namely:
+
+- init.d script
+- run-time dependency on python ordereddict backport
+- patch for Netdata python.d plugin to work with older python
+- crude hack of notification script to work with bash 3 (email and syslog only, one destination,
+ see comments at the top)
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcontrib%2Fsles11%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/contrib/sles11/alarm-notify-basic.bash3.sh b/contrib/sles11/alarm-notify-basic.bash3.sh
new file mode 100755
index 0000000..df38292
--- /dev/null
+++ b/contrib/sles11/alarm-notify-basic.bash3.sh
@@ -0,0 +1,755 @@
+#!/usr/bin/env bash
+
+# basic version of netdata notifier to work with bash3
+# only mail and syslog destinations are supported, one recipient each
+# - email: DEFAULT_RECIPIENT_EMAIL, "root" by default
+# - syslog: "netdata" with local6 facility; disabled by default
+# - also: setting recipient to "disabled" or "silent" stops notifications for this alert
+
+# in /etc/netdata/health_alarm_notify.conf set something like
+# EMAIL_SENDER="netdata@gesdev-vm.m0.maxidom.ru"
+# SEND_EMAIL="YES"
+# DEFAULT_RECIPIENT_EMAIL="root"
+# SEND_SYSLOG="YES"
+# SYSLOG_FACILITY="local6"
+# DEFAULT_RECIPIENT_SYSLOG="netdata"
+
+# netdata
+# real-time performance and health monitoring, done right!
+# (C) 2017 Costa Tsaousis <costa@tsaousis.gr>
+# GPL v3+
+#
+# Script to send alarm notifications for netdata
+#
+# Supported notification methods:
+# - emails by @ktsaou
+# - syslog messages by @Ferroin
+# - all the rest is pruned :)
+
+# -----------------------------------------------------------------------------
+# testing notifications
+
+if [ \( "${1}" = "test" -o "${2}" = "test" \) -a "${#}" -le 2 ]
+then
+ if [ "${2}" = "test" ]
+ then
+ recipient="${1}"
+ else
+ recipient="${2}"
+ fi
+
+ [ -z "${recipient}" ] && recipient="sysadmin"
+
+ id=1
+ last="CLEAR"
+ test_res=0
+ for x in "WARNING" "CRITICAL" "CLEAR"
+ do
+ echo >&2
+ echo >&2 "# SENDING TEST ${x} ALARM TO ROLE: ${recipient}"
+
+ "${0}" "${recipient}" "$(hostname)" 1 1 "${id}" "$(date +%s)" "test_alarm" "test.chart" "test.family" "${x}" "${last}" 100 90 "${0}" 1 $((0 + id)) "units" "this is a test alarm to verify notifications work" "new value" "old value"
+ if [ $? -ne 0 ]
+ then
+ echo >&2 "# FAILED"
+ test_res=1
+ else
+ echo >&2 "# OK"
+ fi
+
+ last="${x}"
+ id=$((id + 1))
+ done
+
+ exit $test_res
+fi
+
+export PATH="${PATH}:/sbin:/usr/sbin:/usr/local/sbin"
+export LC_ALL=C
+
+# -----------------------------------------------------------------------------
+
+PROGRAM_NAME="$(basename "${0}")"
+
+logdate() {
+ date "+%Y-%m-%d %H:%M:%S"
+}
+
+log() {
+ local status="${1}"
+ shift
+
+ echo >&2 "$(logdate): ${PROGRAM_NAME}: ${status}: ${*}"
+
+}
+
+warning() {
+ log WARNING "${@}"
+}
+
+error() {
+ log ERROR "${@}"
+}
+
+info() {
+ log INFO "${@}"
+}
+
+fatal() {
+ log FATAL "${@}"
+ exit 1
+}
+
+debug=${NETDATA_ALARM_NOTIFY_DEBUG-0}
+debug() {
+ [ "${debug}" = "1" ] && log DEBUG "${@}"
+}
+
+docurl() {
+ if [ -z "${curl}" ]
+ then
+ error "\${curl} is unset."
+ return 1
+ fi
+
+ if [ "${debug}" = "1" ]
+ then
+ echo >&2 "--- BEGIN curl command ---"
+ printf >&2 "%q " ${curl} "${@}"
+ echo >&2
+ echo >&2 "--- END curl command ---"
+
+ local out=$(mktemp /tmp/netdata-health-alarm-notify-XXXXXXXX)
+ local code=$(${curl} ${curl_options} --write-out %{http_code} --output "${out}" --silent --show-error "${@}")
+ local ret=$?
+ echo >&2 "--- BEGIN received response ---"
+ cat >&2 "${out}"
+ echo >&2
+ echo >&2 "--- END received response ---"
+ echo >&2 "RECEIVED HTTP RESPONSE CODE: ${code}"
+ rm "${out}"
+ echo "${code}"
+ return ${ret}
+ fi
+
+ ${curl} ${curl_options} --write-out %{http_code} --output /dev/null --silent --show-error "${@}"
+ return $?
+}
+
+# -----------------------------------------------------------------------------
+# this is to be overwritten by the config file
+
+custom_sender() {
+ info "not sending custom notification for ${status} of '${host}.${chart}.${name}'"
+}
+
+
+# -----------------------------------------------------------------------------
+
+# check for BASH v3+ (this basic variant intentionally avoids associative arrays)
+[ $(( ${BASH_VERSINFO[0]} )) -lt 3 ] && \
+ fatal "BASH version 3 or later is required (this is ${BASH_VERSION})."
+
+# -----------------------------------------------------------------------------
+# defaults to allow running this script by hand
+
+[ -z "${NETDATA_CONFIG_DIR}" ] && NETDATA_CONFIG_DIR="$(dirname "${0}")/../../../../etc/netdata"
+[ -z "${NETDATA_CACHE_DIR}" ] && NETDATA_CACHE_DIR="$(dirname "${0}")/../../../../var/cache/netdata"
+[ -z "${NETDATA_REGISTRY_URL}" ] && NETDATA_REGISTRY_URL="https://registry.my-netdata.io"
+
+# -----------------------------------------------------------------------------
+# parse command line parameters
+
+roles="${1}" # the roles that should be notified for this event
+host="${2}" # the host generated this event
+unique_id="${3}" # the unique id of this event
+alarm_id="${4}" # the unique id of the alarm that generated this event
+event_id="${5}" # the incremental id of the event, for this alarm id
+when="${6}" # the timestamp this event occurred
+name="${7}" # the name of the alarm, as given in netdata health.d entries
+chart="${8}" # the name of the chart (type.id)
+family="${9}" # the family of the chart
+status="${10}" # the current status : REMOVED, UNINITIALIZED, UNDEFINED, CLEAR, WARNING, CRITICAL
+old_status="${11}" # the previous status: REMOVED, UNINITIALIZED, UNDEFINED, CLEAR, WARNING, CRITICAL
+value="${12}" # the current value of the alarm
+old_value="${13}" # the previous value of the alarm
+src="${14}" # the line number and file the alarm has been configured
+duration="${15}" # the duration in seconds of the previous alarm state
+non_clear_duration="${16}" # the total duration in seconds this is/was non-clear
+units="${17}" # the units of the value
+info="${18}" # a short description of the alarm
+value_string="${19}" # friendly value (with units)
+old_value_string="${20}" # friendly old value (with units)
+
+# -----------------------------------------------------------------------------
+# find a suitable hostname to use, if netdata did not supply a hostname
+
+this_host=$(hostname -s 2>/dev/null)
+[ -z "${host}" ] && host="${this_host}"
+
+# -----------------------------------------------------------------------------
+# screen statuses we don't need to send a notification
+
+# don't do anything if this is not WARNING, CRITICAL or CLEAR
+if [ "${status}" != "WARNING" -a "${status}" != "CRITICAL" -a "${status}" != "CLEAR" ]
+then
+ info "not sending notification for ${status} of '${host}.${chart}.${name}'"
+ exit 1
+fi
+
+# don't do anything if this is CLEAR, but it was not WARNING or CRITICAL
+if [ "${old_status}" != "WARNING" -a "${old_status}" != "CRITICAL" -a "${status}" = "CLEAR" ]
+then
+ info "not sending notification for ${status} of '${host}.${chart}.${name}' (last status was ${old_status})"
+ exit 1
+fi
+
+# -----------------------------------------------------------------------------
+# load configuration
+
+# By default fetch images from the global public registry.
+# This is required by default, since all notification methods need to download
+# images via the Internet, and private registries might not be reachable.
+# This can be overwritten at the configuration file.
+images_base_url="https://registry.my-netdata.io"
+
+# curl options to use
+curl_options=
+
+# needed commands
+# if empty they will be searched in the system path
+curl=
+sendmail=
+
+# enable / disable features
+SEND_EMAIL="YES"
+SEND_SYSLOG="YES"
+
+# syslog configs
+SYSLOG_FACILITY="local6"
+
+# email configs
+EMAIL_SENDER=
+DEFAULT_RECIPIENT_EMAIL="root"
+EMAIL_CHARSET=$(locale charmap 2>/dev/null)
+
+# load the user configuration
+# this will overwrite the variables above
+if [ -f "${NETDATA_CONFIG_DIR}/health_alarm_notify.conf" ]
+ then
+ source "${NETDATA_CONFIG_DIR}/health_alarm_notify.conf"
+else
+ error "Cannot find file ${NETDATA_CONFIG_DIR}/health_alarm_notify.conf. Using internal defaults."
+fi
+
+# If we didn't autodetect the character set for e-mail and it wasn't
+# set by the user, we need to set it to a reasonable default. UTF-8
+# should be correct for almost all modern UNIX systems.
+if [ -z "${EMAIL_CHARSET}" ]
+ then
+ EMAIL_CHARSET="UTF-8"
+fi
+
+# disable if role = silent or disabled
+if [[ "${role}" = "silent" || "${role}" = "disabled" ]]; then
+ SEND_EMAIL="NO"
+ SEND_SYSLOG="NO"
+fi
+
+if [[ "SEND_EMAIL" != "NO" ]]; then
+ to_email=$DEFAULT_RECIPIENT_EMAIL
+else
+ to_email=''
+fi
+
+if [[ "SEND_SYSLOG" != "NO" ]]; then
+ to_syslog=$DEFAULT_RECIPIENT_SYSLOG
+else
+ to_syslog=''
+fi
+
+# if we need sendmail, check for the sendmail command
+if [ "${SEND_EMAIL}" = "YES" -a -z "${sendmail}" ]
+ then
+ sendmail="$(which sendmail 2>/dev/null || command -v sendmail 2>/dev/null)"
+ if [ -z "${sendmail}" ]
+ then
+ debug "Cannot find sendmail command in the system path. Disabling email notifications."
+ SEND_EMAIL="NO"
+ fi
+fi
+
+# if we need logger, check for the logger command
+if [ "${SEND_SYSLOG}" = "YES" -a -z "${logger}" ]
+ then
+ logger="$(which logger 2>/dev/null || command -v logger 2>/dev/null)"
+ if [ -z "${logger}" ]
+ then
+ debug "Cannot find logger command in the system path. Disabling syslog notifications."
+ SEND_SYSLOG="NO"
+ fi
+fi
+
+# check that we have at least a method enabled
+if [ "${SEND_EMAIL}" != "YES" \
+ -a "${SEND_SYSLOG}" != "YES" \
+ ]
+ then
+ fatal "All notification methods are disabled. Not sending notification for host '${host}', chart '${chart}' to '${roles}' for '${name}' = '${value}' for status '${status}'."
+fi
+
+# -----------------------------------------------------------------------------
+# get the date the alarm happened
+
+date=$(date --date=@${when} "${DATE_FORMAT}" 2>/dev/null)
+[ -z "${date}" ] && date=$(date "${DATE_FORMAT}" 2>/dev/null)
+[ -z "${date}" ] && date=$(date --date=@${when} 2>/dev/null)
+[ -z "${date}" ] && date=$(date 2>/dev/null)
+
+# -----------------------------------------------------------------------------
+# function to URL encode a string
+
+urlencode() {
+ local string="${1}" strlen encoded pos c o
+
+ strlen=${#string}
+ for (( pos=0 ; pos<strlen ; pos++ ))
+ do
+ c=${string:${pos}:1}
+ case "${c}" in
+ [-_.~a-zA-Z0-9])
+ o="${c}"
+ ;;
+
+ *)
+ printf -v o '%%%02x' "'${c}"
+ ;;
+ esac
+ encoded+="${o}"
+ done
+
+ REPLY="${encoded}"
+ echo "${REPLY}"
+}
+
+# -----------------------------------------------------------------------------
+# function to convert a duration in seconds, to a human readable duration
+# using DAYS, MINUTES, SECONDS
+
+duration4human() {
+ local s="${1}" d=0 h=0 m=0 ds="day" hs="hour" ms="minute" ss="second" ret
+ d=$(( s / 86400 ))
+ s=$(( s - (d * 86400) ))
+ h=$(( s / 3600 ))
+ s=$(( s - (h * 3600) ))
+ m=$(( s / 60 ))
+ s=$(( s - (m * 60) ))
+
+ if [ ${d} -gt 0 ]
+ then
+ [ ${m} -ge 30 ] && h=$(( h + 1 ))
+ [ ${d} -gt 1 ] && ds="days"
+ [ ${h} -gt 1 ] && hs="hours"
+ if [ ${h} -gt 0 ]
+ then
+ ret="${d} ${ds} and ${h} ${hs}"
+ else
+ ret="${d} ${ds}"
+ fi
+ elif [ ${h} -gt 0 ]
+ then
+ [ ${s} -ge 30 ] && m=$(( m + 1 ))
+ [ ${h} -gt 1 ] && hs="hours"
+ [ ${m} -gt 1 ] && ms="minutes"
+ if [ ${m} -gt 0 ]
+ then
+ ret="${h} ${hs} and ${m} ${ms}"
+ else
+ ret="${h} ${hs}"
+ fi
+ elif [ ${m} -gt 0 ]
+ then
+ [ ${m} -gt 1 ] && ms="minutes"
+ [ ${s} -gt 1 ] && ss="seconds"
+ if [ ${s} -gt 0 ]
+ then
+ ret="${m} ${ms} and ${s} ${ss}"
+ else
+ ret="${m} ${ms}"
+ fi
+ else
+ [ ${s} -gt 1 ] && ss="seconds"
+ ret="${s} ${ss}"
+ fi
+
+ REPLY="${ret}"
+ echo "${REPLY}"
+}
+
+# -----------------------------------------------------------------------------
+# email sender
+
+send_email() {
+ local ret= opts=
+ if [[ "${SEND_EMAIL}" == "YES" ]]
+ then
+
+ if [[ ! -z "${EMAIL_SENDER}" ]]
+ then
+ if [[ "${EMAIL_SENDER}" =~ \".*\"\ \<.*\> ]]
+ then
+ # the name includes double quotes
+ opts=" -f $(echo "${EMAIL_SENDER}" | cut -d '<' -f 2 | cut -d '>' -f 1) -F $(echo "${EMAIL_SENDER}" | cut -d '<' -f 1)"
+ elif [[ "${EMAIL_SENDER}" =~ \'.*\'\ \<.*\> ]]
+ then
+ # the name includes single quotes
+ opts=" -f $(echo "${EMAIL_SENDER}" | cut -d '<' -f 2 | cut -d '>' -f 1) -F $(echo "${EMAIL_SENDER}" | cut -d '<' -f 1)"
+ elif [[ "${EMAIL_SENDER}" =~ .*\ \<.*\> ]]
+ then
+ # the name does not have any quotes
+ opts=" -f $(echo "${EMAIL_SENDER}" | cut -d '<' -f 2 | cut -d '>' -f 1) -F '$(echo "${EMAIL_SENDER}" | cut -d '<' -f 1)'"
+ else
+ # no name at all
+ opts=" -f ${EMAIL_SENDER}"
+ fi
+ fi
+
+ if [[ "${debug}" = "1" ]]
+ then
+ echo >&2 "--- BEGIN sendmail command ---"
+ printf >&2 "%q " "${sendmail}" -t ${opts}
+ echo >&2
+ echo >&2 "--- END sendmail command ---"
+ fi
+
+ "${sendmail}" -t ${opts}
+ ret=$?
+
+ if [ ${ret} -eq 0 ]
+ then
+ info "sent email notification for: ${host} ${chart}.${name} is ${status} to '${to_email}'"
+ return 0
+ else
+ error "failed to send email notification for: ${host} ${chart}.${name} is ${status} to '${to_email}' with error code ${ret}."
+ return 1
+ fi
+ fi
+
+ return 1
+}
+
+# -----------------------------------------------------------------------------
+# syslog sender
+
+send_syslog() {
+ local facility=${SYSLOG_FACILITY:-"local6"} level='info' targets="${1}"
+ local priority='' message='' host='' port='' prefix=''
+ local temp1='' temp2=''
+
+ [[ "${SEND_SYSLOG}" == "YES" ]] || return 1
+
+ if [[ "${status}" == "CRITICAL" ]] ; then
+ level='crit'
+ elif [[ "${status}" == "WARNING" ]] ; then
+ level='warning'
+ fi
+
+ for target in ${targets} ; do
+ priority="${facility}.${level}"
+ message=''
+ host=''
+ port=''
+ prefix=''
+ temp1=''
+ temp2=''
+
+ prefix=$(echo ${target} | cut -d '/' -f 2)
+ temp1=$(echo ${target} | cut -d '/' -f 1)
+
+ if [[ ${prefix} != ${temp1} ]] ; then
+ if (echo ${temp1} | grep -q '@' ) ; then
+ temp2=$(echo ${temp1} | cut -d '@' -f 1)
+ host=$(echo ${temp1} | cut -d '@' -f 2)
+
+ if [ "${temp2}" != "${host}" ] ; then
+ priority=${temp2}
+ fi
+
+ port=$(echo ${host} | rev | cut -d ':' -f 1 | rev)
+
+ if ( echo ${host} | grep -E -q '\[.*\]' ) ; then
+ if ( echo ${port} | grep -q ']' ) ; then
+ port=''
+ else
+ host=$(echo ${host} | rev | cut -d ':' -f 2- | rev)
+ fi
+ else
+ if [ "${port}" = "${host}" ] ; then
+ port=''
+ else
+ host=$(echo ${host} | cut -d ':' -f 1)
+ fi
+ fi
+ else
+ priority=${temp1}
+ fi
+ fi
+
+ # message="${prefix} ${status} on ${host} at ${date}: ${chart} ${value_string}"
+
+ message="${prefix} ${status}: ${chart} ${value_string}"
+
+ if [ -n "${host}" ] ; then
+ logger_options="${logger_options} -n ${host}"
+ if [ -n "${port}" ] ; then
+ logger_options="${logger_options} -P ${port}"
+ fi
+ fi
+
+ ${logger} -p ${priority} ${logger_options} "${message}"
+ done
+
+ return $?
+}
+
+
+# -----------------------------------------------------------------------------
+# prepare the content of the notification
+
+# the url to send the user on click
+urlencode "${host}" >/dev/null; url_host="${REPLY}"
+urlencode "${chart}" >/dev/null; url_chart="${REPLY}"
+urlencode "${family}" >/dev/null; url_family="${REPLY}"
+urlencode "${name}" >/dev/null; url_name="${REPLY}"
+goto_url="${NETDATA_REGISTRY_URL}/goto-host-from-alarm.html?host=${url_host}&chart=${url_chart}&family=${url_family}&alarm=${url_name}&alarm_unique_id=${unique_id}&alarm_id=${alarm_id}&alarm_event_id=${event_id}"
+
+# the severity of the alarm
+severity="${status}"
+
+# the time the alarm was raised
+duration4human ${duration} >/dev/null; duration_txt="${REPLY}"
+duration4human ${non_clear_duration} >/dev/null; non_clear_duration_txt="${REPLY}"
+raised_for="(was ${old_status} for ${duration_txt})"
+
+# the key status message
+status_message="status unknown"
+
+# the color of the alarm
+color="grey"
+
+# the alarm value
+alarm="${name//_/ } = ${value_string}"
+
+# the image of the alarm
+image="${images_base_url}/images/seo-performance-128.png"
+
+# prepare the title based on status
+case "${status}" in
+ CRITICAL)
+ image="${images_base_url}/images/alert-128-red.png"
+ status_message="is critical"
+ color="#ca414b"
+ ;;
+
+ WARNING)
+ image="${images_base_url}/images/alert-128-orange.png"
+ status_message="needs attention"
+ color="#ffc107"
+ ;;
+
+ CLEAR)
+ image="${images_base_url}/images/check-mark-2-128-green.png"
+ status_message="recovered"
+ color="#77ca6d"
+ ;;
+esac
+
+if [ "${status}" = "CLEAR" ]
+then
+ severity="Recovered from ${old_status}"
+ if [ ${non_clear_duration} -gt ${duration} ]
+ then
+ raised_for="(alarm was raised for ${non_clear_duration_txt})"
+ fi
+
+ # don't show the value when the status is CLEAR
+ # for certain alarms, this value might not have any meaning
+ alarm="${name//_/ } ${raised_for}"
+
+elif [ "${old_status}" = "WARNING" -a "${status}" = "CRITICAL" ]
+then
+ severity="Escalated to ${status}"
+ if [ ${non_clear_duration} -gt ${duration} ]
+ then
+ raised_for="(alarm is raised for ${non_clear_duration_txt})"
+ fi
+
+elif [ "${old_status}" = "CRITICAL" -a "${status}" = "WARNING" ]
+then
+ severity="Demoted to ${status}"
+ if [ ${non_clear_duration} -gt ${duration} ]
+ then
+ raised_for="(alarm is raised for ${non_clear_duration_txt})"
+ fi
+
+else
+ raised_for=
+fi
+
+# prepare HTML versions of elements
+info_html=
+[ ! -z "${info}" ] && info_html=" <small><br/>${info}</small>"
+
+raised_for_html=
+[ ! -z "${raised_for}" ] && raised_for_html="<br/><small>${raised_for}</small>"
+
+
+# -----------------------------------------------------------------------------
+# send the syslog message
+
+send_syslog ${to_syslog}
+
+SENT_SYSLOG=$?
+
+# -----------------------------------------------------------------------------
+# send the email
+
+send_email <<EOF
+To: ${to_email}
+Subject: ${host} ${status_message} - ${name//_/ } - ${chart}
+MIME-Version: 1.0
+Content-Type: multipart/alternative; boundary="multipart-boundary"
+
+This is a MIME-encoded multipart message
+
+--multipart-boundary
+Content-Type: text/plain; encoding=${EMAIL_CHARSET}
+Content-Disposition: inline
+Content-Transfer-Encoding: 8bit
+
+${host} ${status_message}
+
+${alarm} ${info}
+${raised_for}
+
+Chart : ${chart}
+Family : ${family}
+Severity: ${severity}
+URL : ${goto_url}
+Source : ${src}
+Date : ${date}
+Value : ${value_string}
+Notification generated on ${this_host}
+
+--multipart-boundary
+Content-Type: text/html; encoding=${EMAIL_CHARSET}
+Content-Disposition: inline
+Content-Transfer-Encoding: 8bit
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml" style="font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; box-sizing: border-box; font-size: 14px; margin: 0; padding: 0;">
+<body style="font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; font-size: 14px; width: 100% !important; min-height: 100%; line-height: 1.6; background: #f6f6f6; margin:0; padding: 0;">
+<table>
+ <tbody>
+ <tr>
+ <td style="vertical-align: top;" valign="top"></td>
+ <td width="700" style="vertical-align: top; display: block !important; max-width: 700px !important; clear: both !important; margin: 0 auto; padding: 0;" valign="top">
+ <div style="max-width: 700px; display: block; margin: 0 auto; padding: 20px;">
+ <table width="100%" cellpadding="0" cellspacing="0" style="background: #fff; border: 1px solid #e9e9e9;">
+ <tbody>
+ <tr>
+ <td bgcolor="#eee" style="padding: 5px 20px 5px 20px; background-color: #eee;">
+ <div style="font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; font-size: 20px; color: #777; font-weight: bold;">netdata notification</div>
+ </td>
+ </tr>
+ <tr>
+ <td bgcolor="${color}" style="font-size: 16px; vertical-align: top; font-weight: 400; text-align: center; margin: 0; padding: 10px; color: #ffffff; background: ${color} !important; border: 1px solid ${color}; border-top-color: ${color};" align="center" valign="top">
+ <h1 style="font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; font-weight: 400; margin: 0;">${host} ${status_message}</h1>
+ </td>
+ </tr>
+ <tr>
+ <td style="vertical-align: top;" valign="top">
+ <div style="margin: 0; padding: 20px; max-width: 700px;">
+ <table width="100%" cellpadding="0" cellspacing="0" style="max-width:700px">
+ <tbody>
+ <tr>
+ <td style="font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; font-size: 18px; vertical-align: top; margin: 0; padding:0 0 20px;" align="left" valign="top">
+ <span>${chart}</span>
+ <span style="display: block; color: #666666; font-size: 12px; font-weight: 300; line-height: 1; text-transform: uppercase;">Chart</span>
+ </td>
+ </tr>
+ <tr>
+ <td style="font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; font-size: 18px; vertical-align: top; margin: 0; padding: 0 0 20px;" align="left" valign="top">
+ <span>${value_string}</span>
+ <span style="display: block; color: #666666; font-size: 12px; font-weight: 300; line-height: 1; text-transform: uppercase;">Value</span>
+ </td>
+ </tr>
+ <tr style="margin: 0; padding: 0;">
+ <td style="font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; font-size: 18px; vertical-align: top; margin: 0; padding: 0 0 20px;" align="left" valign="top">
+ <span><b>${alarm}</b>${info_html}</span>
+ <span style="display: block; color: #666666; font-size: 12px; font-weight: 300; line-height: 1; text-transform: uppercase;">Alarm</span>
+ </td>
+ </tr>
+ <tr>
+ <td style="font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; font-size: 18px; vertical-align: top; margin: 0; padding: 0 0 20px;" align="left" valign="top">
+ <span>${family}</span>
+ <span style="display: block; color: #666666; font-size: 12px; font-weight: 300; line-height: 1; text-transform: uppercase;">Family</span>
+ </td>
+ </tr>
+ <tr style="margin: 0; padding: 0;">
+ <td style="font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; font-size: 18px; vertical-align: top; margin: 0; padding: 0 0 20px;" align="left" valign="top">
+ <span>${severity}</span>
+ <span style="display: block; color: #666666; font-size: 12px; font-weight: 300; line-height: 1; text-transform: uppercase;">Severity</span>
+ </td>
+ </tr>
+ <tr style="margin: 0; padding: 0;">
+ <td style="font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; font-size: 18px; vertical-align: top; margin: 0; padding: 0 0 20px;" align="left" valign="top"><span>${date}</span>
+ <span>${raised_for_html}</span> <span style="display: block; color: #666666; font-size: 12px; font-weight: 300; line-height: 1; text-transform: uppercase;">Time</span>
+ </td>
+ </tr>
+ <tr style="margin: 0; padding: 0;">
+ <td style="font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; font-size: 18px; vertical-align: top; margin: 0; padding: 0 0 20px;">
+ <a href="${goto_url}" style="font-size: 14px; color: #ffffff; text-decoration: none; line-height: 1.5; font-weight: bold; text-align: center; display: inline-block; text-transform: capitalize; background: #35568d; border-width: 1px; border-style: solid; border-color: #2b4c86; margin: 0; padding: 10px 15px;" target="_blank">View Netdata</a>
+ </td>
+ </tr>
+ <tr style="text-align: center; margin: 0; padding: 0;">
+ <td style="font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; font-size: 11px; vertical-align: top; margin: 0; padding: 10px 0 0 0; color: #666666;" align="center" valign="bottom">The source of this alarm is line <code>${src}</code><br/>(alarms are configurable, edit this file to adapt the alarm to your needs)
+ </td>
+ </tr>
+ <tr style="text-align: center; margin: 0; padding: 0;">
+ <td style="font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; font-size: 12px; vertical-align: top; margin:0; padding: 20px 0 0 0; color: #666666; border-top: 1px solid #f0f0f0;" align="center" valign="bottom">Sent by
+                        <a href="https://mynetdata.io/" target="_blank">netdata</a>, the real-time performance and health monitoring system, on <code>${this_host}</code>.
+ </td>
+ </tr>
+ </tbody>
+ </table>
+ </div>
+ </td>
+ </tr>
+ </tbody>
+ </table>
+ </div>
+ </td>
+ </tr>
+ </tbody>
+</table>
+</body>
+</html>
+--multipart-boundary--
+EOF
+
+SENT_EMAIL=$?
+
+# -----------------------------------------------------------------------------
+# let netdata know
+
+if [ ${SENT_EMAIL} -eq 0 \
+ -o ${SENT_SYSLOG} -eq 0 \
+ ]
+ then
+ # we did send something
+ exit 0
+fi
+
+# we did not send anything
+exit 1
diff --git a/contrib/sles11/netdata-alarms-bash3.patch b/contrib/sles11/netdata-alarms-bash3.patch
new file mode 100644
index 0000000..142659b
--- /dev/null
+++ b/contrib/sles11/netdata-alarms-bash3.patch
@@ -0,0 +1,10 @@
+--- system/netdata.conf.orig 2018-05-10 19:44:49.000000000 +0300
++++ system/netdata.conf 2018-05-10 19:45:14.000000000 +0300
+@@ -22,3 +22,7 @@
+ [web]
+ web files owner = root
+ web files group = netdata
++
++[health]
++ # script for sles 11, mail notifications only
++ script to execute on alarm = /usr/lib64/netdata/plugins.d/alarm-notify.bash3.sh
diff --git a/contrib/sles11/netdata-automake-no-dist-xz.patch b/contrib/sles11/netdata-automake-no-dist-xz.patch
new file mode 100644
index 0000000..d373efa
--- /dev/null
+++ b/contrib/sles11/netdata-automake-no-dist-xz.patch
@@ -0,0 +1,13 @@
+diff -u netdata-1.6.0.orig/Makefile.am netdata-1.6.0/Makefile.am
+--- netdata-1.6.0.orig/Makefile.am 2017-03-20 19:32:47.000000000 +0100
++++ netdata-1.6.0/Makefile.am 2017-06-25 23:46:14.403426661 +0200
+@@ -1,7 +1,7 @@
+ #
+ # Copyright (C) 2015 Alon Bar-Lev <alon.barlev@gmail.com>
+ #
+-AUTOMAKE_OPTIONS=foreign dist-bzip2 dist-xz 1.10
++AUTOMAKE_OPTIONS=foreign dist-bzip2 1.10
+ ACLOCAL_AMFLAGS = -I m4
+
+ MAINTAINERCLEANFILES= \
+
diff --git a/contrib/sles11/netdata-python-plugin-sles11.patch b/contrib/sles11/netdata-python-plugin-sles11.patch
new file mode 100644
index 0000000..d2e8f6b
--- /dev/null
+++ b/contrib/sles11/netdata-python-plugin-sles11.patch
@@ -0,0 +1,28 @@
+diff -u netdata-1.10.0.orig/plugins.d/python.d.plugin netdata-1.10.0/plugins.d/python.d.plugin
+--- netdata-1.10.0.orig/plugins.d/python.d.plugin 2018-05-08 20:01:40.000000000 +0300
++++ netdata-1.10.0/plugins.d/python.d.plugin 2018-05-08 20:06:57.000000000 +0300
+@@ -15,10 +15,8 @@
+ from sys import version_info, argv
+ from time import sleep
+
+-try:
+- from time import monotonic as time
+-except ImportError:
+- from time import time
++# from time import monotonic as time
++from time import time
+
+ PY_VERSION = version_info[:2]
+ PLUGIN_CONFIG_DIR = os.getenv('NETDATA_CONFIG_DIR', os.path.dirname(__file__) + '/../../../../etc/netdata') + '/'
+@@ -32,10 +30,7 @@
+ from bases.loggers import PythonDLogger
+ from bases.collection import setdefault_values, run_and_exit
+
+-try:
+- from collections import OrderedDict
+-except ImportError:
+- from third_party.ordereddict import OrderedDict
++from ordereddict import OrderedDict
+
+ BASE_CONFIG = {'update_every': os.getenv('NETDATA_UPDATE_EVERY', 1),
+ 'retries': 60,
diff --git a/contrib/sles11/netdata.init b/contrib/sles11/netdata.init
new file mode 100755
index 0000000..3081c42
--- /dev/null
+++ b/contrib/sles11/netdata.init
@@ -0,0 +1,65 @@
+#!/bin/bash
+#
+### BEGIN INIT INFO
+# Provides: netdata
+# Required-Start: $all
+# Should-Start:
+# Required-Stop: $all
+# Should-Stop:
+# Default-Start: 2 3 5
+# Default-Stop:
+# Short-Description: Start and stop the netdata real-time monitoring server daemon
+# Description: Controls the main netdata monitoring server daemon "netdata".
+### END INIT INFO
+
+DAEMON="netdata"
+DAEMON_BIN="/usr/sbin/${DAEMON}"
+DAEMON_PID="/var/run/${DAEMON}.pid"
+DAEMON_ARGS=""
+
+. /etc/rc.status
+rc_reset
+
+if [ ! -x $DAEMON_BIN ]; then
+ echo -n >&2 "${DAEMON} binary is not installed. "
+ rc_status -s
+ exit 5
+fi
+
+case "$1" in
+ start)
+ echo -n "Starting $DAEMON"
+ /sbin/startproc $DAEMON_BIN $DAEMON_ARGS
+ rc_status -v
+ ;;
+
+ stop)
+ echo -n "Stopping $DAEMON"
+ /sbin/killproc $DAEMON_BIN
+ rc_status -v
+ ;;
+
+ reload)
+ # netdata: HUP reopen log files, USR1 save DB, USR2 reload health config
+ echo -n "Reloading $DAEMON config"
+ /sbin/killproc -USR2 $DAEMON_BIN
+ ;;
+
+ restart)
+ $0 stop
+ $0 start
+ ;;
+
+ status)
+ echo -n "Checking $DAEMON"
+ /sbin/checkproc $DAEMON_BIN
+ rc_status -v
+ ;;
+
+ *)
+ echo "Usage: $0 {start|stop|status|reload|restart}"
+ exit 1
+ ;;
+
+esac
+rc_exit
diff --git a/coverity-scan.sh b/coverity-scan.sh
new file mode 100755
index 0000000..cd2ff02
--- /dev/null
+++ b/coverity-scan.sh
@@ -0,0 +1,204 @@
+#!/usr/bin/env bash
+#
+# Coverity scan script
+#
+# Copyright: SPDX-License-Identifier: GPL-3.0-or-later
+#
+# Author : Costa Tsaousis (costa@netdata.cloud)
+# Author : Pawel Krupa (paulfantom)
+# Author : Pavlos Emm. Katsoulakis (paul@netdata.cloud)
+# shellcheck disable=SC1091,SC2230,SC2086
+
+# To run manually, save configuration to .coverity-scan.conf like this:
+#
+# the repository to report to coverity - devs can set here their own fork
+# REPOSITORY="netdata/netdata"
+#
+# the email of the developer, as given to coverity
+# COVERITY_SCAN_SUBMIT_MAIL="you@example.com"
+#
+# the token given by coverity to the developer
+# COVERITY_SCAN_TOKEN="TOKEN taken from Coverity site"
+#
+# the absolute path of the cov-build - optional
+# COVERITY_BUILD_PATH="/opt/cov-analysis-linux64-2019.03/bin/cov-build"
+#
+# when set, the script will print on screen the curl command that submits the build to coverity
+# this includes the token, so the default is not to print it.
+# COVERITY_SUBMIT_DEBUG=1
+#
+# Override the standard coverity build version we know is supported
+# COVERITY_BUILD_VERSION="cov-analysis-linux64-2019.03"
+#
+# All these variables can also be exported before running this script.
+#
+# If the first parameter of this script is "--with-install",
+# the coverity build tools will be downloaded and installed under /opt
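+#
+# Example (manual runs; a sketch based on the argument handling at the bottom of this script):
+#   ./coverity-scan.sh --with-install    # download/install the tools, then scan
+#   ./coverity-scan.sh                   # scan using the default configure options below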
+
+set -e
+
+INSTALL_DIR="/opt"
+
+# the version of coverity to use
+COVERITY_BUILD_VERSION="${COVERITY_BUILD_VERSION:-cov-analysis-linux64-2019.03}"
+
+# TODO: For some reason this does not fully load on Debian 10 (haven't checked whether it happens on other distros yet); it breaks
+source packaging/installer/functions.sh || echo "Failed to fully load the functions library"
+
+cpus=$(find_processors)
+[ -z "${cpus}" ] && cpus=1
+
+if [ -f ".coverity-scan.conf" ]; then
+ source ".coverity-scan.conf"
+fi
+
+repo="${REPOSITORY}"
+if [ -z "${repo}" ]; then
+ fatal "export variable REPOSITORY or set it in .coverity-scan.conf"
+fi
+repo="${repo//\//%2F}"
+
+email="${COVERITY_SCAN_SUBMIT_MAIL}"
+if [ -z "${email}" ]; then
+ fatal "export variable COVERITY_SCAN_SUBMIT_MAIL or set it in .coverity-scan.conf"
+fi
+
+token="${COVERITY_SCAN_TOKEN}"
+if [ -z "${token}" ]; then
+ fatal "export variable COVERITY_SCAN_TOKEN or set it in .coverity-scan.conf"
+fi
+
+if ! command -v curl > /dev/null 2>&1; then
+  fatal "curl is required for the coverity scan to work"
+fi
+
+# only print the output of a command
+# when debugging is enabled
+# used to hide the token when debugging is not enabled
+debugrun() {
+ if [ "${COVERITY_SUBMIT_DEBUG}" = "1" ]; then
+ run "${@}"
+ return $?
+ else
+ "${@}"
+ return $?
+ fi
+}
+
+scanit() {
+ progress "Scanning using coverity"
+ export PATH="${PATH}:${INSTALL_DIR}/${COVERITY_BUILD_VERSION}/bin/"
+ covbuild="${COVERITY_BUILD_PATH}"
+ [ -z "${covbuild}" ] && covbuild="$(which cov-build 2> /dev/null || command -v cov-build 2> /dev/null)"
+
+ if [ -z "${covbuild}" ]; then
+ fatal "Cannot find 'cov-build' binary in \$PATH. Export variable COVERITY_BUILD_PATH or set it in .coverity-scan.conf"
+ elif [ ! -x "${covbuild}" ]; then
+ fatal "The command '${covbuild}' is not executable. Export variable COVERITY_BUILD_PATH or set it in .coverity-scan.conf"
+ fi
+
+ version="$(grep "^#define PACKAGE_VERSION" config.h | cut -d '"' -f 2)"
+ progress "Working on netdata version: ${version}"
+
+ progress "Cleaning up old builds..."
+ run make clean || echo >&2 "Nothing to clean"
+
+ [ -d "cov-int" ] && rm -rf "cov-int"
+
+ [ -f netdata-coverity-analysis.tgz ] && run rm netdata-coverity-analysis.tgz
+
+ progress "Configuring netdata source..."
+
+ run autoreconf -ivf
+ run ./configure ${OTHER_OPTIONS}
+
+ progress "Analyzing netdata..."
+ run "${covbuild}" --dir cov-int make -j${cpus}
+
+ echo >&2 "Compressing analysis..."
+ run tar czvf netdata-coverity-analysis.tgz cov-int
+
+ echo >&2 "Sending analysis to coverity for netdata version ${version} ..."
+ COVERITY_SUBMIT_RESULT=$(debugrun curl --progress-bar \
+ --form token="${token}" \
+ --form email="${email}" \
+ --form file=@netdata-coverity-analysis.tgz \
+ --form version="${version}" \
+ --form description="netdata, monitor everything, in real-time." \
+ https://scan.coverity.com/builds?project="${repo}")
+
+ echo "${COVERITY_SUBMIT_RESULT}" | grep -q -e 'Build successfully submitted' || echo >&2 "scan results were not pushed to coverity. Message was: ${COVERITY_SUBMIT_RESULT}"
+
+ progress "Coverity scan completed"
+}
+
+installit() {
+ ORIGINAL_DIR="${PWD}"
+ TMP_DIR="$(mktemp -d /tmp/netdata-coverity-scan-XXXXX)"
+ progress "Downloading coverity in ${TMP_DIR}..."
+ cd "${TMP_DIR}"
+
+ debugrun curl --remote-name --remote-header-name --show-error --location --data "token=${token}&project=${repo}" https://scan.coverity.com/download/linux64
+
+ if [ -f "${COVERITY_BUILD_VERSION}.tar.gz" ]; then
+ progress "Installing coverity..."
+ cd "${INSTALL_DIR}"
+
+ run sudo tar -z -x -f "${TMP_DIR}/${COVERITY_BUILD_VERSION}.tar.gz" || exit 1
+ rm "${TMP_DIR}/${COVERITY_BUILD_VERSION}.tar.gz"
+ export PATH=${PATH}:${INSTALL_DIR}/${COVERITY_BUILD_VERSION}/bin/
+ else
+ fatal "Failed to download coverity tool tarball!"
+ fi
+
+ # Validate the installation
+ covbuild="$(which cov-build 2> /dev/null || command -v cov-build 2> /dev/null)"
+ if [ -z "$covbuild" ]; then
+ fatal "Failed to install coverity."
+ fi
+
+ # Clean temp directory
+ [ -n "${TMP_DIR}" ] && rm -rf "${TMP_DIR}"
+
+ progress "Coverity scan tools are installed."
+ cd "$ORIGINAL_DIR"
+ return 0
+}
+
+OTHER_OPTIONS="--disable-lto"
+OTHER_OPTIONS+=" --with-zlib"
+OTHER_OPTIONS+=" --with-math"
+OTHER_OPTIONS+=" --enable-https"
+OTHER_OPTIONS+=" --enable-jsonc"
+OTHER_OPTIONS+=" --enable-plugin-nfacct"
+OTHER_OPTIONS+=" --enable-plugin-freeipmi"
+OTHER_OPTIONS+=" --enable-plugin-cups"
+OTHER_OPTIONS+=" --enable-backend-prometheus-remote-write"
+# TODO: enable these plugins too
+#OTHER_OPTIONS+=" --enable-plugin-xenstat"
+#OTHER_OPTIONS+=" --enable-backend-kinesis"
+#OTHER_OPTIONS+=" --enable-backend-mongodb"
+
+FOUND_OPTS="NO"
+while [ -n "${1}" ]; do
+ if [ "${1}" = "--with-install" ]; then
+ progress "Running coverity install"
+ installit
+ shift 1
+ elif [ -n "${1}" ]; then
+ # Clear the default arguments, once you bump into the first argument
+ if [ "${FOUND_OPTS}" = "NO" ]; then
+ OTHER_OPTIONS="${1}"
+ FOUND_OPTS="YES"
+ else
+ OTHER_OPTIONS+=" ${1}"
+ fi
+
+ shift 1
+ else
+ break
+ fi
+done
+
+echo "Running coverity scan with extra options ${OTHER_OPTIONS}"
+scanit "${OTHER_OPTIONS}"
diff --git a/cppcheck.sh b/cppcheck.sh
new file mode 100755
index 0000000..ebbeeaf
--- /dev/null
+++ b/cppcheck.sh
@@ -0,0 +1,35 @@
+#!/usr/bin/env bash
+
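+# Usage: ./cppcheck.sh [FILE] [extra cppcheck options]
+# When FILE is omitted (or equals this script's directory), the whole tree is checked.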
+# echo >>/tmp/cppcheck.log "cppcheck ${*}"
+
+# shellcheck disable=SC2230
+cppcheck=$(which cppcheck 2>/dev/null || command -v cppcheck 2>/dev/null)
+[ -z "${cppcheck}" ] && echo >&2 "install cppcheck." && exit 1
+
+processors=$(grep -c ^processor /proc/cpuinfo)
+[ $(( processors )) -lt 1 ] && processors=1
+
+base="$(dirname "${0}")"
+[ "${base}" = "." ] && base="${PWD}"
+
+cd "${base}" || exit 1
+
+[ ! -d "cppcheck-build" ] && mkdir "cppcheck-build"
+
+file="${1}"
+shift
+# shellcheck disable=SC2235
+([ "${file}" = "${base}" ] || [ -z "${file}" ]) && file="${base}"
+
+"${cppcheck}" \
+ -j ${processors} \
+ --cppcheck-build-dir="cppcheck-build" \
+ -I .. \
+ --force \
+ --enable=warning,performance,portability,information \
+ --library=gnu \
+ --library=posix \
+ --suppress="unusedFunction:*" \
+ --suppress="nullPointerRedundantCheck:*" \
+ --suppress="readdirCalled:*" \
+ "${file}" "${@}"
diff --git a/daemon/Makefile.am b/daemon/Makefile.am
new file mode 100644
index 0000000..d3102f6
--- /dev/null
+++ b/daemon/Makefile.am
@@ -0,0 +1,23 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+CLEANFILES = \
+ anonymous-statistics.sh \
+ $(NULL)
+
+include $(top_srcdir)/build/subst.inc
+SUFFIXES = .in
+
+dist_noinst_DATA = \
+ README.md \
+ config/README.md \
+ anonymous-statistics.sh.in \
+ get-kubernetes-labels.sh.in \
+ $(NULL)
+
+dist_plugins_SCRIPTS = \
+ anonymous-statistics.sh \
+ system-info.sh \
+ get-kubernetes-labels.sh \
+ $(NULL)
diff --git a/daemon/README.md b/daemon/README.md
new file mode 100644
index 0000000..ec1f1c7
--- /dev/null
+++ b/daemon/README.md
@@ -0,0 +1,554 @@
+<!--
+title: "Netdata daemon"
+date: 2020-04-29
+custom_edit_url: https://github.com/netdata/netdata/edit/master/daemon/README.md
+-->
+
+# Netdata daemon
+
+## Starting netdata
+
+- You can start Netdata by executing it with `/usr/sbin/netdata` (the installer will also start it).
+
+- You can stop Netdata by killing it with `killall netdata`. You can stop and start Netdata at any point. When
+ exiting, the [database engine](/database/engine/README.md) saves metrics to `/var/cache/netdata/dbengine/` so that
+ it can continue when started again.
+
+Access to the web interface, with all the graphs, is by default on port `19999`, so go to:
+
+```sh
+http://127.0.0.1:19999/
+```
+
+You can get the running config file at any time by accessing `http://127.0.0.1:19999/netdata.conf`.
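+
+For example, you can dump it from a shell on the same host (assuming `curl` is available):
+
+```sh
+# print the configuration the running daemon is actually using
+curl http://127.0.0.1:19999/netdata.conf
+```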
+
+### Starting Netdata at boot
+
+In the `system` directory you can find scripts and configurations for the
+various distros.
+
+#### systemd
+
+The installer already installs `netdata.service` if it detects a systemd system.
+
+To install `netdata.service` by hand, run:
+
+```sh
+# stop Netdata
+killall netdata
+
+# copy netdata.service to systemd
+cp system/netdata.service /etc/systemd/system/
+
+# let systemd know there is a new service
+systemctl daemon-reload
+
+# enable Netdata at boot
+systemctl enable netdata
+
+# start Netdata
+systemctl start netdata
+```
+
+#### init.d
+
+In the `system` directory you can find `netdata-lsb`. Copy it to the proper place according to your distribution's
+documentation. On Ubuntu, this can be done by running the following commands as root.
+
+```sh
+# copy the Netdata startup file to /etc/init.d
+cp system/netdata-lsb /etc/init.d/netdata
+
+# make sure it is executable
+chmod +x /etc/init.d/netdata
+
+# enable it
+update-rc.d netdata defaults
+```
+
+#### openrc (gentoo)
+
+In the `system` directory you can find `netdata-openrc`. Copy it to the proper
+place according to your distribution documentation.
+
+#### CentOS / Red Hat Enterprise Linux
+
+For older versions of RHEL/CentOS that don't have systemd, an init script is included in the `system` directory. This can
+be installed by running the following commands as root.
+
+```sh
+# copy the Netdata startup file to /etc/init.d
+cp system/netdata-init-d /etc/init.d/netdata
+
+# make sure it is executable
+chmod +x /etc/init.d/netdata
+
+# enable it
+chkconfig --add netdata
+```
+
+_There has been some recent work on the init script; see PR
+<https://github.com/netdata/netdata/pull/403>._
+
+#### other systems
+
+You can start Netdata by running it from `/etc/rc.local` or equivalent.
+
+## Command line options
+
+Normally you don't need to supply any command line arguments to netdata.
+
+If you do, they override the equivalent configuration options.
+
+To get a list of all command line parameters supported, run:
+
+```sh
+netdata -h
+```
+
+The program will print the supported command line parameters.
+
+The command line options of Netdata version 1.10.0 are the following:
+
+```sh
+ ^
+ |.-. .-. .-. .-. . netdata
+ | '-' '-' '-' '-' real-time performance monitoring, done right!
+ +----+-----+-----+-----+-----+-----+-----+-----+-----+-----+-----+-----+--->
+
+ Copyright (C) 2016-2020, Netdata, Inc. <info@netdata.cloud>
+ Released under GNU General Public License v3 or later.
+ All rights reserved.
+
+ Home Page : https://netdata.cloud
+ Source Code: https://github.com/netdata/netdata
+ Docs : https://learn.netdata.cloud
+ Support : https://github.com/netdata/netdata/issues
+ License : https://github.com/netdata/netdata/blob/master/LICENSE.md
+
+ Twitter : https://twitter.com/linuxnetdata
+ Facebook : https://www.facebook.com/linuxnetdata/
+
+
+ SYNOPSIS: netdata [options]
+
+ Options:
+
+ -c filename Configuration file to load.
+ Default: /etc/netdata/netdata.conf
+
+ -D Do not fork. Run in the foreground.
+ Default: run in the background
+
+ -h Display this help message.
+
+ -P filename File to save a pid while running.
+ Default: do not save pid to a file
+
+ -i IP The IP address to listen to.
+ Default: all IP addresses IPv4 and IPv6
+
+ -p port API/Web port to use.
+ Default: 19999
+
+ -s path Prefix for /proc and /sys (for containers).
+ Default: no prefix
+
+ -t seconds The internal clock of netdata.
+ Default: 1
+
+ -u username Run as user.
+ Default: netdata
+
+ -v Print netdata version and exit.
+
+ -V Print netdata version and exit.
+
+ -W options See Advanced options below.
+
+
+ Advanced options:
+
+ -W stacksize=N Set the stacksize (in bytes).
+
+ -W debug_flags=N Set runtime tracing to debug.log.
+
+ -W unittest Run internal unittests and exit.
+
+ -W createdataset=N Create a DB engine dataset of N seconds and exit.
+
+ -W set section option value
+ set netdata.conf option from the command line.
+
+ -W simple-pattern pattern string
+ Check if string matches pattern and exit.
+
+ -W "claim -token=TOKEN -rooms=ROOM1,ROOM2 url=https://app.netdata.cloud"
+ Claim the agent to the workspace rooms pointed to by TOKEN and ROOM*.
+
+ Signals netdata handles:
+
+ - HUP Close and reopen log files.
+ - USR1 Save internal DB to disk.
+ - USR2 Reload health configuration.
+```
+
+You can send commands during runtime via [netdatacli](/cli/README.md).
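+
+For example, the signal-based reloads above have command equivalents (a sketch, assuming `netdatacli` is installed alongside the agent):
+
+```sh
+# reload health configuration, the command equivalent of sending USR2
+netdatacli reload-health
+
+# check that the agent's command server responds
+netdatacli ping
+```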
+
+## Log files
+
+Netdata uses 3 log files:
+
+1. `error.log`
+2. `access.log`
+3. `debug.log`
+
+Any of them can be disabled by setting it to `/dev/null` or `none` in `netdata.conf`. By default `error.log` and
+`access.log` are enabled. `debug.log` is only enabled if debugging/tracing is also enabled (Netdata needs to be compiled
+with debugging enabled).
+
+Log files are stored in `/var/log/netdata/` by default.
+
+### error.log
+
+The `error.log` is the `stderr` of the `netdata` daemon and all external plugins
+run by `netdata`.
+
+So if any process in the Netdata process tree writes anything to its standard error,
+it will appear in `error.log`.
+
+For most Netdata programs (including standard external plugins shipped by netdata), the following lines may appear:
+
+| tag | description |
+|:-:|:----------|
+| `INFO` | Something important the user should know. |
+| `ERROR` | Something that might disable a part of netdata.<br/>The log line includes `errno` (if it is not zero). |
+| `FATAL` | Something prevented a program from running.<br/>The log line includes `errno` (if it is not zero) and the program exited. |
+
+So, when auto-detection of data collection fails, `ERROR` lines are logged and the relevant modules are disabled, but the
+program continues to run.
+
+When a Netdata program cannot run at all, a `FATAL` line is logged.
+
+### access.log
+
+The `access.log` logs web requests. The format is:
+
+```txt
+DATE: ID: (sent/all = SENT_BYTES/ALL_BYTES bytes PERCENT_COMPRESSION%, prep/sent/total PREP_TIME/SENT_TIME/TOTAL_TIME ms): ACTION CODE URL
+```
+
+where:
+
+- `ID` is the client ID. Client IDs are auto-incremented every time a client connects to netdata.
+- `SENT_BYTES` is the number of bytes sent to the client, without the HTTP response header.
+- `ALL_BYTES` is the number of bytes of the response, before compression.
+- `PERCENT_COMPRESSION` is the percentage of traffic saved due to compression.
+- `PREP_TIME` is the time in milliseconds needed to prepare the response.
+- `SENT_TIME` is the time in milliseconds needed to send the response to the client.
+- `TOTAL_TIME` is the total time the request was inside Netdata (from the first byte of the request to the last byte
+ of the response).
+- `ACTION` can be `filecopy`, `options` (used in CORS), `data` (API call).
+
+### debug.log
+
+See [debugging](#debugging).
+
+## OOM Score
+
+Netdata runs with `OOMScore = 1000`. This means Netdata will be the first to be killed when your server runs out of
+memory.
+
+You can set Netdata OOMScore in `netdata.conf`, like this:
+
+```conf
+[global]
+ OOM score = 1000
+```
+
+Netdata logs its OOM score when it starts:
+
+```sh
+# grep OOM /var/log/netdata/error.log
+2017-10-15 03:47:31: netdata INFO : Adjusted my Out-Of-Memory (OOM) score from 0 to 1000.
+```
+
+### OOM score and systemd
+
+Netdata will not be able to lower its OOM Score below zero when it is started as the `netdata` user (systemd case).
+
+To allow Netdata to control its OOM Score in such cases, you will need to edit `netdata.service` and set:
+
+```sh
+[Service]
+# The minimum Netdata Out-Of-Memory (OOM) score.
+# Netdata (via [global].OOM score in netdata.conf) can only increase the value set here.
+# To decrease it, set the minimum here and set the same or a higher value in netdata.conf.
+# Valid values: -1000 (never kill netdata) to 1000 (always kill netdata).
+OOMScoreAdjust=-1000
+```
+
+Run `systemctl daemon-reload` to reload these changes.
+
+The above sets the OOMScore for Netdata to `-1000`, so that Netdata can increase it via `netdata.conf`.
+
+If you want to control it entirely via systemd, you can set in `netdata.conf`:
+
+```conf
+[global]
+ OOM score = keep
+```
+
+Using the above, whatever OOM Score you have set at `netdata.service` will be maintained by netdata.
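+
+To verify the result at runtime, you can read it back from `/proc` (a quick sketch; `pidof` may return several PIDs, we take the first):
+
+```sh
+# print the OOM score adjustment of the running daemon
+cat "/proc/$(pidof netdata | cut -d' ' -f1)/oom_score_adj"
+```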
+
+## Netdata process scheduling policy
+
+By default Netdata runs with the `idle` process scheduling policy, so that it uses CPU resources only when there is
+idle CPU to spare. On very busy servers (or weak servers), this can lead to gaps in the charts.
+
+You can set Netdata scheduling policy in `netdata.conf`, like this:
+
+```conf
+[global]
+ process scheduling policy = idle
+```
+
+You can use the following:
+
+| policy | description |
+| :-----------------------: | :---------- |
+| `idle` | use CPU only when there is spare - this is lower than nice 19 - it is the default for Netdata and it is so low that Netdata will run in "slow motion" under extreme system load, resulting in short (1-2 seconds) gaps in the charts. |
+| `other`<br/>or<br/>`nice` | this is the default policy for all processes under Linux. It provides dynamic priorities based on the `nice` level of each process. Check below for setting this `nice` level for netdata. |
+| `batch` | This policy is similar to `other` in that it schedules the thread according to its dynamic priority (based on the `nice` value). The difference is that this policy will cause the scheduler to always assume that the thread is CPU-intensive. Consequently, the scheduler will apply a small scheduling penalty with respect to wake-up behavior, so that this thread is mildly disfavored in scheduling decisions. |
+| `fifo` | `fifo` can be used only with static priorities higher than 0, which means that when a `fifo` thread becomes runnable, it will always immediately preempt any currently running `other`, `batch`, or `idle` thread. `fifo` is a simple scheduling algorithm without time slicing. |
+| `rr` | a simple enhancement of `fifo`. Everything described above for `fifo` also applies to `rr`, except that each thread is allowed to run only for a maximum time quantum. |
+| `keep`<br/>or<br/>`none` | do not set scheduling policy, priority or nice level - i.e. keep running with whatever it is set already (e.g. by systemd). |
+
+For more information see `man sched`.
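+
+To inspect the policy and priority the daemon ended up with, you can use the standard `chrt` tool from util-linux (a sketch; `pidof` may return several PIDs, we take the first):
+
+```sh
+# show the scheduling policy and priority of the running daemon
+chrt -p "$(pidof netdata | cut -d' ' -f1)"
+```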
+
+### scheduling priority for `rr` and `fifo`
+
+Once the policy is set to one of `rr` or `fifo`, the following will appear:
+
+```conf
+[global]
+ process scheduling priority = 0
+```
+
+These priorities are usually from 0 to 99. Higher numbers make the process more
+important.
+
+### nice level for policies `other` or `batch`
+
+When the policy is set to `other`, `nice`, or `batch`, the following will appear:
+
+```conf
+[global]
+ process nice level = 19
+```
+
+## scheduling settings and systemd
+
+Netdata will not be able to set its scheduling policy and priority to more important values when it is started as the
+`netdata` user (systemd case).
+
+You can set these settings at `/etc/systemd/system/netdata.service`:
+
+```sh
+[Service]
+# By default Netdata switches to scheduling policy idle, which makes it use CPU, only
+# when there is spare available.
+# Valid policies: other (the system default) | batch | idle | fifo | rr
+#CPUSchedulingPolicy=other
+
+# This sets the maximum scheduling priority Netdata can set (for policies: rr and fifo).
+# Netdata (via [global].process scheduling priority in netdata.conf) can only lower this value.
+# Priority gets values 1 (lowest) to 99 (highest).
+#CPUSchedulingPriority=1
+
+# For scheduling policy 'other' and 'batch', this sets the lowest niceness of netdata.
+# Netdata (via [global].process nice level in netdata.conf) can only increase the value set here.
+#Nice=0
+```
+
+Run `systemctl daemon-reload` to reload these changes.
+
+Now, tell Netdata to keep these settings, as set by systemd, by editing
+`netdata.conf` and setting:
+
+```conf
+[global]
+ process scheduling policy = keep
+```
+
+Using the above, whatever scheduling settings you have set at `netdata.service`
+will be maintained by netdata.
+
+### Example 1: Netdata with nice -1 on non-systemd systems
+
+On a system that is not based on systemd, to make Netdata run with nice level -1 (a little bit higher than the default for
+all programs), edit `netdata.conf` and set:
+
+```conf
+[global]
+ process scheduling policy = other
+ process nice level = -1
+```
+
+then execute this to restart netdata:
+
+```sh
+sudo service netdata restart
+```
+
+#### Example 2: Netdata with nice -1 on systemd systems
+
+On a system that is based on systemd, to make Netdata run with nice level -1 (a little bit higher than the default for all
+programs), edit `netdata.conf` and set:
+
+```conf
+[global]
+ process scheduling policy = keep
+```
+
+edit `/etc/systemd/system/netdata.service` and set:
+
+```sh
+[Service]
+CPUSchedulingPolicy=other
+Nice=-1
+```
+
+then execute:
+
+```sh
+sudo systemctl daemon-reload
+sudo systemctl restart netdata
+```
+
+## Virtual memory
+
+You may notice that Netdata's virtual memory size, as reported by `ps` or `/proc/pid/status` (or even Netdata's
+applications virtual memory chart), is unrealistically high.
+
+For example, it may be reported to be 150+MB, even if the resident memory size is just 25MB. Similar values may be
+reported for Netdata plugins too.
+
+Check this for example: A Netdata installation with default settings on Ubuntu
+16.04LTS. The top chart is **real memory used**, while the bottom one is
+**virtual memory**:
+
+![image](https://cloud.githubusercontent.com/assets/2662304/19013772/5eb7173e-87e3-11e6-8f2b-a2ccfeb06faf.png)
+
+### Why does this happen?
+
+The system memory allocator allocates virtual memory arenas per running thread. On Linux systems this defaults to 16MB
+per thread on 64 bit machines. So, if you get the difference between real and virtual memory and divide it by 16MB you
+will roughly get the number of threads running.
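+
+A rough sketch of that calculation, reading `/proc` directly (the values there are in KiB, and 16MB is 16384 KiB):
+
+```sh
+# estimate the thread count as (virtual size - resident size) / 16MB
+pid="$(pidof netdata | cut -d' ' -f1)"
+awk '/^Vm(Size|RSS):/ {v[$1]=$2} END {print (v["VmSize:"] - v["VmRSS:"]) / 16384}' "/proc/${pid}/status"
+```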
+
+The system does this for speed. Having a separate memory arena for each thread allows the threads to run in parallel on
+multi-core systems, without any locks between them.
+
+This behaviour is system specific. For example, this is the chart above when running
+Netdata on Alpine Linux (which uses **musl** instead of **glibc**):
+
+![image](https://cloud.githubusercontent.com/assets/2662304/19013807/7cf5878e-87e4-11e6-9651-082e68701eab.png)
+
+### Can we do anything to lower it?
+
+Since Netdata already uses minimal memory allocations while it runs (i.e. it adapts its memory on start, so that while it
+repeatedly collects data it does not do memory allocations), it already instructs the system memory allocator to
+minimize the memory arenas for each thread. We have also added [2 configuration
+options](https://github.com/netdata/netdata/blob/5645b1ee35248d94e6931b64a8688f7f0d865ec6/src/main.c#L410-L418) to allow
+you to tweak these settings: `glibc malloc arena max for plugins` and `glibc malloc arena max for netdata`.
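+
+A sketch of how these could be set in `netdata.conf` (assuming they belong to the `[global]` section, like the other daemon options on this page):
+
+```conf
+[global]
+    glibc malloc arena max for netdata = 1
+    glibc malloc arena max for plugins = 1
+```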
+
+However, even if we instructed the memory allocator to use just one arena, it
+seems it allocates an arena per thread.
+
+Netdata also supports `jemalloc` and `tcmalloc`, however both behave exactly the same as the glibc memory allocator in
+this aspect.
+
+### Is this a problem?
+
+No, it is not.
+
+Linux reserves real memory (physical RAM) in pages (on x86 machines pages are 4KB each). So even if the system memory
+allocator is allocating huge amounts of virtual memory, only the 4KB pages that are actually used reserve physical
+RAM. The **real memory** chart in the Netdata applications section shows the amount of physical memory these pages
+occupy (it accounts for whole pages, even if only parts of them are actually used).
+
+## Debugging
+
+When you compile Netdata with debugging:
+
+1. compiler optimizations for your CPU are disabled (Netdata will run somewhat slower)
+
+2. a lot of code is added all over netdata to log debug messages to `/var/log/netdata/debug.log`. However, nothing is
+ printed by default. Netdata allows you to select which sections of Netdata you want to trace. Tracing is activated
+ via the config option `debug flags`. It accepts a hex number, to enable or disable specific sections. You can find
+ the options supported at [log.h](https://raw.githubusercontent.com/netdata/netdata/master/libnetdata/log/log.h).
+ They are the `D_*` defines. The value `0xffffffffffffffff` will enable all possible debug flags.
+
+Once Netdata is compiled with debugging and tracing is enabled for a few sections, the file `/var/log/netdata/debug.log`
+will contain the messages.
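+
+For example, to enable every debug section at once, using the value documented above, set in `netdata.conf`:
+
+```conf
+[global]
+    debug flags = 0xffffffffffffffff
+```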
+
+> Do not forget to disable tracing (`debug flags = 0`) when you are done tracing. The file `debug.log` can grow too
+> fast.
+
+### compiling Netdata with debugging
+
+To compile Netdata with debugging, use this:
+
+```sh
+# step into the Netdata source directory
+cd /usr/src/netdata.git
+
+# run the installer with debugging enabled
+CFLAGS="-O1 -ggdb -DNETDATA_INTERNAL_CHECKS=1" ./netdata-installer.sh
+```
+
+The above will compile and install Netdata with debugging info embedded. You can now use `debug flags` to set the
+section(s) you need to trace.
+
+### debugging crashes
+
+We have done our best to make Netdata crash-free. If, however, Netdata crashes on your system, it would be very helpful
+to provide stack traces of the crash. Without them, it will be almost impossible to find the issue (the code base is
+too large to find such an issue by just observing it).
+
+To provide stack traces, **you need to have Netdata compiled with debugging**. There is no need to enable any tracing
+(`debug flags`).
+
+Then you need to be in one of the following two cases:
+
+1. Netdata crashes and you have a core dump
+
+2. you can reproduce the crash
+
+If neither case applies to you, you need to find a way to get into one (e.g. if your system does not produce core dumps,
+check your distro documentation to enable them).
+
+### Netdata crashes and you have a core dump
+
+> you need to have Netdata compiled with debugging info for this to work (check above)
+
+Run the following command and post the output in a GitHub issue.
+
+```sh
+gdb $(which netdata) /path/to/core/dump
+```
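+
+At the `gdb` prompt, a backtrace produces the stack trace to post:
+
+```sh
+(gdb) bt
+```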
+
+### you can reproduce a Netdata crash on your system
+
+> you need to have Netdata compiled with debugging info for this to work (check above)
+
+Install the package `valgrind` and run:
+
+```sh
+valgrind $(which netdata) -D
+```
+
+Netdata will start, and it will be a lot slower. Now reproduce the crash and `valgrind` will dump the stack trace on
+your console. Open a new GitHub issue and post the output.
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdaemon%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/daemon/anonymous-statistics.sh.in b/daemon/anonymous-statistics.sh.in
new file mode 100755
index 0000000..f0d9c10
--- /dev/null
+++ b/daemon/anonymous-statistics.sh.in
@@ -0,0 +1,104 @@
+#!/usr/bin/env sh
+
+# Valid actions:
+
+# - FATAL - netdata exited due to a fatal condition
+# ACTION_RESULT -- program name and thread tag
+# ACTION_DATA -- fmt, args passed to fatal
+# - START - netdata started
+# ACTION_DATA -- nan
+# - EXIT - installation action
+# ACTION_DATA -- ret value of
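+#
+# Usage: anonymous-statistics.sh ACTION ACTION_RESULT ACTION_DATA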
+
+ACTION="${1}"
+ACTION_RESULT="${2}"
+ACTION_DATA="${3}"
+ACTION_DATA=$(echo "${ACTION_DATA}" | tr '"' "'")
+
+# -------------------------------------------------------------------------------------------------
+# check opt-out
+
+if [ -f "@configdir_POST@/.opt-out-from-anonymous-statistics" ] || [ ! "${DO_NOT_TRACK:-0}" -eq 0 ] || [ -n "$DO_NOT_TRACK" ]; then
+ exit 0
+fi
+
+# Shorten version for easier reporting
+NETDATA_VERSION=$(echo "${NETDATA_VERSION}" | sed 's/-.*//g' | tr -d 'v')
+
+# -------------------------------------------------------------------------------------------------
+# send the anonymous statistics to GA
+# https://developers.google.com/analytics/devguides/collection/protocol/v1/parameters
+# The maximum index for a cd parameter is 20 so we have effectively run out.
+if [ -n "$(command -v curl 2> /dev/null)" ]; then
+ curl -X POST -Ss --max-time 2 \
+ --data "v=1" \
+ --data "tid=UA-64295674-3" \
+ --data "aip=1" \
+ --data "ds=shell" \
+ --data-urlencode "cid=${NETDATA_REGISTRY_UNIQUE_ID}" \
+ --data-urlencode "cs=${NETDATA_REGISTRY_UNIQUE_ID}" \
+ --data "t=event" \
+ --data "ni=1" \
+ --data "an=anonymous-statistics" \
+ --data-urlencode "av=${NETDATA_VERSION}" \
+ --data-urlencode "ec=${ACTION}" \
+ --data-urlencode "ea=${ACTION_RESULT}" \
+ --data-urlencode "el=${ACTION_DATA}" \
+ --data-urlencode "cd1=${NETDATA_HOST_OS_NAME}" \
+ --data-urlencode "cd2=${NETDATA_HOST_OS_ID}" \
+ --data-urlencode "cd3=${NETDATA_HOST_OS_ID_LIKE}" \
+ --data-urlencode "cd4=${NETDATA_HOST_OS_VERSION}" \
+ --data-urlencode "cd5=${NETDATA_HOST_OS_VERSION_ID}" \
+ --data-urlencode "cd6=${NETDATA_HOST_OS_DETECTION}" \
+ --data-urlencode "cd7=${NETDATA_SYSTEM_KERNEL_NAME}" \
+ --data-urlencode "cd8=${NETDATA_SYSTEM_KERNEL_VERSION}" \
+ --data-urlencode "cd9=${NETDATA_SYSTEM_ARCHITECTURE}" \
+ --data-urlencode "cd10=${NETDATA_SYSTEM_VIRTUALIZATION}" \
+ --data-urlencode "cd11=${NETDATA_SYSTEM_VIRT_DETECTION}" \
+ --data-urlencode "cd12=${NETDATA_SYSTEM_CONTAINER}" \
+ --data-urlencode "cd13=${NETDATA_SYSTEM_CONTAINER_DETECTION}" \
+ --data-urlencode "cd14=${NETDATA_CONTAINER_OS_NAME}" \
+ --data-urlencode "cd15=${NETDATA_CONTAINER_OS_ID}" \
+ --data-urlencode "cd16=${NETDATA_CONTAINER_OS_ID_LIKE}" \
+ --data-urlencode "cd17=${NETDATA_CONTAINER_OS_VERSION}" \
+ --data-urlencode "cd18=${NETDATA_CONTAINER_OS_VERSION_ID}" \
+ --data-urlencode "cd19=${NETDATA_CONTAINER_OS_DETECTION}" \
+ --data-urlencode "cd20=${NETDATA_HOST_IS_K8S_NODE}" \
+ "https://www.google-analytics.com/collect" > /dev/null 2>&1
+else
+ wget -q -O - --timeout=1 "https://www.google-analytics.com/collect?\
+&v=1\
+&tid=UA-64295674-3\
+&aip=1\
+&ds=shell\
+&cid=${NETDATA_REGISTRY_UNIQUE_ID}\
+&cs=${NETDATA_REGISTRY_UNIQUE_ID}\
+&t=event\
+&ni=1\
+&an=anonymous-statistics\
+&av=${NETDATA_VERSION}\
+&ec=${ACTION}\
+&ea=${ACTION_RESULT}\
+&el=${ACTION_DATA}\
+&cd1=${NETDATA_HOST_OS_NAME}\
+&cd2=${NETDATA_HOST_OS_ID}\
+&cd3=${NETDATA_HOST_OS_ID_LIKE}\
+&cd4=${NETDATA_HOST_OS_VERSION}\
+&cd5=${NETDATA_HOST_OS_VERSION_ID}\
+&cd6=${NETDATA_HOST_OS_DETECTION}\
+&cd7=${NETDATA_SYSTEM_KERNEL_NAME}\
+&cd8=${NETDATA_SYSTEM_KERNEL_VERSION}\
+&cd9=${NETDATA_SYSTEM_ARCHITECTURE}\
+&cd10=${NETDATA_SYSTEM_VIRTUALIZATION}\
+&cd11=${NETDATA_SYSTEM_VIRT_DETECTION}\
+&cd12=${NETDATA_SYSTEM_CONTAINER}\
+&cd13=${NETDATA_SYSTEM_CONTAINER_DETECTION}\
+&cd14=${NETDATA_CONTAINER_OS_NAME}\
+&cd15=${NETDATA_CONTAINER_OS_ID}\
+&cd16=${NETDATA_CONTAINER_OS_ID_LIKE}\
+&cd17=${NETDATA_CONTAINER_OS_VERSION}\
+&cd18=${NETDATA_CONTAINER_OS_VERSION_ID}\
+&cd19=${NETDATA_CONTAINER_OS_DETECTION}\
+&cd20=${NETDATA_HOST_IS_K8S_NODE}\
+" > /dev/null 2>&1
+fi
diff --git a/daemon/buildinfo.c b/daemon/buildinfo.c
new file mode 100644
index 0000000..de02a72
--- /dev/null
+++ b/daemon/buildinfo.c
@@ -0,0 +1,229 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include <stdio.h>
+#include "./config.h"
+
+// Optional features
+
+#ifdef ENABLE_ACLK
+#define FEAT_CLOUD "YES"
+#else
+#ifdef DISABLE_CLOUD
+#define FEAT_CLOUD "NO (by user request e.g. '--disable-cloud')"
+#else
+#define FEAT_CLOUD "NO"
+#endif
+#endif
+
+#ifdef ENABLE_DBENGINE
+#define FEAT_DBENGINE "YES"
+#else
+#define FEAT_DBENGINE "NO"
+#endif
+
+#if defined(HAVE_X509_VERIFY_PARAM_set1_host) && HAVE_X509_VERIFY_PARAM_set1_host == 1
+#define FEAT_TLS_HOST_VERIFY "YES"
+#else
+#define FEAT_TLS_HOST_VERIFY "NO"
+#endif
+
+#ifdef ENABLE_HTTPS
+#define FEAT_NATIVE_HTTPS "YES"
+#else
+#define FEAT_NATIVE_HTTPS "NO"
+#endif
+
+// Optional libraries
+
+#ifdef ENABLE_JSONC
+#define FEAT_JSONC "YES"
+#else
+#define FEAT_JSONC "NO"
+#endif
+
+#ifdef ENABLE_JEMALLOC
+#define FEAT_JEMALLOC "YES"
+#else
+#define FEAT_JEMALLOC "NO"
+#endif
+
+#ifdef ENABLE_TCMALLOC
+#define FEAT_TCMALLOC "YES"
+#else
+#define FEAT_TCMALLOC "NO"
+#endif
+
+#ifdef HAVE_CAPABILITY
+#define FEAT_LIBCAP "YES"
+#else
+#define FEAT_LIBCAP "NO"
+#endif
+
+#ifdef ACLK_NO_LIBMOSQ
+#define FEAT_MOSQUITTO "NO"
+#else
+#define FEAT_MOSQUITTO "YES"
+#endif
+
+#ifdef ACLK_NO_LWS
+#define FEAT_LWS "NO"
+#else
+#ifdef ENABLE_ACLK
+#include <libwebsockets.h>
+#endif
+#ifdef BUNDLED_LWS
+#define FEAT_LWS "YES static"
+#else
+#define FEAT_LWS "YES shared-lib"
+#endif
+#endif
+
+#ifdef NETDATA_WITH_ZLIB
+#define FEAT_ZLIB "YES"
+#else
+#define FEAT_ZLIB "NO"
+#endif
+
+#ifdef STORAGE_WITH_MATH
+#define FEAT_LIBM "YES"
+#else
+#define FEAT_LIBM "NO"
+#endif
+
+#ifdef HAVE_CRYPTO
+#define FEAT_CRYPTO "YES"
+#else
+#define FEAT_CRYPTO "NO"
+#endif
+
+// Optional plugins
+
+#ifdef ENABLE_APPS_PLUGIN
+#define FEAT_APPS_PLUGIN "YES"
+#else
+#define FEAT_APPS_PLUGIN "NO"
+#endif
+
+#ifdef HAVE_FREEIPMI
+#define FEAT_IPMI "YES"
+#else
+#define FEAT_IPMI "NO"
+#endif
+
+#ifdef HAVE_CUPS
+#define FEAT_CUPS "YES"
+#else
+#define FEAT_CUPS "NO"
+#endif
+
+#ifdef HAVE_LIBMNL
+#define FEAT_NFACCT "YES"
+#else
+#define FEAT_NFACCT "NO"
+#endif
+
+#ifdef HAVE_LIBXENSTAT
+#define FEAT_XEN "YES"
+#else
+#define FEAT_XEN "NO"
+#endif
+
+#ifdef HAVE_XENSTAT_VBD_ERROR
+#define FEAT_XEN_VBD_ERROR "YES"
+#else
+#define FEAT_XEN_VBD_ERROR "NO"
+#endif
+
+#ifdef HAVE_LIBBPF
+#define FEAT_EBPF "YES"
+#else
+#define FEAT_EBPF "NO"
+#endif
+
+#ifdef HAVE_SETNS
+#define FEAT_CGROUP_NET "YES"
+#else
+#define FEAT_CGROUP_NET "NO"
+#endif
+
+#ifdef ENABLE_PERF_PLUGIN
+#define FEAT_PERF "YES"
+#else
+#define FEAT_PERF "NO"
+#endif
+
+#ifdef ENABLE_SLABINFO
+#define FEAT_SLABINFO "YES"
+#else
+#define FEAT_SLABINFO "NO"
+#endif
+
+// Optional Exporters
+
+#ifdef HAVE_KINESIS
+#define FEAT_KINESIS "YES"
+#else
+#define FEAT_KINESIS "NO"
+#endif
+
+#ifdef ENABLE_EXPORTING_PUBSUB
+#define FEAT_PUBSUB "YES"
+#else
+#define FEAT_PUBSUB "NO"
+#endif
+
+#ifdef HAVE_MONGOC
+#define FEAT_MONGO "YES"
+#else
+#define FEAT_MONGO "NO"
+#endif
+
+#ifdef ENABLE_PROMETHEUS_REMOTE_WRITE
+#define FEAT_REMOTE_WRITE "YES"
+#else
+#define FEAT_REMOTE_WRITE "NO"
+#endif
+
+
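+// Print the compile-time feature matrix resolved by the #ifdef blocks above.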
+void print_build_info(void) {
+ printf("Configure options: %s\n", CONFIGURE_COMMAND);
+
+ printf("Features:\n");
+ printf(" dbengine: %s\n", FEAT_DBENGINE);
+ printf(" Native HTTPS: %s\n", FEAT_NATIVE_HTTPS);
+ printf(" Netdata Cloud: %s\n", FEAT_CLOUD);
+ printf(" TLS Host Verification: %s\n", FEAT_TLS_HOST_VERIFY);
+
+ printf("Libraries:\n");
+ printf(" jemalloc: %s\n", FEAT_JEMALLOC);
+ printf(" JSON-C: %s\n", FEAT_JSONC);
+ printf(" libcap: %s\n", FEAT_LIBCAP);
+ printf(" libcrypto: %s\n", FEAT_CRYPTO);
+ printf(" libm: %s\n", FEAT_LIBM);
+#if defined(ENABLE_ACLK)
+ printf(" LWS: %s v%d.%d.%d\n", FEAT_LWS, LWS_LIBRARY_VERSION_MAJOR, LWS_LIBRARY_VERSION_MINOR, LWS_LIBRARY_VERSION_PATCH);
+#else
+ printf(" LWS: %s\n", FEAT_LWS);
+#endif
+ printf(" mosquitto: %s\n", FEAT_MOSQUITTO);
+    printf("    tcmalloc: %s\n", FEAT_TCMALLOC);
+ printf(" zlib: %s\n", FEAT_ZLIB);
+
+ printf("Plugins:\n");
+ printf(" apps: %s\n", FEAT_APPS_PLUGIN);
+ printf(" cgroup Network Tracking: %s\n", FEAT_CGROUP_NET);
+ printf(" CUPS: %s\n", FEAT_CUPS);
+ printf(" EBPF: %s\n", FEAT_EBPF);
+ printf(" IPMI: %s\n", FEAT_IPMI);
+ printf(" NFACCT: %s\n", FEAT_NFACCT);
+ printf(" perf: %s\n", FEAT_PERF);
+ printf(" slabinfo: %s\n", FEAT_SLABINFO);
+ printf(" Xen: %s\n", FEAT_XEN);
+ printf(" Xen VBD Error Tracking: %s\n", FEAT_XEN_VBD_ERROR);
+
+ printf("Exporters:\n");
+ printf(" AWS Kinesis: %s\n", FEAT_KINESIS);
+ printf(" GCP PubSub: %s\n", FEAT_PUBSUB);
+ printf(" MongoDB: %s\n", FEAT_MONGO);
+ printf(" Prometheus Remote Write: %s\n", FEAT_REMOTE_WRITE);
+};
diff --git a/daemon/buildinfo.h b/daemon/buildinfo.h
new file mode 100644
index 0000000..76912ea
--- /dev/null
+++ b/daemon/buildinfo.h
@@ -0,0 +1,8 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_BUILDINFO_H
+#define NETDATA_BUILDINFO_H 1
+
+extern void print_build_info(void);
+
+#endif // NETDATA_BUILDINFO_H
diff --git a/daemon/commands.c b/daemon/commands.c
new file mode 100644
index 0000000..eac392e
--- /dev/null
+++ b/daemon/commands.c
@@ -0,0 +1,726 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "common.h"
+#include "../database/engine/rrdenginelib.h"
+
+static uv_thread_t thread;
+static uv_loop_t* loop;
+static uv_async_t async;
+static struct completion completion;
+static uv_pipe_t server_pipe;
+
+char cmd_prefix_by_status[] = {
+ CMD_PREFIX_INFO,
+ CMD_PREFIX_ERROR,
+ CMD_PREFIX_ERROR
+};
+
+static int command_server_initialized = 0;
+static int command_thread_error;
+static int command_thread_shutdown;
+static unsigned clients = 0;
+
+struct command_context {
+ /* embedded client pipe structure at address 0 */
+ uv_pipe_t client;
+
+ uv_work_t work;
+ uv_write_t write_req;
+ cmd_t idx;
+ char *args;
+ char *message;
+ cmd_status_t status;
+ char command_string[MAX_COMMAND_LENGTH];
+ unsigned command_string_size;
+};
+
+/* Forward declarations */
+static cmd_status_t cmd_help_execute(char *args, char **message);
+static cmd_status_t cmd_reload_health_execute(char *args, char **message);
+static cmd_status_t cmd_save_database_execute(char *args, char **message);
+static cmd_status_t cmd_reopen_logs_execute(char *args, char **message);
+static cmd_status_t cmd_exit_execute(char *args, char **message);
+static cmd_status_t cmd_fatal_execute(char *args, char **message);
+static cmd_status_t cmd_reload_claiming_state_execute(char *args, char **message);
+static cmd_status_t cmd_reload_labels_execute(char *args, char **message);
+static cmd_status_t cmd_read_config_execute(char *args, char **message);
+static cmd_status_t cmd_write_config_execute(char *args, char **message);
+static cmd_status_t cmd_ping_execute(char *args, char **message);
+
+static command_info_t command_info_array[] = {
+ {"help", cmd_help_execute, CMD_TYPE_HIGH_PRIORITY}, // show help menu
+ {"reload-health", cmd_reload_health_execute, CMD_TYPE_ORTHOGONAL}, // reload health configuration
+ {"save-database", cmd_save_database_execute, CMD_TYPE_ORTHOGONAL}, // save database for memory mode save
+ {"reopen-logs", cmd_reopen_logs_execute, CMD_TYPE_ORTHOGONAL}, // Close and reopen log files
+ {"shutdown-agent", cmd_exit_execute, CMD_TYPE_EXCLUSIVE}, // exit cleanly
+ {"fatal-agent", cmd_fatal_execute, CMD_TYPE_HIGH_PRIORITY}, // exit with fatal error
+ {"reload-claiming-state", cmd_reload_claiming_state_execute, CMD_TYPE_ORTHOGONAL}, // reload claiming state
+ {"reload-labels", cmd_reload_labels_execute, CMD_TYPE_ORTHOGONAL}, // reload the labels
+ {"read-config", cmd_read_config_execute, CMD_TYPE_CONCURRENT},
+ {"write-config", cmd_write_config_execute, CMD_TYPE_ORTHOGONAL},
+ {"ping", cmd_ping_execute, CMD_TYPE_ORTHOGONAL}
+};
+
+/* Mutexes for commands of type CMD_TYPE_ORTHOGONAL */
+static uv_mutex_t command_lock_array[CMD_TOTAL_COMMANDS];
+/* Commands of type CMD_TYPE_EXCLUSIVE are writers */
+static uv_rwlock_t exclusive_rwlock;
+/*
+ * Locking order:
+ * 1. exclusive_rwlock
+ * 2. command_lock_array[]
+ */
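+
+/*
+ * Lock behaviour per command type (implemented by cmd_lock_* / cmd_unlock_* below):
+ * - exclusive commands take the write side of exclusive_rwlock and run alone;
+ * - orthogonal commands take the read side plus their per-command mutex;
+ * - idempotent commands take only the read side;
+ * - high priority commands take no locks at all.
+ */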
+
+/* Forward declarations */
+static void cmd_lock_exclusive(unsigned index);
+static void cmd_lock_orthogonal(unsigned index);
+static void cmd_lock_idempotent(unsigned index);
+static void cmd_lock_high_priority(unsigned index);
+
+static command_lock_t *cmd_lock_by_type[] = {
+ cmd_lock_exclusive,
+ cmd_lock_orthogonal,
+ cmd_lock_idempotent,
+ cmd_lock_high_priority
+};
+
+/* Forward declarations */
+static void cmd_unlock_exclusive(unsigned index);
+static void cmd_unlock_orthogonal(unsigned index);
+static void cmd_unlock_idempotent(unsigned index);
+static void cmd_unlock_high_priority(unsigned index);
+
+static command_lock_t *cmd_unlock_by_type[] = {
+ cmd_unlock_exclusive,
+ cmd_unlock_orthogonal,
+ cmd_unlock_idempotent,
+ cmd_unlock_high_priority
+};
+
+static cmd_status_t cmd_help_execute(char *args, char **message)
+{
+ (void)args;
+
+ *message = mallocz(MAX_COMMAND_LENGTH);
+ strncpyz(*message,
+ "\nThe commands are (arguments are in brackets):\n"
+ "help\n"
+ " Show this help menu.\n"
+ "reload-health\n"
+ " Reload health configuration.\n"
+ "reload-labels\n"
+ " Reload all labels.\n"
+ "save-database\n"
+ " Save internal DB to disk for memory mode save.\n"
+ "reopen-logs\n"
+ " Close and reopen log files.\n"
+ "shutdown-agent\n"
+ " Cleanup and exit the netdata agent.\n"
+ "fatal-agent\n"
+ " Log the state and halt the netdata agent.\n"
+ "reload-claiming-state\n"
+ " Reload agent claiming state from disk.\n"
+ "ping\n"
+ " Return with 'pong' if agent is alive.\n",
+ MAX_COMMAND_LENGTH - 1);
+ return CMD_STATUS_SUCCESS;
+}
+
+static cmd_status_t cmd_reload_health_execute(char *args, char **message)
+{
+ (void)args;
+ (void)message;
+
+ error_log_limit_unlimited();
+ info("COMMAND: Reloading HEALTH configuration.");
+ health_reload();
+ error_log_limit_reset();
+
+ return CMD_STATUS_SUCCESS;
+}
+
+static cmd_status_t cmd_save_database_execute(char *args, char **message)
+{
+ (void)args;
+ (void)message;
+
+ error_log_limit_unlimited();
+ info("COMMAND: Saving databases.");
+ rrdhost_save_all();
+ info("COMMAND: Databases saved.");
+ error_log_limit_reset();
+
+ return CMD_STATUS_SUCCESS;
+}
+
+static cmd_status_t cmd_reopen_logs_execute(char *args, char **message)
+{
+ (void)args;
+ (void)message;
+
+ error_log_limit_unlimited();
+ info("COMMAND: Reopening all log files.");
+ reopen_all_log_files();
+ error_log_limit_reset();
+
+ return CMD_STATUS_SUCCESS;
+}
+
+static cmd_status_t cmd_exit_execute(char *args, char **message)
+{
+ (void)args;
+ (void)message;
+
+ error_log_limit_unlimited();
+ info("COMMAND: Cleaning up to exit.");
+ netdata_cleanup_and_exit(0);
+ exit(0);
+
+ return CMD_STATUS_SUCCESS;
+}
+
+static cmd_status_t cmd_fatal_execute(char *args, char **message)
+{
+ (void)args;
+ (void)message;
+
+ fatal("COMMAND: netdata now exits.");
+
+ return CMD_STATUS_SUCCESS;
+}
+
+static cmd_status_t cmd_reload_claiming_state_execute(char *args, char **message)
+{
+ (void)args;
+ (void)message;
+#if defined(DISABLE_CLOUD) || !defined(ENABLE_ACLK)
+ info("The claiming feature has been explicitly disabled");
+ *message = strdupz("This agent cannot be claimed, it was built without support for Cloud");
+ return CMD_STATUS_FAILURE;
+#endif
+ error_log_limit_unlimited();
+ info("COMMAND: Reloading Agent Claiming configuration.");
+ load_claiming_state();
+ registry_update_cloud_base_url();
+ rrdpush_claimed_id(localhost);
+ error_log_limit_reset();
+ return CMD_STATUS_SUCCESS;
+}
+
+static cmd_status_t cmd_reload_labels_execute(char *args, char **message)
+{
+ (void)args;
+ info("COMMAND: reloading host labels.");
+ reload_host_labels();
+
+ BUFFER *wb = buffer_create(10);
+
+ rrdhost_rdlock(localhost);
+ netdata_rwlock_rdlock(&localhost->labels.labels_rwlock);
+ struct label *l = localhost->labels.head;
+ while (l != NULL) {
+ buffer_sprintf(wb,"Label [source id=%s]: \"%s\" -> \"%s\"\n", translate_label_source(l->label_source), l->key, l->value);
+ l = l->next;
+ }
+ netdata_rwlock_unlock(&localhost->labels.labels_rwlock);
+ rrdhost_unlock(localhost);
+
+ (*message)=strdupz(buffer_tostring(wb));
+ buffer_free(wb);
+
+ return CMD_STATUS_SUCCESS;
+}
+
+static cmd_status_t cmd_read_config_execute(char *args, char **message)
+{
+ size_t n = strlen(args);
+ char *separator = strchr(args,'|');
+ if (separator == NULL)
+ return CMD_STATUS_FAILURE;
+ char *separator2 = strchr(separator + 1,'|');
+ if (separator2 == NULL)
+ return CMD_STATUS_FAILURE;
+
+ char *temp = callocz(n + 1, 1);
+ strcpy(temp, args);
+ size_t offset = separator - args;
+ temp[offset] = 0;
+ size_t offset2 = separator2 - args;
+ temp[offset2] = 0;
+
+ const char *conf_file = temp; /* "cloud" is cloud.conf, otherwise netdata.conf */
+ struct config *tmp_config = strcmp(conf_file, "cloud") ? &netdata_config : &cloud_config;
+
+ char *value = appconfig_get(tmp_config, temp + offset + 1, temp + offset2 + 1, NULL);
+ if (value == NULL)
+ {
+ error("Cannot execute read-config conf_file=%s section=%s / key=%s because no value set", conf_file,
+ temp + offset + 1, temp + offset2 + 1);
+ freez(temp);
+ return CMD_STATUS_FAILURE;
+ }
+ else
+ {
+ (*message) = strdupz(value);
+ freez(temp);
+ return CMD_STATUS_SUCCESS;
+ }
+
+}
+
+static cmd_status_t cmd_write_config_execute(char *args, char **message)
+{
+ UNUSED(message);
+ info("write-config %s", args);
+ size_t n = strlen(args);
+ char *separator = strchr(args,'|');
+ if (separator == NULL)
+ return CMD_STATUS_FAILURE;
+ char *separator2 = strchr(separator + 1,'|');
+ if (separator2 == NULL)
+ return CMD_STATUS_FAILURE;
+ char *separator3 = strchr(separator2 + 1,'|');
+ if (separator3 == NULL)
+ return CMD_STATUS_FAILURE;
+ char *temp = callocz(n + 1, 1);
+ strcpy(temp, args);
+ size_t offset = separator - args;
+ temp[offset] = 0;
+ size_t offset2 = separator2 - args;
+ temp[offset2] = 0;
+ size_t offset3 = separator3 - args;
+ temp[offset3] = 0;
+
+ const char *conf_file = temp; /* "cloud" is cloud.conf, otherwise netdata.conf */
+ struct config *tmp_config = strcmp(conf_file, "cloud") ? &netdata_config : &cloud_config;
+
+ appconfig_set(tmp_config, temp + offset + 1, temp + offset2 + 1, temp + offset3 + 1);
+ info("write-config conf_file=%s section=%s key=%s value=%s",conf_file, temp + offset + 1, temp + offset2 + 1,
+ temp + offset3 + 1);
+ freez(temp);
+ return CMD_STATUS_SUCCESS;
+}
+
+static cmd_status_t cmd_ping_execute(char *args, char **message)
+{
+ (void)args;
+
+ *message = strdupz("pong");
+
+ return CMD_STATUS_SUCCESS;
+}
+
+static void cmd_lock_exclusive(unsigned index)
+{
+ (void)index;
+
+ uv_rwlock_wrlock(&exclusive_rwlock);
+}
+
+static void cmd_lock_orthogonal(unsigned index)
+{
+ uv_rwlock_rdlock(&exclusive_rwlock);
+ uv_mutex_lock(&command_lock_array[index]);
+}
+
+static void cmd_lock_idempotent(unsigned index)
+{
+ (void)index;
+
+ uv_rwlock_rdlock(&exclusive_rwlock);
+}
+
+static void cmd_lock_high_priority(unsigned index)
+{
+ (void)index;
+}
+
+static void cmd_unlock_exclusive(unsigned index)
+{
+ (void)index;
+
+ uv_rwlock_wrunlock(&exclusive_rwlock);
+}
+
+static void cmd_unlock_orthogonal(unsigned index)
+{
+ uv_rwlock_rdunlock(&exclusive_rwlock);
+ uv_mutex_unlock(&command_lock_array[index]);
+}
+
+static void cmd_unlock_idempotent(unsigned index)
+{
+ (void)index;
+
+ uv_rwlock_rdunlock(&exclusive_rwlock);
+}
+
+static void cmd_unlock_high_priority(unsigned index)
+{
+ (void)index;
+}
+
+static void pipe_close_cb(uv_handle_t* handle)
+{
+ /* Also frees command context */
+ freez(handle);
+}
+
+static void pipe_write_cb(uv_write_t* req, int status)
+{
+ (void)status;
+ uv_pipe_t *client = req->data;
+
+ uv_close((uv_handle_t *)client, pipe_close_cb);
+ --clients;
+ info("Command Clients = %u\n", clients);
+}
+
+static inline void add_char_to_command_reply(char *reply_string, unsigned *reply_string_size, char character)
+{
+ reply_string[(*reply_string_size)++] = character;
+}
+
+static inline void add_string_to_command_reply(char *reply_string, unsigned *reply_string_size, char *str)
+{
+ unsigned len;
+
+ len = strlen(str);
+ strncpyz(reply_string + *reply_string_size, str, len);
+ *reply_string_size += len;
+}
+
+static void send_command_reply(struct command_context *cmd_ctx, cmd_status_t status, char *message)
+{
+ int ret;
+ char reply_string[MAX_COMMAND_LENGTH] = {'\0', };
+ char exit_status_string[MAX_EXIT_STATUS_LENGTH + 1] = {'\0', };
+ unsigned reply_string_size = 0;
+ uv_buf_t write_buf;
+ uv_stream_t *client = (uv_stream_t *)(uv_pipe_t *)cmd_ctx;
+
+ snprintfz(exit_status_string, MAX_EXIT_STATUS_LENGTH, "%u", status);
+ add_char_to_command_reply(reply_string, &reply_string_size, CMD_PREFIX_EXIT_CODE);
+ add_string_to_command_reply(reply_string, &reply_string_size, exit_status_string);
+ add_char_to_command_reply(reply_string, &reply_string_size, '\0');
+
+ if (message) {
+ add_char_to_command_reply(reply_string, &reply_string_size, cmd_prefix_by_status[status]);
+ add_string_to_command_reply(reply_string, &reply_string_size, message);
+ }
+
+ cmd_ctx->write_req.data = client;
+ write_buf.base = reply_string;
+ write_buf.len = reply_string_size;
+ ret = uv_write(&cmd_ctx->write_req, (uv_stream_t *)client, &write_buf, 1, pipe_write_cb);
+ if (ret) {
+ error("uv_write(): %s", uv_strerror(ret));
+ }
+ info("COMMAND: Sending reply: \"%s\"", reply_string);
+}
+
+cmd_status_t execute_command(cmd_t idx, char *args, char **message)
+{
+ cmd_status_t status;
+ cmd_type_t type = command_info_array[idx].type;
+
+ cmd_lock_by_type[type](idx);
+ status = command_info_array[idx].func(args, message);
+ cmd_unlock_by_type[type](idx);
+
+ return status;
+}
+
+static void after_schedule_command(uv_work_t *req, int status)
+{
+ struct command_context *cmd_ctx = req->data;
+
+ (void)status;
+
+ send_command_reply(cmd_ctx, cmd_ctx->status, cmd_ctx->message);
+ if (cmd_ctx->message)
+ freez(cmd_ctx->message);
+}
+
+static void schedule_command(uv_work_t *req)
+{
+ struct command_context *cmd_ctx = req->data;
+
+ cmd_ctx->status = execute_command(cmd_ctx->idx, cmd_ctx->args, &cmd_ctx->message);
+}
+
+/* Parses the command string in place (whitespace is trimmed inside
+ * cmd_ctx->command_string) and schedules the matching command, if any. */
+static void parse_commands(struct command_context *cmd_ctx)
+{
+ char *message = NULL, *pos, *lstrip, *rstrip;
+ cmd_t i;
+ cmd_status_t status;
+
+ status = CMD_STATUS_FAILURE;
+
+ /* Skip white-space characters */
+ for (pos = cmd_ctx->command_string ; isspace(*pos) && ('\0' != *pos) ; ++pos) {;}
+ for (i = 0 ; i < CMD_TOTAL_COMMANDS ; ++i) {
+ if (!strncmp(pos, command_info_array[i].cmd_str, strlen(command_info_array[i].cmd_str))) {
+ if (CMD_EXIT == i) {
+ /* musl C does not like libuv workqueues calling exit() */
+ execute_command(CMD_EXIT, NULL, NULL);
+ }
+ for (lstrip=pos + strlen(command_info_array[i].cmd_str); isspace(*lstrip) && ('\0' != *lstrip); ++lstrip) {;}
+ for (rstrip=lstrip+strlen(lstrip)-1; rstrip>lstrip && isspace(*rstrip); *(rstrip--) = 0 );
+
+ cmd_ctx->work.data = cmd_ctx;
+ cmd_ctx->idx = i;
+ cmd_ctx->args = lstrip;
+ cmd_ctx->message = NULL;
+
+ fatal_assert(0 == uv_queue_work(loop, &cmd_ctx->work, schedule_command, after_schedule_command));
+ break;
+ }
+ }
+ if (CMD_TOTAL_COMMANDS == i) {
+ /* no command found */
+ message = strdupz("Illegal command. Please type \"help\" for instructions.");
+ send_command_reply(cmd_ctx, status, message);
+ freez(message);
+ }
+}
+
+static void pipe_read_cb(uv_stream_t *client, ssize_t nread, const uv_buf_t *buf)
+{
+ struct command_context *cmd_ctx = (struct command_context *)client;
+
+ if (0 == nread) {
+ info("%s: Zero bytes read by command pipe.", __func__);
+ } else if (UV_EOF == nread) {
+ info("EOF found in command pipe.");
+ parse_commands(cmd_ctx);
+ } else if (nread < 0) {
+ error("%s: %s", __func__, uv_strerror(nread));
+ }
+
+ if (nread < 0) { /* stop stream due to EOF or error */
+ (void)uv_read_stop((uv_stream_t *)client);
+ } else if (nread) {
+ size_t to_copy;
+
+ to_copy = MIN(nread, MAX_COMMAND_LENGTH - 1 - cmd_ctx->command_string_size);
+ memcpy(cmd_ctx->command_string + cmd_ctx->command_string_size, buf->base, to_copy);
+ cmd_ctx->command_string_size += to_copy;
+ cmd_ctx->command_string[cmd_ctx->command_string_size] = '\0';
+ }
+ if (buf && buf->len) {
+ freez(buf->base);
+ }
+
+ if (nread < 0 && UV_EOF != nread) {
+ uv_close((uv_handle_t *)client, pipe_close_cb);
+ --clients;
+ info("Command Clients = %u\n", clients);
+ }
+}
+
+static void alloc_cb(uv_handle_t *handle, size_t suggested_size, uv_buf_t *buf)
+{
+ (void)handle;
+
+ buf->base = mallocz(suggested_size);
+ buf->len = suggested_size;
+}
+
+static void connection_cb(uv_stream_t *server, int status)
+{
+ int ret;
+ uv_pipe_t *client;
+ struct command_context *cmd_ctx;
+ fatal_assert(status == 0);
+
+ /* combined allocation of client pipe and command context */
+ cmd_ctx = mallocz(sizeof(*cmd_ctx));
+ client = (uv_pipe_t *)cmd_ctx;
+ ret = uv_pipe_init(server->loop, client, 1);
+ if (ret) {
+ error("uv_pipe_init(): %s", uv_strerror(ret));
+ freez(cmd_ctx);
+ return;
+ }
+ ret = uv_accept(server, (uv_stream_t *)client);
+ if (ret) {
+ error("uv_accept(): %s", uv_strerror(ret));
+ uv_close((uv_handle_t *)client, pipe_close_cb);
+ return;
+ }
+
+ ++clients;
+ info("Command Clients = %u\n", clients);
+ /* Start parsing a new command */
+ cmd_ctx->command_string_size = 0;
+ cmd_ctx->command_string[0] = '\0';
+
+ ret = uv_read_start((uv_stream_t*)client, alloc_cb, pipe_read_cb);
+ if (ret) {
+ error("uv_read_start(): %s", uv_strerror(ret));
+ uv_close((uv_handle_t *)client, pipe_close_cb);
+ --clients;
+ info("Command Clients = %u\n", clients);
+ return;
+ }
+}
+
+static void async_cb(uv_async_t *handle)
+{
+ uv_stop(handle->loop);
+}
+
+static void command_thread(void *arg)
+{
+ int ret;
+ uv_fs_t req;
+
+ (void) arg;
+ loop = mallocz(sizeof(uv_loop_t));
+ ret = uv_loop_init(loop);
+ if (ret) {
+ error("uv_loop_init(): %s", uv_strerror(ret));
+ command_thread_error = ret;
+ goto error_after_loop_init;
+ }
+ loop->data = NULL;
+
+ ret = uv_async_init(loop, &async, async_cb);
+ if (ret) {
+ error("uv_async_init(): %s", uv_strerror(ret));
+ command_thread_error = ret;
+ goto error_after_async_init;
+ }
+ async.data = NULL;
+
+ ret = uv_pipe_init(loop, &server_pipe, 0);
+ if (ret) {
+ error("uv_pipe_init(): %s", uv_strerror(ret));
+ command_thread_error = ret;
+ goto error_after_pipe_init;
+ }
+ (void)uv_fs_unlink(loop, &req, PIPENAME, NULL);
+ uv_fs_req_cleanup(&req);
+ ret = uv_pipe_bind(&server_pipe, PIPENAME);
+ if (ret) {
+ error("uv_pipe_bind(): %s", uv_strerror(ret));
+ command_thread_error = ret;
+ goto error_after_pipe_bind;
+ }
+ ret = uv_listen((uv_stream_t *)&server_pipe, SOMAXCONN, connection_cb);
+ if (ret) {
+ /* Fallback to backlog of 1 */
+ info("uv_listen() failed with backlog = %d, falling back to backlog = 1.", SOMAXCONN);
+ ret = uv_listen((uv_stream_t *)&server_pipe, 1, connection_cb);
+ }
+ if (ret) {
+ error("uv_listen(): %s", uv_strerror(ret));
+ command_thread_error = ret;
+ goto error_after_uv_listen;
+ }
+
+ command_thread_error = 0;
+ command_thread_shutdown = 0;
+ /* wake up initialization thread */
+ complete(&completion);
+
+ while (command_thread_shutdown == 0) {
+ uv_run(loop, UV_RUN_DEFAULT);
+ }
+ /* cleanup operations of the event loop */
+ info("Shutting down command event loop.");
+ uv_close((uv_handle_t *)&async, NULL);
+ uv_close((uv_handle_t*)&server_pipe, NULL);
+ uv_run(loop, UV_RUN_DEFAULT); /* flush all libuv handles */
+
+ info("Shutting down command loop complete.");
+ fatal_assert(0 == uv_loop_close(loop));
+ freez(loop);
+
+ return;
+
+error_after_uv_listen:
+error_after_pipe_bind:
+ uv_close((uv_handle_t*)&server_pipe, NULL);
+error_after_pipe_init:
+ uv_close((uv_handle_t *)&async, NULL);
+error_after_async_init:
+ uv_run(loop, UV_RUN_DEFAULT); /* flush all libuv handles */
+ fatal_assert(0 == uv_loop_close(loop));
+error_after_loop_init:
+ freez(loop);
+
+ /* wake up initialization thread */
+ complete(&completion);
+}
+
+static void sanity_check(void)
+{
+ /* The size of command_info_array must be CMD_TOTAL_COMMANDS elements */
+ BUILD_BUG_ON(CMD_TOTAL_COMMANDS != sizeof(command_info_array) / sizeof(command_info_array[0]));
+}
+
+void commands_init(void)
+{
+ cmd_t i;
+ int error;
+
+ sanity_check();
+ if (command_server_initialized)
+ return;
+
+ info("Initializing command server.");
+ for (i = 0 ; i < CMD_TOTAL_COMMANDS ; ++i) {
+ fatal_assert(0 == uv_mutex_init(&command_lock_array[i]));
+ }
+ fatal_assert(0 == uv_rwlock_init(&exclusive_rwlock));
+
+ init_completion(&completion);
+ error = uv_thread_create(&thread, command_thread, NULL);
+ if (error) {
+ error("uv_thread_create(): %s", uv_strerror(error));
+ goto after_error;
+ }
+ /* wait for worker thread to initialize */
+ wait_for_completion(&completion);
+ destroy_completion(&completion);
+ uv_thread_set_name_np(thread, "DAEMON_COMMAND");
+
+ if (command_thread_error) {
+ error = uv_thread_join(&thread);
+ if (error) {
+ error("uv_thread_create(): %s", uv_strerror(error));
+ }
+ goto after_error;
+ }
+
+ command_server_initialized = 1;
+ return;
+
+after_error:
+ error("Failed to initialize command server. The netdata cli tool will be unable to send commands.");
+}
+
+void commands_exit(void)
+{
+ cmd_t i;
+
+ if (!command_server_initialized)
+ return;
+
+ command_thread_shutdown = 1;
+ info("Shutting down command server.");
+ /* wake up event loop */
+ fatal_assert(0 == uv_async_send(&async));
+ fatal_assert(0 == uv_thread_join(&thread));
+
+ for (i = 0 ; i < CMD_TOTAL_COMMANDS ; ++i) {
+ uv_mutex_destroy(&command_lock_array[i]);
+ }
+ uv_rwlock_destroy(&exclusive_rwlock);
+ info("Command server has stopped.");
+ command_server_initialized = 0;
+}
diff --git a/daemon/commands.h b/daemon/commands.h
new file mode 100644
index 0000000..bd4aabf
--- /dev/null
+++ b/daemon/commands.h
@@ -0,0 +1,81 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_COMMANDS_H
+#define NETDATA_COMMANDS_H 1
+
+#ifdef _WIN32
+# define PIPENAME "\\\\?\\pipe\\netdata-cli"
+#else
+# define PIPENAME "/tmp/netdata-ipc"
+#endif
+
+#define MAX_COMMAND_LENGTH 4096
+#define MAX_EXIT_STATUS_LENGTH 23 /* Can't ever be bigger than "X-18446744073709551616" */
+
+typedef enum cmd {
+ CMD_HELP = 0,
+ CMD_RELOAD_HEALTH,
+ CMD_SAVE_DATABASE,
+ CMD_REOPEN_LOGS,
+ CMD_EXIT,
+ CMD_FATAL,
+ CMD_RELOAD_CLAIMING_STATE,
+ CMD_RELOAD_LABELS,
+ CMD_READ_CONFIG,
+ CMD_WRITE_CONFIG,
+ CMD_PING,
+ CMD_TOTAL_COMMANDS
+} cmd_t;
+
+typedef enum cmd_status {
+ CMD_STATUS_SUCCESS = 0,
+ CMD_STATUS_FAILURE,
+ CMD_STATUS_BUSY
+} cmd_status_t;
+
+#define CMD_PREFIX_INFO 'O' /* Following string should go to cli stdout */
+#define CMD_PREFIX_ERROR 'E' /* Following string should go to cli stderr */
+#define CMD_PREFIX_EXIT_CODE 'X' /* Following string is cli integer exit code */
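+
+/*
+ * A reply written to the pipe is encoded as: CMD_PREFIX_EXIT_CODE, the decimal
+ * status, a NUL terminator, then optionally one of the other prefixes followed
+ * by a message (see send_command_reply() in daemon/commands.c).
+ */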
+
+typedef enum cmd_type {
+ /*
+ * No other command is allowed to run at the same time (except for CMD_TYPE_HIGH_PRIORITY).
+ */
+ CMD_TYPE_EXCLUSIVE = 0,
+ /*
+ * Other commands are allowed to run concurrently (except for CMD_TYPE_EXCLUSIVE) but calls to this command are
+ * serialized.
+ */
+ CMD_TYPE_ORTHOGONAL,
+ /*
+ * Other commands are allowed to run concurrently (except for CMD_TYPE_EXCLUSIVE) as are calls to this command.
+ */
+ CMD_TYPE_CONCURRENT,
+ /*
+ * Those commands are always allowed to run.
+ */
+ CMD_TYPE_HIGH_PRIORITY
+} cmd_type_t;
+
+/**
+ * Executes a command and returns the status.
+ *
+ * @param args a string that may contain additional parameters to be parsed
+ * @param message allocate and return a message if need be (up to MAX_COMMAND_LENGTH bytes)
+ * @return CMD_STATUS_FAILURE or CMD_STATUS_SUCCESS
+ */
+typedef cmd_status_t (command_action_t) (char *args, char **message);
+
+typedef struct command_info {
+ char *cmd_str; // the command string
+ command_action_t *func; // the function that executes the command
+ cmd_type_t type; // Concurrency control information for the command
+} command_info_t;
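+
+// Illustrative only: daemon/commands.c defines command_info_array[] with one
+// entry per cmd_t value, e.g. { "ping", cmd_ping_execute, CMD_TYPE_ORTHOGONAL }.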
+
+typedef void (command_lock_t) (unsigned index);
+
+cmd_status_t execute_command(cmd_t idx, char *args, char **message);
+extern void commands_init(void);
+extern void commands_exit(void);
+
+#endif //NETDATA_COMMANDS_H
diff --git a/daemon/common.c b/daemon/common.c
new file mode 100644
index 0000000..45d5fa3
--- /dev/null
+++ b/daemon/common.c
@@ -0,0 +1,19 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "common.h"
+
+char *netdata_configured_hostname = NULL;
+char *netdata_configured_user_config_dir = CONFIG_DIR;
+char *netdata_configured_stock_config_dir = LIBCONFIG_DIR;
+char *netdata_configured_log_dir = LOG_DIR;
+char *netdata_configured_primary_plugins_dir = NULL;
+char *netdata_configured_web_dir = WEB_DIR;
+char *netdata_configured_cache_dir = CACHE_DIR;
+char *netdata_configured_varlib_dir = VARLIB_DIR;
+char *netdata_configured_lock_dir = NULL;
+char *netdata_configured_home_dir = VARLIB_DIR;
+char *netdata_configured_host_prefix = NULL;
+char *netdata_configured_timezone = NULL;
+int netdata_ready;
+int netdata_cloud_setting;
+
diff --git a/daemon/common.h b/daemon/common.h
new file mode 100644
index 0000000..68af957
--- /dev/null
+++ b/daemon/common.h
@@ -0,0 +1,101 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_COMMON_H
+#define NETDATA_COMMON_H 1
+
+#include "../libnetdata/libnetdata.h"
+
+// ----------------------------------------------------------------------------
+// shortcuts for the default netdata configuration
+
+#define config_load(filename, overwrite_used, section) appconfig_load(&netdata_config, filename, overwrite_used, section)
+#define config_get(section, name, default_value) appconfig_get(&netdata_config, section, name, default_value)
+#define config_get_number(section, name, value) appconfig_get_number(&netdata_config, section, name, value)
+#define config_get_float(section, name, value) appconfig_get_float(&netdata_config, section, name, value)
+#define config_get_boolean(section, name, value) appconfig_get_boolean(&netdata_config, section, name, value)
+#define config_get_boolean_ondemand(section, name, value) appconfig_get_boolean_ondemand(&netdata_config, section, name, value)
+#define config_get_duration(section, name, value) appconfig_get_duration(&netdata_config, section, name, value)
+
+#define config_set(section, name, default_value) appconfig_set(&netdata_config, section, name, default_value)
+#define config_set_default(section, name, value) appconfig_set_default(&netdata_config, section, name, value)
+#define config_set_number(section, name, value) appconfig_set_number(&netdata_config, section, name, value)
+#define config_set_float(section, name, value) appconfig_set_float(&netdata_config, section, name, value)
+#define config_set_boolean(section, name, value) appconfig_set_boolean(&netdata_config, section, name, value)
+
+#define config_exists(section, name) appconfig_exists(&netdata_config, section, name)
+#define config_move(section_old, name_old, section_new, name_new) appconfig_move(&netdata_config, section_old, name_old, section_new, name_new)
+
+#define config_generate(buffer, only_changed) appconfig_generate(&netdata_config, buffer, only_changed)
+
+// ----------------------------------------------------------------------------
+// netdata include files
+
+#include "global_statistics.h"
+
+// the netdata database
+#include "database/rrd.h"
+
+// the netdata webserver(s)
+#include "web/server/web_server.h"
+
+// streaming metrics between netdata servers
+#include "streaming/rrdpush.h"
+
+// health monitoring and alarm notifications
+#include "health/health.h"
+
+// the netdata registry
+// the registry is actually an API feature
+#include "registry/registry.h"
+
+// backends for archiving the metrics
+#include "backends/backends.h"
+// the new exporting engine for archiving the metrics
+#include "exporting/exporting_engine.h"
+
+// the netdata API
+#include "web/api/web_api_v1.h"
+
+// all data collection plugins
+#include "collectors/all.h"
+
+// netdata unit tests
+#include "unit_test.h"
+
+// netdata agent claiming
+#include "claim/claim.h"
+
+// netdata agent cloud link
+#include "aclk/legacy/agent_cloud_link.h"
+
+// global GUID map functions
+
+// netdata agent spawn server
+#include "spawn/spawn.h"
+
+// the netdata daemon
+#include "daemon.h"
+#include "main.h"
+#include "signals.h"
+#include "commands.h"
+
+// global netdata daemon variables
+extern char *netdata_configured_hostname;
+extern char *netdata_configured_user_config_dir;
+extern char *netdata_configured_stock_config_dir;
+extern char *netdata_configured_log_dir;
+extern char *netdata_configured_primary_plugins_dir;
+extern char *netdata_configured_web_dir;
+extern char *netdata_configured_cache_dir;
+extern char *netdata_configured_varlib_dir;
+extern char *netdata_configured_lock_dir;
+extern char *netdata_configured_home_dir;
+extern char *netdata_configured_host_prefix;
+extern char *netdata_configured_timezone;
+extern int netdata_zero_metrics_enabled;
+extern int netdata_anonymous_statistics_enabled;
+
+extern int netdata_ready;
+extern int netdata_cloud_setting;
+
+#endif /* NETDATA_COMMON_H */
diff --git a/daemon/config/README.md b/daemon/config/README.md
new file mode 100644
index 0000000..a1e2b04
--- /dev/null
+++ b/daemon/config/README.md
@@ -0,0 +1,229 @@
+<!--
+title: "Daemon configuration"
+description: "The Netdata Agent's daemon is installed preconfigured to collect thousands of metrics every second, but is highly configurable for real-world workloads."
+custom_edit_url: https://github.com/netdata/netdata/edit/master/daemon/config/README.md
+-->
+
+# Daemon configuration
+
+<details markdown="1"><summary>The daemon configuration file is read from `/etc/netdata/netdata.conf`.</summary>
+Depending on your installation method, Netdata will have been installed either directly under `/`, or under `/opt/netdata`. The paths mentioned here and in the documentation in general assume that your installation is under `/`. If it is not, you will find the exact same paths under `/opt/netdata` as well (e.g. `/etc/netdata` becomes `/opt/netdata/etc/netdata`).</details>
+
+This config file **is not needed by default**. Netdata works fine out of the box without it. But it does allow you to adapt the general behavior of Netdata in great detail. You can find all these settings, with their default values, by accessing the URL `https://netdata.server.hostname:19999/netdata.conf`. For example, check the configuration file of [netdata.firehol.org](http://netdata.firehol.org/netdata.conf). HTTP access to this file is limited by default to private IPs, via the [web server access lists](/web/server/README.md#access-lists).
+
+`netdata.conf` is organized into sections denoted by `[section]` headers. You will see the following sections:
+
+1. `[global]` to [configure](#global-section-options) the [Netdata daemon](/daemon/README.md).
+2. `[web]` to [configure the web server](/web/server/README.md).
+3. `[plugins]` to [configure](#plugins-section-options) which [collectors](/collectors/README.md) to use and PATH
+ settings.
+4. `[health]` to [configure](#health-section-options) general settings for [health monitoring](/health/README.md).
+5. `[registry]` for the [Netdata registry](/registry/README.md).
+6. `[backend]` to set up [streaming and replication](/streaming/README.md) options.
+7. `[statsd]` for the general settings of the [statsd.plugin](/collectors/statsd.plugin/README.md).
+8. `[plugin:NAME]` sections for each collector plugin, under the comment [Per plugin configuration](#per-plugin-configuration).
+9. `[CHART_NAME]` sections for each chart defined, under the comment [Per chart configuration](#per-chart-configuration).
+
+The configuration file is a `name = value` dictionary. Netdata will not complain if you set options unknown to it. When you check the running configuration by accessing the URL `/netdata.conf` on your Netdata server, Netdata will add a comment on settings it does not currently use.
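+
+For example, a minimal override follows the same `name = value` form (the values shown here are the defaults documented below):
+
+```conf
+[global]
+    update every = 1
+    memory mode = dbengine
+```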
+
+## Applying changes
+
+After `netdata.conf` has been modified, Netdata needs to be restarted for changes to apply:
+
+```bash
+sudo service netdata restart
+```
+
+If the above does not work, try the following:
+
+```bash
+sudo killall netdata; sleep 10; sudo netdata
+```
+
+Please note that your data history will be lost if you have modified the `history` parameter in the `[global]` section.
+
+## Sections
+
+### [global] section options
+
+| setting|default|info|
+|:-----:|:-----:|:---|
+| process scheduling policy|`keep`|See [Netdata process scheduling policy](/daemon/README.md#netdata-process-scheduling-policy)|
+| OOM score|`1000`|See [OOM score](../#oom-score)|
+| glibc malloc arena max for plugins|`1`|See [Virtual memory](/daemon/README.md#virtual-memory).|
+| glibc malloc arena max for Netdata|`1`|See [Virtual memory](/daemon/README.md#virtual-memory).|
+| hostname|auto-detected|The hostname of the computer running Netdata.|
+| history|`3996`| Used with `memory mode = save/map/ram/alloc`, not the default `memory mode = dbengine`. This number reflects the number of entries the `netdata` daemon will by default keep in memory for each chart dimension. This setting can also be configured per chart. Check [Memory Requirements](/database/README.md) for more information. |
+| update every|`1`|The frequency in seconds for data collection. For more information see the [performance guide](/docs/guides/configure/performance.md).|
+| config directory|`/etc/netdata`|The directory in which configuration files are kept.|
+| stock config directory|`/usr/lib/netdata/conf.d`||
+| log directory|`/var/log/netdata`|The directory in which the [log files](/daemon/README.md#log-files) are kept.|
+| web files directory|`/usr/share/netdata/web`|The directory in which the static web files are kept.|
+| cache directory|`/var/cache/netdata`|The directory in which the memory database will be stored if and when Netdata exits. Netdata re-reads the database when it starts again, to continue from the same point.|
+| lib directory|`/var/lib/netdata`|Contains the alarm log and the Netdata instance guid.|
+| home directory|`/var/cache/netdata`|Contains the db files for the collected metrics.|
+| plugins directory|`"/usr/libexec/netdata/plugins.d" "/etc/netdata/custom-plugins.d"`|The directory in which plugin programs are kept. This setting supports multiple directories, space separated. If any directory path contains spaces, enclose it in single or double quotes.|
+| memory mode | `dbengine` | `dbengine`: The default for long-term metrics storage with efficient RAM and disk usage. Can be extended with `page cache size` and `dbengine disk space`. <br />`save`: Netdata will save its round robin database on exit and load it on startup. <br />`map`: Cache files will be updated in real-time. Not ideal for systems with high load or slow disks (check `man mmap`). <br />`ram`: The round-robin database will be temporary and it will be lost when Netdata exits. <br />`none`: Disables the database at this host, and disables health monitoring entirely, as that requires a database of metrics. |
+| page cache size | 32 | Determines the amount of RAM in MiB that is dedicated to caching Netdata metric values. |
+| dbengine disk space | 256 | Determines the amount of disk space in MiB that is dedicated to storing Netdata metric values and all related metadata describing them. |
+| dbengine multihost disk space | 256 | Same functionality as `dbengine disk space`, but includes support for storing metrics streamed to a parent node by its children. Can be used in single-node environments as well. |
+| host access prefix||This is used in docker environments where /proc, /sys, etc have to be accessed via another path. You may also have to set SYS_PTRACE capability on the docker for this to work. Check [issue 43](https://github.com/netdata/netdata/issues/43).|
+| memory deduplication (ksm)|`yes`|When set to `yes`, Netdata will offer its in-memory round robin database to kernel same page merging (KSM) for deduplication. For more information check [Memory Deduplication - Kernel Same Page Merging - KSM](/database/README.md#ksm).|
+| TZ environment variable|`:/etc/localtime`|Where to find the timezone.|
+| timezone|auto-detected|The timezone retrieved from the environment variable.|
+| debug flags|`0x0000000000000000`|Bitmap of debug options to enable. For more information check [Tracing Options](/daemon/README.md#debugging).|
+| debug log|`/var/log/netdata/debug.log`|The filename to save debug information. This file will not be created if debugging is not enabled. You can also set it to `syslog` to send the debug messages to syslog, or `none` to disable this log. For more information check [Tracing Options](/daemon/README.md#debugging).|
+| error log|`/var/log/netdata/error.log`|The filename to save error messages for the Netdata daemon and all plugins (`stderr` is sent here for all Netdata programs, including the plugins). You can also set it to `syslog` to send the errors to syslog, or `none` to disable this log.|
+| access log|`/var/log/netdata/access.log`|The filename to save the log of web clients accessing Netdata charts. You can also set it to `syslog` to send the access log to syslog, or `none` to disable this log.|
+| errors flood protection period|`1200`|Length of period (in sec) during which the number of errors should not exceed the `errors to trigger flood protection`.|
+| errors to trigger flood protection|`200`|Number of errors written to the log in `errors flood protection period` sec before flood protection is activated.|
+| run as user|`netdata`|The user Netdata will run as.|
+| pthread stack size|auto-detected||
+| cleanup obsolete charts after seconds|`3600`|See [monitoring ephemeral containers](/collectors/cgroups.plugin/README.md#monitoring-ephemeral-containers); also sets the timeout for cleaning up obsolete dimensions.|
+| gap when lost iterations above|`1`||
+| cleanup orphan hosts after seconds|`3600`|How long to wait until automatically removing from the DB a remote Netdata host (child) that is no longer sending data.|
+| delete obsolete charts files|`yes`|See [monitoring ephemeral containers](/collectors/cgroups.plugin/README.md#monitoring-ephemeral-containers); also affects the deletion of files for obsolete dimensions.|
+| delete orphan hosts files|`yes`|Set to `no` to disable non-responsive host removal.|
+| enable zero metrics|`no`|Set to `yes` to show charts when all their metrics are zero.|
+
+### [web] section options
+
+Refer to the [web server documentation](/web/server/README.md)
+
+### [plugins] section options
+
+In this section you will find a boolean (`yes`/`no`) option for each plugin (e.g. tc, cgroups, apps, proc etc.). Note that the configuration options in this section for the orchestrator plugins `python.d`, `charts.d` and `node.d` control **all the modules** written for that orchestrator. For instance, setting `python.d = no` means that all Python modules under `collectors/python.d.plugin` will be disabled.
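+
+A minimal sketch of such an override in `netdata.conf`:
+
+```conf
+[plugins]
+    proc = yes
+    python.d = no
+```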
+
+Additionally, there will be the following options:
+
+| setting|default|info|
+|:-----:|:-----:|:---|
+| PATH environment variable|`auto-detected`||
+| PYTHONPATH environment variable||Used to set a custom python path|
+| enable running new plugins|`yes`|When set to `yes`, Netdata will enable detected plugins, even if they are not configured explicitly. Setting this to `no` will only enable plugins explicitly configured in this file with a `yes`|
+| check for new plugins every|60|The time in seconds between checks for new plugins in the plugins directory. This allows other applications to dynamically create plugins for Netdata.|
+| checks|`no`|This is a debugging plugin for measuring internal latency.|
+
+### [health] section options
+
+This section controls the general behavior of the health monitoring capabilities of Netdata.
+
+Specific alarms are configured in per-collector config files under the `health.d` directory. For more info, see [health
+monitoring](/health/README.md).
+
+[Alarm notifications](/health/notifications/README.md) are configured in `health_alarm_notify.conf`.
+
+| setting|default|info|
+|:-----:|:-----:|:---|
+| enabled|`yes`|Set to `no` to disable all alarms and notifications|
+| in memory max health log entries|1000|Size of the alarm history held in RAM|
+| script to execute on alarm|`/usr/libexec/netdata/plugins.d/alarm-notify.sh`|The script that sends alarm notifications. Note that in versions before 1.16, the plugins.d directory may be installed in a different location in certain OSs (e.g. under `/usr/lib/netdata`).|
+| stock health configuration directory|`/usr/lib/netdata/conf.d/health.d`|Contains the stock alarm configuration files for each collector|
+| health configuration directory|`/etc/netdata/health.d`|The directory containing the user alarm configuration files, to override the stock configurations|
+| run at least every seconds|`10`|Controls how often all alarm conditions should be evaluated.|
+| postpone alarms during hibernation for seconds|`60`|Prevents false alarms. May need to be increased if you get alarms during hibernation.|
+| rotate log every lines|2000|Controls the number of alarm log entries stored in `<lib directory>/health-log.db`, where `<lib directory>` is the one configured in the [\[global\] section](#global-section-options)|
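+
+For example, a minimal (illustrative) override that disables all alarms and notifications:
+
+```conf
+[health]
+    enabled = no
+```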
+
+### [registry] section options
+
+To understand what this section is and how it should be configured, please refer to the [registry documentation](/registry/README.md).
+
+### [backend]
+
+Refer to the [streaming and replication](/streaming/README.md) documentation.
+
+## Per-plugin configuration
+
+The configuration options for plugins appear in sections following the pattern `[plugin:NAME]`.
+
+### Internal plugins
+
+Most internal plugins will provide additional options. Check [Internal Plugins](/collectors/README.md) for more
+information.
+
+Please note that by default Netdata enables monitoring metrics for disks, memory, and network only when they are not zero. If they are constantly zero they are ignored. Metrics that start having values after Netdata is started will be detected, and their charts will be automatically added to the dashboard (a refresh of the dashboard is needed for them to appear, though). Use `yes` instead of `auto` in plugin configuration sections to enable these charts permanently. You can also set the `enable zero metrics` option to `yes` in the `[global]` section, which enables charts with zero metrics for all internal Netdata plugins.
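+
+For example, a minimal sketch that keeps all-zero charts visible for every internal plugin:
+
+```conf
+[global]
+    enable zero metrics = yes
+```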
+
+### External plugins
+
+External plugins will have only two options in `netdata.conf`:
+
+| setting | default | info |
+| :-----:|:-----:|:---|
+| update every | the value of `[global].update every` setting|The frequency in seconds the plugin should collect values. For more information check the [performance guide](/docs/guides/configure/performance.md).|
+| command options | _empty_ | Additional command line options to pass to the plugin.|
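+
+For example, a hypothetical `[plugin:apps]` section that makes the `apps` plugin collect values every 5 seconds instead of the global default:
+
+```conf
+[plugin:apps]
+    update every = 5
+```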
+
+External plugins that need additional configuration may support a dedicated file in `/etc/netdata`. Check their documentation.
+
+## Per-chart configuration
+
+In this area of `netdata.conf` you can find configuration options for individual charts. They appear in sections
+following the pattern `[NAME]`.
+
+Using the settings and values under these sections, you can control all aspects of a specific chart. You can change its
+title, make it appear higher in Netdata's [menu](/web/gui/README.md#metrics-menus), tweak its dimensions, and much more.
+
+To find the name of a given chart, and thus the name of its section in `netdata.conf`, look at the top-left corner of a
+chart:
+
+![Finding the unique ID of a
+chart](https://user-images.githubusercontent.com/1153921/67443082-43b16e80-f5b8-11e9-8d33-d6ee052c6678.png)
+
+Every per-chart configuration section has several common settings, which are listed in the table just below. Beneath
+that is information about lines that begin with `dim`, which affect a chart's dimensions.
+
+| Setting | Function |
+| :---------------- | :-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| `history` | Override the `history` setting in the [[global] options](#global-section-options) for this particular chart. Should be less than or equal to the global `history` setting. |
+| `enabled` | A boolean (`yes` or `no`) that explicitly enables or disables the chart in question. |
+| `cache directory` | The directory where cache files for this plugin, if needed, are stored. |
+| `chart type` | Defines what type of chart to display. It can be `line`, `area`, or `stacked`. If empty or missing, `line` will be used. |
+| `type` | Uniquely identify which [metrics menu](/web/gui/README.md#metrics-menus) on the Netdata dashboard this chart should appear under. Some examples include `system` (**System**), `disk` (**Disks**), `net` (**Network Interfaces**), and `netdata` (**Netdata Monitoring**). |
+| `family` | Change the chart's [family](/web/README.md#families) from its default. For example, you could force a disk space chart to collect metrics for family `sdb` instead of family `sda`. |
+| `units` | Text for the label of the vertical axis of the chart. This means all dimensions should have the same unit of measurement. |
+| `context` | Change the default [context](/web/README.md#contexts) of the chart. Changing this setting will affect what metrics the chart displays, and which alarms are attached to it. |
+| `priority` | Define where the chart should appear on the Netdata dashboard. Lower values equal higher priority, so a priority of `1` will place the chart highest, while a priority of `9999999` would place the chart at the bottom of the Netdata dashboard. |
+| `name` | The name of the chart that appears in the top-left corner, after the chart's title. You can also use this name when writing [health entities](/health/REFERENCE.md#health-entity-reference). |
+| `title` | The text that appears above the chart in the Netdata dashboard. |
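+
+For example, a hypothetical override that renames the `system.cpu` chart's title and pins it near the top of its menu:
+
+```conf
+[system.cpu]
+    title = Total CPU utilization
+    priority = 1
+```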
+
+### Dimension settings
+
+You may notice some settings that begin with `dim` beneath the ones defined in the table above. These settings determine
+which dimensions appear on the given chart and how Netdata calculates them.
+
+Each dimension setting has the following structure: `dim [DIMENSION ID] [OPTION] = [VALUE]`. The available options are `name`, `algorithm`, `multiplier`, and `divisor`.
+
+| Setting | Function |
+| :----------- | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
+| `name` | The name of the dimension as it will appear in the legend of the chart. If left empty or missing, Netdata will use the `[DIMENSION ID]` instead. |
+| `algorithm` | Can be `absolute`, `incremental`, `percentage-of-absolute-row`, or `percentage-of-incremental-row`. If this setting is empty, invalid, or missing, Netdata will use `absolute`. See the list beneath this table for descriptions of what each algorithm does. |
+| `multiplier` | An integer value by which to multiply the collected value. If empty or missing, Netdata will use `1`. This setting is often used with the value `1024` to convert megabytes to kilobytes, kilobytes to bytes, and so on. |
+| `divisor` | An integer value by which to divide the collected value. If empty or missing, Netdata will use `1`. This setting is often used with the value `1024` to convert bytes to kilobytes, kilobytes to megabytes, and so on. |
+
+Here are the options for the `algorithm` setting:
+
+- `absolute`: The value is drawn as-is (interpolated to second boundary).
+- `incremental`: To be used when the value always increases over time, such as the I/O on a disk. Netdata takes the
+ difference between the current metric and the past metric to calculate a per-second figure.
+- `percentage-of-absolute-row`: The % of this value compared to the total of all dimensions.
+- `percentage-of-incremental-row`: The % of this value compared to the incremental total of all dimensions.
+
+For example, the `system.io` chart has the following default settings:
+
+```conf
+ # dim in name = in
+ # dim in algorithm = incremental
+ # dim in multiplier = 1
+ # dim in divisor = 1
+ # dim out name = out
+ # dim out algorithm = incremental
+ # dim out multiplier = -1
+ # dim out divisor = 1
+```
+
+These `dim` settings produce two dimensions, `in` and `out`, both of which use the `incremental` algorithm. By
+multiplying the value of `out` by -1, Netdata creates the negative values seen in the following area chart:
+
+![The system.io chart on a macOS
+laptop](https://user-images.githubusercontent.com/1153921/69286708-2cfb3900-0bb1-11ea-9fcd-dd8fbb2adf11.png)
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdaemon%2Fconfig%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/daemon/daemon.c b/daemon/daemon.c
new file mode 100644
index 0000000..8319110
--- /dev/null
+++ b/daemon/daemon.c
@@ -0,0 +1,501 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "common.h"
+#include <sched.h>
+
+char pidfile[FILENAME_MAX + 1] = "";
+char claimingdirectory[FILENAME_MAX + 1];
+char exepath[FILENAME_MAX + 1];
+
+void get_netdata_execution_path(void)
+{
+ int ret;
+ size_t exepath_size = 0;
+ struct passwd *passwd = NULL;
+ char *user = NULL;
+
+ passwd = getpwuid(getuid());
+ user = (passwd && passwd->pw_name) ? passwd->pw_name : "";
+
+ exepath_size = sizeof(exepath) - 1;
+ ret = uv_exepath(exepath, &exepath_size);
+ if (0 != ret) {
+ error("uv_exepath(\"%s\", %u) (user: %s) failed (%s).", exepath, (unsigned)exepath_size, user,
+ uv_strerror(ret));
+ fatal("Cannot start netdata without getting execution path.");
+ }
+ exepath[exepath_size] = '\0';
+}
+
+static void chown_open_file(int fd, uid_t uid, gid_t gid) {
+ if(fd == -1) return;
+
+ struct stat buf;
+
+ if(fstat(fd, &buf) == -1) {
+ error("Cannot fstat() fd %d", fd);
+ return;
+ }
+
+ if((buf.st_uid != uid || buf.st_gid != gid) && S_ISREG(buf.st_mode)) {
+ if(fchown(fd, uid, gid) == -1)
+ error("Cannot fchown() fd %d.", fd);
+ }
+}
+
+void create_needed_dir(const char *dir, uid_t uid, gid_t gid)
+{
+ // attempt to create the directory
+ if(mkdir(dir, 0755) == 0) {
+ // we created it
+
+ // chown it to match the required user
+ if(chown(dir, uid, gid) == -1)
+ error("Cannot chown directory '%s' to %u:%u", dir, (unsigned int)uid, (unsigned int)gid);
+ }
+ else if(errno != EEXIST)
+ // log an error unless the directory already exists
+ error("Cannot create directory '%s'", dir);
+}
+
+void clean_directory(char *dirname)
+{
+ DIR *dir = opendir(dirname);
+ if(!dir) return;
+
+ int dir_fd = dirfd(dir);
+ struct dirent *de = NULL;
+
+ while((de = readdir(dir)))
+ if(de->d_type == DT_REG)
+ if (unlinkat(dir_fd, de->d_name, 0))
+ error("Cannot delete %s/%s", dirname, de->d_name);
+
+ closedir(dir);
+}
+
+int become_user(const char *username, int pid_fd) {
+ int am_i_root = (getuid() == 0)?1:0;
+
+ struct passwd *pw = getpwnam(username);
+ if(!pw) {
+ error("User %s is not present.", username);
+ return -1;
+ }
+
+ uid_t uid = pw->pw_uid;
+ gid_t gid = pw->pw_gid;
+
+ create_needed_dir(netdata_configured_cache_dir, uid, gid);
+ create_needed_dir(netdata_configured_varlib_dir, uid, gid);
+ create_needed_dir(netdata_configured_lock_dir, uid, gid);
+ create_needed_dir(claimingdirectory, uid, gid);
+
+ clean_directory(netdata_configured_lock_dir);
+
+ if(pidfile[0]) {
+ if(chown(pidfile, uid, gid) == -1)
+ error("Cannot chown '%s' to %u:%u", pidfile, (unsigned int)uid, (unsigned int)gid);
+ }
+
+ int ngroups = (int)sysconf(_SC_NGROUPS_MAX);
+ gid_t *supplementary_groups = NULL;
+ if(ngroups > 0) {
+ supplementary_groups = mallocz(sizeof(gid_t) * ngroups);
+ if(getgrouplist(username, gid, supplementary_groups, &ngroups) == -1) {
+ if(am_i_root)
+ error("Cannot get supplementary groups of user '%s'.", username);
+
+ ngroups = 0;
+ }
+ }
+
+ chown_open_file(STDOUT_FILENO, uid, gid);
+ chown_open_file(STDERR_FILENO, uid, gid);
+ chown_open_file(stdaccess_fd, uid, gid);
+ chown_open_file(pid_fd, uid, gid);
+
+ if(supplementary_groups && ngroups > 0) {
+ if(setgroups((size_t)ngroups, supplementary_groups) == -1) {
+ if(am_i_root)
+ error("Cannot set supplementary groups for user '%s'", username);
+ }
+ ngroups = 0;
+ }
+
+ if(supplementary_groups)
+ freez(supplementary_groups);
+
+#ifdef __APPLE__
+ if(setregid(gid, gid) != 0) {
+#else
+ if(setresgid(gid, gid, gid) != 0) {
+#endif /* __APPLE__ */
+ error("Cannot switch to user's %s group (gid: %u).", username, gid);
+ return -1;
+ }
+
+#ifdef __APPLE__
+ if(setreuid(uid, uid) != 0) {
+#else
+ if(setresuid(uid, uid, uid) != 0) {
+#endif /* __APPLE__ */
+ error("Cannot switch to user %s (uid: %u).", username, uid);
+ return -1;
+ }
+
+ if(setgid(gid) != 0) {
+ error("Cannot switch to user's %s group (gid: %u).", username, gid);
+ return -1;
+ }
+ if(setegid(gid) != 0) {
+ error("Cannot effectively switch to user's %s group (gid: %u).", username, gid);
+ return -1;
+ }
+ if(setuid(uid) != 0) {
+ error("Cannot switch to user %s (uid: %u).", username, uid);
+ return -1;
+ }
+ if(seteuid(uid) != 0) {
+ error("Cannot effectively switch to user %s (uid: %u).", username, uid);
+ return -1;
+ }
+
+ return(0);
+}
+
+#ifndef OOM_SCORE_ADJ_MAX
+#define OOM_SCORE_ADJ_MAX (1000)
+#endif
+#ifndef OOM_SCORE_ADJ_MIN
+#define OOM_SCORE_ADJ_MIN (-1000)
+#endif
+
+static void oom_score_adj(void) {
+ char buf[30 + 1];
+ long long int old_score, wanted_score = OOM_SCORE_ADJ_MAX, final_score = 0;
+
+ // read the existing score
+ if(read_single_signed_number_file("/proc/self/oom_score_adj", &old_score)) {
+ error("Out-Of-Memory (OOM) score setting is not supported on this system.");
+ return;
+ }
+
+ if(old_score != 0)
+ wanted_score = old_score;
+
+ // check the environment
+ char *s = getenv("OOMScoreAdjust");
+ if(!s || !*s) {
+ snprintfz(buf, 30, "%d", (int)wanted_score);
+ s = buf;
+ }
+
+ // check netdata.conf configuration
+ s = config_get(CONFIG_SECTION_GLOBAL, "OOM score", s);
+ if(s && *s && (isdigit(*s) || *s == '-' || *s == '+'))
+ wanted_score = atoll(s);
+ else if(s && !strcmp(s, "keep")) {
+ info("Out-Of-Memory (OOM) kept as-is (running with %d)", (int) old_score);
+ return;
+ }
+ else {
+ info("Out-Of-Memory (OOM) score not changed due to non-numeric setting: '%s' (running with %d)", s, (int)old_score);
+ return;
+ }
+
+ if(wanted_score < OOM_SCORE_ADJ_MIN) {
+ error("Wanted Out-Of-Memory (OOM) score %d is too small. Using %d", (int)wanted_score, (int)OOM_SCORE_ADJ_MIN);
+ wanted_score = OOM_SCORE_ADJ_MIN;
+ }
+
+ if(wanted_score > OOM_SCORE_ADJ_MAX) {
+ error("Wanted Out-Of-Memory (OOM) score %d is too big. Using %d", (int)wanted_score, (int)OOM_SCORE_ADJ_MAX);
+ wanted_score = OOM_SCORE_ADJ_MAX;
+ }
+
+ if(old_score == wanted_score) {
+ info("Out-Of-Memory (OOM) score is already set to the wanted value %d", (int)old_score);
+ return;
+ }
+
+ int written = 0;
+ int fd = open("/proc/self/oom_score_adj", O_WRONLY);
+ if(fd != -1) {
+ snprintfz(buf, 30, "%d", (int)wanted_score);
+ ssize_t len = strlen(buf);
+ if(len > 0 && write(fd, buf, (size_t)len) == len) written = 1;
+ close(fd);
+
+ if(written) {
+ if(read_single_signed_number_file("/proc/self/oom_score_adj", &final_score))
+ error("Adjusted my Out-Of-Memory (OOM) score to %d, but cannot verify it.", (int)wanted_score);
+ else if(final_score == wanted_score)
+ info("Adjusted my Out-Of-Memory (OOM) score from %d to %d.", (int)old_score, (int)final_score);
+ else
+ error("Adjusted my Out-Of-Memory (OOM) score from %d to %d, but it has been set to %d.", (int)old_score, (int)wanted_score, (int)final_score);
+ }
+ else
+ error("Failed to adjust my Out-Of-Memory (OOM) score to %d. Running with %d. (systemd systems may change it via netdata.service)", (int)wanted_score, (int)old_score);
+ }
+ else
+ error("Failed to adjust my Out-Of-Memory (OOM) score. Cannot open /proc/self/oom_score_adj for writing.");
+}
+
+static void process_nice_level(void) {
+#ifdef HAVE_NICE
+ int nice_level = (int)config_get_number(CONFIG_SECTION_GLOBAL, "process nice level", 19);
+ if(nice(nice_level) == -1) error("Cannot set netdata CPU nice level to %d.", nice_level);
+ else debug(D_SYSTEM, "Set netdata nice level to %d.", nice_level);
+#endif // HAVE_NICE
+}
+
+#define SCHED_FLAG_NONE 0x00
+#define SCHED_FLAG_PRIORITY_CONFIGURABLE 0x01 // the priority is user configurable
+#define SCHED_FLAG_KEEP_AS_IS 0x04 // do not attempt to set policy, priority or nice()
+#define SCHED_FLAG_USE_NICE 0x08 // use nice() after setting this policy
+
+struct sched_def {
+ char *name;
+ int policy;
+ int priority;
+ uint8_t flags;
+} scheduler_defaults[] = {
+
+ // the order of array members is important!
+ // the first defined is the default used by netdata
+
+ // the available members are important too!
+ // these are all the possible scheduling policies supported by netdata
+
+#ifdef SCHED_IDLE
+ { "idle", SCHED_IDLE, 0, SCHED_FLAG_NONE },
+#endif
+
+#ifdef SCHED_OTHER
+ { "other", SCHED_OTHER, 0, SCHED_FLAG_USE_NICE },
+ { "nice", SCHED_OTHER, 0, SCHED_FLAG_USE_NICE },
+#endif
+
+#ifdef SCHED_RR
+ { "rr", SCHED_RR, 0, SCHED_FLAG_PRIORITY_CONFIGURABLE },
+#endif
+
+#ifdef SCHED_FIFO
+ { "fifo", SCHED_FIFO, 0, SCHED_FLAG_PRIORITY_CONFIGURABLE },
+#endif
+
+#ifdef SCHED_BATCH
+ { "batch", SCHED_BATCH, 0, SCHED_FLAG_USE_NICE },
+#endif
+
+ // do not change the scheduling priority
+ { "keep", 0, 0, SCHED_FLAG_KEEP_AS_IS },
+ { "none", 0, 0, SCHED_FLAG_KEEP_AS_IS },
+
+ // array termination
+ { NULL, 0, 0, 0 }
+};
+
+
+#ifdef HAVE_SCHED_GETSCHEDULER
+static void sched_getscheduler_report(void) {
+ int sched = sched_getscheduler(0);
+ if(sched == -1) {
+ error("Cannot get my current process scheduling policy.");
+ return;
+ }
+ else {
+ int i;
+ for(i = 0 ; scheduler_defaults[i].name ; i++) {
+ if(scheduler_defaults[i].policy == sched) {
+ if(scheduler_defaults[i].flags & SCHED_FLAG_PRIORITY_CONFIGURABLE) {
+ struct sched_param param;
+ if(sched_getparam(0, &param) == -1) {
+ error("Cannot get the process scheduling priority for my policy '%s'", scheduler_defaults[i].name);
+ return;
+ }
+ else {
+ info("Running with process scheduling policy '%s', priority %d", scheduler_defaults[i].name, param.sched_priority);
+ }
+ }
+ else if(scheduler_defaults[i].flags & SCHED_FLAG_USE_NICE) {
+ #ifdef HAVE_GETPRIORITY
+ int n = getpriority(PRIO_PROCESS, 0);
+ info("Running with process scheduling policy '%s', nice level %d", scheduler_defaults[i].name, n);
+ #else // !HAVE_GETPRIORITY
+ info("Running with process scheduling policy '%s'", scheduler_defaults[i].name);
+ #endif // !HAVE_GETPRIORITY
+ }
+ else {
+ info("Running with process scheduling policy '%s'", scheduler_defaults[i].name);
+ }
+
+ return;
+ }
+ }
+ }
+}
+#else // !HAVE_SCHED_GETSCHEDULER
+static void sched_getscheduler_report(void) {
+#ifdef HAVE_GETPRIORITY
+ info("Running with priority %d", getpriority(PRIO_PROCESS, 0));
+#endif // HAVE_GETPRIORITY
+}
+#endif // !HAVE_SCHED_GETSCHEDULER
+
+#ifdef HAVE_SCHED_SETSCHEDULER
+
+static void sched_setscheduler_set(void) {
+
+ if(scheduler_defaults[0].name) {
+ const char *name = scheduler_defaults[0].name;
+ int policy = scheduler_defaults[0].policy, priority = scheduler_defaults[0].priority;
+ uint8_t flags = scheduler_defaults[0].flags;
+ int found = 0;
+
+ // read the configuration
+ name = config_get(CONFIG_SECTION_GLOBAL, "process scheduling policy", name);
+ int i;
+ for(i = 0 ; scheduler_defaults[i].name ; i++) {
+ if(!strcmp(name, scheduler_defaults[i].name)) {
+ found = 1;
+ policy = scheduler_defaults[i].policy;
+ priority = scheduler_defaults[i].priority;
+ flags = scheduler_defaults[i].flags;
+
+ if(flags & SCHED_FLAG_KEEP_AS_IS)
+ goto report;
+
+ if(flags & SCHED_FLAG_PRIORITY_CONFIGURABLE)
+ priority = (int)config_get_number(CONFIG_SECTION_GLOBAL, "process scheduling priority", priority);
+
+#ifdef HAVE_SCHED_GET_PRIORITY_MIN
+ errno = 0;
+ if(priority < sched_get_priority_min(policy)) {
+ error("scheduler %s (%d) priority %d is below the minimum %d. Using the minimum.", name, policy, priority, sched_get_priority_min(policy));
+ priority = sched_get_priority_min(policy);
+ }
+#endif
+#ifdef HAVE_SCHED_GET_PRIORITY_MAX
+ errno = 0;
+ if(priority > sched_get_priority_max(policy)) {
+ error("scheduler %s (%d) priority %d is above the maximum %d. Using the maximum.", name, policy, priority, sched_get_priority_max(policy));
+ priority = sched_get_priority_max(policy);
+ }
+#endif
+ break;
+ }
+ }
+
+ if(!found) {
+ error("Unknown scheduling policy '%s' - falling back to nice", name);
+ goto fallback;
+ }
+
+ const struct sched_param param = {
+ .sched_priority = priority
+ };
+
+ errno = 0;
+ i = sched_setscheduler(0, policy, &param);
+ if(i != 0) {
+ error("Cannot adjust netdata scheduling policy to %s (%d), with priority %d. Falling back to nice.", name, policy, priority);
+ }
+ else {
+ info("Adjusted netdata scheduling policy to %s (%d), with priority %d.", name, policy, priority);
+ if(!(flags & SCHED_FLAG_USE_NICE))
+ goto report;
+ }
+ }
+
+fallback:
+ process_nice_level();
+
+report:
+ sched_getscheduler_report();
+}
+#else // !HAVE_SCHED_SETSCHEDULER
+static void sched_setscheduler_set(void) {
+ process_nice_level();
+}
+#endif // !HAVE_SCHED_SETSCHEDULER
+
+int become_daemon(int dont_fork, const char *user)
+{
+ if(!dont_fork) {
+ int i = fork();
+ if(i == -1) {
+ perror("cannot fork");
+ exit(1);
+ }
+ if(i != 0) {
+ exit(0); // the parent
+ }
+
+ // become session leader
+ if (setsid() < 0) {
+ perror("Cannot become session leader.");
+ exit(2);
+ }
+
+ // fork() again
+ i = fork();
+ if(i == -1) {
+ perror("cannot fork");
+ exit(1);
+ }
+ if(i != 0) {
+ exit(0); // the parent
+ }
+ }
+
+ // generate our pid file
+ int pidfd = -1;
+ if(pidfile[0]) {
+ pidfd = open(pidfile, O_WRONLY | O_CREAT, 0644);
+ if(pidfd >= 0) {
+ if(ftruncate(pidfd, 0) != 0)
+ error("Cannot truncate pidfile '%s'.", pidfile);
+
+ char b[100];
+ sprintf(b, "%d\n", getpid());
+ ssize_t i = write(pidfd, b, strlen(b));
+ if(i <= 0)
+ error("Cannot write pidfile '%s'.", pidfile);
+ }
+ else error("Failed to open pidfile '%s'.", pidfile);
+ }
+
+ // Set new file permissions
+ umask(0007);
+
+ // adjust my Out-Of-Memory score
+ oom_score_adj();
+
+ // never become a problem
+ sched_setscheduler_set();
+
+ // Set claiming directory based on user config directory with correct ownership
+ snprintfz(claimingdirectory, FILENAME_MAX, "%s/cloud.d", netdata_configured_varlib_dir);
+
+ if(user && *user) {
+ if(become_user(user, pidfd) != 0) {
+ error("Cannot become user '%s'. Continuing as we are.", user);
+ }
+ else debug(D_SYSTEM, "Successfully became user '%s'.", user);
+ }
+ else {
+ create_needed_dir(netdata_configured_cache_dir, getuid(), getgid());
+ create_needed_dir(netdata_configured_varlib_dir, getuid(), getgid());
+ create_needed_dir(netdata_configured_lock_dir, getuid(), getgid());
+ create_needed_dir(claimingdirectory, getuid(), getgid());
+
+ clean_directory(netdata_configured_lock_dir);
+ }
+
+ if(pidfd != -1)
+ close(pidfd);
+
+ return(0);
+}
diff --git a/daemon/daemon.h b/daemon/daemon.h
new file mode 100644
index 0000000..bec3df9
--- /dev/null
+++ b/daemon/daemon.h
@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_DAEMON_H
+#define NETDATA_DAEMON_H 1
+
+extern int become_user(const char *username, int pid_fd);
+
+extern int become_daemon(int dont_fork, const char *user);
+
+extern void netdata_cleanup_and_exit(int i);
+extern void send_statistics(const char *action, const char *action_result, const char *action_data);
+
+extern void get_netdata_execution_path(void);
+
+extern char pidfile[];
+extern char exepath[];
+
+#endif /* NETDATA_DAEMON_H */
diff --git a/daemon/get-kubernetes-labels.sh.in b/daemon/get-kubernetes-labels.sh.in
new file mode 100644
index 0000000..5aa89ab
--- /dev/null
+++ b/daemon/get-kubernetes-labels.sh.in
@@ -0,0 +1,41 @@
+#!/usr/bin/env bash
+
+# Checks if netdata is running in a kubernetes pod and fetches that pod's labels
+
+if [ -z "${KUBERNETES_SERVICE_HOST}" ] || [ -z "${KUBERNETES_PORT_443_TCP_PORT}" ] || [ -z "${MY_POD_NAMESPACE}" ] || [ -z "${MY_POD_NAME}" ]; then
+ exit 0
+fi
+
+if ! command -v jq > /dev/null 2>&1; then
+ echo "jq command not available. Please install jq to get host labels for kubernetes pods."
+ exit 1
+fi
+
+TOKEN="$(< /var/run/secrets/kubernetes.io/serviceaccount/token)"
+HEADER="Authorization: Bearer $TOKEN"
+HOST="$KUBERNETES_SERVICE_HOST:$KUBERNETES_PORT_443_TCP_PORT"
+
+URL="https://$HOST/api/v1/namespaces/$MY_POD_NAMESPACE/pods/$MY_POD_NAME"
+if ! POD_DATA=$(curl -sSk -H "$HEADER" "$URL" 2>&1); then
+ echo "error on curl '${URL}': ${POD_DATA}."
+ exit 1
+fi
+
+URL="https://$HOST/api/v1/namespaces/kube-system"
+if ! KUBE_SYSTEM_NS_DATA=$(curl -sSk -H "$HEADER" "$URL" 2>&1); then
+ echo "error on curl '${URL}': ${KUBE_SYSTEM_NS_DATA}."
+ exit 1
+fi
+
+if ! POD_LABELS=$(jq -r '.metadata.labels' <<< "$POD_DATA" | grep ':' | tr -d '," ' 2>&1); then
+ echo "error on 'jq' parse pod data: ${POD_LABELS}."
+ exit 1
+fi
+
+if ! KUBE_SYSTEM_NS_UID=$(jq -r '.metadata.uid' <<< "$KUBE_SYSTEM_NS_DATA" 2>&1); then
+ echo "error on 'jq' parse kube_system_ns: ${KUBE_SYSTEM_NS_UID}."
+ exit 1
+fi
+
+echo -e "$POD_LABELS\nk8s_cluster_id:$KUBE_SYSTEM_NS_UID"
+exit 0
diff --git a/daemon/global_statistics.c b/daemon/global_statistics.c
new file mode 100644
index 0000000..7e78355
--- /dev/null
+++ b/daemon/global_statistics.c
@@ -0,0 +1,950 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "common.h"
+
+#define GLOBAL_STATS_RESET_WEB_USEC_MAX 0x01
+
+
+static struct global_statistics {
+ volatile uint16_t connected_clients;
+
+ volatile uint64_t web_requests;
+ volatile uint64_t web_usec;
+ volatile uint64_t web_usec_max;
+ volatile uint64_t bytes_received;
+ volatile uint64_t bytes_sent;
+ volatile uint64_t content_size;
+ volatile uint64_t compressed_content_size;
+
+ volatile uint64_t web_client_count;
+
+ volatile uint64_t rrdr_queries_made;
+ volatile uint64_t rrdr_db_points_read;
+ volatile uint64_t rrdr_result_points_generated;
+} global_statistics = {
+ .connected_clients = 0,
+ .web_requests = 0,
+ .web_usec = 0,
+ .bytes_received = 0,
+ .bytes_sent = 0,
+ .content_size = 0,
+ .compressed_content_size = 0,
+ .web_client_count = 1,
+
+ .rrdr_queries_made = 0,
+ .rrdr_db_points_read = 0,
+ .rrdr_result_points_generated = 0,
+};
+
+#if defined(HAVE_C___ATOMIC) && !defined(NETDATA_NO_ATOMIC_INSTRUCTIONS)
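+// atomic builtins available: the counters below are updated lock-free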
+#else
+netdata_mutex_t global_statistics_mutex = NETDATA_MUTEX_INITIALIZER;
+
+static inline void global_statistics_lock(void) {
+ netdata_mutex_lock(&global_statistics_mutex);
+}
+
+static inline void global_statistics_unlock(void) {
+ netdata_mutex_unlock(&global_statistics_mutex);
+}
+#endif
+
+
+void rrdr_query_completed(uint64_t db_points_read, uint64_t result_points_generated) {
+#if defined(HAVE_C___ATOMIC) && !defined(NETDATA_NO_ATOMIC_INSTRUCTIONS)
+ __atomic_fetch_add(&global_statistics.rrdr_queries_made, 1, __ATOMIC_SEQ_CST);
+ __atomic_fetch_add(&global_statistics.rrdr_db_points_read, db_points_read, __ATOMIC_SEQ_CST);
+ __atomic_fetch_add(&global_statistics.rrdr_result_points_generated, result_points_generated, __ATOMIC_SEQ_CST);
+#else
+#warning NOT using atomic operations - using locks for global statistics
+ if (web_server_is_multithreaded)
+ global_statistics_lock();
+
+ global_statistics.rrdr_queries_made++;
+ global_statistics.rrdr_db_points_read += db_points_read;
+ global_statistics.rrdr_result_points_generated += result_points_generated;
+
+ if (web_server_is_multithreaded)
+ global_statistics_unlock();
+#endif
+}
+
+void finished_web_request_statistics(uint64_t dt,
+ uint64_t bytes_received,
+ uint64_t bytes_sent,
+ uint64_t content_size,
+ uint64_t compressed_content_size) {
+#if defined(HAVE_C___ATOMIC) && !defined(NETDATA_NO_ATOMIC_INSTRUCTIONS)
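+ // Lock-free maximum: retry the compare-and-swap until either dt is published as the
+ // new maximum, or another thread has stored a value at least as large as dt
+ // (__atomic_compare_exchange reloads old_web_usec_max on failure).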
+ uint64_t old_web_usec_max = global_statistics.web_usec_max;
+ while(dt > old_web_usec_max)
+ __atomic_compare_exchange(&global_statistics.web_usec_max, &old_web_usec_max, &dt, 1, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
+
+ __atomic_fetch_add(&global_statistics.web_requests, 1, __ATOMIC_SEQ_CST);
+ __atomic_fetch_add(&global_statistics.web_usec, dt, __ATOMIC_SEQ_CST);
+ __atomic_fetch_add(&global_statistics.bytes_received, bytes_received, __ATOMIC_SEQ_CST);
+ __atomic_fetch_add(&global_statistics.bytes_sent, bytes_sent, __ATOMIC_SEQ_CST);
+ __atomic_fetch_add(&global_statistics.content_size, content_size, __ATOMIC_SEQ_CST);
+ __atomic_fetch_add(&global_statistics.compressed_content_size, compressed_content_size, __ATOMIC_SEQ_CST);
+#else
+#warning NOT using atomic operations - using locks for global statistics
+ if (web_server_is_multithreaded)
+ global_statistics_lock();
+
+ if (dt > global_statistics.web_usec_max)
+ global_statistics.web_usec_max = dt;
+
+ global_statistics.web_requests++;
+ global_statistics.web_usec += dt;
+ global_statistics.bytes_received += bytes_received;
+ global_statistics.bytes_sent += bytes_sent;
+ global_statistics.content_size += content_size;
+ global_statistics.compressed_content_size += compressed_content_size;
+
+ if (web_server_is_multithreaded)
+ global_statistics_unlock();
+#endif
+}
+
+uint64_t web_client_connected(void) {
+#if defined(HAVE_C___ATOMIC) && !defined(NETDATA_NO_ATOMIC_INSTRUCTIONS)
+ __atomic_fetch_add(&global_statistics.connected_clients, 1, __ATOMIC_SEQ_CST);
+ uint64_t id = __atomic_fetch_add(&global_statistics.web_client_count, 1, __ATOMIC_SEQ_CST);
+#else
+ if (web_server_is_multithreaded)
+ global_statistics_lock();
+
+ global_statistics.connected_clients++;
+ uint64_t id = global_statistics.web_client_count++;
+
+ if (web_server_is_multithreaded)
+ global_statistics_unlock();
+#endif
+
+ return id;
+}
+
+void web_client_disconnected(void) {
+#if defined(HAVE_C___ATOMIC) && !defined(NETDATA_NO_ATOMIC_INSTRUCTIONS)
+ __atomic_fetch_sub(&global_statistics.connected_clients, 1, __ATOMIC_SEQ_CST);
+#else
+ if (web_server_is_multithreaded)
+ global_statistics_lock();
+
+ global_statistics.connected_clients--;
+
+ if (web_server_is_multithreaded)
+ global_statistics_unlock();
+#endif
+}
+
+
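+// Snapshot the counters. __atomic_fetch_add(ptr, 0, ...) serves as a portable atomic
+// load: each counter is read atomically, though not the set as a whole.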
+static inline void global_statistics_copy(struct global_statistics *gs, uint8_t options) {
+#if defined(HAVE_C___ATOMIC) && !defined(NETDATA_NO_ATOMIC_INSTRUCTIONS)
+ gs->connected_clients = __atomic_fetch_add(&global_statistics.connected_clients, 0, __ATOMIC_SEQ_CST);
+ gs->web_requests = __atomic_fetch_add(&global_statistics.web_requests, 0, __ATOMIC_SEQ_CST);
+ gs->web_usec = __atomic_fetch_add(&global_statistics.web_usec, 0, __ATOMIC_SEQ_CST);
+ gs->web_usec_max = __atomic_fetch_add(&global_statistics.web_usec_max, 0, __ATOMIC_SEQ_CST);
+ gs->bytes_received = __atomic_fetch_add(&global_statistics.bytes_received, 0, __ATOMIC_SEQ_CST);
+ gs->bytes_sent = __atomic_fetch_add(&global_statistics.bytes_sent, 0, __ATOMIC_SEQ_CST);
+ gs->content_size = __atomic_fetch_add(&global_statistics.content_size, 0, __ATOMIC_SEQ_CST);
+ gs->compressed_content_size = __atomic_fetch_add(&global_statistics.compressed_content_size, 0, __ATOMIC_SEQ_CST);
+ gs->web_client_count = __atomic_fetch_add(&global_statistics.web_client_count, 0, __ATOMIC_SEQ_CST);
+
+ gs->rrdr_queries_made = __atomic_fetch_add(&global_statistics.rrdr_queries_made, 0, __ATOMIC_SEQ_CST);
+ gs->rrdr_db_points_read = __atomic_fetch_add(&global_statistics.rrdr_db_points_read, 0, __ATOMIC_SEQ_CST);
+ gs->rrdr_result_points_generated = __atomic_fetch_add(&global_statistics.rrdr_result_points_generated, 0, __ATOMIC_SEQ_CST);
+
+ if(options & GLOBAL_STATS_RESET_WEB_USEC_MAX) {
+ uint64_t n = 0;
+ __atomic_compare_exchange(&global_statistics.web_usec_max, &gs->web_usec_max, &n, 1, __ATOMIC_SEQ_CST,
+ __ATOMIC_SEQ_CST);
+ }
+#else
+ global_statistics_lock();
+
+ memcpy(gs, (const void *)&global_statistics, sizeof(struct global_statistics));
+
+ if (options & GLOBAL_STATS_RESET_WEB_USEC_MAX)
+ global_statistics.web_usec_max = 0;
+
+ global_statistics_unlock();
+#endif
+}
+
+void global_statistics_charts(void) {
+ static unsigned long long old_web_requests = 0,
+ old_web_usec = 0,
+ old_content_size = 0,
+ old_compressed_content_size = 0;
+
+ static collected_number compression_ratio = -1,
+ average_response_time = -1;
+
+ struct global_statistics gs;
+ struct rusage me, thread;
+
+ global_statistics_copy(&gs, GLOBAL_STATS_RESET_WEB_USEC_MAX);
+ getrusage(RUSAGE_THREAD, &thread);
+ getrusage(RUSAGE_SELF, &me);
+
+ {
+ static RRDSET *st_cpu_thread = NULL;
+ static RRDDIM *rd_cpu_thread_user = NULL,
+ *rd_cpu_thread_system = NULL;
+
+#ifdef __FreeBSD__
+ if (unlikely(!st_cpu_thread)) {
+ st_cpu_thread = rrdset_create_localhost(
+ "netdata"
+ , "plugin_freebsd_cpu"
+ , NULL
+ , "freebsd"
+ , NULL
+ , "NetData FreeBSD Plugin CPU usage"
+ , "milliseconds/s"
+ , "netdata"
+ , "stats"
+ , 132000
+ , localhost->rrd_update_every
+ , RRDSET_TYPE_STACKED
+ );
+#else
+ if (unlikely(!st_cpu_thread)) {
+ st_cpu_thread = rrdset_create_localhost(
+ "netdata"
+ , "plugin_proc_cpu"
+ , NULL
+ , "proc"
+ , NULL
+ , "NetData Proc Plugin CPU usage"
+ , "milliseconds/s"
+ , "netdata"
+ , "stats"
+ , 132000
+ , localhost->rrd_update_every
+ , RRDSET_TYPE_STACKED
+ );
+#endif
+
+ rd_cpu_thread_user = rrddim_add(st_cpu_thread, "user", NULL, 1, 1000, RRD_ALGORITHM_INCREMENTAL);
+ rd_cpu_thread_system = rrddim_add(st_cpu_thread, "system", NULL, 1, 1000, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else
+ rrdset_next(st_cpu_thread);
+
+ rrddim_set_by_pointer(st_cpu_thread, rd_cpu_thread_user, thread.ru_utime.tv_sec * 1000000ULL + thread.ru_utime.tv_usec);
+ rrddim_set_by_pointer(st_cpu_thread, rd_cpu_thread_system, thread.ru_stime.tv_sec * 1000000ULL + thread.ru_stime.tv_usec);
+ rrdset_done(st_cpu_thread);
+ }
+
+ // ----------------------------------------------------------------
+
+ {
+ static RRDSET *st_cpu = NULL;
+ static RRDDIM *rd_cpu_user = NULL,
+ *rd_cpu_system = NULL;
+
+ if (unlikely(!st_cpu)) {
+ st_cpu = rrdset_create_localhost(
+ "netdata"
+ , "server_cpu"
+ , NULL
+ , "netdata"
+ , NULL
+ , "NetData CPU usage"
+ , "milliseconds/s"
+ , "netdata"
+ , "stats"
+ , 130000
+ , localhost->rrd_update_every
+ , RRDSET_TYPE_STACKED
+ );
+
+ rd_cpu_user = rrddim_add(st_cpu, "user", NULL, 1, 1000, RRD_ALGORITHM_INCREMENTAL);
+ rd_cpu_system = rrddim_add(st_cpu, "system", NULL, 1, 1000, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else
+ rrdset_next(st_cpu);
+
+ rrddim_set_by_pointer(st_cpu, rd_cpu_user, me.ru_utime.tv_sec * 1000000ULL + me.ru_utime.tv_usec);
+ rrddim_set_by_pointer(st_cpu, rd_cpu_system, me.ru_stime.tv_sec * 1000000ULL + me.ru_stime.tv_usec);
+ rrdset_done(st_cpu);
+ }
+
+ // ----------------------------------------------------------------
+
+ {
+ static RRDSET *st_clients = NULL;
+ static RRDDIM *rd_clients = NULL;
+
+ if (unlikely(!st_clients)) {
+ st_clients = rrdset_create_localhost(
+ "netdata"
+ , "clients"
+ , NULL
+ , "netdata"
+ , NULL
+ , "NetData Web Clients"
+ , "connected clients"
+ , "netdata"
+ , "stats"
+ , 130200
+ , localhost->rrd_update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rd_clients = rrddim_add(st_clients, "clients", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else
+ rrdset_next(st_clients);
+
+ rrddim_set_by_pointer(st_clients, rd_clients, gs.connected_clients);
+ rrdset_done(st_clients);
+ }
+
+ // ----------------------------------------------------------------
+
+ {
+ static RRDSET *st_reqs = NULL;
+ static RRDDIM *rd_requests = NULL;
+
+ if (unlikely(!st_reqs)) {
+ st_reqs = rrdset_create_localhost(
+ "netdata"
+ , "requests"
+ , NULL
+ , "netdata"
+ , NULL
+ , "NetData Web Requests"
+ , "requests/s"
+ , "netdata"
+ , "stats"
+ , 130300
+ , localhost->rrd_update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rd_requests = rrddim_add(st_reqs, "requests", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else
+ rrdset_next(st_reqs);
+
+ rrddim_set_by_pointer(st_reqs, rd_requests, (collected_number) gs.web_requests);
+ rrdset_done(st_reqs);
+ }
+
+ // ----------------------------------------------------------------
+
+ {
+ static RRDSET *st_bytes = NULL;
+ static RRDDIM *rd_in = NULL,
+ *rd_out = NULL;
+
+ if (unlikely(!st_bytes)) {
+ st_bytes = rrdset_create_localhost(
+ "netdata"
+ , "net"
+ , NULL
+ , "netdata"
+ , NULL
+ , "NetData Network Traffic"
+ , "kilobits/s"
+ , "netdata"
+ , "stats"
+ , 130000
+ , localhost->rrd_update_every
+ , RRDSET_TYPE_AREA
+ );
+
+ rd_in = rrddim_add(st_bytes, "in", NULL, 8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
+ rd_out = rrddim_add(st_bytes, "out", NULL, -8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else
+ rrdset_next(st_bytes);
+
+ rrddim_set_by_pointer(st_bytes, rd_in, (collected_number) gs.bytes_received);
+ rrddim_set_by_pointer(st_bytes, rd_out, (collected_number) gs.bytes_sent);
+ rrdset_done(st_bytes);
+ }
+
+ // ----------------------------------------------------------------
+
+ {
+ static RRDSET *st_duration = NULL;
+ static RRDDIM *rd_average = NULL,
+ *rd_max = NULL;
+
+ if (unlikely(!st_duration)) {
+ st_duration = rrdset_create_localhost(
+ "netdata"
+ , "response_time"
+ , NULL
+ , "netdata"
+ , NULL
+ , "NetData API Response Time"
+ , "milliseconds/request"
+ , "netdata"
+ , "stats"
+ , 130400
+ , localhost->rrd_update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rd_average = rrddim_add(st_duration, "average", NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE);
+ rd_max = rrddim_add(st_duration, "max", NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else
+ rrdset_next(st_duration);
+
+ uint64_t gweb_usec = gs.web_usec;
+ uint64_t gweb_requests = gs.web_requests;
+
+ uint64_t web_usec = (gweb_usec >= old_web_usec) ? gweb_usec - old_web_usec : 0;
+ uint64_t web_requests = (gweb_requests >= old_web_requests) ? gweb_requests - old_web_requests : 0;
+
+ old_web_usec = gweb_usec;
+ old_web_requests = gweb_requests;
+
+ if (web_requests)
+ average_response_time = (collected_number) (web_usec / web_requests);
+
+ if (unlikely(average_response_time != -1))
+ rrddim_set_by_pointer(st_duration, rd_average, average_response_time);
+ else
+ rrddim_set_by_pointer(st_duration, rd_average, 0);
+
+ rrddim_set_by_pointer(st_duration, rd_max, ((gs.web_usec_max)?(collected_number)gs.web_usec_max:average_response_time));
+ rrdset_done(st_duration);
+ }
+
+ // ----------------------------------------------------------------
+
+ {
+ static RRDSET *st_compression = NULL;
+ static RRDDIM *rd_savings = NULL;
+
+ if (unlikely(!st_compression)) {
+ st_compression = rrdset_create_localhost(
+ "netdata"
+ , "compression_ratio"
+ , NULL
+ , "netdata"
+ , NULL
+ , "NetData API Responses Compression Savings Ratio"
+ , "percentage"
+ , "netdata"
+ , "stats"
+ , 130500
+ , localhost->rrd_update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rd_savings = rrddim_add(st_compression, "savings", NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else
+ rrdset_next(st_compression);
+
+ // since we don't lock here to read the global statistics
+ // read the smaller value first
+ unsigned long long gcompressed_content_size = gs.compressed_content_size;
+ unsigned long long gcontent_size = gs.content_size;
+
+ unsigned long long compressed_content_size = gcompressed_content_size - old_compressed_content_size;
+ unsigned long long content_size = gcontent_size - old_content_size;
+
+ old_compressed_content_size = gcompressed_content_size;
+ old_content_size = gcontent_size;
+
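+ // The ratio is computed multiplied by 1000; the dimension divider of 1000 passed to
+ // rrddim_add() above turns it back into a percentage with three decimal places.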
+ if (content_size && content_size >= compressed_content_size)
+ compression_ratio = ((content_size - compressed_content_size) * 100 * 1000) / content_size;
+
+ if (compression_ratio != -1)
+ rrddim_set_by_pointer(st_compression, rd_savings, compression_ratio);
+
+ rrdset_done(st_compression);
+ }
+
+ // ----------------------------------------------------------------
+
+ if(gs.rrdr_queries_made) {
+ static RRDSET *st_rrdr_queries = NULL;
+ static RRDDIM *rd_queries = NULL;
+
+ if (unlikely(!st_rrdr_queries)) {
+ st_rrdr_queries = rrdset_create_localhost(
+ "netdata"
+ , "queries"
+ , NULL
+ , "queries"
+ , NULL
+ , "NetData API Queries"
+ , "queries/s"
+ , "netdata"
+ , "stats"
+ , 130500
+ , localhost->rrd_update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rd_queries = rrddim_add(st_rrdr_queries, "queries", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else
+ rrdset_next(st_rrdr_queries);
+
+ rrddim_set_by_pointer(st_rrdr_queries, rd_queries, (collected_number)gs.rrdr_queries_made);
+
+ rrdset_done(st_rrdr_queries);
+ }
+
+ // ----------------------------------------------------------------
+
+ if(gs.rrdr_db_points_read || gs.rrdr_result_points_generated) {
+ static RRDSET *st_rrdr_points = NULL;
+ static RRDDIM *rd_points_read = NULL;
+ static RRDDIM *rd_points_generated = NULL;
+
+ if (unlikely(!st_rrdr_points)) {
+ st_rrdr_points = rrdset_create_localhost(
+ "netdata"
+ , "db_points"
+ , NULL
+ , "queries"
+ , NULL
+ , "NetData API Points"
+ , "points/s"
+ , "netdata"
+ , "stats"
+ , 130501
+ , localhost->rrd_update_every
+ , RRDSET_TYPE_AREA
+ );
+
+ rd_points_read = rrddim_add(st_rrdr_points, "read", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_points_generated = rrddim_add(st_rrdr_points, "generated", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else
+ rrdset_next(st_rrdr_points);
+
+ rrddim_set_by_pointer(st_rrdr_points, rd_points_read, (collected_number)gs.rrdr_db_points_read);
+ rrddim_set_by_pointer(st_rrdr_points, rd_points_generated, (collected_number)gs.rrdr_result_points_generated);
+
+ rrdset_done(st_rrdr_points);
+ }
+
+ // ----------------------------------------------------------------
+
+#ifdef ENABLE_DBENGINE
+ RRDHOST *host;
+ unsigned long long stats_array[RRDENG_NR_STATS] = {0};
+ unsigned long long local_stats_array[RRDENG_NR_STATS];
+ unsigned dbengine_contexts = 0, counted_multihost_db = 0, i;
+
+ rrd_rdlock();
+ rrdhost_foreach_read(host) {
+ if (host->rrd_memory_mode == RRD_MEMORY_MODE_DBENGINE && !rrdhost_flag_check(host, RRDHOST_FLAG_ARCHIVED)) {
+ if (&multidb_ctx == host->rrdeng_ctx) {
+ if (counted_multihost_db)
+ continue; /* Only count multi-host DB once */
+ counted_multihost_db = 1;
+ }
+ ++dbengine_contexts;
+ /* get this host's DB engine statistics */
+ rrdeng_get_37_statistics(host->rrdeng_ctx, local_stats_array);
+ for (i = 0 ; i < RRDENG_NR_STATS ; ++i) {
+ /* aggregate statistics across hosts */
+ stats_array[i] += local_stats_array[i];
+ }
+ }
+ }
+ rrd_unlock();
+
+ if (dbengine_contexts) {
+ /* deduplicate global statistics by getting the ones from the last context */
+ stats_array[30] = local_stats_array[30];
+ stats_array[31] = local_stats_array[31];
+ stats_array[32] = local_stats_array[32];
+ stats_array[34] = local_stats_array[34];
+ stats_array[36] = local_stats_array[36];
+
+ // ----------------------------------------------------------------
+
+ {
+ static RRDSET *st_compression = NULL;
+ static RRDDIM *rd_savings = NULL;
+
+ if (unlikely(!st_compression)) {
+ st_compression = rrdset_create_localhost(
+ "netdata"
+ , "dbengine_compression_ratio"
+ , NULL
+ , "dbengine"
+ , NULL
+ , "NetData DB engine data extents' compression savings ratio"
+ , "percentage"
+ , "netdata"
+ , "stats"
+ , 130502
+ , localhost->rrd_update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rd_savings = rrddim_add(st_compression, "savings", NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else
+ rrdset_next(st_compression);
+
+ unsigned long long ratio;
+ unsigned long long compressed_content_size = stats_array[12];
+ unsigned long long content_size = stats_array[11];
+
+ if (content_size) {
+ // allow negative savings
+ ratio = ((content_size - compressed_content_size) * 100 * 1000) / content_size;
+ } else {
+ ratio = 0;
+ }
+ rrddim_set_by_pointer(st_compression, rd_savings, ratio);
+
+ rrdset_done(st_compression);
+ }
+
+ // ----------------------------------------------------------------
+
+ {
+ static RRDSET *st_pg_cache_hit_ratio = NULL;
+ static RRDDIM *rd_hit_ratio = NULL;
+
+ if (unlikely(!st_pg_cache_hit_ratio)) {
+ st_pg_cache_hit_ratio = rrdset_create_localhost(
+ "netdata"
+ , "page_cache_hit_ratio"
+ , NULL
+ , "dbengine"
+ , NULL
+ , "NetData DB engine page cache hit ratio"
+ , "percentage"
+ , "netdata"
+ , "stats"
+ , 130503
+ , localhost->rrd_update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rd_hit_ratio = rrddim_add(st_pg_cache_hit_ratio, "ratio", NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else
+ rrdset_next(st_pg_cache_hit_ratio);
+
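+ // Hits and misses are cumulative counters; chart the ratio of their deltas since
+ // the previous iteration, so the value reflects only the last update interval.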
+ static unsigned long long old_hits = 0;
+ static unsigned long long old_misses = 0;
+ unsigned long long hits = stats_array[7];
+ unsigned long long misses = stats_array[8];
+ unsigned long long hits_delta;
+ unsigned long long misses_delta;
+ unsigned long long ratio;
+
+ hits_delta = hits - old_hits;
+ misses_delta = misses - old_misses;
+ old_hits = hits;
+ old_misses = misses;
+
+ if (hits_delta + misses_delta) {
+ ratio = (hits_delta * 100 * 1000) / (hits_delta + misses_delta);
+ } else {
+ ratio = 0;
+ }
+ rrddim_set_by_pointer(st_pg_cache_hit_ratio, rd_hit_ratio, ratio);
+
+ rrdset_done(st_pg_cache_hit_ratio);
+ }
+
+ // ----------------------------------------------------------------
+
+ {
+ static RRDSET *st_pg_cache_pages = NULL;
+ static RRDDIM *rd_descriptors = NULL;
+ static RRDDIM *rd_populated = NULL;
+ static RRDDIM *rd_dirty = NULL;
+ static RRDDIM *rd_backfills = NULL;
+ static RRDDIM *rd_evictions = NULL;
+ static RRDDIM *rd_used_by_collectors = NULL;
+
+ if (unlikely(!st_pg_cache_pages)) {
+ st_pg_cache_pages = rrdset_create_localhost(
+ "netdata"
+ , "page_cache_stats"
+ , NULL
+ , "dbengine"
+ , NULL
+ , "NetData dbengine page cache statistics"
+ , "pages"
+ , "netdata"
+ , "stats"
+ , 130504
+ , localhost->rrd_update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rd_descriptors = rrddim_add(st_pg_cache_pages, "descriptors", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ rd_populated = rrddim_add(st_pg_cache_pages, "populated", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ rd_dirty = rrddim_add(st_pg_cache_pages, "dirty", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ rd_backfills = rrddim_add(st_pg_cache_pages, "backfills", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_evictions = rrddim_add(st_pg_cache_pages, "evictions", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_used_by_collectors = rrddim_add(st_pg_cache_pages, "used_by_collectors", NULL, 1, 1,
+ RRD_ALGORITHM_ABSOLUTE);
+ }
+ else
+ rrdset_next(st_pg_cache_pages);
+
+ rrddim_set_by_pointer(st_pg_cache_pages, rd_descriptors, (collected_number)stats_array[27]);
+ rrddim_set_by_pointer(st_pg_cache_pages, rd_populated, (collected_number)stats_array[3]);
+ rrddim_set_by_pointer(st_pg_cache_pages, rd_dirty, (collected_number)stats_array[0] + stats_array[4]);
+ rrddim_set_by_pointer(st_pg_cache_pages, rd_backfills, (collected_number)stats_array[9]);
+ rrddim_set_by_pointer(st_pg_cache_pages, rd_evictions, (collected_number)stats_array[10]);
+ rrddim_set_by_pointer(st_pg_cache_pages, rd_used_by_collectors, (collected_number)stats_array[0]);
+ rrdset_done(st_pg_cache_pages);
+ }
+
+ // ----------------------------------------------------------------
+
+ {
+ static RRDSET *st_long_term_pages = NULL;
+ static RRDDIM *rd_total = NULL;
+ static RRDDIM *rd_insertions = NULL;
+ static RRDDIM *rd_deletions = NULL;
+ static RRDDIM *rd_flushing_pressure_deletions = NULL;
+
+ if (unlikely(!st_long_term_pages)) {
+ st_long_term_pages = rrdset_create_localhost(
+ "netdata"
+ , "dbengine_long_term_page_stats"
+ , NULL
+ , "dbengine"
+ , NULL
+ , "NetData dbengine long-term page statistics"
+ , "pages"
+ , "netdata"
+ , "stats"
+ , 130505
+ , localhost->rrd_update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rd_total = rrddim_add(st_long_term_pages, "total", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ rd_insertions = rrddim_add(st_long_term_pages, "insertions", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_deletions = rrddim_add(st_long_term_pages, "deletions", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_flushing_pressure_deletions = rrddim_add(st_long_term_pages, "flushing_pressure_deletions", NULL, -1,
+ 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else
+ rrdset_next(st_long_term_pages);
+
+ rrddim_set_by_pointer(st_long_term_pages, rd_total, (collected_number)stats_array[2]);
+ rrddim_set_by_pointer(st_long_term_pages, rd_insertions, (collected_number)stats_array[5]);
+ rrddim_set_by_pointer(st_long_term_pages, rd_deletions, (collected_number)stats_array[6]);
+ rrddim_set_by_pointer(st_long_term_pages, rd_flushing_pressure_deletions,
+ (collected_number)stats_array[36]);
+ rrdset_done(st_long_term_pages);
+ }
+
+ // ----------------------------------------------------------------
+
+ {
+ static RRDSET *st_io_stats = NULL;
+ static RRDDIM *rd_reads = NULL;
+ static RRDDIM *rd_writes = NULL;
+
+ if (unlikely(!st_io_stats)) {
+ st_io_stats = rrdset_create_localhost(
+ "netdata"
+ , "dbengine_io_throughput"
+ , NULL
+ , "dbengine"
+ , NULL
+ , "NetData DB engine I/O throughput"
+ , "MiB/s"
+ , "netdata"
+ , "stats"
+ , 130506
+ , localhost->rrd_update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rd_reads = rrddim_add(st_io_stats, "reads", NULL, 1, 1024 * 1024, RRD_ALGORITHM_INCREMENTAL);
+ rd_writes = rrddim_add(st_io_stats, "writes", NULL, -1, 1024 * 1024, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else
+ rrdset_next(st_io_stats);
+
+ rrddim_set_by_pointer(st_io_stats, rd_reads, (collected_number)stats_array[17]);
+ rrddim_set_by_pointer(st_io_stats, rd_writes, (collected_number)stats_array[15]);
+ rrdset_done(st_io_stats);
+ }
+
+ // ----------------------------------------------------------------
+
+ {
+ static RRDSET *st_io_stats = NULL;
+ static RRDDIM *rd_reads = NULL;
+ static RRDDIM *rd_writes = NULL;
+
+ if (unlikely(!st_io_stats)) {
+ st_io_stats = rrdset_create_localhost(
+ "netdata"
+ , "dbengine_io_operations"
+ , NULL
+ , "dbengine"
+ , NULL
+ , "NetData DB engine I/O operations"
+ , "operations/s"
+ , "netdata"
+ , "stats"
+ , 130507
+ , localhost->rrd_update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rd_reads = rrddim_add(st_io_stats, "reads", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_writes = rrddim_add(st_io_stats, "writes", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else
+ rrdset_next(st_io_stats);
+
+ rrddim_set_by_pointer(st_io_stats, rd_reads, (collected_number)stats_array[18]);
+ rrddim_set_by_pointer(st_io_stats, rd_writes, (collected_number)stats_array[16]);
+ rrdset_done(st_io_stats);
+ }
+
+ // ----------------------------------------------------------------
+
+ {
+ static RRDSET *st_errors = NULL;
+ static RRDDIM *rd_fs_errors = NULL;
+ static RRDDIM *rd_io_errors = NULL;
+ static RRDDIM *pg_cache_over_half_dirty_events = NULL;
+
+ if (unlikely(!st_errors)) {
+ st_errors = rrdset_create_localhost(
+ "netdata"
+ , "dbengine_global_errors"
+ , NULL
+ , "dbengine"
+ , NULL
+ , "NetData DB engine errors"
+ , "errors/s"
+ , "netdata"
+ , "stats"
+ , 130508
+ , localhost->rrd_update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rd_io_errors = rrddim_add(st_errors, "io_errors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_fs_errors = rrddim_add(st_errors, "fs_errors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ pg_cache_over_half_dirty_events = rrddim_add(st_errors, "pg_cache_over_half_dirty_events", NULL, 1, 1,
+ RRD_ALGORITHM_INCREMENTAL);
+ }
+ else
+ rrdset_next(st_errors);
+
+ rrddim_set_by_pointer(st_errors, rd_io_errors, (collected_number)stats_array[30]);
+ rrddim_set_by_pointer(st_errors, rd_fs_errors, (collected_number)stats_array[31]);
+ rrddim_set_by_pointer(st_errors, pg_cache_over_half_dirty_events, (collected_number)stats_array[34]);
+ rrdset_done(st_errors);
+ }
+
+ // ----------------------------------------------------------------
+
+ {
+ static RRDSET *st_fd = NULL;
+ static RRDDIM *rd_fd_current = NULL;
+ static RRDDIM *rd_fd_max = NULL;
+
+ if (unlikely(!st_fd)) {
+ st_fd = rrdset_create_localhost(
+ "netdata"
+ , "dbengine_global_file_descriptors"
+ , NULL
+ , "dbengine"
+ , NULL
+ , "NetData DB engine File Descriptors"
+ , "descriptors"
+ , "netdata"
+ , "stats"
+ , 130509
+ , localhost->rrd_update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rd_fd_current = rrddim_add(st_fd, "current", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ rd_fd_max = rrddim_add(st_fd, "max", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else
+ rrdset_next(st_fd);
+
+ rrddim_set_by_pointer(st_fd, rd_fd_current, (collected_number)stats_array[32]);
+ /* Careful here, modify this accordingly if the File-Descriptor budget ever changes */
+ rrddim_set_by_pointer(st_fd, rd_fd_max, (collected_number)rlimit_nofile.rlim_cur / 4);
+ rrdset_done(st_fd);
+ }
+
+ // ----------------------------------------------------------------
+
+ {
+ static RRDSET *st_ram_usage = NULL;
+ static RRDDIM *rd_cached = NULL;
+ static RRDDIM *rd_pinned = NULL;
+ static RRDDIM *rd_metadata = NULL;
+
+ collected_number cached_pages, pinned_pages, API_producers, populated_pages, metadata, pages_on_disk,
+ page_cache_descriptors;
+
+ if (unlikely(!st_ram_usage)) {
+ st_ram_usage = rrdset_create_localhost(
+ "netdata"
+ , "dbengine_ram"
+ , NULL
+ , "dbengine"
+ , NULL
+ , "NetData DB engine RAM usage"
+ , "MiB"
+ , "netdata"
+ , "stats"
+ , 130510
+ , localhost->rrd_update_every
+ , RRDSET_TYPE_STACKED
+ );
+
+ rd_cached = rrddim_add(st_ram_usage, "cache", NULL, 1, 256, RRD_ALGORITHM_ABSOLUTE);
+ rd_pinned = rrddim_add(st_ram_usage, "collectors", NULL, 1, 256, RRD_ALGORITHM_ABSOLUTE);
+ rd_metadata = rrddim_add(st_ram_usage, "metadata", NULL, 1, 1048576, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else
+ rrdset_next(st_ram_usage);
+
+ API_producers = (collected_number)stats_array[0];
+ pages_on_disk = (collected_number)stats_array[2];
+ populated_pages = (collected_number)stats_array[3];
+ page_cache_descriptors = (collected_number)stats_array[27];
+
+ if (API_producers * 2 > populated_pages) {
+ pinned_pages = API_producers;
+ } else {
+ pinned_pages = API_producers * 2;
+ }
+ cached_pages = populated_pages - pinned_pages;
+
+ metadata = page_cache_descriptors * sizeof(struct page_cache_descr);
+ metadata += pages_on_disk * sizeof(struct rrdeng_page_descr);
+ /* This is an empirical estimation for Judy array indexing and extent structures */
+ metadata += pages_on_disk * 58;
+
+ rrddim_set_by_pointer(st_ram_usage, rd_cached, cached_pages);
+ rrddim_set_by_pointer(st_ram_usage, rd_pinned, pinned_pages);
+ rrddim_set_by_pointer(st_ram_usage, rd_metadata, metadata);
+ rrdset_done(st_ram_usage);
+ }
+ }
+#endif
+
+}
diff --git a/daemon/global_statistics.h b/daemon/global_statistics.h
new file mode 100644
index 0000000..9dd7db5
--- /dev/null
+++ b/daemon/global_statistics.h
@@ -0,0 +1,23 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_GLOBAL_STATISTICS_H
+#define NETDATA_GLOBAL_STATISTICS_H 1
+
+#include "common.h"
+
+// ----------------------------------------------------------------------------
+// global statistics
+
+extern void rrdr_query_completed(uint64_t db_points_read, uint64_t result_points_generated);
+
+extern void finished_web_request_statistics(uint64_t dt,
+ uint64_t bytes_received,
+ uint64_t bytes_sent,
+ uint64_t content_size,
+ uint64_t compressed_content_size);
+
+extern uint64_t web_client_connected(void);
+extern void web_client_disconnected(void);
+extern void global_statistics_charts(void);
+
+#endif /* NETDATA_GLOBAL_STATISTICS_H */
diff --git a/daemon/main.c b/daemon/main.c
new file mode 100644
index 0000000..7c002ac
--- /dev/null
+++ b/daemon/main.c
@@ -0,0 +1,1532 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "common.h"
+#include "buildinfo.h"
+
+int netdata_zero_metrics_enabled;
+int netdata_anonymous_statistics_enabled;
+
+struct config netdata_config = {
+ .first_section = NULL,
+ .last_section = NULL,
+ .mutex = NETDATA_MUTEX_INITIALIZER,
+ .index = {
+ .avl_tree = {
+ .root = NULL,
+ .compar = appconfig_section_compare
+ },
+ .rwlock = AVL_LOCK_INITIALIZER
+ }
+};
+
+void netdata_cleanup_and_exit(int ret) {
+ // enabling this is wrong:
+ // the threads would be cancelled while we are still cleaning up
+ // netdata_exit = 1;
+
+ error_log_limit_unlimited();
+ info("EXIT: netdata prepares to exit with code %d...", ret);
+
+ send_statistics("EXIT", ret?"ERROR":"OK","-");
+
+ // cleanup/save the database and exit
+ info("EXIT: cleaning up the database...");
+ rrdhost_cleanup_all();
+
+ if(!ret) {
+ // exit cleanly
+
+ // stop everything
+ info("EXIT: stopping static threads...");
+ cancel_main_threads();
+
+ // free the database
+ info("EXIT: freeing database memory...");
+#ifdef ENABLE_DBENGINE
+ rrdeng_prepare_exit(&multidb_ctx);
+#endif
+ rrdhost_free_all();
+#ifdef ENABLE_DBENGINE
+ rrdeng_exit(&multidb_ctx);
+#endif
+ }
+
+ // unlink the pid
+ if(pidfile[0]) {
+ info("EXIT: removing netdata PID file '%s'...", pidfile);
+ if(unlink(pidfile) != 0)
+ error("EXIT: cannot unlink pidfile '%s'.", pidfile);
+ }
+
+#ifdef ENABLE_HTTPS
+ security_clean_openssl();
+#endif
+ info("EXIT: all done - netdata is now exiting - bye bye...");
+ exit(ret);
+}
+
+struct netdata_static_thread static_threads[] = {
+
+ NETDATA_PLUGIN_HOOK_CHECKS
+ NETDATA_PLUGIN_HOOK_FREEBSD
+ NETDATA_PLUGIN_HOOK_MACOS
+
+ // linux internal plugins
+ NETDATA_PLUGIN_HOOK_LINUX_PROC
+ NETDATA_PLUGIN_HOOK_LINUX_DISKSPACE
+ NETDATA_PLUGIN_HOOK_LINUX_CGROUPS
+ NETDATA_PLUGIN_HOOK_LINUX_TC
+
+ NETDATA_PLUGIN_HOOK_IDLEJITTER
+ NETDATA_PLUGIN_HOOK_STATSD
+
+#ifdef ENABLE_ACLK
+ NETDATA_ACLK_HOOK
+#endif
+
+ // common plugins for all systems
+ {"BACKENDS", NULL, NULL, 1, NULL, NULL, backends_main},
+ {"EXPORTING", NULL, NULL, 1, NULL, NULL, exporting_main},
+ {"WEB_SERVER[static1]", NULL, NULL, 0, NULL, NULL, socket_listen_main_static_threaded},
+ {"STREAM", NULL, NULL, 0, NULL, NULL, rrdpush_sender_thread},
+
+ NETDATA_PLUGIN_HOOK_PLUGINSD
+ NETDATA_PLUGIN_HOOK_HEALTH
+
+ {NULL, NULL, NULL, 0, NULL, NULL, NULL}
+};
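+// The table above ends with an all-NULL sentinel entry; the loops that walk it
+// stop at the first entry whose name is NULL.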
+
+void web_server_threading_selection(void) {
+ web_server_mode = web_server_mode_id(config_get(CONFIG_SECTION_WEB, "mode", web_server_mode_name(web_server_mode)));
+
+ int static_threaded = (web_server_mode == WEB_SERVER_MODE_STATIC_THREADED);
+
+ int i;
+ for (i = 0; static_threads[i].name; i++) {
+ if (static_threads[i].start_routine == socket_listen_main_static_threaded)
+ static_threads[i].enabled = static_threaded;
+ }
+}
+
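+// Decide whether an access-control pattern should also be matched against reverse-DNS
+// names: "yes" and "no" are explicit, while "heuristic" enables DNS matching only when
+// the pattern contains tokens that look like host names rather than IP addresses.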
+int make_dns_decision(const char *section_name, const char *config_name, const char *default_value, SIMPLE_PATTERN *p)
+{
+ char *value = config_get(section_name,config_name,default_value);
+ if(!strcmp("yes",value))
+ return 1;
+ if(!strcmp("no",value))
+ return 0;
+ if(strcmp("heuristic",value))
+ error("Invalid configuration option '%s' for '%s'/'%s'. Valid options are 'yes', 'no' and 'heuristic'. Proceeding with 'heuristic'",
+ value, section_name, config_name);
+
+ return simple_pattern_is_potential_name(p);
+}
+
+void web_server_config_options(void)
+{
+ web_client_timeout =
+ (int)config_get_number(CONFIG_SECTION_WEB, "disconnect idle clients after seconds", web_client_timeout);
+ web_client_first_request_timeout =
+ (int)config_get_number(CONFIG_SECTION_WEB, "timeout for first request", web_client_first_request_timeout);
+ web_client_streaming_rate_t =
+ config_get_number(CONFIG_SECTION_WEB, "accept a streaming request every seconds", web_client_streaming_rate_t);
+
+ respect_web_browser_do_not_track_policy =
+ config_get_boolean(CONFIG_SECTION_WEB, "respect do not track policy", respect_web_browser_do_not_track_policy);
+ web_x_frame_options = config_get(CONFIG_SECTION_WEB, "x-frame-options response header", "");
+ if(!*web_x_frame_options)
+ web_x_frame_options = NULL;
+
+ web_allow_connections_from =
+ simple_pattern_create(config_get(CONFIG_SECTION_WEB, "allow connections from", "localhost *"),
+ NULL, SIMPLE_PATTERN_EXACT);
+ web_allow_connections_dns =
+ make_dns_decision(CONFIG_SECTION_WEB, "allow connections by dns", "heuristic", web_allow_connections_from);
+ web_allow_dashboard_from =
+ simple_pattern_create(config_get(CONFIG_SECTION_WEB, "allow dashboard from", "localhost *"),
+ NULL, SIMPLE_PATTERN_EXACT);
+ web_allow_dashboard_dns =
+ make_dns_decision(CONFIG_SECTION_WEB, "allow dashboard by dns", "heuristic", web_allow_dashboard_from);
+ web_allow_badges_from =
+ simple_pattern_create(config_get(CONFIG_SECTION_WEB, "allow badges from", "*"), NULL, SIMPLE_PATTERN_EXACT);
+ web_allow_badges_dns =
+ make_dns_decision(CONFIG_SECTION_WEB, "allow badges by dns", "heuristic", web_allow_badges_from);
+ web_allow_registry_from =
+ simple_pattern_create(config_get(CONFIG_SECTION_REGISTRY, "allow from", "*"), NULL, SIMPLE_PATTERN_EXACT);
+ web_allow_registry_dns = make_dns_decision(CONFIG_SECTION_REGISTRY, "allow by dns", "heuristic",
+ web_allow_registry_from);
+ web_allow_streaming_from = simple_pattern_create(config_get(CONFIG_SECTION_WEB, "allow streaming from", "*"),
+ NULL, SIMPLE_PATTERN_EXACT);
+ web_allow_streaming_dns = make_dns_decision(CONFIG_SECTION_WEB, "allow streaming by dns", "heuristic",
+ web_allow_streaming_from);
+ // Note the default here is not "heuristic": the wildcards could match DNS names, but the intent is IP addresses.
+ web_allow_netdataconf_from = simple_pattern_create(config_get(CONFIG_SECTION_WEB, "allow netdata.conf from",
+ "localhost fd* 10.* 192.168.* 172.16.* 172.17.* 172.18.*"
+ " 172.19.* 172.20.* 172.21.* 172.22.* 172.23.* 172.24.*"
+ " 172.25.* 172.26.* 172.27.* 172.28.* 172.29.* 172.30.*"
+ " 172.31.* UNKNOWN"), NULL, SIMPLE_PATTERN_EXACT);
+ web_allow_netdataconf_dns =
+ make_dns_decision(CONFIG_SECTION_WEB, "allow netdata.conf by dns", "no", web_allow_netdataconf_from);
+ web_allow_mgmt_from =
+ simple_pattern_create(config_get(CONFIG_SECTION_WEB, "allow management from", "localhost"),
+ NULL, SIMPLE_PATTERN_EXACT);
+ web_allow_mgmt_dns =
+ make_dns_decision(CONFIG_SECTION_WEB, "allow management by dns","heuristic",web_allow_mgmt_from);
+
+
+#ifdef NETDATA_WITH_ZLIB
+ web_enable_gzip = config_get_boolean(CONFIG_SECTION_WEB, "enable gzip compression", web_enable_gzip);
+
+ char *s = config_get(CONFIG_SECTION_WEB, "gzip compression strategy", "default");
+ if(!strcmp(s, "default"))
+ web_gzip_strategy = Z_DEFAULT_STRATEGY;
+ else if(!strcmp(s, "filtered"))
+ web_gzip_strategy = Z_FILTERED;
+ else if(!strcmp(s, "huffman only"))
+ web_gzip_strategy = Z_HUFFMAN_ONLY;
+ else if(!strcmp(s, "rle"))
+ web_gzip_strategy = Z_RLE;
+ else if(!strcmp(s, "fixed"))
+ web_gzip_strategy = Z_FIXED;
+ else {
+ error("Invalid compression strategy '%s'. Valid strategies are 'default', 'filtered', 'huffman only', 'rle' and 'fixed'. Proceeding with 'default'.", s);
+ web_gzip_strategy = Z_DEFAULT_STRATEGY;
+ }
+
+ web_gzip_level = (int)config_get_number(CONFIG_SECTION_WEB, "gzip compression level", 3);
+ if(web_gzip_level < 1) {
+ error("Invalid compression level %d. Valid levels are 1 (fastest) to 9 (best ratio). Proceeding with level 1 (fastest compression).", web_gzip_level);
+ web_gzip_level = 1;
+ }
+ else if(web_gzip_level > 9) {
+ error("Invalid compression level %d. Valid levels are 1 (fastest) to 9 (best ratio). Proceeding with level 9 (best compression).", web_gzip_level);
+ web_gzip_level = 9;
+ }
+#endif /* NETDATA_WITH_ZLIB */
+}
+
+
+// killpid kills pid with SIGTERM.
+int killpid(pid_t pid) {
+ int ret;
+ debug(D_EXIT, "Request to kill pid %d", pid);
+
+ errno = 0;
+ ret = kill(pid, SIGTERM);
+ if (ret == -1) {
+ switch(errno) {
+ case ESRCH:
+ // The process no longer exists; we wanted it to exit anyway, so let the caller handle it.
+ return ret;
+
+ case EPERM:
+ error("Cannot kill pid %d, but I do not have enough permissions.", pid);
+ break;
+
+ default:
+ error("Cannot kill pid %d, but I received an error.", pid);
+ break;
+ }
+ }
+
+ return ret;
+}
+
+void cancel_main_threads() {
+ error_log_limit_unlimited();
+
+ int i, found = 0;
+ usec_t max = 5 * USEC_PER_SEC, step = 100000;
+ for (i = 0; static_threads[i].name != NULL ; i++) {
+ if(static_threads[i].enabled == NETDATA_MAIN_THREAD_RUNNING) {
+ info("EXIT: Stopping main thread: %s", static_threads[i].name);
+ netdata_thread_cancel(*static_threads[i].thread);
+ found++;
+ }
+ }
+
+ netdata_exit = 1;
+
+ while(found && max > 0) {
+ max -= step;
+ info("Waiting %d threads to finish...", found);
+ sleep_usec(step);
+ found = 0;
+ for (i = 0; static_threads[i].name != NULL ; i++) {
+ if (static_threads[i].enabled != NETDATA_MAIN_THREAD_EXITED)
+ found++;
+ }
+ }
+
+ if(found) {
+ for (i = 0; static_threads[i].name != NULL ; i++) {
+ if (static_threads[i].enabled != NETDATA_MAIN_THREAD_EXITED)
+ error("Main thread %s takes too long to exit. Giving up...", static_threads[i].name);
+ }
+ }
+ else
+ info("All threads finished.");
+}
+
+struct option_def option_definitions[] = {
+ // opt description arg name default value
+ { 'c', "Configuration file to load.", "filename", CONFIG_DIR "/" CONFIG_FILENAME},
+ { 'D', "Do not fork. Run in the foreground.", NULL, "run in the background"},
+ { 'd', "Fork. Run in the background.", NULL, "run in the background"},
+ { 'h', "Display this help message.", NULL, NULL},
+ { 'P', "File to save a pid while running.", "filename", "do not save pid to a file"},
+ { 'i', "The IP address to listen to.", "IP", "all IP addresses IPv4 and IPv6"},
+ { 'p', "API/Web port to use.", "port", "19999"},
+ { 's', "Prefix for /proc and /sys (for containers).", "path", "no prefix"},
+ { 't', "The internal clock of netdata.", "seconds", "1"},
+ { 'u', "Run as user.", "username", "netdata"},
+ { 'v', "Print netdata version and exit.", NULL, NULL},
+ { 'V', "Print netdata version and exit.", NULL, NULL},
+ { 'W', "See Advanced options below.", "options", NULL},
+};
+
+int help(int exitcode) {
+ FILE *stream;
+ if(exitcode == 0)
+ stream = stdout;
+ else
+ stream = stderr;
+
+ int num_opts = sizeof(option_definitions) / sizeof(struct option_def);
+ int i;
+ int max_len_arg = 0;
+
+ // Compute maximum argument length
+ for( i = 0; i < num_opts; i++ ) {
+ if(option_definitions[i].arg_name) {
+ int len_arg = (int)strlen(option_definitions[i].arg_name);
+ if(len_arg > max_len_arg) max_len_arg = len_arg;
+ }
+ }
+
+ if(max_len_arg > 30) max_len_arg = 30;
+ if(max_len_arg < 20) max_len_arg = 20;
+
+ fprintf(stream, "%s", "\n"
+ " ^\n"
+ " |.-. .-. .-. .-. . netdata \n"
+ " | '-' '-' '-' '-' real-time performance monitoring, done right! \n"
+ " +----+-----+-----+-----+-----+-----+-----+-----+-----+-----+-----+-----+--->\n"
+ "\n"
+ " Copyright (C) 2016-2020, Netdata, Inc. <info@netdata.cloud>\n"
+ " Released under GNU General Public License v3 or later.\n"
+ " All rights reserved.\n"
+ "\n"
+ " Home Page : https://netdata.cloud\n"
+ " Source Code: https://github.com/netdata/netdata\n"
+ " Docs : https://learn.netdata.cloud\n"
+ " Support : https://github.com/netdata/netdata/issues\n"
+ " License : https://github.com/netdata/netdata/blob/master/LICENSE.md\n"
+ "\n"
+ " Twitter : https://twitter.com/linuxnetdata\n"
+ " Facebook : https://www.facebook.com/linuxnetdata/\n"
+ "\n"
+ "\n"
+ );
+
+ fprintf(stream, " SYNOPSIS: netdata [options]\n");
+ fprintf(stream, "\n");
+ fprintf(stream, " Options:\n\n");
+
+ // Output options description.
+ for( i = 0; i < num_opts; i++ ) {
+ fprintf(stream, " -%c %-*s %s", option_definitions[i].val, max_len_arg, option_definitions[i].arg_name ? option_definitions[i].arg_name : "", option_definitions[i].description);
+ if(option_definitions[i].default_value) {
+ fprintf(stream, "\n %c %-*s Default: %s\n", ' ', max_len_arg, "", option_definitions[i].default_value);
+ } else {
+ fprintf(stream, "\n");
+ }
+ fprintf(stream, "\n");
+ }
+
+ fprintf(stream, "\n Advanced options:\n\n"
+ " -W stacksize=N Set the stacksize (in bytes).\n\n"
+ " -W debug_flags=N Set runtime tracing to debug.log.\n\n"
+ " -W unittest Run internal unittests and exit.\n\n"
+#ifdef ENABLE_DBENGINE
+ " -W createdataset=N Create a DB engine dataset of N seconds and exit.\n\n"
+ " -W stresstest=A,B,C,D,E,F\n"
+ " Run a DB engine stress test for A seconds,\n"
+ " with B writers and C readers, with a ramp up\n"
+ " time of D seconds for writers, a page cache\n"
+ " size of E MiB, an optional disk space limit"
+ " of F MiB and exit.\n\n"
+#endif
+ " -W set section option value\n"
+ " set netdata.conf option from the command line.\n\n"
+ " -W simple-pattern pattern string\n"
+ " Check if string matches pattern and exit.\n\n"
+ " -W \"claim -token=TOKEN -rooms=ROOM1,ROOM2\"\n"
+ " Claim the agent to the workspace rooms pointed to by TOKEN and ROOM*.\n\n"
+ );
+
+ fprintf(stream, "\n Signals netdata handles:\n\n"
+ " - HUP Close and reopen log files.\n"
+ " - USR1 Save internal DB to disk.\n"
+ " - USR2 Reload health configuration.\n"
+ "\n"
+ );
+
+ fflush(stream);
+ return exitcode;
+}
+
+// TODO: Remove this function with the next major release.
+void remove_option(int opt_index, int *argc, char **argv) {
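+ // Example: given argv = {"netdata", "-c", "/etc/netdata.conf", "-D"} and opt_index 1,
+ // "-c" and its value are shifted out, leaving argv = {"netdata", "-D"} and *argc == 2.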
+ int i;
+
+ // remove the options.
+ do {
+ *argc = *argc - 1;
+ for(i = opt_index; i < *argc; i++) {
+ argv[i] = argv[i+1];
+ }
+ i = opt_index;
+ } while(opt_index < *argc && argv[i][0] != '-');
+}
+
+static const char *verify_required_directory(const char *dir) {
+ if(chdir(dir) == -1)
+ fatal("Cannot cd to directory '%s'", dir);
+
+ DIR *d = opendir(dir);
+ if(!d)
+ fatal("Cannot examine the contents of directory '%s'", dir);
+ closedir(d);
+
+ return dir;
+}
+
+#ifdef ENABLE_HTTPS
+static void security_init(){
+ char filename[FILENAME_MAX + 1];
+ snprintfz(filename, FILENAME_MAX, "%s/ssl/key.pem",netdata_configured_user_config_dir);
+ security_key = config_get(CONFIG_SECTION_WEB, "ssl key", filename);
+
+ snprintfz(filename, FILENAME_MAX, "%s/ssl/cert.pem",netdata_configured_user_config_dir);
+ security_cert = config_get(CONFIG_SECTION_WEB, "ssl certificate", filename);
+
+ tls_version = config_get(CONFIG_SECTION_WEB, "tls version", "1.3");
+ tls_ciphers = config_get(CONFIG_SECTION_WEB, "tls ciphers", "none");
+
+ security_openssl_library();
+}
+#endif
+
+static void log_init(void) {
+ char filename[FILENAME_MAX + 1];
+ snprintfz(filename, FILENAME_MAX, "%s/debug.log", netdata_configured_log_dir);
+ stdout_filename = config_get(CONFIG_SECTION_GLOBAL, "debug log", filename);
+
+ snprintfz(filename, FILENAME_MAX, "%s/error.log", netdata_configured_log_dir);
+ stderr_filename = config_get(CONFIG_SECTION_GLOBAL, "error log", filename);
+
+ snprintfz(filename, FILENAME_MAX, "%s/access.log", netdata_configured_log_dir);
+ stdaccess_filename = config_get(CONFIG_SECTION_GLOBAL, "access log", filename);
+
+ char deffacility[8];
+ snprintfz(deffacility,7,"%s","daemon");
+ facility_log = config_get(CONFIG_SECTION_GLOBAL, "facility log", deffacility);
+
+ error_log_throttle_period = config_get_number(CONFIG_SECTION_GLOBAL, "errors flood protection period", error_log_throttle_period);
+ error_log_errors_per_period = (unsigned long)config_get_number(CONFIG_SECTION_GLOBAL, "errors to trigger flood protection", (long long int)error_log_errors_per_period);
+ error_log_errors_per_period_backup = error_log_errors_per_period;
+
+ setenv("NETDATA_ERRORS_THROTTLE_PERIOD", config_get(CONFIG_SECTION_GLOBAL, "errors flood protection period" , ""), 1);
+ setenv("NETDATA_ERRORS_PER_PERIOD", config_get(CONFIG_SECTION_GLOBAL, "errors to trigger flood protection", ""), 1);
+}
+
+char *initialize_lock_directory_path(char *prefix)
+{
+ char filename[FILENAME_MAX + 1];
+ snprintfz(filename, FILENAME_MAX, "%s/lock", prefix);
+
+ return config_get(CONFIG_SECTION_GLOBAL, "lock directory", filename);
+}
+
+static void backwards_compatible_config() {
+ // move [global] options to the [web] section
+ config_move(CONFIG_SECTION_GLOBAL, "http port listen backlog",
+ CONFIG_SECTION_WEB, "listen backlog");
+
+ config_move(CONFIG_SECTION_GLOBAL, "bind socket to IP",
+ CONFIG_SECTION_WEB, "bind to");
+
+ config_move(CONFIG_SECTION_GLOBAL, "bind to",
+ CONFIG_SECTION_WEB, "bind to");
+
+ config_move(CONFIG_SECTION_GLOBAL, "port",
+ CONFIG_SECTION_WEB, "default port");
+
+ config_move(CONFIG_SECTION_GLOBAL, "default port",
+ CONFIG_SECTION_WEB, "default port");
+
+ config_move(CONFIG_SECTION_GLOBAL, "disconnect idle web clients after seconds",
+ CONFIG_SECTION_WEB, "disconnect idle clients after seconds");
+
+ config_move(CONFIG_SECTION_GLOBAL, "respect web browser do not track policy",
+ CONFIG_SECTION_WEB, "respect do not track policy");
+
+ config_move(CONFIG_SECTION_GLOBAL, "web x-frame-options header",
+ CONFIG_SECTION_WEB, "x-frame-options response header");
+
+ config_move(CONFIG_SECTION_GLOBAL, "enable web responses gzip compression",
+ CONFIG_SECTION_WEB, "enable gzip compression");
+
+ config_move(CONFIG_SECTION_GLOBAL, "web compression strategy",
+ CONFIG_SECTION_WEB, "gzip compression strategy");
+
+ config_move(CONFIG_SECTION_GLOBAL, "web compression level",
+ CONFIG_SECTION_WEB, "gzip compression level");
+
+ config_move(CONFIG_SECTION_GLOBAL, "web files owner",
+ CONFIG_SECTION_WEB, "web files owner");
+
+ config_move(CONFIG_SECTION_GLOBAL, "web files group",
+ CONFIG_SECTION_WEB, "web files group");
+
+ config_move(CONFIG_SECTION_BACKEND, "opentsdb host tags",
+ CONFIG_SECTION_BACKEND, "host tags");
+}
+
+static void get_netdata_configured_variables() {
+ backwards_compatible_config();
+
+ // ------------------------------------------------------------------------
+ // get the hostname
+
+ char buf[HOSTNAME_MAX + 1];
+ if(gethostname(buf, HOSTNAME_MAX) == -1){
+ error("Cannot get machine hostname.");
+ }
+
+ netdata_configured_hostname = config_get(CONFIG_SECTION_GLOBAL, "hostname", buf);
+ debug(D_OPTIONS, "hostname set to '%s'", netdata_configured_hostname);
+
+ // ------------------------------------------------------------------------
+ // get default database size
+
+ default_rrd_history_entries = (int) config_get_number(CONFIG_SECTION_GLOBAL, "history", align_entries_to_pagesize(default_rrd_memory_mode, RRD_DEFAULT_HISTORY_ENTRIES));
+
+ long h = align_entries_to_pagesize(default_rrd_memory_mode, default_rrd_history_entries);
+ if(h != default_rrd_history_entries) {
+ config_set_number(CONFIG_SECTION_GLOBAL, "history", h);
+ default_rrd_history_entries = (int)h;
+ }
+
+ if(default_rrd_history_entries < 5 || default_rrd_history_entries > RRD_HISTORY_ENTRIES_MAX) {
+ error("Invalid history entries %d given. Defaulting to %d.", default_rrd_history_entries, RRD_DEFAULT_HISTORY_ENTRIES);
+ default_rrd_history_entries = RRD_DEFAULT_HISTORY_ENTRIES;
+ }
+
+ // ------------------------------------------------------------------------
+ // get default database update frequency
+
+ default_rrd_update_every = (int) config_get_number(CONFIG_SECTION_GLOBAL, "update every", UPDATE_EVERY);
+ if(default_rrd_update_every < 1 || default_rrd_update_every > 600) {
+ error("Invalid data collection frequency (update every) %d given. Defaulting to %d.", default_rrd_update_every, UPDATE_EVERY_MAX);
+ default_rrd_update_every = UPDATE_EVERY;
+ }
+
+ // ------------------------------------------------------------------------
+ // get system paths
+
+ netdata_configured_user_config_dir = config_get(CONFIG_SECTION_GLOBAL, "config directory", netdata_configured_user_config_dir);
+ netdata_configured_stock_config_dir = config_get(CONFIG_SECTION_GLOBAL, "stock config directory", netdata_configured_stock_config_dir);
+ netdata_configured_log_dir = config_get(CONFIG_SECTION_GLOBAL, "log directory", netdata_configured_log_dir);
+ netdata_configured_web_dir = config_get(CONFIG_SECTION_GLOBAL, "web files directory", netdata_configured_web_dir);
+ netdata_configured_cache_dir = config_get(CONFIG_SECTION_GLOBAL, "cache directory", netdata_configured_cache_dir);
+ netdata_configured_varlib_dir = config_get(CONFIG_SECTION_GLOBAL, "lib directory", netdata_configured_varlib_dir);
+ char *env_home=getenv("HOME");
+ netdata_configured_home_dir = config_get(CONFIG_SECTION_GLOBAL, "home directory", env_home?env_home:netdata_configured_home_dir);
+
+ netdata_configured_lock_dir = initialize_lock_directory_path(netdata_configured_varlib_dir);
+
+ {
+ pluginsd_initialize_plugin_directories();
+ netdata_configured_primary_plugins_dir = plugin_directories[PLUGINSD_STOCK_PLUGINS_DIRECTORY_PATH];
+ }
+
+ // ------------------------------------------------------------------------
+ // get default memory mode for the database
+
+ default_rrd_memory_mode = rrd_memory_mode_id(config_get(CONFIG_SECTION_GLOBAL, "memory mode", rrd_memory_mode_name(default_rrd_memory_mode)));
+
+#ifdef ENABLE_DBENGINE
+ // ------------------------------------------------------------------------
+ // get default Database Engine page cache size in MiB
+
+ default_rrdeng_page_cache_mb = (int) config_get_number(CONFIG_SECTION_GLOBAL, "page cache size", default_rrdeng_page_cache_mb);
+ if(default_rrdeng_page_cache_mb < RRDENG_MIN_PAGE_CACHE_SIZE_MB) {
+ error("Invalid page cache size %d given. Defaulting to %d.", default_rrdeng_page_cache_mb, RRDENG_MIN_PAGE_CACHE_SIZE_MB);
+ default_rrdeng_page_cache_mb = RRDENG_MIN_PAGE_CACHE_SIZE_MB;
+ }
+
+ // ------------------------------------------------------------------------
+ // get default Database Engine disk space quota in MiB
+
+ default_rrdeng_disk_quota_mb = (int) config_get_number(CONFIG_SECTION_GLOBAL, "dbengine disk space", default_rrdeng_disk_quota_mb);
+ if(default_rrdeng_disk_quota_mb < RRDENG_MIN_DISK_SPACE_MB) {
+ error("Invalid dbengine disk space %d given. Defaulting to %d.", default_rrdeng_disk_quota_mb, RRDENG_MIN_DISK_SPACE_MB);
+ default_rrdeng_disk_quota_mb = RRDENG_MIN_DISK_SPACE_MB;
+ }
+
+ default_multidb_disk_quota_mb = (int) config_get_number(CONFIG_SECTION_GLOBAL, "dbengine multihost disk space", compute_multidb_diskspace());
+ if(default_multidb_disk_quota_mb < RRDENG_MIN_DISK_SPACE_MB) {
+ error("Invalid multidb disk space %d given. Defaulting to %d.", default_multidb_disk_quota_mb, default_rrdeng_disk_quota_mb);
+ default_multidb_disk_quota_mb = default_rrdeng_disk_quota_mb;
+ }
+
+#endif
+ // ------------------------------------------------------------------------
+
+ netdata_configured_host_prefix = config_get(CONFIG_SECTION_GLOBAL, "host access prefix", "");
+ verify_netdata_host_prefix();
+
+ // --------------------------------------------------------------------
+ // get KSM settings
+
+#ifdef MADV_MERGEABLE
+ enable_ksm = config_get_boolean(CONFIG_SECTION_GLOBAL, "memory deduplication (ksm)", enable_ksm);
+#endif
+
+ // --------------------------------------------------------------------
+ // get various system parameters
+
+ get_system_HZ();
+ get_system_cpus();
+ get_system_pid_max();
+
+
+}
+
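+// Resolve the timezone in order of preference: the TZ environment variable,
+// /etc/timezone, the /etc/localtime symlink, and finally strftime("%Z").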
+static void get_system_timezone(void) {
+ // avoid flood calls to stat(/etc/localtime)
+ // http://stackoverflow.com/questions/4554271/how-to-avoid-excessive-stat-etc-localtime-calls-in-strftime-on-linux
+ const char *tz = getenv("TZ");
+ if(!tz || !*tz)
+ setenv("TZ", config_get(CONFIG_SECTION_GLOBAL, "TZ environment variable", ":/etc/localtime"), 0);
+
+ char buffer[FILENAME_MAX + 1] = "";
+ const char *timezone = NULL;
+ ssize_t ret;
+
+ // use the TZ variable
+ if(tz && *tz && *tz != ':') {
+ timezone = tz;
+ // info("TIMEZONE: using TZ variable '%s'", timezone);
+ }
+
+ // use the contents of /etc/timezone
+ if(!timezone && !read_file("/etc/timezone", buffer, FILENAME_MAX)) {
+ timezone = buffer;
+ // info("TIMEZONE: using the contents of /etc/timezone: '%s'", timezone);
+ }
+
+ // read the link /etc/localtime
+ if(!timezone) {
+ ret = readlink("/etc/localtime", buffer, FILENAME_MAX);
+
+ if(ret > 0) {
+ buffer[ret] = '\0';
+
+ char *cmp = "/usr/share/zoneinfo/";
+ size_t cmp_len = strlen(cmp);
+
+ char *s = strstr(buffer, cmp);
+ if (s && s[cmp_len]) {
+ timezone = &s[cmp_len];
+ // info("TIMEZONE: using the link of /etc/localtime: '%s'", timezone);
+ }
+ }
+ else
+ buffer[0] = '\0';
+ }
+
+ // find the timezone from strftime()
+ if(!timezone) {
+ time_t t;
+ struct tm *tmp, tmbuf;
+
+ t = now_realtime_sec();
+ tmp = localtime_r(&t, &tmbuf);
+
+ if (tmp != NULL) {
+ if(strftime(buffer, FILENAME_MAX, "%Z", tmp) == 0)
+ buffer[0] = '\0';
+ else {
+ buffer[FILENAME_MAX] = '\0';
+ timezone = buffer;
+ // info("TIMEZONE: using strftime(): '%s'", timezone);
+ }
+ }
+ }
+
+ if(timezone && *timezone) {
+ // make sure it does not have illegal characters
+ // info("TIMEZONE: fixing '%s'", timezone);
+
+ size_t len = strlen(timezone);
+ char tmp[len + 1];
+ char *d = tmp;
+ *d = '\0';
+
+ while(*timezone) {
+ if(isalnum(*timezone) || *timezone == '_' || *timezone == '/')
+ *d++ = *timezone++;
+ else
+ timezone++;
+ }
+ *d = '\0';
+ strncpyz(buffer, tmp, len);
+ timezone = buffer;
+ // info("TIMEZONE: fixed as '%s'", timezone);
+ }
+
+ if(!timezone || !*timezone)
+ timezone = "unknown";
+
+ netdata_configured_timezone = config_get(CONFIG_SECTION_GLOBAL, "timezone", timezone);
+}
+
+void set_global_environment() {
+ {
+ char b[16];
+ snprintfz(b, 15, "%d", default_rrd_update_every);
+ setenv("NETDATA_UPDATE_EVERY", b, 1);
+ }
+
+ setenv("NETDATA_VERSION" , program_version, 1);
+ setenv("NETDATA_HOSTNAME" , netdata_configured_hostname, 1);
+ setenv("NETDATA_CONFIG_DIR" , verify_required_directory(netdata_configured_user_config_dir), 1);
+ setenv("NETDATA_USER_CONFIG_DIR" , verify_required_directory(netdata_configured_user_config_dir), 1);
+ setenv("NETDATA_STOCK_CONFIG_DIR" , verify_required_directory(netdata_configured_stock_config_dir), 1);
+ setenv("NETDATA_PLUGINS_DIR" , verify_required_directory(netdata_configured_primary_plugins_dir), 1);
+ setenv("NETDATA_WEB_DIR" , verify_required_directory(netdata_configured_web_dir), 1);
+ setenv("NETDATA_CACHE_DIR" , verify_required_directory(netdata_configured_cache_dir), 1);
+ setenv("NETDATA_LIB_DIR" , verify_required_directory(netdata_configured_varlib_dir), 1);
+ setenv("NETDATA_LOCK_DIR" , netdata_configured_lock_dir, 1);
+ setenv("NETDATA_LOG_DIR" , verify_required_directory(netdata_configured_log_dir), 1);
+ setenv("HOME" , verify_required_directory(netdata_configured_home_dir), 1);
+ setenv("NETDATA_HOST_PREFIX" , netdata_configured_host_prefix, 1);
+
+ char *default_port = appconfig_get(&netdata_config, CONFIG_SECTION_WEB, "default port", NULL);
+ int clean = 0;
+ if (!default_port) {
+ default_port = strdupz("19999");
+ clean = 1;
+ }
+
+ setenv("NETDATA_LISTEN_PORT" , default_port, 1);
+ if(clean)
+ freez(default_port);
+
+ get_system_timezone();
+
+ // set the path we need
+ char path[1024 + 1], *p = getenv("PATH");
+ if(!p) p = "/bin:/usr/bin";
+ snprintfz(path, 1024, "%s:%s", p, "/sbin:/usr/sbin:/usr/local/bin:/usr/local/sbin");
+ setenv("PATH", config_get(CONFIG_SECTION_PLUGINS, "PATH environment variable", path), 1);
+
+ // python options
+ p = getenv("PYTHONPATH");
+ if(!p) p = "";
+ setenv("PYTHONPATH", config_get(CONFIG_SECTION_PLUGINS, "PYTHONPATH environment variable", p), 1);
+
+ // disable buffering for python plugins
+ setenv("PYTHONUNBUFFERED", "1", 1);
+
+ // switch to standard locale for plugins
+ setenv("LC_ALL", "C", 1);
+}
+
+static int load_netdata_conf(char *filename, char overwrite_used) {
+ errno = 0;
+
+ int ret = 0;
+
+ if(filename && *filename) {
+ ret = config_load(filename, overwrite_used, NULL);
+ if(!ret)
+ error("CONFIG: cannot load config file '%s'.", filename);
+ }
+ else {
+ filename = strdupz_path_subpath(netdata_configured_user_config_dir, "netdata.conf");
+
+ ret = config_load(filename, overwrite_used, NULL);
+ if(!ret) {
+ info("CONFIG: cannot load user config '%s'. Will try the stock version.", filename);
+ freez(filename);
+
+ filename = strdupz_path_subpath(netdata_configured_stock_config_dir, "netdata.conf");
+ ret = config_load(filename, overwrite_used, NULL);
+ if(!ret)
+ info("CONFIG: cannot load stock config '%s'. Running with internal defaults.", filename);
+ }
+
+ freez(filename);
+ }
+
+ return ret;
+}
+
+// coverity[ +tainted_string_sanitize_content : arg-0 ]
+static inline void coverity_remove_taint(char *s)
+{
+ (void)s;
+}
+
+int get_system_info(struct rrdhost_system_info *system_info) {
+ char *script;
+ script = mallocz(sizeof(char) * (strlen(netdata_configured_primary_plugins_dir) + strlen("system-info.sh") + 2));
+ sprintf(script, "%s/%s", netdata_configured_primary_plugins_dir, "system-info.sh");
+ if (unlikely(access(script, R_OK) != 0)) {
+ info("System info script %s not found.",script);
+ freez(script);
+ return 1;
+ }
+
+ pid_t command_pid;
+
+ info("Executing %s", script);
+
+ FILE *fp = mypopen(script, &command_pid);
+ if(fp) {
+ char line[200 + 1];
+ // The redundant strlen() calls were removed; if the Coverity tainted-string warning
+ // reappears, revert this. This is one-time init code, so performance is not a concern.
+ while (fgets(line, 200, fp) != NULL) {
+ char *value=line;
+ while (*value && *value != '=') value++;
+ if (*value=='=') {
+ *value='\0';
+ value++;
+ char *end = value;
+ while (*end && *end != '\n') end++;
+ *end = '\0'; // Overwrite newline if present
+                coverity_remove_taint(line); // the input is the controlled output of system-info.sh - not tainted
+ coverity_remove_taint(value);
+
+ if(unlikely(rrdhost_set_system_info_variable(system_info, line, value))) {
+ info("Unexpected environment variable %s=%s", line, value);
+ }
+ else {
+ info("%s=%s", line, value);
+ setenv(line, value, 1);
+ }
+ }
+ }
+ mypclose(fp, command_pid);
+ }
+ freez(script);
+ return 0;
+}
+
+void send_statistics( const char *action, const char *action_result, const char *action_data) {
+ static char *as_script;
+
+ if (netdata_anonymous_statistics_enabled == -1) {
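+        // first call: decide once whether anonymous statistics are enabled, honoring
+        // the .opt-out-from-anonymous-statistics marker in the user config directory
+        // and requiring the anonymous-statistics.sh script to be readable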
+ char *optout_file = mallocz(sizeof(char) * (strlen(netdata_configured_user_config_dir) +strlen(".opt-out-from-anonymous-statistics") + 2));
+ sprintf(optout_file, "%s/%s", netdata_configured_user_config_dir, ".opt-out-from-anonymous-statistics");
+ if (likely(access(optout_file, R_OK) != 0)) {
+ as_script = mallocz(sizeof(char) * (strlen(netdata_configured_primary_plugins_dir) + strlen("anonymous-statistics.sh") + 2));
+ sprintf(as_script, "%s/%s", netdata_configured_primary_plugins_dir, "anonymous-statistics.sh");
+ if (unlikely(access(as_script, R_OK) != 0)) {
+ netdata_anonymous_statistics_enabled=0;
+ info("Anonymous statistics script %s not found.",as_script);
+ freez(as_script);
+ } else {
+ netdata_anonymous_statistics_enabled=1;
+ }
+ } else {
+ netdata_anonymous_statistics_enabled = 0;
+ as_script = NULL;
+ }
+ freez(optout_file);
+ }
+ if(!netdata_anonymous_statistics_enabled) return;
+ if (!action) return;
+ if (!action_result) action_result="";
+ if (!action_data) action_data="";
+ char *command_to_run=mallocz(sizeof(char) * (strlen(action) + strlen(action_result) + strlen(action_data) + strlen(as_script) + 10));
+ pid_t command_pid;
+
+ sprintf(command_to_run,"%s '%s' '%s' '%s'", as_script, action, action_result, action_data);
+ info("%s", command_to_run);
+
+ FILE *fp = mypopen(command_to_run, &command_pid);
+ if(fp) {
+ char buffer[100 + 1];
+ while (fgets(buffer, 100, fp) != NULL);
+ mypclose(fp, command_pid);
+ }
+ freez(command_to_run);
+}
+
+void set_silencers_filename() {
+ char filename[FILENAME_MAX + 1];
+ snprintfz(filename, FILENAME_MAX, "%s/health.silencers.json", netdata_configured_varlib_dir);
+ silencers_filename = config_get(CONFIG_SECTION_HEALTH, "silencers file", filename);
+}
+
+/* Any config setting that can be accessed without a default value, i.e. with
+   config_get(..., ..., NULL), *MUST* be set in this procedure, which is called
+   in all the relevant code paths.
+*/
+void post_conf_load(char **user)
+{
+ // --------------------------------------------------------------------
+ // get the user we should run
+
+ // IMPORTANT: this is required before web_files_uid()
+ if(getuid() == 0) {
+ *user = config_get(CONFIG_SECTION_GLOBAL, "run as user", NETDATA_USER);
+ }
+ else {
+ struct passwd *passwd = getpwuid(getuid());
+ *user = config_get(CONFIG_SECTION_GLOBAL, "run as user", (passwd && passwd->pw_name)?passwd->pw_name:"");
+ }
+
+ // --------------------------------------------------------------------
+ // Check if the cloud is enabled
+#if defined( DISABLE_CLOUD ) || !defined( ENABLE_ACLK )
+ netdata_cloud_setting = 0;
+#else
+ netdata_cloud_setting = appconfig_get_boolean(&cloud_config, CONFIG_SECTION_GLOBAL, "enabled", 1);
+#endif
+ // This must be set before any point in the code that accesses it. Do not move it from this function.
+ appconfig_get(&cloud_config, CONFIG_SECTION_GLOBAL, "cloud base url", DEFAULT_CLOUD_BASE_URL);
+}
+
+int main(int argc, char **argv) {
+ int i;
+ int config_loaded = 0;
+ int dont_fork = 0;
+ size_t default_stacksize;
+ char *user = NULL;
+
+
+ netdata_ready=0;
+ // set the name for logging
+ program_name = "netdata";
+
+    // parse deprecated options
+ // TODO: Remove this block with the next major release.
+ {
+ i = 1;
+ while(i < argc) {
+ if(strcmp(argv[i], "-pidfile") == 0 && (i+1) < argc) {
+ strncpyz(pidfile, argv[i+1], FILENAME_MAX);
+ fprintf(stderr, "%s: deprecated option -- %s -- please use -P instead.\n", argv[0], argv[i]);
+ remove_option(i, &argc, argv);
+ }
+ else if(strcmp(argv[i], "-nodaemon") == 0 || strcmp(argv[i], "-nd") == 0) {
+ dont_fork = 1;
+ fprintf(stderr, "%s: deprecated option -- %s -- please use -D instead.\n ", argv[0], argv[i]);
+ remove_option(i, &argc, argv);
+ }
+ else if(strcmp(argv[i], "-ch") == 0 && (i+1) < argc) {
+ config_set(CONFIG_SECTION_GLOBAL, "host access prefix", argv[i+1]);
+ fprintf(stderr, "%s: deprecated option -- %s -- please use -s instead.\n", argv[0], argv[i]);
+ remove_option(i, &argc, argv);
+ }
+ else if(strcmp(argv[i], "-l") == 0 && (i+1) < argc) {
+ config_set(CONFIG_SECTION_GLOBAL, "history", argv[i+1]);
+ fprintf(stderr, "%s: deprecated option -- %s -- This option will be removed with V2.*.\n", argv[0], argv[i]);
+ remove_option(i, &argc, argv);
+ }
+ else i++;
+ }
+ }
+ if (argc > 1 && strcmp(argv[1], SPAWN_SERVER_COMMAND_LINE_ARGUMENT) == 0) {
+ // don't run netdata, this is the spawn server
+ spawn_server();
+ exit(0);
+ }
+
+ // parse options
+ {
+ int num_opts = sizeof(option_definitions) / sizeof(struct option_def);
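+        // build a getopt(3) optstring from the option table: one character per
+        // option, with a trailing ':' for options that take an argument
+        // (e.g. an option 'c' that takes an argument contributes "c:")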
+ char optstring[(num_opts * 2) + 1];
+
+ int string_i = 0;
+ for( i = 0; i < num_opts; i++ ) {
+ optstring[string_i] = option_definitions[i].val;
+ string_i++;
+ if(option_definitions[i].arg_name) {
+ optstring[string_i] = ':';
+ string_i++;
+ }
+ }
+        // terminate optstring
+        optstring[string_i] = '\0';
+        optstring[num_opts * 2] = '\0';
+
+ int opt;
+ while( (opt = getopt(argc, argv, optstring)) != -1 ) {
+ switch(opt) {
+ case 'c':
+ if(load_netdata_conf(optarg, 1) != 1) {
+ error("Cannot load configuration file %s.", optarg);
+ return 1;
+ }
+ else {
+ debug(D_OPTIONS, "Configuration loaded from %s.", optarg);
+ post_conf_load(&user);
+ load_cloud_conf(1);
+ config_loaded = 1;
+ }
+ break;
+ case 'D':
+ dont_fork = 1;
+ break;
+ case 'd':
+ dont_fork = 0;
+ break;
+ case 'h':
+ return help(0);
+ case 'i':
+ config_set(CONFIG_SECTION_WEB, "bind to", optarg);
+ break;
+ case 'P':
+                    strncpyz(pidfile, optarg, FILENAME_MAX);
+ break;
+ case 'p':
+ config_set(CONFIG_SECTION_GLOBAL, "default port", optarg);
+ break;
+ case 's':
+ config_set(CONFIG_SECTION_GLOBAL, "host access prefix", optarg);
+ break;
+ case 't':
+ config_set(CONFIG_SECTION_GLOBAL, "update every", optarg);
+ break;
+ case 'u':
+ config_set(CONFIG_SECTION_GLOBAL, "run as user", optarg);
+ break;
+ case 'v':
+ case 'V':
+ printf("%s %s\n", program_name, program_version);
+ return 0;
+ case 'W':
+ {
+ char* stacksize_string = "stacksize=";
+ char* debug_flags_string = "debug_flags=";
+ char* claim_string = "claim";
+#ifdef ENABLE_DBENGINE
+ char* createdataset_string = "createdataset=";
+ char* stresstest_string = "stresstest=";
+#endif
+
+ if(strcmp(optarg, "unittest") == 0) {
+ if(unit_test_buffer()) return 1;
+ if(unit_test_str2ld()) return 1;
+ // No call to load the config file on this code-path
+ post_conf_load(&user);
+ get_netdata_configured_variables();
+ default_rrd_update_every = 1;
+ default_rrd_memory_mode = RRD_MEMORY_MODE_RAM;
+ default_health_enabled = 0;
+ registry_init();
+ if(rrd_init("unittest", NULL)) {
+ fprintf(stderr, "rrd_init failed for unittest\n");
+ return 1;
+ }
+ default_rrdpush_enabled = 0;
+ if(run_all_mockup_tests()) return 1;
+ if(unit_test_storage()) return 1;
+#ifdef ENABLE_DBENGINE
+ if(test_dbengine()) return 1;
+#endif
+ fprintf(stderr, "\n\nALL TESTS PASSED\n\n");
+ return 0;
+ }
+#ifdef ENABLE_DBENGINE
+ else if(strncmp(optarg, createdataset_string, strlen(createdataset_string)) == 0) {
+ optarg += strlen(createdataset_string);
+ unsigned history_seconds = strtoul(optarg, NULL, 0);
+ generate_dbengine_dataset(history_seconds);
+ return 0;
+ }
+ else if(strncmp(optarg, stresstest_string, strlen(stresstest_string)) == 0) {
+ char *endptr;
+ unsigned test_duration_sec = 0, dset_charts = 0, query_threads = 0, ramp_up_seconds = 0,
+ page_cache_mb = 0, disk_space_mb = 0;
+
+ optarg += strlen(stresstest_string);
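+                        // the argument is a comma-separated list, parsed in this order:
+                        //   duration_sec,charts,query_threads,ramp_up_sec,page_cache_MB,disk_space_MB
+                        // e.g. -W stresstest=60,64,4,10,64,1024 (later fields may be omitted)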
+ test_duration_sec = (unsigned)strtoul(optarg, &endptr, 0);
+ if (',' == *endptr)
+ dset_charts = (unsigned)strtoul(endptr + 1, &endptr, 0);
+ if (',' == *endptr)
+ query_threads = (unsigned)strtoul(endptr + 1, &endptr, 0);
+ if (',' == *endptr)
+ ramp_up_seconds = (unsigned)strtoul(endptr + 1, &endptr, 0);
+ if (',' == *endptr)
+ page_cache_mb = (unsigned)strtoul(endptr + 1, &endptr, 0);
+ if (',' == *endptr)
+ disk_space_mb = (unsigned)strtoul(endptr + 1, &endptr, 0);
+
+ dbengine_stress_test(test_duration_sec, dset_charts, query_threads, ramp_up_seconds,
+ page_cache_mb, disk_space_mb);
+ return 0;
+ }
+#endif
+ else if(strcmp(optarg, "simple-pattern") == 0) {
+ if(optind + 2 > argc) {
+ fprintf(stderr, "%s", "\nUSAGE: -W simple-pattern 'pattern' 'string'\n\n"
+ " Checks if 'pattern' matches the given 'string'.\n"
+ " - 'pattern' can be one or more space separated words.\n"
+ " - each 'word' can contain one or more asterisks.\n"
+ " - words starting with '!' give negative matches.\n"
+ " - words are processed left to right\n"
+ "\n"
+ "Examples:\n"
+ "\n"
+ " > match all veth interfaces, except veth0:\n"
+ "\n"
+ " -W simple-pattern '!veth0 veth*' 'veth12'\n"
+ "\n"
+ "\n"
+ " > match all *.ext files directly in /path/:\n"
+ " (this will not match *.ext files in a subdir of /path/)\n"
+ "\n"
+ " -W simple-pattern '!/path/*/*.ext /path/*.ext' '/path/test.ext'\n"
+ "\n"
+ );
+ return 1;
+ }
+
+                        const char *haystack = argv[optind];
+ const char *needle = argv[optind + 1];
+ size_t len = strlen(needle) + 1;
+ char wildcarded[len];
+
+                        SIMPLE_PATTERN *p = simple_pattern_create(haystack, NULL, SIMPLE_PATTERN_EXACT);
+ int ret = simple_pattern_matches_extract(p, needle, wildcarded, len);
+ simple_pattern_free(p);
+
+ if(ret) {
+ fprintf(stdout, "RESULT: MATCHED - pattern '%s' matches '%s', wildcarded '%s'\n", heystack, needle, wildcarded);
+ return 0;
+ }
+ else {
+ fprintf(stdout, "RESULT: NOT MATCHED - pattern '%s' does not match '%s', wildcarded '%s'\n", heystack, needle, wildcarded);
+ return 1;
+ }
+ }
+ else if(strncmp(optarg, stacksize_string, strlen(stacksize_string)) == 0) {
+ optarg += strlen(stacksize_string);
+ config_set(CONFIG_SECTION_GLOBAL, "pthread stack size", optarg);
+ }
+ else if(strncmp(optarg, debug_flags_string, strlen(debug_flags_string)) == 0) {
+ optarg += strlen(debug_flags_string);
+ config_set(CONFIG_SECTION_GLOBAL, "debug flags", optarg);
+ debug_flags = strtoull(optarg, NULL, 0);
+ }
+ else if(strcmp(optarg, "set") == 0) {
+ if(optind + 3 > argc) {
+ fprintf(stderr, "%s", "\nUSAGE: -W set 'section' 'key' 'value'\n\n"
+ " Overwrites settings of netdata.conf.\n"
+ "\n"
+ " These options interact with: -c netdata.conf\n"
+ " If -c netdata.conf is given on the command line,\n"
+ " before -W set... the user may overwrite command\n"
+ " line parameters at netdata.conf\n"
+ " If -c netdata.conf is given after (or missing)\n"
+ " -W set... the user cannot overwrite the command line\n"
+ " parameters."
+ "\n"
+ );
+ return 1;
+ }
+ const char *section = argv[optind];
+ const char *key = argv[optind + 1];
+ const char *value = argv[optind + 2];
+ optind += 3;
+
+ // set this one as the default
+ // only if it is not already set in the config file
+ // so the caller can use -c netdata.conf before or
+ // after this parameter to prevent or allow overwriting
+ // variables at netdata.conf
+ config_set_default(section, key, value);
+
+ // fprintf(stderr, "SET section '%s', key '%s', value '%s'\n", section, key, value);
+ }
+ else if(strcmp(optarg, "set2") == 0) {
+ if(optind + 4 > argc) {
+ fprintf(stderr, "%s", "\nUSAGE: -W set 'conf_file' 'section' 'key' 'value'\n\n"
+ " Overwrites settings of netdata.conf or cloud.conf\n"
+ "\n"
+ " These options interact with: -c netdata.conf\n"
+ " If -c netdata.conf is given on the command line,\n"
+ " before -W set... the user may overwrite command\n"
+ " line parameters at netdata.conf\n"
+ " If -c netdata.conf is given after (or missing)\n"
+ " -W set... the user cannot overwrite the command line\n"
+ " parameters."
+ " conf_file can be \"cloud\" or \"netdata\".\n"
+ "\n"
+ );
+ return 1;
+ }
+ const char *conf_file = argv[optind]; /* "cloud" is cloud.conf, otherwise netdata.conf */
+ struct config *tmp_config = strcmp(conf_file, "cloud") ? &netdata_config : &cloud_config;
+ const char *section = argv[optind + 1];
+ const char *key = argv[optind + 2];
+ const char *value = argv[optind + 3];
+ optind += 4;
+
+ // set this one as the default
+ // only if it is not already set in the config file
+ // so the caller can use -c netdata.conf before or
+ // after this parameter to prevent or allow overwriting
+ // variables at netdata.conf
+ appconfig_set_default(tmp_config, section, key, value);
+
+ // fprintf(stderr, "SET section '%s', key '%s', value '%s'\n", section, key, value);
+ }
+ else if(strcmp(optarg, "get") == 0) {
+ if(optind + 3 > argc) {
+ fprintf(stderr, "%s", "\nUSAGE: -W get 'section' 'key' 'value'\n\n"
+ " Prints settings of netdata.conf.\n"
+ "\n"
+ " These options interact with: -c netdata.conf\n"
+ " -c netdata.conf has to be given before -W get.\n"
+ "\n"
+ );
+ return 1;
+ }
+
+ if(!config_loaded) {
+ fprintf(stderr, "warning: no configuration file has been loaded. Use -c CONFIG_FILE, before -W get. Using default config.\n");
+ load_netdata_conf(NULL, 0);
+ post_conf_load(&user);
+ }
+
+ get_netdata_configured_variables();
+
+ const char *section = argv[optind];
+ const char *key = argv[optind + 1];
+ const char *def = argv[optind + 2];
+ const char *value = config_get(section, key, def);
+ printf("%s\n", value);
+ return 0;
+ }
+ else if(strcmp(optarg, "get2") == 0) {
+ if(optind + 4 > argc) {
+ fprintf(stderr, "%s", "\nUSAGE: -W get2 'conf_file' 'section' 'key' 'value'\n\n"
+ " Prints settings of netdata.conf or cloud.conf\n"
+ "\n"
+ " These options interact with: -c netdata.conf\n"
+ " -c netdata.conf has to be given before -W get2.\n"
+ " conf_file can be \"cloud\" or \"netdata\".\n"
+ "\n"
+ );
+ return 1;
+ }
+
+ if(!config_loaded) {
+ fprintf(stderr, "warning: no configuration file has been loaded. Use -c CONFIG_FILE, before -W get. Using default config.\n");
+ load_netdata_conf(NULL, 0);
+ post_conf_load(&user);
+ load_cloud_conf(1);
+ }
+
+ get_netdata_configured_variables();
+
+ const char *conf_file = argv[optind]; /* "cloud" is cloud.conf, otherwise netdata.conf */
+ struct config *tmp_config = strcmp(conf_file, "cloud") ? &netdata_config : &cloud_config;
+ const char *section = argv[optind + 1];
+ const char *key = argv[optind + 2];
+ const char *def = argv[optind + 3];
+ const char *value = appconfig_get(tmp_config, section, key, def);
+ printf("%s\n", value);
+ return 0;
+ }
+ else if(strncmp(optarg, claim_string, strlen(claim_string)) == 0) {
+ /* will trigger a claiming attempt when the agent is initialized */
+ claiming_pending_arguments = optarg + strlen(claim_string);
+ }
+ else if(strcmp(optarg, "buildinfo") == 0) {
+ printf("Version: %s %s\n", program_name, program_version);
+ print_build_info();
+ return 0;
+ }
+ else {
+ fprintf(stderr, "Unknown -W parameter '%s'\n", optarg);
+ return help(1);
+ }
+ }
+ break;
+
+ default: /* ? */
+ fprintf(stderr, "Unknown parameter '%c'\n", opt);
+ return help(1);
+ }
+ }
+ }
+
+#ifdef _SC_OPEN_MAX
+ // close all open file descriptors, except the standard ones
+ // the caller may have left open files (lxc-attach has this issue)
+ {
+ int fd;
+ for(fd = (int) (sysconf(_SC_OPEN_MAX) - 1); fd > 2; fd--)
+ if(fd_is_valid(fd)) close(fd);
+ }
+#endif
+
+ if(!config_loaded)
+ {
+ load_netdata_conf(NULL, 0);
+ post_conf_load(&user);
+ load_cloud_conf(0);
+ }
+
+
+ // ------------------------------------------------------------------------
+ // initialize netdata
+ {
+ char *pmax = config_get(CONFIG_SECTION_GLOBAL, "glibc malloc arena max for plugins", "1");
+ if(pmax && *pmax)
+ setenv("MALLOC_ARENA_MAX", pmax, 1);
+
+#if defined(HAVE_C_MALLOPT)
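+        // limit the number of glibc malloc arenas netdata itself uses, to reduce
+        // virtual memory fragmentation; see mallopt(3) M_ARENA_MAX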
+ i = (int)config_get_number(CONFIG_SECTION_GLOBAL, "glibc malloc arena max for netdata", 1);
+ if(i > 0)
+            mallopt(M_ARENA_MAX, i);
+#endif
+ test_clock_boottime();
+ test_clock_monotonic_coarse();
+
+ // prepare configuration environment variables for the plugins
+
+ get_netdata_configured_variables();
+ set_global_environment();
+
+ // work while we are cd into config_dir
+ // to allow the plugins refer to their config
+ // files using relative filenames
+ if(chdir(netdata_configured_user_config_dir) == -1)
+ fatal("Cannot cd to '%s'", netdata_configured_user_config_dir);
+
+ // Get execution path before switching user to avoid permission issues
+ get_netdata_execution_path();
+ }
+
+ {
+ // --------------------------------------------------------------------
+ // get the debugging flags from the configuration file
+
+ char *flags = config_get(CONFIG_SECTION_GLOBAL, "debug flags", "0x0000000000000000");
+ setenv("NETDATA_DEBUG_FLAGS", flags, 1);
+
+ debug_flags = strtoull(flags, NULL, 0);
+ debug(D_OPTIONS, "Debug flags set to '0x%" PRIX64 "'.", debug_flags);
+
+ if(debug_flags != 0) {
+ struct rlimit rl = { RLIM_INFINITY, RLIM_INFINITY };
+ if(setrlimit(RLIMIT_CORE, &rl) != 0)
+ error("Cannot request unlimited core dumps for debugging... Proceeding anyway...");
+
+#ifdef HAVE_SYS_PRCTL_H
+ prctl(PR_SET_DUMPABLE, 1, 0, 0, 0);
+#endif
+ }
+
+
+ // --------------------------------------------------------------------
+ // get log filenames and settings
+ log_init();
+ error_log_limit_unlimited();
+
+ // --------------------------------------------------------------------
+ // get the certificate and start security
+#ifdef ENABLE_HTTPS
+ security_init();
+#endif
+
+ // --------------------------------------------------------------------
+ // This is the safest place to start the SILENCERS structure
+ set_silencers_filename();
+ health_initialize_global_silencers();
+
+ // --------------------------------------------------------------------
+ // setup process signals
+
+ // block signals while initializing threads.
+ // this causes the threads to block signals.
+ signals_block();
+
+ // setup the signals we want to use
+ signals_init();
+
+ // setup threads configs
+ default_stacksize = netdata_threads_init();
+
+
+ // --------------------------------------------------------------------
+ // check which threads are enabled and initialize them
+
+ for (i = 0; static_threads[i].name != NULL ; i++) {
+ struct netdata_static_thread *st = &static_threads[i];
+
+ if(st->config_name)
+ st->enabled = config_get_boolean(st->config_section, st->config_name, st->enabled);
+
+ if(st->enabled && st->init_routine)
+ st->init_routine();
+ }
+
+
+ // --------------------------------------------------------------------
+ // create the listening sockets
+
+ web_client_api_v1_init();
+ web_server_threading_selection();
+
+ if(web_server_mode != WEB_SERVER_MODE_NONE)
+ api_listen_sockets_setup();
+ }
+
+ // initialize the log files
+ open_all_log_files();
+
+#ifdef NETDATA_INTERNAL_CHECKS
+ if(debug_flags != 0) {
+ struct rlimit rl = { RLIM_INFINITY, RLIM_INFINITY };
+ if(setrlimit(RLIMIT_CORE, &rl) != 0)
+ error("Cannot request unlimited core dumps for debugging... Proceeding anyway...");
+#ifdef HAVE_SYS_PRCTL_H
+ prctl(PR_SET_DUMPABLE, 1, 0, 0, 0);
+#endif
+ }
+#endif /* NETDATA_INTERNAL_CHECKS */
+
+ // get the max file limit
+ if(getrlimit(RLIMIT_NOFILE, &rlimit_nofile) != 0)
+ error("getrlimit(RLIMIT_NOFILE) failed");
+ else
+ info("resources control: allowed file descriptors: soft = %zu, max = %zu", (size_t)rlimit_nofile.rlim_cur, (size_t)rlimit_nofile.rlim_max);
+
+ // fork, switch user, create pid file, set process priority
+ if(become_daemon(dont_fork, user) == -1)
+ fatal("Cannot daemonize myself.");
+
+ info("netdata started on pid %d.", getpid());
+
+ // IMPORTANT: these have to run once, while single threaded
+ // but after we have switched user
+ web_files_uid();
+ web_files_gid();
+
+ netdata_threads_init_after_fork((size_t)config_get_number(CONFIG_SECTION_GLOBAL, "pthread stack size", (long)default_stacksize));
+
+    // initialize the internal registry
+ registry_init();
+ // fork the spawn server
+ spawn_init();
+ /*
+ * Libuv uv_spawn() uses SIGCHLD internally:
+ * https://github.com/libuv/libuv/blob/cc51217a317e96510fbb284721d5e6bc2af31e33/src/unix/process.c#L485
+ * and inadvertently replaces the netdata signal handler which was setup during initialization.
+ * Thusly, we must explicitly restore the signal handler for SIGCHLD.
+ * Warning: extreme care is needed when mixing and matching POSIX and libuv.
+ */
+ signals_restore_SIGCHLD();
+
+ // ------------------------------------------------------------------------
+ // initialize rrd, registry, health, rrdpush, etc.
+
+ netdata_anonymous_statistics_enabled=-1;
+    struct rrdhost_system_info *system_info = callocz(1, sizeof(struct rrdhost_system_info));
+ get_system_info(system_info);
+
+ if(rrd_init(netdata_configured_hostname, system_info))
+ fatal("Cannot initialize localhost instance with name '%s'.", netdata_configured_hostname);
+
+ // ------------------------------------------------------------------------
+ // Claim netdata agent to a cloud endpoint
+
+ if (claiming_pending_arguments)
+ claim_agent(claiming_pending_arguments);
+ load_claiming_state();
+
+ // ------------------------------------------------------------------------
+ // enable log flood protection
+
+ error_log_limit_reset();
+
+ // Load host labels
+ reload_host_labels();
+
+ // ------------------------------------------------------------------------
+ // spawn the threads
+
+ web_server_config_options();
+
+ netdata_zero_metrics_enabled = config_get_boolean_ondemand(CONFIG_SECTION_GLOBAL, "enable zero metrics", CONFIG_BOOLEAN_NO);
+
+ for (i = 0; static_threads[i].name != NULL ; i++) {
+ struct netdata_static_thread *st = &static_threads[i];
+
+ if(st->enabled) {
+ st->thread = mallocz(sizeof(netdata_thread_t));
+ debug(D_SYSTEM, "Starting thread %s.", st->name);
+ netdata_thread_create(st->thread, st->name, NETDATA_THREAD_OPTION_DEFAULT, st->start_routine, st);
+ }
+ else debug(D_SYSTEM, "Not starting thread %s.", st->name);
+ }
+
+ // ------------------------------------------------------------------------
+ // Initialize netdata agent command serving from cli and signals
+
+ commands_init();
+
+ info("netdata initialization completed. Enjoy real-time performance monitoring!");
+ netdata_ready = 1;
+
+ send_statistics("START", "-", "-");
+
+ // ------------------------------------------------------------------------
+ // Report ACLK build failure
+#ifndef ENABLE_ACLK
+ error("This agent doesn't have ACLK.");
+ char filename[FILENAME_MAX + 1];
+ snprintfz(filename, FILENAME_MAX, "%s/.aclk_report_sent", netdata_configured_varlib_dir);
+ if (netdata_anonymous_statistics_enabled > 0 && access(filename, F_OK)) { // -1 -> not initialized
+ send_statistics("ACLK_DISABLED", "-", "-");
+#ifdef ACLK_NO_LWS
+ send_statistics("BUILD_FAIL_LWS", "-", "-");
+#endif
+#ifdef ACLK_NO_LIBMOSQ
+ send_statistics("BUILD_FAIL_MOSQ", "-", "-");
+#endif
+        int fd = open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0444);
+ if (fd == -1)
+ error("Cannot create file '%s'. Please fix this.", filename);
+ else
+ close(fd);
+ }
+#endif
+
+ // ------------------------------------------------------------------------
+ // unblock signals
+
+ signals_unblock();
+
+ // ------------------------------------------------------------------------
+ // Handle signals
+
+ signals_handle();
+
+ // should never reach this point
+ // but we need it for rpmlint #2752
+ return 1;
+}
diff --git a/daemon/main.h b/daemon/main.h
new file mode 100644
index 0000000..9d9f4ef
--- /dev/null
+++ b/daemon/main.h
@@ -0,0 +1,48 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_MAIN_H
+#define NETDATA_MAIN_H 1
+
+#include "common.h"
+
+extern struct config netdata_config;
+
+#define NETDATA_MAIN_THREAD_RUNNING CONFIG_BOOLEAN_YES
+#define NETDATA_MAIN_THREAD_EXITING (CONFIG_BOOLEAN_YES + 1)
+#define NETDATA_MAIN_THREAD_EXITED CONFIG_BOOLEAN_NO
+
+/**
+ * This struct contains information about command line options.
+ */
+struct option_def {
+ /** The option character */
+ const char val;
+    /** Short description of what the option does */
+    const char *description;
+    /** Name of the argument displayed in SYNOPSIS */
+ const char *arg_name;
+ /** Default value if not set */
+ const char *default_value;
+};
+
+struct netdata_static_thread {
+ char *name; // the name of the thread as it should appear in the logs
+
+ char *config_section; // the section of netdata.conf to check if this is enabled or not
+ char *config_name; // the name of the config option to check if it is true or false
+
+ volatile sig_atomic_t enabled; // the current status of the thread
+
+ netdata_thread_t *thread; // internal use, to maintain a pointer to the created thread
+
+ void (*init_routine) (void); // an initialization function to run before spawning the thread
+ void *(*start_routine) (void *); // the threaded worker
+};
+
+extern void cancel_main_threads(void);
+extern int killpid(pid_t pid);
+extern void netdata_cleanup_and_exit(int ret) NORETURN;
+extern void send_statistics(const char *action, const char *action_result, const char *action_data);
+
+#endif /* NETDATA_MAIN_H */
diff --git a/daemon/signals.c b/daemon/signals.c
new file mode 100644
index 0000000..9e30bf1
--- /dev/null
+++ b/daemon/signals.c
@@ -0,0 +1,285 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "common.h"
+
+static int reaper_enabled = 0;
+
+typedef enum signal_action {
+ NETDATA_SIGNAL_END_OF_LIST,
+ NETDATA_SIGNAL_IGNORE,
+ NETDATA_SIGNAL_EXIT_CLEANLY,
+ NETDATA_SIGNAL_SAVE_DATABASE,
+ NETDATA_SIGNAL_REOPEN_LOGS,
+ NETDATA_SIGNAL_RELOAD_HEALTH,
+ NETDATA_SIGNAL_FATAL,
+ NETDATA_SIGNAL_CHILD,
+} SIGNAL_ACTION;
+
+static struct {
+ int signo; // the signal
+ const char *name; // the name of the signal
+ size_t count; // the number of signals received
+ SIGNAL_ACTION action; // the action to take
+} signals_waiting[] = {
+ { SIGPIPE, "SIGPIPE", 0, NETDATA_SIGNAL_IGNORE },
+ { SIGINT , "SIGINT", 0, NETDATA_SIGNAL_EXIT_CLEANLY },
+ { SIGQUIT, "SIGQUIT", 0, NETDATA_SIGNAL_EXIT_CLEANLY },
+ { SIGTERM, "SIGTERM", 0, NETDATA_SIGNAL_EXIT_CLEANLY },
+ { SIGHUP, "SIGHUP", 0, NETDATA_SIGNAL_REOPEN_LOGS },
+ { SIGUSR1, "SIGUSR1", 0, NETDATA_SIGNAL_SAVE_DATABASE },
+ { SIGUSR2, "SIGUSR2", 0, NETDATA_SIGNAL_RELOAD_HEALTH },
+ { SIGBUS, "SIGBUS", 0, NETDATA_SIGNAL_FATAL },
+ { SIGCHLD, "SIGCHLD", 0, NETDATA_SIGNAL_CHILD },
+
+ // terminator
+ { 0, "NONE", 0, NETDATA_SIGNAL_END_OF_LIST }
+};
+
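+// Runs in signal context: only count the received signal (and, for fatal ones,
+// write a note directly to stderr); the actual handling happens later in
+// signals_handle().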
+static void signal_handler(int signo) {
+ // find the entry in the list
+ int i;
+ for(i = 0; signals_waiting[i].action != NETDATA_SIGNAL_END_OF_LIST ; i++) {
+ if(unlikely(signals_waiting[i].signo == signo)) {
+ signals_waiting[i].count++;
+
+ if(signals_waiting[i].action == NETDATA_SIGNAL_FATAL) {
+ char buffer[200 + 1];
+ snprintfz(buffer, 200, "\nSIGNAL HANLDER: received: %s. Oops! This is bad!\n", signals_waiting[i].name);
+ if(write(STDERR_FILENO, buffer, strlen(buffer)) == -1) {
+ // nothing to do - we cannot write but there is no way to complain about it
+ ;
+ }
+ }
+
+ return;
+ }
+ }
+}
+
+void signals_block(void) {
+ sigset_t sigset;
+ sigfillset(&sigset);
+
+ if(pthread_sigmask(SIG_BLOCK, &sigset, NULL) == -1)
+ error("SIGNAL: Could not block signals for threads");
+}
+
+void signals_unblock(void) {
+ sigset_t sigset;
+ sigfillset(&sigset);
+
+ if(pthread_sigmask(SIG_UNBLOCK, &sigset, NULL) == -1) {
+ error("SIGNAL: Could not unblock signals for threads");
+ }
+}
+
+void signals_init(void) {
+ // Catch signals which we want to use
+ struct sigaction sa;
+ sa.sa_flags = 0;
+
+ // Enable process tracking / reaper if running as init (pid == 1).
+ // This prevents zombie processes when running in a container.
+ if (getpid() == 1) {
+ info("SIGNAL: Enabling reaper");
+ myp_init();
+ reaper_enabled = 1;
+ } else {
+ info("SIGNAL: Not enabling reaper");
+ }
+
+ // ignore all signals while we run in a signal handler
+ sigfillset(&sa.sa_mask);
+
+ int i;
+ for (i = 0; signals_waiting[i].action != NETDATA_SIGNAL_END_OF_LIST; i++) {
+ switch (signals_waiting[i].action) {
+ case NETDATA_SIGNAL_IGNORE:
+ sa.sa_handler = SIG_IGN;
+ break;
+ case NETDATA_SIGNAL_CHILD:
+ if (reaper_enabled == 0)
+ continue;
+ // FALLTHROUGH
+ default:
+ sa.sa_handler = signal_handler;
+ break;
+ }
+
+ if(sigaction(signals_waiting[i].signo, &sa, NULL) == -1)
+ error("SIGNAL: Failed to change signal handler for: %s", signals_waiting[i].name);
+ }
+}
+
+void signals_restore_SIGCHLD(void)
+{
+ struct sigaction sa;
+
+ if (reaper_enabled == 0)
+ return;
+
+ sa.sa_flags = 0;
+ sigfillset(&sa.sa_mask);
+ sa.sa_handler = signal_handler;
+
+ if(sigaction(SIGCHLD, &sa, NULL) == -1)
+ error("SIGNAL: Failed to change signal handler for: SIGCHLD");
+}
+
+void signals_reset(void) {
+ struct sigaction sa;
+ sigemptyset(&sa.sa_mask);
+ sa.sa_handler = SIG_DFL;
+ sa.sa_flags = 0;
+
+ int i;
+ for (i = 0; signals_waiting[i].action != NETDATA_SIGNAL_END_OF_LIST; i++) {
+ if(sigaction(signals_waiting[i].signo, &sa, NULL) == -1)
+ error("SIGNAL: Failed to reset signal handler for: %s", signals_waiting[i].name);
+ }
+
+ if (reaper_enabled == 1)
+ myp_free();
+}
+
+// reap_child reaps the child identified by pid.
+static void reap_child(pid_t pid) {
+ siginfo_t i;
+
+ errno = 0;
+ debug(D_CHILDS, "SIGNAL: Reaping pid: %d...", pid);
+ if (waitid(P_PID, (id_t)pid, &i, WEXITED|WNOHANG) == -1) {
+ if (errno != ECHILD)
+ error("SIGNAL: Failed to wait for: %d", pid);
+ else
+ debug(D_CHILDS, "SIGNAL: Already reaped: %d", pid);
+ return;
+ } else if (i.si_pid == 0) {
+ // Process didn't exit, this shouldn't happen.
+ return;
+ }
+
+ switch (i.si_code) {
+ case CLD_EXITED:
+ debug(D_CHILDS, "SIGNAL: Child %d exited: %d", pid, i.si_status);
+ break;
+ case CLD_KILLED:
+ debug(D_CHILDS, "SIGNAL: Child %d killed by signal: %d", pid, i.si_status);
+ break;
+ case CLD_DUMPED:
+ debug(D_CHILDS, "SIGNAL: Child %d dumped core by signal: %d", pid, i.si_status);
+ break;
+ case CLD_STOPPED:
+ debug(D_CHILDS, "SIGNAL: Child %d stopped by signal: %d", pid, i.si_status);
+ break;
+ case CLD_TRAPPED:
+ debug(D_CHILDS, "SIGNAL: Child %d trapped by signal: %d", pid, i.si_status);
+ break;
+ case CLD_CONTINUED:
+ debug(D_CHILDS, "SIGNAL: Child %d continued by signal: %d", pid, i.si_status);
+ break;
+ default:
+ debug(D_CHILDS, "SIGNAL: Child %d gave us a SIGCHLD with code %d and status %d.", pid, i.si_code, i.si_status);
+ }
+}
+
+// reap_children reaps all pending children which are not managed by myp.
+static void reap_children() {
+ siginfo_t i;
+
+    while (1) {
+ // Identify which process caused the signal so we can determine
+ // if we need to reap a re-parented process.
+ i.si_pid = 0;
+ if (waitid(P_ALL, (id_t)0, &i, WEXITED|WNOHANG|WNOWAIT) == -1) {
+ if (errno != ECHILD) // This shouldn't happen with WNOHANG but does.
+ error("SIGNAL: Failed to wait");
+ return;
+ } else if (i.si_pid == 0) {
+ // No child exited.
+ return;
+ } else if (myp_reap(i.si_pid) == 0) {
+ // myp managed, sleep for a short time to avoid busy wait while
+ // this is handled by myp.
+ usleep(10000);
+ } else {
+ // Unknown process, likely a re-parented child, reap it.
+ reap_child(i.si_pid);
+ }
+ }
+}
+
+void signals_handle(void) {
+ while(1) {
+
+ // pause() causes the calling process (or thread) to sleep until a signal
+ // is delivered that either terminates the process or causes the invocation
+ // of a signal-catching function.
+ if(pause() == -1 && errno == EINTR) {
+
+ // loop once, but keep looping while signals are coming in
+ // this is needed because a few operations may take some time
+ // so we need to check for new signals before pausing again
+ int found = 1;
+ while(found) {
+ found = 0;
+
+ // execute the actions of the signals
+ int i;
+ for (i = 0; signals_waiting[i].action != NETDATA_SIGNAL_END_OF_LIST; i++) {
+ if (signals_waiting[i].count) {
+ found = 1;
+ signals_waiting[i].count = 0;
+ const char *name = signals_waiting[i].name;
+
+ switch (signals_waiting[i].action) {
+ case NETDATA_SIGNAL_RELOAD_HEALTH:
+ error_log_limit_unlimited();
+ info("SIGNAL: Received %s. Reloading HEALTH configuration...", name);
+ error_log_limit_reset();
+ execute_command(CMD_RELOAD_HEALTH, NULL, NULL);
+ break;
+
+ case NETDATA_SIGNAL_SAVE_DATABASE:
+ error_log_limit_unlimited();
+ info("SIGNAL: Received %s. Saving databases...", name);
+ error_log_limit_reset();
+ execute_command(CMD_SAVE_DATABASE, NULL, NULL);
+ break;
+
+ case NETDATA_SIGNAL_REOPEN_LOGS:
+ error_log_limit_unlimited();
+ info("SIGNAL: Received %s. Reopening all log files...", name);
+ error_log_limit_reset();
+ execute_command(CMD_REOPEN_LOGS, NULL, NULL);
+ break;
+
+ case NETDATA_SIGNAL_EXIT_CLEANLY:
+ error_log_limit_unlimited();
+ info("SIGNAL: Received %s. Cleaning up to exit...", name);
+ commands_exit();
+ netdata_cleanup_and_exit(0);
+ exit(0);
+ break;
+
+ case NETDATA_SIGNAL_FATAL:
+ fatal("SIGNAL: Received %s. netdata now exits.", name);
+ break;
+
+ case NETDATA_SIGNAL_CHILD:
+ debug(D_CHILDS, "SIGNAL: Received %s. Reaping...", name);
+ reap_children();
+ break;
+
+ default:
+ info("SIGNAL: Received %s. No signal handler configured. Ignoring it.", name);
+ break;
+ }
+ }
+ }
+ }
+ }
+ else
+ error("SIGNAL: pause() returned but it was not interrupted by a signal.");
+ }
+}
diff --git a/daemon/signals.h b/daemon/signals.h
new file mode 100644
index 0000000..3fa2b0f
--- /dev/null
+++ b/daemon/signals.h
@@ -0,0 +1,13 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_SIGNALS_H
+#define NETDATA_SIGNALS_H 1
+
+extern void signals_init(void);
+extern void signals_block(void);
+extern void signals_unblock(void);
+extern void signals_restore_SIGCHLD(void);
+extern void signals_reset(void);
+extern void signals_handle(void) NORETURN;
+
+#endif //NETDATA_SIGNALS_H
diff --git a/daemon/system-info.sh b/daemon/system-info.sh
new file mode 100755
index 0000000..05d8667
--- /dev/null
+++ b/daemon/system-info.sh
@@ -0,0 +1,406 @@
+#!/usr/bin/env sh
+
+# -------------------------------------------------------------------------------------------------
+# detect the kernel
+
+KERNEL_NAME="$(uname -s)"
+KERNEL_VERSION="$(uname -r)"
+ARCHITECTURE="$(uname -m)"
+
+# -------------------------------------------------------------------------------------------------
+# detect the virtualization and possibly the container technology
+
+CONTAINER="unknown"
+CONT_DETECTION="none"
+
+if [ -z "${VIRTUALIZATION}" ]; then
+ VIRTUALIZATION="unknown"
+ VIRT_DETECTION="none"
+
+ if [ -n "$(command -v systemd-detect-virt 2> /dev/null)" ]; then
+ VIRTUALIZATION="$(systemd-detect-virt -v)"
+ VIRT_DETECTION="systemd-detect-virt"
+ CONTAINER="$(systemd-detect-virt -c)"
+ CONT_DETECTION="systemd-detect-virt"
+ else
+ if grep -q "^flags.*hypervisor" /proc/cpuinfo 2> /dev/null; then
+ VIRTUALIZATION="hypervisor"
+ VIRT_DETECTION="/proc/cpuinfo"
+ elif [ -n "$(command -v dmidecode)" ] && dmidecode -s system-product-name 2> /dev/null | grep -q "VMware\|Virtual\|KVM\|Bochs"; then
+ VIRTUALIZATION="$(dmidecode -s system-product-name)"
+ VIRT_DETECTION="dmidecode"
+ else
+ VIRTUALIZATION="none"
+ fi
+ fi
+ if [ -z "${VIRTUALIZATION}" ]; then
+ # Output from the command is outside of spec
+ VIRTUALIZATION="unknown"
+ fi
+else
+ # Passed from outside - probably in docker run
+ VIRT_DETECTION="provided"
+fi
+
+# -------------------------------------------------------------------------------------------------
+# detect containers with heuristics
+
+if [ "${CONTAINER}" = "unknown" ]; then
+ if [ -f /proc/1/sched ]; then
+ IFS='(, ' read -r process _ < /proc/1/sched
+ if [ "${process}" = "netdata" ]; then
+ CONTAINER="container"
+ CONT_DETECTION="process"
+ fi
+ fi
+ # ubuntu and debian supply /bin/running-in-container
+ # https://www.apt-browse.org/browse/ubuntu/trusty/main/i386/upstart/1.12.1-0ubuntu4/file/bin/running-in-container
+ if /bin/running-in-container > /dev/null 2>&1; then
+ CONTAINER="container"
+ CONT_DETECTION="/bin/running-in-container"
+ fi
+
+ # lxc sets environment variable 'container'
+ #shellcheck disable=SC2154
+ if [ -n "${container}" ]; then
+ CONTAINER="lxc"
+ CONT_DETECTION="containerenv"
+ fi
+
+ # docker creates /.dockerenv
+ # http://stackoverflow.com/a/25518345
+ if [ -f "/.dockerenv" ]; then
+ CONTAINER="docker"
+ CONT_DETECTION="dockerenv"
+ fi
+
+fi
+
+# -------------------------------------------------------------------------------------------------
+# detect the operating system
+
+# Initially assume all OS detection values describe a container; they are moved to the host fields later if we are running on bare metal
+
+CONTAINER_OS_DETECTION="unknown"
+CONTAINER_NAME="unknown"
+CONTAINER_VERSION="unknown"
+CONTAINER_VERSION_ID="unknown"
+CONTAINER_ID="unknown"
+CONTAINER_ID_LIKE="unknown"
+
+if [ "${KERNEL_NAME}" = "Darwin" ]; then
+ CONTAINER_ID=$(sw_vers -productName)
+ CONTAINER_ID_LIKE="mac"
+ CONTAINER_NAME="mac"
+ CONTAINER_VERSION=$(sw_vers -productVersion)
+ CONTAINER_OS_DETECTION="sw_vers"
+elif [ "${KERNEL_NAME}" = "FreeBSD" ]; then
+ CONTAINER_ID="FreeBSD"
+ CONTAINER_ID_LIKE="FreeBSD"
+ CONTAINER_NAME="FreeBSD"
+ CONTAINER_OS_DETECTION="uname"
+ CONTAINER_VERSION=$(uname -r)
+ KERNEL_VERSION=$(uname -K)
+else
+ if [ -f "/etc/os-release" ]; then
+ eval "$(grep -E "^(NAME|ID|ID_LIKE|VERSION|VERSION_ID)=" < /etc/os-release | sed 's/^/CONTAINER_/')"
+ CONTAINER_OS_DETECTION="/etc/os-release"
+ fi
+
+ # shellcheck disable=SC2153
+ if [ "${CONTAINER_NAME}" = "unknown" ] || [ "${CONTAINER_VERSION}" = "unknown" ] || [ "${CONTAINER_ID}" = "unknown" ]; then
+ if [ -f "/etc/lsb-release" ]; then
+ if [ "${CONTAINER_OS_DETECTION}" = "unknown" ]; then
+ CONTAINER_OS_DETECTION="/etc/lsb-release"
+ else
+ CONTAINER_OS_DETECTION="Mixed"
+ fi
+ DISTRIB_ID="unknown"
+ DISTRIB_RELEASE="unknown"
+ DISTRIB_CODENAME="unknown"
+ eval "$(grep -E "^(DISTRIB_ID|DISTRIB_RELEASE|DISTRIB_CODENAME)=" < /etc/lsb-release)"
+ if [ "${CONTAINER_NAME}" = "unknown" ]; then CONTAINER_NAME="${DISTRIB_ID}"; fi
+ if [ "${CONTAINER_VERSION}" = "unknown" ]; then CONTAINER_VERSION="${DISTRIB_RELEASE}"; fi
+ if [ "${CONTAINER_ID}" = "unknown" ]; then CONTAINER_ID="${DISTRIB_CODENAME}"; fi
+ fi
+ if [ -n "$(command -v lsb_release 2> /dev/null)" ]; then
+ if [ "${CONTAINER_OS_DETECTION}" = "unknown" ]; then
+ CONTAINER_OS_DETECTION="lsb_release"
+ else
+ CONTAINER_OS_DETECTION="Mixed"
+ fi
+ if [ "${CONTAINER_NAME}" = "unknown" ]; then CONTAINER_NAME="$(lsb_release -is 2> /dev/null)"; fi
+ if [ "${CONTAINER_VERSION}" = "unknown" ]; then CONTAINER_VERSION="$(lsb_release -rs 2> /dev/null)"; fi
+ if [ "${CONTAINER_ID}" = "unknown" ]; then CONTAINER_ID="$(lsb_release -cs 2> /dev/null)"; fi
+ fi
+ fi
+fi
+
+# If Netdata is not running in a container then use the local detection as the host
+HOST_OS_DETECTION="unknown"
+HOST_NAME="unknown"
+HOST_VERSION="unknown"
+HOST_VERSION_ID="unknown"
+HOST_ID="unknown"
+HOST_ID_LIKE="unknown"
+
+# 'systemd-detect-virt' returns 'none' if there is no hardware/container virtualization.
+if [ "${CONTAINER}" = "unknown" ] || [ "${CONTAINER}" = "none" ]; then
+ for v in NAME ID ID_LIKE VERSION VERSION_ID OS_DETECTION; do
+ eval "HOST_$v=\$CONTAINER_$v; CONTAINER_$v=none"
+ done
+else
+ # Otherwise try and use a user-supplied bind-mount into the container to resolve the host details
+ if [ -e "/host/etc/os-release" ]; then
+ OS_DETECTION="/etc/os-release"
+ eval "$(grep -E "^(NAME|ID|ID_LIKE|VERSION|VERSION_ID)=" < /host/etc/os-release | sed 's/^/HOST_/')"
+ HOST_OS_DETECTION="/host/etc/os-release"
+ fi
+ if [ "${HOST_NAME}" = "unknown" ] || [ "${HOST_VERSION}" = "unknown" ] || [ "${HOST_ID}" = "unknown" ]; then
+ if [ -f "/host/etc/lsb-release" ]; then
+ if [ "${HOST_OS_DETECTION}" = "unknown" ]; then
+ HOST_OS_DETECTION="/etc/lsb-release"
+ else
+ HOST_OS_DETECTION="Mixed"
+ fi
+ DISTRIB_ID="unknown"
+ DISTRIB_RELEASE="unknown"
+ DISTRIB_CODENAME="unknown"
+ eval "$(grep -E "^(DISTRIB_ID|DISTRIB_RELEASE|DISTRIB_CODENAME)=" < /etc/lsb-release)"
+ if [ "${HOST_NAME}" = "unknown" ]; then HOST_NAME="${DISTRIB_ID}"; fi
+ if [ "${HOST_VERSION}" = "unknown" ]; then HOST_VERSION="${DISTRIB_RELEASE}"; fi
+ if [ "${HOST_ID}" = "unknown" ]; then HOST_ID="${DISTRIB_CODENAME}"; fi
+ fi
+ fi
+fi
+
+# -------------------------------------------------------------------------------------------------
+# Detect information about the CPU
+
+LCPU_COUNT="unknown"
+CPU_MODEL="unknown"
+CPU_VENDOR="unknown"
+CPU_FREQ="unknown"
+CPU_INFO_SOURCE="none"
+
+possible_cpu_freq=""
+nproc="$(command -v nproc)"
+lscpu="$(command -v lscpu)"
+lscpu_output=""
+dmidecode="$(command -v dmidecode)"
+dmidecode_output=""
+
+if [ -n "${lscpu}" ] && lscpu > /dev/null 2>&1; then
+ lscpu_output="$(LC_NUMERIC=C ${lscpu} 2> /dev/null)"
+ CPU_INFO_SOURCE="lscpu"
+ LCPU_COUNT="$(echo "${lscpu_output}" | grep "^CPU(s):" | cut -f 2 -d ':' | sed -e 's/^[[:space:]]*//' -e 's/[[:space:]]*$//')"
+ CPU_VENDOR="$(echo "${lscpu_output}" | grep "^Vendor ID:" | cut -f 2 -d ':' | sed -e 's/^[[:space:]]*//' -e 's/[[:space:]]*$//')"
+ CPU_MODEL="$(echo "${lscpu_output}" | grep "^Model name:" | cut -f 2 -d ':' | sed -e 's/^[[:space:]]*//' -e 's/[[:space:]]*$//')"
+ possible_cpu_freq="$(echo "${lscpu_output}" | grep -F "CPU max MHz:" | cut -f 2 -d ':' | sed -e 's/^[[:space:]]*//' -e 's/[[:space:]]*$//' | grep -o '^[0-9]*') MHz"
+ if [ "${possible_cpu_freq}" = " MHz" ]; then
+ possible_cpu_freq="$(echo "${lscpu_output}" | grep -F "CPU MHz:" | cut -f 2 -d ':' | sed -e 's/^[[:space:]]*//' -e 's/[[:space:]]*$//' | grep -o '^[0-9]*') MHz"
+ fi
+elif [ -n "${dmidecode}" ] && dmidecode -t processor > /dev/null 2>&1; then
+ dmidecode_output="$(${dmidecode} -t processor 2> /dev/null)"
+ CPU_INFO_SOURCE="dmidecode"
+ LCPU_COUNT="$(echo "${dmidecode_output}" | grep -F "Thread Count:" | cut -f 2 -d ':' | sed -e 's/^[[:space:]]*//' -e 's/[[:space:]]*$//')"
+ CPU_VENDOR="$(echo "${dmidecode_output}" | grep -F "Manufacturer:" | cut -f 2 -d ':' | sed -e 's/^[[:space:]]*//' -e 's/[[:space:]]*$//')"
+ CPU_MODEL="$(echo "${dmidecode_output}" | grep -F "Version:" | cut -f 2 -d ':' | sed -e 's/^[[:space:]]*//' -e 's/[[:space:]]*$//')"
+ possible_cpu_freq="$(echo "${dmidecode_output}" | grep -F "Current Speed:" | cut -f 2 -d ':' | sed -e 's/^[[:space:]]*//' -e 's/[[:space:]]*$//')"
+else
+ if [ -n "${nproc}" ]; then
+ CPU_INFO_SOURCE="nproc"
+ LCPU_COUNT="$(${nproc})"
+ elif [ "${KERNEL_NAME}" = FreeBSD ]; then
+ CPU_INFO_SOURCE="sysctl"
+ LCPU_COUNT="$(sysctl -n kern.smp.cpus)"
+ elif [ -d /sys/devices/system/cpu ]; then
+ CPU_INFO_SOURCE="sysfs"
+ # This is potentially more accurate than checking `/proc/cpuinfo`.
+ LCPU_COUNT="$(find /sys/devices/system/cpu -mindepth 1 -maxdepth 1 -type d -name 'cpu*' | grep -cEv 'idle|freq')"
+ elif [ -r /proc/cpuinfo ]; then
+ CPU_INFO_SOURCE="procfs"
+ LCPU_COUNT="$(grep -c ^processor /proc/cpuinfo)"
+ fi
+
+ # If we have GNU uname, we can use that to get CPU info (probably).
+ if uname --version 2> /dev/null | grep -qF 'GNU coreutils'; then
+ CPU_INFO_SOURCE="${CPU_INFO_SOURCE} uname"
+ CPU_MODEL="$(uname -p)"
+ CPU_VENDOR="$(uname -i)"
+ elif [ "${KERNEL_NAME}" = FreeBSD ]; then
+ if (echo "${CPU_INFO_SOURCE}" | grep -qv sysctl); then
+ CPU_INFO_SOURCE="${CPU_INFO_SOURCE} sysctl"
+ fi
+
+ CPU_MODEL="$(sysctl -n hw.model)"
+ elif [ -r /proc/cpuinfo ]; then
+ if (echo "${CPU_INFO_SOURCE}" | grep -qv procfs); then
+ CPU_INFO_SOURCE="${CPU_INFO_SOURCE} procfs"
+ fi
+
+ CPU_MODEL="$(grep -F "model name" /proc/cpuinfo | head -n 1 | cut -f 2 -d ':' | sed -e 's/^[[:space:]]*//' -e 's/[[:space:]]*$//')"
+ CPU_VENDOR="$(grep -F "vendor_id" /proc/cpuinfo | head -n 1 | cut -f 2 -d ':' | sed -e 's/^[[:space:]]*//' -e 's/[[:space:]]*$//')"
+ fi
+fi
+
+if [ -r /sys/devices/system/cpu/cpu0/cpufreq/base_frequency ]; then
+ if (echo "${CPU_INFO_SOURCE}" | grep -qv sysfs); then
+ CPU_INFO_SOURCE="${CPU_INFO_SOURCE} sysfs"
+ fi
+
+ CPU_FREQ="$(cat /sys/devices/system/cpu/cpu0/cpufreq/base_frequency)"
+elif [ -n "${possible_cpu_freq}" ]; then
+ CPU_FREQ="${possible_cpu_freq}"
+elif [ -r /sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_max_freq ]; then
+ if (echo "${CPU_INFO_SOURCE}" | grep -qv sysfs); then
+ CPU_INFO_SOURCE="${CPU_INFO_SOURCE} sysfs"
+ fi
+
+ CPU_FREQ="$(cat /sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_max_freq)"
+fi
+
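+# Normalize CPU_FREQ to Hz when the detected value carries a GHz/MHz/KHz suffix.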
+freq_units="$(echo "${CPU_FREQ}" | cut -f 2 -d ' ')"
+
+case "${freq_units}" in
+ GHz)
+ value="$(echo "${CPU_FREQ}" | cut -f 1 -d ' ')"
+ CPU_FREQ="$((value * 1000 * 1000 * 1000))"
+ ;;
+ MHz)
+ value="$(echo "${CPU_FREQ}" | cut -f 1 -d ' ')"
+ CPU_FREQ="$((value * 1000 * 1000))"
+ ;;
+ KHz)
+ value="$(echo "${CPU_FREQ}" | cut -f 1 -d ' ')"
+ CPU_FREQ="$((value * 1000))"
+ ;;
+ *) ;;
+
+esac
+
+# -------------------------------------------------------------------------------------------------
+# Detect the total system RAM
+
+TOTAL_RAM="unknown"
+RAM_DETECTION="none"
+
+if [ "${KERNEL_NAME}" = FreeBSD ]; then
+ RAM_DETECTION="sysctl"
+ TOTAL_RAM="$(sysctl -n hw.physmem)"
+elif [ "${KERNEL_NAME}" = Darwin ]; then
+ RAM_DETECTION="sysctl"
+ TOTAL_RAM="$(sysctl -n hw.physmem)"
+elif [ -r /proc/meminfo ]; then
+ RAM_DETECTION="procfs"
+ TOTAL_RAM="$(grep -F MemTotal /proc/meminfo | cut -f 2 -d ':' | sed -e 's/^[[:space:]]*//' -e 's/[[:space:]]*$//' | cut -f 1 -d ' ')"
+ TOTAL_RAM="$((TOTAL_RAM * 1024))"
+fi
+
+# -------------------------------------------------------------------------------------------------
+# Detect the total system disk space
+
+DISK_SIZE="unknown"
+DISK_DETECTION="none"
+
+if [ "${KERNEL_NAME}" = "Darwin" ]; then
+ types='hfs'
+
+ if (lsvfs | grep -q apfs); then
+ types="${types},apfs"
+ fi
+
+ if (lsvfs | grep -q ufs); then
+ types="${types},ufs"
+ fi
+
+ DISK_DETECTION="df"
+ DISK_SIZE=$(($(/bin/df -k -t ${types} | tail -n +2 | sed -E 's/\/dev\/disk([[:digit:]]*)s[[:digit:]]*/\/dev\/disk\1/g' | sort -k 1 | awk -F ' ' '{s=$NF;for(i=NF-1;i>=1;i--)s=s FS $i;print s}' | uniq -f 9 | awk '{print $8}' | tr '\n' '+' | rev | cut -f 2- -d '+' | rev) * 1024))
+elif [ "${KERNEL_NAME}" = FreeBSD ]; then
+ types='ufs'
+
+ if (lsvfs | grep -q zfs); then
+ types="${types},zfs"
+ fi
+
+ DISK_DETECTION="df"
+ total="$(df -t ${types} -c -k | tail -n 1 | awk '{print $2}')"
+ DISK_SIZE="$((total * 1024))"
+else
+ if [ -d /sys/block ] && [ -r /proc/devices ]; then
+ dev_major_whitelist=''
+
+ # This is a list of device names used for block storage devices.
+    # These translate to the prefixes of files in `/dev` indicating the device type.
+ # They are sorted by lowest used device major number, with dynamically assigned ones at the end.
+ # We use this to look up device major numbers in `/proc/devices`
+ device_names='hd sd mfm ad ftl pd nftl dasd intfl mmcblk ub xvd rfd vbd nvme'
+
+ for name in ${device_names}; do
+ if grep -qE " ${name}\$" /proc/devices; then
+ dev_major_whitelist="${dev_major_whitelist}:$(grep -E "${name}\$" /proc/devices | sed -e 's/^[[:space:]]*//' | cut -f 1 -d ' ' | tr '\n' ':'):"
+ fi
+ done
+
+ DISK_DETECTION="sysfs"
+ DISK_SIZE="0"
+ for disk in /sys/block/*; do
+ if [ -r "${disk}/size" ] \
+ && (echo "${dev_major_whitelist}" | grep -q ":$(cut -f 1 -d ':' "${disk}/dev"):") \
+ && grep -qv 1 "${disk}/removable"; then
+ size="$(($(cat "${disk}/size") * 512))"
+ DISK_SIZE="$((DISK_SIZE + size))"
+ fi
+ done
+ elif df --version 2> /dev/null | grep -qF "GNU coreutils"; then
+ DISK_DETECTION="df"
+ DISK_SIZE=$(($(df -x tmpfs -x devtmpfs -x squashfs -l -B1 --output=source,size | tail -n +2 | sort -u -k 1 | awk '{print $2}' | tr '\n' '+' | head -c -1)))
+ else
+ DISK_DETECTION="df"
+ include_fs_types="ext*|btrfs|xfs|jfs|reiser*|zfs"
+ DISK_SIZE=$(($(df -T -P | tail -n +2 | sort -u -k 1 | grep "${include_fs_types}" | awk '{print $3}' | tr '\n' '+' | head -c -1) * 1024))
+ fi
+fi
+
+# -------------------------------------------------------------------------------------------------
+# Detect whether the node is a Kubernetes node
+
+HOST_IS_K8S_NODE="false"
+
+if [ -n "${KUBERNETES_SERVICE_HOST}" ] && [ -n "${KUBERNETES_SERVICE_PORT}" ]; then
+ # These env vars are set for every container managed by k8s.
+ HOST_IS_K8S_NODE="true"
+elif pgrep "kubelet"; then
+ # The kubelet is the primary "node agent" that runs on each node.
+ HOST_IS_K8S_NODE="true"
+fi
+
+echo "NETDATA_CONTAINER_OS_NAME=${CONTAINER_NAME}"
+echo "NETDATA_CONTAINER_OS_ID=${CONTAINER_ID}"
+echo "NETDATA_CONTAINER_OS_ID_LIKE=${CONTAINER_ID_LIKE}"
+echo "NETDATA_CONTAINER_OS_VERSION=${CONTAINER_VERSION}"
+echo "NETDATA_CONTAINER_OS_VERSION_ID=${CONTAINER_VERSION_ID}"
+echo "NETDATA_CONTAINER_OS_DETECTION=${CONTAINER_OS_DETECTION}"
+echo "NETDATA_HOST_OS_NAME=${HOST_NAME}"
+echo "NETDATA_HOST_OS_ID=${HOST_ID}"
+echo "NETDATA_HOST_OS_ID_LIKE=${HOST_ID_LIKE}"
+echo "NETDATA_HOST_OS_VERSION=${HOST_VERSION}"
+echo "NETDATA_HOST_OS_VERSION_ID=${HOST_VERSION_ID}"
+echo "NETDATA_HOST_OS_DETECTION=${HOST_OS_DETECTION}"
+echo "NETDATA_HOST_IS_K8S_NODE=${HOST_IS_K8S_NODE}"
+echo "NETDATA_SYSTEM_KERNEL_NAME=${KERNEL_NAME}"
+echo "NETDATA_SYSTEM_KERNEL_VERSION=${KERNEL_VERSION}"
+echo "NETDATA_SYSTEM_ARCHITECTURE=${ARCHITECTURE}"
+echo "NETDATA_SYSTEM_VIRTUALIZATION=${VIRTUALIZATION}"
+echo "NETDATA_SYSTEM_VIRT_DETECTION=${VIRT_DETECTION}"
+echo "NETDATA_SYSTEM_CONTAINER=${CONTAINER}"
+echo "NETDATA_SYSTEM_CONTAINER_DETECTION=${CONT_DETECTION}"
+echo "NETDATA_SYSTEM_CPU_LOGICAL_CPU_COUNT=${LCPU_COUNT}"
+echo "NETDATA_SYSTEM_CPU_VENDOR=${CPU_VENDOR}"
+echo "NETDATA_SYSTEM_CPU_MODEL=${CPU_MODEL}"
+echo "NETDATA_SYSTEM_CPU_FREQ=${CPU_FREQ}"
+echo "NETDATA_SYSTEM_CPU_DETECTION=${CPU_INFO_SOURCE}"
+echo "NETDATA_SYSTEM_TOTAL_RAM=${TOTAL_RAM}"
+echo "NETDATA_SYSTEM_RAM_DETECTION=${RAM_DETECTION}"
+echo "NETDATA_SYSTEM_TOTAL_DISK_SIZE=${DISK_SIZE}"
+echo "NETDATA_SYSTEM_DISK_DETECTION=${DISK_DETECTION}"
diff --git a/daemon/unit_test.c b/daemon/unit_test.c
new file mode 100644
index 0000000..e6a69e3
--- /dev/null
+++ b/daemon/unit_test.c
@@ -0,0 +1,2235 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "common.h"
+
+static int check_number_printing(void) {
+ struct {
+ calculated_number n;
+ const char *correct;
+ } values[] = {
+ { .n = 0, .correct = "0" },
+ { .n = 0.0000001, .correct = "0.0000001" },
+ { .n = 0.00000009, .correct = "0.0000001" },
+ { .n = 0.000000001, .correct = "0" },
+ { .n = 99.99999999999999999, .correct = "100" },
+ { .n = -99.99999999999999999, .correct = "-100" },
+ { .n = 123.4567890123456789, .correct = "123.456789" },
+ { .n = 9999.9999999, .correct = "9999.9999999" },
+ { .n = -9999.9999999, .correct = "-9999.9999999" },
+ { .n = 0, .correct = NULL },
+ };
+
+ char netdata[50], system[50];
+ int i, failed = 0;
+ for(i = 0; values[i].correct ; i++) {
+ print_calculated_number(netdata, values[i].n);
+ snprintfz(system, 49, "%0.12" LONG_DOUBLE_MODIFIER, (LONG_DOUBLE)values[i].n);
+
+ int ok = 1;
+ if(strcmp(netdata, values[i].correct) != 0) {
+ ok = 0;
+ failed++;
+ }
+
+ fprintf(stderr, "'%s' (system) printed as '%s' (netdata): %s\n", system, netdata, ok?"OK":"FAILED");
+ }
+
+ if(failed) return 1;
+ return 0;
+}
+
+static int check_rrdcalc_comparisons(void) {
+ RRDCALC_STATUS a, b;
+
+    // make sure a zeroed status (as calloc() would produce) equals UNINITIALIZED
+ memset(&a, 0, sizeof(RRDCALC_STATUS));
+ if(a != RRDCALC_STATUS_UNINITIALIZED) {
+ fprintf(stderr, "%s is not zero.\n", rrdcalc_status2string(RRDCALC_STATUS_UNINITIALIZED));
+ return 1;
+ }
+
+ a = RRDCALC_STATUS_REMOVED;
+ b = RRDCALC_STATUS_UNDEFINED;
+ if(!(a < b)) {
+ fprintf(stderr, "%s is not less than %s\n", rrdcalc_status2string(a), rrdcalc_status2string(b));
+ return 1;
+ }
+
+ a = RRDCALC_STATUS_UNDEFINED;
+ b = RRDCALC_STATUS_UNINITIALIZED;
+ if(!(a < b)) {
+ fprintf(stderr, "%s is not less than %s\n", rrdcalc_status2string(a), rrdcalc_status2string(b));
+ return 1;
+ }
+
+ a = RRDCALC_STATUS_UNINITIALIZED;
+ b = RRDCALC_STATUS_CLEAR;
+ if(!(a < b)) {
+ fprintf(stderr, "%s is not less than %s\n", rrdcalc_status2string(a), rrdcalc_status2string(b));
+ return 1;
+ }
+
+ a = RRDCALC_STATUS_CLEAR;
+ b = RRDCALC_STATUS_RAISED;
+ if(!(a < b)) {
+ fprintf(stderr, "%s is not less than %s\n", rrdcalc_status2string(a), rrdcalc_status2string(b));
+ return 1;
+ }
+
+ a = RRDCALC_STATUS_RAISED;
+ b = RRDCALC_STATUS_WARNING;
+ if(!(a < b)) {
+ fprintf(stderr, "%s is not less than %s\n", rrdcalc_status2string(a), rrdcalc_status2string(b));
+ return 1;
+ }
+
+ a = RRDCALC_STATUS_WARNING;
+ b = RRDCALC_STATUS_CRITICAL;
+ if(!(a < b)) {
+ fprintf(stderr, "%s is not less than %s\n", rrdcalc_status2string(a), rrdcalc_status2string(b));
+ return 1;
+ }
+
+ fprintf(stderr, "RRDCALC_STATUSes are sortable.\n");
+
+ return 0;
+}
+
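+// Round-trip a value through pack/unpack and through print/parse, and fail when
+// the accuracy loss of either path exceeds ACCURACY_LOSS_ACCEPTED_PERCENT.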
+int check_storage_number(calculated_number n, int debug) {
+ char buffer[100];
+ uint32_t flags = SN_EXISTS;
+
+ storage_number s = pack_storage_number(n, flags);
+ calculated_number d = unpack_storage_number(s);
+
+ if(!does_storage_number_exist(s)) {
+ fprintf(stderr, "Exists flags missing for number " CALCULATED_NUMBER_FORMAT "!\n", n);
+ return 5;
+ }
+
+ calculated_number ddiff = d - n;
+ calculated_number dcdiff = ddiff * 100.0 / n;
+
+ if(dcdiff < 0) dcdiff = -dcdiff;
+
+ size_t len = (size_t)print_calculated_number(buffer, d);
+ calculated_number p = str2ld(buffer, NULL);
+ calculated_number pdiff = n - p;
+ calculated_number pcdiff = pdiff * 100.0 / n;
+ if(pcdiff < 0) pcdiff = -pcdiff;
+
+ if(debug) {
+ fprintf(stderr,
+ CALCULATED_NUMBER_FORMAT " original\n"
+ CALCULATED_NUMBER_FORMAT " packed and unpacked, (stored as 0x%08X, diff " CALCULATED_NUMBER_FORMAT ", " CALCULATED_NUMBER_FORMAT "%%)\n"
+ "%s printed after unpacked (%zu bytes)\n"
+ CALCULATED_NUMBER_FORMAT " re-parsed from printed (diff " CALCULATED_NUMBER_FORMAT ", " CALCULATED_NUMBER_FORMAT "%%)\n\n",
+ n,
+ d, s, ddiff, dcdiff,
+ buffer, len,
+ p, pdiff, pcdiff
+ );
+ if(len != strlen(buffer)) fprintf(stderr, "ERROR: printed number %s is reported to have length %zu but it has %zu\n", buffer, len, strlen(buffer));
+
+ if(dcdiff > ACCURACY_LOSS_ACCEPTED_PERCENT)
+ fprintf(stderr, "WARNING: packing number " CALCULATED_NUMBER_FORMAT " has accuracy loss " CALCULATED_NUMBER_FORMAT " %%\n", n, dcdiff);
+
+ if(pcdiff > ACCURACY_LOSS_ACCEPTED_PERCENT)
+ fprintf(stderr, "WARNING: re-parsing the packed, unpacked and printed number " CALCULATED_NUMBER_FORMAT " has accuracy loss " CALCULATED_NUMBER_FORMAT " %%\n", n, pcdiff);
+ }
+
+ if(len != strlen(buffer)) return 1;
+ if(dcdiff > ACCURACY_LOSS_ACCEPTED_PERCENT) return 3;
+ if(pcdiff > ACCURACY_LOSS_ACCEPTED_PERCENT) return 4;
+ return 0;
+}
+
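+// Find the smallest positive value that survives a pack/unpack round-trip, by
+// halving n until the packed representation collapses to zero.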
+calculated_number storage_number_min(calculated_number n) {
+ calculated_number r = 1, last;
+
+ do {
+ last = n;
+ n /= 2.0;
+ storage_number t = pack_storage_number(n, SN_EXISTS);
+ r = unpack_storage_number(t);
+ } while(r != 0.0 && r != last);
+
+ return last;
+}
+
+void benchmark_storage_number(int loop, int multiplier) {
+ int i, j;
+ calculated_number n, d;
+ storage_number s;
+ unsigned long long user, system, total, mine, their;
+
+ calculated_number storage_number_positive_min = unpack_storage_number(STORAGE_NUMBER_POSITIVE_MIN_RAW);
+ calculated_number storage_number_positive_max = unpack_storage_number(STORAGE_NUMBER_POSITIVE_MAX_RAW);
+
+ char buffer[100];
+
+ struct rusage now, last;
+
+ fprintf(stderr, "\n\nBenchmarking %d numbers, please wait...\n\n", loop);
+
+ // ------------------------------------------------------------------------
+
+ fprintf(stderr, "SYSTEM LONG DOUBLE SIZE: %zu bytes\n", sizeof(calculated_number));
+ fprintf(stderr, "NETDATA FLOATING POINT SIZE: %zu bytes\n", sizeof(storage_number));
+
+ mine = (calculated_number)sizeof(storage_number) * (calculated_number)loop;
+ their = (calculated_number)sizeof(calculated_number) * (calculated_number)loop;
+
+ if(mine > their) {
+ fprintf(stderr, "\nNETDATA NEEDS %0.2" LONG_DOUBLE_MODIFIER " TIMES MORE MEMORY. Sorry!\n", (LONG_DOUBLE)(mine / their));
+ }
+ else {
+ fprintf(stderr, "\nNETDATA INTERNAL FLOATING POINT ARITHMETICS NEEDS %0.2" LONG_DOUBLE_MODIFIER " TIMES LESS MEMORY.\n", (LONG_DOUBLE)(their / mine));
+ }
+
+ fprintf(stderr, "\nNETDATA FLOATING POINT\n");
+ fprintf(stderr, "MIN POSITIVE VALUE " CALCULATED_NUMBER_FORMAT "\n", unpack_storage_number(STORAGE_NUMBER_POSITIVE_MIN_RAW));
+ fprintf(stderr, "MAX POSITIVE VALUE " CALCULATED_NUMBER_FORMAT "\n", unpack_storage_number(STORAGE_NUMBER_POSITIVE_MAX_RAW));
+ fprintf(stderr, "MIN NEGATIVE VALUE " CALCULATED_NUMBER_FORMAT "\n", unpack_storage_number(STORAGE_NUMBER_NEGATIVE_MIN_RAW));
+ fprintf(stderr, "MAX NEGATIVE VALUE " CALCULATED_NUMBER_FORMAT "\n", unpack_storage_number(STORAGE_NUMBER_NEGATIVE_MAX_RAW));
+ fprintf(stderr, "Maximum accuracy loss accepted: " CALCULATED_NUMBER_FORMAT "%%\n\n\n", (calculated_number)ACCURACY_LOSS_ACCEPTED_PERCENT);
+
+ // ------------------------------------------------------------------------
+
+ fprintf(stderr, "INTERNAL LONG DOUBLE PRINTING: ");
+ getrusage(RUSAGE_SELF, &last);
+
+ // do the job
+ for(j = 1; j < 11 ;j++) {
+ n = storage_number_positive_min * j;
+
+ for(i = 0; i < loop ;i++) {
+ n *= multiplier;
+ if(n > storage_number_positive_max) n = storage_number_positive_min;
+
+ print_calculated_number(buffer, n);
+ }
+ }
+
+ getrusage(RUSAGE_SELF, &now);
+ user = now.ru_utime.tv_sec * 1000000ULL + now.ru_utime.tv_usec - (last.ru_utime.tv_sec * 1000000ULL + last.ru_utime.tv_usec);
+ system = now.ru_stime.tv_sec * 1000000ULL + now.ru_stime.tv_usec - (last.ru_stime.tv_sec * 1000000ULL + last.ru_stime.tv_usec);
+ total = user + system;
+ mine = total;
+
+ fprintf(stderr, "user %0.5" LONG_DOUBLE_MODIFIER", system %0.5" LONG_DOUBLE_MODIFIER ", total %0.5" LONG_DOUBLE_MODIFIER "\n", (LONG_DOUBLE)(user / 1000000.0), (LONG_DOUBLE)(system / 1000000.0), (LONG_DOUBLE)(total / 1000000.0));
+
+ // ------------------------------------------------------------------------
+
+ fprintf(stderr, "SYSTEM LONG DOUBLE PRINTING: ");
+ getrusage(RUSAGE_SELF, &last);
+
+ // do the job
+ for(j = 1; j < 11 ;j++) {
+ n = storage_number_positive_min * j;
+
+ for(i = 0; i < loop ;i++) {
+ n *= multiplier;
+ if(n > storage_number_positive_max) n = storage_number_positive_min;
+ snprintfz(buffer, 100, CALCULATED_NUMBER_FORMAT, n);
+ }
+ }
+
+ getrusage(RUSAGE_SELF, &now);
+ user = now.ru_utime.tv_sec * 1000000ULL + now.ru_utime.tv_usec - (last.ru_utime.tv_sec * 1000000ULL + last.ru_utime.tv_usec);
+ system = now.ru_stime.tv_sec * 1000000ULL + now.ru_stime.tv_usec - (last.ru_stime.tv_sec * 1000000ULL + last.ru_stime.tv_usec);
+ total = user + system;
+ their = total;
+
+ fprintf(stderr, "user %0.5" LONG_DOUBLE_MODIFIER ", system %0.5" LONG_DOUBLE_MODIFIER ", total %0.5" LONG_DOUBLE_MODIFIER "\n", (LONG_DOUBLE)(user / 1000000.0), (LONG_DOUBLE)(system / 1000000.0), (LONG_DOUBLE)(total / 1000000.0));
+
+ if(mine > their) {
+ fprintf(stderr, "NETDATA CODE IS SLOWER %0.2" LONG_DOUBLE_MODIFIER " %%\n", (LONG_DOUBLE)(mine * 100.0 / their - 100.0));
+ }
+ else {
+ fprintf(stderr, "NETDATA CODE IS F A S T E R %0.2" LONG_DOUBLE_MODIFIER " %%\n", (LONG_DOUBLE)(their * 100.0 / mine - 100.0));
+ }
+
+ // ------------------------------------------------------------------------
+
+ fprintf(stderr, "\nINTERNAL LONG DOUBLE PRINTING WITH PACK / UNPACK: ");
+ getrusage(RUSAGE_SELF, &last);
+
+ // do the job
+ for(j = 1; j < 11 ;j++) {
+ n = storage_number_positive_min * j;
+
+ for(i = 0; i < loop ;i++) {
+ n *= multiplier;
+ if(n > storage_number_positive_max) n = storage_number_positive_min;
+
+ s = pack_storage_number(n, SN_EXISTS);
+ d = unpack_storage_number(s);
+ print_calculated_number(buffer, d);
+ }
+ }
+
+ getrusage(RUSAGE_SELF, &now);
+ user = now.ru_utime.tv_sec * 1000000ULL + now.ru_utime.tv_usec - (last.ru_utime.tv_sec * 1000000ULL + last.ru_utime.tv_usec);
+ system = now.ru_stime.tv_sec * 1000000ULL + now.ru_stime.tv_usec - (last.ru_stime.tv_sec * 1000000ULL + last.ru_stime.tv_usec);
+ total = user + system;
+ mine = total;
+
+ fprintf(stderr, "user %0.5" LONG_DOUBLE_MODIFIER ", system %0.5" LONG_DOUBLE_MODIFIER ", total %0.5" LONG_DOUBLE_MODIFIER "\n", (LONG_DOUBLE)(user / 1000000.0), (LONG_DOUBLE)(system / 1000000.0), (LONG_DOUBLE)(total / 1000000.0));
+
+ if(mine > their) {
+ fprintf(stderr, "WITH PACKING UNPACKING NETDATA CODE IS SLOWER %0.2" LONG_DOUBLE_MODIFIER " %%\n", (LONG_DOUBLE)(mine * 100.0 / their - 100.0));
+ }
+ else {
+ fprintf(stderr, "EVEN WITH PACKING AND UNPACKING, NETDATA CODE IS F A S T E R %0.2" LONG_DOUBLE_MODIFIER " %%\n", (LONG_DOUBLE)(their * 100.0 / mine - 100.0));
+ }
+
+ // ------------------------------------------------------------------------
+
+}
+
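+// Verify that the flag bits (bits 24-26) pass unchanged through
+// get_storage_number_flags(), and that 0.0 packed with SN_EXISTS
+// unpacks to 0.0 carrying the same flags.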
+static int check_storage_number_exists() {
+ uint32_t flags;
+
+ for(flags = 0; flags < 7 ; flags++) {
+ if(get_storage_number_flags(flags << 24) != flags << 24) {
+ fprintf(stderr, "Flag 0x%08x is not checked correctly. It became 0x%08x\n", flags << 24, get_storage_number_flags(flags << 24));
+ return 1;
+ }
+ }
+
+ flags = SN_EXISTS;
+ calculated_number n = 0.0;
+
+ storage_number s = pack_storage_number(n, flags);
+ calculated_number d = unpack_storage_number(s);
+ if(get_storage_number_flags(s) != flags) {
+ fprintf(stderr, "Wrong flags. Given %08x, Got %08x!\n", flags, get_storage_number_flags(s));
+ return 1;
+ }
+ if(n != d) {
+ fprintf(stderr, "Wrong number returned. Expected " CALCULATED_NUMBER_FORMAT ", returned " CALCULATED_NUMBER_FORMAT "!\n", n, d);
+ return 1;
+ }
+
+ return 0;
+}
+
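+// Sweep positive and negative magnitudes from about 1e-7 up through 21 decades
+// and verify each value with check_storage_number(), skipping anything outside
+// the packable range; finishes with the benchmark above.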
+int unit_test_storage() {
+ if(check_storage_number_exists()) return 1; // a failed flags check is a test failure
+
+ calculated_number storage_number_positive_min = unpack_storage_number(STORAGE_NUMBER_POSITIVE_MIN_RAW);
+ calculated_number storage_number_negative_max = unpack_storage_number(STORAGE_NUMBER_NEGATIVE_MAX_RAW);
+
+ calculated_number c, a = 0;
+ int i, j, g, r = 0;
+
+ for(g = -1; g <= 1 ; g++) {
+ a = 0;
+
+ if(!g) continue;
+
+ for(j = 0; j < 9 ;j++) {
+ a += 0.0000001;
+ c = a * g;
+ for(i = 0; i < 21 ;i++, c *= 10) {
+ if(c > 0 && c < storage_number_positive_min) continue;
+ if(c < 0 && c > storage_number_negative_max) continue;
+
+ if(check_storage_number(c, 1)) return 1;
+ }
+ }
+ }
+
+ // if(check_storage_number(858993459.1234567, 1)) return 1;
+ benchmark_storage_number(1000000, 2);
+ return r;
+}
+
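+// Compare str2ld() against the system's strtold() on a mix of valid,
+// partially-valid and invalid inputs, including "nan" and "inf"; both the
+// parsed value and the returned end pointer must agree.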
+int unit_test_str2ld() {
+ char *values[] = {
+ "1.2345678", "-35.6", "0.00123", "23842384234234.2", ".1", "1.2e-10",
+ "hello", "1wrong", "nan", "inf", NULL
+ };
+
+ int i;
+ for(i = 0; values[i] ; i++) {
+ char *e_mine = "hello", *e_sys = "world";
+ LONG_DOUBLE mine = str2ld(values[i], &e_mine);
+ LONG_DOUBLE sys = strtold(values[i], &e_sys);
+
+ if(isnan(mine)) {
+ if(!isnan(sys)) {
+ fprintf(stderr, "Value '%s' is parsed as %" LONG_DOUBLE_MODIFIER ", but system believes it is %" LONG_DOUBLE_MODIFIER ".\n", values[i], mine, sys);
+ return -1;
+ }
+ }
+ else if(isinf(mine)) {
+ if(!isinf(sys)) {
+ fprintf(stderr, "Value '%s' is parsed as %" LONG_DOUBLE_MODIFIER ", but system believes it is %" LONG_DOUBLE_MODIFIER ".\n", values[i], mine, sys);
+ return -1;
+ }
+ }
+ else if(mine != sys && fabsl(mine - sys) > 0.000001) {
+ fprintf(stderr, "Value '%s' is parsed as %" LONG_DOUBLE_MODIFIER ", but system believes it is %" LONG_DOUBLE_MODIFIER ", delta %" LONG_DOUBLE_MODIFIER ".\n", values[i], mine, sys, sys-mine);
+ return -1;
+ }
+
+ if(e_mine != e_sys) {
+ fprintf(stderr, "Value '%s' is parsed correctly, but endptr is not right\n", values[i]);
+ return -1;
+ }
+
+ fprintf(stderr, "str2ld() parsed value '%s' exactly the same way with strtold(), returned %" LONG_DOUBLE_MODIFIER " vs %" LONG_DOUBLE_MODIFIER "\n", values[i], mine, sys);
+ }
+
+ return 0;
+}
+
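+// Cross-check buffer_sprintf() against snprintfz() on a ~8KiB formatted
+// string, so the buffer has to grow several times from its initial 1 byte.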
+int unit_test_buffer() {
+ BUFFER *wb = buffer_create(1);
+ char string[2048 + 1];
+ char final[9000 + 1];
+ int i;
+
+ for(i = 0; i < 2048; i++)
+ string[i] = (char)((i % 24) + 'a');
+ string[2048] = '\0';
+
+ const char *fmt = "string1: %s\nstring2: %s\nstring3: %s\nstring4: %s";
+ buffer_sprintf(wb, fmt, string, string, string, string);
+ snprintfz(final, 9000, fmt, string, string, string, string);
+
+ const char *s = buffer_tostring(wb);
+
+ if(buffer_strlen(wb) != strlen(final) || strcmp(s, final) != 0) {
+ fprintf(stderr, "\nbuffer_sprintf() is faulty.\n");
+ fprintf(stderr, "\nstring : %s (length %zu)\n", string, strlen(string));
+ fprintf(stderr, "\nbuffer : %s (length %zu)\n", s, buffer_strlen(wb));
+ fprintf(stderr, "\nexpected: %s (length %zu)\n", final, strlen(final));
+ buffer_free(wb);
+ return -1;
+ }
+
+ fprintf(stderr, "buffer_sprintf() works as expected.\n");
+ buffer_free(wb);
+ return 0;
+}
+
+// --------------------------------------------------------------------------------------------------------------------
+
+struct feed_values {
+ unsigned long long microseconds;
+ collected_number value;
+};
+
+struct test {
+ char name[100];
+ char description[1024];
+
+ int update_every;
+ unsigned long long multiplier;
+ unsigned long long divisor;
+ RRD_ALGORITHM algorithm;
+
+ unsigned long feed_entries;
+ unsigned long result_entries;
+ struct feed_values *feed;
+ calculated_number *results;
+
+ collected_number *feed2;
+ calculated_number *results2;
+};
+
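+/*
+ * Each test case below feeds `feed_entries` samples into a fresh one-dimension
+ * (or, when feed2/results2 are set, two-dimension) chart, and compares what the
+ * round-robin database stored against `results`. In every feed entry,
+ * `microseconds` is the gap since the previous sample, so { 500000, 10 } means
+ * "value 10, collected half a second after the previous one". See run_test()
+ * further down for the exact mechanics.
+ */
+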
+// --------------------------------------------------------------------------------------------------------------------
+// test1
+// test absolute values stored
+
+struct feed_values test1_feed[] = {
+ { 0, 10 },
+ { 1000000, 20 },
+ { 1000000, 30 },
+ { 1000000, 40 },
+ { 1000000, 50 },
+ { 1000000, 60 },
+ { 1000000, 70 },
+ { 1000000, 80 },
+ { 1000000, 90 },
+ { 1000000, 100 },
+};
+
+calculated_number test1_results[] = {
+ 20, 30, 40, 50, 60, 70, 80, 90, 100
+};
+
+struct test test1 = {
+ "test1", // name
+ "test absolute values stored at exactly second boundaries",
+ 1, // update_every
+ 1, // multiplier
+ 1, // divisor
+ RRD_ALGORITHM_ABSOLUTE, // algorithm
+ 10, // feed entries
+ 9, // result entries
+ test1_feed, // feed
+ test1_results, // results
+ NULL, // feed2
+ NULL // results2
+};
+
+// --------------------------------------------------------------------------------------------------------------------
+// test2
+// test absolute values stored in the middle of second boundaries
+
+struct feed_values test2_feed[] = {
+ { 500000, 10 },
+ { 1000000, 20 },
+ { 1000000, 30 },
+ { 1000000, 40 },
+ { 1000000, 50 },
+ { 1000000, 60 },
+ { 1000000, 70 },
+ { 1000000, 80 },
+ { 1000000, 90 },
+ { 1000000, 100 },
+};
+
+calculated_number test2_results[] = {
+ 20, 30, 40, 50, 60, 70, 80, 90, 100
+};
+
+struct test test2 = {
+ "test2", // name
+ "test absolute values stored in the middle of second boundaries",
+ 1, // update_every
+ 1, // multiplier
+ 1, // divisor
+ RRD_ALGORITHM_ABSOLUTE, // algorithm
+ 10, // feed entries
+ 9, // result entries
+ test2_feed, // feed
+ test2_results, // results
+ NULL, // feed2
+ NULL // results2
+};
+
+// --------------------------------------------------------------------------------------------------------------------
+// test3
+
+struct feed_values test3_feed[] = {
+ { 0, 10 },
+ { 1000000, 20 },
+ { 1000000, 30 },
+ { 1000000, 40 },
+ { 1000000, 50 },
+ { 1000000, 60 },
+ { 1000000, 70 },
+ { 1000000, 80 },
+ { 1000000, 90 },
+ { 1000000, 100 },
+};
+
+calculated_number test3_results[] = {
+ 10, 10, 10, 10, 10, 10, 10, 10, 10
+};
+
+struct test test3 = {
+ "test3", // name
+ "test incremental values stored at exactly second boundaries",
+ 1, // update_every
+ 1, // multiplier
+ 1, // divisor
+ RRD_ALGORITHM_INCREMENTAL, // algorithm
+ 10, // feed entries
+ 9, // result entries
+ test3_feed, // feed
+ test3_results, // results
+ NULL, // feed2
+ NULL // results2
+};
+
+// --------------------------------------------------------------------------------------------------------------------
+// test4
+
+struct feed_values test4_feed[] = {
+ { 500000, 10 },
+ { 1000000, 20 },
+ { 1000000, 30 },
+ { 1000000, 40 },
+ { 1000000, 50 },
+ { 1000000, 60 },
+ { 1000000, 70 },
+ { 1000000, 80 },
+ { 1000000, 90 },
+ { 1000000, 100 },
+};
+
+calculated_number test4_results[] = {
+ 10, 10, 10, 10, 10, 10, 10, 10, 10
+};
+
+struct test test4 = {
+ "test4", // name
+ "test incremental values stored in the middle of second boundaries",
+ 1, // update_every
+ 1, // multiplier
+ 1, // divisor
+ RRD_ALGORITHM_INCREMENTAL, // algorithm
+ 10, // feed entries
+ 9, // result entries
+ test4_feed, // feed
+ test4_results, // results
+ NULL, // feed2
+ NULL // results2
+};
+
+// --------------------------------------------------------------------------------------------------------------------
+// test5 - 32 bit overflows
+
+struct feed_values test5_feed[] = {
+ { 0, 0x00000000FFFFFFFFULL / 15 * 0 },
+ { 1000000, 0x00000000FFFFFFFFULL / 15 * 7 },
+ { 1000000, 0x00000000FFFFFFFFULL / 15 * 14 },
+ { 1000000, 0x00000000FFFFFFFFULL / 15 * 0 },
+ { 1000000, 0x00000000FFFFFFFFULL / 15 * 7 },
+ { 1000000, 0x00000000FFFFFFFFULL / 15 * 14 },
+ { 1000000, 0x00000000FFFFFFFFULL / 15 * 0 },
+ { 1000000, 0x00000000FFFFFFFFULL / 15 * 7 },
+ { 1000000, 0x00000000FFFFFFFFULL / 15 * 14 },
+ { 1000000, 0x00000000FFFFFFFFULL / 15 * 0 },
+};
+
+calculated_number test5_results[] = {
+ 0x00000000FFFFFFFFULL / 15 * 7,
+ 0x00000000FFFFFFFFULL / 15 * 7,
+ 0x00000000FFFFFFFFULL / 15,
+ 0x00000000FFFFFFFFULL / 15 * 7,
+ 0x00000000FFFFFFFFULL / 15 * 7,
+ 0x00000000FFFFFFFFULL / 15,
+ 0x00000000FFFFFFFFULL / 15 * 7,
+ 0x00000000FFFFFFFFULL / 15 * 7,
+ 0x00000000FFFFFFFFULL / 15,
+};
+
+struct test test5 = {
+ "test5", // name
+ "test 32-bit incremental values overflow",
+ 1, // update_every
+ 1, // multiplier
+ 1, // divisor
+ RRD_ALGORITHM_INCREMENTAL, // algorithm
+ 10, // feed entries
+ 9, // result entries
+ test5_feed, // feed
+ test5_results, // results
+ NULL, // feed2
+ NULL // results2
+};
+
+// --------------------------------------------------------------------------------------------------------------------
+// test5b - 64 bit overflows
+
+struct feed_values test5b_feed[] = {
+ { 0, 0xFFFFFFFFFFFFFFFFULL / 15 * 0 },
+ { 1000000, 0xFFFFFFFFFFFFFFFFULL / 15 * 7 },
+ { 1000000, 0xFFFFFFFFFFFFFFFFULL / 15 * 14 },
+ { 1000000, 0xFFFFFFFFFFFFFFFFULL / 15 * 0 },
+ { 1000000, 0xFFFFFFFFFFFFFFFFULL / 15 * 7 },
+ { 1000000, 0xFFFFFFFFFFFFFFFFULL / 15 * 14 },
+ { 1000000, 0xFFFFFFFFFFFFFFFFULL / 15 * 0 },
+ { 1000000, 0xFFFFFFFFFFFFFFFFULL / 15 * 7 },
+ { 1000000, 0xFFFFFFFFFFFFFFFFULL / 15 * 14 },
+ { 1000000, 0xFFFFFFFFFFFFFFFFULL / 15 * 0 },
+};
+
+calculated_number test5b_results[] = {
+ 0xFFFFFFFFFFFFFFFFULL / 15 * 7,
+ 0xFFFFFFFFFFFFFFFFULL / 15 * 7,
+ 0xFFFFFFFFFFFFFFFFULL / 15,
+ 0xFFFFFFFFFFFFFFFFULL / 15 * 7,
+ 0xFFFFFFFFFFFFFFFFULL / 15 * 7,
+ 0xFFFFFFFFFFFFFFFFULL / 15,
+ 0xFFFFFFFFFFFFFFFFULL / 15 * 7,
+ 0xFFFFFFFFFFFFFFFFULL / 15 * 7,
+ 0xFFFFFFFFFFFFFFFFULL / 15,
+};
+
+struct test test5b = {
+ "test5b", // name
+ "test 64-bit incremental values overflow",
+ 1, // update_every
+ 1, // multiplier
+ 1, // divisor
+ RRD_ALGORITHM_INCREMENTAL, // algorithm
+ 10, // feed entries
+ 9, // result entries
+ test5b_feed, // feed
+ test5b_results, // results
+ NULL, // feed2
+ NULL // results2
+};
+
+// --------------------------------------------------------------------------------------------------------------------
+// test6
+
+struct feed_values test6_feed[] = {
+ { 250000, 1000 },
+ { 250000, 2000 },
+ { 250000, 3000 },
+ { 250000, 4000 },
+ { 250000, 5000 },
+ { 250000, 6000 },
+ { 250000, 7000 },
+ { 250000, 8000 },
+ { 250000, 9000 },
+ { 250000, 10000 },
+ { 250000, 11000 },
+ { 250000, 12000 },
+ { 250000, 13000 },
+ { 250000, 14000 },
+ { 250000, 15000 },
+ { 250000, 16000 },
+};
+
+calculated_number test6_results[] = {
+ 4000, 4000, 4000, 4000
+};
+
+struct test test6 = {
+ "test6", // name
+ "test incremental values updated within the same second",
+ 1, // update_every
+ 1, // multiplier
+ 1, // divisor
+ RRD_ALGORITHM_INCREMENTAL, // algorithm
+ 16, // feed entries
+ 4, // result entries
+ test6_feed, // feed
+ test6_results, // results
+ NULL, // feed2
+ NULL // results2
+};
+
+// --------------------------------------------------------------------------------------------------------------------
+// test7
+
+struct feed_values test7_feed[] = {
+ { 500000, 1000 },
+ { 2000000, 2000 },
+ { 2000000, 3000 },
+ { 2000000, 4000 },
+ { 2000000, 5000 },
+ { 2000000, 6000 },
+ { 2000000, 7000 },
+ { 2000000, 8000 },
+ { 2000000, 9000 },
+ { 2000000, 10000 },
+};
+
+calculated_number test7_results[] = {
+ 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500
+};
+
+struct test test7 = {
+ "test7", // name
+ "test incremental values updated in long durations",
+ 1, // update_every
+ 1, // multiplier
+ 1, // divisor
+ RRD_ALGORITHM_INCREMENTAL, // algorithm
+ 10, // feed entries
+ 18, // result entries
+ test7_feed, // feed
+ test7_results, // results
+ NULL, // feed2
+ NULL // results2
+};
+
+// --------------------------------------------------------------------------------------------------------------------
+// test8
+
+struct feed_values test8_feed[] = {
+ { 500000, 1000 },
+ { 2000000, 2000 },
+ { 2000000, 3000 },
+ { 2000000, 4000 },
+ { 2000000, 5000 },
+ { 2000000, 6000 },
+};
+
+calculated_number test8_results[] = {
+ 1250, 2000, 2250, 3000, 3250, 4000, 4250, 5000, 5250, 6000
+};
+
+struct test test8 = {
+ "test8", // name
+ "test absolute values updated in long durations",
+ 1, // update_every
+ 1, // multiplier
+ 1, // divisor
+ RRD_ALGORITHM_ABSOLUTE, // algorithm
+ 6, // feed entries
+ 10, // result entries
+ test8_feed, // feed
+ test8_results, // results
+ NULL, // feed2
+ NULL // results2
+};
+
+// --------------------------------------------------------------------------------------------------------------------
+// test9
+
+struct feed_values test9_feed[] = {
+ { 250000, 1000 },
+ { 250000, 2000 },
+ { 250000, 3000 },
+ { 250000, 4000 },
+ { 250000, 5000 },
+ { 250000, 6000 },
+ { 250000, 7000 },
+ { 250000, 8000 },
+ { 250000, 9000 },
+ { 250000, 10000 },
+ { 250000, 11000 },
+ { 250000, 12000 },
+ { 250000, 13000 },
+ { 250000, 14000 },
+ { 250000, 15000 },
+ { 250000, 16000 },
+};
+
+calculated_number test9_results[] = {
+ 4000, 8000, 12000, 16000
+};
+
+struct test test9 = {
+ "test9", // name
+ "test absolute values updated within the same second",
+ 1, // update_every
+ 1, // multiplier
+ 1, // divisor
+ RRD_ALGORITHM_ABSOLUTE, // algorithm
+ 16, // feed entries
+ 4, // result entries
+ test9_feed, // feed
+ test9_results, // results
+ NULL, // feed2
+ NULL // results2
+};
+
+// --------------------------------------------------------------------------------------------------------------------
+// test10
+
+struct feed_values test10_feed[] = {
+ { 500000, 1000 },
+ { 600000, 1000 + 600 },
+ { 200000, 1600 + 200 },
+ { 1000000, 1800 + 1000 },
+ { 200000, 2800 + 200 },
+ { 2000000, 3000 + 2000 },
+ { 600000, 5000 + 600 },
+ { 400000, 5600 + 400 },
+ { 900000, 6000 + 900 },
+ { 1000000, 6900 + 1000 },
+};
+
+calculated_number test10_results[] = {
+ 1000, 1000, 1000, 1000, 1000, 1000, 1000
+};
+
+struct test test10 = {
+ "test10", // name
+ "test incremental values updated in short and long durations",
+ 1, // update_every
+ 1, // multiplier
+ 1, // divisor
+ RRD_ALGORITHM_INCREMENTAL, // algorithm
+ 10, // feed entries
+ 7, // result entries
+ test10_feed, // feed
+ test10_results, // results
+ NULL, // feed2
+ NULL // results2
+};
+
+// --------------------------------------------------------------------------------------------------------------------
+// test11
+
+struct feed_values test11_feed[] = {
+ { 0, 10 },
+ { 1000000, 20 },
+ { 1000000, 30 },
+ { 1000000, 40 },
+ { 1000000, 50 },
+ { 1000000, 60 },
+ { 1000000, 70 },
+ { 1000000, 80 },
+ { 1000000, 90 },
+ { 1000000, 100 },
+};
+
+collected_number test11_feed2[] = {
+ 10, 20, 30, 40, 50, 60, 70, 80, 90, 100
+};
+
+calculated_number test11_results[] = {
+ 50, 50, 50, 50, 50, 50, 50, 50, 50
+};
+
+calculated_number test11_results2[] = {
+ 50, 50, 50, 50, 50, 50, 50, 50, 50
+};
+
+struct test test11 = {
+ "test11", // name
+ "test percentage-of-incremental-row with equal values",
+ 1, // update_every
+ 1, // multiplier
+ 1, // divisor
+ RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL, // algorithm
+ 10, // feed entries
+ 9, // result entries
+ test11_feed, // feed
+ test11_results, // results
+ test11_feed2, // feed2
+ test11_results2 // results2
+};
+
+// --------------------------------------------------------------------------------------------------------------------
+// test12
+
+struct feed_values test12_feed[] = {
+ { 0, 10 },
+ { 1000000, 20 },
+ { 1000000, 30 },
+ { 1000000, 40 },
+ { 1000000, 50 },
+ { 1000000, 60 },
+ { 1000000, 70 },
+ { 1000000, 80 },
+ { 1000000, 90 },
+ { 1000000, 100 },
+};
+
+collected_number test12_feed2[] = {
+ 10*3, 20*3, 30*3, 40*3, 50*3, 60*3, 70*3, 80*3, 90*3, 100*3
+};
+
+calculated_number test12_results[] = {
+ 25, 25, 25, 25, 25, 25, 25, 25, 25
+};
+
+calculated_number test12_results2[] = {
+ 75, 75, 75, 75, 75, 75, 75, 75, 75
+};
+
+struct test test12 = {
+ "test12", // name
+ "test percentage-of-incremental-row with equal values",
+ 1, // update_every
+ 1, // multiplier
+ 1, // divisor
+ RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL, // algorithm
+ 10, // feed entries
+ 9, // result entries
+ test12_feed, // feed
+ test12_results, // results
+ test12_feed2, // feed2
+ test12_results2 // results2
+};
+
+// --------------------------------------------------------------------------------------------------------------------
+// test13
+
+struct feed_values test13_feed[] = {
+ { 500000, 1000 },
+ { 600000, 1000 + 600 },
+ { 200000, 1600 + 200 },
+ { 1000000, 1800 + 1000 },
+ { 200000, 2800 + 200 },
+ { 2000000, 3000 + 2000 },
+ { 600000, 5000 + 600 },
+ { 400000, 5600 + 400 },
+ { 900000, 6000 + 900 },
+ { 1000000, 6900 + 1000 },
+};
+
+calculated_number test13_results[] = {
+ 83.3333300, 100, 100, 100, 100, 100, 100
+};
+
+struct test test13 = {
+ "test13", // name
+ "test incremental values updated in short and long durations",
+ 1, // update_every
+ 1, // multiplier
+ 1, // divisor
+ RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL, // algorithm
+ 10, // feed entries
+ 7, // result entries
+ test13_feed, // feed
+ test13_results, // results
+ NULL, // feed2
+ NULL // results2
+};
+
+// --------------------------------------------------------------------------------------------------------------------
+// test14
+
+struct feed_values test14_feed[] = {
+ { 0, 0x015397dc42151c41ULL },
+ { 13573000, 0x015397e612e3ff5dULL },
+ { 29969000, 0x015397f905ecdaa8ULL },
+ { 29958000, 0x0153980c2a6cb5e4ULL },
+ { 30054000, 0x0153981f4032fb83ULL },
+ { 34952000, 0x015398355efadaccULL },
+ { 25046000, 0x01539845ba4b09f8ULL },
+ { 29947000, 0x0153985948bf381dULL },
+ { 30054000, 0x0153986c5b9c27e2ULL },
+ { 29942000, 0x0153987f888982d0ULL },
+};
+
+calculated_number test14_results[] = {
+ 23.1383300, 21.8515600, 21.8804600, 21.7788000, 22.0112200, 22.4386100, 22.0906100, 21.9150800
+};
+
+struct test test14 = {
+ "test14", // name
+ "issue #981 with real data",
+ 30, // update_every
+ 8, // multiplier
+ 1000000000, // divisor
+ RRD_ALGORITHM_INCREMENTAL, // algorithm
+ 10, // feed entries
+ 8, // result entries
+ test14_feed, // feed
+ test14_results, // results
+ NULL, // feed2
+ NULL // results2
+};
+
+struct feed_values test14b_feed[] = {
+ { 0, 0 },
+ { 13573000, 13573000 },
+ { 29969000, 13573000 + 29969000 },
+ { 29958000, 13573000 + 29969000 + 29958000 },
+ { 30054000, 13573000 + 29969000 + 29958000 + 30054000 },
+ { 34952000, 13573000 + 29969000 + 29958000 + 30054000 + 34952000 },
+ { 25046000, 13573000 + 29969000 + 29958000 + 30054000 + 34952000 + 25046000 },
+ { 29947000, 13573000 + 29969000 + 29958000 + 30054000 + 34952000 + 25046000 + 29947000 },
+ { 30054000, 13573000 + 29969000 + 29958000 + 30054000 + 34952000 + 25046000 + 29947000 + 30054000 },
+ { 29942000, 13573000 + 29969000 + 29958000 + 30054000 + 34952000 + 25046000 + 29947000 + 30054000 + 29942000 },
+};
+
+calculated_number test14b_results[] = {
+ 1000000, 1000000, 1000000, 1000000, 1000000, 1000000, 1000000, 1000000
+};
+
+struct test test14b = {
+ "test14b", // name
+ "issue #981 with dummy data",
+ 30, // update_every
+ 1, // multiplier
+ 1, // divisor
+ RRD_ALGORITHM_INCREMENTAL, // algorithm
+ 10, // feed entries
+ 8, // result entries
+ test14b_feed, // feed
+ test14b_results, // results
+ NULL, // feed2
+ NULL // results2
+};
+
+struct feed_values test14c_feed[] = {
+ { 29000000, 29000000 },
+ { 1000000, 29000000 + 1000000 },
+ { 30000000, 29000000 + 1000000 + 30000000 },
+ { 30000000, 29000000 + 1000000 + 30000000 + 30000000 },
+ { 30000000, 29000000 + 1000000 + 30000000 + 30000000 + 30000000 },
+ { 30000000, 29000000 + 1000000 + 30000000 + 30000000 + 30000000 + 30000000 },
+ { 30000000, 29000000 + 1000000 + 30000000 + 30000000 + 30000000 + 30000000 + 30000000 },
+ { 30000000, 29000000 + 1000000 + 30000000 + 30000000 + 30000000 + 30000000 + 30000000 + 30000000 },
+ { 30000000, 29000000 + 1000000 + 30000000 + 30000000 + 30000000 + 30000000 + 30000000 + 30000000 + 30000000 },
+ { 30000000, 29000000 + 1000000 + 30000000 + 30000000 + 30000000 + 30000000 + 30000000 + 30000000 + 30000000 + 30000000 },
+};
+
+calculated_number test14c_results[] = {
+ 1000000, 1000000, 1000000, 1000000, 1000000, 1000000, 1000000, 1000000, 1000000
+};
+
+struct test test14c = {
+ "test14c", // name
+ "issue #981 with dummy data, checking for late start",
+ 30, // update_every
+ 1, // multiplier
+ 1, // divisor
+ RRD_ALGORITHM_INCREMENTAL, // algorithm
+ 10, // feed entries
+ 9, // result entries
+ test14c_feed, // feed
+ test14c_results, // results
+ NULL, // feed2
+ NULL // results2
+};
+
+// --------------------------------------------------------------------------------------------------------------------
+// test15
+
+struct feed_values test15_feed[] = {
+ { 0, 1068066388 },
+ { 1008752, 1068822698 },
+ { 993809, 1069573072 },
+ { 995911, 1070324135 },
+ { 1014562, 1071078166 },
+ { 994684, 1071831349 },
+ { 993128, 1072235739 },
+ { 1010332, 1072958871 },
+ { 1003394, 1073707019 },
+ { 995201, 1074460255 },
+};
+
+collected_number test15_feed2[] = {
+ 178825286, 178825286, 178825286, 178825286, 178825498, 178825498, 179165652, 179202964, 179203282, 179204130
+};
+
+calculated_number test15_results[] = {
+ 5857.4080000, 5898.4540000, 5891.6590000, 5806.3160000, 5914.2640000, 3202.2630000, 5589.6560000, 5822.5260000, 5911.7520000
+};
+
+calculated_number test15_results2[] = {
+ 0.0000000, 0.0000000, 0.0024944, 1.6324779, 0.0212777, 2655.1890000, 290.5387000, 5.6733610, 6.5960220
+};
+
+struct test test15 = {
+ "test15", // name
+ "test incremental with 2 dimensions",
+ 1, // update_every
+ 8, // multiplier
+ 1024, // divisor
+ RRD_ALGORITHM_INCREMENTAL, // algorithm
+ 10, // feed entries
+ 9, // result entries
+ test15_feed, // feed
+ test15_results, // results
+ test15_feed2, // feed2
+ test15_results2 // results2
+};
+
+// --------------------------------------------------------------------------------------------------------------------
+
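+// Execute one test case: create a chart with RRD_MEMORY_MODE_ALLOC, feed all
+// samples (simulating the collection gaps via usec_since_last_update), then
+// compare every stored slot against the expected results, rounded to seven
+// decimal digits; returns the number of mismatches.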
+int run_test(struct test *test)
+{
+ fprintf(stderr, "\nRunning test '%s':\n%s\n", test->name, test->description);
+
+ default_rrd_memory_mode = RRD_MEMORY_MODE_ALLOC;
+ default_rrd_update_every = test->update_every;
+
+ char name[101];
+ snprintfz(name, 100, "unittest-%s", test->name);
+
+ // create the chart
+ RRDSET *st = rrdset_create_localhost("netdata", name, name, "netdata", NULL, "Unit Testing", "a value", "unittest", NULL, 1
+ , test->update_every, RRDSET_TYPE_LINE);
+ RRDDIM *rd = rrddim_add(st, "dim1", NULL, test->multiplier, test->divisor, test->algorithm);
+
+ RRDDIM *rd2 = NULL;
+ if(test->feed2)
+ rd2 = rrddim_add(st, "dim2", NULL, test->multiplier, test->divisor, test->algorithm);
+
+ rrdset_flag_set(st, RRDSET_FLAG_DEBUG);
+
+ // feed it with the test data
+ time_t time_now = 0, time_start = now_realtime_sec();
+ unsigned long c;
+ collected_number last = 0;
+ for(c = 0; c < test->feed_entries; c++) {
+ if(debug_flags) fprintf(stderr, "\n\n");
+
+ if(c) {
+ time_now += test->feed[c].microseconds;
+ fprintf(stderr, " > %s: feeding position %lu, after %0.3f seconds (%0.3f seconds from start), delta " CALCULATED_NUMBER_FORMAT ", rate " CALCULATED_NUMBER_FORMAT "\n",
+ test->name, c+1,
+ (float)test->feed[c].microseconds / 1000000.0,
+ (float)time_now / 1000000.0,
+ ((calculated_number)test->feed[c].value - (calculated_number)last) * (calculated_number)test->multiplier / (calculated_number)test->divisor,
+ (((calculated_number)test->feed[c].value - (calculated_number)last) * (calculated_number)test->multiplier / (calculated_number)test->divisor) / (calculated_number)test->feed[c].microseconds * (calculated_number)1000000);
+
+ // rrdset_next_usec_unfiltered(st, test->feed[c].microseconds);
+ st->usec_since_last_update = test->feed[c].microseconds;
+ }
+ else {
+ fprintf(stderr, " > %s: feeding position %lu\n", test->name, c+1);
+ }
+
+ fprintf(stderr, " >> %s with value " COLLECTED_NUMBER_FORMAT "\n", rd->name, test->feed[c].value);
+ rrddim_set(st, "dim1", test->feed[c].value);
+ last = test->feed[c].value;
+
+ if(rd2) {
+ fprintf(stderr, " >> %s with value " COLLECTED_NUMBER_FORMAT "\n", rd2->name, test->feed2[c]);
+ rrddim_set(st, "dim2", test->feed2[c]);
+ }
+
+ rrdset_done(st);
+
+ // align the first entry to second boundary
+ if(!c) {
+ fprintf(stderr, " > %s: fixing first collection time to be %llu microseconds to second boundary\n", test->name, test->feed[c].microseconds);
+ rd->last_collected_time.tv_usec = st->last_collected_time.tv_usec = st->last_updated.tv_usec = test->feed[c].microseconds;
+ // time_start = st->last_collected_time.tv_sec;
+ }
+ }
+
+ // check the result
+ int errors = 0;
+
+ if(st->counter != test->result_entries) {
+ fprintf(stderr, " %s stored %zu entries, but we were expecting %lu, ### E R R O R ###\n", test->name, st->counter, test->result_entries);
+ errors++;
+ }
+
+ unsigned long max = (st->counter < test->result_entries)?st->counter:test->result_entries;
+ for(c = 0 ; c < max ; c++) {
+ calculated_number v = unpack_storage_number(rd->values[c]);
+ calculated_number n = unpack_storage_number(pack_storage_number(test->results[c], SN_EXISTS));
+ int same = (calculated_number_round(v * 10000000.0) == calculated_number_round(n * 10000000.0))?1:0;
+ fprintf(stderr, " %s/%s: checking position %lu (at %lu secs), expecting value " CALCULATED_NUMBER_FORMAT ", found " CALCULATED_NUMBER_FORMAT ", %s\n",
+ test->name, rd->name, c+1,
+ (rrdset_first_entry_t(st) + c * st->update_every) - time_start,
+ n, v, (same)?"OK":"### E R R O R ###");
+
+ if(!same) errors++;
+
+ if(rd2) {
+ v = unpack_storage_number(rd2->values[c]);
+ n = test->results2[c];
+ same = (calculated_number_round(v * 10000000.0) == calculated_number_round(n * 10000000.0))?1:0;
+ fprintf(stderr, " %s/%s: checking position %lu (at %lu secs), expecting value " CALCULATED_NUMBER_FORMAT ", found " CALCULATED_NUMBER_FORMAT ", %s\n",
+ test->name, rd2->name, c+1,
+ (rrdset_first_entry_t(st) + c * st->update_every) - time_start,
+ n, v, (same)?"OK":"### E R R O R ###");
+ if(!same) errors++;
+ }
+ }
+
+ return errors;
+}
+
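+// Exercise chart and dimension renames twice each, then dump the resulting
+// health variables as JSON. Note the inverted convention: this function
+// returns 1 on success (see its call site in run_all_mockup_tests()).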
+static int test_variable_renames(void) {
+ fprintf(stderr, "Creating chart\n");
+ RRDSET *st = rrdset_create_localhost("chart", "ID", NULL, "family", "context", "Unit Testing", "a value", "unittest", NULL, 1, 1, RRDSET_TYPE_LINE);
+ fprintf(stderr, "Created chart with id '%s', name '%s'\n", st->id, st->name);
+
+ fprintf(stderr, "Creating dimension DIM1\n");
+ RRDDIM *rd1 = rrddim_add(st, "DIM1", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ fprintf(stderr, "Created dimension with id '%s', name '%s'\n", rd1->id, rd1->name);
+
+ fprintf(stderr, "Creating dimension DIM2\n");
+ RRDDIM *rd2 = rrddim_add(st, "DIM2", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ fprintf(stderr, "Created dimension with id '%s', name '%s'\n", rd2->id, rd2->name);
+
+ fprintf(stderr, "Renaming chart to CHARTNAME1\n");
+ rrdset_set_name(st, "CHARTNAME1");
+ fprintf(stderr, "Renamed chart with id '%s' to name '%s'\n", st->id, st->name);
+
+ fprintf(stderr, "Renaming chart to CHARTNAME2\n");
+ rrdset_set_name(st, "CHARTNAME2");
+ fprintf(stderr, "Renamed chart with id '%s' to name '%s'\n", st->id, st->name);
+
+ fprintf(stderr, "Renaming dimension DIM1 to DIM1NAME1\n");
+ rrddim_set_name(st, rd1, "DIM1NAME1");
+ fprintf(stderr, "Renamed dimension with id '%s' to name '%s'\n", rd1->id, rd1->name);
+
+ fprintf(stderr, "Renaming dimension DIM1 to DIM1NAME2\n");
+ rrddim_set_name(st, rd1, "DIM1NAME2");
+ fprintf(stderr, "Renamed dimension with id '%s' to name '%s'\n", rd1->id, rd1->name);
+
+ fprintf(stderr, "Renaming dimension DIM2 to DIM2NAME1\n");
+ rrddim_set_name(st, rd2, "DIM2NAME1");
+ fprintf(stderr, "Renamed dimension with id '%s' to name '%s'\n", rd2->id, rd2->name);
+
+ fprintf(stderr, "Renaming dimension DIM2 to DIM2NAME2\n");
+ rrddim_set_name(st, rd2, "DIM2NAME2");
+ fprintf(stderr, "Renamed dimension with id '%s' to name '%s'\n", rd2->id, rd2->name);
+
+ BUFFER *buf = buffer_create(1);
+ health_api_v1_chart_variables2json(st, buf);
+ fprintf(stderr, "%s", buffer_tostring(buf));
+ buffer_free(buf);
+ return 1;
+}
+
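+// Verify that strdupz_path_subpath() joins the two components with a single
+// slash, collapses repeated slashes, and substitutes "." for an empty path.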
+int check_strdupz_path_subpath() {
+
+ struct strdupz_path_subpath_checks {
+ const char *path;
+ const char *subpath;
+ const char *result;
+ } checks[] = {
+ { "", "", "." },
+ { "/", "", "/" },
+ { "/etc/netdata", "", "/etc/netdata" },
+ { "/etc/netdata///", "", "/etc/netdata" },
+ { "/etc/netdata///", "health.d", "/etc/netdata/health.d" },
+ { "/etc/netdata///", "///health.d", "/etc/netdata/health.d" },
+ { "/etc/netdata", "///health.d", "/etc/netdata/health.d" },
+ { "", "///health.d", "./health.d" },
+ { "/", "///health.d", "/health.d" },
+
+ // terminator
+ { NULL, NULL, NULL }
+ };
+
+ size_t i;
+ for(i = 0; checks[i].result ; i++) {
+ char *s = strdupz_path_subpath(checks[i].path, checks[i].subpath);
+ fprintf(stderr, "strdupz_path_subpath(\"%s\", \"%s\") = \"%s\": ", checks[i].path, checks[i].subpath, s);
+ if(!s || strcmp(s, checks[i].result) != 0) {
+ freez(s);
+ fprintf(stderr, "FAILED\n");
+ return 1;
+ }
+ else {
+ freez(s);
+ fprintf(stderr, "OK\n");
+ }
+ }
+
+ return 0;
+}
+
+int run_all_mockup_tests(void)
+{
+ if(check_strdupz_path_subpath())
+ return 1;
+
+ if(check_number_printing())
+ return 1;
+
+ if(check_rrdcalc_comparisons())
+ return 1;
+
+ if(!test_variable_renames())
+ return 1;
+
+ if(run_test(&test1))
+ return 1;
+
+ if(run_test(&test2))
+ return 1;
+
+ if(run_test(&test3))
+ return 1;
+
+ if(run_test(&test4))
+ return 1;
+
+ if(run_test(&test5))
+ return 1;
+
+ if(run_test(&test5b))
+ return 1;
+
+ if(run_test(&test6))
+ return 1;
+
+ if(run_test(&test7))
+ return 1;
+
+ if(run_test(&test8))
+ return 1;
+
+ if(run_test(&test9))
+ return 1;
+
+ if(run_test(&test10))
+ return 1;
+
+ if(run_test(&test11))
+ return 1;
+
+ if(run_test(&test12))
+ return 1;
+
+ if(run_test(&test13))
+ return 1;
+
+ if(run_test(&test14))
+ return 1;
+
+ if(run_test(&test14b))
+ return 1;
+
+ if(run_test(&test14c))
+ return 1;
+
+ if(run_test(&test15))
+ return 1;
+
+ return 0;
+}
+
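+// Generic interpolation test: collect 20 increasing values, `delay`
+// microseconds apart, with the first collection shifted `shift` microseconds
+// into its second, and verify the stored values of the absolute and
+// incremental algorithms against the analytically expected ones.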
+int unit_test(long delay, long shift)
+{
+ static int repeat = 0;
+ repeat++;
+
+ char name[101];
+ snprintfz(name, 100, "unittest-%d-%ld-%ld", repeat, delay, shift);
+
+ //debug_flags = 0xffffffff;
+ default_rrd_memory_mode = RRD_MEMORY_MODE_ALLOC;
+ default_rrd_update_every = 1;
+
+ int do_abs = 1;
+ int do_inc = 1;
+ int do_abst = 0;
+ int do_absi = 0;
+
+ RRDSET *st = rrdset_create_localhost("netdata", name, name, "netdata", NULL, "Unit Testing", "a value", "unittest", NULL, 1, 1
+ , RRDSET_TYPE_LINE);
+ rrdset_flag_set(st, RRDSET_FLAG_DEBUG);
+
+ RRDDIM *rdabs = NULL;
+ RRDDIM *rdinc = NULL;
+ RRDDIM *rdabst = NULL;
+ RRDDIM *rdabsi = NULL;
+
+ if(do_abs) rdabs = rrddim_add(st, "absolute", "absolute", 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ if(do_inc) rdinc = rrddim_add(st, "incremental", "incremental", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ if(do_abst) rdabst = rrddim_add(st, "percentage-of-absolute-row", "percentage-of-absolute-row", 1, 1, RRD_ALGORITHM_PCENT_OVER_ROW_TOTAL);
+ if(do_absi) rdabsi = rrddim_add(st, "percentage-of-incremental-row", "percentage-of-incremental-row", 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
+
+ long increment = 1000;
+ collected_number i = 0;
+
+ unsigned long c, dimensions = 0;
+ RRDDIM *rd;
+ for(rd = st->dimensions ; rd ; rd = rd->next) dimensions++;
+
+ for(c = 0; c < 20 ;c++) {
+ i += increment;
+
+ fprintf(stderr, "\n\nLOOP = %lu, DELAY = %ld, VALUE = " COLLECTED_NUMBER_FORMAT "\n", c, delay, i);
+ if(c) {
+ // rrdset_next_usec_unfiltered(st, delay);
+ st->usec_since_last_update = delay;
+ }
+ if(do_abs) rrddim_set(st, "absolute", i);
+ if(do_inc) rrddim_set(st, "incremental", i);
+ if(do_abst) rrddim_set(st, "percentage-of-absolute-row", i);
+ if(do_absi) rrddim_set(st, "percentage-of-incremental-row", i);
+
+ if(!c) {
+ now_realtime_timeval(&st->last_collected_time);
+ st->last_collected_time.tv_usec = shift;
+ }
+
+ // prevent it from deleting the dimensions
+ for(rd = st->dimensions ; rd ; rd = rd->next)
+ rd->last_collected_time.tv_sec = st->last_collected_time.tv_sec;
+
+ rrdset_done(st);
+ }
+
+ unsigned long oincrement = increment;
+ increment = increment * st->update_every * 1000000 / delay;
+ fprintf(stderr, "\n\nORIGINAL INCREMENT: %lu, INCREMENT %ld, DELAY %ld, SHIFT %ld\n", oincrement * 10, increment * 10, delay, shift);
+
+ int ret = 0;
+ storage_number sn;
+ calculated_number cn, v;
+ for(c = 0 ; c < st->counter ; c++) {
+ fprintf(stderr, "\nPOSITION: c = %lu, EXPECTED VALUE %lu\n", c, (oincrement + c * increment + increment * (1000000 - shift) / 1000000 )* 10);
+
+ for(rd = st->dimensions ; rd ; rd = rd->next) {
+ sn = rd->values[c];
+ cn = unpack_storage_number(sn);
+ fprintf(stderr, "\t %s " CALCULATED_NUMBER_FORMAT " (PACKED AS " STORAGE_NUMBER_FORMAT ") -> ", rd->id, cn, sn);
+
+ if(rd == rdabs) v =
+ ( oincrement
+ // + (increment * (1000000 - shift) / 1000000)
+ + (c + 1) * increment
+ );
+
+ else if(rd == rdinc) v = (c?(increment):(increment * (1000000 - shift) / 1000000));
+ else if(rd == rdabst) v = oincrement / dimensions / 10;
+ else if(rd == rdabsi) v = oincrement / dimensions / 10;
+ else v = 0;
+
+ if(v == cn) fprintf(stderr, "passed.\n");
+ else {
+ fprintf(stderr, "ERROR! (expected " CALCULATED_NUMBER_FORMAT ")\n", v);
+ ret = 1;
+ }
+ }
+ }
+
+ if(ret)
+ fprintf(stderr, "\n\nUNIT TEST(%ld, %ld) FAILED\n\n", delay, shift);
+
+ return ret;
+}
+
+#ifdef ENABLE_DBENGINE
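+// Like rrddim_set_by_pointer(), but lets the test inject an arbitrary
+// collection timestamp instead of using the wall clock.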
+static inline void rrddim_set_by_pointer_fake_time(RRDDIM *rd, collected_number value, time_t now)
+{
+ rd->last_collected_time.tv_sec = now;
+ rd->last_collected_time.tv_usec = 0;
+ rd->collected_value = value;
+ rd->updated = 1;
+
+ rd->collections_counter++;
+
+ collected_number v = (value >= 0) ? value : -value;
+ if(unlikely(v > rd->collected_value_max)) rd->collected_value_max = v;
+}
+
+static RRDHOST *dbengine_rrdhost_find_or_create(char *name)
+{
+ /* We don't want to drop metrics when generating load, we prefer to block data generation itself */
+ rrdeng_drop_metrics_under_page_cache_pressure = 0;
+
+ return rrdhost_find_or_create(
+ name
+ , name
+ , name
+ , os_type
+ , netdata_configured_timezone
+ , config_get(CONFIG_SECTION_BACKEND, "host tags", "")
+ , program_name
+ , program_version
+ , default_rrd_update_every
+ , default_rrd_history_entries
+ , RRD_MEMORY_MODE_DBENGINE
+ , default_health_enabled
+ , default_rrdpush_enabled
+ , default_rrdpush_destination
+ , default_rrdpush_api_key
+ , default_rrdpush_send_charts_matching
+ , NULL
+ );
+}
+
+// constants for test_dbengine
+static const int CHARTS = 64;
+static const int DIMS = 16; // That gives us 64 * 16 = 1024 metrics
+#define REGIONS (3) // 3 regions of update_every
+// first region update_every is 2, second is 3, third is 1
+static const int REGION_UPDATE_EVERY[REGIONS] = {2, 3, 1};
+static const int REGION_POINTS[REGIONS] = {
+ 16384, // This produces 64MiB of metric data for the first region: update_every = 2
+ 16384, // This produces 64MiB of metric data for the second region: update_every = 3
+ 16384, // This produces 64MiB of metric data for the third region: update_every = 1
+};
+static const int QUERY_BATCH = 4096;
+
+static void test_dbengine_create_charts(RRDHOST *host, RRDSET *st[CHARTS], RRDDIM *rd[CHARTS][DIMS],
+ int update_every)
+{
+ int i, j;
+ char name[101];
+
+ for (i = 0 ; i < CHARTS ; ++i) {
+ snprintfz(name, 100, "dbengine-chart-%d", i);
+
+ // create the chart
+ st[i] = rrdset_create(host, "netdata", name, name, "netdata", NULL, "Unit Testing", "a value", "unittest",
+ NULL, 1, update_every, RRDSET_TYPE_LINE);
+ rrdset_flag_set(st[i], RRDSET_FLAG_DEBUG);
+ rrdset_flag_set(st[i], RRDSET_FLAG_STORE_FIRST);
+ for (j = 0 ; j < DIMS ; ++j) {
+ snprintfz(name, 100, "dim-%d", j);
+
+ rd[i][j] = rrddim_add(st[i], name, NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ }
+ }
+
+ // Initialize DB with the very first entries
+ for (i = 0 ; i < CHARTS ; ++i) {
+ for (j = 0 ; j < DIMS ; ++j) {
+ rd[i][j]->last_collected_time.tv_sec =
+ st[i]->last_collected_time.tv_sec = st[i]->last_updated.tv_sec = 2 * API_RELATIVE_TIME_MAX - 1;
+ rd[i][j]->last_collected_time.tv_usec =
+ st[i]->last_collected_time.tv_usec = st[i]->last_updated.tv_usec = 0;
+ }
+ }
+ for (i = 0 ; i < CHARTS ; ++i) {
+ st[i]->usec_since_last_update = USEC_PER_SEC;
+
+ for (j = 0; j < DIMS; ++j) {
+ rrddim_set_by_pointer_fake_time(rd[i][j], 69, 2 * API_RELATIVE_TIME_MAX); // set first value to 69
+ }
+ rrdset_done(st[i]);
+ }
+ // Flush pages so subsequent real values land on fresh pages
+ for (i = 0 ; i < CHARTS ; ++i) {
+ for (j = 0; j < DIMS; ++j) {
+ rrdeng_store_metric_flush_current_page(rd[i][j]);
+ }
+ }
+}
+
+// Feeds the database region with test data, returns last timestamp of region
+static time_t test_dbengine_create_metrics(RRDSET *st[CHARTS], RRDDIM *rd[CHARTS][DIMS],
+ int current_region, time_t time_start)
+{
+ time_t time_now;
+ int i, j, c, update_every;
+ collected_number next;
+
+ update_every = REGION_UPDATE_EVERY[current_region];
+ time_now = time_start + update_every;
+ // feed it with the test data
+ for (i = 0 ; i < CHARTS ; ++i) {
+ for (j = 0 ; j < DIMS ; ++j) {
+ rd[i][j]->last_collected_time.tv_sec =
+ st[i]->last_collected_time.tv_sec = st[i]->last_updated.tv_sec = time_now;
+ rd[i][j]->last_collected_time.tv_usec =
+ st[i]->last_collected_time.tv_usec = st[i]->last_updated.tv_usec = 0;
+ }
+ }
+ for (c = 0; c < REGION_POINTS[current_region] ; ++c) {
+ time_now += update_every; // time_now = start + (c + 2) * update_every
+ for (i = 0 ; i < CHARTS ; ++i) {
+ st[i]->usec_since_last_update = USEC_PER_SEC * update_every;
+
+ for (j = 0; j < DIMS; ++j) {
+ next = ((collected_number)i * DIMS) * REGION_POINTS[current_region] +
+ j * REGION_POINTS[current_region] + c;
+ rrddim_set_by_pointer_fake_time(rd[i][j], next, time_now);
+ }
+ rrdset_done(st[i]);
+ }
+ }
+ return time_now; //time_end
+}
+
+// Checks the metric data for the given region, returns number of errors
+static int test_dbengine_check_metrics(RRDSET *st[CHARTS], RRDDIM *rd[CHARTS][DIMS],
+ int current_region, time_t time_start)
+{
+ uint8_t same;
+ time_t time_now, time_retrieved;
+ int i, j, k, c, errors, update_every;
+ collected_number last;
+ calculated_number value, expected;
+ storage_number n;
+ struct rrddim_query_handle handle;
+
+ update_every = REGION_UPDATE_EVERY[current_region];
+ errors = 0;
+
+ // check the result
+ for (c = 0; c < REGION_POINTS[current_region] ; c += QUERY_BATCH) {
+ time_now = time_start + (c + 2) * update_every;
+ for (i = 0 ; i < CHARTS ; ++i) {
+ for (j = 0; j < DIMS; ++j) {
+ rd[i][j]->state->query_ops.init(rd[i][j], &handle, time_now, time_now + QUERY_BATCH * update_every);
+ for (k = 0; k < QUERY_BATCH; ++k) {
+ last = ((collected_number)i * DIMS) * REGION_POINTS[current_region] +
+ j * REGION_POINTS[current_region] + c + k;
+ expected = unpack_storage_number(pack_storage_number((calculated_number)last, SN_EXISTS));
+
+ n = rd[i][j]->state->query_ops.next_metric(&handle, &time_retrieved);
+ value = unpack_storage_number(n);
+
+ same = (calculated_number_round(value) == calculated_number_round(expected)) ? 1 : 0;
+ if(!same) {
+ fprintf(stderr, " DB-engine unittest %s/%s: at %lu secs, expecting value "
+ CALCULATED_NUMBER_FORMAT ", found " CALCULATED_NUMBER_FORMAT ", ### E R R O R ###\n",
+ st[i]->name, rd[i][j]->name, (unsigned long)time_now + k * update_every, expected, value);
+ errors++;
+ }
+ if(time_retrieved != time_now + k * update_every) {
+ fprintf(stderr, " DB-engine unittest %s/%s: at %lu secs, found timestamp %lu ### E R R O R ###\n",
+ st[i]->name, rd[i][j]->name, (unsigned long)time_now + k * update_every, (unsigned long)time_retrieved);
+ errors++;
+ }
+ }
+ rd[i][j]->state->query_ops.finalize(&handle);
+ }
+ }
+ }
+ return errors;
+}
+
+// Check rrdr transformations
+static int test_dbengine_check_rrdr(RRDSET *st[CHARTS], RRDDIM *rd[CHARTS][DIMS],
+ int current_region, time_t time_start, time_t time_end)
+{
+ uint8_t same;
+ time_t time_now, time_retrieved;
+ int i, j, errors, update_every;
+ long c;
+ collected_number last;
+ calculated_number value, expected;
+
+ errors = 0;
+ update_every = REGION_UPDATE_EVERY[current_region];
+ long points = (time_end - time_start) / update_every - 1;
+ for (i = 0 ; i < CHARTS ; ++i) {
+ RRDR *r = rrd2rrdr(st[i], points, time_start + update_every, time_end, RRDR_GROUPING_AVERAGE, 0, 0, NULL, NULL);
+ if (!r) {
+ fprintf(stderr, " DB-engine unittest %s: empty RRDR ### E R R O R ###\n", st[i]->name);
+ return ++errors;
+ } else {
+ assert(r->st == st[i]);
+ for (c = 0; c != rrdr_rows(r) ; ++c) {
+ RRDDIM *d;
+ time_now = time_start + (c + 2) * update_every;
+ time_retrieved = r->t[c];
+
+ // for each dimension
+ for (j = 0, d = r->st->dimensions ; d && j < r->d ; ++j, d = d->next) {
+ calculated_number *cn = &r->v[ c * r->d ];
+ value = cn[j];
+ assert(rd[i][j] == d);
+
+ last = i * DIMS * REGION_POINTS[current_region] + j * REGION_POINTS[current_region] + c;
+ expected = unpack_storage_number(pack_storage_number((calculated_number)last, SN_EXISTS));
+
+ same = (calculated_number_round(value) == calculated_number_round(expected)) ? 1 : 0;
+ if(!same) {
+ fprintf(stderr, " DB-engine unittest %s/%s: at %lu secs, expecting value "
+ CALCULATED_NUMBER_FORMAT ", RRDR found " CALCULATED_NUMBER_FORMAT ", ### E R R O R ###\n",
+ st[i]->name, rd[i][j]->name, (unsigned long)time_now, expected, value);
+ errors++;
+ }
+ if(time_retrieved != time_now) {
+ fprintf(stderr, " DB-engine unittest %s/%s: at %lu secs, found RRDR timestamp %lu ### E R R O R ###\n",
+ st[i]->name, rd[i][j]->name, (unsigned long)time_now, (unsigned long)time_retrieved);
+ errors++;
+ }
+ }
+ }
+ rrdr_free(r);
+ }
+ }
+ return errors;
+}
+
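+// End-to-end DB-engine test: write the three regions described above
+// (update_every of 2, 3 and then 1 seconds), validate each region with direct
+// queries and with RRDR, and finally run one RRDR query spanning all regions.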
+int test_dbengine(void)
+{
+ int i, j, errors, update_every, current_region;
+ RRDHOST *host = NULL;
+ RRDSET *st[CHARTS];
+ RRDDIM *rd[CHARTS][DIMS];
+ time_t time_start[REGIONS], time_end[REGIONS];
+
+ error_log_limit_unlimited();
+ fprintf(stderr, "\nRunning DB-engine test\n");
+
+ default_rrd_memory_mode = RRD_MEMORY_MODE_DBENGINE;
+
+ fprintf(stderr, "Initializing localhost with hostname 'unittest-dbengine'");
+ host = dbengine_rrdhost_find_or_create("unittest-dbengine");
+ if (NULL == host)
+ return 1;
+
+ current_region = 0; // this is the first region of data
+ update_every = REGION_UPDATE_EVERY[current_region]; // set data collection frequency to 2 seconds
+ test_dbengine_create_charts(host, st, rd, update_every);
+
+ time_start[current_region] = 2 * API_RELATIVE_TIME_MAX;
+ time_end[current_region] = test_dbengine_create_metrics(st,rd, current_region, time_start[current_region]);
+
+ errors = test_dbengine_check_metrics(st, rd, current_region, time_start[current_region]);
+ if (errors)
+ goto error_out;
+
+ current_region = 1; //this is the second region of data
+ update_every = REGION_UPDATE_EVERY[current_region]; // set data collection frequency to 3 seconds
+ // Align pages for frequency change
+ for (i = 0 ; i < CHARTS ; ++i) {
+ st[i]->update_every = update_every;
+ for (j = 0; j < DIMS; ++j) {
+ rrdeng_store_metric_flush_current_page(rd[i][j]);
+ }
+ }
+
+ time_start[current_region] = time_end[current_region - 1] + update_every;
+ if (0 != time_start[current_region] % update_every) // align to update_every
+ time_start[current_region] += update_every - time_start[current_region] % update_every;
+ time_end[current_region] = test_dbengine_create_metrics(st,rd, current_region, time_start[current_region]);
+
+ errors = test_dbengine_check_metrics(st, rd, current_region, time_start[current_region]);
+ if (errors)
+ goto error_out;
+
+ current_region = 2; //this is the third region of data
+ update_every = REGION_UPDATE_EVERY[current_region]; // set data collection frequency to 1 second
+ // Align pages for frequency change
+ for (i = 0 ; i < CHARTS ; ++i) {
+ st[i]->update_every = update_every;
+ for (j = 0; j < DIMS; ++j) {
+ rrdeng_store_metric_flush_current_page(rd[i][j]);
+ }
+ }
+
+ time_start[current_region] = time_end[current_region - 1] + update_every;
+ if (0 != time_start[current_region] % update_every) // align to update_every
+ time_start[current_region] += update_every - time_start[current_region] % update_every;
+ time_end[current_region] = test_dbengine_create_metrics(st,rd, current_region, time_start[current_region]);
+
+ errors = test_dbengine_check_metrics(st, rd, current_region, time_start[current_region]);
+ if (errors)
+ goto error_out;
+
+ for (current_region = 0 ; current_region < REGIONS ; ++current_region) {
+ errors = test_dbengine_check_rrdr(st, rd, current_region, time_start[current_region], time_end[current_region]);
+ if (errors)
+ goto error_out;
+ }
+
+ current_region = 1;
+ update_every = REGION_UPDATE_EVERY[current_region]; // use the maximum update_every = 3
+ errors = 0;
+ long points = (time_end[REGIONS - 1] - time_start[0]) / update_every - 1; // cover all time regions with RRDR
+ long point_offset = (time_start[current_region] - time_start[0]) / update_every;
+ for (i = 0 ; i < CHARTS ; ++i) {
+ RRDR *r = rrd2rrdr(st[i], points, time_start[0] + update_every, time_end[REGIONS - 1], RRDR_GROUPING_AVERAGE, 0, 0, NULL, NULL);
+ if (!r) {
+ fprintf(stderr, " DB-engine unittest %s: empty RRDR ### E R R O R ###\n", st[i]->name);
+ ++errors;
+ } else {
+ long c;
+
+ assert(r->st == st[i]);
+ // test current region values only, since they must be left unchanged
+ for (c = point_offset ; c < point_offset + rrdr_rows(r) / REGIONS / 2 ; ++c) {
+ RRDDIM *d;
+ time_t time_now = time_start[current_region] + (c - point_offset + 2) * update_every;
+ time_t time_retrieved = r->t[c];
+
+ // for each dimension
+ for(j = 0, d = r->st->dimensions ; d && j < r->d ; ++j, d = d->next) {
+ calculated_number *cn = &r->v[ c * r->d ];
+ calculated_number value = cn[j];
+ assert(rd[i][j] == d);
+
+ collected_number last = i * DIMS * REGION_POINTS[current_region] + j * REGION_POINTS[current_region] + c - point_offset;
+ calculated_number expected = unpack_storage_number(pack_storage_number((calculated_number)last, SN_EXISTS));
+
+ uint8_t same = (calculated_number_round(value) == calculated_number_round(expected)) ? 1 : 0;
+ if(!same) {
+ fprintf(stderr, " DB-engine unittest %s/%s: at %lu secs, expecting value "
+ CALCULATED_NUMBER_FORMAT ", RRDR found " CALCULATED_NUMBER_FORMAT ", ### E R R O R ###\n",
+ st[i]->name, rd[i][j]->name, (unsigned long)time_now, expected, value);
+ errors++;
+ }
+ if(time_retrieved != time_now) {
+ fprintf(stderr, " DB-engine unittest %s/%s: at %lu secs, found RRDR timestamp %lu ### E R R O R ###\n",
+ st[i]->name, rd[i][j]->name, (unsigned long)time_now, (unsigned long)time_retrieved);
+ errors++;
+ }
+ }
+ }
+ rrdr_free(r);
+ }
+ }
+error_out:
+ rrd_wrlock();
+ rrdeng_prepare_exit(host->rrdeng_ctx);
+ rrdhost_delete_charts(host);
+ rrdeng_exit(host->rrdeng_ctx);
+ rrd_unlock();
+
+ return errors;
+}
+
+struct dbengine_chart_thread {
+ uv_thread_t thread;
+ RRDHOST *host;
+ char *chartname; /* Will be prefixed by type, e.g. "example_local1.", "example_local2." etc */
+ unsigned dset_charts; /* number of charts */
+ unsigned dset_dims; /* dimensions per chart */
+ unsigned chart_i; /* current chart offset */
+ time_t time_present; /* current virtual time of the benchmark */
+ volatile time_t time_max; /* latest timestamp of stored values */
+ unsigned history_seconds; /* how far back in the past to go */
+
+ volatile long done; /* initialize to 0, set to 1 to stop thread */
+ struct completion charts_initialized;
+ unsigned long errors, stored_metrics_nr; /* statistics */
+
+ RRDSET *st;
+ RRDDIM *rd[]; /* dset_dims elements */
+};
+
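+// Deterministic value generator, so readers can validate without storing the
+// dataset: value = (t * (chart_i + 1) + t * (dim_i + 1)) % 1024.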
+collected_number generate_dbengine_chart_value(int chart_i, int dim_i, time_t time_current)
+{
+ collected_number value;
+
+ value = ((collected_number)time_current) * (chart_i + 1);
+ value += ((collected_number)time_current) * (dim_i + 1);
+ value %= 1024LLU;
+
+ return value;
+}
+
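+// Writer thread: creates one chart with dset_dims dimensions and back-fills
+// history_seconds of per-second values up to time_present, signalling
+// charts_initialized as soon as the chart exists so queriers can start.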
+static void generate_dbengine_chart(void *arg)
+{
+ struct dbengine_chart_thread *thread_info = (struct dbengine_chart_thread *)arg;
+ RRDHOST *host = thread_info->host;
+ char *chartname = thread_info->chartname;
+ const unsigned DSET_DIMS = thread_info->dset_dims;
+ unsigned history_seconds = thread_info->history_seconds;
+ time_t time_present = thread_info->time_present;
+
+ unsigned j, update_every = 1;
+ RRDSET *st;
+ RRDDIM *rd[DSET_DIMS];
+ char name[RRD_ID_LENGTH_MAX + 1];
+ time_t time_current;
+
+ // create the chart
+ snprintfz(name, RRD_ID_LENGTH_MAX, "example_local%u", thread_info->chart_i + 1);
+ thread_info->st = st = rrdset_create(host, name, chartname, chartname, "example", NULL, chartname, chartname,
+ chartname, NULL, 1, update_every, RRDSET_TYPE_LINE);
+ for (j = 0 ; j < DSET_DIMS ; ++j) {
+ snprintfz(name, RRD_ID_LENGTH_MAX, "%s%u", chartname, j + 1);
+
+ thread_info->rd[j] = rd[j] = rrddim_add(st, name, NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ }
+ complete(&thread_info->charts_initialized);
+
+ // feed it with the test data
+ time_current = time_present - history_seconds;
+ for (j = 0 ; j < DSET_DIMS ; ++j) {
+ rd[j]->last_collected_time.tv_sec =
+ st->last_collected_time.tv_sec = st->last_updated.tv_sec = time_current - update_every;
+ rd[j]->last_collected_time.tv_usec =
+ st->last_collected_time.tv_usec = st->last_updated.tv_usec = 0;
+ }
+ for( ; !thread_info->done && time_current < time_present ; time_current += update_every) {
+ st->usec_since_last_update = USEC_PER_SEC * update_every;
+
+ for (j = 0; j < DSET_DIMS; ++j) {
+ collected_number value;
+
+ value = generate_dbengine_chart_value(thread_info->chart_i, j, time_current);
+ rrddim_set_by_pointer_fake_time(rd[j], value, time_current);
+ ++thread_info->stored_metrics_nr;
+ }
+ rrdset_done(st);
+ thread_info->time_max = time_current;
+ }
+ for (j = 0; j < DSET_DIMS; ++j) {
+ rrdeng_store_metric_finalize(rd[j]);
+ }
+}
+
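+// Populate a benchmark dataset: 16 charts x 128 dimensions, one writer thread
+// per chart, with the disk quota sized for the worst case minus the expected
+// compression savings.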
+void generate_dbengine_dataset(unsigned history_seconds)
+{
+ const int DSET_CHARTS = 16;
+ const int DSET_DIMS = 128;
+ const uint64_t EXPECTED_COMPRESSION_RATIO = 20;
+ RRDHOST *host = NULL;
+ struct dbengine_chart_thread **thread_info;
+ int i;
+ time_t time_present;
+
+ default_rrd_memory_mode = RRD_MEMORY_MODE_DBENGINE;
+ default_rrdeng_page_cache_mb = 128;
+ // Worst case for incompressible data
+ default_rrdeng_disk_quota_mb = (((uint64_t)DSET_DIMS * DSET_CHARTS) * sizeof(storage_number) * history_seconds) /
+ (1024 * 1024);
+ default_rrdeng_disk_quota_mb -= default_rrdeng_disk_quota_mb * EXPECTED_COMPRESSION_RATIO / 100;
+
+ error_log_limit_unlimited();
+ fprintf(stderr, "Initializing localhost with hostname 'dbengine-dataset'");
+
+ host = dbengine_rrdhost_find_or_create("dbengine-dataset");
+ if (NULL == host)
+ return;
+
+ thread_info = mallocz(sizeof(*thread_info) * DSET_CHARTS);
+ for (i = 0 ; i < DSET_CHARTS ; ++i) {
+ thread_info[i] = mallocz(sizeof(*thread_info[i]) + sizeof(RRDDIM *) * DSET_DIMS);
+ }
+ fprintf(stderr, "\nRunning DB-engine workload generator\n");
+
+ time_present = now_realtime_sec();
+ for (i = 0 ; i < DSET_CHARTS ; ++i) {
+ thread_info[i]->host = host;
+ thread_info[i]->chartname = "random";
+ thread_info[i]->dset_charts = DSET_CHARTS;
+ thread_info[i]->chart_i = i;
+ thread_info[i]->dset_dims = DSET_DIMS;
+ thread_info[i]->history_seconds = history_seconds;
+ thread_info[i]->time_present = time_present;
+ thread_info[i]->time_max = 0;
+ thread_info[i]->done = 0;
+ init_completion(&thread_info[i]->charts_initialized);
+ assert(0 == uv_thread_create(&thread_info[i]->thread, generate_dbengine_chart, thread_info[i]));
+ wait_for_completion(&thread_info[i]->charts_initialized);
+ destroy_completion(&thread_info[i]->charts_initialized);
+ }
+ for (i = 0 ; i < DSET_CHARTS ; ++i) {
+ assert(0 == uv_thread_join(&thread_info[i]->thread));
+ }
+
+ for (i = 0 ; i < DSET_CHARTS ; ++i) {
+ freez(thread_info[i]);
+ }
+ freez(thread_info);
+ rrd_wrlock();
+ rrdhost_free(host);
+ rrd_unlock();
+}
+
+struct dbengine_query_thread {
+ uv_thread_t thread;
+ RRDHOST *host;
+ char *chartname; /* Will be prefixed by type, e.g. "example_local1.", "example_local2." etc */
+ unsigned dset_charts; /* number of charts */
+ unsigned dset_dims; /* dimensions per chart */
+ time_t time_present; /* current virtual time of the benchmark */
+ unsigned history_seconds; /* how far back in the past to go */
+ volatile long done; /* initialize to 0, set to 1 to stop thread */
+ unsigned long errors, queries_nr, queried_metrics_nr; /* statistics */
+ uint8_t delete_old_data; /* if non-zero, data is deleted when disk space is exhausted */
+
+ struct dbengine_chart_thread *chart_threads[]; /* dset_charts elements */
+};
+
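+// Reader thread: repeatedly queries a random chart/dimension over a random
+// window of up to one hour and validates both values and timestamps against
+// generate_dbengine_chart_value(). When delete_old_data is set, gaps are
+// expected (old pages may have been evicted), so validation is skipped.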
+static void query_dbengine_chart(void *arg)
+{
+ struct dbengine_query_thread *thread_info = (struct dbengine_query_thread *)arg;
+ const int DSET_CHARTS = thread_info->dset_charts;
+ const int DSET_DIMS = thread_info->dset_dims;
+ time_t time_after, time_before, time_min, time_approx_min, time_max, duration;
+ int i, j, update_every = 1;
+ RRDSET *st;
+ RRDDIM *rd;
+ uint8_t same;
+ time_t time_now, time_retrieved;
+ collected_number generatedv;
+ calculated_number value, expected;
+ storage_number n;
+ struct rrddim_query_handle handle;
+
+ do {
+ // pick a chart and dimension
+ i = random() % DSET_CHARTS;
+ st = thread_info->chart_threads[i]->st;
+ j = random() % DSET_DIMS;
+ rd = thread_info->chart_threads[i]->rd[j];
+
+ time_min = thread_info->time_present - thread_info->history_seconds + 1;
+ time_max = thread_info->chart_threads[i]->time_max;
+
+ if (thread_info->delete_old_data) {
+ /* A time window of twice the disk space is sufficient for compression space savings of up to 50% */
+ time_approx_min = time_max - (default_rrdeng_disk_quota_mb * 2 * 1024 * 1024) /
+ (((uint64_t) DSET_DIMS * DSET_CHARTS) * sizeof(storage_number));
+ time_min = MAX(time_min, time_approx_min);
+ }
+ if (!time_max) {
+ time_before = time_after = time_min;
+ } else {
+ time_after = time_min + random() % (MAX(time_max - time_min, 1));
+ duration = random() % 3600;
+ time_before = MIN(time_after + duration, time_max); /* up to 1 hour queries */
+ }
+
+ rd->state->query_ops.init(rd, &handle, time_after, time_before);
+ ++thread_info->queries_nr;
+ for (time_now = time_after ; time_now <= time_before ; time_now += update_every) {
+ generatedv = generate_dbengine_chart_value(i, j, time_now);
+ expected = unpack_storage_number(pack_storage_number((calculated_number) generatedv, SN_EXISTS));
+
+ if (unlikely(rd->state->query_ops.is_finished(&handle))) {
+ if (!thread_info->delete_old_data) { /* data validation only when we don't delete */
+ fprintf(stderr, " DB-engine stresstest %s/%s: at %lu secs, expecting value "
+ CALCULATED_NUMBER_FORMAT ", found data gap, ### E R R O R ###\n",
+ st->name, rd->name, (unsigned long) time_now, expected);
+ ++thread_info->errors;
+ }
+ break;
+ }
+ n = rd->state->query_ops.next_metric(&handle, &time_retrieved);
+ if (SN_EMPTY_SLOT == n) {
+ if (!thread_info->delete_old_data) { /* data validation only when we don't delete */
+ fprintf(stderr, " DB-engine stresstest %s/%s: at %lu secs, expecting value "
+ CALCULATED_NUMBER_FORMAT ", found data gap, ### E R R O R ###\n",
+ st->name, rd->name, (unsigned long) time_now, expected);
+ ++thread_info->errors;
+ }
+ break;
+ }
+ ++thread_info->queried_metrics_nr;
+ value = unpack_storage_number(n);
+
+ same = (calculated_number_round(value) == calculated_number_round(expected)) ? 1 : 0;
+ if (!same) {
+ if (!thread_info->delete_old_data) { /* data validation only when we don't delete */
+ fprintf(stderr, " DB-engine stresstest %s/%s: at %lu secs, expecting value "
+ CALCULATED_NUMBER_FORMAT ", found " CALCULATED_NUMBER_FORMAT
+ ", ### E R R O R ###\n",
+ st->name, rd->name, (unsigned long) time_now, expected, value);
+ ++thread_info->errors;
+ }
+ }
+ if (time_retrieved != time_now) {
+ if (!thread_info->delete_old_data) { /* data validation only when we don't delete */
+ fprintf(stderr,
+ " DB-engine stresstest %s/%s: at %lu secs, found timestamp %lu ### E R R O R ###\n",
+ st->name, rd->name, (unsigned long) time_now, (unsigned long) time_retrieved);
+ ++thread_info->errors;
+ }
+ }
+ }
+ rd->state->query_ops.finalize(&handle);
+ } while(!thread_info->done);
+}
+
+void dbengine_stress_test(unsigned TEST_DURATION_SEC, unsigned DSET_CHARTS, unsigned QUERY_THREADS,
+ unsigned RAMP_UP_SECONDS, unsigned PAGE_CACHE_MB, unsigned DISK_SPACE_MB)
+{
+ const unsigned DSET_DIMS = 128;
+ const uint64_t EXPECTED_COMPRESSION_RATIO = 20;
+    const unsigned HISTORY_SECONDS = 3600 * 24 * 365 * 50; /* 50 years of history */
+ RRDHOST *host = NULL;
+ struct dbengine_chart_thread **chart_threads;
+ struct dbengine_query_thread **query_threads;
+ unsigned i, j;
+ time_t time_start, test_duration;
+
+ error_log_limit_unlimited();
+
+ if (!TEST_DURATION_SEC)
+ TEST_DURATION_SEC = 10;
+ if (!DSET_CHARTS)
+ DSET_CHARTS = 1;
+ if (!QUERY_THREADS)
+ QUERY_THREADS = 1;
+ if (PAGE_CACHE_MB < RRDENG_MIN_PAGE_CACHE_SIZE_MB)
+ PAGE_CACHE_MB = RRDENG_MIN_PAGE_CACHE_SIZE_MB;
+
+ default_rrd_memory_mode = RRD_MEMORY_MODE_DBENGINE;
+ default_rrdeng_page_cache_mb = PAGE_CACHE_MB;
+ if (DISK_SPACE_MB) {
+        fprintf(stderr, "Setting a disk space limit allows data to be deleted. "
+                "Data validation is turned off for this run.\n");
+ default_rrdeng_disk_quota_mb = DISK_SPACE_MB;
+ } else {
+        // Worst case for incompressible data
+ default_rrdeng_disk_quota_mb =
+ (((uint64_t) DSET_DIMS * DSET_CHARTS) * sizeof(storage_number) * HISTORY_SECONDS) / (1024 * 1024);
+ default_rrdeng_disk_quota_mb -= default_rrdeng_disk_quota_mb * EXPECTED_COMPRESSION_RATIO / 100;
+ }
+
+ fprintf(stderr, "Initializing localhost with hostname 'dbengine-stress-test'\n");
+
+ host = dbengine_rrdhost_find_or_create("dbengine-stress-test");
+ if (NULL == host)
+ return;
+
+ chart_threads = mallocz(sizeof(*chart_threads) * DSET_CHARTS);
+ for (i = 0 ; i < DSET_CHARTS ; ++i) {
+ chart_threads[i] = mallocz(sizeof(*chart_threads[i]) + sizeof(RRDDIM *) * DSET_DIMS);
+ }
+ query_threads = mallocz(sizeof(*query_threads) * QUERY_THREADS);
+ for (i = 0 ; i < QUERY_THREADS ; ++i) {
+ query_threads[i] = mallocz(sizeof(*query_threads[i]) + sizeof(struct dbengine_chart_thread *) * DSET_CHARTS);
+ }
+    fprintf(stderr, "\nRunning DB-engine stress test, %u seconds of writer ramp-up time,\n"
+ "%u seconds of concurrent readers and writers, %u writer threads, %u reader threads,\n"
+ "%u MiB of page cache.\n",
+ RAMP_UP_SECONDS, TEST_DURATION_SEC, DSET_CHARTS, QUERY_THREADS, PAGE_CACHE_MB);
+
+ time_start = now_realtime_sec() + HISTORY_SECONDS; /* move history to the future */
+ for (i = 0 ; i < DSET_CHARTS ; ++i) {
+ chart_threads[i]->host = host;
+ chart_threads[i]->chartname = "random";
+ chart_threads[i]->dset_charts = DSET_CHARTS;
+ chart_threads[i]->chart_i = i;
+ chart_threads[i]->dset_dims = DSET_DIMS;
+ chart_threads[i]->history_seconds = HISTORY_SECONDS;
+ chart_threads[i]->time_present = time_start;
+ chart_threads[i]->time_max = 0;
+ chart_threads[i]->done = 0;
+ chart_threads[i]->errors = chart_threads[i]->stored_metrics_nr = 0;
+ init_completion(&chart_threads[i]->charts_initialized);
+ assert(0 == uv_thread_create(&chart_threads[i]->thread, generate_dbengine_chart, chart_threads[i]));
+ }
+ /* barrier so that subsequent queries can access valid chart data */
+ for (i = 0 ; i < DSET_CHARTS ; ++i) {
+ wait_for_completion(&chart_threads[i]->charts_initialized);
+ destroy_completion(&chart_threads[i]->charts_initialized);
+ }
+ sleep(RAMP_UP_SECONDS);
+    /* at this point, data have already begun to be written to the database */
+ for (i = 0 ; i < QUERY_THREADS ; ++i) {
+ query_threads[i]->host = host;
+ query_threads[i]->chartname = "random";
+ query_threads[i]->dset_charts = DSET_CHARTS;
+ query_threads[i]->dset_dims = DSET_DIMS;
+ query_threads[i]->history_seconds = HISTORY_SECONDS;
+ query_threads[i]->time_present = time_start;
+ query_threads[i]->done = 0;
+ query_threads[i]->errors = query_threads[i]->queries_nr = query_threads[i]->queried_metrics_nr = 0;
+ for (j = 0 ; j < DSET_CHARTS ; ++j) {
+ query_threads[i]->chart_threads[j] = chart_threads[j];
+ }
+ query_threads[i]->delete_old_data = DISK_SPACE_MB ? 1 : 0;
+ assert(0 == uv_thread_create(&query_threads[i]->thread, query_dbengine_chart, query_threads[i]));
+ }
+ sleep(TEST_DURATION_SEC);
+ /* stop workload */
+ for (i = 0 ; i < DSET_CHARTS ; ++i) {
+ chart_threads[i]->done = 1;
+ }
+ for (i = 0 ; i < QUERY_THREADS ; ++i) {
+ query_threads[i]->done = 1;
+ }
+ for (i = 0 ; i < DSET_CHARTS ; ++i) {
+ assert(0 == uv_thread_join(&chart_threads[i]->thread));
+ }
+ for (i = 0 ; i < QUERY_THREADS ; ++i) {
+ assert(0 == uv_thread_join(&query_threads[i]->thread));
+ }
+ test_duration = now_realtime_sec() - (time_start - HISTORY_SECONDS);
+ if (!test_duration)
+ test_duration = 1;
+ fprintf(stderr, "\nDB-engine stress test finished in %ld seconds.\n", test_duration);
+ unsigned long stored_metrics_nr = 0;
+ for (i = 0 ; i < DSET_CHARTS ; ++i) {
+ stored_metrics_nr += chart_threads[i]->stored_metrics_nr;
+ }
+ unsigned long queries_nr = 0, queried_metrics_nr = 0;
+ for (i = 0 ; i < QUERY_THREADS ; ++i) {
+ queries_nr += query_threads[i]->queries_nr;
+ queried_metrics_nr += query_threads[i]->queried_metrics_nr;
+ }
+ fprintf(stderr, "%u metrics were stored (dataset size of %lu MiB) in %u charts by 1 writer thread per chart.\n",
+ DSET_CHARTS * DSET_DIMS, stored_metrics_nr * sizeof(storage_number) / (1024 * 1024), DSET_CHARTS);
+    fprintf(stderr, "Metrics were generated at one data point per emulated second, with time accelerated.\n");
+ fprintf(stderr, "%lu metric data points were queried by %u reader threads.\n", queried_metrics_nr, QUERY_THREADS);
+ fprintf(stderr, "Query starting time is randomly chosen from the beginning of the time-series up to the time of\n"
+ "the latest data point, and ending time from 1 second up to 1 hour after the starting time.\n");
+ fprintf(stderr, "Performance is %lu written data points/sec and %lu read data points/sec.\n",
+ stored_metrics_nr / test_duration, queried_metrics_nr / test_duration);
+
+ for (i = 0 ; i < DSET_CHARTS ; ++i) {
+ freez(chart_threads[i]);
+ }
+ freez(chart_threads);
+ for (i = 0 ; i < QUERY_THREADS ; ++i) {
+ freez(query_threads[i]);
+ }
+ freez(query_threads);
+ rrd_wrlock();
+ rrdeng_prepare_exit(host->rrdeng_ctx);
+ rrdhost_delete_charts(host);
+ rrdeng_exit(host->rrdeng_ctx);
+ rrd_unlock();
+}
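+
+/* Example invocation, loosely matching the workload described in
+ * database/engine/README.md (the ramp-up value is illustrative):
+ *
+ *   dbengine_stress_test(60, 32, 32, 5, 64, 0);
+ *
+ * i.e. 32 writer threads (one per chart) and 32 reader threads for 60
+ * seconds, with a 64 MiB page cache and no disk quota, so data validation
+ * stays enabled. */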
+
+#endif
diff --git a/daemon/unit_test.h b/daemon/unit_test.h
new file mode 100644
index 0000000..79d415b
--- /dev/null
+++ b/daemon/unit_test.h
@@ -0,0 +1,19 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_UNIT_TEST_H
+#define NETDATA_UNIT_TEST_H 1
+
+extern int unit_test_storage(void);
+extern int unit_test(long delay, long shift);
+extern int run_all_mockup_tests(void);
+extern int unit_test_str2ld(void);
+extern int unit_test_buffer(void);
+#ifdef ENABLE_DBENGINE
+extern int test_dbengine(void);
+extern void generate_dbengine_dataset(unsigned history_seconds);
+extern void dbengine_stress_test(unsigned TEST_DURATION_SEC, unsigned DSET_CHARTS, unsigned QUERY_THREADS,
+ unsigned RAMP_UP_SECONDS, unsigned PAGE_CACHE_MB, unsigned DISK_SPACE_MB);
+
+#endif
+
+#endif /* NETDATA_UNIT_TEST_H */
diff --git a/database/Makefile.am b/database/Makefile.am
new file mode 100644
index 0000000..9ab85b4
--- /dev/null
+++ b/database/Makefile.am
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+SUBDIRS = \
+ engine \
+ $(NULL)
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/database/README.md b/database/README.md
new file mode 100644
index 0000000..9fef705
--- /dev/null
+++ b/database/README.md
@@ -0,0 +1,215 @@
+<!--
+title: "Database"
+description: "The Netdata Agent leverages multiple, user-configurable time-series databases that use RAM and/or disk to store metrics on any type of node."
+custom_edit_url: https://github.com/netdata/netdata/edit/master/database/README.md
+-->
+
+# Database
+
+Netdata is fully capable of long-term metrics storage, at per-second granularity, via its default database engine
+(`dbengine`). But to remain as flexible as possible, Netdata supports a number of types of metrics storage:
+
+1. `dbengine` (the default), data are stored in database files. The [Database Engine](/database/engine/README.md) works like a
+ traditional database. There is some amount of RAM dedicated to data caching and indexing and the rest of the data
+ reside compressed on disk. The number of history entries is not fixed in this case, but depends on the configured
+ disk space and the effective compression ratio of the data stored. This is the **only mode** that supports changing
+ the data collection update frequency (`update_every`) **without losing** the previously stored metrics. For more
+ details see [here](/database/engine/README.md).
+
+2. `ram`, data are purely in memory. Data are never saved on disk. This mode uses `mmap()` and supports [KSM](#ksm).
+
+3. `save`, data are only in RAM while Netdata runs and are saved to / loaded from disk on Netdata
+ restart. It also uses `mmap()` and supports [KSM](#ksm).
+
+4. `map`, data are stored in memory-mapped files. This works like swap. Keep in mind, though, that it causes constant
+   writes to your disk. When Netdata writes data to its memory, the Linux kernel marks the related memory pages as
+   dirty and automatically starts updating them on disk. Unfortunately, we cannot control how frequently this happens;
+   the Linux kernel uses exactly the same algorithm it uses for its swap memory. Check below for additional
+   information on running a dedicated central Netdata server. This mode uses `mmap()` but does not support [KSM](#ksm).
+
+5. `none`, without a database (collected metrics can only be streamed to another Netdata).
+
+6. `alloc`, like `ram` but it uses `calloc()` and does not support [KSM](#ksm). This mode is the fallback for all
+ others except `none`.
+
+You can select the memory mode by editing `netdata.conf` and setting:
+
+```conf
+[global]
+ # dbengine (default), ram, save (the default if dbengine not available), map (swap like), none, alloc
+ memory mode = dbengine
+
+ # the directory where data are saved
+ cache directory = /var/cache/netdata
+```
+
+## Running Netdata in embedded devices
+
+Embedded devices usually have very limited RAM resources available.
+
+There are 2 settings for you to tweak:
+
+1. `update every`, which controls the data collection frequency
+2. `history`, which controls the size of the database in RAM (except for `memory mode = dbengine`)
+
+By default `update every = 1` and `history = 3600`. This gives you an hour of data with per second updates.
+
+If you set `update every = 2` and `history = 1800`, you will still have an hour of data, but collected once every 2
+seconds. This will **cut in half** both the CPU and RAM resources consumed by Netdata. Of course, experiment a bit. On very
+weak devices you might have to use `update every = 5` and `history = 720` (still 1 hour of data, but 1/5 of the CPU and
+RAM resources).
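+
+For example, the "very weak device" settings mentioned above would look like this in `netdata.conf`:
+
+```conf
+[global]
+    update every = 5
+    history = 720
+```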
+
+You can also disable [data collection plugins](/collectors/README.md) you don't need. Disabling such plugins will also free both
+CPU and RAM resources.
+
+## Running a dedicated central Netdata server
+
+Netdata allows streaming data between Netdata nodes. This allows us to have a central Netdata server that will maintain
+the entire database for all nodes, and will also run health checks/alarms for all nodes.
+
+For this central Netdata, memory size can be a problem. Fortunately, Netdata supports several memory modes. **One
+interesting option** for this setup is `memory mode = map`.
+
+### map
+
+In this mode, the database of Netdata is stored in memory mapped files. Netdata continues to read and write the database
+in memory, but the kernel automatically loads and saves memory pages from/to disk.
+
+**We suggest _not_ to use this mode on nodes that run other applications.** There will always be dirty memory to be
+synced and this syncing process may influence the way other applications work. This mode however is useful when we need
+a central Netdata server that would normally need huge amounts of memory. Using memory mode `map` we can overcome all
+memory restrictions.
+
+There are a few kernel options that provide finer control on the way this syncing works. But before explaining them, a
+brief introduction of how Netdata database works is needed.
+
+For each chart, Netdata maps the following files:
+
+1. `chart/main.db`, this is the file that maintains chart information. Every time data are collected for a chart, this
+ is updated.
+2. `chart/dimension_name.db`, this is the file for each dimension. At its beginning there is a header, followed by the
+ round robin database where metrics are stored.
+
+So, every time Netdata collects data, the following pages will become dirty:
+
+1. the chart file
+2. the header part of all dimension files
+3. if the collected metrics are stored far enough in the dimension file, another page will become dirty, for each
+ dimension
+
+Each page in Linux is 4KB. So, with 200 charts and 1000 dimensions, there will be 1200 to 2200 dirty 4KB pages at any
+given time. Of these, 1200 will always be dirty (the chart headers and the dimension headers) and 1000 will be dirty
+for about 1024 seconds each (4 bytes per metric, 4KB per page, so roughly 17 minutes per data page).
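+
+As a back-of-the-envelope check of the arithmetic above, here is a standalone sketch (illustrative only, not part of
+Netdata):
+
+```c
+#include <stdio.h>
+
+int main(void) {
+    const int page_size = 4096, bytes_per_metric = 4;
+    const int charts = 200, dimensions = 1000;
+
+    /* headers: one dirty page per chart file plus one per dimension file */
+    int always_dirty = charts + dimensions;           /* 1200 pages */
+    /* each dimension also keeps one data page dirty while it fills up */
+    int max_dirty = always_dirty + dimensions;        /* 2200 pages */
+    /* a data page holds 4096 / 4 = 1024 values, one per second */
+    int secs_per_page = page_size / bytes_per_metric; /* ~17 minutes */
+
+    printf("%d to %d dirty 4KB pages; ~%d s (%d min) per data page\n",
+           always_dirty, max_dirty, secs_per_page, secs_per_page / 60);
+    return 0;
+}
+```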
+
+Fortunately, the Linux kernel does not sync all this data every second. The frequency at which they are synced is controlled by
+`/proc/sys/vm/dirty_expire_centisecs` or the `sysctl` `vm.dirty_expire_centisecs`. The default on most systems is 3000
+(30 seconds).
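+
+You can inspect the current value, and change it at runtime, with `sysctl`:
+
+```sh
+sysctl vm.dirty_expire_centisecs
+sudo sysctl vm.dirty_expire_centisecs=60000
+```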
+
+On a busy server centralizing metrics from 20+ servers you will experience this:
+
+![image](https://cloud.githubusercontent.com/assets/2662304/23834750/429ab0dc-0764-11e7-821a-d7908bc881ac.png)
+
+As you can see, there is considerable stress (this is `iowait`) every 30 seconds.
+
+A simple solution is to increase this interval to 10 minutes (60000). This is the same system with the setting at 10
+minutes:
+
+![image](https://cloud.githubusercontent.com/assets/2662304/23834784/d2304f72-0764-11e7-8389-fb830ffd973a.png)
+
+Of course, setting this to 10 minutes means that data on disk might be up to 10 minutes old if you get an abnormal
+shutdown.
+
+There are 2 more options to tweak:
+
+1. `dirty_background_ratio`, by default `10`.
+2. `dirty_ratio`, by default `20`.
+
+These control the amount of memory that should be dirty for disk syncing to be triggered. On dedicated Netdata servers,
+you can use `80` and `90` respectively, so that all RAM is given to Netdata.
+
+With these settings, you can expect a little `iowait` spike once every 10 minutes and in case of system crash, data on
+disk will be up to 10 minutes old.
+
+![image](https://cloud.githubusercontent.com/assets/2662304/23835030/ba4bf506-0768-11e7-9bc6-3b23e080c69f.png)
+
+To have these settings automatically applied on boot, create the file `/etc/sysctl.d/netdata-memory.conf` with these
+contents:
+
+```conf
+vm.dirty_expire_centisecs = 60000
+vm.dirty_background_ratio = 80
+vm.dirty_ratio = 90
+vm.dirty_writeback_centisecs = 0
+```
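+
+To apply the file without rebooting, you can load it with `sysctl`:
+
+```sh
+sudo sysctl -p /etc/sysctl.d/netdata-memory.conf
+# or, on systemd systems, reload all sysctl configuration files:
+sudo sysctl --system
+```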
+
+There is another memory mode to help overcome the memory size problem. What is **most interesting for this setup** is
+`memory mode = dbengine`.
+
+### dbengine
+
+In this mode, the database of Netdata is stored in database files. The [Database Engine](/database/engine/README.md)
+works like a traditional database. There is some amount of RAM dedicated to data caching and indexing and the rest of
+the data reside compressed on disk. The number of history entries is not fixed in this case, but depends on the
+configured disk space and the effective compression ratio of the data stored.
+
+We suggest using **this** mode on nodes that also run other applications. The Database Engine uses direct I/O to avoid
+polluting the OS filesystem caches and does not generate excessive I/O traffic so as to create the minimum possible
+interference with other applications. Using memory mode `dbengine` we can overcome most memory restrictions. For more
+details see [here](/database/engine/README.md).
+
+## KSM
+
+Netdata offers all of its round robin database to the kernel for deduplication (except for `memory mode = dbengine`).
+
+In the past, KSM has been criticized for consuming a lot of CPU resources. Although this is true when KSM is used for
+deduplicating certain applications, it is not true for Netdata, since Netdata memory is written very infrequently
+(if you have 24 hours of metrics in Netdata, each byte in the in-memory database is updated just once per day).
+
+KSM is a solution that will provide 60+% memory savings to Netdata.
+
+### Enable KSM in kernel
+
+You need to run a kernel compiled with:
+
+```sh
+CONFIG_KSM=y
+```
+
+When KSM is enabled in the kernel, it is merely made available; it is up to the user to actually enable it at runtime.
+
+So, if you build a kernel with `CONFIG_KSM=y`, you will just get a few files in `/sys/kernel/mm/ksm`. Nothing else
+happens. There is no performance penalty (apart, perhaps, from the memory this code occupies in the kernel).
+
+The files that `CONFIG_KSM=y` offers include:
+
+- `/sys/kernel/mm/ksm/run` by default `0`. You have to set this to `1` for the
+ kernel to spawn `ksmd`.
+- `/sys/kernel/mm/ksm/sleep_millisecs`, by default `20`. How long `ksmd`
+  sleeps between memory scans, in milliseconds.
+- `/sys/kernel/mm/ksm/pages_to_scan`, by default `100`. The number of pages
+  `ksmd` will evaluate on each run.
+
+So, by default `ksmd` is disabled. It will not harm performance, and the administrator can control how much CPU
+`ksmd` is allowed to use.
+
+### Run `ksmd` kernel daemon
+
+To activate and run `ksmd`, run:
+
+```sh
+echo 1 >/sys/kernel/mm/ksm/run
+echo 1000 >/sys/kernel/mm/ksm/sleep_millisecs
+```
+
+With these settings, `ksmd` barely shows up in the process list (it runs once per second and evaluates 100 pages for
+deduplication).
+
+Put the above lines in your boot sequence (`/etc/rc.local` or equivalent) to have `ksmd` run at boot.
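+
+On systemd systems without an `rc.local`, a `tmpfiles.d` entry is one way to apply the same values at boot (a sketch;
+the file name is arbitrary):
+
+```conf
+# /etc/tmpfiles.d/ksm.conf
+w /sys/kernel/mm/ksm/run - - - - 1
+w /sys/kernel/mm/ksm/sleep_millisecs - - - - 1000
+```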
+
+## Monitoring Kernel Memory de-duplication performance
+
+Netdata will create charts for kernel memory de-duplication performance, like this:
+
+![image](https://cloud.githubusercontent.com/assets/2662304/11998786/eb23ae54-aab6-11e5-94d4-e848e8a5c56a.png)
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdatabase%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/database/engine/Makefile.am b/database/engine/Makefile.am
new file mode 100644
index 0000000..4340500
--- /dev/null
+++ b/database/engine/Makefile.am
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+SUBDIRS = \
+ metadata_log \
+ $(NULL)
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/database/engine/README.md b/database/engine/README.md
new file mode 100644
index 0000000..191366a
--- /dev/null
+++ b/database/engine/README.md
@@ -0,0 +1,259 @@
+<!--
+title: "Database engine"
+description: "Netdata's highly-efficient database engine uses both RAM and disk for distributed, long-term storage of per-second metrics."
+custom_edit_url: https://github.com/netdata/netdata/edit/master/database/engine/README.md
+-->
+
+# Database engine
+
+The Database Engine works like a traditional database. It dedicates a certain amount of RAM to data caching and
+indexing, while the rest of the data resides compressed on disk. Unlike other [memory modes](/database/README.md), the
+amount of historical metrics stored is based on the amount of disk space you allocate and the effective compression
+ratio, not a fixed number of metrics collected.
+
+By using both RAM and disk space, the database engine allows for long-term storage of per-second metrics inside of the
+Agent itself.
+
+In addition, the database engine is the only memory mode that supports changing the data collection update frequency
+(`update_every`) without losing the metrics your Agent already gathered and stored.
+
+## Configuration
+
+To use the database engine, open `netdata.conf` and set `memory mode` to `dbengine`.
+
+```conf
+[global]
+ memory mode = dbengine
+```
+
+To configure the database engine, look for the `page cache size` and `dbengine multihost disk space` settings in the
+`[global]` section of your `netdata.conf`. The Agent ignores the `history` setting when using the database engine.
+
+```conf
+[global]
+ page cache size = 32
+ dbengine multihost disk space = 256
+```
+
+The above values are the default values for Page Cache size and DB engine disk space quota. Both numbers are
+in **MiB**.
+
+The `page cache size` option determines the amount of RAM in **MiB** dedicated to caching Netdata metric values. The
+actual page cache size will be slightly larger than this figure—see the [memory requirements](#memory-requirements)
+section for details.
+
+The `dbengine multihost disk space` option determines the amount of disk space in **MiB** that is dedicated to storing
+Netdata metric values and all related metadata describing them. You can use the [**database engine
+calculator**](/docs/store/change-metrics-storage.md#calculate-the-system-resources-RAM-disk-space-needed-to-store-metrics)
+to correctly set `dbengine multihost disk space` based on your metrics retention policy. The calculator gives an
+accurate estimate based on how many child nodes you have, how many metrics your Agent collects, and more.
+
+### Legacy configuration
+
+The deprecated `dbengine disk space` option determines the amount of disk space in **MiB** that is dedicated to storing
+Netdata metric values per legacy database engine instance (see [details on the legacy mode](#legacy-mode) below).
+
+```conf
+[global]
+ dbengine disk space = 256
+```
+
+### Streaming metrics to the database engine
+
+When using the multihost database engine, all parent and child nodes share the same `page cache size` and `dbengine
+multihost disk space` in a single dbengine instance. The [**database engine
+calculator**](/docs/store/change-metrics-storage.md#calculate-the-system-resources-RAM-disk-space-needed-to-store-metrics)
+helps you properly set `page cache size` and `dbengine multihost disk space` on your parent node to allocate enough
+resources based on your metrics retention policy and how many child nodes you have.
+
+#### Legacy mode
+
+_For Netdata Agents earlier than v1.23.2_, the Agent on the parent node uses one dbengine instance for itself, and
+another instance for every child node it receives metrics from. If you had four streaming nodes, you would have five
+instances in total (`1 parent + 4 child nodes = 5 instances`).
+
+The Agent allocates resources for each instance separately, using the `dbengine disk space` (**deprecated**) setting. If
+`dbengine disk space` (**deprecated**) is set to the default `256`, each instance is given 256 MiB of disk space, which
+means the total disk space required to store all instances is, roughly, `256 MiB * (1 parent + 4 child nodes) = 1280 MiB`.
+
+#### Backward compatibility
+
+All existing metrics belonging to child nodes are automatically converted to legacy dbengine instances and the localhost
+metrics are transferred to the multihost dbengine instance.
+
+All new child nodes are automatically transferred to the multihost dbengine instance and share its page cache and disk
+space. If you want to migrate a child node from its legacy dbengine instance to the multihost dbengine instance, you
+must delete the instance's directory, which is located in `/var/cache/netdata/MACHINE_GUID/dbengine`, after stopping the
+Agent.
+
+##### Information
+
+For more information about setting `memory mode` on your nodes, in addition to other streaming configurations, see
+[streaming](/streaming/README.md).
+
+### Memory requirements
+
+Using memory mode `dbengine` we can overcome most memory restrictions and store a dataset that is much larger than the
+available memory.
+
+There are explicit memory requirements **per** DB engine **instance**:
+
+- The total page cache memory footprint will be an additional `#dimensions-being-collected x 4096 x 2` bytes over what
+ the user configured with `page cache size`.
+
+- An additional `#pages-on-disk x 4096 x 0.03` bytes of RAM are allocated for metadata.
+
+  - Roughly speaking, this is 3% of the uncompressed disk space taken by the DB files.
+
+  - For very highly compressible data (compression ratio > 90%) this RAM overhead is comparable to the disk space
+    footprint.
+
+An important observation is that RAM usage depends on both the `page cache size` and the `dbengine multihost disk space`
+options.
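+
+The following standalone sketch turns the formulas above into numbers; the inputs and the assumed compression ratio
+are illustrative only:
+
+```c
+#include <stdio.h>
+#include <stdint.h>
+
+int main(void) {
+    uint64_t page_cache_mb = 32;  /* the "page cache size" setting */
+    uint64_t dimensions = 2000;   /* dimensions being collected */
+    uint64_t disk_space_mb = 256; /* dbengine disk space in use */
+
+    /* page cache footprint: configured size plus 2 pages per dimension */
+    uint64_t cache = page_cache_mb * 1024 * 1024 + dimensions * 4096 * 2;
+
+    /* metadata: ~3% of the uncompressed data on disk; assume a 2:1
+     * compression ratio for this example */
+    uint64_t pages_on_disk = disk_space_mb * 1024 * 1024 * 2 / 4096;
+    uint64_t metadata = pages_on_disk * 4096 * 3 / 100;
+
+    printf("estimated RAM: %.1f MiB\n",
+           (double)(cache + metadata) / (1024 * 1024));
+    return 0;
+}
+```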
+
+You can use our [database engine
+calculator](/docs/store/change-metrics-storage.md#calculate-the-system-resources-RAM-disk-space-needed-to-store-metrics)
+to validate the memory requirements for your particular system(s) and configuration (**note:** the calculator may be out-of-date).
+
+### Disk space requirements
+
+There are explicit disk space requirements **per** DB engine **instance**:
+
+- The total disk space footprint will be the maximum between `#dimensions-being-collected x 4096 x 2` bytes or what
+ the user configured with `dbengine multihost disk space` or `dbengine disk space`.
+
+### File descriptor requirements
+
+The Database Engine may keep a **significant** number of files open per instance (e.g. per streaming child or
+parent server). When configuring your system, you should make sure there are at least 50 file descriptors available per
+`dbengine` instance.
+
+Netdata allocates 25% of the available file descriptors to its Database Engine instances. This means that only 25% of
+the file descriptors that are available to the Netdata service are accessible by dbengine instances. You should take
+that into account when configuring your service or system-wide file descriptor limits. You can roughly estimate that the
+Netdata service needs 2048 file descriptors for every 10 streaming child hosts when streaming is configured to use
+`memory mode = dbengine`.
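+
+To check the limit that actually applies to a running Netdata service:
+
+```sh
+grep "Max open files" /proc/$(pidof -s netdata)/limits
+```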
+
+If, for example, you want to allocate 65536 file descriptors to the Netdata service on a systemd system, override the
+Netdata service by running `sudo systemctl edit netdata` and creating a drop-in file with these contents:
+
+```sh
+[Service]
+LimitNOFILE=65536
+```
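+
+After saving the override, restart the service for the new limit to take effect:
+
+```sh
+sudo systemctl restart netdata
+```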
+
+For other types of service managers, you can add the line:
+
+```sh
+ulimit -n 65536
+```
+
+at the beginning of the service file. Alternatively, you can change the kernel's system-wide limits in
+`/etc/sysctl.conf`. For Linux, that would be:
+
+```conf
+fs.file-max = 65536
+```
+
+On FreeBSD and OS X, change the equivalent settings like this:
+
+```conf
+kern.maxfilesperproc=65536
+kern.maxfiles=65536
+```
+
+You can apply the settings by running `sysctl -p` or by rebooting.
+
+## Files
+
+With the DB engine memory mode the metric data are stored in database files. These files are organized in pairs, the
+datafiles and their corresponding journalfiles, e.g.:
+
+```sh
+datafile-1-0000000001.ndf
+journalfile-1-0000000001.njf
+datafile-1-0000000002.ndf
+journalfile-1-0000000002.njf
+datafile-1-0000000003.ndf
+journalfile-1-0000000003.njf
+...
+```
+
+They are located under their host's cache directory in the directory `./dbengine` (e.g. for localhost the default
+location is `/var/cache/netdata/dbengine/*`). The higher-numbered filenames contain more recent metric data. When
+Netdata is stopped, the user can safely delete some pairs of files to manually free up space.
+
+_Users should_ **back up** _their `./dbengine` folders if they consider this data to be important._ You can also set up
+one or more [exporting connectors](/exporting/README.md) to send your Netdata metrics to other databases for long-term
+storage at lower granularity.
+
+## Operation
+
+The DB engine stores chart metric values in 4096-byte pages in memory. Each chart dimension gets its own page to store
+consecutive values generated from the data collectors. Those pages comprise the **Page Cache**.
+
+When those pages fill up they are slowly compressed and flushed to disk. It can take `4096 / 4 = 1024 seconds = 17
+minutes`, for a chart dimension that is being collected every 1 second, to fill a page. Pages can be cut short when we
+stop Netdata or the DB engine instance so as to not lose the data. When we query the DB engine for data we trigger disk
+read I/O requests that fill the Page Cache with the requested pages and potentially evict cold (not recently used)
+pages.
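+
+As a sketch of the arithmetic above, the time to fill one page depends on the value size and the collection interval:
+
+```c
+#include <stdio.h>
+
+/* Time for one chart dimension to fill a 4096-byte page (4 bytes per
+ * value), for a few collection intervals. */
+int main(void) {
+    const int values_per_page = 4096 / 4; /* 1024 values */
+    int intervals[] = {1, 5, 10};         /* update_every in seconds */
+
+    for (int i = 0; i < 3; i++) {
+        int secs = values_per_page * intervals[i];
+        printf("update_every=%ds: page fills in %d s (~%d min)\n",
+               intervals[i], secs, secs / 60);
+    }
+    return 0;
+}
+```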
+
+When the disk quota is exceeded, the oldest values are removed from the DB engine in real time, by automatically
+deleting the oldest datafile and journalfile pair. Any corresponding pages residing in the Page Cache are also
+invalidated and removed. The DB engine logic tries to maintain between 10 and 20 file pairs at any point in time.
+
+The Database Engine uses direct I/O to avoid polluting the OS filesystem caches and does not generate excessive I/O
+traffic so as to create the minimum possible interference with other applications.
+
+## Evaluation
+
+We have evaluated the performance of the `dbengine` API that the netdata daemon uses internally. This is **not** the web
+API of netdata. Our benchmarks ran on a **single** `dbengine` instance, multiple of which can be running in a Netdata
+parent node. We used a server with an AMD Ryzen Threadripper 2950X 16-Core Processor and 2 disk drives, a Seagate
+Constellation ES.3 2TB magnetic HDD and a SAMSUNG MZQLB960HAJR-00007 960GB NAND Flash SSD.
+
+For our workload, we defined 32 charts with 128 metrics each, giving us a total of 4096 metrics. We defined 1 worker
+thread per chart (32 threads) that generates new data points with a data generation interval of 1 second. The time axis
+of the time-series is emulated and accelerated so that the worker threads can generate as many data points as possible
+without delays.
+
+We also defined 32 worker threads that perform queries on random metrics with semi-random time ranges. The
+starting time of the query is randomly selected between the beginning of the time-series and the time of the latest data
+point. The ending time is randomly selected between 1 second and 1 hour after the starting time. The pseudo-random
+numbers are generated with a uniform distribution.
+
+The data are written to the database at the same time as they are read from it. This is a concurrent read/write mixed
+workload with a duration of 60 seconds. The faster `dbengine` runs, the bigger the dataset size becomes since more
+data points will be generated. We set a page cache size of 64MiB for the two disk-bound scenarios. This way, the dataset
+size of the metric data is much bigger than the RAM that is being used for caching so as to trigger I/O requests most
+of the time. In our final scenario, we set the page cache size to 16 GiB. That way, the dataset fits in the page cache
+so as to avoid all disk bottlenecks.
+
+The reported numbers are the following:
+
+| device | page cache | dataset | reads/sec | writes/sec |
+| :----: | :--------: | ------: | --------: | ---------: |
+| HDD | 64 MiB | 4.1 GiB | 813K | 18.0M |
+| SSD | 64 MiB | 9.8 GiB | 1.7M | 43.0M |
+| N/A | 16 GiB | 6.8 GiB | 118.2M | 30.2M |
+
+where "reads/sec" is the number of metric data points being read from the database via its API per second and
+"writes/sec" is the number of metric data points being written to the database per second.
+
+Notice that the HDD numbers are pretty high and not much slower than the SSD numbers. This is thanks to the database
+engine design being optimized for rotating media. In the database engine disk I/O requests are:
+
+- asynchronous to mask the high I/O latency of HDDs.
+- mostly large to reduce the amount of HDD seeking time.
+- mostly sequential to reduce the amount of HDD seeking time.
+- compressed to reduce the amount of required throughput.
+
+As a result, the HDD is not thousands of times slower than the SSD, which is typical for other workloads.
+
+An interesting observation is that the CPU-bound run (16 GiB page cache) generates less data than the SSD run
+(6.8 GiB vs 9.8 GiB). The reason is that the 32 reader threads in the SSD scenario are frequently blocked by I/O and
+generate a read load of only 1.7M/sec, whereas in the CPU-bound scenario the read load is 70 times higher, at
+118.2M/sec. The much busier reader threads interfere with and slow down the writer threads, and this interference
+costs more write throughput than removing the SSD bottleneck gains.
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdatabase%2Fengine%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/database/engine/datafile.c b/database/engine/datafile.c
new file mode 100644
index 0000000..7a052f9
--- /dev/null
+++ b/database/engine/datafile.c
@@ -0,0 +1,460 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+#include "rrdengine.h"
+
+void df_extent_insert(struct extent_info *extent)
+{
+ struct rrdengine_datafile *datafile = extent->datafile;
+
+ if (likely(NULL != datafile->extents.last)) {
+ datafile->extents.last->next = extent;
+ }
+ if (unlikely(NULL == datafile->extents.first)) {
+ datafile->extents.first = extent;
+ }
+ datafile->extents.last = extent;
+}
+
+void datafile_list_insert(struct rrdengine_instance *ctx, struct rrdengine_datafile *datafile)
+{
+ if (likely(NULL != ctx->datafiles.last)) {
+ ctx->datafiles.last->next = datafile;
+ }
+ if (unlikely(NULL == ctx->datafiles.first)) {
+ ctx->datafiles.first = datafile;
+ }
+ ctx->datafiles.last = datafile;
+}
+
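+/* Only the oldest datafile may be deleted, and the list must always retain
+ * at least one datafile -- hence the invariants asserted below. */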
+void datafile_list_delete(struct rrdengine_instance *ctx, struct rrdengine_datafile *datafile)
+{
+ struct rrdengine_datafile *next;
+
+ next = datafile->next;
+ fatal_assert((NULL != next) && (ctx->datafiles.first == datafile) && (ctx->datafiles.last != datafile));
+ ctx->datafiles.first = next;
+}
+
+
+static void datafile_init(struct rrdengine_datafile *datafile, struct rrdengine_instance *ctx,
+ unsigned tier, unsigned fileno)
+{
+ fatal_assert(tier == 1);
+ datafile->tier = tier;
+ datafile->fileno = fileno;
+ datafile->file = (uv_file)0;
+ datafile->pos = 0;
+ datafile->extents.first = datafile->extents.last = NULL; /* will be populated by journalfile */
+ datafile->journalfile = NULL;
+ datafile->next = NULL;
+ datafile->ctx = ctx;
+}
+
+void generate_datafilepath(struct rrdengine_datafile *datafile, char *str, size_t maxlen)
+{
+ (void) snprintf(str, maxlen, "%s/" DATAFILE_PREFIX RRDENG_FILE_NUMBER_PRINT_TMPL DATAFILE_EXTENSION,
+ datafile->ctx->dbfiles_path, datafile->tier, datafile->fileno);
+}
+
+int close_data_file(struct rrdengine_datafile *datafile)
+{
+ struct rrdengine_instance *ctx = datafile->ctx;
+ uv_fs_t req;
+ int ret;
+ char path[RRDENG_PATH_MAX];
+
+ generate_datafilepath(datafile, path, sizeof(path));
+
+ ret = uv_fs_close(NULL, &req, datafile->file, NULL);
+ if (ret < 0) {
+ error("uv_fs_close(%s): %s", path, uv_strerror(ret));
+ ++ctx->stats.fs_errors;
+ rrd_stat_atomic_add(&global_fs_errors, 1);
+ }
+ uv_fs_req_cleanup(&req);
+
+ return ret;
+}
+
+int unlink_data_file(struct rrdengine_datafile *datafile)
+{
+ struct rrdengine_instance *ctx = datafile->ctx;
+ uv_fs_t req;
+ int ret;
+ char path[RRDENG_PATH_MAX];
+
+ generate_datafilepath(datafile, path, sizeof(path));
+
+ ret = uv_fs_unlink(NULL, &req, path, NULL);
+ if (ret < 0) {
+        error("uv_fs_unlink(%s): %s", path, uv_strerror(ret));
+ ++ctx->stats.fs_errors;
+ rrd_stat_atomic_add(&global_fs_errors, 1);
+ }
+ uv_fs_req_cleanup(&req);
+
+ ++ctx->stats.datafile_deletions;
+
+ return ret;
+}
+
+int destroy_data_file(struct rrdengine_datafile *datafile)
+{
+ struct rrdengine_instance *ctx = datafile->ctx;
+ uv_fs_t req;
+ int ret;
+ char path[RRDENG_PATH_MAX];
+
+ generate_datafilepath(datafile, path, sizeof(path));
+
+ ret = uv_fs_ftruncate(NULL, &req, datafile->file, 0, NULL);
+ if (ret < 0) {
+ error("uv_fs_ftruncate(%s): %s", path, uv_strerror(ret));
+ ++ctx->stats.fs_errors;
+ rrd_stat_atomic_add(&global_fs_errors, 1);
+ }
+ uv_fs_req_cleanup(&req);
+
+ ret = uv_fs_close(NULL, &req, datafile->file, NULL);
+ if (ret < 0) {
+ error("uv_fs_close(%s): %s", path, uv_strerror(ret));
+ ++ctx->stats.fs_errors;
+ rrd_stat_atomic_add(&global_fs_errors, 1);
+ }
+ uv_fs_req_cleanup(&req);
+
+ ret = uv_fs_unlink(NULL, &req, path, NULL);
+ if (ret < 0) {
+        error("uv_fs_unlink(%s): %s", path, uv_strerror(ret));
+ ++ctx->stats.fs_errors;
+ rrd_stat_atomic_add(&global_fs_errors, 1);
+ }
+ uv_fs_req_cleanup(&req);
+
+ ++ctx->stats.datafile_deletions;
+
+ return ret;
+}
+
+int create_data_file(struct rrdengine_datafile *datafile)
+{
+ struct rrdengine_instance *ctx = datafile->ctx;
+ uv_fs_t req;
+ uv_file file;
+ int ret, fd;
+ struct rrdeng_df_sb *superblock;
+ uv_buf_t iov;
+ char path[RRDENG_PATH_MAX];
+
+ generate_datafilepath(datafile, path, sizeof(path));
+ fd = open_file_direct_io(path, O_CREAT | O_RDWR | O_TRUNC, &file);
+ if (fd < 0) {
+ ++ctx->stats.fs_errors;
+ rrd_stat_atomic_add(&global_fs_errors, 1);
+ return fd;
+ }
+ datafile->file = file;
+ ++ctx->stats.datafile_creations;
+
+ ret = posix_memalign((void *)&superblock, RRDFILE_ALIGNMENT, sizeof(*superblock));
+ if (unlikely(ret)) {
+ fatal("posix_memalign:%s", strerror(ret));
+ }
+ (void) strncpy(superblock->magic_number, RRDENG_DF_MAGIC, RRDENG_MAGIC_SZ);
+ (void) strncpy(superblock->version, RRDENG_DF_VER, RRDENG_VER_SZ);
+ superblock->tier = 1;
+
+ iov = uv_buf_init((void *)superblock, sizeof(*superblock));
+
+ ret = uv_fs_write(NULL, &req, file, &iov, 1, 0, NULL);
+ if (ret < 0) {
+ fatal_assert(req.result < 0);
+ error("uv_fs_write: %s", uv_strerror(ret));
+ ++ctx->stats.io_errors;
+ rrd_stat_atomic_add(&global_io_errors, 1);
+ }
+ uv_fs_req_cleanup(&req);
+ free(superblock);
+ if (ret < 0) {
+ destroy_data_file(datafile);
+ return ret;
+ }
+
+ datafile->pos = sizeof(*superblock);
+ ctx->stats.io_write_bytes += sizeof(*superblock);
+ ++ctx->stats.io_write_requests;
+
+ return 0;
+}
+
+static int check_data_file_superblock(uv_file file)
+{
+ int ret;
+ struct rrdeng_df_sb *superblock;
+ uv_buf_t iov;
+ uv_fs_t req;
+
+ ret = posix_memalign((void *)&superblock, RRDFILE_ALIGNMENT, sizeof(*superblock));
+ if (unlikely(ret)) {
+ fatal("posix_memalign:%s", strerror(ret));
+ }
+ iov = uv_buf_init((void *)superblock, sizeof(*superblock));
+
+ ret = uv_fs_read(NULL, &req, file, &iov, 1, 0, NULL);
+ if (ret < 0) {
+ error("uv_fs_read: %s", uv_strerror(ret));
+ uv_fs_req_cleanup(&req);
+ goto error;
+ }
+ fatal_assert(req.result >= 0);
+ uv_fs_req_cleanup(&req);
+
+ if (strncmp(superblock->magic_number, RRDENG_DF_MAGIC, RRDENG_MAGIC_SZ) ||
+ strncmp(superblock->version, RRDENG_DF_VER, RRDENG_VER_SZ) ||
+ superblock->tier != 1) {
+ error("File has invalid superblock.");
+ ret = UV_EINVAL;
+ } else {
+ ret = 0;
+ }
+ error:
+ free(superblock);
+ return ret;
+}
+
+static int load_data_file(struct rrdengine_datafile *datafile)
+{
+ struct rrdengine_instance *ctx = datafile->ctx;
+ uv_fs_t req;
+ uv_file file;
+ int ret, fd, error;
+ uint64_t file_size;
+ char path[RRDENG_PATH_MAX];
+
+ generate_datafilepath(datafile, path, sizeof(path));
+ fd = open_file_direct_io(path, O_RDWR, &file);
+ if (fd < 0) {
+ ++ctx->stats.fs_errors;
+ rrd_stat_atomic_add(&global_fs_errors, 1);
+ return fd;
+ }
+ info("Initializing data file \"%s\".", path);
+
+ ret = check_file_properties(file, &file_size, sizeof(struct rrdeng_df_sb));
+ if (ret)
+ goto error;
+ file_size = ALIGN_BYTES_CEILING(file_size);
+
+ ret = check_data_file_superblock(file);
+ if (ret)
+ goto error;
+ ctx->stats.io_read_bytes += sizeof(struct rrdeng_df_sb);
+ ++ctx->stats.io_read_requests;
+
+ datafile->file = file;
+ datafile->pos = file_size;
+
+ info("Data file \"%s\" initialized (size:%"PRIu64").", path, file_size);
+ return 0;
+
+ error:
+ error = ret;
+ ret = uv_fs_close(NULL, &req, file, NULL);
+ if (ret < 0) {
+ error("uv_fs_close(%s): %s", path, uv_strerror(ret));
+ ++ctx->stats.fs_errors;
+ rrd_stat_atomic_add(&global_fs_errors, 1);
+ }
+ uv_fs_req_cleanup(&req);
+ return error;
+}
+
+static int scan_data_files_cmp(const void *a, const void *b)
+{
+ struct rrdengine_datafile *file1, *file2;
+ char path1[RRDENG_PATH_MAX], path2[RRDENG_PATH_MAX];
+
+ file1 = *(struct rrdengine_datafile **)a;
+ file2 = *(struct rrdengine_datafile **)b;
+ generate_datafilepath(file1, path1, sizeof(path1));
+ generate_datafilepath(file2, path2, sizeof(path2));
+ return strcmp(path1, path2);
+}
+
+/* Returns number of datafiles that were loaded or < 0 on error */
+static int scan_data_files(struct rrdengine_instance *ctx)
+{
+ int ret;
+    unsigned tier, no, matched_files, i, failed_to_load;
+ static uv_fs_t req;
+ uv_dirent_t dent;
+ struct rrdengine_datafile **datafiles, *datafile;
+ struct rrdengine_journalfile *journalfile;
+
+ ret = uv_fs_scandir(NULL, &req, ctx->dbfiles_path, 0, NULL);
+ if (ret < 0) {
+ fatal_assert(req.result < 0);
+ uv_fs_req_cleanup(&req);
+ error("uv_fs_scandir(%s): %s", ctx->dbfiles_path, uv_strerror(ret));
+ ++ctx->stats.fs_errors;
+ rrd_stat_atomic_add(&global_fs_errors, 1);
+ return ret;
+ }
+ info("Found %d files in path %s", ret, ctx->dbfiles_path);
+
+ datafiles = callocz(MIN(ret, MAX_DATAFILES), sizeof(*datafiles));
+ for (matched_files = 0 ; UV_EOF != uv_fs_scandir_next(&req, &dent) && matched_files < MAX_DATAFILES ; ) {
+ info("Scanning file \"%s/%s\"", ctx->dbfiles_path, dent.name);
+ ret = sscanf(dent.name, DATAFILE_PREFIX RRDENG_FILE_NUMBER_SCAN_TMPL DATAFILE_EXTENSION, &tier, &no);
+ if (2 == ret) {
+ info("Matched file \"%s/%s\"", ctx->dbfiles_path, dent.name);
+ datafile = mallocz(sizeof(*datafile));
+ datafile_init(datafile, ctx, tier, no);
+ datafiles[matched_files++] = datafile;
+ }
+ }
+ uv_fs_req_cleanup(&req);
+
+ if (0 == matched_files) {
+ freez(datafiles);
+ return 0;
+ }
+ if (matched_files == MAX_DATAFILES) {
+ error("Warning: hit maximum database engine file limit of %d files", MAX_DATAFILES);
+ }
+ qsort(datafiles, matched_files, sizeof(*datafiles), scan_data_files_cmp);
+ /* TODO: change this when tiering is implemented */
+ ctx->last_fileno = datafiles[matched_files - 1]->fileno;
+
+ for (failed_to_load = 0, i = 0 ; i < matched_files ; ++i) {
+ uint8_t must_delete_pair = 0;
+
+ datafile = datafiles[i];
+ ret = load_data_file(datafile);
+ if (0 != ret) {
+ must_delete_pair = 1;
+ }
+ journalfile = mallocz(sizeof(*journalfile));
+ datafile->journalfile = journalfile;
+ journalfile_init(journalfile, datafile);
+ ret = load_journal_file(ctx, journalfile, datafile);
+ if (0 != ret) {
+ if (!must_delete_pair) /* If datafile is still open close it */
+ close_data_file(datafile);
+ must_delete_pair = 1;
+ }
+ if (must_delete_pair) {
+ char path[RRDENG_PATH_MAX];
+
+ error("Deleting invalid data and journal file pair.");
+ ret = unlink_journal_file(journalfile);
+ if (!ret) {
+ generate_journalfilepath(datafile, path, sizeof(path));
+ info("Deleted journal file \"%s\".", path);
+ }
+ ret = unlink_data_file(datafile);
+ if (!ret) {
+ generate_datafilepath(datafile, path, sizeof(path));
+ info("Deleted data file \"%s\".", path);
+ }
+ freez(journalfile);
+ freez(datafile);
+ ++failed_to_load;
+ continue;
+ }
+
+ datafile_list_insert(ctx, datafile);
+ ctx->disk_space += datafile->pos + journalfile->pos;
+ }
+ matched_files -= failed_to_load;
+ freez(datafiles);
+
+ return matched_files;
+}
+
+/* Creates a datafile and a journalfile pair */
+int create_new_datafile_pair(struct rrdengine_instance *ctx, unsigned tier, unsigned fileno)
+{
+ struct rrdengine_datafile *datafile;
+ struct rrdengine_journalfile *journalfile;
+ int ret;
+ char path[RRDENG_PATH_MAX];
+
+ info("Creating new data and journal files in path %s", ctx->dbfiles_path);
+ datafile = mallocz(sizeof(*datafile));
+ datafile_init(datafile, ctx, tier, fileno);
+ ret = create_data_file(datafile);
+ if (!ret) {
+ generate_datafilepath(datafile, path, sizeof(path));
+ info("Created data file \"%s\".", path);
+ } else {
+ goto error_after_datafile;
+ }
+
+ journalfile = mallocz(sizeof(*journalfile));
+ datafile->journalfile = journalfile;
+ journalfile_init(journalfile, datafile);
+ ret = create_journal_file(journalfile, datafile);
+ if (!ret) {
+ generate_journalfilepath(datafile, path, sizeof(path));
+ info("Created journal file \"%s\".", path);
+ } else {
+ goto error_after_journalfile;
+ }
+ datafile_list_insert(ctx, datafile);
+ ctx->disk_space += datafile->pos + journalfile->pos;
+
+ return 0;
+
+error_after_journalfile:
+ destroy_data_file(datafile);
+ freez(journalfile);
+error_after_datafile:
+ freez(datafile);
+ return ret;
+}
+
+/* Page cache must already be initialized.
+ * Return 0 on success.
+ */
+int init_data_files(struct rrdengine_instance *ctx)
+{
+ int ret;
+
+ ret = scan_data_files(ctx);
+ if (ret < 0) {
+ error("Failed to scan path \"%s\".", ctx->dbfiles_path);
+ return ret;
+ } else if (0 == ret) {
+ info("Data files not found, creating in path \"%s\".", ctx->dbfiles_path);
+ ret = create_new_datafile_pair(ctx, 1, 1);
+ if (ret) {
+ error("Failed to create data and journal files in path \"%s\".", ctx->dbfiles_path);
+ return ret;
+ }
+ ctx->last_fileno = 1;
+ }
+
+ return 0;
+}
+
+void finalize_data_files(struct rrdengine_instance *ctx)
+{
+ struct rrdengine_datafile *datafile, *next_datafile;
+ struct rrdengine_journalfile *journalfile;
+ struct extent_info *extent, *next_extent;
+
+ for (datafile = ctx->datafiles.first ; datafile != NULL ; datafile = next_datafile) {
+ journalfile = datafile->journalfile;
+ next_datafile = datafile->next;
+
+ for (extent = datafile->extents.first ; extent != NULL ; extent = next_extent) {
+ next_extent = extent->next;
+ freez(extent);
+ }
+ close_journal_file(journalfile, datafile);
+ close_data_file(datafile);
+ freez(journalfile);
+ freez(datafile);
+
+ }
+} \ No newline at end of file
diff --git a/database/engine/datafile.h b/database/engine/datafile.h
new file mode 100644
index 0000000..ae94bfd
--- /dev/null
+++ b/database/engine/datafile.h
@@ -0,0 +1,67 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_DATAFILE_H
+#define NETDATA_DATAFILE_H
+
+#include "rrdengine.h"
+
+/* Forward declarations */
+struct rrdengine_datafile;
+struct rrdengine_journalfile;
+struct rrdengine_instance;
+
+#define DATAFILE_PREFIX "datafile-"
+#define DATAFILE_EXTENSION ".ndf"
+
+#define MAX_DATAFILE_SIZE (1073741824LU)
+#define MIN_DATAFILE_SIZE (4194304LU)
+#define MAX_DATAFILES (65536) /* Supports up to 64TiB for now */
+#define TARGET_DATAFILES (20)
+
+#define DATAFILE_IDEAL_IO_SIZE (1048576U)
+
+struct extent_info {
+ uint64_t offset;
+ uint32_t size;
+ uint8_t number_of_pages;
+ struct rrdengine_datafile *datafile;
+ struct extent_info *next;
+ struct rrdeng_page_descr *pages[];
+};
+
+struct rrdengine_df_extents {
+ /* the extent list is sorted based on disk offset */
+ struct extent_info *first;
+ struct extent_info *last;
+};
+
+/* only one event loop is supported for now */
+struct rrdengine_datafile {
+ unsigned tier;
+ unsigned fileno;
+ uv_file file;
+ uint64_t pos;
+ struct rrdengine_instance *ctx;
+ struct rrdengine_df_extents extents;
+ struct rrdengine_journalfile *journalfile;
+ struct rrdengine_datafile *next;
+};
+
+struct rrdengine_datafile_list {
+ struct rrdengine_datafile *first; /* oldest */
+ struct rrdengine_datafile *last; /* newest */
+};
+
+extern void df_extent_insert(struct extent_info *extent);
+extern void datafile_list_insert(struct rrdengine_instance *ctx, struct rrdengine_datafile *datafile);
+extern void datafile_list_delete(struct rrdengine_instance *ctx, struct rrdengine_datafile *datafile);
+extern void generate_datafilepath(struct rrdengine_datafile *datafile, char *str, size_t maxlen);
+extern int close_data_file(struct rrdengine_datafile *datafile);
+extern int unlink_data_file(struct rrdengine_datafile *datafile);
+extern int destroy_data_file(struct rrdengine_datafile *datafile);
+extern int create_data_file(struct rrdengine_datafile *datafile);
+extern int create_new_datafile_pair(struct rrdengine_instance *ctx, unsigned tier, unsigned fileno);
+extern int init_data_files(struct rrdengine_instance *ctx);
+extern void finalize_data_files(struct rrdengine_instance *ctx);
+
+#endif /* NETDATA_DATAFILE_H */ \ No newline at end of file
diff --git a/database/engine/journalfile.c b/database/engine/journalfile.c
new file mode 100644
index 0000000..9fecc48
--- /dev/null
+++ b/database/engine/journalfile.c
@@ -0,0 +1,515 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+#include "rrdengine.h"
+
+static void flush_transaction_buffer_cb(uv_fs_t* req)
+{
+ struct generic_io_descriptor *io_descr = req->data;
+ struct rrdengine_worker_config* wc = req->loop->data;
+ struct rrdengine_instance *ctx = wc->ctx;
+
+ if (req->result < 0) {
+ ++ctx->stats.io_errors;
+ rrd_stat_atomic_add(&global_io_errors, 1);
+ error("%s: uv_fs_write: %s", __func__, uv_strerror((int)req->result));
+ } else {
+ debug(D_RRDENGINE, "%s: Journal block was written to disk.", __func__);
+ }
+
+ uv_fs_req_cleanup(req);
+ free(io_descr->buf);
+ freez(io_descr);
+}
+
+/* Careful to always call this before creating a new journal file */
+void wal_flush_transaction_buffer(struct rrdengine_worker_config* wc)
+{
+ struct rrdengine_instance *ctx = wc->ctx;
+ int ret;
+ struct generic_io_descriptor *io_descr;
+ unsigned pos, size;
+ struct rrdengine_journalfile *journalfile;
+
+ if (unlikely(NULL == ctx->commit_log.buf || 0 == ctx->commit_log.buf_pos)) {
+ return;
+ }
+ /* care with outstanding transactions when switching journal files */
+ journalfile = ctx->datafiles.last->journalfile;
+
+ io_descr = mallocz(sizeof(*io_descr));
+ pos = ctx->commit_log.buf_pos;
+ size = ctx->commit_log.buf_size;
+ if (pos < size) {
+ /* simulate an empty transaction to skip the rest of the block */
+ *(uint8_t *) (ctx->commit_log.buf + pos) = STORE_PADDING;
+ }
+ io_descr->buf = ctx->commit_log.buf;
+ io_descr->bytes = size;
+ io_descr->pos = journalfile->pos;
+ io_descr->req.data = io_descr;
+ io_descr->completion = NULL;
+
+ io_descr->iov = uv_buf_init((void *)io_descr->buf, size);
+ ret = uv_fs_write(wc->loop, &io_descr->req, journalfile->file, &io_descr->iov, 1,
+ journalfile->pos, flush_transaction_buffer_cb);
+ fatal_assert(-1 != ret);
+ journalfile->pos += RRDENG_BLOCK_SIZE;
+ ctx->disk_space += RRDENG_BLOCK_SIZE;
+ ctx->commit_log.buf = NULL;
+ ctx->stats.io_write_bytes += RRDENG_BLOCK_SIZE;
+ ++ctx->stats.io_write_requests;
+}
+
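+/* Reserve `size` bytes in the commit log buffer and return a pointer to
+ * them. If the current buffer cannot fit the request, it is flushed first
+ * and a new, aligned buffer is allocated, so the caller can write the
+ * transaction directly into the returned region. */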
+void * wal_get_transaction_buffer(struct rrdengine_worker_config* wc, unsigned size)
+{
+ struct rrdengine_instance *ctx = wc->ctx;
+ int ret;
+ unsigned buf_pos = 0, buf_size;
+
+ fatal_assert(size);
+ if (ctx->commit_log.buf) {
+ unsigned remaining;
+
+ buf_pos = ctx->commit_log.buf_pos;
+ buf_size = ctx->commit_log.buf_size;
+ remaining = buf_size - buf_pos;
+ if (size > remaining) {
+ /* we need a new buffer */
+ wal_flush_transaction_buffer(wc);
+ }
+ }
+ if (NULL == ctx->commit_log.buf) {
+ buf_size = ALIGN_BYTES_CEILING(size);
+ ret = posix_memalign((void *)&ctx->commit_log.buf, RRDFILE_ALIGNMENT, buf_size);
+ if (unlikely(ret)) {
+ fatal("posix_memalign:%s", strerror(ret));
+ }
+ buf_pos = ctx->commit_log.buf_pos = 0;
+ ctx->commit_log.buf_size = buf_size;
+ }
+ ctx->commit_log.buf_pos += size;
+
+ return ctx->commit_log.buf + buf_pos;
+}
+
+void generate_journalfilepath(struct rrdengine_datafile *datafile, char *str, size_t maxlen)
+{
+ (void) snprintf(str, maxlen, "%s/" WALFILE_PREFIX RRDENG_FILE_NUMBER_PRINT_TMPL WALFILE_EXTENSION,
+ datafile->ctx->dbfiles_path, datafile->tier, datafile->fileno);
+}
+
+void journalfile_init(struct rrdengine_journalfile *journalfile, struct rrdengine_datafile *datafile)
+{
+ journalfile->file = (uv_file)0;
+ journalfile->pos = 0;
+ journalfile->datafile = datafile;
+}
+
+int close_journal_file(struct rrdengine_journalfile *journalfile, struct rrdengine_datafile *datafile)
+{
+ struct rrdengine_instance *ctx = datafile->ctx;
+ uv_fs_t req;
+ int ret;
+ char path[RRDENG_PATH_MAX];
+
+ generate_journalfilepath(datafile, path, sizeof(path));
+
+ ret = uv_fs_close(NULL, &req, journalfile->file, NULL);
+ if (ret < 0) {
+ error("uv_fs_close(%s): %s", path, uv_strerror(ret));
+ ++ctx->stats.fs_errors;
+ rrd_stat_atomic_add(&global_fs_errors, 1);
+ }
+ uv_fs_req_cleanup(&req);
+
+ return ret;
+}
+
+int unlink_journal_file(struct rrdengine_journalfile *journalfile)
+{
+ struct rrdengine_datafile *datafile = journalfile->datafile;
+ struct rrdengine_instance *ctx = datafile->ctx;
+ uv_fs_t req;
+ int ret;
+ char path[RRDENG_PATH_MAX];
+
+ generate_journalfilepath(datafile, path, sizeof(path));
+
+ ret = uv_fs_unlink(NULL, &req, path, NULL);
+ if (ret < 0) {
+        error("uv_fs_unlink(%s): %s", path, uv_strerror(ret));
+ ++ctx->stats.fs_errors;
+ rrd_stat_atomic_add(&global_fs_errors, 1);
+ }
+ uv_fs_req_cleanup(&req);
+
+ ++ctx->stats.journalfile_deletions;
+
+ return ret;
+}
+
+int destroy_journal_file(struct rrdengine_journalfile *journalfile, struct rrdengine_datafile *datafile)
+{
+ struct rrdengine_instance *ctx = datafile->ctx;
+ uv_fs_t req;
+ int ret;
+ char path[RRDENG_PATH_MAX];
+
+ generate_journalfilepath(datafile, path, sizeof(path));
+
+ ret = uv_fs_ftruncate(NULL, &req, journalfile->file, 0, NULL);
+ if (ret < 0) {
+ error("uv_fs_ftruncate(%s): %s", path, uv_strerror(ret));
+ ++ctx->stats.fs_errors;
+ rrd_stat_atomic_add(&global_fs_errors, 1);
+ }
+ uv_fs_req_cleanup(&req);
+
+ ret = uv_fs_close(NULL, &req, journalfile->file, NULL);
+ if (ret < 0) {
+ error("uv_fs_close(%s): %s", path, uv_strerror(ret));
+ ++ctx->stats.fs_errors;
+ rrd_stat_atomic_add(&global_fs_errors, 1);
+ }
+ uv_fs_req_cleanup(&req);
+
+ ret = uv_fs_unlink(NULL, &req, path, NULL);
+ if (ret < 0) {
+ error("uv_fs_fsunlink(%s): %s", path, uv_strerror(ret));
+ ++ctx->stats.fs_errors;
+ rrd_stat_atomic_add(&global_fs_errors, 1);
+ }
+ uv_fs_req_cleanup(&req);
+
+ ++ctx->stats.journalfile_deletions;
+
+ return ret;
+}
+
+int create_journal_file(struct rrdengine_journalfile *journalfile, struct rrdengine_datafile *datafile)
+{
+ struct rrdengine_instance *ctx = datafile->ctx;
+ uv_fs_t req;
+ uv_file file;
+ int ret, fd;
+ struct rrdeng_jf_sb *superblock;
+ uv_buf_t iov;
+ char path[RRDENG_PATH_MAX];
+
+ generate_journalfilepath(datafile, path, sizeof(path));
+ fd = open_file_direct_io(path, O_CREAT | O_RDWR | O_TRUNC, &file);
+ if (fd < 0) {
+ ++ctx->stats.fs_errors;
+ rrd_stat_atomic_add(&global_fs_errors, 1);
+ return fd;
+ }
+ journalfile->file = file;
+ ++ctx->stats.journalfile_creations;
+
+ ret = posix_memalign((void *)&superblock, RRDFILE_ALIGNMENT, sizeof(*superblock));
+ if (unlikely(ret)) {
+ fatal("posix_memalign:%s", strerror(ret));
+ }
+ (void) strncpy(superblock->magic_number, RRDENG_JF_MAGIC, RRDENG_MAGIC_SZ);
+ (void) strncpy(superblock->version, RRDENG_JF_VER, RRDENG_VER_SZ);
+
+ iov = uv_buf_init((void *)superblock, sizeof(*superblock));
+
+ ret = uv_fs_write(NULL, &req, file, &iov, 1, 0, NULL);
+ if (ret < 0) {
+ fatal_assert(req.result < 0);
+ error("uv_fs_write: %s", uv_strerror(ret));
+ ++ctx->stats.io_errors;
+ rrd_stat_atomic_add(&global_io_errors, 1);
+ }
+ uv_fs_req_cleanup(&req);
+ free(superblock);
+ if (ret < 0) {
+ destroy_journal_file(journalfile, datafile);
+ return ret;
+ }
+
+ journalfile->pos = sizeof(*superblock);
+ ctx->stats.io_write_bytes += sizeof(*superblock);
+ ++ctx->stats.io_write_requests;
+
+ return 0;
+}
+
+static int check_journal_file_superblock(uv_file file)
+{
+ int ret;
+ struct rrdeng_jf_sb *superblock;
+ uv_buf_t iov;
+ uv_fs_t req;
+
+ ret = posix_memalign((void *)&superblock, RRDFILE_ALIGNMENT, sizeof(*superblock));
+ if (unlikely(ret)) {
+ fatal("posix_memalign:%s", strerror(ret));
+ }
+ iov = uv_buf_init((void *)superblock, sizeof(*superblock));
+
+ ret = uv_fs_read(NULL, &req, file, &iov, 1, 0, NULL);
+ if (ret < 0) {
+ error("uv_fs_read: %s", uv_strerror(ret));
+ uv_fs_req_cleanup(&req);
+ goto error;
+ }
+ fatal_assert(req.result >= 0);
+ uv_fs_req_cleanup(&req);
+
+ if (strncmp(superblock->magic_number, RRDENG_JF_MAGIC, RRDENG_MAGIC_SZ) ||
+ strncmp(superblock->version, RRDENG_JF_VER, RRDENG_VER_SZ)) {
+ error("File has invalid superblock.");
+ ret = UV_EINVAL;
+ } else {
+ ret = 0;
+ }
+ error:
+ free(superblock);
+ return ret;
+}
+
+static void restore_extent_metadata(struct rrdengine_instance *ctx, struct rrdengine_journalfile *journalfile,
+ void *buf, unsigned max_size)
+{
+ struct page_cache *pg_cache = &ctx->pg_cache;
+ unsigned i, count, payload_length, descr_size, valid_pages;
+ struct rrdeng_page_descr *descr;
+ struct extent_info *extent;
+ /* persistent structures */
+ struct rrdeng_jf_store_data *jf_metric_data;
+
+ jf_metric_data = buf;
+ count = jf_metric_data->number_of_pages;
+ descr_size = sizeof(*jf_metric_data->descr) * count;
+ payload_length = sizeof(*jf_metric_data) + descr_size;
+ if (payload_length > max_size) {
+ error("Corrupted transaction payload.");
+ return;
+ }
+
+ extent = mallocz(sizeof(*extent) + count * sizeof(extent->pages[0]));
+ extent->offset = jf_metric_data->extent_offset;
+ extent->size = jf_metric_data->extent_size;
+ extent->datafile = journalfile->datafile;
+ extent->next = NULL;
+
+ for (i = 0, valid_pages = 0 ; i < count ; ++i) {
+ uuid_t *temp_id;
+ Pvoid_t *PValue;
+ struct pg_cache_page_index *page_index = NULL;
+
+ if (PAGE_METRICS != jf_metric_data->descr[i].type) {
+ error("Unknown page type encountered.");
+ continue;
+ }
+ temp_id = (uuid_t *)jf_metric_data->descr[i].uuid;
+
+ uv_rwlock_rdlock(&pg_cache->metrics_index.lock);
+ PValue = JudyHSGet(pg_cache->metrics_index.JudyHS_array, temp_id, sizeof(uuid_t));
+ if (likely(NULL != PValue)) {
+ page_index = *PValue;
+ }
+ uv_rwlock_rdunlock(&pg_cache->metrics_index.lock);
+ if (NULL == PValue) {
+ /* First time we see the UUID */
+ uv_rwlock_wrlock(&pg_cache->metrics_index.lock);
+ PValue = JudyHSIns(&pg_cache->metrics_index.JudyHS_array, temp_id, sizeof(uuid_t), PJE0);
+ fatal_assert(NULL == *PValue); /* TODO: figure out concurrency model */
+ *PValue = page_index = create_page_index(temp_id);
+ page_index->prev = pg_cache->metrics_index.last_page_index;
+ pg_cache->metrics_index.last_page_index = page_index;
+ uv_rwlock_wrunlock(&pg_cache->metrics_index.lock);
+ }
+
+ descr = pg_cache_create_descr();
+ descr->page_length = jf_metric_data->descr[i].page_length;
+ descr->start_time = jf_metric_data->descr[i].start_time;
+ descr->end_time = jf_metric_data->descr[i].end_time;
+ descr->id = &page_index->id;
+ descr->extent = extent;
+ extent->pages[valid_pages++] = descr;
+ pg_cache_insert(ctx, page_index, descr);
+ }
+
+ extent->number_of_pages = valid_pages;
+
+ if (likely(valid_pages))
+ df_extent_insert(extent);
+ else
+ freez(extent);
+}
+
+/*
+ * Replays transaction by interpreting up to max_size bytes from buf.
+ * Sets id to the current transaction id or to 0 if unknown.
+ * Returns size of transaction record or 0 for unknown size.
+ */
+static unsigned replay_transaction(struct rrdengine_instance *ctx, struct rrdengine_journalfile *journalfile,
+ void *buf, uint64_t *id, unsigned max_size)
+{
+ unsigned payload_length, size_bytes;
+ int ret;
+ /* persistent structures */
+ struct rrdeng_jf_transaction_header *jf_header;
+ struct rrdeng_jf_transaction_trailer *jf_trailer;
+ uLong crc;
+
+ *id = 0;
+ jf_header = buf;
+ if (STORE_PADDING == jf_header->type) {
+ debug(D_RRDENGINE, "Skipping padding.");
+ return 0;
+ }
+ if (sizeof(*jf_header) > max_size) {
+ error("Corrupted transaction record, skipping.");
+ return 0;
+ }
+ *id = jf_header->id;
+ payload_length = jf_header->payload_length;
+ size_bytes = sizeof(*jf_header) + payload_length + sizeof(*jf_trailer);
+ if (size_bytes > max_size) {
+ error("Corrupted transaction record, skipping.");
+ return 0;
+ }
+ jf_trailer = buf + sizeof(*jf_header) + payload_length;
+ crc = crc32(0L, Z_NULL, 0);
+ crc = crc32(crc, buf, sizeof(*jf_header) + payload_length);
+ ret = crc32cmp(jf_trailer->checksum, crc);
+ debug(D_RRDENGINE, "Transaction %"PRIu64" was read from disk. CRC32 check: %s", *id, ret ? "FAILED" : "SUCCEEDED");
+ if (unlikely(ret)) {
+ error("Transaction %"PRIu64" was read from disk. CRC32 check: FAILED", *id);
+ return size_bytes;
+ }
+ switch (jf_header->type) {
+ case STORE_DATA:
+ debug(D_RRDENGINE, "Replaying transaction %"PRIu64"", jf_header->id);
+ restore_extent_metadata(ctx, journalfile, buf + sizeof(*jf_header), payload_length);
+ break;
+ default:
+ error("Unknown transaction type. Skipping record.");
+ break;
+ }
+
+ return size_bytes;
+}
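+
+/*
+ * On-disk transaction layout replayed above:
+ *
+ *     [rrdeng_jf_transaction_header][payload][rrdeng_jf_transaction_trailer]
+ *
+ * The trailer carries a CRC32 computed over the header plus payload; on
+ * a mismatch the record is skipped, but its size is still returned so
+ * that iteration can continue past it.
+ */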
+
+
+#define READAHEAD_BYTES (RRDENG_BLOCK_SIZE * 256)
+/*
+ * Iterates journal file transactions and populates the page cache.
+ * Page cache must already be initialized.
+ * Returns the maximum transaction id it discovered.
+ */
+static uint64_t iterate_transactions(struct rrdengine_instance *ctx, struct rrdengine_journalfile *journalfile)
+{
+ uv_file file;
+ uint64_t file_size;//, data_file_size;
+ int ret;
+ uint64_t pos, pos_i, max_id, id;
+ unsigned size_bytes;
+ void *buf;
+ uv_buf_t iov;
+ uv_fs_t req;
+
+ file = journalfile->file;
+ file_size = journalfile->pos;
+ //data_file_size = journalfile->datafile->pos; TODO: utilize this?
+
+ max_id = 1;
+ ret = posix_memalign((void *)&buf, RRDFILE_ALIGNMENT, READAHEAD_BYTES);
+ if (unlikely(ret)) {
+ fatal("posix_memalign:%s", strerror(ret));
+ }
+
+ for (pos = sizeof(struct rrdeng_jf_sb) ; pos < file_size ; pos += READAHEAD_BYTES) {
+ size_bytes = MIN(READAHEAD_BYTES, file_size - pos);
+ iov = uv_buf_init(buf, size_bytes);
+ ret = uv_fs_read(NULL, &req, file, &iov, 1, pos, NULL);
+ if (ret < 0) {
+ fatal("uv_fs_read: %s", uv_strerror(ret));
+ /*uv_fs_req_cleanup(&req);*/
+ }
+ fatal_assert(req.result >= 0);
+ uv_fs_req_cleanup(&req);
+ ctx->stats.io_read_bytes += size_bytes;
+ ++ctx->stats.io_read_requests;
+
+ //pos_i = pos;
+ //while (pos_i < pos + size_bytes) {
+ for (pos_i = 0 ; pos_i < size_bytes ; ) {
+ unsigned max_size;
+
+ max_size = size_bytes - pos_i; /* bytes remaining in the read buffer */
+ ret = replay_transaction(ctx, journalfile, buf + pos_i, &id, max_size);
+ if (!ret) /* TODO: support transactions bigger than 4K */
+ /* unknown transaction size, move on to the next block */
+ pos_i = ALIGN_BYTES_FLOOR(pos_i + RRDENG_BLOCK_SIZE);
+ else
+ pos_i += ret;
+ max_id = MAX(max_id, id);
+ }
+ }
+
+ free(buf);
+ return max_id;
+}
+
+int load_journal_file(struct rrdengine_instance *ctx, struct rrdengine_journalfile *journalfile,
+ struct rrdengine_datafile *datafile)
+{
+ uv_fs_t req;
+ uv_file file;
+ int ret, fd, error;
+ uint64_t file_size, max_id;
+ char path[RRDENG_PATH_MAX];
+
+ generate_journalfilepath(datafile, path, sizeof(path));
+ fd = open_file_direct_io(path, O_RDWR, &file);
+ if (fd < 0) {
+ ++ctx->stats.fs_errors;
+ rrd_stat_atomic_add(&global_fs_errors, 1);
+ return fd;
+ }
+ info("Loading journal file \"%s\".", path);
+
+ ret = check_file_properties(file, &file_size, sizeof(struct rrdeng_df_sb));
+ if (ret)
+ goto error;
+ file_size = ALIGN_BYTES_FLOOR(file_size);
+
+ ret = check_journal_file_superblock(file);
+ if (ret)
+ goto error;
+ ctx->stats.io_read_bytes += sizeof(struct rrdeng_jf_sb);
+ ++ctx->stats.io_read_requests;
+
+ journalfile->file = file;
+ journalfile->pos = file_size;
+
+ max_id = iterate_transactions(ctx, journalfile);
+
+ ctx->commit_log.transaction_id = MAX(ctx->commit_log.transaction_id, max_id + 1);
+
+ info("Journal file \"%s\" loaded (size:%"PRIu64").", path, file_size);
+ return 0;
+
+ error:
+ error = ret;
+ ret = uv_fs_close(NULL, &req, file, NULL);
+ if (ret < 0) {
+ error("uv_fs_close(%s): %s", path, uv_strerror(ret));
+ ++ctx->stats.fs_errors;
+ rrd_stat_atomic_add(&global_fs_errors, 1);
+ }
+ uv_fs_req_cleanup(&req);
+ return error;
+}
+
+void init_commit_log(struct rrdengine_instance *ctx)
+{
+ ctx->commit_log.buf = NULL;
+ ctx->commit_log.buf_pos = 0;
+ ctx->commit_log.transaction_id = 1;
+} \ No newline at end of file
diff --git a/database/engine/journalfile.h b/database/engine/journalfile.h
new file mode 100644
index 0000000..f6c43cd
--- /dev/null
+++ b/database/engine/journalfile.h
@@ -0,0 +1,49 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_JOURNALFILE_H
+#define NETDATA_JOURNALFILE_H
+
+#include "rrdengine.h"
+
+/* Forward declarations */
+struct rrdengine_instance;
+struct rrdengine_worker_config;
+struct rrdengine_datafile;
+struct rrdengine_journalfile;
+
+#define WALFILE_PREFIX "journalfile-"
+#define WALFILE_EXTENSION ".njf"
+
+
+/* only one event loop is supported for now */
+struct rrdengine_journalfile {
+ uv_file file;
+ uint64_t pos;
+
+ struct rrdengine_datafile *datafile;
+};
+
+/* only one event loop is supported for now */
+struct transaction_commit_log {
+ uint64_t transaction_id;
+
+ /* outstanding transaction buffer */
+ void *buf;
+ unsigned buf_pos;
+ unsigned buf_size;
+};
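+
+/*
+ * The outstanding buffer is allocated with posix_memalign() at
+ * RRDFILE_ALIGNMENT by wal_get_transaction_buffer() and is handed to
+ * flush_transaction_buffer_cb() for the asynchronous write, which is
+ * expected to free it. Only one event loop touches it, so no locking
+ * is needed.
+ */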
+
+extern void generate_journalfilepath(struct rrdengine_datafile *datafile, char *str, size_t maxlen);
+extern void journalfile_init(struct rrdengine_journalfile *journalfile, struct rrdengine_datafile *datafile);
+extern void *wal_get_transaction_buffer(struct rrdengine_worker_config* wc, unsigned size);
+extern void wal_flush_transaction_buffer(struct rrdengine_worker_config* wc);
+extern int close_journal_file(struct rrdengine_journalfile *journalfile, struct rrdengine_datafile *datafile);
+extern int unlink_journal_file(struct rrdengine_journalfile *journalfile);
+extern int destroy_journal_file(struct rrdengine_journalfile *journalfile, struct rrdengine_datafile *datafile);
+extern int create_journal_file(struct rrdengine_journalfile *journalfile, struct rrdengine_datafile *datafile);
+extern int load_journal_file(struct rrdengine_instance *ctx, struct rrdengine_journalfile *journalfile,
+ struct rrdengine_datafile *datafile);
+extern void init_commit_log(struct rrdengine_instance *ctx);
+
+
+#endif /* NETDATA_JOURNALFILE_H */ \ No newline at end of file
diff --git a/database/engine/metadata_log/Makefile.am b/database/engine/metadata_log/Makefile.am
new file mode 100644
index 0000000..161784b
--- /dev/null
+++ b/database/engine/metadata_log/Makefile.am
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/database/engine/metadata_log/README.md b/database/engine/metadata_log/README.md
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/database/engine/metadata_log/README.md
diff --git a/database/engine/metadata_log/compaction.c b/database/engine/metadata_log/compaction.c
new file mode 100644
index 0000000..ba19e1e
--- /dev/null
+++ b/database/engine/metadata_log/compaction.c
@@ -0,0 +1,86 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+#define NETDATA_RRD_INTERNALS
+
+#include "metadatalog.h"
+
+/* Return 0 on success. */
+int compaction_failure_recovery(struct metalog_instance *ctx, struct metadata_logfile **metalogfiles,
+ unsigned *matched_files)
+{
+ int ret;
+ unsigned starting_fileno, fileno, i, j, recovered_files;
+ struct metadata_logfile *metalogfile = NULL, *compactionfile = NULL, **tmp_metalogfiles;
+ char *dbfiles_path = ctx->rrdeng_ctx->dbfiles_path;
+
+ for (i = 0 ; i < *matched_files ; ++i) {
+ metalogfile = metalogfiles[i];
+ if (0 == metalogfile->starting_fileno)
+ continue; /* skip standard metadata log files */
+ break; /* this is a compaction temporary file */
+ }
+ if (i == *matched_files) /* no recovery needed */
+ return 0;
+ info("Starting metadata log file failure recovery procedure in \"%s\".", dbfiles_path);
+
+ if (*matched_files - i > 1) { /* Can't have more than 1 temporary compaction files */
+ error("Metadata log files are in an invalid state. Cannot proceed.");
+ return 1;
+ }
+ compactionfile = metalogfile;
+ starting_fileno = compactionfile->starting_fileno;
+ fileno = compactionfile->fileno;
+ /* scratchpad space to move file pointers around */
+ tmp_metalogfiles = callocz(*matched_files, sizeof(*tmp_metalogfiles));
+
+ for (j = 0, recovered_files = 0 ; j < i ; ++j) {
+ metalogfile = metalogfiles[j];
+ fatal_assert(0 == metalogfile->starting_fileno);
+ if (metalogfile->fileno < starting_fileno) {
+ tmp_metalogfiles[recovered_files++] = metalogfile;
+ continue;
+ }
+ break; /* reached compaction file serial number */
+ }
+
+ if ((j == i) /* Shouldn't be possible, invalid compaction temporary file */ ||
+ (metalogfile->fileno == starting_fileno && metalogfile->fileno == fileno)) {
+ error("Deleting invalid compaction temporary file \"%s/"METALOG_PREFIX METALOG_FILE_NUMBER_PRINT_TMPL
+ METALOG_EXTENSION"\"", dbfiles_path, starting_fileno, fileno);
+ unlink_metadata_logfile(compactionfile);
+ freez(compactionfile);
+ freez(tmp_metalogfiles);
+ --*matched_files; /* delete the last one */
+
+ info("Finished metadata log file failure recovery procedure in \"%s\".", dbfiles_path);
+ return 0;
+ }
+
+ for ( ; j < i ; ++j) { /* continue iterating through normal metadata log files */
+ metalogfile = metalogfiles[j];
+ fatal_assert(0 == metalogfile->starting_fileno);
+ if (metalogfile->fileno < fileno) { /* It has already been compacted */
+ error("Deleting invalid metadata log file \"%s/"METALOG_PREFIX METALOG_FILE_NUMBER_PRINT_TMPL
+ METALOG_EXTENSION"\"", dbfiles_path, 0U, metalogfile->fileno);
+ unlink_metadata_logfile(metalogfile);
+ freez(metalogfile);
+ continue;
+ }
+ tmp_metalogfiles[recovered_files++] = metalogfile;
+ }
+
+ /* compaction temporary file is valid */
+ tmp_metalogfiles[recovered_files++] = compactionfile;
+ ret = rename_metadata_logfile(compactionfile, 0, starting_fileno);
+ if (ret < 0) {
+ error("Cannot rename temporary compaction files. Cannot proceed.");
+ freez(tmp_metalogfiles);
+ return 1;
+ }
+
+ memcpy(metalogfiles, tmp_metalogfiles, recovered_files * sizeof(*metalogfiles));
+ *matched_files = recovered_files;
+ freez(tmp_metalogfiles);
+
+ info("Finished metadata log file failure recovery procedure in \"%s\".", dbfiles_path);
+ return 0;
+}
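+
+/*
+ * Illustrative recovery scenario (file numbers hypothetical): given
+ * normal files 00000-00001 through 00000-00005 and a leftover
+ * compaction temporary "metadatalog-00002-00004.mlf", the procedure
+ * keeps 00001, deletes 00002 and 00003 (already compacted into the
+ * temporary), keeps 00004 and 00005, and renames the temporary to
+ * "metadatalog-00000-00002.mlf".
+ */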
diff --git a/database/engine/metadata_log/compaction.h b/database/engine/metadata_log/compaction.h
new file mode 100644
index 0000000..d046134
--- /dev/null
+++ b/database/engine/metadata_log/compaction.h
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_COMPACTION_H
+#define NETDATA_COMPACTION_H
+
+#ifndef _GNU_SOURCE
+#define _GNU_SOURCE
+#endif
+#include "../rrdengine.h"
+
+extern int compaction_failure_recovery(struct metalog_instance *ctx, struct metadata_logfile **metalogfiles,
+ unsigned *matched_files);
+
+#endif /* NETDATA_COMPACTION_H */
diff --git a/database/engine/metadata_log/logfile.c b/database/engine/metadata_log/logfile.c
new file mode 100644
index 0000000..b7c5c06
--- /dev/null
+++ b/database/engine/metadata_log/logfile.c
@@ -0,0 +1,453 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+#include <database/sqlite/sqlite_functions.h>
+#include "metadatalog.h"
+#include "metalogpluginsd.h"
+
+
+void generate_metadata_logfile_path(struct metadata_logfile *metalogfile, char *str, size_t maxlen)
+{
+ (void) snprintf(str, maxlen, "%s/" METALOG_PREFIX METALOG_FILE_NUMBER_PRINT_TMPL METALOG_EXTENSION,
+ metalogfile->ctx->rrdeng_ctx->dbfiles_path, metalogfile->starting_fileno, metalogfile->fileno);
+}
+
+void metadata_logfile_init(struct metadata_logfile *metalogfile, struct metalog_instance *ctx, unsigned starting_fileno,
+ unsigned fileno)
+{
+ metalogfile->starting_fileno = starting_fileno;
+ metalogfile->fileno = fileno;
+ metalogfile->file = (uv_file)0;
+ metalogfile->pos = 0;
+ metalogfile->next = NULL;
+ metalogfile->ctx = ctx;
+}
+
+int rename_metadata_logfile(struct metadata_logfile *metalogfile, unsigned new_starting_fileno, unsigned new_fileno)
+{
+ //struct metalog_instance *ctx = metalogfile->ctx;
+ uv_fs_t req;
+ int ret;
+ char oldpath[RRDENG_PATH_MAX], newpath[RRDENG_PATH_MAX];
+ unsigned backup_starting_fileno, backup_fileno;
+
+ backup_starting_fileno = metalogfile->starting_fileno;
+ backup_fileno = metalogfile->fileno;
+ generate_metadata_logfile_path(metalogfile, oldpath, sizeof(oldpath));
+ metalogfile->starting_fileno = new_starting_fileno;
+ metalogfile->fileno = new_fileno;
+ generate_metadata_logfile_path(metalogfile, newpath, sizeof(newpath));
+
+ info("Renaming metadata log file \"%s\" to \"%s\".", oldpath, newpath);
+ ret = uv_fs_rename(NULL, &req, oldpath, newpath, NULL);
+ if (ret < 0) {
+ error("uv_fs_rename(%s): %s", oldpath, uv_strerror(ret));
+ //++ctx->stats.fs_errors; /* this is racy, may miss some errors */
+ rrd_stat_atomic_add(&global_fs_errors, 1);
+ /* restore previous values */
+ metalogfile->starting_fileno = backup_starting_fileno;
+ metalogfile->fileno = backup_fileno;
+ }
+ uv_fs_req_cleanup(&req);
+
+ return ret;
+}
+
+int unlink_metadata_logfile(struct metadata_logfile *metalogfile)
+{
+ //struct metalog_instance *ctx = metalogfile->ctx;
+ uv_fs_t req;
+ int ret;
+ char path[RRDENG_PATH_MAX];
+
+ generate_metadata_logfile_path(metalogfile, path, sizeof(path));
+
+ ret = uv_fs_unlink(NULL, &req, path, NULL);
+ if (ret < 0) {
+ error("uv_fs_fsunlink(%s): %s", path, uv_strerror(ret));
+// ++ctx->stats.fs_errors;
+ rrd_stat_atomic_add(&global_fs_errors, 1);
+ }
+ uv_fs_req_cleanup(&req);
+
+ return ret;
+}
+
+static int check_metadata_logfile_superblock(uv_file file)
+{
+ int ret;
+ struct rrdeng_metalog_sb *superblock;
+ uv_buf_t iov;
+ uv_fs_t req;
+
+ ret = posix_memalign((void *)&superblock, RRDFILE_ALIGNMENT, sizeof(*superblock));
+ if (unlikely(ret)) {
+ fatal("posix_memalign:%s", strerror(ret));
+ }
+ iov = uv_buf_init((void *)superblock, sizeof(*superblock));
+
+ ret = uv_fs_read(NULL, &req, file, &iov, 1, 0, NULL);
+ if (ret < 0) {
+ error("uv_fs_read: %s", uv_strerror(ret));
+ uv_fs_req_cleanup(&req);
+ goto error;
+ }
+ fatal_assert(req.result >= 0);
+ uv_fs_req_cleanup(&req);
+
+ if (strncmp(superblock->magic_number, RRDENG_METALOG_MAGIC, RRDENG_MAGIC_SZ)) {
+ error("File has invalid superblock.");
+ ret = UV_EINVAL;
+ } else {
+ ret = 0;
+ }
+ if (superblock->version > RRDENG_METALOG_VER) {
+ error("File has unknown version %"PRIu16". Compatibility is not guaranteed.", superblock->version);
+ }
+error:
+ free(superblock);
+ return ret;
+}
+
+void replay_record(struct metadata_logfile *metalogfile, struct rrdeng_metalog_record_header *header, void *payload)
+{
+ struct metalog_instance *ctx = metalogfile->ctx;
+ char *line, *nextline, *record_end;
+ int ret;
+
+ debug(D_METADATALOG, "RECORD contents: %.*s", (int)header->payload_length, (char *)payload);
+ record_end = (char *)payload + header->payload_length - 1;
+ *record_end = '\0';
+
+ for (line = payload ; line ; line = nextline) {
+ nextline = strchr(line, '\n');
+ if (nextline) {
+ *nextline++ = '\0';
+ }
+ ret = parser_action(ctx->metalog_parser_object->parser, line);
+ debug(D_METADATALOG, "parser_action ret:%d", ret);
+ if (ret)
+ return; /* skip record due to error */
+ }
+}
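+
+/*
+ * The record payload is a block of newline-separated pluginsd commands;
+ * the final payload byte is overwritten with a NUL terminator above so
+ * the lines can be walked with strchr(). Replay of a record stops at
+ * the first line the parser rejects.
+ */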
+
+/* This function only works with buffered I/O */
+static inline int metalogfile_read(struct metadata_logfile *metalogfile, void *buf, size_t len, uint64_t offset)
+{
+// struct metalog_instance *ctx;
+ uv_file file;
+ uv_buf_t iov;
+ uv_fs_t req;
+ int ret;
+
+// ctx = metalogfile->ctx;
+ file = metalogfile->file;
+ iov = uv_buf_init(buf, len);
+ ret = uv_fs_read(NULL, &req, file, &iov, 1, offset, NULL);
+ if (unlikely(ret < 0 && ret != req.result)) {
+ fatal("uv_fs_read: %s", uv_strerror(ret));
+ }
+ if (req.result < 0) {
+// ++ctx->stats.io_errors;
+ rrd_stat_atomic_add(&global_io_errors, 1);
+ error("%s: uv_fs_read - %s - record at offset %"PRIu64"(%u) in metadata logfile %u-%u.", __func__,
+ uv_strerror((int)req.result), offset, (unsigned)len, metalogfile->starting_fileno, metalogfile->fileno);
+ }
+ uv_fs_req_cleanup(&req);
+// ctx->stats.io_read_bytes += len;
+// ++ctx->stats.io_read_requests;
+
+ return ret;
+}
+
+/* Return 0 on success */
+static int metadata_record_integrity_check(void *record)
+{
+ int ret;
+ uint32_t data_size;
+ struct rrdeng_metalog_record_header *header;
+ struct rrdeng_metalog_record_trailer *trailer;
+ uLong crc;
+
+ header = record;
+ data_size = header->header_length + header->payload_length;
+ trailer = record + data_size;
+
+ crc = crc32(0L, Z_NULL, 0);
+ crc = crc32(crc, record, data_size);
+ ret = crc32cmp(trailer->checksum, crc);
+
+ return ret;
+}
+
+#define MAX_READ_BYTES (RRDENG_BLOCK_SIZE * 32) /* no record should be over 128KiB in this version */
+
+/*
+ * Iterates metadata log file records and creates database objects (host/chart/dimension)
+ */
+static void iterate_records(struct metadata_logfile *metalogfile)
+{
+ uint32_t file_size, pos, bytes_remaining, record_size;
+ void *buf;
+ struct rrdeng_metalog_record_header *header;
+ struct metalog_instance *ctx = metalogfile->ctx;
+ struct metalog_pluginsd_state *state = ctx->metalog_parser_object->private;
+ const size_t min_header_size = offsetof(struct rrdeng_metalog_record_header, header_length) +
+ sizeof(header->header_length);
+
+ file_size = metalogfile->pos;
+ state->metalogfile = metalogfile;
+
+ buf = mallocz(MAX_READ_BYTES);
+
+ for (pos = sizeof(struct rrdeng_metalog_sb) ; pos < file_size ; pos += record_size) {
+ bytes_remaining = file_size - pos;
+ if (bytes_remaining < min_header_size) {
+ error("%s: unexpected end of file in metadata logfile %u-%u.", __func__, metalogfile->starting_fileno,
+ metalogfile->fileno);
+ break;
+ }
+ if (metalogfile_read(metalogfile, buf, min_header_size, pos) < 0)
+ break;
+ header = (struct rrdeng_metalog_record_header *)buf;
+ if (METALOG_STORE_PADDING == header->type) {
+ info("%s: Skipping padding in metadata logfile %u-%u.", __func__, metalogfile->starting_fileno,
+ metalogfile->fileno);
+ record_size = ALIGN_BYTES_FLOOR(pos + RRDENG_BLOCK_SIZE) - pos;
+ continue;
+ }
+ if (metalogfile_read(metalogfile, buf + min_header_size, sizeof(*header) - min_header_size,
+ pos + min_header_size) < 0)
+ break;
+ record_size = header->header_length + header->payload_length + sizeof(struct rrdeng_metalog_record_trailer);
+ if (header->header_length < min_header_size || record_size > bytes_remaining) {
+ error("%s: Corrupted record in metadata logfile %u-%u.", __func__, metalogfile->starting_fileno,
+ metalogfile->fileno);
+ break;
+ }
+ if (record_size > MAX_READ_BYTES) {
+ error("%s: Record is too long (%u bytes) in metadata logfile %u-%u.", __func__, record_size,
+ metalogfile->starting_fileno, metalogfile->fileno);
+ continue;
+ }
+ if (metalogfile_read(metalogfile, buf + sizeof(*header), record_size - sizeof(*header),
+ pos + sizeof(*header)) < 0)
+ break;
+ if (metadata_record_integrity_check(buf)) {
+ error("%s: Record at offset %"PRIu32" was read from disk. CRC32 check: FAILED", __func__, pos);
+ continue;
+ }
+ debug(D_METADATALOG, "%s: Record at offset %"PRIu32" was read from disk. CRC32 check: SUCCEEDED", __func__,
+ pos);
+
+ replay_record(metalogfile, header, buf + header->header_length);
+ }
+
+ freez(buf);
+}
+
+int load_metadata_logfile(struct metalog_instance *ctx, struct metadata_logfile *metalogfile)
+{
+ UNUSED(ctx);
+ uv_fs_t req;
+ uv_file file;
+ int ret, fd, error;
+ uint64_t file_size;
+ char path[RRDENG_PATH_MAX];
+
+ generate_metadata_logfile_path(metalogfile, path, sizeof(path));
+ if (file_is_migrated(path))
+ return 0;
+
+ fd = open_file_buffered_io(path, O_RDWR, &file);
+ if (fd < 0) {
+// ++ctx->stats.fs_errors;
+ rrd_stat_atomic_add(&global_fs_errors, 1);
+ return fd;
+ }
+ info("Loading metadata log \"%s\".", path);
+
+ ret = check_file_properties(file, &file_size, sizeof(struct rrdeng_metalog_sb));
+ if (ret)
+ goto error;
+
+ ret = check_metadata_logfile_superblock(file);
+ if (ret)
+ goto error;
+// ctx->stats.io_read_bytes += sizeof(struct rrdeng_jf_sb);
+// ++ctx->stats.io_read_requests;
+
+ metalogfile->file = file;
+ metalogfile->pos = file_size;
+
+ iterate_records(metalogfile);
+
+ info("Metadata log \"%s\" migrated to the database (size:%"PRIu64").", path, file_size);
+ add_migrated_file(path, file_size);
+ return 0;
+
+error:
+ error = ret;
+ ret = uv_fs_close(NULL, &req, file, NULL);
+ if (ret < 0) {
+ error("uv_fs_close(%s): %s", path, uv_strerror(ret));
+// ++ctx->stats.fs_errors;
+ rrd_stat_atomic_add(&global_fs_errors, 1);
+ }
+ uv_fs_req_cleanup(&req);
+ return error;
+}
+
+static int scan_metalog_files_cmp(const void *a, const void *b)
+{
+ struct metadata_logfile *file1, *file2;
+ char path1[RRDENG_PATH_MAX], path2[RRDENG_PATH_MAX];
+
+ file1 = *(struct metadata_logfile **)a;
+ file2 = *(struct metadata_logfile **)b;
+ generate_metadata_logfile_path(file1, path1, sizeof(path1));
+ generate_metadata_logfile_path(file2, path2, sizeof(path2));
+ return strcmp(path1, path2);
+}
+
+/* Returns number of metadata logfiles that were loaded or < 0 on error */
+static int scan_metalog_files(struct metalog_instance *ctx)
+{
+ int ret;
+ unsigned starting_no, no, matched_files, i, failed_to_load;
+ static uv_fs_t req;
+ uv_dirent_t dent;
+ struct metadata_logfile **metalogfiles, *metalogfile;
+ char *dbfiles_path = ctx->rrdeng_ctx->dbfiles_path;
+
+ ret = uv_fs_scandir(NULL, &req, dbfiles_path, 0, NULL);
+ if (ret < 0) {
+ fatal_assert(req.result < 0);
+ uv_fs_req_cleanup(&req);
+ error("uv_fs_scandir(%s): %s", dbfiles_path, uv_strerror(ret));
+// ++ctx->stats.fs_errors;
+ rrd_stat_atomic_add(&global_fs_errors, 1);
+ return ret;
+ }
+ info("Found %d files in path %s", ret, dbfiles_path);
+
+ metalogfiles = callocz(MIN(ret, MAX_DATAFILES), sizeof(*metalogfiles));
+ for (matched_files = 0 ; UV_EOF != uv_fs_scandir_next(&req, &dent) && matched_files < MAX_DATAFILES ; ) {
+ info("Scanning file \"%s/%s\"", dbfiles_path, dent.name);
+ ret = sscanf(dent.name, METALOG_PREFIX METALOG_FILE_NUMBER_SCAN_TMPL METALOG_EXTENSION, &starting_no, &no);
+ if (2 == ret) {
+ info("Matched file \"%s/%s\"", dbfiles_path, dent.name);
+ metalogfile = mallocz(sizeof(*metalogfile));
+ metadata_logfile_init(metalogfile, ctx, starting_no, no);
+ metalogfiles[matched_files++] = metalogfile;
+ }
+ }
+ uv_fs_req_cleanup(&req);
+
+ if (0 == matched_files) {
+ freez(metalogfiles);
+ return 0;
+ }
+ if (matched_files == MAX_DATAFILES) {
+ error("Warning: hit maximum database engine file limit of %d files", MAX_DATAFILES);
+ }
+ qsort(metalogfiles, matched_files, sizeof(*metalogfiles), scan_metalog_files_cmp);
+ ret = compaction_failure_recovery(ctx, metalogfiles, &matched_files);
+ if (ret) { /* If the files are corrupted fail */
+ for (i = 0 ; i < matched_files ; ++i) {
+ freez(metalogfiles[i]);
+ }
+ freez(metalogfiles);
+ return UV_EINVAL;
+ }
+ //ctx->last_fileno = metalogfiles[matched_files - 1]->fileno;
+
+ struct plugind cd = {
+ .enabled = 1,
+ .update_every = 0,
+ .pid = 0,
+ .serial_failures = 0,
+ .successful_collections = 0,
+ .obsolete = 0,
+ .started_t = INVALID_TIME,
+ .next = NULL,
+ .version = 0,
+ };
+
+ struct metalog_pluginsd_state metalog_parser_state;
+ metalog_pluginsd_state_init(&metalog_parser_state, ctx);
+
+ PARSER_USER_OBJECT metalog_parser_object;
+ memset(&metalog_parser_object, 0, sizeof(metalog_parser_object));
+ metalog_parser_object.enabled = cd.enabled;
+ metalog_parser_object.host = ctx->rrdeng_ctx->host;
+ metalog_parser_object.cd = &cd;
+ metalog_parser_object.trust_durations = 0;
+ metalog_parser_object.private = &metalog_parser_state;
+
+ PARSER *parser = parser_init(metalog_parser_object.host, &metalog_parser_object, NULL, PARSER_INPUT_SPLIT);
+ if (unlikely(!parser)) {
+ error("Failed to initialize metadata log parser.");
+ failed_to_load = matched_files;
+ goto after_failed_to_parse;
+ }
+ parser_add_keyword(parser, PLUGINSD_KEYWORD_HOST, metalog_pluginsd_host);
+ parser_add_keyword(parser, PLUGINSD_KEYWORD_GUID, pluginsd_guid);
+ parser_add_keyword(parser, PLUGINSD_KEYWORD_CONTEXT, pluginsd_context);
+ parser_add_keyword(parser, PLUGINSD_KEYWORD_TOMBSTONE, pluginsd_tombstone);
+ parser->plugins_action->dimension_action = &metalog_pluginsd_dimension_action;
+ parser->plugins_action->chart_action = &metalog_pluginsd_chart_action;
+ parser->plugins_action->guid_action = &metalog_pluginsd_guid_action;
+ parser->plugins_action->context_action = &metalog_pluginsd_context_action;
+ parser->plugins_action->tombstone_action = &metalog_pluginsd_tombstone_action;
+ parser->plugins_action->host_action = &metalog_pluginsd_host_action;
+
+
+ metalog_parser_object.parser = parser;
+ ctx->metalog_parser_object = &metalog_parser_object;
+
+ for (failed_to_load = 0, i = 0 ; i < matched_files ; ++i) {
+ metalogfile = metalogfiles[i];
+ db_lock();
+ db_execute("BEGIN TRANSACTION;");
+ ret = load_metadata_logfile(ctx, metalogfile);
+ if (0 != ret) {
+ error("Deleting invalid metadata log file \"%s/"METALOG_PREFIX METALOG_FILE_NUMBER_PRINT_TMPL
+ METALOG_EXTENSION"\"", dbfiles_path, metalogfile->starting_fileno, metalogfile->fileno);
+ unlink_metadata_logfile(metalogfile);
+ ++failed_to_load;
+ db_execute("ROLLBACK TRANSACTION;");
+ }
+ else
+ db_execute("COMMIT TRANSACTION;");
+ db_unlock();
+ freez(metalogfile);
+ }
+ matched_files -= failed_to_load;
+ debug(D_METADATALOG, "PARSER ended");
+
+ parser_destroy(parser);
+
+ size_t count __maybe_unused = metalog_parser_object.count;
+
+ debug(D_METADATALOG, "Parsing count=%u", (unsigned)count);
+after_failed_to_parse:
+
+ freez(metalogfiles);
+
+ return matched_files;
+}
+
+/* Return 0 on success. */
+int init_metalog_files(struct metalog_instance *ctx)
+{
+ int ret;
+ char *dbfiles_path = ctx->rrdeng_ctx->dbfiles_path;
+
+ ret = scan_metalog_files(ctx);
+ if (ret < 0) {
+ error("Failed to scan path \"%s\".", dbfiles_path);
+ return ret;
+ }/* else if (0 == ret) {
+ ctx->last_fileno = 1;
+ }*/
+
+ return 0;
+}
diff --git a/database/engine/metadata_log/logfile.h b/database/engine/metadata_log/logfile.h
new file mode 100644
index 0000000..df12ac7
--- /dev/null
+++ b/database/engine/metadata_log/logfile.h
@@ -0,0 +1,39 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_LOGFILE_H
+#define NETDATA_LOGFILE_H
+
+#include "metadatalogprotocol.h"
+#include "../rrdengine.h"
+
+/* Forward declarations */
+struct metadata_logfile;
+struct metalog_worker_config;
+
+#define METALOG_PREFIX "metadatalog-"
+#define METALOG_EXTENSION ".mlf"
+
+/* only one event loop is supported for now */
+struct metadata_logfile {
+ unsigned fileno; /* Starts at 1 */
+ unsigned starting_fileno; /* 0 for normal files, starting number during compaction */
+ uv_file file;
+ uint64_t pos;
+ struct metalog_instance *ctx;
+ struct metadata_logfile *next;
+};
+
+struct metadata_logfile_list {
+ struct metadata_logfile *first; /* oldest */
+ struct metadata_logfile *last; /* newest */
+};
+
+extern void generate_metadata_logfile_path(struct metadata_logfile *metadatalog, char *str, size_t maxlen);
+extern int rename_metadata_logfile(struct metadata_logfile *metalogfile, unsigned new_starting_fileno,
+ unsigned new_fileno);
+extern int unlink_metadata_logfile(struct metadata_logfile *metalogfile);
+extern int load_metadata_logfile(struct metalog_instance *ctx, struct metadata_logfile *logfile);
+extern int init_metalog_files(struct metalog_instance *ctx);
+
+
+#endif /* NETDATA_LOGFILE_H */
diff --git a/database/engine/metadata_log/metadatalog.h b/database/engine/metadata_log/metadatalog.h
new file mode 100644
index 0000000..b484686
--- /dev/null
+++ b/database/engine/metadata_log/metadatalog.h
@@ -0,0 +1,28 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_METADATALOG_H
+#define NETDATA_METADATALOG_H
+
+#ifndef _GNU_SOURCE
+#define _GNU_SOURCE
+#endif
+#include "../rrdengine.h"
+#include "metadatalogprotocol.h"
+#include "logfile.h"
+#include "metadatalogapi.h"
+#include "compaction.h"
+
+/* Forward declarations */
+struct metalog_instance;
+struct parser_user_object;
+
+#define METALOG_FILE_NUMBER_SCAN_TMPL "%5u-%5u"
+#define METALOG_FILE_NUMBER_PRINT_TMPL "%5.5u-%5.5u"
+
+struct metalog_instance {
+ struct rrdengine_instance *rrdeng_ctx;
+ struct parser_user_object *metalog_parser_object;
+ uint8_t initialized; /* set to 1 to mark context initialized */
+};
+
+#endif /* NETDATA_METADATALOG_H */
diff --git a/database/engine/metadata_log/metadatalogapi.c b/database/engine/metadata_log/metadatalogapi.c
new file mode 100755
index 0000000..b206cca
--- /dev/null
+++ b/database/engine/metadata_log/metadatalogapi.c
@@ -0,0 +1,39 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+#define NETDATA_RRD_INTERNALS
+
+#include "metadatalog.h"
+
+/*
+ * Returns 0 on success, negative on error
+ */
+int metalog_init(struct rrdengine_instance *rrdeng_parent_ctx)
+{
+ struct metalog_instance *ctx;
+ int error;
+
+ ctx = callocz(1, sizeof(*ctx));
+ ctx->initialized = 0;
+ rrdeng_parent_ctx->metalog_ctx = ctx;
+
+ ctx->rrdeng_ctx = rrdeng_parent_ctx;
+ error = init_metalog_files(ctx);
+ if (error) {
+ goto error_after_init_rrd_files;
+ }
+ ctx->initialized = 1; /* notify dbengine that the metadata log has finished initializing */
+ return 0;
+
+error_after_init_rrd_files:
+ freez(ctx);
+ return UV_EIO;
+}
+
+/* This function is called by dbengine rotation logic when the metric has no writers */
+void metalog_delete_dimension_by_uuid(struct metalog_instance *ctx, uuid_t *metric_uuid)
+{
+ uuid_t multihost_uuid;
+
+ delete_dimension_uuid(metric_uuid);
+ rrdeng_convert_legacy_uuid_to_multihost(ctx->rrdeng_ctx->machine_guid, metric_uuid, &multihost_uuid);
+ delete_dimension_uuid(&multihost_uuid);
+}
diff --git a/database/engine/metadata_log/metadatalogapi.h b/database/engine/metadata_log/metadatalogapi.h
new file mode 100644
index 0000000..d558b93
--- /dev/null
+++ b/database/engine/metadata_log/metadatalogapi.h
@@ -0,0 +1,12 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_METADATALOGAPI_H
+#define NETDATA_METADATALOGAPI_H
+
+extern void metalog_commit_delete_chart(RRDSET *st);
+extern void metalog_delete_dimension_by_uuid(struct metalog_instance *ctx, uuid_t *metric_uuid);
+
+/* must call once before using anything */
+extern int metalog_init(struct rrdengine_instance *rrdeng_parent_ctx);
+
+#endif /* NETDATA_METADATALOGAPI_H */
diff --git a/database/engine/metadata_log/metadatalogprotocol.h b/database/engine/metadata_log/metadatalogprotocol.h
new file mode 100644
index 0000000..1017213
--- /dev/null
+++ b/database/engine/metadata_log/metadatalogprotocol.h
@@ -0,0 +1,53 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_METADATALOGPROTOCOL_H
+#define NETDATA_METADATALOGPROTOCOL_H
+
+#include "../rrddiskprotocol.h"
+
+#define RRDENG_METALOG_MAGIC "netdata-metadata-log"
+
+#define RRDENG_METALOG_VER (1)
+
+#define RRDENG_METALOG_SB_PADDING_SZ (RRDENG_BLOCK_SIZE - (RRDENG_MAGIC_SZ + sizeof(uint16_t)))
+/*
+ * Metadata log persistent super-block
+ */
+struct rrdeng_metalog_sb {
+ char magic_number[RRDENG_MAGIC_SZ];
+ uint16_t version;
+ uint8_t padding[RRDENG_METALOG_SB_PADDING_SZ];
+} __attribute__ ((packed));
+
+/*
+ * Metadata log record types
+ */
+#define METALOG_STORE_PADDING (0)
+#define METALOG_CREATE_OBJECT (1)
+#define METALOG_DELETE_OBJECT (2)
+#define METALOG_OTHER (3) /* reserved */
+
+/*
+ * Metadata log record header
+ */
+struct rrdeng_metalog_record_header {
+ /* when set to METALOG_STORE_PADDING jump to start of next block */
+ uint8_t type;
+
+ uint16_t header_length;
+ uint32_t payload_length;
+ /******************************************************
+ * No fields above this point can ever change. *
+ ******************************************************
+ * All fields below this point are subject to change. *
+ ******************************************************/
+} __attribute__ ((packed));
+
+/*
+ * Metadata log record trailer
+ */
+struct rrdeng_metalog_record_trailer {
+ uint8_t checksum[CHECKSUM_SZ]; /* CRC32 */
+} __attribute__ ((packed));
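+
+/*
+ * A complete record is therefore laid out on disk as:
+ *
+ *     [rrdeng_metalog_record_header][payload][rrdeng_metalog_record_trailer]
+ *
+ * with the trailer CRC32 covering the header plus payload, and
+ * METALOG_STORE_PADDING filling the space up to the next block boundary
+ * when needed.
+ */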
+
+#endif /* NETDATA_METADATALOGPROTOCOL_H */
diff --git a/database/engine/metadata_log/metalogpluginsd.c b/database/engine/metadata_log/metalogpluginsd.c
new file mode 100755
index 0000000..88c1453
--- /dev/null
+++ b/database/engine/metadata_log/metalogpluginsd.c
@@ -0,0 +1,140 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+#define NETDATA_RRD_INTERNALS
+
+#include "metadatalog.h"
+#include "metalogpluginsd.h"
+
+extern struct config stream_config;
+
+PARSER_RC metalog_pluginsd_host_action(
+ void *user, char *machine_guid, char *hostname, char *registry_hostname, int update_every, char *os, char *timezone,
+ char *tags)
+{
+ struct metalog_pluginsd_state *state = ((PARSER_USER_OBJECT *)user)->private;
+
+ RRDHOST *host = rrdhost_find_by_guid(machine_guid, 0);
+ if (host) {
+ if (unlikely(host->rrd_memory_mode != RRD_MEMORY_MODE_DBENGINE)) {
+ error("Archived host '%s' has memory mode '%s', but the archived one is '%s'. Ignoring archived state.",
+ host->hostname, rrd_memory_mode_name(host->rrd_memory_mode),
+ rrd_memory_mode_name(RRD_MEMORY_MODE_DBENGINE));
+ ((PARSER_USER_OBJECT *) user)->host = NULL; /* Ignore objects if memory mode is not dbengine */
+ }
+ ((PARSER_USER_OBJECT *) user)->host = host;
+ return PARSER_RC_OK;
+ }
+
+ if (strcmp(machine_guid, registry_get_this_machine_guid()) == 0) {
+ ((PARSER_USER_OBJECT *) user)->host = host;
+ return PARSER_RC_OK;
+ }
+
+ if (likely(!uuid_parse(machine_guid, state->host_uuid))) {
+ int rc = sql_store_host(&state->host_uuid, hostname, registry_hostname, update_every, os, timezone, tags);
+ if (unlikely(rc)) {
+ errno = 0;
+ error("Failed to store host %s with UUID %s in the database", hostname, machine_guid);
+ }
+ }
+ else {
+ errno = 0;
+ error("Host machine GUID %s is not valid", machine_guid);
+ }
+
+ return PARSER_RC_OK;
+}
+
+PARSER_RC metalog_pluginsd_chart_action(void *user, char *type, char *id, char *name, char *family, char *context,
+ char *title, char *units, char *plugin, char *module, int priority,
+ int update_every, RRDSET_TYPE chart_type, char *options)
+{
+ UNUSED(options);
+
+ struct metalog_pluginsd_state *state = ((PARSER_USER_OBJECT *)user)->private;
+ RRDHOST *host = ((PARSER_USER_OBJECT *) user)->host;
+
+ if (unlikely(uuid_is_null(state->host_uuid))) {
+ debug(D_METADATALOG, "Ignoring chart belonging to missing or ignored host.");
+ return PARSER_RC_OK;
+ }
+ uuid_copy(state->chart_uuid, state->uuid);
+ uuid_clear(state->uuid); /* Consume UUID */
+ (void) sql_store_chart(&state->chart_uuid, &state->host_uuid,
+ type, id, name, family, context, title, units,
+ plugin, module, priority, update_every,
+ chart_type, RRD_MEMORY_MODE_DBENGINE, host ? host->rrd_history_entries : 1);
+ ((PARSER_USER_OBJECT *)user)->st_exists = 1;
+
+ return PARSER_RC_OK;
+}
+
+PARSER_RC metalog_pluginsd_dimension_action(void *user, RRDSET *st, char *id, char *name, char *algorithm,
+ long multiplier, long divisor, char *options, RRD_ALGORITHM algorithm_type)
+{
+ struct metalog_pluginsd_state *state = ((PARSER_USER_OBJECT *)user)->private;
+ UNUSED(options);
+ UNUSED(algorithm);
+ UNUSED(st);
+
+ if (unlikely(uuid_is_null(state->chart_uuid))) {
+ debug(D_METADATALOG, "Ignoring dimension belonging to missing or ignored chart.");
+ info("Ignoring dimension belonging to missing or ignored chart.");
+ return PARSER_RC_OK;
+ }
+
+ if (unlikely(uuid_is_null(state->uuid))) {
+ debug(D_METADATALOG, "Ignoring dimension without unknown UUID");
+ info("Ignoring dimension without unknown UUID");
+ return PARSER_RC_OK;
+ }
+
+ (void) sql_store_dimension(&state->uuid, &state->chart_uuid, id, name, multiplier, divisor, algorithm_type);
+ uuid_clear(state->uuid); /* Consume UUID */
+
+ return PARSER_RC_OK;
+}
+
+PARSER_RC metalog_pluginsd_guid_action(void *user, uuid_t *uuid)
+{
+ struct metalog_pluginsd_state *state = ((PARSER_USER_OBJECT *)user)->private;
+
+ uuid_copy(state->uuid, *uuid);
+
+ return PARSER_RC_OK;
+}
+
+PARSER_RC metalog_pluginsd_context_action(void *user, uuid_t *uuid)
+{
+ struct metalog_pluginsd_state *state = ((PARSER_USER_OBJECT *)user)->private;
+
+ int rc = find_uuid_type(uuid);
+
+ if (rc == 1) {
+ uuid_copy(state->host_uuid, *uuid);
+ ((PARSER_USER_OBJECT *)user)->st_exists = 0;
+ ((PARSER_USER_OBJECT *)user)->host_exists = 1;
+ } else if (rc == 2) {
+ uuid_copy(state->chart_uuid, *uuid);
+ ((PARSER_USER_OBJECT *)user)->st_exists = 1;
+ } else
+ uuid_copy(state->uuid, *uuid);
+
+ return PARSER_RC_OK;
+}
+
+PARSER_RC metalog_pluginsd_tombstone_action(void *user, uuid_t *uuid)
+{
+ UNUSED(user);
+ UNUSED(uuid);
+
+ return PARSER_RC_OK;
+}
+
+void metalog_pluginsd_state_init(struct metalog_pluginsd_state *state, struct metalog_instance *ctx)
+{
+ state->ctx = ctx;
+ state->skip_record = 0;
+ uuid_clear(state->uuid);
+ state->metalogfile = NULL;
+}
diff --git a/database/engine/metadata_log/metalogpluginsd.h b/database/engine/metadata_log/metalogpluginsd.h
new file mode 100644
index 0000000..96808aa
--- /dev/null
+++ b/database/engine/metadata_log/metalogpluginsd.h
@@ -0,0 +1,33 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_METALOGPLUGINSD_H
+#define NETDATA_METALOGPLUGINSD_H
+
+#include "../../../collectors/plugins.d/pluginsd_parser.h"
+#include "../../../collectors/plugins.d/plugins_d.h"
+#include "../../../parser/parser.h"
+
+struct metalog_pluginsd_state {
+ struct metalog_instance *ctx;
+ uuid_t uuid;
+ uuid_t host_uuid;
+ uuid_t chart_uuid;
+ uint8_t skip_record; /* skip this record due to errors in parsing */
+ struct metadata_logfile *metalogfile; /* current metadata log file being replayed */
+};
+
+extern void metalog_pluginsd_state_init(struct metalog_pluginsd_state *state, struct metalog_instance *ctx);
+
+extern PARSER_RC metalog_pluginsd_chart_action(void *user, char *type, char *id, char *name, char *family,
+ char *context, char *title, char *units, char *plugin, char *module,
+ int priority, int update_every, RRDSET_TYPE chart_type, char *options);
+extern PARSER_RC metalog_pluginsd_dimension_action(void *user, RRDSET *st, char *id, char *name, char *algorithm,
+ long multiplier, long divisor, char *options,
+ RRD_ALGORITHM algorithm_type);
+extern PARSER_RC metalog_pluginsd_guid_action(void *user, uuid_t *uuid);
+extern PARSER_RC metalog_pluginsd_context_action(void *user, uuid_t *uuid);
+extern PARSER_RC metalog_pluginsd_tombstone_action(void *user, uuid_t *uuid);
+extern PARSER_RC metalog_pluginsd_host(char **words, void *user, PLUGINSD_ACTION *plugins_action);
+extern PARSER_RC metalog_pluginsd_host_action(void *user, char *machine_guid, char *hostname, char *registry_hostname, int update_every, char *os, char *timezone, char *tags);
+
+#endif /* NETDATA_METALOGPLUGINSD_H */
diff --git a/database/engine/pagecache.c b/database/engine/pagecache.c
new file mode 100644
index 0000000..a182071
--- /dev/null
+++ b/database/engine/pagecache.c
@@ -0,0 +1,1178 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+#define NETDATA_RRD_INTERNALS
+
+#include "rrdengine.h"
+
+/* Forward declarations */
+static int pg_cache_try_evict_one_page_unsafe(struct rrdengine_instance *ctx);
+
+/* always inserts into tail */
+static inline void pg_cache_replaceQ_insert_unsafe(struct rrdengine_instance *ctx,
+ struct rrdeng_page_descr *descr)
+{
+ struct page_cache *pg_cache = &ctx->pg_cache;
+ struct page_cache_descr *pg_cache_descr = descr->pg_cache_descr;
+
+ if (likely(NULL != pg_cache->replaceQ.tail)) {
+ pg_cache_descr->prev = pg_cache->replaceQ.tail;
+ pg_cache->replaceQ.tail->next = pg_cache_descr;
+ }
+ if (unlikely(NULL == pg_cache->replaceQ.head)) {
+ pg_cache->replaceQ.head = pg_cache_descr;
+ }
+ pg_cache->replaceQ.tail = pg_cache_descr;
+}
+
+static inline void pg_cache_replaceQ_delete_unsafe(struct rrdengine_instance *ctx,
+ struct rrdeng_page_descr *descr)
+{
+ struct page_cache *pg_cache = &ctx->pg_cache;
+ struct page_cache_descr *pg_cache_descr = descr->pg_cache_descr, *prev, *next;
+
+ prev = pg_cache_descr->prev;
+ next = pg_cache_descr->next;
+
+ if (likely(NULL != prev)) {
+ prev->next = next;
+ }
+ if (likely(NULL != next)) {
+ next->prev = prev;
+ }
+ if (unlikely(pg_cache_descr == pg_cache->replaceQ.head)) {
+ pg_cache->replaceQ.head = next;
+ }
+ if (unlikely(pg_cache_descr == pg_cache->replaceQ.tail)) {
+ pg_cache->replaceQ.tail = prev;
+ }
+ pg_cache_descr->prev = pg_cache_descr->next = NULL;
+}
+
+void pg_cache_replaceQ_insert(struct rrdengine_instance *ctx,
+ struct rrdeng_page_descr *descr)
+{
+ struct page_cache *pg_cache = &ctx->pg_cache;
+
+ uv_rwlock_wrlock(&pg_cache->replaceQ.lock);
+ pg_cache_replaceQ_insert_unsafe(ctx, descr);
+ uv_rwlock_wrunlock(&pg_cache->replaceQ.lock);
+}
+
+void pg_cache_replaceQ_delete(struct rrdengine_instance *ctx,
+ struct rrdeng_page_descr *descr)
+{
+ struct page_cache *pg_cache = &ctx->pg_cache;
+
+ uv_rwlock_wrlock(&pg_cache->replaceQ.lock);
+ pg_cache_replaceQ_delete_unsafe(ctx, descr);
+ uv_rwlock_wrunlock(&pg_cache->replaceQ.lock);
+}
+void pg_cache_replaceQ_set_hot(struct rrdengine_instance *ctx,
+ struct rrdeng_page_descr *descr)
+{
+ struct page_cache *pg_cache = &ctx->pg_cache;
+
+ uv_rwlock_wrlock(&pg_cache->replaceQ.lock);
+ pg_cache_replaceQ_delete_unsafe(ctx, descr);
+ pg_cache_replaceQ_insert_unsafe(ctx, descr);
+ uv_rwlock_wrunlock(&pg_cache->replaceQ.lock);
+}
+
+struct rrdeng_page_descr *pg_cache_create_descr(void)
+{
+ struct rrdeng_page_descr *descr;
+
+ descr = mallocz(sizeof(*descr));
+ descr->page_length = 0;
+ descr->start_time = INVALID_TIME;
+ descr->end_time = INVALID_TIME;
+ descr->id = NULL;
+ descr->extent = NULL;
+ descr->pg_cache_descr_state = 0;
+ descr->pg_cache_descr = NULL;
+
+ return descr;
+}
+
+/* The caller must hold page descriptor lock. */
+void pg_cache_wake_up_waiters_unsafe(struct rrdeng_page_descr *descr)
+{
+ struct page_cache_descr *pg_cache_descr = descr->pg_cache_descr;
+ if (pg_cache_descr->waiters)
+ uv_cond_broadcast(&pg_cache_descr->cond);
+}
+
+void pg_cache_wake_up_waiters(struct rrdengine_instance *ctx, struct rrdeng_page_descr *descr)
+{
+ rrdeng_page_descr_mutex_lock(ctx, descr);
+ pg_cache_wake_up_waiters_unsafe(descr);
+ rrdeng_page_descr_mutex_unlock(ctx, descr);
+}
+
+/*
+ * The caller must hold page descriptor lock.
+ * The lock will be released and re-acquired. The descriptor is not guaranteed
+ * to exist after this function returns.
+ */
+void pg_cache_wait_event_unsafe(struct rrdeng_page_descr *descr)
+{
+ struct page_cache_descr *pg_cache_descr = descr->pg_cache_descr;
+
+ ++pg_cache_descr->waiters;
+ uv_cond_wait(&pg_cache_descr->cond, &pg_cache_descr->mutex);
+ --pg_cache_descr->waiters;
+}
+
+/*
+ * The caller must hold page descriptor lock.
+ * The lock will be released and re-acquired. The descriptor is not guaranteed
+ * to exist after this function returns.
+ * Returns UV_ETIMEDOUT if timeout_sec seconds pass.
+ */
+int pg_cache_timedwait_event_unsafe(struct rrdeng_page_descr *descr, uint64_t timeout_sec)
+{
+ int ret;
+ struct page_cache_descr *pg_cache_descr = descr->pg_cache_descr;
+
+ ++pg_cache_descr->waiters;
+ ret = uv_cond_timedwait(&pg_cache_descr->cond, &pg_cache_descr->mutex, timeout_sec * NSEC_PER_SEC);
+ --pg_cache_descr->waiters;
+
+ return ret;
+}
+
+/*
+ * Returns page flags.
+ * The lock will be released and re-acquired. The descriptor is not guaranteed
+ * to exist after this function returns.
+ */
+unsigned long pg_cache_wait_event(struct rrdengine_instance *ctx, struct rrdeng_page_descr *descr)
+{
+ struct page_cache_descr *pg_cache_descr = descr->pg_cache_descr;
+ unsigned long flags;
+
+ rrdeng_page_descr_mutex_lock(ctx, descr);
+ pg_cache_wait_event_unsafe(descr);
+ flags = pg_cache_descr->flags;
+ rrdeng_page_descr_mutex_unlock(ctx, descr);
+
+ return flags;
+}
+
+/*
+ * The caller must hold page descriptor lock.
+ */
+int pg_cache_can_get_unsafe(struct rrdeng_page_descr *descr, int exclusive_access)
+{
+ struct page_cache_descr *pg_cache_descr = descr->pg_cache_descr;
+
+ if ((pg_cache_descr->flags & (RRD_PAGE_LOCKED | RRD_PAGE_READ_PENDING)) ||
+ (exclusive_access && pg_cache_descr->refcnt)) {
+ return 0;
+ }
+
+ return 1;
+}
+
+/*
+ * The caller must hold page descriptor lock.
+ * Gets a reference to the page descriptor.
+ * Returns 1 on success and 0 on failure.
+ */
+int pg_cache_try_get_unsafe(struct rrdeng_page_descr *descr, int exclusive_access)
+{
+ struct page_cache_descr *pg_cache_descr = descr->pg_cache_descr;
+
+ if (!pg_cache_can_get_unsafe(descr, exclusive_access))
+ return 0;
+
+ if (exclusive_access)
+ pg_cache_descr->flags |= RRD_PAGE_LOCKED;
+ ++pg_cache_descr->refcnt;
+
+ return 1;
+}
+
+/*
+ * The caller must hold the page descriptor lock.
+ * This function may block doing cleanup.
+ */
+void pg_cache_put_unsafe(struct rrdeng_page_descr *descr)
+{
+ struct page_cache_descr *pg_cache_descr = descr->pg_cache_descr;
+
+ pg_cache_descr->flags &= ~RRD_PAGE_LOCKED;
+ if (0 == --pg_cache_descr->refcnt) {
+ pg_cache_wake_up_waiters_unsafe(descr);
+ }
+}
+
+/*
+ * This function may block doing cleanup.
+ */
+void pg_cache_put(struct rrdengine_instance *ctx, struct rrdeng_page_descr *descr)
+{
+ rrdeng_page_descr_mutex_lock(ctx, descr);
+ pg_cache_put_unsafe(descr);
+ rrdeng_page_descr_mutex_unlock(ctx, descr);
+}
+
+/* The caller must hold the page cache lock */
+static void pg_cache_release_pages_unsafe(struct rrdengine_instance *ctx, unsigned number)
+{
+ struct page_cache *pg_cache = &ctx->pg_cache;
+
+ pg_cache->populated_pages -= number;
+}
+
+static void pg_cache_release_pages(struct rrdengine_instance *ctx, unsigned number)
+{
+ struct page_cache *pg_cache = &ctx->pg_cache;
+
+ uv_rwlock_wrlock(&pg_cache->pg_cache_rwlock);
+ pg_cache_release_pages_unsafe(ctx, number);
+ uv_rwlock_wrunlock(&pg_cache->pg_cache_rwlock);
+}
+
+/*
+ * This function returns the maximum number of pages allowed in the page cache.
+ */
+unsigned long pg_cache_hard_limit(struct rrdengine_instance *ctx)
+{
+ /* it's twice the number of producers since we pin 2 pages per producer */
+ return ctx->max_cache_pages + 2 * (unsigned long)ctx->metric_API_max_producers;
+}
+
+/*
+ * This function returns the low watermark number of pages in the page cache. The page cache should strive to keep the
+ * number of pages below that number.
+ */
+unsigned long pg_cache_soft_limit(struct rrdengine_instance *ctx)
+{
+ /* it's twice the number of producers since we pin 2 pages per producer */
+ return ctx->cache_pages_low_watermark + 2 * (unsigned long)ctx->metric_API_max_producers;
+}
+
+/*
+ * This function returns the maximum number of dirty pages that are committed to be written to disk allowed in the page
+ * cache.
+ */
+unsigned long pg_cache_committed_hard_limit(struct rrdengine_instance *ctx)
+{
+ /* We remove the active pages of the producers from the calculation and only allow the extra pinned pages */
+ return ctx->cache_pages_low_watermark + (unsigned long)ctx->metric_API_max_producers;
+}
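+
+/*
+ * Worked example (numbers hypothetical): with max_cache_pages = 1000,
+ * cache_pages_low_watermark = 500 and 100 producers, the hard limit is
+ * 1000 + 2 * 100 = 1200 pages, the soft limit 500 + 2 * 100 = 700 and
+ * the committed hard limit 500 + 100 = 600.
+ */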
+
+/*
+ * This function will block until it reserves #number populated pages.
+ * It will trigger evictions or dirty page flushing if the pg_cache_hard_limit() limit is hit.
+ */
+static void pg_cache_reserve_pages(struct rrdengine_instance *ctx, unsigned number)
+{
+ struct page_cache *pg_cache = &ctx->pg_cache;
+ unsigned failures = 0;
+ const unsigned FAILURES_CEILING = 10; /* truncates exponential backoff to (2^FAILURES_CEILING x slot) */
+ unsigned long exp_backoff_slot_usec = USEC_PER_MS * 10;
+
+ assert(number < ctx->max_cache_pages);
+
+ uv_rwlock_wrlock(&pg_cache->pg_cache_rwlock);
+ if (pg_cache->populated_pages + number >= pg_cache_hard_limit(ctx) + 1)
+ debug(D_RRDENGINE, "==Page cache full. Reserving %u pages.==",
+ number);
+ while (pg_cache->populated_pages + number >= pg_cache_hard_limit(ctx) + 1) {
+
+ if (!pg_cache_try_evict_one_page_unsafe(ctx)) {
+ /* failed to evict */
+ struct completion compl;
+ struct rrdeng_cmd cmd;
+
+ ++failures;
+ uv_rwlock_wrunlock(&pg_cache->pg_cache_rwlock);
+
+ init_completion(&compl);
+ cmd.opcode = RRDENG_FLUSH_PAGES;
+ cmd.completion = &compl;
+ rrdeng_enq_cmd(&ctx->worker_config, &cmd);
+ /* wait for some pages to be flushed */
+ debug(D_RRDENGINE, "%s: waiting for pages to be written to disk before evicting.", __func__);
+ wait_for_completion(&compl);
+ destroy_completion(&compl);
+
+ if (unlikely(failures > 1)) {
+ unsigned long slots;
+ /* exponential backoff */
+ slots = random() % (2LU << MIN(failures, FAILURES_CEILING));
+ (void)sleep_usec(slots * exp_backoff_slot_usec);
+ }
+ uv_rwlock_wrlock(&pg_cache->pg_cache_rwlock);
+ }
+ }
+ pg_cache->populated_pages += number;
+ uv_rwlock_wrunlock(&pg_cache->pg_cache_rwlock);
+}
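+
+/*
+ * The exponential backoff above sleeps a random number of 10ms slots,
+ * capped below 2 << FAILURES_CEILING = 2048 slots, i.e. at most about
+ * 20 seconds per retry once evictions keep failing.
+ */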
+
+/*
+ * This function will attempt to reserve #number populated pages.
+ * It may trigger evictions if the pg_cache_soft_limit() limit is hit.
+ * Returns 0 on failure and 1 on success.
+ */
+static int pg_cache_try_reserve_pages(struct rrdengine_instance *ctx, unsigned number)
+{
+ struct page_cache *pg_cache = &ctx->pg_cache;
+ unsigned count = 0;
+ int ret = 0;
+
+ assert(number < ctx->max_cache_pages);
+
+ uv_rwlock_wrlock(&pg_cache->pg_cache_rwlock);
+ if (pg_cache->populated_pages + number >= pg_cache_soft_limit(ctx) + 1) {
+ debug(D_RRDENGINE,
+ "==Page cache full. Trying to reserve %u pages.==",
+ number);
+ do {
+ if (!pg_cache_try_evict_one_page_unsafe(ctx))
+ break;
+ ++count;
+ } while (pg_cache->populated_pages + number >= pg_cache_soft_limit(ctx) + 1);
+ debug(D_RRDENGINE, "Evicted %u pages.", count);
+ }
+
+ if (pg_cache->populated_pages + number < pg_cache_hard_limit(ctx) + 1) {
+ pg_cache->populated_pages += number;
+ ret = 1; /* success */
+ }
+ uv_rwlock_wrunlock(&pg_cache->pg_cache_rwlock);
+
+ return ret;
+}
+
+/* The caller must hold the page cache and the page descriptor locks in that order */
+static void pg_cache_evict_unsafe(struct rrdengine_instance *ctx, struct rrdeng_page_descr *descr)
+{
+ struct page_cache_descr *pg_cache_descr = descr->pg_cache_descr;
+
+ freez(pg_cache_descr->page);
+ pg_cache_descr->page = NULL;
+ pg_cache_descr->flags &= ~RRD_PAGE_POPULATED;
+ pg_cache_release_pages_unsafe(ctx, 1);
+ ++ctx->stats.pg_cache_evictions;
+}
+
+/*
+ * The caller must hold the page cache lock.
+ * Lock order: page cache -> replaceQ -> page descriptor
+ * This function iterates all pages and tries to evict one.
+ *
+ * Returns 1 on success and 0 on failure.
+ */
+static int pg_cache_try_evict_one_page_unsafe(struct rrdengine_instance *ctx)
+{
+ struct page_cache *pg_cache = &ctx->pg_cache;
+ unsigned long old_flags;
+ struct rrdeng_page_descr *descr;
+ struct page_cache_descr *pg_cache_descr = NULL;
+
+ uv_rwlock_wrlock(&pg_cache->replaceQ.lock);
+ for (pg_cache_descr = pg_cache->replaceQ.head ; NULL != pg_cache_descr ; pg_cache_descr = pg_cache_descr->next) {
+ descr = pg_cache_descr->descr;
+
+ rrdeng_page_descr_mutex_lock(ctx, descr);
+ old_flags = pg_cache_descr->flags;
+ if ((old_flags & RRD_PAGE_POPULATED) && !(old_flags & RRD_PAGE_DIRTY) && pg_cache_try_get_unsafe(descr, 1)) {
+ /* must evict */
+ pg_cache_evict_unsafe(ctx, descr);
+ pg_cache_put_unsafe(descr);
+ pg_cache_replaceQ_delete_unsafe(ctx, descr);
+
+ rrdeng_page_descr_mutex_unlock(ctx, descr);
+ uv_rwlock_wrunlock(&pg_cache->replaceQ.lock);
+
+ rrdeng_try_deallocate_pg_cache_descr(ctx, descr);
+
+ return 1;
+ }
+ rrdeng_page_descr_mutex_unlock(ctx, descr);
+ }
+ uv_rwlock_wrunlock(&pg_cache->replaceQ.lock);
+
+ /* failed to evict */
+ return 0;
+}
+
+/**
+ * Deletes a page from the database.
+ * Callers of this function need to make sure they're not deleting the same descriptor concurrently.
+ * @param ctx is the database instance.
+ * @param descr is the page descriptor.
+ * @param remove_dirty must be non-zero if the page to be deleted is dirty.
+ * @param is_exclusive_holder must be non-zero if the caller holds an exclusive page reference.
+ * @param metric_id is set to the metric the page belongs to, if it's safe to delete the metric and metric_id is not
+ * NULL. Otherwise, metric_id is not set.
+ * @return 1 if it's safe to delete the metric, 0 otherwise.
+ */
+uint8_t pg_cache_punch_hole(struct rrdengine_instance *ctx, struct rrdeng_page_descr *descr, uint8_t remove_dirty,
+ uint8_t is_exclusive_holder, uuid_t *metric_id)
+{
+ struct page_cache *pg_cache = &ctx->pg_cache;
+ struct page_cache_descr *pg_cache_descr = NULL;
+ Pvoid_t *PValue;
+ struct pg_cache_page_index *page_index = NULL;
+ int ret;
+ uint8_t can_delete_metric = 0;
+
+ uv_rwlock_rdlock(&pg_cache->metrics_index.lock);
+ PValue = JudyHSGet(pg_cache->metrics_index.JudyHS_array, descr->id, sizeof(uuid_t));
+ fatal_assert(NULL != PValue);
+ page_index = *PValue;
+ uv_rwlock_rdunlock(&pg_cache->metrics_index.lock);
+
+ uv_rwlock_wrlock(&page_index->lock);
+ ret = JudyLDel(&page_index->JudyL_array, (Word_t)(descr->start_time / USEC_PER_SEC), PJE0);
+ if (unlikely(0 == ret)) {
+ uv_rwlock_wrunlock(&page_index->lock);
+ error("Page under deletion was not in index.");
+ if (unlikely(debug_flags & D_RRDENGINE)) {
+ print_page_descr(descr);
+ }
+ goto destroy;
+ }
+ --page_index->page_count;
+ if (!page_index->writers && !page_index->page_count) {
+ can_delete_metric = 1;
+ if (metric_id) {
+ memcpy(metric_id, page_index->id, sizeof(uuid_t));
+ }
+ }
+ uv_rwlock_wrunlock(&page_index->lock);
+ fatal_assert(1 == ret);
+
+ uv_rwlock_wrlock(&pg_cache->pg_cache_rwlock);
+ ++ctx->stats.pg_cache_deletions;
+ --pg_cache->page_descriptors;
+ uv_rwlock_wrunlock(&pg_cache->pg_cache_rwlock);
+
+ rrdeng_page_descr_mutex_lock(ctx, descr);
+ pg_cache_descr = descr->pg_cache_descr;
+ if (!is_exclusive_holder) {
+ /* If we don't hold an exclusive page reference get one */
+ while (!pg_cache_try_get_unsafe(descr, 1)) {
+ debug(D_RRDENGINE, "%s: Waiting for locked page:", __func__);
+ if (unlikely(debug_flags & D_RRDENGINE))
+ print_page_cache_descr(descr);
+ pg_cache_wait_event_unsafe(descr);
+ }
+ }
+ if (remove_dirty) {
+ pg_cache_descr->flags &= ~RRD_PAGE_DIRTY;
+ } else {
+ /* even a locked page could be dirty */
+ while (unlikely(pg_cache_descr->flags & RRD_PAGE_DIRTY)) {
+ debug(D_RRDENGINE, "%s: Found dirty page, waiting for it to be flushed:", __func__);
+ if (unlikely(debug_flags & D_RRDENGINE))
+ print_page_cache_descr(descr);
+ pg_cache_wait_event_unsafe(descr);
+ }
+ }
+ rrdeng_page_descr_mutex_unlock(ctx, descr);
+
+ if (pg_cache_descr->flags & RRD_PAGE_POPULATED) {
+ /* only after locking can it be safely deleted from LRU */
+ pg_cache_replaceQ_delete(ctx, descr);
+
+ uv_rwlock_wrlock(&pg_cache->pg_cache_rwlock);
+ pg_cache_evict_unsafe(ctx, descr);
+ uv_rwlock_wrunlock(&pg_cache->pg_cache_rwlock);
+ }
+ pg_cache_put(ctx, descr);
+ rrdeng_try_deallocate_pg_cache_descr(ctx, descr);
+ while (descr->pg_cache_descr_state & PG_CACHE_DESCR_ALLOCATED) {
+ rrdeng_try_deallocate_pg_cache_descr(ctx, descr); /* spin */
+ (void)sleep_usec(1000); /* 1 msec */
+ }
+destroy:
+ freez(descr);
+ pg_cache_update_metric_times(page_index);
+
+ return can_delete_metric;
+}
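+
+/*
+ * A minimal caller sketch, assuming the caller already holds an exclusive
+ * page reference on descr: when the last page of a metric is punched out
+ * and no writers remain, metric_id receives the UUID so the metric itself
+ * can be retired. Illustration only.
+ */
+#if 0 /* illustrative sketch, never compiled */
+static void example_delete_page(struct rrdengine_instance *ctx, struct rrdeng_page_descr *descr)
+{
+    uuid_t metric_id;
+
+    if (pg_cache_punch_hole(ctx, descr, 0, 1, &metric_id)) {
+        /* no pages and no writers remain; safe to delete the metric */
+    }
+}
+#endif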
+
+static inline int is_page_in_time_range(struct rrdeng_page_descr *descr, usec_t start_time, usec_t end_time)
+{
+ usec_t pg_start, pg_end;
+
+ pg_start = descr->start_time;
+ pg_end = descr->end_time;
+
+ return (pg_start < start_time && pg_end >= start_time) ||
+ (pg_start >= start_time && pg_start <= end_time);
+}
+
+static inline int is_point_in_time_in_page(struct rrdeng_page_descr *descr, usec_t point_in_time)
+{
+ return (point_in_time >= descr->start_time && point_in_time <= descr->end_time);
+}
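+
+/*
+ * A worked example of the two predicates above: a page spanning [10s, 20s]
+ * overlaps the query range [15s, 30s] but not [21s, 30s], and the point 15s
+ * falls inside the page while 25s does not. Illustration only.
+ */
+#if 0 /* illustrative sketch, never compiled */
+static void example_predicates(void)
+{
+    struct rrdeng_page_descr descr = {
+        .start_time = 10 * USEC_PER_SEC,
+        .end_time = 20 * USEC_PER_SEC,
+    };
+
+    assert(is_page_in_time_range(&descr, 15 * USEC_PER_SEC, 30 * USEC_PER_SEC));
+    assert(!is_page_in_time_range(&descr, 21 * USEC_PER_SEC, 30 * USEC_PER_SEC));
+    assert(is_point_in_time_in_page(&descr, 15 * USEC_PER_SEC));
+    assert(!is_point_in_time_in_page(&descr, 25 * USEC_PER_SEC));
+}
+#endif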
+
+/* The caller must hold the page index lock */
+static inline struct rrdeng_page_descr *
+ find_first_page_in_time_range(struct pg_cache_page_index *page_index, usec_t start_time, usec_t end_time)
+{
+ struct rrdeng_page_descr *descr = NULL;
+ Pvoid_t *PValue;
+ Word_t Index;
+
+ Index = (Word_t)(start_time / USEC_PER_SEC);
+ PValue = JudyLLast(page_index->JudyL_array, &Index, PJE0);
+ if (likely(NULL != PValue)) {
+ descr = *PValue;
+ if (is_page_in_time_range(descr, start_time, end_time)) {
+ return descr;
+ }
+ }
+
+ Index = (Word_t)(start_time / USEC_PER_SEC);
+ PValue = JudyLFirst(page_index->JudyL_array, &Index, PJE0);
+ if (likely(NULL != PValue)) {
+ descr = *PValue;
+ if (is_page_in_time_range(descr, start_time, end_time)) {
+ return descr;
+ }
+ }
+
+ return NULL;
+}
+
+/* Update metric oldest and latest timestamps efficiently when adding new values */
+void pg_cache_add_new_metric_time(struct pg_cache_page_index *page_index, struct rrdeng_page_descr *descr)
+{
+ usec_t oldest_time = page_index->oldest_time;
+ usec_t latest_time = page_index->latest_time;
+
+ if (unlikely(oldest_time == INVALID_TIME || descr->start_time < oldest_time)) {
+ page_index->oldest_time = descr->start_time;
+ }
+ if (likely(descr->end_time > latest_time || latest_time == INVALID_TIME)) {
+ page_index->latest_time = descr->end_time;
+ }
+}
+
+/* Update metric oldest and latest timestamps when removing old values */
+void pg_cache_update_metric_times(struct pg_cache_page_index *page_index)
+{
+ Pvoid_t *firstPValue, *lastPValue;
+ Word_t firstIndex, lastIndex;
+ struct rrdeng_page_descr *descr;
+ usec_t oldest_time = INVALID_TIME;
+ usec_t latest_time = INVALID_TIME;
+
+ uv_rwlock_rdlock(&page_index->lock);
+ /* Find first page in range */
+ firstIndex = (Word_t)0;
+ firstPValue = JudyLFirst(page_index->JudyL_array, &firstIndex, PJE0);
+ if (likely(NULL != firstPValue)) {
+ descr = *firstPValue;
+ oldest_time = descr->start_time;
+ }
+ lastIndex = (Word_t)-1;
+ lastPValue = JudyLLast(page_index->JudyL_array, &lastIndex, PJE0);
+ if (likely(NULL != lastPValue)) {
+ descr = *lastPValue;
+ latest_time = descr->end_time;
+ }
+ uv_rwlock_rdunlock(&page_index->lock);
+
+ if (unlikely(NULL == firstPValue)) {
+ fatal_assert(NULL == lastPValue);
+ page_index->oldest_time = page_index->latest_time = INVALID_TIME;
+ return;
+ }
+ page_index->oldest_time = oldest_time;
+ page_index->latest_time = latest_time;
+}
+
+/* If index is NULL lookup by UUID (descr->id) */
+void pg_cache_insert(struct rrdengine_instance *ctx, struct pg_cache_page_index *index,
+ struct rrdeng_page_descr *descr)
+{
+ struct page_cache *pg_cache = &ctx->pg_cache;
+ Pvoid_t *PValue;
+ struct pg_cache_page_index *page_index;
+ unsigned long pg_cache_descr_state = descr->pg_cache_descr_state;
+
+ if (0 != pg_cache_descr_state) {
+ /* the descriptor has pre-allocated page cache descriptor state */
+ struct page_cache_descr *pg_cache_descr = descr->pg_cache_descr;
+
+ fatal_assert(pg_cache_descr_state & PG_CACHE_DESCR_ALLOCATED);
+ if (pg_cache_descr->flags & RRD_PAGE_POPULATED) {
+ pg_cache_reserve_pages(ctx, 1);
+ if (!(pg_cache_descr->flags & RRD_PAGE_DIRTY))
+ pg_cache_replaceQ_insert(ctx, descr);
+ }
+ }
+
+ if (unlikely(NULL == index)) {
+ uv_rwlock_rdlock(&pg_cache->metrics_index.lock);
+ PValue = JudyHSGet(pg_cache->metrics_index.JudyHS_array, descr->id, sizeof(uuid_t));
+ fatal_assert(NULL != PValue);
+ page_index = *PValue;
+ uv_rwlock_rdunlock(&pg_cache->metrics_index.lock);
+ } else {
+ page_index = index;
+ }
+
+ uv_rwlock_wrlock(&page_index->lock);
+ PValue = JudyLIns(&page_index->JudyL_array, (Word_t)(descr->start_time / USEC_PER_SEC), PJE0);
+ *PValue = descr;
+ ++page_index->page_count;
+ pg_cache_add_new_metric_time(page_index, descr);
+ uv_rwlock_wrunlock(&page_index->lock);
+
+ uv_rwlock_wrlock(&pg_cache->pg_cache_rwlock);
+ ++ctx->stats.pg_cache_insertions;
+ ++pg_cache->page_descriptors;
+ uv_rwlock_wrunlock(&pg_cache->pg_cache_rwlock);
+}
+
+usec_t pg_cache_oldest_time_in_range(struct rrdengine_instance *ctx, uuid_t *id, usec_t start_time, usec_t end_time)
+{
+ struct page_cache *pg_cache = &ctx->pg_cache;
+ struct rrdeng_page_descr *descr = NULL;
+ Pvoid_t *PValue;
+ struct pg_cache_page_index *page_index = NULL;
+
+ uv_rwlock_rdlock(&pg_cache->metrics_index.lock);
+ PValue = JudyHSGet(pg_cache->metrics_index.JudyHS_array, id, sizeof(uuid_t));
+ if (likely(NULL != PValue)) {
+ page_index = *PValue;
+ }
+ uv_rwlock_rdunlock(&pg_cache->metrics_index.lock);
+ if (NULL == PValue) {
+ return INVALID_TIME;
+ }
+
+ uv_rwlock_rdlock(&page_index->lock);
+ descr = find_first_page_in_time_range(page_index, start_time, end_time);
+ if (NULL == descr) {
+ uv_rwlock_rdunlock(&page_index->lock);
+ return INVALID_TIME;
+ }
+ uv_rwlock_rdunlock(&page_index->lock);
+ return descr->start_time;
+}
+
+/**
+ * Return page information for the first page before point_in_time that satisfies the filter.
+ * @param ctx DB context
+ * @param page_index page index of a metric
+ * @param point_in_time the pages that are searched must be older than this timestamp
+ * @param filter decides if the page satisfies the caller's criteria
+ * @param page_info the result of the search is set in this pointer
+ */
+void pg_cache_get_filtered_info_prev(struct rrdengine_instance *ctx, struct pg_cache_page_index *page_index,
+ usec_t point_in_time, pg_cache_page_info_filter_t *filter,
+ struct rrdeng_page_info *page_info)
+{
+ struct page_cache *pg_cache = &ctx->pg_cache;
+ struct rrdeng_page_descr *descr = NULL;
+ Pvoid_t *PValue;
+ Word_t Index;
+
+ (void)pg_cache;
+ fatal_assert(NULL != page_index);
+
+ Index = (Word_t)(point_in_time / USEC_PER_SEC);
+ uv_rwlock_rdlock(&page_index->lock);
+ do {
+ PValue = JudyLPrev(page_index->JudyL_array, &Index, PJE0);
+ descr = unlikely(NULL == PValue) ? NULL : *PValue;
+ } while (descr != NULL && !filter(descr));
+ if (unlikely(NULL == descr)) {
+ page_info->page_length = 0;
+ page_info->start_time = INVALID_TIME;
+ page_info->end_time = INVALID_TIME;
+ } else {
+ page_info->page_length = descr->page_length;
+ page_info->start_time = descr->start_time;
+ page_info->end_time = descr->end_time;
+ }
+ uv_rwlock_rdunlock(&page_index->lock);
+}
+
+/**
+ * Searches for pages in a time range and triggers disk I/O if necessary and possible.
+ * Does not get a reference.
+ * @param ctx DB context
+ * @param id UUID
+ * @param start_time inclusive starting time in usec
+ * @param end_time inclusive ending time in usec
+ * @param page_info_arrayp It allocates (*page_info_arrayp) and populates it with information about the pages that
+ * overlap with the time range [start_time,end_time]. The caller must free (*page_info_arrayp) with freez().
+ * If (*page_info_arrayp) is set to NULL nothing was allocated.
+ * @param ret_page_indexp Sets the page index pointer (*ret_page_indexp) for the given UUID.
+ * @return the number of pages that overlap with the time range [start_time,end_time].
+ */
+unsigned pg_cache_preload(struct rrdengine_instance *ctx, uuid_t *id, usec_t start_time, usec_t end_time,
+ struct rrdeng_page_info **page_info_arrayp, struct pg_cache_page_index **ret_page_indexp)
+{
+ struct page_cache *pg_cache = &ctx->pg_cache;
+ struct rrdeng_page_descr *descr = NULL, *preload_array[PAGE_CACHE_MAX_PRELOAD_PAGES];
+ struct page_cache_descr *pg_cache_descr = NULL;
+ unsigned i, j, k, preload_count, count, page_info_array_max_size;
+ unsigned long flags;
+ Pvoid_t *PValue;
+ struct pg_cache_page_index *page_index = NULL;
+ Word_t Index;
+ uint8_t failed_to_reserve;
+
+ fatal_assert(NULL != ret_page_indexp);
+
+ uv_rwlock_rdlock(&pg_cache->metrics_index.lock);
+ PValue = JudyHSGet(pg_cache->metrics_index.JudyHS_array, id, sizeof(uuid_t));
+ if (likely(NULL != PValue)) {
+ *ret_page_indexp = page_index = *PValue;
+ }
+ uv_rwlock_rdunlock(&pg_cache->metrics_index.lock);
+ if (NULL == PValue) {
+ debug(D_RRDENGINE, "%s: No page was found to attempt preload.", __func__);
+ *ret_page_indexp = NULL;
+ return 0;
+ }
+
+ uv_rwlock_rdlock(&page_index->lock);
+ descr = find_first_page_in_time_range(page_index, start_time, end_time);
+ if (NULL == descr) {
+ uv_rwlock_rdunlock(&page_index->lock);
+ debug(D_RRDENGINE, "%s: No page was found to attempt preload.", __func__);
+ *ret_page_indexp = NULL;
+ return 0;
+ } else {
+ Index = (Word_t)(descr->start_time / USEC_PER_SEC);
+ }
+ if (page_info_arrayp) {
+ page_info_array_max_size = PAGE_CACHE_MAX_PRELOAD_PAGES * sizeof(struct rrdeng_page_info);
+ *page_info_arrayp = mallocz(page_info_array_max_size);
+ }
+
+ for (count = 0, preload_count = 0 ;
+ descr != NULL && is_page_in_time_range(descr, start_time, end_time) ;
+ PValue = JudyLNext(page_index->JudyL_array, &Index, PJE0),
+ descr = unlikely(NULL == PValue) ? NULL : *PValue) {
+ /* Iterate all pages in range */
+
+ if (unlikely(0 == descr->page_length))
+ continue;
+ if (page_info_arrayp) {
+ if (unlikely(count >= page_info_array_max_size / sizeof(struct rrdeng_page_info))) {
+ page_info_array_max_size += PAGE_CACHE_MAX_PRELOAD_PAGES * sizeof(struct rrdeng_page_info);
+ *page_info_arrayp = reallocz(*page_info_arrayp, page_info_array_max_size);
+ }
+ (*page_info_arrayp)[count].start_time = descr->start_time;
+ (*page_info_arrayp)[count].end_time = descr->end_time;
+ (*page_info_arrayp)[count].page_length = descr->page_length;
+ }
+ ++count;
+
+ rrdeng_page_descr_mutex_lock(ctx, descr);
+ pg_cache_descr = descr->pg_cache_descr;
+ flags = pg_cache_descr->flags;
+ if (pg_cache_can_get_unsafe(descr, 0)) {
+ if (flags & RRD_PAGE_POPULATED) {
+ /* success */
+ rrdeng_page_descr_mutex_unlock(ctx, descr);
+ debug(D_RRDENGINE, "%s: Page was found in memory.", __func__);
+ continue;
+ }
+ }
+ if (!(flags & RRD_PAGE_POPULATED) && pg_cache_try_get_unsafe(descr, 1)) {
+ preload_array[preload_count++] = descr;
+ if (PAGE_CACHE_MAX_PRELOAD_PAGES == preload_count) {
+ rrdeng_page_descr_mutex_unlock(ctx, descr);
+ break;
+ }
+ }
+ rrdeng_page_descr_mutex_unlock(ctx, descr);
+ }
+ uv_rwlock_rdunlock(&page_index->lock);
+
+ failed_to_reserve = 0;
+ for (i = 0 ; i < preload_count && !failed_to_reserve ; ++i) {
+ struct rrdeng_cmd cmd;
+ struct rrdeng_page_descr *next;
+
+ descr = preload_array[i];
+ if (NULL == descr) {
+ continue;
+ }
+ if (!pg_cache_try_reserve_pages(ctx, 1)) {
+ failed_to_reserve = 1;
+ break;
+ }
+ cmd.opcode = RRDENG_READ_EXTENT;
+ cmd.read_extent.page_cache_descr[0] = descr;
+ /* don't use this page again */
+ preload_array[i] = NULL;
+ for (j = 0, k = 1 ; j < preload_count ; ++j) {
+ next = preload_array[j];
+ if (NULL == next) {
+ continue;
+ }
+ if (descr->extent == next->extent) {
+ /* same extent, consolidate */
+ if (!pg_cache_try_reserve_pages(ctx, 1)) {
+ failed_to_reserve = 1;
+ break;
+ }
+ cmd.read_extent.page_cache_descr[k++] = next;
+ /* don't use this page again */
+ preload_array[j] = NULL;
+ }
+ }
+ cmd.read_extent.page_count = k;
+ rrdeng_enq_cmd(&ctx->worker_config, &cmd);
+ }
+ if (failed_to_reserve) {
+ debug(D_RRDENGINE, "%s: Failed to reserve enough memory, canceling I/O.", __func__);
+ for (i = 0 ; i < preload_count ; ++i) {
+ descr = preload_array[i];
+ if (NULL == descr) {
+ continue;
+ }
+ pg_cache_put(ctx, descr);
+ }
+ }
+ if (!preload_count) {
+ /* no such page */
+ debug(D_RRDENGINE, "%s: No page was eligible to attempt preload.", __func__);
+ }
+ if (unlikely(0 == count && page_info_arrayp)) {
+ freez(*page_info_arrayp);
+ *page_info_arrayp = NULL;
+ }
+ return count;
+}
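+
+/*
+ * A minimal caller sketch for the preloader, assuming an initialized
+ * instance ctx and a metric UUID id: the page info array is optional,
+ * and when requested it must be freed by the caller. Illustration only.
+ */
+#if 0 /* illustrative sketch, never compiled */
+static void example_preload(struct rrdengine_instance *ctx, uuid_t *id)
+{
+    struct rrdeng_page_info *page_info_array = NULL;
+    struct pg_cache_page_index *page_index;
+    unsigned pages;
+
+    pages = pg_cache_preload(ctx, id, 0, 60 * USEC_PER_SEC, &page_info_array, &page_index);
+    /* ... consume page_info_array[0 .. pages - 1] ... */
+    if (page_info_array)
+        freez(page_info_array);
+}
+#endif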
+
+/*
+ * Searches for a page and gets a reference.
+ * When point_in_time is INVALID_TIME get any page.
+ * If index is NULL lookup by UUID (id).
+ */
+struct rrdeng_page_descr *
+ pg_cache_lookup(struct rrdengine_instance *ctx, struct pg_cache_page_index *index, uuid_t *id,
+ usec_t point_in_time)
+{
+ struct page_cache *pg_cache = &ctx->pg_cache;
+ struct rrdeng_page_descr *descr = NULL;
+ struct page_cache_descr *pg_cache_descr = NULL;
+ unsigned long flags;
+ Pvoid_t *PValue;
+ struct pg_cache_page_index *page_index = NULL;
+ Word_t Index;
+ uint8_t page_not_in_cache;
+
+ if (unlikely(NULL == index)) {
+ uv_rwlock_rdlock(&pg_cache->metrics_index.lock);
+ PValue = JudyHSGet(pg_cache->metrics_index.JudyHS_array, id, sizeof(uuid_t));
+ if (likely(NULL != PValue)) {
+ page_index = *PValue;
+ }
+ uv_rwlock_rdunlock(&pg_cache->metrics_index.lock);
+ if (NULL == PValue) {
+ return NULL;
+ }
+ } else {
+ page_index = index;
+ }
+ pg_cache_reserve_pages(ctx, 1);
+
+ page_not_in_cache = 0;
+ uv_rwlock_rdlock(&page_index->lock);
+ while (1) {
+ Index = (Word_t)(point_in_time / USEC_PER_SEC);
+ PValue = JudyLLast(page_index->JudyL_array, &Index, PJE0);
+ if (likely(NULL != PValue)) {
+ descr = *PValue;
+ }
+ if (NULL == PValue ||
+ 0 == descr->page_length ||
+ (INVALID_TIME != point_in_time &&
+ !is_point_in_time_in_page(descr, point_in_time))) {
+ /* non-empty page not found */
+ uv_rwlock_rdunlock(&page_index->lock);
+
+ pg_cache_release_pages(ctx, 1);
+ return NULL;
+ }
+ rrdeng_page_descr_mutex_lock(ctx, descr);
+ pg_cache_descr = descr->pg_cache_descr;
+ flags = pg_cache_descr->flags;
+ if ((flags & RRD_PAGE_POPULATED) && pg_cache_try_get_unsafe(descr, 0)) {
+ /* success */
+ rrdeng_page_descr_mutex_unlock(ctx, descr);
+ debug(D_RRDENGINE, "%s: Page was found in memory.", __func__);
+ break;
+ }
+ if (!(flags & RRD_PAGE_POPULATED) && pg_cache_try_get_unsafe(descr, 1)) {
+ struct rrdeng_cmd cmd;
+
+ uv_rwlock_rdunlock(&page_index->lock);
+
+ cmd.opcode = RRDENG_READ_PAGE;
+ cmd.read_page.page_cache_descr = descr;
+ rrdeng_enq_cmd(&ctx->worker_config, &cmd);
+
+ debug(D_RRDENGINE, "%s: Waiting for page to be asynchronously read from disk:", __func__);
+ if(unlikely(debug_flags & D_RRDENGINE))
+ print_page_cache_descr(descr);
+ while (!(pg_cache_descr->flags & RRD_PAGE_POPULATED)) {
+ pg_cache_wait_event_unsafe(descr);
+ }
+ /* success */
+ /* Downgrade exclusive reference to allow other readers */
+ pg_cache_descr->flags &= ~RRD_PAGE_LOCKED;
+ pg_cache_wake_up_waiters_unsafe(descr);
+ rrdeng_page_descr_mutex_unlock(ctx, descr);
+ rrd_stat_atomic_add(&ctx->stats.pg_cache_misses, 1);
+ return descr;
+ }
+ uv_rwlock_rdunlock(&page_index->lock);
+ debug(D_RRDENGINE, "%s: Waiting for page to be unlocked:", __func__);
+ if(unlikely(debug_flags & D_RRDENGINE))
+ print_page_cache_descr(descr);
+ if (!(flags & RRD_PAGE_POPULATED))
+ page_not_in_cache = 1;
+ pg_cache_wait_event_unsafe(descr);
+ rrdeng_page_descr_mutex_unlock(ctx, descr);
+
+ /* reset scan to find again */
+ uv_rwlock_rdlock(&page_index->lock);
+ }
+ uv_rwlock_rdunlock(&page_index->lock);
+
+ if (!(flags & RRD_PAGE_DIRTY))
+ pg_cache_replaceQ_set_hot(ctx, descr);
+ pg_cache_release_pages(ctx, 1);
+ if (page_not_in_cache)
+ rrd_stat_atomic_add(&ctx->stats.pg_cache_misses, 1);
+ else
+ rrd_stat_atomic_add(&ctx->stats.pg_cache_hits, 1);
+ return descr;
+}
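+
+/*
+ * A minimal caller sketch, assuming ctx and id as above: a successful lookup
+ * returns a referenced descriptor, and the reference must be dropped with
+ * pg_cache_put() once the page data has been consumed. Illustration only.
+ */
+#if 0 /* illustrative sketch, never compiled */
+static void example_lookup(struct rrdengine_instance *ctx, uuid_t *id, usec_t point_in_time)
+{
+    struct rrdeng_page_descr *descr;
+
+    descr = pg_cache_lookup(ctx, NULL, id, point_in_time);
+    if (NULL != descr) {
+        /* ... read the page through descr->pg_cache_descr->page ... */
+        pg_cache_put(ctx, descr);
+    }
+}
+#endif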
+
+/*
+ * Searches for the first page between start_time and end_time and gets a reference.
+ * start_time and end_time are inclusive.
+ * If index is NULL lookup by UUID (id).
+ */
+struct rrdeng_page_descr *
+pg_cache_lookup_next(struct rrdengine_instance *ctx, struct pg_cache_page_index *index, uuid_t *id,
+ usec_t start_time, usec_t end_time)
+{
+ struct page_cache *pg_cache = &ctx->pg_cache;
+ struct rrdeng_page_descr *descr = NULL;
+ struct page_cache_descr *pg_cache_descr = NULL;
+ unsigned long flags;
+ Pvoid_t *PValue;
+ struct pg_cache_page_index *page_index = NULL;
+ uint8_t page_not_in_cache;
+
+ if (unlikely(NULL == index)) {
+ uv_rwlock_rdlock(&pg_cache->metrics_index.lock);
+ PValue = JudyHSGet(pg_cache->metrics_index.JudyHS_array, id, sizeof(uuid_t));
+ if (likely(NULL != PValue)) {
+ page_index = *PValue;
+ }
+ uv_rwlock_rdunlock(&pg_cache->metrics_index.lock);
+ if (NULL == PValue) {
+ return NULL;
+ }
+ } else {
+ page_index = index;
+ }
+ pg_cache_reserve_pages(ctx, 1);
+
+ page_not_in_cache = 0;
+ uv_rwlock_rdlock(&page_index->lock);
+ while (1) {
+ descr = find_first_page_in_time_range(page_index, start_time, end_time);
+ if (NULL == descr || 0 == descr->page_length) {
+ /* non-empty page not found */
+ uv_rwlock_rdunlock(&page_index->lock);
+
+ pg_cache_release_pages(ctx, 1);
+ return NULL;
+ }
+ rrdeng_page_descr_mutex_lock(ctx, descr);
+ pg_cache_descr = descr->pg_cache_descr;
+ flags = pg_cache_descr->flags;
+ if ((flags & RRD_PAGE_POPULATED) && pg_cache_try_get_unsafe(descr, 0)) {
+ /* success */
+ rrdeng_page_descr_mutex_unlock(ctx, descr);
+ debug(D_RRDENGINE, "%s: Page was found in memory.", __func__);
+ break;
+ }
+ if (!(flags & RRD_PAGE_POPULATED) && pg_cache_try_get_unsafe(descr, 1)) {
+ struct rrdeng_cmd cmd;
+
+ uv_rwlock_rdunlock(&page_index->lock);
+
+ cmd.opcode = RRDENG_READ_PAGE;
+ cmd.read_page.page_cache_descr = descr;
+ rrdeng_enq_cmd(&ctx->worker_config, &cmd);
+
+ debug(D_RRDENGINE, "%s: Waiting for page to be asynchronously read from disk:", __func__);
+ if(unlikely(debug_flags & D_RRDENGINE))
+ print_page_cache_descr(descr);
+ while (!(pg_cache_descr->flags & RRD_PAGE_POPULATED)) {
+ pg_cache_wait_event_unsafe(descr);
+ }
+ /* success */
+ /* Downgrade exclusive reference to allow other readers */
+ pg_cache_descr->flags &= ~RRD_PAGE_LOCKED;
+ pg_cache_wake_up_waiters_unsafe(descr);
+ rrdeng_page_descr_mutex_unlock(ctx, descr);
+ rrd_stat_atomic_add(&ctx->stats.pg_cache_misses, 1);
+ return descr;
+ }
+ uv_rwlock_rdunlock(&page_index->lock);
+ debug(D_RRDENGINE, "%s: Waiting for page to be unlocked:", __func__);
+ if(unlikely(debug_flags & D_RRDENGINE))
+ print_page_cache_descr(descr);
+ if (!(flags & RRD_PAGE_POPULATED))
+ page_not_in_cache = 1;
+ pg_cache_wait_event_unsafe(descr);
+ rrdeng_page_descr_mutex_unlock(ctx, descr);
+
+ /* reset scan to find again */
+ uv_rwlock_rdlock(&page_index->lock);
+ }
+ uv_rwlock_rdunlock(&page_index->lock);
+
+ if (!(flags & RRD_PAGE_DIRTY))
+ pg_cache_replaceQ_set_hot(ctx, descr);
+ pg_cache_release_pages(ctx, 1);
+ if (page_not_in_cache)
+ rrd_stat_atomic_add(&ctx->stats.pg_cache_misses, 1);
+ else
+ rrd_stat_atomic_add(&ctx->stats.pg_cache_hits, 1);
+ return descr;
+}
+
+struct pg_cache_page_index *create_page_index(uuid_t *id)
+{
+ struct pg_cache_page_index *page_index;
+
+ page_index = mallocz(sizeof(*page_index));
+ page_index->JudyL_array = (Pvoid_t) NULL;
+ uuid_copy(page_index->id, *id);
+ fatal_assert(0 == uv_rwlock_init(&page_index->lock));
+ page_index->oldest_time = INVALID_TIME;
+ page_index->latest_time = INVALID_TIME;
+ page_index->prev = NULL;
+ page_index->page_count = 0;
+ page_index->writers = 0;
+
+ return page_index;
+}
+
+static void init_metrics_index(struct rrdengine_instance *ctx)
+{
+ struct page_cache *pg_cache = &ctx->pg_cache;
+
+ pg_cache->metrics_index.JudyHS_array = (Pvoid_t) NULL;
+ pg_cache->metrics_index.last_page_index = NULL;
+ fatal_assert(0 == uv_rwlock_init(&pg_cache->metrics_index.lock));
+}
+
+static void init_replaceQ(struct rrdengine_instance *ctx)
+{
+ struct page_cache *pg_cache = &ctx->pg_cache;
+
+ pg_cache->replaceQ.head = NULL;
+ pg_cache->replaceQ.tail = NULL;
+ fatal_assert(0 == uv_rwlock_init(&pg_cache->replaceQ.lock));
+}
+
+static void init_committed_page_index(struct rrdengine_instance *ctx)
+{
+ struct page_cache *pg_cache = &ctx->pg_cache;
+
+ pg_cache->committed_page_index.JudyL_array = (Pvoid_t) NULL;
+ fatal_assert(0 == uv_rwlock_init(&pg_cache->committed_page_index.lock));
+ pg_cache->committed_page_index.latest_corr_id = 0;
+ pg_cache->committed_page_index.nr_committed_pages = 0;
+}
+
+void init_page_cache(struct rrdengine_instance *ctx)
+{
+ struct page_cache *pg_cache = &ctx->pg_cache;
+
+ pg_cache->page_descriptors = 0;
+ pg_cache->populated_pages = 0;
+ fatal_assert(0 == uv_rwlock_init(&pg_cache->pg_cache_rwlock));
+
+ init_metrics_index(ctx);
+ init_replaceQ(ctx);
+ init_committed_page_index(ctx);
+}
+
+void free_page_cache(struct rrdengine_instance *ctx)
+{
+ struct page_cache *pg_cache = &ctx->pg_cache;
+ Word_t ret_Judy, bytes_freed = 0;
+ Pvoid_t *PValue;
+ struct pg_cache_page_index *page_index, *prev_page_index;
+ Word_t Index;
+ struct rrdeng_page_descr *descr;
+ struct page_cache_descr *pg_cache_descr;
+
+ /* Free committed page index */
+ ret_Judy = JudyLFreeArray(&pg_cache->committed_page_index.JudyL_array, PJE0);
+ fatal_assert(NULL == pg_cache->committed_page_index.JudyL_array);
+ bytes_freed += ret_Judy;
+
+ for (page_index = pg_cache->metrics_index.last_page_index ;
+ page_index != NULL ;
+ page_index = prev_page_index) {
+ prev_page_index = page_index->prev;
+
+ /* Find first page in range */
+ Index = (Word_t) 0;
+ PValue = JudyLFirst(page_index->JudyL_array, &Index, PJE0);
+ descr = unlikely(NULL == PValue) ? NULL : *PValue;
+
+ while (descr != NULL) {
+ /* Iterate all page descriptors of this metric */
+
+ if (descr->pg_cache_descr_state & PG_CACHE_DESCR_ALLOCATED) {
+ /* Check rrdenglocking.c */
+ pg_cache_descr = descr->pg_cache_descr;
+ if (pg_cache_descr->flags & RRD_PAGE_POPULATED) {
+ freez(pg_cache_descr->page);
+ bytes_freed += RRDENG_BLOCK_SIZE;
+ }
+ rrdeng_destroy_pg_cache_descr(ctx, pg_cache_descr);
+ bytes_freed += sizeof(*pg_cache_descr);
+ }
+ freez(descr);
+ bytes_freed += sizeof(*descr);
+
+ PValue = JudyLNext(page_index->JudyL_array, &Index, PJE0);
+ descr = unlikely(NULL == PValue) ? NULL : *PValue;
+ }
+
+ /* Free page index */
+ ret_Judy = JudyLFreeArray(&page_index->JudyL_array, PJE0);
+ fatal_assert(NULL == page_index->JudyL_array);
+ bytes_freed += ret_Judy;
+ freez(page_index);
+ bytes_freed += sizeof(*page_index);
+ }
+ /* Free metrics index */
+ ret_Judy = JudyHSFreeArray(&pg_cache->metrics_index.JudyHS_array, PJE0);
+ fatal_assert(NULL == pg_cache->metrics_index.JudyHS_array);
+ bytes_freed += ret_Judy;
+
+ info("Freed %lu bytes of memory from page cache.", bytes_freed);
+}
\ No newline at end of file
diff --git a/database/engine/pagecache.h b/database/engine/pagecache.h
new file mode 100644
index 0000000..31e9739
--- /dev/null
+++ b/database/engine/pagecache.h
@@ -0,0 +1,231 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_PAGECACHE_H
+#define NETDATA_PAGECACHE_H
+
+#include "rrdengine.h"
+
+/* Forward declarations */
+struct rrdengine_instance;
+struct extent_info;
+struct rrdeng_page_descr;
+
+#define INVALID_TIME (0)
+
+/* Page flags */
+#define RRD_PAGE_DIRTY (1LU << 0)
+#define RRD_PAGE_LOCKED (1LU << 1)
+#define RRD_PAGE_READ_PENDING (1LU << 2)
+#define RRD_PAGE_WRITE_PENDING (1LU << 3)
+#define RRD_PAGE_POPULATED (1LU << 4)
+
+struct page_cache_descr {
+ struct rrdeng_page_descr *descr; /* parent descriptor */
+ void *page;
+ unsigned long flags;
+ struct page_cache_descr *prev; /* LRU */
+ struct page_cache_descr *next; /* LRU */
+
+ unsigned refcnt;
+ uv_mutex_t mutex; /* always take it after the page cache lock or after the commit lock */
+ uv_cond_t cond;
+ unsigned waiters;
+};
+
+/* Page cache descriptor flags, state = 0 means no descriptor */
+#define PG_CACHE_DESCR_ALLOCATED (1LU << 0)
+#define PG_CACHE_DESCR_DESTROY (1LU << 1)
+#define PG_CACHE_DESCR_LOCKED (1LU << 2)
+#define PG_CACHE_DESCR_SHIFT (3)
+#define PG_CACHE_DESCR_USERS_MASK (((unsigned long)-1) << PG_CACHE_DESCR_SHIFT)
+#define PG_CACHE_DESCR_FLAGS_MASK (((unsigned long)-1) >> (BITS_PER_ULONG - PG_CACHE_DESCR_SHIFT))
+
+/*
+ * Page cache descriptor state bits (works for both 32-bit and 64-bit architectures):
+ *
+ * 63    ...    31    ...    3 |          2 |          1 |          0 |
+ * ----------------------------+------------+------------+------------+
+ * number of descriptor users  |    DESTROY |     LOCKED |  ALLOCATED |
+ */
+struct rrdeng_page_descr {
+ uuid_t *id; /* never changes */
+ struct extent_info *extent;
+
+ /* points to ephemeral page cache descriptor if the page resides in the cache */
+ struct page_cache_descr *pg_cache_descr;
+
+ /* Compare-And-Swap target for page cache descriptor allocation algorithm */
+ volatile unsigned long pg_cache_descr_state;
+
+ /* page information */
+ usec_t start_time;
+ usec_t end_time;
+ uint32_t page_length;
+};
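+
+/*
+ * For illustration, a state word can be decoded with the masks above; e.g.
+ * the value ((2LU << PG_CACHE_DESCR_SHIFT) | PG_CACHE_DESCR_ALLOCATED)
+ * describes an allocated descriptor with two users. Sketch only.
+ */
+#if 0 /* illustrative sketch, never compiled */
+static void example_decode_state(struct rrdeng_page_descr *descr)
+{
+    unsigned long state = descr->pg_cache_descr_state;
+    unsigned long users = (state & PG_CACHE_DESCR_USERS_MASK) >> PG_CACHE_DESCR_SHIFT;
+    unsigned long flags = state & PG_CACHE_DESCR_FLAGS_MASK;
+
+    if ((flags & PG_CACHE_DESCR_ALLOCATED) && users > 0) {
+        /* descr->pg_cache_descr is valid and has concurrent users */
+    }
+}
+#endif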
+
+#define PAGE_INFO_SCRATCH_SZ (8)
+struct rrdeng_page_info {
+ uint8_t scratch[PAGE_INFO_SCRATCH_SZ]; /* scratch area to be used by page-cache users */
+
+ usec_t start_time;
+ usec_t end_time;
+ uint32_t page_length;
+};
+
+/* returns 1 for success, 0 for failure */
+typedef int pg_cache_page_info_filter_t(struct rrdeng_page_descr *);
+
+#define PAGE_CACHE_MAX_PRELOAD_PAGES (256)
+
+/* maps time ranges to pages */
+struct pg_cache_page_index {
+ uuid_t id;
+ /*
+ * care: JudyL_array indices are converted from useconds to seconds to fit in one word in 32-bit architectures
+ * TODO: examine if we want to support better granularity than seconds
+ */
+ Pvoid_t JudyL_array;
+ Word_t page_count;
+ unsigned short writers;
+ uv_rwlock_t lock;
+
+ /*
+ * Only one effective writer, data deletion workqueue.
+ * It's also written during the DB loading phase.
+ */
+ usec_t oldest_time;
+
+ /*
+ * Only one effective writer, data collection thread.
+ * It's also written by the data deletion workqueue when data collection is disabled for this metric.
+ */
+ usec_t latest_time;
+
+ struct pg_cache_page_index *prev;
+};
+
+/* maps UUIDs to page indices */
+struct pg_cache_metrics_index {
+ uv_rwlock_t lock;
+ Pvoid_t JudyHS_array;
+ struct pg_cache_page_index *last_page_index;
+};
+
+/* gathers dirty pages to be written on disk */
+struct pg_cache_committed_page_index {
+ uv_rwlock_t lock;
+
+ Pvoid_t JudyL_array;
+
+ /*
+ * The dirty page correlation ID is a hint. Dirty pages that are correlated
+ * should have a small correlation ID difference. For correctness, no two dirty
+ * pages in memory may have the same ID at the same time.
+ */
+ Word_t latest_corr_id;
+
+ unsigned nr_committed_pages;
+};
+
+/*
+ * Gathers populated pages to be evicted.
+ * Relies on page cache descriptors being there as it uses their memory.
+ */
+struct pg_cache_replaceQ {
+ uv_rwlock_t lock; /* LRU lock */
+
+ struct page_cache_descr *head; /* LRU */
+ struct page_cache_descr *tail; /* MRU */
+};
+
+struct page_cache { /* TODO: add statistics */
+ uv_rwlock_t pg_cache_rwlock; /* page cache lock */
+
+ struct pg_cache_metrics_index metrics_index;
+ struct pg_cache_committed_page_index committed_page_index;
+ struct pg_cache_replaceQ replaceQ;
+
+ unsigned page_descriptors;
+ unsigned populated_pages;
+};
+
+extern void pg_cache_wake_up_waiters_unsafe(struct rrdeng_page_descr *descr);
+extern void pg_cache_wake_up_waiters(struct rrdengine_instance *ctx, struct rrdeng_page_descr *descr);
+extern void pg_cache_wait_event_unsafe(struct rrdeng_page_descr *descr);
+extern unsigned long pg_cache_wait_event(struct rrdengine_instance *ctx, struct rrdeng_page_descr *descr);
+extern void pg_cache_replaceQ_insert(struct rrdengine_instance *ctx,
+ struct rrdeng_page_descr *descr);
+extern void pg_cache_replaceQ_delete(struct rrdengine_instance *ctx,
+ struct rrdeng_page_descr *descr);
+extern void pg_cache_replaceQ_set_hot(struct rrdengine_instance *ctx,
+ struct rrdeng_page_descr *descr);
+extern struct rrdeng_page_descr *pg_cache_create_descr(void);
+extern int pg_cache_try_get_unsafe(struct rrdeng_page_descr *descr, int exclusive_access);
+extern void pg_cache_put_unsafe(struct rrdeng_page_descr *descr);
+extern void pg_cache_put(struct rrdengine_instance *ctx, struct rrdeng_page_descr *descr);
+extern void pg_cache_insert(struct rrdengine_instance *ctx, struct pg_cache_page_index *index,
+ struct rrdeng_page_descr *descr);
+extern uint8_t pg_cache_punch_hole(struct rrdengine_instance *ctx, struct rrdeng_page_descr *descr,
+ uint8_t remove_dirty, uint8_t is_exclusive_holder, uuid_t *metric_id);
+extern usec_t pg_cache_oldest_time_in_range(struct rrdengine_instance *ctx, uuid_t *id,
+ usec_t start_time, usec_t end_time);
+extern void pg_cache_get_filtered_info_prev(struct rrdengine_instance *ctx, struct pg_cache_page_index *page_index,
+ usec_t point_in_time, pg_cache_page_info_filter_t *filter,
+ struct rrdeng_page_info *page_info);
+extern unsigned
+ pg_cache_preload(struct rrdengine_instance *ctx, uuid_t *id, usec_t start_time, usec_t end_time,
+ struct rrdeng_page_info **page_info_arrayp, struct pg_cache_page_index **ret_page_indexp);
+extern struct rrdeng_page_descr *
+ pg_cache_lookup(struct rrdengine_instance *ctx, struct pg_cache_page_index *index, uuid_t *id,
+ usec_t point_in_time);
+extern struct rrdeng_page_descr *
+ pg_cache_lookup_next(struct rrdengine_instance *ctx, struct pg_cache_page_index *index, uuid_t *id,
+ usec_t start_time, usec_t end_time);
+extern struct pg_cache_page_index *create_page_index(uuid_t *id);
+extern void init_page_cache(struct rrdengine_instance *ctx);
+extern void free_page_cache(struct rrdengine_instance *ctx);
+extern void pg_cache_add_new_metric_time(struct pg_cache_page_index *page_index, struct rrdeng_page_descr *descr);
+extern void pg_cache_update_metric_times(struct pg_cache_page_index *page_index);
+extern unsigned long pg_cache_hard_limit(struct rrdengine_instance *ctx);
+extern unsigned long pg_cache_soft_limit(struct rrdengine_instance *ctx);
+extern unsigned long pg_cache_committed_hard_limit(struct rrdengine_instance *ctx);
+
+static inline void
+ pg_cache_atomic_get_pg_info(struct rrdeng_page_descr *descr, usec_t *end_timep, uint32_t *page_lengthp)
+{
+ usec_t end_time, old_end_time;
+ uint32_t page_length;
+
+ if (NULL == descr->extent) {
+ /* this page is currently being modified, get consistent info locklessly */
+ do {
+ end_time = descr->end_time;
+ __sync_synchronize();
+ old_end_time = end_time;
+ page_length = descr->page_length;
+ __sync_synchronize();
+ end_time = descr->end_time;
+ __sync_synchronize();
+ } while ((end_time != old_end_time || (end_time & 1) != 0));
+
+ *end_timep = end_time;
+ *page_lengthp = page_length;
+ } else {
+ *end_timep = descr->end_time;
+ *page_lengthp = descr->page_length;
+ }
+}
+
+/* The caller must hold a reference to the page and must have already set the new data */
+static inline void pg_cache_atomic_set_pg_info(struct rrdeng_page_descr *descr, usec_t end_time, uint32_t page_length)
+{
+ fatal_assert(!(end_time & 1));
+ __sync_synchronize();
+ descr->end_time |= 1; /* mark start of uncertainty period by adding 1 microsecond */
+ __sync_synchronize();
+ descr->page_length = page_length;
+ __sync_synchronize();
+ descr->end_time = end_time; /* mark end of uncertainty period */
+}
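+
+/*
+ * A minimal sketch of the lockless protocol above, with both sides shown in
+ * one hypothetical function: the collector publishes a grown page with the
+ * setter, while a reader on another thread obtains a consistent
+ * (end_time, page_length) pair with the getter. Illustration only.
+ */
+#if 0 /* illustrative sketch, never compiled */
+static void example_pg_info(struct rrdeng_page_descr *descr)
+{
+    usec_t end_time;
+    uint32_t page_length;
+
+    /* writer side, holding a page reference */
+    pg_cache_atomic_set_pg_info(descr, descr->end_time + USEC_PER_SEC,
+                                descr->page_length + sizeof(uint32_t));
+
+    /* reader side */
+    pg_cache_atomic_get_pg_info(descr, &end_time, &page_length);
+}
+#endif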
+
+#endif /* NETDATA_PAGECACHE_H */
diff --git a/database/engine/rrddiskprotocol.h b/database/engine/rrddiskprotocol.h
new file mode 100644
index 0000000..db47af5
--- /dev/null
+++ b/database/engine/rrddiskprotocol.h
@@ -0,0 +1,119 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_RRDDISKPROTOCOL_H
+#define NETDATA_RRDDISKPROTOCOL_H
+
+#define RRDENG_BLOCK_SIZE (4096)
+#define RRDFILE_ALIGNMENT RRDENG_BLOCK_SIZE
+
+#define RRDENG_MAGIC_SZ (32)
+#define RRDENG_DF_MAGIC "netdata-data-file"
+#define RRDENG_JF_MAGIC "netdata-journal-file"
+
+#define RRDENG_VER_SZ (16)
+#define RRDENG_DF_VER "1.0"
+#define RRDENG_JF_VER "1.0"
+
+#define UUID_SZ (16)
+#define CHECKSUM_SZ (4) /* CRC32 */
+
+#define RRD_NO_COMPRESSION (0)
+#define RRD_LZ4 (1)
+
+#define RRDENG_DF_SB_PADDING_SZ (RRDENG_BLOCK_SIZE - (RRDENG_MAGIC_SZ + RRDENG_VER_SZ + sizeof(uint8_t)))
+/*
+ * Data file persistent super-block
+ */
+struct rrdeng_df_sb {
+ char magic_number[RRDENG_MAGIC_SZ];
+ char version[RRDENG_VER_SZ];
+ uint8_t tier;
+ uint8_t padding[RRDENG_DF_SB_PADDING_SZ];
+} __attribute__ ((packed));
+
+/*
+ * Page types
+ */
+#define PAGE_METRICS (0)
+#define PAGE_LOGS (1) /* reserved */
+
+/*
+ * Data file page descriptor
+ */
+struct rrdeng_extent_page_descr {
+ uint8_t type;
+
+ uint8_t uuid[UUID_SZ];
+ uint32_t page_length;
+ uint64_t start_time;
+ uint64_t end_time;
+} __attribute__ ((packed));
+
+/*
+ * Data file extent header
+ */
+struct rrdeng_df_extent_header {
+ uint32_t payload_length;
+ uint8_t compression_algorithm;
+ uint8_t number_of_pages;
+ /* #number_of_pages page descriptors follow */
+ struct rrdeng_extent_page_descr descr[];
+} __attribute__ ((packed));
+
+/*
+ * Data file extent trailer
+ */
+struct rrdeng_df_extent_trailer {
+ uint8_t checksum[CHECKSUM_SZ]; /* CRC32 */
+} __attribute__ ((packed));
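+
+/*
+ * A worked example of the extent layout above: an extent with N pages is laid
+ * out on disk as the header, N page descriptors, the (possibly compressed)
+ * payload, and the CRC32 trailer. The helper computes the fixed overhead
+ * around the payload. Illustration only.
+ */
+#if 0 /* illustrative sketch, never compiled */
+static size_t example_extent_overhead(unsigned number_of_pages)
+{
+    return sizeof(struct rrdeng_df_extent_header) +
+           number_of_pages * sizeof(struct rrdeng_extent_page_descr) +
+           sizeof(struct rrdeng_df_extent_trailer);
+}
+#endif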
+
+#define RRDENG_JF_SB_PADDING_SZ (RRDENG_BLOCK_SIZE - (RRDENG_MAGIC_SZ + RRDENG_VER_SZ))
+/*
+ * Journal file super-block
+ */
+struct rrdeng_jf_sb {
+ char magic_number[RRDENG_MAGIC_SZ];
+ char version[RRDENG_VER_SZ];
+ uint8_t padding[RRDENG_JF_SB_PADDING_SZ];
+} __attribute__ ((packed));
+
+/*
+ * Transaction record types
+ */
+#define STORE_PADDING (0)
+#define STORE_DATA (1)
+#define STORE_LOGS (2) /* reserved */
+
+/*
+ * Journal file transaction record header
+ */
+struct rrdeng_jf_transaction_header {
+ /* when set to STORE_PADDING jump to start of next block */
+ uint8_t type;
+
+ uint32_t reserved; /* reserved for future use */
+ uint64_t id;
+ uint16_t payload_length;
+} __attribute__ ((packed));
+
+/*
+ * Journal file transaction record trailer
+ */
+struct rrdeng_jf_transaction_trailer {
+ uint8_t checksum[CHECKSUM_SZ]; /* CRC32 */
+} __attribute__ ((packed));
+
+/*
+ * Journal file STORE_DATA action
+ */
+struct rrdeng_jf_store_data {
+ /* data file extent information */
+ uint64_t extent_offset;
+ uint32_t extent_size;
+
+ uint8_t number_of_pages;
+ /* #number_of_pages page descriptors follow */
+ struct rrdeng_extent_page_descr descr[];
+} __attribute__ ((packed));
+
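+/*
+ * A worked example of a journal transaction: a STORE_DATA record for N pages
+ * occupies a transaction header, the rrdeng_jf_store_data payload with N page
+ * descriptors, and a CRC32 trailer. Illustration only.
+ */
+#if 0 /* illustrative sketch, never compiled */
+static size_t example_store_data_record_size(unsigned number_of_pages)
+{
+    size_t payload_length = sizeof(struct rrdeng_jf_store_data) +
+                            number_of_pages * sizeof(struct rrdeng_extent_page_descr);
+
+    return sizeof(struct rrdeng_jf_transaction_header) + payload_length +
+           sizeof(struct rrdeng_jf_transaction_trailer);
+}
+#endif
+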
+#endif /* NETDATA_RRDDISKPROTOCOL_H */
\ No newline at end of file
diff --git a/database/engine/rrdengine.c b/database/engine/rrdengine.c
new file mode 100644
index 0000000..43135ff
--- /dev/null
+++ b/database/engine/rrdengine.c
@@ -0,0 +1,1266 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+#define NETDATA_RRD_INTERNALS
+
+#include "rrdengine.h"
+
+rrdeng_stats_t global_io_errors = 0;
+rrdeng_stats_t global_fs_errors = 0;
+rrdeng_stats_t rrdeng_reserved_file_descriptors = 0;
+rrdeng_stats_t global_pg_cache_over_half_dirty_events = 0;
+rrdeng_stats_t global_flushing_pressure_page_deletions = 0;
+
+static void sanity_check(void)
+{
+ /* Magic numbers must fit in the super-blocks */
+ BUILD_BUG_ON(strlen(RRDENG_DF_MAGIC) > RRDENG_MAGIC_SZ);
+ BUILD_BUG_ON(strlen(RRDENG_JF_MAGIC) > RRDENG_MAGIC_SZ);
+
+ /* Version strings must fit in the super-blocks */
+ BUILD_BUG_ON(strlen(RRDENG_DF_VER) > RRDENG_VER_SZ);
+ BUILD_BUG_ON(strlen(RRDENG_JF_VER) > RRDENG_VER_SZ);
+
+ /* Data file super-block cannot be larger than RRDENG_BLOCK_SIZE */
+ BUILD_BUG_ON(RRDENG_DF_SB_PADDING_SZ < 0);
+
+ BUILD_BUG_ON(sizeof(uuid_t) != UUID_SZ); /* check UUID size */
+
+ /* page count must fit in 8 bits */
+ BUILD_BUG_ON(MAX_PAGES_PER_EXTENT > 255);
+
+ /* extent cache count must fit in 32 bits */
+ BUILD_BUG_ON(MAX_CACHED_EXTENTS > 32);
+
+ /* page info scratch space must be able to hold 2 32-bit integers */
+ BUILD_BUG_ON(sizeof(((struct rrdeng_page_info *)0)->scratch) < 2 * sizeof(uint32_t));
+}
+
+/* always inserts into tail */
+static inline void xt_cache_replaceQ_insert(struct rrdengine_worker_config* wc,
+ struct extent_cache_element *xt_cache_elem)
+{
+ struct extent_cache *xt_cache = &wc->xt_cache;
+
+ xt_cache_elem->prev = NULL;
+ xt_cache_elem->next = NULL;
+
+ if (likely(NULL != xt_cache->replaceQ_tail)) {
+ xt_cache_elem->prev = xt_cache->replaceQ_tail;
+ xt_cache->replaceQ_tail->next = xt_cache_elem;
+ }
+ if (unlikely(NULL == xt_cache->replaceQ_head)) {
+ xt_cache->replaceQ_head = xt_cache_elem;
+ }
+ xt_cache->replaceQ_tail = xt_cache_elem;
+}
+
+static inline void xt_cache_replaceQ_delete(struct rrdengine_worker_config* wc,
+ struct extent_cache_element *xt_cache_elem)
+{
+ struct extent_cache *xt_cache = &wc->xt_cache;
+ struct extent_cache_element *prev, *next;
+
+ prev = xt_cache_elem->prev;
+ next = xt_cache_elem->next;
+
+ if (likely(NULL != prev)) {
+ prev->next = next;
+ }
+ if (likely(NULL != next)) {
+ next->prev = prev;
+ }
+ if (unlikely(xt_cache_elem == xt_cache->replaceQ_head)) {
+ xt_cache->replaceQ_head = next;
+ }
+ if (unlikely(xt_cache_elem == xt_cache->replaceQ_tail)) {
+ xt_cache->replaceQ_tail = prev;
+ }
+ xt_cache_elem->prev = xt_cache_elem->next = NULL;
+}
+
+static inline void xt_cache_replaceQ_set_hot(struct rrdengine_worker_config* wc,
+ struct extent_cache_element *xt_cache_elem)
+{
+ xt_cache_replaceQ_delete(wc, xt_cache_elem);
+ xt_cache_replaceQ_insert(wc, xt_cache_elem);
+}
+
+/* Returns the index of the cached extent if it was successfully inserted in the extent cache, otherwise -1 */
+static int try_insert_into_xt_cache(struct rrdengine_worker_config* wc, struct extent_info *extent)
+{
+ struct extent_cache *xt_cache = &wc->xt_cache;
+ struct extent_cache_element *xt_cache_elem;
+ unsigned idx;
+ int ret;
+
+ ret = find_first_zero(xt_cache->allocation_bitmap);
+ if (-1 == ret || ret >= MAX_CACHED_EXTENTS) {
+ for (xt_cache_elem = xt_cache->replaceQ_head ; NULL != xt_cache_elem ; xt_cache_elem = xt_cache_elem->next) {
+ idx = xt_cache_elem - xt_cache->extent_array;
+ if (!check_bit(xt_cache->inflight_bitmap, idx)) {
+ xt_cache_replaceQ_delete(wc, xt_cache_elem);
+ break;
+ }
+ }
+ if (NULL == xt_cache_elem)
+ return -1;
+ } else {
+ idx = (unsigned)ret;
+ xt_cache_elem = &xt_cache->extent_array[idx];
+ }
+ xt_cache_elem->extent = extent;
+ xt_cache_elem->fileno = extent->datafile->fileno;
+ xt_cache_elem->inflight_io_descr = NULL;
+ xt_cache_replaceQ_insert(wc, xt_cache_elem);
+ modify_bit(&xt_cache->allocation_bitmap, idx, 1);
+
+ return (int)idx;
+}
+
+/**
+ * Returns 0 if the extent was found in the extent cache, 1 otherwise.
+ * On success, sets *idx to the position of the extent inside the cache.
+ **/
+static uint8_t lookup_in_xt_cache(struct rrdengine_worker_config* wc, struct extent_info *extent, unsigned *idx)
+{
+ struct extent_cache *xt_cache = &wc->xt_cache;
+ struct extent_cache_element *xt_cache_elem;
+ unsigned i;
+
+ for (i = 0 ; i < MAX_CACHED_EXTENTS ; ++i) {
+ xt_cache_elem = &xt_cache->extent_array[i];
+ if (check_bit(xt_cache->allocation_bitmap, i) && xt_cache_elem->extent == extent &&
+ xt_cache_elem->fileno == extent->datafile->fileno) {
+ *idx = i;
+ return 0;
+ }
+ }
+ return 1;
+}
+
+#if 0 /* disabled code */
+static void delete_from_xt_cache(struct rrdengine_worker_config* wc, unsigned idx)
+{
+ struct extent_cache *xt_cache = &wc->xt_cache;
+ struct extent_cache_element *xt_cache_elem;
+
+ xt_cache_elem = &xt_cache->extent_array[idx];
+ xt_cache_replaceQ_delete(wc, xt_cache_elem);
+ xt_cache_elem->extent = NULL;
+ modify_bit(&wc->xt_cache.allocation_bitmap, idx, 0); /* invalidate it */
+ modify_bit(&wc->xt_cache.inflight_bitmap, idx, 0); /* not in-flight anymore */
+}
+#endif
+
+void enqueue_inflight_read_to_xt_cache(struct rrdengine_worker_config* wc, unsigned idx,
+ struct extent_io_descriptor *xt_io_descr)
+{
+ struct extent_cache *xt_cache = &wc->xt_cache;
+ struct extent_cache_element *xt_cache_elem;
+ struct extent_io_descriptor *old_next;
+
+ xt_cache_elem = &xt_cache->extent_array[idx];
+ old_next = xt_cache_elem->inflight_io_descr->next;
+ xt_cache_elem->inflight_io_descr->next = xt_io_descr;
+ xt_io_descr->next = old_next;
+}
+
+void read_cached_extent_cb(struct rrdengine_worker_config* wc, unsigned idx, struct extent_io_descriptor *xt_io_descr)
+{
+ unsigned i, j, page_offset;
+ struct rrdengine_instance *ctx = wc->ctx;
+ struct rrdeng_page_descr *descr;
+ struct page_cache_descr *pg_cache_descr;
+ void *page;
+ struct extent_info *extent = xt_io_descr->descr_array[0]->extent;
+
+ for (i = 0 ; i < xt_io_descr->descr_count; ++i) {
+ page = mallocz(RRDENG_BLOCK_SIZE);
+ descr = xt_io_descr->descr_array[i];
+ for (j = 0, page_offset = 0 ; j < extent->number_of_pages ; ++j) {
+ /* care, we don't hold the descriptor mutex */
+ if (!uuid_compare(*extent->pages[j]->id, *descr->id) &&
+ extent->pages[j]->page_length == descr->page_length &&
+ extent->pages[j]->start_time == descr->start_time &&
+ extent->pages[j]->end_time == descr->end_time) {
+ break;
+ }
+ page_offset += extent->pages[j]->page_length;
+
+ }
+ /* care, we don't hold the descriptor mutex */
+ (void) memcpy(page, wc->xt_cache.extent_array[idx].pages + page_offset, descr->page_length);
+
+ rrdeng_page_descr_mutex_lock(ctx, descr);
+ pg_cache_descr = descr->pg_cache_descr;
+ pg_cache_descr->page = page;
+ pg_cache_descr->flags |= RRD_PAGE_POPULATED;
+ pg_cache_descr->flags &= ~RRD_PAGE_READ_PENDING;
+ rrdeng_page_descr_mutex_unlock(ctx, descr);
+ pg_cache_replaceQ_insert(ctx, descr);
+ if (xt_io_descr->release_descr) {
+ pg_cache_put(ctx, descr);
+ } else {
+ debug(D_RRDENGINE, "%s: Waking up waiters.", __func__);
+ pg_cache_wake_up_waiters(ctx, descr);
+ }
+ }
+ if (xt_io_descr->completion)
+ complete(xt_io_descr->completion);
+ freez(xt_io_descr);
+}
+
+void read_extent_cb(uv_fs_t* req)
+{
+ struct rrdengine_worker_config* wc = req->loop->data;
+ struct rrdengine_instance *ctx = wc->ctx;
+ struct extent_io_descriptor *xt_io_descr;
+ struct rrdeng_page_descr *descr;
+ struct page_cache_descr *pg_cache_descr;
+ int ret;
+ unsigned i, j, count;
+ void *page, *uncompressed_buf = NULL;
+ uint32_t payload_length, payload_offset, page_offset, uncompressed_payload_length = 0;
+ uint8_t have_read_error = 0;
+ /* persistent structures */
+ struct rrdeng_df_extent_header *header;
+ struct rrdeng_df_extent_trailer *trailer;
+ uLong crc;
+
+ xt_io_descr = req->data;
+ header = xt_io_descr->buf;
+ payload_length = header->payload_length;
+ count = header->number_of_pages;
+ payload_offset = sizeof(*header) + sizeof(header->descr[0]) * count;
+ trailer = xt_io_descr->buf + xt_io_descr->bytes - sizeof(*trailer);
+
+ if (req->result < 0) {
+ struct rrdengine_datafile *datafile = xt_io_descr->descr_array[0]->extent->datafile;
+
+ ++ctx->stats.io_errors;
+ rrd_stat_atomic_add(&global_io_errors, 1);
+ have_read_error = 1;
+ error("%s: uv_fs_read - %s - extent at offset %"PRIu64"(%u) in datafile %u-%u.", __func__,
+ uv_strerror((int)req->result), xt_io_descr->pos, xt_io_descr->bytes, datafile->tier, datafile->fileno);
+ goto after_crc_check;
+ }
+ crc = crc32(0L, Z_NULL, 0);
+ crc = crc32(crc, xt_io_descr->buf, xt_io_descr->bytes - sizeof(*trailer));
+ ret = crc32cmp(trailer->checksum, crc);
+#ifdef NETDATA_INTERNAL_CHECKS
+ {
+ struct rrdengine_datafile *datafile = xt_io_descr->descr_array[0]->extent->datafile;
+ debug(D_RRDENGINE, "%s: Extent at offset %"PRIu64"(%u) was read from datafile %u-%u. CRC32 check: %s", __func__,
+ xt_io_descr->pos, xt_io_descr->bytes, datafile->tier, datafile->fileno, ret ? "FAILED" : "SUCCEEDED");
+ }
+#endif
+ if (unlikely(ret)) {
+ struct rrdengine_datafile *datafile = xt_io_descr->descr_array[0]->extent->datafile;
+
+ ++ctx->stats.io_errors;
+ rrd_stat_atomic_add(&global_io_errors, 1);
+ have_read_error = 1;
+ error("%s: Extent at offset %"PRIu64"(%u) was read from datafile %u-%u. CRC32 check: FAILED", __func__,
+ xt_io_descr->pos, xt_io_descr->bytes, datafile->tier, datafile->fileno);
+ }
+
+after_crc_check:
+ if (!have_read_error && RRD_NO_COMPRESSION != header->compression_algorithm) {
+ uncompressed_payload_length = 0;
+ for (i = 0 ; i < count ; ++i) {
+ uncompressed_payload_length += header->descr[i].page_length;
+ }
+ uncompressed_buf = mallocz(uncompressed_payload_length);
+ ret = LZ4_decompress_safe(xt_io_descr->buf + payload_offset, uncompressed_buf,
+ payload_length, uncompressed_payload_length);
+ ctx->stats.before_decompress_bytes += payload_length;
+ ctx->stats.after_decompress_bytes += ret;
+ debug(D_RRDENGINE, "LZ4 decompressed %u bytes to %d bytes.", payload_length, ret);
+ /* care, we don't hold the descriptor mutex */
+ }
+ {
+ uint8_t xt_is_cached = 0;
+ unsigned xt_idx;
+ struct extent_info *extent = xt_io_descr->descr_array[0]->extent;
+
+ xt_is_cached = !lookup_in_xt_cache(wc, extent, &xt_idx);
+ if (xt_is_cached && check_bit(wc->xt_cache.inflight_bitmap, xt_idx)) {
+ struct extent_cache *xt_cache = &wc->xt_cache;
+ struct extent_cache_element *xt_cache_elem = &xt_cache->extent_array[xt_idx];
+ struct extent_io_descriptor *curr, *next;
+
+ if (have_read_error) {
+ memset(xt_cache_elem->pages, 0, sizeof(xt_cache_elem->pages));
+ } else if (RRD_NO_COMPRESSION == header->compression_algorithm) {
+ (void)memcpy(xt_cache_elem->pages, xt_io_descr->buf + payload_offset, payload_length);
+ } else {
+ (void)memcpy(xt_cache_elem->pages, uncompressed_buf, uncompressed_payload_length);
+ }
+ /* complete all connected in-flight read requests */
+ for (curr = xt_cache_elem->inflight_io_descr->next ; curr ; curr = next) {
+ next = curr->next;
+ read_cached_extent_cb(wc, xt_idx, curr);
+ }
+ xt_cache_elem->inflight_io_descr = NULL;
+ modify_bit(&xt_cache->inflight_bitmap, xt_idx, 0); /* not in-flight anymore */
+ }
+ }
+
+ for (i = 0 ; i < xt_io_descr->descr_count; ++i) {
+ page = mallocz(RRDENG_BLOCK_SIZE);
+ descr = xt_io_descr->descr_array[i];
+ for (j = 0, page_offset = 0; j < count; ++j) {
+ /* care, we don't hold the descriptor mutex */
+ if (!uuid_compare(*(uuid_t *) header->descr[j].uuid, *descr->id) &&
+ header->descr[j].page_length == descr->page_length &&
+ header->descr[j].start_time == descr->start_time &&
+ header->descr[j].end_time == descr->end_time) {
+ break;
+ }
+ page_offset += header->descr[j].page_length;
+ }
+ /* care, we don't hold the descriptor mutex */
+ if (have_read_error) {
+ /* Applications should make sure NULL values match 0 as does SN_EMPTY_SLOT */
+ memset(page, 0, descr->page_length);
+ } else if (RRD_NO_COMPRESSION == header->compression_algorithm) {
+ (void) memcpy(page, xt_io_descr->buf + payload_offset + page_offset, descr->page_length);
+ } else {
+ (void) memcpy(page, uncompressed_buf + page_offset, descr->page_length);
+ }
+ rrdeng_page_descr_mutex_lock(ctx, descr);
+ pg_cache_descr = descr->pg_cache_descr;
+ pg_cache_descr->page = page;
+ pg_cache_descr->flags |= RRD_PAGE_POPULATED;
+ pg_cache_descr->flags &= ~RRD_PAGE_READ_PENDING;
+ rrdeng_page_descr_mutex_unlock(ctx, descr);
+ pg_cache_replaceQ_insert(ctx, descr);
+ if (xt_io_descr->release_descr) {
+ pg_cache_put(ctx, descr);
+ } else {
+ debug(D_RRDENGINE, "%s: Waking up waiters.", __func__);
+ pg_cache_wake_up_waiters(ctx, descr);
+ }
+ }
+ if (!have_read_error && RRD_NO_COMPRESSION != header->compression_algorithm) {
+ freez(uncompressed_buf);
+ }
+ if (xt_io_descr->completion)
+ complete(xt_io_descr->completion);
+ uv_fs_req_cleanup(req);
+ free(xt_io_descr->buf);
+ freez(xt_io_descr);
+}
+
+static void do_read_extent(struct rrdengine_worker_config* wc,
+ struct rrdeng_page_descr **descr,
+ unsigned count,
+ uint8_t release_descr)
+{
+ struct rrdengine_instance *ctx = wc->ctx;
+ struct page_cache_descr *pg_cache_descr;
+ int ret;
+ unsigned i, size_bytes, pos, real_io_size;
+// uint32_t payload_length;
+ struct extent_io_descriptor *xt_io_descr;
+ struct rrdengine_datafile *datafile;
+ struct extent_info *extent = descr[0]->extent;
+ uint8_t xt_is_cached = 0, xt_is_inflight = 0;
+ unsigned xt_idx;
+
+ datafile = extent->datafile;
+ pos = extent->offset;
+ size_bytes = extent->size;
+
+ xt_io_descr = callocz(1, sizeof(*xt_io_descr));
+ for (i = 0 ; i < count; ++i) {
+ rrdeng_page_descr_mutex_lock(ctx, descr[i]);
+ pg_cache_descr = descr[i]->pg_cache_descr;
+ pg_cache_descr->flags |= RRD_PAGE_READ_PENDING;
+// payload_length = descr[i]->page_length;
+ rrdeng_page_descr_mutex_unlock(ctx, descr[i]);
+
+ xt_io_descr->descr_array[i] = descr[i];
+ }
+ xt_io_descr->descr_count = count;
+ xt_io_descr->bytes = size_bytes;
+ xt_io_descr->pos = pos;
+ xt_io_descr->req.data = xt_io_descr;
+ xt_io_descr->completion = NULL;
+ /* xt_io_descr->descr_commit_idx_array[0] */
+ xt_io_descr->release_descr = release_descr;
+
+ xt_is_cached = !lookup_in_xt_cache(wc, extent, &xt_idx);
+ if (xt_is_cached) {
+ xt_cache_replaceQ_set_hot(wc, &wc->xt_cache.extent_array[xt_idx]);
+ xt_is_inflight = check_bit(wc->xt_cache.inflight_bitmap, xt_idx);
+ if (xt_is_inflight) {
+ enqueue_inflight_read_to_xt_cache(wc, xt_idx, xt_io_descr);
+ return;
+ }
+ return read_cached_extent_cb(wc, xt_idx, xt_io_descr);
+ } else {
+ ret = try_insert_into_xt_cache(wc, extent);
+ if (-1 != ret) {
+ xt_idx = (unsigned)ret;
+ modify_bit(&wc->xt_cache.inflight_bitmap, xt_idx, 1);
+ wc->xt_cache.extent_array[xt_idx].inflight_io_descr = xt_io_descr;
+ }
+ }
+
+ ret = posix_memalign((void *)&xt_io_descr->buf, RRDFILE_ALIGNMENT, ALIGN_BYTES_CEILING(size_bytes));
+ if (unlikely(ret)) {
+ fatal("posix_memalign:%s", strerror(ret));
+ /* freez(xt_io_descr);
+ return;*/
+ }
+ real_io_size = ALIGN_BYTES_CEILING(size_bytes);
+ xt_io_descr->iov = uv_buf_init((void *)xt_io_descr->buf, real_io_size);
+ ret = uv_fs_read(wc->loop, &xt_io_descr->req, datafile->file, &xt_io_descr->iov, 1, pos, read_extent_cb);
+ fatal_assert(-1 != ret);
+ ctx->stats.io_read_bytes += real_io_size;
+ ++ctx->stats.io_read_requests;
+ ctx->stats.io_read_extent_bytes += real_io_size;
+ ++ctx->stats.io_read_extents;
+ ctx->stats.pg_cache_backfills += count;
+}
+
+static void commit_data_extent(struct rrdengine_worker_config* wc, struct extent_io_descriptor *xt_io_descr)
+{
+ struct rrdengine_instance *ctx = wc->ctx;
+ unsigned count, payload_length, descr_size, size_bytes;
+ void *buf;
+ /* persistent structures */
+ struct rrdeng_df_extent_header *df_header;
+ struct rrdeng_jf_transaction_header *jf_header;
+ struct rrdeng_jf_store_data *jf_metric_data;
+ struct rrdeng_jf_transaction_trailer *jf_trailer;
+ uLong crc;
+
+ df_header = xt_io_descr->buf;
+ count = df_header->number_of_pages;
+ descr_size = sizeof(*jf_metric_data->descr) * count;
+ payload_length = sizeof(*jf_metric_data) + descr_size;
+ size_bytes = sizeof(*jf_header) + payload_length + sizeof(*jf_trailer);
+
+ buf = wal_get_transaction_buffer(wc, size_bytes);
+
+ jf_header = buf;
+ jf_header->type = STORE_DATA;
+ jf_header->reserved = 0;
+ jf_header->id = ctx->commit_log.transaction_id++;
+ jf_header->payload_length = payload_length;
+
+ jf_metric_data = buf + sizeof(*jf_header);
+ jf_metric_data->extent_offset = xt_io_descr->pos;
+ jf_metric_data->extent_size = xt_io_descr->bytes;
+ jf_metric_data->number_of_pages = count;
+ memcpy(jf_metric_data->descr, df_header->descr, descr_size);
+
+ jf_trailer = buf + sizeof(*jf_header) + payload_length;
+ crc = crc32(0L, Z_NULL, 0);
+ crc = crc32(crc, buf, sizeof(*jf_header) + payload_length);
+ crc32set(jf_trailer->checksum, crc);
+}
+
+static void do_commit_transaction(struct rrdengine_worker_config* wc, uint8_t type, void *data)
+{
+ switch (type) {
+ case STORE_DATA:
+ commit_data_extent(wc, (struct extent_io_descriptor *)data);
+ break;
+ default:
+ fatal_assert(type == STORE_DATA);
+ break;
+ }
+}
+
+static void after_invalidate_oldest_committed(struct rrdengine_worker_config* wc)
+{
+ int error;
+
+ error = uv_thread_join(wc->now_invalidating_dirty_pages);
+ if (error) {
+ error("uv_thread_join(): %s", uv_strerror(error));
+ }
+ freez(wc->now_invalidating_dirty_pages);
+ wc->now_invalidating_dirty_pages = NULL;
+ wc->cleanup_thread_invalidating_dirty_pages = 0;
+}
+
+static void invalidate_oldest_committed(void *arg)
+{
+ struct rrdengine_instance *ctx = arg;
+ struct rrdengine_worker_config *wc = &ctx->worker_config;
+ struct page_cache *pg_cache = &ctx->pg_cache;
+ int ret;
+ struct rrdeng_page_descr *descr;
+ struct page_cache_descr *pg_cache_descr;
+ Pvoid_t *PValue;
+ Word_t Index;
+ unsigned nr_committed_pages;
+
+ do {
+ uv_rwlock_wrlock(&pg_cache->committed_page_index.lock);
+ for (Index = 0,
+ PValue = JudyLFirst(pg_cache->committed_page_index.JudyL_array, &Index, PJE0),
+ descr = unlikely(NULL == PValue) ? NULL : *PValue;
+
+ descr != NULL;
+
+ PValue = JudyLNext(pg_cache->committed_page_index.JudyL_array, &Index, PJE0),
+ descr = unlikely(NULL == PValue) ? NULL : *PValue) {
+ fatal_assert(0 != descr->page_length);
+
+ rrdeng_page_descr_mutex_lock(ctx, descr);
+ pg_cache_descr = descr->pg_cache_descr;
+ if (!(pg_cache_descr->flags & RRD_PAGE_WRITE_PENDING) && pg_cache_try_get_unsafe(descr, 1)) {
+ rrdeng_page_descr_mutex_unlock(ctx, descr);
+
+ ret = JudyLDel(&pg_cache->committed_page_index.JudyL_array, Index, PJE0);
+ fatal_assert(1 == ret);
+ break;
+ }
+ rrdeng_page_descr_mutex_unlock(ctx, descr);
+ }
+ uv_rwlock_wrunlock(&pg_cache->committed_page_index.lock);
+
+ if (!descr) {
+ info("Failed to invalidate any dirty pages to relieve page cache pressure.");
+
+ goto out;
+ }
+ pg_cache_punch_hole(ctx, descr, 1, 1, NULL);
+
+ uv_rwlock_wrlock(&pg_cache->committed_page_index.lock);
+ nr_committed_pages = --pg_cache->committed_page_index.nr_committed_pages;
+ uv_rwlock_wrunlock(&pg_cache->committed_page_index.lock);
+ rrd_stat_atomic_add(&ctx->stats.flushing_pressure_page_deletions, 1);
+ rrd_stat_atomic_add(&global_flushing_pressure_page_deletions, 1);
+
+ } while (nr_committed_pages >= pg_cache_committed_hard_limit(ctx));
+out:
+ wc->cleanup_thread_invalidating_dirty_pages = 1;
+ /* wake up event loop */
+ fatal_assert(0 == uv_async_send(&wc->async));
+}
+
+void rrdeng_invalidate_oldest_committed(struct rrdengine_worker_config* wc)
+{
+ struct rrdengine_instance *ctx = wc->ctx;
+ struct page_cache *pg_cache = &ctx->pg_cache;
+ unsigned nr_committed_pages;
+ int error;
+
+ if (unlikely(ctx->quiesce != NO_QUIESCE)) /* Shutting down */
+ return;
+
+ uv_rwlock_rdlock(&pg_cache->committed_page_index.lock);
+ nr_committed_pages = pg_cache->committed_page_index.nr_committed_pages;
+ uv_rwlock_rdunlock(&pg_cache->committed_page_index.lock);
+
+ if (nr_committed_pages >= pg_cache_committed_hard_limit(ctx)) {
+ /* delete the oldest page in memory */
+ if (wc->now_invalidating_dirty_pages) {
+ /* already deleting a page */
+ return;
+ }
+ errno = 0;
+ error("Failed to flush dirty buffers quickly enough in dbengine instance \"%s\". "
+ "Metric data are being deleted, please reduce disk load or use a faster disk.", ctx->dbfiles_path);
+
+ wc->now_invalidating_dirty_pages = mallocz(sizeof(*wc->now_invalidating_dirty_pages));
+ wc->cleanup_thread_invalidating_dirty_pages = 0;
+
+ error = uv_thread_create(wc->now_invalidating_dirty_pages, invalidate_oldest_committed, ctx);
+ if (error) {
+ error("uv_thread_create(): %s", uv_strerror(error));
+ freez(wc->now_invalidating_dirty_pages);
+ wc->now_invalidating_dirty_pages = NULL;
+ }
+ }
+}
+
+void flush_pages_cb(uv_fs_t* req)
+{
+ struct rrdengine_worker_config* wc = req->loop->data;
+ struct rrdengine_instance *ctx = wc->ctx;
+ struct page_cache *pg_cache = &ctx->pg_cache;
+ struct extent_io_descriptor *xt_io_descr;
+ struct rrdeng_page_descr *descr;
+ struct page_cache_descr *pg_cache_descr;
+ unsigned i, count;
+
+ xt_io_descr = req->data;
+ if (req->result < 0) {
+ ++ctx->stats.io_errors;
+ rrd_stat_atomic_add(&global_io_errors, 1);
+ error("%s: uv_fs_write: %s", __func__, uv_strerror((int)req->result));
+ }
+#ifdef NETDATA_INTERNAL_CHECKS
+ {
+ struct rrdengine_datafile *datafile = xt_io_descr->descr_array[0]->extent->datafile;
+ debug(D_RRDENGINE, "%s: Extent at offset %"PRIu64"(%u) was written to datafile %u-%u. Waking up waiters.",
+ __func__, xt_io_descr->pos, xt_io_descr->bytes, datafile->tier, datafile->fileno);
+ }
+#endif
+ count = xt_io_descr->descr_count;
+ for (i = 0 ; i < count ; ++i) {
+ /* care, we don't hold the descriptor mutex */
+ descr = xt_io_descr->descr_array[i];
+
+ pg_cache_replaceQ_insert(ctx, descr);
+
+ rrdeng_page_descr_mutex_lock(ctx, descr);
+ pg_cache_descr = descr->pg_cache_descr;
+ pg_cache_descr->flags &= ~(RRD_PAGE_DIRTY | RRD_PAGE_WRITE_PENDING);
+ /* wake up waiters, care no reference being held */
+ pg_cache_wake_up_waiters_unsafe(descr);
+ rrdeng_page_descr_mutex_unlock(ctx, descr);
+ }
+ if (xt_io_descr->completion)
+ complete(xt_io_descr->completion);
+ uv_fs_req_cleanup(req);
+ free(xt_io_descr->buf);
+ freez(xt_io_descr);
+
+ uv_rwlock_wrlock(&pg_cache->committed_page_index.lock);
+ pg_cache->committed_page_index.nr_committed_pages -= count;
+ uv_rwlock_wrunlock(&pg_cache->committed_page_index.lock);
+ wc->inflight_dirty_pages -= count;
+}
+
+/*
+ * completion must be NULL or valid.
+ * Returns 0 when no flushing can take place.
+ * Returns the number of datafile bytes to be written when flushing is successfully initiated.
+ */
+static int do_flush_pages(struct rrdengine_worker_config* wc, int force, struct completion *completion)
+{
+ struct rrdengine_instance *ctx = wc->ctx;
+ struct page_cache *pg_cache = &ctx->pg_cache;
+ int ret;
+ int compressed_size, max_compressed_size = 0;
+ unsigned i, count, size_bytes, pos, real_io_size;
+ uint32_t uncompressed_payload_length, payload_offset;
+ struct rrdeng_page_descr *descr, *eligible_pages[MAX_PAGES_PER_EXTENT];
+ struct page_cache_descr *pg_cache_descr;
+ struct extent_io_descriptor *xt_io_descr;
+ void *compressed_buf = NULL;
+ Word_t descr_commit_idx_array[MAX_PAGES_PER_EXTENT];
+ Pvoid_t *PValue;
+ Word_t Index;
+ uint8_t compression_algorithm = ctx->global_compress_alg;
+ struct extent_info *extent;
+ struct rrdengine_datafile *datafile;
+ /* persistent structures */
+ struct rrdeng_df_extent_header *header;
+ struct rrdeng_df_extent_trailer *trailer;
+ uLong crc;
+
+ if (force) {
+ debug(D_RRDENGINE, "Asynchronous flushing of extent has been forced by page pressure.");
+ }
+ uv_rwlock_wrlock(&pg_cache->committed_page_index.lock);
+ for (Index = 0, count = 0, uncompressed_payload_length = 0,
+ PValue = JudyLFirst(pg_cache->committed_page_index.JudyL_array, &Index, PJE0),
+ descr = unlikely(NULL == PValue) ? NULL : *PValue ;
+
+ descr != NULL && count != MAX_PAGES_PER_EXTENT ;
+
+ PValue = JudyLNext(pg_cache->committed_page_index.JudyL_array, &Index, PJE0),
+ descr = unlikely(NULL == PValue) ? NULL : *PValue) {
+ uint8_t page_write_pending;
+
+ fatal_assert(0 != descr->page_length);
+ page_write_pending = 0;
+
+ rrdeng_page_descr_mutex_lock(ctx, descr);
+ pg_cache_descr = descr->pg_cache_descr;
+ if (!(pg_cache_descr->flags & RRD_PAGE_WRITE_PENDING)) {
+ page_write_pending = 1;
+ /* care, no reference being held */
+ pg_cache_descr->flags |= RRD_PAGE_WRITE_PENDING;
+ uncompressed_payload_length += descr->page_length;
+ descr_commit_idx_array[count] = Index;
+ eligible_pages[count++] = descr;
+ }
+ rrdeng_page_descr_mutex_unlock(ctx, descr);
+
+ if (page_write_pending) {
+ ret = JudyLDel(&pg_cache->committed_page_index.JudyL_array, Index, PJE0);
+ fatal_assert(1 == ret);
+ }
+ }
+ uv_rwlock_wrunlock(&pg_cache->committed_page_index.lock);
+
+ if (!count) {
+ debug(D_RRDENGINE, "%s: no pages eligible for flushing.", __func__);
+ if (completion)
+ complete(completion);
+ return 0;
+ }
+ wc->inflight_dirty_pages += count;
+
+ xt_io_descr = mallocz(sizeof(*xt_io_descr));
+ payload_offset = sizeof(*header) + count * sizeof(header->descr[0]);
+ switch (compression_algorithm) {
+ case RRD_NO_COMPRESSION:
+ size_bytes = payload_offset + uncompressed_payload_length + sizeof(*trailer);
+ break;
+ default: /* Compress */
+ fatal_assert(uncompressed_payload_length < LZ4_MAX_INPUT_SIZE);
+ max_compressed_size = LZ4_compressBound(uncompressed_payload_length);
+ compressed_buf = mallocz(max_compressed_size);
+ size_bytes = payload_offset + MAX(uncompressed_payload_length, (unsigned)max_compressed_size) + sizeof(*trailer);
+ break;
+ }
+ ret = posix_memalign((void *)&xt_io_descr->buf, RRDFILE_ALIGNMENT, ALIGN_BYTES_CEILING(size_bytes));
+ if (unlikely(ret)) {
+ fatal("posix_memalign:%s", strerror(ret));
+ /* freez(xt_io_descr);*/
+ }
+ (void) memcpy(xt_io_descr->descr_array, eligible_pages, sizeof(struct rrdeng_page_descr *) * count);
+ xt_io_descr->descr_count = count;
+
+ pos = 0;
+ header = xt_io_descr->buf;
+ header->compression_algorithm = compression_algorithm;
+ header->number_of_pages = count;
+ pos += sizeof(*header);
+
+ extent = mallocz(sizeof(*extent) + count * sizeof(extent->pages[0]));
+ datafile = ctx->datafiles.last; /* TODO: check for exceeded size quota */
+ extent->offset = datafile->pos;
+ extent->number_of_pages = count;
+ extent->datafile = datafile;
+ extent->next = NULL;
+
+ for (i = 0 ; i < count ; ++i) {
+ /* This is here for performance reasons */
+ xt_io_descr->descr_commit_idx_array[i] = descr_commit_idx_array[i];
+
+ descr = xt_io_descr->descr_array[i];
+ header->descr[i].type = PAGE_METRICS;
+ uuid_copy(*(uuid_t *)header->descr[i].uuid, *descr->id);
+ header->descr[i].page_length = descr->page_length;
+ header->descr[i].start_time = descr->start_time;
+ header->descr[i].end_time = descr->end_time;
+ pos += sizeof(header->descr[i]);
+ }
+ for (i = 0 ; i < count ; ++i) {
+ descr = xt_io_descr->descr_array[i];
+ /* care, we don't hold the descriptor mutex */
+ (void) memcpy(xt_io_descr->buf + pos, descr->pg_cache_descr->page, descr->page_length);
+ descr->extent = extent;
+ extent->pages[i] = descr;
+
+ pos += descr->page_length;
+ }
+ df_extent_insert(extent);
+
+ switch (compression_algorithm) {
+ case RRD_NO_COMPRESSION:
+ header->payload_length = uncompressed_payload_length;
+ break;
+ default: /* Compress */
+ compressed_size = LZ4_compress_default(xt_io_descr->buf + payload_offset, compressed_buf,
+ uncompressed_payload_length, max_compressed_size);
+ ctx->stats.before_compress_bytes += uncompressed_payload_length;
+ ctx->stats.after_compress_bytes += compressed_size;
+ debug(D_RRDENGINE, "LZ4 compressed %"PRIu32" bytes to %d bytes.", uncompressed_payload_length, compressed_size);
+ (void) memcpy(xt_io_descr->buf + payload_offset, compressed_buf, compressed_size);
+ freez(compressed_buf);
+ size_bytes = payload_offset + compressed_size + sizeof(*trailer);
+ header->payload_length = compressed_size;
+ break;
+ }
+ extent->size = size_bytes;
+ xt_io_descr->bytes = size_bytes;
+ xt_io_descr->pos = datafile->pos;
+ xt_io_descr->req.data = xt_io_descr;
+ xt_io_descr->completion = completion;
+
+ trailer = xt_io_descr->buf + size_bytes - sizeof(*trailer);
+ crc = crc32(0L, Z_NULL, 0);
+ crc = crc32(crc, xt_io_descr->buf, size_bytes - sizeof(*trailer));
+ crc32set(trailer->checksum, crc);
+
+ real_io_size = ALIGN_BYTES_CEILING(size_bytes);
+ xt_io_descr->iov = uv_buf_init((void *)xt_io_descr->buf, real_io_size);
+ ret = uv_fs_write(wc->loop, &xt_io_descr->req, datafile->file, &xt_io_descr->iov, 1, datafile->pos, flush_pages_cb);
+ fatal_assert(-1 != ret);
+ ctx->stats.io_write_bytes += real_io_size;
+ ++ctx->stats.io_write_requests;
+ ctx->stats.io_write_extent_bytes += real_io_size;
+ ++ctx->stats.io_write_extents;
+ do_commit_transaction(wc, STORE_DATA, xt_io_descr);
+ datafile->pos += ALIGN_BYTES_CEILING(size_bytes);
+ ctx->disk_space += ALIGN_BYTES_CEILING(size_bytes);
+ rrdeng_test_quota(wc);
+
+ return ALIGN_BYTES_CEILING(size_bytes);
+}
+
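+/*
+ * Extent buffer layout produced by do_flush_pages() above, for illustration:
+ *
+ *     [ extent header | descr[0] .. descr[count-1] | page payloads | trailer ]
+ *     ^ offset 0                                   ^ payload_offset
+ *
+ *     payload_offset = sizeof(*header) + count * sizeof(header->descr[0])
+ *     size_bytes     = payload_offset + header->payload_length + sizeof(*trailer)
+ *
+ * The trailer CRC32 covers everything before it, and the buffer is rounded up
+ * with ALIGN_BYTES_CEILING() before being handed to uv_fs_write().
+ */
+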
+static void after_delete_old_data(struct rrdengine_worker_config* wc)
+{
+ struct rrdengine_instance *ctx = wc->ctx;
+ struct rrdengine_datafile *datafile;
+ struct rrdengine_journalfile *journalfile;
+ unsigned deleted_bytes, journalfile_bytes, datafile_bytes;
+ int ret, error;
+ char path[RRDENG_PATH_MAX];
+
+ datafile = ctx->datafiles.first;
+ journalfile = datafile->journalfile;
+ datafile_bytes = datafile->pos;
+ journalfile_bytes = journalfile->pos;
+ deleted_bytes = 0;
+
+ info("Deleting data and journal file pair.");
+ datafile_list_delete(ctx, datafile);
+ ret = destroy_journal_file(journalfile, datafile);
+ if (!ret) {
+ generate_journalfilepath(datafile, path, sizeof(path));
+ info("Deleted journal file \"%s\".", path);
+ deleted_bytes += journalfile_bytes;
+ }
+ ret = destroy_data_file(datafile);
+ if (!ret) {
+ generate_datafilepath(datafile, path, sizeof(path));
+ info("Deleted data file \"%s\".", path);
+ deleted_bytes += datafile_bytes;
+ }
+ freez(journalfile);
+ freez(datafile);
+
+ ctx->disk_space -= deleted_bytes;
+ info("Reclaimed %u bytes of disk space.", deleted_bytes);
+
+ error = uv_thread_join(wc->now_deleting_files);
+ if (error) {
+ error("uv_thread_join(): %s", uv_strerror(error));
+ }
+ freez(wc->now_deleting_files);
+ /* unfreeze command processing */
+ wc->now_deleting_files = NULL;
+
+ wc->cleanup_thread_deleting_files = 0;
+
+ /* interrupt event loop */
+ uv_stop(wc->loop);
+}
+
+static void delete_old_data(void *arg)
+{
+ struct rrdengine_instance *ctx = arg;
+ struct rrdengine_worker_config* wc = &ctx->worker_config;
+ struct rrdengine_datafile *datafile;
+ struct extent_info *extent, *next;
+ struct rrdeng_page_descr *descr;
+ unsigned count, i;
+ uint8_t can_delete_metric;
+ uuid_t metric_id;
+
+ /* Safe to use since it will be deleted after we are done */
+ datafile = ctx->datafiles.first;
+
+ for (extent = datafile->extents.first ; extent != NULL ; extent = next) {
+ count = extent->number_of_pages;
+ for (i = 0 ; i < count ; ++i) {
+ descr = extent->pages[i];
+ can_delete_metric = pg_cache_punch_hole(ctx, descr, 0, 0, &metric_id);
+ if (unlikely(can_delete_metric && ctx->metalog_ctx->initialized)) {
+ /*
+ * If the metric is empty, has no active writers and if the metadata log has been initialized then
+ * attempt to delete the corresponding netdata dimension.
+ */
+ metalog_delete_dimension_by_uuid(ctx->metalog_ctx, &metric_id);
+ }
+ }
+ next = extent->next;
+ freez(extent);
+ }
+ wc->cleanup_thread_deleting_files = 1;
+ /* wake up event loop */
+ fatal_assert(0 == uv_async_send(&wc->async));
+}
+
+void rrdeng_test_quota(struct rrdengine_worker_config* wc)
+{
+ struct rrdengine_instance *ctx = wc->ctx;
+ struct rrdengine_datafile *datafile;
+ unsigned current_size, target_size;
+ uint8_t out_of_space, only_one_datafile;
+ int ret, error;
+
+ out_of_space = 0;
+ /* Do not allow the pinned pages to exceed the disk space quota to avoid deadlocks */
+ if (unlikely(ctx->disk_space > MAX(ctx->max_disk_space, 2 * ctx->metric_API_max_producers * RRDENG_BLOCK_SIZE))) {
+ out_of_space = 1;
+ }
+ datafile = ctx->datafiles.last;
+ current_size = datafile->pos;
+ target_size = ctx->max_disk_space / TARGET_DATAFILES;
+ target_size = MIN(target_size, MAX_DATAFILE_SIZE);
+ target_size = MAX(target_size, MIN_DATAFILE_SIZE);
+ only_one_datafile = (datafile == ctx->datafiles.first) ? 1 : 0;
+ if (unlikely(current_size >= target_size || (out_of_space && only_one_datafile))) {
+ /* Finalize data and journal file and create a new pair */
+ wal_flush_transaction_buffer(wc);
+ ret = create_new_datafile_pair(ctx, 1, ctx->last_fileno + 1);
+ if (likely(!ret)) {
+ ++ctx->last_fileno;
+ }
+ }
+ if (unlikely(out_of_space && NO_QUIESCE == ctx->quiesce)) {
+ /* delete old data */
+ if (wc->now_deleting_files) {
+ /* already deleting data */
+ return;
+ }
+ if (NULL == ctx->datafiles.first->next) {
+ error("Cannot delete data file \"%s/"DATAFILE_PREFIX RRDENG_FILE_NUMBER_PRINT_TMPL DATAFILE_EXTENSION"\""
+ " to reclaim space, there are no other file pairs left.",
+ ctx->dbfiles_path, ctx->datafiles.first->tier, ctx->datafiles.first->fileno);
+ return;
+ }
+ info("Deleting data file \"%s/"DATAFILE_PREFIX RRDENG_FILE_NUMBER_PRINT_TMPL DATAFILE_EXTENSION"\".",
+ ctx->dbfiles_path, ctx->datafiles.first->tier, ctx->datafiles.first->fileno);
+ wc->now_deleting_files = mallocz(sizeof(*wc->now_deleting_files));
+ wc->cleanup_thread_deleting_files = 0;
+
+ error = uv_thread_create(wc->now_deleting_files, delete_old_data, ctx);
+ if (error) {
+ error("uv_thread_create(): %s", uv_strerror(error));
+ freez(wc->now_deleting_files);
+ wc->now_deleting_files = NULL;
+ }
+ }
+}
+
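+/*
+ * Worked example for rrdeng_test_quota(), with illustrative numbers since the
+ * macro values are defined elsewhere: with max_disk_space = 256 MiB and
+ * TARGET_DATAFILES = 20, the raw target is 256 / 20 = 12.8 MiB per datafile
+ * pair; it is then clamped into [MIN_DATAFILE_SIZE, MAX_DATAFILE_SIZE] before
+ * being compared against the current datafile's write position.
+ */
+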
+static inline int rrdeng_threads_alive(struct rrdengine_worker_config* wc)
+{
+ if (wc->now_invalidating_dirty_pages || wc->now_deleting_files) {
+ return 1;
+ }
+ return 0;
+}
+
+static void rrdeng_cleanup_finished_threads(struct rrdengine_worker_config* wc)
+{
+ struct rrdengine_instance *ctx = wc->ctx;
+
+ if (unlikely(wc->cleanup_thread_invalidating_dirty_pages)) {
+ after_invalidate_oldest_committed(wc);
+ }
+ if (unlikely(wc->cleanup_thread_deleting_files)) {
+ after_delete_old_data(wc);
+ }
+ if (unlikely(SET_QUIESCE == ctx->quiesce && !rrdeng_threads_alive(wc))) {
+ ctx->quiesce = QUIESCED;
+ complete(&ctx->rrdengine_completion);
+ }
+}
+
+/* return 0 on success */
+int init_rrd_files(struct rrdengine_instance *ctx)
+{
+ return init_data_files(ctx);
+}
+
+void finalize_rrd_files(struct rrdengine_instance *ctx)
+{
+ return finalize_data_files(ctx);
+}
+
+void rrdeng_init_cmd_queue(struct rrdengine_worker_config* wc)
+{
+ wc->cmd_queue.head = wc->cmd_queue.tail = 0;
+ wc->queue_size = 0;
+ fatal_assert(0 == uv_cond_init(&wc->cmd_cond));
+ fatal_assert(0 == uv_mutex_init(&wc->cmd_mutex));
+}
+
+void rrdeng_enq_cmd(struct rrdengine_worker_config* wc, struct rrdeng_cmd *cmd)
+{
+ unsigned queue_size;
+
+ /* wait for free space in queue */
+ uv_mutex_lock(&wc->cmd_mutex);
+ while ((queue_size = wc->queue_size) == RRDENG_CMD_Q_MAX_SIZE) {
+ uv_cond_wait(&wc->cmd_cond, &wc->cmd_mutex);
+ }
+ fatal_assert(queue_size < RRDENG_CMD_Q_MAX_SIZE);
+ /* enqueue command */
+ wc->cmd_queue.cmd_array[wc->cmd_queue.tail] = *cmd;
+ wc->cmd_queue.tail = wc->cmd_queue.tail != RRDENG_CMD_Q_MAX_SIZE - 1 ?
+ wc->cmd_queue.tail + 1 : 0;
+ wc->queue_size = queue_size + 1;
+ uv_mutex_unlock(&wc->cmd_mutex);
+
+ /* wake up event loop */
+ fatal_assert(0 == uv_async_send(&wc->async));
+}
+
+struct rrdeng_cmd rrdeng_deq_cmd(struct rrdengine_worker_config* wc)
+{
+ struct rrdeng_cmd ret;
+ unsigned queue_size;
+
+ uv_mutex_lock(&wc->cmd_mutex);
+ queue_size = wc->queue_size;
+ if (queue_size == 0) {
+ ret.opcode = RRDENG_NOOP;
+ } else {
+ /* dequeue command */
+ ret = wc->cmd_queue.cmd_array[wc->cmd_queue.head];
+ if (queue_size == 1) {
+ wc->cmd_queue.head = wc->cmd_queue.tail = 0;
+ } else {
+ wc->cmd_queue.head = wc->cmd_queue.head != RRDENG_CMD_Q_MAX_SIZE - 1 ?
+ wc->cmd_queue.head + 1 : 0;
+ }
+ wc->queue_size = queue_size - 1;
+
+ /* wake up producers */
+ uv_cond_signal(&wc->cmd_cond);
+ }
+ uv_mutex_unlock(&wc->cmd_mutex);
+
+ return ret;
+}
+
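+/*
+ * Illustrative producer-side usage, a sketch rather than engine code: any thread
+ * can queue work for the event loop; rrdeng_enq_cmd() blocks while the FIFO is
+ * full and wakes the loop up through the async handle.
+ */
+static void example_request_flush(struct rrdengine_instance *ctx, struct completion *completion)
+{
+    struct rrdeng_cmd cmd;
+
+    cmd.opcode = RRDENG_FLUSH_PAGES;
+    cmd.completion = completion; /* completed by flush_pages_cb() */
+    rrdeng_enq_cmd(&ctx->worker_config, &cmd);
+}
+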
+void async_cb(uv_async_t *handle)
+{
+ uv_stop(handle->loop);
+ uv_update_time(handle->loop);
+ debug(D_RRDENGINE, "%s called, active=%d.", __func__, uv_is_active((uv_handle_t *)handle));
+}
+
+/* Flushes dirty pages when timer expires */
+#define TIMER_PERIOD_MS (1000)
+
+void timer_cb(uv_timer_t* handle)
+{
+ struct rrdengine_worker_config* wc = handle->data;
+ struct rrdengine_instance *ctx = wc->ctx;
+
+ uv_stop(handle->loop);
+ uv_update_time(handle->loop);
+ if (unlikely(!ctx->metalog_ctx->initialized))
+ return; /* Wait for the metadata log to initialize */
+ rrdeng_test_quota(wc);
+ debug(D_RRDENGINE, "%s: timeout reached.", __func__);
+ if (likely(!wc->now_deleting_files && !wc->now_invalidating_dirty_pages)) {
+ /* There is free space so we can write to disk and we are not actively deleting dirty buffers */
+ struct page_cache *pg_cache = &ctx->pg_cache;
+ unsigned long total_bytes, bytes_written, nr_committed_pages, bytes_to_write = 0, producers, low_watermark,
+ high_watermark;
+
+ uv_rwlock_rdlock(&pg_cache->committed_page_index.lock);
+ nr_committed_pages = pg_cache->committed_page_index.nr_committed_pages;
+ uv_rwlock_rdunlock(&pg_cache->committed_page_index.lock);
+ producers = ctx->metric_API_max_producers;
+ /* are flushable pages more than 25% of the maximum page cache size? */
+ high_watermark = (ctx->max_cache_pages * 25LLU) / 100;
+ low_watermark = (ctx->max_cache_pages * 5LLU) / 100; /* 5%, must be smaller than high_watermark */
+
+ /* Flush more pages only if disk can keep up */
+ if (wc->inflight_dirty_pages < high_watermark + producers) {
+ if (nr_committed_pages > producers &&
+ /* committed to be written pages are more than the produced number */
+ nr_committed_pages - producers > high_watermark) {
+ /* Flushing speed must increase to stop page cache from filling with dirty pages */
+ bytes_to_write = (nr_committed_pages - producers - low_watermark) * RRDENG_BLOCK_SIZE;
+ }
+ bytes_to_write = MAX(DATAFILE_IDEAL_IO_SIZE, bytes_to_write);
+
+ debug(D_RRDENGINE, "Flushing pages to disk.");
+ for (total_bytes = bytes_written = do_flush_pages(wc, 0, NULL);
+ bytes_written && (total_bytes < bytes_to_write);
+ total_bytes += bytes_written) {
+ bytes_written = do_flush_pages(wc, 0, NULL);
+ }
+ }
+ }
+#ifdef NETDATA_INTERNAL_CHECKS
+ {
+ char buf[4096];
+ debug(D_RRDENGINE, "%s", get_rrdeng_statistics(wc->ctx, buf, sizeof(buf)));
+ }
+#endif
+}
+
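+/*
+ * Worked example of the watermarks in timer_cb(), with illustrative numbers:
+ * for max_cache_pages = 4096, high_watermark = 4096 * 25 / 100 = 1024 pages and
+ * low_watermark = 4096 * 5 / 100 = 204 pages. With 100 producers and 1500
+ * committed pages, 1500 - 100 = 1400 exceeds 1024, so this tick aims to flush
+ * (1500 - 100 - 204) * RRDENG_BLOCK_SIZE bytes, but never less than
+ * DATAFILE_IDEAL_IO_SIZE.
+ */
+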
+#define MAX_CMD_BATCH_SIZE (256)
+
+void rrdeng_worker(void* arg)
+{
+ struct rrdengine_worker_config* wc = arg;
+ struct rrdengine_instance *ctx = wc->ctx;
+ uv_loop_t* loop;
+ int shutdown, ret;
+ enum rrdeng_opcode opcode;
+ uv_timer_t timer_req;
+ struct rrdeng_cmd cmd;
+ unsigned cmd_batch_size;
+
+ rrdeng_init_cmd_queue(wc);
+
+ loop = wc->loop = mallocz(sizeof(uv_loop_t));
+ ret = uv_loop_init(loop);
+ if (ret) {
+ error("uv_loop_init(): %s", uv_strerror(ret));
+ goto error_after_loop_init;
+ }
+ loop->data = wc;
+
+ ret = uv_async_init(wc->loop, &wc->async, async_cb);
+ if (ret) {
+ error("uv_async_init(): %s", uv_strerror(ret));
+ goto error_after_async_init;
+ }
+ wc->async.data = wc;
+
+ wc->now_deleting_files = NULL;
+ wc->cleanup_thread_deleting_files = 0;
+
+ wc->now_invalidating_dirty_pages = NULL;
+ wc->cleanup_thread_invalidating_dirty_pages = 0;
+ wc->inflight_dirty_pages = 0;
+
+ /* dirty page flushing timer */
+ ret = uv_timer_init(loop, &timer_req);
+ if (ret) {
+ error("uv_timer_init(): %s", uv_strerror(ret));
+ goto error_after_timer_init;
+ }
+ timer_req.data = wc;
+
+ wc->error = 0;
+ /* wake up initialization thread */
+ complete(&ctx->rrdengine_completion);
+
+ fatal_assert(0 == uv_timer_start(&timer_req, timer_cb, TIMER_PERIOD_MS, TIMER_PERIOD_MS));
+ shutdown = 0;
+ while (likely(shutdown == 0 || rrdeng_threads_alive(wc))) {
+ uv_run(loop, UV_RUN_DEFAULT);
+ rrdeng_cleanup_finished_threads(wc);
+
+ /* wait for commands */
+ cmd_batch_size = 0;
+ do {
+ /*
+ * Avoid starving the loop when there are too many commands coming in.
+ * timer_cb will interrupt the loop again to allow serving more commands.
+ */
+ if (unlikely(cmd_batch_size >= MAX_CMD_BATCH_SIZE))
+ break;
+
+ cmd = rrdeng_deq_cmd(wc);
+ opcode = cmd.opcode;
+ ++cmd_batch_size;
+
+ switch (opcode) {
+ case RRDENG_NOOP:
+ /* the command queue was empty, do nothing */
+ break;
+ case RRDENG_SHUTDOWN:
+ shutdown = 1;
+ break;
+ case RRDENG_QUIESCE:
+ ctx->drop_metrics_under_page_cache_pressure = 0;
+ ctx->quiesce = SET_QUIESCE;
+ fatal_assert(0 == uv_timer_stop(&timer_req));
+ uv_close((uv_handle_t *)&timer_req, NULL);
+ while (do_flush_pages(wc, 1, NULL)) {
+ ; /* Force flushing of all committed pages. */
+ }
+ wal_flush_transaction_buffer(wc);
+ if (!rrdeng_threads_alive(wc)) {
+ ctx->quiesce = QUIESCED;
+ complete(&ctx->rrdengine_completion);
+ }
+ break;
+ case RRDENG_READ_PAGE:
+ do_read_extent(wc, &cmd.read_page.page_cache_descr, 1, 0);
+ break;
+ case RRDENG_READ_EXTENT:
+ do_read_extent(wc, cmd.read_extent.page_cache_descr, cmd.read_extent.page_count, 1);
+ break;
+ case RRDENG_COMMIT_PAGE:
+ do_commit_transaction(wc, STORE_DATA, NULL);
+ break;
+ case RRDENG_FLUSH_PAGES: {
+ if (wc->now_invalidating_dirty_pages) {
+ /* Do not flush if the disk cannot keep up */
+ complete(cmd.completion);
+ } else {
+ (void)do_flush_pages(wc, 1, cmd.completion);
+ }
+ break;
+ }
+ case RRDENG_INVALIDATE_OLDEST_MEMORY_PAGE:
+ rrdeng_invalidate_oldest_committed(wc);
+ break;
+ default:
+ debug(D_RRDENGINE, "%s: default.", __func__);
+ break;
+ }
+ } while (opcode != RRDENG_NOOP);
+ }
+
+ /* cleanup operations of the event loop */
+ info("Shutting down RRD engine event loop.");
+
+ /*
+ * uv_async_send after uv_close does not seem to crash on Linux at the moment;
+ * it is, however, undocumented behaviour, and we need to be aware in case this
+ * becomes an issue in the future.
+ */
+ uv_close((uv_handle_t *)&wc->async, NULL);
+
+ while (do_flush_pages(wc, 1, NULL)) {
+ ; /* Force flushing of all committed pages. */
+ }
+ wal_flush_transaction_buffer(wc);
+ uv_run(loop, UV_RUN_DEFAULT);
+
+ info("Shutting down RRD engine event loop complete.");
+ /* TODO: don't let the API block by waiting to enqueue commands */
+ uv_cond_destroy(&wc->cmd_cond);
+/* uv_mutex_destroy(&wc->cmd_mutex); */
+ fatal_assert(0 == uv_loop_close(loop));
+ freez(loop);
+
+ return;
+
+error_after_timer_init:
+ uv_close((uv_handle_t *)&wc->async, NULL);
+error_after_async_init:
+ fatal_assert(0 == uv_loop_close(loop));
+error_after_loop_init:
+ freez(loop);
+
+ wc->error = UV_EAGAIN;
+ /* wake up initialization thread */
+ complete(&ctx->rrdengine_completion);
+}
+
+/* C entry point for development purposes
+ * make "LDFLAGS=-errdengine_main"
+ */
+void rrdengine_main(void)
+{
+ int ret;
+ struct rrdengine_instance *ctx;
+
+ sanity_check();
+ ret = rrdeng_init(NULL, &ctx, "/tmp", RRDENG_MIN_PAGE_CACHE_SIZE_MB, RRDENG_MIN_DISK_SPACE_MB);
+ if (ret) {
+ exit(ret);
+ }
+ rrdeng_exit(ctx);
+ fprintf(stderr, "Hello world!");
+ exit(0);
+}
diff --git a/database/engine/rrdengine.h b/database/engine/rrdengine.h
new file mode 100644
index 0000000..87af04b
--- /dev/null
+++ b/database/engine/rrdengine.h
@@ -0,0 +1,233 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_RRDENGINE_H
+#define NETDATA_RRDENGINE_H
+
+#ifndef _GNU_SOURCE
+#define _GNU_SOURCE
+#endif
+#include <fcntl.h>
+#include <lz4.h>
+#include <Judy.h>
+#include <openssl/sha.h>
+#include <openssl/evp.h>
+#include "../../daemon/common.h"
+#include "../rrd.h"
+#include "rrddiskprotocol.h"
+#include "rrdenginelib.h"
+#include "datafile.h"
+#include "journalfile.h"
+#include "metadata_log/metadatalog.h"
+#include "rrdengineapi.h"
+#include "pagecache.h"
+#include "rrdenglocking.h"
+
+#ifdef NETDATA_RRD_INTERNALS
+
+#endif /* NETDATA_RRD_INTERNALS */
+
+/* Forward declarations */
+struct rrdengine_instance;
+
+#define MAX_PAGES_PER_EXTENT (64) /* TODO: can go higher only when journal supports bigger than 4KiB transactions */
+
+#define RRDENG_FILE_NUMBER_SCAN_TMPL "%1u-%10u"
+#define RRDENG_FILE_NUMBER_PRINT_TMPL "%1.1u-%10.10u"
+
+
+typedef enum {
+ RRDENGINE_STATUS_UNINITIALIZED = 0,
+ RRDENGINE_STATUS_INITIALIZING,
+ RRDENGINE_STATUS_INITIALIZED
+} rrdengine_state_t;
+
+enum rrdeng_opcode {
+ /* can be used to return empty status or flush the command queue */
+ RRDENG_NOOP = 0,
+
+ RRDENG_READ_PAGE,
+ RRDENG_READ_EXTENT,
+ RRDENG_COMMIT_PAGE,
+ RRDENG_FLUSH_PAGES,
+ RRDENG_SHUTDOWN,
+ RRDENG_INVALIDATE_OLDEST_MEMORY_PAGE,
+ RRDENG_QUIESCE,
+
+ RRDENG_MAX_OPCODE
+};
+
+struct rrdeng_cmd {
+ enum rrdeng_opcode opcode;
+ union {
+ struct rrdeng_read_page {
+ struct rrdeng_page_descr *page_cache_descr;
+ } read_page;
+ struct rrdeng_read_extent {
+ struct rrdeng_page_descr *page_cache_descr[MAX_PAGES_PER_EXTENT];
+ int page_count;
+ } read_extent;
+ struct completion *completion;
+ };
+};
+
+#define RRDENG_CMD_Q_MAX_SIZE (2048)
+
+struct rrdeng_cmdqueue {
+ unsigned head, tail;
+ struct rrdeng_cmd cmd_array[RRDENG_CMD_Q_MAX_SIZE];
+};
+
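+/*
+ * head and tail above implement a fixed-size ring buffer: an index advances as
+ * (i != RRDENG_CMD_Q_MAX_SIZE - 1) ? i + 1 : 0, while queue_size, guarded by
+ * cmd_mutex, distinguishes the empty queue from the full one.
+ */
+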
+struct extent_io_descriptor {
+ uv_fs_t req;
+ uv_buf_t iov;
+ void *buf;
+ uint64_t pos;
+ unsigned bytes;
+ struct completion *completion;
+ unsigned descr_count;
+ int release_descr;
+ struct rrdeng_page_descr *descr_array[MAX_PAGES_PER_EXTENT];
+ Word_t descr_commit_idx_array[MAX_PAGES_PER_EXTENT];
+ struct extent_io_descriptor *next; /* multiple requests to be served by the same cached extent */
+};
+
+struct generic_io_descriptor {
+ uv_fs_t req;
+ uv_buf_t iov;
+ void *buf;
+ uint64_t pos;
+ unsigned bytes;
+ struct completion *completion;
+};
+
+struct extent_cache_element {
+ struct extent_info *extent; /* The ABA problem is avoided with the help of fileno below */
+ unsigned fileno;
+ struct extent_cache_element *prev; /* LRU */
+ struct extent_cache_element *next; /* LRU */
+ struct extent_io_descriptor *inflight_io_descr; /* I/O descriptor for in-flight extent */
+ uint8_t pages[MAX_PAGES_PER_EXTENT * RRDENG_BLOCK_SIZE];
+};
+
+#define MAX_CACHED_EXTENTS 16 /* cannot exceed 32 so the bitmaps below fit in an unsigned int */
+
+/* Initialize by setting the structure to zero */
+struct extent_cache {
+ struct extent_cache_element extent_array[MAX_CACHED_EXTENTS];
+ unsigned allocation_bitmap; /* 1 if the corresponding position in the extent_array is allocated */
+ unsigned inflight_bitmap; /* 1 if the corresponding position in the extent_array is waiting for I/O */
+
+ struct extent_cache_element *replaceQ_head; /* LRU */
+ struct extent_cache_element *replaceQ_tail; /* MRU */
+};
+
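+/*
+ * In brief: slot i of extent_array is free when !(allocation_bitmap & (1U << i))
+ * and has I/O pending when (inflight_bitmap & (1U << i)) is set; this is why
+ * MAX_CACHED_EXTENTS must not exceed the width of the bitmap words.
+ */
+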
+struct rrdengine_worker_config {
+ struct rrdengine_instance *ctx;
+
+ uv_thread_t thread;
+ uv_loop_t* loop;
+ uv_async_t async;
+
+ /* file deletion thread */
+ uv_thread_t *now_deleting_files;
+ unsigned long cleanup_thread_deleting_files; /* set to 0 when now_deleting_files is still running */
+
+ /* dirty page deletion thread */
+ uv_thread_t *now_invalidating_dirty_pages;
+ /* set to 0 when now_invalidating_dirty_pages is still running */
+ unsigned long cleanup_thread_invalidating_dirty_pages;
+ unsigned inflight_dirty_pages;
+
+ /* FIFO command queue */
+ uv_mutex_t cmd_mutex;
+ uv_cond_t cmd_cond;
+ volatile unsigned queue_size;
+ struct rrdeng_cmdqueue cmd_queue;
+
+ struct extent_cache xt_cache;
+
+ int error;
+};
+
+/*
+ * Debug statistics not used by code logic.
+ * They only describe operations since DB engine instance load time.
+ */
+struct rrdengine_statistics {
+ rrdeng_stats_t metric_API_producers;
+ rrdeng_stats_t metric_API_consumers;
+ rrdeng_stats_t pg_cache_insertions;
+ rrdeng_stats_t pg_cache_deletions;
+ rrdeng_stats_t pg_cache_hits;
+ rrdeng_stats_t pg_cache_misses;
+ rrdeng_stats_t pg_cache_backfills;
+ rrdeng_stats_t pg_cache_evictions;
+ rrdeng_stats_t before_decompress_bytes;
+ rrdeng_stats_t after_decompress_bytes;
+ rrdeng_stats_t before_compress_bytes;
+ rrdeng_stats_t after_compress_bytes;
+ rrdeng_stats_t io_write_bytes;
+ rrdeng_stats_t io_write_requests;
+ rrdeng_stats_t io_read_bytes;
+ rrdeng_stats_t io_read_requests;
+ rrdeng_stats_t io_write_extent_bytes;
+ rrdeng_stats_t io_write_extents;
+ rrdeng_stats_t io_read_extent_bytes;
+ rrdeng_stats_t io_read_extents;
+ rrdeng_stats_t datafile_creations;
+ rrdeng_stats_t datafile_deletions;
+ rrdeng_stats_t journalfile_creations;
+ rrdeng_stats_t journalfile_deletions;
+ rrdeng_stats_t page_cache_descriptors;
+ rrdeng_stats_t io_errors;
+ rrdeng_stats_t fs_errors;
+ rrdeng_stats_t pg_cache_over_half_dirty_events;
+ rrdeng_stats_t flushing_pressure_page_deletions;
+};
+
+/* I/O errors global counter */
+extern rrdeng_stats_t global_io_errors;
+/* File-System errors global counter */
+extern rrdeng_stats_t global_fs_errors;
+/* number of File-Descriptors that have been reserved by dbengine */
+extern rrdeng_stats_t rrdeng_reserved_file_descriptors;
+/* global counter of failures to flush dirty pages quickly enough */
+extern rrdeng_stats_t global_pg_cache_over_half_dirty_events;
+extern rrdeng_stats_t global_flushing_pressure_page_deletions; /* number of deleted pages */
+
+#define NO_QUIESCE (0) /* initial state when all operations function normally */
+#define SET_QUIESCE (1) /* set it before shutting down the instance, quiesce long running operations */
+#define QUIESCED (2) /* is set after all threads have finished running */
+
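+/*
+ * Quiesce life cycle in summary: NO_QUIESCE becomes SET_QUIESCE when the
+ * RRDENG_QUIESCE opcode is served, and QUIESCED once all dirty pages have been
+ * flushed and no helper threads remain alive, at which point
+ * rrdengine_completion is completed.
+ */
+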
+struct rrdengine_instance {
+ struct metalog_instance *metalog_ctx;
+ struct rrdengine_worker_config worker_config;
+ struct completion rrdengine_completion;
+ struct page_cache pg_cache;
+ uint8_t drop_metrics_under_page_cache_pressure; /* boolean */
+ uint8_t global_compress_alg;
+ struct transaction_commit_log commit_log;
+ struct rrdengine_datafile_list datafiles;
+ RRDHOST *host; /* the legacy host, or NULL for multi-host DB */
+ char dbfiles_path[FILENAME_MAX + 1];
+ char machine_guid[GUID_LEN + 1]; /* the unique ID of the corresponding host, or localhost for multihost DB */
+ uint64_t disk_space;
+ uint64_t max_disk_space;
+ unsigned last_fileno; /* newest index of datafile and journalfile */
+ unsigned long max_cache_pages;
+ unsigned long cache_pages_low_watermark;
+ unsigned long metric_API_max_producers;
+
+ uint8_t quiesce; /* set to SET_QUIESCE before shutdown of the engine */
+
+ struct rrdengine_statistics stats;
+};
+
+extern int init_rrd_files(struct rrdengine_instance *ctx);
+extern void finalize_rrd_files(struct rrdengine_instance *ctx);
+extern void rrdeng_test_quota(struct rrdengine_worker_config* wc);
+extern void rrdeng_worker(void* arg);
+extern void rrdeng_enq_cmd(struct rrdengine_worker_config* wc, struct rrdeng_cmd *cmd);
+extern struct rrdeng_cmd rrdeng_deq_cmd(struct rrdengine_worker_config* wc);
+
+#endif /* NETDATA_RRDENGINE_H */
\ No newline at end of file
diff --git a/database/engine/rrdengineapi.c b/database/engine/rrdengineapi.c
new file mode 100755
index 0000000..7b2ff5b
--- /dev/null
+++ b/database/engine/rrdengineapi.c
@@ -0,0 +1,999 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+#include "rrdengine.h"
+
+/* Default global database instance */
+struct rrdengine_instance multidb_ctx;
+
+int default_rrdeng_page_cache_mb = 32;
+int default_rrdeng_disk_quota_mb = 256;
+int default_multidb_disk_quota_mb = 256;
+/* Default behaviour is to unblock data collection by dropping metrics when the page cache fills up with dirty pages */
+uint8_t rrdeng_drop_metrics_under_page_cache_pressure = 1;
+
+static inline struct rrdengine_instance *get_rrdeng_ctx_from_host(RRDHOST *host)
+{
+ return host->rrdeng_ctx;
+}
+
+/* This UUID is not unique across hosts */
+void rrdeng_generate_legacy_uuid(const char *dim_id, char *chart_id, uuid_t *ret_uuid)
+{
+ EVP_MD_CTX *evpctx;
+ unsigned char hash_value[EVP_MAX_MD_SIZE];
+ unsigned int hash_len;
+
+ evpctx = EVP_MD_CTX_create();
+ EVP_DigestInit_ex(evpctx, EVP_sha256(), NULL);
+ EVP_DigestUpdate(evpctx, dim_id, strlen(dim_id));
+ EVP_DigestUpdate(evpctx, chart_id, strlen(chart_id));
+ EVP_DigestFinal_ex(evpctx, hash_value, &hash_len);
+ EVP_MD_CTX_destroy(evpctx);
+ fatal_assert(hash_len > sizeof(uuid_t));
+ memcpy(ret_uuid, hash_value, sizeof(uuid_t));
+}
+
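+/*
+ * Example, for illustration only: for dimension id "user" on chart id
+ * "system.cpu", the legacy UUID is the first 16 bytes of the SHA-256 digest of
+ * the concatenation "usersystem.cpu". Every host derives the same UUID for the
+ * same chart/dimension pair, which is why it is not unique across hosts.
+ */
+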
+/* Transform the legacy UUID to be unique across hosts deterministically */
+void rrdeng_convert_legacy_uuid_to_multihost(char machine_guid[GUID_LEN + 1], uuid_t *legacy_uuid, uuid_t *ret_uuid)
+{
+ EVP_MD_CTX *evpctx;
+ unsigned char hash_value[EVP_MAX_MD_SIZE];
+ unsigned int hash_len;
+
+ evpctx = EVP_MD_CTX_create();
+ EVP_DigestInit_ex(evpctx, EVP_sha256(), NULL);
+ EVP_DigestUpdate(evpctx, machine_guid, GUID_LEN);
+ EVP_DigestUpdate(evpctx, *legacy_uuid, sizeof(uuid_t));
+ EVP_DigestFinal_ex(evpctx, hash_value, &hash_len);
+ EVP_MD_CTX_destroy(evpctx);
+ fatal_assert(hash_len > sizeof(uuid_t));
+ memcpy(ret_uuid, hash_value, sizeof(uuid_t));
+}
+
+void rrdeng_metric_init(RRDDIM *rd, uuid_t *dim_uuid)
+{
+ struct page_cache *pg_cache;
+ struct rrdengine_instance *ctx;
+ uuid_t legacy_uuid;
+ uuid_t multihost_legacy_uuid;
+ Pvoid_t *PValue;
+ struct pg_cache_page_index *page_index = NULL;
+ int is_multihost_child = 0;
+ RRDHOST *host = rd->rrdset->rrdhost;
+
+ ctx = get_rrdeng_ctx_from_host(rd->rrdset->rrdhost);
+ if (unlikely(!ctx)) {
+ error("Failed to fetch multidb context");
+ return;
+ }
+ pg_cache = &ctx->pg_cache;
+
+ rrdeng_generate_legacy_uuid(rd->id, rd->rrdset->id, &legacy_uuid);
+ rd->state->metric_uuid = dim_uuid;
+ if (host != localhost && host->rrdeng_ctx == &multidb_ctx)
+ is_multihost_child = 1;
+
+ uv_rwlock_rdlock(&pg_cache->metrics_index.lock);
+ PValue = JudyHSGet(pg_cache->metrics_index.JudyHS_array, &legacy_uuid, sizeof(uuid_t));
+ if (likely(NULL != PValue)) {
+ page_index = *PValue;
+ }
+ uv_rwlock_rdunlock(&pg_cache->metrics_index.lock);
+ if (is_multihost_child || NULL == PValue) {
+ /* First time we see the legacy UUID, or the metric belongs to a child host in a multi-host DB.
+ * Drop legacy support, normal path */
+
+ if (unlikely(!rd->state->metric_uuid))
+ rd->state->metric_uuid = create_dimension_uuid(rd->rrdset, rd);
+
+ uv_rwlock_rdlock(&pg_cache->metrics_index.lock);
+ PValue = JudyHSGet(pg_cache->metrics_index.JudyHS_array, rd->state->metric_uuid, sizeof(uuid_t));
+ if (likely(NULL != PValue)) {
+ page_index = *PValue;
+ }
+ uv_rwlock_rdunlock(&pg_cache->metrics_index.lock);
+ if (NULL == PValue) {
+ uv_rwlock_wrlock(&pg_cache->metrics_index.lock);
+ PValue = JudyHSIns(&pg_cache->metrics_index.JudyHS_array, rd->state->metric_uuid, sizeof(uuid_t), PJE0);
+ fatal_assert(NULL == *PValue); /* TODO: figure out concurrency model */
+ *PValue = page_index = create_page_index(rd->state->metric_uuid);
+ page_index->prev = pg_cache->metrics_index.last_page_index;
+ pg_cache->metrics_index.last_page_index = page_index;
+ uv_rwlock_wrunlock(&pg_cache->metrics_index.lock);
+ }
+ } else {
+ /* There are legacy UUIDs in the database, implement backward compatibility */
+
+ rrdeng_convert_legacy_uuid_to_multihost(rd->rrdset->rrdhost->machine_guid, &legacy_uuid,
+ &multihost_legacy_uuid);
+
+ if (unlikely(!rd->state->metric_uuid))
+ rd->state->metric_uuid = mallocz(sizeof(uuid_t));
+
+ int need_to_store = (dim_uuid == NULL || uuid_compare(*rd->state->metric_uuid, multihost_legacy_uuid));
+
+ uuid_copy(*rd->state->metric_uuid, multihost_legacy_uuid);
+
+ if (unlikely(need_to_store))
+ (void)sql_store_dimension(rd->state->metric_uuid, rd->rrdset->chart_uuid, rd->id, rd->name, rd->multiplier, rd->divisor,
+ rd->algorithm);
+
+ }
+ rd->state->rrdeng_uuid = &page_index->id;
+ rd->state->page_index = page_index;
+}
+
+/*
+ * Gets a handle for storing metrics to the database.
+ * The handle must be released with rrdeng_store_metric_final().
+ */
+void rrdeng_store_metric_init(RRDDIM *rd)
+{
+ struct rrdeng_collect_handle *handle;
+ struct rrdengine_instance *ctx;
+ struct pg_cache_page_index *page_index;
+
+ ctx = get_rrdeng_ctx_from_host(rd->rrdset->rrdhost);
+ handle = &rd->state->handle.rrdeng;
+ handle->ctx = ctx;
+
+ handle->descr = NULL;
+ handle->prev_descr = NULL;
+ handle->unaligned_page = 0;
+
+ page_index = rd->state->page_index;
+ uv_rwlock_wrlock(&page_index->lock);
+ ++page_index->writers;
+ uv_rwlock_wrunlock(&page_index->lock);
+}
+
+/* The page must be populated and referenced */
+static int page_has_only_empty_metrics(struct rrdeng_page_descr *descr)
+{
+ unsigned i;
+ uint8_t has_only_empty_metrics = 1;
+ storage_number *page;
+
+ page = descr->pg_cache_descr->page;
+ for (i = 0 ; i < descr->page_length / sizeof(storage_number); ++i) {
+ if (SN_EMPTY_SLOT != page[i]) {
+ has_only_empty_metrics = 0;
+ break;
+ }
+ }
+ return has_only_empty_metrics;
+}
+
+void rrdeng_store_metric_flush_current_page(RRDDIM *rd)
+{
+ struct rrdeng_collect_handle *handle;
+ struct rrdengine_instance *ctx;
+ struct rrdeng_page_descr *descr;
+
+ handle = &rd->state->handle.rrdeng;
+ ctx = handle->ctx;
+ if (unlikely(!ctx))
+ return;
+ descr = handle->descr;
+ if (unlikely(NULL == descr)) {
+ return;
+ }
+ if (likely(descr->page_length)) {
+ int page_is_empty;
+
+ rrd_stat_atomic_add(&ctx->stats.metric_API_producers, -1);
+
+ if (handle->prev_descr) {
+ /* unpin old second page */
+ pg_cache_put(ctx, handle->prev_descr);
+ }
+ page_is_empty = page_has_only_empty_metrics(descr);
+ if (page_is_empty) {
+ debug(D_RRDENGINE, "Page has empty metrics only, deleting:");
+ if (unlikely(debug_flags & D_RRDENGINE))
+ print_page_cache_descr(descr);
+ pg_cache_put(ctx, descr);
+ pg_cache_punch_hole(ctx, descr, 1, 0, NULL);
+ handle->prev_descr = NULL;
+ } else {
+ /*
+ * Disable pinning for now as it leads to deadlocks. When a collector stops collecting, the extra pinned page
+ * eventually gets rotated, but it cannot be destroyed due to the extra reference.
+ */
+ /* added 1 extra reference to keep 2 dirty pages pinned per metric, expected refcnt = 2 */
+/* rrdeng_page_descr_mutex_lock(ctx, descr);
+ ret = pg_cache_try_get_unsafe(descr, 0);
+ rrdeng_page_descr_mutex_unlock(ctx, descr);
+ fatal_assert(1 == ret);*/
+
+ rrdeng_commit_page(ctx, descr, handle->page_correlation_id);
+ /* handle->prev_descr = descr;*/
+ }
+ } else {
+ freez(descr->pg_cache_descr->page);
+ rrdeng_destroy_pg_cache_descr(ctx, descr->pg_cache_descr);
+ freez(descr);
+ }
+ handle->descr = NULL;
+}
+
+void rrdeng_store_metric_next(RRDDIM *rd, usec_t point_in_time, storage_number number)
+{
+ struct rrdeng_collect_handle *handle;
+ struct rrdengine_instance *ctx;
+ struct page_cache *pg_cache;
+ struct rrdeng_page_descr *descr;
+ storage_number *page;
+ uint8_t must_flush_unaligned_page = 0, perfect_page_alignment = 0;
+
+ handle = &rd->state->handle.rrdeng;
+ ctx = handle->ctx;
+ pg_cache = &ctx->pg_cache;
+ descr = handle->descr;
+
+ if (descr) {
+ /* Make alignment decisions */
+
+ if (descr->page_length == rd->rrdset->rrddim_page_alignment) {
+ /* this is the leading dimension that defines chart alignment */
+ perfect_page_alignment = 1;
+ }
+ /* is the metric far enough out of alignment with the others? */
+ if (unlikely(descr->page_length + sizeof(number) < rd->rrdset->rrddim_page_alignment)) {
+ handle->unaligned_page = 1;
+ debug(D_RRDENGINE, "Metric page is not aligned with chart:");
+ if (unlikely(debug_flags & D_RRDENGINE))
+ print_page_cache_descr(descr);
+ }
+ if (unlikely(handle->unaligned_page &&
+ /* did the other metrics change page? */
+ rd->rrdset->rrddim_page_alignment <= sizeof(number))) {
+ debug(D_RRDENGINE, "Flushing unaligned metric page.");
+ must_flush_unaligned_page = 1;
+ handle->unaligned_page = 0;
+ }
+ }
+ if (unlikely(NULL == descr ||
+ descr->page_length + sizeof(number) > RRDENG_BLOCK_SIZE ||
+ must_flush_unaligned_page)) {
+ rrdeng_store_metric_flush_current_page(rd);
+
+ page = rrdeng_create_page(ctx, &rd->state->page_index->id, &descr);
+ fatal_assert(page);
+
+ handle->descr = descr;
+
+ handle->page_correlation_id = rrd_atomic_fetch_add(&pg_cache->committed_page_index.latest_corr_id, 1);
+
+ if (0 == rd->rrdset->rrddim_page_alignment) {
+ /* this is the leading dimension that defines chart alignment */
+ perfect_page_alignment = 1;
+ }
+ }
+ page = descr->pg_cache_descr->page;
+ page[descr->page_length / sizeof(number)] = number;
+ pg_cache_atomic_set_pg_info(descr, point_in_time, descr->page_length + sizeof(number));
+
+ if (perfect_page_alignment)
+ rd->rrdset->rrddim_page_alignment = descr->page_length;
+ if (unlikely(INVALID_TIME == descr->start_time)) {
+ unsigned long new_metric_API_producers, old_metric_API_max_producers, ret_metric_API_max_producers;
+ descr->start_time = point_in_time;
+
+ new_metric_API_producers = rrd_atomic_add_fetch(&ctx->stats.metric_API_producers, 1);
+ while (unlikely(new_metric_API_producers > (old_metric_API_max_producers = ctx->metric_API_max_producers))) {
+ /* Increase ctx->metric_API_max_producers */
+ ret_metric_API_max_producers = ulong_compare_and_swap(&ctx->metric_API_max_producers,
+ old_metric_API_max_producers,
+ new_metric_API_producers);
+ if (old_metric_API_max_producers == ret_metric_API_max_producers) {
+ /* success */
+ break;
+ }
+ }
+
+ pg_cache_insert(ctx, rd->state->page_index, descr);
+ } else {
+ pg_cache_add_new_metric_time(rd->state->page_index, descr);
+ }
+}
+
+/*
+ * Releases the database reference from the handle for storing metrics.
+ * Returns 1 if it's safe to delete the dimension.
+ */
+int rrdeng_store_metric_finalize(RRDDIM *rd)
+{
+ struct rrdeng_collect_handle *handle;
+ struct rrdengine_instance *ctx;
+ struct pg_cache_page_index *page_index;
+ uint8_t can_delete_metric = 0;
+
+ handle = &rd->state->handle.rrdeng;
+ ctx = handle->ctx;
+ page_index = rd->state->page_index;
+ rrdeng_store_metric_flush_current_page(rd);
+ if (handle->prev_descr) {
+ /* unpin old second page */
+ pg_cache_put(ctx, handle->prev_descr);
+ }
+ uv_rwlock_wrlock(&page_index->lock);
+ if (!--page_index->writers && !page_index->page_count) {
+ can_delete_metric = 1;
+ }
+ uv_rwlock_wrunlock(&page_index->lock);
+
+ return can_delete_metric;
+}
+
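+/*
+ * Illustrative collector-side flow, a sketch of how the storage API above is
+ * driven; not engine code:
+ *
+ *     rrdeng_store_metric_init(rd);
+ *     rrdeng_store_metric_next(rd, point_in_time, number);  (once per sample)
+ *     ...
+ *     can_delete = rrdeng_store_metric_finalize(rd);  (1 if the dimension is empty)
+ */
+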
+/* Returns 1 if the data collection interval is well defined, 0 otherwise */
+static int metrics_with_known_interval(struct rrdeng_page_descr *descr)
+{
+ unsigned page_entries;
+
+ if (unlikely(INVALID_TIME == descr->start_time || INVALID_TIME == descr->end_time))
+ return 0;
+ page_entries = descr->page_length / sizeof(storage_number);
+ if (likely(page_entries > 1)) {
+ return 1;
+ }
+ return 0;
+}
+
+static inline uint32_t *pginfo_to_dt(struct rrdeng_page_info *page_info)
+{
+ return (uint32_t *)&page_info->scratch[0];
+}
+
+static inline uint32_t *pginfo_to_points(struct rrdeng_page_info *page_info)
+{
+ return (uint32_t *)&page_info->scratch[sizeof(uint32_t)];
+}
+
+/**
+ * Calculates the regions of different data collection intervals in a netdata chart in the time range
+ * [start_time,end_time]. This call takes the netdata chart read lock.
+ * @param st the netdata chart whose data collection interval boundaries are calculated.
+ * @param start_time inclusive starting time in seconds (converted to usec internally)
+ * @param end_time inclusive ending time in seconds (converted to usec internally)
+ * @param region_info_arrayp It allocates (*region_info_arrayp) and populates it with information about regions of a
+ * reference dimension that have different data collection intervals and overlap with the time range
+ * [start_time,end_time]. The caller must free (*region_info_arrayp) with freez(). If region_info_arrayp is set
+ * to NULL nothing was allocated.
+ * @param max_intervalp is dereferenced and set to the largest data collection interval of all regions.
+ * @return number of regions with different data collection intervals.
+ */
+unsigned rrdeng_variable_step_boundaries(RRDSET *st, time_t start_time, time_t end_time,
+ struct rrdeng_region_info **region_info_arrayp, unsigned *max_intervalp, struct context_param *context_param_list)
+{
+ struct pg_cache_page_index *page_index;
+ struct rrdengine_instance *ctx;
+ unsigned pages_nr;
+ RRDDIM *rd_iter, *rd;
+ struct rrdeng_page_info *page_info_array, *curr, *prev, *old_prev;
+ unsigned i, j, page_entries, region_points, page_points, regions, max_interval;
+ time_t now;
+ usec_t dt, current_position_time, max_time = 0, min_time, curr_time, first_valid_time_in_page;
+ struct rrdeng_region_info *region_info_array;
+ uint8_t is_first_region_initialized;
+
+ ctx = get_rrdeng_ctx_from_host(st->rrdhost);
+ regions = 1;
+ *max_intervalp = max_interval = 0;
+ region_info_array = NULL;
+ *region_info_arrayp = NULL;
+ page_info_array = NULL;
+
+ RRDDIM *temp_rd = context_param_list ? context_param_list->rd : NULL;
+ rrdset_rdlock(st);
+ for(rd_iter = temp_rd?temp_rd:st->dimensions, rd = NULL, min_time = (usec_t)-1 ; rd_iter ; rd_iter = rd_iter->next) {
+ /*
+ * Choose oldest dimension as reference. This is not equivalent to the union of all dimensions
+ * but it is a best effort approximation with a bias towards older metrics in a chart. It
+ * matches netdata behaviour in the sense that dimensions are generally aligned in a chart
+ * and older dimensions contain more information about the time range. It does not work well
+ * for metrics that have recently stopped being collected.
+ */
+ curr_time = pg_cache_oldest_time_in_range(ctx, rd_iter->state->rrdeng_uuid,
+ start_time * USEC_PER_SEC, end_time * USEC_PER_SEC);
+ if (INVALID_TIME != curr_time && curr_time < min_time) {
+ rd = rd_iter;
+ min_time = curr_time;
+ }
+ }
+ rrdset_unlock(st);
+ if (NULL == rd) {
+ return 1;
+ }
+ pages_nr = pg_cache_preload(ctx, rd->state->rrdeng_uuid, start_time * USEC_PER_SEC, end_time * USEC_PER_SEC,
+ &page_info_array, &page_index);
+ if (pages_nr) {
+ /* conservative allocation, will reduce the size later if necessary */
+ region_info_array = mallocz(sizeof(*region_info_array) * pages_nr);
+ }
+ is_first_region_initialized = 0;
+ region_points = 0;
+
+ /* pages loop */
+ for (i = 0, curr = NULL, prev = NULL ; i < pages_nr ; ++i) {
+ old_prev = prev;
+ prev = curr;
+ curr = &page_info_array[i];
+ *pginfo_to_points(curr) = 0; /* initialize to invalid page */
+ *pginfo_to_dt(curr) = 0; /* no known data collection interval yet */
+ if (unlikely(INVALID_TIME == curr->start_time || INVALID_TIME == curr->end_time ||
+ curr->end_time < curr->start_time)) {
+ info("Ignoring page with invalid timestamps.");
+ prev = old_prev;
+ continue;
+ }
+ page_entries = curr->page_length / sizeof(storage_number);
+ fatal_assert(0 != page_entries);
+ if (likely(1 != page_entries)) {
+ dt = (curr->end_time - curr->start_time) / (page_entries - 1);
+ *pginfo_to_dt(curr) = ROUND_USEC_TO_SEC(dt);
+ if (unlikely(0 == *pginfo_to_dt(curr)))
+ *pginfo_to_dt(curr) = 1;
+ } else {
+ dt = 0;
+ }
+ for (j = 0, page_points = 0 ; j < page_entries ; ++j) {
+ uint8_t is_metric_out_of_order, is_metric_earlier_than_range;
+
+ is_metric_earlier_than_range = 0;
+ is_metric_out_of_order = 0;
+
+ current_position_time = curr->start_time + j * dt;
+ now = current_position_time / USEC_PER_SEC;
+ if (now > end_time) { /* there will be no more pages in the time range */
+ break;
+ }
+ if (now < start_time)
+ is_metric_earlier_than_range = 1;
+ if (unlikely(current_position_time < max_time)) /* just went back in time */
+ is_metric_out_of_order = 1;
+ if (is_metric_earlier_than_range || unlikely(is_metric_out_of_order)) {
+ if (unlikely(is_metric_out_of_order))
+ info("Ignoring metric with out of order timestamp.");
+ continue; /* next entry */
+ }
+ /* here is a valid metric */
+ ++page_points;
+ region_info_array[regions - 1].points = ++region_points;
+ max_time = current_position_time;
+ if (1 == page_points)
+ first_valid_time_in_page = current_position_time;
+ if (unlikely(!is_first_region_initialized)) {
+ fatal_assert(1 == regions);
+ /* this is the first region */
+ region_info_array[0].start_time = current_position_time;
+ is_first_region_initialized = 1;
+ }
+ }
+ *pginfo_to_points(curr) = page_points;
+ if (0 == page_points) {
+ prev = old_prev;
+ continue;
+ }
+
+ if (unlikely(0 == *pginfo_to_dt(curr))) { /* unknown data collection interval */
+ fatal_assert(1 == page_points);
+
+ if (likely(NULL != prev)) { /* get interval from previous page */
+ *pginfo_to_dt(curr) = *pginfo_to_dt(prev);
+ } else { /* there is no previous page in the query */
+ struct rrdeng_page_info db_page_info;
+
+ /* go to database */
+ pg_cache_get_filtered_info_prev(ctx, page_index, curr->start_time,
+ metrics_with_known_interval, &db_page_info);
+ if (unlikely(db_page_info.start_time == INVALID_TIME || db_page_info.end_time == INVALID_TIME ||
+ 0 == db_page_info.page_length)) { /* nothing in the database, default to update_every */
+ *pginfo_to_dt(curr) = rd->update_every;
+ } else {
+ unsigned db_entries;
+ usec_t db_dt;
+
+ db_entries = db_page_info.page_length / sizeof(storage_number);
+ db_dt = (db_page_info.end_time - db_page_info.start_time) / (db_entries - 1);
+ *pginfo_to_dt(curr) = ROUND_USEC_TO_SEC(db_dt);
+ if (unlikely(0 == *pginfo_to_dt(curr)))
+ *pginfo_to_dt(curr) = 1;
+
+ }
+ }
+ }
+ if (likely(prev) && unlikely(*pginfo_to_dt(curr) != *pginfo_to_dt(prev))) {
+ info("Data collection interval change detected in query: %"PRIu32" -> %"PRIu32,
+ *pginfo_to_dt(prev), *pginfo_to_dt(curr));
+ region_info_array[regions++ - 1].points -= page_points;
+ region_info_array[regions - 1].points = region_points = page_points;
+ region_info_array[regions - 1].start_time = first_valid_time_in_page;
+ }
+ if (*pginfo_to_dt(curr) > max_interval)
+ max_interval = *pginfo_to_dt(curr);
+ region_info_array[regions - 1].update_every = *pginfo_to_dt(curr);
+ }
+ if (page_info_array)
+ freez(page_info_array);
+ if (region_info_array) {
+ if (likely(is_first_region_initialized)) {
+ /* free unnecessary memory */
+ region_info_array = reallocz(region_info_array, sizeof(*region_info_array) * regions);
+ *region_info_arrayp = region_info_array;
+ *max_intervalp = max_interval;
+ } else {
+ /* empty result */
+ freez(region_info_array);
+ }
+ }
+ return regions;
+}
+
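+/*
+ * Illustrative caller of rrdeng_variable_step_boundaries(), a sketch only:
+ * query the regions of a chart and release the array as documented above.
+ */
+static void example_chart_regions(RRDSET *st, time_t start_time, time_t end_time)
+{
+    struct rrdeng_region_info *region_info_array = NULL;
+    unsigned regions, max_interval;
+
+    regions = rrdeng_variable_step_boundaries(st, start_time, end_time,
+                                              &region_info_array, &max_interval, NULL);
+    /* each region carries start_time, update_every and points */
+    (void)regions;
+    if (region_info_array)
+        freez(region_info_array);
+}
+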
+/*
+ * Gets a handle for loading metrics from the database.
+ * The handle must be released with rrdeng_load_metric_final().
+ */
+void rrdeng_load_metric_init(RRDDIM *rd, struct rrddim_query_handle *rrdimm_handle, time_t start_time, time_t end_time)
+{
+ struct rrdeng_query_handle *handle;
+ struct rrdengine_instance *ctx;
+ unsigned pages_nr;
+
+ ctx = get_rrdeng_ctx_from_host(rd->rrdset->rrdhost);
+ rrdimm_handle->start_time = start_time;
+ rrdimm_handle->end_time = end_time;
+ handle = &rrdimm_handle->rrdeng;
+ handle->next_page_time = start_time;
+ handle->now = start_time;
+ handle->position = 0;
+ handle->ctx = ctx;
+ handle->descr = NULL;
+ pages_nr = pg_cache_preload(ctx, rd->state->rrdeng_uuid, start_time * USEC_PER_SEC, end_time * USEC_PER_SEC,
+ NULL, &handle->page_index);
+ if (unlikely(NULL == handle->page_index || 0 == pages_nr))
+ /* there are no metrics to load */
+ handle->next_page_time = INVALID_TIME;
+}
+
+/* Returns the metric and sets its timestamp into current_time */
+storage_number rrdeng_load_metric_next(struct rrddim_query_handle *rrdimm_handle, time_t *current_time)
+{
+ struct rrdeng_query_handle *handle;
+ struct rrdengine_instance *ctx;
+ struct rrdeng_page_descr *descr;
+ storage_number *page, ret;
+ unsigned position, entries;
+ usec_t next_page_time = 0, current_position_time, page_end_time = 0;
+ uint32_t page_length;
+
+ handle = &rrdimm_handle->rrdeng;
+ if (unlikely(INVALID_TIME == handle->next_page_time)) {
+ return SN_EMPTY_SLOT;
+ }
+ ctx = handle->ctx;
+ if (unlikely(NULL == (descr = handle->descr))) {
+ /* it's the first call */
+ next_page_time = handle->next_page_time * USEC_PER_SEC;
+ } else {
+ pg_cache_atomic_get_pg_info(descr, &page_end_time, &page_length);
+ }
+ position = handle->position + 1;
+
+ if (unlikely(NULL == descr ||
+ position >= (page_length / sizeof(storage_number)))) {
+ /* We need to get a new page */
+ if (descr) {
+ /* Drop old page's reference */
+#ifdef NETDATA_INTERNAL_CHECKS
+ rrd_stat_atomic_add(&ctx->stats.metric_API_consumers, -1);
+#endif
+ pg_cache_put(ctx, descr);
+ handle->descr = NULL;
+ handle->next_page_time = (page_end_time / USEC_PER_SEC) + 1;
+ if (unlikely(handle->next_page_time > rrdimm_handle->end_time)) {
+ goto no_more_metrics;
+ }
+ next_page_time = handle->next_page_time * USEC_PER_SEC;
+ }
+
+ descr = pg_cache_lookup_next(ctx, handle->page_index, &handle->page_index->id,
+ next_page_time, rrdimm_handle->end_time * USEC_PER_SEC);
+ if (NULL == descr) {
+ goto no_more_metrics;
+ }
+#ifdef NETDATA_INTERNAL_CHECKS
+ rrd_stat_atomic_add(&ctx->stats.metric_API_consumers, 1);
+#endif
+ handle->descr = descr;
+ pg_cache_atomic_get_pg_info(descr, &page_end_time, &page_length);
+ if (unlikely(INVALID_TIME == descr->start_time ||
+ INVALID_TIME == page_end_time)) {
+ goto no_more_metrics;
+ }
+ if (unlikely(descr->start_time != page_end_time && next_page_time > descr->start_time)) {
+ /* we're in the middle of the page somewhere */
+ entries = page_length / sizeof(storage_number);
+ position = ((uint64_t)(next_page_time - descr->start_time)) * (entries - 1) /
+ (page_end_time - descr->start_time);
+ } else {
+ position = 0;
+ }
+ }
+ page = descr->pg_cache_descr->page;
+ ret = page[position];
+ entries = page_length / sizeof(storage_number);
+ if (entries > 1) {
+ usec_t dt;
+
+ dt = (page_end_time - descr->start_time) / (entries - 1);
+ current_position_time = descr->start_time + position * dt;
+ } else {
+ current_position_time = descr->start_time;
+ }
+ handle->position = position;
+ handle->now = current_position_time / USEC_PER_SEC;
+/* fatal_assert(handle->now >= rrdimm_handle->start_time && handle->now <= rrdimm_handle->end_time);
+ The above assertion is an approximation and needs to take update_every into account */
+ if (unlikely(handle->now >= rrdimm_handle->end_time)) {
+ /* next calls will not load any more metrics */
+ handle->next_page_time = INVALID_TIME;
+ }
+ *current_time = handle->now;
+ return ret;
+
+no_more_metrics:
+ handle->next_page_time = INVALID_TIME;
+ return SN_EMPTY_SLOT;
+}
+
+int rrdeng_load_metric_is_finished(struct rrddim_query_handle *rrdimm_handle)
+{
+ struct rrdeng_query_handle *handle;
+
+ handle = &rrdimm_handle->rrdeng;
+ return (INVALID_TIME == handle->next_page_time);
+}
+
+/*
+ * Releases the database reference from the handle for loading metrics.
+ */
+void rrdeng_load_metric_finalize(struct rrddim_query_handle *rrdimm_handle)
+{
+ struct rrdeng_query_handle *handle;
+ struct rrdengine_instance *ctx;
+ struct rrdeng_page_descr *descr;
+
+ handle = &rrdimm_handle->rrdeng;
+ ctx = handle->ctx;
+ descr = handle->descr;
+ if (descr) {
+#ifdef NETDATA_INTERNAL_CHECKS
+ rrd_stat_atomic_add(&ctx->stats.metric_API_consumers, -1);
+#endif
+ pg_cache_put(ctx, descr);
+ }
+}
+
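+/*
+ * Illustrative read loop, a sketch assuming the caller owns an RRDDIM with an
+ * initialized page index; not part of the engine API:
+ */
+static void example_dump_dimension(RRDDIM *rd, time_t start_time, time_t end_time)
+{
+    struct rrddim_query_handle handle;
+    storage_number n;
+    time_t t;
+
+    rrdeng_load_metric_init(rd, &handle, start_time, end_time);
+    while (!rrdeng_load_metric_is_finished(&handle)) {
+        n = rrdeng_load_metric_next(&handle, &t);
+        if (SN_EMPTY_SLOT == n)
+            break; /* no more points in the time range */
+        /* consume the point (t, n) here */
+    }
+    rrdeng_load_metric_finalize(&handle);
+}
+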
+time_t rrdeng_metric_latest_time(RRDDIM *rd)
+{
+ struct pg_cache_page_index *page_index;
+
+ page_index = rd->state->page_index;
+
+ return page_index->latest_time / USEC_PER_SEC;
+}
+time_t rrdeng_metric_oldest_time(RRDDIM *rd)
+{
+ struct pg_cache_page_index *page_index;
+
+ page_index = rd->state->page_index;
+
+ return page_index->oldest_time / USEC_PER_SEC;
+}
+
+/* Also gets a reference for the page */
+void *rrdeng_create_page(struct rrdengine_instance *ctx, uuid_t *id, struct rrdeng_page_descr **ret_descr)
+{
+ struct rrdeng_page_descr *descr;
+ struct page_cache_descr *pg_cache_descr;
+ void *page;
+ /* TODO: check maximum number of pages in page cache limit */
+
+ descr = pg_cache_create_descr();
+ descr->id = id; /* TODO: add page type: metric, log, something? */
+    page = mallocz(RRDENG_BLOCK_SIZE); /* TODO: add page size */
+ rrdeng_page_descr_mutex_lock(ctx, descr);
+ pg_cache_descr = descr->pg_cache_descr;
+ pg_cache_descr->page = page;
+ pg_cache_descr->flags = RRD_PAGE_DIRTY /*| RRD_PAGE_LOCKED */ | RRD_PAGE_POPULATED /* | BEING_COLLECTED */;
+ pg_cache_descr->refcnt = 1;
+
+ debug(D_RRDENGINE, "Created new page:");
+ if (unlikely(debug_flags & D_RRDENGINE))
+ print_page_cache_descr(descr);
+ rrdeng_page_descr_mutex_unlock(ctx, descr);
+ *ret_descr = descr;
+ return page;
+}
+
+/* The page must not be empty */
+void rrdeng_commit_page(struct rrdengine_instance *ctx, struct rrdeng_page_descr *descr,
+ Word_t page_correlation_id)
+{
+ struct page_cache *pg_cache = &ctx->pg_cache;
+ Pvoid_t *PValue;
+ unsigned nr_committed_pages;
+
+ if (unlikely(NULL == descr)) {
+ debug(D_RRDENGINE, "%s: page descriptor is NULL, page has already been force-committed.", __func__);
+ return;
+ }
+ fatal_assert(descr->page_length);
+
+ uv_rwlock_wrlock(&pg_cache->committed_page_index.lock);
+ PValue = JudyLIns(&pg_cache->committed_page_index.JudyL_array, page_correlation_id, PJE0);
+ *PValue = descr;
+ nr_committed_pages = ++pg_cache->committed_page_index.nr_committed_pages;
+ uv_rwlock_wrunlock(&pg_cache->committed_page_index.lock);
+
+ if (nr_committed_pages >= pg_cache_hard_limit(ctx) / 2) {
+        /* over 50% of the page cache's hard limit is dirty (committed but not yet flushed) */
+
+ if (ctx->drop_metrics_under_page_cache_pressure &&
+ nr_committed_pages >= pg_cache_committed_hard_limit(ctx)) {
+ /* 100% of pages are dirty */
+ struct rrdeng_cmd cmd;
+
+ cmd.opcode = RRDENG_INVALIDATE_OLDEST_MEMORY_PAGE;
+ rrdeng_enq_cmd(&ctx->worker_config, &cmd);
+ } else {
+ if (0 == (unsigned long) ctx->stats.pg_cache_over_half_dirty_events) {
+ /* only print the first time */
+ errno = 0;
+ error("Failed to flush dirty buffers quickly enough in dbengine instance \"%s\". "
+ "Metric data at risk of not being stored in the database, "
+ "please reduce disk load or use a faster disk.", ctx->dbfiles_path);
+ }
+ rrd_stat_atomic_add(&ctx->stats.pg_cache_over_half_dirty_events, 1);
+ rrd_stat_atomic_add(&global_pg_cache_over_half_dirty_events, 1);
+ }
+ }
+
+ pg_cache_put(ctx, descr);
+}
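+
+/*
+ * Usage sketch (illustrative only): a writer pairs rrdeng_create_page() with
+ * rrdeng_commit_page(); the uuid, the fill loop and correlation_id below are
+ * hypothetical.
+ *
+ *    struct rrdeng_page_descr *descr;
+ *    storage_number *page = rrdeng_create_page(ctx, &uuid, &descr);
+ *    // ... fill page[] and set descr->page_length, start_time, end_time ...
+ *    rrdeng_commit_page(ctx, descr, correlation_id); // queues for flushing, drops the reference
+ */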
+
+/* Gets a reference for the page */
+void *rrdeng_get_latest_page(struct rrdengine_instance *ctx, uuid_t *id, void **handle)
+{
+ struct rrdeng_page_descr *descr;
+ struct page_cache_descr *pg_cache_descr;
+
+ debug(D_RRDENGINE, "Reading existing page:");
+ descr = pg_cache_lookup(ctx, NULL, id, INVALID_TIME);
+ if (NULL == descr) {
+ *handle = NULL;
+
+ return NULL;
+ }
+ *handle = descr;
+ pg_cache_descr = descr->pg_cache_descr;
+
+ return pg_cache_descr->page;
+}
+
+/* Gets a reference for the page */
+void *rrdeng_get_page(struct rrdengine_instance *ctx, uuid_t *id, usec_t point_in_time, void **handle)
+{
+ struct rrdeng_page_descr *descr;
+ struct page_cache_descr *pg_cache_descr;
+
+ debug(D_RRDENGINE, "Reading existing page:");
+ descr = pg_cache_lookup(ctx, NULL, id, point_in_time);
+ if (NULL == descr) {
+ *handle = NULL;
+
+ return NULL;
+ }
+ *handle = descr;
+ pg_cache_descr = descr->pg_cache_descr;
+
+ return pg_cache_descr->page;
+}
+
+/*
+ * Gathers Database Engine statistics.
+ * Be careful when modifying this function:
+ * do not change the indices of the statistics, or user code will break,
+ * and do not exceed RRDENG_NR_STATS entries, or it will crash.
+ */
+void rrdeng_get_37_statistics(struct rrdengine_instance *ctx, unsigned long long *array)
+{
+ if (ctx == NULL)
+ return;
+
+ struct page_cache *pg_cache = &ctx->pg_cache;
+
+ array[0] = (uint64_t)ctx->stats.metric_API_producers;
+ array[1] = (uint64_t)ctx->stats.metric_API_consumers;
+ array[2] = (uint64_t)pg_cache->page_descriptors;
+ array[3] = (uint64_t)pg_cache->populated_pages;
+ array[4] = (uint64_t)pg_cache->committed_page_index.nr_committed_pages;
+ array[5] = (uint64_t)ctx->stats.pg_cache_insertions;
+ array[6] = (uint64_t)ctx->stats.pg_cache_deletions;
+ array[7] = (uint64_t)ctx->stats.pg_cache_hits;
+ array[8] = (uint64_t)ctx->stats.pg_cache_misses;
+ array[9] = (uint64_t)ctx->stats.pg_cache_backfills;
+ array[10] = (uint64_t)ctx->stats.pg_cache_evictions;
+ array[11] = (uint64_t)ctx->stats.before_compress_bytes;
+ array[12] = (uint64_t)ctx->stats.after_compress_bytes;
+ array[13] = (uint64_t)ctx->stats.before_decompress_bytes;
+ array[14] = (uint64_t)ctx->stats.after_decompress_bytes;
+ array[15] = (uint64_t)ctx->stats.io_write_bytes;
+ array[16] = (uint64_t)ctx->stats.io_write_requests;
+ array[17] = (uint64_t)ctx->stats.io_read_bytes;
+ array[18] = (uint64_t)ctx->stats.io_read_requests;
+ array[19] = (uint64_t)ctx->stats.io_write_extent_bytes;
+ array[20] = (uint64_t)ctx->stats.io_write_extents;
+ array[21] = (uint64_t)ctx->stats.io_read_extent_bytes;
+ array[22] = (uint64_t)ctx->stats.io_read_extents;
+ array[23] = (uint64_t)ctx->stats.datafile_creations;
+ array[24] = (uint64_t)ctx->stats.datafile_deletions;
+ array[25] = (uint64_t)ctx->stats.journalfile_creations;
+ array[26] = (uint64_t)ctx->stats.journalfile_deletions;
+ array[27] = (uint64_t)ctx->stats.page_cache_descriptors;
+ array[28] = (uint64_t)ctx->stats.io_errors;
+ array[29] = (uint64_t)ctx->stats.fs_errors;
+ array[30] = (uint64_t)global_io_errors;
+ array[31] = (uint64_t)global_fs_errors;
+ array[32] = (uint64_t)rrdeng_reserved_file_descriptors;
+ array[33] = (uint64_t)ctx->stats.pg_cache_over_half_dirty_events;
+ array[34] = (uint64_t)global_pg_cache_over_half_dirty_events;
+ array[35] = (uint64_t)ctx->stats.flushing_pressure_page_deletions;
+ array[36] = (uint64_t)global_flushing_pressure_page_deletions;
+ fatal_assert(RRDENG_NR_STATS == 37);
+}
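+
+/*
+ * Usage sketch (illustrative only): callers pass an array of at least
+ * RRDENG_NR_STATS (37) elements and read the fixed slots by index, e.g.
+ *
+ *    unsigned long long stats[RRDENG_NR_STATS];
+ *    rrdeng_get_37_statistics(ctx, stats);
+ *    // stats[7] = page cache hits, stats[8] = page cache misses (see the table above)
+ */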
+
+/* Releases reference to page */
+void rrdeng_put_page(struct rrdengine_instance *ctx, void *handle)
+{
+ (void)ctx;
+ pg_cache_put(ctx, (struct rrdeng_page_descr *)handle);
+}
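+
+/*
+ * Usage sketch (illustrative only): every successful rrdeng_get_page() or
+ * rrdeng_get_latest_page() must be balanced by rrdeng_put_page(); uuid and
+ * point_in_time are hypothetical.
+ *
+ *    void *handle;
+ *    void *page = rrdeng_get_page(ctx, &uuid, point_in_time, &handle);
+ *    if (NULL != page) {
+ *        // ... read the page ...
+ *        rrdeng_put_page(ctx, handle); // release the page cache reference
+ *    }
+ */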
+
+/*
+ * Returns 0 on success, negative on error
+ */
+int rrdeng_init(RRDHOST *host, struct rrdengine_instance **ctxp, char *dbfiles_path, unsigned page_cache_mb,
+ unsigned disk_space_mb)
+{
+ struct rrdengine_instance *ctx;
+ int error;
+ uint32_t max_open_files;
+
+ max_open_files = rlimit_nofile.rlim_cur / 4;
+
+ /* reserve RRDENG_FD_BUDGET_PER_INSTANCE file descriptors for this instance */
+ rrd_stat_atomic_add(&rrdeng_reserved_file_descriptors, RRDENG_FD_BUDGET_PER_INSTANCE);
+ if (rrdeng_reserved_file_descriptors > max_open_files) {
+ error(
+ "Exceeded the budget of available file descriptors (%u/%u), cannot create new dbengine instance.",
+ (unsigned)rrdeng_reserved_file_descriptors, (unsigned)max_open_files);
+
+ rrd_stat_atomic_add(&global_fs_errors, 1);
+ rrd_stat_atomic_add(&rrdeng_reserved_file_descriptors, -RRDENG_FD_BUDGET_PER_INSTANCE);
+ return UV_EMFILE;
+ }
+
+ if (NULL == ctxp) {
+ ctx = &multidb_ctx;
+ memset(ctx, 0, sizeof(*ctx));
+ } else {
+ *ctxp = ctx = callocz(1, sizeof(*ctx));
+ }
+ ctx->global_compress_alg = RRD_LZ4;
+ if (page_cache_mb < RRDENG_MIN_PAGE_CACHE_SIZE_MB)
+ page_cache_mb = RRDENG_MIN_PAGE_CACHE_SIZE_MB;
+ ctx->max_cache_pages = page_cache_mb * (1048576LU / RRDENG_BLOCK_SIZE);
+ /* try to keep 5% of the page cache free */
+ ctx->cache_pages_low_watermark = (ctx->max_cache_pages * 95LLU) / 100;
+ if (disk_space_mb < RRDENG_MIN_DISK_SPACE_MB)
+ disk_space_mb = RRDENG_MIN_DISK_SPACE_MB;
+ ctx->max_disk_space = disk_space_mb * 1048576LLU;
+ strncpyz(ctx->dbfiles_path, dbfiles_path, sizeof(ctx->dbfiles_path) - 1);
+ ctx->dbfiles_path[sizeof(ctx->dbfiles_path) - 1] = '\0';
+ if (NULL == host)
+ strncpyz(ctx->machine_guid, registry_get_this_machine_guid(), GUID_LEN);
+ else
+ strncpyz(ctx->machine_guid, host->machine_guid, GUID_LEN);
+
+ ctx->drop_metrics_under_page_cache_pressure = rrdeng_drop_metrics_under_page_cache_pressure;
+ ctx->metric_API_max_producers = 0;
+ ctx->quiesce = NO_QUIESCE;
+ ctx->metalog_ctx = NULL; /* only set this after the metadata log has finished initializing */
+ ctx->host = host;
+
+ memset(&ctx->worker_config, 0, sizeof(ctx->worker_config));
+ ctx->worker_config.ctx = ctx;
+ init_page_cache(ctx);
+ init_commit_log(ctx);
+ error = init_rrd_files(ctx);
+ if (error) {
+ goto error_after_init_rrd_files;
+ }
+
+ init_completion(&ctx->rrdengine_completion);
+ fatal_assert(0 == uv_thread_create(&ctx->worker_config.thread, rrdeng_worker, &ctx->worker_config));
+ /* wait for worker thread to initialize */
+ wait_for_completion(&ctx->rrdengine_completion);
+ destroy_completion(&ctx->rrdengine_completion);
+ uv_thread_set_name_np(ctx->worker_config.thread, "DBENGINE");
+ if (ctx->worker_config.error) {
+ goto error_after_rrdeng_worker;
+ }
+ error = metalog_init(ctx);
+ if (error) {
+ error("Failed to initialize metadata log file event loop.");
+ goto error_after_rrdeng_worker;
+ }
+
+ return 0;
+
+error_after_rrdeng_worker:
+ finalize_rrd_files(ctx);
+error_after_init_rrd_files:
+ free_page_cache(ctx);
+ if (ctx != &multidb_ctx) {
+ freez(ctx);
+ *ctxp = NULL;
+ }
+ rrd_stat_atomic_add(&rrdeng_reserved_file_descriptors, -RRDENG_FD_BUDGET_PER_INSTANCE);
+ return UV_EIO;
+}
+
+/*
+ * Returns 0 on success, 1 on error
+ */
+int rrdeng_exit(struct rrdengine_instance *ctx)
+{
+ struct rrdeng_cmd cmd;
+
+ if (NULL == ctx) {
+ return 1;
+ }
+
+ /* TODO: add page to page cache */
+ cmd.opcode = RRDENG_SHUTDOWN;
+ rrdeng_enq_cmd(&ctx->worker_config, &cmd);
+
+ fatal_assert(0 == uv_thread_join(&ctx->worker_config.thread));
+
+ finalize_rrd_files(ctx);
+ //metalog_exit(ctx->metalog_ctx);
+ free_page_cache(ctx);
+
+ if (ctx != &multidb_ctx) {
+ freez(ctx);
+ }
+ rrd_stat_atomic_add(&rrdeng_reserved_file_descriptors, -RRDENG_FD_BUDGET_PER_INSTANCE);
+ return 0;
+}
+
+void rrdeng_prepare_exit(struct rrdengine_instance *ctx)
+{
+ struct rrdeng_cmd cmd;
+
+ if (NULL == ctx) {
+ return;
+ }
+
+ init_completion(&ctx->rrdengine_completion);
+ cmd.opcode = RRDENG_QUIESCE;
+ rrdeng_enq_cmd(&ctx->worker_config, &cmd);
+
+ /* wait for dbengine to quiesce */
+ wait_for_completion(&ctx->rrdengine_completion);
+ destroy_completion(&ctx->rrdengine_completion);
+
+ //metalog_prepare_exit(ctx->metalog_ctx);
+}
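+
+/*
+ * Lifecycle sketch (illustrative only): the expected call order for one
+ * dbengine instance; the path and sizes below are arbitrary example values.
+ *
+ *    struct rrdengine_instance *ctx;
+ *    if (rrdeng_init(host, &ctx, "/var/cache/netdata/dbengine", 32, 256))
+ *        return; // failed, nothing to clean up
+ *    // ... store and load metrics ...
+ *    rrdeng_prepare_exit(ctx); // quiesce: flush dirty pages to disk
+ *    rrdeng_exit(ctx);         // join the worker thread and free resources
+ */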
+
diff --git a/database/engine/rrdengineapi.h b/database/engine/rrdengineapi.h
new file mode 100644
index 0000000..41375b9
--- /dev/null
+++ b/database/engine/rrdengineapi.h
@@ -0,0 +1,63 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_RRDENGINEAPI_H
+#define NETDATA_RRDENGINEAPI_H
+
+#include "rrdengine.h"
+
+#define RRDENG_MIN_PAGE_CACHE_SIZE_MB (8)
+#define RRDENG_MIN_DISK_SPACE_MB (64)
+
+#define RRDENG_NR_STATS (37)
+
+#define RRDENG_FD_BUDGET_PER_INSTANCE (50)
+
+extern int default_rrdeng_page_cache_mb;
+extern int default_rrdeng_disk_quota_mb;
+extern int default_multidb_disk_quota_mb;
+extern uint8_t rrdeng_drop_metrics_under_page_cache_pressure;
+extern struct rrdengine_instance multidb_ctx;
+
+struct rrdeng_region_info {
+ time_t start_time;
+ int update_every;
+ unsigned points;
+};
+
+extern void *rrdeng_create_page(struct rrdengine_instance *ctx, uuid_t *id, struct rrdeng_page_descr **ret_descr);
+extern void rrdeng_commit_page(struct rrdengine_instance *ctx, struct rrdeng_page_descr *descr,
+ Word_t page_correlation_id);
+extern void *rrdeng_get_latest_page(struct rrdengine_instance *ctx, uuid_t *id, void **handle);
+extern void *rrdeng_get_page(struct rrdengine_instance *ctx, uuid_t *id, usec_t point_in_time, void **handle);
+extern void rrdeng_put_page(struct rrdengine_instance *ctx, void *handle);
+
+extern void rrdeng_generate_legacy_uuid(const char *dim_id, char *chart_id, uuid_t *ret_uuid);
+extern void rrdeng_convert_legacy_uuid_to_multihost(char machine_guid[GUID_LEN + 1], uuid_t *legacy_uuid,
+ uuid_t *ret_uuid);
+
+
+extern void rrdeng_metric_init(RRDDIM *rd, uuid_t *dim_uuid);
+extern void rrdeng_store_metric_init(RRDDIM *rd);
+extern void rrdeng_store_metric_flush_current_page(RRDDIM *rd);
+extern void rrdeng_store_metric_next(RRDDIM *rd, usec_t point_in_time, storage_number number);
+extern int rrdeng_store_metric_finalize(RRDDIM *rd);
+extern unsigned
+ rrdeng_variable_step_boundaries(RRDSET *st, time_t start_time, time_t end_time,
+ struct rrdeng_region_info **region_info_arrayp, unsigned *max_intervalp, struct context_param *context_param_list);
+extern void rrdeng_load_metric_init(RRDDIM *rd, struct rrddim_query_handle *rrdimm_handle,
+ time_t start_time, time_t end_time);
+extern storage_number rrdeng_load_metric_next(struct rrddim_query_handle *rrdimm_handle, time_t *current_time);
+extern int rrdeng_load_metric_is_finished(struct rrddim_query_handle *rrdimm_handle);
+extern void rrdeng_load_metric_finalize(struct rrddim_query_handle *rrdimm_handle);
+extern time_t rrdeng_metric_latest_time(RRDDIM *rd);
+extern time_t rrdeng_metric_oldest_time(RRDDIM *rd);
+extern void rrdeng_get_37_statistics(struct rrdengine_instance *ctx, unsigned long long *array);
+
+/* must call once before using anything */
+extern int rrdeng_init(RRDHOST *host, struct rrdengine_instance **ctxp, char *dbfiles_path, unsigned page_cache_mb,
+ unsigned disk_space_mb);
+
+extern int rrdeng_exit(struct rrdengine_instance *ctx);
+extern void rrdeng_prepare_exit(struct rrdengine_instance *ctx);
+
+#endif /* NETDATA_RRDENGINEAPI_H */
diff --git a/database/engine/rrdenginelib.c b/database/engine/rrdenginelib.c
new file mode 100644
index 0000000..287b86b
--- /dev/null
+++ b/database/engine/rrdenginelib.c
@@ -0,0 +1,294 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+#include "rrdengine.h"
+
+#define BUFSIZE (512)
+
+/* Caller must hold descriptor lock */
+void print_page_cache_descr(struct rrdeng_page_descr *descr)
+{
+ struct page_cache_descr *pg_cache_descr = descr->pg_cache_descr;
+ char uuid_str[UUID_STR_LEN];
+ char str[BUFSIZE + 1];
+ int pos = 0;
+
+ uuid_unparse_lower(*descr->id, uuid_str);
+ pos += snprintfz(str, BUFSIZE - pos, "page(%p) id=%s\n"
+ "--->len:%"PRIu32" time:%"PRIu64"->%"PRIu64" xt_offset:",
+ pg_cache_descr->page, uuid_str,
+ descr->page_length,
+ (uint64_t)descr->start_time,
+ (uint64_t)descr->end_time);
+ if (!descr->extent) {
+ pos += snprintfz(str + pos, BUFSIZE - pos, "N/A");
+ } else {
+ pos += snprintfz(str + pos, BUFSIZE - pos, "%"PRIu64, descr->extent->offset);
+ }
+
+ snprintfz(str + pos, BUFSIZE - pos, " flags:0x%2.2lX refcnt:%u\n\n", pg_cache_descr->flags, pg_cache_descr->refcnt);
+ debug(D_RRDENGINE, "%s", str);
+}
+
+void print_page_descr(struct rrdeng_page_descr *descr)
+{
+ char uuid_str[UUID_STR_LEN];
+ char str[BUFSIZE + 1];
+ int pos = 0;
+
+ uuid_unparse_lower(*descr->id, uuid_str);
+ pos += snprintfz(str, BUFSIZE - pos, "id=%s\n"
+ "--->len:%"PRIu32" time:%"PRIu64"->%"PRIu64" xt_offset:",
+ uuid_str,
+ descr->page_length,
+ (uint64_t)descr->start_time,
+ (uint64_t)descr->end_time);
+ if (!descr->extent) {
+ pos += snprintfz(str + pos, BUFSIZE - pos, "N/A");
+ } else {
+ pos += snprintfz(str + pos, BUFSIZE - pos, "%"PRIu64, descr->extent->offset);
+ }
+ snprintfz(str + pos, BUFSIZE - pos, "\n\n");
+ fputs(str, stderr);
+}
+
+int check_file_properties(uv_file file, uint64_t *file_size, size_t min_size)
+{
+ int ret;
+ uv_fs_t req;
+ uv_stat_t* s;
+
+ ret = uv_fs_fstat(NULL, &req, file, NULL);
+ if (ret < 0) {
+ fatal("uv_fs_fstat: %s\n", uv_strerror(ret));
+ }
+ fatal_assert(req.result == 0);
+ s = req.ptr;
+ if (!(s->st_mode & S_IFREG)) {
+ error("Not a regular file.\n");
+ uv_fs_req_cleanup(&req);
+ return UV_EINVAL;
+ }
+ if (s->st_size < min_size) {
+ error("File length is too short.\n");
+ uv_fs_req_cleanup(&req);
+ return UV_EINVAL;
+ }
+ *file_size = s->st_size;
+ uv_fs_req_cleanup(&req);
+
+ return 0;
+}
+
+/**
+ * Open file for I/O.
+ *
+ * @param path The full path of the file.
+ * @param flags Same flags as the open() system call uses.
+ * @param file On success sets (*file) to be the uv_file that was opened.
+ * @param direct Tries to open a file in direct I/O mode when direct=1, falls back to buffered mode if not possible.
+ * @return A negative UV error number on failure, or the non-negative file descriptor on success.
+ */
+int open_file_for_io(char *path, int flags, uv_file *file, int direct)
+{
+ uv_fs_t req;
+ int fd = -1, current_flags;
+
+ fatal_assert(0 == direct || 1 == direct);
+ for ( ; direct >= 0 ; --direct) {
+#ifdef __APPLE__
+ /* Apple OS does not support O_DIRECT */
+ direct = 0;
+#endif
+ current_flags = flags;
+ if (direct) {
+ current_flags |= O_DIRECT;
+ }
+ fd = uv_fs_open(NULL, &req, path, current_flags, S_IRUSR | S_IWUSR, NULL);
+ if (fd < 0) {
+ if ((direct) && (UV_EINVAL == fd)) {
+ error("File \"%s\" does not support direct I/O, falling back to buffered I/O.", path);
+ } else {
+ error("Failed to open file \"%s\".", path);
+ --direct; /* break the loop */
+ }
+ } else {
+ fatal_assert(req.result >= 0);
+ *file = req.result;
+#ifdef __APPLE__
+ info("Disabling OS X caching for file \"%s\".", path);
+ fcntl(fd, F_NOCACHE, 1);
+#endif
+ --direct; /* break the loop */
+ }
+ uv_fs_req_cleanup(&req);
+ }
+
+ return fd;
+}
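+
+/*
+ * Usage sketch (illustrative only): request direct I/O and let the helper
+ * fall back to buffered I/O when the filesystem rejects O_DIRECT; the path
+ * is hypothetical.
+ *
+ *    uv_file file;
+ *    int ret = open_file_for_io("/tmp/example.db", O_CREAT | O_RDWR, &file, 1);
+ *    if (ret < 0)
+ *        return ret; // a UV_* error code
+ */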
+
+char *get_rrdeng_statistics(struct rrdengine_instance *ctx, char *str, size_t size)
+{
+ struct page_cache *pg_cache;
+
+ pg_cache = &ctx->pg_cache;
+ snprintfz(str, size,
+ "metric_API_producers: %ld\n"
+ "metric_API_consumers: %ld\n"
+ "page_cache_total_pages: %ld\n"
+ "page_cache_descriptors: %ld\n"
+ "page_cache_populated_pages: %ld\n"
+ "page_cache_committed_pages: %ld\n"
+ "page_cache_insertions: %ld\n"
+ "page_cache_deletions: %ld\n"
+ "page_cache_hits: %ld\n"
+ "page_cache_misses: %ld\n"
+ "page_cache_backfills: %ld\n"
+ "page_cache_evictions: %ld\n"
+ "compress_before_bytes: %ld\n"
+ "compress_after_bytes: %ld\n"
+ "decompress_before_bytes: %ld\n"
+ "decompress_after_bytes: %ld\n"
+ "io_write_bytes: %ld\n"
+ "io_write_requests: %ld\n"
+ "io_read_bytes: %ld\n"
+ "io_read_requests: %ld\n"
+ "io_write_extent_bytes: %ld\n"
+ "io_write_extents: %ld\n"
+ "io_read_extent_bytes: %ld\n"
+ "io_read_extents: %ld\n"
+ "datafile_creations: %ld\n"
+ "datafile_deletions: %ld\n"
+ "journalfile_creations: %ld\n"
+ "journalfile_deletions: %ld\n"
+ "io_errors: %ld\n"
+ "fs_errors: %ld\n"
+ "global_io_errors: %ld\n"
+ "global_fs_errors: %ld\n"
+ "rrdeng_reserved_file_descriptors: %ld\n"
+ "pg_cache_over_half_dirty_events: %ld\n"
+ "global_pg_cache_over_half_dirty_events: %ld\n"
+ "flushing_pressure_page_deletions: %ld\n"
+ "global_flushing_pressure_page_deletions: %ld\n",
+ (long)ctx->stats.metric_API_producers,
+ (long)ctx->stats.metric_API_consumers,
+ (long)pg_cache->page_descriptors,
+ (long)ctx->stats.page_cache_descriptors,
+ (long)pg_cache->populated_pages,
+ (long)pg_cache->committed_page_index.nr_committed_pages,
+ (long)ctx->stats.pg_cache_insertions,
+ (long)ctx->stats.pg_cache_deletions,
+ (long)ctx->stats.pg_cache_hits,
+ (long)ctx->stats.pg_cache_misses,
+ (long)ctx->stats.pg_cache_backfills,
+ (long)ctx->stats.pg_cache_evictions,
+ (long)ctx->stats.before_compress_bytes,
+ (long)ctx->stats.after_compress_bytes,
+ (long)ctx->stats.before_decompress_bytes,
+ (long)ctx->stats.after_decompress_bytes,
+ (long)ctx->stats.io_write_bytes,
+ (long)ctx->stats.io_write_requests,
+ (long)ctx->stats.io_read_bytes,
+ (long)ctx->stats.io_read_requests,
+ (long)ctx->stats.io_write_extent_bytes,
+ (long)ctx->stats.io_write_extents,
+ (long)ctx->stats.io_read_extent_bytes,
+ (long)ctx->stats.io_read_extents,
+ (long)ctx->stats.datafile_creations,
+ (long)ctx->stats.datafile_deletions,
+ (long)ctx->stats.journalfile_creations,
+ (long)ctx->stats.journalfile_deletions,
+ (long)ctx->stats.io_errors,
+ (long)ctx->stats.fs_errors,
+ (long)global_io_errors,
+ (long)global_fs_errors,
+ (long)rrdeng_reserved_file_descriptors,
+ (long)ctx->stats.pg_cache_over_half_dirty_events,
+ (long)global_pg_cache_over_half_dirty_events,
+ (long)ctx->stats.flushing_pressure_page_deletions,
+ (long)global_flushing_pressure_page_deletions
+ );
+ return str;
+}
+
+int is_legacy_child(const char *machine_guid)
+{
+ uuid_t uuid;
+ char dbengine_file[FILENAME_MAX+1];
+
+ if (unlikely(!strcmp(machine_guid, "unittest-dbengine") || !strcmp(machine_guid, "dbengine-dataset") ||
+ !strcmp(machine_guid, "dbengine-stress-test"))) {
+ return 1;
+ }
+ if (!uuid_parse(machine_guid, uuid)) {
+ uv_fs_t stat_req;
+ snprintfz(dbengine_file, FILENAME_MAX, "%s/%s/dbengine", netdata_configured_cache_dir, machine_guid);
+ int rc = uv_fs_stat(NULL, &stat_req, dbengine_file, NULL);
+ if (likely(rc == 0 && ((stat_req.statbuf.st_mode & S_IFMT) == S_IFDIR))) {
+ //info("Found legacy engine folder \"%s\"", dbengine_file);
+ return 1;
+ }
+ }
+ return 0;
+}
+
+int count_legacy_children(char *dbfiles_path)
+{
+ int ret;
+ uv_fs_t req;
+ uv_dirent_t dent;
+ int legacy_engines = 0;
+
+ ret = uv_fs_scandir(NULL, &req, dbfiles_path, 0, NULL);
+ if (ret < 0) {
+ uv_fs_req_cleanup(&req);
+ error("uv_fs_scandir(%s): %s", dbfiles_path, uv_strerror(ret));
+ return ret;
+ }
+
+ while(UV_EOF != uv_fs_scandir_next(&req, &dent)) {
+ if (dent.type == UV_DIRENT_DIR) {
+ if (is_legacy_child(dent.name))
+ legacy_engines++;
+ }
+ }
+ uv_fs_req_cleanup(&req);
+ return legacy_engines;
+}
+
+int compute_multidb_diskspace(void)
+{
+ char multidb_disk_space_file[FILENAME_MAX + 1];
+ FILE *fp;
+ int computed_multidb_disk_quota_mb = -1;
+
+ snprintfz(multidb_disk_space_file, FILENAME_MAX, "%s/dbengine_multihost_size", netdata_configured_varlib_dir);
+ fp = fopen(multidb_disk_space_file, "r");
+ if (likely(fp)) {
+ int rc = fscanf(fp, "%d", &computed_multidb_disk_quota_mb);
+ fclose(fp);
+ if (unlikely(rc != 1 || computed_multidb_disk_quota_mb < RRDENG_MIN_DISK_SPACE_MB)) {
+ errno = 0;
+ error("File '%s' contains invalid input, it will be rebuild", multidb_disk_space_file);
+ computed_multidb_disk_quota_mb = -1;
+ }
+ }
+
+ if (computed_multidb_disk_quota_mb == -1) {
+ int rc = count_legacy_children(netdata_configured_cache_dir);
+ if (likely(rc >= 0)) {
+ computed_multidb_disk_quota_mb = (rc + 1) * default_rrdeng_disk_quota_mb;
+ info("Found %d legacy dbengines, setting multidb diskspace to %dMB", rc, computed_multidb_disk_quota_mb);
+
+ fp = fopen(multidb_disk_space_file, "w");
+ if (likely(fp)) {
+ fprintf(fp, "%d", computed_multidb_disk_quota_mb);
+ info("Created file '%s' to store the computed value", multidb_disk_space_file);
+ fclose(fp);
+ } else
+ error("Failed to store the default multidb disk quota size on '%s'", multidb_disk_space_file);
+ }
+ else
+ computed_multidb_disk_quota_mb = default_rrdeng_disk_quota_mb;
+ }
+
+ return computed_multidb_disk_quota_mb;
+}
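+
+/*
+ * Worked example (illustrative): if count_legacy_children() finds 3 legacy
+ * dbengine directories and default_rrdeng_disk_quota_mb is 256, the computed
+ * multihost quota is (3 + 1) * 256 = 1024MB, which is then persisted to the
+ * dbengine_multihost_size file for subsequent startups.
+ */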
diff --git a/database/engine/rrdenginelib.h b/database/engine/rrdenginelib.h
new file mode 100644
index 0000000..ebab93c
--- /dev/null
+++ b/database/engine/rrdenginelib.h
@@ -0,0 +1,144 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_RRDENGINELIB_H
+#define NETDATA_RRDENGINELIB_H
+
+/* Forward declarations */
+struct rrdeng_page_descr;
+struct rrdengine_instance;
+
+#define STR_HELPER(x) #x
+#define STR(x) STR_HELPER(x)
+
+#define BITS_PER_ULONG (sizeof(unsigned long) * 8)
+
+#ifndef UUID_STR_LEN
+#define UUID_STR_LEN (37)
+#endif
+
+/* Taken from linux kernel */
+#define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)]))
+
+#define ALIGN_BYTES_FLOOR(x) (((x) / RRDENG_BLOCK_SIZE) * RRDENG_BLOCK_SIZE)
+#define ALIGN_BYTES_CEILING(x) ((((x) + RRDENG_BLOCK_SIZE - 1) / RRDENG_BLOCK_SIZE) * RRDENG_BLOCK_SIZE)
+
+#define ROUND_USEC_TO_SEC(x) (((x) + USEC_PER_SEC / 2 - 1) / USEC_PER_SEC)
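+
+/*
+ * Worked examples (illustrative, assuming RRDENG_BLOCK_SIZE is 4096):
+ *    ALIGN_BYTES_FLOOR(5000)   == 4096
+ *    ALIGN_BYTES_CEILING(5000) == 8192
+ * and, with USEC_PER_SEC == 1000000:
+ *    ROUND_USEC_TO_SEC(2400000) == 2 and ROUND_USEC_TO_SEC(2600000) == 3
+ */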
+
+typedef uintptr_t rrdeng_stats_t;
+
+#ifdef __ATOMIC_RELAXED
+#define rrd_atomic_fetch_add(p, n) __atomic_fetch_add(p, n, __ATOMIC_RELAXED)
+#define rrd_atomic_add_fetch(p, n) __atomic_add_fetch(p, n, __ATOMIC_RELAXED)
+#else
+#define rrd_atomic_fetch_add(p, n) __sync_fetch_and_add(p, n)
+#define rrd_atomic_add_fetch(p, n) __sync_add_and_fetch(p, n)
+#endif
+
+#define rrd_stat_atomic_add(p, n) rrd_atomic_fetch_add(p, n)
+
+/* Returns the position of the first cleared bit (starting from the LSB), or -1 if no bit is cleared. */
+static inline int find_first_zero(unsigned x)
+{
+ return ffs((int)(~x)) - 1;
+}
+
+/* Starts from LSB. */
+static inline uint8_t check_bit(unsigned x, size_t pos)
+{
+ return !!(x & (1 << pos));
+}
+
+/* Starts from LSB. val is 0 or 1 */
+static inline void modify_bit(unsigned *x, unsigned pos, uint8_t val)
+{
+ switch(val) {
+ case 0:
+ *x &= ~(1U << pos);
+ break;
+ case 1:
+ *x |= 1U << pos;
+ break;
+ default:
+ error("modify_bit() called with invalid argument.");
+ break;
+ }
+}
+
+#define RRDENG_PATH_MAX (4096)
+
+/* returns old *ptr value */
+static inline unsigned long ulong_compare_and_swap(volatile unsigned long *ptr,
+ unsigned long oldval, unsigned long newval)
+{
+ return __sync_val_compare_and_swap(ptr, oldval, newval);
+}
+
+#ifndef O_DIRECT
+/* Workaround for OS X */
+#define O_DIRECT (0)
+#endif
+
+struct completion {
+ uv_mutex_t mutex;
+ uv_cond_t cond;
+ volatile unsigned completed;
+};
+
+static inline void init_completion(struct completion *p)
+{
+ p->completed = 0;
+ fatal_assert(0 == uv_cond_init(&p->cond));
+ fatal_assert(0 == uv_mutex_init(&p->mutex));
+}
+
+static inline void destroy_completion(struct completion *p)
+{
+ uv_cond_destroy(&p->cond);
+ uv_mutex_destroy(&p->mutex);
+}
+
+static inline void wait_for_completion(struct completion *p)
+{
+ uv_mutex_lock(&p->mutex);
+ while (0 == p->completed) {
+ uv_cond_wait(&p->cond, &p->mutex);
+ }
+ fatal_assert(1 == p->completed);
+ uv_mutex_unlock(&p->mutex);
+}
+
+static inline void complete(struct completion *p)
+{
+ uv_mutex_lock(&p->mutex);
+ p->completed = 1;
+ uv_mutex_unlock(&p->mutex);
+ uv_cond_broadcast(&p->cond);
+}
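+
+/*
+ * Usage sketch (illustrative only): a completion is a one-shot rendezvous;
+ * one context blocks in wait_for_completion() until another calls complete().
+ *
+ *    struct completion c;
+ *    init_completion(&c);
+ *    // ... hand &c to a worker which calls complete(&c) when it is ready ...
+ *    wait_for_completion(&c); // blocks until the worker signals
+ *    destroy_completion(&c);
+ */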
+
+static inline int crc32cmp(void *crcp, uLong crc)
+{
+ return (*(uint32_t *)crcp != crc);
+}
+
+static inline void crc32set(void *crcp, uLong crc)
+{
+ *(uint32_t *)crcp = crc;
+}
+
+extern void print_page_cache_descr(struct rrdeng_page_descr *page_cache_descr);
+extern void print_page_descr(struct rrdeng_page_descr *descr);
+extern int check_file_properties(uv_file file, uint64_t *file_size, size_t min_size);
+extern int open_file_for_io(char *path, int flags, uv_file *file, int direct);
+static inline int open_file_direct_io(char *path, int flags, uv_file *file)
+{
+ return open_file_for_io(path, flags, file, 1);
+}
+static inline int open_file_buffered_io(char *path, int flags, uv_file *file)
+{
+ return open_file_for_io(path, flags, file, 0);
+}
+extern char *get_rrdeng_statistics(struct rrdengine_instance *ctx, char *str, size_t size);
+extern int compute_multidb_diskspace(void);
+extern int is_legacy_child(const char *machine_guid);
+
+#endif /* NETDATA_RRDENGINELIB_H */ \ No newline at end of file
diff --git a/database/engine/rrdenglocking.c b/database/engine/rrdenglocking.c
new file mode 100644
index 0000000..a23abf3
--- /dev/null
+++ b/database/engine/rrdenglocking.c
@@ -0,0 +1,241 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+#include "rrdengine.h"
+
+struct page_cache_descr *rrdeng_create_pg_cache_descr(struct rrdengine_instance *ctx)
+{
+ struct page_cache_descr *pg_cache_descr;
+
+ pg_cache_descr = mallocz(sizeof(*pg_cache_descr));
+ rrd_stat_atomic_add(&ctx->stats.page_cache_descriptors, 1);
+ pg_cache_descr->page = NULL;
+ pg_cache_descr->flags = 0;
+ pg_cache_descr->prev = pg_cache_descr->next = NULL;
+ pg_cache_descr->refcnt = 0;
+ pg_cache_descr->waiters = 0;
+ fatal_assert(0 == uv_cond_init(&pg_cache_descr->cond));
+ fatal_assert(0 == uv_mutex_init(&pg_cache_descr->mutex));
+
+ return pg_cache_descr;
+}
+
+void rrdeng_destroy_pg_cache_descr(struct rrdengine_instance *ctx, struct page_cache_descr *pg_cache_descr)
+{
+ uv_cond_destroy(&pg_cache_descr->cond);
+ uv_mutex_destroy(&pg_cache_descr->mutex);
+ freez(pg_cache_descr);
+ rrd_stat_atomic_add(&ctx->stats.page_cache_descriptors, -1);
+}
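+
+/*
+ * The pg_cache_descr_state word encodes (users << PG_CACHE_DESCR_SHIFT) | flags,
+ * where flags combines PG_CACHE_DESCR_LOCKED, PG_CACHE_DESCR_ALLOCATED and
+ * PG_CACHE_DESCR_DESTROY. For example (illustrative), a state equal to
+ * (2 << PG_CACHE_DESCR_SHIFT) | PG_CACHE_DESCR_ALLOCATED means two users hold
+ * the descriptor and it is allocated, but neither locked nor marked for
+ * destruction. Every transition below is attempted with
+ * ulong_compare_and_swap() and retried (spun) on failure.
+ */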
+
+/* also allocates page cache descriptor if missing */
+void rrdeng_page_descr_mutex_lock(struct rrdengine_instance *ctx, struct rrdeng_page_descr *descr)
+{
+ unsigned long old_state, old_users, new_state, ret_state;
+ struct page_cache_descr *pg_cache_descr = NULL;
+ uint8_t we_locked;
+
+ we_locked = 0;
+ while (1) { /* spin */
+ old_state = descr->pg_cache_descr_state;
+ old_users = old_state >> PG_CACHE_DESCR_SHIFT;
+
+ if (unlikely(we_locked)) {
+ fatal_assert(old_state & PG_CACHE_DESCR_LOCKED);
+ new_state = (1 << PG_CACHE_DESCR_SHIFT) | PG_CACHE_DESCR_ALLOCATED;
+ ret_state = ulong_compare_and_swap(&descr->pg_cache_descr_state, old_state, new_state);
+ if (old_state == ret_state) {
+ /* success */
+ break;
+ }
+ continue; /* spin */
+ }
+ if (old_state & PG_CACHE_DESCR_LOCKED) {
+ fatal_assert(0 == old_users);
+ continue; /* spin */
+ }
+ if (0 == old_state) {
+ /* no page cache descriptor has been allocated */
+
+ if (NULL == pg_cache_descr) {
+ pg_cache_descr = rrdeng_create_pg_cache_descr(ctx);
+ }
+ new_state = PG_CACHE_DESCR_LOCKED;
+ ret_state = ulong_compare_and_swap(&descr->pg_cache_descr_state, 0, new_state);
+ if (0 == ret_state) {
+ we_locked = 1;
+ descr->pg_cache_descr = pg_cache_descr;
+ pg_cache_descr->descr = descr;
+ pg_cache_descr = NULL; /* make sure we don't free pg_cache_descr */
+ /* retry */
+ continue;
+ }
+ continue; /* spin */
+ }
+ /* page cache descriptor is already allocated */
+ if (unlikely(!(old_state & PG_CACHE_DESCR_ALLOCATED))) {
+ fatal("Invalid page cache descriptor locking state:%#lX", old_state);
+ }
+ new_state = (old_users + 1) << PG_CACHE_DESCR_SHIFT;
+ new_state |= old_state & PG_CACHE_DESCR_FLAGS_MASK;
+
+ ret_state = ulong_compare_and_swap(&descr->pg_cache_descr_state, old_state, new_state);
+ if (old_state == ret_state) {
+ /* success */
+ break;
+ }
+ /* spin */
+ }
+
+ if (pg_cache_descr) {
+ rrdeng_destroy_pg_cache_descr(ctx, pg_cache_descr);
+ }
+ pg_cache_descr = descr->pg_cache_descr;
+ uv_mutex_lock(&pg_cache_descr->mutex);
+}
+
+void rrdeng_page_descr_mutex_unlock(struct rrdengine_instance *ctx, struct rrdeng_page_descr *descr)
+{
+ unsigned long old_state, new_state, ret_state, old_users;
+ struct page_cache_descr *pg_cache_descr, *delete_pg_cache_descr = NULL;
+ uint8_t we_locked;
+
+ uv_mutex_unlock(&descr->pg_cache_descr->mutex);
+
+ we_locked = 0;
+ while (1) { /* spin */
+ old_state = descr->pg_cache_descr_state;
+ old_users = old_state >> PG_CACHE_DESCR_SHIFT;
+
+ if (unlikely(we_locked)) {
+ fatal_assert(0 == old_users);
+
+ ret_state = ulong_compare_and_swap(&descr->pg_cache_descr_state, old_state, 0);
+ if (old_state == ret_state) {
+ /* success */
+ rrdeng_destroy_pg_cache_descr(ctx, delete_pg_cache_descr);
+ return;
+ }
+ continue; /* spin */
+ }
+ if (old_state & PG_CACHE_DESCR_LOCKED) {
+ fatal_assert(0 == old_users);
+ continue; /* spin */
+ }
+ fatal_assert(old_state & PG_CACHE_DESCR_ALLOCATED);
+ pg_cache_descr = descr->pg_cache_descr;
+ /* caller is the only page cache descriptor user and there are no pending references on the page */
+ if ((old_state & PG_CACHE_DESCR_DESTROY) && (1 == old_users) &&
+ !pg_cache_descr->flags && !pg_cache_descr->refcnt) {
+ fatal_assert(!pg_cache_descr->waiters);
+
+ new_state = PG_CACHE_DESCR_LOCKED;
+ ret_state = ulong_compare_and_swap(&descr->pg_cache_descr_state, old_state, new_state);
+ if (old_state == ret_state) {
+ we_locked = 1;
+ delete_pg_cache_descr = pg_cache_descr;
+ descr->pg_cache_descr = NULL;
+ /* retry */
+ continue;
+ }
+ continue; /* spin */
+ }
+ fatal_assert(old_users > 0);
+ new_state = (old_users - 1) << PG_CACHE_DESCR_SHIFT;
+ new_state |= old_state & PG_CACHE_DESCR_FLAGS_MASK;
+
+ ret_state = ulong_compare_and_swap(&descr->pg_cache_descr_state, old_state, new_state);
+ if (old_state == ret_state) {
+ /* success */
+ break;
+ }
+ /* spin */
+ }
+}
+
+/*
+ * Tries to deallocate the page cache descriptor. If it cannot, it postpones
+ * deallocation by setting the PG_CACHE_DESCR_DESTROY flag, which is eventually
+ * cleared by a different context after that context performs the deallocation.
+ */
+void rrdeng_try_deallocate_pg_cache_descr(struct rrdengine_instance *ctx, struct rrdeng_page_descr *descr)
+{
+ unsigned long old_state, new_state, ret_state, old_users;
+ struct page_cache_descr *pg_cache_descr = NULL;
+ uint8_t just_locked, can_free, must_unlock;
+
+ just_locked = 0;
+ can_free = 0;
+ must_unlock = 0;
+ while (1) { /* spin */
+ old_state = descr->pg_cache_descr_state;
+ old_users = old_state >> PG_CACHE_DESCR_SHIFT;
+
+ if (unlikely(just_locked)) {
+ fatal_assert(0 == old_users);
+
+ must_unlock = 1;
+ just_locked = 0;
+ /* Try deallocate if there are no pending references on the page */
+ if (!pg_cache_descr->flags && !pg_cache_descr->refcnt) {
+ fatal_assert(!pg_cache_descr->waiters);
+
+ descr->pg_cache_descr = NULL;
+ can_free = 1;
+ /* success */
+ continue;
+ }
+ continue; /* spin */
+ }
+ if (unlikely(must_unlock)) {
+ fatal_assert(0 == old_users);
+
+ if (can_free) {
+ /* success */
+ new_state = 0;
+ } else {
+ new_state = old_state | PG_CACHE_DESCR_DESTROY;
+ new_state &= ~PG_CACHE_DESCR_LOCKED;
+ }
+ ret_state = ulong_compare_and_swap(&descr->pg_cache_descr_state, old_state, new_state);
+ if (old_state == ret_state) {
+ /* unlocked */
+ if (can_free)
+ rrdeng_destroy_pg_cache_descr(ctx, pg_cache_descr);
+ return;
+ }
+ continue; /* spin */
+ }
+ if (!(old_state & PG_CACHE_DESCR_ALLOCATED)) {
+ /* don't do anything */
+ return;
+ }
+ if (old_state & PG_CACHE_DESCR_LOCKED) {
+ fatal_assert(0 == old_users);
+ continue; /* spin */
+ }
+ /* caller is the only page cache descriptor user */
+ if (0 == old_users) {
+ new_state = old_state | PG_CACHE_DESCR_LOCKED;
+ ret_state = ulong_compare_and_swap(&descr->pg_cache_descr_state, old_state, new_state);
+ if (old_state == ret_state) {
+ just_locked = 1;
+ pg_cache_descr = descr->pg_cache_descr;
+ /* retry */
+ continue;
+ }
+ continue; /* spin */
+ }
+ if (old_state & PG_CACHE_DESCR_DESTROY) {
+ /* don't do anything */
+ return;
+ }
+ /* plant PG_CACHE_DESCR_DESTROY so that other contexts eventually free the page cache descriptor */
+ new_state = old_state | PG_CACHE_DESCR_DESTROY;
+
+ ret_state = ulong_compare_and_swap(&descr->pg_cache_descr_state, old_state, new_state);
+ if (old_state == ret_state) {
+ /* success */
+ return;
+ }
+ /* spin */
+ }
+} \ No newline at end of file
diff --git a/database/engine/rrdenglocking.h b/database/engine/rrdenglocking.h
new file mode 100644
index 0000000..127ddc9
--- /dev/null
+++ b/database/engine/rrdenglocking.h
@@ -0,0 +1,17 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_RRDENGLOCKING_H
+#define NETDATA_RRDENGLOCKING_H
+
+#include "rrdengine.h"
+
+/* Forward declarations */
+struct page_cache_descr;
+
+extern struct page_cache_descr *rrdeng_create_pg_cache_descr(struct rrdengine_instance *ctx);
+extern void rrdeng_destroy_pg_cache_descr(struct rrdengine_instance *ctx, struct page_cache_descr *pg_cache_descr);
+extern void rrdeng_page_descr_mutex_lock(struct rrdengine_instance *ctx, struct rrdeng_page_descr *descr);
+extern void rrdeng_page_descr_mutex_unlock(struct rrdengine_instance *ctx, struct rrdeng_page_descr *descr);
+extern void rrdeng_try_deallocate_pg_cache_descr(struct rrdengine_instance *ctx, struct rrdeng_page_descr *descr);
+
+#endif /* NETDATA_RRDENGLOCKING_H */ \ No newline at end of file
diff --git a/database/rrd.c b/database/rrd.c
new file mode 100644
index 0000000..dcab651
--- /dev/null
+++ b/database/rrd.c
@@ -0,0 +1,160 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+#define NETDATA_RRD_INTERNALS 1
+
+#include "rrd.h"
+
+// ----------------------------------------------------------------------------
+// globals
+
+/*
+// if not zero it gives the time (in seconds) to remove un-updated dimensions
+// DO NOT ENABLE
+// if dimensions are removed, the chart generation will have to run again
+int rrd_delete_unupdated_dimensions = 0;
+*/
+
+int default_rrd_update_every = UPDATE_EVERY;
+int default_rrd_history_entries = RRD_DEFAULT_HISTORY_ENTRIES;
+#ifdef ENABLE_DBENGINE
+RRD_MEMORY_MODE default_rrd_memory_mode = RRD_MEMORY_MODE_DBENGINE;
+#else
+RRD_MEMORY_MODE default_rrd_memory_mode = RRD_MEMORY_MODE_SAVE;
+#endif
+int gap_when_lost_iterations_above = 1;
+
+
+// ----------------------------------------------------------------------------
+// RRD - memory modes
+
+inline const char *rrd_memory_mode_name(RRD_MEMORY_MODE id) {
+ switch(id) {
+ case RRD_MEMORY_MODE_RAM:
+ return RRD_MEMORY_MODE_RAM_NAME;
+
+ case RRD_MEMORY_MODE_MAP:
+ return RRD_MEMORY_MODE_MAP_NAME;
+
+ case RRD_MEMORY_MODE_NONE:
+ return RRD_MEMORY_MODE_NONE_NAME;
+
+ case RRD_MEMORY_MODE_SAVE:
+ return RRD_MEMORY_MODE_SAVE_NAME;
+
+ case RRD_MEMORY_MODE_ALLOC:
+ return RRD_MEMORY_MODE_ALLOC_NAME;
+
+ case RRD_MEMORY_MODE_DBENGINE:
+ return RRD_MEMORY_MODE_DBENGINE_NAME;
+ }
+
+ return RRD_MEMORY_MODE_SAVE_NAME;
+}
+
+RRD_MEMORY_MODE rrd_memory_mode_id(const char *name) {
+ if(unlikely(!strcmp(name, RRD_MEMORY_MODE_RAM_NAME)))
+ return RRD_MEMORY_MODE_RAM;
+
+ else if(unlikely(!strcmp(name, RRD_MEMORY_MODE_MAP_NAME)))
+ return RRD_MEMORY_MODE_MAP;
+
+ else if(unlikely(!strcmp(name, RRD_MEMORY_MODE_NONE_NAME)))
+ return RRD_MEMORY_MODE_NONE;
+
+ else if(unlikely(!strcmp(name, RRD_MEMORY_MODE_ALLOC_NAME)))
+ return RRD_MEMORY_MODE_ALLOC;
+
+ else if(unlikely(!strcmp(name, RRD_MEMORY_MODE_DBENGINE_NAME)))
+ return RRD_MEMORY_MODE_DBENGINE;
+
+ return RRD_MEMORY_MODE_SAVE;
+}
+
+
+// ----------------------------------------------------------------------------
+// RRD - algorithms types
+
+RRD_ALGORITHM rrd_algorithm_id(const char *name) {
+ if(strcmp(name, RRD_ALGORITHM_INCREMENTAL_NAME) == 0)
+ return RRD_ALGORITHM_INCREMENTAL;
+
+ else if(strcmp(name, RRD_ALGORITHM_ABSOLUTE_NAME) == 0)
+ return RRD_ALGORITHM_ABSOLUTE;
+
+ else if(strcmp(name, RRD_ALGORITHM_PCENT_OVER_ROW_TOTAL_NAME) == 0)
+ return RRD_ALGORITHM_PCENT_OVER_ROW_TOTAL;
+
+ else if(strcmp(name, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL_NAME) == 0)
+ return RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL;
+
+ else
+ return RRD_ALGORITHM_ABSOLUTE;
+}
+
+const char *rrd_algorithm_name(RRD_ALGORITHM algorithm) {
+ switch(algorithm) {
+ case RRD_ALGORITHM_ABSOLUTE:
+ default:
+ return RRD_ALGORITHM_ABSOLUTE_NAME;
+
+ case RRD_ALGORITHM_INCREMENTAL:
+ return RRD_ALGORITHM_INCREMENTAL_NAME;
+
+ case RRD_ALGORITHM_PCENT_OVER_ROW_TOTAL:
+ return RRD_ALGORITHM_PCENT_OVER_ROW_TOTAL_NAME;
+
+ case RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL:
+ return RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL_NAME;
+ }
+}
+
+
+// ----------------------------------------------------------------------------
+// RRD - chart types
+
+inline RRDSET_TYPE rrdset_type_id(const char *name) {
+ if(unlikely(strcmp(name, RRDSET_TYPE_AREA_NAME) == 0))
+ return RRDSET_TYPE_AREA;
+
+ else if(unlikely(strcmp(name, RRDSET_TYPE_STACKED_NAME) == 0))
+ return RRDSET_TYPE_STACKED;
+
+ else // if(unlikely(strcmp(name, RRDSET_TYPE_LINE_NAME) == 0))
+ return RRDSET_TYPE_LINE;
+}
+
+const char *rrdset_type_name(RRDSET_TYPE chart_type) {
+ switch(chart_type) {
+ case RRDSET_TYPE_LINE:
+ default:
+ return RRDSET_TYPE_LINE_NAME;
+
+ case RRDSET_TYPE_AREA:
+ return RRDSET_TYPE_AREA_NAME;
+
+ case RRDSET_TYPE_STACKED:
+ return RRDSET_TYPE_STACKED_NAME;
+ }
+}
+
+// ----------------------------------------------------------------------------
+// RRD - cache directory
+
+char *rrdset_cache_dir(RRDHOST *host, const char *id, const char *config_section) {
+ char *ret = NULL;
+
+ char b[FILENAME_MAX + 1];
+ char n[FILENAME_MAX + 1];
+ rrdset_strncpyz_name(b, id, FILENAME_MAX);
+
+ snprintfz(n, FILENAME_MAX, "%s/%s", host->cache_dir, b);
+ ret = config_get(config_section, "cache directory", n);
+
+ if(host->rrd_memory_mode == RRD_MEMORY_MODE_MAP || host->rrd_memory_mode == RRD_MEMORY_MODE_SAVE) {
+ int r = mkdir(ret, 0775);
+ if(r != 0 && errno != EEXIST)
+ error("Cannot create directory '%s'", ret);
+ }
+
+ return ret;
+}
+
diff --git a/database/rrd.h b/database/rrd.h
new file mode 100644
index 0000000..b16fcdc
--- /dev/null
+++ b/database/rrd.h
@@ -0,0 +1,1318 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_RRD_H
+#define NETDATA_RRD_H 1
+
+// forward typedefs
+typedef struct rrdhost RRDHOST;
+typedef struct rrddim RRDDIM;
+typedef struct rrdset RRDSET;
+typedef struct rrdvar RRDVAR;
+typedef struct rrdsetvar RRDSETVAR;
+typedef struct rrddimvar RRDDIMVAR;
+typedef struct rrdcalc RRDCALC;
+typedef struct rrdcalctemplate RRDCALCTEMPLATE;
+typedef struct alarm_entry ALARM_ENTRY;
+typedef struct context_param CONTEXT_PARAM;
+
+// forward declarations
+struct rrddim_volatile;
+struct rrdset_volatile;
+struct context_param;
+struct label;
+#ifdef ENABLE_DBENGINE
+struct rrdeng_page_descr;
+struct rrdengine_instance;
+struct pg_cache_page_index;
+#endif
+
+#include "../daemon/common.h"
+#include "web/api/queries/query.h"
+#include "rrdvar.h"
+#include "rrdsetvar.h"
+#include "rrddimvar.h"
+#include "rrdcalc.h"
+#include "rrdcalctemplate.h"
+#include "../streaming/rrdpush.h"
+#include "../aclk/legacy/aclk_rrdhost_state.h"
+
+struct context_param {
+ RRDDIM *rd;
+ time_t first_entry_t;
+ time_t last_entry_t;
+};
+
+#define META_CHART_UPDATED 1
+#define META_PLUGIN_UPDATED 2
+#define META_MODULE_UPDATED 4
+#define META_CHART_ACTIVATED 8
+
+#define UPDATE_EVERY 1
+#define UPDATE_EVERY_MAX 3600
+
+#define RRD_DEFAULT_HISTORY_ENTRIES 3600
+#define RRD_HISTORY_ENTRIES_MAX (86400*365)
+
+extern int default_rrd_update_every;
+extern int default_rrd_history_entries;
+extern int gap_when_lost_iterations_above;
+extern time_t rrdset_free_obsolete_time;
+
+#define RRD_ID_LENGTH_MAX 200
+
+#define RRDSET_MAGIC "NETDATA RRD SET FILE V019"
+#define RRDDIMENSION_MAGIC "NETDATA RRD DIMENSION FILE V019"
+
+typedef long long total_number;
+#define TOTAL_NUMBER_FORMAT "%lld"
+
+// ----------------------------------------------------------------------------
+// chart types
+
+typedef enum rrdset_type {
+ RRDSET_TYPE_LINE = 0,
+ RRDSET_TYPE_AREA = 1,
+ RRDSET_TYPE_STACKED = 2
+} RRDSET_TYPE;
+
+#define RRDSET_TYPE_LINE_NAME "line"
+#define RRDSET_TYPE_AREA_NAME "area"
+#define RRDSET_TYPE_STACKED_NAME "stacked"
+
+RRDSET_TYPE rrdset_type_id(const char *name);
+const char *rrdset_type_name(RRDSET_TYPE chart_type);
+
+
+// ----------------------------------------------------------------------------
+// memory mode
+
+typedef enum rrd_memory_mode {
+ RRD_MEMORY_MODE_NONE = 0,
+ RRD_MEMORY_MODE_RAM = 1,
+ RRD_MEMORY_MODE_MAP = 2,
+ RRD_MEMORY_MODE_SAVE = 3,
+ RRD_MEMORY_MODE_ALLOC = 4,
+ RRD_MEMORY_MODE_DBENGINE = 5
+} RRD_MEMORY_MODE;
+
+#define RRD_MEMORY_MODE_NONE_NAME "none"
+#define RRD_MEMORY_MODE_RAM_NAME "ram"
+#define RRD_MEMORY_MODE_MAP_NAME "map"
+#define RRD_MEMORY_MODE_SAVE_NAME "save"
+#define RRD_MEMORY_MODE_ALLOC_NAME "alloc"
+#define RRD_MEMORY_MODE_DBENGINE_NAME "dbengine"
+
+extern RRD_MEMORY_MODE default_rrd_memory_mode;
+
+extern const char *rrd_memory_mode_name(RRD_MEMORY_MODE id);
+extern RRD_MEMORY_MODE rrd_memory_mode_id(const char *name);
+
+
+// ----------------------------------------------------------------------------
+// algorithms types
+
+typedef enum rrd_algorithm {
+ RRD_ALGORITHM_ABSOLUTE = 0,
+ RRD_ALGORITHM_INCREMENTAL = 1,
+ RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL = 2,
+ RRD_ALGORITHM_PCENT_OVER_ROW_TOTAL = 3
+} RRD_ALGORITHM;
+
+#define RRD_ALGORITHM_ABSOLUTE_NAME "absolute"
+#define RRD_ALGORITHM_INCREMENTAL_NAME "incremental"
+#define RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL_NAME "percentage-of-incremental-row"
+#define RRD_ALGORITHM_PCENT_OVER_ROW_TOTAL_NAME "percentage-of-absolute-row"
+
+extern RRD_ALGORITHM rrd_algorithm_id(const char *name);
+extern const char *rrd_algorithm_name(RRD_ALGORITHM algorithm);
+
+// ----------------------------------------------------------------------------
+// RRD FAMILY
+
+struct rrdfamily {
+ avl avl;
+
+ const char *family;
+ uint32_t hash_family;
+
+ size_t use_count;
+
+ avl_tree_lock rrdvar_root_index;
+};
+typedef struct rrdfamily RRDFAMILY;
+
+
+// ----------------------------------------------------------------------------
+// flags
+// use this for configuration flags, not for state control
+// flags are set/unset in a manner that is not thread safe
+// and may lead to missing information.
+
+typedef enum rrddim_flags {
+ RRDDIM_FLAG_NONE = 0,
+ RRDDIM_FLAG_HIDDEN = (1 << 0), // this dimension will not be offered to callers
+ RRDDIM_FLAG_DONT_DETECT_RESETS_OR_OVERFLOWS = (1 << 1), // do not offer RESET or OVERFLOW info to callers
+ RRDDIM_FLAG_OBSOLETE = (1 << 2), // this is marked by the collector/module as obsolete
+ // No new values have been collected for this dimension since agent start or it was marked RRDDIM_FLAG_OBSOLETE at
+ // least rrdset_free_obsolete_time seconds ago.
+ RRDDIM_FLAG_ARCHIVED = (1 << 3)
+} RRDDIM_FLAGS;
+
+#ifdef HAVE_C___ATOMIC
+#define rrddim_flag_check(rd, flag) (__atomic_load_n(&((rd)->flags), __ATOMIC_SEQ_CST) & (flag))
+#define rrddim_flag_set(rd, flag) __atomic_or_fetch(&((rd)->flags), (flag), __ATOMIC_SEQ_CST)
+#define rrddim_flag_clear(rd, flag) __atomic_and_fetch(&((rd)->flags), ~(flag), __ATOMIC_SEQ_CST)
+#else
+#define rrddim_flag_check(rd, flag) ((rd)->flags & (flag))
+#define rrddim_flag_set(rd, flag) (rd)->flags |= (flag)
+#define rrddim_flag_clear(rd, flag) (rd)->flags &= ~(flag)
+#endif
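+
+/*
+ * Usage sketch (illustrative only): always go through these macros so that
+ * the atomic variants are used when the compiler supports them, e.g.
+ *
+ *    rrddim_flag_set(rd, RRDDIM_FLAG_OBSOLETE);
+ *    if (rrddim_flag_check(rd, RRDDIM_FLAG_OBSOLETE))
+ *        rrddim_flag_clear(rd, RRDDIM_FLAG_OBSOLETE);
+ */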
+
+typedef enum label_source {
+ LABEL_SOURCE_AUTO = 0,
+ LABEL_SOURCE_NETDATA_CONF = 1,
+ LABEL_SOURCE_DOCKER = 2,
+ LABEL_SOURCE_ENVIRONMENT = 3,
+ LABEL_SOURCE_KUBERNETES = 4
+} LABEL_SOURCE;
+
+#define LABEL_FLAG_UPDATE_STREAM 1
+#define LABEL_FLAG_STOP_STREAM 2
+
+struct label {
+ char *key, *value;
+ uint32_t key_hash;
+ LABEL_SOURCE label_source;
+ struct label *next;
+};
+
+struct label_index {
+ struct label *head; // Label list
+ netdata_rwlock_t labels_rwlock; // lock for the label list
+ uint32_t labels_flag; // Flags for labels
+};
+
+typedef enum strip_quotes {
+ DO_NOT_STRIP_QUOTES,
+ STRIP_QUOTES
+} STRIP_QUOTES_OPTION;
+
+typedef enum skip_escaped_characters {
+ DO_NOT_SKIP_ESCAPED_CHARACTERS,
+ SKIP_ESCAPED_CHARACTERS
+} SKIP_ESCAPED_CHARACTERS_OPTION;
+
+char *translate_label_source(LABEL_SOURCE l);
+struct label *create_label(char *key, char *value, LABEL_SOURCE label_source);
+extern struct label *add_label_to_list(struct label *l, char *key, char *value, LABEL_SOURCE label_source);
+extern void update_label_list(struct label **labels, struct label *new_labels);
+extern void replace_label_list(struct label_index *labels, struct label *new_labels);
+extern int is_valid_label_value(char *value);
+extern int is_valid_label_key(char *key);
+extern void free_label_list(struct label *labels);
+extern struct label *label_list_lookup_key(struct label *head, char *key, uint32_t key_hash);
+extern struct label *label_list_lookup_keylist(struct label *head, char *keylist);
+extern int label_list_contains_keylist(struct label *head, char *keylist);
+extern int label_list_contains_key(struct label *head, char *key, uint32_t key_hash);
+extern int label_list_contains(struct label *head, struct label *check);
+extern struct label *merge_label_lists(struct label *lo_pri, struct label *hi_pri);
+extern void strip_last_symbol(
+ char *str,
+ char symbol,
+ SKIP_ESCAPED_CHARACTERS_OPTION skip_escaped_characters);
+extern char *strip_double_quotes(char *str, SKIP_ESCAPED_CHARACTERS_OPTION skip_escaped_characters);
+void reload_host_labels(void);
+extern void rrdset_add_label_to_new_list(RRDSET *st, char *key, char *value, LABEL_SOURCE source);
+extern void rrdset_finalize_labels(RRDSET *st);
+extern void rrdset_update_labels(RRDSET *st, struct label *labels);
+extern int rrdset_contains_label_keylist(RRDSET *st, char *key);
+extern struct label *rrdset_lookup_label_key(RRDSET *st, char *key, uint32_t key_hash);
+
+// ----------------------------------------------------------------------------
+// RRD DIMENSION - this is a metric
+
+struct rrddim {
+ // ------------------------------------------------------------------------
+ // binary indexing structures
+
+ avl avl; // the binary index - this has to be first member!
+
+ // ------------------------------------------------------------------------
+ // the dimension definition
+
+ const char *id; // the id of this dimension (for internal identification)
+ const char *name; // the name of this dimension (as presented to user)
+ // this is a pointer to the config structure
+ // since the config always has a higher priority
+ // (the user overwrites the name of the charts)
+ // DO NOT FREE THIS - IT IS ALLOCATED IN CONFIG
+
+ RRD_ALGORITHM algorithm; // the algorithm that is applied to add new collected values
+ RRD_MEMORY_MODE rrd_memory_mode; // the memory mode for this dimension
+
+ collected_number multiplier; // the multiplier of the collected values
+    collected_number divisor;                       // the divisor of the collected values
+
+ uint32_t flags; // configuration flags for the dimension
+
+ // ------------------------------------------------------------------------
+ // members for temporary data we need for calculations
+
+ uint32_t hash; // a simple hash of the id, to speed up searching / indexing
+ // instead of strcmp() every item in the binary index
+ // we first compare the hashes
+
+ uint32_t hash_name; // a simple hash of the name
+
+ char *cache_filename; // the filename we load/save from/to this set
+
+    size_t collections_counter;                     // the number of times we added values to this rrddim
+ struct rrddim_volatile *state; // volatile state that is not persistently stored
+ size_t unused[8];
+
+ collected_number collected_value_max; // the absolute maximum of the collected value
+
+ unsigned int updated:1; // 1 when the dimension has been updated since the last processing
+    unsigned int exposed:1;                         // 1 when we have sent the definition of this dimension to the central (parent) netdata
+
+    struct timeval last_collected_time;             // when was this dimension last updated
+                                                    // this is the actual date and time we updated the last_collected_value
+                                                    // THIS IS DIFFERENT FROM THE SAME MEMBER OF RRDSET
+
+ calculated_number calculated_value; // the current calculated value, after applying the algorithm - resets to zero after being used
+ calculated_number last_calculated_value; // the last calculated value processed
+
+ calculated_number last_stored_value; // the last value as stored in the database (after interpolation)
+
+ collected_number collected_value; // the current value, as collected - resets to 0 after being used
+ collected_number last_collected_value; // the last value that was collected, after being processed
+
+ // the *_volume members are used to calculate the accuracy of the rounding done by the
+ // storage number - they are printed to debug.log when debug is enabled for a set.
+ calculated_number collected_volume; // the sum of all collected values so far
+ calculated_number stored_volume; // the sum of all stored values so far
+
+ struct rrddim *next; // linking of dimensions within the same data set
+ struct rrdset *rrdset;
+
+ // ------------------------------------------------------------------------
+ // members for checking the data when loading from disk
+
+    long entries;                                   // how many entries this dimension has in ram
+                                                    // this is the same as the entries of the data set
+                                                    // we set it here, to check the data when we load it from disk.
+
+ int update_every; // every how many seconds is this updated
+
+ size_t memsize; // the memory allocated for this dimension
+
+ char magic[sizeof(RRDDIMENSION_MAGIC) + 1]; // a string to be saved, used to identify our data file
+
+ struct rrddimvar *variables;
+
+ // ------------------------------------------------------------------------
+ // the values stored in this dimension, using our floating point numbers
+
+ storage_number values[]; // the array of values - THIS HAS TO BE THE LAST MEMBER
+};
+
+// ----------------------------------------------------------------------------
+// iterator state for RRD dimension data collection
+union rrddim_collect_handle {
+ struct {
+ long slot;
+ long entries;
+ } slotted; // state the legacy code uses
+#ifdef ENABLE_DBENGINE
+ struct rrdeng_collect_handle {
+ struct rrdeng_page_descr *descr, *prev_descr;
+ unsigned long page_correlation_id;
+ struct rrdengine_instance *ctx;
+ // set to 1 when this dimension is not page aligned with the other dimensions in the chart
+ uint8_t unaligned_page;
+ } rrdeng; // state the database engine uses
+#endif
+};
+
+// ----------------------------------------------------------------------------
+// iterator state for RRD dimension data queries
+struct rrddim_query_handle {
+ RRDDIM *rd;
+ time_t start_time;
+ time_t end_time;
+ union {
+ struct {
+ long slot;
+ long last_slot;
+ uint8_t finished;
+ } slotted; // state the legacy code uses
+#ifdef ENABLE_DBENGINE
+ struct rrdeng_query_handle {
+ struct rrdeng_page_descr *descr;
+ struct rrdengine_instance *ctx;
+ struct pg_cache_page_index *page_index;
+ time_t next_page_time;
+ time_t now;
+ unsigned position;
+ } rrdeng; // state the database engine uses
+#endif
+ };
+};
+
+
+// ----------------------------------------------------------------------------
+// volatile state per RRD dimension
+struct rrddim_volatile {
+#ifdef ENABLE_DBENGINE
+ uuid_t *rrdeng_uuid; // database engine metric UUID
+    uuid_t *metric_uuid;                    // global UUID for this metric (unique across hosts)
+ struct pg_cache_page_index *page_index;
+#endif
+ union rrddim_collect_handle handle;
+ // ------------------------------------------------------------------------
+ // function pointers that handle data collection
+ struct rrddim_collect_ops {
+ // an initialization function to run before starting collection
+ void (*init)(RRDDIM *rd);
+
+ // run this to store each metric into the database
+ void (*store_metric)(RRDDIM *rd, usec_t point_in_time, storage_number number);
+
+        // a finalization function to run after collection is over
+ // returns 1 if it's safe to delete the dimension
+ int (*finalize)(RRDDIM *rd);
+ } collect_ops;
+
+ // function pointers that handle database queries
+ struct rrddim_query_ops {
+ // run this before starting a series of next_metric() database queries
+ void (*init)(RRDDIM *rd, struct rrddim_query_handle *handle, time_t start_time, time_t end_time);
+
+ // run this to load each metric number from the database
+ storage_number (*next_metric)(struct rrddim_query_handle *handle, time_t *current_time);
+
+ // run this to test if the series of next_metric() database queries is finished
+ int (*is_finished)(struct rrddim_query_handle *handle);
+
+        // run this after finishing a series of next_metric() database queries
+ void (*finalize)(struct rrddim_query_handle *handle);
+
+ // get the timestamp of the last entry of this metric
+ time_t (*latest_time)(RRDDIM *rd);
+
+ // get the timestamp of the first entry of this metric
+ time_t (*oldest_time)(RRDDIM *rd);
+ } query_ops;
+};
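+
+/*
+ * Usage sketch (illustrative only): queries go through these function
+ * pointers, so the same loop works for both the legacy round-robin database
+ * and the dbengine; rd, after and before are hypothetical.
+ *
+ *    struct rrddim_query_handle handle;
+ *    time_t t;
+ *
+ *    rd->state->query_ops.init(rd, &handle, after, before);
+ *    while (!rd->state->query_ops.is_finished(&handle)) {
+ *        storage_number n = rd->state->query_ops.next_metric(&handle, &t);
+ *        // ... use value n at time t ...
+ *    }
+ *    rd->state->query_ops.finalize(&handle);
+ */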
+
+// ----------------------------------------------------------------------------
+// volatile state per chart
+struct rrdset_volatile {
+ char *old_title;
+ char *old_context;
+ struct label *new_labels;
+ struct label_index labels;
+};
+
+// ----------------------------------------------------------------------------
+// these loop macros make sure the linked list is accessed with the right lock
+
+#define rrddim_foreach_read(rd, st) \
+ for((rd) = (st)->dimensions, rrdset_check_rdlock(st); (rd) ; (rd) = (rd)->next)
+
+#define rrddim_foreach_write(rd, st) \
+ for((rd) = (st)->dimensions, rrdset_check_wrlock(st); (rd) ; (rd) = (rd)->next)
+
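+/*
+ * Usage sketch (illustrative only): iterate the dimensions of a chart while
+ * holding the matching rrdset lock.
+ *
+ *    RRDDIM *rd;
+ *    rrdset_rdlock(st);
+ *    rrddim_foreach_read(rd, st) {
+ *        // ... read-only access to rd ...
+ *    }
+ *    rrdset_unlock(st);
+ */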
+
+// ----------------------------------------------------------------------------
+// RRDSET - this is a chart
+
+// use this for configuration flags, not for state control
+// flags are set/unset in a manner that is not thread safe
+// and may lead to missing information.
+
+typedef enum rrdset_flags {
+ RRDSET_FLAG_ENABLED = 1 << 0, // enables or disables a chart
+ RRDSET_FLAG_DETAIL = 1 << 1, // if set, the data set should be considered as a detail of another
+ // (the master data set should be the one that has the same family and is not detail)
+ RRDSET_FLAG_DEBUG = 1 << 2, // enables or disables debugging for a chart
+ RRDSET_FLAG_OBSOLETE = 1 << 3, // this is marked by the collector/module as obsolete
+ RRDSET_FLAG_BACKEND_SEND = 1 << 4, // if set, this chart should be sent to backends
+ RRDSET_FLAG_BACKEND_IGNORE = 1 << 5, // if set, this chart should not be sent to backends
+ RRDSET_FLAG_UPSTREAM_SEND = 1 << 6, // if set, this chart should be sent upstream (streaming)
+ RRDSET_FLAG_UPSTREAM_IGNORE = 1 << 7, // if set, this chart should not be sent upstream (streaming)
+ RRDSET_FLAG_UPSTREAM_EXPOSED = 1 << 8, // if set, we have sent this chart definition to netdata parent (streaming)
+ RRDSET_FLAG_STORE_FIRST = 1 << 9, // if set, do not eliminate the first collection during interpolation
+ RRDSET_FLAG_HETEROGENEOUS = 1 << 10, // if set, the chart is not homogeneous (dimensions in it have multiple algorithms, multipliers or dividers)
+ RRDSET_FLAG_HOMOGENEOUS_CHECK = 1 << 11, // if set, the chart should be checked to determine if the dimensions are homogeneous
+ RRDSET_FLAG_HIDDEN = 1 << 12, // if set, do not show this chart on the dashboard, but use it for backends
+ RRDSET_FLAG_SYNC_CLOCK = 1 << 13, // if set, microseconds on next data collection will be ignored (the chart will be synced to now)
+ RRDSET_FLAG_OBSOLETE_DIMENSIONS = 1 << 14, // this is marked by the collector/module when a chart has obsolete dimensions
+ // No new values have been collected for this chart since agent start or it was marked RRDSET_FLAG_OBSOLETE at
+ // least rrdset_free_obsolete_time seconds ago.
+ RRDSET_FLAG_ARCHIVED = 1 << 15,
+ RRDSET_FLAG_ACLK = 1 << 16
+} RRDSET_FLAGS;
+
+#ifdef HAVE_C___ATOMIC
+#define rrdset_flag_check(st, flag) (__atomic_load_n(&((st)->flags), __ATOMIC_SEQ_CST) & (flag))
+#define rrdset_flag_set(st, flag) __atomic_or_fetch(&((st)->flags), flag, __ATOMIC_SEQ_CST)
+#define rrdset_flag_clear(st, flag) __atomic_and_fetch(&((st)->flags), ~flag, __ATOMIC_SEQ_CST)
+#else
+#define rrdset_flag_check(st, flag) ((st)->flags & (flag))
+#define rrdset_flag_set(st, flag) (st)->flags |= (flag)
+#define rrdset_flag_clear(st, flag) (st)->flags &= ~(flag)
+#endif
+#define rrdset_flag_check_noatomic(st, flag) ((st)->flags & (flag))
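+
+// Illustrative usage (not part of the original source): flags should be
+// tested and toggled only through these macros, so that the atomic variants
+// are used when available:
+//
+//     rrdset_flag_set(st, RRDSET_FLAG_DEBUG);
+//     if(rrdset_flag_check(st, RRDSET_FLAG_DEBUG)) { /* debugging enabled */ }
+//     rrdset_flag_clear(st, RRDSET_FLAG_DEBUG);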
+
+struct rrdset {
+ // ------------------------------------------------------------------------
+ // binary indexing structures
+
+ avl avl; // the index, with key the id - this has to be first!
+ avl avlname; // the index, with key the name
+
+ // ------------------------------------------------------------------------
+ // the set configuration
+
+ char id[RRD_ID_LENGTH_MAX + 1]; // id of the data set
+
+ const char *name; // the name of this chart (as presented to the user)
+ // this is a pointer to the config structure
+ // since the config always has a higher priority
+ // (the user may overwrite the chart name)
+
+ char *config_section; // the config section for the chart
+
+ char *type; // the type of graph RRD_TYPE_* (a category, for determining graphing options)
+ char *family; // grouping sets under the same family
+ char *title; // title shown to user
+ char *units; // units of measurement
+
+ char *context; // the template of this data set
+ uint32_t hash_context; // the hash of the chart's context
+
+ RRDSET_TYPE chart_type; // line, area, stacked
+
+ int update_every; // every how many seconds is this updated?
+
+ long entries; // total number of entries in the data set
+
+ long current_entry; // the entry that is currently being updated
+ // it goes around in a round-robin fashion
+
+ RRDSET_FLAGS flags; // configuration flags
+ RRDSET_FLAGS *exporting_flags; // array of flags for exporting connector instances
+
+ int gap_when_lost_iterations_above; // after how many lost iterations a gap should be stored
+ // netdata will interpolate values for gaps lower than this
+
+ long priority; // the sorting priority of this chart
+
+
+ // ------------------------------------------------------------------------
+ // members for temporary data we need for calculations
+
+ RRD_MEMORY_MODE rrd_memory_mode; // the memory mode of this chart (e.g. RAM, MAP, DBENGINE)
+
+ char *cache_dir; // the directory to store dimensions
+ char cache_filename[FILENAME_MAX+1]; // the filename to store this set
+
+ netdata_rwlock_t rrdset_rwlock; // protects dimensions linked list
+
+ size_t counter; // the number of times we added values to this database
+ size_t counter_done; // the number of times rrdset_done() has been called
+
+ time_t last_accessed_time; // the last time this RRDSET has been accessed
+ time_t upstream_resync_time; // the timestamp up to which we should resync clock upstream
+
+ char *plugin_name; // the name of the plugin that generated this
+ char *module_name; // the name of the plugin module that generated this
+ uuid_t *chart_uuid; // the global GUID of this chart object
+ struct rrdset_volatile *state; // volatile state that is not persistently stored
+ size_t unused[3];
+
+ size_t rrddim_page_alignment; // keeps metric pages in alignment when using dbengine
+
+ uint32_t hash; // a simple hash on the id, to speed up searching
+ // we first compare hashes, and only if the hashes are equal we do string comparisons
+
+ uint32_t hash_name; // a simple hash on the name
+
+ usec_t usec_since_last_update; // the time in microseconds since the last collection of data
+
+ struct timeval last_updated; // when this data set was last updated (set every time rrdset_done() runs)
+ struct timeval last_collected_time; // when this data set last collected values
+
+ total_number collected_total; // used internally to calculate percentages
+ total_number last_collected_total; // used internally to calculate percentages
+
+ RRDFAMILY *rrdfamily; // pointer to RRDFAMILY this chart belongs to
+ RRDHOST *rrdhost; // pointer to RRDHOST this chart belongs to
+
+ struct rrdset *next; // linking of rrdsets
+
+ // ------------------------------------------------------------------------
+ // local variables
+
+ calculated_number green; // green threshold for this chart
+ calculated_number red; // red threshold for this chart
+
+ avl_tree_lock rrdvar_root_index; // RRDVAR index for this chart
+ RRDSETVAR *variables; // RRDSETVAR linked list for this chart (one RRDSETVAR, many RRDVARs)
+ RRDCALC *alarms; // RRDCALC linked list for this chart
+
+ // ------------------------------------------------------------------------
+ // members for checking the data when loading from disk
+
+ unsigned long memsize; // how much mem we have allocated for this (without dimensions)
+
+ char magic[sizeof(RRDSET_MAGIC) + 1]; // our magic
+
+ // ------------------------------------------------------------------------
+ // the dimensions
+
+ avl_tree_lock dimensions_index; // the root of the dimensions index
+ RRDDIM *dimensions; // the actual data for every dimension
+
+};
+
+#define rrdset_rdlock(st) netdata_rwlock_rdlock(&((st)->rrdset_rwlock))
+#define rrdset_wrlock(st) netdata_rwlock_wrlock(&((st)->rrdset_rwlock))
+#define rrdset_unlock(st) netdata_rwlock_unlock(&((st)->rrdset_rwlock))
+
+
+// ----------------------------------------------------------------------------
+// these loop macros make sure the linked list is accessed with the right lock
+
+#define rrdset_foreach_read(st, host) \
+ for((st) = (host)->rrdset_root, rrdhost_check_rdlock(host); st ; (st) = (st)->next)
+
+#define rrdset_foreach_write(st, host) \
+ for((st) = (host)->rrdset_root, rrdhost_check_wrlock(host); st ; (st) = (st)->next)
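+
+// Illustrative sketch (not part of the original source): read-iterating the
+// charts of a host and the dimensions of each chart, taking the locks that
+// the macros above expect to be held:
+//
+//     RRDSET *st;
+//     RRDDIM *rd;
+//     rrdhost_rdlock(host);
+//     rrdset_foreach_read(st, host) {
+//         rrdset_rdlock(st);
+//         rrddim_foreach_read(rd, st) {
+//             // inspect rd here
+//         }
+//         rrdset_unlock(st);
+//     }
+//     rrdhost_unlock(host);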
+
+
+// ----------------------------------------------------------------------------
+// RRDHOST flags
+// use this for configuration flags, not for state control
+// flags are set/unset in a manner that is not thread safe
+// and may lead to missing information.
+
+typedef enum rrdhost_flags {
+ RRDHOST_FLAG_ORPHAN = 1 << 0, // this host is orphan (not receiving data)
+ RRDHOST_FLAG_DELETE_OBSOLETE_CHARTS = 1 << 1, // delete files of obsolete charts
+ RRDHOST_FLAG_DELETE_ORPHAN_HOST = 1 << 2, // delete the entire host when orphan
+ RRDHOST_FLAG_BACKEND_SEND = 1 << 3, // send it to backends
+ RRDHOST_FLAG_BACKEND_DONT_SEND = 1 << 4, // don't send it to backends
+ RRDHOST_FLAG_ARCHIVED = 1 << 5, // The host is archived, no collected charts yet
+ RRDHOST_FLAG_MULTIHOST = 1 << 6, // Host belongs to localhost/megadb
+} RRDHOST_FLAGS;
+
+#ifdef HAVE_C___ATOMIC
+#define rrdhost_flag_check(host, flag) (__atomic_load_n(&((host)->flags), __ATOMIC_SEQ_CST) & (flag))
+#define rrdhost_flag_set(host, flag) __atomic_or_fetch(&((host)->flags), flag, __ATOMIC_SEQ_CST)
+#define rrdhost_flag_clear(host, flag) __atomic_and_fetch(&((host)->flags), ~flag, __ATOMIC_SEQ_CST)
+#else
+#define rrdhost_flag_check(host, flag) ((host)->flags & (flag))
+#define rrdhost_flag_set(host, flag) (host)->flags |= (flag)
+#define rrdhost_flag_clear(host, flag) (host)->flags &= ~(flag)
+#endif
+
+#ifdef NETDATA_INTERNAL_CHECKS
+#define rrdset_debug(st, fmt, args...) do { if(unlikely(debug_flags & D_RRD_STATS && rrdset_flag_check(st, RRDSET_FLAG_DEBUG))) \
+ debug_int(__FILE__, __FUNCTION__, __LINE__, "%s: " fmt, st->name, ##args); } while(0)
+#else
+#define rrdset_debug(st, fmt, args...) debug_dummy()
+#endif
+
+// ----------------------------------------------------------------------------
+// Health data
+
+struct alarm_entry {
+ uint32_t unique_id;
+ uint32_t alarm_id;
+ uint32_t alarm_event_id;
+
+ time_t when;
+ time_t duration;
+ time_t non_clear_duration;
+
+ char *name;
+ uint32_t hash_name;
+
+ char *chart;
+ uint32_t hash_chart;
+
+ char *family;
+
+ char *exec;
+ char *recipient;
+ time_t exec_run_timestamp;
+ int exec_code;
+ uint64_t exec_spawn_serial;
+
+ char *source;
+ char *units;
+ char *info;
+
+ calculated_number old_value;
+ calculated_number new_value;
+
+ char *old_value_string;
+ char *new_value_string;
+
+ RRDCALC_STATUS old_status;
+ RRDCALC_STATUS new_status;
+
+ uint32_t flags;
+
+ int delay;
+ time_t delay_up_to_timestamp;
+
+ uint32_t updated_by_id;
+ uint32_t updates_id;
+
+ time_t last_repeat;
+
+ struct alarm_entry *next;
+ struct alarm_entry *next_in_progress;
+ struct alarm_entry *prev_in_progress;
+};
+
+
+typedef struct alarm_log {
+ uint32_t next_log_id;
+ uint32_t next_alarm_id;
+ unsigned int count;
+ unsigned int max;
+ ALARM_ENTRY *alarms;
+ netdata_rwlock_t alarm_log_rwlock;
+} ALARM_LOG;
+
+
+// ----------------------------------------------------------------------------
+// RRD HOST
+
+struct rrdhost_system_info {
+ char *host_os_name;
+ char *host_os_id;
+ char *host_os_id_like;
+ char *host_os_version;
+ char *host_os_version_id;
+ char *host_os_detection;
+ char *host_cores;
+ char *host_cpu_freq;
+ char *host_ram_total;
+ char *host_disk_space;
+ char *container_os_name;
+ char *container_os_id;
+ char *container_os_id_like;
+ char *container_os_version;
+ char *container_os_version_id;
+ char *container_os_detection;
+ char *kernel_name;
+ char *kernel_version;
+ char *architecture;
+ char *virtualization;
+ char *virt_detection;
+ char *container;
+ char *container_detection;
+ char *is_k8s_node;
+};
+
+struct rrdhost {
+ avl avl; // the index of hosts
+
+ // ------------------------------------------------------------------------
+ // host information
+
+ char *hostname; // the hostname of this host
+ uint32_t hash_hostname; // the hostname hash
+
+ char *registry_hostname; // the registry hostname for this host
+
+ char machine_guid[GUID_LEN + 1]; // the unique ID of this host
+ uint32_t hash_machine_guid; // the hash of the unique ID
+
+ const char *os; // the O/S type of the host
+ const char *tags; // tags for this host
+ const char *timezone; // the timezone of the host
+
+ RRDHOST_FLAGS flags; // flags about this RRDHOST
+ RRDHOST_FLAGS *exporting_flags; // array of flags for exporting connector instances
+
+ int rrd_update_every; // the update frequency of the host
+ long rrd_history_entries; // the number of history entries for the host's charts
+ RRD_MEMORY_MODE rrd_memory_mode; // the memory mode for the charts of this host
+
+ char *cache_dir; // the directory to save RRD cache files
+ char *varlib_dir; // the directory to save health log
+
+ char *program_name; // the program name that collects metrics for this host
+ char *program_version; // the program version that collects metrics for this host
+
+ struct rrdhost_system_info *system_info; // information collected from the host environment
+
+ // ------------------------------------------------------------------------
+ // streaming of data to remote hosts - rrdpush
+
+ unsigned int rrdpush_send_enabled:1; // 1 when this host sends metrics to another netdata
+ char *rrdpush_send_destination; // where to send metrics to
+ char *rrdpush_send_api_key; // the api key at the receiving netdata
+
+ // the following are state information for the threading
+ // streaming metrics from this netdata to an upstream netdata
+ struct sender_state *sender;
+ volatile unsigned int rrdpush_sender_spawn:1; // 1 when the sender thread has been spawned
+ netdata_thread_t rrdpush_sender_thread; // the sender thread
+
+ volatile unsigned int rrdpush_sender_connected:1; // 1 when the sender is ready to push metrics
+ int rrdpush_sender_socket; // the fd of the socket to the remote host, or -1
+
+ volatile unsigned int rrdpush_sender_error_shown:1; // 1 when we have logged a communication error
+ volatile unsigned int rrdpush_sender_join:1; // 1 when we have to join the sending thread
+
+ SIMPLE_PATTERN *rrdpush_send_charts_matching; // pattern to match the charts to be sent
+
+ int rrdpush_sender_pipe[2]; // collector to sender thread signaling
+ //BUFFER *rrdpush_sender_buffer; // collector fills it, sender sends it
+
+ //uint32_t stream_version; //Set the current version of the stream.
+
+ // ------------------------------------------------------------------------
+ // streaming of data from remote hosts - rrdpush
+
+ volatile size_t connected_senders; // when remote hosts are streaming to this
+ // host, this is the counter of connected clients
+
+ time_t senders_disconnected_time; // the time the last sender was disconnected
+
+ struct receiver_state *receiver;
+ netdata_mutex_t receiver_lock;
+
+ // ------------------------------------------------------------------------
+ // health monitoring options
+
+ unsigned int health_enabled:1; // 1 when this host has health enabled
+ time_t health_delay_up_to; // a timestamp to delay alarms processing up to
+ char *health_default_exec; // the full path of the alarms notifications program
+ char *health_default_recipient; // the default recipient for all alarms
+ char *health_log_filename; // the alarms event log filename
+ size_t health_log_entries_written; // the number of alarm events written to the alarms event log
+ FILE *health_log_fp; // the FILE pointer to the open alarms event log file
+ uint32_t health_default_warn_repeat_every; // the default value for the interval between repeating warning notifications
+ uint32_t health_default_crit_repeat_every; // the default value for the interval between repeating critical notifications
+
+
+ // all RRDCALCs are primarily allocated and linked here
+ // RRDCALCs may be linked to charts at any point
+ // (charts may or may not exist when these are loaded)
+ RRDCALC *alarms;
+ RRDCALC *alarms_with_foreach;
+ avl_tree_lock alarms_idx_health_log;
+ avl_tree_lock alarms_idx_name;
+
+ ALARM_LOG health_log; // alarms historical events (event log)
+ uint32_t health_last_processed_id; // the last processed health id from the log
+ uint32_t health_max_unique_id; // the max alarm log unique id given for the host
+ uint32_t health_max_alarm_id; // the max alarm id given for the host
+
+ // templates of alarms
+ // these are used to create alarms when charts
+ // are created or renamed, that match them
+ RRDCALCTEMPLATE *templates;
+ RRDCALCTEMPLATE *alarms_template_with_foreach;
+
+
+ // ------------------------------------------------------------------------
+ // the charts of the host
+
+ RRDSET *rrdset_root; // the host charts
+
+
+ // ------------------------------------------------------------------------
+ // locks
+
+ netdata_rwlock_t rrdhost_rwlock; // lock for this RRDHOST (protects rrdset_root linked list)
+
+ // ------------------------------------------------------------------------
+ // Support for host-level labels
+ struct label_index labels;
+
+ // ------------------------------------------------------------------------
+ // indexes
+
+ avl_tree_lock rrdset_root_index; // the host's charts index (by id)
+ avl_tree_lock rrdset_root_index_name; // the host's charts index (by name)
+
+ avl_tree_lock rrdfamily_root_index; // the host's chart families index
+ avl_tree_lock rrdvar_root_index; // the host's chart variables index
+
+#ifdef ENABLE_DBENGINE
+ struct rrdengine_instance *rrdeng_ctx; // DB engine instance for this host
+ uuid_t host_uuid; // Global GUID for this host
+#endif
+
+#ifdef ENABLE_HTTPS
+ struct netdata_ssl ssl; //Structure used to encrypt the connection
+ struct netdata_ssl stream_ssl; //Structure used to encrypt the stream
+#endif
+
+ netdata_mutex_t aclk_state_lock;
+ aclk_rrdhost_state aclk_state;
+
+ struct rrdhost *next;
+};
+extern RRDHOST *localhost;
+
+#define rrdhost_rdlock(host) netdata_rwlock_rdlock(&((host)->rrdhost_rwlock))
+#define rrdhost_wrlock(host) netdata_rwlock_wrlock(&((host)->rrdhost_rwlock))
+#define rrdhost_unlock(host) netdata_rwlock_unlock(&((host)->rrdhost_rwlock))
+
+#define rrdhost_aclk_state_lock(host) netdata_mutex_lock(&((host)->aclk_state_lock))
+#define rrdhost_aclk_state_unlock(host) netdata_mutex_unlock(&((host)->aclk_state_lock))
+
+// ----------------------------------------------------------------------------
+// these loop macros make sure the linked list is accessed with the right lock
+
+#define rrdhost_foreach_read(var) \
+ for((var) = localhost, rrd_check_rdlock(); var ; (var) = (var)->next)
+
+#define rrdhost_foreach_write(var) \
+ for((var) = localhost, rrd_check_wrlock(); var ; (var) = (var)->next)
+
+
+// ----------------------------------------------------------------------------
+// global lock for all RRDHOSTs
+
+extern netdata_rwlock_t rrd_rwlock;
+
+#define rrd_rdlock() netdata_rwlock_rdlock(&rrd_rwlock)
+#define rrd_wrlock() netdata_rwlock_wrlock(&rrd_rwlock)
+#define rrd_unlock() netdata_rwlock_unlock(&rrd_rwlock)
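+
+// Illustrative sketch (not part of the original source): walking all hosts,
+// starting from localhost, while holding the global lock:
+//
+//     RRDHOST *host;
+//     rrd_rdlock();
+//     rrdhost_foreach_read(host) {
+//         // inspect host here
+//     }
+//     rrd_unlock();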
+
+// ----------------------------------------------------------------------------
+
+extern size_t rrd_hosts_available;
+extern time_t rrdhost_free_orphan_time;
+
+extern int rrd_init(char *hostname, struct rrdhost_system_info *system_info);
+
+extern RRDHOST *rrdhost_find_by_hostname(const char *hostname, uint32_t hash);
+extern RRDHOST *rrdhost_find_by_guid(const char *guid, uint32_t hash);
+
+extern RRDHOST *rrdhost_find_or_create(
+ const char *hostname
+ , const char *registry_hostname
+ , const char *guid
+ , const char *os
+ , const char *timezone
+ , const char *tags
+ , const char *program_name
+ , const char *program_version
+ , int update_every
+ , long history
+ , RRD_MEMORY_MODE mode
+ , unsigned int health_enabled
+ , unsigned int rrdpush_enabled
+ , char *rrdpush_destination
+ , char *rrdpush_api_key
+ , char *rrdpush_send_charts_matching
+ , struct rrdhost_system_info *system_info
+);
+
+extern void rrdhost_update(RRDHOST *host
+ , const char *hostname
+ , const char *registry_hostname
+ , const char *guid
+ , const char *os
+ , const char *timezone
+ , const char *tags
+ , const char *program_name
+ , const char *program_version
+ , int update_every
+ , long history
+ , RRD_MEMORY_MODE mode
+ , unsigned int health_enabled
+ , unsigned int rrdpush_enabled
+ , char *rrdpush_destination
+ , char *rrdpush_api_key
+ , char *rrdpush_send_charts_matching
+ , struct rrdhost_system_info *system_info
+);
+
+extern int rrdhost_set_system_info_variable(struct rrdhost_system_info *system_info, char *name, char *value);
+
+#if defined(NETDATA_INTERNAL_CHECKS) && defined(NETDATA_VERIFY_LOCKS)
+extern void __rrdhost_check_wrlock(RRDHOST *host, const char *file, const char *function, const unsigned long line);
+extern void __rrdhost_check_rdlock(RRDHOST *host, const char *file, const char *function, const unsigned long line);
+extern void __rrdset_check_rdlock(RRDSET *st, const char *file, const char *function, const unsigned long line);
+extern void __rrdset_check_wrlock(RRDSET *st, const char *file, const char *function, const unsigned long line);
+extern void __rrd_check_rdlock(const char *file, const char *function, const unsigned long line);
+extern void __rrd_check_wrlock(const char *file, const char *function, const unsigned long line);
+
+#define rrdhost_check_rdlock(host) __rrdhost_check_rdlock(host, __FILE__, __FUNCTION__, __LINE__)
+#define rrdhost_check_wrlock(host) __rrdhost_check_wrlock(host, __FILE__, __FUNCTION__, __LINE__)
+#define rrdset_check_rdlock(st) __rrdset_check_rdlock(st, __FILE__, __FUNCTION__, __LINE__)
+#define rrdset_check_wrlock(st) __rrdset_check_wrlock(st, __FILE__, __FUNCTION__, __LINE__)
+#define rrd_check_rdlock() __rrd_check_rdlock(__FILE__, __FUNCTION__, __LINE__)
+#define rrd_check_wrlock() __rrd_check_wrlock(__FILE__, __FUNCTION__, __LINE__)
+
+#else
+#define rrdhost_check_rdlock(host) (void)0
+#define rrdhost_check_wrlock(host) (void)0
+#define rrdset_check_rdlock(st) (void)0
+#define rrdset_check_wrlock(st) (void)0
+#define rrd_check_rdlock() (void)0
+#define rrd_check_wrlock() (void)0
+#endif
+
+// ----------------------------------------------------------------------------
+// RRDSET functions
+
+extern int rrdset_set_name(RRDSET *st, const char *name);
+
+extern RRDSET *rrdset_create_custom(RRDHOST *host
+ , const char *type
+ , const char *id
+ , const char *name
+ , const char *family
+ , const char *context
+ , const char *title
+ , const char *units
+ , const char *plugin
+ , const char *module
+ , long priority
+ , int update_every
+ , RRDSET_TYPE chart_type
+ , RRD_MEMORY_MODE memory_mode
+ , long history_entries);
+
+#define rrdset_create(host, type, id, name, family, context, title, units, plugin, module, priority, update_every, chart_type) \
+ rrdset_create_custom(host, type, id, name, family, context, title, units, plugin, module, priority, update_every, chart_type, (host)->rrd_memory_mode, (host)->rrd_history_entries)
+
+#define rrdset_create_localhost(type, id, name, family, context, title, units, plugin, module, priority, update_every, chart_type) \
+ rrdset_create(localhost, type, id, name, family, context, title, units, plugin, module, priority, update_every, chart_type)
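+
+// Illustrative sketch (not part of the original source): the typical
+// collector cycle built from the calls declared in this header - create the
+// chart once, then on every iteration advance its clock, set the collected
+// values and commit them. All chart/dimension ids below are made up:
+//
+//     static RRDSET *st = NULL;
+//     if(unlikely(!st)) {
+//         st = rrdset_create_localhost("example", "random", NULL, "random",
+//                 NULL, "Random Numbers", "number", "example", NULL, 1000,
+//                 1, RRDSET_TYPE_LINE);
+//         rrddim_add(st, "value", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+//     }
+//     else rrdset_next(st);
+//
+//     rrddim_set(st, "value", (collected_number)random());
+//     rrdset_done(st);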
+
+extern void rrdhost_free_all(void);
+extern void rrdhost_save_all(void);
+extern void rrdhost_cleanup_all(void);
+
+extern void rrdhost_cleanup_orphan_hosts_nolock(RRDHOST *protected);
+extern void rrdhost_system_info_free(struct rrdhost_system_info *system_info);
+extern void rrdhost_free(RRDHOST *host);
+extern void rrdhost_save_charts(RRDHOST *host);
+extern void rrdhost_delete_charts(RRDHOST *host);
+
+extern int rrdhost_should_be_removed(RRDHOST *host, RRDHOST *protected, time_t now);
+
+extern void rrdset_update_heterogeneous_flag(RRDSET *st);
+
+extern RRDSET *rrdset_find(RRDHOST *host, const char *id);
+#define rrdset_find_localhost(id) rrdset_find(localhost, id)
+/* This will not return charts that are archived */
+static inline RRDSET *rrdset_find_active_localhost(const char *id)
+{
+ RRDSET *st = rrdset_find_localhost(id);
+ if (unlikely(st && rrdset_flag_check(st, RRDSET_FLAG_ARCHIVED)))
+ return NULL;
+ return st;
+}
+
+extern RRDSET *rrdset_find_bytype(RRDHOST *host, const char *type, const char *id);
+#define rrdset_find_bytype_localhost(type, id) rrdset_find_bytype(localhost, type, id)
+/* This will not return charts that are archived */
+static inline RRDSET *rrdset_find_active_bytype_localhost(const char *type, const char *id)
+{
+ RRDSET *st = rrdset_find_bytype_localhost(type, id);
+ if (unlikely(st && rrdset_flag_check(st, RRDSET_FLAG_ARCHIVED)))
+ return NULL;
+ return st;
+}
+
+extern RRDSET *rrdset_find_byname(RRDHOST *host, const char *name);
+#define rrdset_find_byname_localhost(name) rrdset_find_byname(localhost, name)
+/* This will not return charts that are archived */
+static inline RRDSET *rrdset_find_active_byname_localhost(const char *name)
+{
+ RRDSET *st = rrdset_find_byname_localhost(name);
+ if (unlikely(st && rrdset_flag_check(st, RRDSET_FLAG_ARCHIVED)))
+ return NULL;
+ return st;
+}
+
+extern void rrdset_next_usec_unfiltered(RRDSET *st, usec_t microseconds);
+extern void rrdset_next_usec(RRDSET *st, usec_t microseconds);
+#define rrdset_next(st) rrdset_next_usec(st, 0ULL)
+
+extern void rrdset_done(RRDSET *st);
+
+extern void rrdset_is_obsolete(RRDSET *st);
+extern void rrdset_isnot_obsolete(RRDSET *st);
+
+// checks if the RRDSET should be offered to viewers
+#define rrdset_is_available_for_viewers(st) (rrdset_flag_check(st, RRDSET_FLAG_ENABLED) && !rrdset_flag_check(st, RRDSET_FLAG_HIDDEN) && !rrdset_flag_check(st, RRDSET_FLAG_OBSOLETE) && !rrdset_flag_check(st, RRDSET_FLAG_ARCHIVED) && (st)->dimensions && (st)->rrd_memory_mode != RRD_MEMORY_MODE_NONE)
+#define rrdset_is_available_for_backends(st) (rrdset_flag_check(st, RRDSET_FLAG_ENABLED) && !rrdset_flag_check(st, RRDSET_FLAG_OBSOLETE) && !rrdset_flag_check(st, RRDSET_FLAG_ARCHIVED) && (st)->dimensions)
+#define rrdset_is_archived(st) (rrdset_flag_check(st, RRDSET_FLAG_ARCHIVED) && (st)->dimensions)
+
+// get the total duration in seconds of the round robin database
+#define rrdset_duration(st) ((time_t)( (((st)->counter >= ((unsigned long)(st)->entries))?(unsigned long)(st)->entries:(st)->counter) * (st)->update_every ))
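+// e.g. (illustrative): with entries = 3600 and update_every = 1, once the
+// database has rotated at least once (counter >= entries), rrdset_duration()
+// evaluates to 3600 seconds - one hour of data.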
+
+// get the timestamp of the last entry in the round robin database
+static inline time_t rrdset_last_entry_t_nolock(RRDSET *st)
+{
+ if (st->rrd_memory_mode == RRD_MEMORY_MODE_DBENGINE) {
+ RRDDIM *rd;
+ time_t last_entry_t = 0;
+
+ rrddim_foreach_read(rd, st) {
+ last_entry_t = MAX(last_entry_t, rd->state->query_ops.latest_time(rd));
+ }
+
+ return last_entry_t;
+ } else {
+ return (time_t)st->last_updated.tv_sec;
+ }
+}
+
+static inline time_t rrdset_last_entry_t(RRDSET *st)
+{
+ time_t last_entry_t;
+
+ netdata_rwlock_rdlock(&st->rrdset_rwlock);
+ last_entry_t = rrdset_last_entry_t_nolock(st);
+ netdata_rwlock_unlock(&st->rrdset_rwlock);
+
+ return last_entry_t;
+}
+
+// get the timestamp of the first entry in the round robin database
+static inline time_t rrdset_first_entry_t_nolock(RRDSET *st)
+{
+ if (st->rrd_memory_mode == RRD_MEMORY_MODE_DBENGINE) {
+ RRDDIM *rd;
+ time_t first_entry_t = LONG_MAX;
+
+ rrddim_foreach_read(rd, st) {
+ first_entry_t = MIN(first_entry_t, rd->state->query_ops.oldest_time(rd));
+ }
+
+ if (unlikely(LONG_MAX == first_entry_t)) return 0;
+ return first_entry_t;
+ } else {
+ return (time_t)(rrdset_last_entry_t_nolock(st) - rrdset_duration(st));
+ }
+}
+
+static inline time_t rrdset_first_entry_t(RRDSET *st)
+{
+ time_t first_entry_t;
+
+ netdata_rwlock_rdlock(&st->rrdset_rwlock);
+ first_entry_t = rrdset_first_entry_t_nolock(st);
+ netdata_rwlock_unlock(&st->rrdset_rwlock);
+
+ return first_entry_t;
+}
+
+// get the timestamp of the last entry in the round robin database
+static inline time_t rrddim_last_entry_t(RRDDIM *rd) {
+ if (rd->rrdset->rrd_memory_mode == RRD_MEMORY_MODE_DBENGINE)
+ return rd->state->query_ops.latest_time(rd);
+ return (time_t)rd->rrdset->last_updated.tv_sec;
+}
+
+static inline time_t rrddim_first_entry_t(RRDDIM *rd) {
+ if (rd->rrdset->rrd_memory_mode == RRD_MEMORY_MODE_DBENGINE)
+ return rd->state->query_ops.oldest_time(rd);
+ return (time_t)(rd->rrdset->last_updated.tv_sec - rrdset_duration(rd->rrdset));
+}
+
+time_t rrdhost_last_entry_t(RRDHOST *h);
+
+// get the last slot updated in the round robin database
+#define rrdset_last_slot(st) ((size_t)(((st)->current_entry == 0) ? (st)->entries - 1 : (st)->current_entry - 1))
+
+// get the first / oldest slot updated in the round robin database
+// #define rrdset_first_slot(st) ((size_t)( (((st)->counter >= ((unsigned long)(st)->entries)) ? (unsigned long)( ((unsigned long)(st)->current_entry > 0) ? ((unsigned long)(st)->current_entry) : ((unsigned long)(st)->entries) ) - 1 : 0) ))
+
+// return the slot that has the oldest value
+
+static inline size_t rrdset_first_slot(RRDSET *st) {
+ if(st->counter >= (size_t)st->entries) {
+ // the database has been rotated at least once
+ // the oldest entry is the one that will be next
+ // overwritten by data collection
+ return (size_t)st->current_entry;
+ }
+
+ // we have not rotated the db yet,
+ // so slot 0 is the first entry
+ return 0;
+}
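+
+// e.g. (illustrative): with entries = 5, counter = 7 and current_entry = 2,
+// the db has rotated at least once, so rrdset_first_slot() returns 2 (the
+// slot about to be overwritten next) and rrdset_last_slot() returns 1 (the
+// most recently written slot).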
+
+// get the slot of the round robin database, for the given timestamp (t)
+// it always returns a valid slot, though it may not match the requested time if that time falls outside the round robin database
+// only valid when not using dbengine
+static inline size_t rrdset_time2slot(RRDSET *st, time_t t) {
+ size_t ret = 0;
+ time_t last_entry_t = rrdset_last_entry_t_nolock(st);
+ time_t first_entry_t = rrdset_first_entry_t_nolock(st);
+
+ if(t >= last_entry_t) {
+ // the requested time is after the last entry we have
+ ret = rrdset_last_slot(st);
+ }
+ else {
+ if(t <= first_entry_t) {
+ // the requested time is before the first entry we have
+ ret = rrdset_first_slot(st);
+ }
+ else {
+ if(rrdset_last_slot(st) >= ((last_entry_t - t) / (size_t)(st->update_every)))
+ ret = rrdset_last_slot(st) - ((last_entry_t - t) / (size_t)(st->update_every));
+ else
+ ret = rrdset_last_slot(st) - ((last_entry_t - t) / (size_t)(st->update_every)) + (unsigned long)st->entries;
+ }
+ }
+
+ if(unlikely(ret >= (size_t)st->entries)) {
+ error("INTERNAL ERROR: rrdset_time2slot() on %s returns values outside entries", st->name);
+ ret = (size_t)(st->entries - 1);
+ }
+
+ return ret;
+}
+
+// get the timestamp of a specific slot in the round robin database
+// only valid when not using dbengine
+static inline time_t rrdset_slot2time(RRDSET *st, size_t slot) {
+ time_t ret;
+ time_t last_entry_t = rrdset_last_entry_t_nolock(st);
+ time_t first_entry_t = rrdset_first_entry_t_nolock(st);
+
+ if(slot >= (size_t)st->entries) {
+ error("INTERNAL ERROR: caller of rrdset_slot2time() gives invalid slot %zu", slot);
+ slot = (size_t)st->entries - 1;
+ }
+
+ if(slot > rrdset_last_slot(st)) {
+ ret = last_entry_t - (size_t)st->update_every * (rrdset_last_slot(st) - slot + (size_t)st->entries);
+ }
+ else {
+ // mirror of rrdset_time2slot(): slots at or before the last one
+ // are (rrdset_last_slot(st) - slot) steps of update_every in the past
+ ret = last_entry_t - (size_t)st->update_every * (rrdset_last_slot(st) - slot);
+ }
+
+ if(unlikely(ret < first_entry_t)) {
+ error("INTERNAL ERROR: rrdset_slot2time() on %s returns time too far in the past", st->name);
+ ret = first_entry_t;
+ }
+
+ if(unlikely(ret > last_entry_t)) {
+ error("INTERNAL ERROR: rrdset_slot2time() on %s returns time into the future", st->name);
+ ret = last_entry_t;
+ }
+
+ return ret;
+}
+
+// ----------------------------------------------------------------------------
+// RRD DIMENSION functions
+
+extern void rrdcalc_link_to_rrddim(RRDDIM *rd, RRDSET *st, RRDHOST *host);
+extern RRDDIM *rrddim_add_custom(RRDSET *st, const char *id, const char *name, collected_number multiplier,
+ collected_number divisor, RRD_ALGORITHM algorithm, RRD_MEMORY_MODE memory_mode);//,
+ //int is_archived, uuid_t *dim_uuid);
+#define rrddim_add(st, id, name, multiplier, divisor, algorithm) rrddim_add_custom(st, id, name, multiplier, divisor, \
+ algorithm, (st)->rrd_memory_mode)//, 0, NULL)
+
+extern int rrddim_set_name(RRDSET *st, RRDDIM *rd, const char *name);
+extern int rrddim_set_algorithm(RRDSET *st, RRDDIM *rd, RRD_ALGORITHM algorithm);
+extern int rrddim_set_multiplier(RRDSET *st, RRDDIM *rd, collected_number multiplier);
+extern int rrddim_set_divisor(RRDSET *st, RRDDIM *rd, collected_number divisor);
+
+extern RRDDIM *rrddim_find(RRDSET *st, const char *id);
+/* This will not return dimensions that are archived */
+static inline RRDDIM *rrddim_find_active(RRDSET *st, const char *id)
+{
+ RRDDIM *rd = rrddim_find(st, id);
+ if (unlikely(rd && rrddim_flag_check(rd, RRDDIM_FLAG_ARCHIVED)))
+ return NULL;
+ return rd;
+}
+
+
+extern int rrddim_hide(RRDSET *st, const char *id);
+extern int rrddim_unhide(RRDSET *st, const char *id);
+
+extern void rrddim_is_obsolete(RRDSET *st, RRDDIM *rd);
+extern void rrddim_isnot_obsolete(RRDSET *st, RRDDIM *rd);
+
+extern collected_number rrddim_set_by_pointer(RRDSET *st, RRDDIM *rd, collected_number value);
+extern collected_number rrddim_set(RRDSET *st, const char *id, collected_number value);
+
+extern long align_entries_to_pagesize(RRD_MEMORY_MODE mode, long entries);
+
+// ----------------------------------------------------------------------------
+// Miscellaneous functions
+
+extern int alarm_compare_id(void *a, void *b);
+extern int alarm_compare_name(void *a, void *b);
+
+// ----------------------------------------------------------------------------
+// RRD internal functions
+
+#ifdef NETDATA_RRD_INTERNALS
+
+extern avl_tree_lock rrdhost_root_index;
+
+extern char *rrdset_strncpyz_name(char *to, const char *from, size_t length);
+extern char *rrdset_cache_dir(RRDHOST *host, const char *id, const char *config_section);
+
+#define rrddim_free(st, rd) rrddim_free_custom(st, rd, 0)
+extern void rrddim_free_custom(RRDSET *st, RRDDIM *rd, int db_rotated);
+
+extern int rrddim_compare(void* a, void* b);
+extern int rrdset_compare(void* a, void* b);
+extern int rrdset_compare_name(void* a, void* b);
+extern int rrdfamily_compare(void *a, void *b);
+
+extern RRDFAMILY *rrdfamily_create(RRDHOST *host, const char *id);
+extern void rrdfamily_free(RRDHOST *host, RRDFAMILY *rc);
+
+#define rrdset_index_add(host, st) (RRDSET *)avl_insert_lock(&((host)->rrdset_root_index), (avl *)(st))
+#define rrdset_index_del(host, st) (RRDSET *)avl_remove_lock(&((host)->rrdset_root_index), (avl *)(st))
+extern RRDSET *rrdset_index_del_name(RRDHOST *host, RRDSET *st);
+
+extern void rrdset_free(RRDSET *st);
+extern void rrdset_reset(RRDSET *st);
+extern void rrdset_save(RRDSET *st);
+#define rrdset_delete(st) rrdset_delete_custom(st, 0)
+extern void rrdset_delete_custom(RRDSET *st, int db_rotated);
+extern void rrdset_delete_obsolete_dimensions(RRDSET *st);
+
+extern void rrdhost_cleanup_obsolete_charts(RRDHOST *host);
+extern RRDHOST *rrdhost_create(
+ const char *hostname, const char *registry_hostname, const char *guid, const char *os, const char *timezone,
+ const char *tags, const char *program_name, const char *program_version, int update_every, long entries,
+ RRD_MEMORY_MODE memory_mode, unsigned int health_enabled, unsigned int rrdpush_enabled, char *rrdpush_destination,
+ char *rrdpush_api_key, char *rrdpush_send_charts_matching, struct rrdhost_system_info *system_info,
+ int is_localhost); //TODO: Remove , int is_archived);
+
+#endif /* NETDATA_RRD_INTERNALS */
+
+extern void set_host_properties(
+ RRDHOST *host, int update_every, RRD_MEMORY_MODE memory_mode, const char *hostname, const char *registry_hostname,
+ const char *guid, const char *os, const char *tags, const char *tzone, const char *program_name,
+ const char *program_version);
+
+// ----------------------------------------------------------------------------
+// RRD DB engine declarations
+
+#ifdef ENABLE_DBENGINE
+#include "database/engine/rrdengineapi.h"
+#include "sqlite/sqlite_functions.h"
+#endif
+
+#endif /* NETDATA_RRD_H */
diff --git a/database/rrdcalc.c b/database/rrdcalc.c
new file mode 100644
index 0000000..935ee9c
--- /dev/null
+++ b/database/rrdcalc.c
@@ -0,0 +1,767 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#define NETDATA_HEALTH_INTERNALS
+#include "rrd.h"
+
+// ----------------------------------------------------------------------------
+// RRDCALC management
+
+inline const char *rrdcalc_status2string(RRDCALC_STATUS status) {
+ switch(status) {
+ case RRDCALC_STATUS_REMOVED:
+ return "REMOVED";
+
+ case RRDCALC_STATUS_UNDEFINED:
+ return "UNDEFINED";
+
+ case RRDCALC_STATUS_UNINITIALIZED:
+ return "UNINITIALIZED";
+
+ case RRDCALC_STATUS_CLEAR:
+ return "CLEAR";
+
+ case RRDCALC_STATUS_RAISED:
+ return "RAISED";
+
+ case RRDCALC_STATUS_WARNING:
+ return "WARNING";
+
+ case RRDCALC_STATUS_CRITICAL:
+ return "CRITICAL";
+
+ default:
+ error("Unknown alarm status %d", status);
+ return "UNKNOWN";
+ }
+}
+
+static void rrdsetcalc_link(RRDSET *st, RRDCALC *rc) {
+ RRDHOST *host = st->rrdhost;
+
+ debug(D_HEALTH, "Health linking alarm '%s.%s' to chart '%s' of host '%s'", rc->chart?rc->chart:"NOCHART", rc->name, st->id, host->hostname);
+
+ rc->last_status_change = now_realtime_sec();
+ rc->rrdset = st;
+
+ rc->rrdset_next = st->alarms;
+ rc->rrdset_prev = NULL;
+
+ if(rc->rrdset_next)
+ rc->rrdset_next->rrdset_prev = rc;
+
+ st->alarms = rc;
+
+ if(rc->update_every < rc->rrdset->update_every) {
+ error("Health alarm '%s.%s' has update every %d, less than chart update every %d. Setting alarm update frequency to %d.", rc->rrdset->id, rc->name, rc->update_every, rc->rrdset->update_every, rc->rrdset->update_every);
+ rc->update_every = rc->rrdset->update_every;
+ }
+
+ if(!isnan(rc->green) && isnan(st->green)) {
+ debug(D_HEALTH, "Health alarm '%s.%s' green threshold set from " CALCULATED_NUMBER_FORMAT_AUTO " to " CALCULATED_NUMBER_FORMAT_AUTO ".", rc->rrdset->id, rc->name, rc->rrdset->green, rc->green);
+ st->green = rc->green;
+ }
+
+ if(!isnan(rc->red) && isnan(st->red)) {
+ debug(D_HEALTH, "Health alarm '%s.%s' red threshold set from " CALCULATED_NUMBER_FORMAT_AUTO " to " CALCULATED_NUMBER_FORMAT_AUTO ".", rc->rrdset->id, rc->name, rc->rrdset->red, rc->red);
+ st->red = rc->red;
+ }
+
+ rc->local = rrdvar_create_and_index("local", &st->rrdvar_root_index, rc->name, RRDVAR_TYPE_CALCULATED, RRDVAR_OPTION_RRDCALC_LOCAL_VAR, &rc->value);
+ rc->family = rrdvar_create_and_index("family", &st->rrdfamily->rrdvar_root_index, rc->name, RRDVAR_TYPE_CALCULATED, RRDVAR_OPTION_RRDCALC_FAMILY_VAR, &rc->value);
+
+ char fullname[RRDVAR_MAX_LENGTH + 1];
+ snprintfz(fullname, RRDVAR_MAX_LENGTH, "%s.%s", st->id, rc->name);
+ rc->hostid = rrdvar_create_and_index("host", &host->rrdvar_root_index, fullname, RRDVAR_TYPE_CALCULATED, RRDVAR_OPTION_RRDCALC_HOST_CHARTID_VAR, &rc->value);
+
+ snprintfz(fullname, RRDVAR_MAX_LENGTH, "%s.%s", st->name, rc->name);
+ rc->hostname = rrdvar_create_and_index("host", &host->rrdvar_root_index, fullname, RRDVAR_TYPE_CALCULATED, RRDVAR_OPTION_RRDCALC_HOST_CHARTNAME_VAR, &rc->value);
+
+ if(rc->hostid && !rc->hostname)
+ rc->hostid->options |= RRDVAR_OPTION_RRDCALC_HOST_CHARTNAME_VAR;
+
+ if(!rc->units) rc->units = strdupz(st->units);
+
+ if(!rrdcalc_isrepeating(rc)) {
+ time_t now = now_realtime_sec();
+ ALARM_ENTRY *ae = health_create_alarm_entry(
+ host,
+ rc->id,
+ rc->next_event_id++,
+ now,
+ rc->name,
+ rc->rrdset->id,
+ rc->rrdset->family,
+ rc->exec,
+ rc->recipient,
+ now - rc->last_status_change,
+ rc->old_value,
+ rc->value,
+ rc->status,
+ RRDCALC_STATUS_UNINITIALIZED,
+ rc->source,
+ rc->units,
+ rc->info,
+ 0,
+ 0
+ );
+ health_alarm_log(host, ae);
+ }
+}
+
+static inline int rrdcalc_test_additional_restriction(RRDCALC *rc, RRDSET *st){
+ if (rc->module_match && !simple_pattern_matches(rc->module_pattern, st->module_name))
+ return 0;
+
+ if (rc->plugin_match && !simple_pattern_matches(rc->plugin_pattern, st->plugin_name))
+ return 0;
+
+ return 1;
+}
+
+static inline int rrdcalc_is_matching_this_rrdset(RRDCALC *rc, RRDSET *st) {
+ if(((rc->hash_chart == st->hash && !strcmp(rc->chart, st->id)) ||
+ (rc->hash_chart == st->hash_name && !strcmp(rc->chart, st->name))) &&
+ rrdcalc_test_additional_restriction(rc, st)) {
+ return 1;
+ }
+
+ return 0;
+}
+
+// this has to be called while the RRDHOST is locked
+inline void rrdsetcalc_link_matching(RRDSET *st) {
+ RRDHOST *host = st->rrdhost;
+ // debug(D_HEALTH, "find matching alarms for chart '%s'", st->id);
+
+ RRDCALC *rc;
+ for(rc = host->alarms; rc ; rc = rc->next) {
+ if(unlikely(rc->rrdset))
+ continue;
+
+ if(unlikely(rrdcalc_is_matching_this_rrdset(rc, st)))
+ rrdsetcalc_link(st, rc);
+ }
+}
+
+// this has to be called while the RRDHOST is locked
+inline void rrdsetcalc_unlink(RRDCALC *rc) {
+ RRDSET *st = rc->rrdset;
+
+ if(!st) {
+ debug(D_HEALTH, "Requested to unlink RRDCALC '%s.%s' which is not linked to any RRDSET", rc->chart?rc->chart:"NOCHART", rc->name);
+ error("Requested to unlink RRDCALC '%s.%s' which is not linked to any RRDSET", rc->chart?rc->chart:"NOCHART", rc->name);
+ return;
+ }
+
+ RRDHOST *host = st->rrdhost;
+
+ if(!rrdcalc_isrepeating(rc)) {
+ time_t now = now_realtime_sec();
+ ALARM_ENTRY *ae = health_create_alarm_entry(
+ host,
+ rc->id,
+ rc->next_event_id++,
+ now,
+ rc->name,
+ rc->rrdset->id,
+ rc->rrdset->family,
+ rc->exec,
+ rc->recipient,
+ now - rc->last_status_change,
+ rc->old_value,
+ rc->value,
+ rc->status,
+ RRDCALC_STATUS_REMOVED,
+ rc->source,
+ rc->units,
+ rc->info,
+ 0,
+ 0
+ );
+ health_alarm_log(host, ae);
+ }
+
+ debug(D_HEALTH, "Health unlinking alarm '%s.%s' from chart '%s' of host '%s'", rc->chart?rc->chart:"NOCHART", rc->name, st->id, host->hostname);
+
+ // unlink it
+ if(rc->rrdset_prev)
+ rc->rrdset_prev->rrdset_next = rc->rrdset_next;
+
+ if(rc->rrdset_next)
+ rc->rrdset_next->rrdset_prev = rc->rrdset_prev;
+
+ if(st->alarms == rc)
+ st->alarms = rc->rrdset_next;
+
+ rc->rrdset_prev = rc->rrdset_next = NULL;
+
+ rrdvar_free(host, &st->rrdvar_root_index, rc->local);
+ rc->local = NULL;
+
+ rrdvar_free(host, &st->rrdfamily->rrdvar_root_index, rc->family);
+ rc->family = NULL;
+
+ rrdvar_free(host, &host->rrdvar_root_index, rc->hostid);
+ rc->hostid = NULL;
+
+ rrdvar_free(host, &host->rrdvar_root_index, rc->hostname);
+ rc->hostname = NULL;
+
+ rc->rrdset = NULL;
+
+ // RRDCALC will remain in RRDHOST
+ // so that if the matching chart is found in the future
+ // it will be applied automatically
+}
+
+RRDCALC *rrdcalc_find(RRDSET *st, const char *name) {
+ RRDCALC *rc;
+ uint32_t hash = simple_hash(name);
+
+ for( rc = st->alarms; rc ; rc = rc->rrdset_next ) {
+ if(unlikely(rc->hash == hash && !strcmp(rc->name, name)))
+ return rc;
+ }
+
+ return NULL;
+}
+
+inline int rrdcalc_exists(RRDHOST *host, const char *chart, const char *name, uint32_t hash_chart, uint32_t hash_name) {
+ RRDCALC *rc;
+
+ if(unlikely(!chart)) {
+ error("attempt to find RRDCALC '%s' without giving a chart name", name);
+ return 1;
+ }
+
+ if(unlikely(!hash_chart)) hash_chart = simple_hash(chart);
+ if(unlikely(!hash_name)) hash_name = simple_hash(name);
+
+ // make sure it does not already exist
+ for(rc = host->alarms; rc ; rc = rc->next) {
+ if (unlikely(rc->chart && rc->hash == hash_name && rc->hash_chart == hash_chart && !strcmp(name, rc->name) && !strcmp(chart, rc->chart))) {
+ debug(D_HEALTH, "Health alarm '%s.%s' already exists in host '%s'.", chart, name, host->hostname);
+ info("Health alarm '%s.%s' already exists in host '%s'.", chart, name, host->hostname);
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+inline uint32_t rrdcalc_get_unique_id(RRDHOST *host, const char *chart, const char *name, uint32_t *next_event_id) {
+ if(chart && name) {
+ uint32_t hash_chart = simple_hash(chart);
+ uint32_t hash_name = simple_hash(name);
+
+ // re-use old IDs, by looking them up in the alarm log
+ ALARM_ENTRY *ae;
+ for(ae = host->health_log.alarms; ae ;ae = ae->next) {
+ if(unlikely(ae->hash_name == hash_name && ae->hash_chart == hash_chart && !strcmp(name, ae->name) && !strcmp(chart, ae->chart))) {
+ if(next_event_id) *next_event_id = ae->alarm_event_id + 1;
+ return ae->alarm_id;
+ }
+ }
+ }
+
+ if (unlikely(!host->health_log.next_alarm_id))
+ host->health_log.next_alarm_id = (uint32_t)now_realtime_sec();
+
+ return host->health_log.next_alarm_id++;
+}
+
+/**
+ * Alarm name with dimension
+ *
+ * Build a new alarm name by appending a dimension name to the current one.
+ *
+ * @param name the alarm name
+ * @param namelen the length of the alarm name
+ * @param dim the dimension name of the chart
+ * @param dimlen the length of the dimension name
+ *
+ * @return It returns the newly allocated name on success and the old name otherwise
+ */
+char *alarm_name_with_dim(char *name, size_t namelen, const char *dim, size_t dimlen) {
+ char *newname,*move;
+
+ newname = malloc(namelen + dimlen + 2);
+ if(newname) {
+ move = newname;
+ memcpy(move, name, namelen);
+ move += namelen;
+
+ *move++ = '_';
+ memcpy(move, dim, dimlen);
+ move += dimlen;
+ *move = '\0';
+ } else {
+ newname = name;
+ }
+
+ return newname;
+}
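+
+// e.g. (illustrative): alarm_name_with_dim("cpu_usage", 9, "user", 4) returns
+// a newly allocated "cpu_usage_user"; if the allocation fails, the original
+// pointer is returned unchanged.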
+
+/**
+ * Remove pipe comma
+ *
+ * Remove the pipes and commas converting to space.
+ *
+ * @param str the string to change.
+ */
+void dimension_remove_pipe_comma(char *str) {
+ while(*str) {
+ if(*str == '|' || *str == ',') *str = ' ';
+
+ str++;
+ }
+}
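+
+// e.g. (illustrative): a buffer holding "in|out,total" is rewritten in place
+// to "in out total".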
+
+inline void rrdcalc_add_to_host(RRDHOST *host, RRDCALC *rc) {
+ rrdhost_check_rdlock(host);
+
+ if(rc->calculation) {
+ rc->calculation->status = &rc->status;
+ rc->calculation->myself = &rc->value;
+ rc->calculation->after = &rc->db_after;
+ rc->calculation->before = &rc->db_before;
+ rc->calculation->rrdcalc = rc;
+ }
+
+ if(rc->warning) {
+ rc->warning->status = &rc->status;
+ rc->warning->myself = &rc->value;
+ rc->warning->after = &rc->db_after;
+ rc->warning->before = &rc->db_before;
+ rc->warning->rrdcalc = rc;
+ }
+
+ if(rc->critical) {
+ rc->critical->status = &rc->status;
+ rc->critical->myself = &rc->value;
+ rc->critical->after = &rc->db_after;
+ rc->critical->before = &rc->db_before;
+ rc->critical->rrdcalc = rc;
+ }
+
+ if(!rc->foreachdim) {
+ // link it to the host alarms list
+ if(likely(host->alarms)) {
+ // append it
+ RRDCALC *t;
+ for(t = host->alarms; t && t->next ; t = t->next) ;
+ t->next = rc;
+ }
+ else {
+ host->alarms = rc;
+ }
+
+ // link it to its chart
+ RRDSET *st;
+ rrdset_foreach_read(st, host) {
+ if(rrdcalc_is_matching_this_rrdset(rc, st)) {
+ rrdsetcalc_link(st, rc);
+ break;
+ }
+ }
+ } else {
+ // link it to the foreach list, since it has a foreach clause
+ if(likely(host->alarms_with_foreach)) {
+ // append it
+ RRDCALC *t;
+ for(t = host->alarms_with_foreach; t && t->next ; t = t->next) ;
+ t->next = rc;
+ }
+ else {
+ host->alarms_with_foreach = rc;
+ }
+
+ // the alarm is not linked directly to the host here; that happens when its children are created
+ }
+}
+
+inline RRDCALC *rrdcalc_create_from_template(RRDHOST *host, RRDCALCTEMPLATE *rt, const char *chart) {
+ debug(D_HEALTH, "Health creating dynamic alarm (from template) '%s.%s'", chart, rt->name);
+
+ if(rrdcalc_exists(host, chart, rt->name, 0, 0))
+ return NULL;
+
+ RRDCALC *rc = callocz(1, sizeof(RRDCALC));
+ rc->next_event_id = 1;
+ rc->name = strdupz(rt->name);
+ rc->hash = simple_hash(rc->name);
+ rc->chart = strdupz(chart);
+ rc->hash_chart = simple_hash(rc->chart);
+
+ rc->id = rrdcalc_get_unique_id(host, rc->chart, rc->name, &rc->next_event_id);
+
+ if(rt->dimensions) rc->dimensions = strdupz(rt->dimensions);
+ if(rt->foreachdim) {
+ rc->foreachdim = strdupz(rt->foreachdim);
+ rc->spdim = health_pattern_from_foreach(rc->foreachdim);
+ }
+ rc->foreachcounter = rt->foreachcounter;
+
+ rc->green = rt->green;
+ rc->red = rt->red;
+ rc->value = NAN;
+ rc->old_value = NAN;
+
+ rc->delay_up_duration = rt->delay_up_duration;
+ rc->delay_down_duration = rt->delay_down_duration;
+ rc->delay_max_duration = rt->delay_max_duration;
+ rc->delay_multiplier = rt->delay_multiplier;
+
+ rc->last_repeat = 0;
+ rc->warn_repeat_every = rt->warn_repeat_every;
+ rc->crit_repeat_every = rt->crit_repeat_every;
+
+ rc->group = rt->group;
+ rc->after = rt->after;
+ rc->before = rt->before;
+ rc->update_every = rt->update_every;
+ rc->options = rt->options;
+
+ if(rt->exec) rc->exec = strdupz(rt->exec);
+ if(rt->recipient) rc->recipient = strdupz(rt->recipient);
+ if(rt->source) rc->source = strdupz(rt->source);
+ if(rt->units) rc->units = strdupz(rt->units);
+ if(rt->info) rc->info = strdupz(rt->info);
+
+ if(rt->calculation) {
+ rc->calculation = expression_parse(rt->calculation->source, NULL, NULL);
+ if(!rc->calculation)
+ error("Health alarm '%s.%s': failed to parse calculation expression '%s'", chart, rt->name, rt->calculation->source);
+ }
+ if(rt->warning) {
+ rc->warning = expression_parse(rt->warning->source, NULL, NULL);
+ if(!rc->warning)
+ error("Health alarm '%s.%s': failed to re-parse warning expression '%s'", chart, rt->name, rt->warning->source);
+ }
+ if(rt->critical) {
+ rc->critical = expression_parse(rt->critical->source, NULL, NULL);
+ if(!rc->critical)
+ error("Health alarm '%s.%s': failed to re-parse critical expression '%s'", chart, rt->name, rt->critical->source);
+ }
+
+ debug(D_HEALTH, "Health runtime added alarm '%s.%s': exec '%s', recipient '%s', green " CALCULATED_NUMBER_FORMAT_AUTO ", red " CALCULATED_NUMBER_FORMAT_AUTO ", lookup: group %d, after %d, before %d, options %u, dimensions '%s', for each dimension '%s', update every %d, calculation '%s', warning '%s', critical '%s', source '%s', delay up %d, delay down %d, delay max %d, delay_multiplier %f, warn_repeat_every %u, crit_repeat_every %u",
+ (rc->chart)?rc->chart:"NOCHART",
+ rc->name,
+ (rc->exec)?rc->exec:"DEFAULT",
+ (rc->recipient)?rc->recipient:"DEFAULT",
+ rc->green,
+ rc->red,
+ (int)rc->group,
+ rc->after,
+ rc->before,
+ rc->options,
+ (rc->dimensions)?rc->dimensions:"NONE",
+ (rc->foreachdim)?rc->foreachdim:"NONE",
+ rc->update_every,
+ (rc->calculation)?rc->calculation->parsed_as:"NONE",
+ (rc->warning)?rc->warning->parsed_as:"NONE",
+ (rc->critical)?rc->critical->parsed_as:"NONE",
+ rc->source,
+ rc->delay_up_duration,
+ rc->delay_down_duration,
+ rc->delay_max_duration,
+ rc->delay_multiplier,
+ rc->warn_repeat_every,
+ rc->crit_repeat_every
+ );
+
+ rrdcalc_add_to_host(host, rc);
+ if(!rt->foreachdim) {
+ RRDCALC *rdcmp = (RRDCALC *) avl_insert_lock(&(host)->alarms_idx_health_log,(avl *)rc);
+ if (rdcmp != rc) {
+ error("Cannot insert the alarm index ID %s",rc->name);
+ }
+ }
+
+ return rc;
+}
+
+/**
+ * Create from RRDCALC
+ *
+ * Create a new alarm using another alarm as a template.
+ *
+ * @param rc the alarm that will be used as the source
+ * @param host the host structure
+ * @param name the name of the new alarm
+ * @param dimension the dimension the new alarm is created for
+ *
+ * @return It returns the newly created alarm.
+ */
+inline RRDCALC *rrdcalc_create_from_rrdcalc(RRDCALC *rc, RRDHOST *host, const char *name, const char *dimension) {
+ RRDCALC *newrc = callocz(1, sizeof(RRDCALC));
+
+ newrc->next_event_id = 1;
+ newrc->id = rrdcalc_get_unique_id(host, rc->chart, name, &rc->next_event_id);
+ newrc->name = (char *)name;
+ newrc->hash = simple_hash(newrc->name);
+ newrc->chart = strdupz(rc->chart);
+ newrc->hash_chart = simple_hash(rc->chart);
+
+ newrc->dimensions = strdupz(dimension);
+ newrc->foreachdim = NULL;
+ rc->foreachcounter++;
+ newrc->foreachcounter = rc->foreachcounter;
+
+ newrc->green = rc->green;
+ newrc->red = rc->red;
+ newrc->value = NAN;
+ newrc->old_value = NAN;
+
+ newrc->delay_up_duration = rc->delay_up_duration;
+ newrc->delay_down_duration = rc->delay_down_duration;
+ newrc->delay_max_duration = rc->delay_max_duration;
+ newrc->delay_multiplier = rc->delay_multiplier;
+
+ newrc->last_repeat = 0;
+ newrc->warn_repeat_every = rc->warn_repeat_every;
+ newrc->crit_repeat_every = rc->crit_repeat_every;
+
+ newrc->group = rc->group;
+ newrc->after = rc->after;
+ newrc->before = rc->before;
+ newrc->update_every = rc->update_every;
+ newrc->options = rc->options;
+
+ if(rc->exec) newrc->exec = strdupz(rc->exec);
+ if(rc->recipient) newrc->recipient = strdupz(rc->recipient);
+ if(rc->source) newrc->source = strdupz(rc->source);
+ if(rc->units) newrc->units = strdupz(rc->units);
+ if(rc->info) newrc->info = strdupz(rc->info);
+
+ if(rc->calculation) {
+ newrc->calculation = expression_parse(rc->calculation->source, NULL, NULL);
+ if(!newrc->calculation)
+ error("Health alarm '%s.%s': failed to parse calculation expression '%s'", rc->chart, rc->name, rc->calculation->source);
+ }
+
+ if(rc->warning) {
+ newrc->warning = expression_parse(rc->warning->source, NULL, NULL);
+ if(!newrc->warning)
+ error("Health alarm '%s.%s': failed to re-parse warning expression '%s'", rc->chart, rc->name, rc->warning->source);
+ }
+
+ if(rc->critical) {
+ newrc->critical = expression_parse(rc->critical->source, NULL, NULL);
+ if(!newrc->critical)
+ error("Health alarm '%s.%s': failed to re-parse critical expression '%s'", rc->chart, rc->name, rc->critical->source);
+ }
+
+ return newrc;
+}
+
+void rrdcalc_free(RRDCALC *rc) {
+ if(unlikely(!rc)) return;
+
+ expression_free(rc->calculation);
+ expression_free(rc->warning);
+ expression_free(rc->critical);
+
+ freez(rc->name);
+ freez(rc->chart);
+ freez(rc->family);
+ freez(rc->dimensions);
+ freez(rc->foreachdim);
+ freez(rc->exec);
+ freez(rc->recipient);
+ freez(rc->source);
+ freez(rc->units);
+ freez(rc->info);
+ simple_pattern_free(rc->spdim);
+ freez(rc->labels);
+ simple_pattern_free(rc->splabels);
+ freez(rc->module_match);
+ simple_pattern_free(rc->module_pattern);
+ freez(rc->plugin_match);
+ simple_pattern_free(rc->plugin_pattern);
+ freez(rc);
+}
+
+void rrdcalc_unlink_and_free(RRDHOST *host, RRDCALC *rc) {
+ if(unlikely(!rc)) return;
+
+ debug(D_HEALTH, "Health removing alarm '%s.%s' of host '%s'", rc->chart?rc->chart:"NOCHART", rc->name, host->hostname);
+
+ // unlink it from RRDSET
+ if(rc->rrdset) rrdsetcalc_unlink(rc);
+
+ // unlink it from RRDHOST
+ if(unlikely(rc == host->alarms))
+ host->alarms = rc->next;
+ else {
+ RRDCALC *t;
+ for(t = host->alarms; t && t->next != rc; t = t->next) ;
+ if(t) {
+ t->next = rc->next;
+ rc->next = NULL;
+ }
+ else
+ error("Cannot unlink alarm '%s.%s' from host '%s': not found", rc->chart?rc->chart:"NOCHART", rc->name, host->hostname);
+ }
+
+ RRDCALC *rdcmp = (RRDCALC *) avl_search_lock(&(host)->alarms_idx_health_log, (avl *)rc);
+ if (rdcmp) {
+ rdcmp = (RRDCALC *) avl_remove_lock(&(host)->alarms_idx_health_log, (avl *)rc);
+ if (!rdcmp) {
+ error("Cannot remove the health alarm index from health_log");
+ }
+ }
+
+ rdcmp = (RRDCALC *) avl_search_lock(&(host)->alarms_idx_name, (avl *)rc);
+ if (rdcmp) {
+ rdcmp = (RRDCALC *) avl_remove_lock(&(host)->alarms_idx_name, (avl *)rc);
+ if (!rdcmp) {
+ error("Cannot remove the health alarm index from idx_name");
+ }
+ }
+
+ rrdcalc_free(rc);
+}
+
+void rrdcalc_foreach_unlink_and_free(RRDHOST *host, RRDCALC *rc) {
+
+ if(unlikely(rc == host->alarms_with_foreach))
+ host->alarms_with_foreach = rc->next;
+ else {
+ RRDCALC *t;
+ for(t = host->alarms_with_foreach; t && t->next != rc; t = t->next) ;
+ if(t) {
+ t->next = rc->next;
+ rc->next = NULL;
+ }
+ else
+ error("Cannot unlink alarm '%s.%s' from host '%s': not found", rc->chart?rc->chart:"NOCHART", rc->name, host->hostname);
+ }
+
+ rrdcalc_free(rc);
+}
+
+static void rrdcalc_labels_unlink_alarm_loop(RRDHOST *host, RRDCALC *alarms) {
+ RRDCALC *rc = alarms;
+ while (rc) {
+ if (!rc->labels) {
+ rc = rc->next;
+ continue;
+ }
+
+ char cmp[CONFIG_FILE_LINE_MAX+1];
+ struct label *move = host->labels.head;
+ while(move) {
+ snprintf(cmp, CONFIG_FILE_LINE_MAX, "%s=%s", move->key, move->value);
+ if (simple_pattern_matches(rc->splabels, move->key) ||
+ simple_pattern_matches(rc->splabels, cmp)) {
+ break;
+ }
+
+ move = move->next;
+ }
+
+ RRDCALC *next = rc->next;
+ if(!move) {
+ info("Health configuration for alarm '%s' cannot be applied, because the host %s does not have the label(s) '%s'",
+ rc->name,
+ host->hostname,
+ rc->labels);
+
+ if(host->alarms == alarms) {
+ rrdcalc_unlink_and_free(host, rc);
+ } else
+ rrdcalc_foreach_unlink_and_free(host, rc);
+
+ }
+
+ rc = next;
+ }
+}
+
+void rrdcalc_labels_unlink_alarm_from_host(RRDHOST *host) {
+ rrdhost_check_rdlock(host);
+ netdata_rwlock_rdlock(&host->labels.labels_rwlock);
+
+ rrdcalc_labels_unlink_alarm_loop(host, host->alarms);
+ rrdcalc_labels_unlink_alarm_loop(host, host->alarms_with_foreach);
+
+ netdata_rwlock_unlock(&host->labels.labels_rwlock);
+}
+
+void rrdcalc_labels_unlink() {
+ rrd_rdlock();
+
+ RRDHOST *host;
+ rrdhost_foreach_read(host) {
+ if (unlikely(!host->health_enabled))
+ continue;
+
+ if (host->labels.head) {
+ rrdhost_wrlock(host);
+
+ rrdcalc_labels_unlink_alarm_from_host(host);
+
+ rrdhost_unlock(host);
+ }
+ }
+
+ rrd_unlock();
+}
+
+// ----------------------------------------------------------------------------
+// Alarm
+
+
+/**
+ * Alarm is repeating
+ *
+ * Is this alarm repeating?
+ *
+ * @param host the structure that has the binary tree
+ * @param alarm_id the id of the alarm to search for
+ *
+ * @return It returns 1 if it is repeating and 0 otherwise
+ */
+int alarm_isrepeating(RRDHOST *host, uint32_t alarm_id) {
+ RRDCALC findme;
+ findme.id = alarm_id;
+ RRDCALC *rc = (RRDCALC *)avl_search_lock(&host->alarms_idx_health_log, (avl *)&findme);
+ if (!rc) {
+ return 0;
+ }
+ return rrdcalc_isrepeating(rc);
+}
+
+/**
+ * Entry is repeating
+ *
+ * Check whether the id of an alarm entry is still present in the host structure.
+ *
+ * @param host the structure that has the binary tree
+ * @param ae the alarm entry
+ *
+ * @return It returns 1 if it is repeating and 0 otherwise
+ */
+int alarm_entry_isrepeating(RRDHOST *host, ALARM_ENTRY *ae) {
+ return alarm_isrepeating(host, ae->alarm_id);
+}
+
+/**
+ * Max last repeat
+ *
+ * Find, by name, the alarm used to check the maximum last_repeat for the alarms associated with a host.
+ *
+ * @param host the structure that has the binary tree
+ * @param alarm_name the name of the alarm to search for
+ * @param hash the hash of the alarm name
+ *
+ * @return It returns the matching RRDCALC, or NULL when no alarm with that name exists
+ */
+RRDCALC *alarm_max_last_repeat(RRDHOST *host, char *alarm_name,uint32_t hash) {
+ RRDCALC findme;
+ findme.name = alarm_name;
+ findme.hash = hash;
+ RRDCALC *rc = (RRDCALC *)avl_search_lock(&host->alarms_idx_name, (avl *)&findme);
+
+ return rc;
+}
diff --git a/database/rrdcalc.h b/database/rrdcalc.h
new file mode 100644
index 0000000..cd0d700
--- /dev/null
+++ b/database/rrdcalc.h
@@ -0,0 +1,182 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "rrd.h"
+
+#ifndef NETDATA_RRDCALC_H
+#define NETDATA_RRDCALC_H 1
+
+// calculated variables (defined in health configuration)
+// These aggregate time-series data at fixed intervals
+// (defined in their update_every member below).
+// They increase the overhead of netdata.
+//
+// These calculations are allocated and linked (->next)
+// under RRDHOST.
+// They are also linked to RRDSET (of course only when the
+// chart is found, via ->rrdset_next and ->rrdset_prev).
+// This doubly-linked list is maintained sorted at all times,
+// with RRDSET.calculations pointing to the RRDCALC to be
+// processed next.
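+//
+// A minimal traversal sketch (illustrative only): walking all the alarms of a
+// host follows the singly-linked ->next chain under the host lock:
+//
+//     rrdhost_rdlock(host);
+//     for(RRDCALC *rc = host->alarms; rc ; rc = rc->next) { /* inspect rc */ }
+//     rrdhost_unlock(host);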
+
+#define RRDCALC_FLAG_DB_ERROR 0x00000001
+#define RRDCALC_FLAG_DB_NAN 0x00000002
+/* #define RRDCALC_FLAG_DB_STALE 0x00000004 */
+#define RRDCALC_FLAG_CALC_ERROR 0x00000008
+#define RRDCALC_FLAG_WARN_ERROR 0x00000010
+#define RRDCALC_FLAG_CRIT_ERROR 0x00000020
+#define RRDCALC_FLAG_RUNNABLE 0x00000040
+#define RRDCALC_FLAG_DISABLED 0x00000080
+#define RRDCALC_FLAG_SILENCED 0x00000100
+#define RRDCALC_FLAG_RUN_ONCE 0x00000200
+#define RRDCALC_FLAG_NO_CLEAR_NOTIFICATION 0x80000000
+
+
+struct rrdcalc {
+ avl avl; // the index, with key the id - this has to be first!
+ uint32_t id; // the unique id of this alarm
+ uint32_t next_event_id; // the next event id that will be used for this alarm
+
+ char *name; // the name of this alarm
+ uint32_t hash; // the hash of the alarm name
+
+ char *exec; // the command to execute when this alarm switches state
+ char *recipient; // the recipient of the alarm (the first parameter to exec)
+
+ char *chart; // the chart id this should be linked to
+ uint32_t hash_chart;
+
+
+    char *plugin_match;             // the plugin name that should be linked to
+    SIMPLE_PATTERN *plugin_pattern;
+
+    char *module_match;             // the module name that should be linked to
+    SIMPLE_PATTERN *module_pattern;
+
+ char *source; // the source of this alarm
+ char *units; // the units of the alarm
+ char *info; // a short description of the alarm
+
+ int update_every; // update frequency for the alarm
+
+ // the red and green threshold of this alarm (to be set to the chart)
+ calculated_number green;
+ calculated_number red;
+
+ // ------------------------------------------------------------------------
+ // database lookup settings
+
+ char *dimensions; // the chart dimensions
+    char *foreachdim;               // the group of dimensions that `foreach` will be applied to.
+    SIMPLE_PATTERN *spdim;          // used if and only if there is a simple pattern for the chart.
+    int foreachcounter;             // the number of alarms created with foreachdim; it also works as an id
+                                    // for the children
+ RRDR_GROUPING group; // grouping method: average, max, etc.
+ int before; // ending point in time-series
+ int after; // starting point in time-series
+ uint32_t options; // calculation options
+
+ // ------------------------------------------------------------------------
+ // expressions related to the alarm
+
+ EVAL_EXPRESSION *calculation; // expression to calculate the value of the alarm
+ EVAL_EXPRESSION *warning; // expression to check the warning condition
+ EVAL_EXPRESSION *critical; // expression to check the critical condition
+
+ // ------------------------------------------------------------------------
+ // notification delay settings
+
+ int delay_up_duration; // duration to delay notifications when alarm raises
+ int delay_down_duration; // duration to delay notifications when alarm lowers
+ int delay_max_duration; // the absolute max delay to apply to this alarm
+ float delay_multiplier; // multiplier for all delays when alarms switch status
+ // while now < delay_up_to
+
+ // ------------------------------------------------------------------------
+ // notification repeat settings
+
+ uint32_t warn_repeat_every; // interval between repeating warning notifications
+ uint32_t crit_repeat_every; // interval between repeating critical notifications
+
+ // ------------------------------------------------------------------------
+ // Labels settings
+ char *labels; // the label read from an alarm file
+ SIMPLE_PATTERN *splabels; // the simple pattern of labels
+
+ // ------------------------------------------------------------------------
+ // runtime information
+
+ RRDCALC_STATUS old_status; // the old status of the alarm
+ RRDCALC_STATUS status; // the current status of the alarm
+
+ calculated_number value; // the current value of the alarm
+ calculated_number old_value; // the previous value of the alarm
+
+ uint32_t rrdcalc_flags; // check RRDCALC_FLAG_*
+
+ time_t last_updated; // the last update timestamp of the alarm
+ time_t next_update; // the next update timestamp of the alarm
+ time_t last_status_change; // the timestamp of the last time this alarm changed status
+ time_t last_repeat; // the last time the alarm got repeated
+
+ time_t db_after; // the first timestamp evaluated by the db lookup
+ time_t db_before; // the last timestamp evaluated by the db lookup
+
+ time_t delay_up_to_timestamp; // the timestamp up to which we should delay notifications
+ int delay_up_current; // the current up notification delay duration
+ int delay_down_current; // the current down notification delay duration
+ int delay_last; // the last delay we used
+
+ // ------------------------------------------------------------------------
+ // variables this alarm exposes to the rest of the alarms
+
+ RRDVAR *local;
+ RRDVAR *family;
+ RRDVAR *hostid;
+ RRDVAR *hostname;
+
+ // ------------------------------------------------------------------------
+    // the chart this alarm is linked to
+
+ struct rrdset *rrdset;
+
+ // linking of this alarm on its chart
+ struct rrdcalc *rrdset_next;
+ struct rrdcalc *rrdset_prev;
+
+ struct rrdcalc *next;
+};
+
+extern int alarm_isrepeating(RRDHOST *host, uint32_t alarm_id);
+extern int alarm_entry_isrepeating(RRDHOST *host, ALARM_ENTRY *ae);
+extern RRDCALC *alarm_max_last_repeat(RRDHOST *host, char *alarm_name, uint32_t hash);
+
+#define RRDCALC_HAS_DB_LOOKUP(rc) ((rc)->after)
+
+extern void rrdsetcalc_link_matching(RRDSET *st);
+extern void rrdsetcalc_unlink(RRDCALC *rc);
+extern RRDCALC *rrdcalc_find(RRDSET *st, const char *name);
+
+extern const char *rrdcalc_status2string(RRDCALC_STATUS status);
+
+extern void rrdcalc_free(RRDCALC *rc);
+extern void rrdcalc_unlink_and_free(RRDHOST *host, RRDCALC *rc);
+
+extern int rrdcalc_exists(RRDHOST *host, const char *chart, const char *name, uint32_t hash_chart, uint32_t hash_name);
+extern uint32_t rrdcalc_get_unique_id(RRDHOST *host, const char *chart, const char *name, uint32_t *next_event_id);
+extern RRDCALC *rrdcalc_create_from_template(RRDHOST *host, RRDCALCTEMPLATE *rt, const char *chart);
+extern RRDCALC *rrdcalc_create_from_rrdcalc(RRDCALC *rc, RRDHOST *host, const char *name, const char *dimension);
+extern void rrdcalc_add_to_host(RRDHOST *host, RRDCALC *rc);
+extern void dimension_remove_pipe_comma(char *str);
+extern char *alarm_name_with_dim(char *name, size_t namelen, const char *dim, size_t dimlen);
+
+extern void rrdcalc_labels_unlink();
+extern void rrdcalc_labels_unlink_alarm_from_host(RRDHOST *host);
+
+static inline int rrdcalc_isrepeating(RRDCALC *rc) {
+ if (unlikely(rc->warn_repeat_every > 0 || rc->crit_repeat_every > 0)) {
+ return 1;
+ }
+ return 0;
+}
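+
+// Example (illustrative): an alarm configured with e.g. "repeat: warning 5m"
+// ends up with warn_repeat_every == 300, so rrdcalc_isrepeating() returns 1
+// and notifications keep being re-sent while the alarm stays raised.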
+
+#endif //NETDATA_RRDCALC_H
diff --git a/database/rrdcalctemplate.c b/database/rrdcalctemplate.c
new file mode 100644
index 0000000..007b8c5
--- /dev/null
+++ b/database/rrdcalctemplate.c
@@ -0,0 +1,149 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#define NETDATA_HEALTH_INTERNALS
+#include "rrd.h"
+
+// ----------------------------------------------------------------------------
+
+static int rrdcalctemplate_is_there_label_restriction(RRDCALCTEMPLATE *rt, RRDHOST *host) {
+ if(!rt->labels)
+ return 0;
+
+ errno = 0;
+ struct label *move = host->labels.head;
+ char cmp[CONFIG_FILE_LINE_MAX+1];
+
+ int ret;
+ if(move) {
+ rrdhost_check_rdlock(host);
+ netdata_rwlock_rdlock(&host->labels.labels_rwlock);
+ while(move) {
+ snprintfz(cmp, CONFIG_FILE_LINE_MAX, "%s=%s", move->key, move->value);
+ if (simple_pattern_matches(rt->splabels, move->key) ||
+ simple_pattern_matches(rt->splabels, cmp)) {
+ break;
+ }
+ move = move->next;
+ }
+ netdata_rwlock_unlock(&host->labels.labels_rwlock);
+
+ if(!move) {
+ error("Health template '%s' cannot be applied, because the host %s does not have the label(s) '%s'",
+ rt->name,
+ host->hostname,
+ rt->labels
+ );
+ ret = 1;
+ } else {
+ ret = 0;
+ }
+ } else {
+        ret = 0;
+ }
+
+ return ret;
+}
+
+static inline int rrdcalctemplate_test_additional_restriction(RRDCALCTEMPLATE *rt, RRDSET *st) {
+ if (rt->family_pattern && !simple_pattern_matches(rt->family_pattern, st->family))
+ return 0;
+
+ if (rt->module_pattern && !simple_pattern_matches(rt->module_pattern, st->module_name))
+ return 0;
+
+ if (rt->plugin_pattern && !simple_pattern_matches(rt->plugin_pattern, st->plugin_name))
+ return 0;
+
+ return 1;
+}
+
+// RRDCALCTEMPLATE management
+/**
+ * RRDCALC TEMPLATE LINK MATCHING
+ *
+ * @param rt is the template used to create the alarm.
+ * @param st is the chart where the alarm will be attached.
+ * @param host is the host that owns both the template and the chart.
+ */
+void rrdcalctemplate_link_matching_test(RRDCALCTEMPLATE *rt, RRDSET *st, RRDHOST *host) {
+ if(rt->hash_context == st->hash_context && !strcmp(rt->context, st->context) &&
+ rrdcalctemplate_test_additional_restriction(rt, st) ) {
+ if (!rrdcalctemplate_is_there_label_restriction(rt, host)) {
+ RRDCALC *rc = rrdcalc_create_from_template(host, rt, st->id);
+ if (unlikely(!rc))
+ info("Health tried to create alarm from template '%s' on chart '%s' of host '%s', but it failed",
+ rt->name, st->id, host->hostname);
+#ifdef NETDATA_INTERNAL_CHECKS
+ else if (rc->rrdset != st &&
+                 !rc->foreachdim) // When we have a template with foreachdim, the child will be added to the index later
+ error("Health alarm '%s.%s' should be linked to chart '%s', but it is not",
+ rc->chart ? rc->chart : "NOCHART", rc->name, st->id);
+#endif
+ }
+ }
+}
+
+void rrdcalctemplate_link_matching(RRDSET *st) {
+ RRDHOST *host = st->rrdhost;
+ RRDCALCTEMPLATE *rt;
+
+ for(rt = host->templates; rt ; rt = rt->next) {
+ rrdcalctemplate_link_matching_test(rt, st, host);
+ }
+
+ for(rt = host->alarms_template_with_foreach; rt ; rt = rt->next) {
+ rrdcalctemplate_link_matching_test(rt, st, host);
+ }
+}
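+
+// Note (illustrative): this is the entry point the chart layer is expected to
+// call whenever a new RRDSET appears, so both plain templates and foreach
+// templates get a chance to attach alarms to the new chart.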
+
+inline void rrdcalctemplate_free(RRDCALCTEMPLATE *rt) {
+ if(unlikely(!rt)) return;
+
+ expression_free(rt->calculation);
+ expression_free(rt->warning);
+ expression_free(rt->critical);
+
+ freez(rt->family_match);
+ simple_pattern_free(rt->family_pattern);
+
+ freez(rt->plugin_match);
+ simple_pattern_free(rt->plugin_pattern);
+
+ freez(rt->module_match);
+ simple_pattern_free(rt->module_pattern);
+
+ freez(rt->name);
+ freez(rt->exec);
+ freez(rt->recipient);
+ freez(rt->context);
+ freez(rt->source);
+ freez(rt->units);
+ freez(rt->info);
+ freez(rt->dimensions);
+ freez(rt->foreachdim);
+ freez(rt->labels);
+ simple_pattern_free(rt->spdim);
+ simple_pattern_free(rt->splabels);
+ freez(rt);
+}
+
+inline void rrdcalctemplate_unlink_and_free(RRDHOST *host, RRDCALCTEMPLATE *rt) {
+ if(unlikely(!rt)) return;
+
+ debug(D_HEALTH, "Health removing template '%s' of host '%s'", rt->name, host->hostname);
+
+ if(host->templates == rt) {
+ host->templates = rt->next;
+ }
+ else {
+ RRDCALCTEMPLATE *t;
+ for (t = host->templates; t && t->next != rt; t = t->next ) ;
+ if(t) {
+ t->next = rt->next;
+ rt->next = NULL;
+ }
+ else
+ error("Cannot find RRDCALCTEMPLATE '%s' linked in host '%s'", rt->name, host->hostname);
+ }
+
+ rrdcalctemplate_free(rt);
+}
diff --git a/database/rrdcalctemplate.h b/database/rrdcalctemplate.h
new file mode 100644
index 0000000..fb9347d
--- /dev/null
+++ b/database/rrdcalctemplate.h
@@ -0,0 +1,89 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_RRDCALCTEMPLATE_H
+#define NETDATA_RRDCALCTEMPLATE_H 1
+
+#include "rrd.h"
+
+// RRDCALCTEMPLATE
+// these are to be applied to charts found dynamically
+// based on their context.
+struct rrdcalctemplate {
+ char *name;
+ uint32_t hash_name;
+
+ char *exec;
+ char *recipient;
+
+ char *context;
+ uint32_t hash_context;
+
+ char *family_match;
+ SIMPLE_PATTERN *family_pattern;
+
+ char *plugin_match;
+ SIMPLE_PATTERN *plugin_pattern;
+
+ char *module_match;
+ SIMPLE_PATTERN *module_pattern;
+
+ char *source; // the source of this alarm
+ char *units; // the units of the alarm
+ char *info; // a short description of the alarm
+
+ int update_every; // update frequency for the alarm
+
+ // the red and green threshold of this alarm (to be set to the chart)
+ calculated_number green;
+ calculated_number red;
+
+ // ------------------------------------------------------------------------
+ // database lookup settings
+
+ char *dimensions; // the chart dimensions
+    char *foreachdim;               // the group of dimensions that the lookup will be applied to.
+    SIMPLE_PATTERN *spdim;          // used if and only if there is a simple pattern for the chart.
+    int foreachcounter;             // the number of alarms created with foreachdim; it also works as an id
+                                    // for the children
+ RRDR_GROUPING group; // grouping method: average, max, etc.
+ int before; // ending point in time-series
+ int after; // starting point in time-series
+ uint32_t options; // calculation options
+
+ // ------------------------------------------------------------------------
+ // notification delay settings
+
+ int delay_up_duration; // duration to delay notifications when alarm raises
+ int delay_down_duration; // duration to delay notifications when alarm lowers
+ int delay_max_duration; // the absolute max delay to apply to this alarm
+ float delay_multiplier; // multiplier for all delays when alarms switch status
+
+ // ------------------------------------------------------------------------
+ // notification repeat settings
+
+ uint32_t warn_repeat_every; // interval between repeating warning notifications
+ uint32_t crit_repeat_every; // interval between repeating critical notifications
+
+ // ------------------------------------------------------------------------
+ // Labels settings
+ char *labels; // the label read from an alarm file
+ SIMPLE_PATTERN *splabels; // the simple pattern of labels
+
+ // ------------------------------------------------------------------------
+ // expressions related to the alarm
+
+ EVAL_EXPRESSION *calculation;
+ EVAL_EXPRESSION *warning;
+ EVAL_EXPRESSION *critical;
+
+ struct rrdcalctemplate *next;
+};
+
+#define RRDCALCTEMPLATE_HAS_DB_LOOKUP(rt) ((rt)->after)
+
+extern void rrdcalctemplate_link_matching(RRDSET *st);
+
+extern void rrdcalctemplate_free(RRDCALCTEMPLATE *rt);
+extern void rrdcalctemplate_unlink_and_free(RRDHOST *host, RRDCALCTEMPLATE *rt);
+extern void rrdcalctemplate_create_alarms(RRDHOST *host, RRDCALCTEMPLATE *rt, RRDSET *st);
+#endif //NETDATA_RRDCALCTEMPLATE_H
diff --git a/database/rrddim.c b/database/rrddim.c
new file mode 100644
index 0000000..6a14085
--- /dev/null
+++ b/database/rrddim.c
@@ -0,0 +1,631 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#define NETDATA_RRD_INTERNALS
+#include "rrd.h"
+
+static inline void calc_link_to_rrddim(RRDDIM *rd)
+{
+ RRDHOST *host = rd->rrdset->rrdhost;
+ RRDSET *st = rd->rrdset;
+ if (host->alarms_with_foreach || host->alarms_template_with_foreach) {
+ int count = 0;
+ int hostlocked;
+ for (count = 0; count < 5; count++) {
+ hostlocked = netdata_rwlock_trywrlock(&host->rrdhost_rwlock);
+ if (!hostlocked) {
+ rrdcalc_link_to_rrddim(rd, st, host);
+ rrdhost_unlock(host);
+ break;
+ } else if (hostlocked != EBUSY) {
+ error("Cannot lock host to create an alarm for the dimension.");
+ }
+ sleep_usec(USEC_PER_MS * 200);
+ }
+
+ if (count == 5) {
+ error(
+ "Failed to create an alarm for dimension %s of chart %s 5 times. Skipping alarm.", rd->name, st->name);
+ }
+ }
+}
+
+// ----------------------------------------------------------------------------
+// RRDDIM index
+
+int rrddim_compare(void* a, void* b) {
+ if(((RRDDIM *)a)->hash < ((RRDDIM *)b)->hash) return -1;
+ else if(((RRDDIM *)a)->hash > ((RRDDIM *)b)->hash) return 1;
+ else return strcmp(((RRDDIM *)a)->id, ((RRDDIM *)b)->id);
+}
+
+#define rrddim_index_add(st, rd) (RRDDIM *)avl_insert_lock(&((st)->dimensions_index), (avl *)(rd))
+#define rrddim_index_del(st,rd ) (RRDDIM *)avl_remove_lock(&((st)->dimensions_index), (avl *)(rd))
+
+static inline RRDDIM *rrddim_index_find(RRDSET *st, const char *id, uint32_t hash) {
+ RRDDIM tmp = {
+ .id = id,
+ .hash = (hash)?hash:simple_hash(id)
+ };
+ return (RRDDIM *)avl_search_lock(&(st->dimensions_index), (avl *) &tmp);
+}
+
+
+// ----------------------------------------------------------------------------
+// RRDDIM - find a dimension
+
+inline RRDDIM *rrddim_find(RRDSET *st, const char *id) {
+ debug(D_RRD_CALLS, "rrddim_find() for chart %s, dimension %s", st->name, id);
+
+ return rrddim_index_find(st, id, 0);
+}
+
+
+// ----------------------------------------------------------------------------
+// RRDDIM rename a dimension
+
+inline int rrddim_set_name(RRDSET *st, RRDDIM *rd, const char *name) {
+ if(unlikely(!name || !*name || !strcmp(rd->name, name)))
+ return 0;
+
+ debug(D_RRD_CALLS, "rrddim_set_name() from %s.%s to %s.%s", st->name, rd->name, st->name, name);
+
+ char varname[CONFIG_MAX_NAME + 1];
+ snprintfz(varname, CONFIG_MAX_NAME, "dim %s name", rd->id);
+ rd->name = config_set_default(st->config_section, varname, name);
+ rd->hash_name = simple_hash(rd->name);
+ rrddimvar_rename_all(rd);
+ rd->exposed = 0;
+ rrdset_flag_clear(st, RRDSET_FLAG_UPSTREAM_EXPOSED);
+ return 1;
+}
+
+inline int rrddim_set_algorithm(RRDSET *st, RRDDIM *rd, RRD_ALGORITHM algorithm) {
+ if(unlikely(rd->algorithm == algorithm))
+ return 0;
+
+ debug(D_RRD_CALLS, "Updating algorithm of dimension '%s/%s' from %s to %s", st->id, rd->name, rrd_algorithm_name(rd->algorithm), rrd_algorithm_name(algorithm));
+ rd->algorithm = algorithm;
+ rd->exposed = 0;
+ rrdset_flag_set(st, RRDSET_FLAG_HOMOGENEOUS_CHECK);
+ rrdset_flag_clear(st, RRDSET_FLAG_UPSTREAM_EXPOSED);
+ return 1;
+}
+
+inline int rrddim_set_multiplier(RRDSET *st, RRDDIM *rd, collected_number multiplier) {
+ if(unlikely(rd->multiplier == multiplier))
+ return 0;
+
+ debug(D_RRD_CALLS, "Updating multiplier of dimension '%s/%s' from " COLLECTED_NUMBER_FORMAT " to " COLLECTED_NUMBER_FORMAT, st->id, rd->name, rd->multiplier, multiplier);
+ rd->multiplier = multiplier;
+ rd->exposed = 0;
+ rrdset_flag_set(st, RRDSET_FLAG_HOMOGENEOUS_CHECK);
+ rrdset_flag_clear(st, RRDSET_FLAG_UPSTREAM_EXPOSED);
+ return 1;
+}
+
+inline int rrddim_set_divisor(RRDSET *st, RRDDIM *rd, collected_number divisor) {
+ if(unlikely(rd->divisor == divisor))
+ return 0;
+
+ debug(D_RRD_CALLS, "Updating divisor of dimension '%s/%s' from " COLLECTED_NUMBER_FORMAT " to " COLLECTED_NUMBER_FORMAT, st->id, rd->name, rd->divisor, divisor);
+ rd->divisor = divisor;
+ rd->exposed = 0;
+ rrdset_flag_set(st, RRDSET_FLAG_HOMOGENEOUS_CHECK);
+ rrdset_flag_clear(st, RRDSET_FLAG_UPSTREAM_EXPOSED);
+ return 1;
+}
+
+// ----------------------------------------------------------------------------
+// RRDDIM legacy data collection functions
+
+static void rrddim_collect_init(RRDDIM *rd) {
+ rd->values[rd->rrdset->current_entry] = SN_EMPTY_SLOT; // pack_storage_number(0, SN_NOT_EXISTS);
+}
+static void rrddim_collect_store_metric(RRDDIM *rd, usec_t point_in_time, storage_number number) {
+ (void)point_in_time;
+
+ rd->values[rd->rrdset->current_entry] = number;
+}
+static int rrddim_collect_finalize(RRDDIM *rd) {
+ (void)rd;
+
+ return 0;
+}
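+
+// Lifecycle sketch (illustrative): the three functions above implement the
+// legacy (non-dbengine) collect_ops interface that is wired up further below:
+//
+//     rd->state->collect_ops.init(rd);                    // once, at creation
+//     rd->state->collect_ops.store_metric(rd, t, value);  // per collected point
+//     rd->state->collect_ops.finalize(rd);                // once, at teardown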
+
+// ----------------------------------------------------------------------------
+// RRDDIM legacy database query functions
+
+static void rrddim_query_init(RRDDIM *rd, struct rrddim_query_handle *handle, time_t start_time, time_t end_time) {
+ handle->rd = rd;
+ handle->start_time = start_time;
+ handle->end_time = end_time;
+ handle->slotted.slot = rrdset_time2slot(rd->rrdset, start_time);
+ handle->slotted.last_slot = rrdset_time2slot(rd->rrdset, end_time);
+ handle->slotted.finished = 0;
+}
+
+static storage_number rrddim_query_next_metric(struct rrddim_query_handle *handle, time_t *current_time) {
+ RRDDIM *rd = handle->rd;
+ long entries = rd->rrdset->entries;
+ long slot = handle->slotted.slot;
+
+ (void)current_time;
+ if (unlikely(handle->slotted.slot == handle->slotted.last_slot))
+ handle->slotted.finished = 1;
+ storage_number n = rd->values[slot++];
+
+ if(unlikely(slot >= entries)) slot = 0;
+ handle->slotted.slot = slot;
+
+ return n;
+}
+
+static int rrddim_query_is_finished(struct rrddim_query_handle *handle) {
+ return handle->slotted.finished;
+}
+
+static void rrddim_query_finalize(struct rrddim_query_handle *handle) {
+ (void)handle;
+
+ return;
+}
+
+static time_t rrddim_query_latest_time(RRDDIM *rd) {
+ return rrdset_last_entry_t_nolock(rd->rrdset);
+}
+
+static time_t rrddim_query_oldest_time(RRDDIM *rd) {
+ return rrdset_first_entry_t_nolock(rd->rrdset);
+}
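+
+// Query loop sketch (illustrative): callers are expected to drive the
+// query_ops in this order, matching the handle state machine above:
+//
+//     struct rrddim_query_handle h;
+//     rd->state->query_ops.init(rd, &h, after, before);
+//     while(!rd->state->query_ops.is_finished(&h)) {
+//         time_t t;
+//         storage_number n = rd->state->query_ops.next_metric(&h, &t);
+//         (void)n; // process the point
+//     }
+//     rd->state->query_ops.finalize(&h);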
+
+
+// ----------------------------------------------------------------------------
+// RRDDIM create a dimension
+
+void rrdcalc_link_to_rrddim(RRDDIM *rd, RRDSET *st, RRDHOST *host) {
+ RRDCALC *rrdc;
+ for (rrdc = host->alarms_with_foreach; rrdc ; rrdc = rrdc->next) {
+ if (simple_pattern_matches(rrdc->spdim, rd->id) || simple_pattern_matches(rrdc->spdim, rd->name)) {
+ if (rrdc->hash_chart == st->hash_name || !strcmp(rrdc->chart, st->name) || !strcmp(rrdc->chart, st->id)) {
+ char *usename = alarm_name_with_dim(rrdc->name, strlen(rrdc->name), rd->name, strlen(rd->name));
+ if (usename) {
+ if(rrdcalc_exists(host, st->name, usename, 0, 0)){
+ freez(usename);
+ continue;
+ }
+
+ RRDCALC *child = rrdcalc_create_from_rrdcalc(rrdc, host, usename, rd->name);
+ if (child) {
+ rrdcalc_add_to_host(host, child);
+ RRDCALC *rdcmp = (RRDCALC *) avl_insert_lock(&(host)->alarms_idx_health_log,(avl *)child);
+ if (rdcmp != child) {
+ error("Cannot insert the alarm index ID %s",child->name);
+ }
+ } else {
+ error("Cannot allocate a new alarm.");
+ rrdc->foreachcounter--;
+ }
+ }
+ }
+ }
+ }
+#ifdef ENABLE_ACLK
+ rrdset_flag_set(st, RRDSET_FLAG_ACLK);
+#endif
+}
+
+RRDDIM *rrddim_add_custom(RRDSET *st, const char *id, const char *name, collected_number multiplier,
+ collected_number divisor, RRD_ALGORITHM algorithm, RRD_MEMORY_MODE memory_mode)
+{
+ RRDHOST *host = st->rrdhost;
+ rrdset_wrlock(st);
+
+ rrdset_flag_set(st, RRDSET_FLAG_SYNC_CLOCK);
+ rrdset_flag_clear(st, RRDSET_FLAG_UPSTREAM_EXPOSED);
+
+ RRDDIM *rd = rrddim_find(st, id);
+ if(unlikely(rd)) {
+ debug(D_RRD_CALLS, "Cannot create rrd dimension '%s/%s', it already exists.", st->id, name?name:"<NONAME>");
+
+ int rc = rrddim_set_name(st, rd, name);
+ rc += rrddim_set_algorithm(st, rd, algorithm);
+ rc += rrddim_set_multiplier(st, rd, multiplier);
+ rc += rrddim_set_divisor(st, rd, divisor);
+ if (rrddim_flag_check(rd, RRDDIM_FLAG_ARCHIVED)) {
+#ifdef ENABLE_DBENGINE
+ store_active_dimension(rd->state->metric_uuid);
+#endif
+ rd->state->collect_ops.init(rd);
+ rrddim_flag_clear(rd, RRDDIM_FLAG_ARCHIVED);
+ rrddimvar_create(rd, RRDVAR_TYPE_CALCULATED, NULL, NULL, &rd->last_stored_value, RRDVAR_OPTION_DEFAULT);
+ rrddimvar_create(rd, RRDVAR_TYPE_COLLECTED, NULL, "_raw", &rd->last_collected_value, RRDVAR_OPTION_DEFAULT);
+ rrddimvar_create(rd, RRDVAR_TYPE_TIME_T, NULL, "_last_collected_t", &rd->last_collected_time.tv_sec, RRDVAR_OPTION_DEFAULT);
+ calc_link_to_rrddim(rd);
+ }
+ // DBENGINE available and activated?
+#ifdef ENABLE_DBENGINE
+ if (likely(rd->rrd_memory_mode == RRD_MEMORY_MODE_DBENGINE) && unlikely(rc)) {
+ debug(D_METADATALOG, "DIMENSION [%s] metadata updated", rd->id);
+ (void)sql_store_dimension(rd->state->metric_uuid, rd->rrdset->chart_uuid, rd->id, rd->name, rd->multiplier, rd->divisor,
+ rd->algorithm);
+ }
+#endif
+ rrdset_unlock(st);
+ return rd;
+ }
+
+ char filename[FILENAME_MAX + 1];
+ char fullfilename[FILENAME_MAX + 1];
+
+ char varname[CONFIG_MAX_NAME + 1];
+ unsigned long size = sizeof(RRDDIM) + (st->entries * sizeof(storage_number));
+
+ debug(D_RRD_CALLS, "Adding dimension '%s/%s'.", st->id, id);
+
+ rrdset_strncpyz_name(filename, id, FILENAME_MAX);
+ snprintfz(fullfilename, FILENAME_MAX, "%s/%s.db", st->cache_dir, filename);
+
+ if(memory_mode == RRD_MEMORY_MODE_SAVE || memory_mode == RRD_MEMORY_MODE_MAP ||
+ memory_mode == RRD_MEMORY_MODE_RAM) {
+ rd = (RRDDIM *)mymmap(
+ (memory_mode == RRD_MEMORY_MODE_RAM) ? NULL : fullfilename
+ , size
+ , ((memory_mode == RRD_MEMORY_MODE_MAP) ? MAP_SHARED : MAP_PRIVATE)
+ , 1
+ );
+
+ if(likely(rd)) {
+ // we have a file mapped for rd
+
+ memset(&rd->avl, 0, sizeof(avl));
+ rd->id = NULL;
+ rd->name = NULL;
+ rd->cache_filename = NULL;
+ rd->variables = NULL;
+ rd->next = NULL;
+ rd->rrdset = NULL;
+ rd->exposed = 0;
+
+ struct timeval now;
+ now_realtime_timeval(&now);
+
+ if(memory_mode == RRD_MEMORY_MODE_RAM) {
+ memset(rd, 0, size);
+ }
+ else {
+ int reset = 0;
+
+ if(strcmp(rd->magic, RRDDIMENSION_MAGIC) != 0) {
+ info("Initializing file %s.", fullfilename);
+ memset(rd, 0, size);
+ reset = 1;
+ }
+ else if(rd->memsize != size) {
+ error("File %s does not have the desired size, expected %lu but found %lu. Clearing it.", fullfilename, size, rd->memsize);
+ memset(rd, 0, size);
+ reset = 1;
+ }
+ else if(rd->update_every != st->update_every) {
+ error("File %s does not have the same update frequency, expected %d but found %d. Clearing it.", fullfilename, st->update_every, rd->update_every);
+ memset(rd, 0, size);
+ reset = 1;
+ }
+ else if(dt_usec(&now, &rd->last_collected_time) > (rd->entries * rd->update_every * USEC_PER_SEC)) {
+ info("File %s is too old (last collected %llu seconds ago, but the database is %ld seconds). Clearing it.", fullfilename, dt_usec(&now, &rd->last_collected_time) / USEC_PER_SEC, rd->entries * rd->update_every);
+ memset(rd, 0, size);
+ reset = 1;
+ }
+
+ if(!reset) {
+ if(rd->algorithm != algorithm) {
+ info("File %s does not have the expected algorithm (expected %u '%s', found %u '%s'). Previous values may be wrong.",
+ fullfilename, algorithm, rrd_algorithm_name(algorithm), rd->algorithm, rrd_algorithm_name(rd->algorithm));
+ }
+
+ if(rd->multiplier != multiplier) {
+ info("File %s does not have the expected multiplier (expected " COLLECTED_NUMBER_FORMAT ", found " COLLECTED_NUMBER_FORMAT "). Previous values may be wrong.", fullfilename, multiplier, rd->multiplier);
+ }
+
+ if(rd->divisor != divisor) {
+ info("File %s does not have the expected divisor (expected " COLLECTED_NUMBER_FORMAT ", found " COLLECTED_NUMBER_FORMAT "). Previous values may be wrong.", fullfilename, divisor, rd->divisor);
+ }
+ }
+ }
+
+ // make sure we have the right memory mode
+ // even if we cleared the memory
+ rd->rrd_memory_mode = memory_mode;
+ }
+ }
+
+ if(unlikely(!rd)) {
+ // if we didn't manage to get a mmap'd dimension, just create one
+ rd = callocz(1, size);
+ if (memory_mode == RRD_MEMORY_MODE_DBENGINE)
+ rd->rrd_memory_mode = RRD_MEMORY_MODE_DBENGINE;
+ else
+ rd->rrd_memory_mode = (memory_mode == RRD_MEMORY_MODE_NONE) ? RRD_MEMORY_MODE_NONE : RRD_MEMORY_MODE_ALLOC;
+ }
+ rd->memsize = size;
+
+ strcpy(rd->magic, RRDDIMENSION_MAGIC);
+
+ rd->id = strdupz(id);
+ rd->hash = simple_hash(rd->id);
+
+ rd->cache_filename = strdupz(fullfilename);
+
+ snprintfz(varname, CONFIG_MAX_NAME, "dim %s name", rd->id);
+ rd->name = config_get(st->config_section, varname, (name && *name)?name:rd->id);
+ rd->hash_name = simple_hash(rd->name);
+
+ snprintfz(varname, CONFIG_MAX_NAME, "dim %s algorithm", rd->id);
+ rd->algorithm = rrd_algorithm_id(config_get(st->config_section, varname, rrd_algorithm_name(algorithm)));
+
+ snprintfz(varname, CONFIG_MAX_NAME, "dim %s multiplier", rd->id);
+ rd->multiplier = config_get_number(st->config_section, varname, multiplier);
+
+ snprintfz(varname, CONFIG_MAX_NAME, "dim %s divisor", rd->id);
+ rd->divisor = config_get_number(st->config_section, varname, divisor);
+ if(!rd->divisor) rd->divisor = 1;
+
+ rd->entries = st->entries;
+ rd->update_every = st->update_every;
+
+ if(rrdset_flag_check(st, RRDSET_FLAG_STORE_FIRST))
+ rd->collections_counter = 1;
+ else
+ rd->collections_counter = 0;
+
+ rd->updated = 0;
+ rd->flags = 0x00000000;
+
+ rd->calculated_value = 0;
+ rd->last_calculated_value = 0;
+ rd->collected_value = 0;
+ rd->last_collected_value = 0;
+ rd->collected_value_max = 0;
+ rd->collected_volume = 0;
+ rd->stored_volume = 0;
+ rd->last_stored_value = 0;
+ rd->last_collected_time.tv_sec = 0;
+ rd->last_collected_time.tv_usec = 0;
+ rd->rrdset = st;
+ rd->state = mallocz(sizeof(*rd->state));
+ if(memory_mode == RRD_MEMORY_MODE_DBENGINE) {
+#ifdef ENABLE_DBENGINE
+ uuid_t *dim_uuid = find_dimension_uuid(st, rd);
+ rrdeng_metric_init(rd, dim_uuid);
+ store_active_dimension(rd->state->metric_uuid);
+ rd->state->collect_ops.init = rrdeng_store_metric_init;
+ rd->state->collect_ops.store_metric = rrdeng_store_metric_next;
+ rd->state->collect_ops.finalize = rrdeng_store_metric_finalize;
+ rd->state->query_ops.init = rrdeng_load_metric_init;
+ rd->state->query_ops.next_metric = rrdeng_load_metric_next;
+ rd->state->query_ops.is_finished = rrdeng_load_metric_is_finished;
+ rd->state->query_ops.finalize = rrdeng_load_metric_finalize;
+ rd->state->query_ops.latest_time = rrdeng_metric_latest_time;
+ rd->state->query_ops.oldest_time = rrdeng_metric_oldest_time;
+#endif
+ } else {
+ rd->state->collect_ops.init = rrddim_collect_init;
+ rd->state->collect_ops.store_metric = rrddim_collect_store_metric;
+ rd->state->collect_ops.finalize = rrddim_collect_finalize;
+ rd->state->query_ops.init = rrddim_query_init;
+ rd->state->query_ops.next_metric = rrddim_query_next_metric;
+ rd->state->query_ops.is_finished = rrddim_query_is_finished;
+ rd->state->query_ops.finalize = rrddim_query_finalize;
+ rd->state->query_ops.latest_time = rrddim_query_latest_time;
+ rd->state->query_ops.oldest_time = rrddim_query_oldest_time;
+ }
+ rd->state->collect_ops.init(rd);
+ // append this dimension
+ if(!st->dimensions)
+ st->dimensions = rd;
+ else {
+ RRDDIM *td = st->dimensions;
+
+        if(td->algorithm != rd->algorithm || llabs(td->multiplier) != llabs(rd->multiplier) || llabs(td->divisor) != llabs(rd->divisor)) {
+ if(!rrdset_flag_check(st, RRDSET_FLAG_HETEROGENEOUS)) {
+ #ifdef NETDATA_INTERNAL_CHECKS
+ info("Dimension '%s' added on chart '%s' of host '%s' is not homogeneous to other dimensions already present (algorithm is '%s' vs '%s', multiplier is " COLLECTED_NUMBER_FORMAT " vs " COLLECTED_NUMBER_FORMAT ", divisor is " COLLECTED_NUMBER_FORMAT " vs " COLLECTED_NUMBER_FORMAT ").",
+ rd->name,
+ st->name,
+ host->hostname,
+ rrd_algorithm_name(rd->algorithm), rrd_algorithm_name(td->algorithm),
+ rd->multiplier, td->multiplier,
+ rd->divisor, td->divisor
+ );
+ #endif
+ rrdset_flag_set(st, RRDSET_FLAG_HETEROGENEOUS);
+ }
+ }
+
+ for(; td->next; td = td->next) ;
+ td->next = rd;
+ }
+
+ if(host->health_enabled) {
+ rrddimvar_create(rd, RRDVAR_TYPE_CALCULATED, NULL, NULL, &rd->last_stored_value, RRDVAR_OPTION_DEFAULT);
+ rrddimvar_create(rd, RRDVAR_TYPE_COLLECTED, NULL, "_raw", &rd->last_collected_value, RRDVAR_OPTION_DEFAULT);
+ rrddimvar_create(rd, RRDVAR_TYPE_TIME_T, NULL, "_last_collected_t", &rd->last_collected_time.tv_sec, RRDVAR_OPTION_DEFAULT);
+ }
+
+ if(unlikely(rrddim_index_add(st, rd) != rd))
+ error("RRDDIM: INTERNAL ERROR: attempt to index duplicate dimension '%s' on chart '%s'", rd->id, st->id);
+
+ calc_link_to_rrddim(rd);
+
+ rrdset_unlock(st);
+#ifdef ENABLE_ACLK
+ rrdset_flag_set(st, RRDSET_FLAG_ACLK);
+#endif
+ return(rd);
+}
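+
+// Usage sketch (illustrative): a collector typically adds dimensions right
+// after creating the chart, inheriting the chart's memory mode:
+//
+//     RRDDIM *rd = rrddim_add_custom(st, "reads", NULL, 1, 1,
+//                                    RRD_ALGORITHM_INCREMENTAL,
+//                                    st->rrd_memory_mode);
+//
+// Passing NULL (or "") as the name falls back to the id, via the config_get()
+// call for "dim %s name" above.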
+
+// ----------------------------------------------------------------------------
+// RRDDIM remove / free a dimension
+
+void rrddim_free_custom(RRDSET *st, RRDDIM *rd, int db_rotated)
+{
+#ifndef ENABLE_ACLK
+ UNUSED(db_rotated);
+#endif
+ debug(D_RRD_CALLS, "rrddim_free() %s.%s", st->name, rd->name);
+
+ if (!rrddim_flag_check(rd, RRDDIM_FLAG_ARCHIVED)) {
+ uint8_t can_delete_metric = rd->state->collect_ops.finalize(rd);
+ if (can_delete_metric && rd->rrd_memory_mode == RRD_MEMORY_MODE_DBENGINE) {
+#ifdef ENABLE_DBENGINE
+ /* This metric has no data and no references */
+ delete_dimension_uuid(rd->state->metric_uuid);
+#endif
+ }
+ }
+
+ if(rd == st->dimensions)
+ st->dimensions = rd->next;
+ else {
+ RRDDIM *i;
+ for (i = st->dimensions; i && i->next != rd; i = i->next) ;
+
+ if (i && i->next == rd)
+ i->next = rd->next;
+ else
+ error("Request to free dimension '%s.%s' but it is not linked.", st->id, rd->name);
+ }
+ rd->next = NULL;
+
+ while(rd->variables)
+ rrddimvar_free(rd->variables);
+
+ if(unlikely(rrddim_index_del(st, rd) != rd))
+ error("RRDDIM: INTERNAL ERROR: attempt to remove from index dimension '%s' on chart '%s', removed a different dimension.", rd->id, st->id);
+
+ // free(rd->annotations);
+
+ RRD_MEMORY_MODE rrd_memory_mode = rd->rrd_memory_mode;
+ switch(rrd_memory_mode) {
+ case RRD_MEMORY_MODE_SAVE:
+ case RRD_MEMORY_MODE_MAP:
+ case RRD_MEMORY_MODE_RAM:
+ debug(D_RRD_CALLS, "Unmapping dimension '%s'.", rd->name);
+ freez((void *)rd->id);
+ freez(rd->cache_filename);
+ freez(rd->state);
+ munmap(rd, rd->memsize);
+ break;
+
+ case RRD_MEMORY_MODE_ALLOC:
+ case RRD_MEMORY_MODE_NONE:
+ case RRD_MEMORY_MODE_DBENGINE:
+ debug(D_RRD_CALLS, "Removing dimension '%s'.", rd->name);
+ freez((void *)rd->id);
+ freez(rd->cache_filename);
+#ifdef ENABLE_DBENGINE
+ if (rrd_memory_mode == RRD_MEMORY_MODE_DBENGINE) {
+ freez(rd->state->metric_uuid);
+ }
+#endif
+ freez(rd->state);
+ freez(rd);
+ break;
+ }
+#ifdef ENABLE_ACLK
+ if (db_rotated || RRD_MEMORY_MODE_DBENGINE != rrd_memory_mode)
+ rrdset_flag_set(st, RRDSET_FLAG_ACLK);
+#endif
+}
+
+
+// ----------------------------------------------------------------------------
+// RRDDIM - set dimension options
+
+int rrddim_hide(RRDSET *st, const char *id) {
+ debug(D_RRD_CALLS, "rrddim_hide() for chart %s, dimension %s", st->name, id);
+
+ RRDHOST *host = st->rrdhost;
+
+ RRDDIM *rd = rrddim_find(st, id);
+ if(unlikely(!rd)) {
+ error("Cannot find dimension with id '%s' on stats '%s' (%s) on host '%s'.", id, st->name, st->id, host->hostname);
+ return 1;
+ }
+
+ rrddim_flag_set(rd, RRDDIM_FLAG_HIDDEN);
+#ifdef ENABLE_ACLK
+ rrdset_flag_set(st, RRDSET_FLAG_ACLK);
+#endif
+ return 0;
+}
+
+int rrddim_unhide(RRDSET *st, const char *id) {
+ debug(D_RRD_CALLS, "rrddim_unhide() for chart %s, dimension %s", st->name, id);
+
+ RRDHOST *host = st->rrdhost;
+ RRDDIM *rd = rrddim_find(st, id);
+ if(unlikely(!rd)) {
+ error("Cannot find dimension with id '%s' on stats '%s' (%s) on host '%s'.", id, st->name, st->id, host->hostname);
+ return 1;
+ }
+
+ rrddim_flag_clear(rd, RRDDIM_FLAG_HIDDEN);
+#ifdef ENABLE_ACLK
+ rrdset_flag_set(st, RRDSET_FLAG_ACLK);
+#endif
+ return 0;
+}
+
+inline void rrddim_is_obsolete(RRDSET *st, RRDDIM *rd) {
+ debug(D_RRD_CALLS, "rrddim_is_obsolete() for chart %s, dimension %s", st->name, rd->name);
+
+ if(unlikely(rrddim_flag_check(rd, RRDDIM_FLAG_ARCHIVED))) {
+ info("Cannot obsolete already archived dimension %s from chart %s", rd->name, st->name);
+ return;
+ }
+ rrddim_flag_set(rd, RRDDIM_FLAG_OBSOLETE);
+ rrdset_flag_set(st, RRDSET_FLAG_OBSOLETE_DIMENSIONS);
+#ifdef ENABLE_ACLK
+ rrdset_flag_set(st, RRDSET_FLAG_ACLK);
+#endif
+}
+
+inline void rrddim_isnot_obsolete(RRDSET *st __maybe_unused, RRDDIM *rd) {
+ debug(D_RRD_CALLS, "rrddim_isnot_obsolete() for chart %s, dimension %s", st->name, rd->name);
+
+ rrddim_flag_clear(rd, RRDDIM_FLAG_OBSOLETE);
+#ifdef ENABLE_ACLK
+ rrdset_flag_set(st, RRDSET_FLAG_ACLK);
+#endif
+}
+
+// ----------------------------------------------------------------------------
+// RRDDIM - collect values for a dimension
+
+inline collected_number rrddim_set_by_pointer(RRDSET *st __maybe_unused, RRDDIM *rd, collected_number value) {
+ debug(D_RRD_CALLS, "rrddim_set_by_pointer() for chart %s, dimension %s, value " COLLECTED_NUMBER_FORMAT, st->name, rd->name, value);
+
+ now_realtime_timeval(&rd->last_collected_time);
+ rd->collected_value = value;
+ rd->updated = 1;
+
+ rd->collections_counter++;
+
+ collected_number v = (value >= 0) ? value : -value;
+ if(unlikely(v > rd->collected_value_max)) rd->collected_value_max = v;
+
+ // fprintf(stderr, "%s.%s %llu " COLLECTED_NUMBER_FORMAT " dt %0.6f" " rate " CALCULATED_NUMBER_FORMAT "\n", st->name, rd->name, st->usec_since_last_update, value, (float)((double)st->usec_since_last_update / (double)1000000), (calculated_number)((value - rd->last_collected_value) * (calculated_number)rd->multiplier / (calculated_number)rd->divisor * 1000000.0 / (calculated_number)st->usec_since_last_update));
+
+ return rd->last_collected_value;
+}
+
+collected_number rrddim_set(RRDSET *st, const char *id, collected_number value) {
+ RRDHOST *host = st->rrdhost;
+ RRDDIM *rd = rrddim_find(st, id);
+ if(unlikely(!rd)) {
+ error("Cannot find dimension with id '%s' on stats '%s' (%s) on host '%s'.", id, st->name, st->id, host->hostname);
+ return 0;
+ }
+
+ return rrddim_set_by_pointer(st, rd, value);
+}
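+
+// Collection sketch (illustrative): a plugin's per-iteration loop pairs
+// rrddim_set() with the chart-level functions from rrdset.c:
+//
+//     rrdset_next(st);                 // advance the chart one step
+//     rrddim_set(st, "reads", reads);  // set each dimension by id
+//     rrdset_done(st);                 // commit the collected values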
diff --git a/database/rrddimvar.c b/database/rrddimvar.c
new file mode 100644
index 0000000..3c2ed75
--- /dev/null
+++ b/database/rrddimvar.c
@@ -0,0 +1,217 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#define NETDATA_HEALTH_INTERNALS
+#include "rrd.h"
+
+// ----------------------------------------------------------------------------
+// RRDDIMVAR management
+// DIMENSION VARIABLES
+
+#define RRDDIMVAR_ID_MAX 1024
+
+static inline void rrddimvar_free_variables(RRDDIMVAR *rs) {
+ RRDDIM *rd = rs->rrddim;
+ RRDSET *st = rd->rrdset;
+ RRDHOST *host = st->rrdhost;
+
+ // CHART VARIABLES FOR THIS DIMENSION
+
+ rrdvar_free(host, &st->rrdvar_root_index, rs->var_local_id);
+ rs->var_local_id = NULL;
+
+ rrdvar_free(host, &st->rrdvar_root_index, rs->var_local_name);
+ rs->var_local_name = NULL;
+
+ // FAMILY VARIABLES FOR THIS DIMENSION
+
+ rrdvar_free(host, &st->rrdfamily->rrdvar_root_index, rs->var_family_id);
+ rs->var_family_id = NULL;
+
+ rrdvar_free(host, &st->rrdfamily->rrdvar_root_index, rs->var_family_name);
+ rs->var_family_name = NULL;
+
+ rrdvar_free(host, &st->rrdfamily->rrdvar_root_index, rs->var_family_contextid);
+ rs->var_family_contextid = NULL;
+
+ rrdvar_free(host, &st->rrdfamily->rrdvar_root_index, rs->var_family_contextname);
+ rs->var_family_contextname = NULL;
+
+ // HOST VARIABLES FOR THIS DIMENSION
+
+ rrdvar_free(host, &host->rrdvar_root_index, rs->var_host_chartidid);
+ rs->var_host_chartidid = NULL;
+
+ rrdvar_free(host, &host->rrdvar_root_index, rs->var_host_chartidname);
+ rs->var_host_chartidname = NULL;
+
+ rrdvar_free(host, &host->rrdvar_root_index, rs->var_host_chartnameid);
+ rs->var_host_chartnameid = NULL;
+
+ rrdvar_free(host, &host->rrdvar_root_index, rs->var_host_chartnamename);
+ rs->var_host_chartnamename = NULL;
+
+ // KEYS
+
+ freez(rs->key_id);
+ rs->key_id = NULL;
+
+ freez(rs->key_name);
+ rs->key_name = NULL;
+
+ freez(rs->key_fullidid);
+ rs->key_fullidid = NULL;
+
+ freez(rs->key_fullidname);
+ rs->key_fullidname = NULL;
+
+ freez(rs->key_contextid);
+ rs->key_contextid = NULL;
+
+ freez(rs->key_contextname);
+ rs->key_contextname = NULL;
+
+ freez(rs->key_fullnameid);
+ rs->key_fullnameid = NULL;
+
+ freez(rs->key_fullnamename);
+ rs->key_fullnamename = NULL;
+}
+
+static inline void rrddimvar_create_variables(RRDDIMVAR *rs) {
+ rrddimvar_free_variables(rs);
+
+ RRDDIM *rd = rs->rrddim;
+ RRDSET *st = rd->rrdset;
+ RRDHOST *host = st->rrdhost;
+
+ char buffer[RRDDIMVAR_ID_MAX + 1];
+
+ // KEYS
+
+ snprintfz(buffer, RRDDIMVAR_ID_MAX, "%s%s%s", rs->prefix, rd->id, rs->suffix);
+ rs->key_id = strdupz(buffer);
+
+ snprintfz(buffer, RRDDIMVAR_ID_MAX, "%s%s%s", rs->prefix, rd->name, rs->suffix);
+ rs->key_name = strdupz(buffer);
+
+ snprintfz(buffer, RRDDIMVAR_ID_MAX, "%s.%s", st->id, rs->key_id);
+ rs->key_fullidid = strdupz(buffer);
+
+ snprintfz(buffer, RRDDIMVAR_ID_MAX, "%s.%s", st->id, rs->key_name);
+ rs->key_fullidname = strdupz(buffer);
+
+ snprintfz(buffer, RRDDIMVAR_ID_MAX, "%s.%s", st->context, rs->key_id);
+ rs->key_contextid = strdupz(buffer);
+
+ snprintfz(buffer, RRDDIMVAR_ID_MAX, "%s.%s", st->context, rs->key_name);
+ rs->key_contextname = strdupz(buffer);
+
+ snprintfz(buffer, RRDDIMVAR_ID_MAX, "%s.%s", st->name, rs->key_id);
+ rs->key_fullnameid = strdupz(buffer);
+
+ snprintfz(buffer, RRDDIMVAR_ID_MAX, "%s.%s", st->name, rs->key_name);
+ rs->key_fullnamename = strdupz(buffer);
+
+ // CHART VARIABLES FOR THIS DIMENSION
+ // -----------------------------------
+ //
+ // dimensions are available as:
+ // - $id
+ // - $name
+
+ rs->var_local_id = rrdvar_create_and_index("local", &st->rrdvar_root_index, rs->key_id, rs->type, RRDVAR_OPTION_DEFAULT, rs->value);
+ rs->var_local_name = rrdvar_create_and_index("local", &st->rrdvar_root_index, rs->key_name, rs->type, RRDVAR_OPTION_DEFAULT, rs->value);
+
+ // FAMILY VARIABLES FOR THIS DIMENSION
+ // -----------------------------------
+ //
+ // dimensions are available as:
+ // - $id (only the first, when multiple overlap)
+ // - $name (only the first, when multiple overlap)
+ // - $chart-context.id
+ // - $chart-context.name
+
+ rs->var_family_id = rrdvar_create_and_index("family", &st->rrdfamily->rrdvar_root_index, rs->key_id, rs->type, RRDVAR_OPTION_DEFAULT, rs->value);
+ rs->var_family_name = rrdvar_create_and_index("family", &st->rrdfamily->rrdvar_root_index, rs->key_name, rs->type, RRDVAR_OPTION_DEFAULT, rs->value);
+ rs->var_family_contextid = rrdvar_create_and_index("family", &st->rrdfamily->rrdvar_root_index, rs->key_contextid, rs->type, RRDVAR_OPTION_DEFAULT, rs->value);
+ rs->var_family_contextname = rrdvar_create_and_index("family", &st->rrdfamily->rrdvar_root_index, rs->key_contextname, rs->type, RRDVAR_OPTION_DEFAULT, rs->value);
+
+ // HOST VARIABLES FOR THIS DIMENSION
+ // -----------------------------------
+ //
+ // dimensions are available as:
+ // - $chart-id.id
+ // - $chart-id.name
+ // - $chart-name.id
+ // - $chart-name.name
+
+ rs->var_host_chartidid = rrdvar_create_and_index("host", &host->rrdvar_root_index, rs->key_fullidid, rs->type, RRDVAR_OPTION_DEFAULT, rs->value);
+ rs->var_host_chartidname = rrdvar_create_and_index("host", &host->rrdvar_root_index, rs->key_fullidname, rs->type, RRDVAR_OPTION_DEFAULT, rs->value);
+ rs->var_host_chartnameid = rrdvar_create_and_index("host", &host->rrdvar_root_index, rs->key_fullnameid, rs->type, RRDVAR_OPTION_DEFAULT, rs->value);
+ rs->var_host_chartnamename = rrdvar_create_and_index("host", &host->rrdvar_root_index, rs->key_fullnamename, rs->type, RRDVAR_OPTION_DEFAULT, rs->value);
+}
+
+RRDDIMVAR *rrddimvar_create(RRDDIM *rd, RRDVAR_TYPE type, const char *prefix, const char *suffix, void *value, RRDVAR_OPTIONS options) {
+ RRDSET *st = rd->rrdset;
+ (void)st;
+
+ debug(D_VARIABLES, "RRDDIMSET create for chart id '%s' name '%s', dimension id '%s', name '%s%s%s'", st->id, st->name, rd->id, (prefix)?prefix:"", rd->name, (suffix)?suffix:"");
+
+ if(!prefix) prefix = "";
+ if(!suffix) suffix = "";
+
+ RRDDIMVAR *rs = (RRDDIMVAR *)callocz(1, sizeof(RRDDIMVAR));
+
+ rs->prefix = strdupz(prefix);
+ rs->suffix = strdupz(suffix);
+
+ rs->type = type;
+ rs->value = value;
+ rs->options = options;
+ rs->rrddim = rd;
+
+ rs->next = rd->variables;
+ rd->variables = rs;
+
+ rrddimvar_create_variables(rs);
+
+ return rs;
+}
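+
+// Usage sketch (illustrative): rrddim.c creates three of these per dimension
+// when health is enabled; for example, the raw collected value is exposed as:
+//
+//     rrddimvar_create(rd, RRDVAR_TYPE_COLLECTED, NULL, "_raw",
+//                      &rd->last_collected_value, RRDVAR_OPTION_DEFAULT);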
+
+void rrddimvar_rename_all(RRDDIM *rd) {
+ RRDSET *st = rd->rrdset;
+ (void)st;
+
+ debug(D_VARIABLES, "RRDDIMSET rename for chart id '%s' name '%s', dimension id '%s', name '%s'", st->id, st->name, rd->id, rd->name);
+
+ RRDDIMVAR *rs, *next = rd->variables;
+ while((rs = next)) {
+ next = rs->next;
+ rrddimvar_create_variables(rs);
+ }
+}
+
+void rrddimvar_free(RRDDIMVAR *rs) {
+ RRDDIM *rd = rs->rrddim;
+ RRDSET *st = rd->rrdset;
+ debug(D_VARIABLES, "RRDDIMSET free for chart id '%s' name '%s', dimension id '%s', name '%s', prefix='%s', suffix='%s'", st->id, st->name, rd->id, rd->name, rs->prefix, rs->suffix);
+
+ rrddimvar_free_variables(rs);
+
+ if(rd->variables == rs) {
+ debug(D_VARIABLES, "RRDDIMSET removing first entry for chart id '%s' name '%s', dimension id '%s', name '%s'", st->id, st->name, rd->id, rd->name);
+ rd->variables = rs->next;
+ }
+ else {
+ debug(D_VARIABLES, "RRDDIMSET removing non-first entry for chart id '%s' name '%s', dimension id '%s', name '%s'", st->id, st->name, rd->id, rd->name);
+ RRDDIMVAR *t;
+ for (t = rd->variables; t && t->next != rs; t = t->next) ;
+ if(!t) error("RRDDIMVAR '%s' not found in dimension '%s/%s' variables linked list", rs->key_name, st->id, rd->id);
+ else t->next = rs->next;
+ }
+
+ freez(rs->prefix);
+ freez(rs->suffix);
+ freez(rs);
+}
+
diff --git a/database/rrddimvar.h b/database/rrddimvar.h
new file mode 100644
index 0000000..3494824
--- /dev/null
+++ b/database/rrddimvar.h
@@ -0,0 +1,56 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_RRDDIMVAR_H
+#define NETDATA_RRDDIMVAR_H 1
+
+#include "rrd.h"
+
+// variables linked to individual dimensions
+// We link variables to point to the values that are already
+// calculated / processed by the normal data collection process.
+// This means there will be no speed penalty for using
+// these variables.
+struct rrddimvar {
+ char *prefix;
+ char *suffix;
+
+ char *key_id; // dimension id
+ char *key_name; // dimension name
+ char *key_contextid; // context + dimension id
+ char *key_contextname; // context + dimension name
+ char *key_fullidid; // chart type.chart id + dimension id
+ char *key_fullidname; // chart type.chart id + dimension name
+ char *key_fullnameid; // chart type.chart name + dimension id
+ char *key_fullnamename; // chart type.chart name + dimension name
+
+ RRDVAR_TYPE type;
+ void *value;
+
+ RRDVAR_OPTIONS options;
+
+ RRDVAR *var_local_id;
+ RRDVAR *var_local_name;
+
+ RRDVAR *var_family_id;
+ RRDVAR *var_family_name;
+ RRDVAR *var_family_contextid;
+ RRDVAR *var_family_contextname;
+
+ RRDVAR *var_host_chartidid;
+ RRDVAR *var_host_chartidname;
+ RRDVAR *var_host_chartnameid;
+ RRDVAR *var_host_chartnamename;
+
+ struct rrddim *rrddim;
+
+ struct rrddimvar *next;
+};
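+
+// Naming sketch (illustrative): for a chart with id "disk.sda" and a dimension
+// "reads", rrddimvar_create_variables() produces keys such as "reads" (key_id)
+// and "disk.sda.reads" (key_fullidid); with the "_raw" suffix these become
+// "reads_raw" and "disk.sda.reads_raw".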
+
+
+extern void rrddimvar_rename_all(RRDDIM *rd);
+extern RRDDIMVAR *rrddimvar_create(RRDDIM *rd, RRDVAR_TYPE type, const char *prefix, const char *suffix, void *value, RRDVAR_OPTIONS options);
+extern void rrddimvar_free(RRDDIMVAR *rs);
+
+
+
+#endif //NETDATA_RRDDIMVAR_H
diff --git a/database/rrdfamily.c b/database/rrdfamily.c
new file mode 100644
index 0000000..f75f0ad
--- /dev/null
+++ b/database/rrdfamily.c
@@ -0,0 +1,61 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#define NETDATA_RRD_INTERNALS
+#include "rrd.h"
+
+// ----------------------------------------------------------------------------
+// RRDFAMILY index
+
+int rrdfamily_compare(void *a, void *b) {
+ if(((RRDFAMILY *)a)->hash_family < ((RRDFAMILY *)b)->hash_family) return -1;
+ else if(((RRDFAMILY *)a)->hash_family > ((RRDFAMILY *)b)->hash_family) return 1;
+ else return strcmp(((RRDFAMILY *)a)->family, ((RRDFAMILY *)b)->family);
+}
+
+#define rrdfamily_index_add(host, rc) (RRDFAMILY *)avl_insert_lock(&((host)->rrdfamily_root_index), (avl *)(rc))
+#define rrdfamily_index_del(host, rc) (RRDFAMILY *)avl_remove_lock(&((host)->rrdfamily_root_index), (avl *)(rc))
+
+static RRDFAMILY *rrdfamily_index_find(RRDHOST *host, const char *id, uint32_t hash) {
+ RRDFAMILY tmp;
+ tmp.family = id;
+ tmp.hash_family = (hash)?hash:simple_hash(tmp.family);
+
+ return (RRDFAMILY *)avl_search_lock(&(host->rrdfamily_root_index), (avl *) &tmp);
+}
+
+RRDFAMILY *rrdfamily_create(RRDHOST *host, const char *id) {
+ RRDFAMILY *rc = rrdfamily_index_find(host, id, 0);
+ if(!rc) {
+ rc = callocz(1, sizeof(RRDFAMILY));
+
+ rc->family = strdupz(id);
+ rc->hash_family = simple_hash(rc->family);
+
+ // initialize the variables index
+ avl_init_lock(&rc->rrdvar_root_index, rrdvar_compare);
+
+ RRDFAMILY *ret = rrdfamily_index_add(host, rc);
+ if(ret != rc)
+ error("RRDFAMILY: INTERNAL ERROR: Expected to INSERT RRDFAMILY '%s' into index, but inserted '%s'.", rc->family, (ret)?ret->family:"NONE");
+ }
+
+ rc->use_count++;
+ return rc;
+}
+
+void rrdfamily_free(RRDHOST *host, RRDFAMILY *rc) {
+ rc->use_count--;
+ if(!rc->use_count) {
+ RRDFAMILY *ret = rrdfamily_index_del(host, rc);
+ if(ret != rc)
+ error("RRDFAMILY: INTERNAL ERROR: Expected to DELETE RRDFAMILY '%s' from index, but deleted '%s'.", rc->family, (ret)?ret->family:"NONE");
+ else {
+ debug(D_RRD_CALLS, "RRDFAMILY: Cleaning up remaining family variables for host '%s', family '%s'", host->hostname, rc->family);
+ rrdvar_free_remaining_variables(host, &rc->rrdvar_root_index);
+
+ freez((void *) rc->family);
+ freez(rc);
+ }
+ }
+}
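+
+// Reference-counting note: every rrdfamily_create() bumps use_count, so each
+// chart that joins a family must eventually call rrdfamily_free() exactly once;
+// the family (and its remaining variables) is destroyed only when the last
+// user releases it.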
+
diff --git a/database/rrdhost.c b/database/rrdhost.c
new file mode 100644
index 0000000..45c3146
--- /dev/null
+++ b/database/rrdhost.c
@@ -0,0 +1,1606 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#define NETDATA_RRD_INTERNALS
+#include "rrd.h"
+
+RRDHOST *localhost = NULL;
+size_t rrd_hosts_available = 0;
+netdata_rwlock_t rrd_rwlock = NETDATA_RWLOCK_INITIALIZER;
+
+time_t rrdset_free_obsolete_time = 3600;
+time_t rrdhost_free_orphan_time = 3600;
+
+// ----------------------------------------------------------------------------
+// RRDHOST index
+
+int rrdhost_compare(void* a, void* b) {
+ if(((RRDHOST *)a)->hash_machine_guid < ((RRDHOST *)b)->hash_machine_guid) return -1;
+ else if(((RRDHOST *)a)->hash_machine_guid > ((RRDHOST *)b)->hash_machine_guid) return 1;
+ else return strcmp(((RRDHOST *)a)->machine_guid, ((RRDHOST *)b)->machine_guid);
+}
+
+avl_tree_lock rrdhost_root_index = {
+ .avl_tree = { NULL, rrdhost_compare },
+ .rwlock = AVL_LOCK_INITIALIZER
+};
+
+RRDHOST *rrdhost_find_by_guid(const char *guid, uint32_t hash) {
+ debug(D_RRDHOST, "Searching in index for host with guid '%s'", guid);
+
+ RRDHOST tmp;
+ strncpyz(tmp.machine_guid, guid, GUID_LEN);
+ tmp.hash_machine_guid = (hash)?hash:simple_hash(tmp.machine_guid);
+
+ return (RRDHOST *)avl_search_lock(&(rrdhost_root_index), (avl *) &tmp);
+}
+
+RRDHOST *rrdhost_find_by_hostname(const char *hostname, uint32_t hash) {
+ if(unlikely(!strcmp(hostname, "localhost")))
+ return localhost;
+
+ if(unlikely(!hash)) hash = simple_hash(hostname);
+
+ rrd_rdlock();
+ RRDHOST *host;
+ rrdhost_foreach_read(host) {
+ if(unlikely((hash == host->hash_hostname && !strcmp(hostname, host->hostname)))) {
+ rrd_unlock();
+ return host;
+ }
+ }
+ rrd_unlock();
+
+ return NULL;
+}
+
+#define rrdhost_index_add(rrdhost) (RRDHOST *)avl_insert_lock(&(rrdhost_root_index), (avl *)(rrdhost))
+#define rrdhost_index_del(rrdhost) (RRDHOST *)avl_remove_lock(&(rrdhost_root_index), (avl *)(rrdhost))
+
+
+// ----------------------------------------------------------------------------
+// RRDHOST - internal helpers
+
+static inline void rrdhost_init_tags(RRDHOST *host, const char *tags) {
+ if(host->tags && tags && !strcmp(host->tags, tags))
+ return;
+
+ void *old = (void *)host->tags;
+ host->tags = (tags && *tags)?strdupz(tags):NULL;
+ freez(old);
+}
+
+static inline void rrdhost_init_hostname(RRDHOST *host, const char *hostname) {
+ if(host->hostname && hostname && !strcmp(host->hostname, hostname))
+ return;
+
+ void *old = host->hostname;
+ host->hostname = strdupz(hostname?hostname:"localhost");
+ host->hash_hostname = simple_hash(host->hostname);
+ freez(old);
+}
+
+static inline void rrdhost_init_os(RRDHOST *host, const char *os) {
+ if(host->os && os && !strcmp(host->os, os))
+ return;
+
+ void *old = (void *)host->os;
+ host->os = strdupz(os?os:"unknown");
+ freez(old);
+}
+
+static inline void rrdhost_init_timezone(RRDHOST *host, const char *timezone) {
+ if(host->timezone && timezone && !strcmp(host->timezone, timezone))
+ return;
+
+ void *old = (void *)host->timezone;
+ host->timezone = strdupz((timezone && *timezone)?timezone:"unknown");
+ freez(old);
+}
+
+static inline void rrdhost_init_machine_guid(RRDHOST *host, const char *machine_guid) {
+ strncpy(host->machine_guid, machine_guid, GUID_LEN);
+ host->machine_guid[GUID_LEN] = '\0';
+ host->hash_machine_guid = simple_hash(host->machine_guid);
+}
+
+void set_host_properties(RRDHOST *host, int update_every, RRD_MEMORY_MODE memory_mode, const char *hostname,
+ const char *registry_hostname, const char *guid, const char *os, const char *tags,
+ const char *tzone, const char *program_name, const char *program_version)
+{
+
+ host->rrd_update_every = update_every;
+ host->rrd_memory_mode = memory_mode;
+
+ rrdhost_init_hostname(host, hostname);
+
+ rrdhost_init_machine_guid(host, guid);
+
+ rrdhost_init_os(host, os);
+ rrdhost_init_timezone(host, tzone);
+ rrdhost_init_tags(host, tags);
+
+ host->program_name = strdupz((program_name && *program_name) ? program_name : "unknown");
+ host->program_version = strdupz((program_version && *program_version) ? program_version : "unknown");
+
+ host->registry_hostname = strdupz((registry_hostname && *registry_hostname) ? registry_hostname : host->hostname);
+}
+
+// ----------------------------------------------------------------------------
+// RRDHOST - add a host
+
+RRDHOST *rrdhost_create(const char *hostname,
+ const char *registry_hostname,
+ const char *guid,
+ const char *os,
+ const char *timezone,
+ const char *tags,
+ const char *program_name,
+ const char *program_version,
+ int update_every,
+ long entries,
+ RRD_MEMORY_MODE memory_mode,
+ unsigned int health_enabled,
+ unsigned int rrdpush_enabled,
+ char *rrdpush_destination,
+ char *rrdpush_api_key,
+ char *rrdpush_send_charts_matching,
+ struct rrdhost_system_info *system_info,
+ int is_localhost
+) {
+ debug(D_RRDHOST, "Host '%s': adding with guid '%s'", hostname, guid);
+
+#ifdef ENABLE_DBENGINE
+ int is_legacy = (memory_mode == RRD_MEMORY_MODE_DBENGINE) && is_legacy_child(guid);
+#else
+ int is_legacy = 1;
+#endif
+ rrd_check_wrlock();
+
+ int is_in_multihost = (memory_mode == RRD_MEMORY_MODE_DBENGINE && !is_legacy);
+ RRDHOST *host = callocz(1, sizeof(RRDHOST));
+
+ set_host_properties(host, (update_every > 0)?update_every:1, memory_mode, hostname, registry_hostname, guid, os,
+ tags, timezone, program_name, program_version);
+
+ host->rrd_history_entries = align_entries_to_pagesize(memory_mode, entries);
+ host->health_enabled = ((memory_mode == RRD_MEMORY_MODE_NONE)) ? 0 : health_enabled;
+
+ host->sender = mallocz(sizeof(*host->sender));
+ sender_init(host->sender, host);
+ netdata_mutex_init(&host->receiver_lock);
+
+ host->rrdpush_send_enabled = (rrdpush_enabled && rrdpush_destination && *rrdpush_destination && rrdpush_api_key && *rrdpush_api_key) ? 1 : 0;
+ host->rrdpush_send_destination = (host->rrdpush_send_enabled)?strdupz(rrdpush_destination):NULL;
+ host->rrdpush_send_api_key = (host->rrdpush_send_enabled)?strdupz(rrdpush_api_key):NULL;
+ host->rrdpush_send_charts_matching = simple_pattern_create(rrdpush_send_charts_matching, NULL, SIMPLE_PATTERN_EXACT);
+
+ host->rrdpush_sender_pipe[0] = -1;
+ host->rrdpush_sender_pipe[1] = -1;
+ host->rrdpush_sender_socket = -1;
+
+ //host->stream_version = STREAMING_PROTOCOL_CURRENT_VERSION; Unused?
+#ifdef ENABLE_HTTPS
+ host->ssl.conn = NULL;
+ host->ssl.flags = NETDATA_SSL_START;
+ host->stream_ssl.conn = NULL;
+ host->stream_ssl.flags = NETDATA_SSL_START;
+#endif
+
+ netdata_rwlock_init(&host->rrdhost_rwlock);
+ netdata_rwlock_init(&host->labels.labels_rwlock);
+
+ netdata_mutex_init(&host->aclk_state_lock);
+
+ host->system_info = system_info;
+
+ avl_init_lock(&(host->rrdset_root_index), rrdset_compare);
+ avl_init_lock(&(host->rrdset_root_index_name), rrdset_compare_name);
+ avl_init_lock(&(host->rrdfamily_root_index), rrdfamily_compare);
+ avl_init_lock(&(host->rrdvar_root_index), rrdvar_compare);
+
+ if(config_get_boolean(CONFIG_SECTION_GLOBAL, "delete obsolete charts files", 1))
+ rrdhost_flag_set(host, RRDHOST_FLAG_DELETE_OBSOLETE_CHARTS);
+
+ if(config_get_boolean(CONFIG_SECTION_GLOBAL, "delete orphan hosts files", 1) && !is_localhost)
+ rrdhost_flag_set(host, RRDHOST_FLAG_DELETE_ORPHAN_HOST);
+
+ host->health_default_warn_repeat_every = config_get_duration(CONFIG_SECTION_HEALTH, "default repeat warning", "never");
+ host->health_default_crit_repeat_every = config_get_duration(CONFIG_SECTION_HEALTH, "default repeat critical", "never");
+ avl_init_lock(&(host->alarms_idx_health_log), alarm_compare_id);
+ avl_init_lock(&(host->alarms_idx_name), alarm_compare_name);
+
+ // ------------------------------------------------------------------------
+ // initialize health variables
+
+ host->health_log.next_log_id = 1;
+ host->health_log.next_alarm_id = 1;
+ host->health_log.max = 1000;
+ host->health_log.next_log_id = (uint32_t)now_realtime_sec();
+ host->health_log.next_alarm_id = 0;
+
+ long n = config_get_number(CONFIG_SECTION_HEALTH, "in memory max health log entries", host->health_log.max);
+ if(n < 10) {
+ error("Host '%s': health configuration has invalid max log entries %ld. Using default %u", host->hostname, n, host->health_log.max);
+ config_set_number(CONFIG_SECTION_HEALTH, "in memory max health log entries", (long)host->health_log.max);
+ }
+ else
+ host->health_log.max = (unsigned int)n;
+
+ netdata_rwlock_init(&host->health_log.alarm_log_rwlock);
+
+ char filename[FILENAME_MAX + 1];
+
+ if(is_localhost) {
+
+ host->cache_dir = strdupz(netdata_configured_cache_dir);
+ host->varlib_dir = strdupz(netdata_configured_varlib_dir);
+
+ }
+ else {
+ // this is not localhost - append our GUID to localhost path
+ if (is_in_multihost) { // don't append to cache dir in multihost
+ host->cache_dir = strdupz(netdata_configured_cache_dir);
+ } else {
+ snprintfz(filename, FILENAME_MAX, "%s/%s", netdata_configured_cache_dir, host->machine_guid);
+ host->cache_dir = strdupz(filename);
+ }
+
+        if(host->rrd_memory_mode == RRD_MEMORY_MODE_MAP || host->rrd_memory_mode == RRD_MEMORY_MODE_SAVE ||
+           (host->rrd_memory_mode == RRD_MEMORY_MODE_DBENGINE && is_legacy)) {
+ int r = mkdir(host->cache_dir, 0775);
+ if(r != 0 && errno != EEXIST)
+ error("Host '%s': cannot create directory '%s'", host->hostname, host->cache_dir);
+ }
+
+ snprintfz(filename, FILENAME_MAX, "%s/%s", netdata_configured_varlib_dir, host->machine_guid);
+ host->varlib_dir = strdupz(filename);
+
+ if(host->health_enabled) {
+ int r = mkdir(host->varlib_dir, 0775);
+ if(r != 0 && errno != EEXIST)
+ error("Host '%s': cannot create directory '%s'", host->hostname, host->varlib_dir);
+ }
+
+ }
+
+ if(host->health_enabled) {
+ snprintfz(filename, FILENAME_MAX, "%s/health", host->varlib_dir);
+ int r = mkdir(filename, 0775);
+ if(r != 0 && errno != EEXIST)
+ error("Host '%s': cannot create directory '%s'", host->hostname, filename);
+ }
+
+ snprintfz(filename, FILENAME_MAX, "%s/health/health-log.db", host->varlib_dir);
+ host->health_log_filename = strdupz(filename);
+
+ snprintfz(filename, FILENAME_MAX, "%s/alarm-notify.sh", netdata_configured_primary_plugins_dir);
+ host->health_default_exec = strdupz(config_get(CONFIG_SECTION_HEALTH, "script to execute on alarm", filename));
+ host->health_default_recipient = strdupz("root");
+
+
+ // ------------------------------------------------------------------------
+ // load health configuration
+
+ if(host->health_enabled) {
+ rrdhost_wrlock(host);
+ health_readdir(host, health_user_config_dir(), health_stock_config_dir(), NULL);
+ rrdhost_unlock(host);
+
+ health_alarm_log_load(host);
+ health_alarm_log_open(host);
+ }
+
+ RRDHOST *t = rrdhost_index_add(host);
+
+ if(t != host) {
+ error("Host '%s': cannot add host with machine guid '%s' to index. It already exists as host '%s' with machine guid '%s'.", host->hostname, host->machine_guid, t->hostname, t->machine_guid);
+ rrdhost_free(host);
+ return NULL;
+ }
+
+ if (host->rrd_memory_mode == RRD_MEMORY_MODE_DBENGINE) {
+#ifdef ENABLE_DBENGINE
+ if (likely(!uuid_parse(host->machine_guid, host->host_uuid))) {
+ int rc = sql_store_host(&host->host_uuid, hostname, registry_hostname, update_every, os, timezone, tags);
+ if (unlikely(rc))
+ error_report("Failed to store machine GUID to the database");
+ }
+ else
+ error_report("Host machine GUID %s is not valid", host->machine_guid);
+ char dbenginepath[FILENAME_MAX + 1];
+ int ret;
+
+ snprintfz(dbenginepath, FILENAME_MAX, "%s/dbengine", host->cache_dir);
+ ret = mkdir(dbenginepath, 0775);
+ if (ret != 0 && errno != EEXIST)
+ error("Host '%s': cannot create directory '%s'", host->hostname, dbenginepath);
+        else ret = 0; // the directory already exists, which is fine
+ if (is_legacy) // initialize legacy dbengine instance as needed
+ ret = rrdeng_init(host, &host->rrdeng_ctx, dbenginepath, default_rrdeng_page_cache_mb,
+ default_rrdeng_disk_quota_mb); // may fail here for legacy dbengine initialization
+ else
+ host->rrdeng_ctx = &multidb_ctx;
+ if (ret) { // check legacy or multihost initialization success
+ error(
+ "Host '%s': cannot initialize host with machine guid '%s'. Failed to initialize DB engine at '%s'.",
+ host->hostname, host->machine_guid, host->cache_dir);
+ rrdhost_free(host);
+ host = NULL;
+ //rrd_hosts_available++; //TODO: maybe we want this?
+
+ return host;
+ }
+
+#else
+ fatal("RRD_MEMORY_MODE_DBENGINE is not supported in this platform.");
+#endif
+ }
+
+ // ------------------------------------------------------------------------
+ // link it and add it to the index
+
+ if(is_localhost) {
+ host->next = localhost;
+ localhost = host;
+ }
+ else {
+ if(localhost) {
+ host->next = localhost->next;
+ localhost->next = host;
+ }
+ else localhost = host;
+ }
+
+ info("Host '%s' (at registry as '%s') with guid '%s' initialized"
+ ", os '%s'"
+ ", timezone '%s'"
+ ", tags '%s'"
+ ", program_name '%s'"
+ ", program_version '%s'"
+ ", update every %d"
+ ", memory mode %s"
+ ", history entries %ld"
+ ", streaming %s"
+ " (to '%s' with api key '%s')"
+ ", health %s"
+ ", cache_dir '%s'"
+ ", varlib_dir '%s'"
+ ", health_log '%s'"
+ ", alarms default handler '%s'"
+ ", alarms default recipient '%s'"
+ , host->hostname
+ , host->registry_hostname
+ , host->machine_guid
+ , host->os
+ , host->timezone
+ , (host->tags)?host->tags:""
+ , host->program_name
+ , host->program_version
+ , host->rrd_update_every
+ , rrd_memory_mode_name(host->rrd_memory_mode)
+ , host->rrd_history_entries
+ , host->rrdpush_send_enabled?"enabled":"disabled"
+ , host->rrdpush_send_destination?host->rrdpush_send_destination:""
+ , host->rrdpush_send_api_key?host->rrdpush_send_api_key:""
+ , host->health_enabled?"enabled":"disabled"
+ , host->cache_dir
+ , host->varlib_dir
+ , host->health_log_filename
+ , host->health_default_exec
+ , host->health_default_recipient
+ );
+
+ rrd_hosts_available++;
+
+ return host;
+}
+
+void rrdhost_update(RRDHOST *host
+ , const char *hostname
+ , const char *registry_hostname
+ , const char *guid
+ , const char *os
+ , const char *timezone
+ , const char *tags
+ , const char *program_name
+ , const char *program_version
+ , int update_every
+ , long history
+ , RRD_MEMORY_MODE mode
+ , unsigned int health_enabled
+ , unsigned int rrdpush_enabled
+ , char *rrdpush_destination
+ , char *rrdpush_api_key
+ , char *rrdpush_send_charts_matching
+ , struct rrdhost_system_info *system_info
+)
+{
+ UNUSED(guid);
+ UNUSED(rrdpush_enabled);
+ UNUSED(rrdpush_destination);
+ UNUSED(rrdpush_api_key);
+ UNUSED(rrdpush_send_charts_matching);
+
+ host->health_enabled = (mode == RRD_MEMORY_MODE_NONE) ? 0 : health_enabled;
+ //host->stream_version = STREAMING_PROTOCOL_CURRENT_VERSION; Unused?
+
+ rrdhost_system_info_free(host->system_info);
+ host->system_info = system_info;
+
+ rrdhost_init_os(host, os);
+ rrdhost_init_timezone(host, timezone);
+
+ freez(host->registry_hostname);
+ host->registry_hostname = strdupz((registry_hostname && *registry_hostname)?registry_hostname:hostname);
+
+ if(strcmp(host->hostname, hostname) != 0) {
+ info("Host '%s' has been renamed to '%s'. If this is not intentional it may mean multiple hosts are using the same machine_guid.", host->hostname, hostname);
+ char *t = host->hostname;
+ host->hostname = strdupz(hostname);
+ host->hash_hostname = simple_hash(host->hostname);
+ freez(t);
+ }
+
+ if(strcmp(host->program_name, program_name) != 0) {
+ info("Host '%s' switched program name from '%s' to '%s'", host->hostname, host->program_name, program_name);
+ char *t = host->program_name;
+ host->program_name = strdupz(program_name);
+ freez(t);
+ }
+
+ if(strcmp(host->program_version, program_version) != 0) {
+ info("Host '%s' switched program version from '%s' to '%s'", host->hostname, host->program_version, program_version);
+ char *t = host->program_version;
+ host->program_version = strdupz(program_version);
+ freez(t);
+ }
+
+    if(host->rrd_update_every != update_every)
+        error("Host '%s' has an update frequency of %d seconds, but %d seconds were requested. Restart netdata on this host to apply the new settings.", host->hostname, host->rrd_update_every, update_every);
+
+    if(host->rrd_history_entries < history)
+        error("Host '%s' has a history of %ld entries, but %ld entries were requested. Restart netdata on this host to apply the new settings.", host->hostname, host->rrd_history_entries, history);
+
+    if(host->rrd_memory_mode != mode)
+        error("Host '%s' has memory mode '%s', but '%s' was requested. Restart netdata on this host to apply the new settings.", host->hostname, rrd_memory_mode_name(host->rrd_memory_mode), rrd_memory_mode_name(mode));
+
+ // update host tags
+ rrdhost_init_tags(host, tags);
+
+ if (rrdhost_flag_check(host, RRDHOST_FLAG_ARCHIVED)) {
+ rrdhost_flag_clear(host, RRDHOST_FLAG_ARCHIVED);
+ if(host->health_enabled) {
+ int r;
+ char filename[FILENAME_MAX + 1];
+
+ if (host != localhost) {
+ r = mkdir(host->varlib_dir, 0775);
+ if (r != 0 && errno != EEXIST)
+ error("Host '%s': cannot create directory '%s'", host->hostname, host->varlib_dir);
+ }
+ snprintfz(filename, FILENAME_MAX, "%s/health", host->varlib_dir);
+ r = mkdir(filename, 0775);
+ if(r != 0 && errno != EEXIST)
+ error("Host '%s': cannot create directory '%s'", host->hostname, filename);
+
+ rrdhost_wrlock(host);
+ health_readdir(host, health_user_config_dir(), health_stock_config_dir(), NULL);
+ rrdhost_unlock(host);
+
+ health_alarm_log_load(host);
+ health_alarm_log_open(host);
+ }
+ rrd_hosts_available++;
+ info("Host %s is not in archived mode anymore", host->hostname);
+ }
+
+ return;
+}
+
+RRDHOST *rrdhost_find_or_create(
+ const char *hostname
+ , const char *registry_hostname
+ , const char *guid
+ , const char *os
+ , const char *timezone
+ , const char *tags
+ , const char *program_name
+ , const char *program_version
+ , int update_every
+ , long history
+ , RRD_MEMORY_MODE mode
+ , unsigned int health_enabled
+ , unsigned int rrdpush_enabled
+ , char *rrdpush_destination
+ , char *rrdpush_api_key
+ , char *rrdpush_send_charts_matching
+ , struct rrdhost_system_info *system_info
+) {
+ debug(D_RRDHOST, "Searching for host '%s' with guid '%s'", hostname, guid);
+
+ rrd_wrlock();
+ RRDHOST *host = rrdhost_find_by_guid(guid, 0);
+ if (unlikely(host && RRD_MEMORY_MODE_DBENGINE != mode && rrdhost_flag_check(host, RRDHOST_FLAG_ARCHIVED))) {
+        /* If a legacy memory mode is being instantiated, all archived dbengine state must be discarded to avoid inconsistencies. */
+        error("Archived host '%s' has memory mode '%s', but '%s' was requested. Discarding archived state.",
+              host->hostname, rrd_memory_mode_name(host->rrd_memory_mode), rrd_memory_mode_name(mode));
+ rrdhost_free(host);
+ host = NULL;
+ }
+ if(!host) {
+ host = rrdhost_create(
+ hostname
+ , registry_hostname
+ , guid
+ , os
+ , timezone
+ , tags
+ , program_name
+ , program_version
+ , update_every
+ , history
+ , mode
+ , health_enabled
+ , rrdpush_enabled
+ , rrdpush_destination
+ , rrdpush_api_key
+ , rrdpush_send_charts_matching
+ , system_info
+ , 0
+ );
+ }
+ else {
+ rrdhost_update(host
+ , hostname
+ , registry_hostname
+ , guid
+ , os
+ , timezone
+ , tags
+ , program_name
+ , program_version
+ , update_every
+ , history
+ , mode
+ , health_enabled
+ , rrdpush_enabled
+ , rrdpush_destination
+ , rrdpush_api_key
+ , rrdpush_send_charts_matching
+ , system_info);
+ }
+ if (host) {
+ rrdhost_wrlock(host);
+ rrdhost_flag_clear(host, RRDHOST_FLAG_ORPHAN);
+ host->senders_disconnected_time = 0;
+ rrdhost_unlock(host);
+ }
+
+ rrdhost_cleanup_orphan_hosts_nolock(host);
+
+ rrd_unlock();
+
+ return host;
+}
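+
+// A host qualifies for orphan cleanup only when it is not the protected host,
+// not localhost, is flagged ORPHAN with no active receiver, and its senders
+// have been disconnected for longer than rrdhost_free_orphan_time.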
+inline int rrdhost_should_be_removed(RRDHOST *host, RRDHOST *protected, time_t now) {
+ if(host != protected
+ && host != localhost
+ && rrdhost_flag_check(host, RRDHOST_FLAG_ORPHAN)
+       && !host->receiver
+ && host->senders_disconnected_time
+ && host->senders_disconnected_time + rrdhost_free_orphan_time < now)
+ return 1;
+
+ return 0;
+}
+
+void rrdhost_cleanup_orphan_hosts_nolock(RRDHOST *protected) {
+ time_t now = now_realtime_sec();
+
+ RRDHOST *host;
+
+restart_after_removal:
+ rrdhost_foreach_write(host) {
+ if(rrdhost_should_be_removed(host, protected, now)) {
+ info("Host '%s' with machine guid '%s' is obsolete - cleaning up.", host->hostname, host->machine_guid);
+
+ if (rrdhost_flag_check(host, RRDHOST_FLAG_DELETE_ORPHAN_HOST)
+#ifdef ENABLE_DBENGINE
+ /* don't delete multi-host DB host files */
+ && !(host->rrd_memory_mode == RRD_MEMORY_MODE_DBENGINE && host->rrdeng_ctx == &multidb_ctx)
+#endif
+ )
+ rrdhost_delete_charts(host);
+ else
+ rrdhost_save_charts(host);
+
+ rrdhost_free(host);
+ goto restart_after_removal;
+ }
+ }
+}
+
+// ----------------------------------------------------------------------------
+// RRDHOST global / startup initialization
+
+int rrd_init(char *hostname, struct rrdhost_system_info *system_info) {
+ rrdset_free_obsolete_time = config_get_number(CONFIG_SECTION_GLOBAL, "cleanup obsolete charts after seconds", rrdset_free_obsolete_time);
+ gap_when_lost_iterations_above = (int)config_get_number(CONFIG_SECTION_GLOBAL, "gap when lost iterations above", gap_when_lost_iterations_above);
+ if (gap_when_lost_iterations_above < 1)
+ gap_when_lost_iterations_above = 1;
+
+#ifdef ENABLE_DBENGINE
+ if (unlikely(sql_init_database())) {
+ return 1;
+ }
+#endif
+
+ health_init();
+
+ rrdpush_init();
+
+ debug(D_RRDHOST, "Initializing localhost with hostname '%s'", hostname);
+ rrd_wrlock();
+ localhost = rrdhost_create(
+ hostname
+ , registry_get_this_machine_hostname()
+ , registry_get_this_machine_guid()
+ , os_type
+ , netdata_configured_timezone
+ , config_get(CONFIG_SECTION_BACKEND, "host tags", "")
+ , program_name
+ , program_version
+ , default_rrd_update_every
+ , default_rrd_history_entries
+ , default_rrd_memory_mode
+ , default_health_enabled
+ , default_rrdpush_enabled
+ , default_rrdpush_destination
+ , default_rrdpush_api_key
+ , default_rrdpush_send_charts_matching
+ , system_info
+ , 1
+ );
+ if (unlikely(!localhost)) {
+ rrd_unlock();
+ return 1;
+ }
+
+#ifdef ENABLE_DBENGINE
+ char dbenginepath[FILENAME_MAX + 1];
+ int ret;
+ snprintfz(dbenginepath, FILENAME_MAX, "%s/dbengine", localhost->cache_dir);
+ ret = mkdir(dbenginepath, 0775);
+ if (ret != 0 && errno != EEXIST)
+ error("Host '%s': cannot create directory '%s'", localhost->hostname, dbenginepath);
+ else // Unconditionally create multihost db to support on demand host creation
+ ret = rrdeng_init(NULL, NULL, dbenginepath, default_rrdeng_page_cache_mb, default_multidb_disk_quota_mb);
+ if (ret) {
+ error(
+ "Host '%s' with machine guid '%s' failed to initialize multi-host DB engine instance at '%s'.",
+ localhost->hostname, localhost->machine_guid, localhost->cache_dir);
+ rrdhost_free(localhost);
+ localhost = NULL;
+ rrd_unlock();
+ return 1;
+ }
+#endif
+ rrd_unlock();
+
+ web_client_api_v1_management_init();
+ return localhost==NULL;
+}
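+
+// Typical startup order (a sketch of the expected call site, which lives in the
+// daemon's main(), not here):
+//
+//     struct rrdhost_system_info *si = callocz(1, sizeof(struct rrdhost_system_info));
+//     // ... fill si from the environment via rrdhost_set_system_info_variable() ...
+//     if(rrd_init(hostname, si))
+//         fatal("Cannot initialize localhost instance with name '%s'.", hostname);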
+
+// ----------------------------------------------------------------------------
+// RRDHOST - lock validations
+// these are only used when NETDATA_INTERNAL_CHECKS is set
+
+void __rrdhost_check_rdlock(RRDHOST *host, const char *file, const char *function, const unsigned long line) {
+ debug(D_RRDHOST, "Checking read lock on host '%s'", host->hostname);
+
+ int ret = netdata_rwlock_trywrlock(&host->rrdhost_rwlock);
+ if(ret == 0)
+ fatal("RRDHOST '%s' should be read-locked, but it is not, at function %s() at line %lu of file '%s'", host->hostname, function, line, file);
+}
+
+void __rrdhost_check_wrlock(RRDHOST *host, const char *file, const char *function, const unsigned long line) {
+ debug(D_RRDHOST, "Checking write lock on host '%s'", host->hostname);
+
+ int ret = netdata_rwlock_tryrdlock(&host->rrdhost_rwlock);
+ if(ret == 0)
+ fatal("RRDHOST '%s' should be write-locked, but it is not, at function %s() at line %lu of file '%s'", host->hostname, function, line, file);
+}
+
+void __rrd_check_rdlock(const char *file, const char *function, const unsigned long line) {
+ debug(D_RRDHOST, "Checking read lock on all RRDs");
+
+ int ret = netdata_rwlock_trywrlock(&rrd_rwlock);
+ if(ret == 0)
+ fatal("RRDs should be read-locked, but it are not, at function %s() at line %lu of file '%s'", function, line, file);
+}
+
+void __rrd_check_wrlock(const char *file, const char *function, const unsigned long line) {
+ debug(D_RRDHOST, "Checking write lock on all RRDs");
+
+ int ret = netdata_rwlock_tryrdlock(&rrd_rwlock);
+ if(ret == 0)
+ fatal("RRDs should be write-locked, but it are not, at function %s() at line %lu of file '%s'", function, line, file);
+}
+
+// ----------------------------------------------------------------------------
+// RRDHOST - free
+
+void rrdhost_system_info_free(struct rrdhost_system_info *system_info) {
+ info("SYSTEM_INFO: free %p", system_info);
+
+ if(likely(system_info)) {
+ freez(system_info->host_os_name);
+ freez(system_info->host_os_id);
+ freez(system_info->host_os_id_like);
+ freez(system_info->host_os_version);
+ freez(system_info->host_os_version_id);
+ freez(system_info->host_os_detection);
+ freez(system_info->host_cores);
+ freez(system_info->host_cpu_freq);
+ freez(system_info->host_ram_total);
+ freez(system_info->host_disk_space);
+ freez(system_info->container_os_name);
+ freez(system_info->container_os_id);
+ freez(system_info->container_os_id_like);
+ freez(system_info->container_os_version);
+ freez(system_info->container_os_version_id);
+ freez(system_info->container_os_detection);
+ freez(system_info->kernel_name);
+ freez(system_info->kernel_version);
+ freez(system_info->architecture);
+ freez(system_info->virtualization);
+ freez(system_info->virt_detection);
+ freez(system_info->container);
+ freez(system_info->container_detection);
+ freez(system_info->is_k8s_node);
+ freez(system_info);
+ }
+}
+
+void destroy_receiver_state(struct receiver_state *rpt);
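+
+// Teardown order matters: stop the streaming sender first, cancel/wait for any
+// receiver, then (under the host write lock) release charts, alarms and
+// variables, remove the host from the index, unlink it from the hosts list,
+// and only then free the host's own members.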
+void rrdhost_free(RRDHOST *host) {
+ if(!host) return;
+
+ info("Freeing all memory for host '%s'...", host->hostname);
+
+ rrd_check_wrlock(); // make sure the RRDs are write locked
+
+ // ------------------------------------------------------------------------
+ // clean up streaming
+ rrdpush_sender_thread_stop(host); // stop a possibly running thread
+ cbuffer_free(host->sender->buffer);
+ buffer_free(host->sender->build);
+ freez(host->sender);
+ host->sender = NULL;
+ if (netdata_exit) {
+ netdata_mutex_lock(&host->receiver_lock);
+ if (host->receiver) {
+ if (!host->receiver->exited)
+ netdata_thread_cancel(host->receiver->thread);
+ netdata_mutex_unlock(&host->receiver_lock);
+ struct receiver_state *rpt = host->receiver;
+ while (host->receiver && !rpt->exited)
+ sleep_usec(50 * USEC_PER_MS);
+ // If the receiver detached from the host then its thread will destroy the state
+ if (host->receiver == rpt)
+ destroy_receiver_state(host->receiver);
+ }
+ else
+ netdata_mutex_unlock(&host->receiver_lock);
+ }
+
+
+
+ rrdhost_wrlock(host); // lock this RRDHOST
+
+ // ------------------------------------------------------------------------
+ // release its children resources
+
+#ifdef ENABLE_DBENGINE
+ if (host->rrd_memory_mode == RRD_MEMORY_MODE_DBENGINE) {
+ if (host->rrdeng_ctx != &multidb_ctx)
+ rrdeng_prepare_exit(host->rrdeng_ctx);
+ }
+#endif
+ while(host->rrdset_root)
+ rrdset_free(host->rrdset_root);
+
+ freez(host->exporting_flags);
+
+ while(host->alarms)
+ rrdcalc_unlink_and_free(host, host->alarms);
+
+ RRDCALC *rc,*nc;
+ for(rc = host->alarms_with_foreach; rc ; rc = nc) {
+ nc = rc->next;
+ rrdcalc_free(rc);
+ }
+ host->alarms_with_foreach = NULL;
+
+ while(host->templates)
+ rrdcalctemplate_unlink_and_free(host, host->templates);
+
+ RRDCALCTEMPLATE *rt,*next;
+ for(rt = host->alarms_template_with_foreach; rt ; rt = next) {
+ next = rt->next;
+ rrdcalctemplate_free(rt);
+ }
+ host->alarms_template_with_foreach = NULL;
+
+ debug(D_RRD_CALLS, "RRDHOST: Cleaning up remaining host variables for host '%s'", host->hostname);
+ rrdvar_free_remaining_variables(host, &host->rrdvar_root_index);
+
+ health_alarm_log_free(host);
+
+#ifdef ENABLE_DBENGINE
+ if (host->rrd_memory_mode == RRD_MEMORY_MODE_DBENGINE && host->rrdeng_ctx != &multidb_ctx)
+ rrdeng_exit(host->rrdeng_ctx);
+#endif
+
+ // ------------------------------------------------------------------------
+ // remove it from the indexes
+
+ if(rrdhost_index_del(host) != host)
+ error("RRDHOST '%s' removed from index, deleted the wrong entry.", host->hostname);
+
+
+ // ------------------------------------------------------------------------
+ // unlink it from the host
+
+ if(host == localhost) {
+ localhost = host->next;
+ }
+ else {
+ // find the previous one
+ RRDHOST *h;
+ for(h = localhost; h && h->next != host ; h = h->next) ;
+
+ // bypass it
+ if(h) h->next = host->next;
+ else error("Request to free RRDHOST '%s': cannot find it", host->hostname);
+ }
+
+
+
+ // ------------------------------------------------------------------------
+ // free it
+
+ pthread_mutex_destroy(&host->aclk_state_lock);
+ freez(host->aclk_state.claimed_id);
+ freez((void *)host->tags);
+ free_label_list(host->labels.head);
+ freez((void *)host->os);
+ freez((void *)host->timezone);
+ freez(host->program_version);
+ freez(host->program_name);
+ rrdhost_system_info_free(host->system_info);
+ freez(host->cache_dir);
+ freez(host->varlib_dir);
+ freez(host->rrdpush_send_api_key);
+ freez(host->rrdpush_send_destination);
+ freez(host->health_default_exec);
+ freez(host->health_default_recipient);
+ freez(host->health_log_filename);
+ freez(host->hostname);
+ freez(host->registry_hostname);
+ simple_pattern_free(host->rrdpush_send_charts_matching);
+ rrdhost_unlock(host);
+ netdata_rwlock_destroy(&host->labels.labels_rwlock);
+ netdata_rwlock_destroy(&host->health_log.alarm_log_rwlock);
+ netdata_rwlock_destroy(&host->rrdhost_rwlock);
+
+ freez(host);
+
+ rrd_hosts_available--;
+}
+
+void rrdhost_free_all(void) {
+ rrd_wrlock();
+ /* Make sure child-hosts are released before the localhost. */
+ while(localhost->next) rrdhost_free(localhost->next);
+ rrdhost_free(localhost);
+ rrd_unlock();
+}
+
+// ----------------------------------------------------------------------------
+// RRDHOST - save host files
+
+void rrdhost_save_charts(RRDHOST *host) {
+ if(!host) return;
+
+ info("Saving/Closing database of host '%s'...", host->hostname);
+
+ RRDSET *st;
+
+ // we get a write lock
+ // to ensure only one thread is saving the database
+ rrdhost_wrlock(host);
+
+ rrdset_foreach_write(st, host) {
+ rrdset_rdlock(st);
+ rrdset_save(st);
+ rrdset_unlock(st);
+ }
+
+ rrdhost_unlock(host);
+}
+
+static struct label *rrdhost_load_auto_labels(void)
+{
+ struct label *label_list = NULL;
+
+ if (localhost->system_info->host_os_name)
+ label_list =
+ add_label_to_list(label_list, "_os_name", localhost->system_info->host_os_name, LABEL_SOURCE_AUTO);
+
+ if (localhost->system_info->host_os_version)
+ label_list =
+ add_label_to_list(label_list, "_os_version", localhost->system_info->host_os_version, LABEL_SOURCE_AUTO);
+
+ if (localhost->system_info->kernel_version)
+ label_list =
+ add_label_to_list(label_list, "_kernel_version", localhost->system_info->kernel_version, LABEL_SOURCE_AUTO);
+
+ if (localhost->system_info->host_cores)
+ label_list =
+ add_label_to_list(label_list, "_system_cores", localhost->system_info->host_cores, LABEL_SOURCE_AUTO);
+
+ if (localhost->system_info->host_cpu_freq)
+ label_list =
+ add_label_to_list(label_list, "_system_cpu_freq", localhost->system_info->host_cpu_freq, LABEL_SOURCE_AUTO);
+
+ if (localhost->system_info->host_ram_total)
+ label_list =
+ add_label_to_list(label_list, "_system_ram_total", localhost->system_info->host_ram_total, LABEL_SOURCE_AUTO);
+
+ if (localhost->system_info->host_disk_space)
+ label_list =
+ add_label_to_list(label_list, "_system_disk_space", localhost->system_info->host_disk_space, LABEL_SOURCE_AUTO);
+
+ if (localhost->system_info->architecture)
+ label_list =
+ add_label_to_list(label_list, "_architecture", localhost->system_info->architecture, LABEL_SOURCE_AUTO);
+
+ if (localhost->system_info->virtualization)
+ label_list =
+ add_label_to_list(label_list, "_virtualization", localhost->system_info->virtualization, LABEL_SOURCE_AUTO);
+
+ if (localhost->system_info->container)
+ label_list =
+ add_label_to_list(label_list, "_container", localhost->system_info->container, LABEL_SOURCE_AUTO);
+
+ if (localhost->system_info->container_detection)
+ label_list =
+ add_label_to_list(label_list, "_container_detection", localhost->system_info->container_detection, LABEL_SOURCE_AUTO);
+
+ if (localhost->system_info->virt_detection)
+ label_list =
+ add_label_to_list(label_list, "_virt_detection", localhost->system_info->virt_detection, LABEL_SOURCE_AUTO);
+
+ if (localhost->system_info->is_k8s_node)
+ label_list =
+ add_label_to_list(label_list, "_is_k8s_node", localhost->system_info->is_k8s_node, LABEL_SOURCE_AUTO);
+
+ label_list = add_aclk_host_labels(label_list);
+
+ label_list = add_label_to_list(
+ label_list, "_is_parent", (localhost->next || configured_as_parent()) ? "true" : "false", LABEL_SOURCE_AUTO);
+
+ if (localhost->rrdpush_send_destination)
+ label_list =
+ add_label_to_list(label_list, "_streams_to", localhost->rrdpush_send_destination, LABEL_SOURCE_AUTO);
+
+ return label_list;
+}
+
+static inline int rrdhost_is_valid_label_config_option(char *name, char *value)
+{
+ return (is_valid_label_key(name) && is_valid_label_value(value) && strcmp(name, "from environment") &&
+ strcmp(name, "from kubernetes pods"));
+}
+
+static struct label *rrdhost_load_config_labels(void)
+{
+ int status = config_load(NULL, 1, CONFIG_SECTION_HOST_LABEL);
+ if(!status) {
+ char *filename = CONFIG_DIR "/" CONFIG_FILENAME;
+ error("LABEL: Cannot reload the configuration file '%s', using labels in memory", filename);
+ }
+
+ struct label *l = NULL;
+ struct section *co = appconfig_get_section(&netdata_config, CONFIG_SECTION_HOST_LABEL);
+ if(co) {
+ config_section_wrlock(co);
+ struct config_option *cv;
+ for(cv = co->values; cv ; cv = cv->next) {
+ if(rrdhost_is_valid_label_config_option(cv->name, cv->value)) {
+ l = add_label_to_list(l, cv->name, cv->value, LABEL_SOURCE_NETDATA_CONF);
+ cv->flags |= CONFIG_VALUE_USED;
+ } else {
+ error("LABELS: It was not possible to create the label '%s' because it contains invalid character(s) or values."
+ , cv->name);
+ }
+ }
+ config_section_unlock(co);
+ }
+
+ return l;
+}
+
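+// Parses a flat "key<kv-sep>value<label-sep>key<kv-sep>value..." tag string, e.g.
+// graphite-style "env=prod;team=sre" with '=' and ';' as separators, trimming and
+// optionally unquoting every token before adding it to the label list.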
+struct label *parse_simple_tags(
+ struct label *label_list,
+ const char *tags,
+ char key_value_separator,
+ char label_separator,
+ STRIP_QUOTES_OPTION strip_quotes_from_key,
+ STRIP_QUOTES_OPTION strip_quotes_from_value,
+ SKIP_ESCAPED_CHARACTERS_OPTION skip_escaped_characters)
+{
+ const char *end = tags;
+
+ while (*end) {
+ const char *start = end;
+ char key[CONFIG_MAX_VALUE + 1];
+ char value[CONFIG_MAX_VALUE + 1];
+
+ while (*end && *end != key_value_separator)
+ end++;
+ strncpyz(key, start, end - start);
+
+ if (*end)
+ start = ++end;
+ while (*end && *end != label_separator)
+ end++;
+ strncpyz(value, start, end - start);
+
+ label_list = add_label_to_list(
+ label_list,
+ strip_quotes_from_key ? strip_double_quotes(trim(key), skip_escaped_characters) : trim(key),
+ strip_quotes_from_value ? strip_double_quotes(trim(value), skip_escaped_characters) : trim(value),
+ LABEL_SOURCE_NETDATA_CONF);
+
+ if (*end)
+ end++;
+ }
+
+ return label_list;
+}
+
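+// Accepts three loosely JSON-shaped tag formats: an object parsed into key/value
+// labels, an array parsed into host_tag0..host_tagN, and a bare (optionally
+// quoted) string stored as a single "host_tag" label.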
+struct label *parse_json_tags(struct label *label_list, const char *tags)
+{
+ char tags_buf[CONFIG_MAX_VALUE + 1];
+    strncpyz(tags_buf, tags, CONFIG_MAX_VALUE);
+ char *str = tags_buf;
+
+ switch (*str) {
+ case '{':
+ str++;
+ strip_last_symbol(str, '}', SKIP_ESCAPED_CHARACTERS);
+
+ label_list = parse_simple_tags(label_list, str, ':', ',', STRIP_QUOTES, STRIP_QUOTES, SKIP_ESCAPED_CHARACTERS);
+
+ break;
+ case '[':
+ str++;
+ strip_last_symbol(str, ']', SKIP_ESCAPED_CHARACTERS);
+
+ char *end = str + strlen(str);
+ size_t i = 0;
+
+ while (str < end) {
+ char key[CONFIG_MAX_VALUE + 1];
+ snprintfz(key, CONFIG_MAX_VALUE, "host_tag%zu", i);
+
+ str = strip_double_quotes(trim(str), SKIP_ESCAPED_CHARACTERS);
+
+ label_list = add_label_to_list(label_list, key, str, LABEL_SOURCE_NETDATA_CONF);
+
+ // skip to the next element in the array
+ str += strlen(str) + 1;
+ while (*str && *str != ',')
+ str++;
+ str++;
+ i++;
+ }
+
+ break;
+ case '"':
+ label_list = add_label_to_list(
+ label_list, "host_tag", strip_double_quotes(str, SKIP_ESCAPED_CHARACTERS), LABEL_SOURCE_NETDATA_CONF);
+ break;
+ default:
+ label_list = add_label_to_list(label_list, "host_tag", str, LABEL_SOURCE_NETDATA_CONF);
+ break;
+ }
+
+ return label_list;
+}
+
+static struct label *rrdhost_load_labels_from_tags(void)
+{
+ if (!localhost->tags)
+ return NULL;
+
+ struct label *label_list = NULL;
+ BACKEND_TYPE type = BACKEND_TYPE_UNKNOWN;
+
+ if (config_exists(CONFIG_SECTION_BACKEND, "enabled")) {
+ if (config_get_boolean(CONFIG_SECTION_BACKEND, "enabled", CONFIG_BOOLEAN_NO) != CONFIG_BOOLEAN_NO) {
+ const char *type_name = config_get(CONFIG_SECTION_BACKEND, "type", "graphite");
+ type = backend_select_type(type_name);
+ }
+ }
+
+ switch (type) {
+ case BACKEND_TYPE_GRAPHITE:
+ label_list = parse_simple_tags(
+ label_list, localhost->tags, '=', ';', DO_NOT_STRIP_QUOTES, DO_NOT_STRIP_QUOTES,
+ DO_NOT_SKIP_ESCAPED_CHARACTERS);
+ break;
+ case BACKEND_TYPE_OPENTSDB_USING_TELNET:
+ label_list = parse_simple_tags(
+ label_list, localhost->tags, '=', ' ', DO_NOT_STRIP_QUOTES, DO_NOT_STRIP_QUOTES,
+ DO_NOT_SKIP_ESCAPED_CHARACTERS);
+ break;
+ case BACKEND_TYPE_OPENTSDB_USING_HTTP:
+ label_list = parse_simple_tags(
+ label_list, localhost->tags, ':', ',', STRIP_QUOTES, STRIP_QUOTES,
+ DO_NOT_SKIP_ESCAPED_CHARACTERS);
+ break;
+ case BACKEND_TYPE_JSON:
+ label_list = parse_json_tags(label_list, localhost->tags);
+ break;
+ default:
+ label_list = parse_simple_tags(
+ label_list, localhost->tags, '=', ',', DO_NOT_STRIP_QUOTES, STRIP_QUOTES,
+ DO_NOT_SKIP_ESCAPED_CHARACTERS);
+ break;
+ }
+
+ return label_list;
+}
+
+static struct label *rrdhost_load_kubernetes_labels(void)
+{
+ struct label *l=NULL;
+ char *label_script = mallocz(sizeof(char) * (strlen(netdata_configured_primary_plugins_dir) + strlen("get-kubernetes-labels.sh") + 2));
+ sprintf(label_script, "%s/%s", netdata_configured_primary_plugins_dir, "get-kubernetes-labels.sh");
+ if (unlikely(access(label_script, R_OK) != 0)) {
+ error("Kubernetes pod label fetching script %s not found.",label_script);
+ freez(label_script);
+ } else {
+ pid_t command_pid;
+
+ debug(D_RRDHOST, "Attempting to fetch external labels via %s", label_script);
+
+ FILE *fp = mypopen(label_script, &command_pid);
+ if(fp) {
+ int MAX_LINE_SIZE=300;
+ char buffer[MAX_LINE_SIZE + 1];
+ while (fgets(buffer, MAX_LINE_SIZE, fp) != NULL) {
+ char *name=buffer;
+ char *value=buffer;
+ while (*value && *value != ':') value++;
+ if (*value == ':') {
+ *value = '\0';
+ value++;
+ }
+ char *eos=value;
+ while (*eos && *eos != '\n') eos++;
+ if (*eos == '\n') *eos = '\0';
+ if (strlen(value)>0) {
+ if (is_valid_label_key(name)){
+ l = add_label_to_list(l, name, value, LABEL_SOURCE_KUBERNETES);
+ } else {
+ info("Ignoring invalid label name '%s'", name);
+ }
+ } else {
+ error("%s outputted unexpected result: '%s'", label_script, name);
+ }
+ };
+            // A non-zero exit code means all of the script's output consists of error messages; any message without a ':' has already been shown above.
+            // Log an ERROR that the script failed, report whatever (if anything) was added to the label list, free that memory and return NULL.
+ int retcode=mypclose(fp, command_pid);
+ if (retcode) {
+ error("%s exited abnormally. No kubernetes labels will be added to the host.", label_script);
+ struct label *ll=l;
+ while (ll != NULL) {
+ info("Ignoring Label [source id=%s]: \"%s\" -> \"%s\"\n", translate_label_source(ll->label_source), ll->key, ll->value);
+ ll = ll->next;
+ freez(l);
+ l=ll;
+ }
+ }
+ }
+ freez(label_script);
+ }
+
+ return l;
+}
+
+void reload_host_labels(void)
+{
+ struct label *from_auto = rrdhost_load_auto_labels();
+ struct label *from_k8s = rrdhost_load_kubernetes_labels();
+ struct label *from_config = rrdhost_load_config_labels();
+ struct label *from_tags = rrdhost_load_labels_from_tags();
+
+ struct label *new_labels = merge_label_lists(from_auto, from_k8s);
+ new_labels = merge_label_lists(new_labels, from_tags);
+ new_labels = merge_label_lists(new_labels, from_config);
+
+ rrdhost_rdlock(localhost);
+ replace_label_list(&localhost->labels, new_labels);
+
+ health_label_log_save(localhost);
+ rrdhost_unlock(localhost);
+
+/* TODO-GAPS - fix this so that it looks properly at the state and version of the sender
+ if(localhost->rrdpush_send_enabled && localhost->rrdpush_sender_buffer){
+ localhost->labels.labels_flag |= LABEL_FLAG_UPDATE_STREAM;
+ rrdpush_send_labels(localhost);
+ }
+*/
+ health_reload();
+}
+
+// ----------------------------------------------------------------------------
+// RRDHOST - delete host files
+
+void rrdhost_delete_charts(RRDHOST *host) {
+ if(!host) return;
+
+ info("Deleting database of host '%s'...", host->hostname);
+
+ RRDSET *st;
+
+ // we get a write lock
+ // to ensure only one thread is saving the database
+ rrdhost_wrlock(host);
+
+ rrdset_foreach_write(st, host) {
+ rrdset_rdlock(st);
+ rrdset_delete(st);
+ rrdset_unlock(st);
+ }
+
+ recursively_delete_dir(host->cache_dir, "left over host");
+
+ rrdhost_unlock(host);
+}
+
+// ----------------------------------------------------------------------------
+// RRDHOST - cleanup host files
+
+void rrdhost_cleanup_charts(RRDHOST *host) {
+ if(!host) return;
+
+ info("Cleaning up database of host '%s'...", host->hostname);
+
+ RRDSET *st;
+ uint32_t rrdhost_delete_obsolete_charts = rrdhost_flag_check(host, RRDHOST_FLAG_DELETE_OBSOLETE_CHARTS);
+
+ // we get a write lock
+ // to ensure only one thread is saving the database
+ rrdhost_wrlock(host);
+
+ rrdset_foreach_write(st, host) {
+ rrdset_rdlock(st);
+
+ if(rrdhost_delete_obsolete_charts && rrdset_flag_check(st, RRDSET_FLAG_OBSOLETE))
+ rrdset_delete(st);
+ else if(rrdhost_delete_obsolete_charts && rrdset_flag_check(st, RRDSET_FLAG_OBSOLETE_DIMENSIONS))
+ rrdset_delete_obsolete_dimensions(st);
+ else
+ rrdset_save(st);
+
+ rrdset_unlock(st);
+ }
+
+ rrdhost_unlock(host);
+}
+
+
+// ----------------------------------------------------------------------------
+// RRDHOST - save all hosts to disk
+
+void rrdhost_save_all(void) {
+ info("Saving database [%zu hosts(s)]...", rrd_hosts_available);
+
+ rrd_rdlock();
+
+ RRDHOST *host;
+ rrdhost_foreach_read(host)
+ rrdhost_save_charts(host);
+
+ rrd_unlock();
+}
+
+// ----------------------------------------------------------------------------
+// RRDHOST - save or delete all hosts from disk
+
+void rrdhost_cleanup_all(void) {
+ info("Cleaning up database [%zu hosts(s)]...", rrd_hosts_available);
+
+ rrd_rdlock();
+
+ RRDHOST *host;
+ rrdhost_foreach_read(host) {
+ if (host != localhost && rrdhost_flag_check(host, RRDHOST_FLAG_DELETE_ORPHAN_HOST) && !host->receiver
+#ifdef ENABLE_DBENGINE
+ /* don't delete multi-host DB host files */
+ && !(host->rrd_memory_mode == RRD_MEMORY_MODE_DBENGINE && host->rrdeng_ctx == &multidb_ctx)
+#endif
+ )
+ rrdhost_delete_charts(host);
+ else
+ rrdhost_cleanup_charts(host);
+ }
+
+ rrd_unlock();
+}
+
+
+// ----------------------------------------------------------------------------
+// RRDHOST - save or delete all the host charts from disk
+
+void rrdhost_cleanup_obsolete_charts(RRDHOST *host) {
+ time_t now = now_realtime_sec();
+
+ RRDSET *st;
+
+ uint32_t rrdhost_delete_obsolete_charts = rrdhost_flag_check(host, RRDHOST_FLAG_DELETE_OBSOLETE_CHARTS);
+
+restart_after_removal:
+ rrdset_foreach_write(st, host) {
+ if(unlikely(rrdset_flag_check(st, RRDSET_FLAG_OBSOLETE)
+ && st->last_accessed_time + rrdset_free_obsolete_time < now
+ && st->last_updated.tv_sec + rrdset_free_obsolete_time < now
+ && st->last_collected_time.tv_sec + rrdset_free_obsolete_time < now
+ )) {
+#ifdef ENABLE_DBENGINE
+ if(st->rrd_memory_mode == RRD_MEMORY_MODE_DBENGINE) {
+ RRDDIM *rd, *last;
+
+ rrdset_flag_set(st, RRDSET_FLAG_ARCHIVED);
+ while (st->variables) rrdsetvar_free(st->variables);
+ while (st->alarms) rrdsetcalc_unlink(st->alarms);
+ rrdset_wrlock(st);
+ for (rd = st->dimensions, last = NULL ; likely(rd) ; ) {
+ if (rrddim_flag_check(rd, RRDDIM_FLAG_ARCHIVED)) {
+ last = rd;
+ rd = rd->next;
+ continue;
+ }
+
+ rrddim_flag_set(rd, RRDDIM_FLAG_ARCHIVED);
+ while (rd->variables)
+ rrddimvar_free(rd->variables);
+
+ if (rrddim_flag_check(rd, RRDDIM_FLAG_OBSOLETE)) {
+ rrddim_flag_clear(rd, RRDDIM_FLAG_OBSOLETE);
+ /* only a collector can mark a chart as obsolete, so we must remove the reference */
+ uint8_t can_delete_metric = rd->state->collect_ops.finalize(rd);
+ if (can_delete_metric) {
+ /* This metric has no data and no references */
+ delete_dimension_uuid(rd->state->metric_uuid);
+ rrddim_free(st, rd);
+ if (unlikely(!last)) {
+ rd = st->dimensions;
+ }
+ else {
+ rd = last->next;
+ }
+ continue;
+ }
+ }
+ last = rd;
+ rd = rd->next;
+ }
+ rrdset_unlock(st);
+
+ debug(D_RRD_CALLS, "RRDSET: Cleaning up remaining chart variables for host '%s', chart '%s'", host->hostname, st->id);
+ rrdvar_free_remaining_variables(host, &st->rrdvar_root_index);
+
+ rrdset_flag_clear(st, RRDSET_FLAG_OBSOLETE);
+ if (st->dimensions) {
+ /* If the chart still has dimensions don't delete it from the metadata log */
+ continue;
+ }
+ }
+#endif
+ rrdset_rdlock(st);
+
+ if(rrdhost_delete_obsolete_charts)
+ rrdset_delete(st);
+ else
+ rrdset_save(st);
+
+ rrdset_unlock(st);
+
+ rrdset_free(st);
+ goto restart_after_removal;
+ }
+ }
+}
+
+// ----------------------------------------------------------------------------
+// RRDHOST - set system info from environment variables
+// system_info fields must be heap allocated or NULL
+int rrdhost_set_system_info_variable(struct rrdhost_system_info *system_info, char *name, char *value) {
+ int res = 0;
+
+ if (!strcmp(name, "NETDATA_PROTOCOL_VERSION"))
+ return res;
+ else if(!strcmp(name, "NETDATA_CONTAINER_OS_NAME")){
+ freez(system_info->container_os_name);
+ system_info->container_os_name = strdupz(value);
+ }
+ else if(!strcmp(name, "NETDATA_CONTAINER_OS_ID")){
+ freez(system_info->container_os_id);
+ system_info->container_os_id = strdupz(value);
+ }
+ else if(!strcmp(name, "NETDATA_CONTAINER_OS_ID_LIKE")){
+ freez(system_info->container_os_id_like);
+ system_info->container_os_id_like = strdupz(value);
+ }
+ else if(!strcmp(name, "NETDATA_CONTAINER_OS_VERSION")){
+ freez(system_info->container_os_version);
+ system_info->container_os_version = strdupz(value);
+ }
+ else if(!strcmp(name, "NETDATA_CONTAINER_OS_VERSION_ID")){
+ freez(system_info->container_os_version_id);
+ system_info->container_os_version_id = strdupz(value);
+ }
+ else if(!strcmp(name, "NETDATA_CONTAINER_OS_DETECTION")){
+ freez(system_info->host_os_detection);
+ system_info->host_os_detection = strdupz(value);
+ }
+ else if(!strcmp(name, "NETDATA_HOST_OS_NAME")){
+ freez(system_info->host_os_name);
+ system_info->host_os_name = strdupz(value);
+ }
+ else if(!strcmp(name, "NETDATA_HOST_OS_ID")){
+ freez(system_info->host_os_id);
+ system_info->host_os_id = strdupz(value);
+ }
+ else if(!strcmp(name, "NETDATA_HOST_OS_ID_LIKE")){
+ freez(system_info->host_os_id_like);
+ system_info->host_os_id_like = strdupz(value);
+ }
+ else if(!strcmp(name, "NETDATA_HOST_OS_VERSION")){
+ freez(system_info->host_os_version);
+ system_info->host_os_version = strdupz(value);
+ }
+ else if(!strcmp(name, "NETDATA_HOST_OS_VERSION_ID")){
+ freez(system_info->host_os_version_id);
+ system_info->host_os_version_id = strdupz(value);
+ }
+ else if(!strcmp(name, "NETDATA_HOST_OS_DETECTION")){
+ freez(system_info->host_os_detection);
+ system_info->host_os_detection = strdupz(value);
+ }
+ else if(!strcmp(name, "NETDATA_SYSTEM_KERNEL_NAME")){
+ freez(system_info->kernel_name);
+ system_info->kernel_name = strdupz(value);
+ }
+ else if(!strcmp(name, "NETDATA_SYSTEM_CPU_LOGICAL_CPU_COUNT")){
+ freez(system_info->host_cores);
+ system_info->host_cores = strdupz(value);
+ }
+ else if(!strcmp(name, "NETDATA_SYSTEM_CPU_FREQ")){
+ freez(system_info->host_cpu_freq);
+ system_info->host_cpu_freq = strdupz(value);
+ }
+ else if(!strcmp(name, "NETDATA_SYSTEM_TOTAL_RAM")){
+ freez(system_info->host_ram_total);
+ system_info->host_ram_total = strdupz(value);
+ }
+ else if(!strcmp(name, "NETDATA_SYSTEM_TOTAL_DISK_SIZE")){
+ freez(system_info->host_disk_space);
+ system_info->host_disk_space = strdupz(value);
+ }
+ else if(!strcmp(name, "NETDATA_SYSTEM_KERNEL_VERSION")){
+ freez(system_info->kernel_version);
+ system_info->kernel_version = strdupz(value);
+ }
+ else if(!strcmp(name, "NETDATA_SYSTEM_ARCHITECTURE")){
+ freez(system_info->architecture);
+ system_info->architecture = strdupz(value);
+ }
+ else if(!strcmp(name, "NETDATA_SYSTEM_VIRTUALIZATION")){
+ freez(system_info->virtualization);
+ system_info->virtualization = strdupz(value);
+ }
+ else if(!strcmp(name, "NETDATA_SYSTEM_VIRT_DETECTION")){
+ freez(system_info->virt_detection);
+ system_info->virt_detection = strdupz(value);
+ }
+ else if(!strcmp(name, "NETDATA_SYSTEM_CONTAINER")){
+ freez(system_info->container);
+ system_info->container = strdupz(value);
+ }
+ else if(!strcmp(name, "NETDATA_SYSTEM_CONTAINER_DETECTION")){
+ freez(system_info->container_detection);
+ system_info->container_detection = strdupz(value);
+ }
+ else if(!strcmp(name, "NETDATA_HOST_IS_K8S_NODE")){
+ freez(system_info->is_k8s_node);
+ system_info->is_k8s_node = strdupz(value);
+ }
+ else if (!strcmp(name, "NETDATA_SYSTEM_CPU_VENDOR"))
+ return res;
+ else if (!strcmp(name, "NETDATA_SYSTEM_CPU_MODEL"))
+ return res;
+ else if (!strcmp(name, "NETDATA_SYSTEM_CPU_DETECTION"))
+ return res;
+ else if (!strcmp(name, "NETDATA_SYSTEM_RAM_DETECTION"))
+ return res;
+ else if (!strcmp(name, "NETDATA_SYSTEM_DISK_DETECTION"))
+ return res;
+ else {
+ res = 1;
+ }
+
+ return res;
+}
+
+/**
+ * Alarm Compare ID
+ *
+ * Callback function used with the binary trees to compare the id of RRDCALC
+ *
+ * @param a a pointer to the RRDCALC item to insert, compare or update in the binary tree
+ * @param b the pointer to the binary tree item to compare against.
+ *
+ * @return 0 when the values are equal, 1 when a is bigger than b and -1 when a is smaller than b.
+ */
+int alarm_compare_id(void *a, void *b) {
+    register uint32_t id1 = ((RRDCALC *)a)->id;
+    register uint32_t id2 = ((RRDCALC *)b)->id;
+
+    if(id1 < id2) return -1;
+    else if(id1 > id2) return 1;
+
+ return 0;
+}
+
+/**
+ * Alarm Compare NAME
+ *
+ * Callback function used with the binary trees to compare the name of RRDCALC
+ *
+ * @param a a pointer to the RRDCALC item to insert, compare or update in the binary tree
+ * @param b the pointer to the binary tree item to compare against.
+ *
+ * @return 0 when the values are equal, 1 when a is bigger than b and -1 when a is smaller than b.
+ */
+int alarm_compare_name(void *a, void *b) {
+ RRDCALC *in1 = (RRDCALC *)a;
+ RRDCALC *in2 = (RRDCALC *)b;
+
+ if(in1->hash < in2->hash) return -1;
+ else if(in1->hash > in2->hash) return 1;
+
+ return strcmp(in1->name,in2->name);
+}
+
+// Added for gap-filling, if this proves to be a bottleneck in large-scale systems then we will need to cache
+// the last entry times as the metric updates, but let's see if it is a problem first.
+time_t rrdhost_last_entry_t(RRDHOST *h) {
+ rrdhost_rdlock(h);
+ RRDSET *st;
+ time_t result = 0;
+ rrdset_foreach_read(st, h) {
+ time_t st_last = rrdset_last_entry_t(st);
+ if (st_last > result)
+ result = st_last;
+ }
+ rrdhost_unlock(h);
+ return result;
+}
diff --git a/database/rrdlabels.c b/database/rrdlabels.c
new file mode 100644
index 0000000..f958376
--- /dev/null
+++ b/database/rrdlabels.c
@@ -0,0 +1,203 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#define NETDATA_RRD_INTERNALS
+#include "rrd.h"
+
+char *translate_label_source(LABEL_SOURCE l) {
+ switch (l) {
+ case LABEL_SOURCE_AUTO:
+ return "AUTO";
+ case LABEL_SOURCE_NETDATA_CONF:
+ return "NETDATA.CONF";
+ case LABEL_SOURCE_DOCKER :
+ return "DOCKER";
+ case LABEL_SOURCE_ENVIRONMENT :
+ return "ENVIRONMENT";
+ case LABEL_SOURCE_KUBERNETES :
+ return "KUBERNETES";
+ default:
+ return "Invalid label source";
+ }
+}
+
+int is_valid_label_value(char *value) {
+ while(*value) {
+ if(*value == '"' || *value == '\'' || *value == '*' || *value == '!') {
+ return 0;
+ }
+
+ value++;
+ }
+
+ return 1;
+}
+
+int is_valid_label_key(char *key) {
+    // Prometheus exporter
+ if(!strcmp(key, "chart") || !strcmp(key, "family") || !strcmp(key, "dimension"))
+ return 0;
+
+    // Netdata and Prometheus internal
+ if (*key == '_')
+ return 0;
+
+ while(*key) {
+ if(!(isdigit(*key) || isalpha(*key) || *key == '.' || *key == '_' || *key == '-'))
+ return 0;
+
+ key++;
+ }
+
+ return 1;
+}
+
+void strip_last_symbol(
+ char *str,
+ char symbol,
+ SKIP_ESCAPED_CHARACTERS_OPTION skip_escaped_characters)
+{
+ char *end = str;
+
+ while (*end && *end != symbol) {
+ if (unlikely(skip_escaped_characters && *end == '\\')) {
+ end++;
+ if (unlikely(!*end))
+ break;
+ }
+ end++;
+ }
+ if (likely(*end == symbol))
+ *end = '\0';
+}
+
+char *strip_double_quotes(char *str, SKIP_ESCAPED_CHARACTERS_OPTION skip_escaped_characters)
+{
+ if (*str == '"') {
+ str++;
+ strip_last_symbol(str, '"', skip_escaped_characters);
+ }
+
+ return str;
+}
+
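+// Each label is a single allocation: the struct followed by copies of the key
+// and value strings, so free_label_list() releases an entry with one freez().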
+struct label *create_label(char *key, char *value, LABEL_SOURCE label_source)
+{
+ size_t key_len = strlen(key), value_len = strlen(value);
+ size_t n = sizeof(struct label) + key_len + 1 + value_len + 1;
+ struct label *result = callocz(1,n);
+ if (result != NULL) {
+ char *c = (char *)result;
+ c += sizeof(struct label);
+ strcpy(c, key);
+ result->key = c;
+ c += key_len + 1;
+ strcpy(c, value);
+ result->value = c;
+ result->label_source = label_source;
+ result->key_hash = simple_hash(result->key);
+ }
+ return result;
+}
+
+void free_label_list(struct label *labels)
+{
+ while (labels != NULL)
+ {
+ struct label *current = labels;
+ labels = labels->next;
+ freez(current);
+ }
+}
+
+void replace_label_list(struct label_index *labels, struct label *new_labels)
+{
+ netdata_rwlock_wrlock(&labels->labels_rwlock);
+ struct label *old_labels = labels->head;
+ labels->head = new_labels;
+ netdata_rwlock_unlock(&labels->labels_rwlock);
+
+ free_label_list(old_labels);
+}
+
+struct label *add_label_to_list(struct label *l, char *key, char *value, LABEL_SOURCE label_source)
+{
+ struct label *lab = create_label(key, value, label_source);
+ lab->next = l;
+ return lab;
+}
+
+void update_label_list(struct label **labels, struct label *new_labels)
+{
+ free_label_list(*labels);
+ *labels = NULL;
+
+ while (new_labels != NULL)
+ {
+ *labels = add_label_to_list(*labels, new_labels->key, new_labels->value, new_labels->label_source);
+ new_labels = new_labels->next;
+ }
+}
+
+struct label *label_list_lookup_key(struct label *head, char *key, uint32_t key_hash)
+{
+ while (head != NULL)
+ {
+ if (head->key_hash == key_hash && !strcmp(head->key, key))
+ return head;
+ head = head->next;
+ }
+ return NULL;
+}
+
+int label_list_contains_key(struct label *head, char *key, uint32_t key_hash)
+{
+ return (label_list_lookup_key(head, key, key_hash) != NULL);
+}
+
+int label_list_contains(struct label *head, struct label *check)
+{
+ return label_list_contains_key(head, check->key, check->key_hash);
+}
+
+struct label *label_list_lookup_keylist(struct label *head, char *key)
+{
+ SIMPLE_PATTERN *pattern = NULL;
+
+ pattern = simple_pattern_create(key, ",|\t\r\n\f\v", SIMPLE_PATTERN_EXACT);
+
+ while (head != NULL)
+ {
+ if (simple_pattern_matches(pattern, head->key))
+ break;
+ head = head->next;
+ }
+ simple_pattern_free(pattern);
+ return head;
+}
+
+int label_list_contains_keylist(struct label *head, char *keylist)
+{
+ return (label_list_lookup_keylist(head, keylist) != NULL);
+}
+
+
+/* Create a list with entries from both lists.
+ If any entry in the low priority list is masked by an entry in the high priority list then delete it.
+*/
+struct label *merge_label_lists(struct label *lo_pri, struct label *hi_pri)
+{
+ struct label *result = hi_pri;
+ while (lo_pri != NULL)
+ {
+ struct label *current = lo_pri;
+ lo_pri = lo_pri->next;
+ if (!label_list_contains(result, current)) {
+ current->next = result;
+ result = current;
+ }
+ else
+ freez(current);
+ }
+ return result;
+}
+
diff --git a/database/rrdset.c b/database/rrdset.c
new file mode 100644
index 0000000..d16fe73
--- /dev/null
+++ b/database/rrdset.c
@@ -0,0 +1,1973 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#define NETDATA_RRD_INTERNALS
+#include "rrd.h"
+#include <sched.h>
+
+void __rrdset_check_rdlock(RRDSET *st, const char *file, const char *function, const unsigned long line) {
+ debug(D_RRD_CALLS, "Checking read lock on chart '%s'", st->id);
+
+ int ret = netdata_rwlock_trywrlock(&st->rrdset_rwlock);
+ if(ret == 0)
+ fatal("RRDSET '%s' should be read-locked, but it is not, at function %s() at line %lu of file '%s'", st->id, function, line, file);
+}
+
+void __rrdset_check_wrlock(RRDSET *st, const char *file, const char *function, const unsigned long line) {
+ debug(D_RRD_CALLS, "Checking write lock on chart '%s'", st->id);
+
+ int ret = netdata_rwlock_tryrdlock(&st->rrdset_rwlock);
+ if(ret == 0)
+ fatal("RRDSET '%s' should be write-locked, but it is not, at function %s() at line %lu of file '%s'", st->id, function, line, file);
+}
+
+
+// ----------------------------------------------------------------------------
+// RRDSET index
+
+int rrdset_compare(void* a, void* b) {
+ if(((RRDSET *)a)->hash < ((RRDSET *)b)->hash) return -1;
+ else if(((RRDSET *)a)->hash > ((RRDSET *)b)->hash) return 1;
+ else return strcmp(((RRDSET *)a)->id, ((RRDSET *)b)->id);
+}
+
+static RRDSET *rrdset_index_find(RRDHOST *host, const char *id, uint32_t hash) {
+ RRDSET tmp;
+ strncpyz(tmp.id, id, RRD_ID_LENGTH_MAX);
+ tmp.hash = (hash)?hash:simple_hash(tmp.id);
+
+ return (RRDSET *)avl_search_lock(&(host->rrdset_root_index), (avl *) &tmp);
+}
+
+// ----------------------------------------------------------------------------
+// RRDSET name index
+
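+// container_of-style helper: recovers the RRDSET from a pointer to its embedded
+// avlname member, which is what the name index actually stores.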
+#define rrdset_from_avlname(avlname_ptr) ((RRDSET *)((avlname_ptr) - offsetof(RRDSET, avlname)))
+
+int rrdset_compare_name(void* a, void* b) {
+ RRDSET *A = rrdset_from_avlname(a);
+ RRDSET *B = rrdset_from_avlname(b);
+
+ // fprintf(stderr, "COMPARING: %s with %s\n", A->name, B->name);
+
+ if(A->hash_name < B->hash_name) return -1;
+ else if(A->hash_name > B->hash_name) return 1;
+ else return strcmp(A->name, B->name);
+}
+
+RRDSET *rrdset_index_add_name(RRDHOST *host, RRDSET *st) {
+ void *result;
+ // fprintf(stderr, "ADDING: %s (name: %s)\n", st->id, st->name);
+ result = avl_insert_lock(&host->rrdset_root_index_name, (avl *) (&st->avlname));
+ if(result) return rrdset_from_avlname(result);
+ return NULL;
+}
+
+RRDSET *rrdset_index_del_name(RRDHOST *host, RRDSET *st) {
+ void *result;
+ // fprintf(stderr, "DELETING: %s (name: %s)\n", st->id, st->name);
+ result = (RRDSET *)avl_remove_lock(&((host)->rrdset_root_index_name), (avl *)(&st->avlname));
+ if(result) return rrdset_from_avlname(result);
+ return NULL;
+}
+
+
+// ----------------------------------------------------------------------------
+// RRDSET - find charts
+
+static inline RRDSET *rrdset_index_find_name(RRDHOST *host, const char *name, uint32_t hash) {
+ void *result = NULL;
+ RRDSET tmp;
+ tmp.name = name;
+ tmp.hash_name = (hash)?hash:simple_hash(tmp.name);
+
+ // fprintf(stderr, "SEARCHING: %s\n", name);
+ result = avl_search_lock(&host->rrdset_root_index_name, (avl *) (&(tmp.avlname)));
+ if(result) {
+ RRDSET *st = rrdset_from_avlname(result);
+ if(strcmp(st->magic, RRDSET_MAGIC) != 0)
+ error("Search for RRDSET %s returned an invalid RRDSET %s (name %s)", name, st->id, st->name);
+
+ // fprintf(stderr, "FOUND: %s\n", name);
+ return rrdset_from_avlname(result);
+ }
+ // fprintf(stderr, "NOT FOUND: %s\n", name);
+ return NULL;
+}
+
+inline RRDSET *rrdset_find(RRDHOST *host, const char *id) {
+ debug(D_RRD_CALLS, "rrdset_find() for chart '%s' in host '%s'", id, host->hostname);
+ RRDSET *st = rrdset_index_find(host, id, 0);
+ return(st);
+}
+
+inline RRDSET *rrdset_find_bytype(RRDHOST *host, const char *type, const char *id) {
+ debug(D_RRD_CALLS, "rrdset_find_bytype() for chart '%s.%s' in host '%s'", type, id, host->hostname);
+
+ char buf[RRD_ID_LENGTH_MAX + 1];
+ strncpyz(buf, type, RRD_ID_LENGTH_MAX - 1);
+ strcat(buf, ".");
+ int len = (int) strlen(buf);
+ strncpyz(&buf[len], id, (size_t) (RRD_ID_LENGTH_MAX - len));
+
+ return(rrdset_find(host, buf));
+}
+
+inline RRDSET *rrdset_find_byname(RRDHOST *host, const char *name) {
+ debug(D_RRD_CALLS, "rrdset_find_byname() for chart '%s' in host '%s'", name, host->hostname);
+ RRDSET *st = rrdset_index_find_name(host, name, 0);
+ return(st);
+}
+
+// ----------------------------------------------------------------------------
+// RRDSET - rename charts
+
+char *rrdset_strncpyz_name(char *to, const char *from, size_t length) {
+ char c, *p = to;
+
+ while (length-- && (c = *from++)) {
+ if(c != '.' && !isalnum(c))
+ c = '_';
+
+ *p++ = c;
+ }
+
+ *p = '\0';
+
+ return to;
+}
+
+int rrdset_set_name(RRDSET *st, const char *name) {
+ if(unlikely(st->name && !strcmp(st->name, name)))
+ return 1;
+
+ RRDHOST *host = st->rrdhost;
+
+ debug(D_RRD_CALLS, "rrdset_set_name() old: '%s', new: '%s'", st->name?st->name:"", name);
+
+ char b[CONFIG_MAX_VALUE + 1];
+ char n[RRD_ID_LENGTH_MAX + 1];
+
+ snprintfz(n, RRD_ID_LENGTH_MAX, "%s.%s", st->type, name);
+ rrdset_strncpyz_name(b, n, CONFIG_MAX_VALUE);
+
+ if(rrdset_index_find_name(host, b, 0)) {
+ info("RRDSET: chart name '%s' on host '%s' already exists.", b, host->hostname);
+ return 0;
+ }
+
+ if(st->name) {
+ rrdset_index_del_name(host, st);
+ st->name = config_set_default(st->config_section, "name", b);
+ st->hash_name = simple_hash(st->name);
+ rrdsetvar_rename_all(st);
+ }
+ else {
+ st->name = config_get(st->config_section, "name", b);
+ st->hash_name = simple_hash(st->name);
+ }
+
+ rrdset_wrlock(st);
+ RRDDIM *rd;
+ rrddim_foreach_write(rd, st)
+ rrddimvar_rename_all(rd);
+ rrdset_unlock(st);
+
+ if(unlikely(rrdset_index_add_name(host, st) != st))
+ error("RRDSET: INTERNAL ERROR: attempted to index duplicate chart name '%s'", st->name);
+
+ rrdset_flag_clear(st, RRDSET_FLAG_BACKEND_SEND);
+ rrdset_flag_clear(st, RRDSET_FLAG_BACKEND_IGNORE);
+ rrdset_flag_clear(st, RRDSET_FLAG_UPSTREAM_SEND);
+ rrdset_flag_clear(st, RRDSET_FLAG_UPSTREAM_IGNORE);
+ rrdset_flag_clear(st, RRDSET_FLAG_UPSTREAM_EXPOSED);
+
+ return 2;
+}
+
+inline void rrdset_is_obsolete(RRDSET *st) {
+ if(unlikely(rrdset_flag_check(st, RRDSET_FLAG_ARCHIVED))) {
+ info("Cannot obsolete already archived chart %s", st->name);
+ return;
+ }
+
+ if(unlikely(!(rrdset_flag_check(st, RRDSET_FLAG_OBSOLETE)))) {
+ rrdset_flag_set(st, RRDSET_FLAG_OBSOLETE);
+ rrdset_flag_clear(st, RRDSET_FLAG_UPSTREAM_EXPOSED);
+
+ // the chart will not get more updates (data collection)
+ // so, we have to push its definition now
+ rrdset_push_chart_definition_now(st);
+ }
+}
+
+inline void rrdset_isnot_obsolete(RRDSET *st) {
+ if(unlikely((rrdset_flag_check(st, RRDSET_FLAG_OBSOLETE)))) {
+ rrdset_flag_clear(st, RRDSET_FLAG_OBSOLETE);
+ rrdset_flag_clear(st, RRDSET_FLAG_UPSTREAM_EXPOSED);
+
+ // the chart will be pushed upstream automatically
+ // due to data collection
+ }
+}
+
+inline void rrdset_update_heterogeneous_flag(RRDSET *st) {
+ RRDHOST *host = st->rrdhost;
+ (void)host;
+
+ RRDDIM *rd;
+
+ rrdset_flag_clear(st, RRDSET_FLAG_HOMOGENEOUS_CHECK);
+
+ RRD_ALGORITHM algorithm = st->dimensions->algorithm;
+    collected_number multiplier = llabs(st->dimensions->multiplier);
+    collected_number divisor = llabs(st->dimensions->divisor);
+
+ rrddim_foreach_read(rd, st) {
+        if(algorithm != rd->algorithm || multiplier != llabs(rd->multiplier) || divisor != llabs(rd->divisor)) {
+ if(!rrdset_flag_check(st, RRDSET_FLAG_HETEROGENEOUS)) {
+ #ifdef NETDATA_INTERNAL_CHECKS
+ info("Dimension '%s' added on chart '%s' of host '%s' is not homogeneous to other dimensions already present (algorithm is '%s' vs '%s', multiplier is " COLLECTED_NUMBER_FORMAT " vs " COLLECTED_NUMBER_FORMAT ", divisor is " COLLECTED_NUMBER_FORMAT " vs " COLLECTED_NUMBER_FORMAT ").",
+ rd->name,
+ st->name,
+ host->hostname,
+ rrd_algorithm_name(rd->algorithm), rrd_algorithm_name(algorithm),
+ rd->multiplier, multiplier,
+ rd->divisor, divisor
+ );
+ #endif
+ rrdset_flag_set(st, RRDSET_FLAG_HETEROGENEOUS);
+ }
+ return;
+ }
+ }
+
+ rrdset_flag_clear(st, RRDSET_FLAG_HETEROGENEOUS);
+}
+
+// ----------------------------------------------------------------------------
+// RRDSET - reset a chart
+
+void rrdset_reset(RRDSET *st) {
+ debug(D_RRD_CALLS, "rrdset_reset() %s", st->name);
+
+ st->last_collected_time.tv_sec = 0;
+ st->last_collected_time.tv_usec = 0;
+ st->last_updated.tv_sec = 0;
+ st->last_updated.tv_usec = 0;
+ st->current_entry = 0;
+ st->counter = 0;
+ st->counter_done = 0;
+ st->rrddim_page_alignment = 0;
+
+ RRDDIM *rd;
+ rrddim_foreach_read(rd, st) {
+ rd->last_collected_time.tv_sec = 0;
+ rd->last_collected_time.tv_usec = 0;
+ rd->collections_counter = 0;
+ // memset(rd->values, 0, rd->entries * sizeof(storage_number));
+#ifdef ENABLE_DBENGINE
+ if (RRD_MEMORY_MODE_DBENGINE == st->rrd_memory_mode && !rrddim_flag_check(rd, RRDDIM_FLAG_ARCHIVED)) {
+ rrdeng_store_metric_flush_current_page(rd);
+ }
+#endif
+ }
+}
+
+// ----------------------------------------------------------------------------
+// RRDSET - helpers for rrdset_create()
+
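+// Clamps entries to [5, RRD_HISTORY_ENTRIES_MAX]; for memory-mapped modes it then
+// grows the count so that the RRDDIM header plus its storage_number array fills
+// whole pages, keeping mmap()ed files page-aligned.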
+inline long align_entries_to_pagesize(RRD_MEMORY_MODE mode, long entries) {
+ if(unlikely(entries < 5)) entries = 5;
+ if(unlikely(entries > RRD_HISTORY_ENTRIES_MAX)) entries = RRD_HISTORY_ENTRIES_MAX;
+
+ if(unlikely(mode == RRD_MEMORY_MODE_NONE || mode == RRD_MEMORY_MODE_ALLOC))
+ return entries;
+
+ long page = (long)sysconf(_SC_PAGESIZE);
+ long size = sizeof(RRDDIM) + entries * sizeof(storage_number);
+ if(unlikely(size % page)) {
+ size -= (size % page);
+ size += page;
+
+ long n = (size - sizeof(RRDDIM)) / sizeof(storage_number);
+ return n;
+ }
+
+ return entries;
+}
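+
+// Worked example, assuming a 4096-byte page and, purely hypothetically,
+// sizeof(RRDDIM) = 1000 and sizeof(storage_number) = 4 (the real sizes are
+// platform-dependent):
+//   entries = 3600 -> size = 1000 + 3600*4 = 15400 bytes
+//   15400 % 4096 = 3112, so size is rounded up to 16384 (4 full pages)
+//   n = (16384 - 1000) / 4 = 3846 entries are returned instead of 3600.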
+
+static inline void last_collected_time_align(RRDSET *st) {
+ st->last_collected_time.tv_sec -= st->last_collected_time.tv_sec % st->update_every;
+
+ if(unlikely(rrdset_flag_check(st, RRDSET_FLAG_STORE_FIRST)))
+ st->last_collected_time.tv_usec = 0;
+ else
+ st->last_collected_time.tv_usec = 500000;
+}
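+
+// Worked example: with update_every = 10 and tv_sec = 1000007,
+// 1000007 % 10 = 7, so tv_sec becomes 1000000; tv_usec is then set to
+// 500000 (mid-second), unless RRDSET_FLAG_STORE_FIRST is set, in which
+// case it is set to 0.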
+
+static inline void last_updated_time_align(RRDSET *st) {
+ st->last_updated.tv_sec -= st->last_updated.tv_sec % st->update_every;
+ st->last_updated.tv_usec = 0;
+}
+
+// ----------------------------------------------------------------------------
+// RRDSET - free a chart
+
+void rrdset_free(RRDSET *st) {
+ if(unlikely(!st)) return;
+
+ RRDHOST *host = st->rrdhost;
+
+ rrdhost_check_wrlock(host); // make sure we have a write lock on the host
+ rrdset_wrlock(st); // lock this RRDSET
+ // info("Removing chart '%s' ('%s')", st->id, st->name);
+
+ // ------------------------------------------------------------------------
+ // remove it from the indexes
+
+ if(unlikely(rrdset_index_del(host, st) != st))
+ error("RRDSET: INTERNAL ERROR: attempt to remove from index chart '%s', removed a different chart.", st->id);
+
+ rrdset_index_del_name(host, st);
+
+ // ------------------------------------------------------------------------
+ // free its children structures
+
+ freez(st->exporting_flags);
+
+ while(st->variables) rrdsetvar_free(st->variables);
+// while(st->alarms) rrdsetcalc_unlink(st->alarms);
+ /* We must free all connected alarms here, in case this was an ephemeral chart whose alarm was
+ * created by a template. Leaving them attached would be an effective memory leak that could not
+ * be detected, since the alarms would remain connected to the host and only be freed at shutdown. */
+ while(st->alarms) rrdcalc_unlink_and_free(st->rrdhost, st->alarms);
+ while(st->dimensions) rrddim_free(st, st->dimensions);
+
+ rrdfamily_free(host, st->rrdfamily);
+
+ debug(D_RRD_CALLS, "RRDSET: Cleaning up remaining chart variables for host '%s', chart '%s'", host->hostname, st->id);
+ rrdvar_free_remaining_variables(host, &st->rrdvar_root_index);
+
+ // ------------------------------------------------------------------------
+ // remove it from the configuration
+
+ appconfig_section_destroy_non_loaded(&netdata_config, st->config_section);
+
+ // ------------------------------------------------------------------------
+ // unlink it from the host
+
+ if(st == host->rrdset_root) {
+ host->rrdset_root = st->next;
+ }
+ else {
+ // find the previous one
+ RRDSET *s;
+ for(s = host->rrdset_root; s && s->next != st ; s = s->next) ;
+
+ // bypass it
+ if(s) s->next = st->next;
+ else error("Request to free RRDSET '%s': cannot find it under host '%s'", st->id, host->hostname);
+ }
+
+ rrdset_unlock(st);
+
+ // ------------------------------------------------------------------------
+ // free it
+
+ netdata_rwlock_destroy(&st->rrdset_rwlock);
+ netdata_rwlock_destroy(&st->state->labels.labels_rwlock);
+
+ // free directly allocated members
+ freez(st->config_section);
+ freez(st->plugin_name);
+ freez(st->module_name);
+ freez(st->state->old_title);
+ freez(st->state->old_context);
+ free_label_list(st->state->labels.head);
+ freez(st->state);
+
+ switch(st->rrd_memory_mode) {
+ case RRD_MEMORY_MODE_SAVE:
+ case RRD_MEMORY_MODE_MAP:
+ case RRD_MEMORY_MODE_RAM:
+ debug(D_RRD_CALLS, "Unmapping stats '%s'.", st->name);
+ munmap(st, st->memsize);
+ break;
+
+ case RRD_MEMORY_MODE_ALLOC:
+ case RRD_MEMORY_MODE_NONE:
+ case RRD_MEMORY_MODE_DBENGINE:
+#ifdef ENABLE_DBENGINE
+ if (st->rrd_memory_mode == RRD_MEMORY_MODE_DBENGINE)
+ freez(st->chart_uuid);
+#endif
+ freez(st);
+ break;
+ }
+
+}
+
+void rrdset_save(RRDSET *st) {
+ rrdset_check_rdlock(st);
+
+ // info("Saving chart '%s' ('%s')", st->id, st->name);
+
+ if(st->rrd_memory_mode == RRD_MEMORY_MODE_SAVE) {
+ debug(D_RRD_STATS, "Saving stats '%s' to '%s'.", st->name, st->cache_filename);
+ memory_file_save(st->cache_filename, st, st->memsize);
+ }
+
+ RRDDIM *rd;
+ rrddim_foreach_read(rd, st) {
+ if(likely(rd->rrd_memory_mode == RRD_MEMORY_MODE_SAVE)) {
+ debug(D_RRD_STATS, "Saving dimension '%s' to '%s'.", rd->name, rd->cache_filename);
+ memory_file_save(rd->cache_filename, rd, rd->memsize);
+ }
+ }
+}
+
+void rrdset_delete_custom(RRDSET *st, int db_rotated) {
+ RRDDIM *rd;
+#ifndef ENABLE_ACLK
+ UNUSED(db_rotated);
+#endif
+ rrdset_check_rdlock(st);
+
+ info("Deleting chart '%s' ('%s') from disk...", st->id, st->name);
+
+ if(st->rrd_memory_mode == RRD_MEMORY_MODE_SAVE || st->rrd_memory_mode == RRD_MEMORY_MODE_MAP) {
+ info("Deleting chart header file '%s'.", st->cache_filename);
+ if(unlikely(unlink(st->cache_filename) == -1))
+ error("Cannot delete chart header file '%s'", st->cache_filename);
+ }
+
+ rrddim_foreach_read(rd, st) {
+ if(likely(rd->rrd_memory_mode == RRD_MEMORY_MODE_SAVE || rd->rrd_memory_mode == RRD_MEMORY_MODE_MAP)) {
+ info("Deleting dimension file '%s'.", rd->cache_filename);
+ if(unlikely(unlink(rd->cache_filename) == -1))
+ error("Cannot delete dimension file '%s'", rd->cache_filename);
+ }
+ }
+
+ recursively_delete_dir(st->cache_dir, "left-over chart");
+#ifdef ENABLE_ACLK
+ if ((netdata_cloud_setting) && (db_rotated || RRD_MEMORY_MODE_DBENGINE != st->rrd_memory_mode)) {
+ aclk_del_collector(st->rrdhost, st->plugin_name, st->module_name);
+ aclk_update_chart(st->rrdhost, st->id, ACLK_CMD_CHARTDEL);
+ }
+#endif
+
+}
+
+void rrdset_delete_obsolete_dimensions(RRDSET *st) {
+ RRDDIM *rd;
+
+ rrdset_check_rdlock(st);
+
+ info("Deleting dimensions of chart '%s' ('%s') from disk...", st->id, st->name);
+
+ rrddim_foreach_read(rd, st) {
+ if(rrddim_flag_check(rd, RRDDIM_FLAG_OBSOLETE)) {
+ if(likely(rd->rrd_memory_mode == RRD_MEMORY_MODE_SAVE || rd->rrd_memory_mode == RRD_MEMORY_MODE_MAP)) {
+ info("Deleting dimension file '%s'.", rd->cache_filename);
+ if(unlikely(unlink(rd->cache_filename) == -1))
+ error("Cannot delete dimension file '%s'", rd->cache_filename);
+ }
+ }
+ }
+}
+
+// ----------------------------------------------------------------------------
+// RRDSET - create a chart
+
+static inline RRDSET *rrdset_find_on_create(RRDHOST *host, const char *fullid) {
+ RRDSET *st = rrdset_find(host, fullid);
+ if(unlikely(st)) {
+ rrdset_isnot_obsolete(st);
+ debug(D_RRD_CALLS, "RRDSET '%s', already exists.", fullid);
+ return st;
+ }
+
+ return NULL;
+}
+
+RRDSET *rrdset_create_custom(
+ RRDHOST *host
+ , const char *type
+ , const char *id
+ , const char *name
+ , const char *family
+ , const char *context
+ , const char *title
+ , const char *units
+ , const char *plugin
+ , const char *module
+ , long priority
+ , int update_every
+ , RRDSET_TYPE chart_type
+ , RRD_MEMORY_MODE memory_mode
+ , long history_entries
+) {
+ if(!type || !type[0]) {
+ fatal("Cannot create rrd stats without a type: id '%s', name '%s', family '%s', context '%s', title '%s', units '%s', plugin '%s', module '%s'."
+ , (id && *id)?id:"<unset>"
+ , (name && *name)?name:"<unset>"
+ , (family && *family)?family:"<unset>"
+ , (context && *context)?context:"<unset>"
+ , (title && *title)?title:"<unset>"
+ , (units && *units)?units:"<unset>"
+ , (plugin && *plugin)?plugin:"<unset>"
+ , (module && *module)?module:"<unset>"
+ );
+ return NULL;
+ }
+
+ if(!id || !id[0]) {
+ fatal("Cannot create rrd stats without an id: type '%s', name '%s', family '%s', context '%s', title '%s', units '%s', plugin '%s', module '%s'."
+ , type
+ , (name && *name)?name:"<unset>"
+ , (family && *family)?family:"<unset>"
+ , (context && *context)?context:"<unset>"
+ , (title && *title)?title:"<unset>"
+ , (units && *units)?units:"<unset>"
+ , (plugin && *plugin)?plugin:"<unset>"
+ , (module && *module)?module:"<unset>"
+ );
+ return NULL;
+ }
+
+ // ------------------------------------------------------------------------
+ // check if it already exists
+
+ char fullid[RRD_ID_LENGTH_MAX + 1];
+ snprintfz(fullid, RRD_ID_LENGTH_MAX, "%s.%s", type, id);
+
+ int changed_from_archived_to_active = 0;
+ RRDSET *st = rrdset_find_on_create(host, fullid);
+ if (st) {
+ int mark_rebuild = 0;
+ rrdset_flag_set(st, RRDSET_FLAG_SYNC_CLOCK);
+ rrdset_flag_clear(st, RRDSET_FLAG_UPSTREAM_EXPOSED);
+ if (rrdset_flag_check(st, RRDSET_FLAG_ARCHIVED)) {
+ rrdset_flag_clear(st, RRDSET_FLAG_ARCHIVED);
+ changed_from_archived_to_active = 1;
+ mark_rebuild |= META_CHART_ACTIVATED;
+ }
+ char *old_plugin = NULL, *old_module = NULL, *old_title = NULL, *old_context = NULL,
+ *old_title_v = NULL, *old_context_v = NULL;
+ int rc;
+
+ if(unlikely(name))
+ rc = rrdset_set_name(st, name);
+ else
+ rc = rrdset_set_name(st, id);
+
+ if (rc == 2)
+ mark_rebuild |= META_CHART_UPDATED;
+
+ if (unlikely(st->priority != priority)) {
+ st->priority = priority;
+ mark_rebuild |= META_CHART_UPDATED;
+ }
+ if (unlikely(st->rrd_memory_mode == RRD_MEMORY_MODE_DBENGINE && st->update_every != update_every)) {
+ st->update_every = update_every;
+ mark_rebuild |= META_CHART_UPDATED;
+ }
+
+ if (plugin && st->plugin_name) {
+ if (unlikely(strcmp(plugin, st->plugin_name))) {
+ old_plugin = st->plugin_name;
+ st->plugin_name = strdupz(plugin);
+ mark_rebuild |= META_PLUGIN_UPDATED;
+ }
+ } else {
+ if (plugin != st->plugin_name) { // one is NULL?
+ old_plugin = st->plugin_name;
+ st->plugin_name = plugin ? strdupz(plugin) : NULL;
+ mark_rebuild |= META_PLUGIN_UPDATED;
+ }
+ }
+
+ if (module && st->module_name) {
+ if (unlikely(strcmp(module, st->module_name))) {
+ old_module = st->module_name;
+ st->module_name = strdupz(module);
+ mark_rebuild |= META_MODULE_UPDATED;
+ }
+ } else {
+ if (module != st->module_name) {
+ if (st->module_name && *st->module_name) {
+ old_module = st->module_name;
+ st->module_name = module ? strdupz(module) : NULL;
+ mark_rebuild |= META_MODULE_UPDATED;
+ }
+ }
+ }
+
+ if (unlikely(title && st->state->old_title && strcmp(st->state->old_title, title))) {
+ char *new_title = strdupz(title);
+ old_title_v = st->state->old_title;
+ st->state->old_title = strdupz(title);
+ json_fix_string(new_title);
+ old_title = st->title;
+ st->title = new_title;
+ mark_rebuild |= META_CHART_UPDATED;
+ }
+
+ RRDSET_TYPE new_chart_type =
+ rrdset_type_id(config_get(st->config_section, "chart type", rrdset_type_name(chart_type)));
+ if (st->chart_type != new_chart_type) {
+ st->chart_type = new_chart_type;
+ mark_rebuild |= META_CHART_UPDATED;
+ }
+
+ if (unlikely(context && st->state->old_context && strcmp(st->state->old_context, context))) {
+ char *new_context = strdupz(context);
+ old_context_v = st->state->old_context;
+ st->state->old_context = strdupz(context);
+ json_fix_string(new_context);
+ old_context = st->context;
+ st->context = new_context;
+ st->hash_context = simple_hash(st->context);
+ mark_rebuild |= META_CHART_UPDATED;
+ }
+
+ if (mark_rebuild) {
+#ifdef ENABLE_ACLK
+ if (netdata_cloud_setting) {
+ if (mark_rebuild & META_CHART_ACTIVATED) {
+ aclk_add_collector(host, st->plugin_name, st->module_name);
+ }
+ else {
+ if (mark_rebuild & (META_PLUGIN_UPDATED | META_MODULE_UPDATED)) {
+ aclk_del_collector(
+ host, mark_rebuild & META_PLUGIN_UPDATED ? old_plugin : st->plugin_name,
+ mark_rebuild & META_MODULE_UPDATED ? old_module : st->module_name);
+ aclk_add_collector(host, st->plugin_name, st->module_name);
+ }
+ }
+ rrdset_flag_set(st, RRDSET_FLAG_ACLK);
+ }
+#endif
+ freez(old_plugin);
+ freez(old_module);
+ freez(old_title);
+ freez(old_context);
+ freez(old_title_v);
+ freez(old_context_v);
+ if (mark_rebuild != META_CHART_ACTIVATED) {
+ info("Collector updated metadata for chart %s", st->id);
+ sched_yield();
+ }
+ }
+#ifdef ENABLE_DBENGINE
+ if (st->rrd_memory_mode == RRD_MEMORY_MODE_DBENGINE &&
+ (mark_rebuild & (META_CHART_UPDATED | META_PLUGIN_UPDATED | META_MODULE_UPDATED))) {
+ debug(D_METADATALOG, "CHART [%s] metadata updated", st->id);
+ int rc = update_chart_metadata(st->chart_uuid, st, id, name);
+ if (unlikely(rc))
+ error_report("Failed to update chart metadata in the database");
+ }
+#endif
+ /* Fall-through during switch from archived to active so that the host lock is taken and health is linked */
+ if (!changed_from_archived_to_active)
+ return st;
+ }
+
+ rrdhost_wrlock(host);
+
+ st = rrdset_find_on_create(host, fullid);
+ if(st) {
+ if (changed_from_archived_to_active) {
+ rrdset_flag_clear(st, RRDSET_FLAG_ARCHIVED);
+ rrdsetvar_create(st, "last_collected_t", RRDVAR_TYPE_TIME_T, &st->last_collected_time.tv_sec, RRDVAR_OPTION_DEFAULT);
+ rrdsetvar_create(st, "collected_total_raw", RRDVAR_TYPE_TOTAL, &st->last_collected_total, RRDVAR_OPTION_DEFAULT);
+ rrdsetvar_create(st, "green", RRDVAR_TYPE_CALCULATED, &st->green, RRDVAR_OPTION_DEFAULT);
+ rrdsetvar_create(st, "red", RRDVAR_TYPE_CALCULATED, &st->red, RRDVAR_OPTION_DEFAULT);
+ rrdsetvar_create(st, "update_every", RRDVAR_TYPE_INT, &st->update_every, RRDVAR_OPTION_DEFAULT);
+ rrdsetcalc_link_matching(st);
+ rrdcalctemplate_link_matching(st);
+ }
+ rrdhost_unlock(host);
+ rrdset_flag_set(st, RRDSET_FLAG_SYNC_CLOCK);
+ rrdset_flag_clear(st, RRDSET_FLAG_UPSTREAM_EXPOSED);
+ return st;
+ }
+
+ char fullfilename[FILENAME_MAX + 1];
+
+ // ------------------------------------------------------------------------
+ // compose the config_section for this chart
+
+ char config_section[RRD_ID_LENGTH_MAX + 1];
+ if(host == localhost)
+ strcpy(config_section, fullid);
+ else
+ snprintfz(config_section, RRD_ID_LENGTH_MAX, "%s/%s", host->machine_guid, fullid);
+
+ // ------------------------------------------------------------------------
+ // get the options from the config, we need to create it
+
+ long entries;
+ if(memory_mode == RRD_MEMORY_MODE_DBENGINE) {
+ // config_get_number() registers the option only the first time; afterwards it returns the stored value
+ entries = config_get_number(config_section, "history", 5);
+ } else {
+ long rentries = config_get_number(config_section, "history", history_entries);
+ entries = align_entries_to_pagesize(memory_mode, rentries);
+ if (entries != rentries) entries = config_set_number(config_section, "history", entries);
+
+ if (memory_mode == RRD_MEMORY_MODE_NONE && entries != rentries)
+ entries = config_set_number(config_section, "history", 10);
+ }
+ int enabled = config_get_boolean(config_section, "enabled", 1);
+ if(!enabled) entries = 5;
+
+ unsigned long size = sizeof(RRDSET);
+ char *cache_dir = rrdset_cache_dir(host, fullid, config_section);
+
+ time_t now = now_realtime_sec();
+
+ // ------------------------------------------------------------------------
+ // load it or allocate it
+
+ debug(D_RRD_CALLS, "Creating RRD_STATS for '%s.%s'.", type, id);
+
+ snprintfz(fullfilename, FILENAME_MAX, "%s/main.db", cache_dir);
+ if(memory_mode == RRD_MEMORY_MODE_SAVE || memory_mode == RRD_MEMORY_MODE_MAP ||
+ memory_mode == RRD_MEMORY_MODE_RAM) {
+ st = (RRDSET *) mymmap(
+ (memory_mode == RRD_MEMORY_MODE_RAM) ? NULL : fullfilename
+ , size
+ , ((memory_mode == RRD_MEMORY_MODE_MAP) ? MAP_SHARED : MAP_PRIVATE)
+ , 0
+ );
+
+ if(st) {
+ memset(&st->avl, 0, sizeof(avl));
+ memset(&st->avlname, 0, sizeof(avl));
+ memset(&st->rrdvar_root_index, 0, sizeof(avl_tree_lock));
+ memset(&st->dimensions_index, 0, sizeof(avl_tree_lock));
+ memset(&st->rrdset_rwlock, 0, sizeof(netdata_rwlock_t));
+
+ st->name = NULL;
+ st->config_section = NULL;
+ st->type = NULL;
+ st->family = NULL;
+ st->title = NULL;
+ st->units = NULL;
+ st->context = NULL;
+ st->cache_dir = NULL;
+ st->plugin_name = NULL;
+ st->module_name = NULL;
+ st->dimensions = NULL;
+ st->rrdfamily = NULL;
+ st->rrdhost = NULL;
+ st->next = NULL;
+ st->variables = NULL;
+ st->alarms = NULL;
+ st->flags = 0x00000000;
+ st->exporting_flags = NULL;
+
+ if(memory_mode == RRD_MEMORY_MODE_RAM) {
+ memset(st, 0, size);
+ }
+ else {
+ if(strcmp(st->magic, RRDSET_MAGIC) != 0) {
+ info("Initializing file %s.", fullfilename);
+ memset(st, 0, size);
+ }
+ else if(strcmp(st->id, fullid) != 0) {
+ error("File %s contents are not for chart %s. Clearing it.", fullfilename, fullid);
+ // munmap(st, size);
+ // st = NULL;
+ memset(st, 0, size);
+ }
+ else if(st->memsize != size || st->entries != entries) {
+ error("File %s does not have the desired size. Clearing it.", fullfilename);
+ memset(st, 0, size);
+ }
+ else if(st->update_every != update_every) {
+ error("File %s does not have the desired update frequency. Clearing it.", fullfilename);
+ memset(st, 0, size);
+ }
+ else if((now - st->last_updated.tv_sec) > update_every * entries) {
+ info("File %s is too old. Clearing it.", fullfilename);
+ memset(st, 0, size);
+ }
+ else if(st->last_updated.tv_sec > now + update_every) {
+ error("File %s refers to the future by %zd secs. Resetting it to now.", fullfilename, (ssize_t)(st->last_updated.tv_sec - now));
+ st->last_updated.tv_sec = now;
+ }
+
+ // make sure the database is aligned
+ if(st->last_updated.tv_sec) {
+ st->update_every = update_every;
+ last_updated_time_align(st);
+ }
+ }
+
+ // make sure we have the right memory mode
+ // even if we cleared the memory
+ st->rrd_memory_mode = memory_mode;
+ }
+ }
+
+ if(unlikely(!st)) {
+ st = callocz(1, size);
+ if (memory_mode == RRD_MEMORY_MODE_DBENGINE)
+ st->rrd_memory_mode = RRD_MEMORY_MODE_DBENGINE;
+ else
+ st->rrd_memory_mode = (memory_mode == RRD_MEMORY_MODE_NONE) ? RRD_MEMORY_MODE_NONE : RRD_MEMORY_MODE_ALLOC;
+ }
+
+ st->plugin_name = plugin?strdupz(plugin):NULL;
+ st->module_name = module?strdupz(module):NULL;
+
+ st->config_section = strdupz(config_section);
+ st->rrdhost = host;
+ st->memsize = size;
+ st->entries = entries;
+ st->update_every = update_every;
+
+ if(st->current_entry >= st->entries) st->current_entry = 0;
+
+ strcpy(st->cache_filename, fullfilename);
+ strcpy(st->magic, RRDSET_MAGIC);
+
+ strcpy(st->id, fullid);
+ st->hash = simple_hash(st->id);
+
+ st->cache_dir = cache_dir;
+
+ st->chart_type = rrdset_type_id(config_get(st->config_section, "chart type", rrdset_type_name(chart_type)));
+ st->type = config_get(st->config_section, "type", type);
+
+ st->state = callocz(1, sizeof(*st->state));
+ st->family = config_get(st->config_section, "family", family?family:st->type);
+ json_fix_string(st->family);
+
+ st->units = config_get(st->config_section, "units", units?units:"");
+ json_fix_string(st->units);
+
+ st->context = config_get(st->config_section, "context", context?context:st->id);
+ st->state->old_context = strdupz(st->context);
+ json_fix_string(st->context);
+ st->hash_context = simple_hash(st->context);
+
+ st->priority = config_get_number(st->config_section, "priority", priority);
+ if(enabled)
+ rrdset_flag_set(st, RRDSET_FLAG_ENABLED);
+ else
+ rrdset_flag_clear(st, RRDSET_FLAG_ENABLED);
+
+ rrdset_flag_clear(st, RRDSET_FLAG_DETAIL);
+ rrdset_flag_clear(st, RRDSET_FLAG_DEBUG);
+ rrdset_flag_clear(st, RRDSET_FLAG_OBSOLETE);
+ rrdset_flag_clear(st, RRDSET_FLAG_BACKEND_SEND);
+ rrdset_flag_clear(st, RRDSET_FLAG_BACKEND_IGNORE);
+ rrdset_flag_clear(st, RRDSET_FLAG_UPSTREAM_SEND);
+ rrdset_flag_clear(st, RRDSET_FLAG_UPSTREAM_IGNORE);
+ rrdset_flag_clear(st, RRDSET_FLAG_UPSTREAM_EXPOSED);
+ rrdset_flag_set(st, RRDSET_FLAG_SYNC_CLOCK);
+
+ // if(!strcmp(st->id, "disk_util.dm-0")) {
+ // st->debug = 1;
+ // error("enabled debugging for '%s'", st->id);
+ // }
+ // else error("not enabled debugging for '%s'", st->id);
+
+ st->green = NAN;
+ st->red = NAN;
+
+ st->last_collected_time.tv_sec = 0;
+ st->last_collected_time.tv_usec = 0;
+ st->counter_done = 0;
+ st->rrddim_page_alignment = 0;
+
+ st->gap_when_lost_iterations_above = (int) (gap_when_lost_iterations_above + 2);
+
+ st->last_accessed_time = 0;
+ st->upstream_resync_time = 0;
+
+ avl_init_lock(&st->dimensions_index, rrddim_compare);
+ avl_init_lock(&st->rrdvar_root_index, rrdvar_compare);
+
+ netdata_rwlock_init(&st->rrdset_rwlock);
+ netdata_rwlock_init(&st->state->labels.labels_rwlock);
+
+ if(!(name && *name && rrdset_set_name(st, name)))
+ // could not use the given name, fall back to the id
+ rrdset_set_name(st, id);
+
+ st->title = config_get(st->config_section, "title", title);
+ st->state->old_title = strdupz(st->title);
+ json_fix_string(st->title);
+
+ st->rrdfamily = rrdfamily_create(host, st->family);
+
+ st->next = host->rrdset_root;
+ host->rrdset_root = st;
+
+ if(host->health_enabled) {
+ rrdsetvar_create(st, "last_collected_t", RRDVAR_TYPE_TIME_T, &st->last_collected_time.tv_sec, RRDVAR_OPTION_DEFAULT);
+ rrdsetvar_create(st, "collected_total_raw", RRDVAR_TYPE_TOTAL, &st->last_collected_total, RRDVAR_OPTION_DEFAULT);
+ rrdsetvar_create(st, "green", RRDVAR_TYPE_CALCULATED, &st->green, RRDVAR_OPTION_DEFAULT);
+ rrdsetvar_create(st, "red", RRDVAR_TYPE_CALCULATED, &st->red, RRDVAR_OPTION_DEFAULT);
+ rrdsetvar_create(st, "update_every", RRDVAR_TYPE_INT, &st->update_every, RRDVAR_OPTION_DEFAULT);
+ }
+
+ if(unlikely(rrdset_index_add(host, st) != st))
+ error("RRDSET: INTERNAL ERROR: attempt to index duplicate chart '%s'", st->id);
+
+ rrdsetcalc_link_matching(st);
+ rrdcalctemplate_link_matching(st);
+#ifdef ENABLE_DBENGINE
+ if (st->rrd_memory_mode == RRD_MEMORY_MODE_DBENGINE) {
+ st->chart_uuid = find_chart_uuid(host, type, id, name);
+ if (unlikely(!st->chart_uuid))
+ st->chart_uuid = create_chart_uuid(st, id, name);
+
+ store_active_chart(st->chart_uuid);
+ }
+#endif
+
+ rrdhost_cleanup_obsolete_charts(host);
+
+ rrdhost_unlock(host);
+#ifdef ENABLE_ACLK
+ if (netdata_cloud_setting)
+ aclk_add_collector(host, plugin, module);
+ rrdset_flag_set(st, RRDSET_FLAG_ACLK);
+#endif
+ return(st);
+}
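+
+// A minimal creation sketch (hypothetical values; collectors normally go
+// through convenience macros such as rrdset_create_localhost(), and the
+// defaults referenced here are the daemon's globals):
+//
+//     RRDSET *st = rrdset_create_custom(
+//         localhost, "example", "random", NULL, "random", NULL,
+//         "A random number", "number", "example-plugin", NULL, 1000,
+//         1, RRDSET_TYPE_LINE, default_rrd_memory_mode,
+//         default_rrd_history_entries);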
+
+
+// ----------------------------------------------------------------------------
+// RRDSET - data collection iteration control
+
+inline void rrdset_next_usec_unfiltered(RRDSET *st, usec_t microseconds) {
+ if(unlikely(!st->last_collected_time.tv_sec || !microseconds || (rrdset_flag_check_noatomic(st, RRDSET_FLAG_SYNC_CLOCK)))) {
+ // call the full next_usec() function
+ rrdset_next_usec(st, microseconds);
+ return;
+ }
+
+ st->usec_since_last_update = microseconds;
+}
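+
+// A minimal sketch of the fast path above (hypothetical caller): once the
+// chart has collected at least once and the clock is not being re-synced,
+// only the supplied dt is recorded:
+//
+//     rrdset_next_usec_unfiltered(st, dt);   // st->usec_since_last_update = dt
+//
+// otherwise it falls back to the full rrdset_next_usec() below.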
+
+inline void rrdset_next_usec(RRDSET *st, usec_t microseconds) {
+ struct timeval now;
+ now_realtime_timeval(&now);
+
+ #ifdef NETDATA_INTERNAL_CHECKS
+ char *discard_reason = NULL;
+ usec_t discarded = microseconds;
+ #endif
+
+ if(unlikely(rrdset_flag_check_noatomic(st, RRDSET_FLAG_SYNC_CLOCK))) {
+ // the chart needs to be re-synced to current time
+ rrdset_flag_clear(st, RRDSET_FLAG_SYNC_CLOCK);
+
+ // discard the microseconds supplied
+ microseconds = 0;
+
+ #ifdef NETDATA_INTERNAL_CHECKS
+ if(!discard_reason) discard_reason = "SYNC CLOCK FLAG";
+ #endif
+ }
+
+ if(unlikely(!st->last_collected_time.tv_sec)) {
+ // the first entry
+ microseconds = st->update_every * USEC_PER_SEC;
+ #ifdef NETDATA_INTERNAL_CHECKS
+ if(!discard_reason) discard_reason = "FIRST DATA COLLECTION";
+ #endif
+ }
+ else if(unlikely(!microseconds)) {
+ // no dt given by the plugin
+ microseconds = dt_usec(&now, &st->last_collected_time);
+ #ifdef NETDATA_INTERNAL_CHECKS
+ if(!discard_reason) discard_reason = "NO USEC GIVEN BY COLLECTOR";
+ #endif
+ }
+ else {
+ // microseconds has the time since the last collection
+ susec_t since_last_usec = dt_usec_signed(&now, &st->last_collected_time);
+
+ if(unlikely(since_last_usec < 0)) {
+ // oops! the database is in the future
+ info("RRD database for chart '%s' on host '%s' is %0.5" LONG_DOUBLE_MODIFIER " secs in the future (counter #%zu, update #%zu). Adjusting it to current time.", st->id, st->rrdhost->hostname, (LONG_DOUBLE)-since_last_usec / USEC_PER_SEC, st->counter, st->counter_done);
+
+ st->last_collected_time.tv_sec = now.tv_sec - st->update_every;
+ st->last_collected_time.tv_usec = now.tv_usec;
+ last_collected_time_align(st);
+
+ st->last_updated.tv_sec = now.tv_sec - st->update_every;
+ st->last_updated.tv_usec = now.tv_usec;
+ last_updated_time_align(st);
+
+ microseconds = st->update_every * USEC_PER_SEC;
+ #ifdef NETDATA_INTERNAL_CHECKS
+ if(!discard_reason) discard_reason = "COLLECTION TIME IN FUTURE";
+ #endif
+ }
+ else if(unlikely((usec_t)since_last_usec > (usec_t)(st->update_every * 5 * USEC_PER_SEC))) {
+ // oops! the database is too far behind
+ info("RRD database for chart '%s' on host '%s' is %0.5" LONG_DOUBLE_MODIFIER " secs in the past (counter #%zu, update #%zu). Adjusting it to current time.", st->id, st->rrdhost->hostname, (LONG_DOUBLE)since_last_usec / USEC_PER_SEC, st->counter, st->counter_done);
+
+ microseconds = (usec_t)since_last_usec;
+ #ifdef NETDATA_INTERNAL_CHECKS
+ if(!discard_reason) discard_reason = "COLLECTION TIME TOO FAR IN THE PAST";
+ #endif
+ }
+
+#ifdef NETDATA_INTERNAL_CHECKS
+ if(since_last_usec > 0 && (susec_t)microseconds < since_last_usec) {
+ static __thread susec_t min_delta = USEC_PER_SEC * 3600, permanent_min_delta = 0;
+ static __thread time_t last_t = 0;
+
+ // on the first call, initialize it so that the check below can run later
+ if(last_t == 0) last_t = now.tv_sec + 60;
+
+ susec_t delta = since_last_usec - (susec_t)microseconds;
+ if(delta < min_delta) min_delta = delta;
+
+ if(now.tv_sec >= last_t + 60) {
+ last_t = now.tv_sec;
+
+ if(min_delta > permanent_min_delta) {
+ info("MINIMUM MICROSECONDS DELTA of thread %d increased from %lld to %lld (+%lld)", gettid(), permanent_min_delta, min_delta, min_delta - permanent_min_delta);
+ permanent_min_delta = min_delta;
+ }
+
+ min_delta = USEC_PER_SEC * 3600;
+ }
+ }
+#endif
+ }
+
+ #ifdef NETDATA_INTERNAL_CHECKS
+ debug(D_RRD_CALLS, "rrdset_next_usec() for chart %s with microseconds %llu", st->name, microseconds);
+ rrdset_debug(st, "NEXT: %llu microseconds", microseconds);
+
+ if(discarded && discarded != microseconds)
+ info("host '%s', chart '%s': discarded data collection time of %llu usec, replaced with %llu usec, reason: '%s'", st->rrdhost->hostname, st->id, discarded, microseconds, discard_reason?discard_reason:"UNDEFINED");
+
+ #endif
+
+ st->usec_since_last_update = microseconds;
+}
+
+
+// ----------------------------------------------------------------------------
+// RRDSET - process the collected values for all dimensions of a chart
+
+static inline usec_t rrdset_init_last_collected_time(RRDSET *st) {
+ now_realtime_timeval(&st->last_collected_time);
+ last_collected_time_align(st);
+
+ usec_t last_collect_ut = st->last_collected_time.tv_sec * USEC_PER_SEC + st->last_collected_time.tv_usec;
+
+ #ifdef NETDATA_INTERNAL_CHECKS
+ rrdset_debug(st, "initialized last collected time to %0.3" LONG_DOUBLE_MODIFIER, (LONG_DOUBLE)last_collect_ut / USEC_PER_SEC);
+ #endif
+
+ return last_collect_ut;
+}
+
+static inline usec_t rrdset_update_last_collected_time(RRDSET *st) {
+ usec_t last_collect_ut = st->last_collected_time.tv_sec * USEC_PER_SEC + st->last_collected_time.tv_usec;
+ usec_t ut = last_collect_ut + st->usec_since_last_update;
+ st->last_collected_time.tv_sec = (time_t) (ut / USEC_PER_SEC);
+ st->last_collected_time.tv_usec = (suseconds_t) (ut % USEC_PER_SEC);
+
+ #ifdef NETDATA_INTERNAL_CHECKS
+ rrdset_debug(st, "updated last collected time to %0.3" LONG_DOUBLE_MODIFIER, (LONG_DOUBLE)last_collect_ut / USEC_PER_SEC);
+ #endif
+
+ return last_collect_ut;
+}
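+
+// Worked example: a last collected time of 1000000.400000 and
+// usec_since_last_update = 1200000 give
+// ut = 1000000400000 + 1200000 = 1000001600000, i.e.
+// tv_sec = 1000001 and tv_usec = 600000.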
+
+static inline usec_t rrdset_init_last_updated_time(RRDSET *st) {
+ // copy the last collected time to last updated time
+ st->last_updated.tv_sec = st->last_collected_time.tv_sec;
+ st->last_updated.tv_usec = st->last_collected_time.tv_usec;
+
+ if(rrdset_flag_check(st, RRDSET_FLAG_STORE_FIRST))
+ st->last_updated.tv_sec -= st->update_every;
+
+ last_updated_time_align(st);
+
+ usec_t last_updated_ut = st->last_updated.tv_sec * USEC_PER_SEC + st->last_updated.tv_usec;
+
+ #ifdef NETDATA_INTERNAL_CHECKS
+ rrdset_debug(st, "initialized last updated time to %0.3" LONG_DOUBLE_MODIFIER, (LONG_DOUBLE)last_updated_ut / USEC_PER_SEC);
+ #endif
+
+ return last_updated_ut;
+}
+
+static inline size_t rrdset_done_interpolate(
+ RRDSET *st
+ , usec_t update_every_ut
+ , usec_t last_stored_ut
+ , usec_t next_store_ut
+ , usec_t last_collect_ut
+ , usec_t now_collect_ut
+ , char store_this_entry
+ , uint32_t storage_flags
+) {
+ RRDDIM *rd;
+
+ size_t stored_entries = 0; // the number of entries we have stored in the db, during this call to rrdset_done()
+
+ usec_t first_ut = last_stored_ut, last_ut = 0;
+ (void)first_ut;
+
+ ssize_t iterations = (ssize_t)((now_collect_ut - last_stored_ut) / (update_every_ut));
+ if((now_collect_ut % (update_every_ut)) == 0) iterations++;
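+
+ // Worked example: with update_every_ut = 1000000, last_stored_ut at
+ // t=10.0s and now_collect_ut at t=12.5s, iterations = 2500000/1000000 = 2,
+ // and the loop below stores the interpolation points at t=11.0s and t=12.0s.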
+
+ size_t counter = st->counter;
+ long current_entry = st->current_entry;
+
+ for( ; next_store_ut <= now_collect_ut ; last_collect_ut = next_store_ut, next_store_ut += update_every_ut, iterations-- ) {
+
+ #ifdef NETDATA_INTERNAL_CHECKS
+ if(iterations < 0) { error("INTERNAL CHECK: %s: iterations calculation wrapped! first_ut = %llu, last_stored_ut = %llu, next_store_ut = %llu, now_collect_ut = %llu", st->name, first_ut, last_stored_ut, next_store_ut, now_collect_ut); }
+ rrdset_debug(st, "last_stored_ut = %0.3" LONG_DOUBLE_MODIFIER " (last updated time)", (LONG_DOUBLE)last_stored_ut/USEC_PER_SEC);
+ rrdset_debug(st, "next_store_ut = %0.3" LONG_DOUBLE_MODIFIER " (next interpolation point)", (LONG_DOUBLE)next_store_ut/USEC_PER_SEC);
+ #endif
+
+ last_ut = next_store_ut;
+
+ rrddim_foreach_read(rd, st) {
+ if (rrddim_flag_check(rd, RRDDIM_FLAG_ARCHIVED))
+ continue;
+
+ calculated_number new_value;
+
+ switch(rd->algorithm) {
+ case RRD_ALGORITHM_INCREMENTAL:
+ new_value = (calculated_number)
+ ( rd->calculated_value
+ * (calculated_number)(next_store_ut - last_collect_ut)
+ / (calculated_number)(now_collect_ut - last_collect_ut)
+ );
+
+ #ifdef NETDATA_INTERNAL_CHECKS
+ rrdset_debug(st, "%s: CALC2 INC "
+ CALCULATED_NUMBER_FORMAT " = "
+ CALCULATED_NUMBER_FORMAT
+ " * (%llu - %llu)"
+ " / (%llu - %llu)"
+ , rd->name
+ , new_value
+ , rd->calculated_value
+ , next_store_ut, last_collect_ut
+ , now_collect_ut, last_collect_ut
+ );
+ #endif
+
+ rd->calculated_value -= new_value;
+ new_value += rd->last_calculated_value;
+ rd->last_calculated_value = 0;
+ new_value /= (calculated_number)st->update_every;
+
+ if(unlikely(next_store_ut - last_stored_ut < update_every_ut)) {
+
+ #ifdef NETDATA_INTERNAL_CHECKS
+ rrdset_debug(st, "%s: COLLECTION POINT IS SHORT " CALCULATED_NUMBER_FORMAT " - EXTRAPOLATING",
+ rd->name
+ , (calculated_number)(next_store_ut - last_stored_ut)
+ );
+ #endif
+
+ new_value = new_value * (calculated_number)(st->update_every * USEC_PER_SEC) / (calculated_number)(next_store_ut - last_stored_ut);
+ }
+ break;
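+
+ // Worked example for the incremental split above (hypothetical
+ // numbers): with calculated_value = 100 and the interpolation point
+ // 40% of the way into the collection interval, new_value = 40 is
+ // taken for this point (then divided by update_every to get a
+ // per-second rate) and calculated_value keeps the remaining 60
+ // for the next point.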
+
+ case RRD_ALGORITHM_ABSOLUTE:
+ case RRD_ALGORITHM_PCENT_OVER_ROW_TOTAL:
+ case RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL:
+ default:
+ if(iterations == 1) {
+ // this is the last iteration
+ // do not interpolate
+ // just show the calculated value
+
+ new_value = rd->calculated_value;
+ }
+ else {
+ // we have missed an update
+ // interpolate in the middle values
+
+ new_value = (calculated_number)
+ ( ( (rd->calculated_value - rd->last_calculated_value)
+ * (calculated_number)(next_store_ut - last_collect_ut)
+ / (calculated_number)(now_collect_ut - last_collect_ut)
+ )
+ + rd->last_calculated_value
+ );
+
+ #ifdef NETDATA_INTERNAL_CHECKS
+ rrdset_debug(st, "%s: CALC2 DEF "
+ CALCULATED_NUMBER_FORMAT " = ((("
+ "(" CALCULATED_NUMBER_FORMAT " - " CALCULATED_NUMBER_FORMAT ")"
+ " * %llu"
+ " / %llu) + " CALCULATED_NUMBER_FORMAT
+ , rd->name
+ , new_value
+ , rd->calculated_value, rd->last_calculated_value
+ , (next_store_ut - first_ut)
+ , (now_collect_ut - first_ut), rd->last_calculated_value
+ );
+ #endif
+ }
+ break;
+ }
+
+ if(unlikely(!store_this_entry)) {
+ rd->state->collect_ops.store_metric(rd, next_store_ut, SN_EMPTY_SLOT); //pack_storage_number(0, SN_NOT_EXISTS)
+// rd->values[current_entry] = SN_EMPTY_SLOT; //pack_storage_number(0, SN_NOT_EXISTS);
+ continue;
+ }
+
+ if(likely(rd->updated && rd->collections_counter > 1 && iterations < st->gap_when_lost_iterations_above)) {
+ rd->state->collect_ops.store_metric(rd, next_store_ut, pack_storage_number(new_value, storage_flags));
+// rd->values[current_entry] = pack_storage_number(new_value, storage_flags );
+ rd->last_stored_value = new_value;
+
+ #ifdef NETDATA_INTERNAL_CHECKS
+ rrdset_debug(st, "%s: STORE[%ld] "
+ CALCULATED_NUMBER_FORMAT " = " CALCULATED_NUMBER_FORMAT
+ , rd->name
+ , current_entry
+ , unpack_storage_number(rd->values[current_entry]), new_value
+ );
+ #endif
+
+ }
+ else {
+
+ #ifdef NETDATA_INTERNAL_CHECKS
+ rrdset_debug(st, "%s: STORE[%ld] = NON EXISTING "
+ , rd->name
+ , current_entry
+ );
+ #endif
+
+// rd->values[current_entry] = SN_EMPTY_SLOT; // pack_storage_number(0, SN_NOT_EXISTS);
+ rd->state->collect_ops.store_metric(rd, next_store_ut, SN_EMPTY_SLOT); //pack_storage_number(0, SN_NOT_EXISTS)
+ rd->last_stored_value = NAN;
+ }
+
+ stored_entries++;
+
+ #ifdef NETDATA_INTERNAL_CHECKS
+ if(unlikely(rrdset_flag_check(st, RRDSET_FLAG_DEBUG))) {
+ calculated_number t1 = new_value * (calculated_number)rd->multiplier / (calculated_number)rd->divisor;
+ calculated_number t2 = unpack_storage_number(rd->values[current_entry]);
+
+ calculated_number accuracy = accuracy_loss(t1, t2);
+ debug(D_RRD_STATS, "%s/%s: UNPACK[%ld] = " CALCULATED_NUMBER_FORMAT " FLAGS=0x%08x (original = " CALCULATED_NUMBER_FORMAT ", accuracy loss = " CALCULATED_NUMBER_FORMAT "%%%s)"
+ , st->id, rd->name
+ , current_entry
+ , t2
+ , get_storage_number_flags(rd->values[current_entry])
+ , t1
+ , accuracy
+ , (accuracy > ACCURACY_LOSS_ACCEPTED_PERCENT) ? " **TOO BIG** " : ""
+ );
+
+ rd->collected_volume += t1;
+ rd->stored_volume += t2;
+
+ accuracy = accuracy_loss(rd->collected_volume, rd->stored_volume);
+ debug(D_RRD_STATS, "%s/%s: VOLUME[%ld] = " CALCULATED_NUMBER_FORMAT ", calculated = " CALCULATED_NUMBER_FORMAT ", accuracy loss = " CALCULATED_NUMBER_FORMAT "%%%s"
+ , st->id, rd->name
+ , current_entry
+ , rd->stored_volume
+ , rd->collected_volume
+ , accuracy
+ , (accuracy > ACCURACY_LOSS_ACCEPTED_PERCENT) ? " **TOO BIG** " : ""
+ );
+ }
+ #endif
+ }
+ // reset the storage flags for the next point, if any
+ storage_flags = SN_EXISTS;
+
+ st->counter = ++counter;
+ st->current_entry = current_entry = ((current_entry + 1) >= st->entries) ? 0 : current_entry + 1;
+
+ st->last_updated.tv_sec = (time_t) (last_ut / USEC_PER_SEC);
+ st->last_updated.tv_usec = 0;
+
+ last_stored_ut = next_store_ut;
+ }
+
+/*
+ st->counter = counter;
+ st->current_entry = current_entry;
+
+ if(likely(last_ut)) {
+ st->last_updated.tv_sec = (time_t) (last_ut / USEC_PER_SEC);
+ st->last_updated.tv_usec = 0;
+ }
+*/
+
+ return stored_entries;
+}
+
+static inline void rrdset_done_fill_the_gap(RRDSET *st) {
+ usec_t update_every_ut = st->update_every * USEC_PER_SEC;
+ usec_t now_collect_ut = st->last_collected_time.tv_sec * USEC_PER_SEC + st->last_collected_time.tv_usec;
+
+ long c = 0, entries = st->entries;
+ RRDDIM *rd;
+ rrddim_foreach_read(rd, st) {
+ usec_t next_store_ut = (st->last_updated.tv_sec + st->update_every) * USEC_PER_SEC;
+ long current_entry = st->current_entry;
+
+ for(c = 0; c < entries && next_store_ut <= now_collect_ut ; next_store_ut += update_every_ut, c++) {
+ rd->values[current_entry] = SN_EMPTY_SLOT;
+ current_entry = ((current_entry + 1) >= entries) ? 0 : current_entry + 1;
+
+ #ifdef NETDATA_INTERNAL_CHECKS
+ rrdset_debug(st, "%s: STORE[%ld] = NON EXISTING (FILLED THE GAP)", rd->name, current_entry);
+ #endif
+ }
+ }
+
+ if(c > 0) {
+ c--;
+ st->last_updated.tv_sec += c * st->update_every;
+
+ st->current_entry += c;
+ if(st->current_entry >= st->entries)
+ st->current_entry -= st->entries;
+ }
+}
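+
+// Illustration: if the chart was last updated 5 update_every periods ago,
+// the loop above writes SN_EMPTY_SLOT into 5 slots of every dimension, then
+// last_updated and current_entry advance by 4 (c is decremented once), so
+// the point being collected now still gets its own slot.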
+
+void rrdset_done(RRDSET *st) {
+ if(unlikely(netdata_exit)) return;
+
+ debug(D_RRD_CALLS, "rrdset_done() for chart %s", st->name);
+
+ RRDDIM *rd;
+
+ char
+ store_this_entry = 1, // boolean: 1 = store this entry, 0 = don't store this entry
+ first_entry = 0; // boolean: 1 = this is the first entry seen for this chart, 0 = all other entries
+
+ usec_t
+ last_collect_ut = 0, // the timestamp in microseconds, of the last collected value
+ now_collect_ut = 0, // the timestamp in microseconds, of this collected value (this is NOW)
+ last_stored_ut = 0, // the timestamp in microseconds, of the last stored entry in the db
+ next_store_ut = 0, // the timestamp in microseconds, of the next entry to store in the db
+ update_every_ut = st->update_every * USEC_PER_SEC; // st->update_every in microseconds
+
+ netdata_thread_disable_cancelability();
+
+ // a read lock is OK here
+ rrdset_rdlock(st);
+
+#ifdef ENABLE_ACLK
+ if (unlikely(rrdset_flag_check(st, RRDSET_FLAG_ACLK))) {
+ rrdset_flag_clear(st, RRDSET_FLAG_ACLK);
+ aclk_update_chart(st->rrdhost, st->id, ACLK_CMD_CHART);
+ }
+#endif
+
+ if(unlikely(rrdset_flag_check(st, RRDSET_FLAG_OBSOLETE))) {
+ error("Chart '%s' has the OBSOLETE flag set, but it is collected.", st->id);
+ rrdset_isnot_obsolete(st);
+ }
+
+ // check if the chart has a long time to be updated
+ if(unlikely(st->usec_since_last_update > st->entries * update_every_ut &&
+ st->rrd_memory_mode != RRD_MEMORY_MODE_DBENGINE && st->rrd_memory_mode != RRD_MEMORY_MODE_NONE)) {
+ info("host '%s', chart %s: took too long to be updated (counter #%zu, update #%zu, %0.3" LONG_DOUBLE_MODIFIER " secs). Resetting it.", st->rrdhost->hostname, st->name, st->counter, st->counter_done, (LONG_DOUBLE)st->usec_since_last_update / USEC_PER_SEC);
+ rrdset_reset(st);
+ st->usec_since_last_update = update_every_ut;
+ store_this_entry = 0;
+ first_entry = 1;
+ }
+
+ #ifdef NETDATA_INTERNAL_CHECKS
+ rrdset_debug(st, "microseconds since last update: %llu", st->usec_since_last_update);
+ #endif
+
+ // set last_collected_time
+ if(unlikely(!st->last_collected_time.tv_sec)) {
+ // it is the first entry
+ // set the last_collected_time to now
+ last_collect_ut = rrdset_init_last_collected_time(st) - update_every_ut;
+
+ // the first entry should not be stored
+ store_this_entry = 0;
+ first_entry = 1;
+ }
+ else {
+ // it is not the first entry
+ // calculate the proper last_collected_time, using usec_since_last_update
+ last_collect_ut = rrdset_update_last_collected_time(st);
+ }
+ if (unlikely(st->rrd_memory_mode == RRD_MEMORY_MODE_NONE)) {
+ goto after_first_database_work;
+ }
+
+ // if this set has not been updated in the past
+ // we fake the last_update time to be = now - usec_since_last_update
+ if(unlikely(!st->last_updated.tv_sec)) {
+ // it has never been updated before
+ // set a fake last_updated, in the past using usec_since_last_update
+ rrdset_init_last_updated_time(st);
+
+ // the first entry should not be stored
+ store_this_entry = 0;
+ first_entry = 1;
+ }
+
+ // check if we will re-write the entire data set
+ if(unlikely(dt_usec(&st->last_collected_time, &st->last_updated) > st->entries * update_every_ut &&
+ st->rrd_memory_mode != RRD_MEMORY_MODE_DBENGINE)) {
+ info("%s: too old data (last updated at %ld.%ld, last collected at %ld.%ld). Resetting it. Will not store the next entry.", st->name, st->last_updated.tv_sec, st->last_updated.tv_usec, st->last_collected_time.tv_sec, st->last_collected_time.tv_usec);
+ rrdset_reset(st);
+ rrdset_init_last_updated_time(st);
+
+ st->usec_since_last_update = update_every_ut;
+
+ // the first entry should not be stored
+ store_this_entry = 0;
+ first_entry = 1;
+ }
+
+#ifdef ENABLE_DBENGINE
+ // check if we will re-write the entire page
+ if(unlikely(st->rrd_memory_mode == RRD_MEMORY_MODE_DBENGINE &&
+ dt_usec(&st->last_collected_time, &st->last_updated) > (RRDENG_BLOCK_SIZE / sizeof(storage_number)) * update_every_ut)) {
+ info("%s: too old data (last updated at %ld.%ld, last collected at %ld.%ld). Resetting it. Will not store the next entry.", st->name, st->last_updated.tv_sec, st->last_updated.tv_usec, st->last_collected_time.tv_sec, st->last_collected_time.tv_usec);
+ rrdset_reset(st);
+ rrdset_init_last_updated_time(st);
+
+ st->usec_since_last_update = update_every_ut;
+
+ // the first entry should not be stored
+ store_this_entry = 0;
+ first_entry = 1;
+ }
+#endif
+
+ // these are the 3 variables that will help us in interpolation
+ // last_stored_ut = the last time we added a value to the storage
+ // now_collect_ut = the time the current value has been collected
+ // next_store_ut = the time of the next interpolation point
+ now_collect_ut = st->last_collected_time.tv_sec * USEC_PER_SEC + st->last_collected_time.tv_usec;
+ last_stored_ut = st->last_updated.tv_sec * USEC_PER_SEC + st->last_updated.tv_usec;
+ next_store_ut = (st->last_updated.tv_sec + st->update_every) * USEC_PER_SEC;
+
+ if(unlikely(!st->counter_done)) {
+ // if we have not collected metrics this session (st->counter_done == 0)
+ // and we have collected metrics for this chart in the past (st->counter != 0)
+ // fill the gap (the chart has been just loaded from disk)
+ if(unlikely(st->counter) && st->rrd_memory_mode != RRD_MEMORY_MODE_DBENGINE) {
+ rrdset_done_fill_the_gap(st);
+ last_stored_ut = st->last_updated.tv_sec * USEC_PER_SEC + st->last_updated.tv_usec;
+ next_store_ut = (st->last_updated.tv_sec + st->update_every) * USEC_PER_SEC;
+ }
+ if (st->rrd_memory_mode == RRD_MEMORY_MODE_DBENGINE) {
+ // set a fake last_updated to jump to current time
+ rrdset_init_last_updated_time(st);
+ last_stored_ut = st->last_updated.tv_sec * USEC_PER_SEC + st->last_updated.tv_usec;
+ next_store_ut = (st->last_updated.tv_sec + st->update_every) * USEC_PER_SEC;
+ }
+
+ if(unlikely(rrdset_flag_check(st, RRDSET_FLAG_STORE_FIRST))) {
+ store_this_entry = 1;
+ last_collect_ut = next_store_ut - update_every_ut;
+
+ #ifdef NETDATA_INTERNAL_CHECKS
+ rrdset_debug(st, "Fixed first entry.");
+ #endif
+ }
+ else {
+ store_this_entry = 0;
+
+ #ifdef NETDATA_INTERNAL_CHECKS
+ rrdset_debug(st, "Will not store the next entry.");
+ #endif
+ }
+ }
+after_first_database_work:
+ st->counter_done++;
+
+ if(unlikely(st->rrdhost->rrdpush_send_enabled))
+ rrdset_done_push(st);
+ if (unlikely(st->rrd_memory_mode == RRD_MEMORY_MODE_NONE)) {
+ goto after_second_database_work;
+ }
+
+ #ifdef NETDATA_INTERNAL_CHECKS
+ rrdset_debug(st, "last_collect_ut = %0.3" LONG_DOUBLE_MODIFIER " (last collection time)", (LONG_DOUBLE)last_collect_ut/USEC_PER_SEC);
+ rrdset_debug(st, "now_collect_ut = %0.3" LONG_DOUBLE_MODIFIER " (current collection time)", (LONG_DOUBLE)now_collect_ut/USEC_PER_SEC);
+ rrdset_debug(st, "last_stored_ut = %0.3" LONG_DOUBLE_MODIFIER " (last updated time)", (LONG_DOUBLE)last_stored_ut/USEC_PER_SEC);
+ rrdset_debug(st, "next_store_ut = %0.3" LONG_DOUBLE_MODIFIER " (next interpolation point)", (LONG_DOUBLE)next_store_ut/USEC_PER_SEC);
+ #endif
+
+ // calculate totals and count the dimensions
+ int dimensions = 0;
+ st->collected_total = 0;
+ rrddim_foreach_read(rd, st) {
+ if (rrddim_flag_check(rd, RRDDIM_FLAG_ARCHIVED))
+ continue;
+ dimensions++;
+ if(likely(rd->updated))
+ st->collected_total += rd->collected_value;
+ }
+
+ uint32_t storage_flags = SN_EXISTS;
+
+ // process all dimensions to calculate their values
+ // based on the collected figures only
+ // at this stage we do not interpolate anything
+ rrddim_foreach_read(rd, st) {
+ if (rrddim_flag_check(rd, RRDDIM_FLAG_ARCHIVED))
+ continue;
+
+ if(unlikely(!rd->updated)) {
+ rd->calculated_value = 0;
+ continue;
+ }
+
+ if(unlikely(rrddim_flag_check(rd, RRDDIM_FLAG_OBSOLETE))) {
+ error("Dimension %s in chart '%s' has the OBSOLETE flag set, but it is collected.", rd->name, st->id);
+ rrddim_isnot_obsolete(st, rd);
+ }
+
+ #ifdef NETDATA_INTERNAL_CHECKS
+ rrdset_debug(st, "%s: START "
+ " last_collected_value = " COLLECTED_NUMBER_FORMAT
+ " collected_value = " COLLECTED_NUMBER_FORMAT
+ " last_calculated_value = " CALCULATED_NUMBER_FORMAT
+ " calculated_value = " CALCULATED_NUMBER_FORMAT
+ , rd->name
+ , rd->last_collected_value
+ , rd->collected_value
+ , rd->last_calculated_value
+ , rd->calculated_value
+ );
+ #endif
+
+ switch(rd->algorithm) {
+ case RRD_ALGORITHM_ABSOLUTE:
+ rd->calculated_value = (calculated_number)rd->collected_value
+ * (calculated_number)rd->multiplier
+ / (calculated_number)rd->divisor;
+
+ #ifdef NETDATA_INTERNAL_CHECKS
+ rrdset_debug(st, "%s: CALC ABS/ABS-NO-IN "
+ CALCULATED_NUMBER_FORMAT " = "
+ COLLECTED_NUMBER_FORMAT
+ " * " CALCULATED_NUMBER_FORMAT
+ " / " CALCULATED_NUMBER_FORMAT
+ , rd->name
+ , rd->calculated_value
+ , rd->collected_value
+ , (calculated_number)rd->multiplier
+ , (calculated_number)rd->divisor
+ );
+ #endif
+
+ break;
+
+ case RRD_ALGORITHM_PCENT_OVER_ROW_TOTAL:
+ if(unlikely(!st->collected_total))
+ rd->calculated_value = 0;
+ else
+ // the percentage of the current value
+ // over the total of all dimensions
+ rd->calculated_value =
+ (calculated_number)100
+ * (calculated_number)rd->collected_value
+ / (calculated_number)st->collected_total;
+
+ #ifdef NETDATA_INTERNAL_CHECKS
+ rrdset_debug(st, "%s: CALC PCENT-ROW "
+ CALCULATED_NUMBER_FORMAT " = 100"
+ " * " COLLECTED_NUMBER_FORMAT
+ " / " COLLECTED_NUMBER_FORMAT
+ , rd->name
+ , rd->calculated_value
+ , rd->collected_value
+ , st->collected_total
+ );
+ #endif
+
+ break;
+
+ case RRD_ALGORITHM_INCREMENTAL:
+ if(unlikely(rd->collections_counter <= 1)) {
+ rd->calculated_value = 0;
+ continue;
+ }
+
+ // If the new value is smaller than the old one (an overflow or a reset), set the old equal
+ // to the new, to reset the calculation (it will yield zero for this second).
+ // It is imperative to cast the comparison to uint64_t, since collected_number is signed and
+ // a signed comparison produces wrong results as far as incremental counters are concerned.
+ if(unlikely((uint64_t)rd->last_collected_value > (uint64_t)rd->collected_value)) {
+ debug(D_RRD_STATS, "%s.%s: RESET or OVERFLOW. Last collected value = " COLLECTED_NUMBER_FORMAT ", current = " COLLECTED_NUMBER_FORMAT
+ , st->name, rd->name
+ , rd->last_collected_value
+ , rd->collected_value);
+
+ if(!(rrddim_flag_check(rd, RRDDIM_FLAG_DONT_DETECT_RESETS_OR_OVERFLOWS)))
+ storage_flags = SN_EXISTS_RESET;
+
+ uint64_t last = (uint64_t)rd->last_collected_value;
+ uint64_t new = (uint64_t)rd->collected_value;
+ uint64_t max = (uint64_t)rd->collected_value_max;
+ uint64_t cap = 0;
+
+ // Signed values are handled by exploiting two's complement which will produce positive deltas
+ if (max > 0x00000000FFFFFFFFULL)
+ cap = 0xFFFFFFFFFFFFFFFFULL; // handles signed and unsigned 64-bit counters
+ else
+ cap = 0x00000000FFFFFFFFULL; // handles signed and unsigned 32-bit counters
+
+ uint64_t delta = cap - last + new;
+ uint64_t max_acceptable_rate = (cap / 100) * MAX_INCREMENTAL_PERCENT_RATE;
+
+ // If the delta is less than the maximum acceptable rate and the previous value was near the cap
+ // then this is an overflow. There can be false positives such that a reset is detected as an
+ // overflow.
+ // TODO: remember recent history of rates and compare with current rate to reduce this chance.
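+ // Worked example: a 32-bit counter wraps from last = 4294967290 to
+ // new = 5, so delta = 4294967295 - 4294967290 + 5 = 10, which is far
+ // below max_acceptable_rate and is therefore treated as an overflow.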
+ if (delta < max_acceptable_rate) {
+ rd->calculated_value +=
+ (calculated_number) delta
+ * (calculated_number) rd->multiplier
+ / (calculated_number) rd->divisor;
+ } else {
+ // This is a reset. Any overflow with a rate greater than MAX_INCREMENTAL_PERCENT_RATE will also
+ // be detected as a reset instead.
+ rd->calculated_value += (calculated_number)0;
+ }
+ }
+ else {
+ rd->calculated_value +=
+ (calculated_number) (rd->collected_value - rd->last_collected_value)
+ * (calculated_number) rd->multiplier
+ / (calculated_number) rd->divisor;
+ }
+
+ #ifdef NETDATA_INTERNAL_CHECKS
+ rrdset_debug(st, "%s: CALC INC PRE "
+ CALCULATED_NUMBER_FORMAT " = ("
+ COLLECTED_NUMBER_FORMAT " - " COLLECTED_NUMBER_FORMAT
+ ")"
+ " * " CALCULATED_NUMBER_FORMAT
+ " / " CALCULATED_NUMBER_FORMAT
+ , rd->name
+ , rd->calculated_value
+ , rd->collected_value, rd->last_collected_value
+ , (calculated_number)rd->multiplier
+ , (calculated_number)rd->divisor
+ );
+ #endif
+
+ break;
+
+ case RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL:
+ if(unlikely(rd->collections_counter <= 1)) {
+ rd->calculated_value = 0;
+ continue;
+ }
+
+ // if the new value is smaller than the old one (an overflow or a reset), set the old equal
+ // to the new, to reset the calculation (it will yield zero for this second)
+ if(unlikely(rd->last_collected_value > rd->collected_value)) {
+ debug(D_RRD_STATS, "%s.%s: RESET or OVERFLOW. Last collected value = " COLLECTED_NUMBER_FORMAT ", current = " COLLECTED_NUMBER_FORMAT
+ , st->name, rd->name
+ , rd->last_collected_value
+ , rd->collected_value
+ );
+
+ if(!(rrddim_flag_check(rd, RRDDIM_FLAG_DONT_DETECT_RESETS_OR_OVERFLOWS)))
+ storage_flags = SN_EXISTS_RESET;
+
+ rd->last_collected_value = rd->collected_value;
+ }
+
+ // the percentage of the current increment
+ // over the increment of all dimensions together
+ if(unlikely(st->collected_total == st->last_collected_total))
+ rd->calculated_value = 0;
+ else
+ rd->calculated_value =
+ (calculated_number)100
+ * (calculated_number)(rd->collected_value - rd->last_collected_value)
+ / (calculated_number)(st->collected_total - st->last_collected_total);
+
+ #ifdef NETDATA_INTERNAL_CHECKS
+ rrdset_debug(st, "%s: CALC PCENT-DIFF "
+ CALCULATED_NUMBER_FORMAT " = 100"
+ " * (" COLLECTED_NUMBER_FORMAT " - " COLLECTED_NUMBER_FORMAT ")"
+ " / (" COLLECTED_NUMBER_FORMAT " - " COLLECTED_NUMBER_FORMAT ")"
+ , rd->name
+ , rd->calculated_value
+ , rd->collected_value, rd->last_collected_value
+ , st->collected_total, st->last_collected_total
+ );
+ #endif
+
+ break;
+
+ default:
+ // make the default zero, so that it
+ // gets noticed when new algorithm types are added
+ rd->calculated_value = 0;
+
+ #ifdef NETDATA_INTERNAL_CHECKS
+ rrdset_debug(st, "%s: CALC "
+ CALCULATED_NUMBER_FORMAT " = 0"
+ , rd->name
+ , rd->calculated_value
+ );
+ #endif
+
+ break;
+ }
+
+ #ifdef NETDATA_INTERNAL_CHECKS
+ rrdset_debug(st, "%s: PHASE2 "
+ " last_collected_value = " COLLECTED_NUMBER_FORMAT
+ " collected_value = " COLLECTED_NUMBER_FORMAT
+ " last_calculated_value = " CALCULATED_NUMBER_FORMAT
+ " calculated_value = " CALCULATED_NUMBER_FORMAT
+ , rd->name
+ , rd->last_collected_value
+ , rd->collected_value
+ , rd->last_calculated_value
+ , rd->calculated_value
+ );
+ #endif
+
+ }
+
+ // at this point we have all the calculated values ready
+ // it is now time to interpolate values on a second boundary
+
+#ifdef NETDATA_INTERNAL_CHECKS
+ if(unlikely(now_collect_ut < next_store_ut)) {
+ // this is collected in the same interpolation point
+ rrdset_debug(st, "THIS IS IN THE SAME INTERPOLATION POINT");
+ info("INTERNAL CHECK: host '%s', chart '%s' is collected in the same interpolation point: short by %llu microseconds", st->rrdhost->hostname, st->name, next_store_ut - now_collect_ut);
+ }
+#endif
+
+ rrdset_done_interpolate(st
+ , update_every_ut
+ , last_stored_ut
+ , next_store_ut
+ , last_collect_ut
+ , now_collect_ut
+ , store_this_entry
+ , storage_flags
+ );
+
+after_second_database_work:
+ st->last_collected_total = st->collected_total;
+
+ rrddim_foreach_read(rd, st) {
+ if (rrddim_flag_check(rd, RRDDIM_FLAG_ARCHIVED))
+ continue;
+ if(unlikely(!rd->updated))
+ continue;
+
+ #ifdef NETDATA_INTERNAL_CHECKS
+ rrdset_debug(st, "%s: setting last_collected_value (old: " COLLECTED_NUMBER_FORMAT ") to last_collected_value (new: " COLLECTED_NUMBER_FORMAT ")", rd->name, rd->last_collected_value, rd->collected_value);
+ #endif
+
+ rd->last_collected_value = rd->collected_value;
+
+ switch(rd->algorithm) {
+ case RRD_ALGORITHM_INCREMENTAL:
+ if(unlikely(!first_entry)) {
+ #ifdef NETDATA_INTERNAL_CHECKS
+ rrdset_debug(st, "%s: setting last_calculated_value (old: " CALCULATED_NUMBER_FORMAT ") to last_calculated_value (new: " CALCULATED_NUMBER_FORMAT ")", rd->name, rd->last_calculated_value + rd->calculated_value, rd->calculated_value);
+ #endif
+
+ rd->last_calculated_value += rd->calculated_value;
+ }
+ else {
+ #ifdef NETDATA_INTERNAL_CHECKS
+ rrdset_debug(st, "THIS IS THE FIRST POINT");
+ #endif
+ }
+ break;
+
+ case RRD_ALGORITHM_ABSOLUTE:
+ case RRD_ALGORITHM_PCENT_OVER_ROW_TOTAL:
+ case RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL:
+ #ifdef NETDATA_INTERNAL_CHECKS
+ rrdset_debug(st, "%s: setting last_calculated_value (old: " CALCULATED_NUMBER_FORMAT ") to last_calculated_value (new: " CALCULATED_NUMBER_FORMAT ")", rd->name, rd->last_calculated_value, rd->calculated_value);
+ #endif
+
+ rd->last_calculated_value = rd->calculated_value;
+ break;
+ }
+
+ rd->calculated_value = 0;
+ rd->collected_value = 0;
+ rd->updated = 0;
+
+ #ifdef NETDATA_INTERNAL_CHECKS
+ rrdset_debug(st, "%s: END "
+ " last_collected_value = " COLLECTED_NUMBER_FORMAT
+ " collected_value = " COLLECTED_NUMBER_FORMAT
+ " last_calculated_value = " CALCULATED_NUMBER_FORMAT
+ " calculated_value = " CALCULATED_NUMBER_FORMAT
+ , rd->name
+ , rd->last_collected_value
+ , rd->collected_value
+ , rd->last_calculated_value
+ , rd->calculated_value
+ );
+ #endif
+
+ }
+
+ // ALL DONE ABOUT THE DATA UPDATE
+ // --------------------------------------------------------------------
+
+ // find if there are any obsolete dimensions
+ time_t now = now_realtime_sec();
+
+ if(unlikely(rrdset_flag_check(st, RRDSET_FLAG_OBSOLETE_DIMENSIONS))) {
+ rrddim_foreach_read(rd, st)
+ if(unlikely(rrddim_flag_check(rd, RRDDIM_FLAG_OBSOLETE)))
+ break;
+
+ if(unlikely(rd)) {
+ RRDDIM *last;
+ // there is a dimension to free
+ // upgrade our read lock to a write lock
+ rrdset_unlock(st);
+ rrdset_wrlock(st);
+
+ for( rd = st->dimensions, last = NULL ; likely(rd) ; ) {
+ if(unlikely(rrddim_flag_check(rd, RRDDIM_FLAG_OBSOLETE) && (rd->last_collected_time.tv_sec + rrdset_free_obsolete_time < now))) {
+ info("Removing obsolete dimension '%s' (%s) of '%s' (%s).", rd->name, rd->id, st->name, st->id);
+
+ if(likely(rd->rrd_memory_mode == RRD_MEMORY_MODE_SAVE || rd->rrd_memory_mode == RRD_MEMORY_MODE_MAP)) {
+ info("Deleting dimension file '%s'.", rd->cache_filename);
+ if(unlikely(unlink(rd->cache_filename) == -1))
+ error("Cannot delete dimension file '%s'", rd->cache_filename);
+ }
+
+#ifdef ENABLE_DBENGINE
+ if (rd->rrd_memory_mode == RRD_MEMORY_MODE_DBENGINE) {
+ rrddim_flag_set(rd, RRDDIM_FLAG_ARCHIVED);
+ while(rd->variables)
+ rrddimvar_free(rd->variables);
+
+ rrddim_flag_clear(rd, RRDDIM_FLAG_OBSOLETE);
+ /* only a collector can mark a chart as obsolete, so we must remove the reference */
+ uint8_t can_delete_metric = rd->state->collect_ops.finalize(rd);
+ if (can_delete_metric) {
+ /* This metric has no data and no references */
+ delete_dimension_uuid(rd->state->metric_uuid);
+ } else {
+ /* Do not delete this dimension */
+ last = rd;
+ rd = rd->next;
+ continue;
+ }
+ }
+#endif
+ if(unlikely(!last)) {
+ rrddim_free(st, rd);
+ rd = st->dimensions;
+ continue;
+ }
+ else {
+ rrddim_free(st, rd);
+ rd = last->next;
+ continue;
+ }
+ }
+
+ last = rd;
+ rd = rd->next;
+ }
+ }
+ else {
+ rrdset_flag_clear(st, RRDSET_FLAG_OBSOLETE_DIMENSIONS);
+ }
+ }
+
+ rrdset_unlock(st);
+
+ netdata_thread_enable_cancelability();
+}
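+
+// A minimal collection cycle using the functions above (a sketch with
+// hypothetical collector scaffolding; collect_value() stands in for real
+// data collection):
+//
+//     heartbeat_t hb;
+//     heartbeat_init(&hb);
+//     while(!netdata_exit) {
+//         usec_t dt = heartbeat_next(&hb, st->update_every * USEC_PER_SEC);
+//         rrdset_next_usec(st, dt);
+//         rrddim_set_by_pointer(st, rd, collect_value());
+//         rrdset_done(st);
+//     }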
+
+void rrdset_add_label_to_new_list(RRDSET *st, char *key, char *value, LABEL_SOURCE source)
+{
+ st->state->new_labels = add_label_to_list(st->state->new_labels, key, value, source);
+}
+
+void rrdset_finalize_labels(RRDSET *st)
+{
+ struct label *new_labels = st->state->new_labels;
+ struct label_index *labels = &st->state->labels;
+
+ if (!labels->head) {
+ labels->head = new_labels;
+ } else {
+ replace_label_list(labels, new_labels);
+ }
+ st->state->new_labels = NULL;
+}
+
+void rrdset_update_labels(RRDSET *st, struct label *labels)
+{
+ if (!labels)
+ return;
+
+ update_label_list(&st->state->new_labels, labels);
+ rrdset_finalize_labels(st);
+}
+
+int rrdset_contains_label_keylist(RRDSET *st, char *keylist)
+{
+ struct label_index *labels = &st->state->labels;
+ int ret;
+
+ if (!labels->head)
+ return 0;
+
+ netdata_rwlock_rdlock(&labels->labels_rwlock);
+ ret = label_list_contains_keylist(labels->head, keylist);
+ netdata_rwlock_unlock(&labels->labels_rwlock);
+
+ return ret;
+}
+
+struct label *rrdset_lookup_label_key(RRDSET *st, char *key, uint32_t key_hash)
+{
+ struct label_index *labels = &st->state->labels;
+ struct label *ret = NULL;
+
+ if (labels->head) {
+ netdata_rwlock_rdlock(&labels->labels_rwlock);
+ ret = label_list_lookup_key(labels->head, key, key_hash);
+ netdata_rwlock_unlock(&labels->labels_rwlock);
+ }
+ return ret;
+}
diff --git a/database/rrdsetvar.c b/database/rrdsetvar.c
new file mode 100644
index 0000000..9da4193
--- /dev/null
+++ b/database/rrdsetvar.c
@@ -0,0 +1,189 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#define NETDATA_HEALTH_INTERNALS
+#include "rrd.h"
+
+// ----------------------------------------------------------------------------
+// RRDSETVAR management
+// CHART VARIABLES
+
+static inline void rrdsetvar_free_variables(RRDSETVAR *rs) {
+ RRDSET *st = rs->rrdset;
+ RRDHOST *host = st->rrdhost;
+
+ // ------------------------------------------------------------------------
+ // CHART
+ rrdvar_free(host, &st->rrdvar_root_index, rs->var_local);
+ rs->var_local = NULL;
+
+ // ------------------------------------------------------------------------
+ // FAMILY
+ rrdvar_free(host, &st->rrdfamily->rrdvar_root_index, rs->var_family);
+ rs->var_family = NULL;
+
+ rrdvar_free(host, &st->rrdfamily->rrdvar_root_index, rs->var_family_name);
+ rs->var_family_name = NULL;
+
+ // ------------------------------------------------------------------------
+ // HOST
+ rrdvar_free(host, &host->rrdvar_root_index, rs->var_host);
+ rs->var_host = NULL;
+
+ rrdvar_free(host, &host->rrdvar_root_index, rs->var_host_name);
+ rs->var_host_name = NULL;
+
+ // ------------------------------------------------------------------------
+ // KEYS
+ freez(rs->key_fullid);
+ rs->key_fullid = NULL;
+
+ freez(rs->key_fullname);
+ rs->key_fullname = NULL;
+}
+
+static inline void rrdsetvar_create_variables(RRDSETVAR *rs) {
+ RRDSET *st = rs->rrdset;
+ RRDHOST *host = st->rrdhost;
+
+ RRDVAR_OPTIONS options = rs->options;
+ if(rs->options & RRDVAR_OPTION_ALLOCATED)
+        options &= ~RRDVAR_OPTION_ALLOCATED;
+
+ // ------------------------------------------------------------------------
+ // free the old ones (if any)
+
+ rrdsetvar_free_variables(rs);
+
+ // ------------------------------------------------------------------------
+ // KEYS
+
+ char buffer[RRDVAR_MAX_LENGTH + 1];
+ snprintfz(buffer, RRDVAR_MAX_LENGTH, "%s.%s", st->id, rs->variable);
+ rs->key_fullid = strdupz(buffer);
+
+ snprintfz(buffer, RRDVAR_MAX_LENGTH, "%s.%s", st->name, rs->variable);
+ rs->key_fullname = strdupz(buffer);
+
+ // ------------------------------------------------------------------------
+ // CHART
+ rs->var_local = rrdvar_create_and_index("local", &st->rrdvar_root_index, rs->variable, rs->type, options, rs->value);
+
+ // ------------------------------------------------------------------------
+ // FAMILY
+ rs->var_family = rrdvar_create_and_index("family", &st->rrdfamily->rrdvar_root_index, rs->key_fullid, rs->type, options, rs->value);
+ rs->var_family_name = rrdvar_create_and_index("family", &st->rrdfamily->rrdvar_root_index, rs->key_fullname, rs->type, options, rs->value);
+
+ // ------------------------------------------------------------------------
+ // HOST
+ rs->var_host = rrdvar_create_and_index("host", &host->rrdvar_root_index, rs->key_fullid, rs->type, options, rs->value);
+ rs->var_host_name = rrdvar_create_and_index("host", &host->rrdvar_root_index, rs->key_fullname, rs->type, options, rs->value);
+}
+
+RRDSETVAR *rrdsetvar_create(RRDSET *st, const char *variable, RRDVAR_TYPE type, void *value, RRDVAR_OPTIONS options) {
+ debug(D_VARIABLES, "RRDVARSET create for chart id '%s' name '%s' with variable name '%s'", st->id, st->name, variable);
+ RRDSETVAR *rs = (RRDSETVAR *)callocz(1, sizeof(RRDSETVAR));
+
+ rs->variable = strdupz(variable);
+ rs->hash = simple_hash(rs->variable);
+ rs->type = type;
+ rs->value = value;
+ rs->options = options;
+ rs->rrdset = st;
+
+ rs->next = st->variables;
+ st->variables = rs;
+
+ rrdsetvar_create_variables(rs);
+
+ return rs;
+}
+
+void rrdsetvar_rename_all(RRDSET *st) {
+ debug(D_VARIABLES, "RRDSETVAR rename for chart id '%s' name '%s'", st->id, st->name);
+
+ RRDSETVAR *rs;
+ for(rs = st->variables; rs ; rs = rs->next)
+ rrdsetvar_create_variables(rs);
+
+ rrdsetcalc_link_matching(st);
+}
+
+void rrdsetvar_free(RRDSETVAR *rs) {
+ RRDSET *st = rs->rrdset;
+ debug(D_VARIABLES, "RRDSETVAR free for chart id '%s' name '%s', variable '%s'", st->id, st->name, rs->variable);
+
+ if(st->variables == rs) {
+ st->variables = rs->next;
+ }
+ else {
+ RRDSETVAR *t;
+ for (t = st->variables; t && t->next != rs; t = t->next);
+ if(!t) error("RRDSETVAR '%s' not found in chart '%s' variables linked list", rs->key_fullname, st->id);
+ else t->next = rs->next;
+ }
+
+ rrdsetvar_free_variables(rs);
+
+ freez(rs->variable);
+
+ if(rs->options & RRDVAR_OPTION_ALLOCATED)
+ freez(rs->value);
+
+ freez(rs);
+}
+
+// --------------------------------------------------------------------------------------------------------------------
+// custom chart variables
+
+RRDSETVAR *rrdsetvar_custom_chart_variable_create(RRDSET *st, const char *name) {
+ RRDHOST *host = st->rrdhost;
+
+ char *n = strdupz(name);
+ rrdvar_fix_name(n);
+ uint32_t hash = simple_hash(n);
+
+ rrdset_wrlock(st);
+
+ // find it
+ RRDSETVAR *rs;
+ for(rs = st->variables; rs ; rs = rs->next) {
+ if(hash == rs->hash && strcmp(n, rs->variable) == 0) {
+ rrdset_unlock(st);
+ if(rs->options & RRDVAR_OPTION_CUSTOM_CHART_VAR) {
+ freez(n);
+ return rs;
+ }
+ else {
+ error("RRDSETVAR: custom variable '%s' on chart '%s' of host '%s', conflicts with an internal chart variable", n, st->id, host->hostname);
+ freez(n);
+ return NULL;
+ }
+ }
+ }
+
+ // not found, allocate one
+
+ calculated_number *v = mallocz(sizeof(calculated_number));
+ *v = NAN;
+
+ rs = rrdsetvar_create(st, n, RRDVAR_TYPE_CALCULATED, v, RRDVAR_OPTION_ALLOCATED|RRDVAR_OPTION_CUSTOM_CHART_VAR);
+ rrdset_unlock(st);
+
+ freez(n);
+ return rs;
+}
+
+void rrdsetvar_custom_chart_variable_set(RRDSETVAR *rs, calculated_number value) {
+ if(rs->type != RRDVAR_TYPE_CALCULATED || !(rs->options & RRDVAR_OPTION_CUSTOM_CHART_VAR) || !(rs->options & RRDVAR_OPTION_ALLOCATED)) {
+ error("RRDSETVAR: requested to set variable '%s' of chart '%s' on host '%s' to value " CALCULATED_NUMBER_FORMAT " but the variable is not a custom chart one.", rs->variable, rs->rrdset->id, rs->rrdset->rrdhost->hostname, value);
+ }
+ else {
+ calculated_number *v = rs->value;
+ if(*v != value) {
+ *v = value;
+
+ // mark the chart to be sent upstream
+ rrdset_flag_clear(rs->rrdset, RRDSET_FLAG_UPSTREAM_EXPOSED);
+ }
+ }
+}
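+
+// Illustrative sketch (not part of the original source): the intended use of
+// the two functions above is to create the custom chart variable once and then
+// update it on every collection cycle. The variable name is hypothetical:
+//
+//     RRDSETVAR *rs = rrdsetvar_custom_chart_variable_create(st, "my_variable");
+//     if(rs) rrdsetvar_custom_chart_variable_set(rs, 42.0);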
diff --git a/database/rrdsetvar.h b/database/rrdsetvar.h
new file mode 100644
index 0000000..34a26d2
--- /dev/null
+++ b/database/rrdsetvar.h
@@ -0,0 +1,44 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_RRDSETVAR_H
+#define NETDATA_RRDSETVAR_H 1
+
+#include "rrd.h"
+
+// variables linked to charts
+// We link variables to point to the values that are already
+// calculated / processed by the normal data collection process.
+// This means there is no speed penalty for using
+// these variables.
+
+struct rrdsetvar {
+ char *variable; // variable name
+ uint32_t hash; // variable name hash
+
+ char *key_fullid; // chart type.chart id.variable
+ char *key_fullname; // chart type.chart name.variable
+
+ RRDVAR_TYPE type;
+ void *value;
+
+ RRDVAR_OPTIONS options;
+
+ RRDVAR *var_local;
+ RRDVAR *var_family;
+ RRDVAR *var_host;
+ RRDVAR *var_family_name;
+ RRDVAR *var_host_name;
+
+ struct rrdset *rrdset;
+
+ struct rrdsetvar *next;
+};
+
+extern RRDSETVAR *rrdsetvar_custom_chart_variable_create(RRDSET *st, const char *name);
+extern void rrdsetvar_custom_chart_variable_set(RRDSETVAR *rv, calculated_number value);
+
+extern void rrdsetvar_rename_all(RRDSET *st);
+extern RRDSETVAR *rrdsetvar_create(RRDSET *st, const char *variable, RRDVAR_TYPE type, void *value, RRDVAR_OPTIONS options);
+extern void rrdsetvar_free(RRDSETVAR *rs);
+
+#endif //NETDATA_RRDSETVAR_H
diff --git a/database/rrdvar.c b/database/rrdvar.c
new file mode 100644
index 0000000..6b824d0
--- /dev/null
+++ b/database/rrdvar.c
@@ -0,0 +1,305 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#define NETDATA_HEALTH_INTERNALS
+#include "rrd.h"
+
+// ----------------------------------------------------------------------------
+// RRDVAR management
+
+inline int rrdvar_fix_name(char *variable) {
+ int fixed = 0;
+ while(*variable) {
+ if (!isalnum(*variable) && *variable != '.' && *variable != '_') {
+ *variable++ = '_';
+ fixed++;
+ }
+ else
+ variable++;
+ }
+
+ return fixed;
+}
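+
+// For example, rrdvar_fix_name() turns "disk-io latency" into "disk_io_latency":
+// every character that is not alphanumeric, '.' or '_' is replaced with '_',
+// and the function returns the number of replacements (2 in this case).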
+
+int rrdvar_compare(void* a, void* b) {
+ if(((RRDVAR *)a)->hash < ((RRDVAR *)b)->hash) return -1;
+ else if(((RRDVAR *)a)->hash > ((RRDVAR *)b)->hash) return 1;
+ else return strcmp(((RRDVAR *)a)->name, ((RRDVAR *)b)->name);
+}
+
+static inline RRDVAR *rrdvar_index_add(avl_tree_lock *tree, RRDVAR *rv) {
+ RRDVAR *ret = (RRDVAR *)avl_insert_lock(tree, (avl *)(rv));
+ if(ret != rv)
+ debug(D_VARIABLES, "Request to insert RRDVAR '%s' into index failed. Already exists.", rv->name);
+
+ return ret;
+}
+
+static inline RRDVAR *rrdvar_index_del(avl_tree_lock *tree, RRDVAR *rv) {
+ RRDVAR *ret = (RRDVAR *)avl_remove_lock(tree, (avl *)(rv));
+ if(!ret)
+ error("Request to remove RRDVAR '%s' from index failed. Not Found.", rv->name);
+
+ return ret;
+}
+
+static inline RRDVAR *rrdvar_index_find(avl_tree_lock *tree, const char *name, uint32_t hash) {
+ RRDVAR tmp;
+ tmp.name = (char *)name;
+ tmp.hash = (hash)?hash:simple_hash(tmp.name);
+
+ return (RRDVAR *)avl_search_lock(tree, (avl *)&tmp);
+}
+
+inline void rrdvar_free(RRDHOST *host, avl_tree_lock *tree, RRDVAR *rv) {
+ (void)host;
+
+ if(!rv) return;
+
+ if(tree) {
+ debug(D_VARIABLES, "Deleting variable '%s'", rv->name);
+ if(unlikely(!rrdvar_index_del(tree, rv)))
+ error("RRDVAR: Attempted to delete variable '%s' from host '%s', but it is not found.", rv->name, host->hostname);
+ }
+
+ if(rv->options & RRDVAR_OPTION_ALLOCATED)
+ freez(rv->value);
+
+ freez(rv->name);
+ freez(rv);
+}
+
+inline RRDVAR *rrdvar_create_and_index(const char *scope __maybe_unused, avl_tree_lock *tree, const char *name,
+ RRDVAR_TYPE type, RRDVAR_OPTIONS options, void *value) {
+ char *variable = strdupz(name);
+ rrdvar_fix_name(variable);
+ uint32_t hash = simple_hash(variable);
+
+ RRDVAR *rv = rrdvar_index_find(tree, variable, hash);
+ if(unlikely(!rv)) {
+ debug(D_VARIABLES, "Variable '%s' not found in scope '%s'. Creating a new one.", variable, scope);
+
+ rv = callocz(1, sizeof(RRDVAR));
+ rv->name = variable;
+ rv->hash = hash;
+ rv->type = type;
+ rv->options = options;
+ rv->value = value;
+ rv->last_updated = now_realtime_sec();
+
+ RRDVAR *ret = rrdvar_index_add(tree, rv);
+ if(unlikely(ret != rv)) {
+ debug(D_VARIABLES, "Variable '%s' in scope '%s' already exists", variable, scope);
+ freez(rv);
+ freez(variable);
+ rv = NULL;
+ }
+ else
+ debug(D_VARIABLES, "Variable '%s' created in scope '%s'", variable, scope);
+ }
+ else {
+ debug(D_VARIABLES, "Variable '%s' is already found in scope '%s'.", variable, scope);
+
+ // already exists
+ freez(variable);
+
+ // this is important
+ // it must return NULL - not the existing variable - or double-free will happen
+ rv = NULL;
+ }
+
+ return rv;
+}
+
+void rrdvar_free_remaining_variables(RRDHOST *host, avl_tree_lock *tree_lock) {
+    // This is not bulletproof - avl should support some means to destroy it
+ // with a callback for each item already in the index
+
+ RRDVAR *rv, *last = NULL;
+ while((rv = (RRDVAR *)tree_lock->avl_tree.root)) {
+ if(unlikely(rv == last)) {
+ error("RRDVAR: INTERNAL ERROR: Cannot cleanup tree of RRDVARs");
+ break;
+ }
+ last = rv;
+ rrdvar_free(host, tree_lock, rv);
+ }
+}
+
+// ----------------------------------------------------------------------------
+// CUSTOM HOST VARIABLES
+
+inline int rrdvar_callback_for_all_host_variables(RRDHOST *host, int (*callback)(void * /*rrdvar*/, void * /*data*/), void *data) {
+ return avl_traverse_lock(&host->rrdvar_root_index, callback, data);
+}
+
+static RRDVAR *rrdvar_custom_variable_create(const char *scope, avl_tree_lock *tree_lock, const char *name) {
+ calculated_number *v = callocz(1, sizeof(calculated_number));
+ *v = NAN;
+
+ RRDVAR *rv = rrdvar_create_and_index(scope, tree_lock, name, RRDVAR_TYPE_CALCULATED, RRDVAR_OPTION_CUSTOM_HOST_VAR|RRDVAR_OPTION_ALLOCATED, v);
+ if(unlikely(!rv)) {
+ freez(v);
+ debug(D_VARIABLES, "Requested variable '%s' already exists - possibly 2 plugins are updating it at the same time.", name);
+
+ char *variable = strdupz(name);
+ rrdvar_fix_name(variable);
+ uint32_t hash = simple_hash(variable);
+
+ // find the existing one to return it
+ rv = rrdvar_index_find(tree_lock, variable, hash);
+
+ freez(variable);
+ }
+
+ return rv;
+}
+
+RRDVAR *rrdvar_custom_host_variable_create(RRDHOST *host, const char *name) {
+ return rrdvar_custom_variable_create("host", &host->rrdvar_root_index, name);
+}
+
+void rrdvar_custom_host_variable_set(RRDHOST *host, RRDVAR *rv, calculated_number value) {
+ if(rv->type != RRDVAR_TYPE_CALCULATED || !(rv->options & RRDVAR_OPTION_CUSTOM_HOST_VAR) || !(rv->options & RRDVAR_OPTION_ALLOCATED))
+ error("requested to set variable '%s' to value " CALCULATED_NUMBER_FORMAT " but the variable is not a custom one.", rv->name, value);
+ else {
+ calculated_number *v = rv->value;
+ if(*v != value) {
+ *v = value;
+
+ rv->last_updated = now_realtime_sec();
+
+ // if the host is streaming, send this variable upstream immediately
+ rrdpush_sender_send_this_host_variable_now(host, rv);
+ }
+ }
+}
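+
+// Illustrative sketch (not part of the original source): a plugin would
+// typically create a custom host variable once and update it as new values
+// arrive. The variable name is hypothetical:
+//
+//     RRDVAR *rv = rrdvar_custom_host_variable_create(host, "my_host_variable");
+//     if(rv) rrdvar_custom_host_variable_set(host, rv, 1.0);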
+
+int foreach_host_variable_callback(RRDHOST *host, int (*callback)(RRDVAR * /*rv*/, void * /*data*/), void *data) {
+ return avl_traverse_lock(&host->rrdvar_root_index, (int (*)(void *, void *))callback, data);
+}
+
+// ----------------------------------------------------------------------------
+// RRDVAR lookup
+
+calculated_number rrdvar2number(RRDVAR *rv) {
+ switch(rv->type) {
+ case RRDVAR_TYPE_CALCULATED: {
+ calculated_number *n = (calculated_number *)rv->value;
+ return *n;
+ }
+
+ case RRDVAR_TYPE_TIME_T: {
+ time_t *n = (time_t *)rv->value;
+ return *n;
+ }
+
+ case RRDVAR_TYPE_COLLECTED: {
+ collected_number *n = (collected_number *)rv->value;
+ return *n;
+ }
+
+ case RRDVAR_TYPE_TOTAL: {
+ total_number *n = (total_number *)rv->value;
+ return *n;
+ }
+
+ case RRDVAR_TYPE_INT: {
+ int *n = (int *)rv->value;
+ return *n;
+ }
+
+ default:
+ error("I don't know how to convert RRDVAR type %u to calculated_number", rv->type);
+ return NAN;
+ }
+}
+
+int health_variable_lookup(const char *variable, uint32_t hash, RRDCALC *rc, calculated_number *result) {
+ RRDSET *st = rc->rrdset;
+ if(!st) return 0;
+
+ RRDHOST *host = st->rrdhost;
+ RRDVAR *rv;
+
+ rv = rrdvar_index_find(&st->rrdvar_root_index, variable, hash);
+ if(rv) {
+ *result = rrdvar2number(rv);
+ return 1;
+ }
+
+ rv = rrdvar_index_find(&st->rrdfamily->rrdvar_root_index, variable, hash);
+ if(rv) {
+ *result = rrdvar2number(rv);
+ return 1;
+ }
+
+ rv = rrdvar_index_find(&host->rrdvar_root_index, variable, hash);
+ if(rv) {
+ *result = rrdvar2number(rv);
+ return 1;
+ }
+
+ return 0;
+}
+
+// ----------------------------------------------------------------------------
+// RRDVAR to JSON
+
+struct variable2json_helper {
+ BUFFER *buf;
+ size_t counter;
+ RRDVAR_OPTIONS options;
+};
+
+static int single_variable2json(void *entry, void *data) {
+ struct variable2json_helper *helper = (struct variable2json_helper *)data;
+ RRDVAR *rv = (RRDVAR *)entry;
+ calculated_number value = rrdvar2number(rv);
+
+ if (helper->options == RRDVAR_OPTION_DEFAULT || rv->options & helper->options) {
+ if(unlikely(isnan(value) || isinf(value)))
+ buffer_sprintf(helper->buf, "%s\n\t\t\"%s\": null", helper->counter?",":"", rv->name);
+ else
+ buffer_sprintf(helper->buf, "%s\n\t\t\"%s\": %0.5" LONG_DOUBLE_MODIFIER, helper->counter?",":"", rv->name, (LONG_DOUBLE)value);
+
+ helper->counter++;
+ }
+
+ return 0;
+}
+
+void health_api_v1_chart_custom_variables2json(RRDSET *st, BUFFER *buf) {
+ struct variable2json_helper helper = {
+ .buf = buf,
+ .counter = 0,
+ .options = RRDVAR_OPTION_CUSTOM_CHART_VAR
+ };
+
+ buffer_sprintf(buf, "{");
+ avl_traverse_lock(&st->rrdvar_root_index, single_variable2json, (void *)&helper);
+ buffer_strcat(buf, "\n\t\t\t}");
+}
+
+void health_api_v1_chart_variables2json(RRDSET *st, BUFFER *buf) {
+ RRDHOST *host = st->rrdhost;
+
+ struct variable2json_helper helper = {
+ .buf = buf,
+ .counter = 0,
+ .options = RRDVAR_OPTION_DEFAULT
+ };
+
+ buffer_sprintf(buf, "{\n\t\"chart\": \"%s\",\n\t\"chart_name\": \"%s\",\n\t\"chart_context\": \"%s\",\n\t\"chart_variables\": {", st->id, st->name, st->context);
+ avl_traverse_lock(&st->rrdvar_root_index, single_variable2json, (void *)&helper);
+
+ buffer_sprintf(buf, "\n\t},\n\t\"family\": \"%s\",\n\t\"family_variables\": {", st->family);
+ helper.counter = 0;
+ avl_traverse_lock(&st->rrdfamily->rrdvar_root_index, single_variable2json, (void *)&helper);
+
+ buffer_sprintf(buf, "\n\t},\n\t\"host\": \"%s\",\n\t\"host_variables\": {", host->hostname);
+ helper.counter = 0;
+ avl_traverse_lock(&host->rrdvar_root_index, single_variable2json, (void *)&helper);
+
+ buffer_strcat(buf, "\n\t}\n}\n");
+}
+
diff --git a/database/rrdvar.h b/database/rrdvar.h
new file mode 100644
index 0000000..6d1461b
--- /dev/null
+++ b/database/rrdvar.h
@@ -0,0 +1,66 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_RRDVAR_H
+#define NETDATA_RRDVAR_H 1
+
+#include "libnetdata/libnetdata.h"
+
+extern int rrdvar_compare(void *a, void *b);
+
+typedef enum rrdvar_type {
+ RRDVAR_TYPE_CALCULATED = 1,
+ RRDVAR_TYPE_TIME_T = 2,
+ RRDVAR_TYPE_COLLECTED = 3,
+ RRDVAR_TYPE_TOTAL = 4,
+ RRDVAR_TYPE_INT = 5
+} RRDVAR_TYPE;
+
+typedef enum rrdvar_options {
+ RRDVAR_OPTION_DEFAULT = 0,
+ RRDVAR_OPTION_ALLOCATED = (1 << 0), // the value ptr is allocated (not a reference)
+ RRDVAR_OPTION_CUSTOM_HOST_VAR = (1 << 1), // this is a custom host variable, not associated with a dimension
+ RRDVAR_OPTION_CUSTOM_CHART_VAR = (1 << 2), // this is a custom chart variable, not associated with a dimension
+    RRDVAR_OPTION_RRDCALC_LOCAL_VAR = (1 << 3), // this is an alarm variable, attached to a chart
+    RRDVAR_OPTION_RRDCALC_FAMILY_VAR = (1 << 4), // this is an alarm variable, attached to a family
+    RRDVAR_OPTION_RRDCALC_HOST_CHARTID_VAR = (1 << 5), // this is an alarm variable, attached to the host, using the chart id
+    RRDVAR_OPTION_RRDCALC_HOST_CHARTNAME_VAR = (1 << 6), // this is an alarm variable, attached to the host, using the chart name
+} RRDVAR_OPTIONS;
+
+// the variables as stored in the variables indexes
+// there are 3 indexes:
+// 1. at each chart (RRDSET.rrdvar_root_index)
+// 2. at each context (RRDFAMILY.rrdvar_root_index)
+// 3. at each host (RRDHOST.rrdvar_root_index)
+struct rrdvar {
+ avl avl;
+
+ char *name;
+ uint32_t hash;
+
+ RRDVAR_TYPE type;
+ RRDVAR_OPTIONS options;
+
+ void *value;
+
+ time_t last_updated;
+};
+
+#define RRDVAR_MAX_LENGTH 1024
+
+extern int rrdvar_fix_name(char *variable);
+
+#include "rrd.h"
+
+extern RRDVAR *rrdvar_custom_host_variable_create(RRDHOST *host, const char *name);
+extern void rrdvar_custom_host_variable_set(RRDHOST *host, RRDVAR *rv, calculated_number value);
+extern int foreach_host_variable_callback(RRDHOST *host, int (*callback)(RRDVAR *rv, void *data), void *data);
+extern void rrdvar_free_remaining_variables(RRDHOST *host, avl_tree_lock *tree_lock);
+
+extern int rrdvar_callback_for_all_host_variables(RRDHOST *host, int (*callback)(void *rrdvar, void *data), void *data);
+
+extern calculated_number rrdvar2number(RRDVAR *rv);
+
+extern RRDVAR *rrdvar_create_and_index(const char *scope, avl_tree_lock *tree, const char *name, RRDVAR_TYPE type, RRDVAR_OPTIONS options, void *value);
+extern void rrdvar_free(RRDHOST *host, avl_tree_lock *tree, RRDVAR *rv);
+
+#endif //NETDATA_RRDVAR_H
diff --git a/database/sqlite/sqlite3.c b/database/sqlite/sqlite3.c
new file mode 100644
index 0000000..5a804fb
--- /dev/null
+++ b/database/sqlite/sqlite3.c
@@ -0,0 +1,230536 @@
+/******************************************************************************
+** This file is an amalgamation of many separate C source files from SQLite
+** version 3.33.0. By combining all the individual C code files into this
+** single large file, the entire code can be compiled as a single translation
+** unit. This allows many compilers to do optimizations that would not be
+** possible if the files were compiled separately. Performance improvements
+** of 5% or more are commonly seen when SQLite is compiled as a single
+** translation unit.
+**
+** This file is all you need to compile SQLite. To use SQLite in other
+** programs, you need this file and the "sqlite3.h" header file that defines
+** the programming interface to the SQLite library. (If you do not have
+** the "sqlite3.h" header file at hand, you will find a copy embedded within
+** the text of this file. Search for "Begin file sqlite3.h" to find the start
+** of the embedded sqlite3.h header file.) Additional code files may be needed
+** if you want a wrapper to interface SQLite with your choice of programming
+** language. The code for the "sqlite3" command-line shell is also in a
+** separate file. This file contains only code for the core SQLite library.
+*/
+#define __maybe_unused __attribute__((unused))
+#define SQLITE_CORE 1
+#define SQLITE_AMALGAMATION 1
+#ifndef SQLITE_PRIVATE
+# define SQLITE_PRIVATE static
+#endif
+/************** Begin file ctime.c *******************************************/
+/*
+** 2010 February 23
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+**
+** This file implements routines used to report what compile-time options
+** SQLite was built with.
+*/
+#define SQLITE_ENABLE_UPDATE_DELETE_LIMIT 1
+#define SQLITE_OMIT_LOAD_EXTENSION 1
+#define SQLITE_ENABLE_DBSTAT_VTAB 1
+#ifndef SQLITE_OMIT_COMPILEOPTION_DIAGS /* IMP: R-16824-07538 */
+
+/*
+** Include the configuration header output by 'configure' if we're using the
+** autoconf-based build
+*/
+#if defined(_HAVE_SQLITE_CONFIG_H) && !defined(SQLITECONFIG_H)
+#include "config.h"
+#define SQLITECONFIG_H 1
+#endif
+
+/* These macros are provided to "stringify" the value of the define
+** for those options in which the value is meaningful. */
+#define CTIMEOPT_VAL_(opt) #opt
+#define CTIMEOPT_VAL(opt) CTIMEOPT_VAL_(opt)
+
+/* Like CTIMEOPT_VAL, but especially for SQLITE_DEFAULT_LOOKASIDE. This
+** option requires a separate macro because legal values contain a single
+** comma. e.g. (-DSQLITE_DEFAULT_LOOKASIDE="100,100") */
+#define CTIMEOPT_VAL2_(opt1,opt2) #opt1 "," #opt2
+#define CTIMEOPT_VAL2(opt) CTIMEOPT_VAL2_(opt)
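+
+/* Illustrative expansion: with -DSQLITE_MAX_ATTACHED=10 on the compiler
+** command line, CTIMEOPT_VAL(SQLITE_MAX_ATTACHED) first expands its argument
+** and then stringifies it, yielding "10", so the adjacent string literals
+** "MAX_ATTACHED=" CTIMEOPT_VAL(SQLITE_MAX_ATTACHED) concatenate to the
+** single string "MAX_ATTACHED=10". */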
+
+/*
+** An array of names of all compile-time options. This array should
+** be sorted A-Z.
+**
+** This array looks large, but a typical installation enables only a handful
+** of compile-time options, so this array is usually rather short and uses
+** little memory space.
+*/
+static const char * const sqlite3azCompileOpt[] = {
+
+/*
+** BEGIN CODE GENERATED BY tool/mkctime.tcl
+*/
+#if SQLITE_32BIT_ROWID
+ "32BIT_ROWID",
+#endif
+#if SQLITE_4_BYTE_ALIGNED_MALLOC
+ "4_BYTE_ALIGNED_MALLOC",
+#endif
+#if SQLITE_64BIT_STATS
+ "64BIT_STATS",
+#endif
+#if SQLITE_ALLOW_COVERING_INDEX_SCAN
+ "ALLOW_COVERING_INDEX_SCAN",
+#endif
+#if SQLITE_ALLOW_URI_AUTHORITY
+ "ALLOW_URI_AUTHORITY",
+#endif
+#ifdef SQLITE_BITMASK_TYPE
+ "BITMASK_TYPE=" CTIMEOPT_VAL(SQLITE_BITMASK_TYPE),
+#endif
+#if SQLITE_BUG_COMPATIBLE_20160819
+ "BUG_COMPATIBLE_20160819",
+#endif
+#if SQLITE_CASE_SENSITIVE_LIKE
+ "CASE_SENSITIVE_LIKE",
+#endif
+#if SQLITE_CHECK_PAGES
+ "CHECK_PAGES",
+#endif
+#if defined(__clang__) && defined(__clang_major__)
+ "COMPILER=clang-" CTIMEOPT_VAL(__clang_major__) "."
+ CTIMEOPT_VAL(__clang_minor__) "."
+ CTIMEOPT_VAL(__clang_patchlevel__),
+#elif defined(_MSC_VER)
+ "COMPILER=msvc-" CTIMEOPT_VAL(_MSC_VER),
+#elif defined(__GNUC__) && defined(__VERSION__)
+ "COMPILER=gcc-" __VERSION__,
+#endif
+#if SQLITE_COVERAGE_TEST
+ "COVERAGE_TEST",
+#endif
+#if SQLITE_DEBUG
+ "DEBUG",
+#endif
+#if SQLITE_DEFAULT_AUTOMATIC_INDEX
+ "DEFAULT_AUTOMATIC_INDEX",
+#endif
+#if SQLITE_DEFAULT_AUTOVACUUM
+ "DEFAULT_AUTOVACUUM",
+#endif
+#ifdef SQLITE_DEFAULT_CACHE_SIZE
+ "DEFAULT_CACHE_SIZE=" CTIMEOPT_VAL(SQLITE_DEFAULT_CACHE_SIZE),
+#endif
+#if SQLITE_DEFAULT_CKPTFULLFSYNC
+ "DEFAULT_CKPTFULLFSYNC",
+#endif
+#ifdef SQLITE_DEFAULT_FILE_FORMAT
+ "DEFAULT_FILE_FORMAT=" CTIMEOPT_VAL(SQLITE_DEFAULT_FILE_FORMAT),
+#endif
+#ifdef SQLITE_DEFAULT_FILE_PERMISSIONS
+ "DEFAULT_FILE_PERMISSIONS=" CTIMEOPT_VAL(SQLITE_DEFAULT_FILE_PERMISSIONS),
+#endif
+#if SQLITE_DEFAULT_FOREIGN_KEYS
+ "DEFAULT_FOREIGN_KEYS",
+#endif
+#ifdef SQLITE_DEFAULT_JOURNAL_SIZE_LIMIT
+ "DEFAULT_JOURNAL_SIZE_LIMIT=" CTIMEOPT_VAL(SQLITE_DEFAULT_JOURNAL_SIZE_LIMIT),
+#endif
+#ifdef SQLITE_DEFAULT_LOCKING_MODE
+ "DEFAULT_LOCKING_MODE=" CTIMEOPT_VAL(SQLITE_DEFAULT_LOCKING_MODE),
+#endif
+#ifdef SQLITE_DEFAULT_LOOKASIDE
+ "DEFAULT_LOOKASIDE=" CTIMEOPT_VAL2(SQLITE_DEFAULT_LOOKASIDE),
+#endif
+#if SQLITE_DEFAULT_MEMSTATUS
+ "DEFAULT_MEMSTATUS",
+#endif
+#ifdef SQLITE_DEFAULT_MMAP_SIZE
+ "DEFAULT_MMAP_SIZE=" CTIMEOPT_VAL(SQLITE_DEFAULT_MMAP_SIZE),
+#endif
+#ifdef SQLITE_DEFAULT_PAGE_SIZE
+ "DEFAULT_PAGE_SIZE=" CTIMEOPT_VAL(SQLITE_DEFAULT_PAGE_SIZE),
+#endif
+#ifdef SQLITE_DEFAULT_PCACHE_INITSZ
+ "DEFAULT_PCACHE_INITSZ=" CTIMEOPT_VAL(SQLITE_DEFAULT_PCACHE_INITSZ),
+#endif
+#ifdef SQLITE_DEFAULT_PROXYDIR_PERMISSIONS
+ "DEFAULT_PROXYDIR_PERMISSIONS=" CTIMEOPT_VAL(SQLITE_DEFAULT_PROXYDIR_PERMISSIONS),
+#endif
+#if SQLITE_DEFAULT_RECURSIVE_TRIGGERS
+ "DEFAULT_RECURSIVE_TRIGGERS",
+#endif
+#ifdef SQLITE_DEFAULT_ROWEST
+ "DEFAULT_ROWEST=" CTIMEOPT_VAL(SQLITE_DEFAULT_ROWEST),
+#endif
+#ifdef SQLITE_DEFAULT_SECTOR_SIZE
+ "DEFAULT_SECTOR_SIZE=" CTIMEOPT_VAL(SQLITE_DEFAULT_SECTOR_SIZE),
+#endif
+#ifdef SQLITE_DEFAULT_SYNCHRONOUS
+ "DEFAULT_SYNCHRONOUS=" CTIMEOPT_VAL(SQLITE_DEFAULT_SYNCHRONOUS),
+#endif
+#ifdef SQLITE_DEFAULT_WAL_AUTOCHECKPOINT
+ "DEFAULT_WAL_AUTOCHECKPOINT=" CTIMEOPT_VAL(SQLITE_DEFAULT_WAL_AUTOCHECKPOINT),
+#endif
+#ifdef SQLITE_DEFAULT_WAL_SYNCHRONOUS
+ "DEFAULT_WAL_SYNCHRONOUS=" CTIMEOPT_VAL(SQLITE_DEFAULT_WAL_SYNCHRONOUS),
+#endif
+#ifdef SQLITE_DEFAULT_WORKER_THREADS
+ "DEFAULT_WORKER_THREADS=" CTIMEOPT_VAL(SQLITE_DEFAULT_WORKER_THREADS),
+#endif
+#if SQLITE_DIRECT_OVERFLOW_READ
+ "DIRECT_OVERFLOW_READ",
+#endif
+#if SQLITE_DISABLE_DIRSYNC
+ "DISABLE_DIRSYNC",
+#endif
+#if SQLITE_DISABLE_FTS3_UNICODE
+ "DISABLE_FTS3_UNICODE",
+#endif
+#if SQLITE_DISABLE_FTS4_DEFERRED
+ "DISABLE_FTS4_DEFERRED",
+#endif
+#if SQLITE_DISABLE_INTRINSIC
+ "DISABLE_INTRINSIC",
+#endif
+#if SQLITE_DISABLE_LFS
+ "DISABLE_LFS",
+#endif
+#if SQLITE_DISABLE_PAGECACHE_OVERFLOW_STATS
+ "DISABLE_PAGECACHE_OVERFLOW_STATS",
+#endif
+#if SQLITE_DISABLE_SKIPAHEAD_DISTINCT
+ "DISABLE_SKIPAHEAD_DISTINCT",
+#endif
+#ifdef SQLITE_ENABLE_8_3_NAMES
+ "ENABLE_8_3_NAMES=" CTIMEOPT_VAL(SQLITE_ENABLE_8_3_NAMES),
+#endif
+#if SQLITE_ENABLE_API_ARMOR
+ "ENABLE_API_ARMOR",
+#endif
+#if SQLITE_ENABLE_ATOMIC_WRITE
+ "ENABLE_ATOMIC_WRITE",
+#endif
+#if SQLITE_ENABLE_BATCH_ATOMIC_WRITE
+ "ENABLE_BATCH_ATOMIC_WRITE",
+#endif
+#if SQLITE_ENABLE_BYTECODE_VTAB
+ "ENABLE_BYTECODE_VTAB",
+#endif
+#if SQLITE_ENABLE_CEROD
+ "ENABLE_CEROD=" CTIMEOPT_VAL(SQLITE_ENABLE_CEROD),
+#endif
+#if SQLITE_ENABLE_COLUMN_METADATA
+ "ENABLE_COLUMN_METADATA",
+#endif
+#if SQLITE_ENABLE_COLUMN_USED_MASK
+ "ENABLE_COLUMN_USED_MASK",
+#endif
+#if SQLITE_ENABLE_COSTMULT
+ "ENABLE_COSTMULT",
+#endif
+#if SQLITE_ENABLE_CURSOR_HINTS
+ "ENABLE_CURSOR_HINTS",
+#endif
+#if SQLITE_ENABLE_DBSTAT_VTAB
+ "ENABLE_DBSTAT_VTAB",
+#endif
+#if SQLITE_ENABLE_EXPENSIVE_ASSERT
+ "ENABLE_EXPENSIVE_ASSERT",
+#endif
+#if SQLITE_ENABLE_FTS1
+ "ENABLE_FTS1",
+#endif
+#if SQLITE_ENABLE_FTS2
+ "ENABLE_FTS2",
+#endif
+#if SQLITE_ENABLE_FTS3
+ "ENABLE_FTS3",
+#endif
+#if SQLITE_ENABLE_FTS3_PARENTHESIS
+ "ENABLE_FTS3_PARENTHESIS",
+#endif
+#if SQLITE_ENABLE_FTS3_TOKENIZER
+ "ENABLE_FTS3_TOKENIZER",
+#endif
+#if SQLITE_ENABLE_FTS4
+ "ENABLE_FTS4",
+#endif
+#if SQLITE_ENABLE_FTS5
+ "ENABLE_FTS5",
+#endif
+#if SQLITE_ENABLE_GEOPOLY
+ "ENABLE_GEOPOLY",
+#endif
+#if SQLITE_ENABLE_HIDDEN_COLUMNS
+ "ENABLE_HIDDEN_COLUMNS",
+#endif
+#if SQLITE_ENABLE_ICU
+ "ENABLE_ICU",
+#endif
+#if SQLITE_ENABLE_IOTRACE
+ "ENABLE_IOTRACE",
+#endif
+#if SQLITE_ENABLE_JSON1
+ "ENABLE_JSON1",
+#endif
+#if SQLITE_ENABLE_LOAD_EXTENSION
+ "ENABLE_LOAD_EXTENSION",
+#endif
+#ifdef SQLITE_ENABLE_LOCKING_STYLE
+ "ENABLE_LOCKING_STYLE=" CTIMEOPT_VAL(SQLITE_ENABLE_LOCKING_STYLE),
+#endif
+#if SQLITE_ENABLE_MEMORY_MANAGEMENT
+ "ENABLE_MEMORY_MANAGEMENT",
+#endif
+#if SQLITE_ENABLE_MEMSYS3
+ "ENABLE_MEMSYS3",
+#endif
+#if SQLITE_ENABLE_MEMSYS5
+ "ENABLE_MEMSYS5",
+#endif
+#if SQLITE_ENABLE_MULTIPLEX
+ "ENABLE_MULTIPLEX",
+#endif
+#if SQLITE_ENABLE_NORMALIZE
+ "ENABLE_NORMALIZE",
+#endif
+#if SQLITE_ENABLE_NULL_TRIM
+ "ENABLE_NULL_TRIM",
+#endif
+#if SQLITE_ENABLE_OVERSIZE_CELL_CHECK
+ "ENABLE_OVERSIZE_CELL_CHECK",
+#endif
+#if SQLITE_ENABLE_PREUPDATE_HOOK
+ "ENABLE_PREUPDATE_HOOK",
+#endif
+#if SQLITE_ENABLE_QPSG
+ "ENABLE_QPSG",
+#endif
+#if SQLITE_ENABLE_RBU
+ "ENABLE_RBU",
+#endif
+#if SQLITE_ENABLE_RTREE
+ "ENABLE_RTREE",
+#endif
+#if SQLITE_ENABLE_SELECTTRACE
+ "ENABLE_SELECTTRACE",
+#endif
+#if SQLITE_ENABLE_SESSION
+ "ENABLE_SESSION",
+#endif
+#if SQLITE_ENABLE_SNAPSHOT
+ "ENABLE_SNAPSHOT",
+#endif
+#if SQLITE_ENABLE_SORTER_REFERENCES
+ "ENABLE_SORTER_REFERENCES",
+#endif
+#if SQLITE_ENABLE_SQLLOG
+ "ENABLE_SQLLOG",
+#endif
+#if defined(SQLITE_ENABLE_STAT4)
+ "ENABLE_STAT4",
+#endif
+#if SQLITE_ENABLE_STMTVTAB
+ "ENABLE_STMTVTAB",
+#endif
+#if SQLITE_ENABLE_STMT_SCANSTATUS
+ "ENABLE_STMT_SCANSTATUS",
+#endif
+#if SQLITE_ENABLE_UNKNOWN_SQL_FUNCTION
+ "ENABLE_UNKNOWN_SQL_FUNCTION",
+#endif
+#if SQLITE_ENABLE_UNLOCK_NOTIFY
+ "ENABLE_UNLOCK_NOTIFY",
+#endif
+#if SQLITE_ENABLE_UPDATE_DELETE_LIMIT
+ "ENABLE_UPDATE_DELETE_LIMIT",
+#endif
+#if SQLITE_ENABLE_URI_00_ERROR
+ "ENABLE_URI_00_ERROR",
+#endif
+#if SQLITE_ENABLE_VFSTRACE
+ "ENABLE_VFSTRACE",
+#endif
+#if SQLITE_ENABLE_WHERETRACE
+ "ENABLE_WHERETRACE",
+#endif
+#if SQLITE_ENABLE_ZIPVFS
+ "ENABLE_ZIPVFS",
+#endif
+#if SQLITE_EXPLAIN_ESTIMATED_ROWS
+ "EXPLAIN_ESTIMATED_ROWS",
+#endif
+#if SQLITE_EXTRA_IFNULLROW
+ "EXTRA_IFNULLROW",
+#endif
+#ifdef SQLITE_EXTRA_INIT
+ "EXTRA_INIT=" CTIMEOPT_VAL(SQLITE_EXTRA_INIT),
+#endif
+#ifdef SQLITE_EXTRA_SHUTDOWN
+ "EXTRA_SHUTDOWN=" CTIMEOPT_VAL(SQLITE_EXTRA_SHUTDOWN),
+#endif
+#ifdef SQLITE_FTS3_MAX_EXPR_DEPTH
+ "FTS3_MAX_EXPR_DEPTH=" CTIMEOPT_VAL(SQLITE_FTS3_MAX_EXPR_DEPTH),
+#endif
+#if SQLITE_FTS5_ENABLE_TEST_MI
+ "FTS5_ENABLE_TEST_MI",
+#endif
+#if SQLITE_FTS5_NO_WITHOUT_ROWID
+ "FTS5_NO_WITHOUT_ROWID",
+#endif
+#if HAVE_ISNAN || SQLITE_HAVE_ISNAN
+ "HAVE_ISNAN",
+#endif
+#if SQLITE_HOMEGROWN_RECURSIVE_MUTEX
+ "HOMEGROWN_RECURSIVE_MUTEX",
+#endif
+#if SQLITE_IGNORE_AFP_LOCK_ERRORS
+ "IGNORE_AFP_LOCK_ERRORS",
+#endif
+#if SQLITE_IGNORE_FLOCK_LOCK_ERRORS
+ "IGNORE_FLOCK_LOCK_ERRORS",
+#endif
+#if SQLITE_INLINE_MEMCPY
+ "INLINE_MEMCPY",
+#endif
+#if SQLITE_INT64_TYPE
+ "INT64_TYPE",
+#endif
+#ifdef SQLITE_INTEGRITY_CHECK_ERROR_MAX
+ "INTEGRITY_CHECK_ERROR_MAX=" CTIMEOPT_VAL(SQLITE_INTEGRITY_CHECK_ERROR_MAX),
+#endif
+#if SQLITE_LIKE_DOESNT_MATCH_BLOBS
+ "LIKE_DOESNT_MATCH_BLOBS",
+#endif
+#if SQLITE_LOCK_TRACE
+ "LOCK_TRACE",
+#endif
+#if SQLITE_LOG_CACHE_SPILL
+ "LOG_CACHE_SPILL",
+#endif
+#ifdef SQLITE_MALLOC_SOFT_LIMIT
+ "MALLOC_SOFT_LIMIT=" CTIMEOPT_VAL(SQLITE_MALLOC_SOFT_LIMIT),
+#endif
+#ifdef SQLITE_MAX_ATTACHED
+ "MAX_ATTACHED=" CTIMEOPT_VAL(SQLITE_MAX_ATTACHED),
+#endif
+#ifdef SQLITE_MAX_COLUMN
+ "MAX_COLUMN=" CTIMEOPT_VAL(SQLITE_MAX_COLUMN),
+#endif
+#ifdef SQLITE_MAX_COMPOUND_SELECT
+ "MAX_COMPOUND_SELECT=" CTIMEOPT_VAL(SQLITE_MAX_COMPOUND_SELECT),
+#endif
+#ifdef SQLITE_MAX_DEFAULT_PAGE_SIZE
+ "MAX_DEFAULT_PAGE_SIZE=" CTIMEOPT_VAL(SQLITE_MAX_DEFAULT_PAGE_SIZE),
+#endif
+#ifdef SQLITE_MAX_EXPR_DEPTH
+ "MAX_EXPR_DEPTH=" CTIMEOPT_VAL(SQLITE_MAX_EXPR_DEPTH),
+#endif
+#ifdef SQLITE_MAX_FUNCTION_ARG
+ "MAX_FUNCTION_ARG=" CTIMEOPT_VAL(SQLITE_MAX_FUNCTION_ARG),
+#endif
+#ifdef SQLITE_MAX_LENGTH
+ "MAX_LENGTH=" CTIMEOPT_VAL(SQLITE_MAX_LENGTH),
+#endif
+#ifdef SQLITE_MAX_LIKE_PATTERN_LENGTH
+ "MAX_LIKE_PATTERN_LENGTH=" CTIMEOPT_VAL(SQLITE_MAX_LIKE_PATTERN_LENGTH),
+#endif
+#ifdef SQLITE_MAX_MEMORY
+ "MAX_MEMORY=" CTIMEOPT_VAL(SQLITE_MAX_MEMORY),
+#endif
+#ifdef SQLITE_MAX_MMAP_SIZE
+ "MAX_MMAP_SIZE=" CTIMEOPT_VAL(SQLITE_MAX_MMAP_SIZE),
+#endif
+#ifdef SQLITE_MAX_MMAP_SIZE_
+ "MAX_MMAP_SIZE_=" CTIMEOPT_VAL(SQLITE_MAX_MMAP_SIZE_),
+#endif
+#ifdef SQLITE_MAX_PAGE_COUNT
+ "MAX_PAGE_COUNT=" CTIMEOPT_VAL(SQLITE_MAX_PAGE_COUNT),
+#endif
+#ifdef SQLITE_MAX_PAGE_SIZE
+ "MAX_PAGE_SIZE=" CTIMEOPT_VAL(SQLITE_MAX_PAGE_SIZE),
+#endif
+#ifdef SQLITE_MAX_SCHEMA_RETRY
+ "MAX_SCHEMA_RETRY=" CTIMEOPT_VAL(SQLITE_MAX_SCHEMA_RETRY),
+#endif
+#ifdef SQLITE_MAX_SQL_LENGTH
+ "MAX_SQL_LENGTH=" CTIMEOPT_VAL(SQLITE_MAX_SQL_LENGTH),
+#endif
+#ifdef SQLITE_MAX_TRIGGER_DEPTH
+ "MAX_TRIGGER_DEPTH=" CTIMEOPT_VAL(SQLITE_MAX_TRIGGER_DEPTH),
+#endif
+#ifdef SQLITE_MAX_VARIABLE_NUMBER
+ "MAX_VARIABLE_NUMBER=" CTIMEOPT_VAL(SQLITE_MAX_VARIABLE_NUMBER),
+#endif
+#ifdef SQLITE_MAX_VDBE_OP
+ "MAX_VDBE_OP=" CTIMEOPT_VAL(SQLITE_MAX_VDBE_OP),
+#endif
+#ifdef SQLITE_MAX_WORKER_THREADS
+ "MAX_WORKER_THREADS=" CTIMEOPT_VAL(SQLITE_MAX_WORKER_THREADS),
+#endif
+#if SQLITE_MEMDEBUG
+ "MEMDEBUG",
+#endif
+#if SQLITE_MIXED_ENDIAN_64BIT_FLOAT
+ "MIXED_ENDIAN_64BIT_FLOAT",
+#endif
+#if SQLITE_MMAP_READWRITE
+ "MMAP_READWRITE",
+#endif
+#if SQLITE_MUTEX_NOOP
+ "MUTEX_NOOP",
+#endif
+#if SQLITE_MUTEX_NREF
+ "MUTEX_NREF",
+#endif
+#if SQLITE_MUTEX_OMIT
+ "MUTEX_OMIT",
+#endif
+#if SQLITE_MUTEX_PTHREADS
+ "MUTEX_PTHREADS",
+#endif
+#if SQLITE_MUTEX_W32
+ "MUTEX_W32",
+#endif
+#if SQLITE_NEED_ERR_NAME
+ "NEED_ERR_NAME",
+#endif
+#if SQLITE_NOINLINE
+ "NOINLINE",
+#endif
+#if SQLITE_NO_SYNC
+ "NO_SYNC",
+#endif
+#if SQLITE_OMIT_ALTERTABLE
+ "OMIT_ALTERTABLE",
+#endif
+#if SQLITE_OMIT_ANALYZE
+ "OMIT_ANALYZE",
+#endif
+#if SQLITE_OMIT_ATTACH
+ "OMIT_ATTACH",
+#endif
+#if SQLITE_OMIT_AUTHORIZATION
+ "OMIT_AUTHORIZATION",
+#endif
+#if SQLITE_OMIT_AUTOINCREMENT
+ "OMIT_AUTOINCREMENT",
+#endif
+#if SQLITE_OMIT_AUTOINIT
+ "OMIT_AUTOINIT",
+#endif
+#if SQLITE_OMIT_AUTOMATIC_INDEX
+ "OMIT_AUTOMATIC_INDEX",
+#endif
+#if SQLITE_OMIT_AUTORESET
+ "OMIT_AUTORESET",
+#endif
+#if SQLITE_OMIT_AUTOVACUUM
+ "OMIT_AUTOVACUUM",
+#endif
+#if SQLITE_OMIT_BETWEEN_OPTIMIZATION
+ "OMIT_BETWEEN_OPTIMIZATION",
+#endif
+#if SQLITE_OMIT_BLOB_LITERAL
+ "OMIT_BLOB_LITERAL",
+#endif
+#if SQLITE_OMIT_CAST
+ "OMIT_CAST",
+#endif
+#if SQLITE_OMIT_CHECK
+ "OMIT_CHECK",
+#endif
+#if SQLITE_OMIT_COMPLETE
+ "OMIT_COMPLETE",
+#endif
+#if SQLITE_OMIT_COMPOUND_SELECT
+ "OMIT_COMPOUND_SELECT",
+#endif
+#if SQLITE_OMIT_CONFLICT_CLAUSE
+ "OMIT_CONFLICT_CLAUSE",
+#endif
+#if SQLITE_OMIT_CTE
+ "OMIT_CTE",
+#endif
+#if SQLITE_OMIT_DATETIME_FUNCS
+ "OMIT_DATETIME_FUNCS",
+#endif
+#if SQLITE_OMIT_DECLTYPE
+ "OMIT_DECLTYPE",
+#endif
+#if SQLITE_OMIT_DEPRECATED
+ "OMIT_DEPRECATED",
+#endif
+#if SQLITE_OMIT_DISKIO
+ "OMIT_DISKIO",
+#endif
+#if SQLITE_OMIT_EXPLAIN
+ "OMIT_EXPLAIN",
+#endif
+#if SQLITE_OMIT_FLAG_PRAGMAS
+ "OMIT_FLAG_PRAGMAS",
+#endif
+#if SQLITE_OMIT_FLOATING_POINT
+ "OMIT_FLOATING_POINT",
+#endif
+#if SQLITE_OMIT_FOREIGN_KEY
+ "OMIT_FOREIGN_KEY",
+#endif
+#if SQLITE_OMIT_GET_TABLE
+ "OMIT_GET_TABLE",
+#endif
+#if SQLITE_OMIT_HEX_INTEGER
+ "OMIT_HEX_INTEGER",
+#endif
+#if SQLITE_OMIT_INCRBLOB
+ "OMIT_INCRBLOB",
+#endif
+#if SQLITE_OMIT_INTEGRITY_CHECK
+ "OMIT_INTEGRITY_CHECK",
+#endif
+#if SQLITE_OMIT_LIKE_OPTIMIZATION
+ "OMIT_LIKE_OPTIMIZATION",
+#endif
+#if SQLITE_OMIT_LOAD_EXTENSION
+ "OMIT_LOAD_EXTENSION",
+#endif
+#if SQLITE_OMIT_LOCALTIME
+ "OMIT_LOCALTIME",
+#endif
+#if SQLITE_OMIT_LOOKASIDE
+ "OMIT_LOOKASIDE",
+#endif
+#if SQLITE_OMIT_MEMORYDB
+ "OMIT_MEMORYDB",
+#endif
+#if SQLITE_OMIT_OR_OPTIMIZATION
+ "OMIT_OR_OPTIMIZATION",
+#endif
+#if SQLITE_OMIT_PAGER_PRAGMAS
+ "OMIT_PAGER_PRAGMAS",
+#endif
+#if SQLITE_OMIT_PARSER_TRACE
+ "OMIT_PARSER_TRACE",
+#endif
+#if SQLITE_OMIT_POPEN
+ "OMIT_POPEN",
+#endif
+#if SQLITE_OMIT_PRAGMA
+ "OMIT_PRAGMA",
+#endif
+#if SQLITE_OMIT_PROGRESS_CALLBACK
+ "OMIT_PROGRESS_CALLBACK",
+#endif
+#if SQLITE_OMIT_QUICKBALANCE
+ "OMIT_QUICKBALANCE",
+#endif
+#if SQLITE_OMIT_REINDEX
+ "OMIT_REINDEX",
+#endif
+#if SQLITE_OMIT_SCHEMA_PRAGMAS
+ "OMIT_SCHEMA_PRAGMAS",
+#endif
+#if SQLITE_OMIT_SCHEMA_VERSION_PRAGMAS
+ "OMIT_SCHEMA_VERSION_PRAGMAS",
+#endif
+#if SQLITE_OMIT_SHARED_CACHE
+ "OMIT_SHARED_CACHE",
+#endif
+#if SQLITE_OMIT_SHUTDOWN_DIRECTORIES
+ "OMIT_SHUTDOWN_DIRECTORIES",
+#endif
+#if SQLITE_OMIT_SUBQUERY
+ "OMIT_SUBQUERY",
+#endif
+#if SQLITE_OMIT_TCL_VARIABLE
+ "OMIT_TCL_VARIABLE",
+#endif
+#if SQLITE_OMIT_TEMPDB
+ "OMIT_TEMPDB",
+#endif
+#if SQLITE_OMIT_TEST_CONTROL
+ "OMIT_TEST_CONTROL",
+#endif
+#if SQLITE_OMIT_TRACE
+ "OMIT_TRACE",
+#endif
+#if SQLITE_OMIT_TRIGGER
+ "OMIT_TRIGGER",
+#endif
+#if SQLITE_OMIT_TRUNCATE_OPTIMIZATION
+ "OMIT_TRUNCATE_OPTIMIZATION",
+#endif
+#if SQLITE_OMIT_UTF16
+ "OMIT_UTF16",
+#endif
+#if SQLITE_OMIT_VACUUM
+ "OMIT_VACUUM",
+#endif
+#if SQLITE_OMIT_VIEW
+ "OMIT_VIEW",
+#endif
+#if SQLITE_OMIT_VIRTUALTABLE
+ "OMIT_VIRTUALTABLE",
+#endif
+#if SQLITE_OMIT_WAL
+ "OMIT_WAL",
+#endif
+#if SQLITE_OMIT_WSD
+ "OMIT_WSD",
+#endif
+#if SQLITE_OMIT_XFER_OPT
+ "OMIT_XFER_OPT",
+#endif
+#if SQLITE_PCACHE_SEPARATE_HEADER
+ "PCACHE_SEPARATE_HEADER",
+#endif
+#if SQLITE_PERFORMANCE_TRACE
+ "PERFORMANCE_TRACE",
+#endif
+#if SQLITE_POWERSAFE_OVERWRITE
+ "POWERSAFE_OVERWRITE",
+#endif
+#if SQLITE_PREFER_PROXY_LOCKING
+ "PREFER_PROXY_LOCKING",
+#endif
+#if SQLITE_PROXY_DEBUG
+ "PROXY_DEBUG",
+#endif
+#if SQLITE_REVERSE_UNORDERED_SELECTS
+ "REVERSE_UNORDERED_SELECTS",
+#endif
+#if SQLITE_RTREE_INT_ONLY
+ "RTREE_INT_ONLY",
+#endif
+#if SQLITE_SECURE_DELETE
+ "SECURE_DELETE",
+#endif
+#if SQLITE_SMALL_STACK
+ "SMALL_STACK",
+#endif
+#ifdef SQLITE_SORTER_PMASZ
+ "SORTER_PMASZ=" CTIMEOPT_VAL(SQLITE_SORTER_PMASZ),
+#endif
+#if SQLITE_SOUNDEX
+ "SOUNDEX",
+#endif
+#ifdef SQLITE_STAT4_SAMPLES
+ "STAT4_SAMPLES=" CTIMEOPT_VAL(SQLITE_STAT4_SAMPLES),
+#endif
+#ifdef SQLITE_STMTJRNL_SPILL
+ "STMTJRNL_SPILL=" CTIMEOPT_VAL(SQLITE_STMTJRNL_SPILL),
+#endif
+#if SQLITE_SUBSTR_COMPATIBILITY
+ "SUBSTR_COMPATIBILITY",
+#endif
+#if SQLITE_SYSTEM_MALLOC
+ "SYSTEM_MALLOC",
+#endif
+#if SQLITE_TCL
+ "TCL",
+#endif
+#ifdef SQLITE_TEMP_STORE
+ "TEMP_STORE=" CTIMEOPT_VAL(SQLITE_TEMP_STORE),
+#endif
+#if SQLITE_TEST
+ "TEST",
+#endif
+#if defined(SQLITE_THREADSAFE)
+ "THREADSAFE=" CTIMEOPT_VAL(SQLITE_THREADSAFE),
+#elif defined(THREADSAFE)
+ "THREADSAFE=" CTIMEOPT_VAL(THREADSAFE),
+#else
+ "THREADSAFE=1",
+#endif
+#if SQLITE_UNLINK_AFTER_CLOSE
+ "UNLINK_AFTER_CLOSE",
+#endif
+#if SQLITE_UNTESTABLE
+ "UNTESTABLE",
+#endif
+#if SQLITE_USER_AUTHENTICATION
+ "USER_AUTHENTICATION",
+#endif
+#if SQLITE_USE_ALLOCA
+ "USE_ALLOCA",
+#endif
+#if SQLITE_USE_FCNTL_TRACE
+ "USE_FCNTL_TRACE",
+#endif
+#if SQLITE_USE_URI
+ "USE_URI",
+#endif
+#if SQLITE_VDBE_COVERAGE
+ "VDBE_COVERAGE",
+#endif
+#if SQLITE_WIN32_MALLOC
+ "WIN32_MALLOC",
+#endif
+#if SQLITE_ZERO_MALLOC
+ "ZERO_MALLOC",
+#endif
+/*
+** END CODE GENERATED BY tool/mkctime.tcl
+*/
+};
+
+SQLITE_PRIVATE const char **sqlite3CompileOptions(int *pnOpt){
+ *pnOpt = sizeof(sqlite3azCompileOpt) / sizeof(sqlite3azCompileOpt[0]);
+ return (const char**)sqlite3azCompileOpt;
+}
+
+#endif /* SQLITE_OMIT_COMPILEOPTION_DIAGS */
+
+/************** End of ctime.c ***********************************************/
+/************** Begin file sqliteInt.h ***************************************/
+/*
+** 2001 September 15
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+** Internal interface definitions for SQLite.
+**
+*/
+#ifndef SQLITEINT_H
+#define SQLITEINT_H
+
+/* Special Comments:
+**
+** Some comments have special meaning to the tools that measure test
+** coverage:
+**
+** NO_TEST - The branches on this line are not
+** measured by branch coverage. This is
+** used on lines of code that actually
+** implement parts of coverage testing.
+**
+**    OPTIMIZATION-IF-TRUE        - This branch is allowed to always be false
+** and the correct answer is still obtained,
+** though perhaps more slowly.
+**
+**    OPTIMIZATION-IF-FALSE       - This branch is allowed to always be true
+** and the correct answer is still obtained,
+** though perhaps more slowly.
+**
+** PREVENTS-HARMLESS-OVERREAD - This branch prevents a buffer overread
+** that would be harmless and undetectable
+** if it did occur.
+**
+** In all cases, the special comment must be enclosed in the usual
+** slash-asterisk...asterisk-slash comment marks, with no spaces between the
+** asterisks and the comment text.
+*/
+
+/*
+** Make sure the Tcl calling convention macro is defined. This macro is
+** only used by test code and Tcl integration code.
+*/
+#ifndef SQLITE_TCLAPI
+# define SQLITE_TCLAPI
+#endif
+
+/*
+** Include the header file used to customize the compiler options for MSVC.
+** This should be done first so that it can successfully prevent spurious
+** compiler warnings due to subsequent content in this file and other files
+** that are included by this file.
+*/
+/************** Include msvc.h in the middle of sqliteInt.h ******************/
+/************** Begin file msvc.h ********************************************/
+/*
+** 2015 January 12
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+******************************************************************************
+**
+** This file contains code that is specific to MSVC.
+*/
+#ifndef SQLITE_MSVC_H
+#define SQLITE_MSVC_H
+
+#if defined(_MSC_VER)
+#pragma warning(disable : 4054)
+#pragma warning(disable : 4055)
+#pragma warning(disable : 4100)
+#pragma warning(disable : 4127)
+#pragma warning(disable : 4130)
+#pragma warning(disable : 4152)
+#pragma warning(disable : 4189)
+#pragma warning(disable : 4206)
+#pragma warning(disable : 4210)
+#pragma warning(disable : 4232)
+#pragma warning(disable : 4244)
+#pragma warning(disable : 4305)
+#pragma warning(disable : 4306)
+#pragma warning(disable : 4702)
+#pragma warning(disable : 4706)
+#endif /* defined(_MSC_VER) */
+
+#if defined(_MSC_VER) && !defined(_WIN64)
+#undef SQLITE_4_BYTE_ALIGNED_MALLOC
+#define SQLITE_4_BYTE_ALIGNED_MALLOC
+#endif /* defined(_MSC_VER) && !defined(_WIN64) */
+
+#endif /* SQLITE_MSVC_H */
+
+/************** End of msvc.h ************************************************/
+/************** Continuing where we left off in sqliteInt.h ******************/
+
+/*
+** Special setup for VxWorks
+*/
+/************** Include vxworks.h in the middle of sqliteInt.h ***************/
+/************** Begin file vxworks.h *****************************************/
+/*
+** 2015-03-02
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+******************************************************************************
+**
+** This file contains code that is specific to Wind River's VxWorks
+*/
+#if defined(__RTP__) || defined(_WRS_KERNEL)
+/* This is VxWorks. Set up things specially for that OS
+*/
+#include <vxWorks.h>
+#include <pthread.h> /* amalgamator: dontcache */
+#define OS_VXWORKS 1
+#define SQLITE_OS_OTHER 0
+#define SQLITE_HOMEGROWN_RECURSIVE_MUTEX 1
+#define SQLITE_OMIT_LOAD_EXTENSION 1
+#define SQLITE_ENABLE_LOCKING_STYLE 0
+#define HAVE_UTIME 1
+#else
+/* This is not VxWorks. */
+#define OS_VXWORKS 0
+#define HAVE_FCHOWN 1
+#define HAVE_READLINK 1
+#define HAVE_LSTAT 1
+#endif /* defined(__RTP__) || defined(_WRS_KERNEL) */
+
+/************** End of vxworks.h *********************************************/
+/************** Continuing where we left off in sqliteInt.h ******************/
+
+/*
+** These #defines should enable >2GB file support on POSIX if the
+** underlying operating system supports it. If the OS lacks
+** large file support, or if the OS is windows, these should be no-ops.
+**
+** Ticket #2739: The _LARGEFILE_SOURCE macro must appear before any
+** system #includes. Hence, this block of code must be the very first
+** code in all source files.
+**
+** Large file support can be disabled using the -DSQLITE_DISABLE_LFS switch
+** on the compiler command line. This is necessary if you are compiling
+** on a recent machine (ex: Red Hat 7.2) but you want your code to work
+** on an older machine (ex: Red Hat 6.0). If you compile on Red Hat 7.2
+** without this option, LFS is enabled. But LFS does not exist in the kernel
+** in Red Hat 6.0, so the code won't work. Hence, for maximum binary
+** portability you should omit LFS.
+**
+** The previous paragraph was written in 2005. (This paragraph is written
+** on 2008-11-28.) These days, all Linux kernels support large files, so
+** you should probably leave LFS enabled. But some embedded platforms might
+** lack LFS in which case the SQLITE_DISABLE_LFS macro might still be useful.
+**
+** Similar is true for Mac OS X. LFS is only supported on Mac OS X 9 and later.
+*/
+#ifndef SQLITE_DISABLE_LFS
+# define _LARGE_FILE 1
+# ifndef _FILE_OFFSET_BITS
+# define _FILE_OFFSET_BITS 64
+# endif
+# define _LARGEFILE_SOURCE 1
+#endif
+
+/* The GCC_VERSION and MSVC_VERSION macros are used to
+** conditionally include optimizations for each of these compilers. A
+** value of 0 means that compiler is not being used. The
+** SQLITE_DISABLE_INTRINSIC macro means do not use any compiler-specific
+** optimizations, and hence set all compiler macros to 0
+**
+** There was once also a CLANG_VERSION macro. However, we learned that the
+** version numbers in clang are for "marketing" only and are inconsistent
+** and unreliable. Fortunately, all versions of clang also recognize the
+** gcc version numbers and have reasonable settings for gcc version numbers,
+** so the GCC_VERSION macro will be set to a correct non-zero value even
+** when compiling with clang.
+*/
+#if defined(__GNUC__) && !defined(SQLITE_DISABLE_INTRINSIC)
+# define GCC_VERSION (__GNUC__*1000000+__GNUC_MINOR__*1000+__GNUC_PATCHLEVEL__)
+#else
+# define GCC_VERSION 0
+#endif
+#if defined(_MSC_VER) && !defined(SQLITE_DISABLE_INTRINSIC)
+# define MSVC_VERSION _MSC_VER
+#else
+# define MSVC_VERSION 0
+#endif
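+
+/* Illustrative: for gcc 9.3.1 the GCC_VERSION formula above yields
+** 9*1000000 + 3*1000 + 1 == 9003001, so a test such as GCC_VERSION>=7000000
+** checks for gcc 7.0.0 or later. */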
+
+/* Needed for various definitions... */
+#if defined(__GNUC__) && !defined(_GNU_SOURCE)
+# define _GNU_SOURCE
+#endif
+
+#if defined(__OpenBSD__) && !defined(_BSD_SOURCE)
+# define _BSD_SOURCE
+#endif
+
+/*
+** Macro to disable warnings about missing "break" at the end of a "case".
+*/
+#if GCC_VERSION>=7000000
+# define deliberate_fall_through __attribute__((fallthrough));
+#else
+# define deliberate_fall_through
+#endif
+
+/*
+** For MinGW, check to see if we can include the header file containing its
+** version information, among other things. Normally, this internal MinGW
+** header file would [only] be included automatically by other MinGW header
+** files; however, the contained version information is now required by this
+** header file to work around binary compatibility issues (see below) and
+** this is the only known way to reliably obtain it. This entire #if block
+** would be completely unnecessary if there was any other way of detecting
+** MinGW via their preprocessor (e.g. if they customized their GCC to define
+** some MinGW-specific macros). When compiling for MinGW, either the
+** _HAVE_MINGW_H or _HAVE__MINGW_H (note the extra underscore) macro must be
+** defined; otherwise, detection of conditions specific to MinGW will be
+** disabled.
+*/
+#if defined(_HAVE_MINGW_H)
+# include "mingw.h"
+#elif defined(_HAVE__MINGW_H)
+# include "_mingw.h"
+#endif
+
+/*
+** For MinGW version 4.x (and higher), check to see if the _USE_32BIT_TIME_T
+** define is required to maintain binary compatibility with the MSVC runtime
+** library in use (e.g. for Windows XP).
+*/
+#if !defined(_USE_32BIT_TIME_T) && !defined(_USE_64BIT_TIME_T) && \
+ defined(_WIN32) && !defined(_WIN64) && \
+ defined(__MINGW_MAJOR_VERSION) && __MINGW_MAJOR_VERSION >= 4 && \
+ defined(__MSVCRT__)
+# define _USE_32BIT_TIME_T
+#endif
+
+/* The public SQLite interface. The _FILE_OFFSET_BITS macro must appear
+** first in QNX. Also, the _USE_32BIT_TIME_T macro must appear first for
+** MinGW.
+*/
+/************** Include sqlite3.h in the middle of sqliteInt.h ***************/
+/************** Begin file sqlite3.h *****************************************/
+/*
+** 2001-09-15
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+** This header file defines the interface that the SQLite library
+** presents to client programs. If a C-function, structure, datatype,
+** or constant definition does not appear in this file, then it is
+** not a published API of SQLite, is subject to change without
+** notice, and should not be referenced by programs that use SQLite.
+**
+** Some of the definitions that are in this file are marked as
+** "experimental". Experimental interfaces are normally new
+** features recently added to SQLite. We do not anticipate changes
+** to experimental interfaces but reserve the right to make minor changes
+** if experience from use "in the wild" suggests such changes are prudent.
+**
+** The official C-language API documentation for SQLite is derived
+** from comments in this file. This file is the authoritative source
+** on how SQLite interfaces are supposed to operate.
+**
+** The name of this file under configuration management is "sqlite.h.in".
+** The makefile makes some minor changes to this file (such as inserting
+** the version number) and changes its name to "sqlite3.h" as
+** part of the build process.
+*/
+#ifndef SQLITE3_H
+#define SQLITE3_H
+#include <stdarg.h> /* Needed for the definition of va_list */
+
+/*
+** Make sure we can call this stuff from C++.
+*/
+#if 0
+extern "C" {
+#endif
+
+
+/*
+** Provide the ability to override linkage features of the interface.
+*/
+#ifndef SQLITE_EXTERN
+# define SQLITE_EXTERN extern
+#endif
+#ifndef SQLITE_API
+# define SQLITE_API
+#endif
+#ifndef SQLITE_CDECL
+# define SQLITE_CDECL
+#endif
+#ifndef SQLITE_APICALL
+# define SQLITE_APICALL
+#endif
+#ifndef SQLITE_STDCALL
+# define SQLITE_STDCALL SQLITE_APICALL
+#endif
+#ifndef SQLITE_CALLBACK
+# define SQLITE_CALLBACK
+#endif
+#ifndef SQLITE_SYSAPI
+# define SQLITE_SYSAPI
+#endif
+
+/*
+** These no-op macros are used in front of interfaces to mark those
+** interfaces as either deprecated or experimental. New applications
+** should not use deprecated interfaces - they are supported for backwards
+** compatibility only. Application writers should be aware that
+** experimental interfaces are subject to change in point releases.
+**
+** These macros used to resolve to various kinds of compiler magic that
+** would generate warning messages when they were used. But that
+** compiler magic ended up generating such a flurry of bug reports
+** that we have taken it all out and gone back to using simple
+** noop macros.
+*/
+#define SQLITE_DEPRECATED
+#define SQLITE_EXPERIMENTAL
+
+/*
+** Ensure these symbols were not defined by some previous header file.
+*/
+#ifdef SQLITE_VERSION
+# undef SQLITE_VERSION
+#endif
+#ifdef SQLITE_VERSION_NUMBER
+# undef SQLITE_VERSION_NUMBER
+#endif
+
+/*
+** CAPI3REF: Compile-Time Library Version Numbers
+**
+** ^(The [SQLITE_VERSION] C preprocessor macro in the sqlite3.h header
+** evaluates to a string literal that is the SQLite version in the
+** format "X.Y.Z" where X is the major version number (always 3 for
+** SQLite3) and Y is the minor version number and Z is the release number.)^
+** ^(The [SQLITE_VERSION_NUMBER] C preprocessor macro resolves to an integer
+** with the value (X*1000000 + Y*1000 + Z) where X, Y, and Z are the same
+** numbers used in [SQLITE_VERSION].)^
+** The SQLITE_VERSION_NUMBER for any given release of SQLite will also
+** be larger than the release from which it is derived. Either Y will
+** be held constant and Z will be incremented or else Y will be incremented
+** and Z will be reset to zero.
+**
+** Since [version 3.6.18] ([dateof:3.6.18]),
+** SQLite source code has been stored in the
+** <a href="http://www.fossil-scm.org/">Fossil configuration management
+** system</a>. ^The SQLITE_SOURCE_ID macro evaluates to
+** a string which identifies a particular check-in of SQLite
+** within its configuration management system. ^The SQLITE_SOURCE_ID
+** string contains the date and time of the check-in (UTC) and a SHA1
+** or SHA3-256 hash of the entire source tree. If the source code has
+** been edited in any way since it was last checked in, then the last
+** four hexadecimal digits of the hash may be modified.
+**
+** See also: [sqlite3_libversion()],
+** [sqlite3_libversion_number()], [sqlite3_sourceid()],
+** [sqlite_version()] and [sqlite_source_id()].
+*/
+#define SQLITE_VERSION "3.33.0"
+#define SQLITE_VERSION_NUMBER 3033000
+#define SQLITE_SOURCE_ID "2020-08-14 13:23:32 fca8dc8b578f215a969cd899336378966156154710873e68b3d9ac5881b0alt1"
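+
+/* Illustrative: for release 3.33.0 the scheme above gives
+** 3*1000000 + 33*1000 + 0 == 3033000, which matches the
+** SQLITE_VERSION_NUMBER defined here. */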
+
+/*
+** CAPI3REF: Run-Time Library Version Numbers
+** KEYWORDS: sqlite3_version sqlite3_sourceid
+**
+** These interfaces provide the same information as the [SQLITE_VERSION],
+** [SQLITE_VERSION_NUMBER], and [SQLITE_SOURCE_ID] C preprocessor macros
+** but are associated with the library instead of the header file. ^(Cautious
+** programmers might include assert() statements in their application to
+** verify that values returned by these interfaces match the macros in
+** the header, and thus ensure that the application is
+** compiled with matching library and header files.
+**
+** <blockquote><pre>
+** assert( sqlite3_libversion_number()==SQLITE_VERSION_NUMBER );
+** assert( strncmp(sqlite3_sourceid(),SQLITE_SOURCE_ID,80)==0 );
+** assert( strcmp(sqlite3_libversion(),SQLITE_VERSION)==0 );
+** </pre></blockquote>)^
+**
+** ^The sqlite3_version[] string constant contains the text of [SQLITE_VERSION]
+** macro.  ^The sqlite3_libversion() function returns a pointer to
+** the sqlite3_version[] string constant.  The sqlite3_libversion()
+** function is provided for use in DLLs since DLL users usually do not have
+** direct access to string constants within the DLL. ^The
+** sqlite3_libversion_number() function returns an integer equal to
+** [SQLITE_VERSION_NUMBER]. ^(The sqlite3_sourceid() function returns
+** a pointer to a string constant whose value is the same as the
+** [SQLITE_SOURCE_ID] C preprocessor macro. Except if SQLite is built
+** using an edited copy of [the amalgamation], then the last four characters
+** of the hash might be different from [SQLITE_SOURCE_ID].)^
+**
+** See also: [sqlite_version()] and [sqlite_source_id()].
+*/
+SQLITE_API const char sqlite3_version[] = SQLITE_VERSION;
+SQLITE_API const char *sqlite3_libversion(void);
+SQLITE_API const char *sqlite3_sourceid(void);
+SQLITE_API int sqlite3_libversion_number(void);
+
+/*
+** CAPI3REF: Run-Time Library Compilation Options Diagnostics
+**
+** ^The sqlite3_compileoption_used() function returns 0 or 1
+** indicating whether the specified option was defined at
+** compile time. ^The SQLITE_ prefix may be omitted from the
+** option name passed to sqlite3_compileoption_used().
+**
+** ^The sqlite3_compileoption_get() function allows iterating
+** over the list of options that were defined at compile time by
+** returning the N-th compile time option string. ^If N is out of range,
+** sqlite3_compileoption_get() returns a NULL pointer. ^The SQLITE_
+** prefix is omitted from any strings returned by
+** sqlite3_compileoption_get().
+**
+** ^Support for the diagnostic functions sqlite3_compileoption_used()
+** and sqlite3_compileoption_get() may be omitted by specifying the
+** [SQLITE_OMIT_COMPILEOPTION_DIAGS] option at compile time.
+**
+** See also: SQL functions [sqlite_compileoption_used()] and
+** [sqlite_compileoption_get()] and the [compile_options pragma].
+*/
+#ifndef SQLITE_OMIT_COMPILEOPTION_DIAGS
+SQLITE_API int sqlite3_compileoption_used(const char *zOptName);
+SQLITE_API const char *sqlite3_compileoption_get(int N);
+#else
+# define sqlite3_compileoption_used(X) 0
+# define sqlite3_compileoption_get(X) ((void*)0)
+#endif
+
+/*
+** CAPI3REF: Test To See If The Library Is Threadsafe
+**
+** ^The sqlite3_threadsafe() function returns zero if and only if
+** SQLite was compiled with mutexing code omitted due to the
+** [SQLITE_THREADSAFE] compile-time option being set to 0.
+**
+** SQLite can be compiled with or without mutexes. When
+** the [SQLITE_THREADSAFE] C preprocessor macro is 1 or 2, mutexes
+** are enabled and SQLite is threadsafe. When the
+** [SQLITE_THREADSAFE] macro is 0,
+** the mutexes are omitted. Without the mutexes, it is not safe
+** to use SQLite concurrently from more than one thread.
+**
+** Enabling mutexes incurs a measurable performance penalty.
+** So if speed is of utmost importance, it makes sense to disable
+** the mutexes. But for maximum safety, mutexes should be enabled.
+** ^The default behavior is for mutexes to be enabled.
+**
+** This interface can be used by an application to make sure that the
+** version of SQLite that it is linking against was compiled with
+** the desired setting of the [SQLITE_THREADSAFE] macro.
+**
+** This interface only reports on the compile-time mutex setting
+** of the [SQLITE_THREADSAFE] flag. If SQLite is compiled with
+** SQLITE_THREADSAFE=1 or =2 then mutexes are enabled by default but
+** can be fully or partially disabled using a call to [sqlite3_config()]
+** with the verbs [SQLITE_CONFIG_SINGLETHREAD], [SQLITE_CONFIG_MULTITHREAD],
+** or [SQLITE_CONFIG_SERIALIZED]. ^(The return value of the
+** sqlite3_threadsafe() function shows only the compile-time setting of
+** thread safety, not any run-time changes to that setting made by
+** sqlite3_config(). In other words, the return value from sqlite3_threadsafe()
+** is unchanged by calls to sqlite3_config().)^
+**
+** See the [threading mode] documentation for additional information.
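+**
+** For example, an application that requires a threadsafe library might
+** verify the compile-time setting at startup (an illustrative sketch,
+** not part of this interface):
+**
+** <blockquote><pre>
+** if( sqlite3_threadsafe()==0 ){
+**   fprintf(stderr, "SQLite was compiled with SQLITE_THREADSAFE=0\n");
+**   exit(1);  // refuse to run in a multi-threaded configuration
+** }
+** </pre></blockquote>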
+*/
+SQLITE_API int sqlite3_threadsafe(void);
+
+/*
+** CAPI3REF: Database Connection Handle
+** KEYWORDS: {database connection} {database connections}
+**
+** Each open SQLite database is represented by a pointer to an instance of
+** the opaque structure named "sqlite3". It is useful to think of an sqlite3
+** pointer as an object. The [sqlite3_open()], [sqlite3_open16()], and
+** [sqlite3_open_v2()] interfaces are its constructors, and [sqlite3_close()]
+** and [sqlite3_close_v2()] are its destructors. There are many other
+** interfaces (such as
+** [sqlite3_prepare_v2()], [sqlite3_create_function()], and
+** [sqlite3_busy_timeout()] to name but three) that are methods on an
+** sqlite3 object.
+*/
+typedef struct sqlite3 sqlite3;
+
+/*
+** CAPI3REF: 64-Bit Integer Types
+** KEYWORDS: sqlite_int64 sqlite_uint64
+**
+** Because there is no cross-platform way to specify 64-bit integer types,
+** SQLite includes typedefs for 64-bit signed and unsigned integers.
+**
+** The sqlite3_int64 and sqlite3_uint64 types are the preferred definitions.
+** The sqlite_int64 and sqlite_uint64 types are supported for backwards
+** compatibility only.
+**
+** ^The sqlite3_int64 and sqlite_int64 types can store integer values
+** between -9223372036854775808 and +9223372036854775807 inclusive. ^The
+** sqlite3_uint64 and sqlite_uint64 types can store integer values
+** between 0 and +18446744073709551615 inclusive.
+*/
+#ifdef SQLITE_INT64_TYPE
+ typedef SQLITE_INT64_TYPE sqlite_int64;
+# ifdef SQLITE_UINT64_TYPE
+ typedef SQLITE_UINT64_TYPE sqlite_uint64;
+# else
+ typedef unsigned SQLITE_INT64_TYPE sqlite_uint64;
+# endif
+#elif defined(_MSC_VER) || defined(__BORLANDC__)
+ typedef __int64 sqlite_int64;
+ typedef unsigned __int64 sqlite_uint64;
+#else
+ typedef long long int sqlite_int64;
+ typedef unsigned long long int sqlite_uint64;
+#endif
+typedef sqlite_int64 sqlite3_int64;
+typedef sqlite_uint64 sqlite3_uint64;
+
+/*
+** If compiling for a processor that lacks floating point support,
+** substitute integer for floating-point.
+*/
+#ifdef SQLITE_OMIT_FLOATING_POINT
+# define double sqlite3_int64
+#endif
+
+/*
+** CAPI3REF: Closing A Database Connection
+** DESTRUCTOR: sqlite3
+**
+** ^The sqlite3_close() and sqlite3_close_v2() routines are destructors
+** for the [sqlite3] object.
+** ^Calls to sqlite3_close() and sqlite3_close_v2() return [SQLITE_OK] if
+** the [sqlite3] object is successfully destroyed and all associated
+** resources are deallocated.
+**
+** Ideally, applications should [sqlite3_finalize | finalize] all
+** [prepared statements], [sqlite3_blob_close | close] all [BLOB handles], and
+** [sqlite3_backup_finish | finish] all [sqlite3_backup] objects associated
+** with the [sqlite3] object prior to attempting to close the object.
+** ^If the database connection is associated with unfinalized prepared
+** statements, unclosed BLOB handles, and/or unfinished sqlite3_backup
+** objects, then sqlite3_close() will leave the database connection open
+** and return [SQLITE_BUSY]. ^If sqlite3_close_v2() is called with
+** unfinalized prepared statements, unclosed BLOB handles, and/or
+** unfinished sqlite3_backup objects,
+** it returns [SQLITE_OK] regardless, but instead of deallocating the database
+** connection immediately, it marks the database connection as an unusable
+** "zombie" and makes arrangements to automatically deallocate the database
+** connection after all prepared statements are finalized, all BLOB handles
+** are closed, and all backups have finished. The sqlite3_close_v2() interface
+** is intended for use with host languages that are garbage collected, and
+** where the order in which destructors are called is arbitrary.
+**
+** ^If an [sqlite3] object is destroyed while a transaction is open,
+** the transaction is automatically rolled back.
+**
+** The C parameter to [sqlite3_close(C)] and [sqlite3_close_v2(C)]
+** must be either a NULL
+** pointer or an [sqlite3] object pointer obtained
+** from [sqlite3_open()], [sqlite3_open16()], or
+** [sqlite3_open_v2()], and not previously closed.
+** ^Calling sqlite3_close() or sqlite3_close_v2() with a NULL pointer
+** argument is a harmless no-op.
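+**
+** A hedged sketch of an orderly shutdown (illustrative application code;
+** assumes "db" is an open [database connection]):
+**
+** <blockquote><pre>
+** sqlite3_stmt *pStmt;
+** while( (pStmt = sqlite3_next_stmt(db, 0))!=0 ){
+**   sqlite3_finalize(pStmt);  // finalize every prepared statement
+** }
+** if( sqlite3_close(db)==SQLITE_BUSY ){
+**   sqlite3_close_v2(db);  // defer teardown until resources are released
+** }
+** </pre></blockquote>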
+*/
+SQLITE_API int sqlite3_close(sqlite3*);
+SQLITE_API int sqlite3_close_v2(sqlite3*);
+
+/*
+** The type for a callback function.
+** This is legacy and deprecated. It is included for historical
+** compatibility and is not documented.
+*/
+typedef int (*sqlite3_callback)(void*,int,char**, char**);
+
+/*
+** CAPI3REF: One-Step Query Execution Interface
+** METHOD: sqlite3
+**
+** The sqlite3_exec() interface is a convenience wrapper around
+** [sqlite3_prepare_v2()], [sqlite3_step()], and [sqlite3_finalize()],
+** that allows an application to run multiple statements of SQL
+** without having to use a lot of C code.
+**
+** ^The sqlite3_exec() interface runs zero or more UTF-8 encoded,
+** semicolon-separated SQL statements passed into its 2nd argument,
+** in the context of the [database connection] passed in as its 1st
+** argument. ^If the callback function of the 3rd argument to
+** sqlite3_exec() is not NULL, then it is invoked for each result row
+** coming out of the evaluated SQL statements. ^The 4th argument to
+** sqlite3_exec() is relayed through to the 1st argument of each
+** callback invocation. ^If the callback pointer to sqlite3_exec()
+** is NULL, then no callback is ever invoked and result rows are
+** ignored.
+**
+** ^If an error occurs while evaluating the SQL statements passed into
+** sqlite3_exec(), then execution of the current statement stops and
+** subsequent statements are skipped. ^If the 5th parameter to sqlite3_exec()
+** is not NULL then any error message is written into memory obtained
+** from [sqlite3_malloc()] and passed back through the 5th parameter.
+** To avoid memory leaks, the application should invoke [sqlite3_free()]
+** on error message strings returned through the 5th parameter of
+** sqlite3_exec() after the error message string is no longer needed.
+** ^If the 5th parameter to sqlite3_exec() is not NULL and no errors
+** occur, then sqlite3_exec() sets the pointer in its 5th parameter to
+** NULL before returning.
+**
+** ^If an sqlite3_exec() callback returns non-zero, the sqlite3_exec()
+** routine returns SQLITE_ABORT without invoking the callback again and
+** without running any subsequent SQL statements.
+**
+** ^The 2nd argument to the sqlite3_exec() callback function is the
+** number of columns in the result. ^The 3rd argument to the sqlite3_exec()
+** callback is an array of pointers to strings obtained as if from
+** [sqlite3_column_text()], one for each column. ^If an element of a
+** result row is NULL then the corresponding string pointer for the
+** sqlite3_exec() callback is a NULL pointer. ^The 4th argument to the
+** sqlite3_exec() callback is an array of pointers to strings where each
+** entry represents the name of the corresponding result column as obtained
+** from [sqlite3_column_name()].
+**
+** ^If the 2nd parameter to sqlite3_exec() is a NULL pointer, a pointer
+** to an empty string, or a pointer that contains only whitespace and/or
+** SQL comments, then no SQL statements are evaluated and the database
+** is not changed.
+**
+** Restrictions:
+**
+** <ul>
+** <li> The application must ensure that the 1st parameter to sqlite3_exec()
+** is a valid and open [database connection].
+** <li> The application must not close the [database connection] specified by
+** the 1st parameter to sqlite3_exec() while sqlite3_exec() is running.
+** <li> The application must not modify the SQL statement text passed into
+** the 2nd parameter of sqlite3_exec() while sqlite3_exec() is running.
+** </ul>
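+**
+** A minimal sketch of running a query with sqlite3_exec() (illustrative
+** application code; the callback name is arbitrary and "db" is assumed
+** to be an open [database connection]):
+**
+** <blockquote><pre>
+** static int show_row(void *pArg, int nCol, char **azVal, char **azCol){
+**   int i;
+**   for(i=0; i<nCol; i++){
+**     printf("%s = %s\n", azCol[i], azVal[i] ? azVal[i] : "NULL");
+**   }
+**   return 0;  // non-zero would make sqlite3_exec() return SQLITE_ABORT
+** }
+**
+** char *zErr = 0;
+** int rc = sqlite3_exec(db, "SELECT * FROM t1;", show_row, 0, &zErr);
+** if( rc!=SQLITE_OK ){
+**   fprintf(stderr, "error: %s\n", zErr ? zErr : "unknown");
+**   sqlite3_free(zErr);  // error strings come from sqlite3_malloc()
+** }
+** </pre></blockquote>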
+*/
+SQLITE_API int sqlite3_exec(
+ sqlite3*, /* An open database */
+ const char *sql, /* SQL to be evaluated */
+ int (*callback)(void*,int,char**,char**), /* Callback function */
+ void *, /* 1st argument to callback */
+ char **errmsg /* Error msg written here */
+);
+
+/*
+** CAPI3REF: Result Codes
+** KEYWORDS: {result code definitions}
+**
+** Many SQLite functions return an integer result code from the set shown
+** here in order to indicate success or failure.
+**
+** New error codes may be added in future versions of SQLite.
+**
+** See also: [extended result code definitions]
+*/
+#define SQLITE_OK 0 /* Successful result */
+/* beginning-of-error-codes */
+#define SQLITE_ERROR 1 /* Generic error */
+#define SQLITE_INTERNAL 2 /* Internal logic error in SQLite */
+#define SQLITE_PERM 3 /* Access permission denied */
+#define SQLITE_ABORT 4 /* Callback routine requested an abort */
+#define SQLITE_BUSY 5 /* The database file is locked */
+#define SQLITE_LOCKED 6 /* A table in the database is locked */
+#define SQLITE_NOMEM 7 /* A malloc() failed */
+#define SQLITE_READONLY 8 /* Attempt to write a readonly database */
+#define SQLITE_INTERRUPT 9 /* Operation terminated by sqlite3_interrupt()*/
+#define SQLITE_IOERR 10 /* Some kind of disk I/O error occurred */
+#define SQLITE_CORRUPT 11 /* The database disk image is malformed */
+#define SQLITE_NOTFOUND 12 /* Unknown opcode in sqlite3_file_control() */
+#define SQLITE_FULL 13 /* Insertion failed because database is full */
+#define SQLITE_CANTOPEN 14 /* Unable to open the database file */
+#define SQLITE_PROTOCOL 15 /* Database lock protocol error */
+#define SQLITE_EMPTY 16 /* Internal use only */
+#define SQLITE_SCHEMA 17 /* The database schema changed */
+#define SQLITE_TOOBIG 18 /* String or BLOB exceeds size limit */
+#define SQLITE_CONSTRAINT 19 /* Abort due to constraint violation */
+#define SQLITE_MISMATCH 20 /* Data type mismatch */
+#define SQLITE_MISUSE 21 /* Library used incorrectly */
+#define SQLITE_NOLFS 22 /* Uses OS features not supported on host */
+#define SQLITE_AUTH 23 /* Authorization denied */
+#define SQLITE_FORMAT 24 /* Not used */
+#define SQLITE_RANGE 25 /* 2nd parameter to sqlite3_bind out of range */
+#define SQLITE_NOTADB 26 /* File opened that is not a database file */
+#define SQLITE_NOTICE 27 /* Notifications from sqlite3_log() */
+#define SQLITE_WARNING 28 /* Warnings from sqlite3_log() */
+#define SQLITE_ROW 100 /* sqlite3_step() has another row ready */
+#define SQLITE_DONE 101 /* sqlite3_step() has finished executing */
+/* end-of-error-codes */
+
+/*
+** CAPI3REF: Extended Result Codes
+** KEYWORDS: {extended result code definitions}
+**
+** In its default configuration, SQLite API routines return one of 30 integer
+** [result codes]. However, experience has shown that many of
+** these result codes are too coarse-grained. They do not provide as
+** much information about problems as programmers might like. In an effort to
+** address this, newer versions of SQLite (version 3.3.8 [dateof:3.3.8]
+** and later) include
+** support for additional result codes that provide more detailed information
+** about errors. These [extended result codes] are enabled or disabled
+** on a per database connection basis using the
+** [sqlite3_extended_result_codes()] API. Or, the extended code for
+** the most recent error can be obtained using
+** [sqlite3_extended_errcode()].
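+**
+** Each extended result code is formed by placing a detail code in the
+** upper bits of its primary [result code], as the definitions below show,
+** so the primary code can always be recovered by masking with 0xff
+** (an illustrative sketch; assumes an open [database connection]):
+**
+** <blockquote><pre>
+** int xrc = sqlite3_extended_errcode(db);  // e.g. SQLITE_IOERR_FSYNC
+** int rc  = xrc & 0xff;                    // primary code: SQLITE_IOERR
+** </pre></blockquote>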
+*/
+#define SQLITE_ERROR_MISSING_COLLSEQ (SQLITE_ERROR | (1<<8))
+#define SQLITE_ERROR_RETRY (SQLITE_ERROR | (2<<8))
+#define SQLITE_ERROR_SNAPSHOT (SQLITE_ERROR | (3<<8))
+#define SQLITE_IOERR_READ (SQLITE_IOERR | (1<<8))
+#define SQLITE_IOERR_SHORT_READ (SQLITE_IOERR | (2<<8))
+#define SQLITE_IOERR_WRITE (SQLITE_IOERR | (3<<8))
+#define SQLITE_IOERR_FSYNC (SQLITE_IOERR | (4<<8))
+#define SQLITE_IOERR_DIR_FSYNC (SQLITE_IOERR | (5<<8))
+#define SQLITE_IOERR_TRUNCATE (SQLITE_IOERR | (6<<8))
+#define SQLITE_IOERR_FSTAT (SQLITE_IOERR | (7<<8))
+#define SQLITE_IOERR_UNLOCK (SQLITE_IOERR | (8<<8))
+#define SQLITE_IOERR_RDLOCK (SQLITE_IOERR | (9<<8))
+#define SQLITE_IOERR_DELETE (SQLITE_IOERR | (10<<8))
+#define SQLITE_IOERR_BLOCKED (SQLITE_IOERR | (11<<8))
+#define SQLITE_IOERR_NOMEM (SQLITE_IOERR | (12<<8))
+#define SQLITE_IOERR_ACCESS (SQLITE_IOERR | (13<<8))
+#define SQLITE_IOERR_CHECKRESERVEDLOCK (SQLITE_IOERR | (14<<8))
+#define SQLITE_IOERR_LOCK (SQLITE_IOERR | (15<<8))
+#define SQLITE_IOERR_CLOSE (SQLITE_IOERR | (16<<8))
+#define SQLITE_IOERR_DIR_CLOSE (SQLITE_IOERR | (17<<8))
+#define SQLITE_IOERR_SHMOPEN (SQLITE_IOERR | (18<<8))
+#define SQLITE_IOERR_SHMSIZE (SQLITE_IOERR | (19<<8))
+#define SQLITE_IOERR_SHMLOCK (SQLITE_IOERR | (20<<8))
+#define SQLITE_IOERR_SHMMAP (SQLITE_IOERR | (21<<8))
+#define SQLITE_IOERR_SEEK (SQLITE_IOERR | (22<<8))
+#define SQLITE_IOERR_DELETE_NOENT (SQLITE_IOERR | (23<<8))
+#define SQLITE_IOERR_MMAP (SQLITE_IOERR | (24<<8))
+#define SQLITE_IOERR_GETTEMPPATH (SQLITE_IOERR | (25<<8))
+#define SQLITE_IOERR_CONVPATH (SQLITE_IOERR | (26<<8))
+#define SQLITE_IOERR_VNODE (SQLITE_IOERR | (27<<8))
+#define SQLITE_IOERR_AUTH (SQLITE_IOERR | (28<<8))
+#define SQLITE_IOERR_BEGIN_ATOMIC (SQLITE_IOERR | (29<<8))
+#define SQLITE_IOERR_COMMIT_ATOMIC (SQLITE_IOERR | (30<<8))
+#define SQLITE_IOERR_ROLLBACK_ATOMIC (SQLITE_IOERR | (31<<8))
+#define SQLITE_IOERR_DATA (SQLITE_IOERR | (32<<8))
+#define SQLITE_LOCKED_SHAREDCACHE (SQLITE_LOCKED | (1<<8))
+#define SQLITE_LOCKED_VTAB (SQLITE_LOCKED | (2<<8))
+#define SQLITE_BUSY_RECOVERY (SQLITE_BUSY | (1<<8))
+#define SQLITE_BUSY_SNAPSHOT (SQLITE_BUSY | (2<<8))
+#define SQLITE_BUSY_TIMEOUT (SQLITE_BUSY | (3<<8))
+#define SQLITE_CANTOPEN_NOTEMPDIR (SQLITE_CANTOPEN | (1<<8))
+#define SQLITE_CANTOPEN_ISDIR (SQLITE_CANTOPEN | (2<<8))
+#define SQLITE_CANTOPEN_FULLPATH (SQLITE_CANTOPEN | (3<<8))
+#define SQLITE_CANTOPEN_CONVPATH (SQLITE_CANTOPEN | (4<<8))
+#define SQLITE_CANTOPEN_DIRTYWAL (SQLITE_CANTOPEN | (5<<8)) /* Not Used */
+#define SQLITE_CANTOPEN_SYMLINK (SQLITE_CANTOPEN | (6<<8))
+#define SQLITE_CORRUPT_VTAB (SQLITE_CORRUPT | (1<<8))
+#define SQLITE_CORRUPT_SEQUENCE (SQLITE_CORRUPT | (2<<8))
+#define SQLITE_CORRUPT_INDEX (SQLITE_CORRUPT | (3<<8))
+#define SQLITE_READONLY_RECOVERY (SQLITE_READONLY | (1<<8))
+#define SQLITE_READONLY_CANTLOCK (SQLITE_READONLY | (2<<8))
+#define SQLITE_READONLY_ROLLBACK (SQLITE_READONLY | (3<<8))
+#define SQLITE_READONLY_DBMOVED (SQLITE_READONLY | (4<<8))
+#define SQLITE_READONLY_CANTINIT (SQLITE_READONLY | (5<<8))
+#define SQLITE_READONLY_DIRECTORY (SQLITE_READONLY | (6<<8))
+#define SQLITE_ABORT_ROLLBACK (SQLITE_ABORT | (2<<8))
+#define SQLITE_CONSTRAINT_CHECK (SQLITE_CONSTRAINT | (1<<8))
+#define SQLITE_CONSTRAINT_COMMITHOOK (SQLITE_CONSTRAINT | (2<<8))
+#define SQLITE_CONSTRAINT_FOREIGNKEY (SQLITE_CONSTRAINT | (3<<8))
+#define SQLITE_CONSTRAINT_FUNCTION (SQLITE_CONSTRAINT | (4<<8))
+#define SQLITE_CONSTRAINT_NOTNULL (SQLITE_CONSTRAINT | (5<<8))
+#define SQLITE_CONSTRAINT_PRIMARYKEY (SQLITE_CONSTRAINT | (6<<8))
+#define SQLITE_CONSTRAINT_TRIGGER (SQLITE_CONSTRAINT | (7<<8))
+#define SQLITE_CONSTRAINT_UNIQUE (SQLITE_CONSTRAINT | (8<<8))
+#define SQLITE_CONSTRAINT_VTAB (SQLITE_CONSTRAINT | (9<<8))
+#define SQLITE_CONSTRAINT_ROWID (SQLITE_CONSTRAINT |(10<<8))
+#define SQLITE_CONSTRAINT_PINNED (SQLITE_CONSTRAINT |(11<<8))
+#define SQLITE_NOTICE_RECOVER_WAL (SQLITE_NOTICE | (1<<8))
+#define SQLITE_NOTICE_RECOVER_ROLLBACK (SQLITE_NOTICE | (2<<8))
+#define SQLITE_WARNING_AUTOINDEX (SQLITE_WARNING | (1<<8))
+#define SQLITE_AUTH_USER (SQLITE_AUTH | (1<<8))
+#define SQLITE_OK_LOAD_PERMANENTLY (SQLITE_OK | (1<<8))
+#define SQLITE_OK_SYMLINK (SQLITE_OK | (2<<8))
+
+/*
+** CAPI3REF: Flags For File Open Operations
+**
+** These bit values are intended for use in the
+** 3rd parameter to the [sqlite3_open_v2()] interface and
+** in the 4th parameter to the [sqlite3_vfs.xOpen] method.
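+**
+** A hedged sketch of combining these flags in a call to
+** [sqlite3_open_v2()] (illustrative application code; the filename is
+** arbitrary):
+**
+** <blockquote><pre>
+** sqlite3 *db = 0;
+** int rc = sqlite3_open_v2("data.db", &db,
+**            SQLITE_OPEN_READWRITE|SQLITE_OPEN_CREATE|SQLITE_OPEN_NOFOLLOW,
+**            0);  // a NULL 4th argument selects the default [VFS]
+** if( rc!=SQLITE_OK && db!=0 ){
+**   fprintf(stderr, "open failed: %s\n", sqlite3_errmsg(db));
+**   sqlite3_close(db);  // a handle is returned even on most failures
+** }
+** </pre></blockquote>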
+*/
+#define SQLITE_OPEN_READONLY 0x00000001 /* Ok for sqlite3_open_v2() */
+#define SQLITE_OPEN_READWRITE 0x00000002 /* Ok for sqlite3_open_v2() */
+#define SQLITE_OPEN_CREATE 0x00000004 /* Ok for sqlite3_open_v2() */
+#define SQLITE_OPEN_DELETEONCLOSE 0x00000008 /* VFS only */
+#define SQLITE_OPEN_EXCLUSIVE 0x00000010 /* VFS only */
+#define SQLITE_OPEN_AUTOPROXY 0x00000020 /* VFS only */
+#define SQLITE_OPEN_URI 0x00000040 /* Ok for sqlite3_open_v2() */
+#define SQLITE_OPEN_MEMORY 0x00000080 /* Ok for sqlite3_open_v2() */
+#define SQLITE_OPEN_MAIN_DB 0x00000100 /* VFS only */
+#define SQLITE_OPEN_TEMP_DB 0x00000200 /* VFS only */
+#define SQLITE_OPEN_TRANSIENT_DB 0x00000400 /* VFS only */
+#define SQLITE_OPEN_MAIN_JOURNAL 0x00000800 /* VFS only */
+#define SQLITE_OPEN_TEMP_JOURNAL 0x00001000 /* VFS only */
+#define SQLITE_OPEN_SUBJOURNAL 0x00002000 /* VFS only */
+#define SQLITE_OPEN_SUPER_JOURNAL 0x00004000 /* VFS only */
+#define SQLITE_OPEN_NOMUTEX 0x00008000 /* Ok for sqlite3_open_v2() */
+#define SQLITE_OPEN_FULLMUTEX 0x00010000 /* Ok for sqlite3_open_v2() */
+#define SQLITE_OPEN_SHAREDCACHE 0x00020000 /* Ok for sqlite3_open_v2() */
+#define SQLITE_OPEN_PRIVATECACHE 0x00040000 /* Ok for sqlite3_open_v2() */
+#define SQLITE_OPEN_WAL 0x00080000 /* VFS only */
+#define SQLITE_OPEN_NOFOLLOW 0x01000000 /* Ok for sqlite3_open_v2() */
+
+/* Reserved: 0x00F00000 */
+/* Legacy compatibility: */
+#define SQLITE_OPEN_MASTER_JOURNAL 0x00004000 /* VFS only */
+
+
+/*
+** CAPI3REF: Device Characteristics
+**
+** The xDeviceCharacteristics method of the [sqlite3_io_methods]
+** object returns an integer which is a vector of these
+** bit values expressing I/O characteristics of the mass storage
+** device that holds the file that the [sqlite3_io_methods]
+** refers to.
+**
+** The SQLITE_IOCAP_ATOMIC property means that all writes of
+** any size are atomic. The SQLITE_IOCAP_ATOMICnnn values
+** mean that writes of blocks that are nnn bytes in size and
+** are aligned to an address which is an integer multiple of
+** nnn are atomic. The SQLITE_IOCAP_SAFE_APPEND value means
+** that when data is appended to a file, the data is appended
+** first then the size of the file is extended, never the other
+** way around. The SQLITE_IOCAP_SEQUENTIAL property means that
+** information is written to disk in the same order as calls
+** to xWrite(). The SQLITE_IOCAP_POWERSAFE_OVERWRITE property means that
+** after a reboot following a crash or power loss, only those bytes in a
+** file that were actually written at the application level might have
+** changed; adjacent bytes, even bytes within the same sector, are
+** guaranteed to be unchanged. The SQLITE_IOCAP_UNDELETABLE_WHEN_OPEN
+** flag indicates that a file cannot be deleted when open. The
+** SQLITE_IOCAP_IMMUTABLE flag indicates that the file is on
+** read-only media and cannot be changed even by processes with
+** elevated privileges.
+**
+** The SQLITE_IOCAP_BATCH_ATOMIC property means that the underlying
+** filesystem supports doing multiple write operations atomically when those
+** write operations are bracketed by [SQLITE_FCNTL_BEGIN_ATOMIC_WRITE] and
+** [SQLITE_FCNTL_COMMIT_ATOMIC_WRITE].
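+**
+** For example, a VFS client might probe for batch-atomic support like
+** this (a hedged sketch; "pFile" is assumed to be an open [sqlite3_file]
+** pointer):
+**
+** <blockquote><pre>
+** int dc = pFile->pMethods->xDeviceCharacteristics(pFile);
+** if( dc & SQLITE_IOCAP_BATCH_ATOMIC ){
+**   // writes may be grouped between SQLITE_FCNTL_BEGIN_ATOMIC_WRITE
+**   // and SQLITE_FCNTL_COMMIT_ATOMIC_WRITE
+** }
+** </pre></blockquote>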
+*/
+#define SQLITE_IOCAP_ATOMIC 0x00000001
+#define SQLITE_IOCAP_ATOMIC512 0x00000002
+#define SQLITE_IOCAP_ATOMIC1K 0x00000004
+#define SQLITE_IOCAP_ATOMIC2K 0x00000008
+#define SQLITE_IOCAP_ATOMIC4K 0x00000010
+#define SQLITE_IOCAP_ATOMIC8K 0x00000020
+#define SQLITE_IOCAP_ATOMIC16K 0x00000040
+#define SQLITE_IOCAP_ATOMIC32K 0x00000080
+#define SQLITE_IOCAP_ATOMIC64K 0x00000100
+#define SQLITE_IOCAP_SAFE_APPEND 0x00000200
+#define SQLITE_IOCAP_SEQUENTIAL 0x00000400
+#define SQLITE_IOCAP_UNDELETABLE_WHEN_OPEN 0x00000800
+#define SQLITE_IOCAP_POWERSAFE_OVERWRITE 0x00001000
+#define SQLITE_IOCAP_IMMUTABLE 0x00002000
+#define SQLITE_IOCAP_BATCH_ATOMIC 0x00004000
+
+/*
+** CAPI3REF: File Locking Levels
+**
+** SQLite uses one of these integer values as the second
+** argument to calls it makes to the xLock() and xUnlock() methods
+** of an [sqlite3_io_methods] object.
+*/
+#define SQLITE_LOCK_NONE 0
+#define SQLITE_LOCK_SHARED 1
+#define SQLITE_LOCK_RESERVED 2
+#define SQLITE_LOCK_PENDING 3
+#define SQLITE_LOCK_EXCLUSIVE 4
+
+/*
+** CAPI3REF: Synchronization Type Flags
+**
+** When SQLite invokes the xSync() method of an
+** [sqlite3_io_methods] object it uses a combination of
+** these integer values as the second argument.
+**
+** When the SQLITE_SYNC_DATAONLY flag is used, it means that the
+** sync operation only needs to flush data to mass storage. Inode
+** information need not be flushed. If the lower four bits of the flag
+** equal SQLITE_SYNC_NORMAL, that means to use normal fsync() semantics.
+** If the lower four bits equal SQLITE_SYNC_FULL, that means
+** to use Mac OS X style fullsync instead of fsync().
+**
+** Do not confuse the SQLITE_SYNC_NORMAL and SQLITE_SYNC_FULL flags
+** with the [PRAGMA synchronous]=NORMAL and [PRAGMA synchronous]=FULL
+** settings. The [synchronous pragma] determines when calls to the
+** xSync VFS method occur and applies uniformly across all platforms.
+** The SQLITE_SYNC_NORMAL and SQLITE_SYNC_FULL flags determine how
+** energetic or rigorous or forceful the sync operations are and
+** only make a difference on Mac OS X for the default SQLite code.
+** (Third-party VFS implementations might also make the distinction
+** between SQLITE_SYNC_NORMAL and SQLITE_SYNC_FULL, but among the
+** operating systems natively supported by SQLite, only Mac OS X
+** cares about the difference.)
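+**
+** A VFS implementation might decode the flags as follows (a hedged
+** sketch; the function name is hypothetical):
+**
+** <blockquote><pre>
+** static int myXSync(sqlite3_file *pFile, int flags){
+**   int isFullsync = (flags & 0x0F)==SQLITE_SYNC_FULL;
+**   int dataOnly   = (flags & SQLITE_SYNC_DATAONLY)!=0;
+**   // ...sync using fsync() or a fullsync, honoring dataOnly...
+**   return SQLITE_OK;
+** }
+** </pre></blockquote>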
+*/
+#define SQLITE_SYNC_NORMAL 0x00002
+#define SQLITE_SYNC_FULL 0x00003
+#define SQLITE_SYNC_DATAONLY 0x00010
+
+/*
+** CAPI3REF: OS Interface Open File Handle
+**
+** An [sqlite3_file] object represents an open file in the
+** [sqlite3_vfs | OS interface layer]. Individual OS interface
+** implementations will
+** want to subclass this object by appending additional fields
+** for their own use. The pMethods entry is a pointer to an
+** [sqlite3_io_methods] object that defines methods for performing
+** I/O operations on the open file.
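+**
+** For example, a hypothetical POSIX-style implementation might subclass
+** the object like this (an illustrative sketch):
+**
+** <blockquote><pre>
+** typedef struct MyFile MyFile;
+** struct MyFile {
+**   sqlite3_file base;  // must be first so pMethods is found
+**   int fd;             // fields for the implementation's own use
+** };
+** </pre></blockquote>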
+*/
+typedef struct sqlite3_file sqlite3_file;
+struct sqlite3_file {
+ const struct sqlite3_io_methods *pMethods; /* Methods for an open file */
+};
+
+/*
+** CAPI3REF: OS Interface File Virtual Methods Object
+**
+** Every file opened by the [sqlite3_vfs.xOpen] method populates an
+** [sqlite3_file] object (or, more commonly, a subclass of the
+** [sqlite3_file] object) with a pointer to an instance of this object.
+** This object defines the methods used to perform various operations
+** against the open file represented by the [sqlite3_file] object.
+**
+** If the [sqlite3_vfs.xOpen] method sets the sqlite3_file.pMethods element
+** to a non-NULL pointer, then the sqlite3_io_methods.xClose method
+** may be invoked even if the [sqlite3_vfs.xOpen] call reported failure. The
+** only way to prevent a call to xClose following a failed [sqlite3_vfs.xOpen]
+** is for the [sqlite3_vfs.xOpen] to set the sqlite3_file.pMethods element
+** to NULL.
+**
+** The flags argument to xSync may be one of [SQLITE_SYNC_NORMAL] or
+** [SQLITE_SYNC_FULL]. The first choice is the normal fsync().
+** The second choice is a Mac OS X style fullsync. The [SQLITE_SYNC_DATAONLY]
+** flag may be ORed in to indicate that only the data of the file
+** and not its inode needs to be synced.
+**
+** The integer values to xLock() and xUnlock() are one of
+** <ul>
+** <li> [SQLITE_LOCK_NONE],
+** <li> [SQLITE_LOCK_SHARED],
+** <li> [SQLITE_LOCK_RESERVED],
+** <li> [SQLITE_LOCK_PENDING], or
+** <li> [SQLITE_LOCK_EXCLUSIVE].
+** </ul>
+** xLock() increases the lock. xUnlock() decreases the lock.
+** The xCheckReservedLock() method checks whether any database connection,
+** either in this process or in some other process, is holding a RESERVED,
+** PENDING, or EXCLUSIVE lock on the file. It returns true
+** if such a lock exists and false otherwise.
+**
+** The xFileControl() method is a generic interface that allows custom
+** VFS implementations to directly control an open file using the
+** [sqlite3_file_control()] interface. The second "op" argument is an
+** integer opcode. The third argument is a generic pointer intended to
+** point to a structure that may contain arguments or space in which to
+** write return values. Potential uses for xFileControl() might be
+** functions to enable blocking locks with timeouts, to change the
+** locking strategy (for example to use dot-file locks), to inquire
+** about the status of a lock, or to break stale locks. The SQLite
+** core reserves all opcodes less than 100 for its own use.
+** A [file control opcodes | list of opcodes] less than 100 is available.
+** Applications that define a custom xFileControl method should use opcodes
+** greater than 100 to avoid conflicts. VFS implementations should
+** return [SQLITE_NOTFOUND] for file control opcodes that they do not
+** recognize.
+**
+** The xSectorSize() method returns the sector size of the
+** device that underlies the file. The sector size is the
+** minimum write that can be performed without disturbing
+** other bytes in the file. The xDeviceCharacteristics()
+** method returns a bit vector describing behaviors of the
+** underlying device:
+**
+** <ul>
+** <li> [SQLITE_IOCAP_ATOMIC]
+** <li> [SQLITE_IOCAP_ATOMIC512]
+** <li> [SQLITE_IOCAP_ATOMIC1K]
+** <li> [SQLITE_IOCAP_ATOMIC2K]
+** <li> [SQLITE_IOCAP_ATOMIC4K]
+** <li> [SQLITE_IOCAP_ATOMIC8K]
+** <li> [SQLITE_IOCAP_ATOMIC16K]
+** <li> [SQLITE_IOCAP_ATOMIC32K]
+** <li> [SQLITE_IOCAP_ATOMIC64K]
+** <li> [SQLITE_IOCAP_SAFE_APPEND]
+** <li> [SQLITE_IOCAP_SEQUENTIAL]
+** <li> [SQLITE_IOCAP_UNDELETABLE_WHEN_OPEN]
+** <li> [SQLITE_IOCAP_POWERSAFE_OVERWRITE]
+** <li> [SQLITE_IOCAP_IMMUTABLE]
+** <li> [SQLITE_IOCAP_BATCH_ATOMIC]
+** </ul>
+**
+** The SQLITE_IOCAP_ATOMIC property means that all writes of
+** any size are atomic. The SQLITE_IOCAP_ATOMICnnn values
+** mean that writes of blocks that are nnn bytes in size and
+** are aligned to an address which is an integer multiple of
+** nnn are atomic. The SQLITE_IOCAP_SAFE_APPEND value means
+** that when data is appended to a file, the data is appended
+** first then the size of the file is extended, never the other
+** way around. The SQLITE_IOCAP_SEQUENTIAL property means that
+** information is written to disk in the same order as calls
+** to xWrite().
+**
+** If xRead() returns SQLITE_IOERR_SHORT_READ it must also fill
+** in the unread portions of the buffer with zeros. A VFS that
+** fails to zero-fill short reads might seem to work. However,
+** failure to zero-fill short reads will eventually lead to
+** database corruption.
+*/
+typedef struct sqlite3_io_methods sqlite3_io_methods;
+struct sqlite3_io_methods {
+ int iVersion;
+ int (*xClose)(sqlite3_file*);
+ int (*xRead)(sqlite3_file*, void*, int iAmt, sqlite3_int64 iOfst);
+ int (*xWrite)(sqlite3_file*, const void*, int iAmt, sqlite3_int64 iOfst);
+ int (*xTruncate)(sqlite3_file*, sqlite3_int64 size);
+ int (*xSync)(sqlite3_file*, int flags);
+ int (*xFileSize)(sqlite3_file*, sqlite3_int64 *pSize);
+ int (*xLock)(sqlite3_file*, int);
+ int (*xUnlock)(sqlite3_file*, int);
+ int (*xCheckReservedLock)(sqlite3_file*, int *pResOut);
+ int (*xFileControl)(sqlite3_file*, int op, void *pArg);
+ int (*xSectorSize)(sqlite3_file*);
+ int (*xDeviceCharacteristics)(sqlite3_file*);
+ /* Methods above are valid for version 1 */
+ int (*xShmMap)(sqlite3_file*, int iPg, int pgsz, int, void volatile**);
+ int (*xShmLock)(sqlite3_file*, int offset, int n, int flags);
+ void (*xShmBarrier)(sqlite3_file*);
+ int (*xShmUnmap)(sqlite3_file*, int deleteFlag);
+ /* Methods above are valid for version 2 */
+ int (*xFetch)(sqlite3_file*, sqlite3_int64 iOfst, int iAmt, void **pp);
+ int (*xUnfetch)(sqlite3_file*, sqlite3_int64 iOfst, void *p);
+ /* Methods above are valid for version 3 */
+ /* Additional methods may be added in future releases */
+};
+
+/*
+** CAPI3REF: Standard File Control Opcodes
+** KEYWORDS: {file control opcodes} {file control opcode}
+**
+** These integer constants are opcodes for the xFileControl method
+** of the [sqlite3_io_methods] object and for the [sqlite3_file_control()]
+** interface.
+**
+** <ul>
+** <li>[[SQLITE_FCNTL_LOCKSTATE]]
+** The [SQLITE_FCNTL_LOCKSTATE] opcode is used for debugging. This
+** opcode causes the xFileControl method to write the current state of
+** the lock (one of [SQLITE_LOCK_NONE], [SQLITE_LOCK_SHARED],
+** [SQLITE_LOCK_RESERVED], [SQLITE_LOCK_PENDING], or [SQLITE_LOCK_EXCLUSIVE])
+** into an integer that the pArg argument points to. This capability
+** is used during testing and is only available when the SQLITE_TEST
+** compile-time option is used.
+**
+** <li>[[SQLITE_FCNTL_SIZE_HINT]]
+** The [SQLITE_FCNTL_SIZE_HINT] opcode is used by SQLite to give the VFS
+** layer a hint of how large the database file will grow to be during the
+** current transaction. This hint is not guaranteed to be accurate but it
+** is often close. The underlying VFS might choose to preallocate database
+** file space based on this hint in order to help writes to the database
+** file run faster.
+**
+** <li>[[SQLITE_FCNTL_SIZE_LIMIT]]
+** The [SQLITE_FCNTL_SIZE_LIMIT] opcode is used by the in-memory VFS that
+** implements [sqlite3_deserialize()] to set an upper bound on the size
+** of the in-memory database. The argument is a pointer to a [sqlite3_int64].
+** If the integer pointed to is negative, then it is filled in with the
+** current limit. Otherwise the limit is set to the larger of the value
+** of the integer pointed to and the current database size. The integer
+** pointed to is set to the new limit.
+**
+** <li>[[SQLITE_FCNTL_CHUNK_SIZE]]
+** The [SQLITE_FCNTL_CHUNK_SIZE] opcode is used to request that the VFS
+** extend and truncate the database file in chunks of a size specified
+** by the user. The fourth argument to [sqlite3_file_control()] should
+** point to an integer (type int) containing the new chunk-size to use
+** for the nominated database. Allocating database file space in large
+** chunks (say 1MB at a time) may reduce file-system fragmentation and
+** improve performance on some systems.
+**
+** <li>[[SQLITE_FCNTL_FILE_POINTER]]
+** The [SQLITE_FCNTL_FILE_POINTER] opcode is used to obtain a pointer
+** to the [sqlite3_file] object associated with a particular database
+** connection. See also [SQLITE_FCNTL_JOURNAL_POINTER].
+**
+** <li>[[SQLITE_FCNTL_JOURNAL_POINTER]]
+** The [SQLITE_FCNTL_JOURNAL_POINTER] opcode is used to obtain a pointer
+** to the [sqlite3_file] object associated with the journal file (either
+** the [rollback journal] or the [write-ahead log]) for a particular database
+** connection. See also [SQLITE_FCNTL_FILE_POINTER].
+**
+** <li>[[SQLITE_FCNTL_SYNC_OMITTED]]
+** No longer in use.
+**
+** <li>[[SQLITE_FCNTL_SYNC]]
+** The [SQLITE_FCNTL_SYNC] opcode is generated internally by SQLite and
+** sent to the VFS immediately before the xSync method is invoked on a
+** database file descriptor. Or, if the xSync method is not invoked
+** because the user has configured SQLite with
+** [PRAGMA synchronous | PRAGMA synchronous=OFF] it is invoked in place
+** of the xSync method. In most cases, the pointer argument passed with
+** this file-control is NULL. However, if the database file is being synced
+** as part of a multi-database commit, the argument points to a nul-terminated
+** string containing the transaction's super-journal file name. VFSes that
+** do not need this signal should silently ignore this opcode. Applications
+** should not call [sqlite3_file_control()] with this opcode as doing so may
+** disrupt the operation of the specialized VFSes that do require it.
+**
+** <li>[[SQLITE_FCNTL_COMMIT_PHASETWO]]
+** The [SQLITE_FCNTL_COMMIT_PHASETWO] opcode is generated internally by SQLite
+** and sent to the VFS immediately after a transaction has been committed,
+** but before the database is unlocked. VFSes that do not need this signal
+** should silently ignore this opcode. Applications should not call
+** [sqlite3_file_control()] with this opcode as doing so may disrupt the
+** operation of the specialized VFSes that do require it.
+**
+** <li>[[SQLITE_FCNTL_WIN32_AV_RETRY]]
+** ^The [SQLITE_FCNTL_WIN32_AV_RETRY] opcode is used to configure automatic
+** retry counts and intervals for certain disk I/O operations for the
+** windows [VFS] in order to provide robustness in the presence of
+** anti-virus programs. By default, the windows VFS will retry file read,
+** file write, and file delete operations up to 10 times, with a delay
+** of 25 milliseconds before the first retry and with the delay increasing
+** by an additional 25 milliseconds with each subsequent retry. This
+** opcode allows these two values (10 retries and 25 milliseconds of delay)
+** to be adjusted. The values are changed for all database connections
+** within the same process. The argument is a pointer to an array of two
+** integers where the first integer is the new retry count and the second
+** integer is the delay. If either integer is negative, then the setting
+** is not changed but instead the prior value of that setting is written
+** into the array entry, allowing the current retry settings to be
+** interrogated. The zDbName parameter is ignored.
+**
+** <li>[[SQLITE_FCNTL_PERSIST_WAL]]
+** ^The [SQLITE_FCNTL_PERSIST_WAL] opcode is used to set or query the
+** persistent [WAL | Write Ahead Log] setting. By default, the auxiliary
+** write ahead log ([WAL file]) and shared memory
+** files used for transaction control
+** are automatically deleted when the latest connection to the database
+** closes. Setting persistent WAL mode causes those files to persist after
+** close. Persisting the files is useful when other processes that do not
+** have write permission on the directory containing the database file want
+** to read the database file, as the WAL and shared memory files must exist
+** in order for the database to be readable. The fourth parameter to
+** [sqlite3_file_control()] for this opcode should be a pointer to an integer.
+** That integer is 0 to disable persistent WAL mode or 1 to enable persistent
+** WAL mode. If the integer is -1, then it is overwritten with the current
+** WAL persistence setting.
+**
+** <li>[[SQLITE_FCNTL_POWERSAFE_OVERWRITE]]
+** ^The [SQLITE_FCNTL_POWERSAFE_OVERWRITE] opcode is used to set or query the
+** persistent "powersafe-overwrite" or "PSOW" setting. The PSOW setting
+** determines the [SQLITE_IOCAP_POWERSAFE_OVERWRITE] bit of the
+** xDeviceCharacteristics method. The fourth parameter to
+** [sqlite3_file_control()] for this opcode should be a pointer to an integer.
+** That integer is 0 to disable zero-damage mode or 1 to enable zero-damage
+** mode. If the integer is -1, then it is overwritten with the current
+** zero-damage mode setting.
+**
+** <li>[[SQLITE_FCNTL_OVERWRITE]]
+** ^The [SQLITE_FCNTL_OVERWRITE] opcode is invoked by SQLite after opening
+** a write transaction to indicate that, unless it is rolled back for some
+** reason, the entire database file will be overwritten by the current
+** transaction. This is used by VACUUM operations.
+**
+** <li>[[SQLITE_FCNTL_VFSNAME]]
+** ^The [SQLITE_FCNTL_VFSNAME] opcode can be used to obtain the names of
+** all [VFSes] in the VFS stack. The names of all VFS shims and the
+** final bottom-level VFS are written into memory obtained from
+** [sqlite3_malloc()] and the result is stored in the char* variable
+** that the fourth parameter of [sqlite3_file_control()] points to.
+** The caller is responsible for freeing the memory when done. As with
+** all file-control actions, there is no guarantee that this will actually
+** do anything. Callers should initialize the char* variable to a NULL
+** pointer in case this file-control is not implemented (see the sketch
+** following this list). This file-control is intended for diagnostic
+** use only.
+**
+** <li>[[SQLITE_FCNTL_VFS_POINTER]]
+** ^The [SQLITE_FCNTL_VFS_POINTER] opcode finds a pointer to the top-level
+** [VFS] currently in use. ^(The argument X in
+** sqlite3_file_control(db,SQLITE_FCNTL_VFS_POINTER,X) must be
+** of type "[sqlite3_vfs] **". This opcode will set *X
+** to a pointer to the top-level VFS.)^
+** ^When there are multiple VFS shims in the stack, this opcode finds the
+** upper-most shim only.
+**
+** <li>[[SQLITE_FCNTL_PRAGMA]]
+** ^Whenever a [PRAGMA] statement is parsed, an [SQLITE_FCNTL_PRAGMA]
+** file control is sent to the open [sqlite3_file] object corresponding
+** to the database file to which the pragma statement refers. ^The argument
+** to the [SQLITE_FCNTL_PRAGMA] file control is an array of
+** pointers to strings (char**) in which the second element of the array
+** is the name of the pragma and the third element is the argument to the
+** pragma or NULL if the pragma has no argument. ^The handler for an
+** [SQLITE_FCNTL_PRAGMA] file control can optionally make the first element
+** of the char** argument point to a string obtained from [sqlite3_mprintf()]
+** or the equivalent and that string will become the result of the pragma or
+** the error message if the pragma fails. ^If the
+** [SQLITE_FCNTL_PRAGMA] file control returns [SQLITE_NOTFOUND], then normal
+** [PRAGMA] processing continues. ^If the [SQLITE_FCNTL_PRAGMA]
+** file control returns [SQLITE_OK], then the parser assumes that the
+** VFS has handled the PRAGMA itself and the parser generates a no-op
+** prepared statement if the result string is NULL, or one that returns a
+** copy of the result string if the string is non-NULL.
+** ^If the [SQLITE_FCNTL_PRAGMA] file control returns
+** any result code other than [SQLITE_OK] or [SQLITE_NOTFOUND], that means
+** that the VFS encountered an error while handling the [PRAGMA] and the
+** compilation of the PRAGMA fails with an error. ^The [SQLITE_FCNTL_PRAGMA]
+** file control occurs at the beginning of pragma statement analysis and so
+** it is able to override built-in [PRAGMA] statements.
+**
+** <li>[[SQLITE_FCNTL_BUSYHANDLER]]
+** ^The [SQLITE_FCNTL_BUSYHANDLER]
+** file-control may be invoked by SQLite on the database file handle
+** shortly after it is opened in order to provide a custom VFS with access
+** to the connection's busy-handler callback. The argument is of type (void**)
+** - an array of two (void *) values. The first (void *) actually points
+** to a function of type (int (*)(void *)). In order to invoke the connection's
+** busy-handler, this function should be invoked with the second (void *) in
+** the array as the only argument. If it returns non-zero, then the operation
+** should be retried. If it returns zero, the custom VFS should abandon the
+** current operation.
+**
+** <li>[[SQLITE_FCNTL_TEMPFILENAME]]
+** ^Applications can invoke the [SQLITE_FCNTL_TEMPFILENAME] file-control
+** to have SQLite generate a
+** temporary filename using the same algorithm that is followed to generate
+** temporary filenames for TEMP tables and other internal uses. The
+** argument should be a char** which will be filled with the filename
+** written into memory obtained from [sqlite3_malloc()]. The caller should
+** invoke [sqlite3_free()] on the result to avoid a memory leak.
+**
+** <li>[[SQLITE_FCNTL_MMAP_SIZE]]
+** The [SQLITE_FCNTL_MMAP_SIZE] file control is used to query or set the
+** maximum number of bytes that will be used for memory-mapped I/O.
+** The argument is a pointer to a value of type sqlite3_int64 that
+** is an advisory maximum number of bytes in the file to memory map. The
+** pointer is overwritten with the old value. The limit is not changed if
+** the value originally pointed to is negative, and so the current limit
+** can be queried by passing in a pointer to a negative number. This
+** file-control is used internally to implement [PRAGMA mmap_size].
+**
+** <li>[[SQLITE_FCNTL_TRACE]]
+** The [SQLITE_FCNTL_TRACE] file control provides advisory information
+** to the VFS about what the higher layers of the SQLite stack are doing.
+** This file control is used by some VFS activity tracing [shims].
+** The argument is a zero-terminated string. Higher layers in the
+** SQLite stack may generate instances of this file control if
+** the [SQLITE_USE_FCNTL_TRACE] compile-time option is enabled.
+**
+** <li>[[SQLITE_FCNTL_HAS_MOVED]]
+** The [SQLITE_FCNTL_HAS_MOVED] file control interprets its argument as a
+** pointer to an integer and it writes a boolean into that integer depending
+** on whether or not the file has been renamed, moved, or deleted since it
+** was first opened.
+**
+** <li>[[SQLITE_FCNTL_WIN32_GET_HANDLE]]
+** The [SQLITE_FCNTL_WIN32_GET_HANDLE] opcode can be used to obtain the
+** underlying native file handle associated with a file handle. This file
+** control interprets its argument as a pointer to a native file handle and
+** writes the resulting value there.
+**
+** <li>[[SQLITE_FCNTL_WIN32_SET_HANDLE]]
+** The [SQLITE_FCNTL_WIN32_SET_HANDLE] opcode is used for debugging. This
+** opcode causes the xFileControl method to swap the file handle with the one
+** pointed to by the pArg argument. This capability is used during testing
+** and only needs to be supported when SQLITE_TEST is defined.
+**
+** <li>[[SQLITE_FCNTL_WAL_BLOCK]]
+** The [SQLITE_FCNTL_WAL_BLOCK] is a signal to the VFS layer that it might
+** be advantageous to block on the next WAL lock if the lock is not immediately
+** available. The WAL subsystem issues this signal during rare
+** circumstances in order to fix a problem with priority inversion.
+** Applications should <em>not</em> use this file-control.
+**
+** <li>[[SQLITE_FCNTL_ZIPVFS]]
+** The [SQLITE_FCNTL_ZIPVFS] opcode is implemented by zipvfs only. All other
+** VFSes should return SQLITE_NOTFOUND for this opcode.
+**
+** <li>[[SQLITE_FCNTL_RBU]]
+** The [SQLITE_FCNTL_RBU] opcode is implemented by the special VFS used by
+** the RBU extension only. All other VFSes should return SQLITE_NOTFOUND for
+** this opcode.
+**
+** <li>[[SQLITE_FCNTL_BEGIN_ATOMIC_WRITE]]
+** If the [SQLITE_FCNTL_BEGIN_ATOMIC_WRITE] opcode returns SQLITE_OK, then
+** the file descriptor is placed in "batch write mode", which
+** means all subsequent write operations will be deferred and done
+** atomically at the next [SQLITE_FCNTL_COMMIT_ATOMIC_WRITE]. Systems
+** that do not support batch atomic writes will return SQLITE_NOTFOUND.
+** ^Following a successful SQLITE_FCNTL_BEGIN_ATOMIC_WRITE and prior to
+** the closing [SQLITE_FCNTL_COMMIT_ATOMIC_WRITE] or
+** [SQLITE_FCNTL_ROLLBACK_ATOMIC_WRITE], SQLite will make
+** no VFS interface calls on the same [sqlite3_file] file descriptor
+** except for calls to the xWrite method and the xFileControl method
+** with [SQLITE_FCNTL_SIZE_HINT].
+**
+** <li>[[SQLITE_FCNTL_COMMIT_ATOMIC_WRITE]]
+** The [SQLITE_FCNTL_COMMIT_ATOMIC_WRITE] opcode causes all write
+** operations since the previous successful call to
+** [SQLITE_FCNTL_BEGIN_ATOMIC_WRITE] to be performed atomically.
+** This file control returns [SQLITE_OK] if and only if the writes were
+** all performed successfully and have been committed to persistent storage.
+** ^Regardless of whether or not it is successful, this file control takes
+** the file descriptor out of batch write mode so that all subsequent
+** write operations are independent.
+** ^SQLite will never invoke SQLITE_FCNTL_COMMIT_ATOMIC_WRITE without
+** a prior successful call to [SQLITE_FCNTL_BEGIN_ATOMIC_WRITE].
+**
+** <li>[[SQLITE_FCNTL_ROLLBACK_ATOMIC_WRITE]]
+** The [SQLITE_FCNTL_ROLLBACK_ATOMIC_WRITE] opcode causes all write
+** operations since the previous successful call to
+** [SQLITE_FCNTL_BEGIN_ATOMIC_WRITE] to be rolled back.
+** ^This file control takes the file descriptor out of batch write mode
+** so that all subsequent write operations are independent.
+** ^SQLite will never invoke SQLITE_FCNTL_ROLLBACK_ATOMIC_WRITE without
+** a prior successful call to [SQLITE_FCNTL_BEGIN_ATOMIC_WRITE].
+**
+** <li>[[SQLITE_FCNTL_LOCK_TIMEOUT]]
+** The [SQLITE_FCNTL_LOCK_TIMEOUT] opcode is used to configure a VFS
+** to block for up to M milliseconds before failing when attempting to
+** obtain a file lock using the xLock or xShmLock methods of the VFS.
+** The parameter is a pointer to a 32-bit signed integer that contains
+** the value that M is to be set to. Before returning, the 32-bit signed
+** integer is overwritten with the previous value of M.
+**
+** <li>[[SQLITE_FCNTL_DATA_VERSION]]
+** The [SQLITE_FCNTL_DATA_VERSION] opcode is used to detect changes to
+** a database file. The argument is a pointer to a 32-bit unsigned integer.
+** The "data version" for the pager is written into the pointer. The
+** "data version" changes whenever any change occurs to the corresponding
+** database file, either through SQL statements on the same database
+** connection or through transactions committed by separate database
+** connections possibly in other processes. The [sqlite3_total_changes()]
+** interface can be used to find if any database on the connection has changed,
+** but that interface responds to changes on TEMP as well as MAIN and does
+** not provide a mechanism to detect changes to MAIN only. Also, the
+** [sqlite3_total_changes()] interface responds to internal changes only and
+** omits changes made by other database connections. The
+** [PRAGMA data_version] command provides a mechanism to detect changes to
+** a single attached database that occur due to other database connections,
+** but omits changes implemented by the database connection on which it is
+** called. This file control is the only mechanism to detect changes that
+** happen either internally or externally and that are associated with
+** a particular attached database.
+**
+** <li>[[SQLITE_FCNTL_CKPT_START]]
+** The [SQLITE_FCNTL_CKPT_START] opcode is invoked from within a checkpoint
+** in wal mode before the client starts to copy pages from the wal
+** file to the database file.
+**
+** <li>[[SQLITE_FCNTL_CKPT_DONE]]
+** The [SQLITE_FCNTL_CKPT_DONE] opcode is invoked from within a checkpoint
+** in wal mode after the client has finished copying pages from the wal
+** file to the database file, but before the *-shm file is updated to
+** record the fact that the pages have been checkpointed.
+** </ul>
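+**
+** As an example of the calling conventions described above, a hedged
+** sketch of querying the VFS stack with [SQLITE_FCNTL_VFSNAME]
+** (illustrative application code; "db" is an open [database connection]):
+**
+** <blockquote><pre>
+** char *zVfsName = 0;  // start as NULL in case the opcode is unhandled
+** sqlite3_file_control(db, "main", SQLITE_FCNTL_VFSNAME, &zVfsName);
+** if( zVfsName ){
+**   printf("vfs stack: %s\n", zVfsName);
+**   sqlite3_free(zVfsName);  // caller frees the returned memory
+** }
+** </pre></blockquote>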
+*/
+#define SQLITE_FCNTL_LOCKSTATE 1
+#define SQLITE_FCNTL_GET_LOCKPROXYFILE 2
+#define SQLITE_FCNTL_SET_LOCKPROXYFILE 3
+#define SQLITE_FCNTL_LAST_ERRNO 4
+#define SQLITE_FCNTL_SIZE_HINT 5
+#define SQLITE_FCNTL_CHUNK_SIZE 6
+#define SQLITE_FCNTL_FILE_POINTER 7
+#define SQLITE_FCNTL_SYNC_OMITTED 8
+#define SQLITE_FCNTL_WIN32_AV_RETRY 9
+#define SQLITE_FCNTL_PERSIST_WAL 10
+#define SQLITE_FCNTL_OVERWRITE 11
+#define SQLITE_FCNTL_VFSNAME 12
+#define SQLITE_FCNTL_POWERSAFE_OVERWRITE 13
+#define SQLITE_FCNTL_PRAGMA 14
+#define SQLITE_FCNTL_BUSYHANDLER 15
+#define SQLITE_FCNTL_TEMPFILENAME 16
+#define SQLITE_FCNTL_MMAP_SIZE 18
+#define SQLITE_FCNTL_TRACE 19
+#define SQLITE_FCNTL_HAS_MOVED 20
+#define SQLITE_FCNTL_SYNC 21
+#define SQLITE_FCNTL_COMMIT_PHASETWO 22
+#define SQLITE_FCNTL_WIN32_SET_HANDLE 23
+#define SQLITE_FCNTL_WAL_BLOCK 24
+#define SQLITE_FCNTL_ZIPVFS 25
+#define SQLITE_FCNTL_RBU 26
+#define SQLITE_FCNTL_VFS_POINTER 27
+#define SQLITE_FCNTL_JOURNAL_POINTER 28
+#define SQLITE_FCNTL_WIN32_GET_HANDLE 29
+#define SQLITE_FCNTL_PDB 30
+#define SQLITE_FCNTL_BEGIN_ATOMIC_WRITE 31
+#define SQLITE_FCNTL_COMMIT_ATOMIC_WRITE 32
+#define SQLITE_FCNTL_ROLLBACK_ATOMIC_WRITE 33
+#define SQLITE_FCNTL_LOCK_TIMEOUT 34
+#define SQLITE_FCNTL_DATA_VERSION 35
+#define SQLITE_FCNTL_SIZE_LIMIT 36
+#define SQLITE_FCNTL_CKPT_DONE 37
+#define SQLITE_FCNTL_RESERVE_BYTES 38
+#define SQLITE_FCNTL_CKPT_START 39
+
+/* deprecated names */
+#define SQLITE_GET_LOCKPROXYFILE SQLITE_FCNTL_GET_LOCKPROXYFILE
+#define SQLITE_SET_LOCKPROXYFILE SQLITE_FCNTL_SET_LOCKPROXYFILE
+#define SQLITE_LAST_ERRNO SQLITE_FCNTL_LAST_ERRNO
+
+
+/*
+** CAPI3REF: Mutex Handle
+**
+** The mutex module within SQLite defines [sqlite3_mutex] to be an
+** abstract type for a mutex object. The SQLite core never looks
+** at the internal representation of an [sqlite3_mutex]. It only
+** deals with pointers to the [sqlite3_mutex] object.
+**
+** Mutexes are created using [sqlite3_mutex_alloc()].
+*/
+typedef struct sqlite3_mutex sqlite3_mutex;
+
+/*
+** CAPI3REF: Loadable Extension Thunk
+**
+** A pointer to the opaque sqlite3_api_routines structure is passed as
+** the third parameter to entry points of [loadable extensions]. This
+** structure must be typedefed in order to work around compiler warnings
+** on some platforms.
+*/
+typedef struct sqlite3_api_routines sqlite3_api_routines;
+
+/*
+** CAPI3REF: OS Interface Object
+**
+** An instance of the sqlite3_vfs object defines the interface between
+** the SQLite core and the underlying operating system. The "vfs"
+** in the name of the object stands for "virtual file system". See
+** the [VFS | VFS documentation] for further information.
+**
+** The VFS interface is sometimes extended by adding new methods onto
+** the end. Each time such an extension occurs, the iVersion field
+** is incremented. The iVersion value started out as 1 in
+** SQLite [version 3.5.0] on [dateof:3.5.0], then increased to 2
+** with SQLite [version 3.7.0] on [dateof:3.7.0], and then increased
+** to 3 with SQLite [version 3.7.6] on [dateof:3.7.6]. Additional fields
+** may be appended to the sqlite3_vfs object and the iVersion value
+** may increase again in future versions of SQLite.
+** Note that due to an oversight, the structure
+** of the sqlite3_vfs object changed in the transition from
+** SQLite [version 3.5.9] to [version 3.6.0] on [dateof:3.6.0]
+** and yet the iVersion field was not increased.
+**
+** The szOsFile field is the size of the subclassed [sqlite3_file]
+** structure used by this VFS. mxPathname is the maximum length of
+** a pathname in this VFS.
+**
+** Registered sqlite3_vfs objects are kept on a linked list formed by
+** the pNext pointer. The [sqlite3_vfs_register()]
+** and [sqlite3_vfs_unregister()] interfaces manage this list
+** in a thread-safe way. The [sqlite3_vfs_find()] interface
+** searches the list. Neither the application code nor the VFS
+** implementation should use the pNext pointer.
+**
+** The pNext field is the only field in the sqlite3_vfs
+** structure that SQLite will ever modify. SQLite will only access
+** or modify this field while holding a particular static mutex.
+** The application should never modify anything within the sqlite3_vfs
+** object once the object has been registered.
+**
+** The zName field holds the name of the VFS module. The name must
+** be unique across all VFS modules.
+**
+** [[sqlite3_vfs.xOpen]]
+** ^SQLite guarantees that the zFilename parameter to xOpen
+** is either a NULL pointer or a string obtained
+** from xFullPathname() with an optional suffix added.
+** ^If a suffix is added to the zFilename parameter, it will
+** consist of a single "-" character followed by no more than
+** 11 alphanumeric and/or "-" characters.
+** ^SQLite further guarantees that
+** the string will be valid and unchanged until xClose() is
+** called. Because of the previous sentence,
+** the [sqlite3_file] can safely store a pointer to the
+** filename if it needs to remember the filename for some reason.
+** If the zFilename parameter to xOpen is a NULL pointer then xOpen
+** must invent its own temporary name for the file. ^Whenever the
+** zFilename parameter is NULL it will also be the case that the
+** flags parameter will include [SQLITE_OPEN_DELETEONCLOSE].
+**
+** The flags argument to xOpen() includes all bits set in
+** the flags argument to [sqlite3_open_v2()]. Or if [sqlite3_open()]
+** or [sqlite3_open16()] is used, then flags includes at least
+** [SQLITE_OPEN_READWRITE] | [SQLITE_OPEN_CREATE].
+** If xOpen() opens a file read-only then it sets *pOutFlags to
+** include [SQLITE_OPEN_READONLY]. Other bits in *pOutFlags may be set.
+**
+** ^(SQLite will also add one of the following flags to the xOpen()
+** call, depending on the object being opened:
+**
+** <ul>
+** <li> [SQLITE_OPEN_MAIN_DB]
+** <li> [SQLITE_OPEN_MAIN_JOURNAL]
+** <li> [SQLITE_OPEN_TEMP_DB]
+** <li> [SQLITE_OPEN_TEMP_JOURNAL]
+** <li> [SQLITE_OPEN_TRANSIENT_DB]
+** <li> [SQLITE_OPEN_SUBJOURNAL]
+** <li> [SQLITE_OPEN_SUPER_JOURNAL]
+** <li> [SQLITE_OPEN_WAL]
+** </ul>)^
+**
+** The file I/O implementation can use the object type flags to
+** change the way it deals with files. For example, an application
+** that does not care about crash recovery or rollback might make
+** the open of a journal file a no-op. Writes to this journal would
+** also be no-ops, and any attempt to read the journal would return
+** SQLITE_IOERR. Or the implementation might recognize that a database
+** file will be doing page-aligned sector reads and writes in a random
+** order and set up its I/O subsystem accordingly.
+**
+** SQLite might also add one of the following flags to the xOpen method:
+**
+** <ul>
+** <li> [SQLITE_OPEN_DELETEONCLOSE]
+** <li> [SQLITE_OPEN_EXCLUSIVE]
+** </ul>
+**
+** The [SQLITE_OPEN_DELETEONCLOSE] flag means the file should be
+** deleted when it is closed. ^The [SQLITE_OPEN_DELETEONCLOSE] flag
+** will be set for TEMP databases and their journals, transient
+** databases, and subjournals.
+**
+** ^The [SQLITE_OPEN_EXCLUSIVE] flag is always used in conjunction
+** with the [SQLITE_OPEN_CREATE] flag, which are both directly
+** analogous to the O_EXCL and O_CREAT flags of the POSIX open()
+** API. The SQLITE_OPEN_EXCLUSIVE flag, when paired with
+** SQLITE_OPEN_CREATE, is used to indicate that the file should always
+** be created, and that it is an error if it already exists.
+** It is <i>not</i> used to indicate the file should be opened
+** for exclusive access.
+**
+** ^At least szOsFile bytes of memory are allocated by SQLite
+** to hold the [sqlite3_file] structure passed as the third
+** argument to xOpen. The xOpen method does not have to
+** allocate the structure; it should just fill it in. Note that
+** the xOpen method must set the sqlite3_file.pMethods to either
+** a valid [sqlite3_io_methods] object or to NULL. xOpen must do
+** this even if the open fails. SQLite expects that the sqlite3_file.pMethods
+** element will be valid after xOpen returns regardless of the success
+** or failure of the xOpen call.
+**
+** [[sqlite3_vfs.xAccess]]
+** ^The flags argument to xAccess() may be [SQLITE_ACCESS_EXISTS]
+** to test for the existence of a file, or [SQLITE_ACCESS_READWRITE] to
+** test whether a file is readable and writable, or [SQLITE_ACCESS_READ]
+** to test whether a file is at least readable. The SQLITE_ACCESS_READ
+** flag is never actually used and is not implemented in the built-in
+** VFSes of SQLite. The file is named by the second argument and can be a
+** directory. The xAccess method returns [SQLITE_OK] on success or some
+** non-zero error code if there is an I/O error or if the name of
+** the file given in the second argument is illegal. If SQLITE_OK
+** is returned, then non-zero or zero is written into *pResOut to indicate
+** whether or not the file is accessible.
+**
+** ^SQLite will always allocate at least mxPathname+1 bytes for the
+** output buffer xFullPathname. The exact size of the output buffer
+** is also passed as a parameter to both methods. If the output buffer
+** is not large enough, [SQLITE_CANTOPEN] should be returned. Since this is
+** handled as a fatal error by SQLite, VFS implementations should endeavor
+** to prevent this by setting mxPathname to a sufficiently large value.
+**
+** The xRandomness(), xSleep(), xCurrentTime(), and xCurrentTimeInt64()
+** interfaces are not strictly a part of the filesystem, but they are
+** included in the VFS structure for completeness.
+** The xRandomness() function attempts to return nBytes bytes
+** of good-quality randomness into zOut. The return value is
+** the actual number of bytes of randomness obtained.
+** The xSleep() method causes the calling thread to sleep for at
+** least the number of microseconds given. ^The xCurrentTime()
+** method returns a Julian Day Number for the current date and time as
+** a floating point value.
+** ^The xCurrentTimeInt64() method returns, as an integer, the Julian
+** Day Number multiplied by 86400000 (the number of milliseconds in
+** a 24-hour day).
+** ^SQLite will use the xCurrentTimeInt64() method to get the current
+** date and time if that method is available (if iVersion is 2 or
+** greater and the function pointer is not NULL) and will fall back
+** to xCurrentTime() if xCurrentTimeInt64() is unavailable.
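+**
+** As a sketch of the arithmetic involved (this mirrors what a typical
+** Unix implementation does; gettimeofday() is POSIX, not SQLite): the
+** Unix epoch falls on Julian Day 2440587.5, so the offset between the
+** two time scales is 2440587.5*86400000 = 210866760000000 milliseconds:
+**
+** <blockquote><pre>
+** static int myCurrentTimeInt64(sqlite3_vfs *p, sqlite3_int64 *piNow){
+**   static const sqlite3_int64 unixEpoch =
+**       24405875*(sqlite3_int64)8640000;   // 2440587.5 * 86400000
+**   struct timeval t;
+**   gettimeofday(&t, 0);
+**   *piNow = unixEpoch + 1000*(sqlite3_int64)t.tv_sec + t.tv_usec/1000;
+**   return SQLITE_OK;
+** }
+** </pre></blockquote>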
+**
+** ^The xSetSystemCall(), xGetSystemCall(), and xNestSystemCall() interfaces
+** are not used by the SQLite core. These optional interfaces are provided
+** by some VFSes to facilitate testing of the VFS code. By overriding
+** system calls with functions under its control, a test program can
+** simulate faults and error conditions that would otherwise be difficult
+** or impossible to induce. The set of system calls that can be overridden
+** varies from one VFS to another, and from one version of the same VFS to the
+** next. Applications that use these interfaces must be prepared for any
+** or all of these interfaces to be NULL or for their behavior to change
+** from one release to the next. Applications must not attempt to access
+** any of these methods if the iVersion of the VFS is less than 3.
+*/
+typedef struct sqlite3_vfs sqlite3_vfs;
+typedef void (*sqlite3_syscall_ptr)(void);
+struct sqlite3_vfs {
+ int iVersion; /* Structure version number (currently 3) */
+ int szOsFile; /* Size of subclassed sqlite3_file */
+ int mxPathname; /* Maximum file pathname length */
+ sqlite3_vfs *pNext; /* Next registered VFS */
+ const char *zName; /* Name of this virtual file system */
+ void *pAppData; /* Pointer to application-specific data */
+ int (*xOpen)(sqlite3_vfs*, const char *zName, sqlite3_file*,
+ int flags, int *pOutFlags);
+ int (*xDelete)(sqlite3_vfs*, const char *zName, int syncDir);
+ int (*xAccess)(sqlite3_vfs*, const char *zName, int flags, int *pResOut);
+ int (*xFullPathname)(sqlite3_vfs*, const char *zName, int nOut, char *zOut);
+ void *(*xDlOpen)(sqlite3_vfs*, const char *zFilename);
+ void (*xDlError)(sqlite3_vfs*, int nByte, char *zErrMsg);
+ void (*(*xDlSym)(sqlite3_vfs*,void*, const char *zSymbol))(void);
+ void (*xDlClose)(sqlite3_vfs*, void*);
+ int (*xRandomness)(sqlite3_vfs*, int nByte, char *zOut);
+ int (*xSleep)(sqlite3_vfs*, int microseconds);
+ int (*xCurrentTime)(sqlite3_vfs*, double*);
+ int (*xGetLastError)(sqlite3_vfs*, int, char *);
+ /*
+ ** The methods above are in version 1 of the sqlite_vfs object
+ ** definition. Those that follow are added in version 2 or later
+ */
+ int (*xCurrentTimeInt64)(sqlite3_vfs*, sqlite3_int64*);
+ /*
+ ** The methods above are in versions 1 and 2 of the sqlite_vfs object.
+ ** Those below are for version 3 and greater.
+ */
+ int (*xSetSystemCall)(sqlite3_vfs*, const char *zName, sqlite3_syscall_ptr);
+ sqlite3_syscall_ptr (*xGetSystemCall)(sqlite3_vfs*, const char *zName);
+ const char *(*xNextSystemCall)(sqlite3_vfs*, const char *zName);
+ /*
+ ** The methods above are in versions 1 through 3 of the sqlite_vfs object.
+ ** New fields may be appended in future versions. The iVersion
+ ** value will increment whenever this happens.
+ */
+};
+
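+/*
+** As a brief sketch (myvfs, myVfs, and myOpen are hypothetical names,
+** not part of SQLite): a shim VFS is commonly built by copying the
+** default VFS, overriding selected methods, and registering the copy
+** under a new name using [sqlite3_vfs_register()]:
+**
+** <blockquote><pre>
+** static sqlite3_vfs myVfs;
+** sqlite3_vfs *pDefault = sqlite3_vfs_find(0);  // the default VFS
+** myVfs = *pDefault;                 // inherit all default methods
+** myVfs.zName = "myvfs";
+** myVfs.xOpen = myOpen;              // override just xOpen
+** sqlite3_vfs_register(&myVfs, 0);   // 0: do not make it the default
+** </pre></blockquote>
+*/
+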
+/*
+** CAPI3REF: Flags for the xAccess VFS method
+**
+** These integer constants can be used as the third parameter to
+** the xAccess method of an [sqlite3_vfs] object. They determine
+** what kind of permissions the xAccess method is looking for.
+** With SQLITE_ACCESS_EXISTS, the xAccess method
+** simply checks whether the file exists.
+** With SQLITE_ACCESS_READWRITE, the xAccess method
+** checks whether the named directory is both readable and writable
+** (in other words, if files can be added, removed, and renamed within
+** the directory).
+** The SQLITE_ACCESS_READWRITE constant is currently used only by the
+** [temp_store_directory pragma], though this could change in a future
+** release of SQLite.
+** With SQLITE_ACCESS_READ, the xAccess method
+** checks whether the file is readable. The SQLITE_ACCESS_READ constant is
+** currently unused, though it might be used in a future release of
+** SQLite.
+*/
+#define SQLITE_ACCESS_EXISTS 0
+#define SQLITE_ACCESS_READWRITE 1 /* Used by PRAGMA temp_store_directory */
+#define SQLITE_ACCESS_READ 2 /* Unused */
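+
+/*
+** For example, an application or extension can probe for a file through
+** a VFS directly (a sketch; error handling elided and the filename is
+** arbitrary):
+**
+** <blockquote><pre>
+** sqlite3_vfs *pVfs = sqlite3_vfs_find(0);    // default VFS
+** int bExists = 0;
+** int rc = pVfs->xAccess(pVfs, "/tmp/test.db",
+**                        SQLITE_ACCESS_EXISTS, &bExists);
+** if( rc==SQLITE_OK && bExists ){ ... }       // the file exists
+** </pre></blockquote>
+*/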
+
+/*
+** CAPI3REF: Flags for the xShmLock VFS method
+**
+** These integer constants define the various locking operations
+** allowed by the xShmLock method of [sqlite3_io_methods]. The
+** following are the only legal combinations of flags to the
+** xShmLock method:
+**
+** <ul>
+** <li> SQLITE_SHM_LOCK | SQLITE_SHM_SHARED
+** <li> SQLITE_SHM_LOCK | SQLITE_SHM_EXCLUSIVE
+** <li> SQLITE_SHM_UNLOCK | SQLITE_SHM_SHARED
+** <li> SQLITE_SHM_UNLOCK | SQLITE_SHM_EXCLUSIVE
+** </ul>
+**
+** When unlocking, the same SHARED or EXCLUSIVE flag must be supplied as
+** was given on the corresponding lock.
+**
+** The xShmLock method can transition between unlocked and SHARED or
+** between unlocked and EXCLUSIVE. It cannot transition between SHARED
+** and EXCLUSIVE.
+*/
+#define SQLITE_SHM_UNLOCK 1
+#define SQLITE_SHM_LOCK 2
+#define SQLITE_SHM_SHARED 4
+#define SQLITE_SHM_EXCLUSIVE 8
+
+/*
+** CAPI3REF: Maximum xShmLock index
+**
+** The xShmLock method on [sqlite3_io_methods] may use values
+** between 0 and this upper bound as its "offset" argument.
+** The SQLite core will never attempt to acquire or release a
+** lock outside of this range.
+*/
+#define SQLITE_SHM_NLOCK 8
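+
+/*
+** For illustration (a sketch; pFile stands for a hypothetical open
+** [sqlite3_file] on a database in WAL mode), a SHARED lock on lock
+** slot 3 is acquired and then released with the matching flag:
+**
+** <blockquote><pre>
+** pFile->pMethods->xShmLock(pFile, 3, 1,
+**                           SQLITE_SHM_LOCK|SQLITE_SHM_SHARED);
+** pFile->pMethods->xShmLock(pFile, 3, 1,
+**                           SQLITE_SHM_UNLOCK|SQLITE_SHM_SHARED);
+** </pre></blockquote>
+*/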
+
+
+/*
+** CAPI3REF: Initialize The SQLite Library
+**
+** ^The sqlite3_initialize() routine initializes the
+** SQLite library. ^The sqlite3_shutdown() routine
+** deallocates any resources that were allocated by sqlite3_initialize().
+** These routines are designed to aid in process initialization and
+** shutdown on embedded systems. Workstation applications using
+** SQLite normally do not need to invoke either of these routines.
+**
+** A call to sqlite3_initialize() is an "effective" call if it is
+** the first time sqlite3_initialize() is invoked during the lifetime of
+** the process, or if it is the first time sqlite3_initialize() is invoked
+** following a call to sqlite3_shutdown(). ^(Only an effective call
+** of sqlite3_initialize() does any initialization. All other calls
+** are harmless no-ops.)^
+**
+** A call to sqlite3_shutdown() is an "effective" call if it is the first
+** call to sqlite3_shutdown() since the last sqlite3_initialize(). ^(Only
+** an effective call to sqlite3_shutdown() does any deinitialization.
+** All other valid calls to sqlite3_shutdown() are harmless no-ops.)^
+**
+** The sqlite3_initialize() interface is threadsafe, but sqlite3_shutdown()
+** is not. The sqlite3_shutdown() interface must only be called from a
+** single thread. All open [database connections] must be closed and all
+** other SQLite resources must be deallocated prior to invoking
+** sqlite3_shutdown().
+**
+** Among other things, ^sqlite3_initialize() will invoke
+** sqlite3_os_init(). Similarly, ^sqlite3_shutdown()
+** will invoke sqlite3_os_end().
+**
+** ^The sqlite3_initialize() routine returns [SQLITE_OK] on success.
+** ^If for some reason, sqlite3_initialize() is unable to initialize
+** the library (perhaps it is unable to allocate a needed resource such
+** as a mutex) it returns an [error code] other than [SQLITE_OK].
+**
+** ^The sqlite3_initialize() routine is called internally by many other
+** SQLite interfaces so that an application usually does not need to
+** invoke sqlite3_initialize() directly. For example, [sqlite3_open()]
+** calls sqlite3_initialize() so the SQLite library will be automatically
+** initialized when [sqlite3_open()] is called if it has not been initialized
+** already. ^However, if SQLite is compiled with the [SQLITE_OMIT_AUTOINIT]
+** compile-time option, then the automatic calls to sqlite3_initialize()
+** are omitted and the application must call sqlite3_initialize() directly
+** prior to using any other SQLite interface. For maximum portability,
+** it is recommended that applications always invoke sqlite3_initialize()
+** directly prior to using any other SQLite interface. Future releases
+** of SQLite may require this. In other words, the behavior exhibited
+** when SQLite is compiled with [SQLITE_OMIT_AUTOINIT] might become the
+** default behavior in some future release of SQLite.
+**
+** The sqlite3_os_init() routine does operating-system specific
+** initialization of the SQLite library. The sqlite3_os_end()
+** routine undoes the effect of sqlite3_os_init(). Typical tasks
+** performed by these routines include allocation or deallocation
+** of static resources, initialization of global variables,
+** setting up a default [sqlite3_vfs] module, or setting up
+** a default configuration using [sqlite3_config()].
+**
+** The application should never invoke either sqlite3_os_init()
+** or sqlite3_os_end() directly. The application should only invoke
+** sqlite3_initialize() and sqlite3_shutdown(). The sqlite3_os_init()
+** interface is called automatically by sqlite3_initialize() and
+** sqlite3_os_end() is called by sqlite3_shutdown(). Appropriate
+** implementations for sqlite3_os_init() and sqlite3_os_end()
+** are built into SQLite when it is compiled for Unix, Windows, or OS/2.
+** When [custom builds | built for other platforms]
+** (using the [SQLITE_OS_OTHER=1] compile-time
+** option) the application must supply a suitable implementation for
+** sqlite3_os_init() and sqlite3_os_end(). An application-supplied
+** implementation of sqlite3_os_init() or sqlite3_os_end()
+** must return [SQLITE_OK] on success and some other [error code] upon
+** failure.
+*/
+SQLITE_API int sqlite3_initialize(void);
+SQLITE_API int sqlite3_shutdown(void);
+SQLITE_API int sqlite3_os_init(void);
+SQLITE_API int sqlite3_os_end(void);
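+
+/*
+** For example, an application built with [SQLITE_OMIT_AUTOINIT] might
+** bracket all use of the library as follows (a sketch; error handling
+** abbreviated):
+**
+** <blockquote><pre>
+** if( sqlite3_initialize()!=SQLITE_OK ) return 1;
+** // ... open connections, run SQL, close all connections ...
+** sqlite3_shutdown();
+** </pre></blockquote>
+*/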
+
+/*
+** CAPI3REF: Configuring The SQLite Library
+**
+** The sqlite3_config() interface is used to make global configuration
+** changes to SQLite in order to tune SQLite to the specific needs of
+** the application. The default configuration is recommended for most
+** applications and so this routine is usually not necessary. It is
+** provided to support rare applications with unusual needs.
+**
+** <b>The sqlite3_config() interface is not threadsafe. The application
+** must ensure that no other SQLite interfaces are invoked by other
+** threads while sqlite3_config() is running.</b>
+**
+** The sqlite3_config() interface
+** may only be invoked prior to library initialization using
+** [sqlite3_initialize()] or after shutdown by [sqlite3_shutdown()].
+** ^If sqlite3_config() is called after [sqlite3_initialize()] and before
+** [sqlite3_shutdown()] then it will return SQLITE_MISUSE.
+** Note, however, that ^sqlite3_config() can be called as part of the
+** implementation of an application-defined [sqlite3_os_init()].
+**
+** The first argument to sqlite3_config() is an integer
+** [configuration option] that determines
+** what property of SQLite is to be configured. Subsequent arguments
+** vary depending on the [configuration option]
+** in the first argument.
+**
+** ^When a configuration option is set, sqlite3_config() returns [SQLITE_OK].
+** ^If the option is unknown or SQLite is unable to set the option
+** then this routine returns a non-zero [error code].
+*/
+SQLITE_API int sqlite3_config(int, ...);
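+
+/*
+** For example, to request the Serialized [threading mode] before the
+** library initializes (a sketch):
+**
+** <blockquote><pre>
+** int rc = sqlite3_config(SQLITE_CONFIG_SERIALIZED);
+** if( rc==SQLITE_OK ) rc = sqlite3_initialize();
+** </pre></blockquote>
+*/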
+
+/*
+** CAPI3REF: Configure database connections
+** METHOD: sqlite3
+**
+** The sqlite3_db_config() interface is used to make configuration
+** changes to a [database connection]. The interface is similar to
+** [sqlite3_config()] except that the changes apply to a single
+** [database connection] (specified in the first argument).
+**
+** The second argument to sqlite3_db_config(D,V,...) is the
+** [SQLITE_DBCONFIG_LOOKASIDE | configuration verb] - an integer code
+** that indicates what aspect of the [database connection] is being configured.
+** Subsequent arguments vary depending on the configuration verb.
+**
+** ^Calls to sqlite3_db_config() return SQLITE_OK if and only if
+** the call is considered successful.
+*/
+SQLITE_API int sqlite3_db_config(sqlite3*, int op, ...);
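+
+/*
+** For example, to enable enforcement of [foreign key constraints] on a
+** connection db and read back the resulting state (a sketch):
+**
+** <blockquote><pre>
+** int bFkOn = 0;
+** sqlite3_db_config(db, SQLITE_DBCONFIG_ENABLE_FKEY, 1, &bFkOn);
+** // bFkOn is now 1 if enforcement is on
+** </pre></blockquote>
+*/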
+
+/*
+** CAPI3REF: Memory Allocation Routines
+**
+** An instance of this object defines the interface between SQLite
+** and low-level memory allocation routines.
+**
+** This object is used in only one place in the SQLite interface.
+** A pointer to an instance of this object is the argument to
+** [sqlite3_config()] when the configuration option is
+** [SQLITE_CONFIG_MALLOC] or [SQLITE_CONFIG_GETMALLOC].
+** By creating an instance of this object
+** and passing it to [sqlite3_config]([SQLITE_CONFIG_MALLOC])
+** during configuration, an application can specify an alternative
+** memory allocation subsystem for SQLite to use for all of its
+** dynamic memory needs.
+**
+** Note that SQLite comes with several [built-in memory allocators]
+** that are perfectly adequate for the overwhelming majority of applications
+** and that this object is only useful to a tiny minority of applications
+** with specialized memory allocation requirements. This object is
+** also used during testing of SQLite in order to specify an alternative
+** memory allocator that simulates memory out-of-memory conditions in
+** order to verify that SQLite recovers gracefully from such
+** conditions.
+**
+** The xMalloc, xRealloc, and xFree methods must work like the
+** malloc(), realloc() and free() functions from the standard C library.
+** ^SQLite guarantees that the second argument to
+** xRealloc is always a value returned by a prior call to xRoundup.
+**
+** xSize should return the allocated size of a memory allocation
+** previously obtained from xMalloc or xRealloc. The allocated size
+** is always at least as big as the requested size but may be larger.
+**
+** The xRoundup method returns what would be the allocated size of
+** a memory allocation given a particular requested size. Most memory
+** allocators round up memory allocations at least to the next multiple
+** of 8. Some allocators round up to a larger multiple or to a power of 2.
+** Every memory allocation request coming in through [sqlite3_malloc()]
+** or [sqlite3_realloc()] first calls xRoundup. If xRoundup returns 0,
+** that causes the corresponding memory allocation to fail.
+**
+** The xInit method initializes the memory allocator. For example,
+** it might allocate any required mutexes or initialize internal data
+** structures. The xShutdown method is invoked (indirectly) by
+** [sqlite3_shutdown()] and should deallocate any resources acquired
+** by xInit. The pAppData pointer is used as the only parameter to
+** xInit and xShutdown.
+**
+** SQLite holds the [SQLITE_MUTEX_STATIC_MAIN] mutex when it invokes
+** the xInit method, so the xInit method need not be threadsafe. The
+** xShutdown method is only called from [sqlite3_shutdown()] so it does
+** not need to be threadsafe either. For all other methods, SQLite
+** holds the [SQLITE_MUTEX_STATIC_MEM] mutex as long as the
+** [SQLITE_CONFIG_MEMSTATUS] configuration option is turned on (which
+** it is by default) and so the methods are automatically serialized.
+** However, if [SQLITE_CONFIG_MEMSTATUS] is disabled, then the other
+** methods must be threadsafe or else make their own arrangements for
+** serialization.
+**
+** SQLite will never invoke xInit() more than once without an intervening
+** call to xShutdown().
+*/
+typedef struct sqlite3_mem_methods sqlite3_mem_methods;
+struct sqlite3_mem_methods {
+ void *(*xMalloc)(int); /* Memory allocation function */
+ void (*xFree)(void*); /* Free a prior allocation */
+ void *(*xRealloc)(void*,int); /* Resize an allocation */
+ int (*xSize)(void*); /* Return the size of an allocation */
+ int (*xRoundup)(int); /* Round up request size to allocation size */
+ int (*xInit)(void*); /* Initialize the memory allocator */
+ void (*xShutdown)(void*); /* Deinitialize the memory allocator */
+ void *pAppData; /* Argument to xInit() and xShutdown() */
+};
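+
+/*
+** For example, a counting wrapper can be layered over the current
+** allocator by reading it back with [SQLITE_CONFIG_GETMALLOC] and
+** installing a modified copy (a sketch; countingMalloc() is a
+** hypothetical function that bumps a counter and then calls the saved
+** defaultMem.xMalloc). This must happen before [sqlite3_initialize()]:
+**
+** <blockquote><pre>
+** static sqlite3_mem_methods defaultMem;
+** sqlite3_mem_methods wrapped;
+** sqlite3_config(SQLITE_CONFIG_GETMALLOC, &defaultMem);
+** wrapped = defaultMem;
+** wrapped.xMalloc = countingMalloc;
+** sqlite3_config(SQLITE_CONFIG_MALLOC, &wrapped);
+** </pre></blockquote>
+*/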
+
+/*
+** CAPI3REF: Configuration Options
+** KEYWORDS: {configuration option}
+**
+** These constants are the available integer configuration options that
+** can be passed as the first argument to the [sqlite3_config()] interface.
+**
+** New configuration options may be added in future releases of SQLite.
+** Existing configuration options might be discontinued. Applications
+** should check the return code from [sqlite3_config()] to make sure that
+** the call worked. The [sqlite3_config()] interface will return a
+** non-zero [error code] if a discontinued or unsupported configuration option
+** is invoked.
+**
+** <dl>
+** [[SQLITE_CONFIG_SINGLETHREAD]] <dt>SQLITE_CONFIG_SINGLETHREAD</dt>
+** <dd>There are no arguments to this option. ^This option sets the
+** [threading mode] to Single-thread. In other words, it disables
+** all mutexing and puts SQLite into a mode where it can only be used
+** by a single thread. ^If SQLite is compiled with
+** the [SQLITE_THREADSAFE | SQLITE_THREADSAFE=0] compile-time option then
+** it is not possible to change the [threading mode] from its default
+** value of Single-thread and so [sqlite3_config()] will return
+** [SQLITE_ERROR] if called with the SQLITE_CONFIG_SINGLETHREAD
+** configuration option.</dd>
+**
+** [[SQLITE_CONFIG_MULTITHREAD]] <dt>SQLITE_CONFIG_MULTITHREAD</dt>
+** <dd>There are no arguments to this option. ^This option sets the
+** [threading mode] to Multi-thread. In other words, it disables
+** mutexing on [database connection] and [prepared statement] objects.
+** The application is responsible for serializing access to
+** [database connections] and [prepared statements]. But other mutexes
+** are enabled so that SQLite will be safe to use in a multi-threaded
+** environment as long as no two threads attempt to use the same
+** [database connection] at the same time. ^If SQLite is compiled with
+** the [SQLITE_THREADSAFE | SQLITE_THREADSAFE=0] compile-time option then
+** it is not possible to set the Multi-thread [threading mode] and
+** [sqlite3_config()] will return [SQLITE_ERROR] if called with the
+** SQLITE_CONFIG_MULTITHREAD configuration option.</dd>
+**
+** [[SQLITE_CONFIG_SERIALIZED]] <dt>SQLITE_CONFIG_SERIALIZED</dt>
+** <dd>There are no arguments to this option. ^This option sets the
+** [threading mode] to Serialized. In other words, this option enables
+** all mutexes including the recursive
+** mutexes on [database connection] and [prepared statement] objects.
+** In this mode (which is the default when SQLite is compiled with
+** [SQLITE_THREADSAFE=1]) the SQLite library will itself serialize access
+** to [database connections] and [prepared statements] so that the
+** application is free to use the same [database connection] or the
+** same [prepared statement] in different threads at the same time.
+** ^If SQLite is compiled with
+** the [SQLITE_THREADSAFE | SQLITE_THREADSAFE=0] compile-time option then
+** it is not possible to set the Serialized [threading mode] and
+** [sqlite3_config()] will return [SQLITE_ERROR] if called with the
+** SQLITE_CONFIG_SERIALIZED configuration option.</dd>
+**
+** [[SQLITE_CONFIG_MALLOC]] <dt>SQLITE_CONFIG_MALLOC</dt>
+** <dd> ^(The SQLITE_CONFIG_MALLOC option takes a single argument which is
+** a pointer to an instance of the [sqlite3_mem_methods] structure.
+** The argument specifies
+** alternative low-level memory allocation routines to be used in place of
+** the memory allocation routines built into SQLite.)^ ^SQLite makes
+** its own private copy of the content of the [sqlite3_mem_methods] structure
+** before the [sqlite3_config()] call returns.</dd>
+**
+** [[SQLITE_CONFIG_GETMALLOC]] <dt>SQLITE_CONFIG_GETMALLOC</dt>
+** <dd> ^(The SQLITE_CONFIG_GETMALLOC option takes a single argument which
+** is a pointer to an instance of the [sqlite3_mem_methods] structure.
+** The [sqlite3_mem_methods]
+** structure is filled with the currently defined memory allocation routines.)^
+** This option can be used to overload the default memory allocation
+** routines with a wrapper that simulates memory allocation failure or
+** tracks memory usage, for example. </dd>
+**
+** [[SQLITE_CONFIG_SMALL_MALLOC]] <dt>SQLITE_CONFIG_SMALL_MALLOC</dt>
+** <dd> ^The SQLITE_CONFIG_SMALL_MALLOC option takes a single argument of
+** type int, interpreted as a boolean, which if true provides a hint to
+** SQLite that it should avoid large memory allocations if possible.
+** SQLite will run faster if it is free to make large memory allocations,
+** but some applications might prefer to run slower in exchange for
+** guarantees about memory fragmentation that are possible if large
+** allocations are avoided. This hint is normally off.
+** </dd>
+**
+** [[SQLITE_CONFIG_MEMSTATUS]] <dt>SQLITE_CONFIG_MEMSTATUS</dt>
+** <dd> ^The SQLITE_CONFIG_MEMSTATUS option takes a single argument of type int,
+** interpreted as a boolean, which enables or disables the collection of
+** memory allocation statistics. ^(When memory allocation statistics are
+** disabled, the following SQLite interfaces become non-operational:
+** <ul>
+** <li> [sqlite3_hard_heap_limit64()]
+** <li> [sqlite3_memory_used()]
+** <li> [sqlite3_memory_highwater()]
+** <li> [sqlite3_soft_heap_limit64()]
+** <li> [sqlite3_status64()]
+** </ul>)^
+** ^Memory allocation statistics are enabled by default unless SQLite is
+** compiled with [SQLITE_DEFAULT_MEMSTATUS]=0 in which case memory
+** allocation statistics are disabled by default.
+** </dd>
+**
+** [[SQLITE_CONFIG_SCRATCH]] <dt>SQLITE_CONFIG_SCRATCH</dt>
+** <dd> The SQLITE_CONFIG_SCRATCH option is no longer used.
+** </dd>
+**
+** [[SQLITE_CONFIG_PAGECACHE]] <dt>SQLITE_CONFIG_PAGECACHE</dt>
+** <dd> ^The SQLITE_CONFIG_PAGECACHE option specifies a memory pool
+** that SQLite can use for the database page cache with the default page
+** cache implementation.
+** This configuration option is a no-op if an application-defined page
+** cache implementation is loaded using [SQLITE_CONFIG_PCACHE2].
+** ^There are three arguments to SQLITE_CONFIG_PAGECACHE: A pointer to
+** 8-byte aligned memory (pMem), the size of each page cache line (sz),
+** and the number of cache lines (N).
+** The sz argument should be the size of the largest database page
+** (a power of two between 512 and 65536) plus some extra bytes for each
+** page header. ^The number of extra bytes needed by the page header
+** can be determined using [SQLITE_CONFIG_PCACHE_HDRSZ].
+** ^It is harmless, apart from the wasted memory,
+** for the sz parameter to be larger than necessary. The pMem
+** argument must be either a NULL pointer or a pointer to an 8-byte
+** aligned block of memory of at least sz*N bytes, otherwise
+** subsequent behavior is undefined.
+** ^When pMem is not NULL, SQLite will strive to use the memory provided
+** to satisfy page cache needs, falling back to [sqlite3_malloc()] if
+** a page cache line is larger than sz bytes or if all of the pMem buffer
+** is exhausted.
+** ^If pMem is NULL and N is non-zero, then each database connection
+** does an initial bulk allocation for page cache memory
+** from [sqlite3_malloc()] sufficient for N cache lines if N is positive or
+** of -1024*N bytes if N is negative. ^If additional
+** page cache memory is needed beyond what is provided by the initial
+** allocation, then SQLite goes to [sqlite3_malloc()] separately for each
+** additional cache line. (A usage sketch appears after the constant
+** definitions below.) </dd>
+**
+** [[SQLITE_CONFIG_HEAP]] <dt>SQLITE_CONFIG_HEAP</dt>
+** <dd> ^The SQLITE_CONFIG_HEAP option specifies a static memory buffer
+** that SQLite will use for all of its dynamic memory allocation needs
+** beyond those provided for by [SQLITE_CONFIG_PAGECACHE].
+** ^The SQLITE_CONFIG_HEAP option is only available if SQLite is compiled
+** with either [SQLITE_ENABLE_MEMSYS3] or [SQLITE_ENABLE_MEMSYS5] and returns
+** [SQLITE_ERROR] if invoked otherwise.
+** ^There are three arguments to SQLITE_CONFIG_HEAP:
+** An 8-byte aligned pointer to the memory,
+** the number of bytes in the memory buffer, and the minimum allocation size.
+** ^If the first pointer (the memory pointer) is NULL, then SQLite reverts
+** to using its default memory allocator (the system malloc() implementation),
+** undoing any prior invocation of [SQLITE_CONFIG_MALLOC]. ^If the
+** memory pointer is not NULL then the alternative memory
+** allocator is engaged to handle all of SQLite's memory allocation needs.
+** The first pointer (the memory pointer) must be aligned to an 8-byte
+** boundary or subsequent behavior of SQLite will be undefined.
+** The minimum allocation size is capped at 2**12. Reasonable values
+** for the minimum allocation size are 2**5 through 2**8.</dd>
+**
+** [[SQLITE_CONFIG_MUTEX]] <dt>SQLITE_CONFIG_MUTEX</dt>
+** <dd> ^(The SQLITE_CONFIG_MUTEX option takes a single argument which is a
+** pointer to an instance of the [sqlite3_mutex_methods] structure.
+** The argument specifies alternative low-level mutex routines to be used
+** in place of the mutex routines built into SQLite.)^ ^SQLite makes a copy of
+** the content of the [sqlite3_mutex_methods] structure before the call to
+** [sqlite3_config()] returns. ^If SQLite is compiled with
+** the [SQLITE_THREADSAFE | SQLITE_THREADSAFE=0] compile-time option then
+** the entire mutexing subsystem is omitted from the build and hence calls to
+** [sqlite3_config()] with the SQLITE_CONFIG_MUTEX configuration option will
+** return [SQLITE_ERROR].</dd>
+**
+** [[SQLITE_CONFIG_GETMUTEX]] <dt>SQLITE_CONFIG_GETMUTEX</dt>
+** <dd> ^(The SQLITE_CONFIG_GETMUTEX option takes a single argument which
+** is a pointer to an instance of the [sqlite3_mutex_methods] structure. The
+** [sqlite3_mutex_methods]
+** structure is filled with the currently defined mutex routines.)^
+** This option can be used to overload the default mutex allocation
+** routines with a wrapper used to track mutex usage for performance
+** profiling or testing, for example. ^If SQLite is compiled with
+** the [SQLITE_THREADSAFE | SQLITE_THREADSAFE=0] compile-time option then
+** the entire mutexing subsystem is omitted from the build and hence calls to
+** [sqlite3_config()] with the SQLITE_CONFIG_GETMUTEX configuration option will
+** return [SQLITE_ERROR].</dd>
+**
+** [[SQLITE_CONFIG_LOOKASIDE]] <dt>SQLITE_CONFIG_LOOKASIDE</dt>
+** <dd> ^(The SQLITE_CONFIG_LOOKASIDE option takes two arguments that determine
+** the default size of lookaside memory on each [database connection].
+** The first argument is the
+** size of each lookaside buffer slot and the second is the number of
+** slots allocated to each database connection.)^ ^(SQLITE_CONFIG_LOOKASIDE
+** sets the <i>default</i> lookaside size. The [SQLITE_DBCONFIG_LOOKASIDE]
+** option to [sqlite3_db_config()] can be used to change the lookaside
+** configuration on individual connections.)^ </dd>
+**
+** [[SQLITE_CONFIG_PCACHE2]] <dt>SQLITE_CONFIG_PCACHE2</dt>
+** <dd> ^(The SQLITE_CONFIG_PCACHE2 option takes a single argument which is
+** a pointer to an [sqlite3_pcache_methods2] object. This object specifies
+** the interface to a custom page cache implementation.)^
+** ^SQLite makes a copy of the [sqlite3_pcache_methods2] object.</dd>
+**
+** [[SQLITE_CONFIG_GETPCACHE2]] <dt>SQLITE_CONFIG_GETPCACHE2</dt>
+** <dd> ^(The SQLITE_CONFIG_GETPCACHE2 option takes a single argument which
+** is a pointer to an [sqlite3_pcache_methods2] object. SQLite copies
+** the current page cache implementation into that object.)^ </dd>
+**
+** [[SQLITE_CONFIG_LOG]] <dt>SQLITE_CONFIG_LOG</dt>
+** <dd> The SQLITE_CONFIG_LOG option is used to configure the SQLite
+** global [error log].
+** (^The SQLITE_CONFIG_LOG option takes two arguments: a pointer to a
+** function with a call signature of void(*)(void*,int,const char*),
+** and a pointer to void. ^If the function pointer is not NULL, it is
+** invoked by [sqlite3_log()] to process each logging event. ^If the
+** function pointer is NULL, the [sqlite3_log()] interface becomes a no-op.
+** ^The void pointer that is the second argument to SQLITE_CONFIG_LOG is
+** passed through as the first parameter to the application-defined logger
+** function whenever that function is invoked. ^The second parameter to
+** the logger function is a copy of the first parameter to the corresponding
+** [sqlite3_log()] call and is intended to be a [result code] or an
+** [extended result code]. ^The third parameter passed to the logger is
+** the log message after formatting via [sqlite3_snprintf()].
+** The SQLite logging interface is not reentrant; the logger function
+** supplied by the application must not invoke any SQLite interface.
+** In a multi-threaded application, the application-defined logger
+** function must be threadsafe. </dd>
+**
+** [[SQLITE_CONFIG_URI]] <dt>SQLITE_CONFIG_URI
+** <dd>^(The SQLITE_CONFIG_URI option takes a single argument of type int.
+** If non-zero, then URI handling is globally enabled. If the parameter is zero,
+** then URI handling is globally disabled.)^ ^If URI handling is globally
+** enabled, all filenames passed to [sqlite3_open()], [sqlite3_open_v2()],
+** [sqlite3_open16()] or
+** specified as part of [ATTACH] commands are interpreted as URIs, regardless
+** of whether or not the [SQLITE_OPEN_URI] flag is set when the database
+** connection is opened. ^If it is globally disabled, filenames are
+** only interpreted as URIs if the SQLITE_OPEN_URI flag is set when the
+** database connection is opened. ^(By default, URI handling is globally
+** disabled. The default value may be changed by compiling with the
+** [SQLITE_USE_URI] symbol defined.)^
+**
+** [[SQLITE_CONFIG_COVERING_INDEX_SCAN]] <dt>SQLITE_CONFIG_COVERING_INDEX_SCAN
+** <dd>^The SQLITE_CONFIG_COVERING_INDEX_SCAN option takes a single integer
+** argument which is interpreted as a boolean in order to enable or disable
+** the use of covering indices for full table scans in the query optimizer.
+** ^The default setting is determined
+** by the [SQLITE_ALLOW_COVERING_INDEX_SCAN] compile-time option, or is "on"
+** if that compile-time option is omitted.
+** The ability to disable the use of covering indices for full table scans
+** is provided because some incorrectly coded legacy applications might
+** malfunction when the optimization is enabled. Providing the ability to
+** disable the optimization allows the older, buggy application code to work
+** without change even with newer versions of SQLite.
+**
+** [[SQLITE_CONFIG_PCACHE]] [[SQLITE_CONFIG_GETPCACHE]]
+** <dt>SQLITE_CONFIG_PCACHE and SQLITE_CONFIG_GETPCACHE
+** <dd> These options are obsolete and should not be used by new code.
+** They are retained for backwards compatibility but are now no-ops.
+** </dd>
+**
+** [[SQLITE_CONFIG_SQLLOG]]
+** <dt>SQLITE_CONFIG_SQLLOG
+** <dd>This option is only available if SQLite is compiled with the
+** [SQLITE_ENABLE_SQLLOG] pre-processor macro defined. The first argument should
+** be a pointer to a function of type void(*)(void*,sqlite3*,const char*, int).
+** The second should be of type (void*). The callback is invoked by the library
+** in three separate circumstances, identified by the value passed as the
+** fourth parameter. If the fourth parameter is 0, then the database connection
+** passed as the second argument has just been opened. The third argument
+** points to a buffer containing the name of the main database file. If the
+** fourth parameter is 1, then the SQL statement that the third parameter
+** points to has just been executed. Or, if the fourth parameter is 2, then
+** the connection being passed as the second parameter is being closed. The
+** third parameter is passed NULL in this case. An example of using this
+** configuration option can be seen in the "test_sqllog.c" source file in
+** the canonical SQLite source tree.</dd>
+**
+** [[SQLITE_CONFIG_MMAP_SIZE]]
+** <dt>SQLITE_CONFIG_MMAP_SIZE
+** <dd>^SQLITE_CONFIG_MMAP_SIZE takes two 64-bit integer (sqlite3_int64) values
+** that are the default mmap size limit (the default setting for
+** [PRAGMA mmap_size]) and the maximum allowed mmap size limit.
+** ^The default setting can be overridden by each database connection using
+** either the [PRAGMA mmap_size] command, or by using the
+** [SQLITE_FCNTL_MMAP_SIZE] file control. ^(The maximum allowed mmap size
+** will be silently truncated if necessary so that it does not exceed the
+** compile-time maximum mmap size set by the
+** [SQLITE_MAX_MMAP_SIZE] compile-time option.)^
+** ^If either argument to this option is negative, then that argument is
+** changed to its compile-time default.
+**
+** [[SQLITE_CONFIG_WIN32_HEAPSIZE]]
+** <dt>SQLITE_CONFIG_WIN32_HEAPSIZE
+** <dd>^The SQLITE_CONFIG_WIN32_HEAPSIZE option is only available if SQLite is
+** compiled for Windows with the [SQLITE_WIN32_MALLOC] pre-processor macro
+** defined. ^SQLITE_CONFIG_WIN32_HEAPSIZE takes a 32-bit unsigned integer value
+** that specifies the maximum size of the created heap.
+**
+** [[SQLITE_CONFIG_PCACHE_HDRSZ]]
+** <dt>SQLITE_CONFIG_PCACHE_HDRSZ
+** <dd>^The SQLITE_CONFIG_PCACHE_HDRSZ option takes a single parameter which
+** is a pointer to an integer and writes into that integer the number of extra
+** bytes per page required for each page in [SQLITE_CONFIG_PAGECACHE].
+** The amount of extra space required can change depending on the compiler,
+** target platform, and SQLite version.
+**
+** [[SQLITE_CONFIG_PMASZ]]
+** <dt>SQLITE_CONFIG_PMASZ
+** <dd>^The SQLITE_CONFIG_PMASZ option takes a single parameter which
+** is an unsigned integer and sets the "Minimum PMA Size" for the multithreaded
+** sorter to that integer. The default minimum PMA Size is set by the
+** [SQLITE_SORTER_PMASZ] compile-time option. New threads are launched
+** to help with sort operations when multithreaded sorting
+** is enabled (using the [PRAGMA threads] command) and the amount of content
+** to be sorted exceeds the page size times the minimum of the
+** [PRAGMA cache_size] setting and this value.
+**
+** [[SQLITE_CONFIG_STMTJRNL_SPILL]]
+** <dt>SQLITE_CONFIG_STMTJRNL_SPILL
+** <dd>^The SQLITE_CONFIG_STMTJRNL_SPILL option takes a single parameter which
+** becomes the [statement journal] spill-to-disk threshold.
+** [Statement journals] are held in memory until their size (in bytes)
+** exceeds this threshold, at which point they are written to disk.
+** Or if the threshold is -1, statement journals are always held
+** exclusively in memory.
+** Since many statement journals never become large, setting the spill
+** threshold to a value such as 64KiB can greatly reduce the amount of
+** I/O required to support statement rollback.
+** The default value for this setting is controlled by the
+** [SQLITE_STMTJRNL_SPILL] compile-time option.
+**
+** [[SQLITE_CONFIG_SORTERREF_SIZE]]
+** <dt>SQLITE_CONFIG_SORTERREF_SIZE
+** <dd>The SQLITE_CONFIG_SORTERREF_SIZE option accepts a single parameter
+** of type (int) - the new value of the sorter-reference size threshold.
+** Usually, when SQLite uses an external sort to order records according
+** to an ORDER BY clause, all fields required by the caller are present in the
+** sorted records. However, if SQLite determines based on the declared type
+** of a table column that its values are likely to be very large - larger
+** than the configured sorter-reference size threshold - then a reference
+** is stored in each sorted record and the required column values loaded
+** from the database as records are returned in sorted order. The default
+** value for this option is to never use this optimization. Specifying a
+** negative value for this option restores the default behaviour.
+** This option is only available if SQLite is compiled with the
+** [SQLITE_ENABLE_SORTER_REFERENCES] compile-time option.
+**
+** [[SQLITE_CONFIG_MEMDB_MAXSIZE]]
+** <dt>SQLITE_CONFIG_MEMDB_MAXSIZE
+** <dd>The SQLITE_CONFIG_MEMDB_MAXSIZE option accepts a single
+** [sqlite3_int64] parameter which is the default maximum size for an in-memory
+** database created using [sqlite3_deserialize()]. This default maximum
+** size can be adjusted up or down for individual databases using the
+** [SQLITE_FCNTL_SIZE_LIMIT] [sqlite3_file_control|file-control]. If this
+** configuration setting is never used, then the default maximum is determined
+** by the [SQLITE_MEMDB_DEFAULT_MAXSIZE] compile-time option. If that
+** compile-time option is not set, then the default maximum is 1073741824.
+** </dl>
+*/
+#define SQLITE_CONFIG_SINGLETHREAD 1 /* nil */
+#define SQLITE_CONFIG_MULTITHREAD 2 /* nil */
+#define SQLITE_CONFIG_SERIALIZED 3 /* nil */
+#define SQLITE_CONFIG_MALLOC 4 /* sqlite3_mem_methods* */
+#define SQLITE_CONFIG_GETMALLOC 5 /* sqlite3_mem_methods* */
+#define SQLITE_CONFIG_SCRATCH 6 /* No longer used */
+#define SQLITE_CONFIG_PAGECACHE 7 /* void*, int sz, int N */
+#define SQLITE_CONFIG_HEAP 8 /* void*, int nByte, int min */
+#define SQLITE_CONFIG_MEMSTATUS 9 /* boolean */
+#define SQLITE_CONFIG_MUTEX 10 /* sqlite3_mutex_methods* */
+#define SQLITE_CONFIG_GETMUTEX 11 /* sqlite3_mutex_methods* */
+/* previously SQLITE_CONFIG_CHUNKALLOC 12 which is now unused. */
+#define SQLITE_CONFIG_LOOKASIDE 13 /* int int */
+#define SQLITE_CONFIG_PCACHE 14 /* no-op */
+#define SQLITE_CONFIG_GETPCACHE 15 /* no-op */
+#define SQLITE_CONFIG_LOG 16 /* xFunc, void* */
+#define SQLITE_CONFIG_URI 17 /* int */
+#define SQLITE_CONFIG_PCACHE2 18 /* sqlite3_pcache_methods2* */
+#define SQLITE_CONFIG_GETPCACHE2 19 /* sqlite3_pcache_methods2* */
+#define SQLITE_CONFIG_COVERING_INDEX_SCAN 20 /* int */
+#define SQLITE_CONFIG_SQLLOG 21 /* xSqllog, void* */
+#define SQLITE_CONFIG_MMAP_SIZE 22 /* sqlite3_int64, sqlite3_int64 */
+#define SQLITE_CONFIG_WIN32_HEAPSIZE 23 /* int nByte */
+#define SQLITE_CONFIG_PCACHE_HDRSZ 24 /* int *psz */
+#define SQLITE_CONFIG_PMASZ 25 /* unsigned int szPma */
+#define SQLITE_CONFIG_STMTJRNL_SPILL 26 /* int nByte */
+#define SQLITE_CONFIG_SMALL_MALLOC 27 /* boolean */
+#define SQLITE_CONFIG_SORTERREF_SIZE 28 /* int nByte */
+#define SQLITE_CONFIG_MEMDB_MAXSIZE 29 /* sqlite3_int64 */
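+
+/*
+** As a usage sketch for [SQLITE_CONFIG_PAGECACHE] (referenced above):
+** the per-page header size is queried first with
+** [SQLITE_CONFIG_PCACHE_HDRSZ]; 8-byte alignment of the buffer is
+** assumed, which malloc() normally provides:
+**
+** <blockquote><pre>
+** int szHdr = 0;
+** sqlite3_config(SQLITE_CONFIG_PCACHE_HDRSZ, &szHdr);
+** int sz = 4096 + szHdr;          // page size plus page header
+** int N = 500;                    // number of cache lines
+** void *pMem = malloc((size_t)sz*(size_t)N);
+** sqlite3_config(SQLITE_CONFIG_PAGECACHE, pMem, sz, N);
+** </pre></blockquote>
+*/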
+
+/*
+** CAPI3REF: Database Connection Configuration Options
+**
+** These constants are the available integer configuration options that
+** can be passed as the second argument to the [sqlite3_db_config()] interface.
+**
+** New configuration options may be added in future releases of SQLite.
+** Existing configuration options might be discontinued. Applications
+** should check the return code from [sqlite3_db_config()] to make sure that
+** the call worked. ^The [sqlite3_db_config()] interface will return a
+** non-zero [error code] if a discontinued or unsupported configuration option
+** is invoked.
+**
+** <dl>
+** [[SQLITE_DBCONFIG_LOOKASIDE]]
+** <dt>SQLITE_DBCONFIG_LOOKASIDE</dt>
+** <dd> ^This option takes three additional arguments that determine the
+** [lookaside memory allocator] configuration for the [database connection].
+** ^The first argument (the third parameter to [sqlite3_db_config()]) is a
+** pointer to a memory buffer to use for lookaside memory.
+** ^The first argument after the SQLITE_DBCONFIG_LOOKASIDE verb
+** may be NULL in which case SQLite will allocate the
+** lookaside buffer itself using [sqlite3_malloc()]. ^The second argument is the
+** size of each lookaside buffer slot. ^The third argument is the number of
+** slots. The size of the buffer in the first argument must be greater than
+** or equal to the product of the second and third arguments. The buffer
+** must be aligned to an 8-byte boundary. ^If the second argument to
+** SQLITE_DBCONFIG_LOOKASIDE is not a multiple of 8, it is internally
+** rounded down to the next smaller multiple of 8. ^(The lookaside memory
+** configuration for a database connection can only be changed when that
+** connection is not currently using lookaside memory, or in other words
+** when the "current value" returned by
+** [sqlite3_db_status](D,[SQLITE_DBSTATUS_LOOKASIDE_USED],...) is zero.
+** Any attempt to change the lookaside memory configuration when lookaside
+** memory is in use leaves the configuration unchanged and returns
+** [SQLITE_BUSY].)^</dd>
+**
+** [[SQLITE_DBCONFIG_ENABLE_FKEY]]
+** <dt>SQLITE_DBCONFIG_ENABLE_FKEY</dt>
+** <dd> ^This option is used to enable or disable the enforcement of
+** [foreign key constraints]. There should be two additional arguments.
+** The first argument is an integer which is 0 to disable FK enforcement,
+** positive to enable FK enforcement or negative to leave FK enforcement
+** unchanged. The second parameter is a pointer to an integer into which
+** is written 0 or 1 to indicate whether FK enforcement is off or on
+** following this call. The second parameter may be a NULL pointer, in
+** which case the FK enforcement setting is not reported back. </dd>
+**
+** [[SQLITE_DBCONFIG_ENABLE_TRIGGER]]
+** <dt>SQLITE_DBCONFIG_ENABLE_TRIGGER</dt>
+** <dd> ^This option is used to enable or disable [CREATE TRIGGER | triggers].
+** There should be two additional arguments.
+** The first argument is an integer which is 0 to disable triggers,
+** positive to enable triggers or negative to leave the setting unchanged.
+** The second parameter is a pointer to an integer into which
+** is written 0 or 1 to indicate whether triggers are disabled or enabled
+** following this call. The second parameter may be a NULL pointer, in
+** which case the trigger setting is not reported back. </dd>
+**
+** [[SQLITE_DBCONFIG_ENABLE_VIEW]]
+** <dt>SQLITE_DBCONFIG_ENABLE_VIEW</dt>
+** <dd> ^This option is used to enable or disable [CREATE VIEW | views].
+** There should be two additional arguments.
+** The first argument is an integer which is 0 to disable views,
+** positive to enable views or negative to leave the setting unchanged.
+** The second parameter is a pointer to an integer into which
+** is written 0 or 1 to indicate whether views are disabled or enabled
+** following this call. The second parameter may be a NULL pointer, in
+** which case the view setting is not reported back. </dd>
+**
+** [[SQLITE_DBCONFIG_ENABLE_FTS3_TOKENIZER]]
+** <dt>SQLITE_DBCONFIG_ENABLE_FTS3_TOKENIZER</dt>
+** <dd> ^This option is used to enable or disable the
+** [fts3_tokenizer()] function which is part of the
+** [FTS3] full-text search engine extension.
+** There should be two additional arguments.
+** The first argument is an integer which is 0 to disable fts3_tokenizer(),
+** positive to enable fts3_tokenizer(), or negative to leave the setting
+** unchanged.
+** The second parameter is a pointer to an integer into which
+** is written 0 or 1 to indicate whether fts3_tokenizer is disabled or enabled
+** following this call. The second parameter may be a NULL pointer, in
+** which case the new setting is not reported back. </dd>
+**
+** [[SQLITE_DBCONFIG_ENABLE_LOAD_EXTENSION]]
+** <dt>SQLITE_DBCONFIG_ENABLE_LOAD_EXTENSION</dt>
+** <dd> ^This option is used to enable or disable the [sqlite3_load_extension()]
+** interface independently of the [load_extension()] SQL function.
+** The [sqlite3_enable_load_extension()] API enables or disables both the
+** C-API [sqlite3_load_extension()] and the SQL function [load_extension()].
+** There should be two additional arguments.
+** When the first argument to this interface is 1, then only the C-API is
+** enabled and the SQL function remains disabled. If the first argument to
+** this interface is 0, then both the C-API and the SQL function are disabled.
+** If the first argument is -1, then no changes are made to the state of either
+** the C-API or the SQL function.
+** The second parameter is a pointer to an integer into which
+** is written 0 or 1 to indicate whether [sqlite3_load_extension()] interface
+** is disabled or enabled following this call. The second parameter may
+** be a NULL pointer, in which case the new setting is not reported back.
+** </dd>
+**
+** [[SQLITE_DBCONFIG_MAINDBNAME]] <dt>SQLITE_DBCONFIG_MAINDBNAME</dt>
+** <dd> ^This option is used to change the name of the "main" database
+** schema. ^The sole argument is a pointer to a constant UTF8 string
+** which will become the new schema name in place of "main". ^SQLite
+** does not make a copy of the new main schema name string, so the application
+** must ensure that the argument passed into this DBCONFIG option is unchanged
+** until after the database connection closes.
+** </dd>
+**
+** [[SQLITE_DBCONFIG_NO_CKPT_ON_CLOSE]]
+** <dt>SQLITE_DBCONFIG_NO_CKPT_ON_CLOSE</dt>
+** <dd> Usually, when a database in wal mode is closed or detached from a
+** database handle, SQLite checks if this will mean that there are now no
+** connections at all to the database. If so, it performs a checkpoint
+** operation before closing the connection. This option may be used to
+** override this behaviour. The first parameter passed to this operation
+** is an integer - positive to disable checkpoints-on-close, or zero (the
+** default) to enable them, and negative to leave the setting unchanged.
+** The second parameter is a pointer to an integer
+** into which is written 0 or 1 to indicate whether checkpoints-on-close
+** have been disabled - 0 if they are not disabled, 1 if they are.
+** </dd>
+**
+** [[SQLITE_DBCONFIG_ENABLE_QPSG]] <dt>SQLITE_DBCONFIG_ENABLE_QPSG</dt>
+** <dd>^(The SQLITE_DBCONFIG_ENABLE_QPSG option activates or deactivates
+** the [query planner stability guarantee] (QPSG). When the QPSG is active,
+** a single SQL query statement will always use the same algorithm regardless
+** of values of [bound parameters].)^ The QPSG disables some query optimizations
+** that look at the values of bound parameters, which can make some queries
+** slower. But the QPSG has the advantage of more predictable behavior. With
+** the QPSG active, SQLite will always use the same query plan in the field as
+** was used during testing in the lab.
+** The first argument to this setting is an integer which is 0 to disable
+** the QPSG, positive to enable QPSG, or negative to leave the setting
+** unchanged. The second parameter is a pointer to an integer into which
+** is written 0 or 1 to indicate whether the QPSG is disabled or enabled
+** following this call.
+** </dd>
+**
+** [[SQLITE_DBCONFIG_TRIGGER_EQP]] <dt>SQLITE_DBCONFIG_TRIGGER_EQP</dt>
+** <dd> By default, the output of EXPLAIN QUERY PLAN commands does not
+** include output for any operations performed by trigger programs. This
+** option is used to set or clear (the default) a flag that governs this
+** behavior. The first parameter passed to this operation is an integer -
+** positive to enable output for trigger programs, or zero to disable it,
+** or negative to leave the setting unchanged.
+** The second parameter is a pointer to an integer into which is written
+** 0 or 1 to indicate whether output-for-triggers has been disabled - 0 if
+** it is not disabled, 1 if it is.
+** </dd>
+**
+** [[SQLITE_DBCONFIG_RESET_DATABASE]] <dt>SQLITE_DBCONFIG_RESET_DATABASE</dt>
+** <dd> Set the SQLITE_DBCONFIG_RESET_DATABASE flag and then run
+** [VACUUM] in order to reset a database back to an empty database
+** with no schema and no content. The following process works even for
+** a badly corrupted database file:
+** <ol>
+** <li> If the database connection is newly opened, make sure it has read the
+** database schema by preparing then discarding some query against the
+** database, or calling sqlite3_table_column_metadata(), ignoring any
+** errors. This step is only necessary if the application desires to keep
+** the database in WAL mode after the reset if it was in WAL mode before
+** the reset.
+** <li> sqlite3_db_config(db, SQLITE_DBCONFIG_RESET_DATABASE, 1, 0);
+** <li> [sqlite3_exec](db, "[VACUUM]", 0, 0, 0);
+** <li> sqlite3_db_config(db, SQLITE_DBCONFIG_RESET_DATABASE, 0, 0);
+** </ol>
+** Because resetting a database is destructive and irreversible, the
+** process requires the use of this obscure API and multiple steps to help
+** ensure that it does not happen by accident. (A coded sketch of these
+** steps appears after the constant definitions below.)
+**
+** [[SQLITE_DBCONFIG_DEFENSIVE]] <dt>SQLITE_DBCONFIG_DEFENSIVE</dt>
+** <dd>The SQLITE_DBCONFIG_DEFENSIVE option activates or deactivates the
+** "defensive" flag for a database connection. When the defensive
+** flag is enabled, language features that allow ordinary SQL to
+** deliberately corrupt the database file are disabled. The disabled
+** features include but are not limited to the following:
+** <ul>
+** <li> The [PRAGMA writable_schema=ON] statement.
+** <li> The [PRAGMA journal_mode=OFF] statement.
+** <li> Writes to the [sqlite_dbpage] virtual table.
+** <li> Direct writes to [shadow tables].
+** </ul>
+** </dd>
+**
+** [[SQLITE_DBCONFIG_WRITABLE_SCHEMA]] <dt>SQLITE_DBCONFIG_WRITABLE_SCHEMA</dt>
+** <dd>The SQLITE_DBCONFIG_WRITABLE_SCHEMA option activates or deactivates the
+** "writable_schema" flag. This has the same effect and is logically equivalent
+** to setting [PRAGMA writable_schema=ON] or [PRAGMA writable_schema=OFF].
+** The first argument to this setting is an integer which is 0 to disable
+** the writable_schema, positive to enable writable_schema, or negative to
+** leave the setting unchanged. The second parameter is a pointer to an
+** integer into which is written 0 or 1 to indicate whether the writable_schema
+** is enabled or disabled following this call.
+** </dd>
+**
+** [[SQLITE_DBCONFIG_LEGACY_ALTER_TABLE]]
+** <dt>SQLITE_DBCONFIG_LEGACY_ALTER_TABLE</dt>
+** <dd>The SQLITE_DBCONFIG_LEGACY_ALTER_TABLE option activates or deactivates
+** the legacy behavior of the [ALTER TABLE RENAME] command such that it
+** behaves as it did prior to [version 3.24.0] (2018-06-04). See the
+** "Compatibility Notice" on the [ALTER TABLE RENAME documentation] for
+** additional information. This feature can also be turned on and off
+** using the [PRAGMA legacy_alter_table] statement.
+** </dd>
+**
+** [[SQLITE_DBCONFIG_DQS_DML]]
+** <dt>SQLITE_DBCONFIG_DQS_DML</dt>
+** <dd>The SQLITE_DBCONFIG_DQS_DML option activates or deactivates
+** the legacy [double-quoted string literal] misfeature for DML statements
+** only, that is DELETE, INSERT, SELECT, and UPDATE statements. The
+** default value of this setting is determined by the [-DSQLITE_DQS]
+** compile-time option.
+** </dd>
+**
+** [[SQLITE_DBCONFIG_DQS_DDL]]
+** <dt>SQLITE_DBCONFIG_DQS_DDL</dt>
+** <dd>The SQLITE_DBCONFIG_DQS_DDL option activates or deactivates
+** the legacy [double-quoted string literal] misfeature for DDL statements,
+** such as CREATE TABLE and CREATE INDEX. The
+** default value of this setting is determined by the [-DSQLITE_DQS]
+** compile-time option.
+** </dd>
+**
+** [[SQLITE_DBCONFIG_TRUSTED_SCHEMA]]
+** <dt>SQLITE_DBCONFIG_TRUSTED_SCHEMA</dt>
+** <dd>The SQLITE_DBCONFIG_TRUSTED_SCHEMA option tells SQLite to
+** assume that database schemas are untainted by malicious content.
+** When the SQLITE_DBCONFIG_TRUSTED_SCHEMA option is disabled, SQLite
+** takes additional defensive steps to protect the application from harm
+** including:
+** <ul>
+** <li> Prohibit the use of SQL functions inside triggers, views,
+** CHECK constraints, DEFAULT clauses, expression indexes,
+** partial indexes, or generated columns
+** unless those functions are tagged with [SQLITE_INNOCUOUS].
+** <li> Prohibit the use of virtual tables inside of triggers or views
+** unless those virtual tables are tagged with [SQLITE_VTAB_INNOCUOUS].
+** </ul>
+** This setting defaults to "on" for legacy compatibility, however
+** all applications are advised to turn it off if possible. This setting
+** can also be controlled using the [PRAGMA trusted_schema] statement.
+** </dd>
+**
+** [[SQLITE_DBCONFIG_LEGACY_FILE_FORMAT]]
+** <dt>SQLITE_DBCONFIG_LEGACY_FILE_FORMAT</dt>
+** <dd>The SQLITE_DBCONFIG_LEGACY_FILE_FORMAT option activates or deactivates
+** the legacy file format flag. When activated, this flag causes all newly
+** created database files to have a schema format version number (the 4-byte
+** integer found at offset 44 into the database header) of 1. This in turn
+** means that the resulting database file will be readable and writable by
+** any SQLite version back to 3.0.0 ([dateof:3.0.0]). Without this setting,
+** newly created databases are generally not understandable by SQLite versions
+** prior to 3.3.0 ([dateof:3.3.0]). As these words are written, there
+** is now scarcely any need to generate database files that are compatible
+** all the way back to version 3.0.0, and so this setting is of little
+** practical use, but is provided so that SQLite can continue to claim the
+** ability to generate new database files that are compatible with version
+** 3.0.0.
+** <p>Note that when the SQLITE_DBCONFIG_LEGACY_FILE_FORMAT setting is on,
+** the [VACUUM] command will fail with an obscure error when attempting to
+** process a table with generated columns and a descending index. This is
+** not considered a bug since SQLite versions 3.3.0 and earlier do not support
+** either generated columns or descending indexes.
+** </dd>
+** </dl>
+*/
+#define SQLITE_DBCONFIG_MAINDBNAME 1000 /* const char* */
+#define SQLITE_DBCONFIG_LOOKASIDE 1001 /* void* int int */
+#define SQLITE_DBCONFIG_ENABLE_FKEY 1002 /* int int* */
+#define SQLITE_DBCONFIG_ENABLE_TRIGGER 1003 /* int int* */
+#define SQLITE_DBCONFIG_ENABLE_FTS3_TOKENIZER 1004 /* int int* */
+#define SQLITE_DBCONFIG_ENABLE_LOAD_EXTENSION 1005 /* int int* */
+#define SQLITE_DBCONFIG_NO_CKPT_ON_CLOSE 1006 /* int int* */
+#define SQLITE_DBCONFIG_ENABLE_QPSG 1007 /* int int* */
+#define SQLITE_DBCONFIG_TRIGGER_EQP 1008 /* int int* */
+#define SQLITE_DBCONFIG_RESET_DATABASE 1009 /* int int* */
+#define SQLITE_DBCONFIG_DEFENSIVE 1010 /* int int* */
+#define SQLITE_DBCONFIG_WRITABLE_SCHEMA 1011 /* int int* */
+#define SQLITE_DBCONFIG_LEGACY_ALTER_TABLE 1012 /* int int* */
+#define SQLITE_DBCONFIG_DQS_DML 1013 /* int int* */
+#define SQLITE_DBCONFIG_DQS_DDL 1014 /* int int* */
+#define SQLITE_DBCONFIG_ENABLE_VIEW 1015 /* int int* */
+#define SQLITE_DBCONFIG_LEGACY_FILE_FORMAT 1016 /* int int* */
+#define SQLITE_DBCONFIG_TRUSTED_SCHEMA 1017 /* int int* */
+#define SQLITE_DBCONFIG_MAX 1017 /* Largest DBCONFIG */
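+
+/*
+** As a coded sketch of the [SQLITE_DBCONFIG_RESET_DATABASE] procedure
+** described above (error checking elided; the metadata call is made
+** only to force the schema to be read, and its result is ignored):
+**
+** <blockquote><pre>
+** sqlite3_table_column_metadata(db, "main", 0, 0, 0, 0, 0, 0, 0);
+** sqlite3_db_config(db, SQLITE_DBCONFIG_RESET_DATABASE, 1, 0);
+** sqlite3_exec(db, "VACUUM", 0, 0, 0);
+** sqlite3_db_config(db, SQLITE_DBCONFIG_RESET_DATABASE, 0, 0);
+** </pre></blockquote>
+*/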
+
+/*
+** CAPI3REF: Enable Or Disable Extended Result Codes
+** METHOD: sqlite3
+**
+** ^The sqlite3_extended_result_codes() routine enables or disables the
+** [extended result codes] feature of SQLite. ^The extended result
+** codes are disabled by default for historical compatibility.
+*/
+SQLITE_API int sqlite3_extended_result_codes(sqlite3*, int onoff);
+
+/*
+** CAPI3REF: Last Insert Rowid
+** METHOD: sqlite3
+**
+** ^Each entry in most SQLite tables (except for [WITHOUT ROWID] tables)
+** has a unique 64-bit signed
+** integer key called the [ROWID | "rowid"]. ^The rowid is always available
+** as an undeclared column named ROWID, OID, or _ROWID_ as long as those
+** names are not also used by explicitly declared columns. ^If
+** the table has a column of type [INTEGER PRIMARY KEY] then that column
+** is another alias for the rowid.
+**
+** ^The sqlite3_last_insert_rowid(D) interface usually returns the [rowid] of
+** the most recent successful [INSERT] into a rowid table or [virtual table]
+** on database connection D. ^Inserts into [WITHOUT ROWID] tables are not
+** recorded. ^If no successful [INSERT]s into rowid tables have ever occurred
+** on the database connection D, then sqlite3_last_insert_rowid(D) returns
+** zero.
+**
+** As well as being set automatically as rows are inserted into database
+** tables, the value returned by this function may be set explicitly by
+** [sqlite3_set_last_insert_rowid()].
+**
+** Some virtual table implementations may INSERT rows into rowid tables as
+** part of committing a transaction (e.g. to flush data accumulated in memory
+** to disk). In this case subsequent calls to this function return the rowid
+** associated with these internal INSERT operations, which leads to
+** unintuitive results. Virtual table implementations that do write to rowid
+** tables in this way can avoid this problem by restoring the original
+** rowid value using [sqlite3_set_last_insert_rowid()] before returning
+** control to the user.
+**
+** ^(If an [INSERT] occurs within a trigger then this routine will
+** return the [rowid] of the inserted row as long as the trigger is
+** running. Once the trigger program ends, the value returned
+** by this routine reverts to what it was before the trigger was fired.)^
+**
+** ^An [INSERT] that fails due to a constraint violation is not a
+** successful [INSERT] and does not change the value returned by this
+** routine. ^Thus INSERT OR FAIL, INSERT OR IGNORE, INSERT OR ROLLBACK,
+** and INSERT OR ABORT make no changes to the return value of this
+** routine when their insertion fails. ^(When INSERT OR REPLACE
+** encounters a constraint violation, it does not fail. The
+** INSERT continues to completion after deleting rows that caused
+** the constraint problem so INSERT OR REPLACE will always change
+** the return value of this interface.)^
+**
+** ^For the purposes of this routine, an [INSERT] is considered to
+** be successful even if it is subsequently rolled back.
+**
+** This function is accessible to SQL statements via the
+** [last_insert_rowid() SQL function].
+**
+** If a separate thread performs a new [INSERT] on the same
+** database connection while the [sqlite3_last_insert_rowid()]
+** function is running and thus changes the last insert [rowid],
+** then the value returned by [sqlite3_last_insert_rowid()] is
+** unpredictable and might not equal either the old or the new
+** last insert [rowid].
+*/
+SQLITE_API sqlite3_int64 sqlite3_last_insert_rowid(sqlite3*);
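+
+/*
+** Editor's note: an illustrative sketch, assuming "db" is an open
+** connection and "t" is a hypothetical rowid table:
+**
+** <blockquote><pre>
+**   sqlite3_exec(db, "INSERT INTO t(x) VALUES(1)", 0, 0, 0);
+**   sqlite3_int64 id = sqlite3_last_insert_rowid(db);  // rowid just inserted
+** </pre></blockquote>
+*/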
+
+/*
+** CAPI3REF: Set the Last Insert Rowid value.
+** METHOD: sqlite3
+**
+** The sqlite3_set_last_insert_rowid(D, R) method allows the application to
+** set the value returned by calling sqlite3_last_insert_rowid(D) to R
+** without inserting a row into the database.
+*/
+SQLITE_API void sqlite3_set_last_insert_rowid(sqlite3*,sqlite3_int64);
+
+/*
+** CAPI3REF: Count The Number Of Rows Modified
+** METHOD: sqlite3
+**
+** ^This function returns the number of rows modified, inserted or
+** deleted by the most recently completed INSERT, UPDATE or DELETE
+** statement on the database connection specified by the only parameter.
+** ^Executing any other type of SQL statement does not modify the value
+** returned by this function.
+**
+** ^Only changes made directly by the INSERT, UPDATE or DELETE statement are
+** considered - auxiliary changes caused by [CREATE TRIGGER | triggers],
+** [foreign key actions] or [REPLACE] constraint resolution are not counted.
+**
+** Changes to a view that are intercepted by
+** [INSTEAD OF trigger | INSTEAD OF triggers] are not counted. ^The value
+** returned by sqlite3_changes() immediately after an INSERT, UPDATE or
+** DELETE statement run on a view is always zero. Only changes made to real
+** tables are counted.
+**
+** Things are more complicated if the sqlite3_changes() function is
+** executed while a trigger program is running. This may happen if the
+** program uses the [changes() SQL function], or if some other callback
+** function invokes sqlite3_changes() directly. Essentially:
+**
+** <ul>
+** <li> ^(Before entering a trigger program the value returned by
+** the sqlite3_changes() function is saved. After the trigger program
+** has finished, the original value is restored.)^
+**
+** <li> ^(Within a trigger program each INSERT, UPDATE and DELETE
+** statement sets the value returned by sqlite3_changes()
+** upon completion as normal. Of course, this value will not include
+** any changes performed by sub-triggers, as the sqlite3_changes()
+** value will be saved and restored after each sub-trigger has run.)^
+** </ul>
+**
+** ^This means that if the changes() SQL function (or similar) is used
+** by the first INSERT, UPDATE or DELETE statement within a trigger, it
+** returns the value as set when the calling statement began executing.
+** ^If it is used by the second or subsequent such statement within a trigger
+** program, the value returned reflects the number of rows modified by the
+** previous INSERT, UPDATE or DELETE statement within the same trigger.
+**
+** If a separate thread makes changes on the same database connection
+** while [sqlite3_changes()] is running then the value returned
+** is unpredictable and not meaningful.
+**
+** See also:
+** <ul>
+** <li> the [sqlite3_total_changes()] interface
+** <li> the [count_changes pragma]
+** <li> the [changes() SQL function]
+** <li> the [data_version pragma]
+** </ul>
+*/
+SQLITE_API int sqlite3_changes(sqlite3*);
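+
+/*
+** Editor's note: a short sketch, assuming an open handle "db" and a
+** hypothetical table "t":
+**
+** <blockquote><pre>
+**   sqlite3_exec(db, "UPDATE t SET x=x+1 WHERE x<10", 0, 0, 0);
+**   int nChanged = sqlite3_changes(db);  // rows directly updated
+** </pre></blockquote>
+*/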
+
+/*
+** CAPI3REF: Total Number Of Rows Modified
+** METHOD: sqlite3
+**
+** ^This function returns the total number of rows inserted, modified or
+** deleted by all [INSERT], [UPDATE] or [DELETE] statements completed
+** since the database connection was opened, including those executed as
+** part of trigger programs. ^Executing any other type of SQL statement
+** does not affect the value returned by sqlite3_total_changes().
+**
+** ^Changes made as part of [foreign key actions] are included in the
+** count, but those made as part of REPLACE constraint resolution are
+** not. ^Changes to a view that are intercepted by INSTEAD OF triggers
+** are not counted.
+**
+** The [sqlite3_total_changes(D)] interface only reports the number
+** of rows that changed due to SQL statements run against database
+** connection D. Any changes by other database connections are ignored.
+** To detect changes against a database file from other database
+** connections use the [PRAGMA data_version] command or the
+** [SQLITE_FCNTL_DATA_VERSION] [file control].
+**
+** If a separate thread makes changes on the same database connection
+** while [sqlite3_total_changes()] is running then the value
+** returned is unpredictable and not meaningful.
+**
+** See also:
+** <ul>
+** <li> the [sqlite3_changes()] interface
+** <li> the [count_changes pragma]
+** <li> the [changes() SQL function]
+** <li> the [data_version pragma]
+** <li> the [SQLITE_FCNTL_DATA_VERSION] [file control]
+** </ul>
+*/
+SQLITE_API int sqlite3_total_changes(sqlite3*);
+
+/*
+** CAPI3REF: Interrupt A Long-Running Query
+** METHOD: sqlite3
+**
+** ^This function causes any pending database operation to abort and
+** return at its earliest opportunity. This routine is typically
+** called in response to a user action such as pressing "Cancel"
+** or Ctrl-C where the user wants a long query operation to halt
+** immediately.
+**
+** ^It is safe to call this routine from a thread different from the
+** thread that is currently running the database operation. But it
+** is not safe to call this routine with a [database connection] that
+** is closed or might close before sqlite3_interrupt() returns.
+**
+** ^If an SQL operation is very nearly finished at the time when
+** sqlite3_interrupt() is called, then it might not have an opportunity
+** to be interrupted and might continue to completion.
+**
+** ^An SQL operation that is interrupted will return [SQLITE_INTERRUPT].
+** ^If the interrupted SQL operation is an INSERT, UPDATE, or DELETE
+** that is inside an explicit transaction, then the entire transaction
+** will be rolled back automatically.
+**
+** ^The sqlite3_interrupt(D) call is in effect until all currently running
+** SQL statements on [database connection] D complete. ^Any new SQL statements
+** that are started after the sqlite3_interrupt() call and before the
+** running statement count reaches zero are interrupted as if they had been
+** running prior to the sqlite3_interrupt() call. ^New SQL statements
+** that are started after the running statement count reaches zero are
+** not affected by the sqlite3_interrupt().
+** ^A call to sqlite3_interrupt(D) that occurs when there are no running
+** SQL statements is a no-op and has no effect on SQL statements
+** that are started after the sqlite3_interrupt() call returns.
+*/
+SQLITE_API void sqlite3_interrupt(sqlite3*);
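+
+/*
+** Editor's note: a hedged sketch of the typical pattern. A GUI or
+** watchdog thread holds the same still-open connection "db" that a
+** worker thread is querying, and cancels the work like so:
+**
+** <blockquote><pre>
+**   void on_cancel(sqlite3 *db){
+**     sqlite3_interrupt(db);  // pending steps return SQLITE_INTERRUPT
+**   }
+** </pre></blockquote>
+*/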
+
+/*
+** CAPI3REF: Determine If An SQL Statement Is Complete
+**
+** These routines are useful during command-line input to determine if the
+** currently entered text seems to form a complete SQL statement or
+** if additional input is needed before sending the text into
+** SQLite for parsing. ^These routines return 1 if the input string
+** appears to be a complete SQL statement. ^A statement is judged to be
+** complete if it ends with a semicolon token and is not a prefix of a
+** well-formed CREATE TRIGGER statement. ^Semicolons that are embedded within
+** string literals or quoted identifier names or comments are not
+** independent tokens (they are part of the token in which they are
+** embedded) and thus do not count as a statement terminator. ^Whitespace
+** and comments that follow the final semicolon are ignored.
+**
+** ^These routines return 0 if the statement is incomplete. ^If a
+** memory allocation fails, then SQLITE_NOMEM is returned.
+**
+** ^These routines do not parse the SQL statements and thus
+** will not detect syntactically incorrect SQL.
+**
+** ^(If SQLite has not been initialized using [sqlite3_initialize()] prior
+** to invoking sqlite3_complete16() then sqlite3_initialize() is invoked
+** automatically by sqlite3_complete16(). If that initialization fails,
+** then the return value from sqlite3_complete16() will be non-zero
+** regardless of whether or not the input SQL is complete.)^
+**
+** The input to [sqlite3_complete()] must be a zero-terminated
+** UTF-8 string.
+**
+** The input to [sqlite3_complete16()] must be a zero-terminated
+** UTF-16 string in native byte order.
+*/
+SQLITE_API int sqlite3_complete(const char *sql);
+SQLITE_API int sqlite3_complete16(const void *sql);
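+
+/*
+** Editor's note: a sketch of the intended command-line use; the
+** read_more_input() and run_sql() helpers are hypothetical:
+**
+** <blockquote><pre>
+**   while( read_more_input(zBuf) ){
+**     if( sqlite3_complete(zBuf) ){  // ends with a terminating ";"?
+**       run_sql(zBuf);
+**       zBuf&#91;0] = 0;
+**     }
+**   }
+** </pre></blockquote>
+*/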
+
+/*
+** CAPI3REF: Register A Callback To Handle SQLITE_BUSY Errors
+** KEYWORDS: {busy-handler callback} {busy handler}
+** METHOD: sqlite3
+**
+** ^The sqlite3_busy_handler(D,X,P) routine sets a callback function X
+** that might be invoked with argument P whenever
+** an attempt is made to access a database table associated with
+** [database connection] D when another thread
+** or process has the table locked.
+** The sqlite3_busy_handler() interface is used to implement
+** [sqlite3_busy_timeout()] and [PRAGMA busy_timeout].
+**
+** ^If the busy callback is NULL, then [SQLITE_BUSY]
+** is returned immediately upon encountering the lock. ^If the busy callback
+** is not NULL, then the callback might be invoked with two arguments.
+**
+** ^The first argument to the busy handler is a copy of the void* pointer which
+** is the third argument to sqlite3_busy_handler(). ^The second argument to
+** the busy handler callback is the number of times that the busy handler has
+** been invoked previously for the same locking event. ^If the
+** busy callback returns 0, then no additional attempts are made to
+** access the database and [SQLITE_BUSY] is returned
+** to the application.
+** ^If the callback returns non-zero, then another attempt
+** is made to access the database and the cycle repeats.
+**
+** The presence of a busy handler does not guarantee that it will be invoked
+** when there is lock contention. ^If SQLite determines that invoking the busy
+** handler could result in a deadlock, it will go ahead and return [SQLITE_BUSY]
+** to the application instead of invoking the
+** busy handler.
+** Consider a scenario where one process is holding a read lock that
+** it is trying to promote to a reserved lock and
+** a second process is holding a reserved lock that it is trying
+** to promote to an exclusive lock. The first process cannot proceed
+** because it is blocked by the second and the second process cannot
+** proceed because it is blocked by the first. If both processes
+** invoke the busy handlers, neither will make any progress. Therefore,
+** SQLite returns [SQLITE_BUSY] for the first process, hoping that this
+** will induce the first process to release its read lock and allow
+** the second process to proceed.
+**
+** ^The default busy callback is NULL.
+**
+** ^(There can only be a single busy handler defined for each
+** [database connection]. Setting a new busy handler clears any
+** previously set handler.)^ ^Note that calling [sqlite3_busy_timeout()]
+** or evaluating [PRAGMA busy_timeout=N] will change the
+** busy handler and thus clear any previously set busy handler.
+**
+** The busy callback should not take any actions which modify the
+** database connection that invoked the busy handler. In other words,
+** the busy handler is not reentrant. Any such actions
+** result in undefined behavior.
+**
+** A busy handler must not close the database connection
+** or [prepared statement] that invoked the busy handler.
+*/
+SQLITE_API int sqlite3_busy_handler(sqlite3*,int(*)(void*,int),void*);
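+
+/*
+** Editor's note: a minimal sketch of a busy handler that retries up to
+** five times; everything except the SQLite calls is hypothetical:
+**
+** <blockquote><pre>
+**   static int busyRetry(void *pArg, int nPrior){
+**     (void)pArg;
+**     return nPrior < 5;  // non-zero means "try again"
+**   }
+**   ...
+**   sqlite3_busy_handler(db, busyRetry, 0);
+** </pre></blockquote>
+*/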
+
+/*
+** CAPI3REF: Set A Busy Timeout
+** METHOD: sqlite3
+**
+** ^This routine sets a [sqlite3_busy_handler | busy handler] that sleeps
+** for a specified amount of time when a table is locked. ^The handler
+** will sleep multiple times until at least "ms" milliseconds of sleeping
+** have accumulated. ^After at least "ms" milliseconds of sleeping,
+** the handler returns 0 which causes [sqlite3_step()] to return
+** [SQLITE_BUSY].
+**
+** ^Calling this routine with an argument less than or equal to zero
+** turns off all busy handlers.
+**
+** ^(There can only be a single busy handler for a particular
+** [database connection] at any given moment. If another busy handler
+** was defined (using [sqlite3_busy_handler()]) prior to calling
+** this routine, that other busy handler is cleared.)^
+**
+** See also: [PRAGMA busy_timeout]
+*/
+SQLITE_API int sqlite3_busy_timeout(sqlite3*, int ms);
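+
+/*
+** Editor's note: a one-line sketch, assuming an open handle "db":
+**
+** <blockquote><pre>
+**   sqlite3_busy_timeout(db, 2000);  // retry for up to ~2000 ms
+** </pre></blockquote>
+*/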
+
+/*
+** CAPI3REF: Convenience Routines For Running Queries
+** METHOD: sqlite3
+**
+** This is a legacy interface that is preserved for backwards compatibility.
+** Use of this interface is not recommended.
+**
+** Definition: A <b>result table</b> is a memory data structure created by the
+** [sqlite3_get_table()] interface. A result table records the
+** complete query results from one or more queries.
+**
+** The table conceptually has a number of rows and columns. But
+** these numbers are not part of the result table itself. These
+** numbers are obtained separately. Let N be the number of rows
+** and M be the number of columns.
+**
+** A result table is an array of pointers to zero-terminated UTF-8 strings.
+** There are (N+1)*M elements in the array. The first M pointers point
+** to zero-terminated strings that contain the names of the columns.
+** The remaining entries all point to query results. NULL values result
+** in NULL pointers. All other values are in their UTF-8 zero-terminated
+** string representation as returned by [sqlite3_column_text()].
+**
+** A result table might consist of one or more memory allocations.
+** It is not safe to pass a result table directly to [sqlite3_free()].
+** A result table should be deallocated using [sqlite3_free_table()].
+**
+** ^(As an example of the result table format, suppose a query result
+** is as follows:
+**
+** <blockquote><pre>
+** Name | Age
+** -----------------------
+** Alice | 43
+** Bob | 28
+** Cindy | 21
+** </pre></blockquote>
+**
+** There are two columns (M==2) and three rows (N==3). Thus the
+** result table has 8 entries. Suppose the result table is stored
+** in an array named azResult. Then azResult holds this content:
+**
+** <blockquote><pre>
+** azResult&#91;0] = "Name";
+** azResult&#91;1] = "Age";
+** azResult&#91;2] = "Alice";
+** azResult&#91;3] = "43";
+** azResult&#91;4] = "Bob";
+** azResult&#91;5] = "28";
+** azResult&#91;6] = "Cindy";
+** azResult&#91;7] = "21";
+** </pre></blockquote>)^
+**
+** ^The sqlite3_get_table() function evaluates one or more
+** semicolon-separated SQL statements in the zero-terminated UTF-8
+** string of its 2nd parameter and returns a result table to the
+** pointer given in its 3rd parameter.
+**
+** After the application has finished with the result from sqlite3_get_table(),
+** it must pass the result table pointer to sqlite3_free_table() in order to
+** release the memory that was malloced. Because of the way the
+** [sqlite3_malloc()] happens within sqlite3_get_table(), the calling
+** function must not try to call [sqlite3_free()] directly. Only
+** [sqlite3_free_table()] is able to release the memory properly and safely.
+**
+** The sqlite3_get_table() interface is implemented as a wrapper around
+** [sqlite3_exec()]. The sqlite3_get_table() routine does not have access
+** to any internal data structures of SQLite. It uses only the public
+** interface defined here. As a consequence, errors that occur in the
+** wrapper layer outside of the internal [sqlite3_exec()] call are not
+** reflected in subsequent calls to [sqlite3_errcode()] or
+** [sqlite3_errmsg()].
+*/
+SQLITE_API int sqlite3_get_table(
+ sqlite3 *db, /* An open database */
+ const char *zSql, /* SQL to be evaluated */
+ char ***pazResult, /* Results of the query */
+ int *pnRow, /* Number of result rows written here */
+ int *pnColumn, /* Number of result columns written here */
+ char **pzErrmsg /* Error msg written here */
+);
+SQLITE_API void sqlite3_free_table(char **result);
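+
+/*
+** Editor's note: a hedged sketch of the query/read/free cycle, assuming
+** an open handle "db" and a hypothetical table "people":
+**
+** <blockquote><pre>
+**   char **azResult; int nRow, nCol; char *zErr;
+**   int rc = sqlite3_get_table(db, "SELECT name, age FROM people",
+**                              &azResult, &nRow, &nCol, &zErr);
+**   if( rc==SQLITE_OK ){
+**     // row i, column j is azResult&#91;(i+1)*nCol + j]
+**     sqlite3_free_table(azResult);
+**   }else{
+**     sqlite3_free(zErr);  // any error message must also be freed
+**   }
+** </pre></blockquote>
+*/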
+
+/*
+** CAPI3REF: Formatted String Printing Functions
+**
+** These routines are work-alikes of the "printf()" family of functions
+** from the standard C library.
+** These routines understand most of the common formatting options from
+** the standard library printf()
+** plus some additional non-standard formats ([%q], [%Q], [%w], and [%z]).
+** See the [built-in printf()] documentation for details.
+**
+** ^The sqlite3_mprintf() and sqlite3_vmprintf() routines write their
+** results into memory obtained from [sqlite3_malloc64()].
+** The strings returned by these two routines should be
+** released by [sqlite3_free()]. ^Both routines return a
+** NULL pointer if [sqlite3_malloc64()] is unable to allocate enough
+** memory to hold the resulting string.
+**
+** ^(The sqlite3_snprintf() routine is similar to "snprintf()" from
+** the standard C library. The result is written into the
+** buffer supplied as the second parameter whose size is given by
+** the first parameter. Note that the order of the
+** first two parameters is reversed from snprintf().)^ This is an
+** historical accident that cannot be fixed without breaking
+** backwards compatibility. ^(Note also that sqlite3_snprintf()
+** returns a pointer to its buffer instead of the number of
+** characters actually written into the buffer.)^ We admit that
+** the number of characters written would be a more useful return
+** value but we cannot change the implementation of sqlite3_snprintf()
+** now without breaking compatibility.
+**
+** ^As long as the buffer size is greater than zero, sqlite3_snprintf()
+** guarantees that the buffer is always zero-terminated. ^The first
+** parameter "n" is the total size of the buffer, including space for
+** the zero terminator. So the longest string that can be completely
+** written will be n-1 characters.
+**
+** ^The sqlite3_vsnprintf() routine is a va_list version of sqlite3_snprintf().
+**
+** See also: [built-in printf()], [printf() SQL function]
+*/
+SQLITE_API char *sqlite3_mprintf(const char*,...);
+SQLITE_API char *sqlite3_vmprintf(const char*, va_list);
+SQLITE_API char *sqlite3_snprintf(int,char*,const char*, ...);
+SQLITE_API char *sqlite3_vsnprintf(int,char*,const char*, va_list);
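+
+/*
+** Editor's note: a sketch of %q in action, assuming "db" is open and
+** "zName" is an externally supplied string that may contain quotes:
+**
+** <blockquote><pre>
+**   char *zSql = sqlite3_mprintf("INSERT INTO t VALUES('%q')", zName);
+**   if( zSql ){
+**     sqlite3_exec(db, zSql, 0, 0, 0);
+**     sqlite3_free(zSql);  // memory from mprintf() must be freed
+**   }
+** </pre></blockquote>
+*/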
+
+/*
+** CAPI3REF: Memory Allocation Subsystem
+**
+** The SQLite core uses these memory allocation routines for all of its
+** own internal memory allocation needs. "Core" in the previous sentence
+** does not include operating-system specific [VFS] implementations. The
+** Windows VFS uses native malloc() and free() for some operations.
+**
+** ^The sqlite3_malloc() routine returns a pointer to a block
+** of memory at least N bytes in length, where N is the parameter.
+** ^If sqlite3_malloc() is unable to obtain sufficient free
+** memory, it returns a NULL pointer. ^If the parameter N to
+** sqlite3_malloc() is zero or negative then sqlite3_malloc() returns
+** a NULL pointer.
+**
+** ^The sqlite3_malloc64(N) routine works just like
+** sqlite3_malloc(N) except that N is an unsigned 64-bit integer instead
+** of a signed 32-bit integer.
+**
+** ^Calling sqlite3_free() with a pointer previously returned
+** by sqlite3_malloc() or sqlite3_realloc() releases that memory so
+** that it might be reused. ^The sqlite3_free() routine is
+** a no-op if it is called with a NULL pointer. Passing a NULL pointer
+** to sqlite3_free() is harmless. After being freed, memory
+** should neither be read nor written. Even reading previously freed
+** memory might result in a segmentation fault or other severe error.
+** Memory corruption, a segmentation fault, or other severe error
+** might result if sqlite3_free() is called with a non-NULL pointer that
+** was not obtained from sqlite3_malloc() or sqlite3_realloc().
+**
+** ^The sqlite3_realloc(X,N) interface attempts to resize a
+** prior memory allocation X to be at least N bytes.
+** ^If the X parameter to sqlite3_realloc(X,N)
+** is a NULL pointer then its behavior is identical to calling
+** sqlite3_malloc(N).
+** ^If the N parameter to sqlite3_realloc(X,N) is zero or
+** negative then the behavior is exactly the same as calling
+** sqlite3_free(X).
+** ^sqlite3_realloc(X,N) returns a pointer to a memory allocation
+** of at least N bytes in size or NULL if insufficient memory is available.
+** ^If M is the size of the prior allocation, then min(N,M) bytes
+** of the prior allocation are copied into the beginning of the buffer returned
+** by sqlite3_realloc(X,N) and the prior allocation is freed.
+** ^If sqlite3_realloc(X,N) returns NULL and N is positive, then the
+** prior allocation is not freed.
+**
+** ^The sqlite3_realloc64(X,N) interface works the same as
+** sqlite3_realloc(X,N) except that N is a 64-bit unsigned integer instead
+** of a 32-bit signed integer.
+**
+** ^If X is a memory allocation previously obtained from sqlite3_malloc(),
+** sqlite3_malloc64(), sqlite3_realloc(), or sqlite3_realloc64(), then
+** sqlite3_msize(X) returns the size of that memory allocation in bytes.
+** ^The value returned by sqlite3_msize(X) might be larger than the number
+** of bytes requested when X was allocated. ^If X is a NULL pointer then
+** sqlite3_msize(X) returns zero. If X points to something that is not
+** the beginning of a memory allocation, or if it points to a formerly
+** valid memory allocation that has now been freed, then the behavior
+** of sqlite3_msize(X) is undefined and possibly harmful.
+**
+** ^The memory returned by sqlite3_malloc(), sqlite3_realloc(),
+** sqlite3_malloc64(), and sqlite3_realloc64()
+** is always aligned to at least an 8 byte boundary, or to a
+** 4 byte boundary if the [SQLITE_4_BYTE_ALIGNED_MALLOC] compile-time
+** option is used.
+**
+** The pointer arguments to [sqlite3_free()] and [sqlite3_realloc()]
+** must be either NULL or else pointers obtained from a prior
+** invocation of [sqlite3_malloc()] or [sqlite3_realloc()] that have
+** not yet been released.
+**
+** The application must not read or write any part of
+** a block of memory after it has been released using
+** [sqlite3_free()] or [sqlite3_realloc()].
+*/
+SQLITE_API void *sqlite3_malloc(int);
+SQLITE_API void *sqlite3_malloc64(sqlite3_uint64);
+SQLITE_API void *sqlite3_realloc(void*, int);
+SQLITE_API void *sqlite3_realloc64(void*, sqlite3_uint64);
+SQLITE_API void sqlite3_free(void*);
+SQLITE_API sqlite3_uint64 sqlite3_msize(void*);
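+
+/*
+** Editor's note: a minimal allocate/grow/release sketch using only the
+** interfaces declared above:
+**
+** <blockquote><pre>
+**   char *p = sqlite3_malloc64(100);
+**   if( p ){
+**     char *p2 = sqlite3_realloc(p, 200);  // p stays valid if this fails
+**     if( p2 ) p = p2;
+**     sqlite3_free(p);
+**   }
+** </pre></blockquote>
+*/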
+
+/*
+** CAPI3REF: Memory Allocator Statistics
+**
+** SQLite provides these two interfaces for reporting on the status
+** of the [sqlite3_malloc()], [sqlite3_free()], and [sqlite3_realloc()]
+** routines, which form the built-in memory allocation subsystem.
+**
+** ^The [sqlite3_memory_used()] routine returns the number of bytes
+** of memory currently outstanding (malloced but not freed).
+** ^The [sqlite3_memory_highwater()] routine returns the maximum
+** value of [sqlite3_memory_used()] since the high-water mark
+** was last reset. ^The values returned by [sqlite3_memory_used()] and
+** [sqlite3_memory_highwater()] include any overhead
+** added by SQLite in its implementation of [sqlite3_malloc()],
+** but not overhead added by any underlying system library
+** routines that [sqlite3_malloc()] may call.
+**
+** ^The memory high-water mark is reset to the current value of
+** [sqlite3_memory_used()] if and only if the parameter to
+** [sqlite3_memory_highwater()] is true. ^The value returned
+** by [sqlite3_memory_highwater(1)] is the high-water mark
+** prior to the reset.
+*/
+SQLITE_API sqlite3_int64 sqlite3_memory_used(void);
+SQLITE_API sqlite3_int64 sqlite3_memory_highwater(int resetFlag);
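+
+/*
+** Editor's note: a two-line sketch reading the counters; passing 1
+** resets the high-water mark after it is sampled:
+**
+** <blockquote><pre>
+**   sqlite3_int64 nNow  = sqlite3_memory_used();
+**   sqlite3_int64 nPeak = sqlite3_memory_highwater(1);
+** </pre></blockquote>
+*/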
+
+/*
+** CAPI3REF: Pseudo-Random Number Generator
+**
+** SQLite contains a high-quality pseudo-random number generator (PRNG) used to
+** select random [ROWID | ROWIDs] when inserting new records into a table that
+** already uses the largest possible [ROWID]. The PRNG is also used for
+** the built-in random() and randomblob() SQL functions. This interface allows
+** applications to access the same PRNG for other purposes.
+**
+** ^A call to this routine stores N bytes of randomness into buffer P.
+** ^The P parameter can be a NULL pointer.
+**
+** ^If this routine has not been previously called or if the previous
+** call had N less than one or a NULL pointer for P, then the PRNG is
+** seeded using randomness obtained from the xRandomness method of
+** the default [sqlite3_vfs] object.
+** ^If the previous call to this routine had an N of 1 or more and a
+** non-NULL P then the pseudo-randomness is generated
+** internally and without recourse to the [sqlite3_vfs] xRandomness
+** method.
+*/
+SQLITE_API void sqlite3_randomness(int N, void *P);
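+
+/*
+** Editor's note: a short sketch filling a local buffer with
+** pseudo-random bytes:
+**
+** <blockquote><pre>
+**   unsigned char aNonce&#91;16];
+**   sqlite3_randomness(sizeof(aNonce), aNonce);
+** </pre></blockquote>
+*/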
+
+/*
+** CAPI3REF: Compile-Time Authorization Callbacks
+** METHOD: sqlite3
+** KEYWORDS: {authorizer callback}
+**
+** ^This routine registers an authorizer callback with a particular
+** [database connection], supplied in the first argument.
+** ^The authorizer callback is invoked as SQL statements are being compiled
+** by [sqlite3_prepare()] or its variants [sqlite3_prepare_v2()],
+** [sqlite3_prepare_v3()], [sqlite3_prepare16()], [sqlite3_prepare16_v2()],
+** and [sqlite3_prepare16_v3()]. ^At various
+** points during the compilation process, as logic is being created
+** to perform various actions, the authorizer callback is invoked to
+** see if those actions are allowed. ^The authorizer callback should
+** return [SQLITE_OK] to allow the action, [SQLITE_IGNORE] to disallow the
+** specific action but allow the SQL statement to continue to be
+** compiled, or [SQLITE_DENY] to cause the entire SQL statement to be
+** rejected with an error. ^If the authorizer callback returns
+** any value other than [SQLITE_IGNORE], [SQLITE_OK], or [SQLITE_DENY]
+** then the [sqlite3_prepare_v2()] or equivalent call that triggered
+** the authorizer will fail with an error message.
+**
+** When the callback returns [SQLITE_OK], that means the operation
+** requested is ok. ^When the callback returns [SQLITE_DENY], the
+** [sqlite3_prepare_v2()] or equivalent call that triggered the
+** authorizer will fail with an error message explaining that
+** access is denied.
+**
+** ^The first parameter to the authorizer callback is a copy of the third
+** parameter to the sqlite3_set_authorizer() interface. ^The second parameter
+** to the callback is an integer [SQLITE_COPY | action code] that specifies
+** the particular action to be authorized. ^The third through sixth parameters
+** to the callback are either NULL pointers or zero-terminated strings
+** that contain additional details about the action to be authorized.
+** Applications must always be prepared to encounter a NULL pointer in any
+** of the third through the sixth parameters of the authorization callback.
+**
+** ^If the action code is [SQLITE_READ]
+** and the callback returns [SQLITE_IGNORE] then the
+** [prepared statement] is constructed to substitute
+** a NULL value in place of the table column that would have
+** been read if [SQLITE_OK] had been returned. The [SQLITE_IGNORE]
+** return can be used to deny an untrusted user access to individual
+** columns of a table.
+** ^When a table is referenced by a [SELECT] but no column values are
+** extracted from that table (for example in a query like
+** "SELECT count(*) FROM tab") then the [SQLITE_READ] authorizer callback
+** is invoked once for that table with a column name that is an empty string.
+** ^If the action code is [SQLITE_DELETE] and the callback returns
+** [SQLITE_IGNORE] then the [DELETE] operation proceeds but the
+** [truncate optimization] is disabled and all rows are deleted individually.
+**
+** An authorizer is used when [sqlite3_prepare | preparing]
+** SQL statements from an untrusted source, to ensure that the SQL statements
+** do not try to access data they are not allowed to see, or that they do not
+** try to execute malicious statements that damage the database. For
+** example, an application may allow a user to enter arbitrary
+** SQL queries for evaluation by a database. But the application does
+** not want the user to be able to make arbitrary changes to the
+** database. An authorizer could then be put in place while the
+** user-entered SQL is being [sqlite3_prepare | prepared] that
+** disallows everything except [SELECT] statements.
+**
+** Applications that need to process SQL from untrusted sources
+** might also consider lowering resource limits using [sqlite3_limit()]
+** and limiting database size using the [max_page_count] [PRAGMA]
+** in addition to using an authorizer.
+**
+** ^(Only a single authorizer can be in place on a database connection
+** at a time. Each call to sqlite3_set_authorizer() overrides the
+** previous call.)^ ^Disable the authorizer by installing a NULL callback.
+** The authorizer is disabled by default.
+**
+** The authorizer callback must not do anything that will modify
+** the database connection that invoked the authorizer callback.
+** Note that [sqlite3_prepare_v2()] and [sqlite3_step()] both modify their
+** database connections for the meaning of "modify" in this paragraph.
+**
+** ^When [sqlite3_prepare_v2()] is used to prepare a statement, the
+** statement might be re-prepared during [sqlite3_step()] due to a
+** schema change. Hence, the application should ensure that the
+** correct authorizer callback remains in place during the [sqlite3_step()].
+**
+** ^Note that the authorizer callback is invoked only during
+** [sqlite3_prepare()] or its variants. Authorization is not
+** performed during statement evaluation in [sqlite3_step()], unless
+** as stated in the previous paragraph, sqlite3_step() invokes
+** sqlite3_prepare_v2() to reprepare a statement after a schema change.
+*/
+SQLITE_API int sqlite3_set_authorizer(
+ sqlite3*,
+ int (*xAuth)(void*,int,const char*,const char*,const char*,const char*),
+ void *pUserData
+);
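+
+/*
+** Editor's note: a hedged sketch of an authorizer that permits SELECT
+** statements and column reads but denies all other actions:
+**
+** <blockquote><pre>
+**   static int xReadOnly(void *p, int code, const char *a, const char *b,
+**                        const char *c, const char *d){
+**     (void)p; (void)a; (void)b; (void)c; (void)d;
+**     return (code==SQLITE_SELECT || code==SQLITE_READ)
+**              ? SQLITE_OK : SQLITE_DENY;
+**   }
+**   ...
+**   sqlite3_set_authorizer(db, xReadOnly, 0);
+** </pre></blockquote>
+*/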
+
+/*
+** CAPI3REF: Authorizer Return Codes
+**
+** The [sqlite3_set_authorizer | authorizer callback function] must
+** return either [SQLITE_OK] or one of these two constants in order
+** to signal SQLite whether or not the action is permitted. See the
+** [sqlite3_set_authorizer | authorizer documentation] for additional
+** information.
+**
+** Note that SQLITE_IGNORE is also used as a [conflict resolution mode]
+** returned from the [sqlite3_vtab_on_conflict()] interface.
+*/
+#define SQLITE_DENY 1 /* Abort the SQL statement with an error */
+#define SQLITE_IGNORE 2 /* Don't allow access, but don't generate an error */
+
+/*
+** CAPI3REF: Authorizer Action Codes
+**
+** The [sqlite3_set_authorizer()] interface registers a callback function
+** that is invoked to authorize certain SQL statement actions. The
+** second parameter to the callback is an integer code that specifies
+** what action is being authorized. These are the integer action codes that
+** the authorizer callback may be passed.
+**
+** These action code values signify what kind of operation is to be
+** authorized. The 3rd and 4th parameters to the authorization
+** callback function will be parameters or NULL depending on which of these
+** codes is used as the second parameter. ^(The 5th parameter to the
+** authorizer callback is the name of the database ("main", "temp",
+** etc.) if applicable.)^ ^The 6th parameter to the authorizer callback
+** is the name of the inner-most trigger or view that is responsible for
+** the access attempt or NULL if this access attempt is directly from
+** top-level SQL code.
+*/
+/******************************************* 3rd ************ 4th ***********/
+#define SQLITE_CREATE_INDEX 1 /* Index Name Table Name */
+#define SQLITE_CREATE_TABLE 2 /* Table Name NULL */
+#define SQLITE_CREATE_TEMP_INDEX 3 /* Index Name Table Name */
+#define SQLITE_CREATE_TEMP_TABLE 4 /* Table Name NULL */
+#define SQLITE_CREATE_TEMP_TRIGGER 5 /* Trigger Name Table Name */
+#define SQLITE_CREATE_TEMP_VIEW 6 /* View Name NULL */
+#define SQLITE_CREATE_TRIGGER 7 /* Trigger Name Table Name */
+#define SQLITE_CREATE_VIEW 8 /* View Name NULL */
+#define SQLITE_DELETE 9 /* Table Name NULL */
+#define SQLITE_DROP_INDEX 10 /* Index Name Table Name */
+#define SQLITE_DROP_TABLE 11 /* Table Name NULL */
+#define SQLITE_DROP_TEMP_INDEX 12 /* Index Name Table Name */
+#define SQLITE_DROP_TEMP_TABLE 13 /* Table Name NULL */
+#define SQLITE_DROP_TEMP_TRIGGER 14 /* Trigger Name Table Name */
+#define SQLITE_DROP_TEMP_VIEW 15 /* View Name NULL */
+#define SQLITE_DROP_TRIGGER 16 /* Trigger Name Table Name */
+#define SQLITE_DROP_VIEW 17 /* View Name NULL */
+#define SQLITE_INSERT 18 /* Table Name NULL */
+#define SQLITE_PRAGMA 19 /* Pragma Name 1st arg or NULL */
+#define SQLITE_READ 20 /* Table Name Column Name */
+#define SQLITE_SELECT 21 /* NULL NULL */
+#define SQLITE_TRANSACTION 22 /* Operation NULL */
+#define SQLITE_UPDATE 23 /* Table Name Column Name */
+#define SQLITE_ATTACH 24 /* Filename NULL */
+#define SQLITE_DETACH 25 /* Database Name NULL */
+#define SQLITE_ALTER_TABLE 26 /* Database Name Table Name */
+#define SQLITE_REINDEX 27 /* Index Name NULL */
+#define SQLITE_ANALYZE 28 /* Table Name NULL */
+#define SQLITE_CREATE_VTABLE 29 /* Table Name Module Name */
+#define SQLITE_DROP_VTABLE 30 /* Table Name Module Name */
+#define SQLITE_FUNCTION 31 /* NULL Function Name */
+#define SQLITE_SAVEPOINT 32 /* Operation Savepoint Name */
+#define SQLITE_COPY 0 /* No longer used */
+#define SQLITE_RECURSIVE 33 /* NULL NULL */
+
+/*
+** CAPI3REF: Tracing And Profiling Functions
+** METHOD: sqlite3
+**
+** These routines are deprecated. Use the [sqlite3_trace_v2()] interface
+** instead of the routines described here.
+**
+** These routines register callback functions that can be used for
+** tracing and profiling the execution of SQL statements.
+**
+** ^The callback function registered by sqlite3_trace() is invoked at
+** various times when an SQL statement is being run by [sqlite3_step()].
+** ^The sqlite3_trace() callback is invoked with a UTF-8 rendering of the
+** SQL statement text as the statement first begins executing.
+** ^(Additional sqlite3_trace() callbacks might occur
+** as each triggered subprogram is entered. The callbacks for triggers
+** contain a UTF-8 SQL comment that identifies the trigger.)^
+**
+** The [SQLITE_TRACE_SIZE_LIMIT] compile-time option can be used to limit
+** the length of [bound parameter] expansion in the output of sqlite3_trace().
+**
+** ^The callback function registered by sqlite3_profile() is invoked
+** as each SQL statement finishes. ^The profile callback contains
+** the original statement text and an estimate of wall-clock time
+** of how long that statement took to run. ^The profile callback
+** time is in units of nanoseconds; however, the current implementation
+** is only capable of millisecond resolution, so the six least significant
+** digits in the time are meaningless. Future versions of SQLite
+** might provide greater resolution on the profiler callback. Invoking
+** either [sqlite3_trace()] or [sqlite3_trace_v2()] will cancel the
+** profile callback.
+*/
+SQLITE_API SQLITE_DEPRECATED void *sqlite3_trace(sqlite3*,
+ void(*xTrace)(void*,const char*), void*);
+SQLITE_API SQLITE_DEPRECATED void *sqlite3_profile(sqlite3*,
+ void(*xProfile)(void*,const char*,sqlite3_uint64), void*);
+
+/*
+** CAPI3REF: SQL Trace Event Codes
+** KEYWORDS: SQLITE_TRACE
+**
+** These constants identify classes of events that can be monitored
+** using the [sqlite3_trace_v2()] tracing logic. The M argument
+** to [sqlite3_trace_v2(D,M,X,P)] is an OR-ed combination of one or more of
+** the following constants. ^The first argument to the trace callback
+** is one of the following constants.
+**
+** New tracing constants may be added in future releases.
+**
+** ^A trace callback has four arguments: xCallback(T,C,P,X).
+** ^The T argument is one of the integer type codes above.
+** ^The C argument is a copy of the context pointer passed in as the
+** fourth argument to [sqlite3_trace_v2()].
+** The P and X arguments are pointers whose meanings depend on T.
+**
+** <dl>
+** [[SQLITE_TRACE_STMT]] <dt>SQLITE_TRACE_STMT</dt>
+** <dd>^An SQLITE_TRACE_STMT callback is invoked when a prepared statement
+** first begins running and possibly at other times during the
+** execution of the prepared statement, such as at the start of each
+** trigger subprogram. ^The P argument is a pointer to the
+** [prepared statement]. ^The X argument is a pointer to a string which
+** is the unexpanded SQL text of the prepared statement or an SQL comment
+** that indicates the invocation of a trigger. ^The callback can compute
+** the same text that would have been returned by the legacy [sqlite3_trace()]
+** interface by using the X argument when X begins with "--" and invoking
+** [sqlite3_expanded_sql(P)] otherwise.
+**
+** [[SQLITE_TRACE_PROFILE]] <dt>SQLITE_TRACE_PROFILE</dt>
+** <dd>^An SQLITE_TRACE_PROFILE callback provides approximately the same
+** information as is provided by the [sqlite3_profile()] callback.
+** ^The P argument is a pointer to the [prepared statement] and the
+** X argument points to a 64-bit integer which is an estimate of
+** the number of nanoseconds that the prepared statement took to run.
+** ^The SQLITE_TRACE_PROFILE callback is invoked when the statement finishes.
+**
+** [[SQLITE_TRACE_ROW]] <dt>SQLITE_TRACE_ROW</dt>
+** <dd>^An SQLITE_TRACE_ROW callback is invoked whenever a prepared
+** statement generates a single row of result.
+** ^The P argument is a pointer to the [prepared statement] and the
+** X argument is unused.
+**
+** [[SQLITE_TRACE_CLOSE]] <dt>SQLITE_TRACE_CLOSE</dt>
+** <dd>^An SQLITE_TRACE_CLOSE callback is invoked when a database
+** connection closes.
+** ^The P argument is a pointer to the [database connection] object
+** and the X argument is unused.
+** </dl>
+*/
+#define SQLITE_TRACE_STMT 0x01
+#define SQLITE_TRACE_PROFILE 0x02
+#define SQLITE_TRACE_ROW 0x04
+#define SQLITE_TRACE_CLOSE 0x08
+
+/*
+** CAPI3REF: SQL Trace Hook
+** METHOD: sqlite3
+**
+** ^The sqlite3_trace_v2(D,M,X,P) interface registers a trace callback
+** function X against [database connection] D, using property mask M
+** and context pointer P. ^If the X callback is
+** NULL or if the M mask is zero, then tracing is disabled. The
+** M argument should be the bitwise OR-ed combination of
+** zero or more [SQLITE_TRACE] constants.
+**
+** ^Each call to either sqlite3_trace() or sqlite3_trace_v2() overrides
+** (cancels) any prior calls to sqlite3_trace() or sqlite3_trace_v2().
+**
+** ^The X callback is invoked whenever any of the events identified by
+** mask M occur. ^The integer return value from the callback is currently
+** ignored, though this may change in future releases. Callback
+** implementations should return zero to ensure future compatibility.
+**
+** ^A trace callback is invoked with four arguments: callback(T,C,P,X).
+** ^The T argument is one of the [SQLITE_TRACE]
+** constants to indicate why the callback was invoked.
+** ^The C argument is a copy of the context pointer.
+** The P and X arguments are pointers whose meanings depend on T.
+**
+** The sqlite3_trace_v2() interface is intended to replace the legacy
+** interfaces [sqlite3_trace()] and [sqlite3_profile()], both of which
+** are deprecated.
+*/
+SQLITE_API int sqlite3_trace_v2(
+ sqlite3*,
+ unsigned uMask,
+ int(*xCallback)(unsigned,void*,void*,void*),
+ void *pCtx
+);
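+
+/*
+** Editor's note: a sketch that logs each statement as it starts; for
+** SQLITE_TRACE_STMT the X pointer carries the unexpanded SQL text:
+**
+** <blockquote><pre>
+**   static int xTrace(unsigned type, void *ctx, void *P, void *X){
+**     (void)ctx; (void)P;
+**     if( type==SQLITE_TRACE_STMT ) fprintf(stderr, "%s\n", (char*)X);
+**     return 0;
+**   }
+**   ...
+**   sqlite3_trace_v2(db, SQLITE_TRACE_STMT, xTrace, 0);
+** </pre></blockquote>
+*/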
+
+/*
+** CAPI3REF: Query Progress Callbacks
+** METHOD: sqlite3
+**
+** ^The sqlite3_progress_handler(D,N,X,P) interface causes the callback
+** function X to be invoked periodically during long running calls to
+** [sqlite3_exec()], [sqlite3_step()] and [sqlite3_get_table()] for
+** database connection D. An example use for this
+** interface is to keep a GUI updated during a large query.
+**
+** ^The parameter P is passed through as the only parameter to the
+** callback function X. ^The parameter N is the approximate number of
+** [virtual machine instructions] that are evaluated between successive
+** invocations of the callback X. ^If N is less than one then the progress
+** handler is disabled.
+**
+** ^Only a single progress handler may be defined at one time per
+** [database connection]; setting a new progress handler cancels the
+** old one. ^Setting parameter X to NULL disables the progress handler.
+** ^The progress handler is also disabled by setting N to a value less
+** than 1.
+**
+** ^If the progress callback returns non-zero, the operation is
+** interrupted. This feature can be used to implement a
+** "Cancel" button on a GUI progress dialog box.
+**
+** The progress handler callback must not do anything that will modify
+** the database connection that invoked the progress handler.
+** Note that [sqlite3_prepare_v2()] and [sqlite3_step()] both modify their
+** database connections for the meaning of "modify" in this paragraph.
+*/
+SQLITE_API void sqlite3_progress_handler(sqlite3*, int, int(*)(void*), void*);
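+
+/*
+** Editor's note: a sketch of a cancel flag polled roughly every 1000
+** virtual-machine instructions; "gCancel" is a hypothetical global:
+**
+** <blockquote><pre>
+**   static int xProgress(void *p){
+**     (void)p;
+**     return gCancel;  // non-zero interrupts the running statement
+**   }
+**   ...
+**   sqlite3_progress_handler(db, 1000, xProgress, 0);
+** </pre></blockquote>
+*/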
+
+/*
+** CAPI3REF: Opening A New Database Connection
+** CONSTRUCTOR: sqlite3
+**
+** ^These routines open an SQLite database file as specified by the
+** filename argument. ^The filename argument is interpreted as UTF-8 for
+** sqlite3_open() and sqlite3_open_v2() and as UTF-16 in the native byte
+** order for sqlite3_open16(). ^(A [database connection] handle is usually
+** returned in *ppDb, even if an error occurs. The only exception is that
+** if SQLite is unable to allocate memory to hold the [sqlite3] object,
+** a NULL will be written into *ppDb instead of a pointer to the [sqlite3]
+** object.)^ ^(If the database is opened (and/or created) successfully, then
+** [SQLITE_OK] is returned. Otherwise an [error code] is returned.)^ ^The
+** [sqlite3_errmsg()] or [sqlite3_errmsg16()] routines can be used to obtain
+** an English language description of the error following a failure of any
+** of the sqlite3_open() routines.
+**
+** ^The default encoding will be UTF-8 for databases created using
+** sqlite3_open() or sqlite3_open_v2(). ^The default encoding for databases
+** created using sqlite3_open16() will be UTF-16 in the native byte order.
+**
+** Whether or not an error occurs when it is opened, resources
+** associated with the [database connection] handle should be released by
+** passing it to [sqlite3_close()] when it is no longer required.
+**
+** The sqlite3_open_v2() interface works like sqlite3_open()
+** except that it accepts two additional parameters for additional control
+** over the new database connection. ^(The flags parameter to
+** sqlite3_open_v2() must include, at a minimum, one of the following
+** three flag combinations:)^
+**
+** <dl>
+** ^(<dt>[SQLITE_OPEN_READONLY]</dt>
+** <dd>The database is opened in read-only mode. If the database does not
+** already exist, an error is returned.</dd>)^
+**
+** ^(<dt>[SQLITE_OPEN_READWRITE]</dt>
+** <dd>The database is opened for reading and writing if possible, or reading
+** only if the file is write protected by the operating system. In either
+** case the database must already exist, otherwise an error is returned.</dd>)^
+**
+** ^(<dt>[SQLITE_OPEN_READWRITE] | [SQLITE_OPEN_CREATE]</dt>
+** <dd>The database is opened for reading and writing, and is created if
+** it does not already exist. This is the behavior that is always used for
+** sqlite3_open() and sqlite3_open16().</dd>)^
+** </dl>
+**
+** In addition to the required flags, the following optional flags are
+** also supported:
+**
+** <dl>
+** ^(<dt>[SQLITE_OPEN_URI]</dt>
+** <dd>The filename can be interpreted as a URI if this flag is set.</dd>)^
+**
+** ^(<dt>[SQLITE_OPEN_MEMORY]</dt>
+** <dd>The database will be opened as an in-memory database. The database
+** is named by the "filename" argument for the purposes of cache-sharing,
+** if shared cache mode is enabled, but the "filename" is otherwise ignored.
+** </dd>)^
+**
+** ^(<dt>[SQLITE_OPEN_NOMUTEX]</dt>
+** <dd>The new database connection will use the "multi-thread"
+** [threading mode].)^ This means that separate threads are allowed
+** to use SQLite at the same time, as long as each thread is using
+** a different [database connection].
+**
+** ^(<dt>[SQLITE_OPEN_FULLMUTEX]</dt>
+** <dd>The new database connection will use the "serialized"
+** [threading mode].)^ This means that multiple threads can safely
+** attempt to use the same database connection at the same time.
+** (Mutexes will block any actual concurrency, but in this mode
+** there is no harm in trying.)
+**
+** ^(<dt>[SQLITE_OPEN_SHAREDCACHE]</dt>
+** <dd>The database is opened [shared cache] enabled, overriding
+** the default shared cache setting provided by
+** [sqlite3_enable_shared_cache()].)^
+**
+** ^(<dt>[SQLITE_OPEN_PRIVATECACHE]</dt>
+** <dd>The database is opened [shared cache] disabled, overriding
+** the default shared cache setting provided by
+** [sqlite3_enable_shared_cache()].)^
+**
+** [[OPEN_NOFOLLOW]] ^(<dt>[SQLITE_OPEN_NOFOLLOW]</dt>
+** <dd>The database filename is not allowed to be a symbolic link.</dd>
+** </dl>)^
+**
+** If the 3rd parameter to sqlite3_open_v2() is not one of the
+** required combinations shown above optionally combined with other
+** [SQLITE_OPEN_READONLY | SQLITE_OPEN_* bits]
+** then the behavior is undefined.
+**
+** ^The fourth parameter to sqlite3_open_v2() is the name of the
+** [sqlite3_vfs] object that defines the operating system interface that
+** the new database connection should use. ^If the fourth parameter is
+** a NULL pointer then the default [sqlite3_vfs] object is used.
+**
+** ^If the filename is ":memory:", then a private, temporary in-memory database
+** is created for the connection. ^This in-memory database will vanish when
+** the database connection is closed. Future versions of SQLite might
+** make use of additional special filenames that begin with the ":" character.
+** It is recommended that when a database filename actually does begin with
+** a ":" character you should prefix the filename with a pathname such as
+** "./" to avoid ambiguity.
+**
+** ^If the filename is an empty string, then a private, temporary
+** on-disk database will be created. ^This private database will be
+** automatically deleted as soon as the database connection is closed.
+**
+** [[URI filenames in sqlite3_open()]] <h3>URI Filenames</h3>
+**
+** ^If [URI filename] interpretation is enabled, and the filename argument
+** begins with "file:", then the filename is interpreted as a URI. ^URI
+** filename interpretation is enabled if the [SQLITE_OPEN_URI] flag is
+** set in the third argument to sqlite3_open_v2(), or if it has
+** been enabled globally using the [SQLITE_CONFIG_URI] option with the
+** [sqlite3_config()] method or by the [SQLITE_USE_URI] compile-time option.
+** URI filename interpretation is turned off
+** by default, but future releases of SQLite might enable URI filename
+** interpretation by default. See "[URI filenames]" for additional
+** information.
+**
+** URI filenames are parsed according to RFC 3986. ^If the URI contains an
+** authority, then it must be either an empty string or the string
+** "localhost". ^If the authority is not an empty string or "localhost", an
+** error is returned to the caller. ^The fragment component of a URI, if
+** present, is ignored.
+**
+** ^SQLite uses the path component of the URI as the name of the disk file
+** which contains the database. ^If the path begins with a '/' character,
+** then it is interpreted as an absolute path. ^If the path does not begin
+** with a '/' (meaning that the authority section is omitted from the URI)
+** then the path is interpreted as a relative path.
+** ^(On Windows, the first component of an absolute path
+** is a drive specification (e.g. "C:").)^
+**
+** [[core URI query parameters]]
+** The query component of a URI may contain parameters that are interpreted
+** either by SQLite itself, or by a [VFS | custom VFS implementation].
+** SQLite and its built-in [VFSes] interpret the
+** following query parameters:
+**
+** <ul>
+** <li> <b>vfs</b>: ^The "vfs" parameter may be used to specify the name of
+** a VFS object that provides the operating system interface that should
+** be used to access the database file on disk. ^If this option is set to
+** an empty string the default VFS object is used. ^Specifying an unknown
+** VFS is an error. ^If sqlite3_open_v2() is used and the vfs option is
+** present, then the VFS specified by the option takes precedence over
+** the value passed as the fourth parameter to sqlite3_open_v2().
+**
+** <li> <b>mode</b>: ^(The mode parameter may be set to either "ro", "rw",
+** "rwc", or "memory". Attempting to set it to any other value is
+** an error)^.
+** ^If "ro" is specified, then the database is opened for read-only
+** access, just as if the [SQLITE_OPEN_READONLY] flag had been set in the
+** third argument to sqlite3_open_v2(). ^If the mode option is set to
+** "rw", then the database is opened for read-write (but not create)
+** access, as if SQLITE_OPEN_READWRITE (but not SQLITE_OPEN_CREATE) had
+** been set. ^Value "rwc" is equivalent to setting both
+** SQLITE_OPEN_READWRITE and SQLITE_OPEN_CREATE. ^If the mode option is
+** set to "memory" then a pure [in-memory database] that never reads
+** or writes from disk is used. ^It is an error to specify a value for
+** the mode parameter that is less restrictive than that specified by
+** the flags passed in the third parameter to sqlite3_open_v2().
+**
+** <li> <b>cache</b>: ^The cache parameter may be set to either "shared" or
+** "private". ^Setting it to "shared" is equivalent to setting the
+** SQLITE_OPEN_SHAREDCACHE bit in the flags argument passed to
+** sqlite3_open_v2(). ^Setting the cache parameter to "private" is
+** equivalent to setting the SQLITE_OPEN_PRIVATECACHE bit.
+** ^If sqlite3_open_v2() is used and the "cache" parameter is present in
+** a URI filename, its value overrides any behavior requested by setting
+** SQLITE_OPEN_PRIVATECACHE or SQLITE_OPEN_SHAREDCACHE flag.
+**
+** <li> <b>psow</b>: ^The psow parameter indicates whether or not the
+** [powersafe overwrite] property does or does not apply to the
+** storage media on which the database file resides.
+**
+** <li> <b>nolock</b>: ^The nolock parameter is a boolean query parameter
+** which if set disables file locking in rollback journal modes. This
+** is useful for accessing a database on a filesystem that does not
+** support locking. Caution: Database corruption might result if two
+** or more processes write to the same database and any one of those
+** processes uses nolock=1.
+**
+** <li> <b>immutable</b>: ^The immutable parameter is a boolean query
+** parameter that indicates that the database file is stored on
+** read-only media. ^When immutable is set, SQLite assumes that the
+** database file cannot be changed, even by a process with higher
+** privilege, and so the database is opened read-only and all locking
+** and change detection is disabled. Caution: Setting the immutable
+** property on a database file that does in fact change can result
+** in incorrect query results and/or [SQLITE_CORRUPT] errors.
+** See also: [SQLITE_IOCAP_IMMUTABLE].
+**
+** </ul>
+**
+** ^Specifying an unknown parameter in the query component of a URI is not an
+** error. Future versions of SQLite might understand additional query
+** parameters. See "[query parameters with special meaning to SQLite]" for
+** additional information.
+**
+** [[URI filename examples]] <h3>URI filename examples</h3>
+**
+** <table border="1" align=center cellpadding=5>
+** <tr><th> URI filenames <th> Results
+** <tr><td> file:data.db <td>
+** Open the file "data.db" in the current directory.
+** <tr><td> file:/home/fred/data.db<br>
+** file:///home/fred/data.db <br>
+** file://localhost/home/fred/data.db <br> <td>
+** Open the database file "/home/fred/data.db".
+** <tr><td> file://darkstar/home/fred/data.db <td>
+** An error. "darkstar" is not a recognized authority.
+** <tr><td style="white-space:nowrap">
+** file:///C:/Documents%20and%20Settings/fred/Desktop/data.db
+** <td> Windows only: Open the file "data.db" on fred's desktop on drive
+** C:. Note that the %20 escaping in this example is not strictly
+** necessary - space characters can be used literally
+** in URI filenames.
+** <tr><td> file:data.db?mode=ro&cache=private <td>
+** Open file "data.db" in the current directory for read-only access.
+** Regardless of whether or not shared-cache mode is enabled by
+** default, use a private cache.
+** <tr><td> file:/home/fred/data.db?vfs=unix-dotfile <td>
+** Open file "/home/fred/data.db". Use the special VFS "unix-dotfile"
+** that uses dot-files in place of posix advisory locking.
+** <tr><td> file:data.db?mode=readonly <td>
+** An error. "readonly" is not a valid option for the "mode" parameter.
+** </table>
+**
+** ^URI hexadecimal escape sequences (%HH) are supported within the path and
+** query components of a URI. A hexadecimal escape sequence consists of a
+** percent sign - "%" - followed by exactly two hexadecimal digits
+** specifying an octet value. ^Before the path or query components of a
+** URI filename are interpreted, they are encoded using UTF-8 and all
+** hexadecimal escape sequences replaced by a single byte containing the
+** corresponding octet. If this process generates an invalid UTF-8 encoding,
+** the results are undefined.
+**
+** <b>Note to Windows users:</b> The encoding used for the filename argument
+** of sqlite3_open() and sqlite3_open_v2() must be UTF-8, not whatever
+** codepage is currently defined. Filenames containing international
+** characters must be converted to UTF-8 prior to passing them into
+** sqlite3_open() or sqlite3_open_v2().
+**
+** <b>Note to Windows Runtime users:</b> The temporary directory must be set
+** prior to calling sqlite3_open() or sqlite3_open_v2(). Otherwise, various
+** features that require the use of temporary files may fail.
+**
+** See also: [sqlite3_temp_directory]
+*/
+SQLITE_API int sqlite3_open(
+ const char *filename, /* Database filename (UTF-8) */
+ sqlite3 **ppDb /* OUT: SQLite db handle */
+);
+SQLITE_API int sqlite3_open16(
+ const void *filename, /* Database filename (UTF-16) */
+ sqlite3 **ppDb /* OUT: SQLite db handle */
+);
+SQLITE_API int sqlite3_open_v2(
+ const char *filename, /* Database filename (UTF-8) */
+ sqlite3 **ppDb, /* OUT: SQLite db handle */
+ int flags, /* Flags */
+ const char *zVfs /* Name of VFS module to use */
+);
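+
+/*
+** Editor's note: a hedged open/close sketch using the create-if-missing
+** flag combination described above:
+**
+** <blockquote><pre>
+**   sqlite3 *db = 0;
+**   int rc = sqlite3_open_v2("app.db", &db,
+**                SQLITE_OPEN_READWRITE|SQLITE_OPEN_CREATE, 0);
+**   if( rc!=SQLITE_OK ) fprintf(stderr, "%s\n", sqlite3_errmsg(db));
+**   sqlite3_close(db);  // release the handle even when the open failed
+** </pre></blockquote>
+*/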
+
+/*
+** CAPI3REF: Obtain Values For URI Parameters
+**
+** These are utility routines, useful to [VFS|custom VFS implementations],
+** that check if a database file was a URI that contained a specific query
+** parameter, and if so obtain the value of that query parameter.
+**
+** The first parameter to these interfaces (hereafter referred to
+** as F) must be one of:
+** <ul>
+** <li> A database filename pointer created by the SQLite core and
+** passed into the xOpen() method of a VFS implementation, or
+** <li> A filename obtained from [sqlite3_db_filename()], or
+** <li> A new filename constructed using [sqlite3_create_filename()].
+** </ul>
+** If the F parameter is not one of the above, then the behavior is
+** undefined and probably undesirable. Older versions of SQLite were
+** more tolerant of invalid F parameters than newer versions.
+**
+** If F is a suitable filename (as described in the previous paragraph)
+** and if P is the name of the query parameter, then
+** sqlite3_uri_parameter(F,P) returns the value of the P
+** parameter if it exists or a NULL pointer if P does not appear as a
+** query parameter on F. If P is a query parameter of F and it
+** has no explicit value, then sqlite3_uri_parameter(F,P) returns
+** a pointer to an empty string.
+**
+** The sqlite3_uri_boolean(F,P,B) routine assumes that P is a boolean
+** parameter and returns true (1) or false (0) according to the value
+** of P. The sqlite3_uri_boolean(F,P,B) routine returns true (1) if the
+** value of query parameter P is one of "yes", "true", or "on" in any
+** case or if the value begins with a non-zero number. The
+** sqlite3_uri_boolean(F,P,B) routine returns false (0) if the value of
+** query parameter P is one of "no", "false", or "off" in any case or
+** if the value begins with a numeric zero. If P is not a query
+** parameter on F or if the value of P does not match any of the
+** above, then sqlite3_uri_boolean(F,P,B) returns (B!=0).
+**
+** The sqlite3_uri_int64(F,P,D) routine converts the value of P into a
+** 64-bit signed integer and returns that integer, or D if P does not
+** exist. If the value of P is something other than an integer, then
+** zero is returned.
+**
+** The sqlite3_uri_key(F,N) interface returns a pointer to the name (not
+** the value) of the N-th query parameter for filename F, or a NULL
+** pointer if N is less than zero or greater than the number of query
+** parameters minus 1. The N value is zero-based so N should be 0 to obtain
+** the name of the first query parameter, 1 for the second parameter, and
+** so forth.
+**
+** If F is a NULL pointer, then sqlite3_uri_parameter(F,P) returns NULL and
+** sqlite3_uri_boolean(F,P,B) returns B. If F is not a NULL pointer and
+** is not a database file pathname pointer that the SQLite core passed
+** into the xOpen VFS method, then the behavior of this routine is undefined
+** and probably undesirable.
+**
+** Beginning with SQLite [version 3.31.0] ([dateof:3.31.0]) the input F
+** parameter can also be the name of a rollback journal file or WAL file
+** in addition to the main database file. Prior to version 3.31.0, these
+** routines would only work if F was the name of the main database file.
+** When the F parameter is the name of the rollback journal or WAL file,
+** it has access to all the same query parameters as were found on the
+** main database file.
+**
+** See the [URI filename] documentation for additional information.
+*/
+SQLITE_API const char *sqlite3_uri_parameter(const char *zFilename, const char *zParam);
+SQLITE_API int sqlite3_uri_boolean(const char *zFile, const char *zParam, int bDefault);
+SQLITE_API sqlite3_int64 sqlite3_uri_int64(const char*, const char*, sqlite3_int64);
+SQLITE_API const char *sqlite3_uri_key(const char *zFilename, int N);
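+
+/*
+** Usage sketch (illustrative): reading query parameters from inside a
+** custom [VFS] xOpen() method, where zName is the filename pointer
+** supplied by the SQLite core. The parameter names "mykey" and
+** "mysize" are hypothetical.
+**
+** <blockquote><pre>
+** // e.g. opened as "file:data.db?mykey=on&mysize=8192"
+** const char *zVal = sqlite3_uri_parameter(zName, "mykey");
+** int bOn          = sqlite3_uri_boolean(zName, "mykey", 0);
+** sqlite3_int64 sz = sqlite3_uri_int64(zName, "mysize", 4096);
+** </pre></blockquote>
+*/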
+
+/*
+** CAPI3REF: Translate filenames
+**
+** These routines are available to [VFS|custom VFS implementations] for
+** translating filenames between the main database file, the journal file,
+** and the WAL file.
+**
+** If F is the name of an sqlite database file, journal file, or WAL file
+** passed by the SQLite core into the VFS, then sqlite3_filename_database(F)
+** returns the name of the corresponding database file.
+**
+** If F is the name of an sqlite database file, journal file, or WAL file
+** passed by the SQLite core into the VFS, or if F is a database filename
+** obtained from [sqlite3_db_filename()], then sqlite3_filename_journal(F)
+** returns the name of the corresponding rollback journal file.
+**
+** If F is the name of an sqlite database file, journal file, or WAL file
+** that was passed by the SQLite core into the VFS, or if F is a database
+** filename obtained from [sqlite3_db_filename()], then
+** sqlite3_filename_wal(F) returns the name of the corresponding
+** WAL file.
+**
+** In all of the above, if F is not the name of a database, journal or WAL
+** filename passed into the VFS from the SQLite core and F is not the
+** return value from [sqlite3_db_filename()], then the result is
+** undefined and is likely a memory access violation.
+*/
+SQLITE_API const char *sqlite3_filename_database(const char*);
+SQLITE_API const char *sqlite3_filename_journal(const char*);
+SQLITE_API const char *sqlite3_filename_wal(const char*);
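+
+/*
+** Usage sketch (illustrative): given a filename zJournal that the core
+** passed into a VFS for a rollback journal, recover the related names.
+**
+** <blockquote><pre>
+** const char *zDb  = sqlite3_filename_database(zJournal);
+** const char *zWal = sqlite3_filename_wal(zJournal);
+** </pre></blockquote>
+*/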
+
+/*
+** CAPI3REF: Database File Corresponding To A Journal
+**
+** ^If X is the name of a rollback or WAL-mode journal file that is
+** passed into the xOpen method of [sqlite3_vfs], then
+** sqlite3_database_file_object(X) returns a pointer to the [sqlite3_file]
+** object that represents the main database file.
+**
+** This routine is intended for use in custom [VFS] implementations
+** only. It is not a general-purpose interface.
+** The argument sqlite3_file_object(X) must be a filename pointer that
+** has been passed into [sqlite3_vfs].xOpen method where the
+** flags parameter to xOpen contains one of the bits
+** [SQLITE_OPEN_MAIN_JOURNAL] or [SQLITE_OPEN_WAL]. Any other use
+** of this routine results in undefined and probably undesirable
+** behavior.
+*/
+SQLITE_API sqlite3_file *sqlite3_database_file_object(const char*);
+
+/*
+** CAPI3REF: Create and Destroy VFS Filenames
+**
+** These interfaces are provided for use by [VFS shim] implementations and
+** are not useful outside of that context.
+**
+** The sqlite3_create_filename(D,J,W,N,P) allocates memory to hold a version of
+** database filename D with corresponding journal file J and WAL file W and
+** with N URI parameter key/value pairs in the array P. The result from
+** sqlite3_create_filename(D,J,W,N,P) is a pointer to a database filename that
+** is safe to pass to routines like:
+** <ul>
+** <li> [sqlite3_uri_parameter()],
+** <li> [sqlite3_uri_boolean()],
+** <li> [sqlite3_uri_int64()],
+** <li> [sqlite3_uri_key()],
+** <li> [sqlite3_filename_database()],
+** <li> [sqlite3_filename_journal()], or
+** <li> [sqlite3_filename_wal()].
+** </ul>
+** If a memory allocation error occurs, sqlite3_create_filename() might
+** return a NULL pointer. The memory obtained from sqlite3_create_filename(X)
+** must be released by a corresponding call to sqlite3_free_filename(Y).
+**
+** The P parameter in sqlite3_create_filename(D,J,W,N,P) should be an array
+** of 2*N pointers to strings. Each pair of pointers in this array corresponds
+** to a key and value for a query parameter. The P parameter may be a NULL
+** pointer if N is zero. None of the 2*N pointers in the P array may be
+** NULL pointers and key pointers should not be empty strings.
+** None of the D, J, or W parameters to sqlite3_create_filename(D,J,W,N,P) may
+** be NULL pointers, though they can be empty strings.
+**
+** The sqlite3_free_filename(Y) routine releases a memory allocation
+** previously obtained from sqlite3_create_filename(). Invoking
+** sqlite3_free_filename(Y) where Y is a NULL pointer is a harmless no-op.
+**
+** If the Y parameter to sqlite3_free_filename(Y) is anything other
+** than a NULL pointer or a pointer previously acquired from
+** sqlite3_create_filename(), then bad things such as heap
+** corruption or segfaults may occur. The value Y should not be
+** used again after sqlite3_free_filename(Y) has been called. This means
+** that if the [sqlite3_vfs.xOpen()] method of a VFS has been called using Y,
+** then the corresponding [sqlite3_io_methods.xClose()] method should also be
+** invoked prior to calling sqlite3_free_filename(Y).
+*/
+SQLITE_API char *sqlite3_create_filename(
+ const char *zDatabase,
+ const char *zJournal,
+ const char *zWal,
+ int nParam,
+ const char **azParam
+);
+SQLITE_API void sqlite3_free_filename(char*);
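+
+/*
+** Usage sketch (illustrative): a [VFS shim] constructing a filename
+** that carries one extra query parameter before handing it to a
+** lower-level xOpen(). The names used here are hypothetical.
+**
+** <blockquote><pre>
+** const char *azParam[] = { "shimkey", "1" };   // one key/value pair
+** char *zNew = sqlite3_create_filename("data.db", "data.db-journal",
+**                                      "data.db-wal", 1, azParam);
+** if( zNew ){
+**   // ...pass zNew to the underlying xOpen(); after the matching
+**   // xClose() has run:
+**   sqlite3_free_filename(zNew);
+** }
+** </pre></blockquote>
+*/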
+
+/*
+** CAPI3REF: Error Codes And Messages
+** METHOD: sqlite3
+**
+** ^If the most recent sqlite3_* API call associated with
+** [database connection] D failed, then the sqlite3_errcode(D) interface
+** returns the numeric [result code] or [extended result code] for that
+** API call.
+** ^The sqlite3_extended_errcode()
+** interface is the same except that it always returns the
+** [extended result code] even when extended result codes are
+** disabled.
+**
+** The values returned by sqlite3_errcode() and/or
+** sqlite3_extended_errcode() might change with each API call, except
+** that some interfaces are guaranteed never to change the value of
+** the error code. The error-code preserving interfaces are:
+**
+** <ul>
+** <li> sqlite3_errcode()
+** <li> sqlite3_extended_errcode()
+** <li> sqlite3_errmsg()
+** <li> sqlite3_errmsg16()
+** </ul>
+**
+** ^The sqlite3_errmsg() and sqlite3_errmsg16() return English-language
+** text that describes the error, as either UTF-8 or UTF-16 respectively.
+** ^(Memory to hold the error message string is managed internally.
+** The application does not need to worry about freeing the result.
+** However, the error string might be overwritten or deallocated by
+** subsequent calls to other SQLite interface functions.)^
+**
+** ^The sqlite3_errstr() interface returns the English-language text
+** that describes the [result code], as UTF-8.
+** ^(Memory to hold the error message string is managed internally
+** and must not be freed by the application)^.
+**
+** When the serialized [threading mode] is in use, it might be the
+** case that a second error occurs on a separate thread in between
+** the time of the first error and the call to these interfaces.
+** When that happens, the second error will be reported since these
+** interfaces always report the most recent result. To avoid
+** this, each thread can obtain exclusive use of the [database connection] D
+** by invoking [sqlite3_mutex_enter]([sqlite3_db_mutex](D)) before beginning
+** to use D and invoking [sqlite3_mutex_leave]([sqlite3_db_mutex](D)) after
+** all calls to the interfaces listed here are completed.
+**
+** If an interface fails with SQLITE_MISUSE, that means the interface
+** was invoked incorrectly by the application. In that case, the
+** error code and message may or may not be set.
+*/
+SQLITE_API int sqlite3_errcode(sqlite3 *db);
+SQLITE_API int sqlite3_extended_errcode(sqlite3 *db);
+SQLITE_API const char *sqlite3_errmsg(sqlite3*);
+SQLITE_API const void *sqlite3_errmsg16(sqlite3*);
+SQLITE_API const char *sqlite3_errstr(int);
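+
+/*
+** Usage sketch (illustrative): reporting the most recent error on an
+** open connection db after a failed call.
+**
+** <blockquote><pre>
+** int rc = sqlite3_exec(db, "SELECT * FROM no_such_table", 0, 0, 0);
+** if( rc!=SQLITE_OK ){
+**   fprintf(stderr, "error %d (%s): %s\n",
+**           sqlite3_extended_errcode(db),   // detailed numeric code
+**           sqlite3_errstr(rc),             // English text for rc
+**           sqlite3_errmsg(db));            // message for the last error
+** }
+** </pre></blockquote>
+*/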
+
+/*
+** CAPI3REF: Prepared Statement Object
+** KEYWORDS: {prepared statement} {prepared statements}
+**
+** An instance of this object represents a single SQL statement that
+** has been compiled into binary form and is ready to be evaluated.
+**
+** Think of each SQL statement as a separate computer program. The
+** original SQL text is source code. A prepared statement object
+** is the compiled object code. All SQL must be converted into a
+** prepared statement before it can be run.
+**
+** The life-cycle of a prepared statement object usually goes like this:
+**
+** <ol>
+** <li> Create the prepared statement object using [sqlite3_prepare_v2()].
+** <li> Bind values to [parameters] using the sqlite3_bind_*()
+** interfaces.
+** <li> Run the SQL by calling [sqlite3_step()] one or more times.
+** <li> Reset the prepared statement using [sqlite3_reset()] then go back
+** to step 2. Do this zero or more times.
+** <li> Destroy the object using [sqlite3_finalize()].
+** </ol>
+*/
+typedef struct sqlite3_stmt sqlite3_stmt;
+
+/*
+** CAPI3REF: Run-time Limits
+** METHOD: sqlite3
+**
+** ^(This interface allows the size of various constructs to be limited
+** on a connection by connection basis. The first parameter is the
+** [database connection] whose limit is to be set or queried. The
+** second parameter is one of the [limit categories] that define a
+** class of constructs to be size limited. The third parameter is the
+** new limit for that construct.)^
+**
+** ^If the new limit is a negative number, the limit is unchanged.
+** ^(For each limit category SQLITE_LIMIT_<i>NAME</i> there is a
+** [limits | hard upper bound]
+** set at compile-time by a C preprocessor macro called
+** [limits | SQLITE_MAX_<i>NAME</i>].
+** (The "_LIMIT_" in the name is changed to "_MAX_".))^
+** ^Attempts to increase a limit above its hard upper bound are
+** silently truncated to the hard upper bound.
+**
+** ^Regardless of whether or not the limit was changed, the
+** [sqlite3_limit()] interface returns the prior value of the limit.
+** ^Hence, to find the current value of a limit without changing it,
+** simply invoke this interface with the third parameter set to -1.
+**
+** Run-time limits are intended for use in applications that manage
+** both their own internal database and also databases that are controlled
+** by untrusted external sources. An example application might be a
+** web browser that has its own databases for storing history and
+** separate databases controlled by JavaScript applications downloaded
+** off the Internet. The internal databases can be given the
+** large, default limits. Databases managed by external sources can
+** be given much smaller limits designed to prevent a denial of service
+** attack. Developers might also want to use the [sqlite3_set_authorizer()]
+** interface to further control untrusted SQL. The size of the database
+** created by an untrusted script can be contained using the
+** [max_page_count] [PRAGMA].
+**
+** New run-time limit categories may be added in future releases.
+*/
+SQLITE_API int sqlite3_limit(sqlite3*, int id, int newVal);
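+
+/*
+** Usage sketch (illustrative): query a limit without changing it by
+** passing -1, then lower it on an untrusted connection db.
+**
+** <blockquote><pre>
+** int cur = sqlite3_limit(db, SQLITE_LIMIT_SQL_LENGTH, -1);  // peek only
+** sqlite3_limit(db, SQLITE_LIMIT_SQL_LENGTH, 100000);        // returns cur
+** </pre></blockquote>
+*/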
+
+/*
+** CAPI3REF: Run-Time Limit Categories
+** KEYWORDS: {limit category} {*limit categories}
+**
+** These constants define various performance limits
+** that can be lowered at run-time using [sqlite3_limit()].
+** The synopsis of the meanings of the various limits is shown below.
+** Additional information is available at [limits | Limits in SQLite].
+**
+** <dl>
+** [[SQLITE_LIMIT_LENGTH]] ^(<dt>SQLITE_LIMIT_LENGTH</dt>
+** <dd>The maximum size of any string or BLOB or table row, in bytes.</dd>)^
+**
+** [[SQLITE_LIMIT_SQL_LENGTH]] ^(<dt>SQLITE_LIMIT_SQL_LENGTH</dt>
+** <dd>The maximum length of an SQL statement, in bytes.</dd>)^
+**
+** [[SQLITE_LIMIT_COLUMN]] ^(<dt>SQLITE_LIMIT_COLUMN</dt>
+** <dd>The maximum number of columns in a table definition or in the
+** result set of a [SELECT] or the maximum number of columns in an index
+** or in an ORDER BY or GROUP BY clause.</dd>)^
+**
+** [[SQLITE_LIMIT_EXPR_DEPTH]] ^(<dt>SQLITE_LIMIT_EXPR_DEPTH</dt>
+** <dd>The maximum depth of the parse tree on any expression.</dd>)^
+**
+** [[SQLITE_LIMIT_COMPOUND_SELECT]] ^(<dt>SQLITE_LIMIT_COMPOUND_SELECT</dt>
+** <dd>The maximum number of terms in a compound SELECT statement.</dd>)^
+**
+** [[SQLITE_LIMIT_VDBE_OP]] ^(<dt>SQLITE_LIMIT_VDBE_OP</dt>
+** <dd>The maximum number of instructions in a virtual machine program
+** used to implement an SQL statement. If [sqlite3_prepare_v2()] or
+** the equivalent tries to allocate space for more than this many opcodes
+** in a single prepared statement, an SQLITE_NOMEM error is returned.</dd>)^
+**
+** [[SQLITE_LIMIT_FUNCTION_ARG]] ^(<dt>SQLITE_LIMIT_FUNCTION_ARG</dt>
+** <dd>The maximum number of arguments on a function.</dd>)^
+**
+** [[SQLITE_LIMIT_ATTACHED]] ^(<dt>SQLITE_LIMIT_ATTACHED</dt>
+** <dd>The maximum number of [ATTACH | attached databases].)^</dd>
+**
+** [[SQLITE_LIMIT_LIKE_PATTERN_LENGTH]]
+** ^(<dt>SQLITE_LIMIT_LIKE_PATTERN_LENGTH</dt>
+** <dd>The maximum length of the pattern argument to the [LIKE] or
+** [GLOB] operators.</dd>)^
+**
+** [[SQLITE_LIMIT_VARIABLE_NUMBER]]
+** ^(<dt>SQLITE_LIMIT_VARIABLE_NUMBER</dt>
+** <dd>The maximum index number of any [parameter] in an SQL statement.)^</dd>
+**
+** [[SQLITE_LIMIT_TRIGGER_DEPTH]] ^(<dt>SQLITE_LIMIT_TRIGGER_DEPTH</dt>
+** <dd>The maximum depth of recursion for triggers.</dd>)^
+**
+** [[SQLITE_LIMIT_WORKER_THREADS]] ^(<dt>SQLITE_LIMIT_WORKER_THREADS</dt>
+** <dd>The maximum number of auxiliary worker threads that a single
+** [prepared statement] may start.</dd>)^
+** </dl>
+*/
+#define SQLITE_LIMIT_LENGTH 0
+#define SQLITE_LIMIT_SQL_LENGTH 1
+#define SQLITE_LIMIT_COLUMN 2
+#define SQLITE_LIMIT_EXPR_DEPTH 3
+#define SQLITE_LIMIT_COMPOUND_SELECT 4
+#define SQLITE_LIMIT_VDBE_OP 5
+#define SQLITE_LIMIT_FUNCTION_ARG 6
+#define SQLITE_LIMIT_ATTACHED 7
+#define SQLITE_LIMIT_LIKE_PATTERN_LENGTH 8
+#define SQLITE_LIMIT_VARIABLE_NUMBER 9
+#define SQLITE_LIMIT_TRIGGER_DEPTH 10
+#define SQLITE_LIMIT_WORKER_THREADS 11
+
+/*
+** CAPI3REF: Prepare Flags
+**
+** These constants define various flags that can be passed into
+** "prepFlags" parameter of the [sqlite3_prepare_v3()] and
+** [sqlite3_prepare16_v3()] interfaces.
+**
+** New flags may be added in future releases of SQLite.
+**
+** <dl>
+** [[SQLITE_PREPARE_PERSISTENT]] ^(<dt>SQLITE_PREPARE_PERSISTENT</dt>
+** <dd>The SQLITE_PREPARE_PERSISTENT flag is a hint to the query planner
+** that the prepared statement will be retained for a long time and
+** probably reused many times.)^ ^Without this flag, [sqlite3_prepare_v3()]
+** and [sqlite3_prepare16_v3()] assume that the prepared statement will
+** be used just once or at most a few times and then destroyed using
+** [sqlite3_finalize()] relatively soon. The current implementation acts
+** on this hint by avoiding the use of [lookaside memory] so as not to
+** deplete the limited store of lookaside memory. Future versions of
+** SQLite may act on this hint differently.
+**
+** [[SQLITE_PREPARE_NORMALIZE]] <dt>SQLITE_PREPARE_NORMALIZE</dt>
+** <dd>The SQLITE_PREPARE_NORMALIZE flag is a no-op. This flag used
+** to be required for any prepared statement that wanted to use the
+** [sqlite3_normalized_sql()] interface. However, the
+** [sqlite3_normalized_sql()] interface is now available to all
+** prepared statements, regardless of whether or not they use this
+** flag.
+**
+** [[SQLITE_PREPARE_NO_VTAB]] <dt>SQLITE_PREPARE_NO_VTAB</dt>
+** <dd>The SQLITE_PREPARE_NO_VTAB flag causes the SQL compiler
+** to return an error (error code SQLITE_ERROR) if the statement uses
+** any virtual tables.
+** </dl>
+*/
+#define SQLITE_PREPARE_PERSISTENT 0x01
+#define SQLITE_PREPARE_NORMALIZE 0x02
+#define SQLITE_PREPARE_NO_VTAB 0x04
+
+/*
+** CAPI3REF: Compiling An SQL Statement
+** KEYWORDS: {SQL statement compiler}
+** METHOD: sqlite3
+** CONSTRUCTOR: sqlite3_stmt
+**
+** To execute an SQL statement, it must first be compiled into a byte-code
+** program using one of these routines. Or, in other words, these routines
+** are constructors for the [prepared statement] object.
+**
+** The preferred routine to use is [sqlite3_prepare_v2()]. The
+** [sqlite3_prepare()] interface is legacy and should be avoided.
+** [sqlite3_prepare_v3()] has an extra "prepFlags" option that is used
+** for special purposes.
+**
+** The use of the UTF-8 interfaces is preferred, as SQLite currently
+** does all parsing using UTF-8. The UTF-16 interfaces are provided
+** as a convenience. The UTF-16 interfaces work by converting the
+** input text into UTF-8, then invoking the corresponding UTF-8 interface.
+**
+** The first argument, "db", is a [database connection] obtained from a
+** prior successful call to [sqlite3_open()], [sqlite3_open_v2()] or
+** [sqlite3_open16()]. The database connection must not have been closed.
+**
+** The second argument, "zSql", is the statement to be compiled, encoded
+** as either UTF-8 or UTF-16. The sqlite3_prepare(), sqlite3_prepare_v2(),
+** and sqlite3_prepare_v3()
+** interfaces use UTF-8, and sqlite3_prepare16(), sqlite3_prepare16_v2(),
+** and sqlite3_prepare16_v3() use UTF-16.
+**
+** ^If the nByte argument is negative, then zSql is read up to the
+** first zero terminator. ^If nByte is positive, then it is the
+** number of bytes read from zSql. ^If nByte is zero, then no prepared
+** statement is generated.
+** If the caller knows that the supplied string is nul-terminated, then
+** there is a small performance advantage to passing an nByte parameter that
+** is the number of bytes in the input string <i>including</i>
+** the nul-terminator.
+**
+** ^If pzTail is not NULL then *pzTail is made to point to the first byte
+** past the end of the first SQL statement in zSql. These routines only
+** compile the first statement in zSql, so *pzTail is left pointing to
+** what remains uncompiled.
+**
+** ^*ppStmt is left pointing to a compiled [prepared statement] that can be
+** executed using [sqlite3_step()]. ^If there is an error, *ppStmt is set
+** to NULL. ^If the input text contains no SQL (if the input is an empty
+** string or a comment) then *ppStmt is set to NULL.
+** The calling procedure is responsible for deleting the compiled
+** SQL statement using [sqlite3_finalize()] after it has finished with it.
+** ppStmt may not be NULL.
+**
+** ^On success, the sqlite3_prepare() family of routines return [SQLITE_OK];
+** otherwise an [error code] is returned.
+**
+** The sqlite3_prepare_v2(), sqlite3_prepare_v3(), sqlite3_prepare16_v2(),
+** and sqlite3_prepare16_v3() interfaces are recommended for all new programs.
+** The older interfaces (sqlite3_prepare() and sqlite3_prepare16())
+** are retained for backwards compatibility, but their use is discouraged.
+** ^In the "vX" interfaces, the prepared statement
+** that is returned (the [sqlite3_stmt] object) contains a copy of the
+** original SQL text. This causes the [sqlite3_step()] interface to
+** behave differently in three ways:
+**
+** <ol>
+** <li>
+** ^If the database schema changes, instead of returning [SQLITE_SCHEMA] as it
+** always used to do, [sqlite3_step()] will automatically recompile the SQL
+** statement and try to run it again. As many as [SQLITE_MAX_SCHEMA_RETRY]
+** retries will occur before sqlite3_step() gives up and returns an error.
+** </li>
+**
+** <li>
+** ^When an error occurs, [sqlite3_step()] will return one of the detailed
+** [error codes] or [extended error codes]. ^The legacy behavior was that
+** [sqlite3_step()] would only return a generic [SQLITE_ERROR] result code
+** and the application would have to make a second call to [sqlite3_reset()]
+** in order to find the underlying cause of the problem. With the "v2" prepare
+** interfaces, the underlying reason for the error is returned immediately.
+** </li>
+**
+** <li>
+** ^If the specific value bound to a [parameter | host parameter] in the
+** WHERE clause might influence the choice of query plan for a statement,
+** then the statement will be automatically recompiled, as if there had been
+** a schema change, on the first [sqlite3_step()] call following any change
+** to the [sqlite3_bind_text | bindings] of that [parameter].
+** ^The specific value of a WHERE-clause [parameter] might influence the
+** choice of query plan if the parameter is the left-hand side of a [LIKE]
+** or [GLOB] operator or if the parameter is compared to an indexed column
+** and the [SQLITE_ENABLE_STAT4] compile-time option is enabled.
+** </li>
+** </ol>
+**
+** <p>^sqlite3_prepare_v3() differs from sqlite3_prepare_v2() only in having
+** the extra prepFlags parameter, which is a bit array consisting of zero or
+** more of the [SQLITE_PREPARE_PERSISTENT|SQLITE_PREPARE_*] flags. ^The
+** sqlite3_prepare_v2() interface works exactly the same as
+** sqlite3_prepare_v3() with a zero prepFlags parameter.
+*/
+SQLITE_API int sqlite3_prepare(
+ sqlite3 *db, /* Database handle */
+ const char *zSql, /* SQL statement, UTF-8 encoded */
+ int nByte, /* Maximum length of zSql in bytes. */
+ sqlite3_stmt **ppStmt, /* OUT: Statement handle */
+ const char **pzTail /* OUT: Pointer to unused portion of zSql */
+);
+SQLITE_API int sqlite3_prepare_v2(
+ sqlite3 *db, /* Database handle */
+ const char *zSql, /* SQL statement, UTF-8 encoded */
+ int nByte, /* Maximum length of zSql in bytes. */
+ sqlite3_stmt **ppStmt, /* OUT: Statement handle */
+ const char **pzTail /* OUT: Pointer to unused portion of zSql */
+);
+SQLITE_API int sqlite3_prepare_v3(
+ sqlite3 *db, /* Database handle */
+ const char *zSql, /* SQL statement, UTF-8 encoded */
+ int nByte, /* Maximum length of zSql in bytes. */
+ unsigned int prepFlags, /* Zero or more SQLITE_PREPARE_ flags */
+ sqlite3_stmt **ppStmt, /* OUT: Statement handle */
+ const char **pzTail /* OUT: Pointer to unused portion of zSql */
+);
+SQLITE_API int sqlite3_prepare16(
+ sqlite3 *db, /* Database handle */
+ const void *zSql, /* SQL statement, UTF-16 encoded */
+ int nByte, /* Maximum length of zSql in bytes. */
+ sqlite3_stmt **ppStmt, /* OUT: Statement handle */
+ const void **pzTail /* OUT: Pointer to unused portion of zSql */
+);
+SQLITE_API int sqlite3_prepare16_v2(
+ sqlite3 *db, /* Database handle */
+ const void *zSql, /* SQL statement, UTF-16 encoded */
+ int nByte, /* Maximum length of zSql in bytes. */
+ sqlite3_stmt **ppStmt, /* OUT: Statement handle */
+ const void **pzTail /* OUT: Pointer to unused portion of zSql */
+);
+SQLITE_API int sqlite3_prepare16_v3(
+ sqlite3 *db, /* Database handle */
+ const void *zSql, /* SQL statement, UTF-16 encoded */
+ int nByte, /* Maximum length of zSql in bytes. */
+ unsigned int prepFlags, /* Zero or more SQLITE_PREPARE_ flags */
+ sqlite3_stmt **ppStmt, /* OUT: Statement handle */
+ const void **pzTail /* OUT: Pointer to unused portion of zSql */
+);
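+
+/*
+** Usage sketch (illustrative): the usual prepare/step/finalize cycle,
+** assuming an open connection db. Passing nByte as -1 reads the SQL up
+** to its NUL terminator.
+**
+** <blockquote><pre>
+** sqlite3_stmt *pStmt = 0;
+** int rc = sqlite3_prepare_v2(db, "SELECT 1;", -1, &pStmt, 0);
+** if( rc==SQLITE_OK ){
+**   while( sqlite3_step(pStmt)==SQLITE_ROW ){
+**     // consume the current row here
+**   }
+** }
+** sqlite3_finalize(pStmt);  // harmless no-op when pStmt is NULL
+** </pre></blockquote>
+*/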
+
+/*
+** CAPI3REF: Retrieving Statement SQL
+** METHOD: sqlite3_stmt
+**
+** ^The sqlite3_sql(P) interface returns a pointer to a copy of the UTF-8
+** SQL text used to create [prepared statement] P if P was
+** created by [sqlite3_prepare_v2()], [sqlite3_prepare_v3()],
+** [sqlite3_prepare16_v2()], or [sqlite3_prepare16_v3()].
+** ^The sqlite3_expanded_sql(P) interface returns a pointer to a UTF-8
+** string containing the SQL text of prepared statement P with
+** [bound parameters] expanded.
+** ^The sqlite3_normalized_sql(P) interface returns a pointer to a UTF-8
+** string containing the normalized SQL text of prepared statement P. The
+** semantics used to normalize a SQL statement are unspecified and subject
+** to change. At a minimum, literal values will be replaced with suitable
+** placeholders.
+**
+** ^(For example, if a prepared statement is created using the SQL
+** text "SELECT $abc,:xyz" and if parameter $abc is bound to integer 2345
+** and parameter :xyz is unbound, then sqlite3_sql() will return
+** the original string, "SELECT $abc,:xyz" but sqlite3_expanded_sql()
+** will return "SELECT 2345,NULL".)^
+**
+** ^The sqlite3_expanded_sql() interface returns NULL if insufficient memory
+** is available to hold the result, or if the result would exceed the
+** maximum string length determined by the [SQLITE_LIMIT_LENGTH].
+**
+** ^The [SQLITE_TRACE_SIZE_LIMIT] compile-time option limits the size of
+** bound parameter expansions. ^The [SQLITE_OMIT_TRACE] compile-time
+** option causes sqlite3_expanded_sql() to always return NULL.
+**
+** ^The strings returned by sqlite3_sql(P) and sqlite3_normalized_sql(P)
+** are managed by SQLite and are automatically freed when the prepared
+** statement is finalized.
+** ^The string returned by sqlite3_expanded_sql(P), on the other hand,
+** is obtained from [sqlite3_malloc()] and must be freed by the application
+** by passing it to [sqlite3_free()].
+*/
+SQLITE_API const char *sqlite3_sql(sqlite3_stmt *pStmt);
+SQLITE_API char *sqlite3_expanded_sql(sqlite3_stmt *pStmt);
+SQLITE_API const char *sqlite3_normalized_sql(sqlite3_stmt *pStmt);
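+
+/*
+** Usage sketch (illustrative): unlike sqlite3_sql(), the string from
+** sqlite3_expanded_sql() is owned by the caller and must be released
+** with sqlite3_free().
+**
+** <blockquote><pre>
+** char *zExp = sqlite3_expanded_sql(pStmt);  // may be NULL on OOM
+** if( zExp ){
+**   printf("%s\n", zExp);
+**   sqlite3_free(zExp);
+** }
+** </pre></blockquote>
+*/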
+
+/*
+** CAPI3REF: Determine If An SQL Statement Writes The Database
+** METHOD: sqlite3_stmt
+**
+** ^The sqlite3_stmt_readonly(X) interface returns true (non-zero) if
+** and only if the [prepared statement] X makes no direct changes to
+** the content of the database file.
+**
+** Note that [application-defined SQL functions] or
+** [virtual tables] might change the database indirectly as a side effect.
+** ^(For example, if an application defines a function "eval()" that
+** calls [sqlite3_exec()], then the following SQL statement would
+** change the database file through side-effects:
+**
+** <blockquote><pre>
+** SELECT eval('DELETE FROM t1') FROM t2;
+** </pre></blockquote>
+**
+** But because the [SELECT] statement does not change the database file
+** directly, sqlite3_stmt_readonly() would still return true.)^
+**
+** ^Transaction control statements such as [BEGIN], [COMMIT], [ROLLBACK],
+** [SAVEPOINT], and [RELEASE] cause sqlite3_stmt_readonly() to return true,
+** since the statements themselves do not actually modify the database but
+** rather they control the timing of when other statements modify the
+** database. ^The [ATTACH] and [DETACH] statements also cause
+** sqlite3_stmt_readonly() to return true since, while those statements
+** change the configuration of a database connection, they do not make
+** changes to the content of the database files on disk.
+** ^The sqlite3_stmt_readonly() interface returns true for [BEGIN] since
+** [BEGIN] merely sets internal flags, but the [BEGIN|BEGIN IMMEDIATE] and
+** [BEGIN|BEGIN EXCLUSIVE] commands do touch the database and so
+** sqlite3_stmt_readonly() returns false for those commands.
+*/
+SQLITE_API int sqlite3_stmt_readonly(sqlite3_stmt *pStmt);
+
+/*
+** CAPI3REF: Query The EXPLAIN Setting For A Prepared Statement
+** METHOD: sqlite3_stmt
+**
+** ^The sqlite3_stmt_isexplain(S) interface returns 1 if the
+** prepared statement S is an EXPLAIN statement, or 2 if the
+** statement S is an EXPLAIN QUERY PLAN.
+** ^The sqlite3_stmt_isexplain(S) interface returns 0 if S is
+** an ordinary statement or a NULL pointer.
+*/
+SQLITE_API int sqlite3_stmt_isexplain(sqlite3_stmt *pStmt);
+
+/*
+** CAPI3REF: Determine If A Prepared Statement Has Been Reset
+** METHOD: sqlite3_stmt
+**
+** ^The sqlite3_stmt_busy(S) interface returns true (non-zero) if the
+** [prepared statement] S has been stepped at least once using
+** [sqlite3_step(S)] but has neither run to completion (returned
+** [SQLITE_DONE] from [sqlite3_step(S)]) nor
+** been reset using [sqlite3_reset(S)]. ^The sqlite3_stmt_busy(S)
+** interface returns false if S is a NULL pointer. If S is not a
+** NULL pointer and is not a pointer to a valid [prepared statement]
+** object, then the behavior is undefined and probably undesirable.
+**
+** This interface can be used in combination with [sqlite3_next_stmt()]
+** to locate all prepared statements associated with a database
+** connection that are in need of being reset. This can be used,
+** for example, in diagnostic routines to search for prepared
+** statements that are holding a transaction open.
+*/
+SQLITE_API int sqlite3_stmt_busy(sqlite3_stmt*);
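+
+/*
+** Usage sketch (illustrative): scanning all prepared statements on a
+** connection db with [sqlite3_next_stmt()] to find any that still hold
+** state and may be keeping a transaction open.
+**
+** <blockquote><pre>
+** sqlite3_stmt *p;
+** for(p=sqlite3_next_stmt(db,0); p; p=sqlite3_next_stmt(db,p)){
+**   if( sqlite3_stmt_busy(p) ){
+**     // p was stepped but neither finished nor reset
+**   }
+** }
+** </pre></blockquote>
+*/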
+
+/*
+** CAPI3REF: Dynamically Typed Value Object
+** KEYWORDS: {protected sqlite3_value} {unprotected sqlite3_value}
+**
+** SQLite uses the sqlite3_value object to represent all values
+** that can be stored in a database table. SQLite uses dynamic typing
+** for the values it stores. ^Values stored in sqlite3_value objects
+** can be integers, floating point values, strings, BLOBs, or NULL.
+**
+** An sqlite3_value object may be either "protected" or "unprotected".
+** Some interfaces require a protected sqlite3_value. Other interfaces
+** will accept either a protected or an unprotected sqlite3_value.
+** Every interface that accepts sqlite3_value arguments specifies
+** whether or not it requires a protected sqlite3_value. The
+** [sqlite3_value_dup()] interface can be used to construct a new
+** protected sqlite3_value from an unprotected sqlite3_value.
+**
+** The terms "protected" and "unprotected" refer to whether or not
+** a mutex is held. An internal mutex is held for a protected
+** sqlite3_value object but no mutex is held for an unprotected
+** sqlite3_value object. If SQLite is compiled to be single-threaded
+** (with [SQLITE_THREADSAFE=0] and with [sqlite3_threadsafe()] returning 0)
+** or if SQLite is run in one of reduced mutex modes
+** [SQLITE_CONFIG_SINGLETHREAD] or [SQLITE_CONFIG_MULTITHREAD]
+** then there is no distinction between protected and unprotected
+** sqlite3_value objects and they can be used interchangeably. However,
+** for maximum code portability it is recommended that applications
+** still make the distinction between protected and unprotected
+** sqlite3_value objects even when not strictly required.
+**
+** ^The sqlite3_value objects that are passed as parameters into the
+** implementation of [application-defined SQL functions] are protected.
+** ^The sqlite3_value object returned by
+** [sqlite3_column_value()] is unprotected.
+** Unprotected sqlite3_value objects may only be used as arguments
+** to [sqlite3_result_value()], [sqlite3_bind_value()], and
+** [sqlite3_value_dup()].
+** The [sqlite3_value_blob | sqlite3_value_type()] family of
+** interfaces require protected sqlite3_value objects.
+*/
+typedef struct sqlite3_value sqlite3_value;
+
+/*
+** CAPI3REF: SQL Function Context Object
+**
+** The context in which an SQL function executes is stored in an
+** sqlite3_context object. ^A pointer to an sqlite3_context object
+** is always the first parameter to [application-defined SQL functions].
+** The application-defined SQL function implementation will pass this
+** pointer through into calls to [sqlite3_result_int | sqlite3_result()],
+** [sqlite3_aggregate_context()], [sqlite3_user_data()],
+** [sqlite3_context_db_handle()], [sqlite3_get_auxdata()],
+** and/or [sqlite3_set_auxdata()].
+*/
+typedef struct sqlite3_context sqlite3_context;
+
+/*
+** CAPI3REF: Binding Values To Prepared Statements
+** KEYWORDS: {host parameter} {host parameters} {host parameter name}
+** KEYWORDS: {SQL parameter} {SQL parameters} {parameter binding}
+** METHOD: sqlite3_stmt
+**
+** ^(In the SQL statement text input to [sqlite3_prepare_v2()] and its variants,
+** literals may be replaced by a [parameter] that matches one of following
+** templates:
+**
+** <ul>
+** <li> ?
+** <li> ?NNN
+** <li> :VVV
+** <li> @VVV
+** <li> $VVV
+** </ul>
+**
+** In the templates above, NNN represents an integer literal,
+** and VVV represents an alphanumeric identifier.)^ ^The values of these
+** parameters (also called "host parameter names" or "SQL parameters")
+** can be set using the sqlite3_bind_*() routines defined here.
+**
+** ^The first argument to the sqlite3_bind_*() routines is always
+** a pointer to the [sqlite3_stmt] object returned from
+** [sqlite3_prepare_v2()] or its variants.
+**
+** ^The second argument is the index of the SQL parameter to be set.
+** ^The leftmost SQL parameter has an index of 1. ^When the same named
+** SQL parameter is used more than once, second and subsequent
+** occurrences have the same index as the first occurrence.
+** ^The index for named parameters can be looked up using the
+** [sqlite3_bind_parameter_index()] API if desired. ^The index
+** for "?NNN" parameters is the value of NNN.
+** ^The NNN value must be between 1 and the [sqlite3_limit()]
+** parameter [SQLITE_LIMIT_VARIABLE_NUMBER] (default value: 32766).
+**
+** ^The third argument is the value to bind to the parameter.
+** ^If the third parameter to sqlite3_bind_text() or sqlite3_bind_text16()
+** or sqlite3_bind_blob() is a NULL pointer then the fourth parameter
+** is ignored and the end result is the same as sqlite3_bind_null().
+** ^If the third parameter to sqlite3_bind_text() is not NULL, then
+** it should be a pointer to well-formed UTF8 text.
+** ^If the third parameter to sqlite3_bind_text16() is not NULL, then
+** it should be a pointer to well-formed UTF16 text.
+** ^If the third parameter to sqlite3_bind_text64() is not NULL, then
+** it should be a pointer to a well-formed unicode string that is
+** either UTF8 if the sixth parameter is SQLITE_UTF8, or UTF16
+** otherwise.
+**
+** [[byte-order determination rules]] ^The byte-order of
+** UTF16 input text is determined by the byte-order mark (BOM, U+FEFF)
+** found in the first character, which is removed, or in the absence of a BOM
+** the byte order is the native byte order of the host
+** machine for sqlite3_bind_text16() or the byte order specified in
+** the 6th parameter for sqlite3_bind_text64().)^
+** ^If UTF16 input text contains invalid unicode
+** characters, then SQLite might change those invalid characters
+** into the unicode replacement character: U+FFFD.
+**
+** ^(In those routines that have a fourth argument, its value is the
+** number of bytes in the parameter. To be clear: the value is the
+** number of <u>bytes</u> in the value, not the number of characters.)^
+** ^If the fourth parameter to sqlite3_bind_text() or sqlite3_bind_text16()
+** is negative, then the length of the string is
+** the number of bytes up to the first zero terminator.
+** If the fourth parameter to sqlite3_bind_blob() is negative, then
+** the behavior is undefined.
+** If a non-negative fourth parameter is provided to sqlite3_bind_text()
+** or sqlite3_bind_text16() or sqlite3_bind_text64() then
+** that parameter must be the byte offset
+** where the NUL terminator would occur assuming the string were NUL
+** terminated. If any NUL characters occur at byte offsets less than
+** the value of the fourth parameter then the resulting string value will
+** contain embedded NULs. The result of expressions involving strings
+** with embedded NULs is undefined.
+**
+** ^The fifth argument to the BLOB and string binding interfaces
+** is a destructor used to dispose of the BLOB or
+** string after SQLite has finished with it. ^The destructor is called
+** to dispose of the BLOB or string even if the call to the bind API fails,
+** except the destructor is not called if the third parameter is a NULL
+** pointer or the fourth parameter is negative.
+** ^If the fifth argument is
+** the special value [SQLITE_STATIC], then SQLite assumes that the
+** information is in static, unmanaged space and does not need to be freed.
+** ^If the fifth argument has the value [SQLITE_TRANSIENT], then
+** SQLite makes its own private copy of the data immediately, before
+** the sqlite3_bind_*() routine returns.
+**
+** ^The sixth argument to sqlite3_bind_text64() must be one of
+** [SQLITE_UTF8], [SQLITE_UTF16], [SQLITE_UTF16BE], or [SQLITE_UTF16LE]
+** to specify the encoding of the text in the third parameter. If
+** the sixth argument to sqlite3_bind_text64() is not one of the
+** allowed values shown above, or if the text encoding is different
+** from the encoding specified by the sixth parameter, then the behavior
+** is undefined.
+**
+** ^The sqlite3_bind_zeroblob() routine binds a BLOB of length N that
+** is filled with zeroes. ^A zeroblob uses a fixed amount of memory
+** (just an integer to hold its size) while it is being processed.
+** Zeroblobs are intended to serve as placeholders for BLOBs whose
+** content is later written using
+** [sqlite3_blob_open | incremental BLOB I/O] routines.
+** ^A negative value for the zeroblob results in a zero-length BLOB.
+**
+** ^The sqlite3_bind_pointer(S,I,P,T,D) routine causes the I-th parameter in
+** [prepared statement] S to have an SQL value of NULL, but to also be
+** associated with the pointer P of type T. ^D is either a NULL pointer or
+** a pointer to a destructor function for P. ^SQLite will invoke the
+** destructor D with a single argument of P when it is finished using
+** P. The T parameter should be a static string, preferably a string
+** literal. The sqlite3_bind_pointer() routine is part of the
+** [pointer passing interface] added for SQLite 3.20.0.
+**
+** ^If any of the sqlite3_bind_*() routines are called with a NULL pointer
+** for the [prepared statement] or with a prepared statement for which
+** [sqlite3_step()] has been called more recently than [sqlite3_reset()],
+** then the call will return [SQLITE_MISUSE]. If any sqlite3_bind_*()
+** routine is passed a [prepared statement] that has been finalized, the
+** result is undefined and probably harmful.
+**
+** ^Bindings are not cleared by the [sqlite3_reset()] routine.
+** ^Unbound parameters are interpreted as NULL.
+**
+** ^The sqlite3_bind_* routines return [SQLITE_OK] on success or an
+** [error code] if anything goes wrong.
+** ^[SQLITE_TOOBIG] might be returned if the size of a string or BLOB
+** exceeds limits imposed by [sqlite3_limit]([SQLITE_LIMIT_LENGTH]) or
+** [SQLITE_MAX_LENGTH].
+** ^[SQLITE_RANGE] is returned if the parameter
+** index is out of range. ^[SQLITE_NOMEM] is returned if malloc() fails.
+**
+** See also: [sqlite3_bind_parameter_count()],
+** [sqlite3_bind_parameter_name()], and [sqlite3_bind_parameter_index()].
+*/
+SQLITE_API int sqlite3_bind_blob(sqlite3_stmt*, int, const void*, int n, void(*)(void*));
+SQLITE_API int sqlite3_bind_blob64(sqlite3_stmt*, int, const void*, sqlite3_uint64,
+ void(*)(void*));
+SQLITE_API int sqlite3_bind_double(sqlite3_stmt*, int, double);
+SQLITE_API int sqlite3_bind_int(sqlite3_stmt*, int, int);
+SQLITE_API int sqlite3_bind_int64(sqlite3_stmt*, int, sqlite3_int64);
+SQLITE_API int sqlite3_bind_null(sqlite3_stmt*, int);
+SQLITE_API int sqlite3_bind_text(sqlite3_stmt*,int,const char*,int,void(*)(void*));
+SQLITE_API int sqlite3_bind_text16(sqlite3_stmt*, int, const void*, int, void(*)(void*));
+SQLITE_API int sqlite3_bind_text64(sqlite3_stmt*, int, const char*, sqlite3_uint64,
+ void(*)(void*), unsigned char encoding);
+SQLITE_API int sqlite3_bind_value(sqlite3_stmt*, int, const sqlite3_value*);
+SQLITE_API int sqlite3_bind_pointer(sqlite3_stmt*, int, void*, const char*,void(*)(void*));
+SQLITE_API int sqlite3_bind_zeroblob(sqlite3_stmt*, int, int n);
+SQLITE_API int sqlite3_bind_zeroblob64(sqlite3_stmt*, int, sqlite3_uint64);
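+
+/*
+** Usage sketch (illustrative): binding a positional and a named
+** parameter, assuming an open connection db and a hypothetical table
+** t(a,b). SQLITE_TRANSIENT makes SQLite copy the string immediately.
+**
+** <blockquote><pre>
+** sqlite3_stmt *pStmt = 0;
+** sqlite3_prepare_v2(db, "INSERT INTO t(a,b) VALUES(?1, :name)",
+**                    -1, &pStmt, 0);
+** sqlite3_bind_int(pStmt, 1, 42);
+** sqlite3_bind_text(pStmt,
+**     sqlite3_bind_parameter_index(pStmt, ":name"),
+**     "hello", -1, SQLITE_TRANSIENT);
+** sqlite3_step(pStmt);
+** sqlite3_finalize(pStmt);
+** </pre></blockquote>
+*/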
+
+/*
+** CAPI3REF: Number Of SQL Parameters
+** METHOD: sqlite3_stmt
+**
+** ^This routine can be used to find the number of [SQL parameters]
+** in a [prepared statement]. SQL parameters are tokens of the
+** form "?", "?NNN", ":AAA", "$AAA", or "@AAA" that serve as
+** placeholders for values that are [sqlite3_bind_blob | bound]
+** to the parameters at a later time.
+**
+** ^(This routine actually returns the index of the largest (rightmost)
+** parameter. For all forms except ?NNN, this will correspond to the
+** number of unique parameters. If parameters of the ?NNN form are used,
+** there may be gaps in the list.)^
+**
+** See also: [sqlite3_bind_blob|sqlite3_bind()],
+** [sqlite3_bind_parameter_name()], and
+** [sqlite3_bind_parameter_index()].
+*/
+SQLITE_API int sqlite3_bind_parameter_count(sqlite3_stmt*);
+
+/*
+** CAPI3REF: Name Of A Host Parameter
+** METHOD: sqlite3_stmt
+**
+** ^The sqlite3_bind_parameter_name(P,N) interface returns
+** the name of the N-th [SQL parameter] in the [prepared statement] P.
+** ^(SQL parameters of the form "?NNN" or ":AAA" or "@AAA" or "$AAA"
+** have a name which is the string "?NNN" or ":AAA" or "@AAA" or "$AAA"
+** respectively.
+** In other words, the initial ":" or "$" or "@" or "?"
+** is included as part of the name.)^
+** ^Parameters of the form "?" without a following integer have no name
+** and are referred to as "nameless" or "anonymous parameters".
+**
+** ^The first host parameter has an index of 1, not 0.
+**
+** ^If the value N is out of range or if the N-th parameter is
+** nameless, then NULL is returned. ^The returned string is
+** always in UTF-8 encoding even if the named parameter was
+** originally specified as UTF-16 in [sqlite3_prepare16()],
+** [sqlite3_prepare16_v2()], or [sqlite3_prepare16_v3()].
+**
+** See also: [sqlite3_bind_blob|sqlite3_bind()],
+** [sqlite3_bind_parameter_count()], and
+** [sqlite3_bind_parameter_index()].
+*/
+SQLITE_API const char *sqlite3_bind_parameter_name(sqlite3_stmt*, int);
+
+/*
+** CAPI3REF: Index Of A Parameter With A Given Name
+** METHOD: sqlite3_stmt
+**
+** ^Return the index of an SQL parameter given its name. ^The
+** index value returned is suitable for use as the second
+** parameter to [sqlite3_bind_blob|sqlite3_bind()]. ^A zero
+** is returned if no matching parameter is found. ^The parameter
+** name must be given in UTF-8 even if the original statement
+** was prepared from UTF-16 text using [sqlite3_prepare16_v2()] or
+** [sqlite3_prepare16_v3()].
+**
+** See also: [sqlite3_bind_blob|sqlite3_bind()],
+** [sqlite3_bind_parameter_count()], and
+** [sqlite3_bind_parameter_name()].
+*/
+SQLITE_API int sqlite3_bind_parameter_index(sqlite3_stmt*, const char *zName);
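+
+/*
+** Usage sketch (illustrative): enumerating the parameters of a
+** prepared statement pStmt by index and name; the name is NULL for
+** anonymous "?" parameters.
+**
+** <blockquote><pre>
+** int i, n = sqlite3_bind_parameter_count(pStmt);
+** for(i=n; i>0; i--){
+**   const char *zName = sqlite3_bind_parameter_name(pStmt, i);
+**   printf("param %d: %s\n", i, zName ? zName : "(anonymous)");
+** }
+** </pre></blockquote>
+*/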
+
+/*
+** CAPI3REF: Reset All Bindings On A Prepared Statement
+** METHOD: sqlite3_stmt
+**
+** ^Contrary to the intuition of many, [sqlite3_reset()] does not reset
+** the [sqlite3_bind_blob | bindings] on a [prepared statement].
+** ^Use this routine to reset all host parameters to NULL.
+*/
+SQLITE_API int sqlite3_clear_bindings(sqlite3_stmt*);
+
+/*
+** CAPI3REF: Number Of Columns In A Result Set
+** METHOD: sqlite3_stmt
+**
+** ^Return the number of columns in the result set returned by the
+** [prepared statement]. ^If this routine returns 0, that means the
+** [prepared statement] returns no data (for example an [UPDATE]).
+** ^However, just because this routine returns a positive number does not
+** mean that one or more rows of data will be returned. ^A SELECT statement
+** will always have a positive sqlite3_column_count() but depending on the
+** WHERE clause constraints and the table content, it might return no rows.
+**
+** See also: [sqlite3_data_count()]
+*/
+SQLITE_API int sqlite3_column_count(sqlite3_stmt *pStmt);
+
+/*
+** CAPI3REF: Column Names In A Result Set
+** METHOD: sqlite3_stmt
+**
+** ^These routines return the name assigned to a particular column
+** in the result set of a [SELECT] statement. ^The sqlite3_column_name()
+** interface returns a pointer to a zero-terminated UTF-8 string
+** and sqlite3_column_name16() returns a pointer to a zero-terminated
+** UTF-16 string. ^The first parameter is the [prepared statement]
+** that implements the [SELECT] statement. ^The second parameter is the
+** column number. ^The leftmost column is number 0.
+**
+** ^The returned string pointer is valid until either the [prepared statement]
+** is destroyed by [sqlite3_finalize()] or until the statement is automatically
+** reprepared by the first call to [sqlite3_step()] for a particular run
+** or until the next call to
+** sqlite3_column_name() or sqlite3_column_name16() on the same column.
+**
+** ^If sqlite3_malloc() fails during the processing of either routine
+** (for example during a conversion from UTF-8 to UTF-16) then a
+** NULL pointer is returned.
+**
+** ^The name of a result column is the value of the "AS" clause for
+** that column, if there is an AS clause. If there is no AS clause
+** then the name of the column is unspecified and may change from
+** one release of SQLite to the next.
+*/
+SQLITE_API const char *sqlite3_column_name(sqlite3_stmt*, int N);
+SQLITE_API const void *sqlite3_column_name16(sqlite3_stmt*, int N);
+
+/*
+** CAPI3REF: Source Of Data In A Query Result
+** METHOD: sqlite3_stmt
+**
+** ^These routines provide a means to determine the database, table, and
+** table column that is the origin of a particular result column in
+** [SELECT] statement.
+** ^The name of the database or table or column can be returned as
+** either a UTF-8 or UTF-16 string. ^The _database_ routines return
+** the database name, the _table_ routines return the table name, and
+** the _origin_ routines return the column name.
+** ^The returned string is valid until the [prepared statement] is destroyed
+** using [sqlite3_finalize()] or until the statement is automatically
+** reprepared by the first call to [sqlite3_step()] for a particular run
+** or until the same information is requested
+** again in a different encoding.
+**
+** ^The names returned are the original un-aliased names of the
+** database, table, and column.
+**
+** ^The first argument to these interfaces is a [prepared statement].
+** ^These functions return information about the Nth result column returned by
+** the statement, where N is the second function argument.
+** ^The left-most column is column 0 for these routines.
+**
+** ^If the Nth column returned by the statement is an expression or
+** subquery and is not a column value, then all of these functions return
+** NULL. ^These routines might also return NULL if a memory allocation error
+** occurs. ^Otherwise, they return the name of the attached database, table,
+** or column that the query result column was extracted from.
+**
+** ^As with all other SQLite APIs, those whose names end with "16" return
+** UTF-16 encoded strings and the other functions return UTF-8.
+**
+** ^These APIs are only available if the library was compiled with the
+** [SQLITE_ENABLE_COLUMN_METADATA] C-preprocessor symbol.
+**
+** If two or more threads call one or more
+** [sqlite3_column_database_name | column metadata interfaces]
+** for the same [prepared statement] and result column
+** at the same time then the results are undefined.
+*/
+SQLITE_API const char *sqlite3_column_database_name(sqlite3_stmt*,int);
+SQLITE_API const void *sqlite3_column_database_name16(sqlite3_stmt*,int);
+SQLITE_API const char *sqlite3_column_table_name(sqlite3_stmt*,int);
+SQLITE_API const void *sqlite3_column_table_name16(sqlite3_stmt*,int);
+SQLITE_API const char *sqlite3_column_origin_name(sqlite3_stmt*,int);
+SQLITE_API const void *sqlite3_column_origin_name16(sqlite3_stmt*,int);
+
+/*
+** CAPI3REF: Declared Datatype Of A Query Result
+** METHOD: sqlite3_stmt
+**
+** ^(The first parameter is a [prepared statement].
+** If this statement is a [SELECT] statement and the Nth column of the
+** returned result set of that [SELECT] is a table column (not an
+** expression or subquery) then the declared type of the table
+** column is returned.)^ ^If the Nth column of the result set is an
+** expression or subquery, then a NULL pointer is returned.
+** ^The returned string is always UTF-8 encoded.
+**
+** ^(For example, given the database schema:
+**
+** CREATE TABLE t1(c1 VARIANT);
+**
+** and the following statement to be compiled:
+**
+** SELECT c1 + 1, c1 FROM t1;
+**
+** this routine would return the string "VARIANT" for the second result
+** column (i==1), and a NULL pointer for the first result column (i==0).)^
+**
+** ^SQLite uses dynamic run-time typing. ^So just because a column
+** is declared to contain a particular type does not mean that the
+** data stored in that column is of the declared type. SQLite is
+** strongly typed, but the typing is dynamic, not static. ^Type
+** is associated with individual values, not with the containers
+** used to hold those values.
+*/
+SQLITE_API const char *sqlite3_column_decltype(sqlite3_stmt*,int);
+SQLITE_API const void *sqlite3_column_decltype16(sqlite3_stmt*,int);
+
+/*
+** CAPI3REF: Evaluate An SQL Statement
+** METHOD: sqlite3_stmt
+**
+** After a [prepared statement] has been prepared using any of
+** [sqlite3_prepare_v2()], [sqlite3_prepare_v3()], [sqlite3_prepare16_v2()],
+** or [sqlite3_prepare16_v3()] or one of the legacy
+** interfaces [sqlite3_prepare()] or [sqlite3_prepare16()], this function
+** must be called one or more times to evaluate the statement.
+**
+** The details of the behavior of the sqlite3_step() interface depend
+** on whether the statement was prepared using the newer "vX" interfaces
+** [sqlite3_prepare_v3()], [sqlite3_prepare_v2()], [sqlite3_prepare16_v3()],
+** [sqlite3_prepare16_v2()] or the older legacy
+** interfaces [sqlite3_prepare()] and [sqlite3_prepare16()]. The use of the
+** new "vX" interface is recommended for new applications but the legacy
+** interface will continue to be supported.
+**
+** ^In the legacy interface, the return value will be either [SQLITE_BUSY],
+** [SQLITE_DONE], [SQLITE_ROW], [SQLITE_ERROR], or [SQLITE_MISUSE].
+** ^With the "v2" interface, any of the other [result codes] or
+** [extended result codes] might be returned as well.
+**
+** ^[SQLITE_BUSY] means that the database engine was unable to acquire the
+** database locks it needs to do its job. ^If the statement is a [COMMIT]
+** or occurs outside of an explicit transaction, then you can retry the
+** statement. If the statement is not a [COMMIT] and occurs within an
+** explicit transaction then you should rollback the transaction before
+** continuing.
+**
+** ^[SQLITE_DONE] means that the statement has finished executing
+** successfully. sqlite3_step() should not be called again on this virtual
+** machine without first calling [sqlite3_reset()] to reset the virtual
+** machine back to its initial state.
+**
+** ^If the SQL statement being executed returns any data, then [SQLITE_ROW]
+** is returned each time a new row of data is ready for processing by the
+** caller. The values may be accessed using the [column access functions].
+** sqlite3_step() is called again to retrieve the next row of data.
+**
+** ^[SQLITE_ERROR] means that a run-time error (such as a constraint
+** violation) has occurred. sqlite3_step() should not be called again on
+** the VM. More information may be found by calling [sqlite3_errmsg()].
+** ^With the legacy interface, a more specific error code (for example,
+** [SQLITE_INTERRUPT], [SQLITE_SCHEMA], [SQLITE_CORRUPT], and so forth)
+** can be obtained by calling [sqlite3_reset()] on the
+** [prepared statement]. ^In the "v2" interface,
+** the more specific error code is returned directly by sqlite3_step().
+**
+** [SQLITE_MISUSE] means that this routine was called inappropriately.
+** Perhaps it was called on a [prepared statement] that has
+** already been [sqlite3_finalize | finalized] or on one that had
+** previously returned [SQLITE_ERROR] or [SQLITE_DONE]. Or it could
+** be the case that the same database connection is being used by two or
+** more threads at the same moment in time.
+**
+** For all versions of SQLite up to and including 3.6.23.1, a call to
+** [sqlite3_reset()] was required after sqlite3_step() returned anything
+** other than [SQLITE_ROW] before any subsequent invocation of
+** sqlite3_step(). Failure to reset the prepared statement using
+** [sqlite3_reset()] would result in an [SQLITE_MISUSE] return from
+** sqlite3_step(). But after [version 3.6.23.1] ([dateof:3.6.23.1]),
+** sqlite3_step() began
+** calling [sqlite3_reset()] automatically in this circumstance rather
+** than returning [SQLITE_MISUSE]. This is not considered a compatibility
+** break because any application that ever receives an SQLITE_MISUSE error
+** is broken by definition. The [SQLITE_OMIT_AUTORESET] compile-time option
+** can be used to restore the legacy behavior.
+**
+** <b>Goofy Interface Alert:</b> In the legacy interface, the sqlite3_step()
+** API always returns a generic error code, [SQLITE_ERROR], following any
+** error other than [SQLITE_BUSY] and [SQLITE_MISUSE]. You must call
+** [sqlite3_reset()] or [sqlite3_finalize()] in order to find one of the
+** specific [error codes] that better describes the error.
+** We admit that this is a goofy design. The problem has been fixed
+** with the "v2" interface. If you prepare all of your SQL statements
+** using [sqlite3_prepare_v3()] or [sqlite3_prepare_v2()]
+** or [sqlite3_prepare16_v2()] or [sqlite3_prepare16_v3()] instead
+** of the legacy [sqlite3_prepare()] and [sqlite3_prepare16()] interfaces,
+** then the more specific [error codes] are returned directly
+** by sqlite3_step(). The use of the "vX" interfaces is recommended.
+*/
+SQLITE_API int sqlite3_step(sqlite3_stmt*);
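+
+/*
+** Usage sketch (illustrative): a "v2"-style evaluation loop for a
+** statement pStmt on connection db, distinguishing normal completion
+** from an error.
+**
+** <blockquote><pre>
+** int rc;
+** while( (rc = sqlite3_step(pStmt))==SQLITE_ROW ){
+**   // read the row with the column access functions
+** }
+** if( rc!=SQLITE_DONE ){
+**   fprintf(stderr, "step: %s\n", sqlite3_errmsg(db));
+** }
+** sqlite3_reset(pStmt);  // required before re-running pStmt
+** </pre></blockquote>
+*/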
+
+/*
+** CAPI3REF: Number of columns in a result set
+** METHOD: sqlite3_stmt
+**
+** ^The sqlite3_data_count(P) interface returns the number of columns in the
+** current row of the result set of [prepared statement] P.
+** ^If prepared statement P does not have results ready to return
+** (via calls to the [sqlite3_column_int | sqlite3_column()] family of
+** interfaces) then sqlite3_data_count(P) returns 0.
+** ^The sqlite3_data_count(P) routine also returns 0 if P is a NULL pointer.
+** ^The sqlite3_data_count(P) routine returns 0 if the previous call to
+** [sqlite3_step](P) returned [SQLITE_DONE]. ^The sqlite3_data_count(P)
+** will return non-zero if the previous call to [sqlite3_step](P) returned
+** [SQLITE_ROW], except in the case of the [PRAGMA incremental_vacuum]
+** where it always returns zero since each step of that multi-step
+** pragma returns 0 columns of data.
+**
+** See also: [sqlite3_column_count()]
+*/
+SQLITE_API int sqlite3_data_count(sqlite3_stmt *pStmt);
+
+/*
+** CAPI3REF: Fundamental Datatypes
+** KEYWORDS: SQLITE_TEXT
+**
+** ^(Every value in SQLite has one of five fundamental datatypes:
+**
+** <ul>
+** <li> 64-bit signed integer
+** <li> 64-bit IEEE floating point number
+** <li> string
+** <li> BLOB
+** <li> NULL
+** </ul>)^
+**
+** These constants are codes for each of those types.
+**
+** Note that the SQLITE_TEXT constant was also used in SQLite version 2
+** for a completely different meaning. Software that links against both
+** SQLite version 2 and SQLite version 3 should use SQLITE3_TEXT, not
+** SQLITE_TEXT.
+*/
+#define SQLITE_INTEGER 1
+#define SQLITE_FLOAT 2
+#define SQLITE_BLOB 4
+#define SQLITE_NULL 5
+#ifdef SQLITE_TEXT
+# undef SQLITE_TEXT
+#else
+# define SQLITE_TEXT 3
+#endif
+#define SQLITE3_TEXT 3
+
+/*
+** CAPI3REF: Result Values From A Query
+** KEYWORDS: {column access functions}
+** METHOD: sqlite3_stmt
+**
+** <b>Summary:</b>
+** <blockquote><table border=0 cellpadding=0 cellspacing=0>
+** <tr><td><b>sqlite3_column_blob</b><td>&rarr;<td>BLOB result
+** <tr><td><b>sqlite3_column_double</b><td>&rarr;<td>REAL result
+** <tr><td><b>sqlite3_column_int</b><td>&rarr;<td>32-bit INTEGER result
+** <tr><td><b>sqlite3_column_int64</b><td>&rarr;<td>64-bit INTEGER result
+** <tr><td><b>sqlite3_column_text</b><td>&rarr;<td>UTF-8 TEXT result
+** <tr><td><b>sqlite3_column_text16</b><td>&rarr;<td>UTF-16 TEXT result
+** <tr><td><b>sqlite3_column_value</b><td>&rarr;<td>The result as an
+** [sqlite3_value|unprotected sqlite3_value] object.
+** <tr><td>&nbsp;<td>&nbsp;<td>&nbsp;
+** <tr><td><b>sqlite3_column_bytes</b><td>&rarr;<td>Size of a BLOB
+** or a UTF-8 TEXT result in bytes
+** <tr><td><b>sqlite3_column_bytes16&nbsp;&nbsp;</b>
+** <td>&rarr;&nbsp;&nbsp;<td>Size of UTF-16
+** TEXT in bytes
+** <tr><td><b>sqlite3_column_type</b><td>&rarr;<td>Default
+** datatype of the result
+** </table></blockquote>
+**
+** <b>Details:</b>
+**
+** ^These routines return information about a single column of the current
+** result row of a query. ^In every case the first argument is a pointer
+** to the [prepared statement] that is being evaluated (the [sqlite3_stmt*]
+** that was returned from [sqlite3_prepare_v2()] or one of its variants)
+** and the second argument is the index of the column for which information
+** should be returned. ^The leftmost column of the result set has the index 0.
+** ^The number of columns in the result can be determined using
+** [sqlite3_column_count()].
+**
+** If the SQL statement does not currently point to a valid row, or if the
+** column index is out of range, the result is undefined.
+** These routines may only be called when the most recent call to
+** [sqlite3_step()] has returned [SQLITE_ROW] and neither
+** [sqlite3_reset()] nor [sqlite3_finalize()] have been called subsequently.
+** If any of these routines are called after [sqlite3_reset()] or
+** [sqlite3_finalize()] or after [sqlite3_step()] has returned
+** something other than [SQLITE_ROW], the results are undefined.
+** If [sqlite3_step()] or [sqlite3_reset()] or [sqlite3_finalize()]
+** are called from a different thread while any of these routines
+** are pending, then the results are undefined.
+**
+** The first six interfaces (_blob, _double, _int, _int64, _text, and _text16)
+** each return the value of a result column in a specific data format. If
+** the result column is not initially in the requested format (for example,
+** if the query returns an integer but the sqlite3_column_text() interface
+** is used to extract the value) then an automatic type conversion is performed.
+**
+** ^The sqlite3_column_type() routine returns the
+** [SQLITE_INTEGER | datatype code] for the initial data type
+** of the result column. ^The returned value is one of [SQLITE_INTEGER],
+** [SQLITE_FLOAT], [SQLITE_TEXT], [SQLITE_BLOB], or [SQLITE_NULL].
+** The return value of sqlite3_column_type() can be used to decide which
+** of the first six interfaces should be used to extract the column value.
+** The value returned by sqlite3_column_type() is only meaningful if no
+** automatic type conversions have occurred for the value in question.
+** After a type conversion, the result of calling sqlite3_column_type()
+** is undefined, though harmless. Future
+** versions of SQLite may change the behavior of sqlite3_column_type()
+** following a type conversion.
+**
+** If the result is a BLOB or a TEXT string, then the sqlite3_column_bytes()
+** or sqlite3_column_bytes16() interfaces can be used to determine the size
+** of that BLOB or string.
+**
+** ^If the result is a BLOB or UTF-8 string then the sqlite3_column_bytes()
+** routine returns the number of bytes in that BLOB or string.
+** ^If the result is a UTF-16 string, then sqlite3_column_bytes() converts
+** the string to UTF-8 and then returns the number of bytes.
+** ^If the result is a numeric value then sqlite3_column_bytes() uses
+** [sqlite3_snprintf()] to convert that value to a UTF-8 string and returns
+** the number of bytes in that string.
+** ^If the result is NULL, then sqlite3_column_bytes() returns zero.
+**
+** ^If the result is a BLOB or UTF-16 string then the sqlite3_column_bytes16()
+** routine returns the number of bytes in that BLOB or string.
+** ^If the result is a UTF-8 string, then sqlite3_column_bytes16() converts
+** the string to UTF-16 and then returns the number of bytes.
+** ^If the result is a numeric value then sqlite3_column_bytes16() uses
+** [sqlite3_snprintf()] to convert that value to a UTF-16 string and returns
+** the number of bytes in that string.
+** ^If the result is NULL, then sqlite3_column_bytes16() returns zero.
+**
+** ^The values returned by [sqlite3_column_bytes()] and
+** [sqlite3_column_bytes16()] do not include the zero terminators at the end
+** of the string. ^For clarity: the values returned by
+** [sqlite3_column_bytes()] and [sqlite3_column_bytes16()] are the number of
+** bytes in the string, not the number of characters.
+**
+** ^Strings returned by sqlite3_column_text() and sqlite3_column_text16(),
+** even empty strings, are always zero-terminated. ^The return
+** value from sqlite3_column_blob() for a zero-length BLOB is a NULL pointer.
+**
+** <b>Warning:</b> ^The object returned by [sqlite3_column_value()] is an
+** [unprotected sqlite3_value] object. In a multithreaded environment,
+** an unprotected sqlite3_value object may only be used safely with
+** [sqlite3_bind_value()] and [sqlite3_result_value()].
+** If the [unprotected sqlite3_value] object returned by
+** [sqlite3_column_value()] is used in any other way, including calls
+** to routines like [sqlite3_value_int()], [sqlite3_value_text()],
+** or [sqlite3_value_bytes()], the behavior is not threadsafe.
+** Hence, the sqlite3_column_value() interface
+** is normally only useful within the implementation of
+** [application-defined SQL functions] or [virtual tables], not within
+** top-level application code.
+**
+** These routines may attempt to convert the datatype of the result.
+** ^For example, if the internal representation is FLOAT and a text result
+** is requested, [sqlite3_snprintf()] is used internally to perform the
+** conversion automatically. ^(The following table details the conversions
+** that are applied:
+**
+** <blockquote>
+** <table border="1">
+** <tr><th> Internal<br>Type <th> Requested<br>Type <th> Conversion
+**
+** <tr><td> NULL <td> INTEGER <td> Result is 0
+** <tr><td> NULL <td> FLOAT <td> Result is 0.0
+** <tr><td> NULL <td> TEXT <td> Result is a NULL pointer
+** <tr><td> NULL <td> BLOB <td> Result is a NULL pointer
+** <tr><td> INTEGER <td> FLOAT <td> Convert from integer to float
+** <tr><td> INTEGER <td> TEXT <td> ASCII rendering of the integer
+** <tr><td> INTEGER <td> BLOB <td> Same as INTEGER->TEXT
+** <tr><td> FLOAT <td> INTEGER <td> [CAST] to INTEGER
+** <tr><td> FLOAT <td> TEXT <td> ASCII rendering of the float
+** <tr><td> FLOAT <td> BLOB <td> [CAST] to BLOB
+** <tr><td> TEXT <td> INTEGER <td> [CAST] to INTEGER
+** <tr><td> TEXT <td> FLOAT <td> [CAST] to REAL
+** <tr><td> TEXT <td> BLOB <td> No change
+** <tr><td> BLOB <td> INTEGER <td> [CAST] to INTEGER
+** <tr><td> BLOB <td> FLOAT <td> [CAST] to REAL
+** <tr><td> BLOB <td> TEXT <td> Add a zero terminator if needed
+** </table>
+** </blockquote>)^
+**
+** Note that when type conversions occur, pointers returned by prior
+** calls to sqlite3_column_blob(), sqlite3_column_text(), and/or
+** sqlite3_column_text16() may be invalidated.
+** Type conversions and pointer invalidations might occur
+** in the following cases:
+**
+** <ul>
+** <li> The initial content is a BLOB and sqlite3_column_text() or
+** sqlite3_column_text16() is called. A zero-terminator might
+** need to be added to the string.</li>
+** <li> The initial content is UTF-8 text and sqlite3_column_bytes16() or
+** sqlite3_column_text16() is called. The content must be converted
+** to UTF-16.</li>
+** <li> The initial content is UTF-16 text and sqlite3_column_bytes() or
+** sqlite3_column_text() is called. The content must be converted
+** to UTF-8.</li>
+** </ul>
+**
+** ^Conversions between UTF-16be and UTF-16le are always done in place and do
+** not invalidate a prior pointer, though of course the content of the buffer
+** that the prior pointer references will have been modified. Other kinds
+** of conversion are done in place when it is possible, but sometimes they
+** are not possible and in those cases prior pointers are invalidated.
+**
+** The safest policy is to invoke these routines
+** in one of the following ways:
+**
+** <ul>
+** <li>sqlite3_column_text() followed by sqlite3_column_bytes()</li>
+** <li>sqlite3_column_blob() followed by sqlite3_column_bytes()</li>
+** <li>sqlite3_column_text16() followed by sqlite3_column_bytes16()</li>
+** </ul>
+**
+** In other words, you should call sqlite3_column_text(),
+** sqlite3_column_blob(), or sqlite3_column_text16() first to force the result
+** into the desired format, then invoke sqlite3_column_bytes() or
+** sqlite3_column_bytes16() to find the size of the result. Do not mix calls
+** to sqlite3_column_text() or sqlite3_column_blob() with calls to
+** sqlite3_column_bytes16(), and do not mix calls to sqlite3_column_text16()
+** with calls to sqlite3_column_bytes().
+**
+** ^The pointers returned are valid until a type conversion occurs as
+** described above, or until [sqlite3_step()] or [sqlite3_reset()] or
+** [sqlite3_finalize()] is called. ^The memory space used to hold strings
+** and BLOBs is freed automatically. Do not pass the pointers returned
+** from [sqlite3_column_blob()], [sqlite3_column_text()], etc. into
+** [sqlite3_free()].
+**
+** As long as the input parameters are correct, these routines will only
+** fail if an out-of-memory error occurs during a format conversion.
+** Only the following subset of interfaces are subject to out-of-memory
+** errors:
+**
+** <ul>
+** <li> sqlite3_column_blob()
+** <li> sqlite3_column_text()
+** <li> sqlite3_column_text16()
+** <li> sqlite3_column_bytes()
+** <li> sqlite3_column_bytes16()
+** </ul>
+**
+** If an out-of-memory error occurs, then the return value from these
+** routines is the same as if the column had contained an SQL NULL value.
+** Valid SQL NULL returns can be distinguished from out-of-memory errors
+** by invoking the [sqlite3_errcode()] immediately after the suspect
+** return value is obtained and before any
+** other SQLite interface is called on the same [database connection].
+*/
+SQLITE_API const void *sqlite3_column_blob(sqlite3_stmt*, int iCol);
+SQLITE_API double sqlite3_column_double(sqlite3_stmt*, int iCol);
+SQLITE_API int sqlite3_column_int(sqlite3_stmt*, int iCol);
+SQLITE_API sqlite3_int64 sqlite3_column_int64(sqlite3_stmt*, int iCol);
+SQLITE_API const unsigned char *sqlite3_column_text(sqlite3_stmt*, int iCol);
+SQLITE_API const void *sqlite3_column_text16(sqlite3_stmt*, int iCol);
+SQLITE_API sqlite3_value *sqlite3_column_value(sqlite3_stmt*, int iCol);
+SQLITE_API int sqlite3_column_bytes(sqlite3_stmt*, int iCol);
+SQLITE_API int sqlite3_column_bytes16(sqlite3_stmt*, int iCol);
+SQLITE_API int sqlite3_column_type(sqlite3_stmt*, int iCol);
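+
+/*
+** A minimal sketch of the "safest policy" described above: force the
+** result into the desired format first, then ask for its size. The
+** statement pStmt (positioned on a row by [sqlite3_step()]), the
+** destination buffer buf (at least n bytes), and <string.h> for
+** memcpy() are assumptions of the illustration:
+**
+**   const unsigned char *z = sqlite3_column_text(pStmt, 0);  // UTF-8 form
+**   int n = sqlite3_column_bytes(pStmt, 0);  // size of that same UTF-8 text
+**   if( z ) memcpy(buf, z, (size_t)n);       // copy before the next step()
+*/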
+
+/*
+** CAPI3REF: Destroy A Prepared Statement Object
+** DESTRUCTOR: sqlite3_stmt
+**
+** ^The sqlite3_finalize() function is called to delete a [prepared statement].
+** ^If the most recent evaluation of the statement encountered no errors
+** or if the statement has never been evaluated, then sqlite3_finalize() returns
+** SQLITE_OK. ^If the most recent evaluation of statement S failed, then
+** sqlite3_finalize(S) returns the appropriate [error code] or
+** [extended error code].
+**
+** ^The sqlite3_finalize(S) routine can be called at any point during
+** the life cycle of [prepared statement] S:
+** before statement S is ever evaluated, after
+** one or more calls to [sqlite3_reset()], or after any call
+** to [sqlite3_step()] regardless of whether or not the statement has
+** completed execution.
+**
+** ^Invoking sqlite3_finalize() on a NULL pointer is a harmless no-op.
+**
+** The application must finalize every [prepared statement] in order to avoid
+** resource leaks. It is a grievous error for the application to try to use
+** a prepared statement after it has been finalized. Any use of a prepared
+** statement after it has been finalized can result in undefined and
+** undesirable behavior such as segfaults and heap corruption.
+*/
+SQLITE_API int sqlite3_finalize(sqlite3_stmt *pStmt);
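+
+/*
+** A short sketch, assuming an open handle db and a statement pStmt whose
+** most recent sqlite3_step() failed; <stdio.h> is assumed for fprintf().
+** sqlite3_finalize() then reports the specific error:
+**
+**   int rc = sqlite3_finalize(pStmt);
+**   if( rc!=SQLITE_OK ){
+**     fprintf(stderr, "statement failed: %s\n", sqlite3_errmsg(db));
+**   }
+*/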
+
+/*
+** CAPI3REF: Reset A Prepared Statement Object
+** METHOD: sqlite3_stmt
+**
+** The sqlite3_reset() function is called to reset a [prepared statement]
+** object back to its initial state, ready to be re-executed.
+** ^Any SQL statement variables that had values bound to them using
+** the [sqlite3_bind_blob | sqlite3_bind_*() API] retain their values.
+** Use [sqlite3_clear_bindings()] to reset the bindings.
+**
+** ^The [sqlite3_reset(S)] interface resets the [prepared statement] S
+** back to the beginning of its program.
+**
+** ^If the most recent call to [sqlite3_step(S)] for the
+** [prepared statement] S returned [SQLITE_ROW] or [SQLITE_DONE],
+** or if [sqlite3_step(S)] has never before been called on S,
+** then [sqlite3_reset(S)] returns [SQLITE_OK].
+**
+** ^If the most recent call to [sqlite3_step(S)] for the
+** [prepared statement] S indicated an error, then
+** [sqlite3_reset(S)] returns an appropriate [error code].
+**
+** ^The [sqlite3_reset(S)] interface does not change the values
+** of any [sqlite3_bind_blob|bindings] on the [prepared statement] S.
+*/
+SQLITE_API int sqlite3_reset(sqlite3_stmt *pStmt);
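+
+/*
+** A minimal sketch of the usual reset-and-rerun loop, assuming an open
+** handle db, a table t1(x), and an integer array aValue[] of length nRow:
+**
+**   sqlite3_stmt *pStmt = 0;
+**   int i;
+**   sqlite3_prepare_v2(db, "INSERT INTO t1(x) VALUES(?1)", -1, &pStmt, 0);
+**   for(i=0; i<nRow; i++){
+**     sqlite3_bind_int(pStmt, 1, aValue[i]);
+**     sqlite3_step(pStmt);      // expect SQLITE_DONE
+**     sqlite3_reset(pStmt);     // ready to run again; bindings are retained
+**   }
+**   sqlite3_finalize(pStmt);
+*/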
+
+/*
+** CAPI3REF: Create Or Redefine SQL Functions
+** KEYWORDS: {function creation routines}
+** METHOD: sqlite3
+**
+** ^These functions (collectively known as "function creation routines")
+** are used to add SQL functions or aggregates or to redefine the behavior
+** of existing SQL functions or aggregates. The only differences between
+** the three "sqlite3_create_function*" routines are the text encoding
+** expected for the second parameter (the name of the function being
+** created) and the presence or absence of a destructor callback for
+** the application data pointer. Function sqlite3_create_window_function()
+** is similar, but allows the user to supply the extra callback functions
+** needed by [aggregate window functions].
+**
+** ^The first parameter is the [database connection] to which the SQL
+** function is to be added. ^If an application uses more than one database
+** connection then application-defined SQL functions must be added
+** to each database connection separately.
+**
+** ^The second parameter is the name of the SQL function to be created or
+** redefined. ^The length of the name is limited to 255 bytes in a UTF-8
+** representation, exclusive of the zero-terminator. ^Note that the name
+** length limit is in UTF-8 bytes, not characters nor UTF-16 bytes.
+** ^Any attempt to create a function with a longer name
+** will result in [SQLITE_MISUSE] being returned.
+**
+** ^The third parameter (nArg)
+** is the number of arguments that the SQL function or
+** aggregate takes. ^If this parameter is -1, then the SQL function or
+** aggregate may take any number of arguments between 0 and the limit
+** set by [sqlite3_limit]([SQLITE_LIMIT_FUNCTION_ARG]). If the third
+** parameter is less than -1 or greater than 127 then the behavior is
+** undefined.
+**
+** ^The fourth parameter, eTextRep, specifies what
+** [SQLITE_UTF8 | text encoding] this SQL function prefers for
+** its parameters. The application should set this parameter to
+** [SQLITE_UTF16LE] if the function implementation invokes
+** [sqlite3_value_text16le()] on an input, or [SQLITE_UTF16BE] if the
+** implementation invokes [sqlite3_value_text16be()] on an input, or
+** [SQLITE_UTF16] if [sqlite3_value_text16()] is used, or [SQLITE_UTF8]
+** otherwise. ^The same SQL function may be registered multiple times using
+** different preferred text encodings, with different implementations for
+** each encoding.
+** ^When multiple implementations of the same function are available, SQLite
+** will pick the one that involves the least amount of data conversion.
+**
+** ^The fourth parameter may optionally be ORed with [SQLITE_DETERMINISTIC]
+** to signal that the function will always return the same result given
+** the same inputs within a single SQL statement. Most SQL functions are
+** deterministic. The built-in [random()] SQL function is an example of a
+** function that is not deterministic. The SQLite query planner is able to
+** perform additional optimizations on deterministic functions, so use
+** of the [SQLITE_DETERMINISTIC] flag is recommended where possible.
+**
+** ^The fourth parameter may also optionally include the [SQLITE_DIRECTONLY]
+** flag, which if present prevents the function from being invoked from
+** within VIEWs, TRIGGERs, CHECK constraints, generated column expressions,
+** index expressions, or the WHERE clause of partial indexes.
+**
+** <span style="background-color:#ffff90;">
+** For best security, the [SQLITE_DIRECTONLY] flag is recommended for
+** all application-defined SQL functions that do not need to be
+** used inside of triggers, views, CHECK constraints, or other elements of
+** the database schema. This flag is especially recommended for SQL
+** functions that have side effects or reveal internal application state.
+** Without this flag, an attacker might be able to modify the schema of
+** a database file to include invocations of the function with parameters
+** chosen by the attacker, which the application will then execute when
+** the database file is opened and read.
+** </span>
+**
+** ^(The fifth parameter is an arbitrary pointer. The implementation of the
+** function can gain access to this pointer using [sqlite3_user_data()].)^
+**
+** ^The sixth, seventh and eighth parameters passed to the three
+** "sqlite3_create_function*" functions, xFunc, xStep and xFinal, are
+** pointers to C-language functions that implement the SQL function or
+** aggregate. ^A scalar SQL function requires an implementation of the xFunc
+** callback only; NULL pointers must be passed as the xStep and xFinal
+** parameters. ^An aggregate SQL function requires an implementation of xStep
+** and xFinal and NULL pointer must be passed for xFunc. ^To delete an existing
+** SQL function or aggregate, pass NULL pointers for all three function
+** callbacks.
+**
+** ^The sixth, seventh, eighth and ninth parameters (xStep, xFinal, xValue
+** and xInverse) passed to sqlite3_create_window_function are pointers to
+** C-language callbacks that implement the new function. xStep and xFinal
+** must both be non-NULL. xValue and xInverse may either both be NULL, in
+** which case a regular aggregate function is created, or must both be
+** non-NULL, in which case the new function may be used as either an aggregate
+** or aggregate window function. More details regarding the implementation
+** of aggregate window functions are
+** [user-defined window functions|available here].
+**
+** ^(If the final parameter to sqlite3_create_function_v2() or
+** sqlite3_create_window_function() is not NULL, then it is the destructor for
+** the application data pointer. The destructor is invoked when the function
+** is deleted, either by being overloaded or when the database connection
+** closes.)^ ^The destructor is also invoked if the call to
+** sqlite3_create_function_v2() fails. ^When the destructor callback is
+** invoked, it is passed a single argument which is a copy of the application
+** data pointer which was the fifth parameter to sqlite3_create_function_v2().
+**
+** ^It is permitted to register multiple implementations of the same
+** function with the same name but with either differing numbers of
+** arguments or differing preferred text encodings. ^SQLite will use
+** the implementation that most closely matches the way in which the
+** SQL function is used. ^A function implementation with a non-negative
+** nArg parameter is a better match than a function implementation with
+** a negative nArg. ^A function where the preferred text encoding
+** matches the database encoding is a better
+** match than a function where the encoding is different.
+** ^A function where the encoding difference is between UTF16le and UTF16be
+** is a closer match than a function where the encoding difference is
+** between UTF8 and UTF16.
+**
+** ^Built-in functions may be overloaded by new application-defined functions.
+**
+** ^An application-defined function is permitted to call other
+** SQLite interfaces. However, such calls must not
+** close the database connection nor finalize or reset the prepared
+** statement in which the function is running.
+*/
+SQLITE_API int sqlite3_create_function(
+ sqlite3 *db,
+ const char *zFunctionName,
+ int nArg,
+ int eTextRep,
+ void *pApp,
+ void (*xFunc)(sqlite3_context*,int,sqlite3_value**),
+ void (*xStep)(sqlite3_context*,int,sqlite3_value**),
+ void (*xFinal)(sqlite3_context*)
+);
+SQLITE_API int sqlite3_create_function16(
+ sqlite3 *db,
+ const void *zFunctionName,
+ int nArg,
+ int eTextRep,
+ void *pApp,
+ void (*xFunc)(sqlite3_context*,int,sqlite3_value**),
+ void (*xStep)(sqlite3_context*,int,sqlite3_value**),
+ void (*xFinal)(sqlite3_context*)
+);
+SQLITE_API int sqlite3_create_function_v2(
+ sqlite3 *db,
+ const char *zFunctionName,
+ int nArg,
+ int eTextRep,
+ void *pApp,
+ void (*xFunc)(sqlite3_context*,int,sqlite3_value**),
+ void (*xStep)(sqlite3_context*,int,sqlite3_value**),
+ void (*xFinal)(sqlite3_context*),
+ void(*xDestroy)(void*)
+);
+SQLITE_API int sqlite3_create_window_function(
+ sqlite3 *db,
+ const char *zFunctionName,
+ int nArg,
+ int eTextRep,
+ void *pApp,
+ void (*xStep)(sqlite3_context*,int,sqlite3_value**),
+ void (*xFinal)(sqlite3_context*),
+ void (*xValue)(sqlite3_context*),
+ void (*xInverse)(sqlite3_context*,int,sqlite3_value**),
+ void(*xDestroy)(void*)
+);
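+
+/*
+** A minimal sketch of registering a scalar function, following the
+** SQLITE_DETERMINISTIC and SQLITE_DIRECTONLY recommendations above;
+** the function name "half" and the open handle db are illustrative:
+**
+**   static void halfFunc(sqlite3_context *ctx, int argc, sqlite3_value **argv){
+**     (void)argc;  // always 1, per the nArg given at registration
+**     sqlite3_result_double(ctx, 0.5*sqlite3_value_double(argv[0]));
+**   }
+**
+**   int rc = sqlite3_create_function_v2(db, "half", 1,
+**                SQLITE_UTF8|SQLITE_DETERMINISTIC|SQLITE_DIRECTONLY,
+**                0, halfFunc, 0, 0, 0);  // scalar: xStep/xFinal are NULL
+*/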
+
+/*
+** CAPI3REF: Text Encodings
+**
+** These constants define integer codes that represent the various
+** text encodings supported by SQLite.
+*/
+#define SQLITE_UTF8 1 /* IMP: R-37514-35566 */
+#define SQLITE_UTF16LE 2 /* IMP: R-03371-37637 */
+#define SQLITE_UTF16BE 3 /* IMP: R-51971-34154 */
+#define SQLITE_UTF16 4 /* Use native byte order */
+#define SQLITE_ANY 5 /* Deprecated */
+#define SQLITE_UTF16_ALIGNED 8 /* sqlite3_create_collation only */
+
+/*
+** CAPI3REF: Function Flags
+**
+** These constants may be ORed together with the
+** [SQLITE_UTF8 | preferred text encoding] as the fourth argument
+** to [sqlite3_create_function()], [sqlite3_create_function16()], or
+** [sqlite3_create_function_v2()].
+**
+** <dl>
+** [[SQLITE_DETERMINISTIC]] <dt>SQLITE_DETERMINISTIC</dt><dd>
+** The SQLITE_DETERMINISTIC flag means that the new function always gives
+** the same output when the input parameters are the same.
+** The [abs|abs() function] is deterministic, for example, but
+** [randomblob|randomblob()] is not. Functions must
+** be deterministic in order to be used in certain contexts such as
+** with the WHERE clause of [partial indexes] or in [generated columns].
+** SQLite might also optimize deterministic functions by factoring them
+** out of inner loops.
+** </dd>
+**
+** [[SQLITE_DIRECTONLY]] <dt>SQLITE_DIRECTONLY</dt><dd>
+** The SQLITE_DIRECTONLY flag means that the function may only be invoked
+** from top-level SQL, and cannot be used in VIEWs or TRIGGERs nor in
+** schema structures such as [CHECK constraints], [DEFAULT clauses],
+** [expression indexes], [partial indexes], or [generated columns].
+** The SQLITE_DIRECTONLY flag is a security feature which is recommended
+** for all [application-defined SQL functions], and especially for functions
+** that have side-effects or that could potentially leak sensitive
+** information.
+** </dd>
+**
+** [[SQLITE_INNOCUOUS]] <dt>SQLITE_INNOCUOUS</dt><dd>
+** The SQLITE_INNOCUOUS flag means that the function is unlikely
+** to cause problems even if misused. An innocuous function should have
+** no side effects and should not depend on any values other than its
+** input parameters. The [abs|abs() function] is an example of an
+** innocuous function.
+** The [load_extension() SQL function] is not innocuous because of its
+** side effects.
+** <p> SQLITE_INNOCUOUS is similar to SQLITE_DETERMINISTIC, but is not
+** exactly the same. The [random|random() function] is an example of a
+** function that is innocuous but not deterministic.
+** <p>Some heightened security settings
+** ([SQLITE_DBCONFIG_TRUSTED_SCHEMA] and [PRAGMA trusted_schema=OFF])
+** disable the use of SQL functions inside views and triggers and in
+** schema structures such as [CHECK constraints], [DEFAULT clauses],
+** [expression indexes], [partial indexes], and [generated columns] unless
+** the function is tagged with SQLITE_INNOCUOUS. Most built-in functions
+** are innocuous. Developers are advised to avoid using the
+** SQLITE_INNOCUOUS flag for application-defined functions unless the
+** function has been carefully audited and found to be free of potentially
+** security-adverse side-effects and information-leaks.
+** </dd>
+**
+** [[SQLITE_SUBTYPE]] <dt>SQLITE_SUBTYPE</dt><dd>
+** The SQLITE_SUBTYPE flag indicates to SQLite that a function may call
+** [sqlite3_value_subtype()] to inspect the sub-types of its arguments.
+** Specifying this flag makes no difference for scalar or aggregate user
+** functions. However, if it is not specified for a user-defined window
+** function, then any sub-types belonging to arguments passed to the window
+** function may be discarded before the window function is called (i.e.
+** sqlite3_value_subtype() will always return 0).
+** </dd>
+** </dl>
+*/
+#define SQLITE_DETERMINISTIC 0x000000800
+#define SQLITE_DIRECTONLY 0x000080000
+#define SQLITE_SUBTYPE 0x000100000
+#define SQLITE_INNOCUOUS 0x000200000
+
+/*
+** CAPI3REF: Deprecated Functions
+** DEPRECATED
+**
+** These functions are [deprecated]. In order to maintain
+** backwards compatibility with older code, these functions continue
+** to be supported. However, new applications should avoid
+** the use of these functions. To encourage programmers to avoid
+** these functions, we will not explain what they do.
+*/
+#ifndef SQLITE_OMIT_DEPRECATED
+SQLITE_API SQLITE_DEPRECATED int sqlite3_aggregate_count(sqlite3_context*);
+SQLITE_API SQLITE_DEPRECATED int sqlite3_expired(sqlite3_stmt*);
+SQLITE_API SQLITE_DEPRECATED int sqlite3_transfer_bindings(sqlite3_stmt*, sqlite3_stmt*);
+SQLITE_API SQLITE_DEPRECATED int sqlite3_global_recover(void);
+SQLITE_API SQLITE_DEPRECATED void sqlite3_thread_cleanup(void);
+SQLITE_API SQLITE_DEPRECATED int sqlite3_memory_alarm(void(*)(void*,sqlite3_int64,int),
+ void*,sqlite3_int64);
+#endif
+
+/*
+** CAPI3REF: Obtaining SQL Values
+** METHOD: sqlite3_value
+**
+** <b>Summary:</b>
+** <blockquote><table border=0 cellpadding=0 cellspacing=0>
+** <tr><td><b>sqlite3_value_blob</b><td>&rarr;<td>BLOB value
+** <tr><td><b>sqlite3_value_double</b><td>&rarr;<td>REAL value
+** <tr><td><b>sqlite3_value_int</b><td>&rarr;<td>32-bit INTEGER value
+** <tr><td><b>sqlite3_value_int64</b><td>&rarr;<td>64-bit INTEGER value
+** <tr><td><b>sqlite3_value_pointer</b><td>&rarr;<td>Pointer value
+** <tr><td><b>sqlite3_value_text</b><td>&rarr;<td>UTF-8 TEXT value
+** <tr><td><b>sqlite3_value_text16</b><td>&rarr;<td>UTF-16 TEXT value in
+** the native byteorder
+** <tr><td><b>sqlite3_value_text16be</b><td>&rarr;<td>UTF-16be TEXT value
+** <tr><td><b>sqlite3_value_text16le</b><td>&rarr;<td>UTF-16le TEXT value
+** <tr><td>&nbsp;<td>&nbsp;<td>&nbsp;
+** <tr><td><b>sqlite3_value_bytes</b><td>&rarr;<td>Size of a BLOB
+** or a UTF-8 TEXT in bytes
+** <tr><td><b>sqlite3_value_bytes16&nbsp;&nbsp;</b>
+** <td>&rarr;&nbsp;&nbsp;<td>Size of UTF-16
+** TEXT in bytes
+** <tr><td><b>sqlite3_value_type</b><td>&rarr;<td>Default
+** datatype of the value
+** <tr><td><b>sqlite3_value_numeric_type&nbsp;&nbsp;</b>
+** <td>&rarr;&nbsp;&nbsp;<td>Best numeric datatype of the value
+** <tr><td><b>sqlite3_value_nochange&nbsp;&nbsp;</b>
+** <td>&rarr;&nbsp;&nbsp;<td>True if the column is unchanged in an UPDATE
+** against a virtual table.
+** <tr><td><b>sqlite3_value_frombind&nbsp;&nbsp;</b>
+** <td>&rarr;&nbsp;&nbsp;<td>True if value originated from a [bound parameter]
+** </table></blockquote>
+**
+** <b>Details:</b>
+**
+** These routines extract type, size, and content information from
+** [protected sqlite3_value] objects. Protected sqlite3_value objects
+** are used to pass parameter information into the functions that
+** implement [application-defined SQL functions] and [virtual tables].
+**
+** These routines work only with [protected sqlite3_value] objects.
+** Any attempt to use these routines on an [unprotected sqlite3_value]
+** is not threadsafe.
+**
+** ^These routines work just like the corresponding [column access functions]
+** except that these routines take a single [protected sqlite3_value] object
+** pointer instead of a [sqlite3_stmt*] pointer and an integer column number.
+**
+** ^The sqlite3_value_text16() interface extracts a UTF-16 string
+** in the native byte-order of the host machine. ^The
+** sqlite3_value_text16be() and sqlite3_value_text16le() interfaces
+** extract UTF-16 strings as big-endian and little-endian respectively.
+**
+** ^If [sqlite3_value] object V was initialized
+** using [sqlite3_bind_pointer(S,I,P,X,D)] or [sqlite3_result_pointer(C,P,X,D)]
+** and if X and Y are strings that compare equal according to strcmp(X,Y),
+** then sqlite3_value_pointer(V,Y) will return the pointer P. ^Otherwise,
+** sqlite3_value_pointer(V,Y) returns a NULL. The sqlite3_bind_pointer()
+** routine is part of the [pointer passing interface] added for SQLite 3.20.0.
+**
+** ^(The sqlite3_value_type(V) interface returns the
+** [SQLITE_INTEGER | datatype code] for the initial datatype of the
+** [sqlite3_value] object V. The returned value is one of [SQLITE_INTEGER],
+** [SQLITE_FLOAT], [SQLITE_TEXT], [SQLITE_BLOB], or [SQLITE_NULL].)^
+** Other interfaces might change the datatype for an sqlite3_value object.
+** For example, if the datatype is initially SQLITE_INTEGER and
+** sqlite3_value_text(V) is called to extract a text value for that
+** integer, then subsequent calls to sqlite3_value_type(V) might return
+** SQLITE_TEXT. Whether or not a persistent internal datatype conversion
+** occurs is undefined and may change from one release of SQLite to the next.
+**
+** ^(The sqlite3_value_numeric_type() interface attempts to apply
+** numeric affinity to the value. This means that an attempt is
+** made to convert the value to an integer or floating point. If
+** such a conversion is possible without loss of information (in other
+** words, if the value is a string that looks like a number)
+** then the conversion is performed. Otherwise no conversion occurs.
+** The [SQLITE_INTEGER | datatype] after conversion is returned.)^
+**
+** ^Within the [xUpdate] method of a [virtual table], the
+** sqlite3_value_nochange(X) interface returns true if and only if
+** the column corresponding to X is unchanged by the UPDATE operation
+** that the xUpdate method call was invoked to implement, and if
+** the prior [xColumn] method call that was invoked to extract
+** the value for that column returned without setting a result (probably
+** because it queried [sqlite3_vtab_nochange()] and found that the column
+** was unchanging). ^Within an [xUpdate] method, any value for which
+** sqlite3_value_nochange(X) is true will in all other respects appear
+** to be a NULL value. If sqlite3_value_nochange(X) is invoked anywhere other
+** than within an [xUpdate] method call for an UPDATE statement, then
+** the return value is arbitrary and meaningless.
+**
+** ^The sqlite3_value_frombind(X) interface returns non-zero if the
+** value X originated from one of the [sqlite3_bind_int|sqlite3_bind()]
+** interfaces. ^If X comes from an SQL literal value, or a table column,
+** or an expression, then sqlite3_value_frombind(X) returns zero.
+**
+** Please pay particular attention to the fact that the pointer returned
+** from [sqlite3_value_blob()], [sqlite3_value_text()], or
+** [sqlite3_value_text16()] can be invalidated by a subsequent call to
+** [sqlite3_value_bytes()], [sqlite3_value_bytes16()], [sqlite3_value_text()],
+** or [sqlite3_value_text16()].
+**
+** These routines must be called from the same thread as
+** the SQL function that supplied the [sqlite3_value*] parameters.
+**
+** As long as the input parameter is correct, these routines can only
+** fail if an out-of-memory error occurs during a format conversion.
+** Only the following subset of interfaces are subject to out-of-memory
+** errors:
+**
+** <ul>
+** <li> sqlite3_value_blob()
+** <li> sqlite3_value_text()
+** <li> sqlite3_value_text16()
+** <li> sqlite3_value_text16le()
+** <li> sqlite3_value_text16be()
+** <li> sqlite3_value_bytes()
+** <li> sqlite3_value_bytes16()
+** </ul>
+**
+** If an out-of-memory error occurs, then the return value from these
+** routines is the same as if the column had contained an SQL NULL value.
+** Valid SQL NULL returns can be distinguished from out-of-memory errors
+** by invoking the [sqlite3_errcode()] immediately after the suspect
+** return value is obtained and before any
+** other SQLite interface is called on the same [database connection].
+*/
+SQLITE_API const void *sqlite3_value_blob(sqlite3_value*);
+SQLITE_API double sqlite3_value_double(sqlite3_value*);
+SQLITE_API int sqlite3_value_int(sqlite3_value*);
+SQLITE_API sqlite3_int64 sqlite3_value_int64(sqlite3_value*);
+SQLITE_API void *sqlite3_value_pointer(sqlite3_value*, const char*);
+SQLITE_API const unsigned char *sqlite3_value_text(sqlite3_value*);
+SQLITE_API const void *sqlite3_value_text16(sqlite3_value*);
+SQLITE_API const void *sqlite3_value_text16le(sqlite3_value*);
+SQLITE_API const void *sqlite3_value_text16be(sqlite3_value*);
+SQLITE_API int sqlite3_value_bytes(sqlite3_value*);
+SQLITE_API int sqlite3_value_bytes16(sqlite3_value*);
+SQLITE_API int sqlite3_value_type(sqlite3_value*);
+SQLITE_API int sqlite3_value_numeric_type(sqlite3_value*);
+SQLITE_API int sqlite3_value_nochange(sqlite3_value*);
+SQLITE_API int sqlite3_value_frombind(sqlite3_value*);
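+
+/*
+** A small sketch of dispatching on sqlite3_value_type() inside an
+** [application-defined SQL function]; the function body is hypothetical:
+**
+**   static void typeNameFunc(sqlite3_context *ctx, int argc, sqlite3_value **argv){
+**     const char *zType = "unknown";
+**     (void)argc;
+**     switch( sqlite3_value_type(argv[0]) ){
+**       case SQLITE_INTEGER: zType = "integer"; break;
+**       case SQLITE_FLOAT:   zType = "real";    break;
+**       case SQLITE_TEXT:    zType = "text";    break;
+**       case SQLITE_BLOB:    zType = "blob";    break;
+**       case SQLITE_NULL:    zType = "null";    break;
+**     }
+**     sqlite3_result_text(ctx, zType, -1, SQLITE_STATIC);  // literals are static
+**   }
+*/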
+
+/*
+** CAPI3REF: Finding The Subtype Of SQL Values
+** METHOD: sqlite3_value
+**
+** The sqlite3_value_subtype(V) function returns the subtype for
+** an [application-defined SQL function] argument V. The subtype
+** information can be used to pass a limited amount of context from
+** one SQL function to another. Use the [sqlite3_result_subtype()]
+** routine to set the subtype for the return value of an SQL function.
+*/
+SQLITE_API unsigned int sqlite3_value_subtype(sqlite3_value*);
+
+/*
+** CAPI3REF: Copy And Free SQL Values
+** METHOD: sqlite3_value
+**
+** ^The sqlite3_value_dup(V) interface makes a copy of the [sqlite3_value]
+** object V and returns a pointer to that copy. ^The [sqlite3_value] returned
+** is a [protected sqlite3_value] object even if the input is not.
+** ^The sqlite3_value_dup(V) interface returns NULL if V is NULL or if a
+** memory allocation fails.
+**
+** ^The sqlite3_value_free(V) interface frees an [sqlite3_value] object
+** previously obtained from [sqlite3_value_dup()]. ^If V is a NULL pointer
+** then sqlite3_value_free(V) is a harmless no-op.
+*/
+SQLITE_API sqlite3_value *sqlite3_value_dup(const sqlite3_value*);
+SQLITE_API void sqlite3_value_free(sqlite3_value*);
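+
+/*
+** A brief sketch, assuming this runs inside an SQL function that needs to
+** retain its argument beyond the current call:
+**
+**   sqlite3_value *pCopy = sqlite3_value_dup(argv[0]);  // protected copy
+**   if( pCopy==0 ){
+**     sqlite3_result_error_nomem(ctx);   // dup failed: out of memory
+**     return;
+**   }
+**   // ... keep pCopy for later use, then release it:
+**   sqlite3_value_free(pCopy);
+*/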
+
+/*
+** CAPI3REF: Obtain Aggregate Function Context
+** METHOD: sqlite3_context
+**
+** Implementations of aggregate SQL functions use this
+** routine to allocate memory for storing their state.
+**
+** ^The first time the sqlite3_aggregate_context(C,N) routine is called
+** for a particular aggregate function, SQLite allocates
+** N bytes of memory, zeroes out that memory, and returns a pointer
+** to the new memory. ^On second and subsequent calls to
+** sqlite3_aggregate_context() for the same aggregate function instance,
+** the same buffer is returned. The sqlite3_aggregate_context() routine is
+** normally
+** called once for each invocation of the xStep callback and then one
+** last time when the xFinal callback is invoked. ^(When no rows match
+** an aggregate query, the xStep() callback of the aggregate function
+** implementation is never called and xFinal() is called exactly once.
+** In those cases, sqlite3_aggregate_context() might be called for the
+** first time from within xFinal().)^
+**
+** ^The sqlite3_aggregate_context(C,N) routine returns a NULL pointer
+** when first called if N is less than or equal to zero or if a memory
+** allocation error occurs.
+**
+** ^(The amount of space allocated by sqlite3_aggregate_context(C,N) is
+** determined by the N parameter on first successful call. Changing the
+** value of N in any subsequent call to sqlite3_aggregate_context() within
+** the same aggregate function instance will not resize the memory
+** allocation.)^ Within the xFinal callback, it is customary to set
+** N=0 in calls to sqlite3_aggregate_context(C,N) so that no
+** pointless memory allocations occur.
+**
+** ^SQLite automatically frees the memory allocated by
+** sqlite3_aggregate_context() when the aggregate query concludes.
+**
+** The first parameter must be a copy of the
+** [sqlite3_context | SQL function context] that is the first parameter
+** to the xStep or xFinal callback routine that implements the aggregate
+** function.
+**
+** This routine must be called from the same thread in which
+** the aggregate SQL function is running.
+*/
+SQLITE_API void *sqlite3_aggregate_context(sqlite3_context*, int nBytes);
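+
+/*
+** A minimal sketch of an aggregate that sums 64-bit integers, showing
+** the customary N and N=0 calls described above:
+**
+**   typedef struct SumCtx { sqlite3_int64 iSum; } SumCtx;
+**
+**   static void sumStep(sqlite3_context *ctx, int argc, sqlite3_value **argv){
+**     SumCtx *p = (SumCtx*)sqlite3_aggregate_context(ctx, sizeof(*p));
+**     (void)argc;
+**     if( p ) p->iSum += sqlite3_value_int64(argv[0]);  // zeroed on first call
+**   }
+**   static void sumFinal(sqlite3_context *ctx){
+**     SumCtx *p = (SumCtx*)sqlite3_aggregate_context(ctx, 0);  // N=0: no alloc
+**     sqlite3_result_int64(ctx, p ? p->iSum : 0);  // p is NULL if no rows matched
+**   }
+*/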
+
+/*
+** CAPI3REF: User Data For Functions
+** METHOD: sqlite3_context
+**
+** ^The sqlite3_user_data() interface returns a copy of
+** the pointer that was the pUserData parameter (the 5th parameter)
+** of the [sqlite3_create_function()]
+** and [sqlite3_create_function16()] routines that originally
+** registered the application defined function.
+**
+** This routine must be called from the same thread in which
+** the application-defined function is running.
+*/
+SQLITE_API void *sqlite3_user_data(sqlite3_context*);
+
+/*
+** CAPI3REF: Database Connection For Functions
+** METHOD: sqlite3_context
+**
+** ^The sqlite3_context_db_handle() interface returns a copy of
+** the pointer to the [database connection] (the 1st parameter)
+** of the [sqlite3_create_function()]
+** and [sqlite3_create_function16()] routines that originally
+** registered the application defined function.
+*/
+SQLITE_API sqlite3 *sqlite3_context_db_handle(sqlite3_context*);
+
+/*
+** CAPI3REF: Function Auxiliary Data
+** METHOD: sqlite3_context
+**
+** These functions may be used by (non-aggregate) SQL functions to
+** associate metadata with argument values. If the same value is passed to
+** multiple invocations of the same SQL function during query execution, under
+** some circumstances the associated metadata may be preserved. An example
+** of where this might be useful is in a regular-expression matching
+** function. The compiled version of the regular expression can be stored as
+** metadata associated with the pattern string.
+** Then as long as the pattern string remains the same,
+** the compiled regular expression can be reused on multiple
+** invocations of the same function.
+**
+** ^The sqlite3_get_auxdata(C,N) interface returns a pointer to the metadata
+** associated by the sqlite3_set_auxdata(C,N,P,X) function with the Nth argument
+** value to the application-defined function. ^N is zero for the left-most
+** function argument. ^If there is no metadata
+** associated with the function argument, the sqlite3_get_auxdata(C,N) interface
+** returns a NULL pointer.
+**
+** ^The sqlite3_set_auxdata(C,N,P,X) interface saves P as metadata for the N-th
+** argument of the application-defined function. ^Subsequent
+** calls to sqlite3_get_auxdata(C,N) return P from the most recent
+** sqlite3_set_auxdata(C,N,P,X) call if the metadata is still valid or
+** NULL if the metadata has been discarded.
+** ^After each call to sqlite3_set_auxdata(C,N,P,X) where X is not NULL,
+** SQLite will invoke the destructor function X with parameter P exactly
+** once, when the metadata is discarded.
+** SQLite is free to discard the metadata at any time, including: <ul>
+** <li> ^(when the corresponding function parameter changes)^, or
+** <li> ^(when [sqlite3_reset()] or [sqlite3_finalize()] is called for the
+** SQL statement)^, or
+** <li> ^(when sqlite3_set_auxdata() is invoked again on the same
+** parameter)^, or
+** <li> ^(during the original sqlite3_set_auxdata() call when a memory
+** allocation error occurs.)^ </ul>
+**
+** Note the last bullet in particular. The destructor X in
+** sqlite3_set_auxdata(C,N,P,X) might be called immediately, before the
+** sqlite3_set_auxdata() interface even returns. Hence sqlite3_set_auxdata()
+** should be called near the end of the function implementation and the
+** function implementation should not make any use of P after
+** sqlite3_set_auxdata() has been called.
+**
+** ^(In practice, metadata is preserved between function calls for
+** function parameters that are compile-time constants, including literal
+** values and [parameters] and expressions composed from the same.)^
+**
+** The value of the N parameter to these interfaces should be non-negative.
+** Future enhancements may make use of negative N values to define new
+** kinds of function caching behavior.
+**
+** These routines must be called from the same thread in which
+** the SQL function is running.
+*/
+SQLITE_API void *sqlite3_get_auxdata(sqlite3_context*, int N);
+SQLITE_API void sqlite3_set_auxdata(sqlite3_context*, int N, void*, void (*)(void*));
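+
+/*
+** A sketch of the regular-expression caching pattern described above.
+** MyRegex, my_regex_compile(), my_regex_match(), and my_regex_free() are
+** hypothetical helpers; note that P is not used after sqlite3_set_auxdata(),
+** per the warning above:
+**
+**   static void regexpFunc(sqlite3_context *ctx, int argc, sqlite3_value **argv){
+**     MyRegex *p = (MyRegex*)sqlite3_get_auxdata(ctx, 0);
+**     int isNew = 0;
+**     (void)argc;
+**     if( p==0 ){
+**       p = my_regex_compile((const char*)sqlite3_value_text(argv[0]));
+**       if( p==0 ){ sqlite3_result_error_nomem(ctx); return; }
+**       isNew = 1;
+**     }
+**     sqlite3_result_int(ctx,
+**         my_regex_match(p, (const char*)sqlite3_value_text(argv[1])));
+**     if( isNew ) sqlite3_set_auxdata(ctx, 0, p, my_regex_free);  // last step
+**   }
+*/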
+
+
+/*
+** CAPI3REF: Constants Defining Special Destructor Behavior
+**
+** These are special values for the destructor that is passed in as the
+** final argument to routines like [sqlite3_result_blob()]. ^If the destructor
+** argument is SQLITE_STATIC, it means that the content pointer is constant
+** and will never change. It does not need to be destroyed. ^The
+** SQLITE_TRANSIENT value means that the content will likely change in
+** the near future and that SQLite should make its own private copy of
+** the content before returning.
+**
+** The typedef is necessary to work around problems in certain
+** C++ compilers.
+*/
+typedef void (*sqlite3_destructor_type)(void*);
+#define SQLITE_STATIC ((sqlite3_destructor_type)0)
+#define SQLITE_TRANSIENT ((sqlite3_destructor_type)-1)
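+
+/*
+** A two-line sketch of the distinction: a string literal outlives the call,
+** so SQLITE_STATIC is safe; a stack buffer does not, so SQLITE_TRANSIENT
+** asks SQLite to make its own copy (ctx is a hypothetical [sqlite3_context]):
+**
+**   sqlite3_result_text(ctx, "forty-two", -1, SQLITE_STATIC);
+**
+**   char zBuf[32];
+**   sqlite3_snprintf(sizeof(zBuf), zBuf, "%d", 42);
+**   sqlite3_result_text(ctx, zBuf, -1, SQLITE_TRANSIENT);  // copied by SQLite
+*/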
+
+/*
+** CAPI3REF: Setting The Result Of An SQL Function
+** METHOD: sqlite3_context
+**
+** These routines are used by the xFunc or xFinal callbacks that
+** implement SQL functions and aggregates. See
+** [sqlite3_create_function()] and [sqlite3_create_function16()]
+** for additional information.
+**
+** These functions work very much like the [parameter binding] family of
+** functions used to bind values to host parameters in prepared statements.
+** Refer to the [SQL parameter] documentation for additional information.
+**
+** ^The sqlite3_result_blob() interface sets the result from
+** an application-defined function to be the BLOB whose content is pointed
+** to by the second parameter and which is N bytes long where N is the
+** third parameter.
+**
+** ^The sqlite3_result_zeroblob(C,N) and sqlite3_result_zeroblob64(C,N)
+** interfaces set the result of the application-defined function to be
+** a BLOB containing all zero bytes and N bytes in size.
+**
+** ^The sqlite3_result_double() interface sets the result from
+** an application-defined function to be a floating point value specified
+** by its 2nd argument.
+**
+** ^The sqlite3_result_error() and sqlite3_result_error16() functions
+** cause the implemented SQL function to throw an exception.
+** ^SQLite uses the string pointed to by the
+** 2nd parameter of sqlite3_result_error() or sqlite3_result_error16()
+** as the text of an error message. ^SQLite interprets the error
+** message string from sqlite3_result_error() as UTF-8. ^SQLite
+** interprets the string from sqlite3_result_error16() as UTF-16 using
+** the same [byte-order determination rules] as [sqlite3_bind_text16()].
+** ^If the third parameter to sqlite3_result_error()
+** or sqlite3_result_error16() is negative then SQLite takes as the error
+** message all text up through the first zero character.
+** ^If the third parameter to sqlite3_result_error() or
+** sqlite3_result_error16() is non-negative then SQLite takes that many
+** bytes (not characters) from the 2nd parameter as the error message.
+** ^The sqlite3_result_error() and sqlite3_result_error16()
+** routines make a private copy of the error message text before
+** they return. Hence, the calling function can deallocate or
+** modify the text after they return without harm.
+** ^The sqlite3_result_error_code() function changes the error code
+** returned by SQLite as a result of an error in a function. ^By default,
+** the error code is SQLITE_ERROR. ^A subsequent call to sqlite3_result_error()
+** or sqlite3_result_error16() resets the error code to SQLITE_ERROR.
+**
+** ^The sqlite3_result_error_toobig() interface causes SQLite to throw an
+** error indicating that a string or BLOB is too long to represent.
+**
+** ^The sqlite3_result_error_nomem() interface causes SQLite to throw an
+** error indicating that a memory allocation failed.
+**
+** ^The sqlite3_result_int() interface sets the return value
+** of the application-defined function to be the 32-bit signed integer
+** value given in the 2nd argument.
+** ^The sqlite3_result_int64() interface sets the return value
+** of the application-defined function to be the 64-bit signed integer
+** value given in the 2nd argument.
+**
+** ^The sqlite3_result_null() interface sets the return value
+** of the application-defined function to be NULL.
+**
+** ^The sqlite3_result_text(), sqlite3_result_text16(),
+** sqlite3_result_text16le(), and sqlite3_result_text16be() interfaces
+** set the return value of the application-defined function to be
+** a text string which is represented as UTF-8, UTF-16 native byte order,
+** UTF-16 little endian, or UTF-16 big endian, respectively.
+** ^The sqlite3_result_text64() interface sets the return value of an
+** application-defined function to be a text string in an encoding
+** specified by the fifth (and last) parameter, which must be one
+** of [SQLITE_UTF8], [SQLITE_UTF16], [SQLITE_UTF16BE], or [SQLITE_UTF16LE].
+** ^SQLite takes the text result from the application from
+** the 2nd parameter of the sqlite3_result_text* interfaces.
+** ^If the 3rd parameter to the sqlite3_result_text* interfaces
+** is negative, then SQLite takes result text from the 2nd parameter
+** through the first zero character.
+** ^If the 3rd parameter to the sqlite3_result_text* interfaces
+** is non-negative, then as many bytes (not characters) of the text
+** pointed to by the 2nd parameter are taken as the application-defined
+** function result. If the 3rd parameter is non-negative, then it
+** must be the byte offset into the string where the NUL terminator would
+** appear if the string were NUL terminated. If any NUL characters occur
+** in the string at a byte offset that is less than the value of the 3rd
+** parameter, then the resulting string will contain embedded NULs and the
+** result of expressions operating on strings with embedded NULs is undefined.
+** ^If the 4th parameter to the sqlite3_result_text* interfaces
+** or sqlite3_result_blob is a non-NULL pointer, then SQLite calls that
+** function as the destructor on the text or BLOB result when it has
+** finished using that result.
+** ^If the 4th parameter to the sqlite3_result_text* interfaces or to
+** sqlite3_result_blob is the special constant SQLITE_STATIC, then SQLite
+** assumes that the text or BLOB result is in constant space and does not
+** copy the content of the parameter nor call a destructor on the content
+** when it has finished using that result.
+** ^If the 4th parameter to the sqlite3_result_text* interfaces
+** or sqlite3_result_blob is the special constant SQLITE_TRANSIENT
+** then SQLite makes a copy of the result into space obtained
+** from [sqlite3_malloc()] before it returns.
+**
+** ^For the sqlite3_result_text16(), sqlite3_result_text16le(), and
+** sqlite3_result_text16be() routines, and for sqlite3_result_text64()
+** when the encoding is not UTF8, if the input UTF16 begins with a
+** byte-order mark (BOM, U+FEFF) then the BOM is removed from the
+** string and the rest of the string is interpreted according to the
+** byte-order specified by the BOM. ^The byte-order specified by
+** the BOM at the beginning of the text overrides the byte-order
+** specified by the interface procedure. ^So, for example, if
+** sqlite3_result_text16le() is invoked with text that begins
+** with bytes 0xfe, 0xff (a big-endian byte-order mark) then the
+** first two bytes of input are skipped and the remaining input
+** is interpreted as UTF16BE text.
+**
+** ^For UTF16 input text to the sqlite3_result_text16(),
+** sqlite3_result_text16be(), sqlite3_result_text16le(), and
+** sqlite3_result_text64() routines, if the text contains invalid
+** UTF16 characters, the invalid characters might be converted
+** into the unicode replacement character, U+FFFD.
+**
+** ^The sqlite3_result_value() interface sets the result of
+** the application-defined function to be a copy of the
+** [unprotected sqlite3_value] object specified by the 2nd parameter. ^The
+** sqlite3_result_value() interface makes a copy of the [sqlite3_value]
+** so that the [sqlite3_value] specified in the parameter may change or
+** be deallocated after sqlite3_result_value() returns without harm.
+** ^A [protected sqlite3_value] object may always be used where an
+** [unprotected sqlite3_value] object is required, so either
+** kind of [sqlite3_value] object can be used with this interface.
+**
+** ^The sqlite3_result_pointer(C,P,T,D) interface sets the result to an
+** SQL NULL value, just like [sqlite3_result_null(C)], except that it
+** also associates the host-language pointer P or type T with that
+** NULL value such that the pointer can be retrieved within an
+** [application-defined SQL function] using [sqlite3_value_pointer()].
+** ^If the D parameter is not NULL, then it is a pointer to a destructor
+** for the P parameter. ^SQLite invokes D with P as its only argument
+** when SQLite is finished with P. The T parameter should be a static
+** string and preferably a string literal. The sqlite3_result_pointer()
+** routine is part of the [pointer passing interface] added for SQLite 3.20.0.
+**
+** If these routines are called from a different thread
+** than the one containing the application-defined function that received
+** the [sqlite3_context] pointer, the results are undefined.
+*/
+SQLITE_API void sqlite3_result_blob(sqlite3_context*, const void*, int, void(*)(void*));
+SQLITE_API void sqlite3_result_blob64(sqlite3_context*,const void*,
+ sqlite3_uint64,void(*)(void*));
+SQLITE_API void sqlite3_result_double(sqlite3_context*, double);
+SQLITE_API void sqlite3_result_error(sqlite3_context*, const char*, int);
+SQLITE_API void sqlite3_result_error16(sqlite3_context*, const void*, int);
+SQLITE_API void sqlite3_result_error_toobig(sqlite3_context*);
+SQLITE_API void sqlite3_result_error_nomem(sqlite3_context*);
+SQLITE_API void sqlite3_result_error_code(sqlite3_context*, int);
+SQLITE_API void sqlite3_result_int(sqlite3_context*, int);
+SQLITE_API void sqlite3_result_int64(sqlite3_context*, sqlite3_int64);
+SQLITE_API void sqlite3_result_null(sqlite3_context*);
+SQLITE_API void sqlite3_result_text(sqlite3_context*, const char*, int, void(*)(void*));
+SQLITE_API void sqlite3_result_text64(sqlite3_context*, const char*,sqlite3_uint64,
+ void(*)(void*), unsigned char encoding);
+SQLITE_API void sqlite3_result_text16(sqlite3_context*, const void*, int, void(*)(void*));
+SQLITE_API void sqlite3_result_text16le(sqlite3_context*, const void*, int,void(*)(void*));
+SQLITE_API void sqlite3_result_text16be(sqlite3_context*, const void*, int,void(*)(void*));
+SQLITE_API void sqlite3_result_value(sqlite3_context*, sqlite3_value*);
+SQLITE_API void sqlite3_result_pointer(sqlite3_context*, void*,const char*,void(*)(void*));
+SQLITE_API void sqlite3_result_zeroblob(sqlite3_context*, int n);
+SQLITE_API int sqlite3_result_zeroblob64(sqlite3_context*, sqlite3_uint64 n);
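+
+/*
+** A minimal sketch of an SQL function that validates its argument and
+** either throws an error or echoes the text back, using the safe
+** text-then-bytes ordering and SQLITE_TRANSIENT as discussed above:
+**
+**   static void echoFunc(sqlite3_context *ctx, int argc, sqlite3_value **argv){
+**     const unsigned char *z;
+**     (void)argc;
+**     if( sqlite3_value_type(argv[0])!=SQLITE_TEXT ){
+**       sqlite3_result_error(ctx, "echo() expects a text argument", -1);
+**       return;
+**     }
+**     z = sqlite3_value_text(argv[0]);
+**     sqlite3_result_text(ctx, (const char*)z,
+**                         sqlite3_value_bytes(argv[0]), SQLITE_TRANSIENT);
+**   }
+*/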
+
+
+/*
+** CAPI3REF: Setting The Subtype Of An SQL Function
+** METHOD: sqlite3_context
+**
+** The sqlite3_result_subtype(C,T) function causes the subtype of
+** the result from the [application-defined SQL function] with
+** [sqlite3_context] C to be the value T. Only the lower 8 bits
+** of the subtype T are preserved in current versions of SQLite;
+** higher order bits are discarded.
+** The number of subtype bytes preserved by SQLite might increase
+** in future releases of SQLite.
+*/
+SQLITE_API void sqlite3_result_subtype(sqlite3_context*,unsigned int);
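+
+/*
+** A brief sketch of passing a subtype between two cooperating SQL
+** functions; the tag value 0x4a is arbitrary and only the low 8 bits
+** are currently preserved:
+**
+**   // in the producing function:
+**   sqlite3_result_int(ctx, 1);
+**   sqlite3_result_subtype(ctx, 0x4a);
+**
+**   // in the consuming function:
+**   if( sqlite3_value_subtype(argv[0])==0x4a ){
+**     // the argument came from the producer above
+**   }
+*/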
+
+/*
+** CAPI3REF: Define New Collating Sequences
+** METHOD: sqlite3
+**
+** ^These functions add, remove, or modify a [collation] associated
+** with the [database connection] specified as the first argument.
+**
+** ^The name of the collation is a UTF-8 string
+** for sqlite3_create_collation() and sqlite3_create_collation_v2()
+** and a UTF-16 string in native byte order for sqlite3_create_collation16().
+** ^Collation names that compare equal according to [sqlite3_strnicmp()] are
+** considered to be the same name.
+**
+** ^(The third argument (eTextRep) must be one of the constants:
+** <ul>
+** <li> [SQLITE_UTF8],
+** <li> [SQLITE_UTF16LE],
+** <li> [SQLITE_UTF16BE],
+** <li> [SQLITE_UTF16], or
+** <li> [SQLITE_UTF16_ALIGNED].
+** </ul>)^
+** ^The eTextRep argument determines the encoding of strings passed
+** to the collating function callback, xCompare.
+** ^The [SQLITE_UTF16] and [SQLITE_UTF16_ALIGNED] values for eTextRep
+** force strings to be UTF16 with native byte order.
+** ^The [SQLITE_UTF16_ALIGNED] value for eTextRep forces strings to begin
+** on an even byte address.
+**
+** ^The fourth argument, pArg, is an application data pointer that is passed
+** through as the first argument to the collating function callback.
+**
+** ^The fifth argument, xCompare, is a pointer to the collating function.
+** ^Multiple collating functions can be registered using the same name but
+** with different eTextRep parameters and SQLite will use whichever
+** function requires the least amount of data transformation.
+** ^If the xCompare argument is NULL then the collating function is
+** deleted. ^When all collating functions having the same name are deleted,
+** that collation is no longer usable.
+**
+** ^The collating function callback is invoked with a copy of the pArg
+** application data pointer and with two strings in the encoding specified
+** by the eTextRep argument. The two integer parameters to the collating
+** function callback are the lengths of the two strings, in bytes. The collating
+** function must return an integer that is negative, zero, or positive
+** if the first string is less than, equal to, or greater than the second,
+** respectively. A collating function must always return the same answer
+** given the same inputs. If two or more collating functions are registered
+** to the same collation name (using different eTextRep values) then all
+** must give an equivalent answer when invoked with equivalent strings.
+** The collating function must obey the following properties for all
+** strings A, B, and C:
+**
+** <ol>
+** <li> If A==B then B==A.
+** <li> If A==B and B==C then A==C.
+** <li> If A&lt;B THEN B&gt;A.
+** <li> If A&lt;B and B&lt;C then A&lt;C.
+** </ol>
+**
+** If a collating function fails any of the above constraints and that
+** collating function is registered and used, then the behavior of SQLite
+** is undefined.
+**
+** ^The sqlite3_create_collation_v2() works like sqlite3_create_collation()
+** with the addition that the xDestroy callback is invoked on pArg when
+** the collating function is deleted.
+** ^Collating functions are deleted when they are overridden by later
+** calls to the collation creation functions or when the
+** [database connection] is closed using [sqlite3_close()].
+**
+** ^The xDestroy callback is <u>not</u> called if the
+** sqlite3_create_collation_v2() function fails. Applications that invoke
+** sqlite3_create_collation_v2() with a non-NULL xDestroy argument should
+** check the return code and dispose of the application data pointer
+** themselves rather than expecting SQLite to deal with it for them.
+** This is different from every other SQLite interface. The inconsistency
+** is unfortunate but cannot be changed without breaking backwards
+** compatibility.
+**
+** See also: [sqlite3_collation_needed()] and [sqlite3_collation_needed16()].
+*/
+SQLITE_API int sqlite3_create_collation(
+ sqlite3*,
+ const char *zName,
+ int eTextRep,
+ void *pArg,
+ int(*xCompare)(void*,int,const void*,int,const void*)
+);
+SQLITE_API int sqlite3_create_collation_v2(
+ sqlite3*,
+ const char *zName,
+ int eTextRep,
+ void *pArg,
+ int(*xCompare)(void*,int,const void*,int,const void*),
+ void(*xDestroy)(void*)
+);
+SQLITE_API int sqlite3_create_collation16(
+ sqlite3*,
+ const void *zName,
+ int eTextRep,
+ void *pArg,
+ int(*xCompare)(void*,int,const void*,int,const void*)
+);
+
+/*
+** CAPI3REF: Collation Needed Callbacks
+** METHOD: sqlite3
+**
+** ^To avoid having to register all collation sequences before a database
+** can be used, a single callback function may be registered with the
+** [database connection] to be invoked whenever an undefined collation
+** sequence is required.
+**
+** ^If the function is registered using the sqlite3_collation_needed() API,
+** then it is passed the names of undefined collation sequences as strings
+** encoded in UTF-8. ^If sqlite3_collation_needed16() is used,
+** the names are passed as UTF-16 in machine native byte order.
+** ^A call to either function replaces the existing collation-needed callback.
+**
+** ^(When the callback is invoked, the first argument passed is a copy
+** of the second argument to sqlite3_collation_needed() or
+** sqlite3_collation_needed16(). The second argument is the database
+** connection. The third argument is one of [SQLITE_UTF8], [SQLITE_UTF16BE],
+** or [SQLITE_UTF16LE], indicating the most desirable form of the collation
+** sequence function required. The fourth parameter is the name of the
+** required collation sequence.)^
+**
+** The callback function should register the desired collation using
+** [sqlite3_create_collation()], [sqlite3_create_collation16()], or
+** [sqlite3_create_collation_v2()].
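+**
+** For illustration, a sketch of a UTF-8 collation-needed callback that
+** lazily registers the "mynocase" collation from the earlier sketch
+** (names invented; assumes &lt;string.h&gt; is included):
+**
+** <blockquote><pre>
+** static void collNeeded(void *pData, sqlite3 *db,
+** &nbsp;&nbsp;&nbsp;&nbsp;int eTextRep, const char *zName){
+** &nbsp;&nbsp;if( strcmp(zName, "mynocase")==0 ){
+** &nbsp;&nbsp;&nbsp;&nbsp;sqlite3_create_collation(db, zName, SQLITE_UTF8,
+** &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;0, nocaseCompare);
+** &nbsp;&nbsp;}
+** }
+**
+** sqlite3_collation_needed(db, 0, collNeeded);
+** </pre></blockquote>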
+*/
+SQLITE_API int sqlite3_collation_needed(
+ sqlite3*,
+ void*,
+ void(*)(void*,sqlite3*,int eTextRep,const char*)
+);
+SQLITE_API int sqlite3_collation_needed16(
+ sqlite3*,
+ void*,
+ void(*)(void*,sqlite3*,int eTextRep,const void*)
+);
+
+#ifdef SQLITE_ENABLE_CEROD
+/*
+** Specify the activation key for a CEROD database. Unless
+** activated, none of the CEROD routines will work.
+*/
+SQLITE_API void sqlite3_activate_cerod(
+ const char *zPassPhrase /* Activation phrase */
+);
+#endif
+
+/*
+** CAPI3REF: Suspend Execution For A Short Time
+**
+** The sqlite3_sleep() function causes the current thread to suspend execution
+** for at least a number of milliseconds specified in its parameter.
+**
+** If the operating system does not support sleep requests with
+** millisecond time resolution, then the time will be rounded up to
+** the nearest second. The number of milliseconds of sleep actually
+** requested from the operating system is returned.
+**
+** ^SQLite implements this interface by calling the xSleep()
+** method of the default [sqlite3_vfs] object. If the xSleep() method
+** of the default VFS is not implemented correctly, or not implemented at
+** all, then the behavior of sqlite3_sleep() may deviate from the description
+** in the previous paragraphs.
+*/
+SQLITE_API int sqlite3_sleep(int);
+
+/*
+** CAPI3REF: Name Of The Folder Holding Temporary Files
+**
+** ^(If this global variable is made to point to a string which is
+** the name of a folder (a.k.a. directory), then all temporary files
+** created by SQLite when using a built-in [sqlite3_vfs | VFS]
+** will be placed in that directory.)^ ^If this variable
+** is a NULL pointer, then SQLite performs a search for an appropriate
+** temporary file directory.
+**
+** Applications are strongly discouraged from using this global variable.
+** It is required to set a temporary folder on Windows Runtime (WinRT).
+** But for all other platforms, it is highly recommended that applications
+** neither read nor write this variable. This global variable is a relic
+** that exists for backwards compatibility of legacy applications and should
+** be avoided in new projects.
+**
+** It is not safe to read or modify this variable in more than one
+** thread at a time. It is not safe to read or modify this variable
+** if a [database connection] is being used at the same time in a separate
+** thread.
+** It is intended that this variable be set once
+** as part of process initialization and before any SQLite interface
+** routines have been called and that this variable remain unchanged
+** thereafter.
+**
+** ^The [temp_store_directory pragma] may modify this variable and cause
+** it to point to memory obtained from [sqlite3_malloc]. ^Furthermore,
+** the [temp_store_directory pragma] always assumes that any string
+** that this variable points to is held in memory obtained from
+** [sqlite3_malloc] and the pragma may attempt to free that memory
+** using [sqlite3_free].
+** Hence, if this variable is modified directly, either it should be
+** made NULL or made to point to memory obtained from [sqlite3_malloc]
+** or else the use of the [temp_store_directory pragma] should be avoided.
+** Except when requested by the [temp_store_directory pragma], SQLite
+** does not free the memory that sqlite3_temp_directory points to. If
+** the application wants that memory to be freed, it must do
+** so itself, taking care to only do so after all [database connection]
+** objects have been destroyed.
+**
+** <b>Note to Windows Runtime users:</b> The temporary directory must be set
+** prior to calling [sqlite3_open] or [sqlite3_open_v2]. Otherwise, various
+** features that require the use of temporary files may fail. Here is an
+** example of how to do this using C++ with the Windows Runtime:
+**
+** <blockquote><pre>
+** LPCWSTR zPath = Windows::Storage::ApplicationData::Current->
+** &nbsp; TemporaryFolder->Path->Data();
+** char zPathBuf&#91;MAX_PATH + 1&#93;;
+** memset(zPathBuf, 0, sizeof(zPathBuf));
+** WideCharToMultiByte(CP_UTF8, 0, zPath, -1, zPathBuf, sizeof(zPathBuf),
+** &nbsp; NULL, NULL);
+** sqlite3_temp_directory = sqlite3_mprintf("%s", zPathBuf);
+** </pre></blockquote>
+*/
+SQLITE_API char *sqlite3_temp_directory;
+
+/*
+** CAPI3REF: Name Of The Folder Holding Database Files
+**
+** ^(If this global variable is made to point to a string which is
+** the name of a folder (a.k.a. directory), then all database files
+** specified with a relative pathname and created or accessed by
+** SQLite when using a built-in windows [sqlite3_vfs | VFS] will be assumed
+** to be relative to that directory.)^ ^If this variable is a NULL
+** pointer, then SQLite assumes that all database files specified
+** with a relative pathname are relative to the current directory
+** for the process. Only the windows VFS makes use of this global
+** variable; it is ignored by the unix VFS.
+**
+** Changing the value of this variable while a database connection is
+** open can result in a corrupt database.
+**
+** It is not safe to read or modify this variable in more than one
+** thread at a time. It is not safe to read or modify this variable
+** if a [database connection] is being used at the same time in a separate
+** thread.
+** It is intended that this variable be set once
+** as part of process initialization and before any SQLite interface
+** routines have been called and that this variable remain unchanged
+** thereafter.
+**
+** ^The [data_store_directory pragma] may modify this variable and cause
+** it to point to memory obtained from [sqlite3_malloc]. ^Furthermore,
+** the [data_store_directory pragma] always assumes that any string
+** that this variable points to is held in memory obtained from
+** [sqlite3_malloc] and the pragma may attempt to free that memory
+** using [sqlite3_free].
+** Hence, if this variable is modified directly, either it should be
+** made NULL or made to point to memory obtained from [sqlite3_malloc]
+** or else the use of the [data_store_directory pragma] should be avoided.
+*/
+SQLITE_API char *sqlite3_data_directory;
+
+/*
+** CAPI3REF: Win32 Specific Interface
+**
+** These interfaces are available only on Windows. The
+** [sqlite3_win32_set_directory] interface is used to set the value associated
+** with the [sqlite3_temp_directory] or [sqlite3_data_directory] variable, to
+** zValue, depending on the value of the type parameter. The zValue parameter
+** should be NULL to cause the previous value to be freed via [sqlite3_free];
+** a non-NULL value will be copied into memory obtained from [sqlite3_malloc]
+** prior to being used. The [sqlite3_win32_set_directory] interface returns
+** [SQLITE_OK] to indicate success, [SQLITE_ERROR] if the type is unsupported,
+** or [SQLITE_NOMEM] if memory could not be allocated. The value of the
+** [sqlite3_data_directory] variable is intended to act as a replacement for
+** the current directory on the sub-platforms of Win32 where that concept is
+** not present, e.g. WinRT and UWP. The [sqlite3_win32_set_directory8] and
+** [sqlite3_win32_set_directory16] interfaces behave exactly the same as the
+** sqlite3_win32_set_directory interface except the string parameter must be
+** UTF-8 or UTF-16, respectively.
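+**
+** For illustration, a process-initialization call that sets the temporary
+** directory on Windows (the path is invented for this example):
+**
+** <blockquote><pre>
+** sqlite3_win32_set_directory8(SQLITE_WIN32_TEMP_DIRECTORY_TYPE,
+** &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;"C:\\Temp");
+** </pre></blockquote>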
+*/
+SQLITE_API int sqlite3_win32_set_directory(
+ unsigned long type, /* Identifier for directory being set or reset */
+ void *zValue /* New value for directory being set or reset */
+);
+SQLITE_API int sqlite3_win32_set_directory8(unsigned long type, const char *zValue);
+SQLITE_API int sqlite3_win32_set_directory16(unsigned long type, const void *zValue);
+
+/*
+** CAPI3REF: Win32 Directory Types
+**
+** These macros are only available on Windows. They define the allowed values
+** for the type argument to the [sqlite3_win32_set_directory] interface.
+*/
+#define SQLITE_WIN32_DATA_DIRECTORY_TYPE 1
+#define SQLITE_WIN32_TEMP_DIRECTORY_TYPE 2
+
+/*
+** CAPI3REF: Test For Auto-Commit Mode
+** KEYWORDS: {autocommit mode}
+** METHOD: sqlite3
+**
+** ^The sqlite3_get_autocommit() interface returns non-zero if the given
+** database connection is in autocommit mode, or zero if it is not.
+** ^Autocommit mode is on by default.
+** ^Autocommit mode is disabled by a [BEGIN] statement.
+** ^Autocommit mode is re-enabled by a [COMMIT] or [ROLLBACK].
+**
+** If certain kinds of errors occur on a statement within a multi-statement
+** transaction (errors including [SQLITE_FULL], [SQLITE_IOERR],
+** [SQLITE_NOMEM], [SQLITE_BUSY], and [SQLITE_INTERRUPT]) then the
+** transaction might be rolled back automatically. The only way to
+** find out whether SQLite automatically rolled back the transaction after
+** an error is to use this function.
+**
+** If another thread changes the autocommit status of the database
+** connection while this routine is running, then the return value
+** is undefined.
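+**
+** For illustration, a sketch that checks whether a failed statement
+** caused the enclosing transaction to be rolled back automatically:
+**
+** <blockquote><pre>
+** rc = sqlite3_step(pStmt);
+** if( rc!=SQLITE_ROW && rc!=SQLITE_DONE ){
+** &nbsp;&nbsp;if( sqlite3_get_autocommit(db) ){
+** &nbsp;&nbsp;&nbsp;&nbsp;// The BEGIN is gone: SQLite rolled the transaction back.
+** &nbsp;&nbsp;}
+** }
+** </pre></blockquote>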
+*/
+SQLITE_API int sqlite3_get_autocommit(sqlite3*);
+
+/*
+** CAPI3REF: Find The Database Handle Of A Prepared Statement
+** METHOD: sqlite3_stmt
+**
+** ^The sqlite3_db_handle interface returns the [database connection] handle
+** to which a [prepared statement] belongs. ^The [database connection]
+** returned by sqlite3_db_handle is the same [database connection]
+** that was the first argument
+** to the [sqlite3_prepare_v2()] call (or its variants) that was used to
+** create the statement in the first place.
+*/
+SQLITE_API sqlite3 *sqlite3_db_handle(sqlite3_stmt*);
+
+/*
+** CAPI3REF: Return The Filename For A Database Connection
+** METHOD: sqlite3
+**
+** ^The sqlite3_db_filename(D,N) interface returns a pointer to the filename
+** associated with database N of connection D.
+** ^If there is no attached database N on the database
+** connection D, or if database N is a temporary or in-memory database, then
+** this function will return either a NULL pointer or an empty string.
+**
+** ^The string value returned by this routine is owned and managed by
+** the database connection. ^The value will be valid until the database N
+** is [DETACH]-ed or until the database connection closes.
+**
+** ^The filename returned by this function is the output of the
+** xFullPathname method of the [VFS]. ^In other words, the filename
+** will be an absolute pathname, even if the filename used
+** to open the database originally was a URI or relative pathname.
+**
+** If the filename pointer returned by this routine is not NULL, then it
+** can be used as the filename input parameter to these routines:
+** <ul>
+** <li> [sqlite3_uri_parameter()]
+** <li> [sqlite3_uri_boolean()]
+** <li> [sqlite3_uri_int64()]
+** <li> [sqlite3_filename_database()]
+** <li> [sqlite3_filename_journal()]
+** <li> [sqlite3_filename_wal()]
+** </ul>
+*/
+SQLITE_API const char *sqlite3_db_filename(sqlite3 *db, const char *zDbName);
+
+/*
+** CAPI3REF: Determine if a database is read-only
+** METHOD: sqlite3
+**
+** ^The sqlite3_db_readonly(D,N) interface returns 1 if the database N
+** of connection D is read-only, 0 if it is read/write, or -1 if N is not
+** the name of a database on connection D.
+*/
+SQLITE_API int sqlite3_db_readonly(sqlite3 *db, const char *zDbName);
+
+/*
+** CAPI3REF: Find the next prepared statement
+** METHOD: sqlite3
+**
+** ^This interface returns a pointer to the next [prepared statement] after
+** pStmt associated with the [database connection] pDb. ^If pStmt is NULL
+** then this interface returns a pointer to the first prepared statement
+** associated with the database connection pDb. ^If no prepared statement
+** satisfies the conditions of this routine, it returns NULL.
+**
+** The [database connection] pointer D in a call to
+** [sqlite3_next_stmt(D,S)] must refer to an open database
+** connection and in particular must not be a NULL pointer.
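+**
+** For illustration, the common clean-up idiom that finalizes every
+** prepared statement still outstanding on a connection:
+**
+** <blockquote><pre>
+** sqlite3_stmt *pStmt;
+** while( (pStmt = sqlite3_next_stmt(db, 0))!=0 ){
+** &nbsp;&nbsp;sqlite3_finalize(pStmt);
+** }
+** </pre></blockquote>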
+*/
+SQLITE_API sqlite3_stmt *sqlite3_next_stmt(sqlite3 *pDb, sqlite3_stmt *pStmt);
+
+/*
+** CAPI3REF: Commit And Rollback Notification Callbacks
+** METHOD: sqlite3
+**
+** ^The sqlite3_commit_hook() interface registers a callback
+** function to be invoked whenever a transaction is [COMMIT | committed].
+** ^Any callback set by a previous call to sqlite3_commit_hook()
+** for the same database connection is overridden.
+** ^The sqlite3_rollback_hook() interface registers a callback
+** function to be invoked whenever a transaction is [ROLLBACK | rolled back].
+** ^Any callback set by a previous call to sqlite3_rollback_hook()
+** for the same database connection is overridden.
+** ^The pArg argument is passed through to the callback.
+** ^If the callback on a commit hook function returns non-zero,
+** then the commit is converted into a rollback.
+**
+** ^The sqlite3_commit_hook(D,C,P) and sqlite3_rollback_hook(D,C,P) functions
+** return the P argument from the previous call of the same function
+** on the same [database connection] D, or NULL for
+** the first call for each function on D.
+**
+** The commit and rollback hook callbacks are not reentrant.
+** The callback implementation must not do anything that will modify
+** the database connection that invoked the callback. Any actions
+** to modify the database connection must be deferred until after the
+** completion of the [sqlite3_step()] call that triggered the commit
+** or rollback hook in the first place.
+** Note that running any other SQL statements, including SELECT statements,
+** or merely calling [sqlite3_prepare_v2()] and [sqlite3_step()], counts as
+** "modifying" the database connection for the purposes of this paragraph.
+**
+** ^Registering a NULL function disables the callback.
+**
+** ^When the commit hook callback routine returns zero, the [COMMIT]
+** operation is allowed to continue normally. ^If the commit hook
+** returns non-zero, then the [COMMIT] is converted into a [ROLLBACK].
+** ^The rollback hook is invoked on a rollback that results from a commit
+** hook returning non-zero, just as it would be with any other rollback.
+**
+** ^For the purposes of this API, a transaction is said to have been
+** rolled back if an explicit "ROLLBACK" statement is executed, or
+** an error or constraint causes an implicit rollback to occur.
+** ^The rollback callback is not invoked if a transaction is
+** automatically rolled back because the database connection is closed.
+**
+** See also the [sqlite3_update_hook()] interface.
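+**
+** For illustration, a sketch of a commit hook that vetoes commits while
+** an application-level flag is set (names invented for this example):
+**
+** <blockquote><pre>
+** static int denyCommits;  // must outlive the hook registration
+**
+** static int commitVeto(void *pArg){
+** &nbsp;&nbsp;// Returning non-zero turns the COMMIT into a ROLLBACK.
+** &nbsp;&nbsp;return *(int*)pArg;
+** }
+**
+** sqlite3_commit_hook(db, commitVeto, &denyCommits);
+** </pre></blockquote>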
+*/
+SQLITE_API void *sqlite3_commit_hook(sqlite3*, int(*)(void*), void*);
+SQLITE_API void *sqlite3_rollback_hook(sqlite3*, void(*)(void *), void*);
+
+/*
+** CAPI3REF: Data Change Notification Callbacks
+** METHOD: sqlite3
+**
+** ^The sqlite3_update_hook() interface registers a callback function
+** with the [database connection] identified by the first argument
+** to be invoked whenever a row is updated, inserted or deleted in
+** a [rowid table].
+** ^Any callback set by a previous call to this function
+** for the same database connection is overridden.
+**
+** ^The second argument is a pointer to the function to invoke when a
+** row is updated, inserted or deleted in a rowid table.
+** ^The first argument to the callback is a copy of the third argument
+** to sqlite3_update_hook().
+** ^The second callback argument is one of [SQLITE_INSERT], [SQLITE_DELETE],
+** or [SQLITE_UPDATE], depending on the operation that caused the callback
+** to be invoked.
+** ^The third and fourth arguments to the callback contain pointers to the
+** database and table name containing the affected row.
+** ^The final callback parameter is the [rowid] of the row.
+** ^In the case of an update, this is the [rowid] after the update takes place.
+**
+** ^(The update hook is not invoked when internal system tables are
+** modified (i.e. sqlite_sequence).)^
+** ^The update hook is not invoked when [WITHOUT ROWID] tables are modified.
+**
+** ^In the current implementation, the update hook
+** is not invoked when conflicting rows are deleted because of an
+** [ON CONFLICT | ON CONFLICT REPLACE] clause. ^Nor is the update hook
+** invoked when rows are deleted using the [truncate optimization].
+** The exceptions defined in this paragraph might change in a future
+** release of SQLite.
+**
+** The update hook implementation must not do anything that will modify
+** the database connection that invoked the update hook. Any actions
+** to modify the database connection must be deferred until after the
+** completion of the [sqlite3_step()] call that triggered the update hook.
+** Note that [sqlite3_prepare_v2()] and [sqlite3_step()] both count as
+** modifying their database connections for the purposes of this paragraph.
+**
+** ^The sqlite3_update_hook(D,C,P) function
+** returns the P argument from the previous call
+** on the same [database connection] D, or NULL for
+** the first call on D.
+**
+** See also the [sqlite3_commit_hook()], [sqlite3_rollback_hook()],
+** and [sqlite3_preupdate_hook()] interfaces.
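+**
+** For illustration, a sketch of a change-logging callback (assumes
+** &lt;stdio.h&gt;; remember that the callback must not modify the
+** invoking database connection):
+**
+** <blockquote><pre>
+** static void logChange(void *pArg, int op,
+** &nbsp;&nbsp;&nbsp;&nbsp;const char *zDb, const char *zTbl, sqlite3_int64 rowid){
+** &nbsp;&nbsp;const char *zOp = op==SQLITE_INSERT ? "INSERT" :
+** &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;op==SQLITE_DELETE ? "DELETE" : "UPDATE";
+** &nbsp;&nbsp;fprintf(stderr, "%s %s.%s row %lld\n", zOp, zDb, zTbl,
+** &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;(long long)rowid);
+** }
+**
+** sqlite3_update_hook(db, logChange, 0);
+** </pre></blockquote>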
+*/
+SQLITE_API void *sqlite3_update_hook(
+ sqlite3*,
+ void(*)(void *,int ,char const *,char const *,sqlite3_int64),
+ void*
+);
+
+/*
+** CAPI3REF: Enable Or Disable Shared Pager Cache
+**
+** ^(This routine enables or disables the sharing of the database cache
+** and schema data structures between [database connection | connections]
+** to the same database. Sharing is enabled if the argument is true
+** and disabled if the argument is false.)^
+**
+** ^Cache sharing is enabled and disabled for an entire process.
+** This is a change as of SQLite [version 3.5.0] ([dateof:3.5.0]).
+** In prior versions of SQLite,
+** sharing was enabled or disabled for each thread separately.
+**
+** ^(The cache sharing mode set by this interface affects all subsequent
+** calls to [sqlite3_open()], [sqlite3_open_v2()], and [sqlite3_open16()].
+** Existing database connections continue to use the sharing mode
+** that was in effect at the time they were opened.)^
+**
+** ^(This routine returns [SQLITE_OK] if shared cache was enabled or disabled
+** successfully. An [error code] is returned otherwise.)^
+**
+** ^Shared cache is disabled by default. It is recommended that it stay
+** that way. In other words, do not use this routine. This interface
+** continues to be provided for historical compatibility, but its use is
+** discouraged. Any use of shared cache is discouraged. If shared cache
+** must be used, it is recommended that shared cache only be enabled for
+** individual database connections using the [sqlite3_open_v2()] interface
+** with the [SQLITE_OPEN_SHAREDCACHE] flag.
+**
+** Note: This method is disabled on MacOS X 10.7 and iOS version 5.0
+** and will always return SQLITE_MISUSE. On those systems,
+** shared cache mode should be enabled per-database connection via
+** [sqlite3_open_v2()] with [SQLITE_OPEN_SHAREDCACHE].
+**
+** This interface is threadsafe on processors where writing a
+** 32-bit integer is atomic.
+**
+** See Also: [SQLite Shared-Cache Mode]
+*/
+SQLITE_API int sqlite3_enable_shared_cache(int);
+
+/*
+** CAPI3REF: Attempt To Free Heap Memory
+**
+** ^The sqlite3_release_memory() interface attempts to free N bytes
+** of heap memory by deallocating non-essential memory allocations
+** held by the database library. Memory used to cache database
+** pages to improve performance is an example of non-essential memory.
+** ^sqlite3_release_memory() returns the number of bytes actually freed,
+** which might be more or less than the amount requested.
+** ^The sqlite3_release_memory() routine is a no-op returning zero
+** if SQLite is not compiled with [SQLITE_ENABLE_MEMORY_MANAGEMENT].
+**
+** See also: [sqlite3_db_release_memory()]
+*/
+SQLITE_API int sqlite3_release_memory(int);
+
+/*
+** CAPI3REF: Free Memory Used By A Database Connection
+** METHOD: sqlite3
+**
+** ^The sqlite3_db_release_memory(D) interface attempts to free as much heap
+** memory as possible from database connection D. Unlike the
+** [sqlite3_release_memory()] interface, this interface is in effect even
+** when the [SQLITE_ENABLE_MEMORY_MANAGEMENT] compile-time option is
+** omitted.
+**
+** See also: [sqlite3_release_memory()]
+*/
+SQLITE_API int sqlite3_db_release_memory(sqlite3*);
+
+/*
+** CAPI3REF: Impose A Limit On Heap Size
+**
+** These interfaces impose limits on the amount of heap memory that will be
+** used by all database connections within a single process.
+**
+** ^The sqlite3_soft_heap_limit64() interface sets and/or queries the
+** soft limit on the amount of heap memory that may be allocated by SQLite.
+** ^SQLite strives to keep heap memory utilization below the soft heap
+** limit by reducing the number of pages held in the page cache
+** as heap memory usages approaches the limit.
+** ^The soft heap limit is "soft" because even though SQLite strives to stay
+** below the limit, it will exceed the limit rather than generate
+** an [SQLITE_NOMEM] error. In other words, the soft heap limit
+** is advisory only.
+**
+** ^The sqlite3_hard_heap_limit64(N) interface sets a hard upper bound of
+** N bytes on the amount of memory that will be allocated. ^The
+** sqlite3_hard_heap_limit64(N) interface is similar to
+** sqlite3_soft_heap_limit64(N) except that memory allocations will fail
+** when the hard heap limit is reached.
+**
+** ^The return value from both sqlite3_soft_heap_limit64() and
+** sqlite3_hard_heap_limit64() is the size of
+** the heap limit prior to the call, or negative in the case of an
+** error. ^If the argument N is negative
+** then no change is made to the heap limit. Hence, the current
+** size of heap limits can be determined by invoking
+** sqlite3_soft_heap_limit64(-1) or sqlite3_hard_heap_limit64(-1).
+**
+** ^Setting the heap limits to zero disables the heap limiter mechanism.
+**
+** ^The soft heap limit may not be greater than the hard heap limit.
+** ^If the hard heap limit is enabled and if sqlite3_soft_heap_limit64(N)
+** is invoked with a value of N that is greater than the hard heap limit,
+** then the soft heap limit is set to the value of the hard heap limit.
+** ^The soft heap limit is automatically enabled whenever the hard heap
+** limit is enabled. ^When sqlite3_hard_heap_limit64(N) is invoked and
+** the soft heap limit is outside the range of 1..N, then the soft heap
+** limit is set to N. ^Invoking sqlite3_soft_heap_limit64(0) when the
+** hard heap limit is enabled makes the soft heap limit equal to the
+** hard heap limit.
+**
+** The memory allocation limits can also be adjusted using
+** [PRAGMA soft_heap_limit] and [PRAGMA hard_heap_limit].
+**
+** ^(The heap limits are not enforced in the current implementation
+** if one or more of following conditions are true:
+**
+** <ul>
+** <li> The limit value is set to zero.
+** <li> Memory accounting is disabled using a combination of the
+** [sqlite3_config]([SQLITE_CONFIG_MEMSTATUS],...) start-time option and
+** the [SQLITE_DEFAULT_MEMSTATUS] compile-time option.
+** <li> An alternative page cache implementation is specified using
+** [sqlite3_config]([SQLITE_CONFIG_PCACHE2],...).
+** <li> The page cache allocates from its own memory pool supplied
+** by [sqlite3_config]([SQLITE_CONFIG_PAGECACHE],...) rather than
+** from the heap.
+** </ul>)^
+**
+** The circumstances under which SQLite will enforce the heap limits may
+** change in future releases of SQLite.
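+**
+** For illustration, a sketch that imposes a 64MB hard cap, keeps a 32MB
+** advisory soft limit, and then queries the current soft limit:
+**
+** <blockquote><pre>
+** sqlite3_hard_heap_limit64(64*1024*1024);
+** sqlite3_soft_heap_limit64(32*1024*1024);
+** sqlite3_int64 cur = sqlite3_soft_heap_limit64(-1);  // query only
+** </pre></blockquote>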
+*/
+SQLITE_API sqlite3_int64 sqlite3_soft_heap_limit64(sqlite3_int64 N);
+SQLITE_API sqlite3_int64 sqlite3_hard_heap_limit64(sqlite3_int64 N);
+
+/*
+** CAPI3REF: Deprecated Soft Heap Limit Interface
+** DEPRECATED
+**
+** This is a deprecated version of the [sqlite3_soft_heap_limit64()]
+** interface. This routine is provided for historical compatibility
+** only. All new applications should use the
+** [sqlite3_soft_heap_limit64()] interface rather than this one.
+*/
+SQLITE_API SQLITE_DEPRECATED void sqlite3_soft_heap_limit(int N);
+
+
+/*
+** CAPI3REF: Extract Metadata About A Column Of A Table
+** METHOD: sqlite3
+**
+** ^(The sqlite3_table_column_metadata(X,D,T,C,...) routine returns
+** information about column C of table T in database D
+** on [database connection] X.)^ ^The sqlite3_table_column_metadata()
+** interface returns SQLITE_OK and fills in the non-NULL pointers in
+** the final five arguments with appropriate values if the specified
+** column exists. ^The sqlite3_table_column_metadata() interface returns
+** SQLITE_ERROR if the specified column does not exist.
+** ^If the column-name parameter to sqlite3_table_column_metadata() is a
+** NULL pointer, then this routine simply checks for the existence of the
+** table and returns SQLITE_OK if the table exists and SQLITE_ERROR if it
+** does not. If the table name parameter T in a call to
+** sqlite3_table_column_metadata(X,D,T,C,...) is NULL then the result is
+** undefined behavior.
+**
+** ^The column is identified by the second, third and fourth parameters to
+** this function. ^(The second parameter is either the name of the database
+** (i.e. "main", "temp", or an attached database) containing the specified
+** table or NULL.)^ ^If it is NULL, then all attached databases are searched
+** for the table using the same algorithm used by the database engine to
+** resolve unqualified table references.
+**
+** ^The third and fourth parameters to this function are the table and column
+** name of the desired column, respectively.
+**
+** ^Metadata is returned by writing to the memory locations passed as the 5th
+** and subsequent parameters to this function. ^Any of these arguments may be
+** NULL, in which case the corresponding element of metadata is omitted.
+**
+** ^(<blockquote>
+** <table border="1">
+** <tr><th> Parameter <th> Output<br>Type <th> Description
+**
+** <tr><td> 5th <td> const char* <td> Data type
+** <tr><td> 6th <td> const char* <td> Name of default collation sequence
+** <tr><td> 7th <td> int <td> True if column has a NOT NULL constraint
+** <tr><td> 8th <td> int <td> True if column is part of the PRIMARY KEY
+** <tr><td> 9th <td> int <td> True if column is [AUTOINCREMENT]
+** </table>
+** </blockquote>)^
+**
+** ^The memory pointed to by the character pointers returned for the
+** declaration type and collation sequence is valid until the next
+** call to any SQLite API function.
+**
+** ^If the specified table is actually a view, an [error code] is returned.
+**
+** ^If the specified column is "rowid", "oid" or "_rowid_" and the table
+** is not a [WITHOUT ROWID] table and an
+** [INTEGER PRIMARY KEY] column has been explicitly declared, then the output
+** parameters are set for the explicitly declared column. ^(If there is no
+** [INTEGER PRIMARY KEY] column, then the outputs
+** for the [rowid] are set as follows:
+**
+** <pre>
+** data type: "INTEGER"
+** collation sequence: "BINARY"
+** not null: 0
+** primary key: 1
+** auto increment: 0
+** </pre>)^
+**
+** ^This function causes all database schemas to be read from disk and
+** parsed, if that has not already been done, and returns an error if
+** any errors are encountered while loading the schema.
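+**
+** For illustration, a call that fetches metadata for a hypothetical
+** column "a" of table "t1" in the "main" database:
+**
+** <blockquote><pre>
+** const char *zType, *zColl;
+** int notnull, pk, autoinc;
+** rc = sqlite3_table_column_metadata(db, "main", "t1", "a",
+** &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&zType, &zColl, &notnull, &pk, &autoinc);
+** </pre></blockquote>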
+*/
+SQLITE_API int sqlite3_table_column_metadata(
+ sqlite3 *db, /* Connection handle */
+ const char *zDbName, /* Database name or NULL */
+ const char *zTableName, /* Table name */
+ const char *zColumnName, /* Column name */
+ char const **pzDataType, /* OUTPUT: Declared data type */
+ char const **pzCollSeq, /* OUTPUT: Collation sequence name */
+ int *pNotNull, /* OUTPUT: True if NOT NULL constraint exists */
+ int *pPrimaryKey, /* OUTPUT: True if column part of PK */
+ int *pAutoinc /* OUTPUT: True if column is auto-increment */
+);
+
+/*
+** CAPI3REF: Load An Extension
+** METHOD: sqlite3
+**
+** ^This interface loads an SQLite extension library from the named file.
+**
+** ^The sqlite3_load_extension() interface attempts to load an
+** [SQLite extension] library contained in the file zFile. If
+** the file cannot be loaded directly, attempts are made to load
+** with various operating-system specific extensions added.
+** So for example, if "samplelib" cannot be loaded, then names like
+** "samplelib.so" or "samplelib.dylib" or "samplelib.dll" might
+** be tried also.
+**
+** ^The entry point is zProc.
+** ^(zProc may be 0, in which case SQLite will try to come up with an
+** entry point name on its own. It first tries "sqlite3_extension_init".
+** If that does not work, it constructs a name "sqlite3_X_init" where the
+** X consists of the lower-case equivalent of all ASCII alphabetic
+** characters in the filename from the last "/" to the first following
+** "." and omitting any initial "lib".)^
+** ^The sqlite3_load_extension() interface returns
+** [SQLITE_OK] on success and [SQLITE_ERROR] if something goes wrong.
+** ^If an error occurs and pzErrMsg is not 0, then the
+** [sqlite3_load_extension()] interface shall attempt to
+** fill *pzErrMsg with error message text stored in memory
+** obtained from [sqlite3_malloc()]. The calling function
+** should free this memory by calling [sqlite3_free()].
+**
+** ^Extension loading must be enabled using
+** [sqlite3_enable_load_extension()] or
+** [sqlite3_db_config](db,[SQLITE_DBCONFIG_ENABLE_LOAD_EXTENSION],1,NULL)
+** prior to calling this API,
+** otherwise an error will be returned.
+**
+** <b>Security warning:</b> It is recommended that the
+** [SQLITE_DBCONFIG_ENABLE_LOAD_EXTENSION] method be used to enable only this
+** interface. The use of the [sqlite3_enable_load_extension()] interface
+** should be avoided. This will keep the SQL function [load_extension()]
+** disabled and prevent SQL injections from giving attackers
+** access to extension loading capabilities.
+**
+** See also the [load_extension() SQL function].
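+**
+** For illustration, a sketch that enables only the C-API, loads the
+** "samplelib" extension named above, and reports any error (assumes
+** &lt;stdio.h&gt;):
+**
+** <blockquote><pre>
+** char *zErr = 0;
+** sqlite3_db_config(db, SQLITE_DBCONFIG_ENABLE_LOAD_EXTENSION, 1, NULL);
+** rc = sqlite3_load_extension(db, "samplelib", 0, &zErr);
+** if( rc!=SQLITE_OK ){
+** &nbsp;&nbsp;fprintf(stderr, "load failed: %s\n", zErr ? zErr : "?");
+** &nbsp;&nbsp;sqlite3_free(zErr);
+** }
+** </pre></blockquote>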
+*/
+SQLITE_API int sqlite3_load_extension(
+ sqlite3 *db, /* Load the extension into this database connection */
+ const char *zFile, /* Name of the shared library containing extension */
+ const char *zProc, /* Entry point. Derived from zFile if 0 */
+ char **pzErrMsg /* Put error message here if not 0 */
+);
+
+/*
+** CAPI3REF: Enable Or Disable Extension Loading
+** METHOD: sqlite3
+**
+** ^So as not to open security holes in older applications that are
+** unprepared to deal with [extension loading], and as a means of disabling
+** [extension loading] while evaluating user-entered SQL, the following API
+** is provided to turn the [sqlite3_load_extension()] mechanism on and off.
+**
+** ^Extension loading is off by default.
+** ^Call the sqlite3_enable_load_extension() routine with onoff==1
+** to turn extension loading on and call it with onoff==0 to turn
+** it back off again.
+**
+** ^This interface enables or disables both the C-API
+** [sqlite3_load_extension()] and the SQL function [load_extension()].
+** ^(Use [sqlite3_db_config](db,[SQLITE_DBCONFIG_ENABLE_LOAD_EXTENSION],..)
+** to enable or disable only the C-API.)^
+**
+** <b>Security warning:</b> It is recommended that extension loading
+** be enabled using the [SQLITE_DBCONFIG_ENABLE_LOAD_EXTENSION] method
+** rather than this interface, so the [load_extension()] SQL function
+** remains disabled. This will prevent SQL injections from giving attackers
+** access to extension loading capabilities.
+*/
+SQLITE_API int sqlite3_enable_load_extension(sqlite3 *db, int onoff);
+
+/*
+** CAPI3REF: Automatically Load Statically Linked Extensions
+**
+** ^This interface causes the xEntryPoint() function to be invoked for
+** each new [database connection] that is created. The idea here is that
+** xEntryPoint() is the entry point for a statically linked [SQLite extension]
+** that is to be automatically loaded into all new database connections.
+**
+** ^(Even though the function prototype shows that xEntryPoint() takes
+** no arguments and returns void, SQLite invokes xEntryPoint() with three
+** arguments and expects an integer result as if the signature of the
+** entry point were as follows:
+**
+** <blockquote><pre>
+** &nbsp; int xEntryPoint(
+** &nbsp; sqlite3 *db,
+** &nbsp; const char **pzErrMsg,
+** &nbsp; const struct sqlite3_api_routines *pThunk
+** &nbsp; );
+** </pre></blockquote>)^
+**
+** If the xEntryPoint routine encounters an error, it should make *pzErrMsg
+** point to an appropriate error message (obtained from [sqlite3_mprintf()])
+** and return an appropriate [error code]. ^SQLite ensures that *pzErrMsg
+** is NULL before calling the xEntryPoint(). ^SQLite will invoke
+** [sqlite3_free()] on *pzErrMsg after xEntryPoint() returns. ^If any
+** xEntryPoint() returns an error, the [sqlite3_open()], [sqlite3_open16()],
+** or [sqlite3_open_v2()] call that provoked the xEntryPoint() will fail.
+**
+** ^Calling sqlite3_auto_extension(X) with an entry point X that is already
+** on the list of automatic extensions is a harmless no-op. ^No entry point
+** will be called more than once for each database connection that is opened.
+**
+** See also: [sqlite3_reset_auto_extension()]
+** and [sqlite3_cancel_auto_extension()]
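+**
+** For illustration, registering a statically linked entry point
+** (the function name is invented for this example):
+**
+** <blockquote><pre>
+** int myExtInit(sqlite3 *db, const char **pzErrMsg,
+** &nbsp;&nbsp;&nbsp;&nbsp;const struct sqlite3_api_routines *pThunk);
+**
+** sqlite3_auto_extension((void(*)(void))myExtInit);
+** </pre></blockquote>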
+*/
+SQLITE_API int sqlite3_auto_extension(void(*xEntryPoint)(void));
+
+/*
+** CAPI3REF: Cancel Automatic Extension Loading
+**
+** ^The [sqlite3_cancel_auto_extension(X)] interface unregisters the
+** initialization routine X that was registered using a prior call to
+** [sqlite3_auto_extension(X)]. ^The [sqlite3_cancel_auto_extension(X)]
+** routine returns 1 if initialization routine X was successfully
+** unregistered and it returns 0 if X was not on the list of initialization
+** routines.
+*/
+SQLITE_API int sqlite3_cancel_auto_extension(void(*xEntryPoint)(void));
+
+/*
+** CAPI3REF: Reset Automatic Extension Loading
+**
+** ^This interface disables all automatic extensions previously
+** registered using [sqlite3_auto_extension()].
+*/
+SQLITE_API void sqlite3_reset_auto_extension(void);
+
+/*
+** The interface to the virtual-table mechanism is currently considered
+** to be experimental. The interface might change in incompatible ways.
+** If this is a problem for you, do not use the interface at this time.
+**
+** When the virtual-table mechanism stabilizes, we will declare the
+** interface fixed, support it indefinitely, and remove this comment.
+*/
+
+/*
+** Structures used by the virtual table interface
+*/
+typedef struct sqlite3_vtab sqlite3_vtab;
+typedef struct sqlite3_index_info sqlite3_index_info;
+typedef struct sqlite3_vtab_cursor sqlite3_vtab_cursor;
+typedef struct sqlite3_module sqlite3_module;
+
+/*
+** CAPI3REF: Virtual Table Object
+** KEYWORDS: sqlite3_module {virtual table module}
+**
+** This structure, sometimes called a "virtual table module",
+** defines the implementation of a [virtual table].
+** This structure consists mostly of methods for the module.
+**
+** ^A virtual table module is created by filling in a persistent
+** instance of this structure and passing a pointer to that instance
+** to [sqlite3_create_module()] or [sqlite3_create_module_v2()].
+** ^The registration remains valid until it is replaced by a different
+** module or until the [database connection] closes. The content
+** of this structure must not change while it is registered with
+** any database connection.
+*/
+struct sqlite3_module {
+ int iVersion;
+ int (*xCreate)(sqlite3*, void *pAux,
+ int argc, const char *const*argv,
+ sqlite3_vtab **ppVTab, char**);
+ int (*xConnect)(sqlite3*, void *pAux,
+ int argc, const char *const*argv,
+ sqlite3_vtab **ppVTab, char**);
+ int (*xBestIndex)(sqlite3_vtab *pVTab, sqlite3_index_info*);
+ int (*xDisconnect)(sqlite3_vtab *pVTab);
+ int (*xDestroy)(sqlite3_vtab *pVTab);
+ int (*xOpen)(sqlite3_vtab *pVTab, sqlite3_vtab_cursor **ppCursor);
+ int (*xClose)(sqlite3_vtab_cursor*);
+ int (*xFilter)(sqlite3_vtab_cursor*, int idxNum, const char *idxStr,
+ int argc, sqlite3_value **argv);
+ int (*xNext)(sqlite3_vtab_cursor*);
+ int (*xEof)(sqlite3_vtab_cursor*);
+ int (*xColumn)(sqlite3_vtab_cursor*, sqlite3_context*, int);
+ int (*xRowid)(sqlite3_vtab_cursor*, sqlite3_int64 *pRowid);
+ int (*xUpdate)(sqlite3_vtab *, int, sqlite3_value **, sqlite3_int64 *);
+ int (*xBegin)(sqlite3_vtab *pVTab);
+ int (*xSync)(sqlite3_vtab *pVTab);
+ int (*xCommit)(sqlite3_vtab *pVTab);
+ int (*xRollback)(sqlite3_vtab *pVTab);
+ int (*xFindFunction)(sqlite3_vtab *pVtab, int nArg, const char *zName,
+ void (**pxFunc)(sqlite3_context*,int,sqlite3_value**),
+ void **ppArg);
+ int (*xRename)(sqlite3_vtab *pVtab, const char *zNew);
+ /* The methods above are in version 1 of the sqlite_module object. Those
+ ** below are for version 2 and greater. */
+ int (*xSavepoint)(sqlite3_vtab *pVTab, int);
+ int (*xRelease)(sqlite3_vtab *pVTab, int);
+ int (*xRollbackTo)(sqlite3_vtab *pVTab, int);
+ /* The methods above are in versions 1 and 2 of the sqlite_module object.
+ ** Those below are for version 3 and greater. */
+ int (*xShadowName)(const char*);
+};
+
+/*
+** CAPI3REF: Virtual Table Indexing Information
+** KEYWORDS: sqlite3_index_info
+**
+** The sqlite3_index_info structure and its substructures are used as part
+** of the [virtual table] interface to
+** pass information into and receive the reply from the [xBestIndex]
+** method of a [virtual table module]. The fields under **Inputs** are the
+** inputs to xBestIndex and are read-only. xBestIndex inserts its
+** results into the **Outputs** fields.
+**
+** ^(The aConstraint[] array records WHERE clause constraints of the form:
+**
+** <blockquote>column OP expr</blockquote>
+**
+** where OP is =, &lt;, &lt;=, &gt;, or &gt;=.)^ ^(The particular operator is
+** stored in aConstraint[].op using one of the
+** [SQLITE_INDEX_CONSTRAINT_EQ | SQLITE_INDEX_CONSTRAINT_ values].)^
+** ^(The index of the column is stored in
+** aConstraint[].iColumn.)^ ^(aConstraint[].usable is TRUE if the
+** expr on the right-hand side can be evaluated (and thus the constraint
+** is usable) and false if it cannot.)^
+**
+** ^The optimizer automatically inverts terms of the form "expr OP column"
+** and makes other simplifications to the WHERE clause in an attempt to
+** get as many WHERE clause terms into the form shown above as possible.
+** ^The aConstraint[] array only reports WHERE clause terms that are
+** relevant to the particular virtual table being queried.
+**
+** ^Information about the ORDER BY clause is stored in aOrderBy[].
+** ^Each term of aOrderBy records a column of the ORDER BY clause.
+**
+** The colUsed field indicates which columns of the virtual table may be
+** required by the current scan. Virtual table columns are numbered from
+** zero in the order in which they appear within the CREATE TABLE statement
+** passed to sqlite3_declare_vtab(). For the first 63 columns (columns 0-62),
+** the corresponding bit is set within the colUsed mask if the column may be
+** required by SQLite. If the table has at least 64 columns and any column
+** to the right of the first 63 is required, then bit 63 of colUsed is also
+** set. In other words, column iCol may be required if the expression
+** (colUsed & ((sqlite3_uint64)1 << (iCol>=63 ? 63 : iCol))) evaluates to
+** non-zero.
+**
+** The [xBestIndex] method must fill aConstraintUsage[] with information
+** about what parameters to pass to xFilter. ^If argvIndex>0 then
+** the right-hand side of the corresponding aConstraint[] is evaluated
+** and becomes the argvIndex-th entry in argv. ^(If aConstraintUsage[].omit
+** is true, then the constraint is assumed to be fully handled by the
+** virtual table and might not be checked again by the byte code.)^ ^(The
+** aConstraintUsage[].omit flag is an optimization hint. When the omit flag
+** is left in its default setting of false, the constraint will always be
+** checked separately in byte code. If the omit flag is changed to true, then
+** the constraint may or may not be checked in byte code. In other words,
+** when the omit flag is true there is no guarantee that the constraint will
+** not be checked again using byte code.)^
+**
+** ^The idxNum and idxStr values are recorded and passed into the
+** [xFilter] method.
+** ^[sqlite3_free()] is used to free idxStr if and only if
+** needToFreeIdxStr is true.
+**
+** ^The orderByConsumed flag means that output from [xFilter]/[xNext] will
+** occur in
+** the correct order to satisfy the ORDER BY clause so that no separate
+** sorting step is required.
+**
+** ^The estimatedCost value is an estimate of the cost of a particular
+** strategy. A cost of N indicates that the cost of the strategy is similar
+** to a linear scan of an SQLite table with N rows. A cost of log(N)
+** indicates that the expense of the operation is similar to that of a
+** binary search on a unique indexed field of an SQLite table with N rows.
+**
+** ^The estimatedRows value is an estimate of the number of rows that
+** will be returned by the strategy.
+**
+** The xBestIndex method may optionally populate the idxFlags field with a
+** mask of SQLITE_INDEX_SCAN_* flags. Currently there is only one such flag -
+** SQLITE_INDEX_SCAN_UNIQUE. If the xBestIndex method sets this flag, SQLite
+** assumes that the strategy may visit at most one row.
+**
+** Additionally, if xBestIndex sets the SQLITE_INDEX_SCAN_UNIQUE flag, then
+** SQLite also assumes that if a call to the xUpdate() method is made as
+** part of the same statement to delete or update a virtual table row and the
+** implementation returns SQLITE_CONSTRAINT, then there is no need to rollback
+** any database changes. In other words, if the xUpdate() returns
+** SQLITE_CONSTRAINT, the database contents must be exactly as they were
+** before xUpdate was called. By contrast, if SQLITE_INDEX_SCAN_UNIQUE is not
+** set and xUpdate returns SQLITE_CONSTRAINT, any database changes made by
+** the xUpdate method are automatically rolled back by SQLite.
+**
+** IMPORTANT: The estimatedRows field was added to the sqlite3_index_info
+** structure for SQLite [version 3.8.2] ([dateof:3.8.2]).
+** If a virtual table extension is
+** used with an SQLite version earlier than 3.8.2, the results of attempting
+** to read or write the estimatedRows field are undefined (but are likely
+** to include crashing the application). The estimatedRows field should
+** therefore only be used if [sqlite3_libversion_number()] returns a
+** value greater than or equal to 3008002. Similarly, the idxFlags field
+** was added for [version 3.9.0] ([dateof:3.9.0]).
+** It may therefore only be used if
+** sqlite3_libversion_number() returns a value greater than or equal to
+** 3009000.
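+**
+** For illustration, a minimal xBestIndex sketch that claims a single
+** usable equality constraint on column 0 and asks for its value to be
+** passed as the first xFilter argument (the function name and cost
+** numbers are invented for this example):
+**
+** <blockquote><pre>
+** static int myBestIndex(sqlite3_vtab *pTab, sqlite3_index_info *pInfo){
+** &nbsp;&nbsp;int i;
+** &nbsp;&nbsp;pInfo->idxNum = 0;               // default plan: full scan
+** &nbsp;&nbsp;pInfo->estimatedCost = 1000000;
+** &nbsp;&nbsp;for(i=0; i&lt;pInfo->nConstraint; i++){
+** &nbsp;&nbsp;&nbsp;&nbsp;const struct sqlite3_index_constraint *pC = pInfo->aConstraint+i;
+** &nbsp;&nbsp;&nbsp;&nbsp;if( pC->usable && pC->iColumn==0
+** &nbsp;&nbsp;&nbsp;&nbsp; && pC->op==SQLITE_INDEX_CONSTRAINT_EQ ){
+** &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;(pInfo->aConstraintUsage+i)->argvIndex = 1;
+** &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;pInfo->idxNum = 1;           // lookup plan
+** &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;pInfo->estimatedCost = 10;
+** &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;break;
+** &nbsp;&nbsp;&nbsp;&nbsp;}
+** &nbsp;&nbsp;}
+** &nbsp;&nbsp;return SQLITE_OK;
+** }
+** </pre></blockquote>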
+*/
+struct sqlite3_index_info {
+ /* Inputs */
+ int nConstraint; /* Number of entries in aConstraint */
+ struct sqlite3_index_constraint {
+ int iColumn; /* Column constrained. -1 for ROWID */
+ unsigned char op; /* Constraint operator */
+ unsigned char usable; /* True if this constraint is usable */
+ int iTermOffset; /* Used internally - xBestIndex should ignore */
+ } *aConstraint; /* Table of WHERE clause constraints */
+ int nOrderBy; /* Number of terms in the ORDER BY clause */
+ struct sqlite3_index_orderby {
+ int iColumn; /* Column number */
+ unsigned char desc; /* True for DESC. False for ASC. */
+ } *aOrderBy; /* The ORDER BY clause */
+ /* Outputs */
+ struct sqlite3_index_constraint_usage {
+ int argvIndex; /* if >0, constraint is part of argv to xFilter */
+ unsigned char omit; /* Do not code a test for this constraint */
+ } *aConstraintUsage;
+ int idxNum; /* Number used to identify the index */
+ char *idxStr; /* String, possibly obtained from sqlite3_malloc */
+ int needToFreeIdxStr; /* Free idxStr using sqlite3_free() if true */
+ int orderByConsumed; /* True if output is already ordered */
+ double estimatedCost; /* Estimated cost of using this index */
+ /* Fields below are only available in SQLite 3.8.2 and later */
+ sqlite3_int64 estimatedRows; /* Estimated number of rows returned */
+ /* Fields below are only available in SQLite 3.9.0 and later */
+ int idxFlags; /* Mask of SQLITE_INDEX_SCAN_* flags */
+ /* Fields below are only available in SQLite 3.10.0 and later */
+ sqlite3_uint64 colUsed; /* Input: Mask of columns used by statement */
+};
+
+/*
+** CAPI3REF: Virtual Table Scan Flags
+**
+** Virtual table implementations are allowed to set the
+** [sqlite3_index_info].idxFlags field to some combination of
+** these bits.
+*/
+#define SQLITE_INDEX_SCAN_UNIQUE 1 /* Scan visits at most 1 row */
+
+/*
+** CAPI3REF: Virtual Table Constraint Operator Codes
+**
+** These macros define the allowed values for the
+** [sqlite3_index_info].aConstraint[].op field. Each value represents
+** an operator that is part of a constraint term in the WHERE clause of
+** a query that uses a [virtual table].
+*/
+#define SQLITE_INDEX_CONSTRAINT_EQ 2
+#define SQLITE_INDEX_CONSTRAINT_GT 4
+#define SQLITE_INDEX_CONSTRAINT_LE 8
+#define SQLITE_INDEX_CONSTRAINT_LT 16
+#define SQLITE_INDEX_CONSTRAINT_GE 32
+#define SQLITE_INDEX_CONSTRAINT_MATCH 64
+#define SQLITE_INDEX_CONSTRAINT_LIKE 65
+#define SQLITE_INDEX_CONSTRAINT_GLOB 66
+#define SQLITE_INDEX_CONSTRAINT_REGEXP 67
+#define SQLITE_INDEX_CONSTRAINT_NE 68
+#define SQLITE_INDEX_CONSTRAINT_ISNOT 69
+#define SQLITE_INDEX_CONSTRAINT_ISNOTNULL 70
+#define SQLITE_INDEX_CONSTRAINT_ISNULL 71
+#define SQLITE_INDEX_CONSTRAINT_IS 72
+#define SQLITE_INDEX_CONSTRAINT_FUNCTION 150
+
+/*
+** CAPI3REF: Register A Virtual Table Implementation
+** METHOD: sqlite3
+**
+** ^These routines are used to register a new [virtual table module] name.
+** ^Module names must be registered before
+** creating a new [virtual table] using the module and before using a
+** preexisting [virtual table] for the module.
+**
+** ^The module name is registered on the [database connection] specified
+** by the first parameter. ^The name of the module is given by the
+** second parameter. ^The third parameter is a pointer to
+** the implementation of the [virtual table module]. ^The fourth
+** parameter is an arbitrary client data pointer that is passed through
+** into the [xCreate] and [xConnect] methods of the virtual table module
+** when a new virtual table is being created or reinitialized.
+**
+** ^The sqlite3_create_module_v2() interface has a fifth parameter which
+** is a pointer to a destructor for the pClientData. ^SQLite will
+** invoke the destructor function (if it is not NULL) when SQLite
+** no longer needs the pClientData pointer. ^The destructor will also
+** be invoked if the call to sqlite3_create_module_v2() fails.
+** ^The sqlite3_create_module()
+** interface is equivalent to sqlite3_create_module_v2() with a NULL
+** destructor.
+**
+** ^If the third parameter (the pointer to the sqlite3_module object) is
+** NULL then no new module is created and any existing modules with the
+** same name are dropped.
+**
+** See also: [sqlite3_drop_modules()]
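+**
+** For illustration, registering a filled-in [sqlite3_module] under an
+** invented name, with client data that SQLite frees automatically
+** (MyAux is a hypothetical application type):
+**
+** <blockquote><pre>
+** void *pClientData = sqlite3_malloc(sizeof(MyAux));
+** rc = sqlite3_create_module_v2(db, "mymodule", &myModule,
+** &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;pClientData, sqlite3_free);
+** </pre></blockquote>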
+*/
+SQLITE_API int sqlite3_create_module(
+ sqlite3 *db, /* SQLite connection to register module with */
+ const char *zName, /* Name of the module */
+ const sqlite3_module *p, /* Methods for the module */
+ void *pClientData /* Client data for xCreate/xConnect */
+);
+SQLITE_API int sqlite3_create_module_v2(
+ sqlite3 *db, /* SQLite connection to register module with */
+ const char *zName, /* Name of the module */
+ const sqlite3_module *p, /* Methods for the module */
+ void *pClientData, /* Client data for xCreate/xConnect */
+ void(*xDestroy)(void*) /* Module destructor function */
+);
+
+/*
+** CAPI3REF: Remove Unnecessary Virtual Table Implementations
+** METHOD: sqlite3
+**
+** ^The sqlite3_drop_modules(D,L) interface removes all virtual
+** table modules from database connection D except those named on list L.
+** The L parameter must be either NULL or a pointer to an array of pointers
+** to strings where the array is terminated by a single NULL pointer.
+** ^If the L parameter is NULL, then all virtual table modules are removed.
+**
+** See also: [sqlite3_create_module()]
+*/
+SQLITE_API int sqlite3_drop_modules(
+ sqlite3 *db, /* Remove modules from this connection */
+ const char **azKeep /* Except, do not remove the ones named here */
+);
+
+/*
+** CAPI3REF: Virtual Table Instance Object
+** KEYWORDS: sqlite3_vtab
+**
+** Every [virtual table module] implementation uses a subclass
+** of this object to describe a particular instance
+** of the [virtual table]. Each subclass will
+** be tailored to the specific needs of the module implementation.
+** The purpose of this superclass is to define certain fields that are
+** common to all module implementations.
+**
+** ^Virtual tables methods can set an error message by assigning a
+** string obtained from [sqlite3_mprintf()] to zErrMsg. The method should
+** take care that any prior string is freed by a call to [sqlite3_free()]
+** prior to assigning a new string to zErrMsg. ^After the error message
+** is delivered up to the client application, the string will be automatically
+** freed by sqlite3_free() and the zErrMsg field will be zeroed.
+*/
+struct sqlite3_vtab {
+ const sqlite3_module *pModule; /* The module for this virtual table */
+ int nRef; /* Number of open cursors */
+ char *zErrMsg; /* Error message from sqlite3_mprintf() */
+ /* Virtual table implementations will typically add additional fields */
+};
+
+/*
+** CAPI3REF: Virtual Table Cursor Object
+** KEYWORDS: sqlite3_vtab_cursor {virtual table cursor}
+**
+** Every [virtual table module] implementation uses a subclass of the
+** following structure to describe cursors that point into the
+** [virtual table] and are used
+** to loop through the virtual table. Cursors are created using the
+** [sqlite3_module.xOpen | xOpen] method of the module and are destroyed
+** by the [sqlite3_module.xClose | xClose] method. Cursors are used
+** by the [xFilter], [xNext], [xEof], [xColumn], and [xRowid] methods
+** of the module. Each module implementation will define
+** the content of a cursor structure to suit its own needs.
+**
+** This superclass exists in order to define fields of the cursor that
+** are common to all implementations.
+*/
+struct sqlite3_vtab_cursor {
+ sqlite3_vtab *pVtab; /* Virtual table of this cursor */
+ /* Virtual table implementations will typically add additional fields */
+};
+
+/*
+** CAPI3REF: Declare The Schema Of A Virtual Table
+**
+** ^The [xCreate] and [xConnect] methods of a
+** [virtual table module] call this interface
+** to declare the format (the names and datatypes of the columns) of
+** the virtual tables they implement.
+*/
+SQLITE_API int sqlite3_declare_vtab(sqlite3*, const char *zSQL);
+
+/*
+** CAPI3REF: Overload A Function For A Virtual Table
+** METHOD: sqlite3
+**
+** ^(Virtual tables can provide alternative implementations of functions
+** using the [xFindFunction] method of the [virtual table module].
+** But global versions of those functions
+** must exist in order to be overloaded.)^
+**
+** ^(This API makes sure a global version of a function with a particular
+** name and number of parameters exists. If no such function exists
+** before this API is called, a new function is created.)^ ^The implementation
+** of the new function always causes an exception to be thrown. So
+** the new function is not good for anything by itself. Its only
+** purpose is to be a placeholder function that can be overloaded
+** by a [virtual table].
+*/
+SQLITE_API int sqlite3_overload_function(sqlite3*, const char *zFuncName, int nArg);
+
+/*
+** The interface to the virtual-table mechanism defined above (back up
+** to a comment remarkably similar to this one) is currently considered
+** to be experimental. The interface might change in incompatible ways.
+** If this is a problem for you, do not use the interface at this time.
+**
+** When the virtual-table mechanism stabilizes, we will declare the
+** interface fixed, support it indefinitely, and remove this comment.
+*/
+
+/*
+** CAPI3REF: A Handle To An Open BLOB
+** KEYWORDS: {BLOB handle} {BLOB handles}
+**
+** An instance of this object represents an open BLOB on which
+** [sqlite3_blob_open | incremental BLOB I/O] can be performed.
+** ^Objects of this type are created by [sqlite3_blob_open()]
+** and destroyed by [sqlite3_blob_close()].
+** ^The [sqlite3_blob_read()] and [sqlite3_blob_write()] interfaces
+** can be used to read or write small subsections of the BLOB.
+** ^The [sqlite3_blob_bytes()] interface returns the size of the BLOB in bytes.
+*/
+typedef struct sqlite3_blob sqlite3_blob;
+
+/*
+** CAPI3REF: Open A BLOB For Incremental I/O
+** METHOD: sqlite3
+** CONSTRUCTOR: sqlite3_blob
+**
+** ^(This interface opens a [BLOB handle | handle] to the BLOB located
+** in row iRow, column zColumn, table zTable in database zDb;
+** in other words, the same BLOB that would be selected by:
+**
+** <pre>
+** SELECT zColumn FROM zDb.zTable WHERE [rowid] = iRow;
+** </pre>)^
+**
+** ^(Parameter zDb is not the filename that contains the database, but
+** rather the symbolic name of the database. For attached databases, this is
+** the name that appears after the AS keyword in the [ATTACH] statement.
+** For the main database file, the database name is "main". For TEMP
+** tables, the database name is "temp".)^
+**
+** ^If the flags parameter is non-zero, then the BLOB is opened for read
+** and write access. ^If the flags parameter is zero, the BLOB is opened for
+** read-only access.
+**
+** ^(On success, [SQLITE_OK] is returned and the new [BLOB handle] is stored
+** in *ppBlob. Otherwise an [error code] is returned and, unless the error
+** code is SQLITE_MISUSE, *ppBlob is set to NULL.)^ ^This means that, provided
+** the API is not misused, it is always safe to call [sqlite3_blob_close()]
+** on *ppBlob after this function returns.
+**
+** This function fails with SQLITE_ERROR if any of the following are true:
+** <ul>
+** <li> ^(Database zDb does not exist)^,
+** <li> ^(Table zTable does not exist within database zDb)^,
+** <li> ^(Table zTable is a WITHOUT ROWID table)^,
+** <li> ^(Column zColumn does not exist)^,
+** <li> ^(Row iRow is not present in the table)^,
+** <li> ^(The specified column of row iRow contains a value that is not
+** a TEXT or BLOB value)^,
+** <li> ^(Column zColumn is part of an index, PRIMARY KEY or UNIQUE
+** constraint and the blob is being opened for read/write access)^,
+** <li> ^([foreign key constraints | Foreign key constraints] are enabled,
+** column zColumn is part of a [child key] definition and the blob is
+** being opened for read/write access)^.
+** </ul>
+**
+** ^Unless it returns SQLITE_MISUSE, this function sets the
+** [database connection] error code and message accessible via
+** [sqlite3_errcode()] and [sqlite3_errmsg()] and related functions.
+**
+** A BLOB referenced by sqlite3_blob_open() may be read using the
+** [sqlite3_blob_read()] interface and modified by using
+** [sqlite3_blob_write()]. The [BLOB handle] can be moved to a
+** different row of the same table using the [sqlite3_blob_reopen()]
+** interface. However, the column, table, or database of a [BLOB handle]
+** cannot be changed after the [BLOB handle] is opened.
+**
+** ^(If the row that a BLOB handle points to is modified by an
+** [UPDATE], [DELETE], or by [ON CONFLICT] side-effects
+** then the BLOB handle is marked as "expired".
+** This is true if any column of the row is changed, even a column
+** other than the one the BLOB handle is open on.)^
+** ^Calls to [sqlite3_blob_read()] and [sqlite3_blob_write()] for
+** an expired BLOB handle fail with a return code of [SQLITE_ABORT].
+** ^(Changes written into a BLOB prior to the BLOB expiring are not
+** rolled back by the expiration of the BLOB. Such changes will eventually
+** commit if the transaction continues to completion.)^
+**
+** ^Use the [sqlite3_blob_bytes()] interface to determine the size of
+** the opened blob. ^The size of a blob may not be changed by this
+** interface. Use the [UPDATE] SQL command to change the size of a
+** blob.
+**
+** ^The [sqlite3_bind_zeroblob()] and [sqlite3_result_zeroblob()] interfaces
+** and the built-in [zeroblob] SQL function may be used to create a
+** zero-filled blob to read or write using the incremental-blob interface.
+**
+** To avoid a resource leak, every open [BLOB handle] should eventually
+** be released by a call to [sqlite3_blob_close()].
+**
+** See also: [sqlite3_blob_close()],
+** [sqlite3_blob_reopen()], [sqlite3_blob_read()],
+** [sqlite3_blob_bytes()], [sqlite3_blob_write()].
+*/
+SQLITE_API int sqlite3_blob_open(
+ sqlite3*,
+ const char *zDb,
+ const char *zTable,
+ const char *zColumn,
+ sqlite3_int64 iRow,
+ int flags,
+ sqlite3_blob **ppBlob
+);
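+
+/*
+** Illustrative sketch (not part of the official documentation): a
+** hypothetical snippet that opens a read-only BLOB handle on column
+** "data" of row 1 in table "files", reads its full content, and closes
+** the handle.  The table and column names are assumptions made for
+** this example only.
+**
+**   sqlite3_blob *pBlob = 0;
+**   int rc = sqlite3_blob_open(db, "main", "files", "data", 1, 0, &pBlob);
+**   if( rc==SQLITE_OK ){
+**     int nByte = sqlite3_blob_bytes(pBlob);
+**     unsigned char *aData = sqlite3_malloc(nByte);
+**     if( aData ) rc = sqlite3_blob_read(pBlob, aData, nByte, 0);
+**     sqlite3_free(aData);
+**   }
+**   sqlite3_blob_close(pBlob);  /* harmless no-op if pBlob is NULL */
+*/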
+
+/*
+** CAPI3REF: Move a BLOB Handle to a New Row
+** METHOD: sqlite3_blob
+**
+** ^This function is used to move an existing [BLOB handle] so that it points
+** to a different row of the same database table. ^The new row is identified
+** by the rowid value passed as the second argument. Only the row can be
+** changed. ^The database, table and column on which the blob handle is open
+** remain the same. Moving an existing [BLOB handle] to a new row is
+** faster than closing the existing handle and opening a new one.
+**
+** ^(The new row must meet the same criteria as for [sqlite3_blob_open()] -
+** it must exist and there must be either a blob or text value stored in
+** the nominated column.)^ ^If the new row is not present in the table, or if
+** it does not contain a blob or text value, or if another error occurs, an
+** SQLite error code is returned and the blob handle is considered aborted.
+** ^All subsequent calls to [sqlite3_blob_read()], [sqlite3_blob_write()] or
+** [sqlite3_blob_reopen()] on an aborted blob handle immediately return
+** SQLITE_ABORT. ^Calling [sqlite3_blob_bytes()] on an aborted blob handle
+** always returns zero.
+**
+** ^This function sets the database handle error code and message.
+*/
+SQLITE_API int sqlite3_blob_reopen(sqlite3_blob *, sqlite3_int64);
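+
+/*
+** Illustrative sketch (not part of the official documentation): reusing
+** one handle to visit several rows of the same column is cheaper than
+** closing and re-opening.  Assumes pBlob is an open [BLOB handle] and
+** that rowids 1 through 3 exist in the table.
+**
+**   sqlite3_int64 nTotal = 0;
+**   int rc = SQLITE_OK;
+**   for(sqlite3_int64 iRow=1; iRow<=3 && rc==SQLITE_OK; iRow++){
+**     rc = sqlite3_blob_reopen(pBlob, iRow);
+**     if( rc==SQLITE_OK ) nTotal += sqlite3_blob_bytes(pBlob);
+**   }
+*/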
+
+/*
+** CAPI3REF: Close A BLOB Handle
+** DESTRUCTOR: sqlite3_blob
+**
+** ^This function closes an open [BLOB handle]. ^(The BLOB handle is closed
+** unconditionally. Even if this routine returns an error code, the
+** handle is still closed.)^
+**
+** ^If the blob handle being closed was opened for read-write access, and if
+** the database is in auto-commit mode and there are no other open read-write
+** blob handles or active write statements, the current transaction is
+** committed. ^If an error occurs while committing the transaction, an error
+** code is returned and the transaction rolled back.
+**
+** Calling this function with an argument that is not a NULL pointer or an
+** open blob handle results in undefined behaviour. ^Calling this routine
+** with a null pointer (such as would be returned by a failed call to
+** [sqlite3_blob_open()]) is a harmless no-op. ^Otherwise, if this function
+** is passed a valid open blob handle, the values returned by the
+** sqlite3_errcode() and sqlite3_errmsg() functions are set before returning.
+*/
+SQLITE_API int sqlite3_blob_close(sqlite3_blob *);
+
+/*
+** CAPI3REF: Return The Size Of An Open BLOB
+** METHOD: sqlite3_blob
+**
+** ^Returns the size in bytes of the BLOB accessible via the
+** successfully opened [BLOB handle] in its only argument. ^The
+** incremental blob I/O routines can only read or overwrite existing
+** blob content; they cannot change the size of a blob.
+**
+** This routine only works on a [BLOB handle] which has been created
+** by a prior successful call to [sqlite3_blob_open()] and which has not
+** been closed by [sqlite3_blob_close()]. Passing any other pointer in
+** to this routine results in undefined and probably undesirable behavior.
+*/
+SQLITE_API int sqlite3_blob_bytes(sqlite3_blob *);
+
+/*
+** CAPI3REF: Read Data From A BLOB Incrementally
+** METHOD: sqlite3_blob
+**
+** ^(This function is used to read data from an open [BLOB handle] into a
+** caller-supplied buffer. N bytes of data are copied into buffer Z
+** from the open BLOB, starting at offset iOffset.)^
+**
+** ^If offset iOffset is less than N bytes from the end of the BLOB,
+** [SQLITE_ERROR] is returned and no data is read. ^If N or iOffset is
+** less than zero, [SQLITE_ERROR] is returned and no data is read.
+** ^The size of the blob (and hence the maximum value of N+iOffset)
+** can be determined using the [sqlite3_blob_bytes()] interface.
+**
+** ^An attempt to read from an expired [BLOB handle] fails with an
+** error code of [SQLITE_ABORT].
+**
+** ^(On success, sqlite3_blob_read() returns SQLITE_OK.
+** Otherwise, an [error code] or an [extended error code] is returned.)^
+**
+** This routine only works on a [BLOB handle] which has been created
+** by a prior successful call to [sqlite3_blob_open()] and which has not
+** been closed by [sqlite3_blob_close()]. Passing any other pointer in
+** to this routine results in undefined and probably undesirable behavior.
+**
+** See also: [sqlite3_blob_write()].
+*/
+SQLITE_API int sqlite3_blob_read(sqlite3_blob *, void *Z, int N, int iOffset);
+
+/*
+** CAPI3REF: Write Data Into A BLOB Incrementally
+** METHOD: sqlite3_blob
+**
+** ^(This function is used to write data into an open [BLOB handle] from a
+** caller-supplied buffer. N bytes of data are copied from the buffer Z
+** into the open BLOB, starting at offset iOffset.)^
+**
+** ^(On success, sqlite3_blob_write() returns SQLITE_OK.
+** Otherwise, an [error code] or an [extended error code] is returned.)^
+** ^Unless SQLITE_MISUSE is returned, this function sets the
+** [database connection] error code and message accessible via
+** [sqlite3_errcode()] and [sqlite3_errmsg()] and related functions.
+**
+** ^If the [BLOB handle] passed as the first argument was not opened for
+** writing (the flags parameter to [sqlite3_blob_open()] was zero),
+** this function returns [SQLITE_READONLY].
+**
+** This function may only modify the contents of the BLOB; it is
+** not possible to increase the size of a BLOB using this API.
+** ^If offset iOffset is less than N bytes from the end of the BLOB,
+** [SQLITE_ERROR] is returned and no data is written. The size of the
+** BLOB (and hence the maximum value of N+iOffset) can be determined
+** using the [sqlite3_blob_bytes()] interface. ^If N or iOffset are less
+** than zero [SQLITE_ERROR] is returned and no data is written.
+**
+** ^An attempt to write to an expired [BLOB handle] fails with an
+** error code of [SQLITE_ABORT]. ^Writes to the BLOB that occurred
+** before the [BLOB handle] expired are not rolled back by the
+** expiration of the handle, though of course those changes might
+** have been overwritten by the statement that expired the BLOB handle
+** or by other independent statements.
+**
+** This routine only works on a [BLOB handle] which has been created
+** by a prior successful call to [sqlite3_blob_open()] and which has not
+** been closed by [sqlite3_blob_close()]. Passing any other pointer in
+** to this routine results in undefined and probably undesirable behavior.
+**
+** See also: [sqlite3_blob_read()].
+*/
+SQLITE_API int sqlite3_blob_write(sqlite3_blob *, const void *z, int n, int iOffset);
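+
+/*
+** Illustrative sketch (not part of the official documentation):
+** overwrite the first bytes of a BLOB that was opened read/write
+** (flags!=0 in sqlite3_blob_open()).  The handle and payload are
+** assumptions for the example; the write cannot extend the BLOB past
+** its current size.
+**
+**   static const char zHdr[4] = { 'D','A','T','A' };
+**   int rc = SQLITE_ERROR;
+**   if( sqlite3_blob_bytes(pBlob)>=4 ){
+**     rc = sqlite3_blob_write(pBlob, zHdr, 4, 0);
+**   }
+*/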
+
+/*
+** CAPI3REF: Virtual File System Objects
+**
+** A virtual filesystem (VFS) is an [sqlite3_vfs] object
+** that SQLite uses to interact
+** with the underlying operating system. Most SQLite builds come with a
+** single default VFS that is appropriate for the host computer.
+** New VFSes can be registered and existing VFSes can be unregistered.
+** The following interfaces are provided.
+**
+** ^The sqlite3_vfs_find() interface returns a pointer to a VFS given its name.
+** ^Names are case sensitive.
+** ^Names are zero-terminated UTF-8 strings.
+** ^If there is no match, a NULL pointer is returned.
+** ^If zVfsName is NULL then the default VFS is returned.
+**
+** ^New VFSes are registered with sqlite3_vfs_register().
+** ^Each new VFS becomes the default VFS if the makeDflt flag is set.
+** ^The same VFS can be registered multiple times without injury.
+** ^To make an existing VFS into the default VFS, register it again
+** with the makeDflt flag set. If two different VFSes with the
+** same name are registered, the behavior is undefined. If a
+** VFS is registered with a name that is NULL or an empty string,
+** then the behavior is undefined.
+**
+** ^Unregister a VFS with the sqlite3_vfs_unregister() interface.
+** ^(If the default VFS is unregistered, another VFS is chosen as
+** the default. The choice for the new VFS is arbitrary.)^
+*/
+SQLITE_API sqlite3_vfs *sqlite3_vfs_find(const char *zVfsName);
+SQLITE_API int sqlite3_vfs_register(sqlite3_vfs*, int makeDflt);
+SQLITE_API int sqlite3_vfs_unregister(sqlite3_vfs*);
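+
+/*
+** Illustrative sketch (not part of the official documentation): look up
+** the default VFS and re-register it as the default.  Registering an
+** already-registered VFS is documented above as harmless.
+**
+**   sqlite3_vfs *pDflt = sqlite3_vfs_find(0);    /* NULL -> default VFS */
+**   if( pDflt ) sqlite3_vfs_register(pDflt, 1);  /* keep it the default */
+*/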
+
+/*
+** CAPI3REF: Mutexes
+**
+** The SQLite core uses these routines for thread
+** synchronization. Though they are intended for internal
+** use by SQLite, code that links against SQLite is
+** permitted to use any of these routines.
+**
+** The SQLite source code contains multiple implementations
+** of these mutex routines. An appropriate implementation
+** is selected automatically at compile-time. The following
+** implementations are available in the SQLite core:
+**
+** <ul>
+** <li> SQLITE_MUTEX_PTHREADS
+** <li> SQLITE_MUTEX_W32
+** <li> SQLITE_MUTEX_NOOP
+** </ul>
+**
+** The SQLITE_MUTEX_NOOP implementation is a set of routines
+** that does no real locking and is appropriate for use in
+** a single-threaded application. The SQLITE_MUTEX_PTHREADS and
+** SQLITE_MUTEX_W32 implementations are appropriate for use on Unix
+** and Windows.
+**
+** If SQLite is compiled with the SQLITE_MUTEX_APPDEF preprocessor
+** macro defined (with "-DSQLITE_MUTEX_APPDEF=1"), then no mutex
+** implementation is included with the library. In this case the
+** application must supply a custom mutex implementation using the
+** [SQLITE_CONFIG_MUTEX] option of the sqlite3_config() function
+** before calling sqlite3_initialize() or any other public sqlite3_
+** function that calls sqlite3_initialize().
+**
+** ^The sqlite3_mutex_alloc() routine allocates a new
+** mutex and returns a pointer to it. ^The sqlite3_mutex_alloc()
+** routine returns NULL if it is unable to allocate the requested
+** mutex. The argument to sqlite3_mutex_alloc() must be one of these
+** integer constants:
+**
+** <ul>
+** <li> SQLITE_MUTEX_FAST
+** <li> SQLITE_MUTEX_RECURSIVE
+** <li> SQLITE_MUTEX_STATIC_MAIN
+** <li> SQLITE_MUTEX_STATIC_MEM
+** <li> SQLITE_MUTEX_STATIC_OPEN
+** <li> SQLITE_MUTEX_STATIC_PRNG
+** <li> SQLITE_MUTEX_STATIC_LRU
+** <li> SQLITE_MUTEX_STATIC_PMEM
+** <li> SQLITE_MUTEX_STATIC_APP1
+** <li> SQLITE_MUTEX_STATIC_APP2
+** <li> SQLITE_MUTEX_STATIC_APP3
+** <li> SQLITE_MUTEX_STATIC_VFS1
+** <li> SQLITE_MUTEX_STATIC_VFS2
+** <li> SQLITE_MUTEX_STATIC_VFS3
+** </ul>
+**
+** ^The first two constants (SQLITE_MUTEX_FAST and SQLITE_MUTEX_RECURSIVE)
+** cause sqlite3_mutex_alloc() to create
+** a new mutex. ^The new mutex is recursive when SQLITE_MUTEX_RECURSIVE
+** is used but not necessarily so when SQLITE_MUTEX_FAST is used.
+** The mutex implementation does not need to make a distinction
+** between SQLITE_MUTEX_RECURSIVE and SQLITE_MUTEX_FAST if it does
+** not want to. SQLite will only request a recursive mutex in
+** cases where it really needs one. If a faster non-recursive mutex
+** implementation is available on the host platform, the mutex subsystem
+** might return such a mutex in response to SQLITE_MUTEX_FAST.
+**
+** ^The other allowed parameters to sqlite3_mutex_alloc() (anything other
+** than SQLITE_MUTEX_FAST and SQLITE_MUTEX_RECURSIVE) each return
+** a pointer to a static preexisting mutex. ^Nine static mutexes are
+** used by the current version of SQLite. Future versions of SQLite
+** may add additional static mutexes. Static mutexes are for internal
+** use by SQLite only. Applications that use SQLite mutexes should
+** use only the dynamic mutexes returned by SQLITE_MUTEX_FAST or
+** SQLITE_MUTEX_RECURSIVE.
+**
+** ^Note that if one of the dynamic mutex parameters (SQLITE_MUTEX_FAST
+** or SQLITE_MUTEX_RECURSIVE) is used then sqlite3_mutex_alloc()
+** returns a different mutex on every call. ^For the static
+** mutex types, the same mutex is returned on every call that has
+** the same type number.
+**
+** ^The sqlite3_mutex_free() routine deallocates a previously
+** allocated dynamic mutex. Attempting to deallocate a static
+** mutex results in undefined behavior.
+**
+** ^The sqlite3_mutex_enter() and sqlite3_mutex_try() routines attempt
+** to enter a mutex. ^If another thread is already within the mutex,
+** sqlite3_mutex_enter() will block and sqlite3_mutex_try() will return
+** SQLITE_BUSY. ^The sqlite3_mutex_try() interface returns [SQLITE_OK]
+** upon successful entry. ^(Mutexes created using
+** SQLITE_MUTEX_RECURSIVE can be entered multiple times by the same thread.
+** In such cases, the
+** mutex must be exited an equal number of times before another thread
+** can enter.)^ If the same thread tries to enter any mutex other
+** than an SQLITE_MUTEX_RECURSIVE more than once, the behavior is undefined.
+**
+** ^(Some systems (for example, Windows 95) do not support the operation
+** implemented by sqlite3_mutex_try(). On those systems, sqlite3_mutex_try()
+** will always return SQLITE_BUSY. The SQLite core only ever uses
+** sqlite3_mutex_try() as an optimization so this is acceptable
+** behavior.)^
+**
+** ^The sqlite3_mutex_leave() routine exits a mutex that was
+** previously entered by the same thread. The behavior
+** is undefined if the mutex is not currently entered by the
+** calling thread or is not currently allocated.
+**
+** ^If the argument to sqlite3_mutex_enter(), sqlite3_mutex_try(), or
+** sqlite3_mutex_leave() is a NULL pointer, then all three routines
+** behave as no-ops.
+**
+** See also: [sqlite3_mutex_held()] and [sqlite3_mutex_notheld()].
+*/
+SQLITE_API sqlite3_mutex *sqlite3_mutex_alloc(int);
+SQLITE_API void sqlite3_mutex_free(sqlite3_mutex*);
+SQLITE_API void sqlite3_mutex_enter(sqlite3_mutex*);
+SQLITE_API int sqlite3_mutex_try(sqlite3_mutex*);
+SQLITE_API void sqlite3_mutex_leave(sqlite3_mutex*);
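+
+/*
+** Illustrative sketch (not part of the official documentation):
+** allocate a dynamic mutex, guard a critical section, then release it.
+** As noted above, applications should use only the FAST or RECURSIVE
+** types.
+**
+**   sqlite3_mutex *p = sqlite3_mutex_alloc(SQLITE_MUTEX_FAST);
+**   if( p ){
+**     sqlite3_mutex_enter(p);
+**     /* ... critical section ... */
+**     sqlite3_mutex_leave(p);
+**     sqlite3_mutex_free(p);
+**   }
+*/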
+
+/*
+** CAPI3REF: Mutex Methods Object
+**
+** An instance of this structure defines the low-level routines
+** used to allocate and use mutexes.
+**
+** Usually, the default mutex implementations provided by SQLite are
+** sufficient, however the application has the option of substituting a custom
+** implementation for specialized deployments or systems for which SQLite
+** does not provide a suitable implementation. In this case, the application
+** creates and populates an instance of this structure to pass
+** to sqlite3_config() along with the [SQLITE_CONFIG_MUTEX] option.
+** Additionally, an instance of this structure can be used as an
+** output variable when querying the system for the current mutex
+** implementation, using the [SQLITE_CONFIG_GETMUTEX] option.
+**
+** ^The xMutexInit method defined by this structure is invoked as
+** part of system initialization by the sqlite3_initialize() function.
+** ^The xMutexInit routine is called by SQLite exactly once for each
+** effective call to [sqlite3_initialize()].
+**
+** ^The xMutexEnd method defined by this structure is invoked as
+** part of system shutdown by the sqlite3_shutdown() function. The
+** implementation of this method is expected to release all outstanding
+** resources obtained by the mutex methods implementation, especially
+** those obtained by the xMutexInit method. ^The xMutexEnd()
+** interface is invoked exactly once for each call to [sqlite3_shutdown()].
+**
+** ^(The remaining seven methods defined by this structure (xMutexAlloc,
+** xMutexFree, xMutexEnter, xMutexTry, xMutexLeave, xMutexHeld and
+** xMutexNotheld) implement the following interfaces (respectively):
+**
+** <ul>
+** <li> [sqlite3_mutex_alloc()] </li>
+** <li> [sqlite3_mutex_free()] </li>
+** <li> [sqlite3_mutex_enter()] </li>
+** <li> [sqlite3_mutex_try()] </li>
+** <li> [sqlite3_mutex_leave()] </li>
+** <li> [sqlite3_mutex_held()] </li>
+** <li> [sqlite3_mutex_notheld()] </li>
+** </ul>)^
+**
+** The only difference is that the public sqlite3_XXX functions enumerated
+** above silently ignore any invocations that pass a NULL pointer instead
+** of a valid mutex handle. The implementations of the methods defined
+** by this structure are not required to handle this case. The results
+** of passing a NULL pointer instead of a valid mutex handle are undefined
+** (i.e. it is acceptable to provide an implementation that segfaults if
+** it is passed a NULL pointer).
+**
+** The xMutexInit() method must be threadsafe. It must be harmless to
+** invoke xMutexInit() multiple times within the same process and without
+** intervening calls to xMutexEnd(). Second and subsequent calls to
+** xMutexInit() must be no-ops.
+**
+** xMutexInit() must not use SQLite memory allocation ([sqlite3_malloc()]
+** and its associates). Similarly, xMutexAlloc() must not use SQLite memory
+** allocation for a static mutex. ^However xMutexAlloc() may use SQLite
+** memory allocation for a fast or recursive mutex.
+**
+** ^SQLite will invoke the xMutexEnd() method when [sqlite3_shutdown()] is
+** called, but only if the prior call to xMutexInit returned SQLITE_OK.
+** If xMutexInit fails in any way, it is expected to clean up after itself
+** prior to returning.
+*/
+typedef struct sqlite3_mutex_methods sqlite3_mutex_methods;
+struct sqlite3_mutex_methods {
+ int (*xMutexInit)(void);
+ int (*xMutexEnd)(void);
+ sqlite3_mutex *(*xMutexAlloc)(int);
+ void (*xMutexFree)(sqlite3_mutex *);
+ void (*xMutexEnter)(sqlite3_mutex *);
+ int (*xMutexTry)(sqlite3_mutex *);
+ void (*xMutexLeave)(sqlite3_mutex *);
+ int (*xMutexHeld)(sqlite3_mutex *);
+ int (*xMutexNotheld)(sqlite3_mutex *);
+};
+
+/*
+** CAPI3REF: Mutex Verification Routines
+**
+** The sqlite3_mutex_held() and sqlite3_mutex_notheld() routines
+** are intended for use inside assert() statements. The SQLite core
+** never uses these routines except inside an assert() and applications
+** are advised to follow the lead of the core. The SQLite core only
+** provides implementations for these routines when it is compiled
+** with the SQLITE_DEBUG flag. External mutex implementations
+** are only required to provide these routines if SQLITE_DEBUG is
+** defined and if NDEBUG is not defined.
+**
+** These routines should return true if the mutex in their argument
+** is held or not held, respectively, by the calling thread.
+**
+** The implementation is not required to provide versions of these
+** routines that actually work. If the implementation does not provide working
+** versions of these routines, it should at least provide stubs that always
+** return true so that one does not get spurious assertion failures.
+**
+** If the argument to sqlite3_mutex_held() is a NULL pointer then
+** the routine should return 1. This seems counter-intuitive since
+** clearly the mutex cannot be held if it does not exist. But
+** the reason the mutex does not exist is because the build is not
+** using mutexes. And we do not want the assert() containing the
+** call to sqlite3_mutex_held() to fail, so a non-zero return is
+** the appropriate thing to do. The sqlite3_mutex_notheld()
+** interface should also return 1 when given a NULL pointer.
+*/
+#ifndef NDEBUG
+SQLITE_API int sqlite3_mutex_held(sqlite3_mutex*);
+SQLITE_API int sqlite3_mutex_notheld(sqlite3_mutex*);
+#endif
+
+/*
+** CAPI3REF: Mutex Types
+**
+** The [sqlite3_mutex_alloc()] interface takes a single argument
+** which is one of these integer constants.
+**
+** The set of static mutexes may change from one SQLite release to the
+** next. Applications that override the built-in mutex logic must be
+** prepared to accommodate additional static mutexes.
+*/
+#define SQLITE_MUTEX_FAST 0
+#define SQLITE_MUTEX_RECURSIVE 1
+#define SQLITE_MUTEX_STATIC_MAIN 2
+#define SQLITE_MUTEX_STATIC_MEM 3 /* sqlite3_malloc() */
+#define SQLITE_MUTEX_STATIC_MEM2 4 /* NOT USED */
+#define SQLITE_MUTEX_STATIC_OPEN 4 /* sqlite3BtreeOpen() */
+#define SQLITE_MUTEX_STATIC_PRNG 5 /* sqlite3_randomness() */
+#define SQLITE_MUTEX_STATIC_LRU 6 /* lru page list */
+#define SQLITE_MUTEX_STATIC_LRU2 7 /* NOT USED */
+#define SQLITE_MUTEX_STATIC_PMEM 7 /* sqlite3PageMalloc() */
+#define SQLITE_MUTEX_STATIC_APP1 8 /* For use by application */
+#define SQLITE_MUTEX_STATIC_APP2 9 /* For use by application */
+#define SQLITE_MUTEX_STATIC_APP3 10 /* For use by application */
+#define SQLITE_MUTEX_STATIC_VFS1 11 /* For use by built-in VFS */
+#define SQLITE_MUTEX_STATIC_VFS2 12 /* For use by extension VFS */
+#define SQLITE_MUTEX_STATIC_VFS3 13 /* For use by application VFS */
+
+/* Legacy compatibility: */
+#define SQLITE_MUTEX_STATIC_MASTER 2
+
+
+/*
+** CAPI3REF: Retrieve the mutex for a database connection
+** METHOD: sqlite3
+**
+** ^This interface returns a pointer to the [sqlite3_mutex] object that
+** serializes access to the [database connection] given in the argument
+** when the [threading mode] is Serialized.
+** ^If the [threading mode] is Single-thread or Multi-thread then this
+** routine returns a NULL pointer.
+*/
+SQLITE_API sqlite3_mutex *sqlite3_db_mutex(sqlite3*);
+
+/*
+** CAPI3REF: Low-Level Control Of Database Files
+** METHOD: sqlite3
+** KEYWORDS: {file control}
+**
+** ^The [sqlite3_file_control()] interface makes a direct call to the
+** xFileControl method for the [sqlite3_io_methods] object associated
+** with a particular database identified by the second argument. ^The
+** name of the database is "main" for the main database or "temp" for the
+** TEMP database, or the name that appears after the AS keyword for
+** databases that are added using the [ATTACH] SQL command.
+** ^A NULL pointer can be used in place of "main" to refer to the
+** main database file.
+** ^The third and fourth parameters to this routine
+** are passed directly through to the second and third parameters of
+** the xFileControl method. ^The return value of the xFileControl
+** method becomes the return value of this routine.
+**
+** A few opcodes for [sqlite3_file_control()] are handled directly
+** by the SQLite core and never invoke the
+** sqlite3_io_methods.xFileControl method.
+** ^The [SQLITE_FCNTL_FILE_POINTER] value for the op parameter causes
+** a pointer to the underlying [sqlite3_file] object to be written into
+** the space pointed to by the 4th parameter. The
+** [SQLITE_FCNTL_JOURNAL_POINTER] works similarly except that it returns
+** the [sqlite3_file] object associated with the journal file instead of
+** the main database. The [SQLITE_FCNTL_VFS_POINTER] opcode returns
+** a pointer to the underlying [sqlite3_vfs] object for the file.
+** The [SQLITE_FCNTL_DATA_VERSION] returns the data version counter
+** from the pager.
+**
+** ^If the second parameter (zDbName) does not match the name of any
+** open database file, then SQLITE_ERROR is returned. ^This error
+** code is not remembered and will not be recalled by [sqlite3_errcode()]
+** or [sqlite3_errmsg()]. The underlying xFileControl method might
+** also return SQLITE_ERROR. There is no way to distinguish between
+** an incorrect zDbName and an SQLITE_ERROR return from the underlying
+** xFileControl method.
+**
+** See also: [file control opcodes]
+*/
+SQLITE_API int sqlite3_file_control(sqlite3*, const char *zDbName, int op, void*);
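+
+/*
+** Illustrative sketch (not part of the official documentation): fetch
+** the underlying [sqlite3_file] object for the "main" database using
+** the SQLITE_FCNTL_FILE_POINTER opcode, one of the opcodes handled
+** directly by the core as described above.
+**
+**   sqlite3_file *pFile = 0;
+**   int rc = sqlite3_file_control(db, "main",
+**                                 SQLITE_FCNTL_FILE_POINTER, &pFile);
+*/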
+
+/*
+** CAPI3REF: Testing Interface
+**
+** ^The sqlite3_test_control() interface is used to read out internal
+** state of SQLite and to inject faults into SQLite for testing
+** purposes. ^The first parameter is an operation code that determines
+** the number, meaning, and operation of all subsequent parameters.
+**
+** This interface is not for use by applications. It exists solely
+** for verifying the correct operation of the SQLite library. Depending
+** on how the SQLite library is compiled, this interface might not exist.
+**
+** The details of the operation codes, their meanings, the parameters
+** they take, and what they do are all subject to change without notice.
+** Unlike most of the SQLite API, this function is not guaranteed to
+** operate consistently from one release to the next.
+*/
+SQLITE_API int sqlite3_test_control(int op, ...);
+
+/*
+** CAPI3REF: Testing Interface Operation Codes
+**
+** These constants are the valid operation code parameters used
+** as the first argument to [sqlite3_test_control()].
+**
+** These parameters and their meanings are subject to change
+** without notice. These values are for testing purposes only.
+** Applications should not use any of these parameters or the
+** [sqlite3_test_control()] interface.
+*/
+#define SQLITE_TESTCTRL_FIRST 5
+#define SQLITE_TESTCTRL_PRNG_SAVE 5
+#define SQLITE_TESTCTRL_PRNG_RESTORE 6
+#define SQLITE_TESTCTRL_PRNG_RESET 7 /* NOT USED */
+#define SQLITE_TESTCTRL_BITVEC_TEST 8
+#define SQLITE_TESTCTRL_FAULT_INSTALL 9
+#define SQLITE_TESTCTRL_BENIGN_MALLOC_HOOKS 10
+#define SQLITE_TESTCTRL_PENDING_BYTE 11
+#define SQLITE_TESTCTRL_ASSERT 12
+#define SQLITE_TESTCTRL_ALWAYS 13
+#define SQLITE_TESTCTRL_RESERVE 14 /* NOT USED */
+#define SQLITE_TESTCTRL_OPTIMIZATIONS 15
+#define SQLITE_TESTCTRL_ISKEYWORD 16 /* NOT USED */
+#define SQLITE_TESTCTRL_SCRATCHMALLOC 17 /* NOT USED */
+#define SQLITE_TESTCTRL_INTERNAL_FUNCTIONS 17
+#define SQLITE_TESTCTRL_LOCALTIME_FAULT 18
+#define SQLITE_TESTCTRL_EXPLAIN_STMT 19 /* NOT USED */
+#define SQLITE_TESTCTRL_ONCE_RESET_THRESHOLD 19
+#define SQLITE_TESTCTRL_NEVER_CORRUPT 20
+#define SQLITE_TESTCTRL_VDBE_COVERAGE 21
+#define SQLITE_TESTCTRL_BYTEORDER 22
+#define SQLITE_TESTCTRL_ISINIT 23
+#define SQLITE_TESTCTRL_SORTER_MMAP 24
+#define SQLITE_TESTCTRL_IMPOSTER 25
+#define SQLITE_TESTCTRL_PARSER_COVERAGE 26
+#define SQLITE_TESTCTRL_RESULT_INTREAL 27
+#define SQLITE_TESTCTRL_PRNG_SEED 28
+#define SQLITE_TESTCTRL_EXTRA_SCHEMA_CHECKS 29
+#define SQLITE_TESTCTRL_LAST 29 /* Largest TESTCTRL */
+
+/*
+** CAPI3REF: SQL Keyword Checking
+**
+** These routines provide access to the set of SQL language keywords
+** recognized by SQLite. Applications can use these routines to determine
+** whether or not a specific identifier needs to be escaped (for example,
+** by enclosing in double-quotes) so as not to confuse the parser.
+**
+** The sqlite3_keyword_count() interface returns the number of distinct
+** keywords understood by SQLite.
+**
+** The sqlite3_keyword_name(N,Z,L) interface finds the N-th keyword and
+** makes *Z point to that keyword expressed as UTF8 and writes the number
+** of bytes in the keyword into *L. The string that *Z points to is not
+** zero-terminated. The sqlite3_keyword_name(N,Z,L) routine returns
+** SQLITE_OK if N is within bounds and SQLITE_ERROR if not. If either Z
+** or L is a NULL or invalid pointer then calls to
+** sqlite3_keyword_name(N,Z,L) result in undefined behavior.
+**
+** The sqlite3_keyword_check(Z,L) interface checks to see whether or not
+** the L-byte UTF8 identifier that Z points to is a keyword, returning non-zero
+** if it is and zero if not.
+**
+** The parser used by SQLite is forgiving. It is often possible to use
+** a keyword as an identifier as long as such use does not result in a
+** parsing ambiguity. For example, the statement
+** "CREATE TABLE BEGIN(REPLACE,PRAGMA,END);" is accepted by SQLite, and
+** creates a new table named "BEGIN" with three columns named
+** "REPLACE", "PRAGMA", and "END". Nevertheless, best practice is to avoid
+** using keywords as identifiers. Common techniques used to avoid keyword
+** name collisions include:
+** <ul>
+** <li> Put all identifier names inside double-quotes. This is the official
+** SQL way to escape identifier names.
+** <li> Put identifier names inside &#91;...&#93;. This is not standard SQL,
+** but it is what SQL Server does and so lots of programmers use this
+** technique.
+** <li> Begin every identifier with the letter "Z" as no SQL keywords start
+** with "Z".
+** <li> Include a digit somewhere in every identifier name.
+** </ul>
+**
+** Note that the number of keywords understood by SQLite can depend on
+** compile-time options. For example, "VACUUM" is not a keyword if
+** SQLite is compiled with the [-DSQLITE_OMIT_VACUUM] option. Also,
+** new keywords may be added to future releases of SQLite.
+*/
+SQLITE_API int sqlite3_keyword_count(void);
+SQLITE_API int sqlite3_keyword_name(int,const char**,int*);
+SQLITE_API int sqlite3_keyword_check(const char*,int);
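+
+/*
+** Illustrative sketch (not part of the official documentation): decide
+** whether an identifier needs quoting by testing it against the keyword
+** list.  Assumes <string.h> is available for strlen().
+**
+**   const char *zId = "BEGIN";
+**   if( sqlite3_keyword_check(zId, (int)strlen(zId)) ){
+**     /* zId is a keyword: quote it (e.g. as "BEGIN") before use */
+**   }
+*/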
+
+/*
+** CAPI3REF: Dynamic String Object
+** KEYWORDS: {dynamic string}
+**
+** An instance of the sqlite3_str object contains a dynamically-sized
+** string under construction.
+**
+** The lifecycle of an sqlite3_str object is as follows:
+** <ol>
+** <li> ^The sqlite3_str object is created using [sqlite3_str_new()].
+** <li> ^Text is appended to the sqlite3_str object using various
+** methods, such as [sqlite3_str_appendf()].
+** <li> ^The sqlite3_str object is destroyed and the string it created
+** is returned using the [sqlite3_str_finish()] interface.
+** </ol>
+*/
+typedef struct sqlite3_str sqlite3_str;
+
+/*
+** CAPI3REF: Create A New Dynamic String Object
+** CONSTRUCTOR: sqlite3_str
+**
+** ^The [sqlite3_str_new(D)] interface allocates and initializes
+** a new [sqlite3_str] object. To avoid memory leaks, the object returned by
+** [sqlite3_str_new()] must be freed by a subsequent call to
+** [sqlite3_str_finish(X)].
+**
+** ^The [sqlite3_str_new(D)] interface always returns a pointer to a
+** valid [sqlite3_str] object, though in the event of an out-of-memory
+** error the returned object might be a special singleton that will
+** silently reject new text, always return SQLITE_NOMEM from
+** [sqlite3_str_errcode()], always return 0 for
+** [sqlite3_str_length()], and always return NULL from
+** [sqlite3_str_finish(X)]. It is always safe to use the value
+** returned by [sqlite3_str_new(D)] as the sqlite3_str parameter
+** to any of the other [sqlite3_str] methods.
+**
+** The D parameter to [sqlite3_str_new(D)] may be NULL. If the
+** D parameter in [sqlite3_str_new(D)] is not NULL, then the maximum
+** length of the string contained in the [sqlite3_str] object will be
+** the value set for [sqlite3_limit](D,[SQLITE_LIMIT_LENGTH]) instead
+** of [SQLITE_MAX_LENGTH].
+*/
+SQLITE_API sqlite3_str *sqlite3_str_new(sqlite3*);
+
+/*
+** CAPI3REF: Finalize A Dynamic String
+** DESTRUCTOR: sqlite3_str
+**
+** ^The [sqlite3_str_finish(X)] interface destroys the sqlite3_str object X
+** and returns a pointer to a memory buffer obtained from [sqlite3_malloc64()]
+** that contains the constructed string. The calling application should
+** pass the returned value to [sqlite3_free()] to avoid a memory leak.
+** ^The [sqlite3_str_finish(X)] interface may return a NULL pointer if any
+** errors were encountered during construction of the string. ^The
+** [sqlite3_str_finish(X)] interface will also return a NULL pointer if the
+** string in [sqlite3_str] object X is zero bytes long.
+*/
+SQLITE_API char *sqlite3_str_finish(sqlite3_str*);
+
+/*
+** CAPI3REF: Add Content To A Dynamic String
+** METHOD: sqlite3_str
+**
+** These interfaces add content to an sqlite3_str object previously obtained
+** from [sqlite3_str_new()].
+**
+** ^The [sqlite3_str_appendf(X,F,...)] and
+** [sqlite3_str_vappendf(X,F,V)] interfaces use the [built-in printf]
+** functionality of SQLite to append formatted text onto the end of
+** [sqlite3_str] object X.
+**
+** ^The [sqlite3_str_append(X,S,N)] method appends exactly N bytes from string S
+** onto the end of the [sqlite3_str] object X. N must be non-negative.
+** S must contain at least N non-zero bytes of content. To append a
+** zero-terminated string in its entirety, use the [sqlite3_str_appendall()]
+** method instead.
+**
+** ^The [sqlite3_str_appendall(X,S)] method appends the complete content of
+** zero-terminated string S onto the end of [sqlite3_str] object X.
+**
+** ^The [sqlite3_str_appendchar(X,N,C)] method appends N copies of the
+** single-byte character C onto the end of [sqlite3_str] object X.
+** ^This method can be used, for example, to add whitespace indentation.
+**
+** ^The [sqlite3_str_reset(X)] method resets the string under construction
+** inside [sqlite3_str] object X back to zero bytes in length.
+**
+** These methods do not return a result code. ^If an error occurs, that fact
+** is recorded in the [sqlite3_str] object and can be recovered by a
+** subsequent call to [sqlite3_str_errcode(X)].
+*/
+SQLITE_API void sqlite3_str_appendf(sqlite3_str*, const char *zFormat, ...);
+SQLITE_API void sqlite3_str_vappendf(sqlite3_str*, const char *zFormat, va_list);
+SQLITE_API void sqlite3_str_append(sqlite3_str*, const char *zIn, int N);
+SQLITE_API void sqlite3_str_appendall(sqlite3_str*, const char *zIn);
+SQLITE_API void sqlite3_str_appendchar(sqlite3_str*, int N, char C);
+SQLITE_API void sqlite3_str_reset(sqlite3_str*);
+
+/*
+** CAPI3REF: Status Of A Dynamic String
+** METHOD: sqlite3_str
+**
+** These interfaces return the current status of an [sqlite3_str] object.
+**
+** ^If any prior errors have occurred while constructing the dynamic string
+** in sqlite3_str X, then the [sqlite3_str_errcode(X)] method will return
+** an appropriate error code. ^The [sqlite3_str_errcode(X)] method returns
+** [SQLITE_NOMEM] following any out-of-memory error, or
+** [SQLITE_TOOBIG] if the size of the dynamic string exceeds
+** [SQLITE_MAX_LENGTH], or [SQLITE_OK] if there have been no errors.
+**
+** ^The [sqlite3_str_length(X)] method returns the current length, in bytes,
+** of the dynamic string under construction in [sqlite3_str] object X.
+** ^The length returned by [sqlite3_str_length(X)] does not include the
+** zero-termination byte.
+**
+** ^The [sqlite3_str_value(X)] method returns a pointer to the current
+** content of the dynamic string under construction in X. The value
+** returned by [sqlite3_str_value(X)] is managed by the sqlite3_str object X
+** and might be freed or altered by any subsequent method on the same
+** [sqlite3_str] object. Applications must not use the pointer returned
+** by [sqlite3_str_value(X)] after any subsequent method call on the same
+** object. ^Applications may change the content of the string returned
+** by [sqlite3_str_value(X)] as long as they do not write into any bytes
+** outside the range of 0 to [sqlite3_str_length(X)] and do not read or
+** write any byte after any subsequent sqlite3_str method call.
+*/
+SQLITE_API int sqlite3_str_errcode(sqlite3_str*);
+SQLITE_API int sqlite3_str_length(sqlite3_str*);
+SQLITE_API char *sqlite3_str_value(sqlite3_str*);
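+
+/*
+** Illustrative sketch (not part of the official documentation): build a
+** SQL statement with the dynamic string interface, checking the error
+** code before using the result.  The table name and zText variable are
+** assumptions for the example.
+**
+**   sqlite3_str *pStr = sqlite3_str_new(db);       /* db may be NULL */
+**   sqlite3_str_appendf(pStr, "INSERT INTO t VALUES(%Q)", zText);
+**   if( sqlite3_str_errcode(pStr)==SQLITE_OK ){
+**     char *zSql = sqlite3_str_finish(pStr);
+**     /* ... use zSql ... */
+**     sqlite3_free(zSql);
+**   }else{
+**     sqlite3_free(sqlite3_str_finish(pStr));      /* discard */
+**   }
+*/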
+
+/*
+** CAPI3REF: SQLite Runtime Status
+**
+** ^These interfaces are used to retrieve runtime status information
+** about the performance of SQLite, and optionally to reset various
+** highwater marks. ^The first argument is an integer code for
+** the specific parameter to measure. ^(Recognized integer codes
+** are of the form [status parameters | SQLITE_STATUS_...].)^
+** ^The current value of the parameter is returned into *pCurrent.
+** ^The highest recorded value is returned in *pHighwater. ^If the
+** resetFlag is true, then the highest recorded value is reset after
+** *pHighwater is written. ^(Some parameters do not record the highest
+** value. For those parameters
+** nothing is written into *pHighwater and the resetFlag is ignored.)^
+** ^(Other parameters record only the highwater mark and not the current
+** value. For these latter parameters nothing is written into *pCurrent.)^
+**
+** ^The sqlite3_status() and sqlite3_status64() routines return
+** SQLITE_OK on success and a non-zero [error code] on failure.
+**
+** If either the current value or the highwater mark is too large to
+** be represented by a 32-bit integer, then the values returned by
+** sqlite3_status() are undefined.
+**
+** See also: [sqlite3_db_status()]
+*/
+SQLITE_API int sqlite3_status(int op, int *pCurrent, int *pHighwater, int resetFlag);
+SQLITE_API int sqlite3_status64(
+ int op,
+ sqlite3_int64 *pCurrent,
+ sqlite3_int64 *pHighwater,
+ int resetFlag
+);
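+
+/*
+** Illustrative sketch (not part of the official documentation): sample
+** the current and peak heap usage, resetting the highwater mark
+** afterwards.
+**
+**   sqlite3_int64 iCur = 0, iHi = 0;
+**   sqlite3_status64(SQLITE_STATUS_MEMORY_USED, &iCur, &iHi, 1);
+*/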
+
+
+/*
+** CAPI3REF: Status Parameters
+** KEYWORDS: {status parameters}
+**
+** These integer constants designate various run-time status parameters
+** that can be returned by [sqlite3_status()].
+**
+** <dl>
+** [[SQLITE_STATUS_MEMORY_USED]] ^(<dt>SQLITE_STATUS_MEMORY_USED</dt>
+** <dd>This parameter is the current amount of memory checked out
+** using [sqlite3_malloc()], either directly or indirectly. The
+** figure includes calls made to [sqlite3_malloc()] by the application
+** and internal memory usage by the SQLite library. Auxiliary page-cache
+** memory controlled by [SQLITE_CONFIG_PAGECACHE] is not included in
+** this parameter. The amount returned is the sum of the allocation
+** sizes as reported by the xSize method in [sqlite3_mem_methods].</dd>)^
+**
+** [[SQLITE_STATUS_MALLOC_SIZE]] ^(<dt>SQLITE_STATUS_MALLOC_SIZE</dt>
+** <dd>This parameter records the largest memory allocation request
+** handed to [sqlite3_malloc()] or [sqlite3_realloc()] (or their
+** internal equivalents). Only the value returned in the
+** *pHighwater parameter to [sqlite3_status()] is of interest.
+** The value written into the *pCurrent parameter is undefined.</dd>)^
+**
+** [[SQLITE_STATUS_MALLOC_COUNT]] ^(<dt>SQLITE_STATUS_MALLOC_COUNT</dt>
+** <dd>This parameter records the number of separate memory allocations
+** currently checked out.</dd>)^
+**
+** [[SQLITE_STATUS_PAGECACHE_USED]] ^(<dt>SQLITE_STATUS_PAGECACHE_USED</dt>
+** <dd>This parameter returns the number of pages used out of the
+** [pagecache memory allocator] that was configured using
+** [SQLITE_CONFIG_PAGECACHE]. The
+** value returned is in pages, not in bytes.</dd>)^
+**
+** [[SQLITE_STATUS_PAGECACHE_OVERFLOW]]
+** ^(<dt>SQLITE_STATUS_PAGECACHE_OVERFLOW</dt>
+** <dd>This parameter returns the number of bytes of page cache
+** allocation which could not be satisfied by the [SQLITE_CONFIG_PAGECACHE]
+** buffer and were forced to overflow to [sqlite3_malloc()]. The
+** returned value includes allocations that overflowed because they
+** were too large (they were larger than the "sz" parameter to
+** [SQLITE_CONFIG_PAGECACHE]) and allocations that overflowed because
+** no space was left in the page cache.</dd>)^
+**
+** [[SQLITE_STATUS_PAGECACHE_SIZE]] ^(<dt>SQLITE_STATUS_PAGECACHE_SIZE</dt>
+** <dd>This parameter records the largest memory allocation request
+** handed to the [pagecache memory allocator]. Only the value returned in the
+** *pHighwater parameter to [sqlite3_status()] is of interest.
+** The value written into the *pCurrent parameter is undefined.</dd>)^
+**
+** [[SQLITE_STATUS_SCRATCH_USED]] <dt>SQLITE_STATUS_SCRATCH_USED</dt>
+** <dd>No longer used.</dd>
+**
+** [[SQLITE_STATUS_SCRATCH_OVERFLOW]] ^(<dt>SQLITE_STATUS_SCRATCH_OVERFLOW</dt>
+** <dd>No longer used.</dd>
+**
+** [[SQLITE_STATUS_SCRATCH_SIZE]] <dt>SQLITE_STATUS_SCRATCH_SIZE</dt>
+** <dd>No longer used.</dd>
+**
+** [[SQLITE_STATUS_PARSER_STACK]] ^(<dt>SQLITE_STATUS_PARSER_STACK</dt>
+** <dd>The *pHighwater parameter records the deepest parser stack.
+** The *pCurrent value is undefined. The *pHighwater value is only
+** meaningful if SQLite is compiled with [YYTRACKMAXSTACKDEPTH].</dd>)^
+** </dl>
+**
+** New status parameters may be added from time to time.
+*/
+#define SQLITE_STATUS_MEMORY_USED 0
+#define SQLITE_STATUS_PAGECACHE_USED 1
+#define SQLITE_STATUS_PAGECACHE_OVERFLOW 2
+#define SQLITE_STATUS_SCRATCH_USED 3 /* NOT USED */
+#define SQLITE_STATUS_SCRATCH_OVERFLOW 4 /* NOT USED */
+#define SQLITE_STATUS_MALLOC_SIZE 5
+#define SQLITE_STATUS_PARSER_STACK 6
+#define SQLITE_STATUS_PAGECACHE_SIZE 7
+#define SQLITE_STATUS_SCRATCH_SIZE 8 /* NOT USED */
+#define SQLITE_STATUS_MALLOC_COUNT 9
+
+/*
+** CAPI3REF: Database Connection Status
+** METHOD: sqlite3
+**
+** ^This interface is used to retrieve runtime status information
+** about a single [database connection]. ^The first argument is the
+** database connection object to be interrogated. ^The second argument
+** is an integer constant, taken from the set of
+** [SQLITE_DBSTATUS options], that
+** determines the parameter to interrogate. The set of
+** [SQLITE_DBSTATUS options] is likely
+** to grow in future releases of SQLite.
+**
+** ^The current value of the requested parameter is written into *pCur
+** and the highest instantaneous value is written into *pHiwtr. ^If
+** the resetFlg is true, then the highest instantaneous value is
+** reset back down to the current value.
+**
+** ^The sqlite3_db_status() routine returns SQLITE_OK on success and a
+** non-zero [error code] on failure.
+**
+** See also: [sqlite3_status()] and [sqlite3_stmt_status()].
+*/
+SQLITE_API int sqlite3_db_status(sqlite3*, int op, int *pCur, int *pHiwtr, int resetFlg);
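+
+/*
+** Illustrative sketch (not part of the official documentation): query
+** the approximate pager-cache memory in use by one connection.  The
+** highwater value for this verb is always 0, as noted below.
+**
+**   int nCur = 0, nHi = 0;
+**   sqlite3_db_status(db, SQLITE_DBSTATUS_CACHE_USED, &nCur, &nHi, 0);
+*/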
+
+/*
+** CAPI3REF: Status Parameters for database connections
+** KEYWORDS: {SQLITE_DBSTATUS options}
+**
+** These constants are the available integer "verbs" that can be passed as
+** the second argument to the [sqlite3_db_status()] interface.
+**
+** New verbs may be added in future releases of SQLite. Existing verbs
+** might be discontinued. Applications should check the return code from
+** [sqlite3_db_status()] to make sure that the call worked.
+** The [sqlite3_db_status()] interface will return a non-zero error code
+** if a discontinued or unsupported verb is invoked.
+**
+** <dl>
+** [[SQLITE_DBSTATUS_LOOKASIDE_USED]] ^(<dt>SQLITE_DBSTATUS_LOOKASIDE_USED</dt>
+** <dd>This parameter returns the number of lookaside memory slots currently
+** checked out.</dd>)^
+**
+** [[SQLITE_DBSTATUS_LOOKASIDE_HIT]] ^(<dt>SQLITE_DBSTATUS_LOOKASIDE_HIT</dt>
+** <dd>This parameter returns the number of malloc attempts that were
+** satisfied using lookaside memory. Only the high-water value is meaningful;
+** the current value is always zero.)^
+**
+** [[SQLITE_DBSTATUS_LOOKASIDE_MISS_SIZE]]
+** ^(<dt>SQLITE_DBSTATUS_LOOKASIDE_MISS_SIZE</dt>
+** <dd>This parameter returns the number of malloc attempts that might have
+** been satisfied using lookaside memory but failed due to the amount of
+** memory requested being larger than the lookaside slot size.
+** Only the high-water value is meaningful;
+** the current value is always zero.)^
+**
+** [[SQLITE_DBSTATUS_LOOKASIDE_MISS_FULL]]
+** ^(<dt>SQLITE_DBSTATUS_LOOKASIDE_MISS_FULL</dt>
+** <dd>This parameter returns the number of malloc attempts that might have
+** been satisfied using lookaside memory but failed due to all lookaside
+** memory already being in use.
+** Only the high-water value is meaningful;
+** the current value is always zero.)^
+**
+** [[SQLITE_DBSTATUS_CACHE_USED]] ^(<dt>SQLITE_DBSTATUS_CACHE_USED</dt>
+** <dd>This parameter returns the approximate number of bytes of heap
+** memory used by all pager caches associated with the database connection.)^
+** ^The highwater mark associated with SQLITE_DBSTATUS_CACHE_USED is always 0.
+**
+** [[SQLITE_DBSTATUS_CACHE_USED_SHARED]]
+** ^(<dt>SQLITE_DBSTATUS_CACHE_USED_SHARED</dt>
+** <dd>This parameter is similar to DBSTATUS_CACHE_USED, except that if a
+** pager cache is shared between two or more connections the bytes of heap
+** memory used by that pager cache are divided evenly between the attached
+** connections.)^ In other words, if none of the pager caches associated
+** with the database connection are shared, this request returns the same
+** value as DBSTATUS_CACHE_USED. Or, if one or more of the pager caches are
+** shared, the value returned by this call will be smaller than that returned
+** by DBSTATUS_CACHE_USED. ^The highwater mark associated with
+** SQLITE_DBSTATUS_CACHE_USED_SHARED is always 0.
+**
+** [[SQLITE_DBSTATUS_SCHEMA_USED]] ^(<dt>SQLITE_DBSTATUS_SCHEMA_USED</dt>
+** <dd>This parameter returns the approximate number of bytes of heap
+** memory used to store the schema for all databases associated
+** with the connection - main, temp, and any [ATTACH]-ed databases.)^
+** ^The full amount of memory used by the schemas is reported, even if the
+** schema memory is shared with other database connections due to
+** [shared cache mode] being enabled.
+** ^The highwater mark associated with SQLITE_DBSTATUS_SCHEMA_USED is always 0.
+**
+** [[SQLITE_DBSTATUS_STMT_USED]] ^(<dt>SQLITE_DBSTATUS_STMT_USED</dt>
+** <dd>This parameter returns the approximate number of bytes of heap
+** and lookaside memory used by all prepared statements associated with
+** the database connection.)^
+** ^The highwater mark associated with SQLITE_DBSTATUS_STMT_USED is always 0.
+** </dd>
+**
+** [[SQLITE_DBSTATUS_CACHE_HIT]] ^(<dt>SQLITE_DBSTATUS_CACHE_HIT</dt>
+** <dd>This parameter returns the number of pager cache hits that have
+** occurred.)^ ^The highwater mark associated with SQLITE_DBSTATUS_CACHE_HIT
+** is always 0.
+** </dd>
+**
+** [[SQLITE_DBSTATUS_CACHE_MISS]] ^(<dt>SQLITE_DBSTATUS_CACHE_MISS</dt>
+** <dd>This parameter returns the number of pager cache misses that have
+** occurred.)^ ^The highwater mark associated with SQLITE_DBSTATUS_CACHE_MISS
+** is always 0.
+** </dd>
+**
+** [[SQLITE_DBSTATUS_CACHE_WRITE]] ^(<dt>SQLITE_DBSTATUS_CACHE_WRITE</dt>
+** <dd>This parameter returns the number of dirty cache entries that have
+** been written to disk. Specifically, the number of pages written to the
+** wal file in wal mode databases, or the number of pages written to the
+** database file in rollback mode databases. Any pages written as part of
+** transaction rollback or database recovery operations are not included.
+** If an IO or other error occurs while writing a page to disk, the effect
+** on subsequent SQLITE_DBSTATUS_CACHE_WRITE requests is undefined.)^ ^The
+** highwater mark associated with SQLITE_DBSTATUS_CACHE_WRITE is always 0.
+** </dd>
+**
+** [[SQLITE_DBSTATUS_CACHE_SPILL]] ^(<dt>SQLITE_DBSTATUS_CACHE_SPILL</dt>
+** <dd>This parameter returns the number of dirty cache entries that have
+** been written to disk in the middle of a transaction due to the page
+** cache overflowing. Transactions are more efficient if they are written
+** to disk all at once. When pages spill mid-transaction, that introduces
+** additional overhead. This parameter can be used to help identify
+** inefficiencies that can be resolved by increasing the cache size.
+** </dd>
+**
+** [[SQLITE_DBSTATUS_DEFERRED_FKS]] ^(<dt>SQLITE_DBSTATUS_DEFERRED_FKS</dt>
+** <dd>This parameter returns zero for the current value if and only if
+** all foreign key constraints (deferred or immediate) have been
+** resolved.)^ ^The highwater mark is always 0.
+** </dd>
+** </dl>
+*/
+#define SQLITE_DBSTATUS_LOOKASIDE_USED 0
+#define SQLITE_DBSTATUS_CACHE_USED 1
+#define SQLITE_DBSTATUS_SCHEMA_USED 2
+#define SQLITE_DBSTATUS_STMT_USED 3
+#define SQLITE_DBSTATUS_LOOKASIDE_HIT 4
+#define SQLITE_DBSTATUS_LOOKASIDE_MISS_SIZE 5
+#define SQLITE_DBSTATUS_LOOKASIDE_MISS_FULL 6
+#define SQLITE_DBSTATUS_CACHE_HIT 7
+#define SQLITE_DBSTATUS_CACHE_MISS 8
+#define SQLITE_DBSTATUS_CACHE_WRITE 9
+#define SQLITE_DBSTATUS_DEFERRED_FKS 10
+#define SQLITE_DBSTATUS_CACHE_USED_SHARED 11
+#define SQLITE_DBSTATUS_CACHE_SPILL 12
+#define SQLITE_DBSTATUS_MAX 12 /* Largest defined DBSTATUS */
+
+
+/*
+** CAPI3REF: Prepared Statement Status
+** METHOD: sqlite3_stmt
+**
+** ^(Each prepared statement maintains various
+** [SQLITE_STMTSTATUS counters] that measure the number
+** of times it has performed specific operations.)^ These counters can
+** be used to monitor the performance characteristics of the prepared
+** statements. For example, if the number of table steps greatly exceeds
+** the number of table searches or result rows, that would tend to indicate
+** that the prepared statement is using a full table scan rather than
+** an index.
+**
+** ^(This interface is used to retrieve and reset counter values from
+** a [prepared statement]. The first argument is the prepared statement
+** object to be interrogated. The second argument
+** is an integer code for a specific [SQLITE_STMTSTATUS counter]
+** to be interrogated.)^
+** ^The current value of the requested counter is returned.
+** ^If the resetFlg is true, then the counter is reset to zero after this
+** interface call returns.
+**
+** See also: [sqlite3_status()] and [sqlite3_db_status()].
+*/
+SQLITE_API int sqlite3_stmt_status(sqlite3_stmt*, int op,int resetFlg);
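+
+/*
+** Illustrative sketch (not part of the official documentation): after
+** running a statement, check whether it performed full-table-scan
+** steps, then reset the counter.
+**
+**   int nScan = sqlite3_stmt_status(pStmt,
+**                   SQLITE_STMTSTATUS_FULLSCAN_STEP, 1);
+**   if( nScan>0 ){
+**     /* consider adding an index; see the counter notes below */
+**   }
+*/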
+
+/*
+** CAPI3REF: Status Parameters for prepared statements
+** KEYWORDS: {SQLITE_STMTSTATUS counter} {SQLITE_STMTSTATUS counters}
+**
+** These preprocessor macros define integer codes that name counter
+** values associated with the [sqlite3_stmt_status()] interface.
+** The meanings of the various counters are as follows:
+**
+** <dl>
+** [[SQLITE_STMTSTATUS_FULLSCAN_STEP]] <dt>SQLITE_STMTSTATUS_FULLSCAN_STEP</dt>
+** <dd>^This is the number of times that SQLite has stepped forward in
+** a table as part of a full table scan. Large numbers for this counter
+** may indicate opportunities for performance improvement through
+** careful use of indices.</dd>
+**
+** [[SQLITE_STMTSTATUS_SORT]] <dt>SQLITE_STMTSTATUS_SORT</dt>
+** <dd>^This is the number of sort operations that have occurred.
+** A non-zero value in this counter may indicate an opportunity to
+** improve performance through careful use of indices.</dd>
+**
+** [[SQLITE_STMTSTATUS_AUTOINDEX]] <dt>SQLITE_STMTSTATUS_AUTOINDEX</dt>
+** <dd>^This is the number of rows inserted into transient indices that
+** were created automatically in order to help joins run faster.
+** A non-zero value in this counter may indicate an opportunity to
+** improve performance by adding permanent indices that do not
+** need to be reinitialized each time the statement is run.</dd>
+**
+** [[SQLITE_STMTSTATUS_VM_STEP]] <dt>SQLITE_STMTSTATUS_VM_STEP</dt>
+** <dd>^This is the number of virtual machine operations executed
+** by the prepared statement if that number is less than or equal
+** to 2147483647. The number of virtual machine operations can be
+** used as a proxy for the total work done by the prepared statement.
+** If the number of virtual machine operations exceeds 2147483647
+** then the value returned by this statement status code is undefined.
+**
+** [[SQLITE_STMTSTATUS_REPREPARE]] <dt>SQLITE_STMTSTATUS_REPREPARE</dt>
+** <dd>^This is the number of times that the prepared statement has been
+** automatically regenerated due to schema changes or changes to
+** [bound parameters] that might affect the query plan.
+**
+** [[SQLITE_STMTSTATUS_RUN]] <dt>SQLITE_STMTSTATUS_RUN</dt>
+** <dd>^This is the number of times that the prepared statement has
+** been run. A single "run" for the purposes of this counter is one
+** or more calls to [sqlite3_step()] followed by a call to [sqlite3_reset()].
+** The counter is incremented on the first [sqlite3_step()] call of each
+** cycle.
+**
+** [[SQLITE_STMTSTATUS_MEMUSED]] <dt>SQLITE_STMTSTATUS_MEMUSED</dt>
+** <dd>^This is the approximate number of bytes of heap memory
+** used to store the prepared statement. ^This value is not actually
+** a counter, and so the resetFlg parameter to sqlite3_stmt_status()
+** is ignored when the opcode is SQLITE_STMTSTATUS_MEMUSED.
+** </dd>
+** </dl>
+*/
+#define SQLITE_STMTSTATUS_FULLSCAN_STEP 1
+#define SQLITE_STMTSTATUS_SORT 2
+#define SQLITE_STMTSTATUS_AUTOINDEX 3
+#define SQLITE_STMTSTATUS_VM_STEP 4
+#define SQLITE_STMTSTATUS_REPREPARE 5
+#define SQLITE_STMTSTATUS_RUN 6
+#define SQLITE_STMTSTATUS_MEMUSED 99
+
+/*
+** CAPI3REF: Custom Page Cache Object
+**
+** The sqlite3_pcache type is opaque. It is implemented by
+** the pluggable module. The SQLite core has no knowledge of
+** its size or internal structure and never deals with the
+** sqlite3_pcache object except by holding and passing pointers
+** to the object.
+**
+** See [sqlite3_pcache_methods2] for additional information.
+*/
+typedef struct sqlite3_pcache sqlite3_pcache;
+
+/*
+** CAPI3REF: Custom Page Cache Object
+**
+** The sqlite3_pcache_page object represents a single page in the
+** page cache. The page cache will allocate instances of this
+** object. Various methods of the page cache use pointers to instances
+** of this object as parameters or as their return value.
+**
+** See [sqlite3_pcache_methods2] for additional information.
+*/
+typedef struct sqlite3_pcache_page sqlite3_pcache_page;
+struct sqlite3_pcache_page {
+ void *pBuf; /* The content of the page */
+ void *pExtra; /* Extra information associated with the page */
+};
+
+/*
+** CAPI3REF: Application Defined Page Cache.
+** KEYWORDS: {page cache}
+**
+** ^(The [sqlite3_config]([SQLITE_CONFIG_PCACHE2], ...) interface can
+** register an alternative page cache implementation by passing in an
+** instance of the sqlite3_pcache_methods2 structure.)^
+** In many applications, most of the heap memory allocated by
+** SQLite is used for the page cache.
+** By implementing a
+** custom page cache using this API, an application can better control
+** the amount of memory consumed by SQLite, the way in which
+** that memory is allocated and released, and the policies used to
+** determine exactly which parts of a database file are cached and for
+** how long.
+**
+** The alternative page cache mechanism is an
+** extreme measure that is only needed by the most demanding applications.
+** The built-in page cache is recommended for most uses.
+**
+** ^(The contents of the sqlite3_pcache_methods2 structure are copied to an
+** internal buffer by SQLite within the call to [sqlite3_config]. Hence
+** the application may discard the parameter after the call to
+** [sqlite3_config()] returns.)^
+**
+** [[the xInit() page cache method]]
+** ^(The xInit() method is called once for each effective
+** call to [sqlite3_initialize()])^
+** (usually only once during the lifetime of the process). ^(The xInit()
+** method is passed a copy of the sqlite3_pcache_methods2.pArg value.)^
+** The intent of the xInit() method is to set up global data structures
+** required by the custom page cache implementation.
+** ^(If the xInit() method is NULL, then the
+** built-in default page cache is used instead of the application defined
+** page cache.)^
+**
+** [[the xShutdown() page cache method]]
+** ^The xShutdown() method is called by [sqlite3_shutdown()].
+** It can be used to clean up
+** any outstanding resources before process shutdown, if required.
+** ^The xShutdown() method may be NULL.
+**
+** ^SQLite automatically serializes calls to the xInit method,
+** so the xInit method need not be threadsafe. ^The
+** xShutdown method is only called from [sqlite3_shutdown()] so it does
+** not need to be threadsafe either. All other methods must be threadsafe
+** in multithreaded applications.
+**
+** ^SQLite will never invoke xInit() more than once without an intervening
+** call to xShutdown().
+**
+** [[the xCreate() page cache methods]]
+** ^SQLite invokes the xCreate() method to construct a new cache instance.
+** SQLite will typically create one cache instance for each open database file,
+** though this is not guaranteed. ^The
+** first parameter, szPage, is the size in bytes of the pages that must
+** be allocated by the cache. ^szPage will always be a power of two. ^The
+** second parameter szExtra is a number of bytes of extra storage
+** associated with each page cache entry. ^The szExtra parameter will be
+** a number less than 250. SQLite will use the
+** extra szExtra bytes on each page to store metadata about the underlying
+** database page on disk. The value passed into szExtra depends
+** on the SQLite version, the target platform, and how SQLite was compiled.
+** ^The third argument to xCreate(), bPurgeable, is true if the cache being
+** created will be used to cache database pages of a file stored on disk, or
+** false if it is used for an in-memory database. The cache implementation
+** does not have to do anything special based on the value of bPurgeable;
+** it is purely advisory. ^On a cache where bPurgeable is false, SQLite will
+** never invoke xUnpin() except to deliberately delete a page.
+** ^In other words, calls to xUnpin() on a cache with bPurgeable set to
+** false will always have the "discard" flag set to true.
+** ^Hence, a cache created with bPurgeable false will
+** never contain any unpinned pages.
+**
+** [[the xCachesize() page cache method]]
+** ^(The xCachesize() method may be called at any time by SQLite to set the
+** suggested maximum cache-size (the number of pages stored by the cache
+** instance passed as the first argument). This is the value configured using
+** the SQLite "[PRAGMA cache_size]" command.)^ As with the bPurgeable
+** parameter, the implementation is not required to do anything with this
+** value; it is advisory only.
+**
+** [[the xPagecount() page cache methods]]
+** The xPagecount() method must return the number of pages currently
+** stored in the cache, both pinned and unpinned.
+**
+** [[the xFetch() page cache methods]]
+** The xFetch() method locates a page in the cache and returns a pointer to
+** an sqlite3_pcache_page object associated with that page, or a NULL pointer.
+** The pBuf element of the returned sqlite3_pcache_page object will be a
+** pointer to a buffer of szPage bytes used to store the content of a
+** single database page. The pExtra element of sqlite3_pcache_page will be
+** a pointer to the szExtra bytes of extra storage that SQLite has requested
+** for each entry in the page cache.
+**
+** The page to be fetched is determined by the key. ^The minimum key value
+** is 1. After it has been retrieved using xFetch, the page is considered
+** to be "pinned".
+**
+** If the requested page is already in the page cache, then the page cache
+** implementation must return a pointer to the page buffer with its content
+** intact. If the requested page is not already in the cache, then the
+** cache implementation should use the value of the createFlag
+** parameter to help it determine what action to take:
+**
+** <table border=1 width=85% align=center>
+** <tr><th> createFlag <th> Behavior when page is not already in cache
+** <tr><td> 0 <td> Do not allocate a new page. Return NULL.
+** <tr><td> 1 <td> Allocate a new page if it is easy and convenient to do so.
+** Otherwise return NULL.
+** <tr><td> 2 <td> Make every effort to allocate a new page. Only return
+** NULL if allocating a new page is effectively impossible.
+** </table>
+**
+** ^(SQLite will normally invoke xFetch() with a createFlag of 0 or 1. SQLite
+** will only use a createFlag of 2 after a prior call with a createFlag of 1
+** failed.)^ In between the xFetch() calls, SQLite may
+** attempt to unpin one or more cache pages by spilling the content of
+** pinned pages to disk and synching the operating system disk cache.
+**
+** [[the xUnpin() page cache method]]
+** ^xUnpin() is called by SQLite with a pointer to a currently pinned page
+** as its second argument. If the third parameter, discard, is non-zero,
+** then the page must be evicted from the cache.
+** ^If the discard parameter is
+** zero, then the page may be discarded or retained at the discretion of
+** the page cache implementation. ^The page cache implementation
+** may choose to evict unpinned pages at any time.
+**
+** The cache must not perform any reference counting. A single
+** call to xUnpin() unpins the page regardless of the number of prior calls
+** to xFetch().
+**
+** [[the xRekey() page cache methods]]
+** The xRekey() method is used to change the key value associated with the
+** page passed as the second argument. If the cache already
+** contains an entry associated with newKey, it must be
+** discarded. ^Any prior cache entry associated with newKey is guaranteed not
+** to be pinned.
+**
+** When SQLite calls the xTruncate() method, the cache must discard all
+** existing cache entries with page numbers (keys) greater than or equal
+** to the value of the iLimit parameter passed to xTruncate(). If any
+** of these pages are pinned, they are implicitly unpinned, meaning that
+** they can be safely discarded.
+**
+** [[the xDestroy() page cache method]]
+** ^The xDestroy() method is used to delete a cache allocated by xCreate().
+** All resources associated with the specified cache should be freed. ^After
+** calling the xDestroy() method, SQLite considers the [sqlite3_pcache*]
+** handle invalid, and will not use it with any other sqlite3_pcache_methods2
+** functions.
+**
+** [[the xShrink() page cache method]]
+** ^SQLite invokes the xShrink() method when it wants the page cache to
+** free up as much heap memory as possible. The page cache implementation
+** is not obligated to free any memory, but well-behaved implementations should
+** do their best.
+*/
+typedef struct sqlite3_pcache_methods2 sqlite3_pcache_methods2;
+struct sqlite3_pcache_methods2 {
+ int iVersion;
+ void *pArg;
+ int (*xInit)(void*);
+ void (*xShutdown)(void*);
+ sqlite3_pcache *(*xCreate)(int szPage, int szExtra, int bPurgeable);
+ void (*xCachesize)(sqlite3_pcache*, int nCachesize);
+ int (*xPagecount)(sqlite3_pcache*);
+ sqlite3_pcache_page *(*xFetch)(sqlite3_pcache*, unsigned key, int createFlag);
+ void (*xUnpin)(sqlite3_pcache*, sqlite3_pcache_page*, int discard);
+ void (*xRekey)(sqlite3_pcache*, sqlite3_pcache_page*,
+ unsigned oldKey, unsigned newKey);
+ void (*xTruncate)(sqlite3_pcache*, unsigned iLimit);
+ void (*xDestroy)(sqlite3_pcache*);
+ void (*xShrink)(sqlite3_pcache*);
+};
+
+/*
+** This is the obsolete pcache_methods object that has now been replaced
+** by sqlite3_pcache_methods2. This object is not used by SQLite. It is
+** retained in the header file for backwards compatibility only.
+*/
+typedef struct sqlite3_pcache_methods sqlite3_pcache_methods;
+struct sqlite3_pcache_methods {
+ void *pArg;
+ int (*xInit)(void*);
+ void (*xShutdown)(void*);
+ sqlite3_pcache *(*xCreate)(int szPage, int bPurgeable);
+ void (*xCachesize)(sqlite3_pcache*, int nCachesize);
+ int (*xPagecount)(sqlite3_pcache*);
+ void *(*xFetch)(sqlite3_pcache*, unsigned key, int createFlag);
+ void (*xUnpin)(sqlite3_pcache*, void*, int discard);
+ void (*xRekey)(sqlite3_pcache*, void*, unsigned oldKey, unsigned newKey);
+ void (*xTruncate)(sqlite3_pcache*, unsigned iLimit);
+ void (*xDestroy)(sqlite3_pcache*);
+};
+
+
+/*
+** CAPI3REF: Online Backup Object
+**
+** The sqlite3_backup object records state information about an ongoing
+** online backup operation. ^The sqlite3_backup object is created by
+** a call to [sqlite3_backup_init()] and is destroyed by a call to
+** [sqlite3_backup_finish()].
+**
+** See Also: [Using the SQLite Online Backup API]
+*/
+typedef struct sqlite3_backup sqlite3_backup;
+
+/*
+** CAPI3REF: Online Backup API.
+**
+** The backup API copies the content of one database into another.
+** It is useful either for creating backups of databases or
+** for copying in-memory databases to or from persistent files.
+**
+** See Also: [Using the SQLite Online Backup API]
+**
+** ^SQLite holds a write transaction open on the destination database file
+** for the duration of the backup operation.
+** ^The source database is read-locked only while it is being read;
+** it is not locked continuously for the entire backup operation.
+** ^Thus, the backup may be performed on a live source database without
+** preventing other database connections from
+** reading or writing to the source database while the backup is underway.
+**
+** ^(To perform a backup operation:
+** <ol>
+** <li><b>sqlite3_backup_init()</b> is called once to initialize the
+** backup,
+** <li><b>sqlite3_backup_step()</b> is called one or more times to transfer
+** the data between the two databases, and finally
+** <li><b>sqlite3_backup_finish()</b> is called to release all resources
+** associated with the backup operation.
+** </ol>)^
+** There should be exactly one call to sqlite3_backup_finish() for each
+** successful call to sqlite3_backup_init().
+**
+** [[sqlite3_backup_init()]] <b>sqlite3_backup_init()</b>
+**
+** ^The D and N arguments to sqlite3_backup_init(D,N,S,M) are the
+** [database connection] associated with the destination database
+** and the database name, respectively.
+** ^The database name is "main" for the main database, "temp" for the
+** temporary database, or the name specified after the AS keyword in
+** an [ATTACH] statement for an attached database.
+** ^The S and M arguments passed to
+** sqlite3_backup_init(D,N,S,M) identify the [database connection]
+** and database name of the source database, respectively.
+** ^The source and destination [database connections] (parameters S and D)
+** must be different or else sqlite3_backup_init(D,N,S,M) will fail with
+** an error.
+**
+** ^A call to sqlite3_backup_init() will fail, returning NULL, if
+** there is already a read or read-write transaction open on the
+** destination database.
+**
+** ^If an error occurs within sqlite3_backup_init(D,N,S,M), then NULL is
+** returned and an error code and error message are stored in the
+** destination [database connection] D.
+** ^The error code and message for the failed call to sqlite3_backup_init()
+** can be retrieved using the [sqlite3_errcode()], [sqlite3_errmsg()], and/or
+** [sqlite3_errmsg16()] functions.
+** ^A successful call to sqlite3_backup_init() returns a pointer to an
+** [sqlite3_backup] object.
+** ^The [sqlite3_backup] object may be used with the sqlite3_backup_step() and
+** sqlite3_backup_finish() functions to perform the specified backup
+** operation.
+**
+** [[sqlite3_backup_step()]] <b>sqlite3_backup_step()</b>
+**
+** ^Function sqlite3_backup_step(B,N) will copy up to N pages between
+** the source and destination databases specified by [sqlite3_backup] object B.
+** ^If N is negative, all remaining source pages are copied.
+** ^If sqlite3_backup_step(B,N) successfully copies N pages and there
+** are still more pages to be copied, then the function returns [SQLITE_OK].
+** ^If sqlite3_backup_step(B,N) successfully finishes copying all pages
+** from source to destination, then it returns [SQLITE_DONE].
+** ^If an error occurs while running sqlite3_backup_step(B,N),
+** then an [error code] is returned. ^As well as [SQLITE_OK] and
+** [SQLITE_DONE], a call to sqlite3_backup_step() may return [SQLITE_READONLY],
+** [SQLITE_NOMEM], [SQLITE_BUSY], [SQLITE_LOCKED], or an
+** [SQLITE_IOERR_ACCESS | SQLITE_IOERR_XXX] extended error code.
+**
+** ^(A call to sqlite3_backup_step() might return [SQLITE_READONLY] if
+** <ol>
+** <li> the destination database was opened read-only, or
+** <li> the destination database is using write-ahead-log journaling
+** and the destination and source page sizes differ, or
+** <li> the destination database is an in-memory database and the
+** destination and source page sizes differ.
+** </ol>)^
+**
+** ^If sqlite3_backup_step() cannot obtain a required file-system lock, then
+** the [sqlite3_busy_handler | busy-handler function]
+** is invoked (if one is specified). ^If the
+** busy-handler returns non-zero before the lock is available, then
+** [SQLITE_BUSY] is returned to the caller. ^In this case the call to
+** sqlite3_backup_step() can be retried later. ^If the source
+** [database connection]
+** is being used to write to the source database when sqlite3_backup_step()
+** is called, then [SQLITE_LOCKED] is returned immediately. ^Again, in this
+** case the call to sqlite3_backup_step() can be retried later on. ^(If
+** [SQLITE_IOERR_ACCESS | SQLITE_IOERR_XXX], [SQLITE_NOMEM], or
+** [SQLITE_READONLY] is returned, then
+** there is no point in retrying the call to sqlite3_backup_step(). These
+** errors are considered fatal.)^ The application must accept
+** that the backup operation has failed and pass the backup operation handle
+** to sqlite3_backup_finish() to release associated resources.
+**
+** ^The first call to sqlite3_backup_step() obtains an exclusive lock
+** on the destination file. ^The exclusive lock is not released until either
+** sqlite3_backup_finish() is called or the backup operation is complete
+** and sqlite3_backup_step() returns [SQLITE_DONE]. ^Every call to
+** sqlite3_backup_step() obtains a [shared lock] on the source database that
+** lasts for the duration of the sqlite3_backup_step() call.
+** ^Because the source database is not locked between calls to
+** sqlite3_backup_step(), the source database may be modified mid-way
+** through the backup process. ^If the source database is modified by an
+** external process or via a database connection other than the one being
+** used by the backup operation, then the backup will be automatically
+** restarted by the next call to sqlite3_backup_step(). ^If the source
+** database is modified using the same database connection as is used
+** by the backup operation, then the backup database is automatically
+** updated at the same time.
+**
+** [[sqlite3_backup_finish()]] <b>sqlite3_backup_finish()</b>
+**
+** When sqlite3_backup_step() has returned [SQLITE_DONE], or when the
+** application wishes to abandon the backup operation, the application
+** should destroy the [sqlite3_backup] by passing it to sqlite3_backup_finish().
+** ^The sqlite3_backup_finish() interface releases all
+** resources associated with the [sqlite3_backup] object.
+** ^If sqlite3_backup_step() has not yet returned [SQLITE_DONE], then any
+** active write-transaction on the destination database is rolled back.
+** The [sqlite3_backup] object is invalid
+** and may not be used following a call to sqlite3_backup_finish().
+**
+** ^The value returned by sqlite3_backup_finish is [SQLITE_OK] if no
+** sqlite3_backup_step() errors occurred, regardless of whether or not
+** sqlite3_backup_step() completed.
+** ^If an out-of-memory condition or IO error occurred during any prior
+** sqlite3_backup_step() call on the same [sqlite3_backup] object, then
+** sqlite3_backup_finish() returns the corresponding [error code].
+**
+** ^A return of [SQLITE_BUSY] or [SQLITE_LOCKED] from sqlite3_backup_step()
+** is not a permanent error and does not affect the return value of
+** sqlite3_backup_finish().
+**
+** [[sqlite3_backup_remaining()]] [[sqlite3_backup_pagecount()]]
+** <b>sqlite3_backup_remaining() and sqlite3_backup_pagecount()</b>
+**
+** ^The sqlite3_backup_remaining() routine returns the number of pages still
+** to be backed up at the conclusion of the most recent sqlite3_backup_step().
+** ^The sqlite3_backup_pagecount() routine returns the total number of pages
+** in the source database at the conclusion of the most recent
+** sqlite3_backup_step().
+** ^(The values returned by these functions are only updated by
+** sqlite3_backup_step(). If the source database is modified in a way that
+** changes the size of the source database or the number of pages remaining,
+** those changes are not reflected in the output of sqlite3_backup_pagecount()
+** and sqlite3_backup_remaining() until after the next
+** sqlite3_backup_step().)^
+**
+** <b>Concurrent Usage of Database Handles</b>
+**
+** ^The source [database connection] may be used by the application for other
+** purposes while a backup operation is underway or being initialized.
+** ^If SQLite is compiled and configured to support threadsafe database
+** connections, then the source database connection may be used concurrently
+** from within other threads.
+**
+** However, the application must guarantee that the destination
+** [database connection] is not passed to any other API (by any thread) after
+** sqlite3_backup_init() is called and before the corresponding call to
+** sqlite3_backup_finish(). SQLite does not currently check to see
+** if the application incorrectly accesses the destination [database connection]
+** and so no error code is reported, but the operations may malfunction
+** nevertheless. Use of the destination database connection while a
+** backup is in progress might also cause a mutex deadlock.
+**
+** If running in [shared cache mode], the application must
+** guarantee that the shared cache used by the destination database
+** is not accessed while the backup is running. In practice this means
+** that the application must guarantee that the disk file being
+** backed up to is not accessed by any connection within the process,
+** not just the specific connection that was passed to sqlite3_backup_init().
+**
+** The [sqlite3_backup] object itself is partially threadsafe. Multiple
+** threads may safely make multiple concurrent calls to sqlite3_backup_step().
+** However, the sqlite3_backup_remaining() and sqlite3_backup_pagecount()
+** APIs are not strictly speaking threadsafe. If they are invoked at the
+** same time as another thread is invoking sqlite3_backup_step() it is
+** possible that they return invalid values.
+*/
+SQLITE_API sqlite3_backup *sqlite3_backup_init(
+ sqlite3 *pDest, /* Destination database handle */
+ const char *zDestName, /* Destination database name */
+ sqlite3 *pSource, /* Source database handle */
+ const char *zSourceName /* Source database name */
+);
+SQLITE_API int sqlite3_backup_step(sqlite3_backup *p, int nPage);
+SQLITE_API int sqlite3_backup_finish(sqlite3_backup *p);
+SQLITE_API int sqlite3_backup_remaining(sqlite3_backup *p);
+SQLITE_API int sqlite3_backup_pagecount(sqlite3_backup *p);
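+
+/*
+** The loop below is a minimal application-side sketch (not part of the
+** SQLite interface) of the three-step backup procedure described above.
+** It copies the "main" schema of pSrc into the "main" schema of pDst.
+** The 64-page batch size and the 250 ms back-off are arbitrary
+** illustrative choices, not requirements.
+*/
+static int exampleBackupDb(sqlite3 *pDst, sqlite3 *pSrc){
+  int rc;
+  sqlite3_backup *p = sqlite3_backup_init(pDst, "main", pSrc, "main");
+  if( p==0 ) return sqlite3_errcode(pDst);
+  do{
+    rc = sqlite3_backup_step(p, 64);     /* copy up to 64 pages per step */
+    /* sqlite3_backup_remaining(p) and sqlite3_backup_pagecount(p) may be
+    ** consulted here to report progress to the user. */
+    if( rc==SQLITE_BUSY || rc==SQLITE_LOCKED ){
+      sqlite3_sleep(250);                /* back off, then retry the step */
+    }
+  }while( rc==SQLITE_OK || rc==SQLITE_BUSY || rc==SQLITE_LOCKED );
+  return sqlite3_backup_finish(p);       /* SQLITE_OK if the copy completed */
+}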
+
+/*
+** CAPI3REF: Unlock Notification
+** METHOD: sqlite3
+**
+** ^When running in shared-cache mode, a database operation may fail with
+** an [SQLITE_LOCKED] error if the required locks on the shared-cache or
+** individual tables within the shared-cache cannot be obtained. See
+** [SQLite Shared-Cache Mode] for a description of shared-cache locking.
+** ^This API may be used to register a callback that SQLite will invoke
+** when the connection currently holding the required lock relinquishes it.
+** ^This API is only available if the library was compiled with the
+** [SQLITE_ENABLE_UNLOCK_NOTIFY] C-preprocessor symbol defined.
+**
+** See Also: [Using the SQLite Unlock Notification Feature].
+**
+** ^Shared-cache locks are released when a database connection concludes
+** its current transaction, either by committing it or rolling it back.
+**
+** ^When a connection (known as the blocked connection) fails to obtain a
+** shared-cache lock and SQLITE_LOCKED is returned to the caller, the
+** identity of the database connection (the blocking connection) that
+** has locked the required resource is stored internally. ^After an
+** application receives an SQLITE_LOCKED error, it may call the
+** sqlite3_unlock_notify() method with the blocked connection handle as
+** the first argument to register for a callback that will be invoked
+** when the blocking connection's current transaction is concluded. ^The
+** callback is invoked from within the [sqlite3_step] or [sqlite3_close]
+** call that concludes the blocking connection's transaction.
+**
+** ^(If sqlite3_unlock_notify() is called in a multi-threaded application,
+** there is a chance that the blocking connection will have already
+** concluded its transaction by the time sqlite3_unlock_notify() is invoked.
+** If this happens, then the specified callback is invoked immediately,
+** from within the call to sqlite3_unlock_notify().)^
+**
+** ^If the blocked connection is attempting to obtain a write-lock on a
+** shared-cache table, and more than one other connection currently holds
+** a read-lock on the same table, then SQLite arbitrarily selects one of
+** the other connections to use as the blocking connection.
+**
+** ^(There may be at most one unlock-notify callback registered by a
+** blocked connection. If sqlite3_unlock_notify() is called when the
+** blocked connection already has a registered unlock-notify callback,
+** then the new callback replaces the old.)^ ^If sqlite3_unlock_notify() is
+** called with a NULL pointer as its second argument, then any existing
+** unlock-notify callback is canceled. ^The blocked connection's
+** unlock-notify callback may also be canceled by closing the blocked
+** connection using [sqlite3_close()].
+**
+** The unlock-notify callback is not reentrant. If an application invokes
+** any sqlite3_xxx API functions from within an unlock-notify callback, a
+** crash or deadlock may be the result.
+**
+** ^Unless deadlock is detected (see below), sqlite3_unlock_notify() always
+** returns SQLITE_OK.
+**
+** <b>Callback Invocation Details</b>
+**
+** When an unlock-notify callback is registered, the application provides a
+** single void* pointer that is passed to the callback when it is invoked.
+** However, the signature of the callback function allows SQLite to pass
+** it an array of void* context pointers. The first argument passed to
+** an unlock-notify callback is a pointer to an array of void* pointers,
+** and the second is the number of entries in the array.
+**
+** When a blocking connection's transaction is concluded, there may be
+** more than one blocked connection that has registered for an unlock-notify
+** callback. ^If two or more such blocked connections have specified the
+** same callback function, then instead of invoking the callback function
+** multiple times, it is invoked once with the set of void* context pointers
+** specified by the blocked connections, bundled together into an array.
+** This gives the application an opportunity to prioritize any actions
+** related to the set of unblocked database connections.
+**
+** <b>Deadlock Detection</b>
+**
+** Assuming that after registering for an unlock-notify callback a
+** database waits for the callback to be issued before taking any further
+** action (a reasonable assumption), then using this API may cause the
+** application to deadlock. For example, if connection X is waiting for
+** connection Y's transaction to be concluded, and similarly connection
+** Y is waiting on connection X's transaction, then neither connection
+** will proceed and the system may remain deadlocked indefinitely.
+**
+** To avoid this scenario, sqlite3_unlock_notify() performs deadlock
+** detection. ^If a given call to sqlite3_unlock_notify() would put the
+** system in a deadlocked state, then SQLITE_LOCKED is returned and no
+** unlock-notify callback is registered. The system is said to be in
+** a deadlocked state if connection A has registered for an unlock-notify
+** callback on the conclusion of connection B's transaction, and connection
+** B has itself registered for an unlock-notify callback when connection
+** A's transaction is concluded. ^Indirect deadlock is also detected, so
+** the system is also considered to be deadlocked if connection B has
+** registered for an unlock-notify callback on the conclusion of connection
+** C's transaction, where connection C is waiting on connection A. ^Any
+** number of levels of indirection are allowed.
+**
+** <b>The "DROP TABLE" Exception</b>
+**
+** When a call to [sqlite3_step()] returns SQLITE_LOCKED, it is almost
+** always appropriate to call sqlite3_unlock_notify(). There is, however,
+** one exception. When executing a "DROP TABLE" or "DROP INDEX" statement,
+** SQLite checks if there are any currently executing SELECT statements
+** that belong to the same connection. If there are, SQLITE_LOCKED is
+** returned. In this case there is no "blocking connection", so invoking
+** sqlite3_unlock_notify() results in the unlock-notify callback being
+** invoked immediately. If the application then re-attempts the "DROP TABLE"
+** or "DROP INDEX" query, an infinite loop might be the result.
+**
+** One way around this problem is to check the extended error code returned
+** by an sqlite3_step() call. ^(If there is a blocking connection, then the
+** extended error code is set to SQLITE_LOCKED_SHAREDCACHE. Otherwise, in
+** the special "DROP TABLE/INDEX" case, the extended error code is just
+** SQLITE_LOCKED.)^
+*/
+SQLITE_API int sqlite3_unlock_notify(
+ sqlite3 *pBlocked, /* Waiting connection */
+ void (*xNotify)(void **apArg, int nArg), /* Callback function to invoke */
+ void *pNotifyArg /* Argument to pass to xNotify */
+);
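+
+/*
+** A sketch of the usual blocking pattern built on sqlite3_unlock_notify(),
+** assuming POSIX threads; the helper below is illustrative and not part of
+** the SQLite interface.  After receiving SQLITE_LOCKED, the application
+** calls exampleWaitForUnlock() and, if it returns SQLITE_OK, retries the
+** failed operation.
+*/
+#include <pthread.h>
+
+typedef struct ExampleUnlockCtx ExampleUnlockCtx;
+struct ExampleUnlockCtx {
+  int fired;                   /* True after the callback has run */
+  pthread_mutex_t mutex;
+  pthread_cond_t cond;
+};
+
+static void exampleUnlockCb(void **apArg, int nArg){
+  int i;
+  for(i=0; i<nArg; i++){
+    ExampleUnlockCtx *p = (ExampleUnlockCtx*)apArg[i];
+    pthread_mutex_lock(&p->mutex);
+    p->fired = 1;
+    pthread_cond_signal(&p->cond);
+    pthread_mutex_unlock(&p->mutex);
+  }
+}
+
+static int exampleWaitForUnlock(sqlite3 *db){
+  int rc;
+  ExampleUnlockCtx ctx;
+  ctx.fired = 0;
+  pthread_mutex_init(&ctx.mutex, 0);
+  pthread_cond_init(&ctx.cond, 0);
+  rc = sqlite3_unlock_notify(db, exampleUnlockCb, (void*)&ctx);
+  if( rc==SQLITE_OK ){
+    pthread_mutex_lock(&ctx.mutex);
+    while( !ctx.fired ) pthread_cond_wait(&ctx.cond, &ctx.mutex);
+    pthread_mutex_unlock(&ctx.mutex);
+  }
+  pthread_cond_destroy(&ctx.cond);
+  pthread_mutex_destroy(&ctx.mutex);
+  return rc;   /* SQLITE_OK, or SQLITE_LOCKED if a deadlock was detected */
+}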
+
+
+/*
+** CAPI3REF: String Comparison
+**
+** ^The [sqlite3_stricmp()] and [sqlite3_strnicmp()] APIs allow applications
+** and extensions to compare the contents of two buffers containing UTF-8
+** strings in a case-independent fashion, using the same definition of "case
+** independence" that SQLite uses internally when comparing identifiers.
+*/
+SQLITE_API int sqlite3_stricmp(const char *, const char *);
+SQLITE_API int sqlite3_strnicmp(const char *, const char *, int);
+
+/*
+** CAPI3REF: String Globbing
+**
+** ^The [sqlite3_strglob(P,X)] interface returns zero if and only if
+** string X matches the [GLOB] pattern P.
+** ^The definition of [GLOB] pattern matching used in
+** [sqlite3_strglob(P,X)] is the same as for the "X GLOB P" operator in the
+** SQL dialect understood by SQLite. ^The [sqlite3_strglob(P,X)] function
+** is case sensitive.
+**
+** Note that this routine returns zero on a match and non-zero if the strings
+** do not match, the same as [sqlite3_stricmp()] and [sqlite3_strnicmp()].
+**
+** See also: [sqlite3_strlike()].
+*/
+SQLITE_API int sqlite3_strglob(const char *zGlob, const char *zStr);
+
+/*
+** CAPI3REF: String LIKE Matching
+**
+** ^The [sqlite3_strlike(P,X,E)] interface returns zero if and only if
+** string X matches the [LIKE] pattern P with escape character E.
+** ^The definition of [LIKE] pattern matching used in
+** [sqlite3_strlike(P,X,E)] is the same as for the "X LIKE P ESCAPE E"
+** operator in the SQL dialect understood by SQLite. ^For "X LIKE P" without
+** the ESCAPE clause, set the E parameter of [sqlite3_strlike(P,X,E)] to 0.
+** ^As with the LIKE operator, the [sqlite3_strlike(P,X,E)] function is case
+** insensitive - equivalent upper and lower case ASCII characters match
+** one another.
+**
+** ^The [sqlite3_strlike(P,X,E)] function matches Unicode characters, though
+** only ASCII characters are case folded.
+**
+** Note that this routine returns zero on a match and non-zero if the strings
+** do not match, the same as [sqlite3_stricmp()] and [sqlite3_strnicmp()].
+**
+** See also: [sqlite3_strglob()].
+*/
+SQLITE_API int sqlite3_strlike(const char *zGlob, const char *zStr, unsigned int cEsc);
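+
+/*
+** Illustrative assertions (not part of the SQLite interface) showing the
+** zero-on-match convention and the case-sensitivity rules noted above.
+** The patterns and strings are arbitrary examples.
+*/
+#include <assert.h>
+static void exampleStringMatch(void){
+  assert( sqlite3_strglob("net*", "netdata")==0 );      /* GLOB match */
+  assert( sqlite3_strglob("net*", "NETDATA")!=0 );      /* GLOB is case sensitive */
+  assert( sqlite3_strlike("net%", "NETDATA", 0)==0 );   /* LIKE is case insensitive */
+  assert( sqlite3_strlike("100\\%", "100%", '\\')==0 ); /* '\' used as ESCAPE */
+}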
+
+/*
+** CAPI3REF: Error Logging Interface
+**
+** ^The [sqlite3_log()] interface writes a message into the [error log]
+** established by the [SQLITE_CONFIG_LOG] option to [sqlite3_config()].
+** ^If logging is enabled, the zFormat string and subsequent arguments are
+** used with [sqlite3_snprintf()] to generate the final output string.
+**
+** The sqlite3_log() interface is intended for use by extensions such as
+** virtual tables, collating functions, and SQL functions. While there is
+** nothing to prevent an application from calling sqlite3_log(), doing so
+** is considered bad form.
+**
+** The zFormat string must not be NULL.
+**
+** To avoid deadlocks and other threading problems, the sqlite3_log() routine
+** will not use dynamically allocated memory. The log message is stored in
+** a fixed-length buffer on the stack. If the log message is longer than
+** a few hundred characters, it will be truncated to the length of the
+** buffer.
+*/
+SQLITE_API void sqlite3_log(int iErrCode, const char *zFormat, ...);
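+
+/*
+** A sketch of routing the error log to stderr.  The callback name and the
+** use of SQLITE_NOTICE are illustrative; SQLITE_CONFIG_LOG must be set
+** before the library is initialized.
+*/
+#include <stdio.h>
+static void exampleLogCb(void *pArg, int iErrCode, const char *zMsg){
+  (void)pArg;
+  fprintf(stderr, "sqlite(%d): %s\n", iErrCode, zMsg);
+}
+/* At startup, before sqlite3_initialize():
+**   sqlite3_config(SQLITE_CONFIG_LOG, exampleLogCb, (void*)0);
+** Thereafter, extensions may write to the log:
+**   sqlite3_log(SQLITE_NOTICE, "unusual condition in %s", "my_vtab");
+*/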
+
+/*
+** CAPI3REF: Write-Ahead Log Commit Hook
+** METHOD: sqlite3
+**
+** ^The [sqlite3_wal_hook()] function is used to register a callback that
+** is invoked each time data is committed to a database in wal mode.
+**
+** ^(The callback is invoked by SQLite after the commit has taken place and
+** the associated write-lock on the database released)^, so the implementation
+** may read, write or [checkpoint] the database as required.
+**
+** ^The first parameter passed to the callback function when it is invoked
+** is a copy of the third parameter passed to sqlite3_wal_hook() when
+** registering the callback. ^The second is a copy of the database handle.
+** ^The third parameter is the name of the database that was written to -
+** either "main" or the name of an [ATTACH]-ed database. ^The fourth parameter
+** is the number of pages currently in the write-ahead log file,
+** including those that were just committed.
+**
+** The callback function should normally return [SQLITE_OK]. ^If an error
+** code is returned, that error will propagate back up through the
+** SQLite code base to cause the statement that provoked the callback
+** to report an error, though the commit will have still occurred. If the
+** callback returns [SQLITE_ROW] or [SQLITE_DONE], or if it returns a value
+** that does not correspond to any valid SQLite error code, the results
+** are undefined.
+**
+** A single database handle may have at most a single write-ahead log callback
+** registered at one time. ^Calling [sqlite3_wal_hook()] replaces any
+** previously registered write-ahead log callback. ^Note that the
+** [sqlite3_wal_autocheckpoint()] interface and the
+** [wal_autocheckpoint pragma] both invoke [sqlite3_wal_hook()] and will
+** overwrite any prior [sqlite3_wal_hook()] settings.
+*/
+SQLITE_API void *sqlite3_wal_hook(
+ sqlite3*,
+ int(*)(void *,sqlite3*,const char*,int),
+ void*
+);
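+
+/*
+** An application-side sketch of a write-ahead log hook that requests a
+** PASSIVE checkpoint once the log grows beyond a threshold (it uses
+** sqlite3_wal_checkpoint_v2(), declared later in this header).  The
+** 2000-frame threshold is an arbitrary illustrative value; for exactly
+** this behavior, sqlite3_wal_autocheckpoint() needs no hand-written hook.
+*/
+static int exampleWalHook(void *pArg, sqlite3 *db, const char *zDb, int nFrame){
+  (void)pArg;
+  if( nFrame>=2000 ){
+    /* Errors are ignored here; a real hook might log them instead */
+    sqlite3_wal_checkpoint_v2(db, zDb, SQLITE_CHECKPOINT_PASSIVE, 0, 0);
+  }
+  return SQLITE_OK;
+}
+/* Registration:  sqlite3_wal_hook(db, exampleWalHook, (void*)0); */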
+
+/*
+** CAPI3REF: Configure an auto-checkpoint
+** METHOD: sqlite3
+**
+** ^The [sqlite3_wal_autocheckpoint(D,N)] interface is a wrapper around
+** [sqlite3_wal_hook()] that causes any database on [database connection] D
+** to automatically [checkpoint]
+** after committing a transaction if there are N or
+** more frames in the [write-ahead log] file. ^Passing zero or
+** a negative value as the nFrame parameter disables automatic
+** checkpoints entirely.
+**
+** ^The callback registered by this function replaces any existing callback
+** registered using [sqlite3_wal_hook()]. ^Likewise, registering a callback
+** using [sqlite3_wal_hook()] disables the automatic checkpoint mechanism
+** configured by this function.
+**
+** ^The [wal_autocheckpoint pragma] can be used to invoke this interface
+** from SQL.
+**
+** ^Checkpoints initiated by this mechanism are
+** [sqlite3_wal_checkpoint_v2|PASSIVE].
+**
+** ^Every new [database connection] defaults to having the auto-checkpoint
+** enabled with a threshold of 1000 or [SQLITE_DEFAULT_WAL_AUTOCHECKPOINT]
+** pages. The use of this interface
+** is only necessary if the default setting is found to be suboptimal
+** for a particular application.
+*/
+SQLITE_API int sqlite3_wal_autocheckpoint(sqlite3 *db, int N);
+
+/*
+** CAPI3REF: Checkpoint a database
+** METHOD: sqlite3
+**
+** ^(The sqlite3_wal_checkpoint(D,X) interface is equivalent to
+** [sqlite3_wal_checkpoint_v2](D,X,[SQLITE_CHECKPOINT_PASSIVE],0,0).)^
+**
+** In brief, sqlite3_wal_checkpoint(D,X) causes the content in the
+** [write-ahead log] for database X on [database connection] D to be
+** transferred into the database file and for the write-ahead log to
+** be reset. See the [checkpointing] documentation for additional
+** information.
+**
+** This interface used to be the only way to cause a checkpoint to
+** occur. But then the newer and more powerful [sqlite3_wal_checkpoint_v2()]
+** interface was added. This interface is retained for backwards
+** compatibility and as a convenience for applications that need to manually
+** start a checkpoint but which do not need the full power (and corresponding
+** complication) of [sqlite3_wal_checkpoint_v2()].
+*/
+SQLITE_API int sqlite3_wal_checkpoint(sqlite3 *db, const char *zDb);
+
+/*
+** CAPI3REF: Checkpoint a database
+** METHOD: sqlite3
+**
+** ^(The sqlite3_wal_checkpoint_v2(D,X,M,L,C) interface runs a checkpoint
+** operation on database X of [database connection] D in mode M. Status
+** information is written back into integers pointed to by L and C.)^
+** ^(The M parameter must be a valid [checkpoint mode]:)^
+**
+** <dl>
+** <dt>SQLITE_CHECKPOINT_PASSIVE<dd>
+** ^Checkpoint as many frames as possible without waiting for any database
+** readers or writers to finish, then sync the database file if all frames
+** in the log were checkpointed. ^The [busy-handler callback]
+** is never invoked in the SQLITE_CHECKPOINT_PASSIVE mode.
+** ^On the other hand, passive mode might leave the checkpoint unfinished
+** if there are concurrent readers or writers.
+**
+** <dt>SQLITE_CHECKPOINT_FULL<dd>
+** ^This mode blocks (it invokes the
+** [sqlite3_busy_handler|busy-handler callback]) until there is no
+** database writer and all readers are reading from the most recent database
+** snapshot. ^It then checkpoints all frames in the log file and syncs the
+** database file. ^This mode blocks new database writers while it is pending,
+** but new database readers are allowed to continue unimpeded.
+**
+** <dt>SQLITE_CHECKPOINT_RESTART<dd>
+** ^This mode works the same way as SQLITE_CHECKPOINT_FULL with the addition
+** that after checkpointing the log file it blocks (calls the
+** [busy-handler callback])
+** until all readers are reading from the database file only. ^This ensures
+** that the next writer will restart the log file from the beginning.
+** ^Like SQLITE_CHECKPOINT_FULL, this mode blocks new
+** database writer attempts while it is pending, but does not impede readers.
+**
+** <dt>SQLITE_CHECKPOINT_TRUNCATE<dd>
+** ^This mode works the same way as SQLITE_CHECKPOINT_RESTART with the
+** addition that it also truncates the log file to zero bytes just prior
+** to a successful return.
+** </dl>
+**
+** ^If pnLog is not NULL, then *pnLog is set to the total number of frames in
+** the log file or to -1 if the checkpoint could not run because
+** of an error or because the database is not in [WAL mode]. ^If pnCkpt is not
+** NULL,then *pnCkpt is set to the total number of checkpointed frames in the
+** log file (including any that were already checkpointed before the function
+** was called) or to -1 if the checkpoint could not run due to an error or
+** because the database is not in WAL mode. ^Note that upon successful
+** completion of an SQLITE_CHECKPOINT_TRUNCATE, the log file will have been
+** truncated to zero bytes and so both *pnLog and *pnCkpt will be set to zero.
+**
+** ^All calls obtain an exclusive "checkpoint" lock on the database file. ^If
+** any other process is running a checkpoint operation at the same time, the
+** lock cannot be obtained and SQLITE_BUSY is returned. ^Even if there is a
+** busy-handler configured, it will not be invoked in this case.
+**
+** ^The SQLITE_CHECKPOINT_FULL, RESTART and TRUNCATE modes also obtain the
+** exclusive "writer" lock on the database file. ^If the writer lock cannot be
+** obtained immediately, and a busy-handler is configured, it is invoked and
+** the writer lock retried until either the busy-handler returns 0 or the lock
+** is successfully obtained. ^The busy-handler is also invoked while waiting for
+** database readers as described above. ^If the busy-handler returns 0 before
+** the writer lock is obtained or while waiting for database readers, the
+** checkpoint operation proceeds from that point in the same way as
+** SQLITE_CHECKPOINT_PASSIVE - checkpointing as many frames as possible
+** without blocking any further. ^SQLITE_BUSY is returned in this case.
+**
+** ^If parameter zDb is NULL or points to a zero length string, then the
+** specified operation is attempted on all WAL databases [attached] to
+** [database connection] db. In this case the
+** values written to output parameters *pnLog and *pnCkpt are undefined. ^If
+** an SQLITE_BUSY error is encountered when processing one or more of the
+** attached WAL databases, the operation is still attempted on any remaining
+** attached databases and SQLITE_BUSY is returned at the end. ^If any other
+** error occurs while processing an attached database, processing is abandoned
+** and the error code is returned to the caller immediately. ^If no error
+** (SQLITE_BUSY or otherwise) is encountered while processing the attached
+** databases, SQLITE_OK is returned.
+**
+** ^If database zDb is the name of an attached database that is not in WAL
+** mode, SQLITE_OK is returned and both *pnLog and *pnCkpt are set to -1. ^If
+** zDb is not NULL (or a zero length string) and is not the name of any
+** attached database, SQLITE_ERROR is returned to the caller.
+**
+** ^Unless it returns SQLITE_MISUSE,
+** the sqlite3_wal_checkpoint_v2() interface
+** sets the error information that is queried by
+** [sqlite3_errcode()] and [sqlite3_errmsg()].
+**
+** ^The [PRAGMA wal_checkpoint] command can be used to invoke this interface
+** from SQL.
+*/
+SQLITE_API int sqlite3_wal_checkpoint_v2(
+ sqlite3 *db, /* Database handle */
+ const char *zDb, /* Name of attached database (or NULL) */
+ int eMode, /* SQLITE_CHECKPOINT_* value */
+ int *pnLog, /* OUT: Size of WAL log in frames */
+ int *pnCkpt /* OUT: Total number of frames checkpointed */
+);
+
+/*
+** CAPI3REF: Checkpoint Mode Values
+** KEYWORDS: {checkpoint mode}
+**
+** These constants define all valid values for the "checkpoint mode" passed
+** as the third parameter to the [sqlite3_wal_checkpoint_v2()] interface.
+** See the [sqlite3_wal_checkpoint_v2()] documentation for details on the
+** meaning of each of these checkpoint modes.
+*/
+#define SQLITE_CHECKPOINT_PASSIVE 0 /* Do as much as possible w/o blocking */
+#define SQLITE_CHECKPOINT_FULL 1 /* Wait for writers, then checkpoint */
+#define SQLITE_CHECKPOINT_RESTART 2 /* Like FULL but wait for readers */
+#define SQLITE_CHECKPOINT_TRUNCATE 3 /* Like RESTART but also truncate WAL */
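+
+/*
+** A sketch of an explicit TRUNCATE checkpoint on the "main" database
+** (illustrative, not part of the SQLite interface).  On success both
+** counters are zero because the log file was reset.
+*/
+static int exampleTruncateWal(sqlite3 *db){
+  int nLog = 0, nCkpt = 0;
+  int rc = sqlite3_wal_checkpoint_v2(db, "main",
+                                     SQLITE_CHECKPOINT_TRUNCATE, &nLog, &nCkpt);
+  /* rc==SQLITE_OK:   nLog==0 and nCkpt==0 (log truncated to zero bytes)
+  ** rc==SQLITE_BUSY: another checkpoint was running, or a reader/writer
+  **                  prevented completion; retry later */
+  return rc;
+}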
+
+/*
+** CAPI3REF: Virtual Table Interface Configuration
+**
+** This function may be called by either the [xConnect] or [xCreate] method
+** of a [virtual table] implementation to configure
+** various facets of the virtual table interface.
+**
+** If this interface is invoked outside the context of an xConnect or
+** xCreate virtual table method then the behavior is undefined.
+**
+** In the call sqlite3_vtab_config(D,C,...) the D parameter is the
+** [database connection] in which the virtual table is being created and
+** which is passed in as the first argument to the [xConnect] or [xCreate]
+** method that is invoking sqlite3_vtab_config(). The C parameter is one
+** of the [virtual table configuration options]. The presence and meaning
+** of parameters after C depend on which [virtual table configuration option]
+** is used.
+*/
+SQLITE_API int sqlite3_vtab_config(sqlite3*, int op, ...);
+
+/*
+** CAPI3REF: Virtual Table Configuration Options
+** KEYWORDS: {virtual table configuration options}
+** KEYWORDS: {virtual table configuration option}
+**
+** These macros define the various options to the
+** [sqlite3_vtab_config()] interface that [virtual table] implementations
+** can use to customize and optimize their behavior.
+**
+** <dl>
+** [[SQLITE_VTAB_CONSTRAINT_SUPPORT]]
+** <dt>SQLITE_VTAB_CONSTRAINT_SUPPORT</dt>
+** <dd>Calls of the form
+** [sqlite3_vtab_config](db,SQLITE_VTAB_CONSTRAINT_SUPPORT,X) are supported,
+** where X is an integer. If X is zero, then the [virtual table] whose
+** [xCreate] or [xConnect] method invoked [sqlite3_vtab_config()] does not
+** support constraints. In this configuration (which is the default) if
+** a call to the [xUpdate] method returns [SQLITE_CONSTRAINT], then the entire
+** statement is rolled back as if [ON CONFLICT | OR ABORT] had been
+** specified as part of the user's SQL statement, regardless of the actual
+** ON CONFLICT mode specified.
+**
+** If X is non-zero, then the virtual table implementation guarantees
+** that if [xUpdate] returns [SQLITE_CONSTRAINT], it will do so before
+** any modifications to internal or persistent data structures have been made.
+** If the [ON CONFLICT] mode is ABORT, FAIL, IGNORE or ROLLBACK, SQLite
+** is able to roll back a statement or database transaction, and abandon
+** or continue processing the current SQL statement as appropriate.
+** If the ON CONFLICT mode is REPLACE and the [xUpdate] method returns
+** [SQLITE_CONSTRAINT], SQLite handles this as if the ON CONFLICT mode
+** had been ABORT.
+**
+** Virtual table implementations that are required to handle OR REPLACE
+** must do so within the [xUpdate] method. If a call to the
+** [sqlite3_vtab_on_conflict()] function indicates that the current ON
+** CONFLICT policy is REPLACE, the virtual table implementation should
+** silently replace the appropriate rows within the xUpdate callback and
+** return SQLITE_OK. Or, if this is not possible, it may return
+** SQLITE_CONSTRAINT, in which case SQLite falls back to OR ABORT
+** constraint handling.
+** </dd>
+**
+** [[SQLITE_VTAB_DIRECTONLY]]<dt>SQLITE_VTAB_DIRECTONLY</dt>
+** <dd>Calls of the form
+** [sqlite3_vtab_config](db,SQLITE_VTAB_DIRECTONLY) from within the
+** [xConnect] or [xCreate] methods of a [virtual table] implementation
+** prohibit that virtual table from being used from within triggers and
+** views.
+** </dd>
+**
+** [[SQLITE_VTAB_INNOCUOUS]]<dt>SQLITE_VTAB_INNOCUOUS</dt>
+** <dd>Calls of the form
+** [sqlite3_vtab_config](db,SQLITE_VTAB_INNOCUOUS) from within the
+** [xConnect] or [xCreate] methods of a [virtual table] implementation
+** identify that virtual table as being safe to use from within triggers
+** and views. Conceptually, the SQLITE_VTAB_INNOCUOUS tag means that the
+** virtual table can do no serious harm even if it is controlled by a
+** malicious hacker. Developers should avoid setting the SQLITE_VTAB_INNOCUOUS
+** flag unless absolutely necessary.
+** </dd>
+** </dl>
+*/
+#define SQLITE_VTAB_CONSTRAINT_SUPPORT 1
+#define SQLITE_VTAB_INNOCUOUS 2
+#define SQLITE_VTAB_DIRECTONLY 3
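+
+/*
+** A fragment of a hypothetical virtual table module showing where
+** sqlite3_vtab_config() is called.  The schema string and the choice of
+** SQLITE_VTAB_DIRECTONLY are illustrative only.
+*/
+#include <string.h>
+static int exampleVtabConnect(sqlite3 *db, void *pAux,
+                              int argc, const char *const*argv,
+                              sqlite3_vtab **ppVtab, char **pzErr){
+  int rc;
+  (void)pAux; (void)argc; (void)argv; (void)pzErr;
+  rc = sqlite3_declare_vtab(db, "CREATE TABLE x(value)");
+  if( rc==SQLITE_OK ){
+    rc = sqlite3_vtab_config(db, SQLITE_VTAB_DIRECTONLY); /* no triggers/views */
+  }
+  if( rc==SQLITE_OK ){
+    *ppVtab = (sqlite3_vtab*)sqlite3_malloc(sizeof(**ppVtab));
+    if( *ppVtab==0 ) return SQLITE_NOMEM;
+    memset(*ppVtab, 0, sizeof(**ppVtab));
+  }
+  return rc;
+}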
+
+/*
+** CAPI3REF: Determine The Virtual Table Conflict Policy
+**
+** This function may only be called from within a call to the [xUpdate] method
+** of a [virtual table] implementation for an INSERT or UPDATE operation. ^The
+** value returned is one of [SQLITE_ROLLBACK], [SQLITE_IGNORE], [SQLITE_FAIL],
+** [SQLITE_ABORT], or [SQLITE_REPLACE], according to the [ON CONFLICT] mode
+** of the SQL statement that triggered the call to the [xUpdate] method of the
+** [virtual table].
+*/
+SQLITE_API int sqlite3_vtab_on_conflict(sqlite3 *);
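+
+/*
+** A fragment of a hypothetical xUpdate implementation.  It assumes the
+** module stores its database handle in an sqlite3_vtab subclass and that
+** SQLITE_VTAB_CONSTRAINT_SUPPORT was enabled in xCreate/xConnect.  The
+** helper decides what to do when an INSERT or UPDATE hits a duplicate row.
+*/
+typedef struct ExampleVtab { sqlite3_vtab base; sqlite3 *db; } ExampleVtab;
+
+static int exampleHandleDuplicate(ExampleVtab *p){
+  if( sqlite3_vtab_on_conflict(p->db)==SQLITE_REPLACE ){
+    return SQLITE_OK;         /* caller silently replaces the existing row */
+  }
+  return SQLITE_CONSTRAINT;   /* SQLite applies the active ON CONFLICT mode */
+}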
+
+/*
+** CAPI3REF: Determine If Virtual Table Column Access Is For UPDATE
+**
+** If the sqlite3_vtab_nochange(X) routine is called within the [xColumn]
+** method of a [virtual table], then it returns true if and only if the
+** column is being fetched as part of an UPDATE operation during which the
+** column value will not change. Applications might use this to substitute
+** a return value that is less expensive to compute and that the corresponding
+** [xUpdate] method understands as a "no-change" value.
+**
+** If the [xColumn] method calls sqlite3_vtab_nochange() and finds that
+** the column is not changed by the UPDATE statement, then the xColumn
+** method can optionally return without setting a result, without calling
+** any of the [sqlite3_result_int|sqlite3_result_xxxxx() interfaces].
+** In that case, [sqlite3_value_nochange(X)] will return true for the
+** same column in the [xUpdate] method.
+*/
+SQLITE_API int sqlite3_vtab_nochange(sqlite3_context*);
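+
+/*
+** A fragment of a hypothetical xColumn implementation (simplified to take
+** only the context and column index).  Column 2 is assumed to be expensive
+** to compute; when an UPDATE does not need it, the result is deliberately
+** left unset so that xUpdate sees sqlite3_value_nochange().
+*/
+static int exampleVtabColumn(sqlite3_context *ctx, int iCol){
+  if( iCol==2 && sqlite3_vtab_nochange(ctx) ){
+    return SQLITE_OK;            /* no result set: the "no-change" value */
+  }
+  sqlite3_result_int(ctx, 42);   /* placeholder for the real column value */
+  return SQLITE_OK;
+}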
+
+/*
+** CAPI3REF: Determine The Collation For a Virtual Table Constraint
+**
+** This function may only be called from within a call to the [xBestIndex]
+** method of a [virtual table].
+**
+** The first argument must be the sqlite3_index_info object that is the
+** first parameter to the xBestIndex() method. The second argument must be
+** an index into the aConstraint[] array belonging to the sqlite3_index_info
+** structure passed to xBestIndex. This function returns a pointer to a buffer
+** containing the name of the collation sequence for the corresponding
+** constraint.
+*/
+SQLITE_API SQLITE_EXPERIMENTAL const char *sqlite3_vtab_collation(sqlite3_index_info*,int);
+
+/*
+** CAPI3REF: Conflict resolution modes
+** KEYWORDS: {conflict resolution mode}
+**
+** These constants are returned by [sqlite3_vtab_on_conflict()] to
+** inform a [virtual table] implementation what the [ON CONFLICT] mode
+** is for the SQL statement being evaluated.
+**
+** Note that the [SQLITE_IGNORE] constant is also used as a potential
+** return value from the [sqlite3_set_authorizer()] callback and that
+** [SQLITE_ABORT] is also a [result code].
+*/
+#define SQLITE_ROLLBACK 1
+/* #define SQLITE_IGNORE 2 // Also used by sqlite3_authorizer() callback */
+#define SQLITE_FAIL 3
+/* #define SQLITE_ABORT 4 // Also an error code */
+#define SQLITE_REPLACE 5
+
+/*
+** CAPI3REF: Prepared Statement Scan Status Opcodes
+** KEYWORDS: {scanstatus options}
+**
+** The following constants can be used for the T parameter to the
+** [sqlite3_stmt_scanstatus(S,X,T,V)] interface. Each constant designates a
+** different metric for sqlite3_stmt_scanstatus() to return.
+**
+** When the value returned to V is a string, space to hold that string is
+** managed by the prepared statement S and will be automatically freed when
+** S is finalized.
+**
+** <dl>
+** [[SQLITE_SCANSTAT_NLOOP]] <dt>SQLITE_SCANSTAT_NLOOP</dt>
+** <dd>^The [sqlite3_int64] variable pointed to by the V parameter will be
+** set to the total number of times that the X-th loop has run.</dd>
+**
+** [[SQLITE_SCANSTAT_NVISIT]] <dt>SQLITE_SCANSTAT_NVISIT</dt>
+** <dd>^The [sqlite3_int64] variable pointed to by the V parameter will be set
+** to the total number of rows examined by all iterations of the X-th loop.</dd>
+**
+** [[SQLITE_SCANSTAT_EST]] <dt>SQLITE_SCANSTAT_EST</dt>
+** <dd>^The "double" variable pointed to by the V parameter will be set to the
+** query planner's estimate for the average number of rows output from each
+** iteration of the X-th loop. If the query planner's estimate was accurate,
+** then this value will approximate the quotient NVISIT/NLOOP and the
+** product of this value for all prior loops with the same SELECTID will
+** be the NLOOP value for the current loop.
+**
+** [[SQLITE_SCANSTAT_NAME]] <dt>SQLITE_SCANSTAT_NAME</dt>
+** <dd>^The "const char *" variable pointed to by the V parameter will be set
+** to a zero-terminated UTF-8 string containing the name of the index or table
+** used for the X-th loop.
+**
+** [[SQLITE_SCANSTAT_EXPLAIN]] <dt>SQLITE_SCANSTAT_EXPLAIN</dt>
+** <dd>^The "const char *" variable pointed to by the V parameter will be set
+** to a zero-terminated UTF-8 string containing the [EXPLAIN QUERY PLAN]
+** description for the X-th loop.
+**
+** [[SQLITE_SCANSTAT_SELECTID]] <dt>SQLITE_SCANSTAT_SELECTID</dt>
+** <dd>^The "int" variable pointed to by the V parameter will be set to the
+** "select-id" for the X-th loop. The select-id identifies which query or
+** subquery the loop is part of. The main query has a select-id of zero.
+** The select-id is the same value as is output in the first column
+** of an [EXPLAIN QUERY PLAN] query.
+** </dl>
+*/
+#define SQLITE_SCANSTAT_NLOOP 0
+#define SQLITE_SCANSTAT_NVISIT 1
+#define SQLITE_SCANSTAT_EST 2
+#define SQLITE_SCANSTAT_NAME 3
+#define SQLITE_SCANSTAT_EXPLAIN 4
+#define SQLITE_SCANSTAT_SELECTID 5
+
+/*
+** CAPI3REF: Prepared Statement Scan Status
+** METHOD: sqlite3_stmt
+**
+** This interface returns information about the predicted and measured
+** performance for pStmt. Advanced applications can use this
+** interface to compare the predicted and the measured performance and
+** issue warnings and/or rerun [ANALYZE] if discrepancies are found.
+**
+** Since this interface is expected to be rarely used, it is only
+** available if SQLite is compiled using the [SQLITE_ENABLE_STMT_SCANSTATUS]
+** compile-time option.
+**
+** The "iScanStatusOp" parameter determines which status information to return.
+** The "iScanStatusOp" must be one of the [scanstatus options] or the behavior
+** of this interface is undefined.
+** ^The requested measurement is written into a variable pointed to by
+** the "pOut" parameter.
+** Parameter "idx" identifies the specific loop to retrieve statistics for.
+** Loops are numbered starting from zero. ^If idx is out of range - less than
+** zero or greater than or equal to the total number of loops used to implement
+** the statement - a non-zero value is returned and the variable that pOut
+** points to is unchanged.
+**
+** ^Statistics might not be available for all loops in all statements. ^In cases
+** where there exist loops with no available statistics, this function behaves
+** as if the loop did not exist - it returns non-zero and leaves the variable
+** that pOut points to unchanged.
+**
+** See also: [sqlite3_stmt_scanstatus_reset()]
+*/
+SQLITE_API int sqlite3_stmt_scanstatus(
+ sqlite3_stmt *pStmt, /* Prepared statement for which info desired */
+ int idx, /* Index of loop to report on */
+ int iScanStatusOp, /* Information desired. SQLITE_SCANSTAT_* */
+ void *pOut /* Result written here */
+);
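+
+/*
+** A sketch that walks every loop of a prepared statement and prints the
+** basic counters.  It assumes the library was built with the
+** SQLITE_ENABLE_STMT_SCANSTATUS compile-time option.
+*/
+#include <stdio.h>
+static void examplePrintScanStatus(sqlite3_stmt *pStmt){
+  int idx;
+  for(idx=0; ; idx++){
+    sqlite3_int64 nLoop = 0, nVisit = 0;
+    const char *zExplain = 0;
+    if( sqlite3_stmt_scanstatus(pStmt, idx, SQLITE_SCANSTAT_NLOOP,
+                                (void*)&nLoop) ) break;   /* no more loops */
+    sqlite3_stmt_scanstatus(pStmt, idx, SQLITE_SCANSTAT_NVISIT, (void*)&nVisit);
+    sqlite3_stmt_scanstatus(pStmt, idx, SQLITE_SCANSTAT_EXPLAIN, (void*)&zExplain);
+    printf("loop %d: %s (nLoop=%lld, nVisit=%lld)\n",
+           idx, zExplain, (long long)nLoop, (long long)nVisit);
+  }
+}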
+
+/*
+** CAPI3REF: Zero Scan-Status Counters
+** METHOD: sqlite3_stmt
+**
+** ^Zero all [sqlite3_stmt_scanstatus()] related event counters.
+**
+** This API is only available if the library is built with pre-processor
+** symbol [SQLITE_ENABLE_STMT_SCANSTATUS] defined.
+*/
+SQLITE_API void sqlite3_stmt_scanstatus_reset(sqlite3_stmt*);
+
+/*
+** CAPI3REF: Flush caches to disk mid-transaction
+**
+** ^If a write-transaction is open on [database connection] D when the
+** [sqlite3_db_cacheflush(D)] interface is invoked, any dirty
+** pages in the pager-cache that are not currently in use are written out
+** to disk. A dirty page may be in use if a database cursor created by an
+** active SQL statement is reading from it, or if it is page 1 of a database
+** file (page 1 is always "in use"). ^The [sqlite3_db_cacheflush(D)]
+** interface flushes caches for all schemas - "main", "temp", and
+** any [attached] databases.
+**
+** ^If this function needs to obtain extra database locks before dirty pages
+** can be flushed to disk, it does so. ^If those locks cannot be obtained
+** immediately and there is a busy-handler callback configured, it is invoked
+** in the usual manner. ^If the required lock still cannot be obtained, then
+** the database is skipped and an attempt made to flush any dirty pages
+** belonging to the next (if any) database. ^If any databases are skipped
+** because locks cannot be obtained, but no other error occurs, this
+** function returns SQLITE_BUSY.
+**
+** ^If any other error occurs while flushing dirty pages to disk (for
+** example an IO error or out-of-memory condition), then processing is
+** abandoned and an SQLite [error code] is returned to the caller immediately.
+**
+** ^Otherwise, if no error occurs, [sqlite3_db_cacheflush()] returns SQLITE_OK.
+**
+** ^This function does not set the database handle error code or message
+** returned by the [sqlite3_errcode()] and [sqlite3_errmsg()] functions.
+*/
+SQLITE_API int sqlite3_db_cacheflush(sqlite3*);
+
+/*
+** CAPI3REF: The pre-update hook.
+**
+** ^These interfaces are only available if SQLite is compiled using the
+** [SQLITE_ENABLE_PREUPDATE_HOOK] compile-time option.
+**
+** ^The [sqlite3_preupdate_hook()] interface registers a callback function
+** that is invoked prior to each [INSERT], [UPDATE], and [DELETE] operation
+** on a database table.
+** ^At most one preupdate hook may be registered at a time on a single
+** [database connection]; each call to [sqlite3_preupdate_hook()] overrides
+** the previous setting.
+** ^The preupdate hook is disabled by invoking [sqlite3_preupdate_hook()]
+** with a NULL pointer as the second parameter.
+** ^The third parameter to [sqlite3_preupdate_hook()] is passed through as
+** the first parameter to callbacks.
+**
+** ^The preupdate hook only fires for changes to real database tables; the
+** preupdate hook is not invoked for changes to [virtual tables] or to
+** system tables like sqlite_sequence or sqlite_stat1.
+**
+** ^The second parameter to the preupdate callback is a pointer to
+** the [database connection] that registered the preupdate hook.
+** ^The third parameter to the preupdate callback is one of the constants
+** [SQLITE_INSERT], [SQLITE_DELETE], or [SQLITE_UPDATE] to identify the
+** kind of update operation that is about to occur.
+** ^(The fourth parameter to the preupdate callback is the name of the
+** database within the database connection that is being modified. This
+** will be "main" for the main database or "temp" for TEMP tables or
+** the name given after the AS keyword in the [ATTACH] statement for attached
+** databases.)^
+** ^The fifth parameter to the preupdate callback is the name of the
+** table that is being modified.
+**
+** For an UPDATE or DELETE operation on a [rowid table], the sixth
+** parameter passed to the preupdate callback is the initial [rowid] of the
+** row being modified or deleted. For an INSERT operation on a rowid table,
+** or any operation on a WITHOUT ROWID table, the value of the sixth
+** parameter is undefined. For an INSERT or UPDATE on a rowid table the
+** seventh parameter is the final rowid value of the row being inserted
+** or updated. The value of the seventh parameter passed to the callback
+** function is not defined for operations on WITHOUT ROWID tables, or for
+** INSERT operations on rowid tables.
+**
+** The [sqlite3_preupdate_old()], [sqlite3_preupdate_new()],
+** [sqlite3_preupdate_count()], and [sqlite3_preupdate_depth()] interfaces
+** provide additional information about a preupdate event. These routines
+** may only be called from within a preupdate callback. Invoking any of
+** these routines from outside of a preupdate callback or with a
+** [database connection] pointer that is different from the one supplied
+** to the preupdate callback results in undefined and probably undesirable
+** behavior.
+**
+** ^The [sqlite3_preupdate_count(D)] interface returns the number of columns
+** in the row that is being inserted, updated, or deleted.
+**
+** ^The [sqlite3_preupdate_old(D,N,P)] interface writes into P a pointer to
+** a [protected sqlite3_value] that contains the value of the Nth column of
+** the table row before it is updated. The N parameter must be between 0
+** and one less than the number of columns or the behavior will be
+** undefined. This must only be used within SQLITE_UPDATE and SQLITE_DELETE
+** preupdate callbacks; if it is used by an SQLITE_INSERT callback then the
+** behavior is undefined. The [sqlite3_value] that P points to
+** will be destroyed when the preupdate callback returns.
+**
+** ^The [sqlite3_preupdate_new(D,N,P)] interface writes into P a pointer to
+** a [protected sqlite3_value] that contains the value of the Nth column of
+** the table row after it is updated. The N parameter must be between 0
+** and one less than the number of columns or the behavior will be
+** undefined. This must only be used within SQLITE_INSERT and SQLITE_UPDATE
+** preupdate callbacks; if it is used by an SQLITE_DELETE callback then the
+** behavior is undefined. The [sqlite3_value] that P points to
+** will be destroyed when the preupdate callback returns.
+**
+** ^The [sqlite3_preupdate_depth(D)] interface returns 0 if the preupdate
+** callback was invoked as a result of a direct insert, update, or delete
+** operation; or 1 for inserts, updates, or deletes invoked by top-level
+** triggers; or 2 for changes resulting from triggers called by top-level
+** triggers; and so forth.
+**
+** See also: [sqlite3_update_hook()]
+*/
+#if defined(SQLITE_ENABLE_PREUPDATE_HOOK)
+SQLITE_API void *sqlite3_preupdate_hook(
+ sqlite3 *db,
+ void(*xPreUpdate)(
+ void *pCtx, /* Copy of third arg to preupdate_hook() */
+ sqlite3 *db, /* Database handle */
+ int op, /* SQLITE_UPDATE, DELETE or INSERT */
+ char const *zDb, /* Database name */
+ char const *zName, /* Table name */
+ sqlite3_int64 iKey1, /* Rowid of row about to be deleted/updated */
+ sqlite3_int64 iKey2 /* New rowid value (for a rowid UPDATE) */
+ ),
+ void*
+);
+SQLITE_API int sqlite3_preupdate_old(sqlite3 *, int, sqlite3_value **);
+SQLITE_API int sqlite3_preupdate_count(sqlite3 *);
+SQLITE_API int sqlite3_preupdate_depth(sqlite3 *);
+SQLITE_API int sqlite3_preupdate_new(sqlite3 *, int, sqlite3_value **);
+#endif
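+
+/*
+** A sketch of a preupdate hook that inspects the before/after values of
+** each column of an UPDATE.  The callback body is illustrative only; both
+** sqlite3_value objects are destroyed when the callback returns.
+*/
+#if defined(SQLITE_ENABLE_PREUPDATE_HOOK)
+static void examplePreupdateCb(
+  void *pCtx, sqlite3 *db, int op, char const *zDb, char const *zTbl,
+  sqlite3_int64 iKey1, sqlite3_int64 iKey2
+){
+  (void)pCtx; (void)zDb; (void)zTbl; (void)iKey1; (void)iKey2;
+  if( op==SQLITE_UPDATE ){
+    int i, n = sqlite3_preupdate_count(db);
+    for(i=0; i<n; i++){
+      sqlite3_value *pOld = 0, *pNew = 0;
+      sqlite3_preupdate_old(db, i, &pOld);
+      sqlite3_preupdate_new(db, i, &pNew);
+      /* compare or serialize pOld/pNew here, before returning */
+    }
+  }
+}
+/* Registration:  sqlite3_preupdate_hook(db, examplePreupdateCb, (void*)0); */
+#endif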
+
+/*
+** CAPI3REF: Low-level system error code
+**
+** ^Attempt to return the underlying operating system error code or error
+** number that caused the most recent I/O error or failure to open a file.
+** The return value is OS-dependent. For example, on unix systems, after
+** [sqlite3_open_v2()] returns [SQLITE_CANTOPEN], this interface could be
+** called to get back the underlying "errno" that caused the problem, such
+** as ENOSPC, EAUTH, EISDIR, and so forth.
+*/
+SQLITE_API int sqlite3_system_errno(sqlite3*);
+
+/*
+** CAPI3REF: Database Snapshot
+** KEYWORDS: {snapshot} {sqlite3_snapshot}
+**
+** An instance of the snapshot object records the state of a [WAL mode]
+** database for some specific point in history.
+**
+** In [WAL mode], multiple [database connections] that are open on the
+** same database file can each be reading a different historical version
+** of the database file. When a [database connection] begins a read
+** transaction, that connection sees an unchanging copy of the database
+** as it existed for the point in time when the transaction first started.
+** Subsequent changes to the database from other connections are not seen
+** by the reader until a new read transaction is started.
+**
+** The sqlite3_snapshot object records state information about an historical
+** version of the database file so that it is possible to later open a new read
+** transaction that sees that historical version of the database rather than
+** the most recent version.
+*/
+typedef struct sqlite3_snapshot {
+ unsigned char hidden[48];
+} sqlite3_snapshot;
+
+/*
+** CAPI3REF: Record A Database Snapshot
+** CONSTRUCTOR: sqlite3_snapshot
+**
+** ^The [sqlite3_snapshot_get(D,S,P)] interface attempts to make a
+** new [sqlite3_snapshot] object that records the current state of
+** schema S in database connection D. ^On success, the
+** [sqlite3_snapshot_get(D,S,P)] interface writes a pointer to the newly
+** created [sqlite3_snapshot] object into *P and returns SQLITE_OK.
+** If there is not already a read-transaction open on schema S when
+** this function is called, one is opened automatically.
+**
+** The following must be true for this function to succeed. If any of
+** the following statements are false when sqlite3_snapshot_get() is
+** called, SQLITE_ERROR is returned. The final value of *P is undefined
+** in this case.
+**
+** <ul>
+** <li> The database handle must not be in [autocommit mode].
+**
+** <li> Schema S of [database connection] D must be a [WAL mode] database.
+**
+** <li> There must not be a write transaction open on schema S of database
+** connection D.
+**
+** <li> One or more transactions must have been written to the current wal
+** file since it was created on disk (by any connection). This means
+** that a snapshot cannot be taken on a wal mode database with no wal
+** file immediately after it is first opened. At least one transaction
+** must be written to it first.
+** </ul>
+**
+** This function may also return SQLITE_NOMEM. If it is called with the
+** database handle in autocommit mode but fails for some other reason,
+** whether or not a read transaction is opened on schema S is undefined.
+**
+** The [sqlite3_snapshot] object returned from a successful call to
+** [sqlite3_snapshot_get()] must be freed using [sqlite3_snapshot_free()]
+** to avoid a memory leak.
+**
+** The [sqlite3_snapshot_get()] interface is only available when the
+** [SQLITE_ENABLE_SNAPSHOT] compile-time option is used.
+*/
+SQLITE_API SQLITE_EXPERIMENTAL int sqlite3_snapshot_get(
+ sqlite3 *db,
+ const char *zSchema,
+ sqlite3_snapshot **ppSnapshot
+);
+
+/*
+** CAPI3REF: Start a read transaction on an historical snapshot
+** METHOD: sqlite3_snapshot
+**
+** ^The [sqlite3_snapshot_open(D,S,P)] interface either starts a new read
+** transaction or upgrades an existing one for schema S of
+** [database connection] D such that the read transaction refers to
+** historical [snapshot] P, rather than the most recent change to the
+** database. ^The [sqlite3_snapshot_open()] interface returns SQLITE_OK
+** on success or an appropriate [error code] if it fails.
+**
+** ^In order to succeed, the database connection must not be in
+** [autocommit mode] when [sqlite3_snapshot_open(D,S,P)] is called. If there
+** is already a read transaction open on schema S, then the database handle
+** must have no active statements (SELECT statements that have been passed
+** to sqlite3_step() but not sqlite3_reset() or sqlite3_finalize()).
+** SQLITE_ERROR is returned if either of these conditions is violated, or
+** if schema S does not exist, or if the snapshot object is invalid.
+**
+** ^A call to sqlite3_snapshot_open() will fail if the specified
+** snapshot has been overwritten by a [checkpoint]. In this case
+** SQLITE_ERROR_SNAPSHOT is returned.
+**
+** If there is already a read transaction open when this function is
+** invoked, then the same read transaction remains open (on the same
+** database snapshot) if SQLITE_ERROR, SQLITE_BUSY or SQLITE_ERROR_SNAPSHOT
+** is returned. If another error code - for example SQLITE_PROTOCOL or an
+** SQLITE_IOERR error code - is returned, then the final state of the
+** read transaction is undefined. If SQLITE_OK is returned, then the
+** read transaction is now open on database snapshot P.
+**
+** ^(A call to [sqlite3_snapshot_open(D,S,P)] will fail if the
+** database connection D does not know that the database file for
+** schema S is in [WAL mode]. A database connection might not know
+** that the database file is in [WAL mode] if there has been no prior
+** I/O on that database connection, or if the database entered [WAL mode]
+** after the most recent I/O on the database connection.)^
+** (Hint: Run "[PRAGMA application_id]" against a newly opened
+** database connection in order to make it ready to use snapshots.)
+**
+** The [sqlite3_snapshot_open()] interface is only available when the
+** [SQLITE_ENABLE_SNAPSHOT] compile-time option is used.
+*/
+SQLITE_API SQLITE_EXPERIMENTAL int sqlite3_snapshot_open(
+ sqlite3 *db,
+ const char *zSchema,
+ sqlite3_snapshot *pSnapshot
+);
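+
+/*
+** An illustrative sketch, not normative documentation, of the usual
+** get/open/free life-cycle. It assumes "db" is open on a [WAL mode]
+** database to which at least one transaction has already been written:
+**
+** <pre>
+**   sqlite3_snapshot *pSnap = 0;
+**   int rc;
+**   sqlite3_exec(db, "BEGIN", 0, 0, 0);
+**   rc = sqlite3_snapshot_get(db, "main", &pSnap);
+**   sqlite3_exec(db, "COMMIT", 0, 0, 0);
+**
+**   if( rc==SQLITE_OK ){
+**     // ... later: re-read the database as of the recorded snapshot ...
+**     sqlite3_exec(db, "BEGIN", 0, 0, 0);
+**     rc = sqlite3_snapshot_open(db, "main", pSnap);
+**     // run read-only queries against the historical version here
+**     sqlite3_exec(db, "COMMIT", 0, 0, 0);
+**     sqlite3_snapshot_free(pSnap);
+**   }
+** </pre>
+*/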
+
+/*
+** CAPI3REF: Destroy a snapshot
+** DESTRUCTOR: sqlite3_snapshot
+**
+** ^The [sqlite3_snapshot_free(P)] interface destroys [sqlite3_snapshot] P.
+** The application must eventually free every [sqlite3_snapshot] object
+** using this routine to avoid a memory leak.
+**
+** The [sqlite3_snapshot_free()] interface is only available when the
+** [SQLITE_ENABLE_SNAPSHOT] compile-time option is used.
+*/
+SQLITE_API SQLITE_EXPERIMENTAL void sqlite3_snapshot_free(sqlite3_snapshot*);
+
+/*
+** CAPI3REF: Compare the ages of two snapshot handles.
+** METHOD: sqlite3_snapshot
+**
+** The sqlite3_snapshot_cmp(P1, P2) interface is used to compare the ages
+** of two valid snapshot handles.
+**
+** If the two snapshot handles are not associated with the same database
+** file, the result of the comparison is undefined.
+**
+** Additionally, the result of the comparison is only valid if both of the
+** snapshot handles were obtained by calling sqlite3_snapshot_get() since the
+** last time the wal file was deleted. The wal file is deleted when the
+** database is changed back to rollback mode or when the number of database
+** clients drops to zero. If either snapshot handle was obtained before the
+** wal file was last deleted, the value returned by this function
+** is undefined.
+**
+** Otherwise, this API returns a negative value if P1 refers to an older
+** snapshot than P2, zero if the two handles refer to the same database
+** snapshot, and a positive value if P1 is a newer snapshot than P2.
+**
+** This interface is only available if SQLite is compiled with the
+** [SQLITE_ENABLE_SNAPSHOT] option.
+*/
+SQLITE_API SQLITE_EXPERIMENTAL int sqlite3_snapshot_cmp(
+ sqlite3_snapshot *p1,
+ sqlite3_snapshot *p2
+);
+
+/*
+** CAPI3REF: Recover snapshots from a wal file
+** METHOD: sqlite3_snapshot
+**
+** If a [WAL file] remains on disk after all database connections close
+** (either through the use of the [SQLITE_FCNTL_PERSIST_WAL] [file control]
+** or because the last process to have the database opened exited without
+** calling [sqlite3_close()]) and a new connection is subsequently opened
+** on that database and [WAL file], the [sqlite3_snapshot_open()] interface
+** will only be able to open the last transaction added to the WAL file
+** even though the WAL file contains other valid transactions.
+**
+** This function attempts to scan the WAL file associated with database zDb
+** of database handle db and make all valid snapshots available to
+** sqlite3_snapshot_open(). It is an error if there is already a read
+** transaction open on the database, or if the database is not a WAL mode
+** database.
+**
+** SQLITE_OK is returned if successful, or an SQLite error code otherwise.
+**
+** This interface is only available if SQLite is compiled with the
+** [SQLITE_ENABLE_SNAPSHOT] option.
+*/
+SQLITE_API SQLITE_EXPERIMENTAL int sqlite3_snapshot_recover(sqlite3 *db, const char *zDb);
+
+/*
+** CAPI3REF: Serialize a database
+**
+** The sqlite3_serialize(D,S,P,F) interface returns a pointer to memory
+** that is a serialization of the S database on [database connection] D.
+** If P is not a NULL pointer, then the size of the database in bytes
+** is written into *P.
+**
+** For an ordinary on-disk database file, the serialization is just a
+** copy of the disk file. For an in-memory database or a "TEMP" database,
+** the serialization is the same sequence of bytes that would be written
+** to disk if that database were backed up to disk.
+**
+** The usual case is that sqlite3_serialize() copies the serialization of
+** the database into memory obtained from [sqlite3_malloc64()] and returns
+** a pointer to that memory. The caller is responsible for freeing the
+** returned value to avoid a memory leak. However, if the F argument
+** contains the SQLITE_SERIALIZE_NOCOPY bit, then no memory allocations
+** are made, and the sqlite3_serialize() function will return a pointer
+** to the contiguous memory representation of the database that SQLite
+** is currently using for that database, or NULL if no such contiguous
+** memory representation of the database exists. A contiguous memory
+** representation of the database will usually only exist if there has
+** been a prior call to [sqlite3_deserialize(D,S,...)] with the same
+** values of D and S.
+** The size of the database is written into *P even if the
+** SQLITE_SERIALIZE_NOCOPY bit is set but no contiguous copy
+** of the database exists.
+**
+** A call to sqlite3_serialize(D,S,P,F) might return NULL even if the
+** SQLITE_SERIALIZE_NOCOPY bit is omitted from argument F if a memory
+** allocation error occurs.
+**
+** This interface is only available if SQLite is compiled with the
+** [SQLITE_ENABLE_DESERIALIZE] option.
+*/
+SQLITE_API unsigned char *sqlite3_serialize(
+ sqlite3 *db, /* The database connection */
+ const char *zSchema, /* Which DB to serialize. ex: "main", "temp", ... */
+ sqlite3_int64 *piSize, /* Write size of the DB here, if not NULL */
+ unsigned int mFlags /* Zero or more SQLITE_SERIALIZE_* flags */
+);
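+
+/*
+** An illustrative sketch, not normative documentation: taking an in-memory
+** copy of the "main" database and releasing it afterwards. With F==0 the
+** caller owns the returned buffer:
+**
+** <pre>
+**   sqlite3_int64 szDb = 0;
+**   unsigned char *pData = sqlite3_serialize(db, "main", &szDb, 0);
+**   if( pData ){
+**     // szDb bytes at pData hold an image of the database file
+**     sqlite3_free(pData);
+**   }
+** </pre>
+*/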
+
+/*
+** CAPI3REF: Flags for sqlite3_serialize
+**
+** Zero or more of the following constants can be OR-ed together for
+** the F argument to [sqlite3_serialize(D,S,P,F)].
+**
+** SQLITE_SERIALIZE_NOCOPY means that [sqlite3_serialize()] will return
+** a pointer to the contiguous in-memory database that it is currently using,
+** without making a copy of the database. If SQLite is not currently using
+** a contiguous in-memory database, then this option causes
+** [sqlite3_serialize()] to return a NULL pointer. SQLite will only be
+** using a contiguous in-memory database if it has been initialized by a
+** prior call to [sqlite3_deserialize()].
+*/
+#define SQLITE_SERIALIZE_NOCOPY 0x001 /* Do no memory allocations */
+
+/*
+** CAPI3REF: Deserialize a database
+**
+** The sqlite3_deserialize(D,S,P,N,M,F) interface causes the
+** [database connection] D to disconnect from database S and then
+** reopen S as an in-memory database based on the serialization contained
+** in P. The serialized database P is N bytes in size. M is the size of
+** the buffer P, which might be larger than N. If M is larger than N, and
+** the SQLITE_DESERIALIZE_READONLY bit is not set in F, then SQLite is
+** permitted to add content to the in-memory database as long as the total
+** size does not exceed M bytes.
+**
+** If the SQLITE_DESERIALIZE_FREEONCLOSE bit is set in F, then SQLite will
+** invoke sqlite3_free() on the serialization buffer when the database
+** connection closes. If the SQLITE_DESERIALIZE_RESIZEABLE bit is set, then
+** SQLite will try to increase the buffer size using sqlite3_realloc64()
+** if writes on the database cause it to grow larger than M bytes.
+**
+** The sqlite3_deserialize() interface will fail with SQLITE_BUSY if the
+** database is currently in a read transaction or is involved in a backup
+** operation.
+**
+** If sqlite3_deserialize(D,S,P,N,M,F) fails for any reason and if the
+** SQLITE_DESERIALIZE_FREEONCLOSE bit is set in argument F, then
+** [sqlite3_free()] is invoked on argument P prior to returning.
+**
+** This interface is only available if SQLite is compiled with the
+** [SQLITE_ENABLE_DESERIALIZE] option.
+*/
+SQLITE_API int sqlite3_deserialize(
+ sqlite3 *db, /* The database connection */
+ const char *zSchema, /* Which DB to reopen with the deserialization */
+ unsigned char *pData, /* The serialized database content */
+  sqlite3_int64 szDb,           /* Number of bytes in the deserialization */
+ sqlite3_int64 szBuf, /* Total size of buffer pData[] */
+ unsigned mFlags /* Zero or more SQLITE_DESERIALIZE_* flags */
+);
+
+/*
+** CAPI3REF: Flags for sqlite3_deserialize()
+**
+** The following are allowed values for the 6th argument (the F argument) to
+** the [sqlite3_deserialize(D,S,P,N,M,F)] interface.
+**
+** The SQLITE_DESERIALIZE_FREEONCLOSE flag means that the database serialization
+** in the P argument is held in memory obtained from [sqlite3_malloc64()]
+** and that SQLite should take ownership of this memory and automatically
+** free it when it has finished using it. Without this flag, the caller
+** is responsible for freeing any dynamically allocated memory.
+**
+** The SQLITE_DESERIALIZE_RESIZEABLE flag means that SQLite is allowed to
+** grow the size of the database using calls to [sqlite3_realloc64()]. This
+** flag should only be used if SQLITE_DESERIALIZE_FREEONCLOSE is also used.
+** Without this flag, the deserialized database cannot increase in size beyond
+** the number of bytes specified by the M parameter.
+**
+** The SQLITE_DESERIALIZE_READONLY flag means that the deserialized database
+** should be treated as read-only.
+*/
+#define SQLITE_DESERIALIZE_FREEONCLOSE 1 /* Call sqlite3_free() on close */
+#define SQLITE_DESERIALIZE_RESIZEABLE 2 /* Resize using sqlite3_realloc64() */
+#define SQLITE_DESERIALIZE_READONLY 4 /* Database is read-only */
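+
+/*
+** An illustrative sketch, not normative documentation: handing a serialized
+** image to a connection. It assumes pData holds szDb bytes obtained from
+** [sqlite3_malloc64()], as required when SQLITE_DESERIALIZE_FREEONCLOSE is
+** used:
+**
+** <pre>
+**   int rc = sqlite3_deserialize(db, "main", pData, szDb, szDb,
+**       SQLITE_DESERIALIZE_FREEONCLOSE|SQLITE_DESERIALIZE_RESIZEABLE);
+**   if( rc!=SQLITE_OK ){
+**     // on failure pData has already been freed (FREEONCLOSE was set)
+**   }
+** </pre>
+*/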
+
+/*
+** Undo the hack that converts floating point types to integer for
+** builds on processors without floating point support.
+*/
+#ifdef SQLITE_OMIT_FLOATING_POINT
+# undef double
+#endif
+
+#if 0
+} /* End of the 'extern "C"' block */
+#endif
+#endif /* SQLITE3_H */
+
+/******** Begin file sqlite3rtree.h *********/
+/*
+** 2010 August 30
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+*/
+
+#ifndef _SQLITE3RTREE_H_
+#define _SQLITE3RTREE_H_
+
+
+#if 0
+extern "C" {
+#endif
+
+typedef struct sqlite3_rtree_geometry sqlite3_rtree_geometry;
+typedef struct sqlite3_rtree_query_info sqlite3_rtree_query_info;
+
+/* The double-precision datatype used by RTree depends on the
+** SQLITE_RTREE_INT_ONLY compile-time option.
+*/
+#ifdef SQLITE_RTREE_INT_ONLY
+ typedef sqlite3_int64 sqlite3_rtree_dbl;
+#else
+ typedef double sqlite3_rtree_dbl;
+#endif
+
+/*
+** Register a geometry callback named zGeom that can be used as part of an
+** R-Tree geometry query as follows:
+**
+** SELECT ... FROM <rtree> WHERE <rtree col> MATCH $zGeom(... params ...)
+*/
+SQLITE_API int sqlite3_rtree_geometry_callback(
+ sqlite3 *db,
+ const char *zGeom,
+ int (*xGeom)(sqlite3_rtree_geometry*, int, sqlite3_rtree_dbl*,int*),
+ void *pContext
+);
+
+
+/*
+** A pointer to a structure of the following type is passed as the first
+** argument to callbacks registered using sqlite3_rtree_geometry_callback().
+*/
+struct sqlite3_rtree_geometry {
+ void *pContext; /* Copy of pContext passed to s_r_g_c() */
+ int nParam; /* Size of array aParam[] */
+ sqlite3_rtree_dbl *aParam; /* Parameters passed to SQL geom function */
+ void *pUser; /* Callback implementation user data */
+ void (*xDelUser)(void *); /* Called by SQLite to clean up pUser */
+};
+
+/*
+** Register a 2nd-generation geometry callback named zQueryFunc that can be
+** used as part of an R-Tree geometry query as follows:
+**
+** SELECT ... FROM <rtree> WHERE <rtree col> MATCH $zQueryFunc(... params ...)
+*/
+SQLITE_API int sqlite3_rtree_query_callback(
+ sqlite3 *db,
+ const char *zQueryFunc,
+ int (*xQueryFunc)(sqlite3_rtree_query_info*),
+ void *pContext,
+ void (*xDestructor)(void*)
+);
+
+
+/*
+** A pointer to a structure of the following type is passed as the
+** argument to scored geometry callbacks registered using
+** sqlite3_rtree_query_callback().
+**
+** Note that the first 5 fields of this structure are identical to
+** sqlite3_rtree_geometry. This structure is a subclass of
+** sqlite3_rtree_geometry.
+*/
+struct sqlite3_rtree_query_info {
+ void *pContext; /* pContext from when function registered */
+ int nParam; /* Number of function parameters */
+ sqlite3_rtree_dbl *aParam; /* value of function parameters */
+ void *pUser; /* callback can use this, if desired */
+ void (*xDelUser)(void*); /* function to free pUser */
+ sqlite3_rtree_dbl *aCoord; /* Coordinates of node or entry to check */
+ unsigned int *anQueue; /* Number of pending entries in the queue */
+ int nCoord; /* Number of coordinates */
+ int iLevel; /* Level of current node or entry */
+ int mxLevel; /* The largest iLevel value in the tree */
+ sqlite3_int64 iRowid; /* Rowid for current entry */
+ sqlite3_rtree_dbl rParentScore; /* Score of parent node */
+ int eParentWithin; /* Visibility of parent node */
+ int eWithin; /* OUT: Visibility */
+ sqlite3_rtree_dbl rScore; /* OUT: Write the score here */
+ /* The following fields are only available in 3.8.11 and later */
+ sqlite3_value **apSqlParam; /* Original SQL values of parameters */
+};
+
+/*
+** Allowed values for sqlite3_rtree_query_info.eWithin and .eParentWithin.
+*/
+#define NOT_WITHIN 0 /* Object completely outside of query region */
+#define PARTLY_WITHIN 1 /* Object partially overlaps query region */
+#define FULLY_WITHIN 2 /* Object fully contained within query region */
+
+
+#if 0
+} /* end of the 'extern "C"' block */
+#endif
+
+#endif /* ifndef _SQLITE3RTREE_H_ */
+
+/******** End of sqlite3rtree.h *********/
+/******** Begin file sqlite3session.h *********/
+
+#if !defined(__SQLITESESSION_H_) && defined(SQLITE_ENABLE_SESSION)
+#define __SQLITESESSION_H_ 1
+
+/*
+** Make sure we can call this stuff from C++.
+*/
+#if 0
+extern "C" {
+#endif
+
+
+/*
+** CAPI3REF: Session Object Handle
+**
+** An instance of this object is a [session] that can be used to
+** record changes to a database.
+*/
+typedef struct sqlite3_session sqlite3_session;
+
+/*
+** CAPI3REF: Changeset Iterator Handle
+**
+** An instance of this object acts as a cursor for iterating
+** over the elements of a [changeset] or [patchset].
+*/
+typedef struct sqlite3_changeset_iter sqlite3_changeset_iter;
+
+/*
+** CAPI3REF: Create A New Session Object
+** CONSTRUCTOR: sqlite3_session
+**
+** Create a new session object attached to database handle db. If successful,
+** a pointer to the new object is written to *ppSession and SQLITE_OK is
+** returned. If an error occurs, *ppSession is set to NULL and an SQLite
+** error code (e.g. SQLITE_NOMEM) is returned.
+**
+** It is possible to create multiple session objects attached to a single
+** database handle.
+**
+** Session objects created using this function should be deleted using the
+** [sqlite3session_delete()] function before the database handle that they
+** are attached to is itself closed. If the database handle is closed before
+** the session object is deleted, then the results of calling any session
+** module function, including [sqlite3session_delete()], on the session
+** object are undefined.
+**
+** Because the session module uses the [sqlite3_preupdate_hook()] API, it
+** is not possible for an application to register a pre-update hook on a
+** database handle that has one or more session objects attached. Nor is
+** it possible to create a session object attached to a database handle for
+** which a pre-update hook is already defined. The results of attempting
+** either of these things are undefined.
+**
+** The session object will be used to create changesets for tables in
+** database zDb, where zDb is either "main", or "temp", or the name of an
+** attached database. It is not an error if database zDb is not attached
+** to the database when the session object is created.
+*/
+SQLITE_API int sqlite3session_create(
+ sqlite3 *db, /* Database handle */
+ const char *zDb, /* Name of db (e.g. "main") */
+ sqlite3_session **ppSession /* OUT: New session object */
+);
+
+/*
+** CAPI3REF: Delete A Session Object
+** DESTRUCTOR: sqlite3_session
+**
+** Delete a session object previously allocated using
+** [sqlite3session_create()]. Once a session object has been deleted, the
+** results of attempting to use pSession with any other session module
+** function are undefined.
+**
+** Session objects must be deleted before the database handle to which they
+** are attached is closed. Refer to the documentation for
+** [sqlite3session_create()] for details.
+*/
+SQLITE_API void sqlite3session_delete(sqlite3_session *pSession);
+
+
+/*
+** CAPI3REF: Enable Or Disable A Session Object
+** METHOD: sqlite3_session
+**
+** Enable or disable the recording of changes by a session object. When
+** enabled, a session object records changes made to the database. When
+** disabled, it does not. A newly created session object is enabled.
+** Refer to the documentation for [sqlite3session_changeset()] for further
+** details regarding how enabling and disabling a session object affects
+** the eventual changesets.
+**
+** Passing zero to this function disables the session. Passing a value
+** greater than zero enables it. Passing a value less than zero is a
+** no-op, and may be used to query the current state of the session.
+**
+** The return value indicates the final state of the session object: 0 if
+** the session is disabled, or 1 if it is enabled.
+*/
+SQLITE_API int sqlite3session_enable(sqlite3_session *pSession, int bEnable);
+
+/*
+** CAPI3REF: Set Or Clear the Indirect Change Flag
+** METHOD: sqlite3_session
+**
+** Each change recorded by a session object is marked as either direct or
+** indirect. A change is marked as indirect if either:
+**
+** <ul>
+** <li> The session object "indirect" flag is set when the change is
+** made, or
+** <li> The change is made by an SQL trigger or foreign key action
+**      instead of directly as a result of a user's SQL statement.
+** </ul>
+**
+** If a single row is affected by more than one operation within a session,
+** then the change is considered indirect if all operations meet the criteria
+** for an indirect change above, or direct otherwise.
+**
+** This function is used to set, clear or query the session object indirect
+** flag. If the second argument passed to this function is zero, then the
+** indirect flag is cleared. If it is greater than zero, the indirect flag
+** is set. Passing a value less than zero does not modify the current value
+** of the indirect flag, and may be used to query the current state of the
+** indirect flag for the specified session object.
+**
+** The return value indicates the final state of the indirect flag: 0 if
+** it is clear, or 1 if it is set.
+*/
+SQLITE_API int sqlite3session_indirect(sqlite3_session *pSession, int bIndirect);
+
+/*
+** CAPI3REF: Attach A Table To A Session Object
+** METHOD: sqlite3_session
+**
+** If argument zTab is not NULL, then it is the name of a table to attach
+** to the session object passed as the first argument. All subsequent changes
+** made to the table while the session object is enabled will be recorded. See
+** documentation for [sqlite3session_changeset()] for further details.
+**
+** Or, if argument zTab is NULL, then changes are recorded for all tables
+** in the database. If additional tables are added to the database (by
+** executing "CREATE TABLE" statements) after this call is made, changes for
+** the new tables are also recorded.
+**
+** Changes can only be recorded for tables that have a PRIMARY KEY explicitly
+** defined as part of their CREATE TABLE statement. It does not matter if the
+** PRIMARY KEY is an "INTEGER PRIMARY KEY" (rowid alias) or not. The PRIMARY
+** KEY may consist of a single column, or may be a composite key.
+**
+** It is not an error if the named table does not exist in the database. Nor
+** is it an error if the named table does not have a PRIMARY KEY. However,
+** no changes will be recorded in either of these scenarios.
+**
+** Changes are not recorded for individual rows that have NULL values stored
+** in one or more of their PRIMARY KEY columns.
+**
+** SQLITE_OK is returned if the call completes without error. Or, if an error
+** occurs, an SQLite error code (e.g. SQLITE_NOMEM) is returned.
+**
+** <h3>Special sqlite_stat1 Handling</h3>
+**
+** As of SQLite version 3.22.0, the "sqlite_stat1" table is an exception to
+** some of the rules above. In SQLite, the schema of sqlite_stat1 is:
+** <pre>
+** &nbsp; CREATE TABLE sqlite_stat1(tbl,idx,stat)
+** </pre>
+**
+** Even though sqlite_stat1 does not have a PRIMARY KEY, changes are
+** recorded for it as if the PRIMARY KEY is (tbl,idx). Additionally, changes
+** are recorded for rows for which (idx IS NULL) is true. However, for such
+** rows a zero-length blob (SQL value X'') is stored in the changeset or
+** patchset instead of a NULL value. This allows such changesets to be
+** manipulated by legacy implementations of sqlite3changeset_invert(),
+** concat() and similar.
+**
+** The sqlite3changeset_apply() function automatically converts the
+** zero-length blob back to a NULL value when updating the sqlite_stat1
+** table. However, if the application calls sqlite3changeset_new(),
+** sqlite3changeset_old() or sqlite3changeset_conflict() on a changeset
+** iterator directly (including on a changeset iterator passed to a
+** conflict-handler callback) then the X'' value is returned. The application
+** must translate X'' to NULL itself if required.
+**
+** Legacy (older than 3.22.0) versions of the sessions module cannot capture
+** changes made to the sqlite_stat1 table. Legacy versions of the
+** sqlite3changeset_apply() function silently ignore any modifications to the
+** sqlite_stat1 table that are part of a changeset or patchset.
+*/
+SQLITE_API int sqlite3session_attach(
+ sqlite3_session *pSession, /* Session object */
+ const char *zTab /* Table name */
+);
+
+/*
+** CAPI3REF: Set a table filter on a Session Object.
+** METHOD: sqlite3_session
+**
+** The second argument (xFilter) is the "filter callback". For changes to rows
+** in tables that are not attached to the Session object, the filter is called
+** to determine whether changes to the table's rows should be tracked or not.
+** If xFilter returns 0, changes are not tracked. Note that once a table is
+** attached, xFilter will not be called again.
+*/
+SQLITE_API void sqlite3session_table_filter(
+ sqlite3_session *pSession, /* Session object */
+ int(*xFilter)(
+ void *pCtx, /* Copy of third arg to _filter_table() */
+ const char *zTab /* Table name */
+ ),
+ void *pCtx /* First argument passed to xFilter */
+);
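+
+/*
+** An illustrative sketch, not normative documentation: a filter callback
+** that tracks only tables whose names begin with "user_". The prefix is a
+** placeholder and strncmp() comes from <string.h>:
+**
+** <pre>
+**   static int xTabFilter(void *pCtx, const char *zTab){
+**     return strncmp(zTab, "user_", 5)==0;  // non-zero means "track it"
+**   }
+**
+**   sqlite3session_table_filter(pSession, xTabFilter, 0);
+** </pre>
+*/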
+
+/*
+** CAPI3REF: Generate A Changeset From A Session Object
+** METHOD: sqlite3_session
+**
+** Obtain a changeset containing changes to the tables attached to the
+** session object passed as the first argument. If successful,
+** set *ppChangeset to point to a buffer containing the changeset
+** and *pnChangeset to the size of the changeset in bytes before returning
+** SQLITE_OK. If an error occurs, set both *ppChangeset and *pnChangeset to
+** zero and return an SQLite error code.
+**
+** A changeset consists of zero or more INSERT, UPDATE and/or DELETE changes,
+** each representing a change to a single row of an attached table. An INSERT
+** change contains the values of each field of a new database row. A DELETE
+** contains the original values of each field of a deleted database row. An
+** UPDATE change contains the original values of each field of an updated
+** database row along with the updated values for each updated non-primary-key
+** column. It is not possible for an UPDATE change to represent a change that
+** modifies the values of primary key columns. If such a change is made, it
+** is represented in a changeset as a DELETE followed by an INSERT.
+**
+** Changes are not recorded for rows that have NULL values stored in one or
+** more of their PRIMARY KEY columns. If such a row is inserted or deleted,
+** no corresponding change is present in the changesets returned by this
+** function. If an existing row with one or more NULL values stored in
+** PRIMARY KEY columns is updated so that all PRIMARY KEY columns are non-NULL,
+** only an INSERT appears in the changeset. Similarly, if an existing row
+** with non-NULL PRIMARY KEY values is updated so that one or more of its
+** PRIMARY KEY columns are set to NULL, the resulting changeset contains a
+** DELETE change only.
+**
+** The contents of a changeset may be traversed using an iterator created
+** using the [sqlite3changeset_start()] API. A changeset may be applied to
+** a database with a compatible schema using the [sqlite3changeset_apply()]
+** API.
+**
+** Within a changeset generated by this function, all changes related to a
+** single table are grouped together. In other words, when iterating through
+** a changeset or when applying a changeset to a database, all changes related
+** to a single table are processed before moving on to the next table. Tables
+** are sorted in the same order in which they were attached (or auto-attached)
+** to the sqlite3_session object. The order in which the changes related to
+** a single table are stored is undefined.
+**
+** Following a successful call to this function, it is the responsibility of
+** the caller to eventually free the buffer that *ppChangeset points to using
+** [sqlite3_free()].
+**
+** <h3>Changeset Generation</h3>
+**
+** Once a table has been attached to a session object, the session object
+** records the primary key values of all new rows inserted into the table.
+** It also records the original primary key and other column values of any
+** deleted or updated rows. For each unique primary key value, data is only
+** recorded once - the first time a row with said primary key is inserted,
+** updated or deleted in the lifetime of the session.
+**
+** There is one exception to the previous paragraph: when a row is inserted,
+** updated or deleted, if one or more of its primary key columns contain a
+** NULL value, no record of the change is made.
+**
+** The session object therefore accumulates two types of records - those
+** that consist of primary key values only (created when the user inserts
+** a new record) and those that consist of the primary key values and the
+** original values of other table columns (created when the user deletes
+** or updates a record).
+**
+** When this function is called, the requested changeset is created using
+** both the accumulated records and the current contents of the database
+** file. Specifically:
+**
+** <ul>
+** <li> For each record generated by an insert, the database is queried
+** for a row with a matching primary key. If one is found, an INSERT
+** change is added to the changeset. If no such row is found, no change
+** is added to the changeset.
+**
+** <li> For each record generated by an update or delete, the database is
+** queried for a row with a matching primary key. If such a row is
+** found and one or more of the non-primary key fields have been
+** modified from their original values, an UPDATE change is added to
+** the changeset. Or, if no such row is found in the table, a DELETE
+** change is added to the changeset. If there is a row with a matching
+** primary key in the database, but all fields contain their original
+** values, no change is added to the changeset.
+** </ul>
+**
+** This means, amongst other things, that if a row is inserted and then later
+** deleted while a session object is active, neither the insert nor the delete
+** will be present in the changeset. Or if a row is deleted and then later a
+** row with the same primary key values inserted while a session object is
+** active, the resulting changeset will contain an UPDATE change instead of
+** a DELETE and an INSERT.
+**
+** When a session object is disabled (see the [sqlite3session_enable()] API),
+** it does not accumulate records when rows are inserted, updated or deleted.
+** This may appear to have some counter-intuitive effects if a single row
+** is written to more than once during a session. For example, if a row
+** is inserted while a session object is enabled, then later deleted while
+** the same session object is disabled, no INSERT record will appear in the
+** changeset, even though the delete took place while the session was disabled.
+** Or, if one field of a row is updated while a session is disabled, and
+** another field of the same row is updated while the session is enabled, the
+** resulting changeset will contain an UPDATE change that updates both fields.
+*/
+SQLITE_API int sqlite3session_changeset(
+ sqlite3_session *pSession, /* Session object */
+ int *pnChangeset, /* OUT: Size of buffer at *ppChangeset */
+ void **ppChangeset /* OUT: Buffer containing changeset */
+);
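+
+/*
+** An illustrative sketch, not normative documentation, of the usual
+** record-and-collect pattern. The table name "t1" is a placeholder:
+**
+** <pre>
+**   sqlite3_session *pSession = 0;
+**   int nChangeset = 0;
+**   void *pChangeset = 0;
+**   int rc = sqlite3session_create(db, "main", &pSession);
+**   if( rc==SQLITE_OK ) rc = sqlite3session_attach(pSession, "t1");
+**   // ... execute SQL that modifies table t1 ...
+**   if( rc==SQLITE_OK ){
+**     rc = sqlite3session_changeset(pSession, &nChangeset, &pChangeset);
+**   }
+**   // ... store, transmit or apply the changeset ...
+**   sqlite3_free(pChangeset);
+**   sqlite3session_delete(pSession);
+** </pre>
+*/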
+
+/*
+** CAPI3REF: Load The Difference Between Tables Into A Session
+** METHOD: sqlite3_session
+**
+** If it is not already attached to the session object passed as the first
+** argument, this function attaches table zTbl in the same manner as the
+** [sqlite3session_attach()] function. If zTbl does not exist, or if it
+** does not have a primary key, this function is a no-op (but does not return
+** an error).
+**
+** Argument zFromDb must be the name of a database ("main", "temp" etc.)
+** attached to the same database handle as the session object that contains
+** a table compatible with the table attached to the session by this function.
+** A table is considered compatible if it:
+**
+** <ul>
+** <li> Has the same name,
+** <li> Has the same set of columns declared in the same order, and
+** <li> Has the same PRIMARY KEY definition.
+** </ul>
+**
+** If the tables are not compatible, SQLITE_SCHEMA is returned. If the tables
+** are compatible but do not have any PRIMARY KEY columns, it is not an error
+** but no changes are added to the session object. As with other session
+** APIs, tables without PRIMARY KEYs are simply ignored.
+**
+** This function adds a set of changes to the session object that could be
+** used to update the table in database zFrom (call this the "from-table")
+** so that its content is the same as the table attached to the session
+** object (call this the "to-table"). Specifically:
+**
+** <ul>
+** <li> For each row (primary key) that exists in the to-table but not in
+** the from-table, an INSERT record is added to the session object.
+**
+** <li> For each row (primary key) that exists in the from-table but not in
+**      the to-table, a DELETE record is added to the session object.
+**
+** <li> For each row (primary key) that exists in both tables, but features
+** different non-PK values in each, an UPDATE record is added to the
+** session.
+** </ul>
+**
+** To clarify, if this function is called and then a changeset constructed
+** using [sqlite3session_changeset()], then after applying that changeset to
+** database zFrom the contents of the two compatible tables would be
+** identical.
+**
+** It is an error if database zFrom does not exist or does not contain the
+** required compatible table.
+**
+** If the operation is successful, SQLITE_OK is returned. Otherwise, an SQLite
+** error code is returned. In this case, if argument pzErrMsg is not NULL,
+** *pzErrMsg may be set to point to a buffer containing an English language
+** error message. It is the responsibility of the caller to free this buffer
+** using sqlite3_free().
+*/
+SQLITE_API int sqlite3session_diff(
+ sqlite3_session *pSession,
+ const char *zFromDb,
+ const char *zTbl,
+ char **pzErrMsg
+);
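+
+/*
+** An illustrative sketch, not normative documentation: loading the
+** difference between table "t1" in attached database "other" and the same
+** table in the session's own database. Both names are placeholders:
+**
+** <pre>
+**   char *zErrMsg = 0;
+**   int rc = sqlite3session_diff(pSession, "other", "t1", &zErrMsg);
+**   if( rc!=SQLITE_OK ){
+**     // zErrMsg may point to an English language error message
+**     sqlite3_free(zErrMsg);
+**   }
+** </pre>
+*/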
+
+
+/*
+** CAPI3REF: Generate A Patchset From A Session Object
+** METHOD: sqlite3_session
+**
+** The differences between a patchset and a changeset are that:
+**
+** <ul>
+** <li> DELETE records consist of the primary key fields only. The
+** original values of other fields are omitted.
+** <li> The original values of any modified fields are omitted from
+** UPDATE records.
+** </ul>
+**
+** A patchset blob may be used with up-to-date versions of all
+** sqlite3changeset_xxx API functions except for sqlite3changeset_invert(),
+** which returns SQLITE_CORRUPT if it is passed a patchset. Similarly,
+** attempting to use a patchset blob with old versions of the
+** sqlite3changeset_xxx APIs also provokes an SQLITE_CORRUPT error.
+**
+** Because the non-primary key "old.*" fields are omitted, no
+** SQLITE_CHANGESET_DATA conflicts can be detected or reported if a patchset
+** is passed to the sqlite3changeset_apply() API. Other conflict types work
+** in the same way as for changesets.
+**
+** Changes within a patchset are ordered in the same way as for changesets
+** generated by the sqlite3session_changeset() function (i.e. all changes for
+** a single table are grouped together, tables appear in the order in which
+** they were attached to the session object).
+*/
+SQLITE_API int sqlite3session_patchset(
+ sqlite3_session *pSession, /* Session object */
+ int *pnPatchset, /* OUT: Size of buffer at *ppPatchset */
+ void **ppPatchset /* OUT: Buffer containing patchset */
+);
+
+/*
+** CAPI3REF: Test if a session object has recorded any changes.
+**
+** Return non-zero if no changes to attached tables have been recorded by
+** the session object passed as the first argument. Otherwise, if one or
+** more changes have been recorded, return zero.
+**
+** Even if this function returns zero, it is possible that calling
+** [sqlite3session_changeset()] on the session handle may still return a
+** changeset that contains no changes. This can happen when a row in
+** an attached table is modified and then later on the original values
+** are restored. However, if this function returns non-zero, then it is
+** guaranteed that a call to sqlite3session_changeset() will return a
+** changeset containing zero changes.
+*/
+SQLITE_API int sqlite3session_isempty(sqlite3_session *pSession);
+
+/*
+** CAPI3REF: Create An Iterator To Traverse A Changeset
+** CONSTRUCTOR: sqlite3_changeset_iter
+**
+** Create an iterator used to iterate through the contents of a changeset.
+** If successful, *pp is set to point to the iterator handle and SQLITE_OK
+** is returned. Otherwise, if an error occurs, *pp is set to zero and an
+** SQLite error code is returned.
+**
+** The following functions can be used to advance and query a changeset
+** iterator created by this function:
+**
+** <ul>
+** <li> [sqlite3changeset_next()]
+** <li> [sqlite3changeset_op()]
+** <li> [sqlite3changeset_new()]
+** <li> [sqlite3changeset_old()]
+** </ul>
+**
+** It is the responsibility of the caller to eventually destroy the iterator
+** by passing it to [sqlite3changeset_finalize()]. The buffer containing the
+** changeset (pChangeset) must remain valid until after the iterator is
+** destroyed.
+**
+** Assuming the changeset blob was created by one of the
+** [sqlite3session_changeset()], [sqlite3changeset_concat()] or
+** [sqlite3changeset_invert()] functions, all changes within the changeset
+** that apply to a single table are grouped together. This means that when
+** an application iterates through a changeset using an iterator created by
+** this function, all changes that relate to a single table are visited
+** consecutively. There is no chance that the iterator will visit a change
+** that applies to table X, then one for table Y, and then later on visit
+** another change for table X.
+**
+** The behavior of sqlite3changeset_start_v2() and its streaming equivalent
+** may be modified by passing a combination of
+** [SQLITE_CHANGESETSTART_INVERT | supported flags] as the 4th parameter.
+**
+** Note that the sqlite3changeset_start_v2() API is still <b>experimental</b>
+** and therefore subject to change.
+*/
+SQLITE_API int sqlite3changeset_start(
+ sqlite3_changeset_iter **pp, /* OUT: New changeset iterator handle */
+ int nChangeset, /* Size of changeset blob in bytes */
+ void *pChangeset /* Pointer to blob containing changeset */
+);
+SQLITE_API int sqlite3changeset_start_v2(
+ sqlite3_changeset_iter **pp, /* OUT: New changeset iterator handle */
+ int nChangeset, /* Size of changeset blob in bytes */
+ void *pChangeset, /* Pointer to blob containing changeset */
+ int flags /* SESSION_CHANGESETSTART_* flags */
+);
+
+/*
+** CAPI3REF: Flags for sqlite3changeset_start_v2
+**
+** The following flags may be passed via the 4th parameter to
+** [sqlite3changeset_start_v2] and [sqlite3changeset_start_v2_strm]:
+**
+** <dt>SQLITE_CHANGESETSTART_INVERT <dd>
+** Invert the changeset while iterating through it. This is equivalent to
+** inverting a changeset using sqlite3changeset_invert() before applying it.
+** It is an error to specify this flag with a patchset.
+*/
+#define SQLITE_CHANGESETSTART_INVERT 0x0002
+
+
+/*
+** CAPI3REF: Advance A Changeset Iterator
+** METHOD: sqlite3_changeset_iter
+**
+** This function may only be used with iterators created by the function
+** [sqlite3changeset_start()]. If it is called on an iterator passed to
+** a conflict-handler callback by [sqlite3changeset_apply()], SQLITE_MISUSE
+** is returned and the call has no effect.
+**
+** Immediately after an iterator is created by sqlite3changeset_start(), it
+** does not point to any change in the changeset. Assuming the changeset
+** is not empty, the first call to this function advances the iterator to
+** point to the first change in the changeset. Each subsequent call advances
+** the iterator to point to the next change in the changeset (if any). If
+** no error occurs and the iterator points to a valid change after a call
+** to sqlite3changeset_next() has advanced it, SQLITE_ROW is returned.
+** Otherwise, if all changes in the changeset have already been visited,
+** SQLITE_DONE is returned.
+**
+** If an error occurs, an SQLite error code is returned. Possible error
+** codes include SQLITE_CORRUPT (if the changeset buffer is corrupt) or
+** SQLITE_NOMEM.
+*/
+SQLITE_API int sqlite3changeset_next(sqlite3_changeset_iter *pIter);
+
+/*
+** CAPI3REF: Obtain The Current Operation From A Changeset Iterator
+** METHOD: sqlite3_changeset_iter
+**
+** The pIter argument passed to this function may either be an iterator
+** passed to a conflict-handler by [sqlite3changeset_apply()], or an iterator
+** created by [sqlite3changeset_start()]. In the latter case, the most recent
+** call to [sqlite3changeset_next()] must have returned [SQLITE_ROW]. If this
+** is not the case, this function returns [SQLITE_MISUSE].
+**
+** If argument pzTab is not NULL, then *pzTab is set to point to a
+** nul-terminated utf-8 encoded string containing the name of the table
+** affected by the current change. The buffer remains valid until either
+** sqlite3changeset_next() is called on the iterator or until the
+** conflict-handler function returns. If pnCol is not NULL, then *pnCol is
+** set to the number of columns in the table affected by the change. If
+** pbIndirect is not NULL, then *pbIndirect is set to true (1) if the change
+** is an indirect change, or false (0) otherwise. See the documentation for
+** [sqlite3session_indirect()] for a description of direct and indirect
+** changes. Finally, if pOp is not NULL, then *pOp is set to one of
+** [SQLITE_INSERT], [SQLITE_DELETE] or [SQLITE_UPDATE], depending on the
+** type of change that the iterator currently points to.
+**
+** If no error occurs, SQLITE_OK is returned. If an error does occur, an
+** SQLite error code is returned. The values of the output variables may not
+** be trusted in this case.
+*/
+SQLITE_API int sqlite3changeset_op(
+ sqlite3_changeset_iter *pIter, /* Iterator object */
+ const char **pzTab, /* OUT: Pointer to table name */
+ int *pnCol, /* OUT: Number of columns in table */
+ int *pOp, /* OUT: SQLITE_INSERT, DELETE or UPDATE */
+ int *pbIndirect /* OUT: True for an 'indirect' change */
+);
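+
+/*
+** An illustrative sketch, not normative documentation: walking a changeset
+** (pChangeset/nChangeset, e.g. from [sqlite3session_changeset()]) and
+** inspecting the operation described by each change:
+**
+** <pre>
+**   sqlite3_changeset_iter *pIter = 0;
+**   int rc = sqlite3changeset_start(&pIter, nChangeset, pChangeset);
+**   if( rc==SQLITE_OK ){
+**     while( SQLITE_ROW==sqlite3changeset_next(pIter) ){
+**       const char *zTab; int nCol, op, bIndirect;
+**       sqlite3changeset_op(pIter, &zTab, &nCol, &op, &bIndirect);
+**       // op is one of SQLITE_INSERT, SQLITE_UPDATE or SQLITE_DELETE
+**     }
+**     rc = sqlite3changeset_finalize(pIter);
+**   }
+** </pre>
+*/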
+
+/*
+** CAPI3REF: Obtain The Primary Key Definition Of A Table
+** METHOD: sqlite3_changeset_iter
+**
+** For each modified table, a changeset includes the following:
+**
+** <ul>
+** <li> The number of columns in the table, and
+** <li> Which of those columns make up the table's PRIMARY KEY.
+** </ul>
+**
+** This function is used to find which columns comprise the PRIMARY KEY of
+** the table modified by the change that iterator pIter currently points to.
+** If successful, *pabPK is set to point to an array of nCol entries, where
+** nCol is the number of columns in the table. Elements of *pabPK are set to
+** 0x01 if the corresponding column is part of the table's primary key, or
+** 0x00 if it is not.
+**
+** If argument pnCol is not NULL, then *pnCol is set to the number of columns
+** in the table.
+**
+** If this function is called when the iterator does not point to a valid
+** entry, SQLITE_MISUSE is returned and the output variables zeroed. Otherwise,
+** SQLITE_OK is returned and the output variables populated as described
+** above.
+*/
+SQLITE_API int sqlite3changeset_pk(
+ sqlite3_changeset_iter *pIter, /* Iterator object */
+ unsigned char **pabPK, /* OUT: Array of boolean - true for PK cols */
+ int *pnCol /* OUT: Number of entries in output array */
+);
+
+/*
+** CAPI3REF: Obtain old.* Values From A Changeset Iterator
+** METHOD: sqlite3_changeset_iter
+**
+** The pIter argument passed to this function may either be an iterator
+** passed to a conflict-handler by [sqlite3changeset_apply()], or an iterator
+** created by [sqlite3changeset_start()]. In the latter case, the most recent
+** call to [sqlite3changeset_next()] must have returned SQLITE_ROW.
+** Furthermore, it may only be called if the type of change that the iterator
+** currently points to is either [SQLITE_DELETE] or [SQLITE_UPDATE]. Otherwise,
+** this function returns [SQLITE_MISUSE] and sets *ppValue to NULL.
+**
+** Argument iVal must be greater than or equal to 0, and less than the number
+** of columns in the table affected by the current change. Otherwise,
+** [SQLITE_RANGE] is returned and *ppValue is set to NULL.
+**
+** If successful, this function sets *ppValue to point to a protected
+** sqlite3_value object containing the iVal'th value from the vector of
+** original row values stored as part of the UPDATE or DELETE change and
+** returns SQLITE_OK. The name of the function comes from the fact that this
+** is similar to the "old.*" columns available to update or delete triggers.
+**
+** If some other error occurs (e.g. an OOM condition), an SQLite error code
+** is returned and *ppValue is set to NULL.
+*/
+SQLITE_API int sqlite3changeset_old(
+ sqlite3_changeset_iter *pIter, /* Changeset iterator */
+ int iVal, /* Column number */
+ sqlite3_value **ppValue /* OUT: Old value (or NULL pointer) */
+);
+
+/*
+** CAPI3REF: Obtain new.* Values From A Changeset Iterator
+** METHOD: sqlite3_changeset_iter
+**
+** The pIter argument passed to this function may either be an iterator
+** passed to a conflict-handler by [sqlite3changeset_apply()], or an iterator
+** created by [sqlite3changeset_start()]. In the latter case, the most recent
+** call to [sqlite3changeset_next()] must have returned SQLITE_ROW.
+** Furthermore, it may only be called if the type of change that the iterator
+** currently points to is either [SQLITE_UPDATE] or [SQLITE_INSERT]. Otherwise,
+** this function returns [SQLITE_MISUSE] and sets *ppValue to NULL.
+**
+** Argument iVal must be greater than or equal to 0, and less than the number
+** of columns in the table affected by the current change. Otherwise,
+** [SQLITE_RANGE] is returned and *ppValue is set to NULL.
+**
+** If successful, this function sets *ppValue to point to a protected
+** sqlite3_value object containing the iVal'th value from the vector of
+** new row values stored as part of the UPDATE or INSERT change and
+** returns SQLITE_OK. If the change is an UPDATE and does not include
+** a new value for the requested column, *ppValue is set to NULL and
+** SQLITE_OK returned. The name of the function comes from the fact that
+** this is similar to the "new.*" columns available to update or insert
+** triggers.
+**
+** If some other error occurs (e.g. an OOM condition), an SQLite error code
+** is returned and *ppValue is set to NULL.
+*/
+SQLITE_API int sqlite3changeset_new(
+ sqlite3_changeset_iter *pIter, /* Changeset iterator */
+ int iVal, /* Column number */
+ sqlite3_value **ppValue /* OUT: New value (or NULL pointer) */
+);
+
+/*
+** CAPI3REF: Obtain Conflicting Row Values From A Changeset Iterator
+** METHOD: sqlite3_changeset_iter
+**
+** This function should only be used with iterator objects passed to a
+** conflict-handler callback by [sqlite3changeset_apply()] with either
+** [SQLITE_CHANGESET_DATA] or [SQLITE_CHANGESET_CONFLICT]. If this function
+** is called on any other iterator, [SQLITE_MISUSE] is returned and *ppValue
+** is set to NULL.
+**
+** Argument iVal must be greater than or equal to 0, and less than the number
+** of columns in the table affected by the current change. Otherwise,
+** [SQLITE_RANGE] is returned and *ppValue is set to NULL.
+**
+** If successful, this function sets *ppValue to point to a protected
+** sqlite3_value object containing the iVal'th value from the
+** "conflicting row" associated with the current conflict-handler callback
+** and returns SQLITE_OK.
+**
+** If some other error occurs (e.g. an OOM condition), an SQLite error code
+** is returned and *ppValue is set to NULL.
+*/
+SQLITE_API int sqlite3changeset_conflict(
+ sqlite3_changeset_iter *pIter, /* Changeset iterator */
+ int iVal, /* Column number */
+ sqlite3_value **ppValue /* OUT: Value from conflicting row */
+);
+
+/*
+** CAPI3REF: Determine The Number Of Foreign Key Constraint Violations
+** METHOD: sqlite3_changeset_iter
+**
+** This function may only be called with an iterator passed to an
+** SQLITE_CHANGESET_FOREIGN_KEY conflict handler callback. In this case
+** it sets the output variable to the total number of known foreign key
+** violations in the destination database and returns SQLITE_OK.
+**
+** In all other cases this function returns SQLITE_MISUSE.
+*/
+SQLITE_API int sqlite3changeset_fk_conflicts(
+ sqlite3_changeset_iter *pIter, /* Changeset iterator */
+ int *pnOut /* OUT: Number of FK violations */
+);
+
+
+/*
+** CAPI3REF: Finalize A Changeset Iterator
+** METHOD: sqlite3_changeset_iter
+**
+** This function is used to finalize an iterator allocated with
+** [sqlite3changeset_start()].
+**
+** This function should only be called on iterators created using the
+** [sqlite3changeset_start()] function. If an application calls this
+** function with an iterator passed to a conflict-handler by
+** [sqlite3changeset_apply()], [SQLITE_MISUSE] is immediately returned and the
+** call has no effect.
+**
+** If an error was encountered within a call to an sqlite3changeset_xxx()
+** function (for example an [SQLITE_CORRUPT] in [sqlite3changeset_next()] or an
+** [SQLITE_NOMEM] in [sqlite3changeset_new()]) then an error code corresponding
+** to that error is returned by this function. Otherwise, SQLITE_OK is
+** returned. This is to allow the following pattern (pseudo-code):
+**
+** <pre>
+** sqlite3changeset_start();
+** while( SQLITE_ROW==sqlite3changeset_next() ){
+** // Do something with change.
+** }
+** rc = sqlite3changeset_finalize();
+** if( rc!=SQLITE_OK ){
+** // An error has occurred
+** }
+** </pre>
+*/
+SQLITE_API int sqlite3changeset_finalize(sqlite3_changeset_iter *pIter);
+
+/*
+** CAPI3REF: Invert A Changeset
+**
+** This function is used to "invert" a changeset object. Applying an inverted
+** changeset to a database reverses the effects of applying the uninverted
+** changeset. Specifically:
+**
+** <ul>
+** <li> Each DELETE change is changed to an INSERT, and
+** <li> Each INSERT change is changed to a DELETE, and
+** <li> For each UPDATE change, the old.* and new.* values are exchanged.
+** </ul>
+**
+** This function does not change the order in which changes appear within
+** the changeset. It merely reverses the sense of each individual change.
+**
+** If successful, a pointer to a buffer containing the inverted changeset
+** is stored in *ppOut, the size of the same buffer is stored in *pnOut, and
+** SQLITE_OK is returned. If an error occurs, both *pnOut and *ppOut are
+** zeroed and an SQLite error code returned.
+**
+** It is the responsibility of the caller to eventually call sqlite3_free()
+** on the *ppOut pointer to free the buffer allocation following a successful
+** call to this function.
+**
+** WARNING/TODO: This function currently assumes that the input is a valid
+** changeset. If it is not, the results are undefined.
+*/
+SQLITE_API int sqlite3changeset_invert(
+ int nIn, const void *pIn, /* Input changeset */
+ int *pnOut, void **ppOut /* OUT: Inverse of input */
+);
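+
+/*
+** An illustrative sketch, not normative documentation: producing and
+** releasing the inverse of a changeset held in pChangeset/nChangeset:
+**
+** <pre>
+**   int nInv = 0;
+**   void *pInv = 0;
+**   int rc = sqlite3changeset_invert(nChangeset, pChangeset, &nInv, &pInv);
+**   if( rc==SQLITE_OK ){
+**     // applying pInv undoes the changes described by pChangeset
+**     sqlite3_free(pInv);
+**   }
+** </pre>
+*/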
+
+/*
+** CAPI3REF: Concatenate Two Changeset Objects
+**
+** This function is used to concatenate two changesets, A and B, into a
+** single changeset. The result is a changeset equivalent to applying
+** changeset A followed by changeset B.
+**
+** This function combines the two input changesets using an
+** sqlite3_changegroup object. Calling it produces results similar to the
+** following code fragment:
+**
+** <pre>
+** sqlite3_changegroup *pGrp;
+**   rc = sqlite3changegroup_new(&pGrp);
+** if( rc==SQLITE_OK ) rc = sqlite3changegroup_add(pGrp, nA, pA);
+** if( rc==SQLITE_OK ) rc = sqlite3changegroup_add(pGrp, nB, pB);
+** if( rc==SQLITE_OK ){
+** rc = sqlite3changegroup_output(pGrp, pnOut, ppOut);
+** }else{
+** *ppOut = 0;
+** *pnOut = 0;
+** }
+** </pre>
+**
+** Refer to the sqlite3_changegroup documentation below for details.
+*/
+SQLITE_API int sqlite3changeset_concat(
+ int nA, /* Number of bytes in buffer pA */
+ void *pA, /* Pointer to buffer containing changeset A */
+ int nB, /* Number of bytes in buffer pB */
+ void *pB, /* Pointer to buffer containing changeset B */
+ int *pnOut, /* OUT: Number of bytes in output changeset */
+ void **ppOut /* OUT: Buffer containing output changeset */
+);
+
+
+/*
+** CAPI3REF: Changegroup Handle
+**
+** A changegroup is an object used to combine two or more
+** [changesets] or [patchsets]
+*/
+typedef struct sqlite3_changegroup sqlite3_changegroup;
+
+/*
+** CAPI3REF: Create A New Changegroup Object
+** CONSTRUCTOR: sqlite3_changegroup
+**
+** An sqlite3_changegroup object is used to combine two or more changesets
+** (or patchsets) into a single changeset (or patchset). A single changegroup
+** object may combine changesets or patchsets, but not both. The output is
+** always in the same format as the input.
+**
+** If successful, this function returns SQLITE_OK and populates (*pp) with
+** a pointer to a new sqlite3_changegroup object before returning. The caller
+** should eventually free the returned object using a call to
+** sqlite3changegroup_delete(). If an error occurs, an SQLite error code
+** (e.g. SQLITE_NOMEM) is returned and *pp is set to NULL.
+**
+** The usual usage pattern for an sqlite3_changegroup object is as follows:
+**
+** <ul>
+** <li> It is created using a call to sqlite3changegroup_new().
+**
+** <li> Zero or more changesets (or patchsets) are added to the object
+** by calling sqlite3changegroup_add().
+**
+** <li> The result of combining all input changesets together is obtained
+** by the application via a call to sqlite3changegroup_output().
+**
+** <li> The object is deleted using a call to sqlite3changegroup_delete().
+** </ul>
+**
+** Any number of calls to add() and output() may be made between the calls to
+** new() and delete(), and in any order.
+**
+** As well as the regular sqlite3changegroup_add() and
+** sqlite3changegroup_output() functions, also available are the streaming
+** versions sqlite3changegroup_add_strm() and sqlite3changegroup_output_strm().
+*/
+SQLITE_API int sqlite3changegroup_new(sqlite3_changegroup **pp);
+
+/*
+** CAPI3REF: Add A Changeset To A Changegroup
+** METHOD: sqlite3_changegroup
+**
+** Add all changes within the changeset (or patchset) in buffer pData (size
+** nData bytes) to the changegroup.
+**
+** If the buffer contains a patchset, then all prior calls to this function
+** on the same changegroup object must also have specified patchsets. Or, if
+** the buffer contains a changeset, so must have the earlier calls to this
+** function. Otherwise, SQLITE_ERROR is returned and no changes are added
+** to the changegroup.
+**
+** Rows within the changeset and changegroup are identified by the values in
+** their PRIMARY KEY columns. A change in the changeset is considered to
+** apply to the same row as a change already present in the changegroup if
+** the two rows have the same primary key.
+**
+** Changes to rows that do not already appear in the changegroup are
+** simply copied into it. Or, if both the new changeset and the changegroup
+** contain changes that apply to a single row, the final contents of the
+** changegroup depend on the type of each change, as follows:
+**
+** <table border=1 style="margin-left:8ex;margin-right:8ex">
+** <tr><th style="white-space:pre">Existing Change </th>
+** <th style="white-space:pre">New Change </th>
+** <th>Output Change
+** <tr><td>INSERT <td>INSERT <td>
+** The new change is ignored. This case does not occur if the new
+** changeset was recorded immediately after the changesets already
+** added to the changegroup.
+** <tr><td>INSERT <td>UPDATE <td>
+** The INSERT change remains in the changegroup. The values in the
+** INSERT change are modified as if the row was inserted by the
+** existing change and then updated according to the new change.
+** <tr><td>INSERT <td>DELETE <td>
+** The existing INSERT is removed from the changegroup. The DELETE is
+** not added.
+** <tr><td>UPDATE <td>INSERT <td>
+** The new change is ignored. This case does not occur if the new
+** changeset was recorded immediately after the changesets already
+** added to the changegroup.
+** <tr><td>UPDATE <td>UPDATE <td>
+** The existing UPDATE remains within the changegroup. It is amended
+** so that the accompanying values are as if the row was updated once
+** by the existing change and then again by the new change.
+** <tr><td>UPDATE <td>DELETE <td>
+** The existing UPDATE is replaced by the new DELETE within the
+** changegroup.
+** <tr><td>DELETE <td>INSERT <td>
+** If one or more of the column values in the row inserted by the
+** new change differ from those in the row deleted by the existing
+** change, the existing DELETE is replaced by an UPDATE within the
+** changegroup. Otherwise, if the inserted row is exactly the same
+** as the deleted row, the existing DELETE is simply discarded.
+** <tr><td>DELETE <td>UPDATE <td>
+** The new change is ignored. This case does not occur if the new
+** changeset was recorded immediately after the changesets already
+** added to the changegroup.
+** <tr><td>DELETE <td>DELETE <td>
+** The new change is ignored. This case does not occur if the new
+** changeset was recorded immediately after the changesets already
+** added to the changegroup.
+** </table>
+**
+** If the new changeset contains changes to a table that is already present
+** in the changegroup, then the number of columns and the position of the
+** primary key columns for the table must be consistent. If this is not the
+** case, this function fails with SQLITE_SCHEMA. If the input changeset
+** appears to be corrupt and the corruption is detected, SQLITE_CORRUPT is
+** returned. Or, if an out-of-memory condition occurs during processing, this
+** function returns SQLITE_NOMEM. In all cases, if an error occurs, the
+** final contents of the changegroup are undefined.
+**
+** If no error occurs, SQLITE_OK is returned.
+*/
+SQLITE_API int sqlite3changegroup_add(sqlite3_changegroup*, int nData, void *pData);
+
+/*
+** CAPI3REF: Obtain A Composite Changeset From A Changegroup
+** METHOD: sqlite3_changegroup
+**
+** Obtain a buffer containing a changeset (or patchset) representing the
+** current contents of the changegroup. If the inputs to the changegroup
+** were themselves changesets, the output is a changeset. Or, if the
+** inputs were patchsets, the output is also a patchset.
+**
+** As with the output of the sqlite3session_changeset() and
+** sqlite3session_patchset() functions, all changes related to a single
+** table are grouped together in the output of this function. Tables appear
+** in the same order as for the very first changeset added to the changegroup.
+** If the second or subsequent changesets added to the changegroup contain
+** changes for tables that do not appear in the first changeset, they are
+** appended onto the end of the output changeset, again in the order in
+** which they are first encountered.
+**
+** If an error occurs, an SQLite error code is returned and the output
+** variables (*pnData) and (*ppData) are set to 0. Otherwise, SQLITE_OK
+** is returned and the output variables are set to the size of and a
+** pointer to the output buffer, respectively. In this case it is the
+** responsibility of the caller to eventually free the buffer using a
+** call to sqlite3_free().
+*/
+SQLITE_API int sqlite3changegroup_output(
+ sqlite3_changegroup*,
+ int *pnData, /* OUT: Size of output buffer in bytes */
+ void **ppData /* OUT: Pointer to output buffer */
+);
+
+/*
+** CAPI3REF: Delete A Changegroup Object
+** DESTRUCTOR: sqlite3_changegroup
+*/
+SQLITE_API void sqlite3changegroup_delete(sqlite3_changegroup*);
+
+/*
+** CAPI3REF: Apply A Changeset To A Database
+**
+** Apply a changeset or patchset to a database. These functions attempt to
+** update the "main" database attached to handle db with the changes found in
+** the changeset passed via the second and third arguments.
+**
+** The fourth argument (xFilter) passed to these functions is the "filter
+** callback". If it is not NULL, then for each table affected by at least one
+** change in the changeset, the filter callback is invoked with
+** the table name as the second argument, and a copy of the context pointer
+** passed as the sixth argument as the first. If the "filter callback"
+** returns zero, then no attempt is made to apply any changes to the table.
+** Otherwise, if the return value is non-zero or the xFilter argument
+** is NULL, all changes related to the table are attempted.
+**
+** For each table that is not excluded by the filter callback, this function
+** tests that the target database contains a compatible table. A table is
+** considered compatible if all of the following are true:
+**
+** <ul>
+** <li> The table has the same name as the name recorded in the
+** changeset, and
+** <li> The table has at least as many columns as recorded in the
+** changeset, and
+** <li> The table has primary key columns in the same position as
+** recorded in the changeset.
+** </ul>
+**
+** If there is no compatible table, it is not an error, but none of the
+** changes associated with the table are applied. A warning message is issued
+** via the sqlite3_log() mechanism with the error code SQLITE_SCHEMA. At most
+** one such warning is issued for each table in the changeset.
+**
+** For each change for which there is a compatible table, an attempt is made
+** to modify the table contents according to the UPDATE, INSERT or DELETE
+** change. If a change cannot be applied cleanly, the conflict handler
+** function passed as the fifth argument to sqlite3changeset_apply() may be
+** invoked. A description of exactly when the conflict handler is invoked for
+** each type of change is below.
+**
+** Unlike the xFilter argument, xConflict may not be passed NULL. The results
+** of passing anything other than a valid function pointer as the xConflict
+** argument are undefined.
+**
+** Each time the conflict handler function is invoked, it must return one
+** of [SQLITE_CHANGESET_OMIT], [SQLITE_CHANGESET_ABORT] or
+** [SQLITE_CHANGESET_REPLACE]. SQLITE_CHANGESET_REPLACE may only be returned
+** if the second argument passed to the conflict handler is either
+** SQLITE_CHANGESET_DATA or SQLITE_CHANGESET_CONFLICT. If the conflict-handler
+** returns an illegal value, any changes already made are rolled back and
+** the call to sqlite3changeset_apply() returns SQLITE_MISUSE. Different
+** actions are taken by sqlite3changeset_apply() depending on the value
+** returned by each invocation of the conflict-handler function. Refer to
+** the documentation for the three
+** [SQLITE_CHANGESET_OMIT|available return values] for details.
+**
+** <dl>
+** <dt>DELETE Changes<dd>
+** For each DELETE change, the function checks if the target database
+** contains a row with the same primary key value (or values) as the
+** original row values stored in the changeset. If it does, and the values
+** stored in all non-primary key columns also match the values stored in
+** the changeset, the row is deleted from the target database.
+**
+** If a row with matching primary key values is found, but one or more of
+** the non-primary key fields contains a value different from the original
+** row value stored in the changeset, the conflict-handler function is
+** invoked with [SQLITE_CHANGESET_DATA] as the second argument. If the
+** database table has more columns than are recorded in the changeset,
+** only the values of those non-primary key fields are compared against
+** the current database contents - any trailing database table columns
+** are ignored.
+**
+** If no row with matching primary key values is found in the database,
+** the conflict-handler function is invoked with [SQLITE_CHANGESET_NOTFOUND]
+** passed as the second argument.
+**
+** If the DELETE operation is attempted, but SQLite returns SQLITE_CONSTRAINT
+** (which can only happen if a foreign key constraint is violated), the
+** conflict-handler function is invoked with [SQLITE_CHANGESET_CONSTRAINT]
+** passed as the second argument. This includes the case where the DELETE
+** operation is attempted because an earlier call to the conflict handler
+** function returned [SQLITE_CHANGESET_REPLACE].
+**
+** <dt>INSERT Changes<dd>
+** For each INSERT change, an attempt is made to insert the new row into
+** the database. If the changeset row contains fewer fields than the
+** database table, the trailing fields are populated with their default
+** values.
+**
+** If the attempt to insert the row fails because the database already
+** contains a row with the same primary key values, the conflict handler
+** function is invoked with the second argument set to
+** [SQLITE_CHANGESET_CONFLICT].
+**
+** If the attempt to insert the row fails because of some other constraint
+** violation (e.g. NOT NULL or UNIQUE), the conflict handler function is
+** invoked with the second argument set to [SQLITE_CHANGESET_CONSTRAINT].
+** This includes the case where the INSERT operation is re-attempted because
+** an earlier call to the conflict handler function returned
+** [SQLITE_CHANGESET_REPLACE].
+**
+** <dt>UPDATE Changes<dd>
+** For each UPDATE change, the function checks if the target database
+** contains a row with the same primary key value (or values) as the
+** original row values stored in the changeset. If it does, and the values
+** stored in all modified non-primary key columns also match the values
+** stored in the changeset, the row is updated within the target database.
+**
+** If a row with matching primary key values is found, but one or more of
+** the modified non-primary key fields contains a value different from an
+** original row value stored in the changeset, the conflict-handler function
+** is invoked with [SQLITE_CHANGESET_DATA] as the second argument. Since
+** UPDATE changes only contain values for non-primary key fields that are
+** to be modified, only those fields need to match the original values to
+** avoid the SQLITE_CHANGESET_DATA conflict-handler callback.
+**
+** If no row with matching primary key values is found in the database,
+** the conflict-handler function is invoked with [SQLITE_CHANGESET_NOTFOUND]
+** passed as the second argument.
+**
+** If the UPDATE operation is attempted, but SQLite returns
+** SQLITE_CONSTRAINT, the conflict-handler function is invoked with
+** [SQLITE_CHANGESET_CONSTRAINT] passed as the second argument.
+** This includes the case where the UPDATE operation is attempted after
+** an earlier call to the conflict handler function returned
+** [SQLITE_CHANGESET_REPLACE].
+** </dl>
+**
+** It is safe to execute SQL statements, including those that write to the
+** table that the callback relates to, from within the xConflict callback.
+** This can be used to further customize the application's conflict
+** resolution strategy.
+**
+** All changes made by these functions are enclosed in a savepoint transaction.
+** If any other error (aside from a constraint failure when attempting to
+** write to the target database) occurs, then the savepoint transaction is
+** rolled back, restoring the target database to its original state, and an
+** SQLite error code returned.
+**
+** If the output parameters (ppRebase) and (pnRebase) are non-NULL and
+** the input is a changeset (not a patchset), then sqlite3changeset_apply_v2()
+** may set (*ppRebase) to point to a "rebase" buffer that may be used with
+** the sqlite3_rebaser APIs before returning. In this case (*pnRebase)
+** is set to the size of the buffer in bytes. It is the responsibility of the
+** caller to eventually free any such buffer using sqlite3_free(). The buffer
+** is only allocated and populated if one or more conflicts were encountered
+** while applying the changeset. See comments surrounding the sqlite3_rebaser
+** APIs for further details.
+**
+** The behavior of sqlite3changeset_apply_v2() and its streaming equivalent
+** may be modified by passing a combination of
+** [SQLITE_CHANGESETAPPLY_NOSAVEPOINT | supported flags] as the 9th parameter.
+**
+** Note that the sqlite3changeset_apply_v2() API is still <b>experimental</b>
+** and therefore subject to change.
+*/
+SQLITE_API int sqlite3changeset_apply(
+ sqlite3 *db, /* Apply change to "main" db of this handle */
+ int nChangeset, /* Size of changeset in bytes */
+ void *pChangeset, /* Changeset blob */
+ int(*xFilter)(
+ void *pCtx, /* Copy of sixth arg to _apply() */
+ const char *zTab /* Table name */
+ ),
+ int(*xConflict)(
+ void *pCtx, /* Copy of sixth arg to _apply() */
+ int eConflict, /* DATA, NOTFOUND, CONFLICT, CONSTRAINT */
+ sqlite3_changeset_iter *p /* Handle describing change and conflict */
+ ),
+ void *pCtx /* First argument passed to xConflict */
+);
+SQLITE_API int sqlite3changeset_apply_v2(
+ sqlite3 *db, /* Apply change to "main" db of this handle */
+ int nChangeset, /* Size of changeset in bytes */
+ void *pChangeset, /* Changeset blob */
+ int(*xFilter)(
+ void *pCtx, /* Copy of sixth arg to _apply() */
+ const char *zTab /* Table name */
+ ),
+ int(*xConflict)(
+ void *pCtx, /* Copy of sixth arg to _apply() */
+ int eConflict, /* DATA, NOTFOUND, CONFLICT, CONSTRAINT */
+ sqlite3_changeset_iter *p /* Handle describing change and conflict */
+ ),
+ void *pCtx, /* First argument passed to xConflict */
+ void **ppRebase, int *pnRebase, /* OUT: Rebase data */
+ int flags /* SESSION_CHANGESETAPPLY_* flags */
+);
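+
+/*
+** Editorial usage sketch (not part of the upstream documentation): a
+** minimal filter/conflict-handler pair for sqlite3changeset_apply(). The
+** OMIT-every-conflict policy shown is purely illustrative; real
+** applications typically inspect the change via the
+** sqlite3_changeset_iter APIs before deciding.
+**
+** <pre>
+**   static int xFilterAll(void *pCtx, const char *zTab){
+**     return 1;                       // attempt changes for every table
+**   }
+**   static int xConflictOmit(
+**     void *pCtx,
+**     int eConflict,
+**     sqlite3_changeset_iter *p
+**   ){
+**     return SQLITE_CHANGESET_OMIT;   // skip any conflicting change
+**   }
+**
+**   static int applyChangeset(sqlite3 *db, int n, void *p){
+**     return sqlite3changeset_apply(db, n, p, xFilterAll, xConflictOmit, 0);
+**   }
+** </pre>
+*/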
+
+/*
+** CAPI3REF: Flags for sqlite3changeset_apply_v2
+**
+** The following flags may be passed via the 9th parameter to
+** [sqlite3changeset_apply_v2] and [sqlite3changeset_apply_v2_strm]:
+**
+** <dl>
+** <dt>SQLITE_CHANGESETAPPLY_NOSAVEPOINT <dd>
+** Usually, the sessions module encloses all operations performed by
+** a single call to apply_v2() or apply_v2_strm() in a [SAVEPOINT]. The
+** SAVEPOINT is committed if the changeset or patchset is successfully
+** applied, or rolled back if an error occurs. Specifying this flag
+** causes the sessions module to omit this savepoint. In this case, if the
+** caller has an open transaction or savepoint when apply_v2() is called,
+** it may revert the partially applied changeset by rolling it back.
+**
+** <dt>SQLITE_CHANGESETAPPLY_INVERT <dd>
+** Invert the changeset before applying it. This is equivalent to inverting
+** a changeset using sqlite3changeset_invert() before applying it. It is
+** an error to specify this flag with a patchset.
+*/
+#define SQLITE_CHANGESETAPPLY_NOSAVEPOINT 0x0001
+#define SQLITE_CHANGESETAPPLY_INVERT 0x0002
+
+/*
+** CAPI3REF: Constants Passed To The Conflict Handler
+**
+** Values that may be passed as the second argument to a conflict-handler.
+**
+** <dl>
+** <dt>SQLITE_CHANGESET_DATA<dd>
+** The conflict handler is invoked with CHANGESET_DATA as the second argument
+** when processing a DELETE or UPDATE change if a row with the required
+** PRIMARY KEY fields is present in the database, but one or more other
+** (non primary-key) fields modified by the update do not contain the
+** expected "before" values.
+**
+** The conflicting row, in this case, is the database row with the matching
+** primary key.
+**
+** <dt>SQLITE_CHANGESET_NOTFOUND<dd>
+** The conflict handler is invoked with CHANGESET_NOTFOUND as the second
+** argument when processing a DELETE or UPDATE change if a row with the
+** required PRIMARY KEY fields is not present in the database.
+**
+** There is no conflicting row in this case. The results of invoking the
+** sqlite3changeset_conflict() API are undefined.
+**
+** <dt>SQLITE_CHANGESET_CONFLICT<dd>
+** CHANGESET_CONFLICT is passed as the second argument to the conflict
+** handler while processing an INSERT change if the operation would result
+** in duplicate primary key values.
+**
+** The conflicting row in this case is the database row with the matching
+** primary key.
+**
+** <dt>SQLITE_CHANGESET_FOREIGN_KEY<dd>
+** If foreign key handling is enabled, and applying a changeset leaves the
+** database in a state containing foreign key violations, the conflict
+** handler is invoked with CHANGESET_FOREIGN_KEY as the second argument
+** exactly once before the changeset is committed. If the conflict handler
+** returns CHANGESET_OMIT, the changes, including those that caused the
+** foreign key constraint violation, are committed. Or, if it returns
+** CHANGESET_ABORT, the changeset is rolled back.
+**
+** No current or conflicting row information is provided. The only function
+** it is possible to call on the supplied sqlite3_changeset_iter handle
+** is sqlite3changeset_fk_conflicts().
+**
+** <dt>SQLITE_CHANGESET_CONSTRAINT<dd>
+** If any other constraint violation occurs while applying a change (i.e.
+** a UNIQUE, CHECK or NOT NULL constraint), the conflict handler is
+** invoked with CHANGESET_CONSTRAINT as the second argument.
+**
+** There is no conflicting row in this case. The results of invoking the
+** sqlite3changeset_conflict() API are undefined.
+**
+** </dl>
+*/
+#define SQLITE_CHANGESET_DATA 1
+#define SQLITE_CHANGESET_NOTFOUND 2
+#define SQLITE_CHANGESET_CONFLICT 3
+#define SQLITE_CHANGESET_CONSTRAINT 4
+#define SQLITE_CHANGESET_FOREIGN_KEY 5
+
+/*
+** CAPI3REF: Constants Returned By The Conflict Handler
+**
+** A conflict handler callback must return one of the following three values.
+**
+** <dl>
+** <dt>SQLITE_CHANGESET_OMIT<dd>
+** If a conflict handler returns this value, no special action is taken. The
+** change that caused the conflict is not applied. The session module
+** continues to the next change in the changeset.
+**
+** <dt>SQLITE_CHANGESET_REPLACE<dd>
+** This value may only be returned if the second argument to the conflict
+** handler was SQLITE_CHANGESET_DATA or SQLITE_CHANGESET_CONFLICT. If this
+** is not the case, any changes applied so far are rolled back and the
+** call to sqlite3changeset_apply() returns SQLITE_MISUSE.
+**
+** If CHANGESET_REPLACE is returned by an SQLITE_CHANGESET_DATA conflict
+** handler, then the conflicting row is either updated or deleted, depending
+** on the type of change.
+**
+** If CHANGESET_REPLACE is returned by an SQLITE_CHANGESET_CONFLICT conflict
+** handler, then the conflicting row is removed from the database and a
+** second attempt to apply the change is made. If this second attempt fails,
+** the original row is restored to the database before continuing.
+**
+** <dt>SQLITE_CHANGESET_ABORT<dd>
+** If this value is returned, any changes applied so far are rolled back
+** and the call to sqlite3changeset_apply() returns SQLITE_ABORT.
+** </dl>
+*/
+#define SQLITE_CHANGESET_OMIT 0
+#define SQLITE_CHANGESET_REPLACE 1
+#define SQLITE_CHANGESET_ABORT 2
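+
+/*
+** Editorial sketch (not part of the upstream documentation): a conflict
+** handler implementing a simple "latest writer wins" policy. REPLACE is
+** only legal for SQLITE_CHANGESET_DATA and SQLITE_CHANGESET_CONFLICT, so
+** every other conflict type falls back to OMIT here.
+**
+** <pre>
+**   static int xLastWriterWins(
+**     void *pCtx,
+**     int eConflict,
+**     sqlite3_changeset_iter *p
+**   ){
+**     if( eConflict==SQLITE_CHANGESET_DATA
+**      || eConflict==SQLITE_CHANGESET_CONFLICT
+**     ){
+**       return SQLITE_CHANGESET_REPLACE;
+**     }
+**     return SQLITE_CHANGESET_OMIT;
+**   }
+** </pre>
+*/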
+
+/*
+** CAPI3REF: Rebasing changesets
+** EXPERIMENTAL
+**
+** Suppose there is a site hosting a database in state S0, and that
+** modifications are made that move that database to state S1, with a
+** changeset recorded (the "local" changeset). Then, a changeset based
+** on S0 is received from another site (the "remote" changeset) and
+** applied to the database. The database is then in state
+** (S1+"remote"), where the exact state depends on any conflict
+** resolution decisions (OMIT or REPLACE) made while applying "remote".
+** Rebasing a changeset means updating it to take those conflict
+** resolution decisions into account, so that the same conflicts
+** do not have to be resolved elsewhere in the network.
+**
+** For example, if both the local and remote changesets contain an
+** INSERT of the same key into a table created with
+** "CREATE TABLE t1(a PRIMARY KEY, b)":
+**
+** local: INSERT INTO t1 VALUES(1, 'v1');
+** remote: INSERT INTO t1 VALUES(1, 'v2');
+**
+** and the conflict resolution is REPLACE, then the INSERT change is
+** removed from the local changeset (it was overridden). Or, if the
+** conflict resolution was "OMIT", then the local changeset is modified
+** to instead contain:
+**
+** UPDATE t1 SET b = 'v2' WHERE a=1;
+**
+** Changes within the local changeset are rebased as follows:
+**
+** <dl>
+** <dt>Local INSERT<dd>
+** This may only conflict with a remote INSERT. If the conflict
+** resolution was OMIT, then add an UPDATE change to the rebased
+** changeset. Or, if the conflict resolution was REPLACE, add
+** nothing to the rebased changeset.
+**
+** <dt>Local DELETE<dd>
+** This may conflict with a remote UPDATE or DELETE. In both cases the
+** only possible resolution is OMIT. If the remote operation was a
+** DELETE, then add no change to the rebased changeset. If the remote
+** operation was an UPDATE, then the old.* fields of the change are updated
+** to reflect the new.* values in the UPDATE.
+**
+** <dt>Local UPDATE<dd>
+** This may conflict with a remote UPDATE or DELETE. If it conflicts
+** with a DELETE, and the conflict resolution was OMIT, then the update
+** is changed into an INSERT. Any undefined values in the new.* record
+** from the update change are filled in using the old.* values from
+** the conflicting DELETE. Or, if the conflict resolution was REPLACE,
+** the UPDATE change is simply omitted from the rebased changeset.
+**
+** If the conflict is with a remote UPDATE and the resolution is OMIT, then
+** the old.* values are rebased using the new.* values in the remote
+** change. Or, if the resolution is REPLACE, then the change is copied
+** into the rebased changeset with updates to columns also updated by
+** the conflicting remote UPDATE removed. If this means no columns would
+** be updated, the change is omitted.
+** </dl>
+**
+** A local change may be rebased against multiple remote changes
+** simultaneously. If a single key is modified by multiple remote
+** changesets, they are combined as follows before the local changeset
+** is rebased:
+**
+** <ul>
+** <li> If there has been one or more REPLACE resolutions on a
+** key, it is rebased according to a REPLACE.
+**
+** <li> If there have been no REPLACE resolutions on a key, then
+** the local changeset is rebased according to the most recent
+** of the OMIT resolutions.
+** </ul>
+**
+** Note that conflict resolutions from multiple remote changesets are
+** combined on a per-field basis, not per-row. This means that in the
+** case of multiple remote UPDATE operations, some fields of a single
+** local change may be rebased for REPLACE while others are rebased for
+** OMIT.
+**
+** In order to rebase a local changeset, the remote changeset must first
+** be applied to the local database using sqlite3changeset_apply_v2() and
+** the buffer of rebase information captured. Then:
+**
+** <ol>
+** <li> An sqlite3_rebaser object is created by calling
+** sqlite3rebaser_create().
+** <li> The new object is configured with the rebase buffer obtained from
+** sqlite3changeset_apply_v2() by calling sqlite3rebaser_configure().
+** If the local changeset is to be rebased against multiple remote
+** changesets, then sqlite3rebaser_configure() should be called
+** multiple times, in the same order that the multiple
+** sqlite3changeset_apply_v2() calls were made.
+** <li> Each local changeset is rebased by calling sqlite3rebaser_rebase().
+** <li> The sqlite3_rebaser object is deleted by calling
+** sqlite3rebaser_delete().
+** </ol>
+*/
+typedef struct sqlite3_rebaser sqlite3_rebaser;
+
+/*
+** CAPI3REF: Create a changeset rebaser object.
+** EXPERIMENTAL
+**
+** Allocate a new changeset rebaser object. If successful, set (*ppNew) to
+** point to the new object and return SQLITE_OK. Otherwise, if an error
+** occurs, return an SQLite error code (e.g. SQLITE_NOMEM) and set (*ppNew)
+** to NULL.
+*/
+SQLITE_API int sqlite3rebaser_create(sqlite3_rebaser **ppNew);
+
+/*
+** CAPI3REF: Configure a changeset rebaser object.
+** EXPERIMENTAL
+**
+** Configure the changeset rebaser object to rebase changesets according
+** to the conflict resolutions described by buffer pRebase (size nRebase
+** bytes), which must have been obtained from a previous call to
+** sqlite3changeset_apply_v2().
+*/
+SQLITE_API int sqlite3rebaser_configure(
+ sqlite3_rebaser*,
+ int nRebase, const void *pRebase
+);
+
+/*
+** CAPI3REF: Rebase a changeset
+** EXPERIMENTAL
+**
+** Argument pIn must point to a buffer containing a changeset nIn bytes
+** in size. This function allocates and populates a buffer with a copy
+** of the changeset rebased according to the configuration of the
+** rebaser object passed as the first argument. If successful, (*ppOut)
+** is set to point to the new buffer containing the rebased changeset and
+** (*pnOut) to its size in bytes and SQLITE_OK returned. It is the
+** responsibility of the caller to eventually free the new buffer using
+** sqlite3_free(). Otherwise, if an error occurs, (*ppOut) and (*pnOut)
+** are set to zero and an SQLite error code returned.
+*/
+SQLITE_API int sqlite3rebaser_rebase(
+ sqlite3_rebaser*,
+ int nIn, const void *pIn,
+ int *pnOut, void **ppOut
+);
+
+/*
+** CAPI3REF: Delete a changeset rebaser object.
+** EXPERIMENTAL
+**
+** Delete the changeset rebaser object and all associated resources. There
+** should be one call to this function for each successful invocation
+** of sqlite3rebaser_create().
+*/
+SQLITE_API void sqlite3rebaser_delete(sqlite3_rebaser *p);
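+
+/*
+** Editorial sketch (not part of the upstream documentation): rebasing a
+** local changeset against a single remote changeset, following the four
+** steps listed above. A trivial OMIT-everything conflict handler is used
+** purely for illustration.
+**
+** <pre>
+**   static int xOmit(void *pCtx, int eConflict, sqlite3_changeset_iter *p){
+**     return SQLITE_CHANGESET_OMIT;
+**   }
+**
+**   static int rebaseLocal(
+**     sqlite3 *db,
+**     int nRemote, void *pRemote,    // remote changeset to apply
+**     int nLocal, void *pLocal,      // local changeset to rebase
+**     int *pnOut, void **ppOut       // OUT: rebased local changeset
+**   ){
+**     void *pRebase = 0;
+**     int nRebase = 0;
+**     sqlite3_rebaser *pRebaser = 0;
+**     int rc = sqlite3changeset_apply_v2(db, nRemote, pRemote,
+**         0, xOmit, 0, &pRebase, &nRebase, 0);
+**     if( rc==SQLITE_OK ) rc = sqlite3rebaser_create(&pRebaser);
+**     if( rc==SQLITE_OK ){
+**       rc = sqlite3rebaser_configure(pRebaser, nRebase, pRebase);
+**     }
+**     if( rc==SQLITE_OK ){
+**       rc = sqlite3rebaser_rebase(pRebaser, nLocal, pLocal, pnOut, ppOut);
+**     }
+**     if( pRebaser ) sqlite3rebaser_delete(pRebaser);
+**     sqlite3_free(pRebase);
+**     return rc;
+**   }
+** </pre>
+*/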
+
+/*
+** CAPI3REF: Streaming Versions of API functions.
+**
+** The streaming API xxx_strm() functions serve similar purposes to the
+** corresponding non-streaming API functions:
+**
+** <table border=1 style="margin-left:8ex;margin-right:8ex">
+** <tr><th>Streaming function<th>Non-streaming equivalent</th>
+** <tr><td>sqlite3changeset_apply_strm<td>[sqlite3changeset_apply]
+** <tr><td>sqlite3changeset_apply_v2_strm<td>[sqlite3changeset_apply_v2]
+** <tr><td>sqlite3changeset_concat_strm<td>[sqlite3changeset_concat]
+** <tr><td>sqlite3changeset_invert_strm<td>[sqlite3changeset_invert]
+** <tr><td>sqlite3changeset_start_strm<td>[sqlite3changeset_start]
+** <tr><td>sqlite3session_changeset_strm<td>[sqlite3session_changeset]
+** <tr><td>sqlite3session_patchset_strm<td>[sqlite3session_patchset]
+** </table>
+**
+** Non-streaming functions that accept changesets (or patchsets) as input
+** require that the entire changeset be stored in a single buffer in memory.
+** Similarly, those that return a changeset or patchset do so by returning
+** a pointer to a single large buffer allocated using sqlite3_malloc().
+** Normally this is convenient. However, if an application running in a
+** low-memory environment is required to handle very large changesets, the
+** large contiguous memory allocations required can become onerous.
+**
+** In order to avoid this problem, instead of a single large buffer, input
+** is passed to the streaming API functions by way of a callback function that
+** the sessions module invokes to incrementally request input data as it is
+** required. In all cases, a pair of API function parameters such as
+**
+** <pre>
+** &nbsp; int nChangeset,
+** &nbsp; void *pChangeset,
+** </pre>
+**
+** is replaced by:
+**
+** <pre>
+** &nbsp; int (*xInput)(void *pIn, void *pData, int *pnData),
+** &nbsp; void *pIn,
+** </pre>
+**
+** Each time the xInput callback is invoked by the sessions module, the first
+** argument passed is a copy of the supplied pIn context pointer. The second
+** argument, pData, points to a buffer (*pnData) bytes in size. Assuming no
+** error occurs the xInput method should copy up to (*pnData) bytes of data
+** into the buffer and set (*pnData) to the actual number of bytes copied
+** before returning SQLITE_OK. If the input is completely exhausted, (*pnData)
+** should be set to zero to indicate this. Or, if an error occurs, an SQLite
+** error code should be returned. In all cases, if an xInput callback returns
+** an error, all processing is abandoned and the streaming API function
+** returns a copy of the error code to the caller.
+**
+** In the case of sqlite3changeset_start_strm(), the xInput callback may be
+** invoked by the sessions module at any point during the lifetime of the
+** iterator. If such an xInput callback returns an error, the iterator enters
+** an error state, whereby all subsequent calls to iterator functions
+** immediately fail with the same error code as returned by xInput.
+**
+** Similarly, streaming API functions that return changesets (or patchsets)
+** return them in chunks by way of a callback function instead of via a
+** pointer to a single large buffer. In this case, a pair of parameters such
+** as:
+**
+** <pre>
+** &nbsp; int *pnChangeset,
+** &nbsp; void **ppChangeset,
+** </pre>
+**
+** is replaced by:
+**
+** <pre>
+** &nbsp; int (*xOutput)(void *pOut, const void *pData, int nData),
+** &nbsp; void *pOut
+** </pre>
+**
+** The xOutput callback is invoked zero or more times to return data to
+** the application. The first parameter passed to each call is a copy of the
+** pOut pointer supplied by the application. The second parameter, pData,
+** points to a buffer nData bytes in size containing the chunk of output
+** data being returned. If the xOutput callback successfully processes the
+** supplied data, it should return SQLITE_OK to indicate success. Otherwise,
+** it should return some other SQLite error code. In this case processing
+** is immediately abandoned and the streaming API function returns a copy
+** of the xOutput error code to the application.
+**
+** The sessions module never invokes an xOutput callback with the third
+** parameter set to a value less than or equal to zero. Other than this,
+** no guarantees are made as to the size of the chunks of data returned.
+*/
+SQLITE_API int sqlite3changeset_apply_strm(
+ sqlite3 *db, /* Apply change to "main" db of this handle */
+ int (*xInput)(void *pIn, void *pData, int *pnData), /* Input function */
+ void *pIn, /* First arg for xInput */
+ int(*xFilter)(
+ void *pCtx, /* Copy of sixth arg to _apply() */
+ const char *zTab /* Table name */
+ ),
+ int(*xConflict)(
+ void *pCtx, /* Copy of sixth arg to _apply() */
+ int eConflict, /* DATA, NOTFOUND, CONFLICT, CONSTRAINT */
+ sqlite3_changeset_iter *p /* Handle describing change and conflict */
+ ),
+ void *pCtx /* First argument passed to xConflict */
+);
+SQLITE_API int sqlite3changeset_apply_v2_strm(
+ sqlite3 *db, /* Apply change to "main" db of this handle */
+ int (*xInput)(void *pIn, void *pData, int *pnData), /* Input function */
+ void *pIn, /* First arg for xInput */
+ int(*xFilter)(
+ void *pCtx, /* Copy of sixth arg to _apply() */
+ const char *zTab /* Table name */
+ ),
+ int(*xConflict)(
+ void *pCtx, /* Copy of sixth arg to _apply() */
+ int eConflict, /* DATA, NOTFOUND, CONFLICT, CONSTRAINT */
+ sqlite3_changeset_iter *p /* Handle describing change and conflict */
+ ),
+ void *pCtx, /* First argument passed to xConflict */
+ void **ppRebase, int *pnRebase,
+ int flags
+);
+SQLITE_API int sqlite3changeset_concat_strm(
+ int (*xInputA)(void *pIn, void *pData, int *pnData),
+ void *pInA,
+ int (*xInputB)(void *pIn, void *pData, int *pnData),
+ void *pInB,
+ int (*xOutput)(void *pOut, const void *pData, int nData),
+ void *pOut
+);
+SQLITE_API int sqlite3changeset_invert_strm(
+ int (*xInput)(void *pIn, void *pData, int *pnData),
+ void *pIn,
+ int (*xOutput)(void *pOut, const void *pData, int nData),
+ void *pOut
+);
+SQLITE_API int sqlite3changeset_start_strm(
+ sqlite3_changeset_iter **pp,
+ int (*xInput)(void *pIn, void *pData, int *pnData),
+ void *pIn
+);
+SQLITE_API int sqlite3changeset_start_v2_strm(
+ sqlite3_changeset_iter **pp,
+ int (*xInput)(void *pIn, void *pData, int *pnData),
+ void *pIn,
+ int flags
+);
+SQLITE_API int sqlite3session_changeset_strm(
+ sqlite3_session *pSession,
+ int (*xOutput)(void *pOut, const void *pData, int nData),
+ void *pOut
+);
+SQLITE_API int sqlite3session_patchset_strm(
+ sqlite3_session *pSession,
+ int (*xOutput)(void *pOut, const void *pData, int nData),
+ void *pOut
+);
+SQLITE_API int sqlite3changegroup_add_strm(sqlite3_changegroup*,
+ int (*xInput)(void *pIn, void *pData, int *pnData),
+ void *pIn
+);
+SQLITE_API int sqlite3changegroup_output_strm(sqlite3_changegroup*,
+ int (*xOutput)(void *pOut, const void *pData, int nData),
+ void *pOut
+);
+SQLITE_API int sqlite3rebaser_rebase_strm(
+ sqlite3_rebaser *pRebaser,
+ int (*xInput)(void *pIn, void *pData, int *pnData),
+ void *pIn,
+ int (*xOutput)(void *pOut, const void *pData, int nData),
+ void *pOut
+);
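+
+/*
+** Editorial sketch (not part of the upstream documentation): xInput and
+** xOutput callbacks layered over stdio FILE* handles, as might be used
+** with sqlite3changeset_invert_strm() or sqlite3session_changeset_strm().
+** Assumes <stdio.h> has been included.
+**
+** <pre>
+**   static int xInputFile(void *pIn, void *pData, int *pnData){
+**     FILE *f = (FILE*)pIn;
+**     size_t n = fread(pData, 1, (size_t)*pnData, f);
+**     if( n<(size_t)*pnData && ferror(f) ) return SQLITE_IOERR_READ;
+**     *pnData = (int)n;               // becomes 0 once input is exhausted
+**     return SQLITE_OK;
+**   }
+**   static int xOutputFile(void *pOut, const void *pData, int nData){
+**     FILE *f = (FILE*)pOut;
+**     if( fwrite(pData, 1, (size_t)nData, f)!=(size_t)nData ){
+**       return SQLITE_IOERR_WRITE;
+**     }
+**     return SQLITE_OK;
+**   }
+** </pre>
+*/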
+
+/*
+** CAPI3REF: Configure global parameters
+**
+** The sqlite3session_config() interface is used to make global configuration
+** changes to the sessions module in order to tune it to the specific needs
+** of the application.
+**
+** The sqlite3session_config() interface is not threadsafe. If it is invoked
+** while any other thread is inside any other sessions method then the
+** results are undefined. Furthermore, if it is invoked after any sessions
+** related objects have been created, the results are also undefined.
+**
+** The first argument to the sqlite3session_config() function must be one
+** of the SQLITE_SESSION_CONFIG_XXX constants defined below. The
+** interpretation of the (void*) value passed as the second parameter and
+** the effect of calling this function depends on the value of the first
+** parameter.
+**
+** <dl>
+** <dt>SQLITE_SESSION_CONFIG_STRMSIZE<dd>
+** By default, the sessions module streaming interfaces attempt to input
+** and output data in approximately 1 KiB chunks. This operand may be used
+** to set and query the value of this configuration setting. The pointer
+** passed as the second argument must point to a value of type (int).
+** If this value is greater than 0, it is used as the new streaming data
+** chunk size for both input and output. Before returning, the (int) value
+** pointed to by pArg is set to the final value of the streaming interface
+** chunk size.
+** </dl>
+**
+** This function returns SQLITE_OK if successful, or an SQLite error code
+** otherwise.
+*/
+SQLITE_API int sqlite3session_config(int op, void *pArg);
+
+/*
+** CAPI3REF: Values for sqlite3session_config().
+*/
+#define SQLITE_SESSION_CONFIG_STRMSIZE 1
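+
+/*
+** Editorial sketch (not part of the upstream documentation): setting the
+** streaming chunk size to 64 KiB. As described above, this must happen
+** before any sessions-related objects exist; because the (int) value is
+** read back before returning, the same call reports the value actually
+** in effect.
+**
+** <pre>
+**   int szChunk = 64*1024;
+**   int rc = sqlite3session_config(SQLITE_SESSION_CONFIG_STRMSIZE, &szChunk);
+**   // on SQLITE_OK, szChunk now holds the configured chunk size
+** </pre>
+*/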
+
+/*
+** Make sure we can call this stuff from C++.
+*/
+#if 0
+}
+#endif
+
+#endif /* !defined(__SQLITESESSION_H_) && defined(SQLITE_ENABLE_SESSION) */
+
+/******** End of sqlite3session.h *********/
+/******** Begin file fts5.h *********/
+/*
+** 2014 May 31
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+******************************************************************************
+**
+** Interfaces to extend FTS5. Using the interfaces defined in this file,
+** FTS5 may be extended with:
+**
+** * custom tokenizers, and
+** * custom auxiliary functions.
+*/
+
+
+#ifndef _FTS5_H
+#define _FTS5_H
+
+
+#if 0
+extern "C" {
+#endif
+
+/*************************************************************************
+** CUSTOM AUXILIARY FUNCTIONS
+**
+** Virtual table implementations may overload SQL functions by implementing
+** the sqlite3_module.xFindFunction() method.
+*/
+
+typedef struct Fts5ExtensionApi Fts5ExtensionApi;
+typedef struct Fts5Context Fts5Context;
+typedef struct Fts5PhraseIter Fts5PhraseIter;
+
+typedef void (*fts5_extension_function)(
+ const Fts5ExtensionApi *pApi, /* API offered by current FTS version */
+ Fts5Context *pFts, /* First arg to pass to pApi functions */
+ sqlite3_context *pCtx, /* Context for returning result/error */
+ int nVal, /* Number of values in apVal[] array */
+ sqlite3_value **apVal /* Array of trailing arguments */
+);
+
+struct Fts5PhraseIter {
+ const unsigned char *a;
+ const unsigned char *b;
+};
+
+/*
+** EXTENSION API FUNCTIONS
+**
+** xUserData(pFts):
+** Return a copy of the context pointer the extension function was
+** registered with.
+**
+** xColumnTotalSize(pFts, iCol, pnToken):
+** If parameter iCol is less than zero, set output variable *pnToken
+** to the total number of tokens in the FTS5 table. Or, if iCol is
+** non-negative but less than the number of columns in the table, set
+** *pnToken to the total number of tokens in column iCol, considering
+** all rows in the FTS5 table.
+**
+** If parameter iCol is greater than or equal to the number of columns
+** in the table, SQLITE_RANGE is returned. Or, if an error occurs (e.g.
+** an OOM condition or IO error), an appropriate SQLite error code is
+** returned.
+**
+** xColumnCount(pFts):
+** Return the number of columns in the table.
+**
+** xColumnSize(pFts, iCol, pnToken):
+** If parameter iCol is less than zero, set output variable *pnToken
+** to the total number of tokens in the current row. Or, if iCol is
+** non-negative but less than the number of columns in the table, set
+** *pnToken to the number of tokens in column iCol of the current row.
+**
+** If parameter iCol is greater than or equal to the number of columns
+** in the table, SQLITE_RANGE is returned. Or, if an error occurs (e.g.
+** an OOM condition or IO error), an appropriate SQLite error code is
+** returned.
+**
+** This function may be quite inefficient if used with an FTS5 table
+** created with the "columnsize=0" option.
+**
+** xColumnText:
+** This function attempts to retrieve the text of column iCol of the
+** current document. If successful, (*pz) is set to point to a buffer
+** containing the text in utf-8 encoding, (*pn) is set to the size in bytes
+** (not characters) of the buffer and SQLITE_OK is returned. Otherwise,
+** if an error occurs, an SQLite error code is returned and the final values
+** of (*pz) and (*pn) are undefined.
+**
+** xPhraseCount:
+** Returns the number of phrases in the current query expression.
+**
+** xPhraseSize:
+** Returns the number of tokens in phrase iPhrase of the query. Phrases
+** are numbered starting from zero.
+**
+** xInstCount:
+** Set *pnInst to the total number of occurrences of all phrases within
+** the query within the current row. Return SQLITE_OK if successful, or
+** an error code (e.g. SQLITE_NOMEM) if an error occurs.
+**
+** This API can be quite slow if used with an FTS5 table created with the
+** "detail=none" or "detail=column" option. If the FTS5 table is created
+** with either "detail=none" or "detail=column" and "content=" option
+** (i.e. if it is a contentless table), then this API always returns 0.
+**
+** xInst:
+** Query for the details of phrase match iIdx within the current row.
+** Phrase matches are numbered starting from zero, so the iIdx argument
+** should be greater than or equal to zero and smaller than the value
+** output by xInstCount().
+**
+** Usually, output parameter *piPhrase is set to the phrase number, *piCol
+** to the column in which it occurs and *piOff the token offset of the
+** first token of the phrase. Returns SQLITE_OK if successful, or an error
+** code (e.g. SQLITE_NOMEM) if an error occurs.
+**
+** This API can be quite slow if used with an FTS5 table created with the
+** "detail=none" or "detail=column" option.
+**
+** xRowid:
+** Returns the rowid of the current row.
+**
+** xTokenize:
+** Tokenize text using the tokenizer belonging to the FTS5 table.
+**
+** xQueryPhrase(pFts5, iPhrase, pUserData, xCallback):
+** This API function is used to query the FTS table for phrase iPhrase
+** of the current query. Specifically, a query equivalent to:
+**
+** ... FROM ftstable WHERE ftstable MATCH $p ORDER BY rowid
+**
+** with $p set to a phrase equivalent to the phrase iPhrase of the
+** current query is executed. Any column filter that applies to
+** phrase iPhrase of the current query is included in $p. For each
+** row visited, the callback function passed as the fourth argument
+** is invoked. The context and API objects passed to the callback
+** function may be used to access the properties of each matched row.
+** Invoking Api.xUserData() returns a copy of the pointer passed to
+** xQueryPhrase() as the third argument (pUserData).
+**
+** If the callback function returns any value other than SQLITE_OK, the
+** query is abandoned and the xQueryPhrase function returns immediately.
+** If the returned value is SQLITE_DONE, xQueryPhrase returns SQLITE_OK.
+** Otherwise, the error code is propagated upwards.
+**
+** If the query runs to completion without incident, SQLITE_OK is returned.
+** Or, if some error occurs before the query completes or is aborted by
+** the callback, an SQLite error code is returned.
+**
+**
+** xSetAuxdata(pFts5, pAux, xDelete)
+**
+** Save the pointer passed as the second argument as the extension function's
+** "auxiliary data". The pointer may then be retrieved by the current or any
+** future invocation of the same fts5 extension function made as part of
+** the same MATCH query using the xGetAuxdata() API.
+**
+** Each extension function is allocated a single auxiliary data slot for
+** each FTS query (MATCH expression). If the extension function is invoked
+** more than once for a single FTS query, then all invocations share a
+** single auxiliary data context.
+**
+** If there is already an auxiliary data pointer when this function is
+** invoked, then it is replaced by the new pointer. If an xDelete callback
+** was specified along with the original pointer, it is invoked at this
+** point.
+**
+** The xDelete callback, if one is specified, is also invoked on the
+** auxiliary data pointer after the FTS5 query has finished.
+**
+** If an error (e.g. an OOM condition) occurs within this function,
+** the auxiliary data is set to NULL and an error code returned. If the
+** xDelete parameter was not NULL, it is invoked on the auxiliary data
+** pointer before returning.
+**
+**
+** xGetAuxdata(pFts5, bClear)
+**
+** Returns the current auxiliary data pointer for the fts5 extension
+** function. See the xSetAuxdata() method for details.
+**
+** If the bClear argument is non-zero, then the auxiliary data is cleared
+** (set to NULL) before this function returns. In this case the xDelete
+** callback, if any, is not invoked.
+**
+**
+** xRowCount(pFts5, pnRow)
+**
+** This function is used to retrieve the total number of rows in the table.
+** In other words, the same value that would be returned by:
+**
+** SELECT count(*) FROM ftstable;
+**
+** xPhraseFirst()
+** This function is used, along with type Fts5PhraseIter and the xPhraseNext
+** method, to iterate through all instances of a single query phrase within
+** the current row. This is the same information as is accessible via the
+** xInstCount/xInst APIs. While the xInstCount/xInst APIs are more convenient
+** to use, this API may be faster under some circumstances. To iterate
+** through instances of phrase iPhrase, use the following code:
+**
+** Fts5PhraseIter iter;
+** int iCol, iOff;
+** for(pApi->xPhraseFirst(pFts, iPhrase, &iter, &iCol, &iOff);
+** iCol>=0;
+** pApi->xPhraseNext(pFts, &iter, &iCol, &iOff)
+** ){
+** // An instance of phrase iPhrase at offset iOff of column iCol
+** }
+**
+** The Fts5PhraseIter structure is defined above. Applications should not
+** modify this structure directly - it should only be used as shown above
+** with the xPhraseFirst() and xPhraseNext() API methods (and by
+** xPhraseFirstColumn() and xPhraseNextColumn() as illustrated below).
+**
+** This API can be quite slow if used with an FTS5 table created with the
+** "detail=none" or "detail=column" option. If the FTS5 table is created
+** with either "detail=none" or "detail=column" and "content=" option
+** (i.e. if it is a contentless table), then this API always iterates
+** through an empty set (all calls to xPhraseFirst() set iCol to -1).
+**
+** xPhraseNext()
+** See xPhraseFirst above.
+**
+** xPhraseFirstColumn()
+** This function and xPhraseNextColumn() are similar to the xPhraseFirst()
+** and xPhraseNext() APIs described above. The difference is that instead
+** of iterating through all instances of a phrase in the current row, these
+** APIs are used to iterate through the set of columns in the current row
+** that contain one or more instances of a specified phrase. For example:
+**
+** Fts5PhraseIter iter;
+** int iCol;
+** for(pApi->xPhraseFirstColumn(pFts, iPhrase, &iter, &iCol);
+** iCol>=0;
+** pApi->xPhraseNextColumn(pFts, &iter, &iCol)
+** ){
+** // Column iCol contains at least one instance of phrase iPhrase
+** }
+**
+** This API can be quite slow if used with an FTS5 table created with the
+** "detail=none" option. If the FTS5 table is created with either
+** "detail=none" "content=" option (i.e. if it is a contentless table),
+** then this API always iterates through an empty set (all calls to
+** xPhraseFirstColumn() set iCol to -1).
+**
+** The information accessed using this API and its companion
+** xPhraseFirstColumn() may also be obtained using xPhraseFirst/xPhraseNext
+** (or xInst/xInstCount). The chief advantage of this API is that it is
+** significantly more efficient than those alternatives when used with
+** "detail=column" tables.
+**
+** xPhraseNextColumn()
+** See xPhraseFirstColumn above.
+*/
+struct Fts5ExtensionApi {
+ int iVersion; /* Currently always set to 3 */
+
+ void *(*xUserData)(Fts5Context*);
+
+ int (*xColumnCount)(Fts5Context*);
+ int (*xRowCount)(Fts5Context*, sqlite3_int64 *pnRow);
+ int (*xColumnTotalSize)(Fts5Context*, int iCol, sqlite3_int64 *pnToken);
+
+ int (*xTokenize)(Fts5Context*,
+ const char *pText, int nText, /* Text to tokenize */
+ void *pCtx, /* Context passed to xToken() */
+ int (*xToken)(void*, int, const char*, int, int, int) /* Callback */
+ );
+
+ int (*xPhraseCount)(Fts5Context*);
+ int (*xPhraseSize)(Fts5Context*, int iPhrase);
+
+ int (*xInstCount)(Fts5Context*, int *pnInst);
+ int (*xInst)(Fts5Context*, int iIdx, int *piPhrase, int *piCol, int *piOff);
+
+ sqlite3_int64 (*xRowid)(Fts5Context*);
+ int (*xColumnText)(Fts5Context*, int iCol, const char **pz, int *pn);
+ int (*xColumnSize)(Fts5Context*, int iCol, int *pnToken);
+
+ int (*xQueryPhrase)(Fts5Context*, int iPhrase, void *pUserData,
+ int(*)(const Fts5ExtensionApi*,Fts5Context*,void*)
+ );
+ int (*xSetAuxdata)(Fts5Context*, void *pAux, void(*xDelete)(void*));
+ void *(*xGetAuxdata)(Fts5Context*, int bClear);
+
+ int (*xPhraseFirst)(Fts5Context*, int iPhrase, Fts5PhraseIter*, int*, int*);
+ void (*xPhraseNext)(Fts5Context*, Fts5PhraseIter*, int *piCol, int *piOff);
+
+ int (*xPhraseFirstColumn)(Fts5Context*, int iPhrase, Fts5PhraseIter*, int*);
+ void (*xPhraseNextColumn)(Fts5Context*, Fts5PhraseIter*, int *piCol);
+};
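+
+/*
+** Editorial sketch (not part of the upstream documentation): an auxiliary
+** function that returns the total number of phrase matches in the current
+** row via xInstCount(). The name "matchcount" is illustrative; the
+** function would be registered with fts5_api.xCreateFunction() (see the
+** registration API below).
+**
+** <pre>
+**   static void matchcountFunc(
+**     const Fts5ExtensionApi *pApi,
+**     Fts5Context *pFts,
+**     sqlite3_context *pCtx,
+**     int nVal,
+**     sqlite3_value **apVal
+**   ){
+**     int nInst = 0;
+**     int rc = pApi->xInstCount(pFts, &nInst);
+**     if( rc==SQLITE_OK ){
+**       sqlite3_result_int(pCtx, nInst);
+**     }else{
+**       sqlite3_result_error_code(pCtx, rc);
+**     }
+**   }
+** </pre>
+*/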
+
+/*
+** CUSTOM AUXILIARY FUNCTIONS
+*************************************************************************/
+
+/*************************************************************************
+** CUSTOM TOKENIZERS
+**
+** Applications may also register custom tokenizer types. A tokenizer
+** is registered by providing fts5 with a populated instance of the
+** following structure. All structure methods must be defined; setting
+** any member of the fts5_tokenizer struct to NULL leads to undefined
+** behaviour. The structure methods are expected to function as follows:
+**
+** xCreate:
+** This function is used to allocate and initialize a tokenizer instance.
+** A tokenizer instance is required to actually tokenize text.
+**
+** The first argument passed to this function is a copy of the (void*)
+** pointer provided by the application when the fts5_tokenizer object
+** was registered with FTS5 (the third argument to xCreateTokenizer()).
+** The second and third arguments are an array of nul-terminated strings
+** containing the tokenizer arguments, if any, specified following the
+** tokenizer name as part of the CREATE VIRTUAL TABLE statement used
+** to create the FTS5 table.
+**
+** The final argument is an output variable. If successful, (*ppOut)
+** should be set to point to the new tokenizer handle and SQLITE_OK
+** returned. If an error occurs, some value other than SQLITE_OK should
+** be returned. In this case, fts5 assumes that the final value of *ppOut
+** is undefined.
+**
+** xDelete:
+** This function is invoked to delete a tokenizer handle previously
+** allocated using xCreate(). Fts5 guarantees that this function will
+** be invoked exactly once for each successful call to xCreate().
+**
+** xTokenize:
+** This function is expected to tokenize the nText byte string indicated
+** by argument pText. pText may or may not be nul-terminated. The first
+** argument passed to this function is a pointer to an Fts5Tokenizer object
+** returned by an earlier call to xCreate().
+**
+** The second argument indicates the reason that FTS5 is requesting
+** tokenization of the supplied text. This is always one of the following
+** four values:
+**
+** <ul><li> <b>FTS5_TOKENIZE_DOCUMENT</b> - A document is being inserted into
+** or removed from the FTS table. The tokenizer is being invoked to
+** determine the set of tokens to add to (or delete from) the
+** FTS index.
+**
+** <li> <b>FTS5_TOKENIZE_QUERY</b> - A MATCH query is being executed
+** against the FTS index. The tokenizer is being called to tokenize
+** a bareword or quoted string specified as part of the query.
+**
+** <li> <b>(FTS5_TOKENIZE_QUERY | FTS5_TOKENIZE_PREFIX)</b> - Same as
+** FTS5_TOKENIZE_QUERY, except that the bareword or quoted string is
+** followed by a "*" character, indicating that the last token
+** returned by the tokenizer will be treated as a token prefix.
+**
+** <li> <b>FTS5_TOKENIZE_AUX</b> - The tokenizer is being invoked to
+** satisfy an fts5_api.xTokenize() request made by an auxiliary
+** function, or an fts5_api.xColumnSize() request made by the same
+** auxiliary function on a columnsize=0 database.
+** </ul>
+**
+** For each token in the input string, the supplied callback xToken() must
+** be invoked. The first argument to it should be a copy of the pointer
+** passed as the second argument to xTokenize(). The third and fourth
+** arguments are a pointer to a buffer containing the token text, and the
+** size of the token in bytes. The 5th and 6th arguments are the byte
+** offsets of the first byte of, and the first byte immediately following,
+** the text from which the token is derived within the input.
+**
+** The second argument passed to the xToken() callback ("tflags") should
+** normally be set to 0. The exception is if the tokenizer supports
+** synonyms. In this case see the discussion below for details.
+**
+** FTS5 assumes the xToken() callback is invoked for each token in the
+** order that they occur within the input text.
+**
+** If an xToken() callback returns any value other than SQLITE_OK, then
+** the tokenization should be abandoned and the xTokenize() method should
+** immediately return a copy of the xToken() return value. Or, if the
+** input buffer is exhausted, xTokenize() should return SQLITE_OK. Finally,
+** if an error occurs with the xTokenize() implementation itself, it
+** may abandon the tokenization and return any error code other than
+** SQLITE_OK or SQLITE_DONE.
+**
+** SYNONYM SUPPORT
+**
+** Custom tokenizers may also support synonyms. Consider a case in which a
+** user wishes to query for a phrase such as "first place". Using the
+** built-in tokenizers, the FTS5 query 'first + place' will match instances
+** of "first place" within the document set, but not alternative forms
+** such as "1st place". In some applications, it would be better to match
+** all instances of "first place" or "1st place" regardless of which form
+** the user specified in the MATCH query text.
+**
+** There are several ways to approach this in FTS5:
+**
+** <ol><li> By mapping all synonyms to a single token. In this case, using
+** the above example, this means that the tokenizer returns the
+** same token for inputs "first" and "1st". Say that token is in
+** fact "first", so that when the user inserts the document "I won
+** 1st place" entries are added to the index for tokens "i", "won",
+** "first" and "place". If the user then queries for '1st + place',
+** the tokenizer substitutes "first" for "1st" and the query works
+** as expected.
+**
+** <li> By querying the index for all synonyms of each query term
+** separately. In this case, when tokenizing query text, the
+** tokenizer may provide multiple synonyms for a single term
+** within the document. FTS5 then queries the index for each
+** synonym individually. For example, faced with the query:
+**
+** <codeblock>
+** ... MATCH 'first place'</codeblock>
+**
+** the tokenizer offers both "1st" and "first" as synonyms for the
+** first token in the MATCH query and FTS5 effectively runs a query
+** similar to:
+**
+** <codeblock>
+** ... MATCH '(first OR 1st) place'</codeblock>
+**
+** except that, for the purposes of auxiliary functions, the query
+** still appears to contain just two phrases - "(first OR 1st)"
+** being treated as a single phrase.
+**
+** <li> By adding multiple synonyms for a single term to the FTS index.
+** Using this method, when tokenizing document text, the tokenizer
+** provides multiple synonyms for each token. So that when a
+** document such as "I won first place" is tokenized, entries are
+** added to the FTS index for "i", "won", "first", "1st" and
+** "place".
+**
+** This way, even if the tokenizer does not provide synonyms
+** when tokenizing query text (it should not - to do so would be
+** inefficient), it doesn't matter if the user queries for
+** 'first + place' or '1st + place', as there are entries in the
+** FTS index corresponding to both forms of the first token.
+** </ol>
+**
+** Whether it is parsing document or query text, any call to xToken that
+** specifies a <i>tflags</i> argument with the FTS5_TOKEN_COLOCATED bit
+** is considered to supply a synonym for the previous token. For example,
+** when parsing the document "I won first place", a tokenizer that supports
+** synonyms would call xToken() 5 times, as follows:
+**
+** <codeblock>
+** xToken(pCtx, 0, "i", 1, 0, 1);
+** xToken(pCtx, 0, "won", 3, 2, 5);
+** xToken(pCtx, 0, "first", 5, 6, 11);
+** xToken(pCtx, FTS5_TOKEN_COLOCATED, "1st", 3, 6, 11);
+** xToken(pCtx, 0, "place", 5, 12, 17);
+**</codeblock>
+**
+** It is an error to specify the FTS5_TOKEN_COLOCATED flag the first time
+** xToken() is called. Multiple synonyms may be specified for a single token
+** by making multiple calls to xToken(FTS5_TOKEN_COLOCATED) in sequence.
+** There is no limit to the number of synonyms that may be provided for a
+** single token.
+**
+** In many cases, method (1) above is the best approach. It does not add
+** extra data to the FTS index or require FTS5 to query for multiple terms,
+** so it is efficient in terms of disk space and query speed. However, it
+** does not support prefix queries very well. If, as suggested above, the
+** token "first" is substituted for "1st" by the tokenizer, then the query:
+**
+** <codeblock>
+** ... MATCH '1s*'</codeblock>
+**
+** will not match documents that contain the token "1st" (as the tokenizer
+** will probably not map "1s" to any prefix of "first").
+**
+** For full prefix support, method (3) may be preferred. In this case,
+** because the index contains entries for both "first" and "1st", prefix
+** queries such as 'fi*' or '1s*' will match correctly. However, because
+** extra entries are added to the FTS index, this method uses more space
+** within the database.
+**
+** Method (2) offers a midpoint between (1) and (3). Using this method,
+** a query such as '1s*' will match documents that contain the literal
+** token "1st", but not "first" (assuming the tokenizer is not able to
+** provide synonyms for prefixes). However, a non-prefix query like '1st'
+** will match against "1st" and "first". This method does not require
+** extra disk space, as no extra entries are added to the FTS index.
+** On the other hand, it may require more CPU cycles to run MATCH queries,
+** as separate queries of the FTS index are required for each synonym.
+**
+** When using methods (2) or (3), it is important that the tokenizer only
+** provide synonyms when tokenizing document text (method (2)) or query
+** text (method (3)), not both. Doing so will not cause any errors, but is
+** inefficient.
+*/
+typedef struct Fts5Tokenizer Fts5Tokenizer;
+typedef struct fts5_tokenizer fts5_tokenizer;
+struct fts5_tokenizer {
+ int (*xCreate)(void*, const char **azArg, int nArg, Fts5Tokenizer **ppOut);
+ void (*xDelete)(Fts5Tokenizer*);
+ int (*xTokenize)(Fts5Tokenizer*,
+ void *pCtx,
+ int flags, /* Mask of FTS5_TOKENIZE_* flags */
+ const char *pText, int nText,
+ int (*xToken)(
+ void *pCtx, /* Copy of 2nd argument to xTokenize() */
+ int tflags, /* Mask of FTS5_TOKEN_* flags */
+ const char *pToken, /* Pointer to buffer containing token */
+ int nToken, /* Size of token in bytes */
+ int iStart, /* Byte offset of token within input text */
+ int iEnd /* Byte offset of end of token within input text */
+ )
+ );
+};
+
+/* Flags that may be passed as the third argument to xTokenize() */
+#define FTS5_TOKENIZE_QUERY 0x0001
+#define FTS5_TOKENIZE_PREFIX 0x0002
+#define FTS5_TOKENIZE_DOCUMENT 0x0004
+#define FTS5_TOKENIZE_AUX 0x0008
+
+/* Flags that may be passed by the tokenizer implementation back to FTS5
+** as the third argument to the supplied xToken callback. */
+#define FTS5_TOKEN_COLOCATED 0x0001 /* Same position as prev. token */
+
+/*
+** END OF CUSTOM TOKENIZERS
+*************************************************************************/
+
+/*************************************************************************
+** FTS5 EXTENSION REGISTRATION API
+*/
+typedef struct fts5_api fts5_api;
+struct fts5_api {
+ int iVersion; /* Currently always set to 2 */
+
+ /* Create a new tokenizer */
+ int (*xCreateTokenizer)(
+ fts5_api *pApi,
+ const char *zName,
+ void *pContext,
+ fts5_tokenizer *pTokenizer,
+ void (*xDestroy)(void*)
+ );
+
+ /* Find an existing tokenizer */
+ int (*xFindTokenizer)(
+ fts5_api *pApi,
+ const char *zName,
+ void **ppContext,
+ fts5_tokenizer *pTokenizer
+ );
+
+ /* Create a new auxiliary function */
+ int (*xCreateFunction)(
+ fts5_api *pApi,
+ const char *zName,
+ void *pContext,
+ fts5_extension_function xFunction,
+ void (*xDestroy)(void*)
+ );
+};
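+
+/*
+** As an illustrative sketch only (the names getFts5Api, pCtx and
+** myTokenizer below are hypothetical and not part of this interface),
+** an application might obtain the fts5_api pointer for database handle
+** db using the documented "fts5_api_ptr" pointer-passing convention:
+**
+** <codeblock>
+**   static int getFts5Api(sqlite3 *db, fts5_api **ppApi){
+**     sqlite3_stmt *pStmt = 0;
+**     int rc;
+**     *ppApi = 0;
+**     rc = sqlite3_prepare_v2(db, "SELECT fts5(?1)", -1, &pStmt, 0);
+**     if( rc==SQLITE_OK ){
+**       sqlite3_bind_pointer(pStmt, 1, (void*)ppApi, "fts5_api_ptr", 0);
+**       sqlite3_step(pStmt);
+**       rc = sqlite3_finalize(pStmt);
+**     }
+**     return rc;
+**   }</codeblock>
+**
+** and then, assuming myTokenizer is a populated fts5_tokenizer object,
+** register it under the name "mytok" with:
+**
+** <codeblock>
+**   rc = pApi->xCreateTokenizer(pApi, "mytok", pCtx, &myTokenizer, 0);</codeblock>
+*/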
+
+/*
+** END OF REGISTRATION API
+*************************************************************************/
+
+#if 0
+} /* end of the 'extern "C"' block */
+#endif
+
+#endif /* _FTS5_H */
+
+/******** End of fts5.h *********/
+
+/************** End of sqlite3.h *********************************************/
+/************** Continuing where we left off in sqliteInt.h ******************/
+
+/*
+** Include the configuration header output by 'configure' if we're using the
+** autoconf-based build
+*/
+#if defined(_HAVE_SQLITE_CONFIG_H) && !defined(SQLITECONFIG_H)
+/* #include "config.h" */
+#define SQLITECONFIG_H 1
+#endif
+
+/************** Include sqliteLimit.h in the middle of sqliteInt.h ***********/
+/************** Begin file sqliteLimit.h *************************************/
+/*
+** 2007 May 7
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+**
+** This file defines various limits of what SQLite can process.
+*/
+
+/*
+** The maximum length of a TEXT or BLOB in bytes. This also
+** limits the size of a row in a table or index.
+**
+** The hard limit is the ability of a 32-bit signed integer
+** to count the size: 2^31-1 or 2147483647.
+*/
+#ifndef SQLITE_MAX_LENGTH
+# define SQLITE_MAX_LENGTH 1000000000
+#endif
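+
+/*
+** This compile-time setting is an upper bound only. As a sketch of a
+** common pattern (not something required by this header), an
+** application may lower the limit for a single connection at run-time:
+**
+**   sqlite3_limit(db, SQLITE_LIMIT_LENGTH, 1000000);
+**
+** The run-time limit can be lowered below SQLITE_MAX_LENGTH but can
+** never be raised above it.
+*/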
+
+/*
+** This is the maximum number of
+**
+** * Columns in a table
+** * Columns in an index
+** * Columns in a view
+** * Terms in the SET clause of an UPDATE statement
+** * Terms in the result set of a SELECT statement
+** * Terms in the GROUP BY or ORDER BY clauses of a SELECT statement.
+** * Terms in the VALUES clause of an INSERT statement
+**
+** The hard upper limit here is 32767. Most database people will
+** tell you that in a well-normalized database, you usually should
+** not have more than a dozen or so columns in any table. And if
+** that is the case, there is no point in having more than a few
+** dozen values in any of the other situations described above.
+*/
+#ifndef SQLITE_MAX_COLUMN
+# define SQLITE_MAX_COLUMN 2000
+#endif
+
+/*
+** The maximum length of a single SQL statement in bytes.
+**
+** It used to be the case that setting this value to zero would
+** turn the limit off. That is no longer true. It is not possible
+** to turn this limit off.
+*/
+#ifndef SQLITE_MAX_SQL_LENGTH
+# define SQLITE_MAX_SQL_LENGTH 1000000000
+#endif
+
+/*
+** The maximum depth of an expression tree. This is limited to
+** some extent by SQLITE_MAX_SQL_LENGTH. But sometimes you might
+** want to place more severe limits on the complexity of an
+** expression.
+**
+** A value of 0 used to mean that the limit was not enforced.
+** But that is no longer true. The limit is now strictly enforced
+** at all times.
+*/
+#ifndef SQLITE_MAX_EXPR_DEPTH
+# define SQLITE_MAX_EXPR_DEPTH 1000
+#endif
+
+/*
+** The maximum number of terms in a compound SELECT statement.
+** The code generator for compound SELECT statements does one
+** level of recursion for each term. A stack overflow can result
+** if the number of terms is too large. In practice, most SQL
+** never has more than 3 or 4 terms. Use a value of 0 to disable
+** any limit on the number of terms in a compound SELECT.
+*/
+#ifndef SQLITE_MAX_COMPOUND_SELECT
+# define SQLITE_MAX_COMPOUND_SELECT 500
+#endif
+
+/*
+** The maximum number of opcodes in a VDBE program.
+** Not currently enforced.
+*/
+#ifndef SQLITE_MAX_VDBE_OP
+# define SQLITE_MAX_VDBE_OP 250000000
+#endif
+
+/*
+** The maximum number of arguments to an SQL function.
+*/
+#ifndef SQLITE_MAX_FUNCTION_ARG
+# define SQLITE_MAX_FUNCTION_ARG 127
+#endif
+
+/*
+** The suggested maximum number of in-memory pages to use for
+** the main database table and for temporary tables.
+**
+** IMPLEMENTATION-OF: R-30185-15359 The default suggested cache size is -2000,
+** which means the cache size is limited to 2048000 bytes of memory.
+** IMPLEMENTATION-OF: R-48205-43578 The default suggested cache size can be
+** altered using the SQLITE_DEFAULT_CACHE_SIZE compile-time option.
+*/
+#ifndef SQLITE_DEFAULT_CACHE_SIZE
+# define SQLITE_DEFAULT_CACHE_SIZE -2000
+#endif
+
+/*
+** The default number of frames to accumulate in the log file before
+** checkpointing the database in WAL mode.
+*/
+#ifndef SQLITE_DEFAULT_WAL_AUTOCHECKPOINT
+# define SQLITE_DEFAULT_WAL_AUTOCHECKPOINT 1000
+#endif
+
+/*
+** The maximum number of attached databases. This must be between 0
+** and 125. The upper bound of 125 is because the attached databases are
+** counted using a signed 8-bit integer which has a maximum value of 127
+** and we have to allow 2 extra counts for the "main" and "temp" databases.
+*/
+#ifndef SQLITE_MAX_ATTACHED
+# define SQLITE_MAX_ATTACHED 10
+#endif
+
+
+/*
+** The maximum value of a ?nnn wildcard that the parser will accept.
+** If the value exceeds 32767 then extra space is required for the Expr
+** structure. But otherwise, we believe that the number can be as large
+** as a signed 32-bit integer can hold.
+*/
+#ifndef SQLITE_MAX_VARIABLE_NUMBER
+# define SQLITE_MAX_VARIABLE_NUMBER 32766
+#endif
+
+/* Maximum page size. The upper bound on this value is 65536. This is a
+** limit imposed by the use of 16-bit offsets within each page.
+**
+** Earlier versions of SQLite allowed the user to change this value at
+** compile time. This is no longer permitted, on the grounds that it creates
+** a library that is technically incompatible with an SQLite library
+** compiled with a different limit. If a process operating on a database
+** with a page-size of 65536 bytes crashes, then an instance of SQLite
+** compiled with the default page-size limit will not be able to roll back
+** the aborted transaction. This could lead to database corruption.
+*/
+#ifdef SQLITE_MAX_PAGE_SIZE
+# undef SQLITE_MAX_PAGE_SIZE
+#endif
+#define SQLITE_MAX_PAGE_SIZE 65536
+
+
+/*
+** The default size of a database page.
+*/
+#ifndef SQLITE_DEFAULT_PAGE_SIZE
+# define SQLITE_DEFAULT_PAGE_SIZE 4096
+#endif
+#if SQLITE_DEFAULT_PAGE_SIZE>SQLITE_MAX_PAGE_SIZE
+# undef SQLITE_DEFAULT_PAGE_SIZE
+# define SQLITE_DEFAULT_PAGE_SIZE SQLITE_MAX_PAGE_SIZE
+#endif
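+
+/*
+** The page size of a particular database can be chosen at run-time, up
+** to SQLITE_MAX_PAGE_SIZE. As an illustrative sketch (not part of this
+** header), an application might run, before the database file is first
+** populated:
+**
+**   sqlite3_exec(db, "PRAGMA page_size=8192;", 0, 0, 0);
+**
+** For an existing rollback-mode database the new size takes effect
+** only after the file is rebuilt (e.g. by VACUUM); in WAL mode the
+** page size cannot be changed at all.
+*/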
+
+/*
+** Ordinarily, if no value is explicitly provided, SQLite creates databases
+** with page size SQLITE_DEFAULT_PAGE_SIZE. However, based on certain
+** device characteristics (sector-size and atomic write() support),
+** SQLite may choose a larger value. This constant is the maximum value
+** SQLite will choose on its own.
+*/
+#ifndef SQLITE_MAX_DEFAULT_PAGE_SIZE
+# define SQLITE_MAX_DEFAULT_PAGE_SIZE 8192
+#endif
+#if SQLITE_MAX_DEFAULT_PAGE_SIZE>SQLITE_MAX_PAGE_SIZE
+# undef SQLITE_MAX_DEFAULT_PAGE_SIZE
+# define SQLITE_MAX_DEFAULT_PAGE_SIZE SQLITE_MAX_PAGE_SIZE
+#endif
+
+
+/*
+** Maximum number of pages in one database file.
+**
+** This is really just the default value for the max_page_count pragma.
+** This value can be lowered (or raised) at run-time using the
+** max_page_count pragma.
+*/
+#ifndef SQLITE_MAX_PAGE_COUNT
+# define SQLITE_MAX_PAGE_COUNT 1073741823
+#endif
+
+/*
+** Maximum length (in bytes) of the pattern in a LIKE or GLOB
+** operator.
+*/
+#ifndef SQLITE_MAX_LIKE_PATTERN_LENGTH
+# define SQLITE_MAX_LIKE_PATTERN_LENGTH 50000
+#endif
+
+/*
+** Maximum depth of recursion for triggers.
+**
+** A value of 1 means that a trigger program will not be able to itself
+** fire any triggers. A value of 0 means that no trigger programs at all
+** may be executed.
+*/
+#ifndef SQLITE_MAX_TRIGGER_DEPTH
+# define SQLITE_MAX_TRIGGER_DEPTH 1000
+#endif
+
+/************** End of sqliteLimit.h *****************************************/
+/************** Continuing where we left off in sqliteInt.h ******************/
+
+/* Disable nuisance warnings on Borland compilers */
+#if defined(__BORLANDC__)
+#pragma warn -rch /* unreachable code */
+#pragma warn -ccc /* Condition is always true or false */
+#pragma warn -aus /* Assigned value is never used */
+#pragma warn -csu /* Comparing signed and unsigned */
+#pragma warn -spa /* Suspicious pointer arithmetic */
+#endif
+
+/*
+** WAL mode depends on atomic aligned 32-bit loads and stores in a few
+** places. The following macros try to make this explicit.
+*/
+#ifndef __has_extension
+# define __has_extension(x) 0 /* compatibility with non-clang compilers */
+#endif
+#if GCC_VERSION>=4007000 || __has_extension(c_atomic)
+# define AtomicLoad(PTR) __atomic_load_n((PTR),__ATOMIC_RELAXED)
+# define AtomicStore(PTR,VAL) __atomic_store_n((PTR),(VAL),__ATOMIC_RELAXED)
+#else
+# define AtomicLoad(PTR) (*(PTR))
+# define AtomicStore(PTR,VAL) (*(PTR) = (VAL))
+#endif
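+
+/*
+** An illustrative sketch only: a u32 shared between threads (here the
+** hypothetical variable sharedFlag) might be accessed as:
+**
+**   AtomicStore(&sharedFlag, 1);
+**   if( AtomicLoad(&sharedFlag) ){ ... }
+**
+** Note that __ATOMIC_RELAXED makes the individual load or store atomic
+** but does not order it relative to surrounding memory operations.
+*/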
+
+/*
+** Include standard header files as necessary
+*/
+#ifdef HAVE_STDINT_H
+#include <stdint.h>
+#endif
+#ifdef HAVE_INTTYPES_H
+#include <inttypes.h>
+#endif
+
+/*
+** The following macros are used to cast pointers to integers and
+** integers to pointers. The way you do this varies from one compiler
+** to the next, so we have developed the following set of #if statements
+** to generate appropriate macros for a wide range of compilers.
+**
+** The correct "ANSI" way to do this is to use the intptr_t type.
+** Unfortunately, that typedef is not available on all compilers, or
+** if it is available, it requires an #include of specific headers
+** that vary from one machine to the next.
+**
+** Ticket #3860: The llvm-gcc-4.2 compiler from Apple chokes on
+** the ((void*)&((char*)0)[X]) construct. But MSVC chokes on ((void*)(X)).
+** So we have to define the macros in different ways depending on the
+** compiler.
+*/
+#if defined(HAVE_STDINT_H) /* Use this case if we have ANSI headers */
+# define SQLITE_INT_TO_PTR(X) ((void*)(intptr_t)(X))
+# define SQLITE_PTR_TO_INT(X) ((int)(intptr_t)(X))
+#elif defined(__PTRDIFF_TYPE__) /* This case should work for GCC */
+# define SQLITE_INT_TO_PTR(X) ((void*)(__PTRDIFF_TYPE__)(X))
+# define SQLITE_PTR_TO_INT(X) ((int)(__PTRDIFF_TYPE__)(X))
+#elif !defined(__GNUC__) /* Works for compilers other than LLVM */
+# define SQLITE_INT_TO_PTR(X) ((void*)&((char*)0)[X])
+# define SQLITE_PTR_TO_INT(X) ((int)(((char*)X)-(char*)0))
+#else /* Generates a warning - but it always works */
+# define SQLITE_INT_TO_PTR(X) ((void*)(X))
+# define SQLITE_PTR_TO_INT(X) ((int)(X))
+#endif
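+
+/*
+** A typical use, sketched here for illustration, is smuggling a small
+** integer through a void* callback context:
+**
+**   void *pArg = SQLITE_INT_TO_PTR(42);
+**   int iVal = SQLITE_PTR_TO_INT(pArg);
+**
+** After these two lines iVal is again 42. Only values that fit in an
+** int survive the round trip; these macros are not a general-purpose
+** replacement for intptr_t conversions.
+*/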
+
+/*
+** A macro to hint to the compiler that a function should not be
+** inlined.
+*/
+#if defined(__GNUC__)
+# define SQLITE_NOINLINE __attribute__((noinline))
+#elif defined(_MSC_VER) && _MSC_VER>=1310
+# define SQLITE_NOINLINE __declspec(noinline)
+#else
+# define SQLITE_NOINLINE
+#endif
+
+/*
+** Make sure that the compiler intrinsics we desire are enabled when
+** compiling with an appropriate version of MSVC unless prevented by
+** the SQLITE_DISABLE_INTRINSIC define.
+*/
+#if !defined(SQLITE_DISABLE_INTRINSIC)
+# if defined(_MSC_VER) && _MSC_VER>=1400
+# if !defined(_WIN32_WCE)
+# include <intrin.h>
+# pragma intrinsic(_byteswap_ushort)
+# pragma intrinsic(_byteswap_ulong)
+# pragma intrinsic(_byteswap_uint64)
+# pragma intrinsic(_ReadWriteBarrier)
+# else
+# include <cmnintrin.h>
+# endif
+# endif
+#endif
+
+/*
+** The SQLITE_THREADSAFE macro must be defined as 0, 1, or 2.
+** 0 means mutexes are permanently disabled and the library is never
+** threadsafe. 1 means the library is serialized which is the highest
+** level of threadsafety. 2 means the library is multithreaded - multiple
+** threads can use SQLite as long as no two threads try to use the same
+** database connection at the same time.
+**
+** Older versions of SQLite used an optional THREADSAFE macro.
+** We support that for legacy.
+**
+** To ensure that the correct value of "THREADSAFE" is reported when querying
+** for compile-time options at runtime (e.g. "PRAGMA compile_options"), this
+** logic is partially replicated in ctime.c. If it is updated here, it should
+** also be updated there.
+*/
+#if !defined(SQLITE_THREADSAFE)
+# if defined(THREADSAFE)
+# define SQLITE_THREADSAFE THREADSAFE
+# else
+# define SQLITE_THREADSAFE 1 /* IMP: R-07272-22309 */
+# endif
+#endif
+
+/*
+** Powersafe overwrite is on by default. But can be turned off using
+** the -DSQLITE_POWERSAFE_OVERWRITE=0 command-line option.
+*/
+#ifndef SQLITE_POWERSAFE_OVERWRITE
+# define SQLITE_POWERSAFE_OVERWRITE 1
+#endif
+
+/*
+** EVIDENCE-OF: R-25715-37072 Memory allocation statistics are enabled by
+** default unless SQLite is compiled with SQLITE_DEFAULT_MEMSTATUS=0 in
+** which case memory allocation statistics are disabled by default.
+*/
+#if !defined(SQLITE_DEFAULT_MEMSTATUS)
+# define SQLITE_DEFAULT_MEMSTATUS 1
+#endif
+
+/*
+** Exactly one of the following macros must be defined in order to
+** specify which memory allocation subsystem to use.
+**
+** SQLITE_SYSTEM_MALLOC // Use normal system malloc()
+** SQLITE_WIN32_MALLOC // Use Win32 native heap API
+** SQLITE_ZERO_MALLOC // Use a stub allocator that always fails
+** SQLITE_MEMDEBUG // Debugging version of system malloc()
+**
+** On Windows, if the SQLITE_WIN32_MALLOC_VALIDATE macro is defined and the
+** assert() macro is enabled, each call into the Win32 native heap subsystem
+** will cause HeapValidate to be called. If heap validation should fail, an
+** assertion will be triggered.
+**
+** If none of the above are defined, then set SQLITE_SYSTEM_MALLOC as
+** the default.
+*/
+#if defined(SQLITE_SYSTEM_MALLOC) \
+ + defined(SQLITE_WIN32_MALLOC) \
+ + defined(SQLITE_ZERO_MALLOC) \
+ + defined(SQLITE_MEMDEBUG)>1
+# error "Two or more of the following compile-time configuration options\
+ are defined but at most one is allowed:\
+ SQLITE_SYSTEM_MALLOC, SQLITE_WIN32_MALLOC, SQLITE_MEMDEBUG,\
+ SQLITE_ZERO_MALLOC"
+#endif
+#if defined(SQLITE_SYSTEM_MALLOC) \
+ + defined(SQLITE_WIN32_MALLOC) \
+ + defined(SQLITE_ZERO_MALLOC) \
+ + defined(SQLITE_MEMDEBUG)==0
+# define SQLITE_SYSTEM_MALLOC 1
+#endif
+
+/*
+** If SQLITE_MALLOC_SOFT_LIMIT is not zero, then try to keep the
+** sizes of memory allocations below this value where possible.
+*/
+#if !defined(SQLITE_MALLOC_SOFT_LIMIT)
+# define SQLITE_MALLOC_SOFT_LIMIT 1024
+#endif
+
+/*
+** We need to define _XOPEN_SOURCE as follows in order to enable
+** recursive mutexes on most Unix systems and fchmod() on OpenBSD.
+** But the _XOPEN_SOURCE define causes problems for Mac OS X, so omit
+** it.
+*/
+#if !defined(_XOPEN_SOURCE) && !defined(__DARWIN__) && !defined(__APPLE__)
+# define _XOPEN_SOURCE 600
+#endif
+
+/*
+** NDEBUG and SQLITE_DEBUG are opposites. It should always be true that
+** defined(NDEBUG)==!defined(SQLITE_DEBUG). If this is not currently true,
+** make it true by defining or undefining NDEBUG.
+**
+** Setting NDEBUG makes the code smaller and faster by disabling the
+** assert() statements in the code. So we want the default action
+** to be for NDEBUG to be set and NDEBUG to be undefined only if SQLITE_DEBUG
+** is set. Thus NDEBUG becomes an opt-in rather than an opt-out
+** feature.
+*/
+#if !defined(NDEBUG) && !defined(SQLITE_DEBUG)
+# define NDEBUG 1
+#endif
+#if defined(NDEBUG) && defined(SQLITE_DEBUG)
+# undef NDEBUG
+#endif
+
+/*
+** Enable SQLITE_ENABLE_EXPLAIN_COMMENTS if SQLITE_DEBUG is turned on.
+*/
+#if !defined(SQLITE_ENABLE_EXPLAIN_COMMENTS) && defined(SQLITE_DEBUG)
+# define SQLITE_ENABLE_EXPLAIN_COMMENTS 1
+#endif
+
+/*
+** The testcase() macro is used to aid in coverage testing. When
+** doing coverage testing, the condition inside the argument to
+** testcase() must be evaluated both true and false in order to
+** get full branch coverage. The testcase() macro is inserted
+** to help ensure adequate test coverage in places where simple
+** condition/decision coverage is inadequate. For example, testcase()
+** can be used to make sure boundary values are tested. For
+** bitmask tests, testcase() can be used to make sure each bit
+** is significant and used at least once. On switch statements
+** where multiple cases go to the same block of code, testcase()
+** can ensure that all cases are evaluated.
+*/
+#ifdef SQLITE_COVERAGE_TEST
+SQLITE_PRIVATE void sqlite3Coverage(int);
+# define testcase(X) if( X ){ sqlite3Coverage(__LINE__); }
+#else
+# define testcase(X)
+#endif
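+
+/*
+** For example (a sketch, not code from this file), to verify during
+** coverage testing that a boundary value and a bitmask bit are both
+** exercised:
+**
+**   testcase( n==SQLITE_MAX_LENGTH );
+**   testcase( flags & 0x80 );
+**
+** In SQLITE_COVERAGE_TEST builds each line that fires calls
+** sqlite3Coverage(__LINE__); in ordinary builds both lines compile to
+** nothing.
+*/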
+
+/*
+** The TESTONLY macro is used to enclose variable declarations or
+** other bits of code that are needed to support the arguments
+** within testcase() and assert() macros.
+*/
+#if !defined(NDEBUG) || defined(SQLITE_COVERAGE_TEST)
+# define TESTONLY(X) X
+#else
+# define TESTONLY(X)
+#endif
+
+/*
+** Sometimes we need a small amount of code such as a variable initialization
+** to setup for a later assert() statement. We do not want this code to
+** appear when assert() is disabled. The following macro is therefore
+** used to contain that setup code. The "VVA" acronym stands for
+** "Verification, Validation, and Accreditation". In other words, the
+** code within VVA_ONLY() will only run during verification processes.
+*/
+#ifndef NDEBUG
+# define VVA_ONLY(X) X
+#else
+# define VVA_ONLY(X)
+#endif
+
+/*
+** The ALWAYS and NEVER macros surround boolean expressions which
+** are intended to always be true or false, respectively. Such
+** expressions could be omitted from the code completely. But they
+** are included in a few cases in order to enhance the resilience
+** of SQLite to unexpected behavior - to make the code "self-healing"
+** or "ductile" rather than being "brittle" and crashing at the first
+** hint of unplanned behavior.
+**
+** In other words, ALWAYS and NEVER are added for defensive code.
+**
+** When doing coverage testing ALWAYS and NEVER are hard-coded to
+** be true and false so that the unreachable code they specify will
+** not be counted as untested code.
+*/
+#if defined(SQLITE_COVERAGE_TEST) || defined(SQLITE_MUTATION_TEST)
+# define ALWAYS(X) (1)
+# define NEVER(X) (0)
+#elif !defined(NDEBUG)
+# define ALWAYS(X) ((X)?1:(assert(0),0))
+# define NEVER(X) ((X)?(assert(0),1):0)
+#else
+# define ALWAYS(X) (X)
+# define NEVER(X) (X)
+#endif
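+
+/*
+** For example (a sketch), a defensive NULL check that is believed
+** impossible to fail might be written:
+**
+**   if( NEVER(pPage==0) ) return SQLITE_CORRUPT;
+**
+** In debug builds the assert() inside NEVER() fires if pPage is ever
+** NULL; in coverage builds the branch is hard-coded false so it is not
+** counted as untested; in release builds the test remains as a
+** last-ditch safety check.
+*/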
+
+/*
+** The harmless(X) macro indicates that expression X is usually false
+** but can be true without causing any problems, although we don't
+** know of any way to cause X to be true.
+**
+** In debugging and testing builds, this macro will abort if X is ever
+** true. In this way, developers are alerted to a possible test case
+** that causes X to be true. If a harmless macro ever fails, that is
+** an opportunity to change the macro into a testcase() and add a new
+** test case to the test suite.
+**
+** For normal production builds, harmless(X) is a no-op, since it does
+** not matter whether expression X is true or false.
+*/
+#ifdef SQLITE_DEBUG
+# define harmless(X) assert(!(X));
+#else
+# define harmless(X)
+#endif
+
+/*
+** Some conditionals are optimizations only. In other words, if the
+** conditionals are replaced with a constant 1 (true) or 0 (false) then
+** the correct answer is still obtained, though perhaps not as quickly.
+**
+** The following macros mark these optimizations conditionals.
+*/
+#if defined(SQLITE_MUTATION_TEST)
+# define OK_IF_ALWAYS_TRUE(X) (1)
+# define OK_IF_ALWAYS_FALSE(X) (0)
+#else
+# define OK_IF_ALWAYS_TRUE(X) (X)
+# define OK_IF_ALWAYS_FALSE(X) (X)
+#endif
+
+/*
+** Some malloc failures are only possible if SQLITE_TEST_REALLOC_STRESS is
+** defined. We need to defend against those failures when testing with
+** SQLITE_TEST_REALLOC_STRESS, but we don't want the unreachable branches
+** during a normal build. The following macro can be used to disable tests
+** that are always false except when SQLITE_TEST_REALLOC_STRESS is set.
+*/
+#if defined(SQLITE_TEST_REALLOC_STRESS)
+# define ONLY_IF_REALLOC_STRESS(X) (X)
+#elif !defined(NDEBUG)
+# define ONLY_IF_REALLOC_STRESS(X) ((X)?(assert(0),1):0)
+#else
+# define ONLY_IF_REALLOC_STRESS(X) (0)
+#endif
+
+/*
+** Declarations used for tracing the operating system interfaces.
+*/
+#if defined(SQLITE_FORCE_OS_TRACE) || defined(SQLITE_TEST) || \
+ (defined(SQLITE_DEBUG) && SQLITE_OS_WIN)
+ extern int sqlite3OSTrace;
+# define OSTRACE(X) if( sqlite3OSTrace ) sqlite3DebugPrintf X
+# define SQLITE_HAVE_OS_TRACE
+#else
+# define OSTRACE(X)
+# undef SQLITE_HAVE_OS_TRACE
+#endif
+
+/*
+** Is the sqlite3ErrName() function needed in the build? Currently,
+** it is needed by "mutex_w32.c" (when debugging), "os_win.c" (when
+** OSTRACE is enabled), and by several "test*.c" files (which are
+** compiled using SQLITE_TEST).
+*/
+#if defined(SQLITE_HAVE_OS_TRACE) || defined(SQLITE_TEST) || \
+ (defined(SQLITE_DEBUG) && SQLITE_OS_WIN)
+# define SQLITE_NEED_ERR_NAME
+#else
+# undef SQLITE_NEED_ERR_NAME
+#endif
+
+/*
+** SQLITE_ENABLE_EXPLAIN_COMMENTS is incompatible with SQLITE_OMIT_EXPLAIN
+*/
+#ifdef SQLITE_OMIT_EXPLAIN
+# undef SQLITE_ENABLE_EXPLAIN_COMMENTS
+#endif
+
+/*
+** Return true (non-zero) if the input is an integer that is too large
+** to fit in 32 bits. This macro is used inside various testcase()
+** macros to verify that we have tested SQLite for large-file support.
+*/
+#define IS_BIG_INT(X) (((X)&~(i64)0xffffffff)!=0)
+
+/*
+** The macro unlikely() is a hint that surrounds a boolean
+** expression that is usually false. Macro likely() surrounds
+** a boolean expression that is usually true. These hints could,
+** in theory, be used by the compiler to generate better code, but
+** currently they are just comments for human readers.
+*/
+#define likely(X) (X)
+#define unlikely(X) (X)
+
+/************** Include hash.h in the middle of sqliteInt.h ******************/
+/************** Begin file hash.h ********************************************/
+/*
+** 2001 September 22
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+** This is the header file for the generic hash-table implementation
+** used in SQLite.
+*/
+#ifndef SQLITE_HASH_H
+#define SQLITE_HASH_H
+
+/* Forward declarations of structures. */
+typedef struct Hash Hash;
+typedef struct HashElem HashElem;
+
+/* A complete hash table is an instance of the following structure.
+** The internals of this structure are intended to be opaque -- client
+** code should not attempt to access or modify the fields of this structure
+** directly. Change this structure only by using the routines below.
+** However, some of the "procedures" and "functions" for modifying and
+** accessing this structure are really macros, so we can't really make
+** this structure opaque.
+**
+** All elements of the hash table are on a single doubly-linked list.
+** Hash.first points to the head of this list.
+**
+** There are Hash.htsize buckets. Each bucket points to a spot in
+** the global doubly-linked list. The contents of the bucket are the
+** element pointed to plus the next _ht.count-1 elements in the list.
+**
+** Hash.htsize and Hash.ht may be zero. In that case lookup is done
+** by a linear search of the global list. For small tables, the
+** Hash.ht table is never allocated because if there are few elements
+** in the table, it is faster to do a linear search than to manage
+** the hash table.
+*/
+struct Hash {
+ unsigned int htsize; /* Number of buckets in the hash table */
+ unsigned int count; /* Number of entries in this table */
+ HashElem *first; /* The first element of the list */
+ struct _ht { /* the hash table */
+ unsigned int count; /* Number of entries with this hash */
+ HashElem *chain; /* Pointer to first entry with this hash */
+ } *ht;
+};
+
+/* Each element in the hash table is an instance of the following
+** structure. All elements are stored on a single doubly-linked list.
+**
+** Again, this structure is intended to be opaque, but it can't really
+** be opaque because it is used by macros.
+*/
+struct HashElem {
+ HashElem *next, *prev; /* Next and previous elements in the table */
+ void *data; /* Data associated with this element */
+ const char *pKey; /* Key associated with this element */
+};
+
+/*
+** Access routines. To delete, insert a NULL pointer.
+*/
+SQLITE_PRIVATE void sqlite3HashInit(Hash*);
+SQLITE_PRIVATE void *sqlite3HashInsert(Hash*, const char *pKey, void *pData);
+SQLITE_PRIVATE void *sqlite3HashFind(const Hash*, const char *pKey);
+SQLITE_PRIVATE void sqlite3HashClear(Hash*);
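+
+/*
+** An illustrative sketch of the life-cycle. Note that the key string
+** is not copied, so it must remain valid for the life of the entry:
+**
+**   Hash h;
+**   sqlite3HashInit(&h);
+**   sqlite3HashInsert(&h, "alpha", pData);   // insert an entry
+**   pData = sqlite3HashFind(&h, "alpha");    // look it up
+**   sqlite3HashInsert(&h, "alpha", 0);       // delete it again
+**   sqlite3HashClear(&h);
+*/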
+
+/*
+** Macros for looping over all elements of a hash table. The idiom is
+** like this:
+**
+** Hash h;
+** HashElem *p;
+** ...
+** for(p=sqliteHashFirst(&h); p; p=sqliteHashNext(p)){
+** SomeStructure *pData = sqliteHashData(p);
+** // do something with pData
+** }
+*/
+#define sqliteHashFirst(H) ((H)->first)
+#define sqliteHashNext(E) ((E)->next)
+#define sqliteHashData(E) ((E)->data)
+/* #define sqliteHashKey(E) ((E)->pKey) // NOT USED */
+/* #define sqliteHashKeysize(E) ((E)->nKey) // NOT USED */
+
+/*
+** Number of entries in a hash table
+*/
+/* #define sqliteHashCount(H) ((H)->count) // NOT USED */
+
+#endif /* SQLITE_HASH_H */
+
+/************** End of hash.h ************************************************/
+/************** Continuing where we left off in sqliteInt.h ******************/
+/************** Include parse.h in the middle of sqliteInt.h *****************/
+/************** Begin file parse.h *******************************************/
+#define TK_SEMI 1
+#define TK_EXPLAIN 2
+#define TK_QUERY 3
+#define TK_PLAN 4
+#define TK_BEGIN 5
+#define TK_TRANSACTION 6
+#define TK_DEFERRED 7
+#define TK_IMMEDIATE 8
+#define TK_EXCLUSIVE 9
+#define TK_COMMIT 10
+#define TK_END 11
+#define TK_ROLLBACK 12
+#define TK_SAVEPOINT 13
+#define TK_RELEASE 14
+#define TK_TO 15
+#define TK_TABLE 16
+#define TK_CREATE 17
+#define TK_IF 18
+#define TK_NOT 19
+#define TK_EXISTS 20
+#define TK_TEMP 21
+#define TK_LP 22
+#define TK_RP 23
+#define TK_AS 24
+#define TK_WITHOUT 25
+#define TK_COMMA 26
+#define TK_ABORT 27
+#define TK_ACTION 28
+#define TK_AFTER 29
+#define TK_ANALYZE 30
+#define TK_ASC 31
+#define TK_ATTACH 32
+#define TK_BEFORE 33
+#define TK_BY 34
+#define TK_CASCADE 35
+#define TK_CAST 36
+#define TK_CONFLICT 37
+#define TK_DATABASE 38
+#define TK_DESC 39
+#define TK_DETACH 40
+#define TK_EACH 41
+#define TK_FAIL 42
+#define TK_OR 43
+#define TK_AND 44
+#define TK_IS 45
+#define TK_MATCH 46
+#define TK_LIKE_KW 47
+#define TK_BETWEEN 48
+#define TK_IN 49
+#define TK_ISNULL 50
+#define TK_NOTNULL 51
+#define TK_NE 52
+#define TK_EQ 53
+#define TK_GT 54
+#define TK_LE 55
+#define TK_LT 56
+#define TK_GE 57
+#define TK_ESCAPE 58
+#define TK_ID 59
+#define TK_COLUMNKW 60
+#define TK_DO 61
+#define TK_FOR 62
+#define TK_IGNORE 63
+#define TK_INITIALLY 64
+#define TK_INSTEAD 65
+#define TK_NO 66
+#define TK_KEY 67
+#define TK_OF 68
+#define TK_OFFSET 69
+#define TK_PRAGMA 70
+#define TK_RAISE 71
+#define TK_RECURSIVE 72
+#define TK_REPLACE 73
+#define TK_RESTRICT 74
+#define TK_ROW 75
+#define TK_ROWS 76
+#define TK_TRIGGER 77
+#define TK_VACUUM 78
+#define TK_VIEW 79
+#define TK_VIRTUAL 80
+#define TK_WITH 81
+#define TK_NULLS 82
+#define TK_FIRST 83
+#define TK_LAST 84
+#define TK_CURRENT 85
+#define TK_FOLLOWING 86
+#define TK_PARTITION 87
+#define TK_PRECEDING 88
+#define TK_RANGE 89
+#define TK_UNBOUNDED 90
+#define TK_EXCLUDE 91
+#define TK_GROUPS 92
+#define TK_OTHERS 93
+#define TK_TIES 94
+#define TK_GENERATED 95
+#define TK_ALWAYS 96
+#define TK_REINDEX 97
+#define TK_RENAME 98
+#define TK_CTIME_KW 99
+#define TK_ANY 100
+#define TK_BITAND 101
+#define TK_BITOR 102
+#define TK_LSHIFT 103
+#define TK_RSHIFT 104
+#define TK_PLUS 105
+#define TK_MINUS 106
+#define TK_STAR 107
+#define TK_SLASH 108
+#define TK_REM 109
+#define TK_CONCAT 110
+#define TK_COLLATE 111
+#define TK_BITNOT 112
+#define TK_ON 113
+#define TK_INDEXED 114
+#define TK_STRING 115
+#define TK_JOIN_KW 116
+#define TK_CONSTRAINT 117
+#define TK_DEFAULT 118
+#define TK_NULL 119
+#define TK_PRIMARY 120
+#define TK_UNIQUE 121
+#define TK_CHECK 122
+#define TK_REFERENCES 123
+#define TK_AUTOINCR 124
+#define TK_INSERT 125
+#define TK_DELETE 126
+#define TK_UPDATE 127
+#define TK_SET 128
+#define TK_DEFERRABLE 129
+#define TK_FOREIGN 130
+#define TK_DROP 131
+#define TK_UNION 132
+#define TK_ALL 133
+#define TK_EXCEPT 134
+#define TK_INTERSECT 135
+#define TK_SELECT 136
+#define TK_VALUES 137
+#define TK_DISTINCT 138
+#define TK_DOT 139
+#define TK_FROM 140
+#define TK_JOIN 141
+#define TK_USING 142
+#define TK_ORDER 143
+#define TK_GROUP 144
+#define TK_HAVING 145
+#define TK_LIMIT 146
+#define TK_WHERE 147
+#define TK_INTO 148
+#define TK_NOTHING 149
+#define TK_FLOAT 150
+#define TK_BLOB 151
+#define TK_INTEGER 152
+#define TK_VARIABLE 153
+#define TK_CASE 154
+#define TK_WHEN 155
+#define TK_THEN 156
+#define TK_ELSE 157
+#define TK_INDEX 158
+#define TK_ALTER 159
+#define TK_ADD 160
+#define TK_WINDOW 161
+#define TK_OVER 162
+#define TK_FILTER 163
+#define TK_COLUMN 164
+#define TK_AGG_FUNCTION 165
+#define TK_AGG_COLUMN 166
+#define TK_TRUEFALSE 167
+#define TK_ISNOT 168
+#define TK_FUNCTION 169
+#define TK_UMINUS 170
+#define TK_UPLUS 171
+#define TK_TRUTH 172
+#define TK_REGISTER 173
+#define TK_VECTOR 174
+#define TK_SELECT_COLUMN 175
+#define TK_IF_NULL_ROW 176
+#define TK_ASTERISK 177
+#define TK_SPAN 178
+#define TK_SPACE 179
+#define TK_ILLEGAL 180
+
+/************** End of parse.h ***********************************************/
+/************** Continuing where we left off in sqliteInt.h ******************/
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <assert.h>
+#include <stddef.h>
+
+/*
+** Use a macro to replace memcpy() if compiled with SQLITE_INLINE_MEMCPY.
+** This allows better measurements of where memcpy() is used when running
+** cachegrind. But this macro version of memcpy() is very slow so it
+** should not be used in production. This is a performance measurement
+** hack only.
+*/
+#ifdef SQLITE_INLINE_MEMCPY
+# define memcpy(D,S,N) {char*xxd=(char*)(D);const char*xxs=(const char*)(S);\
+ int xxn=(N);while(xxn-->0)*(xxd++)=*(xxs++);}
+#endif
+
+/*
+** If compiling for a processor that lacks floating point support,
+** substitute integer for floating-point
+*/
+#ifdef SQLITE_OMIT_FLOATING_POINT
+# define double sqlite_int64
+# define float sqlite_int64
+# define LONGDOUBLE_TYPE sqlite_int64
+# ifndef SQLITE_BIG_DBL
+# define SQLITE_BIG_DBL (((sqlite3_int64)1)<<50)
+# endif
+# define SQLITE_OMIT_DATETIME_FUNCS 1
+# define SQLITE_OMIT_TRACE 1
+# undef SQLITE_MIXED_ENDIAN_64BIT_FLOAT
+# undef SQLITE_HAVE_ISNAN
+#endif
+#ifndef SQLITE_BIG_DBL
+# define SQLITE_BIG_DBL (1e99)
+#endif
+
+/*
+** OMIT_TEMPDB is set to 1 if SQLITE_OMIT_TEMPDB is defined, or to 0
+** otherwise. Having this macro allows us to cause the C compiler
+** to omit code used by TEMP tables without messy #ifndef statements.
+*/
+#ifdef SQLITE_OMIT_TEMPDB
+#define OMIT_TEMPDB 1
+#else
+#define OMIT_TEMPDB 0
+#endif
+
+/*
+** The "file format" number is an integer that is incremented whenever
+** the VDBE-level file format changes. The following macros define the
+** default file format for new databases and the maximum file format
+** that the library can read.
+*/
+#define SQLITE_MAX_FILE_FORMAT 4
+#ifndef SQLITE_DEFAULT_FILE_FORMAT
+# define SQLITE_DEFAULT_FILE_FORMAT 4
+#endif
+
+/*
+** Determine whether triggers are recursive by default. This can be
+** changed at run-time using a pragma.
+*/
+#ifndef SQLITE_DEFAULT_RECURSIVE_TRIGGERS
+# define SQLITE_DEFAULT_RECURSIVE_TRIGGERS 0
+#endif
+
+/*
+** Provide a default value for SQLITE_TEMP_STORE in case it is not specified
+** on the command-line
+*/
+#ifndef SQLITE_TEMP_STORE
+# define SQLITE_TEMP_STORE 1
+#endif
+
+/*
+** If SQLITE_TEMP_STORE is set to 3 (never use temporary files) or the
+** library is built single-threaded, force SQLITE_MAX_WORKER_THREADS to
+** zero. Otherwise, if no value has been provided, default it to 8.
+*/
+#if SQLITE_TEMP_STORE==3 || SQLITE_THREADSAFE==0
+# undef SQLITE_MAX_WORKER_THREADS
+# define SQLITE_MAX_WORKER_THREADS 0
+#endif
+#ifndef SQLITE_MAX_WORKER_THREADS
+# define SQLITE_MAX_WORKER_THREADS 8
+#endif
+#ifndef SQLITE_DEFAULT_WORKER_THREADS
+# define SQLITE_DEFAULT_WORKER_THREADS 0
+#endif
+#if SQLITE_DEFAULT_WORKER_THREADS>SQLITE_MAX_WORKER_THREADS
+# undef SQLITE_MAX_WORKER_THREADS
+# define SQLITE_MAX_WORKER_THREADS SQLITE_DEFAULT_WORKER_THREADS
+#endif
+
+/*
+** The default initial allocation for the pagecache when using separate
+** pagecaches for each database connection. A positive number is the
+** number of pages. A negative number N means that a buffer
+** of -1024*N bytes is allocated and used for as many pages as it will hold.
+**
+** The default value of "20" was chosen to minimize the run-time of the
+** speedtest1 test program with options: --shrink-memory --reprepare
+*/
+#ifndef SQLITE_DEFAULT_PCACHE_INITSZ
+# define SQLITE_DEFAULT_PCACHE_INITSZ 20
+#endif
+
+/*
+** Default value for the SQLITE_CONFIG_SORTERREF_SIZE option.
+*/
+#ifndef SQLITE_DEFAULT_SORTERREF_SIZE
+# define SQLITE_DEFAULT_SORTERREF_SIZE 0x7fffffff
+#endif
+
+/*
+** The compile-time options SQLITE_MMAP_READWRITE and
+** SQLITE_ENABLE_BATCH_ATOMIC_WRITE are not compatible with one another.
+** You must choose one or the other (or neither) but not both.
+*/
+#if defined(SQLITE_MMAP_READWRITE) && defined(SQLITE_ENABLE_BATCH_ATOMIC_WRITE)
+#error Cannot use both SQLITE_MMAP_READWRITE and SQLITE_ENABLE_BATCH_ATOMIC_WRITE
+#endif
+
+/*
+** GCC does not define the offsetof() macro so we'll have to do it
+** ourselves.
+*/
+#ifndef offsetof
+#define offsetof(STRUCTURE,FIELD) ((int)((char*)&((STRUCTURE*)0)->FIELD))
+#endif
+
+/*
+** Macros to compute minimum and maximum of two numbers.
+*/
+#ifndef MIN
+# define MIN(A,B) ((A)<(B)?(A):(B))
+#endif
+#ifndef MAX
+# define MAX(A,B) ((A)>(B)?(A):(B))
+#endif
+
+/*
+** Swap two objects of type TYPE.
+*/
+#define SWAP(TYPE,A,B) {TYPE t=A; A=B; B=t;}
+
+/*
+** Check to see if this machine uses EBCDIC. (Yes, believe it or
+** not, there are still machines out there that use EBCDIC.)
+*/
+#if 'A' == '\301'
+# define SQLITE_EBCDIC 1
+#else
+# define SQLITE_ASCII 1
+#endif
+
+/*
+** Integers of known sizes. These typedefs might change for architectures
+** where the sizes vary. Preprocessor macros are available so that the
+** types can be conveniently redefined at compile-time. Like this:
+**
+** cc '-DUINTPTR_TYPE=long long int' ...
+*/
+#ifndef UINT32_TYPE
+# ifdef HAVE_UINT32_T
+# define UINT32_TYPE uint32_t
+# else
+# define UINT32_TYPE unsigned int
+# endif
+#endif
+#ifndef UINT16_TYPE
+# ifdef HAVE_UINT16_T
+# define UINT16_TYPE uint16_t
+# else
+# define UINT16_TYPE unsigned short int
+# endif
+#endif
+#ifndef INT16_TYPE
+# ifdef HAVE_INT16_T
+# define INT16_TYPE int16_t
+# else
+# define INT16_TYPE short int
+# endif
+#endif
+#ifndef UINT8_TYPE
+# ifdef HAVE_UINT8_T
+# define UINT8_TYPE uint8_t
+# else
+# define UINT8_TYPE unsigned char
+# endif
+#endif
+#ifndef INT8_TYPE
+# ifdef HAVE_INT8_T
+# define INT8_TYPE int8_t
+# else
+# define INT8_TYPE signed char
+# endif
+#endif
+#ifndef LONGDOUBLE_TYPE
+# define LONGDOUBLE_TYPE long double
+#endif
+typedef sqlite_int64 i64; /* 8-byte signed integer */
+typedef sqlite_uint64 u64; /* 8-byte unsigned integer */
+typedef UINT32_TYPE u32; /* 4-byte unsigned integer */
+typedef UINT16_TYPE u16; /* 2-byte unsigned integer */
+typedef INT16_TYPE i16; /* 2-byte signed integer */
+typedef UINT8_TYPE u8; /* 1-byte unsigned integer */
+typedef INT8_TYPE i8; /* 1-byte signed integer */
+
+/*
+** SQLITE_MAX_U32 is a u64 constant that is the maximum u64 value
+** that can be stored in a u32 without loss of data. The value
+** is 0x00000000ffffffff. But because of quirks of some compilers, we
+** have to specify the value in the less intuitive manner shown:
+*/
+#define SQLITE_MAX_U32 ((((u64)1)<<32)-1)
+
+/*
+** The datatype used to store estimates of the number of rows in a
+** table or index. This is an unsigned integer type. For 99.9% of
+** the world, a 32-bit integer is sufficient. But a 64-bit integer
+** can be used at compile-time if desired.
+*/
+#ifdef SQLITE_64BIT_STATS
+ typedef u64 tRowcnt; /* 64-bit only if requested at compile-time */
+#else
+ typedef u32 tRowcnt; /* 32-bit is the default */
+#endif
+
+/*
+** Estimated quantities used for query planning are stored as 16-bit
+** logarithms. For quantity X, the value stored is 10*log2(X). This
+** gives a possible range of values of approximately 1.0e986 to 1e-986.
+** But the allowed values are "grainy". Not every value is representable.
+** For example, quantities 16 and 17 are both represented by a LogEst
+** of 40. However, since LogEst quantities are supposed to be estimates,
+** not exact values, this imprecision is not a problem.
+**
+** "LogEst" is short for "Logarithmic Estimate".
+**
+** Examples:
+** 1 -> 0 20 -> 43 10000 -> 132
+** 2 -> 10 25 -> 46 25000 -> 146
+** 3 -> 16 100 -> 66 1000000 -> 199
+** 4 -> 20 1000 -> 99 1048576 -> 200
+** 10 -> 33 1024 -> 100 4294967296 -> 320
+**
+** The LogEst can be negative to indicate fractional values.
+** Examples:
+**
+** 0.5 -> -10 0.1 -> -33 0.0625 -> -40
+*/
+typedef INT16_TYPE LogEst;
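+
+/*
+** The integer part of the encoding is easy to compute. The sketch
+** below (illustrative only; the library's own conversion routine also
+** approximates the fractional part) shows the idea for x>=1:
+**
+**   static LogEst logEstSketch(u64 x){
+**     LogEst y = 0;
+**     while( x>=2 ){ x >>= 1; y += 10; }   // y = 10*floor(log2(x))
+**     return y;
+**   }
+**
+** With this sketch 16 and 17 both map to 40 and 1024 maps to 100,
+** matching the "grainy" behaviour described above.
+*/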
+
+/*
+** Set the SQLITE_PTRSIZE macro to the number of bytes in a pointer
+*/
+#ifndef SQLITE_PTRSIZE
+# if defined(__SIZEOF_POINTER__)
+# define SQLITE_PTRSIZE __SIZEOF_POINTER__
+# elif defined(i386) || defined(__i386__) || defined(_M_IX86) || \
+ defined(_M_ARM) || defined(__arm__) || defined(__x86) || \
+ (defined(__TOS_AIX__) && !defined(__64BIT__))
+# define SQLITE_PTRSIZE 4
+# else
+# define SQLITE_PTRSIZE 8
+# endif
+#endif
+
+/* The uptr type is an unsigned integer large enough to hold a pointer
+*/
+#if defined(HAVE_STDINT_H)
+ typedef uintptr_t uptr;
+#elif SQLITE_PTRSIZE==4
+ typedef u32 uptr;
+#else
+ typedef u64 uptr;
+#endif
+
+/*
+** The SQLITE_WITHIN(P,S,E) macro checks to see if pointer P points to
+** something between S (inclusive) and E (exclusive).
+**
+** In other words, S is a buffer and E is a pointer to the first byte after
+** the end of buffer S. This macro returns true if P points to something
+** contained within the buffer S.
+*/
+#define SQLITE_WITHIN(P,S,E) (((uptr)(P)>=(uptr)(S))&&((uptr)(P)<(uptr)(E)))
+
+
+/*
+** Macros to determine whether the machine is big or little endian,
+** and whether or not that determination is run-time or compile-time.
+**
+** For best performance, an attempt is made to guess at the byte-order
+** using C-preprocessor macros. If that is unsuccessful, or if
+** -DSQLITE_BYTEORDER=0 is set, then byte-order is determined
+** at run-time.
+*/
+#ifndef SQLITE_BYTEORDER
+# if defined(i386) || defined(__i386__) || defined(_M_IX86) || \
+ defined(__x86_64) || defined(__x86_64__) || defined(_M_X64) || \
+ defined(_M_AMD64) || defined(_M_ARM) || defined(__x86) || \
+ defined(__ARMEL__) || defined(__AARCH64EL__) || defined(_M_ARM64)
+# define SQLITE_BYTEORDER 1234
+# elif defined(sparc) || defined(__ppc__) || \
+ defined(__ARMEB__) || defined(__AARCH64EB__)
+# define SQLITE_BYTEORDER 4321
+# else
+# define SQLITE_BYTEORDER 0
+# endif
+#endif
+#if SQLITE_BYTEORDER==4321
+# define SQLITE_BIGENDIAN 1
+# define SQLITE_LITTLEENDIAN 0
+# define SQLITE_UTF16NATIVE SQLITE_UTF16BE
+#elif SQLITE_BYTEORDER==1234
+# define SQLITE_BIGENDIAN 0
+# define SQLITE_LITTLEENDIAN 1
+# define SQLITE_UTF16NATIVE SQLITE_UTF16LE
+#else
+# ifdef SQLITE_AMALGAMATION
+ const int sqlite3one = 1;
+# else
+ extern const int sqlite3one;
+# endif
+# define SQLITE_BIGENDIAN (*(char *)(&sqlite3one)==0)
+# define SQLITE_LITTLEENDIAN (*(char *)(&sqlite3one)==1)
+# define SQLITE_UTF16NATIVE (SQLITE_BIGENDIAN?SQLITE_UTF16BE:SQLITE_UTF16LE)
+#endif
+
+/*
+** Constants for the largest and smallest possible 64-bit signed integers.
+** These macros are designed to work correctly on both 32-bit and 64-bit
+** compilers.
+*/
+#define LARGEST_INT64 (0xffffffff|(((i64)0x7fffffff)<<32))
+#define LARGEST_UINT64 (0xffffffff|(((u64)0xffffffff)<<32))
+#define SMALLEST_INT64 (((i64)-1) - LARGEST_INT64)
+
+/*
+** Round up a number to the next larger multiple of 8. This is used
+** to force 8-byte alignment on 64-bit architectures.
+*/
+#define ROUND8(x) (((x)+7)&~7)
+
+/*
+** Round down to the nearest multiple of 8
+*/
+#define ROUNDDOWN8(x) ((x)&~7)
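+
+/*
+** For example, ROUND8(13)==16 and ROUNDDOWN8(13)==8. The bit trick is
+** valid because 8 is a power of two: adding 7 and then clearing the
+** low three bits rounds up, while clearing them directly rounds down.
+*/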
+
+/*
+** Assert that the pointer X is aligned to an 8-byte boundary. This
+** macro is used only within assert() to verify that the code gets
+** all alignment restrictions correct.
+**
+** Except, if SQLITE_4_BYTE_ALIGNED_MALLOC is defined, then the
+** underlying malloc() implementation might return us 4-byte aligned
+** pointers. In that case, only verify 4-byte alignment.
+*/
+#ifdef SQLITE_4_BYTE_ALIGNED_MALLOC
+# define EIGHT_BYTE_ALIGNMENT(X) ((((char*)(X) - (char*)0)&3)==0)
+#else
+# define EIGHT_BYTE_ALIGNMENT(X) ((((char*)(X) - (char*)0)&7)==0)
+#endif
+
+/*
+** Disable MMAP on platforms where it is known to not work
+*/
+#if defined(__OpenBSD__) || defined(__QNXNTO__)
+# undef SQLITE_MAX_MMAP_SIZE
+# define SQLITE_MAX_MMAP_SIZE 0
+#endif
+
+/*
+** Default maximum size of memory used by memory-mapped I/O in the VFS
+*/
+#ifdef __APPLE__
+# include <TargetConditionals.h>
+#endif
+#ifndef SQLITE_MAX_MMAP_SIZE
+# if defined(__linux__) \
+ || defined(_WIN32) \
+ || (defined(__APPLE__) && defined(__MACH__)) \
+ || defined(__sun) \
+ || defined(__FreeBSD__) \
+ || defined(__DragonFly__)
+# define SQLITE_MAX_MMAP_SIZE 0x7fff0000 /* 2147418112 */
+# else
+# define SQLITE_MAX_MMAP_SIZE 0
+# endif
+#endif
+
+/*
+** The default MMAP_SIZE is zero on all platforms. Even if a larger
+** default MMAP_SIZE is specified at compile-time, make sure that it does
+** not exceed the maximum mmap size.
+*/
+#ifndef SQLITE_DEFAULT_MMAP_SIZE
+# define SQLITE_DEFAULT_MMAP_SIZE 0
+#endif
+#if SQLITE_DEFAULT_MMAP_SIZE>SQLITE_MAX_MMAP_SIZE
+# undef SQLITE_DEFAULT_MMAP_SIZE
+# define SQLITE_DEFAULT_MMAP_SIZE SQLITE_MAX_MMAP_SIZE
+#endif
+
+/*
+** SELECTTRACE_ENABLED will be either 1 or 0 depending on whether or not
+** the Select query generator tracing logic is turned on.
+*/
+#if defined(SQLITE_ENABLE_SELECTTRACE)
+# define SELECTTRACE_ENABLED 1
+# define SELECTTRACE(K,P,S,X) \
+ if(sqlite3_unsupported_selecttrace&(K)) \
+ sqlite3DebugPrintf("%u/%d/%p: ",(S)->selId,(P)->addrExplain,(S)),\
+ sqlite3DebugPrintf X
+#else
+# define SELECTTRACE(K,P,S,X)
+# define SELECTTRACE_ENABLED 0
+#endif
+
+/*
+** An instance of the following structure is used to store the busy-handler
+** callback for a given sqlite handle.
+**
+** The sqlite.busyHandler member of the sqlite struct contains the busy
+** callback for the database handle. Each pager opened via the sqlite
+** handle is passed a pointer to sqlite.busyHandler. The busy-handler
+** callback is currently invoked only from within pager.c.
+*/
+typedef struct BusyHandler BusyHandler;
+struct BusyHandler {
+ int (*xBusyHandler)(void *,int); /* The busy callback */
+ void *pBusyArg; /* First arg to busy callback */
+ int nBusy; /* Incremented with each busy call */
+};
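+
+/*
+** Applications normally install such a callback through the public
+** sqlite3_busy_handler() interface. An illustrative sketch, where
+** busyCb is a hypothetical name:
+**
+**   static int busyCb(void *pArg, int nPrior){
+**     if( nPrior>=10 ) return 0;   // give up; caller gets SQLITE_BUSY
+**     sqlite3_sleep(100);          // wait 100 ms, then ask to retry
+**     return 1;
+**   }
+**
+**   sqlite3_busy_handler(db, busyCb, 0);
+*/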
+
+/*
+** Name of table that holds the database schema.
+*/
+#define DFLT_SCHEMA_TABLE "sqlite_master"
+#define DFLT_TEMP_SCHEMA_TABLE "sqlite_temp_master"
+#define ALT_SCHEMA_TABLE "sqlite_schema"
+#define ALT_TEMP_SCHEMA_TABLE "sqlite_temp_schema"
+
+
+/*
+** The root-page of the schema table.
+*/
+#define SCHEMA_ROOT 1
+
+/*
+** The name of the schema table. The name is different for TEMP.
+*/
+#define SCHEMA_TABLE(x) \
+ ((!OMIT_TEMPDB)&&(x==1)?DFLT_TEMP_SCHEMA_TABLE:DFLT_SCHEMA_TABLE)
+
+/*
+** A convenience macro that returns the number of elements in
+** an array.
+*/
+#define ArraySize(X) ((int)(sizeof(X)/sizeof(X[0])))
+
+/*
+** Determine if the argument is a power of two
+*/
+#define IsPowerOfTwo(X) (((X)&((X)-1))==0)
+
+/*
+** The following value as a destructor means to use sqlite3DbFree().
+** The sqlite3DbFree() routine requires two parameters instead of the
+** one parameter that destructors normally want. So we have to introduce
+** this magic value that the code knows to handle differently. Any
+** pointer will work here as long as it is distinct from SQLITE_STATIC
+** and SQLITE_TRANSIENT.
+*/
+#define SQLITE_DYNAMIC ((sqlite3_destructor_type)sqlite3OomFault)
+
+/*
+** When SQLITE_OMIT_WSD is defined, it means that the target platform does
+** not support Writable Static Data (WSD) such as global and static variables.
+** All variables must either be on the stack or dynamically allocated from
+** the heap. When WSD is unsupported, the variable declarations scattered
+** throughout the SQLite code must become constants instead. The SQLITE_WSD
+** macro is used for this purpose. And instead of referencing the variable
+** directly, we use its constant as a key to look up the run-time allocated
+** buffer that holds the real variable. The constant is also the initializer
+** for the run-time allocated buffer.
+**
+** In the usual case where WSD is supported, the SQLITE_WSD and GLOBAL
+** macros become no-ops and have zero performance impact.
+*/
+#ifdef SQLITE_OMIT_WSD
+ #define SQLITE_WSD const
+ #define GLOBAL(t,v) (*(t*)sqlite3_wsd_find((void*)&(v), sizeof(v)))
+ #define sqlite3GlobalConfig GLOBAL(struct Sqlite3Config, sqlite3Config)
+SQLITE_API int sqlite3_wsd_init(int N, int J);
+SQLITE_API void *sqlite3_wsd_find(void *K, int L);
+#else
+ #define SQLITE_WSD
+ #define GLOBAL(t,v) v
+ #define sqlite3GlobalConfig sqlite3Config
+#endif
+
+/*
+** The following macros are used to suppress compiler warnings and to
+** make it clear to human readers when a function parameter is deliberately
+** left unused within the body of a function. This usually happens when
+** a function is called via a function pointer. For example the
+** implementation of an SQL aggregate step callback may not use the
+** parameter indicating the number of arguments passed to the aggregate,
+** if it knows that this is enforced elsewhere.
+**
+** When a function parameter is not used at all within the body of a function,
+** it is generally named "NotUsed" or "NotUsed2" to make things even clearer.
+** However, these macros may also be used to suppress warnings related to
+** parameters that may or may not be used depending on compilation options.
+** For example those parameters only used in assert() statements. In these
+** cases the parameters are named as per the usual conventions.
+*/
+#define UNUSED_PARAMETER(x) (void)(x)
+#define UNUSED_PARAMETER2(x,y) UNUSED_PARAMETER(x),UNUSED_PARAMETER(y)
+
+/*
+** Forward references to structures
+*/
+typedef struct AggInfo AggInfo;
+typedef struct AuthContext AuthContext;
+typedef struct AutoincInfo AutoincInfo;
+typedef struct Bitvec Bitvec;
+typedef struct CollSeq CollSeq;
+typedef struct Column Column;
+typedef struct Db Db;
+typedef struct Schema Schema;
+typedef struct Expr Expr;
+typedef struct ExprList ExprList;
+typedef struct FKey FKey;
+typedef struct FuncDestructor FuncDestructor;
+typedef struct FuncDef FuncDef;
+typedef struct FuncDefHash FuncDefHash;
+typedef struct IdList IdList;
+typedef struct Index Index;
+typedef struct IndexSample IndexSample;
+typedef struct KeyClass KeyClass;
+typedef struct KeyInfo KeyInfo;
+typedef struct Lookaside Lookaside;
+typedef struct LookasideSlot LookasideSlot;
+typedef struct Module Module;
+typedef struct NameContext NameContext;
+typedef struct Parse Parse;
+typedef struct PreUpdate PreUpdate;
+typedef struct PrintfArguments PrintfArguments;
+typedef struct RenameToken RenameToken;
+typedef struct RowSet RowSet;
+typedef struct Savepoint Savepoint;
+typedef struct Select Select;
+typedef struct SQLiteThread SQLiteThread;
+typedef struct SelectDest SelectDest;
+typedef struct SrcList SrcList;
+typedef struct sqlite3_str StrAccum; /* Internal alias for sqlite3_str */
+typedef struct Table Table;
+typedef struct TableLock TableLock;
+typedef struct Token Token;
+typedef struct TreeView TreeView;
+typedef struct Trigger Trigger;
+typedef struct TriggerPrg TriggerPrg;
+typedef struct TriggerStep TriggerStep;
+typedef struct UnpackedRecord UnpackedRecord;
+typedef struct Upsert Upsert;
+typedef struct VTable VTable;
+typedef struct VtabCtx VtabCtx;
+typedef struct Walker Walker;
+typedef struct WhereInfo WhereInfo;
+typedef struct Window Window;
+typedef struct With With;
+
+
+/*
+** The bitmask datatype defined below is used for various optimizations.
+**
+** Changing this from a 64-bit to a 32-bit type limits the number of
+** tables in a join to 32 instead of 64. But it also reduces the size
+** of the library by 738 bytes on ix86.
+*/
+#ifdef SQLITE_BITMASK_TYPE
+ typedef SQLITE_BITMASK_TYPE Bitmask;
+#else
+ typedef u64 Bitmask;
+#endif
+
+/*
+** The number of bits in a Bitmask. "BMS" means "BitMask Size".
+*/
+#define BMS ((int)(sizeof(Bitmask)*8))
+
+/*
+** A bit in a Bitmask
+*/
+#define MASKBIT(n) (((Bitmask)1)<<(n))
+#define MASKBIT64(n) (((u64)1)<<(n))
+#define MASKBIT32(n) (((unsigned int)1)<<(n))
+#define ALLBITS ((Bitmask)-1)
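+
+/*
+** For example (a sketch), if the tables with cursor numbers 0 and 2
+** are referenced by an expression, the corresponding mask is:
+**
+**   Bitmask m = MASKBIT(0)|MASKBIT(2);   // m==0x5
+**
+** and (m & MASKBIT(1))==0 records that cursor 1 is not referenced.
+*/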
+
+/* A VList object records a mapping between parameters/variables/wildcards
+** in the SQL statement (such as $abc, @pqr, or :xyz) and the integer
+** variable number associated with that parameter. See the format description
+** on the sqlite3VListAdd() routine for more information. A VList is really
+** just an array of integers.
+*/
+typedef int VList;
+
+/*
+** Defer sourcing vdbe.h and btree.h until after the "u8" and
+** "BusyHandler" typedefs. vdbe.h also requires a few of the opaque
+** pointer types (i.e. FuncDef) defined above.
+*/
+/************** Include pager.h in the middle of sqliteInt.h *****************/
+/************** Begin file pager.h *******************************************/
+/*
+** 2001 September 15
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+** This header file defines the interface to the SQLite page cache
+** subsystem. The page cache subsystem reads and writes a file a page
+** at a time and provides a journal for rollback.
+*/
+
+#ifndef SQLITE_PAGER_H
+#define SQLITE_PAGER_H
+
+/*
+** Default maximum size for persistent journal files. A negative
+** value means no limit. This value may be overridden using the
+** sqlite3PagerJournalSizeLimit() API. See also "PRAGMA journal_size_limit".
+*/
+#ifndef SQLITE_DEFAULT_JOURNAL_SIZE_LIMIT
+ #define SQLITE_DEFAULT_JOURNAL_SIZE_LIMIT -1
+#endif
+
+/*
+** The type used to represent a page number. The first page in a file
+** is called page 1. 0 is used to represent "not a page".
+*/
+typedef u32 Pgno;
+
+/*
+** Each open file is managed by a separate instance of the "Pager" structure.
+*/
+typedef struct Pager Pager;
+
+/*
+** Handle type for pages.
+*/
+typedef struct PgHdr DbPage;
+
+/*
+** Page number PAGER_MJ_PGNO is never used in an SQLite database (it is
+** reserved for working around a windows/posix incompatibility). It is
+** used in the journal to signify that the remainder of the journal file
+** is devoted to storing a super-journal name - there are no more pages to
+** roll back. See comments for function writeSuperJournal() in pager.c
+** for details.
+*/
+#define PAGER_MJ_PGNO(x) ((Pgno)((PENDING_BYTE/((x)->pageSize))+1))
+
+/*
+** Allowed values for the flags parameter to sqlite3PagerOpen().
+**
+** NOTE: These values must match the corresponding BTREE_ values in btree.h.
+*/
+#define PAGER_OMIT_JOURNAL 0x0001 /* Do not use a rollback journal */
+#define PAGER_MEMORY 0x0002 /* In-memory database */
+
+/*
+** Valid values for the second argument to sqlite3PagerLockingMode().
+*/
+#define PAGER_LOCKINGMODE_QUERY -1
+#define PAGER_LOCKINGMODE_NORMAL 0
+#define PAGER_LOCKINGMODE_EXCLUSIVE 1
+
+/*
+** Numeric constants that encode the journalmode.
+**
+** The numeric values encoded here (other than PAGER_JOURNALMODE_QUERY)
+** are exposed in the API via the "PRAGMA journal_mode" command and
+** therefore cannot be changed without a compatibility break.
+*/
+#define PAGER_JOURNALMODE_QUERY (-1) /* Query the value of journalmode */
+#define PAGER_JOURNALMODE_DELETE 0 /* Commit by deleting journal file */
+#define PAGER_JOURNALMODE_PERSIST 1 /* Commit by zeroing journal header */
+#define PAGER_JOURNALMODE_OFF 2 /* Journal omitted. */
+#define PAGER_JOURNALMODE_TRUNCATE 3 /* Commit by truncating journal */
+#define PAGER_JOURNALMODE_MEMORY 4 /* In-memory journal file */
+#define PAGER_JOURNALMODE_WAL 5 /* Use write-ahead logging */
+
+/*
+** Flags that make up the mask passed to sqlite3PagerGet().
+*/
+#define PAGER_GET_NOCONTENT 0x01 /* Do not load data from disk */
+#define PAGER_GET_READONLY 0x02 /* Read-only page is acceptable */
+
+/*
+** Flags for sqlite3PagerSetFlags()
+**
+** Value constraints (enforced via assert()):
+** PAGER_FULLFSYNC == SQLITE_FullFSync
+** PAGER_CKPT_FULLFSYNC == SQLITE_CkptFullFSync
+** PAGER_CACHE_SPILL == SQLITE_CacheSpill
+*/
+#define PAGER_SYNCHRONOUS_OFF 0x01 /* PRAGMA synchronous=OFF */
+#define PAGER_SYNCHRONOUS_NORMAL 0x02 /* PRAGMA synchronous=NORMAL */
+#define PAGER_SYNCHRONOUS_FULL 0x03 /* PRAGMA synchronous=FULL */
+#define PAGER_SYNCHRONOUS_EXTRA 0x04 /* PRAGMA synchronous=EXTRA */
+#define PAGER_SYNCHRONOUS_MASK 0x07 /* Mask for four values above */
+#define PAGER_FULLFSYNC 0x08 /* PRAGMA fullfsync=ON */
+#define PAGER_CKPT_FULLFSYNC 0x10 /* PRAGMA checkpoint_fullfsync=ON */
+#define PAGER_CACHESPILL 0x20 /* PRAGMA cache_spill=ON */
+#define PAGER_FLAGS_MASK 0x38 /* All above except SYNCHRONOUS */
+
+/*
+** The remainder of this file contains the declarations of the functions
+** that make up the Pager sub-system API. See source code comments for
+** a detailed description of each routine.
+*/
+
+/* Open and close a Pager connection. */
+SQLITE_PRIVATE int sqlite3PagerOpen(
+ sqlite3_vfs*,
+ Pager **ppPager,
+ const char*,
+ int,
+ int,
+ int,
+ void(*)(DbPage*)
+);
+SQLITE_PRIVATE int sqlite3PagerClose(Pager *pPager, sqlite3*);
+SQLITE_PRIVATE int sqlite3PagerReadFileheader(Pager*, int, unsigned char*);
+
+/* Functions used to configure a Pager object. */
+SQLITE_PRIVATE void sqlite3PagerSetBusyHandler(Pager*, int(*)(void *), void *);
+SQLITE_PRIVATE int sqlite3PagerSetPagesize(Pager*, u32*, int);
+SQLITE_PRIVATE Pgno sqlite3PagerMaxPageCount(Pager*, Pgno);
+SQLITE_PRIVATE void sqlite3PagerSetCachesize(Pager*, int);
+SQLITE_PRIVATE int sqlite3PagerSetSpillsize(Pager*, int);
+SQLITE_PRIVATE void sqlite3PagerSetMmapLimit(Pager *, sqlite3_int64);
+SQLITE_PRIVATE void sqlite3PagerShrink(Pager*);
+SQLITE_PRIVATE void sqlite3PagerSetFlags(Pager*,unsigned);
+SQLITE_PRIVATE int sqlite3PagerLockingMode(Pager *, int);
+SQLITE_PRIVATE int sqlite3PagerSetJournalMode(Pager *, int);
+SQLITE_PRIVATE int sqlite3PagerGetJournalMode(Pager*);
+SQLITE_PRIVATE int sqlite3PagerOkToChangeJournalMode(Pager*);
+SQLITE_PRIVATE i64 sqlite3PagerJournalSizeLimit(Pager *, i64);
+SQLITE_PRIVATE sqlite3_backup **sqlite3PagerBackupPtr(Pager*);
+SQLITE_PRIVATE int sqlite3PagerFlush(Pager*);
+
+/* Functions used to obtain and release page references. */
+SQLITE_PRIVATE int sqlite3PagerGet(Pager *pPager, Pgno pgno, DbPage **ppPage, int clrFlag);
+SQLITE_PRIVATE DbPage *sqlite3PagerLookup(Pager *pPager, Pgno pgno);
+SQLITE_PRIVATE void sqlite3PagerRef(DbPage*);
+SQLITE_PRIVATE void sqlite3PagerUnref(DbPage*);
+SQLITE_PRIVATE void sqlite3PagerUnrefNotNull(DbPage*);
+SQLITE_PRIVATE void sqlite3PagerUnrefPageOne(DbPage*);
+
+/* Operations on page references. */
+SQLITE_PRIVATE int sqlite3PagerWrite(DbPage*);
+SQLITE_PRIVATE void sqlite3PagerDontWrite(DbPage*);
+SQLITE_PRIVATE int sqlite3PagerMovepage(Pager*,DbPage*,Pgno,int);
+SQLITE_PRIVATE int sqlite3PagerPageRefcount(DbPage*);
+SQLITE_PRIVATE void *sqlite3PagerGetData(DbPage *);
+SQLITE_PRIVATE void *sqlite3PagerGetExtra(DbPage *);
+
+/* Functions used to manage pager transactions and savepoints. */
+SQLITE_PRIVATE void sqlite3PagerPagecount(Pager*, int*);
+SQLITE_PRIVATE int sqlite3PagerBegin(Pager*, int exFlag, int);
+SQLITE_PRIVATE int sqlite3PagerCommitPhaseOne(Pager*,const char *zSuper, int);
+SQLITE_PRIVATE int sqlite3PagerExclusiveLock(Pager*);
+SQLITE_PRIVATE int sqlite3PagerSync(Pager *pPager, const char *zSuper);
+SQLITE_PRIVATE int sqlite3PagerCommitPhaseTwo(Pager*);
+SQLITE_PRIVATE int sqlite3PagerRollback(Pager*);
+SQLITE_PRIVATE int sqlite3PagerOpenSavepoint(Pager *pPager, int n);
+SQLITE_PRIVATE int sqlite3PagerSavepoint(Pager *pPager, int op, int iSavepoint);
+SQLITE_PRIVATE int sqlite3PagerSharedLock(Pager *pPager);
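+
+/* Illustrative sketch, not part of the SQLite sources: the two-phase
+** commit shape implied by the declarations above. Error handling is
+** reduced to a single rollback; the real caller (btree.c) does more.
+*/
+#if 0 /* illustrative only; excluded from the build */
+static int exampleCommit(Pager *pPager){
+  int rc = sqlite3PagerCommitPhaseOne(pPager, 0, 0); /* write + sync */
+  if( rc==SQLITE_OK ) rc = sqlite3PagerCommitPhaseTwo(pPager); /* finalize */
+  if( rc!=SQLITE_OK ) sqlite3PagerRollback(pPager);
+  return rc;
+}
+#endif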
+
+#ifndef SQLITE_OMIT_WAL
+SQLITE_PRIVATE int sqlite3PagerCheckpoint(Pager *pPager, sqlite3*, int, int*, int*);
+SQLITE_PRIVATE int sqlite3PagerWalSupported(Pager *pPager);
+SQLITE_PRIVATE int sqlite3PagerWalCallback(Pager *pPager);
+SQLITE_PRIVATE int sqlite3PagerOpenWal(Pager *pPager, int *pisOpen);
+SQLITE_PRIVATE int sqlite3PagerCloseWal(Pager *pPager, sqlite3*);
+# ifdef SQLITE_ENABLE_SNAPSHOT
+SQLITE_PRIVATE int sqlite3PagerSnapshotGet(Pager*, sqlite3_snapshot **ppSnapshot);
+SQLITE_PRIVATE int sqlite3PagerSnapshotOpen(Pager*, sqlite3_snapshot *pSnapshot);
+SQLITE_PRIVATE int sqlite3PagerSnapshotRecover(Pager *pPager);
+SQLITE_PRIVATE int sqlite3PagerSnapshotCheck(Pager *pPager, sqlite3_snapshot *pSnapshot);
+SQLITE_PRIVATE void sqlite3PagerSnapshotUnlock(Pager *pPager);
+# endif
+#endif
+
+#if !defined(SQLITE_OMIT_WAL) && defined(SQLITE_ENABLE_SETLK_TIMEOUT)
+SQLITE_PRIVATE int sqlite3PagerWalWriteLock(Pager*, int);
+SQLITE_PRIVATE void sqlite3PagerWalDb(Pager*, sqlite3*);
+#else
+# define sqlite3PagerWalWriteLock(y,z) SQLITE_OK
+# define sqlite3PagerWalDb(x,y)
+#endif
+
+#ifdef SQLITE_DIRECT_OVERFLOW_READ
+SQLITE_PRIVATE int sqlite3PagerDirectReadOk(Pager *pPager, Pgno pgno);
+#endif
+
+#ifdef SQLITE_ENABLE_ZIPVFS
+SQLITE_PRIVATE int sqlite3PagerWalFramesize(Pager *pPager);
+#endif
+
+/* Functions used to query pager state and configuration. */
+SQLITE_PRIVATE u8 sqlite3PagerIsreadonly(Pager*);
+SQLITE_PRIVATE u32 sqlite3PagerDataVersion(Pager*);
+#ifdef SQLITE_DEBUG
+SQLITE_PRIVATE int sqlite3PagerRefcount(Pager*);
+#endif
+SQLITE_PRIVATE int sqlite3PagerMemUsed(Pager*);
+SQLITE_PRIVATE const char *sqlite3PagerFilename(const Pager*, int);
+SQLITE_PRIVATE sqlite3_vfs *sqlite3PagerVfs(Pager*);
+SQLITE_PRIVATE sqlite3_file *sqlite3PagerFile(Pager*);
+SQLITE_PRIVATE sqlite3_file *sqlite3PagerJrnlFile(Pager*);
+SQLITE_PRIVATE const char *sqlite3PagerJournalname(Pager*);
+SQLITE_PRIVATE void *sqlite3PagerTempSpace(Pager*);
+SQLITE_PRIVATE int sqlite3PagerIsMemdb(Pager*);
+SQLITE_PRIVATE void sqlite3PagerCacheStat(Pager *, int, int, int *);
+SQLITE_PRIVATE void sqlite3PagerClearCache(Pager*);
+SQLITE_PRIVATE int sqlite3SectorSize(sqlite3_file *);
+
+/* Functions used to truncate the database file. */
+SQLITE_PRIVATE void sqlite3PagerTruncateImage(Pager*,Pgno);
+
+SQLITE_PRIVATE void sqlite3PagerRekey(DbPage*, Pgno, u16);
+
+/* Functions to support testing and debugging. */
+#if !defined(NDEBUG) || defined(SQLITE_TEST)
+SQLITE_PRIVATE Pgno sqlite3PagerPagenumber(DbPage*);
+SQLITE_PRIVATE int sqlite3PagerIswriteable(DbPage*);
+#endif
+#ifdef SQLITE_TEST
+SQLITE_PRIVATE int *sqlite3PagerStats(Pager*);
+SQLITE_PRIVATE void sqlite3PagerRefdump(Pager*);
+ void disable_simulated_io_errors(void);
+ void enable_simulated_io_errors(void);
+#else
+# define disable_simulated_io_errors()
+# define enable_simulated_io_errors()
+#endif
+
+#endif /* SQLITE_PAGER_H */
+
+/************** End of pager.h ***********************************************/
+/************** Continuing where we left off in sqliteInt.h ******************/
+/************** Include btree.h in the middle of sqliteInt.h *****************/
+/************** Begin file btree.h *******************************************/
+/*
+** 2001 September 15
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+** This header file defines the interface to the sqlite B-Tree file
+** subsystem. See comments in the source code for a detailed description
+** of what each interface routine does.
+*/
+#ifndef SQLITE_BTREE_H
+#define SQLITE_BTREE_H
+
+/* TODO: This definition is just included so other modules compile. It
+** needs to be revisited.
+*/
+#define SQLITE_N_BTREE_META 16
+
+/*
+** If defined as non-zero, auto-vacuum is enabled by default. Otherwise
+** it must be turned on for each database using "PRAGMA auto_vacuum = 1".
+*/
+#ifndef SQLITE_DEFAULT_AUTOVACUUM
+ #define SQLITE_DEFAULT_AUTOVACUUM 0
+#endif
+
+#define BTREE_AUTOVACUUM_NONE 0 /* Do not do auto-vacuum */
+#define BTREE_AUTOVACUUM_FULL 1 /* Do full auto-vacuum */
+#define BTREE_AUTOVACUUM_INCR 2 /* Incremental vacuum */
+
+/*
+** Forward declarations of structure
+*/
+typedef struct Btree Btree;
+typedef struct BtCursor BtCursor;
+typedef struct BtShared BtShared;
+typedef struct BtreePayload BtreePayload;
+
+
+SQLITE_PRIVATE int sqlite3BtreeOpen(
+ sqlite3_vfs *pVfs, /* VFS to use with this b-tree */
+ const char *zFilename, /* Name of database file to open */
+ sqlite3 *db, /* Associated database connection */
+ Btree **ppBtree, /* Return open Btree* here */
+ int flags, /* Flags */
+ int vfsFlags /* Flags passed through to VFS open */
+);
+
+/* The flags parameter to sqlite3BtreeOpen can be the bitwise or of the
+** following values.
+**
+** NOTE: These values must match the corresponding PAGER_ values in
+** pager.h.
+*/
+#define BTREE_OMIT_JOURNAL 1 /* Do not create or use a rollback journal */
+#define BTREE_MEMORY 2 /* This is an in-memory DB */
+#define BTREE_SINGLE 4 /* The file contains at most 1 b-tree */
+#define BTREE_UNORDERED 8 /* Use of a hash implementation is OK */
+
+SQLITE_PRIVATE int sqlite3BtreeClose(Btree*);
+SQLITE_PRIVATE int sqlite3BtreeSetCacheSize(Btree*,int);
+SQLITE_PRIVATE int sqlite3BtreeSetSpillSize(Btree*,int);
+#if SQLITE_MAX_MMAP_SIZE>0
+SQLITE_PRIVATE int sqlite3BtreeSetMmapLimit(Btree*,sqlite3_int64);
+#endif
+SQLITE_PRIVATE int sqlite3BtreeSetPagerFlags(Btree*,unsigned);
+SQLITE_PRIVATE int sqlite3BtreeSetPageSize(Btree *p, int nPagesize, int nReserve, int eFix);
+SQLITE_PRIVATE int sqlite3BtreeGetPageSize(Btree*);
+SQLITE_PRIVATE Pgno sqlite3BtreeMaxPageCount(Btree*,Pgno);
+SQLITE_PRIVATE Pgno sqlite3BtreeLastPage(Btree*);
+SQLITE_PRIVATE int sqlite3BtreeSecureDelete(Btree*,int);
+SQLITE_PRIVATE int sqlite3BtreeGetRequestedReserve(Btree*);
+SQLITE_PRIVATE int sqlite3BtreeGetReserveNoMutex(Btree *p);
+SQLITE_PRIVATE int sqlite3BtreeSetAutoVacuum(Btree *, int);
+SQLITE_PRIVATE int sqlite3BtreeGetAutoVacuum(Btree *);
+SQLITE_PRIVATE int sqlite3BtreeBeginTrans(Btree*,int,int*);
+SQLITE_PRIVATE int sqlite3BtreeCommitPhaseOne(Btree*, const char*);
+SQLITE_PRIVATE int sqlite3BtreeCommitPhaseTwo(Btree*, int);
+SQLITE_PRIVATE int sqlite3BtreeCommit(Btree*);
+SQLITE_PRIVATE int sqlite3BtreeRollback(Btree*,int,int);
+SQLITE_PRIVATE int sqlite3BtreeBeginStmt(Btree*,int);
+SQLITE_PRIVATE int sqlite3BtreeCreateTable(Btree*, Pgno*, int flags);
+SQLITE_PRIVATE int sqlite3BtreeIsInTrans(Btree*);
+SQLITE_PRIVATE int sqlite3BtreeIsInReadTrans(Btree*);
+SQLITE_PRIVATE int sqlite3BtreeIsInBackup(Btree*);
+SQLITE_PRIVATE void *sqlite3BtreeSchema(Btree *, int, void(*)(void *));
+SQLITE_PRIVATE int sqlite3BtreeSchemaLocked(Btree *pBtree);
+#ifndef SQLITE_OMIT_SHARED_CACHE
+SQLITE_PRIVATE int sqlite3BtreeLockTable(Btree *pBtree, int iTab, u8 isWriteLock);
+#endif
+SQLITE_PRIVATE int sqlite3BtreeSavepoint(Btree *, int, int);
+
+SQLITE_PRIVATE const char *sqlite3BtreeGetFilename(Btree *);
+SQLITE_PRIVATE const char *sqlite3BtreeGetJournalname(Btree *);
+SQLITE_PRIVATE int sqlite3BtreeCopyFile(Btree *, Btree *);
+
+SQLITE_PRIVATE int sqlite3BtreeIncrVacuum(Btree *);
+
+/* The flags parameter to sqlite3BtreeCreateTable can be the bitwise OR
+** of the flags shown below.
+**
+** Every SQLite table must have either BTREE_INTKEY or BTREE_BLOBKEY set.
+** With BTREE_INTKEY, the table key is a 64-bit integer and arbitrary data
+** is stored in the leaves. (BTREE_INTKEY is used for SQL tables.) With
+** BTREE_BLOBKEY, the key is an arbitrary BLOB and no content is stored
+** anywhere - the key is the content. (BTREE_BLOBKEY is used for SQL
+** indices.)
+*/
+#define BTREE_INTKEY 1 /* Table has only 64-bit signed integer keys */
+#define BTREE_BLOBKEY 2 /* Table has keys only - no data */
+
+SQLITE_PRIVATE int sqlite3BtreeDropTable(Btree*, int, int*);
+SQLITE_PRIVATE int sqlite3BtreeClearTable(Btree*, int, int*);
+SQLITE_PRIVATE int sqlite3BtreeClearTableOfCursor(BtCursor*);
+SQLITE_PRIVATE int sqlite3BtreeTripAllCursors(Btree*, int, int);
+
+SQLITE_PRIVATE void sqlite3BtreeGetMeta(Btree *pBtree, int idx, u32 *pValue);
+SQLITE_PRIVATE int sqlite3BtreeUpdateMeta(Btree*, int idx, u32 value);
+
+SQLITE_PRIVATE int sqlite3BtreeNewDb(Btree *p);
+
+/*
+** The second parameter to sqlite3BtreeGetMeta or sqlite3BtreeUpdateMeta
+** should be one of the following values. The integer values are assigned
+** to constants so that the offset of the corresponding field in an
+** SQLite database header may be found using the following formula:
+**
+** offset = 36 + (idx * 4)
+**
+** For example, the free-page-count field is located at byte offset 36 of
+** the database file header. The incr-vacuum-flag field is located at
+** byte offset 64 (== 36+4*7).
+**
+** The BTREE_DATA_VERSION value is not really a value stored in the header.
+** It is a read-only number computed by the pager. But we merge it with
+** the header value access routines since its access pattern is the same.
+** Call it a "virtual meta value".
+*/
+#define BTREE_FREE_PAGE_COUNT 0
+#define BTREE_SCHEMA_VERSION 1
+#define BTREE_FILE_FORMAT 2
+#define BTREE_DEFAULT_CACHE_SIZE 3
+#define BTREE_LARGEST_ROOT_PAGE 4
+#define BTREE_TEXT_ENCODING 5
+#define BTREE_USER_VERSION 6
+#define BTREE_INCR_VACUUM 7
+#define BTREE_APPLICATION_ID 8
+#define BTREE_DATA_VERSION 15 /* A virtual meta-value */
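+
+/* Illustrative sketch, not part of the SQLite sources: applying the
+** offset = 36 + idx*4 formula above to a raw copy of the 100-byte
+** database header (the start of page 1). Header integers are big-endian.
+** The helper is hypothetical.
+*/
+#if 0 /* illustrative only; excluded from the build */
+static u32 exampleReadMeta(const unsigned char *aHdr, int idx){
+  const unsigned char *p = &aHdr[36 + idx*4];
+  return ((u32)p[0]<<24) | ((u32)p[1]<<16) | ((u32)p[2]<<8) | (u32)p[3];
+}
+/* exampleReadMeta(aHdr, BTREE_USER_VERSION) reads byte offset 60, the
+** same field reported by "PRAGMA user_version". */
+#endif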
+
+/*
+** Kinds of hints that can be passed into the sqlite3BtreeCursorHint()
+** interface.
+**
+** BTREE_HINT_RANGE (arguments: Expr*, Mem*)
+**
+** The first argument is an Expr* (which is guaranteed to be constant for
+** the lifetime of the cursor) that defines constraints on which rows
+** might be fetched with this cursor. The Expr* tree may contain
+** TK_REGISTER nodes that refer to values stored in the array of registers
+** passed as the second parameter. In other words, if Expr.op==TK_REGISTER
+** then the value of the node is the value in Mem[pExpr.iTable]. Any
+** TK_COLUMN node in the expression tree refers to the Expr.iColumn-th
+** column of the b-tree of the cursor. The Expr tree will not contain
+** any function calls nor subqueries nor references to b-trees other than
+** the cursor being hinted.
+**
+** The design of the _RANGE hint is to aid b-tree implementations that try
+** to prefetch content from remote machines - to provide those
+** implementations with limits on what needs to be prefetched and thereby
+** reduce network bandwidth.
+**
+** Note that BTREE_HINT_FLAGS with BTREE_BULKLOAD is the only hint used by
+** standard SQLite. The other hints are provided for extensions that use
+** the SQLite parser and code generator but substitute their own storage
+** engine.
+*/
+#define BTREE_HINT_RANGE 0 /* Range constraints on queries */
+
+/*
+** Values that may be OR'd together to form the argument to the
+** BTREE_HINT_FLAGS hint for sqlite3BtreeCursorHint():
+**
+** The BTREE_BULKLOAD flag is set on index cursors when the index is going
+** to be filled with content that is already in sorted order.
+**
+** The BTREE_SEEK_EQ flag is set on cursors that will get OP_SeekGE or
+** OP_SeekLE opcodes for a range search, but where the range of entries
+** selected will all have the same key. In other words, the cursor will
+** be used only for equality key searches.
+**
+*/
+#define BTREE_BULKLOAD 0x00000001 /* Used to fill index in sorted order */
+#define BTREE_SEEK_EQ 0x00000002 /* EQ seeks only - no range seeks */
+
+/*
+** Flags passed as the third argument to sqlite3BtreeCursor().
+**
+** For read-only cursors the wrFlag argument is always zero. For read-write
+** cursors it may be set to either (BTREE_WRCSR|BTREE_FORDELETE) or just
+** (BTREE_WRCSR). If the BTREE_FORDELETE bit is set, then the cursor will
+** only be used by SQLite for the following:
+**
+** * to seek to and then delete specific entries, and/or
+**
+** * to read values that will be used to create keys that other
+** BTREE_FORDELETE cursors will seek to and delete.
+**
+** The BTREE_FORDELETE flag is an optimization hint. It is not used
+** by this, the native b-tree engine of SQLite, but it is available to
+** alternative storage engines that might be substituted in place of this
+** b-tree system. For alternative storage engines in which a delete of
+** the main table row automatically deletes corresponding index rows,
+** the FORDELETE flag hint allows those alternative storage engines to
+** skip a lot of work. Namely: FORDELETE cursors may treat all SEEK
+** and DELETE operations as no-ops, and any READ operation against a
+** FORDELETE cursor may return a null row: 0x01 0x00.
+*/
+#define BTREE_WRCSR 0x00000004 /* read-write cursor */
+#define BTREE_FORDELETE 0x00000008 /* Cursor is for seek/delete only */
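+
+/* Illustrative sketch, not part of the SQLite sources: the only legal
+** wrFlag combinations for sqlite3BtreeCursor() per the note above. The
+** helper is hypothetical.
+*/
+#if 0 /* illustrative only; excluded from the build */
+static int exampleCursorFlags(int isWrite, int isDelete){
+  if( !isWrite ) return 0; /* read-only cursor */
+  return isDelete ? (BTREE_WRCSR|BTREE_FORDELETE) : BTREE_WRCSR;
+}
+#endif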
+
+SQLITE_PRIVATE int sqlite3BtreeCursor(
+ Btree*, /* BTree containing table to open */
+ Pgno iTable, /* Index of root page */
+ int wrFlag, /* 0 for read-only, else BTREE_WRCSR possibly with BTREE_FORDELETE */
+ struct KeyInfo*, /* First argument to compare function */
+ BtCursor *pCursor /* Space to write cursor structure */
+);
+SQLITE_PRIVATE BtCursor *sqlite3BtreeFakeValidCursor(void);
+SQLITE_PRIVATE int sqlite3BtreeCursorSize(void);
+SQLITE_PRIVATE void sqlite3BtreeCursorZero(BtCursor*);
+SQLITE_PRIVATE void sqlite3BtreeCursorHintFlags(BtCursor*, unsigned);
+#ifdef SQLITE_ENABLE_CURSOR_HINTS
+SQLITE_PRIVATE void sqlite3BtreeCursorHint(BtCursor*, int, ...);
+#endif
+
+SQLITE_PRIVATE int sqlite3BtreeCloseCursor(BtCursor*);
+SQLITE_PRIVATE int sqlite3BtreeMovetoUnpacked(
+ BtCursor*,
+ UnpackedRecord *pUnKey,
+ i64 intKey,
+ int bias,
+ int *pRes
+);
+SQLITE_PRIVATE int sqlite3BtreeCursorHasMoved(BtCursor*);
+SQLITE_PRIVATE int sqlite3BtreeCursorRestore(BtCursor*, int*);
+SQLITE_PRIVATE int sqlite3BtreeDelete(BtCursor*, u8 flags);
+
+/* Allowed flags for sqlite3BtreeDelete() and sqlite3BtreeInsert() */
+#define BTREE_SAVEPOSITION 0x02 /* Leave cursor pointing at NEXT or PREV */
+#define BTREE_AUXDELETE 0x04 /* not the primary delete operation */
+#define BTREE_APPEND 0x08 /* Insert is likely an append */
+
+/* An instance of the BtreePayload object describes the content of a single
+** entry in either an index or table btree.
+**
+** Index btrees (used for indexes and also WITHOUT ROWID tables) contain
+** an arbitrary key and no data. These btrees have pKey,nKey set to the
+** key and the pData,nData,nZero fields are uninitialized. The aMem,nMem
+** fields give an array of Mem objects that are a decomposition of the key.
+** The nMem field might be zero, indicating that no decomposition is available.
+**
+** Table btrees (used for rowid tables) contain an integer rowid used as
+** the key and passed in the nKey field. The pKey field is zero.
+** pData,nData hold the content of the new entry. nZero extra zero bytes
+** are appended to the end of the content when constructing the entry.
+** The aMem,nMem fields are uninitialized for table btrees.
+**
+** Field usage summary:
+**
+** Table BTrees Index Btrees
+**
+** pKey always NULL encoded key
+** nKey the ROWID length of pKey
+** pData data not used
+** aMem not used decomposed key value
+** nMem not used entries in aMem
+** nData length of pData not used
+** nZero extra zeros after pData not used
+**
+** This object is used to pass information into sqlite3BtreeInsert(). The
+** same information used to be passed as five separate parameters. But placing
+** the information into this object helps to keep the interface more
+** organized and understandable, and it also helps the resulting code to
+** run a little faster by using fewer registers for parameter passing.
+*/
+struct BtreePayload {
+ const void *pKey; /* Key content for indexes. NULL for tables */
+ sqlite3_int64 nKey; /* Size of pKey for indexes. PRIMARY KEY for tables */
+ const void *pData; /* Data for tables. */
+ sqlite3_value *aMem; /* First of nMem values in the unpacked pKey */
+ u16 nMem; /* Number of aMem[] values. Might be zero */
+ int nData; /* Size of pData. 0 if none. */
+ int nZero; /* Extra zero data appended after pData,nData */
+};
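+
+/* Illustrative sketch, not part of the SQLite sources: filling a
+** BtreePayload for a rowid-table insert per the field-usage table above.
+** The rowid value and the flags/seekResult arguments are hypothetical.
+*/
+#if 0 /* illustrative only; excluded from the build */
+static int exampleTableInsert(BtCursor *pCur, const void *pRow, int nRow){
+  BtreePayload x;
+  memset(&x, 0, sizeof(x)); /* pKey==0 and aMem/nMem unused for tables */
+  x.nKey = 42;              /* the rowid */
+  x.pData = pRow;
+  x.nData = nRow;
+  return sqlite3BtreeInsert(pCur, &x, 0, 0);
+}
+#endif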
+
+SQLITE_PRIVATE int sqlite3BtreeInsert(BtCursor*, const BtreePayload *pPayload,
+ int flags, int seekResult);
+SQLITE_PRIVATE int sqlite3BtreeFirst(BtCursor*, int *pRes);
+SQLITE_PRIVATE int sqlite3BtreeLast(BtCursor*, int *pRes);
+SQLITE_PRIVATE int sqlite3BtreeNext(BtCursor*, int flags);
+SQLITE_PRIVATE int sqlite3BtreeEof(BtCursor*);
+SQLITE_PRIVATE int sqlite3BtreePrevious(BtCursor*, int flags);
+SQLITE_PRIVATE i64 sqlite3BtreeIntegerKey(BtCursor*);
+SQLITE_PRIVATE void sqlite3BtreeCursorPin(BtCursor*);
+SQLITE_PRIVATE void sqlite3BtreeCursorUnpin(BtCursor*);
+#ifdef SQLITE_ENABLE_OFFSET_SQL_FUNC
+SQLITE_PRIVATE i64 sqlite3BtreeOffset(BtCursor*);
+#endif
+SQLITE_PRIVATE int sqlite3BtreePayload(BtCursor*, u32 offset, u32 amt, void*);
+SQLITE_PRIVATE const void *sqlite3BtreePayloadFetch(BtCursor*, u32 *pAmt);
+SQLITE_PRIVATE u32 sqlite3BtreePayloadSize(BtCursor*);
+SQLITE_PRIVATE sqlite3_int64 sqlite3BtreeMaxRecordSize(BtCursor*);
+
+SQLITE_PRIVATE char *sqlite3BtreeIntegrityCheck(sqlite3*,Btree*,Pgno*aRoot,int nRoot,int,int*);
+SQLITE_PRIVATE struct Pager *sqlite3BtreePager(Btree*);
+SQLITE_PRIVATE i64 sqlite3BtreeRowCountEst(BtCursor*);
+
+#ifndef SQLITE_OMIT_INCRBLOB
+SQLITE_PRIVATE int sqlite3BtreePayloadChecked(BtCursor*, u32 offset, u32 amt, void*);
+SQLITE_PRIVATE int sqlite3BtreePutData(BtCursor*, u32 offset, u32 amt, void*);
+SQLITE_PRIVATE void sqlite3BtreeIncrblobCursor(BtCursor *);
+#endif
+SQLITE_PRIVATE void sqlite3BtreeClearCursor(BtCursor *);
+SQLITE_PRIVATE int sqlite3BtreeSetVersion(Btree *pBt, int iVersion);
+SQLITE_PRIVATE int sqlite3BtreeCursorHasHint(BtCursor*, unsigned int mask);
+SQLITE_PRIVATE int sqlite3BtreeIsReadonly(Btree *pBt);
+SQLITE_PRIVATE int sqlite3HeaderSizeBtree(void);
+
+#ifndef NDEBUG
+SQLITE_PRIVATE int sqlite3BtreeCursorIsValid(BtCursor*);
+#endif
+SQLITE_PRIVATE int sqlite3BtreeCursorIsValidNN(BtCursor*);
+
+SQLITE_PRIVATE int sqlite3BtreeCount(sqlite3*, BtCursor*, i64*);
+
+#ifdef SQLITE_TEST
+SQLITE_PRIVATE int sqlite3BtreeCursorInfo(BtCursor*, int*, int);
+SQLITE_PRIVATE void sqlite3BtreeCursorList(Btree*);
+#endif
+
+#ifndef SQLITE_OMIT_WAL
+SQLITE_PRIVATE int sqlite3BtreeCheckpoint(Btree*, int, int *, int *);
+#endif
+
+/*
+** If we are not using shared cache, then there is no need to
+** use mutexes to access the BtShared structures. So make the
+** Enter and Leave procedures no-ops.
+*/
+#ifndef SQLITE_OMIT_SHARED_CACHE
+SQLITE_PRIVATE void sqlite3BtreeEnter(Btree*);
+SQLITE_PRIVATE void sqlite3BtreeEnterAll(sqlite3*);
+SQLITE_PRIVATE int sqlite3BtreeSharable(Btree*);
+SQLITE_PRIVATE void sqlite3BtreeEnterCursor(BtCursor*);
+SQLITE_PRIVATE int sqlite3BtreeConnectionCount(Btree*);
+#else
+# define sqlite3BtreeEnter(X)
+# define sqlite3BtreeEnterAll(X)
+# define sqlite3BtreeSharable(X) 0
+# define sqlite3BtreeEnterCursor(X)
+# define sqlite3BtreeConnectionCount(X) 1
+#endif
+
+#if !defined(SQLITE_OMIT_SHARED_CACHE) && SQLITE_THREADSAFE
+SQLITE_PRIVATE void sqlite3BtreeLeave(Btree*);
+SQLITE_PRIVATE void sqlite3BtreeLeaveCursor(BtCursor*);
+SQLITE_PRIVATE void sqlite3BtreeLeaveAll(sqlite3*);
+#ifndef NDEBUG
+ /* These routines are used inside assert() statements only. */
+SQLITE_PRIVATE int sqlite3BtreeHoldsMutex(Btree*);
+SQLITE_PRIVATE int sqlite3BtreeHoldsAllMutexes(sqlite3*);
+SQLITE_PRIVATE int sqlite3SchemaMutexHeld(sqlite3*,int,Schema*);
+#endif
+#else
+
+# define sqlite3BtreeLeave(X)
+# define sqlite3BtreeLeaveCursor(X)
+# define sqlite3BtreeLeaveAll(X)
+
+# define sqlite3BtreeHoldsMutex(X) 1
+# define sqlite3BtreeHoldsAllMutexes(X) 1
+# define sqlite3SchemaMutexHeld(X,Y,Z) 1
+#endif
+
+
+#endif /* SQLITE_BTREE_H */
+
+/************** End of btree.h ***********************************************/
+/************** Continuing where we left off in sqliteInt.h ******************/
+/************** Include vdbe.h in the middle of sqliteInt.h ******************/
+/************** Begin file vdbe.h ********************************************/
+/*
+** 2001 September 15
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+** Header file for the Virtual DataBase Engine (VDBE)
+**
+** This header defines the interface to the virtual database engine
+** or VDBE. The VDBE implements an abstract machine that runs a
+** simple program to access and modify the underlying database.
+*/
+#ifndef SQLITE_VDBE_H
+#define SQLITE_VDBE_H
+/* #include <stdio.h> */
+
+/*
+** A single VDBE is an opaque structure named "Vdbe". Only routines
+** in the source file sqliteVdbe.c are allowed to see the insides
+** of this structure.
+*/
+typedef struct Vdbe Vdbe;
+
+/*
+** The names of the following types declared in vdbeInt.h are required
+** for the VdbeOp definition.
+*/
+typedef struct sqlite3_value Mem;
+typedef struct SubProgram SubProgram;
+
+/*
+** A single instruction of the virtual machine has an opcode
+** and as many as three operands. The instruction is recorded
+** as an instance of the following structure:
+*/
+struct VdbeOp {
+ u8 opcode; /* What operation to perform */
+ signed char p4type; /* One of the P4_xxx constants for p4 */
+ u16 p5; /* Fifth parameter is an unsigned 16-bit integer */
+ int p1; /* First operand */
+ int p2; /* Second parameter (often the jump destination) */
+ int p3; /* The third parameter */
+ union p4union { /* fourth parameter */
+ int i; /* Integer value if p4type==P4_INT32 */
+ void *p; /* Generic pointer */
+ char *z; /* Pointer to data for string (char array) types */
+ i64 *pI64; /* Used when p4type is P4_INT64 */
+ double *pReal; /* Used when p4type is P4_REAL */
+ FuncDef *pFunc; /* Used when p4type is P4_FUNCDEF */
+ sqlite3_context *pCtx; /* Used when p4type is P4_FUNCCTX */
+ CollSeq *pColl; /* Used when p4type is P4_COLLSEQ */
+ Mem *pMem; /* Used when p4type is P4_MEM */
+ VTable *pVtab; /* Used when p4type is P4_VTAB */
+ KeyInfo *pKeyInfo; /* Used when p4type is P4_KEYINFO */
+ u32 *ai; /* Used when p4type is P4_INTARRAY */
+ SubProgram *pProgram; /* Used when p4type is P4_SUBPROGRAM */
+ Table *pTab; /* Used when p4type is P4_TABLE */
+#ifdef SQLITE_ENABLE_CURSOR_HINTS
+ Expr *pExpr; /* Used when p4type is P4_EXPR */
+#endif
+ int (*xAdvance)(BtCursor *, int);
+ } p4;
+#ifdef SQLITE_ENABLE_EXPLAIN_COMMENTS
+ char *zComment; /* Comment to improve readability */
+#endif
+#ifdef VDBE_PROFILE
+ u32 cnt; /* Number of times this instruction was executed */
+ u64 cycles; /* Total time spent executing this instruction */
+#endif
+#ifdef SQLITE_VDBE_COVERAGE
+ u32 iSrcLine; /* Source-code line that generated this opcode
+ ** with flags in the upper 8 bits */
+#endif
+};
+typedef struct VdbeOp VdbeOp;
+
+
+/*
+** A sub-routine used to implement a trigger program.
+*/
+struct SubProgram {
+ VdbeOp *aOp; /* Array of opcodes for sub-program */
+ int nOp; /* Elements in aOp[] */
+ int nMem; /* Number of memory cells required */
+ int nCsr; /* Number of cursors required */
+ u8 *aOnce; /* Array of OP_Once flags */
+ void *token; /* id that may be used to detect recursive triggers */
+ SubProgram *pNext; /* Next sub-program already visited */
+};
+
+/*
+** A smaller version of VdbeOp used for the VdbeAddOpList() function because
+** it takes up less space.
+*/
+struct VdbeOpList {
+ u8 opcode; /* What operation to perform */
+ signed char p1; /* First operand */
+ signed char p2; /* Second parameter (often the jump destination) */
+ signed char p3; /* Third parameter */
+};
+typedef struct VdbeOpList VdbeOpList;
+
+/*
+** Allowed values of VdbeOp.p4type
+*/
+#define P4_NOTUSED 0 /* The P4 parameter is not used */
+#define P4_TRANSIENT 0 /* P4 is a pointer to a transient string */
+#define P4_STATIC (-1) /* Pointer to a static string */
+#define P4_COLLSEQ (-2) /* P4 is a pointer to a CollSeq structure */
+#define P4_INT32 (-3) /* P4 is a 32-bit signed integer */
+#define P4_SUBPROGRAM (-4) /* P4 is a pointer to a SubProgram structure */
+#define P4_ADVANCE (-5) /* P4 is a pointer to BtreeNext() or BtreePrev() */
+#define P4_TABLE (-6) /* P4 is a pointer to a Table structure */
+/* The P4 values above do not own any resources. Those below must be freed. */
+#define P4_FREE_IF_LE (-7)
+#define P4_DYNAMIC (-7) /* Pointer to memory from sqliteMalloc() */
+#define P4_FUNCDEF (-8) /* P4 is a pointer to a FuncDef structure */
+#define P4_KEYINFO (-9) /* P4 is a pointer to a KeyInfo structure */
+#define P4_EXPR (-10) /* P4 is a pointer to an Expr tree */
+#define P4_MEM (-11) /* P4 is a pointer to a Mem* structure */
+#define P4_VTAB (-12) /* P4 is a pointer to an sqlite3_vtab structure */
+#define P4_REAL (-13) /* P4 is a 64-bit floating point value */
+#define P4_INT64 (-14) /* P4 is a 64-bit signed integer */
+#define P4_INTARRAY (-15) /* P4 is a vector of 32-bit integers */
+#define P4_FUNCCTX (-16) /* P4 is a pointer to an sqlite3_context object */
+#define P4_DYNBLOB (-17) /* Pointer to memory from sqliteMalloc() */
+
+/* Error message codes for OP_Halt */
+#define P5_ConstraintNotNull 1
+#define P5_ConstraintUnique 2
+#define P5_ConstraintCheck 3
+#define P5_ConstraintFK 4
+
+/*
+** The Vdbe.aColName array contains 5n Mem structures, where n is the
+** number of columns of data returned by the statement.
+*/
+#define COLNAME_NAME 0
+#define COLNAME_DECLTYPE 1
+#define COLNAME_DATABASE 2
+#define COLNAME_TABLE 3
+#define COLNAME_COLUMN 4
+#ifdef SQLITE_ENABLE_COLUMN_METADATA
+# define COLNAME_N 5 /* Number of COLNAME_xxx symbols */
+#else
+# ifdef SQLITE_OMIT_DECLTYPE
+# define COLNAME_N 1 /* Store only the name */
+# else
+# define COLNAME_N 2 /* Store the name and decltype */
+# endif
+#endif
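+
+/* Illustrative sketch, not part of the SQLite sources: Vdbe.aColName is
+** a flat array of COLNAME_N Mem slots per result column, grouped by
+** metadata kind (see sqlite3VdbeSetColName() in vdbeaux.c). The accessor
+** below restates that indexing and is hypothetical.
+*/
+#if 0 /* illustrative only; excluded from the build */
+static Mem *exampleColName(Mem *aColName, int nResColumn, int iCol, int what){
+  return &aColName[iCol + what*nResColumn]; /* what is COLNAME_NAME etc. */
+}
+#endif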
+
+/*
+** The following macro converts a label returned by sqlite3VdbeMakeLabel()
+** into an index into the Parse.aLabel[] array that contains the resolved
+** address of that label.
+*/
+#define ADDR(X) (~(X))
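+
+/* Illustrative sketch, not part of the SQLite sources: labels returned
+** by sqlite3VdbeMakeLabel() are negative, so ADDR() maps label -1 to
+** index 0, -2 to index 1, and so on; applying it twice is the identity.
+*/
+#if 0 /* illustrative only; excluded from the build */
+static int exampleLabelIndex(int lbl){
+  assert( ADDR(ADDR(lbl))==lbl ); /* ~~x == x */
+  return ADDR(lbl);               /* index into Parse.aLabel[] */
+}
+#endif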
+
+/*
+** The makefile scans the vdbe.c source file and creates the "opcodes.h"
+** header file that defines a number for each opcode used by the VDBE.
+*/
+/************** Include opcodes.h in the middle of vdbe.h ********************/
+/************** Begin file opcodes.h *****************************************/
+/* Automatically generated. Do not edit */
+/* See the tool/mkopcodeh.tcl script for details */
+#define OP_Savepoint 0
+#define OP_AutoCommit 1
+#define OP_Transaction 2
+#define OP_SorterNext 3 /* jump */
+#define OP_Prev 4 /* jump */
+#define OP_Next 5 /* jump */
+#define OP_Checkpoint 6
+#define OP_JournalMode 7
+#define OP_Vacuum 8
+#define OP_VFilter 9 /* jump, synopsis: iplan=r[P3] zplan='P4' */
+#define OP_VUpdate 10 /* synopsis: data=r[P3@P2] */
+#define OP_Goto 11 /* jump */
+#define OP_Gosub 12 /* jump */
+#define OP_InitCoroutine 13 /* jump */
+#define OP_Yield 14 /* jump */
+#define OP_MustBeInt 15 /* jump */
+#define OP_Jump 16 /* jump */
+#define OP_Once 17 /* jump */
+#define OP_If 18 /* jump */
+#define OP_Not 19 /* same as TK_NOT, synopsis: r[P2]= !r[P1] */
+#define OP_IfNot 20 /* jump */
+#define OP_IfNullRow 21 /* jump, synopsis: if P1.nullRow then r[P3]=NULL, goto P2 */
+#define OP_SeekLT 22 /* jump, synopsis: key=r[P3@P4] */
+#define OP_SeekLE 23 /* jump, synopsis: key=r[P3@P4] */
+#define OP_SeekGE 24 /* jump, synopsis: key=r[P3@P4] */
+#define OP_SeekGT 25 /* jump, synopsis: key=r[P3@P4] */
+#define OP_IfNotOpen 26 /* jump, synopsis: if( !csr[P1] ) goto P2 */
+#define OP_IfNoHope 27 /* jump, synopsis: key=r[P3@P4] */
+#define OP_NoConflict 28 /* jump, synopsis: key=r[P3@P4] */
+#define OP_NotFound 29 /* jump, synopsis: key=r[P3@P4] */
+#define OP_Found 30 /* jump, synopsis: key=r[P3@P4] */
+#define OP_SeekRowid 31 /* jump, synopsis: intkey=r[P3] */
+#define OP_NotExists 32 /* jump, synopsis: intkey=r[P3] */
+#define OP_Last 33 /* jump */
+#define OP_IfSmaller 34 /* jump */
+#define OP_SorterSort 35 /* jump */
+#define OP_Sort 36 /* jump */
+#define OP_Rewind 37 /* jump */
+#define OP_IdxLE 38 /* jump, synopsis: key=r[P3@P4] */
+#define OP_IdxGT 39 /* jump, synopsis: key=r[P3@P4] */
+#define OP_IdxLT 40 /* jump, synopsis: key=r[P3@P4] */
+#define OP_IdxGE 41 /* jump, synopsis: key=r[P3@P4] */
+#define OP_RowSetRead 42 /* jump, synopsis: r[P3]=rowset(P1) */
+#define OP_Or 43 /* same as TK_OR, synopsis: r[P3]=(r[P1] || r[P2]) */
+#define OP_And 44 /* same as TK_AND, synopsis: r[P3]=(r[P1] && r[P2]) */
+#define OP_RowSetTest 45 /* jump, synopsis: if r[P3] in rowset(P1) goto P2 */
+#define OP_Program 46 /* jump */
+#define OP_FkIfZero 47 /* jump, synopsis: if fkctr[P1]==0 goto P2 */
+#define OP_IfPos 48 /* jump, synopsis: if r[P1]>0 then r[P1]-=P3, goto P2 */
+#define OP_IfNotZero 49 /* jump, synopsis: if r[P1]!=0 then r[P1]--, goto P2 */
+#define OP_IsNull 50 /* jump, same as TK_ISNULL, synopsis: if r[P1]==NULL goto P2 */
+#define OP_NotNull 51 /* jump, same as TK_NOTNULL, synopsis: if r[P1]!=NULL goto P2 */
+#define OP_Ne 52 /* jump, same as TK_NE, synopsis: IF r[P3]!=r[P1] */
+#define OP_Eq 53 /* jump, same as TK_EQ, synopsis: IF r[P3]==r[P1] */
+#define OP_Gt 54 /* jump, same as TK_GT, synopsis: IF r[P3]>r[P1] */
+#define OP_Le 55 /* jump, same as TK_LE, synopsis: IF r[P3]<=r[P1] */
+#define OP_Lt 56 /* jump, same as TK_LT, synopsis: IF r[P3]<r[P1] */
+#define OP_Ge 57 /* jump, same as TK_GE, synopsis: IF r[P3]>=r[P1] */
+#define OP_ElseNotEq 58 /* jump, same as TK_ESCAPE */
+#define OP_DecrJumpZero 59 /* jump, synopsis: if (--r[P1])==0 goto P2 */
+#define OP_IncrVacuum 60 /* jump */
+#define OP_VNext 61 /* jump */
+#define OP_Init 62 /* jump, synopsis: Start at P2 */
+#define OP_PureFunc 63 /* synopsis: r[P3]=func(r[P2@NP]) */
+#define OP_Function 64 /* synopsis: r[P3]=func(r[P2@NP]) */
+#define OP_Return 65
+#define OP_EndCoroutine 66
+#define OP_HaltIfNull 67 /* synopsis: if r[P3]=null halt */
+#define OP_Halt 68
+#define OP_Integer 69 /* synopsis: r[P2]=P1 */
+#define OP_Int64 70 /* synopsis: r[P2]=P4 */
+#define OP_String 71 /* synopsis: r[P2]='P4' (len=P1) */
+#define OP_Null 72 /* synopsis: r[P2..P3]=NULL */
+#define OP_SoftNull 73 /* synopsis: r[P1]=NULL */
+#define OP_Blob 74 /* synopsis: r[P2]=P4 (len=P1) */
+#define OP_Variable 75 /* synopsis: r[P2]=parameter(P1,P4) */
+#define OP_Move 76 /* synopsis: r[P2@P3]=r[P1@P3] */
+#define OP_Copy 77 /* synopsis: r[P2@P3+1]=r[P1@P3+1] */
+#define OP_SCopy 78 /* synopsis: r[P2]=r[P1] */
+#define OP_IntCopy 79 /* synopsis: r[P2]=r[P1] */
+#define OP_ResultRow 80 /* synopsis: output=r[P1@P2] */
+#define OP_CollSeq 81
+#define OP_AddImm 82 /* synopsis: r[P1]=r[P1]+P2 */
+#define OP_RealAffinity 83
+#define OP_Cast 84 /* synopsis: affinity(r[P1]) */
+#define OP_Permutation 85
+#define OP_Compare 86 /* synopsis: r[P1@P3] <-> r[P2@P3] */
+#define OP_IsTrue 87 /* synopsis: r[P2] = coalesce(r[P1]==TRUE,P3) ^ P4 */
+#define OP_Offset 88 /* synopsis: r[P3] = sqlite_offset(P1) */
+#define OP_Column 89 /* synopsis: r[P3]=PX */
+#define OP_Affinity 90 /* synopsis: affinity(r[P1@P2]) */
+#define OP_MakeRecord 91 /* synopsis: r[P3]=mkrec(r[P1@P2]) */
+#define OP_Count 92 /* synopsis: r[P2]=count() */
+#define OP_ReadCookie 93
+#define OP_SetCookie 94
+#define OP_ReopenIdx 95 /* synopsis: root=P2 iDb=P3 */
+#define OP_OpenRead 96 /* synopsis: root=P2 iDb=P3 */
+#define OP_OpenWrite 97 /* synopsis: root=P2 iDb=P3 */
+#define OP_OpenDup 98
+#define OP_OpenAutoindex 99 /* synopsis: nColumn=P2 */
+#define OP_OpenEphemeral 100 /* synopsis: nColumn=P2 */
+#define OP_BitAnd 101 /* same as TK_BITAND, synopsis: r[P3]=r[P1]&r[P2] */
+#define OP_BitOr 102 /* same as TK_BITOR, synopsis: r[P3]=r[P1]|r[P2] */
+#define OP_ShiftLeft 103 /* same as TK_LSHIFT, synopsis: r[P3]=r[P2]<<r[P1] */
+#define OP_ShiftRight 104 /* same as TK_RSHIFT, synopsis: r[P3]=r[P2]>>r[P1] */
+#define OP_Add 105 /* same as TK_PLUS, synopsis: r[P3]=r[P1]+r[P2] */
+#define OP_Subtract 106 /* same as TK_MINUS, synopsis: r[P3]=r[P2]-r[P1] */
+#define OP_Multiply 107 /* same as TK_STAR, synopsis: r[P3]=r[P1]*r[P2] */
+#define OP_Divide 108 /* same as TK_SLASH, synopsis: r[P3]=r[P2]/r[P1] */
+#define OP_Remainder 109 /* same as TK_REM, synopsis: r[P3]=r[P2]%r[P1] */
+#define OP_Concat 110 /* same as TK_CONCAT, synopsis: r[P3]=r[P2]+r[P1] */
+#define OP_SorterOpen 111
+#define OP_BitNot 112 /* same as TK_BITNOT, synopsis: r[P2]= ~r[P1] */
+#define OP_SequenceTest 113 /* synopsis: if( cursor[P1].ctr++ ) pc = P2 */
+#define OP_OpenPseudo 114 /* synopsis: P3 columns in r[P2] */
+#define OP_String8 115 /* same as TK_STRING, synopsis: r[P2]='P4' */
+#define OP_Close 116
+#define OP_ColumnsUsed 117
+#define OP_SeekHit 118 /* synopsis: seekHit=P2 */
+#define OP_Sequence 119 /* synopsis: r[P2]=cursor[P1].ctr++ */
+#define OP_NewRowid 120 /* synopsis: r[P2]=rowid */
+#define OP_Insert 121 /* synopsis: intkey=r[P3] data=r[P2] */
+#define OP_Delete 122
+#define OP_ResetCount 123
+#define OP_SorterCompare 124 /* synopsis: if key(P1)!=trim(r[P3],P4) goto P2 */
+#define OP_SorterData 125 /* synopsis: r[P2]=data */
+#define OP_RowData 126 /* synopsis: r[P2]=data */
+#define OP_Rowid 127 /* synopsis: r[P2]=rowid */
+#define OP_NullRow 128
+#define OP_SeekEnd 129
+#define OP_IdxInsert 130 /* synopsis: key=r[P2] */
+#define OP_SorterInsert 131 /* synopsis: key=r[P2] */
+#define OP_IdxDelete 132 /* synopsis: key=r[P2@P3] */
+#define OP_DeferredSeek 133 /* synopsis: Move P3 to P1.rowid if needed */
+#define OP_IdxRowid 134 /* synopsis: r[P2]=rowid */
+#define OP_FinishSeek 135
+#define OP_Destroy 136
+#define OP_Clear 137
+#define OP_ResetSorter 138
+#define OP_CreateBtree 139 /* synopsis: r[P2]=root iDb=P1 flags=P3 */
+#define OP_SqlExec 140
+#define OP_ParseSchema 141
+#define OP_LoadAnalysis 142
+#define OP_DropTable 143
+#define OP_DropIndex 144
+#define OP_DropTrigger 145
+#define OP_IntegrityCk 146
+#define OP_RowSetAdd 147 /* synopsis: rowset(P1)=r[P2] */
+#define OP_Param 148
+#define OP_FkCounter 149 /* synopsis: fkctr[P1]+=P2 */
+#define OP_Real 150 /* same as TK_FLOAT, synopsis: r[P2]=P4 */
+#define OP_MemMax 151 /* synopsis: r[P1]=max(r[P1],r[P2]) */
+#define OP_OffsetLimit 152 /* synopsis: if r[P1]>0 then r[P2]=r[P1]+max(0,r[P3]) else r[P2]=(-1) */
+#define OP_AggInverse 153 /* synopsis: accum=r[P3] inverse(r[P2@P5]) */
+#define OP_AggStep 154 /* synopsis: accum=r[P3] step(r[P2@P5]) */
+#define OP_AggStep1 155 /* synopsis: accum=r[P3] step(r[P2@P5]) */
+#define OP_AggValue 156 /* synopsis: r[P3]=value N=P2 */
+#define OP_AggFinal 157 /* synopsis: accum=r[P1] N=P2 */
+#define OP_Expire 158
+#define OP_CursorLock 159
+#define OP_CursorUnlock 160
+#define OP_TableLock 161 /* synopsis: iDb=P1 root=P2 write=P3 */
+#define OP_VBegin 162
+#define OP_VCreate 163
+#define OP_VDestroy 164
+#define OP_VOpen 165
+#define OP_VColumn 166 /* synopsis: r[P3]=vcolumn(P2) */
+#define OP_VRename 167
+#define OP_Pagecount 168
+#define OP_MaxPgcnt 169
+#define OP_Trace 170
+#define OP_CursorHint 171
+#define OP_ReleaseReg 172 /* synopsis: release r[P1@P2] mask P3 */
+#define OP_Noop 173
+#define OP_Explain 174
+#define OP_Abortable 175
+
+/* Properties such as "out2" or "jump" that are specified in
+** comments following the "case" for each opcode in vdbe.c
+** are encoded into bitvectors as follows:
+*/
+#define OPFLG_JUMP 0x01 /* jump: P2 holds jmp target */
+#define OPFLG_IN1 0x02 /* in1: P1 is an input */
+#define OPFLG_IN2 0x04 /* in2: P2 is an input */
+#define OPFLG_IN3 0x08 /* in3: P3 is an input */
+#define OPFLG_OUT2 0x10 /* out2: P2 is an output */
+#define OPFLG_OUT3 0x20 /* out3: P3 is an output */
+#define OPFLG_INITIALIZER {\
+/* 0 */ 0x00, 0x00, 0x00, 0x01, 0x01, 0x01, 0x00, 0x10,\
+/* 8 */ 0x00, 0x01, 0x00, 0x01, 0x01, 0x01, 0x03, 0x03,\
+/* 16 */ 0x01, 0x01, 0x03, 0x12, 0x03, 0x01, 0x09, 0x09,\
+/* 24 */ 0x09, 0x09, 0x01, 0x09, 0x09, 0x09, 0x09, 0x09,\
+/* 32 */ 0x09, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,\
+/* 40 */ 0x01, 0x01, 0x23, 0x26, 0x26, 0x0b, 0x01, 0x01,\
+/* 48 */ 0x03, 0x03, 0x03, 0x03, 0x0b, 0x0b, 0x0b, 0x0b,\
+/* 56 */ 0x0b, 0x0b, 0x01, 0x03, 0x01, 0x01, 0x01, 0x00,\
+/* 64 */ 0x00, 0x02, 0x02, 0x08, 0x00, 0x10, 0x10, 0x10,\
+/* 72 */ 0x10, 0x00, 0x10, 0x10, 0x00, 0x00, 0x10, 0x10,\
+/* 80 */ 0x00, 0x00, 0x02, 0x02, 0x02, 0x00, 0x00, 0x12,\
+/* 88 */ 0x20, 0x00, 0x00, 0x00, 0x10, 0x10, 0x00, 0x00,\
+/* 96 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x26, 0x26, 0x26,\
+/* 104 */ 0x26, 0x26, 0x26, 0x26, 0x26, 0x26, 0x26, 0x00,\
+/* 112 */ 0x12, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x10,\
+/* 120 */ 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10,\
+/* 128 */ 0x00, 0x00, 0x04, 0x04, 0x00, 0x00, 0x10, 0x00,\
+/* 136 */ 0x10, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00,\
+/* 144 */ 0x00, 0x00, 0x00, 0x06, 0x10, 0x00, 0x10, 0x04,\
+/* 152 */ 0x1a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\
+/* 160 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\
+/* 168 */ 0x10, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\
+}
+
+/* The sqlite3P2Values() routine is able to run faster if it knows
+** the value of the largest JUMP opcode. The smaller the maximum
+** JUMP opcode the better, so the mkopcodeh.tcl script that
+** generated this include file strives to group all JUMP opcodes
+** together near the beginning of the list.
+*/
+#define SQLITE_MX_JUMP_OPCODE 62 /* Maximum JUMP opcode */
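+
+/* Illustrative sketch, not part of the SQLite sources: inside SQLite the
+** OPFLG_INITIALIZER table initializes the sqlite3OpcodeProperty[] array,
+** so a jump test reduces to one mask. The standalone helper below is
+** hypothetical.
+*/
+#if 0 /* illustrative only; excluded from the build */
+static int exampleIsJump(u8 op){
+  static const unsigned char aProp[] = OPFLG_INITIALIZER;
+  return (aProp[op] & OPFLG_JUMP)!=0; /* true for OP_Goto, OP_Next, ... */
+}
+#endif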
+
+/************** End of opcodes.h *********************************************/
+/************** Continuing where we left off in vdbe.h ***********************/
+
+/*
+** Additional non-public SQLITE_PREPARE_* flags
+*/
+#define SQLITE_PREPARE_SAVESQL 0x80 /* Preserve SQL text */
+#define SQLITE_PREPARE_MASK 0x0f /* Mask of public flags */
+
+/*
+** Prototypes for the VDBE interface. See comments on the implementation
+** for a description of what each of these routines does.
+*/
+SQLITE_PRIVATE Vdbe *sqlite3VdbeCreate(Parse*);
+SQLITE_PRIVATE Parse *sqlite3VdbeParser(Vdbe*);
+SQLITE_PRIVATE int sqlite3VdbeAddOp0(Vdbe*,int);
+SQLITE_PRIVATE int sqlite3VdbeAddOp1(Vdbe*,int,int);
+SQLITE_PRIVATE int sqlite3VdbeAddOp2(Vdbe*,int,int,int);
+SQLITE_PRIVATE int sqlite3VdbeGoto(Vdbe*,int);
+SQLITE_PRIVATE int sqlite3VdbeLoadString(Vdbe*,int,const char*);
+SQLITE_PRIVATE void sqlite3VdbeMultiLoad(Vdbe*,int,const char*,...);
+SQLITE_PRIVATE int sqlite3VdbeAddOp3(Vdbe*,int,int,int,int);
+SQLITE_PRIVATE int sqlite3VdbeAddOp4(Vdbe*,int,int,int,int,const char *zP4,int);
+SQLITE_PRIVATE int sqlite3VdbeAddOp4Dup8(Vdbe*,int,int,int,int,const u8*,int);
+SQLITE_PRIVATE int sqlite3VdbeAddOp4Int(Vdbe*,int,int,int,int,int);
+SQLITE_PRIVATE int sqlite3VdbeAddFunctionCall(Parse*,int,int,int,int,const FuncDef*,int);
+SQLITE_PRIVATE void sqlite3VdbeEndCoroutine(Vdbe*,int);
+#if defined(SQLITE_DEBUG) && !defined(SQLITE_TEST_REALLOC_STRESS)
+SQLITE_PRIVATE void sqlite3VdbeVerifyNoMallocRequired(Vdbe *p, int N);
+SQLITE_PRIVATE void sqlite3VdbeVerifyNoResultRow(Vdbe *p);
+#else
+# define sqlite3VdbeVerifyNoMallocRequired(A,B)
+# define sqlite3VdbeVerifyNoResultRow(A)
+#endif
+#if defined(SQLITE_DEBUG)
+SQLITE_PRIVATE void sqlite3VdbeVerifyAbortable(Vdbe *p, int);
+#else
+# define sqlite3VdbeVerifyAbortable(A,B)
+#endif
+SQLITE_PRIVATE VdbeOp *sqlite3VdbeAddOpList(Vdbe*, int nOp, VdbeOpList const *aOp,int iLineno);
+#ifndef SQLITE_OMIT_EXPLAIN
+SQLITE_PRIVATE void sqlite3VdbeExplain(Parse*,u8,const char*,...);
+SQLITE_PRIVATE void sqlite3VdbeExplainPop(Parse*);
+SQLITE_PRIVATE int sqlite3VdbeExplainParent(Parse*);
+# define ExplainQueryPlan(P) sqlite3VdbeExplain P
+# define ExplainQueryPlanPop(P) sqlite3VdbeExplainPop(P)
+# define ExplainQueryPlanParent(P) sqlite3VdbeExplainParent(P)
+#else
+# define ExplainQueryPlan(P)
+# define ExplainQueryPlanPop(P)
+# define ExplainQueryPlanParent(P) 0
+# define sqlite3ExplainBreakpoint(A,B) /*no-op*/
+#endif
+#if defined(SQLITE_DEBUG) && !defined(SQLITE_OMIT_EXPLAIN)
+SQLITE_PRIVATE void sqlite3ExplainBreakpoint(const char*,const char*);
+#else
+# define sqlite3ExplainBreakpoint(A,B) /*no-op*/
+#endif
+SQLITE_PRIVATE void sqlite3VdbeAddParseSchemaOp(Vdbe*,int,char*);
+SQLITE_PRIVATE void sqlite3VdbeChangeOpcode(Vdbe*, int addr, u8);
+SQLITE_PRIVATE void sqlite3VdbeChangeP1(Vdbe*, int addr, int P1);
+SQLITE_PRIVATE void sqlite3VdbeChangeP2(Vdbe*, int addr, int P2);
+SQLITE_PRIVATE void sqlite3VdbeChangeP3(Vdbe*, int addr, int P3);
+SQLITE_PRIVATE void sqlite3VdbeChangeP5(Vdbe*, u16 P5);
+SQLITE_PRIVATE void sqlite3VdbeJumpHere(Vdbe*, int addr);
+SQLITE_PRIVATE void sqlite3VdbeJumpHereOrPopInst(Vdbe*, int addr);
+SQLITE_PRIVATE int sqlite3VdbeChangeToNoop(Vdbe*, int addr);
+SQLITE_PRIVATE int sqlite3VdbeDeletePriorOpcode(Vdbe*, u8 op);
+#ifdef SQLITE_DEBUG
+SQLITE_PRIVATE void sqlite3VdbeReleaseRegisters(Parse*,int addr, int n, u32 mask, int);
+#else
+# define sqlite3VdbeReleaseRegisters(P,A,N,M,F)
+#endif
+SQLITE_PRIVATE void sqlite3VdbeChangeP4(Vdbe*, int addr, const char *zP4, int N);
+SQLITE_PRIVATE void sqlite3VdbeAppendP4(Vdbe*, void *pP4, int p4type);
+SQLITE_PRIVATE void sqlite3VdbeSetP4KeyInfo(Parse*, Index*);
+SQLITE_PRIVATE void sqlite3VdbeUsesBtree(Vdbe*, int);
+SQLITE_PRIVATE VdbeOp *sqlite3VdbeGetOp(Vdbe*, int);
+SQLITE_PRIVATE int sqlite3VdbeMakeLabel(Parse*);
+SQLITE_PRIVATE void sqlite3VdbeRunOnlyOnce(Vdbe*);
+SQLITE_PRIVATE void sqlite3VdbeReusable(Vdbe*);
+SQLITE_PRIVATE void sqlite3VdbeDelete(Vdbe*);
+SQLITE_PRIVATE void sqlite3VdbeClearObject(sqlite3*,Vdbe*);
+SQLITE_PRIVATE void sqlite3VdbeMakeReady(Vdbe*,Parse*);
+SQLITE_PRIVATE int sqlite3VdbeFinalize(Vdbe*);
+SQLITE_PRIVATE void sqlite3VdbeResolveLabel(Vdbe*, int);
+SQLITE_PRIVATE int sqlite3VdbeCurrentAddr(Vdbe*);
+#ifdef SQLITE_DEBUG
+SQLITE_PRIVATE int sqlite3VdbeAssertMayAbort(Vdbe *, int);
+#endif
+SQLITE_PRIVATE void sqlite3VdbeResetStepResult(Vdbe*);
+SQLITE_PRIVATE void sqlite3VdbeRewind(Vdbe*);
+SQLITE_PRIVATE int sqlite3VdbeReset(Vdbe*);
+SQLITE_PRIVATE void sqlite3VdbeSetNumCols(Vdbe*,int);
+SQLITE_PRIVATE int sqlite3VdbeSetColName(Vdbe*, int, int, const char *, void(*)(void*));
+SQLITE_PRIVATE void sqlite3VdbeCountChanges(Vdbe*);
+SQLITE_PRIVATE sqlite3 *sqlite3VdbeDb(Vdbe*);
+SQLITE_PRIVATE u8 sqlite3VdbePrepareFlags(Vdbe*);
+SQLITE_PRIVATE void sqlite3VdbeSetSql(Vdbe*, const char *z, int n, u8);
+#ifdef SQLITE_ENABLE_NORMALIZE
+SQLITE_PRIVATE void sqlite3VdbeAddDblquoteStr(sqlite3*,Vdbe*,const char*);
+SQLITE_PRIVATE int sqlite3VdbeUsesDoubleQuotedString(Vdbe*,const char*);
+#endif
+SQLITE_PRIVATE void sqlite3VdbeSwap(Vdbe*,Vdbe*);
+SQLITE_PRIVATE VdbeOp *sqlite3VdbeTakeOpArray(Vdbe*, int*, int*);
+SQLITE_PRIVATE sqlite3_value *sqlite3VdbeGetBoundValue(Vdbe*, int, u8);
+SQLITE_PRIVATE void sqlite3VdbeSetVarmask(Vdbe*, int);
+#ifndef SQLITE_OMIT_TRACE
+SQLITE_PRIVATE char *sqlite3VdbeExpandSql(Vdbe*, const char*);
+#endif
+SQLITE_PRIVATE int sqlite3MemCompare(const Mem*, const Mem*, const CollSeq*);
+SQLITE_PRIVATE int sqlite3BlobCompare(const Mem*, const Mem*);
+
+SQLITE_PRIVATE void sqlite3VdbeRecordUnpack(KeyInfo*,int,const void*,UnpackedRecord*);
+SQLITE_PRIVATE int sqlite3VdbeRecordCompare(int,const void*,UnpackedRecord*);
+SQLITE_PRIVATE int sqlite3VdbeRecordCompareWithSkip(int, const void *, UnpackedRecord *, int);
+SQLITE_PRIVATE UnpackedRecord *sqlite3VdbeAllocUnpackedRecord(KeyInfo*);
+
+typedef int (*RecordCompare)(int,const void*,UnpackedRecord*);
+SQLITE_PRIVATE RecordCompare sqlite3VdbeFindCompare(UnpackedRecord*);
+
+SQLITE_PRIVATE void sqlite3VdbeLinkSubProgram(Vdbe *, SubProgram *);
+SQLITE_PRIVATE int sqlite3VdbeHasSubProgram(Vdbe*);
+
+SQLITE_PRIVATE int sqlite3NotPureFunc(sqlite3_context*);
+#ifdef SQLITE_ENABLE_BYTECODE_VTAB
+SQLITE_PRIVATE int sqlite3VdbeBytecodeVtabInit(sqlite3*);
+#endif
+
+/* Use SQLITE_ENABLE_EXPLAIN_COMMENTS to enable generation of extra
+** comments on each VDBE opcode.
+**
+** Use the SQLITE_ENABLE_MODULE_COMMENTS macro to see some extra no-op
+** comments in VDBE programs that show key decision points in the code
+** generator.
+*/
+#ifdef SQLITE_ENABLE_EXPLAIN_COMMENTS
+SQLITE_PRIVATE void sqlite3VdbeComment(Vdbe*, const char*, ...);
+# define VdbeComment(X) sqlite3VdbeComment X
+SQLITE_PRIVATE void sqlite3VdbeNoopComment(Vdbe*, const char*, ...);
+# define VdbeNoopComment(X) sqlite3VdbeNoopComment X
+# ifdef SQLITE_ENABLE_MODULE_COMMENTS
+# define VdbeModuleComment(X) sqlite3VdbeNoopComment X
+# else
+# define VdbeModuleComment(X)
+# endif
+#else
+# define VdbeComment(X)
+# define VdbeNoopComment(X)
+# define VdbeModuleComment(X)
+#endif
+
+/*
+** The VdbeCoverage macros are used to set a coverage testing point
+** for VDBE branch instructions. The coverage testing points are line
+** numbers in the sqlite3.c source file. VDBE branch coverage testing
+** only works with an amalgamation build. That's ok since a VDBE branch
+** coverage build is designed for testing the test suite only. No application
+** should ever ship with VDBE branch coverage measuring turned on.
+**
+** VdbeCoverage(v) // Mark the previously coded instruction
+** // as a branch
+**
+** VdbeCoverageIf(v, conditional) // Mark previous if conditional true
+**
+** VdbeCoverageAlwaysTaken(v) // Previous branch is always taken
+**
+** VdbeCoverageNeverTaken(v) // Previous branch is never taken
+**
+** VdbeCoverageNeverNull(v) // Previous three-way branch is only
+** // taken on the first two ways. The
+** // NULL option is not possible
+**
+** VdbeCoverageEqNe(v) // Previous OP_Jump is only interested
+** // in distinguishing equal and not-equal.
+**
+** Every VDBE branch operation must be tagged with one of the macros above.
+** If not, then when "make test" is run with -DSQLITE_VDBE_COVERAGE and
+** -DSQLITE_DEBUG then an ALWAYS() will fail in the vdbeTakeBranch()
+** routine in vdbe.c, alerting the developer to the missed tag.
+**
+** During testing, the test application will invoke
+** sqlite3_test_control(SQLITE_TESTCTRL_VDBE_COVERAGE,...) to set a callback
+** routine that is invoked as each bytecode branch is taken. The callback
+** contains the sqlite3.c source line number of the VdbeCoverage macro and
+** flags to indicate whether or not the branch was taken. The test application
+** is responsible for keeping track of this and reporting byte-code branches
+** that are never taken.
+**
+** See the VdbeBranchTaken() macro and vdbeTakeBranch() function in the
+** vdbe.c source file for additional information.
+*/
+#ifdef SQLITE_VDBE_COVERAGE
+SQLITE_PRIVATE void sqlite3VdbeSetLineNumber(Vdbe*,int);
+# define VdbeCoverage(v) sqlite3VdbeSetLineNumber(v,__LINE__)
+# define VdbeCoverageIf(v,x) if(x)sqlite3VdbeSetLineNumber(v,__LINE__)
+# define VdbeCoverageAlwaysTaken(v) \
+ sqlite3VdbeSetLineNumber(v,__LINE__|0x5000000);
+# define VdbeCoverageNeverTaken(v) \
+ sqlite3VdbeSetLineNumber(v,__LINE__|0x6000000);
+# define VdbeCoverageNeverNull(v) \
+ sqlite3VdbeSetLineNumber(v,__LINE__|0x4000000);
+# define VdbeCoverageNeverNullIf(v,x) \
+ if(x)sqlite3VdbeSetLineNumber(v,__LINE__|0x4000000);
+# define VdbeCoverageEqNe(v) \
+ sqlite3VdbeSetLineNumber(v,__LINE__|0x8000000);
+# define VDBE_OFFSET_LINENO(x) (__LINE__+x)
+#else
+# define VdbeCoverage(v)
+# define VdbeCoverageIf(v,x)
+# define VdbeCoverageAlwaysTaken(v)
+# define VdbeCoverageNeverTaken(v)
+# define VdbeCoverageNeverNull(v)
+# define VdbeCoverageNeverNullIf(v,x)
+# define VdbeCoverageEqNe(v)
+# define VDBE_OFFSET_LINENO(x) 0
+#endif
+
+#ifdef SQLITE_ENABLE_STMT_SCANSTATUS
+SQLITE_PRIVATE void sqlite3VdbeScanStatus(Vdbe*, int, int, int, LogEst, const char*);
+#else
+# define sqlite3VdbeScanStatus(a,b,c,d,e,f)
+#endif
+
+#if defined(SQLITE_DEBUG) || defined(VDBE_PROFILE)
+SQLITE_PRIVATE void sqlite3VdbePrintOp(FILE*, int, VdbeOp*);
+#endif
+
+#endif /* SQLITE_VDBE_H */
+
+/************** End of vdbe.h ************************************************/
+/************** Continuing where we left off in sqliteInt.h ******************/
+/************** Include pcache.h in the middle of sqliteInt.h ****************/
+/************** Begin file pcache.h ******************************************/
+/*
+** 2008 August 05
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+** This header file defines the interface to the sqlite page cache
+** subsystem.
+*/
+
+#ifndef _PCACHE_H_
+
+typedef struct PgHdr PgHdr;
+typedef struct PCache PCache;
+
+/*
+** Every page in the cache is controlled by an instance of the following
+** structure.
+*/
+struct PgHdr {
+ sqlite3_pcache_page *pPage; /* Pcache object page handle */
+ void *pData; /* Page data */
+ void *pExtra; /* Extra content */
+ PCache *pCache; /* PRIVATE: Cache that owns this page */
+ PgHdr *pDirty; /* Transient list of dirty pages sorted by pgno */
+ Pager *pPager; /* The pager this page is part of */
+ Pgno pgno; /* Page number for this page */
+#ifdef SQLITE_CHECK_PAGES
+ u32 pageHash; /* Hash of page content */
+#endif
+ u16 flags; /* PGHDR flags defined below */
+
+ /**********************************************************************
+ ** Elements above, except pCache, are public. All that follow are
+ ** private to pcache.c and should not be accessed by other modules.
+ ** pCache is grouped with the public elements for efficiency.
+ */
+ i16 nRef; /* Number of users of this page */
+ PgHdr *pDirtyNext; /* Next element in list of dirty pages */
+ PgHdr *pDirtyPrev; /* Previous element in list of dirty pages */
+ /* NB: pDirtyNext and pDirtyPrev are undefined if the
+ ** PgHdr object is not dirty */
+};
+
+/* Bit values for PgHdr.flags */
+#define PGHDR_CLEAN 0x001 /* Page not on the PCache.pDirty list */
+#define PGHDR_DIRTY 0x002 /* Page is on the PCache.pDirty list */
+#define PGHDR_WRITEABLE 0x004 /* Journaled and ready to modify */
+#define PGHDR_NEED_SYNC 0x008 /* Fsync the rollback journal before
+ ** writing this page to the database */
+#define PGHDR_DONT_WRITE 0x010 /* Do not write content to disk */
+#define PGHDR_MMAP 0x020 /* This is an mmap page object */
+
+#define PGHDR_WAL_APPEND 0x040 /* Appended to wal file */
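+
+/* Illustrative sketch, not part of the SQLite sources: PGHDR flags are
+** independent bits of PgHdr.flags, so tests are plain mask checks. Per
+** the comments above, a page must be both dirty and journaled
+** (WRITEABLE) before its content may be modified. The helper is
+** hypothetical.
+*/
+#if 0 /* illustrative only; excluded from the build */
+static int examplePageIsModifiable(const PgHdr *pPg){
+  u16 need = PGHDR_DIRTY|PGHDR_WRITEABLE;
+  return (pPg->flags & need)==need;
+}
+#endif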
+
+/* Initialize and shutdown the page cache subsystem */
+SQLITE_PRIVATE int sqlite3PcacheInitialize(void);
+SQLITE_PRIVATE void sqlite3PcacheShutdown(void);
+
+/* Page cache buffer management:
+** These routines implement SQLITE_CONFIG_PAGECACHE.
+*/
+SQLITE_PRIVATE void sqlite3PCacheBufferSetup(void *, int sz, int n);
+
+/* Create a new pager cache.
+** Under memory stress, invoke xStress to try to make pages clean.
+** Only clean and unpinned pages can be reclaimed.
+*/
+SQLITE_PRIVATE int sqlite3PcacheOpen(
+ int szPage, /* Size of every page */
+ int szExtra, /* Extra space associated with each page */
+ int bPurgeable, /* True if pages are on backing store */
+ int (*xStress)(void*, PgHdr*), /* Call to try to make pages clean */
+ void *pStress, /* Argument to xStress */
+ PCache *pToInit /* Preallocated space for the PCache */
+);
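+
+/* Illustrative sketch, not part of the SQLite sources: the shape of an
+** xStress callback handed to sqlite3PcacheOpen(). The real callback in
+** pager.c writes the dirty page to disk first; this hypothetical stub
+** only shows the contract of leaving the page clean.
+*/
+#if 0 /* illustrative only; excluded from the build */
+static int exampleStress(void *pCtx, PgHdr *pPg){
+  (void)pCtx;                  /* the pStress argument */
+  sqlite3PcacheMakeClean(pPg); /* pretend the page was written out */
+  return SQLITE_OK;
+}
+#endif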
+
+/* Modify the page-size after the cache has been created. */
+SQLITE_PRIVATE int sqlite3PcacheSetPageSize(PCache *, int);
+
+/* Return the size in bytes of a PCache object. Used to preallocate
+** storage space.
+*/
+SQLITE_PRIVATE int sqlite3PcacheSize(void);
+
+/* One release per successful fetch. Page is pinned until released.
+** Reference counted.
+*/
+SQLITE_PRIVATE sqlite3_pcache_page *sqlite3PcacheFetch(PCache*, Pgno, int createFlag);
+SQLITE_PRIVATE int sqlite3PcacheFetchStress(PCache*, Pgno, sqlite3_pcache_page**);
+SQLITE_PRIVATE PgHdr *sqlite3PcacheFetchFinish(PCache*, Pgno, sqlite3_pcache_page *pPage);
+SQLITE_PRIVATE void sqlite3PcacheRelease(PgHdr*);
+
+SQLITE_PRIVATE void sqlite3PcacheDrop(PgHdr*); /* Remove page from cache */
+SQLITE_PRIVATE void sqlite3PcacheMakeDirty(PgHdr*); /* Make sure page is marked dirty */
+SQLITE_PRIVATE void sqlite3PcacheMakeClean(PgHdr*); /* Mark a single page as clean */
+SQLITE_PRIVATE void sqlite3PcacheCleanAll(PCache*); /* Mark all dirty list pages as clean */
+SQLITE_PRIVATE void sqlite3PcacheClearWritable(PCache*);
+
+/* Change a page number. Used by incr-vacuum. */
+SQLITE_PRIVATE void sqlite3PcacheMove(PgHdr*, Pgno);
+
+/* Remove all pages with pgno>x. Reset the cache if x==0 */
+SQLITE_PRIVATE void sqlite3PcacheTruncate(PCache*, Pgno x);
+
+/* Get a list of all dirty pages in the cache, sorted by page number */
+SQLITE_PRIVATE PgHdr *sqlite3PcacheDirtyList(PCache*);
+
+/* Reset and close the cache object */
+SQLITE_PRIVATE void sqlite3PcacheClose(PCache*);
+
+/* Clear flags from pages of the page cache */
+SQLITE_PRIVATE void sqlite3PcacheClearSyncFlags(PCache *);
+
+/* Discard the contents of the cache */
+SQLITE_PRIVATE void sqlite3PcacheClear(PCache*);
+
+/* Return the total number of outstanding page references */
+SQLITE_PRIVATE int sqlite3PcacheRefCount(PCache*);
+
+/* Increment the reference count of an existing page */
+SQLITE_PRIVATE void sqlite3PcacheRef(PgHdr*);
+
+SQLITE_PRIVATE int sqlite3PcachePageRefcount(PgHdr*);
+
+/* Return the total number of pages stored in the cache */
+SQLITE_PRIVATE int sqlite3PcachePagecount(PCache*);
+
+#if defined(SQLITE_CHECK_PAGES) || defined(SQLITE_DEBUG)
+/* Iterate through all dirty pages currently stored in the cache. This
+** interface is only available if SQLITE_CHECK_PAGES or SQLITE_DEBUG is
+** defined when the library is built.
+*/
+SQLITE_PRIVATE void sqlite3PcacheIterateDirty(PCache *pCache, void (*xIter)(PgHdr *));
+#endif
+
+#if defined(SQLITE_DEBUG)
+/* Check invariants on a PgHdr object */
+SQLITE_PRIVATE int sqlite3PcachePageSanity(PgHdr*);
+#endif
+
+/* Set and get the suggested cache-size for the specified pager-cache.
+**
+** If no global maximum is configured, then the system attempts to limit
+** the total number of pages cached by purgeable pager-caches to the sum
+** of the suggested cache-sizes.
+*/
+SQLITE_PRIVATE void sqlite3PcacheSetCachesize(PCache *, int);
+#ifdef SQLITE_TEST
+SQLITE_PRIVATE int sqlite3PcacheGetCachesize(PCache *);
+#endif
+
+/* Set or get the suggested spill-size for the specified pager-cache.
+**
+** The spill-size is the minimum number of pages in cache before the cache
+** will attempt to spill dirty pages by calling xStress.
+*/
+SQLITE_PRIVATE int sqlite3PcacheSetSpillsize(PCache *, int);
+
+/* Free up as much memory as possible from the page cache */
+SQLITE_PRIVATE void sqlite3PcacheShrink(PCache*);
+
+#ifdef SQLITE_ENABLE_MEMORY_MANAGEMENT
+/* Try to return memory used by the pcache module to the main memory heap */
+SQLITE_PRIVATE int sqlite3PcacheReleaseMemory(int);
+#endif
+
+#ifdef SQLITE_TEST
+SQLITE_PRIVATE void sqlite3PcacheStats(int*,int*,int*,int*);
+#endif
+
+SQLITE_PRIVATE void sqlite3PCacheSetDefault(void);
+
+/* Return the header size */
+SQLITE_PRIVATE int sqlite3HeaderSizePcache(void);
+SQLITE_PRIVATE int sqlite3HeaderSizePcache1(void);
+
+/* Number of dirty pages as a percentage of the configured cache size */
+SQLITE_PRIVATE int sqlite3PCachePercentDirty(PCache*);
+
+#ifdef SQLITE_DIRECT_OVERFLOW_READ
+SQLITE_PRIVATE int sqlite3PCacheIsDirty(PCache *pCache);
+#endif
+
+#endif /* _PCACHE_H_ */
+
+/************** End of pcache.h **********************************************/
+/************** Continuing where we left off in sqliteInt.h ******************/
+/************** Include os.h in the middle of sqliteInt.h ********************/
+/************** Begin file os.h **********************************************/
+/*
+** 2001 September 16
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+******************************************************************************
+**
+** This header file (together with its companion C source-code file
+** "os.c") attempts to abstract the underlying operating system so that
+** the SQLite library will work on both POSIX and windows systems.
+**
+** This header file is #include-ed by sqliteInt.h and thus ends up
+** being included by every source file.
+*/
+#ifndef _SQLITE_OS_H_
+#define _SQLITE_OS_H_
+
+/*
+** Attempt to automatically detect the operating system and setup the
+** necessary pre-processor macros for it.
+*/
+/************** Include os_setup.h in the middle of os.h *********************/
+/************** Begin file os_setup.h ****************************************/
+/*
+** 2013 November 25
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+******************************************************************************
+**
+** This file contains pre-processor directives related to operating system
+** detection and/or setup.
+*/
+#ifndef SQLITE_OS_SETUP_H
+#define SQLITE_OS_SETUP_H
+
+/*
+** Figure out if we are dealing with Unix, Windows, or some other operating
+** system.
+**
+** After the following block of preprocessor macros, all of SQLITE_OS_UNIX,
+** SQLITE_OS_WIN, and SQLITE_OS_OTHER will be defined to either 1 or 0. One of
+** the three will be 1. The other two will be 0.
+*/
+#if defined(SQLITE_OS_OTHER)
+# if SQLITE_OS_OTHER==1
+# undef SQLITE_OS_UNIX
+# define SQLITE_OS_UNIX 0
+# undef SQLITE_OS_WIN
+# define SQLITE_OS_WIN 0
+# else
+# undef SQLITE_OS_OTHER
+# endif
+#endif
+#if !defined(SQLITE_OS_UNIX) && !defined(SQLITE_OS_OTHER)
+# define SQLITE_OS_OTHER 0
+# ifndef SQLITE_OS_WIN
+# if defined(_WIN32) || defined(WIN32) || defined(__CYGWIN__) || \
+ defined(__MINGW32__) || defined(__BORLANDC__)
+# define SQLITE_OS_WIN 1
+# define SQLITE_OS_UNIX 0
+# else
+# define SQLITE_OS_WIN 0
+# define SQLITE_OS_UNIX 1
+# endif
+# else
+# define SQLITE_OS_UNIX 0
+# endif
+#else
+# ifndef SQLITE_OS_WIN
+# define SQLITE_OS_WIN 0
+# endif
+#endif
+
+#endif /* SQLITE_OS_SETUP_H */
+
+/************** End of os_setup.h ********************************************/
+/************** Continuing where we left off in os.h *************************/
+
+/* If the SET_FULLSYNC macro is not defined above, then make it
+** a no-op
+*/
+#ifndef SET_FULLSYNC
+# define SET_FULLSYNC(x,y)
+#endif
+
+/*
+** The default size of a disk sector
+*/
+#ifndef SQLITE_DEFAULT_SECTOR_SIZE
+# define SQLITE_DEFAULT_SECTOR_SIZE 4096
+#endif
+
+/*
+** Temporary files are named starting with this prefix followed by 16 random
+** alphanumeric characters, and no file extension. They are stored in the
+** OS's standard temporary file directory, and are deleted prior to exit.
+** If sqlite is being embedded in another program, you may wish to change the
+** prefix to reflect your program's name, so that if your program exits
+** prematurely, old temporary files can be easily identified. This can be done
+** using -DSQLITE_TEMP_FILE_PREFIX=myprefix_ on the compiler command line.
+**
+** 2006-10-31: The default prefix used to be "sqlite_". But then
+** McAfee started using SQLite in their anti-virus product and it
+** started putting files with the "sqlite" name in the c:/temp folder.
+** This annoyed many windows users. Those users would then do a
+** Google search for "sqlite", find the telephone numbers of the
+** developers and call to wake them up at night and complain.
+** For this reason, the default name prefix is changed to be "sqlite"
+** spelled backwards. So the temp files are still identified, but
+** anybody smart enough to figure out the code is also likely smart
+** enough to know that calling the developer will not help get rid
+** of the file.
+*/
+#ifndef SQLITE_TEMP_FILE_PREFIX
+# define SQLITE_TEMP_FILE_PREFIX "etilqs_"
+#endif
+
+/*
+** The following values may be passed as the second argument to
+** sqlite3OsLock(). The various locks exhibit the following semantics:
+**
+** SHARED: Any number of processes may hold a SHARED lock simultaneously.
+** RESERVED: A single process may hold a RESERVED lock on a file at
+** any time. Other processes may hold and obtain new SHARED locks.
+** PENDING: A single process may hold a PENDING lock on a file at
+** any one time. Existing SHARED locks may persist, but no new
+** SHARED locks may be obtained by other processes.
+** EXCLUSIVE: An EXCLUSIVE lock precludes all other locks.
+**
+** PENDING_LOCK may not be passed directly to sqlite3OsLock(). Instead, a
+** process that requests an EXCLUSIVE lock may actually obtain a PENDING
+** lock. This can be upgraded to an EXCLUSIVE lock by a subsequent call to
+** sqlite3OsLock().
+*/
+#define NO_LOCK 0
+#define SHARED_LOCK 1
+#define RESERVED_LOCK 2
+#define PENDING_LOCK 3
+#define EXCLUSIVE_LOCK 4
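+
+/* Illustrative sketch (not part of the amalgamation): the usual lock
+** escalation path for a writer. A connection steps through SHARED ->
+** RESERVED -> EXCLUSIVE via sqlite3OsLock(); PENDING is never requested
+** directly, but a request for EXCLUSIVE may leave the file in the
+** PENDING state until all readers clear. The helper name below is
+** hypothetical and exists only to show the ordering.
+*/
+#if 0
+static int exampleEscalateToExclusive(sqlite3_file *pFd){
+  int rc = sqlite3OsLock(pFd, SHARED_LOCK);       /* begin reading */
+  if( rc==SQLITE_OK ) rc = sqlite3OsLock(pFd, RESERVED_LOCK); /* intend to write */
+  if( rc==SQLITE_OK ) rc = sqlite3OsLock(pFd, EXCLUSIVE_LOCK);
+  return rc;  /* a busy result here usually means a PENDING lock is held */
+}
+#endif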
+
+/*
+** File Locking Notes: (Mostly about windows but also some info for Unix)
+**
+** We cannot use LockFileEx() or UnlockFileEx() on Win95/98/ME because
+** those functions are not available. So we use only LockFile() and
+** UnlockFile().
+**
+** LockFile() prevents not just writing but also reading by other processes.
+** A SHARED_LOCK is obtained by locking a single randomly-chosen
+** byte out of a specific range of bytes. The lock byte is obtained at
+** random so two separate readers can probably access the file at the
+** same time, unless they are unlucky and choose the same lock byte.
+** An EXCLUSIVE_LOCK is obtained by locking all bytes in the range.
+** There can only be one writer. A RESERVED_LOCK is obtained by locking
+** a single byte of the file that is designated as the reserved lock byte.
+** A PENDING_LOCK is obtained by locking a designated byte different from
+** the RESERVED_LOCK byte.
+**
+** On WinNT/2K/XP systems, LockFileEx() and UnlockFileEx() are available,
+** which means we can use reader/writer locks. When reader/writer locks
+** are used, the lock is placed on the same range of bytes that is used
+** for probabilistic locking in Win95/98/ME. Hence, the locking scheme
+** will support two or more Win95 readers or two or more WinNT readers.
+** But a single Win95 reader will lock out all WinNT readers and a single
+** WinNT reader will lock out all other Win95 readers.
+**
+** The following #defines specify the range of bytes used for locking.
+** SHARED_SIZE is the number of bytes available in the pool from which
+** a random byte is selected for a shared lock. The pool of bytes for
+** shared locks begins at SHARED_FIRST.
+**
+** The same locking strategy and
+** byte ranges are used for Unix. This leaves open the possibility of having
+** clients on win95, winNT, and unix all talking to the same shared file
+** and all locking correctly. To do so would require that samba (or whatever
+** tool is being used for file sharing) implements locks correctly between
+** windows and unix. I'm guessing that isn't likely to happen, but by
+** using the same locking range we are at least open to the possibility.
+**
+** Locking in windows is mandatory. For this reason, we cannot store
+** actual data in the bytes used for locking. The pager therefore never
+** allocates the pages involved in locking. SHARED_SIZE is selected so
+** that all locks will fit on a single page even at the minimum page size.
+** PENDING_BYTE defines the beginning of the locks. By default PENDING_BYTE
+** is set high so that we don't have to allocate an unused page except
+** for very large databases. But one should test the page skipping logic
+** by setting PENDING_BYTE low and running the entire regression suite.
+**
+** Changing the value of PENDING_BYTE results in a subtly incompatible
+** file format. Depending on how it is changed, you might not notice
+** the incompatibility right away, even running a full regression test.
+** The default location of PENDING_BYTE is the first byte past the
+** 1GB boundary.
+**
+*/
+#ifdef SQLITE_OMIT_WSD
+# define PENDING_BYTE (0x40000000)
+#else
+# define PENDING_BYTE sqlite3PendingByte
+#endif
+#define RESERVED_BYTE (PENDING_BYTE+1)
+#define SHARED_FIRST (PENDING_BYTE+2)
+#define SHARED_SIZE 510
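+
+/* Illustrative sketch (not part of the amalgamation): with the default
+** sqlite3PendingByte value of 0x40000000, the locking bytes work out as
+** computed below. The whole range spans 512 bytes, so all locks fit on
+** a single page even at the minimum page size.
+*/
+#if 0
+#include <assert.h>
+static void exampleLockByteLayout(void){
+  unsigned pending     = 0x40000000;        /* default PENDING_BYTE */
+  unsigned reserved    = pending + 1;       /* RESERVED_BYTE */
+  unsigned sharedFirst = pending + 2;       /* SHARED_FIRST */
+  unsigned sharedLast  = sharedFirst + 510 - 1;
+  assert( reserved==0x40000001 );
+  assert( sharedLast==0x400001ff );         /* 512 bytes in total */
+}
+#endif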
+
+/*
+** Wrapper around OS specific sqlite3_os_init() function.
+*/
+SQLITE_PRIVATE int sqlite3OsInit(void);
+
+/*
+** Functions for accessing sqlite3_file methods
+*/
+SQLITE_PRIVATE void sqlite3OsClose(sqlite3_file*);
+SQLITE_PRIVATE int sqlite3OsRead(sqlite3_file*, void*, int amt, i64 offset);
+SQLITE_PRIVATE int sqlite3OsWrite(sqlite3_file*, const void*, int amt, i64 offset);
+SQLITE_PRIVATE int sqlite3OsTruncate(sqlite3_file*, i64 size);
+SQLITE_PRIVATE int sqlite3OsSync(sqlite3_file*, int);
+SQLITE_PRIVATE int sqlite3OsFileSize(sqlite3_file*, i64 *pSize);
+SQLITE_PRIVATE int sqlite3OsLock(sqlite3_file*, int);
+SQLITE_PRIVATE int sqlite3OsUnlock(sqlite3_file*, int);
+SQLITE_PRIVATE int sqlite3OsCheckReservedLock(sqlite3_file *id, int *pResOut);
+SQLITE_PRIVATE int sqlite3OsFileControl(sqlite3_file*,int,void*);
+SQLITE_PRIVATE void sqlite3OsFileControlHint(sqlite3_file*,int,void*);
+#define SQLITE_FCNTL_DB_UNCHANGED 0xca093fa0
+SQLITE_PRIVATE int sqlite3OsSectorSize(sqlite3_file *id);
+SQLITE_PRIVATE int sqlite3OsDeviceCharacteristics(sqlite3_file *id);
+#ifndef SQLITE_OMIT_WAL
+SQLITE_PRIVATE int sqlite3OsShmMap(sqlite3_file *,int,int,int,void volatile **);
+SQLITE_PRIVATE int sqlite3OsShmLock(sqlite3_file *id, int, int, int);
+SQLITE_PRIVATE void sqlite3OsShmBarrier(sqlite3_file *id);
+SQLITE_PRIVATE int sqlite3OsShmUnmap(sqlite3_file *id, int);
+#endif /* SQLITE_OMIT_WAL */
+SQLITE_PRIVATE int sqlite3OsFetch(sqlite3_file *id, i64, int, void **);
+SQLITE_PRIVATE int sqlite3OsUnfetch(sqlite3_file *, i64, void *);
+
+
+/*
+** Functions for accessing sqlite3_vfs methods
+*/
+SQLITE_PRIVATE int sqlite3OsOpen(sqlite3_vfs *, const char *, sqlite3_file*, int, int *);
+SQLITE_PRIVATE int sqlite3OsDelete(sqlite3_vfs *, const char *, int);
+SQLITE_PRIVATE int sqlite3OsAccess(sqlite3_vfs *, const char *, int, int *pResOut);
+SQLITE_PRIVATE int sqlite3OsFullPathname(sqlite3_vfs *, const char *, int, char *);
+#ifndef SQLITE_OMIT_LOAD_EXTENSION
+SQLITE_PRIVATE void *sqlite3OsDlOpen(sqlite3_vfs *, const char *);
+SQLITE_PRIVATE void sqlite3OsDlError(sqlite3_vfs *, int, char *);
+SQLITE_PRIVATE void (*sqlite3OsDlSym(sqlite3_vfs *, void *, const char *))(void);
+SQLITE_PRIVATE void sqlite3OsDlClose(sqlite3_vfs *, void *);
+#endif /* SQLITE_OMIT_LOAD_EXTENSION */
+SQLITE_PRIVATE int sqlite3OsRandomness(sqlite3_vfs *, int, char *);
+SQLITE_PRIVATE int sqlite3OsSleep(sqlite3_vfs *, int);
+SQLITE_PRIVATE int sqlite3OsGetLastError(sqlite3_vfs*);
+SQLITE_PRIVATE int sqlite3OsCurrentTimeInt64(sqlite3_vfs *, sqlite3_int64*);
+
+/*
+** Convenience functions for opening and closing files using
+** sqlite3_malloc() to obtain space for the file-handle structure.
+*/
+SQLITE_PRIVATE int sqlite3OsOpenMalloc(sqlite3_vfs *, const char *, sqlite3_file **, int,int*);
+SQLITE_PRIVATE void sqlite3OsCloseFree(sqlite3_file *);
+
+#endif /* _SQLITE_OS_H_ */
+
+/************** End of os.h **************************************************/
+/************** Continuing where we left off in sqliteInt.h ******************/
+/************** Include mutex.h in the middle of sqliteInt.h *****************/
+/************** Begin file mutex.h *******************************************/
+/*
+** 2007 August 28
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+**
+** This file contains the common header for all mutex implementations.
+** The sqliteInt.h header #includes this file so that it is available
+** to all source files. We break it out in an effort to keep the code
+** better organized.
+**
+** NOTE: source files should *not* #include this header file directly.
+** Source files should #include the sqliteInt.h file and let that file
+** include this one indirectly.
+*/
+
+
+/*
+** Figure out what version of the code to use. The choices are
+**
+** SQLITE_MUTEX_OMIT No mutex logic. Not even stubs. The
+** mutexes implementation cannot be overridden
+** at start-time.
+**
+** SQLITE_MUTEX_NOOP For single-threaded applications. No
+** mutual exclusion is provided. But this
+** implementation can be overridden at
+** start-time.
+**
+** SQLITE_MUTEX_PTHREADS For multi-threaded applications on Unix.
+**
+** SQLITE_MUTEX_W32 For multi-threaded applications on Win32.
+*/
+#if !SQLITE_THREADSAFE
+# define SQLITE_MUTEX_OMIT
+#endif
+#if SQLITE_THREADSAFE && !defined(SQLITE_MUTEX_NOOP)
+# if SQLITE_OS_UNIX
+# define SQLITE_MUTEX_PTHREADS
+# elif SQLITE_OS_WIN
+# define SQLITE_MUTEX_W32
+# else
+# define SQLITE_MUTEX_NOOP
+# endif
+#endif
+
+#ifdef SQLITE_MUTEX_OMIT
+/*
+** If this is a no-op implementation, implement everything as macros.
+*/
+#define sqlite3_mutex_alloc(X) ((sqlite3_mutex*)8)
+#define sqlite3_mutex_free(X)
+#define sqlite3_mutex_enter(X)
+#define sqlite3_mutex_try(X) SQLITE_OK
+#define sqlite3_mutex_leave(X)
+#define sqlite3_mutex_held(X) ((void)(X),1)
+#define sqlite3_mutex_notheld(X) ((void)(X),1)
+#define sqlite3MutexAlloc(X) ((sqlite3_mutex*)8)
+#define sqlite3MutexInit() SQLITE_OK
+#define sqlite3MutexEnd()
+#define MUTEX_LOGIC(X)
+#else
+#define MUTEX_LOGIC(X) X
+SQLITE_API int sqlite3_mutex_held(sqlite3_mutex*);
+#endif /* defined(SQLITE_MUTEX_OMIT) */
+
+/************** End of mutex.h ***********************************************/
+/************** Continuing where we left off in sqliteInt.h ******************/
+
+/* The SQLITE_EXTRA_DURABLE compile-time option used to set the default
+** synchronous setting to EXTRA. It is no longer supported.
+*/
+#ifdef SQLITE_EXTRA_DURABLE
+# warning Use SQLITE_DEFAULT_SYNCHRONOUS=3 instead of SQLITE_EXTRA_DURABLE
+# define SQLITE_DEFAULT_SYNCHRONOUS 3
+#endif
+
+/*
+** Default synchronous levels.
+**
+** Note that (for historical reasons) the PAGER_SYNCHRONOUS_* macros differ
+** from the SQLITE_DEFAULT_SYNCHRONOUS value by 1.
+**
+** PAGER_SYNCHRONOUS DEFAULT_SYNCHRONOUS
+** OFF 1 0
+** NORMAL 2 1
+** FULL 3 2
+** EXTRA 4 3
+**
+** The "PRAGMA synchronous" statement also uses the zero-based numbers.
+** In other words, the zero-based numbers are used for all external interfaces
+** and the one-based values are used internally.
+*/
+#ifndef SQLITE_DEFAULT_SYNCHRONOUS
+# define SQLITE_DEFAULT_SYNCHRONOUS 2
+#endif
+#ifndef SQLITE_DEFAULT_WAL_SYNCHRONOUS
+# define SQLITE_DEFAULT_WAL_SYNCHRONOUS SQLITE_DEFAULT_SYNCHRONOUS
+#endif
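+
+/* Illustrative sketch (not part of the amalgamation): converting between
+** the zero-based value used by "PRAGMA synchronous" and the one-based
+** internal level is a matter of adding one, per the table above.
+*/
+#if 0
+static int examplePagerLevelFromPragma(int pragmaValue){
+  /* pragmaValue: 0=OFF, 1=NORMAL, 2=FULL, 3=EXTRA (external interface) */
+  return pragmaValue + 1;   /* one-based PAGER_SYNCHRONOUS_* level */
+}
+#endif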
+
+/*
+** Each database file to be accessed by the system is an instance
+** of the following structure. There are normally two of these structures
+** in the sqlite3.aDb[] array. aDb[0] is the main database file and
+** aDb[1] is the database file used to hold temporary tables. Additional
+** databases may be attached.
+*/
+struct Db {
+ char *zDbSName; /* Name of this database. (schema name, not filename) */
+ Btree *pBt; /* The B*Tree structure for this database file */
+ u8 safety_level; /* How aggressive at syncing data to disk */
+ u8 bSyncSet; /* True if "PRAGMA synchronous=N" has been run */
+ Schema *pSchema; /* Pointer to database schema (possibly shared) */
+};
+
+/*
+** An instance of the following structure stores a database schema.
+**
+** Most Schema objects are associated with a Btree. The exception is
+** the Schema for the TEMP database (sqlite3.aDb[1]) which is free-standing.
+** In shared cache mode, a single Schema object can be shared by multiple
+** Btrees that refer to the same underlying BtShared object.
+**
+** Schema objects are automatically deallocated when the last Btree that
+** references them is destroyed. The TEMP Schema is manually freed by
+** sqlite3_close().
+**
+** A thread must be holding a mutex on the corresponding Btree in order
+** to access Schema content. This implies that the thread must also be
+** holding a mutex on the sqlite3 connection pointer that owns the Btree.
+** For a TEMP Schema, only the connection mutex is required.
+*/
+struct Schema {
+ int schema_cookie; /* Database schema version number for this file */
+ int iGeneration; /* Generation counter. Incremented with each change */
+ Hash tblHash; /* All tables indexed by name */
+ Hash idxHash; /* All (named) indices indexed by name */
+ Hash trigHash; /* All triggers indexed by name */
+ Hash fkeyHash; /* All foreign keys by referenced table name */
+ Table *pSeqTab; /* The sqlite_sequence table used by AUTOINCREMENT */
+ u8 file_format; /* Schema format version for this file */
+ u8 enc; /* Text encoding used by this database */
+ u16 schemaFlags; /* Flags associated with this schema */
+ int cache_size; /* Number of pages to use in the cache */
+};
+
+/*
+** These macros can be used to test, set, or clear bits in the
+** Db.pSchema->schemaFlags field.
+*/
+#define DbHasProperty(D,I,P) (((D)->aDb[I].pSchema->schemaFlags&(P))==(P))
+#define DbHasAnyProperty(D,I,P) (((D)->aDb[I].pSchema->schemaFlags&(P))!=0)
+#define DbSetProperty(D,I,P) (D)->aDb[I].pSchema->schemaFlags|=(P)
+#define DbClearProperty(D,I,P) (D)->aDb[I].pSchema->schemaFlags&=~(P)
+
+/*
+** Allowed values for the Db.pSchema->schemaFlags field.
+**
+** The DB_SchemaLoaded flag is set after the database schema has been
+** read into internal hash tables.
+**
+** DB_UnresetViews means that one or more views have column names that
+** have been filled out. If the schema changes, these column names might
+** change and so the views will need to be reset.
+*/
+#define DB_SchemaLoaded 0x0001 /* The schema has been loaded */
+#define DB_UnresetViews 0x0002 /* Some views have defined column names */
+#define DB_ResetWanted 0x0008 /* Reset the schema when nSchemaLock==0 */
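+
+/* Illustrative sketch (not part of the amalgamation): typical use of the
+** property macros above, checking whether the main database (index 0)
+** has had its schema read into the in-memory hash tables. The helper
+** name is hypothetical.
+*/
+#if 0
+static int exampleSchemaReady(sqlite3 *db){
+  return DbHasProperty(db, 0, DB_SchemaLoaded);
+}
+#endif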
+
+/*
+** The number of different kinds of things that can be limited
+** using the sqlite3_limit() interface.
+*/
+#define SQLITE_N_LIMIT (SQLITE_LIMIT_WORKER_THREADS+1)
+
+/*
+** Lookaside malloc is a set of fixed-size buffers that can be used
+** to satisfy small transient memory allocation requests for objects
+** associated with a particular database connection. The use of
+** lookaside malloc provides a significant performance enhancement
+** (approx 10%) by avoiding numerous malloc/free requests while parsing
+** SQL statements.
+**
+** The Lookaside structure holds configuration information about the
+** lookaside malloc subsystem. Each available memory allocation in
+** the lookaside subsystem is stored on a linked list of LookasideSlot
+** objects.
+**
+** Lookaside allocations are only allowed for objects that are associated
+** with a particular database connection. Hence, schema information cannot
+** be stored in lookaside because in shared cache mode the schema information
+** is shared by multiple database connections. Therefore, while parsing
+** schema information, the Lookaside.bEnabled flag is cleared so that
+** lookaside allocations are not used to construct the schema objects.
+**
+** New lookaside allocations are only allowed if bDisable==0. When
+** bDisable is greater than zero, sz is set to zero which effectively
+** disables lookaside without adding a new test for the bDisable flag
+** in a performance-critical path. sz should be set back to szTrue whenever
+** bDisable changes back to zero.
+**
+** Lookaside buffers are initially held on the pInit list. As they are
+** used and freed, they are added back to the pFree list. New allocations
+** come off of pFree first, then pInit as a fallback. This dual-list
+** allows us to compute a high-water mark - the maximum number of
+** allocations outstanding at any point in the past - by subtracting the
+** number of allocations on the pInit list from the total number of
+** allocations. A sketch of this computation follows the LOOKASIDE_SMALL
+** definition below.
+**
+** Enhancement on 2019-12-12: Two-size-lookaside
+** The default lookaside configuration is 100 slots of 1200 bytes each.
+** The larger slot sizes are important for performance, but they waste
+** a lot of space, as most lookaside allocations are less than 128 bytes.
+** The two-size-lookaside enhancement breaks up the lookaside allocation
+** into two pools: One of 128-byte slots and the other of the default size
+** (1200-byte) slots. Allocations are filled from the small-pool first,
+** failing over to the full-size pool if that does not work. Thus more
+** lookaside slots are available while also using less memory.
+** This enhancement can be omitted by compiling with
+** SQLITE_OMIT_TWOSIZE_LOOKASIDE.
+*/
+struct Lookaside {
+ u32 bDisable; /* Only operate the lookaside when zero */
+ u16 sz; /* Size of each buffer in bytes */
+ u16 szTrue; /* True value of sz, even if disabled */
+ u8 bMalloced; /* True if pStart obtained from sqlite3_malloc() */
+ u32 nSlot; /* Number of lookaside slots allocated */
+ u32 anStat[3]; /* 0: hits. 1: size misses. 2: full misses */
+ LookasideSlot *pInit; /* List of buffers not previously used */
+ LookasideSlot *pFree; /* List of available buffers */
+#ifndef SQLITE_OMIT_TWOSIZE_LOOKASIDE
+  LookasideSlot *pSmallInit;  /* List of small buffers not previously used */
+ LookasideSlot *pSmallFree; /* List of available small buffers */
+ void *pMiddle; /* First byte past end of full-size buffers and
+ ** the first byte of LOOKASIDE_SMALL buffers */
+#endif /* SQLITE_OMIT_TWOSIZE_LOOKASIDE */
+ void *pStart; /* First byte of available memory space */
+ void *pEnd; /* First byte past end of available space */
+};
+struct LookasideSlot {
+ LookasideSlot *pNext; /* Next buffer in the list of free buffers */
+};
+
+#define DisableLookaside db->lookaside.bDisable++;db->lookaside.sz=0
+#define EnableLookaside db->lookaside.bDisable--;\
+ db->lookaside.sz=db->lookaside.bDisable?0:db->lookaside.szTrue
+
+/* Size of the smaller allocations in two-size lookaside */
+#ifdef SQLITE_OMIT_TWOSIZE_LOOKASIDE
+# define LOOKASIDE_SMALL 0
+#else
+# define LOOKASIDE_SMALL 128
+#endif
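+
+/* Illustrative sketch (not part of the amalgamation): the dual
+** pInit/pFree lists let the high-water mark be derived without a
+** per-allocation counter. Every slot still on pInit has never been
+** handed out, so the maximum number of simultaneous allocations is
+** nSlot minus the number of never-used slots. The walker below is
+** hypothetical.
+*/
+#if 0
+static u32 exampleLookasideHighWater(Lookaside *p){
+  u32 nNeverUsed = 0;
+  LookasideSlot *pSlot;
+  for(pSlot=p->pInit; pSlot; pSlot=pSlot->pNext) nNeverUsed++;
+  return p->nSlot - nNeverUsed;
+}
+#endif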
+
+/*
+** A hash table for built-in function definitions. (Application-defined
+** functions use a regular hash table from hash.h.)
+**
+** Hash each FuncDef structure into one of the FuncDefHash.a[] slots.
+** Collisions are on the FuncDef.u.pHash chain. Use the SQLITE_FUNC_HASH()
+** macro to compute a hash on the function name.
+*/
+#define SQLITE_FUNC_HASH_SZ 23
+struct FuncDefHash {
+ FuncDef *a[SQLITE_FUNC_HASH_SZ]; /* Hash table for functions */
+};
+#define SQLITE_FUNC_HASH(C,L) (((C)+(L))%SQLITE_FUNC_HASH_SZ)
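+
+/* Illustrative sketch (not part of the amalgamation): the bucket for a
+** built-in function is computed from a character of its name and the
+** name length; real lookups fold the character to lower case first.
+*/
+#if 0
+static int exampleFuncHashBucket(void){
+  return SQLITE_FUNC_HASH('m', 3);  /* bucket for a 3-char name like "max" */
+}
+#endif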
+
+#ifdef SQLITE_USER_AUTHENTICATION
+/*
+** Information held in the "sqlite3" database connection object and used
+** to manage user authentication.
+*/
+typedef struct sqlite3_userauth sqlite3_userauth;
+struct sqlite3_userauth {
+ u8 authLevel; /* Current authentication level */
+ int nAuthPW; /* Size of the zAuthPW in bytes */
+ char *zAuthPW; /* Password used to authenticate */
+ char *zAuthUser; /* User name used to authenticate */
+};
+
+/* Allowed values for sqlite3_userauth.authLevel */
+#define UAUTH_Unknown 0 /* Authentication not yet checked */
+#define UAUTH_Fail 1 /* User authentication failed */
+#define UAUTH_User 2 /* Authenticated as a normal user */
+#define UAUTH_Admin 3 /* Authenticated as an administrator */
+
+/* Functions used only by user authorization logic */
+SQLITE_PRIVATE int sqlite3UserAuthTable(const char*);
+SQLITE_PRIVATE int sqlite3UserAuthCheckLogin(sqlite3*,const char*,u8*);
+SQLITE_PRIVATE void sqlite3UserAuthInit(sqlite3*);
+SQLITE_PRIVATE void sqlite3CryptFunc(sqlite3_context*,int,sqlite3_value**);
+
+#endif /* SQLITE_USER_AUTHENTICATION */
+
+/*
+** typedef for the authorization callback function.
+*/
+#ifdef SQLITE_USER_AUTHENTICATION
+ typedef int (*sqlite3_xauth)(void*,int,const char*,const char*,const char*,
+ const char*, const char*);
+#else
+ typedef int (*sqlite3_xauth)(void*,int,const char*,const char*,const char*,
+ const char*);
+#endif
+
+#ifndef SQLITE_OMIT_DEPRECATED
+/* This is an extra SQLITE_TRACE macro that indicates "legacy" tracing
+** in the style of sqlite3_trace()
+*/
+#define SQLITE_TRACE_LEGACY 0x40 /* Use the legacy xTrace */
+#define SQLITE_TRACE_XPROFILE 0x80 /* Use the legacy xProfile */
+#else
+#define SQLITE_TRACE_LEGACY 0
+#define SQLITE_TRACE_XPROFILE 0
+#endif /* SQLITE_OMIT_DEPRECATED */
+#define SQLITE_TRACE_NONLEGACY_MASK 0x0f /* Normal flags */
+
+
+/*
+** Each database connection is an instance of the following structure.
+*/
+struct sqlite3 {
+ sqlite3_vfs *pVfs; /* OS Interface */
+ struct Vdbe *pVdbe; /* List of active virtual machines */
+ CollSeq *pDfltColl; /* BINARY collseq for the database encoding */
+ sqlite3_mutex *mutex; /* Connection mutex */
+ Db *aDb; /* All backends */
+ int nDb; /* Number of backends currently in use */
+ u32 mDbFlags; /* flags recording internal state */
+ u64 flags; /* flags settable by pragmas. See below */
+ i64 lastRowid; /* ROWID of most recent insert (see above) */
+ i64 szMmap; /* Default mmap_size setting */
+ u32 nSchemaLock; /* Do not reset the schema when non-zero */
+ unsigned int openFlags; /* Flags passed to sqlite3_vfs.xOpen() */
+ int errCode; /* Most recent error code (SQLITE_*) */
+ int errMask; /* & result codes with this before returning */
+ int iSysErrno; /* Errno value from last system error */
+ u16 dbOptFlags; /* Flags to enable/disable optimizations */
+ u8 enc; /* Text encoding */
+ u8 autoCommit; /* The auto-commit flag. */
+ u8 temp_store; /* 1: file 2: memory 0: default */
+ u8 mallocFailed; /* True if we have seen a malloc failure */
+ u8 bBenignMalloc; /* Do not require OOMs if true */
+ u8 dfltLockMode; /* Default locking-mode for attached dbs */
+ signed char nextAutovac; /* Autovac setting after VACUUM if >=0 */
+ u8 suppressErr; /* Do not issue error messages if true */
+ u8 vtabOnConflict; /* Value to return for s3_vtab_on_conflict() */
+ u8 isTransactionSavepoint; /* True if the outermost savepoint is a TS */
+ u8 mTrace; /* zero or more SQLITE_TRACE flags */
+ u8 noSharedCache; /* True if no shared-cache backends */
+ u8 nSqlExec; /* Number of pending OP_SqlExec opcodes */
+ int nextPagesize; /* Pagesize after VACUUM if >0 */
+  u32 magic;                  /* Magic number to detect library misuse */
+ int nChange; /* Value returned by sqlite3_changes() */
+ int nTotalChange; /* Value returned by sqlite3_total_changes() */
+ int aLimit[SQLITE_N_LIMIT]; /* Limits */
+ int nMaxSorterMmap; /* Maximum size of regions mapped by sorter */
+ struct sqlite3InitInfo { /* Information used during initialization */
+ Pgno newTnum; /* Rootpage of table being initialized */
+ u8 iDb; /* Which db file is being initialized */
+ u8 busy; /* TRUE if currently initializing */
+ unsigned orphanTrigger : 1; /* Last statement is orphaned TEMP trigger */
+ unsigned imposterTable : 1; /* Building an imposter table */
+ unsigned reopenMemdb : 1; /* ATTACH is really a reopen using MemDB */
+ char **azInit; /* "type", "name", and "tbl_name" columns */
+ } init;
+ int nVdbeActive; /* Number of VDBEs currently running */
+ int nVdbeRead; /* Number of active VDBEs that read or write */
+ int nVdbeWrite; /* Number of active VDBEs that read and write */
+ int nVdbeExec; /* Number of nested calls to VdbeExec() */
+ int nVDestroy; /* Number of active OP_VDestroy operations */
+ int nExtension; /* Number of loaded extensions */
+ void **aExtension; /* Array of shared library handles */
+ union {
+ void (*xLegacy)(void*,const char*); /* Legacy trace function */
+ int (*xV2)(u32,void*,void*,void*); /* V2 Trace function */
+ } trace;
+ void *pTraceArg; /* Argument to the trace function */
+#ifndef SQLITE_OMIT_DEPRECATED
+ void (*xProfile)(void*,const char*,u64); /* Profiling function */
+ void *pProfileArg; /* Argument to profile function */
+#endif
+ void *pCommitArg; /* Argument to xCommitCallback() */
+ int (*xCommitCallback)(void*); /* Invoked at every commit. */
+ void *pRollbackArg; /* Argument to xRollbackCallback() */
+  void (*xRollbackCallback)(void*); /* Invoked at every rollback. */
+ void *pUpdateArg;
+ void (*xUpdateCallback)(void*,int, const char*,const char*,sqlite_int64);
+ Parse *pParse; /* Current parse */
+#ifdef SQLITE_ENABLE_PREUPDATE_HOOK
+ void *pPreUpdateArg; /* First argument to xPreUpdateCallback */
+ void (*xPreUpdateCallback)( /* Registered using sqlite3_preupdate_hook() */
+ void*,sqlite3*,int,char const*,char const*,sqlite3_int64,sqlite3_int64
+ );
+ PreUpdate *pPreUpdate; /* Context for active pre-update callback */
+#endif /* SQLITE_ENABLE_PREUPDATE_HOOK */
+#ifndef SQLITE_OMIT_WAL
+ int (*xWalCallback)(void *, sqlite3 *, const char *, int);
+ void *pWalArg;
+#endif
+ void(*xCollNeeded)(void*,sqlite3*,int eTextRep,const char*);
+ void(*xCollNeeded16)(void*,sqlite3*,int eTextRep,const void*);
+ void *pCollNeededArg;
+ sqlite3_value *pErr; /* Most recent error message */
+ union {
+ volatile int isInterrupted; /* True if sqlite3_interrupt has been called */
+ double notUsed1; /* Spacer */
+ } u1;
+ Lookaside lookaside; /* Lookaside malloc configuration */
+#ifndef SQLITE_OMIT_AUTHORIZATION
+ sqlite3_xauth xAuth; /* Access authorization function */
+ void *pAuthArg; /* 1st argument to the access auth function */
+#endif
+#ifndef SQLITE_OMIT_PROGRESS_CALLBACK
+ int (*xProgress)(void *); /* The progress callback */
+ void *pProgressArg; /* Argument to the progress callback */
+ unsigned nProgressOps; /* Number of opcodes for progress callback */
+#endif
+#ifndef SQLITE_OMIT_VIRTUALTABLE
+ int nVTrans; /* Allocated size of aVTrans */
+ Hash aModule; /* populated by sqlite3_create_module() */
+ VtabCtx *pVtabCtx; /* Context for active vtab connect/create */
+ VTable **aVTrans; /* Virtual tables with open transactions */
+ VTable *pDisconnect; /* Disconnect these in next sqlite3_prepare() */
+#endif
+ Hash aFunc; /* Hash table of connection functions */
+ Hash aCollSeq; /* All collating sequences */
+ BusyHandler busyHandler; /* Busy callback */
+ Db aDbStatic[2]; /* Static space for the 2 default backends */
+ Savepoint *pSavepoint; /* List of active savepoints */
+ int nAnalysisLimit; /* Number of index rows to ANALYZE */
+ int busyTimeout; /* Busy handler timeout, in msec */
+ int nSavepoint; /* Number of non-transaction savepoints */
+ int nStatement; /* Number of nested statement-transactions */
+ i64 nDeferredCons; /* Net deferred constraints this transaction. */
+ i64 nDeferredImmCons; /* Net deferred immediate constraints */
+ int *pnBytesFreed; /* If not NULL, increment this in DbFree() */
+#ifdef SQLITE_ENABLE_UNLOCK_NOTIFY
+ /* The following variables are all protected by the STATIC_MAIN
+ ** mutex, not by sqlite3.mutex. They are used by code in notify.c.
+ **
+ ** When X.pUnlockConnection==Y, that means that X is waiting for Y to
+ ** unlock so that it can proceed.
+ **
+  ** When X.pBlockingConnection==Y, that means that something that X
+  ** tried to do recently failed with an SQLITE_LOCKED error due to locks
+ ** held by Y.
+ */
+ sqlite3 *pBlockingConnection; /* Connection that caused SQLITE_LOCKED */
+ sqlite3 *pUnlockConnection; /* Connection to watch for unlock */
+ void *pUnlockArg; /* Argument to xUnlockNotify */
+ void (*xUnlockNotify)(void **, int); /* Unlock notify callback */
+ sqlite3 *pNextBlocked; /* Next in list of all blocked connections */
+#endif
+#ifdef SQLITE_USER_AUTHENTICATION
+ sqlite3_userauth auth; /* User authentication information */
+#endif
+};
+
+/*
+** A macro to discover the encoding of a database.
+*/
+#define SCHEMA_ENC(db) ((db)->aDb[0].pSchema->enc)
+#define ENC(db) ((db)->enc)
+
+/*
+** A u64 constant where the lower 32 bits are all zeros. Only the
+** upper 32 bits are included in the argument. Necessary because some
+** C-compilers still do not accept LL integer literals.
+*/
+#define HI(X) ((u64)(X)<<32)
+
+/*
+** Possible values for the sqlite3.flags.
+**
+** Value constraints (enforced via assert()):
+** SQLITE_FullFSync == PAGER_FULLFSYNC
+** SQLITE_CkptFullFSync == PAGER_CKPT_FULLFSYNC
+** SQLITE_CacheSpill == PAGER_CACHE_SPILL
+*/
+#define SQLITE_WriteSchema 0x00000001 /* OK to update SQLITE_SCHEMA */
+#define SQLITE_LegacyFileFmt 0x00000002 /* Create new databases in format 1 */
+#define SQLITE_FullColNames 0x00000004 /* Show full column names on SELECT */
+#define SQLITE_FullFSync 0x00000008 /* Use full fsync on the backend */
+#define SQLITE_CkptFullFSync 0x00000010 /* Use full fsync for checkpoint */
+#define SQLITE_CacheSpill 0x00000020 /* OK to spill pager cache */
+#define SQLITE_ShortColNames 0x00000040 /* Show short columns names */
+#define SQLITE_TrustedSchema 0x00000080 /* Allow unsafe functions and
+ ** vtabs in the schema definition */
+#define SQLITE_NullCallback 0x00000100 /* Invoke the callback once if the */
+ /* result set is empty */
+#define SQLITE_IgnoreChecks 0x00000200 /* Do not enforce check constraints */
+#define SQLITE_ReadUncommit 0x00000400 /* READ UNCOMMITTED in shared-cache */
+#define SQLITE_NoCkptOnClose 0x00000800 /* No checkpoint on close()/DETACH */
+#define SQLITE_ReverseOrder 0x00001000 /* Reverse unordered SELECTs */
+#define SQLITE_RecTriggers 0x00002000 /* Enable recursive triggers */
+#define SQLITE_ForeignKeys 0x00004000 /* Enforce foreign key constraints */
+#define SQLITE_AutoIndex 0x00008000 /* Enable automatic indexes */
+#define SQLITE_LoadExtension 0x00010000 /* Enable load_extension */
+#define SQLITE_LoadExtFunc 0x00020000 /* Enable load_extension() SQL func */
+#define SQLITE_EnableTrigger 0x00040000 /* True to enable triggers */
+#define SQLITE_DeferFKs 0x00080000 /* Defer all FK constraints */
+#define SQLITE_QueryOnly 0x00100000 /* Disable database changes */
+#define SQLITE_CellSizeCk 0x00200000 /* Check btree cell sizes on load */
+#define SQLITE_Fts3Tokenizer 0x00400000 /* Enable fts3_tokenizer(2) */
+#define SQLITE_EnableQPSG 0x00800000 /* Query Planner Stability Guarantee*/
+#define SQLITE_TriggerEQP 0x01000000 /* Show trigger EXPLAIN QUERY PLAN */
+#define SQLITE_ResetDatabase 0x02000000 /* Reset the database */
+#define SQLITE_LegacyAlter 0x04000000 /* Legacy ALTER TABLE behaviour */
+#define SQLITE_NoSchemaError 0x08000000 /* Do not report schema parse errors*/
+#define SQLITE_Defensive 0x10000000 /* Input SQL is likely hostile */
+#define SQLITE_DqsDDL 0x20000000 /* dbl-quoted strings allowed in DDL*/
+#define SQLITE_DqsDML 0x40000000 /* dbl-quoted strings allowed in DML*/
+#define SQLITE_EnableView 0x80000000 /* Enable the use of views */
+#define SQLITE_CountRows HI(0x00001) /* Count rows changed by INSERT, */
+ /* DELETE, or UPDATE and return */
+ /* the count using a callback. */
+
+/* Flags used only if debugging */
+#ifdef SQLITE_DEBUG
+#define SQLITE_SqlTrace HI(0x0100000) /* Debug print SQL as it executes */
+#define SQLITE_VdbeListing HI(0x0200000) /* Debug listings of VDBE progs */
+#define SQLITE_VdbeTrace HI(0x0400000) /* True to trace VDBE execution */
+#define SQLITE_VdbeAddopTrace HI(0x0800000) /* Trace sqlite3VdbeAddOp() calls */
+#define SQLITE_VdbeEQP HI(0x1000000) /* Debug EXPLAIN QUERY PLAN */
+#define SQLITE_ParserTrace HI(0x2000000) /* PRAGMA parser_trace=ON */
+#endif
+
+/*
+** Allowed values for sqlite3.mDbFlags
+*/
+#define DBFLAG_SchemaChange 0x0001 /* Uncommitted Hash table changes */
+#define DBFLAG_PreferBuiltin 0x0002 /* Preference to built-in funcs */
+#define DBFLAG_Vacuum 0x0004 /* Currently in a VACUUM */
+#define DBFLAG_VacuumInto 0x0008 /* Currently running VACUUM INTO */
+#define DBFLAG_SchemaKnownOk 0x0010 /* Schema is known to be valid */
+#define DBFLAG_InternalFunc 0x0020 /* Allow use of internal functions */
+#define DBFLAG_EncodingFixed 0x0040 /* No longer possible to change enc. */
+
+/*
+** Bits of the sqlite3.dbOptFlags field that are used by the
+** sqlite3_test_control(SQLITE_TESTCTRL_OPTIMIZATIONS,...) interface to
+** selectively disable various optimizations.
+*/
+#define SQLITE_QueryFlattener 0x0001 /* Query flattening */
+#define SQLITE_WindowFunc 0x0002 /* Use xInverse for window functions */
+#define SQLITE_GroupByOrder 0x0004 /* GROUPBY cover of ORDERBY */
+#define SQLITE_FactorOutConst 0x0008 /* Constant factoring */
+#define SQLITE_DistinctOpt 0x0010 /* DISTINCT using indexes */
+#define SQLITE_CoverIdxScan 0x0020 /* Covering index scans */
+#define SQLITE_OrderByIdxJoin 0x0040 /* ORDER BY of joins via index */
+#define SQLITE_Transitive 0x0080 /* Transitive constraints */
+#define SQLITE_OmitNoopJoin 0x0100 /* Omit unused tables in joins */
+#define SQLITE_CountOfView 0x0200 /* The count-of-view optimization */
+#define SQLITE_CursorHints 0x0400 /* Add OP_CursorHint opcodes */
+#define SQLITE_Stat4 0x0800 /* Use STAT4 data */
+ /* TH3 expects the Stat4 ^^^^^^ value to be 0x0800. Don't change it */
+#define SQLITE_PushDown 0x1000 /* The push-down optimization */
+#define SQLITE_SimplifyJoin 0x2000 /* Convert LEFT JOIN to JOIN */
+#define SQLITE_SkipScan 0x4000 /* Skip-scans */
+#define SQLITE_PropagateConst 0x8000 /* The constant propagation opt */
+#define SQLITE_AllOpts 0xffff /* All optimizations */
+
+/*
+** Macros for testing whether or not optimizations are enabled or disabled.
+*/
+#define OptimizationDisabled(db, mask) (((db)->dbOptFlags&(mask))!=0)
+#define OptimizationEnabled(db, mask) (((db)->dbOptFlags&(mask))==0)
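+
+/* Illustrative sketch (not part of the amalgamation): how a planner-side
+** check might guard an optimization with the macros above. The helper
+** name is hypothetical.
+*/
+#if 0
+static int exampleMayFlattenQuery(sqlite3 *db){
+  return OptimizationEnabled(db, SQLITE_QueryFlattener);
+}
+#endif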
+
+/*
+** Return true if it is OK to factor constant expressions into the initialization
+** code. The argument is a Parse object for the code generator.
+*/
+#define ConstFactorOk(P) ((P)->okConstFactor)
+
+/*
+** Possible values for the sqlite3.magic field.
+** The numbers are obtained at random and have no special meaning, other
+** than being distinct from one another.
+*/
+#define SQLITE_MAGIC_OPEN 0xa029a697 /* Database is open */
+#define SQLITE_MAGIC_CLOSED 0x9f3c2d33 /* Database is closed */
+#define SQLITE_MAGIC_SICK 0x4b771290 /* Error and awaiting close */
+#define SQLITE_MAGIC_BUSY 0xf03b7906 /* Database currently in use */
+#define SQLITE_MAGIC_ERROR 0xb5357930 /* An SQLITE_MISUSE error occurred */
+#define SQLITE_MAGIC_ZOMBIE 0x64cffc7f /* Close with last statement close */
+
+/*
+** Each SQL function is defined by an instance of the following
+** structure. For global built-in functions (ex: substr(), max(), count())
+** a pointer to this structure is held in the sqlite3BuiltinFunctions object.
+** For per-connection application-defined functions, a pointer to this
+** structure is held in the db->aHash hash table.
+**
+** The u.pHash field is used by the global built-ins. The u.pDestructor
+** field is used by per-connection app-def functions.
+*/
+struct FuncDef {
+ i8 nArg; /* Number of arguments. -1 means unlimited */
+ u32 funcFlags; /* Some combination of SQLITE_FUNC_* */
+ void *pUserData; /* User data parameter */
+ FuncDef *pNext; /* Next function with same name */
+ void (*xSFunc)(sqlite3_context*,int,sqlite3_value**); /* func or agg-step */
+ void (*xFinalize)(sqlite3_context*); /* Agg finalizer */
+ void (*xValue)(sqlite3_context*); /* Current agg value */
+ void (*xInverse)(sqlite3_context*,int,sqlite3_value**); /* inverse agg-step */
+ const char *zName; /* SQL name of the function. */
+ union {
+ FuncDef *pHash; /* Next with a different name but the same hash */
+ FuncDestructor *pDestructor; /* Reference counted destructor function */
+ } u;
+};
+
+/*
+** This structure encapsulates a user-function destructor callback (as
+** configured using create_function_v2()) and a reference counter. When
+** create_function_v2() is called to create a function with a destructor,
+** a single object of this type is allocated. FuncDestructor.nRef is set to
+** the number of FuncDef objects created (either 1 or 3, depending on whether
+** or not the specified encoding is SQLITE_ANY). The FuncDef.pDestructor
+** member of each of the new FuncDef objects is set to point to the allocated
+** FuncDestructor.
+**
+** Thereafter, when one of the FuncDef objects is deleted, the reference
+** count on this object is decremented. When it reaches 0, the destructor
+** is invoked and the FuncDestructor structure freed.
+*/
+struct FuncDestructor {
+ int nRef;
+ void (*xDestroy)(void *);
+ void *pUserData;
+};
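+
+/* Illustrative sketch (not part of the amalgamation): the reference-count
+** discipline described above. When a FuncDef that shares this destructor
+** is deleted, nRef drops by one; at zero the user callback runs and the
+** wrapper is freed. The helper is hypothetical and uses plain free()
+** rather than the internal allocator, for brevity.
+*/
+#if 0
+#include <stdlib.h>
+static void exampleReleaseFuncDestructor(FuncDestructor *p){
+  if( p && --p->nRef==0 ){
+    if( p->xDestroy ) p->xDestroy(p->pUserData);
+    free(p);
+  }
+}
+#endif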
+
+/*
+** Possible values for FuncDef.flags. Note that the _LENGTH and _TYPEOF
+** values must correspond to OPFLAG_LENGTHARG and OPFLAG_TYPEOFARG. And
+** SQLITE_FUNC_CONSTANT must be the same as SQLITE_DETERMINISTIC. There
+** are assert() statements in the code to verify this.
+**
+** Value constraints (enforced via assert()):
+** SQLITE_FUNC_MINMAX == NC_MinMaxAgg == SF_MinMaxAgg
+** SQLITE_FUNC_LENGTH == OPFLAG_LENGTHARG
+** SQLITE_FUNC_TYPEOF == OPFLAG_TYPEOFARG
+** SQLITE_FUNC_CONSTANT == SQLITE_DETERMINISTIC from the API
+** SQLITE_FUNC_DIRECT == SQLITE_DIRECTONLY from the API
+** SQLITE_FUNC_UNSAFE == SQLITE_INNOCUOUS
+** SQLITE_FUNC_ENCMASK depends on SQLITE_UTF* macros in the API
+*/
+#define SQLITE_FUNC_ENCMASK 0x0003 /* SQLITE_UTF8, SQLITE_UTF16BE or UTF16LE */
+#define SQLITE_FUNC_LIKE 0x0004 /* Candidate for the LIKE optimization */
+#define SQLITE_FUNC_CASE 0x0008 /* Case-sensitive LIKE-type function */
+#define SQLITE_FUNC_EPHEM 0x0010 /* Ephemeral. Delete with VDBE */
+#define SQLITE_FUNC_NEEDCOLL 0x0020 /* sqlite3GetFuncCollSeq() might be called*/
+#define SQLITE_FUNC_LENGTH 0x0040 /* Built-in length() function */
+#define SQLITE_FUNC_TYPEOF 0x0080 /* Built-in typeof() function */
+#define SQLITE_FUNC_COUNT 0x0100 /* Built-in count(*) aggregate */
+/* 0x0200 -- available for reuse */
+#define SQLITE_FUNC_UNLIKELY 0x0400 /* Built-in unlikely() function */
+#define SQLITE_FUNC_CONSTANT 0x0800 /* Constant inputs give a constant output */
+#define SQLITE_FUNC_MINMAX 0x1000 /* True for min() and max() aggregates */
+#define SQLITE_FUNC_SLOCHNG 0x2000 /* "Slow Change". Value constant during a
+ ** single query - might change over time */
+#define SQLITE_FUNC_TEST 0x4000 /* Built-in testing functions */
+#define SQLITE_FUNC_OFFSET 0x8000 /* Built-in sqlite_offset() function */
+#define SQLITE_FUNC_WINDOW 0x00010000 /* Built-in window-only function */
+#define SQLITE_FUNC_INTERNAL 0x00040000 /* For use by NestedParse() only */
+#define SQLITE_FUNC_DIRECT 0x00080000 /* Not for use in TRIGGERs or VIEWs */
+#define SQLITE_FUNC_SUBTYPE 0x00100000 /* Result likely to have sub-type */
+#define SQLITE_FUNC_UNSAFE 0x00200000 /* Function has side effects */
+#define SQLITE_FUNC_INLINE 0x00400000 /* Functions implemented in-line */
+
+/* Identifier numbers for each in-line function */
+#define INLINEFUNC_coalesce 0
+#define INLINEFUNC_implies_nonnull_row 1
+#define INLINEFUNC_expr_implies_expr 2
+#define INLINEFUNC_expr_compare 3
+#define INLINEFUNC_affinity 4
+#define INLINEFUNC_iif 5
+#define INLINEFUNC_unlikely 99 /* Default case */
+
+/*
+** The following three macros, FUNCTION(), LIKEFUNC() and AGGREGATE() are
+** used to create the initializers for the FuncDef structures.
+**
+** FUNCTION(zName, nArg, iArg, bNC, xFunc)
+** Used to create a scalar function definition of a function zName
+** implemented by C function xFunc that accepts nArg arguments. The
+** value passed as iArg is cast to a (void*) and made available
+** as the user-data (sqlite3_user_data()) for the function. If
+** argument bNC is true, then the SQLITE_FUNC_NEEDCOLL flag is set.
+**
+** VFUNCTION(zName, nArg, iArg, bNC, xFunc)
+** Like FUNCTION except it omits the SQLITE_FUNC_CONSTANT flag.
+**
+** SFUNCTION(zName, nArg, iArg, bNC, xFunc)
+** Like FUNCTION except it omits the SQLITE_FUNC_CONSTANT flag and
+** adds the SQLITE_DIRECTONLY flag.
+**
+** INLINE_FUNC(zName, nArg, iFuncId, mFlags)
+** zName is the name of a function that is implemented by in-line
+** byte code rather than by the usual callbacks. The iFuncId
+** parameter determines the function id. The mFlags parameter is
+** optional SQLITE_FUNC_ flags for this function.
+**
+** TEST_FUNC(zName, nArg, iFuncId, mFlags)
+** zName is the name of a test-only function implemented by in-line
+** byte code rather than by the usual callbacks. The iFuncId
+** parameter determines the function id. The mFlags parameter is
+** optional SQLITE_FUNC_ flags for this function.
+**
+** DFUNCTION(zName, nArg, iArg, bNC, xFunc)
+** Like FUNCTION except it omits the SQLITE_FUNC_CONSTANT flag and
+** adds the SQLITE_FUNC_SLOCHNG flag. Used for date & time functions
+** and functions like sqlite_version() that can change, but not during
+** a single query. The iArg is ignored. The user-data is always set
+** to a NULL pointer. The bNC parameter is not used.
+**
+** PURE_DATE(zName, nArg, iArg, bNC, xFunc)
+** Used for "pure" date/time functions, this macro is like DFUNCTION
+** except that it does set the SQLITE_FUNC_CONSTANT flags. iArg is
+** ignored and the user-data for these functions is set to an
+** arbitrary non-NULL pointer. The bNC parameter is not used.
+**
+** AGGREGATE(zName, nArg, iArg, bNC, xStep, xFinal)
+** Used to create an aggregate function definition implemented by
+** the C functions xStep and xFinal. The first four parameters
+** are interpreted in the same way as the first 4 parameters to
+** FUNCTION().
+**
+** WFUNCTION(zName, nArg, iArg, xStep, xFinal, xValue, xInverse)
+** Used to create an aggregate function definition implemented by
+** the C functions xStep and xFinal. The first four parameters
+** are interpreted in the same way as the first 4 parameters to
+** FUNCTION().
+**
+** LIKEFUNC(zName, nArg, pArg, flags)
+** Used to create a scalar function definition of a function zName
+** that accepts nArg arguments and is implemented by a call to C
+** function likeFunc. Argument pArg is cast to a (void *) and made
+** available as the function user-data (sqlite3_user_data()). The
+** FuncDef.flags variable is set to the value passed as the flags
+** parameter.
+*/
+#define FUNCTION(zName, nArg, iArg, bNC, xFunc) \
+ {nArg, SQLITE_FUNC_CONSTANT|SQLITE_UTF8|(bNC*SQLITE_FUNC_NEEDCOLL), \
+ SQLITE_INT_TO_PTR(iArg), 0, xFunc, 0, 0, 0, #zName, {0} }
+#define VFUNCTION(zName, nArg, iArg, bNC, xFunc) \
+ {nArg, SQLITE_UTF8|(bNC*SQLITE_FUNC_NEEDCOLL), \
+ SQLITE_INT_TO_PTR(iArg), 0, xFunc, 0, 0, 0, #zName, {0} }
+#define SFUNCTION(zName, nArg, iArg, bNC, xFunc) \
+ {nArg, SQLITE_UTF8|SQLITE_DIRECTONLY|SQLITE_FUNC_UNSAFE, \
+ SQLITE_INT_TO_PTR(iArg), 0, xFunc, 0, 0, 0, #zName, {0} }
+#define INLINE_FUNC(zName, nArg, iArg, mFlags) \
+ {nArg, SQLITE_UTF8|SQLITE_FUNC_INLINE|SQLITE_FUNC_CONSTANT|(mFlags), \
+ SQLITE_INT_TO_PTR(iArg), 0, noopFunc, 0, 0, 0, #zName, {0} }
+#define TEST_FUNC(zName, nArg, iArg, mFlags) \
+ {nArg, SQLITE_UTF8|SQLITE_FUNC_INTERNAL|SQLITE_FUNC_TEST| \
+ SQLITE_FUNC_INLINE|SQLITE_FUNC_CONSTANT|(mFlags), \
+ SQLITE_INT_TO_PTR(iArg), 0, noopFunc, 0, 0, 0, #zName, {0} }
+#define DFUNCTION(zName, nArg, iArg, bNC, xFunc) \
+ {nArg, SQLITE_FUNC_SLOCHNG|SQLITE_UTF8, \
+ 0, 0, xFunc, 0, 0, 0, #zName, {0} }
+#define PURE_DATE(zName, nArg, iArg, bNC, xFunc) \
+ {nArg, SQLITE_FUNC_SLOCHNG|SQLITE_UTF8|SQLITE_FUNC_CONSTANT, \
+ (void*)&sqlite3Config, 0, xFunc, 0, 0, 0, #zName, {0} }
+#define FUNCTION2(zName, nArg, iArg, bNC, xFunc, extraFlags) \
+ {nArg,SQLITE_FUNC_CONSTANT|SQLITE_UTF8|(bNC*SQLITE_FUNC_NEEDCOLL)|extraFlags,\
+ SQLITE_INT_TO_PTR(iArg), 0, xFunc, 0, 0, 0, #zName, {0} }
+#define STR_FUNCTION(zName, nArg, pArg, bNC, xFunc) \
+ {nArg, SQLITE_FUNC_SLOCHNG|SQLITE_UTF8|(bNC*SQLITE_FUNC_NEEDCOLL), \
+ pArg, 0, xFunc, 0, 0, 0, #zName, }
+#define LIKEFUNC(zName, nArg, arg, flags) \
+ {nArg, SQLITE_FUNC_CONSTANT|SQLITE_UTF8|flags, \
+ (void *)arg, 0, likeFunc, 0, 0, 0, #zName, {0} }
+#define WAGGREGATE(zName, nArg, arg, nc, xStep, xFinal, xValue, xInverse, f) \
+ {nArg, SQLITE_UTF8|(nc*SQLITE_FUNC_NEEDCOLL)|f, \
+ SQLITE_INT_TO_PTR(arg), 0, xStep,xFinal,xValue,xInverse,#zName, {0}}
+#define INTERNAL_FUNCTION(zName, nArg, xFunc) \
+ {nArg, SQLITE_FUNC_INTERNAL|SQLITE_UTF8|SQLITE_FUNC_CONSTANT, \
+ 0, 0, xFunc, 0, 0, 0, #zName, {0} }
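+
+/* Illustrative sketch (not part of the amalgamation): how the macros
+** above are used to build a table of FuncDef initializers. The callback
+** names are hypothetical.
+*/
+#if 0
+static void lowerFunc(sqlite3_context*,int,sqlite3_value**);   /* hypothetical */
+static void substrFunc(sqlite3_context*,int,sqlite3_value**);  /* hypothetical */
+static FuncDef aExampleFuncs[] = {
+  FUNCTION(lower,  1, 0, 0, lowerFunc),    /* scalar lower(X) */
+  FUNCTION(substr, 3, 0, 0, substrFunc),   /* scalar substr(X,Y,Z) */
+};
+#endif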
+
+
+/*
+** All current savepoints are stored in a linked list starting at
+** sqlite3.pSavepoint. The first element in the list is the most recently
+** opened savepoint. Savepoints are added to the list by the vdbe
+** OP_Savepoint instruction.
+*/
+struct Savepoint {
+ char *zName; /* Savepoint name (nul-terminated) */
+ i64 nDeferredCons; /* Number of deferred fk violations */
+ i64 nDeferredImmCons; /* Number of deferred imm fk. */
+ Savepoint *pNext; /* Parent savepoint (if any) */
+};
+
+/*
+** The following are used as the second parameter to sqlite3Savepoint(),
+** and as the P1 argument to the OP_Savepoint instruction.
+*/
+#define SAVEPOINT_BEGIN 0
+#define SAVEPOINT_RELEASE 1
+#define SAVEPOINT_ROLLBACK 2
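+
+/* Illustrative sketch (not part of the amalgamation): walking the list
+** headed by sqlite3.pSavepoint. The head is the most recently opened
+** savepoint, so the first name match found is the innermost one. The
+** helper is hypothetical; real lookups compare names case-insensitively,
+** so plain strcmp() here is a simplification.
+*/
+#if 0
+#include <string.h>
+static Savepoint *exampleFindSavepoint(sqlite3 *db, const char *zName){
+  Savepoint *p;
+  for(p=db->pSavepoint; p; p=p->pNext){
+    if( strcmp(p->zName, zName)==0 ) break;
+  }
+  return p;
+}
+#endif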
+
+
+/*
+** Each SQLite module (virtual table definition) is defined by an
+** instance of the following structure, stored in the sqlite3.aModule
+** hash table.
+*/
+struct Module {
+ const sqlite3_module *pModule; /* Callback pointers */
+ const char *zName; /* Name passed to create_module() */
+ int nRefModule; /* Number of pointers to this object */
+ void *pAux; /* pAux passed to create_module() */
+ void (*xDestroy)(void *); /* Module destructor function */
+ Table *pEpoTab; /* Eponymous table for this module */
+};
+
+/*
+** Information about each column of an SQL table is held in an instance
+** of the Column structure, in the Table.aCol[] array.
+**
+** Definitions:
+**
+** "table column index" This is the index of the column in the
+** Table.aCol[] array, and also the index of
+** the column in the original CREATE TABLE stmt.
+**
+** "storage column index" This is the index of the column in the
+** record BLOB generated by the OP_MakeRecord
+** opcode. The storage column index is less than
+** or equal to the table column index. It is
+** equal if and only if there are no VIRTUAL
+** columns to the left.
+*/
+struct Column {
+ char *zName; /* Name of this column, \000, then the type */
+ Expr *pDflt; /* Default value or GENERATED ALWAYS AS value */
+ char *zColl; /* Collating sequence. If NULL, use the default */
+ u8 notNull; /* An OE_ code for handling a NOT NULL constraint */
+ char affinity; /* One of the SQLITE_AFF_... values */
+ u8 szEst; /* Estimated size of value in this column. sizeof(INT)==1 */
+ u8 hName; /* Column name hash for faster lookup */
+ u16 colFlags; /* Boolean properties. See COLFLAG_ defines below */
+};
+
+/* Allowed values for Column.colFlags:
+*/
+#define COLFLAG_PRIMKEY 0x0001 /* Column is part of the primary key */
+#define COLFLAG_HIDDEN 0x0002 /* A hidden column in a virtual table */
+#define COLFLAG_HASTYPE 0x0004 /* Type name follows column name */
+#define COLFLAG_UNIQUE 0x0008 /* Column def contains "UNIQUE" or "PK" */
+#define COLFLAG_SORTERREF 0x0010 /* Use sorter-refs with this column */
+#define COLFLAG_VIRTUAL 0x0020 /* GENERATED ALWAYS AS ... VIRTUAL */
+#define COLFLAG_STORED 0x0040 /* GENERATED ALWAYS AS ... STORED */
+#define COLFLAG_NOTAVAIL 0x0080 /* STORED column not yet calculated */
+#define COLFLAG_BUSY 0x0100 /* Blocks recursion on GENERATED columns */
+#define COLFLAG_GENERATED 0x0060 /* Combo: _STORED, _VIRTUAL */
+#define COLFLAG_NOINSERT 0x0062 /* Combo: _HIDDEN, _STORED, _VIRTUAL */
+
+/*
+** A "Collating Sequence" is defined by an instance of the following
+** structure. Conceptually, a collating sequence consists of a name and
+** a comparison routine that defines the order of that sequence.
+**
+** If CollSeq.xCmp is NULL, it means that the
+** collating sequence is undefined. Indices built on an undefined
+** collating sequence may not be read or written.
+*/
+struct CollSeq {
+ char *zName; /* Name of the collating sequence, UTF-8 encoded */
+ u8 enc; /* Text encoding handled by xCmp() */
+ void *pUser; /* First argument to xCmp() */
+ int (*xCmp)(void*,int, const void*, int, const void*);
+ void (*xDel)(void*); /* Destructor for pUser */
+};
+
+/*
+** A sort order can be either ASC or DESC.
+*/
+#define SQLITE_SO_ASC 0 /* Sort in ascending order */
+#define SQLITE_SO_DESC 1 /* Sort in descending order */
+#define SQLITE_SO_UNDEFINED -1 /* No sort order specified */
+
+/*
+** Column affinity types.
+**
+** These used to have mnemonic names like 'i' for SQLITE_AFF_INTEGER and
+** 't' for SQLITE_AFF_TEXT. But we can save a little space and improve
+** the speed a little by numbering the values consecutively.
+**
+** But rather than start with 0 or 1, we begin with 'A'. That way,
+** when multiple affinity types are concatenated into a string and
+** used as the P4 operand, they will be more readable.
+**
+** Note also that the numeric types are grouped together so that testing
+** for a numeric type is a single comparison. And the BLOB type is first.
+*/
+#define SQLITE_AFF_NONE 0x40 /* '@' */
+#define SQLITE_AFF_BLOB 0x41 /* 'A' */
+#define SQLITE_AFF_TEXT 0x42 /* 'B' */
+#define SQLITE_AFF_NUMERIC 0x43 /* 'C' */
+#define SQLITE_AFF_INTEGER 0x44 /* 'D' */
+#define SQLITE_AFF_REAL 0x45 /* 'E' */
+
+#define sqlite3IsNumericAffinity(X) ((X)>=SQLITE_AFF_NUMERIC)
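+
+/* Illustrative sketch (not part of the amalgamation): because NUMERIC,
+** INTEGER and REAL are assigned consecutive codes at the top of the
+** range, a single comparison classifies an affinity as numeric.
+*/
+#if 0
+#include <assert.h>
+static void exampleAffinityChecks(void){
+  assert(  sqlite3IsNumericAffinity(SQLITE_AFF_REAL) );
+  assert( !sqlite3IsNumericAffinity(SQLITE_AFF_TEXT) );
+}
+#endif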
+
+/*
+** The SQLITE_AFF_MASK value masks off the significant bits of an
+** affinity value.
+*/
+#define SQLITE_AFF_MASK 0x47
+
+/*
+** Additional bit values that can be ORed with an affinity without
+** changing the affinity.
+**
+** The SQLITE_NOTNULL flag is a combination of NULLEQ and JUMPIFNULL.
+** It causes an assert() to fire if either operand to a comparison
+** operator is NULL. It is added to certain comparison operators to
+** prove that the operands are always NOT NULL.
+*/
+#define SQLITE_KEEPNULL 0x08 /* Used by vector == or <> */
+#define SQLITE_JUMPIFNULL 0x10 /* jumps if either operand is NULL */
+#define SQLITE_STOREP2 0x20 /* Store result in reg[P2] rather than jump */
+#define SQLITE_NULLEQ 0x80 /* NULL=NULL */
+#define SQLITE_NOTNULL 0x90 /* Assert that operands are never NULL */
+
+/*
+** An object of this type is created for each virtual table present in
+** the database schema.
+**
+** If the database schema is shared, then there is one instance of this
+** structure for each database connection (sqlite3*) that uses the shared
+** schema. This is because each database connection requires its own unique
+** instance of the sqlite3_vtab* handle used to access the virtual table
+** implementation. sqlite3_vtab* handles can not be shared between
+** database connections, even when the rest of the in-memory database
+** schema is shared, as the implementation often internally stores the
+** database connection handle passed to it via the xConnect() or xCreate()
+** method during initialization. This database connection handle may
+** then be used by the virtual table implementation to access real tables
+** within the database. So that they appear as part of the caller's
+** transaction, these accesses need to be made via the same database
+** connection as that used to execute SQL operations on the virtual table.
+**
+** All VTable objects that correspond to a single table in a shared
+** database schema are initially stored in a linked-list pointed to by
+** the Table.pVTable member variable of the corresponding Table object.
+** When an sqlite3_prepare() operation is required to access the virtual
+** table, it searches the list for the VTable that corresponds to the
+** database connection doing the preparing so as to use the correct
+** sqlite3_vtab* handle in the compiled query.
+**
+** When an in-memory Table object is deleted (for example when the
+** schema is being reloaded for some reason), the VTable objects are not
+** deleted and the sqlite3_vtab* handles are not xDisconnect()ed
+** immediately. Instead, they are moved from the Table.pVTable list to
+** another linked list headed by the sqlite3.pDisconnect member of the
+** corresponding sqlite3 structure. They are then deleted/xDisconnected
+** next time a statement is prepared using said sqlite3*. This is done
+** to avoid deadlock issues involving multiple sqlite3.mutex mutexes.
+** Refer to comments above function sqlite3VtabUnlockList() for an
+** explanation as to why it is safe to add an entry to an sqlite3.pDisconnect
+** list without holding the corresponding sqlite3.mutex mutex.
+**
+** The memory for objects of this type is always allocated by
+** sqlite3DbMalloc(), using the connection handle stored in VTable.db as
+** the first argument.
+*/
+struct VTable {
+ sqlite3 *db; /* Database connection associated with this table */
+ Module *pMod; /* Pointer to module implementation */
+ sqlite3_vtab *pVtab; /* Pointer to vtab instance */
+ int nRef; /* Number of pointers to this structure */
+ u8 bConstraint; /* True if constraints are supported */
+ u8 eVtabRisk; /* Riskiness of allowing hacker access */
+ int iSavepoint; /* Depth of the SAVEPOINT stack */
+ VTable *pNext; /* Next in linked list (see above) */
+};
+
+/* Allowed values for VTable.eVtabRisk
+*/
+#define SQLITE_VTABRISK_Low 0
+#define SQLITE_VTABRISK_Normal 1
+#define SQLITE_VTABRISK_High 2
+
+/*
+** The schema for each SQL table and view is represented in memory
+** by an instance of the following structure.
+*/
+struct Table {
+ char *zName; /* Name of the table or view */
+ Column *aCol; /* Information about each column */
+ Index *pIndex; /* List of SQL indexes on this table. */
+ Select *pSelect; /* NULL for tables. Points to definition if a view. */
+ FKey *pFKey; /* Linked list of all foreign keys in this table */
+ char *zColAff; /* String defining the affinity of each column */
+ ExprList *pCheck; /* All CHECK constraints */
+ /* ... also used as column name list in a VIEW */
+ Pgno tnum; /* Root BTree page for this table */
+ u32 nTabRef; /* Number of pointers to this Table */
+ u32 tabFlags; /* Mask of TF_* values */
+ i16 iPKey; /* If not negative, use aCol[iPKey] as the rowid */
+ i16 nCol; /* Number of columns in this table */
+ i16 nNVCol; /* Number of columns that are not VIRTUAL */
+ LogEst nRowLogEst; /* Estimated rows in table - from sqlite_stat1 table */
+ LogEst szTabRow; /* Estimated size of each table row in bytes */
+#ifdef SQLITE_ENABLE_COSTMULT
+ LogEst costMult; /* Cost multiplier for using this table */
+#endif
+ u8 keyConf; /* What to do in case of uniqueness conflict on iPKey */
+#ifndef SQLITE_OMIT_ALTERTABLE
+ int addColOffset; /* Offset in CREATE TABLE stmt to add a new column */
+#endif
+#ifndef SQLITE_OMIT_VIRTUALTABLE
+ int nModuleArg; /* Number of arguments to the module */
+ char **azModuleArg; /* 0: module 1: schema 2: vtab name 3...: args */
+ VTable *pVTable; /* List of VTable objects. */
+#endif
+ Trigger *pTrigger; /* List of triggers stored in pSchema */
+ Schema *pSchema; /* Schema that contains this table */
+ Table *pNextZombie; /* Next on the Parse.pZombieTab list */
+};
+
+/*
+** Allowed values for Table.tabFlags.
+**
+** TF_OOOHidden applies to tables or views that have hidden columns that are
+** followed by non-hidden columns. Example: "CREATE VIRTUAL TABLE x USING
+** vtab1(a HIDDEN, b);". Since "b" is a non-hidden column but "a" is hidden,
+** the TF_OOOHidden attribute would apply in this case. Such tables require
+** special handling during INSERT processing. The "OOO" means "Out Of Order".
+**
+** Constraints:
+**
+** TF_HasVirtual == COLFLAG_Virtual
+** TF_HasStored == COLFLAG_Stored
+*/
+#define TF_Readonly 0x0001 /* Read-only system table */
+#define TF_Ephemeral 0x0002 /* An ephemeral table */
+#define TF_HasPrimaryKey 0x0004 /* Table has a primary key */
+#define TF_Autoincrement 0x0008 /* Integer primary key is autoincrement */
+#define TF_HasStat1 0x0010 /* nRowLogEst set from sqlite_stat1 */
+#define TF_HasVirtual 0x0020 /* Has one or more VIRTUAL columns */
+#define TF_HasStored 0x0040 /* Has one or more STORED columns */
+#define TF_HasGenerated 0x0060 /* Combo: HasVirtual + HasStored */
+#define TF_WithoutRowid 0x0080 /* No rowid. PRIMARY KEY is the key */
+#define TF_StatsUsed 0x0100 /* Query planner decisions affected by
+ ** Index.aiRowLogEst[] values */
+#define TF_NoVisibleRowid 0x0200 /* No user-visible "rowid" column */
+#define TF_OOOHidden 0x0400 /* Out-of-Order hidden columns */
+#define TF_HasNotNull 0x0800 /* Contains NOT NULL constraints */
+#define TF_Shadow 0x1000 /* True for a shadow table */
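+
+/* Sketch for illustration (not part of the original code): TF_HasGenerated
+** is the OR of the two generated-column bits, so one mask test covers both
+** kinds of generated column:
+**
+**     assert( TF_HasGenerated==(TF_HasVirtual|TF_HasStored) );
+**     if( pTab->tabFlags & TF_HasGenerated ){ .. handle generated cols .. }
+*/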
+
+/*
+** Test to see whether or not a table is a virtual table. This is
+** done as a macro so that it will be optimized out when virtual
+** table support is omitted from the build.
+*/
+#ifndef SQLITE_OMIT_VIRTUALTABLE
+# define IsVirtual(X) ((X)->nModuleArg)
+# define ExprIsVtab(X) \
+ ((X)->op==TK_COLUMN && (X)->y.pTab!=0 && (X)->y.pTab->nModuleArg)
+#else
+# define IsVirtual(X) 0
+# define ExprIsVtab(X) 0
+#endif
+
+/*
+** Macros to determine if a column is hidden. IsOrdinaryHiddenColumn()
+** only works for non-virtual tables (ordinary tables and views) and is
+** always false unless SQLITE_ENABLE_HIDDEN_COLUMNS is defined. The
+** IsHiddenColumn() macro is general purpose.
+*/
+#if defined(SQLITE_ENABLE_HIDDEN_COLUMNS)
+# define IsHiddenColumn(X) (((X)->colFlags & COLFLAG_HIDDEN)!=0)
+# define IsOrdinaryHiddenColumn(X) (((X)->colFlags & COLFLAG_HIDDEN)!=0)
+#elif !defined(SQLITE_OMIT_VIRTUALTABLE)
+# define IsHiddenColumn(X) (((X)->colFlags & COLFLAG_HIDDEN)!=0)
+# define IsOrdinaryHiddenColumn(X) 0
+#else
+# define IsHiddenColumn(X) 0
+# define IsOrdinaryHiddenColumn(X) 0
+#endif
+
+
+/* Does the table have a rowid */
+#define HasRowid(X) (((X)->tabFlags & TF_WithoutRowid)==0)
+#define VisibleRowid(X) (((X)->tabFlags & TF_NoVisibleRowid)==0)
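+
+/* Sketch for illustration (not part of the original code): for a table
+** created with "CREATE TABLE t(a INT PRIMARY KEY, b) WITHOUT ROWID" the
+** TF_WithoutRowid bit is set in Table.tabFlags, so:
+**
+**     if( !HasRowid(pTab) ){ .. key is the PRIMARY KEY, not a rowid .. }
+*/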
+
+/*
+** Each foreign key constraint is an instance of the following structure.
+**
+** A foreign key is associated with two tables. The "from" table is
+** the table that contains the REFERENCES clause that creates the foreign
+** key. The "to" table is the table that is named in the REFERENCES clause.
+** Consider this example:
+**
+** CREATE TABLE ex1(
+** a INTEGER PRIMARY KEY,
+** b INTEGER CONSTRAINT fk1 REFERENCES ex2(x)
+** );
+**
+** For foreign key "fk1", the from-table is "ex1" and the to-table is "ex2".
+** Equivalent names:
+**
+** from-table == child-table
+** to-table == parent-table
+**
+** Each REFERENCES clause generates an instance of the following structure
+** which is attached to the from-table. The to-table need not exist when
+** the from-table is created. The existence of the to-table is not checked.
+**
+** The list of all parents for child Table X is held at X.pFKey.
+**
+** A list of all children for a table named Z (which might not even exist)
+** is held in Schema.fkeyHash with a hash key of Z.
+*/
+struct FKey {
+ Table *pFrom; /* Table containing the REFERENCES clause (aka: Child) */
+ FKey *pNextFrom; /* Next FKey with the same pFrom. Next parent of pFrom */
+ char *zTo; /* Name of table that the key points to (aka: Parent) */
+ FKey *pNextTo; /* Next with the same zTo. Next child of zTo. */
+ FKey *pPrevTo; /* Previous with the same zTo */
+ int nCol; /* Number of columns in this key */
+ /* EV: R-30323-21917 */
+ u8 isDeferred; /* True if constraint checking is deferred till COMMIT */
+ u8 aAction[2]; /* ON DELETE and ON UPDATE actions, respectively */
+ Trigger *apTrigger[2];/* Triggers for aAction[] actions */
+ struct sColMap { /* Mapping of columns in pFrom to columns in zTo */
+ int iFrom; /* Index of column in pFrom */
+ char *zCol; /* Name of column in zTo. If NULL use PRIMARY KEY */
+ } aCol[1]; /* One entry for each of nCol columns */
+};
+
+/*
+** SQLite supports many different ways to resolve a constraint
+** error. ROLLBACK processing means that a constraint violation
+** causes the operation in process to fail and for the current transaction
+** to be rolled back. ABORT processing means the operation in process
+** fails and any prior changes from that one operation are backed out,
+** but the transaction is not rolled back. FAIL processing means that
+** the operation in progress stops and returns an error code. But prior
+** changes due to the same operation are not backed out and no rollback
+** occurs. IGNORE means that the particular row that caused the constraint
+** error is not inserted or updated. Processing continues and no error
+** is returned. REPLACE means that preexisting database rows that caused
+** a UNIQUE constraint violation are removed so that the new insert or
+** update can proceed. Processing continues and no error is reported.
+**
+** RESTRICT, SETNULL, and CASCADE actions apply only to foreign keys.
+** RESTRICT is the same as ABORT for IMMEDIATE foreign keys and the
+** same as ROLLBACK for DEFERRED keys. SETNULL means that the foreign
+** key is set to NULL. CASCADE means that a DELETE or UPDATE of the
+** referenced table row is propagated into the row that holds the
+** foreign key.
+**
+** The following symbolic values are used to record which type
+** of action to take.
+*/
+#define OE_None 0 /* There is no constraint to check */
+#define OE_Rollback 1 /* Fail the operation and rollback the transaction */
+#define OE_Abort 2 /* Back out changes but do no rollback transaction */
+#define OE_Fail 3 /* Stop the operation but leave all prior changes */
+#define OE_Ignore 4 /* Ignore the error. Do not do the INSERT or UPDATE */
+#define OE_Replace 5 /* Delete existing record, then do INSERT or UPDATE */
+#define OE_Update 6 /* Process as a DO UPDATE in an upsert */
+#define OE_Restrict 7 /* OE_Abort for IMMEDIATE, OE_Rollback for DEFERRED */
+#define OE_SetNull 8 /* Set the foreign key value to NULL */
+#define OE_SetDflt 9 /* Set the foreign key value to its default */
+#define OE_Cascade 10 /* Cascade the changes */
+#define OE_Default 11 /* Do whatever the default action is */
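+
+/* Sketch for illustration (not part of the original code): per the FKey
+** comments above, aAction[0] holds the ON DELETE action and aAction[1]
+** the ON UPDATE action, so a constraint declared as
+**
+**     FOREIGN KEY(x) REFERENCES p(y) ON DELETE SET NULL ON UPDATE CASCADE
+**
+** would be recorded as aAction[0]==OE_SetNull and aAction[1]==OE_Cascade.
+*/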
+
+
+/*
+** An instance of the following structure is passed as the first
+** argument to sqlite3VdbeKeyCompare and is used to control the
+** comparison of the two index keys.
+**
+** Note that aSortFlags[] and aColl[] have nKeyField+1 slots. There
+** are nKeyField slots for the columns of an index then one extra slot
+** for the rowid at the end.
+*/
+struct KeyInfo {
+ u32 nRef; /* Number of references to this KeyInfo object */
+ u8 enc; /* Text encoding - one of the SQLITE_UTF* values */
+ u16 nKeyField; /* Number of key columns in the index */
+ u16 nAllField; /* Total columns, including key plus others */
+ sqlite3 *db; /* The database connection */
+ u8 *aSortFlags; /* Sort order for each column. */
+ CollSeq *aColl[1]; /* Collating sequence for each term of the key */
+};
+
+/*
+** Allowed bit values for entries in the KeyInfo.aSortFlags[] array.
+*/
+#define KEYINFO_ORDER_DESC 0x01 /* DESC sort order */
+#define KEYINFO_ORDER_BIGNULL 0x02 /* NULL is larger than any other value */
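+
+/* Sketch for illustration (not part of the original code): the two bits
+** may be combined per key column, e.g. a column sorted descending with
+** NULLs treated as larger than every other value:
+**
+**     pKeyInfo->aSortFlags[i] = KEYINFO_ORDER_DESC|KEYINFO_ORDER_BIGNULL;
+*/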
+
+/*
+** This object holds a record which has been parsed out into individual
+** fields, for the purposes of doing a comparison.
+**
+** A record is an object that contains one or more fields of data.
+** Records are used to store the content of a table row and to store
+** the key of an index. A blob encoding of a record is created by
+** the OP_MakeRecord opcode of the VDBE and is disassembled by the
+** OP_Column opcode.
+**
+** An instance of this object serves as a "key" for doing a search on
+** an index b+tree. The goal of the search is to find the entry that
+** is closest to the key described by this object. This object might hold
+** just a prefix of the key. The number of fields is given by
+** pKeyInfo->nKeyField.
+**
+** The r1 and r2 fields are the values to return if this key is less than
+** or greater than a key in the btree, respectively. These are normally
+** -1 and +1 respectively, but might be inverted to +1 and -1 if the b-tree
+** is in DESC order.
+**
+** The key comparison functions actually return default_rc when they find
+** an equals comparison. default_rc can be -1, 0, or +1. If there are
+** multiple entries in the b-tree with the same key (when only looking
+** at the first pKeyInfo->nKeyField fields), then default_rc can be set to -1 to
+** cause the search to find the last match, or +1 to cause the search to
+** find the first match.
+**
+** The key comparison functions will set eqSeen to true if they ever
+** get an equal result when comparing this structure to a b-tree record.
+** When default_rc!=0, the search might end up on the record immediately
+** before the first match or immediately after the last match. The
+** eqSeen field will indicate whether or not an exact match exists in the
+** b-tree.
+*/
+struct UnpackedRecord {
+ KeyInfo *pKeyInfo; /* Collation and sort-order information */
+ Mem *aMem; /* Values */
+ u16 nField; /* Number of entries in aMem[] */
+ i8 default_rc; /* Comparison result if keys are equal */
+ u8 errCode; /* Error detected by xRecordCompare (CORRUPT or NOMEM) */
+ i8 r1; /* Value to return if (lhs < rhs) */
+ i8 r2; /* Value to return if (lhs > rhs) */
+ u8 eqSeen; /* True if an equality comparison has been seen */
+};
+
+
+/*
+** Each SQL index is represented in memory by an
+** instance of the following structure.
+**
+** The columns of the table that are to be indexed are described
+** by the aiColumn[] field of this structure. For example, suppose
+** we have the following table and index:
+**
+** CREATE TABLE Ex1(c1 int, c2 int, c3 text);
+** CREATE INDEX Ex2 ON Ex1(c3,c1);
+**
+** In the Table structure describing Ex1, nCol==3 because there are
+** three columns in the table. In the Index structure describing
+** Ex2, nColumn==2 since 2 of the 3 columns of Ex1 are indexed.
+** The value of aiColumn is {2, 0}. aiColumn[0]==2 because the
+** first column to be indexed (c3) has an index of 2 in Ex1.aCol[].
+** The second column to be indexed (c1) has an index of 0 in
+** Ex1.aCol[], hence Ex2.aiColumn[1]==0.
+**
+** The Index.onError field determines whether or not the indexed columns
+** must be unique and what to do if they are not. When Index.onError=OE_None,
+** it means this is not a unique index. Otherwise it is a unique index
+** and the value of Index.onError indicates which conflict resolution
+** algorithm to employ whenever an attempt is made to insert a non-unique
+** element.
+**
+** While parsing a CREATE TABLE or CREATE INDEX statement in order to
+** generate VDBE code (as opposed to parsing one read from an sqlite_schema
+** table as part of parsing an existing database schema), transient instances
+** of this structure may be created. In this case the Index.tnum variable is
+** used to store the address of a VDBE instruction, not a database page
+** number (it cannot - the database page is not allocated until the VDBE
+** program is executed). See convertToWithoutRowidTable() for details.
+*/
+struct Index {
+ char *zName; /* Name of this index */
+ i16 *aiColumn; /* Which columns are used by this index. 1st is 0 */
+ LogEst *aiRowLogEst; /* From ANALYZE: Est. rows selected by each column */
+ Table *pTable; /* The SQL table being indexed */
+ char *zColAff; /* String defining the affinity of each column */
+ Index *pNext; /* The next index associated with the same table */
+ Schema *pSchema; /* Schema containing this index */
+ u8 *aSortOrder; /* for each column: True==DESC, False==ASC */
+ const char **azColl; /* Array of collation sequence names for index */
+ Expr *pPartIdxWhere; /* WHERE clause for partial indices */
+ ExprList *aColExpr; /* Column expressions */
+ Pgno tnum; /* DB Page containing root of this index */
+ LogEst szIdxRow; /* Estimated average row size in bytes */
+ u16 nKeyCol; /* Number of columns forming the key */
+ u16 nColumn; /* Number of columns stored in the index */
+ u8 onError; /* OE_Abort, OE_Ignore, OE_Replace, or OE_None */
+ unsigned idxType:2; /* 0:Normal 1:UNIQUE, 2:PRIMARY KEY, 3:IPK */
+ unsigned bUnordered:1; /* Use this index for == or IN queries only */
+ unsigned uniqNotNull:1; /* True if UNIQUE and NOT NULL for all columns */
+ unsigned isResized:1; /* True if resizeIndexObject() has been called */
+ unsigned isCovering:1; /* True if this is a covering index */
+ unsigned noSkipScan:1; /* Do not try to use skip-scan if true */
+ unsigned hasStat1:1; /* aiRowLogEst values come from sqlite_stat1 */
+ unsigned bNoQuery:1; /* Do not use this index to optimize queries */
+ unsigned bAscKeyBug:1; /* True if the bba7b69f9849b5bf bug applies */
+ unsigned bHasVCol:1; /* Index references one or more VIRTUAL columns */
+#ifdef SQLITE_ENABLE_STAT4
+ int nSample; /* Number of elements in aSample[] */
+ int nSampleCol; /* Size of IndexSample.anEq[] and so on */
+ tRowcnt *aAvgEq; /* Average nEq values for keys not in aSample */
+ IndexSample *aSample; /* Samples of the left-most key */
+ tRowcnt *aiRowEst; /* Non-logarithmic stat1 data for this index */
+ tRowcnt nRowEst0; /* Non-logarithmic number of rows in the index */
+#endif
+ Bitmask colNotIdxed; /* 0 for unindexed columns in pTab */
+};
+
+/*
+** Allowed values for Index.idxType
+*/
+#define SQLITE_IDXTYPE_APPDEF 0 /* Created using CREATE INDEX */
+#define SQLITE_IDXTYPE_UNIQUE 1 /* Implements a UNIQUE constraint */
+#define SQLITE_IDXTYPE_PRIMARYKEY 2 /* Is the PRIMARY KEY for the table */
+#define SQLITE_IDXTYPE_IPK 3 /* INTEGER PRIMARY KEY index */
+
+/* Return true if index X is a PRIMARY KEY index */
+#define IsPrimaryKeyIndex(X) ((X)->idxType==SQLITE_IDXTYPE_PRIMARYKEY)
+
+/* Return true if index X is a UNIQUE index */
+#define IsUniqueIndex(X) ((X)->onError!=OE_None)
+
+/* The Index.aiColumn[] values are normally positive integers. But
+** there are some negative values that have special meaning:
+*/
+#define XN_ROWID (-1) /* Indexed column is the rowid */
+#define XN_EXPR (-2) /* Indexed column is an expression */
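+
+/* Sketch for illustration (not part of the original code): for an index
+** on an expression, such as "CREATE INDEX i1 ON t1(a+b)", the Index
+** object would carry aiColumn[0]==XN_EXPR with the expression "a+b"
+** stored in Index.aColExpr rather than a column number.
+*/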
+
+/*
+** Each sample stored in the sqlite_stat4 table is represented in memory
+** using a structure of this type. See documentation at the top of the
+** analyze.c source file for additional information.
+*/
+struct IndexSample {
+ void *p; /* Pointer to sampled record */
+ int n; /* Size of record in bytes */
+ tRowcnt *anEq; /* Est. number of rows where the key equals this sample */
+ tRowcnt *anLt; /* Est. number of rows where key is less than this sample */
+ tRowcnt *anDLt; /* Est. number of distinct keys less than this sample */
+};
+
+/*
+** Possible values to use within the flags argument to sqlite3GetToken().
+*/
+#define SQLITE_TOKEN_QUOTED 0x1 /* Token is a quoted identifier. */
+#define SQLITE_TOKEN_KEYWORD 0x2 /* Token is a keyword. */
+
+/*
+** Each token coming out of the lexer is an instance of
+** this structure. Tokens are also used as part of an expression.
+**
+** The memory that "z" points to is owned by other objects. Take care
+** that the owner of the "z" string does not deallocate the string before
+** the Token goes out of scope! Very often, the "z" points to some place
+** in the middle of the Parse.zSql text. But it might also point to a
+** static string.
+*/
+struct Token {
+ const char *z; /* Text of the token. Not NULL-terminated! */
+ unsigned int n; /* Number of characters in this token */
+};
+
+/*
+** An instance of this structure contains information needed to generate
+** code for a SELECT that contains aggregate functions.
+**
+** If Expr.op==TK_AGG_COLUMN or TK_AGG_FUNCTION then Expr.pAggInfo is a
+** pointer to this structure. The Expr.iAgg field is the index in
+** AggInfo.aCol[] or AggInfo.aFunc[] of information needed to generate
+** code for that node.
+**
+** AggInfo.pGroupBy and AggInfo.aFunc.pExpr point to fields within the
+** original Select structure that describes the SELECT statement. These
+** fields do not need to be freed when deallocating the AggInfo structure.
+*/
+struct AggInfo {
+ u8 directMode; /* Direct rendering mode means take data directly
+ ** from source tables rather than from accumulators */
+ u8 useSortingIdx; /* In direct mode, reference the sorting index rather
+ ** than the source table */
+ int sortingIdx; /* Cursor number of the sorting index */
+ int sortingIdxPTab; /* Cursor number of pseudo-table */
+ int nSortingColumn; /* Number of columns in the sorting index */
+ int mnReg, mxReg; /* Range of registers allocated for aCol and aFunc */
+ ExprList *pGroupBy; /* The group by clause */
+ struct AggInfo_col { /* For each column used in source tables */
+ Table *pTab; /* Source table */
+ Expr *pCExpr; /* The original expression */
+ int iTable; /* Cursor number of the source table */
+ int iMem; /* Memory location that acts as accumulator */
+ i16 iColumn; /* Column number within the source table */
+ i16 iSorterColumn; /* Column number in the sorting index */
+ } *aCol;
+ int nColumn; /* Number of used entries in aCol[] */
+ int nAccumulator; /* Number of columns that show through to the output.
+ ** Additional columns are used only as parameters to
+ ** aggregate functions */
+ struct AggInfo_func { /* For each aggregate function */
+ Expr *pFExpr; /* Expression encoding the function */
+ FuncDef *pFunc; /* The aggregate function implementation */
+ int iMem; /* Memory location that acts as accumulator */
+ int iDistinct; /* Ephemeral table used to enforce DISTINCT */
+ } *aFunc;
+ int nFunc; /* Number of entries in aFunc[] */
+ u32 selId; /* Select to which this AggInfo belongs */
+ AggInfo *pNext; /* Next in list of them all */
+};
+
+/*
+** The datatype ynVar is a signed integer, either 16-bit or 32-bit.
+** Usually it is 16-bits. But if SQLITE_MAX_VARIABLE_NUMBER is greater
+** than 32767 we have to make it 32-bit. 16-bit is preferred because
+** it uses less memory in the Expr object, which is a big memory user
+** in systems with lots of prepared statements. And few applications
+** need more than about 10 or 20 variables. But some extreme users want
+** to have prepared statements with over 32766 variables, and for them
+** the option is available (at compile-time).
+*/
+#if SQLITE_MAX_VARIABLE_NUMBER<32767
+typedef i16 ynVar;
+#else
+typedef int ynVar;
+#endif
+
+/*
+** Each node of an expression in the parse tree is an instance
+** of this structure.
+**
+** Expr.op is the opcode. The integer parser token codes are reused
+** as opcodes here. For example, the parser defines TK_GE to be an integer
+** code representing the ">=" operator. This same integer code is reused
+** to represent the greater-than-or-equal-to operator in the expression
+** tree.
+**
+** If the expression is an SQL literal (TK_INTEGER, TK_FLOAT, TK_BLOB,
+** or TK_STRING), then Expr.token contains the text of the SQL literal. If
+** the expression is a variable (TK_VARIABLE), then Expr.token contains the
+** variable name. Finally, if the expression is an SQL function (TK_FUNCTION),
+** then Expr.token contains the name of the function.
+**
+** Expr.pRight and Expr.pLeft are the left and right subexpressions of a
+** binary operator. Either or both may be NULL.
+**
+** Expr.x.pList is a list of arguments if the expression is an SQL function,
+** a CASE expression or an IN expression of the form "<lhs> IN (<y>, <z>...)".
+** Expr.x.pSelect is used if the expression is a sub-select or an expression of
+** the form "<lhs> IN (SELECT ...)". If the EP_xIsSelect bit is set in the
+** Expr.flags mask, then Expr.x.pSelect is valid. Otherwise, Expr.x.pList is
+** valid.
+**
+** An expression of the form ID or ID.ID refers to a column in a table.
+** For such expressions, Expr.op is set to TK_COLUMN and Expr.iTable is
+** the integer cursor number of a VDBE cursor pointing to that table and
+** Expr.iColumn is the column number for the specific column. If the
+** expression is used as a result in an aggregate SELECT, then the
+** value is also stored in the Expr.iAgg column in the aggregate so that
+** it can be accessed after all aggregates are computed.
+**
+** If the expression is an unbound variable marker (a question mark
+** character '?' in the original SQL) then the Expr.iTable holds the index
+** number for that variable.
+**
+** If the expression is a subquery then Expr.iColumn holds an integer
+** register number containing the result of the subquery. If the
+** subquery gives a constant result, then iTable is -1. If the subquery
+** gives a different answer at different times during statement processing
+** then iTable is the address of a subroutine that computes the subquery.
+**
+** If the Expr is of type OP_Column, and the table it is selecting from
+** is a disk table or the "old.*" pseudo-table, then pTab points to the
+** corresponding table definition.
+**
+** ALLOCATION NOTES:
+**
+** Expr objects can use a lot of memory space in the database schema. To
+** help reduce memory requirements, sometimes an Expr object will be
+** truncated. And to reduce the number of memory allocations, sometimes
+** two or more Expr objects will be stored in a single memory allocation,
+** together with Expr.zToken strings.
+**
+** The EP_Reduced and EP_TokenOnly flags are set when
+** an Expr object is truncated. When EP_Reduced is set, then all
+** the child Expr objects in the Expr.pLeft and Expr.pRight subtrees
+** are contained within the same memory allocation. Note, however, that
+** the subtrees in Expr.x.pList or Expr.x.pSelect are always separately
+** allocated, regardless of whether or not EP_Reduced is set.
+*/
+struct Expr {
+ u8 op; /* Operation performed by this node */
+ char affExpr; /* affinity, or RAISE type */
+ u8 op2; /* TK_REGISTER/TK_TRUTH: original value of Expr.op
+ ** TK_COLUMN: the value of p5 for OP_Column
+ ** TK_AGG_FUNCTION: nesting depth
+ ** TK_FUNCTION: NC_SelfRef flag if needs OP_PureFunc */
+#ifdef SQLITE_DEBUG
+ u8 vvaFlags; /* Verification flags. */
+#endif
+ u32 flags; /* Various flags. EP_* See below */
+ union {
+ char *zToken; /* Token value. Zero terminated and dequoted */
+ int iValue; /* Non-negative integer value if EP_IntValue */
+ } u;
+
+ /* If the EP_TokenOnly flag is set in the Expr.flags mask, then no
+ ** space is allocated for the fields below this point. An attempt to
+ ** access them will result in a segfault or malfunction.
+ *********************************************************************/
+
+ Expr *pLeft; /* Left subnode */
+ Expr *pRight; /* Right subnode */
+ union {
+ ExprList *pList; /* op = IN, EXISTS, SELECT, CASE, FUNCTION, BETWEEN */
+ Select *pSelect; /* EP_xIsSelect and op = IN, EXISTS, SELECT */
+ } x;
+
+ /* If the EP_Reduced flag is set in the Expr.flags mask, then no
+ ** space is allocated for the fields below this point. An attempt to
+ ** access them will result in a segfault or malfunction.
+ *********************************************************************/
+
+#if SQLITE_MAX_EXPR_DEPTH>0
+ int nHeight; /* Height of the tree headed by this node */
+#endif
+ int iTable; /* TK_COLUMN: cursor number of table holding column
+ ** TK_REGISTER: register number
+ ** TK_TRIGGER: 1 -> new, 0 -> old
+ ** EP_Unlikely: 134217728 times likelihood
+ ** TK_IN: ephemeral table holding RHS
+ ** TK_SELECT_COLUMN: Number of columns on the LHS
+ ** TK_SELECT: 1st register of result vector */
+ ynVar iColumn; /* TK_COLUMN: column index. -1 for rowid.
+ ** TK_VARIABLE: variable number (always >= 1).
+ ** TK_SELECT_COLUMN: column of the result vector */
+ i16 iAgg; /* Which entry in pAggInfo->aCol[] or ->aFunc[] */
+ i16 iRightJoinTable; /* If EP_FromJoin, the right table of the join */
+ AggInfo *pAggInfo; /* Used by TK_AGG_COLUMN and TK_AGG_FUNCTION */
+ union {
+ Table *pTab; /* TK_COLUMN: Table containing column. Can be NULL
+ ** for a column of an index on an expression */
+ Window *pWin; /* EP_WinFunc: Window/Filter defn for a function */
+ struct { /* TK_IN, TK_SELECT, and TK_EXISTS */
+ int iAddr; /* Subroutine entry address */
+ int regReturn; /* Register used to hold return address */
+ } sub;
+ } y;
+};
+
+/*
+** The following are the meanings of bits in the Expr.flags field.
+** Value restrictions:
+**
+** EP_Agg == NC_HasAgg == SF_HasAgg
+** EP_Win == NC_HasWin
+*/
+#define EP_FromJoin 0x000001 /* Originates in ON/USING clause of outer join */
+#define EP_Distinct 0x000002 /* Aggregate function with DISTINCT keyword */
+#define EP_HasFunc 0x000004 /* Contains one or more functions of any kind */
+#define EP_FixedCol 0x000008 /* TK_Column with a known fixed value */
+#define EP_Agg 0x000010 /* Contains one or more aggregate functions */
+#define EP_VarSelect 0x000020 /* pSelect is correlated, not constant */
+#define EP_DblQuoted 0x000040 /* token.z was originally in "..." */
+#define EP_InfixFunc 0x000080 /* True for an infix function: LIKE, GLOB, etc */
+#define EP_Collate 0x000100 /* Tree contains a TK_COLLATE operator */
+#define EP_Commuted 0x000200 /* Comparison operator has been commuted */
+#define EP_IntValue 0x000400 /* Integer value contained in u.iValue */
+#define EP_xIsSelect 0x000800 /* x.pSelect is valid (otherwise x.pList is) */
+#define EP_Skip 0x001000 /* Operator does not contribute to affinity */
+#define EP_Reduced 0x002000 /* Expr struct EXPR_REDUCEDSIZE bytes only */
+#define EP_TokenOnly 0x004000 /* Expr struct EXPR_TOKENONLYSIZE bytes only */
+#define EP_Win 0x008000 /* Contains window functions */
+#define EP_MemToken 0x010000 /* Need to sqlite3DbFree() Expr.zToken */
+ /* 0x020000 // available for reuse */
+#define EP_Unlikely 0x040000 /* unlikely() or likelihood() function */
+#define EP_ConstFunc 0x080000 /* A SQLITE_FUNC_CONSTANT or _SLOCHNG function */
+#define EP_CanBeNull 0x100000 /* Can be null despite NOT NULL constraint */
+#define EP_Subquery 0x200000 /* Tree contains a TK_SELECT operator */
+#define EP_Alias 0x400000 /* Is an alias for a result set column */
+#define EP_Leaf 0x800000 /* Expr.pLeft, .pRight, .u.pSelect all NULL */
+#define EP_WinFunc 0x1000000 /* TK_FUNCTION with Expr.y.pWin set */
+#define EP_Subrtn 0x2000000 /* Uses Expr.y.sub. TK_IN, _SELECT, or _EXISTS */
+#define EP_Quoted 0x4000000 /* TK_ID was originally quoted */
+#define EP_Static 0x8000000 /* Held in memory not obtained from malloc() */
+#define EP_IsTrue 0x10000000 /* Always has boolean value of TRUE */
+#define EP_IsFalse 0x20000000 /* Always has boolean value of FALSE */
+#define EP_FromDDL 0x40000000 /* Originates from sqlite_schema */
+ /* 0x80000000 // Available */
+
+/*
+** The EP_Propagate mask is a set of properties that automatically propagate
+** upwards into parent nodes.
+*/
+#define EP_Propagate (EP_Collate|EP_Subquery|EP_HasFunc)
+
+/*
+** These macros can be used to test, set, or clear bits in the
+** Expr.flags field.
+*/
+#define ExprHasProperty(E,P) (((E)->flags&(P))!=0)
+#define ExprHasAllProperty(E,P) (((E)->flags&(P))==(P))
+#define ExprSetProperty(E,P) (E)->flags|=(P)
+#define ExprClearProperty(E,P) (E)->flags&=~(P)
+#define ExprAlwaysTrue(E) (((E)->flags&(EP_FromJoin|EP_IsTrue))==EP_IsTrue)
+#define ExprAlwaysFalse(E) (((E)->flags&(EP_FromJoin|EP_IsFalse))==EP_IsFalse)
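+
+/* Sketch for illustration (not part of the original code): EP_IntValue
+** selects which member of Expr.u is valid, so callers test the property
+** before reading:
+**
+**     if( ExprHasProperty(pExpr, EP_IntValue) ){
+**       n = pExpr->u.iValue;      .. literal stored as an integer ..
+**     }else{
+**       z = pExpr->u.zToken;      .. literal stored as token text ..
+**     }
+*/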
+
+
+/* Flags for use with Expr.vvaFlags
+*/
+#define EP_NoReduce 0x01 /* Cannot EXPRDUP_REDUCE this Expr */
+#define EP_Immutable 0x02 /* Do not change this Expr node */
+
+/* The ExprSetVVAProperty() macro is used for Verification, Validation,
+** and Accreditation only. It works like ExprSetProperty() during VVA
+** processes but is a no-op for delivery.
+*/
+#ifdef SQLITE_DEBUG
+# define ExprSetVVAProperty(E,P) (E)->vvaFlags|=(P)
+# define ExprHasVVAProperty(E,P) (((E)->vvaFlags&(P))!=0)
+# define ExprClearVVAProperties(E) (E)->vvaFlags = 0
+#else
+# define ExprSetVVAProperty(E,P)
+# define ExprHasVVAProperty(E,P) 0
+# define ExprClearVVAProperties(E)
+#endif
+
+/*
+** Macros to determine the number of bytes required by a normal Expr
+** struct, an Expr struct with the EP_Reduced flag set in Expr.flags
+** and an Expr struct with the EP_TokenOnly flag set.
+*/
+#define EXPR_FULLSIZE sizeof(Expr) /* Full size */
+#define EXPR_REDUCEDSIZE offsetof(Expr,iTable) /* Common features */
+#define EXPR_TOKENONLYSIZE offsetof(Expr,pLeft) /* Fewer features */
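+
+/* Sketch for illustration (not part of the original code): because pLeft
+** precedes iTable in the Expr layout above, the three sizes nest, and a
+** token-only node needs only the leading fields to be allocated:
+**
+**     assert( EXPR_TOKENONLYSIZE<=EXPR_REDUCEDSIZE );
+**     assert( EXPR_REDUCEDSIZE<=EXPR_FULLSIZE );
+*/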
+
+/*
+** Flags passed to the sqlite3ExprDup() function. See the header comment
+** above sqlite3ExprDup() for details.
+*/
+#define EXPRDUP_REDUCE 0x0001 /* Used reduced-size Expr nodes */
+
+/*
+** True if the expression passed as an argument was a function with
+** an OVER() clause (a window function).
+*/
+#ifdef SQLITE_OMIT_WINDOWFUNC
+# define IsWindowFunc(p) 0
+#else
+# define IsWindowFunc(p) ( \
+ ExprHasProperty((p), EP_WinFunc) && p->y.pWin->eFrmType!=TK_FILTER \
+ )
+#endif
+
+/*
+** A list of expressions. Each expression may optionally have a
+** name. An expr/name combination can be used in several ways, such
+** as the list of "expr AS ID" fields following a "SELECT" or in the
+** list of "ID = expr" items in an UPDATE. A list of expressions can
+** also be used as the argument to a function, in which case the a.zEName
+** field is not used.
+**
+** In order to try to keep memory usage down, the ExprList.a.zEName field
+** is used for multiple purposes:
+**
+** eEName Usage
+** ---------- -------------------------
+** ENAME_NAME (1) the AS of result set column
+** (2) COLUMN= of an UPDATE
+**
+** ENAME_TAB DB.TABLE.NAME used to resolve names
+** of subqueries
+**
+** ENAME_SPAN Text of the original result set
+** expression.
+*/
+struct ExprList {
+ int nExpr; /* Number of expressions on the list */
+ struct ExprList_item { /* For each expression in the list */
+ Expr *pExpr; /* The parse tree for this expression */
+ char *zEName; /* Token associated with this expression */
+ u8 sortFlags; /* Mask of KEYINFO_ORDER_* flags */
+ unsigned eEName :2; /* Meaning of zEName */
+ unsigned done :1; /* A flag to indicate when processing is finished */
+ unsigned reusable :1; /* Constant expression is reusable */
+ unsigned bSorterRef :1; /* Defer evaluation until after sorting */
+ unsigned bNulls: 1; /* True if explicit "NULLS FIRST/LAST" */
+ union {
+ struct {
+ u16 iOrderByCol; /* For ORDER BY, column number in result set */
+ u16 iAlias; /* Index into Parse.aAlias[] for zName */
+ } x;
+ int iConstExprReg; /* Register in which Expr value is cached */
+ } u;
+ } a[1]; /* One slot for each expression in the list */
+};
+
+/*
+** Allowed values for Expr.a.eEName
+*/
+#define ENAME_NAME 0 /* The AS clause of a result set */
+#define ENAME_SPAN 1 /* Complete text of the result set expression */
+#define ENAME_TAB 2 /* "DB.TABLE.NAME" for the result set */
+
+/*
+** An instance of this structure can hold a simple list of identifiers,
+** such as the list "a,b,c" in the following statements:
+**
+** INSERT INTO t(a,b,c) VALUES ...;
+** CREATE INDEX idx ON t(a,b,c);
+** CREATE TRIGGER trig BEFORE UPDATE ON t(a,b,c) ...;
+**
+** The IdList.a.idx field is used when the IdList represents the list of
+** column names after a table name in an INSERT statement. In the statement
+**
+** INSERT INTO t(a,b,c) ...
+**
+** If "a" is the k-th column of table "t", then IdList.a[0].idx==k.
+*/
+struct IdList {
+ struct IdList_item {
+ char *zName; /* Name of the identifier */
+ int idx; /* Index in some Table.aCol[] of a column named zName */
+ } *a;
+ int nId; /* Number of identifiers on the list */
+};
+
+/*
+** The following structure describes the FROM clause of a SELECT statement.
+** Each table or subquery in the FROM clause is a separate element of
+** the SrcList.a[] array.
+**
+** With the addition of multiple database support, the following structure
+** can also be used to describe a particular table such as the table that
+** is modified by an INSERT, DELETE, or UPDATE statement. In standard SQL,
+** such a table must be a simple name: ID. But in SQLite, the table can
+** now be identified by a database name, a dot, then the table name: ID.ID.
+**
+** The jointype starts out showing the join type between the current table
+** and the next table on the list. The parser builds the list this way.
+** But sqlite3SrcListShiftJoinType() later shifts the jointypes so that each
+** jointype expresses the join between the table and the previous table.
+**
+** In the colUsed field, the high-order bit (bit 63) is set if the table
+** contains more than 63 columns and the 64-th or later column is used.
+*/
+struct SrcList {
+ int nSrc; /* Number of tables or subqueries in the FROM clause */
+ u32 nAlloc; /* Number of entries allocated in a[] below */
+ struct SrcList_item {
+ Schema *pSchema; /* Schema to which this item is fixed */
+ char *zDatabase; /* Name of database holding this table */
+ char *zName; /* Name of the table */
+ char *zAlias; /* The "B" part of a "A AS B" phrase. zName is the "A" */
+ Table *pTab; /* An SQL table corresponding to zName */
+ Select *pSelect; /* A SELECT statement used in place of a table name */
+ int addrFillSub; /* Address of subroutine to manifest a subquery */
+ int regReturn; /* Register holding return address of addrFillSub */
+ int regResult; /* Registers holding results of a co-routine */
+ struct {
+ u8 jointype; /* Type of join between this table and the previous */
+ unsigned notIndexed :1; /* True if there is a NOT INDEXED clause */
+ unsigned isIndexedBy :1; /* True if there is an INDEXED BY clause */
+ unsigned isTabFunc :1; /* True if table-valued-function syntax */
+ unsigned isCorrelated :1; /* True if sub-query is correlated */
+ unsigned viaCoroutine :1; /* Implemented as a co-routine */
+ unsigned isRecursive :1; /* True for recursive reference in WITH */
+ unsigned fromDDL :1; /* Comes from sqlite_schema */
+ } fg;
+ int iCursor; /* The VDBE cursor number used to access this table */
+ Expr *pOn; /* The ON clause of a join */
+ IdList *pUsing; /* The USING clause of a join */
+ Bitmask colUsed; /* Bit N (1<<N) set if column N of pTab is used */
+ union {
+ char *zIndexedBy; /* Identifier from "INDEXED BY <zIndex>" clause */
+ ExprList *pFuncArg; /* Arguments to table-valued-function */
+ } u1;
+ Index *pIBIndex; /* Index structure corresponding to u1.zIndexedBy */
+ } a[1]; /* One entry for each identifier on the list */
+};
+
+/*
+** Permitted values of the SrcList.a.jointype field
+*/
+#define JT_INNER 0x0001 /* Any kind of inner or cross join */
+#define JT_CROSS 0x0002 /* Explicit use of the CROSS keyword */
+#define JT_NATURAL 0x0004 /* True for a "natural" join */
+#define JT_LEFT 0x0008 /* Left outer join */
+#define JT_RIGHT 0x0010 /* Right outer join */
+#define JT_OUTER 0x0020 /* The "OUTER" keyword is present */
+#define JT_ERROR 0x0040 /* unknown or unsupported join type */
+
+
+/*
+** Flags appropriate for the wctrlFlags parameter of sqlite3WhereBegin()
+** and the WhereInfo.wctrlFlags member.
+**
+** Value constraints (enforced via assert()):
+** WHERE_USE_LIMIT == SF_FixedLimit
+*/
+#define WHERE_ORDERBY_NORMAL 0x0000 /* No-op */
+#define WHERE_ORDERBY_MIN 0x0001 /* ORDER BY processing for min() func */
+#define WHERE_ORDERBY_MAX 0x0002 /* ORDER BY processing for max() func */
+#define WHERE_ONEPASS_DESIRED 0x0004 /* Want to do one-pass UPDATE/DELETE */
+#define WHERE_ONEPASS_MULTIROW 0x0008 /* ONEPASS is ok with multiple rows */
+#define WHERE_DUPLICATES_OK 0x0010 /* Ok to return a row more than once */
+#define WHERE_OR_SUBCLAUSE 0x0020 /* Processing a sub-WHERE as part of
+ ** the OR optimization */
+#define WHERE_GROUPBY 0x0040 /* pOrderBy is really a GROUP BY */
+#define WHERE_DISTINCTBY 0x0080 /* pOrderBy is really a DISTINCT clause */
+#define WHERE_WANT_DISTINCT 0x0100 /* All output needs to be distinct */
+#define WHERE_SORTBYGROUP 0x0200 /* Support sqlite3WhereIsSorted() */
+#define WHERE_SEEK_TABLE 0x0400 /* Do not defer seeks on main table */
+#define WHERE_ORDERBY_LIMIT 0x0800 /* ORDERBY+LIMIT on the inner loop */
+#define WHERE_SEEK_UNIQ_TABLE 0x1000 /* Do not defer seeks if unique */
+ /* 0x2000 not currently used */
+#define WHERE_USE_LIMIT 0x4000 /* Use the LIMIT in cost estimates */
+ /* 0x8000 not currently used */
+
+/* Allowed return values from sqlite3WhereIsDistinct()
+*/
+#define WHERE_DISTINCT_NOOP 0 /* DISTINCT keyword not used */
+#define WHERE_DISTINCT_UNIQUE 1 /* No duplicates */
+#define WHERE_DISTINCT_ORDERED 2 /* All duplicates are adjacent */
+#define WHERE_DISTINCT_UNORDERED 3 /* Duplicates are scattered */
+
+/*
+** A NameContext defines a context in which to resolve table and column
+** names. The context consists of a list of tables (the pSrcList field) and
+** a list of named expressions (pEList). The named expression list may
+** be NULL. The pSrcList corresponds to the FROM clause of a SELECT or
+** to the table being operated on by INSERT, UPDATE, or DELETE. The
+** pEList corresponds to the result set of a SELECT and is NULL for
+** other statements.
+**
+** NameContexts can be nested. When resolving names, the inner-most
+** context is searched first. If no match is found, the next outer
+** context is checked. If there is still no match, the next context
+** is checked. This process continues until either a match is found
+** or all contexts are checked. When a match is found, the nRef member of
+** the context containing the match is incremented.
+**
+** Each subquery gets a new NameContext. The pNext field points to the
+** NameContext in the parent query. Thus the process of scanning the
+** NameContext list corresponds to searching through successively outer
+** subqueries looking for a match.
+*/
+struct NameContext {
+ Parse *pParse; /* The parser */
+ SrcList *pSrcList; /* One or more tables used to resolve names */
+ union {
+ ExprList *pEList; /* Optional list of result-set columns */
+ AggInfo *pAggInfo; /* Information about aggregates at this level */
+ Upsert *pUpsert; /* ON CONFLICT clause information from an upsert */
+ } uNC;
+ NameContext *pNext; /* Next outer name context. NULL for outermost */
+ int nRef; /* Number of names resolved by this context */
+ int nErr; /* Number of errors encountered while resolving names */
+ int ncFlags; /* Zero or more NC_* flags defined below */
+ Select *pWinSelect; /* SELECT statement for any window functions */
+};
+
+/*
+** Allowed values for the NameContext, ncFlags field.
+**
+** Value constraints (all checked via assert()):
+** NC_HasAgg == SF_HasAgg == EP_Agg
+** NC_MinMaxAgg == SF_MinMaxAgg == SQLITE_FUNC_MINMAX
+** NC_HasWin == EP_Win
+**
+*/
+#define NC_AllowAgg 0x00001 /* Aggregate functions are allowed here */
+#define NC_PartIdx 0x00002 /* True if resolving a partial index WHERE */
+#define NC_IsCheck 0x00004 /* True if resolving a CHECK constraint */
+#define NC_GenCol 0x00008 /* True for a GENERATED ALWAYS AS clause */
+#define NC_HasAgg 0x00010 /* One or more aggregate functions seen */
+#define NC_IdxExpr 0x00020 /* True if resolving columns of CREATE INDEX */
+#define NC_SelfRef 0x0002e /* Combo: PartIdx, isCheck, GenCol, and IdxExpr */
+#define NC_VarSelect 0x00040 /* A correlated subquery has been seen */
+#define NC_UEList 0x00080 /* True if uNC.pEList is used */
+#define NC_UAggInfo 0x00100 /* True if uNC.pAggInfo is used */
+#define NC_UUpsert 0x00200 /* True if uNC.pUpsert is used */
+#define NC_MinMaxAgg 0x01000 /* min/max aggregates seen. See note above */
+#define NC_Complex 0x02000 /* True if a function or subquery seen */
+#define NC_AllowWin 0x04000 /* Window functions are allowed here */
+#define NC_HasWin 0x08000 /* One or more window functions seen */
+#define NC_IsDDL 0x10000 /* Resolving names in a CREATE statement */
+#define NC_InAggFunc 0x20000 /* True if analyzing arguments to an agg func */
+#define NC_FromDDL 0x40000 /* SQL text comes from sqlite_schema */
+
+/*
+** An instance of the following object describes a single ON CONFLICT
+** clause in an upsert.
+**
+** The pUpsertTarget field is only set if the ON CONFLICT clause includes a
+** conflict-target clause. (In "ON CONFLICT(a,b)" the "(a,b)" is the
+** conflict-target clause.) The pUpsertTargetWhere is the optional
+** WHERE clause used to identify partial unique indexes.
+**
+** pUpsertSet is the list of column=expr terms of the UPDATE statement.
+** The pUpsertSet field is NULL for an ON CONFLICT DO NOTHING. The
+** pUpsertWhere is the WHERE clause for the UPDATE and is NULL if the
+** WHERE clause is omitted.
+*/
+struct Upsert {
+ ExprList *pUpsertTarget; /* Optional description of conflicting index */
+ Expr *pUpsertTargetWhere; /* WHERE clause for partial index targets */
+ ExprList *pUpsertSet; /* The SET clause from an ON CONFLICT UPDATE */
+ Expr *pUpsertWhere; /* WHERE clause for the ON CONFLICT UPDATE */
+ /* The fields above comprise the parse tree for the upsert clause.
+ ** The fields below are used to transfer information from the INSERT
+ ** processing down into the UPDATE processing while generating code.
+ ** Upsert owns the memory allocated above, but not the memory below. */
+ Index *pUpsertIdx; /* Constraint that pUpsertTarget identifies */
+ SrcList *pUpsertSrc; /* Table to be updated */
+ int regData; /* First register holding array of VALUES */
+ int iDataCur; /* Index of the data cursor */
+ int iIdxCur; /* Index of the first index cursor */
+};
+
+/*
+** An instance of the following structure contains all information
+** needed to generate code for a single SELECT statement.
+**
+** See the header comment on the computeLimitRegisters() routine for a
+** detailed description of the meaning of the iLimit and iOffset fields.
+**
+** addrOpenEphm[] entries contain the address of OP_OpenEphemeral opcodes.
+** These addresses must be stored so that we can go back and fill in
+** the P4_KEYINFO and P2 parameters later. Neither the KeyInfo nor
+** the number of columns in P2 can be computed at the same time
+** as the OP_OpenEphm instruction is coded because not
+** enough information about the compound query is known at that point.
+** The KeyInfo for addrOpenEphm[0] and [1] contains collating sequences
+** for the result set. The KeyInfo for addrOpenEphm[2] contains collating
+** sequences for the ORDER BY clause.
+*/
+struct Select {
+ u8 op; /* One of: TK_UNION TK_ALL TK_INTERSECT TK_EXCEPT */
+ LogEst nSelectRow; /* Estimated number of result rows */
+ u32 selFlags; /* Various SF_* values */
+ int iLimit, iOffset; /* Memory registers holding LIMIT & OFFSET counters */
+ u32 selId; /* Unique identifier number for this SELECT */
+ int addrOpenEphm[2]; /* OP_OpenEphem opcodes related to this select */
+ ExprList *pEList; /* The fields of the result */
+ SrcList *pSrc; /* The FROM clause */
+ Expr *pWhere; /* The WHERE clause */
+ ExprList *pGroupBy; /* The GROUP BY clause */
+ Expr *pHaving; /* The HAVING clause */
+ ExprList *pOrderBy; /* The ORDER BY clause */
+ Select *pPrior; /* Prior select in a compound select statement */
+ Select *pNext; /* Next select to the left in a compound */
+ Expr *pLimit; /* LIMIT expression. NULL means not used. */
+ With *pWith; /* WITH clause attached to this select. Or NULL. */
+#ifndef SQLITE_OMIT_WINDOWFUNC
+ Window *pWin; /* List of window functions */
+ Window *pWinDefn; /* List of named window definitions */
+#endif
+};
+
+/*
+** Allowed values for Select.selFlags. The "SF" prefix stands for
+** "Select Flag".
+**
+** Value constraints (all checked via assert())
+** SF_HasAgg == NC_HasAgg
+** SF_MinMaxAgg == NC_MinMaxAgg == SQLITE_FUNC_MINMAX
+** SF_FixedLimit == WHERE_USE_LIMIT
+*/
+#define SF_Distinct 0x0000001 /* Output should be DISTINCT */
+#define SF_All 0x0000002 /* Includes the ALL keyword */
+#define SF_Resolved 0x0000004 /* Identifiers have been resolved */
+#define SF_Aggregate 0x0000008 /* Contains agg functions or a GROUP BY */
+#define SF_HasAgg 0x0000010 /* Contains aggregate functions */
+#define SF_UsesEphemeral 0x0000020 /* Uses the OpenEphemeral opcode */
+#define SF_Expanded 0x0000040 /* sqlite3SelectExpand() called on this */
+#define SF_HasTypeInfo 0x0000080 /* FROM subqueries have Table metadata */
+#define SF_Compound 0x0000100 /* Part of a compound query */
+#define SF_Values 0x0000200 /* Synthesized from VALUES clause */
+#define SF_MultiValue 0x0000400 /* Single VALUES term with multiple rows */
+#define SF_NestedFrom 0x0000800 /* Part of a parenthesized FROM clause */
+#define SF_MinMaxAgg 0x0001000 /* Aggregate containing min() or max() */
+#define SF_Recursive 0x0002000 /* The recursive part of a recursive CTE */
+#define SF_FixedLimit 0x0004000 /* nSelectRow set by a constant LIMIT */
+#define SF_MaybeConvert 0x0008000 /* Need convertCompoundSelectToSubquery() */
+#define SF_Converted 0x0010000 /* By convertCompoundSelectToSubquery() */
+#define SF_IncludeHidden 0x0020000 /* Include hidden columns in output */
+#define SF_ComplexResult 0x0040000 /* Result contains subquery or function */
+#define SF_WhereBegin 0x0080000 /* Really a WhereBegin() call. Debug Only */
+#define SF_WinRewrite 0x0100000 /* Window function rewrite accomplished */
+#define SF_View 0x0200000 /* SELECT statement is a view */
+#define SF_NoopOrderBy 0x0400000 /* ORDER BY is ignored for this query */
+#define SF_UpdateFrom 0x0800000 /* Statement is an UPDATE...FROM */
+
+/*
+** The results of a SELECT can be distributed in several ways, as defined
+** by one of the following macros. The "SRT" prefix means "SELECT Result
+** Type".
+**
+** SRT_Union Store results as a key in a temporary index
+** identified by pDest->iSDParm.
+**
+** SRT_Except Remove results from the temporary index pDest->iSDParm.
+**
+** SRT_Exists Store a 1 in memory cell pDest->iSDParm if the result
+** set is not empty.
+**
+** SRT_Discard Throw the results away. This is used by SELECT
+** statements within triggers whose only purpose is
+** the side-effects of functions.
+**
+** All of the above are free to ignore their ORDER BY clause. Those that
+** follow must honor the ORDER BY clause.
+**
+** SRT_Output Generate a row of output (using the OP_ResultRow
+** opcode) for each row in the result set.
+**
+** SRT_Mem Only valid if the result is a single column.
+** Store the first column of the first result row
+** in register pDest->iSDParm then abandon the rest
+** of the query. This destination implies "LIMIT 1".
+**
+** SRT_Set The result must be a single column. Store each
+** row of result as the key in table pDest->iSDParm.
+** Apply the affinity pDest->affSdst before storing
+** results. Used to implement "IN (SELECT ...)".
+**
+** SRT_EphemTab Create a temporary table pDest->iSDParm and store
+** the result there. The cursor is left open after
+** returning. This is like SRT_Table except that
+** this destination uses OP_OpenEphemeral to create
+** the table first.
+**
+** SRT_Coroutine Generate a co-routine that returns a new row of
+** results each time it is invoked. The entry point
+** of the co-routine is stored in register pDest->iSDParm
+** and the result row is stored in pDest->nDest registers
+** starting with pDest->iSdst.
+**
+** SRT_Table Store results in temporary table pDest->iSDParm.
+** SRT_Fifo This is like SRT_EphemTab except that the table
+** is assumed to already be open. SRT_Fifo has
+** the additional property of being able to ignore
+** the ORDER BY clause.
+**
+** SRT_DistFifo Store results in a temporary table pDest->iSDParm.
+** But also use temporary table pDest->iSDParm+1 as
+** a record of all prior results and ignore any duplicate
+** rows. Name means: "Distinct Fifo".
+**
+** SRT_Queue Store results in priority queue pDest->iSDParm (really
+** an index). Append a sequence number so that all entries
+** are distinct.
+**
+** SRT_DistQueue Store results in priority queue pDest->iSDParm only if
+** the same record has never been stored before. The
+** index at pDest->iSDParm+1 holds all prior stores.
+**
+** SRT_Upfrom Store results in the temporary table already opened by
+** pDest->iSDParm. If (pDest->iSDParm<0), then the temp
+** table is an intkey table - in this case the first
+** column returned by the SELECT is used as the integer
+** key. If (pDest->iSDParm>0), then the table is an index
+** table. (pDest->iSDParm) is the number of key columns in
+** each index record in this case.
+*/
+#define SRT_Union 1 /* Store result as keys in an index */
+#define SRT_Except 2 /* Remove result from a UNION index */
+#define SRT_Exists 3 /* Store 1 if the result is not empty */
+#define SRT_Discard 4 /* Do not save the results anywhere */
+#define SRT_Fifo 5 /* Store result as data with an automatic rowid */
+#define SRT_DistFifo 6 /* Like SRT_Fifo, but unique results only */
+#define SRT_Queue 7 /* Store result in a queue */
+#define SRT_DistQueue 8 /* Like SRT_Queue, but unique results only */
+
+/* The ORDER BY clause is ignored for all of the above */
+#define IgnorableOrderby(X) ((X->eDest)<=SRT_DistQueue)
+
+#define SRT_Output 9 /* Output each row of result */
+#define SRT_Mem 10 /* Store result in a memory cell */
+#define SRT_Set 11 /* Store results as keys in an index */
+#define SRT_EphemTab 12 /* Create transient tab and store like SRT_Table */
+#define SRT_Coroutine 13 /* Generate a single row of result */
+#define SRT_Table 14 /* Store result as data with an automatic rowid */
+#define SRT_Upfrom 15 /* Store result as data with rowid */
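+
+/* Sketch for illustration (not part of the original code): the numbering
+** above is what makes IgnorableOrderby() a single comparison:
+**
+**     SelectDest dest;
+**     dest.eDest = SRT_Exists;   assert(  IgnorableOrderby(&dest) );  .. 3<=8 ..
+**     dest.eDest = SRT_Output;   assert( !IgnorableOrderby(&dest) );  .. 9>8 ..
+*/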
+
+/*
+** An instance of this object describes where to put the results of
+** a SELECT statement.
+*/
+struct SelectDest {
+ u8 eDest; /* How to dispose of the results. One of SRT_* above. */
+ int iSDParm; /* A parameter used by the eDest disposal method */
+ int iSDParm2; /* A second parameter for the eDest disposal method */
+ int iSdst; /* Base register where results are written */
+ int nSdst; /* Number of registers allocated */
+ char *zAffSdst; /* Affinity used when eDest==SRT_Set */
+ ExprList *pOrderBy; /* Key columns for SRT_Queue and SRT_DistQueue */
+};
+
+/*
+** During code generation of statements that do inserts into AUTOINCREMENT
+** tables, the following information is attached to the Table.u.autoInc.p
+** pointer of each autoincrement table to record some side information that
+** the code generator needs. We have to keep per-table autoincrement
+** information in case inserts are done within triggers. Triggers do not
+** normally coordinate their activities, but we do need to coordinate the
+** loading and saving of autoincrement information.
+*/
+struct AutoincInfo {
+ AutoincInfo *pNext; /* Next info block in a list of them all */
+ Table *pTab; /* Table this info block refers to */
+ int iDb; /* Index in sqlite3.aDb[] of database holding pTab */
+ int regCtr; /* Memory register holding the rowid counter */
+};
+
+/*
+** At least one instance of the following structure is created for each
+** trigger that may be fired while parsing an INSERT, UPDATE or DELETE
+** statement. All such objects are stored in the linked list headed at
+** Parse.pTriggerPrg and deleted once statement compilation has been
+** completed.
+**
+** A Vdbe sub-program that implements the body and WHEN clause of trigger
+** TriggerPrg.pTrigger, assuming a default ON CONFLICT clause of
+** TriggerPrg.orconf, is stored in the TriggerPrg.pProgram variable.
+** The Parse.pTriggerPrg list never contains two entries with the same
+** values for both pTrigger and orconf.
+**
+** The TriggerPrg.aColmask[0] variable is set to a mask of old.* columns
+** accessed (or set to 0 for triggers fired as a result of INSERT
+** statements). Similarly, the TriggerPrg.aColmask[1] variable is set to
+** a mask of new.* columns used by the program.
+*/
+struct TriggerPrg {
+ Trigger *pTrigger; /* Trigger this program was coded from */
+ TriggerPrg *pNext; /* Next entry in Parse.pTriggerPrg list */
+ SubProgram *pProgram; /* Program implementing pTrigger/orconf */
+ int orconf; /* Default ON CONFLICT policy */
+ u32 aColmask[2]; /* Masks of old.*, new.* columns accessed */
+};
+
+/*
+** The yDbMask datatype for the bitmask of all attached databases.
+*/
+#if SQLITE_MAX_ATTACHED>30
+ typedef unsigned char yDbMask[(SQLITE_MAX_ATTACHED+9)/8];
+# define DbMaskTest(M,I) (((M)[(I)/8]&(1<<((I)&7)))!=0)
+# define DbMaskZero(M) memset((M),0,sizeof(M))
+# define DbMaskSet(M,I) (M)[(I)/8]|=(1<<((I)&7))
+# define DbMaskAllZero(M) sqlite3DbMaskAllZero(M)
+# define DbMaskNonZero(M) (sqlite3DbMaskAllZero(M)==0)
+#else
+ typedef unsigned int yDbMask;
+# define DbMaskTest(M,I) (((M)&(((yDbMask)1)<<(I)))!=0)
+# define DbMaskZero(M) (M)=0
+# define DbMaskSet(M,I) (M)|=(((yDbMask)1)<<(I))
+# define DbMaskAllZero(M) (M)==0
+# define DbMaskNonZero(M) (M)!=0
+#endif
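+
+/* Sketch for illustration (not part of the original code): in the common
+** build (SQLITE_MAX_ATTACHED<=30) yDbMask is a plain unsigned int, so
+** marking and testing an attached database is ordinary bit arithmetic:
+**
+**     yDbMask m;
+**     DbMaskZero(m);         .. m==0 ..
+**     DbMaskSet(m, 2);       .. m==0x4 ..
+**     assert( DbMaskTest(m, 2) && !DbMaskTest(m, 0) );
+*/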
+
+/*
+** An SQL parser context. A copy of this structure is passed through
+** the parser and down into all the parser action routines in order to
+** carry around information that is global to the entire parse.
+**
+** The structure is divided into two parts. When the parser and code
+** generator call themselves recursively, the first part of the structure
+** is constant but the second part is reset at the beginning and end of
+** each recursion.
+**
+** The nTableLock and aTableLock variables are only used if the shared-cache
+** feature is enabled (see sqlite3_enable_shared_cache()). They are
+** used to store the set of table-locks required by the statement being
+** compiled. Function sqlite3TableLock() is used to add entries to the
+** list.
+*/
+struct Parse {
+ sqlite3 *db; /* The main database structure */
+ char *zErrMsg; /* An error message */
+ Vdbe *pVdbe; /* An engine for executing database bytecode */
+ int rc; /* Return code from execution */
+ u8 colNamesSet; /* TRUE after OP_ColumnName has been issued to pVdbe */
+ u8 checkSchema; /* Causes schema cookie check after an error */
+ u8 nested; /* Number of nested calls to the parser/code generator */
+ u8 nTempReg; /* Number of temporary registers in aTempReg[] */
+ u8 isMultiWrite; /* True if statement may modify/insert multiple rows */
+ u8 mayAbort; /* True if statement may throw an ABORT exception */
+ u8 hasCompound; /* Need to invoke convertCompoundSelectToSubquery() */
+ u8 okConstFactor; /* OK to factor out constants */
+ u8 disableLookaside; /* Number of times lookaside has been disabled */
+ u8 disableVtab; /* Disable all virtual tables for this parse */
+ int nRangeReg; /* Size of the temporary register block */
+ int iRangeReg; /* First register in temporary register block */
+ int nErr; /* Number of errors seen */
+ int nTab; /* Number of previously allocated VDBE cursors */
+ int nMem; /* Number of memory cells used so far */
+ int szOpAlloc; /* Bytes of memory space allocated for Vdbe.aOp[] */
+ int iSelfTab; /* Table associated with an index on expr, or negative
+ ** of the base register during check-constraint eval */
+ int nLabel; /* The *negative* of the number of labels used */
+ int nLabelAlloc; /* Number of slots in aLabel */
+ int *aLabel; /* Space to hold the labels */
+ ExprList *pConstExpr;/* Constant expressions */
+ Token constraintName;/* Name of the constraint currently being parsed */
+ yDbMask writeMask; /* Start a write transaction on these databases */
+ yDbMask cookieMask; /* Bitmask of schema verified databases */
+ int regRowid; /* Register holding rowid of CREATE TABLE entry */
+ int regRoot; /* Register holding root page number for new objects */
+ int nMaxArg; /* Max args passed to user function by sub-program */
+ int nSelect; /* Number of SELECT stmts. Counter for Select.selId */
+#ifndef SQLITE_OMIT_SHARED_CACHE
+ int nTableLock; /* Number of locks in aTableLock */
+ TableLock *aTableLock; /* Required table locks for shared-cache mode */
+#endif
+ AutoincInfo *pAinc; /* Information about AUTOINCREMENT counters */
+ Parse *pToplevel; /* Parse structure for main program (or NULL) */
+ Table *pTriggerTab; /* Table triggers are being coded for */
+ Parse *pParentParse; /* Parent parser if this parser is nested */
+ AggInfo *pAggList; /* List of all AggInfo objects */
+ int addrCrTab; /* Address of OP_CreateBtree opcode on CREATE TABLE */
+ u32 nQueryLoop; /* Est number of iterations of a query (10*log2(N)) */
+ u32 oldmask; /* Mask of old.* columns referenced */
+ u32 newmask; /* Mask of new.* columns referenced */
+ u8 eTriggerOp; /* TK_UPDATE, TK_INSERT or TK_DELETE */
+ u8 eOrconf; /* Default ON CONFLICT policy for trigger steps */
+ u8 disableTriggers; /* True to disable triggers */
+
+ /**************************************************************************
+ ** Fields above must be initialized to zero. The fields that follow,
+ ** down to the beginning of the recursive section, do not need to be
+ ** initialized as they will be set before being used. The boundary is
+ ** determined by offsetof(Parse,aTempReg).
+ **************************************************************************/
+
+ int aTempReg[8]; /* Holding area for temporary registers */
+ Token sNameToken; /* Token with unqualified schema object name */
+
+ /************************************************************************
+ ** Above is constant between recursions. Below is reset before and after
+ ** each recursion. The boundary between these two regions is determined
+ ** using offsetof(Parse,sLastToken) so the sLastToken field must be the
+ ** first field in the recursive region.
+ ************************************************************************/
+
+ Token sLastToken; /* The last token parsed */
+ ynVar nVar; /* Number of '?' variables seen in the SQL so far */
+ u8 iPkSortOrder; /* ASC or DESC for INTEGER PRIMARY KEY */
+ u8 explain; /* True if the EXPLAIN flag is found on the query */
+ u8 eParseMode; /* PARSE_MODE_XXX constant */
+#ifndef SQLITE_OMIT_VIRTUALTABLE
+ int nVtabLock; /* Number of virtual tables to lock */
+#endif
+ int nHeight; /* Expression tree height of current sub-select */
+#ifndef SQLITE_OMIT_EXPLAIN
+ int addrExplain; /* Address of current OP_Explain opcode */
+#endif
+ VList *pVList; /* Mapping between variable names and numbers */
+ Vdbe *pReprepare; /* VM being reprepared (sqlite3Reprepare()) */
+ const char *zTail; /* All SQL text past the last semicolon parsed */
+ Table *pNewTable; /* A table being constructed by CREATE TABLE */
+ Index *pNewIndex; /* An index being constructed by CREATE INDEX.
+ ** Also used to hold redundant UNIQUE constraints
+ ** during a RENAME COLUMN */
+ Trigger *pNewTrigger; /* Trigger under construct by a CREATE TRIGGER */
+ const char *zAuthContext; /* The 6th parameter to db->xAuth callbacks */
+#ifndef SQLITE_OMIT_VIRTUALTABLE
+ Token sArg; /* Complete text of a module argument */
+ Table **apVtabLock; /* Pointer to virtual tables needing locking */
+#endif
+ Table *pZombieTab; /* List of Table objects to delete after code gen */
+ TriggerPrg *pTriggerPrg; /* Linked list of coded triggers */
+ With *pWith; /* Current WITH clause, or NULL */
+ With *pWithToFree; /* Free this WITH object at the end of the parse */
+#ifndef SQLITE_OMIT_ALTERTABLE
+ RenameToken *pRename; /* Tokens subject to renaming by ALTER TABLE */
+#endif
+};
+
+#define PARSE_MODE_NORMAL 0
+#define PARSE_MODE_DECLARE_VTAB 1
+#define PARSE_MODE_RENAME 2
+#define PARSE_MODE_UNMAP 3
+
+/*
+** Sizes and pointers of various parts of the Parse object.
+*/
+#define PARSE_HDR_SZ offsetof(Parse,aTempReg) /* Size of the zeroed header */
+#define PARSE_RECURSE_SZ offsetof(Parse,sLastToken) /* Size before recursive part */
+#define PARSE_TAIL_SZ (sizeof(Parse)-PARSE_RECURSE_SZ) /* Size of recursive part */
+#define PARSE_TAIL(X) (((char*)(X))+PARSE_RECURSE_SZ) /* Pointer to recursive part */
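+
+/*
+** Sketch of how these sizes are used during statement preparation
+** (illustrative; see the actual preparation code for details): the
+** zero-required header and the recursive tail are cleared separately,
+** roughly as in:
+**
+**     memset(&sParse, 0, PARSE_HDR_SZ);
+**     memset(PARSE_TAIL(&sParse), 0, PARSE_TAIL_SZ);
+*/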
+
+/*
+** Return true if currently inside an sqlite3_declare_vtab() call.
+*/
+#ifdef SQLITE_OMIT_VIRTUALTABLE
+ #define IN_DECLARE_VTAB 0
+#else
+ #define IN_DECLARE_VTAB (pParse->eParseMode==PARSE_MODE_DECLARE_VTAB)
+#endif
+
+#if defined(SQLITE_OMIT_ALTERTABLE)
+ #define IN_RENAME_OBJECT 0
+#else
+ #define IN_RENAME_OBJECT (pParse->eParseMode>=PARSE_MODE_RENAME)
+#endif
+
+#if defined(SQLITE_OMIT_VIRTUALTABLE) && defined(SQLITE_OMIT_ALTERTABLE)
+ #define IN_SPECIAL_PARSE 0
+#else
+ #define IN_SPECIAL_PARSE (pParse->eParseMode!=PARSE_MODE_NORMAL)
+#endif
+
+/*
+** An instance of the following structure can be declared on a stack and used
+** to save the Parse.zAuthContext value so that it can be restored later.
+*/
+struct AuthContext {
+ const char *zAuthContext; /* Put saved Parse.zAuthContext here */
+ Parse *pParse; /* The Parse structure */
+};
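+
+/*
+** Usage sketch: sqlite3AuthContextPush() (declared below) saves the
+** current Parse.zAuthContext in one of these objects and installs a new
+** context string; sqlite3AuthContextPop() restores the saved value:
+**
+**     AuthContext sContext;
+**     sqlite3AuthContextPush(pParse, &sContext, pTab->zName);
+**     ... generate code that may invoke the authorizer ...
+**     sqlite3AuthContextPop(&sContext);
+*/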
+
+/*
+** Bitfield flags for P5 value in various opcodes.
+**
+** Value constraints (enforced via assert()):
+** OPFLAG_LENGTHARG == SQLITE_FUNC_LENGTH
+** OPFLAG_TYPEOFARG == SQLITE_FUNC_TYPEOF
+** OPFLAG_BULKCSR == BTREE_BULKLOAD
+** OPFLAG_SEEKEQ == BTREE_SEEK_EQ
+** OPFLAG_FORDELETE == BTREE_FORDELETE
+** OPFLAG_SAVEPOSITION == BTREE_SAVEPOSITION
+** OPFLAG_AUXDELETE == BTREE_AUXDELETE
+*/
+#define OPFLAG_NCHANGE 0x01 /* OP_Insert: Set to update db->nChange */
+ /* Also used in P2 (not P5) of OP_Delete */
+#define OPFLAG_NOCHNG 0x01 /* OP_VColumn nochange for UPDATE */
+#define OPFLAG_EPHEM 0x01 /* OP_Column: Ephemeral output is ok */
+#define OPFLAG_LASTROWID 0x20 /* Set to update db->lastRowid */
+#define OPFLAG_ISUPDATE 0x04 /* This OP_Insert is an sql UPDATE */
+#define OPFLAG_APPEND 0x08 /* This is likely to be an append */
+#define OPFLAG_USESEEKRESULT 0x10 /* Try to avoid a seek in BtreeInsert() */
+#define OPFLAG_ISNOOP 0x40 /* OP_Delete does pre-update-hook only */
+#define OPFLAG_LENGTHARG 0x40 /* OP_Column only used for length() */
+#define OPFLAG_TYPEOFARG 0x80 /* OP_Column only used for typeof() */
+#define OPFLAG_BULKCSR 0x01 /* OP_Open** used to open bulk cursor */
+#define OPFLAG_SEEKEQ 0x02 /* OP_Open** cursor uses EQ seek only */
+#define OPFLAG_FORDELETE 0x08 /* OP_Open should use BTREE_FORDELETE */
+#define OPFLAG_P2ISREG 0x10 /* P2 to OP_Open** is a register number */
+#define OPFLAG_PERMUTE 0x01 /* OP_Compare: use the permutation */
+#define OPFLAG_SAVEPOSITION 0x02 /* OP_Delete/Insert: save cursor pos */
+#define OPFLAG_AUXDELETE 0x04 /* OP_Delete: index in a DELETE op */
+#define OPFLAG_NOCHNG_MAGIC 0x6d /* OP_MakeRecord: serialtype 10 is ok */
+
+/*
+ * Each trigger present in the database schema is stored as an instance of
+ * struct Trigger.
+ *
+ * Pointers to instances of struct Trigger are stored in two ways.
+ * 1. In the "trigHash" hash table (part of the sqlite3* that represents the
+ * database). This allows Trigger structures to be retrieved by name.
+ * 2. All triggers associated with a single table form a linked list, using the
+ * pNext member of struct Trigger. A pointer to the first element of the
+ * linked list is stored as the "pTrigger" member of the associated
+ * struct Table.
+ *
+ * The "step_list" member points to the first element of a linked list
+ * containing the SQL statements specified as the trigger program.
+ */
+struct Trigger {
+ char *zName; /* The name of the trigger */
+ char *table; /* The table or view to which the trigger applies */
+ u8 op; /* One of TK_DELETE, TK_UPDATE, TK_INSERT */
+ u8 tr_tm; /* One of TRIGGER_BEFORE, TRIGGER_AFTER */
+ Expr *pWhen; /* The WHEN clause of the expression (may be NULL) */
+ IdList *pColumns; /* If this is an UPDATE OF <column-list> trigger,
+ the <column-list> is stored here */
+ Schema *pSchema; /* Schema containing the trigger */
+ Schema *pTabSchema; /* Schema containing the table */
+ TriggerStep *step_list; /* Link list of trigger program steps */
+ Trigger *pNext; /* Next trigger associated with the table */
+};
+
+/*
+** A trigger is either a BEFORE or an AFTER trigger. The following constants
+** determine which.
+**
+** If there are multiple triggers, you might have some BEFORE and some
+** AFTER. In that case, the constants below can be ORed together.
+*/
+#define TRIGGER_BEFORE 1
+#define TRIGGER_AFTER 2
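+
+/*
+** For example, sqlite3TriggersExist() (declared below) reports both
+** kinds at once through its pMask output: a mask equal to
+** (TRIGGER_BEFORE|TRIGGER_AFTER) means the table has at least one
+** BEFORE and at least one AFTER trigger, and a test such as
+** (mask & TRIGGER_BEFORE)!=0 selects just the BEFORE subset.
+*/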
+
+/*
+ * An instance of struct TriggerStep is used to store a single SQL statement
+ * that is a part of a trigger-program.
+ *
+ * Instances of struct TriggerStep are stored in a singly linked list (linked
+ * using the "pNext" member) referenced by the "step_list" member of the
+ * associated struct Trigger instance. The first element of the linked list is
+ * the first step of the trigger-program.
+ *
+ * The "op" member indicates whether this is a "DELETE", "INSERT", "UPDATE" or
+ * "SELECT" statement. The meanings of the other members is determined by the
+ * value of "op" as follows:
+ *
+ * (op == TK_INSERT)
+ * orconf -> stores the ON CONFLICT algorithm
+ * pSelect -> If this is an INSERT INTO ... SELECT ... statement, then
+ * this stores a pointer to the SELECT statement. Otherwise NULL.
+ * zTarget -> Dequoted name of the table to insert into.
+ * pExprList -> If this is an INSERT INTO ... VALUES ... statement, then
+ * this stores values to be inserted. Otherwise NULL.
+ * pIdList -> If this is an INSERT INTO ... (<column-names>) VALUES ...
+ * statement, then this stores the column-names to be
+ * inserted into.
+ *
+ * (op == TK_DELETE)
+ * zTarget -> Dequoted name of the table to delete from.
+ * pWhere -> The WHERE clause of the DELETE statement if one is specified.
+ * Otherwise NULL.
+ *
+ * (op == TK_UPDATE)
+ * zTarget -> Dequoted name of the table to update.
+ * pWhere -> The WHERE clause of the UPDATE statement if one is specified.
+ * Otherwise NULL.
+ * pExprList -> A list of the columns to update and the expressions to update
+ * them to. See sqlite3Update() documentation of "pChanges"
+ * argument.
+ *
+ */
+struct TriggerStep {
+ u8 op; /* One of TK_DELETE, TK_UPDATE, TK_INSERT, TK_SELECT */
+ u8 orconf; /* OE_Rollback etc. */
+ Trigger *pTrig; /* The trigger that this step is a part of */
+ Select *pSelect; /* SELECT statement or RHS of INSERT INTO SELECT ... */
+ char *zTarget; /* Target table for DELETE, UPDATE, INSERT */
+ SrcList *pFrom; /* FROM clause for UPDATE statement (if any) */
+ Expr *pWhere; /* The WHERE clause for DELETE or UPDATE steps */
+ ExprList *pExprList; /* SET clause for UPDATE */
+ IdList *pIdList; /* Column names for INSERT */
+ Upsert *pUpsert; /* Upsert clauses on an INSERT */
+ char *zSpan; /* Original SQL text of this command */
+ TriggerStep *pNext; /* Next in the link-list */
+ TriggerStep *pLast; /* Last element in link-list. Valid for 1st elem only */
+};
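+
+/*
+** Illustrative mapping (not compiled): a trigger body such as
+**
+**     CREATE TRIGGER r1 AFTER UPDATE ON t1 BEGIN
+**       UPDATE t2 SET x=new.a WHERE y=old.b;
+**       DELETE FROM t3 WHERE z=old.b;
+**     END;
+**
+** yields a step_list of two TriggerStep objects: the first with
+** op==TK_UPDATE, zTarget=="t2", pExprList holding the SET clause and
+** pWhere holding the WHERE clause; the second with op==TK_DELETE and
+** zTarget=="t3".
+*/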
+
+/*
+** The following structure contains information used by the sqliteFix...
+** routines as they walk the parse tree to make database references
+** explicit.
+*/
+typedef struct DbFixer DbFixer;
+struct DbFixer {
+ Parse *pParse; /* The parsing context. Error messages written here */
+ Schema *pSchema; /* Fix items to this schema */
+ u8 bTemp; /* True for TEMP schema entries */
+ const char *zDb; /* Make sure all objects are contained in this database */
+ const char *zType; /* Type of the container - used for error messages */
+ const Token *pName; /* Name of the container - used for error messages */
+};
+
+/*
+** An object used to accumulate the text of a string where we
+** do not necessarily know how big the string will be in the end.
+*/
+struct sqlite3_str {
+ sqlite3 *db; /* Optional database for lookaside. Can be NULL */
+ char *zText; /* The string collected so far */
+ u32 nAlloc; /* Amount of space allocated in zText */
+ u32 mxAlloc; /* Maximum allowed allocation. 0 for no malloc usage */
+ u32 nChar; /* Length of the string so far */
+ u8 accError; /* SQLITE_NOMEM or SQLITE_TOOBIG */
+ u8 printfFlags; /* SQLITE_PRINTF flags below */
+};
+#define SQLITE_PRINTF_INTERNAL 0x01 /* Internal-use-only converters allowed */
+#define SQLITE_PRINTF_SQLFUNC 0x02 /* SQL function arguments to VXPrintf */
+#define SQLITE_PRINTF_MALLOCED 0x04 /* True if zText is allocated space */
+
+#define isMalloced(X) (((X)->printfFlags & SQLITE_PRINTF_MALLOCED)!=0)
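+
+/*
+** Usage sketch via the published wrappers around this object
+** (sqlite3_str_new(), sqlite3_str_appendf(), sqlite3_str_finish(),
+** all part of the public API):
+**
+**     sqlite3_str *p = sqlite3_str_new(db);
+**     sqlite3_str_appendf(p, "SELECT * FROM %Q", zTab);
+**     char *zSql = sqlite3_str_finish(p);
+**     sqlite3_free(zSql);
+**
+** sqlite3_str_finish() returns the accumulated text in memory obtained
+** from sqlite3_malloc(), or NULL on error, and frees the accumulator.
+*/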
+
+
+/*
+** A pointer to this structure is used to communicate information
+** from sqlite3Init and OP_ParseSchema into the sqlite3InitCallback.
+*/
+typedef struct {
+ sqlite3 *db; /* The database being initialized */
+ char **pzErrMsg; /* Error message stored here */
+ int iDb; /* 0 for main database. 1 for TEMP, 2.. for ATTACHed */
+ int rc; /* Result code stored here */
+ u32 mInitFlags; /* Flags controlling error messages */
+ u32 nInitRow; /* Number of rows processed */
+ Pgno mxPage; /* Maximum page number. 0 for no limit. */
+} InitData;
+
+/*
+** Allowed values for mInitFlags
+*/
+#define INITFLAG_AlterTable 0x0001 /* This is a reparse after ALTER TABLE */
+
+/*
+** Structure containing global configuration data for the SQLite library.
+**
+** This structure also contains some state information.
+*/
+struct Sqlite3Config {
+ int bMemstat; /* True to enable memory status */
+ u8 bCoreMutex; /* True to enable core mutexing */
+ u8 bFullMutex; /* True to enable full mutexing */
+ u8 bOpenUri; /* True to interpret filenames as URIs */
+ u8 bUseCis; /* Use covering indices for full-scans */
+ u8 bSmallMalloc; /* Avoid large memory allocations if true */
+ u8 bExtraSchemaChecks; /* Verify type,name,tbl_name in schema */
+ int mxStrlen; /* Maximum string length */
+ int neverCorrupt; /* Database is always well-formed */
+ int szLookaside; /* Default lookaside buffer size */
+ int nLookaside; /* Default lookaside buffer count */
+ int nStmtSpill; /* Stmt-journal spill-to-disk threshold */
+ sqlite3_mem_methods m; /* Low-level memory allocation interface */
+ sqlite3_mutex_methods mutex; /* Low-level mutex interface */
+ sqlite3_pcache_methods2 pcache2; /* Low-level page-cache interface */
+ void *pHeap; /* Heap storage space */
+ int nHeap; /* Size of pHeap[] */
+ int mnReq, mxReq; /* Min and max heap requests sizes */
+ sqlite3_int64 szMmap; /* mmap() space per open file */
+ sqlite3_int64 mxMmap; /* Maximum value for szMmap */
+ void *pPage; /* Page cache memory */
+ int szPage; /* Size of each page in pPage[] */
+ int nPage; /* Number of pages in pPage[] */
+ int mxParserStack; /* maximum depth of the parser stack */
+ int sharedCacheEnabled; /* true if shared-cache mode enabled */
+ u32 szPma; /* Maximum Sorter PMA size */
+  /* The above might be initialized to non-zero. The following, however,
+  ** must always initially be zero. */
+ int isInit; /* True after initialization has finished */
+ int inProgress; /* True while initialization in progress */
+ int isMutexInit; /* True after mutexes are initialized */
+ int isMallocInit; /* True after malloc is initialized */
+  int isPCacheInit;                 /* True after the page cache is initialized */
+ int nRefInitMutex; /* Number of users of pInitMutex */
+ sqlite3_mutex *pInitMutex; /* Mutex used by sqlite3_initialize() */
+ void (*xLog)(void*,int,const char*); /* Function for logging */
+ void *pLogArg; /* First argument to xLog() */
+#ifdef SQLITE_ENABLE_SQLLOG
+ void(*xSqllog)(void*,sqlite3*,const char*, int);
+ void *pSqllogArg;
+#endif
+#ifdef SQLITE_VDBE_COVERAGE
+ /* The following callback (if not NULL) is invoked on every VDBE branch
+ ** operation. Set the callback using SQLITE_TESTCTRL_VDBE_COVERAGE.
+ */
+ void (*xVdbeBranch)(void*,unsigned iSrcLine,u8 eThis,u8 eMx); /* Callback */
+ void *pVdbeBranchArg; /* 1st argument */
+#endif
+#ifdef SQLITE_ENABLE_DESERIALIZE
+ sqlite3_int64 mxMemdbSize; /* Default max memdb size */
+#endif
+#ifndef SQLITE_UNTESTABLE
+ int (*xTestCallback)(int); /* Invoked by sqlite3FaultSim() */
+#endif
+ int bLocaltimeFault; /* True to fail localtime() calls */
+ int iOnceResetThreshold; /* When to reset OP_Once counters */
+ u32 szSorterRef; /* Min size in bytes to use sorter-refs */
+ unsigned int iPrngSeed; /* Alternative fixed seed for the PRNG */
+};
+
+/*
+** This macro is used inside of assert() statements to indicate that
+** the assert is only valid on a well-formed database. Instead of:
+**
+** assert( X );
+**
+** One writes:
+**
+** assert( X || CORRUPT_DB );
+**
+** CORRUPT_DB is true during normal operation. CORRUPT_DB does not indicate
+** that the database is definitely corrupt, only that it might be corrupt.
+** For most test cases, CORRUPT_DB is set to false using a special
+** sqlite3_test_control(). This enables assert() statements to prove
+** things that are always true for well-formed databases.
+*/
+#define CORRUPT_DB (sqlite3Config.neverCorrupt==0)
+
+/*
+** Context pointer passed down through the tree-walk.
+*/
+struct Walker {
+ Parse *pParse; /* Parser context. */
+ int (*xExprCallback)(Walker*, Expr*); /* Callback for expressions */
+ int (*xSelectCallback)(Walker*,Select*); /* Callback for SELECTs */
+ void (*xSelectCallback2)(Walker*,Select*);/* Second callback for SELECTs */
+ int walkerDepth; /* Number of subqueries */
+ u16 eCode; /* A small processing code */
+ union { /* Extra data for callback */
+ NameContext *pNC; /* Naming context */
+ int n; /* A counter */
+ int iCur; /* A cursor number */
+ SrcList *pSrcList; /* FROM clause */
+ struct SrcCount *pSrcCount; /* Counting column references */
+ struct CCurHint *pCCurHint; /* Used by codeCursorHint() */
+ int *aiCol; /* array of column indexes */
+ struct IdxCover *pIdxCover; /* Check for index coverage */
+ struct IdxExprTrans *pIdxTrans; /* Convert idxed expr to column */
+ ExprList *pGroupBy; /* GROUP BY clause */
+ Select *pSelect; /* HAVING to WHERE clause ctx */
+ struct WindowRewrite *pRewrite; /* Window rewrite context */
+ struct WhereConst *pConst; /* WHERE clause constants */
+ struct RenameCtx *pRename; /* RENAME COLUMN context */
+ struct Table *pTab; /* Table of generated column */
+ struct SrcList_item *pSrcItem; /* A single FROM clause item */
+ } u;
+};
+
+/* Forward declarations */
+SQLITE_PRIVATE int sqlite3WalkExpr(Walker*, Expr*);
+SQLITE_PRIVATE int sqlite3WalkExprList(Walker*, ExprList*);
+SQLITE_PRIVATE int sqlite3WalkSelect(Walker*, Select*);
+SQLITE_PRIVATE int sqlite3WalkSelectExpr(Walker*, Select*);
+SQLITE_PRIVATE int sqlite3WalkSelectFrom(Walker*, Select*);
+SQLITE_PRIVATE int sqlite3ExprWalkNoop(Walker*, Expr*);
+SQLITE_PRIVATE int sqlite3SelectWalkNoop(Walker*, Select*);
+SQLITE_PRIVATE int sqlite3SelectWalkFail(Walker*, Select*);
+SQLITE_PRIVATE int sqlite3WalkerDepthIncrease(Walker*,Select*);
+SQLITE_PRIVATE void sqlite3WalkerDepthDecrease(Walker*,Select*);
+
+#ifdef SQLITE_DEBUG
+SQLITE_PRIVATE void sqlite3SelectWalkAssert2(Walker*, Select*);
+#endif
+
+/*
+** Return code from the parse-tree walking primitives and their
+** callbacks.
+*/
+#define WRC_Continue 0 /* Continue down into children */
+#define WRC_Prune 1 /* Omit children but continue walking siblings */
+#define WRC_Abort 2 /* Abandon the tree walk */
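+
+/*
+** Usage sketch (countExprStep is a hypothetical callback): a walker
+** that counts every Expr node in a tree can be built from the
+** primitives above by returning WRC_Continue so the walk descends
+** into all children:
+**
+**     static int countExprStep(Walker *pWalker, Expr *pExpr){
+**       pWalker->u.n++;
+**       return WRC_Continue;
+**     }
+**
+**     Walker w;
+**     memset(&w, 0, sizeof(w));
+**     w.xExprCallback = countExprStep;
+**     w.xSelectCallback = sqlite3SelectWalkNoop;
+**     sqlite3WalkExpr(&w, pExpr);
+**
+** After the walk, w.u.n holds the number of expression nodes visited.
+*/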
+
+/*
+** An instance of this structure represents a set of one or more CTEs
+** (common table expressions) created by a single WITH clause.
+*/
+struct With {
+ int nCte; /* Number of CTEs in the WITH clause */
+ With *pOuter; /* Containing WITH clause, or NULL */
+ struct Cte { /* For each CTE in the WITH clause.... */
+ char *zName; /* Name of this CTE */
+ ExprList *pCols; /* List of explicit column names, or NULL */
+ Select *pSelect; /* The definition of this CTE */
+ const char *zCteErr; /* Error message for circular references */
+ } a[1];
+};
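+
+/*
+** Illustrative mapping: for
+**
+**     WITH c1(a) AS (SELECT 1), c2 AS (SELECT 2) SELECT ...
+**
+** a single With object is allocated with nCte==2, where a[0] describes
+** c1 (zName=="c1", pCols naming column "a") and a[1] describes c2.
+** The declared size of a[1] is only a placeholder: the object is
+** allocated large enough to hold all nCte entries.
+*/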
+
+#ifdef SQLITE_DEBUG
+/*
+** An instance of the TreeView object is used for printing the content of
+** data structures on sqlite3DebugPrintf() using a tree-like view.
+*/
+struct TreeView {
+ int iLevel; /* Which level of the tree we are on */
+ u8 bLine[100]; /* Draw vertical in column i if bLine[i] is true */
+};
+#endif /* SQLITE_DEBUG */
+
+/*
+** This object is used in various ways, most (but not all) related to window
+** functions.
+**
+** (1) A single instance of this structure is attached to the
+** the Expr.y.pWin field for each window function in an expression tree.
+** This object holds the information contained in the OVER clause,
+** plus additional fields used during code generation.
+**
+** (2) All window functions in a single SELECT form a linked-list
+** attached to Select.pWin. The Window.pFunc and Window.pExpr
+** fields point back to the expression that is the window function.
+**
+** (3) The terms of the WINDOW clause of a SELECT are instances of this
+** object on a linked list attached to Select.pWinDefn.
+**
+** (4) For an aggregate function with a FILTER clause, an instance
+** of this object is stored in Expr.y.pWin with eFrmType set to
+** TK_FILTER. In this case the only field used is Window.pFilter.
+**
+** Uses (1) and (2) are really the same Window object, which just happens
+** to be accessible in two different ways. The objects of use case (3)
+** are separate.
+*/
+struct Window {
+ char *zName; /* Name of window (may be NULL) */
+ char *zBase; /* Name of base window for chaining (may be NULL) */
+ ExprList *pPartition; /* PARTITION BY clause */
+ ExprList *pOrderBy; /* ORDER BY clause */
+ u8 eFrmType; /* TK_RANGE, TK_GROUPS, TK_ROWS, or 0 */
+ u8 eStart; /* UNBOUNDED, CURRENT, PRECEDING or FOLLOWING */
+ u8 eEnd; /* UNBOUNDED, CURRENT, PRECEDING or FOLLOWING */
+ u8 bImplicitFrame; /* True if frame was implicitly specified */
+ u8 eExclude; /* TK_NO, TK_CURRENT, TK_TIES, TK_GROUP, or 0 */
+ Expr *pStart; /* Expression for "<expr> PRECEDING" */
+ Expr *pEnd; /* Expression for "<expr> FOLLOWING" */
+ Window **ppThis; /* Pointer to this object in Select.pWin list */
+ Window *pNextWin; /* Next window function belonging to this SELECT */
+ Expr *pFilter; /* The FILTER expression */
+ FuncDef *pFunc; /* The function */
+ int iEphCsr; /* Partition buffer or Peer buffer */
+ int regAccum; /* Accumulator */
+ int regResult; /* Interim result */
+ int csrApp; /* Function cursor (used by min/max) */
+ int regApp; /* Function register (also used by min/max) */
+ int regPart; /* Array of registers for PARTITION BY values */
+ Expr *pOwner; /* Expression object this window is attached to */
+ int nBufferCol; /* Number of columns in buffer table */
+ int iArgCol; /* Offset of first argument for this function */
+ int regOne; /* Register containing constant value 1 */
+ int regStartRowid;
+ int regEndRowid;
+ u8 bExprArgs; /* Defer evaluation of window function arguments
+ ** due to the SQLITE_SUBTYPE flag */
+};
+
+#ifndef SQLITE_OMIT_WINDOWFUNC
+SQLITE_PRIVATE void sqlite3WindowDelete(sqlite3*, Window*);
+SQLITE_PRIVATE void sqlite3WindowUnlinkFromSelect(Window*);
+SQLITE_PRIVATE void sqlite3WindowListDelete(sqlite3 *db, Window *p);
+SQLITE_PRIVATE Window *sqlite3WindowAlloc(Parse*, int, int, Expr*, int , Expr*, u8);
+SQLITE_PRIVATE void sqlite3WindowAttach(Parse*, Expr*, Window*);
+SQLITE_PRIVATE void sqlite3WindowLink(Select *pSel, Window *pWin);
+SQLITE_PRIVATE int sqlite3WindowCompare(Parse*, Window*, Window*, int);
+SQLITE_PRIVATE void sqlite3WindowCodeInit(Parse*, Select*);
+SQLITE_PRIVATE void sqlite3WindowCodeStep(Parse*, Select*, WhereInfo*, int, int);
+SQLITE_PRIVATE int sqlite3WindowRewrite(Parse*, Select*);
+SQLITE_PRIVATE int sqlite3ExpandSubquery(Parse*, struct SrcList_item*);
+SQLITE_PRIVATE void sqlite3WindowUpdate(Parse*, Window*, Window*, FuncDef*);
+SQLITE_PRIVATE Window *sqlite3WindowDup(sqlite3 *db, Expr *pOwner, Window *p);
+SQLITE_PRIVATE Window *sqlite3WindowListDup(sqlite3 *db, Window *p);
+SQLITE_PRIVATE void sqlite3WindowFunctions(void);
+SQLITE_PRIVATE void sqlite3WindowChain(Parse*, Window*, Window*);
+SQLITE_PRIVATE Window *sqlite3WindowAssemble(Parse*, Window*, ExprList*, ExprList*, Token*);
+#else
+# define sqlite3WindowDelete(a,b)
+# define sqlite3WindowFunctions()
+# define sqlite3WindowAttach(a,b,c)
+#endif
+
+/*
+** Assuming zIn points to the first byte of a UTF-8 character,
+** advance zIn to point to the first byte of the next UTF-8 character.
+*/
+#define SQLITE_SKIP_UTF8(zIn) { \
+ if( (*(zIn++))>=0xc0 ){ \
+ while( (*zIn & 0xc0)==0x80 ){ zIn++; } \
+ } \
+}
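+
+/*
+** For example, assuming z is of type const u8* (the macro compares
+** byte values, so a plain signed char pointer would misbehave), the
+** character count of a NUL-terminated UTF-8 string is:
+**
+**     int nChar = 0;
+**     while( *z ){ SQLITE_SKIP_UTF8(z); nChar++; }
+*/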
+
+/*
+** The SQLITE_*_BKPT macros are substitutes for the error codes with
+** the same name but without the _BKPT suffix. These macros invoke
+** routines that report the line-number on which the error originated
+** using sqlite3_log(). The routines also provide a convenient place
+** to set a debugger breakpoint.
+*/
+SQLITE_PRIVATE int sqlite3ReportError(int iErr, int lineno, const char *zType);
+SQLITE_PRIVATE int sqlite3CorruptError(int);
+SQLITE_PRIVATE int sqlite3MisuseError(int);
+SQLITE_PRIVATE int sqlite3CantopenError(int);
+#define SQLITE_CORRUPT_BKPT sqlite3CorruptError(__LINE__)
+#define SQLITE_MISUSE_BKPT sqlite3MisuseError(__LINE__)
+#define SQLITE_CANTOPEN_BKPT sqlite3CantopenError(__LINE__)
+#ifdef SQLITE_DEBUG
+SQLITE_PRIVATE int sqlite3NomemError(int);
+SQLITE_PRIVATE int sqlite3IoerrnomemError(int);
+# define SQLITE_NOMEM_BKPT sqlite3NomemError(__LINE__)
+# define SQLITE_IOERR_NOMEM_BKPT sqlite3IoerrnomemError(__LINE__)
+#else
+# define SQLITE_NOMEM_BKPT SQLITE_NOMEM
+# define SQLITE_IOERR_NOMEM_BKPT SQLITE_IOERR_NOMEM
+#endif
+#if defined(SQLITE_DEBUG) || defined(SQLITE_ENABLE_CORRUPT_PGNO)
+SQLITE_PRIVATE int sqlite3CorruptPgnoError(int,Pgno);
+# define SQLITE_CORRUPT_PGNO(P) sqlite3CorruptPgnoError(__LINE__,(P))
+#else
+# define SQLITE_CORRUPT_PGNO(P) sqlite3CorruptError(__LINE__)
+#endif
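+
+/*
+** For example, instead of "return SQLITE_CORRUPT;", internal code
+** writes:
+**
+**     return SQLITE_CORRUPT_BKPT;
+**
+** which logs the offending source line via sqlite3_log() before
+** yielding the ordinary SQLITE_CORRUPT result code.
+*/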
+
+/*
+** FTS3 and FTS4 both require virtual table support
+*/
+#if defined(SQLITE_OMIT_VIRTUALTABLE)
+# undef SQLITE_ENABLE_FTS3
+# undef SQLITE_ENABLE_FTS4
+#endif
+
+/*
+** FTS4 is really an extension for FTS3. It is enabled using the
+** SQLITE_ENABLE_FTS3 macro. But to avoid confusion we also allow the
+** SQLITE_ENABLE_FTS4 macro to serve as an alias for SQLITE_ENABLE_FTS3.
+*/
+#if defined(SQLITE_ENABLE_FTS4) && !defined(SQLITE_ENABLE_FTS3)
+# define SQLITE_ENABLE_FTS3 1
+#endif
+
+/*
+** The ctype.h header is needed for non-ASCII systems. It is also
+** needed by FTS3 when FTS3 is included in the amalgamation.
+*/
+#if !defined(SQLITE_ASCII) || \
+ (defined(SQLITE_ENABLE_FTS3) && defined(SQLITE_AMALGAMATION))
+# include <ctype.h>
+#endif
+
+/*
+** The following macros mimic the standard library functions toupper(),
+** isspace(), isalnum(), isalpha(), isdigit(), isxdigit(), and tolower().
+** The sqlite versions only work for ASCII characters, regardless of locale.
+*/
+#ifdef SQLITE_ASCII
+# define sqlite3Toupper(x) ((x)&~(sqlite3CtypeMap[(unsigned char)(x)]&0x20))
+# define sqlite3Isspace(x) (sqlite3CtypeMap[(unsigned char)(x)]&0x01)
+# define sqlite3Isalnum(x) (sqlite3CtypeMap[(unsigned char)(x)]&0x06)
+# define sqlite3Isalpha(x) (sqlite3CtypeMap[(unsigned char)(x)]&0x02)
+# define sqlite3Isdigit(x) (sqlite3CtypeMap[(unsigned char)(x)]&0x04)
+# define sqlite3Isxdigit(x) (sqlite3CtypeMap[(unsigned char)(x)]&0x08)
+# define sqlite3Tolower(x) (sqlite3UpperToLower[(unsigned char)(x)])
+# define sqlite3Isquote(x) (sqlite3CtypeMap[(unsigned char)(x)]&0x80)
+#else
+# define sqlite3Toupper(x) toupper((unsigned char)(x))
+# define sqlite3Isspace(x) isspace((unsigned char)(x))
+# define sqlite3Isalnum(x) isalnum((unsigned char)(x))
+# define sqlite3Isalpha(x) isalpha((unsigned char)(x))
+# define sqlite3Isdigit(x) isdigit((unsigned char)(x))
+# define sqlite3Isxdigit(x) isxdigit((unsigned char)(x))
+# define sqlite3Tolower(x) tolower((unsigned char)(x))
+# define sqlite3Isquote(x) ((x)=='"'||(x)=='\''||(x)=='['||(x)=='`')
+#endif
+SQLITE_PRIVATE int sqlite3IsIdChar(u8);
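+
+/*
+** For example, sqlite3Isalpha() is true only for 'a'..'z' and 'A'..'Z',
+** whereas the C library isalpha() may also accept locale-specific
+** letters above 0x7f.  The ASCII variants are also safe for any char
+** value because the argument is cast to unsigned char before indexing
+** sqlite3CtypeMap[].
+*/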
+
+/*
+** Internal function prototypes
+*/
+SQLITE_PRIVATE int sqlite3StrICmp(const char*,const char*);
+SQLITE_PRIVATE int sqlite3Strlen30(const char*);
+#define sqlite3Strlen30NN(C) (strlen(C)&0x3fffffff)
+SQLITE_PRIVATE char *sqlite3ColumnType(Column*,char*);
+#define sqlite3StrNICmp sqlite3_strnicmp
+
+SQLITE_PRIVATE int sqlite3MallocInit(void);
+SQLITE_PRIVATE void sqlite3MallocEnd(void);
+SQLITE_PRIVATE void *sqlite3Malloc(u64);
+SQLITE_PRIVATE void *sqlite3MallocZero(u64);
+SQLITE_PRIVATE void *sqlite3DbMallocZero(sqlite3*, u64);
+SQLITE_PRIVATE void *sqlite3DbMallocRaw(sqlite3*, u64);
+SQLITE_PRIVATE void *sqlite3DbMallocRawNN(sqlite3*, u64);
+SQLITE_PRIVATE char *sqlite3DbStrDup(sqlite3*,const char*);
+SQLITE_PRIVATE char *sqlite3DbStrNDup(sqlite3*,const char*, u64);
+SQLITE_PRIVATE char *sqlite3DbSpanDup(sqlite3*,const char*,const char*);
+SQLITE_PRIVATE void *sqlite3Realloc(void*, u64);
+SQLITE_PRIVATE void *sqlite3DbReallocOrFree(sqlite3 *, void *, u64);
+SQLITE_PRIVATE void *sqlite3DbRealloc(sqlite3 *, void *, u64);
+SQLITE_PRIVATE void sqlite3DbFree(sqlite3*, void*);
+SQLITE_PRIVATE void sqlite3DbFreeNN(sqlite3*, void*);
+SQLITE_PRIVATE int sqlite3MallocSize(void*);
+SQLITE_PRIVATE int sqlite3DbMallocSize(sqlite3*, void*);
+SQLITE_PRIVATE void *sqlite3PageMalloc(int);
+SQLITE_PRIVATE void sqlite3PageFree(void*);
+SQLITE_PRIVATE void sqlite3MemSetDefault(void);
+#ifndef SQLITE_UNTESTABLE
+SQLITE_PRIVATE void sqlite3BenignMallocHooks(void (*)(void), void (*)(void));
+#endif
+SQLITE_PRIVATE int sqlite3HeapNearlyFull(void);
+
+/*
+** On systems with ample stack space and that support alloca(), make
+** use of alloca() to obtain space for large automatic objects. By default,
+** obtain space from malloc().
+**
+** The alloca() routine never returns NULL. This will cause code paths
+** that deal with sqlite3StackAllocRaw() failures to be unreachable.
+*/
+#ifdef SQLITE_USE_ALLOCA
+# define sqlite3StackAllocRaw(D,N) alloca(N)
+# define sqlite3StackAllocZero(D,N) memset(alloca(N), 0, N)
+# define sqlite3StackFree(D,P)
+#else
+# define sqlite3StackAllocRaw(D,N) sqlite3DbMallocRaw(D,N)
+# define sqlite3StackAllocZero(D,N) sqlite3DbMallocZero(D,N)
+# define sqlite3StackFree(D,P) sqlite3DbFree(D,P)
+#endif
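+
+/*
+** Usage sketch: allocation and release must be paired through these
+** macros so the same code works whether or not SQLITE_USE_ALLOCA is
+** defined.  The NULL check matters on the malloc() fallback path:
+**
+**     char *z = sqlite3StackAllocRaw(db, n);
+**     if( z ){
+**       ... use the n bytes at z ...
+**       sqlite3StackFree(db, z);
+**     }
+*/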
+
+/* Do not allow both MEMSYS5 and MEMSYS3 to be defined together. If they
+** are, disable MEMSYS3.
+*/
+#ifdef SQLITE_ENABLE_MEMSYS5
+SQLITE_PRIVATE const sqlite3_mem_methods *sqlite3MemGetMemsys5(void);
+#undef SQLITE_ENABLE_MEMSYS3
+#endif
+#ifdef SQLITE_ENABLE_MEMSYS3
+SQLITE_PRIVATE const sqlite3_mem_methods *sqlite3MemGetMemsys3(void);
+#endif
+
+
+#ifndef SQLITE_MUTEX_OMIT
+SQLITE_PRIVATE sqlite3_mutex_methods const *sqlite3DefaultMutex(void);
+SQLITE_PRIVATE sqlite3_mutex_methods const *sqlite3NoopMutex(void);
+SQLITE_PRIVATE sqlite3_mutex *sqlite3MutexAlloc(int);
+SQLITE_PRIVATE int sqlite3MutexInit(void);
+SQLITE_PRIVATE int sqlite3MutexEnd(void);
+#endif
+#if !defined(SQLITE_MUTEX_OMIT) && !defined(SQLITE_MUTEX_NOOP)
+SQLITE_PRIVATE void sqlite3MemoryBarrier(void);
+#else
+# define sqlite3MemoryBarrier()
+#endif
+
+SQLITE_PRIVATE sqlite3_int64 sqlite3StatusValue(int);
+SQLITE_PRIVATE void sqlite3StatusUp(int, int);
+SQLITE_PRIVATE void sqlite3StatusDown(int, int);
+SQLITE_PRIVATE void sqlite3StatusHighwater(int, int);
+SQLITE_PRIVATE int sqlite3LookasideUsed(sqlite3*,int*);
+
+/* Access to mutexes used by sqlite3_status() */
+SQLITE_PRIVATE sqlite3_mutex *sqlite3Pcache1Mutex(void);
+SQLITE_PRIVATE sqlite3_mutex *sqlite3MallocMutex(void);
+
+#if defined(SQLITE_ENABLE_MULTITHREADED_CHECKS) && !defined(SQLITE_MUTEX_OMIT)
+SQLITE_PRIVATE void sqlite3MutexWarnOnContention(sqlite3_mutex*);
+#else
+# define sqlite3MutexWarnOnContention(x)
+#endif
+
+#ifndef SQLITE_OMIT_FLOATING_POINT
+# define EXP754 (((u64)0x7ff)<<52)
+# define MAN754 ((((u64)1)<<52)-1)
+# define IsNaN(X) (((X)&EXP754)==EXP754 && ((X)&MAN754)!=0)
+SQLITE_PRIVATE int sqlite3IsNaN(double);
+#else
+# define IsNaN(X) 0
+# define sqlite3IsNaN(X) 0
+#endif
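+
+/*
+** The IsNaN() test works on the raw IEEE-754 bit pattern of a double:
+** a value is NaN exactly when all eleven exponent bits (EXP754) are
+** set and the 52-bit mantissa (MAN754) is non-zero.  For example, the
+** bit pattern 0x7ff8000000000000 (the common quiet NaN) satisfies
+** IsNaN(), while 0x7ff0000000000000 (+Infinity) does not because its
+** mantissa bits are all zero.
+*/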
+
+/*
+** An instance of the following structure holds information about SQL
+** functions arguments that are the parameters to the printf() function.
+*/
+struct PrintfArguments {
+ int nArg; /* Total number of arguments */
+ int nUsed; /* Number of arguments used so far */
+ sqlite3_value **apArg; /* The argument values */
+};
+
+SQLITE_PRIVATE char *sqlite3MPrintf(sqlite3*,const char*, ...);
+SQLITE_PRIVATE char *sqlite3VMPrintf(sqlite3*,const char*, va_list);
+#if defined(SQLITE_DEBUG) || defined(SQLITE_HAVE_OS_TRACE)
+SQLITE_PRIVATE void sqlite3DebugPrintf(const char*, ...);
+#endif
+#if defined(SQLITE_TEST)
+SQLITE_PRIVATE void *sqlite3TestTextToPtr(const char*);
+#endif
+
+#if defined(SQLITE_DEBUG)
+SQLITE_PRIVATE void sqlite3TreeViewExpr(TreeView*, const Expr*, u8);
+SQLITE_PRIVATE void sqlite3TreeViewBareExprList(TreeView*, const ExprList*, const char*);
+SQLITE_PRIVATE void sqlite3TreeViewExprList(TreeView*, const ExprList*, u8, const char*);
+SQLITE_PRIVATE void sqlite3TreeViewSrcList(TreeView*, const SrcList*);
+SQLITE_PRIVATE void sqlite3TreeViewSelect(TreeView*, const Select*, u8);
+SQLITE_PRIVATE void sqlite3TreeViewWith(TreeView*, const With*, u8);
+#ifndef SQLITE_OMIT_WINDOWFUNC
+SQLITE_PRIVATE void sqlite3TreeViewWindow(TreeView*, const Window*, u8);
+SQLITE_PRIVATE void sqlite3TreeViewWinFunc(TreeView*, const Window*, u8);
+#endif
+#endif
+
+
+SQLITE_PRIVATE void sqlite3SetString(char **, sqlite3*, const char*);
+SQLITE_PRIVATE void sqlite3ErrorMsg(Parse*, const char*, ...);
+SQLITE_PRIVATE int sqlite3ErrorToParser(sqlite3*,int);
+SQLITE_PRIVATE void sqlite3Dequote(char*);
+SQLITE_PRIVATE void sqlite3DequoteExpr(Expr*);
+SQLITE_PRIVATE void sqlite3TokenInit(Token*,char*);
+SQLITE_PRIVATE int sqlite3KeywordCode(const unsigned char*, int);
+SQLITE_PRIVATE int sqlite3RunParser(Parse*, const char*, char **);
+SQLITE_PRIVATE void sqlite3FinishCoding(Parse*);
+SQLITE_PRIVATE int sqlite3GetTempReg(Parse*);
+SQLITE_PRIVATE void sqlite3ReleaseTempReg(Parse*,int);
+SQLITE_PRIVATE int sqlite3GetTempRange(Parse*,int);
+SQLITE_PRIVATE void sqlite3ReleaseTempRange(Parse*,int,int);
+SQLITE_PRIVATE void sqlite3ClearTempRegCache(Parse*);
+#ifdef SQLITE_DEBUG
+SQLITE_PRIVATE int sqlite3NoTempsInRange(Parse*,int,int);
+#endif
+SQLITE_PRIVATE Expr *sqlite3ExprAlloc(sqlite3*,int,const Token*,int);
+SQLITE_PRIVATE Expr *sqlite3Expr(sqlite3*,int,const char*);
+SQLITE_PRIVATE void sqlite3ExprAttachSubtrees(sqlite3*,Expr*,Expr*,Expr*);
+SQLITE_PRIVATE Expr *sqlite3PExpr(Parse*, int, Expr*, Expr*);
+SQLITE_PRIVATE void sqlite3PExprAddSelect(Parse*, Expr*, Select*);
+SQLITE_PRIVATE Expr *sqlite3ExprAnd(Parse*,Expr*, Expr*);
+SQLITE_PRIVATE Expr *sqlite3ExprSimplifiedAndOr(Expr*);
+SQLITE_PRIVATE Expr *sqlite3ExprFunction(Parse*,ExprList*, Token*, int);
+SQLITE_PRIVATE void sqlite3ExprFunctionUsable(Parse*,Expr*,FuncDef*);
+SQLITE_PRIVATE void sqlite3ExprAssignVarNumber(Parse*, Expr*, u32);
+SQLITE_PRIVATE void sqlite3ExprDelete(sqlite3*, Expr*);
+SQLITE_PRIVATE void sqlite3ExprUnmapAndDelete(Parse*, Expr*);
+SQLITE_PRIVATE ExprList *sqlite3ExprListAppend(Parse*,ExprList*,Expr*);
+SQLITE_PRIVATE ExprList *sqlite3ExprListAppendVector(Parse*,ExprList*,IdList*,Expr*);
+SQLITE_PRIVATE void sqlite3ExprListSetSortOrder(ExprList*,int,int);
+SQLITE_PRIVATE void sqlite3ExprListSetName(Parse*,ExprList*,Token*,int);
+SQLITE_PRIVATE void sqlite3ExprListSetSpan(Parse*,ExprList*,const char*,const char*);
+SQLITE_PRIVATE void sqlite3ExprListDelete(sqlite3*, ExprList*);
+SQLITE_PRIVATE u32 sqlite3ExprListFlags(const ExprList*);
+SQLITE_PRIVATE int sqlite3IndexHasDuplicateRootPage(Index*);
+SQLITE_PRIVATE int sqlite3Init(sqlite3*, char**);
+SQLITE_PRIVATE int sqlite3InitCallback(void*, int, char**, char**);
+SQLITE_PRIVATE int sqlite3InitOne(sqlite3*, int, char**, u32);
+SQLITE_PRIVATE void sqlite3Pragma(Parse*,Token*,Token*,Token*,int);
+#ifndef SQLITE_OMIT_VIRTUALTABLE
+SQLITE_PRIVATE Module *sqlite3PragmaVtabRegister(sqlite3*,const char *zName);
+#endif
+SQLITE_PRIVATE void sqlite3ResetAllSchemasOfConnection(sqlite3*);
+SQLITE_PRIVATE void sqlite3ResetOneSchema(sqlite3*,int);
+SQLITE_PRIVATE void sqlite3CollapseDatabaseArray(sqlite3*);
+SQLITE_PRIVATE void sqlite3CommitInternalChanges(sqlite3*);
+SQLITE_PRIVATE void sqlite3DeleteColumnNames(sqlite3*,Table*);
+SQLITE_PRIVATE int sqlite3ColumnsFromExprList(Parse*,ExprList*,i16*,Column**);
+SQLITE_PRIVATE void sqlite3SelectAddColumnTypeAndCollation(Parse*,Table*,Select*,char);
+SQLITE_PRIVATE Table *sqlite3ResultSetOfSelect(Parse*,Select*,char);
+SQLITE_PRIVATE void sqlite3OpenSchemaTable(Parse *, int);
+SQLITE_PRIVATE Index *sqlite3PrimaryKeyIndex(Table*);
+SQLITE_PRIVATE i16 sqlite3TableColumnToIndex(Index*, i16);
+#ifdef SQLITE_OMIT_GENERATED_COLUMNS
+# define sqlite3TableColumnToStorage(T,X) (X) /* No-op pass-through */
+# define sqlite3StorageColumnToTable(T,X) (X) /* No-op pass-through */
+#else
+SQLITE_PRIVATE i16 sqlite3TableColumnToStorage(Table*, i16);
+SQLITE_PRIVATE i16 sqlite3StorageColumnToTable(Table*, i16);
+#endif
+SQLITE_PRIVATE void sqlite3StartTable(Parse*,Token*,Token*,int,int,int,int);
+#if SQLITE_ENABLE_HIDDEN_COLUMNS
+SQLITE_PRIVATE void sqlite3ColumnPropertiesFromName(Table*, Column*);
+#else
+# define sqlite3ColumnPropertiesFromName(T,C) /* no-op */
+#endif
+SQLITE_PRIVATE void sqlite3AddColumn(Parse*,Token*,Token*);
+SQLITE_PRIVATE void sqlite3AddNotNull(Parse*, int);
+SQLITE_PRIVATE void sqlite3AddPrimaryKey(Parse*, ExprList*, int, int, int);
+SQLITE_PRIVATE void sqlite3AddCheckConstraint(Parse*, Expr*);
+SQLITE_PRIVATE void sqlite3AddDefaultValue(Parse*,Expr*,const char*,const char*);
+SQLITE_PRIVATE void sqlite3AddCollateType(Parse*, Token*);
+SQLITE_PRIVATE void sqlite3AddGenerated(Parse*,Expr*,Token*);
+SQLITE_PRIVATE void sqlite3EndTable(Parse*,Token*,Token*,u8,Select*);
+SQLITE_PRIVATE int sqlite3ParseUri(const char*,const char*,unsigned int*,
+ sqlite3_vfs**,char**,char **);
+#define sqlite3CodecQueryParameters(A,B,C) 0
+SQLITE_PRIVATE Btree *sqlite3DbNameToBtree(sqlite3*,const char*);
+
+#ifdef SQLITE_UNTESTABLE
+# define sqlite3FaultSim(X) SQLITE_OK
+#else
+SQLITE_PRIVATE int sqlite3FaultSim(int);
+#endif
+
+SQLITE_PRIVATE Bitvec *sqlite3BitvecCreate(u32);
+SQLITE_PRIVATE int sqlite3BitvecTest(Bitvec*, u32);
+SQLITE_PRIVATE int sqlite3BitvecTestNotNull(Bitvec*, u32);
+SQLITE_PRIVATE int sqlite3BitvecSet(Bitvec*, u32);
+SQLITE_PRIVATE void sqlite3BitvecClear(Bitvec*, u32, void*);
+SQLITE_PRIVATE void sqlite3BitvecDestroy(Bitvec*);
+SQLITE_PRIVATE u32 sqlite3BitvecSize(Bitvec*);
+#ifndef SQLITE_UNTESTABLE
+SQLITE_PRIVATE int sqlite3BitvecBuiltinTest(int,int*);
+#endif
+
+SQLITE_PRIVATE RowSet *sqlite3RowSetInit(sqlite3*);
+SQLITE_PRIVATE void sqlite3RowSetDelete(void*);
+SQLITE_PRIVATE void sqlite3RowSetClear(void*);
+SQLITE_PRIVATE void sqlite3RowSetInsert(RowSet*, i64);
+SQLITE_PRIVATE int sqlite3RowSetTest(RowSet*, int iBatch, i64);
+SQLITE_PRIVATE int sqlite3RowSetNext(RowSet*, i64*);
+
+SQLITE_PRIVATE void sqlite3CreateView(Parse*,Token*,Token*,Token*,ExprList*,Select*,int,int);
+
+#if !defined(SQLITE_OMIT_VIEW) || !defined(SQLITE_OMIT_VIRTUALTABLE)
+SQLITE_PRIVATE int sqlite3ViewGetColumnNames(Parse*,Table*);
+#else
+# define sqlite3ViewGetColumnNames(A,B) 0
+#endif
+
+#if SQLITE_MAX_ATTACHED>30
+SQLITE_PRIVATE int sqlite3DbMaskAllZero(yDbMask);
+#endif
+SQLITE_PRIVATE void sqlite3DropTable(Parse*, SrcList*, int, int);
+SQLITE_PRIVATE void sqlite3CodeDropTable(Parse*, Table*, int, int);
+SQLITE_PRIVATE void sqlite3DeleteTable(sqlite3*, Table*);
+SQLITE_PRIVATE void sqlite3FreeIndex(sqlite3*, Index*);
+#ifndef SQLITE_OMIT_AUTOINCREMENT
+SQLITE_PRIVATE void sqlite3AutoincrementBegin(Parse *pParse);
+SQLITE_PRIVATE void sqlite3AutoincrementEnd(Parse *pParse);
+#else
+# define sqlite3AutoincrementBegin(X)
+# define sqlite3AutoincrementEnd(X)
+#endif
+SQLITE_PRIVATE void sqlite3Insert(Parse*, SrcList*, Select*, IdList*, int, Upsert*);
+#ifndef SQLITE_OMIT_GENERATED_COLUMNS
+SQLITE_PRIVATE void sqlite3ComputeGeneratedColumns(Parse*, int, Table*);
+#endif
+SQLITE_PRIVATE void *sqlite3ArrayAllocate(sqlite3*,void*,int,int*,int*);
+SQLITE_PRIVATE IdList *sqlite3IdListAppend(Parse*, IdList*, Token*);
+SQLITE_PRIVATE int sqlite3IdListIndex(IdList*,const char*);
+SQLITE_PRIVATE SrcList *sqlite3SrcListEnlarge(Parse*, SrcList*, int, int);
+SQLITE_PRIVATE SrcList *sqlite3SrcListAppendList(Parse *pParse, SrcList *p1, SrcList *p2);
+SQLITE_PRIVATE SrcList *sqlite3SrcListAppend(Parse*, SrcList*, Token*, Token*);
+SQLITE_PRIVATE SrcList *sqlite3SrcListAppendFromTerm(Parse*, SrcList*, Token*, Token*,
+ Token*, Select*, Expr*, IdList*);
+SQLITE_PRIVATE void sqlite3SrcListIndexedBy(Parse *, SrcList *, Token *);
+SQLITE_PRIVATE void sqlite3SrcListFuncArgs(Parse*, SrcList*, ExprList*);
+SQLITE_PRIVATE int sqlite3IndexedByLookup(Parse *, struct SrcList_item *);
+SQLITE_PRIVATE void sqlite3SrcListShiftJoinType(SrcList*);
+SQLITE_PRIVATE void sqlite3SrcListAssignCursors(Parse*, SrcList*);
+SQLITE_PRIVATE void sqlite3IdListDelete(sqlite3*, IdList*);
+SQLITE_PRIVATE void sqlite3SrcListDelete(sqlite3*, SrcList*);
+SQLITE_PRIVATE Index *sqlite3AllocateIndexObject(sqlite3*,i16,int,char**);
+SQLITE_PRIVATE void sqlite3CreateIndex(Parse*,Token*,Token*,SrcList*,ExprList*,int,Token*,
+ Expr*, int, int, u8);
+SQLITE_PRIVATE void sqlite3DropIndex(Parse*, SrcList*, int);
+SQLITE_PRIVATE int sqlite3Select(Parse*, Select*, SelectDest*);
+SQLITE_PRIVATE Select *sqlite3SelectNew(Parse*,ExprList*,SrcList*,Expr*,ExprList*,
+ Expr*,ExprList*,u32,Expr*);
+SQLITE_PRIVATE void sqlite3SelectDelete(sqlite3*, Select*);
+SQLITE_PRIVATE Table *sqlite3SrcListLookup(Parse*, SrcList*);
+SQLITE_PRIVATE int sqlite3IsReadOnly(Parse*, Table*, int);
+SQLITE_PRIVATE void sqlite3OpenTable(Parse*, int iCur, int iDb, Table*, int);
+#if defined(SQLITE_ENABLE_UPDATE_DELETE_LIMIT) && !defined(SQLITE_OMIT_SUBQUERY)
+SQLITE_PRIVATE Expr *sqlite3LimitWhere(Parse*,SrcList*,Expr*,ExprList*,Expr*,char*);
+#endif
+SQLITE_PRIVATE void sqlite3DeleteFrom(Parse*, SrcList*, Expr*, ExprList*, Expr*);
+SQLITE_PRIVATE void sqlite3Update(Parse*, SrcList*, ExprList*,Expr*,int,ExprList*,Expr*,
+ Upsert*);
+SQLITE_PRIVATE WhereInfo *sqlite3WhereBegin(Parse*,SrcList*,Expr*,ExprList*,ExprList*,u16,int);
+SQLITE_PRIVATE void sqlite3WhereEnd(WhereInfo*);
+SQLITE_PRIVATE LogEst sqlite3WhereOutputRowCount(WhereInfo*);
+SQLITE_PRIVATE int sqlite3WhereIsDistinct(WhereInfo*);
+SQLITE_PRIVATE int sqlite3WhereIsOrdered(WhereInfo*);
+SQLITE_PRIVATE int sqlite3WhereOrderByLimitOptLabel(WhereInfo*);
+SQLITE_PRIVATE int sqlite3WhereIsSorted(WhereInfo*);
+SQLITE_PRIVATE int sqlite3WhereContinueLabel(WhereInfo*);
+SQLITE_PRIVATE int sqlite3WhereBreakLabel(WhereInfo*);
+SQLITE_PRIVATE int sqlite3WhereOkOnePass(WhereInfo*, int*);
+#define ONEPASS_OFF 0 /* Use of ONEPASS not allowed */
+#define ONEPASS_SINGLE 1 /* ONEPASS valid for a single row update */
+#define ONEPASS_MULTI 2 /* ONEPASS is valid for multiple rows */
+SQLITE_PRIVATE int sqlite3WhereUsesDeferredSeek(WhereInfo*);
+SQLITE_PRIVATE void sqlite3ExprCodeLoadIndexColumn(Parse*, Index*, int, int, int);
+SQLITE_PRIVATE int sqlite3ExprCodeGetColumn(Parse*, Table*, int, int, int, u8);
+SQLITE_PRIVATE void sqlite3ExprCodeGetColumnOfTable(Vdbe*, Table*, int, int, int);
+SQLITE_PRIVATE void sqlite3ExprCodeMove(Parse*, int, int, int);
+SQLITE_PRIVATE void sqlite3ExprCode(Parse*, Expr*, int);
+#ifndef SQLITE_OMIT_GENERATED_COLUMNS
+SQLITE_PRIVATE void sqlite3ExprCodeGeneratedColumn(Parse*, Column*, int);
+#endif
+SQLITE_PRIVATE void sqlite3ExprCodeCopy(Parse*, Expr*, int);
+SQLITE_PRIVATE void sqlite3ExprCodeFactorable(Parse*, Expr*, int);
+SQLITE_PRIVATE int sqlite3ExprCodeRunJustOnce(Parse*, Expr*, int);
+SQLITE_PRIVATE int sqlite3ExprCodeTemp(Parse*, Expr*, int*);
+SQLITE_PRIVATE int sqlite3ExprCodeTarget(Parse*, Expr*, int);
+SQLITE_PRIVATE int sqlite3ExprCodeExprList(Parse*, ExprList*, int, int, u8);
+#define SQLITE_ECEL_DUP 0x01 /* Deep, not shallow copies */
+#define SQLITE_ECEL_FACTOR 0x02 /* Factor out constant terms */
+#define SQLITE_ECEL_REF 0x04 /* Use ExprList.u.x.iOrderByCol */
+#define SQLITE_ECEL_OMITREF 0x08 /* Omit if ExprList.u.x.iOrderByCol */
+SQLITE_PRIVATE void sqlite3ExprIfTrue(Parse*, Expr*, int, int);
+SQLITE_PRIVATE void sqlite3ExprIfFalse(Parse*, Expr*, int, int);
+SQLITE_PRIVATE void sqlite3ExprIfFalseDup(Parse*, Expr*, int, int);
+SQLITE_PRIVATE Table *sqlite3FindTable(sqlite3*,const char*, const char*);
+#define LOCATE_VIEW 0x01
+#define LOCATE_NOERR 0x02
+SQLITE_PRIVATE Table *sqlite3LocateTable(Parse*,u32 flags,const char*, const char*);
+SQLITE_PRIVATE Table *sqlite3LocateTableItem(Parse*,u32 flags,struct SrcList_item *);
+SQLITE_PRIVATE Index *sqlite3FindIndex(sqlite3*,const char*, const char*);
+SQLITE_PRIVATE void sqlite3UnlinkAndDeleteTable(sqlite3*,int,const char*);
+SQLITE_PRIVATE void sqlite3UnlinkAndDeleteIndex(sqlite3*,int,const char*);
+SQLITE_PRIVATE void sqlite3Vacuum(Parse*,Token*,Expr*);
+SQLITE_PRIVATE int sqlite3RunVacuum(char**, sqlite3*, int, sqlite3_value*);
+SQLITE_PRIVATE char *sqlite3NameFromToken(sqlite3*, Token*);
+SQLITE_PRIVATE int sqlite3ExprCompare(Parse*,Expr*, Expr*, int);
+SQLITE_PRIVATE int sqlite3ExprCompareSkip(Expr*, Expr*, int);
+SQLITE_PRIVATE int sqlite3ExprListCompare(ExprList*, ExprList*, int);
+SQLITE_PRIVATE int sqlite3ExprImpliesExpr(Parse*,Expr*, Expr*, int);
+SQLITE_PRIVATE int sqlite3ExprImpliesNonNullRow(Expr*,int);
+SQLITE_PRIVATE void sqlite3AggInfoPersistWalkerInit(Walker*,Parse*);
+SQLITE_PRIVATE void sqlite3ExprAnalyzeAggregates(NameContext*, Expr*);
+SQLITE_PRIVATE void sqlite3ExprAnalyzeAggList(NameContext*,ExprList*);
+SQLITE_PRIVATE int sqlite3ExprCoveredByIndex(Expr*, int iCur, Index *pIdx);
+SQLITE_PRIVATE int sqlite3FunctionUsesThisSrc(Expr*, SrcList*);
+SQLITE_PRIVATE Vdbe *sqlite3GetVdbe(Parse*);
+#ifndef SQLITE_UNTESTABLE
+SQLITE_PRIVATE void sqlite3PrngSaveState(void);
+SQLITE_PRIVATE void sqlite3PrngRestoreState(void);
+#endif
+SQLITE_PRIVATE void sqlite3RollbackAll(sqlite3*,int);
+SQLITE_PRIVATE void sqlite3CodeVerifySchema(Parse*, int);
+SQLITE_PRIVATE void sqlite3CodeVerifyNamedSchema(Parse*, const char *zDb);
+SQLITE_PRIVATE void sqlite3BeginTransaction(Parse*, int);
+SQLITE_PRIVATE void sqlite3EndTransaction(Parse*,int);
+SQLITE_PRIVATE void sqlite3Savepoint(Parse*, int, Token*);
+SQLITE_PRIVATE void sqlite3CloseSavepoints(sqlite3 *);
+SQLITE_PRIVATE void sqlite3LeaveMutexAndCloseZombie(sqlite3*);
+SQLITE_PRIVATE u32 sqlite3IsTrueOrFalse(const char*);
+SQLITE_PRIVATE int sqlite3ExprIdToTrueFalse(Expr*);
+SQLITE_PRIVATE int sqlite3ExprTruthValue(const Expr*);
+SQLITE_PRIVATE int sqlite3ExprIsConstant(Expr*);
+SQLITE_PRIVATE int sqlite3ExprIsConstantNotJoin(Expr*);
+SQLITE_PRIVATE int sqlite3ExprIsConstantOrFunction(Expr*, u8);
+SQLITE_PRIVATE int sqlite3ExprIsConstantOrGroupBy(Parse*, Expr*, ExprList*);
+SQLITE_PRIVATE int sqlite3ExprIsTableConstant(Expr*,int);
+#ifdef SQLITE_ENABLE_CURSOR_HINTS
+SQLITE_PRIVATE int sqlite3ExprContainsSubquery(Expr*);
+#endif
+SQLITE_PRIVATE int sqlite3ExprIsInteger(Expr*, int*);
+SQLITE_PRIVATE int sqlite3ExprCanBeNull(const Expr*);
+SQLITE_PRIVATE int sqlite3ExprNeedsNoAffinityChange(const Expr*, char);
+SQLITE_PRIVATE int sqlite3IsRowid(const char*);
+SQLITE_PRIVATE void sqlite3GenerateRowDelete(
+ Parse*,Table*,Trigger*,int,int,int,i16,u8,u8,u8,int);
+SQLITE_PRIVATE void sqlite3GenerateRowIndexDelete(Parse*, Table*, int, int, int*, int);
+SQLITE_PRIVATE int sqlite3GenerateIndexKey(Parse*, Index*, int, int, int, int*,Index*,int);
+SQLITE_PRIVATE void sqlite3ResolvePartIdxLabel(Parse*,int);
+SQLITE_PRIVATE int sqlite3ExprReferencesUpdatedColumn(Expr*,int*,int);
+SQLITE_PRIVATE void sqlite3GenerateConstraintChecks(Parse*,Table*,int*,int,int,int,int,
+ u8,u8,int,int*,int*,Upsert*);
+#ifdef SQLITE_ENABLE_NULL_TRIM
+SQLITE_PRIVATE void sqlite3SetMakeRecordP5(Vdbe*,Table*);
+#else
+# define sqlite3SetMakeRecordP5(A,B)
+#endif
+SQLITE_PRIVATE void sqlite3CompleteInsertion(Parse*,Table*,int,int,int,int*,int,int,int);
+SQLITE_PRIVATE int sqlite3OpenTableAndIndices(Parse*, Table*, int, u8, int, u8*, int*, int*);
+SQLITE_PRIVATE void sqlite3BeginWriteOperation(Parse*, int, int);
+SQLITE_PRIVATE void sqlite3MultiWrite(Parse*);
+SQLITE_PRIVATE void sqlite3MayAbort(Parse*);
+SQLITE_PRIVATE void sqlite3HaltConstraint(Parse*, int, int, char*, i8, u8);
+SQLITE_PRIVATE void sqlite3UniqueConstraint(Parse*, int, Index*);
+SQLITE_PRIVATE void sqlite3RowidConstraint(Parse*, int, Table*);
+SQLITE_PRIVATE Expr *sqlite3ExprDup(sqlite3*,Expr*,int);
+SQLITE_PRIVATE ExprList *sqlite3ExprListDup(sqlite3*,ExprList*,int);
+SQLITE_PRIVATE SrcList *sqlite3SrcListDup(sqlite3*,SrcList*,int);
+SQLITE_PRIVATE IdList *sqlite3IdListDup(sqlite3*,IdList*);
+SQLITE_PRIVATE Select *sqlite3SelectDup(sqlite3*,Select*,int);
+SQLITE_PRIVATE FuncDef *sqlite3FunctionSearch(int,const char*);
+SQLITE_PRIVATE void sqlite3InsertBuiltinFuncs(FuncDef*,int);
+SQLITE_PRIVATE FuncDef *sqlite3FindFunction(sqlite3*,const char*,int,u8,u8);
+SQLITE_PRIVATE void sqlite3RegisterBuiltinFunctions(void);
+SQLITE_PRIVATE void sqlite3RegisterDateTimeFunctions(void);
+SQLITE_PRIVATE void sqlite3RegisterPerConnectionBuiltinFunctions(sqlite3*);
+SQLITE_PRIVATE int sqlite3SafetyCheckOk(sqlite3*);
+SQLITE_PRIVATE int sqlite3SafetyCheckSickOrOk(sqlite3*);
+SQLITE_PRIVATE void sqlite3ChangeCookie(Parse*, int);
+
+#if !defined(SQLITE_OMIT_VIEW) && !defined(SQLITE_OMIT_TRIGGER)
+SQLITE_PRIVATE void sqlite3MaterializeView(Parse*, Table*, Expr*, ExprList*,Expr*,int);
+#endif
+
+#ifndef SQLITE_OMIT_TRIGGER
+SQLITE_PRIVATE void sqlite3BeginTrigger(Parse*, Token*,Token*,int,int,IdList*,SrcList*,
+ Expr*,int, int);
+SQLITE_PRIVATE void sqlite3FinishTrigger(Parse*, TriggerStep*, Token*);
+SQLITE_PRIVATE void sqlite3DropTrigger(Parse*, SrcList*, int);
+SQLITE_PRIVATE void sqlite3DropTriggerPtr(Parse*, Trigger*);
+SQLITE_PRIVATE Trigger *sqlite3TriggersExist(Parse *, Table*, int, ExprList*, int *pMask);
+SQLITE_PRIVATE Trigger *sqlite3TriggerList(Parse *, Table *);
+SQLITE_PRIVATE void sqlite3CodeRowTrigger(Parse*, Trigger *, int, ExprList*, int, Table *,
+ int, int, int);
+SQLITE_PRIVATE void sqlite3CodeRowTriggerDirect(Parse *, Trigger *, Table *, int, int, int);
+ void sqliteViewTriggers(Parse*, Table*, Expr*, int, ExprList*);
+SQLITE_PRIVATE void sqlite3DeleteTriggerStep(sqlite3*, TriggerStep*);
+SQLITE_PRIVATE TriggerStep *sqlite3TriggerSelectStep(sqlite3*,Select*,
+ const char*,const char*);
+SQLITE_PRIVATE TriggerStep *sqlite3TriggerInsertStep(Parse*,Token*, IdList*,
+ Select*,u8,Upsert*,
+ const char*,const char*);
+SQLITE_PRIVATE TriggerStep *sqlite3TriggerUpdateStep(Parse*,Token*,SrcList*,ExprList*,
+ Expr*, u8, const char*,const char*);
+SQLITE_PRIVATE TriggerStep *sqlite3TriggerDeleteStep(Parse*,Token*, Expr*,
+ const char*,const char*);
+SQLITE_PRIVATE void sqlite3DeleteTrigger(sqlite3*, Trigger*);
+SQLITE_PRIVATE void sqlite3UnlinkAndDeleteTrigger(sqlite3*,int,const char*);
+SQLITE_PRIVATE u32 sqlite3TriggerColmask(Parse*,Trigger*,ExprList*,int,int,Table*,int);
+SQLITE_PRIVATE SrcList *sqlite3TriggerStepSrc(Parse*, TriggerStep*);
+# define sqlite3ParseToplevel(p) ((p)->pToplevel ? (p)->pToplevel : (p))
+# define sqlite3IsToplevel(p) ((p)->pToplevel==0)
+#else
+# define sqlite3TriggersExist(B,C,D,E,F) 0
+# define sqlite3DeleteTrigger(A,B)
+# define sqlite3DropTriggerPtr(A,B)
+# define sqlite3UnlinkAndDeleteTrigger(A,B,C)
+# define sqlite3CodeRowTrigger(A,B,C,D,E,F,G,H,I)
+# define sqlite3CodeRowTriggerDirect(A,B,C,D,E,F)
+# define sqlite3TriggerList(X, Y) 0
+# define sqlite3ParseToplevel(p) p
+# define sqlite3IsToplevel(p) 1
+# define sqlite3TriggerColmask(A,B,C,D,E,F,G) 0
+# define sqlite3TriggerStepSrc(A,B) 0
+#endif
+
+SQLITE_PRIVATE int sqlite3JoinType(Parse*, Token*, Token*, Token*);
+SQLITE_PRIVATE void sqlite3SetJoinExpr(Expr*,int);
+SQLITE_PRIVATE void sqlite3CreateForeignKey(Parse*, ExprList*, Token*, ExprList*, int);
+SQLITE_PRIVATE void sqlite3DeferForeignKey(Parse*, int);
+#ifndef SQLITE_OMIT_AUTHORIZATION
+SQLITE_PRIVATE void sqlite3AuthRead(Parse*,Expr*,Schema*,SrcList*);
+SQLITE_PRIVATE int sqlite3AuthCheck(Parse*,int, const char*, const char*, const char*);
+SQLITE_PRIVATE void sqlite3AuthContextPush(Parse*, AuthContext*, const char*);
+SQLITE_PRIVATE void sqlite3AuthContextPop(AuthContext*);
+SQLITE_PRIVATE int sqlite3AuthReadCol(Parse*, const char *, const char *, int);
+#else
+# define sqlite3AuthRead(a,b,c,d)
+# define sqlite3AuthCheck(a,b,c,d,e) SQLITE_OK
+# define sqlite3AuthContextPush(a,b,c)
+# define sqlite3AuthContextPop(a) ((void)(a))
+#endif
+SQLITE_PRIVATE int sqlite3DbIsNamed(sqlite3 *db, int iDb, const char *zName);
+SQLITE_PRIVATE void sqlite3Attach(Parse*, Expr*, Expr*, Expr*);
+SQLITE_PRIVATE void sqlite3Detach(Parse*, Expr*);
+SQLITE_PRIVATE void sqlite3FixInit(DbFixer*, Parse*, int, const char*, const Token*);
+SQLITE_PRIVATE int sqlite3FixSrcList(DbFixer*, SrcList*);
+SQLITE_PRIVATE int sqlite3FixSelect(DbFixer*, Select*);
+SQLITE_PRIVATE int sqlite3FixExpr(DbFixer*, Expr*);
+SQLITE_PRIVATE int sqlite3FixExprList(DbFixer*, ExprList*);
+SQLITE_PRIVATE int sqlite3FixTriggerStep(DbFixer*, TriggerStep*);
+SQLITE_PRIVATE int sqlite3RealSameAsInt(double,sqlite3_int64);
+SQLITE_PRIVATE void sqlite3Int64ToText(i64,char*);
+SQLITE_PRIVATE int sqlite3AtoF(const char *z, double*, int, u8);
+SQLITE_PRIVATE int sqlite3GetInt32(const char *, int*);
+SQLITE_PRIVATE int sqlite3GetUInt32(const char*, u32*);
+SQLITE_PRIVATE int sqlite3Atoi(const char*);
+#ifndef SQLITE_OMIT_UTF16
+SQLITE_PRIVATE int sqlite3Utf16ByteLen(const void *pData, int nChar);
+#endif
+SQLITE_PRIVATE int sqlite3Utf8CharLen(const char *pData, int nByte);
+SQLITE_PRIVATE u32 sqlite3Utf8Read(const u8**);
+SQLITE_PRIVATE LogEst sqlite3LogEst(u64);
+SQLITE_PRIVATE LogEst sqlite3LogEstAdd(LogEst,LogEst);
+#ifndef SQLITE_OMIT_VIRTUALTABLE
+SQLITE_PRIVATE LogEst sqlite3LogEstFromDouble(double);
+#endif
+#if defined(SQLITE_ENABLE_STMT_SCANSTATUS) || \
+ defined(SQLITE_ENABLE_STAT4) || \
+ defined(SQLITE_EXPLAIN_ESTIMATED_ROWS)
+SQLITE_PRIVATE u64 sqlite3LogEstToInt(LogEst);
+#endif
+SQLITE_PRIVATE VList *sqlite3VListAdd(sqlite3*,VList*,const char*,int,int);
+SQLITE_PRIVATE const char *sqlite3VListNumToName(VList*,int);
+SQLITE_PRIVATE int sqlite3VListNameToNum(VList*,const char*,int);
+
+/*
+** Routines to read and write variable-length integers. These used to
+** be defined locally, but now we use the varint routines in the util.c
+** file.
+*/
+SQLITE_PRIVATE int sqlite3PutVarint(unsigned char*, u64);
+SQLITE_PRIVATE u8 sqlite3GetVarint(const unsigned char *, u64 *);
+SQLITE_PRIVATE u8 sqlite3GetVarint32(const unsigned char *, u32 *);
+SQLITE_PRIVATE int sqlite3VarintLen(u64 v);
+
+/*
+** The common case is for a varint to be a single byte. The following
+** macros handle the common case without a procedure call, but then call
+** the procedure for larger varints.
+*/
+#define getVarint32(A,B) \
+ (u8)((*(A)<(u8)0x80)?((B)=(u32)*(A)),1:sqlite3GetVarint32((A),(u32 *)&(B)))
+#define getVarint32NR(A,B) \
+ B=(u32)*(A);if(B>=0x80)sqlite3GetVarint32((A),(u32*)&(B))
+#define putVarint32(A,B) \
+ (u8)(((u32)(B)<(u32)0x80)?(*(A)=(unsigned char)(B)),1:\
+ sqlite3PutVarint((A),(B)))
+#define getVarint sqlite3GetVarint
+#define putVarint sqlite3PutVarint
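+/*
+** Usage sketch (illustrative only, not part of the canonical sources):
+** decoding a 32-bit varint from a buffer. getVarint32() evaluates to the
+** number of bytes consumed, so a typical header-parsing loop advances its
+** cursor by the macro's value:
+**
+**     u32 serial_type;
+**     const u8 *zHdr = aHdr;   /* aHdr assumed to point at a record header */
+**     zHdr += getVarint32(zHdr, serial_type);
+**
+** For values below 0x80 (the common case) no procedure call is made.
+*/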
+
+
+SQLITE_PRIVATE const char *sqlite3IndexAffinityStr(sqlite3*, Index*);
+SQLITE_PRIVATE void sqlite3TableAffinity(Vdbe*, Table*, int);
+SQLITE_PRIVATE char sqlite3CompareAffinity(const Expr *pExpr, char aff2);
+SQLITE_PRIVATE int sqlite3IndexAffinityOk(const Expr *pExpr, char idx_affinity);
+SQLITE_PRIVATE char sqlite3TableColumnAffinity(Table*,int);
+SQLITE_PRIVATE char sqlite3ExprAffinity(const Expr *pExpr);
+SQLITE_PRIVATE int sqlite3Atoi64(const char*, i64*, int, u8);
+SQLITE_PRIVATE int sqlite3DecOrHexToI64(const char*, i64*);
+SQLITE_PRIVATE void sqlite3ErrorWithMsg(sqlite3*, int, const char*,...);
+SQLITE_PRIVATE void sqlite3Error(sqlite3*,int);
+SQLITE_PRIVATE void sqlite3SystemError(sqlite3*,int);
+SQLITE_PRIVATE void *sqlite3HexToBlob(sqlite3*, const char *z, int n);
+SQLITE_PRIVATE u8 sqlite3HexToInt(int h);
+SQLITE_PRIVATE int sqlite3TwoPartName(Parse *, Token *, Token *, Token **);
+
+#if defined(SQLITE_NEED_ERR_NAME)
+SQLITE_PRIVATE const char *sqlite3ErrName(int);
+#endif
+
+#ifdef SQLITE_ENABLE_DESERIALIZE
+SQLITE_PRIVATE int sqlite3MemdbInit(void);
+#endif
+
+SQLITE_PRIVATE const char *sqlite3ErrStr(int);
+SQLITE_PRIVATE int sqlite3ReadSchema(Parse *pParse);
+SQLITE_PRIVATE CollSeq *sqlite3FindCollSeq(sqlite3*,u8 enc, const char*,int);
+SQLITE_PRIVATE int sqlite3IsBinary(const CollSeq*);
+SQLITE_PRIVATE CollSeq *sqlite3LocateCollSeq(Parse *pParse, const char*zName);
+SQLITE_PRIVATE void sqlite3SetTextEncoding(sqlite3 *db, u8);
+SQLITE_PRIVATE CollSeq *sqlite3ExprCollSeq(Parse *pParse, const Expr *pExpr);
+SQLITE_PRIVATE CollSeq *sqlite3ExprNNCollSeq(Parse *pParse, const Expr *pExpr);
+SQLITE_PRIVATE int sqlite3ExprCollSeqMatch(Parse*,const Expr*,const Expr*);
+SQLITE_PRIVATE Expr *sqlite3ExprAddCollateToken(Parse *pParse, Expr*, const Token*, int);
+SQLITE_PRIVATE Expr *sqlite3ExprAddCollateString(Parse*,Expr*,const char*);
+SQLITE_PRIVATE Expr *sqlite3ExprSkipCollate(Expr*);
+SQLITE_PRIVATE Expr *sqlite3ExprSkipCollateAndLikely(Expr*);
+SQLITE_PRIVATE int sqlite3CheckCollSeq(Parse *, CollSeq *);
+SQLITE_PRIVATE int sqlite3WritableSchema(sqlite3*);
+SQLITE_PRIVATE int sqlite3CheckObjectName(Parse*, const char*,const char*,const char*);
+SQLITE_PRIVATE void sqlite3VdbeSetChanges(sqlite3 *, int);
+SQLITE_PRIVATE int sqlite3AddInt64(i64*,i64);
+SQLITE_PRIVATE int sqlite3SubInt64(i64*,i64);
+SQLITE_PRIVATE int sqlite3MulInt64(i64*,i64);
+SQLITE_PRIVATE int sqlite3AbsInt32(int);
+#ifdef SQLITE_ENABLE_8_3_NAMES
+SQLITE_PRIVATE void sqlite3FileSuffix3(const char*, char*);
+#else
+# define sqlite3FileSuffix3(X,Y)
+#endif
+SQLITE_PRIVATE u8 sqlite3GetBoolean(const char *z,u8);
+
+SQLITE_PRIVATE const void *sqlite3ValueText(sqlite3_value*, u8);
+SQLITE_PRIVATE int sqlite3ValueBytes(sqlite3_value*, u8);
+SQLITE_PRIVATE void sqlite3ValueSetStr(sqlite3_value*, int, const void *,u8,
+ void(*)(void*));
+SQLITE_PRIVATE void sqlite3ValueSetNull(sqlite3_value*);
+SQLITE_PRIVATE void sqlite3ValueFree(sqlite3_value*);
+#ifndef SQLITE_UNTESTABLE
+SQLITE_PRIVATE void sqlite3ResultIntReal(sqlite3_context*);
+#endif
+SQLITE_PRIVATE sqlite3_value *sqlite3ValueNew(sqlite3 *);
+#ifndef SQLITE_OMIT_UTF16
+SQLITE_PRIVATE char *sqlite3Utf16to8(sqlite3 *, const void*, int, u8);
+#endif
+SQLITE_PRIVATE int sqlite3ValueFromExpr(sqlite3 *, Expr *, u8, u8, sqlite3_value **);
+SQLITE_PRIVATE void sqlite3ValueApplyAffinity(sqlite3_value *, u8, u8);
+#ifndef SQLITE_AMALGAMATION
+SQLITE_PRIVATE const unsigned char sqlite3OpcodeProperty[];
+SQLITE_PRIVATE const char sqlite3StrBINARY[];
+SQLITE_PRIVATE const unsigned char sqlite3UpperToLower[];
+SQLITE_PRIVATE const unsigned char sqlite3CtypeMap[];
+SQLITE_PRIVATE SQLITE_WSD struct Sqlite3Config sqlite3Config;
+SQLITE_PRIVATE FuncDefHash sqlite3BuiltinFunctions;
+SQLITE_API extern u32 sqlite3_unsupported_selecttrace;
+#ifndef SQLITE_OMIT_WSD
+SQLITE_PRIVATE int sqlite3PendingByte;
+#endif
+#endif /* SQLITE_AMALGAMATION */
+#ifdef VDBE_PROFILE
+SQLITE_PRIVATE sqlite3_uint64 sqlite3NProfileCnt;
+#endif
+SQLITE_PRIVATE void sqlite3RootPageMoved(sqlite3*, int, Pgno, Pgno);
+SQLITE_PRIVATE void sqlite3Reindex(Parse*, Token*, Token*);
+SQLITE_PRIVATE void sqlite3AlterFunctions(void);
+SQLITE_PRIVATE void sqlite3AlterRenameTable(Parse*, SrcList*, Token*);
+SQLITE_PRIVATE void sqlite3AlterRenameColumn(Parse*, SrcList*, Token*, Token*);
+SQLITE_PRIVATE int sqlite3GetToken(const unsigned char *, int *);
+SQLITE_PRIVATE void sqlite3NestedParse(Parse*, const char*, ...);
+SQLITE_PRIVATE void sqlite3ExpirePreparedStatements(sqlite3*, int);
+SQLITE_PRIVATE void sqlite3CodeRhsOfIN(Parse*, Expr*, int);
+SQLITE_PRIVATE int sqlite3CodeSubselect(Parse*, Expr*);
+SQLITE_PRIVATE void sqlite3SelectPrep(Parse*, Select*, NameContext*);
+SQLITE_PRIVATE void sqlite3SelectWrongNumTermsError(Parse *pParse, Select *p);
+SQLITE_PRIVATE int sqlite3MatchEName(
+ const struct ExprList_item*,
+ const char*,
+ const char*,
+ const char*
+);
+SQLITE_PRIVATE Bitmask sqlite3ExprColUsed(Expr*);
+SQLITE_PRIVATE u8 sqlite3StrIHash(const char*);
+SQLITE_PRIVATE int sqlite3ResolveExprNames(NameContext*, Expr*);
+SQLITE_PRIVATE int sqlite3ResolveExprListNames(NameContext*, ExprList*);
+SQLITE_PRIVATE void sqlite3ResolveSelectNames(Parse*, Select*, NameContext*);
+SQLITE_PRIVATE int sqlite3ResolveSelfReference(Parse*,Table*,int,Expr*,ExprList*);
+SQLITE_PRIVATE int sqlite3ResolveOrderGroupBy(Parse*, Select*, ExprList*, const char*);
+SQLITE_PRIVATE void sqlite3ColumnDefault(Vdbe *, Table *, int, int);
+SQLITE_PRIVATE void sqlite3AlterFinishAddColumn(Parse *, Token *);
+SQLITE_PRIVATE void sqlite3AlterBeginAddColumn(Parse *, SrcList *);
+SQLITE_PRIVATE void *sqlite3RenameTokenMap(Parse*, void*, Token*);
+SQLITE_PRIVATE void sqlite3RenameTokenRemap(Parse*, void *pTo, void *pFrom);
+SQLITE_PRIVATE void sqlite3RenameExprUnmap(Parse*, Expr*);
+SQLITE_PRIVATE void sqlite3RenameExprlistUnmap(Parse*, ExprList*);
+SQLITE_PRIVATE CollSeq *sqlite3GetCollSeq(Parse*, u8, CollSeq *, const char*);
+SQLITE_PRIVATE char sqlite3AffinityType(const char*, Column*);
+SQLITE_PRIVATE void sqlite3Analyze(Parse*, Token*, Token*);
+SQLITE_PRIVATE int sqlite3InvokeBusyHandler(BusyHandler*);
+SQLITE_PRIVATE int sqlite3FindDb(sqlite3*, Token*);
+SQLITE_PRIVATE int sqlite3FindDbName(sqlite3 *, const char *);
+SQLITE_PRIVATE int sqlite3AnalysisLoad(sqlite3*,int iDB);
+SQLITE_PRIVATE void sqlite3DeleteIndexSamples(sqlite3*,Index*);
+SQLITE_PRIVATE void sqlite3DefaultRowEst(Index*);
+SQLITE_PRIVATE void sqlite3RegisterLikeFunctions(sqlite3*, int);
+SQLITE_PRIVATE int sqlite3IsLikeFunction(sqlite3*,Expr*,int*,char*);
+SQLITE_PRIVATE void sqlite3SchemaClear(void *);
+SQLITE_PRIVATE Schema *sqlite3SchemaGet(sqlite3 *, Btree *);
+SQLITE_PRIVATE int sqlite3SchemaToIndex(sqlite3 *db, Schema *);
+SQLITE_PRIVATE KeyInfo *sqlite3KeyInfoAlloc(sqlite3*,int,int);
+SQLITE_PRIVATE void sqlite3KeyInfoUnref(KeyInfo*);
+SQLITE_PRIVATE KeyInfo *sqlite3KeyInfoRef(KeyInfo*);
+SQLITE_PRIVATE KeyInfo *sqlite3KeyInfoOfIndex(Parse*, Index*);
+SQLITE_PRIVATE KeyInfo *sqlite3KeyInfoFromExprList(Parse*, ExprList*, int, int);
+SQLITE_PRIVATE int sqlite3HasExplicitNulls(Parse*, ExprList*);
+
+#ifdef SQLITE_DEBUG
+SQLITE_PRIVATE int sqlite3KeyInfoIsWriteable(KeyInfo*);
+#endif
+SQLITE_PRIVATE int sqlite3CreateFunc(sqlite3 *, const char *, int, int, void *,
+ void (*)(sqlite3_context*,int,sqlite3_value **),
+ void (*)(sqlite3_context*,int,sqlite3_value **),
+ void (*)(sqlite3_context*),
+ void (*)(sqlite3_context*),
+ void (*)(sqlite3_context*,int,sqlite3_value **),
+ FuncDestructor *pDestructor
+);
+SQLITE_PRIVATE void sqlite3NoopDestructor(void*);
+SQLITE_PRIVATE void sqlite3OomFault(sqlite3*);
+SQLITE_PRIVATE void sqlite3OomClear(sqlite3*);
+SQLITE_PRIVATE int sqlite3ApiExit(sqlite3 *db, int);
+SQLITE_PRIVATE int sqlite3OpenTempDatabase(Parse *);
+
+SQLITE_PRIVATE void sqlite3StrAccumInit(StrAccum*, sqlite3*, char*, int, int);
+SQLITE_PRIVATE char *sqlite3StrAccumFinish(StrAccum*);
+SQLITE_PRIVATE void sqlite3SelectDestInit(SelectDest*,int,int);
+SQLITE_PRIVATE Expr *sqlite3CreateColumnExpr(sqlite3 *, SrcList *, int, int);
+
+SQLITE_PRIVATE void sqlite3BackupRestart(sqlite3_backup *);
+SQLITE_PRIVATE void sqlite3BackupUpdate(sqlite3_backup *, Pgno, const u8 *);
+
+#ifndef SQLITE_OMIT_SUBQUERY
+SQLITE_PRIVATE int sqlite3ExprCheckIN(Parse*, Expr*);
+#else
+# define sqlite3ExprCheckIN(x,y) SQLITE_OK
+#endif
+
+#ifdef SQLITE_ENABLE_STAT4
+SQLITE_PRIVATE int sqlite3Stat4ProbeSetValue(
+ Parse*,Index*,UnpackedRecord**,Expr*,int,int,int*);
+SQLITE_PRIVATE int sqlite3Stat4ValueFromExpr(Parse*, Expr*, u8, sqlite3_value**);
+SQLITE_PRIVATE void sqlite3Stat4ProbeFree(UnpackedRecord*);
+SQLITE_PRIVATE int sqlite3Stat4Column(sqlite3*, const void*, int, int, sqlite3_value**);
+SQLITE_PRIVATE char sqlite3IndexColumnAffinity(sqlite3*, Index*, int);
+#endif
+
+/*
+** The interface to the LEMON-generated parser
+*/
+#ifndef SQLITE_AMALGAMATION
+SQLITE_PRIVATE void *sqlite3ParserAlloc(void*(*)(u64), Parse*);
+SQLITE_PRIVATE void sqlite3ParserFree(void*, void(*)(void*));
+#endif
+SQLITE_PRIVATE void sqlite3Parser(void*, int, Token);
+SQLITE_PRIVATE int sqlite3ParserFallback(int);
+#ifdef YYTRACKMAXSTACKDEPTH
+SQLITE_PRIVATE int sqlite3ParserStackPeak(void*);
+#endif
+
+SQLITE_PRIVATE void sqlite3AutoLoadExtensions(sqlite3*);
+#ifndef SQLITE_OMIT_LOAD_EXTENSION
+SQLITE_PRIVATE void sqlite3CloseExtensions(sqlite3*);
+#else
+# define sqlite3CloseExtensions(X)
+#endif
+
+#ifndef SQLITE_OMIT_SHARED_CACHE
+SQLITE_PRIVATE void sqlite3TableLock(Parse *, int, Pgno, u8, const char *);
+#else
+ #define sqlite3TableLock(v,w,x,y,z)
+#endif
+
+#ifdef SQLITE_TEST
+SQLITE_PRIVATE int sqlite3Utf8To8(unsigned char*);
+#endif
+
+#ifdef SQLITE_OMIT_VIRTUALTABLE
+# define sqlite3VtabClear(Y)
+# define sqlite3VtabSync(X,Y) SQLITE_OK
+# define sqlite3VtabRollback(X)
+# define sqlite3VtabCommit(X)
+# define sqlite3VtabInSync(db) 0
+# define sqlite3VtabLock(X)
+# define sqlite3VtabUnlock(X)
+# define sqlite3VtabModuleUnref(D,X)
+# define sqlite3VtabUnlockList(X)
+# define sqlite3VtabSavepoint(X, Y, Z) SQLITE_OK
+# define sqlite3GetVTable(X,Y) ((VTable*)0)
+#else
+SQLITE_PRIVATE void sqlite3VtabClear(sqlite3 *db, Table*);
+SQLITE_PRIVATE void sqlite3VtabDisconnect(sqlite3 *db, Table *p);
+SQLITE_PRIVATE int sqlite3VtabSync(sqlite3 *db, Vdbe*);
+SQLITE_PRIVATE int sqlite3VtabRollback(sqlite3 *db);
+SQLITE_PRIVATE int sqlite3VtabCommit(sqlite3 *db);
+SQLITE_PRIVATE void sqlite3VtabLock(VTable *);
+SQLITE_PRIVATE void sqlite3VtabUnlock(VTable *);
+SQLITE_PRIVATE void sqlite3VtabModuleUnref(sqlite3*,Module*);
+SQLITE_PRIVATE void sqlite3VtabUnlockList(sqlite3*);
+SQLITE_PRIVATE int sqlite3VtabSavepoint(sqlite3 *, int, int);
+SQLITE_PRIVATE void sqlite3VtabImportErrmsg(Vdbe*, sqlite3_vtab*);
+SQLITE_PRIVATE VTable *sqlite3GetVTable(sqlite3*, Table*);
+SQLITE_PRIVATE Module *sqlite3VtabCreateModule(
+ sqlite3*,
+ const char*,
+ const sqlite3_module*,
+ void*,
+ void(*)(void*)
+ );
+# define sqlite3VtabInSync(db) ((db)->nVTrans>0 && (db)->aVTrans==0)
+#endif
+SQLITE_PRIVATE int sqlite3ReadOnlyShadowTables(sqlite3 *db);
+#ifndef SQLITE_OMIT_VIRTUALTABLE
+SQLITE_PRIVATE int sqlite3ShadowTableName(sqlite3 *db, const char *zName);
+SQLITE_PRIVATE int sqlite3IsShadowTableOf(sqlite3*,Table*,const char*);
+#else
+# define sqlite3ShadowTableName(A,B) 0
+# define sqlite3IsShadowTableOf(A,B,C) 0
+#endif
+SQLITE_PRIVATE int sqlite3VtabEponymousTableInit(Parse*,Module*);
+SQLITE_PRIVATE void sqlite3VtabEponymousTableClear(sqlite3*,Module*);
+SQLITE_PRIVATE void sqlite3VtabMakeWritable(Parse*,Table*);
+SQLITE_PRIVATE void sqlite3VtabBeginParse(Parse*, Token*, Token*, Token*, int);
+SQLITE_PRIVATE void sqlite3VtabFinishParse(Parse*, Token*);
+SQLITE_PRIVATE void sqlite3VtabArgInit(Parse*);
+SQLITE_PRIVATE void sqlite3VtabArgExtend(Parse*, Token*);
+SQLITE_PRIVATE int sqlite3VtabCallCreate(sqlite3*, int, const char *, char **);
+SQLITE_PRIVATE int sqlite3VtabCallConnect(Parse*, Table*);
+SQLITE_PRIVATE int sqlite3VtabCallDestroy(sqlite3*, int, const char *);
+SQLITE_PRIVATE int sqlite3VtabBegin(sqlite3 *, VTable *);
+SQLITE_PRIVATE FuncDef *sqlite3VtabOverloadFunction(sqlite3 *,FuncDef*, int nArg, Expr*);
+SQLITE_PRIVATE sqlite3_int64 sqlite3StmtCurrentTime(sqlite3_context*);
+SQLITE_PRIVATE int sqlite3VdbeParameterIndex(Vdbe*, const char*, int);
+SQLITE_PRIVATE int sqlite3TransferBindings(sqlite3_stmt *, sqlite3_stmt *);
+SQLITE_PRIVATE void sqlite3ParserReset(Parse*);
+#ifdef SQLITE_ENABLE_NORMALIZE
+SQLITE_PRIVATE char *sqlite3Normalize(Vdbe*, const char*);
+#endif
+SQLITE_PRIVATE int sqlite3Reprepare(Vdbe*);
+SQLITE_PRIVATE void sqlite3ExprListCheckLength(Parse*, ExprList*, const char*);
+SQLITE_PRIVATE CollSeq *sqlite3ExprCompareCollSeq(Parse*,const Expr*);
+SQLITE_PRIVATE CollSeq *sqlite3BinaryCompareCollSeq(Parse *, const Expr*, const Expr*);
+SQLITE_PRIVATE int sqlite3TempInMemory(const sqlite3*);
+SQLITE_PRIVATE const char *sqlite3JournalModename(int);
+#ifndef SQLITE_OMIT_WAL
+SQLITE_PRIVATE int sqlite3Checkpoint(sqlite3*, int, int, int*, int*);
+SQLITE_PRIVATE int sqlite3WalDefaultHook(void*,sqlite3*,const char*,int);
+#endif
+#ifndef SQLITE_OMIT_CTE
+SQLITE_PRIVATE With *sqlite3WithAdd(Parse*,With*,Token*,ExprList*,Select*);
+SQLITE_PRIVATE void sqlite3WithDelete(sqlite3*,With*);
+SQLITE_PRIVATE void sqlite3WithPush(Parse*, With*, u8);
+#else
+#define sqlite3WithPush(x,y,z)
+#define sqlite3WithDelete(x,y)
+#endif
+#ifndef SQLITE_OMIT_UPSERT
+SQLITE_PRIVATE Upsert *sqlite3UpsertNew(sqlite3*,ExprList*,Expr*,ExprList*,Expr*);
+SQLITE_PRIVATE void sqlite3UpsertDelete(sqlite3*,Upsert*);
+SQLITE_PRIVATE Upsert *sqlite3UpsertDup(sqlite3*,Upsert*);
+SQLITE_PRIVATE int sqlite3UpsertAnalyzeTarget(Parse*,SrcList*,Upsert*);
+SQLITE_PRIVATE void sqlite3UpsertDoUpdate(Parse*,Upsert*,Table*,Index*,int);
+#else
+#define sqlite3UpsertNew(v,w,x,y,z) ((Upsert*)0)
+#define sqlite3UpsertDelete(x,y)
+#define sqlite3UpsertDup(x,y) ((Upsert*)0)
+#endif
+
+
+/* Declarations for functions in fkey.c. All of these are replaced by
+** no-op macros if OMIT_FOREIGN_KEY is defined. In this case no foreign
+** key functionality is available. If OMIT_TRIGGER is defined but
+** OMIT_FOREIGN_KEY is not, only some of the functions are no-oped. In
+** this case foreign keys are parsed, but no other functionality is
+** provided (enforcement of FK constraints requires the triggers sub-system).
+*/
+#if !defined(SQLITE_OMIT_FOREIGN_KEY) && !defined(SQLITE_OMIT_TRIGGER)
+SQLITE_PRIVATE void sqlite3FkCheck(Parse*, Table*, int, int, int*, int);
+SQLITE_PRIVATE void sqlite3FkDropTable(Parse*, SrcList *, Table*);
+SQLITE_PRIVATE void sqlite3FkActions(Parse*, Table*, ExprList*, int, int*, int);
+SQLITE_PRIVATE int sqlite3FkRequired(Parse*, Table*, int*, int);
+SQLITE_PRIVATE u32 sqlite3FkOldmask(Parse*, Table*);
+SQLITE_PRIVATE FKey *sqlite3FkReferences(Table *);
+#else
+ #define sqlite3FkActions(a,b,c,d,e,f)
+ #define sqlite3FkCheck(a,b,c,d,e,f)
+ #define sqlite3FkDropTable(a,b,c)
+ #define sqlite3FkOldmask(a,b) 0
+ #define sqlite3FkRequired(a,b,c,d) 0
+ #define sqlite3FkReferences(a) 0
+#endif
+#ifndef SQLITE_OMIT_FOREIGN_KEY
+SQLITE_PRIVATE void sqlite3FkDelete(sqlite3 *, Table*);
+SQLITE_PRIVATE int sqlite3FkLocateIndex(Parse*,Table*,FKey*,Index**,int**);
+#else
+ #define sqlite3FkDelete(a,b)
+ #define sqlite3FkLocateIndex(a,b,c,d,e)
+#endif
+
+
+/*
+** Available fault injectors. Should be numbered beginning with 0.
+*/
+#define SQLITE_FAULTINJECTOR_MALLOC 0
+#define SQLITE_FAULTINJECTOR_COUNT 1
+
+/*
+** The interface to the code in fault.c used for identifying "benign"
+** malloc failures. This is only present if SQLITE_UNTESTABLE
+** is not defined.
+*/
+#ifndef SQLITE_UNTESTABLE
+SQLITE_PRIVATE void sqlite3BeginBenignMalloc(void);
+SQLITE_PRIVATE void sqlite3EndBenignMalloc(void);
+#else
+ #define sqlite3BeginBenignMalloc()
+ #define sqlite3EndBenignMalloc()
+#endif
+
+/*
+** Allowed return values from sqlite3FindInIndex()
+*/
+#define IN_INDEX_ROWID 1 /* Search the rowid of the table */
+#define IN_INDEX_EPH 2 /* Search an ephemeral b-tree */
+#define IN_INDEX_INDEX_ASC 3 /* Existing index ASCENDING */
+#define IN_INDEX_INDEX_DESC 4 /* Existing index DESCENDING */
+#define IN_INDEX_NOOP 5 /* No table available. Use comparisons */
+/*
+** Allowed flags for the 3rd parameter to sqlite3FindInIndex().
+*/
+#define IN_INDEX_NOOP_OK 0x0001 /* OK to return IN_INDEX_NOOP */
+#define IN_INDEX_MEMBERSHIP 0x0002 /* IN operator used for membership test */
+#define IN_INDEX_LOOP 0x0004 /* IN operator used as a loop */
+SQLITE_PRIVATE int sqlite3FindInIndex(Parse *, Expr *, u32, int*, int*, int*);
+
+SQLITE_PRIVATE int sqlite3JournalOpen(sqlite3_vfs *, const char *, sqlite3_file *, int, int);
+SQLITE_PRIVATE int sqlite3JournalSize(sqlite3_vfs *);
+#if defined(SQLITE_ENABLE_ATOMIC_WRITE) \
+ || defined(SQLITE_ENABLE_BATCH_ATOMIC_WRITE)
+SQLITE_PRIVATE int sqlite3JournalCreate(sqlite3_file *);
+#endif
+
+SQLITE_PRIVATE int sqlite3JournalIsInMemory(sqlite3_file *p);
+SQLITE_PRIVATE void sqlite3MemJournalOpen(sqlite3_file *);
+
+SQLITE_PRIVATE void sqlite3ExprSetHeightAndFlags(Parse *pParse, Expr *p);
+#if SQLITE_MAX_EXPR_DEPTH>0
+SQLITE_PRIVATE int sqlite3SelectExprHeight(Select *);
+SQLITE_PRIVATE int sqlite3ExprCheckHeight(Parse*, int);
+#else
+ #define sqlite3SelectExprHeight(x) 0
+ #define sqlite3ExprCheckHeight(x,y)
+#endif
+
+SQLITE_PRIVATE u32 sqlite3Get4byte(const u8*);
+SQLITE_PRIVATE void sqlite3Put4byte(u8*, u32);
+
+#ifdef SQLITE_ENABLE_UNLOCK_NOTIFY
+SQLITE_PRIVATE void sqlite3ConnectionBlocked(sqlite3 *, sqlite3 *);
+SQLITE_PRIVATE void sqlite3ConnectionUnlocked(sqlite3 *db);
+SQLITE_PRIVATE void sqlite3ConnectionClosed(sqlite3 *db);
+#else
+ #define sqlite3ConnectionBlocked(x,y)
+ #define sqlite3ConnectionUnlocked(x)
+ #define sqlite3ConnectionClosed(x)
+#endif
+
+#ifdef SQLITE_DEBUG
+SQLITE_PRIVATE void sqlite3ParserTrace(FILE*, char *);
+#endif
+#if defined(YYCOVERAGE)
+SQLITE_PRIVATE int sqlite3ParserCoverage(FILE*);
+#endif
+
+/*
+** If the SQLITE_ENABLE_IOTRACE macro is defined then the global variable
+** sqlite3IoTrace is a pointer to a printf-like routine used to
+** print I/O tracing messages.
+*/
+#ifdef SQLITE_ENABLE_IOTRACE
+# define IOTRACE(A) if( sqlite3IoTrace ){ sqlite3IoTrace A; }
+SQLITE_PRIVATE void sqlite3VdbeIOTraceSql(Vdbe*);
+SQLITE_API SQLITE_EXTERN void (SQLITE_CDECL *sqlite3IoTrace)(const char*,...);
+#else
+# define IOTRACE(A)
+# define sqlite3VdbeIOTraceSql(X)
+#endif
+
+/*
+** These routines are available for the mem2.c debugging memory allocator
+** only. They are used to verify that different "types" of memory
+** allocations are properly tracked by the system.
+**
+** sqlite3MemdebugSetType() sets the "type" of an allocation to one of
+** the MEMTYPE_* macros defined below. The type must be a bitmask with
+** a single bit set.
+**
+** sqlite3MemdebugHasType() returns true if any of the bits in its second
+** argument match the type set by the previous sqlite3MemdebugSetType().
+** sqlite3MemdebugHasType() is intended for use inside assert() statements.
+**
+** sqlite3MemdebugNoType() returns true if none of the bits in its second
+** argument match the type set by the previous sqlite3MemdebugSetType().
+**
+** Perhaps the most important point is the difference between MEMTYPE_HEAP
+** and MEMTYPE_LOOKASIDE. If an allocation is MEMTYPE_LOOKASIDE, that means
+** it might have been allocated by lookaside, except the allocation was
+** too large or lookaside was already full. It is important to verify
+** that allocations that might have been satisfied by lookaside are not
+** passed back to non-lookaside free() routines. Asserts such as
+** assert( sqlite3MemdebugHasType(p, MEMTYPE_LOOKASIDE|MEMTYPE_HEAP) )
+** are placed on the non-lookaside free() routines to verify this
+** constraint.
+**
+** All of this is a no-op for a production build. It only comes into
+** play when the SQLITE_MEMDEBUG compile-time option is used.
+*/
+#ifdef SQLITE_MEMDEBUG
+SQLITE_PRIVATE void sqlite3MemdebugSetType(void*,u8);
+SQLITE_PRIVATE int sqlite3MemdebugHasType(void*,u8);
+SQLITE_PRIVATE int sqlite3MemdebugNoType(void*,u8);
+#else
+# define sqlite3MemdebugSetType(X,Y) /* no-op */
+# define sqlite3MemdebugHasType(X,Y) 1
+# define sqlite3MemdebugNoType(X,Y) 1
+#endif
+#define MEMTYPE_HEAP 0x01 /* General heap allocations */
+#define MEMTYPE_LOOKASIDE 0x02 /* Heap that might have been lookaside */
+#define MEMTYPE_PCACHE 0x04 /* Page cache allocations */
+
+/*
+** Threading interface
+*/
+#if SQLITE_MAX_WORKER_THREADS>0
+SQLITE_PRIVATE int sqlite3ThreadCreate(SQLiteThread**,void*(*)(void*),void*);
+SQLITE_PRIVATE int sqlite3ThreadJoin(SQLiteThread*, void**);
+#endif
+
+#if defined(SQLITE_ENABLE_DBPAGE_VTAB) || defined(SQLITE_TEST)
+SQLITE_PRIVATE int sqlite3DbpageRegister(sqlite3*);
+#endif
+#if defined(SQLITE_ENABLE_DBSTAT_VTAB) || defined(SQLITE_TEST)
+SQLITE_PRIVATE int sqlite3DbstatRegister(sqlite3*);
+#endif
+
+SQLITE_PRIVATE int sqlite3ExprVectorSize(Expr *pExpr);
+SQLITE_PRIVATE int sqlite3ExprIsVector(Expr *pExpr);
+SQLITE_PRIVATE Expr *sqlite3VectorFieldSubexpr(Expr*, int);
+SQLITE_PRIVATE Expr *sqlite3ExprForVectorField(Parse*,Expr*,int);
+SQLITE_PRIVATE void sqlite3VectorErrorMsg(Parse*, Expr*);
+
+#ifndef SQLITE_OMIT_COMPILEOPTION_DIAGS
+SQLITE_PRIVATE const char **sqlite3CompileOptions(int *pnOpt);
+#endif
+
+#endif /* SQLITEINT_H */
+
+/************** End of sqliteInt.h *******************************************/
+/************** Begin file global.c ******************************************/
+/*
+** 2008 June 13
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+**
+** This file contains definitions of global variables and constants.
+*/
+/* #include "sqliteInt.h" */
+
+/* An array to map all upper-case characters into their corresponding
+** lower-case character.
+**
+** SQLite only considers US-ASCII (or EBCDIC) characters. We do not
+** handle case conversions for the UTF character set since the tables
+** involved are nearly as big as, or bigger than, SQLite itself.
+*/
+SQLITE_PRIVATE const unsigned char sqlite3UpperToLower[] = {
+#ifdef SQLITE_ASCII
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17,
+ 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35,
+ 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53,
+ 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 97, 98, 99,100,101,102,103,
+ 104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,
+ 122, 91, 92, 93, 94, 95, 96, 97, 98, 99,100,101,102,103,104,105,106,107,
+ 108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,
+ 126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,
+ 144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,
+ 162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,
+ 180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,
+ 198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,
+ 216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,
+ 234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,
+ 252,253,254,255
+#endif
+#ifdef SQLITE_EBCDIC
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, /* 0x */
+ 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, /* 1x */
+ 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, /* 2x */
+ 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, /* 3x */
+ 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, /* 4x */
+ 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, /* 5x */
+ 96, 97, 98, 99,100,101,102,103,104,105,106,107,108,109,110,111, /* 6x */
+ 112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127, /* 7x */
+ 128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143, /* 8x */
+ 144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159, /* 9x */
+ 160,161,162,163,164,165,166,167,168,169,170,171,140,141,142,175, /* Ax */
+ 176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191, /* Bx */
+ 192,129,130,131,132,133,134,135,136,137,202,203,204,205,206,207, /* Cx */
+ 208,145,146,147,148,149,150,151,152,153,218,219,220,221,222,223, /* Dx */
+ 224,225,162,163,164,165,166,167,168,169,234,235,236,237,238,239, /* Ex */
+ 240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255, /* Fx */
+#endif
+};
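+/*
+** Usage sketch (illustrative only): the mapping above is the basis for
+** SQLite's locale-independent case-insensitive comparisons. A minimal
+** ASCII-only strcasecmp equivalent built on it:
+**
+**     while( *a && sqlite3UpperToLower[(u8)*a]==sqlite3UpperToLower[(u8)*b] ){
+**       a++;
+**       b++;
+**     }
+**     return sqlite3UpperToLower[(u8)*a] - sqlite3UpperToLower[(u8)*b];
+*/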
+
+/*
+** The following 256-byte lookup table is used to support SQLite's built-in
+** equivalents to the following standard library functions:
+**
+** isspace() 0x01
+** isalpha() 0x02
+** isdigit() 0x04
+** isalnum() 0x06
+** isxdigit() 0x08
+** toupper() 0x20
+** SQLite identifier character 0x40
+** Quote character 0x80
+**
+** Bit 0x20 is set if the mapped character requires translation to upper
+** case. i.e. if the character is a lower-case ASCII character.
+** If x is a lower-case ASCII character, then its upper-case equivalent
+** is (x - 0x20). Therefore toupper() can be implemented as:
+**
+** (x & ~(map[x]&0x20))
+**
+** The equivalent of tolower() is implemented using the sqlite3UpperToLower[]
+** array. tolower() is used more often than toupper() by SQLite.
+**
+** Bit 0x40 is set if the character is non-alphanumeric and can be used in an
+** SQLite identifier. Identifiers are alphanumerics, "_", "$", and any
+** non-ASCII UTF character. Hence the test for whether or not a character is
+** part of an identifier is 0x46.
+*/
+SQLITE_PRIVATE const unsigned char sqlite3CtypeMap[256] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 00..07 ........ */
+ 0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0x00, 0x00, /* 08..0f ........ */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 10..17 ........ */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 18..1f ........ */
+ 0x01, 0x00, 0x80, 0x00, 0x40, 0x00, 0x00, 0x80, /* 20..27 !"#$%&' */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 28..2f ()*+,-./ */
+ 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, /* 30..37 01234567 */
+ 0x0c, 0x0c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 38..3f 89:;<=>? */
+
+ 0x00, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x02, /* 40..47 @ABCDEFG */
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, /* 48..4f HIJKLMNO */
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, /* 50..57 PQRSTUVW */
+ 0x02, 0x02, 0x02, 0x80, 0x00, 0x00, 0x00, 0x40, /* 58..5f XYZ[\]^_ */
+ 0x80, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x22, /* 60..67 `abcdefg */
+ 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, /* 68..6f hijklmno */
+ 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, /* 70..77 pqrstuvw */
+ 0x22, 0x22, 0x22, 0x00, 0x00, 0x00, 0x00, 0x00, /* 78..7f xyz{|}~. */
+
+ 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, /* 80..87 ........ */
+ 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, /* 88..8f ........ */
+ 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, /* 90..97 ........ */
+ 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, /* 98..9f ........ */
+ 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, /* a0..a7 ........ */
+ 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, /* a8..af ........ */
+ 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, /* b0..b7 ........ */
+ 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, /* b8..bf ........ */
+
+ 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, /* c0..c7 ........ */
+ 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, /* c8..cf ........ */
+ 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, /* d0..d7 ........ */
+ 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, /* d8..df ........ */
+ 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, /* e0..e7 ........ */
+ 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, /* e8..ef ........ */
+ 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, /* f0..f7 ........ */
+ 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40 /* f8..ff ........ */
+};
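+/*
+** Usage sketch (illustrative only): with the map above, the upper-case
+** conversion and the identifier-character test described in the header
+** comment reduce to simple lookups. The macro names here are examples,
+** not canonical ones:
+**
+**     #define ExToupper(x)  ((x)&~(sqlite3CtypeMap[(u8)(x)]&0x20))
+**     #define ExIsIdChar(x) ((sqlite3CtypeMap[(u8)(x)]&0x46)!=0)
+**
+** ExToupper('a') yields 'A' (0x61 & ~0x20 == 0x41); ExIsIdChar() is true
+** for alphanumerics (the 0x06 bits), "_" and "$" (0x40), and non-ASCII
+** bytes.
+*/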
+
+/* EVIDENCE-OF: R-02982-34736 In order to maintain full backwards
+** compatibility for legacy applications, the URI filename capability is
+** disabled by default.
+**
+** EVIDENCE-OF: R-38799-08373 URI filenames can be enabled or disabled
+** using the SQLITE_USE_URI=1 or SQLITE_USE_URI=0 compile-time options.
+**
+** EVIDENCE-OF: R-43642-56306 By default, URI handling is globally
+** disabled. The default value may be changed by compiling with the
+** SQLITE_USE_URI symbol defined.
+*/
+#ifndef SQLITE_USE_URI
+# define SQLITE_USE_URI 0
+#endif
+
+/* EVIDENCE-OF: R-38720-18127 The default setting is determined by the
+** SQLITE_ALLOW_COVERING_INDEX_SCAN compile-time option, or is "on" if
+** that compile-time option is omitted.
+*/
+#if !defined(SQLITE_ALLOW_COVERING_INDEX_SCAN)
+# define SQLITE_ALLOW_COVERING_INDEX_SCAN 1
+#else
+# if !SQLITE_ALLOW_COVERING_INDEX_SCAN
+# error "Compile-time disabling of covering index scan using the\
+ -DSQLITE_ALLOW_COVERING_INDEX_SCAN=0 option is deprecated.\
+ Contact SQLite developers if this is a problem for you, and\
+ delete this #error macro to continue with your build."
+# endif
+#endif
+
+/* The minimum PMA size is set to this value multiplied by the database
+** page size in bytes.
+*/
+#ifndef SQLITE_SORTER_PMASZ
+# define SQLITE_SORTER_PMASZ 250
+#endif
+
+/* Statement journals spill to disk when their size exceeds the following
+** threshold (in bytes). 0 means that statement journals are created and
+** written to disk immediately (the default behavior for SQLite versions
+** before 3.12.0). -1 means always keep the entire statement journal in
+** memory. (The statement journal is also always held entirely in memory
+** if journal_mode=MEMORY or if temp_store=MEMORY, regardless of this
+** setting.)
+*/
+#ifndef SQLITE_STMTJRNL_SPILL
+# define SQLITE_STMTJRNL_SPILL (64*1024)
+#endif
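+/*
+** For example (illustrative): building with -DSQLITE_STMTJRNL_SPILL=-1
+** keeps every statement journal entirely in memory, while
+** -DSQLITE_STMTJRNL_SPILL=0 restores the pre-3.12.0 behavior of writing
+** statement journals to disk immediately.
+*/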
+
+/*
+** The default lookaside configuration, in the format "SZ,N". SZ is the
+** number of bytes in each lookaside slot (should be a multiple of 8)
+** and N is the number of slots. The lookaside configuration can be
+** changed at start-time using sqlite3_config(SQLITE_CONFIG_LOOKASIDE)
+** or at run-time for an individual database connection using
+** sqlite3_db_config(db, SQLITE_DBCONFIG_LOOKASIDE);
+**
+** With the two-size-lookaside enhancement, less lookaside is required.
+** The default configuration of 1200,40 actually provides 30 1200-byte slots
+** and 93 128-byte slots, which is more lookaside than is available
+** using the older 1200,100 configuration without two-size-lookaside.
+*/
+#ifndef SQLITE_DEFAULT_LOOKASIDE
+# ifdef SQLITE_OMIT_TWOSIZE_LOOKASIDE
+# define SQLITE_DEFAULT_LOOKASIDE 1200,100 /* 120KB of memory */
+# else
+# define SQLITE_DEFAULT_LOOKASIDE 1200,40 /* 48KB of memory */
+# endif
+#endif
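+/*
+** Usage sketch (illustrative only): the process-wide default can be
+** changed before sqlite3_initialize() runs, and a single connection can
+** be reconfigured afterwards:
+**
+**     sqlite3_config(SQLITE_CONFIG_LOOKASIDE, 512, 64);
+**     sqlite3_db_config(db, SQLITE_DBCONFIG_LOOKASIDE, NULL, 512, 64);
+**
+** The values 512 and 64 are arbitrary example numbers.
+*/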
+
+
+/* The default maximum size of an in-memory database created using
+** sqlite3_deserialize()
+*/
+#ifndef SQLITE_MEMDB_DEFAULT_MAXSIZE
+# define SQLITE_MEMDB_DEFAULT_MAXSIZE 1073741824
+#endif
+
+/*
+** The following singleton contains the global configuration for
+** the SQLite library.
+*/
+SQLITE_PRIVATE SQLITE_WSD struct Sqlite3Config sqlite3Config = {
+ SQLITE_DEFAULT_MEMSTATUS, /* bMemstat */
+ 1, /* bCoreMutex */
+ SQLITE_THREADSAFE==1, /* bFullMutex */
+ SQLITE_USE_URI, /* bOpenUri */
+ SQLITE_ALLOW_COVERING_INDEX_SCAN, /* bUseCis */
+ 0, /* bSmallMalloc */
+ 1, /* bExtraSchemaChecks */
+ 0x7ffffffe, /* mxStrlen */
+ 0, /* neverCorrupt */
+ SQLITE_DEFAULT_LOOKASIDE, /* szLookaside, nLookaside */
+ SQLITE_STMTJRNL_SPILL, /* nStmtSpill */
+ {0,0,0,0,0,0,0,0}, /* m */
+ {0,0,0,0,0,0,0,0,0}, /* mutex */
+ {0,0,0,0,0,0,0,0,0,0,0,0,0},/* pcache2 */
+ (void*)0, /* pHeap */
+ 0, /* nHeap */
+ 0, 0, /* mnHeap, mxHeap */
+ SQLITE_DEFAULT_MMAP_SIZE, /* szMmap */
+ SQLITE_MAX_MMAP_SIZE, /* mxMmap */
+ (void*)0, /* pPage */
+ 0, /* szPage */
+ SQLITE_DEFAULT_PCACHE_INITSZ, /* nPage */
+ 0, /* mxParserStack */
+ 0, /* sharedCacheEnabled */
+ SQLITE_SORTER_PMASZ, /* szPma */
+ /* All the rest should always be initialized to zero */
+ 0, /* isInit */
+ 0, /* inProgress */
+ 0, /* isMutexInit */
+ 0, /* isMallocInit */
+ 0, /* isPCacheInit */
+ 0, /* nRefInitMutex */
+ 0, /* pInitMutex */
+ 0, /* xLog */
+ 0, /* pLogArg */
+#ifdef SQLITE_ENABLE_SQLLOG
+ 0, /* xSqllog */
+ 0, /* pSqllogArg */
+#endif
+#ifdef SQLITE_VDBE_COVERAGE
+ 0, /* xVdbeBranch */
+ 0, /* pVbeBranchArg */
+#endif
+#ifdef SQLITE_ENABLE_DESERIALIZE
+ SQLITE_MEMDB_DEFAULT_MAXSIZE, /* mxMemdbSize */
+#endif
+#ifndef SQLITE_UNTESTABLE
+ 0, /* xTestCallback */
+#endif
+ 0, /* bLocaltimeFault */
+ 0x7ffffffe, /* iOnceResetThreshold */
+ SQLITE_DEFAULT_SORTERREF_SIZE, /* szSorterRef */
+ 0, /* iPrngSeed */
+};
+
+/*
+** Hash table for global functions - functions common to all
+** database connections. After initialization, this table is
+** read-only.
+*/
+SQLITE_PRIVATE FuncDefHash sqlite3BuiltinFunctions;
+
+#ifdef VDBE_PROFILE
+/*
+** The following performance counter can be used in place of
+** sqlite3Hwtime() for profiling. This is a no-op on standard builds.
+*/
+SQLITE_PRIVATE sqlite3_uint64 sqlite3NProfileCnt = 0;
+#endif
+
+/*
+** The value of the "pending" byte must be 0x40000000 (1 byte past the
+** 1-GiB boundary) in a compatible database. SQLite never uses
+** the database page that contains the pending byte. It never attempts
+** to read or write that page. The pending byte page is set aside
+** for use by the VFS layers as space for managing file locks.
+**
+** During testing, it is often desirable to move the pending byte to
+** a different position in the file. This allows code that has to
+** deal with the pending byte to run on files that are much smaller
+** than 1 GiB. The sqlite3_test_control() interface can be used to
+** move the pending byte.
+**
+** IMPORTANT: Changing the pending byte to any value other than
+** 0x40000000 results in an incompatible database file format!
+** Changing the pending byte during operation will result in undefined
+** and incorrect behavior.
+*/
+#ifndef SQLITE_OMIT_WSD
+SQLITE_PRIVATE int sqlite3PendingByte = 0x40000000;
+#endif
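+/*
+** Usage sketch (illustrative, test builds only): the pending byte is moved
+** through the test-control interface, e.g.
+**
+**     sqlite3_test_control(SQLITE_TESTCTRL_PENDING_BYTE, 0x0010000);
+**
+** so that lock-boundary logic can be exercised on files far smaller than
+** 1 GiB. Production code must leave the value at 0x40000000.
+*/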
+
+/*
+** Flags for select tracing and the ".selecttrace" macro of the CLI
+*/
+SQLITE_API u32 sqlite3_unsupported_selecttrace = 0;
+
+/* #include "opcodes.h" */
+/*
+** Properties of opcodes. The OPFLG_INITIALIZER macro is
+** created by mkopcodeh.awk during compilation. Data is obtained
+** from the comments following the "case OP_xxxx:" statements in
+** the vdbe.c file.
+*/
+SQLITE_PRIVATE const unsigned char sqlite3OpcodeProperty[] = OPFLG_INITIALIZER;
+
+/*
+** Name of the default collating sequence
+*/
+SQLITE_PRIVATE const char sqlite3StrBINARY[] = "BINARY";
+
+/************** End of global.c **********************************************/
+/************** Begin file status.c ******************************************/
+/*
+** 2008 June 18
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+**
+** This module implements the sqlite3_status() interface and related
+** functionality.
+*/
+/* #include "sqliteInt.h" */
+/************** Include vdbeInt.h in the middle of status.c ******************/
+/************** Begin file vdbeInt.h *****************************************/
+/*
+** 2003 September 6
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+** This is the header file for information that is private to the
+** VDBE. All of this information used to be at the top of the single
+** source code file "vdbe.c". When that file became too big (over
+** 6000 lines long) it was split up into several smaller files and
+** this header information was factored out.
+*/
+#ifndef SQLITE_VDBEINT_H
+#define SQLITE_VDBEINT_H
+
+/*
+** The maximum number of times that a statement will try to reparse
+** itself before giving up and returning SQLITE_SCHEMA.
+*/
+#ifndef SQLITE_MAX_SCHEMA_RETRY
+# define SQLITE_MAX_SCHEMA_RETRY 50
+#endif
+
+/*
+** VDBE_DISPLAY_P4 is true or false depending on whether or not the
+** "explain" P4 display logic is enabled.
+*/
+#if !defined(SQLITE_OMIT_EXPLAIN) || !defined(NDEBUG) \
+ || defined(VDBE_PROFILE) || defined(SQLITE_DEBUG) \
+ || defined(SQLITE_ENABLE_BYTECODE_VTAB)
+# define VDBE_DISPLAY_P4 1
+#else
+# define VDBE_DISPLAY_P4 0
+#endif
+
+/*
+** SQL is translated into a sequence of instructions to be
+** executed by a virtual machine. Each instruction is an instance
+** of the following structure.
+*/
+typedef struct VdbeOp Op;
+
+/*
+** Boolean values
+*/
+typedef unsigned Bool;
+
+/* Opaque type used by code in vdbesort.c */
+typedef struct VdbeSorter VdbeSorter;
+
+/* Elements of the linked list at Vdbe.pAuxData */
+typedef struct AuxData AuxData;
+
+/* Types of VDBE cursors */
+#define CURTYPE_BTREE 0
+#define CURTYPE_SORTER 1
+#define CURTYPE_VTAB 2
+#define CURTYPE_PSEUDO 3
+
+/*
+** A VdbeCursor is a superclass (a wrapper) for various cursor objects:
+**
+** * A b-tree cursor
+** - In the main database or in an ephemeral database
+** - On either an index or a table
+** * A sorter
+** * A virtual table
+** * A one-row "pseudotable" stored in a single register
+*/
+typedef struct VdbeCursor VdbeCursor;
+struct VdbeCursor {
+ u8 eCurType; /* One of the CURTYPE_* values above */
+ i8 iDb; /* Index of cursor database in db->aDb[] (or -1) */
+ u8 nullRow; /* True if pointing to a row with no data */
+ u8 deferredMoveto; /* A call to sqlite3BtreeMoveto() is needed */
+ u8 isTable; /* True for rowid tables. False for indexes */
+#ifdef SQLITE_DEBUG
+ u8 seekOp; /* Most recent seek operation on this cursor */
+ u8 wrFlag; /* The wrFlag argument to sqlite3BtreeCursor() */
+#endif
+ Bool isEphemeral:1; /* True for an ephemeral table */
+ Bool useRandomRowid:1; /* Generate new record numbers semi-randomly */
+ Bool isOrdered:1; /* True if the table is not BTREE_UNORDERED */
+ Bool seekHit:1; /* See the OP_SeekHit and OP_IfNoHope opcodes */
+ Btree *pBtx; /* Separate file holding temporary table */
+ i64 seqCount; /* Sequence counter */
+ u32 *aAltMap; /* Mapping from table to index column numbers */
+
+ /* Cached OP_Column parse information is only valid if cacheStatus matches
+ ** Vdbe.cacheCtr. Vdbe.cacheCtr will never take on the value of
+ ** CACHE_STALE (0) and so setting cacheStatus=CACHE_STALE guarantees that
+ ** the cache is out of date. */
+ u32 cacheStatus; /* Cache is valid if this matches Vdbe.cacheCtr */
+ int seekResult; /* Result of previous sqlite3BtreeMoveto() or 0
+ ** if there have been no prior seeks on the cursor. */
+ /* seekResult does not distinguish between "no seeks have ever occurred
+ ** on this cursor" and "the most recent seek was an exact match".
+ ** For CURTYPE_PSEUDO, seekResult is the register holding the record */
+
+ /* When a new VdbeCursor is allocated, only the fields above are zeroed.
+ ** The fields that follow are uninitialized, and must be individually
+ ** initialized prior to first use. */
+ VdbeCursor *pAltCursor; /* Associated index cursor from which to read */
+ union {
+ BtCursor *pCursor; /* CURTYPE_BTREE or _PSEUDO. Btree cursor */
+ sqlite3_vtab_cursor *pVCur; /* CURTYPE_VTAB. Vtab cursor */
+ VdbeSorter *pSorter; /* CURTYPE_SORTER. Sorter object */
+ } uc;
+ KeyInfo *pKeyInfo; /* Info about index keys needed by index cursors */
+ u32 iHdrOffset; /* Offset to next unparsed byte of the header */
+ Pgno pgnoRoot; /* Root page of the open btree cursor */
+ i16 nField; /* Number of fields in the header */
+ u16 nHdrParsed; /* Number of header fields parsed so far */
+ i64 movetoTarget; /* Argument to the deferred sqlite3BtreeMoveto() */
+ u32 *aOffset; /* Pointer to aType[nField] */
+ const u8 *aRow; /* Data for the current row, if all on one page */
+ u32 payloadSize; /* Total number of bytes in the record */
+  u32 szRow;              /* Bytes available in aRow */
+#ifdef SQLITE_ENABLE_COLUMN_USED_MASK
+ u64 maskUsed; /* Mask of columns used by this cursor */
+#endif
+
+ /* 2*nField extra array elements allocated for aType[], beyond the one
+ ** static element declared in the structure. nField total array slots for
+ ** aType[] and nField+1 array slots for aOffset[] */
+  u32 aType[1];           /* Type values for record decode. MUST BE LAST */
+};
+
+
+/*
+** A value for VdbeCursor.cacheStatus that means the cache is always invalid.
+*/
+#define CACHE_STALE 0
+
+/*
+** When a sub-program is executed (OP_Program), a structure of this type
+** is allocated to store the current value of the program counter, as
+** well as the current memory cell array and various other frame specific
+** values stored in the Vdbe struct. When the sub-program is finished,
+** these values are copied back to the Vdbe from the VdbeFrame structure,
+** restoring the state of the VM to what it was before the sub-program
+** began executing.
+**
+** The memory for a VdbeFrame object is allocated and managed by a memory
+** cell in the parent (calling) frame. When the memory cell is deleted or
+** overwritten, the VdbeFrame object is not freed immediately. Instead, it
+** is linked into the Vdbe.pDelFrame list. The contents of the Vdbe.pDelFrame
+** list are deleted when the VM is reset in VdbeHalt(). The reason for doing
+** this instead of deleting the VdbeFrame immediately is to avoid recursive
+** calls to sqlite3VdbeMemRelease() when the memory cells belonging to the
+** child frame are released.
+**
+** The currently executing frame is stored in Vdbe.pFrame. Vdbe.pFrame is
+** set to NULL if the currently executing frame is the main program.
+*/
+typedef struct VdbeFrame VdbeFrame;
+struct VdbeFrame {
+ Vdbe *v; /* VM this frame belongs to */
+ VdbeFrame *pParent; /* Parent of this frame, or NULL if parent is main */
+ Op *aOp; /* Program instructions for parent frame */
+ i64 *anExec; /* Event counters from parent frame */
+ Mem *aMem; /* Array of memory cells for parent frame */
+ VdbeCursor **apCsr; /* Array of Vdbe cursors for parent frame */
+ u8 *aOnce; /* Bitmask used by OP_Once */
+ void *token; /* Copy of SubProgram.token */
+ i64 lastRowid; /* Last insert rowid (sqlite3.lastRowid) */
+ AuxData *pAuxData; /* Linked list of auxdata allocations */
+#if SQLITE_DEBUG
+ u32 iFrameMagic; /* magic number for sanity checking */
+#endif
+ int nCursor; /* Number of entries in apCsr */
+ int pc; /* Program Counter in parent (calling) frame */
+ int nOp; /* Size of aOp array */
+ int nMem; /* Number of entries in aMem */
+ int nChildMem; /* Number of memory cells for child frame */
+ int nChildCsr; /* Number of cursors for child frame */
+ int nChange; /* Statement changes (Vdbe.nChange) */
+ int nDbChange; /* Value of db->nChange */
+};
+
+/* Magic number for sanity checking on VdbeFrame objects */
+#define SQLITE_FRAME_MAGIC 0x879fb71e
+
+/*
+** Return a pointer to the array of registers allocated for use
+** by a VdbeFrame.
+*/
+#define VdbeFrameMem(p) ((Mem *)&((u8 *)p)[ROUND8(sizeof(VdbeFrame))])
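+/*
+** Layout sketch (illustrative): a VdbeFrame and the registers it manages
+** share a single allocation. The Mem array begins at the first 8-byte
+** aligned address past the VdbeFrame header, which is exactly what the
+** macro above computes:
+**
+**     [ VdbeFrame | pad to 8 bytes | Mem aMem[nChildMem] | cursors ... ]
+*/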
+
+/*
+** Internally, the vdbe manipulates nearly all SQL values as Mem
+** structures. Each Mem struct may cache multiple representations (string,
+** integer etc.) of the same value.
+*/
+struct sqlite3_value {
+ union MemValue {
+ double r; /* Real value used when MEM_Real is set in flags */
+ i64 i; /* Integer value used when MEM_Int is set in flags */
+ int nZero; /* Extra zero bytes when MEM_Zero and MEM_Blob set */
+ const char *zPType; /* Pointer type when MEM_Term|MEM_Subtype|MEM_Null */
+ FuncDef *pDef; /* Used only when flags==MEM_Agg */
+ } u;
+ u16 flags; /* Some combination of MEM_Null, MEM_Str, MEM_Dyn, etc. */
+ u8 enc; /* SQLITE_UTF8, SQLITE_UTF16BE, SQLITE_UTF16LE */
+ u8 eSubtype; /* Subtype for this value */
+ int n; /* Number of characters in string value, excluding '\0' */
+ char *z; /* String or BLOB value */
+ /* ShallowCopy only needs to copy the information above */
+ char *zMalloc; /* Space to hold MEM_Str or MEM_Blob if szMalloc>0 */
+ int szMalloc; /* Size of the zMalloc allocation */
+ u32 uTemp; /* Transient storage for serial_type in OP_MakeRecord */
+ sqlite3 *db; /* The associated database connection */
+ void (*xDel)(void*);/* Destructor for Mem.z - only valid if MEM_Dyn */
+#ifdef SQLITE_DEBUG
+ Mem *pScopyFrom; /* This Mem is a shallow copy of pScopyFrom */
+ u16 mScopyFlags; /* flags value immediately after the shallow copy */
+#endif
+};
+
+/*
+** Size of struct Mem not including the Mem.zMalloc member or anything that
+** follows.
+*/
+#define MEMCELLSIZE offsetof(Mem,zMalloc)
+
+/* One or more of the following flags are set to indicate the valid
+** representations of the value stored in the Mem struct.
+**
+** If the MEM_Null flag is set, then the value is an SQL NULL value.
+** For a pointer type created using sqlite3_bind_pointer() or
+** sqlite3_result_pointer() the MEM_Term and MEM_Subtype flags are also set.
+**
+** If the MEM_Str flag is set then Mem.z points at a string representation.
+** Usually this is encoded in the same unicode encoding as the main
+** database (see below for exceptions). If the MEM_Term flag is also
+** set, then the string is nul terminated. The MEM_Int and MEM_Real
+** flags may coexist with the MEM_Str flag.
+*/
+#define MEM_Null 0x0001 /* Value is NULL (or a pointer) */
+#define MEM_Str 0x0002 /* Value is a string */
+#define MEM_Int 0x0004 /* Value is an integer */
+#define MEM_Real 0x0008 /* Value is a real number */
+#define MEM_Blob 0x0010 /* Value is a BLOB */
+#define MEM_IntReal 0x0020 /* MEM_Int that stringifies like MEM_Real */
+#define MEM_AffMask 0x003f /* Mask of affinity bits */
+#define MEM_FromBind 0x0040 /* Value originates from sqlite3_bind() */
+#define MEM_Undefined 0x0080 /* Value is undefined */
+#define MEM_Cleared 0x0100 /* NULL set by OP_Null, not from data */
+#define MEM_TypeMask 0xc1bf /* Mask of type bits */
+
+
+/* Whenever Mem contains a valid string or blob representation, one of
+** the following flags must be set to determine the memory management
+** policy for Mem.z. The MEM_Term flag tells us whether or not the
+** string is \000 or \u0000 terminated
+*/
+#define MEM_Term 0x0200 /* String in Mem.z is zero terminated */
+#define MEM_Dyn 0x0400 /* Need to call Mem.xDel() on Mem.z */
+#define MEM_Static 0x0800 /* Mem.z points to a static string */
+#define MEM_Ephem 0x1000 /* Mem.z points to an ephemeral string */
+#define MEM_Agg 0x2000 /* Mem.z points to an agg function context */
+#define MEM_Zero 0x4000 /* Mem.i contains count of 0s appended to blob */
+#define MEM_Subtype 0x8000 /* Mem.eSubtype is valid */
+#ifdef SQLITE_OMIT_INCRBLOB
+ #undef MEM_Zero
+ #define MEM_Zero 0x0000
+#endif
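+/*
+** For illustration (not from the canonical sources): a nul-terminated
+** UTF-8 string in a static buffer carries flags MEM_Str|MEM_Term|MEM_Static,
+** while the integer 42 carries MEM_Int with u.i==42. Representations can
+** coexist: after numeric affinity is applied to the text '42', the cell
+** holds MEM_Str|MEM_Int, with both the string and integer forms valid.
+*/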
+
+/* Return TRUE if Mem X contains dynamically allocated content - anything
+** that needs to be deallocated to avoid a leak.
+*/
+#define VdbeMemDynamic(X) \
+ (((X)->flags&(MEM_Agg|MEM_Dyn))!=0)
+
+/*
+** Clear any existing type flags from a Mem and replace them with f
+*/
+#define MemSetTypeFlag(p, f) \
+ ((p)->flags = ((p)->flags&~(MEM_TypeMask|MEM_Zero))|f)
+
+/*
+** True if Mem X is a NULL-nochng type.
+*/
+#define MemNullNochng(X) \
+ (((X)->flags&MEM_TypeMask)==(MEM_Null|MEM_Zero) \
+ && (X)->n==0 && (X)->u.nZero==0)
+
+/*
+** Return true if a memory cell is not marked as invalid. This macro
+** is for use inside assert() statements only.
+*/
+#ifdef SQLITE_DEBUG
+#define memIsValid(M) (((M)->flags & MEM_Undefined)==0)
+#endif
+
+/*
+** Each auxiliary data pointer stored by a user defined function
+** implementation calling sqlite3_set_auxdata() is stored in an instance
+** of this structure. All such structures associated with a single VM
+** are stored in a linked list headed at Vdbe.pAuxData. All are destroyed
+** when the VM is halted (if not before).
+*/
+struct AuxData {
+ int iAuxOp; /* Instruction number of OP_Function opcode */
+ int iAuxArg; /* Index of function argument. */
+ void *pAux; /* Aux data pointer */
+ void (*xDeleteAux)(void*); /* Destructor for the aux data */
+ AuxData *pNextAux; /* Next element in list */
+};
+
+/*
+** The "context" argument for an installable function. A pointer to an
+** instance of this structure is the first argument to the routines used
+** to implement the SQL functions.
+**
+** There is a typedef for this structure in sqlite.h. So all routines,
+** even the public interface to SQLite, can use a pointer to this structure.
+** But this file is the only place where the internal details of this
+** structure are known.
+**
+** This structure is defined inside of vdbeInt.h because it uses substructures
+** (Mem) which are only defined there.
+*/
+struct sqlite3_context {
+ Mem *pOut; /* The return value is stored here */
+ FuncDef *pFunc; /* Pointer to function information */
+ Mem *pMem; /* Memory cell used to store aggregate context */
+ Vdbe *pVdbe; /* The VM that owns this context */
+ int iOp; /* Instruction number of OP_Function */
+ int isError; /* Error code returned by the function. */
+ u8 skipFlag; /* Skip accumulator loading if true */
+ u8 argc; /* Number of arguments */
+ sqlite3_value *argv[1]; /* Argument set */
+};
+
+/* A bitfield type for use inside of structures. Always follow with :N where
+** N is the number of bits.
+*/
+typedef unsigned bft; /* Bit Field Type */
+
+/* The ScanStatus object holds a single value for the
+** sqlite3_stmt_scanstatus() interface.
+*/
+typedef struct ScanStatus ScanStatus;
+struct ScanStatus {
+ int addrExplain; /* OP_Explain for loop */
+ int addrLoop; /* Address of "loops" counter */
+ int addrVisit; /* Address of "rows visited" counter */
+ int iSelectID; /* The "Select-ID" for this loop */
+ LogEst nEst; /* Estimated output rows per loop */
+ char *zName; /* Name of table or index */
+};
+
+/* The DblquoteStr object holds the text of a double-quoted
+** string for a prepared statement. A linked list of these objects
+** is constructed during statement parsing and is held on Vdbe.pDblStr.
+** When computing the normalized text for an SQL statement, that
+** list is consulted for each double-quoted identifier to see if the
+** identifier should really be a string literal.
+*/
+typedef struct DblquoteStr DblquoteStr;
+struct DblquoteStr {
+ DblquoteStr *pNextStr; /* Next string literal in the list */
+ char z[8]; /* Dequoted value for the string */
+};
+
+/*
+** An instance of the virtual machine. This structure contains the complete
+** state of the virtual machine.
+**
+** The "sqlite3_stmt" structure pointer that is returned by sqlite3_prepare()
+** is really a pointer to an instance of this structure.
+*/
+struct Vdbe {
+ sqlite3 *db; /* The database connection that owns this statement */
+ Vdbe *pPrev,*pNext; /* Linked list of VDBEs with the same Vdbe.db */
+ Parse *pParse; /* Parsing context used to create this Vdbe */
+ ynVar nVar; /* Number of entries in aVar[] */
+ u32 magic; /* Magic number for sanity checking */
+ int nMem; /* Number of memory locations currently allocated */
+ int nCursor; /* Number of slots in apCsr[] */
+ u32 cacheCtr; /* VdbeCursor row cache generation counter */
+ int pc; /* The program counter */
+ int rc; /* Value to return */
+ int nChange; /* Number of db changes made since last reset */
+  int iStatement;         /* Statement number (or 0 if no statement is open) */
+ i64 iCurrentTime; /* Value of julianday('now') for this statement */
+ i64 nFkConstraint; /* Number of imm. FK constraints this VM */
+ i64 nStmtDefCons; /* Number of def. constraints when stmt started */
+ i64 nStmtDefImmCons; /* Number of def. imm constraints when stmt started */
+ Mem *aMem; /* The memory locations */
+ Mem **apArg; /* Arguments to currently executing user function */
+ VdbeCursor **apCsr; /* One element of this array for each open cursor */
+ Mem *aVar; /* Values for the OP_Variable opcode. */
+
+ /* When allocating a new Vdbe object, all of the fields below should be
+ ** initialized to zero or NULL */
+
+ Op *aOp; /* Space to hold the virtual machine's program */
+ int nOp; /* Number of instructions in the program */
+ int nOpAlloc; /* Slots allocated for aOp[] */
+ Mem *aColName; /* Column names to return */
+ Mem *pResultSet; /* Pointer to an array of results */
+ char *zErrMsg; /* Error message written here */
+ VList *pVList; /* Name of variables */
+#ifndef SQLITE_OMIT_TRACE
+ i64 startTime; /* Time when query started - used for profiling */
+#endif
+#ifdef SQLITE_DEBUG
+ int rcApp; /* errcode set by sqlite3_result_error_code() */
+ u32 nWrite; /* Number of write operations that have occurred */
+#endif
+ u16 nResColumn; /* Number of columns in one row of the result set */
+ u8 errorAction; /* Recovery action to do in case of an error */
+ u8 minWriteFileFormat; /* Minimum file format for writable database files */
+ u8 prepFlags; /* SQLITE_PREPARE_* flags */
+ u8 doingRerun; /* True if rerunning after an auto-reprepare */
+ bft expired:2; /* 1: recompile VM immediately 2: when convenient */
+ bft explain:2; /* True if EXPLAIN present on SQL command */
+ bft changeCntOn:1; /* True to update the change-counter */
+ bft runOnlyOnce:1; /* Automatically expire on reset */
+ bft usesStmtJournal:1; /* True if uses a statement journal */
+ bft readOnly:1; /* True for statements that do not write */
+ bft bIsReader:1; /* True for statements that read */
+ yDbMask btreeMask; /* Bitmask of db->aDb[] entries referenced */
+ yDbMask lockMask; /* Subset of btreeMask that requires a lock */
+ u32 aCounter[7]; /* Counters used by sqlite3_stmt_status() */
+ char *zSql; /* Text of the SQL statement that generated this */
+#ifdef SQLITE_ENABLE_NORMALIZE
+ char *zNormSql; /* Normalization of the associated SQL statement */
+ DblquoteStr *pDblStr; /* List of double-quoted string literals */
+#endif
+ void *pFree; /* Free this when deleting the vdbe */
+ VdbeFrame *pFrame; /* Parent frame */
+ VdbeFrame *pDelFrame; /* List of frame objects to free on VM reset */
+ int nFrame; /* Number of frames in pFrame list */
+ u32 expmask; /* Binding to these vars invalidates VM */
+ SubProgram *pProgram; /* Linked list of all sub-programs used by VM */
+ AuxData *pAuxData; /* Linked list of auxdata allocations */
+#ifdef SQLITE_ENABLE_STMT_SCANSTATUS
+ i64 *anExec; /* Number of times each op has been executed */
+ int nScan; /* Entries in aScan[] */
+ ScanStatus *aScan; /* Scan definitions for sqlite3_stmt_scanstatus() */
+#endif
+};
+
+/*
+** The following are allowed values for Vdbe.magic
+*/
+#define VDBE_MAGIC_INIT 0x16bceaa5 /* Building a VDBE program */
+#define VDBE_MAGIC_RUN 0x2df20da3 /* VDBE is ready to execute */
+#define VDBE_MAGIC_HALT 0x319c2973 /* VDBE has completed execution */
+#define VDBE_MAGIC_RESET 0x48fa9f76 /* Reset and ready to run again */
+#define VDBE_MAGIC_DEAD 0x5606c3c8 /* The VDBE has been deallocated */
+
+/*
+** Structure used to store the context required by the
+** sqlite3_preupdate_*() API functions.
+*/
+struct PreUpdate {
+ Vdbe *v;
+ VdbeCursor *pCsr; /* Cursor to read old values from */
+ int op; /* One of SQLITE_INSERT, UPDATE, DELETE */
+ u8 *aRecord; /* old.* database record */
+ KeyInfo keyinfo;
+ UnpackedRecord *pUnpacked; /* Unpacked version of aRecord[] */
+ UnpackedRecord *pNewUnpacked; /* Unpacked version of new.* record */
+ int iNewReg; /* Register for new.* values */
+ i64 iKey1; /* First key value passed to hook */
+ i64 iKey2; /* Second key value passed to hook */
+ Mem *aNew; /* Array of new.* values */
+  Table *pTab;                    /* Schema object being updated */
+ Index *pPk; /* PK index if pTab is WITHOUT ROWID */
+};
+
+/*
+** Function prototypes
+*/
+SQLITE_PRIVATE void sqlite3VdbeError(Vdbe*, const char *, ...);
+SQLITE_PRIVATE void sqlite3VdbeFreeCursor(Vdbe *, VdbeCursor*);
+void sqliteVdbePopStack(Vdbe*,int);
+SQLITE_PRIVATE int SQLITE_NOINLINE sqlite3VdbeFinishMoveto(VdbeCursor*);
+SQLITE_PRIVATE int sqlite3VdbeCursorMoveto(VdbeCursor**, u32*);
+SQLITE_PRIVATE int sqlite3VdbeCursorRestore(VdbeCursor*);
+SQLITE_PRIVATE u32 sqlite3VdbeSerialTypeLen(u32);
+SQLITE_PRIVATE u8 sqlite3VdbeOneByteSerialTypeLen(u8);
+SQLITE_PRIVATE u32 sqlite3VdbeSerialPut(unsigned char*, Mem*, u32);
+SQLITE_PRIVATE u32 sqlite3VdbeSerialGet(const unsigned char*, u32, Mem*);
+SQLITE_PRIVATE void sqlite3VdbeDeleteAuxData(sqlite3*, AuxData**, int, int);
+
+int sqlite2BtreeKeyCompare(BtCursor *, const void *, int, int, int *);
+SQLITE_PRIVATE int sqlite3VdbeIdxKeyCompare(sqlite3*,VdbeCursor*,UnpackedRecord*,int*);
+SQLITE_PRIVATE int sqlite3VdbeIdxRowid(sqlite3*, BtCursor*, i64*);
+SQLITE_PRIVATE int sqlite3VdbeExec(Vdbe*);
+#if !defined(SQLITE_OMIT_EXPLAIN) || defined(SQLITE_ENABLE_BYTECODE_VTAB)
+SQLITE_PRIVATE int sqlite3VdbeNextOpcode(Vdbe*,Mem*,int,int*,int*,Op**);
+SQLITE_PRIVATE char *sqlite3VdbeDisplayP4(sqlite3*,Op*);
+#endif
+#if defined(SQLITE_ENABLE_EXPLAIN_COMMENTS)
+SQLITE_PRIVATE char *sqlite3VdbeDisplayComment(sqlite3*,const Op*,const char*);
+#endif
+#if !defined(SQLITE_OMIT_EXPLAIN)
+SQLITE_PRIVATE int sqlite3VdbeList(Vdbe*);
+#endif
+SQLITE_PRIVATE int sqlite3VdbeHalt(Vdbe*);
+SQLITE_PRIVATE int sqlite3VdbeChangeEncoding(Mem *, int);
+SQLITE_PRIVATE int sqlite3VdbeMemTooBig(Mem*);
+SQLITE_PRIVATE int sqlite3VdbeMemCopy(Mem*, const Mem*);
+SQLITE_PRIVATE void sqlite3VdbeMemShallowCopy(Mem*, const Mem*, int);
+SQLITE_PRIVATE void sqlite3VdbeMemMove(Mem*, Mem*);
+SQLITE_PRIVATE int sqlite3VdbeMemNulTerminate(Mem*);
+SQLITE_PRIVATE int sqlite3VdbeMemSetStr(Mem*, const char*, int, u8, void(*)(void*));
+SQLITE_PRIVATE void sqlite3VdbeMemSetInt64(Mem*, i64);
+#ifdef SQLITE_OMIT_FLOATING_POINT
+# define sqlite3VdbeMemSetDouble sqlite3VdbeMemSetInt64
+#else
+SQLITE_PRIVATE void sqlite3VdbeMemSetDouble(Mem*, double);
+#endif
+SQLITE_PRIVATE void sqlite3VdbeMemSetPointer(Mem*, void*, const char*, void(*)(void*));
+SQLITE_PRIVATE void sqlite3VdbeMemInit(Mem*,sqlite3*,u16);
+SQLITE_PRIVATE void sqlite3VdbeMemSetNull(Mem*);
+SQLITE_PRIVATE void sqlite3VdbeMemSetZeroBlob(Mem*,int);
+#ifdef SQLITE_DEBUG
+SQLITE_PRIVATE int sqlite3VdbeMemIsRowSet(const Mem*);
+#endif
+SQLITE_PRIVATE int sqlite3VdbeMemSetRowSet(Mem*);
+SQLITE_PRIVATE int sqlite3VdbeMemMakeWriteable(Mem*);
+SQLITE_PRIVATE int sqlite3VdbeMemStringify(Mem*, u8, u8);
+SQLITE_PRIVATE i64 sqlite3VdbeIntValue(Mem*);
+SQLITE_PRIVATE int sqlite3VdbeMemIntegerify(Mem*);
+SQLITE_PRIVATE double sqlite3VdbeRealValue(Mem*);
+SQLITE_PRIVATE int sqlite3VdbeBooleanValue(Mem*, int ifNull);
+SQLITE_PRIVATE void sqlite3VdbeIntegerAffinity(Mem*);
+SQLITE_PRIVATE int sqlite3VdbeMemRealify(Mem*);
+SQLITE_PRIVATE int sqlite3VdbeMemNumerify(Mem*);
+SQLITE_PRIVATE int sqlite3VdbeMemCast(Mem*,u8,u8);
+SQLITE_PRIVATE int sqlite3VdbeMemFromBtree(BtCursor*,u32,u32,Mem*);
+SQLITE_PRIVATE int sqlite3VdbeMemFromBtreeZeroOffset(BtCursor*,u32,Mem*);
+SQLITE_PRIVATE void sqlite3VdbeMemRelease(Mem *p);
+SQLITE_PRIVATE int sqlite3VdbeMemFinalize(Mem*, FuncDef*);
+#ifndef SQLITE_OMIT_WINDOWFUNC
+SQLITE_PRIVATE int sqlite3VdbeMemAggValue(Mem*, Mem*, FuncDef*);
+#endif
+#if !defined(SQLITE_OMIT_EXPLAIN) || defined(SQLITE_ENABLE_BYTECODE_VTAB)
+SQLITE_PRIVATE const char *sqlite3OpcodeName(int);
+#endif
+SQLITE_PRIVATE int sqlite3VdbeMemGrow(Mem *pMem, int n, int preserve);
+SQLITE_PRIVATE int sqlite3VdbeMemClearAndResize(Mem *pMem, int n);
+SQLITE_PRIVATE int sqlite3VdbeCloseStatement(Vdbe *, int);
+#ifdef SQLITE_DEBUG
+SQLITE_PRIVATE int sqlite3VdbeFrameIsValid(VdbeFrame*);
+#endif
+SQLITE_PRIVATE void sqlite3VdbeFrameMemDel(void*); /* Destructor on Mem */
+SQLITE_PRIVATE void sqlite3VdbeFrameDelete(VdbeFrame*); /* Actually deletes the Frame */
+SQLITE_PRIVATE int sqlite3VdbeFrameRestore(VdbeFrame *);
+#ifdef SQLITE_ENABLE_PREUPDATE_HOOK
+SQLITE_PRIVATE void sqlite3VdbePreUpdateHook(Vdbe*,VdbeCursor*,int,const char*,Table*,i64,int);
+#endif
+SQLITE_PRIVATE int sqlite3VdbeTransferError(Vdbe *p);
+
+SQLITE_PRIVATE int sqlite3VdbeSorterInit(sqlite3 *, int, VdbeCursor *);
+SQLITE_PRIVATE void sqlite3VdbeSorterReset(sqlite3 *, VdbeSorter *);
+SQLITE_PRIVATE void sqlite3VdbeSorterClose(sqlite3 *, VdbeCursor *);
+SQLITE_PRIVATE int sqlite3VdbeSorterRowkey(const VdbeCursor *, Mem *);
+SQLITE_PRIVATE int sqlite3VdbeSorterNext(sqlite3 *, const VdbeCursor *);
+SQLITE_PRIVATE int sqlite3VdbeSorterRewind(const VdbeCursor *, int *);
+SQLITE_PRIVATE int sqlite3VdbeSorterWrite(const VdbeCursor *, Mem *);
+SQLITE_PRIVATE int sqlite3VdbeSorterCompare(const VdbeCursor *, Mem *, int, int *);
+
+#ifdef SQLITE_DEBUG
+SQLITE_PRIVATE void sqlite3VdbeIncrWriteCounter(Vdbe*, VdbeCursor*);
+SQLITE_PRIVATE void sqlite3VdbeAssertAbortable(Vdbe*);
+#else
+# define sqlite3VdbeIncrWriteCounter(V,C)
+# define sqlite3VdbeAssertAbortable(V)
+#endif
+
+#if !defined(SQLITE_OMIT_SHARED_CACHE)
+SQLITE_PRIVATE void sqlite3VdbeEnter(Vdbe*);
+#else
+# define sqlite3VdbeEnter(X)
+#endif
+
+#if !defined(SQLITE_OMIT_SHARED_CACHE) && SQLITE_THREADSAFE>0
+SQLITE_PRIVATE void sqlite3VdbeLeave(Vdbe*);
+#else
+# define sqlite3VdbeLeave(X)
+#endif
+
+#ifdef SQLITE_DEBUG
+SQLITE_PRIVATE void sqlite3VdbeMemAboutToChange(Vdbe*,Mem*);
+SQLITE_PRIVATE int sqlite3VdbeCheckMemInvariants(Mem*);
+#endif
+
+#ifndef SQLITE_OMIT_FOREIGN_KEY
+SQLITE_PRIVATE int sqlite3VdbeCheckFk(Vdbe *, int);
+#else
+# define sqlite3VdbeCheckFk(p,i) 0
+#endif
+
+#ifdef SQLITE_DEBUG
+SQLITE_PRIVATE void sqlite3VdbePrintSql(Vdbe*);
+SQLITE_PRIVATE void sqlite3VdbeMemPrettyPrint(Mem *pMem, StrAccum *pStr);
+#endif
+#ifndef SQLITE_OMIT_UTF16
+SQLITE_PRIVATE int sqlite3VdbeMemTranslate(Mem*, u8);
+SQLITE_PRIVATE int sqlite3VdbeMemHandleBom(Mem *pMem);
+#endif
+
+#ifndef SQLITE_OMIT_INCRBLOB
+SQLITE_PRIVATE int sqlite3VdbeMemExpandBlob(Mem *);
+ #define ExpandBlob(P) (((P)->flags&MEM_Zero)?sqlite3VdbeMemExpandBlob(P):0)
+#else
+ #define sqlite3VdbeMemExpandBlob(x) SQLITE_OK
+ #define ExpandBlob(P) SQLITE_OK
+#endif
+
+#endif /* !defined(SQLITE_VDBEINT_H) */
+
+/************** End of vdbeInt.h *********************************************/
+/************** Continuing where we left off in status.c *********************/
+
+/*
+** Variables in which to record status information.
+*/
+#if SQLITE_PTRSIZE>4
+typedef sqlite3_int64 sqlite3StatValueType;
+#else
+typedef u32 sqlite3StatValueType;
+#endif
+typedef struct sqlite3StatType sqlite3StatType;
+static SQLITE_WSD struct sqlite3StatType {
+ sqlite3StatValueType nowValue[10]; /* Current value */
+ sqlite3StatValueType mxValue[10]; /* Maximum value */
+} sqlite3Stat = { {0,}, {0,} };
+
+/*
+** Elements of sqlite3Stat[] are protected by either the memory allocator
+** mutex, or by the pcache1 mutex. The following array determines which.
+*/
+static const char statMutex[] = {
+ 0, /* SQLITE_STATUS_MEMORY_USED */
+ 1, /* SQLITE_STATUS_PAGECACHE_USED */
+ 1, /* SQLITE_STATUS_PAGECACHE_OVERFLOW */
+ 0, /* SQLITE_STATUS_SCRATCH_USED */
+ 0, /* SQLITE_STATUS_SCRATCH_OVERFLOW */
+ 0, /* SQLITE_STATUS_MALLOC_SIZE */
+ 0, /* SQLITE_STATUS_PARSER_STACK */
+ 1, /* SQLITE_STATUS_PAGECACHE_SIZE */
+ 0, /* SQLITE_STATUS_SCRATCH_SIZE */
+ 0, /* SQLITE_STATUS_MALLOC_COUNT */
+};
+
+
+/* The "wsdStat" macro will resolve to the status information
+** state vector. If writable static data is unsupported on the target,
+** we have to locate the state vector at run-time. In the more common
+** case where writable static data is supported, wsdStat can refer directly
+** to the "sqlite3Stat" state vector declared above.
+*/
+#ifdef SQLITE_OMIT_WSD
+# define wsdStatInit sqlite3StatType *x = &GLOBAL(sqlite3StatType,sqlite3Stat)
+# define wsdStat x[0]
+#else
+# define wsdStatInit
+# define wsdStat sqlite3Stat
+#endif
+
+/*
+** Return the current value of a status parameter. The caller must
+** be holding the appropriate mutex.
+*/
+SQLITE_PRIVATE sqlite3_int64 sqlite3StatusValue(int op){
+ wsdStatInit;
+ assert( op>=0 && op<ArraySize(wsdStat.nowValue) );
+ assert( op>=0 && op<ArraySize(statMutex) );
+ assert( sqlite3_mutex_held(statMutex[op] ? sqlite3Pcache1Mutex()
+ : sqlite3MallocMutex()) );
+ return wsdStat.nowValue[op];
+}
+
+/*
+** Add N to the value of a status record. The caller must hold the
+** appropriate mutex. (Locking is checked by assert()).
+**
+** The StatusUp() routine can accept positive or negative values for N.
+** The value of N is added to the current status value and the high-water
+** mark is adjusted if necessary.
+**
+** The StatusDown() routine lowers the current value by N. The highwater
+** mark is unchanged. N must be non-negative for StatusDown().
+*/
+SQLITE_PRIVATE void sqlite3StatusUp(int op, int N){
+ wsdStatInit;
+ assert( op>=0 && op<ArraySize(wsdStat.nowValue) );
+ assert( op>=0 && op<ArraySize(statMutex) );
+ assert( sqlite3_mutex_held(statMutex[op] ? sqlite3Pcache1Mutex()
+ : sqlite3MallocMutex()) );
+ wsdStat.nowValue[op] += N;
+ if( wsdStat.nowValue[op]>wsdStat.mxValue[op] ){
+ wsdStat.mxValue[op] = wsdStat.nowValue[op];
+ }
+}
+SQLITE_PRIVATE void sqlite3StatusDown(int op, int N){
+ wsdStatInit;
+ assert( N>=0 );
+ assert( op>=0 && op<ArraySize(statMutex) );
+ assert( sqlite3_mutex_held(statMutex[op] ? sqlite3Pcache1Mutex()
+ : sqlite3MallocMutex()) );
+ assert( op>=0 && op<ArraySize(wsdStat.nowValue) );
+ wsdStat.nowValue[op] -= N;
+}
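+
+/* Editorial sketch (not part of the upstream amalgamation): a subsystem
+** that tracks its memory through these counters would bracket an
+** allocation like so, while holding the matching mutex:
+**
+**   sqlite3StatusUp(SQLITE_STATUS_MEMORY_USED, nByte);
+**   ...use the allocation...
+**   sqlite3StatusDown(SQLITE_STATUS_MEMORY_USED, nByte);
+*/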
+
+/*
+** Adjust the highwater mark if necessary.
+** The caller must hold the appropriate mutex.
+*/
+SQLITE_PRIVATE void sqlite3StatusHighwater(int op, int X){
+ sqlite3StatValueType newValue;
+ wsdStatInit;
+ assert( X>=0 );
+ newValue = (sqlite3StatValueType)X;
+ assert( op>=0 && op<ArraySize(wsdStat.nowValue) );
+ assert( op>=0 && op<ArraySize(statMutex) );
+ assert( sqlite3_mutex_held(statMutex[op] ? sqlite3Pcache1Mutex()
+ : sqlite3MallocMutex()) );
+ assert( op==SQLITE_STATUS_MALLOC_SIZE
+ || op==SQLITE_STATUS_PAGECACHE_SIZE
+ || op==SQLITE_STATUS_PARSER_STACK );
+ if( newValue>wsdStat.mxValue[op] ){
+ wsdStat.mxValue[op] = newValue;
+ }
+}
+
+/*
+** Query status information.
+*/
+SQLITE_API int sqlite3_status64(
+ int op,
+ sqlite3_int64 *pCurrent,
+ sqlite3_int64 *pHighwater,
+ int resetFlag
+){
+ sqlite3_mutex *pMutex;
+ wsdStatInit;
+ if( op<0 || op>=ArraySize(wsdStat.nowValue) ){
+ return SQLITE_MISUSE_BKPT;
+ }
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( pCurrent==0 || pHighwater==0 ) return SQLITE_MISUSE_BKPT;
+#endif
+ pMutex = statMutex[op] ? sqlite3Pcache1Mutex() : sqlite3MallocMutex();
+ sqlite3_mutex_enter(pMutex);
+ *pCurrent = wsdStat.nowValue[op];
+ *pHighwater = wsdStat.mxValue[op];
+ if( resetFlag ){
+ wsdStat.mxValue[op] = wsdStat.nowValue[op];
+ }
+ sqlite3_mutex_leave(pMutex);
+ (void)pMutex; /* Prevent warning when SQLITE_THREADSAFE=0 */
+ return SQLITE_OK;
+}
+SQLITE_API int sqlite3_status(int op, int *pCurrent, int *pHighwater, int resetFlag){
+ sqlite3_int64 iCur = 0, iHwtr = 0;
+ int rc;
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( pCurrent==0 || pHighwater==0 ) return SQLITE_MISUSE_BKPT;
+#endif
+ rc = sqlite3_status64(op, &iCur, &iHwtr, resetFlag);
+ if( rc==0 ){
+ *pCurrent = (int)iCur;
+ *pHighwater = (int)iHwtr;
+ }
+ return rc;
+}
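+
+/* Usage sketch (editorial, not in the upstream source): read the current
+** and peak library-wide memory usage, resetting the high-water mark:
+**
+**   sqlite3_int64 cur = 0, hi = 0;
+**   if( sqlite3_status64(SQLITE_STATUS_MEMORY_USED, &cur, &hi, 1)==SQLITE_OK ){
+**     ...cur holds bytes currently in use; hi held the prior peak...
+**   }
+*/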
+
+/*
+** Return the number of LookasideSlot elements on the linked list
+*/
+static u32 countLookasideSlots(LookasideSlot *p){
+ u32 cnt = 0;
+ while( p ){
+ p = p->pNext;
+ cnt++;
+ }
+ return cnt;
+}
+
+/*
+** Count the number of slots of lookaside memory that are outstanding
+*/
+SQLITE_PRIVATE int sqlite3LookasideUsed(sqlite3 *db, int *pHighwater){
+ u32 nInit = countLookasideSlots(db->lookaside.pInit);
+ u32 nFree = countLookasideSlots(db->lookaside.pFree);
+#ifndef SQLITE_OMIT_TWOSIZE_LOOKASIDE
+ nInit += countLookasideSlots(db->lookaside.pSmallInit);
+ nFree += countLookasideSlots(db->lookaside.pSmallFree);
+#endif /* SQLITE_OMIT_TWOSIZE_LOOKASIDE */
+ if( pHighwater ) *pHighwater = db->lookaside.nSlot - nInit;
+ return db->lookaside.nSlot - (nInit+nFree);
+}
+
+/*
+** Query status information for a single database connection
+*/
+SQLITE_API int sqlite3_db_status(
+ sqlite3 *db, /* The database connection whose status is desired */
+ int op, /* Status verb */
+ int *pCurrent, /* Write current value here */
+ int *pHighwater, /* Write high-water mark here */
+ int resetFlag /* Reset high-water mark if true */
+){
+ int rc = SQLITE_OK; /* Return code */
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( !sqlite3SafetyCheckOk(db) || pCurrent==0 || pHighwater==0 ){
+ return SQLITE_MISUSE_BKPT;
+ }
+#endif
+ sqlite3_mutex_enter(db->mutex);
+ switch( op ){
+ case SQLITE_DBSTATUS_LOOKASIDE_USED: {
+ *pCurrent = sqlite3LookasideUsed(db, pHighwater);
+ if( resetFlag ){
+ LookasideSlot *p = db->lookaside.pFree;
+ if( p ){
+ while( p->pNext ) p = p->pNext;
+ p->pNext = db->lookaside.pInit;
+ db->lookaside.pInit = db->lookaside.pFree;
+ db->lookaside.pFree = 0;
+ }
+#ifndef SQLITE_OMIT_TWOSIZE_LOOKASIDE
+ p = db->lookaside.pSmallFree;
+ if( p ){
+ while( p->pNext ) p = p->pNext;
+ p->pNext = db->lookaside.pSmallInit;
+ db->lookaside.pSmallInit = db->lookaside.pSmallFree;
+ db->lookaside.pSmallFree = 0;
+ }
+#endif
+ }
+ break;
+ }
+
+ case SQLITE_DBSTATUS_LOOKASIDE_HIT:
+ case SQLITE_DBSTATUS_LOOKASIDE_MISS_SIZE:
+ case SQLITE_DBSTATUS_LOOKASIDE_MISS_FULL: {
+ testcase( op==SQLITE_DBSTATUS_LOOKASIDE_HIT );
+ testcase( op==SQLITE_DBSTATUS_LOOKASIDE_MISS_SIZE );
+ testcase( op==SQLITE_DBSTATUS_LOOKASIDE_MISS_FULL );
+ assert( (op-SQLITE_DBSTATUS_LOOKASIDE_HIT)>=0 );
+ assert( (op-SQLITE_DBSTATUS_LOOKASIDE_HIT)<3 );
+ *pCurrent = 0;
+ *pHighwater = db->lookaside.anStat[op - SQLITE_DBSTATUS_LOOKASIDE_HIT];
+ if( resetFlag ){
+ db->lookaside.anStat[op - SQLITE_DBSTATUS_LOOKASIDE_HIT] = 0;
+ }
+ break;
+ }
+
+ /*
+ ** Return an approximation for the amount of memory currently used
+ ** by all pagers associated with the given database connection. The
+ ** highwater mark is meaningless and is returned as zero.
+ */
+ case SQLITE_DBSTATUS_CACHE_USED_SHARED:
+ case SQLITE_DBSTATUS_CACHE_USED: {
+ int totalUsed = 0;
+ int i;
+ sqlite3BtreeEnterAll(db);
+ for(i=0; i<db->nDb; i++){
+ Btree *pBt = db->aDb[i].pBt;
+ if( pBt ){
+ Pager *pPager = sqlite3BtreePager(pBt);
+ int nByte = sqlite3PagerMemUsed(pPager);
+ if( op==SQLITE_DBSTATUS_CACHE_USED_SHARED ){
+ nByte = nByte / sqlite3BtreeConnectionCount(pBt);
+ }
+ totalUsed += nByte;
+ }
+ }
+ sqlite3BtreeLeaveAll(db);
+ *pCurrent = totalUsed;
+ *pHighwater = 0;
+ break;
+ }
+
+ /*
+ ** *pCurrent gets an accurate estimate of the amount of memory used
+ ** to store the schema for all databases (main, temp, and any ATTACHed
+ ** databases). *pHighwater is set to zero.
+ */
+ case SQLITE_DBSTATUS_SCHEMA_USED: {
+ int i; /* Used to iterate through schemas */
+ int nByte = 0; /* Used to accumulate return value */
+
+ sqlite3BtreeEnterAll(db);
+ db->pnBytesFreed = &nByte;
+ for(i=0; i<db->nDb; i++){
+ Schema *pSchema = db->aDb[i].pSchema;
+ if( ALWAYS(pSchema!=0) ){
+ HashElem *p;
+
+ nByte += sqlite3GlobalConfig.m.xRoundup(sizeof(HashElem)) * (
+ pSchema->tblHash.count
+ + pSchema->trigHash.count
+ + pSchema->idxHash.count
+ + pSchema->fkeyHash.count
+ );
+ nByte += sqlite3_msize(pSchema->tblHash.ht);
+ nByte += sqlite3_msize(pSchema->trigHash.ht);
+ nByte += sqlite3_msize(pSchema->idxHash.ht);
+ nByte += sqlite3_msize(pSchema->fkeyHash.ht);
+
+ for(p=sqliteHashFirst(&pSchema->trigHash); p; p=sqliteHashNext(p)){
+ sqlite3DeleteTrigger(db, (Trigger*)sqliteHashData(p));
+ }
+ for(p=sqliteHashFirst(&pSchema->tblHash); p; p=sqliteHashNext(p)){
+ sqlite3DeleteTable(db, (Table *)sqliteHashData(p));
+ }
+ }
+ }
+ db->pnBytesFreed = 0;
+ sqlite3BtreeLeaveAll(db);
+
+ *pHighwater = 0;
+ *pCurrent = nByte;
+ break;
+ }
+
+ /*
+ ** *pCurrent gets an accurate estimate of the amount of memory used
+ ** to store all prepared statements.
+ ** *pHighwater is set to zero.
+ */
+ case SQLITE_DBSTATUS_STMT_USED: {
+ struct Vdbe *pVdbe; /* Used to iterate through VMs */
+ int nByte = 0; /* Used to accumulate return value */
+
+ db->pnBytesFreed = &nByte;
+ for(pVdbe=db->pVdbe; pVdbe; pVdbe=pVdbe->pNext){
+ sqlite3VdbeClearObject(db, pVdbe);
+ sqlite3DbFree(db, pVdbe);
+ }
+ db->pnBytesFreed = 0;
+
+ *pHighwater = 0; /* IMP: R-64479-57858 */
+ *pCurrent = nByte;
+
+ break;
+ }
+
+ /*
+ ** Set *pCurrent to the total cache hits or misses encountered by all
+ ** pagers the database handle is connected to. *pHighwater is always set
+ ** to zero.
+ */
+ case SQLITE_DBSTATUS_CACHE_SPILL:
+ op = SQLITE_DBSTATUS_CACHE_WRITE+1;
+ /* no break */ deliberate_fall_through
+ case SQLITE_DBSTATUS_CACHE_HIT:
+ case SQLITE_DBSTATUS_CACHE_MISS:
+ case SQLITE_DBSTATUS_CACHE_WRITE:{
+ int i;
+ int nRet = 0;
+ assert( SQLITE_DBSTATUS_CACHE_MISS==SQLITE_DBSTATUS_CACHE_HIT+1 );
+ assert( SQLITE_DBSTATUS_CACHE_WRITE==SQLITE_DBSTATUS_CACHE_HIT+2 );
+
+ for(i=0; i<db->nDb; i++){
+ if( db->aDb[i].pBt ){
+ Pager *pPager = sqlite3BtreePager(db->aDb[i].pBt);
+ sqlite3PagerCacheStat(pPager, op, resetFlag, &nRet);
+ }
+ }
+ *pHighwater = 0; /* IMP: R-42420-56072 */
+ /* IMP: R-54100-20147 */
+ /* IMP: R-29431-39229 */
+ *pCurrent = nRet;
+ break;
+ }
+
+ /* Set *pCurrent to non-zero if there are unresolved deferred foreign
+ ** key constraints. Set *pCurrent to zero if all foreign key constraints
+ ** have been satisfied. The *pHighwater is always set to zero.
+ */
+ case SQLITE_DBSTATUS_DEFERRED_FKS: {
+ *pHighwater = 0; /* IMP: R-11967-56545 */
+ *pCurrent = db->nDeferredImmCons>0 || db->nDeferredCons>0;
+ break;
+ }
+
+ default: {
+ rc = SQLITE_ERROR;
+ }
+ }
+ sqlite3_mutex_leave(db->mutex);
+ return rc;
+}
+
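+/* Usage sketch (editorial): query per-connection lookaside usage without
+** resetting the high-water mark:
+**
+**   int cur = 0, hi = 0;
+**   sqlite3_db_status(db, SQLITE_DBSTATUS_LOOKASIDE_USED, &cur, &hi, 0);
+*/
+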
+/************** End of status.c **********************************************/
+/************** Begin file date.c ********************************************/
+/*
+** 2003 October 31
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+** This file contains the C functions that implement date and time
+** functions for SQLite.
+**
+** There is only one exported symbol in this file - the function
+** sqlite3RegisterDateTimeFunctions() found at the bottom of the file.
+** All other code has file scope.
+**
+** SQLite processes all times and dates as julian day numbers. The
+** dates and times are stored as the number of days since noon
+** in Greenwich on November 24, 4714 B.C. according to the Gregorian
+** calendar system.
+**
+** 1970-01-01 00:00:00 is JD 2440587.5
+** 2000-01-01 00:00:00 is JD 2451544.5
+**
+** This implementation requires years to be expressed as a 4-digit number
+** which means that only dates between 0000-01-01 and 9999-12-31 can
+** be represented, even though julian day numbers allow a much wider
+** range of dates.
+**
+** The Gregorian calendar system is used for all dates and times,
+** even those that predate the Gregorian calendar. Historians usually
+** use the julian calendar for dates prior to 1582-10-15 and for some
+** dates afterwards, depending on locale. Beware of this difference.
+**
+** The conversion algorithms are implemented based on descriptions
+** in the following text:
+**
+** Jean Meeus
+** Astronomical Algorithms, 2nd Edition, 1998
+** ISBN 0-943396-61-1
+** Willmann-Bell, Inc
+** Richmond, Virginia (USA)
+*/
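+/* Editorial note: the magic constant 210866760000000 used by the
+** "unixepoch" modifier below is 2440587.5 (the julian day number of
+** 1970-01-01) times 86400000 ms/day, and 21086676*10000 is the same
+** epoch offset expressed in seconds. */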
+/* #include "sqliteInt.h" */
+/* #include <stdlib.h> */
+/* #include <assert.h> */
+#include <time.h>
+
+#ifndef SQLITE_OMIT_DATETIME_FUNCS
+
+/*
+** The MSVC CRT on Windows CE may not have a localtime() function.
+** So declare a substitute. The substitute function itself is
+** defined in "os_win.c".
+*/
+#if !defined(SQLITE_OMIT_LOCALTIME) && defined(_WIN32_WCE) && \
+ (!defined(SQLITE_MSVC_LOCALTIME_API) || !SQLITE_MSVC_LOCALTIME_API)
+struct tm *__cdecl localtime(const time_t *);
+#endif
+
+/*
+** A structure for holding a single date and time.
+*/
+typedef struct DateTime DateTime;
+struct DateTime {
+ sqlite3_int64 iJD; /* The julian day number times 86400000 */
+ int Y, M, D; /* Year, month, and day */
+ int h, m; /* Hour and minutes */
+ int tz; /* Timezone offset in minutes */
+ double s; /* Seconds */
+ char validJD; /* True (1) if iJD is valid */
+ char rawS; /* Raw numeric value stored in s */
+ char validYMD; /* True (1) if Y,M,D are valid */
+ char validHMS; /* True (1) if h,m,s are valid */
+ char validTZ; /* True (1) if tz is valid */
+ char tzSet; /* Timezone was set explicitly */
+ char isError; /* An overflow has occurred */
+};
+
+
+/*
+** Convert zDate into one or more integers according to the conversion
+** specifier zFormat.
+**
+** zFormat[] contains 4 characters for each integer converted, except for
+** the last integer which is specified by three characters. The meaning
+** of a four-character format specifier ABCD is:
+**
+** A: number of digits to convert. Always "2" or "4".
+** B: minimum value. Always "0" or "1".
+** C: maximum value, decoded as:
+** a: 12
+** b: 14
+** c: 24
+** d: 31
+** e: 59
+** f: 9999
+** D: the separator character, or \000 to indicate this is the
+** last number to convert.
+**
+** Example: To translate an ISO-8601 date YYYY-MM-DD, the format would
+** be "40f-21a-21d". The "40f-" indicates the 4-digit year followed by "-".
+** The "21a-" indicates the 2-digit month followed by "-". The "21d" indicates
+** the 2-digit day, which is the last integer in the set.
+**
+** The function returns the number of successful conversions.
+*/
+static int getDigits(const char *zDate, const char *zFormat, ...){
+ /* The aMx[] array translates the 3rd character of each format
+ ** spec into a max size: a b c d e f */
+ static const u16 aMx[] = { 12, 14, 24, 31, 59, 9999 };
+ va_list ap;
+ int cnt = 0;
+ char nextC;
+ va_start(ap, zFormat);
+ do{
+ char N = zFormat[0] - '0';
+ char min = zFormat[1] - '0';
+ int val = 0;
+ u16 max;
+
+ assert( zFormat[2]>='a' && zFormat[2]<='f' );
+ max = aMx[zFormat[2] - 'a'];
+ nextC = zFormat[3];
+ val = 0;
+ while( N-- ){
+ if( !sqlite3Isdigit(*zDate) ){
+ goto end_getDigits;
+ }
+ val = val*10 + *zDate - '0';
+ zDate++;
+ }
+ if( val<(int)min || val>(int)max || (nextC!=0 && nextC!=*zDate) ){
+ goto end_getDigits;
+ }
+ *va_arg(ap,int*) = val;
+ zDate++;
+ cnt++;
+ zFormat += 4;
+ }while( nextC );
+end_getDigits:
+ va_end(ap);
+ return cnt;
+}
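+
+/* Example (editorial): getDigits("2021-03-09", "40f-21a-21d", &Y, &M, &D)
+** returns 3 and sets Y=2021, M=3, D=9; a month of "13" would stop the
+** conversion early because it exceeds the 'a' (12) maximum. */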
+
+/*
+** Parse a timezone extension on the end of a date-time.
+** The extension is of the form:
+**
+** (+/-)HH:MM
+**
+** Or the "zulu" notation:
+**
+** Z
+**
+** If the parse is successful, write the number of minutes
+** of change in p->tz and return 0. If a parser error occurs,
+** return non-zero.
+**
+** A missing specifier is not considered an error.
+*/
+static int parseTimezone(const char *zDate, DateTime *p){
+ int sgn = 0;
+ int nHr, nMn;
+ int c;
+ while( sqlite3Isspace(*zDate) ){ zDate++; }
+ p->tz = 0;
+ c = *zDate;
+ if( c=='-' ){
+ sgn = -1;
+ }else if( c=='+' ){
+ sgn = +1;
+ }else if( c=='Z' || c=='z' ){
+ zDate++;
+ goto zulu_time;
+ }else{
+ return c!=0;
+ }
+ zDate++;
+ if( getDigits(zDate, "20b:20e", &nHr, &nMn)!=2 ){
+ return 1;
+ }
+ zDate += 5;
+ p->tz = sgn*(nMn + nHr*60);
+zulu_time:
+ while( sqlite3Isspace(*zDate) ){ zDate++; }
+ p->tzSet = 1;
+ return *zDate!=0;
+}
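+
+/* Examples (editorial): "+05:30" sets p->tz to 330 minutes, "-08:00"
+** sets it to -480, and a bare "Z" leaves p->tz at 0 with tzSet=1. */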
+
+/*
+** Parse times of the form HH:MM or HH:MM:SS or HH:MM:SS.FFFF.
+** The HH, MM, and SS must each be exactly 2 digits. The
+** fractional seconds FFFF can be one or more digits.
+**
+** Return 1 if there is a parsing error and 0 on success.
+*/
+static int parseHhMmSs(const char *zDate, DateTime *p){
+ int h, m, s;
+ double ms = 0.0;
+ if( getDigits(zDate, "20c:20e", &h, &m)!=2 ){
+ return 1;
+ }
+ zDate += 5;
+ if( *zDate==':' ){
+ zDate++;
+ if( getDigits(zDate, "20e", &s)!=1 ){
+ return 1;
+ }
+ zDate += 2;
+ if( *zDate=='.' && sqlite3Isdigit(zDate[1]) ){
+ double rScale = 1.0;
+ zDate++;
+ while( sqlite3Isdigit(*zDate) ){
+ ms = ms*10.0 + *zDate - '0';
+ rScale *= 10.0;
+ zDate++;
+ }
+ ms /= rScale;
+ }
+ }else{
+ s = 0;
+ }
+ p->validJD = 0;
+ p->rawS = 0;
+ p->validHMS = 1;
+ p->h = h;
+ p->m = m;
+ p->s = s + ms;
+ if( parseTimezone(zDate, p) ) return 1;
+ p->validTZ = (p->tz!=0)?1:0;
+ return 0;
+}
+
+/*
+** Put the DateTime object into its error state.
+*/
+static void datetimeError(DateTime *p){
+ memset(p, 0, sizeof(*p));
+ p->isError = 1;
+}
+
+/*
+** Convert from YYYY-MM-DD HH:MM:SS to julian day. We always assume
+** that the YYYY-MM-DD is according to the Gregorian calendar.
+**
+** Reference: Meeus page 61
+*/
+static void computeJD(DateTime *p){
+ int Y, M, D, A, B, X1, X2;
+
+ if( p->validJD ) return;
+ if( p->validYMD ){
+ Y = p->Y;
+ M = p->M;
+ D = p->D;
+ }else{
+ Y = 2000; /* If no YMD specified, assume 2000-Jan-01 */
+ M = 1;
+ D = 1;
+ }
+ if( Y<-4713 || Y>9999 || p->rawS ){
+ datetimeError(p);
+ return;
+ }
+ if( M<=2 ){
+ Y--;
+ M += 12;
+ }
+ A = Y/100;
+ B = 2 - A + (A/4);
+ X1 = 36525*(Y+4716)/100;
+ X2 = 306001*(M+1)/10000;
+ p->iJD = (sqlite3_int64)((X1 + X2 + D + B - 1524.5 ) * 86400000);
+ p->validJD = 1;
+ if( p->validHMS ){
+ p->iJD += p->h*3600000 + p->m*60000 + (sqlite3_int64)(p->s*1000);
+ if( p->validTZ ){
+ p->iJD -= p->tz*60000;
+ p->validYMD = 0;
+ p->validHMS = 0;
+ p->validTZ = 0;
+ }
+ }
+}
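+
+/* Worked check (editorial): for 2000-01-01, M<=2 shifts to Y=1999, M=13,
+** giving A=19, B=2-19+4=-13, X1=36525*6715/100=2452653, and
+** X2=306001*14/10000=428, so X1+X2+D+B-1524.5 = 2451544.5, matching the
+** julian day number for 2000-01-01 00:00:00 quoted in the header above. */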
+
+/*
+** Parse dates of the form
+**
+** YYYY-MM-DD HH:MM:SS.FFF
+** YYYY-MM-DD HH:MM:SS
+** YYYY-MM-DD HH:MM
+** YYYY-MM-DD
+**
+** Write the result into the DateTime structure and return 0
+** on success and 1 if the input string is not a well-formed
+** date.
+*/
+static int parseYyyyMmDd(const char *zDate, DateTime *p){
+ int Y, M, D, neg;
+
+ if( zDate[0]=='-' ){
+ zDate++;
+ neg = 1;
+ }else{
+ neg = 0;
+ }
+ if( getDigits(zDate, "40f-21a-21d", &Y, &M, &D)!=3 ){
+ return 1;
+ }
+ zDate += 10;
+ while( sqlite3Isspace(*zDate) || 'T'==*(u8*)zDate ){ zDate++; }
+ if( parseHhMmSs(zDate, p)==0 ){
+ /* We got the time */
+ }else if( *zDate==0 ){
+ p->validHMS = 0;
+ }else{
+ return 1;
+ }
+ p->validJD = 0;
+ p->validYMD = 1;
+ p->Y = neg ? -Y : Y;
+ p->M = M;
+ p->D = D;
+ if( p->validTZ ){
+ computeJD(p);
+ }
+ return 0;
+}
+
+/*
+** Set the time to the current time reported by the VFS.
+**
+** Return the number of errors.
+*/
+static int setDateTimeToCurrent(sqlite3_context *context, DateTime *p){
+ p->iJD = sqlite3StmtCurrentTime(context);
+ if( p->iJD>0 ){
+ p->validJD = 1;
+ return 0;
+ }else{
+ return 1;
+ }
+}
+
+/*
+** Input "r" is a numeric quantity which might be a julian day number,
+** or the number of seconds since 1970. If the value of r is within
+** range of a julian day number, install it as such and set validJD.
+** If the value is a valid unix timestamp, put it in p->s and set p->rawS.
+*/
+static void setRawDateNumber(DateTime *p, double r){
+ p->s = r;
+ p->rawS = 1;
+ if( r>=0.0 && r<5373484.5 ){
+ p->iJD = (sqlite3_int64)(r*86400000.0 + 0.5);
+ p->validJD = 1;
+ }
+}
+
+/*
+** Attempt to parse the given string into a julian day number. Return
+** the number of errors.
+**
+** The following are acceptable forms for the input string:
+**
+** YYYY-MM-DD HH:MM:SS.FFF +/-HH:MM
+** DDDD.DD
+** now
+**
+** In the first form, the +/-HH:MM is always optional. The fractional
+** seconds extension (the ".FFF") is optional. The seconds portion
+** (":SS.FFF") is option. The year and date can be omitted as long
+** as there is a time string. The time string can be omitted as long
+** as there is a year and date.
+*/
+static int parseDateOrTime(
+ sqlite3_context *context,
+ const char *zDate,
+ DateTime *p
+){
+ double r;
+ if( parseYyyyMmDd(zDate,p)==0 ){
+ return 0;
+ }else if( parseHhMmSs(zDate, p)==0 ){
+ return 0;
+ }else if( sqlite3StrICmp(zDate,"now")==0 && sqlite3NotPureFunc(context) ){
+ return setDateTimeToCurrent(context, p);
+ }else if( sqlite3AtoF(zDate, &r, sqlite3Strlen30(zDate), SQLITE_UTF8)>0 ){
+ setRawDateNumber(p, r);
+ return 0;
+ }
+ return 1;
+}
+
+/* The julian day number for 9999-12-31 23:59:59.999 is 5373484.4999999.
+** Multiplying this by 86400000 gives 464269060799999 as the maximum value
+** for DateTime.iJD.
+**
+** But some older compilers (ex: gcc 4.2.1 on older Macs) cannot deal with
+** such a large integer literal, so we have to encode it.
+*/
+#define INT_464269060799999 ((((i64)0x1a640)<<32)|0x1072fdff)
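+/* Arithmetic check (editorial): (0x1a640 << 32) is 108096*2^32 =
+** 464268784828416, and OR-ing in 0x1072fdff = 275971583 produces
+** 464269060799999, as claimed above. */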
+
+/*
+** Return TRUE if the given julian day number is within range.
+**
+** The input is the JulianDay times 86400000.
+*/
+static int validJulianDay(sqlite3_int64 iJD){
+ return iJD>=0 && iJD<=INT_464269060799999;
+}
+
+/*
+** Compute the Year, Month, and Day from the julian day number.
+*/
+static void computeYMD(DateTime *p){
+ int Z, A, B, C, D, E, X1;
+ if( p->validYMD ) return;
+ if( !p->validJD ){
+ p->Y = 2000;
+ p->M = 1;
+ p->D = 1;
+ }else if( !validJulianDay(p->iJD) ){
+ datetimeError(p);
+ return;
+ }else{
+ Z = (int)((p->iJD + 43200000)/86400000);
+ A = (int)((Z - 1867216.25)/36524.25);
+ A = Z + 1 + A - (A/4);
+ B = A + 1524;
+ C = (int)((B - 122.1)/365.25);
+ D = (36525*(C&32767))/100;
+ E = (int)((B-D)/30.6001);
+ X1 = (int)(30.6001*E);
+ p->D = B - D - X1;
+ p->M = E<14 ? E-1 : E-13;
+ p->Y = p->M>2 ? C - 4716 : C - 4715;
+ }
+ p->validYMD = 1;
+}
+
+/*
+** Compute the Hour, Minute, and Seconds from the julian day number.
+*/
+static void computeHMS(DateTime *p){
+ int s;
+ if( p->validHMS ) return;
+ computeJD(p);
+ s = (int)((p->iJD + 43200000) % 86400000);
+ p->s = s/1000.0;
+ s = (int)p->s;
+ p->s -= s;
+ p->h = s/3600;
+ s -= p->h*3600;
+ p->m = s/60;
+ p->s += s - p->m*60;
+ p->rawS = 0;
+ p->validHMS = 1;
+}
+
+/*
+** Compute both YMD and HMS
+*/
+static void computeYMD_HMS(DateTime *p){
+ computeYMD(p);
+ computeHMS(p);
+}
+
+/*
+** Clear the YMD and HMS and the TZ
+*/
+static void clearYMD_HMS_TZ(DateTime *p){
+ p->validYMD = 0;
+ p->validHMS = 0;
+ p->validTZ = 0;
+}
+
+#ifndef SQLITE_OMIT_LOCALTIME
+/*
+** On recent Windows platforms, the localtime_s() function is available
+** as part of the "Secure CRT". It is essentially equivalent to
+** localtime_r() available under most POSIX platforms, except that the
+** order of the parameters is reversed.
+**
+** See http://msdn.microsoft.com/en-us/library/a442x3ye(VS.80).aspx.
+**
+** If the user has not indicated to use localtime_r() or localtime_s()
+** already, check for an MSVC build environment that provides
+** localtime_s().
+*/
+#if !HAVE_LOCALTIME_R && !HAVE_LOCALTIME_S \
+ && defined(_MSC_VER) && defined(_CRT_INSECURE_DEPRECATE)
+#undef HAVE_LOCALTIME_S
+#define HAVE_LOCALTIME_S 1
+#endif
+
+/*
+** The following routine implements the rough equivalent of localtime_r()
+** using whatever operating-system-specific localtime facility is
+** available. This routine returns 0 on success and
+** non-zero on any kind of error.
+**
+** If the sqlite3GlobalConfig.bLocaltimeFault variable is true then this
+** routine will always fail.
+**
+** EVIDENCE-OF: R-62172-00036 In this implementation, the standard C
+** library function localtime_r() is used to assist in the calculation of
+** local time.
+*/
+static int osLocaltime(time_t *t, struct tm *pTm){
+ int rc;
+#if !HAVE_LOCALTIME_R && !HAVE_LOCALTIME_S
+ struct tm *pX;
+#if SQLITE_THREADSAFE>0
+ sqlite3_mutex *mutex = sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MAIN);
+#endif
+ sqlite3_mutex_enter(mutex);
+ pX = localtime(t);
+#ifndef SQLITE_UNTESTABLE
+ if( sqlite3GlobalConfig.bLocaltimeFault ) pX = 0;
+#endif
+ if( pX ) *pTm = *pX;
+ sqlite3_mutex_leave(mutex);
+ rc = pX==0;
+#else
+#ifndef SQLITE_UNTESTABLE
+ if( sqlite3GlobalConfig.bLocaltimeFault ) return 1;
+#endif
+#if HAVE_LOCALTIME_R
+ rc = localtime_r(t, pTm)==0;
+#else
+ rc = localtime_s(pTm, t);
+#endif /* HAVE_LOCALTIME_R */
+#endif /* HAVE_LOCALTIME_R || HAVE_LOCALTIME_S */
+ return rc;
+}
+#endif /* SQLITE_OMIT_LOCALTIME */
+
+
+#ifndef SQLITE_OMIT_LOCALTIME
+/*
+** Compute the difference (in milliseconds) between localtime and UTC
+** (a.k.a. GMT) for the time value p where p is in UTC. If no error occurs,
+** return this value and set *pRc to SQLITE_OK.
+**
+** Or, if an error does occur, set *pRc to SQLITE_ERROR. The returned value
+** is undefined in this case.
+*/
+static sqlite3_int64 localtimeOffset(
+ DateTime *p, /* Date at which to calculate offset */
+ sqlite3_context *pCtx, /* Write error here if one occurs */
+ int *pRc /* OUT: Error code. SQLITE_OK or ERROR */
+){
+ DateTime x, y;
+ time_t t;
+ struct tm sLocal;
+
+ /* Initialize the contents of sLocal to avoid a compiler warning. */
+ memset(&sLocal, 0, sizeof(sLocal));
+
+ x = *p;
+ computeYMD_HMS(&x);
+ if( x.Y<1971 || x.Y>=2038 ){
+ /* EVIDENCE-OF: R-55269-29598 The localtime_r() C function normally only
+ ** works for years between 1970 and 2037. For dates outside this range,
+ ** SQLite attempts to map the year into an equivalent year within this
+ ** range, do the calculation, then map the year back.
+ */
+ x.Y = 2000;
+ x.M = 1;
+ x.D = 1;
+ x.h = 0;
+ x.m = 0;
+ x.s = 0.0;
+ } else {
+ int s = (int)(x.s + 0.5);
+ x.s = s;
+ }
+ x.tz = 0;
+ x.validJD = 0;
+ computeJD(&x);
+ t = (time_t)(x.iJD/1000 - 21086676*(i64)10000);
+ if( osLocaltime(&t, &sLocal) ){
+ sqlite3_result_error(pCtx, "local time unavailable", -1);
+ *pRc = SQLITE_ERROR;
+ return 0;
+ }
+ y.Y = sLocal.tm_year + 1900;
+ y.M = sLocal.tm_mon + 1;
+ y.D = sLocal.tm_mday;
+ y.h = sLocal.tm_hour;
+ y.m = sLocal.tm_min;
+ y.s = sLocal.tm_sec;
+ y.validYMD = 1;
+ y.validHMS = 1;
+ y.validJD = 0;
+ y.rawS = 0;
+ y.validTZ = 0;
+ y.isError = 0;
+ computeJD(&y);
+ *pRc = SQLITE_OK;
+ return y.iJD - x.iJD;
+}
+#endif /* SQLITE_OMIT_LOCALTIME */
+
+/*
+** The following table defines various date transformations of the form
+**
+** 'NNN days'
+**
+** Where NNN is an arbitrary floating-point number and "days" can be one
+** of several units of time.
+*/
+static const struct {
+ u8 eType; /* Transformation type code */
+ u8 nName; /* Length of the name */
+ char *zName; /* Name of the transformation */
+ double rLimit; /* Maximum NNN value for this transform */
+ double rXform; /* Constant used for this transform */
+} aXformType[] = {
+ { 0, 6, "second", 464269060800.0, 1000.0 },
+ { 0, 6, "minute", 7737817680.0, 60000.0 },
+ { 0, 4, "hour", 128963628.0, 3600000.0 },
+ { 0, 3, "day", 5373485.0, 86400000.0 },
+ { 1, 5, "month", 176546.0, 2592000000.0 },
+ { 2, 4, "year", 14713.0, 31536000000.0 },
+};
+
+/*
+** Process a modifier to a date-time stamp. The modifiers are
+** as follows:
+**
+** NNN days
+** NNN hours
+** NNN minutes
+** NNN.NNNN seconds
+** NNN months
+** NNN years
+** start of month
+** start of year
+** start of week
+** start of day
+** weekday N
+** unixepoch
+** localtime
+** utc
+**
+** Return 0 on success and 1 if there is any kind of error. If the error
+** is in a system call (i.e. localtime()), then an error message is written
+** to context pCtx. If the error is an unrecognized modifier, no error is
+** written to pCtx.
+*/
+static int parseModifier(
+ sqlite3_context *pCtx, /* Function context */
+ const char *z, /* The text of the modifier */
+ int n, /* Length of z in bytes */
+ DateTime *p /* The date/time value to be modified */
+){
+ int rc = 1;
+ double r;
+ switch(sqlite3UpperToLower[(u8)z[0]] ){
+#ifndef SQLITE_OMIT_LOCALTIME
+ case 'l': {
+ /* localtime
+ **
+ ** Assuming the current time value is UTC (a.k.a. GMT), shift it to
+ ** show local time.
+ */
+ if( sqlite3_stricmp(z, "localtime")==0 && sqlite3NotPureFunc(pCtx) ){
+ computeJD(p);
+ p->iJD += localtimeOffset(p, pCtx, &rc);
+ clearYMD_HMS_TZ(p);
+ }
+ break;
+ }
+#endif
+ case 'u': {
+ /*
+ ** unixepoch
+ **
+ ** Treat the current value of p->s as the number of
+ ** seconds since 1970. Convert to a real julian day number.
+ */
+ if( sqlite3_stricmp(z, "unixepoch")==0 && p->rawS ){
+ r = p->s*1000.0 + 210866760000000.0;
+ if( r>=0.0 && r<464269060800000.0 ){
+ clearYMD_HMS_TZ(p);
+ p->iJD = (sqlite3_int64)(r + 0.5);
+ p->validJD = 1;
+ p->rawS = 0;
+ rc = 0;
+ }
+ }
+#ifndef SQLITE_OMIT_LOCALTIME
+ else if( sqlite3_stricmp(z, "utc")==0 && sqlite3NotPureFunc(pCtx) ){
+ if( p->tzSet==0 ){
+ sqlite3_int64 c1;
+ computeJD(p);
+ c1 = localtimeOffset(p, pCtx, &rc);
+ if( rc==SQLITE_OK ){
+ p->iJD -= c1;
+ clearYMD_HMS_TZ(p);
+ p->iJD += c1 - localtimeOffset(p, pCtx, &rc);
+ }
+ p->tzSet = 1;
+ }else{
+ rc = SQLITE_OK;
+ }
+ }
+#endif
+ break;
+ }
+ case 'w': {
+ /*
+ ** weekday N
+ **
+ ** Move the date to the same time on the next occurrence of
+ ** weekday N where 0==Sunday, 1==Monday, and so forth. If the
+ ** date is already on the appropriate weekday, this is a no-op.
+ */
+ if( sqlite3_strnicmp(z, "weekday ", 8)==0
+ && sqlite3AtoF(&z[8], &r, sqlite3Strlen30(&z[8]), SQLITE_UTF8)>0
+ && (n=(int)r)==r && n>=0 && r<7 ){
+ sqlite3_int64 Z;
+ computeYMD_HMS(p);
+ p->validTZ = 0;
+ p->validJD = 0;
+ computeJD(p);
+ Z = ((p->iJD + 129600000)/86400000) % 7;
+ if( Z>n ) Z -= 7;
+ p->iJD += (n - Z)*86400000;
+ clearYMD_HMS_TZ(p);
+ rc = 0;
+ }
+ break;
+ }
+ case 's': {
+ /*
+ ** start of TTTTT
+ **
+ ** Move the date backwards to the beginning of the current day,
+ ** or month or year.
+ */
+ if( sqlite3_strnicmp(z, "start of ", 9)!=0 ) break;
+ if( !p->validJD && !p->validYMD && !p->validHMS ) break;
+ z += 9;
+ computeYMD(p);
+ p->validHMS = 1;
+ p->h = p->m = 0;
+ p->s = 0.0;
+ p->rawS = 0;
+ p->validTZ = 0;
+ p->validJD = 0;
+ if( sqlite3_stricmp(z,"month")==0 ){
+ p->D = 1;
+ rc = 0;
+ }else if( sqlite3_stricmp(z,"year")==0 ){
+ p->M = 1;
+ p->D = 1;
+ rc = 0;
+ }else if( sqlite3_stricmp(z,"day")==0 ){
+ rc = 0;
+ }
+ break;
+ }
+ case '+':
+ case '-':
+ case '0':
+ case '1':
+ case '2':
+ case '3':
+ case '4':
+ case '5':
+ case '6':
+ case '7':
+ case '8':
+ case '9': {
+ double rRounder;
+ int i;
+ for(n=1; z[n] && z[n]!=':' && !sqlite3Isspace(z[n]); n++){}
+ if( sqlite3AtoF(z, &r, n, SQLITE_UTF8)<=0 ){
+ rc = 1;
+ break;
+ }
+ if( z[n]==':' ){
+ /* A modifier of the form (+|-)HH:MM:SS.FFF adds (or subtracts) the
+ ** specified number of hours, minutes, seconds, and fractional seconds
+ ** to the time. The ".FFF" may be omitted. The ":SS.FFF" may be
+ ** omitted.
+ */
+ const char *z2 = z;
+ DateTime tx;
+ sqlite3_int64 day;
+ if( !sqlite3Isdigit(*z2) ) z2++;
+ memset(&tx, 0, sizeof(tx));
+ if( parseHhMmSs(z2, &tx) ) break;
+ computeJD(&tx);
+ tx.iJD -= 43200000;
+ day = tx.iJD/86400000;
+ tx.iJD -= day*86400000;
+ if( z[0]=='-' ) tx.iJD = -tx.iJD;
+ computeJD(p);
+ clearYMD_HMS_TZ(p);
+ p->iJD += tx.iJD;
+ rc = 0;
+ break;
+ }
+
+ /* If control reaches this point, it means the transformation is
+ ** one of the forms like "+NNN days". */
+ z += n;
+ while( sqlite3Isspace(*z) ) z++;
+ n = sqlite3Strlen30(z);
+ if( n>10 || n<3 ) break;
+ if( sqlite3UpperToLower[(u8)z[n-1]]=='s' ) n--;
+ computeJD(p);
+ rc = 1;
+ rRounder = r<0 ? -0.5 : +0.5;
+ for(i=0; i<ArraySize(aXformType); i++){
+ if( aXformType[i].nName==n
+ && sqlite3_strnicmp(aXformType[i].zName, z, n)==0
+ && r>-aXformType[i].rLimit && r<aXformType[i].rLimit
+ ){
+ switch( aXformType[i].eType ){
+ case 1: { /* Special processing to add months */
+ int x;
+ computeYMD_HMS(p);
+ p->M += (int)r;
+ x = p->M>0 ? (p->M-1)/12 : (p->M-12)/12;
+ p->Y += x;
+ p->M -= x*12;
+ p->validJD = 0;
+ r -= (int)r;
+ break;
+ }
+ case 2: { /* Special processing to add years */
+ int y = (int)r;
+ computeYMD_HMS(p);
+ p->Y += y;
+ p->validJD = 0;
+ r -= (int)r;
+ break;
+ }
+ }
+ computeJD(p);
+ p->iJD += (sqlite3_int64)(r*aXformType[i].rXform + rRounder);
+ rc = 0;
+ break;
+ }
+ }
+ clearYMD_HMS_TZ(p);
+ break;
+ }
+ default: {
+ break;
+ }
+ }
+ return rc;
+}
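+
+/* Usage sketch (editorial): modifiers compose left to right, e.g. the
+** documented idiom for the last day of the current month:
+**
+**   SELECT date('now', 'start of month', '+1 month', '-1 day');
+*/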
+
+/*
+** Process time function arguments. argv[0] is a date-time stamp.
+** argv[1] and following are modifiers. Parse them all and write
+** the resulting time into the DateTime structure p. Return 0
+** on success and 1 if there are any errors.
+**
+** If there are zero parameters (if even argv[0] is undefined)
+** then assume a default value of "now" for argv[0].
+*/
+static int isDate(
+ sqlite3_context *context,
+ int argc,
+ sqlite3_value **argv,
+ DateTime *p
+){
+ int i, n;
+ const unsigned char *z;
+ int eType;
+ memset(p, 0, sizeof(*p));
+ if( argc==0 ){
+ return setDateTimeToCurrent(context, p);
+ }
+ if( (eType = sqlite3_value_type(argv[0]))==SQLITE_FLOAT
+ || eType==SQLITE_INTEGER ){
+ setRawDateNumber(p, sqlite3_value_double(argv[0]));
+ }else{
+ z = sqlite3_value_text(argv[0]);
+ if( !z || parseDateOrTime(context, (char*)z, p) ){
+ return 1;
+ }
+ }
+ for(i=1; i<argc; i++){
+ z = sqlite3_value_text(argv[i]);
+ n = sqlite3_value_bytes(argv[i]);
+ if( z==0 || parseModifier(context, (char*)z, n, p) ) return 1;
+ }
+ computeJD(p);
+ if( p->isError || !validJulianDay(p->iJD) ) return 1;
+ return 0;
+}
+
+
+/*
+** The following routines implement the various date and time functions
+** of SQLite.
+*/
+
+/*
+** julianday( TIMESTRING, MOD, MOD, ...)
+**
+** Return the julian day number of the date specified in the arguments
+*/
+static void juliandayFunc(
+ sqlite3_context *context,
+ int argc,
+ sqlite3_value **argv
+){
+ DateTime x;
+ if( isDate(context, argc, argv, &x)==0 ){
+ computeJD(&x);
+ sqlite3_result_double(context, x.iJD/86400000.0);
+ }
+}
+
+/*
+** datetime( TIMESTRING, MOD, MOD, ...)
+**
+** Return YYYY-MM-DD HH:MM:SS
+*/
+static void datetimeFunc(
+ sqlite3_context *context,
+ int argc,
+ sqlite3_value **argv
+){
+ DateTime x;
+ if( isDate(context, argc, argv, &x)==0 ){
+ char zBuf[100];
+ computeYMD_HMS(&x);
+ sqlite3_snprintf(sizeof(zBuf), zBuf, "%04d-%02d-%02d %02d:%02d:%02d",
+ x.Y, x.M, x.D, x.h, x.m, (int)(x.s));
+ sqlite3_result_text(context, zBuf, -1, SQLITE_TRANSIENT);
+ }
+}
+
+/*
+** time( TIMESTRING, MOD, MOD, ...)
+**
+** Return HH:MM:SS
+*/
+static void timeFunc(
+ sqlite3_context *context,
+ int argc,
+ sqlite3_value **argv
+){
+ DateTime x;
+ if( isDate(context, argc, argv, &x)==0 ){
+ char zBuf[100];
+ computeHMS(&x);
+ sqlite3_snprintf(sizeof(zBuf), zBuf, "%02d:%02d:%02d", x.h, x.m, (int)x.s);
+ sqlite3_result_text(context, zBuf, -1, SQLITE_TRANSIENT);
+ }
+}
+
+/*
+** date( TIMESTRING, MOD, MOD, ...)
+**
+** Return YYYY-MM-DD
+*/
+static void dateFunc(
+ sqlite3_context *context,
+ int argc,
+ sqlite3_value **argv
+){
+ DateTime x;
+ if( isDate(context, argc, argv, &x)==0 ){
+ char zBuf[100];
+ computeYMD(&x);
+ sqlite3_snprintf(sizeof(zBuf), zBuf, "%04d-%02d-%02d", x.Y, x.M, x.D);
+ sqlite3_result_text(context, zBuf, -1, SQLITE_TRANSIENT);
+ }
+}
+
+/*
+** strftime( FORMAT, TIMESTRING, MOD, MOD, ...)
+**
+** Return a string described by FORMAT. Conversions as follows:
+**
+** %d day of month
+** %f ** fractional seconds SS.SSS
+** %H hour 00-24
+** %j day of year 000-366
+** %J ** julian day number
+** %m month 01-12
+** %M minute 00-59
+** %s seconds since 1970-01-01
+** %S seconds 00-59
+** %w day of week 0-6 sunday==0
+** %W week of year 00-53
+** %Y year 0000-9999
+** %% %
+*/
+static void strftimeFunc(
+ sqlite3_context *context,
+ int argc,
+ sqlite3_value **argv
+){
+ DateTime x;
+ u64 n;
+ size_t i,j;
+ char *z;
+ sqlite3 *db;
+ const char *zFmt;
+ char zBuf[100];
+ if( argc==0 ) return;
+ zFmt = (const char*)sqlite3_value_text(argv[0]);
+ if( zFmt==0 || isDate(context, argc-1, argv+1, &x) ) return;
+ db = sqlite3_context_db_handle(context);
+ for(i=0, n=1; zFmt[i]; i++, n++){
+ if( zFmt[i]=='%' ){
+ switch( zFmt[i+1] ){
+ case 'd':
+ case 'H':
+ case 'm':
+ case 'M':
+ case 'S':
+ case 'W':
+ n++;
+ /* fall thru */
+ case 'w':
+ case '%':
+ break;
+ case 'f':
+ n += 8;
+ break;
+ case 'j':
+ n += 3;
+ break;
+ case 'Y':
+ n += 8;
+ break;
+ case 's':
+ case 'J':
+ n += 50;
+ break;
+ default:
+ return; /* ERROR. return a NULL */
+ }
+ i++;
+ }
+ }
+ testcase( n==sizeof(zBuf)-1 );
+ testcase( n==sizeof(zBuf) );
+ testcase( n==(u64)db->aLimit[SQLITE_LIMIT_LENGTH]+1 );
+ testcase( n==(u64)db->aLimit[SQLITE_LIMIT_LENGTH] );
+ if( n<sizeof(zBuf) ){
+ z = zBuf;
+ }else if( n>(u64)db->aLimit[SQLITE_LIMIT_LENGTH] ){
+ sqlite3_result_error_toobig(context);
+ return;
+ }else{
+ z = sqlite3DbMallocRawNN(db, (int)n);
+ if( z==0 ){
+ sqlite3_result_error_nomem(context);
+ return;
+ }
+ }
+ computeJD(&x);
+ computeYMD_HMS(&x);
+ for(i=j=0; zFmt[i]; i++){
+ if( zFmt[i]!='%' ){
+ z[j++] = zFmt[i];
+ }else{
+ i++;
+ switch( zFmt[i] ){
+ case 'd': sqlite3_snprintf(3, &z[j],"%02d",x.D); j+=2; break;
+ case 'f': {
+ double s = x.s;
+ if( s>59.999 ) s = 59.999;
+ sqlite3_snprintf(7, &z[j],"%06.3f", s);
+ j += sqlite3Strlen30(&z[j]);
+ break;
+ }
+ case 'H': sqlite3_snprintf(3, &z[j],"%02d",x.h); j+=2; break;
+ case 'W': /* Fall thru */
+ case 'j': {
+ int nDay; /* Number of days since 1st day of year */
+ DateTime y = x;
+ y.validJD = 0;
+ y.M = 1;
+ y.D = 1;
+ computeJD(&y);
+ nDay = (int)((x.iJD-y.iJD+43200000)/86400000);
+ if( zFmt[i]=='W' ){
+ int wd; /* 0=Monday, 1=Tuesday, ... 6=Sunday */
+ wd = (int)(((x.iJD+43200000)/86400000)%7);
+ sqlite3_snprintf(3, &z[j],"%02d",(nDay+7-wd)/7);
+ j += 2;
+ }else{
+ sqlite3_snprintf(4, &z[j],"%03d",nDay+1);
+ j += 3;
+ }
+ break;
+ }
+ case 'J': {
+ sqlite3_snprintf(20, &z[j],"%.16g",x.iJD/86400000.0);
+ j+=sqlite3Strlen30(&z[j]);
+ break;
+ }
+ case 'm': sqlite3_snprintf(3, &z[j],"%02d",x.M); j+=2; break;
+ case 'M': sqlite3_snprintf(3, &z[j],"%02d",x.m); j+=2; break;
+ case 's': {
+ i64 iS = (i64)(x.iJD/1000 - 21086676*(i64)10000);
+ sqlite3Int64ToText(iS, &z[j]);
+ j += sqlite3Strlen30(&z[j]);
+ break;
+ }
+ case 'S': sqlite3_snprintf(3,&z[j],"%02d",(int)x.s); j+=2; break;
+ case 'w': {
+ z[j++] = (char)(((x.iJD+129600000)/86400000) % 7) + '0';
+ break;
+ }
+ case 'Y': {
+ sqlite3_snprintf(5,&z[j],"%04d",x.Y); j+=sqlite3Strlen30(&z[j]);
+ break;
+ }
+ default: z[j++] = '%'; break;
+ }
+ }
+ }
+ z[j] = 0;
+ sqlite3_result_text(context, z, -1,
+ z==zBuf ? SQLITE_TRANSIENT : SQLITE_DYNAMIC);
+}
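+
+/* Usage sketch (editorial): from SQL, the routine above implements e.g.
+**
+**   SELECT strftime('%Y-%m-%d %H:%M:%S', 'now');
+**   SELECT strftime('%j', '2000-01-01');   -- day of year: '001'
+*/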
+
+/*
+** current_time()
+**
+** This function returns the same value as time('now').
+*/
+static void ctimeFunc(
+ sqlite3_context *context,
+ int NotUsed,
+ sqlite3_value **NotUsed2
+){
+ UNUSED_PARAMETER2(NotUsed, NotUsed2);
+ timeFunc(context, 0, 0);
+}
+
+/*
+** current_date()
+**
+** This function returns the same value as date('now').
+*/
+static void cdateFunc(
+ sqlite3_context *context,
+ int NotUsed,
+ sqlite3_value **NotUsed2
+){
+ UNUSED_PARAMETER2(NotUsed, NotUsed2);
+ dateFunc(context, 0, 0);
+}
+
+/*
+** current_timestamp()
+**
+** This function returns the same value as datetime('now').
+*/
+static void ctimestampFunc(
+ sqlite3_context *context,
+ int NotUsed,
+ sqlite3_value **NotUsed2
+){
+ UNUSED_PARAMETER2(NotUsed, NotUsed2);
+ datetimeFunc(context, 0, 0);
+}
+#endif /* !defined(SQLITE_OMIT_DATETIME_FUNCS) */
+
+#ifdef SQLITE_OMIT_DATETIME_FUNCS
+/*
+** If the library is compiled to omit the full-scale date and time
+** handling (to get a smaller binary), the following minimal versions
+** of the functions current_time(), current_date(), and current_timestamp()
+** are included instead. This is to support column declarations that
+** include "DEFAULT CURRENT_TIME" etc.
+**
+** This function uses the C-library functions time(), gmtime()
+** and strftime(). The format string to pass to strftime() is supplied
+** as the user-data for the function.
+*/
+static void currentTimeFunc(
+ sqlite3_context *context,
+ int argc,
+ sqlite3_value **argv
+){
+ time_t t;
+ char *zFormat = (char *)sqlite3_user_data(context);
+ sqlite3_int64 iT;
+ struct tm *pTm;
+ struct tm sNow;
+ char zBuf[20];
+
+ UNUSED_PARAMETER(argc);
+ UNUSED_PARAMETER(argv);
+
+ iT = sqlite3StmtCurrentTime(context);
+ if( iT<=0 ) return;
+ t = iT/1000 - 10000*(sqlite3_int64)21086676;
+#if HAVE_GMTIME_R
+ pTm = gmtime_r(&t, &sNow);
+#else
+ sqlite3_mutex_enter(sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MAIN));
+ pTm = gmtime(&t);
+ if( pTm ) memcpy(&sNow, pTm, sizeof(sNow));
+ sqlite3_mutex_leave(sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MAIN));
+#endif
+ if( pTm ){
+ strftime(zBuf, 20, zFormat, &sNow);
+ sqlite3_result_text(context, zBuf, -1, SQLITE_TRANSIENT);
+ }
+}
+#endif
+
+/*
+** This function registers all of the above C functions as SQL
+** functions. This should be the only routine in this file with
+** external linkage.
+*/
+SQLITE_PRIVATE void sqlite3RegisterDateTimeFunctions(void){
+ static FuncDef aDateTimeFuncs[] = {
+#ifndef SQLITE_OMIT_DATETIME_FUNCS
+ PURE_DATE(julianday, -1, 0, 0, juliandayFunc ),
+ PURE_DATE(date, -1, 0, 0, dateFunc ),
+ PURE_DATE(time, -1, 0, 0, timeFunc ),
+ PURE_DATE(datetime, -1, 0, 0, datetimeFunc ),
+ PURE_DATE(strftime, -1, 0, 0, strftimeFunc ),
+ DFUNCTION(current_time, 0, 0, 0, ctimeFunc ),
+ DFUNCTION(current_timestamp, 0, 0, 0, ctimestampFunc),
+ DFUNCTION(current_date, 0, 0, 0, cdateFunc ),
+#else
+ STR_FUNCTION(current_time, 0, "%H:%M:%S", 0, currentTimeFunc),
+ STR_FUNCTION(current_date, 0, "%Y-%m-%d", 0, currentTimeFunc),
+ STR_FUNCTION(current_timestamp, 0, "%Y-%m-%d %H:%M:%S", 0, currentTimeFunc),
+#endif
+ };
+ sqlite3InsertBuiltinFuncs(aDateTimeFuncs, ArraySize(aDateTimeFuncs));
+}
+
+/************** End of date.c ************************************************/
+/************** Begin file os.c **********************************************/
+/*
+** 2005 November 29
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+******************************************************************************
+**
+** This file contains OS interface code that is common to all
+** architectures.
+*/
+/* #include "sqliteInt.h" */
+
+/*
+** If we compile with the SQLITE_TEST macro set, then the following block
+** of code will give us the ability to simulate a disk I/O error. This
+** is used for testing the I/O recovery logic.
+*/
+#if defined(SQLITE_TEST)
+SQLITE_API int sqlite3_io_error_hit = 0; /* Total number of I/O Errors */
+SQLITE_API int sqlite3_io_error_hardhit = 0; /* Number of non-benign errors */
+SQLITE_API int sqlite3_io_error_pending = 0; /* Count down to first I/O error */
+SQLITE_API int sqlite3_io_error_persist = 0; /* True if I/O errors persist */
+SQLITE_API int sqlite3_io_error_benign = 0; /* True if errors are benign */
+SQLITE_API int sqlite3_diskfull_pending = 0;
+SQLITE_API int sqlite3_diskfull = 0;
+#endif /* defined(SQLITE_TEST) */
+
+/*
+** When testing, also keep a count of the number of open files.
+*/
+#if defined(SQLITE_TEST)
+SQLITE_API int sqlite3_open_file_count = 0;
+#endif /* defined(SQLITE_TEST) */
+
+/*
+** The default SQLite sqlite3_vfs implementations do not allocate
+** memory (actually, os_unix.c allocates a small amount of memory
+** from within OsOpen()), but some third-party implementations may.
+** So we test the effects of a malloc() failing and the sqlite3OsXXX()
+** function returning SQLITE_IOERR_NOMEM using the DO_OS_MALLOC_TEST macro.
+**
+** The following functions are instrumented for malloc() failure
+** testing:
+**
+** sqlite3OsRead()
+** sqlite3OsWrite()
+** sqlite3OsSync()
+** sqlite3OsFileSize()
+** sqlite3OsLock()
+** sqlite3OsCheckReservedLock()
+** sqlite3OsFileControl()
+** sqlite3OsShmMap()
+** sqlite3OsOpen()
+** sqlite3OsDelete()
+** sqlite3OsAccess()
+** sqlite3OsFullPathname()
+**
+*/
+#if defined(SQLITE_TEST)
+SQLITE_API int sqlite3_memdebug_vfs_oom_test = 1;
+ #define DO_OS_MALLOC_TEST(x) \
+ if (sqlite3_memdebug_vfs_oom_test && (!x || !sqlite3JournalIsInMemory(x))) { \
+ void *pTstAlloc = sqlite3Malloc(10); \
+ if (!pTstAlloc) return SQLITE_IOERR_NOMEM_BKPT; \
+ sqlite3_free(pTstAlloc); \
+ }
+#else
+ #define DO_OS_MALLOC_TEST(x)
+#endif
+
+/*
+** The following routines are convenience wrappers around methods
+** of the sqlite3_file object. This is mostly just syntactic sugar. All
+** of this would be completely automatic if SQLite were coded using
+** C++ instead of plain old C.
+*/
+SQLITE_PRIVATE void sqlite3OsClose(sqlite3_file *pId){
+ if( pId->pMethods ){
+ pId->pMethods->xClose(pId);
+ pId->pMethods = 0;
+ }
+}
+SQLITE_PRIVATE int sqlite3OsRead(sqlite3_file *id, void *pBuf, int amt, i64 offset){
+ DO_OS_MALLOC_TEST(id);
+ return id->pMethods->xRead(id, pBuf, amt, offset);
+}
+SQLITE_PRIVATE int sqlite3OsWrite(sqlite3_file *id, const void *pBuf, int amt, i64 offset){
+ DO_OS_MALLOC_TEST(id);
+ return id->pMethods->xWrite(id, pBuf, amt, offset);
+}
+SQLITE_PRIVATE int sqlite3OsTruncate(sqlite3_file *id, i64 size){
+ return id->pMethods->xTruncate(id, size);
+}
+SQLITE_PRIVATE int sqlite3OsSync(sqlite3_file *id, int flags){
+ DO_OS_MALLOC_TEST(id);
+ return flags ? id->pMethods->xSync(id, flags) : SQLITE_OK;
+}
+SQLITE_PRIVATE int sqlite3OsFileSize(sqlite3_file *id, i64 *pSize){
+ DO_OS_MALLOC_TEST(id);
+ return id->pMethods->xFileSize(id, pSize);
+}
+SQLITE_PRIVATE int sqlite3OsLock(sqlite3_file *id, int lockType){
+ DO_OS_MALLOC_TEST(id);
+ return id->pMethods->xLock(id, lockType);
+}
+SQLITE_PRIVATE int sqlite3OsUnlock(sqlite3_file *id, int lockType){
+ return id->pMethods->xUnlock(id, lockType);
+}
+SQLITE_PRIVATE int sqlite3OsCheckReservedLock(sqlite3_file *id, int *pResOut){
+ DO_OS_MALLOC_TEST(id);
+ return id->pMethods->xCheckReservedLock(id, pResOut);
+}
+
+/*
+** Use sqlite3OsFileControl() when we are doing something that might fail
+** and we need to know about the failures. Use sqlite3OsFileControlHint()
+** when simply tossing information over the wall to the VFS and we do not
+** really care if the VFS receives and understands the information since it
+** is only a hint and can be safely ignored. The sqlite3OsFileControlHint()
+** routine has no return value since the return value would be meaningless.
+*/
+SQLITE_PRIVATE int sqlite3OsFileControl(sqlite3_file *id, int op, void *pArg){
+ if( id->pMethods==0 ) return SQLITE_NOTFOUND;
+#ifdef SQLITE_TEST
+ if( op!=SQLITE_FCNTL_COMMIT_PHASETWO
+ && op!=SQLITE_FCNTL_LOCK_TIMEOUT
+ ){
+ /* Faults are not injected into COMMIT_PHASETWO because, assuming SQLite
+ ** is using a regular VFS, it is called after the corresponding
+ ** transaction has been committed. Injecting a fault at this point
+ ** confuses the test scripts - the COMMIT command returns SQLITE_NOMEM
+ ** but the transaction is committed anyway.
+ **
+ ** The core must call OsFileControl() though, not OsFileControlHint(),
+ ** as if a custom VFS (e.g. zipvfs) returns an error here, it probably
+ ** means the commit really has failed and an error should be returned
+ ** to the user. */
+ DO_OS_MALLOC_TEST(id);
+ }
+#endif
+ return id->pMethods->xFileControl(id, op, pArg);
+}
+SQLITE_PRIVATE void sqlite3OsFileControlHint(sqlite3_file *id, int op, void *pArg){
+ if( id->pMethods ) (void)id->pMethods->xFileControl(id, op, pArg);
+}
+
+SQLITE_PRIVATE int sqlite3OsSectorSize(sqlite3_file *id){
+ int (*xSectorSize)(sqlite3_file*) = id->pMethods->xSectorSize;
+ return (xSectorSize ? xSectorSize(id) : SQLITE_DEFAULT_SECTOR_SIZE);
+}
+SQLITE_PRIVATE int sqlite3OsDeviceCharacteristics(sqlite3_file *id){
+ return id->pMethods->xDeviceCharacteristics(id);
+}
+#ifndef SQLITE_OMIT_WAL
+SQLITE_PRIVATE int sqlite3OsShmLock(sqlite3_file *id, int offset, int n, int flags){
+ return id->pMethods->xShmLock(id, offset, n, flags);
+}
+SQLITE_PRIVATE void sqlite3OsShmBarrier(sqlite3_file *id){
+ id->pMethods->xShmBarrier(id);
+}
+SQLITE_PRIVATE int sqlite3OsShmUnmap(sqlite3_file *id, int deleteFlag){
+ return id->pMethods->xShmUnmap(id, deleteFlag);
+}
+SQLITE_PRIVATE int sqlite3OsShmMap(
+ sqlite3_file *id, /* Database file handle */
+ int iPage,
+ int pgsz,
+ int bExtend, /* True to extend file if necessary */
+ void volatile **pp /* OUT: Pointer to mapping */
+){
+ DO_OS_MALLOC_TEST(id);
+ return id->pMethods->xShmMap(id, iPage, pgsz, bExtend, pp);
+}
+#endif /* SQLITE_OMIT_WAL */
+
+#if SQLITE_MAX_MMAP_SIZE>0
+/* The real implementation of xFetch and xUnfetch */
+SQLITE_PRIVATE int sqlite3OsFetch(sqlite3_file *id, i64 iOff, int iAmt, void **pp){
+ DO_OS_MALLOC_TEST(id);
+ return id->pMethods->xFetch(id, iOff, iAmt, pp);
+}
+SQLITE_PRIVATE int sqlite3OsUnfetch(sqlite3_file *id, i64 iOff, void *p){
+ return id->pMethods->xUnfetch(id, iOff, p);
+}
+#else
+/* No-op stubs to use when memory-mapped I/O is disabled */
+SQLITE_PRIVATE int sqlite3OsFetch(sqlite3_file *id, i64 iOff, int iAmt, void **pp){
+ *pp = 0;
+ return SQLITE_OK;
+}
+SQLITE_PRIVATE int sqlite3OsUnfetch(sqlite3_file *id, i64 iOff, void *p){
+ return SQLITE_OK;
+}
+#endif
+
+/*
+** The next group of routines are convenience wrappers around the
+** VFS methods.
+*/
+SQLITE_PRIVATE int sqlite3OsOpen(
+ sqlite3_vfs *pVfs,
+ const char *zPath,
+ sqlite3_file *pFile,
+ int flags,
+ int *pFlagsOut
+){
+ int rc;
+ DO_OS_MALLOC_TEST(0);
+ /* 0x1087f7f is a mask of SQLITE_OPEN_ flags that are valid to be passed
+ ** down into the VFS layer. Some SQLITE_OPEN_ flags (for example,
+ ** SQLITE_OPEN_FULLMUTEX or SQLITE_OPEN_SHAREDCACHE) are blocked before
+ ** reaching the VFS. */
+ rc = pVfs->xOpen(pVfs, zPath, pFile, flags & 0x1087f7f, pFlagsOut);
+ assert( rc==SQLITE_OK || pFile->pMethods==0 );
+ return rc;
+}
+SQLITE_PRIVATE int sqlite3OsDelete(sqlite3_vfs *pVfs, const char *zPath, int dirSync){
+ DO_OS_MALLOC_TEST(0);
+ assert( dirSync==0 || dirSync==1 );
+ return pVfs->xDelete(pVfs, zPath, dirSync);
+}
+SQLITE_PRIVATE int sqlite3OsAccess(
+ sqlite3_vfs *pVfs,
+ const char *zPath,
+ int flags,
+ int *pResOut
+){
+ DO_OS_MALLOC_TEST(0);
+ return pVfs->xAccess(pVfs, zPath, flags, pResOut);
+}
+SQLITE_PRIVATE int sqlite3OsFullPathname(
+ sqlite3_vfs *pVfs,
+ const char *zPath,
+ int nPathOut,
+ char *zPathOut
+){
+ DO_OS_MALLOC_TEST(0);
+ zPathOut[0] = 0;
+ return pVfs->xFullPathname(pVfs, zPath, nPathOut, zPathOut);
+}
+#ifndef SQLITE_OMIT_LOAD_EXTENSION
+SQLITE_PRIVATE void *sqlite3OsDlOpen(sqlite3_vfs *pVfs, const char *zPath){
+ return pVfs->xDlOpen(pVfs, zPath);
+}
+SQLITE_PRIVATE void sqlite3OsDlError(sqlite3_vfs *pVfs, int nByte, char *zBufOut){
+ pVfs->xDlError(pVfs, nByte, zBufOut);
+}
+SQLITE_PRIVATE void (*sqlite3OsDlSym(sqlite3_vfs *pVfs, void *pHdle, const char *zSym))(void){
+ return pVfs->xDlSym(pVfs, pHdle, zSym);
+}
+SQLITE_PRIVATE void sqlite3OsDlClose(sqlite3_vfs *pVfs, void *pHandle){
+ pVfs->xDlClose(pVfs, pHandle);
+}
+#endif /* SQLITE_OMIT_LOAD_EXTENSION */
+SQLITE_PRIVATE int sqlite3OsRandomness(sqlite3_vfs *pVfs, int nByte, char *zBufOut){
+ if( sqlite3Config.iPrngSeed ){
+ memset(zBufOut, 0, nByte);
+ if( ALWAYS(nByte>(signed)sizeof(unsigned)) ) nByte = sizeof(unsigned int);
+ memcpy(zBufOut, &sqlite3Config.iPrngSeed, nByte);
+ return SQLITE_OK;
+ }else{
+ return pVfs->xRandomness(pVfs, nByte, zBufOut);
+ }
+
+}
+SQLITE_PRIVATE int sqlite3OsSleep(sqlite3_vfs *pVfs, int nMicro){
+ return pVfs->xSleep(pVfs, nMicro);
+}
+SQLITE_PRIVATE int sqlite3OsGetLastError(sqlite3_vfs *pVfs){
+ return pVfs->xGetLastError ? pVfs->xGetLastError(pVfs, 0, 0) : 0;
+}
+SQLITE_PRIVATE int sqlite3OsCurrentTimeInt64(sqlite3_vfs *pVfs, sqlite3_int64 *pTimeOut){
+ int rc;
+ /* IMPLEMENTATION-OF: R-49045-42493 SQLite will use the xCurrentTimeInt64()
+ ** method to get the current date and time if that method is available
+ ** (if iVersion is 2 or greater and the function pointer is not NULL) and
+ ** will fall back to xCurrentTime() if xCurrentTimeInt64() is
+ ** unavailable.
+ */
+ if( pVfs->iVersion>=2 && pVfs->xCurrentTimeInt64 ){
+ rc = pVfs->xCurrentTimeInt64(pVfs, pTimeOut);
+ }else{
+ double r;
+ rc = pVfs->xCurrentTime(pVfs, &r);
+ *pTimeOut = (sqlite3_int64)(r*86400000.0);
+ }
+ return rc;
+}
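+
+/*
+** Worked example (an illustrative sketch, not part of the build): the
+** fallback path above converts the Julian Day Number returned by
+** xCurrentTime() (a double, in days) to milliseconds. For instance, a
+** Julian day of 2459000.5 becomes
+** 2459000.5 * 86400000.0 = 212457643200000 ms.
+*/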
+
+SQLITE_PRIVATE int sqlite3OsOpenMalloc(
+ sqlite3_vfs *pVfs,
+ const char *zFile,
+ sqlite3_file **ppFile,
+ int flags,
+ int *pOutFlags
+){
+ int rc;
+ sqlite3_file *pFile;
+ pFile = (sqlite3_file *)sqlite3MallocZero(pVfs->szOsFile);
+ if( pFile ){
+ rc = sqlite3OsOpen(pVfs, zFile, pFile, flags, pOutFlags);
+ if( rc!=SQLITE_OK ){
+ sqlite3_free(pFile);
+ }else{
+ *ppFile = pFile;
+ }
+ }else{
+ rc = SQLITE_NOMEM_BKPT;
+ }
+ return rc;
+}
+SQLITE_PRIVATE void sqlite3OsCloseFree(sqlite3_file *pFile){
+ assert( pFile );
+ sqlite3OsClose(pFile);
+ sqlite3_free(pFile);
+}
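+
+/*
+** Illustrative usage sketch (not part of the build; the filename and
+** flags are hypothetical): sqlite3OsOpenMalloc() and sqlite3OsCloseFree()
+** are intended to be used as a pair, so that the sqlite3_file buffer is
+** allocated and released together with the open and close:
+**
+**   sqlite3_file *pFd;
+**   int outFlags;
+**   int rc = sqlite3OsOpenMalloc(pVfs, "test.db", &pFd,
+**              SQLITE_OPEN_READWRITE|SQLITE_OPEN_CREATE|SQLITE_OPEN_MAIN_DB,
+**              &outFlags);
+**   if( rc==SQLITE_OK ){
+**     ...read and write through pFd...
+**     sqlite3OsCloseFree(pFd);
+**   }
+*/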
+
+/*
+** This function is a wrapper around the OS specific implementation of
+** sqlite3_os_init(). The purpose of the wrapper is to provide the
+** ability to simulate a malloc failure, so that the handling of an
+** error in sqlite3_os_init() by the upper layers can be tested.
+*/
+SQLITE_PRIVATE int sqlite3OsInit(void){
+ void *p = sqlite3_malloc(10);
+ if( p==0 ) return SQLITE_NOMEM_BKPT;
+ sqlite3_free(p);
+ return sqlite3_os_init();
+}
+
+/*
+** The list of all registered VFS implementations.
+*/
+static sqlite3_vfs * SQLITE_WSD vfsList = 0;
+#define vfsList GLOBAL(sqlite3_vfs *, vfsList)
+
+/*
+** Locate a VFS by name. If no name is given, simply return the
+** first VFS on the list.
+*/
+SQLITE_API sqlite3_vfs *sqlite3_vfs_find(const char *zVfs){
+ sqlite3_vfs *pVfs = 0;
+#if SQLITE_THREADSAFE
+ sqlite3_mutex *mutex;
+#endif
+#ifndef SQLITE_OMIT_AUTOINIT
+ int rc = sqlite3_initialize();
+ if( rc ) return 0;
+#endif
+#if SQLITE_THREADSAFE
+ mutex = sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MAIN);
+#endif
+ sqlite3_mutex_enter(mutex);
+ for(pVfs = vfsList; pVfs; pVfs=pVfs->pNext){
+ if( zVfs==0 ) break;
+ if( strcmp(zVfs, pVfs->zName)==0 ) break;
+ }
+ sqlite3_mutex_leave(mutex);
+ return pVfs;
+}
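+
+/*
+** Illustrative sketch (not part of the build): a NULL name selects the
+** default (first) VFS, while a specific name returns that VFS or 0 if it
+** is not registered:
+**
+**   sqlite3_vfs *pDefault = sqlite3_vfs_find(0);
+**   sqlite3_vfs *pUnix = sqlite3_vfs_find("unix");
+*/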
+
+/*
+** Unlink a VFS from the linked list
+*/
+static void vfsUnlink(sqlite3_vfs *pVfs){
+ assert( sqlite3_mutex_held(sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MAIN)) );
+ if( pVfs==0 ){
+ /* No-op */
+ }else if( vfsList==pVfs ){
+ vfsList = pVfs->pNext;
+ }else if( vfsList ){
+ sqlite3_vfs *p = vfsList;
+ while( p->pNext && p->pNext!=pVfs ){
+ p = p->pNext;
+ }
+ if( p->pNext==pVfs ){
+ p->pNext = pVfs->pNext;
+ }
+ }
+}
+
+/*
+** Register a VFS with the system. It is harmless to register the same
+** VFS multiple times. The new VFS becomes the default if makeDflt is
+** true.
+*/
+SQLITE_API int sqlite3_vfs_register(sqlite3_vfs *pVfs, int makeDflt){
+ MUTEX_LOGIC(sqlite3_mutex *mutex;)
+#ifndef SQLITE_OMIT_AUTOINIT
+ int rc = sqlite3_initialize();
+ if( rc ) return rc;
+#endif
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( pVfs==0 ) return SQLITE_MISUSE_BKPT;
+#endif
+
+ MUTEX_LOGIC( mutex = sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MAIN); )
+ sqlite3_mutex_enter(mutex);
+ vfsUnlink(pVfs);
+ if( makeDflt || vfsList==0 ){
+ pVfs->pNext = vfsList;
+ vfsList = pVfs;
+ }else{
+ pVfs->pNext = vfsList->pNext;
+ vfsList->pNext = pVfs;
+ }
+ assert(vfsList);
+ sqlite3_mutex_leave(mutex);
+ return SQLITE_OK;
+}
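+
+/*
+** Illustrative sketch (not part of the build; "shim" is a hypothetical
+** name): a wrapper VFS is typically cloned from the current default and
+** then registered, optionally becoming the new default:
+**
+**   static sqlite3_vfs shimVfs;
+**   shimVfs = *sqlite3_vfs_find(0);
+**   shimVfs.zName = "shim";
+**   sqlite3_vfs_register(&shimVfs, 1);
+*/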
+
+/*
+** Unregister a VFS so that it is no longer accessible.
+*/
+SQLITE_API int sqlite3_vfs_unregister(sqlite3_vfs *pVfs){
+ MUTEX_LOGIC(sqlite3_mutex *mutex;)
+#ifndef SQLITE_OMIT_AUTOINIT
+ int rc = sqlite3_initialize();
+ if( rc ) return rc;
+#endif
+ MUTEX_LOGIC( mutex = sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MAIN); )
+ sqlite3_mutex_enter(mutex);
+ vfsUnlink(pVfs);
+ sqlite3_mutex_leave(mutex);
+ return SQLITE_OK;
+}
+
+/************** End of os.c **************************************************/
+/************** Begin file fault.c *******************************************/
+/*
+** 2008 Jan 22
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+**
+** This file contains code to support the concept of "benign"
+** malloc failures (when the xMalloc() or xRealloc() method of the
+** sqlite3_mem_methods structure fails to allocate a block of memory
+** and returns 0).
+**
+** Most malloc failures are non-benign. After they occur, SQLite
+** abandons the current operation and returns an error code (usually
+** SQLITE_NOMEM) to the user. However, sometimes a fault is not necessarily
+** fatal. For example, if a malloc fails while resizing a hash table, this
+** is completely recoverable simply by not carrying out the resize. The
+** hash table will continue to function normally. So a malloc failure
+** during a hash table resize is a benign fault.
+*/
+
+/* #include "sqliteInt.h" */
+
+#ifndef SQLITE_UNTESTABLE
+
+/*
+** Global variables.
+*/
+typedef struct BenignMallocHooks BenignMallocHooks;
+static SQLITE_WSD struct BenignMallocHooks {
+ void (*xBenignBegin)(void);
+ void (*xBenignEnd)(void);
+} sqlite3Hooks = { 0, 0 };
+
+/* The "wsdHooks" macro will resolve to the appropriate BenignMallocHooks
+** structure. If writable static data is unsupported on the target,
+** we have to locate the state vector at run-time. In the more common
+** case where writable static data is supported, wsdHooks can refer directly
+** to the "sqlite3Hooks" state vector declared above.
+*/
+#ifdef SQLITE_OMIT_WSD
+# define wsdHooksInit \
+ BenignMallocHooks *x = &GLOBAL(BenignMallocHooks,sqlite3Hooks)
+# define wsdHooks x[0]
+#else
+# define wsdHooksInit
+# define wsdHooks sqlite3Hooks
+#endif
+
+
+/*
+** Register hooks to call when sqlite3BeginBenignMalloc() and
+** sqlite3EndBenignMalloc() are called, respectively.
+*/
+SQLITE_PRIVATE void sqlite3BenignMallocHooks(
+ void (*xBenignBegin)(void),
+ void (*xBenignEnd)(void)
+){
+ wsdHooksInit;
+ wsdHooks.xBenignBegin = xBenignBegin;
+ wsdHooks.xBenignEnd = xBenignEnd;
+}
+
+/*
+** This (sqlite3BeginBenignMalloc()) is called by SQLite code to indicate that
+** subsequent malloc failures are benign. A call to sqlite3EndBenignMalloc()
+** indicates that subsequent malloc failures are non-benign.
+*/
+SQLITE_PRIVATE void sqlite3BeginBenignMalloc(void){
+ wsdHooksInit;
+ if( wsdHooks.xBenignBegin ){
+ wsdHooks.xBenignBegin();
+ }
+}
+SQLITE_PRIVATE void sqlite3EndBenignMalloc(void){
+ wsdHooksInit;
+ if( wsdHooks.xBenignEnd ){
+ wsdHooks.xBenignEnd();
+ }
+}
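+
+/*
+** Illustrative usage pattern (a sketch, not part of the build): callers
+** bracket optional work, such as a hash-table resize, so that an
+** allocation failure inside the bracket is treated as benign:
+**
+**   sqlite3BeginBenignMalloc();
+**   pNew = sqlite3Malloc(nNew);
+**   sqlite3EndBenignMalloc();
+**   if( pNew==0 ) return;
+**
+** A failed resize simply leaves the old data structure in place.
+*/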
+
+#endif /* #ifndef SQLITE_UNTESTABLE */
+
+/************** End of fault.c ***********************************************/
+/************** Begin file mem0.c ********************************************/
+/*
+** 2008 October 28
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+**
+** This file contains no-op memory allocation drivers for use when
+** SQLITE_ZERO_MALLOC is defined. The allocation drivers implemented
+** here always fail. SQLite will not operate with these drivers. These
+** are merely placeholders. Real drivers must be substituted using
+** sqlite3_config() before SQLite will operate.
+*/
+/* #include "sqliteInt.h" */
+
+/*
+** This version of the memory allocator is the default. It is
+** used when no other memory allocator is specified using compile-time
+** macros.
+*/
+#ifdef SQLITE_ZERO_MALLOC
+
+/*
+** No-op versions of all memory allocation routines
+*/
+static void *sqlite3MemMalloc(int nByte){ return 0; }
+static void sqlite3MemFree(void *pPrior){ return; }
+static void *sqlite3MemRealloc(void *pPrior, int nByte){ return 0; }
+static int sqlite3MemSize(void *pPrior){ return 0; }
+static int sqlite3MemRoundup(int n){ return n; }
+static int sqlite3MemInit(void *NotUsed){ return SQLITE_OK; }
+static void sqlite3MemShutdown(void *NotUsed){ return; }
+
+/*
+** This routine is the only routine in this file with external linkage.
+**
+** Populate the low-level memory allocation function pointers in
+** sqlite3GlobalConfig.m with pointers to the routines in this file.
+*/
+SQLITE_PRIVATE void sqlite3MemSetDefault(void){
+ static const sqlite3_mem_methods defaultMethods = {
+ sqlite3MemMalloc,
+ sqlite3MemFree,
+ sqlite3MemRealloc,
+ sqlite3MemSize,
+ sqlite3MemRoundup,
+ sqlite3MemInit,
+ sqlite3MemShutdown,
+ 0
+ };
+ sqlite3_config(SQLITE_CONFIG_MALLOC, &defaultMethods);
+}
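+
+/*
+** Illustrative sketch (not part of the build; appMalloc and the other
+** routines are hypothetical application-supplied functions): under
+** SQLITE_ZERO_MALLOC a working allocator must be installed before
+** sqlite3_initialize() is called:
+**
+**   static const sqlite3_mem_methods appMethods = {
+**     appMalloc, appFree, appRealloc, appSize, appRoundup,
+**     appInit, appShutdown, 0
+**   };
+**   sqlite3_config(SQLITE_CONFIG_MALLOC, &appMethods);
+*/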
+
+#endif /* SQLITE_ZERO_MALLOC */
+
+/************** End of mem0.c ************************************************/
+/************** Begin file mem1.c ********************************************/
+/*
+** 2007 August 14
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+**
+** This file contains low-level memory allocation drivers for when
+** SQLite will use the standard C-library malloc/realloc/free interface
+** to obtain the memory it needs.
+**
+** This file contains implementations of the low-level memory allocation
+** routines specified in the sqlite3_mem_methods object. The content of
+** this file is only used if SQLITE_SYSTEM_MALLOC is defined. The
+** SQLITE_SYSTEM_MALLOC macro is defined automatically if neither the
+** SQLITE_MEMDEBUG nor the SQLITE_WIN32_MALLOC macros are defined. The
+** default configuration is to use memory allocation routines in this
+** file.
+**
+** C-preprocessor macro summary:
+**
+** HAVE_MALLOC_USABLE_SIZE The configure script sets this symbol if
+** the malloc_usable_size() interface exists
+** on the target platform. Or, this symbol
+** can be set manually, if desired.
+** If an equivalent interface exists under
+** a different name, use a separate -D
+** option to rename it.
+**
+** SQLITE_WITHOUT_ZONEMALLOC Some older macs lack support for the zone
+** memory allocator. Set this symbol to enable
+** building on older macs.
+**
+** SQLITE_WITHOUT_MSIZE Set this symbol to disable the use of
+** _msize() on windows systems. This might
+** be necessary when compiling for Delphi,
+** for example.
+*/
+/* #include "sqliteInt.h" */
+
+/*
+** This version of the memory allocator is the default. It is
+** used when no other memory allocator is specified using compile-time
+** macros.
+*/
+#ifdef SQLITE_SYSTEM_MALLOC
+#if defined(__APPLE__) && !defined(SQLITE_WITHOUT_ZONEMALLOC)
+
+/*
+** Use the zone allocator available on apple products unless the
+** SQLITE_WITHOUT_ZONEMALLOC symbol is defined.
+*/
+#include <sys/sysctl.h>
+#include <malloc/malloc.h>
+#ifdef SQLITE_MIGHT_BE_SINGLE_CORE
+#include <libkern/OSAtomic.h>
+#endif /* SQLITE_MIGHT_BE_SINGLE_CORE */
+static malloc_zone_t* _sqliteZone_;
+#define SQLITE_MALLOC(x) malloc_zone_malloc(_sqliteZone_, (x))
+#define SQLITE_FREE(x) malloc_zone_free(_sqliteZone_, (x));
+#define SQLITE_REALLOC(x,y) malloc_zone_realloc(_sqliteZone_, (x), (y))
+#define SQLITE_MALLOCSIZE(x) \
+ (_sqliteZone_ ? _sqliteZone_->size(_sqliteZone_,x) : malloc_size(x))
+
+#else /* if not __APPLE__ */
+
+/*
+** Use standard C library malloc and free on non-Apple systems.
+** Also used by Apple systems if SQLITE_WITHOUT_ZONEMALLOC is defined.
+*/
+#define SQLITE_MALLOC(x) malloc(x)
+#define SQLITE_FREE(x) free(x)
+#define SQLITE_REALLOC(x,y) realloc((x),(y))
+
+/*
+** The malloc.h header file is needed for malloc_usable_size() function
+** on some systems (e.g. Linux).
+*/
+#if HAVE_MALLOC_H && HAVE_MALLOC_USABLE_SIZE
+# define SQLITE_USE_MALLOC_H 1
+# define SQLITE_USE_MALLOC_USABLE_SIZE 1
+/*
+** The MSVCRT has malloc_usable_size(), but it is called _msize(). The
+** use of _msize() is automatic, but can be disabled by compiling with
+** -DSQLITE_WITHOUT_MSIZE. Using the _msize() function also requires
+** the malloc.h header file.
+*/
+#elif defined(_MSC_VER) && !defined(SQLITE_WITHOUT_MSIZE)
+# define SQLITE_USE_MALLOC_H
+# define SQLITE_USE_MSIZE
+#endif
+
+/*
+** Include the malloc.h header file, if necessary. Also set define macro
+** SQLITE_MALLOCSIZE to the appropriate function name, which is _msize()
+** for MSVC and malloc_usable_size() for most other systems (e.g. Linux).
+** The memory size function can always be overridden manually by defining
+** the macro SQLITE_MALLOCSIZE to the desired function name.
+*/
+#if defined(SQLITE_USE_MALLOC_H)
+# include <malloc.h>
+# if defined(SQLITE_USE_MALLOC_USABLE_SIZE)
+# if !defined(SQLITE_MALLOCSIZE)
+# define SQLITE_MALLOCSIZE(x) malloc_usable_size(x)
+# endif
+# elif defined(SQLITE_USE_MSIZE)
+# if !defined(SQLITE_MALLOCSIZE)
+# define SQLITE_MALLOCSIZE _msize
+# endif
+# endif
+#endif /* defined(SQLITE_USE_MALLOC_H) */
+
+#endif /* __APPLE__ or not __APPLE__ */
+
+/*
+** Like malloc(), but remember the size of the allocation
+** so that we can find it later using sqlite3MemSize().
+**
+** For this low-level routine, we are guaranteed that nByte>0 because
+** cases of nByte<=0 will be intercepted and dealt with by higher level
+** routines.
+*/
+static void *sqlite3MemMalloc(int nByte){
+#ifdef SQLITE_MALLOCSIZE
+ void *p;
+ testcase( ROUND8(nByte)==nByte );
+ p = SQLITE_MALLOC( nByte );
+ if( p==0 ){
+ testcase( sqlite3GlobalConfig.xLog!=0 );
+ sqlite3_log(SQLITE_NOMEM, "failed to allocate %u bytes of memory", nByte);
+ }
+ return p;
+#else
+ sqlite3_int64 *p;
+ assert( nByte>0 );
+ testcase( ROUND8(nByte)!=nByte );
+ p = SQLITE_MALLOC( nByte+8 );
+ if( p ){
+ p[0] = nByte;
+ p++;
+ }else{
+ testcase( sqlite3GlobalConfig.xLog!=0 );
+ sqlite3_log(SQLITE_NOMEM, "failed to allocate %u bytes of memory", nByte);
+ }
+ return (void *)p;
+#endif
+}
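+
+/*
+** Layout sketch for the non-SQLITE_MALLOCSIZE branch above: the request
+** size is remembered in an 8-byte prefix and the caller receives a
+** pointer just past it:
+**
+**   p (from malloc)   p+1 (returned)
+**   |                 |
+**   v                 v
+**   [ i64: nByte     ][ nByte bytes of user data ... ]
+**
+** sqlite3MemSize() and sqlite3MemFree() step the pointer back by one
+** sqlite3_int64 to recover the prefix.
+*/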
+
+/*
+** Like free() but works for allocations obtained from sqlite3MemMalloc()
+** or sqlite3MemRealloc().
+**
+** For this low-level routine, we already know that pPrior!=0 since
+** cases where pPrior==0 will have been intercepted and dealt with
+** by higher-level routines.
+*/
+static void sqlite3MemFree(void *pPrior){
+#ifdef SQLITE_MALLOCSIZE
+ SQLITE_FREE(pPrior);
+#else
+ sqlite3_int64 *p = (sqlite3_int64*)pPrior;
+ assert( pPrior!=0 );
+ p--;
+ SQLITE_FREE(p);
+#endif
+}
+
+/*
+** Report the allocated size of a prior return from xMalloc()
+** or xRealloc().
+*/
+static int sqlite3MemSize(void *pPrior){
+#ifdef SQLITE_MALLOCSIZE
+ assert( pPrior!=0 );
+ return (int)SQLITE_MALLOCSIZE(pPrior);
+#else
+ sqlite3_int64 *p;
+ assert( pPrior!=0 );
+ p = (sqlite3_int64*)pPrior;
+ p--;
+ return (int)p[0];
+#endif
+}
+
+/*
+** Like realloc(). Resize an allocation previously obtained from
+** sqlite3MemMalloc().
+**
+** For this low-level interface, we know that pPrior!=0. Cases where
+** pPrior==0 will have been intercepted by higher-level routines and
+** redirected to xMalloc. Similarly, we know that nByte>0 because
+** cases where nByte<=0 will have been intercepted by higher-level
+** routines and redirected to xFree.
+*/
+static void *sqlite3MemRealloc(void *pPrior, int nByte){
+#ifdef SQLITE_MALLOCSIZE
+ void *p = SQLITE_REALLOC(pPrior, nByte);
+ if( p==0 ){
+ testcase( sqlite3GlobalConfig.xLog!=0 );
+ sqlite3_log(SQLITE_NOMEM,
+ "failed memory resize %u to %u bytes",
+ SQLITE_MALLOCSIZE(pPrior), nByte);
+ }
+ return p;
+#else
+ sqlite3_int64 *p = (sqlite3_int64*)pPrior;
+ assert( pPrior!=0 && nByte>0 );
+ assert( nByte==ROUND8(nByte) ); /* EV: R-46199-30249 */
+ p--;
+ p = SQLITE_REALLOC(p, nByte+8 );
+ if( p ){
+ p[0] = nByte;
+ p++;
+ }else{
+ testcase( sqlite3GlobalConfig.xLog!=0 );
+ sqlite3_log(SQLITE_NOMEM,
+ "failed memory resize %u to %u bytes",
+ sqlite3MemSize(pPrior), nByte);
+ }
+ return (void*)p;
+#endif
+}
+
+/*
+** Round up a request size to the next valid allocation size.
+*/
+static int sqlite3MemRoundup(int n){
+ return ROUND8(n);
+}
+
+/*
+** Initialize this module.
+*/
+static int sqlite3MemInit(void *NotUsed){
+#if defined(__APPLE__) && !defined(SQLITE_WITHOUT_ZONEMALLOC)
+ int cpuCount;
+ size_t len;
+ if( _sqliteZone_ ){
+ return SQLITE_OK;
+ }
+ len = sizeof(cpuCount);
+ /* One usually wants to use hw.activecpu for MT decisions, but not here */
+ sysctlbyname("hw.ncpu", &cpuCount, &len, NULL, 0);
+ if( cpuCount>1 ){
+ /* defer MT decisions to system malloc */
+ _sqliteZone_ = malloc_default_zone();
+ }else{
+ /* only 1 core; use our own zone to avoid contention over global locks,
+ ** i.e. we have our own dedicated locks */
+ _sqliteZone_ = malloc_create_zone(4096, 0);
+ malloc_set_zone_name(_sqliteZone_, "Sqlite_Heap");
+ }
+#endif /* defined(__APPLE__) && !defined(SQLITE_WITHOUT_ZONEMALLOC) */
+ UNUSED_PARAMETER(NotUsed);
+ return SQLITE_OK;
+}
+
+/*
+** Deinitialize this module.
+*/
+static void sqlite3MemShutdown(void *NotUsed){
+ UNUSED_PARAMETER(NotUsed);
+ return;
+}
+
+/*
+** This routine is the only routine in this file with external linkage.
+**
+** Populate the low-level memory allocation function pointers in
+** sqlite3GlobalConfig.m with pointers to the routines in this file.
+*/
+SQLITE_PRIVATE void sqlite3MemSetDefault(void){
+ static const sqlite3_mem_methods defaultMethods = {
+ sqlite3MemMalloc,
+ sqlite3MemFree,
+ sqlite3MemRealloc,
+ sqlite3MemSize,
+ sqlite3MemRoundup,
+ sqlite3MemInit,
+ sqlite3MemShutdown,
+ 0
+ };
+ sqlite3_config(SQLITE_CONFIG_MALLOC, &defaultMethods);
+}
+
+#endif /* SQLITE_SYSTEM_MALLOC */
+
+/************** End of mem1.c ************************************************/
+/************** Begin file mem2.c ********************************************/
+/*
+** 2007 August 15
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+**
+** This file contains low-level memory allocation drivers for when
+** SQLite will use the standard C-library malloc/realloc/free interface
+** to obtain the memory it needs while adding lots of additional debugging
+** information to each allocation in order to help detect and fix memory
+** leaks and memory usage errors.
+**
+** This file contains implementations of the low-level memory allocation
+** routines specified in the sqlite3_mem_methods object.
+*/
+/* #include "sqliteInt.h" */
+
+/*
+** This version of the memory allocator is used only if the
+** SQLITE_MEMDEBUG macro is defined
+*/
+#ifdef SQLITE_MEMDEBUG
+
+/*
+** The backtrace functionality is only available with GLIBC
+*/
+#ifdef __GLIBC__
+ extern int backtrace(void**,int);
+ extern void backtrace_symbols_fd(void*const*,int,int);
+#else
+# define backtrace(A,B) 1
+# define backtrace_symbols_fd(A,B,C)
+#endif
+/* #include <stdio.h> */
+
+/*
+** Each memory allocation looks like this:
+**
+** ------------------------------------------------------------------------
+** | Title | backtrace pointers | MemBlockHdr | allocation | EndGuard |
+** ------------------------------------------------------------------------
+**
+** The application code sees only a pointer to the allocation. We have
+** to back up from the allocation pointer to find the MemBlockHdr. The
+** MemBlockHdr tells us the size of the allocation and the number of
+** backtrace pointers. There is also a guard word at the end of the
+** MemBlockHdr.
+*/
+struct MemBlockHdr {
+ i64 iSize; /* Size of this allocation */
+ struct MemBlockHdr *pNext, *pPrev; /* Linked list of all unfreed memory */
+ char nBacktrace; /* Number of backtraces on this alloc */
+ char nBacktraceSlots; /* Available backtrace slots */
+ u8 nTitle; /* Bytes of title; includes '\0' */
+ u8 eType; /* Allocation type code */
+ int iForeGuard; /* Guard word for sanity */
+};
+
+/*
+** Guard words
+*/
+#define FOREGUARD 0x80F5E153
+#define REARGUARD 0xE4676B53
+
+/*
+** Number of malloc size increments to track.
+*/
+#define NCSIZE 1000
+
+/*
+** All of the static variables used by this module are collected
+** into a single structure named "mem". This is to keep the
+** static variables organized and to reduce namespace pollution
+** when this module is combined with others in the amalgamation.
+*/
+static struct {
+
+ /*
+ ** Mutex to control access to the memory allocation subsystem.
+ */
+ sqlite3_mutex *mutex;
+
+ /*
+ ** Head and tail of a linked list of all outstanding allocations
+ */
+ struct MemBlockHdr *pFirst;
+ struct MemBlockHdr *pLast;
+
+ /*
+ ** The number of levels of backtrace to save in new allocations.
+ */
+ int nBacktrace;
+ void (*xBacktrace)(int, int, void **);
+
+ /*
+ ** Title text to insert in front of each block
+ */
+ int nTitle; /* Bytes of zTitle to save. Includes '\0' and padding */
+ char zTitle[100]; /* The title text */
+
+ /*
+ ** sqlite3MallocDisallow() increments the following counter.
+ ** sqlite3MallocAllow() decrements it.
+ */
+ int disallow; /* Do not allow memory allocation */
+
+ /*
+ ** Gather statistics on the sizes of memory allocations.
+ ** nAlloc[i] is the number of allocation attempts of i*8
+ ** bytes. i==NCSIZE is the number of allocation attempts for
+ ** sizes more than NCSIZE*8 bytes.
+ */
+ int nAlloc[NCSIZE]; /* Total number of allocations */
+ int nCurrent[NCSIZE]; /* Current number of allocations */
+ int mxCurrent[NCSIZE]; /* Highwater mark for nCurrent */
+
+} mem;
+
+
+/*
+** Adjust memory usage statistics
+*/
+static void adjustStats(int iSize, int increment){
+ int i = ROUND8(iSize)/8;
+ if( i>NCSIZE-1 ){
+ i = NCSIZE - 1;
+ }
+ if( increment>0 ){
+ mem.nAlloc[i]++;
+ mem.nCurrent[i]++;
+ if( mem.nCurrent[i]>mem.mxCurrent[i] ){
+ mem.mxCurrent[i] = mem.nCurrent[i];
+ }
+ }else{
+ mem.nCurrent[i]--;
+ assert( mem.nCurrent[i]>=0 );
+ }
+}
+
+/*
+** Given an allocation, find the MemBlockHdr for that allocation.
+**
+** This routine checks the guards at either end of the allocation and
+** if they are incorrect it asserts.
+*/
+static struct MemBlockHdr *sqlite3MemsysGetHeader(void *pAllocation){
+ struct MemBlockHdr *p;
+ int *pInt;
+ u8 *pU8;
+ int nReserve;
+
+ p = (struct MemBlockHdr*)pAllocation;
+ p--;
+ assert( p->iForeGuard==(int)FOREGUARD );
+ nReserve = ROUND8(p->iSize);
+ pInt = (int*)pAllocation;
+ pU8 = (u8*)pAllocation;
+ assert( pInt[nReserve/sizeof(int)]==(int)REARGUARD );
+ /* This checks any of the "extra" bytes allocated due
+ ** to rounding up to an 8 byte boundary to ensure
+ ** they haven't been overwritten.
+ */
+ while( nReserve-- > p->iSize ) assert( pU8[nReserve]==0x65 );
+ return p;
+}
+
+/*
+** Return the number of bytes currently allocated at address p.
+*/
+static int sqlite3MemSize(void *p){
+ struct MemBlockHdr *pHdr;
+ if( !p ){
+ return 0;
+ }
+ pHdr = sqlite3MemsysGetHeader(p);
+ return (int)pHdr->iSize;
+}
+
+/*
+** Initialize the memory allocation subsystem.
+*/
+static int sqlite3MemInit(void *NotUsed){
+ UNUSED_PARAMETER(NotUsed);
+ assert( (sizeof(struct MemBlockHdr)&7) == 0 );
+ if( !sqlite3GlobalConfig.bMemstat ){
+ /* If memory status is enabled, then the malloc.c wrapper will already
+ ** hold the STATIC_MEM mutex when the routines here are invoked. */
+ mem.mutex = sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MEM);
+ }
+ return SQLITE_OK;
+}
+
+/*
+** Deinitialize the memory allocation subsystem.
+*/
+static void sqlite3MemShutdown(void *NotUsed){
+ UNUSED_PARAMETER(NotUsed);
+ mem.mutex = 0;
+}
+
+/*
+** Round up a request size to the next valid allocation size.
+*/
+static int sqlite3MemRoundup(int n){
+ return ROUND8(n);
+}
+
+/*
+** Fill a buffer with pseudo-random bytes. This is used to preset
+** the content of a new memory allocation to unpredictable values and
+** to clear the content of a freed allocation to unpredictable values.
+*/
+static void randomFill(char *pBuf, int nByte){
+ unsigned int x, y, r;
+ x = SQLITE_PTR_TO_INT(pBuf);
+ y = nByte | 1;
+ while( nByte >= 4 ){
+ x = (x>>1) ^ (-(int)(x&1) & 0xd0000001);
+ y = y*1103515245 + 12345;
+ r = x ^ y;
+ *(int*)pBuf = r;
+ pBuf += 4;
+ nByte -= 4;
+ }
+ while( nByte-- > 0 ){
+ x = (x>>1) ^ (-(int)(x&1) & 0xd0000001);
+ y = y*1103515245 + 12345;
+ r = x ^ y;
+ *(pBuf++) = r & 0xff;
+ }
+}
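+
+/*
+** Note on the generator above: x is stepped as a 32-bit Galois LFSR
+** (feedback mask 0xd0000001) seeded from the buffer address, and y is
+** the classic C-library linear congruential generator
+** (y = y*1103515245 + 12345); their xor gives cheap, repeatable
+** pseudo-random fill bytes.
+*/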
+
+/*
+** Allocate nByte bytes of memory.
+*/
+static void *sqlite3MemMalloc(int nByte){
+ struct MemBlockHdr *pHdr;
+ void **pBt;
+ char *z;
+ int *pInt;
+ void *p = 0;
+ int totalSize;
+ int nReserve;
+ sqlite3_mutex_enter(mem.mutex);
+ assert( mem.disallow==0 );
+ nReserve = ROUND8(nByte);
+ totalSize = nReserve + sizeof(*pHdr) + sizeof(int) +
+ mem.nBacktrace*sizeof(void*) + mem.nTitle;
+ p = malloc(totalSize);
+ if( p ){
+ z = p;
+ pBt = (void**)&z[mem.nTitle];
+ pHdr = (struct MemBlockHdr*)&pBt[mem.nBacktrace];
+ pHdr->pNext = 0;
+ pHdr->pPrev = mem.pLast;
+ if( mem.pLast ){
+ mem.pLast->pNext = pHdr;
+ }else{
+ mem.pFirst = pHdr;
+ }
+ mem.pLast = pHdr;
+ pHdr->iForeGuard = FOREGUARD;
+ pHdr->eType = MEMTYPE_HEAP;
+ pHdr->nBacktraceSlots = mem.nBacktrace;
+ pHdr->nTitle = mem.nTitle;
+ if( mem.nBacktrace ){
+ void *aAddr[40];
+ pHdr->nBacktrace = backtrace(aAddr, mem.nBacktrace+1)-1;
+ memcpy(pBt, &aAddr[1], pHdr->nBacktrace*sizeof(void*));
+ assert(pBt[0]);
+ if( mem.xBacktrace ){
+ mem.xBacktrace(nByte, pHdr->nBacktrace-1, &aAddr[1]);
+ }
+ }else{
+ pHdr->nBacktrace = 0;
+ }
+ if( mem.nTitle ){
+ memcpy(z, mem.zTitle, mem.nTitle);
+ }
+ pHdr->iSize = nByte;
+ adjustStats(nByte, +1);
+ pInt = (int*)&pHdr[1];
+ pInt[nReserve/sizeof(int)] = REARGUARD;
+ randomFill((char*)pInt, nByte);
+ memset(((char*)pInt)+nByte, 0x65, nReserve-nByte);
+ p = (void*)pInt;
+ }
+ sqlite3_mutex_leave(mem.mutex);
+ return p;
+}
+
+/*
+** Free memory.
+*/
+static void sqlite3MemFree(void *pPrior){
+ struct MemBlockHdr *pHdr;
+ void **pBt;
+ char *z;
+ assert( sqlite3GlobalConfig.bMemstat || sqlite3GlobalConfig.bCoreMutex==0
+ || mem.mutex!=0 );
+ pHdr = sqlite3MemsysGetHeader(pPrior);
+ pBt = (void**)pHdr;
+ pBt -= pHdr->nBacktraceSlots;
+ sqlite3_mutex_enter(mem.mutex);
+ if( pHdr->pPrev ){
+ assert( pHdr->pPrev->pNext==pHdr );
+ pHdr->pPrev->pNext = pHdr->pNext;
+ }else{
+ assert( mem.pFirst==pHdr );
+ mem.pFirst = pHdr->pNext;
+ }
+ if( pHdr->pNext ){
+ assert( pHdr->pNext->pPrev==pHdr );
+ pHdr->pNext->pPrev = pHdr->pPrev;
+ }else{
+ assert( mem.pLast==pHdr );
+ mem.pLast = pHdr->pPrev;
+ }
+ z = (char*)pBt;
+ z -= pHdr->nTitle;
+ adjustStats((int)pHdr->iSize, -1);
+ randomFill(z, sizeof(void*)*pHdr->nBacktraceSlots + sizeof(*pHdr) +
+ (int)pHdr->iSize + sizeof(int) + pHdr->nTitle);
+ free(z);
+ sqlite3_mutex_leave(mem.mutex);
+}
+
+/*
+** Change the size of an existing memory allocation.
+**
+** For this debugging implementation, we *always* make a copy of the
+** allocation into a new place in memory. In this way, if the
+** higher-level code is using a pointer to the old allocation, it is
+** much more likely to break and we are much more likely to find
+** the error.
+*/
+static void *sqlite3MemRealloc(void *pPrior, int nByte){
+ struct MemBlockHdr *pOldHdr;
+ void *pNew;
+ assert( mem.disallow==0 );
+ assert( (nByte & 7)==0 ); /* EV: R-46199-30249 */
+ pOldHdr = sqlite3MemsysGetHeader(pPrior);
+ pNew = sqlite3MemMalloc(nByte);
+ if( pNew ){
+ memcpy(pNew, pPrior, (int)(nByte<pOldHdr->iSize ? nByte : pOldHdr->iSize));
+ if( nByte>pOldHdr->iSize ){
+ randomFill(&((char*)pNew)[pOldHdr->iSize], nByte - (int)pOldHdr->iSize);
+ }
+ sqlite3MemFree(pPrior);
+ }
+ return pNew;
+}
+
+/*
+** Populate the low-level memory allocation function pointers in
+** sqlite3GlobalConfig.m with pointers to the routines in this file.
+*/
+SQLITE_PRIVATE void sqlite3MemSetDefault(void){
+ static const sqlite3_mem_methods defaultMethods = {
+ sqlite3MemMalloc,
+ sqlite3MemFree,
+ sqlite3MemRealloc,
+ sqlite3MemSize,
+ sqlite3MemRoundup,
+ sqlite3MemInit,
+ sqlite3MemShutdown,
+ 0
+ };
+ sqlite3_config(SQLITE_CONFIG_MALLOC, &defaultMethods);
+}
+
+/*
+** Set the "type" of an allocation.
+*/
+SQLITE_PRIVATE void sqlite3MemdebugSetType(void *p, u8 eType){
+ if( p && sqlite3GlobalConfig.m.xFree==sqlite3MemFree ){
+ struct MemBlockHdr *pHdr;
+ pHdr = sqlite3MemsysGetHeader(p);
+ assert( pHdr->iForeGuard==FOREGUARD );
+ pHdr->eType = eType;
+ }
+}
+
+/*
+** Return TRUE if the mask of type in eType matches the type of the
+** allocation p. Also return true if p==NULL.
+**
+** This routine is designed for use within an assert() statement, to
+** verify the type of an allocation. For example:
+**
+** assert( sqlite3MemdebugHasType(p, MEMTYPE_HEAP) );
+*/
+SQLITE_PRIVATE int sqlite3MemdebugHasType(void *p, u8 eType){
+ int rc = 1;
+ if( p && sqlite3GlobalConfig.m.xFree==sqlite3MemFree ){
+ struct MemBlockHdr *pHdr;
+ pHdr = sqlite3MemsysGetHeader(p);
+ assert( pHdr->iForeGuard==FOREGUARD ); /* Allocation is valid */
+ if( (pHdr->eType&eType)==0 ){
+ rc = 0;
+ }
+ }
+ return rc;
+}
+
+/*
+** Return TRUE if the mask of type in eType matches no bits of the type of the
+** allocation p. Also return true if p==NULL.
+**
+** This routine is designed for use within an assert() statement, to
+** verify the type of an allocation. For example:
+**
+** assert( sqlite3MemdebugNoType(p, MEMTYPE_LOOKASIDE) );
+*/
+SQLITE_PRIVATE int sqlite3MemdebugNoType(void *p, u8 eType){
+ int rc = 1;
+ if( p && sqlite3GlobalConfig.m.xFree==sqlite3MemFree ){
+ struct MemBlockHdr *pHdr;
+ pHdr = sqlite3MemsysGetHeader(p);
+ assert( pHdr->iForeGuard==FOREGUARD ); /* Allocation is valid */
+ if( (pHdr->eType&eType)!=0 ){
+ rc = 0;
+ }
+ }
+ return rc;
+}
+
+/*
+** Set the number of backtrace levels kept for each allocation.
+** A value of zero turns off backtracing. The number is always rounded
+** up to a multiple of 2.
+*/
+SQLITE_PRIVATE void sqlite3MemdebugBacktrace(int depth){
+ if( depth<0 ){ depth = 0; }
+ if( depth>20 ){ depth = 20; }
+ depth = (depth+1)&0xfe;
+ mem.nBacktrace = depth;
+}
+
+SQLITE_PRIVATE void sqlite3MemdebugBacktraceCallback(void (*xBacktrace)(int, int, void **)){
+ mem.xBacktrace = xBacktrace;
+}
+
+/*
+** Set the title string for subsequent allocations.
+*/
+SQLITE_PRIVATE void sqlite3MemdebugSettitle(const char *zTitle){
+ unsigned int n = sqlite3Strlen30(zTitle) + 1;
+ sqlite3_mutex_enter(mem.mutex);
+ if( n>=sizeof(mem.zTitle) ) n = sizeof(mem.zTitle)-1;
+ memcpy(mem.zTitle, zTitle, n);
+ mem.zTitle[n] = 0;
+ mem.nTitle = ROUND8(n);
+ sqlite3_mutex_leave(mem.mutex);
+}
+
+SQLITE_PRIVATE void sqlite3MemdebugSync(){
+ struct MemBlockHdr *pHdr;
+ for(pHdr=mem.pFirst; pHdr; pHdr=pHdr->pNext){
+ void **pBt = (void**)pHdr;
+ pBt -= pHdr->nBacktraceSlots;
+ mem.xBacktrace((int)pHdr->iSize, pHdr->nBacktrace-1, &pBt[1]);
+ }
+}
+
+/*
+** Open the file indicated and write a log of all unfreed memory
+** allocations into that log.
+*/
+SQLITE_PRIVATE void sqlite3MemdebugDump(const char *zFilename){
+ FILE *out;
+ struct MemBlockHdr *pHdr;
+ void **pBt;
+ int i;
+ out = fopen(zFilename, "w");
+ if( out==0 ){
+ fprintf(stderr, "** Unable to output memory debug output log: %s **\n",
+ zFilename);
+ return;
+ }
+ for(pHdr=mem.pFirst; pHdr; pHdr=pHdr->pNext){
+ char *z = (char*)pHdr;
+ z -= pHdr->nBacktraceSlots*sizeof(void*) + pHdr->nTitle;
+ fprintf(out, "**** %lld bytes at %p from %s ****\n",
+ pHdr->iSize, &pHdr[1], pHdr->nTitle ? z : "???");
+ if( pHdr->nBacktrace ){
+ fflush(out);
+ pBt = (void**)pHdr;
+ pBt -= pHdr->nBacktraceSlots;
+ backtrace_symbols_fd(pBt, pHdr->nBacktrace, fileno(out));
+ fprintf(out, "\n");
+ }
+ }
+ fprintf(out, "COUNTS:\n");
+ for(i=0; i<NCSIZE-1; i++){
+ if( mem.nAlloc[i] ){
+ fprintf(out, " %5d: %10d %10d %10d\n",
+ i*8, mem.nAlloc[i], mem.nCurrent[i], mem.mxCurrent[i]);
+ }
+ }
+ if( mem.nAlloc[NCSIZE-1] ){
+ fprintf(out, " %5d: %10d %10d %10d\n",
+ NCSIZE*8-8, mem.nAlloc[NCSIZE-1],
+ mem.nCurrent[NCSIZE-1], mem.mxCurrent[NCSIZE-1]);
+ }
+ fclose(out);
+}
+
+/*
+** Return the number of times sqlite3MemMalloc() has been called.
+*/
+SQLITE_PRIVATE int sqlite3MemdebugMallocCount(){
+ int i;
+ int nTotal = 0;
+ for(i=0; i<NCSIZE; i++){
+ nTotal += mem.nAlloc[i];
+ }
+ return nTotal;
+}
+
+
+#endif /* SQLITE_MEMDEBUG */
+
+/************** End of mem2.c ************************************************/
+/************** Begin file mem3.c ********************************************/
+/*
+** 2007 October 14
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+** This file contains the C functions that implement a memory
+** allocation subsystem for use by SQLite.
+**
+** This version of the memory allocation subsystem omits all
+** use of malloc(). The SQLite user supplies a block of memory
+** before calling sqlite3_initialize() from which allocations
+** are made and returned by the xMalloc() and xRealloc()
+** implementations. Once sqlite3_initialize() has been called,
+** the amount of memory available to SQLite is fixed and cannot
+** be changed.
+**
+** This version of the memory allocation subsystem is included
+** in the build only if SQLITE_ENABLE_MEMSYS3 is defined.
+*/
+/* #include "sqliteInt.h" */
+
+/*
+** This version of the memory allocator is only built into the library
+** if SQLITE_ENABLE_MEMSYS3 is defined. Defining this symbol does not
+** mean that the library will use a memory-pool by default, just that
+** it is available. The mempool allocator is activated by calling
+** sqlite3_config().
+*/
+#ifdef SQLITE_ENABLE_MEMSYS3
+
+/*
+** Maximum size (in Mem3Blocks) of a "small" chunk.
+*/
+#define MX_SMALL 10
+
+
+/*
+** Number of freelist hash slots
+*/
+#define N_HASH 61
+
+/*
+** A memory allocation (also called a "chunk") consists of two or
+** more blocks where each block is 8 bytes. The first 8 bytes are
+** a header that is not returned to the user.
+**
+** A chunk consists of two or more blocks and is either checked out or
+** free. The first block has format u.hdr. u.hdr.size4x is 4 times the
+** size of the allocation in blocks if the allocation is free.
+** The u.hdr.size4x&1 bit is true if the chunk is checked out and
+** false if the chunk is on the freelist. The u.hdr.size4x&2 bit
+** is true if the previous chunk is checked out and false if the
+** previous chunk is free. The u.hdr.prevSize field is the size of
+** the previous chunk in blocks if the previous chunk is on the
+** freelist. If the previous chunk is checked out, then
+** u.hdr.prevSize can be part of the data for that chunk and should
+** not be read or written.
+**
+** We often identify a chunk by its index in mem3.aPool[]. When
+** this is done, the chunk index refers to the second block of
+** the chunk. In this way, the first chunk has an index of 1.
+** A chunk index of 0 means "no such chunk" and is the equivalent
+** of a NULL pointer.
+**
+** The second block of a free chunk has the form u.list. Its
+** two fields form a doubly-linked list of chunks of related sizes.
+** Pointers to the head of the list are stored in mem3.aiSmall[]
+** for smaller chunks and mem3.aiHash[] for larger chunks.
+**
+** The second block of a chunk is user data if the chunk is checked
+** out. If a chunk is checked out, the user data may extend into
+** the u.hdr.prevSize value of the following chunk.
+*/
+typedef struct Mem3Block Mem3Block;
+struct Mem3Block {
+ union {
+ struct {
+ u32 prevSize; /* Size of previous chunk in Mem3Block elements */
+ u32 size4x; /* 4x the size of current chunk in Mem3Block elements */
+ } hdr;
+ struct {
+ u32 next; /* Index in mem3.aPool[] of next free chunk */
+ u32 prev; /* Index in mem3.aPool[] of previous free chunk */
+ } list;
+ } u;
+};
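+
+/*
+** Worked example (a sketch): a header word of u.hdr.size4x==42 decodes
+** as size = 42/4 = 10 blocks (80 bytes including the header), bit 0
+** (42&1)==0 so the chunk itself is free, and bit 1 (42&2)!=0 so the
+** previous chunk is checked out.
+*/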
+
+/*
+** All of the static variables used by this module are collected
+** into a single structure named "mem3". This is to keep the
+** static variables organized and to reduce namespace pollution
+** when this module is combined with others in the amalgamation.
+*/
+static SQLITE_WSD struct Mem3Global {
+ /*
+ ** Memory available for allocation. nPool is the size of the array
+ ** (in Mem3Blocks) pointed to by aPool less 2.
+ */
+ u32 nPool;
+ Mem3Block *aPool;
+
+ /*
+ ** True if we are evaluating an out-of-memory callback.
+ */
+ int alarmBusy;
+
+ /*
+ ** Mutex to control access to the memory allocation subsystem.
+ */
+ sqlite3_mutex *mutex;
+
+ /*
+ ** The minimum amount of free space that we have seen.
+ */
+ u32 mnKeyBlk;
+
+ /*
+ ** iKeyBlk is the index of the key chunk. Most new allocations
+ ** occur off of this chunk. szKeyBlk is the size (in Mem3Blocks)
+ ** of the current key chunk. iKeyBlk is 0 if there is no key chunk.
+ ** The key chunk is not in either the aiHash[] or aiSmall[].
+ */
+ u32 iKeyBlk;
+ u32 szKeyBlk;
+
+ /*
+ ** Array of lists of free blocks according to the block size
+ ** for smaller chunks, or a hash on the block size for larger
+ ** chunks.
+ */
+ u32 aiSmall[MX_SMALL-1]; /* For sizes 2 through MX_SMALL, inclusive */
+ u32 aiHash[N_HASH]; /* For sizes MX_SMALL+1 and larger */
+} mem3 = { 97535575 };
+
+#define mem3 GLOBAL(struct Mem3Global, mem3)
+
+/*
+** Unlink the chunk at mem3.aPool[i] from the list it is currently
+** on. *pRoot is the list that i is a member of.
+*/
+static void memsys3UnlinkFromList(u32 i, u32 *pRoot){
+ u32 next = mem3.aPool[i].u.list.next;
+ u32 prev = mem3.aPool[i].u.list.prev;
+ assert( sqlite3_mutex_held(mem3.mutex) );
+ if( prev==0 ){
+ *pRoot = next;
+ }else{
+ mem3.aPool[prev].u.list.next = next;
+ }
+ if( next ){
+ mem3.aPool[next].u.list.prev = prev;
+ }
+ mem3.aPool[i].u.list.next = 0;
+ mem3.aPool[i].u.list.prev = 0;
+}
+
+/*
+** Unlink the chunk at index i from
+** whatever list it is currently a member of.
+*/
+static void memsys3Unlink(u32 i){
+ u32 size, hash;
+ assert( sqlite3_mutex_held(mem3.mutex) );
+ assert( (mem3.aPool[i-1].u.hdr.size4x & 1)==0 );
+ assert( i>=1 );
+ size = mem3.aPool[i-1].u.hdr.size4x/4;
+ assert( size==mem3.aPool[i+size-1].u.hdr.prevSize );
+ assert( size>=2 );
+ if( size <= MX_SMALL ){
+ memsys3UnlinkFromList(i, &mem3.aiSmall[size-2]);
+ }else{
+ hash = size % N_HASH;
+ memsys3UnlinkFromList(i, &mem3.aiHash[hash]);
+ }
+}
+
+/*
+** Link the chunk at mem3.aPool[i] so that it is on the list rooted
+** at *pRoot.
+*/
+static void memsys3LinkIntoList(u32 i, u32 *pRoot){
+ assert( sqlite3_mutex_held(mem3.mutex) );
+ mem3.aPool[i].u.list.next = *pRoot;
+ mem3.aPool[i].u.list.prev = 0;
+ if( *pRoot ){
+ mem3.aPool[*pRoot].u.list.prev = i;
+ }
+ *pRoot = i;
+}
+
+/*
+** Link the chunk at index i into either the appropriate
+** small chunk list, or into the large chunk hash table.
+*/
+static void memsys3Link(u32 i){
+ u32 size, hash;
+ assert( sqlite3_mutex_held(mem3.mutex) );
+ assert( i>=1 );
+ assert( (mem3.aPool[i-1].u.hdr.size4x & 1)==0 );
+ size = mem3.aPool[i-1].u.hdr.size4x/4;
+ assert( size==mem3.aPool[i+size-1].u.hdr.prevSize );
+ assert( size>=2 );
+ if( size <= MX_SMALL ){
+ memsys3LinkIntoList(i, &mem3.aiSmall[size-2]);
+ }else{
+ hash = size % N_HASH;
+ memsys3LinkIntoList(i, &mem3.aiHash[hash]);
+ }
+}
+
+/*
+** If the STATIC_MEM mutex is not already held, obtain it now. The mutex
+** will already be held (obtained by code in malloc.c) if
+** sqlite3GlobalConfig.bMemstat is true.
+*/
+static void memsys3Enter(void){
+ if( sqlite3GlobalConfig.bMemstat==0 && mem3.mutex==0 ){
+ mem3.mutex = sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MEM);
+ }
+ sqlite3_mutex_enter(mem3.mutex);
+}
+static void memsys3Leave(void){
+ sqlite3_mutex_leave(mem3.mutex);
+}
+
+/*
+** Called when we are unable to satisfy an allocation of nBytes.
+*/
+static void memsys3OutOfMemory(int nByte){
+ if( !mem3.alarmBusy ){
+ mem3.alarmBusy = 1;
+ assert( sqlite3_mutex_held(mem3.mutex) );
+ sqlite3_mutex_leave(mem3.mutex);
+ sqlite3_release_memory(nByte);
+ sqlite3_mutex_enter(mem3.mutex);
+ mem3.alarmBusy = 0;
+ }
+}
+
+
+/*
+** Chunk i is a free chunk that has been unlinked. Adjust its
+** size parameters for check-out and return a pointer to the
+** user portion of the chunk.
+*/
+static void *memsys3Checkout(u32 i, u32 nBlock){
+ u32 x;
+ assert( sqlite3_mutex_held(mem3.mutex) );
+ assert( i>=1 );
+ assert( mem3.aPool[i-1].u.hdr.size4x/4==nBlock );
+ assert( mem3.aPool[i+nBlock-1].u.hdr.prevSize==nBlock );
+ x = mem3.aPool[i-1].u.hdr.size4x;
+ mem3.aPool[i-1].u.hdr.size4x = nBlock*4 | 1 | (x&2);
+ mem3.aPool[i+nBlock-1].u.hdr.prevSize = nBlock;
+ mem3.aPool[i+nBlock-1].u.hdr.size4x |= 2;
+ return &mem3.aPool[i];
+}
+
+/*
+** Carve a piece off of the end of the mem3.iKeyBlk free chunk.
+** Return a pointer to the new allocation. Or, if the key chunk
+** is not large enough, return 0.
+*/
+static void *memsys3FromKeyBlk(u32 nBlock){
+ assert( sqlite3_mutex_held(mem3.mutex) );
+ assert( mem3.szKeyBlk>=nBlock );
+ if( nBlock>=mem3.szKeyBlk-1 ){
+ /* Use the entire key chunk */
+ void *p = memsys3Checkout(mem3.iKeyBlk, mem3.szKeyBlk);
+ mem3.iKeyBlk = 0;
+ mem3.szKeyBlk = 0;
+ mem3.mnKeyBlk = 0;
+ return p;
+ }else{
+ /* Split the key block. Return the tail. */
+ u32 newi, x;
+ newi = mem3.iKeyBlk + mem3.szKeyBlk - nBlock;
+ assert( newi > mem3.iKeyBlk+1 );
+ mem3.aPool[mem3.iKeyBlk+mem3.szKeyBlk-1].u.hdr.prevSize = nBlock;
+ mem3.aPool[mem3.iKeyBlk+mem3.szKeyBlk-1].u.hdr.size4x |= 2;
+ mem3.aPool[newi-1].u.hdr.size4x = nBlock*4 + 1;
+ mem3.szKeyBlk -= nBlock;
+ mem3.aPool[newi-1].u.hdr.prevSize = mem3.szKeyBlk;
+ x = mem3.aPool[mem3.iKeyBlk-1].u.hdr.size4x & 2;
+ mem3.aPool[mem3.iKeyBlk-1].u.hdr.size4x = mem3.szKeyBlk*4 | x;
+ if( mem3.szKeyBlk < mem3.mnKeyBlk ){
+ mem3.mnKeyBlk = mem3.szKeyBlk;
+ }
+ return (void*)&mem3.aPool[newi];
+ }
+}
+
+/*
+** *pRoot is the head of a list of free chunks of the same size
+** or same size hash. In other words, *pRoot is an entry in either
+** mem3.aiSmall[] or mem3.aiHash[].
+**
+** This routine examines all entries on the given list and tries
+** to coalesce each entry with adjacent free chunks.
+**
+** If it sees a chunk that is larger than mem3.iKeyBlk, it replaces
+** the current mem3.iKeyBlk with the new larger chunk. In order for
+** this mem3.iKeyBlk replacement to work, the key chunk must be
+** linked into the hash tables. That is not the normal state of
+** affairs, of course. The calling routine must link the key
+** chunk before invoking this routine, then must unlink the (possibly
+** changed) key chunk once this routine has finished.
+*/
+static void memsys3Merge(u32 *pRoot){
+ u32 iNext, prev, size, i, x;
+
+ assert( sqlite3_mutex_held(mem3.mutex) );
+ for(i=*pRoot; i>0; i=iNext){
+ iNext = mem3.aPool[i].u.list.next;
+ size = mem3.aPool[i-1].u.hdr.size4x;
+ assert( (size&1)==0 );
+ if( (size&2)==0 ){
+ memsys3UnlinkFromList(i, pRoot);
+ assert( i > mem3.aPool[i-1].u.hdr.prevSize );
+ prev = i - mem3.aPool[i-1].u.hdr.prevSize;
+ if( prev==iNext ){
+ iNext = mem3.aPool[prev].u.list.next;
+ }
+ memsys3Unlink(prev);
+ size = i + size/4 - prev;
+ x = mem3.aPool[prev-1].u.hdr.size4x & 2;
+ mem3.aPool[prev-1].u.hdr.size4x = size*4 | x;
+ mem3.aPool[prev+size-1].u.hdr.prevSize = size;
+ memsys3Link(prev);
+ i = prev;
+ }else{
+ size /= 4;
+ }
+ if( size>mem3.szKeyBlk ){
+ mem3.iKeyBlk = i;
+ mem3.szKeyBlk = size;
+ }
+ }
+}
+
+/*
+** Return a block of memory of at least nBytes in size.
+** Return NULL if unable.
+**
+** This function assumes that the necessary mutexes, if any, are
+** already held by the caller. Hence "Unsafe".
+*/
+static void *memsys3MallocUnsafe(int nByte){
+ u32 i;
+ u32 nBlock;
+ u32 toFree;
+
+ assert( sqlite3_mutex_held(mem3.mutex) );
+ assert( sizeof(Mem3Block)==8 );
+ if( nByte<=12 ){
+ nBlock = 2;
+ }else{
+ nBlock = (nByte + 11)/8;
+ }
+ assert( nBlock>=2 );
+
+ /* STEP 1:
+ ** Look for an entry of the correct size in either the small
+ ** chunk table or in the large chunk hash table. This is
+ ** successful most of the time (about 9 times out of 10).
+ */
+ if( nBlock <= MX_SMALL ){
+ i = mem3.aiSmall[nBlock-2];
+ if( i>0 ){
+ memsys3UnlinkFromList(i, &mem3.aiSmall[nBlock-2]);
+ return memsys3Checkout(i, nBlock);
+ }
+ }else{
+ int hash = nBlock % N_HASH;
+ for(i=mem3.aiHash[hash]; i>0; i=mem3.aPool[i].u.list.next){
+ if( mem3.aPool[i-1].u.hdr.size4x/4==nBlock ){
+ memsys3UnlinkFromList(i, &mem3.aiHash[hash]);
+ return memsys3Checkout(i, nBlock);
+ }
+ }
+ }
+
+ /* STEP 2:
+ ** Try to satisfy the allocation by carving a piece off of the end
+ ** of the key chunk. This step usually works if step 1 fails.
+ */
+ if( mem3.szKeyBlk>=nBlock ){
+ return memsys3FromKeyBlk(nBlock);
+ }
+
+
+ /* STEP 3:
+ ** Loop through the entire memory pool. Coalesce adjacent free
+ ** chunks. Recompute the key chunk as the largest free chunk.
+ ** Then try again to satisfy the allocation by carving a piece off
+ ** of the end of the key chunk. This step happens very
+ ** rarely (we hope!)
+ */
+ for(toFree=nBlock*16; toFree<(mem3.nPool*16); toFree *= 2){
+ memsys3OutOfMemory(toFree);
+ if( mem3.iKeyBlk ){
+ memsys3Link(mem3.iKeyBlk);
+ mem3.iKeyBlk = 0;
+ mem3.szKeyBlk = 0;
+ }
+ for(i=0; i<N_HASH; i++){
+ memsys3Merge(&mem3.aiHash[i]);
+ }
+ for(i=0; i<MX_SMALL-1; i++){
+ memsys3Merge(&mem3.aiSmall[i]);
+ }
+ if( mem3.szKeyBlk ){
+ memsys3Unlink(mem3.iKeyBlk);
+ if( mem3.szKeyBlk>=nBlock ){
+ return memsys3FromKeyBlk(nBlock);
+ }
+ }
+ }
+
+ /* If none of the above worked, then we fail. */
+ return 0;
+}
+
+/*
+** Free an outstanding memory allocation.
+**
+** This function assumes that the necessary mutexes, if any, are
+** already held by the caller. Hence "Unsafe".
+*/
+static void memsys3FreeUnsafe(void *pOld){
+ Mem3Block *p = (Mem3Block*)pOld;
+ int i;
+ u32 size, x;
+ assert( sqlite3_mutex_held(mem3.mutex) );
+ assert( p>mem3.aPool && p<&mem3.aPool[mem3.nPool] );
+ i = p - mem3.aPool;
+ assert( (mem3.aPool[i-1].u.hdr.size4x&1)==1 );
+ size = mem3.aPool[i-1].u.hdr.size4x/4;
+ assert( i+size<=mem3.nPool+1 );
+ mem3.aPool[i-1].u.hdr.size4x &= ~1;
+ mem3.aPool[i+size-1].u.hdr.prevSize = size;
+ mem3.aPool[i+size-1].u.hdr.size4x &= ~2;
+ memsys3Link(i);
+
+ /* Try to expand the key using the newly freed chunk */
+ if( mem3.iKeyBlk ){
+ while( (mem3.aPool[mem3.iKeyBlk-1].u.hdr.size4x&2)==0 ){
+ size = mem3.aPool[mem3.iKeyBlk-1].u.hdr.prevSize;
+ mem3.iKeyBlk -= size;
+ mem3.szKeyBlk += size;
+ memsys3Unlink(mem3.iKeyBlk);
+ x = mem3.aPool[mem3.iKeyBlk-1].u.hdr.size4x & 2;
+ mem3.aPool[mem3.iKeyBlk-1].u.hdr.size4x = mem3.szKeyBlk*4 | x;
+ mem3.aPool[mem3.iKeyBlk+mem3.szKeyBlk-1].u.hdr.prevSize = mem3.szKeyBlk;
+ }
+ x = mem3.aPool[mem3.iKeyBlk-1].u.hdr.size4x & 2;
+ while( (mem3.aPool[mem3.iKeyBlk+mem3.szKeyBlk-1].u.hdr.size4x&1)==0 ){
+ memsys3Unlink(mem3.iKeyBlk+mem3.szKeyBlk);
+ mem3.szKeyBlk += mem3.aPool[mem3.iKeyBlk+mem3.szKeyBlk-1].u.hdr.size4x/4;
+ mem3.aPool[mem3.iKeyBlk-1].u.hdr.size4x = mem3.szKeyBlk*4 | x;
+ mem3.aPool[mem3.iKeyBlk+mem3.szKeyBlk-1].u.hdr.prevSize = mem3.szKeyBlk;
+ }
+ }
+}
+
+/*
+** Return the size of an outstanding allocation, in bytes. The
+** size returned omits the 8-byte header overhead. This only
+** works for chunks that are currently checked out.
+*/
+static int memsys3Size(void *p){
+ Mem3Block *pBlock;
+ assert( p!=0 );
+ pBlock = (Mem3Block*)p;
+ assert( (pBlock[-1].u.hdr.size4x&1)!=0 );
+ return (pBlock[-1].u.hdr.size4x&~3)*2 - 4;
+}
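+
+/*
+** Worked example (a sketch): a checked-out 2-block chunk has
+** u.hdr.size4x = 2*4|1 = 9, so memsys3Size() returns
+** (9&~3)*2 - 4 = 16 - 4 = 12 bytes, the minimum request size.
+*/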
+
+/*
+** Round up a request size to the next valid allocation size.
+*/
+static int memsys3Roundup(int n){
+ if( n<=12 ){
+ return 12;
+ }else{
+ return ((n+11)&~7) - 4;
+ }
+}
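+
+/*
+** Worked example (a sketch): memsys3Roundup(13) = ((13+11)&~7) - 4 =
+** 24 - 4 = 20 bytes, the usable size of a 3-block (24-byte) chunk: the
+** 8-byte header block is withheld, but the user area extends 4 bytes
+** into the next chunk's prevSize field, leaving 3*8 - 8 + 4 = 20 bytes.
+*/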
+
+/*
+** Allocate nBytes of memory.
+*/
+static void *memsys3Malloc(int nBytes){
+ sqlite3_int64 *p;
+ assert( nBytes>0 ); /* malloc.c filters out 0 byte requests */
+ memsys3Enter();
+ p = memsys3MallocUnsafe(nBytes);
+ memsys3Leave();
+ return (void*)p;
+}
+
+/*
+** Free memory.
+*/
+static void memsys3Free(void *pPrior){
+ assert( pPrior );
+ memsys3Enter();
+ memsys3FreeUnsafe(pPrior);
+ memsys3Leave();
+}
+
+/*
+** Change the size of an existing memory allocation
+*/
+static void *memsys3Realloc(void *pPrior, int nBytes){
+ int nOld;
+ void *p;
+ if( pPrior==0 ){
+ return sqlite3_malloc(nBytes);
+ }
+ if( nBytes<=0 ){
+ sqlite3_free(pPrior);
+ return 0;
+ }
+ nOld = memsys3Size(pPrior);
+ if( nBytes<=nOld && nBytes>=nOld-128 ){
+ return pPrior;
+ }
+ memsys3Enter();
+ p = memsys3MallocUnsafe(nBytes);
+ if( p ){
+ if( nOld<nBytes ){
+ memcpy(p, pPrior, nOld);
+ }else{
+ memcpy(p, pPrior, nBytes);
+ }
+ memsys3FreeUnsafe(pPrior);
+ }
+ memsys3Leave();
+ return p;
+}
+
+/*
+** Initialize this module.
+*/
+static int memsys3Init(void *NotUsed){
+ UNUSED_PARAMETER(NotUsed);
+ if( !sqlite3GlobalConfig.pHeap ){
+ return SQLITE_ERROR;
+ }
+
+ /* Store a pointer to the memory block in global structure mem3. */
+ assert( sizeof(Mem3Block)==8 );
+ mem3.aPool = (Mem3Block *)sqlite3GlobalConfig.pHeap;
+ mem3.nPool = (sqlite3GlobalConfig.nHeap / sizeof(Mem3Block)) - 2;
+
+ /* Initialize the key block. */
+ mem3.szKeyBlk = mem3.nPool;
+ mem3.mnKeyBlk = mem3.szKeyBlk;
+ mem3.iKeyBlk = 1;
+ mem3.aPool[0].u.hdr.size4x = (mem3.szKeyBlk<<2) + 2;
+ mem3.aPool[mem3.nPool].u.hdr.prevSize = mem3.nPool;
+ mem3.aPool[mem3.nPool].u.hdr.size4x = 1;
+
+ return SQLITE_OK;
+}
+
+/*
+** Deinitialize this module.
+*/
+static void memsys3Shutdown(void *NotUsed){
+ UNUSED_PARAMETER(NotUsed);
+ mem3.mutex = 0;
+ return;
+}
+
+
+
+/*
+** Open the file indicated and write a log of all unfreed memory
+** allocations into that log.
+*/
+SQLITE_PRIVATE void sqlite3Memsys3Dump(const char *zFilename){
+#ifdef SQLITE_DEBUG
+ FILE *out;
+ u32 i, j;
+ u32 size;
+ if( zFilename==0 || zFilename[0]==0 ){
+ out = stdout;
+ }else{
+ out = fopen(zFilename, "w");
+ if( out==0 ){
+ fprintf(stderr, "** Unable to output memory debug output log: %s **\n",
+ zFilename);
+ return;
+ }
+ }
+ memsys3Enter();
+ fprintf(out, "CHUNKS:\n");
+ for(i=1; i<=mem3.nPool; i+=size/4){
+ size = mem3.aPool[i-1].u.hdr.size4x;
+ if( size/4<=1 ){
+ fprintf(out, "%p size error\n", &mem3.aPool[i]);
+ assert( 0 );
+ break;
+ }
+ if( (size&1)==0 && mem3.aPool[i+size/4-1].u.hdr.prevSize!=size/4 ){
+ fprintf(out, "%p tail size does not match\n", &mem3.aPool[i]);
+ assert( 0 );
+ break;
+ }
+ if( ((mem3.aPool[i+size/4-1].u.hdr.size4x&2)>>1)!=(size&1) ){
+ fprintf(out, "%p tail checkout bit is incorrect\n", &mem3.aPool[i]);
+ assert( 0 );
+ break;
+ }
+ if( size&1 ){
+ fprintf(out, "%p %6d bytes checked out\n", &mem3.aPool[i], (size/4)*8-8);
+ }else{
+ fprintf(out, "%p %6d bytes free%s\n", &mem3.aPool[i], (size/4)*8-8,
+ i==mem3.iKeyBlk ? " **key**" : "");
+ }
+ }
+ for(i=0; i<MX_SMALL-1; i++){
+ if( mem3.aiSmall[i]==0 ) continue;
+ fprintf(out, "small(%2d):", i);
+ for(j = mem3.aiSmall[i]; j>0; j=mem3.aPool[j].u.list.next){
+ fprintf(out, " %p(%d)", &mem3.aPool[j],
+ (mem3.aPool[j-1].u.hdr.size4x/4)*8-8);
+ }
+ fprintf(out, "\n");
+ }
+ for(i=0; i<N_HASH; i++){
+ if( mem3.aiHash[i]==0 ) continue;
+ fprintf(out, "hash(%2d):", i);
+ for(j = mem3.aiHash[i]; j>0; j=mem3.aPool[j].u.list.next){
+ fprintf(out, " %p(%d)", &mem3.aPool[j],
+ (mem3.aPool[j-1].u.hdr.size4x/4)*8-8);
+ }
+ fprintf(out, "\n");
+ }
+ fprintf(out, "key=%d\n", mem3.iKeyBlk);
+ fprintf(out, "nowUsed=%d\n", mem3.nPool*8 - mem3.szKeyBlk*8);
+ fprintf(out, "mxUsed=%d\n", mem3.nPool*8 - mem3.mnKeyBlk*8);
+ sqlite3_mutex_leave(mem3.mutex);
+ if( out==stdout ){
+ fflush(stdout);
+ }else{
+ fclose(out);
+ }
+#else
+ UNUSED_PARAMETER(zFilename);
+#endif
+}
+
+/*
+** This routine is the only routine in this file with external
+** linkage.
+**
+** Populate the low-level memory allocation function pointers in
+** sqlite3GlobalConfig.m with pointers to the routines in this file. The
+** arguments specify the block of memory to manage.
+**
+** This routine is only called by sqlite3_config(), and therefore
+** is not required to be threadsafe (it is not).
+*/
+SQLITE_PRIVATE const sqlite3_mem_methods *sqlite3MemGetMemsys3(void){
+ static const sqlite3_mem_methods mempoolMethods = {
+ memsys3Malloc,
+ memsys3Free,
+ memsys3Realloc,
+ memsys3Size,
+ memsys3Roundup,
+ memsys3Init,
+ memsys3Shutdown,
+ 0
+ };
+ return &mempoolMethods;
+}
+
+#endif /* SQLITE_ENABLE_MEMSYS3 */
+
+/************** End of mem3.c ************************************************/
+/************** Begin file mem5.c ********************************************/
+/*
+** 2007 October 14
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+** This file contains the C functions that implement a memory
+** allocation subsystem for use by SQLite.
+**
+** This version of the memory allocation subsystem omits all
+** use of malloc(). The application gives SQLite a block of memory
+** before calling sqlite3_initialize() from which allocations
+** are made and returned by the xMalloc() and xRealloc()
+** implementations. Once sqlite3_initialize() has been called,
+** the amount of memory available to SQLite is fixed and cannot
+** be changed.
+**
+** This version of the memory allocation subsystem is included
+** in the build only if SQLITE_ENABLE_MEMSYS5 is defined.
+**
+** This memory allocator uses the following algorithm:
+**
+** 1. All memory allocation sizes are rounded up to a power of 2.
+**
+** 2. If two adjacent free blocks are the halves of a larger block,
+** then the two blocks are coalesced into the single larger block.
+**
+** 3. New memory is allocated from the first available free block.
+**
+** This algorithm is described in: J. M. Robson. "Bounds for Some Functions
+** Concerning Dynamic Storage Allocation". Journal of the Association for
+** Computing Machinery, Volume 21, Number 3, July 1974, pages 491-499.
+**
+** Let n be the size of the largest allocation divided by the minimum
+** allocation size (after rounding all sizes up to a power of 2.) Let M
+** be the maximum amount of memory ever outstanding at one time. Let
+** N be the total amount of memory available for allocation. Robson
+** proved that this memory allocator will never break down due to
+** fragmentation as long as the following constraint holds:
+**
+** N >= M*(1 + log2(n)/2) - n + 1
+**
+** The sqlite3_status() logic tracks the maximum values of n and M so
+** that an application can, at any time, verify this constraint.
+*/
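+
+/*
+** Worked example (a sketch): with an 8-byte minimum allocation and a
+** largest allocation of 1024 bytes, n = 1024/8 = 128 and log2(n) = 7,
+** so the constraint becomes N >= M*(1 + 7/2) - 128 + 1 = 4.5*M - 127:
+** a pool 4.5 times the peak outstanding memory (less 127 bytes) can
+** never fail due to fragmentation.
+*/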
+/* #include "sqliteInt.h" */
+
+/*
+** This version of the memory allocator is used only when
+** SQLITE_ENABLE_MEMSYS5 is defined.
+*/
+#ifdef SQLITE_ENABLE_MEMSYS5
+
+/*
+** A minimum allocation is an instance of the following structure.
+** Larger allocations are an array of these structures where the
+** size of the array is a power of 2.
+**
+** The size of this object must be a power of two. That fact is
+** verified in memsys5Init().
+*/
+typedef struct Mem5Link Mem5Link;
+struct Mem5Link {
+ int next; /* Index of next free chunk */
+ int prev; /* Index of previous free chunk */
+};
+
+/*
+** Maximum size of any allocation is ((1<<LOGMAX)*mem5.szAtom). Since
+** mem5.szAtom is always at least 8 and 32-bit integers are used,
+** it is not actually possible to reach this limit.
+*/
+#define LOGMAX 30
+
+/*
+** Masks used for mem5.aCtrl[] elements.
+*/
+#define CTRL_LOGSIZE 0x1f /* Log2 Size of this block */
+#define CTRL_FREE 0x20 /* True if not checked out */
+
+/*
+** All of the static variables used by this module are collected
+** into a single structure named "mem5". This is to keep the
+** static variables organized and to reduce namespace pollution
+** when this module is combined with others in the amalgamation.
+*/
+static SQLITE_WSD struct Mem5Global {
+ /*
+ ** Memory available for allocation
+ */
+ int szAtom; /* Smallest possible allocation in bytes */
+ int nBlock; /* Number of szAtom sized blocks in zPool */
+ u8 *zPool; /* Memory available to be allocated */
+
+ /*
+ ** Mutex to control access to the memory allocation subsystem.
+ */
+ sqlite3_mutex *mutex;
+
+#if defined(SQLITE_DEBUG) || defined(SQLITE_TEST)
+ /*
+ ** Performance statistics
+ */
+ u64 nAlloc; /* Total number of calls to malloc */
+ u64 totalAlloc; /* Total of all malloc calls - includes internal frag */
+ u64 totalExcess; /* Total internal fragmentation */
+ u32 currentOut; /* Current checkout, including internal fragmentation */
+ u32 currentCount; /* Current number of distinct checkouts */
+ u32 maxOut; /* Maximum instantaneous currentOut */
+ u32 maxCount; /* Maximum instantaneous currentCount */
+ u32 maxRequest; /* Largest allocation (exclusive of internal frag) */
+#endif
+
+ /*
+ ** Lists of free blocks. aiFreelist[0] is a list of free blocks of
+ ** size mem5.szAtom. aiFreelist[1] holds blocks of size szAtom*2.
+ ** aiFreelist[2] holds free blocks of size szAtom*4. And so forth.
+ */
+ int aiFreelist[LOGMAX+1];
+
+ /*
+ ** Space for tracking which blocks are checked out and the size
+ ** of each block. One byte per block.
+ */
+ u8 *aCtrl;
+
+} mem5;
+
+/*
+** Access the static variable through a macro for SQLITE_OMIT_WSD.
+*/
+#define mem5 GLOBAL(struct Mem5Global, mem5)
+
+/*
+** Assuming mem5.zPool is divided up into an array of Mem5Link
+** structures, return a pointer to the idx-th such link.
+*/
+#define MEM5LINK(idx) ((Mem5Link *)(&mem5.zPool[(idx)*mem5.szAtom]))
+
+/*
+** Unlink the chunk at index i from the list it is currently
+** on. It should be found on mem5.aiFreelist[iLogsize].
+*/
+static void memsys5Unlink(int i, int iLogsize){
+ int next, prev;
+ assert( i>=0 && i<mem5.nBlock );
+ assert( iLogsize>=0 && iLogsize<=LOGMAX );
+ assert( (mem5.aCtrl[i] & CTRL_LOGSIZE)==iLogsize );
+
+ next = MEM5LINK(i)->next;
+ prev = MEM5LINK(i)->prev;
+ if( prev<0 ){
+ mem5.aiFreelist[iLogsize] = next;
+ }else{
+ MEM5LINK(prev)->next = next;
+ }
+ if( next>=0 ){
+ MEM5LINK(next)->prev = prev;
+ }
+}
+
+/*
+** Link the chunk at index i so that it is on the iLogsize
+** free list.
+*/
+static void memsys5Link(int i, int iLogsize){
+ int x;
+ assert( sqlite3_mutex_held(mem5.mutex) );
+ assert( i>=0 && i<mem5.nBlock );
+ assert( iLogsize>=0 && iLogsize<=LOGMAX );
+ assert( (mem5.aCtrl[i] & CTRL_LOGSIZE)==iLogsize );
+
+ x = MEM5LINK(i)->next = mem5.aiFreelist[iLogsize];
+ MEM5LINK(i)->prev = -1;
+ if( x>=0 ){
+ assert( x<mem5.nBlock );
+ MEM5LINK(x)->prev = i;
+ }
+ mem5.aiFreelist[iLogsize] = i;
+}
+
+/*
+** Obtain or release the mutex needed to access global data structures.
+*/
+static void memsys5Enter(void){
+ sqlite3_mutex_enter(mem5.mutex);
+}
+static void memsys5Leave(void){
+ sqlite3_mutex_leave(mem5.mutex);
+}
+
+/*
+** Return the size of an outstanding allocation, in bytes.
+** This only works for chunks that are currently checked out.
+*/
+static int memsys5Size(void *p){
+ int iSize, i;
+ assert( p!=0 );
+ i = (int)(((u8 *)p-mem5.zPool)/mem5.szAtom);
+ assert( i>=0 && i<mem5.nBlock );
+ iSize = mem5.szAtom * (1 << (mem5.aCtrl[i]&CTRL_LOGSIZE));
+ return iSize;
+}
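+
+/*
+** Editor's note, for illustration only: with szAtom==64, a chunk whose
+** control byte holds logsize 3 (aCtrl[i]==3, CTRL_FREE clear) reports
+** 64 * (1<<3) == 512 bytes. The value is the full power-of-two size of
+** the chunk, not the size the caller originally requested.
+*/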
+
+/*
+** Return a block of memory of at least nByte bytes in size.
+** Return NULL if unable. Return NULL if nByte==0.
+**
+** The caller guarantees that nByte is positive.
+**
+** The caller has obtained a mutex prior to invoking this
+** routine so there is never any chance that two or more
+** threads can be in this routine at the same time.
+*/
+static void *memsys5MallocUnsafe(int nByte){
+ int i; /* Index of a mem5.aPool[] slot */
+ int iBin; /* Index into mem5.aiFreelist[] */
+ int iFullSz; /* Size of allocation rounded up to power of 2 */
+ int iLogsize; /* Log2 of iFullSz/POW2_MIN */
+
+ /* nByte must be positive */
+ assert( nByte>0 );
+
+ /* No more than 1GiB per allocation */
+ if( nByte > 0x40000000 ) return 0;
+
+#if defined(SQLITE_DEBUG) || defined(SQLITE_TEST)
+ /* Keep track of the maximum allocation request. Even unfulfilled
+ ** requests are counted */
+ if( (u32)nByte>mem5.maxRequest ){
+ mem5.maxRequest = nByte;
+ }
+#endif
+
+ /* Round nByte up to the next valid power of two */
+ for(iFullSz=mem5.szAtom,iLogsize=0; iFullSz<nByte; iFullSz*=2,iLogsize++){}
+
+ /* Make sure mem5.aiFreelist[iLogsize] contains at least one free
+ ** block. If not, then split a block of the next larger power of
+ ** two in order to create a new free block of size iLogsize.
+ */
+ for(iBin=iLogsize; iBin<=LOGMAX && mem5.aiFreelist[iBin]<0; iBin++){}
+ if( iBin>LOGMAX ){
+ testcase( sqlite3GlobalConfig.xLog!=0 );
+ sqlite3_log(SQLITE_NOMEM, "failed to allocate %u bytes", nByte);
+ return 0;
+ }
+ i = mem5.aiFreelist[iBin];
+ memsys5Unlink(i, iBin);
+ while( iBin>iLogsize ){
+ int newSize;
+
+ iBin--;
+ newSize = 1 << iBin;
+ mem5.aCtrl[i+newSize] = CTRL_FREE | iBin;
+ memsys5Link(i+newSize, iBin);
+ }
+ mem5.aCtrl[i] = iLogsize;
+
+#if defined(SQLITE_DEBUG) || defined(SQLITE_TEST)
+ /* Update allocator performance statistics. */
+ mem5.nAlloc++;
+ mem5.totalAlloc += iFullSz;
+ mem5.totalExcess += iFullSz - nByte;
+ mem5.currentCount++;
+ mem5.currentOut += iFullSz;
+ if( mem5.maxCount<mem5.currentCount ) mem5.maxCount = mem5.currentCount;
+ if( mem5.maxOut<mem5.currentOut ) mem5.maxOut = mem5.currentOut;
+#endif
+
+#ifdef SQLITE_DEBUG
+ /* Make sure the allocated memory does not assume that it is set to zero
+ ** or retains a value from a previous allocation */
+ memset(&mem5.zPool[i*mem5.szAtom], 0xAA, iFullSz);
+#endif
+
+ /* Return a pointer to the allocated memory. */
+ return (void*)&mem5.zPool[i*mem5.szAtom];
+}
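+
+/*
+** Editor's note: a worked trace of the split loop above, added for
+** illustration. With szAtom==64, a request for nByte==100 rounds up
+** to iFullSz==128 and iLogsize==1. If the smallest available block is
+** 1024 bytes (bin 4) at atom index i, the loop peels off the upper
+** halves:
+**
+**   bin 3: atoms i+8..i+15 freed
+**   bin 2: atoms i+4..i+7  freed
+**   bin 1: atoms i+2..i+3  freed
+**
+** leaving the 128-byte block at atom index i checked out.
+*/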
+
+/*
+** Free an outstanding memory allocation.
+*/
+static void memsys5FreeUnsafe(void *pOld){
+ u32 size, iLogsize;
+ int iBlock;
+
+ /* Set iBlock to the index of the block pointed to by pOld in
+ ** the array of mem5.szAtom byte blocks pointed to by mem5.zPool.
+ */
+ iBlock = (int)(((u8 *)pOld-mem5.zPool)/mem5.szAtom);
+
+ /* Check that the pointer pOld points to a valid, non-free block. */
+ assert( iBlock>=0 && iBlock<mem5.nBlock );
+ assert( ((u8 *)pOld-mem5.zPool)%mem5.szAtom==0 );
+ assert( (mem5.aCtrl[iBlock] & CTRL_FREE)==0 );
+
+ iLogsize = mem5.aCtrl[iBlock] & CTRL_LOGSIZE;
+ size = 1<<iLogsize;
+ assert( iBlock+size-1<(u32)mem5.nBlock );
+
+ mem5.aCtrl[iBlock] |= CTRL_FREE;
+ mem5.aCtrl[iBlock+size-1] |= CTRL_FREE;
+
+#if defined(SQLITE_DEBUG) || defined(SQLITE_TEST)
+ assert( mem5.currentCount>0 );
+ assert( mem5.currentOut>=(size*mem5.szAtom) );
+ mem5.currentCount--;
+ mem5.currentOut -= size*mem5.szAtom;
+ assert( mem5.currentOut>0 || mem5.currentCount==0 );
+ assert( mem5.currentCount>0 || mem5.currentOut==0 );
+#endif
+
+ mem5.aCtrl[iBlock] = CTRL_FREE | iLogsize;
+ while( ALWAYS(iLogsize<LOGMAX) ){
+ int iBuddy;
+ if( (iBlock>>iLogsize) & 1 ){
+ iBuddy = iBlock - size;
+ assert( iBuddy>=0 );
+ }else{
+ iBuddy = iBlock + size;
+ if( iBuddy>=mem5.nBlock ) break;
+ }
+ if( mem5.aCtrl[iBuddy]!=(CTRL_FREE | iLogsize) ) break;
+ memsys5Unlink(iBuddy, iLogsize);
+ iLogsize++;
+ if( iBuddy<iBlock ){
+ mem5.aCtrl[iBuddy] = CTRL_FREE | iLogsize;
+ mem5.aCtrl[iBlock] = 0;
+ iBlock = iBuddy;
+ }else{
+ mem5.aCtrl[iBlock] = CTRL_FREE | iLogsize;
+ mem5.aCtrl[iBuddy] = 0;
+ }
+ size *= 2;
+ }
+
+#ifdef SQLITE_DEBUG
+ /* Overwrite freed memory with the 0x55 bit pattern to verify that it is
+ ** not used after being freed */
+ memset(&mem5.zPool[iBlock*mem5.szAtom], 0x55, size);
+#endif
+
+ memsys5Link(iBlock, iLogsize);
+}
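+
+/*
+** Editor's note: a worked buddy computation, added for illustration.
+** Freeing a 2-atom block at iBlock==6 (iLogsize==1): (6>>1)&1 is 1,
+** so the block is the upper half of its pair and its buddy starts at
+** 6-2==4. If aCtrl[4]==(CTRL_FREE|1), the two merge into one free
+** 4-atom block at index 4, and the loop then tries to merge that
+** block with its own buddy at index 0.
+*/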
+
+/*
+** Allocate nBytes of memory.
+*/
+static void *memsys5Malloc(int nBytes){
+ sqlite3_int64 *p = 0;
+ if( nBytes>0 ){
+ memsys5Enter();
+ p = memsys5MallocUnsafe(nBytes);
+ memsys5Leave();
+ }
+ return (void*)p;
+}
+
+/*
+** Free memory.
+**
+** The outer layer memory allocator prevents this routine from
+** being called with pPrior==0.
+*/
+static void memsys5Free(void *pPrior){
+ assert( pPrior!=0 );
+ memsys5Enter();
+ memsys5FreeUnsafe(pPrior);
+ memsys5Leave();
+}
+
+/*
+** Change the size of an existing memory allocation.
+**
+** The outer layer memory allocator prevents this routine from
+** being called with pPrior==0.
+**
+** nBytes is always a value obtained from a prior call to
+** memsys5Round(). Hence nBytes is always a non-negative power
+** of two. If nBytes==0 that means that an oversize allocation
+** (an allocation larger than 0x40000000) was requested and this
+** routine should return 0 without freeing pPrior.
+*/
+static void *memsys5Realloc(void *pPrior, int nBytes){
+ int nOld;
+ void *p;
+ assert( pPrior!=0 );
+ assert( (nBytes&(nBytes-1))==0 ); /* EV: R-46199-30249 */
+ assert( nBytes>=0 );
+ if( nBytes==0 ){
+ return 0;
+ }
+ nOld = memsys5Size(pPrior);
+ if( nBytes<=nOld ){
+ return pPrior;
+ }
+ p = memsys5Malloc(nBytes);
+ if( p ){
+ memcpy(p, pPrior, nOld);
+ memsys5Free(pPrior);
+ }
+ return p;
+}
+
+/*
+** Round up a request size to the next valid allocation size. If
+** the allocation is too large to be handled by this allocation system,
+** return 0.
+**
+** All allocations must be a power of two and must be expressed by a
+** 32-bit signed integer. Hence the largest allocation is 0x40000000
+** or 1073741824 bytes.
+*/
+static int memsys5Roundup(int n){
+ int iFullSz;
+ if( n > 0x40000000 ) return 0;
+ for(iFullSz=mem5.szAtom; iFullSz<n; iFullSz *= 2);
+ return iFullSz;
+}
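+
+/*
+** Editor's note: sample results, added for illustration, assuming
+** mem5.szAtom==64:
+**
+**   memsys5Roundup(1)          -> 64
+**   memsys5Roundup(65)         -> 128
+**   memsys5Roundup(0x40000000) -> 0x40000000
+**   memsys5Roundup(0x40000001) -> 0   (too large for this allocator)
+*/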
+
+/*
+** Return the ceiling of the logarithm base 2 of iValue.
+**
+** Examples: memsys5Log(1) -> 0
+** memsys5Log(2) -> 1
+** memsys5Log(4) -> 2
+** memsys5Log(5) -> 3
+** memsys5Log(8) -> 3
+** memsys5Log(9) -> 4
+*/
+static int memsys5Log(int iValue){
+ int iLog;
+ for(iLog=0; (iLog<(int)((sizeof(int)*8)-1)) && (1<<iLog)<iValue; iLog++);
+ return iLog;
+}
+
+/*
+** Initialize the memory allocator.
+**
+** This routine is not threadsafe. The caller must be holding a mutex
+** to prevent multiple threads from entering at the same time.
+*/
+static int memsys5Init(void *NotUsed){
+ int ii; /* Loop counter */
+ int nByte; /* Number of bytes of memory available to this allocator */
+ u8 *zByte; /* Memory usable by this allocator */
+ int nMinLog; /* Log base 2 of minimum allocation size in bytes */
+ int iOffset; /* An offset into mem5.aCtrl[] */
+
+ UNUSED_PARAMETER(NotUsed);
+
+ /* For the purposes of this routine, disable the mutex */
+ mem5.mutex = 0;
+
+ /* The size of a Mem5Link object must be a power of two. Verify that
+ ** this is the case.
+ */
+ assert( (sizeof(Mem5Link)&(sizeof(Mem5Link)-1))==0 );
+
+ nByte = sqlite3GlobalConfig.nHeap;
+ zByte = (u8*)sqlite3GlobalConfig.pHeap;
+ assert( zByte!=0 ); /* sqlite3_config() does not allow otherwise */
+
+ /* boundaries on sqlite3GlobalConfig.mnReq are enforced in sqlite3_config() */
+ nMinLog = memsys5Log(sqlite3GlobalConfig.mnReq);
+ mem5.szAtom = (1<<nMinLog);
+ while( (int)sizeof(Mem5Link)>mem5.szAtom ){
+ mem5.szAtom = mem5.szAtom << 1;
+ }
+
+ mem5.nBlock = (nByte / (mem5.szAtom+sizeof(u8)));
+ mem5.zPool = zByte;
+ mem5.aCtrl = (u8 *)&mem5.zPool[mem5.nBlock*mem5.szAtom];
+
+ for(ii=0; ii<=LOGMAX; ii++){
+ mem5.aiFreelist[ii] = -1;
+ }
+
+ iOffset = 0;
+ for(ii=LOGMAX; ii>=0; ii--){
+ int nAlloc = (1<<ii);
+ if( (iOffset+nAlloc)<=mem5.nBlock ){
+ mem5.aCtrl[iOffset] = ii | CTRL_FREE;
+ memsys5Link(iOffset, ii);
+ iOffset += nAlloc;
+ }
+ assert((iOffset+nAlloc)>mem5.nBlock);
+ }
+
+ /* If a mutex is required for normal operation, allocate one */
+ if( sqlite3GlobalConfig.bMemstat==0 ){
+ mem5.mutex = sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MEM);
+ }
+
+ return SQLITE_OK;
+}
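+
+/*
+** Editor's note: a worked carve-up of the loop above, added for
+** illustration. With mem5.nBlock==1000, the initializer greedily seeds
+** the freelists with the largest power-of-two runs that fit:
+**
+**   bin 9: 512 atoms at index   0
+**   bin 8: 256 atoms at index 512
+**   bin 7: 128 atoms at index 768
+**   bin 6:  64 atoms at index 896
+**   bin 5:  32 atoms at index 960
+**   bin 3:   8 atoms at index 992   (512+256+128+64+32+8 == 1000)
+*/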
+
+/*
+** Deinitialize this module.
+*/
+static void memsys5Shutdown(void *NotUsed){
+ UNUSED_PARAMETER(NotUsed);
+ mem5.mutex = 0;
+ return;
+}
+
+#ifdef SQLITE_TEST
+/*
+** Open the indicated file and write a log of all unfreed memory
+** allocations into it.
+*/
+SQLITE_PRIVATE void sqlite3Memsys5Dump(const char *zFilename){
+ FILE *out;
+ int i, j, n;
+ int nMinLog;
+
+ if( zFilename==0 || zFilename[0]==0 ){
+ out = stdout;
+ }else{
+ out = fopen(zFilename, "w");
+ if( out==0 ){
+ fprintf(stderr, "** Unable to open memory debug output file: %s **\n",
+ zFilename);
+ return;
+ }
+ }
+ memsys5Enter();
+ nMinLog = memsys5Log(mem5.szAtom);
+ for(i=0; i<=LOGMAX && i+nMinLog<32; i++){
+ for(n=0, j=mem5.aiFreelist[i]; j>=0; j = MEM5LINK(j)->next, n++){}
+ fprintf(out, "freelist items of size %d: %d\n", mem5.szAtom << i, n);
+ }
+ fprintf(out, "mem5.nAlloc = %llu\n", mem5.nAlloc);
+ fprintf(out, "mem5.totalAlloc = %llu\n", mem5.totalAlloc);
+ fprintf(out, "mem5.totalExcess = %llu\n", mem5.totalExcess);
+ fprintf(out, "mem5.currentOut = %u\n", mem5.currentOut);
+ fprintf(out, "mem5.currentCount = %u\n", mem5.currentCount);
+ fprintf(out, "mem5.maxOut = %u\n", mem5.maxOut);
+ fprintf(out, "mem5.maxCount = %u\n", mem5.maxCount);
+ fprintf(out, "mem5.maxRequest = %u\n", mem5.maxRequest);
+ memsys5Leave();
+ if( out==stdout ){
+ fflush(stdout);
+ }else{
+ fclose(out);
+ }
+}
+#endif
+
+/*
+** This routine is the only routine in this file with external
+** linkage. It returns a pointer to a static sqlite3_mem_methods
+** struct populated with the memsys5 methods.
+*/
+SQLITE_PRIVATE const sqlite3_mem_methods *sqlite3MemGetMemsys5(void){
+ static const sqlite3_mem_methods memsys5Methods = {
+ memsys5Malloc,
+ memsys5Free,
+ memsys5Realloc,
+ memsys5Size,
+ memsys5Roundup,
+ memsys5Init,
+ memsys5Shutdown,
+ 0
+ };
+ return &memsys5Methods;
+}
+
+#endif /* SQLITE_ENABLE_MEMSYS5 */
+
+/************** End of mem5.c ************************************************/
+/************** Begin file mutex.c *******************************************/
+/*
+** 2007 August 14
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+** This file contains the C functions that implement mutexes.
+**
+** This file contains code that is common across all mutex implementations.
+*/
+/* #include "sqliteInt.h" */
+
+#if defined(SQLITE_DEBUG) && !defined(SQLITE_MUTEX_OMIT)
+/*
+** For debugging purposes, record when the mutex subsystem is initialized
+** and uninitialized so that we can assert() if there is an attempt to
+** allocate a mutex while the system is uninitialized.
+*/
+static SQLITE_WSD int mutexIsInit = 0;
+#endif /* SQLITE_DEBUG && !defined(SQLITE_MUTEX_OMIT) */
+
+
+#ifndef SQLITE_MUTEX_OMIT
+
+#ifdef SQLITE_ENABLE_MULTITHREADED_CHECKS
+/*
+** This block (enclosed by SQLITE_ENABLE_MULTITHREADED_CHECKS) contains
+** the implementation of a wrapper around the system default mutex
+** implementation (sqlite3DefaultMutex()).
+**
+** Most calls are passed directly through to the underlying default
+** mutex implementation. Except, if a mutex is configured by calling
+** sqlite3MutexWarnOnContention() on it, then if contention is ever
+** encountered within xMutexEnter() a warning is emitted via sqlite3_log().
+**
+** This type of mutex is used as the database handle mutex when testing
+** apps that usually use SQLITE_CONFIG_MULTITHREAD mode.
+*/
+
+/*
+** Type for all mutexes used when SQLITE_ENABLE_MULTITHREADED_CHECKS
+** is defined. Variable CheckMutex.mutex is a pointer to the real mutex
+** allocated by the system mutex implementation. Variable iType is usually set
+** to the type of mutex requested - SQLITE_MUTEX_RECURSIVE, SQLITE_MUTEX_FAST
+** or one of the static mutex identifiers. Or, if this is a recursive mutex
+** that has been configured using sqlite3MutexWarnOnContention(), it is
+** set to SQLITE_MUTEX_WARNONCONTENTION.
+*/
+typedef struct CheckMutex CheckMutex;
+struct CheckMutex {
+ int iType;
+ sqlite3_mutex *mutex;
+};
+
+#define SQLITE_MUTEX_WARNONCONTENTION (-1)
+
+/*
+** Pointer to real mutex methods object used by the CheckMutex
+** implementation. Set by checkMutexInit().
+*/
+static SQLITE_WSD const sqlite3_mutex_methods *pGlobalMutexMethods;
+
+#ifdef SQLITE_DEBUG
+static int checkMutexHeld(sqlite3_mutex *p){
+ return pGlobalMutexMethods->xMutexHeld(((CheckMutex*)p)->mutex);
+}
+static int checkMutexNotheld(sqlite3_mutex *p){
+ return pGlobalMutexMethods->xMutexNotheld(((CheckMutex*)p)->mutex);
+}
+#endif
+
+/*
+** Initialize and deinitialize the mutex subsystem.
+*/
+static int checkMutexInit(void){
+ pGlobalMutexMethods = sqlite3DefaultMutex();
+ return SQLITE_OK;
+}
+static int checkMutexEnd(void){
+ pGlobalMutexMethods = 0;
+ return SQLITE_OK;
+}
+
+/*
+** Allocate a mutex.
+*/
+static sqlite3_mutex *checkMutexAlloc(int iType){
+ static CheckMutex staticMutexes[] = {
+ {2, 0}, {3, 0}, {4, 0}, {5, 0},
+ {6, 0}, {7, 0}, {8, 0}, {9, 0},
+ {10, 0}, {11, 0}, {12, 0}, {13, 0}
+ };
+ CheckMutex *p = 0;
+
+ assert( SQLITE_MUTEX_RECURSIVE==1 && SQLITE_MUTEX_FAST==0 );
+ if( iType<2 ){
+ p = sqlite3MallocZero(sizeof(CheckMutex));
+ if( p==0 ) return 0;
+ p->iType = iType;
+ }else{
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( iType-2>=ArraySize(staticMutexes) ){
+ (void)SQLITE_MISUSE_BKPT;
+ return 0;
+ }
+#endif
+ p = &staticMutexes[iType-2];
+ }
+
+ if( p->mutex==0 ){
+ p->mutex = pGlobalMutexMethods->xMutexAlloc(iType);
+ if( p->mutex==0 ){
+ if( iType<2 ){
+ sqlite3_free(p);
+ }
+ p = 0;
+ }
+ }
+
+ return (sqlite3_mutex*)p;
+}
+
+/*
+** Free a mutex.
+*/
+static void checkMutexFree(sqlite3_mutex *p){
+ assert( SQLITE_MUTEX_RECURSIVE<2 );
+ assert( SQLITE_MUTEX_FAST<2 );
+ assert( SQLITE_MUTEX_WARNONCONTENTION<2 );
+
+#if SQLITE_ENABLE_API_ARMOR
+ if( ((CheckMutex*)p)->iType<2 )
+#endif
+ {
+ CheckMutex *pCheck = (CheckMutex*)p;
+ pGlobalMutexMethods->xMutexFree(pCheck->mutex);
+ sqlite3_free(pCheck);
+ }
+#ifdef SQLITE_ENABLE_API_ARMOR
+ else{
+ (void)SQLITE_MISUSE_BKPT;
+ }
+#endif
+}
+
+/*
+** Enter the mutex.
+*/
+static void checkMutexEnter(sqlite3_mutex *p){
+ CheckMutex *pCheck = (CheckMutex*)p;
+ if( pCheck->iType==SQLITE_MUTEX_WARNONCONTENTION ){
+ if( SQLITE_OK==pGlobalMutexMethods->xMutexTry(pCheck->mutex) ){
+ return;
+ }
+ sqlite3_log(SQLITE_MISUSE,
+ "illegal multi-threaded access to database connection"
+ );
+ }
+ pGlobalMutexMethods->xMutexEnter(pCheck->mutex);
+}
+
+/*
+** Enter the mutex (do not block).
+*/
+static int checkMutexTry(sqlite3_mutex *p){
+ CheckMutex *pCheck = (CheckMutex*)p;
+ return pGlobalMutexMethods->xMutexTry(pCheck->mutex);
+}
+
+/*
+** Leave the mutex.
+*/
+static void checkMutexLeave(sqlite3_mutex *p){
+ CheckMutex *pCheck = (CheckMutex*)p;
+ pGlobalMutexMethods->xMutexLeave(pCheck->mutex);
+}
+
+sqlite3_mutex_methods const *multiThreadedCheckMutex(void){
+ static const sqlite3_mutex_methods sMutex = {
+ checkMutexInit,
+ checkMutexEnd,
+ checkMutexAlloc,
+ checkMutexFree,
+ checkMutexEnter,
+ checkMutexTry,
+ checkMutexLeave,
+#ifdef SQLITE_DEBUG
+ checkMutexHeld,
+ checkMutexNotheld
+#else
+ 0,
+ 0
+#endif
+ };
+ return &sMutex;
+}
+
+/*
+** Mark the SQLITE_MUTEX_RECURSIVE mutex passed as the only argument as
+** one on which there should be no contention.
+*/
+SQLITE_PRIVATE void sqlite3MutexWarnOnContention(sqlite3_mutex *p){
+ if( sqlite3GlobalConfig.mutex.xMutexAlloc==checkMutexAlloc ){
+ CheckMutex *pCheck = (CheckMutex*)p;
+ assert( pCheck->iType==SQLITE_MUTEX_RECURSIVE );
+ pCheck->iType = SQLITE_MUTEX_WARNONCONTENTION;
+ }
+}
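+
+/*
+** Editor's note: a usage sketch, not part of the original source. The
+** core flags a connection's recursive mutex so that contended entry is
+** reported through sqlite3_log():
+**
+**   sqlite3MutexWarnOnContention(sqlite3_db_mutex(db));
+**
+** After this call, checkMutexEnter() logs SQLITE_MISUSE whenever
+** xMutexTry() fails on that mutex, before falling back to a blocking
+** xMutexEnter().
+*/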
+#endif /* ifdef SQLITE_ENABLE_MULTITHREADED_CHECKS */
+
+/*
+** Initialize the mutex system.
+*/
+SQLITE_PRIVATE int sqlite3MutexInit(void){
+ int rc = SQLITE_OK;
+ if( !sqlite3GlobalConfig.mutex.xMutexAlloc ){
+ /* If the xMutexAlloc method has not been set, then the user did not
+ ** install a mutex implementation via sqlite3_config() prior to
+ ** sqlite3_initialize() being called. This block copies pointers to
+ ** the default implementation into the sqlite3GlobalConfig structure.
+ */
+ sqlite3_mutex_methods const *pFrom;
+ sqlite3_mutex_methods *pTo = &sqlite3GlobalConfig.mutex;
+
+ if( sqlite3GlobalConfig.bCoreMutex ){
+#ifdef SQLITE_ENABLE_MULTITHREADED_CHECKS
+ pFrom = multiThreadedCheckMutex();
+#else
+ pFrom = sqlite3DefaultMutex();
+#endif
+ }else{
+ pFrom = sqlite3NoopMutex();
+ }
+ pTo->xMutexInit = pFrom->xMutexInit;
+ pTo->xMutexEnd = pFrom->xMutexEnd;
+ pTo->xMutexFree = pFrom->xMutexFree;
+ pTo->xMutexEnter = pFrom->xMutexEnter;
+ pTo->xMutexTry = pFrom->xMutexTry;
+ pTo->xMutexLeave = pFrom->xMutexLeave;
+ pTo->xMutexHeld = pFrom->xMutexHeld;
+ pTo->xMutexNotheld = pFrom->xMutexNotheld;
+ sqlite3MemoryBarrier();
+ pTo->xMutexAlloc = pFrom->xMutexAlloc;
+ }
+ assert( sqlite3GlobalConfig.mutex.xMutexInit );
+ rc = sqlite3GlobalConfig.mutex.xMutexInit();
+
+#ifdef SQLITE_DEBUG
+ GLOBAL(int, mutexIsInit) = 1;
+#endif
+
+ sqlite3MemoryBarrier();
+ return rc;
+}
+
+/*
+** Shutdown the mutex system. This call frees resources allocated by
+** sqlite3MutexInit().
+*/
+SQLITE_PRIVATE int sqlite3MutexEnd(void){
+ int rc = SQLITE_OK;
+ if( sqlite3GlobalConfig.mutex.xMutexEnd ){
+ rc = sqlite3GlobalConfig.mutex.xMutexEnd();
+ }
+
+#ifdef SQLITE_DEBUG
+ GLOBAL(int, mutexIsInit) = 0;
+#endif
+
+ return rc;
+}
+
+/*
+** Retrieve a pointer to a static mutex or allocate a new dynamic one.
+*/
+SQLITE_API sqlite3_mutex *sqlite3_mutex_alloc(int id){
+#ifndef SQLITE_OMIT_AUTOINIT
+ if( id<=SQLITE_MUTEX_RECURSIVE && sqlite3_initialize() ) return 0;
+ if( id>SQLITE_MUTEX_RECURSIVE && sqlite3MutexInit() ) return 0;
+#endif
+ assert( sqlite3GlobalConfig.mutex.xMutexAlloc );
+ return sqlite3GlobalConfig.mutex.xMutexAlloc(id);
+}
+
+SQLITE_PRIVATE sqlite3_mutex *sqlite3MutexAlloc(int id){
+ if( !sqlite3GlobalConfig.bCoreMutex ){
+ return 0;
+ }
+ assert( GLOBAL(int, mutexIsInit) );
+ assert( sqlite3GlobalConfig.mutex.xMutexAlloc );
+ return sqlite3GlobalConfig.mutex.xMutexAlloc(id);
+}
+
+/*
+** Free a dynamic mutex.
+*/
+SQLITE_API void sqlite3_mutex_free(sqlite3_mutex *p){
+ if( p ){
+ assert( sqlite3GlobalConfig.mutex.xMutexFree );
+ sqlite3GlobalConfig.mutex.xMutexFree(p);
+ }
+}
+
+/*
+** Obtain the mutex p. If some other thread already has the mutex, block
+** until it can be obtained.
+*/
+SQLITE_API void sqlite3_mutex_enter(sqlite3_mutex *p){
+ if( p ){
+ assert( sqlite3GlobalConfig.mutex.xMutexEnter );
+ sqlite3GlobalConfig.mutex.xMutexEnter(p);
+ }
+}
+
+/*
+** Obtain the mutex p. If successful, return SQLITE_OK. Otherwise, if another
+** thread holds the mutex and it cannot be obtained, return SQLITE_BUSY.
+*/
+SQLITE_API int sqlite3_mutex_try(sqlite3_mutex *p){
+ int rc = SQLITE_OK;
+ if( p ){
+ assert( sqlite3GlobalConfig.mutex.xMutexTry );
+ return sqlite3GlobalConfig.mutex.xMutexTry(p);
+ }
+ return rc;
+}
+
+/*
+** The sqlite3_mutex_leave() routine exits a mutex that was previously
+** entered by the same thread. The behavior is undefined if the mutex
+** is not currently entered. If a NULL pointer is passed as an argument
+** this function is a no-op.
+*/
+SQLITE_API void sqlite3_mutex_leave(sqlite3_mutex *p){
+ if( p ){
+ assert( sqlite3GlobalConfig.mutex.xMutexLeave );
+ sqlite3GlobalConfig.mutex.xMutexLeave(p);
+ }
+}
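+
+/*
+** Editor's note: the lifecycle of a dynamic mutex through the public
+** API above (an illustrative sketch, not part of the original source):
+**
+**   sqlite3_mutex *pM = sqlite3_mutex_alloc(SQLITE_MUTEX_FAST);
+**   if( pM ){
+**     sqlite3_mutex_enter(pM);     (blocking entry)
+**     ...critical section...
+**     sqlite3_mutex_leave(pM);
+**     sqlite3_mutex_free(pM);      (only dynamic mutexes are freed)
+**   }
+*/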
+
+#ifndef NDEBUG
+/*
+** The sqlite3_mutex_held() and sqlite3_mutex_notheld() routines are
+** intended for use inside assert() statements.
+*/
+SQLITE_API int sqlite3_mutex_held(sqlite3_mutex *p){
+ assert( p==0 || sqlite3GlobalConfig.mutex.xMutexHeld );
+ return p==0 || sqlite3GlobalConfig.mutex.xMutexHeld(p);
+}
+SQLITE_API int sqlite3_mutex_notheld(sqlite3_mutex *p){
+ assert( p==0 || sqlite3GlobalConfig.mutex.xMutexNotheld );
+ return p==0 || sqlite3GlobalConfig.mutex.xMutexNotheld(p);
+}
+#endif
+
+#endif /* !defined(SQLITE_MUTEX_OMIT) */
+
+/************** End of mutex.c ***********************************************/
+/************** Begin file mutex_noop.c **************************************/
+/*
+** 2008 October 07
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+** This file contains the C functions that implement mutexes.
+**
+** The implementation in this file does not provide any mutual
+** exclusion and is thus suitable for use only in applications
+** that use SQLite in a single thread. The routines defined
+** here are place-holders. Applications can substitute working
+** mutex routines at start-time using the
+**
+** sqlite3_config(SQLITE_CONFIG_MUTEX,...)
+**
+** interface.
+**
+** If compiled with SQLITE_DEBUG, then additional logic is inserted
+** that does error checking on mutexes to make sure they are being
+** called correctly.
+*/
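+
+/*
+** Editor's note: a sketch of the substitution mentioned above, not
+** part of the original source. The application supplies a complete
+** sqlite3_mutex_methods object (all callbacks app-defined here) and
+** installs it before sqlite3_initialize():
+**
+**   static const sqlite3_mutex_methods myMethods = {
+**     myInit, myEnd, myAlloc, myFree,
+**     myEnter, myTry, myLeave, 0, 0
+**   };
+**   sqlite3_config(SQLITE_CONFIG_MUTEX, &myMethods);
+*/
+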
+/* #include "sqliteInt.h" */
+
+#ifndef SQLITE_MUTEX_OMIT
+
+#ifndef SQLITE_DEBUG
+/*
+** Stub routines for all mutex methods.
+**
+** These routines provide no mutual exclusion or error checking.
+*/
+static int noopMutexInit(void){ return SQLITE_OK; }
+static int noopMutexEnd(void){ return SQLITE_OK; }
+static sqlite3_mutex *noopMutexAlloc(int id){
+ UNUSED_PARAMETER(id);
+ return (sqlite3_mutex*)8;
+}
+static void noopMutexFree(sqlite3_mutex *p){ UNUSED_PARAMETER(p); return; }
+static void noopMutexEnter(sqlite3_mutex *p){ UNUSED_PARAMETER(p); return; }
+static int noopMutexTry(sqlite3_mutex *p){
+ UNUSED_PARAMETER(p);
+ return SQLITE_OK;
+}
+static void noopMutexLeave(sqlite3_mutex *p){ UNUSED_PARAMETER(p); return; }
+
+SQLITE_PRIVATE sqlite3_mutex_methods const *sqlite3NoopMutex(void){
+ static const sqlite3_mutex_methods sMutex = {
+ noopMutexInit,
+ noopMutexEnd,
+ noopMutexAlloc,
+ noopMutexFree,
+ noopMutexEnter,
+ noopMutexTry,
+ noopMutexLeave,
+
+ 0,
+ 0,
+ };
+
+ return &sMutex;
+}
+#endif /* !SQLITE_DEBUG */
+
+#ifdef SQLITE_DEBUG
+/*
+** In this implementation, error checking is provided for testing
+** and debugging purposes. The mutexes still do not provide any
+** mutual exclusion.
+*/
+
+/*
+** The mutex object
+*/
+typedef struct sqlite3_debug_mutex {
+ int id; /* The mutex type */
+ int cnt; /* Number of entries without a matching leave */
+} sqlite3_debug_mutex;
+
+/*
+** The sqlite3_mutex_held() and sqlite3_mutex_notheld() routines are
+** intended for use inside assert() statements.
+*/
+static int debugMutexHeld(sqlite3_mutex *pX){
+ sqlite3_debug_mutex *p = (sqlite3_debug_mutex*)pX;
+ return p==0 || p->cnt>0;
+}
+static int debugMutexNotheld(sqlite3_mutex *pX){
+ sqlite3_debug_mutex *p = (sqlite3_debug_mutex*)pX;
+ return p==0 || p->cnt==0;
+}
+
+/*
+** Initialize and deinitialize the mutex subsystem.
+*/
+static int debugMutexInit(void){ return SQLITE_OK; }
+static int debugMutexEnd(void){ return SQLITE_OK; }
+
+/*
+** The sqlite3_mutex_alloc() routine allocates a new
+** mutex and returns a pointer to it. If it returns NULL
+** that means that a mutex could not be allocated.
+*/
+static sqlite3_mutex *debugMutexAlloc(int id){
+ static sqlite3_debug_mutex aStatic[SQLITE_MUTEX_STATIC_VFS3 - 1];
+ sqlite3_debug_mutex *pNew = 0;
+ switch( id ){
+ case SQLITE_MUTEX_FAST:
+ case SQLITE_MUTEX_RECURSIVE: {
+ pNew = sqlite3Malloc(sizeof(*pNew));
+ if( pNew ){
+ pNew->id = id;
+ pNew->cnt = 0;
+ }
+ break;
+ }
+ default: {
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( id-2<0 || id-2>=ArraySize(aStatic) ){
+ (void)SQLITE_MISUSE_BKPT;
+ return 0;
+ }
+#endif
+ pNew = &aStatic[id-2];
+ pNew->id = id;
+ break;
+ }
+ }
+ return (sqlite3_mutex*)pNew;
+}
+
+/*
+** This routine deallocates a previously allocated mutex.
+*/
+static void debugMutexFree(sqlite3_mutex *pX){
+ sqlite3_debug_mutex *p = (sqlite3_debug_mutex*)pX;
+ assert( p->cnt==0 );
+ if( p->id==SQLITE_MUTEX_RECURSIVE || p->id==SQLITE_MUTEX_FAST ){
+ sqlite3_free(p);
+ }else{
+#ifdef SQLITE_ENABLE_API_ARMOR
+ (void)SQLITE_MISUSE_BKPT;
+#endif
+ }
+}
+
+/*
+** The sqlite3_mutex_enter() and sqlite3_mutex_try() routines attempt
+** to enter a mutex. If another thread is already within the mutex,
+** sqlite3_mutex_enter() will block and sqlite3_mutex_try() will return
+** SQLITE_BUSY. The sqlite3_mutex_try() interface returns SQLITE_OK
+** upon successful entry. Mutexes created using SQLITE_MUTEX_RECURSIVE can
+** be entered multiple times by the same thread. In such cases, the
+** mutex must be exited an equal number of times before another thread
+** can enter. If the same thread tries to enter any other kind of mutex
+** more than once, the behavior is undefined.
+*/
+static void debugMutexEnter(sqlite3_mutex *pX){
+ sqlite3_debug_mutex *p = (sqlite3_debug_mutex*)pX;
+ assert( p->id==SQLITE_MUTEX_RECURSIVE || debugMutexNotheld(pX) );
+ p->cnt++;
+}
+static int debugMutexTry(sqlite3_mutex *pX){
+ sqlite3_debug_mutex *p = (sqlite3_debug_mutex*)pX;
+ assert( p->id==SQLITE_MUTEX_RECURSIVE || debugMutexNotheld(pX) );
+ p->cnt++;
+ return SQLITE_OK;
+}
+
+/*
+** The sqlite3_mutex_leave() routine exits a mutex that was
+** previously entered by the same thread. The behavior
+** is undefined if the mutex is not currently entered or
+** is not currently allocated. SQLite will never do either.
+*/
+static void debugMutexLeave(sqlite3_mutex *pX){
+ sqlite3_debug_mutex *p = (sqlite3_debug_mutex*)pX;
+ assert( debugMutexHeld(pX) );
+ p->cnt--;
+ assert( p->id==SQLITE_MUTEX_RECURSIVE || debugMutexNotheld(pX) );
+}
+
+SQLITE_PRIVATE sqlite3_mutex_methods const *sqlite3NoopMutex(void){
+ static const sqlite3_mutex_methods sMutex = {
+ debugMutexInit,
+ debugMutexEnd,
+ debugMutexAlloc,
+ debugMutexFree,
+ debugMutexEnter,
+ debugMutexTry,
+ debugMutexLeave,
+
+ debugMutexHeld,
+ debugMutexNotheld
+ };
+
+ return &sMutex;
+}
+#endif /* SQLITE_DEBUG */
+
+/*
+** If compiled with SQLITE_MUTEX_NOOP, then the no-op mutex implementation
+** is used regardless of the run-time threadsafety setting.
+*/
+#ifdef SQLITE_MUTEX_NOOP
+SQLITE_PRIVATE sqlite3_mutex_methods const *sqlite3DefaultMutex(void){
+ return sqlite3NoopMutex();
+}
+#endif /* defined(SQLITE_MUTEX_NOOP) */
+#endif /* !defined(SQLITE_MUTEX_OMIT) */
+
+/************** End of mutex_noop.c ******************************************/
+/************** Begin file mutex_unix.c **************************************/
+/*
+** 2007 August 28
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+** This file contains the C functions that implement mutexes for pthreads
+*/
+/* #include "sqliteInt.h" */
+
+/*
+** The code in this file is only used if we are compiling threadsafe
+** under unix with pthreads.
+**
+** Note that this implementation requires a version of pthreads that
+** supports recursive mutexes.
+*/
+#ifdef SQLITE_MUTEX_PTHREADS
+
+#include <pthread.h>
+
+/*
+** The sqlite3_mutex.id, sqlite3_mutex.nRef, and sqlite3_mutex.owner fields
+** are necessary under two conditions: (1) Debug builds and (2) using
+** home-grown mutexes. Encapsulate these conditions into a single #define.
+*/
+#if defined(SQLITE_DEBUG) || defined(SQLITE_HOMEGROWN_RECURSIVE_MUTEX)
+# define SQLITE_MUTEX_NREF 1
+#else
+# define SQLITE_MUTEX_NREF 0
+#endif
+
+/*
+** Each recursive mutex is an instance of the following structure.
+*/
+struct sqlite3_mutex {
+ pthread_mutex_t mutex; /* Mutex controlling the lock */
+#if SQLITE_MUTEX_NREF || defined(SQLITE_ENABLE_API_ARMOR)
+ int id; /* Mutex type */
+#endif
+#if SQLITE_MUTEX_NREF
+ volatile int nRef; /* Number of entrances */
+ volatile pthread_t owner; /* Thread that is within this mutex */
+ int trace; /* True to trace changes */
+#endif
+};
+#if SQLITE_MUTEX_NREF
+# define SQLITE3_MUTEX_INITIALIZER(id) \
+ {PTHREAD_MUTEX_INITIALIZER,id,0,(pthread_t)0,0}
+#elif defined(SQLITE_ENABLE_API_ARMOR)
+# define SQLITE3_MUTEX_INITIALIZER(id) { PTHREAD_MUTEX_INITIALIZER, id }
+#else
+#define SQLITE3_MUTEX_INITIALIZER(id) { PTHREAD_MUTEX_INITIALIZER }
+#endif
+
+/*
+** The sqlite3_mutex_held() and sqlite3_mutex_notheld() routines are
+** intended for use only inside assert() statements. On some platforms,
+** there might be race conditions that can cause these routines to
+** deliver incorrect results. In particular, if pthread_equal() is
+** not an atomic operation, then these routines might deliver
+** incorrect results. On most platforms, pthread_equal() is a
+** comparison of two integers and is therefore atomic. But we are
+** told that HPUX is not such a platform. If so, then these routines
+** will not always work correctly on HPUX.
+**
+** On those platforms where pthread_equal() is not atomic, SQLite
+** should be compiled without -DSQLITE_DEBUG and with -DNDEBUG to
+** make sure no assert() statements are evaluated and hence these
+** routines are never called.
+*/
+#if !defined(NDEBUG) || defined(SQLITE_DEBUG)
+static int pthreadMutexHeld(sqlite3_mutex *p){
+ return (p->nRef!=0 && pthread_equal(p->owner, pthread_self()));
+}
+static int pthreadMutexNotheld(sqlite3_mutex *p){
+ return p->nRef==0 || pthread_equal(p->owner, pthread_self())==0;
+}
+#endif
+
+/*
+** Try to provide a memory barrier operation, needed for initialization
+** and also for the implementation of xShmBarrier in the VFS in cases
+** where SQLite is compiled without mutexes.
+*/
+SQLITE_PRIVATE void sqlite3MemoryBarrier(void){
+#if defined(SQLITE_MEMORY_BARRIER)
+ SQLITE_MEMORY_BARRIER;
+#elif defined(__GNUC__) && GCC_VERSION>=4001000
+ __sync_synchronize();
+#endif
+}
+
+/*
+** Initialize and deinitialize the mutex subsystem.
+*/
+static int pthreadMutexInit(void){ return SQLITE_OK; }
+static int pthreadMutexEnd(void){ return SQLITE_OK; }
+
+/*
+** The sqlite3_mutex_alloc() routine allocates a new
+** mutex and returns a pointer to it. If it returns NULL
+** that means that a mutex could not be allocated. SQLite
+** will unwind its stack and return an error. The argument
+** to sqlite3_mutex_alloc() is one of these integer constants:
+**
+** <ul>
+** <li> SQLITE_MUTEX_FAST
+** <li> SQLITE_MUTEX_RECURSIVE
+** <li> SQLITE_MUTEX_STATIC_MAIN
+** <li> SQLITE_MUTEX_STATIC_MEM
+** <li> SQLITE_MUTEX_STATIC_OPEN
+** <li> SQLITE_MUTEX_STATIC_PRNG
+** <li> SQLITE_MUTEX_STATIC_LRU
+** <li> SQLITE_MUTEX_STATIC_PMEM
+** <li> SQLITE_MUTEX_STATIC_APP1
+** <li> SQLITE_MUTEX_STATIC_APP2
+** <li> SQLITE_MUTEX_STATIC_APP3
+** <li> SQLITE_MUTEX_STATIC_VFS1
+** <li> SQLITE_MUTEX_STATIC_VFS2
+** <li> SQLITE_MUTEX_STATIC_VFS3
+** </ul>
+**
+** The first two constants cause sqlite3_mutex_alloc() to create
+** a new mutex. The new mutex is recursive when SQLITE_MUTEX_RECURSIVE
+** is used but not necessarily so when SQLITE_MUTEX_FAST is used.
+** The mutex implementation does not need to make a distinction
+** between SQLITE_MUTEX_RECURSIVE and SQLITE_MUTEX_FAST if it does
+** not want to. But SQLite will only request a recursive mutex in
+** cases where it really needs one. If a faster non-recursive mutex
+** implementation is available on the host platform, the mutex subsystem
+** might return such a mutex in response to SQLITE_MUTEX_FAST.
+**
+** The other allowed parameters to sqlite3_mutex_alloc() each return
+** a pointer to a static preexisting mutex. Six static mutexes are
+** used by the current version of SQLite. Future versions of SQLite
+** may add additional static mutexes. Static mutexes are for internal
+** use by SQLite only. Applications that use SQLite mutexes should
+** use only the dynamic mutexes returned by SQLITE_MUTEX_FAST or
+** SQLITE_MUTEX_RECURSIVE.
+**
+** Note that if one of the dynamic mutex parameters (SQLITE_MUTEX_FAST
+** or SQLITE_MUTEX_RECURSIVE) is used then sqlite3_mutex_alloc()
+** returns a different mutex on every call. But for the static
+** mutex types, the same mutex is returned on every call that has
+** the same type number.
+*/
+static sqlite3_mutex *pthreadMutexAlloc(int iType){
+ static sqlite3_mutex staticMutexes[] = {
+ SQLITE3_MUTEX_INITIALIZER(2),
+ SQLITE3_MUTEX_INITIALIZER(3),
+ SQLITE3_MUTEX_INITIALIZER(4),
+ SQLITE3_MUTEX_INITIALIZER(5),
+ SQLITE3_MUTEX_INITIALIZER(6),
+ SQLITE3_MUTEX_INITIALIZER(7),
+ SQLITE3_MUTEX_INITIALIZER(8),
+ SQLITE3_MUTEX_INITIALIZER(9),
+ SQLITE3_MUTEX_INITIALIZER(10),
+ SQLITE3_MUTEX_INITIALIZER(11),
+ SQLITE3_MUTEX_INITIALIZER(12),
+ SQLITE3_MUTEX_INITIALIZER(13)
+ };
+ sqlite3_mutex *p;
+ switch( iType ){
+ case SQLITE_MUTEX_RECURSIVE: {
+ p = sqlite3MallocZero( sizeof(*p) );
+ if( p ){
+#ifdef SQLITE_HOMEGROWN_RECURSIVE_MUTEX
+ /* If recursive mutexes are not available, we will have to
+ ** build our own. See below. */
+ pthread_mutex_init(&p->mutex, 0);
+#else
+ /* Use a recursive mutex if it is available */
+ pthread_mutexattr_t recursiveAttr;
+ pthread_mutexattr_init(&recursiveAttr);
+ pthread_mutexattr_settype(&recursiveAttr, PTHREAD_MUTEX_RECURSIVE);
+ pthread_mutex_init(&p->mutex, &recursiveAttr);
+ pthread_mutexattr_destroy(&recursiveAttr);
+#endif
+#if SQLITE_MUTEX_NREF || defined(SQLITE_ENABLE_API_ARMOR)
+ p->id = SQLITE_MUTEX_RECURSIVE;
+#endif
+ }
+ break;
+ }
+ case SQLITE_MUTEX_FAST: {
+ p = sqlite3MallocZero( sizeof(*p) );
+ if( p ){
+ pthread_mutex_init(&p->mutex, 0);
+#if SQLITE_MUTEX_NREF || defined(SQLITE_ENABLE_API_ARMOR)
+ p->id = SQLITE_MUTEX_FAST;
+#endif
+ }
+ break;
+ }
+ default: {
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( iType-2<0 || iType-2>=ArraySize(staticMutexes) ){
+ (void)SQLITE_MISUSE_BKPT;
+ return 0;
+ }
+#endif
+ p = &staticMutexes[iType-2];
+ break;
+ }
+ }
+#if SQLITE_MUTEX_NREF || defined(SQLITE_ENABLE_API_ARMOR)
+ assert( p==0 || p->id==iType );
+#endif
+ return p;
+}
+
+
+/*
+** This routine deallocates a previously
+** allocated mutex. SQLite is careful to deallocate every
+** mutex that it allocates.
+*/
+static void pthreadMutexFree(sqlite3_mutex *p){
+ assert( p->nRef==0 );
+#if SQLITE_ENABLE_API_ARMOR
+ if( p->id==SQLITE_MUTEX_FAST || p->id==SQLITE_MUTEX_RECURSIVE )
+#endif
+ {
+ pthread_mutex_destroy(&p->mutex);
+ sqlite3_free(p);
+ }
+#ifdef SQLITE_ENABLE_API_ARMOR
+ else{
+ (void)SQLITE_MISUSE_BKPT;
+ }
+#endif
+}
+
+/*
+** The sqlite3_mutex_enter() and sqlite3_mutex_try() routines attempt
+** to enter a mutex. If another thread is already within the mutex,
+** sqlite3_mutex_enter() will block and sqlite3_mutex_try() will return
+** SQLITE_BUSY. The sqlite3_mutex_try() interface returns SQLITE_OK
+** upon successful entry. Mutexes created using SQLITE_MUTEX_RECURSIVE can
+** be entered multiple times by the same thread. In such cases, the
+** mutex must be exited an equal number of times before another thread
+** can enter. If the same thread tries to enter any other kind of mutex
+** more than once, the behavior is undefined.
+*/
+static void pthreadMutexEnter(sqlite3_mutex *p){
+ assert( p->id==SQLITE_MUTEX_RECURSIVE || pthreadMutexNotheld(p) );
+
+#ifdef SQLITE_HOMEGROWN_RECURSIVE_MUTEX
+ /* If recursive mutexes are not available, then we have to grow
+ ** our own. This implementation assumes that pthread_equal()
+ ** is atomic - that it cannot be deceived into thinking self
+ ** and p->owner are equal if p->owner changes between two values
+ ** that are not equal to self while the comparison is taking place.
+ ** This implementation also assumes a coherent cache - that
+ ** separate processes cannot read different values from the same
+ ** address at the same time. If either of these two conditions
+ ** are not met, then the mutexes will fail and problems will result.
+ */
+ {
+ pthread_t self = pthread_self();
+ if( p->nRef>0 && pthread_equal(p->owner, self) ){
+ p->nRef++;
+ }else{
+ pthread_mutex_lock(&p->mutex);
+ assert( p->nRef==0 );
+ p->owner = self;
+ p->nRef = 1;
+ }
+ }
+#else
+ /* Use the built-in recursive mutexes if they are available.
+ */
+ pthread_mutex_lock(&p->mutex);
+#if SQLITE_MUTEX_NREF
+ assert( p->nRef>0 || p->owner==0 );
+ p->owner = pthread_self();
+ p->nRef++;
+#endif
+#endif
+
+#ifdef SQLITE_DEBUG
+ if( p->trace ){
+ printf("enter mutex %p (%d) with nRef=%d\n", p, p->trace, p->nRef);
+ }
+#endif
+}
+static int pthreadMutexTry(sqlite3_mutex *p){
+ int rc;
+ assert( p->id==SQLITE_MUTEX_RECURSIVE || pthreadMutexNotheld(p) );
+
+#ifdef SQLITE_HOMEGROWN_RECURSIVE_MUTEX
+ /* If recursive mutexes are not available, then we have to grow
+ ** our own. This implementation assumes that pthread_equal()
+ ** is atomic - that it cannot be deceived into thinking self
+ ** and p->owner are equal if p->owner changes between two values
+ ** that are not equal to self while the comparison is taking place.
+ ** This implementation also assumes a coherent cache - that
+ ** separate processes cannot read different values from the same
+ ** address at the same time. If either of these two conditions
+ ** are not met, then the mutexes will fail and problems will result.
+ */
+ {
+ pthread_t self = pthread_self();
+ if( p->nRef>0 && pthread_equal(p->owner, self) ){
+ p->nRef++;
+ rc = SQLITE_OK;
+ }else if( pthread_mutex_trylock(&p->mutex)==0 ){
+ assert( p->nRef==0 );
+ p->owner = self;
+ p->nRef = 1;
+ rc = SQLITE_OK;
+ }else{
+ rc = SQLITE_BUSY;
+ }
+ }
+#else
+ /* Use the built-in recursive mutexes if they are available.
+ */
+ if( pthread_mutex_trylock(&p->mutex)==0 ){
+#if SQLITE_MUTEX_NREF
+ p->owner = pthread_self();
+ p->nRef++;
+#endif
+ rc = SQLITE_OK;
+ }else{
+ rc = SQLITE_BUSY;
+ }
+#endif
+
+#ifdef SQLITE_DEBUG
+ if( rc==SQLITE_OK && p->trace ){
+ printf("enter mutex %p (%d) with nRef=%d\n", p, p->trace, p->nRef);
+ }
+#endif
+ return rc;
+}
+
+/*
+** The sqlite3_mutex_leave() routine exits a mutex that was
+** previously entered by the same thread. The behavior
+** is undefined if the mutex is not currently entered or
+** is not currently allocated. SQLite will never do either.
+*/
+static void pthreadMutexLeave(sqlite3_mutex *p){
+ assert( pthreadMutexHeld(p) );
+#if SQLITE_MUTEX_NREF
+ p->nRef--;
+ if( p->nRef==0 ) p->owner = 0;
+#endif
+ assert( p->nRef==0 || p->id==SQLITE_MUTEX_RECURSIVE );
+
+#ifdef SQLITE_HOMEGROWN_RECURSIVE_MUTEX
+ if( p->nRef==0 ){
+ pthread_mutex_unlock(&p->mutex);
+ }
+#else
+ pthread_mutex_unlock(&p->mutex);
+#endif
+
+#ifdef SQLITE_DEBUG
+ if( p->trace ){
+ printf("leave mutex %p (%d) with nRef=%d\n", p, p->trace, p->nRef);
+ }
+#endif
+}
+
+SQLITE_PRIVATE sqlite3_mutex_methods const *sqlite3DefaultMutex(void){
+ static const sqlite3_mutex_methods sMutex = {
+ pthreadMutexInit,
+ pthreadMutexEnd,
+ pthreadMutexAlloc,
+ pthreadMutexFree,
+ pthreadMutexEnter,
+ pthreadMutexTry,
+ pthreadMutexLeave,
+#ifdef SQLITE_DEBUG
+ pthreadMutexHeld,
+ pthreadMutexNotheld
+#else
+ 0,
+ 0
+#endif
+ };
+
+ return &sMutex;
+}
+
+#endif /* SQLITE_MUTEX_PTHREADS */
+
+/************** End of mutex_unix.c ******************************************/
+/************** Begin file mutex_w32.c ***************************************/
+/*
+** 2007 August 14
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+** This file contains the C functions that implement mutexes for Win32.
+*/
+/* #include "sqliteInt.h" */
+
+#if SQLITE_OS_WIN
+/*
+** Include code that is common to all os_*.c files
+*/
+/************** Include os_common.h in the middle of mutex_w32.c *************/
+/************** Begin file os_common.h ***************************************/
+/*
+** 2004 May 22
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+******************************************************************************
+**
+** This file contains macros and a little bit of code that is common to
+** all of the platform-specific files (os_*.c) and is #included into those
+** files.
+**
+** This file should be #included by the os_*.c files only. It is not a
+** general purpose header file.
+*/
+#ifndef _OS_COMMON_H_
+#define _OS_COMMON_H_
+
+/*
+** At least two bugs have slipped in because we changed the MEMORY_DEBUG
+** macro to SQLITE_DEBUG and some older makefiles have not yet made the
+** switch. The following code should catch this problem at compile-time.
+*/
+#ifdef MEMORY_DEBUG
+# error "The MEMORY_DEBUG macro is obsolete. Use SQLITE_DEBUG instead."
+#endif
+
+/*
+** Macros for performance tracing. Normally turned off. Only works
+** on i486 hardware.
+*/
+#ifdef SQLITE_PERFORMANCE_TRACE
+
+/*
+** hwtime.h contains inline assembler code for implementing
+** high-performance timing routines.
+*/
+/************** Include hwtime.h in the middle of os_common.h ****************/
+/************** Begin file hwtime.h ******************************************/
+/*
+** 2008 May 27
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+******************************************************************************
+**
+** This file contains inline asm code for retrieving "high-performance"
+** counters for x86 and x86_64 class CPUs.
+*/
+#ifndef SQLITE_HWTIME_H
+#define SQLITE_HWTIME_H
+
+/*
+** The following routine only works on pentium-class (or newer) processors.
+** It uses the RDTSC opcode to read the cycle count value out of the
+** processor and returns that value. This can be used for high-res
+** profiling.
+*/
+#if !defined(__STRICT_ANSI__) && \
+ (defined(__GNUC__) || defined(_MSC_VER)) && \
+ (defined(i386) || defined(__i386__) || defined(_M_IX86))
+
+ #if defined(__GNUC__)
+
+ __inline__ sqlite_uint64 sqlite3Hwtime(void){
+ unsigned int lo, hi;
+ __asm__ __volatile__ ("rdtsc" : "=a" (lo), "=d" (hi));
+ return (sqlite_uint64)hi << 32 | lo;
+ }
+
+ #elif defined(_MSC_VER)
+
+ __declspec(naked) __inline sqlite_uint64 __cdecl sqlite3Hwtime(void){
+ __asm {
+ rdtsc
+ ret ; return value at EDX:EAX
+ }
+ }
+
+ #endif
+
+#elif !defined(__STRICT_ANSI__) && (defined(__GNUC__) && defined(__x86_64__))
+
+ __inline__ sqlite_uint64 sqlite3Hwtime(void){
+ unsigned long val;
+ __asm__ __volatile__ ("rdtsc" : "=A" (val));
+ return val;
+ }
+
+#elif !defined(__STRICT_ANSI__) && (defined(__GNUC__) && defined(__ppc__))
+
+ __inline__ sqlite_uint64 sqlite3Hwtime(void){
+ unsigned long long retval;
+ unsigned long junk;
+ __asm__ __volatile__ ("\n\
+ 1: mftbu %1\n\
+ mftb %L0\n\
+ mftbu %0\n\
+ cmpw %0,%1\n\
+ bne 1b"
+ : "=r" (retval), "=r" (junk));
+ return retval;
+ }
+
+#else
+
+ /*
+ ** asm() is needed for hardware timing support. Without asm(),
+ ** disable the sqlite3Hwtime() routine.
+ **
+ ** sqlite3Hwtime() is only used for some obscure debugging
+ ** and analysis configurations, not in any deliverable, so this
+ ** should not be a great loss.
+ */
+SQLITE_PRIVATE sqlite_uint64 sqlite3Hwtime(void){ return ((sqlite_uint64)0); }
+
+#endif
+
+#endif /* !defined(SQLITE_HWTIME_H) */
+
+/************** End of hwtime.h **********************************************/
+/************** Continuing where we left off in os_common.h ******************/
+
+static sqlite_uint64 g_start;
+static sqlite_uint64 g_elapsed;
+#define TIMER_START g_start=sqlite3Hwtime()
+#define TIMER_END g_elapsed=sqlite3Hwtime()-g_start
+#define TIMER_ELAPSED g_elapsed
+#else
+#define TIMER_START
+#define TIMER_END
+#define TIMER_ELAPSED ((sqlite_uint64)0)
+#endif
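+
+/*
+** Editor's note: how the timing macros are meant to be used (an
+** illustrative sketch, not part of the original source; "read" stands
+** in for whatever operation is being profiled):
+**
+**   TIMER_START;
+**   got = read(fd, zBuf, nBuf);
+**   TIMER_END;
+**   printf("read: %llu cycles\n", (unsigned long long)TIMER_ELAPSED);
+*/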
+
+/*
+** If we compile with the SQLITE_TEST macro set, then the following block
+** of code will give us the ability to simulate a disk I/O error. This
+** is used for testing the I/O recovery logic.
+*/
+#if defined(SQLITE_TEST)
+SQLITE_API extern int sqlite3_io_error_hit;
+SQLITE_API extern int sqlite3_io_error_hardhit;
+SQLITE_API extern int sqlite3_io_error_pending;
+SQLITE_API extern int sqlite3_io_error_persist;
+SQLITE_API extern int sqlite3_io_error_benign;
+SQLITE_API extern int sqlite3_diskfull_pending;
+SQLITE_API extern int sqlite3_diskfull;
+#define SimulateIOErrorBenign(X) sqlite3_io_error_benign=(X)
+#define SimulateIOError(CODE) \
+ if( (sqlite3_io_error_persist && sqlite3_io_error_hit) \
+ || sqlite3_io_error_pending-- == 1 ) \
+ { local_ioerr(); CODE; }
+static void local_ioerr(){
+ IOTRACE(("IOERR\n"));
+ sqlite3_io_error_hit++;
+ if( !sqlite3_io_error_benign ) sqlite3_io_error_hardhit++;
+}
+#define SimulateDiskfullError(CODE) \
+ if( sqlite3_diskfull_pending ){ \
+ if( sqlite3_diskfull_pending == 1 ){ \
+ local_ioerr(); \
+ sqlite3_diskfull = 1; \
+ sqlite3_io_error_hit = 1; \
+ CODE; \
+ }else{ \
+ sqlite3_diskfull_pending--; \
+ } \
+ }
+#else
+#define SimulateIOErrorBenign(X)
+#define SimulateIOError(A)
+#define SimulateDiskfullError(A)
+#endif /* defined(SQLITE_TEST) */
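+
+/*
+** Editor's note: the intended injection pattern (an illustrative
+** sketch, not part of the original source; osRead stands in for a
+** real VFS primitive). A VFS method plants SimulateIOError() after
+** the real operation; in SQLITE_TEST builds the harness counts down
+** sqlite3_io_error_pending and then forces the failure path:
+**
+**   got = osRead(fd, zBuf, nBuf);
+**   SimulateIOError( got = -1 );
+**   if( got<0 ) return SQLITE_IOERR_READ;
+*/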
+
+/*
+** When testing, keep a count of the number of open files.
+*/
+#if defined(SQLITE_TEST)
+SQLITE_API extern int sqlite3_open_file_count;
+#define OpenCounter(X) sqlite3_open_file_count+=(X)
+#else
+#define OpenCounter(X)
+#endif /* defined(SQLITE_TEST) */
+
+#endif /* !defined(_OS_COMMON_H_) */
+
+/************** End of os_common.h *******************************************/
+/************** Continuing where we left off in mutex_w32.c ******************/
+
+/*
+** Include the header file for the Windows VFS.
+*/
+/************** Include os_win.h in the middle of mutex_w32.c ****************/
+/************** Begin file os_win.h ******************************************/
+/*
+** 2013 November 25
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+******************************************************************************
+**
+** This file contains code that is specific to Windows.
+*/
+#ifndef SQLITE_OS_WIN_H
+#define SQLITE_OS_WIN_H
+
+/*
+** Include the primary Windows SDK header file.
+*/
+#include "windows.h"
+
+#ifdef __CYGWIN__
+# include <sys/cygwin.h>
+# include <errno.h> /* amalgamator: dontcache */
+#endif
+
+/*
+** Determine if we are dealing with Windows NT.
+**
+** We ought to be able to determine if we are compiling for Windows 9x or
+** Windows NT using the _WIN32_WINNT macro as follows:
+**
+** #if defined(_WIN32_WINNT)
+** # define SQLITE_OS_WINNT 1
+** #else
+** # define SQLITE_OS_WINNT 0
+** #endif
+**
+** However, Visual Studio 2005 does not set _WIN32_WINNT by default, as
+** it ought to, so the above test does not work. We'll just assume that
+** everything is Windows NT unless the programmer explicitly says otherwise
+** by setting SQLITE_OS_WINNT to 0.
+*/
+#if SQLITE_OS_WIN && !defined(SQLITE_OS_WINNT)
+# define SQLITE_OS_WINNT 1
+#endif
+
+/*
+** Determine if we are dealing with Windows CE - which has a much reduced
+** API.
+*/
+#if defined(_WIN32_WCE)
+# define SQLITE_OS_WINCE 1
+#else
+# define SQLITE_OS_WINCE 0
+#endif
+
+/*
+** Determine if we are dealing with WinRT, which provides only a subset of
+** the full Win32 API.
+*/
+#if !defined(SQLITE_OS_WINRT)
+# define SQLITE_OS_WINRT 0
+#endif
+
+/*
+** For WinCE, some API function parameters do not appear to be declared as
+** volatile.
+*/
+#if SQLITE_OS_WINCE
+# define SQLITE_WIN32_VOLATILE
+#else
+# define SQLITE_WIN32_VOLATILE volatile
+#endif
+
+/*
+** For some Windows sub-platforms, the _beginthreadex() / _endthreadex()
+** functions are not available (e.g. those not using MSVC, Cygwin, etc).
+*/
+#if SQLITE_OS_WIN && !SQLITE_OS_WINCE && !SQLITE_OS_WINRT && \
+ SQLITE_THREADSAFE>0 && !defined(__CYGWIN__)
+# define SQLITE_OS_WIN_THREADS 1
+#else
+# define SQLITE_OS_WIN_THREADS 0
+#endif
+
+#endif /* SQLITE_OS_WIN_H */
+
+/************** End of os_win.h **********************************************/
+/************** Continuing where we left off in mutex_w32.c ******************/
+#endif
+
+/*
+** The code in this file is only used if we are compiling multithreaded
+** on a Win32 system.
+*/
+#ifdef SQLITE_MUTEX_W32
+
+/*
+** Each recursive mutex is an instance of the following structure.
+*/
+struct sqlite3_mutex {
+ CRITICAL_SECTION mutex; /* Mutex controlling the lock */
+ int id; /* Mutex type */
+#ifdef SQLITE_DEBUG
+ volatile int nRef; /* Number of entrances */
+ volatile DWORD owner; /* Thread holding this mutex */
+ volatile LONG trace; /* True to trace changes */
+#endif
+};
+
+/*
+** These are the initializer values used when declaring a "static" mutex
+** on Win32. It should be noted that all mutexes require initialization
+** on the Win32 platform.
+*/
+#define SQLITE_W32_MUTEX_INITIALIZER { 0 }
+
+#ifdef SQLITE_DEBUG
+#define SQLITE3_MUTEX_INITIALIZER(id) { SQLITE_W32_MUTEX_INITIALIZER, id, \
+ 0L, (DWORD)0, 0 }
+#else
+#define SQLITE3_MUTEX_INITIALIZER(id) { SQLITE_W32_MUTEX_INITIALIZER, id }
+#endif
+
+#ifdef SQLITE_DEBUG
+/*
+** The sqlite3_mutex_held() and sqlite3_mutex_notheld() routine are
+** intended for use only inside assert() statements.
+*/
+static int winMutexHeld(sqlite3_mutex *p){
+ return p->nRef!=0 && p->owner==GetCurrentThreadId();
+}
+
+static int winMutexNotheld2(sqlite3_mutex *p, DWORD tid){
+ return p->nRef==0 || p->owner!=tid;
+}
+
+static int winMutexNotheld(sqlite3_mutex *p){
+ DWORD tid = GetCurrentThreadId();
+ return winMutexNotheld2(p, tid);
+}
+#endif
+
+/*
+** Try to provide a memory barrier operation, needed for initialization
+** and also for the xShmBarrier method of the VFS in cases when SQLite is
+** compiled without mutexes (SQLITE_THREADSAFE=0).
+*/
+SQLITE_PRIVATE void sqlite3MemoryBarrier(void){
+#if defined(SQLITE_MEMORY_BARRIER)
+ SQLITE_MEMORY_BARRIER;
+#elif defined(__GNUC__)
+ __sync_synchronize();
+#elif MSVC_VERSION>=1300
+ _ReadWriteBarrier();
+#elif defined(MemoryBarrier)
+ MemoryBarrier();
+#endif
+}
+
+/*
+** Initialize and deinitialize the mutex subsystem.
+*/
+static sqlite3_mutex winMutex_staticMutexes[] = {
+ SQLITE3_MUTEX_INITIALIZER(2),
+ SQLITE3_MUTEX_INITIALIZER(3),
+ SQLITE3_MUTEX_INITIALIZER(4),
+ SQLITE3_MUTEX_INITIALIZER(5),
+ SQLITE3_MUTEX_INITIALIZER(6),
+ SQLITE3_MUTEX_INITIALIZER(7),
+ SQLITE3_MUTEX_INITIALIZER(8),
+ SQLITE3_MUTEX_INITIALIZER(9),
+ SQLITE3_MUTEX_INITIALIZER(10),
+ SQLITE3_MUTEX_INITIALIZER(11),
+ SQLITE3_MUTEX_INITIALIZER(12),
+ SQLITE3_MUTEX_INITIALIZER(13)
+};
+
+static int winMutex_isInit = 0;
+static int winMutex_isNt = -1; /* <0 means "need to query" */
+
+/* As the winMutexInit() and winMutexEnd() functions are called as part
+** of the sqlite3_initialize() and sqlite3_shutdown() processing, the
+** "interlocked" magic used here is probably not strictly necessary.
+*/
+static LONG SQLITE_WIN32_VOLATILE winMutex_lock = 0;
+
+SQLITE_API int sqlite3_win32_is_nt(void); /* os_win.c */
+SQLITE_API void sqlite3_win32_sleep(DWORD milliseconds); /* os_win.c */
+
+static int winMutexInit(void){
+ /* The first to increment to 1 does actual initialization */
+ if( InterlockedCompareExchange(&winMutex_lock, 1, 0)==0 ){
+ int i;
+ for(i=0; i<ArraySize(winMutex_staticMutexes); i++){
+#if SQLITE_OS_WINRT
+ InitializeCriticalSectionEx(&winMutex_staticMutexes[i].mutex, 0, 0);
+#else
+ InitializeCriticalSection(&winMutex_staticMutexes[i].mutex);
+#endif
+ }
+ winMutex_isInit = 1;
+ }else{
+ /* Another thread is (in the process of) initializing the static
+ ** mutexes */
+ while( !winMutex_isInit ){
+ sqlite3_win32_sleep(1);
+ }
+ }
+ return SQLITE_OK;
+}
+
+static int winMutexEnd(void){
+ /* The first to decrement to 0 does the actual shutdown
+ ** (and should be the last to shut down). */
+ if( InterlockedCompareExchange(&winMutex_lock, 0, 1)==1 ){
+ if( winMutex_isInit==1 ){
+ int i;
+ for(i=0; i<ArraySize(winMutex_staticMutexes); i++){
+ DeleteCriticalSection(&winMutex_staticMutexes[i].mutex);
+ }
+ winMutex_isInit = 0;
+ }
+ }
+ return SQLITE_OK;
+}
+
+/*
+** The sqlite3_mutex_alloc() routine allocates a new
+** mutex and returns a pointer to it. If it returns NULL
+** that means that a mutex could not be allocated. SQLite
+** will unwind its stack and return an error. The argument
+** to sqlite3_mutex_alloc() is one of these integer constants:
+**
+** <ul>
+** <li> SQLITE_MUTEX_FAST
+** <li> SQLITE_MUTEX_RECURSIVE
+** <li> SQLITE_MUTEX_STATIC_MAIN
+** <li> SQLITE_MUTEX_STATIC_MEM
+** <li> SQLITE_MUTEX_STATIC_OPEN
+** <li> SQLITE_MUTEX_STATIC_PRNG
+** <li> SQLITE_MUTEX_STATIC_LRU
+** <li> SQLITE_MUTEX_STATIC_PMEM
+** <li> SQLITE_MUTEX_STATIC_APP1
+** <li> SQLITE_MUTEX_STATIC_APP2
+** <li> SQLITE_MUTEX_STATIC_APP3
+** <li> SQLITE_MUTEX_STATIC_VFS1
+** <li> SQLITE_MUTEX_STATIC_VFS2
+** <li> SQLITE_MUTEX_STATIC_VFS3
+** </ul>
+**
+** The first two constants cause sqlite3_mutex_alloc() to create
+** a new mutex. The new mutex is recursive when SQLITE_MUTEX_RECURSIVE
+** is used but not necessarily so when SQLITE_MUTEX_FAST is used.
+** The mutex implementation does not need to make a distinction
+** between SQLITE_MUTEX_RECURSIVE and SQLITE_MUTEX_FAST if it does
+** not want to. But SQLite will only request a recursive mutex in
+** cases where it really needs one. If a faster non-recursive mutex
+** implementation is available on the host platform, the mutex subsystem
+** might return such a mutex in response to SQLITE_MUTEX_FAST.
+**
+** The other allowed parameters to sqlite3_mutex_alloc() each return
+** a pointer to a static preexisting mutex. Twelve static mutexes are
+** used by the current version of SQLite. Future versions of SQLite
+** may add additional static mutexes. Static mutexes are for internal
+** use by SQLite only. Applications that use SQLite mutexes should
+** use only the dynamic mutexes returned by SQLITE_MUTEX_FAST or
+** SQLITE_MUTEX_RECURSIVE.
+**
+** Note that if one of the dynamic mutex parameters (SQLITE_MUTEX_FAST
+** or SQLITE_MUTEX_RECURSIVE) is used then sqlite3_mutex_alloc()
+** returns a different mutex on every call. But for the static
+** mutex types, the same mutex is returned on every call that has
+** the same type number.
+*/
+static sqlite3_mutex *winMutexAlloc(int iType){
+ sqlite3_mutex *p;
+
+ switch( iType ){
+ case SQLITE_MUTEX_FAST:
+ case SQLITE_MUTEX_RECURSIVE: {
+ p = sqlite3MallocZero( sizeof(*p) );
+ if( p ){
+ p->id = iType;
+#ifdef SQLITE_DEBUG
+#ifdef SQLITE_WIN32_MUTEX_TRACE_DYNAMIC
+ p->trace = 1;
+#endif
+#endif
+#if SQLITE_OS_WINRT
+ InitializeCriticalSectionEx(&p->mutex, 0, 0);
+#else
+ InitializeCriticalSection(&p->mutex);
+#endif
+ }
+ break;
+ }
+ default: {
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( iType-2<0 || iType-2>=ArraySize(winMutex_staticMutexes) ){
+ (void)SQLITE_MISUSE_BKPT;
+ return 0;
+ }
+#endif
+ p = &winMutex_staticMutexes[iType-2];
+#ifdef SQLITE_DEBUG
+#ifdef SQLITE_WIN32_MUTEX_TRACE_STATIC
+ InterlockedCompareExchange(&p->trace, 1, 0);
+#endif
+#endif
+ break;
+ }
+ }
+ assert( p==0 || p->id==iType );
+ return p;
+}
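+
+/* Usage sketch, as seen through the public API once this implementation
+** is installed (illustrative):
+**
+**   sqlite3_mutex *pDyn = sqlite3_mutex_alloc(SQLITE_MUTEX_FAST);
+**   sqlite3_mutex *pMem = sqlite3_mutex_alloc(SQLITE_MUTEX_STATIC_MEM);
+**   ...
+**   sqlite3_mutex_free(pDyn);    dynamic mutexes must be freed;
+**                                static mutexes are never freed
+*/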
+
+
+/*
+** This routine deallocates a previously
+** allocated mutex. SQLite is careful to deallocate every
+** mutex that it allocates.
+*/
+static void winMutexFree(sqlite3_mutex *p){
+ assert( p );
+ assert( p->nRef==0 && p->owner==0 );
+ if( p->id==SQLITE_MUTEX_FAST || p->id==SQLITE_MUTEX_RECURSIVE ){
+ DeleteCriticalSection(&p->mutex);
+ sqlite3_free(p);
+ }else{
+#ifdef SQLITE_ENABLE_API_ARMOR
+ (void)SQLITE_MISUSE_BKPT;
+#endif
+ }
+}
+
+/*
+** The sqlite3_mutex_enter() and sqlite3_mutex_try() routines attempt
+** to enter a mutex. If another thread is already within the mutex,
+** sqlite3_mutex_enter() will block and sqlite3_mutex_try() will return
+** SQLITE_BUSY. The sqlite3_mutex_try() interface returns SQLITE_OK
+** upon successful entry. Mutexes created using SQLITE_MUTEX_RECURSIVE can
+** be entered multiple times by the same thread. In such cases the,
+** mutex must be exited an equal number of times before another thread
+** can enter. If the same thread tries to enter any other kind of mutex
+** more than once, the behavior is undefined.
+*/
+static void winMutexEnter(sqlite3_mutex *p){
+#if defined(SQLITE_DEBUG) || defined(SQLITE_TEST)
+ DWORD tid = GetCurrentThreadId();
+#endif
+#ifdef SQLITE_DEBUG
+ assert( p );
+ assert( p->id==SQLITE_MUTEX_RECURSIVE || winMutexNotheld2(p, tid) );
+#else
+ assert( p );
+#endif
+ assert( winMutex_isInit==1 );
+ EnterCriticalSection(&p->mutex);
+#ifdef SQLITE_DEBUG
+ assert( p->nRef>0 || p->owner==0 );
+ p->owner = tid;
+ p->nRef++;
+ if( p->trace ){
+ OSTRACE(("ENTER-MUTEX tid=%lu, mutex(%d)=%p (%d), nRef=%d\n",
+ tid, p->id, p, p->trace, p->nRef));
+ }
+#endif
+}
+
+static int winMutexTry(sqlite3_mutex *p){
+#if defined(SQLITE_DEBUG) || defined(SQLITE_TEST)
+ DWORD tid = GetCurrentThreadId();
+#endif
+ int rc = SQLITE_BUSY;
+ assert( p );
+ assert( p->id==SQLITE_MUTEX_RECURSIVE || winMutexNotheld2(p, tid) );
+ /*
+ ** The sqlite3_mutex_try() routine is very rarely used, and when it
+ ** is used it is merely an optimization. So it is OK for it to always
+ ** fail.
+ **
+ ** The TryEnterCriticalSection() interface is only available on WinNT,
+ ** and some Windows compilers complain if it is used without first
+ ** adding #defines that prevent SQLite from building on Win98. For that
+ ** reason, this optimization is only compiled in when _WIN32_WINNT>=0x0400. See
+ ** ticket #2685.
+ */
+#if defined(_WIN32_WINNT) && _WIN32_WINNT >= 0x0400
+ assert( winMutex_isInit==1 );
+ assert( winMutex_isNt>=-1 && winMutex_isNt<=1 );
+ if( winMutex_isNt<0 ){
+ winMutex_isNt = sqlite3_win32_is_nt();
+ }
+ assert( winMutex_isNt==0 || winMutex_isNt==1 );
+ if( winMutex_isNt && TryEnterCriticalSection(&p->mutex) ){
+#ifdef SQLITE_DEBUG
+ p->owner = tid;
+ p->nRef++;
+#endif
+ rc = SQLITE_OK;
+ }
+#else
+ UNUSED_PARAMETER(p);
+#endif
+#ifdef SQLITE_DEBUG
+ if( p->trace ){
+ OSTRACE(("TRY-MUTEX tid=%lu, mutex(%d)=%p (%d), owner=%lu, nRef=%d, rc=%s\n",
+ tid, p->id, p, p->trace, p->owner, p->nRef, sqlite3ErrName(rc)));
+ }
+#endif
+ return rc;
+}
+
+/*
+** The sqlite3_mutex_leave() routine exits a mutex that was
+** previously entered by the same thread. The behavior
+** is undefined if the mutex is not currently entered or
+** is not currently allocated. SQLite will never do either.
+*/
+static void winMutexLeave(sqlite3_mutex *p){
+#if defined(SQLITE_DEBUG) || defined(SQLITE_TEST)
+ DWORD tid = GetCurrentThreadId();
+#endif
+ assert( p );
+#ifdef SQLITE_DEBUG
+ assert( p->nRef>0 );
+ assert( p->owner==tid );
+ p->nRef--;
+ if( p->nRef==0 ) p->owner = 0;
+ assert( p->nRef==0 || p->id==SQLITE_MUTEX_RECURSIVE );
+#endif
+ assert( winMutex_isInit==1 );
+ LeaveCriticalSection(&p->mutex);
+#ifdef SQLITE_DEBUG
+ if( p->trace ){
+ OSTRACE(("LEAVE-MUTEX tid=%lu, mutex(%d)=%p (%d), nRef=%d\n",
+ tid, p->id, p, p->trace, p->nRef));
+ }
+#endif
+}
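+
+/* Usage sketch of the enter/try/leave triplet via the public wrappers
+** (illustrative):
+**
+**   sqlite3_mutex_enter(p);                    blocks until acquired
+**   ...critical section...
+**   sqlite3_mutex_leave(p);
+**
+**   if( sqlite3_mutex_try(p)==SQLITE_OK ){     non-blocking attempt
+**     ...critical section...
+**     sqlite3_mutex_leave(p);
+**   }
+*/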
+
+SQLITE_PRIVATE sqlite3_mutex_methods const *sqlite3DefaultMutex(void){
+ static const sqlite3_mutex_methods sMutex = {
+ winMutexInit,
+ winMutexEnd,
+ winMutexAlloc,
+ winMutexFree,
+ winMutexEnter,
+ winMutexTry,
+ winMutexLeave,
+#ifdef SQLITE_DEBUG
+ winMutexHeld,
+ winMutexNotheld
+#else
+ 0,
+ 0
+#endif
+ };
+ return &sMutex;
+}
+
+#endif /* SQLITE_MUTEX_W32 */
+
+/************** End of mutex_w32.c *******************************************/
+/************** Begin file malloc.c ******************************************/
+/*
+** 2001 September 15
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+**
+** Memory allocation functions used throughout SQLite.
+*/
+/* #include "sqliteInt.h" */
+/* #include <stdarg.h> */
+
+/*
+** Attempt to release up to n bytes of non-essential memory currently
+** held by SQLite. An example of non-essential memory is memory used to
+** cache database pages that are not currently in use.
+*/
+SQLITE_API int sqlite3_release_memory(int n){
+#ifdef SQLITE_ENABLE_MEMORY_MANAGEMENT
+ return sqlite3PcacheReleaseMemory(n);
+#else
+ /* IMPLEMENTATION-OF: R-34391-24921 The sqlite3_release_memory() routine
+ ** is a no-op returning zero if SQLite is not compiled with
+ ** SQLITE_ENABLE_MEMORY_MANAGEMENT. */
+ UNUSED_PARAMETER(n);
+ return 0;
+#endif
+}
+
+/*
+** Default value of the hard heap limit. 0 means "no limit".
+*/
+#ifndef SQLITE_MAX_MEMORY
+# define SQLITE_MAX_MEMORY 0
+#endif
+
+/*
+** State information local to the memory allocation subsystem.
+*/
+static SQLITE_WSD struct Mem0Global {
+ sqlite3_mutex *mutex; /* Mutex to serialize access */
+ sqlite3_int64 alarmThreshold; /* The soft heap limit */
+ sqlite3_int64 hardLimit; /* The hard upper bound on memory */
+
+ /*
+ ** True if heap is nearly "full" where "full" is defined by the
+ ** sqlite3_soft_heap_limit() setting.
+ */
+ int nearlyFull;
+} mem0 = { 0, SQLITE_MAX_MEMORY, SQLITE_MAX_MEMORY, 0 };
+
+#define mem0 GLOBAL(struct Mem0Global, mem0)
+
+/*
+** Return the memory allocator mutex. sqlite3_status() needs it.
+*/
+SQLITE_PRIVATE sqlite3_mutex *sqlite3MallocMutex(void){
+ return mem0.mutex;
+}
+
+#ifndef SQLITE_OMIT_DEPRECATED
+/*
+** Deprecated external interface. It used to set an alarm callback
+** that was invoked when memory usage grew too large. Now it is a
+** no-op.
+*/
+SQLITE_API int sqlite3_memory_alarm(
+ void(*xCallback)(void *pArg, sqlite3_int64 used,int N),
+ void *pArg,
+ sqlite3_int64 iThreshold
+){
+ (void)xCallback;
+ (void)pArg;
+ (void)iThreshold;
+ return SQLITE_OK;
+}
+#endif
+
+/*
+** Set the soft heap-size limit for the library. An argument of
+** zero disables the limit. A negative argument is a no-op used to
+** obtain the return value.
+**
+** The return value is the value of the heap limit just before this
+** interface was called.
+**
+** If the hard heap limit is enabled, then the soft heap limit cannot
+** be disabled nor raised above the hard heap limit.
+*/
+SQLITE_API sqlite3_int64 sqlite3_soft_heap_limit64(sqlite3_int64 n){
+ sqlite3_int64 priorLimit;
+ sqlite3_int64 excess;
+ sqlite3_int64 nUsed;
+#ifndef SQLITE_OMIT_AUTOINIT
+ int rc = sqlite3_initialize();
+ if( rc ) return -1;
+#endif
+ sqlite3_mutex_enter(mem0.mutex);
+ priorLimit = mem0.alarmThreshold;
+ if( n<0 ){
+ sqlite3_mutex_leave(mem0.mutex);
+ return priorLimit;
+ }
+ if( mem0.hardLimit>0 && (n>mem0.hardLimit || n==0) ){
+ n = mem0.hardLimit;
+ }
+ mem0.alarmThreshold = n;
+ nUsed = sqlite3StatusValue(SQLITE_STATUS_MEMORY_USED);
+ AtomicStore(&mem0.nearlyFull, n>0 && n<=nUsed);
+ sqlite3_mutex_leave(mem0.mutex);
+ excess = sqlite3_memory_used() - n;
+ if( excess>0 ) sqlite3_release_memory((int)(excess & 0x7fffffff));
+ return priorLimit;
+}
+SQLITE_API void sqlite3_soft_heap_limit(int n){
+ if( n<0 ) n = 0;
+ sqlite3_soft_heap_limit64(n);
+}
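+
+/* Usage sketch (illustrative): a negative argument reads the limit
+** without changing it; zero disables it (unless a hard limit is set,
+** in which case the soft limit is clamped to the hard limit):
+**
+**   sqlite3_int64 cur = sqlite3_soft_heap_limit64(-1);   query only
+**   sqlite3_soft_heap_limit64(64*1024*1024);             64 MiB limit
+**   sqlite3_soft_heap_limit64(0);                        disable
+*/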
+
+/*
+** Set the hard heap-size limit for the library. An argument of zero
+** disables the hard heap limit. A negative argument is a no-op used
+** to obtain the return value without affecting the hard heap limit.
+**
+** The return value is the value of the hard heap limit just prior to
+** calling this interface.
+**
+** Setting the hard heap limit will also activate the soft heap limit
+** and constrain the soft heap limit to be no more than the hard heap
+** limit.
+*/
+SQLITE_API sqlite3_int64 sqlite3_hard_heap_limit64(sqlite3_int64 n){
+ sqlite3_int64 priorLimit;
+#ifndef SQLITE_OMIT_AUTOINIT
+ int rc = sqlite3_initialize();
+ if( rc ) return -1;
+#endif
+ sqlite3_mutex_enter(mem0.mutex);
+ priorLimit = mem0.hardLimit;
+ if( n>=0 ){
+ mem0.hardLimit = n;
+ if( n<mem0.alarmThreshold || mem0.alarmThreshold==0 ){
+ mem0.alarmThreshold = n;
+ }
+ }
+ sqlite3_mutex_leave(mem0.mutex);
+ return priorLimit;
+}
+
+
+/*
+** Initialize the memory allocation subsystem.
+*/
+SQLITE_PRIVATE int sqlite3MallocInit(void){
+ int rc;
+ if( sqlite3GlobalConfig.m.xMalloc==0 ){
+ sqlite3MemSetDefault();
+ }
+ memset(&mem0, 0, sizeof(mem0));
+ mem0.mutex = sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MEM);
+ if( sqlite3GlobalConfig.pPage==0 || sqlite3GlobalConfig.szPage<512
+ || sqlite3GlobalConfig.nPage<=0 ){
+ sqlite3GlobalConfig.pPage = 0;
+ sqlite3GlobalConfig.szPage = 0;
+ }
+ rc = sqlite3GlobalConfig.m.xInit(sqlite3GlobalConfig.m.pAppData);
+ if( rc!=SQLITE_OK ) memset(&mem0, 0, sizeof(mem0));
+ return rc;
+}
+
+/*
+** Return true if the heap is currently under memory pressure - in other
+** words if the amount of heap used is close to the limit set by
+** sqlite3_soft_heap_limit().
+*/
+SQLITE_PRIVATE int sqlite3HeapNearlyFull(void){
+ return AtomicLoad(&mem0.nearlyFull);
+}
+
+/*
+** Deinitialize the memory allocation subsystem.
+*/
+SQLITE_PRIVATE void sqlite3MallocEnd(void){
+ if( sqlite3GlobalConfig.m.xShutdown ){
+ sqlite3GlobalConfig.m.xShutdown(sqlite3GlobalConfig.m.pAppData);
+ }
+ memset(&mem0, 0, sizeof(mem0));
+}
+
+/*
+** Return the amount of memory currently checked out.
+*/
+SQLITE_API sqlite3_int64 sqlite3_memory_used(void){
+ sqlite3_int64 res, mx;
+ sqlite3_status64(SQLITE_STATUS_MEMORY_USED, &res, &mx, 0);
+ return res;
+}
+
+/*
+** Return the maximum amount of memory that has ever been
+** checked out since either the beginning of this process
+** or since the most recent reset.
+*/
+SQLITE_API sqlite3_int64 sqlite3_memory_highwater(int resetFlag){
+ sqlite3_int64 res, mx;
+ sqlite3_status64(SQLITE_STATUS_MEMORY_USED, &res, &mx, resetFlag);
+ return mx;
+}
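+
+/* Usage sketch (illustrative):
+**
+**   sqlite3_int64 now  = sqlite3_memory_used();
+**   sqlite3_int64 peak = sqlite3_memory_highwater(0);    read only
+**   sqlite3_memory_highwater(1);                         read and reset
+*/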
+
+/*
+** Trigger the alarm
+*/
+static void sqlite3MallocAlarm(int nByte){
+ if( mem0.alarmThreshold<=0 ) return;
+ sqlite3_mutex_leave(mem0.mutex);
+ sqlite3_release_memory(nByte);
+ sqlite3_mutex_enter(mem0.mutex);
+}
+
+/*
+** Do a memory allocation with statistics and alarms. Assume the
+** lock is already held.
+*/
+static void mallocWithAlarm(int n, void **pp){
+ void *p;
+ int nFull;
+ assert( sqlite3_mutex_held(mem0.mutex) );
+ assert( n>0 );
+
+ /* In Firefox (circa 2017-02-08), xRoundup() is remapped to an internal
+ ** implementation of malloc_good_size(), which must be called in debug
+ ** mode and specifically when the DMD "Dark Matter Detector" is enabled
+ ** or else a crash results. Hence, do not attempt to optimize out the
+ ** following xRoundup() call. */
+ nFull = sqlite3GlobalConfig.m.xRoundup(n);
+
+ sqlite3StatusHighwater(SQLITE_STATUS_MALLOC_SIZE, n);
+ if( mem0.alarmThreshold>0 ){
+ sqlite3_int64 nUsed = sqlite3StatusValue(SQLITE_STATUS_MEMORY_USED);
+ if( nUsed >= mem0.alarmThreshold - nFull ){
+ AtomicStore(&mem0.nearlyFull, 1);
+ sqlite3MallocAlarm(nFull);
+ if( mem0.hardLimit ){
+ nUsed = sqlite3StatusValue(SQLITE_STATUS_MEMORY_USED);
+ if( nUsed >= mem0.hardLimit - nFull ){
+ *pp = 0;
+ return;
+ }
+ }
+ }else{
+ AtomicStore(&mem0.nearlyFull, 0);
+ }
+ }
+ p = sqlite3GlobalConfig.m.xMalloc(nFull);
+#ifdef SQLITE_ENABLE_MEMORY_MANAGEMENT
+ if( p==0 && mem0.alarmThreshold>0 ){
+ sqlite3MallocAlarm(nFull);
+ p = sqlite3GlobalConfig.m.xMalloc(nFull);
+ }
+#endif
+ if( p ){
+ nFull = sqlite3MallocSize(p);
+ sqlite3StatusUp(SQLITE_STATUS_MEMORY_USED, nFull);
+ sqlite3StatusUp(SQLITE_STATUS_MALLOC_COUNT, 1);
+ }
+ *pp = p;
+}
+
+/*
+** Allocate memory. This routine is like sqlite3_malloc() except that it
+** assumes the memory subsystem has already been initialized.
+*/
+SQLITE_PRIVATE void *sqlite3Malloc(u64 n){
+ void *p;
+ if( n==0 || n>=0x7fffff00 ){
+ /* A memory allocation of a number of bytes which is near the maximum
+ ** signed integer value might cause an integer overflow inside of the
+ ** xMalloc(). Hence we limit the maximum size to 0x7fffff00, giving
+ ** 255 bytes of overhead. SQLite itself will never use anything near
+ ** this amount. The only way to reach the limit is with sqlite3_malloc() */
+ p = 0;
+ }else if( sqlite3GlobalConfig.bMemstat ){
+ sqlite3_mutex_enter(mem0.mutex);
+ mallocWithAlarm((int)n, &p);
+ sqlite3_mutex_leave(mem0.mutex);
+ }else{
+ p = sqlite3GlobalConfig.m.xMalloc((int)n);
+ }
+ assert( EIGHT_BYTE_ALIGNMENT(p) ); /* IMP: R-11148-40995 */
+ return p;
+}
+
+/*
+** This version of the memory allocation is for use by the application.
+** First make sure the memory subsystem is initialized, then do the
+** allocation.
+*/
+SQLITE_API void *sqlite3_malloc(int n){
+#ifndef SQLITE_OMIT_AUTOINIT
+ if( sqlite3_initialize() ) return 0;
+#endif
+ return n<=0 ? 0 : sqlite3Malloc(n);
+}
+SQLITE_API void *sqlite3_malloc64(sqlite3_uint64 n){
+#ifndef SQLITE_OMIT_AUTOINIT
+ if( sqlite3_initialize() ) return 0;
+#endif
+ return sqlite3Malloc(n);
+}
+
+/*
+** TRUE if p is a lookaside memory allocation from db
+*/
+#ifndef SQLITE_OMIT_LOOKASIDE
+static int isLookaside(sqlite3 *db, void *p){
+ return SQLITE_WITHIN(p, db->lookaside.pStart, db->lookaside.pEnd);
+}
+#else
+#define isLookaside(A,B) 0
+#endif
+
+/*
+** Return the size of a memory allocation previously obtained from
+** sqlite3Malloc() or sqlite3_malloc().
+*/
+SQLITE_PRIVATE int sqlite3MallocSize(void *p){
+ assert( sqlite3MemdebugHasType(p, MEMTYPE_HEAP) );
+ return sqlite3GlobalConfig.m.xSize(p);
+}
+static int lookasideMallocSize(sqlite3 *db, void *p){
+#ifndef SQLITE_OMIT_TWOSIZE_LOOKASIDE
+ return p<db->lookaside.pMiddle ? db->lookaside.szTrue : LOOKASIDE_SMALL;
+#else
+ return db->lookaside.szTrue;
+#endif
+}
+SQLITE_PRIVATE int sqlite3DbMallocSize(sqlite3 *db, void *p){
+ assert( p!=0 );
+#ifdef SQLITE_DEBUG
+ if( db==0 || !isLookaside(db,p) ){
+ if( db==0 ){
+ assert( sqlite3MemdebugNoType(p, (u8)~MEMTYPE_HEAP) );
+ assert( sqlite3MemdebugHasType(p, MEMTYPE_HEAP) );
+ }else{
+ assert( sqlite3MemdebugHasType(p, (MEMTYPE_LOOKASIDE|MEMTYPE_HEAP)) );
+ assert( sqlite3MemdebugNoType(p, (u8)~(MEMTYPE_LOOKASIDE|MEMTYPE_HEAP)) );
+ }
+ }
+#endif
+ if( db ){
+ if( ((uptr)p)<(uptr)(db->lookaside.pEnd) ){
+#ifndef SQLITE_OMIT_TWOSIZE_LOOKASIDE
+ if( ((uptr)p)>=(uptr)(db->lookaside.pMiddle) ){
+ assert( sqlite3_mutex_held(db->mutex) );
+ return LOOKASIDE_SMALL;
+ }
+#endif
+ if( ((uptr)p)>=(uptr)(db->lookaside.pStart) ){
+ assert( sqlite3_mutex_held(db->mutex) );
+ return db->lookaside.szTrue;
+ }
+ }
+ }
+ return sqlite3GlobalConfig.m.xSize(p);
+}
+SQLITE_API sqlite3_uint64 sqlite3_msize(void *p){
+ assert( sqlite3MemdebugNoType(p, (u8)~MEMTYPE_HEAP) );
+ assert( sqlite3MemdebugHasType(p, MEMTYPE_HEAP) );
+ return p ? sqlite3GlobalConfig.m.xSize(p) : 0;
+}
+
+/*
+** Free memory previously obtained from sqlite3Malloc().
+*/
+SQLITE_API void sqlite3_free(void *p){
+ if( p==0 ) return; /* IMP: R-49053-54554 */
+ assert( sqlite3MemdebugHasType(p, MEMTYPE_HEAP) );
+ assert( sqlite3MemdebugNoType(p, (u8)~MEMTYPE_HEAP) );
+ if( sqlite3GlobalConfig.bMemstat ){
+ sqlite3_mutex_enter(mem0.mutex);
+ sqlite3StatusDown(SQLITE_STATUS_MEMORY_USED, sqlite3MallocSize(p));
+ sqlite3StatusDown(SQLITE_STATUS_MALLOC_COUNT, 1);
+ sqlite3GlobalConfig.m.xFree(p);
+ sqlite3_mutex_leave(mem0.mutex);
+ }else{
+ sqlite3GlobalConfig.m.xFree(p);
+ }
+}
+
+/*
+** Add the size of memory allocation "p" to the count in
+** *db->pnBytesFreed.
+*/
+static SQLITE_NOINLINE void measureAllocationSize(sqlite3 *db, void *p){
+ *db->pnBytesFreed += sqlite3DbMallocSize(db,p);
+}
+
+/*
+** Free memory that might be associated with a particular database
+** connection. Calling sqlite3DbFree(D,X) for X==0 is a harmless no-op.
+** The sqlite3DbFreeNN(D,X) version requires that X be non-NULL.
+*/
+SQLITE_PRIVATE void sqlite3DbFreeNN(sqlite3 *db, void *p){
+ assert( db==0 || sqlite3_mutex_held(db->mutex) );
+ assert( p!=0 );
+ if( db ){
+ if( db->pnBytesFreed ){
+ measureAllocationSize(db, p);
+ return;
+ }
+ if( ((uptr)p)<(uptr)(db->lookaside.pEnd) ){
+#ifndef SQLITE_OMIT_TWOSIZE_LOOKASIDE
+ if( ((uptr)p)>=(uptr)(db->lookaside.pMiddle) ){
+ LookasideSlot *pBuf = (LookasideSlot*)p;
+#ifdef SQLITE_DEBUG
+ memset(p, 0xaa, LOOKASIDE_SMALL); /* Trash freed content */
+#endif
+ pBuf->pNext = db->lookaside.pSmallFree;
+ db->lookaside.pSmallFree = pBuf;
+ return;
+ }
+#endif /* SQLITE_OMIT_TWOSIZE_LOOKASIDE */
+ if( ((uptr)p)>=(uptr)(db->lookaside.pStart) ){
+ LookasideSlot *pBuf = (LookasideSlot*)p;
+#ifdef SQLITE_DEBUG
+ memset(p, 0xaa, db->lookaside.szTrue); /* Trash freed content */
+#endif
+ pBuf->pNext = db->lookaside.pFree;
+ db->lookaside.pFree = pBuf;
+ return;
+ }
+ }
+ }
+ assert( sqlite3MemdebugHasType(p, (MEMTYPE_LOOKASIDE|MEMTYPE_HEAP)) );
+ assert( sqlite3MemdebugNoType(p, (u8)~(MEMTYPE_LOOKASIDE|MEMTYPE_HEAP)) );
+ assert( db!=0 || sqlite3MemdebugNoType(p, MEMTYPE_LOOKASIDE) );
+ sqlite3MemdebugSetType(p, MEMTYPE_HEAP);
+ sqlite3_free(p);
+}
+SQLITE_PRIVATE void sqlite3DbFree(sqlite3 *db, void *p){
+ assert( db==0 || sqlite3_mutex_held(db->mutex) );
+ if( p ) sqlite3DbFreeNN(db, p);
+}
+
+/*
+** Change the size of an existing memory allocation
+*/
+SQLITE_PRIVATE void *sqlite3Realloc(void *pOld, u64 nBytes){
+ int nOld, nNew, nDiff;
+ void *pNew;
+ assert( sqlite3MemdebugHasType(pOld, MEMTYPE_HEAP) );
+ assert( sqlite3MemdebugNoType(pOld, (u8)~MEMTYPE_HEAP) );
+ if( pOld==0 ){
+ return sqlite3Malloc(nBytes); /* IMP: R-04300-56712 */
+ }
+ if( nBytes==0 ){
+ sqlite3_free(pOld); /* IMP: R-26507-47431 */
+ return 0;
+ }
+ if( nBytes>=0x7fffff00 ){
+ /* The 0x7fffff00 limit term is explained in comments on sqlite3Malloc() */
+ return 0;
+ }
+ nOld = sqlite3MallocSize(pOld);
+ /* IMPLEMENTATION-OF: R-46199-30249 SQLite guarantees that the second
+ ** argument to xRealloc is always a value returned by a prior call to
+ ** xRoundup. */
+ nNew = sqlite3GlobalConfig.m.xRoundup((int)nBytes);
+ if( nOld==nNew ){
+ pNew = pOld;
+ }else if( sqlite3GlobalConfig.bMemstat ){
+ sqlite3_mutex_enter(mem0.mutex);
+ sqlite3StatusHighwater(SQLITE_STATUS_MALLOC_SIZE, (int)nBytes);
+ nDiff = nNew - nOld;
+ if( nDiff>0 && sqlite3StatusValue(SQLITE_STATUS_MEMORY_USED) >=
+ mem0.alarmThreshold-nDiff ){
+ sqlite3MallocAlarm(nDiff);
+ }
+ pNew = sqlite3GlobalConfig.m.xRealloc(pOld, nNew);
+#ifdef SQLITE_ENABLE_MEMORY_MANAGEMENT
+ if( pNew==0 && mem0.alarmThreshold>0 ){
+ sqlite3MallocAlarm((int)nBytes);
+ pNew = sqlite3GlobalConfig.m.xRealloc(pOld, nNew);
+ }
+#endif
+ if( pNew ){
+ nNew = sqlite3MallocSize(pNew);
+ sqlite3StatusUp(SQLITE_STATUS_MEMORY_USED, nNew-nOld);
+ }
+ sqlite3_mutex_leave(mem0.mutex);
+ }else{
+ pNew = sqlite3GlobalConfig.m.xRealloc(pOld, nNew);
+ }
+ assert( EIGHT_BYTE_ALIGNMENT(pNew) ); /* IMP: R-11148-40995 */
+ return pNew;
+}
+
+/*
+** The public interface to sqlite3Realloc. Make sure that the memory
+** subsystem is initialized prior to invoking sqliteRealloc.
+*/
+SQLITE_API void *sqlite3_realloc(void *pOld, int n){
+#ifndef SQLITE_OMIT_AUTOINIT
+ if( sqlite3_initialize() ) return 0;
+#endif
+ if( n<0 ) n = 0; /* IMP: R-26507-47431 */
+ return sqlite3Realloc(pOld, n);
+}
+SQLITE_API void *sqlite3_realloc64(void *pOld, sqlite3_uint64 n){
+#ifndef SQLITE_OMIT_AUTOINIT
+ if( sqlite3_initialize() ) return 0;
+#endif
+ return sqlite3Realloc(pOld, n);
+}
+
+
+/*
+** Allocate and zero memory.
+*/
+SQLITE_PRIVATE void *sqlite3MallocZero(u64 n){
+ void *p = sqlite3Malloc(n);
+ if( p ){
+ memset(p, 0, (size_t)n);
+ }
+ return p;
+}
+
+/*
+** Allocate and zero memory. If the allocation fails, set
+** the mallocFailed flag in the connection object.
+*/
+SQLITE_PRIVATE void *sqlite3DbMallocZero(sqlite3 *db, u64 n){
+ void *p;
+ testcase( db==0 );
+ p = sqlite3DbMallocRaw(db, n);
+ if( p ) memset(p, 0, (size_t)n);
+ return p;
+}
+
+
+/* Finish the work of sqlite3DbMallocRawNN for the unusual and
+** slower case when the allocation cannot be fulfilled using lookaside.
+*/
+static SQLITE_NOINLINE void *dbMallocRawFinish(sqlite3 *db, u64 n){
+ void *p;
+ assert( db!=0 );
+ p = sqlite3Malloc(n);
+ if( !p ) sqlite3OomFault(db);
+ sqlite3MemdebugSetType(p,
+ (db->lookaside.bDisable==0) ? MEMTYPE_LOOKASIDE : MEMTYPE_HEAP);
+ return p;
+}
+
+/*
+** Allocate memory, either lookaside (if possible) or heap.
+** If the allocation fails, set the mallocFailed flag in
+** the connection pointer.
+**
+** If db!=0 and db->mallocFailed is true (indicating a prior malloc
+** failure on the same database connection) then always return 0.
+** Hence for a particular database connection, once malloc starts
+** failing, it fails consistently until mallocFailed is reset.
+** This is an important assumption. There are many places in the
+** code that do things like this:
+**
+** int *a = (int*)sqlite3DbMallocRaw(db, 100);
+** int *b = (int*)sqlite3DbMallocRaw(db, 200);
+** if( b ) a[10] = 9;
+**
+** In other words, if a subsequent malloc (ex: "b") worked, it is assumed
+** that all prior mallocs (ex: "a") worked too.
+**
+** The sqlite3DbMallocRawNN() variant requires that the "db" parameter
+** not be a NULL pointer.
+*/
+SQLITE_PRIVATE void *sqlite3DbMallocRaw(sqlite3 *db, u64 n){
+ void *p;
+ if( db ) return sqlite3DbMallocRawNN(db, n);
+ p = sqlite3Malloc(n);
+ sqlite3MemdebugSetType(p, MEMTYPE_HEAP);
+ return p;
+}
+SQLITE_PRIVATE void *sqlite3DbMallocRawNN(sqlite3 *db, u64 n){
+#ifndef SQLITE_OMIT_LOOKASIDE
+ LookasideSlot *pBuf;
+ assert( db!=0 );
+ assert( sqlite3_mutex_held(db->mutex) );
+ assert( db->pnBytesFreed==0 );
+ if( n>db->lookaside.sz ){
+ if( !db->lookaside.bDisable ){
+ db->lookaside.anStat[1]++;
+ }else if( db->mallocFailed ){
+ return 0;
+ }
+ return dbMallocRawFinish(db, n);
+ }
+#ifndef SQLITE_OMIT_TWOSIZE_LOOKASIDE
+ if( n<=LOOKASIDE_SMALL ){
+ if( (pBuf = db->lookaside.pSmallFree)!=0 ){
+ db->lookaside.pSmallFree = pBuf->pNext;
+ db->lookaside.anStat[0]++;
+ return (void*)pBuf;
+ }else if( (pBuf = db->lookaside.pSmallInit)!=0 ){
+ db->lookaside.pSmallInit = pBuf->pNext;
+ db->lookaside.anStat[0]++;
+ return (void*)pBuf;
+ }
+ }
+#endif
+ if( (pBuf = db->lookaside.pFree)!=0 ){
+ db->lookaside.pFree = pBuf->pNext;
+ db->lookaside.anStat[0]++;
+ return (void*)pBuf;
+ }else if( (pBuf = db->lookaside.pInit)!=0 ){
+ db->lookaside.pInit = pBuf->pNext;
+ db->lookaside.anStat[0]++;
+ return (void*)pBuf;
+ }else{
+ db->lookaside.anStat[2]++;
+ }
+#else
+ assert( db!=0 );
+ assert( sqlite3_mutex_held(db->mutex) );
+ assert( db->pnBytesFreed==0 );
+ if( db->mallocFailed ){
+ return 0;
+ }
+#endif
+ return dbMallocRawFinish(db, n);
+}
+
+/* Forward declaration */
+static SQLITE_NOINLINE void *dbReallocFinish(sqlite3 *db, void *p, u64 n);
+
+/*
+** Resize the block of memory pointed to by p to n bytes. If the
+** resize fails, set the mallocFailed flag in the connection object.
+*/
+SQLITE_PRIVATE void *sqlite3DbRealloc(sqlite3 *db, void *p, u64 n){
+ assert( db!=0 );
+ if( p==0 ) return sqlite3DbMallocRawNN(db, n);
+ assert( sqlite3_mutex_held(db->mutex) );
+ if( ((uptr)p)<(uptr)db->lookaside.pEnd ){
+#ifndef SQLITE_OMIT_TWOSIZE_LOOKASIDE
+ if( ((uptr)p)>=(uptr)db->lookaside.pMiddle ){
+ if( n<=LOOKASIDE_SMALL ) return p;
+ }else
+#endif
+ if( ((uptr)p)>=(uptr)db->lookaside.pStart ){
+ if( n<=db->lookaside.szTrue ) return p;
+ }
+ }
+ return dbReallocFinish(db, p, n);
+}
+static SQLITE_NOINLINE void *dbReallocFinish(sqlite3 *db, void *p, u64 n){
+ void *pNew = 0;
+ assert( db!=0 );
+ assert( p!=0 );
+ if( db->mallocFailed==0 ){
+ if( isLookaside(db, p) ){
+ pNew = sqlite3DbMallocRawNN(db, n);
+ if( pNew ){
+ memcpy(pNew, p, lookasideMallocSize(db, p));
+ sqlite3DbFree(db, p);
+ }
+ }else{
+ assert( sqlite3MemdebugHasType(p, (MEMTYPE_LOOKASIDE|MEMTYPE_HEAP)) );
+ assert( sqlite3MemdebugNoType(p, (u8)~(MEMTYPE_LOOKASIDE|MEMTYPE_HEAP)) );
+ sqlite3MemdebugSetType(p, MEMTYPE_HEAP);
+ pNew = sqlite3Realloc(p, n);
+ if( !pNew ){
+ sqlite3OomFault(db);
+ }
+ sqlite3MemdebugSetType(pNew,
+ (db->lookaside.bDisable==0 ? MEMTYPE_LOOKASIDE : MEMTYPE_HEAP));
+ }
+ }
+ return pNew;
+}
+
+/*
+** Attempt to reallocate p. If the reallocation fails, then free p
+** and set the mallocFailed flag in the database connection.
+*/
+SQLITE_PRIVATE void *sqlite3DbReallocOrFree(sqlite3 *db, void *p, u64 n){
+ void *pNew;
+ pNew = sqlite3DbRealloc(db, p, n);
+ if( !pNew ){
+ sqlite3DbFree(db, p);
+ }
+ return pNew;
+}
+
+/*
+** Make a copy of a string in memory obtained from sqlite3DbMalloc(). These
+** functions call sqlite3DbMallocRaw() directly instead of sqlite3DbMalloc(). This
+** is because when memory debugging is turned on, these two functions are
+** called via macros that record the current file and line number in the
+** ThreadData structure.
+*/
+SQLITE_PRIVATE char *sqlite3DbStrDup(sqlite3 *db, const char *z){
+ char *zNew;
+ size_t n;
+ if( z==0 ){
+ return 0;
+ }
+ n = strlen(z) + 1;
+ zNew = sqlite3DbMallocRaw(db, n);
+ if( zNew ){
+ memcpy(zNew, z, n);
+ }
+ return zNew;
+}
+SQLITE_PRIVATE char *sqlite3DbStrNDup(sqlite3 *db, const char *z, u64 n){
+ char *zNew;
+ assert( db!=0 );
+ assert( z!=0 || n==0 );
+ assert( (n&0x7fffffff)==n );
+ zNew = z ? sqlite3DbMallocRawNN(db, n+1) : 0;
+ if( zNew ){
+ memcpy(zNew, z, (size_t)n);
+ zNew[n] = 0;
+ }
+ return zNew;
+}
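+
+/* Usage sketch (illustrative):
+**
+**   char *zCopy = sqlite3DbStrDup(db, "alpha");          "alpha"
+**   char *zPart = sqlite3DbStrNDup(db, "alphabet", 5);   "alpha"
+**   sqlite3DbFree(db, zCopy);
+**   sqlite3DbFree(db, zPart);
+*/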
+
+/*
+** The text between zStart and zEnd represents a phrase within a larger
+** SQL statement. Make a copy of this phrase in space obtained from
+** sqlite3DbMalloc(). Omit leading and trailing whitespace.
+*/
+SQLITE_PRIVATE char *sqlite3DbSpanDup(sqlite3 *db, const char *zStart, const char *zEnd){
+ int n;
+ while( sqlite3Isspace(zStart[0]) ) zStart++;
+ n = (int)(zEnd - zStart);
+ while( ALWAYS(n>0) && sqlite3Isspace(zStart[n-1]) ) n--;
+ return sqlite3DbStrNDup(db, zStart, n);
+}
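+
+/* Example: with zStart pointing at "  x+1  " and zEnd just past the
+** final space, the result is the trimmed copy "x+1". */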
+
+/*
+** Free any prior content in *pz and replace it with a copy of zNew.
+*/
+SQLITE_PRIVATE void sqlite3SetString(char **pz, sqlite3 *db, const char *zNew){
+ sqlite3DbFree(db, *pz);
+ *pz = sqlite3DbStrDup(db, zNew);
+}
+
+/*
+** Call this routine to record the fact that an OOM (out-of-memory) error
+** has happened. This routine will set db->mallocFailed, and also
+** temporarily disable the lookaside memory allocator and interrupt
+** any running VDBEs.
+*/
+SQLITE_PRIVATE void sqlite3OomFault(sqlite3 *db){
+ if( db->mallocFailed==0 && db->bBenignMalloc==0 ){
+ db->mallocFailed = 1;
+ if( db->nVdbeExec>0 ){
+ AtomicStore(&db->u1.isInterrupted, 1);
+ }
+ DisableLookaside;
+ if( db->pParse ){
+ db->pParse->rc = SQLITE_NOMEM_BKPT;
+ }
+ }
+}
+
+/*
+** This routine reactivates the memory allocator and clears the
+** db->mallocFailed flag as necessary.
+**
+** The memory allocator is not restarted if there are running
+** VDBEs.
+*/
+SQLITE_PRIVATE void sqlite3OomClear(sqlite3 *db){
+ if( db->mallocFailed && db->nVdbeExec==0 ){
+ db->mallocFailed = 0;
+ AtomicStore(&db->u1.isInterrupted, 0);
+ assert( db->lookaside.bDisable>0 );
+ EnableLookaside;
+ }
+}
+
+/*
+** Take actions at the end of an API call to indicate an OOM error
+*/
+static SQLITE_NOINLINE int apiOomError(sqlite3 *db){
+ sqlite3OomClear(db);
+ sqlite3Error(db, SQLITE_NOMEM);
+ return SQLITE_NOMEM_BKPT;
+}
+
+/*
+** This function must be called before exiting any API function (i.e.
+** returning control to the user) that has called sqlite3_malloc or
+** sqlite3_realloc.
+**
+** The returned value is normally a copy of the second argument to this
+** function. However, if a malloc() failure has occurred since the previous
+** invocation, SQLITE_NOMEM is returned instead.
+**
+** If an OOM has occurred, then the connection error-code (the value
+** returned by sqlite3_errcode()) is set to SQLITE_NOMEM.
+*/
+SQLITE_PRIVATE int sqlite3ApiExit(sqlite3* db, int rc){
+ /* The db handle must hold the connection handle mutex here.
+ ** Otherwise the read (and possible write) of db->mallocFailed
+ ** is unsafe, as is the call to sqlite3Error().
+ */
+ assert( db!=0 );
+ assert( sqlite3_mutex_held(db->mutex) );
+ if( db->mallocFailed || rc==SQLITE_IOERR_NOMEM ){
+ return apiOomError(db);
+ }
+ return rc & db->errMask;
+}
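+
+/* Usage sketch: the typical tail of a public API routine (illustrative):
+**
+**   sqlite3_mutex_enter(db->mutex);
+**   ...work that may call sqlite3_malloc() or sqlite3_realloc()...
+**   rc = sqlite3ApiExit(db, rc);
+**   sqlite3_mutex_leave(db->mutex);
+**   return rc;
+*/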
+
+/************** End of malloc.c **********************************************/
+/************** Begin file printf.c ******************************************/
+/*
+** The "printf" code that follows dates from the 1980's. It is in
+** the public domain.
+**
+**************************************************************************
+**
+** This file contains code for a set of "printf"-like routines. These
+** routines format strings much like the printf() from the standard C
+** library, though the implementation here has enhancements to support
+** SQLite.
+*/
+/* #include "sqliteInt.h" */
+
+/*
+** Conversion types fall into various categories as defined by the
+** following enumeration.
+*/
+#define etRADIX 0 /* non-decimal integer types. %x %o */
+#define etFLOAT 1 /* Floating point. %f */
+#define etEXP 2 /* Exponential notation. %e and %E */
+#define etGENERIC 3 /* Floating or exponential, depending on exponent. %g */
+#define etSIZE 4 /* Return number of characters processed so far. %n */
+#define etSTRING 5 /* Strings. %s */
+#define etDYNSTRING 6 /* Dynamically allocated strings. %z */
+#define etPERCENT 7 /* Percent symbol. %% */
+#define etCHARX 8 /* Characters. %c */
+/* The rest are extensions, not normally found in printf() */
+#define etSQLESCAPE 9 /* Strings with '\'' doubled. %q */
+#define etSQLESCAPE2 10 /* Strings with '\'' doubled and enclosed in '',
+ NULL pointers replaced by SQL NULL. %Q */
+#define etTOKEN 11 /* a pointer to a Token structure */
+#define etSRCLIST 12 /* a pointer to a SrcList */
+#define etPOINTER 13 /* The %p conversion */
+#define etSQLESCAPE3 14 /* %w -> Strings with '\"' doubled */
+#define etORDINAL 15 /* %r -> 1st, 2nd, 3rd, 4th, etc. English only */
+#define etDECIMAL 16 /* %d or %u, but not %x, %o */
+
+#define etINVALID 17 /* Any unrecognized conversion type */
+
+
+/*
+** An "etByte" is an 8-bit unsigned value.
+*/
+typedef unsigned char etByte;
+
+/*
+** Each builtin conversion character (ex: the 'd' in "%d") is described
+** by an instance of the following structure
+*/
+typedef struct et_info { /* Information about each format field */
+ char fmttype; /* The format field code letter */
+ etByte base; /* The base for radix conversion */
+ etByte flags; /* One or more of FLAG_ constants below */
+ etByte type; /* Conversion paradigm */
+ etByte charset; /* Offset into aDigits[] of the digits string */
+ etByte prefix; /* Offset into aPrefix[] of the prefix string */
+} et_info;
+
+/*
+** Allowed values for et_info.flags
+*/
+#define FLAG_SIGNED 1 /* True if the value to convert is signed */
+#define FLAG_STRING 4 /* Allow infinite precision */
+
+
+/*
+** The following table is searched linearly, so it is good to put the
+** most frequently used conversion types first.
+*/
+static const char aDigits[] = "0123456789ABCDEF0123456789abcdef";
+static const char aPrefix[] = "-x0\000X0";
+static const et_info fmtinfo[] = {
+ { 'd', 10, 1, etDECIMAL, 0, 0 },
+ { 's', 0, 4, etSTRING, 0, 0 },
+ { 'g', 0, 1, etGENERIC, 30, 0 },
+ { 'z', 0, 4, etDYNSTRING, 0, 0 },
+ { 'q', 0, 4, etSQLESCAPE, 0, 0 },
+ { 'Q', 0, 4, etSQLESCAPE2, 0, 0 },
+ { 'w', 0, 4, etSQLESCAPE3, 0, 0 },
+ { 'c', 0, 0, etCHARX, 0, 0 },
+ { 'o', 8, 0, etRADIX, 0, 2 },
+ { 'u', 10, 0, etDECIMAL, 0, 0 },
+ { 'x', 16, 0, etRADIX, 16, 1 },
+ { 'X', 16, 0, etRADIX, 0, 4 },
+#ifndef SQLITE_OMIT_FLOATING_POINT
+ { 'f', 0, 1, etFLOAT, 0, 0 },
+ { 'e', 0, 1, etEXP, 30, 0 },
+ { 'E', 0, 1, etEXP, 14, 0 },
+ { 'G', 0, 1, etGENERIC, 14, 0 },
+#endif
+ { 'i', 10, 1, etDECIMAL, 0, 0 },
+ { 'n', 0, 0, etSIZE, 0, 0 },
+ { '%', 0, 0, etPERCENT, 0, 0 },
+ { 'p', 16, 0, etPOINTER, 0, 1 },
+
+ /* All the rest are undocumented and are for internal use only */
+ { 'T', 0, 0, etTOKEN, 0, 0 },
+ { 'S', 0, 0, etSRCLIST, 0, 0 },
+ { 'r', 10, 1, etORDINAL, 0, 0 },
+};
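+
+/* Usage sketch of the SQLite-specific conversions (illustrative; zText
+** and zCol are placeholder strings):
+**
+**   sqlite3_mprintf("INSERT INTO t VALUES('%q')", zText);   ' doubled
+**   sqlite3_mprintf("INSERT INTO t VALUES(%Q)", zText);     quotes added,
+**                                                           NULL-safe
+**   sqlite3_mprintf("SELECT \"%w\" FROM t", zCol);          " doubled
+*/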
+
+/* Floating point constants used for rounding */
+static const double arRound[] = {
+ 5.0e-01, 5.0e-02, 5.0e-03, 5.0e-04, 5.0e-05,
+ 5.0e-06, 5.0e-07, 5.0e-08, 5.0e-09, 5.0e-10,
+};
+
+/*
+** If SQLITE_OMIT_FLOATING_POINT is defined, then none of the floating point
+** conversions will work.
+*/
+#ifndef SQLITE_OMIT_FLOATING_POINT
+/*
+** "*val" is a double such that 0.1 <= *val < 10.0
+** Return the ascii code for the leading digit of *val, then
+** multiply "*val" by 10.0 to renormalize.
+**
+** Example:
+** input: *val = 3.14159
+** output: *val = 1.4159 function return = '3'
+**
+** The counter *cnt is decremented on each call. Once it reaches
+** zero (it starts at the number of significant digits to render,
+** nominally 16 for a 64-bit float) '0' is always returned.
+*/
+static char et_getdigit(LONGDOUBLE_TYPE *val, int *cnt){
+ int digit;
+ LONGDOUBLE_TYPE d;
+ if( (*cnt)<=0 ) return '0';
+ (*cnt)--;
+ digit = (int)*val;
+ d = digit;
+ digit += '0';
+ *val = (*val - d)*10.0;
+ return (char)digit;
+}
+#endif /* SQLITE_OMIT_FLOATING_POINT */
+
+/*
+** Set the StrAccum object to an error mode.
+*/
+static void setStrAccumError(StrAccum *p, u8 eError){
+ assert( eError==SQLITE_NOMEM || eError==SQLITE_TOOBIG );
+ p->accError = eError;
+ if( p->mxAlloc ) sqlite3_str_reset(p);
+ if( eError==SQLITE_TOOBIG ) sqlite3ErrorToParser(p->db, eError);
+}
+
+/*
+** Extra argument values from a PrintfArguments object
+*/
+static sqlite3_int64 getIntArg(PrintfArguments *p){
+ if( p->nArg<=p->nUsed ) return 0;
+ return sqlite3_value_int64(p->apArg[p->nUsed++]);
+}
+static double getDoubleArg(PrintfArguments *p){
+ if( p->nArg<=p->nUsed ) return 0.0;
+ return sqlite3_value_double(p->apArg[p->nUsed++]);
+}
+static char *getTextArg(PrintfArguments *p){
+ if( p->nArg<=p->nUsed ) return 0;
+ return (char*)sqlite3_value_text(p->apArg[p->nUsed++]);
+}
+
+/*
+** Allocate memory for a temporary buffer needed for printf rendering.
+**
+** If the requested size of the temp buffer is larger than the size
+** of the output buffer in pAccum, then cause an SQLITE_TOOBIG error.
+** Do the size check before the memory allocation to prevent rogue
+** SQL from requesting large allocations using the precision or width
+** field of the printf() function.
+*/
+static char *printfTempBuf(sqlite3_str *pAccum, sqlite3_int64 n){
+ char *z;
+ if( pAccum->accError ) return 0;
+ if( n>pAccum->nAlloc && n>pAccum->mxAlloc ){
+ setStrAccumError(pAccum, SQLITE_TOOBIG);
+ return 0;
+ }
+ z = sqlite3DbMallocRaw(pAccum->db, n);
+ if( z==0 ){
+ setStrAccumError(pAccum, SQLITE_NOMEM);
+ }
+ return z;
+}
+
+/*
+** On machines with a small stack size, you can redefine the
+** SQLITE_PRINT_BUF_SIZE to be something smaller, if desired.
+*/
+#ifndef SQLITE_PRINT_BUF_SIZE
+# define SQLITE_PRINT_BUF_SIZE 70
+#endif
+#define etBUFSIZE SQLITE_PRINT_BUF_SIZE /* Size of the output buffer */
+
+/*
+** Hard limit on the precision of floating-point conversions.
+*/
+#ifndef SQLITE_FP_PRECISION_LIMIT
+# define SQLITE_FP_PRECISION_LIMIT 100000000
+#endif
+
+/*
+** Render a string given by "fmt" into the StrAccum object.
+*/
+SQLITE_API void sqlite3_str_vappendf(
+ sqlite3_str *pAccum, /* Accumulate results here */
+ const char *fmt, /* Format string */
+ va_list ap /* arguments */
+){
+ int c; /* Next character in the format string */
+ char *bufpt; /* Pointer to the conversion buffer */
+ int precision; /* Precision of the current field */
+ int length; /* Length of the field */
+ int idx; /* A general purpose loop counter */
+ int width; /* Width of the current field */
+ etByte flag_leftjustify; /* True if "-" flag is present */
+ etByte flag_prefix; /* '+' or ' ' or 0 for prefix */
+ etByte flag_alternateform; /* True if "#" flag is present */
+ etByte flag_altform2; /* True if "!" flag is present */
+ etByte flag_zeropad; /* True if field width constant starts with zero */
+ etByte flag_long; /* 1 for the "l" flag, 2 for "ll", 0 by default */
+ etByte done; /* Loop termination flag */
+ etByte cThousand; /* Thousands separator for %d and %u */
+ etByte xtype = etINVALID; /* Conversion paradigm */
+ u8 bArgList; /* True for SQLITE_PRINTF_SQLFUNC */
+ char prefix; /* Prefix character. "+" or "-" or " " or '\0'. */
+ sqlite_uint64 longvalue; /* Value for integer types */
+ LONGDOUBLE_TYPE realvalue; /* Value for real types */
+ const et_info *infop; /* Pointer to the appropriate info structure */
+ char *zOut; /* Rendering buffer */
+ int nOut; /* Size of the rendering buffer */
+ char *zExtra = 0; /* Malloced memory used by some conversion */
+#ifndef SQLITE_OMIT_FLOATING_POINT
+ int exp, e2; /* exponent of real numbers */
+ int nsd; /* Number of significant digits returned */
+ double rounder; /* Used for rounding floating point values */
+ etByte flag_dp; /* True if decimal point should be shown */
+ etByte flag_rtz; /* True if trailing zeros should be removed */
+#endif
+ PrintfArguments *pArgList = 0; /* Arguments for SQLITE_PRINTF_SQLFUNC */
+ char buf[etBUFSIZE]; /* Conversion buffer */
+
+ /* pAccum never starts out with an empty buffer that was obtained from
+ ** malloc(). This precondition is required by the mprintf("%z...")
+ ** optimization. */
+ assert( pAccum->nChar>0 || (pAccum->printfFlags&SQLITE_PRINTF_MALLOCED)==0 );
+
+ bufpt = 0;
+ if( (pAccum->printfFlags & SQLITE_PRINTF_SQLFUNC)!=0 ){
+ pArgList = va_arg(ap, PrintfArguments*);
+ bArgList = 1;
+ }else{
+ bArgList = 0;
+ }
+ for(; (c=(*fmt))!=0; ++fmt){
+ if( c!='%' ){
+ bufpt = (char *)fmt;
+#if HAVE_STRCHRNUL
+ fmt = strchrnul(fmt, '%');
+#else
+ do{ fmt++; }while( *fmt && *fmt != '%' );
+#endif
+ sqlite3_str_append(pAccum, bufpt, (int)(fmt - bufpt));
+ if( *fmt==0 ) break;
+ }
+ if( (c=(*++fmt))==0 ){
+ sqlite3_str_append(pAccum, "%", 1);
+ break;
+ }
+ /* Find out what flags are present */
+ flag_leftjustify = flag_prefix = cThousand =
+ flag_alternateform = flag_altform2 = flag_zeropad = 0;
+ done = 0;
+ width = 0;
+ flag_long = 0;
+ precision = -1;
+ do{
+ switch( c ){
+ case '-': flag_leftjustify = 1; break;
+ case '+': flag_prefix = '+'; break;
+ case ' ': flag_prefix = ' '; break;
+ case '#': flag_alternateform = 1; break;
+ case '!': flag_altform2 = 1; break;
+ case '0': flag_zeropad = 1; break;
+ case ',': cThousand = ','; break;
+ default: done = 1; break;
+ case 'l': {
+ flag_long = 1;
+ c = *++fmt;
+ if( c=='l' ){
+ c = *++fmt;
+ flag_long = 2;
+ }
+ done = 1;
+ break;
+ }
+ case '1': case '2': case '3': case '4': case '5':
+ case '6': case '7': case '8': case '9': {
+ unsigned wx = c - '0';
+ while( (c = *++fmt)>='0' && c<='9' ){
+ wx = wx*10 + c - '0';
+ }
+ testcase( wx>0x7fffffff );
+ width = wx & 0x7fffffff;
+#ifdef SQLITE_PRINTF_PRECISION_LIMIT
+ if( width>SQLITE_PRINTF_PRECISION_LIMIT ){
+ width = SQLITE_PRINTF_PRECISION_LIMIT;
+ }
+#endif
+ if( c!='.' && c!='l' ){
+ done = 1;
+ }else{
+ fmt--;
+ }
+ break;
+ }
+ case '*': {
+ if( bArgList ){
+ width = (int)getIntArg(pArgList);
+ }else{
+ width = va_arg(ap,int);
+ }
+ if( width<0 ){
+ flag_leftjustify = 1;
+ width = width >= -2147483647 ? -width : 0;
+ }
+#ifdef SQLITE_PRINTF_PRECISION_LIMIT
+ if( width>SQLITE_PRINTF_PRECISION_LIMIT ){
+ width = SQLITE_PRINTF_PRECISION_LIMIT;
+ }
+#endif
+ if( (c = fmt[1])!='.' && c!='l' ){
+ c = *++fmt;
+ done = 1;
+ }
+ break;
+ }
+ case '.': {
+ c = *++fmt;
+ if( c=='*' ){
+ if( bArgList ){
+ precision = (int)getIntArg(pArgList);
+ }else{
+ precision = va_arg(ap,int);
+ }
+ if( precision<0 ){
+ precision = precision >= -2147483647 ? -precision : -1;
+ }
+ c = *++fmt;
+ }else{
+ unsigned px = 0;
+ while( c>='0' && c<='9' ){
+ px = px*10 + c - '0';
+ c = *++fmt;
+ }
+ testcase( px>0x7fffffff );
+ precision = px & 0x7fffffff;
+ }
+#ifdef SQLITE_PRINTF_PRECISION_LIMIT
+ if( precision>SQLITE_PRINTF_PRECISION_LIMIT ){
+ precision = SQLITE_PRINTF_PRECISION_LIMIT;
+ }
+#endif
+ if( c=='l' ){
+ --fmt;
+ }else{
+ done = 1;
+ }
+ break;
+ }
+ }
+ }while( !done && (c=(*++fmt))!=0 );
+
+ /* Fetch the info entry for the field */
+ infop = &fmtinfo[0];
+ xtype = etINVALID;
+ for(idx=0; idx<ArraySize(fmtinfo); idx++){
+ if( c==fmtinfo[idx].fmttype ){
+ infop = &fmtinfo[idx];
+ xtype = infop->type;
+ break;
+ }
+ }
+
+ /*
+ ** At this point, variables are initialized as follows:
+ **
+ ** flag_alternateform TRUE if a '#' is present.
+ ** flag_altform2 TRUE if a '!' is present.
+ ** flag_prefix '+' or ' ' or zero
+ ** flag_leftjustify TRUE if a '-' is present or if the
+ ** field width was negative.
+ ** flag_zeropad TRUE if the width began with 0.
+ ** flag_long 1 for "l", 2 for "ll"
+ ** width The specified field width. This is
+ ** always non-negative. Zero is the default.
+ ** precision The specified precision. The default
+ ** is -1.
+ ** xtype The class of the conversion.
+ ** infop Pointer to the appropriate info struct.
+ */
+ assert( width>=0 );
+ assert( precision>=(-1) );
+ switch( xtype ){
+ case etPOINTER:
+ flag_long = sizeof(char*)==sizeof(i64) ? 2 :
+ sizeof(char*)==sizeof(long int) ? 1 : 0;
+ /* no break */ deliberate_fall_through
+ case etORDINAL:
+ case etRADIX:
+ cThousand = 0;
+ /* no break */ deliberate_fall_through
+ case etDECIMAL:
+ if( infop->flags & FLAG_SIGNED ){
+ i64 v;
+ if( bArgList ){
+ v = getIntArg(pArgList);
+ }else if( flag_long ){
+ if( flag_long==2 ){
+ v = va_arg(ap,i64) ;
+ }else{
+ v = va_arg(ap,long int);
+ }
+ }else{
+ v = va_arg(ap,int);
+ }
+ if( v<0 ){
+ if( v==SMALLEST_INT64 ){
+ longvalue = ((u64)1)<<63;
+ }else{
+ longvalue = -v;
+ }
+ prefix = '-';
+ }else{
+ longvalue = v;
+ prefix = flag_prefix;
+ }
+ }else{
+ if( bArgList ){
+ longvalue = (u64)getIntArg(pArgList);
+ }else if( flag_long ){
+ if( flag_long==2 ){
+ longvalue = va_arg(ap,u64);
+ }else{
+ longvalue = va_arg(ap,unsigned long int);
+ }
+ }else{
+ longvalue = va_arg(ap,unsigned int);
+ }
+ prefix = 0;
+ }
+ if( longvalue==0 ) flag_alternateform = 0;
+ if( flag_zeropad && precision<width-(prefix!=0) ){
+ precision = width-(prefix!=0);
+ }
+ if( precision<etBUFSIZE-10-etBUFSIZE/3 ){
+ nOut = etBUFSIZE;
+ zOut = buf;
+ }else{
+ u64 n;
+ n = (u64)precision + 10;
+ if( cThousand ) n += precision/3;
+ zOut = zExtra = printfTempBuf(pAccum, n);
+ if( zOut==0 ) return;
+ nOut = (int)n;
+ }
+ bufpt = &zOut[nOut-1];
+ if( xtype==etORDINAL ){
+ static const char zOrd[] = "thstndrd";
+ int x = (int)(longvalue % 10);
+ if( x>=4 || (longvalue/10)%10==1 ){
+ x = 0;
+ }
+ *(--bufpt) = zOrd[x*2+1];
+ *(--bufpt) = zOrd[x*2];
+ }
+ {
+ const char *cset = &aDigits[infop->charset];
+ u8 base = infop->base;
+ do{ /* Convert to ascii */
+ *(--bufpt) = cset[longvalue%base];
+ longvalue = longvalue/base;
+ }while( longvalue>0 );
+ }
+ length = (int)(&zOut[nOut-1]-bufpt);
+ while( precision>length ){
+ *(--bufpt) = '0'; /* Zero pad */
+ length++;
+ }
+ if( cThousand ){
+ int nn = (length - 1)/3; /* Number of "," to insert */
+ int ix = (length - 1)%3 + 1;
+ bufpt -= nn;
+ for(idx=0; nn>0; idx++){
+ bufpt[idx] = bufpt[idx+nn];
+ ix--;
+ if( ix==0 ){
+ bufpt[++idx] = cThousand;
+ nn--;
+ ix = 3;
+ }
+ }
+ }
+ if( prefix ) *(--bufpt) = prefix; /* Add sign */
+ if( flag_alternateform && infop->prefix ){ /* Add "0" or "0x" */
+ const char *pre;
+ char x;
+ pre = &aPrefix[infop->prefix];
+ for(; (x=(*pre))!=0; pre++) *(--bufpt) = x;
+ }
+ length = (int)(&zOut[nOut-1]-bufpt);
+ break;
+ case etFLOAT:
+ case etEXP:
+ case etGENERIC:
+ if( bArgList ){
+ realvalue = getDoubleArg(pArgList);
+ }else{
+ realvalue = va_arg(ap,double);
+ }
+#ifdef SQLITE_OMIT_FLOATING_POINT
+ length = 0;
+#else
+ if( precision<0 ) precision = 6; /* Set default precision */
+#ifdef SQLITE_FP_PRECISION_LIMIT
+ if( precision>SQLITE_FP_PRECISION_LIMIT ){
+ precision = SQLITE_FP_PRECISION_LIMIT;
+ }
+#endif
+ if( realvalue<0.0 ){
+ realvalue = -realvalue;
+ prefix = '-';
+ }else{
+ prefix = flag_prefix;
+ }
+ if( xtype==etGENERIC && precision>0 ) precision--;
+ testcase( precision>0xfff );
+ idx = precision & 0xfff;
+ rounder = arRound[idx%10];
+ while( idx>=10 ){ rounder *= 1.0e-10; idx -= 10; }
+ if( xtype==etFLOAT ){
+ double rx = (double)realvalue;
+ sqlite3_uint64 u;
+ int ex;
+ memcpy(&u, &rx, sizeof(u));
+ ex = -1023 + (int)((u>>52)&0x7ff);
+ if( precision+(ex/3) < 15 ) rounder += realvalue*3e-16;
+ realvalue += rounder;
+ }
+ /* Normalize realvalue to within 10.0 > realvalue >= 1.0 */
+ exp = 0;
+ if( sqlite3IsNaN((double)realvalue) ){
+ bufpt = "NaN";
+ length = 3;
+ break;
+ }
+ if( realvalue>0.0 ){
+ LONGDOUBLE_TYPE scale = 1.0;
+ while( realvalue>=1e100*scale && exp<=350 ){ scale *= 1e100;exp+=100;}
+ while( realvalue>=1e10*scale && exp<=350 ){ scale *= 1e10; exp+=10; }
+ while( realvalue>=10.0*scale && exp<=350 ){ scale *= 10.0; exp++; }
+ realvalue /= scale;
+ while( realvalue<1e-8 ){ realvalue *= 1e8; exp-=8; }
+ while( realvalue<1.0 ){ realvalue *= 10.0; exp--; }
+ if( exp>350 ){
+ bufpt = buf;
+ buf[0] = prefix;
+ memcpy(buf+(prefix!=0),"Inf",4);
+ length = 3+(prefix!=0);
+ break;
+ }
+ }
+ bufpt = buf;
+ /*
+ ** If the field type is etGENERIC, then convert to either etEXP
+ ** or etFLOAT, as appropriate.
+ */
+ if( xtype!=etFLOAT ){
+ realvalue += rounder;
+ if( realvalue>=10.0 ){ realvalue *= 0.1; exp++; }
+ }
+ if( xtype==etGENERIC ){
+ flag_rtz = !flag_alternateform;
+ if( exp<-4 || exp>precision ){
+ xtype = etEXP;
+ }else{
+ precision = precision - exp;
+ xtype = etFLOAT;
+ }
+ }else{
+ flag_rtz = flag_altform2;
+ }
+ if( xtype==etEXP ){
+ e2 = 0;
+ }else{
+ e2 = exp;
+ }
+ {
+ i64 szBufNeeded; /* Size of a temporary buffer needed */
+ szBufNeeded = MAX(e2,0)+(i64)precision+(i64)width+15;
+ if( szBufNeeded > etBUFSIZE ){
+ bufpt = zExtra = printfTempBuf(pAccum, szBufNeeded);
+ if( bufpt==0 ) return;
+ }
+ }
+ zOut = bufpt;
+ nsd = 16 + flag_altform2*10;
+ flag_dp = (precision>0 ?1:0) | flag_alternateform | flag_altform2;
+ /* The sign in front of the number */
+ if( prefix ){
+ *(bufpt++) = prefix;
+ }
+ /* Digits prior to the decimal point */
+ if( e2<0 ){
+ *(bufpt++) = '0';
+ }else{
+ for(; e2>=0; e2--){
+ *(bufpt++) = et_getdigit(&realvalue,&nsd);
+ }
+ }
+ /* The decimal point */
+ if( flag_dp ){
+ *(bufpt++) = '.';
+ }
+ /* "0" digits after the decimal point but before the first
+ ** significant digit of the number */
+ for(e2++; e2<0; precision--, e2++){
+ assert( precision>0 );
+ *(bufpt++) = '0';
+ }
+ /* Significant digits after the decimal point */
+ while( (precision--)>0 ){
+ *(bufpt++) = et_getdigit(&realvalue,&nsd);
+ }
+ /* Remove trailing zeros and the "." if no digits follow the "." */
+ if( flag_rtz && flag_dp ){
+ while( bufpt[-1]=='0' ) *(--bufpt) = 0;
+ assert( bufpt>zOut );
+ if( bufpt[-1]=='.' ){
+ if( flag_altform2 ){
+ *(bufpt++) = '0';
+ }else{
+ *(--bufpt) = 0;
+ }
+ }
+ }
+ /* Add the "eNNN" suffix */
+ if( xtype==etEXP ){
+ *(bufpt++) = aDigits[infop->charset];
+ if( exp<0 ){
+ *(bufpt++) = '-'; exp = -exp;
+ }else{
+ *(bufpt++) = '+';
+ }
+ if( exp>=100 ){
+ *(bufpt++) = (char)((exp/100)+'0'); /* 100's digit */
+ exp %= 100;
+ }
+ *(bufpt++) = (char)(exp/10+'0'); /* 10's digit */
+ *(bufpt++) = (char)(exp%10+'0'); /* 1's digit */
+ }
+ *bufpt = 0;
+
+ /* The converted number is in buf[] and zero terminated. Output it.
+ ** Note that the number is in the usual order, not reversed as with
+ ** integer conversions. */
+ length = (int)(bufpt-zOut);
+ bufpt = zOut;
+
+ /* Special case: Add leading zeros if the flag_zeropad flag is
+ ** set and we are not left justified */
+ if( flag_zeropad && !flag_leftjustify && length < width){
+ int i;
+ int nPad = width - length;
+ for(i=width; i>=nPad; i--){
+ bufpt[i] = bufpt[i-nPad];
+ }
+ i = prefix!=0;
+ while( nPad-- ) bufpt[i++] = '0';
+ length = width;
+ }
+#endif /* !defined(SQLITE_OMIT_FLOATING_POINT) */
+ break;
+ case etSIZE:
+ if( !bArgList ){
+ *(va_arg(ap,int*)) = pAccum->nChar;
+ }
+ length = width = 0;
+ break;
+ case etPERCENT:
+ buf[0] = '%';
+ bufpt = buf;
+ length = 1;
+ break;
+ case etCHARX:
+ if( bArgList ){
+ bufpt = getTextArg(pArgList);
+ length = 1;
+ if( bufpt ){
+ buf[0] = c = *(bufpt++);
+ if( (c&0xc0)==0xc0 ){
+ while( length<4 && (bufpt[0]&0xc0)==0x80 ){
+ buf[length++] = *(bufpt++);
+ }
+ }
+ }else{
+ buf[0] = 0;
+ }
+ }else{
+ unsigned int ch = va_arg(ap,unsigned int);
+ if( ch<0x00080 ){
+ buf[0] = ch & 0xff;
+ length = 1;
+ }else if( ch<0x00800 ){
+ buf[0] = 0xc0 + (u8)((ch>>6)&0x1f);
+ buf[1] = 0x80 + (u8)(ch & 0x3f);
+ length = 2;
+ }else if( ch<0x10000 ){
+ buf[0] = 0xe0 + (u8)((ch>>12)&0x0f);
+ buf[1] = 0x80 + (u8)((ch>>6) & 0x3f);
+ buf[2] = 0x80 + (u8)(ch & 0x3f);
+ length = 3;
+ }else{
+ buf[0] = 0xf0 + (u8)((ch>>18) & 0x07);
+ buf[1] = 0x80 + (u8)((ch>>12) & 0x3f);
+ buf[2] = 0x80 + (u8)((ch>>6) & 0x3f);
+ buf[3] = 0x80 + (u8)(ch & 0x3f);
+ length = 4;
+ }
+ }
+ if( precision>1 ){
+ width -= precision-1;
+ if( width>1 && !flag_leftjustify ){
+ sqlite3_str_appendchar(pAccum, width-1, ' ');
+ width = 0;
+ }
+ while( precision-- > 1 ){
+ sqlite3_str_append(pAccum, buf, length);
+ }
+ }
+ bufpt = buf;
+ flag_altform2 = 1;
+ goto adjust_width_for_utf8;
+ case etSTRING:
+ case etDYNSTRING:
+ if( bArgList ){
+ bufpt = getTextArg(pArgList);
+ xtype = etSTRING;
+ }else{
+ bufpt = va_arg(ap,char*);
+ }
+ if( bufpt==0 ){
+ bufpt = "";
+ }else if( xtype==etDYNSTRING ){
+ if( pAccum->nChar==0
+ && pAccum->mxAlloc
+ && width==0
+ && precision<0
+ && pAccum->accError==0
+ ){
+ /* Special optimization for sqlite3_mprintf("%z..."):
+ ** Extend an existing memory allocation rather than creating
+ ** a new one. */
+ assert( (pAccum->printfFlags&SQLITE_PRINTF_MALLOCED)==0 );
+ pAccum->zText = bufpt;
+ pAccum->nAlloc = sqlite3DbMallocSize(pAccum->db, bufpt);
+ pAccum->nChar = 0x7fffffff & (int)strlen(bufpt);
+ pAccum->printfFlags |= SQLITE_PRINTF_MALLOCED;
+ length = 0;
+ break;
+ }
+ zExtra = bufpt;
+ }
+ if( precision>=0 ){
+ if( flag_altform2 ){
+ /* Set length to the number of bytes needed in order to display
+ ** precision characters */
+ unsigned char *z = (unsigned char*)bufpt;
+ while( precision-- > 0 && z[0] ){
+ SQLITE_SKIP_UTF8(z);
+ }
+ length = (int)(z - (unsigned char*)bufpt);
+ }else{
+ for(length=0; length<precision && bufpt[length]; length++){}
+ }
+ }else{
+ length = 0x7fffffff & (int)strlen(bufpt);
+ }
+ adjust_width_for_utf8:
+ if( flag_altform2 && width>0 ){
+ /* Adjust width to account for extra bytes in UTF-8 characters */
+ int ii = length - 1;
+ while( ii>=0 ) if( (bufpt[ii--] & 0xc0)==0x80 ) width++;
+ }
+ break;
+ case etSQLESCAPE: /* %q: Escape ' characters */
+ case etSQLESCAPE2: /* %Q: Escape ' and enclose in '...' */
+ case etSQLESCAPE3: { /* %w: Escape " characters */
+ int i, j, k, n, isnull;
+ int needQuote;
+ char ch;
+ char q = ((xtype==etSQLESCAPE3)?'"':'\''); /* Quote character */
+ char *escarg;
+
+ if( bArgList ){
+ escarg = getTextArg(pArgList);
+ }else{
+ escarg = va_arg(ap,char*);
+ }
+ isnull = escarg==0;
+ if( isnull ) escarg = (xtype==etSQLESCAPE2 ? "NULL" : "(NULL)");
+ /* For %q, %Q, and %w, the precision is the number of bytes (or
+ ** characters if the ! flag is present) to use from the input.
+ ** Because of the extra quoting characters inserted, the number
+ ** of output characters may be larger than the precision.
+ */
+ k = precision;
+ for(i=n=0; k!=0 && (ch=escarg[i])!=0; i++, k--){
+ if( ch==q ) n++;
+ if( flag_altform2 && (ch&0xc0)==0xc0 ){
+ while( (escarg[i+1]&0xc0)==0x80 ){ i++; }
+ }
+ }
+ needQuote = !isnull && xtype==etSQLESCAPE2;
+ n += i + 3;
+ if( n>etBUFSIZE ){
+ bufpt = zExtra = printfTempBuf(pAccum, n);
+ if( bufpt==0 ) return;
+ }else{
+ bufpt = buf;
+ }
+ j = 0;
+ if( needQuote ) bufpt[j++] = q;
+ k = i;
+ for(i=0; i<k; i++){
+ bufpt[j++] = ch = escarg[i];
+ if( ch==q ) bufpt[j++] = ch;
+ }
+ if( needQuote ) bufpt[j++] = q;
+ bufpt[j] = 0;
+ length = j;
+ goto adjust_width_for_utf8;
+ }
+ case etTOKEN: {
+ Token *pToken;
+ if( (pAccum->printfFlags & SQLITE_PRINTF_INTERNAL)==0 ) return;
+ pToken = va_arg(ap, Token*);
+ assert( bArgList==0 );
+ if( pToken && pToken->n ){
+ sqlite3_str_append(pAccum, (const char*)pToken->z, pToken->n);
+ }
+ length = width = 0;
+ break;
+ }
+ case etSRCLIST: {
+ SrcList *pSrc;
+ int k;
+ struct SrcList_item *pItem;
+ if( (pAccum->printfFlags & SQLITE_PRINTF_INTERNAL)==0 ) return;
+ pSrc = va_arg(ap, SrcList*);
+ k = va_arg(ap, int);
+ pItem = &pSrc->a[k];
+ assert( bArgList==0 );
+ assert( k>=0 && k<pSrc->nSrc );
+ if( pItem->zDatabase ){
+ sqlite3_str_appendall(pAccum, pItem->zDatabase);
+ sqlite3_str_append(pAccum, ".", 1);
+ }
+ sqlite3_str_appendall(pAccum, pItem->zName);
+ length = width = 0;
+ break;
+ }
+ default: {
+ assert( xtype==etINVALID );
+ return;
+ }
+ }/* End switch over the format type */
+ /*
+ ** The text of the conversion is pointed to by "bufpt" and is
+ ** "length" characters long. The field width is "width". Do
+ ** the output. Both length and width are in bytes, not characters,
+ ** at this point. If the "!" flag was present on string conversions
+ ** indicating that width and precision should be expressed in characters,
+ ** then the values have been translated prior to reaching this point.
+ */
+ width -= length;
+ if( width>0 ){
+ if( !flag_leftjustify ) sqlite3_str_appendchar(pAccum, width, ' ');
+ sqlite3_str_append(pAccum, bufpt, length);
+ if( flag_leftjustify ) sqlite3_str_appendchar(pAccum, width, ' ');
+ }else{
+ sqlite3_str_append(pAccum, bufpt, length);
+ }
+
+ if( zExtra ){
+ sqlite3DbFree(pAccum->db, zExtra);
+ zExtra = 0;
+ }
+ }/* End for loop over the format string */
+} /* End of function */
+
+/*
+** Enlarge the memory allocation on a StrAccum object so that it is
+** able to accept at least N more bytes of text.
+**
+** Return the number of bytes of text that StrAccum is able to accept
+** after the attempted enlargement. The value returned might be zero.
+*/
+static int sqlite3StrAccumEnlarge(StrAccum *p, int N){
+ char *zNew;
+ assert( p->nChar+(i64)N >= p->nAlloc ); /* Only called if really needed */
+ if( p->accError ){
+ testcase(p->accError==SQLITE_TOOBIG);
+ testcase(p->accError==SQLITE_NOMEM);
+ return 0;
+ }
+ if( p->mxAlloc==0 ){
+ setStrAccumError(p, SQLITE_TOOBIG);
+ return p->nAlloc - p->nChar - 1;
+ }else{
+ char *zOld = isMalloced(p) ? p->zText : 0;
+ i64 szNew = p->nChar;
+ szNew += N + 1;
+ if( szNew+p->nChar<=p->mxAlloc ){
+ /* Force exponential buffer size growth as long as it does not overflow,
+ ** to avoid having to call this routine too often */
+ szNew += p->nChar;
+ }
+ if( szNew > p->mxAlloc ){
+ sqlite3_str_reset(p);
+ setStrAccumError(p, SQLITE_TOOBIG);
+ return 0;
+ }else{
+ p->nAlloc = (int)szNew;
+ }
+ if( p->db ){
+ zNew = sqlite3DbRealloc(p->db, zOld, p->nAlloc);
+ }else{
+ zNew = sqlite3Realloc(zOld, p->nAlloc);
+ }
+ if( zNew ){
+ assert( p->zText!=0 || p->nChar==0 );
+ if( !isMalloced(p) && p->nChar>0 ) memcpy(zNew, p->zText, p->nChar);
+ p->zText = zNew;
+ p->nAlloc = sqlite3DbMallocSize(p->db, zNew);
+ p->printfFlags |= SQLITE_PRINTF_MALLOCED;
+ }else{
+ sqlite3_str_reset(p);
+ setStrAccumError(p, SQLITE_NOMEM);
+ return 0;
+ }
+ }
+ return N;
+}
+
+/*
+** Append N copies of character c to the given string buffer.
+*/
+SQLITE_API void sqlite3_str_appendchar(sqlite3_str *p, int N, char c){
+ testcase( p->nChar + (i64)N > 0x7fffffff );
+ if( p->nChar+(i64)N >= p->nAlloc && (N = sqlite3StrAccumEnlarge(p, N))<=0 ){
+ return;
+ }
+ while( (N--)>0 ) p->zText[p->nChar++] = c;
+}
+
+/*
+** The StrAccum "p" is not large enough to accept N new bytes of z[].
+** So enlarge it first, then do the append.
+**
+** This is a helper routine to sqlite3_str_append() that does special-case
+** work (enlarging the buffer) using tail recursion, so that the
+** sqlite3_str_append() routine can use fast calling semantics.
+*/
+static void SQLITE_NOINLINE enlargeAndAppend(StrAccum *p, const char *z, int N){
+ N = sqlite3StrAccumEnlarge(p, N);
+ if( N>0 ){
+ memcpy(&p->zText[p->nChar], z, N);
+ p->nChar += N;
+ }
+}
+
+/*
+** Append N bytes of text from z to the StrAccum object. Increase the
+** size of the memory allocation for StrAccum if necessary.
+*/
+SQLITE_API void sqlite3_str_append(sqlite3_str *p, const char *z, int N){
+ assert( z!=0 || N==0 );
+ assert( p->zText!=0 || p->nChar==0 || p->accError );
+ assert( N>=0 );
+ assert( p->accError==0 || p->nAlloc==0 || p->mxAlloc==0 );
+ if( p->nChar+N >= p->nAlloc ){
+ enlargeAndAppend(p,z,N);
+ }else if( N ){
+ assert( p->zText );
+ p->nChar += N;
+ memcpy(&p->zText[p->nChar-N], z, N);
+ }
+}
+
+/*
+** Append the complete text of zero-terminated string z[] to the p string.
+*/
+SQLITE_API void sqlite3_str_appendall(sqlite3_str *p, const char *z){
+ sqlite3_str_append(p, z, sqlite3Strlen30(z));
+}
+
+
+/*
+** Finish off a string by making sure it is zero-terminated.
+** Return a pointer to the resulting string. Return a NULL
+** pointer if any kind of error was encountered.
+*/
+static SQLITE_NOINLINE char *strAccumFinishRealloc(StrAccum *p){
+ char *zText;
+ assert( p->mxAlloc>0 && !isMalloced(p) );
+ zText = sqlite3DbMallocRaw(p->db, p->nChar+1 );
+ if( zText ){
+ memcpy(zText, p->zText, p->nChar+1);
+ p->printfFlags |= SQLITE_PRINTF_MALLOCED;
+ }else{
+ setStrAccumError(p, SQLITE_NOMEM);
+ }
+ p->zText = zText;
+ return zText;
+}
+SQLITE_PRIVATE char *sqlite3StrAccumFinish(StrAccum *p){
+ if( p->zText ){
+ p->zText[p->nChar] = 0;
+ if( p->mxAlloc>0 && !isMalloced(p) ){
+ return strAccumFinishRealloc(p);
+ }
+ }
+ return p->zText;
+}
+
+/*
+** This singleton is an sqlite3_str object that is returned if
+** sqlite3_malloc() fails to provide space for a real one. This
+** sqlite3_str object accepts no new text and always returns
+** an SQLITE_NOMEM error.
+*/
+static sqlite3_str sqlite3OomStr = {
+ 0, 0, 0, 0, 0, SQLITE_NOMEM, 0
+};
+
+/* Finalize a string created using sqlite3_str_new().
+*/
+SQLITE_API char *sqlite3_str_finish(sqlite3_str *p){
+ char *z;
+ if( p!=0 && p!=&sqlite3OomStr ){
+ z = sqlite3StrAccumFinish(p);
+ sqlite3_free(p);
+ }else{
+ z = 0;
+ }
+ return z;
+}
+
+/* Return any error code associated with p */
+SQLITE_API int sqlite3_str_errcode(sqlite3_str *p){
+ return p ? p->accError : SQLITE_NOMEM;
+}
+
+/* Return the current length of p in bytes */
+SQLITE_API int sqlite3_str_length(sqlite3_str *p){
+ return p ? p->nChar : 0;
+}
+
+/* Return the current value for p */
+SQLITE_API char *sqlite3_str_value(sqlite3_str *p){
+ if( p==0 || p->nChar==0 ) return 0;
+ p->zText[p->nChar] = 0;
+ return p->zText;
+}
+
+/*
+** Reset an StrAccum string. Reclaim all malloced memory.
+*/
+SQLITE_API void sqlite3_str_reset(StrAccum *p){
+ if( isMalloced(p) ){
+ sqlite3DbFree(p->db, p->zText);
+ p->printfFlags &= ~SQLITE_PRINTF_MALLOCED;
+ }
+ p->nAlloc = 0;
+ p->nChar = 0;
+ p->zText = 0;
+}
+
+/*
+** Initialize a string accumulator.
+**
+** p: The accumulator to be initialized.
+** db: Pointer to a database connection. May be NULL. Lookaside
+** memory is used if not NULL. db->mallocFailed is set appropriately
+** when not NULL.
+** zBase: An initial buffer. May be NULL in which case the initial buffer
+** is malloced.
+** n: Size of zBase in bytes. If total space requirements never exceed
+** n then no memory allocations ever occur.
+** mx: Maximum number of bytes to accumulate. If mx==0 then no memory
+** allocations will ever occur.
+*/
+SQLITE_PRIVATE void sqlite3StrAccumInit(StrAccum *p, sqlite3 *db, char *zBase, int n, int mx){
+ p->zText = zBase;
+ p->db = db;
+ p->nAlloc = n;
+ p->mxAlloc = mx;
+ p->nChar = 0;
+ p->accError = 0;
+ p->printfFlags = 0;
+}
+
+/* Allocate and initialize a new dynamic string object */
+SQLITE_API sqlite3_str *sqlite3_str_new(sqlite3 *db){
+ sqlite3_str *p = sqlite3_malloc64(sizeof(*p));
+ if( p ){
+ sqlite3StrAccumInit(p, 0, 0, 0,
+ db ? db->aLimit[SQLITE_LIMIT_LENGTH] : SQLITE_MAX_LENGTH);
+ }else{
+ p = &sqlite3OomStr;
+ }
+ return p;
+}
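+
+/* Illustrative sketch (not part of the amalgamation): typical lifecycle of
+** the public sqlite3_str interface defined above. The helper name
+** buildInsert and its arguments are hypothetical.
+*/
+#if 0
+static char *buildInsert(sqlite3 *db, const char *zTab, const char *zVal){
+  sqlite3_str *pStr = sqlite3_str_new(db);  /* Never NULL; may be OOM stub */
+  sqlite3_str_appendf(pStr, "INSERT INTO \"%w\" VALUES('%q');", zTab, zVal);
+  if( sqlite3_str_errcode(pStr)!=SQLITE_OK ){
+    sqlite3_free(sqlite3_str_finish(pStr)); /* Discard any partial text */
+    return 0;
+  }
+  return sqlite3_str_finish(pStr);  /* Caller frees with sqlite3_free() */
+}
+#endif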
+
+/*
+** Print into memory obtained from sqliteMalloc(). Use the internal
+** %-conversion extensions.
+*/
+SQLITE_PRIVATE char *sqlite3VMPrintf(sqlite3 *db, const char *zFormat, va_list ap){
+ char *z;
+ char zBase[SQLITE_PRINT_BUF_SIZE];
+ StrAccum acc;
+ assert( db!=0 );
+ sqlite3StrAccumInit(&acc, db, zBase, sizeof(zBase),
+ db->aLimit[SQLITE_LIMIT_LENGTH]);
+ acc.printfFlags = SQLITE_PRINTF_INTERNAL;
+ sqlite3_str_vappendf(&acc, zFormat, ap);
+ z = sqlite3StrAccumFinish(&acc);
+ if( acc.accError==SQLITE_NOMEM ){
+ sqlite3OomFault(db);
+ }
+ return z;
+}
+
+/*
+** Print into memory obtained from sqliteMalloc(). Use the internal
+** %-conversion extensions.
+*/
+SQLITE_PRIVATE char *sqlite3MPrintf(sqlite3 *db, const char *zFormat, ...){
+ va_list ap;
+ char *z;
+ va_start(ap, zFormat);
+ z = sqlite3VMPrintf(db, zFormat, ap);
+ va_end(ap);
+ return z;
+}
+
+/*
+** Print into memory obtained from sqlite3_malloc(). Omit the internal
+** %-conversion extensions.
+*/
+SQLITE_API char *sqlite3_vmprintf(const char *zFormat, va_list ap){
+ char *z;
+ char zBase[SQLITE_PRINT_BUF_SIZE];
+ StrAccum acc;
+
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( zFormat==0 ){
+ (void)SQLITE_MISUSE_BKPT;
+ return 0;
+ }
+#endif
+#ifndef SQLITE_OMIT_AUTOINIT
+ if( sqlite3_initialize() ) return 0;
+#endif
+ sqlite3StrAccumInit(&acc, 0, zBase, sizeof(zBase), SQLITE_MAX_LENGTH);
+ sqlite3_str_vappendf(&acc, zFormat, ap);
+ z = sqlite3StrAccumFinish(&acc);
+ return z;
+}
+
+/*
+** Print into memory obtained from sqlite3_malloc(). Omit the internal
+** %-conversion extensions.
+*/
+SQLITE_API char *sqlite3_mprintf(const char *zFormat, ...){
+ va_list ap;
+ char *z;
+#ifndef SQLITE_OMIT_AUTOINIT
+ if( sqlite3_initialize() ) return 0;
+#endif
+ va_start(ap, zFormat);
+ z = sqlite3_vmprintf(zFormat, ap);
+ va_end(ap);
+ return z;
+}
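+
+/* Illustrative sketch (not part of the amalgamation): the %q conversion
+** implemented by etSQLESCAPE above doubles embedded single-quotes, so the
+** result is a well-formed SQL literal even for hostile input. The zName
+** variable is hypothetical.
+*/
+#if 0
+  char *zSql = sqlite3_mprintf("SELECT * FROM t WHERE name='%q'", zName);
+  /* For zName=="O'Reilly" this yields: SELECT * FROM t WHERE name='O''Reilly'
+  ** zSql is NULL on OOM; release it with sqlite3_free(zSql). */
+  sqlite3_free(zSql);
+#endif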
+
+/*
+** sqlite3_snprintf() works like snprintf() except that it ignores the
+** current locale settings. This is important for SQLite because we
+** are not able to use a "," as the decimal point in place of "." as
+** specified by some locales.
+**
+** Oops: The first two arguments of sqlite3_snprintf() are backwards
+** from the snprintf() standard. Unfortunately, it is too late to change
+** this without breaking compatibility, so we just have to live with the
+** mistake.
+**
+** sqlite3_vsnprintf() is the varargs version.
+*/
+SQLITE_API char *sqlite3_vsnprintf(int n, char *zBuf, const char *zFormat, va_list ap){
+ StrAccum acc;
+ if( n<=0 ) return zBuf;
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( zBuf==0 || zFormat==0 ) {
+ (void)SQLITE_MISUSE_BKPT;
+ if( zBuf ) zBuf[0] = 0;
+ return zBuf;
+ }
+#endif
+ sqlite3StrAccumInit(&acc, 0, zBuf, n, 0);
+ sqlite3_str_vappendf(&acc, zFormat, ap);
+ zBuf[acc.nChar] = 0;
+ return zBuf;
+}
+SQLITE_API char *sqlite3_snprintf(int n, char *zBuf, const char *zFormat, ...){
+ char *z;
+ va_list ap;
+ va_start(ap,zFormat);
+ z = sqlite3_vsnprintf(n, zBuf, zFormat, ap);
+ va_end(ap);
+ return z;
+}
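+
+/* Illustrative sketch (not part of the amalgamation): the reversed argument
+** order described above -- buffer size first, then the buffer.
+*/
+#if 0
+  char zBuf[50];
+  sqlite3_snprintf(sizeof(zBuf), zBuf, "pi is roughly %.5f", 3.14159);
+  /* Standard C equivalent: snprintf(zBuf, sizeof(zBuf), "...", ...); */
+#endif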
+
+/*
+** This is the routine that actually formats the sqlite3_log() message.
+** We house it in a separate routine from sqlite3_log() to avoid using
+** stack space on small-stack systems when logging is disabled.
+**
+** sqlite3_log() must render into a static buffer. It cannot dynamically
+** allocate memory because it might be called while the memory allocator
+** mutex is held.
+**
+** sqlite3_str_vappendf() might ask for *temporary* memory allocations for
+** certain format characters (%q) or for very large precisions or widths.
+** Care must be taken that any sqlite3_log() calls that occur while the
+** memory mutex is held do not use these mechanisms.
+*/
+static void renderLogMsg(int iErrCode, const char *zFormat, va_list ap){
+ StrAccum acc; /* String accumulator */
+ char zMsg[SQLITE_PRINT_BUF_SIZE*3]; /* Complete log message */
+
+ sqlite3StrAccumInit(&acc, 0, zMsg, sizeof(zMsg), 0);
+ sqlite3_str_vappendf(&acc, zFormat, ap);
+ sqlite3GlobalConfig.xLog(sqlite3GlobalConfig.pLogArg, iErrCode,
+ sqlite3StrAccumFinish(&acc));
+}
+
+/*
+** Format and write a message to the log if logging is enabled.
+*/
+SQLITE_API void sqlite3_log(int iErrCode, const char *zFormat, ...){
+ va_list ap; /* Vararg list */
+ if( sqlite3GlobalConfig.xLog ){
+ va_start(ap, zFormat);
+ renderLogMsg(iErrCode, zFormat, ap);
+ va_end(ap);
+ }
+}
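+
+/* Illustrative sketch (not part of the amalgamation): logging is enabled by
+** registering a callback through sqlite3_config(SQLITE_CONFIG_LOG,...)
+** before SQLite is initialized. The callback name logCallback is
+** hypothetical; per the constraints above it must be fast, threadsafe,
+** and must not call back into SQLite.
+*/
+#if 0
+static void logCallback(void *pArg, int iErrCode, const char *zMsg){
+  fprintf(stderr, "sqlite(%d): %s\n", iErrCode, zMsg);
+}
+/* At application startup, before any other SQLite interface is used: */
+sqlite3_config(SQLITE_CONFIG_LOG, logCallback, (void*)0);
+#endif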
+
+#if defined(SQLITE_DEBUG) || defined(SQLITE_HAVE_OS_TRACE)
+/*
+** A version of printf() that understands %lld. Used for debugging.
+** The printf() built into some versions of windows does not understand %lld
+** and segfaults if you give it a long long int.
+*/
+SQLITE_PRIVATE void sqlite3DebugPrintf(const char *zFormat, ...){
+ va_list ap;
+ StrAccum acc;
+ char zBuf[SQLITE_PRINT_BUF_SIZE*10];
+ sqlite3StrAccumInit(&acc, 0, zBuf, sizeof(zBuf), 0);
+ va_start(ap,zFormat);
+ sqlite3_str_vappendf(&acc, zFormat, ap);
+ va_end(ap);
+ sqlite3StrAccumFinish(&acc);
+#ifdef SQLITE_OS_TRACE_PROC
+ {
+ extern void SQLITE_OS_TRACE_PROC(const char *zBuf, int nBuf);
+ SQLITE_OS_TRACE_PROC(zBuf, sizeof(zBuf));
+ }
+#else
+ fprintf(stdout,"%s", zBuf);
+ fflush(stdout);
+#endif
+}
+#endif
+
+
+/*
+** Variable-argument wrapper around sqlite3_str_vappendf(). Setting the
+** SQLITE_PRINTF_INTERNAL bit in p->printfFlags enables the internal formats.
+*/
+SQLITE_API void sqlite3_str_appendf(StrAccum *p, const char *zFormat, ...){
+ va_list ap;
+ va_start(ap,zFormat);
+ sqlite3_str_vappendf(p, zFormat, ap);
+ va_end(ap);
+}
+
+/************** End of printf.c **********************************************/
+/************** Begin file treeview.c ****************************************/
+/*
+** 2015-06-08
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+**
+** This file contains C code to implement the TreeView debugging routines.
+** These routines print a parse tree to standard output for debugging and
+** analysis.
+**
+** The interfaces in this file are only available when compiling
+** with SQLITE_DEBUG.
+*/
+/* #include "sqliteInt.h" */
+#ifdef SQLITE_DEBUG
+
+/*
+** Add a new subitem to the tree. The moreToFollow flag indicates that this
+** is not the last item in the tree.
+*/
+static TreeView *sqlite3TreeViewPush(TreeView *p, u8 moreToFollow){
+ if( p==0 ){
+ p = sqlite3_malloc64( sizeof(*p) );
+ if( p==0 ) return 0;
+ memset(p, 0, sizeof(*p));
+ }else{
+ p->iLevel++;
+ }
+ assert( moreToFollow==0 || moreToFollow==1 );
+ if( p->iLevel<sizeof(p->bLine) ) p->bLine[p->iLevel] = moreToFollow;
+ return p;
+}
+
+/*
+** Finished with one layer of the tree
+*/
+static void sqlite3TreeViewPop(TreeView *p){
+ if( p==0 ) return;
+ p->iLevel--;
+ if( p->iLevel<0 ) sqlite3_free(p);
+}
+
+/*
+** Generate a single line of output for the tree, with a prefix that contains
+** all the appropriate tree lines
+*/
+static void sqlite3TreeViewLine(TreeView *p, const char *zFormat, ...){
+ va_list ap;
+ int i;
+ StrAccum acc;
+ char zBuf[500];
+ sqlite3StrAccumInit(&acc, 0, zBuf, sizeof(zBuf), 0);
+ if( p ){
+ for(i=0; i<p->iLevel && i<sizeof(p->bLine)-1; i++){
+ sqlite3_str_append(&acc, p->bLine[i] ? "| " : " ", 4);
+ }
+ sqlite3_str_append(&acc, p->bLine[i] ? "|-- " : "'-- ", 4);
+ }
+ if( zFormat!=0 ){
+ va_start(ap, zFormat);
+ sqlite3_str_vappendf(&acc, zFormat, ap);
+ va_end(ap);
+ assert( acc.nChar>0 || acc.accError );
+ sqlite3_str_append(&acc, "\n", 1);
+ }
+ sqlite3StrAccumFinish(&acc);
+ fprintf(stdout,"%s", zBuf);
+ fflush(stdout);
+}
+
+/*
+** Shorthand for starting a new tree item that consists of a single label
+*/
+static void sqlite3TreeViewItem(TreeView *p, const char *zLabel,u8 moreFollows){
+ p = sqlite3TreeViewPush(p, moreFollows);
+ sqlite3TreeViewLine(p, "%s", zLabel);
+}
+
+/*
+** Generate a human-readable description of a WITH clause.
+*/
+SQLITE_PRIVATE void sqlite3TreeViewWith(TreeView *pView, const With *pWith, u8 moreToFollow){
+ int i;
+ if( pWith==0 ) return;
+ if( pWith->nCte==0 ) return;
+ if( pWith->pOuter ){
+ sqlite3TreeViewLine(pView, "WITH (0x%p, pOuter=0x%p)",pWith,pWith->pOuter);
+ }else{
+ sqlite3TreeViewLine(pView, "WITH (0x%p)", pWith);
+ }
+ if( pWith->nCte>0 ){
+ pView = sqlite3TreeViewPush(pView, 1);
+ for(i=0; i<pWith->nCte; i++){
+ StrAccum x;
+ char zLine[1000];
+ const struct Cte *pCte = &pWith->a[i];
+ sqlite3StrAccumInit(&x, 0, zLine, sizeof(zLine), 0);
+ sqlite3_str_appendf(&x, "%s", pCte->zName);
+ if( pCte->pCols && pCte->pCols->nExpr>0 ){
+ char cSep = '(';
+ int j;
+ for(j=0; j<pCte->pCols->nExpr; j++){
+ sqlite3_str_appendf(&x, "%c%s", cSep, pCte->pCols->a[j].zEName);
+ cSep = ',';
+ }
+ sqlite3_str_appendf(&x, ")");
+ }
+ sqlite3_str_appendf(&x, " AS");
+ sqlite3StrAccumFinish(&x);
+ sqlite3TreeViewItem(pView, zLine, i<pWith->nCte-1);
+ sqlite3TreeViewSelect(pView, pCte->pSelect, 0);
+ sqlite3TreeViewPop(pView);
+ }
+ sqlite3TreeViewPop(pView);
+ }
+}
+
+/*
+** Generate a human-readable description of a SrcList object.
+*/
+SQLITE_PRIVATE void sqlite3TreeViewSrcList(TreeView *pView, const SrcList *pSrc){
+ int i;
+ for(i=0; i<pSrc->nSrc; i++){
+ const struct SrcList_item *pItem = &pSrc->a[i];
+ StrAccum x;
+ char zLine[100];
+ sqlite3StrAccumInit(&x, 0, zLine, sizeof(zLine), 0);
+ sqlite3_str_appendf(&x, "{%d:*}", pItem->iCursor);
+ if( pItem->zDatabase ){
+ sqlite3_str_appendf(&x, " %s.%s", pItem->zDatabase, pItem->zName);
+ }else if( pItem->zName ){
+ sqlite3_str_appendf(&x, " %s", pItem->zName);
+ }
+ if( pItem->pTab ){
+ sqlite3_str_appendf(&x, " tab=%Q nCol=%d ptr=%p used=%llx",
+ pItem->pTab->zName, pItem->pTab->nCol, pItem->pTab, pItem->colUsed);
+ }
+ if( pItem->zAlias ){
+ sqlite3_str_appendf(&x, " (AS %s)", pItem->zAlias);
+ }
+ if( pItem->fg.jointype & JT_LEFT ){
+ sqlite3_str_appendf(&x, " LEFT-JOIN");
+ }
+ if( pItem->fg.fromDDL ){
+ sqlite3_str_appendf(&x, " DDL");
+ }
+ sqlite3StrAccumFinish(&x);
+ sqlite3TreeViewItem(pView, zLine, i<pSrc->nSrc-1);
+ if( pItem->pSelect ){
+ sqlite3TreeViewSelect(pView, pItem->pSelect, 0);
+ }
+ if( pItem->fg.isTabFunc ){
+ sqlite3TreeViewExprList(pView, pItem->u1.pFuncArg, 0, "func-args:");
+ }
+ sqlite3TreeViewPop(pView);
+ }
+}
+
+/*
+** Generate a human-readable description of a Select object.
+*/
+SQLITE_PRIVATE void sqlite3TreeViewSelect(TreeView *pView, const Select *p, u8 moreToFollow){
+ int n = 0;
+ int cnt = 0;
+ if( p==0 ){
+ sqlite3TreeViewLine(pView, "nil-SELECT");
+ return;
+ }
+ pView = sqlite3TreeViewPush(pView, moreToFollow);
+ if( p->pWith ){
+ sqlite3TreeViewWith(pView, p->pWith, 1);
+ cnt = 1;
+ sqlite3TreeViewPush(pView, 1);
+ }
+ do{
+ if( p->selFlags & SF_WhereBegin ){
+ sqlite3TreeViewLine(pView, "sqlite3WhereBegin()");
+ }else{
+ sqlite3TreeViewLine(pView,
+ "SELECT%s%s (%u/%p) selFlags=0x%x nSelectRow=%d",
+ ((p->selFlags & SF_Distinct) ? " DISTINCT" : ""),
+ ((p->selFlags & SF_Aggregate) ? " agg_flag" : ""),
+ p->selId, p, p->selFlags,
+ (int)p->nSelectRow
+ );
+ }
+ if( cnt++ ) sqlite3TreeViewPop(pView);
+ if( p->pPrior ){
+ n = 1000;
+ }else{
+ n = 0;
+ if( p->pSrc && p->pSrc->nSrc ) n++;
+ if( p->pWhere ) n++;
+ if( p->pGroupBy ) n++;
+ if( p->pHaving ) n++;
+ if( p->pOrderBy ) n++;
+ if( p->pLimit ) n++;
+#ifndef SQLITE_OMIT_WINDOWFUNC
+ if( p->pWin ) n++;
+ if( p->pWinDefn ) n++;
+#endif
+ }
+ if( p->pEList ){
+ sqlite3TreeViewExprList(pView, p->pEList, n>0, "result-set");
+ }
+ n--;
+#ifndef SQLITE_OMIT_WINDOWFUNC
+ if( p->pWin ){
+ Window *pX;
+ pView = sqlite3TreeViewPush(pView, (n--)>0);
+ sqlite3TreeViewLine(pView, "window-functions");
+ for(pX=p->pWin; pX; pX=pX->pNextWin){
+ sqlite3TreeViewWinFunc(pView, pX, pX->pNextWin!=0);
+ }
+ sqlite3TreeViewPop(pView);
+ }
+#endif
+ if( p->pSrc && p->pSrc->nSrc ){
+ pView = sqlite3TreeViewPush(pView, (n--)>0);
+ sqlite3TreeViewLine(pView, "FROM");
+ sqlite3TreeViewSrcList(pView, p->pSrc);
+ sqlite3TreeViewPop(pView);
+ }
+ if( p->pWhere ){
+ sqlite3TreeViewItem(pView, "WHERE", (n--)>0);
+ sqlite3TreeViewExpr(pView, p->pWhere, 0);
+ sqlite3TreeViewPop(pView);
+ }
+ if( p->pGroupBy ){
+ sqlite3TreeViewExprList(pView, p->pGroupBy, (n--)>0, "GROUPBY");
+ }
+ if( p->pHaving ){
+ sqlite3TreeViewItem(pView, "HAVING", (n--)>0);
+ sqlite3TreeViewExpr(pView, p->pHaving, 0);
+ sqlite3TreeViewPop(pView);
+ }
+#ifndef SQLITE_OMIT_WINDOWFUNC
+ if( p->pWinDefn ){
+ Window *pX;
+ sqlite3TreeViewItem(pView, "WINDOW", (n--)>0);
+ for(pX=p->pWinDefn; pX; pX=pX->pNextWin){
+ sqlite3TreeViewWindow(pView, pX, pX->pNextWin!=0);
+ }
+ sqlite3TreeViewPop(pView);
+ }
+#endif
+ if( p->pOrderBy ){
+ sqlite3TreeViewExprList(pView, p->pOrderBy, (n--)>0, "ORDERBY");
+ }
+ if( p->pLimit ){
+ sqlite3TreeViewItem(pView, "LIMIT", (n--)>0);
+ sqlite3TreeViewExpr(pView, p->pLimit->pLeft, p->pLimit->pRight!=0);
+ if( p->pLimit->pRight ){
+ sqlite3TreeViewItem(pView, "OFFSET", (n--)>0);
+ sqlite3TreeViewExpr(pView, p->pLimit->pRight, 0);
+ sqlite3TreeViewPop(pView);
+ }
+ sqlite3TreeViewPop(pView);
+ }
+ if( p->pPrior ){
+ const char *zOp = "UNION";
+ switch( p->op ){
+ case TK_ALL: zOp = "UNION ALL"; break;
+ case TK_INTERSECT: zOp = "INTERSECT"; break;
+ case TK_EXCEPT: zOp = "EXCEPT"; break;
+ }
+ sqlite3TreeViewItem(pView, zOp, 1);
+ }
+ p = p->pPrior;
+ }while( p!=0 );
+ sqlite3TreeViewPop(pView);
+}
+
+#ifndef SQLITE_OMIT_WINDOWFUNC
+/*
+** Generate a description of starting or stopping bounds
+*/
+SQLITE_PRIVATE void sqlite3TreeViewBound(
+ TreeView *pView, /* View context */
+ u8 eBound, /* UNBOUNDED, CURRENT, PRECEDING, FOLLOWING */
+ Expr *pExpr, /* Value for PRECEDING or FOLLOWING */
+ u8 moreToFollow /* True if more to follow */
+){
+ switch( eBound ){
+ case TK_UNBOUNDED: {
+ sqlite3TreeViewItem(pView, "UNBOUNDED", moreToFollow);
+ sqlite3TreeViewPop(pView);
+ break;
+ }
+ case TK_CURRENT: {
+ sqlite3TreeViewItem(pView, "CURRENT", moreToFollow);
+ sqlite3TreeViewPop(pView);
+ break;
+ }
+ case TK_PRECEDING: {
+ sqlite3TreeViewItem(pView, "PRECEDING", moreToFollow);
+ sqlite3TreeViewExpr(pView, pExpr, 0);
+ sqlite3TreeViewPop(pView);
+ break;
+ }
+ case TK_FOLLOWING: {
+ sqlite3TreeViewItem(pView, "FOLLOWING", moreToFollow);
+ sqlite3TreeViewExpr(pView, pExpr, 0);
+ sqlite3TreeViewPop(pView);
+ break;
+ }
+ }
+}
+#endif /* SQLITE_OMIT_WINDOWFUNC */
+
+#ifndef SQLITE_OMIT_WINDOWFUNC
+/*
+** Generate a human-readable explanation for a Window object
+*/
+SQLITE_PRIVATE void sqlite3TreeViewWindow(TreeView *pView, const Window *pWin, u8 more){
+ int nElement = 0;
+ if( pWin->pFilter ){
+ sqlite3TreeViewItem(pView, "FILTER", 1);
+ sqlite3TreeViewExpr(pView, pWin->pFilter, 0);
+ sqlite3TreeViewPop(pView);
+ }
+ pView = sqlite3TreeViewPush(pView, more);
+ if( pWin->zName ){
+ sqlite3TreeViewLine(pView, "OVER %s (%p)", pWin->zName, pWin);
+ }else{
+ sqlite3TreeViewLine(pView, "OVER (%p)", pWin);
+ }
+ if( pWin->zBase ) nElement++;
+ if( pWin->pOrderBy ) nElement++;
+ if( pWin->eFrmType ) nElement++;
+ if( pWin->eExclude ) nElement++;
+ if( pWin->zBase ){
+ sqlite3TreeViewPush(pView, (--nElement)>0);
+ sqlite3TreeViewLine(pView, "window: %s", pWin->zBase);
+ sqlite3TreeViewPop(pView);
+ }
+ if( pWin->pPartition ){
+ sqlite3TreeViewExprList(pView, pWin->pPartition, nElement>0,"PARTITION-BY");
+ }
+ if( pWin->pOrderBy ){
+ sqlite3TreeViewExprList(pView, pWin->pOrderBy, (--nElement)>0, "ORDER-BY");
+ }
+ if( pWin->eFrmType ){
+ char zBuf[30];
+ const char *zFrmType = "ROWS";
+ if( pWin->eFrmType==TK_RANGE ) zFrmType = "RANGE";
+ if( pWin->eFrmType==TK_GROUPS ) zFrmType = "GROUPS";
+ sqlite3_snprintf(sizeof(zBuf),zBuf,"%s%s",zFrmType,
+ pWin->bImplicitFrame ? " (implied)" : "");
+ sqlite3TreeViewItem(pView, zBuf, (--nElement)>0);
+ sqlite3TreeViewBound(pView, pWin->eStart, pWin->pStart, 1);
+ sqlite3TreeViewBound(pView, pWin->eEnd, pWin->pEnd, 0);
+ sqlite3TreeViewPop(pView);
+ }
+ if( pWin->eExclude ){
+ char zBuf[30];
+ const char *zExclude;
+ switch( pWin->eExclude ){
+ case TK_NO: zExclude = "NO OTHERS"; break;
+ case TK_CURRENT: zExclude = "CURRENT ROW"; break;
+ case TK_GROUP: zExclude = "GROUP"; break;
+ case TK_TIES: zExclude = "TIES"; break;
+ default:
+ sqlite3_snprintf(sizeof(zBuf),zBuf,"invalid(%d)", pWin->eExclude);
+ zExclude = zBuf;
+ break;
+ }
+ sqlite3TreeViewPush(pView, 0);
+ sqlite3TreeViewLine(pView, "EXCLUDE %s", zExclude);
+ sqlite3TreeViewPop(pView);
+ }
+ sqlite3TreeViewPop(pView);
+}
+#endif /* SQLITE_OMIT_WINDOWFUNC */
+
+#ifndef SQLITE_OMIT_WINDOWFUNC
+/*
+** Generate a human-readable explanation for a Window Function object
+*/
+SQLITE_PRIVATE void sqlite3TreeViewWinFunc(TreeView *pView, const Window *pWin, u8 more){
+ pView = sqlite3TreeViewPush(pView, more);
+ sqlite3TreeViewLine(pView, "WINFUNC %s(%d)",
+ pWin->pFunc->zName, pWin->pFunc->nArg);
+ sqlite3TreeViewWindow(pView, pWin, 0);
+ sqlite3TreeViewPop(pView);
+}
+#endif /* SQLITE_OMIT_WINDOWFUNC */
+
+/*
+** Generate a human-readable explanation of an expression tree.
+*/
+SQLITE_PRIVATE void sqlite3TreeViewExpr(TreeView *pView, const Expr *pExpr, u8 moreToFollow){
+ const char *zBinOp = 0; /* Binary operator */
+ const char *zUniOp = 0; /* Unary operator */
+ char zFlgs[200];
+ pView = sqlite3TreeViewPush(pView, moreToFollow);
+ if( pExpr==0 ){
+ sqlite3TreeViewLine(pView, "nil");
+ sqlite3TreeViewPop(pView);
+ return;
+ }
+ if( pExpr->flags || pExpr->affExpr || pExpr->vvaFlags ){
+ StrAccum x;
+ sqlite3StrAccumInit(&x, 0, zFlgs, sizeof(zFlgs), 0);
+ sqlite3_str_appendf(&x, " fg.af=%x.%c",
+ pExpr->flags, pExpr->affExpr ? pExpr->affExpr : 'n');
+ if( ExprHasProperty(pExpr, EP_FromJoin) ){
+ sqlite3_str_appendf(&x, " iRJT=%d", pExpr->iRightJoinTable);
+ }
+ if( ExprHasProperty(pExpr, EP_FromDDL) ){
+ sqlite3_str_appendf(&x, " DDL");
+ }
+ if( ExprHasVVAProperty(pExpr, EP_Immutable) ){
+ sqlite3_str_appendf(&x, " IMMUTABLE");
+ }
+ sqlite3StrAccumFinish(&x);
+ }else{
+ zFlgs[0] = 0;
+ }
+ switch( pExpr->op ){
+ case TK_AGG_COLUMN: {
+ sqlite3TreeViewLine(pView, "AGG{%d:%d}%s",
+ pExpr->iTable, pExpr->iColumn, zFlgs);
+ break;
+ }
+ case TK_COLUMN: {
+ if( pExpr->iTable<0 ){
+ /* This only happens when coding check constraints */
+ char zOp2[16];
+ if( pExpr->op2 ){
+ sqlite3_snprintf(sizeof(zOp2),zOp2," op2=0x%02x",pExpr->op2);
+ }else{
+ zOp2[0] = 0;
+ }
+ sqlite3TreeViewLine(pView, "COLUMN(%d)%s%s",
+ pExpr->iColumn, zFlgs, zOp2);
+ }else{
+ sqlite3TreeViewLine(pView, "{%d:%d} pTab=%p%s",
+ pExpr->iTable, pExpr->iColumn,
+ pExpr->y.pTab, zFlgs);
+ }
+ if( ExprHasProperty(pExpr, EP_FixedCol) ){
+ sqlite3TreeViewExpr(pView, pExpr->pLeft, 0);
+ }
+ break;
+ }
+ case TK_INTEGER: {
+ if( pExpr->flags & EP_IntValue ){
+ sqlite3TreeViewLine(pView, "%d", pExpr->u.iValue);
+ }else{
+ sqlite3TreeViewLine(pView, "%s", pExpr->u.zToken);
+ }
+ break;
+ }
+#ifndef SQLITE_OMIT_FLOATING_POINT
+ case TK_FLOAT: {
+ sqlite3TreeViewLine(pView,"%s", pExpr->u.zToken);
+ break;
+ }
+#endif
+ case TK_STRING: {
+ sqlite3TreeViewLine(pView,"%Q", pExpr->u.zToken);
+ break;
+ }
+ case TK_NULL: {
+ sqlite3TreeViewLine(pView,"NULL");
+ break;
+ }
+ case TK_TRUEFALSE: {
+ sqlite3TreeViewLine(pView,
+ sqlite3ExprTruthValue(pExpr) ? "TRUE" : "FALSE");
+ break;
+ }
+#ifndef SQLITE_OMIT_BLOB_LITERAL
+ case TK_BLOB: {
+ sqlite3TreeViewLine(pView,"%s", pExpr->u.zToken);
+ break;
+ }
+#endif
+ case TK_VARIABLE: {
+ sqlite3TreeViewLine(pView,"VARIABLE(%s,%d)",
+ pExpr->u.zToken, pExpr->iColumn);
+ break;
+ }
+ case TK_REGISTER: {
+ sqlite3TreeViewLine(pView,"REGISTER(%d)", pExpr->iTable);
+ break;
+ }
+ case TK_ID: {
+ sqlite3TreeViewLine(pView,"ID \"%w\"", pExpr->u.zToken);
+ break;
+ }
+#ifndef SQLITE_OMIT_CAST
+ case TK_CAST: {
+ /* Expressions of the form: CAST(pLeft AS token) */
+ sqlite3TreeViewLine(pView,"CAST %Q", pExpr->u.zToken);
+ sqlite3TreeViewExpr(pView, pExpr->pLeft, 0);
+ break;
+ }
+#endif /* SQLITE_OMIT_CAST */
+ case TK_LT: zBinOp = "LT"; break;
+ case TK_LE: zBinOp = "LE"; break;
+ case TK_GT: zBinOp = "GT"; break;
+ case TK_GE: zBinOp = "GE"; break;
+ case TK_NE: zBinOp = "NE"; break;
+ case TK_EQ: zBinOp = "EQ"; break;
+ case TK_IS: zBinOp = "IS"; break;
+ case TK_ISNOT: zBinOp = "ISNOT"; break;
+ case TK_AND: zBinOp = "AND"; break;
+ case TK_OR: zBinOp = "OR"; break;
+ case TK_PLUS: zBinOp = "ADD"; break;
+ case TK_STAR: zBinOp = "MUL"; break;
+ case TK_MINUS: zBinOp = "SUB"; break;
+ case TK_REM: zBinOp = "REM"; break;
+ case TK_BITAND: zBinOp = "BITAND"; break;
+ case TK_BITOR: zBinOp = "BITOR"; break;
+ case TK_SLASH: zBinOp = "DIV"; break;
+ case TK_LSHIFT: zBinOp = "LSHIFT"; break;
+ case TK_RSHIFT: zBinOp = "RSHIFT"; break;
+ case TK_CONCAT: zBinOp = "CONCAT"; break;
+ case TK_DOT: zBinOp = "DOT"; break;
+ case TK_LIMIT: zBinOp = "LIMIT"; break;
+
+ case TK_UMINUS: zUniOp = "UMINUS"; break;
+ case TK_UPLUS: zUniOp = "UPLUS"; break;
+ case TK_BITNOT: zUniOp = "BITNOT"; break;
+ case TK_NOT: zUniOp = "NOT"; break;
+ case TK_ISNULL: zUniOp = "ISNULL"; break;
+ case TK_NOTNULL: zUniOp = "NOTNULL"; break;
+
+ case TK_TRUTH: {
+ int x;
+ const char *azOp[] = {
+ "IS-FALSE", "IS-TRUE", "IS-NOT-FALSE", "IS-NOT-TRUE"
+ };
+ assert( pExpr->op2==TK_IS || pExpr->op2==TK_ISNOT );
+ assert( pExpr->pRight );
+ assert( sqlite3ExprSkipCollate(pExpr->pRight)->op==TK_TRUEFALSE );
+ x = (pExpr->op2==TK_ISNOT)*2 + sqlite3ExprTruthValue(pExpr->pRight);
+ zUniOp = azOp[x];
+ break;
+ }
+
+ case TK_SPAN: {
+ sqlite3TreeViewLine(pView, "SPAN %Q", pExpr->u.zToken);
+ sqlite3TreeViewExpr(pView, pExpr->pLeft, 0);
+ break;
+ }
+
+ case TK_COLLATE: {
+ /* COLLATE operators without the EP_Collate flag are intended to
+ ** emulate collation associated with a table column. These show
+ ** up in the treeview output as "SOFT-COLLATE". Explicit COLLATE
+ ** operators that appear in the original SQL always have the
+ ** EP_Collate bit set and appear in treeview output as just "COLLATE" */
+ sqlite3TreeViewLine(pView, "%sCOLLATE %Q%s",
+ !ExprHasProperty(pExpr, EP_Collate) ? "SOFT-" : "",
+ pExpr->u.zToken, zFlgs);
+ sqlite3TreeViewExpr(pView, pExpr->pLeft, 0);
+ break;
+ }
+
+ case TK_AGG_FUNCTION:
+ case TK_FUNCTION: {
+ ExprList *pFarg; /* List of function arguments */
+ Window *pWin;
+ if( ExprHasProperty(pExpr, EP_TokenOnly) ){
+ pFarg = 0;
+ pWin = 0;
+ }else{
+ pFarg = pExpr->x.pList;
+#ifndef SQLITE_OMIT_WINDOWFUNC
+ pWin = ExprHasProperty(pExpr, EP_WinFunc) ? pExpr->y.pWin : 0;
+#else
+ pWin = 0;
+#endif
+ }
+ if( pExpr->op==TK_AGG_FUNCTION ){
+ sqlite3TreeViewLine(pView, "AGG_FUNCTION%d %Q%s agg=%d[%d]/%p",
+ pExpr->op2, pExpr->u.zToken, zFlgs,
+ pExpr->pAggInfo ? pExpr->pAggInfo->selId : 0,
+ pExpr->iAgg, pExpr->pAggInfo);
+ }else if( pExpr->op2!=0 ){
+ const char *zOp2;
+ char zBuf[8];
+ sqlite3_snprintf(sizeof(zBuf),zBuf,"0x%02x",pExpr->op2);
+ zOp2 = zBuf;
+ if( pExpr->op2==NC_IsCheck ) zOp2 = "NC_IsCheck";
+ if( pExpr->op2==NC_IdxExpr ) zOp2 = "NC_IdxExpr";
+ if( pExpr->op2==NC_PartIdx ) zOp2 = "NC_PartIdx";
+ if( pExpr->op2==NC_GenCol ) zOp2 = "NC_GenCol";
+ sqlite3TreeViewLine(pView, "FUNCTION %Q%s op2=%s",
+ pExpr->u.zToken, zFlgs, zOp2);
+ }else{
+ sqlite3TreeViewLine(pView, "FUNCTION %Q%s", pExpr->u.zToken, zFlgs);
+ }
+ if( pFarg ){
+ sqlite3TreeViewExprList(pView, pFarg, pWin!=0, 0);
+ }
+#ifndef SQLITE_OMIT_WINDOWFUNC
+ if( pWin ){
+ sqlite3TreeViewWindow(pView, pWin, 0);
+ }
+#endif
+ break;
+ }
+#ifndef SQLITE_OMIT_SUBQUERY
+ case TK_EXISTS: {
+ sqlite3TreeViewLine(pView, "EXISTS-expr flags=0x%x", pExpr->flags);
+ sqlite3TreeViewSelect(pView, pExpr->x.pSelect, 0);
+ break;
+ }
+ case TK_SELECT: {
+ sqlite3TreeViewLine(pView, "subquery-expr flags=0x%x", pExpr->flags);
+ sqlite3TreeViewSelect(pView, pExpr->x.pSelect, 0);
+ break;
+ }
+ case TK_IN: {
+ sqlite3TreeViewLine(pView, "IN flags=0x%x", pExpr->flags);
+ sqlite3TreeViewExpr(pView, pExpr->pLeft, 1);
+ if( ExprHasProperty(pExpr, EP_xIsSelect) ){
+ sqlite3TreeViewSelect(pView, pExpr->x.pSelect, 0);
+ }else{
+ sqlite3TreeViewExprList(pView, pExpr->x.pList, 0, 0);
+ }
+ break;
+ }
+#endif /* SQLITE_OMIT_SUBQUERY */
+
+ /*
+ ** x BETWEEN y AND z
+ **
+ ** This is equivalent to
+ **
+ ** x>=y AND x<=z
+ **
+ ** X is stored in pExpr->pLeft.
+ ** Y is stored in pExpr->pList->a[0].pExpr.
+ ** Z is stored in pExpr->pList->a[1].pExpr.
+ */
+ case TK_BETWEEN: {
+ Expr *pX = pExpr->pLeft;
+ Expr *pY = pExpr->x.pList->a[0].pExpr;
+ Expr *pZ = pExpr->x.pList->a[1].pExpr;
+ sqlite3TreeViewLine(pView, "BETWEEN");
+ sqlite3TreeViewExpr(pView, pX, 1);
+ sqlite3TreeViewExpr(pView, pY, 1);
+ sqlite3TreeViewExpr(pView, pZ, 0);
+ break;
+ }
+ case TK_TRIGGER: {
+ /* If the opcode is TK_TRIGGER, then the expression is a reference
+ ** to a column in the new.* or old.* pseudo-tables available to
+ ** trigger programs. In this case Expr.iTable is set to 1 for the
+ ** new.* pseudo-table, or 0 for the old.* pseudo-table. Expr.iColumn
+ ** is set to the column of the pseudo-table to read, or to -1 to
+ ** read the rowid field.
+ */
+ sqlite3TreeViewLine(pView, "%s(%d)",
+ pExpr->iTable ? "NEW" : "OLD", pExpr->iColumn);
+ break;
+ }
+ case TK_CASE: {
+ sqlite3TreeViewLine(pView, "CASE");
+ sqlite3TreeViewExpr(pView, pExpr->pLeft, 1);
+ sqlite3TreeViewExprList(pView, pExpr->x.pList, 0, 0);
+ break;
+ }
+#ifndef SQLITE_OMIT_TRIGGER
+ case TK_RAISE: {
+ const char *zType = "unk";
+ switch( pExpr->affExpr ){
+ case OE_Rollback: zType = "rollback"; break;
+ case OE_Abort: zType = "abort"; break;
+ case OE_Fail: zType = "fail"; break;
+ case OE_Ignore: zType = "ignore"; break;
+ }
+ sqlite3TreeViewLine(pView, "RAISE %s(%Q)", zType, pExpr->u.zToken);
+ break;
+ }
+#endif
+ case TK_MATCH: {
+ sqlite3TreeViewLine(pView, "MATCH {%d:%d}%s",
+ pExpr->iTable, pExpr->iColumn, zFlgs);
+ sqlite3TreeViewExpr(pView, pExpr->pRight, 0);
+ break;
+ }
+ case TK_VECTOR: {
+ char *z = sqlite3_mprintf("VECTOR%s",zFlgs);
+ sqlite3TreeViewBareExprList(pView, pExpr->x.pList, z);
+ sqlite3_free(z);
+ break;
+ }
+ case TK_SELECT_COLUMN: {
+ sqlite3TreeViewLine(pView, "SELECT-COLUMN %d", pExpr->iColumn);
+ sqlite3TreeViewSelect(pView, pExpr->pLeft->x.pSelect, 0);
+ break;
+ }
+ case TK_IF_NULL_ROW: {
+ sqlite3TreeViewLine(pView, "IF-NULL-ROW %d", pExpr->iTable);
+ sqlite3TreeViewExpr(pView, pExpr->pLeft, 0);
+ break;
+ }
+ default: {
+ sqlite3TreeViewLine(pView, "op=%d", pExpr->op);
+ break;
+ }
+ }
+ if( zBinOp ){
+ sqlite3TreeViewLine(pView, "%s%s", zBinOp, zFlgs);
+ sqlite3TreeViewExpr(pView, pExpr->pLeft, 1);
+ sqlite3TreeViewExpr(pView, pExpr->pRight, 0);
+ }else if( zUniOp ){
+ sqlite3TreeViewLine(pView, "%s%s", zUniOp, zFlgs);
+ sqlite3TreeViewExpr(pView, pExpr->pLeft, 0);
+ }
+ sqlite3TreeViewPop(pView);
+}
+
+
+/*
+** Generate a human-readable explanation of an expression list.
+*/
+SQLITE_PRIVATE void sqlite3TreeViewBareExprList(
+ TreeView *pView,
+ const ExprList *pList,
+ const char *zLabel
+){
+ if( zLabel==0 || zLabel[0]==0 ) zLabel = "LIST";
+ if( pList==0 ){
+ sqlite3TreeViewLine(pView, "%s (empty)", zLabel);
+ }else{
+ int i;
+ sqlite3TreeViewLine(pView, "%s", zLabel);
+ for(i=0; i<pList->nExpr; i++){
+ int j = pList->a[i].u.x.iOrderByCol;
+ char *zName = pList->a[i].zEName;
+ int moreToFollow = i<pList->nExpr - 1;
+ if( pList->a[i].eEName!=ENAME_NAME ) zName = 0;
+ if( j || zName ){
+ sqlite3TreeViewPush(pView, moreToFollow);
+ moreToFollow = 0;
+ sqlite3TreeViewLine(pView, 0);
+ if( zName ){
+ fprintf(stdout, "AS %s ", zName);
+ }
+ if( j ){
+ fprintf(stdout, "iOrderByCol=%d", j);
+ }
+ fprintf(stdout, "\n");
+ fflush(stdout);
+ }
+ sqlite3TreeViewExpr(pView, pList->a[i].pExpr, moreToFollow);
+ if( j || zName ){
+ sqlite3TreeViewPop(pView);
+ }
+ }
+ }
+}
+SQLITE_PRIVATE void sqlite3TreeViewExprList(
+ TreeView *pView,
+ const ExprList *pList,
+ u8 moreToFollow,
+ const char *zLabel
+){
+ pView = sqlite3TreeViewPush(pView, moreToFollow);
+ sqlite3TreeViewBareExprList(pView, pList, zLabel);
+ sqlite3TreeViewPop(pView);
+}
+
+#endif /* SQLITE_DEBUG */
+
+/************** End of treeview.c ********************************************/
+/************** Begin file random.c ******************************************/
+/*
+** 2001 September 15
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+** This file contains code to implement a pseudo-random number
+** generator (PRNG) for SQLite.
+**
+** Random numbers are used by some of the database backends in order
+** to generate random integer keys for tables or random filenames.
+*/
+/* #include "sqliteInt.h" */
+
+
+/* All threads share a single random number generator.
+** This structure is the current state of the generator.
+*/
+static SQLITE_WSD struct sqlite3PrngType {
+ unsigned char isInit; /* True if initialized */
+ unsigned char i, j; /* State variables */
+ unsigned char s[256]; /* State variables */
+} sqlite3Prng;
+
+/*
+** Return N random bytes.
+*/
+SQLITE_API void sqlite3_randomness(int N, void *pBuf){
+ unsigned char t;
+ unsigned char *zBuf = pBuf;
+
+ /* The "wsdPrng" macro will resolve to the pseudo-random number generator
+ ** state vector. If writable static data is unsupported on the target,
+ ** we have to locate the state vector at run-time. In the more common
+ ** case where writable static data is supported, wsdPrng can refer directly
+ ** to the "sqlite3Prng" state vector declared above.
+ */
+#ifdef SQLITE_OMIT_WSD
+ struct sqlite3PrngType *p = &GLOBAL(struct sqlite3PrngType, sqlite3Prng);
+# define wsdPrng p[0]
+#else
+# define wsdPrng sqlite3Prng
+#endif
+
+#if SQLITE_THREADSAFE
+ sqlite3_mutex *mutex;
+#endif
+
+#ifndef SQLITE_OMIT_AUTOINIT
+ if( sqlite3_initialize() ) return;
+#endif
+
+#if SQLITE_THREADSAFE
+ mutex = sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_PRNG);
+#endif
+
+ sqlite3_mutex_enter(mutex);
+ if( N<=0 || pBuf==0 ){
+ wsdPrng.isInit = 0;
+ sqlite3_mutex_leave(mutex);
+ return;
+ }
+
+ /* Initialize the state of the random number generator once,
+ ** the first time this routine is called. The seed value does
+ ** not need to contain a lot of randomness since we are not
+ ** trying to do secure encryption or anything like that...
+ **
+ ** Nothing in this file or anywhere else in SQLite does any kind of
+ ** encryption. The RC4 algorithm is being used as a PRNG (pseudo-random
+ ** number generator) not as an encryption device.
+ */
+ if( !wsdPrng.isInit ){
+ int i;
+ char k[256];
+ wsdPrng.j = 0;
+ wsdPrng.i = 0;
+ sqlite3OsRandomness(sqlite3_vfs_find(0), 256, k);
+ for(i=0; i<256; i++){
+ wsdPrng.s[i] = (u8)i;
+ }
+ for(i=0; i<256; i++){
+ wsdPrng.j += wsdPrng.s[i] + k[i];
+ t = wsdPrng.s[wsdPrng.j];
+ wsdPrng.s[wsdPrng.j] = wsdPrng.s[i];
+ wsdPrng.s[i] = t;
+ }
+ wsdPrng.isInit = 1;
+ }
+
+ assert( N>0 );
+ do{
+ wsdPrng.i++;
+ t = wsdPrng.s[wsdPrng.i];
+ wsdPrng.j += t;
+ wsdPrng.s[wsdPrng.i] = wsdPrng.s[wsdPrng.j];
+ wsdPrng.s[wsdPrng.j] = t;
+ t += wsdPrng.s[wsdPrng.i];
+ *(zBuf++) = wsdPrng.s[t];
+ }while( --N );
+ sqlite3_mutex_leave(mutex);
+}
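+
+/* Illustrative sketch (not part of the amalgamation): callers fill a buffer
+** with pseudo-random bytes, or pass N<=0 (or a NULL buffer) to force a
+** reseed on the next use, exactly as the code above implements.
+*/
+#if 0
+  sqlite3_int64 iKey;
+  sqlite3_randomness(sizeof(iKey), &iKey);  /* Fill iKey with random bits */
+  sqlite3_randomness(0, 0);                 /* Reseed before next request */
+#endif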
+
+#ifndef SQLITE_UNTESTABLE
+/*
+** For testing purposes, we sometimes want to preserve the state of
+** PRNG and restore the PRNG to its saved state at a later time, or
+** to reset the PRNG to its initial state. These routines accomplish
+** those tasks.
+**
+** The sqlite3_test_control() interface calls these routines to
+** control the PRNG.
+*/
+static SQLITE_WSD struct sqlite3PrngType sqlite3SavedPrng;
+SQLITE_PRIVATE void sqlite3PrngSaveState(void){
+ memcpy(
+ &GLOBAL(struct sqlite3PrngType, sqlite3SavedPrng),
+ &GLOBAL(struct sqlite3PrngType, sqlite3Prng),
+ sizeof(sqlite3Prng)
+ );
+}
+SQLITE_PRIVATE void sqlite3PrngRestoreState(void){
+ memcpy(
+ &GLOBAL(struct sqlite3PrngType, sqlite3Prng),
+ &GLOBAL(struct sqlite3PrngType, sqlite3SavedPrng),
+ sizeof(sqlite3Prng)
+ );
+}
+#endif /* SQLITE_UNTESTABLE */
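+
+/* Illustrative sketch (not part of the amalgamation): a test harness drives
+** the save/restore hooks above through sqlite3_test_control().
+*/
+#if 0
+  sqlite3_test_control(SQLITE_TESTCTRL_PRNG_SAVE);     /* Snapshot PRNG */
+  /* ... run a test that consumes randomness ... */
+  sqlite3_test_control(SQLITE_TESTCTRL_PRNG_RESTORE);  /* Rewind PRNG */
+#endif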
+
+/************** End of random.c **********************************************/
+/************** Begin file threads.c *****************************************/
+/*
+** 2012 July 21
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+******************************************************************************
+**
+** This file presents a simple cross-platform threading interface for
+** use internally by SQLite.
+**
+** A "thread" can be created using sqlite3ThreadCreate(). This thread
+** runs independently of its creator until it is joined using
+** sqlite3ThreadJoin(), at which point it terminates.
+**
+** Threads do not have to be real. It could be that the work of the
+** "thread" is done by the main thread at either the sqlite3ThreadCreate()
+** or sqlite3ThreadJoin() call. This is, in fact, what happens in
+** single threaded systems. Nothing in SQLite requires multiple threads.
+** This interface exists so that applications that want to take advantage
+** of multiple cores can do so, while also allowing applications to stay
+** single-threaded if desired.
+*/
+/* #include "sqliteInt.h" */
+#if SQLITE_OS_WIN
+/* # include "os_win.h" */
+#endif
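+
+/* Illustrative sketch (not part of the amalgamation): how internal code is
+** expected to use this interface. sqlite3ThreadJoin() both collects the
+** result and frees the SQLiteThread object, so it is called exactly once
+** per created thread. The names sortTask and runInBackground are
+** hypothetical.
+*/
+#if 0
+static void *sortTask(void *pIn){ /* ... do the work ... */ return pIn; }
+static int runInBackground(void *pArg){
+  SQLiteThread *pThread;
+  void *pOut = 0;
+  int rc = sqlite3ThreadCreate(&pThread, sortTask, pArg);
+  if( rc==SQLITE_OK ){
+    /* ... proceed with other work on the calling thread ... */
+    rc = sqlite3ThreadJoin(pThread, &pOut);  /* Waits, then frees pThread */
+  }
+  return rc;
+}
+#endif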
+
+#if SQLITE_MAX_WORKER_THREADS>0
+
+/********************************* Unix Pthreads ****************************/
+#if SQLITE_OS_UNIX && defined(SQLITE_MUTEX_PTHREADS) && SQLITE_THREADSAFE>0
+
+#define SQLITE_THREADS_IMPLEMENTED 1 /* Prevent the single-thread code below */
+/* #include <pthread.h> */
+
+/* A running thread */
+struct SQLiteThread {
+ pthread_t tid; /* Thread ID */
+ int done; /* Set to true when thread finishes */
+ void *pOut; /* Result returned by the thread */
+ void *(*xTask)(void*); /* The thread routine */
+ void *pIn; /* Argument to the thread */
+};
+
+/* Create a new thread */
+SQLITE_PRIVATE int sqlite3ThreadCreate(
+ SQLiteThread **ppThread, /* OUT: Write the thread object here */
+ void *(*xTask)(void*), /* Routine to run in a separate thread */
+ void *pIn /* Argument passed into xTask() */
+){
+ SQLiteThread *p;
+ int rc;
+
+ assert( ppThread!=0 );
+ assert( xTask!=0 );
+ /* This routine is never used in single-threaded mode */
+ assert( sqlite3GlobalConfig.bCoreMutex!=0 );
+
+ *ppThread = 0;
+ p = sqlite3Malloc(sizeof(*p));
+ if( p==0 ) return SQLITE_NOMEM_BKPT;
+ memset(p, 0, sizeof(*p));
+ p->xTask = xTask;
+ p->pIn = pIn;
+ /* If the SQLITE_TESTCTRL_FAULT_INSTALL callback is registered to a
+ ** function that returns SQLITE_ERROR when passed the argument 200, that
+ ** forces worker threads to run sequentially and deterministically
+ ** for testing purposes. */
+ if( sqlite3FaultSim(200) ){
+ rc = 1;
+ }else{
+ rc = pthread_create(&p->tid, 0, xTask, pIn);
+ }
+ if( rc ){
+ p->done = 1;
+ p->pOut = xTask(pIn);
+ }
+ *ppThread = p;
+ return SQLITE_OK;
+}
+
+/* Get the results of the thread */
+SQLITE_PRIVATE int sqlite3ThreadJoin(SQLiteThread *p, void **ppOut){
+ int rc;
+
+ assert( ppOut!=0 );
+ if( NEVER(p==0) ) return SQLITE_NOMEM_BKPT;
+ if( p->done ){
+ *ppOut = p->pOut;
+ rc = SQLITE_OK;
+ }else{
+ rc = pthread_join(p->tid, ppOut) ? SQLITE_ERROR : SQLITE_OK;
+ }
+ sqlite3_free(p);
+ return rc;
+}
+
+#endif /* SQLITE_OS_UNIX && defined(SQLITE_MUTEX_PTHREADS) */
+/******************************** End Unix Pthreads *************************/
+
+
+/********************************* Win32 Threads ****************************/
+#if SQLITE_OS_WIN_THREADS
+
+#define SQLITE_THREADS_IMPLEMENTED 1 /* Prevent the single-thread code below */
+#include <process.h>
+
+/* A running thread */
+struct SQLiteThread {
+ void *tid; /* The thread handle */
+ unsigned id; /* The thread identifier */
+ void *(*xTask)(void*); /* The routine to run as a thread */
+ void *pIn; /* Argument to xTask */
+ void *pResult; /* Result of xTask */
+};
+
+/* Thread procedure Win32 compatibility shim */
+static unsigned __stdcall sqlite3ThreadProc(
+ void *pArg /* IN: Pointer to the SQLiteThread structure */
+){
+ SQLiteThread *p = (SQLiteThread *)pArg;
+
+ assert( p!=0 );
+#if 0
+ /*
+ ** This assert appears to trigger spuriously on certain
+ ** versions of Windows, possibly due to _beginthreadex()
+ ** and/or CreateThread() not fully setting their thread
+ ** ID parameter before starting the thread.
+ */
+ assert( p->id==GetCurrentThreadId() );
+#endif
+ assert( p->xTask!=0 );
+ p->pResult = p->xTask(p->pIn);
+
+ _endthreadex(0);
+ return 0; /* NOT REACHED */
+}
+
+/* Create a new thread */
+SQLITE_PRIVATE int sqlite3ThreadCreate(
+ SQLiteThread **ppThread, /* OUT: Write the thread object here */
+ void *(*xTask)(void*), /* Routine to run in a separate thread */
+ void *pIn /* Argument passed into xTask() */
+){
+ SQLiteThread *p;
+
+ assert( ppThread!=0 );
+ assert( xTask!=0 );
+ *ppThread = 0;
+ p = sqlite3Malloc(sizeof(*p));
+ if( p==0 ) return SQLITE_NOMEM_BKPT;
+ /* If the SQLITE_TESTCTRL_FAULT_INSTALL callback is registered to a
+ ** function that returns SQLITE_ERROR when passed the argument 200, that
+ ** forces worker threads to run sequentially and deterministically
+ ** (via the sqlite3FaultSim() term of the conditional) for testing
+ ** purposes. */
+ if( sqlite3GlobalConfig.bCoreMutex==0 || sqlite3FaultSim(200) ){
+ memset(p, 0, sizeof(*p));
+ }else{
+ p->xTask = xTask;
+ p->pIn = pIn;
+ p->tid = (void*)_beginthreadex(0, 0, sqlite3ThreadProc, p, 0, &p->id);
+ if( p->tid==0 ){
+ memset(p, 0, sizeof(*p));
+ }
+ }
+ if( p->xTask==0 ){
+ p->id = GetCurrentThreadId();
+ p->pResult = xTask(pIn);
+ }
+ *ppThread = p;
+ return SQLITE_OK;
+}
+
+SQLITE_PRIVATE DWORD sqlite3Win32Wait(HANDLE hObject); /* os_win.c */
+
+/* Get the results of the thread */
+SQLITE_PRIVATE int sqlite3ThreadJoin(SQLiteThread *p, void **ppOut){
+ DWORD rc;
+ BOOL bRc;
+
+ assert( ppOut!=0 );
+ if( NEVER(p==0) ) return SQLITE_NOMEM_BKPT;
+ if( p->xTask==0 ){
+ /* assert( p->id==GetCurrentThreadId() ); */
+ rc = WAIT_OBJECT_0;
+ assert( p->tid==0 );
+ }else{
+ assert( p->id!=0 && p->id!=GetCurrentThreadId() );
+ rc = sqlite3Win32Wait((HANDLE)p->tid);
+ assert( rc!=WAIT_IO_COMPLETION );
+ bRc = CloseHandle((HANDLE)p->tid);
+ assert( bRc );
+ }
+ if( rc==WAIT_OBJECT_0 ) *ppOut = p->pResult;
+ sqlite3_free(p);
+ return (rc==WAIT_OBJECT_0) ? SQLITE_OK : SQLITE_ERROR;
+}
+
+#endif /* SQLITE_OS_WIN_THREADS */
+/******************************** End Win32 Threads *************************/
+
+
+/********************************* Single-Threaded **************************/
+#ifndef SQLITE_THREADS_IMPLEMENTED
+/*
+** This implementation does not actually create a new thread. It does the
+** work of the thread on the main thread, either when the thread is created
+** or when it is joined.
+*/
+
+/* A running thread */
+struct SQLiteThread {
+ void *(*xTask)(void*); /* The routine to run as a thread */
+ void *pIn; /* Argument to xTask */
+ void *pResult; /* Result of xTask */
+};
+
+/* Create a new thread */
+SQLITE_PRIVATE int sqlite3ThreadCreate(
+ SQLiteThread **ppThread, /* OUT: Write the thread object here */
+ void *(*xTask)(void*), /* Routine to run in a separate thread */
+ void *pIn /* Argument passed into xTask() */
+){
+ SQLiteThread *p;
+
+ assert( ppThread!=0 );
+ assert( xTask!=0 );
+ *ppThread = 0;
+ p = sqlite3Malloc(sizeof(*p));
+ if( p==0 ) return SQLITE_NOMEM_BKPT;
+ if( (SQLITE_PTR_TO_INT(p)/17)&1 ){
+ p->xTask = xTask;
+ p->pIn = pIn;
+ }else{
+ p->xTask = 0;
+ p->pResult = xTask(pIn);
+ }
+ *ppThread = p;
+ return SQLITE_OK;
+}
+
+/* Get the results of the thread */
+SQLITE_PRIVATE int sqlite3ThreadJoin(SQLiteThread *p, void **ppOut){
+
+ assert( ppOut!=0 );
+ if( NEVER(p==0) ) return SQLITE_NOMEM_BKPT;
+ if( p->xTask ){
+ *ppOut = p->xTask(p->pIn);
+ }else{
+ *ppOut = p->pResult;
+ }
+ sqlite3_free(p);
+
+#if defined(SQLITE_TEST)
+ {
+ void *pTstAlloc = sqlite3Malloc(10);
+ if (!pTstAlloc) return SQLITE_NOMEM_BKPT;
+ sqlite3_free(pTstAlloc);
+ }
+#endif
+
+ return SQLITE_OK;
+}
+
+#endif /* !defined(SQLITE_THREADS_IMPLEMENTED) */
+/****************************** End Single-Threaded *************************/
+#endif /* SQLITE_MAX_WORKER_THREADS>0 */
+
+/************** End of threads.c *********************************************/
+/************** Begin file utf.c *********************************************/
+/*
+** 2004 April 13
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+** This file contains routines used to translate between UTF-8,
+** UTF-16, UTF-16BE, and UTF-16LE.
+**
+** Notes on UTF-8:
+**
+** Byte-0 Byte-1 Byte-2 Byte-3 Value
+** 0xxxxxxx 00000000 00000000 0xxxxxxx
+** 110yyyyy 10xxxxxx 00000000 00000yyy yyxxxxxx
+** 1110zzzz 10yyyyyy 10xxxxxx 00000000 zzzzyyyy yyxxxxxx
+** 11110uuu 10uuzzzz 10yyyyyy 10xxxxxx 000uuuuu zzzzyyyy yyxxxxxx
+**
+**
+** Notes on UTF-16: (with wwww+1==uuuuu)
+**
+** Word-0 Word-1 Value
+** 110110ww wwzzzzyy 110111yy yyxxxxxx 000uuuuu zzzzyyyy yyxxxxxx
+** zzzzyyyy yyxxxxxx 00000000 zzzzyyyy yyxxxxxx
+**
+**
+** BOM or Byte Order Mark:
+** 0xff 0xfe little-endian utf-16 follows
+** 0xfe 0xff big-endian utf-16 follows
+**
+*/
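+
+/* Worked example (illustrative, not part of the amalgamation): the Euro
+** sign U+20AC is 0010 0000 1010 1100 in binary, so the 3-byte UTF-8 row
+** above gives 11100010 10000010 10101100 = 0xE2 0x82 0xAC. Because the
+** value fits in the BMP it is a single UTF-16 unit: bytes AC 20 in
+** little-endian order, or 20 AC in big-endian order.
+*/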
+/* #include "sqliteInt.h" */
+/* #include <assert.h> */
+/* #include "vdbeInt.h" */
+
+#if !defined(SQLITE_AMALGAMATION) && SQLITE_BYTEORDER==0
+/*
+** The following constant value is used by the SQLITE_BIGENDIAN and
+** SQLITE_LITTLEENDIAN macros.
+*/
+SQLITE_PRIVATE const int sqlite3one = 1;
+#endif /* !defined(SQLITE_AMALGAMATION) && SQLITE_BYTEORDER==0 */
+
+/*
+** This lookup table is used to help decode the first byte of
+** a multi-byte UTF8 character.
+*/
+static const unsigned char sqlite3Utf8Trans1[] = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x00, 0x01, 0x02, 0x03, 0x00, 0x01, 0x00, 0x00,
+};
+
+
+#define WRITE_UTF8(zOut, c) { \
+ if( c<0x00080 ){ \
+ *zOut++ = (u8)(c&0xFF); \
+ } \
+ else if( c<0x00800 ){ \
+ *zOut++ = 0xC0 + (u8)((c>>6)&0x1F); \
+ *zOut++ = 0x80 + (u8)(c & 0x3F); \
+ } \
+ else if( c<0x10000 ){ \
+ *zOut++ = 0xE0 + (u8)((c>>12)&0x0F); \
+ *zOut++ = 0x80 + (u8)((c>>6) & 0x3F); \
+ *zOut++ = 0x80 + (u8)(c & 0x3F); \
+ }else{ \
+ *zOut++ = 0xF0 + (u8)((c>>18) & 0x07); \
+ *zOut++ = 0x80 + (u8)((c>>12) & 0x3F); \
+ *zOut++ = 0x80 + (u8)((c>>6) & 0x3F); \
+ *zOut++ = 0x80 + (u8)(c & 0x3F); \
+ } \
+}
+
+#define WRITE_UTF16LE(zOut, c) { \
+ if( c<=0xFFFF ){ \
+ *zOut++ = (u8)(c&0x00FF); \
+ *zOut++ = (u8)((c>>8)&0x00FF); \
+ }else{ \
+ *zOut++ = (u8)(((c>>10)&0x003F) + (((c-0x10000)>>10)&0x00C0)); \
+ *zOut++ = (u8)(0x00D8 + (((c-0x10000)>>18)&0x03)); \
+ *zOut++ = (u8)(c&0x00FF); \
+ *zOut++ = (u8)(0x00DC + ((c>>8)&0x03)); \
+ } \
+}
+
+#define WRITE_UTF16BE(zOut, c) { \
+ if( c<=0xFFFF ){ \
+ *zOut++ = (u8)((c>>8)&0x00FF); \
+ *zOut++ = (u8)(c&0x00FF); \
+ }else{ \
+ *zOut++ = (u8)(0x00D8 + (((c-0x10000)>>18)&0x03)); \
+ *zOut++ = (u8)(((c>>10)&0x003F) + (((c-0x10000)>>10)&0x00C0)); \
+ *zOut++ = (u8)(0x00DC + ((c>>8)&0x03)); \
+ *zOut++ = (u8)(c&0x00FF); \
+ } \
+}
+
+/*
+** Translate a single UTF-8 character. Return the unicode value.
+**
+** During translation, assume that the byte to which zTerm points
+** is a 0x00.
+**
+** Write a pointer to the next unread byte back into *pzNext.
+**
+** Notes On Invalid UTF-8:
+**
+** * This routine never allows a 7-bit character (0x00 through 0x7f) to
+** be encoded as a multi-byte character. Any multi-byte character that
+** attempts to encode a value between 0x00 and 0x7f is rendered as 0xfffd.
+**
+** * This routine never allows a UTF16 surrogate value to be encoded.
+** If a multi-byte character attempts to encode a value between
+** 0xd800 and 0xe000 then it is rendered as 0xfffd.
+**
+** * Bytes in the range of 0x80 through 0xbf which occur as the first
+** byte of a character are interpreted as single-byte characters
+** and rendered as themselves even though they are technically
+** invalid characters.
+**
+** * This routine accepts over-length UTF8 encodings
+** for unicode values 0x80 and greater. It does not change over-length
+** encodings to 0xfffd as some systems recommend.
+*/
+#define READ_UTF8(zIn, zTerm, c) \
+ c = *(zIn++); \
+ if( c>=0xc0 ){ \
+ c = sqlite3Utf8Trans1[c-0xc0]; \
+ while( zIn!=zTerm && (*zIn & 0xc0)==0x80 ){ \
+ c = (c<<6) + (0x3f & *(zIn++)); \
+ } \
+ if( c<0x80 \
+ || (c&0xFFFFF800)==0xD800 \
+ || (c&0xFFFFFFFE)==0xFFFE ){ c = 0xFFFD; } \
+ }
+SQLITE_PRIVATE u32 sqlite3Utf8Read(
+ const unsigned char **pz /* Pointer to string from which to read char */
+){
+ unsigned int c;
+
+ /* Same as READ_UTF8() above but without the zTerm parameter.
+ ** For this routine, we assume the UTF8 string is always zero-terminated.
+ */
+ c = *((*pz)++);
+ if( c>=0xc0 ){
+ c = sqlite3Utf8Trans1[c-0xc0];
+ while( (*(*pz) & 0xc0)==0x80 ){
+ c = (c<<6) + (0x3f & *((*pz)++));
+ }
+ if( c<0x80
+ || (c&0xFFFFF800)==0xD800
+ || (c&0xFFFFFFFE)==0xFFFE ){ c = 0xFFFD; }
+ }
+ return c;
+}
+
+
+
+
+/*
+** If the TRANSLATE_TRACE macro is defined, the value of each Mem is
+** printed on stderr on the way into and out of sqlite3VdbeMemTranslate().
+*/
+/* #define TRANSLATE_TRACE 1 */
+
+#ifndef SQLITE_OMIT_UTF16
+/*
+** This routine transforms the internal text encoding used by pMem to
+** desiredEnc. It is an error if the string is already of the desired
+** encoding, or if *pMem does not contain a string value.
+*/
+SQLITE_PRIVATE SQLITE_NOINLINE int sqlite3VdbeMemTranslate(Mem *pMem, u8 desiredEnc){
+ sqlite3_int64 len; /* Maximum length of output string in bytes */
+ unsigned char *zOut; /* Output buffer */
+ unsigned char *zIn; /* Input iterator */
+ unsigned char *zTerm; /* End of input */
+ unsigned char *z; /* Output iterator */
+ unsigned int c;
+
+ assert( pMem->db==0 || sqlite3_mutex_held(pMem->db->mutex) );
+ assert( pMem->flags&MEM_Str );
+ assert( pMem->enc!=desiredEnc );
+ assert( pMem->enc!=0 );
+ assert( pMem->n>=0 );
+
+#if defined(TRANSLATE_TRACE) && defined(SQLITE_DEBUG)
+ {
+ StrAccum acc;
+ char zBuf[1000];
+ sqlite3StrAccumInit(&acc, 0, zBuf, sizeof(zBuf), 0);
+ sqlite3VdbeMemPrettyPrint(pMem, &acc);
+ fprintf(stderr, "INPUT: %s\n", sqlite3StrAccumFinish(&acc));
+ }
+#endif
+
+ /* If the translation is between UTF-16 little and big endian, then
+ ** all that is required is to swap the byte order. This case is handled
+ ** differently from the others.
+ */
+ if( pMem->enc!=SQLITE_UTF8 && desiredEnc!=SQLITE_UTF8 ){
+ u8 temp;
+ int rc;
+ rc = sqlite3VdbeMemMakeWriteable(pMem);
+ if( rc!=SQLITE_OK ){
+ assert( rc==SQLITE_NOMEM );
+ return SQLITE_NOMEM_BKPT;
+ }
+ zIn = (u8*)pMem->z;
+ zTerm = &zIn[pMem->n&~1];
+ while( zIn<zTerm ){
+ temp = *zIn;
+ *zIn = *(zIn+1);
+ zIn++;
+ *zIn++ = temp;
+ }
+ pMem->enc = desiredEnc;
+ goto translate_out;
+ }
+
+ /* Set len to the maximum number of bytes required in the output buffer. */
+ if( desiredEnc==SQLITE_UTF8 ){
+ /* When converting from UTF-16, the maximum growth results from
+ ** translating a 2-byte character to a 4-byte UTF-8 character.
+ ** A single byte is required for the output string
+ ** nul-terminator.
+ */
+ pMem->n &= ~1;
+ len = 2 * (sqlite3_int64)pMem->n + 1;
+ }else{
+ /* When converting from UTF-8 to UTF-16 the maximum growth is caused
+ ** when a 1-byte UTF-8 character is translated into a 2-byte UTF-16
+ ** character. Two bytes are required in the output buffer for the
+ ** nul-terminator.
+ */
+ len = 2 * (sqlite3_int64)pMem->n + 2;
+ }
+
+ /* Set zIn to point at the start of the input buffer and zTerm to point 1
+ ** byte past the end.
+ **
+ ** Variable zOut is set to point at the output buffer, space obtained
+ ** from sqlite3_malloc().
+ */
+ zIn = (u8*)pMem->z;
+ zTerm = &zIn[pMem->n];
+ zOut = sqlite3DbMallocRaw(pMem->db, len);
+ if( !zOut ){
+ return SQLITE_NOMEM_BKPT;
+ }
+ z = zOut;
+
+ if( pMem->enc==SQLITE_UTF8 ){
+ if( desiredEnc==SQLITE_UTF16LE ){
+ /* UTF-8 -> UTF-16 Little-endian */
+ while( zIn<zTerm ){
+ READ_UTF8(zIn, zTerm, c);
+ WRITE_UTF16LE(z, c);
+ }
+ }else{
+ assert( desiredEnc==SQLITE_UTF16BE );
+ /* UTF-8 -> UTF-16 Big-endian */
+ while( zIn<zTerm ){
+ READ_UTF8(zIn, zTerm, c);
+ WRITE_UTF16BE(z, c);
+ }
+ }
+ pMem->n = (int)(z - zOut);
+ *z++ = 0;
+ }else{
+ assert( desiredEnc==SQLITE_UTF8 );
+ if( pMem->enc==SQLITE_UTF16LE ){
+ /* UTF-16 Little-endian -> UTF-8 */
+ while( zIn<zTerm ){
+ c = *(zIn++);
+ c += (*(zIn++))<<8;
+ if( c>=0xd800 && c<0xe000 ){
+#ifdef SQLITE_REPLACE_INVALID_UTF
+ if( c>=0xdc00 || zIn>=zTerm ){
+ c = 0xfffd;
+ }else{
+ int c2 = *(zIn++);
+ c2 += (*(zIn++))<<8;
+ if( c2<0xdc00 || c2>=0xe000 ){
+ zIn -= 2;
+ c = 0xfffd;
+ }else{
+ c = ((c&0x3ff)<<10) + (c2&0x3ff) + 0x10000;
+ }
+ }
+#else
+ if( zIn<zTerm ){
+ int c2 = (*zIn++);
+ c2 += ((*zIn++)<<8);
+ c = (c2&0x03FF) + ((c&0x003F)<<10) + (((c&0x03C0)+0x0040)<<10);
+ }
+#endif
+ }
+ WRITE_UTF8(z, c);
+ }
+ }else{
+ /* UTF-16 Big-endian -> UTF-8 */
+ while( zIn<zTerm ){
+ c = (*(zIn++))<<8;
+ c += *(zIn++);
+ if( c>=0xd800 && c<0xe000 ){
+#ifdef SQLITE_REPLACE_INVALID_UTF
+ if( c>=0xdc00 || zIn>=zTerm ){
+ c = 0xfffd;
+ }else{
+ int c2 = (*(zIn++))<<8;
+ c2 += *(zIn++);
+ if( c2<0xdc00 || c2>=0xe000 ){
+ zIn -= 2;
+ c = 0xfffd;
+ }else{
+ c = ((c&0x3ff)<<10) + (c2&0x3ff) + 0x10000;
+ }
+ }
+#else
+ if( zIn<zTerm ){
+ int c2 = ((*zIn++)<<8);
+ c2 += (*zIn++);
+ c = (c2&0x03FF) + ((c&0x003F)<<10) + (((c&0x03C0)+0x0040)<<10);
+ }
+#endif
+ }
+ WRITE_UTF8(z, c);
+ }
+ }
+ pMem->n = (int)(z - zOut);
+ }
+ *z = 0;
+ assert( (pMem->n+(desiredEnc==SQLITE_UTF8?1:2))<=len );
+
+ c = MEM_Str|MEM_Term|(pMem->flags&(MEM_AffMask|MEM_Subtype));
+ sqlite3VdbeMemRelease(pMem);
+ pMem->flags = c;
+ pMem->enc = desiredEnc;
+ pMem->z = (char*)zOut;
+ pMem->zMalloc = pMem->z;
+ pMem->szMalloc = sqlite3DbMallocSize(pMem->db, pMem->z);
+
+translate_out:
+#if defined(TRANSLATE_TRACE) && defined(SQLITE_DEBUG)
+ {
+ StrAccum acc;
+ char zBuf[1000];
+ sqlite3StrAccumInit(&acc, 0, zBuf, sizeof(zBuf), 0);
+ sqlite3VdbeMemPrettyPrint(pMem, &acc);
+ fprintf(stderr, "OUTPUT: %s\n", sqlite3StrAccumFinish(&acc));
+ }
+#endif
+ return SQLITE_OK;
+}
+#endif /* SQLITE_OMIT_UTF16 */
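+
+/* As a worked example of the surrogate-pair arithmetic in the UTF-16
+** to UTF-8 loops above (an illustrative sketch, not part of the
+** translation code): the lead/trail pair (0xD83D, 0xDE00) combines as
+**
+**       ((0xD83D & 0x3ff) << 10)     =  0xF400
+**     +  (0xDE00 & 0x3ff)            =  0x0200
+**     +  0x10000
+**                                    =  0x1F600    (U+1F600)
+**
+** which WRITE_UTF8() then emits as the four bytes F0 9F 98 80.
+*/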
+
+#ifndef SQLITE_OMIT_UTF16
+/*
+** This routine checks for a byte-order mark at the beginning of the
+** UTF-16 string stored in *pMem. If one is present, it is removed and
+** the encoding of the Mem adjusted. This routine does not do any
+** byte-swapping; it just sets Mem.enc appropriately.
+**
+** The allocation (static, dynamic etc.) and encoding of the Mem may be
+** changed by this function.
+*/
+SQLITE_PRIVATE int sqlite3VdbeMemHandleBom(Mem *pMem){
+ int rc = SQLITE_OK;
+ u8 bom = 0;
+
+ assert( pMem->n>=0 );
+ if( pMem->n>1 ){
+ u8 b1 = *(u8 *)pMem->z;
+ u8 b2 = *(((u8 *)pMem->z) + 1);
+ if( b1==0xFE && b2==0xFF ){
+ bom = SQLITE_UTF16BE;
+ }
+ if( b1==0xFF && b2==0xFE ){
+ bom = SQLITE_UTF16LE;
+ }
+ }
+
+ if( bom ){
+ rc = sqlite3VdbeMemMakeWriteable(pMem);
+ if( rc==SQLITE_OK ){
+ pMem->n -= 2;
+ memmove(pMem->z, &pMem->z[2], pMem->n);
+ pMem->z[pMem->n] = '\0';
+ pMem->z[pMem->n+1] = '\0';
+ pMem->flags |= MEM_Term;
+ pMem->enc = bom;
+ }
+ }
+ return rc;
+}
+#endif /* SQLITE_OMIT_UTF16 */
+
+/*
+** zIn is a UTF-8 encoded unicode string. If nByte is less than zero,
+** return the number of unicode characters in zIn up to (but not including)
+** the first 0x00 byte. If nByte is not less than zero, return the
+** number of unicode characters in the first nByte of zIn (or up to
+** the first 0x00, whichever comes first).
+*/
+SQLITE_PRIVATE int sqlite3Utf8CharLen(const char *zIn, int nByte){
+ int r = 0;
+ const u8 *z = (const u8*)zIn;
+ const u8 *zTerm;
+ if( nByte>=0 ){
+ zTerm = &z[nByte];
+ }else{
+ zTerm = (const u8*)(-1);
+ }
+ assert( z<=zTerm );
+ while( *z!=0 && z<zTerm ){
+ SQLITE_SKIP_UTF8(z);
+ r++;
+ }
+ return r;
+}
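+
+/* For example (an illustrative sketch): sqlite3Utf8CharLen("a\xC3\xA9b", 4)
+** returns 3, because the two bytes C3 A9 encode the single character
+** U+00E9; sqlite3Utf8CharLen("a\xC3\xA9b", -1) counts the same 3
+** characters, stopping at the terminating 0x00 byte.
+*/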
+
+/* This test function is not currently used by the automated test-suite.
+** Hence it is only available in debug builds.
+*/
+#if defined(SQLITE_TEST) && defined(SQLITE_DEBUG)
+/*
+** Translate UTF-8 to UTF-8.
+**
+** This has the effect of making sure that the string is well-formed
+** UTF-8. Miscoded characters are removed.
+**
+** The translation is done in-place and aborted if the output
+** overruns the input.
+*/
+SQLITE_PRIVATE int sqlite3Utf8To8(unsigned char *zIn){
+ unsigned char *zOut = zIn;
+ unsigned char *zStart = zIn;
+ u32 c;
+
+ while( zIn[0] && zOut<=zIn ){
+ c = sqlite3Utf8Read((const u8**)&zIn);
+ if( c!=0xfffd ){
+ WRITE_UTF8(zOut, c);
+ }
+ }
+ *zOut = 0;
+ return (int)(zOut - zStart);
+}
+#endif
+
+#ifndef SQLITE_OMIT_UTF16
+/*
+** Convert a UTF-16 string in the native encoding into a UTF-8 string.
+** Memory to hold the UTF-8 string is obtained from sqlite3_malloc and must
+** be freed by the calling function.
+**
+** NULL is returned if there is an allocation error.
+*/
+SQLITE_PRIVATE char *sqlite3Utf16to8(sqlite3 *db, const void *z, int nByte, u8 enc){
+ Mem m;
+ memset(&m, 0, sizeof(m));
+ m.db = db;
+ sqlite3VdbeMemSetStr(&m, z, nByte, enc, SQLITE_STATIC);
+ sqlite3VdbeChangeEncoding(&m, SQLITE_UTF8);
+ if( db->mallocFailed ){
+ sqlite3VdbeMemRelease(&m);
+ m.z = 0;
+ }
+ assert( (m.flags & MEM_Term)!=0 || db->mallocFailed );
+ assert( (m.flags & MEM_Str)!=0 || db->mallocFailed );
+ assert( m.z || db->mallocFailed );
+ return m.z;
+}
+
+/*
+** zIn is a UTF-16 encoded unicode string at least nChar characters long.
+** Return the number of bytes in the first nChar unicode characters
+** in zIn. nChar must be non-negative.
+*/
+SQLITE_PRIVATE int sqlite3Utf16ByteLen(const void *zIn, int nChar){
+ int c;
+ unsigned char const *z = zIn;
+ int n = 0;
+
+ if( SQLITE_UTF16NATIVE==SQLITE_UTF16LE ) z++;
+ while( n<nChar ){
+ c = z[0];
+ z += 2;
+ if( c>=0xd8 && c<0xdc && z[0]>=0xdc && z[0]<0xe0 ) z += 2;
+ n++;
+ }
+ return (int)(z-(unsigned char const *)zIn)
+ - (SQLITE_UTF16NATIVE==SQLITE_UTF16LE);
+}
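+
+/* For example (a sketch, assuming a little-endian host): the UTF-16LE
+** string "ab" is the four bytes 61 00 62 00, so sqlite3Utf16ByteLen(z, 2)
+** returns 4. A supplementary character such as U+1F600 occupies one
+** surrogate pair, contributing 4 bytes while counting as a single
+** character.
+*/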
+
+#if defined(SQLITE_TEST)
+/*
+** This routine is called from the TCL test function "translate_selftest".
+** It checks that the primitives for serializing and deserializing
+** characters in each encoding are inverses of each other.
+*/
+SQLITE_PRIVATE void sqlite3UtfSelfTest(void){
+ unsigned int i, t;
+ unsigned char zBuf[20];
+ unsigned char *z;
+ int n;
+ unsigned int c;
+
+ for(i=0; i<0x00110000; i++){
+ z = zBuf;
+ WRITE_UTF8(z, i);
+ n = (int)(z-zBuf);
+ assert( n>0 && n<=4 );
+ z[0] = 0;
+ z = zBuf;
+ c = sqlite3Utf8Read((const u8**)&z);
+ t = i;
+ if( i>=0xD800 && i<=0xDFFF ) t = 0xFFFD;
+ if( (i&0xFFFFFFFE)==0xFFFE ) t = 0xFFFD;
+ assert( c==t );
+ assert( (z-zBuf)==n );
+ }
+}
+#endif /* SQLITE_TEST */
+#endif /* SQLITE_OMIT_UTF16 */
+
+/************** End of utf.c *************************************************/
+/************** Begin file util.c ********************************************/
+/*
+** 2001 September 15
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+** Utility functions used throughout sqlite.
+**
+** This file contains functions for allocating memory, comparing
+** strings, and stuff like that.
+**
+*/
+/* #include "sqliteInt.h" */
+/* #include <stdarg.h> */
+#ifndef SQLITE_OMIT_FLOATING_POINT
+#include <math.h>
+#endif
+
+/*
+** Routine needed to support the testcase() macro.
+*/
+#ifdef SQLITE_COVERAGE_TEST
+SQLITE_PRIVATE void sqlite3Coverage(int x){
+ static unsigned dummy = 0;
+ dummy += (unsigned)x;
+}
+#endif
+
+/*
+** Calls to sqlite3FaultSim() are used to simulate a failure during testing,
+** or to bypass normal error detection during testing in order to let
+** execution proceed further downstream.
+**
+** In deployment, sqlite3FaultSim() *always* returns SQLITE_OK (0). The
+** sqlite3FaultSim() function only returns non-zero during testing.
+**
+** During testing, if the test harness has set a fault-sim callback using
+** a call to sqlite3_test_control(SQLITE_TESTCTRL_FAULT_INSTALL), then
+** each call to sqlite3FaultSim() is relayed to that application-supplied
+** callback and the integer return value from the application-supplied
+** callback is returned by sqlite3FaultSim().
+**
+** The integer argument to sqlite3FaultSim() is a code to identify which
+** sqlite3FaultSim() instance is being invoked. Each call to sqlite3FaultSim()
+** should have a unique code. To prevent legacy testing applications from
+** breaking, the codes should not be changed or reused.
+*/
+#ifndef SQLITE_UNTESTABLE
+SQLITE_PRIVATE int sqlite3FaultSim(int iTest){
+ int (*xCallback)(int) = sqlite3GlobalConfig.xTestCallback;
+ return xCallback ? xCallback(iTest) : SQLITE_OK;
+}
+#endif
+
+#ifndef SQLITE_OMIT_FLOATING_POINT
+/*
+** Return true if the floating point value is Not a Number (NaN).
+*/
+SQLITE_PRIVATE int sqlite3IsNaN(double x){
+ u64 y;
+ memcpy(&y,&x,sizeof(y));
+ return IsNaN(y);
+}
+#endif /* SQLITE_OMIT_FLOATING_POINT */
+
+/*
+** Compute a string length that is limited to what can be stored in
+** the lower 30 bits of a 32-bit signed integer.
+**
+** The value returned will never be negative. Nor will it ever be greater
+** than the actual length of the string. For very long strings (greater
+** than 1GiB) the value returned might be less than the true string length.
+*/
+SQLITE_PRIVATE int sqlite3Strlen30(const char *z){
+ if( z==0 ) return 0;
+ return 0x3fffffff & (int)strlen(z);
+}
+
+/*
+** Return the declared type of a column. Or return zDflt if the column
+** has no declared type.
+**
+** The column type is an extra string stored after the zero-terminator on
+** the column name if and only if the COLFLAG_HASTYPE flag is set.
+*/
+SQLITE_PRIVATE char *sqlite3ColumnType(Column *pCol, char *zDflt){
+ if( (pCol->colFlags & COLFLAG_HASTYPE)==0 ) return zDflt;
+ return pCol->zName + strlen(pCol->zName) + 1;
+}
+
+/*
+** Helper function for sqlite3Error() - called rarely. Broken out into
+** a separate routine to avoid unnecessary register saves on entry to
+** sqlite3Error().
+*/
+static SQLITE_NOINLINE void sqlite3ErrorFinish(sqlite3 *db, int err_code){
+ if( db->pErr ) sqlite3ValueSetNull(db->pErr);
+ sqlite3SystemError(db, err_code);
+}
+
+/*
+** Set the current error code to err_code and clear any prior error message.
+** Also set iSysErrno (by calling sqlite3SystemError()) if the err_code indicates
+** that would be appropriate.
+*/
+SQLITE_PRIVATE void sqlite3Error(sqlite3 *db, int err_code){
+ assert( db!=0 );
+ db->errCode = err_code;
+ if( err_code || db->pErr ) sqlite3ErrorFinish(db, err_code);
+}
+
+/*
+** Load the sqlite3.iSysErrno field if that is an appropriate thing
+** to do based on the SQLite error code in rc.
+*/
+SQLITE_PRIVATE void sqlite3SystemError(sqlite3 *db, int rc){
+ if( rc==SQLITE_IOERR_NOMEM ) return;
+ rc &= 0xff;
+ if( rc==SQLITE_CANTOPEN || rc==SQLITE_IOERR ){
+ db->iSysErrno = sqlite3OsGetLastError(db->pVfs);
+ }
+}
+
+/*
+** Set the most recent error code and error string for the sqlite
+** handle "db". The error code is set to "err_code".
+**
+** If it is not NULL, string zFormat specifies the format of the
+** error string in the style of the printf functions: The following
+** format characters are allowed:
+**
+** %s Insert a string
+** %z A string that should be freed after use
+** %d Insert an integer
+** %T Insert a token
+** %S Insert the first element of a SrcList
+**
+** zFormat and any string tokens that follow it are assumed to be
+** encoded in UTF-8.
+**
+** To clear the most recent error for sqlite handle "db", sqlite3Error
+** should be called with err_code set to SQLITE_OK and zFormat set
+** to NULL.
+*/
+SQLITE_PRIVATE void sqlite3ErrorWithMsg(sqlite3 *db, int err_code, const char *zFormat, ...){
+ assert( db!=0 );
+ db->errCode = err_code;
+ sqlite3SystemError(db, err_code);
+ if( zFormat==0 ){
+ sqlite3Error(db, err_code);
+ }else if( db->pErr || (db->pErr = sqlite3ValueNew(db))!=0 ){
+ char *z;
+ va_list ap;
+ va_start(ap, zFormat);
+ z = sqlite3VMPrintf(db, zFormat, ap);
+ va_end(ap);
+ sqlite3ValueSetStr(db->pErr, -1, z, SQLITE_UTF8, SQLITE_DYNAMIC);
+ }
+}
+
+/*
+** Add an error message to pParse->zErrMsg and increment pParse->nErr.
+** The following formatting characters are allowed:
+**
+** %s Insert a string
+** %z A string that should be freed after use
+** %d Insert an integer
+** %T Insert a token
+** %S Insert the first element of a SrcList
+**
+** This function should be used to report any error that occurs while
+** compiling an SQL statement (i.e. within sqlite3_prepare()). The
+** last thing the sqlite3_prepare() function does is copy the error
+** stored by this function into the database handle using sqlite3Error().
+** Functions sqlite3Error() or sqlite3ErrorWithMsg() should be used
+** during statement execution (sqlite3_step() etc.).
+*/
+SQLITE_PRIVATE void sqlite3ErrorMsg(Parse *pParse, const char *zFormat, ...){
+ char *zMsg;
+ va_list ap;
+ sqlite3 *db = pParse->db;
+ va_start(ap, zFormat);
+ zMsg = sqlite3VMPrintf(db, zFormat, ap);
+ va_end(ap);
+ if( db->suppressErr ){
+ sqlite3DbFree(db, zMsg);
+ }else{
+ pParse->nErr++;
+ sqlite3DbFree(db, pParse->zErrMsg);
+ pParse->zErrMsg = zMsg;
+ pParse->rc = SQLITE_ERROR;
+ pParse->pWith = 0;
+ }
+}
+
+/*
+** If database connection db is currently parsing SQL, then transfer
+** error code errCode to that parser if the parser has not already
+** encountered some other kind of error.
+*/
+SQLITE_PRIVATE int sqlite3ErrorToParser(sqlite3 *db, int errCode){
+ Parse *pParse;
+ if( db==0 || (pParse = db->pParse)==0 ) return errCode;
+ pParse->rc = errCode;
+ pParse->nErr++;
+ return errCode;
+}
+
+/*
+** Convert an SQL-style quoted string into a normal string by removing
+** the quote characters. The conversion is done in-place. If the
+** input does not begin with a quote character, then this routine
+** is a no-op.
+**
+** The input string must be zero-terminated. A new zero-terminator
+** is added to the dequoted string.
+**
+** This routine returns no value; the dequoted result is left in z[],
+** overwriting the original text.
+**
+** 2002-02-14: This routine is extended to remove MS-Access style
+** brackets from around identifiers. For example: "[a-b-c]" becomes
+** "a-b-c".
+*/
+SQLITE_PRIVATE void sqlite3Dequote(char *z){
+ char quote;
+ int i, j;
+ if( z==0 ) return;
+ quote = z[0];
+ if( !sqlite3Isquote(quote) ) return;
+ if( quote=='[' ) quote = ']';
+ for(i=1, j=0;; i++){
+ assert( z[i] );
+ if( z[i]==quote ){
+ if( z[i+1]==quote ){
+ z[j++] = quote;
+ i++;
+ }else{
+ break;
+ }
+ }else{
+ z[j++] = z[i];
+ }
+ }
+ z[j] = 0;
+}
+SQLITE_PRIVATE void sqlite3DequoteExpr(Expr *p){
+ assert( sqlite3Isquote(p->u.zToken[0]) );
+ p->flags |= p->u.zToken[0]=='"' ? EP_Quoted|EP_DblQuoted : EP_Quoted;
+ sqlite3Dequote(p->u.zToken);
+}
+
+/*
+** Generate a Token object from a string
+*/
+SQLITE_PRIVATE void sqlite3TokenInit(Token *p, char *z){
+ p->z = z;
+ p->n = sqlite3Strlen30(z);
+}
+
+/* Convenient short-hand */
+#define UpperToLower sqlite3UpperToLower
+
+/*
+** Some systems have stricmp(). Others have strcasecmp(). Because
+** there is no consistency, we will define our own.
+**
+** IMPLEMENTATION-OF: R-30243-02494 The sqlite3_stricmp() and
+** sqlite3_strnicmp() APIs allow applications and extensions to compare
+** the contents of two buffers containing UTF-8 strings in a
+** case-independent fashion, using the same definition of "case
+** independence" that SQLite uses internally when comparing identifiers.
+*/
+SQLITE_API int sqlite3_stricmp(const char *zLeft, const char *zRight){
+ if( zLeft==0 ){
+ return zRight ? -1 : 0;
+ }else if( zRight==0 ){
+ return 1;
+ }
+ return sqlite3StrICmp(zLeft, zRight);
+}
+SQLITE_PRIVATE int sqlite3StrICmp(const char *zLeft, const char *zRight){
+ unsigned char *a, *b;
+ int c, x;
+ a = (unsigned char *)zLeft;
+ b = (unsigned char *)zRight;
+ for(;;){
+ c = *a;
+ x = *b;
+ if( c==x ){
+ if( c==0 ) break;
+ }else{
+ c = (int)UpperToLower[c] - (int)UpperToLower[x];
+ if( c ) break;
+ }
+ a++;
+ b++;
+ }
+ return c;
+}
+SQLITE_API int sqlite3_strnicmp(const char *zLeft, const char *zRight, int N){
+ register unsigned char *a, *b;
+ if( zLeft==0 ){
+ return zRight ? -1 : 0;
+ }else if( zRight==0 ){
+ return 1;
+ }
+ a = (unsigned char *)zLeft;
+ b = (unsigned char *)zRight;
+ while( N-- > 0 && *a!=0 && UpperToLower[*a]==UpperToLower[*b]){ a++; b++; }
+ return N<0 ? 0 : UpperToLower[*a] - UpperToLower[*b];
+}
+
+/*
+** Compute an 8-bit hash on a string that is insensitive to case differences
+*/
+SQLITE_PRIVATE u8 sqlite3StrIHash(const char *z){
+ u8 h = 0;
+ if( z==0 ) return 0;
+ while( z[0] ){
+ h += UpperToLower[(unsigned char)z[0]];
+ z++;
+ }
+ return h;
+}
+
+/*
+** Compute 10 to the E-th power. Examples: E==1 results in 10.
+** E==2 results in 100. E==50 results in 1.0e50.
+**
+** This routine only works for values of E between 1 and 341.
+*/
+static LONGDOUBLE_TYPE sqlite3Pow10(int E){
+#if defined(_MSC_VER)
+ static const LONGDOUBLE_TYPE x[] = {
+ 1.0e+001L,
+ 1.0e+002L,
+ 1.0e+004L,
+ 1.0e+008L,
+ 1.0e+016L,
+ 1.0e+032L,
+ 1.0e+064L,
+ 1.0e+128L,
+ 1.0e+256L
+ };
+ LONGDOUBLE_TYPE r = 1.0;
+ int i;
+ assert( E>=0 && E<=307 );
+ for(i=0; E!=0; i++, E >>=1){
+ if( E & 1 ) r *= x[i];
+ }
+ return r;
+#else
+ LONGDOUBLE_TYPE x = 10.0;
+ LONGDOUBLE_TYPE r = 1.0;
+ while(1){
+ if( E & 1 ) r *= x;
+ E >>= 1;
+ if( E==0 ) break;
+ x *= x;
+ }
+ return r;
+#endif
+}
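+
+/* A worked example of the exponentiation-by-squaring loop above:
+** E==50 is binary 110010, so the loop folds together
+** 10^2 * 10^16 * 10^32 == 10^50, squaring x at each step and
+** multiplying it into r whenever the low bit of E is set.
+*/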
+
+/*
+** The string z[] is a text representation of a real number.
+** Convert this string to a double and write it into *pResult.
+**
+** The string z[] is length bytes in length (bytes, not characters) and
+** uses the encoding enc. The string is not necessarily zero-terminated.
+**
+** Return TRUE if the result is a valid real number (or integer) and FALSE
+** if the string is empty or contains extraneous text. More specifically
+** return
+** 1 => The input string is a pure integer
+** 2 or more => The input has a decimal point or eNNN clause
+** 0 or less => The input string is not a valid number
+** -1 => Not a valid number, but has a valid prefix which
+** includes a decimal point and/or an eNNN clause
+**
+** Valid numbers are in one of these formats:
+**
+** [+-]digits[E[+-]digits]
+** [+-]digits.[digits][E[+-]digits]
+** [+-].digits[E[+-]digits]
+**
+** Leading and trailing whitespace is ignored for the purpose of determining
+** validity.
+**
+** If some prefix of the input string is a valid number, this routine
+** returns FALSE but it still converts the prefix and writes the result
+** into *pResult.
+*/
+#if defined(_MSC_VER)
+#pragma warning(disable : 4756)
+#endif
+SQLITE_PRIVATE int sqlite3AtoF(const char *z, double *pResult, int length, u8 enc){
+#ifndef SQLITE_OMIT_FLOATING_POINT
+ int incr;
+ const char *zEnd;
+ /* sign * significand * (10 ^ (esign * exponent)) */
+ int sign = 1; /* sign of significand */
+ i64 s = 0; /* significand */
+ int d = 0; /* adjust exponent for shifting decimal point */
+ int esign = 1; /* sign of exponent */
+ int e = 0; /* exponent */
+ int eValid = 1; /* True if exponent is either not used or is well-formed */
+ double result;
+ int nDigit = 0; /* Number of digits processed */
+ int eType = 1; /* 1: pure integer, 2+: fractional, -1 or less: bad UTF16 */
+
+ assert( enc==SQLITE_UTF8 || enc==SQLITE_UTF16LE || enc==SQLITE_UTF16BE );
+ *pResult = 0.0; /* Default return value, in case of an error */
+ if( length==0 ) return 0;
+
+ if( enc==SQLITE_UTF8 ){
+ incr = 1;
+ zEnd = z + length;
+ }else{
+ int i;
+ incr = 2;
+ length &= ~1;
+ assert( SQLITE_UTF16LE==2 && SQLITE_UTF16BE==3 );
+ testcase( enc==SQLITE_UTF16LE );
+ testcase( enc==SQLITE_UTF16BE );
+ for(i=3-enc; i<length && z[i]==0; i+=2){}
+ if( i<length ) eType = -100;
+ zEnd = &z[i^1];
+ z += (enc&1);
+ }
+
+ /* skip leading spaces */
+ while( z<zEnd && sqlite3Isspace(*z) ) z+=incr;
+ if( z>=zEnd ) return 0;
+
+ /* get sign of significand */
+ if( *z=='-' ){
+ sign = -1;
+ z+=incr;
+ }else if( *z=='+' ){
+ z+=incr;
+ }
+
+ /* copy max significant digits to significand */
+ while( z<zEnd && sqlite3Isdigit(*z) ){
+ s = s*10 + (*z - '0');
+ z+=incr; nDigit++;
+ if( s>=((LARGEST_INT64-9)/10) ){
+ /* skip non-significant significand digits
+ ** (increase exponent by d to shift decimal left) */
+ while( z<zEnd && sqlite3Isdigit(*z) ){ z+=incr; d++; }
+ }
+ }
+ if( z>=zEnd ) goto do_atof_calc;
+
+ /* if decimal point is present */
+ if( *z=='.' ){
+ z+=incr;
+ eType++;
+ /* copy digits from after decimal to significand
+ ** (decrease exponent by d to shift decimal right) */
+ while( z<zEnd && sqlite3Isdigit(*z) ){
+ if( s<((LARGEST_INT64-9)/10) ){
+ s = s*10 + (*z - '0');
+ d--;
+ nDigit++;
+ }
+ z+=incr;
+ }
+ }
+ if( z>=zEnd ) goto do_atof_calc;
+
+ /* if exponent is present */
+ if( *z=='e' || *z=='E' ){
+ z+=incr;
+ eValid = 0;
+ eType++;
+
+ /* This branch is needed to avoid a (harmless) buffer overread. The
+ ** special comment alerts the mutation tester that the correct answer
+ ** is obtained even if the branch is omitted */
+ if( z>=zEnd ) goto do_atof_calc; /*PREVENTS-HARMLESS-OVERREAD*/
+
+ /* get sign of exponent */
+ if( *z=='-' ){
+ esign = -1;
+ z+=incr;
+ }else if( *z=='+' ){
+ z+=incr;
+ }
+ /* copy digits to exponent */
+ while( z<zEnd && sqlite3Isdigit(*z) ){
+ e = e<10000 ? (e*10 + (*z - '0')) : 10000;
+ z+=incr;
+ eValid = 1;
+ }
+ }
+
+ /* skip trailing spaces */
+ while( z<zEnd && sqlite3Isspace(*z) ) z+=incr;
+
+do_atof_calc:
+ /* adjust exponent by d, and update sign */
+ e = (e*esign) + d;
+ if( e<0 ) {
+ esign = -1;
+ e *= -1;
+ } else {
+ esign = 1;
+ }
+
+ if( s==0 ) {
+ /* In the IEEE 754 standard, zero is signed. */
+ result = sign<0 ? -(double)0 : (double)0;
+ } else {
+ /* Attempt to reduce exponent.
+ **
+ ** Branches that are not required for the correct answer but which only
+ ** help to obtain the correct answer faster are marked with special
+ ** comments, as a hint to the mutation tester.
+ */
+ while( e>0 ){ /*OPTIMIZATION-IF-TRUE*/
+ if( esign>0 ){
+ if( s>=(LARGEST_INT64/10) ) break; /*OPTIMIZATION-IF-FALSE*/
+ s *= 10;
+ }else{
+ if( s%10!=0 ) break; /*OPTIMIZATION-IF-FALSE*/
+ s /= 10;
+ }
+ e--;
+ }
+
+ /* adjust the sign of significand */
+ s = sign<0 ? -s : s;
+
+ if( e==0 ){ /*OPTIMIZATION-IF-TRUE*/
+ result = (double)s;
+ }else{
+ /* attempt to handle extremely small/large numbers better */
+ if( e>307 ){ /*OPTIMIZATION-IF-TRUE*/
+ if( e<342 ){ /*OPTIMIZATION-IF-TRUE*/
+ LONGDOUBLE_TYPE scale = sqlite3Pow10(e-308);
+ if( esign<0 ){
+ result = s / scale;
+ result /= 1.0e+308;
+ }else{
+ result = s * scale;
+ result *= 1.0e+308;
+ }
+ }else{ assert( e>=342 );
+ if( esign<0 ){
+ result = 0.0*s;
+ }else{
+#ifdef INFINITY
+ result = INFINITY*s;
+#else
+ result = 1e308*1e308*s; /* Infinity */
+#endif
+ }
+ }
+ }else{
+ LONGDOUBLE_TYPE scale = sqlite3Pow10(e);
+ if( esign<0 ){
+ result = s / scale;
+ }else{
+ result = s * scale;
+ }
+ }
+ }
+ }
+
+ /* store the result */
+ *pResult = result;
+
+ /* return true if this is a number with no extra non-whitespace characters after */
+ if( z==zEnd && nDigit>0 && eValid && eType>0 ){
+ return eType;
+ }else if( eType>=2 && (eType==3 || eValid) && nDigit>0 ){
+ return -1;
+ }else{
+ return 0;
+ }
+#else
+ return !sqlite3Atoi64(z, pResult, length, enc);
+#endif /* SQLITE_OMIT_FLOATING_POINT */
+}
+#if defined(_MSC_VER)
+#pragma warning(default : 4756)
+#endif
+
+/*
+** Render a signed 64-bit integer as text. Store the result in zOut[].
+**
+** The caller must ensure that zOut[] is at least 21 bytes in size.
+*/
+SQLITE_PRIVATE void sqlite3Int64ToText(i64 v, char *zOut){
+ int i;
+ u64 x;
+ char zTemp[22];
+ if( v<0 ){
+ x = (v==SMALLEST_INT64) ? ((u64)1)<<63 : (u64)-v;
+ }else{
+ x = v;
+ }
+ i = sizeof(zTemp)-2;
+ zTemp[sizeof(zTemp)-1] = 0;
+ do{
+ zTemp[i--] = (x%10) + '0';
+ x = x/10;
+ }while( x );
+ if( v<0 ) zTemp[i--] = '-';
+ memcpy(zOut, &zTemp[i+1], sizeof(zTemp)-1-i);
+}
+
+/*
+** Compare the 19-character string zNum against the text representation
+** of 2^63: 9223372036854775808. Return negative, zero, or positive
+** if zNum is less than, equal to, or greater than the string.
+** Note that zNum must contain exactly 19 characters.
+**
+** Unlike memcmp() this routine is guaranteed to return the difference
+** in the values of the last digit if the only difference is in the
+** last digit. So, for example,
+**
+** compare2pow63("9223372036854775800", 1)
+**
+** will return -8.
+*/
+static int compare2pow63(const char *zNum, int incr){
+ int c = 0;
+ int i;
+ /* 012345678901234567 */
+ const char *pow63 = "922337203685477580";
+ for(i=0; c==0 && i<18; i++){
+ c = (zNum[i*incr]-pow63[i])*10;
+ }
+ if( c==0 ){
+ c = zNum[18*incr] - '8';
+ testcase( c==(-1) );
+ testcase( c==0 );
+ testcase( c==(+1) );
+ }
+ return c;
+}
+
+/*
+** Convert zNum to a 64-bit signed integer. zNum must be decimal. This
+** routine does *not* accept hexadecimal notation.
+**
+** Returns:
+**
+** -1 Not even a prefix of the input text looks like an integer
+** 0 Successful transformation. Fits in a 64-bit signed integer.
+** 1 Excess non-space text after the integer value
+** 2 Integer too large for a 64-bit signed integer or is malformed
+** 3 Special case of 9223372036854775808
+**
+** length is the number of bytes in the string (bytes, not characters).
+** The string is not necessarily zero-terminated. The encoding is
+** given by enc.
+*/
+SQLITE_PRIVATE int sqlite3Atoi64(const char *zNum, i64 *pNum, int length, u8 enc){
+ int incr;
+ u64 u = 0;
+ int neg = 0; /* assume positive */
+ int i;
+ int c = 0;
+ int nonNum = 0; /* True if input contains UTF16 with high byte non-zero */
+ int rc; /* Baseline return code */
+ const char *zStart;
+ const char *zEnd = zNum + length;
+ assert( enc==SQLITE_UTF8 || enc==SQLITE_UTF16LE || enc==SQLITE_UTF16BE );
+ if( enc==SQLITE_UTF8 ){
+ incr = 1;
+ }else{
+ incr = 2;
+ assert( SQLITE_UTF16LE==2 && SQLITE_UTF16BE==3 );
+ for(i=3-enc; i<length && zNum[i]==0; i+=2){}
+ nonNum = i<length;
+ zEnd = &zNum[i^1];
+ zNum += (enc&1);
+ }
+ while( zNum<zEnd && sqlite3Isspace(*zNum) ) zNum+=incr;
+ if( zNum<zEnd ){
+ if( *zNum=='-' ){
+ neg = 1;
+ zNum+=incr;
+ }else if( *zNum=='+' ){
+ zNum+=incr;
+ }
+ }
+ zStart = zNum;
+ while( zNum<zEnd && zNum[0]=='0' ){ zNum+=incr; } /* Skip leading zeros. */
+ for(i=0; &zNum[i]<zEnd && (c=zNum[i])>='0' && c<='9'; i+=incr){
+ u = u*10 + c - '0';
+ }
+ testcase( i==18*incr );
+ testcase( i==19*incr );
+ testcase( i==20*incr );
+ if( u>LARGEST_INT64 ){
+ /* This test and assignment is needed only to suppress UB warnings
+ ** from clang and -fsanitize=undefined. This test and assignment make
+ ** the code a little larger and slower, and no harm comes from omitting
+ ** them, but we must appease the undefined-behavior pharisees. */
+ *pNum = neg ? SMALLEST_INT64 : LARGEST_INT64;
+ }else if( neg ){
+ *pNum = -(i64)u;
+ }else{
+ *pNum = (i64)u;
+ }
+ rc = 0;
+ if( i==0 && zStart==zNum ){ /* No digits */
+ rc = -1;
+ }else if( nonNum ){ /* UTF16 with high-order bytes non-zero */
+ rc = 1;
+ }else if( &zNum[i]<zEnd ){ /* Extra bytes at the end */
+ int jj = i;
+ do{
+ if( !sqlite3Isspace(zNum[jj]) ){
+ rc = 1; /* Extra non-space text after the integer */
+ break;
+ }
+ jj += incr;
+ }while( &zNum[jj]<zEnd );
+ }
+ if( i<19*incr ){
+ /* Less than 19 digits, so we know that it fits in 64 bits */
+ assert( u<=LARGEST_INT64 );
+ return rc;
+ }else{
+ /* zNum is a 19-digit number. Compare it against 9223372036854775808. */
+ c = i>19*incr ? 1 : compare2pow63(zNum, incr);
+ if( c<0 ){
+ /* zNum is less than 9223372036854775808 so it fits */
+ assert( u<=LARGEST_INT64 );
+ return rc;
+ }else{
+ *pNum = neg ? SMALLEST_INT64 : LARGEST_INT64;
+ if( c>0 ){
+ /* zNum is greater than 9223372036854775808 so it overflows */
+ return 2;
+ }else{
+ /* zNum is exactly 9223372036854775808. Fits if negative. Overflows
+ ** if positive, which is reported as the special case 3 */
+ assert( u-1==LARGEST_INT64 );
+ return neg ? rc : 3;
+ }
+ }
+ }
+}
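+
+/* Illustrative calls showing the return codes (a sketch, assuming
+** UTF-8 input and i64 v):
+**
+**   sqlite3Atoi64("9223372036854775807", &v, 19, SQLITE_UTF8)  ->  0
+**   sqlite3Atoi64("12abc",               &v,  5, SQLITE_UTF8)  ->  1
+**   sqlite3Atoi64("9223372036854775808", &v, 19, SQLITE_UTF8)  ->  3
+**   sqlite3Atoi64("",                    &v,  0, SQLITE_UTF8)  -> -1
+**
+** In the "3" case the value written to *pNum is clamped to
+** LARGEST_INT64.
+*/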
+
+/*
+** Transform a UTF-8 integer literal, in either decimal or hexadecimal,
+** into a 64-bit signed integer. This routine accepts hexadecimal literals,
+** whereas sqlite3Atoi64() does not.
+**
+** Returns:
+**
+** 0 Successful transformation. Fits in a 64-bit signed integer.
+** 1 Excess text after the integer value
+** 2 Integer too large for a 64-bit signed integer or is malformed
+** 3 Special case of 9223372036854775808
+*/
+SQLITE_PRIVATE int sqlite3DecOrHexToI64(const char *z, i64 *pOut){
+#ifndef SQLITE_OMIT_HEX_INTEGER
+ if( z[0]=='0'
+ && (z[1]=='x' || z[1]=='X')
+ ){
+ u64 u = 0;
+ int i, k;
+ for(i=2; z[i]=='0'; i++){}
+ for(k=i; sqlite3Isxdigit(z[k]); k++){
+ u = u*16 + sqlite3HexToInt(z[k]);
+ }
+ memcpy(pOut, &u, 8);
+ return (z[k]==0 && k-i<=16) ? 0 : 2;
+ }else
+#endif /* SQLITE_OMIT_HEX_INTEGER */
+ {
+ return sqlite3Atoi64(z, pOut, sqlite3Strlen30(z), SQLITE_UTF8);
+ }
+}
+
+/*
+** If zNum represents an integer that will fit in 32-bits, then set
+** *pValue to that integer and return true. Otherwise return false.
+**
+** This routine accepts both decimal and hexadecimal notation for integers.
+**
+** Any non-numeric characters that follow the number are ignored.
+** This is different from sqlite3Atoi64(), which flags extra text
+** after the integer via its return code.
+*/
+SQLITE_PRIVATE int sqlite3GetInt32(const char *zNum, int *pValue){
+ sqlite_int64 v = 0;
+ int i, c;
+ int neg = 0;
+ if( zNum[0]=='-' ){
+ neg = 1;
+ zNum++;
+ }else if( zNum[0]=='+' ){
+ zNum++;
+ }
+#ifndef SQLITE_OMIT_HEX_INTEGER
+ else if( zNum[0]=='0'
+ && (zNum[1]=='x' || zNum[1]=='X')
+ && sqlite3Isxdigit(zNum[2])
+ ){
+ u32 u = 0;
+ zNum += 2;
+ while( zNum[0]=='0' ) zNum++;
+ for(i=0; sqlite3Isxdigit(zNum[i]) && i<8; i++){
+ u = u*16 + sqlite3HexToInt(zNum[i]);
+ }
+ if( (u&0x80000000)==0 && sqlite3Isxdigit(zNum[i])==0 ){
+ memcpy(pValue, &u, 4);
+ return 1;
+ }else{
+ return 0;
+ }
+ }
+#endif
+ if( !sqlite3Isdigit(zNum[0]) ) return 0;
+ while( zNum[0]=='0' ) zNum++;
+ for(i=0; i<11 && (c = zNum[i] - '0')>=0 && c<=9; i++){
+ v = v*10 + c;
+ }
+
+ /* The longest decimal representation of a 32 bit integer is 10 digits:
+ **
+ ** 1234567890
+ ** 2^31 -> 2147483648
+ */
+ testcase( i==10 );
+ if( i>10 ){
+ return 0;
+ }
+ testcase( v-neg==2147483647 );
+ if( v-neg>2147483647 ){
+ return 0;
+ }
+ if( neg ){
+ v = -v;
+ }
+ *pValue = (int)v;
+ return 1;
+}
+
+/*
+** Return a 32-bit integer value extracted from a string. If the
+** string is not an integer, just return 0.
+*/
+SQLITE_PRIVATE int sqlite3Atoi(const char *z){
+ int x = 0;
+ sqlite3GetInt32(z, &x);
+ return x;
+}
+
+/*
+** Try to convert z into an unsigned 32-bit integer. Return true on
+** success and false if there is an error.
+**
+** Only decimal notation is accepted.
+*/
+SQLITE_PRIVATE int sqlite3GetUInt32(const char *z, u32 *pI){
+ u64 v = 0;
+ int i;
+ for(i=0; sqlite3Isdigit(z[i]); i++){
+ v = v*10 + z[i] - '0';
+ if( v>4294967296LL ){ *pI = 0; return 0; }
+ }
+ if( i==0 || z[i]!=0 ){ *pI = 0; return 0; }
+ *pI = (u32)v;
+ return 1;
+}
+
+/*
+** The variable-length integer encoding is as follows:
+**
+** KEY:
+** A = 0xxxxxxx 7 bits of data and one flag bit
+** B = 1xxxxxxx 7 bits of data and one flag bit
+** C = xxxxxxxx 8 bits of data
+**
+** 7 bits - A
+** 14 bits - BA
+** 21 bits - BBA
+** 28 bits - BBBA
+** 35 bits - BBBBA
+** 42 bits - BBBBBA
+** 49 bits - BBBBBBA
+** 56 bits - BBBBBBBA
+** 64 bits - BBBBBBBBC
+*/
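+
+/* For example: the value 300 (binary 1 0010 1100) splits into the
+** 7-bit groups 0000010 and 0101100 and is therefore stored
+** most-significant-first as the two bytes 0x82 0x2C ("BA" in the key
+** above), while any value <= 127 is stored as a single "A" byte.
+*/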
+
+/*
+** Write a 64-bit variable-length integer to memory starting at p[0].
+** The length of the data written will be between 1 and 9 bytes. The number
+** of bytes written is returned.
+**
+** A variable-length integer consists of the lower 7 bits of each byte
+** for all bytes that have the 8th bit set and one byte with the 8th
+** bit clear. Except, if we get to the 9th byte, it stores the full
+** 8 bits and is the last byte.
+*/
+static int SQLITE_NOINLINE putVarint64(unsigned char *p, u64 v){
+ int i, j, n;
+ u8 buf[10];
+ if( v & (((u64)0xff000000)<<32) ){
+ p[8] = (u8)v;
+ v >>= 8;
+ for(i=7; i>=0; i--){
+ p[i] = (u8)((v & 0x7f) | 0x80);
+ v >>= 7;
+ }
+ return 9;
+ }
+ n = 0;
+ do{
+ buf[n++] = (u8)((v & 0x7f) | 0x80);
+ v >>= 7;
+ }while( v!=0 );
+ buf[0] &= 0x7f;
+ assert( n<=9 );
+ for(i=0, j=n-1; j>=0; j--, i++){
+ p[i] = buf[j];
+ }
+ return n;
+}
+SQLITE_PRIVATE int sqlite3PutVarint(unsigned char *p, u64 v){
+ if( v<=0x7f ){
+ p[0] = v&0x7f;
+ return 1;
+ }
+ if( v<=0x3fff ){
+ p[0] = ((v>>7)&0x7f)|0x80;
+ p[1] = v&0x7f;
+ return 2;
+ }
+ return putVarint64(p,v);
+}
+
+/*
+** Bitmasks used by sqlite3GetVarint(). These precomputed constants
+** are defined here rather than simply putting the constant expressions
+** inline in order to work around bugs in the RVT compiler.
+**
+** SLOT_2_0 A mask for (0x7f<<14) | 0x7f
+**
+** SLOT_4_2_0 A mask for (0x7f<<28) | SLOT_2_0
+*/
+#define SLOT_2_0 0x001fc07f
+#define SLOT_4_2_0 0xf01fc07f
+
+
+/*
+** Read a 64-bit variable-length integer from memory starting at p[0].
+** Return the number of bytes read. The value is stored in *v.
+*/
+SQLITE_PRIVATE u8 sqlite3GetVarint(const unsigned char *p, u64 *v){
+ u32 a,b,s;
+
+ if( ((signed char*)p)[0]>=0 ){
+ *v = *p;
+ return 1;
+ }
+ if( ((signed char*)p)[1]>=0 ){
+ *v = ((u32)(p[0]&0x7f)<<7) | p[1];
+ return 2;
+ }
+
+ /* Verify that constants are precomputed correctly */
+ assert( SLOT_2_0 == ((0x7f<<14) | (0x7f)) );
+ assert( SLOT_4_2_0 == ((0xfU<<28) | (0x7f<<14) | (0x7f)) );
+
+ a = ((u32)p[0])<<14;
+ b = p[1];
+ p += 2;
+ a |= *p;
+ /* a: p0<<14 | p2 (unmasked) */
+ if (!(a&0x80))
+ {
+ a &= SLOT_2_0;
+ b &= 0x7f;
+ b = b<<7;
+ a |= b;
+ *v = a;
+ return 3;
+ }
+
+ /* CSE1 from below */
+ a &= SLOT_2_0;
+ p++;
+ b = b<<14;
+ b |= *p;
+ /* b: p1<<14 | p3 (unmasked) */
+ if (!(b&0x80))
+ {
+ b &= SLOT_2_0;
+ /* moved CSE1 up */
+ /* a &= (0x7f<<14)|(0x7f); */
+ a = a<<7;
+ a |= b;
+ *v = a;
+ return 4;
+ }
+
+ /* a: p0<<14 | p2 (masked) */
+ /* b: p1<<14 | p3 (unmasked) */
+ /* 1:save off p0<<21 | p1<<14 | p2<<7 | p3 (masked) */
+ /* moved CSE1 up */
+ /* a &= (0x7f<<14)|(0x7f); */
+ b &= SLOT_2_0;
+ s = a;
+ /* s: p0<<14 | p2 (masked) */
+
+ p++;
+ a = a<<14;
+ a |= *p;
+ /* a: p0<<28 | p2<<14 | p4 (unmasked) */
+ if (!(a&0x80))
+ {
+ /* we can skip these cause they were (effectively) done above
+ ** while calculating s */
+ /* a &= (0x7f<<28)|(0x7f<<14)|(0x7f); */
+ /* b &= (0x7f<<14)|(0x7f); */
+ b = b<<7;
+ a |= b;
+ s = s>>18;
+ *v = ((u64)s)<<32 | a;
+ return 5;
+ }
+
+ /* 2:save off p0<<21 | p1<<14 | p2<<7 | p3 (masked) */
+ s = s<<7;
+ s |= b;
+ /* s: p0<<21 | p1<<14 | p2<<7 | p3 (masked) */
+
+ p++;
+ b = b<<14;
+ b |= *p;
+ /* b: p1<<28 | p3<<14 | p5 (unmasked) */
+ if (!(b&0x80))
+ {
+ /* we can skip this cause it was (effectively) done above in calc'ing s */
+ /* b &= (0x7f<<28)|(0x7f<<14)|(0x7f); */
+ a &= SLOT_2_0;
+ a = a<<7;
+ a |= b;
+ s = s>>18;
+ *v = ((u64)s)<<32 | a;
+ return 6;
+ }
+
+ p++;
+ a = a<<14;
+ a |= *p;
+ /* a: p2<<28 | p4<<14 | p6 (unmasked) */
+ if (!(a&0x80))
+ {
+ a &= SLOT_4_2_0;
+ b &= SLOT_2_0;
+ b = b<<7;
+ a |= b;
+ s = s>>11;
+ *v = ((u64)s)<<32 | a;
+ return 7;
+ }
+
+ /* CSE2 from below */
+ a &= SLOT_2_0;
+ p++;
+ b = b<<14;
+ b |= *p;
+ /* b: p3<<28 | p5<<14 | p7 (unmasked) */
+ if (!(b&0x80))
+ {
+ b &= SLOT_4_2_0;
+ /* moved CSE2 up */
+ /* a &= (0x7f<<14)|(0x7f); */
+ a = a<<7;
+ a |= b;
+ s = s>>4;
+ *v = ((u64)s)<<32 | a;
+ return 8;
+ }
+
+ p++;
+ a = a<<15;
+ a |= *p;
+ /* a: p4<<29 | p6<<15 | p8 (unmasked) */
+
+ /* moved CSE2 up */
+ /* a &= (0x7f<<29)|(0x7f<<15)|(0xff); */
+ b &= SLOT_2_0;
+ b = b<<8;
+ a |= b;
+
+ s = s<<4;
+ b = p[-4];
+ b &= 0x7f;
+ b = b>>3;
+ s |= b;
+
+ *v = ((u64)s)<<32 | a;
+
+ return 9;
+}
+
+/*
+** Read a 32-bit variable-length integer from memory starting at p[0].
+** Return the number of bytes read. The value is stored in *v.
+**
+** If the varint stored in p[0] is larger than can fit in a 32-bit unsigned
+** integer, then set *v to 0xffffffff.
+**
+** A MACRO version, getVarint32, is provided which inlines the
+** single-byte case. All code should use the MACRO version as
+** this function assumes the single-byte case has already been handled.
+*/
+SQLITE_PRIVATE u8 sqlite3GetVarint32(const unsigned char *p, u32 *v){
+ u32 a,b;
+
+ /* The 1-byte case. Overwhelmingly the most common. Handled inline
+ ** by the getVarint32() macro */
+ a = *p;
+ /* a: p0 (unmasked) */
+#ifndef getVarint32
+ if (!(a&0x80))
+ {
+ /* Values between 0 and 127 */
+ *v = a;
+ return 1;
+ }
+#endif
+
+ /* The 2-byte case */
+ p++;
+ b = *p;
+ /* b: p1 (unmasked) */
+ if (!(b&0x80))
+ {
+ /* Values between 128 and 16383 */
+ a &= 0x7f;
+ a = a<<7;
+ *v = a | b;
+ return 2;
+ }
+
+ /* The 3-byte case */
+ p++;
+ a = a<<14;
+ a |= *p;
+ /* a: p0<<14 | p2 (unmasked) */
+ if (!(a&0x80))
+ {
+ /* Values between 16384 and 2097151 */
+ a &= (0x7f<<14)|(0x7f);
+ b &= 0x7f;
+ b = b<<7;
+ *v = a | b;
+ return 3;
+ }
+
+ /* A 32-bit varint is used to store size information in btrees.
+ ** Objects are rarely larger than the 2MiB limit of a 3-byte varint.
+ ** A 3-byte varint is sufficient, for example, to record the size
+ ** of a 1048569-byte BLOB or string.
+ **
+ ** We only unroll the first 1-, 2-, and 3- byte cases. The very
+ ** rare larger cases can be handled by the slower 64-bit varint
+ ** routine.
+ */
+#if 1
+ {
+ u64 v64;
+ u8 n;
+
+ n = sqlite3GetVarint(p-2, &v64);
+ assert( n>3 && n<=9 );
+ if( (v64 & SQLITE_MAX_U32)!=v64 ){
+ *v = 0xffffffff;
+ }else{
+ *v = (u32)v64;
+ }
+ return n;
+ }
+
+#else
+ /* The following code (kept for historical record only) shows an
+ ** unrolling for the 4- and 5-byte varint cases. This code is
+ ** slightly faster, but it is also larger and much harder to test.
+ */
+ p++;
+ b = b<<14;
+ b |= *p;
+ /* b: p1<<14 | p3 (unmasked) */
+ if (!(b&0x80))
+ {
+ /* Values between 2097152 and 268435455 */
+ b &= (0x7f<<14)|(0x7f);
+ a &= (0x7f<<14)|(0x7f);
+ a = a<<7;
+ *v = a | b;
+ return 4;
+ }
+
+ p++;
+ a = a<<14;
+ a |= *p;
+ /* a: p0<<28 | p2<<14 | p4 (unmasked) */
+ if (!(a&0x80))
+ {
+ /* Values between 268435456 and 34359738367 */
+ a &= SLOT_4_2_0;
+ b &= SLOT_4_2_0;
+ b = b<<7;
+ *v = a | b;
+ return 5;
+ }
+
+ /* We can only reach this point when reading a corrupt database
+ ** file. In that case we are not in any hurry. Use the (relatively
+ ** slow) general-purpose sqlite3GetVarint() routine to extract the
+ ** value. */
+ {
+ u64 v64;
+ u8 n;
+
+ p -= 4;
+ n = sqlite3GetVarint(p, &v64);
+ assert( n>5 && n<=9 );
+ *v = (u32)v64;
+ return n;
+ }
+#endif
+}
+
+/*
+** Return the number of bytes that will be needed to store the given
+** 64-bit integer.
+*/
+SQLITE_PRIVATE int sqlite3VarintLen(u64 v){
+ int i;
+ for(i=1; (v >>= 7)!=0; i++){ assert( i<10 ); }
+ return i;
+}
+
+
+/*
+** Read or write a four-byte big-endian integer value.
+*/
+SQLITE_PRIVATE u32 sqlite3Get4byte(const u8 *p){
+#if SQLITE_BYTEORDER==4321
+ u32 x;
+ memcpy(&x,p,4);
+ return x;
+#elif SQLITE_BYTEORDER==1234 && GCC_VERSION>=4003000
+ u32 x;
+ memcpy(&x,p,4);
+ return __builtin_bswap32(x);
+#elif SQLITE_BYTEORDER==1234 && MSVC_VERSION>=1300
+ u32 x;
+ memcpy(&x,p,4);
+ return _byteswap_ulong(x);
+#else
+ testcase( p[0]&0x80 );
+ return ((unsigned)p[0]<<24) | (p[1]<<16) | (p[2]<<8) | p[3];
+#endif
+}
+SQLITE_PRIVATE void sqlite3Put4byte(unsigned char *p, u32 v){
+#if SQLITE_BYTEORDER==4321
+ memcpy(p,&v,4);
+#elif SQLITE_BYTEORDER==1234 && GCC_VERSION>=4003000
+ u32 x = __builtin_bswap32(v);
+ memcpy(p,&x,4);
+#elif SQLITE_BYTEORDER==1234 && MSVC_VERSION>=1300
+ u32 x = _byteswap_ulong(v);
+ memcpy(p,&x,4);
+#else
+ p[0] = (u8)(v>>24);
+ p[1] = (u8)(v>>16);
+ p[2] = (u8)(v>>8);
+ p[3] = (u8)v;
+#endif
+}
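+
+/* For example: sqlite3Put4byte(p, 0x11223344) stores the bytes
+** 11 22 33 44 in that order regardless of host byte order, and
+** sqlite3Get4byte() on those same bytes returns 0x11223344; the
+** byte-swap intrinsics above are merely a fast path for
+** little-endian hosts.
+*/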
+
+
+
+/*
+** Translate a single byte of Hex into an integer.
+** This routine only works if h really is a valid hexadecimal
+** character: 0..9a..fA..F
+*/
+SQLITE_PRIVATE u8 sqlite3HexToInt(int h){
+ assert( (h>='0' && h<='9') || (h>='a' && h<='f') || (h>='A' && h<='F') );
+#ifdef SQLITE_ASCII
+ h += 9*(1&(h>>6));
+#endif
+#ifdef SQLITE_EBCDIC
+ h += 9*(1&~(h>>4));
+#endif
+ return (u8)(h & 0xf);
+}
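+
+/* A worked ASCII example: for h=='a' (0x61), bit 6 is set, so the
+** adjustment adds 9*(1&(h>>6)) == 9 and (0x61+9) & 0xf == 10, the
+** value of the hex digit 'a'. For h=='7' (0x37), bit 6 is clear,
+** nothing is added, and 0x37 & 0xf == 7.
+*/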
+
+#if !defined(SQLITE_OMIT_BLOB_LITERAL)
+/*
+** Convert a BLOB literal of the form "x'hhhhhh'" into its binary
+** value. Return a pointer to its binary value. Space to hold the
+** binary value has been obtained from malloc and must be freed by
+** the calling routine.
+*/
+SQLITE_PRIVATE void *sqlite3HexToBlob(sqlite3 *db, const char *z, int n){
+ char *zBlob;
+ int i;
+
+ zBlob = (char *)sqlite3DbMallocRawNN(db, n/2 + 1);
+ n--;
+ if( zBlob ){
+ for(i=0; i<n; i+=2){
+ zBlob[i/2] = (sqlite3HexToInt(z[i])<<4) | sqlite3HexToInt(z[i+1]);
+ }
+ zBlob[i/2] = 0;
+ }
+ return zBlob;
+}
+#endif /* !SQLITE_OMIT_BLOB_LITERAL */
+
+/*
+** Log an error that is an API call on a connection pointer that should
+** not have been used. The "type" of connection pointer is given as the
+** argument. The zType is a word like "NULL" or "closed" or "invalid".
+*/
+static void logBadConnection(const char *zType){
+ sqlite3_log(SQLITE_MISUSE,
+ "API call with %s database connection pointer",
+ zType
+ );
+}
+
+/*
+** Check to make sure we have a valid db pointer. This test is not
+** foolproof but it does provide some measure of protection against
+** misuse of the interface such as passing in db pointers that are
+** NULL or which have been previously closed. If this routine returns
+** 1 it means that the db pointer is valid and 0 if it should not be
+** dereferenced for any reason. The calling function should return
+** SQLITE_MISUSE immediately.
+**
+** sqlite3SafetyCheckOk() requires that the db pointer be valid for
+** use. sqlite3SafetyCheckSickOrOk() allows a db pointer that failed to
+** open properly and is not fit for general use but which can be
+** used as an argument to sqlite3_errmsg() or sqlite3_close().
+*/
+SQLITE_PRIVATE int sqlite3SafetyCheckOk(sqlite3 *db){
+ u32 magic;
+ if( db==0 ){
+ logBadConnection("NULL");
+ return 0;
+ }
+ magic = db->magic;
+ if( magic!=SQLITE_MAGIC_OPEN ){
+ if( sqlite3SafetyCheckSickOrOk(db) ){
+ testcase( sqlite3GlobalConfig.xLog!=0 );
+ logBadConnection("unopened");
+ }
+ return 0;
+ }else{
+ return 1;
+ }
+}
+SQLITE_PRIVATE int sqlite3SafetyCheckSickOrOk(sqlite3 *db){
+ u32 magic;
+ magic = db->magic;
+ if( magic!=SQLITE_MAGIC_SICK &&
+ magic!=SQLITE_MAGIC_OPEN &&
+ magic!=SQLITE_MAGIC_BUSY ){
+ testcase( sqlite3GlobalConfig.xLog!=0 );
+ logBadConnection("invalid");
+ return 0;
+ }else{
+ return 1;
+ }
+}
+
+/*
+** Attempt to add, subtract, or multiply the 64-bit signed value iB against
+** the other 64-bit signed integer at *pA and store the result in *pA.
+** Return 0 on success. Or if the operation would have resulted in an
+** overflow, leave *pA unchanged and return 1.
+*/
+SQLITE_PRIVATE int sqlite3AddInt64(i64 *pA, i64 iB){
+#if GCC_VERSION>=5004000 && !defined(__INTEL_COMPILER)
+ return __builtin_add_overflow(*pA, iB, pA);
+#else
+ i64 iA = *pA;
+ testcase( iA==0 ); testcase( iA==1 );
+ testcase( iB==-1 ); testcase( iB==0 );
+ if( iB>=0 ){
+ testcase( iA>0 && LARGEST_INT64 - iA == iB );
+ testcase( iA>0 && LARGEST_INT64 - iA == iB - 1 );
+ if( iA>0 && LARGEST_INT64 - iA < iB ) return 1;
+ }else{
+ testcase( iA<0 && -(iA + LARGEST_INT64) == iB + 1 );
+ testcase( iA<0 && -(iA + LARGEST_INT64) == iB + 2 );
+ if( iA<0 && -(iA + LARGEST_INT64) > iB + 1 ) return 1;
+ }
+ *pA += iB;
+ return 0;
+#endif
+}
+SQLITE_PRIVATE int sqlite3SubInt64(i64 *pA, i64 iB){
+#if GCC_VERSION>=5004000 && !defined(__INTEL_COMPILER)
+ return __builtin_sub_overflow(*pA, iB, pA);
+#else
+ testcase( iB==SMALLEST_INT64+1 );
+ if( iB==SMALLEST_INT64 ){
+ testcase( (*pA)==(-1) ); testcase( (*pA)==0 );
+ if( (*pA)>=0 ) return 1;
+ *pA -= iB;
+ return 0;
+ }else{
+ return sqlite3AddInt64(pA, -iB);
+ }
+#endif
+}
+SQLITE_PRIVATE int sqlite3MulInt64(i64 *pA, i64 iB){
+#if GCC_VERSION>=5004000 && !defined(__INTEL_COMPILER)
+ return __builtin_mul_overflow(*pA, iB, pA);
+#else
+ i64 iA = *pA;
+ if( iB>0 ){
+ if( iA>LARGEST_INT64/iB ) return 1;
+ if( iA<SMALLEST_INT64/iB ) return 1;
+ }else if( iB<0 ){
+ if( iA>0 ){
+ if( iB<SMALLEST_INT64/iA ) return 1;
+ }else if( iA<0 ){
+ if( iB==SMALLEST_INT64 ) return 1;
+ if( iA==SMALLEST_INT64 ) return 1;
+ if( -iA>LARGEST_INT64/-iB ) return 1;
+ }
+ }
+ *pA = iA*iB;
+ return 0;
+#endif
+}
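+
+/* For example (a sketch of the overflow contract documented above):
+** with i64 a = LARGEST_INT64, sqlite3AddInt64(&a, 1) returns non-zero
+** to signal overflow, while sqlite3AddInt64(&a, -1) returns 0 and
+** stores LARGEST_INT64-1 in a. sqlite3SubInt64() and sqlite3MulInt64()
+** follow the same convention.
+*/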
+
+/*
+** Compute the absolute value of a 32-bit signed integer, if possible. Or
+** if the integer has a value of -2147483648, return +2147483647
+*/
+SQLITE_PRIVATE int sqlite3AbsInt32(int x){
+ if( x>=0 ) return x;
+ if( x==(int)0x80000000 ) return 0x7fffffff;
+ return -x;
+}
+
+#ifdef SQLITE_ENABLE_8_3_NAMES
+/*
+** If SQLITE_ENABLE_8_3_NAMES is set at compile-time and if the database
+** filename in zBaseFilename is a URI with the "8_3_names=1" parameter and
+** if the filename in z[] has a suffix (a.k.a. "extension") that is longer than
+** three characters, then shorten the suffix on z[] to be the last three
+** characters of the original suffix.
+**
+** If SQLITE_ENABLE_8_3_NAMES is set to 2 at compile-time, then always
+** do the suffix shortening regardless of URI parameter.
+**
+** Examples:
+**
+** test.db-journal => test.nal
+** test.db-wal => test.wal
+** test.db-shm => test.shm
+** test.db-mj7f3319fa => test.9fa
+*/
+SQLITE_PRIVATE void sqlite3FileSuffix3(const char *zBaseFilename, char *z){
+#if SQLITE_ENABLE_8_3_NAMES<2
+ if( sqlite3_uri_boolean(zBaseFilename, "8_3_names", 0) )
+#endif
+ {
+ int i, sz;
+ sz = sqlite3Strlen30(z);
+ for(i=sz-1; i>0 && z[i]!='/' && z[i]!='.'; i--){}
+ if( z[i]=='.' && ALWAYS(sz>i+4) ) memmove(&z[i+1], &z[sz-3], 4);
+ }
+}
+#endif
+
+/*
+** Find the (approximate) sum of two LogEst values. This computation is
+** not a simple "+" operator because LogEst is stored as a logarithmic
+** value.
+*/
+SQLITE_PRIVATE LogEst sqlite3LogEstAdd(LogEst a, LogEst b){
+ static const unsigned char x[] = {
+ 10, 10, /* 0,1 */
+ 9, 9, /* 2,3 */
+ 8, 8, /* 4,5 */
+ 7, 7, 7, /* 6,7,8 */
+ 6, 6, 6, /* 9,10,11 */
+ 5, 5, 5, /* 12-14 */
+ 4, 4, 4, 4, /* 15-18 */
+ 3, 3, 3, 3, 3, 3, /* 19-24 */
+ 2, 2, 2, 2, 2, 2, 2, /* 25-31 */
+ };
+ if( a>=b ){
+ if( a>b+49 ) return a;
+ if( a>b+31 ) return a+1;
+ return a+x[a-b];
+ }else{
+ if( b>a+49 ) return b;
+ if( b>a+31 ) return b+1;
+ return b+x[b-a];
+ }
+}
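+
+/* For example: LogEst values are 10*log2(x), so adding two equal
+** estimates should double the underlying quantity, i.e. add
+** 10*log2(2)==10 to the estimate. And indeed, for a==b the table
+** above yields x[a-b]==x[0]==10, so sqlite3LogEstAdd(a,a)==a+10.
+*/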
+
+/*
+** Convert an integer into a LogEst. In other words, compute an
+** approximation for 10*log2(x).
+*/
+SQLITE_PRIVATE LogEst sqlite3LogEst(u64 x){
+ static LogEst a[] = { 0, 2, 3, 5, 6, 7, 8, 9 };
+ LogEst y = 40;
+ if( x<8 ){
+ if( x<2 ) return 0;
+ while( x<8 ){ y -= 10; x <<= 1; }
+ }else{
+#if GCC_VERSION>=5004000
+ int i = 60 - __builtin_clzll(x);
+ y += i*10;
+ x >>= i;
+#else
+ while( x>255 ){ y += 40; x >>= 4; } /*OPTIMIZATION-IF-TRUE*/
+ while( x>15 ){ y += 10; x >>= 1; }
+#endif
+ }
+ return a[x&7] + y - 10;
+}
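+
+/* A worked example: for x==1000 the loops above reduce x to 15 while
+** accumulating y==100, and the final lookup returns a[7]+100-10 == 99,
+** a close approximation of 10*log2(1000) ~= 99.66.
+*/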
+
+#ifndef SQLITE_OMIT_VIRTUALTABLE
+/*
+** Convert a double into a LogEst
+** In other words, compute an approximation for 10*log2(x).
+*/
+SQLITE_PRIVATE LogEst sqlite3LogEstFromDouble(double x){
+ u64 a;
+ LogEst e;
+ assert( sizeof(x)==8 && sizeof(a)==8 );
+ if( x<=1 ) return 0;
+ if( x<=2000000000 ) return sqlite3LogEst((u64)x);
+ memcpy(&a, &x, 8);
+ e = (a>>52) - 1022;
+ return e*10;
+}
+#endif /* SQLITE_OMIT_VIRTUALTABLE */
+
+#if defined(SQLITE_ENABLE_STMT_SCANSTATUS) || \
+ defined(SQLITE_ENABLE_STAT4) || \
+ defined(SQLITE_EXPLAIN_ESTIMATED_ROWS)
+/*
+** Convert a LogEst into an integer.
+**
+** Note that this routine is only used when one or more of various
+** non-standard compile-time options is enabled.
+*/
+SQLITE_PRIVATE u64 sqlite3LogEstToInt(LogEst x){
+ u64 n;
+ n = x%10;
+ x /= 10;
+ if( n>=5 ) n -= 2;
+ else if( n>=1 ) n -= 1;
+#if defined(SQLITE_ENABLE_STMT_SCANSTATUS) || \
+ defined(SQLITE_EXPLAIN_ESTIMATED_ROWS)
+ if( x>60 ) return (u64)LARGEST_INT64;
+#else
+ /* If only SQLITE_ENABLE_STAT4 is on, then the largest input
+ ** possible to this routine is 310, resulting in a maximum x of 31 */
+ assert( x<=60 );
+#endif
+ return x>=3 ? (n+8)<<(x-3) : (n+8)>>(3-x);
+}
+#endif /* defined SCANSTAT or STAT4 or ESTIMATED_ROWS */
+
+/*
+** Add a new name/number pair to a VList. This might require that the
+** VList object be reallocated, so return the new VList. If an OOM
+** error occurs, the original VList is returned and the
+** db->mallocFailed flag is set.
+**
+** A VList is really just an array of integers. To destroy a VList,
+** simply pass it to sqlite3DbFree().
+**
+** The first integer is the number of integers allocated for the whole
+** VList. The second integer is the number of integers actually used.
+** Each name/number pair is encoded by subsequent groups of 3 or more
+** integers.
+**
+** Each name/number pair starts with two integers which are the numeric
+** value for the pair and the size of the name/number pair, respectively.
+** The text name overlays one or more following integers. The text name
+** is always zero-terminated.
+**
+** Conceptually:
+**
+** struct VList {
+** int nAlloc; // Number of allocated slots
+** int nUsed; // Number of used slots
+** struct VListEntry {
+** int iValue; // Value for this entry
+** int nSlot; // Slots used by this entry
+** // ... variable name goes here
+** } a[0];
+** }
+**
+** During code generation, pointers to the variable names within the
+** VList are taken. When that happens, nAlloc is set to zero as an
+** indication that the VList may never again be enlarged, since the
+** accompanying realloc() would invalidate the pointers.
+*/
+SQLITE_PRIVATE VList *sqlite3VListAdd(
+ sqlite3 *db, /* The database connection used for malloc() */
+ VList *pIn, /* The input VList. Might be NULL */
+ const char *zName, /* Name of symbol to add */
+ int nName, /* Bytes of text in zName */
+ int iVal /* Value to associate with zName */
+){
+ int nInt; /* number of sizeof(int) objects needed for zName */
+ char *z; /* Pointer to where zName will be stored */
+ int i; /* Index in pIn[] where zName is stored */
+
+ nInt = nName/4 + 3;
+ assert( pIn==0 || pIn[0]>=3 ); /* Verify ok to add new elements */
+ if( pIn==0 || pIn[1]+nInt > pIn[0] ){
+ /* Enlarge the allocation */
+ sqlite3_int64 nAlloc = (pIn ? 2*(sqlite3_int64)pIn[0] : 10) + nInt;
+ VList *pOut = sqlite3DbRealloc(db, pIn, nAlloc*sizeof(int));
+ if( pOut==0 ) return pIn;
+ if( pIn==0 ) pOut[1] = 2;
+ pIn = pOut;
+ pIn[0] = nAlloc;
+ }
+ i = pIn[1];
+ pIn[i] = iVal;
+ pIn[i+1] = nInt;
+ z = (char*)&pIn[i+2];
+ pIn[1] = i+nInt;
+ assert( pIn[1]<=pIn[0] );
+ memcpy(z, zName, nName);
+ z[nName] = 0;
+ return pIn;
+}
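+
+/* For example (an illustrative sketch of the layout): adding the name
+** "x" with iVal==5 to a NULL VList needs nInt = 1/4+3 == 3 slots and
+** yields the integer array
+**
+**     { 13, 5,  5, 3, ... }
+**
+** where 13 is nAlloc (10+nInt), 5 is nUsed, and the single entry is
+** iValue==5, nSlot==3, with the bytes "x\0" overlaying the slot that
+** follows.
+*/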
+
+/*
+** Return a pointer to the name of a variable in the given VList that
+** has the value iVal, or return NULL if there is no such variable in
+** the list.
+*/
+SQLITE_PRIVATE const char *sqlite3VListNumToName(VList *pIn, int iVal){
+ int i, mx;
+ if( pIn==0 ) return 0;
+ mx = pIn[1];
+ i = 2;
+ do{
+ if( pIn[i]==iVal ) return (char*)&pIn[i+2];
+ i += pIn[i+1];
+ }while( i<mx );
+ return 0;
+}
+
+/*
+** Return the number of the variable named zName, if it is in the VList,
+** or return 0 if there is no such variable.
+*/
+SQLITE_PRIVATE int sqlite3VListNameToNum(VList *pIn, const char *zName, int nName){
+ int i, mx;
+ if( pIn==0 ) return 0;
+ mx = pIn[1];
+ i = 2;
+ do{
+ const char *z = (const char*)&pIn[i+2];
+ if( strncmp(z,zName,nName)==0 && z[nName]==0 ) return pIn[i];
+ i += pIn[i+1];
+ }while( i<mx );
+ return 0;
+}
+
+/************** End of util.c ************************************************/
+/************** Begin file hash.c ********************************************/
+/*
+** 2001 September 22
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+** This is the implementation of generic hash-tables
+** used in SQLite.
+*/
+/* #include "sqliteInt.h" */
+/* #include <assert.h> */
+
+/* Turn bulk memory into a hash table object by initializing the
+** fields of the Hash structure.
+**
+** "pNew" is a pointer to the hash table that is to be initialized.
+*/
+SQLITE_PRIVATE void sqlite3HashInit(Hash *pNew){
+ assert( pNew!=0 );
+ pNew->first = 0;
+ pNew->count = 0;
+ pNew->htsize = 0;
+ pNew->ht = 0;
+}
+
+/* Remove all entries from a hash table. Reclaim all memory.
+** Call this routine to delete a hash table or to reset a hash table
+** to the empty state.
+*/
+SQLITE_PRIVATE void sqlite3HashClear(Hash *pH){
+ HashElem *elem; /* For looping over all elements of the table */
+
+ assert( pH!=0 );
+ elem = pH->first;
+ pH->first = 0;
+ sqlite3_free(pH->ht);
+ pH->ht = 0;
+ pH->htsize = 0;
+ while( elem ){
+ HashElem *next_elem = elem->next;
+ sqlite3_free(elem);
+ elem = next_elem;
+ }
+ pH->count = 0;
+}
+
+/*
+** The hashing function.
+*/
+static unsigned int strHash(const char *z){
+ unsigned int h = 0;
+ unsigned char c;
+ while( (c = (unsigned char)*z++)!=0 ){ /*OPTIMIZATION-IF-TRUE*/
+ /* Knuth multiplicative hashing. (Sorting & Searching, p. 510).
+ ** 0x9e3779b1 is 2654435761 which is the closest prime number to
+ ** (2**32)*golden_ratio, where golden_ratio = (sqrt(5) - 1)/2. */
+ h += sqlite3UpperToLower[c];
+ h *= 0x9e3779b1;
+ }
+ return h;
+}
+
+
+/* Link pNew element into the hash table pH. If pEntry!=0 then also
+** insert pNew into the pEntry hash bucket.
+*/
+static void insertElement(
+ Hash *pH, /* The complete hash table */
+ struct _ht *pEntry, /* The entry into which pNew is inserted */
+ HashElem *pNew /* The element to be inserted */
+){
+ HashElem *pHead; /* First element already in pEntry */
+ if( pEntry ){
+ pHead = pEntry->count ? pEntry->chain : 0;
+ pEntry->count++;
+ pEntry->chain = pNew;
+ }else{
+ pHead = 0;
+ }
+ if( pHead ){
+ pNew->next = pHead;
+ pNew->prev = pHead->prev;
+ if( pHead->prev ){ pHead->prev->next = pNew; }
+ else { pH->first = pNew; }
+ pHead->prev = pNew;
+ }else{
+ pNew->next = pH->first;
+ if( pH->first ){ pH->first->prev = pNew; }
+ pNew->prev = 0;
+ pH->first = pNew;
+ }
+}
+
+
+/* Resize the hash table so that it contains "new_size" buckets.
+**
+** The hash table might fail to resize if sqlite3_malloc() fails or
+** if the new size is the same as the prior size.
+** Return TRUE if the resize occurs and false if not.
+*/
+static int rehash(Hash *pH, unsigned int new_size){
+ struct _ht *new_ht; /* The new hash table */
+ HashElem *elem, *next_elem; /* For looping over existing elements */
+
+#if SQLITE_MALLOC_SOFT_LIMIT>0
+ if( new_size*sizeof(struct _ht)>SQLITE_MALLOC_SOFT_LIMIT ){
+ new_size = SQLITE_MALLOC_SOFT_LIMIT/sizeof(struct _ht);
+ }
+ if( new_size==pH->htsize ) return 0;
+#endif
+
+ /* The inability to allocate space for a larger hash table is
+ ** a performance hit but it is not a fatal error. So mark the
+ ** allocation as benign. Use sqlite3Malloc()/memset(0) instead of
+ ** sqlite3MallocZero() to make the allocation, as sqlite3MallocZero()
+ ** only zeroes the requested number of bytes whereas this module will
+ ** use the actual amount of space allocated for the hash table (which
+ ** may be larger than the requested amount).
+ */
+ sqlite3BeginBenignMalloc();
+ new_ht = (struct _ht *)sqlite3Malloc( new_size*sizeof(struct _ht) );
+ sqlite3EndBenignMalloc();
+
+ if( new_ht==0 ) return 0;
+ sqlite3_free(pH->ht);
+ pH->ht = new_ht;
+ pH->htsize = new_size = sqlite3MallocSize(new_ht)/sizeof(struct _ht);
+ memset(new_ht, 0, new_size*sizeof(struct _ht));
+ for(elem=pH->first, pH->first=0; elem; elem = next_elem){
+ unsigned int h = strHash(elem->pKey) % new_size;
+ next_elem = elem->next;
+ insertElement(pH, &new_ht[h], elem);
+ }
+ return 1;
+}
+
+/* This function (for internal use only) locates an element in a
+** hash table that matches the given key. If no element is found,
+** a pointer to a static null element with HashElem.data==0 is returned.
+** If pHash is not NULL, then the hash for this key is written to *pHash.
+*/
+static HashElem *findElementWithHash(
+ const Hash *pH, /* The pH to be searched */
+ const char *pKey, /* The key we are searching for */
+ unsigned int *pHash /* Write the hash value here */
+){
+ HashElem *elem; /* Used to loop thru the element list */
+ unsigned int count; /* Number of elements left to test */
+ unsigned int h; /* The computed hash */
+ static HashElem nullElement = { 0, 0, 0, 0 };
+
+ if( pH->ht ){ /*OPTIMIZATION-IF-TRUE*/
+ struct _ht *pEntry;
+ h = strHash(pKey) % pH->htsize;
+ pEntry = &pH->ht[h];
+ elem = pEntry->chain;
+ count = pEntry->count;
+ }else{
+ h = 0;
+ elem = pH->first;
+ count = pH->count;
+ }
+ if( pHash ) *pHash = h;
+ while( count-- ){
+ assert( elem!=0 );
+ if( sqlite3StrICmp(elem->pKey,pKey)==0 ){
+ return elem;
+ }
+ elem = elem->next;
+ }
+ return &nullElement;
+}
+
+/* Remove a single entry from the hash table given a pointer to that
+** element and a hash on the element's key.
+*/
+static void removeElementGivenHash(
+ Hash *pH, /* The pH containing "elem" */
+ HashElem* elem, /* The element to be removed from the pH */
+ unsigned int h /* Hash value for the element */
+){
+ struct _ht *pEntry;
+ if( elem->prev ){
+ elem->prev->next = elem->next;
+ }else{
+ pH->first = elem->next;
+ }
+ if( elem->next ){
+ elem->next->prev = elem->prev;
+ }
+ if( pH->ht ){
+ pEntry = &pH->ht[h];
+ if( pEntry->chain==elem ){
+ pEntry->chain = elem->next;
+ }
+ assert( pEntry->count>0 );
+ pEntry->count--;
+ }
+ sqlite3_free( elem );
+ pH->count--;
+ if( pH->count==0 ){
+ assert( pH->first==0 );
+ assert( pH->count==0 );
+ sqlite3HashClear(pH);
+ }
+}
+
+/* Attempt to locate an element of the hash table pH with a key
+** that matches pKey. Return the data for this element if it is
+** found, or NULL if there is no match.
+*/
+SQLITE_PRIVATE void *sqlite3HashFind(const Hash *pH, const char *pKey){
+ assert( pH!=0 );
+ assert( pKey!=0 );
+ return findElementWithHash(pH, pKey, 0)->data;
+}
+
+/* Insert an element into the hash table pH. The key is pKey
+** and the data is "data".
+**
+** If no element exists with a matching key, then a new
+** element is created and NULL is returned.
+**
+** If another element already exists with the same key, then the
+** new data replaces the old data and the old data is returned.
+** The key is not copied in this instance. If a malloc fails, then
+** the new data is returned and the hash table is unchanged.
+**
+** If the "data" parameter to this function is NULL, then the
+** element corresponding to "key" is removed from the hash table.
+*/
+SQLITE_PRIVATE void *sqlite3HashInsert(Hash *pH, const char *pKey, void *data){
+ unsigned int h; /* the hash of the key modulo hash table size */
+ HashElem *elem; /* Used to loop thru the element list */
+ HashElem *new_elem; /* New element added to the pH */
+
+ assert( pH!=0 );
+ assert( pKey!=0 );
+ elem = findElementWithHash(pH,pKey,&h);
+ if( elem->data ){
+ void *old_data = elem->data;
+ if( data==0 ){
+ removeElementGivenHash(pH,elem,h);
+ }else{
+ elem->data = data;
+ elem->pKey = pKey;
+ }
+ return old_data;
+ }
+ if( data==0 ) return 0;
+ new_elem = (HashElem*)sqlite3Malloc( sizeof(HashElem) );
+ if( new_elem==0 ) return data;
+ new_elem->pKey = pKey;
+ new_elem->data = data;
+ pH->count++;
+ if( pH->count>=10 && pH->count > 2*pH->htsize ){
+ if( rehash(pH, pH->count*2) ){
+ assert( pH->htsize>0 );
+ h = strHash(pKey) % pH->htsize;
+ }
+ }
+ insertElement(pH, pH->ht ? &pH->ht[h] : 0, new_elem);
+ return 0;
+}
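+
+/* Illustrative sketch (not part of the original source): the three
+** sqlite3HashInsert() outcomes documented above, as seen by a caller.
+** The pointers pTab, pTab2 and pOld are hypothetical. Note that keys
+** are matched case-insensitively and the key string is not copied.
+**
+**   Hash h;
+**   sqlite3HashInit(&h);
+**   sqlite3HashInsert(&h, "t1", pTab);          // new key    -> NULL
+**   pOld = sqlite3HashInsert(&h, "t1", pTab2);  // replace    -> pTab
+**   pOld = sqlite3HashInsert(&h, "t1", 0);      // remove     -> pTab2
+**   assert( sqlite3HashFind(&h, "t1")==0 );
+**   sqlite3HashClear(&h);
+*/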
+
+/************** End of hash.c ************************************************/
+/************** Begin file opcodes.c *****************************************/
+/* Automatically generated. Do not edit */
+/* See the tool/mkopcodec.tcl script for details. */
+#if !defined(SQLITE_OMIT_EXPLAIN) \
+ || defined(VDBE_PROFILE) \
+ || defined(SQLITE_DEBUG)
+#if defined(SQLITE_ENABLE_EXPLAIN_COMMENTS) || defined(SQLITE_DEBUG)
+# define OpHelp(X) "\0" X
+#else
+# define OpHelp(X)
+#endif
+SQLITE_PRIVATE const char *sqlite3OpcodeName(int i){
+ static const char *const azName[] = {
+ /* 0 */ "Savepoint" OpHelp(""),
+ /* 1 */ "AutoCommit" OpHelp(""),
+ /* 2 */ "Transaction" OpHelp(""),
+ /* 3 */ "SorterNext" OpHelp(""),
+ /* 4 */ "Prev" OpHelp(""),
+ /* 5 */ "Next" OpHelp(""),
+ /* 6 */ "Checkpoint" OpHelp(""),
+ /* 7 */ "JournalMode" OpHelp(""),
+ /* 8 */ "Vacuum" OpHelp(""),
+ /* 9 */ "VFilter" OpHelp("iplan=r[P3] zplan='P4'"),
+ /* 10 */ "VUpdate" OpHelp("data=r[P3@P2]"),
+ /* 11 */ "Goto" OpHelp(""),
+ /* 12 */ "Gosub" OpHelp(""),
+ /* 13 */ "InitCoroutine" OpHelp(""),
+ /* 14 */ "Yield" OpHelp(""),
+ /* 15 */ "MustBeInt" OpHelp(""),
+ /* 16 */ "Jump" OpHelp(""),
+ /* 17 */ "Once" OpHelp(""),
+ /* 18 */ "If" OpHelp(""),
+ /* 19 */ "Not" OpHelp("r[P2]= !r[P1]"),
+ /* 20 */ "IfNot" OpHelp(""),
+ /* 21 */ "IfNullRow" OpHelp("if P1.nullRow then r[P3]=NULL, goto P2"),
+ /* 22 */ "SeekLT" OpHelp("key=r[P3@P4]"),
+ /* 23 */ "SeekLE" OpHelp("key=r[P3@P4]"),
+ /* 24 */ "SeekGE" OpHelp("key=r[P3@P4]"),
+ /* 25 */ "SeekGT" OpHelp("key=r[P3@P4]"),
+ /* 26 */ "IfNotOpen" OpHelp("if( !csr[P1] ) goto P2"),
+ /* 27 */ "IfNoHope" OpHelp("key=r[P3@P4]"),
+ /* 28 */ "NoConflict" OpHelp("key=r[P3@P4]"),
+ /* 29 */ "NotFound" OpHelp("key=r[P3@P4]"),
+ /* 30 */ "Found" OpHelp("key=r[P3@P4]"),
+ /* 31 */ "SeekRowid" OpHelp("intkey=r[P3]"),
+ /* 32 */ "NotExists" OpHelp("intkey=r[P3]"),
+ /* 33 */ "Last" OpHelp(""),
+ /* 34 */ "IfSmaller" OpHelp(""),
+ /* 35 */ "SorterSort" OpHelp(""),
+ /* 36 */ "Sort" OpHelp(""),
+ /* 37 */ "Rewind" OpHelp(""),
+ /* 38 */ "IdxLE" OpHelp("key=r[P3@P4]"),
+ /* 39 */ "IdxGT" OpHelp("key=r[P3@P4]"),
+ /* 40 */ "IdxLT" OpHelp("key=r[P3@P4]"),
+ /* 41 */ "IdxGE" OpHelp("key=r[P3@P4]"),
+ /* 42 */ "RowSetRead" OpHelp("r[P3]=rowset(P1)"),
+ /* 43 */ "Or" OpHelp("r[P3]=(r[P1] || r[P2])"),
+ /* 44 */ "And" OpHelp("r[P3]=(r[P1] && r[P2])"),
+ /* 45 */ "RowSetTest" OpHelp("if r[P3] in rowset(P1) goto P2"),
+ /* 46 */ "Program" OpHelp(""),
+ /* 47 */ "FkIfZero" OpHelp("if fkctr[P1]==0 goto P2"),
+ /* 48 */ "IfPos" OpHelp("if r[P1]>0 then r[P1]-=P3, goto P2"),
+ /* 49 */ "IfNotZero" OpHelp("if r[P1]!=0 then r[P1]--, goto P2"),
+ /* 50 */ "IsNull" OpHelp("if r[P1]==NULL goto P2"),
+ /* 51 */ "NotNull" OpHelp("if r[P1]!=NULL goto P2"),
+ /* 52 */ "Ne" OpHelp("IF r[P3]!=r[P1]"),
+ /* 53 */ "Eq" OpHelp("IF r[P3]==r[P1]"),
+ /* 54 */ "Gt" OpHelp("IF r[P3]>r[P1]"),
+ /* 55 */ "Le" OpHelp("IF r[P3]<=r[P1]"),
+ /* 56 */ "Lt" OpHelp("IF r[P3]<r[P1]"),
+ /* 57 */ "Ge" OpHelp("IF r[P3]>=r[P1]"),
+ /* 58 */ "ElseNotEq" OpHelp(""),
+ /* 59 */ "DecrJumpZero" OpHelp("if (--r[P1])==0 goto P2"),
+ /* 60 */ "IncrVacuum" OpHelp(""),
+ /* 61 */ "VNext" OpHelp(""),
+ /* 62 */ "Init" OpHelp("Start at P2"),
+ /* 63 */ "PureFunc" OpHelp("r[P3]=func(r[P2@NP])"),
+ /* 64 */ "Function" OpHelp("r[P3]=func(r[P2@NP])"),
+ /* 65 */ "Return" OpHelp(""),
+ /* 66 */ "EndCoroutine" OpHelp(""),
+ /* 67 */ "HaltIfNull" OpHelp("if r[P3]=null halt"),
+ /* 68 */ "Halt" OpHelp(""),
+ /* 69 */ "Integer" OpHelp("r[P2]=P1"),
+ /* 70 */ "Int64" OpHelp("r[P2]=P4"),
+ /* 71 */ "String" OpHelp("r[P2]='P4' (len=P1)"),
+ /* 72 */ "Null" OpHelp("r[P2..P3]=NULL"),
+ /* 73 */ "SoftNull" OpHelp("r[P1]=NULL"),
+ /* 74 */ "Blob" OpHelp("r[P2]=P4 (len=P1)"),
+ /* 75 */ "Variable" OpHelp("r[P2]=parameter(P1,P4)"),
+ /* 76 */ "Move" OpHelp("r[P2@P3]=r[P1@P3]"),
+ /* 77 */ "Copy" OpHelp("r[P2@P3+1]=r[P1@P3+1]"),
+ /* 78 */ "SCopy" OpHelp("r[P2]=r[P1]"),
+ /* 79 */ "IntCopy" OpHelp("r[P2]=r[P1]"),
+ /* 80 */ "ResultRow" OpHelp("output=r[P1@P2]"),
+ /* 81 */ "CollSeq" OpHelp(""),
+ /* 82 */ "AddImm" OpHelp("r[P1]=r[P1]+P2"),
+ /* 83 */ "RealAffinity" OpHelp(""),
+ /* 84 */ "Cast" OpHelp("affinity(r[P1])"),
+ /* 85 */ "Permutation" OpHelp(""),
+ /* 86 */ "Compare" OpHelp("r[P1@P3] <-> r[P2@P3]"),
+ /* 87 */ "IsTrue" OpHelp("r[P2] = coalesce(r[P1]==TRUE,P3) ^ P4"),
+ /* 88 */ "Offset" OpHelp("r[P3] = sqlite_offset(P1)"),
+ /* 89 */ "Column" OpHelp("r[P3]=PX"),
+ /* 90 */ "Affinity" OpHelp("affinity(r[P1@P2])"),
+ /* 91 */ "MakeRecord" OpHelp("r[P3]=mkrec(r[P1@P2])"),
+ /* 92 */ "Count" OpHelp("r[P2]=count()"),
+ /* 93 */ "ReadCookie" OpHelp(""),
+ /* 94 */ "SetCookie" OpHelp(""),
+ /* 95 */ "ReopenIdx" OpHelp("root=P2 iDb=P3"),
+ /* 96 */ "OpenRead" OpHelp("root=P2 iDb=P3"),
+ /* 97 */ "OpenWrite" OpHelp("root=P2 iDb=P3"),
+ /* 98 */ "OpenDup" OpHelp(""),
+ /* 99 */ "OpenAutoindex" OpHelp("nColumn=P2"),
+ /* 100 */ "OpenEphemeral" OpHelp("nColumn=P2"),
+ /* 101 */ "BitAnd" OpHelp("r[P3]=r[P1]&r[P2]"),
+ /* 102 */ "BitOr" OpHelp("r[P3]=r[P1]|r[P2]"),
+ /* 103 */ "ShiftLeft" OpHelp("r[P3]=r[P2]<<r[P1]"),
+ /* 104 */ "ShiftRight" OpHelp("r[P3]=r[P2]>>r[P1]"),
+ /* 105 */ "Add" OpHelp("r[P3]=r[P1]+r[P2]"),
+ /* 106 */ "Subtract" OpHelp("r[P3]=r[P2]-r[P1]"),
+ /* 107 */ "Multiply" OpHelp("r[P3]=r[P1]*r[P2]"),
+ /* 108 */ "Divide" OpHelp("r[P3]=r[P2]/r[P1]"),
+ /* 109 */ "Remainder" OpHelp("r[P3]=r[P2]%r[P1]"),
+ /* 110 */ "Concat" OpHelp("r[P3]=r[P2]+r[P1]"),
+ /* 111 */ "SorterOpen" OpHelp(""),
+ /* 112 */ "BitNot" OpHelp("r[P2]= ~r[P1]"),
+ /* 113 */ "SequenceTest" OpHelp("if( cursor[P1].ctr++ ) pc = P2"),
+ /* 114 */ "OpenPseudo" OpHelp("P3 columns in r[P2]"),
+ /* 115 */ "String8" OpHelp("r[P2]='P4'"),
+ /* 116 */ "Close" OpHelp(""),
+ /* 117 */ "ColumnsUsed" OpHelp(""),
+ /* 118 */ "SeekHit" OpHelp("seekHit=P2"),
+ /* 119 */ "Sequence" OpHelp("r[P2]=cursor[P1].ctr++"),
+ /* 120 */ "NewRowid" OpHelp("r[P2]=rowid"),
+ /* 121 */ "Insert" OpHelp("intkey=r[P3] data=r[P2]"),
+ /* 122 */ "Delete" OpHelp(""),
+ /* 123 */ "ResetCount" OpHelp(""),
+ /* 124 */ "SorterCompare" OpHelp("if key(P1)!=trim(r[P3],P4) goto P2"),
+ /* 125 */ "SorterData" OpHelp("r[P2]=data"),
+ /* 126 */ "RowData" OpHelp("r[P2]=data"),
+ /* 127 */ "Rowid" OpHelp("r[P2]=rowid"),
+ /* 128 */ "NullRow" OpHelp(""),
+ /* 129 */ "SeekEnd" OpHelp(""),
+ /* 130 */ "IdxInsert" OpHelp("key=r[P2]"),
+ /* 131 */ "SorterInsert" OpHelp("key=r[P2]"),
+ /* 132 */ "IdxDelete" OpHelp("key=r[P2@P3]"),
+ /* 133 */ "DeferredSeek" OpHelp("Move P3 to P1.rowid if needed"),
+ /* 134 */ "IdxRowid" OpHelp("r[P2]=rowid"),
+ /* 135 */ "FinishSeek" OpHelp(""),
+ /* 136 */ "Destroy" OpHelp(""),
+ /* 137 */ "Clear" OpHelp(""),
+ /* 138 */ "ResetSorter" OpHelp(""),
+ /* 139 */ "CreateBtree" OpHelp("r[P2]=root iDb=P1 flags=P3"),
+ /* 140 */ "SqlExec" OpHelp(""),
+ /* 141 */ "ParseSchema" OpHelp(""),
+ /* 142 */ "LoadAnalysis" OpHelp(""),
+ /* 143 */ "DropTable" OpHelp(""),
+ /* 144 */ "DropIndex" OpHelp(""),
+ /* 145 */ "DropTrigger" OpHelp(""),
+ /* 146 */ "IntegrityCk" OpHelp(""),
+ /* 147 */ "RowSetAdd" OpHelp("rowset(P1)=r[P2]"),
+ /* 148 */ "Param" OpHelp(""),
+ /* 149 */ "FkCounter" OpHelp("fkctr[P1]+=P2"),
+ /* 150 */ "Real" OpHelp("r[P2]=P4"),
+ /* 151 */ "MemMax" OpHelp("r[P1]=max(r[P1],r[P2])"),
+ /* 152 */ "OffsetLimit" OpHelp("if r[P1]>0 then r[P2]=r[P1]+max(0,r[P3]) else r[P2]=(-1)"),
+ /* 153 */ "AggInverse" OpHelp("accum=r[P3] inverse(r[P2@P5])"),
+ /* 154 */ "AggStep" OpHelp("accum=r[P3] step(r[P2@P5])"),
+ /* 155 */ "AggStep1" OpHelp("accum=r[P3] step(r[P2@P5])"),
+ /* 156 */ "AggValue" OpHelp("r[P3]=value N=P2"),
+ /* 157 */ "AggFinal" OpHelp("accum=r[P1] N=P2"),
+ /* 158 */ "Expire" OpHelp(""),
+ /* 159 */ "CursorLock" OpHelp(""),
+ /* 160 */ "CursorUnlock" OpHelp(""),
+ /* 161 */ "TableLock" OpHelp("iDb=P1 root=P2 write=P3"),
+ /* 162 */ "VBegin" OpHelp(""),
+ /* 163 */ "VCreate" OpHelp(""),
+ /* 164 */ "VDestroy" OpHelp(""),
+ /* 165 */ "VOpen" OpHelp(""),
+ /* 166 */ "VColumn" OpHelp("r[P3]=vcolumn(P2)"),
+ /* 167 */ "VRename" OpHelp(""),
+ /* 168 */ "Pagecount" OpHelp(""),
+ /* 169 */ "MaxPgcnt" OpHelp(""),
+ /* 170 */ "Trace" OpHelp(""),
+ /* 171 */ "CursorHint" OpHelp(""),
+ /* 172 */ "ReleaseReg" OpHelp("release r[P1@P2] mask P3"),
+ /* 173 */ "Noop" OpHelp(""),
+ /* 174 */ "Explain" OpHelp(""),
+ /* 175 */ "Abortable" OpHelp(""),
+ };
+ return azName[i];
+}
+#endif
+
+/************** End of opcodes.c *********************************************/
+/************** Begin file os_unix.c *****************************************/
+/*
+** 2004 May 22
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+******************************************************************************
+**
+** This file contains the VFS implementation for unix-like operating systems
+** including Linux, MacOSX, *BSD, QNX, VxWorks, AIX, HPUX, and others.
+**
+** There are actually several different VFS implementations in this file.
+** The differences are in the way that file locking is done. The default
+** implementation uses Posix Advisory Locks. Alternative implementations
+** use flock(), dot-files, various proprietary locking schemas, or simply
+** skip locking altogether.
+**
+** This source file is organized into divisions where the logic for various
+** subfunctions is contained within the appropriate division. PLEASE
+** KEEP THE STRUCTURE OF THIS FILE INTACT. New code should be placed
+** in the correct division and should be clearly labeled.
+**
+** The layout of divisions is as follows:
+**
+** * General-purpose declarations and utility functions.
+** * Unique file ID logic used by VxWorks.
+** * Various locking primitive implementations (all except proxy locking):
+** + for Posix Advisory Locks
+** + for no-op locks
+** + for dot-file locks
+** + for flock() locking
+** + for named semaphore locks (VxWorks only)
+** + for AFP filesystem locks (MacOSX only)
+** * sqlite3_file methods not associated with locking.
+** * Definitions of sqlite3_io_methods objects for all locking
+** methods plus "finder" functions for each locking method.
+** * sqlite3_vfs method implementations.
+** * Locking primitives for the proxy uber-locking-method. (MacOSX only)
+** * Definitions of sqlite3_vfs objects for all locking methods
+** plus implementations of sqlite3_os_init() and sqlite3_os_end().
+*/
+/* #include "sqliteInt.h" */
+#if SQLITE_OS_UNIX /* This file is used on unix only */
+
+/*
+** There are various methods for file locking used for concurrency
+** control:
+**
+** 1. POSIX locking (the default),
+** 2. No locking,
+** 3. Dot-file locking,
+** 4. flock() locking,
+** 5. AFP locking (OSX only),
+** 6. Named POSIX semaphores (VXWorks only),
+** 7. proxy locking. (OSX only)
+**
+** Styles 4, 5, and 7 are only available if SQLITE_ENABLE_LOCKING_STYLE
+** is defined to 1. The SQLITE_ENABLE_LOCKING_STYLE macro also enables automatic
+** selection of the appropriate locking style based on the filesystem
+** where the database is located.
+*/
+#if !defined(SQLITE_ENABLE_LOCKING_STYLE)
+# if defined(__APPLE__)
+# define SQLITE_ENABLE_LOCKING_STYLE 1
+# else
+# define SQLITE_ENABLE_LOCKING_STYLE 0
+# endif
+#endif
+
+/* Use pread() and pwrite() if they are available */
+#if defined(__APPLE__)
+# define HAVE_PREAD 1
+# define HAVE_PWRITE 1
+#endif
+#if defined(HAVE_PREAD64) && defined(HAVE_PWRITE64)
+# undef USE_PREAD
+# define USE_PREAD64 1
+#elif defined(HAVE_PREAD) && defined(HAVE_PWRITE)
+# undef USE_PREAD64
+# define USE_PREAD 1
+#endif
+
+/*
+** standard include files.
+*/
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <sys/ioctl.h>
+#include <unistd.h>
+/* #include <time.h> */
+#include <sys/time.h>
+#include <errno.h>
+#if !defined(SQLITE_OMIT_WAL) || SQLITE_MAX_MMAP_SIZE>0
+# include <sys/mman.h>
+#endif
+
+#if SQLITE_ENABLE_LOCKING_STYLE
+/* # include <sys/ioctl.h> */
+# include <sys/file.h>
+# include <sys/param.h>
+#endif /* SQLITE_ENABLE_LOCKING_STYLE */
+
+/*
+** Try to determine if gethostuuid() is available based on standard
+** macros. This might sometimes compute the wrong value for some
+** obscure platforms. For those cases, simply compile with one of
+** the following:
+**
+** -DHAVE_GETHOSTUUID=0
+** -DHAVE_GETHOSTUUID=1
+**
+** None of this matters except when building on Apple products with
+** -DSQLITE_ENABLE_LOCKING_STYLE.
+*/
+#ifndef HAVE_GETHOSTUUID
+# define HAVE_GETHOSTUUID 0
+# if defined(__APPLE__) && ((__MAC_OS_X_VERSION_MIN_REQUIRED > 1050) || \
+ (__IPHONE_OS_VERSION_MIN_REQUIRED > 2000))
+# if (!defined(TARGET_OS_EMBEDDED) || (TARGET_OS_EMBEDDED==0)) \
+ && (!defined(TARGET_IPHONE_SIMULATOR) || (TARGET_IPHONE_SIMULATOR==0))
+# undef HAVE_GETHOSTUUID
+# define HAVE_GETHOSTUUID 1
+# else
+# warning "gethostuuid() is disabled."
+# endif
+# endif
+#endif
+
+
+#if OS_VXWORKS
+/* # include <sys/ioctl.h> */
+# include <semaphore.h>
+# include <limits.h>
+#endif /* OS_VXWORKS */
+
+#if defined(__APPLE__) || SQLITE_ENABLE_LOCKING_STYLE
+# include <sys/mount.h>
+#endif
+
+#ifdef HAVE_UTIME
+# include <utime.h>
+#endif
+
+/*
+** Allowed values of unixFile.fsFlags
+*/
+#define SQLITE_FSFLAGS_IS_MSDOS 0x1
+
+/*
+** If we are to be thread-safe, include the pthreads header.
+*/
+#if SQLITE_THREADSAFE
+/* # include <pthread.h> */
+#endif
+
+/*
+** Default permissions when creating a new file
+*/
+#ifndef SQLITE_DEFAULT_FILE_PERMISSIONS
+# define SQLITE_DEFAULT_FILE_PERMISSIONS 0644
+#endif
+
+/*
+** Default permissions when creating auto proxy dir
+*/
+#ifndef SQLITE_DEFAULT_PROXYDIR_PERMISSIONS
+# define SQLITE_DEFAULT_PROXYDIR_PERMISSIONS 0755
+#endif
+
+/*
+** Maximum supported path-length.
+*/
+#define MAX_PATHNAME 512
+
+/*
+** Maximum supported symbolic links
+*/
+#define SQLITE_MAX_SYMLINKS 100
+
+/* Always cast the getpid() return type for compatibility with
+** kernel modules in VxWorks. */
+#define osGetpid(X) (pid_t)getpid()
+
+/*
+** Only set the lastErrno if the error code is a real error and not
+** a normal expected return code of SQLITE_BUSY or SQLITE_OK
+*/
+#define IS_LOCK_ERROR(x) ((x != SQLITE_OK) && (x != SQLITE_BUSY))
+
+/* Forward references */
+typedef struct unixShm unixShm; /* Connection shared memory */
+typedef struct unixShmNode unixShmNode; /* Shared memory instance */
+typedef struct unixInodeInfo unixInodeInfo; /* An i-node */
+typedef struct UnixUnusedFd UnixUnusedFd; /* An unused file descriptor */
+
+/*
+** Sometimes, after a file handle is closed by SQLite, the file descriptor
+** cannot be closed immediately. In these cases, instances of the following
+** structure are used to store the file descriptor while waiting for an
+** opportunity to either close or reuse it.
+*/
+struct UnixUnusedFd {
+ int fd; /* File descriptor to close */
+ int flags; /* Flags this file descriptor was opened with */
+ UnixUnusedFd *pNext; /* Next unused file descriptor on same file */
+};
+
+/*
+** The unixFile structure is a subclass of sqlite3_file specific to the unix
+** VFS implementations.
+*/
+typedef struct unixFile unixFile;
+struct unixFile {
+ sqlite3_io_methods const *pMethod; /* Always the first entry */
+ sqlite3_vfs *pVfs; /* The VFS that created this unixFile */
+ unixInodeInfo *pInode; /* Info about locks on this inode */
+ int h; /* The file descriptor */
+ unsigned char eFileLock; /* The type of lock held on this fd */
+ unsigned short int ctrlFlags; /* Behavioral bits. UNIXFILE_* flags */
+ int lastErrno; /* The unix errno from last I/O error */
+ void *lockingContext; /* Locking style specific state */
+ UnixUnusedFd *pPreallocatedUnused; /* Pre-allocated UnixUnusedFd */
+ const char *zPath; /* Name of the file */
+ unixShm *pShm; /* Shared memory segment information */
+ int szChunk; /* Configured by FCNTL_CHUNK_SIZE */
+#if SQLITE_MAX_MMAP_SIZE>0
+ int nFetchOut; /* Number of outstanding xFetch refs */
+ sqlite3_int64 mmapSize; /* Usable size of mapping at pMapRegion */
+ sqlite3_int64 mmapSizeActual; /* Actual size of mapping at pMapRegion */
+ sqlite3_int64 mmapSizeMax; /* Configured FCNTL_MMAP_SIZE value */
+ void *pMapRegion; /* Memory mapped region */
+#endif
+ int sectorSize; /* Device sector size */
+ int deviceCharacteristics; /* Precomputed device characteristics */
+#if SQLITE_ENABLE_LOCKING_STYLE
+ int openFlags; /* The flags specified at open() */
+#endif
+#if SQLITE_ENABLE_LOCKING_STYLE || defined(__APPLE__)
+ unsigned fsFlags; /* cached details from statfs() */
+#endif
+#ifdef SQLITE_ENABLE_SETLK_TIMEOUT
+ unsigned iBusyTimeout; /* Wait this many millisec on locks */
+#endif
+#if OS_VXWORKS
+ struct vxworksFileId *pId; /* Unique file ID */
+#endif
+#ifdef SQLITE_DEBUG
+ /* The next group of variables is used to track whether or not the
+ ** transaction counter in bytes 24-27 of the database file is updated
+ ** whenever any part of the database changes. An assertion fault will
+ ** occur if a file is updated without also updating the transaction
+ ** counter. This test is made to avoid new problems similar to the
+ ** one described by ticket #3584.
+ */
+ unsigned char transCntrChng; /* True if the transaction counter changed */
+ unsigned char dbUpdate; /* True if any part of database file changed */
+ unsigned char inNormalWrite; /* True if in a normal write operation */
+
+#endif
+
+#ifdef SQLITE_TEST
+ /* In test mode, increase the size of this structure a bit so that
+ ** it is larger than the struct CrashFile defined in test6.c.
+ */
+ char aPadding[32];
+#endif
+};
+
+/* This variable holds the process id (pid) from when the xRandomness()
+** method was called. If xOpen() is called from a different process id,
+** indicating that a fork() has occurred, the PRNG will be reset.
+*/
+static pid_t randomnessPid = 0;
+
+/*
+** Allowed values for the unixFile.ctrlFlags bitmask:
+*/
+#define UNIXFILE_EXCL 0x01 /* Connections from one process only */
+#define UNIXFILE_RDONLY 0x02 /* Connection is read only */
+#define UNIXFILE_PERSIST_WAL 0x04 /* Persistent WAL mode */
+#ifndef SQLITE_DISABLE_DIRSYNC
+# define UNIXFILE_DIRSYNC 0x08 /* Directory sync needed */
+#else
+# define UNIXFILE_DIRSYNC 0x00
+#endif
+#define UNIXFILE_PSOW 0x10 /* SQLITE_IOCAP_POWERSAFE_OVERWRITE */
+#define UNIXFILE_DELETE 0x20 /* Delete on close */
+#define UNIXFILE_URI 0x40 /* Filename might have query parameters */
+#define UNIXFILE_NOLOCK 0x80 /* Do no file locking */
+
+/*
+** Include code that is common to all os_*.c files
+*/
+/************** Include os_common.h in the middle of os_unix.c ***************/
+/************** Begin file os_common.h ***************************************/
+/*
+** 2004 May 22
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+******************************************************************************
+**
+** This file contains macros and a little bit of code that is common to
+** all of the platform-specific files (os_*.c) and is #included into those
+** files.
+**
+** This file should be #included by the os_*.c files only. It is not a
+** general purpose header file.
+*/
+#ifndef _OS_COMMON_H_
+#define _OS_COMMON_H_
+
+/*
+** At least two bugs have slipped in because we changed the MEMORY_DEBUG
+** macro to SQLITE_DEBUG and some older makefiles have not yet made the
+** switch. The following code should catch this problem at compile-time.
+*/
+#ifdef MEMORY_DEBUG
+# error "The MEMORY_DEBUG macro is obsolete. Use SQLITE_DEBUG instead."
+#endif
+
+/*
+** Macros for performance tracing. Normally turned off. Only
+** implemented for x86, x86_64, and PPC class hardware (see hwtime.h below).
+*/
+#ifdef SQLITE_PERFORMANCE_TRACE
+
+/*
+** hwtime.h contains inline assembler code for implementing
+** high-performance timing routines.
+*/
+/************** Include hwtime.h in the middle of os_common.h ****************/
+/************** Begin file hwtime.h ******************************************/
+/*
+** 2008 May 27
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+******************************************************************************
+**
+** This file contains inline asm code for retrieving "high-performance"
+** counters for x86, x86_64, and PPC class CPUs.
+*/
+#ifndef SQLITE_HWTIME_H
+#define SQLITE_HWTIME_H
+
+/*
+** The following routine only works on pentium-class (or newer) processors.
+** It uses the RDTSC opcode to read the cycle count value out of the
+** processor and returns that value. This can be used for high-res
+** profiling.
+*/
+#if !defined(__STRICT_ANSI__) && \
+ (defined(__GNUC__) || defined(_MSC_VER)) && \
+ (defined(i386) || defined(__i386__) || defined(_M_IX86))
+
+ #if defined(__GNUC__)
+
+ __inline__ sqlite_uint64 sqlite3Hwtime(void){
+ unsigned int lo, hi;
+ __asm__ __volatile__ ("rdtsc" : "=a" (lo), "=d" (hi));
+ return (sqlite_uint64)hi << 32 | lo;
+ }
+
+ #elif defined(_MSC_VER)
+
+ __declspec(naked) __inline sqlite_uint64 __cdecl sqlite3Hwtime(void){
+ __asm {
+ rdtsc
+ ret ; return value at EDX:EAX
+ }
+ }
+
+ #endif
+
+#elif !defined(__STRICT_ANSI__) && (defined(__GNUC__) && defined(__x86_64__))
+
+ __inline__ sqlite_uint64 sqlite3Hwtime(void){
+ unsigned long val;
+ __asm__ __volatile__ ("rdtsc" : "=A" (val));
+ return val;
+ }
+
+#elif !defined(__STRICT_ANSI__) && (defined(__GNUC__) && defined(__ppc__))
+
+ __inline__ sqlite_uint64 sqlite3Hwtime(void){
+ unsigned long long retval;
+ unsigned long junk;
+ __asm__ __volatile__ ("\n\
+ 1: mftbu %1\n\
+ mftb %L0\n\
+ mftbu %0\n\
+ cmpw %0,%1\n\
+ bne 1b"
+ : "=r" (retval), "=r" (junk));
+ return retval;
+ }
+
+#else
+
+ /*
+ ** asm() is needed for hardware timing support. Without asm(),
+ ** disable the sqlite3Hwtime() routine.
+ **
+ ** sqlite3Hwtime() is only used for some obscure debugging
+ ** and analysis configurations, not in any deliverable, so this
+ ** should not be a great loss.
+ */
+SQLITE_PRIVATE sqlite_uint64 sqlite3Hwtime(void){ return ((sqlite_uint64)0); }
+
+#endif
+
+#endif /* !defined(SQLITE_HWTIME_H) */
+
+/************** End of hwtime.h **********************************************/
+/************** Continuing where we left off in os_common.h ******************/
+
+static sqlite_uint64 g_start;
+static sqlite_uint64 g_elapsed;
+#define TIMER_START g_start=sqlite3Hwtime()
+#define TIMER_END g_elapsed=sqlite3Hwtime()-g_start
+#define TIMER_ELAPSED g_elapsed
+#else
+#define TIMER_START
+#define TIMER_END
+#define TIMER_ELAPSED ((sqlite_uint64)0)
+#endif
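+
+/* Usage sketch (not part of the original source): code elsewhere in this
+** file brackets individual system calls with these macros, roughly:
+**
+**   TIMER_START;
+**   got = osRead(fd, pBuf, cnt);
+**   TIMER_END;
+**   OSTRACE(("READ %d bytes in %llu cycles\n", got, TIMER_ELAPSED));
+**
+** Without SQLITE_PERFORMANCE_TRACE, TIMER_START and TIMER_END expand to
+** nothing and TIMER_ELAPSED is the constant 0, so the tracing costs
+** nothing in release builds.
+*/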
+
+/*
+** If we compile with the SQLITE_TEST macro set, then the following block
+** of code will give us the ability to simulate a disk I/O error. This
+** is used for testing the I/O recovery logic.
+*/
+#if defined(SQLITE_TEST)
+SQLITE_API extern int sqlite3_io_error_hit;
+SQLITE_API extern int sqlite3_io_error_hardhit;
+SQLITE_API extern int sqlite3_io_error_pending;
+SQLITE_API extern int sqlite3_io_error_persist;
+SQLITE_API extern int sqlite3_io_error_benign;
+SQLITE_API extern int sqlite3_diskfull_pending;
+SQLITE_API extern int sqlite3_diskfull;
+#define SimulateIOErrorBenign(X) sqlite3_io_error_benign=(X)
+#define SimulateIOError(CODE) \
+ if( (sqlite3_io_error_persist && sqlite3_io_error_hit) \
+ || sqlite3_io_error_pending-- == 1 ) \
+ { local_ioerr(); CODE; }
+static void local_ioerr(){
+ IOTRACE(("IOERR\n"));
+ sqlite3_io_error_hit++;
+ if( !sqlite3_io_error_benign ) sqlite3_io_error_hardhit++;
+}
+#define SimulateDiskfullError(CODE) \
+ if( sqlite3_diskfull_pending ){ \
+ if( sqlite3_diskfull_pending == 1 ){ \
+ local_ioerr(); \
+ sqlite3_diskfull = 1; \
+ sqlite3_io_error_hit = 1; \
+ CODE; \
+ }else{ \
+ sqlite3_diskfull_pending--; \
+ } \
+ }
+#else
+#define SimulateIOErrorBenign(X)
+#define SimulateIOError(A)
+#define SimulateDiskfullError(A)
+#endif /* defined(SQLITE_TEST) */
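+
+/* Illustrative note (not part of the original source): a typical use of
+** the macro above inside an I/O routine would be
+**
+**   SimulateIOError( return SQLITE_IOERR_READ );
+**
+** When sqlite3_io_error_pending counts down to one (or a persistent
+** error has already been hit), local_ioerr() records the event and the
+** injected statement executes, making the routine fail as if the
+** underlying disk had reported an error.
+*/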
+
+/*
+** When testing, keep a count of the number of open files.
+*/
+#if defined(SQLITE_TEST)
+SQLITE_API extern int sqlite3_open_file_count;
+#define OpenCounter(X) sqlite3_open_file_count+=(X)
+#else
+#define OpenCounter(X)
+#endif /* defined(SQLITE_TEST) */
+
+#endif /* !defined(_OS_COMMON_H_) */
+
+/************** End of os_common.h *******************************************/
+/************** Continuing where we left off in os_unix.c ********************/
+
+/*
+** Define various macros that are missing from some systems.
+*/
+#ifndef O_LARGEFILE
+# define O_LARGEFILE 0
+#endif
+#ifdef SQLITE_DISABLE_LFS
+# undef O_LARGEFILE
+# define O_LARGEFILE 0
+#endif
+#ifndef O_NOFOLLOW
+# define O_NOFOLLOW 0
+#endif
+#ifndef O_BINARY
+# define O_BINARY 0
+#endif
+
+/*
+** The threadid macro resolves to the thread-id or to 0. Used for
+** testing and debugging only.
+*/
+#if SQLITE_THREADSAFE
+#define threadid pthread_self()
+#else
+#define threadid 0
+#endif
+
+/*
+** HAVE_MREMAP defaults to true on Linux and false everywhere else.
+*/
+#if !defined(HAVE_MREMAP)
+# if defined(__linux__) && defined(_GNU_SOURCE)
+# define HAVE_MREMAP 1
+# else
+# define HAVE_MREMAP 0
+# endif
+#endif
+
+/*
+** Explicitly call the 64-bit version of lseek() on Android. Otherwise, lseek()
+** is the 32-bit version, even if _FILE_OFFSET_BITS=64 is defined.
+*/
+#ifdef __ANDROID__
+# define lseek lseek64
+#endif
+
+#ifdef __linux__
+/*
+** Linux-specific IOCTL magic numbers used for controlling F2FS
+*/
+#define F2FS_IOCTL_MAGIC 0xf5
+#define F2FS_IOC_START_ATOMIC_WRITE _IO(F2FS_IOCTL_MAGIC, 1)
+#define F2FS_IOC_COMMIT_ATOMIC_WRITE _IO(F2FS_IOCTL_MAGIC, 2)
+#define F2FS_IOC_START_VOLATILE_WRITE _IO(F2FS_IOCTL_MAGIC, 3)
+#define F2FS_IOC_ABORT_VOLATILE_WRITE _IO(F2FS_IOCTL_MAGIC, 5)
+#define F2FS_IOC_GET_FEATURES _IOR(F2FS_IOCTL_MAGIC, 12, u32)
+#define F2FS_FEATURE_ATOMIC_WRITE 0x0004
+#endif /* __linux__ */
+
+
+/*
+** Different Unix systems declare open() in different ways. Some use
+** open(const char*,int,mode_t). Others use open(const char*,int,...).
+** The difference is important when using a pointer to the function.
+**
+** The safest way to deal with the problem is to always use this wrapper
+** which always has the same well-defined interface.
+*/
+static int posixOpen(const char *zFile, int flags, int mode){
+ return open(zFile, flags, mode);
+}
+
+/* Forward reference */
+static int openDirectory(const char*, int*);
+static int unixGetpagesize(void);
+
+/*
+** Many system calls are accessed through pointer-to-functions so that
+** they may be overridden at runtime to facilitate fault injection during
+** testing and sandboxing. The following array holds the names and pointers
+** to all overrideable system calls.
+*/
+static struct unix_syscall {
+ const char *zName; /* Name of the system call */
+ sqlite3_syscall_ptr pCurrent; /* Current value of the system call */
+ sqlite3_syscall_ptr pDefault; /* Default value */
+} aSyscall[] = {
+ { "open", (sqlite3_syscall_ptr)posixOpen, 0 },
+#define osOpen ((int(*)(const char*,int,int))aSyscall[0].pCurrent)
+
+ { "close", (sqlite3_syscall_ptr)close, 0 },
+#define osClose ((int(*)(int))aSyscall[1].pCurrent)
+
+ { "access", (sqlite3_syscall_ptr)access, 0 },
+#define osAccess ((int(*)(const char*,int))aSyscall[2].pCurrent)
+
+ { "getcwd", (sqlite3_syscall_ptr)getcwd, 0 },
+#define osGetcwd ((char*(*)(char*,size_t))aSyscall[3].pCurrent)
+
+ { "stat", (sqlite3_syscall_ptr)stat, 0 },
+#define osStat ((int(*)(const char*,struct stat*))aSyscall[4].pCurrent)
+
+/*
+** The DJGPP compiler environment looks mostly like Unix, but it
+** lacks the fcntl() system call. So redefine fcntl() to be something
+** that always succeeds. This means that locking does not occur under
+** DJGPP. But it is DOS - what did you expect?
+*/
+#ifdef __DJGPP__
+ { "fstat", 0, 0 },
+#define osFstat(a,b,c) 0
+#else
+ { "fstat", (sqlite3_syscall_ptr)fstat, 0 },
+#define osFstat ((int(*)(int,struct stat*))aSyscall[5].pCurrent)
+#endif
+
+ { "ftruncate", (sqlite3_syscall_ptr)ftruncate, 0 },
+#define osFtruncate ((int(*)(int,off_t))aSyscall[6].pCurrent)
+
+ { "fcntl", (sqlite3_syscall_ptr)fcntl, 0 },
+#define osFcntl ((int(*)(int,int,...))aSyscall[7].pCurrent)
+
+ { "read", (sqlite3_syscall_ptr)read, 0 },
+#define osRead ((ssize_t(*)(int,void*,size_t))aSyscall[8].pCurrent)
+
+#if defined(USE_PREAD) || SQLITE_ENABLE_LOCKING_STYLE
+ { "pread", (sqlite3_syscall_ptr)pread, 0 },
+#else
+ { "pread", (sqlite3_syscall_ptr)0, 0 },
+#endif
+#define osPread ((ssize_t(*)(int,void*,size_t,off_t))aSyscall[9].pCurrent)
+
+#if defined(USE_PREAD64)
+ { "pread64", (sqlite3_syscall_ptr)pread64, 0 },
+#else
+ { "pread64", (sqlite3_syscall_ptr)0, 0 },
+#endif
+#define osPread64 ((ssize_t(*)(int,void*,size_t,off64_t))aSyscall[10].pCurrent)
+
+ { "write", (sqlite3_syscall_ptr)write, 0 },
+#define osWrite ((ssize_t(*)(int,const void*,size_t))aSyscall[11].pCurrent)
+
+#if defined(USE_PREAD) || SQLITE_ENABLE_LOCKING_STYLE
+ { "pwrite", (sqlite3_syscall_ptr)pwrite, 0 },
+#else
+ { "pwrite", (sqlite3_syscall_ptr)0, 0 },
+#endif
+#define osPwrite ((ssize_t(*)(int,const void*,size_t,off_t))\
+ aSyscall[12].pCurrent)
+
+#if defined(USE_PREAD64)
+ { "pwrite64", (sqlite3_syscall_ptr)pwrite64, 0 },
+#else
+ { "pwrite64", (sqlite3_syscall_ptr)0, 0 },
+#endif
+#define osPwrite64 ((ssize_t(*)(int,const void*,size_t,off64_t))\
+ aSyscall[13].pCurrent)
+
+ { "fchmod", (sqlite3_syscall_ptr)fchmod, 0 },
+#define osFchmod ((int(*)(int,mode_t))aSyscall[14].pCurrent)
+
+#if defined(HAVE_POSIX_FALLOCATE) && HAVE_POSIX_FALLOCATE
+ { "fallocate", (sqlite3_syscall_ptr)posix_fallocate, 0 },
+#else
+ { "fallocate", (sqlite3_syscall_ptr)0, 0 },
+#endif
+#define osFallocate ((int(*)(int,off_t,off_t))aSyscall[15].pCurrent)
+
+ { "unlink", (sqlite3_syscall_ptr)unlink, 0 },
+#define osUnlink ((int(*)(const char*))aSyscall[16].pCurrent)
+
+ { "openDirectory", (sqlite3_syscall_ptr)openDirectory, 0 },
+#define osOpenDirectory ((int(*)(const char*,int*))aSyscall[17].pCurrent)
+
+ { "mkdir", (sqlite3_syscall_ptr)mkdir, 0 },
+#define osMkdir ((int(*)(const char*,mode_t))aSyscall[18].pCurrent)
+
+ { "rmdir", (sqlite3_syscall_ptr)rmdir, 0 },
+#define osRmdir ((int(*)(const char*))aSyscall[19].pCurrent)
+
+#if defined(HAVE_FCHOWN)
+ { "fchown", (sqlite3_syscall_ptr)fchown, 0 },
+#else
+ { "fchown", (sqlite3_syscall_ptr)0, 0 },
+#endif
+#define osFchown ((int(*)(int,uid_t,gid_t))aSyscall[20].pCurrent)
+
+#if defined(HAVE_FCHOWN)
+ { "geteuid", (sqlite3_syscall_ptr)geteuid, 0 },
+#else
+ { "geteuid", (sqlite3_syscall_ptr)0, 0 },
+#endif
+#define osGeteuid ((uid_t(*)(void))aSyscall[21].pCurrent)
+
+#if !defined(SQLITE_OMIT_WAL) || SQLITE_MAX_MMAP_SIZE>0
+ { "mmap", (sqlite3_syscall_ptr)mmap, 0 },
+#else
+ { "mmap", (sqlite3_syscall_ptr)0, 0 },
+#endif
+#define osMmap ((void*(*)(void*,size_t,int,int,int,off_t))aSyscall[22].pCurrent)
+
+#if !defined(SQLITE_OMIT_WAL) || SQLITE_MAX_MMAP_SIZE>0
+ { "munmap", (sqlite3_syscall_ptr)munmap, 0 },
+#else
+ { "munmap", (sqlite3_syscall_ptr)0, 0 },
+#endif
+#define osMunmap ((int(*)(void*,size_t))aSyscall[23].pCurrent)
+
+#if HAVE_MREMAP && (!defined(SQLITE_OMIT_WAL) || SQLITE_MAX_MMAP_SIZE>0)
+ { "mremap", (sqlite3_syscall_ptr)mremap, 0 },
+#else
+ { "mremap", (sqlite3_syscall_ptr)0, 0 },
+#endif
+#define osMremap ((void*(*)(void*,size_t,size_t,int,...))aSyscall[24].pCurrent)
+
+#if !defined(SQLITE_OMIT_WAL) || SQLITE_MAX_MMAP_SIZE>0
+ { "getpagesize", (sqlite3_syscall_ptr)unixGetpagesize, 0 },
+#else
+ { "getpagesize", (sqlite3_syscall_ptr)0, 0 },
+#endif
+#define osGetpagesize ((int(*)(void))aSyscall[25].pCurrent)
+
+#if defined(HAVE_READLINK)
+ { "readlink", (sqlite3_syscall_ptr)readlink, 0 },
+#else
+ { "readlink", (sqlite3_syscall_ptr)0, 0 },
+#endif
+#define osReadlink ((ssize_t(*)(const char*,char*,size_t))aSyscall[26].pCurrent)
+
+#if defined(HAVE_LSTAT)
+ { "lstat", (sqlite3_syscall_ptr)lstat, 0 },
+#else
+ { "lstat", (sqlite3_syscall_ptr)0, 0 },
+#endif
+#define osLstat ((int(*)(const char*,struct stat*))aSyscall[27].pCurrent)
+
+#if defined(__linux__) && defined(SQLITE_ENABLE_BATCH_ATOMIC_WRITE)
+# ifdef __ANDROID__
+ { "ioctl", (sqlite3_syscall_ptr)(int(*)(int, int, ...))ioctl, 0 },
+#define osIoctl ((int(*)(int,int,...))aSyscall[28].pCurrent)
+# else
+ { "ioctl", (sqlite3_syscall_ptr)ioctl, 0 },
+#define osIoctl ((int(*)(int,unsigned long,...))aSyscall[28].pCurrent)
+# endif
+#else
+ { "ioctl", (sqlite3_syscall_ptr)0, 0 },
+#endif
+
+}; /* End of the overrideable system calls */
+
+
+/*
+** On some systems, calls to fchown() will trigger a message in a security
+** log if they come from non-root processes. So avoid calling fchown() if
+** we are not running as root.
+*/
+static int robustFchown(int fd, uid_t uid, gid_t gid){
+#if defined(HAVE_FCHOWN)
+ return osGeteuid() ? 0 : osFchown(fd,uid,gid);
+#else
+ return 0;
+#endif
+}
+
+/*
+** This is the xSetSystemCall() method of sqlite3_vfs for all of the
+** "unix" VFSes. Return SQLITE_OK opon successfully updating the
+** system call pointer, or SQLITE_NOTFOUND if there is no configurable
+** system call named zName.
+*/
+static int unixSetSystemCall(
+ sqlite3_vfs *pNotUsed, /* The VFS pointer. Not used */
+ const char *zName, /* Name of system call to override */
+ sqlite3_syscall_ptr pNewFunc /* Pointer to new system call value */
+){
+ unsigned int i;
+ int rc = SQLITE_NOTFOUND;
+
+ UNUSED_PARAMETER(pNotUsed);
+ if( zName==0 ){
+ /* If no zName is given, restore all system calls to their default
+ ** settings and return SQLITE_OK.
+ */
+ rc = SQLITE_OK;
+ for(i=0; i<sizeof(aSyscall)/sizeof(aSyscall[0]); i++){
+ if( aSyscall[i].pDefault ){
+ aSyscall[i].pCurrent = aSyscall[i].pDefault;
+ }
+ }
+ }else{
+ /* If zName is specified, operate on only the one system call
+ ** specified.
+ */
+ for(i=0; i<sizeof(aSyscall)/sizeof(aSyscall[0]); i++){
+ if( strcmp(zName, aSyscall[i].zName)==0 ){
+ if( aSyscall[i].pDefault==0 ){
+ aSyscall[i].pDefault = aSyscall[i].pCurrent;
+ }
+ rc = SQLITE_OK;
+ if( pNewFunc==0 ) pNewFunc = aSyscall[i].pDefault;
+ aSyscall[i].pCurrent = pNewFunc;
+ break;
+ }
+ }
+ }
+ return rc;
+}
+
+/*
+** Return the value of a system call. Return NULL if zName is not a
+** recognized system call name. NULL is also returned if the system call
+** is currently undefined.
+*/
+static sqlite3_syscall_ptr unixGetSystemCall(
+ sqlite3_vfs *pNotUsed,
+ const char *zName
+){
+ unsigned int i;
+
+ UNUSED_PARAMETER(pNotUsed);
+ for(i=0; i<sizeof(aSyscall)/sizeof(aSyscall[0]); i++){
+ if( strcmp(zName, aSyscall[i].zName)==0 ) return aSyscall[i].pCurrent;
+ }
+ return 0;
+}
+
+/*
+** Return the name of the first system call after zName. If zName==NULL
+** then return the name of the first system call. Return NULL if zName
+** is the last system call or if zName is not the name of a valid
+** system call.
+*/
+static const char *unixNextSystemCall(sqlite3_vfs *p, const char *zName){
+ int i = -1;
+
+ UNUSED_PARAMETER(p);
+ if( zName ){
+ for(i=0; i<ArraySize(aSyscall)-1; i++){
+ if( strcmp(zName, aSyscall[i].zName)==0 ) break;
+ }
+ }
+ for(i++; i<ArraySize(aSyscall); i++){
+ if( aSyscall[i].pCurrent!=0 ) return aSyscall[i].zName;
+ }
+ return 0;
+}
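+
+/* Illustrative sketch (not part of the original source): a test harness
+** can reach the three methods above through the public sqlite3_vfs
+** interface to reroute a system call for fault injection. The wrapper
+** myOpen() and the predicate shouldFail() are hypothetical.
+**
+**   static int myOpen(const char *z, int f, int m){
+**     return shouldFail(z) ? -1 : open(z, f, m);
+**   }
+**   sqlite3_vfs *pVfs = sqlite3_vfs_find("unix");
+**   pVfs->xSetSystemCall(pVfs, "open", (sqlite3_syscall_ptr)myOpen);
+**   ...                                      // run the test
+**   pVfs->xSetSystemCall(pVfs, "open", 0);   // restore the default
+*/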
+
+/*
+** Do not accept any file descriptor less than this value, in order to avoid
+** opening the database file using file descriptors that are commonly used for
+** standard input, output, and error.
+*/
+#ifndef SQLITE_MINIMUM_FILE_DESCRIPTOR
+# define SQLITE_MINIMUM_FILE_DESCRIPTOR 3
+#endif
+
+/*
+** Invoke open(). Do so multiple times, until it either succeeds or
+** fails for some reason other than EINTR.
+**
+** If the file creation mode "m" is 0 then set it to the default for
+** SQLite. The default is SQLITE_DEFAULT_FILE_PERMISSIONS (normally
+** 0644) as modified by the system umask. If m is not 0, then
+** make the file creation mode be exactly m ignoring the umask.
+**
+** The m parameter will be non-zero only when creating -wal, -journal,
+** and -shm files. We want those files to have *exactly* the same
+** permissions as their original database, unadulterated by the umask.
+** In that way, if a database file is -rw-rw-rw- or -rw-rw-r--, and a
+** transaction crashes and leaves behind hot journals, then any
+** process that is able to write to the database will also be able to
+** recover the hot journals.
+*/
+static int robust_open(const char *z, int f, mode_t m){
+ int fd;
+ mode_t m2 = m ? m : SQLITE_DEFAULT_FILE_PERMISSIONS;
+ while(1){
+#if defined(O_CLOEXEC)
+ fd = osOpen(z,f|O_CLOEXEC,m2);
+#else
+ fd = osOpen(z,f,m2);
+#endif
+ if( fd<0 ){
+ if( errno==EINTR ) continue;
+ break;
+ }
+ if( fd>=SQLITE_MINIMUM_FILE_DESCRIPTOR ) break;
+ osClose(fd);
+ sqlite3_log(SQLITE_WARNING,
+ "attempt to open \"%s\" as file descriptor %d", z, fd);
+ fd = -1;
+ if( osOpen("/dev/null", O_RDONLY, m)<0 ) break;
+ }
+ if( fd>=0 ){
+ if( m!=0 ){
+ struct stat statbuf;
+ if( osFstat(fd, &statbuf)==0
+ && statbuf.st_size==0
+ && (statbuf.st_mode&0777)!=m
+ ){
+ osFchmod(fd, m);
+ }
+ }
+#if defined(FD_CLOEXEC) && (!defined(O_CLOEXEC) || O_CLOEXEC==0)
+ osFcntl(fd, F_SETFD, osFcntl(fd, F_GETFD, 0) | FD_CLOEXEC);
+#endif
+ }
+ return fd;
+}
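+
+/* Clarifying note (not part of the original source): the /dev/null open
+** in the loop above covers the case where one of the standard streams
+** (descriptors 0, 1 or 2) has been closed by the host application. The
+** database file could otherwise be assigned that low descriptor and be
+** corrupted by unrelated code writing to "stdout" or "stderr". The
+** /dev/null descriptor is deliberately leaked so that it keeps the low
+** slot occupied while the loop retries the open.
+*/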
+
+/*
+** Helper functions to obtain and relinquish the global mutex. The
+** global mutex is used to protect the unixInodeInfo and
+** vxworksFileId objects used by this file, all of which may be
+** shared by multiple threads.
+**
+** Function unixMutexHeld() is used to assert() that the global mutex
+** is held when required. This function is only used as part of assert()
+** statements. e.g.
+**
+** unixEnterMutex()
+** assert( unixMutexHeld() );
+** unixLeaveMutex()
+**
+** To prevent deadlock, the global unixBigLock mutex must be acquired
+** before the unixInodeInfo.pLockMutex mutex, if both are held. It is
+** OK to get the pLockMutex without holding unixBigLock first, but if
+** that happens, the unixBigLock mutex must not be acquired until after
+** pLockMutex is released.
+**
+** OK: enter(unixBigLock), enter(pLockMutex)
+** OK: enter(unixBigLock)
+** OK: enter(pLockMutex)
+** ERROR: enter(pLockMutex), enter(unixBigLock)
+*/
+static sqlite3_mutex *unixBigLock = 0;
+static void unixEnterMutex(void){
+ assert( sqlite3_mutex_notheld(unixBigLock) ); /* Not a recursive mutex */
+ sqlite3_mutex_enter(unixBigLock);
+}
+static void unixLeaveMutex(void){
+ assert( sqlite3_mutex_held(unixBigLock) );
+ sqlite3_mutex_leave(unixBigLock);
+}
+#ifdef SQLITE_DEBUG
+static int unixMutexHeld(void) {
+ return sqlite3_mutex_held(unixBigLock);
+}
+#endif
+
+
+#ifdef SQLITE_HAVE_OS_TRACE
+/*
+** Helper function for printing out trace information from debugging
+** binaries. This returns the string representation of the supplied
+** integer lock-type.
+*/
+static const char *azFileLock(int eFileLock){
+ switch( eFileLock ){
+ case NO_LOCK: return "NONE";
+ case SHARED_LOCK: return "SHARED";
+ case RESERVED_LOCK: return "RESERVED";
+ case PENDING_LOCK: return "PENDING";
+ case EXCLUSIVE_LOCK: return "EXCLUSIVE";
+ }
+ return "ERROR";
+}
+#endif
+
+#ifdef SQLITE_LOCK_TRACE
+/*
+** Print out information about all locking operations.
+**
+** This routine is used for troubleshooting locks on multithreaded
+** platforms. Enable by compiling with the -DSQLITE_LOCK_TRACE
+** command-line option on the compiler. This code is normally
+** turned off.
+*/
+static int lockTrace(int fd, int op, struct flock *p){
+ char *zOpName, *zType;
+ int s;
+ int savedErrno;
+ if( op==F_GETLK ){
+ zOpName = "GETLK";
+ }else if( op==F_SETLK ){
+ zOpName = "SETLK";
+ }else{
+ s = osFcntl(fd, op, p);
+ sqlite3DebugPrintf("fcntl unknown %d %d %d\n", fd, op, s);
+ return s;
+ }
+ if( p->l_type==F_RDLCK ){
+ zType = "RDLCK";
+ }else if( p->l_type==F_WRLCK ){
+ zType = "WRLCK";
+ }else if( p->l_type==F_UNLCK ){
+ zType = "UNLCK";
+ }else{
+ assert( 0 );
+ }
+ assert( p->l_whence==SEEK_SET );
+ s = osFcntl(fd, op, p);
+ savedErrno = errno;
+ sqlite3DebugPrintf("fcntl %d %d %s %s %d %d %d %d\n",
+ threadid, fd, zOpName, zType, (int)p->l_start, (int)p->l_len,
+ (int)p->l_pid, s);
+ if( s==(-1) && op==F_SETLK && (p->l_type==F_RDLCK || p->l_type==F_WRLCK) ){
+ struct flock l2;
+ l2 = *p;
+ osFcntl(fd, F_GETLK, &l2);
+ if( l2.l_type==F_RDLCK ){
+ zType = "RDLCK";
+ }else if( l2.l_type==F_WRLCK ){
+ zType = "WRLCK";
+ }else if( l2.l_type==F_UNLCK ){
+ zType = "UNLCK";
+ }else{
+ assert( 0 );
+ }
+ sqlite3DebugPrintf("fcntl-failure-reason: %s %d %d %d\n",
+ zType, (int)l2.l_start, (int)l2.l_len, (int)l2.l_pid);
+ }
+ errno = savedErrno;
+ return s;
+}
+#undef osFcntl
+#define osFcntl lockTrace
+#endif /* SQLITE_LOCK_TRACE */
+
+/*
+** Retry ftruncate() calls that fail due to EINTR
+**
+** All calls to ftruncate() within this file should be made through
+** this wrapper. On the Android platform, bypassing the logic below
+** could lead to a corrupt database.
+*/
+static int robust_ftruncate(int h, sqlite3_int64 sz){
+ int rc;
+#ifdef __ANDROID__
+ /* On Android, ftruncate() always uses 32-bit offsets, even if
+ ** _FILE_OFFSET_BITS=64 is defined. This means it is unsafe to attempt to
+ ** truncate a file to any size larger than 2GiB. Silently ignore any
+ ** such attempts. */
+ if( sz>(sqlite3_int64)0x7FFFFFFF ){
+ rc = SQLITE_OK;
+ }else
+#endif
+ do{ rc = osFtruncate(h,sz); }while( rc<0 && errno==EINTR );
+ return rc;
+}
+
+/*
+** This routine translates a standard POSIX errno code into something
+** useful to the clients of the sqlite3 functions. Specifically, it is
+** intended to translate a variety of "try again" errors into SQLITE_BUSY
+** and a variety of "please close the file descriptor NOW" errors into
+** SQLITE_IOERR.
+**
+** Errors during initialization of locks, or file system support for locks,
+** should handle ENOLCK, ENOTSUP, EOPNOTSUPP separately.
+*/
+static int sqliteErrorFromPosixError(int posixError, int sqliteIOErr) {
+ assert( (sqliteIOErr == SQLITE_IOERR_LOCK) ||
+ (sqliteIOErr == SQLITE_IOERR_UNLOCK) ||
+ (sqliteIOErr == SQLITE_IOERR_RDLOCK) ||
+ (sqliteIOErr == SQLITE_IOERR_CHECKRESERVEDLOCK) );
+ switch (posixError) {
+ case EACCES:
+ case EAGAIN:
+ case ETIMEDOUT:
+ case EBUSY:
+ case EINTR:
+ case ENOLCK:
+ /* random NFS retry error, unless during file system support
+ * introspection, in which case it actually means what it says */
+ return SQLITE_BUSY;
+
+ case EPERM:
+ return SQLITE_PERM;
+
+ default:
+ return sqliteIOErr;
+ }
+}
+
+
+/******************************************************************************
+****************** Begin Unique File ID Utility Used By VxWorks ***************
+**
+** On most versions of unix, we can get a unique ID for a file by concatenating
+** the device number and the inode number. But this does not work on VxWorks.
+** On VxWorks, a unique file id must be based on the canonical filename.
+**
+** A pointer to an instance of the following structure can be used as a
+** unique file ID in VxWorks. Each instance of this structure contains
+** a copy of the canonical filename. There is also a reference count.
+** The structure is reclaimed when the number of pointers to it drops to
+** zero.
+**
+** There are never very many files open at one time and lookups are not
+** a performance-critical path, so it is sufficient to put these
+** structures on a linked list.
+*/
+struct vxworksFileId {
+ struct vxworksFileId *pNext; /* Next in a list of them all */
+ int nRef; /* Number of references to this one */
+ int nName; /* Length of the zCanonicalName[] string */
+ char *zCanonicalName; /* Canonical filename */
+};
+
+#if OS_VXWORKS
+/*
+** All unique filenames are held on a linked list headed by this
+** variable:
+*/
+static struct vxworksFileId *vxworksFileList = 0;
+
+/*
+** Simplify a filename into its canonical form
+** by making the following changes:
+**
+** * remove any trailing and duplicate /
+** * convert /./ into just /
+** * convert /A/../ where A is any simple name into just /
+**
+** Changes are made in-place. Return the new name length.
+**
+** The original filename is in z[0..n-1]. Return the number of
+** characters in the simplified name.
+*/
+static int vxworksSimplifyName(char *z, int n){
+ int i, j;
+ while( n>1 && z[n-1]=='/' ){ n--; }
+ for(i=j=0; i<n; i++){
+ if( z[i]=='/' ){
+ if( z[i+1]=='/' ) continue;
+ if( z[i+1]=='.' && i+2<n && z[i+2]=='/' ){
+ i += 1;
+ continue;
+ }
+ if( z[i+1]=='.' && i+3<n && z[i+2]=='.' && z[i+3]=='/' ){
+ while( j>0 && z[j-1]!='/' ){ j--; }
+ if( j>0 ){ j--; }
+ i += 2;
+ continue;
+ }
+ }
+ z[j++] = z[i];
+ }
+ z[j] = 0;
+ return j;
+}
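+
+/* Worked example (not part of the original source): for the input
+** "/tmp//./x/../db" (n==15) the loop above drops the duplicate '/',
+** the "/./" component, and the "x/../" pair, yielding "/tmp/db" and
+** returning 7.
+*/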
+
+/*
+** Find a unique file ID for the given absolute pathname. Return
+** a pointer to the vxworksFileId object. This pointer is the unique
+** file ID.
+**
+** The nRef field of the vxworksFileId object is incremented before
+** the object is returned. A new vxworksFileId object is created
+** and added to the global list if necessary.
+**
+** If a memory allocation error occurs, return NULL.
+*/
+static struct vxworksFileId *vxworksFindFileId(const char *zAbsoluteName){
+ struct vxworksFileId *pNew; /* search key and new file ID */
+ struct vxworksFileId *pCandidate; /* For looping over existing file IDs */
+ int n; /* Length of zAbsoluteName string */
+
+ assert( zAbsoluteName[0]=='/' );
+ n = (int)strlen(zAbsoluteName);
+ pNew = sqlite3_malloc64( sizeof(*pNew) + (n+1) );
+ if( pNew==0 ) return 0;
+ pNew->zCanonicalName = (char*)&pNew[1];
+ memcpy(pNew->zCanonicalName, zAbsoluteName, n+1);
+ n = vxworksSimplifyName(pNew->zCanonicalName, n);
+
+ /* Search for an existing entry that matches the canonical name.
+ ** If found, increment the reference count and return a pointer to
+ ** the existing file ID.
+ */
+ unixEnterMutex();
+ for(pCandidate=vxworksFileList; pCandidate; pCandidate=pCandidate->pNext){
+ if( pCandidate->nName==n
+ && memcmp(pCandidate->zCanonicalName, pNew->zCanonicalName, n)==0
+ ){
+ sqlite3_free(pNew);
+ pCandidate->nRef++;
+ unixLeaveMutex();
+ return pCandidate;
+ }
+ }
+
+ /* No match was found. We will make a new file ID */
+ pNew->nRef = 1;
+ pNew->nName = n;
+ pNew->pNext = vxworksFileList;
+ vxworksFileList = pNew;
+ unixLeaveMutex();
+ return pNew;
+}
+
+/*
+** Decrement the reference count on a vxworksFileId object. Free
+** the object when the reference count reaches zero.
+*/
+static void vxworksReleaseFileId(struct vxworksFileId *pId){
+ unixEnterMutex();
+ assert( pId->nRef>0 );
+ pId->nRef--;
+ if( pId->nRef==0 ){
+ struct vxworksFileId **pp;
+ for(pp=&vxworksFileList; *pp && *pp!=pId; pp = &((*pp)->pNext)){}
+ assert( *pp==pId );
+ *pp = pId->pNext;
+ sqlite3_free(pId);
+ }
+ unixLeaveMutex();
+}
+#endif /* OS_VXWORKS */
+/*************** End of Unique File ID Utility Used By VxWorks ****************
+******************************************************************************/
+
+
+/******************************************************************************
+*************************** Posix Advisory Locking ****************************
+**
+** POSIX advisory locks are broken by design. ANSI STD 1003.1 (1996)
+** section 6.5.2.2 lines 483 through 490 specify that when a process
+** sets or clears a lock, that operation overrides any prior locks set
+** by the same process. It does not explicitly say so, but this implies
+** that it overrides locks set by the same process using a different
+** file descriptor. Consider this test case:
+**
+** int fd1 = open("./file1", O_RDWR|O_CREAT, 0644);
+** int fd2 = open("./file2", O_RDWR|O_CREAT, 0644);
+**
+** Suppose ./file1 and ./file2 are really the same file (because
+** one is a hard or symbolic link to the other), then if you set
+** an exclusive lock on fd1, then try to get an exclusive lock
+** on fd2, it works. I would have expected the second lock to
+** fail since there was already a lock on the file due to fd1.
+** But not so. Since both locks came from the same process, the
+** second overrides the first, even though they were on different
+** file descriptors opened on different file names.
+**
+** This means that we cannot use POSIX locks to synchronize file access
+** among competing threads of the same process. POSIX locks will work fine
+** to synchronize access for threads in separate processes, but not
+** threads within the same process.
+**
+** To work around the problem, SQLite has to manage file locks internally
+** on its own. Whenever a new database is opened, we have to find the
+** specific inode of the database file (the inode is determined by the
+** st_dev and st_ino fields of the stat structure that fstat() fills in)
+** and check for locks already existing on that inode. When locks are
+** created or removed, we have to look at our own internal record of the
+** locks to see if another thread has previously set a lock on that same
+** inode.
+**
+** (Aside: The use of inode numbers as unique IDs does not work on VxWorks.
+** For VxWorks, we have to use the alternative unique ID system based on
+** canonical filename and implemented in the previous division.)
+**
+** The sqlite3_file structure for POSIX is no longer just an integer file
+** descriptor. It is now a structure that holds the integer file
+** descriptor and a pointer to a structure that describes the internal
+** locks on the corresponding inode. There is one locking structure
+** per inode, so if the same inode is opened twice, both unixFile structures
+** point to the same locking structure. The locking structure keeps
+** a reference count (so we will know when to delete it) and a "cnt"
+** field that tells us its internal lock status. cnt==0 means the
+** file is unlocked. cnt==-1 means the file has an exclusive lock.
+** cnt>0 means there are cnt shared locks on the file.
+**
+** Any attempt to lock or unlock a file first checks the locking
+** structure. The fcntl() system call is only invoked to set a
+** POSIX lock if the internal lock structure transitions between
+** a locked and an unlocked state.
+**
+** But wait: there are yet more problems with POSIX advisory locks.
+**
+** If you close a file descriptor that points to a file that has locks,
+** all locks on that file that are owned by the current process are
+** released. To work around this problem, each unixInodeInfo object
+** maintains a count of the number of pending locks on that inode.
+** When an attempt is made to close a unixFile, if there are
+** other unixFiles open on the same inode that are holding locks, the call
+** to close() the file descriptor is deferred until all of the locks clear.
+** The unixInodeInfo structure keeps a list of file descriptors that need to
+** be closed and that list is walked (and cleared) when the last lock
+** clears.
+**
+** Yet another problem: LinuxThreads do not play well with posix locks.
+**
+** Many older versions of linux use the LinuxThreads library which is
+** not posix compliant. Under LinuxThreads, a lock created by thread
+** A cannot be modified or overridden by a different thread B.
+** Only thread A can modify the lock. Locking behavior is correct
+** if the application uses the newer Native Posix Thread Library (NPTL)
+** on linux - with NPTL a lock created by thread A can override locks
+** in thread B. But there is no way to know at compile-time which
+** threading library is being used. So there is no way to know at
+** compile-time whether or not thread A can override locks on thread B.
+** One has to do a run-time check to discover the behavior of the
+** current process.
+**
+** SQLite used to support LinuxThreads. But support for LinuxThreads
+** was dropped beginning with version 3.7.0. SQLite will still work with
+** LinuxThreads provided that (1) there is no more than one connection
+** per database file in the same process and (2) database connections
+** do not move across threads.
+*/
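+
+/* Illustrative sketch (not part of the original source) of the
+** close()-drops-locks hazard described above. Closing ANY descriptor
+** on a file releases every POSIX lock the process holds on that file:
+**
+**   int fd1 = open("db", O_RDWR);
+**   struct flock l;
+**   memset(&l, 0, sizeof(l));
+**   l.l_type = F_WRLCK;             // lock the whole file via fd1
+**   fcntl(fd1, F_SETLK, &l);
+**   int fd2 = open("db", O_RDWR);   // second handle, same inode
+**   close(fd2);                     // fd1's lock is silently gone!
+**
+** The deferred-close list walked by closePendingFds() below exists
+** precisely to avoid this trap.
+*/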
+
+/*
+** An instance of the following structure serves as the key used
+** to locate a particular unixInodeInfo object.
+*/
+struct unixFileId {
+ dev_t dev; /* Device number */
+#if OS_VXWORKS
+ struct vxworksFileId *pId; /* Unique file ID for vxworks. */
+#else
+ /* We are told that some versions of Android contain a bug that
+ ** sizes ino_t at only 32-bits instead of 64-bits. (See
+ ** https://android-review.googlesource.com/#/c/115351/3/dist/sqlite3.c)
+ ** To work around this, always allocate 64-bits for the inode number.
+ ** On small machines that only have 32-bit inodes, this wastes 4 bytes,
+ ** but that should not be a big deal. */
+ /* WAS: ino_t ino; */
+ u64 ino; /* Inode number */
+#endif
+};
+
+/*
+** An instance of the following structure is allocated for each open
+** inode.
+**
+** A single inode can have multiple file descriptors, so each unixFile
+** structure contains a pointer to an instance of this object and this
+** object keeps a count of the number of unixFile objects pointing to it.
+**
+** Mutex rules:
+**
+** (1) Only the pLockMutex mutex must be held in order to read or write
+** any of the locking fields:
+** nShared, nLock, eFileLock, bProcessLock, pUnused
+**
+** (2) When nRef>0, then the following fields are unchanging and can
+** be read (but not written) without holding any mutex:
+** fileId, pLockMutex
+**
+** (3) With the exceptions above, all the fields may only be read
+** or written while holding the global unixBigLock mutex.
+**
+** Deadlock prevention: The global unixBigLock mutex may not
+** be acquired while holding the pLockMutex mutex. If both unixBigLock
+** and pLockMutex are needed, then unixBigLock must be acquired first.
+*/
+struct unixInodeInfo {
+ struct unixFileId fileId; /* The lookup key */
+ sqlite3_mutex *pLockMutex; /* Hold this mutex for... */
+ int nShared; /* Number of SHARED locks held */
+ int nLock; /* Number of outstanding file locks */
+ unsigned char eFileLock; /* One of SHARED_LOCK, RESERVED_LOCK etc. */
+ unsigned char bProcessLock; /* An exclusive process lock is held */
+ UnixUnusedFd *pUnused; /* Unused file descriptors to close */
+ int nRef; /* Number of pointers to this structure */
+ unixShmNode *pShmNode; /* Shared memory associated with this inode */
+ unixInodeInfo *pNext; /* List of all unixInodeInfo objects */
+ unixInodeInfo *pPrev; /* .... doubly linked */
+#if SQLITE_ENABLE_LOCKING_STYLE
+ unsigned long long sharedByte; /* for AFP simulated shared lock */
+#endif
+#if OS_VXWORKS
+ sem_t *pSem; /* Named POSIX semaphore */
+ char aSemName[MAX_PATHNAME+2]; /* Name of that semaphore */
+#endif
+};
+
+/*
+** A list of all unixInodeInfo objects.
+**
+** Must hold unixBigLock in order to read or write this variable.
+*/
+static unixInodeInfo *inodeList = 0; /* All unixInodeInfo objects */
+
+#ifdef SQLITE_DEBUG
+/*
+** True if the inode mutex (unixFile.pInode->pLockMutex) is held.
+** This routine is used only within assert() to help verify correct mutex
+** usage.
+*/
+int unixFileMutexHeld(unixFile *pFile){
+ assert( pFile->pInode );
+ return sqlite3_mutex_held(pFile->pInode->pLockMutex);
+}
+int unixFileMutexNotheld(unixFile *pFile){
+ assert( pFile->pInode );
+ return sqlite3_mutex_notheld(pFile->pInode->pLockMutex);
+}
+#endif
+
+/*
+** This function, unixLogErrorAtLine(), is only ever called via the macro
+** unixLogError().
+**
+** It is invoked after an error occurs in an OS function and errno has been
+** set. It logs a message using sqlite3_log() containing the current value of
+** errno and, if possible, the human-readable equivalent from strerror() or
+** strerror_r().
+**
+** The first argument passed to the macro should be the error code that
+** will be returned to SQLite (e.g. SQLITE_IOERR_DELETE, SQLITE_CANTOPEN).
+** The two subsequent arguments should be the name of the OS function that
+** failed (e.g. "unlink", "open") and the associated file-system path,
+** if any.
+*/
+#define unixLogError(a,b,c) unixLogErrorAtLine(a,b,c,__LINE__)
+static int unixLogErrorAtLine(
+ int errcode, /* SQLite error code */
+ const char *zFunc, /* Name of OS function that failed */
+ const char *zPath, /* File path associated with error */
+ int iLine /* Source line number where error occurred */
+){
+ char *zErr; /* Message from strerror() or equivalent */
+ int iErrno = errno; /* Saved syscall error number */
+
+ /* If this is not a threadsafe build (SQLITE_THREADSAFE==0), then use
+ ** the strerror() function to obtain the human-readable error message
+ ** equivalent to errno. Otherwise, use strerror_r().
+ */
+#if SQLITE_THREADSAFE && defined(HAVE_STRERROR_R)
+ char aErr[80];
+ memset(aErr, 0, sizeof(aErr));
+ zErr = aErr;
+
+ /* If STRERROR_R_CHAR_P (set by autoconf scripts) or __USE_GNU is defined,
+ ** assume that the system provides the GNU version of strerror_r() that
+ ** returns a pointer to a buffer containing the error message. That pointer
+ ** may point to aErr[], or it may point to some static storage somewhere.
+ ** Otherwise, assume that the system provides the POSIX version of
+ ** strerror_r(), which always writes an error message into aErr[].
+ **
+ ** If the code incorrectly assumes that it is the POSIX version that is
+ ** available, the error message will often be an empty string. Not a
+ ** huge problem. Incorrectly concluding that the GNU version is available
+ ** could lead to a segfault though.
+ */
+#if defined(STRERROR_R_CHAR_P) || defined(__USE_GNU)
+ zErr =
+# endif
+ strerror_r(iErrno, aErr, sizeof(aErr)-1);
+
+#elif SQLITE_THREADSAFE
+ /* This is a threadsafe build, but strerror_r() is not available. */
+ zErr = "";
+#else
+ /* Non-threadsafe build, use strerror(). */
+ zErr = strerror(iErrno);
+#endif
+
+ if( zPath==0 ) zPath = "";
+ sqlite3_log(errcode,
+ "os_unix.c:%d: (%d) %s(%s) - %s",
+ iLine, iErrno, zFunc, zPath, zErr
+ );
+
+ return errcode;
+}
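+
+/* Hedged usage sketch (compiled out): callers wrap a failing OS call in
+** the unixLogError() macro so that __LINE__ records the call site.  The
+** path argument may be 0 when no file name applies.  exampleDelete() is
+** a hypothetical helper, not part of this file.
+*/
+#if 0 /* illustration only */
+static int exampleDelete(const char *zPath){
+  if( osUnlink(zPath)==(-1) && errno!=ENOENT ){
+    return unixLogError(SQLITE_IOERR_DELETE, "unlink", zPath);
+  }
+  return SQLITE_OK;
+}
+#endif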
+
+/*
+** Close a file descriptor.
+**
+** We assume that close() almost always works, since it is only in a
+** very sick application or on a very sick platform that it might fail.
+** If it does fail, simply leak the file descriptor, but do log the
+** error.
+**
+** Note that it is not safe to retry close() after EINTR since the
+** file descriptor might have already been reused by another thread.
+** So we don't even try to recover from an EINTR. Just log the error
+** and move on.
+*/
+static void robust_close(unixFile *pFile, int h, int lineno){
+ if( osClose(h) ){
+ unixLogErrorAtLine(SQLITE_IOERR_CLOSE, "close",
+ pFile ? pFile->zPath : 0, lineno);
+ }
+}
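+
+/* Hedged illustration (compiled out): the retry-on-EINTR idiom is safe
+** for read() and write() because an interrupted call leaves the
+** descriptor intact; it is NOT safe for close(), where the kernel may
+** already have released the descriptor and another thread's open() may
+** have reused the same number.
+*/
+#if 0 /* illustration only */
+static ssize_t exampleRetryRead(int fd, void *pBuf, size_t n){
+  ssize_t got;
+  do{ got = read(fd, pBuf, n); }while( got<0 && errno==EINTR );
+  return got;  /* correct for read(); never do this loop for close() */
+}
+#endif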
+
+/*
+** Set the pFile->lastErrno. Do this in a subroutine as that provides
+** a convenient place to set a breakpoint.
+*/
+static void storeLastErrno(unixFile *pFile, int error){
+ pFile->lastErrno = error;
+}
+
+/*
+** Close all file descriptors accumulated in the unixInodeInfo->pUnused list.
+*/
+static void closePendingFds(unixFile *pFile){
+ unixInodeInfo *pInode = pFile->pInode;
+ UnixUnusedFd *p;
+ UnixUnusedFd *pNext;
+ assert( unixFileMutexHeld(pFile) );
+ for(p=pInode->pUnused; p; p=pNext){
+ pNext = p->pNext;
+ robust_close(pFile, p->fd, __LINE__);
+ sqlite3_free(p);
+ }
+ pInode->pUnused = 0;
+}
+
+/*
+** Release a unixInodeInfo structure previously allocated by findInodeInfo().
+**
+** The global mutex must be held when this routine is called, but the mutex
+** on the inode being deleted must NOT be held.
+*/
+static void releaseInodeInfo(unixFile *pFile){
+ unixInodeInfo *pInode = pFile->pInode;
+ assert( unixMutexHeld() );
+ assert( unixFileMutexNotheld(pFile) );
+ if( ALWAYS(pInode) ){
+ pInode->nRef--;
+ if( pInode->nRef==0 ){
+ assert( pInode->pShmNode==0 );
+ sqlite3_mutex_enter(pInode->pLockMutex);
+ closePendingFds(pFile);
+ sqlite3_mutex_leave(pInode->pLockMutex);
+ if( pInode->pPrev ){
+ assert( pInode->pPrev->pNext==pInode );
+ pInode->pPrev->pNext = pInode->pNext;
+ }else{
+ assert( inodeList==pInode );
+ inodeList = pInode->pNext;
+ }
+ if( pInode->pNext ){
+ assert( pInode->pNext->pPrev==pInode );
+ pInode->pNext->pPrev = pInode->pPrev;
+ }
+ sqlite3_mutex_free(pInode->pLockMutex);
+ sqlite3_free(pInode);
+ }
+ }
+}
+
+/*
+** Given a file descriptor, locate the unixInodeInfo object that
+** describes that file descriptor. Create a new one if necessary. The
+** value written to *ppInode might be uninitialized if an error occurs.
+**
+** The global mutex must be held when calling this routine.
+**
+** Return an appropriate error code.
+*/
+static int findInodeInfo(
+ unixFile *pFile, /* Unix file with file desc used in the key */
+ unixInodeInfo **ppInode /* Return the unixInodeInfo object here */
+){
+ int rc; /* System call return code */
+ int fd; /* The file descriptor for pFile */
+ struct unixFileId fileId; /* Lookup key for the unixInodeInfo */
+ struct stat statbuf; /* Low-level file information */
+ unixInodeInfo *pInode = 0; /* Candidate unixInodeInfo object */
+
+ assert( unixMutexHeld() );
+
+  /* Get low-level information about the file that we can use to
+ ** create a unique name for the file.
+ */
+ fd = pFile->h;
+ rc = osFstat(fd, &statbuf);
+ if( rc!=0 ){
+ storeLastErrno(pFile, errno);
+#if defined(EOVERFLOW) && defined(SQLITE_DISABLE_LFS)
+ if( pFile->lastErrno==EOVERFLOW ) return SQLITE_NOLFS;
+#endif
+ return SQLITE_IOERR;
+ }
+
+#ifdef __APPLE__
+ /* On OS X on an msdos filesystem, the inode number is reported
+ ** incorrectly for zero-size files. See ticket #3260. To work
+ ** around this problem (we consider it a bug in OS X, not SQLite)
+ ** we always increase the file size to 1 by writing a single byte
+ ** prior to accessing the inode number. The one byte written is
+ ** an ASCII 'S' character which also happens to be the first byte
+ ** in the header of every SQLite database. In this way, if there
+ ** is a race condition such that another thread has already populated
+ ** the first page of the database, no damage is done.
+ */
+ if( statbuf.st_size==0 && (pFile->fsFlags & SQLITE_FSFLAGS_IS_MSDOS)!=0 ){
+ do{ rc = osWrite(fd, "S", 1); }while( rc<0 && errno==EINTR );
+ if( rc!=1 ){
+ storeLastErrno(pFile, errno);
+ return SQLITE_IOERR;
+ }
+ rc = osFstat(fd, &statbuf);
+ if( rc!=0 ){
+ storeLastErrno(pFile, errno);
+ return SQLITE_IOERR;
+ }
+ }
+#endif
+
+ memset(&fileId, 0, sizeof(fileId));
+ fileId.dev = statbuf.st_dev;
+#if OS_VXWORKS
+ fileId.pId = pFile->pId;
+#else
+ fileId.ino = (u64)statbuf.st_ino;
+#endif
+ assert( unixMutexHeld() );
+ pInode = inodeList;
+ while( pInode && memcmp(&fileId, &pInode->fileId, sizeof(fileId)) ){
+ pInode = pInode->pNext;
+ }
+ if( pInode==0 ){
+ pInode = sqlite3_malloc64( sizeof(*pInode) );
+ if( pInode==0 ){
+ return SQLITE_NOMEM_BKPT;
+ }
+ memset(pInode, 0, sizeof(*pInode));
+ memcpy(&pInode->fileId, &fileId, sizeof(fileId));
+ if( sqlite3GlobalConfig.bCoreMutex ){
+ pInode->pLockMutex = sqlite3_mutex_alloc(SQLITE_MUTEX_FAST);
+ if( pInode->pLockMutex==0 ){
+ sqlite3_free(pInode);
+ return SQLITE_NOMEM_BKPT;
+ }
+ }
+ pInode->nRef = 1;
+ assert( unixMutexHeld() );
+ pInode->pNext = inodeList;
+ pInode->pPrev = 0;
+ if( inodeList ) inodeList->pPrev = pInode;
+ inodeList = pInode;
+ }else{
+ pInode->nRef++;
+ }
+ *ppInode = pInode;
+ return SQLITE_OK;
+}
+
+/*
+** Return TRUE if pFile has been renamed or unlinked since it was first opened.
+*/
+static int fileHasMoved(unixFile *pFile){
+#if OS_VXWORKS
+ return pFile->pInode!=0 && pFile->pId!=pFile->pInode->fileId.pId;
+#else
+ struct stat buf;
+ return pFile->pInode!=0 &&
+ (osStat(pFile->zPath, &buf)!=0
+ || (u64)buf.st_ino!=pFile->pInode->fileId.ino);
+#endif
+}
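+
+/* Hedged illustration (compiled out): the dev/ino pair from fstat()
+** names the underlying file no matter how many descriptors or paths
+** refer to it, which is what findInodeInfo() and fileHasMoved() rely
+** on.  A hypothetical same-file check:
+*/
+#if 0 /* illustration only */
+static int exampleSameFile(int fd1, int fd2){
+  struct stat s1, s2;
+  if( fstat(fd1, &s1) || fstat(fd2, &s2) ) return 0;
+  return s1.st_dev==s2.st_dev && s1.st_ino==s2.st_ino;
+}
+#endif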
+
+
+/*
+** Check a unixFile that is a database. Verify the following:
+**
+** (1) There is exactly one hard link on the file
+** (2) The file is not a symbolic link
+** (3) The file has not been renamed or unlinked
+**
+** Issue sqlite3_log(SQLITE_WARNING,...) messages if anything is not right.
+*/
+static void verifyDbFile(unixFile *pFile){
+ struct stat buf;
+ int rc;
+
+  /* These verifications occur for the main database only */
+ if( pFile->ctrlFlags & UNIXFILE_NOLOCK ) return;
+
+ rc = osFstat(pFile->h, &buf);
+ if( rc!=0 ){
+ sqlite3_log(SQLITE_WARNING, "cannot fstat db file %s", pFile->zPath);
+ return;
+ }
+ if( buf.st_nlink==0 ){
+ sqlite3_log(SQLITE_WARNING, "file unlinked while open: %s", pFile->zPath);
+ return;
+ }
+ if( buf.st_nlink>1 ){
+ sqlite3_log(SQLITE_WARNING, "multiple links to file: %s", pFile->zPath);
+ return;
+ }
+ if( fileHasMoved(pFile) ){
+ sqlite3_log(SQLITE_WARNING, "file renamed while open: %s", pFile->zPath);
+ return;
+ }
+}
+
+
+/*
+** This routine checks if there is a RESERVED lock held on the specified
+** file by this or any other process. If such a lock is held, set *pResOut
+** to a non-zero value otherwise *pResOut is set to zero. The return value
+** is set to SQLITE_OK unless an I/O error occurs during lock checking.
+*/
+static int unixCheckReservedLock(sqlite3_file *id, int *pResOut){
+ int rc = SQLITE_OK;
+ int reserved = 0;
+ unixFile *pFile = (unixFile*)id;
+
+ SimulateIOError( return SQLITE_IOERR_CHECKRESERVEDLOCK; );
+
+ assert( pFile );
+ assert( pFile->eFileLock<=SHARED_LOCK );
+ sqlite3_mutex_enter(pFile->pInode->pLockMutex);
+
+ /* Check if a thread in this process holds such a lock */
+ if( pFile->pInode->eFileLock>SHARED_LOCK ){
+ reserved = 1;
+ }
+
+ /* Otherwise see if some other process holds it.
+ */
+#ifndef __DJGPP__
+ if( !reserved && !pFile->pInode->bProcessLock ){
+ struct flock lock;
+ lock.l_whence = SEEK_SET;
+ lock.l_start = RESERVED_BYTE;
+ lock.l_len = 1;
+ lock.l_type = F_WRLCK;
+ if( osFcntl(pFile->h, F_GETLK, &lock) ){
+ rc = SQLITE_IOERR_CHECKRESERVEDLOCK;
+ storeLastErrno(pFile, errno);
+ } else if( lock.l_type!=F_UNLCK ){
+ reserved = 1;
+ }
+ }
+#endif
+
+ sqlite3_mutex_leave(pFile->pInode->pLockMutex);
+ OSTRACE(("TEST WR-LOCK %d %d %d (unix)\n", pFile->h, rc, reserved));
+
+ *pResOut = reserved;
+ return rc;
+}
+
+/*
+** Set a posix-advisory-lock.
+**
+** There are two versions of this routine. If compiled with
+** SQLITE_ENABLE_SETLK_TIMEOUT then the routine has an extra parameter
+** which is a pointer to a unixFile. If the unixFile->iBusyTimeout
+** value is set, then it is the number of milliseconds to wait before
+** failing the lock. The iBusyTimeout value is always reset back to
+** zero on each call.
+**
+** If SQLITE_ENABLE_SETLK_TIMEOUT is not defined, then do a non-blocking
+** attempt to set the lock.
+*/
+#ifndef SQLITE_ENABLE_SETLK_TIMEOUT
+# define osSetPosixAdvisoryLock(h,x,t) osFcntl(h,F_SETLK,x)
+#else
+static int osSetPosixAdvisoryLock(
+ int h, /* The file descriptor on which to take the lock */
+ struct flock *pLock, /* The description of the lock */
+ unixFile *pFile /* Structure holding timeout value */
+){
+ int tm = pFile->iBusyTimeout;
+ int rc = osFcntl(h,F_SETLK,pLock);
+ while( rc<0 && tm>0 ){
+ /* On systems that support some kind of blocking file lock with a timeout,
+ ** make appropriate changes here to invoke that blocking file lock. On
+ ** generic posix, however, there is no such API. So we simply try the
+ ** lock once every millisecond until either the timeout expires, or until
+ ** the lock is obtained. */
+ usleep(1000);
+ rc = osFcntl(h,F_SETLK,pLock);
+ tm--;
+ }
+ return rc;
+}
+#endif /* SQLITE_ENABLE_SETLK_TIMEOUT */
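+
+/* Hedged usage sketch (compiled out): under SQLITE_ENABLE_SETLK_TIMEOUT
+** a caller arms unixFile.iBusyTimeout (in milliseconds) before a locking
+** call, and the poll loop above retries F_SETLK until the budget is
+** spent.  A sketch, not a supported API:
+*/
+#if 0 /* illustration only */
+static int exampleTimedWriteLock(unixFile *pFile, struct flock *pLock){
+  pFile->iBusyTimeout = 100;  /* poll for up to ~100 ms */
+  return osSetPosixAdvisoryLock(pFile->h, pLock, pFile);
+}
+#endif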
+
+
+/*
+** Attempt to set a system-lock on the file pFile. The lock is
+** described by pLock.
+**
+** If the pFile was opened read/write from unix-excl, then the only lock
+** ever obtained is an exclusive lock, and it is obtained exactly once
+** the first time any lock is attempted. All subsequent system locking
+** operations become no-ops. Locking operations still happen internally,
+** in order to coordinate access between separate database connections
+** within this process, but all of that is handled in memory and the
+** operating system does not participate.
+**
+** This function is a pass-through to fcntl(F_SETLK) if pFile is using
+** any VFS other than "unix-excl" or if pFile is opened on "unix-excl"
+** and is read-only.
+**
+** Zero is returned if the call completes successfully, or -1 if a call
+** to fcntl() fails. In this case, errno is set appropriately (by fcntl()).
+*/
+static int unixFileLock(unixFile *pFile, struct flock *pLock){
+ int rc;
+ unixInodeInfo *pInode = pFile->pInode;
+ assert( pInode!=0 );
+ assert( sqlite3_mutex_held(pInode->pLockMutex) );
+ if( (pFile->ctrlFlags & (UNIXFILE_EXCL|UNIXFILE_RDONLY))==UNIXFILE_EXCL ){
+ if( pInode->bProcessLock==0 ){
+ struct flock lock;
+ assert( pInode->nLock==0 );
+ lock.l_whence = SEEK_SET;
+ lock.l_start = SHARED_FIRST;
+ lock.l_len = SHARED_SIZE;
+ lock.l_type = F_WRLCK;
+ rc = osSetPosixAdvisoryLock(pFile->h, &lock, pFile);
+ if( rc<0 ) return rc;
+ pInode->bProcessLock = 1;
+ pInode->nLock++;
+ }else{
+ rc = 0;
+ }
+ }else{
+ rc = osSetPosixAdvisoryLock(pFile->h, pLock, pFile);
+ }
+ return rc;
+}
+
+/*
+** Lock the file with the lock specified by parameter eFileLock - one
+** of the following:
+**
+** (1) SHARED_LOCK
+** (2) RESERVED_LOCK
+** (3) PENDING_LOCK
+** (4) EXCLUSIVE_LOCK
+**
+** Sometimes when requesting one lock state, additional lock states
+** are inserted in between. The locking might fail on one of the later
+** transitions, leaving the lock state different from where it started but
+** still short of its goal. The following chart shows the allowed
+** transitions and the inserted intermediate states:
+**
+** UNLOCKED -> SHARED
+** SHARED -> RESERVED
+** SHARED -> (PENDING) -> EXCLUSIVE
+** RESERVED -> (PENDING) -> EXCLUSIVE
+** PENDING -> EXCLUSIVE
+**
+** This routine will only increase a lock. Use the sqlite3OsUnlock()
+** routine to lower a locking level.
+*/
+static int unixLock(sqlite3_file *id, int eFileLock){
+ /* The following describes the implementation of the various locks and
+ ** lock transitions in terms of the POSIX advisory shared and exclusive
+ ** lock primitives (called read-locks and write-locks below, to avoid
+ ** confusion with SQLite lock names). The algorithms are complicated
+ ** slightly in order to be compatible with Windows95 systems simultaneously
+ ** accessing the same database file, in case that is ever required.
+ **
+  ** Symbols defined in os.h identify the 'pending byte' and the 'reserved
+  ** byte', each a single byte at a well-known offset, and the 'shared byte
+  ** range', a range of 510 bytes at a well-known offset.
+ **
+ ** To obtain a SHARED lock, a read-lock is obtained on the 'pending
+ ** byte'. If this is successful, 'shared byte range' is read-locked
+ ** and the lock on the 'pending byte' released. (Legacy note: When
+ ** SQLite was first developed, Windows95 systems were still very common,
+  ** and Windows95 lacks a shared-lock capability. So on Windows95, a
+  ** single randomly selected byte from the 'shared byte range' is locked.
+ ** Windows95 is now pretty much extinct, but this work-around for the
+ ** lack of shared-locks on Windows95 lives on, for backwards
+ ** compatibility.)
+ **
+ ** A process may only obtain a RESERVED lock after it has a SHARED lock.
+ ** A RESERVED lock is implemented by grabbing a write-lock on the
+ ** 'reserved byte'.
+ **
+ ** A process may only obtain a PENDING lock after it has obtained a
+ ** SHARED lock. A PENDING lock is implemented by obtaining a write-lock
+ ** on the 'pending byte'. This ensures that no new SHARED locks can be
+ ** obtained, but existing SHARED locks are allowed to persist. A process
+ ** does not have to obtain a RESERVED lock on the way to a PENDING lock.
+ ** This property is used by the algorithm for rolling back a journal file
+ ** after a crash.
+ **
+ ** An EXCLUSIVE lock, obtained after a PENDING lock is held, is
+ ** implemented by obtaining a write-lock on the entire 'shared byte
+ ** range'. Since all other locks require a read-lock on one of the bytes
+ ** within this range, this ensures that no other locks are held on the
+ ** database.
+ */
+ int rc = SQLITE_OK;
+ unixFile *pFile = (unixFile*)id;
+ unixInodeInfo *pInode;
+ struct flock lock;
+ int tErrno = 0;
+
+ assert( pFile );
+ OSTRACE(("LOCK %d %s was %s(%s,%d) pid=%d (unix)\n", pFile->h,
+ azFileLock(eFileLock), azFileLock(pFile->eFileLock),
+ azFileLock(pFile->pInode->eFileLock), pFile->pInode->nShared,
+ osGetpid(0)));
+
+ /* If there is already a lock of this type or more restrictive on the
+ ** unixFile, do nothing. Don't use the end_lock: exit path, as
+ ** unixEnterMutex() hasn't been called yet.
+ */
+ if( pFile->eFileLock>=eFileLock ){
+ OSTRACE(("LOCK %d %s ok (already held) (unix)\n", pFile->h,
+ azFileLock(eFileLock)));
+ return SQLITE_OK;
+ }
+
+ /* Make sure the locking sequence is correct.
+ ** (1) We never move from unlocked to anything higher than shared lock.
+  ** (2) SQLite never explicitly requests a pending lock.
+  ** (3) A shared lock is always held when a reserved lock is requested.
+ */
+ assert( pFile->eFileLock!=NO_LOCK || eFileLock==SHARED_LOCK );
+ assert( eFileLock!=PENDING_LOCK );
+ assert( eFileLock!=RESERVED_LOCK || pFile->eFileLock==SHARED_LOCK );
+
+ /* This mutex is needed because pFile->pInode is shared across threads
+ */
+ pInode = pFile->pInode;
+ sqlite3_mutex_enter(pInode->pLockMutex);
+
+ /* If some thread using this PID has a lock via a different unixFile*
+ ** handle that precludes the requested lock, return BUSY.
+ */
+ if( (pFile->eFileLock!=pInode->eFileLock &&
+ (pInode->eFileLock>=PENDING_LOCK || eFileLock>SHARED_LOCK))
+ ){
+ rc = SQLITE_BUSY;
+ goto end_lock;
+ }
+
+ /* If a SHARED lock is requested, and some thread using this PID already
+ ** has a SHARED or RESERVED lock, then increment reference counts and
+ ** return SQLITE_OK.
+ */
+ if( eFileLock==SHARED_LOCK &&
+ (pInode->eFileLock==SHARED_LOCK || pInode->eFileLock==RESERVED_LOCK) ){
+ assert( eFileLock==SHARED_LOCK );
+ assert( pFile->eFileLock==0 );
+ assert( pInode->nShared>0 );
+ pFile->eFileLock = SHARED_LOCK;
+ pInode->nShared++;
+ pInode->nLock++;
+ goto end_lock;
+ }
+
+
+ /* A PENDING lock is needed before acquiring a SHARED lock and before
+ ** acquiring an EXCLUSIVE lock. For the SHARED lock, the PENDING will
+ ** be released.
+ */
+ lock.l_len = 1L;
+ lock.l_whence = SEEK_SET;
+ if( eFileLock==SHARED_LOCK
+ || (eFileLock==EXCLUSIVE_LOCK && pFile->eFileLock<PENDING_LOCK)
+ ){
+ lock.l_type = (eFileLock==SHARED_LOCK?F_RDLCK:F_WRLCK);
+ lock.l_start = PENDING_BYTE;
+ if( unixFileLock(pFile, &lock) ){
+ tErrno = errno;
+ rc = sqliteErrorFromPosixError(tErrno, SQLITE_IOERR_LOCK);
+ if( rc!=SQLITE_BUSY ){
+ storeLastErrno(pFile, tErrno);
+ }
+ goto end_lock;
+ }
+ }
+
+
+ /* If control gets to this point, then actually go ahead and make
+ ** operating system calls for the specified lock.
+ */
+ if( eFileLock==SHARED_LOCK ){
+ assert( pInode->nShared==0 );
+ assert( pInode->eFileLock==0 );
+ assert( rc==SQLITE_OK );
+
+ /* Now get the read-lock */
+ lock.l_start = SHARED_FIRST;
+ lock.l_len = SHARED_SIZE;
+ if( unixFileLock(pFile, &lock) ){
+ tErrno = errno;
+ rc = sqliteErrorFromPosixError(tErrno, SQLITE_IOERR_LOCK);
+ }
+
+ /* Drop the temporary PENDING lock */
+ lock.l_start = PENDING_BYTE;
+ lock.l_len = 1L;
+ lock.l_type = F_UNLCK;
+ if( unixFileLock(pFile, &lock) && rc==SQLITE_OK ){
+ /* This could happen with a network mount */
+ tErrno = errno;
+ rc = SQLITE_IOERR_UNLOCK;
+ }
+
+ if( rc ){
+ if( rc!=SQLITE_BUSY ){
+ storeLastErrno(pFile, tErrno);
+ }
+ goto end_lock;
+ }else{
+ pFile->eFileLock = SHARED_LOCK;
+ pInode->nLock++;
+ pInode->nShared = 1;
+ }
+ }else if( eFileLock==EXCLUSIVE_LOCK && pInode->nShared>1 ){
+ /* We are trying for an exclusive lock but another thread in this
+ ** same process is still holding a shared lock. */
+ rc = SQLITE_BUSY;
+ }else{
+ /* The request was for a RESERVED or EXCLUSIVE lock. It is
+ ** assumed that there is a SHARED or greater lock on the file
+ ** already.
+ */
+ assert( 0!=pFile->eFileLock );
+ lock.l_type = F_WRLCK;
+
+ assert( eFileLock==RESERVED_LOCK || eFileLock==EXCLUSIVE_LOCK );
+ if( eFileLock==RESERVED_LOCK ){
+ lock.l_start = RESERVED_BYTE;
+ lock.l_len = 1L;
+ }else{
+ lock.l_start = SHARED_FIRST;
+ lock.l_len = SHARED_SIZE;
+ }
+
+ if( unixFileLock(pFile, &lock) ){
+ tErrno = errno;
+ rc = sqliteErrorFromPosixError(tErrno, SQLITE_IOERR_LOCK);
+ if( rc!=SQLITE_BUSY ){
+ storeLastErrno(pFile, tErrno);
+ }
+ }
+ }
+
+
+#ifdef SQLITE_DEBUG
+ /* Set up the transaction-counter change checking flags when
+ ** transitioning from a SHARED to a RESERVED lock. The change
+ ** from SHARED to RESERVED marks the beginning of a normal
+ ** write operation (not a hot journal rollback).
+ */
+ if( rc==SQLITE_OK
+ && pFile->eFileLock<=SHARED_LOCK
+ && eFileLock==RESERVED_LOCK
+ ){
+ pFile->transCntrChng = 0;
+ pFile->dbUpdate = 0;
+ pFile->inNormalWrite = 1;
+ }
+#endif
+
+
+ if( rc==SQLITE_OK ){
+ pFile->eFileLock = eFileLock;
+ pInode->eFileLock = eFileLock;
+ }else if( eFileLock==EXCLUSIVE_LOCK ){
+ pFile->eFileLock = PENDING_LOCK;
+ pInode->eFileLock = PENDING_LOCK;
+ }
+
+end_lock:
+ sqlite3_mutex_leave(pInode->pLockMutex);
+ OSTRACE(("LOCK %d %s %s (unix)\n", pFile->h, azFileLock(eFileLock),
+ rc==SQLITE_OK ? "ok" : "failed"));
+ return rc;
+}
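+
+/* Hedged illustration (compiled out) of the SHARED-lock protocol that
+** the comment at the top of unixLock() describes: read-lock the
+** 'pending byte', read-lock the 'shared byte range', then release the
+** 'pending byte'.  PENDING_BYTE, SHARED_FIRST and SHARED_SIZE are the
+** os.h symbols referred to above.
+*/
+#if 0 /* illustration only */
+static int exampleAcquireShared(int fd){
+  struct flock lk;
+  memset(&lk, 0, sizeof(lk));
+  lk.l_whence = SEEK_SET;
+  lk.l_type = F_RDLCK;  lk.l_start = PENDING_BYTE;  lk.l_len = 1;
+  if( fcntl(fd, F_SETLK, &lk) ) return -1;   /* a writer is pending */
+  lk.l_start = SHARED_FIRST;  lk.l_len = SHARED_SIZE;
+  if( fcntl(fd, F_SETLK, &lk) ){             /* shared range busy */
+    lk.l_type = F_UNLCK;  lk.l_start = PENDING_BYTE;  lk.l_len = 1;
+    fcntl(fd, F_SETLK, &lk);                 /* undo the pending byte */
+    return -1;
+  }
+  lk.l_type = F_UNLCK;  lk.l_start = PENDING_BYTE;  lk.l_len = 1;
+  fcntl(fd, F_SETLK, &lk);                   /* drop the pending byte */
+  return 0;                                  /* SHARED lock held */
+}
+#endif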
+
+/*
+** Add the file descriptor used by file handle pFile to the corresponding
+** pUnused list.
+*/
+static void setPendingFd(unixFile *pFile){
+ unixInodeInfo *pInode = pFile->pInode;
+ UnixUnusedFd *p = pFile->pPreallocatedUnused;
+ assert( unixFileMutexHeld(pFile) );
+ p->pNext = pInode->pUnused;
+ pInode->pUnused = p;
+ pFile->h = -1;
+ pFile->pPreallocatedUnused = 0;
+}
+
+/*
+** Lower the locking level on file descriptor pFile to eFileLock. eFileLock
+** must be either NO_LOCK or SHARED_LOCK.
+**
+** If the locking level of the file descriptor is already at or below
+** the requested locking level, this routine is a no-op.
+**
+** If handleNFSUnlock is true, then on downgrading an EXCLUSIVE_LOCK to SHARED
+** the byte range is divided into 2 parts and the first part is unlocked then
+** set to a read lock, then the other part is simply unlocked. This works
+** around a bug in BSD NFS lockd (also seen on MacOSX 10.3+) that fails to
+** remove the write lock on a region when a read lock is set.
+*/
+static int posixUnlock(sqlite3_file *id, int eFileLock, int handleNFSUnlock){
+ unixFile *pFile = (unixFile*)id;
+ unixInodeInfo *pInode;
+ struct flock lock;
+ int rc = SQLITE_OK;
+
+ assert( pFile );
+ OSTRACE(("UNLOCK %d %d was %d(%d,%d) pid=%d (unix)\n", pFile->h, eFileLock,
+ pFile->eFileLock, pFile->pInode->eFileLock, pFile->pInode->nShared,
+ osGetpid(0)));
+
+ assert( eFileLock<=SHARED_LOCK );
+ if( pFile->eFileLock<=eFileLock ){
+ return SQLITE_OK;
+ }
+ pInode = pFile->pInode;
+ sqlite3_mutex_enter(pInode->pLockMutex);
+ assert( pInode->nShared!=0 );
+ if( pFile->eFileLock>SHARED_LOCK ){
+ assert( pInode->eFileLock==pFile->eFileLock );
+
+#ifdef SQLITE_DEBUG
+ /* When reducing a lock such that other processes can start
+ ** reading the database file again, make sure that the
+ ** transaction counter was updated if any part of the database
+ ** file changed. If the transaction counter is not updated,
+ ** other connections to the same file might not realize that
+ ** the file has changed and hence might not know to flush their
+ ** cache. The use of a stale cache can lead to database corruption.
+ */
+ pFile->inNormalWrite = 0;
+#endif
+
+ /* downgrading to a shared lock on NFS involves clearing the write lock
+    ** before establishing the read lock - to avoid a race condition we downgrade
+ ** the lock in 2 blocks, so that part of the range will be covered by a
+ ** write lock until the rest is covered by a read lock:
+ ** 1: [WWWWW]
+ ** 2: [....W]
+ ** 3: [RRRRW]
+ ** 4: [RRRR.]
+ */
+ if( eFileLock==SHARED_LOCK ){
+#if !defined(__APPLE__) || !SQLITE_ENABLE_LOCKING_STYLE
+ (void)handleNFSUnlock;
+ assert( handleNFSUnlock==0 );
+#endif
+#if defined(__APPLE__) && SQLITE_ENABLE_LOCKING_STYLE
+ if( handleNFSUnlock ){
+ int tErrno; /* Error code from system call errors */
+ off_t divSize = SHARED_SIZE - 1;
+
+ lock.l_type = F_UNLCK;
+ lock.l_whence = SEEK_SET;
+ lock.l_start = SHARED_FIRST;
+ lock.l_len = divSize;
+ if( unixFileLock(pFile, &lock)==(-1) ){
+ tErrno = errno;
+ rc = SQLITE_IOERR_UNLOCK;
+ storeLastErrno(pFile, tErrno);
+ goto end_unlock;
+ }
+ lock.l_type = F_RDLCK;
+ lock.l_whence = SEEK_SET;
+ lock.l_start = SHARED_FIRST;
+ lock.l_len = divSize;
+ if( unixFileLock(pFile, &lock)==(-1) ){
+ tErrno = errno;
+ rc = sqliteErrorFromPosixError(tErrno, SQLITE_IOERR_RDLOCK);
+ if( IS_LOCK_ERROR(rc) ){
+ storeLastErrno(pFile, tErrno);
+ }
+ goto end_unlock;
+ }
+ lock.l_type = F_UNLCK;
+ lock.l_whence = SEEK_SET;
+ lock.l_start = SHARED_FIRST+divSize;
+ lock.l_len = SHARED_SIZE-divSize;
+ if( unixFileLock(pFile, &lock)==(-1) ){
+ tErrno = errno;
+ rc = SQLITE_IOERR_UNLOCK;
+ storeLastErrno(pFile, tErrno);
+ goto end_unlock;
+ }
+ }else
+#endif /* defined(__APPLE__) && SQLITE_ENABLE_LOCKING_STYLE */
+ {
+ lock.l_type = F_RDLCK;
+ lock.l_whence = SEEK_SET;
+ lock.l_start = SHARED_FIRST;
+ lock.l_len = SHARED_SIZE;
+ if( unixFileLock(pFile, &lock) ){
+ /* In theory, the call to unixFileLock() cannot fail because another
+ ** process is holding an incompatible lock. If it does, this
+ ** indicates that the other process is not following the locking
+ ** protocol. If this happens, return SQLITE_IOERR_RDLOCK. Returning
+ ** SQLITE_BUSY would confuse the upper layer (in practice it causes
+ ** an assert to fail). */
+ rc = SQLITE_IOERR_RDLOCK;
+ storeLastErrno(pFile, errno);
+ goto end_unlock;
+ }
+ }
+ }
+ lock.l_type = F_UNLCK;
+ lock.l_whence = SEEK_SET;
+ lock.l_start = PENDING_BYTE;
+ lock.l_len = 2L; assert( PENDING_BYTE+1==RESERVED_BYTE );
+ if( unixFileLock(pFile, &lock)==0 ){
+ pInode->eFileLock = SHARED_LOCK;
+ }else{
+ rc = SQLITE_IOERR_UNLOCK;
+ storeLastErrno(pFile, errno);
+ goto end_unlock;
+ }
+ }
+ if( eFileLock==NO_LOCK ){
+ /* Decrement the shared lock counter. Release the lock using an
+ ** OS call only when all threads in this same process have released
+ ** the lock.
+ */
+ pInode->nShared--;
+ if( pInode->nShared==0 ){
+ lock.l_type = F_UNLCK;
+ lock.l_whence = SEEK_SET;
+ lock.l_start = lock.l_len = 0L;
+ if( unixFileLock(pFile, &lock)==0 ){
+ pInode->eFileLock = NO_LOCK;
+ }else{
+ rc = SQLITE_IOERR_UNLOCK;
+ storeLastErrno(pFile, errno);
+ pInode->eFileLock = NO_LOCK;
+ pFile->eFileLock = NO_LOCK;
+ }
+ }
+
+ /* Decrement the count of locks against this same file. When the
+ ** count reaches zero, close any other file descriptors whose close
+ ** was deferred because of outstanding locks.
+ */
+ pInode->nLock--;
+ assert( pInode->nLock>=0 );
+ if( pInode->nLock==0 ) closePendingFds(pFile);
+ }
+
+end_unlock:
+ sqlite3_mutex_leave(pInode->pLockMutex);
+ if( rc==SQLITE_OK ){
+ pFile->eFileLock = eFileLock;
+ }
+ return rc;
+}
+
+/*
+** Lower the locking level on file descriptor pFile to eFileLock. eFileLock
+** must be either NO_LOCK or SHARED_LOCK.
+**
+** If the locking level of the file descriptor is already at or below
+** the requested locking level, this routine is a no-op.
+*/
+static int unixUnlock(sqlite3_file *id, int eFileLock){
+#if SQLITE_MAX_MMAP_SIZE>0
+ assert( eFileLock==SHARED_LOCK || ((unixFile *)id)->nFetchOut==0 );
+#endif
+ return posixUnlock(id, eFileLock, 0);
+}
+
+#if SQLITE_MAX_MMAP_SIZE>0
+static int unixMapfile(unixFile *pFd, i64 nByte);
+static void unixUnmapfile(unixFile *pFd);
+#endif
+
+/*
+** This function performs the parts of the "close file" operation
+** common to all locking schemes. It closes the directory and file
+** handles, if they are valid, and sets all fields of the unixFile
+** structure to 0.
+**
+** It is *not* necessary to hold the mutex when this routine is called,
+** even on VxWorks. A mutex will be acquired on VxWorks by the
+** vxworksReleaseFileId() routine.
+*/
+static int closeUnixFile(sqlite3_file *id){
+ unixFile *pFile = (unixFile*)id;
+#if SQLITE_MAX_MMAP_SIZE>0
+ unixUnmapfile(pFile);
+#endif
+ if( pFile->h>=0 ){
+ robust_close(pFile, pFile->h, __LINE__);
+ pFile->h = -1;
+ }
+#if OS_VXWORKS
+ if( pFile->pId ){
+ if( pFile->ctrlFlags & UNIXFILE_DELETE ){
+ osUnlink(pFile->pId->zCanonicalName);
+ }
+ vxworksReleaseFileId(pFile->pId);
+ pFile->pId = 0;
+ }
+#endif
+#ifdef SQLITE_UNLINK_AFTER_CLOSE
+ if( pFile->ctrlFlags & UNIXFILE_DELETE ){
+ osUnlink(pFile->zPath);
+ sqlite3_free(*(char**)&pFile->zPath);
+ pFile->zPath = 0;
+ }
+#endif
+ OSTRACE(("CLOSE %-3d\n", pFile->h));
+ OpenCounter(-1);
+ sqlite3_free(pFile->pPreallocatedUnused);
+ memset(pFile, 0, sizeof(unixFile));
+ return SQLITE_OK;
+}
+
+/*
+** Close a file.
+*/
+static int unixClose(sqlite3_file *id){
+ int rc = SQLITE_OK;
+ unixFile *pFile = (unixFile *)id;
+ unixInodeInfo *pInode = pFile->pInode;
+
+ assert( pInode!=0 );
+ verifyDbFile(pFile);
+ unixUnlock(id, NO_LOCK);
+ assert( unixFileMutexNotheld(pFile) );
+ unixEnterMutex();
+
+ /* unixFile.pInode is always valid here. Otherwise, a different close
+ ** routine (e.g. nolockClose()) would be called instead.
+ */
+ assert( pFile->pInode->nLock>0 || pFile->pInode->bProcessLock==0 );
+ sqlite3_mutex_enter(pInode->pLockMutex);
+ if( pInode->nLock ){
+ /* If there are outstanding locks, do not actually close the file just
+ ** yet because that would clear those locks. Instead, add the file
+ ** descriptor to pInode->pUnused list. It will be automatically closed
+ ** when the last lock is cleared.
+ */
+ setPendingFd(pFile);
+ }
+ sqlite3_mutex_leave(pInode->pLockMutex);
+ releaseInodeInfo(pFile);
+ rc = closeUnixFile(id);
+ unixLeaveMutex();
+ return rc;
+}
+
+/************** End of the posix advisory lock implementation *****************
+******************************************************************************/
+
+/******************************************************************************
+****************************** No-op Locking **********************************
+**
+** Of the various locking implementations available, this is by far the
+** simplest: locking is ignored. No attempt is made to lock the database
+** file for reading or writing.
+**
+** This locking mode is appropriate for use on read-only databases
+** (e.g. databases burned onto a CD-ROM).  It can
+** also be used if the application employs some external mechanism to
+** prevent simultaneous access of the same database by two or more
+** database connections. But there is a serious risk of database
+** corruption if this locking mode is used in situations where multiple
+** database connections are accessing the same database file at the same
+** time and one or more of those connections are writing.
+*/
+
+static int nolockCheckReservedLock(sqlite3_file *NotUsed, int *pResOut){
+ UNUSED_PARAMETER(NotUsed);
+ *pResOut = 0;
+ return SQLITE_OK;
+}
+static int nolockLock(sqlite3_file *NotUsed, int NotUsed2){
+ UNUSED_PARAMETER2(NotUsed, NotUsed2);
+ return SQLITE_OK;
+}
+static int nolockUnlock(sqlite3_file *NotUsed, int NotUsed2){
+ UNUSED_PARAMETER2(NotUsed, NotUsed2);
+ return SQLITE_OK;
+}
+
+/*
+** Close the file.
+*/
+static int nolockClose(sqlite3_file *id) {
+ return closeUnixFile(id);
+}
+
+/******************* End of the no-op lock implementation *********************
+******************************************************************************/
+
+/******************************************************************************
+************************* Begin dot-file Locking ******************************
+**
+** The dotfile locking implementation uses the existence of a separate lock
+** file (really a directory) to control access to the database.  This works
+** on just about every filesystem imaginable. But there are serious downsides:
+**
+** (1) There is zero concurrency. A single reader blocks all other
+** connections from reading or writing the database.
+**
+** (2) An application crash or power loss can leave stale lock files
+** sitting around that need to be cleared manually.
+**
+** Nevertheless, a dotlock is an appropriate locking mode for use if no
+** other locking strategy is available.
+**
+** Dotfile locking works by creating a subdirectory in the same directory as
+** the database and with the same name but with a ".lock" extension added.
+** The existence of a lock directory implies an EXCLUSIVE lock. All other
+** lock types (SHARED, RESERVED, PENDING) are mapped into EXCLUSIVE.
+*/
+
+/*
+** The file suffix added to the database filename in order to create the
+** lock directory.
+*/
+#define DOTLOCK_SUFFIX ".lock"
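+
+/* Hedged illustration (compiled out): mkdir() is atomic on just about
+** every filesystem, so creating "<db>.lock" either succeeds (lock
+** acquired) or fails with EEXIST (someone else holds it); rmdir()
+** releases it.  This mirrors dotlockLock()/dotlockUnlock() below.
+*/
+#if 0 /* illustration only */
+static int exampleDotLock(const char *zLockDir){
+  if( osMkdir(zLockDir, 0777) ){
+    return errno==EEXIST ? SQLITE_BUSY : SQLITE_IOERR_LOCK;
+  }
+  return SQLITE_OK;  /* release later with osRmdir(zLockDir) */
+}
+#endif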
+
+/*
+** This routine checks if there is a RESERVED lock held on the specified
+** file by this or any other process. If such a lock is held, set *pResOut
+** to a non-zero value otherwise *pResOut is set to zero. The return value
+** is set to SQLITE_OK unless an I/O error occurs during lock checking.
+**
+** In dotfile locking, either a lock exists or it does not. So in this
+** variation of CheckReservedLock(), *pResOut is set to true if any lock
+** is held on the file and false if the file is unlocked.
+*/
+static int dotlockCheckReservedLock(sqlite3_file *id, int *pResOut) {
+ int rc = SQLITE_OK;
+ int reserved = 0;
+ unixFile *pFile = (unixFile*)id;
+
+ SimulateIOError( return SQLITE_IOERR_CHECKRESERVEDLOCK; );
+
+ assert( pFile );
+ reserved = osAccess((const char*)pFile->lockingContext, 0)==0;
+ OSTRACE(("TEST WR-LOCK %d %d %d (dotlock)\n", pFile->h, rc, reserved));
+ *pResOut = reserved;
+ return rc;
+}
+
+/*
+** Lock the file with the lock specified by parameter eFileLock - one
+** of the following:
+**
+** (1) SHARED_LOCK
+** (2) RESERVED_LOCK
+** (3) PENDING_LOCK
+** (4) EXCLUSIVE_LOCK
+**
+** Sometimes when requesting one lock state, additional lock states
+** are inserted in between. The locking might fail on one of the later
+** transitions, leaving the lock state different from where it started but
+** still short of its goal. The following chart shows the allowed
+** transitions and the inserted intermediate states:
+**
+** UNLOCKED -> SHARED
+** SHARED -> RESERVED
+** SHARED -> (PENDING) -> EXCLUSIVE
+** RESERVED -> (PENDING) -> EXCLUSIVE
+** PENDING -> EXCLUSIVE
+**
+** This routine will only increase a lock. Use the sqlite3OsUnlock()
+** routine to lower a locking level.
+**
+** With dotfile locking, we really only support state (4): EXCLUSIVE.
+** But we track the other locking levels internally.
+*/
+static int dotlockLock(sqlite3_file *id, int eFileLock) {
+ unixFile *pFile = (unixFile*)id;
+ char *zLockFile = (char *)pFile->lockingContext;
+ int rc = SQLITE_OK;
+
+
+ /* If we have any lock, then the lock file already exists. All we have
+ ** to do is adjust our internal record of the lock level.
+ */
+ if( pFile->eFileLock > NO_LOCK ){
+ pFile->eFileLock = eFileLock;
+ /* Always update the timestamp on the old file */
+#ifdef HAVE_UTIME
+ utime(zLockFile, NULL);
+#else
+ utimes(zLockFile, NULL);
+#endif
+ return SQLITE_OK;
+ }
+
+ /* grab an exclusive lock */
+ rc = osMkdir(zLockFile, 0777);
+ if( rc<0 ){
+ /* failed to open/create the lock directory */
+ int tErrno = errno;
+ if( EEXIST == tErrno ){
+ rc = SQLITE_BUSY;
+ } else {
+ rc = sqliteErrorFromPosixError(tErrno, SQLITE_IOERR_LOCK);
+ if( rc!=SQLITE_BUSY ){
+ storeLastErrno(pFile, tErrno);
+ }
+ }
+ return rc;
+ }
+
+ /* got it, set the type and return ok */
+ pFile->eFileLock = eFileLock;
+ return rc;
+}
+
+/*
+** Lower the locking level on file descriptor pFile to eFileLock. eFileLock
+** must be either NO_LOCK or SHARED_LOCK.
+**
+** If the locking level of the file descriptor is already at or below
+** the requested locking level, this routine is a no-op.
+**
+** When the locking level reaches NO_LOCK, delete the lock file.
+*/
+static int dotlockUnlock(sqlite3_file *id, int eFileLock) {
+ unixFile *pFile = (unixFile*)id;
+ char *zLockFile = (char *)pFile->lockingContext;
+ int rc;
+
+ assert( pFile );
+ OSTRACE(("UNLOCK %d %d was %d pid=%d (dotlock)\n", pFile->h, eFileLock,
+ pFile->eFileLock, osGetpid(0)));
+ assert( eFileLock<=SHARED_LOCK );
+
+ /* no-op if possible */
+ if( pFile->eFileLock==eFileLock ){
+ return SQLITE_OK;
+ }
+
+ /* To downgrade to shared, simply update our internal notion of the
+ ** lock state. No need to mess with the file on disk.
+ */
+ if( eFileLock==SHARED_LOCK ){
+ pFile->eFileLock = SHARED_LOCK;
+ return SQLITE_OK;
+ }
+
+ /* To fully unlock the database, delete the lock file */
+ assert( eFileLock==NO_LOCK );
+ rc = osRmdir(zLockFile);
+ if( rc<0 ){
+ int tErrno = errno;
+ if( tErrno==ENOENT ){
+ rc = SQLITE_OK;
+ }else{
+ rc = SQLITE_IOERR_UNLOCK;
+ storeLastErrno(pFile, tErrno);
+ }
+ return rc;
+ }
+ pFile->eFileLock = NO_LOCK;
+ return SQLITE_OK;
+}
+
+/*
+** Close a file. Make sure the lock has been released before closing.
+*/
+static int dotlockClose(sqlite3_file *id) {
+ unixFile *pFile = (unixFile*)id;
+ assert( id!=0 );
+ dotlockUnlock(id, NO_LOCK);
+ sqlite3_free(pFile->lockingContext);
+ return closeUnixFile(id);
+}
+/****************** End of the dot-file lock implementation *******************
+******************************************************************************/
+
+/******************************************************************************
+************************** Begin flock Locking ********************************
+**
+** Use the flock() system call to do file locking.
+**
+** flock() locking is like dot-file locking in that the various
+** fine-grain locking levels supported by SQLite are collapsed into
+** a single exclusive lock. In other words, SHARED, RESERVED, and
+** PENDING locks are the same thing as an EXCLUSIVE lock. SQLite
+** still works when you do this, but concurrency is reduced since
+** only a single process can be reading the database at a time.
+**
+** Omit this section if SQLITE_ENABLE_LOCKING_STYLE is turned off
+*/
+#if SQLITE_ENABLE_LOCKING_STYLE
+
+/*
+** Retry flock() calls that fail with EINTR
+*/
+#ifdef EINTR
+static int robust_flock(int fd, int op){
+ int rc;
+ do{ rc = flock(fd,op); }while( rc<0 && errno==EINTR );
+ return rc;
+}
+#else
+# define robust_flock(a,b) flock(a,b)
+#endif
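+
+/* Hedged usage sketch (compiled out): with flock(), every SQLite lock
+** level at SHARED or above maps onto one non-blocking exclusive lock,
+** so "try, and treat failure as busy" is essentially the whole protocol.
+*/
+#if 0 /* illustration only */
+static int exampleFlockTry(int fd){
+  if( robust_flock(fd, LOCK_EX|LOCK_NB) ){
+    return SQLITE_BUSY;  /* another process holds the lock */
+  }
+  return SQLITE_OK;      /* release with robust_flock(fd, LOCK_UN) */
+}
+#endif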
+
+
+/*
+** This routine checks if there is a RESERVED lock held on the specified
+** file by this or any other process. If such a lock is held, set *pResOut
+** to a non-zero value otherwise *pResOut is set to zero. The return value
+** is set to SQLITE_OK unless an I/O error occurs during lock checking.
+*/
+static int flockCheckReservedLock(sqlite3_file *id, int *pResOut){
+ int rc = SQLITE_OK;
+ int reserved = 0;
+ unixFile *pFile = (unixFile*)id;
+
+ SimulateIOError( return SQLITE_IOERR_CHECKRESERVEDLOCK; );
+
+ assert( pFile );
+
+ /* Check if a thread in this process holds such a lock */
+ if( pFile->eFileLock>SHARED_LOCK ){
+ reserved = 1;
+ }
+
+ /* Otherwise see if some other process holds it. */
+ if( !reserved ){
+ /* attempt to get the lock */
+ int lrc = robust_flock(pFile->h, LOCK_EX | LOCK_NB);
+ if( !lrc ){
+ /* got the lock, unlock it */
+ lrc = robust_flock(pFile->h, LOCK_UN);
+ if ( lrc ) {
+ int tErrno = errno;
+ /* unlock failed with an error */
+ lrc = SQLITE_IOERR_UNLOCK;
+ storeLastErrno(pFile, tErrno);
+ rc = lrc;
+ }
+ } else {
+ int tErrno = errno;
+ reserved = 1;
+ /* someone else might have it reserved */
+ lrc = sqliteErrorFromPosixError(tErrno, SQLITE_IOERR_LOCK);
+ if( IS_LOCK_ERROR(lrc) ){
+ storeLastErrno(pFile, tErrno);
+ rc = lrc;
+ }
+ }
+ }
+ OSTRACE(("TEST WR-LOCK %d %d %d (flock)\n", pFile->h, rc, reserved));
+
+#ifdef SQLITE_IGNORE_FLOCK_LOCK_ERRORS
+ if( (rc & 0xff) == SQLITE_IOERR ){
+ rc = SQLITE_OK;
+ reserved=1;
+ }
+#endif /* SQLITE_IGNORE_FLOCK_LOCK_ERRORS */
+ *pResOut = reserved;
+ return rc;
+}
+
+/*
+** Lock the file with the lock specified by parameter eFileLock - one
+** of the following:
+**
+** (1) SHARED_LOCK
+** (2) RESERVED_LOCK
+** (3) PENDING_LOCK
+** (4) EXCLUSIVE_LOCK
+**
+** Sometimes when requesting one lock state, additional lock states
+** are inserted in between. The locking might fail on one of the later
+** transitions, leaving the lock state different from where it started but
+** still short of its goal. The following chart shows the allowed
+** transitions and the inserted intermediate states:
+**
+** UNLOCKED -> SHARED
+** SHARED -> RESERVED
+** SHARED -> (PENDING) -> EXCLUSIVE
+** RESERVED -> (PENDING) -> EXCLUSIVE
+** PENDING -> EXCLUSIVE
+**
+** flock() only really supports EXCLUSIVE locks.  We track intermediate
+** lock states in the sqlite3_file structure, but all locks SHARED or
+** above are really EXCLUSIVE locks and exclude all other processes from
+** accessing the file.
+**
+** This routine will only increase a lock. Use the sqlite3OsUnlock()
+** routine to lower a locking level.
+*/
+static int flockLock(sqlite3_file *id, int eFileLock) {
+ int rc = SQLITE_OK;
+ unixFile *pFile = (unixFile*)id;
+
+ assert( pFile );
+
+ /* if we already have a lock, it is exclusive.
+ ** Just adjust level and punt on outta here. */
+ if (pFile->eFileLock > NO_LOCK) {
+ pFile->eFileLock = eFileLock;
+ return SQLITE_OK;
+ }
+
+ /* grab an exclusive lock */
+
+ if (robust_flock(pFile->h, LOCK_EX | LOCK_NB)) {
+ int tErrno = errno;
+ /* didn't get, must be busy */
+ rc = sqliteErrorFromPosixError(tErrno, SQLITE_IOERR_LOCK);
+ if( IS_LOCK_ERROR(rc) ){
+ storeLastErrno(pFile, tErrno);
+ }
+ } else {
+ /* got it, set the type and return ok */
+ pFile->eFileLock = eFileLock;
+ }
+ OSTRACE(("LOCK %d %s %s (flock)\n", pFile->h, azFileLock(eFileLock),
+ rc==SQLITE_OK ? "ok" : "failed"));
+#ifdef SQLITE_IGNORE_FLOCK_LOCK_ERRORS
+ if( (rc & 0xff) == SQLITE_IOERR ){
+ rc = SQLITE_BUSY;
+ }
+#endif /* SQLITE_IGNORE_FLOCK_LOCK_ERRORS */
+ return rc;
+}
+
+
+/*
+** Lower the locking level on file descriptor pFile to eFileLock. eFileLock
+** must be either NO_LOCK or SHARED_LOCK.
+**
+** If the locking level of the file descriptor is already at or below
+** the requested locking level, this routine is a no-op.
+*/
+static int flockUnlock(sqlite3_file *id, int eFileLock) {
+ unixFile *pFile = (unixFile*)id;
+
+ assert( pFile );
+ OSTRACE(("UNLOCK %d %d was %d pid=%d (flock)\n", pFile->h, eFileLock,
+ pFile->eFileLock, osGetpid(0)));
+ assert( eFileLock<=SHARED_LOCK );
+
+ /* no-op if possible */
+ if( pFile->eFileLock==eFileLock ){
+ return SQLITE_OK;
+ }
+
+ /* shared can just be set because we always have an exclusive */
+ if (eFileLock==SHARED_LOCK) {
+ pFile->eFileLock = eFileLock;
+ return SQLITE_OK;
+ }
+
+ /* no, really, unlock. */
+ if( robust_flock(pFile->h, LOCK_UN) ){
+#ifdef SQLITE_IGNORE_FLOCK_LOCK_ERRORS
+ return SQLITE_OK;
+#endif /* SQLITE_IGNORE_FLOCK_LOCK_ERRORS */
+ return SQLITE_IOERR_UNLOCK;
+ }else{
+ pFile->eFileLock = NO_LOCK;
+ return SQLITE_OK;
+ }
+}
+
+/*
+** Close a file.
+*/
+static int flockClose(sqlite3_file *id) {
+ assert( id!=0 );
+ flockUnlock(id, NO_LOCK);
+ return closeUnixFile(id);
+}
+
+#endif /* SQLITE_ENABLE_LOCKING_STYLE */
+
+/******************* End of the flock lock implementation *********************
+******************************************************************************/
+
+/******************************************************************************
+************************ Begin Named Semaphore Locking ************************
+**
+** Named semaphore locking is only supported on VxWorks.
+**
+** Semaphore locking is like dot-lock and flock in that it really only
+** supports EXCLUSIVE locking. Only a single process can read or write
+** the database file at a time. This reduces potential concurrency, but
+** makes the lock implementation much easier.
+*/
+#if OS_VXWORKS
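+
+/* Hedged illustration (compiled out): a named POSIX semaphore with an
+** initial count of 1 behaves as a cross-process mutex.  sem_trywait()
+** is the non-blocking acquire (EAGAIN means another process holds it)
+** and sem_post() the release, matching semXLock()/semXUnlock() below.
+*/
+#if 0 /* illustration only */
+static int exampleSemTryLock(sem_t *pSem){
+  if( sem_trywait(pSem)==(-1) ){
+    return errno==EAGAIN ? SQLITE_BUSY : SQLITE_IOERR_LOCK;
+  }
+  return SQLITE_OK;  /* release with sem_post(pSem) */
+}
+#endif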
+
+/*
+** This routine checks if there is a RESERVED lock held on the specified
+** file by this or any other process. If such a lock is held, set *pResOut
+** to a non-zero value otherwise *pResOut is set to zero. The return value
+** is set to SQLITE_OK unless an I/O error occurs during lock checking.
+*/
+static int semXCheckReservedLock(sqlite3_file *id, int *pResOut) {
+ int rc = SQLITE_OK;
+ int reserved = 0;
+ unixFile *pFile = (unixFile*)id;
+
+ SimulateIOError( return SQLITE_IOERR_CHECKRESERVEDLOCK; );
+
+ assert( pFile );
+
+ /* Check if a thread in this process holds such a lock */
+ if( pFile->eFileLock>SHARED_LOCK ){
+ reserved = 1;
+ }
+
+ /* Otherwise see if some other process holds it. */
+ if( !reserved ){
+ sem_t *pSem = pFile->pInode->pSem;
+
+ if( sem_trywait(pSem)==-1 ){
+ int tErrno = errno;
+ if( EAGAIN != tErrno ){
+ rc = sqliteErrorFromPosixError(tErrno, SQLITE_IOERR_CHECKRESERVEDLOCK);
+ storeLastErrno(pFile, tErrno);
+ } else {
+ /* someone else has the lock when we are in NO_LOCK */
+ reserved = (pFile->eFileLock < SHARED_LOCK);
+ }
+ }else{
+ /* we could have it if we want it */
+ sem_post(pSem);
+ }
+ }
+ OSTRACE(("TEST WR-LOCK %d %d %d (sem)\n", pFile->h, rc, reserved));
+
+ *pResOut = reserved;
+ return rc;
+}
+
+/*
+** Lock the file with the lock specified by parameter eFileLock - one
+** of the following:
+**
+** (1) SHARED_LOCK
+** (2) RESERVED_LOCK
+** (3) PENDING_LOCK
+** (4) EXCLUSIVE_LOCK
+**
+** Sometimes when requesting one lock state, additional lock states
+** are inserted in between. The locking might fail on one of the later
+** transitions, leaving the lock state different from where it started but
+** still short of its goal. The following chart shows the allowed
+** transitions and the inserted intermediate states:
+**
+** UNLOCKED -> SHARED
+** SHARED -> RESERVED
+** SHARED -> (PENDING) -> EXCLUSIVE
+** RESERVED -> (PENDING) -> EXCLUSIVE
+** PENDING -> EXCLUSIVE
+**
+** Semaphore locks only really support EXCLUSIVE locks. We track intermediate
+** lock states in the sqlite3_file structure, but all locks SHARED or
+** above are really EXCLUSIVE locks and exclude all other processes from
+** accessing the file.
+**
+** This routine will only increase a lock. Use the sqlite3OsUnlock()
+** routine to lower a locking level.
+*/
+static int semXLock(sqlite3_file *id, int eFileLock) {
+ unixFile *pFile = (unixFile*)id;
+ sem_t *pSem = pFile->pInode->pSem;
+ int rc = SQLITE_OK;
+
+ /* if we already have a lock, it is exclusive.
+ ** Just adjust level and punt on outta here. */
+ if (pFile->eFileLock > NO_LOCK) {
+ pFile->eFileLock = eFileLock;
+ rc = SQLITE_OK;
+ goto sem_end_lock;
+ }
+
+ /* lock semaphore now but bail out when already locked. */
+ if( sem_trywait(pSem)==-1 ){
+ rc = SQLITE_BUSY;
+ goto sem_end_lock;
+ }
+
+ /* got it, set the type and return ok */
+ pFile->eFileLock = eFileLock;
+
+ sem_end_lock:
+ return rc;
+}
+
+/*
+** Lower the locking level on file descriptor pFile to eFileLock. eFileLock
+** must be either NO_LOCK or SHARED_LOCK.
+**
+** If the locking level of the file descriptor is already at or below
+** the requested locking level, this routine is a no-op.
+*/
+static int semXUnlock(sqlite3_file *id, int eFileLock) {
+ unixFile *pFile = (unixFile*)id;
+ sem_t *pSem = pFile->pInode->pSem;
+
+ assert( pFile );
+ assert( pSem );
+ OSTRACE(("UNLOCK %d %d was %d pid=%d (sem)\n", pFile->h, eFileLock,
+ pFile->eFileLock, osGetpid(0)));
+ assert( eFileLock<=SHARED_LOCK );
+
+ /* no-op if possible */
+ if( pFile->eFileLock==eFileLock ){
+ return SQLITE_OK;
+ }
+
+ /* shared can just be set because we always have an exclusive */
+ if (eFileLock==SHARED_LOCK) {
+ pFile->eFileLock = eFileLock;
+ return SQLITE_OK;
+ }
+
+ /* no, really unlock. */
+ if ( sem_post(pSem)==-1 ) {
+ int rc, tErrno = errno;
+ rc = sqliteErrorFromPosixError(tErrno, SQLITE_IOERR_UNLOCK);
+ if( IS_LOCK_ERROR(rc) ){
+ storeLastErrno(pFile, tErrno);
+ }
+ return rc;
+ }
+ pFile->eFileLock = NO_LOCK;
+ return SQLITE_OK;
+}
+
+/*
+** Close a file.
+*/
+static int semXClose(sqlite3_file *id) {
+ if( id ){
+ unixFile *pFile = (unixFile*)id;
+ semXUnlock(id, NO_LOCK);
+ assert( pFile );
+ assert( unixFileMutexNotheld(pFile) );
+ unixEnterMutex();
+ releaseInodeInfo(pFile);
+ unixLeaveMutex();
+ closeUnixFile(id);
+ }
+ return SQLITE_OK;
+}
+
+#endif /* OS_VXWORKS */
+/*
+** Named semaphore locking is only available on VxWorks.
+**
+*************** End of the named semaphore lock implementation ****************
+******************************************************************************/
+
+
+/******************************************************************************
+*************************** Begin AFP Locking *********************************
+**
+** AFP is the Apple Filing Protocol. AFP is a network filesystem found
+** on Apple Macintosh computers - both OS9 and OSX.
+**
+** Third-party implementations of AFP are available. But this code here
+** only works on OSX.
+*/
+
+#if defined(__APPLE__) && SQLITE_ENABLE_LOCKING_STYLE
+/*
+** The afpLockingContext structure contains all afp lock specific state
+*/
+typedef struct afpLockingContext afpLockingContext;
+struct afpLockingContext {
+ int reserved;
+ const char *dbPath; /* Name of the open file */
+};
+
+struct ByteRangeLockPB2
+{
+ unsigned long long offset; /* offset to first byte to lock */
+ unsigned long long length; /* nbr of bytes to lock */
+ unsigned long long retRangeStart; /* nbr of 1st byte locked if successful */
+ unsigned char unLockFlag; /* 1 = unlock, 0 = lock */
+ unsigned char startEndFlag; /* 1=rel to end of fork, 0=rel to start */
+ int fd; /* file desc to assoc this lock with */
+};
+
+#define afpfsByteRangeLock2FSCTL _IOWR('z', 23, struct ByteRangeLockPB2)
+
+/*
+** This is a utility for setting or clearing a bit-range lock on an
+** AFP filesystem.
+**
+** Return SQLITE_OK on success, SQLITE_BUSY on failure.
+*/
+static int afpSetLock(
+ const char *path, /* Name of the file to be locked or unlocked */
+ unixFile *pFile, /* Open file descriptor on path */
+ unsigned long long offset, /* First byte to be locked */
+ unsigned long long length, /* Number of bytes to lock */
+ int setLockFlag /* True to set lock. False to clear lock */
+){
+ struct ByteRangeLockPB2 pb;
+ int err;
+
+ pb.unLockFlag = setLockFlag ? 0 : 1;
+ pb.startEndFlag = 0;
+ pb.offset = offset;
+ pb.length = length;
+ pb.fd = pFile->h;
+
+ OSTRACE(("AFPSETLOCK [%s] for %d%s in range %llx:%llx\n",
+ (setLockFlag?"ON":"OFF"), pFile->h, (pb.fd==-1?"[testval-1]":""),
+ offset, length));
+ err = fsctl(path, afpfsByteRangeLock2FSCTL, &pb, 0);
+ if ( err==-1 ) {
+ int rc;
+ int tErrno = errno;
+ OSTRACE(("AFPSETLOCK failed to fsctl() '%s' %d %s\n",
+ path, tErrno, strerror(tErrno)));
+#ifdef SQLITE_IGNORE_AFP_LOCK_ERRORS
+ rc = SQLITE_BUSY;
+#else
+ rc = sqliteErrorFromPosixError(tErrno,
+ setLockFlag ? SQLITE_IOERR_LOCK : SQLITE_IOERR_UNLOCK);
+#endif /* SQLITE_IGNORE_AFP_LOCK_ERRORS */
+ if( IS_LOCK_ERROR(rc) ){
+ storeLastErrno(pFile, tErrno);
+ }
+ return rc;
+ } else {
+ return SQLITE_OK;
+ }
+}
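+
+/* Hedged usage sketch (compiled out): afpCheckReservedLock() below
+** probes the RESERVED byte by trying to lock it and, on success,
+** immediately unlocking it to restore the original state.
+** exampleProbeReserved() is hypothetical.
+*/
+#if 0 /* illustration only */
+static int exampleProbeReserved(afpLockingContext *ctx, unixFile *pFile){
+  if( afpSetLock(ctx->dbPath, pFile, RESERVED_BYTE, 1, 1)==SQLITE_OK ){
+    afpSetLock(ctx->dbPath, pFile, RESERVED_BYTE, 1, 0);  /* undo */
+    return 0;  /* nobody held RESERVED */
+  }
+  return 1;    /* someone else holds RESERVED */
+}
+#endif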
+
+/*
+** This routine checks if there is a RESERVED lock held on the specified
+** file by this or any other process. If such a lock is held, set *pResOut
+** to a non-zero value otherwise *pResOut is set to zero. The return value
+** is set to SQLITE_OK unless an I/O error occurs during lock checking.
+*/
+static int afpCheckReservedLock(sqlite3_file *id, int *pResOut){
+ int rc = SQLITE_OK;
+ int reserved = 0;
+ unixFile *pFile = (unixFile*)id;
+ afpLockingContext *context;
+
+ SimulateIOError( return SQLITE_IOERR_CHECKRESERVEDLOCK; );
+
+ assert( pFile );
+ context = (afpLockingContext *) pFile->lockingContext;
+ if( context->reserved ){
+ *pResOut = 1;
+ return SQLITE_OK;
+ }
+ sqlite3_mutex_enter(pFile->pInode->pLockMutex);
+ /* Check if a thread in this process holds such a lock */
+ if( pFile->pInode->eFileLock>SHARED_LOCK ){
+ reserved = 1;
+ }
+
+ /* Otherwise see if some other process holds it.
+ */
+ if( !reserved ){
+ /* lock the RESERVED byte */
+ int lrc = afpSetLock(context->dbPath, pFile, RESERVED_BYTE, 1,1);
+ if( SQLITE_OK==lrc ){
+ /* if we succeeded in taking the reserved lock, unlock it to restore
+ ** the original state */
+ lrc = afpSetLock(context->dbPath, pFile, RESERVED_BYTE, 1, 0);
+ } else {
+ /* if we failed to get the lock then someone else must have it */
+ reserved = 1;
+ }
+ if( IS_LOCK_ERROR(lrc) ){
+ rc=lrc;
+ }
+ }
+
+ sqlite3_mutex_leave(pFile->pInode->pLockMutex);
+ OSTRACE(("TEST WR-LOCK %d %d %d (afp)\n", pFile->h, rc, reserved));
+
+ *pResOut = reserved;
+ return rc;
+}
+
+/*
+** Lock the file with the lock specified by parameter eFileLock - one
+** of the following:
+**
+** (1) SHARED_LOCK
+** (2) RESERVED_LOCK
+** (3) PENDING_LOCK
+** (4) EXCLUSIVE_LOCK
+**
+** Sometimes when requesting one lock state, additional lock states
+** are inserted in between. The locking might fail on one of the later
+** transitions, leaving the lock state different from where it started but
+** still short of its goal. The following chart shows the allowed
+** transitions and the inserted intermediate states:
+**
+** UNLOCKED -> SHARED
+** SHARED -> RESERVED
+** SHARED -> (PENDING) -> EXCLUSIVE
+** RESERVED -> (PENDING) -> EXCLUSIVE
+** PENDING -> EXCLUSIVE
+**
+** This routine will only increase a lock. Use the sqlite3OsUnlock()
+** routine to lower a locking level.
+*/
+static int afpLock(sqlite3_file *id, int eFileLock){
+ int rc = SQLITE_OK;
+ unixFile *pFile = (unixFile*)id;
+ unixInodeInfo *pInode = pFile->pInode;
+ afpLockingContext *context = (afpLockingContext *) pFile->lockingContext;
+
+ assert( pFile );
+ OSTRACE(("LOCK %d %s was %s(%s,%d) pid=%d (afp)\n", pFile->h,
+ azFileLock(eFileLock), azFileLock(pFile->eFileLock),
+ azFileLock(pInode->eFileLock), pInode->nShared , osGetpid(0)));
+
+ /* If there is already a lock of this type or more restrictive on the
+ ** unixFile, do nothing. Don't use the afp_end_lock: exit path, as
+ ** unixEnterMutex() hasn't been called yet.
+ */
+ if( pFile->eFileLock>=eFileLock ){
+ OSTRACE(("LOCK %d %s ok (already held) (afp)\n", pFile->h,
+ azFileLock(eFileLock)));
+ return SQLITE_OK;
+ }
+
+  /* Make sure the locking sequence is correct.
+  ** (1) We never move from unlocked to anything higher than shared lock.
+  ** (2) SQLite never explicitly requests a pending lock.
+  ** (3) A shared lock is always held when a reserved lock is requested.
+ */
+ assert( pFile->eFileLock!=NO_LOCK || eFileLock==SHARED_LOCK );
+ assert( eFileLock!=PENDING_LOCK );
+ assert( eFileLock!=RESERVED_LOCK || pFile->eFileLock==SHARED_LOCK );
+
+ /* This mutex is needed because pFile->pInode is shared across threads
+ */
+ pInode = pFile->pInode;
+ sqlite3_mutex_enter(pInode->pLockMutex);
+
+ /* If some thread using this PID has a lock via a different unixFile*
+ ** handle that precludes the requested lock, return BUSY.
+ */
+ if( (pFile->eFileLock!=pInode->eFileLock &&
+ (pInode->eFileLock>=PENDING_LOCK || eFileLock>SHARED_LOCK))
+ ){
+ rc = SQLITE_BUSY;
+ goto afp_end_lock;
+ }
+
+ /* If a SHARED lock is requested, and some thread using this PID already
+ ** has a SHARED or RESERVED lock, then increment reference counts and
+ ** return SQLITE_OK.
+ */
+ if( eFileLock==SHARED_LOCK &&
+ (pInode->eFileLock==SHARED_LOCK || pInode->eFileLock==RESERVED_LOCK) ){
+ assert( eFileLock==SHARED_LOCK );
+ assert( pFile->eFileLock==0 );
+ assert( pInode->nShared>0 );
+ pFile->eFileLock = SHARED_LOCK;
+ pInode->nShared++;
+ pInode->nLock++;
+ goto afp_end_lock;
+ }
+
+ /* A PENDING lock is needed before acquiring a SHARED lock and before
+ ** acquiring an EXCLUSIVE lock. For the SHARED lock, the PENDING will
+ ** be released.
+ */
+ if( eFileLock==SHARED_LOCK
+ || (eFileLock==EXCLUSIVE_LOCK && pFile->eFileLock<PENDING_LOCK)
+ ){
+ int failed;
+ failed = afpSetLock(context->dbPath, pFile, PENDING_BYTE, 1, 1);
+ if (failed) {
+ rc = failed;
+ goto afp_end_lock;
+ }
+ }
+
+ /* If control gets to this point, then actually go ahead and make
+ ** operating system calls for the specified lock.
+ */
+ if( eFileLock==SHARED_LOCK ){
+ int lrc1, lrc2, lrc1Errno = 0;
+ long lk, mask;
+
+ assert( pInode->nShared==0 );
+ assert( pInode->eFileLock==0 );
+
+ mask = (sizeof(long)==8) ? LARGEST_INT64 : 0x7fffffff;
+ /* Now get the read-lock SHARED_LOCK */
+ /* note that the quality of the randomness doesn't matter that much */
+ lk = random();
+ pInode->sharedByte = (lk & mask)%(SHARED_SIZE - 1);
+ lrc1 = afpSetLock(context->dbPath, pFile,
+ SHARED_FIRST+pInode->sharedByte, 1, 1);
+ if( IS_LOCK_ERROR(lrc1) ){
+ lrc1Errno = pFile->lastErrno;
+ }
+ /* Drop the temporary PENDING lock */
+ lrc2 = afpSetLock(context->dbPath, pFile, PENDING_BYTE, 1, 0);
+
+ if( IS_LOCK_ERROR(lrc1) ) {
+ storeLastErrno(pFile, lrc1Errno);
+ rc = lrc1;
+ goto afp_end_lock;
+ } else if( IS_LOCK_ERROR(lrc2) ){
+ rc = lrc2;
+ goto afp_end_lock;
+ } else if( lrc1 != SQLITE_OK ) {
+ rc = lrc1;
+ } else {
+ pFile->eFileLock = SHARED_LOCK;
+ pInode->nLock++;
+ pInode->nShared = 1;
+ }
+ }else if( eFileLock==EXCLUSIVE_LOCK && pInode->nShared>1 ){
+ /* We are trying for an exclusive lock but another thread in this
+ ** same process is still holding a shared lock. */
+ rc = SQLITE_BUSY;
+ }else{
+ /* The request was for a RESERVED or EXCLUSIVE lock. It is
+ ** assumed that there is a SHARED or greater lock on the file
+ ** already.
+ */
+ int failed = 0;
+ assert( 0!=pFile->eFileLock );
+ if (eFileLock >= RESERVED_LOCK && pFile->eFileLock < RESERVED_LOCK) {
+ /* Acquire a RESERVED lock */
+ failed = afpSetLock(context->dbPath, pFile, RESERVED_BYTE, 1,1);
+ if( !failed ){
+ context->reserved = 1;
+ }
+ }
+ if (!failed && eFileLock == EXCLUSIVE_LOCK) {
+ /* Acquire an EXCLUSIVE lock */
+
+      /* Remove the shared lock before trying the range. We'll need to
+      ** reestablish the shared lock if we can't get the exclusive range
+      ** lock.
+      */
+ if( !(failed = afpSetLock(context->dbPath, pFile, SHARED_FIRST +
+ pInode->sharedByte, 1, 0)) ){
+ int failed2 = SQLITE_OK;
+          /* now attempt to get the exclusive lock range */
+ failed = afpSetLock(context->dbPath, pFile, SHARED_FIRST,
+ SHARED_SIZE, 1);
+ if( failed && (failed2 = afpSetLock(context->dbPath, pFile,
+ SHARED_FIRST + pInode->sharedByte, 1, 1)) ){
+          /* Can't reestablish the shared lock. SQLite can't deal with
+          ** this, so it is a critical I/O error.
+          */
+ rc = ((failed & 0xff) == SQLITE_IOERR) ? failed2 :
+ SQLITE_IOERR_LOCK;
+ goto afp_end_lock;
+ }
+ }else{
+ rc = failed;
+ }
+ }
+ if( failed ){
+ rc = failed;
+ }
+ }
+
+ if( rc==SQLITE_OK ){
+ pFile->eFileLock = eFileLock;
+ pInode->eFileLock = eFileLock;
+ }else if( eFileLock==EXCLUSIVE_LOCK ){
+ pFile->eFileLock = PENDING_LOCK;
+ pInode->eFileLock = PENDING_LOCK;
+ }
+
+afp_end_lock:
+ sqlite3_mutex_leave(pInode->pLockMutex);
+ OSTRACE(("LOCK %d %s %s (afp)\n", pFile->h, azFileLock(eFileLock),
+ rc==SQLITE_OK ? "ok" : "failed"));
+ return rc;
+}
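+
+/* Illustrative sketch (not part of upstream SQLite): how the AFP path
+** above chooses pInode->sharedByte. The randomness only needs to spread
+** concurrent readers across the SHARED_SIZE-1 usable byte offsets so that
+** their one-byte locks rarely collide; it has no security role. The
+** helper name pickAfpSharedByte() is hypothetical.
+*/
+static long pickAfpSharedByte(void){
+  long mask = (sizeof(long)==8) ? LARGEST_INT64 : 0x7fffffff;
+  long lk = random();               /* any cheap entropy source will do */
+  return (lk & mask) % (SHARED_SIZE - 1);  /* result in 0..SHARED_SIZE-2 */
+}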
+
+/*
+** Lower the locking level on file descriptor pFile to eFileLock. eFileLock
+** must be either NO_LOCK or SHARED_LOCK.
+**
+** If the locking level of the file descriptor is already at or below
+** the requested locking level, this routine is a no-op.
+*/
+static int afpUnlock(sqlite3_file *id, int eFileLock) {
+ int rc = SQLITE_OK;
+ unixFile *pFile = (unixFile*)id;
+ unixInodeInfo *pInode;
+ afpLockingContext *context = (afpLockingContext *) pFile->lockingContext;
+ int skipShared = 0;
+#ifdef SQLITE_TEST
+ int h = pFile->h;
+#endif
+
+ assert( pFile );
+ OSTRACE(("UNLOCK %d %d was %d(%d,%d) pid=%d (afp)\n", pFile->h, eFileLock,
+ pFile->eFileLock, pFile->pInode->eFileLock, pFile->pInode->nShared,
+ osGetpid(0)));
+
+ assert( eFileLock<=SHARED_LOCK );
+ if( pFile->eFileLock<=eFileLock ){
+ return SQLITE_OK;
+ }
+ pInode = pFile->pInode;
+ sqlite3_mutex_enter(pInode->pLockMutex);
+ assert( pInode->nShared!=0 );
+ if( pFile->eFileLock>SHARED_LOCK ){
+ assert( pInode->eFileLock==pFile->eFileLock );
+ SimulateIOErrorBenign(1);
+ SimulateIOError( h=(-1) )
+ SimulateIOErrorBenign(0);
+
+#ifdef SQLITE_DEBUG
+ /* When reducing a lock such that other processes can start
+ ** reading the database file again, make sure that the
+ ** transaction counter was updated if any part of the database
+ ** file changed. If the transaction counter is not updated,
+ ** other connections to the same file might not realize that
+ ** the file has changed and hence might not know to flush their
+ ** cache. The use of a stale cache can lead to database corruption.
+ */
+ assert( pFile->inNormalWrite==0
+ || pFile->dbUpdate==0
+ || pFile->transCntrChng==1 );
+ pFile->inNormalWrite = 0;
+#endif
+
+ if( pFile->eFileLock==EXCLUSIVE_LOCK ){
+ rc = afpSetLock(context->dbPath, pFile, SHARED_FIRST, SHARED_SIZE, 0);
+ if( rc==SQLITE_OK && (eFileLock==SHARED_LOCK || pInode->nShared>1) ){
+ /* only re-establish the shared lock if necessary */
+ int sharedLockByte = SHARED_FIRST+pInode->sharedByte;
+ rc = afpSetLock(context->dbPath, pFile, sharedLockByte, 1, 1);
+ } else {
+ skipShared = 1;
+ }
+ }
+ if( rc==SQLITE_OK && pFile->eFileLock>=PENDING_LOCK ){
+ rc = afpSetLock(context->dbPath, pFile, PENDING_BYTE, 1, 0);
+ }
+ if( rc==SQLITE_OK && pFile->eFileLock>=RESERVED_LOCK && context->reserved ){
+ rc = afpSetLock(context->dbPath, pFile, RESERVED_BYTE, 1, 0);
+ if( !rc ){
+ context->reserved = 0;
+ }
+ }
+ if( rc==SQLITE_OK && (eFileLock==SHARED_LOCK || pInode->nShared>1)){
+ pInode->eFileLock = SHARED_LOCK;
+ }
+ }
+ if( rc==SQLITE_OK && eFileLock==NO_LOCK ){
+
+ /* Decrement the shared lock counter. Release the lock using an
+ ** OS call only when all threads in this same process have released
+ ** the lock.
+ */
+ unsigned long long sharedLockByte = SHARED_FIRST+pInode->sharedByte;
+ pInode->nShared--;
+ if( pInode->nShared==0 ){
+ SimulateIOErrorBenign(1);
+ SimulateIOError( h=(-1) )
+ SimulateIOErrorBenign(0);
+ if( !skipShared ){
+ rc = afpSetLock(context->dbPath, pFile, sharedLockByte, 1, 0);
+ }
+ if( !rc ){
+ pInode->eFileLock = NO_LOCK;
+ pFile->eFileLock = NO_LOCK;
+ }
+ }
+ if( rc==SQLITE_OK ){
+ pInode->nLock--;
+ assert( pInode->nLock>=0 );
+ if( pInode->nLock==0 ) closePendingFds(pFile);
+ }
+ }
+
+ sqlite3_mutex_leave(pInode->pLockMutex);
+ if( rc==SQLITE_OK ){
+ pFile->eFileLock = eFileLock;
+ }
+ return rc;
+}
+
+/*
+** Close a file and clean up the AFP-specific locking context.
+*/
+static int afpClose(sqlite3_file *id) {
+ int rc = SQLITE_OK;
+ unixFile *pFile = (unixFile*)id;
+ assert( id!=0 );
+ afpUnlock(id, NO_LOCK);
+ assert( unixFileMutexNotheld(pFile) );
+ unixEnterMutex();
+ if( pFile->pInode ){
+ unixInodeInfo *pInode = pFile->pInode;
+ sqlite3_mutex_enter(pInode->pLockMutex);
+ if( pInode->nLock ){
+ /* If there are outstanding locks, do not actually close the file just
+ ** yet because that would clear those locks. Instead, add the file
+ ** descriptor to pInode->aPending. It will be automatically closed when
+ ** the last lock is cleared.
+ */
+ setPendingFd(pFile);
+ }
+ sqlite3_mutex_leave(pInode->pLockMutex);
+ }
+ releaseInodeInfo(pFile);
+ sqlite3_free(pFile->lockingContext);
+ rc = closeUnixFile(id);
+ unixLeaveMutex();
+ return rc;
+}
+
+#endif /* defined(__APPLE__) && SQLITE_ENABLE_LOCKING_STYLE */
+/*
+** The code above is the AFP lock implementation. The code is specific
+** to MacOSX and does not work on other unix platforms. No alternative
+** is available. If you don't compile for a mac, then the "unix-afp"
+** VFS is not available.
+**
+********************* End of the AFP lock implementation **********************
+******************************************************************************/
+
+/******************************************************************************
+*************************** Begin NFS Locking ********************************/
+
+#if defined(__APPLE__) && SQLITE_ENABLE_LOCKING_STYLE
+/*
+** Lower the locking level on file descriptor pFile to eFileLock. eFileLock
+** must be either NO_LOCK or SHARED_LOCK.
+**
+** If the locking level of the file descriptor is already at or below
+** the requested locking level, this routine is a no-op.
+*/
+static int nfsUnlock(sqlite3_file *id, int eFileLock){
+ return posixUnlock(id, eFileLock, 1);
+}
+
+#endif /* defined(__APPLE__) && SQLITE_ENABLE_LOCKING_STYLE */
+/*
+** The code above is the NFS lock implementation. The code is specific
+** to MacOSX and does not work on other unix platforms. No alternative
+** is available.
+**
+********************* End of the NFS lock implementation **********************
+******************************************************************************/
+
+/******************************************************************************
+**************** Non-locking sqlite3_file methods *****************************
+**
+** The next division contains implementations for all methods of the
+** sqlite3_file object other than the locking methods. The locking
+** methods were defined in divisions above (one locking method per
+** division). Those methods that are common to all locking modes
+** are gathered together in this division.
+*/
+
+/*
+** Seek to the offset passed as the second argument, then read cnt
+** bytes into pBuf. Return the number of bytes actually read.
+**
+** NB: If you define USE_PREAD or USE_PREAD64, then it might also
+** be necessary to define _XOPEN_SOURCE to be 500. This varies from
+** one system to another. Since SQLite does not define USE_PREAD
+** in any form by default, we will not attempt to define _XOPEN_SOURCE.
+** See tickets #2741 and #2681.
+**
+** To avoid stomping the errno value on a failed read the lastErrno value
+** is set before returning.
+*/
+static int seekAndRead(unixFile *id, sqlite3_int64 offset, void *pBuf, int cnt){
+ int got;
+ int prior = 0;
+#if (!defined(USE_PREAD) && !defined(USE_PREAD64))
+ i64 newOffset;
+#endif
+ TIMER_START;
+ assert( cnt==(cnt&0x1ffff) );
+ assert( id->h>2 );
+ do{
+#if defined(USE_PREAD)
+ got = osPread(id->h, pBuf, cnt, offset);
+ SimulateIOError( got = -1 );
+#elif defined(USE_PREAD64)
+ got = osPread64(id->h, pBuf, cnt, offset);
+ SimulateIOError( got = -1 );
+#else
+ newOffset = lseek(id->h, offset, SEEK_SET);
+ SimulateIOError( newOffset = -1 );
+ if( newOffset<0 ){
+ storeLastErrno((unixFile*)id, errno);
+ return -1;
+ }
+ got = osRead(id->h, pBuf, cnt);
+#endif
+ if( got==cnt ) break;
+ if( got<0 ){
+ if( errno==EINTR ){ got = 1; continue; }
+ prior = 0;
+ storeLastErrno((unixFile*)id, errno);
+ break;
+ }else if( got>0 ){
+ cnt -= got;
+ offset += got;
+ prior += got;
+ pBuf = (void*)(got + (char*)pBuf);
+ }
+ }while( got>0 );
+ TIMER_END;
+ OSTRACE(("READ %-3d %5d %7lld %llu\n",
+ id->h, got+prior, offset-prior, TIMER_ELAPSED));
+ return got+prior;
+}
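+
+/* Illustrative sketch (not part of upstream SQLite): the retry pattern
+** used by seekAndRead() above, reduced to a standalone helper. It keeps
+** calling pread() until the request is satisfied, restarting on EINTR
+** and advancing past short reads. Assumes POSIX pread() from <unistd.h>
+** and errno from <errno.h>, both already included by this file; the name
+** fullPread() is hypothetical.
+*/
+static ssize_t fullPread(int fd, void *pBuf, size_t n, off_t off){
+  size_t nDone = 0;
+  while( nDone<n ){
+    ssize_t got = pread(fd, (char*)pBuf+nDone, n-nDone, off+(off_t)nDone);
+    if( got<0 ){
+      if( errno==EINTR ) continue;    /* interrupted: retry same range */
+      return -1;                      /* real error; errno is set */
+    }
+    if( got==0 ) break;               /* EOF: return the short count */
+    nDone += (size_t)got;
+  }
+  return (ssize_t)nDone;
+}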
+
+/*
+** Read data from a file into a buffer. Return SQLITE_OK if all
+** bytes were read successfully and SQLITE_IOERR if anything goes
+** wrong.
+*/
+static int unixRead(
+ sqlite3_file *id,
+ void *pBuf,
+ int amt,
+ sqlite3_int64 offset
+){
+ unixFile *pFile = (unixFile *)id;
+ int got;
+ assert( id );
+ assert( offset>=0 );
+ assert( amt>0 );
+
+ /* If this is a database file (not a journal, super-journal or temp
+ ** file), the bytes in the locking range should never be read or written. */
+#if 0
+ assert( pFile->pPreallocatedUnused==0
+ || offset>=PENDING_BYTE+512
+ || offset+amt<=PENDING_BYTE
+ );
+#endif
+
+#if SQLITE_MAX_MMAP_SIZE>0
+  /* Deal with as much of this read request as possible by transferring
+ ** data from the memory mapping using memcpy(). */
+ if( offset<pFile->mmapSize ){
+ if( offset+amt <= pFile->mmapSize ){
+ memcpy(pBuf, &((u8 *)(pFile->pMapRegion))[offset], amt);
+ return SQLITE_OK;
+ }else{
+ int nCopy = pFile->mmapSize - offset;
+ memcpy(pBuf, &((u8 *)(pFile->pMapRegion))[offset], nCopy);
+ pBuf = &((u8 *)pBuf)[nCopy];
+ amt -= nCopy;
+ offset += nCopy;
+ }
+ }
+#endif
+
+ got = seekAndRead(pFile, offset, pBuf, amt);
+ if( got==amt ){
+ return SQLITE_OK;
+ }else if( got<0 ){
+ /* lastErrno set by seekAndRead */
+ return SQLITE_IOERR_READ;
+ }else{
+ storeLastErrno(pFile, 0); /* not a system error */
+ /* Unread parts of the buffer must be zero-filled */
+ memset(&((char*)pBuf)[got], 0, amt-got);
+ return SQLITE_IOERR_SHORT_READ;
+ }
+}
+
+/*
+** Attempt to seek the file-descriptor passed as the first argument to
+** absolute offset iOff, then attempt to write nBuf bytes of data from
+** pBuf to it. If an error occurs, return -1 and set *piErrno. Otherwise,
+** return the actual number of bytes written (which may be less than
+** nBuf).
+*/
+static int seekAndWriteFd(
+ int fd, /* File descriptor to write to */
+ i64 iOff, /* File offset to begin writing at */
+ const void *pBuf, /* Copy data from this buffer to the file */
+ int nBuf, /* Size of buffer pBuf in bytes */
+ int *piErrno /* OUT: Error number if error occurs */
+){
+ int rc = 0; /* Value returned by system call */
+
+ assert( nBuf==(nBuf&0x1ffff) );
+ assert( fd>2 );
+ assert( piErrno!=0 );
+ nBuf &= 0x1ffff;
+ TIMER_START;
+
+#if defined(USE_PREAD)
+ do{ rc = (int)osPwrite(fd, pBuf, nBuf, iOff); }while( rc<0 && errno==EINTR );
+#elif defined(USE_PREAD64)
+ do{ rc = (int)osPwrite64(fd, pBuf, nBuf, iOff);}while( rc<0 && errno==EINTR);
+#else
+ do{
+ i64 iSeek = lseek(fd, iOff, SEEK_SET);
+ SimulateIOError( iSeek = -1 );
+ if( iSeek<0 ){
+ rc = -1;
+ break;
+ }
+ rc = osWrite(fd, pBuf, nBuf);
+ }while( rc<0 && errno==EINTR );
+#endif
+
+ TIMER_END;
+ OSTRACE(("WRITE %-3d %5d %7lld %llu\n", fd, rc, iOff, TIMER_ELAPSED));
+
+ if( rc<0 ) *piErrno = errno;
+ return rc;
+}
+
+
+/*
+** Seek to absolute offset "offset" in the file described by id, then
+** write cnt bytes from pBuf. Return the number of bytes actually written.
+**
+** To avoid stomping the errno value on a failed write the lastErrno value
+** is set before returning.
+*/
+static int seekAndWrite(unixFile *id, i64 offset, const void *pBuf, int cnt){
+ return seekAndWriteFd(id->h, offset, pBuf, cnt, &id->lastErrno);
+}
+
+
+/*
+** Write data from a buffer into a file. Return SQLITE_OK on success
+** or some other error code on failure.
+*/
+static int unixWrite(
+ sqlite3_file *id,
+ const void *pBuf,
+ int amt,
+ sqlite3_int64 offset
+){
+ unixFile *pFile = (unixFile*)id;
+ int wrote = 0;
+ assert( id );
+ assert( amt>0 );
+
+ /* If this is a database file (not a journal, super-journal or temp
+ ** file), the bytes in the locking range should never be read or written. */
+#if 0
+ assert( pFile->pPreallocatedUnused==0
+ || offset>=PENDING_BYTE+512
+ || offset+amt<=PENDING_BYTE
+ );
+#endif
+
+#ifdef SQLITE_DEBUG
+ /* If we are doing a normal write to a database file (as opposed to
+ ** doing a hot-journal rollback or a write to some file other than a
+ ** normal database file) then record the fact that the database
+ ** has changed. If the transaction counter is modified, record that
+ ** fact too.
+ */
+ if( pFile->inNormalWrite ){
+ pFile->dbUpdate = 1; /* The database has been modified */
+ if( offset<=24 && offset+amt>=27 ){
+ int rc;
+ char oldCntr[4];
+ SimulateIOErrorBenign(1);
+ rc = seekAndRead(pFile, 24, oldCntr, 4);
+ SimulateIOErrorBenign(0);
+ if( rc!=4 || memcmp(oldCntr, &((char*)pBuf)[24-offset], 4)!=0 ){
+ pFile->transCntrChng = 1; /* The transaction counter has changed */
+ }
+ }
+ }
+#endif
+
+#if defined(SQLITE_MMAP_READWRITE) && SQLITE_MAX_MMAP_SIZE>0
+  /* Deal with as much of this write request as possible by transferring
+ ** data from the memory mapping using memcpy(). */
+ if( offset<pFile->mmapSize ){
+ if( offset+amt <= pFile->mmapSize ){
+ memcpy(&((u8 *)(pFile->pMapRegion))[offset], pBuf, amt);
+ return SQLITE_OK;
+ }else{
+ int nCopy = pFile->mmapSize - offset;
+ memcpy(&((u8 *)(pFile->pMapRegion))[offset], pBuf, nCopy);
+ pBuf = &((u8 *)pBuf)[nCopy];
+ amt -= nCopy;
+ offset += nCopy;
+ }
+ }
+#endif
+
+ while( (wrote = seekAndWrite(pFile, offset, pBuf, amt))<amt && wrote>0 ){
+ amt -= wrote;
+ offset += wrote;
+ pBuf = &((char*)pBuf)[wrote];
+ }
+ SimulateIOError(( wrote=(-1), amt=1 ));
+ SimulateDiskfullError(( wrote=0, amt=1 ));
+
+ if( amt>wrote ){
+ if( wrote<0 && pFile->lastErrno!=ENOSPC ){
+ /* lastErrno set by seekAndWrite */
+ return SQLITE_IOERR_WRITE;
+ }else{
+ storeLastErrno(pFile, 0); /* not a system error */
+ return SQLITE_FULL;
+ }
+ }
+
+ return SQLITE_OK;
+}
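+
+/* Illustrative sketch (not part of upstream SQLite): the write-loop
+** policy implemented by unixWrite() above. Loop until every byte is
+** written, then map failure to either an I/O error (a real errno) or a
+** disk-full condition (ENOSPC, or a zero-byte write). fullPwrite() is a
+** hypothetical helper shown only to make the control flow explicit.
+*/
+static int fullPwrite(int fd, const void *pBuf, size_t n, off_t off){
+  while( n>0 ){
+    ssize_t wrote = pwrite(fd, pBuf, n, off);
+    if( wrote<0 ){
+      if( errno==EINTR ) continue;
+      return (errno==ENOSPC) ? SQLITE_FULL : SQLITE_IOERR_WRITE;
+    }
+    if( wrote==0 ) return SQLITE_FULL;   /* no progress: treat as full */
+    n -= (size_t)wrote;
+    off += (off_t)wrote;
+    pBuf = (const char*)pBuf + wrote;
+  }
+  return SQLITE_OK;
+}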
+
+#ifdef SQLITE_TEST
+/*
+** Count the number of fullsyncs and normal syncs. This is used to test
+** that syncs and fullsyncs are occurring at the right times.
+*/
+SQLITE_API int sqlite3_sync_count = 0;
+SQLITE_API int sqlite3_fullsync_count = 0;
+#endif
+
+/*
+** We do not trust systems to provide a working fdatasync(). Some do.
+** Others do not. To be safe, we will stick with the (slightly slower)
+** fsync(). If you know that your system does support fdatasync() correctly,
+** then simply compile with -Dfdatasync=fdatasync or -DHAVE_FDATASYNC
+*/
+#if !defined(fdatasync) && !HAVE_FDATASYNC
+# define fdatasync fsync
+#endif
+
+/*
+** Define HAVE_FULLFSYNC to 0 or 1 depending on whether or not
+** the F_FULLFSYNC macro is defined. F_FULLFSYNC is currently
+** only available on Mac OS X. But that could change.
+*/
+#ifdef F_FULLFSYNC
+# define HAVE_FULLFSYNC 1
+#else
+# define HAVE_FULLFSYNC 0
+#endif
+
+
+/*
+** The fsync() system call does not work as advertised on many
+** unix systems. The following procedure is an attempt to make
+** it work better.
+**
+** The SQLITE_NO_SYNC macro disables all fsync()s. This is useful
+** for testing when we want to run through the test suite quickly.
+** You are strongly advised *not* to deploy with SQLITE_NO_SYNC
+** enabled, however, since with SQLITE_NO_SYNC enabled, an OS crash
+** or power failure will likely corrupt the database file.
+**
+** SQLite sets the dataOnly flag if the size of the file is unchanged.
+** The idea behind dataOnly is that it should only write the file content
+** to disk, not the inode. We only set dataOnly if the file size is
+** unchanged since the file size is part of the inode. However,
+** Ted Ts'o tells us that fdatasync() will also write the inode if the
+** file size has changed. The only real difference between fdatasync()
+** and fsync(), Ted tells us, is that fdatasync() will not flush the
+** inode if the mtime or owner or other inode attributes have changed.
+** We only care about the file size, not the other file attributes, so
+** as far as SQLite is concerned, an fdatasync() is always adequate.
+** So, we always use fdatasync() if it is available, regardless of
+** the value of the dataOnly flag.
+*/
+static int full_fsync(int fd, int fullSync, int dataOnly){
+ int rc;
+
+ /* The following "ifdef/elif/else/" block has the same structure as
+ ** the one below. It is replicated here solely to avoid cluttering
+ ** up the real code with the UNUSED_PARAMETER() macros.
+ */
+#ifdef SQLITE_NO_SYNC
+ UNUSED_PARAMETER(fd);
+ UNUSED_PARAMETER(fullSync);
+ UNUSED_PARAMETER(dataOnly);
+#elif HAVE_FULLFSYNC
+ UNUSED_PARAMETER(dataOnly);
+#else
+ UNUSED_PARAMETER(fullSync);
+ UNUSED_PARAMETER(dataOnly);
+#endif
+
+ /* Record the number of times that we do a normal fsync() and
+ ** FULLSYNC. This is used during testing to verify that this procedure
+ ** gets called with the correct arguments.
+ */
+#ifdef SQLITE_TEST
+ if( fullSync ) sqlite3_fullsync_count++;
+ sqlite3_sync_count++;
+#endif
+
+ /* If we compiled with the SQLITE_NO_SYNC flag, then syncing is a
+ ** no-op. But go ahead and call fstat() to validate the file
+ ** descriptor as we need a method to provoke a failure during
+  ** coverage testing.
+ */
+#ifdef SQLITE_NO_SYNC
+ {
+ struct stat buf;
+ rc = osFstat(fd, &buf);
+ }
+#elif HAVE_FULLFSYNC
+ if( fullSync ){
+ rc = osFcntl(fd, F_FULLFSYNC, 0);
+ }else{
+ rc = 1;
+ }
+ /* If the FULLFSYNC failed, fall back to attempting an fsync().
+ ** It shouldn't be possible for fullfsync to fail on the local
+ ** file system (on OSX), so failure indicates that FULLFSYNC
+ ** isn't supported for this file system. So, attempt an fsync
+ ** and (for now) ignore the overhead of a superfluous fcntl call.
+ ** It'd be better to detect fullfsync support once and avoid
+ ** the fcntl call every time sync is called.
+ */
+ if( rc ) rc = fsync(fd);
+
+#elif defined(__APPLE__)
+  /* fdatasync() on HFS+ does not yet correctly flush the file size if it
+  ** has changed, so currently we default to the macro that redefines
+  ** fdatasync to fsync
+  */
+ rc = fsync(fd);
+#else
+ rc = fdatasync(fd);
+#if OS_VXWORKS
+ if( rc==-1 && errno==ENOTSUP ){
+ rc = fsync(fd);
+ }
+#endif /* OS_VXWORKS */
+#endif /* ifdef SQLITE_NO_SYNC elif HAVE_FULLFSYNC */
+
+ if( OS_VXWORKS && rc!= -1 ){
+ rc = 0;
+ }
+ return rc;
+}
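+
+/* Illustrative sketch (not part of upstream SQLite): the durability
+** ladder that full_fsync() above walks, reduced to its essentials.
+** Prefer F_FULLFSYNC where the macro exists (macOS), fall back to
+** fsync() if the fcntl() fails (e.g. on a filesystem that does not
+** support it), and otherwise use plain fsync(). robustSync() is a
+** hypothetical name.
+*/
+static int robustSync(int fd, int fullSync){
+#ifdef F_FULLFSYNC
+  if( fullSync && fcntl(fd, F_FULLFSYNC, 0)==0 ) return 0;
+  /* FULLFSYNC not requested or not supported: fall through to fsync() */
+#else
+  (void)fullSync;        /* F_FULLFSYNC is unavailable on this platform */
+#endif
+  return fsync(fd);
+}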
+
+/*
+** Open a file descriptor to the directory containing file zFilename.
+** If successful, *pFd is set to the opened file descriptor and
+** SQLITE_OK is returned. If an error occurs, either SQLITE_NOMEM
+** or SQLITE_CANTOPEN is returned and *pFd is set to an undefined
+** value.
+**
+** The directory file descriptor is used for only one thing - to
+** fsync() a directory to make sure file creation and deletion events
+** are flushed to disk. Such fsyncs are not needed on newer
+** journaling filesystems, but are required on older filesystems.
+**
+** This routine can be overridden using the xSetSysCall interface.
+** The ability to override this routine was added in support of the
+** chromium sandbox. Opening a directory is a security risk (we are
+** told) so making it overrideable allows the chromium sandbox to
+** replace this routine with a harmless no-op. To make this routine
+** a no-op, replace it with a stub that returns SQLITE_OK but leaves
+** *pFd set to a negative number.
+**
+** If SQLITE_OK is returned, the caller is responsible for closing
+** the file descriptor *pFd using close().
+*/
+static int openDirectory(const char *zFilename, int *pFd){
+ int ii;
+ int fd = -1;
+ char zDirname[MAX_PATHNAME+1];
+
+ sqlite3_snprintf(MAX_PATHNAME, zDirname, "%s", zFilename);
+ for(ii=(int)strlen(zDirname); ii>0 && zDirname[ii]!='/'; ii--);
+ if( ii>0 ){
+ zDirname[ii] = '\0';
+ }else{
+ if( zDirname[0]!='/' ) zDirname[0] = '.';
+ zDirname[1] = 0;
+ }
+ fd = robust_open(zDirname, O_RDONLY|O_BINARY, 0);
+ if( fd>=0 ){
+ OSTRACE(("OPENDIR %-3d %s\n", fd, zDirname));
+ }
+ *pFd = fd;
+ if( fd>=0 ) return SQLITE_OK;
+ return unixLogError(SQLITE_CANTOPEN_BKPT, "openDirectory", zDirname);
+}
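+
+/* Illustrative sketch (not part of upstream SQLite): how the descriptor
+** returned by openDirectory() is meant to be used. After creating or
+** deleting a file, fsync() the containing directory so the directory
+** entry itself survives a power failure. syncDirEntry() is a
+** hypothetical wrapper; SQLite itself routes this through unixSync()
+** below.
+*/
+static int syncDirEntry(const char *zDir){
+  int rc;
+  int fd = open(zDir, O_RDONLY);
+  if( fd<0 ) return -1;
+  rc = fsync(fd);        /* flush the directory's entries to disk */
+  close(fd);
+  return rc;
+}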
+
+/*
+** Make sure all writes to a particular file are committed to disk.
+**
+** If dataOnly==0 then both the file itself and its metadata (file
+** size, access time, etc) are synced. If dataOnly!=0 then only the
+** file data is synced.
+**
+** Under Unix, also make sure that the directory entry for the file
+** has been created by fsync-ing the directory that contains the file.
+** If we do not do this and we encounter a power failure, the directory
+** entry for the journal might not exist after we reboot. The next
+** SQLite to access the file will not know that the journal exists (because
+** the directory entry for the journal was never created) and the transaction
+** will not roll back - possibly leading to database corruption.
+*/
+static int unixSync(sqlite3_file *id, int flags){
+ int rc;
+ unixFile *pFile = (unixFile*)id;
+
+ int isDataOnly = (flags&SQLITE_SYNC_DATAONLY);
+ int isFullsync = (flags&0x0F)==SQLITE_SYNC_FULL;
+
+ /* Check that one of SQLITE_SYNC_NORMAL or FULL was passed */
+ assert((flags&0x0F)==SQLITE_SYNC_NORMAL
+ || (flags&0x0F)==SQLITE_SYNC_FULL
+ );
+
+ /* Unix cannot, but some systems may return SQLITE_FULL from here. This
+ ** line is to test that doing so does not cause any problems.
+ */
+ SimulateDiskfullError( return SQLITE_FULL );
+
+ assert( pFile );
+ OSTRACE(("SYNC %-3d\n", pFile->h));
+ rc = full_fsync(pFile->h, isFullsync, isDataOnly);
+ SimulateIOError( rc=1 );
+ if( rc ){
+ storeLastErrno(pFile, errno);
+ return unixLogError(SQLITE_IOERR_FSYNC, "full_fsync", pFile->zPath);
+ }
+
+ /* Also fsync the directory containing the file if the DIRSYNC flag
+ ** is set. This is a one-time occurrence. Many systems (examples: AIX)
+ ** are unable to fsync a directory, so ignore errors on the fsync.
+ */
+ if( pFile->ctrlFlags & UNIXFILE_DIRSYNC ){
+ int dirfd;
+ OSTRACE(("DIRSYNC %s (have_fullfsync=%d fullsync=%d)\n", pFile->zPath,
+ HAVE_FULLFSYNC, isFullsync));
+ rc = osOpenDirectory(pFile->zPath, &dirfd);
+ if( rc==SQLITE_OK ){
+ full_fsync(dirfd, 0, 0);
+ robust_close(pFile, dirfd, __LINE__);
+ }else{
+ assert( rc==SQLITE_CANTOPEN );
+ rc = SQLITE_OK;
+ }
+ pFile->ctrlFlags &= ~UNIXFILE_DIRSYNC;
+ }
+ return rc;
+}
+
+/*
+** Truncate an open file to a specified size
+*/
+static int unixTruncate(sqlite3_file *id, i64 nByte){
+ unixFile *pFile = (unixFile *)id;
+ int rc;
+ assert( pFile );
+ SimulateIOError( return SQLITE_IOERR_TRUNCATE );
+
+ /* If the user has configured a chunk-size for this file, truncate the
+ ** file so that it consists of an integer number of chunks (i.e. the
+ ** actual file size after the operation may be larger than the requested
+ ** size).
+ */
+ if( pFile->szChunk>0 ){
+ nByte = ((nByte + pFile->szChunk - 1)/pFile->szChunk) * pFile->szChunk;
+ }
+
+ rc = robust_ftruncate(pFile->h, nByte);
+ if( rc ){
+ storeLastErrno(pFile, errno);
+ return unixLogError(SQLITE_IOERR_TRUNCATE, "ftruncate", pFile->zPath);
+ }else{
+#ifdef SQLITE_DEBUG
+ /* If we are doing a normal write to a database file (as opposed to
+ ** doing a hot-journal rollback or a write to some file other than a
+ ** normal database file) and we truncate the file to zero length,
+ ** that effectively updates the change counter. This might happen
+ ** when restoring a database using the backup API from a zero-length
+ ** source.
+ */
+ if( pFile->inNormalWrite && nByte==0 ){
+ pFile->transCntrChng = 1;
+ }
+#endif
+
+#if SQLITE_MAX_MMAP_SIZE>0
+ /* If the file was just truncated to a size smaller than the currently
+ ** mapped region, reduce the effective mapping size as well. SQLite will
+ ** use read() and write() to access data beyond this point from now on.
+ */
+ if( nByte<pFile->mmapSize ){
+ pFile->mmapSize = nByte;
+ }
+#endif
+
+ return SQLITE_OK;
+ }
+}
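+
+/* The chunk rounding above is plain round-up-to-a-multiple arithmetic.
+** A worked example with a 4096-byte chunk size:
+**
+**   nByte = 1     ->  ((1    + 4095)/4096)*4096 = 4096
+**   nByte = 4096  ->  ((4096 + 4095)/4096)*4096 = 4096
+**   nByte = 4097  ->  ((4097 + 4095)/4096)*4096 = 8192
+**
+** roundUpToChunk() is a hypothetical helper, shown only to isolate the
+** formula shared by unixTruncate() and fcntlSizeHint() (below).
+*/
+static i64 roundUpToChunk(i64 nByte, int szChunk){
+  return ((nByte + szChunk - 1)/szChunk)*szChunk;
+}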
+
+/*
+** Determine the current size of a file in bytes
+*/
+static int unixFileSize(sqlite3_file *id, i64 *pSize){
+ int rc;
+ struct stat buf;
+ assert( id );
+ rc = osFstat(((unixFile*)id)->h, &buf);
+ SimulateIOError( rc=1 );
+ if( rc!=0 ){
+ storeLastErrno((unixFile*)id, errno);
+ return SQLITE_IOERR_FSTAT;
+ }
+ *pSize = buf.st_size;
+
+ /* When opening a zero-size database, the findInodeInfo() procedure
+ ** writes a single byte into that file in order to work around a bug
+ ** in the OS-X msdos filesystem. In order to avoid problems with upper
+ ** layers, we need to report this file size as zero even though it is
+ ** really 1. Ticket #3260.
+ */
+ if( *pSize==1 ) *pSize = 0;
+
+
+ return SQLITE_OK;
+}
+
+#if SQLITE_ENABLE_LOCKING_STYLE && defined(__APPLE__)
+/*
+** Handler for proxy-locking file-control verbs. Defined below in the
+** proxying locking division.
+*/
+static int proxyFileControl(sqlite3_file*,int,void*);
+#endif
+
+/*
+** This function is called to handle the SQLITE_FCNTL_SIZE_HINT
+** file-control operation. Enlarge the database to nBytes in size
+** (rounded up to the next chunk-size). If the database is already
+** nBytes or larger, this routine is a no-op.
+*/
+static int fcntlSizeHint(unixFile *pFile, i64 nByte){
+ if( pFile->szChunk>0 ){
+ i64 nSize; /* Required file size */
+ struct stat buf; /* Used to hold return values of fstat() */
+
+ if( osFstat(pFile->h, &buf) ){
+ return SQLITE_IOERR_FSTAT;
+ }
+
+ nSize = ((nByte+pFile->szChunk-1) / pFile->szChunk) * pFile->szChunk;
+ if( nSize>(i64)buf.st_size ){
+
+#if defined(HAVE_POSIX_FALLOCATE) && HAVE_POSIX_FALLOCATE
+      /* The code below handles the return value of osFallocate()
+      ** correctly. posix_fallocate() is defined to "return zero on
+      ** success, or an error number on failure". See the manpage. */
+ int err;
+ do{
+ err = osFallocate(pFile->h, buf.st_size, nSize-buf.st_size);
+ }while( err==EINTR );
+ if( err && err!=EINVAL ) return SQLITE_IOERR_WRITE;
+#else
+ /* If the OS does not have posix_fallocate(), fake it. Write a
+ ** single byte to the last byte in each block that falls entirely
+ ** within the extended region. Then, if required, a single byte
+ ** at offset (nSize-1), to set the size of the file correctly.
+ ** This is a similar technique to that used by glibc on systems
+ ** that do not have a real fallocate() call.
+ */
+ int nBlk = buf.st_blksize; /* File-system block size */
+ int nWrite = 0; /* Number of bytes written by seekAndWrite */
+ i64 iWrite; /* Next offset to write to */
+
+ iWrite = (buf.st_size/nBlk)*nBlk + nBlk - 1;
+ assert( iWrite>=buf.st_size );
+ assert( ((iWrite+1)%nBlk)==0 );
+ for(/*no-op*/; iWrite<nSize+nBlk-1; iWrite+=nBlk ){
+ if( iWrite>=nSize ) iWrite = nSize - 1;
+ nWrite = seekAndWrite(pFile, iWrite, "", 1);
+ if( nWrite!=1 ) return SQLITE_IOERR_WRITE;
+ }
+#endif
+ }
+ }
+
+#if SQLITE_MAX_MMAP_SIZE>0
+ if( pFile->mmapSizeMax>0 && nByte>pFile->mmapSize ){
+ int rc;
+ if( pFile->szChunk<=0 ){
+ if( robust_ftruncate(pFile->h, nByte) ){
+ storeLastErrno(pFile, errno);
+ return unixLogError(SQLITE_IOERR_TRUNCATE, "ftruncate", pFile->zPath);
+ }
+ }
+
+ rc = unixMapfile(pFile, nByte);
+ return rc;
+ }
+#endif
+
+ return SQLITE_OK;
+}
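+
+/* Worked example of the fallback loop above, assuming a 4096-byte block
+** size, an existing file of 1000 bytes and a target size of 10000 bytes:
+**
+**   iWrite starts at (1000/4096)*4096 + 4095 = 4095
+**   1st write: offset 4095   (file grows to 4096)
+**   2nd write: offset 8191   (file grows to 8192)
+**   3rd pass:  iWrite=12287 >= nSize, clamped to 9999  (file = 10000)
+**
+** Each one-byte write forces the filesystem to allocate the enclosing
+** block, approximating posix_fallocate() on systems that lack it.
+*/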
+
+/*
+** If *pArg is initially negative then this is a query. Set *pArg to
+** 1 or 0 depending on whether or not the mask bit of pFile->ctrlFlags
+** is set.
+**
+** If *pArg is 0 or 1, then clear or set the mask bit of pFile->ctrlFlags.
+*/
+static void unixModeBit(unixFile *pFile, unsigned char mask, int *pArg){
+ if( *pArg<0 ){
+ *pArg = (pFile->ctrlFlags & mask)!=0;
+ }else if( (*pArg)==0 ){
+ pFile->ctrlFlags &= ~mask;
+ }else{
+ pFile->ctrlFlags |= mask;
+ }
+}
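+
+/* Usage sketch for unixModeBit() (illustrative, not upstream code). The
+** same call site can query, set, or clear a flag depending on the value
+** initially stored in the argument:
+**
+**   int q = -1;
+**   unixModeBit(pFile, UNIXFILE_PSOW, &q);   (query: q becomes 0 or 1)
+**   q = 1;
+**   unixModeBit(pFile, UNIXFILE_PSOW, &q);   (sets UNIXFILE_PSOW)
+**   q = 0;
+**   unixModeBit(pFile, UNIXFILE_PSOW, &q);   (clears UNIXFILE_PSOW)
+*/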
+
+/* Forward declaration */
+static int unixGetTempname(int nBuf, char *zBuf);
+
+/*
+** Information and control of an open file handle.
+*/
+static int unixFileControl(sqlite3_file *id, int op, void *pArg){
+ unixFile *pFile = (unixFile*)id;
+ switch( op ){
+#if defined(__linux__) && defined(SQLITE_ENABLE_BATCH_ATOMIC_WRITE)
+ case SQLITE_FCNTL_BEGIN_ATOMIC_WRITE: {
+ int rc = osIoctl(pFile->h, F2FS_IOC_START_ATOMIC_WRITE);
+ return rc ? SQLITE_IOERR_BEGIN_ATOMIC : SQLITE_OK;
+ }
+ case SQLITE_FCNTL_COMMIT_ATOMIC_WRITE: {
+ int rc = osIoctl(pFile->h, F2FS_IOC_COMMIT_ATOMIC_WRITE);
+ return rc ? SQLITE_IOERR_COMMIT_ATOMIC : SQLITE_OK;
+ }
+ case SQLITE_FCNTL_ROLLBACK_ATOMIC_WRITE: {
+ int rc = osIoctl(pFile->h, F2FS_IOC_ABORT_VOLATILE_WRITE);
+ return rc ? SQLITE_IOERR_ROLLBACK_ATOMIC : SQLITE_OK;
+ }
+#endif /* __linux__ && SQLITE_ENABLE_BATCH_ATOMIC_WRITE */
+
+ case SQLITE_FCNTL_LOCKSTATE: {
+ *(int*)pArg = pFile->eFileLock;
+ return SQLITE_OK;
+ }
+ case SQLITE_FCNTL_LAST_ERRNO: {
+ *(int*)pArg = pFile->lastErrno;
+ return SQLITE_OK;
+ }
+ case SQLITE_FCNTL_CHUNK_SIZE: {
+ pFile->szChunk = *(int *)pArg;
+ return SQLITE_OK;
+ }
+ case SQLITE_FCNTL_SIZE_HINT: {
+ int rc;
+ SimulateIOErrorBenign(1);
+ rc = fcntlSizeHint(pFile, *(i64 *)pArg);
+ SimulateIOErrorBenign(0);
+ return rc;
+ }
+ case SQLITE_FCNTL_PERSIST_WAL: {
+ unixModeBit(pFile, UNIXFILE_PERSIST_WAL, (int*)pArg);
+ return SQLITE_OK;
+ }
+ case SQLITE_FCNTL_POWERSAFE_OVERWRITE: {
+ unixModeBit(pFile, UNIXFILE_PSOW, (int*)pArg);
+ return SQLITE_OK;
+ }
+ case SQLITE_FCNTL_VFSNAME: {
+ *(char**)pArg = sqlite3_mprintf("%s", pFile->pVfs->zName);
+ return SQLITE_OK;
+ }
+ case SQLITE_FCNTL_TEMPFILENAME: {
+ char *zTFile = sqlite3_malloc64( pFile->pVfs->mxPathname );
+ if( zTFile ){
+ unixGetTempname(pFile->pVfs->mxPathname, zTFile);
+ *(char**)pArg = zTFile;
+ }
+ return SQLITE_OK;
+ }
+ case SQLITE_FCNTL_HAS_MOVED: {
+ *(int*)pArg = fileHasMoved(pFile);
+ return SQLITE_OK;
+ }
+#ifdef SQLITE_ENABLE_SETLK_TIMEOUT
+ case SQLITE_FCNTL_LOCK_TIMEOUT: {
+ int iOld = pFile->iBusyTimeout;
+ pFile->iBusyTimeout = *(int*)pArg;
+ *(int*)pArg = iOld;
+ return SQLITE_OK;
+ }
+#endif
+#if SQLITE_MAX_MMAP_SIZE>0
+ case SQLITE_FCNTL_MMAP_SIZE: {
+ i64 newLimit = *(i64*)pArg;
+ int rc = SQLITE_OK;
+ if( newLimit>sqlite3GlobalConfig.mxMmap ){
+ newLimit = sqlite3GlobalConfig.mxMmap;
+ }
+
+ /* The value of newLimit may be eventually cast to (size_t) and passed
+ ** to mmap(). Restrict its value to 2GB if (size_t) is not at least a
+ ** 64-bit type. */
+ if( newLimit>0 && sizeof(size_t)<8 ){
+ newLimit = (newLimit & 0x7FFFFFFF);
+ }
+
+ *(i64*)pArg = pFile->mmapSizeMax;
+ if( newLimit>=0 && newLimit!=pFile->mmapSizeMax && pFile->nFetchOut==0 ){
+ pFile->mmapSizeMax = newLimit;
+ if( pFile->mmapSize>0 ){
+ unixUnmapfile(pFile);
+ rc = unixMapfile(pFile, -1);
+ }
+ }
+ return rc;
+ }
+#endif
+#ifdef SQLITE_DEBUG
+ /* The pager calls this method to signal that it has done
+ ** a rollback and that the database is therefore unchanged and
+    ** hence it is OK for the transaction change counter to be
+ ** unchanged.
+ */
+ case SQLITE_FCNTL_DB_UNCHANGED: {
+ ((unixFile*)id)->dbUpdate = 0;
+ return SQLITE_OK;
+ }
+#endif
+#if SQLITE_ENABLE_LOCKING_STYLE && defined(__APPLE__)
+ case SQLITE_FCNTL_SET_LOCKPROXYFILE:
+ case SQLITE_FCNTL_GET_LOCKPROXYFILE: {
+ return proxyFileControl(id,op,pArg);
+ }
+#endif /* SQLITE_ENABLE_LOCKING_STYLE && defined(__APPLE__) */
+ }
+ return SQLITE_NOTFOUND;
+}
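+
+/* These opcodes are normally reached through the public
+** sqlite3_file_control() interface. An illustrative usage sketch, with
+** error handling omitted and "db" assumed to be an open connection on
+** whose "main" database the controls are invoked:
+**
+**   int szChunk = 1024*1024;
+**   sqlite3_file_control(db, "main", SQLITE_FCNTL_CHUNK_SIZE, &szChunk);
+**
+**   char *zVfs = 0;
+**   sqlite3_file_control(db, "main", SQLITE_FCNTL_VFSNAME, &zVfs);
+**   ...
+**   sqlite3_free(zVfs);   (the returned string is owned by the caller)
+*/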
+
+/*
+** If pFd->sectorSize is non-zero when this function is called, it is a
+** no-op. Otherwise, the values of pFd->sectorSize and
+** pFd->deviceCharacteristics are set according to the file-system
+** characteristics.
+**
+** There are two versions of this function. One for QNX and one for all
+** other systems.
+*/
+#ifndef __QNXNTO__
+static void setDeviceCharacteristics(unixFile *pFd){
+ assert( pFd->deviceCharacteristics==0 || pFd->sectorSize!=0 );
+ if( pFd->sectorSize==0 ){
+#if defined(__linux__) && defined(SQLITE_ENABLE_BATCH_ATOMIC_WRITE)
+ int res;
+ u32 f = 0;
+
+ /* Check for support for F2FS atomic batch writes. */
+ res = osIoctl(pFd->h, F2FS_IOC_GET_FEATURES, &f);
+ if( res==0 && (f & F2FS_FEATURE_ATOMIC_WRITE) ){
+ pFd->deviceCharacteristics = SQLITE_IOCAP_BATCH_ATOMIC;
+ }
+#endif /* __linux__ && SQLITE_ENABLE_BATCH_ATOMIC_WRITE */
+
+ /* Set the POWERSAFE_OVERWRITE flag if requested. */
+ if( pFd->ctrlFlags & UNIXFILE_PSOW ){
+ pFd->deviceCharacteristics |= SQLITE_IOCAP_POWERSAFE_OVERWRITE;
+ }
+
+ pFd->sectorSize = SQLITE_DEFAULT_SECTOR_SIZE;
+ }
+}
+#else
+#include <sys/dcmd_blk.h>
+#include <sys/statvfs.h>
+static void setDeviceCharacteristics(unixFile *pFile){
+ if( pFile->sectorSize == 0 ){
+ struct statvfs fsInfo;
+
+ /* Set defaults for non-supported filesystems */
+ pFile->sectorSize = SQLITE_DEFAULT_SECTOR_SIZE;
+ pFile->deviceCharacteristics = 0;
+ if( fstatvfs(pFile->h, &fsInfo) == -1 ) {
+ return;
+ }
+
+ if( !strcmp(fsInfo.f_basetype, "tmp") ) {
+ pFile->sectorSize = fsInfo.f_bsize;
+ pFile->deviceCharacteristics =
+ SQLITE_IOCAP_ATOMIC4K | /* All ram filesystem writes are atomic */
+ SQLITE_IOCAP_SAFE_APPEND | /* growing the file does not occur until
+ ** the write succeeds */
+ SQLITE_IOCAP_SEQUENTIAL | /* The ram filesystem has no write behind
+ ** so it is ordered */
+ 0;
+ }else if( strstr(fsInfo.f_basetype, "etfs") ){
+ pFile->sectorSize = fsInfo.f_bsize;
+ pFile->deviceCharacteristics =
+ /* etfs cluster size writes are atomic */
+ (pFile->sectorSize / 512 * SQLITE_IOCAP_ATOMIC512) |
+ SQLITE_IOCAP_SAFE_APPEND | /* growing the file does not occur until
+ ** the write succeeds */
+ SQLITE_IOCAP_SEQUENTIAL | /* The ram filesystem has no write behind
+ ** so it is ordered */
+ 0;
+ }else if( !strcmp(fsInfo.f_basetype, "qnx6") ){
+ pFile->sectorSize = fsInfo.f_bsize;
+ pFile->deviceCharacteristics =
+ SQLITE_IOCAP_ATOMIC | /* All filesystem writes are atomic */
+ SQLITE_IOCAP_SAFE_APPEND | /* growing the file does not occur until
+ ** the write succeeds */
+ SQLITE_IOCAP_SEQUENTIAL | /* The ram filesystem has no write behind
+ ** so it is ordered */
+ 0;
+ }else if( !strcmp(fsInfo.f_basetype, "qnx4") ){
+ pFile->sectorSize = fsInfo.f_bsize;
+ pFile->deviceCharacteristics =
+ /* full bitset of atomics from max sector size and smaller */
+ ((pFile->sectorSize / 512 * SQLITE_IOCAP_ATOMIC512) << 1) - 2 |
+ SQLITE_IOCAP_SEQUENTIAL | /* The ram filesystem has no write behind
+ ** so it is ordered */
+ 0;
+ }else if( strstr(fsInfo.f_basetype, "dos") ){
+ pFile->sectorSize = fsInfo.f_bsize;
+ pFile->deviceCharacteristics =
+ /* full bitset of atomics from max sector size and smaller */
+ ((pFile->sectorSize / 512 * SQLITE_IOCAP_ATOMIC512) << 1) - 2 |
+ SQLITE_IOCAP_SEQUENTIAL | /* The ram filesystem has no write behind
+ ** so it is ordered */
+ 0;
+ }else{
+ pFile->deviceCharacteristics =
+ SQLITE_IOCAP_ATOMIC512 | /* blocks are atomic */
+ SQLITE_IOCAP_SAFE_APPEND | /* growing the file does not occur until
+ ** the write succeeds */
+ 0;
+ }
+ }
+ /* Last chance verification. If the sector size isn't a multiple of 512
+ ** then it isn't valid.*/
+ if( pFile->sectorSize % 512 != 0 ){
+ pFile->deviceCharacteristics = 0;
+ pFile->sectorSize = SQLITE_DEFAULT_SECTOR_SIZE;
+ }
+}
+#endif
+
+/*
+** Return the sector size in bytes of the underlying block device for
+** the specified file. This is almost always 512 bytes, but may be
+** larger for some devices.
+**
+** SQLite code assumes this function cannot fail. It also assumes that
+** if two files are created in the same file-system directory (i.e.
+** a database and its journal file), the sector size will be the
+** same for both.
+*/
+static int unixSectorSize(sqlite3_file *id){
+ unixFile *pFd = (unixFile*)id;
+ setDeviceCharacteristics(pFd);
+ return pFd->sectorSize;
+}
+
+/*
+** Return the device characteristics for the file.
+**
+** This VFS is set up to return SQLITE_IOCAP_POWERSAFE_OVERWRITE by default.
+** However, that choice is controversial since technically the underlying
+** file system does not always provide powersafe overwrites. (In other
+** words, after a power-loss event, parts of the file that were never
+** written might end up being altered.) However, non-PSOW behavior is very,
+** very rare. And asserting PSOW makes a large reduction in the amount
+** of required I/O for journaling, since a lot of padding is eliminated.
+** Hence, while POWERSAFE_OVERWRITE is on by default, there is both a
+** file-control and a URI query parameter available to turn it off.
+*/
+static int unixDeviceCharacteristics(sqlite3_file *id){
+ unixFile *pFd = (unixFile*)id;
+ setDeviceCharacteristics(pFd);
+ return pFd->deviceCharacteristics;
+}
+
+#if !defined(SQLITE_OMIT_WAL) || SQLITE_MAX_MMAP_SIZE>0
+
+/*
+** Return the system page size.
+**
+** This function should not be called directly by other code in this file.
+** Instead, it should be called via macro osGetpagesize().
+*/
+static int unixGetpagesize(void){
+#if OS_VXWORKS
+ return 1024;
+#elif defined(_BSD_SOURCE)
+ return getpagesize();
+#else
+ return (int)sysconf(_SC_PAGESIZE);
+#endif
+}
+
+#endif /* !defined(SQLITE_OMIT_WAL) || SQLITE_MAX_MMAP_SIZE>0 */
+
+#ifndef SQLITE_OMIT_WAL
+
+/*
+** Object used to represent a shared memory buffer.
+**
+** When multiple threads all reference the same wal-index, each thread
+** has its own unixShm object, but they all point to a single instance
+** of this unixShmNode object. In other words, each wal-index is opened
+** only once per process.
+**
+** Each unixShmNode object is connected to a single unixInodeInfo object.
+** We could coalesce this object into unixInodeInfo, but that would mean
+** every open file that does not use shared memory (in other words, most
+** open files) would have to carry around this extra information. So
+** the unixInodeInfo object contains a pointer to this unixShmNode object
+** and the unixShmNode object is created only when needed.
+**
+** unixMutexHeld() must be true when creating or destroying
+** this object or while reading or writing the following fields:
+**
+** nRef
+**
+** The following fields are read-only after the object is created:
+**
+** hShm
+** zFilename
+**
+** Either unixShmNode.pShmMutex must be held or unixShmNode.nRef==0 and
+** unixMutexHeld() is true when reading or writing any other field
+** in this structure.
+*/
+struct unixShmNode {
+ unixInodeInfo *pInode; /* unixInodeInfo that owns this SHM node */
+ sqlite3_mutex *pShmMutex; /* Mutex to access this object */
+ char *zFilename; /* Name of the mmapped file */
+ int hShm; /* Open file descriptor */
+ int szRegion; /* Size of shared-memory regions */
+ u16 nRegion; /* Size of array apRegion */
+ u8 isReadonly; /* True if read-only */
+ u8 isUnlocked; /* True if no DMS lock held */
+ char **apRegion; /* Array of mapped shared-memory regions */
+ int nRef; /* Number of unixShm objects pointing to this */
+ unixShm *pFirst; /* All unixShm objects pointing to this */
+#ifdef SQLITE_DEBUG
+ u8 exclMask; /* Mask of exclusive locks held */
+ u8 sharedMask; /* Mask of shared locks held */
+ u8 nextShmId; /* Next available unixShm.id value */
+#endif
+};
+
+/*
+** Structure used internally by this VFS to record the state of an
+** open shared memory connection.
+**
+** The following fields are initialized when this object is created and
+** are read-only thereafter:
+**
+** unixShm.pShmNode
+** unixShm.id
+**
+** All other fields are read/write. The unixShm.pShmNode->pShmMutex must
+** be held while accessing any read/write fields.
+*/
+struct unixShm {
+ unixShmNode *pShmNode; /* The underlying unixShmNode object */
+ unixShm *pNext; /* Next unixShm with the same unixShmNode */
+ u8 hasMutex; /* True if holding the unixShmNode->pShmMutex */
+ u8 id; /* Id of this connection within its unixShmNode */
+ u16 sharedMask; /* Mask of shared locks held */
+ u16 exclMask; /* Mask of exclusive locks held */
+};
+
+/*
+** Constants used for locking
+*/
+#define UNIX_SHM_BASE ((22+SQLITE_SHM_NLOCK)*4) /* first lock byte */
+#define UNIX_SHM_DMS (UNIX_SHM_BASE+SQLITE_SHM_NLOCK) /* deadman switch */
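+
+/* With the usual SQLITE_SHM_NLOCK value of 8 this places the lock bytes
+** at offsets 120..127 of the *-shm file and the deadman-switch byte at
+** offset 128:
+**
+**   UNIX_SHM_BASE = (22+8)*4 = 120     first of SQLITE_SHM_NLOCK bytes
+**   UNIX_SHM_DMS  = 120+8    = 128     deadman-switch byte
+**
+** These bytes fall in a reserved area of the wal-index header that is
+** never read or written as data, so fcntl() locks on them do not
+** interfere with the shared-memory content itself.
+*/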
+
+/*
+** Apply posix advisory locks for all bytes from ofst through ofst+n-1.
+**
+** The locks are requested with non-blocking fcntl() calls; if a
+** conflicting lock is held, SQLITE_BUSY is returned instead of waiting.
+*/
+static int unixShmSystemLock(
+ unixFile *pFile, /* Open connection to the WAL file */
+ int lockType, /* F_UNLCK, F_RDLCK, or F_WRLCK */
+ int ofst, /* First byte of the locking range */
+ int n /* Number of bytes to lock */
+){
+ unixShmNode *pShmNode; /* Apply locks to this open shared-memory segment */
+ struct flock f; /* The posix advisory locking structure */
+  int rc = SQLITE_OK;             /* Result code from fcntl() */
+
+ /* Access to the unixShmNode object is serialized by the caller */
+ pShmNode = pFile->pInode->pShmNode;
+ assert( pShmNode->nRef==0 || sqlite3_mutex_held(pShmNode->pShmMutex) );
+ assert( pShmNode->nRef>0 || unixMutexHeld() );
+
+ /* Shared locks never span more than one byte */
+ assert( n==1 || lockType!=F_RDLCK );
+
+ /* Locks are within range */
+ assert( n>=1 && n<=SQLITE_SHM_NLOCK );
+
+ if( pShmNode->hShm>=0 ){
+ int res;
+ /* Initialize the locking parameters */
+ f.l_type = lockType;
+ f.l_whence = SEEK_SET;
+ f.l_start = ofst;
+ f.l_len = n;
+ res = osSetPosixAdvisoryLock(pShmNode->hShm, &f, pFile);
+ if( res==-1 ){
+#ifdef SQLITE_ENABLE_SETLK_TIMEOUT
+ rc = (pFile->iBusyTimeout ? SQLITE_BUSY_TIMEOUT : SQLITE_BUSY);
+#else
+ rc = SQLITE_BUSY;
+#endif
+ }
+ }
+
+ /* Update the global lock state and do debug tracing */
+#ifdef SQLITE_DEBUG
+ { u16 mask;
+ OSTRACE(("SHM-LOCK "));
+ mask = ofst>31 ? 0xffff : (1<<(ofst+n)) - (1<<ofst);
+ if( rc==SQLITE_OK ){
+ if( lockType==F_UNLCK ){
+ OSTRACE(("unlock %d ok", ofst));
+ pShmNode->exclMask &= ~mask;
+ pShmNode->sharedMask &= ~mask;
+ }else if( lockType==F_RDLCK ){
+ OSTRACE(("read-lock %d ok", ofst));
+ pShmNode->exclMask &= ~mask;
+ pShmNode->sharedMask |= mask;
+ }else{
+ assert( lockType==F_WRLCK );
+ OSTRACE(("write-lock %d ok", ofst));
+ pShmNode->exclMask |= mask;
+ pShmNode->sharedMask &= ~mask;
+ }
+ }else{
+ if( lockType==F_UNLCK ){
+ OSTRACE(("unlock %d failed", ofst));
+ }else if( lockType==F_RDLCK ){
+ OSTRACE(("read-lock failed"));
+ }else{
+ assert( lockType==F_WRLCK );
+ OSTRACE(("write-lock %d failed", ofst));
+ }
+ }
+ OSTRACE((" - afterwards %03x,%03x\n",
+ pShmNode->sharedMask, pShmNode->exclMask));
+ }
+#endif
+
+ return rc;
+}
+
+/*
+** Return the minimum number of 32KB shm regions that should be mapped at
+** a time, assuming that each mapping must be an integer multiple of the
+** current system page-size.
+**
+** Usually, this is 1. The exception seems to be systems that are configured
+** to use 64KB pages - in this case each mapping must cover at least two
+** shm regions.
+*/
+static int unixShmRegionPerMap(void){
+ int shmsz = 32*1024; /* SHM region size */
+ int pgsz = osGetpagesize(); /* System page size */
+ assert( ((pgsz-1)&pgsz)==0 ); /* Page size must be a power of 2 */
+ if( pgsz<shmsz ) return 1;
+ return pgsz/shmsz;
+}
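+
+/* Worked example of the arithmetic above: with 4KB or 8KB system pages
+** the function returns 1, because each 32KB region is already a whole
+** number of pages. With 64KB pages it returns 64/32 = 2, so regions are
+** mapped in pairs and every mapping length stays page-aligned.
+*/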
+
+/*
+** Purge the unixShmNodeList list of all entries with unixShmNode.nRef==0.
+**
+** This is not a VFS shared-memory method; it is a utility function called
+** by VFS shared-memory methods.
+*/
+static void unixShmPurge(unixFile *pFd){
+ unixShmNode *p = pFd->pInode->pShmNode;
+ assert( unixMutexHeld() );
+ if( p && ALWAYS(p->nRef==0) ){
+ int nShmPerMap = unixShmRegionPerMap();
+ int i;
+ assert( p->pInode==pFd->pInode );
+ sqlite3_mutex_free(p->pShmMutex);
+ for(i=0; i<p->nRegion; i+=nShmPerMap){
+ if( p->hShm>=0 ){
+ osMunmap(p->apRegion[i], p->szRegion);
+ }else{
+ sqlite3_free(p->apRegion[i]);
+ }
+ }
+ sqlite3_free(p->apRegion);
+ if( p->hShm>=0 ){
+ robust_close(pFd, p->hShm, __LINE__);
+ p->hShm = -1;
+ }
+ p->pInode->pShmNode = 0;
+ sqlite3_free(p);
+ }
+}
+
+/*
+** The DMS lock has not yet been taken on shm file pShmNode. Attempt to
+** take it now. Return SQLITE_OK if successful, or an SQLite error
+** code otherwise.
+**
+** If the DMS cannot be locked because this is a readonly_shm=1
+** connection and no other process already holds a lock, return
+** SQLITE_READONLY_CANTINIT and set pShmNode->isUnlocked=1.
+*/
+static int unixLockSharedMemory(unixFile *pDbFd, unixShmNode *pShmNode){
+ struct flock lock;
+ int rc = SQLITE_OK;
+
+ /* Use F_GETLK to determine the locks other processes are holding
+ ** on the DMS byte. If it indicates that another process is holding
+ ** a SHARED lock, then this process may also take a SHARED lock
+ ** and proceed with opening the *-shm file.
+ **
+ ** Or, if no other process is holding any lock, then this process
+ ** is the first to open it. In this case take an EXCLUSIVE lock on the
+ ** DMS byte and truncate the *-shm file to zero bytes in size. Then
+ ** downgrade to a SHARED lock on the DMS byte.
+ **
+ ** If another process is holding an EXCLUSIVE lock on the DMS byte,
+ ** return SQLITE_BUSY to the caller (it will try again). An earlier
+ ** version of this code attempted the SHARED lock at this point. But
+ ** this introduced a subtle race condition: if the process holding
+ ** EXCLUSIVE failed just before truncating the *-shm file, then this
+ ** process might open and use the *-shm file without truncating it.
+ ** And if the *-shm file has been corrupted by a power failure or
+ ** system crash, the database itself may also become corrupt. */
+ lock.l_whence = SEEK_SET;
+ lock.l_start = UNIX_SHM_DMS;
+ lock.l_len = 1;
+ lock.l_type = F_WRLCK;
+ if( osFcntl(pShmNode->hShm, F_GETLK, &lock)!=0 ) {
+ rc = SQLITE_IOERR_LOCK;
+ }else if( lock.l_type==F_UNLCK ){
+ if( pShmNode->isReadonly ){
+ pShmNode->isUnlocked = 1;
+ rc = SQLITE_READONLY_CANTINIT;
+ }else{
+ rc = unixShmSystemLock(pDbFd, F_WRLCK, UNIX_SHM_DMS, 1);
+ /* The first connection to attach must truncate the -shm file. We
+ ** truncate to 3 bytes (an arbitrary small number, less than the
+ ** -shm header size) rather than 0 as a system debugging aid, to
+ ** help detect if a -shm file truncation is legitimate or is the work
+      ** of a rogue process. */
+ if( rc==SQLITE_OK && robust_ftruncate(pShmNode->hShm, 3) ){
+ rc = unixLogError(SQLITE_IOERR_SHMOPEN,"ftruncate",pShmNode->zFilename);
+ }
+ }
+ }else if( lock.l_type==F_WRLCK ){
+ rc = SQLITE_BUSY;
+ }
+
+ if( rc==SQLITE_OK ){
+ assert( lock.l_type==F_UNLCK || lock.l_type==F_RDLCK );
+ rc = unixShmSystemLock(pDbFd, F_RDLCK, UNIX_SHM_DMS, 1);
+ }
+ return rc;
+}
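+
+/* Illustrative sketch (not part of upstream SQLite): the F_GETLK probe
+** used above, isolated. It asks the kernel whether a write lock on one
+** byte would conflict; on return, l_type is F_UNLCK if the byte is free,
+** otherwise the type of the conflicting lock. probeByteLock() is a
+** hypothetical name; assumes <fcntl.h> and <string.h>, both already
+** included by this file.
+*/
+static int probeByteLock(int fd, off_t iByte){
+  struct flock lk;
+  memset(&lk, 0, sizeof(lk));
+  lk.l_type = F_WRLCK;            /* "would a write lock conflict?" */
+  lk.l_whence = SEEK_SET;
+  lk.l_start = iByte;
+  lk.l_len = 1;
+  if( fcntl(fd, F_GETLK, &lk)!=0 ) return -1;
+  return lk.l_type;               /* F_UNLCK, F_RDLCK or F_WRLCK */
+}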
+
+/*
+** Open a shared-memory area associated with open database file pDbFd.
+** This particular implementation uses mmapped files.
+**
+** The file used to implement shared-memory is in the same directory
+** as the open database file and has the same name as the open database
+** file with the "-shm" suffix added. For example, if the database file
+** is "/home/user1/config.db" then the file that is created and mmapped
+** for shared memory will be called "/home/user1/config.db-shm".
+**
+** Another approach is to use files in /dev/shm or /dev/tmp or
+** some other tmpfs mount. But if a file in a different directory
+** from the database file is used, then differing access permissions
+** or a chroot() might cause two different processes on the same
+** database to end up using different files for shared memory -
+** meaning that their memory would not really be shared - resulting
+** in database corruption. Nevertheless, this tmpfs file usage
+** can be enabled at compile-time using -DSQLITE_SHM_DIRECTORY="/dev/shm"
+** or the equivalent. The use of the SQLITE_SHM_DIRECTORY compile-time
+** option results in an incompatible build of SQLite; if builds of SQLite
+** with differing SQLITE_SHM_DIRECTORY settings attempt to use the
+** same database file at the same time, database corruption will likely
+** result. The SQLITE_SHM_DIRECTORY compile-time option is considered
+** "unsupported" and may go away in a future SQLite release.
+**
+** When opening a new shared-memory file, if no other instances of that
+** file are currently open, in this process or in other processes, then
+** the file must be truncated to zero length or have its header cleared.
+**
+** If the original database file (pDbFd) is using the "unix-excl" VFS
+** that means that an exclusive lock is held on the database file and
+** that no other processes are able to read or write the database. In
+** that case, we do not really need shared memory. No shared memory
+** file is created. The shared memory will be simulated with heap memory.
+*/
+static int unixOpenSharedMemory(unixFile *pDbFd){
+ struct unixShm *p = 0; /* The connection to be opened */
+ struct unixShmNode *pShmNode; /* The underlying mmapped file */
+ int rc = SQLITE_OK; /* Result code */
+ unixInodeInfo *pInode; /* The inode of fd */
+ char *zShm; /* Name of the file used for SHM */
+ int nShmFilename; /* Size of the SHM filename in bytes */
+
+ /* Allocate space for the new unixShm object. */
+ p = sqlite3_malloc64( sizeof(*p) );
+ if( p==0 ) return SQLITE_NOMEM_BKPT;
+ memset(p, 0, sizeof(*p));
+ assert( pDbFd->pShm==0 );
+
+ /* Check to see if a unixShmNode object already exists. Reuse an existing
+ ** one if present. Create a new one if necessary.
+ */
+ assert( unixFileMutexNotheld(pDbFd) );
+ unixEnterMutex();
+ pInode = pDbFd->pInode;
+ pShmNode = pInode->pShmNode;
+ if( pShmNode==0 ){
+ struct stat sStat; /* fstat() info for database file */
+#ifndef SQLITE_SHM_DIRECTORY
+ const char *zBasePath = pDbFd->zPath;
+#endif
+
+ /* Call fstat() to figure out the permissions on the database file. If
+ ** a new *-shm file is created, an attempt will be made to create it
+ ** with the same permissions.
+ */
+ if( osFstat(pDbFd->h, &sStat) ){
+ rc = SQLITE_IOERR_FSTAT;
+ goto shm_open_err;
+ }
+
+#ifdef SQLITE_SHM_DIRECTORY
+ nShmFilename = sizeof(SQLITE_SHM_DIRECTORY) + 31;
+#else
+ nShmFilename = 6 + (int)strlen(zBasePath);
+#endif
+ pShmNode = sqlite3_malloc64( sizeof(*pShmNode) + nShmFilename );
+ if( pShmNode==0 ){
+ rc = SQLITE_NOMEM_BKPT;
+ goto shm_open_err;
+ }
+ memset(pShmNode, 0, sizeof(*pShmNode)+nShmFilename);
+ zShm = pShmNode->zFilename = (char*)&pShmNode[1];
+#ifdef SQLITE_SHM_DIRECTORY
+ sqlite3_snprintf(nShmFilename, zShm,
+ SQLITE_SHM_DIRECTORY "/sqlite-shm-%x-%x",
+ (u32)sStat.st_ino, (u32)sStat.st_dev);
+#else
+ sqlite3_snprintf(nShmFilename, zShm, "%s-shm", zBasePath);
+ sqlite3FileSuffix3(pDbFd->zPath, zShm);
+#endif
+ pShmNode->hShm = -1;
+ pDbFd->pInode->pShmNode = pShmNode;
+ pShmNode->pInode = pDbFd->pInode;
+ if( sqlite3GlobalConfig.bCoreMutex ){
+ pShmNode->pShmMutex = sqlite3_mutex_alloc(SQLITE_MUTEX_FAST);
+ if( pShmNode->pShmMutex==0 ){
+ rc = SQLITE_NOMEM_BKPT;
+ goto shm_open_err;
+ }
+ }
+
+ if( pInode->bProcessLock==0 ){
+ if( 0==sqlite3_uri_boolean(pDbFd->zPath, "readonly_shm", 0) ){
+ pShmNode->hShm = robust_open(zShm, O_RDWR|O_CREAT|O_NOFOLLOW,
+ (sStat.st_mode&0777));
+ }
+ if( pShmNode->hShm<0 ){
+ pShmNode->hShm = robust_open(zShm, O_RDONLY|O_NOFOLLOW,
+ (sStat.st_mode&0777));
+ if( pShmNode->hShm<0 ){
+ rc = unixLogError(SQLITE_CANTOPEN_BKPT, "open", zShm);
+ goto shm_open_err;
+ }
+ pShmNode->isReadonly = 1;
+ }
+
+ /* If this process is running as root, make sure that the SHM file
+ ** is owned by the same user that owns the original database. Otherwise,
+ ** the original owner will not be able to connect.
+ */
+ robustFchown(pShmNode->hShm, sStat.st_uid, sStat.st_gid);
+
+ rc = unixLockSharedMemory(pDbFd, pShmNode);
+ if( rc!=SQLITE_OK && rc!=SQLITE_READONLY_CANTINIT ) goto shm_open_err;
+ }
+ }
+
+ /* Make the new connection a child of the unixShmNode */
+ p->pShmNode = pShmNode;
+#ifdef SQLITE_DEBUG
+ p->id = pShmNode->nextShmId++;
+#endif
+ pShmNode->nRef++;
+ pDbFd->pShm = p;
+ unixLeaveMutex();
+
+ /* The reference count on pShmNode has already been incremented under
+ ** the cover of the unixEnterMutex() mutex and the pointer from the
+ ** new (struct unixShm) object to the pShmNode has been set. All that is
+ ** left to do is to link the new object into the linked list starting
+ ** at pShmNode->pFirst. This must be done while holding the
+ ** pShmNode->pShmMutex.
+ */
+ sqlite3_mutex_enter(pShmNode->pShmMutex);
+ p->pNext = pShmNode->pFirst;
+ pShmNode->pFirst = p;
+ sqlite3_mutex_leave(pShmNode->pShmMutex);
+ return rc;
+
+ /* Jump here on any error */
+shm_open_err:
+ unixShmPurge(pDbFd); /* This call frees pShmNode if required */
+ sqlite3_free(p);
+ unixLeaveMutex();
+ return rc;
+}
+
+/*
+** This function is called to obtain a pointer to region iRegion of the
+** shared-memory associated with the database file fd. Shared-memory regions
+** are numbered starting from zero. Each shared-memory region is szRegion
+** bytes in size.
+**
+** If an error occurs, an error code is returned and *pp is set to NULL.
+**
+** Otherwise, if the bExtend parameter is 0 and the requested shared-memory
+** region has not been allocated (by any client, including one running in a
+** separate process), then *pp is set to NULL and SQLITE_OK returned. If
+** bExtend is non-zero and the requested shared-memory region has not yet
+** been allocated, it is allocated by this function.
+**
+** If the shared-memory region has already been allocated or is allocated by
+** this call as described above, then it is mapped into this process's
+** address space (if it is not already), *pp is set to point to the mapped
+** memory and SQLITE_OK returned.
+*/
+static int unixShmMap(
+ sqlite3_file *fd, /* Handle open on database file */
+ int iRegion, /* Region to retrieve */
+ int szRegion, /* Size of regions */
+ int bExtend, /* True to extend file if necessary */
+ void volatile **pp /* OUT: Mapped memory */
+){
+ unixFile *pDbFd = (unixFile*)fd;
+ unixShm *p;
+ unixShmNode *pShmNode;
+ int rc = SQLITE_OK;
+ int nShmPerMap = unixShmRegionPerMap();
+ int nReqRegion;
+
+ /* If the shared-memory file has not yet been opened, open it now. */
+ if( pDbFd->pShm==0 ){
+ rc = unixOpenSharedMemory(pDbFd);
+ if( rc!=SQLITE_OK ) return rc;
+ }
+
+ p = pDbFd->pShm;
+ pShmNode = p->pShmNode;
+ sqlite3_mutex_enter(pShmNode->pShmMutex);
+ if( pShmNode->isUnlocked ){
+ rc = unixLockSharedMemory(pDbFd, pShmNode);
+ if( rc!=SQLITE_OK ) goto shmpage_out;
+ pShmNode->isUnlocked = 0;
+ }
+ assert( szRegion==pShmNode->szRegion || pShmNode->nRegion==0 );
+ assert( pShmNode->pInode==pDbFd->pInode );
+ assert( pShmNode->hShm>=0 || pDbFd->pInode->bProcessLock==1 );
+ assert( pShmNode->hShm<0 || pDbFd->pInode->bProcessLock==0 );
+
+ /* Minimum number of regions required to be mapped. */
+ nReqRegion = ((iRegion+nShmPerMap) / nShmPerMap) * nShmPerMap;
+
+ if( pShmNode->nRegion<nReqRegion ){
+ char **apNew; /* New apRegion[] array */
+ int nByte = nReqRegion*szRegion; /* Minimum required file size */
+ struct stat sStat; /* Used by fstat() */
+
+ pShmNode->szRegion = szRegion;
+
+ if( pShmNode->hShm>=0 ){
+      /* The requested region is not mapped into this process's address
+      ** space.
+ ** Check to see if it has been allocated (i.e. if the wal-index file is
+ ** large enough to contain the requested region).
+ */
+ if( osFstat(pShmNode->hShm, &sStat) ){
+ rc = SQLITE_IOERR_SHMSIZE;
+ goto shmpage_out;
+ }
+
+ if( sStat.st_size<nByte ){
+ /* The requested memory region does not exist. If bExtend is set to
+ ** false, exit early. *pp will be set to NULL and SQLITE_OK returned.
+ */
+ if( !bExtend ){
+ goto shmpage_out;
+ }
+
+ /* Alternatively, if bExtend is true, extend the file. Do this by
+ ** writing a single byte to the end of each (OS) page being
+ ** allocated or extended. Technically, we need only write to the
+ ** last page in order to extend the file. But writing to all new
+ ** pages forces the OS to allocate them immediately, which reduces
+ ** the chances of SIGBUS while accessing the mapped region later on.
+ */
+ else{
+ static const int pgsz = 4096;
+ int iPg;
+
+ /* Write to the last byte of each newly allocated or extended page */
+ assert( (nByte % pgsz)==0 );
+ for(iPg=(sStat.st_size/pgsz); iPg<(nByte/pgsz); iPg++){
+ int x = 0;
+ if( seekAndWriteFd(pShmNode->hShm, iPg*pgsz + pgsz-1,"",1,&x)!=1 ){
+ const char *zFile = pShmNode->zFilename;
+ rc = unixLogError(SQLITE_IOERR_SHMSIZE, "write", zFile);
+ goto shmpage_out;
+ }
+ }
+ }
+ }
+ }
+
+    /* Map the requested memory region into this process's address space. */
+ apNew = (char **)sqlite3_realloc(
+ pShmNode->apRegion, nReqRegion*sizeof(char *)
+ );
+ if( !apNew ){
+ rc = SQLITE_IOERR_NOMEM_BKPT;
+ goto shmpage_out;
+ }
+ pShmNode->apRegion = apNew;
+ while( pShmNode->nRegion<nReqRegion ){
+ int nMap = szRegion*nShmPerMap;
+ int i;
+ void *pMem;
+ if( pShmNode->hShm>=0 ){
+ pMem = osMmap(0, nMap,
+ pShmNode->isReadonly ? PROT_READ : PROT_READ|PROT_WRITE,
+ MAP_SHARED, pShmNode->hShm, szRegion*(i64)pShmNode->nRegion
+ );
+ if( pMem==MAP_FAILED ){
+ rc = unixLogError(SQLITE_IOERR_SHMMAP, "mmap", pShmNode->zFilename);
+ goto shmpage_out;
+ }
+ }else{
+ pMem = sqlite3_malloc64(nMap);
+ if( pMem==0 ){
+ rc = SQLITE_NOMEM_BKPT;
+ goto shmpage_out;
+ }
+ memset(pMem, 0, nMap);
+ }
+
+ for(i=0; i<nShmPerMap; i++){
+ pShmNode->apRegion[pShmNode->nRegion+i] = &((char*)pMem)[szRegion*i];
+ }
+ pShmNode->nRegion += nShmPerMap;
+ }
+ }
+
+shmpage_out:
+ if( pShmNode->nRegion>iRegion ){
+ *pp = pShmNode->apRegion[iRegion];
+ }else{
+ *pp = 0;
+ }
+ if( pShmNode->isReadonly && rc==SQLITE_OK ) rc = SQLITE_READONLY;
+ sqlite3_mutex_leave(pShmNode->pShmMutex);
+ return rc;
+}
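+
+/*
+** The extend-by-writing pattern used in unixShmMap() above, reduced to
+** plain POSIX calls as an illustrative sketch (compiled out; the helper
+** name growFileByPages is hypothetical, not part of this file):
+*/
+#if 0
+#include <sys/stat.h>
+#include <unistd.h>
+
+/* Grow the file open on fd to nByte bytes (a multiple of the page size)
+** by writing one byte at the last offset of every new 4KB page. Forcing
+** the OS to allocate each page up front reduces the chance of a SIGBUS
+** when the region is later accessed through mmap(). 0=ok, -1=error. */
+static int growFileByPages(int fd, off_t nByte){
+  static const off_t pgsz = 4096;
+  struct stat st;
+  off_t iPg;
+  if( fstat(fd, &st) ) return -1;
+  for(iPg=st.st_size/pgsz; iPg<nByte/pgsz; iPg++){
+    if( pwrite(fd, "", 1, iPg*pgsz + pgsz-1)!=1 ) return -1;
+  }
+  return 0;
+}
+#endif /* illustrative sketch */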
+
+/*
+** Change the lock state for a shared-memory segment.
+**
+** Note that the relationship between SHARED and EXCLUSIVE locks is a little
+** different here than in posix. In xShmLock(), one can go from unlocked
+** to shared and back or from unlocked to exclusive and back. But one may
+** not go from shared to exclusive or from exclusive to shared.
+*/
+static int unixShmLock(
+ sqlite3_file *fd, /* Database file holding the shared memory */
+ int ofst, /* First lock to acquire or release */
+ int n, /* Number of locks to acquire or release */
+ int flags /* What to do with the lock */
+){
+ unixFile *pDbFd = (unixFile*)fd; /* Connection holding shared memory */
+ unixShm *p = pDbFd->pShm; /* The shared memory being locked */
+ unixShm *pX; /* For looping over all siblings */
+ unixShmNode *pShmNode = p->pShmNode; /* The underlying file iNode */
+ int rc = SQLITE_OK; /* Result code */
+ u16 mask; /* Mask of locks to take or release */
+
+ assert( pShmNode==pDbFd->pInode->pShmNode );
+ assert( pShmNode->pInode==pDbFd->pInode );
+ assert( ofst>=0 && ofst+n<=SQLITE_SHM_NLOCK );
+ assert( n>=1 );
+ assert( flags==(SQLITE_SHM_LOCK | SQLITE_SHM_SHARED)
+ || flags==(SQLITE_SHM_LOCK | SQLITE_SHM_EXCLUSIVE)
+ || flags==(SQLITE_SHM_UNLOCK | SQLITE_SHM_SHARED)
+ || flags==(SQLITE_SHM_UNLOCK | SQLITE_SHM_EXCLUSIVE) );
+ assert( n==1 || (flags & SQLITE_SHM_EXCLUSIVE)!=0 );
+ assert( pShmNode->hShm>=0 || pDbFd->pInode->bProcessLock==1 );
+ assert( pShmNode->hShm<0 || pDbFd->pInode->bProcessLock==0 );
+
+  /* Check that, if this is to be a blocking lock, no locks that occur later
+ ** in the following list than the lock being obtained are already held:
+ **
+ ** 1. Checkpointer lock (ofst==1).
+ ** 2. Write lock (ofst==0).
+ ** 3. Read locks (ofst>=3 && ofst<SQLITE_SHM_NLOCK).
+ **
+ ** In other words, if this is a blocking lock, none of the locks that
+ ** occur later in the above list than the lock being obtained may be
+ ** held.
+ **
+ ** It is not permitted to block on the RECOVER lock.
+ */
+#ifdef SQLITE_ENABLE_SETLK_TIMEOUT
+ assert( (flags & SQLITE_SHM_UNLOCK) || pDbFd->iBusyTimeout==0 || (
+ (ofst!=2) /* not RECOVER */
+ && (ofst!=1 || (p->exclMask|p->sharedMask)==0)
+ && (ofst!=0 || (p->exclMask|p->sharedMask)<3)
+ && (ofst<3 || (p->exclMask|p->sharedMask)<(1<<ofst))
+ ));
+#endif
+
+ mask = (1<<(ofst+n)) - (1<<ofst);
+ assert( n>1 || mask==(1<<ofst) );
+ sqlite3_mutex_enter(pShmNode->pShmMutex);
+ if( flags & SQLITE_SHM_UNLOCK ){
+ u16 allMask = 0; /* Mask of locks held by siblings */
+
+ /* See if any siblings hold this same lock */
+ for(pX=pShmNode->pFirst; pX; pX=pX->pNext){
+ if( pX==p ) continue;
+ assert( (pX->exclMask & (p->exclMask|p->sharedMask))==0 );
+ allMask |= pX->sharedMask;
+ }
+
+ /* Unlock the system-level locks */
+ if( (mask & allMask)==0 ){
+ rc = unixShmSystemLock(pDbFd, F_UNLCK, ofst+UNIX_SHM_BASE, n);
+ }else{
+ rc = SQLITE_OK;
+ }
+
+ /* Undo the local locks */
+ if( rc==SQLITE_OK ){
+ p->exclMask &= ~mask;
+ p->sharedMask &= ~mask;
+ }
+ }else if( flags & SQLITE_SHM_SHARED ){
+ u16 allShared = 0; /* Union of locks held by connections other than "p" */
+
+ /* Find out which shared locks are already held by sibling connections.
+ ** If any sibling already holds an exclusive lock, go ahead and return
+ ** SQLITE_BUSY.
+ */
+ for(pX=pShmNode->pFirst; pX; pX=pX->pNext){
+ if( (pX->exclMask & mask)!=0 ){
+ rc = SQLITE_BUSY;
+ break;
+ }
+ allShared |= pX->sharedMask;
+ }
+
+ /* Get shared locks at the system level, if necessary */
+ if( rc==SQLITE_OK ){
+ if( (allShared & mask)==0 ){
+ rc = unixShmSystemLock(pDbFd, F_RDLCK, ofst+UNIX_SHM_BASE, n);
+ }else{
+ rc = SQLITE_OK;
+ }
+ }
+
+ /* Get the local shared locks */
+ if( rc==SQLITE_OK ){
+ p->sharedMask |= mask;
+ }
+ }else{
+ /* Make sure no sibling connections hold locks that will block this
+ ** lock. If any do, return SQLITE_BUSY right away.
+ */
+ for(pX=pShmNode->pFirst; pX; pX=pX->pNext){
+ if( (pX->exclMask & mask)!=0 || (pX->sharedMask & mask)!=0 ){
+ rc = SQLITE_BUSY;
+ break;
+ }
+ }
+
+ /* Get the exclusive locks at the system level. Then if successful
+ ** also mark the local connection as being locked.
+ */
+ if( rc==SQLITE_OK ){
+ rc = unixShmSystemLock(pDbFd, F_WRLCK, ofst+UNIX_SHM_BASE, n);
+ if( rc==SQLITE_OK ){
+ assert( (p->sharedMask & mask)==0 );
+ p->exclMask |= mask;
+ }
+ }
+ }
+ sqlite3_mutex_leave(pShmNode->pShmMutex);
+ OSTRACE(("SHM-LOCK shmid-%d, pid-%d got %03x,%03x\n",
+ p->id, osGetpid(0), p->sharedMask, p->exclMask));
+ return rc;
+}
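+
+/*
+** The lock-slot bitmask above is computed as (1<<(ofst+n)) - (1<<ofst),
+** which sets exactly the n bits for slots ofst..ofst+n-1. A minimal
+** demonstration (illustrative sketch, compiled out; shmLockMask is a
+** hypothetical helper):
+*/
+#if 0
+#include <assert.h>
+
+static unsigned shmLockMask(int ofst, int n){
+  return (1u<<(ofst+n)) - (1u<<ofst);
+}
+
+static void shmLockMaskDemo(void){
+  assert( shmLockMask(0,1)==0x01 );  /* the write lock slot  */
+  assert( shmLockMask(3,2)==0x18 );  /* two read-lock slots  */
+  assert( shmLockMask(0,8)==0xff );  /* every slot at once   */
+}
+#endif /* illustrative sketch */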
+
+/*
+** Implement a memory barrier or memory fence on shared memory.
+**
+** All loads and stores begun before the barrier must complete before
+** any load or store begun after the barrier.
+*/
+static void unixShmBarrier(
+ sqlite3_file *fd /* Database file holding the shared memory */
+){
+ UNUSED_PARAMETER(fd);
+ sqlite3MemoryBarrier(); /* compiler-defined memory barrier */
+ assert( fd->pMethods->xLock==nolockLock
+ || unixFileMutexNotheld((unixFile*)fd)
+ );
+  unixEnterMutex();             /* Also take the global mutex, for redundancy */
+ unixLeaveMutex();
+}
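+
+/*
+** On compilers with C11 atomics, a fence with the ordering guarantee
+** described above can be written as follows (illustrative sketch only,
+** compiled out; sqlite3MemoryBarrier() itself is configured elsewhere
+** in this file and may use a different mechanism):
+*/
+#if 0
+#include <stdatomic.h>
+
+static void fullMemoryBarrier(void){
+  /* Loads/stores sequenced before the fence complete before any
+  ** load/store sequenced after it. */
+  atomic_thread_fence(memory_order_seq_cst);
+}
+#endif /* illustrative sketch */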
+
+/*
+** Close a connection to shared-memory. Delete the underlying
+** storage if deleteFlag is true.
+**
+** If there is no shared memory associated with the connection then this
+** routine is a harmless no-op.
+*/
+static int unixShmUnmap(
+ sqlite3_file *fd, /* The underlying database file */
+ int deleteFlag /* Delete shared-memory if true */
+){
+ unixShm *p; /* The connection to be closed */
+ unixShmNode *pShmNode; /* The underlying shared-memory file */
+ unixShm **pp; /* For looping over sibling connections */
+ unixFile *pDbFd; /* The underlying database file */
+
+ pDbFd = (unixFile*)fd;
+ p = pDbFd->pShm;
+ if( p==0 ) return SQLITE_OK;
+ pShmNode = p->pShmNode;
+
+ assert( pShmNode==pDbFd->pInode->pShmNode );
+ assert( pShmNode->pInode==pDbFd->pInode );
+
+ /* Remove connection p from the set of connections associated
+ ** with pShmNode */
+ sqlite3_mutex_enter(pShmNode->pShmMutex);
+ for(pp=&pShmNode->pFirst; (*pp)!=p; pp = &(*pp)->pNext){}
+ *pp = p->pNext;
+
+ /* Free the connection p */
+ sqlite3_free(p);
+ pDbFd->pShm = 0;
+ sqlite3_mutex_leave(pShmNode->pShmMutex);
+
+ /* If pShmNode->nRef has reached 0, then close the underlying
+ ** shared-memory file, too */
+ assert( unixFileMutexNotheld(pDbFd) );
+ unixEnterMutex();
+ assert( pShmNode->nRef>0 );
+ pShmNode->nRef--;
+ if( pShmNode->nRef==0 ){
+ if( deleteFlag && pShmNode->hShm>=0 ){
+ osUnlink(pShmNode->zFilename);
+ }
+ unixShmPurge(pDbFd);
+ }
+ unixLeaveMutex();
+
+ return SQLITE_OK;
+}
+
+
+#else
+# define unixShmMap 0
+# define unixShmLock 0
+# define unixShmBarrier 0
+# define unixShmUnmap 0
+#endif /* #ifndef SQLITE_OMIT_WAL */
+
+#if SQLITE_MAX_MMAP_SIZE>0
+/*
+** If it is currently memory mapped, unmap file pFd.
+*/
+static void unixUnmapfile(unixFile *pFd){
+ assert( pFd->nFetchOut==0 );
+ if( pFd->pMapRegion ){
+ osMunmap(pFd->pMapRegion, pFd->mmapSizeActual);
+ pFd->pMapRegion = 0;
+ pFd->mmapSize = 0;
+ pFd->mmapSizeActual = 0;
+ }
+}
+
+/*
+** Attempt to set the size of the memory mapping maintained by file
+** descriptor pFd to nNew bytes. Any existing mapping is discarded.
+**
+** If successful, this function sets the following variables:
+**
+** unixFile.pMapRegion
+** unixFile.mmapSize
+** unixFile.mmapSizeActual
+**
+** If unsuccessful, an error message is logged via sqlite3_log() and
+** the three variables above are zeroed. In this case SQLite should
+** continue accessing the database using the xRead() and xWrite()
+** methods.
+*/
+static void unixRemapfile(
+ unixFile *pFd, /* File descriptor object */
+ i64 nNew /* Required mapping size */
+){
+ const char *zErr = "mmap";
+ int h = pFd->h; /* File descriptor open on db file */
+ u8 *pOrig = (u8 *)pFd->pMapRegion; /* Pointer to current file mapping */
+ i64 nOrig = pFd->mmapSizeActual; /* Size of pOrig region in bytes */
+ u8 *pNew = 0; /* Location of new mapping */
+ int flags = PROT_READ; /* Flags to pass to mmap() */
+
+ assert( pFd->nFetchOut==0 );
+ assert( nNew>pFd->mmapSize );
+ assert( nNew<=pFd->mmapSizeMax );
+ assert( nNew>0 );
+ assert( pFd->mmapSizeActual>=pFd->mmapSize );
+ assert( MAP_FAILED!=0 );
+
+#ifdef SQLITE_MMAP_READWRITE
+ if( (pFd->ctrlFlags & UNIXFILE_RDONLY)==0 ) flags |= PROT_WRITE;
+#endif
+
+ if( pOrig ){
+#if HAVE_MREMAP
+ i64 nReuse = pFd->mmapSize;
+#else
+ const int szSyspage = osGetpagesize();
+ i64 nReuse = (pFd->mmapSize & ~(szSyspage-1));
+#endif
+ u8 *pReq = &pOrig[nReuse];
+
+ /* Unmap any pages of the existing mapping that cannot be reused. */
+ if( nReuse!=nOrig ){
+ osMunmap(pReq, nOrig-nReuse);
+ }
+
+#if HAVE_MREMAP
+ pNew = osMremap(pOrig, nReuse, nNew, MREMAP_MAYMOVE);
+ zErr = "mremap";
+#else
+ pNew = osMmap(pReq, nNew-nReuse, flags, MAP_SHARED, h, nReuse);
+ if( pNew!=MAP_FAILED ){
+ if( pNew!=pReq ){
+ osMunmap(pNew, nNew - nReuse);
+ pNew = 0;
+ }else{
+ pNew = pOrig;
+ }
+ }
+#endif
+
+ /* The attempt to extend the existing mapping failed. Free it. */
+ if( pNew==MAP_FAILED || pNew==0 ){
+ osMunmap(pOrig, nReuse);
+ }
+ }
+
+ /* If pNew is still NULL, try to create an entirely new mapping. */
+ if( pNew==0 ){
+ pNew = osMmap(0, nNew, flags, MAP_SHARED, h, 0);
+ }
+
+ if( pNew==MAP_FAILED ){
+ pNew = 0;
+ nNew = 0;
+ unixLogError(SQLITE_OK, zErr, pFd->zPath);
+
+ /* If the mmap() above failed, assume that all subsequent mmap() calls
+ ** will probably fail too. Fall back to using xRead/xWrite exclusively
+ ** in this case. */
+ pFd->mmapSizeMax = 0;
+ }
+ pFd->pMapRegion = (void *)pNew;
+ pFd->mmapSize = pFd->mmapSizeActual = nNew;
+}
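+
+/*
+** The grow-a-mapping strategy above, reduced to its two cases as an
+** illustrative sketch (compiled out; growMapping is a hypothetical
+** helper and error handling is simplified): prefer mremap() where it
+** exists, otherwise drop the old mapping and create a fresh one.
+*/
+#if 0
+#include <sys/mman.h>
+
+static void *growMapping(int fd, void *pOld, size_t nOld, size_t nNew){
+  void *pNew;
+#ifdef MREMAP_MAYMOVE
+  pNew = mremap(pOld, nOld, nNew, MREMAP_MAYMOVE);  /* Linux fast path */
+  if( pNew!=MAP_FAILED ) return pNew;
+#endif
+  munmap(pOld, nOld);                     /* fall back: unmap + remap */
+  pNew = mmap(0, nNew, PROT_READ, MAP_SHARED, fd, 0);
+  return pNew==MAP_FAILED ? 0 : pNew;
+}
+#endif /* illustrative sketch */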
+
+/*
+** Memory map or remap the file opened by file-descriptor pFd (if the file
+** is already mapped, the existing mapping is replaced by the new one). Or, if
+** there already exists a mapping for this file, and there are still
+** outstanding xFetch() references to it, this function is a no-op.
+**
+** If parameter nByte is non-negative, then it is the requested size of
+** the mapping to create. Otherwise, if nByte is less than zero, then the
+** requested size is the size of the file on disk. The actual size of the
+** created mapping is either the requested size or the value configured
+** using SQLITE_FCNTL_MMAP_LIMIT, whichever is smaller.
+**
+** SQLITE_OK is returned if no error occurs (even if the mapping is not
+** recreated as a result of outstanding references) or an SQLite error
+** code otherwise.
+*/
+static int unixMapfile(unixFile *pFd, i64 nMap){
+ assert( nMap>=0 || pFd->nFetchOut==0 );
+ assert( nMap>0 || (pFd->mmapSize==0 && pFd->pMapRegion==0) );
+ if( pFd->nFetchOut>0 ) return SQLITE_OK;
+
+ if( nMap<0 ){
+ struct stat statbuf; /* Low-level file information */
+ if( osFstat(pFd->h, &statbuf) ){
+ return SQLITE_IOERR_FSTAT;
+ }
+ nMap = statbuf.st_size;
+ }
+ if( nMap>pFd->mmapSizeMax ){
+ nMap = pFd->mmapSizeMax;
+ }
+
+ assert( nMap>0 || (pFd->mmapSize==0 && pFd->pMapRegion==0) );
+ if( nMap!=pFd->mmapSize ){
+ unixRemapfile(pFd, nMap);
+ }
+
+ return SQLITE_OK;
+}
+#endif /* SQLITE_MAX_MMAP_SIZE>0 */
+
+/*
+** If possible, return a pointer to a mapping of file fd starting at offset
+** iOff. The mapping must be valid for at least nAmt bytes.
+**
+** If such a pointer can be obtained, store it in *pp and return SQLITE_OK.
+** Or, if one cannot but no error occurs, set *pp to 0 and return SQLITE_OK.
+** Finally, if an error does occur, return an SQLite error code. The final
+** value of *pp is undefined in this case.
+**
+** If this function does return a pointer, the caller must eventually
+** release the reference by calling unixUnfetch().
+*/
+static int unixFetch(sqlite3_file *fd, i64 iOff, int nAmt, void **pp){
+#if SQLITE_MAX_MMAP_SIZE>0
+ unixFile *pFd = (unixFile *)fd; /* The underlying database file */
+#endif
+ *pp = 0;
+
+#if SQLITE_MAX_MMAP_SIZE>0
+ if( pFd->mmapSizeMax>0 ){
+ if( pFd->pMapRegion==0 ){
+ int rc = unixMapfile(pFd, -1);
+ if( rc!=SQLITE_OK ) return rc;
+ }
+ if( pFd->mmapSize >= iOff+nAmt ){
+ *pp = &((u8 *)pFd->pMapRegion)[iOff];
+ pFd->nFetchOut++;
+ }
+ }
+#endif
+ return SQLITE_OK;
+}
+
+/*
+** If the third argument is non-NULL, then this function releases a
+** reference obtained by an earlier call to unixFetch(). The second
+** argument passed to this function must be the same as the corresponding
+** argument that was passed to the unixFetch() invocation.
+**
+** Or, if the third argument is NULL, then this function is being called
+** to inform the VFS layer that, according to POSIX, any existing mapping
+** may now be invalid and should be unmapped.
+*/
+static int unixUnfetch(sqlite3_file *fd, i64 iOff, void *p){
+#if SQLITE_MAX_MMAP_SIZE>0
+ unixFile *pFd = (unixFile *)fd; /* The underlying database file */
+ UNUSED_PARAMETER(iOff);
+
+ /* If p==0 (unmap the entire file) then there must be no outstanding
+ ** xFetch references. Or, if p!=0 (meaning it is an xFetch reference),
+ ** then there must be at least one outstanding. */
+ assert( (p==0)==(pFd->nFetchOut==0) );
+
+ /* If p!=0, it must match the iOff value. */
+ assert( p==0 || p==&((u8 *)pFd->pMapRegion)[iOff] );
+
+ if( p ){
+ pFd->nFetchOut--;
+ }else{
+ unixUnmapfile(pFd);
+ }
+
+ assert( pFd->nFetchOut>=0 );
+#else
+ UNUSED_PARAMETER(fd);
+ UNUSED_PARAMETER(p);
+ UNUSED_PARAMETER(iOff);
+#endif
+ return SQLITE_OK;
+}
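+
+/*
+** How a caller pairs the two methods above (illustrative sketch,
+** compiled out; readViaMap is a hypothetical helper): request a mapped
+** view, fall back to xRead() when none is available, and always release
+** a non-NULL reference with xUnfetch().
+*/
+#if 0
+#include <string.h>
+
+static int readViaMap(sqlite3_file *fd, sqlite3_int64 iOff,
+                      int nAmt, void *pInto){
+  void *pMap = 0;
+  int rc = fd->pMethods->xFetch(fd, iOff, nAmt, &pMap);
+  if( rc!=SQLITE_OK ) return rc;
+  if( pMap ){
+    memcpy(pInto, pMap, nAmt);          /* serve the read from the map */
+    rc = fd->pMethods->xUnfetch(fd, iOff, pMap);
+  }else{
+    rc = fd->pMethods->xRead(fd, pInto, nAmt, iOff);
+  }
+  return rc;
+}
+#endif /* illustrative sketch */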
+
+/*
+** Here ends the implementation of all sqlite3_file methods.
+**
+********************** End sqlite3_file Methods *******************************
+******************************************************************************/
+
+/*
+** This division contains definitions of sqlite3_io_methods objects that
+** implement various file locking strategies. It also contains definitions
+** of "finder" functions. A finder-function is used to locate the appropriate
+** sqlite3_io_methods object for a particular database file. The pAppData
+** field of each sqlite3_vfs VFS object is initialized to a pointer to
+** the correct finder-function for that VFS.
+**
+** Most finder functions return a pointer to a fixed sqlite3_io_methods
+** object. The only interesting finder-function is autolockIoFinder, which
+** looks at the filesystem type and tries to guess the best locking
+** strategy from that.
+**
+** For finder-function F, two objects are created:
+**
+**    (1) The real finder-function named "FImpl()".
+**
+**    (2) A constant pointer to this function named just "F".
+**
+** A pointer to the F pointer is used as the pAppData value for VFS
+** objects. We have to do this instead of letting pAppData point
+** directly at the finder-function since C90 rules prevent a void*
+** from being cast into a function pointer.
+**
+** Each instance of the IOMETHODS macro below generates two objects:
+**
+**   *  A constant sqlite3_io_methods object called METHOD that has locking
+**      methods CLOSE, LOCK, UNLOCK, CKLOCK.
+**
+**   *  An I/O method finder function called FINDER that returns a pointer
+**      to the METHOD object in the previous bullet.
+*/
+#define IOMETHODS(FINDER,METHOD,VERSION,CLOSE,LOCK,UNLOCK,CKLOCK,SHMMAP) \
+static const sqlite3_io_methods METHOD = { \
+ VERSION, /* iVersion */ \
+ CLOSE, /* xClose */ \
+ unixRead, /* xRead */ \
+ unixWrite, /* xWrite */ \
+ unixTruncate, /* xTruncate */ \
+ unixSync, /* xSync */ \
+ unixFileSize, /* xFileSize */ \
+ LOCK, /* xLock */ \
+ UNLOCK, /* xUnlock */ \
+ CKLOCK, /* xCheckReservedLock */ \
+ unixFileControl, /* xFileControl */ \
+ unixSectorSize, /* xSectorSize */ \
+  unixDeviceCharacteristics,  /* xDeviceCharacteristics */                  \
+ SHMMAP, /* xShmMap */ \
+ unixShmLock, /* xShmLock */ \
+ unixShmBarrier, /* xShmBarrier */ \
+ unixShmUnmap, /* xShmUnmap */ \
+ unixFetch, /* xFetch */ \
+ unixUnfetch, /* xUnfetch */ \
+}; \
+static const sqlite3_io_methods *FINDER##Impl(const char *z, unixFile *p){ \
+ UNUSED_PARAMETER(z); UNUSED_PARAMETER(p); \
+ return &METHOD; \
+} \
+static const sqlite3_io_methods *(*const FINDER)(const char*,unixFile *p) \
+ = FINDER##Impl;
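+
+/*
+** Why pAppData holds a pointer to the finder pointer: C90 forbids
+** converting a void* into a function pointer, so the VFS stores the
+** address of the constant FINDER object pointer and the lookup
+** dereferences twice. An illustrative sketch (compiled out;
+** lookupMethods is a hypothetical helper mirroring fillInUnixFile()):
+*/
+#if 0
+typedef const sqlite3_io_methods *(*finder_fn)(const char*, unixFile*);
+
+static const sqlite3_io_methods *lookupMethods(
+  sqlite3_vfs *pVfs, const char *zName, unixFile *pFile
+){
+  /* The cast recovers the finder_fn* stored in pAppData; the first *
+  ** loads the function pointer and the second * (a no-op on function
+  ** pointers) yields the callable function. */
+  return (**(finder_fn*)pVfs->pAppData)(zName, pFile);
+}
+#endif /* illustrative sketch */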
+
+/*
+** Here are all of the sqlite3_io_methods objects for each of the
+** locking strategies. Functions that return pointers to these methods
+** are also created.
+*/
+IOMETHODS(
+ posixIoFinder, /* Finder function name */
+ posixIoMethods, /* sqlite3_io_methods object name */
+ 3, /* shared memory and mmap are enabled */
+ unixClose, /* xClose method */
+ unixLock, /* xLock method */
+ unixUnlock, /* xUnlock method */
+ unixCheckReservedLock, /* xCheckReservedLock method */
+ unixShmMap /* xShmMap method */
+)
+IOMETHODS(
+ nolockIoFinder, /* Finder function name */
+ nolockIoMethods, /* sqlite3_io_methods object name */
+ 3, /* shared memory and mmap are enabled */
+ nolockClose, /* xClose method */
+ nolockLock, /* xLock method */
+ nolockUnlock, /* xUnlock method */
+ nolockCheckReservedLock, /* xCheckReservedLock method */
+ 0 /* xShmMap method */
+)
+IOMETHODS(
+ dotlockIoFinder, /* Finder function name */
+ dotlockIoMethods, /* sqlite3_io_methods object name */
+ 1, /* shared memory is disabled */
+ dotlockClose, /* xClose method */
+ dotlockLock, /* xLock method */
+ dotlockUnlock, /* xUnlock method */
+ dotlockCheckReservedLock, /* xCheckReservedLock method */
+ 0 /* xShmMap method */
+)
+
+#if SQLITE_ENABLE_LOCKING_STYLE
+IOMETHODS(
+ flockIoFinder, /* Finder function name */
+ flockIoMethods, /* sqlite3_io_methods object name */
+ 1, /* shared memory is disabled */
+ flockClose, /* xClose method */
+ flockLock, /* xLock method */
+ flockUnlock, /* xUnlock method */
+ flockCheckReservedLock, /* xCheckReservedLock method */
+ 0 /* xShmMap method */
+)
+#endif
+
+#if OS_VXWORKS
+IOMETHODS(
+ semIoFinder, /* Finder function name */
+ semIoMethods, /* sqlite3_io_methods object name */
+ 1, /* shared memory is disabled */
+ semXClose, /* xClose method */
+ semXLock, /* xLock method */
+ semXUnlock, /* xUnlock method */
+ semXCheckReservedLock, /* xCheckReservedLock method */
+ 0 /* xShmMap method */
+)
+#endif
+
+#if defined(__APPLE__) && SQLITE_ENABLE_LOCKING_STYLE
+IOMETHODS(
+ afpIoFinder, /* Finder function name */
+ afpIoMethods, /* sqlite3_io_methods object name */
+ 1, /* shared memory is disabled */
+ afpClose, /* xClose method */
+ afpLock, /* xLock method */
+ afpUnlock, /* xUnlock method */
+ afpCheckReservedLock, /* xCheckReservedLock method */
+ 0 /* xShmMap method */
+)
+#endif
+
+/*
+** The proxy locking method is a "super-method" in the sense that it
+** opens secondary file descriptors for the conch and lock files and
+** it uses proxy, dot-file, AFP, and flock() locking methods on those
+** secondary files. For this reason, the division that implements
+** proxy locking is located much further down in the file. But we need
+** to go ahead and define the sqlite3_io_methods and finder function
+** for proxy locking here. So we forward declare the I/O methods.
+*/
+#if defined(__APPLE__) && SQLITE_ENABLE_LOCKING_STYLE
+static int proxyClose(sqlite3_file*);
+static int proxyLock(sqlite3_file*, int);
+static int proxyUnlock(sqlite3_file*, int);
+static int proxyCheckReservedLock(sqlite3_file*, int*);
+IOMETHODS(
+ proxyIoFinder, /* Finder function name */
+ proxyIoMethods, /* sqlite3_io_methods object name */
+ 1, /* shared memory is disabled */
+ proxyClose, /* xClose method */
+ proxyLock, /* xLock method */
+ proxyUnlock, /* xUnlock method */
+ proxyCheckReservedLock, /* xCheckReservedLock method */
+ 0 /* xShmMap method */
+)
+#endif
+
+/* nfs lockd on OSX 10.3+ doesn't clear write locks when a read lock is set */
+#if defined(__APPLE__) && SQLITE_ENABLE_LOCKING_STYLE
+IOMETHODS(
+ nfsIoFinder, /* Finder function name */
+ nfsIoMethods, /* sqlite3_io_methods object name */
+ 1, /* shared memory is disabled */
+ unixClose, /* xClose method */
+ unixLock, /* xLock method */
+ nfsUnlock, /* xUnlock method */
+ unixCheckReservedLock, /* xCheckReservedLock method */
+ 0 /* xShmMap method */
+)
+#endif
+
+#if defined(__APPLE__) && SQLITE_ENABLE_LOCKING_STYLE
+/*
+** This "finder" function attempts to determine the best locking strategy
+** for the database file "filePath". It then returns the sqlite3_io_methods
+** object that implements that strategy.
+**
+** This is for MacOSX only.
+*/
+static const sqlite3_io_methods *autolockIoFinderImpl(
+ const char *filePath, /* name of the database file */
+ unixFile *pNew /* open file object for the database file */
+){
+ static const struct Mapping {
+ const char *zFilesystem; /* Filesystem type name */
+ const sqlite3_io_methods *pMethods; /* Appropriate locking method */
+ } aMap[] = {
+ { "hfs", &posixIoMethods },
+ { "ufs", &posixIoMethods },
+ { "afpfs", &afpIoMethods },
+ { "smbfs", &afpIoMethods },
+ { "webdav", &nolockIoMethods },
+ { 0, 0 }
+ };
+ int i;
+ struct statfs fsInfo;
+ struct flock lockInfo;
+
+ if( !filePath ){
+ /* If filePath==NULL that means we are dealing with a transient file
+ ** that does not need to be locked. */
+ return &nolockIoMethods;
+ }
+ if( statfs(filePath, &fsInfo) != -1 ){
+ if( fsInfo.f_flags & MNT_RDONLY ){
+ return &nolockIoMethods;
+ }
+ for(i=0; aMap[i].zFilesystem; i++){
+ if( strcmp(fsInfo.f_fstypename, aMap[i].zFilesystem)==0 ){
+ return aMap[i].pMethods;
+ }
+ }
+ }
+
+ /* Default case. Handles, amongst others, "nfs".
+ ** Test byte-range lock using fcntl(). If the call succeeds,
+ ** assume that the file-system supports POSIX style locks.
+ */
+ lockInfo.l_len = 1;
+ lockInfo.l_start = 0;
+ lockInfo.l_whence = SEEK_SET;
+ lockInfo.l_type = F_RDLCK;
+ if( osFcntl(pNew->h, F_GETLK, &lockInfo)!=-1 ) {
+ if( strcmp(fsInfo.f_fstypename, "nfs")==0 ){
+ return &nfsIoMethods;
+ } else {
+ return &posixIoMethods;
+ }
+ }else{
+ return &dotlockIoMethods;
+ }
+}
+static const sqlite3_io_methods
+ *(*const autolockIoFinder)(const char*,unixFile*) = autolockIoFinderImpl;
+
+#endif /* defined(__APPLE__) && SQLITE_ENABLE_LOCKING_STYLE */
+
+#if OS_VXWORKS
+/*
+** This "finder" function for VxWorks checks to see if posix advisory
+** locking works. If it does, then that is what is used. If it does not
+** work, then fallback to named semaphore locking.
+*/
+static const sqlite3_io_methods *vxworksIoFinderImpl(
+ const char *filePath, /* name of the database file */
+ unixFile *pNew /* the open file object */
+){
+ struct flock lockInfo;
+
+ if( !filePath ){
+ /* If filePath==NULL that means we are dealing with a transient file
+ ** that does not need to be locked. */
+ return &nolockIoMethods;
+ }
+
+ /* Test if fcntl() is supported and use POSIX style locks.
+ ** Otherwise fall back to the named semaphore method.
+ */
+ lockInfo.l_len = 1;
+ lockInfo.l_start = 0;
+ lockInfo.l_whence = SEEK_SET;
+ lockInfo.l_type = F_RDLCK;
+ if( osFcntl(pNew->h, F_GETLK, &lockInfo)!=-1 ) {
+ return &posixIoMethods;
+ }else{
+ return &semIoMethods;
+ }
+}
+static const sqlite3_io_methods
+ *(*const vxworksIoFinder)(const char*,unixFile*) = vxworksIoFinderImpl;
+
+#endif /* OS_VXWORKS */
+
+/*
+** An abstract type for a pointer to an IO method finder function:
+*/
+typedef const sqlite3_io_methods *(*finder_type)(const char*,unixFile*);
+
+
+/****************************************************************************
+**************************** sqlite3_vfs methods ****************************
+**
+** This division contains the implementation of methods on the
+** sqlite3_vfs object.
+*/
+
+/*
+** Initialize the contents of the unixFile structure pointed to by pId.
+*/
+static int fillInUnixFile(
+ sqlite3_vfs *pVfs, /* Pointer to vfs object */
+ int h, /* Open file descriptor of file being opened */
+ sqlite3_file *pId, /* Write to the unixFile structure here */
+ const char *zFilename, /* Name of the file being opened */
+ int ctrlFlags /* Zero or more UNIXFILE_* values */
+){
+ const sqlite3_io_methods *pLockingStyle;
+ unixFile *pNew = (unixFile *)pId;
+ int rc = SQLITE_OK;
+
+ assert( pNew->pInode==NULL );
+
+ /* No locking occurs in temporary files */
+ assert( zFilename!=0 || (ctrlFlags & UNIXFILE_NOLOCK)!=0 );
+
+ OSTRACE(("OPEN %-3d %s\n", h, zFilename));
+ pNew->h = h;
+ pNew->pVfs = pVfs;
+ pNew->zPath = zFilename;
+ pNew->ctrlFlags = (u8)ctrlFlags;
+#if SQLITE_MAX_MMAP_SIZE>0
+ pNew->mmapSizeMax = sqlite3GlobalConfig.szMmap;
+#endif
+ if( sqlite3_uri_boolean(((ctrlFlags & UNIXFILE_URI) ? zFilename : 0),
+ "psow", SQLITE_POWERSAFE_OVERWRITE) ){
+ pNew->ctrlFlags |= UNIXFILE_PSOW;
+ }
+ if( strcmp(pVfs->zName,"unix-excl")==0 ){
+ pNew->ctrlFlags |= UNIXFILE_EXCL;
+ }
+
+#if OS_VXWORKS
+ pNew->pId = vxworksFindFileId(zFilename);
+ if( pNew->pId==0 ){
+ ctrlFlags |= UNIXFILE_NOLOCK;
+ rc = SQLITE_NOMEM_BKPT;
+ }
+#endif
+
+ if( ctrlFlags & UNIXFILE_NOLOCK ){
+ pLockingStyle = &nolockIoMethods;
+ }else{
+ pLockingStyle = (**(finder_type*)pVfs->pAppData)(zFilename, pNew);
+#if SQLITE_ENABLE_LOCKING_STYLE
+    /* Cache zFilename in the locking context (AFP and dotlock override) so
+    ** that proxyLock activation is possible (the remote proxy is based on
+    ** the db name). zFilename remains valid until the file is closed. */
+ pNew->lockingContext = (void*)zFilename;
+#endif
+ }
+
+ if( pLockingStyle == &posixIoMethods
+#if defined(__APPLE__) && SQLITE_ENABLE_LOCKING_STYLE
+ || pLockingStyle == &nfsIoMethods
+#endif
+ ){
+ unixEnterMutex();
+ rc = findInodeInfo(pNew, &pNew->pInode);
+ if( rc!=SQLITE_OK ){
+ /* If an error occurred in findInodeInfo(), close the file descriptor
+ ** immediately, before releasing the mutex. findInodeInfo() may fail
+ ** in two scenarios:
+ **
+ ** (a) A call to fstat() failed.
+ ** (b) A malloc failed.
+ **
+ ** Scenario (b) may only occur if the process is holding no other
+ ** file descriptors open on the same file. If there were other file
+ ** descriptors on this file, then no malloc would be required by
+ ** findInodeInfo(). If this is the case, it is quite safe to close
+ ** handle h - as it is guaranteed that no posix locks will be released
+ ** by doing so.
+ **
+ ** If scenario (a) caused the error then things are not so safe. The
+ ** implicit assumption here is that if fstat() fails, things are in
+ ** such bad shape that dropping a lock or two doesn't matter much.
+ */
+ robust_close(pNew, h, __LINE__);
+ h = -1;
+ }
+ unixLeaveMutex();
+ }
+
+#if SQLITE_ENABLE_LOCKING_STYLE && defined(__APPLE__)
+ else if( pLockingStyle == &afpIoMethods ){
+ /* AFP locking uses the file path so it needs to be included in
+ ** the afpLockingContext.
+ */
+ afpLockingContext *pCtx;
+ pNew->lockingContext = pCtx = sqlite3_malloc64( sizeof(*pCtx) );
+ if( pCtx==0 ){
+ rc = SQLITE_NOMEM_BKPT;
+ }else{
+ /* NB: zFilename exists and remains valid until the file is closed
+ ** according to requirement F11141. So we do not need to make a
+ ** copy of the filename. */
+ pCtx->dbPath = zFilename;
+ pCtx->reserved = 0;
+ srandomdev();
+ unixEnterMutex();
+ rc = findInodeInfo(pNew, &pNew->pInode);
+ if( rc!=SQLITE_OK ){
+ sqlite3_free(pNew->lockingContext);
+ robust_close(pNew, h, __LINE__);
+ h = -1;
+ }
+ unixLeaveMutex();
+ }
+ }
+#endif
+
+ else if( pLockingStyle == &dotlockIoMethods ){
+ /* Dotfile locking uses the file path so it needs to be included in
+ ** the dotlockLockingContext
+ */
+ char *zLockFile;
+ int nFilename;
+ assert( zFilename!=0 );
+ nFilename = (int)strlen(zFilename) + 6;
+ zLockFile = (char *)sqlite3_malloc64(nFilename);
+ if( zLockFile==0 ){
+ rc = SQLITE_NOMEM_BKPT;
+ }else{
+ sqlite3_snprintf(nFilename, zLockFile, "%s" DOTLOCK_SUFFIX, zFilename);
+ }
+ pNew->lockingContext = zLockFile;
+ }
+
+#if OS_VXWORKS
+ else if( pLockingStyle == &semIoMethods ){
+ /* Named semaphore locking uses the file path so it needs to be
+ ** included in the semLockingContext
+ */
+ unixEnterMutex();
+ rc = findInodeInfo(pNew, &pNew->pInode);
+ if( (rc==SQLITE_OK) && (pNew->pInode->pSem==NULL) ){
+ char *zSemName = pNew->pInode->aSemName;
+ int n;
+ sqlite3_snprintf(MAX_PATHNAME, zSemName, "/%s.sem",
+ pNew->pId->zCanonicalName);
+ for( n=1; zSemName[n]; n++ )
+ if( zSemName[n]=='/' ) zSemName[n] = '_';
+ pNew->pInode->pSem = sem_open(zSemName, O_CREAT, 0666, 1);
+ if( pNew->pInode->pSem == SEM_FAILED ){
+ rc = SQLITE_NOMEM_BKPT;
+ pNew->pInode->aSemName[0] = '\0';
+ }
+ }
+ unixLeaveMutex();
+ }
+#endif
+
+ storeLastErrno(pNew, 0);
+#if OS_VXWORKS
+ if( rc!=SQLITE_OK ){
+ if( h>=0 ) robust_close(pNew, h, __LINE__);
+ h = -1;
+ osUnlink(zFilename);
+ pNew->ctrlFlags |= UNIXFILE_DELETE;
+ }
+#endif
+ if( rc!=SQLITE_OK ){
+ if( h>=0 ) robust_close(pNew, h, __LINE__);
+ }else{
+ pId->pMethods = pLockingStyle;
+ OpenCounter(+1);
+ verifyDbFile(pNew);
+ }
+ return rc;
+}
+
+/*
+** Return the name of a directory in which to put temporary files.
+** If no suitable temporary file directory can be found, return NULL.
+*/
+static const char *unixTempFileDir(void){
+ static const char *azDirs[] = {
+ 0,
+ 0,
+ "/var/tmp",
+ "/usr/tmp",
+ "/tmp",
+ "."
+ };
+ unsigned int i = 0;
+ struct stat buf;
+ const char *zDir = sqlite3_temp_directory;
+
+ if( !azDirs[0] ) azDirs[0] = getenv("SQLITE_TMPDIR");
+ if( !azDirs[1] ) azDirs[1] = getenv("TMPDIR");
+ while(1){
+ if( zDir!=0
+ && osStat(zDir, &buf)==0
+ && S_ISDIR(buf.st_mode)
+ && osAccess(zDir, 03)==0
+ ){
+ return zDir;
+ }
+ if( i>=sizeof(azDirs)/sizeof(azDirs[0]) ) break;
+ zDir = azDirs[i++];
+ }
+ return 0;
+}
+
+/*
+** Create a temporary file name in zBuf. zBuf must be allocated
+** by the calling process and must be big enough to hold at least
+** pVfs->mxPathname bytes.
+*/
+static int unixGetTempname(int nBuf, char *zBuf){
+ const char *zDir;
+ int iLimit = 0;
+
+ /* It's odd to simulate an io-error here, but really this is just
+ ** using the io-error infrastructure to test that SQLite handles this
+ ** function failing.
+ */
+ zBuf[0] = 0;
+ SimulateIOError( return SQLITE_IOERR );
+
+ zDir = unixTempFileDir();
+ if( zDir==0 ) return SQLITE_IOERR_GETTEMPPATH;
+ do{
+ u64 r;
+ sqlite3_randomness(sizeof(r), &r);
+ assert( nBuf>2 );
+ zBuf[nBuf-2] = 0;
+ sqlite3_snprintf(nBuf, zBuf, "%s/"SQLITE_TEMP_FILE_PREFIX"%llx%c",
+ zDir, r, 0);
+ if( zBuf[nBuf-2]!=0 || (iLimit++)>10 ) return SQLITE_ERROR;
+ }while( osAccess(zBuf,0)==0 );
+ return SQLITE_OK;
+}
+
+#if SQLITE_ENABLE_LOCKING_STYLE && defined(__APPLE__)
+/*
+** Routine to transform a unixFile into a proxy-locking unixFile.
+** Implementation in the proxy-lock division, but used by unixOpen()
+** if SQLITE_PREFER_PROXY_LOCKING is defined.
+*/
+static int proxyTransformUnixFile(unixFile*, const char*);
+#endif
+
+/*
+** Search for an unused file descriptor that was opened on the database
+** file (not a journal or super-journal file) identified by pathname
+** zPath with SQLITE_OPEN_XXX flags matching those passed as the second
+** argument to this function.
+**
+** Such a file descriptor may exist if a database connection was closed
+** but the associated file descriptor could not be closed because some
+** other file descriptor open on the same file is holding a file-lock.
+** Refer to comments in the unixClose() function and the lengthy comment
+** describing "Posix Advisory Locking" at the start of this file for
+** further details. Also, ticket #4018.
+**
+** If a suitable file descriptor is found, then it is returned. If no
+** such file descriptor is located, NULL is returned.
+*/
+static UnixUnusedFd *findReusableFd(const char *zPath, int flags){
+ UnixUnusedFd *pUnused = 0;
+
+ /* Do not search for an unused file descriptor on vxworks. Not because
+ ** vxworks would not benefit from the change (it might, we're not sure),
+ ** but because no way to test it is currently available. It is better
+ ** not to risk breaking vxworks support for the sake of such an obscure
+ ** feature. */
+#if !OS_VXWORKS
+ struct stat sStat; /* Results of stat() call */
+
+ unixEnterMutex();
+
+ /* A stat() call may fail for various reasons. If this happens, it is
+ ** almost certain that an open() call on the same path will also fail.
+ ** For this reason, if an error occurs in the stat() call here, it is
+  ** ignored and NULL is returned. The caller will try to open a new file
+ ** descriptor on the same path, fail, and return an error to SQLite.
+ **
+ ** Even if a subsequent open() call does succeed, the consequences of
+ ** not searching for a reusable file descriptor are not dire. */
+ if( inodeList!=0 && 0==osStat(zPath, &sStat) ){
+ unixInodeInfo *pInode;
+
+ pInode = inodeList;
+ while( pInode && (pInode->fileId.dev!=sStat.st_dev
+ || pInode->fileId.ino!=(u64)sStat.st_ino) ){
+ pInode = pInode->pNext;
+ }
+ if( pInode ){
+ UnixUnusedFd **pp;
+ assert( sqlite3_mutex_notheld(pInode->pLockMutex) );
+ sqlite3_mutex_enter(pInode->pLockMutex);
+ flags &= (SQLITE_OPEN_READONLY|SQLITE_OPEN_READWRITE);
+ for(pp=&pInode->pUnused; *pp && (*pp)->flags!=flags; pp=&((*pp)->pNext));
+ pUnused = *pp;
+ if( pUnused ){
+ *pp = pUnused->pNext;
+ }
+ sqlite3_mutex_leave(pInode->pLockMutex);
+ }
+ }
+ unixLeaveMutex();
+#endif /* if !OS_VXWORKS */
+ return pUnused;
+}
+
+/*
+** Find the mode, uid and gid of file zFile.
+*/
+static int getFileMode(
+ const char *zFile, /* File name */
+ mode_t *pMode, /* OUT: Permissions of zFile */
+ uid_t *pUid, /* OUT: uid of zFile. */
+ gid_t *pGid /* OUT: gid of zFile. */
+){
+ struct stat sStat; /* Output of stat() on database file */
+ int rc = SQLITE_OK;
+ if( 0==osStat(zFile, &sStat) ){
+ *pMode = sStat.st_mode & 0777;
+ *pUid = sStat.st_uid;
+ *pGid = sStat.st_gid;
+ }else{
+ rc = SQLITE_IOERR_FSTAT;
+ }
+ return rc;
+}
+
+/*
+** This function is called by unixOpen() to determine the unix permissions
+** to create new files with. If no error occurs, then SQLITE_OK is returned
+** and a value suitable for passing as the third argument to open(2) is
+** written to *pMode. If an IO error occurs, an SQLite error code is
+** returned and the value of *pMode is not modified.
+**
+** In most cases, this routine sets *pMode to 0, which will become
+** an indication to robust_open() to create the file using
+** SQLITE_DEFAULT_FILE_PERMISSIONS adjusted by the umask.
+** But if the file being opened is a WAL or regular journal file, then
+** this function queries the file-system for the permissions on the
+** corresponding database file and sets *pMode to this value. Whenever
+** possible, WAL and journal files are created using the same permissions
+** as the associated database file.
+**
+** If the SQLITE_ENABLE_8_3_NAMES option is enabled, then the
+** original filename is unavailable. But 8_3_NAMES is only used for
+** FAT filesystems and permissions do not matter there, so just use
+** the default permissions. In 8_3_NAMES mode, leave *pMode set to zero.
+*/
+static int findCreateFileMode(
+ const char *zPath, /* Path of file (possibly) being created */
+ int flags, /* Flags passed as 4th argument to xOpen() */
+ mode_t *pMode, /* OUT: Permissions to open file with */
+ uid_t *pUid, /* OUT: uid to set on the file */
+ gid_t *pGid /* OUT: gid to set on the file */
+){
+ int rc = SQLITE_OK; /* Return Code */
+ *pMode = 0;
+ *pUid = 0;
+ *pGid = 0;
+ if( flags & (SQLITE_OPEN_WAL|SQLITE_OPEN_MAIN_JOURNAL) ){
+ char zDb[MAX_PATHNAME+1]; /* Database file path */
+ int nDb; /* Number of valid bytes in zDb */
+
+ /* zPath is a path to a WAL or journal file. The following block derives
+ ** the path to the associated database file from zPath. This block handles
+ ** the following naming conventions:
+ **
+ ** "<path to db>-journal"
+ ** "<path to db>-wal"
+ ** "<path to db>-journalNN"
+ ** "<path to db>-walNN"
+ **
+ ** where NN is a decimal number. The NN naming schemes are
+ ** used by the test_multiplex.c module.
+ */
+ nDb = sqlite3Strlen30(zPath) - 1;
+ while( zPath[nDb]!='-' ){
+ /* In normal operation, the journal file name will always contain
+ ** a '-' character. However in 8+3 filename mode, or if a corrupt
+ ** rollback journal specifies a super-journal with a goofy name, then
+ ** the '-' might be missing. */
+ if( nDb==0 || zPath[nDb]=='.' ) return SQLITE_OK;
+ nDb--;
+ }
+ memcpy(zDb, zPath, nDb);
+ zDb[nDb] = '\0';
+
+ rc = getFileMode(zDb, pMode, pUid, pGid);
+ }else if( flags & SQLITE_OPEN_DELETEONCLOSE ){
+ *pMode = 0600;
+ }else if( flags & SQLITE_OPEN_URI ){
+ /* If this is a main database file and the file was opened using a URI
+ ** filename, check for the "modeof" parameter. If present, interpret
+ ** its value as a filename and try to copy the mode, uid and gid from
+ ** that file. */
+ const char *z = sqlite3_uri_parameter(zPath, "modeof");
+ if( z ){
+ rc = getFileMode(z, pMode, pUid, pGid);
+ }
+ }
+ return rc;
+}
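+
+/*
+** The suffix-stripping scan above in isolation (illustrative sketch,
+** compiled out; dbPathFromJournal is a hypothetical helper): walk
+** backwards to the last '-' to recover "<path to db>" from
+** "<path to db>-journal", "-wal", "-journalNN" or "-walNN".
+*/
+#if 0
+#include <string.h>
+
+static int dbPathFromJournal(const char *zPath, char *zDb, int nDb){
+  int n = (int)strlen(zPath) - 1;
+  while( zPath[n]!='-' ){
+    /* Give up if the '-' is missing, as in 8+3 filename mode. */
+    if( n==0 || zPath[n]=='.' ) return 0;
+    n--;
+  }
+  if( n>=nDb ) return 0;                  /* output buffer too small */
+  memcpy(zDb, zPath, n);
+  zDb[n] = '\0';
+  return 1;
+}
+#endif /* illustrative sketch */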
+
+/*
+** Open the file zPath.
+**
+** Previously, the SQLite OS layer used three functions in place of this
+** one:
+**
+** sqlite3OsOpenReadWrite();
+** sqlite3OsOpenReadOnly();
+** sqlite3OsOpenExclusive();
+**
+** These calls correspond to the following combinations of flags:
+**
+** ReadWrite() -> (READWRITE | CREATE)
+** ReadOnly() -> (READONLY)
+** OpenExclusive() -> (READWRITE | CREATE | EXCLUSIVE)
+**
+** The old OpenExclusive() accepted a boolean argument - "delFlag". If
+** true, the file was configured to be automatically deleted when the
+** file handle closed. To achieve the same effect using this new
+** interface, add the DELETEONCLOSE flag to those specified above for
+** OpenExclusive().
+*/
+static int unixOpen(
+ sqlite3_vfs *pVfs, /* The VFS for which this is the xOpen method */
+ const char *zPath, /* Pathname of file to be opened */
+ sqlite3_file *pFile, /* The file descriptor to be filled in */
+ int flags, /* Input flags to control the opening */
+ int *pOutFlags /* Output flags returned to SQLite core */
+){
+ unixFile *p = (unixFile *)pFile;
+ int fd = -1; /* File descriptor returned by open() */
+ int openFlags = 0; /* Flags to pass to open() */
+ int eType = flags&0x0FFF00; /* Type of file to open */
+ int noLock; /* True to omit locking primitives */
+ int rc = SQLITE_OK; /* Function Return Code */
+ int ctrlFlags = 0; /* UNIXFILE_* flags */
+
+ int isExclusive = (flags & SQLITE_OPEN_EXCLUSIVE);
+ int isDelete = (flags & SQLITE_OPEN_DELETEONCLOSE);
+ int isCreate = (flags & SQLITE_OPEN_CREATE);
+ int isReadonly = (flags & SQLITE_OPEN_READONLY);
+ int isReadWrite = (flags & SQLITE_OPEN_READWRITE);
+#if SQLITE_ENABLE_LOCKING_STYLE
+ int isAutoProxy = (flags & SQLITE_OPEN_AUTOPROXY);
+#endif
+#if defined(__APPLE__) || SQLITE_ENABLE_LOCKING_STYLE
+ struct statfs fsInfo;
+#endif
+
+ /* If creating a super- or main-file journal, this function will open
+ ** a file-descriptor on the directory too. The first time unixSync()
+ ** is called the directory file descriptor will be fsync()ed and close()d.
+ */
+ int isNewJrnl = (isCreate && (
+ eType==SQLITE_OPEN_SUPER_JOURNAL
+ || eType==SQLITE_OPEN_MAIN_JOURNAL
+ || eType==SQLITE_OPEN_WAL
+ ));
+
+ /* If argument zPath is a NULL pointer, this function is required to open
+ ** a temporary file. Use this buffer to store the file name in.
+ */
+ char zTmpname[MAX_PATHNAME+2];
+ const char *zName = zPath;
+
+ /* Check the following statements are true:
+ **
+ ** (a) Exactly one of the READWRITE and READONLY flags must be set, and
+ ** (b) if CREATE is set, then READWRITE must also be set, and
+ ** (c) if EXCLUSIVE is set, then CREATE must also be set.
+ ** (d) if DELETEONCLOSE is set, then CREATE must also be set.
+ */
+ assert((isReadonly==0 || isReadWrite==0) && (isReadWrite || isReadonly));
+ assert(isCreate==0 || isReadWrite);
+ assert(isExclusive==0 || isCreate);
+ assert(isDelete==0 || isCreate);
+
+ /* The main DB, main journal, WAL file and super-journal are never
+ ** automatically deleted. Nor are they ever temporary files. */
+ assert( (!isDelete && zName) || eType!=SQLITE_OPEN_MAIN_DB );
+ assert( (!isDelete && zName) || eType!=SQLITE_OPEN_MAIN_JOURNAL );
+ assert( (!isDelete && zName) || eType!=SQLITE_OPEN_SUPER_JOURNAL );
+ assert( (!isDelete && zName) || eType!=SQLITE_OPEN_WAL );
+
+ /* Assert that the upper layer has set one of the "file-type" flags. */
+ assert( eType==SQLITE_OPEN_MAIN_DB || eType==SQLITE_OPEN_TEMP_DB
+ || eType==SQLITE_OPEN_MAIN_JOURNAL || eType==SQLITE_OPEN_TEMP_JOURNAL
+ || eType==SQLITE_OPEN_SUBJOURNAL || eType==SQLITE_OPEN_SUPER_JOURNAL
+ || eType==SQLITE_OPEN_TRANSIENT_DB || eType==SQLITE_OPEN_WAL
+ );
+
+ /* Detect a pid change and reset the PRNG. There is a race condition
+ ** here such that two or more threads all trying to open databases at
+ ** the same instant might all reset the PRNG. But multiple resets
+ ** are harmless.
+ */
+ if( randomnessPid!=osGetpid(0) ){
+ randomnessPid = osGetpid(0);
+ sqlite3_randomness(0,0);
+ }
+ memset(p, 0, sizeof(unixFile));
+
+ if( eType==SQLITE_OPEN_MAIN_DB ){
+ UnixUnusedFd *pUnused;
+ pUnused = findReusableFd(zName, flags);
+ if( pUnused ){
+ fd = pUnused->fd;
+ }else{
+ pUnused = sqlite3_malloc64(sizeof(*pUnused));
+ if( !pUnused ){
+ return SQLITE_NOMEM_BKPT;
+ }
+ }
+ p->pPreallocatedUnused = pUnused;
+
+ /* Database filenames are double-zero terminated if they are not
+ ** URIs with parameters. Hence, they can always be passed into
+ ** sqlite3_uri_parameter(). */
+ assert( (flags & SQLITE_OPEN_URI) || zName[strlen(zName)+1]==0 );
+
+ }else if( !zName ){
+ /* If zName is NULL, the upper layer is requesting a temp file. */
+ assert(isDelete && !isNewJrnl);
+ rc = unixGetTempname(pVfs->mxPathname, zTmpname);
+ if( rc!=SQLITE_OK ){
+ return rc;
+ }
+ zName = zTmpname;
+
+ /* Generated temporary filenames are always double-zero terminated
+ ** for use by sqlite3_uri_parameter(). */
+ assert( zName[strlen(zName)+1]==0 );
+ }
+
+ /* Determine the value of the flags parameter passed to POSIX function
+ ** open(). These must be calculated even if open() is not called, as
+ ** they may be stored as part of the file handle and used by the
+ ** 'conch file' locking functions later on. */
+ if( isReadonly ) openFlags |= O_RDONLY;
+ if( isReadWrite ) openFlags |= O_RDWR;
+ if( isCreate ) openFlags |= O_CREAT;
+ if( isExclusive ) openFlags |= (O_EXCL|O_NOFOLLOW);
+ openFlags |= (O_LARGEFILE|O_BINARY|O_NOFOLLOW);
+
+ if( fd<0 ){
+ mode_t openMode; /* Permissions to create file with */
+ uid_t uid; /* Userid for the file */
+ gid_t gid; /* Groupid for the file */
+ rc = findCreateFileMode(zName, flags, &openMode, &uid, &gid);
+ if( rc!=SQLITE_OK ){
+ assert( !p->pPreallocatedUnused );
+ assert( eType==SQLITE_OPEN_WAL || eType==SQLITE_OPEN_MAIN_JOURNAL );
+ return rc;
+ }
+ fd = robust_open(zName, openFlags, openMode);
+ OSTRACE(("OPENX %-3d %s 0%o\n", fd, zName, openFlags));
+ assert( !isExclusive || (openFlags & O_CREAT)!=0 );
+ if( fd<0 ){
+ if( isNewJrnl && errno==EACCES && osAccess(zName, F_OK) ){
+ /* If unable to create a journal because the directory is not
+ ** writable, change the error code to indicate that. */
+ rc = SQLITE_READONLY_DIRECTORY;
+ }else if( errno!=EISDIR && isReadWrite ){
+ /* Failed to open the file for read/write access. Try read-only. */
+ flags &= ~(SQLITE_OPEN_READWRITE|SQLITE_OPEN_CREATE);
+ openFlags &= ~(O_RDWR|O_CREAT);
+ flags |= SQLITE_OPEN_READONLY;
+ openFlags |= O_RDONLY;
+ isReadonly = 1;
+ fd = robust_open(zName, openFlags, openMode);
+ }
+ }
+ if( fd<0 ){
+ int rc2 = unixLogError(SQLITE_CANTOPEN_BKPT, "open", zName);
+ if( rc==SQLITE_OK ) rc = rc2;
+ goto open_finished;
+ }
+
+ /* The owner of the rollback journal or WAL file should always be the
+ ** same as the owner of the database file. Try to ensure that this is
+ ** the case. The chown() system call will be a no-op if the current
+    ** process lacks root privileges, but we should at least try. Without
+    ** this step, if a root process opens a database file, it can leave
+    ** behind a journal/WAL that is owned by root and hence make the
+ ** database inaccessible to unprivileged processes.
+ **
+ ** If openMode==0, then that means uid and gid are not set correctly
+ ** (probably because SQLite is configured to use 8+3 filename mode) and
+ ** in that case we do not want to attempt the chown().
+ */
+ if( openMode && (flags & (SQLITE_OPEN_WAL|SQLITE_OPEN_MAIN_JOURNAL))!=0 ){
+ robustFchown(fd, uid, gid);
+ }
+ }
+ assert( fd>=0 );
+ if( pOutFlags ){
+ *pOutFlags = flags;
+ }
+
+ if( p->pPreallocatedUnused ){
+ p->pPreallocatedUnused->fd = fd;
+ p->pPreallocatedUnused->flags =
+ flags & (SQLITE_OPEN_READONLY|SQLITE_OPEN_READWRITE);
+ }
+
+ if( isDelete ){
+#if OS_VXWORKS
+ zPath = zName;
+#elif defined(SQLITE_UNLINK_AFTER_CLOSE)
+ zPath = sqlite3_mprintf("%s", zName);
+ if( zPath==0 ){
+ robust_close(p, fd, __LINE__);
+ return SQLITE_NOMEM_BKPT;
+ }
+#else
+ osUnlink(zName);
+#endif
+ }
+#if SQLITE_ENABLE_LOCKING_STYLE
+ else{
+ p->openFlags = openFlags;
+ }
+#endif
+
+#if defined(__APPLE__) || SQLITE_ENABLE_LOCKING_STYLE
+ if( fstatfs(fd, &fsInfo) == -1 ){
+ storeLastErrno(p, errno);
+ robust_close(p, fd, __LINE__);
+ return SQLITE_IOERR_ACCESS;
+ }
+ if (0 == strncmp("msdos", fsInfo.f_fstypename, 5)) {
+ ((unixFile*)pFile)->fsFlags |= SQLITE_FSFLAGS_IS_MSDOS;
+ }
+ if (0 == strncmp("exfat", fsInfo.f_fstypename, 5)) {
+ ((unixFile*)pFile)->fsFlags |= SQLITE_FSFLAGS_IS_MSDOS;
+ }
+#endif
+
+ /* Set up appropriate ctrlFlags */
+ if( isDelete ) ctrlFlags |= UNIXFILE_DELETE;
+ if( isReadonly ) ctrlFlags |= UNIXFILE_RDONLY;
+ noLock = eType!=SQLITE_OPEN_MAIN_DB;
+ if( noLock ) ctrlFlags |= UNIXFILE_NOLOCK;
+ if( isNewJrnl ) ctrlFlags |= UNIXFILE_DIRSYNC;
+ if( flags & SQLITE_OPEN_URI ) ctrlFlags |= UNIXFILE_URI;
+
+#if SQLITE_ENABLE_LOCKING_STYLE
+#if SQLITE_PREFER_PROXY_LOCKING
+ isAutoProxy = 1;
+#endif
+ if( isAutoProxy && (zPath!=NULL) && (!noLock) && pVfs->xOpen ){
+ char *envforce = getenv("SQLITE_FORCE_PROXY_LOCKING");
+ int useProxy = 0;
+
+ /* SQLITE_FORCE_PROXY_LOCKING==1 means force always use proxy, 0 means
+ ** never use proxy, NULL means use proxy for non-local files only. */
+ if( envforce!=NULL ){
+ useProxy = atoi(envforce)>0;
+ }else{
+ useProxy = !(fsInfo.f_flags&MNT_LOCAL);
+ }
+ if( useProxy ){
+ rc = fillInUnixFile(pVfs, fd, pFile, zPath, ctrlFlags);
+ if( rc==SQLITE_OK ){
+ rc = proxyTransformUnixFile((unixFile*)pFile, ":auto:");
+ if( rc!=SQLITE_OK ){
+ /* Use unixClose to clean up the resources added in fillInUnixFile
+ ** and clear all the structure's references. Specifically,
+ ** pFile->pMethods will be NULL so sqlite3OsClose will be a no-op
+ */
+ unixClose(pFile);
+ return rc;
+ }
+ }
+ goto open_finished;
+ }
+ }
+#endif
+
+ assert( zPath==0 || zPath[0]=='/'
+ || eType==SQLITE_OPEN_SUPER_JOURNAL || eType==SQLITE_OPEN_MAIN_JOURNAL
+ );
+ rc = fillInUnixFile(pVfs, fd, pFile, zPath, ctrlFlags);
+
+open_finished:
+ if( rc!=SQLITE_OK ){
+ sqlite3_free(p->pPreallocatedUnused);
+ }
+ return rc;
+}
+
+
+/*
+** Delete the file at zPath. If the dirSync argument is true, fsync()
+** the directory after deleting the file.
+*/
+static int unixDelete(
+ sqlite3_vfs *NotUsed, /* VFS containing this as the xDelete method */
+ const char *zPath, /* Name of file to be deleted */
+ int dirSync /* If true, fsync() directory after deleting file */
+){
+ int rc = SQLITE_OK;
+ UNUSED_PARAMETER(NotUsed);
+ SimulateIOError(return SQLITE_IOERR_DELETE);
+ if( osUnlink(zPath)==(-1) ){
+ if( errno==ENOENT
+#if OS_VXWORKS
+ || osAccess(zPath,0)!=0
+#endif
+ ){
+ rc = SQLITE_IOERR_DELETE_NOENT;
+ }else{
+ rc = unixLogError(SQLITE_IOERR_DELETE, "unlink", zPath);
+ }
+ return rc;
+ }
+#ifndef SQLITE_DISABLE_DIRSYNC
+ if( (dirSync & 1)!=0 ){
+ int fd;
+ rc = osOpenDirectory(zPath, &fd);
+ if( rc==SQLITE_OK ){
+ if( full_fsync(fd,0,0) ){
+ rc = unixLogError(SQLITE_IOERR_DIR_FSYNC, "fsync", zPath);
+ }
+ robust_close(0, fd, __LINE__);
+ }else{
+ assert( rc==SQLITE_CANTOPEN );
+ rc = SQLITE_OK;
+ }
+ }
+#endif
+ return rc;
+}
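+
+/*
+** The unlink-then-fsync-the-directory pattern above as plain POSIX
+** calls (illustrative sketch, compiled out; durableUnlink is a
+** hypothetical helper and zDir must name the containing directory):
+*/
+#if 0
+#include <fcntl.h>
+#include <unistd.h>
+
+static int durableUnlink(const char *zPath, const char *zDir){
+  int fd, rc = 0;
+  if( unlink(zPath) ) return -1;
+  fd = open(zDir, O_RDONLY);    /* fsync() the directory so that the   */
+  if( fd<0 ) return -1;         /* removal itself survives a power cut */
+  if( fsync(fd) ) rc = -1;
+  close(fd);
+  return rc;
+}
+#endif /* illustrative sketch */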
+
+/*
+** Test the existence of or access permissions of file zPath. The
+** test performed depends on the value of flags:
+**
+** SQLITE_ACCESS_EXISTS: Return 1 if the file exists
+**     SQLITE_ACCESS_READWRITE: Return 1 if the file is readable and writable.
+** SQLITE_ACCESS_READONLY: Return 1 if the file is readable.
+**
+** Otherwise return 0.
+*/
+static int unixAccess(
+ sqlite3_vfs *NotUsed, /* The VFS containing this xAccess method */
+ const char *zPath, /* Path of the file to examine */
+ int flags, /* What do we want to learn about the zPath file? */
+ int *pResOut /* Write result boolean here */
+){
+ UNUSED_PARAMETER(NotUsed);
+ SimulateIOError( return SQLITE_IOERR_ACCESS; );
+ assert( pResOut!=0 );
+
+ /* The spec says there are three possible values for flags. But only
+ ** two of them are actually used */
+ assert( flags==SQLITE_ACCESS_EXISTS || flags==SQLITE_ACCESS_READWRITE );
+
+ if( flags==SQLITE_ACCESS_EXISTS ){
+ struct stat buf;
+ *pResOut = 0==osStat(zPath, &buf) &&
+ (!S_ISREG(buf.st_mode) || buf.st_size>0);
+ }else{
+ *pResOut = osAccess(zPath, W_OK|R_OK)==0;
+ }
+ return SQLITE_OK;
+}
+
+/*
+** Turn the relative path in zPath into a full path and write the result
+** into the nOut-byte output buffer zOut.
+*/
+static int mkFullPathname(
+ const char *zPath, /* Input path */
+ char *zOut, /* Output buffer */
+ int nOut /* Allocated size of buffer zOut */
+){
+ int nPath = sqlite3Strlen30(zPath);
+ int iOff = 0;
+ if( zPath[0]!='/' ){
+ if( osGetcwd(zOut, nOut-2)==0 ){
+ return unixLogError(SQLITE_CANTOPEN_BKPT, "getcwd", zPath);
+ }
+ iOff = sqlite3Strlen30(zOut);
+ zOut[iOff++] = '/';
+ }
+ if( (iOff+nPath+1)>nOut ){
+ /* SQLite assumes that xFullPathname() nul-terminates the output buffer
+ ** even if it returns an error. */
+ zOut[iOff] = '\0';
+ return SQLITE_CANTOPEN_BKPT;
+ }
+ sqlite3_snprintf(nOut-iOff, &zOut[iOff], "%s", zPath);
+ return SQLITE_OK;
+}
+
+/*
+** Turn a relative pathname into a full pathname. The relative path
+** is stored as a nul-terminated string in the buffer pointed to by
+** zPath.
+**
+** zOut points to a buffer of at least sqlite3_vfs.mxPathname bytes
+** (in this case, MAX_PATHNAME bytes). The full-path is written to
+** this buffer before returning.
+*/
+static int unixFullPathname(
+ sqlite3_vfs *pVfs, /* Pointer to vfs object */
+ const char *zPath, /* Possibly relative input path */
+ int nOut, /* Size of output buffer in bytes */
+ char *zOut /* Output buffer */
+){
+#if !defined(HAVE_READLINK) || !defined(HAVE_LSTAT)
+ return mkFullPathname(zPath, zOut, nOut);
+#else
+ int rc = SQLITE_OK;
+ int nByte;
+ int nLink = 0; /* Number of symbolic links followed so far */
+ const char *zIn = zPath; /* Input path for each iteration of loop */
+ char *zDel = 0;
+
+ assert( pVfs->mxPathname==MAX_PATHNAME );
+ UNUSED_PARAMETER(pVfs);
+
+ /* It's odd to simulate an io-error here, but really this is just
+ ** using the io-error infrastructure to test that SQLite handles this
+ ** function failing. This function could fail if, for example, the
+ ** current working directory has been unlinked.
+ */
+ SimulateIOError( return SQLITE_ERROR );
+
+ do {
+
+ /* Call stat() on path zIn. Set bLink to true if the path is a symbolic
+ ** link, or false otherwise. */
+ int bLink = 0;
+ struct stat buf;
+ if( osLstat(zIn, &buf)!=0 ){
+ if( errno!=ENOENT ){
+ rc = unixLogError(SQLITE_CANTOPEN_BKPT, "lstat", zIn);
+ }
+ }else{
+ bLink = S_ISLNK(buf.st_mode);
+ }
+
+ if( bLink ){
+ nLink++;
+ if( zDel==0 ){
+ zDel = sqlite3_malloc(nOut);
+ if( zDel==0 ) rc = SQLITE_NOMEM_BKPT;
+ }else if( nLink>=SQLITE_MAX_SYMLINKS ){
+ rc = SQLITE_CANTOPEN_BKPT;
+ }
+
+ if( rc==SQLITE_OK ){
+ nByte = osReadlink(zIn, zDel, nOut-1);
+ if( nByte<0 ){
+ rc = unixLogError(SQLITE_CANTOPEN_BKPT, "readlink", zIn);
+ }else{
+ if( zDel[0]!='/' ){
+ int n;
+ for(n = sqlite3Strlen30(zIn); n>0 && zIn[n-1]!='/'; n--);
+ if( nByte+n+1>nOut ){
+ rc = SQLITE_CANTOPEN_BKPT;
+ }else{
+ memmove(&zDel[n], zDel, nByte+1);
+ memcpy(zDel, zIn, n);
+ nByte += n;
+ }
+ }
+ zDel[nByte] = '\0';
+ }
+ }
+
+ zIn = zDel;
+ }
+
+ assert( rc!=SQLITE_OK || zIn!=zOut || zIn[0]=='/' );
+ if( rc==SQLITE_OK && zIn!=zOut ){
+ rc = mkFullPathname(zIn, zOut, nOut);
+ }
+ if( bLink==0 ) break;
+ zIn = zOut;
+ }while( rc==SQLITE_OK );
+
+ sqlite3_free(zDel);
+ if( rc==SQLITE_OK && nLink ) rc = SQLITE_OK_SYMLINK;
+ return rc;
+#endif /* HAVE_READLINK && HAVE_LSTAT */
+}
+
+
+#ifndef SQLITE_OMIT_LOAD_EXTENSION
+/*
+** Interfaces for opening a shared library, finding entry points
+** within the shared library, and closing the shared library.
+*/
+#include <dlfcn.h>
+static void *unixDlOpen(sqlite3_vfs *NotUsed, const char *zFilename){
+ UNUSED_PARAMETER(NotUsed);
+ return dlopen(zFilename, RTLD_NOW | RTLD_GLOBAL);
+}
+
+/*
+** SQLite calls this function immediately after a call to unixDlSym() or
+** unixDlOpen() fails (returns a null pointer). If a more detailed error
+** message is available, it is written to zBufOut. If no error message
+** is available, zBufOut is left unmodified and SQLite uses a default
+** error message.
+*/
+static void unixDlError(sqlite3_vfs *NotUsed, int nBuf, char *zBufOut){
+ const char *zErr;
+ UNUSED_PARAMETER(NotUsed);
+ unixEnterMutex();
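+  /* dlerror() may use static storage and POSIX does not require it to
+  ** be thread-safe, hence the global mutex around this call. */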
+ zErr = dlerror();
+ if( zErr ){
+ sqlite3_snprintf(nBuf, zBufOut, "%s", zErr);
+ }
+ unixLeaveMutex();
+}
+static void (*unixDlSym(sqlite3_vfs *NotUsed, void *p, const char*zSym))(void){
+ /*
+ ** GCC with -pedantic-errors says that C90 does not allow a void* to be
+ ** cast into a pointer to a function. And yet the library dlsym() routine
+ ** returns a void* which is really a pointer to a function. So how do we
+ ** use dlsym() with -pedantic-errors?
+ **
+ ** Variable x below is defined to be a pointer to a function taking
+ ** parameters void* and const char* and returning a pointer to a function.
+ ** We initialize x by assigning it a pointer to the dlsym() function.
+ ** (That assignment requires a cast.) Then we call the function that
+ ** x points to.
+ **
+ ** This work-around is unlikely to work correctly on any system where
+ ** you really cannot cast a function pointer into void*. But then, on the
+ ** other hand, dlsym() will not work on such a system either, so we have
+ ** not really lost anything.
+ */
+ void (*(*x)(void*,const char*))(void);
+ UNUSED_PARAMETER(NotUsed);
+ x = (void(*(*)(void*,const char*))(void))dlsym;
+ return (*x)(p, zSym);
+}
+static void unixDlClose(sqlite3_vfs *NotUsed, void *pHandle){
+ UNUSED_PARAMETER(NotUsed);
+ dlclose(pHandle);
+}
+#else /* if SQLITE_OMIT_LOAD_EXTENSION is defined: */
+ #define unixDlOpen 0
+ #define unixDlError 0
+ #define unixDlSym 0
+ #define unixDlClose 0
+#endif
+
+/*
+** Write nBuf bytes of random data to the supplied buffer zBuf.
+*/
+static int unixRandomness(sqlite3_vfs *NotUsed, int nBuf, char *zBuf){
+ UNUSED_PARAMETER(NotUsed);
+ assert((size_t)nBuf>=(sizeof(time_t)+sizeof(int)));
+
+ /* We have to initialize zBuf to prevent valgrind from reporting
+ ** errors. The reports issued by valgrind are incorrect - we would
+ ** prefer that the randomness be increased by making use of the
+ ** uninitialized space in zBuf - but valgrind errors tend to worry
+ ** some users. Rather than argue, it seems easier just to initialize
+ ** the whole array and silence valgrind, even if that means less randomness
+ ** in the random seed.
+ **
+ ** When testing, initializing zBuf[] to zero is all we do. That means
+ ** that we always use the same random number sequence. This makes the
+ ** tests repeatable.
+ */
+ memset(zBuf, 0, nBuf);
+ randomnessPid = osGetpid(0);
+#if !defined(SQLITE_TEST) && !defined(SQLITE_OMIT_RANDOMNESS)
+ {
+ int fd, got;
+ fd = robust_open("/dev/urandom", O_RDONLY, 0);
+ if( fd<0 ){
+ time_t t;
+ time(&t);
+ memcpy(zBuf, &t, sizeof(t));
+ memcpy(&zBuf[sizeof(t)], &randomnessPid, sizeof(randomnessPid));
+ assert( sizeof(t)+sizeof(randomnessPid)<=(size_t)nBuf );
+ nBuf = sizeof(t) + sizeof(randomnessPid);
+ }else{
+ do{ got = osRead(fd, zBuf, nBuf); }while( got<0 && errno==EINTR );
+ robust_close(0, fd, __LINE__);
+ }
+ }
+#endif
+ return nBuf;
+}
+
+
+/*
+** Sleep for a little while. Return the amount of time slept.
+** The argument is the number of microseconds we want to sleep.
+** The return value is the number of microseconds of sleep actually
+** requested from the underlying operating system, a number which
+** may be greater than the argument but is never less than it.
+*/
+static int unixSleep(sqlite3_vfs *NotUsed, int microseconds){
+#if OS_VXWORKS
+ struct timespec sp;
+
+ sp.tv_sec = microseconds / 1000000;
+ sp.tv_nsec = (microseconds % 1000000) * 1000;
+ nanosleep(&sp, NULL);
+ UNUSED_PARAMETER(NotUsed);
+ return microseconds;
+#elif defined(HAVE_USLEEP) && HAVE_USLEEP
+ usleep(microseconds);
+ UNUSED_PARAMETER(NotUsed);
+ return microseconds;
+#else
+ int seconds = (microseconds+999999)/1000000;
+ sleep(seconds);
+ UNUSED_PARAMETER(NotUsed);
+ return seconds*1000000;
+#endif
+}
+
+/*
+** The following variable, if set to a non-zero value, is interpreted as
+** the number of seconds since 1970 and is used to set the result of
+** sqlite3OsCurrentTime() during testing.
+*/
+#ifdef SQLITE_TEST
+SQLITE_API int sqlite3_current_time = 0; /* Fake system time in seconds since 1970. */
+#endif
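+
+/*
+** Editor's sketch (test-harness usage; assumes an SQLITE_TEST build):
+** a test can freeze the clock seen by the date/time functions by setting
+** the variable above before running queries, e.g.:
+**
+**   extern int sqlite3_current_time;
+**   sqlite3_current_time = 1000000000;
+**
+** which pins the clock to 2001-09-09 01:46:40 UTC.
+*/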
+
+/*
+** Find the current time (in Universal Coordinated Time). Write into *piNow
+** the current time and date as a Julian Day number times 86_400_000. In
+** other words, write into *piNow the number of milliseconds since the Julian
+** epoch of noon in Greenwich on November 24, 4714 B.C. according to the
+** proleptic Gregorian calendar.
+**
+** On success, return SQLITE_OK. Return SQLITE_ERROR if the time and date
+** cannot be found.
+*/
+static int unixCurrentTimeInt64(sqlite3_vfs *NotUsed, sqlite3_int64 *piNow){
+ static const sqlite3_int64 unixEpoch = 24405875*(sqlite3_int64)8640000;
+ int rc = SQLITE_OK;
+#if defined(NO_GETTOD)
+ time_t t;
+ time(&t);
+ *piNow = ((sqlite3_int64)t)*1000 + unixEpoch;
+#elif OS_VXWORKS
+ struct timespec sNow;
+ clock_gettime(CLOCK_REALTIME, &sNow);
+ *piNow = unixEpoch + 1000*(sqlite3_int64)sNow.tv_sec + sNow.tv_nsec/1000000;
+#else
+ struct timeval sNow;
+ (void)gettimeofday(&sNow, 0); /* Cannot fail given valid arguments */
+ *piNow = unixEpoch + 1000*(sqlite3_int64)sNow.tv_sec + sNow.tv_usec/1000;
+#endif
+
+#ifdef SQLITE_TEST
+ if( sqlite3_current_time ){
+ *piNow = 1000*(sqlite3_int64)sqlite3_current_time + unixEpoch;
+ }
+#endif
+ UNUSED_PARAMETER(NotUsed);
+ return rc;
+}
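+
+/*
+** Editor's note on the unixEpoch constant in the routine above (a
+** derivation only, no new behavior): the Unix epoch 1970-01-01T00:00:00Z
+** falls on Julian Day 2440587.5, so the offset in milliseconds is
+**
+**   2440587.5 * 86400000 = 210866760000000
+**
+** which the code factors as 24405875*8640000 so that both literals fit
+** comfortably in a 32-bit int before the 64-bit multiply.
+*/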
+
+#ifndef SQLITE_OMIT_DEPRECATED
+/*
+** Find the current time (in Universal Coordinated Time). Write the
+** current time and date as a Julian Day number into *prNow and
+** return 0. Return 1 if the time and date cannot be found.
+*/
+static int unixCurrentTime(sqlite3_vfs *NotUsed, double *prNow){
+ sqlite3_int64 i = 0;
+ int rc;
+ UNUSED_PARAMETER(NotUsed);
+ rc = unixCurrentTimeInt64(0, &i);
+ *prNow = i/86400000.0;
+ return rc;
+}
+#else
+# define unixCurrentTime 0
+#endif
+
+/*
+** The xGetLastError() method is designed to return a better
+** low-level error message when operating-system problems come up
+** during SQLite operation. Only the integer return code is currently
+** used.
+*/
+static int unixGetLastError(sqlite3_vfs *NotUsed, int NotUsed2, char *NotUsed3){
+ UNUSED_PARAMETER(NotUsed);
+ UNUSED_PARAMETER(NotUsed2);
+ UNUSED_PARAMETER(NotUsed3);
+ return errno;
+}
+
+
+/*
+************************ End of sqlite3_vfs methods ***************************
+******************************************************************************/
+
+/******************************************************************************
+************************** Begin Proxy Locking ********************************
+**
+** Proxy locking is an "uber-locking-method" in this sense:  It uses the
+** other locking methods on secondary lock files.  Proxy locking is a
+** meta-layer over top of the primitive locking implemented above.  For
+** this reason, the division that implements proxy locking is deferred
+** until late in the file (here) after all of the other I/O methods have
+** been defined - so that the primitive locking methods are available
+** as services to help with the implementation of proxy locking.
+**
+****
+**
+** The default locking schemes in SQLite use byte-range locks on the
+** database file to coordinate safe, concurrent access by multiple readers
+** and writers [http://sqlite.org/lockingv3.html]. The five file locking
+** states (UNLOCKED, PENDING, SHARED, RESERVED, EXCLUSIVE) are implemented
+** as POSIX read & write locks (via fcntl) over a fixed set of locations.
+** On AFP and SMB only exclusive byte-range locks are available via fsctl
+** with _IOWR('z', 23, struct ByteRangeLockPB2), so the same five states
+** are tracked with exclusive locks.  To simulate an F_RDLCK on the shared
+** range, on AFP a randomly selected address in the shared range is taken
+** for a SHARED lock, and the entire shared range is taken for an
+** EXCLUSIVE lock.  The locations are:
+**
+** PENDING_BYTE 0x40000000
+** RESERVED_BYTE 0x40000001
+** SHARED_RANGE 0x40000002 -> 0x40000200
+**
+** This works well on the local file system, but shows a nearly 100x
+** slowdown in read performance on AFP because the AFP client disables
+** the read cache when byte-range locks are present. Enabling the read
+** cache exposes a cache coherency problem that is present on all OS X
+** supported network file systems. NFS and AFP both observe the
+** close-to-open semantics for ensuring cache coherency
+** [http://nfs.sourceforge.net/#faq_a8], which does not effectively
+** address the requirements for concurrent database access by multiple
+** readers and writers
+** [http://www.nabble.com/SQLite-on-NFS-cache-coherency-td15655701.html].
+**
+** To address the performance and cache coherency issues, proxy file locking
+** changes the way database access is controlled by limiting access to a
+** single host at a time and moving file locks off of the database file
+** and onto a proxy file on the local file system.
+**
+**
+** Using proxy locks
+** -----------------
+**
+** C APIs
+**
+** sqlite3_file_control(db, dbname, SQLITE_FCNTL_SET_LOCKPROXYFILE,
+** <proxy_path> | ":auto:");
+** sqlite3_file_control(db, dbname, SQLITE_FCNTL_GET_LOCKPROXYFILE,
+** &<proxy_path>);
+**
+**
+** SQL pragmas
+**
+** PRAGMA [database.]lock_proxy_file=<proxy_path> | :auto:
+** PRAGMA [database.]lock_proxy_file
+**
+** Specifying ":auto:" means that if there is a conch file with a matching
+** host ID in it, the proxy path in the conch file will be used, otherwise
+** a proxy path based on the user's temp dir
+** (via confstr(_CS_DARWIN_USER_TEMP_DIR,...)) will be used and the
+** actual proxy file name is generated from the name and path of the
+** database file. For example:
+**
+** For database path "/Users/me/foo.db"
+**      The lock path will be "<tmpdir>/sqliteplocks/_Users_me_foo.db:auto:"
+**
+** Once a lock proxy is configured for a database connection, it cannot
+** be removed; however, it may be switched to a different proxy path via
+** the above APIs (assuming the conch file is not being held by another
+** connection or process).
+**
+**
+** How proxy locking works
+** -----------------------
+**
+** Proxy file locking relies primarily on two new supporting files:
+**
+** * conch file to limit access to the database file to a single host
+** at a time
+**
+** * proxy file to act as a proxy for the advisory locks normally
+** taken on the database
+**
+** The conch file - to use a proxy file, sqlite must first "hold the conch"
+** by taking an sqlite-style shared lock on the conch file, reading the
+** contents and comparing the host's unique host ID (see below) and lock
+** proxy path against the values stored in the conch. The conch file is
+** stored in the same directory as the database file and the file name
+** is patterned after the database file name as ".<databasename>-conch".
+** If the conch file does not exist, or its contents do not match the
+** host ID and/or proxy path, then the lock is escalated to an exclusive
+** lock, the conch file contents are updated with the host ID and proxy
+** path, and the lock is downgraded to a shared lock again.  If the conch
+** is held by another process (with a shared lock), the exclusive lock
+** will fail and SQLITE_BUSY is returned.
+**
+** The proxy file - a single-byte file used for all advisory file locks
+** normally taken on the database file. This allows for safe sharing
+** of the database file for multiple readers and writers on the same
+** host (the conch ensures that they all use the same local lock file).
+**
+** Requesting the lock proxy does not immediately take the conch; it is
+** only taken when the first request to lock the database file is made.
+** This matches the semantics of the traditional locking behavior, where
+** opening a connection to a database file does not take a lock on it.
+** The shared lock and an open file descriptor are maintained until
+** the connection to the database is closed.
+**
+** The conch file and the proxy file are never deleted so they only need
+** to be created the first time they are used.
+**
+** Configuration options
+** ---------------------
+**
+** SQLITE_PREFER_PROXY_LOCKING
+**
+** Database files accessed on non-local file systems are
+** automatically configured for proxy locking, lock files are
+** named automatically using the same logic as
+** PRAGMA lock_proxy_file=":auto:"
+**
+** SQLITE_PROXY_DEBUG
+**
+** Enables the logging of error messages during host id file
+** retrieval and creation
+**
+** LOCKPROXYDIR
+**
+** Overrides the default directory used for lock proxy files that
+** are named automatically via the ":auto:" setting
+**
+** SQLITE_DEFAULT_PROXYDIR_PERMISSIONS
+**
+** Permissions to use when creating a directory for storing the
+** lock proxy files, only used when LOCKPROXYDIR is not set.
+**
+**
+** As mentioned above, when compiled with SQLITE_PREFER_PROXY_LOCKING,
+** setting the environment variable SQLITE_FORCE_PROXY_LOCKING to 1 will
+** force proxy locking to be used for every database file opened, and 0
+** will force automatic proxy locking to be disabled for all database
+** files (an explicit PRAGMA lock_proxy_file or a direct call to the
+** sqlite3_file_control API with SQLITE_FCNTL_SET_LOCKPROXYFILE is not
+** affected by SQLITE_FORCE_PROXY_LOCKING).
+*/
+
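+/*
+** Editor's sketch (application-side usage; "db" and the error handling
+** are assumptions of this example, not part of the original sources, and
+** the calls only have an effect on builds that compile in proxy locking):
+**
+**   const char *zProxy = 0;
+**   int rc = sqlite3_file_control(db, "main",
+**                SQLITE_FCNTL_SET_LOCKPROXYFILE, (void*)":auto:");
+**   if( rc==SQLITE_OK ){
+**     rc = sqlite3_file_control(db, "main",
+**              SQLITE_FCNTL_GET_LOCKPROXYFILE, (void*)&zProxy);
+**   }
+*/
+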
+/*
+** Proxy locking is only available on MacOSX
+*/
+#if defined(__APPLE__) && SQLITE_ENABLE_LOCKING_STYLE
+
+/*
+** The proxyLockingContext has the path and file structures for the remote
+** and local proxy files in it
+*/
+typedef struct proxyLockingContext proxyLockingContext;
+struct proxyLockingContext {
+ unixFile *conchFile; /* Open conch file */
+ char *conchFilePath; /* Name of the conch file */
+ unixFile *lockProxy; /* Open proxy lock file */
+ char *lockProxyPath; /* Name of the proxy lock file */
+ char *dbPath; /* Name of the open file */
+ int conchHeld; /* 1 if the conch is held, -1 if lockless */
+ int nFails; /* Number of conch taking failures */
+ void *oldLockingContext; /* Original lockingcontext to restore on close */
+ sqlite3_io_methods const *pOldMethod; /* Original I/O methods for close */
+};
+
+/*
+** The proxy lock file path for the database at dbPath is written into lPath,
+** which must point to valid, writable memory large enough for a maxLen length
+** file path.
+*/
+static int proxyGetLockPath(const char *dbPath, char *lPath, size_t maxLen){
+ int len;
+ int dbLen;
+ int i;
+
+#ifdef LOCKPROXYDIR
+ len = strlcpy(lPath, LOCKPROXYDIR, maxLen);
+#else
+# ifdef _CS_DARWIN_USER_TEMP_DIR
+ {
+ if( !confstr(_CS_DARWIN_USER_TEMP_DIR, lPath, maxLen) ){
+ OSTRACE(("GETLOCKPATH failed %s errno=%d pid=%d\n",
+ lPath, errno, osGetpid(0)));
+ return SQLITE_IOERR_LOCK;
+ }
+ len = strlcat(lPath, "sqliteplocks", maxLen);
+ }
+# else
+ len = strlcpy(lPath, "/tmp/", maxLen);
+# endif
+#endif
+
+ if( lPath[len-1]!='/' ){
+ len = strlcat(lPath, "/", maxLen);
+ }
+
+ /* transform the db path to a unique cache name */
+ dbLen = (int)strlen(dbPath);
+ for( i=0; i<dbLen && (i+len+7)<(int)maxLen; i++){
+ char c = dbPath[i];
+ lPath[i+len] = (c=='/')?'_':c;
+ }
+ lPath[i+len]='\0';
+ strlcat(lPath, ":auto:", maxLen);
+ OSTRACE(("GETLOCKPATH proxy lock path=%s pid=%d\n", lPath, osGetpid(0)));
+ return SQLITE_OK;
+}
+
+/*
+** Create any missing directories in lockPath so that the lock file
+** itself can then be created by the caller.
+*/
+static int proxyCreateLockPath(const char *lockPath){
+ int i, len;
+ char buf[MAXPATHLEN];
+ int start = 0;
+
+ assert(lockPath!=NULL);
+ /* try to create all the intermediate directories */
+ len = (int)strlen(lockPath);
+ buf[0] = lockPath[0];
+ for( i=1; i<len; i++ ){
+ if( lockPath[i] == '/' && (i - start > 0) ){
+ /* only mkdir if leaf dir != "." or "/" or ".." */
+ if( i-start>2 || (i-start==1 && buf[start] != '.' && buf[start] != '/')
+ || (i-start==2 && buf[start] != '.' && buf[start+1] != '.') ){
+ buf[i]='\0';
+ if( osMkdir(buf, SQLITE_DEFAULT_PROXYDIR_PERMISSIONS) ){
+ int err=errno;
+ if( err!=EEXIST ) {
+ OSTRACE(("CREATELOCKPATH FAILED creating %s, "
+ "'%s' proxy lock path=%s pid=%d\n",
+ buf, strerror(err), lockPath, osGetpid(0)));
+ return err;
+ }
+ }
+ }
+ start=i+1;
+ }
+ buf[i] = lockPath[i];
+ }
+ OSTRACE(("CREATELOCKPATH proxy lock path=%s pid=%d\n",lockPath,osGetpid(0)));
+ return 0;
+}
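+
+/*
+** Editor's note (worked example, derived from the loop above): for
+** lockPath "/tmp/sqliteplocks/_Users_me_foo.db:auto:" the loop attempts
+** mkdir("/tmp") and then mkdir("/tmp/sqliteplocks"), ignoring EEXIST, so
+** only genuinely missing directories are created.
+*/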
+
+/*
+** Create a new VFS file descriptor (stored in memory obtained from
+** sqlite3_malloc) and open the file named "path" in the file descriptor.
+**
+** The caller is responsible not only for closing the file descriptor
+** but also for freeing the memory associated with the file descriptor.
+*/
+static int proxyCreateUnixFile(
+ const char *path, /* path for the new unixFile */
+ unixFile **ppFile, /* unixFile created and returned by ref */
+ int islockfile /* if non zero missing dirs will be created */
+) {
+ int fd = -1;
+ unixFile *pNew;
+ int rc = SQLITE_OK;
+ int openFlags = O_RDWR | O_CREAT | O_NOFOLLOW;
+ sqlite3_vfs dummyVfs;
+ int terrno = 0;
+ UnixUnusedFd *pUnused = NULL;
+
+ /* 1. first try to open/create the file
+ ** 2. if that fails, and this is a lock file (not-conch), try creating
+ ** the parent directories and then try again.
+ ** 3. if that fails, try to open the file read-only
+ ** otherwise return BUSY (if lock file) or CANTOPEN for the conch file
+ */
+ pUnused = findReusableFd(path, openFlags);
+ if( pUnused ){
+ fd = pUnused->fd;
+ }else{
+ pUnused = sqlite3_malloc64(sizeof(*pUnused));
+ if( !pUnused ){
+ return SQLITE_NOMEM_BKPT;
+ }
+ }
+ if( fd<0 ){
+ fd = robust_open(path, openFlags, 0);
+ terrno = errno;
+ if( fd<0 && errno==ENOENT && islockfile ){
+ if( proxyCreateLockPath(path) == SQLITE_OK ){
+ fd = robust_open(path, openFlags, 0);
+ }
+ }
+ }
+ if( fd<0 ){
+ openFlags = O_RDONLY | O_NOFOLLOW;
+ fd = robust_open(path, openFlags, 0);
+ terrno = errno;
+ }
+ if( fd<0 ){
+ if( islockfile ){
+ return SQLITE_BUSY;
+ }
+ switch (terrno) {
+ case EACCES:
+ return SQLITE_PERM;
+ case EIO:
+ return SQLITE_IOERR_LOCK; /* even though it is the conch */
+ default:
+ return SQLITE_CANTOPEN_BKPT;
+ }
+ }
+
+ pNew = (unixFile *)sqlite3_malloc64(sizeof(*pNew));
+ if( pNew==NULL ){
+ rc = SQLITE_NOMEM_BKPT;
+ goto end_create_proxy;
+ }
+ memset(pNew, 0, sizeof(unixFile));
+ pNew->openFlags = openFlags;
+ memset(&dummyVfs, 0, sizeof(dummyVfs));
+ dummyVfs.pAppData = (void*)&autolockIoFinder;
+ dummyVfs.zName = "dummy";
+ pUnused->fd = fd;
+ pUnused->flags = openFlags;
+ pNew->pPreallocatedUnused = pUnused;
+
+ rc = fillInUnixFile(&dummyVfs, fd, (sqlite3_file*)pNew, path, 0);
+ if( rc==SQLITE_OK ){
+ *ppFile = pNew;
+ return SQLITE_OK;
+ }
+end_create_proxy:
+ robust_close(pNew, fd, __LINE__);
+ sqlite3_free(pNew);
+ sqlite3_free(pUnused);
+ return rc;
+}
+
+#ifdef SQLITE_TEST
+/* simulate multiple hosts by perturbing the host ID (see proxyGetHostID) */
+SQLITE_API int sqlite3_hostid_num = 0;
+#endif
+
+#define PROXY_HOSTIDLEN 16 /* conch file host id length */
+
+#if HAVE_GETHOSTUUID
+/* Not always defined in the headers as it ought to be */
+extern int gethostuuid(uuid_t id, const struct timespec *wait);
+#endif
+
+/* Get the host ID via gethostuuid().  pHostID must point to
+** PROXY_HOSTIDLEN bytes of writable memory.
+*/
+static int proxyGetHostID(unsigned char *pHostID, int *pError){
+ assert(PROXY_HOSTIDLEN == sizeof(uuid_t));
+ memset(pHostID, 0, PROXY_HOSTIDLEN);
+#if HAVE_GETHOSTUUID
+ {
+ struct timespec timeout = {1, 0}; /* 1 sec timeout */
+ if( gethostuuid(pHostID, &timeout) ){
+ int err = errno;
+ if( pError ){
+ *pError = err;
+ }
+ return SQLITE_IOERR;
+ }
+ }
+#else
+ UNUSED_PARAMETER(pError);
+#endif
+#ifdef SQLITE_TEST
+  /* simulate multiple hosts by perturbing the returned host ID */
+ if( sqlite3_hostid_num != 0){
+ pHostID[0] = (char)(pHostID[0] + (char)(sqlite3_hostid_num & 0xFF));
+ }
+#endif
+
+ return SQLITE_OK;
+}
+
+/* The conch file contains the header, the host ID, and the lock file path.
+*/
+#define PROXY_CONCHVERSION 2 /* 1-byte header, 16-byte host id, path */
+#define PROXY_HEADERLEN 1 /* conch file header length */
+#define PROXY_PATHINDEX (PROXY_HEADERLEN+PROXY_HOSTIDLEN)
+#define PROXY_MAXCONCHLEN (PROXY_HEADERLEN+PROXY_HOSTIDLEN+MAXPATHLEN)
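+
+/*
+** Editor's note: with the defines above, a version-2 conch file is laid
+** out as
+**
+**   byte  0      PROXY_CONCHVERSION (0x02)
+**   bytes 1-16   host UUID (PROXY_HOSTIDLEN bytes)
+**   bytes 17-    lock proxy file path, at most MAXPATHLEN bytes
+*/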
+
+/*
+** Takes an open conch file, copies the contents to a new path and then moves
+** it back. The newly created file's file descriptor is assigned to the
+** conch file structure and finally the original conch file descriptor is
+** closed. Returns zero if successful.
+*/
+static int proxyBreakConchLock(unixFile *pFile, uuid_t myHostID){
+ proxyLockingContext *pCtx = (proxyLockingContext *)pFile->lockingContext;
+ unixFile *conchFile = pCtx->conchFile;
+ char tPath[MAXPATHLEN];
+ char buf[PROXY_MAXCONCHLEN];
+ char *cPath = pCtx->conchFilePath;
+ size_t readLen = 0;
+ size_t pathLen = 0;
+ char errmsg[64] = "";
+ int fd = -1;
+ int rc = -1;
+ UNUSED_PARAMETER(myHostID);
+
+  /* create a new path by replacing the trailing '-conch' with '-break' */
+ pathLen = strlcpy(tPath, cPath, MAXPATHLEN);
+ if( pathLen>MAXPATHLEN || pathLen<6 ||
+ (strlcpy(&tPath[pathLen-5], "break", 6) != 5) ){
+ sqlite3_snprintf(sizeof(errmsg),errmsg,"path error (len %d)",(int)pathLen);
+ goto end_breaklock;
+ }
+ /* read the conch content */
+ readLen = osPread(conchFile->h, buf, PROXY_MAXCONCHLEN, 0);
+ if( readLen<PROXY_PATHINDEX ){
+ sqlite3_snprintf(sizeof(errmsg),errmsg,"read error (len %d)",(int)readLen);
+ goto end_breaklock;
+ }
+ /* write it out to the temporary break file */
+ fd = robust_open(tPath, (O_RDWR|O_CREAT|O_EXCL|O_NOFOLLOW), 0);
+ if( fd<0 ){
+ sqlite3_snprintf(sizeof(errmsg), errmsg, "create failed (%d)", errno);
+ goto end_breaklock;
+ }
+ if( osPwrite(fd, buf, readLen, 0) != (ssize_t)readLen ){
+ sqlite3_snprintf(sizeof(errmsg), errmsg, "write failed (%d)", errno);
+ goto end_breaklock;
+ }
+ if( rename(tPath, cPath) ){
+ sqlite3_snprintf(sizeof(errmsg), errmsg, "rename failed (%d)", errno);
+ goto end_breaklock;
+ }
+ rc = 0;
+ fprintf(stderr, "broke stale lock on %s\n", cPath);
+ robust_close(pFile, conchFile->h, __LINE__);
+ conchFile->h = fd;
+ conchFile->openFlags = O_RDWR | O_CREAT;
+
+end_breaklock:
+ if( rc ){
+ if( fd>=0 ){
+ osUnlink(tPath);
+ robust_close(pFile, fd, __LINE__);
+ }
+ fprintf(stderr, "failed to break stale lock on %s, %s\n", cPath, errmsg);
+ }
+ return rc;
+}
+
+/* Take the requested lock on the conch file and break a stale lock if the
+** host id matches.
+*/
+static int proxyConchLock(unixFile *pFile, uuid_t myHostID, int lockType){
+ proxyLockingContext *pCtx = (proxyLockingContext *)pFile->lockingContext;
+ unixFile *conchFile = pCtx->conchFile;
+ int rc = SQLITE_OK;
+ int nTries = 0;
+ struct timespec conchModTime;
+
+ memset(&conchModTime, 0, sizeof(conchModTime));
+ do {
+ rc = conchFile->pMethod->xLock((sqlite3_file*)conchFile, lockType);
+ nTries ++;
+ if( rc==SQLITE_BUSY ){
+ /* If the lock failed (busy):
+ * 1st try: get the mod time of the conch, wait 0.5s and try again.
+ * 2nd try: fail if the mod time changed or host id is different, wait
+ * 10 sec and try again
+ * 3rd try: break the lock unless the mod time has changed.
+ */
+ struct stat buf;
+ if( osFstat(conchFile->h, &buf) ){
+ storeLastErrno(pFile, errno);
+ return SQLITE_IOERR_LOCK;
+ }
+
+ if( nTries==1 ){
+ conchModTime = buf.st_mtimespec;
+ usleep(500000); /* wait 0.5 sec and try the lock again*/
+ continue;
+ }
+
+ assert( nTries>1 );
+ if( conchModTime.tv_sec != buf.st_mtimespec.tv_sec ||
+ conchModTime.tv_nsec != buf.st_mtimespec.tv_nsec ){
+ return SQLITE_BUSY;
+ }
+
+ if( nTries==2 ){
+ char tBuf[PROXY_MAXCONCHLEN];
+ int len = osPread(conchFile->h, tBuf, PROXY_MAXCONCHLEN, 0);
+ if( len<0 ){
+ storeLastErrno(pFile, errno);
+ return SQLITE_IOERR_LOCK;
+ }
+ if( len>PROXY_PATHINDEX && tBuf[0]==(char)PROXY_CONCHVERSION){
+ /* don't break the lock if the host id doesn't match */
+ if( 0!=memcmp(&tBuf[PROXY_HEADERLEN], myHostID, PROXY_HOSTIDLEN) ){
+ return SQLITE_BUSY;
+ }
+ }else{
+ /* don't break the lock on short read or a version mismatch */
+ return SQLITE_BUSY;
+ }
+ usleep(10000000); /* wait 10 sec and try the lock again */
+ continue;
+ }
+
+ assert( nTries==3 );
+ if( 0==proxyBreakConchLock(pFile, myHostID) ){
+ rc = SQLITE_OK;
+ if( lockType==EXCLUSIVE_LOCK ){
+ rc = conchFile->pMethod->xLock((sqlite3_file*)conchFile, SHARED_LOCK);
+ }
+ if( !rc ){
+ rc = conchFile->pMethod->xLock((sqlite3_file*)conchFile, lockType);
+ }
+ }
+ }
+ } while( rc==SQLITE_BUSY && nTries<3 );
+
+ return rc;
+}
+
+/* Take the conch by taking a shared lock and reading the conch contents.
+** If lockPath is non-NULL, the host ID and lock file path must match.  A NULL
+** lockPath means that the lockPath in the conch file will be used if the
+** host IDs match, or a new lock path will be generated automatically
+** and written to the conch file.
+*/
+static int proxyTakeConch(unixFile *pFile){
+ proxyLockingContext *pCtx = (proxyLockingContext *)pFile->lockingContext;
+
+ if( pCtx->conchHeld!=0 ){
+ return SQLITE_OK;
+ }else{
+ unixFile *conchFile = pCtx->conchFile;
+ uuid_t myHostID;
+ int pError = 0;
+ char readBuf[PROXY_MAXCONCHLEN];
+ char lockPath[MAXPATHLEN];
+ char *tempLockPath = NULL;
+ int rc = SQLITE_OK;
+ int createConch = 0;
+ int hostIdMatch = 0;
+ int readLen = 0;
+ int tryOldLockPath = 0;
+ int forceNewLockPath = 0;
+
+ OSTRACE(("TAKECONCH %d for %s pid=%d\n", conchFile->h,
+ (pCtx->lockProxyPath ? pCtx->lockProxyPath : ":auto:"),
+ osGetpid(0)));
+
+ rc = proxyGetHostID(myHostID, &pError);
+ if( (rc&0xff)==SQLITE_IOERR ){
+ storeLastErrno(pFile, pError);
+ goto end_takeconch;
+ }
+ rc = proxyConchLock(pFile, myHostID, SHARED_LOCK);
+ if( rc!=SQLITE_OK ){
+ goto end_takeconch;
+ }
+ /* read the existing conch file */
+ readLen = seekAndRead((unixFile*)conchFile, 0, readBuf, PROXY_MAXCONCHLEN);
+ if( readLen<0 ){
+ /* I/O error: lastErrno set by seekAndRead */
+ storeLastErrno(pFile, conchFile->lastErrno);
+ rc = SQLITE_IOERR_READ;
+ goto end_takeconch;
+ }else if( readLen<=(PROXY_HEADERLEN+PROXY_HOSTIDLEN) ||
+ readBuf[0]!=(char)PROXY_CONCHVERSION ){
+ /* a short read or version format mismatch means we need to create a new
+ ** conch file.
+ */
+ createConch = 1;
+ }
+ /* if the host id matches and the lock path already exists in the conch
+ ** we'll try to use the path there, if we can't open that path, we'll
+ ** retry with a new auto-generated path
+ */
+ do { /* in case we need to try again for an :auto: named lock file */
+
+ if( !createConch && !forceNewLockPath ){
+ hostIdMatch = !memcmp(&readBuf[PROXY_HEADERLEN], myHostID,
+ PROXY_HOSTIDLEN);
+ /* if the conch has data compare the contents */
+ if( !pCtx->lockProxyPath ){
+ /* for auto-named local lock file, just check the host ID and we'll
+ ** use the local lock file path that's already in there
+ */
+ if( hostIdMatch ){
+ size_t pathLen = (readLen - PROXY_PATHINDEX);
+
+ if( pathLen>=MAXPATHLEN ){
+ pathLen=MAXPATHLEN-1;
+ }
+ memcpy(lockPath, &readBuf[PROXY_PATHINDEX], pathLen);
+ lockPath[pathLen] = 0;
+ tempLockPath = lockPath;
+ tryOldLockPath = 1;
+ /* create a copy of the lock path if the conch is taken */
+ goto end_takeconch;
+ }
+ }else if( hostIdMatch
+ && !strncmp(pCtx->lockProxyPath, &readBuf[PROXY_PATHINDEX],
+ readLen-PROXY_PATHINDEX)
+ ){
+ /* conch host and lock path match */
+ goto end_takeconch;
+ }
+ }
+
+ /* if the conch isn't writable and doesn't match, we can't take it */
+ if( (conchFile->openFlags&O_RDWR) == 0 ){
+ rc = SQLITE_BUSY;
+ goto end_takeconch;
+ }
+
+ /* either the conch didn't match or we need to create a new one */
+ if( !pCtx->lockProxyPath ){
+ proxyGetLockPath(pCtx->dbPath, lockPath, MAXPATHLEN);
+ tempLockPath = lockPath;
+ /* create a copy of the lock path _only_ if the conch is taken */
+ }
+
+      /* Update the conch with our host and path (this will fail if another
+      ** process already has a shared lock).  If the host id matches, use
+      ** the big stick.
+ */
+ futimes(conchFile->h, NULL);
+ if( hostIdMatch && !createConch ){
+ if( conchFile->pInode && conchFile->pInode->nShared>1 ){
+ /* We are trying for an exclusive lock but another thread in this
+ ** same process is still holding a shared lock. */
+ rc = SQLITE_BUSY;
+ } else {
+ rc = proxyConchLock(pFile, myHostID, EXCLUSIVE_LOCK);
+ }
+ }else{
+ rc = proxyConchLock(pFile, myHostID, EXCLUSIVE_LOCK);
+ }
+ if( rc==SQLITE_OK ){
+ char writeBuffer[PROXY_MAXCONCHLEN];
+ int writeSize = 0;
+
+ writeBuffer[0] = (char)PROXY_CONCHVERSION;
+ memcpy(&writeBuffer[PROXY_HEADERLEN], myHostID, PROXY_HOSTIDLEN);
+ if( pCtx->lockProxyPath!=NULL ){
+ strlcpy(&writeBuffer[PROXY_PATHINDEX], pCtx->lockProxyPath,
+ MAXPATHLEN);
+ }else{
+ strlcpy(&writeBuffer[PROXY_PATHINDEX], tempLockPath, MAXPATHLEN);
+ }
+ writeSize = PROXY_PATHINDEX + strlen(&writeBuffer[PROXY_PATHINDEX]);
+ robust_ftruncate(conchFile->h, writeSize);
+ rc = unixWrite((sqlite3_file *)conchFile, writeBuffer, writeSize, 0);
+ full_fsync(conchFile->h,0,0);
+ /* If we created a new conch file (not just updated the contents of a
+ ** valid conch file), try to match the permissions of the database
+ */
+ if( rc==SQLITE_OK && createConch ){
+ struct stat buf;
+ int err = osFstat(pFile->h, &buf);
+ if( err==0 ){
+ mode_t cmode = buf.st_mode&(S_IRUSR|S_IWUSR | S_IRGRP|S_IWGRP |
+ S_IROTH|S_IWOTH);
+ /* try to match the database file R/W permissions, ignore failure */
+#ifndef SQLITE_PROXY_DEBUG
+ osFchmod(conchFile->h, cmode);
+#else
+ do{
+ rc = osFchmod(conchFile->h, cmode);
+ }while( rc==(-1) && errno==EINTR );
+ if( rc!=0 ){
+ int code = errno;
+ fprintf(stderr, "fchmod %o FAILED with %d %s\n",
+ cmode, code, strerror(code));
+ } else {
+ fprintf(stderr, "fchmod %o SUCCEDED\n",cmode);
+ }
+ }else{
+ int code = errno;
+ fprintf(stderr, "STAT FAILED[%d] with %d %s\n",
+ err, code, strerror(code));
+#endif
+ }
+ }
+ }
+ conchFile->pMethod->xUnlock((sqlite3_file*)conchFile, SHARED_LOCK);
+
+ end_takeconch:
+ OSTRACE(("TRANSPROXY: CLOSE %d\n", pFile->h));
+ if( rc==SQLITE_OK && pFile->openFlags ){
+ int fd;
+ if( pFile->h>=0 ){
+ robust_close(pFile, pFile->h, __LINE__);
+ }
+ pFile->h = -1;
+ fd = robust_open(pCtx->dbPath, pFile->openFlags, 0);
+ OSTRACE(("TRANSPROXY: OPEN %d\n", fd));
+ if( fd>=0 ){
+ pFile->h = fd;
+ }else{
+ rc=SQLITE_CANTOPEN_BKPT; /* SQLITE_BUSY? proxyTakeConch called
+ during locking */
+ }
+ }
+ if( rc==SQLITE_OK && !pCtx->lockProxy ){
+ char *path = tempLockPath ? tempLockPath : pCtx->lockProxyPath;
+ rc = proxyCreateUnixFile(path, &pCtx->lockProxy, 1);
+ if( rc!=SQLITE_OK && rc!=SQLITE_NOMEM && tryOldLockPath ){
+ /* we couldn't create the proxy lock file with the old lock file path
+ ** so try again via auto-naming
+ */
+ forceNewLockPath = 1;
+ tryOldLockPath = 0;
+ continue; /* go back to the do {} while start point, try again */
+ }
+ }
+ if( rc==SQLITE_OK ){
+ /* Need to make a copy of path if we extracted the value
+ ** from the conch file or the path was allocated on the stack
+ */
+ if( tempLockPath ){
+ pCtx->lockProxyPath = sqlite3DbStrDup(0, tempLockPath);
+ if( !pCtx->lockProxyPath ){
+ rc = SQLITE_NOMEM_BKPT;
+ }
+ }
+ }
+ if( rc==SQLITE_OK ){
+ pCtx->conchHeld = 1;
+
+ if( pCtx->lockProxy->pMethod == &afpIoMethods ){
+ afpLockingContext *afpCtx;
+ afpCtx = (afpLockingContext *)pCtx->lockProxy->lockingContext;
+ afpCtx->dbPath = pCtx->lockProxyPath;
+ }
+ } else {
+ conchFile->pMethod->xUnlock((sqlite3_file*)conchFile, NO_LOCK);
+ }
+ OSTRACE(("TAKECONCH %d %s\n", conchFile->h,
+ rc==SQLITE_OK?"ok":"failed"));
+ return rc;
+ } while (1); /* in case we need to retry the :auto: lock file -
+ ** we should never get here except via the 'continue' call. */
+ }
+}
+
+/*
+** If pFile holds a lock on a conch file, then release that lock.
+*/
+static int proxyReleaseConch(unixFile *pFile){
+ int rc = SQLITE_OK; /* Subroutine return code */
+ proxyLockingContext *pCtx; /* The locking context for the proxy lock */
+  unixFile *conchFile;         /* The open conch file */
+
+ pCtx = (proxyLockingContext *)pFile->lockingContext;
+ conchFile = pCtx->conchFile;
+ OSTRACE(("RELEASECONCH %d for %s pid=%d\n", conchFile->h,
+ (pCtx->lockProxyPath ? pCtx->lockProxyPath : ":auto:"),
+ osGetpid(0)));
+ if( pCtx->conchHeld>0 ){
+ rc = conchFile->pMethod->xUnlock((sqlite3_file*)conchFile, NO_LOCK);
+ }
+ pCtx->conchHeld = 0;
+ OSTRACE(("RELEASECONCH %d %s\n", conchFile->h,
+ (rc==SQLITE_OK ? "ok" : "failed")));
+ return rc;
+}
+
+/*
+** Given the name of a database file, compute the name of its conch file.
+** Store the conch filename in memory obtained from sqlite3_malloc64().
+** Make *pConchPath point to the new name. Return SQLITE_OK on success
+** or SQLITE_NOMEM if unable to obtain memory.
+**
+** The caller is responsible for ensuring that the allocated memory
+** space is eventually freed.
+**
+** *pConchPath is set to NULL if a memory allocation error occurs.
+*/
+static int proxyCreateConchPathname(char *dbPath, char **pConchPath){
+ int i; /* Loop counter */
+ int len = (int)strlen(dbPath); /* Length of database filename - dbPath */
+ char *conchPath; /* buffer in which to construct conch name */
+
+ /* Allocate space for the conch filename and initialize the name to
+ ** the name of the original database file. */
+ *pConchPath = conchPath = (char *)sqlite3_malloc64(len + 8);
+ if( conchPath==0 ){
+ return SQLITE_NOMEM_BKPT;
+ }
+ memcpy(conchPath, dbPath, len+1);
+
+ /* now insert a "." before the last / character */
+ for( i=(len-1); i>=0; i-- ){
+ if( conchPath[i]=='/' ){
+ i++;
+ break;
+ }
+ }
+ conchPath[i]='.';
+ while ( i<len ){
+ conchPath[i+1]=dbPath[i];
+ i++;
+ }
+
+ /* append the "-conch" suffix to the file */
+ memcpy(&conchPath[i+1], "-conch", 7);
+ assert( (int)strlen(conchPath) == len+7 );
+
+ return SQLITE_OK;
+}
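+
+/*
+** Editor's note (worked example, derived from the code above): for dbPath
+** "/Users/me/foo.db" the routine produces "/Users/me/.foo.db-conch" --
+** len+7 characters plus the terminator, matching the len+8 allocation.
+*/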
+
+
+/* Takes a fully configured proxy locking-style unix file and switches
+** the local lock file path.
+*/
+static int switchLockProxyPath(unixFile *pFile, const char *path) {
+ proxyLockingContext *pCtx = (proxyLockingContext*)pFile->lockingContext;
+ char *oldPath = pCtx->lockProxyPath;
+ int rc = SQLITE_OK;
+
+ if( pFile->eFileLock!=NO_LOCK ){
+ return SQLITE_BUSY;
+ }
+
+ /* nothing to do if the path is NULL, :auto: or matches the existing path */
+ if( !path || path[0]=='\0' || !strcmp(path, ":auto:") ||
+ (oldPath && !strncmp(oldPath, path, MAXPATHLEN)) ){
+ return SQLITE_OK;
+ }else{
+ unixFile *lockProxy = pCtx->lockProxy;
+ pCtx->lockProxy=NULL;
+ pCtx->conchHeld = 0;
+ if( lockProxy!=NULL ){
+ rc=lockProxy->pMethod->xClose((sqlite3_file *)lockProxy);
+ if( rc ) return rc;
+ sqlite3_free(lockProxy);
+ }
+ sqlite3_free(oldPath);
+ pCtx->lockProxyPath = sqlite3DbStrDup(0, path);
+ }
+
+ return rc;
+}
+
+/*
+** pFile is a file that has been opened by a prior xOpen call. dbPath
+** is a string buffer at least MAXPATHLEN+1 characters in size.
+**
+** This routine finds the filename associated with pFile and writes it
+** into dbPath.
+*/
+static int proxyGetDbPathForUnixFile(unixFile *pFile, char *dbPath){
+#if defined(__APPLE__)
+ if( pFile->pMethod == &afpIoMethods ){
+ /* afp style keeps a reference to the db path in the filePath field
+ ** of the struct */
+ assert( (int)strlen((char*)pFile->lockingContext)<=MAXPATHLEN );
+ strlcpy(dbPath, ((afpLockingContext *)pFile->lockingContext)->dbPath,
+ MAXPATHLEN);
+ } else
+#endif
+ if( pFile->pMethod == &dotlockIoMethods ){
+ /* dot lock style uses the locking context to store the dot lock
+ ** file path */
+ int len = strlen((char *)pFile->lockingContext) - strlen(DOTLOCK_SUFFIX);
+ memcpy(dbPath, (char *)pFile->lockingContext, len + 1);
+ }else{
+ /* all other styles use the locking context to store the db file path */
+ assert( strlen((char*)pFile->lockingContext)<=MAXPATHLEN );
+ strlcpy(dbPath, (char *)pFile->lockingContext, MAXPATHLEN);
+ }
+ return SQLITE_OK;
+}
+
+/*
+** Takes an already filled in unix file and alters it so all file locking
+** will be performed on the local proxy lock file. The following fields
+** are preserved in the locking context so that they can be restored and
+** the unix structure properly cleaned up at close time:
+** ->lockingContext
+** ->pMethod
+*/
+static int proxyTransformUnixFile(unixFile *pFile, const char *path) {
+ proxyLockingContext *pCtx;
+ char dbPath[MAXPATHLEN+1]; /* Name of the database file */
+ char *lockPath=NULL;
+ int rc = SQLITE_OK;
+
+ if( pFile->eFileLock!=NO_LOCK ){
+ return SQLITE_BUSY;
+ }
+ proxyGetDbPathForUnixFile(pFile, dbPath);
+ if( !path || path[0]=='\0' || !strcmp(path, ":auto:") ){
+ lockPath=NULL;
+ }else{
+ lockPath=(char *)path;
+ }
+
+ OSTRACE(("TRANSPROXY %d for %s pid=%d\n", pFile->h,
+ (lockPath ? lockPath : ":auto:"), osGetpid(0)));
+
+ pCtx = sqlite3_malloc64( sizeof(*pCtx) );
+ if( pCtx==0 ){
+ return SQLITE_NOMEM_BKPT;
+ }
+ memset(pCtx, 0, sizeof(*pCtx));
+
+ rc = proxyCreateConchPathname(dbPath, &pCtx->conchFilePath);
+ if( rc==SQLITE_OK ){
+ rc = proxyCreateUnixFile(pCtx->conchFilePath, &pCtx->conchFile, 0);
+ if( rc==SQLITE_CANTOPEN && ((pFile->openFlags&O_RDWR) == 0) ){
+ /* if (a) the open flags are not O_RDWR, (b) the conch isn't there, and
+ ** (c) the file system is read-only, then enable no-locking access.
+ ** Ugh, since O_RDONLY==0x0000 we test for !O_RDWR since unixOpen asserts
+ ** that openFlags will have only one of O_RDONLY or O_RDWR.
+ */
+ struct statfs fsInfo;
+ struct stat conchInfo;
+ int goLockless = 0;
+
+ if( osStat(pCtx->conchFilePath, &conchInfo) == -1 ) {
+ int err = errno;
+ if( (err==ENOENT) && (statfs(dbPath, &fsInfo) != -1) ){
+ goLockless = (fsInfo.f_flags&MNT_RDONLY) == MNT_RDONLY;
+ }
+ }
+ if( goLockless ){
+ pCtx->conchHeld = -1; /* read only FS/ lockless */
+ rc = SQLITE_OK;
+ }
+ }
+ }
+ if( rc==SQLITE_OK && lockPath ){
+ pCtx->lockProxyPath = sqlite3DbStrDup(0, lockPath);
+ }
+
+ if( rc==SQLITE_OK ){
+ pCtx->dbPath = sqlite3DbStrDup(0, dbPath);
+ if( pCtx->dbPath==NULL ){
+ rc = SQLITE_NOMEM_BKPT;
+ }
+ }
+ if( rc==SQLITE_OK ){
+    /* All memory is allocated and the proxies are created and assigned;
+    ** switch the locking context and pMethod, then return.
+ */
+ pCtx->oldLockingContext = pFile->lockingContext;
+ pFile->lockingContext = pCtx;
+ pCtx->pOldMethod = pFile->pMethod;
+ pFile->pMethod = &proxyIoMethods;
+ }else{
+ if( pCtx->conchFile ){
+ pCtx->conchFile->pMethod->xClose((sqlite3_file *)pCtx->conchFile);
+ sqlite3_free(pCtx->conchFile);
+ }
+ sqlite3DbFree(0, pCtx->lockProxyPath);
+ sqlite3_free(pCtx->conchFilePath);
+ sqlite3_free(pCtx);
+ }
+ OSTRACE(("TRANSPROXY %d %s\n", pFile->h,
+ (rc==SQLITE_OK ? "ok" : "failed")));
+ return rc;
+}
+
+
+/*
+** This routine handles sqlite3_file_control() calls that are specific
+** to proxy locking.
+*/
+static int proxyFileControl(sqlite3_file *id, int op, void *pArg){
+ switch( op ){
+ case SQLITE_FCNTL_GET_LOCKPROXYFILE: {
+ unixFile *pFile = (unixFile*)id;
+ if( pFile->pMethod == &proxyIoMethods ){
+ proxyLockingContext *pCtx = (proxyLockingContext*)pFile->lockingContext;
+ proxyTakeConch(pFile);
+ if( pCtx->lockProxyPath ){
+ *(const char **)pArg = pCtx->lockProxyPath;
+ }else{
+ *(const char **)pArg = ":auto: (not held)";
+ }
+ } else {
+ *(const char **)pArg = NULL;
+ }
+ return SQLITE_OK;
+ }
+ case SQLITE_FCNTL_SET_LOCKPROXYFILE: {
+ unixFile *pFile = (unixFile*)id;
+ int rc = SQLITE_OK;
+ int isProxyStyle = (pFile->pMethod == &proxyIoMethods);
+ if( pArg==NULL || (const char *)pArg==0 ){
+ if( isProxyStyle ){
+ /* turn off proxy locking - not supported. If support is added for
+ ** switching proxy locking mode off then it will need to fail if
+ ** the journal mode is WAL mode.
+ */
+ rc = SQLITE_ERROR /*SQLITE_PROTOCOL? SQLITE_MISUSE?*/;
+ }else{
+ /* turn off proxy locking - already off - NOOP */
+ rc = SQLITE_OK;
+ }
+ }else{
+ const char *proxyPath = (const char *)pArg;
+ if( isProxyStyle ){
+ proxyLockingContext *pCtx =
+ (proxyLockingContext*)pFile->lockingContext;
+ if( !strcmp(pArg, ":auto:")
+ || (pCtx->lockProxyPath &&
+ !strncmp(pCtx->lockProxyPath, proxyPath, MAXPATHLEN))
+ ){
+ rc = SQLITE_OK;
+ }else{
+ rc = switchLockProxyPath(pFile, proxyPath);
+ }
+ }else{
+ /* turn on proxy file locking */
+ rc = proxyTransformUnixFile(pFile, proxyPath);
+ }
+ }
+ return rc;
+ }
+ default: {
+ assert( 0 ); /* The call assures that only valid opcodes are sent */
+ }
+ }
+ /*NOTREACHED*/ assert(0);
+ return SQLITE_ERROR;
+}
+
+/*
+** Within this division (the proxying locking implementation) the procedures
+** above this point are all utilities. The lock-related methods of the
+** proxy-locking sqlite3_io_method object follow.
+*/
+
+
+/*
+** This routine checks if there is a RESERVED lock held on the specified
+** file by this or any other process. If such a lock is held, set *pResOut
+** to a non-zero value otherwise *pResOut is set to zero. The return value
+** is set to SQLITE_OK unless an I/O error occurs during lock checking.
+*/
+static int proxyCheckReservedLock(sqlite3_file *id, int *pResOut) {
+ unixFile *pFile = (unixFile*)id;
+ int rc = proxyTakeConch(pFile);
+ if( rc==SQLITE_OK ){
+ proxyLockingContext *pCtx = (proxyLockingContext *)pFile->lockingContext;
+ if( pCtx->conchHeld>0 ){
+ unixFile *proxy = pCtx->lockProxy;
+ return proxy->pMethod->xCheckReservedLock((sqlite3_file*)proxy, pResOut);
+ }else{ /* conchHeld < 0 is lockless */
+      *pResOut = 0;  /* no RESERVED lock is held in lockless mode */
+ }
+ }
+ return rc;
+}
+
+/*
+** Lock the file with the lock specified by parameter eFileLock - one
+** of the following:
+**
+** (1) SHARED_LOCK
+** (2) RESERVED_LOCK
+** (3) PENDING_LOCK
+** (4) EXCLUSIVE_LOCK
+**
+** Sometimes when requesting one lock state, additional lock states
+** are inserted in between. The locking might fail on one of the later
+** transitions leaving the lock state different from what it started but
+** still short of its goal. The following chart shows the allowed
+** transitions and the inserted intermediate states:
+**
+** UNLOCKED -> SHARED
+** SHARED -> RESERVED
+** SHARED -> (PENDING) -> EXCLUSIVE
+** RESERVED -> (PENDING) -> EXCLUSIVE
+** PENDING -> EXCLUSIVE
+**
+** This routine will only increase a lock. Use the sqlite3OsUnlock()
+** routine to lower a locking level.
+*/
+static int proxyLock(sqlite3_file *id, int eFileLock) {
+ unixFile *pFile = (unixFile*)id;
+ int rc = proxyTakeConch(pFile);
+ if( rc==SQLITE_OK ){
+ proxyLockingContext *pCtx = (proxyLockingContext *)pFile->lockingContext;
+ if( pCtx->conchHeld>0 ){
+ unixFile *proxy = pCtx->lockProxy;
+ rc = proxy->pMethod->xLock((sqlite3_file*)proxy, eFileLock);
+ pFile->eFileLock = proxy->eFileLock;
+ }else{
+ /* conchHeld < 0 is lockless */
+ }
+ }
+ return rc;
+}
+
+
+/*
+** Lower the locking level on file descriptor pFile to eFileLock. eFileLock
+** must be either NO_LOCK or SHARED_LOCK.
+**
+** If the locking level of the file descriptor is already at or below
+** the requested locking level, this routine is a no-op.
+*/
+static int proxyUnlock(sqlite3_file *id, int eFileLock) {
+ unixFile *pFile = (unixFile*)id;
+ int rc = proxyTakeConch(pFile);
+ if( rc==SQLITE_OK ){
+ proxyLockingContext *pCtx = (proxyLockingContext *)pFile->lockingContext;
+ if( pCtx->conchHeld>0 ){
+ unixFile *proxy = pCtx->lockProxy;
+ rc = proxy->pMethod->xUnlock((sqlite3_file*)proxy, eFileLock);
+ pFile->eFileLock = proxy->eFileLock;
+ }else{
+ /* conchHeld < 0 is lockless */
+ }
+ }
+ return rc;
+}
+
+/*
+** Close a file that uses proxy locks.
+*/
+static int proxyClose(sqlite3_file *id) {
+ if( ALWAYS(id) ){
+ unixFile *pFile = (unixFile*)id;
+ proxyLockingContext *pCtx = (proxyLockingContext *)pFile->lockingContext;
+ unixFile *lockProxy = pCtx->lockProxy;
+ unixFile *conchFile = pCtx->conchFile;
+ int rc = SQLITE_OK;
+
+ if( lockProxy ){
+ rc = lockProxy->pMethod->xUnlock((sqlite3_file*)lockProxy, NO_LOCK);
+ if( rc ) return rc;
+ rc = lockProxy->pMethod->xClose((sqlite3_file*)lockProxy);
+ if( rc ) return rc;
+ sqlite3_free(lockProxy);
+ pCtx->lockProxy = 0;
+ }
+ if( conchFile ){
+ if( pCtx->conchHeld ){
+ rc = proxyReleaseConch(pFile);
+ if( rc ) return rc;
+ }
+ rc = conchFile->pMethod->xClose((sqlite3_file*)conchFile);
+ if( rc ) return rc;
+ sqlite3_free(conchFile);
+ }
+ sqlite3DbFree(0, pCtx->lockProxyPath);
+ sqlite3_free(pCtx->conchFilePath);
+ sqlite3DbFree(0, pCtx->dbPath);
+ /* restore the original locking context and pMethod then close it */
+ pFile->lockingContext = pCtx->oldLockingContext;
+ pFile->pMethod = pCtx->pOldMethod;
+ sqlite3_free(pCtx);
+ return pFile->pMethod->xClose(id);
+ }
+ return SQLITE_OK;
+}
+
+
+
+#endif /* defined(__APPLE__) && SQLITE_ENABLE_LOCKING_STYLE */
+/*
+** The proxy locking style is intended for use with AFP filesystems.
+** Since AFP is only supported on MacOSX, proxy locking is likewise
+** restricted to MacOSX.
+**
+**
+******************* End of the proxy lock implementation **********************
+******************************************************************************/
+
+/*
+** Initialize the operating system interface.
+**
+** This routine registers all VFS implementations for unix-like operating
+** systems. This routine, and the sqlite3_os_end() routine that follows,
+** should be the only routines in this file that are visible from other
+** files.
+**
+** This routine is called once during SQLite initialization and by a
+** single thread. The memory allocation and mutex subsystems have not
+** necessarily been initialized when this routine is called, and so they
+** should not be used.
+*/
+SQLITE_API int sqlite3_os_init(void){
+ /*
+ ** The following macro defines an initializer for an sqlite3_vfs object.
+ ** The name of the VFS is NAME. The pAppData is a pointer to a pointer
+ ** to the "finder" function. (pAppData is a pointer to a pointer because
+ ** silly C90 rules prohibit a void* from being cast to a function pointer
+ ** and so we have to go through the intermediate pointer to avoid problems
+ ** when compiling with -pedantic-errors on GCC.)
+ **
+ ** The FINDER parameter to this macro is the name of the pointer to the
+ ** finder-function. The finder-function returns a pointer to the
+  ** sqlite3_io_methods object that implements the desired locking
+  ** behaviors.  See the division above that contains the IOMETHODS
+  ** macro for additional information on finder-functions.
+ **
+ ** Most finders simply return a pointer to a fixed sqlite3_io_methods
+ ** object. But the "autolockIoFinder" available on MacOSX does a little
+ ** more than that; it looks at the filesystem type that hosts the
+  ** database file and tries to choose a locking method appropriate for
+  ** that filesystem type.
+ */
+ #define UNIXVFS(VFSNAME, FINDER) { \
+ 3, /* iVersion */ \
+ sizeof(unixFile), /* szOsFile */ \
+ MAX_PATHNAME, /* mxPathname */ \
+ 0, /* pNext */ \
+ VFSNAME, /* zName */ \
+ (void*)&FINDER, /* pAppData */ \
+ unixOpen, /* xOpen */ \
+ unixDelete, /* xDelete */ \
+ unixAccess, /* xAccess */ \
+ unixFullPathname, /* xFullPathname */ \
+ unixDlOpen, /* xDlOpen */ \
+ unixDlError, /* xDlError */ \
+ unixDlSym, /* xDlSym */ \
+ unixDlClose, /* xDlClose */ \
+ unixRandomness, /* xRandomness */ \
+ unixSleep, /* xSleep */ \
+ unixCurrentTime, /* xCurrentTime */ \
+ unixGetLastError, /* xGetLastError */ \
+ unixCurrentTimeInt64, /* xCurrentTimeInt64 */ \
+ unixSetSystemCall, /* xSetSystemCall */ \
+ unixGetSystemCall, /* xGetSystemCall */ \
+ unixNextSystemCall, /* xNextSystemCall */ \
+ }
+
+ /*
+ ** All default VFSes for unix are contained in the following array.
+ **
+ ** Note that the sqlite3_vfs.pNext field of the VFS object is modified
+ ** by the SQLite core when the VFS is registered. So the following
+ ** array cannot be const.
+ */
+ static sqlite3_vfs aVfs[] = {
+#if SQLITE_ENABLE_LOCKING_STYLE && defined(__APPLE__)
+ UNIXVFS("unix", autolockIoFinder ),
+#elif OS_VXWORKS
+ UNIXVFS("unix", vxworksIoFinder ),
+#else
+ UNIXVFS("unix", posixIoFinder ),
+#endif
+ UNIXVFS("unix-none", nolockIoFinder ),
+ UNIXVFS("unix-dotfile", dotlockIoFinder ),
+ UNIXVFS("unix-excl", posixIoFinder ),
+#if OS_VXWORKS
+ UNIXVFS("unix-namedsem", semIoFinder ),
+#endif
+#if SQLITE_ENABLE_LOCKING_STYLE || OS_VXWORKS
+ UNIXVFS("unix-posix", posixIoFinder ),
+#endif
+#if SQLITE_ENABLE_LOCKING_STYLE
+ UNIXVFS("unix-flock", flockIoFinder ),
+#endif
+#if SQLITE_ENABLE_LOCKING_STYLE && defined(__APPLE__)
+ UNIXVFS("unix-afp", afpIoFinder ),
+ UNIXVFS("unix-nfs", nfsIoFinder ),
+ UNIXVFS("unix-proxy", proxyIoFinder ),
+#endif
+ };
+ unsigned int i; /* Loop counter */
+
+ /* Double-check that the aSyscall[] array has been constructed
+ ** correctly. See ticket [bb3a86e890c8e96ab] */
+ assert( ArraySize(aSyscall)==29 );
+
+ /* Register all VFSes defined in the aVfs[] array */
+ for(i=0; i<(sizeof(aVfs)/sizeof(sqlite3_vfs)); i++){
+ sqlite3_vfs_register(&aVfs[i], i==0);
+ }
+ unixBigLock = sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_VFS1);
+ return SQLITE_OK;
+}
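+
+/*
+** Editor's sketch (application-side usage; the file name and flags are
+** illustrative): once sqlite3_os_init() has run, any VFS registered in
+** aVfs[] above can be requested by name when opening a database:
+**
+**   sqlite3 *db = 0;
+**   int rc = sqlite3_open_v2("test.db", &db,
+**                SQLITE_OPEN_READWRITE|SQLITE_OPEN_CREATE, "unix-dotfile");
+*/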
+
+/*
+** Shutdown the operating system interface.
+**
+** Some operating systems might need to do some cleanup in this routine
+** to release dynamically allocated objects.  On unix there is nothing
+** to free; this routine only clears the static VFS mutex pointer.
+*/
+SQLITE_API int sqlite3_os_end(void){
+ unixBigLock = 0;
+ return SQLITE_OK;
+}
+
+#endif /* SQLITE_OS_UNIX */
+
+/************** End of os_unix.c *********************************************/
+/************** Begin file os_win.c ******************************************/
+/*
+** 2004 May 22
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+******************************************************************************
+**
+** This file contains code that is specific to Windows.
+*/
+/* #include "sqliteInt.h" */
+#if SQLITE_OS_WIN /* This file is used for Windows only */
+
+/*
+** Include code that is common to all os_*.c files
+*/
+/************** Include os_common.h in the middle of os_win.c ****************/
+/************** Begin file os_common.h ***************************************/
+/*
+** 2004 May 22
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+******************************************************************************
+**
+** This file contains macros and a little bit of code that is common to
+** all of the platform-specific files (os_*.c) and is #included into those
+** files.
+**
+** This file should be #included by the os_*.c files only. It is not a
+** general purpose header file.
+*/
+#ifndef _OS_COMMON_H_
+#define _OS_COMMON_H_
+
+/*
+** At least two bugs have slipped in because we changed the MEMORY_DEBUG
+** macro to SQLITE_DEBUG and some older makefiles have not yet made the
+** switch. The following code should catch this problem at compile-time.
+*/
+#ifdef MEMORY_DEBUG
+# error "The MEMORY_DEBUG macro is obsolete. Use SQLITE_DEBUG instead."
+#endif
+
+/*
+** Macros for performance tracing.  Normally turned off.  Only works
+** on hardware with a supported cycle counter (x86, x86_64, and PPC;
+** see hwtime.h below).
+*/
+#ifdef SQLITE_PERFORMANCE_TRACE
+
+/*
+** hwtime.h contains inline assembler code for implementing
+** high-performance timing routines.
+*/
+/************** Include hwtime.h in the middle of os_common.h ****************/
+/************** Begin file hwtime.h ******************************************/
+/*
+** 2008 May 27
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+******************************************************************************
+**
+** This file contains inline asm code for retrieving "high-performance"
+** counters for x86 and x86_64 class CPUs.
+*/
+#ifndef SQLITE_HWTIME_H
+#define SQLITE_HWTIME_H
+
+/*
+** The following routine only works on pentium-class (or newer) processors.
+** It uses the RDTSC opcode to read the cycle count value out of the
+** processor and returns that value. This can be used for high-res
+** profiling.
+*/
+#if !defined(__STRICT_ANSI__) && \
+ (defined(__GNUC__) || defined(_MSC_VER)) && \
+ (defined(i386) || defined(__i386__) || defined(_M_IX86))
+
+ #if defined(__GNUC__)
+
+ __inline__ sqlite_uint64 sqlite3Hwtime(void){
+ unsigned int lo, hi;
+ __asm__ __volatile__ ("rdtsc" : "=a" (lo), "=d" (hi));
+ return (sqlite_uint64)hi << 32 | lo;
+ }
+
+ #elif defined(_MSC_VER)
+
+ __declspec(naked) __inline sqlite_uint64 __cdecl sqlite3Hwtime(void){
+ __asm {
+ rdtsc
+ ret ; return value at EDX:EAX
+ }
+ }
+
+ #endif
+
+#elif !defined(__STRICT_ANSI__) && (defined(__GNUC__) && defined(__x86_64__))
+
+ __inline__ sqlite_uint64 sqlite3Hwtime(void){
+ unsigned long val;
+ __asm__ __volatile__ ("rdtsc" : "=A" (val));
+ return val;
+ }
+
+#elif !defined(__STRICT_ANSI__) && (defined(__GNUC__) && defined(__ppc__))
+
+ __inline__ sqlite_uint64 sqlite3Hwtime(void){
+ unsigned long long retval;
+ unsigned long junk;
+ __asm__ __volatile__ ("\n\
+ 1: mftbu %1\n\
+ mftb %L0\n\
+ mftbu %0\n\
+ cmpw %0,%1\n\
+ bne 1b"
+ : "=r" (retval), "=r" (junk));
+ return retval;
+ }
+
+#else
+
+ /*
+ ** asm() is needed for hardware timing support. Without asm(),
+ ** disable the sqlite3Hwtime() routine.
+ **
+ ** sqlite3Hwtime() is only used for some obscure debugging
+ ** and analysis configurations, not in any deliverable, so this
+ ** should not be a great loss.
+ */
+SQLITE_PRIVATE sqlite_uint64 sqlite3Hwtime(void){ return ((sqlite_uint64)0); }
+
+#endif
+
+#endif /* !defined(SQLITE_HWTIME_H) */
+
+/************** End of hwtime.h **********************************************/
+/************** Continuing where we left off in os_common.h ******************/
+
+static sqlite_uint64 g_start;
+static sqlite_uint64 g_elapsed;
+#define TIMER_START g_start=sqlite3Hwtime()
+#define TIMER_END g_elapsed=sqlite3Hwtime()-g_start
+#define TIMER_ELAPSED g_elapsed
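+
+/*
+** Editor's sketch of the intended bracketing (mirrors how the platform
+** read/write routines use these macros; the variable names are
+** illustrative):
+**
+**   TIMER_START;
+**   got = osRead(fd, pBuf, nBuf);
+**   TIMER_END;
+**   OSTRACE(("READ %d cycles\n", (int)TIMER_ELAPSED));
+*/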
+#else
+#define TIMER_START
+#define TIMER_END
+#define TIMER_ELAPSED ((sqlite_uint64)0)
+#endif
+
+/*
+** If we compile with the SQLITE_TEST macro set, then the following block
+** of code will give us the ability to simulate a disk I/O error. This
+** is used for testing the I/O recovery logic.
+*/
+#if defined(SQLITE_TEST)
+SQLITE_API extern int sqlite3_io_error_hit;
+SQLITE_API extern int sqlite3_io_error_hardhit;
+SQLITE_API extern int sqlite3_io_error_pending;
+SQLITE_API extern int sqlite3_io_error_persist;
+SQLITE_API extern int sqlite3_io_error_benign;
+SQLITE_API extern int sqlite3_diskfull_pending;
+SQLITE_API extern int sqlite3_diskfull;
+#define SimulateIOErrorBenign(X) sqlite3_io_error_benign=(X)
+#define SimulateIOError(CODE) \
+ if( (sqlite3_io_error_persist && sqlite3_io_error_hit) \
+ || sqlite3_io_error_pending-- == 1 ) \
+ { local_ioerr(); CODE; }
+static void local_ioerr(){
+ IOTRACE(("IOERR\n"));
+ sqlite3_io_error_hit++;
+ if( !sqlite3_io_error_benign ) sqlite3_io_error_hardhit++;
+}
+#define SimulateDiskfullError(CODE) \
+ if( sqlite3_diskfull_pending ){ \
+ if( sqlite3_diskfull_pending == 1 ){ \
+ local_ioerr(); \
+ sqlite3_diskfull = 1; \
+ sqlite3_io_error_hit = 1; \
+ CODE; \
+ }else{ \
+ sqlite3_diskfull_pending--; \
+ } \
+ }
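+
+/*
+** Editor's sketch (usage pattern taken from the os_*.c files): an I/O
+** routine injects the simulated failure just before doing real work:
+**
+**   SimulateIOError( return SQLITE_IOERR_WRITE );
+**   SimulateDiskfullError( return SQLITE_FULL );
+*/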
+#else
+#define SimulateIOErrorBenign(X)
+#define SimulateIOError(A)
+#define SimulateDiskfullError(A)
+#endif /* defined(SQLITE_TEST) */
+
+/*
+** When testing, keep a count of the number of open files.
+*/
+#if defined(SQLITE_TEST)
+SQLITE_API extern int sqlite3_open_file_count;
+#define OpenCounter(X) sqlite3_open_file_count+=(X)
+#else
+#define OpenCounter(X)
+#endif /* defined(SQLITE_TEST) */
+
+#endif /* !defined(_OS_COMMON_H_) */
+
+/************** End of os_common.h *******************************************/
+/************** Continuing where we left off in os_win.c *********************/
+
+/*
+** Include the header file for the Windows VFS.
+*/
+/* #include "os_win.h" */
+
+/*
+** Compiling and using WAL mode requires several APIs that are only
+** available in Windows platforms based on the NT kernel.
+*/
+#if !SQLITE_OS_WINNT && !defined(SQLITE_OMIT_WAL)
+# error "WAL mode requires support from the Windows NT kernel, compile\
+ with SQLITE_OMIT_WAL."
+#endif
+
+#if !SQLITE_OS_WINNT && SQLITE_MAX_MMAP_SIZE>0
+# error "Memory mapped files require support from the Windows NT kernel,\
+ compile with SQLITE_MAX_MMAP_SIZE=0."
+#endif
+
+/*
+** Are most of the Win32 ANSI APIs available (i.e. with certain exceptions
+** based on the sub-platform)?
+*/
+#if !SQLITE_OS_WINCE && !SQLITE_OS_WINRT && !defined(SQLITE_WIN32_NO_ANSI)
+# define SQLITE_WIN32_HAS_ANSI
+#endif
+
+/*
+** Are most of the Win32 Unicode APIs available (i.e. with certain exceptions
+** based on the sub-platform)?
+*/
+#if (SQLITE_OS_WINCE || SQLITE_OS_WINNT || SQLITE_OS_WINRT) && \
+ !defined(SQLITE_WIN32_NO_WIDE)
+# define SQLITE_WIN32_HAS_WIDE
+#endif
+
+/*
+** Make sure at least one set of Win32 APIs is available.
+*/
+#if !defined(SQLITE_WIN32_HAS_ANSI) && !defined(SQLITE_WIN32_HAS_WIDE)
+# error "At least one of SQLITE_WIN32_HAS_ANSI and SQLITE_WIN32_HAS_WIDE\
+ must be defined."
+#endif
+
+/*
+** Define the required Windows SDK version constants if they are not
+** already available.
+*/
+#ifndef NTDDI_WIN8
+# define NTDDI_WIN8 0x06020000
+#endif
+
+#ifndef NTDDI_WINBLUE
+# define NTDDI_WINBLUE 0x06030000
+#endif
+
+#ifndef NTDDI_WINTHRESHOLD
+# define NTDDI_WINTHRESHOLD 0x06040000
+#endif
+
+/*
+** Check to see if the GetVersionEx[AW] functions are deprecated on the
+** target system. GetVersionEx was first deprecated in Win8.1.
+*/
+#ifndef SQLITE_WIN32_GETVERSIONEX
+# if defined(NTDDI_VERSION) && NTDDI_VERSION >= NTDDI_WINBLUE
+# define SQLITE_WIN32_GETVERSIONEX 0 /* GetVersionEx() is deprecated */
+# else
+# define SQLITE_WIN32_GETVERSIONEX 1 /* GetVersionEx() is current */
+# endif
+#endif
+
+/*
+** Check to see if the CreateFileMappingA function is supported on the
+** target system. It is unavailable when using "mincore.lib" on Win10.
+** When compiling for Windows 10, always assume "mincore.lib" is in use.
+*/
+#ifndef SQLITE_WIN32_CREATEFILEMAPPINGA
+# if defined(NTDDI_VERSION) && NTDDI_VERSION >= NTDDI_WINTHRESHOLD
+# define SQLITE_WIN32_CREATEFILEMAPPINGA 0
+# else
+# define SQLITE_WIN32_CREATEFILEMAPPINGA 1
+# endif
+#endif
+
+/*
+** This constant should already be defined (in the "WinDef.h" SDK file).
+*/
+#ifndef MAX_PATH
+# define MAX_PATH (260)
+#endif
+
+/*
+** Maximum pathname length (in chars) for Win32. This should normally be
+** MAX_PATH.
+*/
+#ifndef SQLITE_WIN32_MAX_PATH_CHARS
+# define SQLITE_WIN32_MAX_PATH_CHARS (MAX_PATH)
+#endif
+
+/*
+** This constant should already be defined (in the "WinNT.h" SDK file).
+*/
+#ifndef UNICODE_STRING_MAX_CHARS
+# define UNICODE_STRING_MAX_CHARS (32767)
+#endif
+
+/*
+** Maximum pathname length (in chars) for WinNT. This should normally be
+** UNICODE_STRING_MAX_CHARS.
+*/
+#ifndef SQLITE_WINNT_MAX_PATH_CHARS
+# define SQLITE_WINNT_MAX_PATH_CHARS (UNICODE_STRING_MAX_CHARS)
+#endif
+
+/*
+** Maximum pathname length (in bytes) for Win32. The MAX_PATH macro is in
+** characters, so we allocate 4 bytes per character to cover the
+** worst case of 4 bytes per character for UTF-8.
+*/
+#ifndef SQLITE_WIN32_MAX_PATH_BYTES
+# define SQLITE_WIN32_MAX_PATH_BYTES (SQLITE_WIN32_MAX_PATH_CHARS*4)
+#endif
+
+/*
+** Maximum pathname length (in bytes) for WinNT. This should normally be
+** UNICODE_STRING_MAX_CHARS * sizeof(WCHAR).
+*/
+#ifndef SQLITE_WINNT_MAX_PATH_BYTES
+# define SQLITE_WINNT_MAX_PATH_BYTES \
+ (sizeof(WCHAR) * SQLITE_WINNT_MAX_PATH_CHARS)
+#endif
+
+/*
+** Maximum error message length (in chars) for WinRT.
+*/
+#ifndef SQLITE_WIN32_MAX_ERRMSG_CHARS
+# define SQLITE_WIN32_MAX_ERRMSG_CHARS (1024)
+#endif
+
+/*
+** Returns non-zero if the character should be treated as a directory
+** separator.
+*/
+#ifndef winIsDirSep
+# define winIsDirSep(a) (((a) == '/') || ((a) == '\\'))
+#endif
+
+/*
+** This macro is used when a local variable is set to a value that is
+** [sometimes] not used by the code (e.g. via conditional compilation).
+*/
+#ifndef UNUSED_VARIABLE_VALUE
+# define UNUSED_VARIABLE_VALUE(x) (void)(x)
+#endif
+
+/*
+** Returns the character that should be used as the directory separator.
+*/
+#ifndef winGetDirSep
+# define winGetDirSep() '\\'
+#endif
+
+/*
+** Do we need to manually define the Win32 file mapping APIs for use with WAL
+** mode or memory mapped files (e.g. these APIs are available in the Windows
+** CE SDK; however, they are not present in the header file)?
+*/
+#if SQLITE_WIN32_FILEMAPPING_API && \
+ (!defined(SQLITE_OMIT_WAL) || SQLITE_MAX_MMAP_SIZE>0)
+/*
+** Two of the file mapping APIs are different under WinRT. Figure out which
+** set we need.
+*/
+#if SQLITE_OS_WINRT
+WINBASEAPI HANDLE WINAPI CreateFileMappingFromApp(HANDLE, \
+ LPSECURITY_ATTRIBUTES, ULONG, ULONG64, LPCWSTR);
+
+WINBASEAPI LPVOID WINAPI MapViewOfFileFromApp(HANDLE, ULONG, ULONG64, SIZE_T);
+#else
+#if defined(SQLITE_WIN32_HAS_ANSI)
+WINBASEAPI HANDLE WINAPI CreateFileMappingA(HANDLE, LPSECURITY_ATTRIBUTES, \
+ DWORD, DWORD, DWORD, LPCSTR);
+#endif /* defined(SQLITE_WIN32_HAS_ANSI) */
+
+#if defined(SQLITE_WIN32_HAS_WIDE)
+WINBASEAPI HANDLE WINAPI CreateFileMappingW(HANDLE, LPSECURITY_ATTRIBUTES, \
+ DWORD, DWORD, DWORD, LPCWSTR);
+#endif /* defined(SQLITE_WIN32_HAS_WIDE) */
+
+WINBASEAPI LPVOID WINAPI MapViewOfFile(HANDLE, DWORD, DWORD, DWORD, SIZE_T);
+#endif /* SQLITE_OS_WINRT */
+
+/*
+** These file mapping APIs are common to both Win32 and WinRT.
+*/
+
+WINBASEAPI BOOL WINAPI FlushViewOfFile(LPCVOID, SIZE_T);
+WINBASEAPI BOOL WINAPI UnmapViewOfFile(LPCVOID);
+#endif /* SQLITE_WIN32_FILEMAPPING_API */
+
+/*
+** Some Microsoft compilers lack this definition.
+*/
+#ifndef INVALID_FILE_ATTRIBUTES
+# define INVALID_FILE_ATTRIBUTES ((DWORD)-1)
+#endif
+
+#ifndef FILE_FLAG_MASK
+# define FILE_FLAG_MASK (0xFF3C0000)
+#endif
+
+#ifndef FILE_ATTRIBUTE_MASK
+# define FILE_ATTRIBUTE_MASK (0x0003FFF7)
+#endif
+
+#ifndef SQLITE_OMIT_WAL
+/* Forward references to structures used for WAL */
+typedef struct winShm winShm; /* A connection to shared-memory */
+typedef struct winShmNode winShmNode; /* A region of shared-memory */
+#endif
+
+/*
+** WinCE lacks native support for file locking so we have to fake it
+** with some code of our own.
+*/
+#if SQLITE_OS_WINCE
+typedef struct winceLock {
+ int nReaders; /* Number of reader locks obtained */
+ BOOL bPending; /* Indicates a pending lock has been obtained */
+ BOOL bReserved; /* Indicates a reserved lock has been obtained */
+ BOOL bExclusive; /* Indicates an exclusive lock has been obtained */
+} winceLock;
+#endif
+
+/*
+** The winFile structure is a subclass of sqlite3_file* specific to the win32
+** portability layer.
+*/
+typedef struct winFile winFile;
+struct winFile {
+ const sqlite3_io_methods *pMethod; /*** Must be first ***/
+ sqlite3_vfs *pVfs; /* The VFS used to open this file */
+ HANDLE h; /* Handle for accessing the file */
+ u8 locktype; /* Type of lock currently held on this file */
+ short sharedLockByte; /* Randomly chosen byte used as a shared lock */
+ u8 ctrlFlags; /* Flags. See WINFILE_* below */
+ DWORD lastErrno; /* The Windows errno from the last I/O error */
+#ifndef SQLITE_OMIT_WAL
+ winShm *pShm; /* Instance of shared memory on this file */
+#endif
+ const char *zPath; /* Full pathname of this file */
+ int szChunk; /* Chunk size configured by FCNTL_CHUNK_SIZE */
+#if SQLITE_OS_WINCE
+ LPWSTR zDeleteOnClose; /* Name of file to delete when closing */
+ HANDLE hMutex; /* Mutex used to control access to shared lock */
+ HANDLE hShared; /* Shared memory segment used for locking */
+ winceLock local; /* Locks obtained by this instance of winFile */
+ winceLock *shared; /* Global shared lock memory for the file */
+#endif
+#if SQLITE_MAX_MMAP_SIZE>0
+ int nFetchOut; /* Number of outstanding xFetch references */
+ HANDLE hMap; /* Handle for accessing memory mapping */
+ void *pMapRegion; /* Area memory mapped */
+ sqlite3_int64 mmapSize; /* Size of mapped region */
+ sqlite3_int64 mmapSizeMax; /* Configured FCNTL_MMAP_SIZE value */
+#endif
+};
+
+/*
+** The winVfsAppData structure is used for the pAppData member for all of the
+** Win32 VFS variants.
+*/
+typedef struct winVfsAppData winVfsAppData;
+struct winVfsAppData {
+ const sqlite3_io_methods *pMethod; /* The file I/O methods to use. */
+ void *pAppData; /* The extra pAppData, if any. */
+ BOOL bNoLock; /* Non-zero if locking is disabled. */
+};
+
+/*
+** Allowed values for winFile.ctrlFlags
+*/
+#define WINFILE_RDONLY 0x02 /* Connection is read only */
+#define WINFILE_PERSIST_WAL 0x04 /* Persistent WAL mode */
+#define WINFILE_PSOW 0x10 /* SQLITE_IOCAP_POWERSAFE_OVERWRITE */
+
+/*
+ * The size of the buffer used by sqlite3_win32_write_debug().
+ */
+#ifndef SQLITE_WIN32_DBG_BUF_SIZE
+# define SQLITE_WIN32_DBG_BUF_SIZE ((int)(4096-sizeof(DWORD)))
+#endif
+
+/*
+ * If compiled with SQLITE_WIN32_MALLOC on Windows, we will use the
+ * various Win32 API heap functions instead of our own.
+ */
+#ifdef SQLITE_WIN32_MALLOC
+
+/*
+ * If this is non-zero, an isolated heap will be created by the native Win32
+ * allocator subsystem; otherwise, the default process heap will be used. This
+ * setting has no effect when compiling for WinRT. By default, this is enabled
+ * and an isolated heap will be created to store all allocated data.
+ *
+ ******************************************************************************
+ * WARNING: It is important to note that when this setting is non-zero and the
+ * winMemShutdown function is called (e.g. by the sqlite3_shutdown
+ * function), all data that was allocated using the isolated heap will
+ * be freed immediately and any attempt to access any of that freed
+ * data will almost certainly result in an immediate access violation.
+ ******************************************************************************
+ */
+#ifndef SQLITE_WIN32_HEAP_CREATE
+# define SQLITE_WIN32_HEAP_CREATE (TRUE)
+#endif
+
+/*
+ * This is the maximum possible initial size of the Win32-specific heap, in
+ * bytes.
+ */
+#ifndef SQLITE_WIN32_HEAP_MAX_INIT_SIZE
+# define SQLITE_WIN32_HEAP_MAX_INIT_SIZE (4294967295U)
+#endif
+
+/*
+ * This is the extra space for the initial size of the Win32-specific heap,
+ * in bytes. This value may be zero.
+ */
+#ifndef SQLITE_WIN32_HEAP_INIT_EXTRA
+# define SQLITE_WIN32_HEAP_INIT_EXTRA (4194304)
+#endif
+
+/*
+ * Calculate the maximum legal cache size, in pages, based on the maximum
+ * possible initial heap size and the default page size, setting aside the
+ * needed extra space.
+ */
+#ifndef SQLITE_WIN32_MAX_CACHE_SIZE
+# define SQLITE_WIN32_MAX_CACHE_SIZE (((SQLITE_WIN32_HEAP_MAX_INIT_SIZE) - \
+ (SQLITE_WIN32_HEAP_INIT_EXTRA)) / \
+ (SQLITE_DEFAULT_PAGE_SIZE))
+#endif
+
+/*
+ * This is the cache size used in the calculation of the initial size of the
+ * Win32-specific heap. It cannot be negative.
+ */
+#ifndef SQLITE_WIN32_CACHE_SIZE
+# if SQLITE_DEFAULT_CACHE_SIZE>=0
+# define SQLITE_WIN32_CACHE_SIZE (SQLITE_DEFAULT_CACHE_SIZE)
+# else
+# define SQLITE_WIN32_CACHE_SIZE (-(SQLITE_DEFAULT_CACHE_SIZE))
+# endif
+#endif
+
+/*
+ * Make sure that the calculated cache size, in pages, cannot cause the
+ * initial size of the Win32-specific heap to exceed the maximum amount
+ * of memory that can be specified in the call to HeapCreate.
+ */
+#if SQLITE_WIN32_CACHE_SIZE>SQLITE_WIN32_MAX_CACHE_SIZE
+# undef SQLITE_WIN32_CACHE_SIZE
+# define SQLITE_WIN32_CACHE_SIZE (2000)
+#endif
+
+/*
+ * The initial size of the Win32-specific heap. This value may be zero.
+ */
+#ifndef SQLITE_WIN32_HEAP_INIT_SIZE
+# define SQLITE_WIN32_HEAP_INIT_SIZE ((SQLITE_WIN32_CACHE_SIZE) * \
+ (SQLITE_DEFAULT_PAGE_SIZE) + \
+ (SQLITE_WIN32_HEAP_INIT_EXTRA))
+#endif
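+
+/* For example, assuming the common defaults SQLITE_WIN32_CACHE_SIZE==2000
+** and SQLITE_DEFAULT_PAGE_SIZE==4096, this works out to an initial heap
+** size of 2000*4096 + 4194304 = 12386304 bytes. */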
+
+/*
+ * The maximum size of the Win32-specific heap. This value may be zero.
+ */
+#ifndef SQLITE_WIN32_HEAP_MAX_SIZE
+# define SQLITE_WIN32_HEAP_MAX_SIZE (0)
+#endif
+
+/*
+ * The extra flags to use in calls to the Win32 heap APIs. This value may be
+ * zero for the default behavior.
+ */
+#ifndef SQLITE_WIN32_HEAP_FLAGS
+# define SQLITE_WIN32_HEAP_FLAGS (0)
+#endif
+
+/*
+** The winMemData structure stores information required by the Win32-specific
+** sqlite3_mem_methods implementation.
+*/
+typedef struct winMemData winMemData;
+struct winMemData {
+#ifndef NDEBUG
+ u32 magic1; /* Magic number to detect structure corruption. */
+#endif
+ HANDLE hHeap; /* The handle to our heap. */
+ BOOL bOwned; /* Do we own the heap (i.e. destroy it on shutdown)? */
+#ifndef NDEBUG
+ u32 magic2; /* Magic number to detect structure corruption. */
+#endif
+};
+
+#ifndef NDEBUG
+#define WINMEM_MAGIC1 0x42b2830b
+#define WINMEM_MAGIC2 0xbd4d7cf4
+#endif
+
+static struct winMemData win_mem_data = {
+#ifndef NDEBUG
+ WINMEM_MAGIC1,
+#endif
+ NULL, FALSE
+#ifndef NDEBUG
+ ,WINMEM_MAGIC2
+#endif
+};
+
+#ifndef NDEBUG
+#define winMemAssertMagic1() assert( win_mem_data.magic1==WINMEM_MAGIC1 )
+#define winMemAssertMagic2() assert( win_mem_data.magic2==WINMEM_MAGIC2 )
+#define winMemAssertMagic() winMemAssertMagic1(); winMemAssertMagic2();
+#else
+#define winMemAssertMagic()
+#endif
+
+#define winMemGetDataPtr() &win_mem_data
+#define winMemGetHeap() win_mem_data.hHeap
+#define winMemGetOwned() win_mem_data.bOwned
+
+static void *winMemMalloc(int nBytes);
+static void winMemFree(void *pPrior);
+static void *winMemRealloc(void *pPrior, int nBytes);
+static int winMemSize(void *p);
+static int winMemRoundup(int n);
+static int winMemInit(void *pAppData);
+static void winMemShutdown(void *pAppData);
+
+SQLITE_PRIVATE const sqlite3_mem_methods *sqlite3MemGetWin32(void);
+#endif /* SQLITE_WIN32_MALLOC */
+
+/*
+** The following variable is (normally) set once and never changes
+** thereafter. It records whether the operating system is Win9x
+** or WinNT.
+**
+** 0: Operating system unknown.
+** 1: Operating system is Win9x.
+** 2: Operating system is WinNT.
+**
+** In order to facilitate testing on a WinNT system, the test fixture
+** can manually set this value to 1 to emulate Win98 behavior.
+*/
+#ifdef SQLITE_TEST
+SQLITE_API LONG SQLITE_WIN32_VOLATILE sqlite3_os_type = 0;
+#else
+static LONG SQLITE_WIN32_VOLATILE sqlite3_os_type = 0;
+#endif
+
+#ifndef SYSCALL
+# define SYSCALL sqlite3_syscall_ptr
+#endif
+
+/*
+** The AreFileApisANSI() function is not available on Windows CE or WinRT.
+*/
+
+#if SQLITE_OS_WINCE || SQLITE_OS_WINRT
+# define osAreFileApisANSI() 1
+#endif
+
+/*
+** Many system calls are accessed through pointers to functions so that
+** they may be overridden at runtime to facilitate fault injection during
+** testing and sandboxing. The following array holds the names and pointers
+** to all overrideable system calls.
+*/
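+
+/* As an illustrative sketch (not part of the original sources), a test
+** harness could redirect one of these entries at runtime through the
+** public sqlite3_vfs interface; winSetSystemCall() later in this file is
+** what ultimately services such a request, and passing a NULL function
+** pointer restores the default.  Here "myReadFile" is a hypothetical
+** replacement with the same signature as the Win32 ReadFile() API:
+**
+**   sqlite3_vfs *pVfs = sqlite3_vfs_find("win32");
+**   if( pVfs && pVfs->iVersion>=3 ){
+**     pVfs->xSetSystemCall(pVfs, "ReadFile",
+**                          (sqlite3_syscall_ptr)myReadFile);
+**     ...
+**     pVfs->xSetSystemCall(pVfs, "ReadFile", 0);
+**   }
+*/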
+static struct win_syscall {
+ const char *zName; /* Name of the system call */
+ sqlite3_syscall_ptr pCurrent; /* Current value of the system call */
+ sqlite3_syscall_ptr pDefault; /* Default value */
+} aSyscall[] = {
+#if !SQLITE_OS_WINCE && !SQLITE_OS_WINRT
+ { "AreFileApisANSI", (SYSCALL)AreFileApisANSI, 0 },
+#else
+ { "AreFileApisANSI", (SYSCALL)0, 0 },
+#endif
+
+#ifndef osAreFileApisANSI
+#define osAreFileApisANSI ((BOOL(WINAPI*)(VOID))aSyscall[0].pCurrent)
+#endif
+
+#if SQLITE_OS_WINCE && defined(SQLITE_WIN32_HAS_WIDE)
+ { "CharLowerW", (SYSCALL)CharLowerW, 0 },
+#else
+ { "CharLowerW", (SYSCALL)0, 0 },
+#endif
+
+#define osCharLowerW ((LPWSTR(WINAPI*)(LPWSTR))aSyscall[1].pCurrent)
+
+#if SQLITE_OS_WINCE && defined(SQLITE_WIN32_HAS_WIDE)
+ { "CharUpperW", (SYSCALL)CharUpperW, 0 },
+#else
+ { "CharUpperW", (SYSCALL)0, 0 },
+#endif
+
+#define osCharUpperW ((LPWSTR(WINAPI*)(LPWSTR))aSyscall[2].pCurrent)
+
+ { "CloseHandle", (SYSCALL)CloseHandle, 0 },
+
+#define osCloseHandle ((BOOL(WINAPI*)(HANDLE))aSyscall[3].pCurrent)
+
+#if defined(SQLITE_WIN32_HAS_ANSI)
+ { "CreateFileA", (SYSCALL)CreateFileA, 0 },
+#else
+ { "CreateFileA", (SYSCALL)0, 0 },
+#endif
+
+#define osCreateFileA ((HANDLE(WINAPI*)(LPCSTR,DWORD,DWORD, \
+ LPSECURITY_ATTRIBUTES,DWORD,DWORD,HANDLE))aSyscall[4].pCurrent)
+
+#if !SQLITE_OS_WINRT && defined(SQLITE_WIN32_HAS_WIDE)
+ { "CreateFileW", (SYSCALL)CreateFileW, 0 },
+#else
+ { "CreateFileW", (SYSCALL)0, 0 },
+#endif
+
+#define osCreateFileW ((HANDLE(WINAPI*)(LPCWSTR,DWORD,DWORD, \
+ LPSECURITY_ATTRIBUTES,DWORD,DWORD,HANDLE))aSyscall[5].pCurrent)
+
+#if !SQLITE_OS_WINRT && defined(SQLITE_WIN32_HAS_ANSI) && \
+ (!defined(SQLITE_OMIT_WAL) || SQLITE_MAX_MMAP_SIZE>0) && \
+ SQLITE_WIN32_CREATEFILEMAPPINGA
+ { "CreateFileMappingA", (SYSCALL)CreateFileMappingA, 0 },
+#else
+ { "CreateFileMappingA", (SYSCALL)0, 0 },
+#endif
+
+#define osCreateFileMappingA ((HANDLE(WINAPI*)(HANDLE,LPSECURITY_ATTRIBUTES, \
+ DWORD,DWORD,DWORD,LPCSTR))aSyscall[6].pCurrent)
+
+#if SQLITE_OS_WINCE || (!SQLITE_OS_WINRT && defined(SQLITE_WIN32_HAS_WIDE) && \
+ (!defined(SQLITE_OMIT_WAL) || SQLITE_MAX_MMAP_SIZE>0))
+ { "CreateFileMappingW", (SYSCALL)CreateFileMappingW, 0 },
+#else
+ { "CreateFileMappingW", (SYSCALL)0, 0 },
+#endif
+
+#define osCreateFileMappingW ((HANDLE(WINAPI*)(HANDLE,LPSECURITY_ATTRIBUTES, \
+ DWORD,DWORD,DWORD,LPCWSTR))aSyscall[7].pCurrent)
+
+#if !SQLITE_OS_WINRT && defined(SQLITE_WIN32_HAS_WIDE)
+ { "CreateMutexW", (SYSCALL)CreateMutexW, 0 },
+#else
+ { "CreateMutexW", (SYSCALL)0, 0 },
+#endif
+
+#define osCreateMutexW ((HANDLE(WINAPI*)(LPSECURITY_ATTRIBUTES,BOOL, \
+ LPCWSTR))aSyscall[8].pCurrent)
+
+#if defined(SQLITE_WIN32_HAS_ANSI)
+ { "DeleteFileA", (SYSCALL)DeleteFileA, 0 },
+#else
+ { "DeleteFileA", (SYSCALL)0, 0 },
+#endif
+
+#define osDeleteFileA ((BOOL(WINAPI*)(LPCSTR))aSyscall[9].pCurrent)
+
+#if defined(SQLITE_WIN32_HAS_WIDE)
+ { "DeleteFileW", (SYSCALL)DeleteFileW, 0 },
+#else
+ { "DeleteFileW", (SYSCALL)0, 0 },
+#endif
+
+#define osDeleteFileW ((BOOL(WINAPI*)(LPCWSTR))aSyscall[10].pCurrent)
+
+#if SQLITE_OS_WINCE
+ { "FileTimeToLocalFileTime", (SYSCALL)FileTimeToLocalFileTime, 0 },
+#else
+ { "FileTimeToLocalFileTime", (SYSCALL)0, 0 },
+#endif
+
+#define osFileTimeToLocalFileTime ((BOOL(WINAPI*)(CONST FILETIME*, \
+ LPFILETIME))aSyscall[11].pCurrent)
+
+#if SQLITE_OS_WINCE
+ { "FileTimeToSystemTime", (SYSCALL)FileTimeToSystemTime, 0 },
+#else
+ { "FileTimeToSystemTime", (SYSCALL)0, 0 },
+#endif
+
+#define osFileTimeToSystemTime ((BOOL(WINAPI*)(CONST FILETIME*, \
+ LPSYSTEMTIME))aSyscall[12].pCurrent)
+
+ { "FlushFileBuffers", (SYSCALL)FlushFileBuffers, 0 },
+
+#define osFlushFileBuffers ((BOOL(WINAPI*)(HANDLE))aSyscall[13].pCurrent)
+
+#if defined(SQLITE_WIN32_HAS_ANSI)
+ { "FormatMessageA", (SYSCALL)FormatMessageA, 0 },
+#else
+ { "FormatMessageA", (SYSCALL)0, 0 },
+#endif
+
+#define osFormatMessageA ((DWORD(WINAPI*)(DWORD,LPCVOID,DWORD,DWORD,LPSTR, \
+ DWORD,va_list*))aSyscall[14].pCurrent)
+
+#if defined(SQLITE_WIN32_HAS_WIDE)
+ { "FormatMessageW", (SYSCALL)FormatMessageW, 0 },
+#else
+ { "FormatMessageW", (SYSCALL)0, 0 },
+#endif
+
+#define osFormatMessageW ((DWORD(WINAPI*)(DWORD,LPCVOID,DWORD,DWORD,LPWSTR, \
+ DWORD,va_list*))aSyscall[15].pCurrent)
+
+#if !defined(SQLITE_OMIT_LOAD_EXTENSION)
+ { "FreeLibrary", (SYSCALL)FreeLibrary, 0 },
+#else
+ { "FreeLibrary", (SYSCALL)0, 0 },
+#endif
+
+#define osFreeLibrary ((BOOL(WINAPI*)(HMODULE))aSyscall[16].pCurrent)
+
+ { "GetCurrentProcessId", (SYSCALL)GetCurrentProcessId, 0 },
+
+#define osGetCurrentProcessId ((DWORD(WINAPI*)(VOID))aSyscall[17].pCurrent)
+
+#if !SQLITE_OS_WINCE && defined(SQLITE_WIN32_HAS_ANSI)
+ { "GetDiskFreeSpaceA", (SYSCALL)GetDiskFreeSpaceA, 0 },
+#else
+ { "GetDiskFreeSpaceA", (SYSCALL)0, 0 },
+#endif
+
+#define osGetDiskFreeSpaceA ((BOOL(WINAPI*)(LPCSTR,LPDWORD,LPDWORD,LPDWORD, \
+ LPDWORD))aSyscall[18].pCurrent)
+
+#if !SQLITE_OS_WINCE && !SQLITE_OS_WINRT && defined(SQLITE_WIN32_HAS_WIDE)
+ { "GetDiskFreeSpaceW", (SYSCALL)GetDiskFreeSpaceW, 0 },
+#else
+ { "GetDiskFreeSpaceW", (SYSCALL)0, 0 },
+#endif
+
+#define osGetDiskFreeSpaceW ((BOOL(WINAPI*)(LPCWSTR,LPDWORD,LPDWORD,LPDWORD, \
+ LPDWORD))aSyscall[19].pCurrent)
+
+#if defined(SQLITE_WIN32_HAS_ANSI)
+ { "GetFileAttributesA", (SYSCALL)GetFileAttributesA, 0 },
+#else
+ { "GetFileAttributesA", (SYSCALL)0, 0 },
+#endif
+
+#define osGetFileAttributesA ((DWORD(WINAPI*)(LPCSTR))aSyscall[20].pCurrent)
+
+#if !SQLITE_OS_WINRT && defined(SQLITE_WIN32_HAS_WIDE)
+ { "GetFileAttributesW", (SYSCALL)GetFileAttributesW, 0 },
+#else
+ { "GetFileAttributesW", (SYSCALL)0, 0 },
+#endif
+
+#define osGetFileAttributesW ((DWORD(WINAPI*)(LPCWSTR))aSyscall[21].pCurrent)
+
+#if defined(SQLITE_WIN32_HAS_WIDE)
+ { "GetFileAttributesExW", (SYSCALL)GetFileAttributesExW, 0 },
+#else
+ { "GetFileAttributesExW", (SYSCALL)0, 0 },
+#endif
+
+#define osGetFileAttributesExW ((BOOL(WINAPI*)(LPCWSTR,GET_FILEEX_INFO_LEVELS, \
+ LPVOID))aSyscall[22].pCurrent)
+
+#if !SQLITE_OS_WINRT
+ { "GetFileSize", (SYSCALL)GetFileSize, 0 },
+#else
+ { "GetFileSize", (SYSCALL)0, 0 },
+#endif
+
+#define osGetFileSize ((DWORD(WINAPI*)(HANDLE,LPDWORD))aSyscall[23].pCurrent)
+
+#if !SQLITE_OS_WINCE && defined(SQLITE_WIN32_HAS_ANSI)
+ { "GetFullPathNameA", (SYSCALL)GetFullPathNameA, 0 },
+#else
+ { "GetFullPathNameA", (SYSCALL)0, 0 },
+#endif
+
+#define osGetFullPathNameA ((DWORD(WINAPI*)(LPCSTR,DWORD,LPSTR, \
+ LPSTR*))aSyscall[24].pCurrent)
+
+#if !SQLITE_OS_WINCE && !SQLITE_OS_WINRT && defined(SQLITE_WIN32_HAS_WIDE)
+ { "GetFullPathNameW", (SYSCALL)GetFullPathNameW, 0 },
+#else
+ { "GetFullPathNameW", (SYSCALL)0, 0 },
+#endif
+
+#define osGetFullPathNameW ((DWORD(WINAPI*)(LPCWSTR,DWORD,LPWSTR, \
+ LPWSTR*))aSyscall[25].pCurrent)
+
+ { "GetLastError", (SYSCALL)GetLastError, 0 },
+
+#define osGetLastError ((DWORD(WINAPI*)(VOID))aSyscall[26].pCurrent)
+
+#if !defined(SQLITE_OMIT_LOAD_EXTENSION)
+#if SQLITE_OS_WINCE
+ /* The GetProcAddressA() routine is only available on Windows CE. */
+ { "GetProcAddressA", (SYSCALL)GetProcAddressA, 0 },
+#else
+ /* All other Windows platforms expect GetProcAddress() to take
+ ** an ANSI string regardless of the _UNICODE setting */
+ { "GetProcAddressA", (SYSCALL)GetProcAddress, 0 },
+#endif
+#else
+ { "GetProcAddressA", (SYSCALL)0, 0 },
+#endif
+
+#define osGetProcAddressA ((FARPROC(WINAPI*)(HMODULE, \
+ LPCSTR))aSyscall[27].pCurrent)
+
+#if !SQLITE_OS_WINRT
+ { "GetSystemInfo", (SYSCALL)GetSystemInfo, 0 },
+#else
+ { "GetSystemInfo", (SYSCALL)0, 0 },
+#endif
+
+#define osGetSystemInfo ((VOID(WINAPI*)(LPSYSTEM_INFO))aSyscall[28].pCurrent)
+
+ { "GetSystemTime", (SYSCALL)GetSystemTime, 0 },
+
+#define osGetSystemTime ((VOID(WINAPI*)(LPSYSTEMTIME))aSyscall[29].pCurrent)
+
+#if !SQLITE_OS_WINCE
+ { "GetSystemTimeAsFileTime", (SYSCALL)GetSystemTimeAsFileTime, 0 },
+#else
+ { "GetSystemTimeAsFileTime", (SYSCALL)0, 0 },
+#endif
+
+#define osGetSystemTimeAsFileTime ((VOID(WINAPI*)( \
+ LPFILETIME))aSyscall[30].pCurrent)
+
+#if defined(SQLITE_WIN32_HAS_ANSI)
+ { "GetTempPathA", (SYSCALL)GetTempPathA, 0 },
+#else
+ { "GetTempPathA", (SYSCALL)0, 0 },
+#endif
+
+#define osGetTempPathA ((DWORD(WINAPI*)(DWORD,LPSTR))aSyscall[31].pCurrent)
+
+#if !SQLITE_OS_WINRT && defined(SQLITE_WIN32_HAS_WIDE)
+ { "GetTempPathW", (SYSCALL)GetTempPathW, 0 },
+#else
+ { "GetTempPathW", (SYSCALL)0, 0 },
+#endif
+
+#define osGetTempPathW ((DWORD(WINAPI*)(DWORD,LPWSTR))aSyscall[32].pCurrent)
+
+#if !SQLITE_OS_WINRT
+ { "GetTickCount", (SYSCALL)GetTickCount, 0 },
+#else
+ { "GetTickCount", (SYSCALL)0, 0 },
+#endif
+
+#define osGetTickCount ((DWORD(WINAPI*)(VOID))aSyscall[33].pCurrent)
+
+#if defined(SQLITE_WIN32_HAS_ANSI) && SQLITE_WIN32_GETVERSIONEX
+ { "GetVersionExA", (SYSCALL)GetVersionExA, 0 },
+#else
+ { "GetVersionExA", (SYSCALL)0, 0 },
+#endif
+
+#define osGetVersionExA ((BOOL(WINAPI*)( \
+ LPOSVERSIONINFOA))aSyscall[34].pCurrent)
+
+#if !SQLITE_OS_WINRT && defined(SQLITE_WIN32_HAS_WIDE) && \
+ SQLITE_WIN32_GETVERSIONEX
+ { "GetVersionExW", (SYSCALL)GetVersionExW, 0 },
+#else
+ { "GetVersionExW", (SYSCALL)0, 0 },
+#endif
+
+#define osGetVersionExW ((BOOL(WINAPI*)( \
+ LPOSVERSIONINFOW))aSyscall[35].pCurrent)
+
+ { "HeapAlloc", (SYSCALL)HeapAlloc, 0 },
+
+#define osHeapAlloc ((LPVOID(WINAPI*)(HANDLE,DWORD, \
+ SIZE_T))aSyscall[36].pCurrent)
+
+#if !SQLITE_OS_WINRT
+ { "HeapCreate", (SYSCALL)HeapCreate, 0 },
+#else
+ { "HeapCreate", (SYSCALL)0, 0 },
+#endif
+
+#define osHeapCreate ((HANDLE(WINAPI*)(DWORD,SIZE_T, \
+ SIZE_T))aSyscall[37].pCurrent)
+
+#if !SQLITE_OS_WINRT
+ { "HeapDestroy", (SYSCALL)HeapDestroy, 0 },
+#else
+ { "HeapDestroy", (SYSCALL)0, 0 },
+#endif
+
+#define osHeapDestroy ((BOOL(WINAPI*)(HANDLE))aSyscall[38].pCurrent)
+
+ { "HeapFree", (SYSCALL)HeapFree, 0 },
+
+#define osHeapFree ((BOOL(WINAPI*)(HANDLE,DWORD,LPVOID))aSyscall[39].pCurrent)
+
+ { "HeapReAlloc", (SYSCALL)HeapReAlloc, 0 },
+
+#define osHeapReAlloc ((LPVOID(WINAPI*)(HANDLE,DWORD,LPVOID, \
+ SIZE_T))aSyscall[40].pCurrent)
+
+ { "HeapSize", (SYSCALL)HeapSize, 0 },
+
+#define osHeapSize ((SIZE_T(WINAPI*)(HANDLE,DWORD, \
+ LPCVOID))aSyscall[41].pCurrent)
+
+#if !SQLITE_OS_WINRT
+ { "HeapValidate", (SYSCALL)HeapValidate, 0 },
+#else
+ { "HeapValidate", (SYSCALL)0, 0 },
+#endif
+
+#define osHeapValidate ((BOOL(WINAPI*)(HANDLE,DWORD, \
+ LPCVOID))aSyscall[42].pCurrent)
+
+#if !SQLITE_OS_WINCE && !SQLITE_OS_WINRT
+ { "HeapCompact", (SYSCALL)HeapCompact, 0 },
+#else
+ { "HeapCompact", (SYSCALL)0, 0 },
+#endif
+
+#define osHeapCompact ((UINT(WINAPI*)(HANDLE,DWORD))aSyscall[43].pCurrent)
+
+#if defined(SQLITE_WIN32_HAS_ANSI) && !defined(SQLITE_OMIT_LOAD_EXTENSION)
+ { "LoadLibraryA", (SYSCALL)LoadLibraryA, 0 },
+#else
+ { "LoadLibraryA", (SYSCALL)0, 0 },
+#endif
+
+#define osLoadLibraryA ((HMODULE(WINAPI*)(LPCSTR))aSyscall[44].pCurrent)
+
+#if !SQLITE_OS_WINRT && defined(SQLITE_WIN32_HAS_WIDE) && \
+ !defined(SQLITE_OMIT_LOAD_EXTENSION)
+ { "LoadLibraryW", (SYSCALL)LoadLibraryW, 0 },
+#else
+ { "LoadLibraryW", (SYSCALL)0, 0 },
+#endif
+
+#define osLoadLibraryW ((HMODULE(WINAPI*)(LPCWSTR))aSyscall[45].pCurrent)
+
+#if !SQLITE_OS_WINRT
+ { "LocalFree", (SYSCALL)LocalFree, 0 },
+#else
+ { "LocalFree", (SYSCALL)0, 0 },
+#endif
+
+#define osLocalFree ((HLOCAL(WINAPI*)(HLOCAL))aSyscall[46].pCurrent)
+
+#if !SQLITE_OS_WINCE && !SQLITE_OS_WINRT
+ { "LockFile", (SYSCALL)LockFile, 0 },
+#else
+ { "LockFile", (SYSCALL)0, 0 },
+#endif
+
+#ifndef osLockFile
+#define osLockFile ((BOOL(WINAPI*)(HANDLE,DWORD,DWORD,DWORD, \
+ DWORD))aSyscall[47].pCurrent)
+#endif
+
+#if !SQLITE_OS_WINCE
+ { "LockFileEx", (SYSCALL)LockFileEx, 0 },
+#else
+ { "LockFileEx", (SYSCALL)0, 0 },
+#endif
+
+#ifndef osLockFileEx
+#define osLockFileEx ((BOOL(WINAPI*)(HANDLE,DWORD,DWORD,DWORD,DWORD, \
+ LPOVERLAPPED))aSyscall[48].pCurrent)
+#endif
+
+#if SQLITE_OS_WINCE || (!SQLITE_OS_WINRT && \
+ (!defined(SQLITE_OMIT_WAL) || SQLITE_MAX_MMAP_SIZE>0))
+ { "MapViewOfFile", (SYSCALL)MapViewOfFile, 0 },
+#else
+ { "MapViewOfFile", (SYSCALL)0, 0 },
+#endif
+
+#define osMapViewOfFile ((LPVOID(WINAPI*)(HANDLE,DWORD,DWORD,DWORD, \
+ SIZE_T))aSyscall[49].pCurrent)
+
+ { "MultiByteToWideChar", (SYSCALL)MultiByteToWideChar, 0 },
+
+#define osMultiByteToWideChar ((int(WINAPI*)(UINT,DWORD,LPCSTR,int,LPWSTR, \
+ int))aSyscall[50].pCurrent)
+
+ { "QueryPerformanceCounter", (SYSCALL)QueryPerformanceCounter, 0 },
+
+#define osQueryPerformanceCounter ((BOOL(WINAPI*)( \
+ LARGE_INTEGER*))aSyscall[51].pCurrent)
+
+ { "ReadFile", (SYSCALL)ReadFile, 0 },
+
+#define osReadFile ((BOOL(WINAPI*)(HANDLE,LPVOID,DWORD,LPDWORD, \
+ LPOVERLAPPED))aSyscall[52].pCurrent)
+
+ { "SetEndOfFile", (SYSCALL)SetEndOfFile, 0 },
+
+#define osSetEndOfFile ((BOOL(WINAPI*)(HANDLE))aSyscall[53].pCurrent)
+
+#if !SQLITE_OS_WINRT
+ { "SetFilePointer", (SYSCALL)SetFilePointer, 0 },
+#else
+ { "SetFilePointer", (SYSCALL)0, 0 },
+#endif
+
+#define osSetFilePointer ((DWORD(WINAPI*)(HANDLE,LONG,PLONG, \
+ DWORD))aSyscall[54].pCurrent)
+
+#if !SQLITE_OS_WINRT
+ { "Sleep", (SYSCALL)Sleep, 0 },
+#else
+ { "Sleep", (SYSCALL)0, 0 },
+#endif
+
+#define osSleep ((VOID(WINAPI*)(DWORD))aSyscall[55].pCurrent)
+
+ { "SystemTimeToFileTime", (SYSCALL)SystemTimeToFileTime, 0 },
+
+#define osSystemTimeToFileTime ((BOOL(WINAPI*)(CONST SYSTEMTIME*, \
+ LPFILETIME))aSyscall[56].pCurrent)
+
+#if !SQLITE_OS_WINCE && !SQLITE_OS_WINRT
+ { "UnlockFile", (SYSCALL)UnlockFile, 0 },
+#else
+ { "UnlockFile", (SYSCALL)0, 0 },
+#endif
+
+#ifndef osUnlockFile
+#define osUnlockFile ((BOOL(WINAPI*)(HANDLE,DWORD,DWORD,DWORD, \
+ DWORD))aSyscall[57].pCurrent)
+#endif
+
+#if !SQLITE_OS_WINCE
+ { "UnlockFileEx", (SYSCALL)UnlockFileEx, 0 },
+#else
+ { "UnlockFileEx", (SYSCALL)0, 0 },
+#endif
+
+#define osUnlockFileEx ((BOOL(WINAPI*)(HANDLE,DWORD,DWORD,DWORD, \
+ LPOVERLAPPED))aSyscall[58].pCurrent)
+
+#if SQLITE_OS_WINCE || !defined(SQLITE_OMIT_WAL) || SQLITE_MAX_MMAP_SIZE>0
+ { "UnmapViewOfFile", (SYSCALL)UnmapViewOfFile, 0 },
+#else
+ { "UnmapViewOfFile", (SYSCALL)0, 0 },
+#endif
+
+#define osUnmapViewOfFile ((BOOL(WINAPI*)(LPCVOID))aSyscall[59].pCurrent)
+
+ { "WideCharToMultiByte", (SYSCALL)WideCharToMultiByte, 0 },
+
+#define osWideCharToMultiByte ((int(WINAPI*)(UINT,DWORD,LPCWSTR,int,LPSTR,int, \
+ LPCSTR,LPBOOL))aSyscall[60].pCurrent)
+
+ { "WriteFile", (SYSCALL)WriteFile, 0 },
+
+#define osWriteFile ((BOOL(WINAPI*)(HANDLE,LPCVOID,DWORD,LPDWORD, \
+ LPOVERLAPPED))aSyscall[61].pCurrent)
+
+#if SQLITE_OS_WINRT
+ { "CreateEventExW", (SYSCALL)CreateEventExW, 0 },
+#else
+ { "CreateEventExW", (SYSCALL)0, 0 },
+#endif
+
+#define osCreateEventExW ((HANDLE(WINAPI*)(LPSECURITY_ATTRIBUTES,LPCWSTR, \
+ DWORD,DWORD))aSyscall[62].pCurrent)
+
+#if !SQLITE_OS_WINRT
+ { "WaitForSingleObject", (SYSCALL)WaitForSingleObject, 0 },
+#else
+ { "WaitForSingleObject", (SYSCALL)0, 0 },
+#endif
+
+#define osWaitForSingleObject ((DWORD(WINAPI*)(HANDLE, \
+ DWORD))aSyscall[63].pCurrent)
+
+#if !SQLITE_OS_WINCE
+ { "WaitForSingleObjectEx", (SYSCALL)WaitForSingleObjectEx, 0 },
+#else
+ { "WaitForSingleObjectEx", (SYSCALL)0, 0 },
+#endif
+
+#define osWaitForSingleObjectEx ((DWORD(WINAPI*)(HANDLE,DWORD, \
+ BOOL))aSyscall[64].pCurrent)
+
+#if SQLITE_OS_WINRT
+ { "SetFilePointerEx", (SYSCALL)SetFilePointerEx, 0 },
+#else
+ { "SetFilePointerEx", (SYSCALL)0, 0 },
+#endif
+
+#define osSetFilePointerEx ((BOOL(WINAPI*)(HANDLE,LARGE_INTEGER, \
+ PLARGE_INTEGER,DWORD))aSyscall[65].pCurrent)
+
+#if SQLITE_OS_WINRT
+ { "GetFileInformationByHandleEx", (SYSCALL)GetFileInformationByHandleEx, 0 },
+#else
+ { "GetFileInformationByHandleEx", (SYSCALL)0, 0 },
+#endif
+
+#define osGetFileInformationByHandleEx ((BOOL(WINAPI*)(HANDLE, \
+ FILE_INFO_BY_HANDLE_CLASS,LPVOID,DWORD))aSyscall[66].pCurrent)
+
+#if SQLITE_OS_WINRT && (!defined(SQLITE_OMIT_WAL) || SQLITE_MAX_MMAP_SIZE>0)
+ { "MapViewOfFileFromApp", (SYSCALL)MapViewOfFileFromApp, 0 },
+#else
+ { "MapViewOfFileFromApp", (SYSCALL)0, 0 },
+#endif
+
+#define osMapViewOfFileFromApp ((LPVOID(WINAPI*)(HANDLE,ULONG,ULONG64, \
+ SIZE_T))aSyscall[67].pCurrent)
+
+#if SQLITE_OS_WINRT
+ { "CreateFile2", (SYSCALL)CreateFile2, 0 },
+#else
+ { "CreateFile2", (SYSCALL)0, 0 },
+#endif
+
+#define osCreateFile2 ((HANDLE(WINAPI*)(LPCWSTR,DWORD,DWORD,DWORD, \
+ LPCREATEFILE2_EXTENDED_PARAMETERS))aSyscall[68].pCurrent)
+
+#if SQLITE_OS_WINRT && !defined(SQLITE_OMIT_LOAD_EXTENSION)
+ { "LoadPackagedLibrary", (SYSCALL)LoadPackagedLibrary, 0 },
+#else
+ { "LoadPackagedLibrary", (SYSCALL)0, 0 },
+#endif
+
+#define osLoadPackagedLibrary ((HMODULE(WINAPI*)(LPCWSTR, \
+ DWORD))aSyscall[69].pCurrent)
+
+#if SQLITE_OS_WINRT
+ { "GetTickCount64", (SYSCALL)GetTickCount64, 0 },
+#else
+ { "GetTickCount64", (SYSCALL)0, 0 },
+#endif
+
+#define osGetTickCount64 ((ULONGLONG(WINAPI*)(VOID))aSyscall[70].pCurrent)
+
+#if SQLITE_OS_WINRT
+ { "GetNativeSystemInfo", (SYSCALL)GetNativeSystemInfo, 0 },
+#else
+ { "GetNativeSystemInfo", (SYSCALL)0, 0 },
+#endif
+
+#define osGetNativeSystemInfo ((VOID(WINAPI*)( \
+ LPSYSTEM_INFO))aSyscall[71].pCurrent)
+
+#if defined(SQLITE_WIN32_HAS_ANSI)
+ { "OutputDebugStringA", (SYSCALL)OutputDebugStringA, 0 },
+#else
+ { "OutputDebugStringA", (SYSCALL)0, 0 },
+#endif
+
+#define osOutputDebugStringA ((VOID(WINAPI*)(LPCSTR))aSyscall[72].pCurrent)
+
+#if defined(SQLITE_WIN32_HAS_WIDE)
+ { "OutputDebugStringW", (SYSCALL)OutputDebugStringW, 0 },
+#else
+ { "OutputDebugStringW", (SYSCALL)0, 0 },
+#endif
+
+#define osOutputDebugStringW ((VOID(WINAPI*)(LPCWSTR))aSyscall[73].pCurrent)
+
+ { "GetProcessHeap", (SYSCALL)GetProcessHeap, 0 },
+
+#define osGetProcessHeap ((HANDLE(WINAPI*)(VOID))aSyscall[74].pCurrent)
+
+#if SQLITE_OS_WINRT && (!defined(SQLITE_OMIT_WAL) || SQLITE_MAX_MMAP_SIZE>0)
+ { "CreateFileMappingFromApp", (SYSCALL)CreateFileMappingFromApp, 0 },
+#else
+ { "CreateFileMappingFromApp", (SYSCALL)0, 0 },
+#endif
+
+#define osCreateFileMappingFromApp ((HANDLE(WINAPI*)(HANDLE, \
+ LPSECURITY_ATTRIBUTES,ULONG,ULONG64,LPCWSTR))aSyscall[75].pCurrent)
+
+/*
+** NOTE: On some sub-platforms, the InterlockedCompareExchange "function"
+** is really just a macro that uses a compiler intrinsic (e.g. x64).
+** So do not try to make this into a redefinable interface.
+*/
+#if defined(InterlockedCompareExchange)
+ { "InterlockedCompareExchange", (SYSCALL)0, 0 },
+
+#define osInterlockedCompareExchange InterlockedCompareExchange
+#else
+ { "InterlockedCompareExchange", (SYSCALL)InterlockedCompareExchange, 0 },
+
+#define osInterlockedCompareExchange ((LONG(WINAPI*)(LONG \
+ SQLITE_WIN32_VOLATILE*, LONG,LONG))aSyscall[76].pCurrent)
+#endif /* defined(InterlockedCompareExchange) */
+
+#if !SQLITE_OS_WINCE && !SQLITE_OS_WINRT && SQLITE_WIN32_USE_UUID
+ { "UuidCreate", (SYSCALL)UuidCreate, 0 },
+#else
+ { "UuidCreate", (SYSCALL)0, 0 },
+#endif
+
+#define osUuidCreate ((RPC_STATUS(RPC_ENTRY*)(UUID*))aSyscall[77].pCurrent)
+
+#if !SQLITE_OS_WINCE && !SQLITE_OS_WINRT && SQLITE_WIN32_USE_UUID
+ { "UuidCreateSequential", (SYSCALL)UuidCreateSequential, 0 },
+#else
+ { "UuidCreateSequential", (SYSCALL)0, 0 },
+#endif
+
+#define osUuidCreateSequential \
+ ((RPC_STATUS(RPC_ENTRY*)(UUID*))aSyscall[78].pCurrent)
+
+#if !defined(SQLITE_NO_SYNC) && SQLITE_MAX_MMAP_SIZE>0
+ { "FlushViewOfFile", (SYSCALL)FlushViewOfFile, 0 },
+#else
+ { "FlushViewOfFile", (SYSCALL)0, 0 },
+#endif
+
+#define osFlushViewOfFile \
+ ((BOOL(WINAPI*)(LPCVOID,SIZE_T))aSyscall[79].pCurrent)
+
+}; /* End of the overrideable system calls */
+
+/*
+** This is the xSetSystemCall() method of sqlite3_vfs for all of the
+** "win32" VFSes. Return SQLITE_OK opon successfully updating the
+** system call pointer, or SQLITE_NOTFOUND if there is no configurable
+** system call named zName.
+*/
+static int winSetSystemCall(
+ sqlite3_vfs *pNotUsed, /* The VFS pointer. Not used */
+ const char *zName, /* Name of system call to override */
+ sqlite3_syscall_ptr pNewFunc /* Pointer to new system call value */
+){
+ unsigned int i;
+ int rc = SQLITE_NOTFOUND;
+
+ UNUSED_PARAMETER(pNotUsed);
+ if( zName==0 ){
+    /* If no zName is given, restore all system calls to their default
+    ** settings and return SQLITE_OK.
+    */
+ rc = SQLITE_OK;
+ for(i=0; i<sizeof(aSyscall)/sizeof(aSyscall[0]); i++){
+ if( aSyscall[i].pDefault ){
+ aSyscall[i].pCurrent = aSyscall[i].pDefault;
+ }
+ }
+ }else{
+ /* If zName is specified, operate on only the one system call
+ ** specified.
+ */
+ for(i=0; i<sizeof(aSyscall)/sizeof(aSyscall[0]); i++){
+ if( strcmp(zName, aSyscall[i].zName)==0 ){
+ if( aSyscall[i].pDefault==0 ){
+ aSyscall[i].pDefault = aSyscall[i].pCurrent;
+ }
+ rc = SQLITE_OK;
+ if( pNewFunc==0 ) pNewFunc = aSyscall[i].pDefault;
+ aSyscall[i].pCurrent = pNewFunc;
+ break;
+ }
+ }
+ }
+ return rc;
+}
+
+/*
+** Return the value of a system call. Return NULL if zName is not a
+** recognized system call name. NULL is also returned if the system call
+** is currently undefined.
+*/
+static sqlite3_syscall_ptr winGetSystemCall(
+ sqlite3_vfs *pNotUsed,
+ const char *zName
+){
+ unsigned int i;
+
+ UNUSED_PARAMETER(pNotUsed);
+ for(i=0; i<sizeof(aSyscall)/sizeof(aSyscall[0]); i++){
+ if( strcmp(zName, aSyscall[i].zName)==0 ) return aSyscall[i].pCurrent;
+ }
+ return 0;
+}
+
+/*
+** Return the name of the first system call after zName. If zName==NULL
+** then return the name of the first system call. Return NULL if zName
+** is the last system call or if zName is not the name of a valid
+** system call.
+*/
+static const char *winNextSystemCall(sqlite3_vfs *p, const char *zName){
+ int i = -1;
+
+ UNUSED_PARAMETER(p);
+ if( zName ){
+ for(i=0; i<ArraySize(aSyscall)-1; i++){
+ if( strcmp(zName, aSyscall[i].zName)==0 ) break;
+ }
+ }
+ for(i++; i<ArraySize(aSyscall); i++){
+ if( aSyscall[i].pCurrent!=0 ) return aSyscall[i].zName;
+ }
+ return 0;
+}
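+
+/* Taken together, xSetSystemCall(), xGetSystemCall() and xNextSystemCall()
+** allow a caller to enumerate every configurable system call.  A minimal
+** sketch, assuming the standard "win32" VFS is registered:
+**
+**   sqlite3_vfs *pVfs = sqlite3_vfs_find("win32");
+**   const char *zName;
+**   for(zName=pVfs->xNextSystemCall(pVfs, 0); zName!=0;
+**       zName=pVfs->xNextSystemCall(pVfs, zName)){
+**     sqlite3_syscall_ptr pCall = pVfs->xGetSystemCall(pVfs, zName);
+**     ...
+**   }
+*/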
+
+#ifdef SQLITE_WIN32_MALLOC
+/*
+** If a Win32 native heap has been configured, this function will attempt to
+** compact it. Upon success, SQLITE_OK will be returned. Upon failure, one
+** of SQLITE_NOMEM, SQLITE_ERROR, or SQLITE_NOTFOUND will be returned. The
+** "pnLargest" argument, if non-zero, will be used to return the size of the
+** largest committed free block in the heap, in bytes.
+*/
+SQLITE_API int sqlite3_win32_compact_heap(LPUINT pnLargest){
+ int rc = SQLITE_OK;
+ UINT nLargest = 0;
+ HANDLE hHeap;
+
+ winMemAssertMagic();
+ hHeap = winMemGetHeap();
+ assert( hHeap!=0 );
+ assert( hHeap!=INVALID_HANDLE_VALUE );
+#if !SQLITE_OS_WINRT && defined(SQLITE_WIN32_MALLOC_VALIDATE)
+ assert( osHeapValidate(hHeap, SQLITE_WIN32_HEAP_FLAGS, NULL) );
+#endif
+#if !SQLITE_OS_WINCE && !SQLITE_OS_WINRT
+ if( (nLargest=osHeapCompact(hHeap, SQLITE_WIN32_HEAP_FLAGS))==0 ){
+ DWORD lastErrno = osGetLastError();
+ if( lastErrno==NO_ERROR ){
+ sqlite3_log(SQLITE_NOMEM, "failed to HeapCompact (no space), heap=%p",
+ (void*)hHeap);
+ rc = SQLITE_NOMEM_BKPT;
+ }else{
+ sqlite3_log(SQLITE_ERROR, "failed to HeapCompact (%lu), heap=%p",
+ osGetLastError(), (void*)hHeap);
+ rc = SQLITE_ERROR;
+ }
+ }
+#else
+ sqlite3_log(SQLITE_NOTFOUND, "failed to HeapCompact, heap=%p",
+ (void*)hHeap);
+ rc = SQLITE_NOTFOUND;
+#endif
+ if( pnLargest ) *pnLargest = nLargest;
+ return rc;
+}
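+
+/* A minimal usage sketch for the routine above:
+**
+**   UINT nLargest = 0;
+**   int rc = sqlite3_win32_compact_heap(&nLargest);
+**   if( rc==SQLITE_OK ){
+**     ...  nLargest is the largest committed free block, in bytes  ...
+**   }
+*/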
+
+/*
+** If a Win32 native heap has been configured, this function will attempt to
+** destroy and recreate it. If the Win32 native heap is not isolated and/or
+** the sqlite3_memory_used() function does not return zero, SQLITE_BUSY will
+** be returned and no changes will be made to the Win32 native heap.
+*/
+SQLITE_API int sqlite3_win32_reset_heap(){
+ int rc;
+ MUTEX_LOGIC( sqlite3_mutex *pMainMtx; ) /* The main static mutex */
+ MUTEX_LOGIC( sqlite3_mutex *pMem; ) /* The memsys static mutex */
+ MUTEX_LOGIC( pMainMtx = sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MAIN); )
+ MUTEX_LOGIC( pMem = sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MEM); )
+ sqlite3_mutex_enter(pMainMtx);
+ sqlite3_mutex_enter(pMem);
+ winMemAssertMagic();
+ if( winMemGetHeap()!=NULL && winMemGetOwned() && sqlite3_memory_used()==0 ){
+ /*
+ ** At this point, there should be no outstanding memory allocations on
+ ** the heap. Also, since both the main and memsys locks are currently
+ ** being held by us, no other function (i.e. from another thread) should
+ ** be able to even access the heap. Attempt to destroy and recreate our
+ ** isolated Win32 native heap now.
+ */
+ assert( winMemGetHeap()!=NULL );
+ assert( winMemGetOwned() );
+ assert( sqlite3_memory_used()==0 );
+ winMemShutdown(winMemGetDataPtr());
+ assert( winMemGetHeap()==NULL );
+ assert( !winMemGetOwned() );
+ assert( sqlite3_memory_used()==0 );
+ rc = winMemInit(winMemGetDataPtr());
+ assert( rc!=SQLITE_OK || winMemGetHeap()!=NULL );
+ assert( rc!=SQLITE_OK || winMemGetOwned() );
+ assert( rc!=SQLITE_OK || sqlite3_memory_used()==0 );
+ }else{
+ /*
+ ** The Win32 native heap cannot be modified because it may be in use.
+ */
+ rc = SQLITE_BUSY;
+ }
+ sqlite3_mutex_leave(pMem);
+ sqlite3_mutex_leave(pMainMtx);
+ return rc;
+}
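+
+/* Illustrative call pattern for the routine above.  It can only succeed
+** while no SQLite allocations are outstanding, so callers must be prepared
+** for SQLITE_BUSY:
+**
+**   if( sqlite3_win32_reset_heap()==SQLITE_BUSY ){
+**     ...  memory is still in use; the heap was left untouched  ...
+**   }
+*/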
+#endif /* SQLITE_WIN32_MALLOC */
+
+/*
+** This function outputs the specified (ANSI) string to the Win32 debugger
+** (if available).
+*/
+
+SQLITE_API void sqlite3_win32_write_debug(const char *zBuf, int nBuf){
+ char zDbgBuf[SQLITE_WIN32_DBG_BUF_SIZE];
+ int nMin = MIN(nBuf, (SQLITE_WIN32_DBG_BUF_SIZE - 1)); /* may be negative. */
+ if( nMin<-1 ) nMin = -1; /* all negative values become -1. */
+ assert( nMin==-1 || nMin==0 || nMin<SQLITE_WIN32_DBG_BUF_SIZE );
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( !zBuf ){
+ (void)SQLITE_MISUSE_BKPT;
+ return;
+ }
+#endif
+#if defined(SQLITE_WIN32_HAS_ANSI)
+ if( nMin>0 ){
+ memset(zDbgBuf, 0, SQLITE_WIN32_DBG_BUF_SIZE);
+ memcpy(zDbgBuf, zBuf, nMin);
+ osOutputDebugStringA(zDbgBuf);
+ }else{
+ osOutputDebugStringA(zBuf);
+ }
+#elif defined(SQLITE_WIN32_HAS_WIDE)
+ memset(zDbgBuf, 0, SQLITE_WIN32_DBG_BUF_SIZE);
+ if ( osMultiByteToWideChar(
+ osAreFileApisANSI() ? CP_ACP : CP_OEMCP, 0, zBuf,
+ nMin, (LPWSTR)zDbgBuf, SQLITE_WIN32_DBG_BUF_SIZE/sizeof(WCHAR))<=0 ){
+ return;
+ }
+ osOutputDebugStringW((LPCWSTR)zDbgBuf);
+#else
+ if( nMin>0 ){
+ memset(zDbgBuf, 0, SQLITE_WIN32_DBG_BUF_SIZE);
+ memcpy(zDbgBuf, zBuf, nMin);
+ fprintf(stderr, "%s", zDbgBuf);
+ }else{
+ fprintf(stderr, "%s", zBuf);
+ }
+#endif
+}
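+
+/* Usage sketch: per the nMin logic above, passing a negative nBuf causes
+** zBuf to be treated as a zero-terminated string:
+**
+**   sqlite3_win32_write_debug("hello from sqlite", -1);
+*/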
+
+/*
+** The following routine suspends the current thread for at least the
+** requested number of milliseconds.  This is equivalent to the Win32
+** Sleep() interface.
+*/
+#if SQLITE_OS_WINRT
+static HANDLE sleepObj = NULL;
+#endif
+
+SQLITE_API void sqlite3_win32_sleep(DWORD milliseconds){
+#if SQLITE_OS_WINRT
+ if ( sleepObj==NULL ){
+ sleepObj = osCreateEventExW(NULL, NULL, CREATE_EVENT_MANUAL_RESET,
+ SYNCHRONIZE);
+ }
+ assert( sleepObj!=NULL );
+ osWaitForSingleObjectEx(sleepObj, milliseconds, FALSE);
+#else
+ osSleep(milliseconds);
+#endif
+}
+
+#if SQLITE_MAX_WORKER_THREADS>0 && !SQLITE_OS_WINCE && !SQLITE_OS_WINRT && \
+ SQLITE_THREADSAFE>0
+SQLITE_PRIVATE DWORD sqlite3Win32Wait(HANDLE hObject){
+ DWORD rc;
+ while( (rc = osWaitForSingleObjectEx(hObject, INFINITE,
+ TRUE))==WAIT_IO_COMPLETION ){}
+ return rc;
+}
+#endif
+
+/*
+** Return true (non-zero) if we are running under WinNT, Win2K, WinXP,
+** or WinCE. Return false (zero) for Win95, Win98, or WinME.
+**
+** Here is an interesting observation: Win95, Win98, and WinME lack
+** the LockFileEx() API. But we can still statically link against that
+** API as long as we don't call it when running Win95/98/ME. A call to
+** this routine is used to determine if the host is Win95/98/ME or
+** WinNT/2K/XP so that we will know whether or not we can safely call
+** the LockFileEx() API.
+*/
+
+#if !SQLITE_WIN32_GETVERSIONEX
+# define osIsNT() (1)
+#elif SQLITE_OS_WINCE || SQLITE_OS_WINRT || !defined(SQLITE_WIN32_HAS_ANSI)
+# define osIsNT() (1)
+#elif !defined(SQLITE_WIN32_HAS_WIDE)
+# define osIsNT() (0)
+#else
+# define osIsNT() ((sqlite3_os_type==2) || sqlite3_win32_is_nt())
+#endif
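+
+/* The locking code later in this file branches on osIsNT() along these
+** lines (sketch only):
+**
+**   if( osIsNT() ){
+**     ...  safe to call osLockFileEx()/osUnlockFileEx()  ...
+**   }else{
+**     ...  Win9x: fall back to osLockFile()/osUnlockFile()  ...
+**   }
+*/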
+
+/*
+** This function determines if the machine is running a version of Windows
+** based on the NT kernel.
+*/
+SQLITE_API int sqlite3_win32_is_nt(void){
+#if SQLITE_OS_WINRT
+ /*
+ ** NOTE: The WinRT sub-platform is always assumed to be based on the NT
+ ** kernel.
+ */
+ return 1;
+#elif SQLITE_WIN32_GETVERSIONEX
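+  /* An InterlockedCompareExchange(&x, 0, 0) can never change the value of
+  ** x; the calls below use it purely as an atomic read of sqlite3_os_type. */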
+ if( osInterlockedCompareExchange(&sqlite3_os_type, 0, 0)==0 ){
+#if defined(SQLITE_WIN32_HAS_ANSI)
+ OSVERSIONINFOA sInfo;
+ sInfo.dwOSVersionInfoSize = sizeof(sInfo);
+ osGetVersionExA(&sInfo);
+ osInterlockedCompareExchange(&sqlite3_os_type,
+ (sInfo.dwPlatformId == VER_PLATFORM_WIN32_NT) ? 2 : 1, 0);
+#elif defined(SQLITE_WIN32_HAS_WIDE)
+ OSVERSIONINFOW sInfo;
+ sInfo.dwOSVersionInfoSize = sizeof(sInfo);
+ osGetVersionExW(&sInfo);
+ osInterlockedCompareExchange(&sqlite3_os_type,
+ (sInfo.dwPlatformId == VER_PLATFORM_WIN32_NT) ? 2 : 1, 0);
+#endif
+ }
+ return osInterlockedCompareExchange(&sqlite3_os_type, 2, 2)==2;
+#elif SQLITE_TEST
+ return osInterlockedCompareExchange(&sqlite3_os_type, 2, 2)==2;
+#else
+ /*
+ ** NOTE: All sub-platforms where the GetVersionEx[AW] functions are
+ ** deprecated are always assumed to be based on the NT kernel.
+ */
+ return 1;
+#endif
+}
+
+#ifdef SQLITE_WIN32_MALLOC
+/*
+** Allocate nBytes of memory.
+*/
+static void *winMemMalloc(int nBytes){
+ HANDLE hHeap;
+ void *p;
+
+ winMemAssertMagic();
+ hHeap = winMemGetHeap();
+ assert( hHeap!=0 );
+ assert( hHeap!=INVALID_HANDLE_VALUE );
+#if !SQLITE_OS_WINRT && defined(SQLITE_WIN32_MALLOC_VALIDATE)
+ assert( osHeapValidate(hHeap, SQLITE_WIN32_HEAP_FLAGS, NULL) );
+#endif
+ assert( nBytes>=0 );
+ p = osHeapAlloc(hHeap, SQLITE_WIN32_HEAP_FLAGS, (SIZE_T)nBytes);
+ if( !p ){
+ sqlite3_log(SQLITE_NOMEM, "failed to HeapAlloc %u bytes (%lu), heap=%p",
+ nBytes, osGetLastError(), (void*)hHeap);
+ }
+ return p;
+}
+
+/*
+** Free memory.
+*/
+static void winMemFree(void *pPrior){
+ HANDLE hHeap;
+
+ winMemAssertMagic();
+ hHeap = winMemGetHeap();
+ assert( hHeap!=0 );
+ assert( hHeap!=INVALID_HANDLE_VALUE );
+#if !SQLITE_OS_WINRT && defined(SQLITE_WIN32_MALLOC_VALIDATE)
+ assert( osHeapValidate(hHeap, SQLITE_WIN32_HEAP_FLAGS, pPrior) );
+#endif
+ if( !pPrior ) return; /* Passing NULL to HeapFree is undefined. */
+ if( !osHeapFree(hHeap, SQLITE_WIN32_HEAP_FLAGS, pPrior) ){
+ sqlite3_log(SQLITE_NOMEM, "failed to HeapFree block %p (%lu), heap=%p",
+ pPrior, osGetLastError(), (void*)hHeap);
+ }
+}
+
+/*
+** Change the size of an existing memory allocation.
+*/
+static void *winMemRealloc(void *pPrior, int nBytes){
+ HANDLE hHeap;
+ void *p;
+
+ winMemAssertMagic();
+ hHeap = winMemGetHeap();
+ assert( hHeap!=0 );
+ assert( hHeap!=INVALID_HANDLE_VALUE );
+#if !SQLITE_OS_WINRT && defined(SQLITE_WIN32_MALLOC_VALIDATE)
+ assert( osHeapValidate(hHeap, SQLITE_WIN32_HEAP_FLAGS, pPrior) );
+#endif
+ assert( nBytes>=0 );
+ if( !pPrior ){
+ p = osHeapAlloc(hHeap, SQLITE_WIN32_HEAP_FLAGS, (SIZE_T)nBytes);
+ }else{
+ p = osHeapReAlloc(hHeap, SQLITE_WIN32_HEAP_FLAGS, pPrior, (SIZE_T)nBytes);
+ }
+ if( !p ){
+ sqlite3_log(SQLITE_NOMEM, "failed to %s %u bytes (%lu), heap=%p",
+ pPrior ? "HeapReAlloc" : "HeapAlloc", nBytes, osGetLastError(),
+ (void*)hHeap);
+ }
+ return p;
+}
+
+/*
+** Return the size of an outstanding allocation, in bytes.
+*/
+static int winMemSize(void *p){
+ HANDLE hHeap;
+ SIZE_T n;
+
+ winMemAssertMagic();
+ hHeap = winMemGetHeap();
+ assert( hHeap!=0 );
+ assert( hHeap!=INVALID_HANDLE_VALUE );
+#if !SQLITE_OS_WINRT && defined(SQLITE_WIN32_MALLOC_VALIDATE)
+ assert( osHeapValidate(hHeap, SQLITE_WIN32_HEAP_FLAGS, p) );
+#endif
+ if( !p ) return 0;
+ n = osHeapSize(hHeap, SQLITE_WIN32_HEAP_FLAGS, p);
+ if( n==(SIZE_T)-1 ){
+ sqlite3_log(SQLITE_NOMEM, "failed to HeapSize block %p (%lu), heap=%p",
+ p, osGetLastError(), (void*)hHeap);
+ return 0;
+ }
+ return (int)n;
+}
+
+/*
+** Round up a request size to the next valid allocation size.
+*/
+static int winMemRoundup(int n){
+ return n;
+}
+
+/*
+** Initialize this module.
+*/
+static int winMemInit(void *pAppData){
+ winMemData *pWinMemData = (winMemData *)pAppData;
+
+ if( !pWinMemData ) return SQLITE_ERROR;
+ assert( pWinMemData->magic1==WINMEM_MAGIC1 );
+ assert( pWinMemData->magic2==WINMEM_MAGIC2 );
+
+#if !SQLITE_OS_WINRT && SQLITE_WIN32_HEAP_CREATE
+ if( !pWinMemData->hHeap ){
+ DWORD dwInitialSize = SQLITE_WIN32_HEAP_INIT_SIZE;
+ DWORD dwMaximumSize = (DWORD)sqlite3GlobalConfig.nHeap;
+ if( dwMaximumSize==0 ){
+ dwMaximumSize = SQLITE_WIN32_HEAP_MAX_SIZE;
+ }else if( dwInitialSize>dwMaximumSize ){
+ dwInitialSize = dwMaximumSize;
+ }
+ pWinMemData->hHeap = osHeapCreate(SQLITE_WIN32_HEAP_FLAGS,
+ dwInitialSize, dwMaximumSize);
+ if( !pWinMemData->hHeap ){
+ sqlite3_log(SQLITE_NOMEM,
+ "failed to HeapCreate (%lu), flags=%u, initSize=%lu, maxSize=%lu",
+ osGetLastError(), SQLITE_WIN32_HEAP_FLAGS, dwInitialSize,
+ dwMaximumSize);
+ return SQLITE_NOMEM_BKPT;
+ }
+ pWinMemData->bOwned = TRUE;
+ assert( pWinMemData->bOwned );
+ }
+#else
+ pWinMemData->hHeap = osGetProcessHeap();
+ if( !pWinMemData->hHeap ){
+ sqlite3_log(SQLITE_NOMEM,
+ "failed to GetProcessHeap (%lu)", osGetLastError());
+ return SQLITE_NOMEM_BKPT;
+ }
+ pWinMemData->bOwned = FALSE;
+ assert( !pWinMemData->bOwned );
+#endif
+ assert( pWinMemData->hHeap!=0 );
+ assert( pWinMemData->hHeap!=INVALID_HANDLE_VALUE );
+#if !SQLITE_OS_WINRT && defined(SQLITE_WIN32_MALLOC_VALIDATE)
+ assert( osHeapValidate(pWinMemData->hHeap, SQLITE_WIN32_HEAP_FLAGS, NULL) );
+#endif
+ return SQLITE_OK;
+}
+
+/*
+** Deinitialize this module.
+*/
+static void winMemShutdown(void *pAppData){
+ winMemData *pWinMemData = (winMemData *)pAppData;
+
+ if( !pWinMemData ) return;
+ assert( pWinMemData->magic1==WINMEM_MAGIC1 );
+ assert( pWinMemData->magic2==WINMEM_MAGIC2 );
+
+ if( pWinMemData->hHeap ){
+ assert( pWinMemData->hHeap!=INVALID_HANDLE_VALUE );
+#if !SQLITE_OS_WINRT && defined(SQLITE_WIN32_MALLOC_VALIDATE)
+ assert( osHeapValidate(pWinMemData->hHeap, SQLITE_WIN32_HEAP_FLAGS, NULL) );
+#endif
+ if( pWinMemData->bOwned ){
+ if( !osHeapDestroy(pWinMemData->hHeap) ){
+ sqlite3_log(SQLITE_NOMEM, "failed to HeapDestroy (%lu), heap=%p",
+ osGetLastError(), (void*)pWinMemData->hHeap);
+ }
+ pWinMemData->bOwned = FALSE;
+ }
+ pWinMemData->hHeap = NULL;
+ }
+}
+
+/*
+** Populate the low-level memory allocation function pointers in
+** sqlite3GlobalConfig.m with pointers to the routines in this file. The
+** arguments specify the block of memory to manage.
+**
+** This routine is only called by sqlite3_config(), and therefore
+** is not required to be threadsafe (it is not).
+*/
+SQLITE_PRIVATE const sqlite3_mem_methods *sqlite3MemGetWin32(void){
+ static const sqlite3_mem_methods winMemMethods = {
+ winMemMalloc,
+ winMemFree,
+ winMemRealloc,
+ winMemSize,
+ winMemRoundup,
+ winMemInit,
+ winMemShutdown,
+ &win_mem_data
+ };
+ return &winMemMethods;
+}
+
+SQLITE_PRIVATE void sqlite3MemSetDefault(void){
+ sqlite3_config(SQLITE_CONFIG_MALLOC, sqlite3MemGetWin32());
+}
+#endif /* SQLITE_WIN32_MALLOC */
+
+/*
+** Convert a UTF-8 string to Microsoft Unicode.
+**
+** Space to hold the returned string is obtained from sqlite3_malloc().
+*/
+static LPWSTR winUtf8ToUnicode(const char *zText){
+ int nChar;
+ LPWSTR zWideText;
+
+ nChar = osMultiByteToWideChar(CP_UTF8, 0, zText, -1, NULL, 0);
+ if( nChar==0 ){
+ return 0;
+ }
+ zWideText = sqlite3MallocZero( nChar*sizeof(WCHAR) );
+ if( zWideText==0 ){
+ return 0;
+ }
+ nChar = osMultiByteToWideChar(CP_UTF8, 0, zText, -1, zWideText,
+ nChar);
+ if( nChar==0 ){
+ sqlite3_free(zWideText);
+ zWideText = 0;
+ }
+ return zWideText;
+}
+
+/*
+** Convert a Microsoft Unicode string to UTF-8.
+**
+** Space to hold the returned string is obtained from sqlite3_malloc().
+*/
+static char *winUnicodeToUtf8(LPCWSTR zWideText){
+ int nByte;
+ char *zText;
+
+ nByte = osWideCharToMultiByte(CP_UTF8, 0, zWideText, -1, 0, 0, 0, 0);
+ if( nByte == 0 ){
+ return 0;
+ }
+ zText = sqlite3MallocZero( nByte );
+ if( zText==0 ){
+ return 0;
+ }
+ nByte = osWideCharToMultiByte(CP_UTF8, 0, zWideText, -1, zText, nByte,
+ 0, 0);
+ if( nByte == 0 ){
+ sqlite3_free(zText);
+ zText = 0;
+ }
+ return zText;
+}
+
+/*
+** Convert an ANSI string to Microsoft Unicode, using the ANSI or OEM
+** code page.
+**
+** Space to hold the returned string is obtained from sqlite3_malloc().
+*/
+static LPWSTR winMbcsToUnicode(const char *zText, int useAnsi){
+ int nByte;
+ LPWSTR zMbcsText;
+ int codepage = useAnsi ? CP_ACP : CP_OEMCP;
+
+ nByte = osMultiByteToWideChar(codepage, 0, zText, -1, NULL,
+ 0)*sizeof(WCHAR);
+ if( nByte==0 ){
+ return 0;
+ }
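+  /* Note: nByte already includes the sizeof(WCHAR) scaling from the call
+  ** above, so the allocation below over-provisions by a further factor of
+  ** sizeof(WCHAR).  This is wasteful but harmless. */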
+ zMbcsText = sqlite3MallocZero( nByte*sizeof(WCHAR) );
+ if( zMbcsText==0 ){
+ return 0;
+ }
+ nByte = osMultiByteToWideChar(codepage, 0, zText, -1, zMbcsText,
+ nByte);
+ if( nByte==0 ){
+ sqlite3_free(zMbcsText);
+ zMbcsText = 0;
+ }
+ return zMbcsText;
+}
+
+/*
+** Convert a Microsoft Unicode string to a multi-byte character string,
+** using the ANSI or OEM code page.
+**
+** Space to hold the returned string is obtained from sqlite3_malloc().
+*/
+static char *winUnicodeToMbcs(LPCWSTR zWideText, int useAnsi){
+ int nByte;
+ char *zText;
+ int codepage = useAnsi ? CP_ACP : CP_OEMCP;
+
+ nByte = osWideCharToMultiByte(codepage, 0, zWideText, -1, 0, 0, 0, 0);
+ if( nByte == 0 ){
+ return 0;
+ }
+ zText = sqlite3MallocZero( nByte );
+ if( zText==0 ){
+ return 0;
+ }
+ nByte = osWideCharToMultiByte(codepage, 0, zWideText, -1, zText,
+ nByte, 0, 0);
+ if( nByte == 0 ){
+ sqlite3_free(zText);
+ zText = 0;
+ }
+ return zText;
+}
+
+/*
+** Convert a multi-byte character string to UTF-8.
+**
+** Space to hold the returned string is obtained from sqlite3_malloc().
+*/
+static char *winMbcsToUtf8(const char *zText, int useAnsi){
+ char *zTextUtf8;
+ LPWSTR zTmpWide;
+
+ zTmpWide = winMbcsToUnicode(zText, useAnsi);
+ if( zTmpWide==0 ){
+ return 0;
+ }
+ zTextUtf8 = winUnicodeToUtf8(zTmpWide);
+ sqlite3_free(zTmpWide);
+ return zTextUtf8;
+}
+
+/*
+** Convert a UTF-8 string to a multi-byte character string.
+**
+** Space to hold the returned string is obtained from sqlite3_malloc().
+*/
+static char *winUtf8ToMbcs(const char *zText, int useAnsi){
+ char *zTextMbcs;
+ LPWSTR zTmpWide;
+
+ zTmpWide = winUtf8ToUnicode(zText);
+ if( zTmpWide==0 ){
+ return 0;
+ }
+ zTextMbcs = winUnicodeToMbcs(zTmpWide, useAnsi);
+ sqlite3_free(zTmpWide);
+ return zTextMbcs;
+}
+
+/*
+** This is a public wrapper for the winUtf8ToUnicode() function.
+*/
+SQLITE_API LPWSTR sqlite3_win32_utf8_to_unicode(const char *zText){
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( !zText ){
+ (void)SQLITE_MISUSE_BKPT;
+ return 0;
+ }
+#endif
+#ifndef SQLITE_OMIT_AUTOINIT
+ if( sqlite3_initialize() ) return 0;
+#endif
+ return winUtf8ToUnicode(zText);
+}
+
+/*
+** This is a public wrapper for the winUnicodeToUtf8() function.
+*/
+SQLITE_API char *sqlite3_win32_unicode_to_utf8(LPCWSTR zWideText){
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( !zWideText ){
+ (void)SQLITE_MISUSE_BKPT;
+ return 0;
+ }
+#endif
+#ifndef SQLITE_OMIT_AUTOINIT
+ if( sqlite3_initialize() ) return 0;
+#endif
+ return winUnicodeToUtf8(zWideText);
+}
+
+/*
+** This is a public wrapper for the winMbcsToUtf8() function.
+*/
+SQLITE_API char *sqlite3_win32_mbcs_to_utf8(const char *zText){
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( !zText ){
+ (void)SQLITE_MISUSE_BKPT;
+ return 0;
+ }
+#endif
+#ifndef SQLITE_OMIT_AUTOINIT
+ if( sqlite3_initialize() ) return 0;
+#endif
+ return winMbcsToUtf8(zText, osAreFileApisANSI());
+}
+
+/*
+** This is a public wrapper for the winMbcsToUtf8() function.
+*/
+SQLITE_API char *sqlite3_win32_mbcs_to_utf8_v2(const char *zText, int useAnsi){
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( !zText ){
+ (void)SQLITE_MISUSE_BKPT;
+ return 0;
+ }
+#endif
+#ifndef SQLITE_OMIT_AUTOINIT
+ if( sqlite3_initialize() ) return 0;
+#endif
+ return winMbcsToUtf8(zText, useAnsi);
+}
+
+/*
+** This is a public wrapper for the winUtf8ToMbcs() function.
+*/
+SQLITE_API char *sqlite3_win32_utf8_to_mbcs(const char *zText){
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( !zText ){
+ (void)SQLITE_MISUSE_BKPT;
+ return 0;
+ }
+#endif
+#ifndef SQLITE_OMIT_AUTOINIT
+ if( sqlite3_initialize() ) return 0;
+#endif
+ return winUtf8ToMbcs(zText, osAreFileApisANSI());
+}
+
+/*
+** This is a public wrapper for the winUtf8ToMbcs() function.
+*/
+SQLITE_API char *sqlite3_win32_utf8_to_mbcs_v2(const char *zText, int useAnsi){
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( !zText ){
+ (void)SQLITE_MISUSE_BKPT;
+ return 0;
+ }
+#endif
+#ifndef SQLITE_OMIT_AUTOINIT
+ if( sqlite3_initialize() ) return 0;
+#endif
+ return winUtf8ToMbcs(zText, useAnsi);
+}
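+
+/* A round-trip sketch using the public wrappers above.  Both results come
+** from sqlite3_malloc() and must be released with sqlite3_free():
+**
+**   LPWSTR zWide = sqlite3_win32_utf8_to_unicode("C:/data/app.db");
+**   char *zUtf8 = zWide ? sqlite3_win32_unicode_to_utf8(zWide) : 0;
+**   sqlite3_free(zWide);
+**   sqlite3_free(zUtf8);
+*/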
+
+/*
+** This function is the same as sqlite3_win32_set_directory (below); however,
+** it accepts a UTF-8 string.
+*/
+SQLITE_API int sqlite3_win32_set_directory8(
+ unsigned long type, /* Identifier for directory being set or reset */
+ const char *zValue /* New value for directory being set or reset */
+){
+ char **ppDirectory = 0;
+#ifndef SQLITE_OMIT_AUTOINIT
+ int rc = sqlite3_initialize();
+ if( rc ) return rc;
+#endif
+ if( type==SQLITE_WIN32_DATA_DIRECTORY_TYPE ){
+ ppDirectory = &sqlite3_data_directory;
+ }else if( type==SQLITE_WIN32_TEMP_DIRECTORY_TYPE ){
+ ppDirectory = &sqlite3_temp_directory;
+ }
+ assert( !ppDirectory || type==SQLITE_WIN32_DATA_DIRECTORY_TYPE
+ || type==SQLITE_WIN32_TEMP_DIRECTORY_TYPE
+ );
+ assert( !ppDirectory || sqlite3MemdebugHasType(*ppDirectory, MEMTYPE_HEAP) );
+ if( ppDirectory ){
+ char *zCopy = 0;
+ if( zValue && zValue[0] ){
+ zCopy = sqlite3_mprintf("%s", zValue);
+ if ( zCopy==0 ){
+ return SQLITE_NOMEM_BKPT;
+ }
+ }
+ sqlite3_free(*ppDirectory);
+ *ppDirectory = zCopy;
+ return SQLITE_OK;
+ }
+ return SQLITE_ERROR;
+}
+
+/*
+** This function is the same as sqlite3_win32_set_directory (below); however,
+** it accepts a UTF-16 string.
+*/
+SQLITE_API int sqlite3_win32_set_directory16(
+ unsigned long type, /* Identifier for directory being set or reset */
+ const void *zValue /* New value for directory being set or reset */
+){
+ int rc;
+ char *zUtf8 = 0;
+ if( zValue ){
+ zUtf8 = sqlite3_win32_unicode_to_utf8(zValue);
+ if( zUtf8==0 ) return SQLITE_NOMEM_BKPT;
+ }
+ rc = sqlite3_win32_set_directory8(type, zUtf8);
+ if( zUtf8 ) sqlite3_free(zUtf8);
+ return rc;
+}
+
+/*
+** This function sets the data directory or the temporary directory based on
+** the provided arguments. The type argument must be 1 in order to set the
+** data directory or 2 in order to set the temporary directory. The zValue
+** argument is the name of the directory to use. The return value will be
+** SQLITE_OK if successful.
+*/
+SQLITE_API int sqlite3_win32_set_directory(
+ unsigned long type, /* Identifier for directory being set or reset */
+ void *zValue /* New value for directory being set or reset */
+){
+ return sqlite3_win32_set_directory16(type, zValue);
+}
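+
+/* For example, to point the data directory at a UTF-8 path and later clear
+** it again (a NULL zValue resets the setting):
+**
+**   sqlite3_win32_set_directory8(SQLITE_WIN32_DATA_DIRECTORY_TYPE,
+**                                "C:\\Users\\app\\data");
+**   ...
+**   sqlite3_win32_set_directory8(SQLITE_WIN32_DATA_DIRECTORY_TYPE, 0);
+*/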
+
+/*
+** Write a human-readable message for the error code lastErrno into the
+** buffer zBuf of size nBuf.  If no system message text is available, a
+** generic "OsError" string containing the numeric code is written instead.
+** The return value is always zero.
+*/
+static int winGetLastErrorMsg(DWORD lastErrno, int nBuf, char *zBuf){
+ /* FormatMessage returns 0 on failure. Otherwise it
+ ** returns the number of TCHARs written to the output
+ ** buffer, excluding the terminating null char.
+ */
+ DWORD dwLen = 0;
+ char *zOut = 0;
+
+ if( osIsNT() ){
+#if SQLITE_OS_WINRT
+ WCHAR zTempWide[SQLITE_WIN32_MAX_ERRMSG_CHARS+1];
+ dwLen = osFormatMessageW(FORMAT_MESSAGE_FROM_SYSTEM |
+ FORMAT_MESSAGE_IGNORE_INSERTS,
+ NULL,
+ lastErrno,
+ 0,
+ zTempWide,
+ SQLITE_WIN32_MAX_ERRMSG_CHARS,
+ 0);
+#else
+ LPWSTR zTempWide = NULL;
+ dwLen = osFormatMessageW(FORMAT_MESSAGE_ALLOCATE_BUFFER |
+ FORMAT_MESSAGE_FROM_SYSTEM |
+ FORMAT_MESSAGE_IGNORE_INSERTS,
+ NULL,
+ lastErrno,
+ 0,
+ (LPWSTR) &zTempWide,
+ 0,
+ 0);
+#endif
+ if( dwLen > 0 ){
+ /* allocate a buffer and convert to UTF8 */
+ sqlite3BeginBenignMalloc();
+ zOut = winUnicodeToUtf8(zTempWide);
+ sqlite3EndBenignMalloc();
+#if !SQLITE_OS_WINRT
+ /* free the system buffer allocated by FormatMessage */
+ osLocalFree(zTempWide);
+#endif
+ }
+ }
+#ifdef SQLITE_WIN32_HAS_ANSI
+ else{
+ char *zTemp = NULL;
+ dwLen = osFormatMessageA(FORMAT_MESSAGE_ALLOCATE_BUFFER |
+ FORMAT_MESSAGE_FROM_SYSTEM |
+ FORMAT_MESSAGE_IGNORE_INSERTS,
+ NULL,
+ lastErrno,
+ 0,
+ (LPSTR) &zTemp,
+ 0,
+ 0);
+ if( dwLen > 0 ){
+ /* allocate a buffer and convert to UTF8 */
+ sqlite3BeginBenignMalloc();
+ zOut = winMbcsToUtf8(zTemp, osAreFileApisANSI());
+ sqlite3EndBenignMalloc();
+ /* free the system buffer allocated by FormatMessage */
+ osLocalFree(zTemp);
+ }
+ }
+#endif
+ if( 0 == dwLen ){
+ sqlite3_snprintf(nBuf, zBuf, "OsError 0x%lx (%lu)", lastErrno, lastErrno);
+ }else{
+ /* copy a maximum of nBuf chars to output buffer */
+ sqlite3_snprintf(nBuf, zBuf, "%s", zOut);
+ /* free the UTF8 buffer */
+ sqlite3_free(zOut);
+ }
+ return 0;
+}
+
+/*
+** This function - winLogErrorAtLine() - is only ever called via the macro
+** winLogError().
+**
+** This routine is invoked after an error occurs in an OS function.
+** It logs a message using sqlite3_log() containing the current value of
+** error code and, if possible, the human-readable equivalent from
+** FormatMessage.
+**
+** The first argument passed to the macro should be the error code that
+** will be returned to SQLite (e.g. SQLITE_IOERR_DELETE, SQLITE_CANTOPEN).
+** The two subsequent arguments should be the name of the OS function that
+** failed and the associated file-system path, if any.
+*/
+#define winLogError(a,b,c,d) winLogErrorAtLine(a,b,c,d,__LINE__)
+static int winLogErrorAtLine(
+ int errcode, /* SQLite error code */
+ DWORD lastErrno, /* Win32 last error */
+ const char *zFunc, /* Name of OS function that failed */
+ const char *zPath, /* File path associated with error */
+ int iLine /* Source line number where error occurred */
+){
+ char zMsg[500]; /* Human readable error text */
+ int i; /* Loop counter */
+
+ zMsg[0] = 0;
+ winGetLastErrorMsg(lastErrno, sizeof(zMsg), zMsg);
+ assert( errcode!=SQLITE_OK );
+ if( zPath==0 ) zPath = "";
+ for(i=0; zMsg[i] && zMsg[i]!='\r' && zMsg[i]!='\n'; i++){}
+ zMsg[i] = 0;
+ sqlite3_log(errcode,
+ "os_win.c:%d: (%lu) %s(%s) - %s",
+ iLine, lastErrno, zFunc, zPath, zMsg
+ );
+
+ return errcode;
+}
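+
+/* A sketch of the typical call pattern for the macro above (zWide and
+** zPath are illustrative locals):
+**
+**   if( !osDeleteFileW(zWide) ){
+**     return winLogError(SQLITE_IOERR_DELETE, osGetLastError(),
+**                        "winDelete", zPath);
+**   }
+*/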
+
+/*
+** The number of times that a ReadFile(), WriteFile(), or DeleteFile()
+** call will be retried following a locking error - probably caused by
+** antivirus software.  Also the initial delay, in milliseconds, before
+** the first retry.  The delay increases linearly with each retry.
+*/
+#ifndef SQLITE_WIN32_IOERR_RETRY
+# define SQLITE_WIN32_IOERR_RETRY 10
+#endif
+#ifndef SQLITE_WIN32_IOERR_RETRY_DELAY
+# define SQLITE_WIN32_IOERR_RETRY_DELAY 25
+#endif
+static int winIoerrRetry = SQLITE_WIN32_IOERR_RETRY;
+static int winIoerrRetryDelay = SQLITE_WIN32_IOERR_RETRY_DELAY;
+
+/*
+** The "winIoerrCanRetry1" macro is used to determine if a particular I/O
+** error code obtained via GetLastError() is eligible to be retried. It
+** must accept the error code DWORD as its only argument and should return
+** non-zero if the error code is transient in nature and the operation
+** responsible for generating the original error might succeed upon being
+** retried. The argument to this macro should be a variable.
+**
+** Additionally, a macro named "winIoerrCanRetry2" may be defined. If it
+** is defined, it will be consulted only when the macro "winIoerrCanRetry1"
+** returns zero. The "winIoerrCanRetry2" macro is completely optional and
+** may be used to include additional error codes in the set that should
+** result in the failing I/O operation being retried by the caller. If
+** defined, the "winIoerrCanRetry2" macro must exhibit external semantics
+** identical to those of the "winIoerrCanRetry1" macro.
+*/
+#if !defined(winIoerrCanRetry1)
+#define winIoerrCanRetry1(a) (((a)==ERROR_ACCESS_DENIED) || \
+ ((a)==ERROR_SHARING_VIOLATION) || \
+ ((a)==ERROR_LOCK_VIOLATION) || \
+ ((a)==ERROR_DEV_NOT_EXIST) || \
+ ((a)==ERROR_NETNAME_DELETED) || \
+ ((a)==ERROR_SEM_TIMEOUT) || \
+ ((a)==ERROR_NETWORK_UNREACHABLE))
+#endif
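+
+/*
+** A sketch of what a "winIoerrCanRetry2" definition might look like; the
+** error code used here is purely illustrative:
+**
+**   #define winIoerrCanRetry2(a) ((a)==ERROR_WRITE_PROTECT)
+*/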
+
+/*
+** If a ReadFile() or WriteFile() error occurs, invoke this routine
+** to see if it should be retried. Return TRUE to retry. Return FALSE
+** to give up with an error.
+*/
+static int winRetryIoerr(int *pnRetry, DWORD *pError){
+ DWORD e = osGetLastError();
+ if( *pnRetry>=winIoerrRetry ){
+ if( pError ){
+ *pError = e;
+ }
+ return 0;
+ }
+ if( winIoerrCanRetry1(e) ){
+ sqlite3_win32_sleep(winIoerrRetryDelay*(1+*pnRetry));
+ ++*pnRetry;
+ return 1;
+ }
+#if defined(winIoerrCanRetry2)
+ else if( winIoerrCanRetry2(e) ){
+ sqlite3_win32_sleep(winIoerrRetryDelay*(1+*pnRetry));
+ ++*pnRetry;
+ return 1;
+ }
+#endif
+ if( pError ){
+ *pError = e;
+ }
+ return 0;
+}
+
+/*
+** Log an I/O error retry episode.
+*/
+static void winLogIoerr(int nRetry, int lineno){
+ if( nRetry ){
+ sqlite3_log(SQLITE_NOTICE,
+ "delayed %dms for lock/sharing conflict at line %d",
+ winIoerrRetryDelay*nRetry*(nRetry+1)/2, lineno
+ );
+ }
+}
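+
+/*
+** Worked example of the figure logged above: retry k (counting from 1)
+** sleeps winIoerrRetryDelay*k milliseconds, so nRetry retries sleep a
+** total of winIoerrRetryDelay*nRetry*(nRetry+1)/2 ms. With the default
+** 25ms delay, 3 retries add up to 25*(1+2+3) = 150 ms.
+*/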
+
+/*
+** This #if does not rely on the SQLITE_OS_WINCE define because the
+** corresponding section in "date.c" cannot use it.
+*/
+#if !defined(SQLITE_OMIT_LOCALTIME) && defined(_WIN32_WCE) && \
+ (!defined(SQLITE_MSVC_LOCALTIME_API) || !SQLITE_MSVC_LOCALTIME_API)
+/*
+** The MSVC CRT on Windows CE may not have a localtime() function.
+** So define a substitute.
+*/
+/* # include <time.h> */
+struct tm *__cdecl localtime(const time_t *t)
+{
+ static struct tm y;
+ FILETIME uTm, lTm;
+ SYSTEMTIME pTm;
+ sqlite3_int64 t64;
+ t64 = *t;
+ t64 = (t64 + 11644473600)*10000000;
+ uTm.dwLowDateTime = (DWORD)(t64 & 0xFFFFFFFF);
+ uTm.dwHighDateTime= (DWORD)(t64 >> 32);
+ osFileTimeToLocalFileTime(&uTm,&lTm);
+ osFileTimeToSystemTime(&lTm,&pTm);
+ y.tm_year = pTm.wYear - 1900;
+ y.tm_mon = pTm.wMonth - 1;
+ y.tm_wday = pTm.wDayOfWeek;
+ y.tm_mday = pTm.wDay;
+ y.tm_hour = pTm.wHour;
+ y.tm_min = pTm.wMinute;
+ y.tm_sec = pTm.wSecond;
+ return &y;
+}
+#endif
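+
+/*
+** In the substitute localtime() above, 11644473600 is the number of
+** seconds between the FILETIME epoch (1601-01-01) and the Unix epoch
+** (1970-01-01), and the multiplication by 10000000 converts seconds into
+** the 100-nanosecond intervals in which a FILETIME is expressed.
+*/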
+
+#if SQLITE_OS_WINCE
+/*************************************************************************
+** This section contains code for WinCE only.
+*/
+#define HANDLE_TO_WINFILE(a) (winFile*)&((char*)a)[-(int)offsetof(winFile,h)]
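+
+/*
+** HANDLE_TO_WINFILE recovers the winFile object containing a given handle
+** by subtracting offsetof(winFile,h) from the handle's address - the
+** usual container-of pointer arithmetic. It is only valid for handles
+** embedded in a winFile, such as those passed to winceLockFile() and
+** winceUnlockFile() below.
+*/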
+
+/*
+** Acquire a lock on the handle h
+*/
+static void winceMutexAcquire(HANDLE h){
+ DWORD dwErr;
+ do {
+ dwErr = osWaitForSingleObject(h, INFINITE);
+ } while (dwErr != WAIT_OBJECT_0 && dwErr != WAIT_ABANDONED);
+}
+/*
+** Release a lock acquired by winceMutexAcquire()
+*/
+#define winceMutexRelease(h) ReleaseMutex(h)
+
+/*
+** Create the mutex and shared memory used for locking in the file
+** descriptor pFile
+*/
+static int winceCreateLock(const char *zFilename, winFile *pFile){
+ LPWSTR zTok;
+ LPWSTR zName;
+ DWORD lastErrno;
+ BOOL bLogged = FALSE;
+ BOOL bInit = TRUE;
+
+ zName = winUtf8ToUnicode(zFilename);
+ if( zName==0 ){
+ /* out of memory */
+ return SQLITE_IOERR_NOMEM_BKPT;
+ }
+
+ /* Initialize the local lockdata */
+ memset(&pFile->local, 0, sizeof(pFile->local));
+
+ /* Replace the backslashes from the filename and lowercase it
+ ** to derive a mutex name. */
+ zTok = osCharLowerW(zName);
+ for (;*zTok;zTok++){
+ if (*zTok == '\\') *zTok = '_';
+ }
+
+ /* Create/open the named mutex */
+ pFile->hMutex = osCreateMutexW(NULL, FALSE, zName);
+ if (!pFile->hMutex){
+ pFile->lastErrno = osGetLastError();
+ sqlite3_free(zName);
+ return winLogError(SQLITE_IOERR, pFile->lastErrno,
+ "winceCreateLock1", zFilename);
+ }
+
+ /* Acquire the mutex before continuing */
+ winceMutexAcquire(pFile->hMutex);
+
+ /* Since the names of named mutexes, semaphores, file mappings, etc. are
+ ** case-sensitive, take advantage of that by uppercasing the mutex name
+ ** and using that as the shared filemapping name.
+ */
+ osCharUpperW(zName);
+ pFile->hShared = osCreateFileMappingW(INVALID_HANDLE_VALUE, NULL,
+ PAGE_READWRITE, 0, sizeof(winceLock),
+ zName);
+
+ /* Set a flag that indicates we're the first to create the memory so it
+ ** must be zero-initialized */
+ lastErrno = osGetLastError();
+ if (lastErrno == ERROR_ALREADY_EXISTS){
+ bInit = FALSE;
+ }
+
+ sqlite3_free(zName);
+
+ /* If we succeeded in making the shared memory handle, map it. */
+ if( pFile->hShared ){
+ pFile->shared = (winceLock*)osMapViewOfFile(pFile->hShared,
+ FILE_MAP_READ|FILE_MAP_WRITE, 0, 0, sizeof(winceLock));
+ /* If mapping failed, close the shared memory handle and erase it */
+ if( !pFile->shared ){
+ pFile->lastErrno = osGetLastError();
+ winLogError(SQLITE_IOERR, pFile->lastErrno,
+ "winceCreateLock2", zFilename);
+ bLogged = TRUE;
+ osCloseHandle(pFile->hShared);
+ pFile->hShared = NULL;
+ }
+ }
+
+ /* If shared memory could not be created, then close the mutex and fail */
+ if( pFile->hShared==NULL ){
+ if( !bLogged ){
+ pFile->lastErrno = lastErrno;
+ winLogError(SQLITE_IOERR, pFile->lastErrno,
+ "winceCreateLock3", zFilename);
+ bLogged = TRUE;
+ }
+ winceMutexRelease(pFile->hMutex);
+ osCloseHandle(pFile->hMutex);
+ pFile->hMutex = NULL;
+ return SQLITE_IOERR;
+ }
+
+ /* Initialize the shared memory if we're supposed to */
+ if( bInit ){
+ memset(pFile->shared, 0, sizeof(winceLock));
+ }
+
+ winceMutexRelease(pFile->hMutex);
+ return SQLITE_OK;
+}
+
+/*
+** Destroy the part of winFile that deals with wince locks
+*/
+static void winceDestroyLock(winFile *pFile){
+ if (pFile->hMutex){
+ /* Acquire the mutex */
+ winceMutexAcquire(pFile->hMutex);
+
+ /* The following blocks should probably assert in debug mode, but they
+ are here to clean up in case any locks remained open */
+ if (pFile->local.nReaders){
+ pFile->shared->nReaders --;
+ }
+ if (pFile->local.bReserved){
+ pFile->shared->bReserved = FALSE;
+ }
+ if (pFile->local.bPending){
+ pFile->shared->bPending = FALSE;
+ }
+ if (pFile->local.bExclusive){
+ pFile->shared->bExclusive = FALSE;
+ }
+
+ /* De-reference and close our copy of the shared memory handle */
+ osUnmapViewOfFile(pFile->shared);
+ osCloseHandle(pFile->hShared);
+
+ /* Done with the mutex */
+ winceMutexRelease(pFile->hMutex);
+ osCloseHandle(pFile->hMutex);
+ pFile->hMutex = NULL;
+ }
+}
+
+/*
+** An implementation of the LockFile() API of Windows for CE
+*/
+static BOOL winceLockFile(
+ LPHANDLE phFile,
+ DWORD dwFileOffsetLow,
+ DWORD dwFileOffsetHigh,
+ DWORD nNumberOfBytesToLockLow,
+ DWORD nNumberOfBytesToLockHigh
+){
+ winFile *pFile = HANDLE_TO_WINFILE(phFile);
+ BOOL bReturn = FALSE;
+
+ UNUSED_PARAMETER(dwFileOffsetHigh);
+ UNUSED_PARAMETER(nNumberOfBytesToLockHigh);
+
+ if (!pFile->hMutex) return TRUE;
+ winceMutexAcquire(pFile->hMutex);
+
+ /* Want an exclusive lock? */
+ if (dwFileOffsetLow == (DWORD)SHARED_FIRST
+ && nNumberOfBytesToLockLow == (DWORD)SHARED_SIZE){
+ if (pFile->shared->nReaders == 0 && pFile->shared->bExclusive == 0){
+ pFile->shared->bExclusive = TRUE;
+ pFile->local.bExclusive = TRUE;
+ bReturn = TRUE;
+ }
+ }
+
+ /* Want a read-only lock? */
+ else if (dwFileOffsetLow == (DWORD)SHARED_FIRST &&
+ nNumberOfBytesToLockLow == 1){
+ if (pFile->shared->bExclusive == 0){
+ pFile->local.nReaders ++;
+ if (pFile->local.nReaders == 1){
+ pFile->shared->nReaders ++;
+ }
+ bReturn = TRUE;
+ }
+ }
+
+ /* Want a pending lock? */
+ else if (dwFileOffsetLow == (DWORD)PENDING_BYTE
+ && nNumberOfBytesToLockLow == 1){
+ /* If no pending lock has been acquired, then acquire it */
+ if (pFile->shared->bPending == 0) {
+ pFile->shared->bPending = TRUE;
+ pFile->local.bPending = TRUE;
+ bReturn = TRUE;
+ }
+ }
+
+ /* Want a reserved lock? */
+ else if (dwFileOffsetLow == (DWORD)RESERVED_BYTE
+ && nNumberOfBytesToLockLow == 1){
+ if (pFile->shared->bReserved == 0) {
+ pFile->shared->bReserved = TRUE;
+ pFile->local.bReserved = TRUE;
+ bReturn = TRUE;
+ }
+ }
+
+ winceMutexRelease(pFile->hMutex);
+ return bReturn;
+}
+
+/*
+** An implementation of the UnlockFile API of Windows for CE
+*/
+static BOOL winceUnlockFile(
+ LPHANDLE phFile,
+ DWORD dwFileOffsetLow,
+ DWORD dwFileOffsetHigh,
+ DWORD nNumberOfBytesToUnlockLow,
+ DWORD nNumberOfBytesToUnlockHigh
+){
+ winFile *pFile = HANDLE_TO_WINFILE(phFile);
+ BOOL bReturn = FALSE;
+
+ UNUSED_PARAMETER(dwFileOffsetHigh);
+ UNUSED_PARAMETER(nNumberOfBytesToUnlockHigh);
+
+ if (!pFile->hMutex) return TRUE;
+ winceMutexAcquire(pFile->hMutex);
+
+ /* Releasing a reader lock or an exclusive lock */
+ if (dwFileOffsetLow == (DWORD)SHARED_FIRST){
+ /* Did we have an exclusive lock? */
+ if (pFile->local.bExclusive){
+ assert(nNumberOfBytesToUnlockLow == (DWORD)SHARED_SIZE);
+ pFile->local.bExclusive = FALSE;
+ pFile->shared->bExclusive = FALSE;
+ bReturn = TRUE;
+ }
+
+ /* Did we just have a reader lock? */
+ else if (pFile->local.nReaders){
+ assert(nNumberOfBytesToUnlockLow == (DWORD)SHARED_SIZE
+ || nNumberOfBytesToUnlockLow == 1);
+ pFile->local.nReaders --;
+ if (pFile->local.nReaders == 0)
+ {
+ pFile->shared->nReaders --;
+ }
+ bReturn = TRUE;
+ }
+ }
+
+ /* Releasing a pending lock */
+ else if (dwFileOffsetLow == (DWORD)PENDING_BYTE
+ && nNumberOfBytesToUnlockLow == 1){
+ if (pFile->local.bPending){
+ pFile->local.bPending = FALSE;
+ pFile->shared->bPending = FALSE;
+ bReturn = TRUE;
+ }
+ }
+ /* Releasing a reserved lock */
+ else if (dwFileOffsetLow == (DWORD)RESERVED_BYTE
+ && nNumberOfBytesToUnlockLow == 1){
+ if (pFile->local.bReserved) {
+ pFile->local.bReserved = FALSE;
+ pFile->shared->bReserved = FALSE;
+ bReturn = TRUE;
+ }
+ }
+
+ winceMutexRelease(pFile->hMutex);
+ return bReturn;
+}
+/*
+** End of the special code for wince
+*****************************************************************************/
+#endif /* SQLITE_OS_WINCE */
+
+/*
+** Lock a file region.
+*/
+static BOOL winLockFile(
+ LPHANDLE phFile,
+ DWORD flags,
+ DWORD offsetLow,
+ DWORD offsetHigh,
+ DWORD numBytesLow,
+ DWORD numBytesHigh
+){
+#if SQLITE_OS_WINCE
+ /*
+ ** NOTE: Windows CE is handled differently here due to its lack of the Win32
+ ** API LockFile.
+ */
+ return winceLockFile(phFile, offsetLow, offsetHigh,
+ numBytesLow, numBytesHigh);
+#else
+ if( osIsNT() ){
+ OVERLAPPED ovlp;
+ memset(&ovlp, 0, sizeof(OVERLAPPED));
+ ovlp.Offset = offsetLow;
+ ovlp.OffsetHigh = offsetHigh;
+ return osLockFileEx(*phFile, flags, 0, numBytesLow, numBytesHigh, &ovlp);
+ }else{
+ return osLockFile(*phFile, offsetLow, offsetHigh, numBytesLow,
+ numBytesHigh);
+ }
+#endif
+}
+
+/*
+** Unlock a file region.
+*/
+static BOOL winUnlockFile(
+ LPHANDLE phFile,
+ DWORD offsetLow,
+ DWORD offsetHigh,
+ DWORD numBytesLow,
+ DWORD numBytesHigh
+){
+#if SQLITE_OS_WINCE
+ /*
+ ** NOTE: Windows CE is handled differently here due to its lack of the Win32
+ ** API UnlockFile.
+ */
+ return winceUnlockFile(phFile, offsetLow, offsetHigh,
+ numBytesLow, numBytesHigh);
+#else
+ if( osIsNT() ){
+ OVERLAPPED ovlp;
+ memset(&ovlp, 0, sizeof(OVERLAPPED));
+ ovlp.Offset = offsetLow;
+ ovlp.OffsetHigh = offsetHigh;
+ return osUnlockFileEx(*phFile, 0, numBytesLow, numBytesHigh, &ovlp);
+ }else{
+ return osUnlockFile(*phFile, offsetLow, offsetHigh, numBytesLow,
+ numBytesHigh);
+ }
+#endif
+}
+
+/*****************************************************************************
+** The next group of routines implement the I/O methods specified
+** by the sqlite3_io_methods object.
+******************************************************************************/
+
+/*
+** Some Microsoft compilers lack this definition.
+*/
+#ifndef INVALID_SET_FILE_POINTER
+# define INVALID_SET_FILE_POINTER ((DWORD)-1)
+#endif
+
+/*
+** Move the current position of the file handle passed as the first
+** argument to offset iOffset within the file. If successful, return 0.
+** Otherwise, set pFile->lastErrno and return non-zero.
+*/
+static int winSeekFile(winFile *pFile, sqlite3_int64 iOffset){
+#if !SQLITE_OS_WINRT
+ LONG upperBits; /* Most sig. 32 bits of new offset */
+ LONG lowerBits; /* Least sig. 32 bits of new offset */
+ DWORD dwRet; /* Value returned by SetFilePointer() */
+ DWORD lastErrno; /* Value returned by GetLastError() */
+
+ OSTRACE(("SEEK file=%p, offset=%lld\n", pFile->h, iOffset));
+
+ upperBits = (LONG)((iOffset>>32) & 0x7fffffff);
+ lowerBits = (LONG)(iOffset & 0xffffffff);
+
+ /* API oddity: If successful, SetFilePointer() returns a DWORD
+ ** containing the lower 32-bits of the new file-offset. Or, if it fails,
+ ** it returns INVALID_SET_FILE_POINTER. However, according to MSDN,
+ ** INVALID_SET_FILE_POINTER may also be a valid new offset. So to determine
+ ** whether an error has actually occurred, it is also necessary to call
+ ** GetLastError().
+ */
+ dwRet = osSetFilePointer(pFile->h, lowerBits, &upperBits, FILE_BEGIN);
+
+ if( (dwRet==INVALID_SET_FILE_POINTER
+ && ((lastErrno = osGetLastError())!=NO_ERROR)) ){
+ pFile->lastErrno = lastErrno;
+ winLogError(SQLITE_IOERR_SEEK, pFile->lastErrno,
+ "winSeekFile", pFile->zPath);
+ OSTRACE(("SEEK file=%p, rc=SQLITE_IOERR_SEEK\n", pFile->h));
+ return 1;
+ }
+
+ OSTRACE(("SEEK file=%p, rc=SQLITE_OK\n", pFile->h));
+ return 0;
+#else
+ /*
+ ** Same as above, except that this implementation works for WinRT.
+ */
+
+ LARGE_INTEGER x; /* The new offset */
+ BOOL bRet; /* Value returned by SetFilePointerEx() */
+
+ x.QuadPart = iOffset;
+ bRet = osSetFilePointerEx(pFile->h, x, 0, FILE_BEGIN);
+
+ if(!bRet){
+ pFile->lastErrno = osGetLastError();
+ winLogError(SQLITE_IOERR_SEEK, pFile->lastErrno,
+ "winSeekFile", pFile->zPath);
+ OSTRACE(("SEEK file=%p, rc=SQLITE_IOERR_SEEK\n", pFile->h));
+ return 1;
+ }
+
+ OSTRACE(("SEEK file=%p, rc=SQLITE_OK\n", pFile->h));
+ return 0;
+#endif
+}
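+
+/*
+** Worked example of the 64-bit offset split above: for
+** iOffset = 0x100000200, lowerBits is 0x00000200 and upperBits is 0x1,
+** which SetFilePointer() recombines into the new file position.
+*/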
+
+#if SQLITE_MAX_MMAP_SIZE>0
+/* Forward references to VFS helper methods used for memory mapped files */
+static int winMapfile(winFile*, sqlite3_int64);
+static int winUnmapfile(winFile*);
+#endif
+
+/*
+** Close a file.
+**
+** It is reported that an attempt to close a handle might sometimes
+** fail. This is a very unreasonable result, but Windows is notorious
+** for being unreasonable so I do not doubt that it might happen. If
+** the close fails, we pause for 100 milliseconds and try again. As
+** many as MX_CLOSE_ATTEMPT attempts to close the handle are made before
+** giving up and returning an error.
+*/
+#define MX_CLOSE_ATTEMPT 3
+static int winClose(sqlite3_file *id){
+ int rc, cnt = 0;
+ winFile *pFile = (winFile*)id;
+
+ assert( id!=0 );
+#ifndef SQLITE_OMIT_WAL
+ assert( pFile->pShm==0 );
+#endif
+ assert( pFile->h!=NULL && pFile->h!=INVALID_HANDLE_VALUE );
+ OSTRACE(("CLOSE pid=%lu, pFile=%p, file=%p\n",
+ osGetCurrentProcessId(), pFile, pFile->h));
+
+#if SQLITE_MAX_MMAP_SIZE>0
+ winUnmapfile(pFile);
+#endif
+
+ do{
+ rc = osCloseHandle(pFile->h);
+ /* SimulateIOError( rc=0; cnt=MX_CLOSE_ATTEMPT; ); */
+ }while( rc==0 && ++cnt < MX_CLOSE_ATTEMPT && (sqlite3_win32_sleep(100), 1) );
+#if SQLITE_OS_WINCE
+#define WINCE_DELETION_ATTEMPTS 3
+ {
+ winVfsAppData *pAppData = (winVfsAppData*)pFile->pVfs->pAppData;
+ if( pAppData==NULL || !pAppData->bNoLock ){
+ winceDestroyLock(pFile);
+ }
+ }
+ if( pFile->zDeleteOnClose ){
+ int cnt = 0;
+ while(
+ osDeleteFileW(pFile->zDeleteOnClose)==0
+ && osGetFileAttributesW(pFile->zDeleteOnClose)!=0xffffffff
+ && cnt++ < WINCE_DELETION_ATTEMPTS
+ ){
+ sqlite3_win32_sleep(100); /* Wait a little before trying again */
+ }
+ sqlite3_free(pFile->zDeleteOnClose);
+ }
+#endif
+ if( rc ){
+ pFile->h = NULL;
+ }
+ OpenCounter(-1);
+ OSTRACE(("CLOSE pid=%lu, pFile=%p, file=%p, rc=%s\n",
+ osGetCurrentProcessId(), pFile, pFile->h, rc ? "ok" : "failed"));
+ return rc ? SQLITE_OK
+ : winLogError(SQLITE_IOERR_CLOSE, osGetLastError(),
+ "winClose", pFile->zPath);
+}
+
+/*
+** Read data from a file into a buffer. Return SQLITE_OK if all
+** bytes were read successfully and SQLITE_IOERR if anything goes
+** wrong.
+*/
+static int winRead(
+ sqlite3_file *id, /* File to read from */
+ void *pBuf, /* Write content into this buffer */
+ int amt, /* Number of bytes to read */
+ sqlite3_int64 offset /* Begin reading at this offset */
+){
+#if !SQLITE_OS_WINCE && !defined(SQLITE_WIN32_NO_OVERLAPPED)
+ OVERLAPPED overlapped; /* The offset for ReadFile. */
+#endif
+ winFile *pFile = (winFile*)id; /* file handle */
+ DWORD nRead; /* Number of bytes actually read from file */
+ int nRetry = 0; /* Number of retries */
+
+ assert( id!=0 );
+ assert( amt>0 );
+ assert( offset>=0 );
+ SimulateIOError(return SQLITE_IOERR_READ);
+ OSTRACE(("READ pid=%lu, pFile=%p, file=%p, buffer=%p, amount=%d, "
+ "offset=%lld, lock=%d\n", osGetCurrentProcessId(), pFile,
+ pFile->h, pBuf, amt, offset, pFile->locktype));
+
+#if SQLITE_MAX_MMAP_SIZE>0
+ /* Deal with as much of this read request as possible by transferring
+ ** data from the memory mapping using memcpy(). */
+ if( offset<pFile->mmapSize ){
+ if( offset+amt <= pFile->mmapSize ){
+ memcpy(pBuf, &((u8 *)(pFile->pMapRegion))[offset], amt);
+ OSTRACE(("READ-MMAP pid=%lu, pFile=%p, file=%p, rc=SQLITE_OK\n",
+ osGetCurrentProcessId(), pFile, pFile->h));
+ return SQLITE_OK;
+ }else{
+ int nCopy = (int)(pFile->mmapSize - offset);
+ memcpy(pBuf, &((u8 *)(pFile->pMapRegion))[offset], nCopy);
+ pBuf = &((u8 *)pBuf)[nCopy];
+ amt -= nCopy;
+ offset += nCopy;
+ }
+ }
+#endif
+
+#if SQLITE_OS_WINCE || defined(SQLITE_WIN32_NO_OVERLAPPED)
+ if( winSeekFile(pFile, offset) ){
+ OSTRACE(("READ pid=%lu, pFile=%p, file=%p, rc=SQLITE_FULL\n",
+ osGetCurrentProcessId(), pFile, pFile->h));
+ return SQLITE_FULL;
+ }
+ while( !osReadFile(pFile->h, pBuf, amt, &nRead, 0) ){
+#else
+ memset(&overlapped, 0, sizeof(OVERLAPPED));
+ overlapped.Offset = (LONG)(offset & 0xffffffff);
+ overlapped.OffsetHigh = (LONG)((offset>>32) & 0x7fffffff);
+ while( !osReadFile(pFile->h, pBuf, amt, &nRead, &overlapped) &&
+ osGetLastError()!=ERROR_HANDLE_EOF ){
+#endif
+ DWORD lastErrno;
+ if( winRetryIoerr(&nRetry, &lastErrno) ) continue;
+ pFile->lastErrno = lastErrno;
+ OSTRACE(("READ pid=%lu, pFile=%p, file=%p, rc=SQLITE_IOERR_READ\n",
+ osGetCurrentProcessId(), pFile, pFile->h));
+ return winLogError(SQLITE_IOERR_READ, pFile->lastErrno,
+ "winRead", pFile->zPath);
+ }
+ winLogIoerr(nRetry, __LINE__);
+ if( nRead<(DWORD)amt ){
+ /* Unread parts of the buffer must be zero-filled */
+ memset(&((char*)pBuf)[nRead], 0, amt-nRead);
+ OSTRACE(("READ pid=%lu, pFile=%p, file=%p, rc=SQLITE_IOERR_SHORT_READ\n",
+ osGetCurrentProcessId(), pFile, pFile->h));
+ return SQLITE_IOERR_SHORT_READ;
+ }
+
+ OSTRACE(("READ pid=%lu, pFile=%p, file=%p, rc=SQLITE_OK\n",
+ osGetCurrentProcessId(), pFile, pFile->h));
+ return SQLITE_OK;
+}
+
+/*
+** Write data from a buffer into a file. Return SQLITE_OK on success
+** or some other error code on failure.
+*/
+static int winWrite(
+ sqlite3_file *id, /* File to write into */
+ const void *pBuf, /* The bytes to be written */
+ int amt, /* Number of bytes to write */
+ sqlite3_int64 offset /* Offset into the file to begin writing at */
+){
+ int rc = 0; /* True if an error has occurred, else false */
+ winFile *pFile = (winFile*)id; /* File handle */
+ int nRetry = 0; /* Number of retries */
+
+ assert( amt>0 );
+ assert( pFile );
+ SimulateIOError(return SQLITE_IOERR_WRITE);
+ SimulateDiskfullError(return SQLITE_FULL);
+
+ OSTRACE(("WRITE pid=%lu, pFile=%p, file=%p, buffer=%p, amount=%d, "
+ "offset=%lld, lock=%d\n", osGetCurrentProcessId(), pFile,
+ pFile->h, pBuf, amt, offset, pFile->locktype));
+
+#if defined(SQLITE_MMAP_READWRITE) && SQLITE_MAX_MMAP_SIZE>0
+ /* Deal with as much of this write request as possible by transferring
+ ** data from the memory mapping using memcpy(). */
+ if( offset<pFile->mmapSize ){
+ if( offset+amt <= pFile->mmapSize ){
+ memcpy(&((u8 *)(pFile->pMapRegion))[offset], pBuf, amt);
+ OSTRACE(("WRITE-MMAP pid=%lu, pFile=%p, file=%p, rc=SQLITE_OK\n",
+ osGetCurrentProcessId(), pFile, pFile->h));
+ return SQLITE_OK;
+ }else{
+ int nCopy = (int)(pFile->mmapSize - offset);
+ memcpy(&((u8 *)(pFile->pMapRegion))[offset], pBuf, nCopy);
+ pBuf = &((u8 *)pBuf)[nCopy];
+ amt -= nCopy;
+ offset += nCopy;
+ }
+ }
+#endif
+
+#if SQLITE_OS_WINCE || defined(SQLITE_WIN32_NO_OVERLAPPED)
+ rc = winSeekFile(pFile, offset);
+ if( rc==0 ){
+#else
+ {
+#endif
+#if !SQLITE_OS_WINCE && !defined(SQLITE_WIN32_NO_OVERLAPPED)
+ OVERLAPPED overlapped; /* The offset for WriteFile. */
+#endif
+ u8 *aRem = (u8 *)pBuf; /* Data yet to be written */
+ int nRem = amt; /* Number of bytes yet to be written */
+ DWORD nWrite; /* Bytes written by each WriteFile() call */
+ DWORD lastErrno = NO_ERROR; /* Value returned by GetLastError() */
+
+#if !SQLITE_OS_WINCE && !defined(SQLITE_WIN32_NO_OVERLAPPED)
+ memset(&overlapped, 0, sizeof(OVERLAPPED));
+ overlapped.Offset = (LONG)(offset & 0xffffffff);
+ overlapped.OffsetHigh = (LONG)((offset>>32) & 0x7fffffff);
+#endif
+
+ while( nRem>0 ){
+#if SQLITE_OS_WINCE || defined(SQLITE_WIN32_NO_OVERLAPPED)
+ if( !osWriteFile(pFile->h, aRem, nRem, &nWrite, 0) ){
+#else
+ if( !osWriteFile(pFile->h, aRem, nRem, &nWrite, &overlapped) ){
+#endif
+ if( winRetryIoerr(&nRetry, &lastErrno) ) continue;
+ break;
+ }
+ assert( nWrite==0 || nWrite<=(DWORD)nRem );
+ if( nWrite==0 || nWrite>(DWORD)nRem ){
+ lastErrno = osGetLastError();
+ break;
+ }
+#if !SQLITE_OS_WINCE && !defined(SQLITE_WIN32_NO_OVERLAPPED)
+ offset += nWrite;
+ overlapped.Offset = (LONG)(offset & 0xffffffff);
+ overlapped.OffsetHigh = (LONG)((offset>>32) & 0x7fffffff);
+#endif
+ aRem += nWrite;
+ nRem -= nWrite;
+ }
+ if( nRem>0 ){
+ pFile->lastErrno = lastErrno;
+ rc = 1;
+ }
+ }
+
+ if( rc ){
+ if( ( pFile->lastErrno==ERROR_HANDLE_DISK_FULL )
+ || ( pFile->lastErrno==ERROR_DISK_FULL )){
+ OSTRACE(("WRITE pid=%lu, pFile=%p, file=%p, rc=SQLITE_FULL\n",
+ osGetCurrentProcessId(), pFile, pFile->h));
+ return winLogError(SQLITE_FULL, pFile->lastErrno,
+ "winWrite1", pFile->zPath);
+ }
+ OSTRACE(("WRITE pid=%lu, pFile=%p, file=%p, rc=SQLITE_IOERR_WRITE\n",
+ osGetCurrentProcessId(), pFile, pFile->h));
+ return winLogError(SQLITE_IOERR_WRITE, pFile->lastErrno,
+ "winWrite2", pFile->zPath);
+ }else{
+ winLogIoerr(nRetry, __LINE__);
+ }
+ OSTRACE(("WRITE pid=%lu, pFile=%p, file=%p, rc=SQLITE_OK\n",
+ osGetCurrentProcessId(), pFile, pFile->h));
+ return SQLITE_OK;
+}
+
+/*
+** Truncate an open file to a specified size
+*/
+static int winTruncate(sqlite3_file *id, sqlite3_int64 nByte){
+ winFile *pFile = (winFile*)id; /* File handle object */
+ int rc = SQLITE_OK; /* Return code for this function */
+ DWORD lastErrno;
+#if SQLITE_MAX_MMAP_SIZE>0
+ sqlite3_int64 oldMmapSize;
+ if( pFile->nFetchOut>0 ){
+ /* File truncation is a no-op if there are outstanding memory mapped
+ ** pages. This is because truncating the file means temporarily unmapping
+ ** the file, and that might delete memory out from under existing cursors.
+ **
+ ** This can result in incremental vacuum not truncating the file,
+ ** if there is an active read cursor when the incremental vacuum occurs.
+ ** No real harm comes of this - the database file is not corrupted,
+ ** though some folks might complain that the file is bigger than it
+ ** needs to be.
+ **
+ ** The only feasible work-around is to defer the truncation until after
+ ** all references to memory-mapped content are closed. That is doable,
+ ** but involves adding a few branches in the common write code path which
+ ** could slow down normal operations slightly. Hence, we have decided for
+ ** now to simply make truncations a no-op if there are pending reads. We
+ ** can maybe revisit this decision in the future.
+ */
+ return SQLITE_OK;
+ }
+#endif
+
+ assert( pFile );
+ SimulateIOError(return SQLITE_IOERR_TRUNCATE);
+ OSTRACE(("TRUNCATE pid=%lu, pFile=%p, file=%p, size=%lld, lock=%d\n",
+ osGetCurrentProcessId(), pFile, pFile->h, nByte, pFile->locktype));
+
+ /* If the user has configured a chunk-size for this file, truncate the
+ ** file so that it consists of an integer number of chunks (i.e. the
+ ** actual file size after the operation may be larger than the requested
+ ** size).
+ */
+ if( pFile->szChunk>0 ){
+ nByte = ((nByte + pFile->szChunk - 1)/pFile->szChunk) * pFile->szChunk;
+ }
+
+#if SQLITE_MAX_MMAP_SIZE>0
+ if( pFile->pMapRegion ){
+ oldMmapSize = pFile->mmapSize;
+ }else{
+ oldMmapSize = 0;
+ }
+ winUnmapfile(pFile);
+#endif
+
+ /* SetEndOfFile() returns non-zero when successful, or zero when it fails. */
+ if( winSeekFile(pFile, nByte) ){
+ rc = winLogError(SQLITE_IOERR_TRUNCATE, pFile->lastErrno,
+ "winTruncate1", pFile->zPath);
+ }else if( 0==osSetEndOfFile(pFile->h) &&
+ ((lastErrno = osGetLastError())!=ERROR_USER_MAPPED_FILE) ){
+ pFile->lastErrno = lastErrno;
+ rc = winLogError(SQLITE_IOERR_TRUNCATE, pFile->lastErrno,
+ "winTruncate2", pFile->zPath);
+ }
+
+#if SQLITE_MAX_MMAP_SIZE>0
+ if( rc==SQLITE_OK && oldMmapSize>0 ){
+ if( oldMmapSize>nByte ){
+ winMapfile(pFile, -1);
+ }else{
+ winMapfile(pFile, oldMmapSize);
+ }
+ }
+#endif
+
+ OSTRACE(("TRUNCATE pid=%lu, pFile=%p, file=%p, rc=%s\n",
+ osGetCurrentProcessId(), pFile, pFile->h, sqlite3ErrName(rc)));
+ return rc;
+}
+
+#ifdef SQLITE_TEST
+/*
+** Count the number of fullsyncs and normal syncs. This is used to test
+** that syncs and fullsyncs are occurring at the right times.
+*/
+SQLITE_API int sqlite3_sync_count = 0;
+SQLITE_API int sqlite3_fullsync_count = 0;
+#endif
+
+/*
+** Make sure all writes to a particular file are committed to disk.
+*/
+static int winSync(sqlite3_file *id, int flags){
+#ifndef SQLITE_NO_SYNC
+ /*
+ ** Used only when SQLITE_NO_SYNC is not defined.
+ */
+ BOOL rc;
+#endif
+#if !defined(NDEBUG) || !defined(SQLITE_NO_SYNC) || \
+ defined(SQLITE_HAVE_OS_TRACE)
+ /*
+ ** Used when SQLITE_NO_SYNC is not defined and by the assert() and/or
+ ** OSTRACE() macros.
+ */
+ winFile *pFile = (winFile*)id;
+#else
+ UNUSED_PARAMETER(id);
+#endif
+
+ assert( pFile );
+ /* Check that one of SQLITE_SYNC_NORMAL or FULL was passed */
+ assert((flags&0x0F)==SQLITE_SYNC_NORMAL
+ || (flags&0x0F)==SQLITE_SYNC_FULL
+ );
+
+ /* Unix cannot return SQLITE_FULL from here, but some other systems may.
+ ** This line is here to test that doing so does not cause any problems.
+ */
+ SimulateDiskfullError( return SQLITE_FULL );
+
+ OSTRACE(("SYNC pid=%lu, pFile=%p, file=%p, flags=%x, lock=%d\n",
+ osGetCurrentProcessId(), pFile, pFile->h, flags,
+ pFile->locktype));
+
+#ifndef SQLITE_TEST
+ UNUSED_PARAMETER(flags);
+#else
+ if( (flags&0x0F)==SQLITE_SYNC_FULL ){
+ sqlite3_fullsync_count++;
+ }
+ sqlite3_sync_count++;
+#endif
+
+ /* If we compiled with the SQLITE_NO_SYNC flag, then syncing is a
+ ** no-op
+ */
+#ifdef SQLITE_NO_SYNC
+ OSTRACE(("SYNC-NOP pid=%lu, pFile=%p, file=%p, rc=SQLITE_OK\n",
+ osGetCurrentProcessId(), pFile, pFile->h));
+ return SQLITE_OK;
+#else
+#if SQLITE_MAX_MMAP_SIZE>0
+ if( pFile->pMapRegion ){
+ if( osFlushViewOfFile(pFile->pMapRegion, 0) ){
+ OSTRACE(("SYNC-MMAP pid=%lu, pFile=%p, pMapRegion=%p, "
+ "rc=SQLITE_OK\n", osGetCurrentProcessId(),
+ pFile, pFile->pMapRegion));
+ }else{
+ pFile->lastErrno = osGetLastError();
+ OSTRACE(("SYNC-MMAP pid=%lu, pFile=%p, pMapRegion=%p, "
+ "rc=SQLITE_IOERR_MMAP\n", osGetCurrentProcessId(),
+ pFile, pFile->pMapRegion));
+ return winLogError(SQLITE_IOERR_MMAP, pFile->lastErrno,
+ "winSync1", pFile->zPath);
+ }
+ }
+#endif
+ rc = osFlushFileBuffers(pFile->h);
+ SimulateIOError( rc=FALSE );
+ if( rc ){
+ OSTRACE(("SYNC pid=%lu, pFile=%p, file=%p, rc=SQLITE_OK\n",
+ osGetCurrentProcessId(), pFile, pFile->h));
+ return SQLITE_OK;
+ }else{
+ pFile->lastErrno = osGetLastError();
+ OSTRACE(("SYNC pid=%lu, pFile=%p, file=%p, rc=SQLITE_IOERR_FSYNC\n",
+ osGetCurrentProcessId(), pFile, pFile->h));
+ return winLogError(SQLITE_IOERR_FSYNC, pFile->lastErrno,
+ "winSync2", pFile->zPath);
+ }
+#endif
+}
+
+/*
+** Determine the current size of a file in bytes
+*/
+static int winFileSize(sqlite3_file *id, sqlite3_int64 *pSize){
+ winFile *pFile = (winFile*)id;
+ int rc = SQLITE_OK;
+
+ assert( id!=0 );
+ assert( pSize!=0 );
+ SimulateIOError(return SQLITE_IOERR_FSTAT);
+ OSTRACE(("SIZE file=%p, pSize=%p\n", pFile->h, pSize));
+
+#if SQLITE_OS_WINRT
+ {
+ FILE_STANDARD_INFO info;
+ if( osGetFileInformationByHandleEx(pFile->h, FileStandardInfo,
+ &info, sizeof(info)) ){
+ *pSize = info.EndOfFile.QuadPart;
+ }else{
+ pFile->lastErrno = osGetLastError();
+ rc = winLogError(SQLITE_IOERR_FSTAT, pFile->lastErrno,
+ "winFileSize", pFile->zPath);
+ }
+ }
+#else
+ {
+ DWORD upperBits;
+ DWORD lowerBits;
+ DWORD lastErrno;
+
+ lowerBits = osGetFileSize(pFile->h, &upperBits);
+ *pSize = (((sqlite3_int64)upperBits)<<32) + lowerBits;
+ if( (lowerBits == INVALID_FILE_SIZE)
+ && ((lastErrno = osGetLastError())!=NO_ERROR) ){
+ pFile->lastErrno = lastErrno;
+ rc = winLogError(SQLITE_IOERR_FSTAT, pFile->lastErrno,
+ "winFileSize", pFile->zPath);
+ }
+ }
+#endif
+ OSTRACE(("SIZE file=%p, pSize=%p, *pSize=%lld, rc=%s\n",
+ pFile->h, pSize, *pSize, sqlite3ErrName(rc)));
+ return rc;
+}
+
+/*
+** LOCKFILE_FAIL_IMMEDIATELY is undefined on some Windows systems.
+*/
+#ifndef LOCKFILE_FAIL_IMMEDIATELY
+# define LOCKFILE_FAIL_IMMEDIATELY 1
+#endif
+
+#ifndef LOCKFILE_EXCLUSIVE_LOCK
+# define LOCKFILE_EXCLUSIVE_LOCK 2
+#endif
+
+/*
+** Historically, SQLite has used both the LockFile and LockFileEx functions.
+** When the LockFile function was used, it was always expected to fail
+** immediately if the lock could not be obtained. Also, it was always
+** expected to obtain an exclusive lock. These flags are used with the
+** LockFileEx function and reflect those expectations; therefore, they
+** should not be changed.
+*/
+#ifndef SQLITE_LOCKFILE_FLAGS
+# define SQLITE_LOCKFILE_FLAGS (LOCKFILE_FAIL_IMMEDIATELY | \
+ LOCKFILE_EXCLUSIVE_LOCK)
+#endif
+
+/*
+** Currently, SQLite never calls the LockFileEx function without wanting the
+** call to fail immediately if the lock cannot be obtained.
+*/
+#ifndef SQLITE_LOCKFILEEX_FLAGS
+# define SQLITE_LOCKFILEEX_FLAGS (LOCKFILE_FAIL_IMMEDIATELY)
+#endif
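+
+/*
+** With the Windows values of these flags, SQLITE_LOCKFILE_FLAGS works out
+** to LOCKFILE_FAIL_IMMEDIATELY|LOCKFILE_EXCLUSIVE_LOCK (0x1|0x2 == 0x3)
+** and SQLITE_LOCKFILEEX_FLAGS to LOCKFILE_FAIL_IMMEDIATELY (0x1), unless
+** overridden at compile time.
+*/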
+
+/*
+** Acquire a reader lock.
+** Different API routines are called depending on whether this is
+** Win9x or WinNT.
+*/
+static int winGetReadLock(winFile *pFile){
+ int res;
+ OSTRACE(("READ-LOCK file=%p, lock=%d\n", pFile->h, pFile->locktype));
+ if( osIsNT() ){
+#if SQLITE_OS_WINCE
+ /*
+ ** NOTE: Windows CE is handled differently here due to its lack of the Win32
+ ** API LockFileEx.
+ */
+ res = winceLockFile(&pFile->h, SHARED_FIRST, 0, 1, 0);
+#else
+ res = winLockFile(&pFile->h, SQLITE_LOCKFILEEX_FLAGS, SHARED_FIRST, 0,
+ SHARED_SIZE, 0);
+#endif
+ }
+#ifdef SQLITE_WIN32_HAS_ANSI
+ else{
+ int lk;
+ sqlite3_randomness(sizeof(lk), &lk);
+ pFile->sharedLockByte = (short)((lk & 0x7fffffff)%(SHARED_SIZE - 1));
+ res = winLockFile(&pFile->h, SQLITE_LOCKFILE_FLAGS,
+ SHARED_FIRST+pFile->sharedLockByte, 0, 1, 0);
+ }
+#endif
+ if( res == 0 ){
+ pFile->lastErrno = osGetLastError();
+ /* No need to log a failure to lock */
+ }
+ OSTRACE(("READ-LOCK file=%p, result=%d\n", pFile->h, res));
+ return res;
+}
+
+/*
+** Undo a readlock
+*/
+static int winUnlockReadLock(winFile *pFile){
+ int res;
+ DWORD lastErrno;
+ OSTRACE(("READ-UNLOCK file=%p, lock=%d\n", pFile->h, pFile->locktype));
+ if( osIsNT() ){
+ res = winUnlockFile(&pFile->h, SHARED_FIRST, 0, SHARED_SIZE, 0);
+ }
+#ifdef SQLITE_WIN32_HAS_ANSI
+ else{
+ res = winUnlockFile(&pFile->h, SHARED_FIRST+pFile->sharedLockByte, 0, 1, 0);
+ }
+#endif
+ if( res==0 && ((lastErrno = osGetLastError())!=ERROR_NOT_LOCKED) ){
+ pFile->lastErrno = lastErrno;
+ winLogError(SQLITE_IOERR_UNLOCK, pFile->lastErrno,
+ "winUnlockReadLock", pFile->zPath);
+ }
+ OSTRACE(("READ-UNLOCK file=%p, result=%d\n", pFile->h, res));
+ return res;
+}
+
+/*
+** Lock the file with the lock specified by parameter locktype - one
+** of the following:
+**
+** (1) SHARED_LOCK
+** (2) RESERVED_LOCK
+** (3) PENDING_LOCK
+** (4) EXCLUSIVE_LOCK
+**
+** Sometimes when requesting one lock state, additional lock states
+** are inserted in between. The locking might fail on one of the later
+** transitions, leaving the lock state different from what it started at
+** but still short of its goal. The following chart shows the allowed
+** transitions and the inserted intermediate states:
+**
+** UNLOCKED -> SHARED
+** SHARED -> RESERVED
+** SHARED -> (PENDING) -> EXCLUSIVE
+** RESERVED -> (PENDING) -> EXCLUSIVE
+** PENDING -> EXCLUSIVE
+**
+** This routine will only increase a lock. The winUnlock() routine
+** erases all locks at once and returns us immediately to locking level 0.
+** It is not possible to lower the locking level one step at a time. You
+** must go straight to locking level 0.
+*/
+static int winLock(sqlite3_file *id, int locktype){
+ int rc = SQLITE_OK; /* Return code from subroutines */
+ int res = 1; /* Result of a Windows lock call */
+ int newLocktype; /* Set pFile->locktype to this value before exiting */
+ int gotPendingLock = 0;/* True if we acquired a PENDING lock this time */
+ winFile *pFile = (winFile*)id;
+ DWORD lastErrno = NO_ERROR;
+
+ assert( id!=0 );
+ OSTRACE(("LOCK file=%p, oldLock=%d(%d), newLock=%d\n",
+ pFile->h, pFile->locktype, pFile->sharedLockByte, locktype));
+
+ /* If there is already a lock of this type or more restrictive on the
+ ** OsFile, do nothing. Don't use the end_lock: exit path, as
+ ** sqlite3OsEnterMutex() hasn't been called yet.
+ */
+ if( pFile->locktype>=locktype ){
+ OSTRACE(("LOCK-HELD file=%p, rc=SQLITE_OK\n", pFile->h));
+ return SQLITE_OK;
+ }
+
+ /* Do not allow any kind of write-lock on a read-only database
+ */
+ if( (pFile->ctrlFlags & WINFILE_RDONLY)!=0 && locktype>=RESERVED_LOCK ){
+ return SQLITE_IOERR_LOCK;
+ }
+
+ /* Make sure the locking sequence is correct
+ */
+ assert( pFile->locktype!=NO_LOCK || locktype==SHARED_LOCK );
+ assert( locktype!=PENDING_LOCK );
+ assert( locktype!=RESERVED_LOCK || pFile->locktype==SHARED_LOCK );
+
+ /* Lock the PENDING_LOCK byte if we need to acquire a PENDING lock or
+ ** a SHARED lock. If we are acquiring a SHARED lock, the acquisition of
+ ** the PENDING_LOCK byte is temporary.
+ */
+ newLocktype = pFile->locktype;
+ if( pFile->locktype==NO_LOCK
+ || (locktype==EXCLUSIVE_LOCK && pFile->locktype<=RESERVED_LOCK)
+ ){
+ int cnt = 3;
+ while( cnt-->0 && (res = winLockFile(&pFile->h, SQLITE_LOCKFILE_FLAGS,
+ PENDING_BYTE, 0, 1, 0))==0 ){
+ /* Try 3 times to get the pending lock. This is needed to work
+ ** around problems caused by indexing and/or anti-virus software on
+ ** Windows systems.
+ ** If you are using this code as a model for alternative VFSes, do not
+ ** copy this retry logic. It is a hack intended for Windows only.
+ */
+ lastErrno = osGetLastError();
+ OSTRACE(("LOCK-PENDING-FAIL file=%p, count=%d, result=%d\n",
+ pFile->h, cnt, res));
+ if( lastErrno==ERROR_INVALID_HANDLE ){
+ pFile->lastErrno = lastErrno;
+ rc = SQLITE_IOERR_LOCK;
+ OSTRACE(("LOCK-FAIL file=%p, count=%d, rc=%s\n",
+ pFile->h, cnt, sqlite3ErrName(rc)));
+ return rc;
+ }
+ if( cnt ) sqlite3_win32_sleep(1);
+ }
+ gotPendingLock = res;
+ if( !res ){
+ lastErrno = osGetLastError();
+ }
+ }
+
+ /* Acquire a shared lock
+ */
+ if( locktype==SHARED_LOCK && res ){
+ assert( pFile->locktype==NO_LOCK );
+ res = winGetReadLock(pFile);
+ if( res ){
+ newLocktype = SHARED_LOCK;
+ }else{
+ lastErrno = osGetLastError();
+ }
+ }
+
+ /* Acquire a RESERVED lock
+ */
+ if( locktype==RESERVED_LOCK && res ){
+ assert( pFile->locktype==SHARED_LOCK );
+ res = winLockFile(&pFile->h, SQLITE_LOCKFILE_FLAGS, RESERVED_BYTE, 0, 1, 0);
+ if( res ){
+ newLocktype = RESERVED_LOCK;
+ }else{
+ lastErrno = osGetLastError();
+ }
+ }
+
+ /* Acquire a PENDING lock
+ */
+ if( locktype==EXCLUSIVE_LOCK && res ){
+ newLocktype = PENDING_LOCK;
+ gotPendingLock = 0;
+ }
+
+ /* Acquire an EXCLUSIVE lock
+ */
+ if( locktype==EXCLUSIVE_LOCK && res ){
+ assert( pFile->locktype>=SHARED_LOCK );
+ res = winUnlockReadLock(pFile);
+ res = winLockFile(&pFile->h, SQLITE_LOCKFILE_FLAGS, SHARED_FIRST, 0,
+ SHARED_SIZE, 0);
+ if( res ){
+ newLocktype = EXCLUSIVE_LOCK;
+ }else{
+ lastErrno = osGetLastError();
+ winGetReadLock(pFile);
+ }
+ }
+
+ /* If we are holding a PENDING lock that ought to be released, then
+ ** release it now.
+ */
+ if( gotPendingLock && locktype==SHARED_LOCK ){
+ winUnlockFile(&pFile->h, PENDING_BYTE, 0, 1, 0);
+ }
+
+ /* Update the state of the lock held in the file descriptor, then
+ ** return the appropriate result code.
+ */
+ if( res ){
+ rc = SQLITE_OK;
+ }else{
+ pFile->lastErrno = lastErrno;
+ rc = SQLITE_BUSY;
+ OSTRACE(("LOCK-FAIL file=%p, wanted=%d, got=%d\n",
+ pFile->h, locktype, newLocktype));
+ }
+ pFile->locktype = (u8)newLocktype;
+ OSTRACE(("LOCK file=%p, lock=%d, rc=%s\n",
+ pFile->h, pFile->locktype, sqlite3ErrName(rc)));
+ return rc;
+}
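+
+/*
+** For illustration, a write transaction climbs the lock ladder with
+** successive winLock() calls, per the chart above:
+**
+**   winLock(id, SHARED_LOCK);      (UNLOCKED -> SHARED)
+**   winLock(id, RESERVED_LOCK);    (SHARED   -> RESERVED)
+**   winLock(id, EXCLUSIVE_LOCK);   (RESERVED -> (PENDING) -> EXCLUSIVE)
+**
+** while winUnlock(id, NO_LOCK) drops all locks at once.
+*/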
+
+/*
+** This routine checks if there is a RESERVED lock held on the specified
+** file by this or any other process. If such a lock is held, return
+** non-zero, otherwise zero.
+*/
+static int winCheckReservedLock(sqlite3_file *id, int *pResOut){
+ int res;
+ winFile *pFile = (winFile*)id;
+
+ SimulateIOError( return SQLITE_IOERR_CHECKRESERVEDLOCK; );
+ OSTRACE(("TEST-WR-LOCK file=%p, pResOut=%p\n", pFile->h, pResOut));
+
+ assert( id!=0 );
+ if( pFile->locktype>=RESERVED_LOCK ){
+ res = 1;
+ OSTRACE(("TEST-WR-LOCK file=%p, result=%d (local)\n", pFile->h, res));
+ }else{
+ res = winLockFile(&pFile->h, SQLITE_LOCKFILEEX_FLAGS,RESERVED_BYTE,0,1,0);
+ if( res ){
+ winUnlockFile(&pFile->h, RESERVED_BYTE, 0, 1, 0);
+ }
+ res = !res;
+ OSTRACE(("TEST-WR-LOCK file=%p, result=%d (remote)\n", pFile->h, res));
+ }
+ *pResOut = res;
+ OSTRACE(("TEST-WR-LOCK file=%p, pResOut=%p, *pResOut=%d, rc=SQLITE_OK\n",
+ pFile->h, pResOut, *pResOut));
+ return SQLITE_OK;
+}
+
+/*
+** Lower the locking level on file descriptor id to locktype. locktype
+** must be either NO_LOCK or SHARED_LOCK.
+**
+** If the locking level of the file descriptor is already at or below
+** the requested locking level, this routine is a no-op.
+**
+** It is not possible for this routine to fail if the second argument
+** is NO_LOCK. If the second argument is SHARED_LOCK then this routine
+** might return SQLITE_IOERR.
+*/
+static int winUnlock(sqlite3_file *id, int locktype){
+ int type;
+ winFile *pFile = (winFile*)id;
+ int rc = SQLITE_OK;
+ assert( pFile!=0 );
+ assert( locktype<=SHARED_LOCK );
+ OSTRACE(("UNLOCK file=%p, oldLock=%d(%d), newLock=%d\n",
+ pFile->h, pFile->locktype, pFile->sharedLockByte, locktype));
+ type = pFile->locktype;
+ if( type>=EXCLUSIVE_LOCK ){
+ winUnlockFile(&pFile->h, SHARED_FIRST, 0, SHARED_SIZE, 0);
+ if( locktype==SHARED_LOCK && !winGetReadLock(pFile) ){
+ /* This should never happen. We should always be able to
+ ** reacquire the read lock */
+ rc = winLogError(SQLITE_IOERR_UNLOCK, osGetLastError(),
+ "winUnlock", pFile->zPath);
+ }
+ }
+ if( type>=RESERVED_LOCK ){
+ winUnlockFile(&pFile->h, RESERVED_BYTE, 0, 1, 0);
+ }
+ if( locktype==NO_LOCK && type>=SHARED_LOCK ){
+ winUnlockReadLock(pFile);
+ }
+ if( type>=PENDING_LOCK ){
+ winUnlockFile(&pFile->h, PENDING_BYTE, 0, 1, 0);
+ }
+ pFile->locktype = (u8)locktype;
+ OSTRACE(("UNLOCK file=%p, lock=%d, rc=%s\n",
+ pFile->h, pFile->locktype, sqlite3ErrName(rc)));
+ return rc;
+}
+
+/******************************************************************************
+****************************** No-op Locking **********************************
+**
+** Of the various locking implementations available, this is by far the
+** simplest: locking is ignored. No attempt is made to lock the database
+** file for reading or writing.
+**
+** This locking mode is appropriate for use on read-only databases
+** (e.g. databases that are burned onto CD-ROM). It can
+** also be used if the application employs some external mechanism to
+** prevent simultaneous access of the same database by two or more
+** database connections. But there is a serious risk of database
+** corruption if this locking mode is used in situations where multiple
+** database connections are accessing the same database file at the same
+** time and one or more of those connections are writing.
+*/
+
+static int winNolockLock(sqlite3_file *id, int locktype){
+ UNUSED_PARAMETER(id);
+ UNUSED_PARAMETER(locktype);
+ return SQLITE_OK;
+}
+
+static int winNolockCheckReservedLock(sqlite3_file *id, int *pResOut){
+ UNUSED_PARAMETER(id);
+ UNUSED_PARAMETER(pResOut);
+ return SQLITE_OK;
+}
+
+static int winNolockUnlock(sqlite3_file *id, int locktype){
+ UNUSED_PARAMETER(id);
+ UNUSED_PARAMETER(locktype);
+ return SQLITE_OK;
+}
+
+/******************* End of the no-op lock implementation *********************
+******************************************************************************/
+
+/*
+** If *pArg is initially negative then this is a query. Set *pArg to
+** 1 or 0 depending on whether or not the mask bit of pFile->ctrlFlags is set.
+**
+** If *pArg is 0 or 1, then clear or set the mask bit of pFile->ctrlFlags.
+*/
+static void winModeBit(winFile *pFile, unsigned char mask, int *pArg){
+ if( *pArg<0 ){
+ *pArg = (pFile->ctrlFlags & mask)!=0;
+ }else if( (*pArg)==0 ){
+ pFile->ctrlFlags &= ~mask;
+ }else{
+ pFile->ctrlFlags |= mask;
+ }
+}
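+
+/*
+** Usage sketch: pass -1 to query a mode bit without changing it, or 0/1
+** to clear or set it:
+**
+**   int v = -1;
+**   winModeBit(pFile, WINFILE_PSOW, &v);   (v is now 0 or 1)
+*/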
+
+/* Forward references to VFS helper methods used for temporary files */
+static int winGetTempname(sqlite3_vfs *, char **);
+static int winIsDir(const void *);
+static BOOL winIsLongPathPrefix(const char *);
+static BOOL winIsDriveLetterAndColon(const char *);
+
+/*
+** Control and query of the open file handle.
+*/
+static int winFileControl(sqlite3_file *id, int op, void *pArg){
+ winFile *pFile = (winFile*)id;
+ OSTRACE(("FCNTL file=%p, op=%d, pArg=%p\n", pFile->h, op, pArg));
+ switch( op ){
+ case SQLITE_FCNTL_LOCKSTATE: {
+ *(int*)pArg = pFile->locktype;
+ OSTRACE(("FCNTL file=%p, rc=SQLITE_OK\n", pFile->h));
+ return SQLITE_OK;
+ }
+ case SQLITE_FCNTL_LAST_ERRNO: {
+ *(int*)pArg = (int)pFile->lastErrno;
+ OSTRACE(("FCNTL file=%p, rc=SQLITE_OK\n", pFile->h));
+ return SQLITE_OK;
+ }
+ case SQLITE_FCNTL_CHUNK_SIZE: {
+ pFile->szChunk = *(int *)pArg;
+ OSTRACE(("FCNTL file=%p, rc=SQLITE_OK\n", pFile->h));
+ return SQLITE_OK;
+ }
+ case SQLITE_FCNTL_SIZE_HINT: {
+ if( pFile->szChunk>0 ){
+ sqlite3_int64 oldSz;
+ int rc = winFileSize(id, &oldSz);
+ if( rc==SQLITE_OK ){
+ sqlite3_int64 newSz = *(sqlite3_int64*)pArg;
+ if( newSz>oldSz ){
+ SimulateIOErrorBenign(1);
+ rc = winTruncate(id, newSz);
+ SimulateIOErrorBenign(0);
+ }
+ }
+ OSTRACE(("FCNTL file=%p, rc=%s\n", pFile->h, sqlite3ErrName(rc)));
+ return rc;
+ }
+ OSTRACE(("FCNTL file=%p, rc=SQLITE_OK\n", pFile->h));
+ return SQLITE_OK;
+ }
+ case SQLITE_FCNTL_PERSIST_WAL: {
+ winModeBit(pFile, WINFILE_PERSIST_WAL, (int*)pArg);
+ OSTRACE(("FCNTL file=%p, rc=SQLITE_OK\n", pFile->h));
+ return SQLITE_OK;
+ }
+ case SQLITE_FCNTL_POWERSAFE_OVERWRITE: {
+ winModeBit(pFile, WINFILE_PSOW, (int*)pArg);
+ OSTRACE(("FCNTL file=%p, rc=SQLITE_OK\n", pFile->h));
+ return SQLITE_OK;
+ }
+ case SQLITE_FCNTL_VFSNAME: {
+ *(char**)pArg = sqlite3_mprintf("%s", pFile->pVfs->zName);
+ OSTRACE(("FCNTL file=%p, rc=SQLITE_OK\n", pFile->h));
+ return SQLITE_OK;
+ }
+ case SQLITE_FCNTL_WIN32_AV_RETRY: {
+ int *a = (int*)pArg;
+ if( a[0]>0 ){
+ winIoerrRetry = a[0];
+ }else{
+ a[0] = winIoerrRetry;
+ }
+ if( a[1]>0 ){
+ winIoerrRetryDelay = a[1];
+ }else{
+ a[1] = winIoerrRetryDelay;
+ }
+ OSTRACE(("FCNTL file=%p, rc=SQLITE_OK\n", pFile->h));
+ return SQLITE_OK;
+ }
+ case SQLITE_FCNTL_WIN32_GET_HANDLE: {
+ LPHANDLE phFile = (LPHANDLE)pArg;
+ *phFile = pFile->h;
+ OSTRACE(("FCNTL file=%p, rc=SQLITE_OK\n", pFile->h));
+ return SQLITE_OK;
+ }
+#ifdef SQLITE_TEST
+ case SQLITE_FCNTL_WIN32_SET_HANDLE: {
+ LPHANDLE phFile = (LPHANDLE)pArg;
+ HANDLE hOldFile = pFile->h;
+ pFile->h = *phFile;
+ *phFile = hOldFile;
+ OSTRACE(("FCNTL oldFile=%p, newFile=%p, rc=SQLITE_OK\n",
+ hOldFile, pFile->h));
+ return SQLITE_OK;
+ }
+#endif
+ case SQLITE_FCNTL_TEMPFILENAME: {
+ char *zTFile = 0;
+ int rc = winGetTempname(pFile->pVfs, &zTFile);
+ if( rc==SQLITE_OK ){
+ *(char**)pArg = zTFile;
+ }
+ OSTRACE(("FCNTL file=%p, rc=%s\n", pFile->h, sqlite3ErrName(rc)));
+ return rc;
+ }
+#if SQLITE_MAX_MMAP_SIZE>0
+ case SQLITE_FCNTL_MMAP_SIZE: {
+ i64 newLimit = *(i64*)pArg;
+ int rc = SQLITE_OK;
+ if( newLimit>sqlite3GlobalConfig.mxMmap ){
+ newLimit = sqlite3GlobalConfig.mxMmap;
+ }
+
+ /* The value of newLimit may be eventually cast to (SIZE_T) and passed
+ ** to MapViewOfFile(). Restrict its value to 2GB if (SIZE_T) is not at
+ ** least a 64-bit type. */
+ if( newLimit>0 && sizeof(SIZE_T)<8 ){
+ newLimit = (newLimit & 0x7FFFFFFF);
+ }
+
+ *(i64*)pArg = pFile->mmapSizeMax;
+ if( newLimit>=0 && newLimit!=pFile->mmapSizeMax && pFile->nFetchOut==0 ){
+ pFile->mmapSizeMax = newLimit;
+ if( pFile->mmapSize>0 ){
+ winUnmapfile(pFile);
+ rc = winMapfile(pFile, -1);
+ }
+ }
+ OSTRACE(("FCNTL file=%p, rc=%s\n", pFile->h, sqlite3ErrName(rc)));
+ return rc;
+ }
+#endif
+ }
+ OSTRACE(("FCNTL file=%p, rc=SQLITE_NOTFOUND\n", pFile->h));
+ return SQLITE_NOTFOUND;
+}
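+
+/*
+** For illustration, an application can tune the antivirus retry behavior
+** through the ordinary file-control interface (the "db" handle name is
+** illustrative):
+**
+**   int a[2] = { 20, 50 };   (20 retries, 50ms initial delay)
+**   sqlite3_file_control(db, "main", SQLITE_FCNTL_WIN32_AV_RETRY, a);
+*/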
+
+/*
+** Return the sector size in bytes of the underlying block device for
+** the specified file. This is almost always 512 bytes, but may be
+** larger for some devices.
+**
+** SQLite code assumes this function cannot fail. It also assumes that
+** if two files are created in the same file-system directory (i.e.
+** a database and its journal file) that the sector size will be the
+** same for both.
+*/
+static int winSectorSize(sqlite3_file *id){
+ (void)id;
+ return SQLITE_DEFAULT_SECTOR_SIZE;
+}
+
+/*
+** Return a vector of device characteristics.
+*/
+static int winDeviceCharacteristics(sqlite3_file *id){
+ winFile *p = (winFile*)id;
+ return SQLITE_IOCAP_UNDELETABLE_WHEN_OPEN |
+ ((p->ctrlFlags & WINFILE_PSOW)?SQLITE_IOCAP_POWERSAFE_OVERWRITE:0);
+}
+
+/*
+** Windows will only let you create file view mappings
+** on allocation size granularity boundaries.
+** During sqlite3_os_init() we do a GetSystemInfo()
+** to get the granularity size.
+*/
+static SYSTEM_INFO winSysInfo;
+
+#ifndef SQLITE_OMIT_WAL
+
+/*
+** Helper functions to obtain and relinquish the global mutex. The
+** global mutex is used to protect the winLockInfo objects used by
+** this file, all of which may be shared by multiple threads.
+**
+** Function winShmMutexHeld() is used to assert() that the global mutex
+** is held when required. This function is only used as part of assert()
+** statements. e.g.
+**
+** winShmEnterMutex()
+** assert( winShmMutexHeld() );
+** winShmLeaveMutex()
+*/
+static sqlite3_mutex *winBigLock = 0;
+static void winShmEnterMutex(void){
+ sqlite3_mutex_enter(winBigLock);
+}
+static void winShmLeaveMutex(void){
+ sqlite3_mutex_leave(winBigLock);
+}
+#ifndef NDEBUG
+static int winShmMutexHeld(void) {
+ return sqlite3_mutex_held(winBigLock);
+}
+#endif
+
+/*
+** Object used to represent a single file opened and mmapped to provide
+** shared memory. When multiple threads all reference the same
+** log-summary, each thread has its own winFile object, but they all
+** point to a single instance of this object. In other words, each
+** log-summary is opened only once per process.
+**
+** winShmMutexHeld() must be true when creating or destroying
+** this object or while reading or writing the following fields:
+**
+** nRef
+** pNext
+**
+** The following fields are read-only after the object is created:
+**
+** fid
+** zFilename
+**
+** Either winShmNode.mutex must be held or winShmNode.nRef==0 and
+** winShmMutexHeld() is true when reading or writing any other field
+** in this structure.
+*/
+struct winShmNode {
+ sqlite3_mutex *mutex; /* Mutex to access this object */
+ char *zFilename; /* Name of the file */
+ winFile hFile; /* File handle from winOpen */
+
+ int szRegion; /* Size of shared-memory regions */
+ int nRegion; /* Size of array apRegion */
+ u8 isReadonly; /* True if read-only */
+ u8 isUnlocked; /* True if no DMS lock held */
+
+ struct ShmRegion {
+ HANDLE hMap; /* File handle from CreateFileMapping */
+ void *pMap;
+ } *aRegion;
+ DWORD lastErrno; /* The Windows errno from the last I/O error */
+
+ int nRef; /* Number of winShm objects pointing to this */
+ winShm *pFirst; /* All winShm objects pointing to this */
+ winShmNode *pNext; /* Next in list of all winShmNode objects */
+#if defined(SQLITE_DEBUG) || defined(SQLITE_HAVE_OS_TRACE)
+ u8 nextShmId; /* Next available winShm.id value */
+#endif
+};
+
+/*
+** A global array of all winShmNode objects.
+**
+** The winShmMutexHeld() must be true while reading or writing this list.
+*/
+static winShmNode *winShmNodeList = 0;
+
+/*
+** Structure used internally by this VFS to record the state of an
+** open shared memory connection.
+**
+** The following fields are initialized when this object is created and
+** are read-only thereafter:
+**
+** winShm.pShmNode
+** winShm.id
+**
+** All other fields are read/write. The winShm.pShmNode->mutex must be held
+** while accessing any read/write fields.
+*/
+struct winShm {
+ winShmNode *pShmNode; /* The underlying winShmNode object */
+ winShm *pNext; /* Next winShm with the same winShmNode */
+ u8 hasMutex; /* True if holding the winShmNode mutex */
+ u16 sharedMask; /* Mask of shared locks held */
+ u16 exclMask; /* Mask of exclusive locks held */
+#if defined(SQLITE_DEBUG) || defined(SQLITE_HAVE_OS_TRACE)
+ u8 id; /* Id of this connection with its winShmNode */
+#endif
+};
+
+/*
+** Constants used for locking
+*/
+#define WIN_SHM_BASE ((22+SQLITE_SHM_NLOCK)*4) /* first lock byte */
+#define WIN_SHM_DMS (WIN_SHM_BASE+SQLITE_SHM_NLOCK) /* deadman switch */
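+
+/*
+** With the usual SQLITE_SHM_NLOCK value of 8, WIN_SHM_BASE works out to
+** (22+8)*4 = 120 and WIN_SHM_DMS to 120+8 = 128, placing the lock bytes
+** and the deadman-switch byte at fixed offsets within the -shm file.
+*/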
+
+/*
+** Apply advisory locks for all n bytes beginning at ofst.
+*/
+#define WINSHM_UNLCK 1
+#define WINSHM_RDLCK 2
+#define WINSHM_WRLCK 3
+static int winShmSystemLock(
+ winShmNode *pFile, /* Apply locks to this open shared-memory segment */
+ int lockType, /* WINSHM_UNLCK, WINSHM_RDLCK, or WINSHM_WRLCK */
+ int ofst, /* Offset to first byte to be locked/unlocked */
+ int nByte /* Number of bytes to lock or unlock */
+){
+ int rc = 0; /* Result code from Lock/UnlockFileEx() */
+
+ /* Access to the winShmNode object is serialized by the caller */
+ assert( pFile->nRef==0 || sqlite3_mutex_held(pFile->mutex) );
+
+ OSTRACE(("SHM-LOCK file=%p, lock=%d, offset=%d, size=%d\n",
+ pFile->hFile.h, lockType, ofst, nByte));
+
+ /* Release/Acquire the system-level lock */
+ if( lockType==WINSHM_UNLCK ){
+ rc = winUnlockFile(&pFile->hFile.h, ofst, 0, nByte, 0);
+ }else{
+ /* Initialize the locking parameters */
+ DWORD dwFlags = LOCKFILE_FAIL_IMMEDIATELY;
+ if( lockType == WINSHM_WRLCK ) dwFlags |= LOCKFILE_EXCLUSIVE_LOCK;
+ rc = winLockFile(&pFile->hFile.h, dwFlags, ofst, 0, nByte, 0);
+ }
+
+ if( rc!=0 ){
+ rc = SQLITE_OK;
+ }else{
+ pFile->lastErrno = osGetLastError();
+ rc = SQLITE_BUSY;
+ }
+
+ OSTRACE(("SHM-LOCK file=%p, func=%s, errno=%lu, rc=%s\n",
+ pFile->hFile.h, (lockType == WINSHM_UNLCK) ? "winUnlockFile" :
+ "winLockFile", pFile->lastErrno, sqlite3ErrName(rc)));
+
+ return rc;
+}
+
+/* Forward references to VFS methods */
+static int winOpen(sqlite3_vfs*,const char*,sqlite3_file*,int,int*);
+static int winDelete(sqlite3_vfs *,const char*,int);
+
+/*
+** Purge the winShmNodeList list of all entries with winShmNode.nRef==0.
+**
+** This is not a VFS shared-memory method; it is a utility function called
+** by VFS shared-memory methods.
+*/
+static void winShmPurge(sqlite3_vfs *pVfs, int deleteFlag){
+ winShmNode **pp;
+ winShmNode *p;
+ assert( winShmMutexHeld() );
+ OSTRACE(("SHM-PURGE pid=%lu, deleteFlag=%d\n",
+ osGetCurrentProcessId(), deleteFlag));
+ pp = &winShmNodeList;
+ while( (p = *pp)!=0 ){
+ if( p->nRef==0 ){
+ int i;
+ if( p->mutex ){ sqlite3_mutex_free(p->mutex); }
+ for(i=0; i<p->nRegion; i++){
+ BOOL bRc = osUnmapViewOfFile(p->aRegion[i].pMap);
+ OSTRACE(("SHM-PURGE-UNMAP pid=%lu, region=%d, rc=%s\n",
+ osGetCurrentProcessId(), i, bRc ? "ok" : "failed"));
+ UNUSED_VARIABLE_VALUE(bRc);
+ bRc = osCloseHandle(p->aRegion[i].hMap);
+ OSTRACE(("SHM-PURGE-CLOSE pid=%lu, region=%d, rc=%s\n",
+ osGetCurrentProcessId(), i, bRc ? "ok" : "failed"));
+ UNUSED_VARIABLE_VALUE(bRc);
+ }
+ if( p->hFile.h!=NULL && p->hFile.h!=INVALID_HANDLE_VALUE ){
+ SimulateIOErrorBenign(1);
+ winClose((sqlite3_file *)&p->hFile);
+ SimulateIOErrorBenign(0);
+ }
+ if( deleteFlag ){
+ SimulateIOErrorBenign(1);
+ sqlite3BeginBenignMalloc();
+ winDelete(pVfs, p->zFilename, 0);
+ sqlite3EndBenignMalloc();
+ SimulateIOErrorBenign(0);
+ }
+ *pp = p->pNext;
+ sqlite3_free(p->aRegion);
+ sqlite3_free(p);
+ }else{
+ pp = &p->pNext;
+ }
+ }
+}
+
+/*
+** The DMS lock has not yet been taken on shm file pShmNode. Attempt to
+** take it now. Return SQLITE_OK if successful, or an SQLite error
+** code otherwise.
+**
+** If the DMS cannot be locked because this is a readonly_shm=1
+** connection and no other process already holds a lock, return
+** SQLITE_READONLY_CANTINIT and set pShmNode->isUnlocked=1.
+*/
+static int winLockSharedMemory(winShmNode *pShmNode){
+ int rc = winShmSystemLock(pShmNode, WINSHM_WRLCK, WIN_SHM_DMS, 1);
+
+ if( rc==SQLITE_OK ){
+ if( pShmNode->isReadonly ){
+ pShmNode->isUnlocked = 1;
+ winShmSystemLock(pShmNode, WINSHM_UNLCK, WIN_SHM_DMS, 1);
+ return SQLITE_READONLY_CANTINIT;
+ }else if( winTruncate((sqlite3_file*)&pShmNode->hFile, 0) ){
+ winShmSystemLock(pShmNode, WINSHM_UNLCK, WIN_SHM_DMS, 1);
+ return winLogError(SQLITE_IOERR_SHMOPEN, osGetLastError(),
+ "winLockSharedMemory", pShmNode->zFilename);
+ }
+ }
+
+ if( rc==SQLITE_OK ){
+ winShmSystemLock(pShmNode, WINSHM_UNLCK, WIN_SHM_DMS, 1);
+ }
+
+ return winShmSystemLock(pShmNode, WINSHM_RDLCK, WIN_SHM_DMS, 1);
+}
+
+/*
+** Open the shared-memory area associated with database file pDbFd.
+**
+** When opening a new shared-memory file, if no other instances of that
+** file are currently open, in this process or in other processes, then
+** the file must be truncated to zero length or have its header cleared.
+*/
+static int winOpenSharedMemory(winFile *pDbFd){
+ struct winShm *p; /* The connection to be opened */
+ winShmNode *pShmNode = 0; /* The underlying mmapped file */
+ int rc = SQLITE_OK; /* Result code */
+ winShmNode *pNew; /* Newly allocated winShmNode */
+ int nName; /* Size of zName in bytes */
+
+ assert( pDbFd->pShm==0 ); /* Not previously opened */
+
+ /* Allocate space for the new sqlite3_shm object. Also speculatively
+ ** allocate space for a new winShmNode and filename.
+ */
+ p = sqlite3MallocZero( sizeof(*p) );
+ if( p==0 ) return SQLITE_IOERR_NOMEM_BKPT;
+ nName = sqlite3Strlen30(pDbFd->zPath);
+ pNew = sqlite3MallocZero( sizeof(*pShmNode) + nName + 17 );
+ if( pNew==0 ){
+ sqlite3_free(p);
+ return SQLITE_IOERR_NOMEM_BKPT;
+ }
+ pNew->zFilename = (char*)&pNew[1];
+ sqlite3_snprintf(nName+15, pNew->zFilename, "%s-shm", pDbFd->zPath);
+ sqlite3FileSuffix3(pDbFd->zPath, pNew->zFilename);
+
+ /* Look to see if there is an existing winShmNode that can be used.
+ ** If no matching winShmNode currently exists, create a new one.
+ */
+ winShmEnterMutex();
+ for(pShmNode = winShmNodeList; pShmNode; pShmNode=pShmNode->pNext){
+ /* TBD need to come up with a better match here. Perhaps
+ ** use the FILE_ID_BOTH_DIR_INFO structure.
+ */
+ if( sqlite3StrICmp(pShmNode->zFilename, pNew->zFilename)==0 ) break;
+ }
+ if( pShmNode ){
+ sqlite3_free(pNew);
+ }else{
+ int inFlags = SQLITE_OPEN_WAL;
+ int outFlags = 0;
+
+ pShmNode = pNew;
+ pNew = 0;
+ ((winFile*)(&pShmNode->hFile))->h = INVALID_HANDLE_VALUE;
+ pShmNode->pNext = winShmNodeList;
+ winShmNodeList = pShmNode;
+
+ if( sqlite3GlobalConfig.bCoreMutex ){
+ pShmNode->mutex = sqlite3_mutex_alloc(SQLITE_MUTEX_FAST);
+ if( pShmNode->mutex==0 ){
+ rc = SQLITE_IOERR_NOMEM_BKPT;
+ goto shm_open_err;
+ }
+ }
+
+ if( 0==sqlite3_uri_boolean(pDbFd->zPath, "readonly_shm", 0) ){
+ inFlags |= SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE;
+ }else{
+ inFlags |= SQLITE_OPEN_READONLY;
+ }
+ rc = winOpen(pDbFd->pVfs, pShmNode->zFilename,
+ (sqlite3_file*)&pShmNode->hFile,
+ inFlags, &outFlags);
+ if( rc!=SQLITE_OK ){
+ rc = winLogError(rc, osGetLastError(), "winOpenShm",
+ pShmNode->zFilename);
+ goto shm_open_err;
+ }
+ if( outFlags==SQLITE_OPEN_READONLY ) pShmNode->isReadonly = 1;
+
+ rc = winLockSharedMemory(pShmNode);
+ if( rc!=SQLITE_OK && rc!=SQLITE_READONLY_CANTINIT ) goto shm_open_err;
+ }
+
+ /* Make the new connection a child of the winShmNode */
+ p->pShmNode = pShmNode;
+#if defined(SQLITE_DEBUG) || defined(SQLITE_HAVE_OS_TRACE)
+ p->id = pShmNode->nextShmId++;
+#endif
+ pShmNode->nRef++;
+ pDbFd->pShm = p;
+ winShmLeaveMutex();
+
+ /* The reference count on pShmNode has already been incremented under
+ ** the cover of the winShmEnterMutex() mutex and the pointer from the
+ ** new (struct winShm) object to the pShmNode has been set. All that is
+ ** left to do is to link the new object into the linked list starting
+  ** at pShmNode->pFirst. This must be done while holding pShmNode->mutex.
+ */
+ sqlite3_mutex_enter(pShmNode->mutex);
+ p->pNext = pShmNode->pFirst;
+ pShmNode->pFirst = p;
+ sqlite3_mutex_leave(pShmNode->mutex);
+ return rc;
+
+ /* Jump here on any error */
+shm_open_err:
+ winShmSystemLock(pShmNode, WINSHM_UNLCK, WIN_SHM_DMS, 1);
+ winShmPurge(pDbFd->pVfs, 0); /* This call frees pShmNode if required */
+ sqlite3_free(p);
+ sqlite3_free(pNew);
+ winShmLeaveMutex();
+ return rc;
+}
+
+/*
+** Close a connection to shared-memory. Delete the underlying
+** storage if deleteFlag is true.
+*/
+static int winShmUnmap(
+ sqlite3_file *fd, /* Database holding shared memory */
+ int deleteFlag /* Delete after closing if true */
+){
+ winFile *pDbFd; /* Database holding shared-memory */
+ winShm *p; /* The connection to be closed */
+ winShmNode *pShmNode; /* The underlying shared-memory file */
+ winShm **pp; /* For looping over sibling connections */
+
+ pDbFd = (winFile*)fd;
+ p = pDbFd->pShm;
+ if( p==0 ) return SQLITE_OK;
+ pShmNode = p->pShmNode;
+
+ /* Remove connection p from the set of connections associated
+ ** with pShmNode */
+ sqlite3_mutex_enter(pShmNode->mutex);
+ for(pp=&pShmNode->pFirst; (*pp)!=p; pp = &(*pp)->pNext){}
+ *pp = p->pNext;
+
+ /* Free the connection p */
+ sqlite3_free(p);
+ pDbFd->pShm = 0;
+ sqlite3_mutex_leave(pShmNode->mutex);
+
+ /* If pShmNode->nRef has reached 0, then close the underlying
+ ** shared-memory file, too */
+ winShmEnterMutex();
+ assert( pShmNode->nRef>0 );
+ pShmNode->nRef--;
+ if( pShmNode->nRef==0 ){
+ winShmPurge(pDbFd->pVfs, deleteFlag);
+ }
+ winShmLeaveMutex();
+
+ return SQLITE_OK;
+}
+
+/*
+** Change the lock state for a shared-memory segment.
+*/
+static int winShmLock(
+ sqlite3_file *fd, /* Database file holding the shared memory */
+ int ofst, /* First lock to acquire or release */
+ int n, /* Number of locks to acquire or release */
+ int flags /* What to do with the lock */
+){
+ winFile *pDbFd = (winFile*)fd; /* Connection holding shared memory */
+ winShm *p = pDbFd->pShm; /* The shared memory being locked */
+ winShm *pX; /* For looping over all siblings */
+ winShmNode *pShmNode = p->pShmNode;
+ int rc = SQLITE_OK; /* Result code */
+ u16 mask; /* Mask of locks to take or release */
+
+ assert( ofst>=0 && ofst+n<=SQLITE_SHM_NLOCK );
+ assert( n>=1 );
+ assert( flags==(SQLITE_SHM_LOCK | SQLITE_SHM_SHARED)
+ || flags==(SQLITE_SHM_LOCK | SQLITE_SHM_EXCLUSIVE)
+ || flags==(SQLITE_SHM_UNLOCK | SQLITE_SHM_SHARED)
+ || flags==(SQLITE_SHM_UNLOCK | SQLITE_SHM_EXCLUSIVE) );
+ assert( n==1 || (flags & SQLITE_SHM_EXCLUSIVE)!=0 );
+
+ mask = (u16)((1U<<(ofst+n)) - (1U<<ofst));
+ assert( n>1 || mask==(1<<ofst) );
+ sqlite3_mutex_enter(pShmNode->mutex);
+ if( flags & SQLITE_SHM_UNLOCK ){
+ u16 allMask = 0; /* Mask of locks held by siblings */
+
+ /* See if any siblings hold this same lock */
+ for(pX=pShmNode->pFirst; pX; pX=pX->pNext){
+ if( pX==p ) continue;
+ assert( (pX->exclMask & (p->exclMask|p->sharedMask))==0 );
+ allMask |= pX->sharedMask;
+ }
+
+ /* Unlock the system-level locks */
+ if( (mask & allMask)==0 ){
+ rc = winShmSystemLock(pShmNode, WINSHM_UNLCK, ofst+WIN_SHM_BASE, n);
+ }else{
+ rc = SQLITE_OK;
+ }
+
+ /* Undo the local locks */
+ if( rc==SQLITE_OK ){
+ p->exclMask &= ~mask;
+ p->sharedMask &= ~mask;
+ }
+ }else if( flags & SQLITE_SHM_SHARED ){
+ u16 allShared = 0; /* Union of locks held by connections other than "p" */
+
+ /* Find out which shared locks are already held by sibling connections.
+ ** If any sibling already holds an exclusive lock, go ahead and return
+ ** SQLITE_BUSY.
+ */
+ for(pX=pShmNode->pFirst; pX; pX=pX->pNext){
+ if( (pX->exclMask & mask)!=0 ){
+ rc = SQLITE_BUSY;
+ break;
+ }
+ allShared |= pX->sharedMask;
+ }
+
+ /* Get shared locks at the system level, if necessary */
+ if( rc==SQLITE_OK ){
+ if( (allShared & mask)==0 ){
+ rc = winShmSystemLock(pShmNode, WINSHM_RDLCK, ofst+WIN_SHM_BASE, n);
+ }else{
+ rc = SQLITE_OK;
+ }
+ }
+
+ /* Get the local shared locks */
+ if( rc==SQLITE_OK ){
+ p->sharedMask |= mask;
+ }
+ }else{
+ /* Make sure no sibling connections hold locks that will block this
+ ** lock. If any do, return SQLITE_BUSY right away.
+ */
+ for(pX=pShmNode->pFirst; pX; pX=pX->pNext){
+ if( (pX->exclMask & mask)!=0 || (pX->sharedMask & mask)!=0 ){
+ rc = SQLITE_BUSY;
+ break;
+ }
+ }
+
+ /* Get the exclusive locks at the system level. Then if successful
+ ** also mark the local connection as being locked.
+ */
+ if( rc==SQLITE_OK ){
+ rc = winShmSystemLock(pShmNode, WINSHM_WRLCK, ofst+WIN_SHM_BASE, n);
+ if( rc==SQLITE_OK ){
+ assert( (p->sharedMask & mask)==0 );
+ p->exclMask |= mask;
+ }
+ }
+ }
+ sqlite3_mutex_leave(pShmNode->mutex);
+ OSTRACE(("SHM-LOCK pid=%lu, id=%d, sharedMask=%03x, exclMask=%03x, rc=%s\n",
+ osGetCurrentProcessId(), p->id, p->sharedMask, p->exclMask,
+ sqlite3ErrName(rc)));
+ return rc;
+}
+
+/*
+** Implement a memory barrier or memory fence on shared memory.
+**
+** All loads and stores begun before the barrier must complete before
+** any load or store begun after the barrier.
+*/
+static void winShmBarrier(
+ sqlite3_file *fd /* Database holding the shared memory */
+){
+ UNUSED_PARAMETER(fd);
+ sqlite3MemoryBarrier(); /* compiler-defined memory barrier */
+  winShmEnterMutex();              /* Also take and release the mutex, for redundancy */
+ winShmLeaveMutex();
+}
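+
+/*
+** For comparison only: outside of this file the same full-fence effect
+** is available from the C11 atomics library, as in the minimal sketch
+** below (which assumes a C11-capable compiler; sqlite3MemoryBarrier()
+** itself is selected elsewhere and may map to a different primitive).
+*/
+#include <stdatomic.h>
+
+static void fullFenceSketch(void){
+  /* Order all prior loads and stores before any that follow. */
+  atomic_thread_fence(memory_order_seq_cst);
+}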
+
+/*
+** This function is called to obtain a pointer to region iRegion of the
+** shared-memory associated with the database file fd. Shared-memory regions
+** are numbered starting from zero. Each shared-memory region is szRegion
+** bytes in size.
+**
+** If an error occurs, an error code is returned and *pp is set to NULL.
+**
+** Otherwise, if the isWrite parameter is 0 and the requested shared-memory
+** region has not been allocated (by any client, including one running in a
+** separate process), then *pp is set to NULL and SQLITE_OK returned. If
+** isWrite is non-zero and the requested shared-memory region has not yet
+** been allocated, it is allocated by this function.
+**
+** If the shared-memory region has already been allocated or is allocated by
+** this call as described above, then it is mapped into this process's
+** address space (if it is not already), *pp is set to point to the mapped
+** memory and SQLITE_OK returned.
+*/
+static int winShmMap(
+ sqlite3_file *fd, /* Handle open on database file */
+ int iRegion, /* Region to retrieve */
+ int szRegion, /* Size of regions */
+ int isWrite, /* True to extend file if necessary */
+ void volatile **pp /* OUT: Mapped memory */
+){
+ winFile *pDbFd = (winFile*)fd;
+ winShm *pShm = pDbFd->pShm;
+ winShmNode *pShmNode;
+ DWORD protect = PAGE_READWRITE;
+ DWORD flags = FILE_MAP_WRITE | FILE_MAP_READ;
+ int rc = SQLITE_OK;
+
+ if( !pShm ){
+ rc = winOpenSharedMemory(pDbFd);
+ if( rc!=SQLITE_OK ) return rc;
+ pShm = pDbFd->pShm;
+ assert( pShm!=0 );
+ }
+ pShmNode = pShm->pShmNode;
+
+ sqlite3_mutex_enter(pShmNode->mutex);
+ if( pShmNode->isUnlocked ){
+ rc = winLockSharedMemory(pShmNode);
+ if( rc!=SQLITE_OK ) goto shmpage_out;
+ pShmNode->isUnlocked = 0;
+ }
+ assert( szRegion==pShmNode->szRegion || pShmNode->nRegion==0 );
+
+ if( pShmNode->nRegion<=iRegion ){
+ struct ShmRegion *apNew; /* New aRegion[] array */
+ int nByte = (iRegion+1)*szRegion; /* Minimum required file size */
+ sqlite3_int64 sz; /* Current size of wal-index file */
+
+ pShmNode->szRegion = szRegion;
+
+  /* The requested region is not mapped into this process's address space.
+ ** Check to see if it has been allocated (i.e. if the wal-index file is
+ ** large enough to contain the requested region).
+ */
+ rc = winFileSize((sqlite3_file *)&pShmNode->hFile, &sz);
+ if( rc!=SQLITE_OK ){
+ rc = winLogError(SQLITE_IOERR_SHMSIZE, osGetLastError(),
+ "winShmMap1", pDbFd->zPath);
+ goto shmpage_out;
+ }
+
+ if( sz<nByte ){
+ /* The requested memory region does not exist. If isWrite is set to
+ ** zero, exit early. *pp will be set to NULL and SQLITE_OK returned.
+ **
+      ** Alternatively, if isWrite is non-zero, use winTruncate() to allocate
+ ** the requested memory region.
+ */
+ if( !isWrite ) goto shmpage_out;
+ rc = winTruncate((sqlite3_file *)&pShmNode->hFile, nByte);
+ if( rc!=SQLITE_OK ){
+ rc = winLogError(SQLITE_IOERR_SHMSIZE, osGetLastError(),
+ "winShmMap2", pDbFd->zPath);
+ goto shmpage_out;
+ }
+ }
+
+    /* Map the requested memory region into this process's address space. */
+ apNew = (struct ShmRegion *)sqlite3_realloc64(
+ pShmNode->aRegion, (iRegion+1)*sizeof(apNew[0])
+ );
+ if( !apNew ){
+ rc = SQLITE_IOERR_NOMEM_BKPT;
+ goto shmpage_out;
+ }
+ pShmNode->aRegion = apNew;
+
+ if( pShmNode->isReadonly ){
+ protect = PAGE_READONLY;
+ flags = FILE_MAP_READ;
+ }
+
+ while( pShmNode->nRegion<=iRegion ){
+ HANDLE hMap = NULL; /* file-mapping handle */
+ void *pMap = 0; /* Mapped memory region */
+
+#if SQLITE_OS_WINRT
+ hMap = osCreateFileMappingFromApp(pShmNode->hFile.h,
+ NULL, protect, nByte, NULL
+ );
+#elif defined(SQLITE_WIN32_HAS_WIDE)
+ hMap = osCreateFileMappingW(pShmNode->hFile.h,
+ NULL, protect, 0, nByte, NULL
+ );
+#elif defined(SQLITE_WIN32_HAS_ANSI) && SQLITE_WIN32_CREATEFILEMAPPINGA
+ hMap = osCreateFileMappingA(pShmNode->hFile.h,
+ NULL, protect, 0, nByte, NULL
+ );
+#endif
+ OSTRACE(("SHM-MAP-CREATE pid=%lu, region=%d, size=%d, rc=%s\n",
+ osGetCurrentProcessId(), pShmNode->nRegion, nByte,
+ hMap ? "ok" : "failed"));
+ if( hMap ){
+ int iOffset = pShmNode->nRegion*szRegion;
+ int iOffsetShift = iOffset % winSysInfo.dwAllocationGranularity;
+#if SQLITE_OS_WINRT
+ pMap = osMapViewOfFileFromApp(hMap, flags,
+ iOffset - iOffsetShift, szRegion + iOffsetShift
+ );
+#else
+ pMap = osMapViewOfFile(hMap, flags,
+ 0, iOffset - iOffsetShift, szRegion + iOffsetShift
+ );
+#endif
+ OSTRACE(("SHM-MAP-MAP pid=%lu, region=%d, offset=%d, size=%d, rc=%s\n",
+ osGetCurrentProcessId(), pShmNode->nRegion, iOffset,
+ szRegion, pMap ? "ok" : "failed"));
+ }
+ if( !pMap ){
+ pShmNode->lastErrno = osGetLastError();
+ rc = winLogError(SQLITE_IOERR_SHMMAP, pShmNode->lastErrno,
+ "winShmMap3", pDbFd->zPath);
+ if( hMap ) osCloseHandle(hMap);
+ goto shmpage_out;
+ }
+
+ pShmNode->aRegion[pShmNode->nRegion].pMap = pMap;
+ pShmNode->aRegion[pShmNode->nRegion].hMap = hMap;
+ pShmNode->nRegion++;
+ }
+ }
+
+shmpage_out:
+ if( pShmNode->nRegion>iRegion ){
+ int iOffset = iRegion*szRegion;
+ int iOffsetShift = iOffset % winSysInfo.dwAllocationGranularity;
+ char *p = (char *)pShmNode->aRegion[iRegion].pMap;
+ *pp = (void *)&p[iOffsetShift];
+ }else{
+ *pp = 0;
+ }
+ if( pShmNode->isReadonly && rc==SQLITE_OK ) rc = SQLITE_READONLY;
+ sqlite3_mutex_leave(pShmNode->mutex);
+ return rc;
+}
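+
+/*
+** A worked example of the allocation-granularity arithmetic used by
+** winShmMap() above, as a self-contained sketch. With a hypothetical
+** 32 KiB region size and the common 64 KiB Win32 allocation
+** granularity, odd-numbered regions begin at offsets that are not
+** legal map offsets, so the view starts at the previous granularity
+** boundary and the returned pointer is advanced by the remainder.
+*/
+#include <stdio.h>
+
+static void shmOffsetSketch(void){
+  int szRegion = 32768;           /* hypothetical region size */
+  int granularity = 65536;        /* typical dwAllocationGranularity */
+  int iRegion;
+  for(iRegion=0; iRegion<4; iRegion++){
+    int iOffset = iRegion*szRegion;
+    int iOffsetShift = iOffset % granularity;
+    /* region 1 prints: map file offset 0, return base+32768 */
+    printf("region %d: map file offset %d, return base+%d\n",
+           iRegion, iOffset - iOffsetShift, iOffsetShift);
+  }
+}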
+
+#else
+# define winShmMap 0
+# define winShmLock 0
+# define winShmBarrier 0
+# define winShmUnmap 0
+#endif /* #ifndef SQLITE_OMIT_WAL */
+
+/*
+** Cleans up the mapped region of the specified file, if any.
+*/
+#if SQLITE_MAX_MMAP_SIZE>0
+static int winUnmapfile(winFile *pFile){
+ assert( pFile!=0 );
+ OSTRACE(("UNMAP-FILE pid=%lu, pFile=%p, hMap=%p, pMapRegion=%p, "
+ "mmapSize=%lld, mmapSizeMax=%lld\n",
+ osGetCurrentProcessId(), pFile, pFile->hMap, pFile->pMapRegion,
+ pFile->mmapSize, pFile->mmapSizeMax));
+ if( pFile->pMapRegion ){
+ if( !osUnmapViewOfFile(pFile->pMapRegion) ){
+ pFile->lastErrno = osGetLastError();
+ OSTRACE(("UNMAP-FILE pid=%lu, pFile=%p, pMapRegion=%p, "
+ "rc=SQLITE_IOERR_MMAP\n", osGetCurrentProcessId(), pFile,
+ pFile->pMapRegion));
+ return winLogError(SQLITE_IOERR_MMAP, pFile->lastErrno,
+ "winUnmapfile1", pFile->zPath);
+ }
+ pFile->pMapRegion = 0;
+ pFile->mmapSize = 0;
+ }
+ if( pFile->hMap!=NULL ){
+ if( !osCloseHandle(pFile->hMap) ){
+ pFile->lastErrno = osGetLastError();
+ OSTRACE(("UNMAP-FILE pid=%lu, pFile=%p, hMap=%p, rc=SQLITE_IOERR_MMAP\n",
+ osGetCurrentProcessId(), pFile, pFile->hMap));
+ return winLogError(SQLITE_IOERR_MMAP, pFile->lastErrno,
+ "winUnmapfile2", pFile->zPath);
+ }
+ pFile->hMap = NULL;
+ }
+ OSTRACE(("UNMAP-FILE pid=%lu, pFile=%p, rc=SQLITE_OK\n",
+ osGetCurrentProcessId(), pFile));
+ return SQLITE_OK;
+}
+
+/*
+** Memory map or remap the file opened by file-descriptor pFd (if the file
+** is already mapped, the existing mapping is replaced by the new one). Or, if
+** there already exists a mapping for this file, and there are still
+** outstanding xFetch() references to it, this function is a no-op.
+**
+** If parameter nByte is non-negative, then it is the requested size of
+** the mapping to create. Otherwise, if nByte is less than zero, then the
+** requested size is the size of the file on disk. The actual size of the
+** created mapping is either the requested size or the value configured
+** using SQLITE_FCNTL_MMAP_SIZE, whichever is smaller.
+**
+** SQLITE_OK is returned if no error occurs (even if the mapping is not
+** recreated as a result of outstanding references); otherwise, an SQLite
+** error code is returned.
+*/
+static int winMapfile(winFile *pFd, sqlite3_int64 nByte){
+ sqlite3_int64 nMap = nByte;
+ int rc;
+
+ assert( nMap>=0 || pFd->nFetchOut==0 );
+ OSTRACE(("MAP-FILE pid=%lu, pFile=%p, size=%lld\n",
+ osGetCurrentProcessId(), pFd, nByte));
+
+ if( pFd->nFetchOut>0 ) return SQLITE_OK;
+
+ if( nMap<0 ){
+ rc = winFileSize((sqlite3_file*)pFd, &nMap);
+ if( rc ){
+ OSTRACE(("MAP-FILE pid=%lu, pFile=%p, rc=SQLITE_IOERR_FSTAT\n",
+ osGetCurrentProcessId(), pFd));
+ return SQLITE_IOERR_FSTAT;
+ }
+ }
+ if( nMap>pFd->mmapSizeMax ){
+ nMap = pFd->mmapSizeMax;
+ }
+ nMap &= ~(sqlite3_int64)(winSysInfo.dwPageSize - 1);
+
+ if( nMap==0 && pFd->mmapSize>0 ){
+ winUnmapfile(pFd);
+ }
+ if( nMap!=pFd->mmapSize ){
+ void *pNew = 0;
+ DWORD protect = PAGE_READONLY;
+ DWORD flags = FILE_MAP_READ;
+
+ winUnmapfile(pFd);
+#ifdef SQLITE_MMAP_READWRITE
+ if( (pFd->ctrlFlags & WINFILE_RDONLY)==0 ){
+ protect = PAGE_READWRITE;
+ flags |= FILE_MAP_WRITE;
+ }
+#endif
+#if SQLITE_OS_WINRT
+ pFd->hMap = osCreateFileMappingFromApp(pFd->h, NULL, protect, nMap, NULL);
+#elif defined(SQLITE_WIN32_HAS_WIDE)
+ pFd->hMap = osCreateFileMappingW(pFd->h, NULL, protect,
+ (DWORD)((nMap>>32) & 0xffffffff),
+ (DWORD)(nMap & 0xffffffff), NULL);
+#elif defined(SQLITE_WIN32_HAS_ANSI) && SQLITE_WIN32_CREATEFILEMAPPINGA
+ pFd->hMap = osCreateFileMappingA(pFd->h, NULL, protect,
+ (DWORD)((nMap>>32) & 0xffffffff),
+ (DWORD)(nMap & 0xffffffff), NULL);
+#endif
+ if( pFd->hMap==NULL ){
+ pFd->lastErrno = osGetLastError();
+ rc = winLogError(SQLITE_IOERR_MMAP, pFd->lastErrno,
+ "winMapfile1", pFd->zPath);
+ /* Log the error, but continue normal operation using xRead/xWrite */
+ OSTRACE(("MAP-FILE-CREATE pid=%lu, pFile=%p, rc=%s\n",
+ osGetCurrentProcessId(), pFd, sqlite3ErrName(rc)));
+ return SQLITE_OK;
+ }
+ assert( (nMap % winSysInfo.dwPageSize)==0 );
+ assert( sizeof(SIZE_T)==sizeof(sqlite3_int64) || nMap<=0xffffffff );
+#if SQLITE_OS_WINRT
+ pNew = osMapViewOfFileFromApp(pFd->hMap, flags, 0, (SIZE_T)nMap);
+#else
+ pNew = osMapViewOfFile(pFd->hMap, flags, 0, 0, (SIZE_T)nMap);
+#endif
+ if( pNew==NULL ){
+ osCloseHandle(pFd->hMap);
+ pFd->hMap = NULL;
+ pFd->lastErrno = osGetLastError();
+ rc = winLogError(SQLITE_IOERR_MMAP, pFd->lastErrno,
+ "winMapfile2", pFd->zPath);
+ /* Log the error, but continue normal operation using xRead/xWrite */
+ OSTRACE(("MAP-FILE-MAP pid=%lu, pFile=%p, rc=%s\n",
+ osGetCurrentProcessId(), pFd, sqlite3ErrName(rc)));
+ return SQLITE_OK;
+ }
+ pFd->pMapRegion = pNew;
+ pFd->mmapSize = nMap;
+ }
+
+ OSTRACE(("MAP-FILE pid=%lu, pFile=%p, rc=SQLITE_OK\n",
+ osGetCurrentProcessId(), pFd));
+ return SQLITE_OK;
+}
+#endif /* SQLITE_MAX_MMAP_SIZE>0 */
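+
+/*
+** How an application reaches the mapping code above: the mmapSizeMax
+** ceiling consulted by winMapfile() is set through the public API,
+** either with "PRAGMA mmap_size=N" or with the file-control in this
+** minimal sketch (which assumes an already-open handle db whose main
+** database lives on a real file):
+*/
+static int enableMmapSketch(sqlite3 *db){
+  sqlite3_int64 szMmap = 268435456;   /* request a 256 MiB mapping cap */
+  /* On input the pointed-to value is the requested limit; on output it
+  ** holds the limit that was actually applied. */
+  return sqlite3_file_control(db, "main", SQLITE_FCNTL_MMAP_SIZE, &szMmap);
+}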
+
+/*
+** If possible, return a pointer to a mapping of file fd starting at offset
+** iOff. The mapping must be valid for at least nAmt bytes.
+**
+** If such a pointer can be obtained, store it in *pp and return SQLITE_OK.
+** Or, if one cannot but no error occurs, set *pp to 0 and return SQLITE_OK.
+** Finally, if an error does occur, return an SQLite error code. The final
+** value of *pp is undefined in this case.
+**
+** If this function does return a pointer, the caller must eventually
+** release the reference by calling winUnfetch().
+*/
+static int winFetch(sqlite3_file *fd, i64 iOff, int nAmt, void **pp){
+#if SQLITE_MAX_MMAP_SIZE>0
+ winFile *pFd = (winFile*)fd; /* The underlying database file */
+#endif
+ *pp = 0;
+
+ OSTRACE(("FETCH pid=%lu, pFile=%p, offset=%lld, amount=%d, pp=%p\n",
+ osGetCurrentProcessId(), fd, iOff, nAmt, pp));
+
+#if SQLITE_MAX_MMAP_SIZE>0
+ if( pFd->mmapSizeMax>0 ){
+ if( pFd->pMapRegion==0 ){
+ int rc = winMapfile(pFd, -1);
+ if( rc!=SQLITE_OK ){
+ OSTRACE(("FETCH pid=%lu, pFile=%p, rc=%s\n",
+ osGetCurrentProcessId(), pFd, sqlite3ErrName(rc)));
+ return rc;
+ }
+ }
+ if( pFd->mmapSize >= iOff+nAmt ){
+ assert( pFd->pMapRegion!=0 );
+ *pp = &((u8 *)pFd->pMapRegion)[iOff];
+ pFd->nFetchOut++;
+ }
+ }
+#endif
+
+ OSTRACE(("FETCH pid=%lu, pFile=%p, pp=%p, *pp=%p, rc=SQLITE_OK\n",
+ osGetCurrentProcessId(), fd, pp, *pp));
+ return SQLITE_OK;
+}
+
+/*
+** If the third argument is non-NULL, then this function releases a
+** reference obtained by an earlier call to winFetch(). The second
+** argument passed to this function must be the same as the corresponding
+** argument that was passed to the winFetch() invocation.
+**
+** Or, if the third argument is NULL, then this function is being called
+** to inform the VFS layer that, according to POSIX, any existing mapping
+** may now be invalid and should be unmapped.
+*/
+static int winUnfetch(sqlite3_file *fd, i64 iOff, void *p){
+#if SQLITE_MAX_MMAP_SIZE>0
+ winFile *pFd = (winFile*)fd; /* The underlying database file */
+
+ /* If p==0 (unmap the entire file) then there must be no outstanding
+ ** xFetch references. Or, if p!=0 (meaning it is an xFetch reference),
+ ** then there must be at least one outstanding. */
+ assert( (p==0)==(pFd->nFetchOut==0) );
+
+ /* If p!=0, it must match the iOff value. */
+ assert( p==0 || p==&((u8 *)pFd->pMapRegion)[iOff] );
+
+ OSTRACE(("UNFETCH pid=%lu, pFile=%p, offset=%lld, p=%p\n",
+ osGetCurrentProcessId(), pFd, iOff, p));
+
+ if( p ){
+ pFd->nFetchOut--;
+ }else{
+ /* FIXME: If Windows truly always prevents truncating or deleting a
+ ** file while a mapping is held, then the following winUnmapfile() call
+    ** is unnecessary and can be omitted - potentially improving
+ ** performance. */
+ winUnmapfile(pFd);
+ }
+
+ assert( pFd->nFetchOut>=0 );
+#endif
+
+ OSTRACE(("UNFETCH pid=%lu, pFile=%p, rc=SQLITE_OK\n",
+ osGetCurrentProcessId(), fd));
+ return SQLITE_OK;
+}
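+
+/*
+** The xFetch/xUnfetch pair is driven by the pager layer rather than by
+** applications. A hedged sketch of the calling convention, assuming an
+** open sqlite3_file whose pMethods is a version-3 method table such as
+** the winIoMethod vector defined below:
+*/
+static int fetchSketch(sqlite3_file *pFd){
+  void *pPage = 0;
+  int rc = pFd->pMethods->xFetch(pFd, 0, 4096, &pPage);
+  if( rc==SQLITE_OK && pPage ){
+    /* ... read up to 4096 bytes at offset 0 through pPage ... */
+    rc = pFd->pMethods->xUnfetch(pFd, 0, pPage);   /* release the ref */
+  }
+  return rc;
+}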
+
+/*
+** Here ends the implementation of all sqlite3_file methods.
+**
+********************** End sqlite3_file Methods *******************************
+******************************************************************************/
+
+/*
+** This vector defines all the methods that can operate on an
+** sqlite3_file for win32.
+*/
+static const sqlite3_io_methods winIoMethod = {
+ 3, /* iVersion */
+ winClose, /* xClose */
+ winRead, /* xRead */
+ winWrite, /* xWrite */
+ winTruncate, /* xTruncate */
+ winSync, /* xSync */
+ winFileSize, /* xFileSize */
+ winLock, /* xLock */
+ winUnlock, /* xUnlock */
+ winCheckReservedLock, /* xCheckReservedLock */
+ winFileControl, /* xFileControl */
+ winSectorSize, /* xSectorSize */
+ winDeviceCharacteristics, /* xDeviceCharacteristics */
+ winShmMap, /* xShmMap */
+ winShmLock, /* xShmLock */
+ winShmBarrier, /* xShmBarrier */
+ winShmUnmap, /* xShmUnmap */
+ winFetch, /* xFetch */
+ winUnfetch /* xUnfetch */
+};
+
+/*
+** This vector defines all the methods that can operate on an
+** sqlite3_file for win32 without performing any locking.
+*/
+static const sqlite3_io_methods winIoNolockMethod = {
+ 3, /* iVersion */
+ winClose, /* xClose */
+ winRead, /* xRead */
+ winWrite, /* xWrite */
+ winTruncate, /* xTruncate */
+ winSync, /* xSync */
+ winFileSize, /* xFileSize */
+ winNolockLock, /* xLock */
+ winNolockUnlock, /* xUnlock */
+ winNolockCheckReservedLock, /* xCheckReservedLock */
+ winFileControl, /* xFileControl */
+ winSectorSize, /* xSectorSize */
+ winDeviceCharacteristics, /* xDeviceCharacteristics */
+ winShmMap, /* xShmMap */
+ winShmLock, /* xShmLock */
+ winShmBarrier, /* xShmBarrier */
+ winShmUnmap, /* xShmUnmap */
+ winFetch, /* xFetch */
+ winUnfetch /* xUnfetch */
+};
+
+static winVfsAppData winAppData = {
+ &winIoMethod, /* pMethod */
+ 0, /* pAppData */
+ 0 /* bNoLock */
+};
+
+static winVfsAppData winNolockAppData = {
+ &winIoNolockMethod, /* pMethod */
+ 0, /* pAppData */
+ 1 /* bNoLock */
+};
+
+/****************************************************************************
+**************************** sqlite3_vfs methods ****************************
+**
+** This division contains the implementation of methods on the
+** sqlite3_vfs object.
+*/
+
+#if defined(__CYGWIN__)
+/*
+** Convert a filename from whatever the underlying operating system
+** supports for filenames into UTF-8. Space to hold the result is
+** obtained from malloc and must be freed by the calling function.
+*/
+static char *winConvertToUtf8Filename(const void *zFilename){
+ char *zConverted = 0;
+ if( osIsNT() ){
+ zConverted = winUnicodeToUtf8(zFilename);
+ }
+#ifdef SQLITE_WIN32_HAS_ANSI
+ else{
+ zConverted = winMbcsToUtf8(zFilename, osAreFileApisANSI());
+ }
+#endif
+ /* caller will handle out of memory */
+ return zConverted;
+}
+#endif
+
+/*
+** Convert a UTF-8 filename into whatever form the underlying
+** operating system wants filenames in. Space to hold the result
+** is obtained from malloc and must be freed by the calling
+** function.
+*/
+static void *winConvertFromUtf8Filename(const char *zFilename){
+ void *zConverted = 0;
+ if( osIsNT() ){
+ zConverted = winUtf8ToUnicode(zFilename);
+ }
+#ifdef SQLITE_WIN32_HAS_ANSI
+ else{
+ zConverted = winUtf8ToMbcs(zFilename, osAreFileApisANSI());
+ }
+#endif
+ /* caller will handle out of memory */
+ return zConverted;
+}
+
+/*
+** This function returns non-zero if the specified UTF-8 string buffer
+** ends with a directory separator character or one was successfully
+** added to it.
+*/
+static int winMakeEndInDirSep(int nBuf, char *zBuf){
+ if( zBuf ){
+ int nLen = sqlite3Strlen30(zBuf);
+ if( nLen>0 ){
+ if( winIsDirSep(zBuf[nLen-1]) ){
+ return 1;
+ }else if( nLen+1<nBuf ){
+ zBuf[nLen] = winGetDirSep();
+ zBuf[nLen+1] = '\0';
+ return 1;
+ }
+ }
+ }
+ return 0;
+}
+
+/*
+** Create a temporary file name and store the resulting pointer into pzBuf.
+** The pointer returned in pzBuf must be freed via sqlite3_free().
+*/
+static int winGetTempname(sqlite3_vfs *pVfs, char **pzBuf){
+ static char zChars[] =
+ "abcdefghijklmnopqrstuvwxyz"
+ "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
+ "0123456789";
+ size_t i, j;
+ int nPre = sqlite3Strlen30(SQLITE_TEMP_FILE_PREFIX);
+ int nMax, nBuf, nDir, nLen;
+ char *zBuf;
+
+ /* It's odd to simulate an io-error here, but really this is just
+ ** using the io-error infrastructure to test that SQLite handles this
+ ** function failing.
+ */
+ SimulateIOError( return SQLITE_IOERR );
+
+ /* Allocate a temporary buffer to store the fully qualified file
+ ** name for the temporary file. If this fails, we cannot continue.
+ */
+ nMax = pVfs->mxPathname; nBuf = nMax + 2;
+ zBuf = sqlite3MallocZero( nBuf );
+ if( !zBuf ){
+ OSTRACE(("TEMP-FILENAME rc=SQLITE_IOERR_NOMEM\n"));
+ return SQLITE_IOERR_NOMEM_BKPT;
+ }
+
+ /* Figure out the effective temporary directory. First, check if one
+ ** has been explicitly set by the application; otherwise, use the one
+ ** configured by the operating system.
+ */
+ nDir = nMax - (nPre + 15);
+ assert( nDir>0 );
+ if( sqlite3_temp_directory ){
+ int nDirLen = sqlite3Strlen30(sqlite3_temp_directory);
+ if( nDirLen>0 ){
+ if( !winIsDirSep(sqlite3_temp_directory[nDirLen-1]) ){
+ nDirLen++;
+ }
+ if( nDirLen>nDir ){
+ sqlite3_free(zBuf);
+ OSTRACE(("TEMP-FILENAME rc=SQLITE_ERROR\n"));
+ return winLogError(SQLITE_ERROR, 0, "winGetTempname1", 0);
+ }
+ sqlite3_snprintf(nMax, zBuf, "%s", sqlite3_temp_directory);
+ }
+ }
+#if defined(__CYGWIN__)
+ else{
+ static const char *azDirs[] = {
+ 0, /* getenv("SQLITE_TMPDIR") */
+ 0, /* getenv("TMPDIR") */
+ 0, /* getenv("TMP") */
+ 0, /* getenv("TEMP") */
+ 0, /* getenv("USERPROFILE") */
+ "/var/tmp",
+ "/usr/tmp",
+ "/tmp",
+ ".",
+ 0 /* List terminator */
+ };
+ unsigned int i;
+ const char *zDir = 0;
+
+ if( !azDirs[0] ) azDirs[0] = getenv("SQLITE_TMPDIR");
+ if( !azDirs[1] ) azDirs[1] = getenv("TMPDIR");
+ if( !azDirs[2] ) azDirs[2] = getenv("TMP");
+ if( !azDirs[3] ) azDirs[3] = getenv("TEMP");
+ if( !azDirs[4] ) azDirs[4] = getenv("USERPROFILE");
+ for(i=0; i<sizeof(azDirs)/sizeof(azDirs[0]); zDir=azDirs[i++]){
+ void *zConverted;
+ if( zDir==0 ) continue;
+ /* If the path starts with a drive letter followed by the colon
+ ** character, assume it is already a native Win32 path; otherwise,
+ ** it must be converted to a native Win32 path via the Cygwin API
+ ** prior to using it.
+ */
+ if( winIsDriveLetterAndColon(zDir) ){
+ zConverted = winConvertFromUtf8Filename(zDir);
+ if( !zConverted ){
+ sqlite3_free(zBuf);
+ OSTRACE(("TEMP-FILENAME rc=SQLITE_IOERR_NOMEM\n"));
+ return SQLITE_IOERR_NOMEM_BKPT;
+ }
+ if( winIsDir(zConverted) ){
+ sqlite3_snprintf(nMax, zBuf, "%s", zDir);
+ sqlite3_free(zConverted);
+ break;
+ }
+ sqlite3_free(zConverted);
+ }else{
+ zConverted = sqlite3MallocZero( nMax+1 );
+ if( !zConverted ){
+ sqlite3_free(zBuf);
+ OSTRACE(("TEMP-FILENAME rc=SQLITE_IOERR_NOMEM\n"));
+ return SQLITE_IOERR_NOMEM_BKPT;
+ }
+ if( cygwin_conv_path(
+ osIsNT() ? CCP_POSIX_TO_WIN_W : CCP_POSIX_TO_WIN_A, zDir,
+ zConverted, nMax+1)<0 ){
+ sqlite3_free(zConverted);
+ sqlite3_free(zBuf);
+ OSTRACE(("TEMP-FILENAME rc=SQLITE_IOERR_CONVPATH\n"));
+ return winLogError(SQLITE_IOERR_CONVPATH, (DWORD)errno,
+ "winGetTempname2", zDir);
+ }
+ if( winIsDir(zConverted) ){
+ /* At this point, we know the candidate directory exists and should
+ ** be used. However, we may need to convert the string containing
+ ** its name into UTF-8 (i.e. if it is UTF-16 right now).
+ */
+ char *zUtf8 = winConvertToUtf8Filename(zConverted);
+ if( !zUtf8 ){
+ sqlite3_free(zConverted);
+ sqlite3_free(zBuf);
+ OSTRACE(("TEMP-FILENAME rc=SQLITE_IOERR_NOMEM\n"));
+ return SQLITE_IOERR_NOMEM_BKPT;
+ }
+ sqlite3_snprintf(nMax, zBuf, "%s", zUtf8);
+ sqlite3_free(zUtf8);
+ sqlite3_free(zConverted);
+ break;
+ }
+ sqlite3_free(zConverted);
+ }
+ }
+ }
+#elif !SQLITE_OS_WINRT && !defined(__CYGWIN__)
+ else if( osIsNT() ){
+ char *zMulti;
+ LPWSTR zWidePath = sqlite3MallocZero( nMax*sizeof(WCHAR) );
+ if( !zWidePath ){
+ sqlite3_free(zBuf);
+ OSTRACE(("TEMP-FILENAME rc=SQLITE_IOERR_NOMEM\n"));
+ return SQLITE_IOERR_NOMEM_BKPT;
+ }
+ if( osGetTempPathW(nMax, zWidePath)==0 ){
+ sqlite3_free(zWidePath);
+ sqlite3_free(zBuf);
+ OSTRACE(("TEMP-FILENAME rc=SQLITE_IOERR_GETTEMPPATH\n"));
+ return winLogError(SQLITE_IOERR_GETTEMPPATH, osGetLastError(),
+ "winGetTempname2", 0);
+ }
+ zMulti = winUnicodeToUtf8(zWidePath);
+ if( zMulti ){
+ sqlite3_snprintf(nMax, zBuf, "%s", zMulti);
+ sqlite3_free(zMulti);
+ sqlite3_free(zWidePath);
+ }else{
+ sqlite3_free(zWidePath);
+ sqlite3_free(zBuf);
+ OSTRACE(("TEMP-FILENAME rc=SQLITE_IOERR_NOMEM\n"));
+ return SQLITE_IOERR_NOMEM_BKPT;
+ }
+ }
+#ifdef SQLITE_WIN32_HAS_ANSI
+ else{
+ char *zUtf8;
+ char *zMbcsPath = sqlite3MallocZero( nMax );
+ if( !zMbcsPath ){
+ sqlite3_free(zBuf);
+ OSTRACE(("TEMP-FILENAME rc=SQLITE_IOERR_NOMEM\n"));
+ return SQLITE_IOERR_NOMEM_BKPT;
+ }
+ if( osGetTempPathA(nMax, zMbcsPath)==0 ){
+ sqlite3_free(zBuf);
+ OSTRACE(("TEMP-FILENAME rc=SQLITE_IOERR_GETTEMPPATH\n"));
+ return winLogError(SQLITE_IOERR_GETTEMPPATH, osGetLastError(),
+ "winGetTempname3", 0);
+ }
+ zUtf8 = winMbcsToUtf8(zMbcsPath, osAreFileApisANSI());
+ if( zUtf8 ){
+ sqlite3_snprintf(nMax, zBuf, "%s", zUtf8);
+ sqlite3_free(zUtf8);
+ }else{
+ sqlite3_free(zBuf);
+ OSTRACE(("TEMP-FILENAME rc=SQLITE_IOERR_NOMEM\n"));
+ return SQLITE_IOERR_NOMEM_BKPT;
+ }
+ }
+#endif /* SQLITE_WIN32_HAS_ANSI */
+#endif /* !SQLITE_OS_WINRT */
+
+ /*
+ ** Check to make sure the temporary directory ends with an appropriate
+ ** separator. If it does not and there is not enough space left to add
+ ** one, fail.
+ */
+ if( !winMakeEndInDirSep(nDir+1, zBuf) ){
+ sqlite3_free(zBuf);
+ OSTRACE(("TEMP-FILENAME rc=SQLITE_ERROR\n"));
+ return winLogError(SQLITE_ERROR, 0, "winGetTempname4", 0);
+ }
+
+ /*
+ ** Check that the output buffer is large enough for the temporary file
+ ** name in the following format:
+ **
+ ** "<temporary_directory>/etilqs_XXXXXXXXXXXXXXX\0\0"
+ **
+ ** If not, return SQLITE_ERROR. The number 17 is used here in order to
+  ** account for the space used by the 15-character random suffix and the
+  ** two trailing NUL characters. The final directory separator character
+  ** has already been added if it was not already present.
+ */
+ nLen = sqlite3Strlen30(zBuf);
+ if( (nLen + nPre + 17) > nBuf ){
+ sqlite3_free(zBuf);
+ OSTRACE(("TEMP-FILENAME rc=SQLITE_ERROR\n"));
+ return winLogError(SQLITE_ERROR, 0, "winGetTempname5", 0);
+ }
+
+ sqlite3_snprintf(nBuf-16-nLen, zBuf+nLen, SQLITE_TEMP_FILE_PREFIX);
+
+ j = sqlite3Strlen30(zBuf);
+ sqlite3_randomness(15, &zBuf[j]);
+ for(i=0; i<15; i++, j++){
+ zBuf[j] = (char)zChars[ ((unsigned char)zBuf[j])%(sizeof(zChars)-1) ];
+ }
+ zBuf[j] = 0;
+ zBuf[j+1] = 0;
+ *pzBuf = zBuf;
+
+ OSTRACE(("TEMP-FILENAME name=%s, rc=SQLITE_OK\n", zBuf));
+ return SQLITE_OK;
+}
+
+/*
+** Return TRUE if the named file is really a directory. Return FALSE if
+** it is something other than a directory, or if there is any kind of memory
+** allocation failure.
+*/
+static int winIsDir(const void *zConverted){
+ DWORD attr;
+ int rc = 0;
+ DWORD lastErrno;
+
+ if( osIsNT() ){
+ int cnt = 0;
+ WIN32_FILE_ATTRIBUTE_DATA sAttrData;
+ memset(&sAttrData, 0, sizeof(sAttrData));
+ while( !(rc = osGetFileAttributesExW((LPCWSTR)zConverted,
+ GetFileExInfoStandard,
+ &sAttrData)) && winRetryIoerr(&cnt, &lastErrno) ){}
+ if( !rc ){
+ return 0; /* Invalid name? */
+ }
+ attr = sAttrData.dwFileAttributes;
+#if SQLITE_OS_WINCE==0
+ }else{
+ attr = osGetFileAttributesA((char*)zConverted);
+#endif
+ }
+ return (attr!=INVALID_FILE_ATTRIBUTES) && (attr&FILE_ATTRIBUTE_DIRECTORY);
+}
+
+/* forward reference */
+static int winAccess(
+ sqlite3_vfs *pVfs, /* Not used on win32 */
+ const char *zFilename, /* Name of file to check */
+ int flags, /* Type of test to make on this file */
+ int *pResOut /* OUT: Result */
+);
+
+/*
+** Open a file.
+*/
+static int winOpen(
+ sqlite3_vfs *pVfs, /* Used to get maximum path length and AppData */
+ const char *zName, /* Name of the file (UTF-8) */
+ sqlite3_file *id, /* Write the SQLite file handle here */
+ int flags, /* Open mode flags */
+ int *pOutFlags /* Status return flags */
+){
+ HANDLE h;
+ DWORD lastErrno = 0;
+ DWORD dwDesiredAccess;
+ DWORD dwShareMode;
+ DWORD dwCreationDisposition;
+ DWORD dwFlagsAndAttributes = 0;
+#if SQLITE_OS_WINCE
+ int isTemp = 0;
+#endif
+ winVfsAppData *pAppData;
+ winFile *pFile = (winFile*)id;
+ void *zConverted; /* Filename in OS encoding */
+ const char *zUtf8Name = zName; /* Filename in UTF-8 encoding */
+ int cnt = 0;
+
+  /* If argument zName is a NULL pointer, this function is required to open
+ ** a temporary file. Use this buffer to store the file name in.
+ */
+ char *zTmpname = 0; /* For temporary filename, if necessary. */
+
+ int rc = SQLITE_OK; /* Function Return Code */
+#if !defined(NDEBUG) || SQLITE_OS_WINCE
+ int eType = flags&0xFFFFFF00; /* Type of file to open */
+#endif
+
+ int isExclusive = (flags & SQLITE_OPEN_EXCLUSIVE);
+ int isDelete = (flags & SQLITE_OPEN_DELETEONCLOSE);
+ int isCreate = (flags & SQLITE_OPEN_CREATE);
+ int isReadonly = (flags & SQLITE_OPEN_READONLY);
+ int isReadWrite = (flags & SQLITE_OPEN_READWRITE);
+
+#ifndef NDEBUG
+ int isOpenJournal = (isCreate && (
+ eType==SQLITE_OPEN_SUPER_JOURNAL
+ || eType==SQLITE_OPEN_MAIN_JOURNAL
+ || eType==SQLITE_OPEN_WAL
+ ));
+#endif
+
+ OSTRACE(("OPEN name=%s, pFile=%p, flags=%x, pOutFlags=%p\n",
+ zUtf8Name, id, flags, pOutFlags));
+
+  /* Check that the following statements are true:
+  **
+  ** (a) Exactly one of the READWRITE and READONLY flags must be set, and
+  ** (b) if CREATE is set, then READWRITE must also be set, and
+  ** (c) if EXCLUSIVE is set, then CREATE must also be set, and
+  ** (d) if DELETEONCLOSE is set, then CREATE must also be set.
+ */
+ assert((isReadonly==0 || isReadWrite==0) && (isReadWrite || isReadonly));
+ assert(isCreate==0 || isReadWrite);
+ assert(isExclusive==0 || isCreate);
+ assert(isDelete==0 || isCreate);
+
+ /* The main DB, main journal, WAL file and super-journal are never
+ ** automatically deleted. Nor are they ever temporary files. */
+ assert( (!isDelete && zName) || eType!=SQLITE_OPEN_MAIN_DB );
+ assert( (!isDelete && zName) || eType!=SQLITE_OPEN_MAIN_JOURNAL );
+ assert( (!isDelete && zName) || eType!=SQLITE_OPEN_SUPER_JOURNAL );
+ assert( (!isDelete && zName) || eType!=SQLITE_OPEN_WAL );
+
+ /* Assert that the upper layer has set one of the "file-type" flags. */
+ assert( eType==SQLITE_OPEN_MAIN_DB || eType==SQLITE_OPEN_TEMP_DB
+ || eType==SQLITE_OPEN_MAIN_JOURNAL || eType==SQLITE_OPEN_TEMP_JOURNAL
+ || eType==SQLITE_OPEN_SUBJOURNAL || eType==SQLITE_OPEN_SUPER_JOURNAL
+ || eType==SQLITE_OPEN_TRANSIENT_DB || eType==SQLITE_OPEN_WAL
+ );
+
+ assert( pFile!=0 );
+ memset(pFile, 0, sizeof(winFile));
+ pFile->h = INVALID_HANDLE_VALUE;
+
+#if SQLITE_OS_WINRT
+ if( !zUtf8Name && !sqlite3_temp_directory ){
+ sqlite3_log(SQLITE_ERROR,
+ "sqlite3_temp_directory variable should be set for WinRT");
+ }
+#endif
+
+ /* If the second argument to this function is NULL, generate a
+ ** temporary file name to use
+ */
+ if( !zUtf8Name ){
+ assert( isDelete && !isOpenJournal );
+ rc = winGetTempname(pVfs, &zTmpname);
+ if( rc!=SQLITE_OK ){
+ OSTRACE(("OPEN name=%s, rc=%s", zUtf8Name, sqlite3ErrName(rc)));
+ return rc;
+ }
+ zUtf8Name = zTmpname;
+ }
+
+ /* Database filenames are double-zero terminated if they are not
+ ** URIs with parameters. Hence, they can always be passed into
+ ** sqlite3_uri_parameter().
+ */
+ assert( (eType!=SQLITE_OPEN_MAIN_DB) || (flags & SQLITE_OPEN_URI) ||
+ zUtf8Name[sqlite3Strlen30(zUtf8Name)+1]==0 );
+
+ /* Convert the filename to the system encoding. */
+ zConverted = winConvertFromUtf8Filename(zUtf8Name);
+ if( zConverted==0 ){
+ sqlite3_free(zTmpname);
+ OSTRACE(("OPEN name=%s, rc=SQLITE_IOERR_NOMEM", zUtf8Name));
+ return SQLITE_IOERR_NOMEM_BKPT;
+ }
+
+ if( winIsDir(zConverted) ){
+ sqlite3_free(zConverted);
+ sqlite3_free(zTmpname);
+ OSTRACE(("OPEN name=%s, rc=SQLITE_CANTOPEN_ISDIR", zUtf8Name));
+ return SQLITE_CANTOPEN_ISDIR;
+ }
+
+ if( isReadWrite ){
+ dwDesiredAccess = GENERIC_READ | GENERIC_WRITE;
+ }else{
+ dwDesiredAccess = GENERIC_READ;
+ }
+
+ /* SQLITE_OPEN_EXCLUSIVE is used to make sure that a new file is
+ ** created. SQLite doesn't use it to indicate "exclusive access"
+ ** as it is usually understood.
+ */
+ if( isExclusive ){
+ /* Creates a new file, only if it does not already exist. */
+ /* If the file exists, it fails. */
+ dwCreationDisposition = CREATE_NEW;
+ }else if( isCreate ){
+ /* Open existing file, or create if it doesn't exist */
+ dwCreationDisposition = OPEN_ALWAYS;
+ }else{
+ /* Opens a file, only if it exists. */
+ dwCreationDisposition = OPEN_EXISTING;
+ }
+
+ dwShareMode = FILE_SHARE_READ | FILE_SHARE_WRITE;
+
+ if( isDelete ){
+#if SQLITE_OS_WINCE
+ dwFlagsAndAttributes = FILE_ATTRIBUTE_HIDDEN;
+ isTemp = 1;
+#else
+ dwFlagsAndAttributes = FILE_ATTRIBUTE_TEMPORARY
+ | FILE_ATTRIBUTE_HIDDEN
+ | FILE_FLAG_DELETE_ON_CLOSE;
+#endif
+ }else{
+ dwFlagsAndAttributes = FILE_ATTRIBUTE_NORMAL;
+ }
+ /* Reports from the internet are that performance is always
+ ** better if FILE_FLAG_RANDOM_ACCESS is used. Ticket #2699. */
+#if SQLITE_OS_WINCE
+ dwFlagsAndAttributes |= FILE_FLAG_RANDOM_ACCESS;
+#endif
+
+ if( osIsNT() ){
+#if SQLITE_OS_WINRT
+ CREATEFILE2_EXTENDED_PARAMETERS extendedParameters;
+ extendedParameters.dwSize = sizeof(CREATEFILE2_EXTENDED_PARAMETERS);
+ extendedParameters.dwFileAttributes =
+ dwFlagsAndAttributes & FILE_ATTRIBUTE_MASK;
+ extendedParameters.dwFileFlags = dwFlagsAndAttributes & FILE_FLAG_MASK;
+ extendedParameters.dwSecurityQosFlags = SECURITY_ANONYMOUS;
+ extendedParameters.lpSecurityAttributes = NULL;
+ extendedParameters.hTemplateFile = NULL;
+ do{
+ h = osCreateFile2((LPCWSTR)zConverted,
+ dwDesiredAccess,
+ dwShareMode,
+ dwCreationDisposition,
+ &extendedParameters);
+ if( h!=INVALID_HANDLE_VALUE ) break;
+ if( isReadWrite ){
+ int rc2, isRO = 0;
+ sqlite3BeginBenignMalloc();
+ rc2 = winAccess(pVfs, zName, SQLITE_ACCESS_READ, &isRO);
+ sqlite3EndBenignMalloc();
+ if( rc2==SQLITE_OK && isRO ) break;
+ }
+ }while( winRetryIoerr(&cnt, &lastErrno) );
+#else
+ do{
+ h = osCreateFileW((LPCWSTR)zConverted,
+ dwDesiredAccess,
+ dwShareMode, NULL,
+ dwCreationDisposition,
+ dwFlagsAndAttributes,
+ NULL);
+ if( h!=INVALID_HANDLE_VALUE ) break;
+ if( isReadWrite ){
+ int rc2, isRO = 0;
+ sqlite3BeginBenignMalloc();
+ rc2 = winAccess(pVfs, zName, SQLITE_ACCESS_READ, &isRO);
+ sqlite3EndBenignMalloc();
+ if( rc2==SQLITE_OK && isRO ) break;
+ }
+ }while( winRetryIoerr(&cnt, &lastErrno) );
+#endif
+ }
+#ifdef SQLITE_WIN32_HAS_ANSI
+ else{
+ do{
+ h = osCreateFileA((LPCSTR)zConverted,
+ dwDesiredAccess,
+ dwShareMode, NULL,
+ dwCreationDisposition,
+ dwFlagsAndAttributes,
+ NULL);
+ if( h!=INVALID_HANDLE_VALUE ) break;
+ if( isReadWrite ){
+ int rc2, isRO = 0;
+ sqlite3BeginBenignMalloc();
+ rc2 = winAccess(pVfs, zName, SQLITE_ACCESS_READ, &isRO);
+ sqlite3EndBenignMalloc();
+ if( rc2==SQLITE_OK && isRO ) break;
+ }
+ }while( winRetryIoerr(&cnt, &lastErrno) );
+ }
+#endif
+ winLogIoerr(cnt, __LINE__);
+
+ OSTRACE(("OPEN file=%p, name=%s, access=%lx, rc=%s\n", h, zUtf8Name,
+ dwDesiredAccess, (h==INVALID_HANDLE_VALUE) ? "failed" : "ok"));
+
+ if( h==INVALID_HANDLE_VALUE ){
+ sqlite3_free(zConverted);
+ sqlite3_free(zTmpname);
+ if( isReadWrite && !isExclusive ){
+ return winOpen(pVfs, zName, id,
+ ((flags|SQLITE_OPEN_READONLY) &
+ ~(SQLITE_OPEN_CREATE|SQLITE_OPEN_READWRITE)),
+ pOutFlags);
+ }else{
+ pFile->lastErrno = lastErrno;
+ winLogError(SQLITE_CANTOPEN, pFile->lastErrno, "winOpen", zUtf8Name);
+ return SQLITE_CANTOPEN_BKPT;
+ }
+ }
+
+ if( pOutFlags ){
+ if( isReadWrite ){
+ *pOutFlags = SQLITE_OPEN_READWRITE;
+ }else{
+ *pOutFlags = SQLITE_OPEN_READONLY;
+ }
+ }
+
+ OSTRACE(("OPEN file=%p, name=%s, access=%lx, pOutFlags=%p, *pOutFlags=%d, "
+ "rc=%s\n", h, zUtf8Name, dwDesiredAccess, pOutFlags, pOutFlags ?
+ *pOutFlags : 0, (h==INVALID_HANDLE_VALUE) ? "failed" : "ok"));
+
+ pAppData = (winVfsAppData*)pVfs->pAppData;
+
+#if SQLITE_OS_WINCE
+ {
+ if( isReadWrite && eType==SQLITE_OPEN_MAIN_DB
+ && ((pAppData==NULL) || !pAppData->bNoLock)
+ && (rc = winceCreateLock(zName, pFile))!=SQLITE_OK
+ ){
+ osCloseHandle(h);
+ sqlite3_free(zConverted);
+ sqlite3_free(zTmpname);
+ OSTRACE(("OPEN-CE-LOCK name=%s, rc=%s\n", zName, sqlite3ErrName(rc)));
+ return rc;
+ }
+ }
+ if( isTemp ){
+ pFile->zDeleteOnClose = zConverted;
+ }else
+#endif
+ {
+ sqlite3_free(zConverted);
+ }
+
+ sqlite3_free(zTmpname);
+ id->pMethods = pAppData ? pAppData->pMethod : &winIoMethod;
+ pFile->pVfs = pVfs;
+ pFile->h = h;
+ if( isReadonly ){
+ pFile->ctrlFlags |= WINFILE_RDONLY;
+ }
+ if( (flags & SQLITE_OPEN_MAIN_DB)
+ && sqlite3_uri_boolean(zName, "psow", SQLITE_POWERSAFE_OVERWRITE)
+ ){
+ pFile->ctrlFlags |= WINFILE_PSOW;
+ }
+ pFile->lastErrno = NO_ERROR;
+ pFile->zPath = zName;
+#if SQLITE_MAX_MMAP_SIZE>0
+ pFile->hMap = NULL;
+ pFile->pMapRegion = 0;
+ pFile->mmapSize = 0;
+ pFile->mmapSizeMax = sqlite3GlobalConfig.szMmap;
+#endif
+
+ OpenCounter(+1);
+ return rc;
+}
+
+/*
+** Delete the named file.
+**
+** Note that Windows does not allow a file to be deleted if some other
+** process has it open. Sometimes a virus scanner or indexing program
+** will open a journal file shortly after it is created in order to do
+** whatever it does. While this other process is holding the
+** file open, we will be unable to delete it. To work around this
+** problem, we delay 100 milliseconds and try to delete again. Up
+** to MX_DELETION_ATTEMPTs deletion attempts are run before giving
+** up and returning an error.
+*/
+static int winDelete(
+ sqlite3_vfs *pVfs, /* Not used on win32 */
+ const char *zFilename, /* Name of file to delete */
+ int syncDir /* Not used on win32 */
+){
+ int cnt = 0;
+ int rc;
+ DWORD attr;
+ DWORD lastErrno = 0;
+ void *zConverted;
+ UNUSED_PARAMETER(pVfs);
+ UNUSED_PARAMETER(syncDir);
+
+ SimulateIOError(return SQLITE_IOERR_DELETE);
+ OSTRACE(("DELETE name=%s, syncDir=%d\n", zFilename, syncDir));
+
+ zConverted = winConvertFromUtf8Filename(zFilename);
+ if( zConverted==0 ){
+ OSTRACE(("DELETE name=%s, rc=SQLITE_IOERR_NOMEM\n", zFilename));
+ return SQLITE_IOERR_NOMEM_BKPT;
+ }
+ if( osIsNT() ){
+ do {
+#if SQLITE_OS_WINRT
+ WIN32_FILE_ATTRIBUTE_DATA sAttrData;
+ memset(&sAttrData, 0, sizeof(sAttrData));
+ if ( osGetFileAttributesExW(zConverted, GetFileExInfoStandard,
+ &sAttrData) ){
+ attr = sAttrData.dwFileAttributes;
+ }else{
+ lastErrno = osGetLastError();
+ if( lastErrno==ERROR_FILE_NOT_FOUND
+ || lastErrno==ERROR_PATH_NOT_FOUND ){
+ rc = SQLITE_IOERR_DELETE_NOENT; /* Already gone? */
+ }else{
+ rc = SQLITE_ERROR;
+ }
+ break;
+ }
+#else
+ attr = osGetFileAttributesW(zConverted);
+#endif
+ if ( attr==INVALID_FILE_ATTRIBUTES ){
+ lastErrno = osGetLastError();
+ if( lastErrno==ERROR_FILE_NOT_FOUND
+ || lastErrno==ERROR_PATH_NOT_FOUND ){
+ rc = SQLITE_IOERR_DELETE_NOENT; /* Already gone? */
+ }else{
+ rc = SQLITE_ERROR;
+ }
+ break;
+ }
+ if ( attr&FILE_ATTRIBUTE_DIRECTORY ){
+ rc = SQLITE_ERROR; /* Files only. */
+ break;
+ }
+ if ( osDeleteFileW(zConverted) ){
+ rc = SQLITE_OK; /* Deleted OK. */
+ break;
+ }
+ if ( !winRetryIoerr(&cnt, &lastErrno) ){
+ rc = SQLITE_ERROR; /* No more retries. */
+ break;
+ }
+ } while(1);
+ }
+#ifdef SQLITE_WIN32_HAS_ANSI
+ else{
+ do {
+ attr = osGetFileAttributesA(zConverted);
+ if ( attr==INVALID_FILE_ATTRIBUTES ){
+ lastErrno = osGetLastError();
+ if( lastErrno==ERROR_FILE_NOT_FOUND
+ || lastErrno==ERROR_PATH_NOT_FOUND ){
+ rc = SQLITE_IOERR_DELETE_NOENT; /* Already gone? */
+ }else{
+ rc = SQLITE_ERROR;
+ }
+ break;
+ }
+ if ( attr&FILE_ATTRIBUTE_DIRECTORY ){
+ rc = SQLITE_ERROR; /* Files only. */
+ break;
+ }
+ if ( osDeleteFileA(zConverted) ){
+ rc = SQLITE_OK; /* Deleted OK. */
+ break;
+ }
+ if ( !winRetryIoerr(&cnt, &lastErrno) ){
+ rc = SQLITE_ERROR; /* No more retries. */
+ break;
+ }
+ } while(1);
+ }
+#endif
+ if( rc && rc!=SQLITE_IOERR_DELETE_NOENT ){
+ rc = winLogError(SQLITE_IOERR_DELETE, lastErrno, "winDelete", zFilename);
+ }else{
+ winLogIoerr(cnt, __LINE__);
+ }
+ sqlite3_free(zConverted);
+ OSTRACE(("DELETE name=%s, rc=%s\n", zFilename, sqlite3ErrName(rc)));
+ return rc;
+}
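+
+/*
+** The retry strategy described before winDelete(), reduced to a
+** standalone Win32 sketch. The 100 millisecond delay mirrors the
+** comment above; the attempt limit of 3 used here is hypothetical.
+*/
+#include <windows.h>
+
+static int deleteWithRetrySketch(LPCWSTR zPath){
+  int i;
+  for(i=0; i<3; i++){
+    if( DeleteFileW(zPath) ) return 0;            /* deleted OK */
+    if( GetLastError()==ERROR_FILE_NOT_FOUND ){
+      return 0;                                   /* already gone */
+    }
+    Sleep(100);   /* give scanners and indexers time to let go */
+  }
+  return -1;      /* still held open by another process */
+}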
+
+/*
+** Check the existence and status of a file.
+*/
+static int winAccess(
+ sqlite3_vfs *pVfs, /* Not used on win32 */
+ const char *zFilename, /* Name of file to check */
+ int flags, /* Type of test to make on this file */
+ int *pResOut /* OUT: Result */
+){
+ DWORD attr;
+ int rc = 0;
+ DWORD lastErrno = 0;
+ void *zConverted;
+ UNUSED_PARAMETER(pVfs);
+
+ SimulateIOError( return SQLITE_IOERR_ACCESS; );
+ OSTRACE(("ACCESS name=%s, flags=%x, pResOut=%p\n",
+ zFilename, flags, pResOut));
+
+ zConverted = winConvertFromUtf8Filename(zFilename);
+ if( zConverted==0 ){
+ OSTRACE(("ACCESS name=%s, rc=SQLITE_IOERR_NOMEM\n", zFilename));
+ return SQLITE_IOERR_NOMEM_BKPT;
+ }
+ if( osIsNT() ){
+ int cnt = 0;
+ WIN32_FILE_ATTRIBUTE_DATA sAttrData;
+ memset(&sAttrData, 0, sizeof(sAttrData));
+ while( !(rc = osGetFileAttributesExW((LPCWSTR)zConverted,
+ GetFileExInfoStandard,
+ &sAttrData)) && winRetryIoerr(&cnt, &lastErrno) ){}
+ if( rc ){
+ /* For an SQLITE_ACCESS_EXISTS query, treat a zero-length file
+ ** as if it does not exist.
+ */
+ if( flags==SQLITE_ACCESS_EXISTS
+ && sAttrData.nFileSizeHigh==0
+ && sAttrData.nFileSizeLow==0 ){
+ attr = INVALID_FILE_ATTRIBUTES;
+ }else{
+ attr = sAttrData.dwFileAttributes;
+ }
+ }else{
+ winLogIoerr(cnt, __LINE__);
+ if( lastErrno!=ERROR_FILE_NOT_FOUND && lastErrno!=ERROR_PATH_NOT_FOUND ){
+ sqlite3_free(zConverted);
+ return winLogError(SQLITE_IOERR_ACCESS, lastErrno, "winAccess",
+ zFilename);
+ }else{
+ attr = INVALID_FILE_ATTRIBUTES;
+ }
+ }
+ }
+#ifdef SQLITE_WIN32_HAS_ANSI
+ else{
+ attr = osGetFileAttributesA((char*)zConverted);
+ }
+#endif
+ sqlite3_free(zConverted);
+ switch( flags ){
+ case SQLITE_ACCESS_READ:
+ case SQLITE_ACCESS_EXISTS:
+ rc = attr!=INVALID_FILE_ATTRIBUTES;
+ break;
+ case SQLITE_ACCESS_READWRITE:
+ rc = attr!=INVALID_FILE_ATTRIBUTES &&
+ (attr & FILE_ATTRIBUTE_READONLY)==0;
+ break;
+ default:
+ assert(!"Invalid flags argument");
+ }
+ *pResOut = rc;
+ OSTRACE(("ACCESS name=%s, pResOut=%p, *pResOut=%d, rc=SQLITE_OK\n",
+ zFilename, pResOut, *pResOut));
+ return SQLITE_OK;
+}
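+
+/*
+** Applications can reach winAccess() indirectly through the registered
+** VFS object. A minimal sketch using only the public API ("test.db" is
+** a placeholder path):
+*/
+static int fileExistsSketch(void){
+  sqlite3_vfs *pVfs = sqlite3_vfs_find(0);   /* the default VFS */
+  int exists = 0;
+  if( pVfs==0 ) return 0;
+  pVfs->xAccess(pVfs, "test.db", SQLITE_ACCESS_EXISTS, &exists);
+  return exists;
+}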
+
+/*
+** Returns non-zero if the specified path name starts with the "long path"
+** prefix.
+*/
+static BOOL winIsLongPathPrefix(
+ const char *zPathname
+){
+ return ( zPathname[0]=='\\' && zPathname[1]=='\\'
+ && zPathname[2]=='?' && zPathname[3]=='\\' );
+}
+
+/*
+** Returns non-zero if the specified path name starts with a drive letter
+** followed by a colon character.
+*/
+static BOOL winIsDriveLetterAndColon(
+ const char *zPathname
+){
+ return ( sqlite3Isalpha(zPathname[0]) && zPathname[1]==':' );
+}
+
+/*
+** Returns non-zero if the specified path name should be used verbatim. If
+** non-zero is returned from this function, the calling function must simply
+** use the provided path name verbatim -OR- resolve it into a full path name
+** using the GetFullPathName Win32 API function (if available).
+*/
+static BOOL winIsVerbatimPathname(
+ const char *zPathname
+){
+ /*
+ ** If the path name starts with a forward slash or a backslash, it is either
+ ** a legal UNC name, a volume relative path, or an absolute path name in the
+ ** "Unix" format on Windows. There is no easy way to differentiate between
+ ** the final two cases; therefore, we return the safer return value of TRUE
+ ** so that callers of this function will simply use it verbatim.
+ */
+ if ( winIsDirSep(zPathname[0]) ){
+ return TRUE;
+ }
+
+ /*
+ ** If the path name starts with a letter and a colon it is either a volume
+ ** relative path or an absolute path. Callers of this function must not
+ ** attempt to treat it as a relative path name (i.e. they should simply use
+ ** it verbatim).
+ */
+ if ( winIsDriveLetterAndColon(zPathname) ){
+ return TRUE;
+ }
+
+ /*
+ ** If we get to this point, the path name should almost certainly be a purely
+ ** relative one (i.e. not a UNC name, not absolute, and not volume relative).
+ */
+ return FALSE;
+}
+
+/*
+** Turn a relative pathname into a full pathname. Write the full
+** pathname into zOut[]. zOut[] will be at least pVfs->mxPathname
+** bytes in size.
+*/
+static int winFullPathname(
+ sqlite3_vfs *pVfs, /* Pointer to vfs object */
+ const char *zRelative, /* Possibly relative input path */
+ int nFull, /* Size of output buffer in bytes */
+ char *zFull /* Output buffer */
+){
+#if !SQLITE_OS_WINCE && !SQLITE_OS_WINRT && !defined(__CYGWIN__)
+ DWORD nByte;
+ void *zConverted;
+ char *zOut;
+#endif
+
+ /* If this path name begins with "/X:" or "\\?\", where "X" is any
+ ** alphabetic character, discard the initial "/" from the pathname.
+ */
+ if( zRelative[0]=='/' && (winIsDriveLetterAndColon(zRelative+1)
+ || winIsLongPathPrefix(zRelative+1)) ){
+ zRelative++;
+ }
+
+#if defined(__CYGWIN__)
+ SimulateIOError( return SQLITE_ERROR );
+ UNUSED_PARAMETER(nFull);
+ assert( nFull>=pVfs->mxPathname );
+ if ( sqlite3_data_directory && !winIsVerbatimPathname(zRelative) ){
+ /*
+ ** NOTE: We are dealing with a relative path name and the data
+ ** directory has been set. Therefore, use it as the basis
+ ** for converting the relative path name to an absolute
+ ** one by prepending the data directory and a slash.
+ */
+ char *zOut = sqlite3MallocZero( pVfs->mxPathname+1 );
+ if( !zOut ){
+ return SQLITE_IOERR_NOMEM_BKPT;
+ }
+ if( cygwin_conv_path(
+ (osIsNT() ? CCP_POSIX_TO_WIN_W : CCP_POSIX_TO_WIN_A) |
+ CCP_RELATIVE, zRelative, zOut, pVfs->mxPathname+1)<0 ){
+ sqlite3_free(zOut);
+ return winLogError(SQLITE_CANTOPEN_CONVPATH, (DWORD)errno,
+ "winFullPathname1", zRelative);
+ }else{
+ char *zUtf8 = winConvertToUtf8Filename(zOut);
+ if( !zUtf8 ){
+ sqlite3_free(zOut);
+ return SQLITE_IOERR_NOMEM_BKPT;
+ }
+ sqlite3_snprintf(MIN(nFull, pVfs->mxPathname), zFull, "%s%c%s",
+ sqlite3_data_directory, winGetDirSep(), zUtf8);
+ sqlite3_free(zUtf8);
+ sqlite3_free(zOut);
+ }
+ }else{
+ char *zOut = sqlite3MallocZero( pVfs->mxPathname+1 );
+ if( !zOut ){
+ return SQLITE_IOERR_NOMEM_BKPT;
+ }
+ if( cygwin_conv_path(
+ (osIsNT() ? CCP_POSIX_TO_WIN_W : CCP_POSIX_TO_WIN_A),
+ zRelative, zOut, pVfs->mxPathname+1)<0 ){
+ sqlite3_free(zOut);
+ return winLogError(SQLITE_CANTOPEN_CONVPATH, (DWORD)errno,
+ "winFullPathname2", zRelative);
+ }else{
+ char *zUtf8 = winConvertToUtf8Filename(zOut);
+ if( !zUtf8 ){
+ sqlite3_free(zOut);
+ return SQLITE_IOERR_NOMEM_BKPT;
+ }
+ sqlite3_snprintf(MIN(nFull, pVfs->mxPathname), zFull, "%s", zUtf8);
+ sqlite3_free(zUtf8);
+ sqlite3_free(zOut);
+ }
+ }
+ return SQLITE_OK;
+#endif
+
+#if (SQLITE_OS_WINCE || SQLITE_OS_WINRT) && !defined(__CYGWIN__)
+ SimulateIOError( return SQLITE_ERROR );
+ /* WinCE has no concept of a relative pathname, or so I am told. */
+ /* WinRT has no way to convert a relative path to an absolute one. */
+ if ( sqlite3_data_directory && !winIsVerbatimPathname(zRelative) ){
+ /*
+ ** NOTE: We are dealing with a relative path name and the data
+ ** directory has been set. Therefore, use it as the basis
+ ** for converting the relative path name to an absolute
+ ** one by prepending the data directory and a backslash.
+ */
+ sqlite3_snprintf(MIN(nFull, pVfs->mxPathname), zFull, "%s%c%s",
+ sqlite3_data_directory, winGetDirSep(), zRelative);
+ }else{
+ sqlite3_snprintf(MIN(nFull, pVfs->mxPathname), zFull, "%s", zRelative);
+ }
+ return SQLITE_OK;
+#endif
+
+#if !SQLITE_OS_WINCE && !SQLITE_OS_WINRT && !defined(__CYGWIN__)
+ /* It's odd to simulate an io-error here, but really this is just
+ ** using the io-error infrastructure to test that SQLite handles this
+ ** function failing. This function could fail if, for example, the
+ ** current working directory has been unlinked.
+ */
+ SimulateIOError( return SQLITE_ERROR );
+ if ( sqlite3_data_directory && !winIsVerbatimPathname(zRelative) ){
+ /*
+ ** NOTE: We are dealing with a relative path name and the data
+ ** directory has been set. Therefore, use it as the basis
+ ** for converting the relative path name to an absolute
+ ** one by prepending the data directory and a backslash.
+ */
+ sqlite3_snprintf(MIN(nFull, pVfs->mxPathname), zFull, "%s%c%s",
+ sqlite3_data_directory, winGetDirSep(), zRelative);
+ return SQLITE_OK;
+ }
+ zConverted = winConvertFromUtf8Filename(zRelative);
+ if( zConverted==0 ){
+ return SQLITE_IOERR_NOMEM_BKPT;
+ }
+ if( osIsNT() ){
+ LPWSTR zTemp;
+ nByte = osGetFullPathNameW((LPCWSTR)zConverted, 0, 0, 0);
+ if( nByte==0 ){
+ sqlite3_free(zConverted);
+ return winLogError(SQLITE_CANTOPEN_FULLPATH, osGetLastError(),
+ "winFullPathname1", zRelative);
+ }
+ nByte += 3;
+ zTemp = sqlite3MallocZero( nByte*sizeof(zTemp[0]) );
+ if( zTemp==0 ){
+ sqlite3_free(zConverted);
+ return SQLITE_IOERR_NOMEM_BKPT;
+ }
+ nByte = osGetFullPathNameW((LPCWSTR)zConverted, nByte, zTemp, 0);
+ if( nByte==0 ){
+ sqlite3_free(zConverted);
+ sqlite3_free(zTemp);
+ return winLogError(SQLITE_CANTOPEN_FULLPATH, osGetLastError(),
+ "winFullPathname2", zRelative);
+ }
+ sqlite3_free(zConverted);
+ zOut = winUnicodeToUtf8(zTemp);
+ sqlite3_free(zTemp);
+ }
+#ifdef SQLITE_WIN32_HAS_ANSI
+ else{
+ char *zTemp;
+ nByte = osGetFullPathNameA((char*)zConverted, 0, 0, 0);
+ if( nByte==0 ){
+ sqlite3_free(zConverted);
+ return winLogError(SQLITE_CANTOPEN_FULLPATH, osGetLastError(),
+ "winFullPathname3", zRelative);
+ }
+ nByte += 3;
+ zTemp = sqlite3MallocZero( nByte*sizeof(zTemp[0]) );
+ if( zTemp==0 ){
+ sqlite3_free(zConverted);
+ return SQLITE_IOERR_NOMEM_BKPT;
+ }
+ nByte = osGetFullPathNameA((char*)zConverted, nByte, zTemp, 0);
+ if( nByte==0 ){
+ sqlite3_free(zConverted);
+ sqlite3_free(zTemp);
+ return winLogError(SQLITE_CANTOPEN_FULLPATH, osGetLastError(),
+ "winFullPathname4", zRelative);
+ }
+ sqlite3_free(zConverted);
+ zOut = winMbcsToUtf8(zTemp, osAreFileApisANSI());
+ sqlite3_free(zTemp);
+ }
+#endif
+ if( zOut ){
+ sqlite3_snprintf(MIN(nFull, pVfs->mxPathname), zFull, "%s", zOut);
+ sqlite3_free(zOut);
+ return SQLITE_OK;
+ }else{
+ return SQLITE_IOERR_NOMEM_BKPT;
+ }
+#endif
+}
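+
+/*
+** A usage sketch for the path expansion above, again through the public
+** VFS object. A real caller should size the output buffer from
+** pVfs->mxPathname, as the guard below insists.
+*/
+static int fullPathSketch(const char *zRelative, char *zOut, int nOut){
+  sqlite3_vfs *pVfs = sqlite3_vfs_find(0);   /* the default VFS */
+  if( pVfs==0 || nOut<pVfs->mxPathname ) return SQLITE_ERROR;
+  return pVfs->xFullPathname(pVfs, zRelative, nOut, zOut);
+}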
+
+#ifndef SQLITE_OMIT_LOAD_EXTENSION
+/*
+** Interfaces for opening a shared library, finding entry points
+** within the shared library, and closing the shared library.
+*/
+static void *winDlOpen(sqlite3_vfs *pVfs, const char *zFilename){
+ HANDLE h;
+#if defined(__CYGWIN__)
+ int nFull = pVfs->mxPathname+1;
+ char *zFull = sqlite3MallocZero( nFull );
+ void *zConverted = 0;
+ if( zFull==0 ){
+ OSTRACE(("DLOPEN name=%s, handle=%p\n", zFilename, (void*)0));
+ return 0;
+ }
+ if( winFullPathname(pVfs, zFilename, nFull, zFull)!=SQLITE_OK ){
+ sqlite3_free(zFull);
+ OSTRACE(("DLOPEN name=%s, handle=%p\n", zFilename, (void*)0));
+ return 0;
+ }
+ zConverted = winConvertFromUtf8Filename(zFull);
+ sqlite3_free(zFull);
+#else
+ void *zConverted = winConvertFromUtf8Filename(zFilename);
+ UNUSED_PARAMETER(pVfs);
+#endif
+ if( zConverted==0 ){
+ OSTRACE(("DLOPEN name=%s, handle=%p\n", zFilename, (void*)0));
+ return 0;
+ }
+ if( osIsNT() ){
+#if SQLITE_OS_WINRT
+ h = osLoadPackagedLibrary((LPCWSTR)zConverted, 0);
+#else
+ h = osLoadLibraryW((LPCWSTR)zConverted);
+#endif
+ }
+#ifdef SQLITE_WIN32_HAS_ANSI
+ else{
+ h = osLoadLibraryA((char*)zConverted);
+ }
+#endif
+ OSTRACE(("DLOPEN name=%s, handle=%p\n", zFilename, (void*)h));
+ sqlite3_free(zConverted);
+ return (void*)h;
+}
+static void winDlError(sqlite3_vfs *pVfs, int nBuf, char *zBufOut){
+ UNUSED_PARAMETER(pVfs);
+ winGetLastErrorMsg(osGetLastError(), nBuf, zBufOut);
+}
+static void (*winDlSym(sqlite3_vfs *pVfs,void *pH,const char *zSym))(void){
+ FARPROC proc;
+ UNUSED_PARAMETER(pVfs);
+ proc = osGetProcAddressA((HANDLE)pH, zSym);
+ OSTRACE(("DLSYM handle=%p, symbol=%s, address=%p\n",
+ (void*)pH, zSym, (void*)proc));
+ return (void(*)(void))proc;
+}
+static void winDlClose(sqlite3_vfs *pVfs, void *pHandle){
+ UNUSED_PARAMETER(pVfs);
+ osFreeLibrary((HANDLE)pHandle);
+ OSTRACE(("DLCLOSE handle=%p\n", (void*)pHandle));
+}
+#else /* if SQLITE_OMIT_LOAD_EXTENSION is defined: */
+ #define winDlOpen 0
+ #define winDlError 0
+ #define winDlSym 0
+ #define winDlClose 0
+#endif
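+
+/*
+** The winDl* methods above back the public extension-loading API. A
+** minimal sketch, assuming an open handle db and a DLL that exports the
+** default sqlite3_extension_init entry point:
+*/
+static int loadExtSketch(sqlite3 *db, const char *zLib){
+  char *zErr = 0;
+  int rc = sqlite3_enable_load_extension(db, 1);  /* off by default */
+  if( rc==SQLITE_OK ){
+    rc = sqlite3_load_extension(db, zLib, 0, &zErr);
+    sqlite3_free(zErr);
+  }
+  return rc;
+}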
+
+/* State information for the randomness gatherer. */
+typedef struct EntropyGatherer EntropyGatherer;
+struct EntropyGatherer {
+ unsigned char *a; /* Gather entropy into this buffer */
+ int na; /* Size of a[] in bytes */
+ int i; /* XOR next input into a[i] */
+ int nXor; /* Number of XOR operations done */
+};
+
+#if !defined(SQLITE_TEST) && !defined(SQLITE_OMIT_RANDOMNESS)
+/* Mix sz bytes of entropy into p. */
+static void xorMemory(EntropyGatherer *p, unsigned char *x, int sz){
+ int j, k;
+ for(j=0, k=p->i; j<sz; j++){
+ p->a[k++] ^= x[j];
+ if( k>=p->na ) k = 0;
+ }
+ p->i = k;
+ p->nXor += sz;
+}
+#endif /* !defined(SQLITE_TEST) && !defined(SQLITE_OMIT_RANDOMNESS) */
+
+/*
+** Write up to nBuf bytes of randomness into zBuf.
+*/
+static int winRandomness(sqlite3_vfs *pVfs, int nBuf, char *zBuf){
+#if defined(SQLITE_TEST) || defined(SQLITE_OMIT_RANDOMNESS)
+ UNUSED_PARAMETER(pVfs);
+ memset(zBuf, 0, nBuf);
+ return nBuf;
+#else
+ EntropyGatherer e;
+ UNUSED_PARAMETER(pVfs);
+ memset(zBuf, 0, nBuf);
+ e.a = (unsigned char*)zBuf;
+ e.na = nBuf;
+ e.nXor = 0;
+ e.i = 0;
+ {
+ SYSTEMTIME x;
+ osGetSystemTime(&x);
+ xorMemory(&e, (unsigned char*)&x, sizeof(SYSTEMTIME));
+ }
+ {
+ DWORD pid = osGetCurrentProcessId();
+ xorMemory(&e, (unsigned char*)&pid, sizeof(DWORD));
+ }
+#if SQLITE_OS_WINRT
+ {
+ ULONGLONG cnt = osGetTickCount64();
+ xorMemory(&e, (unsigned char*)&cnt, sizeof(ULONGLONG));
+ }
+#else
+ {
+ DWORD cnt = osGetTickCount();
+ xorMemory(&e, (unsigned char*)&cnt, sizeof(DWORD));
+ }
+#endif /* SQLITE_OS_WINRT */
+ {
+ LARGE_INTEGER i;
+ osQueryPerformanceCounter(&i);
+ xorMemory(&e, (unsigned char*)&i, sizeof(LARGE_INTEGER));
+ }
+#if !SQLITE_OS_WINCE && !SQLITE_OS_WINRT && SQLITE_WIN32_USE_UUID
+ {
+ UUID id;
+ memset(&id, 0, sizeof(UUID));
+ osUuidCreate(&id);
+ xorMemory(&e, (unsigned char*)&id, sizeof(UUID));
+ memset(&id, 0, sizeof(UUID));
+ osUuidCreateSequential(&id);
+ xorMemory(&e, (unsigned char*)&id, sizeof(UUID));
+ }
+#endif /* !SQLITE_OS_WINCE && !SQLITE_OS_WINRT && SQLITE_WIN32_USE_UUID */
+ return e.nXor>nBuf ? nBuf : e.nXor;
+#endif /* defined(SQLITE_TEST) || defined(SQLITE_OMIT_RANDOMNESS) */
+}
+
+
+/*
+** Sleep for a little while. Return the amount of time slept.
+*/
+static int winSleep(sqlite3_vfs *pVfs, int microsec){
+ sqlite3_win32_sleep((microsec+999)/1000);
+ UNUSED_PARAMETER(pVfs);
+ return ((microsec+999)/1000)*1000;
+}
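+
+/*
+** For example, a request of 1500 microseconds rounds up to a 2 ms
+** Win32 sleep, and the call reports 2000 microseconds slept:
+**
+**   int us = winSleep(pVfs, 1500);   since (1500+999)/1000 == 2, us == 2000
+*/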
+
+/*
+** The following variable, if set to a non-zero value, is interpreted as
+** the number of seconds since 1970 and is used to set the result of
+** sqlite3OsCurrentTime() during testing.
+*/
+#ifdef SQLITE_TEST
+SQLITE_API int sqlite3_current_time = 0; /* Fake system time in seconds since 1970. */
+#endif
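+
+/*
+** For example, a test harness can freeze the clock at the "billennium"
+** (2001-09-09 01:46:40 UTC) with:
+**
+**   sqlite3_current_time = 1000000000;
+**
+** after which winCurrentTimeInt64() reports that instant until the
+** variable is reset to 0.
+*/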
+
+/*
+** Find the current time (in Universal Coordinated Time). Write into *piNow
+** the current time and date as a Julian Day number times 86_400_000. In
+** other words, write into *piNow the number of milliseconds since the Julian
+** epoch of noon in Greenwich on November 24, 4714 B.C. according to the
+** proleptic Gregorian calendar.
+**
+** On success, return SQLITE_OK. Return SQLITE_ERROR if the time and date
+** cannot be found.
+*/
+static int winCurrentTimeInt64(sqlite3_vfs *pVfs, sqlite3_int64 *piNow){
+ /* FILETIME structure is a 64-bit value representing the number of
+ 100-nanosecond intervals since January 1, 1601 (= JD 2305813.5).
+ */
+ FILETIME ft;
+ static const sqlite3_int64 winFiletimeEpoch = 23058135*(sqlite3_int64)8640000;
+#ifdef SQLITE_TEST
+ static const sqlite3_int64 unixEpoch = 24405875*(sqlite3_int64)8640000;
+#endif
+ /* 2^32 - to avoid use of LL and warnings in gcc */
+ static const sqlite3_int64 max32BitValue =
+ (sqlite3_int64)2000000000 + (sqlite3_int64)2000000000 +
+ (sqlite3_int64)294967296;
+
+#if SQLITE_OS_WINCE
+ SYSTEMTIME time;
+ osGetSystemTime(&time);
+ /* if SystemTimeToFileTime() fails, it returns zero. */
+ if (!osSystemTimeToFileTime(&time,&ft)){
+ return SQLITE_ERROR;
+ }
+#else
+ osGetSystemTimeAsFileTime( &ft );
+#endif
+
+ *piNow = winFiletimeEpoch +
+ ((((sqlite3_int64)ft.dwHighDateTime)*max32BitValue) +
+ (sqlite3_int64)ft.dwLowDateTime)/(sqlite3_int64)10000;
+
+#ifdef SQLITE_TEST
+ if( sqlite3_current_time ){
+ *piNow = 1000*(sqlite3_int64)sqlite3_current_time + unixEpoch;
+ }
+#endif
+ UNUSED_PARAMETER(pVfs);
+ return SQLITE_OK;
+}
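+
+/*
+** Worked example of the constants above: 1601-01-01 00:00 UTC is Julian
+** Day 2305813.5, so expressed in milliseconds since the Julian epoch it
+** is 2305813.5 * 86400000 == 23058135 * 8640000 (winFiletimeEpoch).
+** Likewise 1970-01-01 is JD 2440587.5, giving the unixEpoch constant
+** 24405875 * 8640000. A FILETIME counts 100-ns ticks since 1601, so
+** dividing the 64-bit tick count by 10000 yields milliseconds, which
+** are then offset by winFiletimeEpoch.
+*/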
+
+/*
+** Find the current time (in Universal Coordinated Time). Write the
+** current time and date as a Julian Day number into *prNow and
+** return 0. Return 1 if the time and date cannot be found.
+*/
+static int winCurrentTime(sqlite3_vfs *pVfs, double *prNow){
+ int rc;
+ sqlite3_int64 i;
+ rc = winCurrentTimeInt64(pVfs, &i);
+ if( !rc ){
+ *prNow = i/86400000.0;
+ }
+ return rc;
+}
+
+/*
+** The idea is that this function works like a combination of
+** GetLastError() and FormatMessage() on Windows (or errno and
+** strerror_r() on Unix). After an error is returned by an OS
+** function, SQLite calls this function with zBuf pointing to
+** a buffer of nBuf bytes. The OS layer should populate the
+** buffer with a nul-terminated UTF-8 encoded error message
+** describing the last IO error to have occurred within the calling
+** thread.
+**
+** If the error message is too large for the supplied buffer,
+** it should be truncated. The return value of xGetLastError
+** is zero if the error message fits in the buffer, or non-zero
+** otherwise (if the message was truncated). If non-zero is returned,
+** then it is not necessary to include the nul-terminator character
+** in the output buffer.
+**
+** Not supplying an error message will have no adverse effect
+** on SQLite. It is fine to have an implementation that never
+** returns an error message:
+**
+** int xGetLastError(sqlite3_vfs *pVfs, int nBuf, char *zBuf){
+** assert(zBuf[0]=='\0');
+** return 0;
+** }
+**
+** However if an error message is supplied, it will be incorporated
+** by SQLite into the error message available to the user using
+** sqlite3_errmsg(), possibly making IO errors easier to debug.
+*/
+static int winGetLastError(sqlite3_vfs *pVfs, int nBuf, char *zBuf){
+ DWORD e = osGetLastError();
+ UNUSED_PARAMETER(pVfs);
+ if( nBuf>0 ) winGetLastErrorMsg(e, nBuf, zBuf);
+ return e;
+}
+
+/*
+** Initialize and deinitialize the operating system interface.
+*/
+SQLITE_API int sqlite3_os_init(void){
+ static sqlite3_vfs winVfs = {
+ 3, /* iVersion */
+ sizeof(winFile), /* szOsFile */
+ SQLITE_WIN32_MAX_PATH_BYTES, /* mxPathname */
+ 0, /* pNext */
+ "win32", /* zName */
+ &winAppData, /* pAppData */
+ winOpen, /* xOpen */
+ winDelete, /* xDelete */
+ winAccess, /* xAccess */
+ winFullPathname, /* xFullPathname */
+ winDlOpen, /* xDlOpen */
+ winDlError, /* xDlError */
+ winDlSym, /* xDlSym */
+ winDlClose, /* xDlClose */
+ winRandomness, /* xRandomness */
+ winSleep, /* xSleep */
+ winCurrentTime, /* xCurrentTime */
+ winGetLastError, /* xGetLastError */
+ winCurrentTimeInt64, /* xCurrentTimeInt64 */
+ winSetSystemCall, /* xSetSystemCall */
+ winGetSystemCall, /* xGetSystemCall */
+ winNextSystemCall, /* xNextSystemCall */
+ };
+#if defined(SQLITE_WIN32_HAS_WIDE)
+ static sqlite3_vfs winLongPathVfs = {
+ 3, /* iVersion */
+ sizeof(winFile), /* szOsFile */
+ SQLITE_WINNT_MAX_PATH_BYTES, /* mxPathname */
+ 0, /* pNext */
+ "win32-longpath", /* zName */
+ &winAppData, /* pAppData */
+ winOpen, /* xOpen */
+ winDelete, /* xDelete */
+ winAccess, /* xAccess */
+ winFullPathname, /* xFullPathname */
+ winDlOpen, /* xDlOpen */
+ winDlError, /* xDlError */
+ winDlSym, /* xDlSym */
+ winDlClose, /* xDlClose */
+ winRandomness, /* xRandomness */
+ winSleep, /* xSleep */
+ winCurrentTime, /* xCurrentTime */
+ winGetLastError, /* xGetLastError */
+ winCurrentTimeInt64, /* xCurrentTimeInt64 */
+ winSetSystemCall, /* xSetSystemCall */
+ winGetSystemCall, /* xGetSystemCall */
+ winNextSystemCall, /* xNextSystemCall */
+ };
+#endif
+ static sqlite3_vfs winNolockVfs = {
+ 3, /* iVersion */
+ sizeof(winFile), /* szOsFile */
+ SQLITE_WIN32_MAX_PATH_BYTES, /* mxPathname */
+ 0, /* pNext */
+ "win32-none", /* zName */
+ &winNolockAppData, /* pAppData */
+ winOpen, /* xOpen */
+ winDelete, /* xDelete */
+ winAccess, /* xAccess */
+ winFullPathname, /* xFullPathname */
+ winDlOpen, /* xDlOpen */
+ winDlError, /* xDlError */
+ winDlSym, /* xDlSym */
+ winDlClose, /* xDlClose */
+ winRandomness, /* xRandomness */
+ winSleep, /* xSleep */
+ winCurrentTime, /* xCurrentTime */
+ winGetLastError, /* xGetLastError */
+ winCurrentTimeInt64, /* xCurrentTimeInt64 */
+ winSetSystemCall, /* xSetSystemCall */
+ winGetSystemCall, /* xGetSystemCall */
+ winNextSystemCall, /* xNextSystemCall */
+ };
+#if defined(SQLITE_WIN32_HAS_WIDE)
+ static sqlite3_vfs winLongPathNolockVfs = {
+ 3, /* iVersion */
+ sizeof(winFile), /* szOsFile */
+ SQLITE_WINNT_MAX_PATH_BYTES, /* mxPathname */
+ 0, /* pNext */
+ "win32-longpath-none", /* zName */
+ &winNolockAppData, /* pAppData */
+ winOpen, /* xOpen */
+ winDelete, /* xDelete */
+ winAccess, /* xAccess */
+ winFullPathname, /* xFullPathname */
+ winDlOpen, /* xDlOpen */
+ winDlError, /* xDlError */
+ winDlSym, /* xDlSym */
+ winDlClose, /* xDlClose */
+ winRandomness, /* xRandomness */
+ winSleep, /* xSleep */
+ winCurrentTime, /* xCurrentTime */
+ winGetLastError, /* xGetLastError */
+ winCurrentTimeInt64, /* xCurrentTimeInt64 */
+ winSetSystemCall, /* xSetSystemCall */
+ winGetSystemCall, /* xGetSystemCall */
+ winNextSystemCall, /* xNextSystemCall */
+ };
+#endif
+
+ /* Double-check that the aSyscall[] array has been constructed
+ ** correctly. See ticket [bb3a86e890c8e96ab] */
+ assert( ArraySize(aSyscall)==80 );
+
+ /* get memory map allocation granularity */
+ memset(&winSysInfo, 0, sizeof(SYSTEM_INFO));
+#if SQLITE_OS_WINRT
+ osGetNativeSystemInfo(&winSysInfo);
+#else
+ osGetSystemInfo(&winSysInfo);
+#endif
+ assert( winSysInfo.dwAllocationGranularity>0 );
+ assert( winSysInfo.dwPageSize>0 );
+
+ sqlite3_vfs_register(&winVfs, 1);
+
+#if defined(SQLITE_WIN32_HAS_WIDE)
+ sqlite3_vfs_register(&winLongPathVfs, 0);
+#endif
+
+ sqlite3_vfs_register(&winNolockVfs, 0);
+
+#if defined(SQLITE_WIN32_HAS_WIDE)
+ sqlite3_vfs_register(&winLongPathNolockVfs, 0);
+#endif
+
+#ifndef SQLITE_OMIT_WAL
+ winBigLock = sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_VFS1);
+#endif
+
+ return SQLITE_OK;
+}
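+
+/*
+** The alternate VFSes registered above can be selected by name at open
+** time. A minimal sketch (file name illustrative, error handling
+** omitted):
+**
+**   sqlite3 *db;
+**   sqlite3_open_v2("test.db", &db,
+**       SQLITE_OPEN_READWRITE|SQLITE_OPEN_CREATE, "win32-longpath");
+**
+** The same selection is available through URI filenames, e.g.
+** "file:test.db?vfs=win32-none", when URI handling is enabled.
+*/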
+
+SQLITE_API int sqlite3_os_end(void){
+#if SQLITE_OS_WINRT
+ if( sleepObj!=NULL ){
+ osCloseHandle(sleepObj);
+ sleepObj = NULL;
+ }
+#endif
+
+#ifndef SQLITE_OMIT_WAL
+ winBigLock = 0;
+#endif
+
+ return SQLITE_OK;
+}
+
+#endif /* SQLITE_OS_WIN */
+
+/************** End of os_win.c **********************************************/
+/************** Begin file memdb.c *******************************************/
+/*
+** 2016-09-07
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+******************************************************************************
+**
+** This file implements an in-memory VFS. A database is held as a contiguous
+** block of memory.
+**
+** This file also implements interface sqlite3_serialize() and
+** sqlite3_deserialize().
+*/
+/* #include "sqliteInt.h" */
+#ifdef SQLITE_ENABLE_DESERIALIZE
+
+/*
+** Forward declaration of objects used by this utility
+*/
+typedef struct sqlite3_vfs MemVfs;
+typedef struct MemFile MemFile;
+
+/* Access to a lower-level VFS that (might) implement dynamic loading,
+** access to randomness, etc.
+*/
+#define ORIGVFS(p) ((sqlite3_vfs*)((p)->pAppData))
+
+/* An open file */
+struct MemFile {
+ sqlite3_file base; /* IO methods */
+ sqlite3_int64 sz; /* Size of the file */
+ sqlite3_int64 szAlloc; /* Space allocated to aData */
+ sqlite3_int64 szMax; /* Maximum allowed size of the file */
+ unsigned char *aData; /* content of the file */
+ int nMmap; /* Number of memory mapped pages */
+ unsigned mFlags; /* Flags */
+ int eLock; /* Most recent lock against this file */
+};
+
+/*
+** Methods for MemFile
+*/
+static int memdbClose(sqlite3_file*);
+static int memdbRead(sqlite3_file*, void*, int iAmt, sqlite3_int64 iOfst);
+static int memdbWrite(sqlite3_file*,const void*,int iAmt, sqlite3_int64 iOfst);
+static int memdbTruncate(sqlite3_file*, sqlite3_int64 size);
+static int memdbSync(sqlite3_file*, int flags);
+static int memdbFileSize(sqlite3_file*, sqlite3_int64 *pSize);
+static int memdbLock(sqlite3_file*, int);
+/* static int memdbCheckReservedLock(sqlite3_file*, int *pResOut);// not used */
+static int memdbFileControl(sqlite3_file*, int op, void *pArg);
+/* static int memdbSectorSize(sqlite3_file*); // not used */
+static int memdbDeviceCharacteristics(sqlite3_file*);
+static int memdbFetch(sqlite3_file*, sqlite3_int64 iOfst, int iAmt, void **pp);
+static int memdbUnfetch(sqlite3_file*, sqlite3_int64 iOfst, void *p);
+
+/*
+** Methods for MemVfs
+*/
+static int memdbOpen(sqlite3_vfs*, const char *, sqlite3_file*, int , int *);
+/* static int memdbDelete(sqlite3_vfs*, const char *zName, int syncDir); */
+static int memdbAccess(sqlite3_vfs*, const char *zName, int flags, int *);
+static int memdbFullPathname(sqlite3_vfs*, const char *zName, int, char *zOut);
+static void *memdbDlOpen(sqlite3_vfs*, const char *zFilename);
+static void memdbDlError(sqlite3_vfs*, int nByte, char *zErrMsg);
+static void (*memdbDlSym(sqlite3_vfs *pVfs, void *p, const char*zSym))(void);
+static void memdbDlClose(sqlite3_vfs*, void*);
+static int memdbRandomness(sqlite3_vfs*, int nByte, char *zOut);
+static int memdbSleep(sqlite3_vfs*, int microseconds);
+/* static int memdbCurrentTime(sqlite3_vfs*, double*); */
+static int memdbGetLastError(sqlite3_vfs*, int, char *);
+static int memdbCurrentTimeInt64(sqlite3_vfs*, sqlite3_int64*);
+
+static sqlite3_vfs memdb_vfs = {
+ 2, /* iVersion */
+ 0, /* szOsFile (set when registered) */
+ 1024, /* mxPathname */
+ 0, /* pNext */
+ "memdb", /* zName */
+ 0, /* pAppData (set when registered) */
+ memdbOpen, /* xOpen */
+ 0, /* memdbDelete, */ /* xDelete */
+ memdbAccess, /* xAccess */
+ memdbFullPathname, /* xFullPathname */
+ memdbDlOpen, /* xDlOpen */
+ memdbDlError, /* xDlError */
+ memdbDlSym, /* xDlSym */
+ memdbDlClose, /* xDlClose */
+ memdbRandomness, /* xRandomness */
+ memdbSleep, /* xSleep */
+ 0, /* memdbCurrentTime, */ /* xCurrentTime */
+ memdbGetLastError, /* xGetLastError */
+ memdbCurrentTimeInt64 /* xCurrentTimeInt64 */
+};
+
+static const sqlite3_io_methods memdb_io_methods = {
+ 3, /* iVersion */
+ memdbClose, /* xClose */
+ memdbRead, /* xRead */
+ memdbWrite, /* xWrite */
+ memdbTruncate, /* xTruncate */
+ memdbSync, /* xSync */
+ memdbFileSize, /* xFileSize */
+ memdbLock, /* xLock */
+ memdbLock, /* xUnlock - same as xLock in this case */
+ 0, /* memdbCheckReservedLock, */ /* xCheckReservedLock */
+ memdbFileControl, /* xFileControl */
+ 0, /* memdbSectorSize,*/ /* xSectorSize */
+ memdbDeviceCharacteristics, /* xDeviceCharacteristics */
+ 0, /* xShmMap */
+ 0, /* xShmLock */
+ 0, /* xShmBarrier */
+ 0, /* xShmUnmap */
+ memdbFetch, /* xFetch */
+ memdbUnfetch /* xUnfetch */
+};
+
+
+
+/*
+** Close a memdb-file.
+**
+** The aData buffer is owned by the application unless the
+** SQLITE_DESERIALIZE_FREEONCLOSE flag is set, in which case it is
+** freed here.
+*/
+static int memdbClose(sqlite3_file *pFile){
+ MemFile *p = (MemFile *)pFile;
+ if( p->mFlags & SQLITE_DESERIALIZE_FREEONCLOSE ) sqlite3_free(p->aData);
+ return SQLITE_OK;
+}
+
+/*
+** Read data from a memdb-file.
+*/
+static int memdbRead(
+ sqlite3_file *pFile,
+ void *zBuf,
+ int iAmt,
+ sqlite_int64 iOfst
+){
+ MemFile *p = (MemFile *)pFile;
+ if( iOfst+iAmt>p->sz ){
+ memset(zBuf, 0, iAmt);
+ if( iOfst<p->sz ) memcpy(zBuf, p->aData+iOfst, p->sz - iOfst);
+ return SQLITE_IOERR_SHORT_READ;
+ }
+ memcpy(zBuf, p->aData+iOfst, iAmt);
+ return SQLITE_OK;
+}
+
+/*
+** Try to enlarge the memory allocation to hold at least newSz bytes.
+*/
+static int memdbEnlarge(MemFile *p, sqlite3_int64 newSz){
+ unsigned char *pNew;
+ if( (p->mFlags & SQLITE_DESERIALIZE_RESIZEABLE)==0 || p->nMmap>0 ){
+ return SQLITE_FULL;
+ }
+ if( newSz>p->szMax ){
+ return SQLITE_FULL;
+ }
+ newSz *= 2;
+ if( newSz>p->szMax ) newSz = p->szMax;
+ pNew = sqlite3Realloc(p->aData, newSz);
+ if( pNew==0 ) return SQLITE_NOMEM;
+ p->aData = pNew;
+ p->szAlloc = newSz;
+ return SQLITE_OK;
+}
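+
+/*
+** The doubling above amortizes reallocation cost: a request for newSz
+** bytes grows the allocation to min(2*newSz, szMax), so a sequence of
+** appended writes triggers O(log n) calls to sqlite3Realloc() rather
+** than one per write.
+*/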
+
+/*
+** Write data to a memdb-file.
+*/
+static int memdbWrite(
+ sqlite3_file *pFile,
+ const void *z,
+ int iAmt,
+ sqlite_int64 iOfst
+){
+ MemFile *p = (MemFile *)pFile;
+ if( NEVER(p->mFlags & SQLITE_DESERIALIZE_READONLY) ) return SQLITE_READONLY;
+ if( iOfst+iAmt>p->sz ){
+ int rc;
+ if( iOfst+iAmt>p->szAlloc
+ && (rc = memdbEnlarge(p, iOfst+iAmt))!=SQLITE_OK
+ ){
+ return rc;
+ }
+ if( iOfst>p->sz ) memset(p->aData+p->sz, 0, iOfst-p->sz);
+ p->sz = iOfst+iAmt;
+ }
+ memcpy(p->aData+iOfst, z, iAmt);
+ return SQLITE_OK;
+}
+
+/*
+** Truncate a memdb-file.
+**
+** In rollback mode (which is always the case for memdb, as it does not
+** support WAL mode) the truncate() method is only used to reduce
+** the size of a file, never to increase the size.
+*/
+static int memdbTruncate(sqlite3_file *pFile, sqlite_int64 size){
+ MemFile *p = (MemFile *)pFile;
+ if( NEVER(size>p->sz) ) return SQLITE_FULL;
+ p->sz = size;
+ return SQLITE_OK;
+}
+
+/*
+** Sync a memdb-file.
+*/
+static int memdbSync(sqlite3_file *pFile, int flags){
+ return SQLITE_OK;
+}
+
+/*
+** Return the current file-size of a memdb-file.
+*/
+static int memdbFileSize(sqlite3_file *pFile, sqlite_int64 *pSize){
+ MemFile *p = (MemFile *)pFile;
+ *pSize = p->sz;
+ return SQLITE_OK;
+}
+
+/*
+** Lock a memdb-file.
+*/
+static int memdbLock(sqlite3_file *pFile, int eLock){
+ MemFile *p = (MemFile *)pFile;
+ if( eLock>SQLITE_LOCK_SHARED
+ && (p->mFlags & SQLITE_DESERIALIZE_READONLY)!=0
+ ){
+ return SQLITE_READONLY;
+ }
+ p->eLock = eLock;
+ return SQLITE_OK;
+}
+
+#if 0 /* Never used because memdbAccess() always returns false */
+/*
+** Check if another file-handle holds a RESERVED lock on a memdb-file.
+*/
+static int memdbCheckReservedLock(sqlite3_file *pFile, int *pResOut){
+ *pResOut = 0;
+ return SQLITE_OK;
+}
+#endif
+
+/*
+** File control method. For custom operations on a memdb-file.
+*/
+static int memdbFileControl(sqlite3_file *pFile, int op, void *pArg){
+ MemFile *p = (MemFile *)pFile;
+ int rc = SQLITE_NOTFOUND;
+ if( op==SQLITE_FCNTL_VFSNAME ){
+ *(char**)pArg = sqlite3_mprintf("memdb(%p,%lld)", p->aData, p->sz);
+ rc = SQLITE_OK;
+ }
+ if( op==SQLITE_FCNTL_SIZE_LIMIT ){
+ sqlite3_int64 iLimit = *(sqlite3_int64*)pArg;
+ if( iLimit<p->sz ){
+ if( iLimit<0 ){
+ iLimit = p->szMax;
+ }else{
+ iLimit = p->sz;
+ }
+ }
+ p->szMax = iLimit;
+ *(sqlite3_int64*)pArg = iLimit;
+ rc = SQLITE_OK;
+ }
+ return rc;
+}
+
+#if 0 /* Not used because of SQLITE_IOCAP_POWERSAFE_OVERWRITE */
+/*
+** Return the sector-size in bytes for a memdb-file.
+*/
+static int memdbSectorSize(sqlite3_file *pFile){
+ return 1024;
+}
+#endif
+
+/*
+** Return the device characteristic flags supported by a memdb-file.
+*/
+static int memdbDeviceCharacteristics(sqlite3_file *pFile){
+ return SQLITE_IOCAP_ATOMIC |
+ SQLITE_IOCAP_POWERSAFE_OVERWRITE |
+ SQLITE_IOCAP_SAFE_APPEND |
+ SQLITE_IOCAP_SEQUENTIAL;
+}
+
+/* Fetch a page of a memory-mapped file */
+static int memdbFetch(
+ sqlite3_file *pFile,
+ sqlite3_int64 iOfst,
+ int iAmt,
+ void **pp
+){
+ MemFile *p = (MemFile *)pFile;
+ if( iOfst+iAmt>p->sz ){
+ *pp = 0;
+ }else{
+ p->nMmap++;
+ *pp = (void*)(p->aData + iOfst);
+ }
+ return SQLITE_OK;
+}
+
+/* Release a memory-mapped page */
+static int memdbUnfetch(sqlite3_file *pFile, sqlite3_int64 iOfst, void *pPage){
+ MemFile *p = (MemFile *)pFile;
+ p->nMmap--;
+ return SQLITE_OK;
+}
+
+/*
+** Open a memdb file handle.
+*/
+static int memdbOpen(
+ sqlite3_vfs *pVfs,
+ const char *zName,
+ sqlite3_file *pFile,
+ int flags,
+ int *pOutFlags
+){
+ MemFile *p = (MemFile*)pFile;
+ if( (flags & SQLITE_OPEN_MAIN_DB)==0 ){
+ return ORIGVFS(pVfs)->xOpen(ORIGVFS(pVfs), zName, pFile, flags, pOutFlags);
+ }
+ memset(p, 0, sizeof(*p));
+ p->mFlags = SQLITE_DESERIALIZE_RESIZEABLE | SQLITE_DESERIALIZE_FREEONCLOSE;
+ assert( pOutFlags!=0 ); /* True because flags==SQLITE_OPEN_MAIN_DB */
+ *pOutFlags = flags | SQLITE_OPEN_MEMORY;
+ pFile->pMethods = &memdb_io_methods;
+ p->szMax = sqlite3GlobalConfig.mxMemdbSize;
+ return SQLITE_OK;
+}
+
+#if 0 /* Only used to delete rollback journals, super-journals, and WAL
+ ** files, none of which exist in memdb. So this routine is never used */
+/*
+** Delete the file located at zPath. If the dirSync argument is true,
+** ensure the file-system modifications are synced to disk before
+** returning.
+*/
+static int memdbDelete(sqlite3_vfs *pVfs, const char *zPath, int dirSync){
+ return SQLITE_IOERR_DELETE;
+}
+#endif
+
+/*
+** Test for access permissions. Return true if the requested permission
+** is available, or false otherwise.
+**
+** With memdb, no files ever exist on disk. So always return false.
+*/
+static int memdbAccess(
+ sqlite3_vfs *pVfs,
+ const char *zPath,
+ int flags,
+ int *pResOut
+){
+ *pResOut = 0;
+ return SQLITE_OK;
+}
+
+/*
+** Populate buffer zOut with the full canonical pathname corresponding
+** to the pathname in zPath. zOut is guaranteed to point to a buffer
+** of at least (mxPathname+1) bytes.
+*/
+static int memdbFullPathname(
+ sqlite3_vfs *pVfs,
+ const char *zPath,
+ int nOut,
+ char *zOut
+){
+ sqlite3_snprintf(nOut, zOut, "%s", zPath);
+ return SQLITE_OK;
+}
+
+/*
+** Open the dynamic library located at zPath and return a handle.
+*/
+static void *memdbDlOpen(sqlite3_vfs *pVfs, const char *zPath){
+ return ORIGVFS(pVfs)->xDlOpen(ORIGVFS(pVfs), zPath);
+}
+
+/*
+** Populate the buffer zErrMsg (size nByte bytes) with a human-readable
+** UTF-8 string describing the most recent error associated with
+** dynamic libraries.
+*/
+static void memdbDlError(sqlite3_vfs *pVfs, int nByte, char *zErrMsg){
+ ORIGVFS(pVfs)->xDlError(ORIGVFS(pVfs), nByte, zErrMsg);
+}
+
+/*
+** Return a pointer to the symbol zSymbol in the dynamic library pHandle.
+*/
+static void (*memdbDlSym(sqlite3_vfs *pVfs, void *p, const char *zSym))(void){
+ return ORIGVFS(pVfs)->xDlSym(ORIGVFS(pVfs), p, zSym);
+}
+
+/*
+** Close the dynamic library handle pHandle.
+*/
+static void memdbDlClose(sqlite3_vfs *pVfs, void *pHandle){
+ ORIGVFS(pVfs)->xDlClose(ORIGVFS(pVfs), pHandle);
+}
+
+/*
+** Populate the buffer pointed to by zBufOut with nByte bytes of
+** random data.
+*/
+static int memdbRandomness(sqlite3_vfs *pVfs, int nByte, char *zBufOut){
+ return ORIGVFS(pVfs)->xRandomness(ORIGVFS(pVfs), nByte, zBufOut);
+}
+
+/*
+** Sleep for nMicro microseconds. Return the number of microseconds
+** actually slept.
+*/
+static int memdbSleep(sqlite3_vfs *pVfs, int nMicro){
+ return ORIGVFS(pVfs)->xSleep(ORIGVFS(pVfs), nMicro);
+}
+
+#if 0 /* Never used. Modern cores only call xCurrentTimeInt64() */
+/*
+** Return the current time as a Julian Day number in *pTimeOut.
+*/
+static int memdbCurrentTime(sqlite3_vfs *pVfs, double *pTimeOut){
+ return ORIGVFS(pVfs)->xCurrentTime(ORIGVFS(pVfs), pTimeOut);
+}
+#endif
+
+static int memdbGetLastError(sqlite3_vfs *pVfs, int a, char *b){
+ return ORIGVFS(pVfs)->xGetLastError(ORIGVFS(pVfs), a, b);
+}
+static int memdbCurrentTimeInt64(sqlite3_vfs *pVfs, sqlite3_int64 *p){
+ return ORIGVFS(pVfs)->xCurrentTimeInt64(ORIGVFS(pVfs), p);
+}
+
+/*
+** Translate a database connection pointer and schema name into a
+** MemFile pointer.
+*/
+static MemFile *memdbFromDbSchema(sqlite3 *db, const char *zSchema){
+ MemFile *p = 0;
+ int rc = sqlite3_file_control(db, zSchema, SQLITE_FCNTL_FILE_POINTER, &p);
+ if( rc ) return 0;
+ if( p->base.pMethods!=&memdb_io_methods ) return 0;
+ return p;
+}
+
+/*
+** Return the serialization of a database
+*/
+SQLITE_API unsigned char *sqlite3_serialize(
+ sqlite3 *db, /* The database connection */
+ const char *zSchema, /* Which database within the connection */
+ sqlite3_int64 *piSize, /* Write size here, if not NULL */
+ unsigned int mFlags /* Maybe SQLITE_SERIALIZE_NOCOPY */
+){
+ MemFile *p;
+ int iDb;
+ Btree *pBt;
+ sqlite3_int64 sz;
+ int szPage = 0;
+ sqlite3_stmt *pStmt = 0;
+ unsigned char *pOut;
+ char *zSql;
+ int rc;
+
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( !sqlite3SafetyCheckOk(db) ){
+ (void)SQLITE_MISUSE_BKPT;
+ return 0;
+ }
+#endif
+
+ if( zSchema==0 ) zSchema = db->aDb[0].zDbSName;
+ p = memdbFromDbSchema(db, zSchema);
+ iDb = sqlite3FindDbName(db, zSchema);
+ if( piSize ) *piSize = -1;
+ if( iDb<0 ) return 0;
+ if( p ){
+ if( piSize ) *piSize = p->sz;
+ if( mFlags & SQLITE_SERIALIZE_NOCOPY ){
+ pOut = p->aData;
+ }else{
+ pOut = sqlite3_malloc64( p->sz );
+ if( pOut ) memcpy(pOut, p->aData, p->sz);
+ }
+ return pOut;
+ }
+ pBt = db->aDb[iDb].pBt;
+ if( pBt==0 ) return 0;
+ szPage = sqlite3BtreeGetPageSize(pBt);
+ zSql = sqlite3_mprintf("PRAGMA \"%w\".page_count", zSchema);
+ rc = zSql ? sqlite3_prepare_v2(db, zSql, -1, &pStmt, 0) : SQLITE_NOMEM;
+ sqlite3_free(zSql);
+ if( rc ) return 0;
+ rc = sqlite3_step(pStmt);
+ if( rc!=SQLITE_ROW ){
+ pOut = 0;
+ }else{
+ sz = sqlite3_column_int64(pStmt, 0)*szPage;
+ if( piSize ) *piSize = sz;
+ if( mFlags & SQLITE_SERIALIZE_NOCOPY ){
+ pOut = 0;
+ }else{
+ pOut = sqlite3_malloc64( sz );
+ if( pOut ){
+ int nPage = sqlite3_column_int(pStmt, 0);
+ Pager *pPager = sqlite3BtreePager(pBt);
+ int pgno;
+ for(pgno=1; pgno<=nPage; pgno++){
+ DbPage *pPage = 0;
+ unsigned char *pTo = pOut + szPage*(sqlite3_int64)(pgno-1);
+ rc = sqlite3PagerGet(pPager, pgno, (DbPage**)&pPage, 0);
+ if( rc==SQLITE_OK ){
+ memcpy(pTo, sqlite3PagerGetData(pPage), szPage);
+ }else{
+ memset(pTo, 0, szPage);
+ }
+ sqlite3PagerUnref(pPage);
+ }
+ }
+ }
+ }
+ sqlite3_finalize(pStmt);
+ return pOut;
+}
+
+/*
+** Convert zSchema to a MemDB and initialize its content.
+*/
+SQLITE_API int sqlite3_deserialize(
+ sqlite3 *db, /* The database connection */
+ const char *zSchema, /* Which DB to reopen with the deserialization */
+ unsigned char *pData, /* The serialized database content */
+ sqlite3_int64 szDb, /* Number of bytes in the deserialization */
+ sqlite3_int64 szBuf, /* Total size of buffer pData[] */
+ unsigned mFlags /* Zero or more SQLITE_DESERIALIZE_* flags */
+){
+ MemFile *p;
+ char *zSql;
+ sqlite3_stmt *pStmt = 0;
+ int rc;
+ int iDb;
+
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( !sqlite3SafetyCheckOk(db) ){
+ return SQLITE_MISUSE_BKPT;
+ }
+ if( szDb<0 ) return SQLITE_MISUSE_BKPT;
+ if( szBuf<0 ) return SQLITE_MISUSE_BKPT;
+#endif
+
+ sqlite3_mutex_enter(db->mutex);
+ if( zSchema==0 ) zSchema = db->aDb[0].zDbSName;
+ iDb = sqlite3FindDbName(db, zSchema);
+ if( iDb<0 ){
+ rc = SQLITE_ERROR;
+ goto end_deserialize;
+ }
+ zSql = sqlite3_mprintf("ATTACH x AS %Q", zSchema);
+ rc = sqlite3_prepare_v2(db, zSql, -1, &pStmt, 0);
+ sqlite3_free(zSql);
+ if( rc ) goto end_deserialize;
+ db->init.iDb = (u8)iDb;
+ db->init.reopenMemdb = 1;
+ rc = sqlite3_step(pStmt);
+ db->init.reopenMemdb = 0;
+ if( rc!=SQLITE_DONE ){
+ rc = SQLITE_ERROR;
+ goto end_deserialize;
+ }
+ p = memdbFromDbSchema(db, zSchema);
+ if( p==0 ){
+ rc = SQLITE_ERROR;
+ }else{
+ p->aData = pData;
+ p->sz = szDb;
+ p->szAlloc = szBuf;
+ p->szMax = szBuf;
+ if( p->szMax<sqlite3GlobalConfig.mxMemdbSize ){
+ p->szMax = sqlite3GlobalConfig.mxMemdbSize;
+ }
+ p->mFlags = mFlags;
+ rc = SQLITE_OK;
+ }
+
+end_deserialize:
+ sqlite3_finalize(pStmt);
+ sqlite3_mutex_leave(db->mutex);
+ return rc;
+}
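+
+/*
+** A minimal usage sketch of the two interfaces above ("main" is the
+** schema name of the primary database; error handling omitted):
+**
+**   sqlite3_int64 sz = 0;
+**   unsigned char *pCopy = sqlite3_serialize(db, "main", &sz, 0);
+**   sqlite3_deserialize(db2, "main", pCopy, sz, sz,
+**       SQLITE_DESERIALIZE_FREEONCLOSE|SQLITE_DESERIALIZE_RESIZEABLE);
+**
+** With SQLITE_DESERIALIZE_FREEONCLOSE set, db2 takes ownership of
+** pCopy and frees it with sqlite3_free() when the schema is detached
+** or the connection closes.
+*/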
+
+/*
+** This routine is called when the extension is loaded.
+** Register the new VFS.
+*/
+SQLITE_PRIVATE int sqlite3MemdbInit(void){
+ sqlite3_vfs *pLower = sqlite3_vfs_find(0);
+ int sz = pLower->szOsFile;
+ memdb_vfs.pAppData = pLower;
+ /* The following conditional can only be true when compiled for
+ ** Windows x86 and SQLITE_MAX_MMAP_SIZE=0. We always leave
+ ** it in, to be safe, but it is marked as NO_TEST since there
+ ** is no way to reach it under most builds. */
+ if( sz<sizeof(MemFile) ) sz = sizeof(MemFile); /*NO_TEST*/
+ memdb_vfs.szOsFile = sz;
+ return sqlite3_vfs_register(&memdb_vfs, 0);
+}
+#endif /* SQLITE_ENABLE_DESERIALIZE */
+
+/************** End of memdb.c ***********************************************/
+/************** Begin file bitvec.c ******************************************/
+/*
+** 2008 February 16
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+** This file implements an object that represents a fixed-length
+** bitmap. Bits are numbered starting with 1.
+**
+** A bitmap is used to record which pages of a database file have been
+** journalled during a transaction, or which pages have the "dont-write"
+** property. Usually only a few pages meet either condition.
+** So the bitmap is usually sparse and has low cardinality.
+** But sometimes (for example, during a DROP of a large table) most
+** or all of the pages in a database can get journalled. In those cases,
+** the bitmap becomes dense with high cardinality. The algorithm needs
+** to handle both cases well.
+**
+** The size of the bitmap is fixed when the object is created.
+**
+** All bits are clear when the bitmap is created. Individual bits
+** may be set or cleared one at a time.
+**
+** Test operations are about 100 times more common than set operations.
+** Clear operations are exceedingly rare. There are usually between
+** 5 and 500 set operations per Bitvec object, though the number of sets can
+** sometimes grow into tens of thousands or larger. The size of the
+** Bitvec object is the number of pages in the database file at the
+** start of a transaction, and is thus usually less than a few thousand,
+** but can be as large as 2 billion for a really big database.
+*/
+/* #include "sqliteInt.h" */
+
+/* Size of the Bitvec structure in bytes. */
+#define BITVEC_SZ 512
+
+/* Round the union size down to the nearest pointer boundary, since that's how
+** it will be aligned within the Bitvec struct. */
+#define BITVEC_USIZE \
+ (((BITVEC_SZ-(3*sizeof(u32)))/sizeof(Bitvec*))*sizeof(Bitvec*))
+
+/* Type of the array "element" for the bitmap representation.
+** Should be a power of 2, and ideally, evenly divide into BITVEC_USIZE.
+** Setting this to the "natural word" size of your CPU may improve
+** performance. */
+#define BITVEC_TELEM u8
+/* Size, in bits, of the bitmap element. */
+#define BITVEC_SZELEM 8
+/* Number of elements in a bitmap array. */
+#define BITVEC_NELEM (BITVEC_USIZE/sizeof(BITVEC_TELEM))
+/* Number of bits in the bitmap array. */
+#define BITVEC_NBIT (BITVEC_NELEM*BITVEC_SZELEM)
+
+/* Number of u32 values in hash table. */
+#define BITVEC_NINT (BITVEC_USIZE/sizeof(u32))
+/* Maximum number of entries in hash table before
+** sub-dividing and re-hashing. */
+#define BITVEC_MXHASH (BITVEC_NINT/2)
+/* Hashing function for the aHash representation.
+** Empirical testing showed that the *37 multiplier
+** (an arbitrary prime) in the hash function provided
+** no fewer collisions than the no-op *1. */
+#define BITVEC_HASH(X) (((X)*1)%BITVEC_NINT)
+
+#define BITVEC_NPTR (BITVEC_USIZE/sizeof(Bitvec *))
+
+
+/*
+** A bitmap is an instance of the following structure.
+**
+** This bitmap records the existence of zero or more bits
+** with values between 1 and iSize, inclusive.
+**
+** There are three possible representations of the bitmap.
+** If iSize<=BITVEC_NBIT, then Bitvec.u.aBitmap[] is a straight
+** bitmap. The least significant bit is bit 1.
+**
+** If iSize>BITVEC_NBIT and iDivisor==0 then Bitvec.u.aHash[] is
+** a hash table that will hold up to BITVEC_MXHASH distinct values.
+**
+** Otherwise, the value i is redirected into one of BITVEC_NPTR
+** sub-bitmaps pointed to by Bitvec.u.apSub[]. Each subbitmap
+** handles up to iDivisor separate values of i. apSub[0] holds
+** values between 1 and iDivisor. apSub[1] holds values between
+** iDivisor+1 and 2*iDivisor. apSub[N] holds values between
+** N*iDivisor+1 and (N+1)*iDivisor. Each subbitmap is normalized
+** to deal with values between 1 and iDivisor.
+*/
+struct Bitvec {
+ u32 iSize; /* Maximum bit index. Max iSize is 4,294,967,296. */
+ u32 nSet; /* Number of bits that are set - only valid for aHash
+ ** element. Max is BITVEC_NINT. For BITVEC_SZ of 512,
+ ** this would be 125. */
+ u32 iDivisor; /* Number of bits handled by each apSub[] entry. */
+ /* Should be >0 when the apSub representation is in use. */
+ /* Max iDivisor is max(u32) / BITVEC_NPTR + 1. */
+ /* For a BITVEC_SZ of 512, this would be 34,359,739. */
+ union {
+ BITVEC_TELEM aBitmap[BITVEC_NELEM]; /* Bitmap representation */
+ u32 aHash[BITVEC_NINT]; /* Hash table representation */
+ Bitvec *apSub[BITVEC_NPTR]; /* Recursive representation */
+ } u;
+};
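+
+/*
+** Worked example of the sizing above, assuming 8-byte pointers and
+** 4-byte u32 values: BITVEC_USIZE = ((512-12)/8)*8 = 496 bytes, so
+** BITVEC_NBIT = 496*8 = 3968 is the largest size handled as a straight
+** bitmap, BITVEC_NINT = 496/4 = 124 hash slots hold at most
+** BITVEC_MXHASH = 62 entries, and the recursive form fans out over
+** BITVEC_NPTR = 62 sub-bitmaps.
+*/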
+
+/*
+** Create a new bitmap object able to handle bits between 1 and iSize,
+** inclusive. Return a pointer to the new object. Return NULL if
+** malloc fails.
+*/
+SQLITE_PRIVATE Bitvec *sqlite3BitvecCreate(u32 iSize){
+ Bitvec *p;
+ assert( sizeof(*p)==BITVEC_SZ );
+ p = sqlite3MallocZero( sizeof(*p) );
+ if( p ){
+ p->iSize = iSize;
+ }
+ return p;
+}
+
+/*
+** Check to see if the i-th bit is set. Return true or false.
+** If p is NULL (if the bitmap has not been created) or if
+** i is out of range, then return false.
+*/
+SQLITE_PRIVATE int sqlite3BitvecTestNotNull(Bitvec *p, u32 i){
+ assert( p!=0 );
+ i--;
+ if( i>=p->iSize ) return 0;
+ while( p->iDivisor ){
+ u32 bin = i/p->iDivisor;
+ i = i%p->iDivisor;
+ p = p->u.apSub[bin];
+ if (!p) {
+ return 0;
+ }
+ }
+ if( p->iSize<=BITVEC_NBIT ){
+ return (p->u.aBitmap[i/BITVEC_SZELEM] & (1<<(i&(BITVEC_SZELEM-1))))!=0;
+ } else{
+ u32 h = BITVEC_HASH(i++);
+ while( p->u.aHash[h] ){
+ if( p->u.aHash[h]==i ) return 1;
+ h = (h+1) % BITVEC_NINT;
+ }
+ return 0;
+ }
+}
+SQLITE_PRIVATE int sqlite3BitvecTest(Bitvec *p, u32 i){
+ return p!=0 && sqlite3BitvecTestNotNull(p,i);
+}
+
+/*
+** Set the i-th bit. Return 0 on success and an error code if
+** anything goes wrong.
+**
+** This routine might cause sub-bitmaps to be allocated. Failing
+** to get the memory needed to hold the sub-bitmap is the only thing
+** that can go wrong with an insert, assuming p and i are valid.
+**
+** The calling function must ensure that p is a valid Bitvec object
+** and that the value for "i" is within range of the Bitvec object.
+** Otherwise the behavior is undefined.
+*/
+SQLITE_PRIVATE int sqlite3BitvecSet(Bitvec *p, u32 i){
+ u32 h;
+ if( p==0 ) return SQLITE_OK;
+ assert( i>0 );
+ assert( i<=p->iSize );
+ i--;
+ while((p->iSize > BITVEC_NBIT) && p->iDivisor) {
+ u32 bin = i/p->iDivisor;
+ i = i%p->iDivisor;
+ if( p->u.apSub[bin]==0 ){
+ p->u.apSub[bin] = sqlite3BitvecCreate( p->iDivisor );
+ if( p->u.apSub[bin]==0 ) return SQLITE_NOMEM_BKPT;
+ }
+ p = p->u.apSub[bin];
+ }
+ if( p->iSize<=BITVEC_NBIT ){
+ p->u.aBitmap[i/BITVEC_SZELEM] |= 1 << (i&(BITVEC_SZELEM-1));
+ return SQLITE_OK;
+ }
+ h = BITVEC_HASH(i++);
+ /* if there wasn't a hash collision, and this doesn't */
+ /* completely fill the hash, then just add it without */
+ /* worrying about sub-dividing and re-hashing. */
+ if( !p->u.aHash[h] ){
+ if (p->nSet<(BITVEC_NINT-1)) {
+ goto bitvec_set_end;
+ } else {
+ goto bitvec_set_rehash;
+ }
+ }
+ /* there was a collision, check to see if it's already */
+ /* in hash, if not, try to find a spot for it */
+ do {
+ if( p->u.aHash[h]==i ) return SQLITE_OK;
+ h++;
+ if( h>=BITVEC_NINT ) h = 0;
+ } while( p->u.aHash[h] );
+ /* we didn't find it in the hash. h points to the first */
+ /* available free spot. check to see if this is going to */
+ /* make our hash too "full". */
+bitvec_set_rehash:
+ if( p->nSet>=BITVEC_MXHASH ){
+ unsigned int j;
+ int rc;
+ u32 *aiValues = sqlite3StackAllocRaw(0, sizeof(p->u.aHash));
+ if( aiValues==0 ){
+ return SQLITE_NOMEM_BKPT;
+ }else{
+ memcpy(aiValues, p->u.aHash, sizeof(p->u.aHash));
+ memset(p->u.apSub, 0, sizeof(p->u.apSub));
+ p->iDivisor = (p->iSize + BITVEC_NPTR - 1)/BITVEC_NPTR;
+ rc = sqlite3BitvecSet(p, i);
+ for(j=0; j<BITVEC_NINT; j++){
+ if( aiValues[j] ) rc |= sqlite3BitvecSet(p, aiValues[j]);
+ }
+ sqlite3StackFree(0, aiValues);
+ return rc;
+ }
+ }
+bitvec_set_end:
+ p->nSet++;
+ p->u.aHash[h] = i;
+ return SQLITE_OK;
+}
+
+/*
+** Clear the i-th bit.
+**
+** pBuf must be a pointer to at least BITVEC_SZ bytes of temporary storage
+** that BitvecClear can use to rebuild its hash table.
+*/
+SQLITE_PRIVATE void sqlite3BitvecClear(Bitvec *p, u32 i, void *pBuf){
+ if( p==0 ) return;
+ assert( i>0 );
+ i--;
+ while( p->iDivisor ){
+ u32 bin = i/p->iDivisor;
+ i = i%p->iDivisor;
+ p = p->u.apSub[bin];
+ if (!p) {
+ return;
+ }
+ }
+ if( p->iSize<=BITVEC_NBIT ){
+ p->u.aBitmap[i/BITVEC_SZELEM] &= ~(1 << (i&(BITVEC_SZELEM-1)));
+ }else{
+ unsigned int j;
+ u32 *aiValues = pBuf;
+ memcpy(aiValues, p->u.aHash, sizeof(p->u.aHash));
+ memset(p->u.aHash, 0, sizeof(p->u.aHash));
+ p->nSet = 0;
+ for(j=0; j<BITVEC_NINT; j++){
+ if( aiValues[j] && aiValues[j]!=(i+1) ){
+ u32 h = BITVEC_HASH(aiValues[j]-1);
+ p->nSet++;
+ while( p->u.aHash[h] ){
+ h++;
+ if( h>=BITVEC_NINT ) h = 0;
+ }
+ p->u.aHash[h] = aiValues[j];
+ }
+ }
+ }
+}
+
+/*
+** Destroy a bitmap object. Reclaim all memory used.
+*/
+SQLITE_PRIVATE void sqlite3BitvecDestroy(Bitvec *p){
+ if( p==0 ) return;
+ if( p->iDivisor ){
+ unsigned int i;
+ for(i=0; i<BITVEC_NPTR; i++){
+ sqlite3BitvecDestroy(p->u.apSub[i]);
+ }
+ }
+ sqlite3_free(p);
+}
+
+/*
+** Return the value of the iSize parameter specified when Bitvec *p
+** was created.
+*/
+SQLITE_PRIVATE u32 sqlite3BitvecSize(Bitvec *p){
+ return p->iSize;
+}
+
+#ifndef SQLITE_UNTESTABLE
+/*
+** Let V[] be an array of unsigned characters sufficient to hold
+** up to N bits. Let I be an integer such that 0<=I<N.
+** Then the following macros can be used to set, clear, or test
+** individual bits within V.
+*/
+#define SETBIT(V,I) V[I>>3] |= (1<<(I&7))
+#define CLEARBIT(V,I) V[I>>3] &= ~(1<<(I&7))
+#define TESTBIT(V,I) (V[I>>3]&(1<<(I&7)))!=0
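+
+/*
+** For example, SETBIT(V,10) sets bit (1<<2) of V[1], since 10>>3==1 and
+** 10&7==2; TESTBIT(V,10) then evaluates to 1 until CLEARBIT(V,10).
+*/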
+
+/*
+** This routine runs an extensive test of the Bitvec code.
+**
+** The input is an array of integers that acts as a program
+** to test the Bitvec. The integers are opcodes followed
+** by 0, 1, or 3 operands, depending on the opcode. Another
+** opcode follows immediately after the last operand.
+**
+** There are 6 opcodes numbered from 0 through 5. 0 is the
+** "halt" opcode and causes the test to end.
+**
+** 0 Halt and return the number of errors
+** 1 N S X Set N bits beginning with S and incrementing by X
+** 2 N S X Clear N bits beginning with S and incrementing by X
+** 3 N Set N randomly chosen bits
+** 4 N Clear N randomly chosen bits
+** 5 N S X Set N bits from S increment X in array only, not in bitvec
+**
+** Opcodes 1 through 4 perform set and clear operations on both a
+** Bitvec object and a linear array of bits obtained from malloc.
+** Opcode 5 works on the linear array only, not on the Bitvec.
+** Opcode 5 is used to deliberately induce a fault in order to
+** confirm that error detection works.
+**
+** At the conclusion of the test the linear array is compared
+** against the Bitvec object. If there are any differences,
+** an error is returned. If they are the same, zero is returned.
+**
+** If a memory allocation error occurs, return -1.
+*/
+SQLITE_PRIVATE int sqlite3BitvecBuiltinTest(int sz, int *aOp){
+ Bitvec *pBitvec = 0;
+ unsigned char *pV = 0;
+ int rc = -1;
+ int i, nx, pc, op;
+ void *pTmpSpace;
+
+ /* Allocate the Bitvec to be tested and a linear array of
+ ** bits to act as the reference */
+ pBitvec = sqlite3BitvecCreate( sz );
+ pV = sqlite3MallocZero( (sz+7)/8 + 1 );
+ pTmpSpace = sqlite3_malloc64(BITVEC_SZ);
+ if( pBitvec==0 || pV==0 || pTmpSpace==0 ) goto bitvec_end;
+
+ /* NULL pBitvec tests */
+ sqlite3BitvecSet(0, 1);
+ sqlite3BitvecClear(0, 1, pTmpSpace);
+
+ /* Run the program */
+ pc = 0;
+ while( (op = aOp[pc])!=0 ){
+ switch( op ){
+ case 1:
+ case 2:
+ case 5: {
+ nx = 4;
+ i = aOp[pc+2] - 1;
+ aOp[pc+2] += aOp[pc+3];
+ break;
+ }
+ case 3:
+ case 4:
+ default: {
+ nx = 2;
+ sqlite3_randomness(sizeof(i), &i);
+ break;
+ }
+ }
+ if( (--aOp[pc+1]) > 0 ) nx = 0;
+ pc += nx;
+ i = (i & 0x7fffffff)%sz;
+ if( (op & 1)!=0 ){
+ SETBIT(pV, (i+1));
+ if( op!=5 ){
+ if( sqlite3BitvecSet(pBitvec, i+1) ) goto bitvec_end;
+ }
+ }else{
+ CLEARBIT(pV, (i+1));
+ sqlite3BitvecClear(pBitvec, i+1, pTmpSpace);
+ }
+ }
+
+ /* Test to make sure the linear array exactly matches the
+ ** Bitvec object. Start with the assumption that they do
+ ** match (rc==0). Change rc to non-zero if a discrepancy
+ ** is found.
+ */
+ rc = sqlite3BitvecTest(0,0) + sqlite3BitvecTest(pBitvec, sz+1)
+ + sqlite3BitvecTest(pBitvec, 0)
+ + (sqlite3BitvecSize(pBitvec) - sz);
+ for(i=1; i<=sz; i++){
+ if( (TESTBIT(pV,i))!=sqlite3BitvecTest(pBitvec,i) ){
+ rc = i;
+ break;
+ }
+ }
+
+ /* Free allocated structure */
+bitvec_end:
+ sqlite3_free(pTmpSpace);
+ sqlite3_free(pV);
+ sqlite3BitvecDestroy(pBitvec);
+ return rc;
+}
+#endif /* SQLITE_UNTESTABLE */
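+
+/*
+** An illustrative test program for the routine above: the array
+**
+**   int aOp[] = { 1, 20, 1, 1,   2, 5, 1, 4,   0 };
+**
+** sets bits 1 through 20, then clears five bits starting at bit 1 and
+** stepping by 4 (bits 1, 5, 9, 13, 17), then halts. Both the Bitvec
+** and the reference array receive the same operations, so
+** sqlite3BitvecBuiltinTest(4000, aOp) returns 0.
+*/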
+
+/************** End of bitvec.c **********************************************/
+/************** Begin file pcache.c ******************************************/
+/*
+** 2008 August 05
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+** This file implements the page cache.
+*/
+/* #include "sqliteInt.h" */
+
+/*
+** A complete page cache is an instance of this structure. Every
+** entry in the cache holds a single page of the database file. The
+** btree layer only operates on the cached copy of the database pages.
+**
+** A page cache entry is "clean" if it exactly matches what is currently
+** on disk. A page is "dirty" if it has been modified and needs to be
+** persisted to disk.
+**
+** pDirty, pDirtyTail, pSynced:
+** All dirty pages are linked into the doubly linked list using
+** PgHdr.pDirtyNext and pDirtyPrev. The list is maintained in LRU order
+** such that p was added to the list more recently than p->pDirtyNext.
+** PCache.pDirty points to the first (newest) element in the list and
+** pDirtyTail to the last (oldest).
+**
+** The PCache.pSynced variable is used to optimize searching for a dirty
+** page to eject from the cache mid-transaction. It is better to eject
+** a page that does not require a journal sync than one that does.
+** Therefore, pSynced is maintained so that it *almost* always points
+** to either the oldest page in the pDirty/pDirtyTail list that has a
+** clear PGHDR_NEED_SYNC flag or to a page that is older than this one
+** (so that the right page to eject can be found by following pDirtyPrev
+** pointers).
+*/
+struct PCache {
+ PgHdr *pDirty, *pDirtyTail; /* List of dirty pages in LRU order */
+ PgHdr *pSynced; /* Last synced page in dirty page list */
+ int nRefSum; /* Sum of ref counts over all pages */
+ int szCache; /* Configured cache size */
+ int szSpill; /* Size before spilling occurs */
+ int szPage; /* Size of every page in this cache */
+ int szExtra; /* Size of extra space for each page */
+ u8 bPurgeable; /* True if pages are on backing store */
+ u8 eCreate; /* eCreate value for xFetch() */
+ int (*xStress)(void*,PgHdr*); /* Call to try make a page clean */
+ void *pStress; /* Argument to xStress */
+ sqlite3_pcache *pCache; /* Pluggable cache module */
+};
+
+/********************************** Test and Debug Logic **********************/
+/*
+** Debug tracing macros. Enable by changing the "0" to "1" and
+** recompiling.
+**
+** When sqlite3PcacheTrace is 1, single line trace messages are issued.
+** When sqlite3PcacheTrace is 2, a dump of the pcache showing all cache entries
+** is displayed for many operations, resulting in a lot of output.
+*/
+#if defined(SQLITE_DEBUG) && 0
+ int sqlite3PcacheTrace = 2; /* 0: off 1: simple 2: cache dumps */
+ int sqlite3PcacheMxDump = 9999; /* Max cache entries for pcacheDump() */
+# define pcacheTrace(X) if(sqlite3PcacheTrace){sqlite3DebugPrintf X;}
+ void pcacheDump(PCache *pCache){
+ int N;
+ int i, j;
+ sqlite3_pcache_page *pLower;
+ PgHdr *pPg;
+ unsigned char *a;
+
+ if( sqlite3PcacheTrace<2 ) return;
+ if( pCache->pCache==0 ) return;
+ N = sqlite3PcachePagecount(pCache);
+ if( N>sqlite3PcacheMxDump ) N = sqlite3PcacheMxDump;
+ for(i=1; i<=N; i++){
+ pLower = sqlite3GlobalConfig.pcache2.xFetch(pCache->pCache, i, 0);
+ if( pLower==0 ) continue;
+ pPg = (PgHdr*)pLower->pExtra;
+ printf("%3d: nRef %2d flgs %02x data ", i, pPg->nRef, pPg->flags);
+ a = (unsigned char *)pLower->pBuf;
+ for(j=0; j<12; j++) printf("%02x", a[j]);
+ printf("\n");
+ if( pPg->pPage==0 ){
+ sqlite3GlobalConfig.pcache2.xUnpin(pCache->pCache, pLower, 0);
+ }
+ }
+ }
+#else
+# define pcacheTrace(X)
+# define pcacheDump(X)
+#endif
+
+/*
+** Check invariants on a PgHdr entry. Return true if everything is OK.
+** Return false if any invariant is violated.
+**
+** This routine is for use inside of assert() statements only. For
+** example:
+**
+** assert( sqlite3PcachePageSanity(pPg) );
+*/
+#ifdef SQLITE_DEBUG
+SQLITE_PRIVATE int sqlite3PcachePageSanity(PgHdr *pPg){
+ PCache *pCache;
+ assert( pPg!=0 );
+ assert( pPg->pgno>0 || pPg->pPager==0 ); /* Page number is 1 or more */
+ pCache = pPg->pCache;
+ assert( pCache!=0 ); /* Every page has an associated PCache */
+ if( pPg->flags & PGHDR_CLEAN ){
+ assert( (pPg->flags & PGHDR_DIRTY)==0 );/* Cannot be both CLEAN and DIRTY */
+ assert( pCache->pDirty!=pPg ); /* CLEAN pages not on dirty list */
+ assert( pCache->pDirtyTail!=pPg );
+ }
+ /* WRITEABLE pages must also be DIRTY */
+ if( pPg->flags & PGHDR_WRITEABLE ){
+ assert( pPg->flags & PGHDR_DIRTY ); /* WRITEABLE implies DIRTY */
+ }
+ /* NEED_SYNC can be set independently of WRITEABLE. This can happen,
+ ** for example, when using the sqlite3PagerDontWrite() optimization:
+ ** (1) Page X is journalled, and gets WRITEABLE and NEED_SYNC.
+ ** (2) Page X moved to freelist, WRITEABLE is cleared
+ ** (3) Page X reused, WRITEABLE is set again
+ ** If NEED_SYNC had been cleared in step 2, then it would not be reset
+ ** in step 3, and the page might be written into the database without first
+ ** syncing the rollback journal, which might cause corruption on a power
+ ** loss.
+ **
+ ** Another example is when the database page size is smaller than the
+ ** disk sector size. When any page of a sector is journalled, all pages
+ ** in that sector are marked NEED_SYNC even if they are still CLEAN, just
+ ** in case they are later modified, since all pages in the same sector
+ ** must be journalled and synced before any of those pages can be safely
+ ** written.
+ */
+ return 1;
+}
+#endif /* SQLITE_DEBUG */
+
+
+/********************************** Linked List Management ********************/
+
+/* Allowed values for second argument to pcacheManageDirtyList() */
+#define PCACHE_DIRTYLIST_REMOVE 1 /* Remove pPage from dirty list */
+#define PCACHE_DIRTYLIST_ADD 2 /* Add pPage to the dirty list */
+#define PCACHE_DIRTYLIST_FRONT 3 /* Move pPage to the front of the list */
+
+/*
+** Manage pPage's participation in the dirty list. Bits of the addRemove
+** argument determine which operation to perform. The 0x01 bit means first
+** remove pPage from the dirty list. The 0x02 means add pPage back to
+** the dirty list. Doing both moves pPage to the front of the dirty list.
+*/
+static void pcacheManageDirtyList(PgHdr *pPage, u8 addRemove){
+ PCache *p = pPage->pCache;
+
+ pcacheTrace(("%p.DIRTYLIST.%s %d\n", p,
+ addRemove==1 ? "REMOVE" : addRemove==2 ? "ADD" : "FRONT",
+ pPage->pgno));
+ if( addRemove & PCACHE_DIRTYLIST_REMOVE ){
+ assert( pPage->pDirtyNext || pPage==p->pDirtyTail );
+ assert( pPage->pDirtyPrev || pPage==p->pDirty );
+
+ /* Update the PCache1.pSynced variable if necessary. */
+ if( p->pSynced==pPage ){
+ p->pSynced = pPage->pDirtyPrev;
+ }
+
+ if( pPage->pDirtyNext ){
+ pPage->pDirtyNext->pDirtyPrev = pPage->pDirtyPrev;
+ }else{
+ assert( pPage==p->pDirtyTail );
+ p->pDirtyTail = pPage->pDirtyPrev;
+ }
+ if( pPage->pDirtyPrev ){
+ pPage->pDirtyPrev->pDirtyNext = pPage->pDirtyNext;
+ }else{
+ /* If there are now no dirty pages in the cache, set eCreate to 2.
+ ** This is an optimization that allows sqlite3PcacheFetch() to skip
+ ** searching for a dirty page to eject from the cache when it might
+ ** otherwise have to. */
+ assert( pPage==p->pDirty );
+ p->pDirty = pPage->pDirtyNext;
+ assert( p->bPurgeable || p->eCreate==2 );
+ if( p->pDirty==0 ){ /*OPTIMIZATION-IF-TRUE*/
+ assert( p->bPurgeable==0 || p->eCreate==1 );
+ p->eCreate = 2;
+ }
+ }
+ }
+ if( addRemove & PCACHE_DIRTYLIST_ADD ){
+ pPage->pDirtyPrev = 0;
+ pPage->pDirtyNext = p->pDirty;
+ if( pPage->pDirtyNext ){
+ assert( pPage->pDirtyNext->pDirtyPrev==0 );
+ pPage->pDirtyNext->pDirtyPrev = pPage;
+ }else{
+ p->pDirtyTail = pPage;
+ if( p->bPurgeable ){
+ assert( p->eCreate==2 );
+ p->eCreate = 1;
+ }
+ }
+ p->pDirty = pPage;
+
+ /* If pSynced is NULL and this page has a clear NEED_SYNC flag, set
+ ** pSynced to point to it. Checking the NEED_SYNC flag is an
+ ** optimization: if pSynced points to a page with the NEED_SYNC
+ ** flag set, sqlite3PcacheFetchStress() searches through all newer
+ ** entries of the dirty-list for a page with NEED_SYNC clear anyway. */
+ if( !p->pSynced
+ && 0==(pPage->flags&PGHDR_NEED_SYNC) /*OPTIMIZATION-IF-FALSE*/
+ ){
+ p->pSynced = pPage;
+ }
+ }
+ pcacheDump(p);
+}
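+
+/*
+** For example, pcacheManageDirtyList(pPg, PCACHE_DIRTYLIST_FRONT) first
+** unlinks pPg (the 0x01 bit) and then re-links it at the head of the
+** dirty list (the 0x02 bit), since PCACHE_DIRTYLIST_FRONT (3) has both
+** bits set.
+*/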
+
+/*
+** Wrapper around the pluggable cache's xUnpin method. If the cache is
+** being used for an in-memory database, this function is a no-op.
+*/
+static void pcacheUnpin(PgHdr *p){
+ if( p->pCache->bPurgeable ){
+ pcacheTrace(("%p.UNPIN %d\n", p->pCache, p->pgno));
+ sqlite3GlobalConfig.pcache2.xUnpin(p->pCache->pCache, p->pPage, 0);
+ pcacheDump(p->pCache);
+ }
+}
+
+/*
+** Compute the number of pages of cache requested. p->szCache is the
+** cache size requested by the "PRAGMA cache_size" statement.
+*/
+static int numberOfCachePages(PCache *p){
+ if( p->szCache>=0 ){
+ /* IMPLEMENTATION-OF: R-42059-47211 If the argument N is positive then the
+ ** suggested cache size is set to N. */
+ return p->szCache;
+ }else{
+ /* IMPLEMENTATION-OF: R-59858-46238 If the argument N is negative, then the
+ ** number of cache pages is adjusted to be a number of pages that would
+ ** use approximately abs(N*1024) bytes of memory based on the current
+ ** page size. */
+ return (int)((-1024*(i64)p->szCache)/(p->szPage+p->szExtra));
+ }
+}
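+
+/*
+** For example, after "PRAGMA cache_size=-2000" (p->szCache==-2000), a
+** 4096-byte page with an illustrative 100 bytes of per-page overhead
+** yields (-1024 * -2000)/(4096+100) == 2048000/4196 == 488 cache pages.
+*/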
+
+/*************************************************** General Interfaces ******
+**
+** Initialize and shut down the page cache subsystem. Neither of these
+** functions is threadsafe.
+*/
+SQLITE_PRIVATE int sqlite3PcacheInitialize(void){
+ if( sqlite3GlobalConfig.pcache2.xInit==0 ){
+ /* IMPLEMENTATION-OF: R-26801-64137 If the xInit() method is NULL, then the
+ ** built-in default page cache is used instead of the application defined
+ ** page cache. */
+ sqlite3PCacheSetDefault();
+ assert( sqlite3GlobalConfig.pcache2.xInit!=0 );
+ }
+ return sqlite3GlobalConfig.pcache2.xInit(sqlite3GlobalConfig.pcache2.pArg);
+}
+SQLITE_PRIVATE void sqlite3PcacheShutdown(void){
+ if( sqlite3GlobalConfig.pcache2.xShutdown ){
+ /* IMPLEMENTATION-OF: R-26000-56589 The xShutdown() method may be NULL. */
+ sqlite3GlobalConfig.pcache2.xShutdown(sqlite3GlobalConfig.pcache2.pArg);
+ }
+}
+
+/*
+** Return the size in bytes of a PCache object.
+*/
+SQLITE_PRIVATE int sqlite3PcacheSize(void){ return sizeof(PCache); }
+
+/*
+** Create a new PCache object. Storage space to hold the object
+** has already been allocated and is passed in as the p pointer.
+** The caller discovers how much space needs to be allocated by
+** calling sqlite3PcacheSize().
+**
+** szExtra is some extra space allocated for each page. The first
+** 8 bytes of the extra space will be zeroed as the page is allocated,
+** but remaining content will be uninitialized. Though it is opaque
+** to this module, the extra space really ends up being the MemPage
+** structure in the pager.
+*/
+SQLITE_PRIVATE int sqlite3PcacheOpen(
+ int szPage, /* Size of every page */
+ int szExtra, /* Extra space associated with each page */
+ int bPurgeable, /* True if pages are on backing store */
+ int (*xStress)(void*,PgHdr*),/* Call to try to make pages clean */
+ void *pStress, /* Argument to xStress */
+ PCache *p /* Preallocated space for the PCache */
+){
+ memset(p, 0, sizeof(PCache));
+ p->szPage = 1;
+ p->szExtra = szExtra;
+ assert( szExtra>=8 ); /* First 8 bytes will be zeroed */
+ p->bPurgeable = bPurgeable;
+ p->eCreate = 2;
+ p->xStress = xStress;
+ p->pStress = pStress;
+ p->szCache = 100;
+ p->szSpill = 1;
+ pcacheTrace(("%p.OPEN szPage %d bPurgeable %d\n",p,szPage,bPurgeable));
+ return sqlite3PcacheSetPageSize(p, szPage);
+}
+
+/*
+** Change the page size for the PCache object. The caller must ensure that there
+** are no outstanding page references when this function is called.
+*/
+SQLITE_PRIVATE int sqlite3PcacheSetPageSize(PCache *pCache, int szPage){
+ assert( pCache->nRefSum==0 && pCache->pDirty==0 );
+ if( pCache->szPage ){
+ sqlite3_pcache *pNew;
+ pNew = sqlite3GlobalConfig.pcache2.xCreate(
+ szPage, pCache->szExtra + ROUND8(sizeof(PgHdr)),
+ pCache->bPurgeable
+ );
+ if( pNew==0 ) return SQLITE_NOMEM_BKPT;
+ sqlite3GlobalConfig.pcache2.xCachesize(pNew, numberOfCachePages(pCache));
+ if( pCache->pCache ){
+ sqlite3GlobalConfig.pcache2.xDestroy(pCache->pCache);
+ }
+ pCache->pCache = pNew;
+ pCache->szPage = szPage;
+ pcacheTrace(("%p.PAGESIZE %d\n",pCache,szPage));
+ }
+ return SQLITE_OK;
+}
+
+/*
+** Try to obtain a page from the cache.
+**
+** This routine returns a pointer to an sqlite3_pcache_page object if
+** such an object is already in cache, or if a new one is created.
+** This routine returns a NULL pointer if the object was not in cache
+** and could not be created.
+**
+** The createFlags should be 0 to check for existing pages and should
+** be 3 (not 1, but 3) to try to create a new page.
+**
+** If the createFlag is 0, then NULL is always returned if the page
+** is not already in the cache. If createFlag is 1, then a new page
+** is created only if that can be done without spilling dirty pages
+** and without exceeding the cache size limit.
+**
+** The caller needs to invoke sqlite3PcacheFetchFinish() to properly
+** initialize the sqlite3_pcache_page object and convert it into a
+** PgHdr object. The sqlite3PcacheFetch() and sqlite3PcacheFetchFinish()
+** routines are split this way for performance reasons. When separated
+** they can both (usually) operate without having to push values to
+** the stack on entry and pop them back off on exit, which saves a
+** lot of pushing and popping.
+*/
+SQLITE_PRIVATE sqlite3_pcache_page *sqlite3PcacheFetch(
+ PCache *pCache, /* Obtain the page from this cache */
+ Pgno pgno, /* Page number to obtain */
+ int createFlag /* If true, create page if it does not exist already */
+){
+ int eCreate;
+ sqlite3_pcache_page *pRes;
+
+ assert( pCache!=0 );
+ assert( pCache->pCache!=0 );
+ assert( createFlag==3 || createFlag==0 );
+ assert( pCache->eCreate==((pCache->bPurgeable && pCache->pDirty) ? 1 : 2) );
+
+ /* eCreate defines what to do if the page does not exist.
+ ** 0 Do not allocate a new page. (createFlag==0)
+ ** 1 Allocate a new page if doing so is inexpensive.
+ ** (createFlag==1 AND bPurgeable AND pDirty)
+ ** 2 Allocate a new page even if doing so is difficult.
+ ** (createFlag==1 AND !(bPurgeable AND pDirty))
+ */
+ eCreate = createFlag & pCache->eCreate;
+ assert( eCreate==0 || eCreate==1 || eCreate==2 );
+ assert( createFlag==0 || pCache->eCreate==eCreate );
+ assert( createFlag==0 || eCreate==1+(!pCache->bPurgeable||!pCache->pDirty) );
+ pRes = sqlite3GlobalConfig.pcache2.xFetch(pCache->pCache, pgno, eCreate);
+ pcacheTrace(("%p.FETCH %d%s (result: %p)\n",pCache,pgno,
+ createFlag?" create":"",pRes));
+ return pRes;
+}
+
+/*
+** If the sqlite3PcacheFetch() routine is unable to allocate a new
+** page because no clean pages are available for reuse and the cache
+** size limit has been reached, then this routine can be invoked to
+** try harder to allocate a page. This routine might invoke the stress
+** callback to spill dirty pages to the journal. It will then try to
+** allocate the new page and will only fail to allocate a new page on
+** an OOM error.
+**
+** This routine should be invoked only after sqlite3PcacheFetch() fails.
+*/
+SQLITE_PRIVATE int sqlite3PcacheFetchStress(
+ PCache *pCache, /* Obtain the page from this cache */
+ Pgno pgno, /* Page number to obtain */
+ sqlite3_pcache_page **ppPage /* Write result here */
+){
+ PgHdr *pPg;
+ if( pCache->eCreate==2 ) return 0;
+
+ if( sqlite3PcachePagecount(pCache)>pCache->szSpill ){
+ /* Find a dirty page to write-out and recycle. First try to find a
+ ** page that does not require a journal-sync (one with PGHDR_NEED_SYNC
+ ** cleared), but if that is not possible, settle for any other
+ ** unreferenced dirty page.
+ **
+ ** If the LRU page in the dirty list that has a clear PGHDR_NEED_SYNC
+ ** flag is currently referenced, then the following may leave pSynced
+ ** set incorrectly (pointing to other than the LRU page with NEED_SYNC
+ ** cleared). This is Ok, as pSynced is just an optimization. */
+ for(pPg=pCache->pSynced;
+ pPg && (pPg->nRef || (pPg->flags&PGHDR_NEED_SYNC));
+ pPg=pPg->pDirtyPrev
+ );
+ pCache->pSynced = pPg;
+ if( !pPg ){
+ for(pPg=pCache->pDirtyTail; pPg && pPg->nRef; pPg=pPg->pDirtyPrev);
+ }
+ if( pPg ){
+ int rc;
+#ifdef SQLITE_LOG_CACHE_SPILL
+ sqlite3_log(SQLITE_FULL,
+ "spill page %d making room for %d - cache used: %d/%d",
+ pPg->pgno, pgno,
+ sqlite3GlobalConfig.pcache2.xPagecount(pCache->pCache),
+ numberOfCachePages(pCache));
+#endif
+ pcacheTrace(("%p.SPILL %d\n",pCache,pPg->pgno));
+ rc = pCache->xStress(pCache->pStress, pPg);
+ pcacheDump(pCache);
+ if( rc!=SQLITE_OK && rc!=SQLITE_BUSY ){
+ return rc;
+ }
+ }
+ }
+ *ppPage = sqlite3GlobalConfig.pcache2.xFetch(pCache->pCache, pgno, 2);
+ return *ppPage==0 ? SQLITE_NOMEM_BKPT : SQLITE_OK;
+}
+
+/*
+** This is a helper routine for sqlite3PcacheFetchFinish()
+**
+** In the uncommon case where the page being fetched has not been
+** initialized, this routine is invoked to do the initialization.
+** This routine is broken out into a separate function since it
+** requires extra stack manipulation that can be avoided in the common
+** case.
+*/
+static SQLITE_NOINLINE PgHdr *pcacheFetchFinishWithInit(
+ PCache *pCache, /* Obtain the page from this cache */
+ Pgno pgno, /* Page number obtained */
+ sqlite3_pcache_page *pPage /* Page obtained by prior PcacheFetch() call */
+){
+ PgHdr *pPgHdr;
+ assert( pPage!=0 );
+ pPgHdr = (PgHdr*)pPage->pExtra;
+ assert( pPgHdr->pPage==0 );
+ memset(&pPgHdr->pDirty, 0, sizeof(PgHdr) - offsetof(PgHdr,pDirty));
+ pPgHdr->pPage = pPage;
+ pPgHdr->pData = pPage->pBuf;
+ pPgHdr->pExtra = (void *)&pPgHdr[1];
+ memset(pPgHdr->pExtra, 0, 8);
+ pPgHdr->pCache = pCache;
+ pPgHdr->pgno = pgno;
+ pPgHdr->flags = PGHDR_CLEAN;
+ return sqlite3PcacheFetchFinish(pCache,pgno,pPage);
+}
+
+/*
+** This routine converts the sqlite3_pcache_page object returned by
+** sqlite3PcacheFetch() into an initialized PgHdr object. This routine
+** must be called after sqlite3PcacheFetch() in order to get a usable
+** result.
+*/
+SQLITE_PRIVATE PgHdr *sqlite3PcacheFetchFinish(
+ PCache *pCache, /* Obtain the page from this cache */
+ Pgno pgno, /* Page number obtained */
+ sqlite3_pcache_page *pPage /* Page obtained by prior PcacheFetch() call */
+){
+ PgHdr *pPgHdr;
+
+ assert( pPage!=0 );
+ pPgHdr = (PgHdr *)pPage->pExtra;
+
+ if( !pPgHdr->pPage ){
+ return pcacheFetchFinishWithInit(pCache, pgno, pPage);
+ }
+ pCache->nRefSum++;
+ pPgHdr->nRef++;
+ assert( sqlite3PcachePageSanity(pPgHdr) );
+ return pPgHdr;
+}
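+
+/*
+** The three routines above form a single fetch protocol: Fetch, then
+** FetchStress on a miss, then FetchFinish. The sketch below (excluded
+** from compilation) shows how a caller might drive it; it is modeled
+** loosely on the pager layer, and the helper name exampleGetPage() is
+** hypothetical.
+*/
+#if 0
+static int exampleGetPage(PCache *pCache, Pgno pgno, PgHdr **ppPg){
+ sqlite3_pcache_page *pBase;
+ int rc;
+ *ppPg = 0;
+ pBase = sqlite3PcacheFetch(pCache, pgno, 3); /* Phase 1: fast path */
+ if( pBase==0 ){
+ /* Phase 2: spill dirty pages and retry. Only reached on a miss.
+ ** On success pBase is guaranteed non-NULL. */
+ rc = sqlite3PcacheFetchStress(pCache, pgno, &pBase);
+ if( rc!=SQLITE_OK ) return rc;
+ }
+ /* Phase 3: turn the raw cache page into an initialized PgHdr */
+ *ppPg = sqlite3PcacheFetchFinish(pCache, pgno, pBase);
+ return SQLITE_OK;
+}
+#endif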
+
+/*
+** Decrement the reference count on a page. If the page is clean and the
+** reference count drops to 0, then it is made eligible for recycling.
+*/
+SQLITE_PRIVATE void SQLITE_NOINLINE sqlite3PcacheRelease(PgHdr *p){
+ assert( p->nRef>0 );
+ p->pCache->nRefSum--;
+ if( (--p->nRef)==0 ){
+ if( p->flags&PGHDR_CLEAN ){
+ pcacheUnpin(p);
+ }else{
+ pcacheManageDirtyList(p, PCACHE_DIRTYLIST_FRONT);
+ }
+ }
+}
+
+/*
+** Increase the reference count of a supplied page by 1.
+*/
+SQLITE_PRIVATE void sqlite3PcacheRef(PgHdr *p){
+ assert(p->nRef>0);
+ assert( sqlite3PcachePageSanity(p) );
+ p->nRef++;
+ p->pCache->nRefSum++;
+}
+
+/*
+** Drop a page from the cache. There must be exactly one reference to the
+** page. This function deletes that reference, so after it returns the
+** page pointed to by p is invalid.
+*/
+SQLITE_PRIVATE void sqlite3PcacheDrop(PgHdr *p){
+ assert( p->nRef==1 );
+ assert( sqlite3PcachePageSanity(p) );
+ if( p->flags&PGHDR_DIRTY ){
+ pcacheManageDirtyList(p, PCACHE_DIRTYLIST_REMOVE);
+ }
+ p->pCache->nRefSum--;
+ sqlite3GlobalConfig.pcache2.xUnpin(p->pCache->pCache, p->pPage, 1);
+}
+
+/*
+** Make sure the page is marked as dirty. If it isn't dirty already,
+** make it so.
+*/
+SQLITE_PRIVATE void sqlite3PcacheMakeDirty(PgHdr *p){
+ assert( p->nRef>0 );
+ assert( sqlite3PcachePageSanity(p) );
+ if( p->flags & (PGHDR_CLEAN|PGHDR_DONT_WRITE) ){ /*OPTIMIZATION-IF-FALSE*/
+ p->flags &= ~PGHDR_DONT_WRITE;
+ if( p->flags & PGHDR_CLEAN ){
+ p->flags ^= (PGHDR_DIRTY|PGHDR_CLEAN);
+ pcacheTrace(("%p.DIRTY %d\n",p->pCache,p->pgno));
+ assert( (p->flags & (PGHDR_DIRTY|PGHDR_CLEAN))==PGHDR_DIRTY );
+ pcacheManageDirtyList(p, PCACHE_DIRTYLIST_ADD);
+ }
+ assert( sqlite3PcachePageSanity(p) );
+ }
+}
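+
+/*
+** A note on the flag flip above: PGHDR_CLEAN and PGHDR_DIRTY are
+** distinct bits and exactly one of the two is set on any page, so
+** XOR-ing flags with (PGHDR_DIRTY|PGHDR_CLEAN) clears CLEAN and sets
+** DIRTY in a single operation. With illustrative bit values
+** CLEAN==0x01 and DIRTY==0x02: 0x01 ^ 0x03 == 0x02.
+*/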
+
+/*
+** Make sure the page is marked as clean. If it isn't clean already,
+** make it so.
+*/
+SQLITE_PRIVATE void sqlite3PcacheMakeClean(PgHdr *p){
+ assert( sqlite3PcachePageSanity(p) );
+ assert( (p->flags & PGHDR_DIRTY)!=0 );
+ assert( (p->flags & PGHDR_CLEAN)==0 );
+ pcacheManageDirtyList(p, PCACHE_DIRTYLIST_REMOVE);
+ p->flags &= ~(PGHDR_DIRTY|PGHDR_NEED_SYNC|PGHDR_WRITEABLE);
+ p->flags |= PGHDR_CLEAN;
+ pcacheTrace(("%p.CLEAN %d\n",p->pCache,p->pgno));
+ assert( sqlite3PcachePageSanity(p) );
+ if( p->nRef==0 ){
+ pcacheUnpin(p);
+ }
+}
+
+/*
+** Make every page in the cache clean.
+*/
+SQLITE_PRIVATE void sqlite3PcacheCleanAll(PCache *pCache){
+ PgHdr *p;
+ pcacheTrace(("%p.CLEAN-ALL\n",pCache));
+ while( (p = pCache->pDirty)!=0 ){
+ sqlite3PcacheMakeClean(p);
+ }
+}
+
+/*
+** Clear the PGHDR_NEED_SYNC and PGHDR_WRITEABLE flags from all dirty pages.
+*/
+SQLITE_PRIVATE void sqlite3PcacheClearWritable(PCache *pCache){
+ PgHdr *p;
+ pcacheTrace(("%p.CLEAR-WRITEABLE\n",pCache));
+ for(p=pCache->pDirty; p; p=p->pDirtyNext){
+ p->flags &= ~(PGHDR_NEED_SYNC|PGHDR_WRITEABLE);
+ }
+ pCache->pSynced = pCache->pDirtyTail;
+}
+
+/*
+** Clear the PGHDR_NEED_SYNC flag from all dirty pages.
+*/
+SQLITE_PRIVATE void sqlite3PcacheClearSyncFlags(PCache *pCache){
+ PgHdr *p;
+ for(p=pCache->pDirty; p; p=p->pDirtyNext){
+ p->flags &= ~PGHDR_NEED_SYNC;
+ }
+ pCache->pSynced = pCache->pDirtyTail;
+}
+
+/*
+** Change the page number of page p to newPgno.
+*/
+SQLITE_PRIVATE void sqlite3PcacheMove(PgHdr *p, Pgno newPgno){
+ PCache *pCache = p->pCache;
+ assert( p->nRef>0 );
+ assert( newPgno>0 );
+ assert( sqlite3PcachePageSanity(p) );
+ pcacheTrace(("%p.MOVE %d -> %d\n",pCache,p->pgno,newPgno));
+ sqlite3GlobalConfig.pcache2.xRekey(pCache->pCache, p->pPage, p->pgno,newPgno);
+ p->pgno = newPgno;
+ if( (p->flags&PGHDR_DIRTY) && (p->flags&PGHDR_NEED_SYNC) ){
+ pcacheManageDirtyList(p, PCACHE_DIRTYLIST_FRONT);
+ }
+}
+
+/*
+** Drop every cache entry whose page number is greater than "pgno". The
+** caller must ensure that there are no outstanding references to any page
+** with a page number greater than pgno, other than page 1.
+**
+** If there is a reference to page 1 and the pgno parameter passed to this
+** function is 0, then the data area associated with page 1 is zeroed, but
+** the page object is not dropped.
+*/
+SQLITE_PRIVATE void sqlite3PcacheTruncate(PCache *pCache, Pgno pgno){
+ if( pCache->pCache ){
+ PgHdr *p;
+ PgHdr *pNext;
+ pcacheTrace(("%p.TRUNCATE %d\n",pCache,pgno));
+ for(p=pCache->pDirty; p; p=pNext){
+ pNext = p->pDirtyNext;
+ /* This routine never gets called with a positive pgno except right
+ ** after sqlite3PcacheCleanAll(). So if there are dirty pages,
+ ** it must be that pgno==0.
+ */
+ assert( p->pgno>0 );
+ if( p->pgno>pgno ){
+ assert( p->flags&PGHDR_DIRTY );
+ sqlite3PcacheMakeClean(p);
+ }
+ }
+ if( pgno==0 && pCache->nRefSum ){
+ sqlite3_pcache_page *pPage1;
+ pPage1 = sqlite3GlobalConfig.pcache2.xFetch(pCache->pCache,1,0);
+ if( ALWAYS(pPage1) ){ /* Page 1 is always available in cache, because
+ ** pCache->nRefSum>0 */
+ memset(pPage1->pBuf, 0, pCache->szPage);
+ pgno = 1;
+ }
+ }
+ sqlite3GlobalConfig.pcache2.xTruncate(pCache->pCache, pgno+1);
+ }
+}
+
+/*
+** Close a cache.
+*/
+SQLITE_PRIVATE void sqlite3PcacheClose(PCache *pCache){
+ assert( pCache->pCache!=0 );
+ pcacheTrace(("%p.CLOSE\n",pCache));
+ sqlite3GlobalConfig.pcache2.xDestroy(pCache->pCache);
+}
+
+/*
+** Discard the contents of the cache.
+*/
+SQLITE_PRIVATE void sqlite3PcacheClear(PCache *pCache){
+ sqlite3PcacheTruncate(pCache, 0);
+}
+
+/*
+** Merge two lists of pages connected by pDirty and in pgno order.
+** Do not bother fixing the pDirtyPrev pointers.
+*/
+static PgHdr *pcacheMergeDirtyList(PgHdr *pA, PgHdr *pB){
+ PgHdr result, *pTail;
+ pTail = &result;
+ assert( pA!=0 && pB!=0 );
+ for(;;){
+ if( pA->pgno<pB->pgno ){
+ pTail->pDirty = pA;
+ pTail = pA;
+ pA = pA->pDirty;
+ if( pA==0 ){
+ pTail->pDirty = pB;
+ break;
+ }
+ }else{
+ pTail->pDirty = pB;
+ pTail = pB;
+ pB = pB->pDirty;
+ if( pB==0 ){
+ pTail->pDirty = pA;
+ break;
+ }
+ }
+ }
+ return result.pDirty;
+}
+
+/*
+** Sort the list of pages in ascending order by pgno. Pages are
+** connected by pDirty pointers. The pDirtyPrev pointers are
+** corrupted by this sort.
+**
+** Since there cannot be more than 2^31 distinct pages in a database,
+** there cannot be more than 31 buckets required by the merge sorter.
+** One extra bucket is added to catch overflow in case something
+** ever changes to make the previous sentence incorrect.
+*/
+#define N_SORT_BUCKET 32
+static PgHdr *pcacheSortDirtyList(PgHdr *pIn){
+ PgHdr *a[N_SORT_BUCKET], *p;
+ int i;
+ memset(a, 0, sizeof(a));
+ while( pIn ){
+ p = pIn;
+ pIn = p->pDirty;
+ p->pDirty = 0;
+ for(i=0; ALWAYS(i<N_SORT_BUCKET-1); i++){
+ if( a[i]==0 ){
+ a[i] = p;
+ break;
+ }else{
+ p = pcacheMergeDirtyList(a[i], p);
+ a[i] = 0;
+ }
+ }
+ if( NEVER(i==N_SORT_BUCKET-1) ){
+ /* To get here, there need to be 2^(N_SORT_BUCKET) elements in
+ ** the input list. But that is impossible.
+ */
+ a[i] = pcacheMergeDirtyList(a[i], p);
+ }
+ }
+ p = a[0];
+ for(i=1; i<N_SORT_BUCKET; i++){
+ if( a[i]==0 ) continue;
+ p = p ? pcacheMergeDirtyList(p, a[i]) : a[i];
+ }
+ return p;
+}
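+
+/*
+** The following sketch (excluded from compilation) restates the bucket
+** scheme used by pcacheSortDirtyList() on a minimal list type. Bucket
+** a[i] holds either nothing or a sorted run of 2^i nodes; inserting a
+** one-node run ripples through the buckets like binary addition with
+** carries, and a final pass merges whatever runs survive. The Node
+** type and function names are illustrative only.
+*/
+#if 0
+typedef struct Node Node;
+struct Node { int key; Node *pNext; };
+
+static Node *exampleMerge(Node *pA, Node *pB){
+ Node head, *pTail = &head;
+ while( pA && pB ){
+ if( pA->key<pB->key ){ pTail->pNext = pA; pTail = pA; pA = pA->pNext; }
+ else { pTail->pNext = pB; pTail = pB; pB = pB->pNext; }
+ }
+ pTail->pNext = pA ? pA : pB;
+ return head.pNext;
+}
+
+static Node *exampleSort(Node *pIn){
+ Node *a[32], *p;
+ int i;
+ memset(a, 0, sizeof(a));
+ while( pIn ){
+ p = pIn;
+ pIn = p->pNext;
+ p->pNext = 0;
+ for(i=0; i<31 && a[i]; i++){
+ p = exampleMerge(a[i], p); /* "carry" into the next bucket */
+ a[i] = 0;
+ }
+ a[i] = a[i] ? exampleMerge(a[i], p) : p;
+ }
+ for(p=a[0], i=1; i<32; i++){
+ if( a[i] ) p = p ? exampleMerge(p, a[i]) : a[i];
+ }
+ return p;
+}
+#endif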
+
+/*
+** Return a list of all dirty pages in the cache, sorted by page number.
+*/
+SQLITE_PRIVATE PgHdr *sqlite3PcacheDirtyList(PCache *pCache){
+ PgHdr *p;
+ for(p=pCache->pDirty; p; p=p->pDirtyNext){
+ p->pDirty = p->pDirtyNext;
+ }
+ return pcacheSortDirtyList(pCache->pDirty);
+}
+
+/*
+** Return the total number of references to all pages held by the cache.
+**
+** This is not the total number of pages referenced, but the sum of the
+** reference count for all pages.
+*/
+SQLITE_PRIVATE int sqlite3PcacheRefCount(PCache *pCache){
+ return pCache->nRefSum;
+}
+
+/*
+** Return the number of references to the page supplied as an argument.
+*/
+SQLITE_PRIVATE int sqlite3PcachePageRefcount(PgHdr *p){
+ return p->nRef;
+}
+
+/*
+** Return the total number of pages in the cache.
+*/
+SQLITE_PRIVATE int sqlite3PcachePagecount(PCache *pCache){
+ assert( pCache->pCache!=0 );
+ return sqlite3GlobalConfig.pcache2.xPagecount(pCache->pCache);
+}
+
+#ifdef SQLITE_TEST
+/*
+** Get the suggested cache-size value.
+*/
+SQLITE_PRIVATE int sqlite3PcacheGetCachesize(PCache *pCache){
+ return numberOfCachePages(pCache);
+}
+#endif
+
+/*
+** Set the suggested cache-size value.
+*/
+SQLITE_PRIVATE void sqlite3PcacheSetCachesize(PCache *pCache, int mxPage){
+ assert( pCache->pCache!=0 );
+ pCache->szCache = mxPage;
+ sqlite3GlobalConfig.pcache2.xCachesize(pCache->pCache,
+ numberOfCachePages(pCache));
+}
+
+/*
+** Set the suggested cache-spill value. Make no changes if the
+** argument is zero. Return the effective cache-spill size, which will
+** be the larger of szSpill and szCache.
+*/
+SQLITE_PRIVATE int sqlite3PcacheSetSpillsize(PCache *p, int mxPage){
+ int res;
+ assert( p->pCache!=0 );
+ if( mxPage ){
+ if( mxPage<0 ){
+ mxPage = (int)((-1024*(i64)mxPage)/(p->szPage+p->szExtra));
+ }
+ p->szSpill = mxPage;
+ }
+ res = numberOfCachePages(p);
+ if( res<p->szSpill ) res = p->szSpill;
+ return res;
+}
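+
+/*
+** Worked example for the negative-argument case above (numbers
+** illustrative): with mxPage==-2000, szPage==4096 and szExtra==272,
+** the spill threshold becomes (1024*2000)/(4096+272) == 468 pages.
+** In other words, a negative argument requests a byte budget of
+** -1024*mxPage rather than a page count.
+*/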
+
+/*
+** Free up as much memory as possible from the page cache.
+*/
+SQLITE_PRIVATE void sqlite3PcacheShrink(PCache *pCache){
+ assert( pCache->pCache!=0 );
+ sqlite3GlobalConfig.pcache2.xShrink(pCache->pCache);
+}
+
+/*
+** Return the size of the header added by this middleware layer
+** in the page-cache hierarchy.
+*/
+SQLITE_PRIVATE int sqlite3HeaderSizePcache(void){ return ROUND8(sizeof(PgHdr)); }
+
+/*
+** Return the number of dirty pages currently in the cache, as a percentage
+** of the configured cache size.
+*/
+SQLITE_PRIVATE int sqlite3PCachePercentDirty(PCache *pCache){
+ PgHdr *pDirty;
+ int nDirty = 0;
+ int nCache = numberOfCachePages(pCache);
+ for(pDirty=pCache->pDirty; pDirty; pDirty=pDirty->pDirtyNext) nDirty++;
+ return nCache ? (int)(((i64)nDirty * 100) / nCache) : 0;
+}
+
+#ifdef SQLITE_DIRECT_OVERFLOW_READ
+/*
+** Return true if there are one or more dirty pages in the cache. Else false.
+*/
+SQLITE_PRIVATE int sqlite3PCacheIsDirty(PCache *pCache){
+ return (pCache->pDirty!=0);
+}
+#endif
+
+#if defined(SQLITE_CHECK_PAGES) || defined(SQLITE_DEBUG)
+/*
+** For all dirty pages currently in the cache, invoke the specified
+** callback. This is only used if the SQLITE_CHECK_PAGES macro is
+** defined.
+*/
+SQLITE_PRIVATE void sqlite3PcacheIterateDirty(PCache *pCache, void (*xIter)(PgHdr *)){
+ PgHdr *pDirty;
+ for(pDirty=pCache->pDirty; pDirty; pDirty=pDirty->pDirtyNext){
+ xIter(pDirty);
+ }
+}
+#endif
+
+/************** End of pcache.c **********************************************/
+/************** Begin file pcache1.c *****************************************/
+/*
+** 2008 November 05
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+**
+** This file implements the default page cache implementation (the
+** sqlite3_pcache interface). It also contains part of the implementation
+** of the SQLITE_CONFIG_PAGECACHE and sqlite3_release_memory() features.
+** If the default page cache implementation is overridden, then neither of
+** these two features are available.
+**
+** A Page cache line looks like this:
+**
+** -------------------------------------------------------------
+** | database page content | PgHdr1 | MemPage | PgHdr |
+** -------------------------------------------------------------
+**
+** The database page content is up front (so that buffer overreads tend to
+** flow harmlessly into the PgHdr1, MemPage, and PgHdr extensions). MemPage
+** is the extension added by the btree.c module containing information such
+** as the database page number and how that database page is used. PgHdr
+** is added by the pcache.c layer and contains information used to keep track
+** of which pages are "dirty". PgHdr1 is an extension added by this
+** module (pcache1.c). The PgHdr1 header is a subclass of sqlite3_pcache_page.
+** PgHdr1 contains information needed to look up a page by its page number.
+** The superclass sqlite3_pcache_page.pBuf points to the start of the
+** database page content and sqlite3_pcache_page.pExtra points to PgHdr.
+**
+** The size of the extension (MemPage+PgHdr+PgHdr1) can be determined at
+** runtime using sqlite3_config(SQLITE_CONFIG_PCACHE_HDRSZ, &size). The
+** sizes of the extensions sum to 272 bytes on x64 for 3.8.10, but this
+** size can vary according to architecture, compile-time options, and
+** SQLite library version number.
+**
+** If SQLITE_PCACHE_SEPARATE_HEADER is defined, then the extension is obtained
+** using a separate memory allocation from the database page content. This
+** seeks to overcome the "clownshoe" problem (also called "internal
+** fragmentation" in academic literature) of allocating a few bytes more
+** than a power of two with the memory allocator rounding up to the next
+** power of two, and leaving the rounded-up space unused.
+**
+** This module tracks pointers to PgHdr1 objects. Only pcache.c communicates
+** with this module. Information is passed back and forth as PgHdr1 pointers.
+**
+** The pcache.c and pager.c modules deal with pointers to PgHdr objects.
+** The btree.c module deals with pointers to MemPage objects.
+**
+** SOURCE OF PAGE CACHE MEMORY:
+**
+** Memory for a page might come from any of three sources:
+**
+** (1) The general-purpose memory allocator - sqlite3Malloc()
+** (2) Global page-cache memory provided using sqlite3_config() with
+** SQLITE_CONFIG_PAGECACHE.
+** (3) PCache-local bulk allocation.
+**
+** The third case is a chunk of heap memory (defaulting to 100 pages worth)
+** that is allocated when the page cache is created. The size of the local
+** bulk allocation can be adjusted using
+**
+** sqlite3_config(SQLITE_CONFIG_PAGECACHE, (void*)0, 0, N).
+**
+** If N is positive, then N pages worth of memory are allocated using a single
+** sqlite3Malloc() call and that memory is used for the first N pages allocated.
+** Or if N is negative, then -1024*N bytes of memory are allocated and used
+** for as many pages as can be accommodated.
+**
+** Only one of (2) or (3) can be used. Once the memory available to (2) or
+** (3) is exhausted, subsequent allocations fail over to the general-purpose
+** memory allocator (1).
+**
+** Earlier versions of SQLite used only methods (1) and (2). But experiments
+** show that method (3) with N==100 provides about a 5% performance boost for
+** common workloads.
+*/
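+
+/*
+** The sketch below (excluded from compilation) shows how an application
+** might supply the start-time buffer of source (2). The page size and
+** page count are arbitrary example values; the per-page header overhead
+** is queried with SQLITE_CONFIG_PCACHE_HDRSZ as described above. Note
+** that sqlite3_config() may only be called before the library is
+** initialized.
+*/
+#if 0
+static int exampleConfigurePageCache(void){
+ int szHdr = 0; /* Per-page header overhead */
+ int szPage = 4096; /* Database page size being provisioned for */
+ int nPage = 500; /* Number of page slots to supply */
+ void *pBuf;
+ int rc = sqlite3_config(SQLITE_CONFIG_PCACHE_HDRSZ, &szHdr);
+ if( rc!=SQLITE_OK ) return rc;
+ pBuf = malloc((size_t)(szPage+szHdr)*nPage);
+ if( pBuf==0 ) return SQLITE_NOMEM;
+ /* Each slot is szPage+szHdr bytes; nPage slots are made available */
+ return sqlite3_config(SQLITE_CONFIG_PAGECACHE, pBuf, szPage+szHdr, nPage);
+}
+#endif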
+/* #include "sqliteInt.h" */
+
+typedef struct PCache1 PCache1;
+typedef struct PgHdr1 PgHdr1;
+typedef struct PgFreeslot PgFreeslot;
+typedef struct PGroup PGroup;
+
+/*
+** Each cache entry is represented by an instance of the following
+** structure. Unless SQLITE_PCACHE_SEPARATE_HEADER is defined, a buffer of
+** PgHdr1.pCache->szPage bytes is allocated directly before this structure
+** in memory.
+**
+** Note: Variables isBulkLocal and isAnchor were once type "u8". That works,
+** but causes a 2-byte gap in the structure for most architectures (since
+** pointers must be either 4 or 8-byte aligned). As this structure is located
+** in memory directly after the associated page data, if the database is
+** corrupt, code at the b-tree layer may overread the page buffer and
+** read part of this structure before the corruption is detected. This
+** can cause a valgrind error if the uninitialized gap is accessed. Using u16
+** ensures there is no such gap, and therefore no bytes of uninitialized memory
+** in the structure.
+*/
+struct PgHdr1 {
+ sqlite3_pcache_page page; /* Base class. Must be first. pBuf & pExtra */
+ unsigned int iKey; /* Key value (page number) */
+ u16 isBulkLocal; /* This page from bulk local storage */
+ u16 isAnchor; /* This is the PGroup.lru element */
+ PgHdr1 *pNext; /* Next in hash table chain */
+ PCache1 *pCache; /* Cache that currently owns this page */
+ PgHdr1 *pLruNext; /* Next in LRU list of unpinned pages */
+ PgHdr1 *pLruPrev; /* Previous in LRU list of unpinned pages */
+ /* NB: pLruPrev is only valid if pLruNext!=0 */
+};
+
+/*
+** A page is pinned if it is not on the LRU list. To be "pinned" means
+** that the page is in active use and must not be deallocated.
+*/
+#define PAGE_IS_PINNED(p) ((p)->pLruNext==0)
+#define PAGE_IS_UNPINNED(p) ((p)->pLruNext!=0)
+
+/* Each page cache (or PCache) belongs to a PGroup. A PGroup is a set
+** of one or more PCaches that are able to recycle each other's unpinned
+** pages when they are under memory pressure. A PGroup is an instance of
+** the following object.
+**
+** This page cache implementation works in one of two modes:
+**
+** (1) Every PCache is the sole member of its own PGroup. There is
+** one PGroup per PCache.
+**
+** (2) There is a single global PGroup that all PCaches are a member
+** of.
+**
+** Mode 1 uses more memory (since PCache instances are not able to rob
+** unused pages from other PCaches) but it also operates without a mutex,
+** and is therefore often faster. Mode 2 requires a mutex in order to be
+** threadsafe, but recycles pages more efficiently.
+**
+** For mode (1), PGroup.mutex is NULL. For mode (2) there is only a single
+** PGroup which is the pcache1.grp global variable and its mutex is
+** SQLITE_MUTEX_STATIC_LRU.
+*/
+struct PGroup {
+ sqlite3_mutex *mutex; /* MUTEX_STATIC_LRU or NULL */
+ unsigned int nMaxPage; /* Sum of nMax for purgeable caches */
+ unsigned int nMinPage; /* Sum of nMin for purgeable caches */
+ unsigned int mxPinned; /* nMaxpage + 10 - nMinPage */
+ unsigned int nPurgeable; /* Number of purgeable pages allocated */
+ PgHdr1 lru; /* The beginning and end of the LRU list */
+};
+
+/* Each page cache is an instance of the following object. Every
+** open database file (including each in-memory database and each
+** temporary or transient database) has a single page cache which
+** is an instance of this object.
+**
+** Pointers to structures of this type are cast and returned as
+** opaque sqlite3_pcache* handles.
+*/
+struct PCache1 {
+ /* Cache configuration parameters. Page size (szPage) and the purgeable
+ ** flag (bPurgeable) and the pnPurgeable pointer are all set when the
+ ** cache is created and are never changed thereafter. nMax may be
+ ** modified at any time by a call to the pcache1Cachesize() method.
+ ** The PGroup mutex must be held when accessing nMax.
+ */
+ PGroup *pGroup; /* PGroup this cache belongs to */
+ unsigned int *pnPurgeable; /* Pointer to pGroup->nPurgeable */
+ int szPage; /* Size of database content section */
+ int szExtra; /* sizeof(MemPage)+sizeof(PgHdr) */
+ int szAlloc; /* Total size of one pcache line */
+ int bPurgeable; /* True if cache is purgeable */
+ unsigned int nMin; /* Minimum number of pages reserved */
+ unsigned int nMax; /* Configured "cache_size" value */
+ unsigned int n90pct; /* nMax*9/10 */
+ unsigned int iMaxKey; /* Largest key seen since xTruncate() */
+ unsigned int nPurgeableDummy; /* pnPurgeable points here when not used*/
+
+ /* Hash table of all pages. The following variables may only be accessed
+ ** when the accessor is holding the PGroup mutex.
+ */
+ unsigned int nRecyclable; /* Number of pages in the LRU list */
+ unsigned int nPage; /* Total number of pages in apHash */
+ unsigned int nHash; /* Number of slots in apHash[] */
+ PgHdr1 **apHash; /* Hash table for fast lookup by key */
+ PgHdr1 *pFree; /* List of unused pcache-local pages */
+ void *pBulk; /* Bulk memory used by pcache-local */
+};
+
+/*
+** Free slots in the allocator used to divide up the global page cache
+** buffer provided using the SQLITE_CONFIG_PAGECACHE mechanism.
+*/
+struct PgFreeslot {
+ PgFreeslot *pNext; /* Next free slot */
+};
+
+/*
+** Global data used by this cache.
+*/
+static SQLITE_WSD struct PCacheGlobal {
+ PGroup grp; /* The global PGroup for mode (2) */
+
+ /* Variables related to SQLITE_CONFIG_PAGECACHE settings. The
+ ** szSlot, nSlot, pStart, pEnd, nReserve, and isInit values are all
+ ** fixed at sqlite3_initialize() time and do not require mutex protection.
+ ** The nFreeSlot and pFree values do require mutex protection.
+ */
+ int isInit; /* True if initialized */
+ int separateCache; /* Use a new PGroup for each PCache */
+ int nInitPage; /* Initial bulk allocation size */
+ int szSlot; /* Size of each free slot */
+ int nSlot; /* The number of pcache slots */
+ int nReserve; /* Try to keep nFreeSlot above this */
+ void *pStart, *pEnd; /* Bounds of global page cache memory */
+ /* Above requires no mutex. Use the mutex below for the variables that follow. */
+ sqlite3_mutex *mutex; /* Mutex for accessing the following: */
+ PgFreeslot *pFree; /* Free page blocks */
+ int nFreeSlot; /* Number of unused pcache slots */
+ /* The following value requires a mutex to change. We skip the mutex on
+ ** reading because (1) most platforms read a 32-bit integer atomically and
+ ** (2) even if an incorrect value is read, no great harm is done since this
+ ** is really just an optimization. */
+ int bUnderPressure; /* True if low on PAGECACHE memory */
+} pcache1_g;
+
+/*
+** All code in this file should access the global structure above via the
+** alias "pcache1". This ensures that the WSD emulation is used when
+** compiling for systems that do not support real WSD.
+*/
+#define pcache1 (GLOBAL(struct PCacheGlobal, pcache1_g))
+
+/*
+** Macros to enter and leave the PCache LRU mutex.
+*/
+#if !defined(SQLITE_ENABLE_MEMORY_MANAGEMENT) || SQLITE_THREADSAFE==0
+# define pcache1EnterMutex(X) assert((X)->mutex==0)
+# define pcache1LeaveMutex(X) assert((X)->mutex==0)
+# define PCACHE1_MIGHT_USE_GROUP_MUTEX 0
+#else
+# define pcache1EnterMutex(X) sqlite3_mutex_enter((X)->mutex)
+# define pcache1LeaveMutex(X) sqlite3_mutex_leave((X)->mutex)
+# define PCACHE1_MIGHT_USE_GROUP_MUTEX 1
+#endif
+
+/******************************************************************************/
+/******** Page Allocation/SQLITE_CONFIG_PCACHE Related Functions **************/
+
+
+/*
+** This function is called during initialization if a static buffer is
+** supplied to use for the page-cache by passing the SQLITE_CONFIG_PAGECACHE
+** verb to sqlite3_config(). Parameter pBuf points to an allocation large
+** enough to contain 'n' buffers of 'sz' bytes each.
+**
+** This routine is called from sqlite3_initialize() and so it is guaranteed
+** to be serialized already. There is no need for further mutexing.
+*/
+SQLITE_PRIVATE void sqlite3PCacheBufferSetup(void *pBuf, int sz, int n){
+ if( pcache1.isInit ){
+ PgFreeslot *p;
+ if( pBuf==0 ) sz = n = 0;
+ if( n==0 ) sz = 0;
+ sz = ROUNDDOWN8(sz);
+ pcache1.szSlot = sz;
+ pcache1.nSlot = pcache1.nFreeSlot = n;
+ pcache1.nReserve = n>90 ? 10 : (n/10 + 1);
+ pcache1.pStart = pBuf;
+ pcache1.pFree = 0;
+ pcache1.bUnderPressure = 0;
+ while( n-- ){
+ p = (PgFreeslot*)pBuf;
+ p->pNext = pcache1.pFree;
+ pcache1.pFree = p;
+ pBuf = (void*)&((char*)pBuf)[sz];
+ }
+ pcache1.pEnd = pBuf;
+ }
+}
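+
+/*
+** Worked example (values illustrative): a start-time call of
+** sqlite3_config(SQLITE_CONFIG_PAGECACHE, pBuf, 4368, 500) arrives
+** here as sz==4368 and n==500. ROUNDDOWN8() keeps each slot 8-byte
+** aligned, the reserve threshold nReserve becomes 10 (since n>90),
+** and the loop threads all 500 slots onto pcache1.pFree, leaving
+** pcache1.pEnd pointing just past the last slot.
+*/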
+
+/*
+** Try to initialize the pCache->pFree and pCache->pBulk fields. Return
+** true if pCache->pFree ends up containing one or more free pages.
+*/
+static int pcache1InitBulk(PCache1 *pCache){
+ i64 szBulk;
+ char *zBulk;
+ if( pcache1.nInitPage==0 ) return 0;
+ /* Do not bother with a bulk allocation if the cache size is very small */
+ if( pCache->nMax<3 ) return 0;
+ sqlite3BeginBenignMalloc();
+ if( pcache1.nInitPage>0 ){
+ szBulk = pCache->szAlloc * (i64)pcache1.nInitPage;
+ }else{
+ szBulk = -1024 * (i64)pcache1.nInitPage;
+ }
+ if( szBulk > pCache->szAlloc*(i64)pCache->nMax ){
+ szBulk = pCache->szAlloc*(i64)pCache->nMax;
+ }
+ zBulk = pCache->pBulk = sqlite3Malloc( szBulk );
+ sqlite3EndBenignMalloc();
+ if( zBulk ){
+ int nBulk = sqlite3MallocSize(zBulk)/pCache->szAlloc;
+ do{
+ PgHdr1 *pX = (PgHdr1*)&zBulk[pCache->szPage];
+ pX->page.pBuf = zBulk;
+ pX->page.pExtra = &pX[1];
+ pX->isBulkLocal = 1;
+ pX->isAnchor = 0;
+ pX->pNext = pCache->pFree;
+ pX->pLruPrev = 0; /* Initializing this saves a valgrind error */
+ pCache->pFree = pX;
+ zBulk += pCache->szAlloc;
+ }while( --nBulk );
+ }
+ return pCache->pFree!=0;
+}
+
+/*
+** Malloc function used within this file to allocate space from the buffer
+** configured using sqlite3_config(SQLITE_CONFIG_PAGECACHE) option. If no
+** such buffer exists or there is no space left in it, this function falls
+** back to sqlite3Malloc().
+**
+** Multiple threads can run this routine at the same time. Global variables
+** in pcache1 need to be protected via mutex.
+*/
+static void *pcache1Alloc(int nByte){
+ void *p = 0;
+ assert( sqlite3_mutex_notheld(pcache1.grp.mutex) );
+ if( nByte<=pcache1.szSlot ){
+ sqlite3_mutex_enter(pcache1.mutex);
+ p = (PgHdr1 *)pcache1.pFree;
+ if( p ){
+ pcache1.pFree = pcache1.pFree->pNext;
+ pcache1.nFreeSlot--;
+ pcache1.bUnderPressure = pcache1.nFreeSlot<pcache1.nReserve;
+ assert( pcache1.nFreeSlot>=0 );
+ sqlite3StatusHighwater(SQLITE_STATUS_PAGECACHE_SIZE, nByte);
+ sqlite3StatusUp(SQLITE_STATUS_PAGECACHE_USED, 1);
+ }
+ sqlite3_mutex_leave(pcache1.mutex);
+ }
+ if( p==0 ){
+ /* Memory is not available in the SQLITE_CONFIG_PAGECACHE pool. Get
+ ** it from sqlite3Malloc instead.
+ */
+ p = sqlite3Malloc(nByte);
+#ifndef SQLITE_DISABLE_PAGECACHE_OVERFLOW_STATS
+ if( p ){
+ int sz = sqlite3MallocSize(p);
+ sqlite3_mutex_enter(pcache1.mutex);
+ sqlite3StatusHighwater(SQLITE_STATUS_PAGECACHE_SIZE, nByte);
+ sqlite3StatusUp(SQLITE_STATUS_PAGECACHE_OVERFLOW, sz);
+ sqlite3_mutex_leave(pcache1.mutex);
+ }
+#endif
+ sqlite3MemdebugSetType(p, MEMTYPE_PCACHE);
+ }
+ return p;
+}
+
+/*
+** Free an allocated buffer obtained from pcache1Alloc().
+*/
+static void pcache1Free(void *p){
+ if( p==0 ) return;
+ if( SQLITE_WITHIN(p, pcache1.pStart, pcache1.pEnd) ){
+ PgFreeslot *pSlot;
+ sqlite3_mutex_enter(pcache1.mutex);
+ sqlite3StatusDown(SQLITE_STATUS_PAGECACHE_USED, 1);
+ pSlot = (PgFreeslot*)p;
+ pSlot->pNext = pcache1.pFree;
+ pcache1.pFree = pSlot;
+ pcache1.nFreeSlot++;
+ pcache1.bUnderPressure = pcache1.nFreeSlot<pcache1.nReserve;
+ assert( pcache1.nFreeSlot<=pcache1.nSlot );
+ sqlite3_mutex_leave(pcache1.mutex);
+ }else{
+ assert( sqlite3MemdebugHasType(p, MEMTYPE_PCACHE) );
+ sqlite3MemdebugSetType(p, MEMTYPE_HEAP);
+#ifndef SQLITE_DISABLE_PAGECACHE_OVERFLOW_STATS
+ {
+ int nFreed = 0;
+ nFreed = sqlite3MallocSize(p);
+ sqlite3_mutex_enter(pcache1.mutex);
+ sqlite3StatusDown(SQLITE_STATUS_PAGECACHE_OVERFLOW, nFreed);
+ sqlite3_mutex_leave(pcache1.mutex);
+ }
+#endif
+ sqlite3_free(p);
+ }
+}
+
+#ifdef SQLITE_ENABLE_MEMORY_MANAGEMENT
+/*
+** Return the size of a pcache allocation
+*/
+static int pcache1MemSize(void *p){
+ if( p>=pcache1.pStart && p<pcache1.pEnd ){
+ return pcache1.szSlot;
+ }else{
+ int iSize;
+ assert( sqlite3MemdebugHasType(p, MEMTYPE_PCACHE) );
+ sqlite3MemdebugSetType(p, MEMTYPE_HEAP);
+ iSize = sqlite3MallocSize(p);
+ sqlite3MemdebugSetType(p, MEMTYPE_PCACHE);
+ return iSize;
+ }
+}
+#endif /* SQLITE_ENABLE_MEMORY_MANAGEMENT */
+
+/*
+** Allocate a new page object initially associated with cache pCache.
+*/
+static PgHdr1 *pcache1AllocPage(PCache1 *pCache, int benignMalloc){
+ PgHdr1 *p = 0;
+ void *pPg;
+
+ assert( sqlite3_mutex_held(pCache->pGroup->mutex) );
+ if( pCache->pFree || (pCache->nPage==0 && pcache1InitBulk(pCache)) ){
+ assert( pCache->pFree!=0 );
+ p = pCache->pFree;
+ pCache->pFree = p->pNext;
+ p->pNext = 0;
+ }else{
+#ifdef SQLITE_ENABLE_MEMORY_MANAGEMENT
+ /* The group mutex must be released before pcache1Alloc() is called. This
+ ** is because it might call sqlite3_release_memory(), which assumes that
+ ** this mutex is not held. */
+ assert( pcache1.separateCache==0 );
+ assert( pCache->pGroup==&pcache1.grp );
+ pcache1LeaveMutex(pCache->pGroup);
+#endif
+ if( benignMalloc ){ sqlite3BeginBenignMalloc(); }
+#ifdef SQLITE_PCACHE_SEPARATE_HEADER
+ pPg = pcache1Alloc(pCache->szPage);
+ p = sqlite3Malloc(sizeof(PgHdr1) + pCache->szExtra);
+ if( !pPg || !p ){
+ pcache1Free(pPg);
+ sqlite3_free(p);
+ pPg = 0;
+ }
+#else
+ pPg = pcache1Alloc(pCache->szAlloc);
+#endif
+ if( benignMalloc ){ sqlite3EndBenignMalloc(); }
+#ifdef SQLITE_ENABLE_MEMORY_MANAGEMENT
+ pcache1EnterMutex(pCache->pGroup);
+#endif
+ if( pPg==0 ) return 0;
+#ifndef SQLITE_PCACHE_SEPARATE_HEADER
+ p = (PgHdr1 *)&((u8 *)pPg)[pCache->szPage];
+#endif
+ p->page.pBuf = pPg;
+ p->page.pExtra = &p[1];
+ p->isBulkLocal = 0;
+ p->isAnchor = 0;
+ }
+ (*pCache->pnPurgeable)++;
+ return p;
+}
+
+/*
+** Free a page object allocated by pcache1AllocPage().
+*/
+static void pcache1FreePage(PgHdr1 *p){
+ PCache1 *pCache;
+ assert( p!=0 );
+ pCache = p->pCache;
+ assert( sqlite3_mutex_held(p->pCache->pGroup->mutex) );
+ if( p->isBulkLocal ){
+ p->pNext = pCache->pFree;
+ pCache->pFree = p;
+ }else{
+ pcache1Free(p->page.pBuf);
+#ifdef SQLITE_PCACHE_SEPARATE_HEADER
+ sqlite3_free(p);
+#endif
+ }
+ (*pCache->pnPurgeable)--;
+}
+
+/*
+** Malloc function used by SQLite to obtain space from the buffer configured
+** using the sqlite3_config(SQLITE_CONFIG_PAGECACHE) option. If no such buffer
+** exists, this function falls back to sqlite3Malloc().
+*/
+SQLITE_PRIVATE void *sqlite3PageMalloc(int sz){
+ assert( sz<=65536+8 ); /* These allocations are never very large */
+ return pcache1Alloc(sz);
+}
+
+/*
+** Free an allocated buffer obtained from sqlite3PageMalloc().
+*/
+SQLITE_PRIVATE void sqlite3PageFree(void *p){
+ pcache1Free(p);
+}
+
+
+/*
+** Return true if it is desirable to avoid allocating a new page cache
+** entry.
+**
+** If memory was allocated specifically to the page cache using
+** SQLITE_CONFIG_PAGECACHE but that memory has all been used, then
+** it is desirable to avoid allocating a new page cache entry because
+** presumably SQLITE_CONFIG_PAGECACHE was supposed to be sufficient
+** for all page cache needs and we should not need to spill the
+** allocation onto the heap.
+**
+** Or, if the heap is used for all page cache memory but the heap is
+** under memory pressure, then again it is desirable to avoid
+** allocating a new page cache entry in order to avoid stressing
+** the heap even further.
+*/
+static int pcache1UnderMemoryPressure(PCache1 *pCache){
+ if( pcache1.nSlot && (pCache->szPage+pCache->szExtra)<=pcache1.szSlot ){
+ return pcache1.bUnderPressure;
+ }else{
+ return sqlite3HeapNearlyFull();
+ }
+}
+
+/******************************************************************************/
+/******** General Implementation Functions ************************************/
+
+/*
+** This function is used to resize the hash table used by the cache passed
+** as the first argument.
+**
+** The PCache mutex must be held when this function is called.
+*/
+static void pcache1ResizeHash(PCache1 *p){
+ PgHdr1 **apNew;
+ unsigned int nNew;
+ unsigned int i;
+
+ assert( sqlite3_mutex_held(p->pGroup->mutex) );
+
+ nNew = p->nHash*2;
+ if( nNew<256 ){
+ nNew = 256;
+ }
+
+ pcache1LeaveMutex(p->pGroup);
+ if( p->nHash ){ sqlite3BeginBenignMalloc(); }
+ apNew = (PgHdr1 **)sqlite3MallocZero(sizeof(PgHdr1 *)*nNew);
+ if( p->nHash ){ sqlite3EndBenignMalloc(); }
+ pcache1EnterMutex(p->pGroup);
+ if( apNew ){
+ for(i=0; i<p->nHash; i++){
+ PgHdr1 *pPage;
+ PgHdr1 *pNext = p->apHash[i];
+ while( (pPage = pNext)!=0 ){
+ unsigned int h = pPage->iKey % nNew;
+ pNext = pPage->pNext;
+ pPage->pNext = apNew[h];
+ apNew[h] = pPage;
+ }
+ }
+ sqlite3_free(p->apHash);
+ p->apHash = apNew;
+ p->nHash = nNew;
+ }
+}
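+
+/*
+** Worked example (numbers illustrative): a page with iKey==1000 hashes
+** to slot 1000%256 == 232 in a 256-slot table. After the table doubles
+** to 512 slots, the rehash loop above moves it to slot 1000%512 == 488,
+** which is why every entry must be rechained rather than copied.
+*/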
+
+/*
+** This function is used internally to remove the page pPage from the
+** PGroup LRU list, if it is part of it. If pPage is not part of the PGroup
+** LRU list, then this function is a no-op.
+**
+** The PGroup mutex must be held when this function is called.
+*/
+static PgHdr1 *pcache1PinPage(PgHdr1 *pPage){
+ assert( pPage!=0 );
+ assert( PAGE_IS_UNPINNED(pPage) );
+ assert( pPage->pLruNext );
+ assert( pPage->pLruPrev );
+ assert( sqlite3_mutex_held(pPage->pCache->pGroup->mutex) );
+ pPage->pLruPrev->pLruNext = pPage->pLruNext;
+ pPage->pLruNext->pLruPrev = pPage->pLruPrev;
+ pPage->pLruNext = 0;
+ /* pPage->pLruPrev = 0;
+ ** No need to clear pLruPrev as it is never accessed if pLruNext is 0 */
+ assert( pPage->isAnchor==0 );
+ assert( pPage->pCache->pGroup->lru.isAnchor==1 );
+ pPage->pCache->nRecyclable--;
+ return pPage;
+}
+
+
+/*
+** Remove the page supplied as an argument from the hash table
+** (PCache1.apHash structure) that it is currently stored in.
+** Also free the page if freeFlag is true.
+**
+** The PGroup mutex must be held when this function is called.
+*/
+static void pcache1RemoveFromHash(PgHdr1 *pPage, int freeFlag){
+ unsigned int h;
+ PCache1 *pCache = pPage->pCache;
+ PgHdr1 **pp;
+
+ assert( sqlite3_mutex_held(pCache->pGroup->mutex) );
+ h = pPage->iKey % pCache->nHash;
+ for(pp=&pCache->apHash[h]; (*pp)!=pPage; pp=&(*pp)->pNext);
+ *pp = (*pp)->pNext;
+
+ pCache->nPage--;
+ if( freeFlag ) pcache1FreePage(pPage);
+}
+
+/*
+** If there are currently more than nMaxPage pages allocated, try
+** to recycle pages to reduce the number allocated to nMaxPage.
+*/
+static void pcache1EnforceMaxPage(PCache1 *pCache){
+ PGroup *pGroup = pCache->pGroup;
+ PgHdr1 *p;
+ assert( sqlite3_mutex_held(pGroup->mutex) );
+ while( pGroup->nPurgeable>pGroup->nMaxPage
+ && (p=pGroup->lru.pLruPrev)->isAnchor==0
+ ){
+ assert( p->pCache->pGroup==pGroup );
+ assert( PAGE_IS_UNPINNED(p) );
+ pcache1PinPage(p);
+ pcache1RemoveFromHash(p, 1);
+ }
+ if( pCache->nPage==0 && pCache->pBulk ){
+ sqlite3_free(pCache->pBulk);
+ pCache->pBulk = pCache->pFree = 0;
+ }
+}
+
+/*
+** Discard all pages from cache pCache with a page number (key value)
+** greater than or equal to iLimit. Any pinned pages that meet this
+** criteria are unpinned before they are discarded.
+**
+** The PCache mutex must be held when this function is called.
+*/
+static void pcache1TruncateUnsafe(
+ PCache1 *pCache, /* The cache to truncate */
+ unsigned int iLimit /* Drop pages with this pgno or larger */
+){
+ TESTONLY( int nPage = 0; ) /* To assert pCache->nPage is correct */
+ unsigned int h, iStop;
+ assert( sqlite3_mutex_held(pCache->pGroup->mutex) );
+ assert( pCache->iMaxKey >= iLimit );
+ assert( pCache->nHash > 0 );
+ if( pCache->iMaxKey - iLimit < pCache->nHash ){
+ /* If we are just shaving the last few pages off the end of the
+ ** cache, then there is no point in scanning the entire hash table.
+ ** Only scan those hash slots that might contain pages that need to
+ ** be removed. */
+ h = iLimit % pCache->nHash;
+ iStop = pCache->iMaxKey % pCache->nHash;
+ TESTONLY( nPage = -10; ) /* Disable the pCache->nPage validity check */
+ }else{
+ /* This is the general case where many pages are being removed.
+ ** It is necessary to scan the entire hash table */
+ h = pCache->nHash/2;
+ iStop = h - 1;
+ }
+ for(;;){
+ PgHdr1 **pp;
+ PgHdr1 *pPage;
+ assert( h<pCache->nHash );
+ pp = &pCache->apHash[h];
+ while( (pPage = *pp)!=0 ){
+ if( pPage->iKey>=iLimit ){
+ pCache->nPage--;
+ *pp = pPage->pNext;
+ if( PAGE_IS_UNPINNED(pPage) ) pcache1PinPage(pPage);
+ pcache1FreePage(pPage);
+ }else{
+ pp = &pPage->pNext;
+ TESTONLY( if( nPage>=0 ) nPage++; )
+ }
+ }
+ if( h==iStop ) break;
+ h = (h+1) % pCache->nHash;
+ }
+ assert( nPage<0 || pCache->nPage==(unsigned)nPage );
+}
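+
+/*
+** Worked example of the partial scan above (numbers illustrative):
+** with nHash==256, iMaxKey==1000 and iLimit==997, the difference
+** 1000-997 is less than nHash, so only slots 997%256 == 229 through
+** 1000%256 == 232 are visited instead of all 256 hash slots.
+*/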
+
+/******************************************************************************/
+/******** sqlite3_pcache Methods **********************************************/
+
+/*
+** Implementation of the sqlite3_pcache.xInit method.
+*/
+static int pcache1Init(void *NotUsed){
+ UNUSED_PARAMETER(NotUsed);
+ assert( pcache1.isInit==0 );
+ memset(&pcache1, 0, sizeof(pcache1));
+
+
+ /*
+ ** The pcache1.separateCache variable is true if each PCache has its own
+ ** private PGroup (mode-1). pcache1.separateCache is false if the single
+ ** PGroup in pcache1.grp is used for all page caches (mode-2).
+ **
+ ** * Always use a unified cache (mode-2) if ENABLE_MEMORY_MANAGEMENT
+ **
+ ** * Use a unified cache in single-threaded applications that have
+ ** configured a start-time buffer for use as page-cache memory using
+ ** sqlite3_config(SQLITE_CONFIG_PAGECACHE, pBuf, sz, N) with non-NULL
+ ** pBuf argument.
+ **
+ ** * Otherwise use separate caches (mode-1)
+ */
+#if defined(SQLITE_ENABLE_MEMORY_MANAGEMENT)
+ pcache1.separateCache = 0;
+#elif SQLITE_THREADSAFE
+ pcache1.separateCache = sqlite3GlobalConfig.pPage==0
+ || sqlite3GlobalConfig.bCoreMutex>0;
+#else
+ pcache1.separateCache = sqlite3GlobalConfig.pPage==0;
+#endif
+
+#if SQLITE_THREADSAFE
+ if( sqlite3GlobalConfig.bCoreMutex ){
+ pcache1.grp.mutex = sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_LRU);
+ pcache1.mutex = sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_PMEM);
+ }
+#endif
+ if( pcache1.separateCache
+ && sqlite3GlobalConfig.nPage!=0
+ && sqlite3GlobalConfig.pPage==0
+ ){
+ pcache1.nInitPage = sqlite3GlobalConfig.nPage;
+ }else{
+ pcache1.nInitPage = 0;
+ }
+ pcache1.grp.mxPinned = 10;
+ pcache1.isInit = 1;
+ return SQLITE_OK;
+}
+
+/*
+** Implementation of the sqlite3_pcache.xShutdown method.
+** Note that the static mutex allocated in xInit does
+** not need to be freed.
+*/
+static void pcache1Shutdown(void *NotUsed){
+ UNUSED_PARAMETER(NotUsed);
+ assert( pcache1.isInit!=0 );
+ memset(&pcache1, 0, sizeof(pcache1));
+}
+
+/* forward declaration */
+static void pcache1Destroy(sqlite3_pcache *p);
+
+/*
+** Implementation of the sqlite3_pcache.xCreate method.
+**
+** Allocate a new cache.
+*/
+static sqlite3_pcache *pcache1Create(int szPage, int szExtra, int bPurgeable){
+ PCache1 *pCache; /* The newly created page cache */
+ PGroup *pGroup; /* The group the new page cache will belong to */
+ int sz; /* Bytes of memory required to allocate the new cache */
+
+ assert( (szPage & (szPage-1))==0 && szPage>=512 && szPage<=65536 );
+ assert( szExtra < 300 );
+
+ sz = sizeof(PCache1) + sizeof(PGroup)*pcache1.separateCache;
+ pCache = (PCache1 *)sqlite3MallocZero(sz);
+ if( pCache ){
+ if( pcache1.separateCache ){
+ pGroup = (PGroup*)&pCache[1];
+ pGroup->mxPinned = 10;
+ }else{
+ pGroup = &pcache1.grp;
+ }
+ pcache1EnterMutex(pGroup);
+ if( pGroup->lru.isAnchor==0 ){
+ pGroup->lru.isAnchor = 1;
+ pGroup->lru.pLruPrev = pGroup->lru.pLruNext = &pGroup->lru;
+ }
+ pCache->pGroup = pGroup;
+ pCache->szPage = szPage;
+ pCache->szExtra = szExtra;
+ pCache->szAlloc = szPage + szExtra + ROUND8(sizeof(PgHdr1));
+ pCache->bPurgeable = (bPurgeable ? 1 : 0);
+ pcache1ResizeHash(pCache);
+ if( bPurgeable ){
+ pCache->nMin = 10;
+ pGroup->nMinPage += pCache->nMin;
+ pGroup->mxPinned = pGroup->nMaxPage + 10 - pGroup->nMinPage;
+ pCache->pnPurgeable = &pGroup->nPurgeable;
+ }else{
+ pCache->pnPurgeable = &pCache->nPurgeableDummy;
+ }
+ pcache1LeaveMutex(pGroup);
+ if( pCache->nHash==0 ){
+ pcache1Destroy((sqlite3_pcache*)pCache);
+ pCache = 0;
+ }
+ }
+ return (sqlite3_pcache *)pCache;
+}
+
+/*
+** Implementation of the sqlite3_pcache.xCachesize method.
+**
+** Configure the cache_size limit for a cache.
+*/
+static void pcache1Cachesize(sqlite3_pcache *p, int nMax){
+ PCache1 *pCache = (PCache1 *)p;
+ if( pCache->bPurgeable ){
+ PGroup *pGroup = pCache->pGroup;
+ pcache1EnterMutex(pGroup);
+ pGroup->nMaxPage += (nMax - pCache->nMax);
+ pGroup->mxPinned = pGroup->nMaxPage + 10 - pGroup->nMinPage;
+ pCache->nMax = nMax;
+ pCache->n90pct = pCache->nMax*9/10;
+ pcache1EnforceMaxPage(pCache);
+ pcache1LeaveMutex(pGroup);
+ }
+}
+
+/*
+** Implementation of the sqlite3_pcache.xShrink method.
+**
+** Free up as much memory as possible.
+*/
+static void pcache1Shrink(sqlite3_pcache *p){
+ PCache1 *pCache = (PCache1*)p;
+ if( pCache->bPurgeable ){
+ PGroup *pGroup = pCache->pGroup;
+ int savedMaxPage;
+ pcache1EnterMutex(pGroup);
+ savedMaxPage = pGroup->nMaxPage;
+ pGroup->nMaxPage = 0;
+ pcache1EnforceMaxPage(pCache);
+ pGroup->nMaxPage = savedMaxPage;
+ pcache1LeaveMutex(pGroup);
+ }
+}
+
+/*
+** Implementation of the sqlite3_pcache.xPagecount method.
+*/
+static int pcache1Pagecount(sqlite3_pcache *p){
+ int n;
+ PCache1 *pCache = (PCache1*)p;
+ pcache1EnterMutex(pCache->pGroup);
+ n = pCache->nPage;
+ pcache1LeaveMutex(pCache->pGroup);
+ return n;
+}
+
+
+/*
+** Implement steps 3, 4, and 5 of the pcache1Fetch() algorithm described
+** in the header of the pcache1Fetch() procedure.
+**
+** These steps are broken out into a separate procedure because they are
+** usually not needed, and by avoiding the stack initialization required
+** for these steps, the main pcache1Fetch() procedure can run faster.
+*/
+static SQLITE_NOINLINE PgHdr1 *pcache1FetchStage2(
+ PCache1 *pCache,
+ unsigned int iKey,
+ int createFlag
+){
+ unsigned int nPinned;
+ PGroup *pGroup = pCache->pGroup;
+ PgHdr1 *pPage = 0;
+
+ /* Step 3: Abort if createFlag is 1 but the cache is nearly full */
+ assert( pCache->nPage >= pCache->nRecyclable );
+ nPinned = pCache->nPage - pCache->nRecyclable;
+ assert( pGroup->mxPinned == pGroup->nMaxPage + 10 - pGroup->nMinPage );
+ assert( pCache->n90pct == pCache->nMax*9/10 );
+ if( createFlag==1 && (
+ nPinned>=pGroup->mxPinned
+ || nPinned>=pCache->n90pct
+ || (pcache1UnderMemoryPressure(pCache) && pCache->nRecyclable<nPinned)
+ )){
+ return 0;
+ }
+
+ if( pCache->nPage>=pCache->nHash ) pcache1ResizeHash(pCache);
+ assert( pCache->nHash>0 && pCache->apHash );
+
+ /* Step 4. Try to recycle a page. */
+ if( pCache->bPurgeable
+ && !pGroup->lru.pLruPrev->isAnchor
+ && ((pCache->nPage+1>=pCache->nMax) || pcache1UnderMemoryPressure(pCache))
+ ){
+ PCache1 *pOther;
+ pPage = pGroup->lru.pLruPrev;
+ assert( PAGE_IS_UNPINNED(pPage) );
+ pcache1RemoveFromHash(pPage, 0);
+ pcache1PinPage(pPage);
+ pOther = pPage->pCache;
+ if( pOther->szAlloc != pCache->szAlloc ){
+ pcache1FreePage(pPage);
+ pPage = 0;
+ }else{
+ pGroup->nPurgeable -= (pOther->bPurgeable - pCache->bPurgeable);
+ }
+ }
+
+ /* Step 5. If a usable page buffer has still not been found,
+ ** attempt to allocate a new one.
+ */
+ if( !pPage ){
+ pPage = pcache1AllocPage(pCache, createFlag==1);
+ }
+
+ if( pPage ){
+ unsigned int h = iKey % pCache->nHash;
+ pCache->nPage++;
+ pPage->iKey = iKey;
+ pPage->pNext = pCache->apHash[h];
+ pPage->pCache = pCache;
+ pPage->pLruNext = 0;
+ /* pPage->pLruPrev = 0;
+ ** No need to clear pLruPrev since it is not accessed when pLruNext==0 */
+ *(void **)pPage->page.pExtra = 0;
+ pCache->apHash[h] = pPage;
+ if( iKey>pCache->iMaxKey ){
+ pCache->iMaxKey = iKey;
+ }
+ }
+ return pPage;
+}
+
+/*
+** Implementation of the sqlite3_pcache.xFetch method.
+**
+** Fetch a page by key value.
+**
+** Whether or not a new page may be allocated by this function depends on
+** the value of the createFlag argument. 0 means do not allocate a new
+** page. 1 means allocate a new page if space is easily available. 2
+** means to try really hard to allocate a new page.
+**
+** For a non-purgeable cache (a cache used as the storage for an in-memory
+** database) there is really no difference between createFlag 1 and 2. So
+** the calling function (pcache.c) will never have a createFlag of 1 on
+** a non-purgeable cache.
+**
+** There are three different approaches to obtaining space for a page,
+** depending on the value of parameter createFlag (which may be 0, 1 or 2).
+**
+** 1. Regardless of the value of createFlag, the cache is searched for a
+** copy of the requested page. If one is found, it is returned.
+**
+** 2. If createFlag==0 and the page is not already in the cache, NULL is
+** returned.
+**
+** 3. If createFlag is 1, and the page is not already in the cache, then
+** return NULL (do not allocate a new page) if any of the following
+** conditions are true:
+**
+** (a) the number of pages pinned by the cache is greater than
+** PCache1.nMax, or
+**
+** (b) the number of pages pinned by the cache is greater than
+** the sum of nMax for all purgeable caches, less the sum of
+** nMin for all other purgeable caches.
+**
+** 4. If none of the first three conditions apply and the cache is marked
+** as purgeable, and if one of the following is true:
+**
+** (a) The number of pages allocated for the cache is already
+** PCache1.nMax, or
+**
+** (b) The number of pages allocated for all purgeable caches is
+** already equal to or greater than the sum of nMax for all
+** purgeable caches, or
+**
+** (c) The system is under memory pressure and wants to avoid
+** unnecessary page cache entry allocations,
+**
+** then attempt to recycle a page from the LRU list. If it is the right
+** size, return the recycled buffer. Otherwise, free the buffer and
+** proceed to step 5.
+**
+** 5. Otherwise, allocate and return a new page buffer.
+**
+** There are two versions of this routine. pcache1FetchWithMutex() is
+** the general case. pcache1FetchNoMutex() is a faster implementation for
+** the common case where pGroup->mutex is NULL. The pcache1Fetch() wrapper
+** invokes the appropriate routine.
+*/
+static PgHdr1 *pcache1FetchNoMutex(
+ sqlite3_pcache *p,
+ unsigned int iKey,
+ int createFlag
+){
+ PCache1 *pCache = (PCache1 *)p;
+ PgHdr1 *pPage = 0;
+
+ /* Step 1: Search the hash table for an existing entry. */
+ pPage = pCache->apHash[iKey % pCache->nHash];
+ while( pPage && pPage->iKey!=iKey ){ pPage = pPage->pNext; }
+
+ /* Step 2: If the page was found in the hash table, then return it.
+ ** If the page was not in the hash table and createFlag is 0, abort.
+ ** Otherwise (page not in hash and createFlag!=0) continue with
+ ** subsequent steps to try to create the page. */
+ if( pPage ){
+ if( PAGE_IS_UNPINNED(pPage) ){
+ return pcache1PinPage(pPage);
+ }else{
+ return pPage;
+ }
+ }else if( createFlag ){
+ /* Steps 3, 4, and 5 implemented by this subroutine */
+ return pcache1FetchStage2(pCache, iKey, createFlag);
+ }else{
+ return 0;
+ }
+}
+#if PCACHE1_MIGHT_USE_GROUP_MUTEX
+static PgHdr1 *pcache1FetchWithMutex(
+ sqlite3_pcache *p,
+ unsigned int iKey,
+ int createFlag
+){
+ PCache1 *pCache = (PCache1 *)p;
+ PgHdr1 *pPage;
+
+ pcache1EnterMutex(pCache->pGroup);
+ pPage = pcache1FetchNoMutex(p, iKey, createFlag);
+ assert( pPage==0 || pCache->iMaxKey>=iKey );
+ pcache1LeaveMutex(pCache->pGroup);
+ return pPage;
+}
+#endif
+static sqlite3_pcache_page *pcache1Fetch(
+ sqlite3_pcache *p,
+ unsigned int iKey,
+ int createFlag
+){
+#if PCACHE1_MIGHT_USE_GROUP_MUTEX || defined(SQLITE_DEBUG)
+ PCache1 *pCache = (PCache1 *)p;
+#endif
+
+ assert( offsetof(PgHdr1,page)==0 );
+ assert( pCache->bPurgeable || createFlag!=1 );
+ assert( pCache->bPurgeable || pCache->nMin==0 );
+ assert( pCache->bPurgeable==0 || pCache->nMin==10 );
+ assert( pCache->nMin==0 || pCache->bPurgeable );
+ assert( pCache->nHash>0 );
+#if PCACHE1_MIGHT_USE_GROUP_MUTEX
+ if( pCache->pGroup->mutex ){
+ return (sqlite3_pcache_page*)pcache1FetchWithMutex(p, iKey, createFlag);
+ }else
+#endif
+ {
+ return (sqlite3_pcache_page*)pcache1FetchNoMutex(p, iKey, createFlag);
+ }
+}
+
+
+/*
+** Implementation of the sqlite3_pcache.xUnpin method.
+**
+** Mark a page as unpinned (eligible for asynchronous recycling).
+*/
+static void pcache1Unpin(
+ sqlite3_pcache *p,
+ sqlite3_pcache_page *pPg,
+ int reuseUnlikely
+){
+ PCache1 *pCache = (PCache1 *)p;
+ PgHdr1 *pPage = (PgHdr1 *)pPg;
+ PGroup *pGroup = pCache->pGroup;
+
+ assert( pPage->pCache==pCache );
+ pcache1EnterMutex(pGroup);
+
+ /* It is an error to call this function if the page is already
+ ** part of the PGroup LRU list.
+ */
+ assert( pPage->pLruNext==0 );
+ assert( PAGE_IS_PINNED(pPage) );
+
+ if( reuseUnlikely || pGroup->nPurgeable>pGroup->nMaxPage ){
+ pcache1RemoveFromHash(pPage, 1);
+ }else{
+ /* Add the page to the PGroup LRU list. */
+ PgHdr1 **ppFirst = &pGroup->lru.pLruNext;
+ pPage->pLruPrev = &pGroup->lru;
+ (pPage->pLruNext = *ppFirst)->pLruPrev = pPage;
+ *ppFirst = pPage;
+ pCache->nRecyclable++;
+ }
+
+ pcache1LeaveMutex(pCache->pGroup);
+}
+
+/*
+** Implementation of the sqlite3_pcache.xRekey method.
+*/
+static void pcache1Rekey(
+ sqlite3_pcache *p,
+ sqlite3_pcache_page *pPg,
+ unsigned int iOld,
+ unsigned int iNew
+){
+ PCache1 *pCache = (PCache1 *)p;
+ PgHdr1 *pPage = (PgHdr1 *)pPg;
+ PgHdr1 **pp;
+ unsigned int h;
+ assert( pPage->iKey==iOld );
+ assert( pPage->pCache==pCache );
+
+ pcache1EnterMutex(pCache->pGroup);
+
+ h = iOld%pCache->nHash;
+ pp = &pCache->apHash[h];
+ while( (*pp)!=pPage ){
+ pp = &(*pp)->pNext;
+ }
+ *pp = pPage->pNext;
+
+ h = iNew%pCache->nHash;
+ pPage->iKey = iNew;
+ pPage->pNext = pCache->apHash[h];
+ pCache->apHash[h] = pPage;
+ if( iNew>pCache->iMaxKey ){
+ pCache->iMaxKey = iNew;
+ }
+
+ pcache1LeaveMutex(pCache->pGroup);
+}
+
+/*
+** Implementation of the sqlite3_pcache.xTruncate method.
+**
+** Discard all unpinned pages in the cache with a page number equal to
+** or greater than parameter iLimit. Any pinned pages with a page number
+** equal to or greater than iLimit are implicitly unpinned.
+*/
+static void pcache1Truncate(sqlite3_pcache *p, unsigned int iLimit){
+ PCache1 *pCache = (PCache1 *)p;
+ pcache1EnterMutex(pCache->pGroup);
+ if( iLimit<=pCache->iMaxKey ){
+ pcache1TruncateUnsafe(pCache, iLimit);
+ pCache->iMaxKey = iLimit-1;
+ }
+ pcache1LeaveMutex(pCache->pGroup);
+}
+
+/*
+** Implementation of the sqlite3_pcache.xDestroy method.
+**
+** Destroy a cache allocated using pcache1Create().
+*/
+static void pcache1Destroy(sqlite3_pcache *p){
+ PCache1 *pCache = (PCache1 *)p;
+ PGroup *pGroup = pCache->pGroup;
+ assert( pCache->bPurgeable || (pCache->nMax==0 && pCache->nMin==0) );
+ pcache1EnterMutex(pGroup);
+ if( pCache->nPage ) pcache1TruncateUnsafe(pCache, 0);
+ assert( pGroup->nMaxPage >= pCache->nMax );
+ pGroup->nMaxPage -= pCache->nMax;
+ assert( pGroup->nMinPage >= pCache->nMin );
+ pGroup->nMinPage -= pCache->nMin;
+ pGroup->mxPinned = pGroup->nMaxPage + 10 - pGroup->nMinPage;
+ pcache1EnforceMaxPage(pCache);
+ pcache1LeaveMutex(pGroup);
+ sqlite3_free(pCache->pBulk);
+ sqlite3_free(pCache->apHash);
+ sqlite3_free(pCache);
+}
+
+/*
+** This function is called during initialization (sqlite3_initialize()) to
+** install the default pluggable cache module, assuming the user has not
+** already provided an alternative.
+*/
+SQLITE_PRIVATE void sqlite3PCacheSetDefault(void){
+ static const sqlite3_pcache_methods2 defaultMethods = {
+ 1, /* iVersion */
+ 0, /* pArg */
+ pcache1Init, /* xInit */
+ pcache1Shutdown, /* xShutdown */
+ pcache1Create, /* xCreate */
+ pcache1Cachesize, /* xCachesize */
+ pcache1Pagecount, /* xPagecount */
+ pcache1Fetch, /* xFetch */
+ pcache1Unpin, /* xUnpin */
+ pcache1Rekey, /* xRekey */
+ pcache1Truncate, /* xTruncate */
+ pcache1Destroy, /* xDestroy */
+ pcache1Shrink /* xShrink */
+ };
+ sqlite3_config(SQLITE_CONFIG_PCACHE2, &defaultMethods);
+}
+
+/*
+** Return the size of the header on each page of this PCACHE implementation.
+*/
+SQLITE_PRIVATE int sqlite3HeaderSizePcache1(void){ return ROUND8(sizeof(PgHdr1)); }
+
+/*
+** Return the global mutex used by this PCACHE implementation. The
+** sqlite3_status() routine needs access to this mutex.
+*/
+SQLITE_PRIVATE sqlite3_mutex *sqlite3Pcache1Mutex(void){
+ return pcache1.mutex;
+}
+
+#ifdef SQLITE_ENABLE_MEMORY_MANAGEMENT
+/*
+** This function is called to free superfluous dynamically allocated memory
+** held by the pager system. Memory in use by any SQLite pager allocated
+** by the current thread may be sqlite3_free()ed.
+**
+** nReq is the number of bytes of memory required. Once this much has
+** been released, the function returns. The return value is the total number
+** of bytes of memory released.
+*/
+SQLITE_PRIVATE int sqlite3PcacheReleaseMemory(int nReq){
+ int nFree = 0;
+ assert( sqlite3_mutex_notheld(pcache1.grp.mutex) );
+ assert( sqlite3_mutex_notheld(pcache1.mutex) );
+ if( sqlite3GlobalConfig.pPage==0 ){
+ PgHdr1 *p;
+ pcache1EnterMutex(&pcache1.grp);
+ while( (nReq<0 || nFree<nReq)
+ && (p=pcache1.grp.lru.pLruPrev)!=0
+ && p->isAnchor==0
+ ){
+ nFree += pcache1MemSize(p->page.pBuf);
+#ifdef SQLITE_PCACHE_SEPARATE_HEADER
+ nFree += sqlite3MemSize(p);
+#endif
+ assert( PAGE_IS_UNPINNED(p) );
+ pcache1PinPage(p);
+ pcache1RemoveFromHash(p, 1);
+ }
+ pcache1LeaveMutex(&pcache1.grp);
+ }
+ return nFree;
+}
+#endif /* SQLITE_ENABLE_MEMORY_MANAGEMENT */
+
+#ifdef SQLITE_TEST
+/*
+** This function is used by test procedures to inspect the internal state
+** of the global cache.
+*/
+SQLITE_PRIVATE void sqlite3PcacheStats(
+ int *pnCurrent, /* OUT: Total number of pages cached */
+ int *pnMax, /* OUT: Global maximum cache size */
+ int *pnMin, /* OUT: Sum of PCache1.nMin for purgeable caches */
+ int *pnRecyclable /* OUT: Total number of pages available for recycling */
+){
+ PgHdr1 *p;
+ int nRecyclable = 0;
+ for(p=pcache1.grp.lru.pLruNext; p && !p->isAnchor; p=p->pLruNext){
+ assert( PAGE_IS_UNPINNED(p) );
+ nRecyclable++;
+ }
+ *pnCurrent = pcache1.grp.nPurgeable;
+ *pnMax = (int)pcache1.grp.nMaxPage;
+ *pnMin = (int)pcache1.grp.nMinPage;
+ *pnRecyclable = nRecyclable;
+}
+#endif
+
+/************** End of pcache1.c *********************************************/
+/************** Begin file rowset.c ******************************************/
+/*
+** 2008 December 3
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+**
+** This module implements an object we call a "RowSet".
+**
+** The RowSet object is a collection of rowids. Rowids
+** are inserted into the RowSet in an arbitrary order. Inserts
+** can be intermixed with tests to see if a given rowid has been
+** previously inserted into the RowSet.
+**
+** After all inserts are finished, it is possible to extract the
+** elements of the RowSet in sorted order. Once this extraction
+** process has started, no new elements may be inserted.
+**
+** Hence, the primitive operations for a RowSet are:
+**
+** CREATE
+** INSERT
+** TEST
+** SMALLEST
+** DESTROY
+**
+** The CREATE and DESTROY primitives are the constructor and destructor,
+** obviously. The INSERT primitive adds a new element to the RowSet.
+** TEST checks to see if an element is already in the RowSet. SMALLEST
+** extracts the least value from the RowSet.
+**
+** The INSERT primitive might allocate additional memory. Memory is
+** allocated in chunks so most INSERTs do no allocation. There is an
+** upper bound on the size of allocated memory. No memory is freed
+** until DESTROY.
+**
+** The TEST primitive includes a "batch" number. The TEST primitive
+** will only see elements that were inserted before the last change
+** in the batch number. In other words, if an INSERT occurs between
+** two TESTs where the TESTs have the same batch number, then the
+** value added by the INSERT will not be visible to the second TEST.
+** The initial batch number is zero, so if the very first TEST contains
+** a non-zero batch number, it will see all prior INSERTs.
+**
+** No INSERTs may occur after a SMALLEST. An assertion will fail if
+** that is attempted.
+**
+** The cost of an INSERT is roughly constant. (Sometimes new memory
+** has to be allocated on an INSERT.) The cost of a TEST with a new
+** batch number is O(NlogN) where N is the number of elements in the RowSet.
+** The cost of a TEST using the same batch number is O(logN). The cost
+** of the first SMALLEST is O(NlogN). Second and subsequent SMALLEST
+** primitives are constant time. The cost of DESTROY is O(N).
+**
+** TEST and SMALLEST may not be used by the same RowSet. This used to
+** be possible, but the feature was not used, so it was removed in order
+** to simplify the code.
+*/
+/* #include "sqliteInt.h" */
+
+
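+/*
+** As a quick illustration of how the five primitives map onto the
+** functions defined in this file, the sketch below (inside "#if 0" so it
+** is never compiled) assumes only a valid database connection "db":
+*/
+#if 0
+static void rowSetUsageSketch(sqlite3 *db){
+  i64 iRowid;
+  RowSet *pA = sqlite3RowSetInit(db);            /* CREATE */
+  RowSet *pB = sqlite3RowSetInit(db);
+  if( pA && pB ){
+    /* INSERT followed by SMALLEST, which returns rowids in order */
+    sqlite3RowSetInsert(pA, 42);
+    sqlite3RowSetInsert(pA, 7);
+    while( sqlite3RowSetNext(pA, &iRowid) ){
+      /* iRowid is 7 on the first pass, 42 on the second */
+    }
+    /* INSERT followed by TEST. TEST and SMALLEST may not be used on
+    ** the same RowSet object, hence the second RowSet. */
+    sqlite3RowSetInsert(pB, 42);
+    assert( sqlite3RowSetTest(pB, 1, 42)==1 );   /* batch 1 sees it */
+    assert( sqlite3RowSetTest(pB, 1, 99)==0 );
+  }
+  if( pA ) sqlite3RowSetDelete(pA);              /* DESTROY */
+  if( pB ) sqlite3RowSetDelete(pB);
+}
+#endif
+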
+/*
+** Target size for allocation chunks.
+*/
+#define ROWSET_ALLOCATION_SIZE 1024
+
+/*
+** The number of rowset entries per allocation chunk.
+*/
+#define ROWSET_ENTRY_PER_CHUNK \
+ ((ROWSET_ALLOCATION_SIZE-8)/sizeof(struct RowSetEntry))
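+
+/*
+** For example, on a typical 64-bit build where a RowSetEntry occupies
+** 24 bytes (one 8-byte rowid plus two 8-byte pointers), this works out
+** to (1024-8)/24 = 42 entries per chunk, so a single allocation serves
+** the next 42 INSERTs. The 8 subtracted bytes leave room for the
+** chunk's pNextChunk pointer (see struct RowSetChunk below).
+*/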
+
+/*
+** Each entry in a RowSet is an instance of the following object.
+**
+** This same object is reused to store a linked list of trees of RowSetEntry
+** objects. In that alternative use, pRight points to the next entry
+** in the list, pLeft points to the tree, and v is unused. The
+** RowSet.pForest value points to the head of this forest list.
+*/
+struct RowSetEntry {
+ i64 v; /* ROWID value for this entry */
+ struct RowSetEntry *pRight; /* Right subtree (larger entries) or list */
+ struct RowSetEntry *pLeft; /* Left subtree (smaller entries) */
+};
+
+/*
+** RowSetEntry objects are allocated in large chunks (instances of the
+** following structure) to reduce memory allocation overhead. The
+** chunks are kept on a linked list so that they can be deallocated
+** when the RowSet is destroyed.
+*/
+struct RowSetChunk {
+ struct RowSetChunk *pNextChunk; /* Next chunk on list of them all */
+ struct RowSetEntry aEntry[ROWSET_ENTRY_PER_CHUNK]; /* Allocated entries */
+};
+
+/*
+** A RowSet is an instance of the following structure.
+**
+** A typedef of this structure is found in sqliteInt.h.
+*/
+struct RowSet {
+ struct RowSetChunk *pChunk; /* List of all chunk allocations */
+ sqlite3 *db; /* The database connection */
+ struct RowSetEntry *pEntry; /* List of entries using pRight */
+ struct RowSetEntry *pLast; /* Last entry on the pEntry list */
+ struct RowSetEntry *pFresh; /* Source of new entry objects */
+ struct RowSetEntry *pForest; /* List of binary trees of entries */
+ u16 nFresh; /* Number of objects on pFresh */
+ u16 rsFlags; /* Various flags */
+ int iBatch; /* Current insert batch */
+};
+
+/*
+** Allowed values for RowSet.rsFlags
+*/
+#define ROWSET_SORTED 0x01 /* True if RowSet.pEntry is sorted */
+#define ROWSET_NEXT 0x02 /* True if sqlite3RowSetNext() has been called */
+
+/*
+** Allocate a RowSet object. Return NULL if a memory allocation
+** error occurs.
+*/
+SQLITE_PRIVATE RowSet *sqlite3RowSetInit(sqlite3 *db){
+ RowSet *p = sqlite3DbMallocRawNN(db, sizeof(*p));
+ if( p ){
+ int N = sqlite3DbMallocSize(db, p);
+ p->pChunk = 0;
+ p->db = db;
+ p->pEntry = 0;
+ p->pLast = 0;
+ p->pForest = 0;
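+    /* The sqlite3DbMallocSize() call above reports the true size N of the
+    ** allocation, which may exceed the requested sizeof(*p). The surplus
+    ** space past the 8-byte-aligned header is carved into an initial pool
+    ** of pre-allocated RowSetEntry objects. */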
+ p->pFresh = (struct RowSetEntry*)(ROUND8(sizeof(*p)) + (char*)p);
+ p->nFresh = (u16)((N - ROUND8(sizeof(*p)))/sizeof(struct RowSetEntry));
+ p->rsFlags = ROWSET_SORTED;
+ p->iBatch = 0;
+ }
+ return p;
+}
+
+/*
+** Deallocate all chunks from a RowSet. This frees all memory that
+** the RowSet has allocated over its lifetime. The RowSet object
+** itself is not freed and may be reused for a new set of entries.
+*/
+SQLITE_PRIVATE void sqlite3RowSetClear(void *pArg){
+ RowSet *p = (RowSet*)pArg;
+ struct RowSetChunk *pChunk, *pNextChunk;
+ for(pChunk=p->pChunk; pChunk; pChunk = pNextChunk){
+ pNextChunk = pChunk->pNextChunk;
+ sqlite3DbFree(p->db, pChunk);
+ }
+ p->pChunk = 0;
+ p->nFresh = 0;
+ p->pEntry = 0;
+ p->pLast = 0;
+ p->pForest = 0;
+ p->rsFlags = ROWSET_SORTED;
+}
+
+/*
+** Deallocate all chunks from a RowSet. This frees all memory that
+** the RowSet has allocated over its lifetime. This routine is
+** the destructor for the RowSet.
+*/
+SQLITE_PRIVATE void sqlite3RowSetDelete(void *pArg){
+ sqlite3RowSetClear(pArg);
+ sqlite3DbFree(((RowSet*)pArg)->db, pArg);
+}
+
+/*
+** Allocate a new RowSetEntry object that is associated with the
+** given RowSet. Return a pointer to the new and completely uninitialized
+** object.
+**
+** In an OOM situation, the RowSet.db->mallocFailed flag is set and this
+** routine returns NULL.
+*/
+static struct RowSetEntry *rowSetEntryAlloc(RowSet *p){
+ assert( p!=0 );
+ if( p->nFresh==0 ){ /*OPTIMIZATION-IF-FALSE*/
+ /* We could allocate a fresh RowSetEntry each time one is needed, but it
+ ** is more efficient to pull a preallocated entry from the pool */
+ struct RowSetChunk *pNew;
+ pNew = sqlite3DbMallocRawNN(p->db, sizeof(*pNew));
+ if( pNew==0 ){
+ return 0;
+ }
+ pNew->pNextChunk = p->pChunk;
+ p->pChunk = pNew;
+ p->pFresh = pNew->aEntry;
+ p->nFresh = ROWSET_ENTRY_PER_CHUNK;
+ }
+ p->nFresh--;
+ return p->pFresh++;
+}
+
+/*
+** Insert a new value into a RowSet.
+**
+** The mallocFailed flag of the database connection is set if a
+** memory allocation fails.
+*/
+SQLITE_PRIVATE void sqlite3RowSetInsert(RowSet *p, i64 rowid){
+ struct RowSetEntry *pEntry; /* The new entry */
+ struct RowSetEntry *pLast; /* The last prior entry */
+
+ /* This routine is never called after sqlite3RowSetNext() */
+ assert( p!=0 && (p->rsFlags & ROWSET_NEXT)==0 );
+
+ pEntry = rowSetEntryAlloc(p);
+ if( pEntry==0 ) return;
+ pEntry->v = rowid;
+ pEntry->pRight = 0;
+ pLast = p->pLast;
+ if( pLast ){
+ if( rowid<=pLast->v ){ /*OPTIMIZATION-IF-FALSE*/
+      /* Avoid unnecessary sorts by preserving the ROWSET_SORTED flag
+      ** where possible */
+ p->rsFlags &= ~ROWSET_SORTED;
+ }
+ pLast->pRight = pEntry;
+ }else{
+ p->pEntry = pEntry;
+ }
+ p->pLast = pEntry;
+}
+
+/*
+** Merge two lists of RowSetEntry objects. Remove duplicates.
+**
+** The input lists are connected via pRight pointers and are
+** assumed to each already be in sorted order.
+*/
+static struct RowSetEntry *rowSetEntryMerge(
+ struct RowSetEntry *pA, /* First sorted list to be merged */
+ struct RowSetEntry *pB /* Second sorted list to be merged */
+){
+ struct RowSetEntry head;
+ struct RowSetEntry *pTail;
+
+ pTail = &head;
+ assert( pA!=0 && pB!=0 );
+ for(;;){
+ assert( pA->pRight==0 || pA->v<=pA->pRight->v );
+ assert( pB->pRight==0 || pB->v<=pB->pRight->v );
+ if( pA->v<=pB->v ){
+ if( pA->v<pB->v ) pTail = pTail->pRight = pA;
+ pA = pA->pRight;
+ if( pA==0 ){
+ pTail->pRight = pB;
+ break;
+ }
+ }else{
+ pTail = pTail->pRight = pB;
+ pB = pB->pRight;
+ if( pB==0 ){
+ pTail->pRight = pA;
+ break;
+ }
+ }
+ }
+ return head.pRight;
+}
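+
+/*
+** For example, merging the sorted lists (1,3,5) and (2,3,6) produces the
+** single list (1,2,3,5,6). When the two list heads compare equal, the
+** entry from pA is skipped rather than appended, which is how duplicates
+** that appear in both lists are dropped.
+*/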
+
+/*
+** Sort all elements on the list of RowSetEntry objects into order of
+** increasing v.
+**
+** This is a bottom-up mergesort: aBucket[i] is either NULL or a sorted
+** sublist of roughly 2^i entries (merges may drop duplicates), and each
+** incoming entry cascades through the buckets like a carry propagating
+** through a binary counter, so 40 buckets suffice for about 2^40 entries.
+*/
+static struct RowSetEntry *rowSetEntrySort(struct RowSetEntry *pIn){
+ unsigned int i;
+ struct RowSetEntry *pNext, *aBucket[40];
+
+ memset(aBucket, 0, sizeof(aBucket));
+ while( pIn ){
+ pNext = pIn->pRight;
+ pIn->pRight = 0;
+ for(i=0; aBucket[i]; i++){
+ pIn = rowSetEntryMerge(aBucket[i], pIn);
+ aBucket[i] = 0;
+ }
+ aBucket[i] = pIn;
+ pIn = pNext;
+ }
+ pIn = aBucket[0];
+ for(i=1; i<sizeof(aBucket)/sizeof(aBucket[0]); i++){
+ if( aBucket[i]==0 ) continue;
+ pIn = pIn ? rowSetEntryMerge(pIn, aBucket[i]) : aBucket[i];
+ }
+ return pIn;
+}
+
+
+/*
+** The input, pIn, is a binary tree (or subtree) of RowSetEntry objects.
+** Convert this tree into a linked list connected by the pRight pointers
+** and return pointers to the first and last elements of the new list.
+*/
+static void rowSetTreeToList(
+ struct RowSetEntry *pIn, /* Root of the input tree */
+ struct RowSetEntry **ppFirst, /* Write head of the output list here */
+ struct RowSetEntry **ppLast /* Write tail of the output list here */
+){
+ assert( pIn!=0 );
+ if( pIn->pLeft ){
+ struct RowSetEntry *p;
+ rowSetTreeToList(pIn->pLeft, ppFirst, &p);
+ p->pRight = pIn;
+ }else{
+ *ppFirst = pIn;
+ }
+ if( pIn->pRight ){
+ rowSetTreeToList(pIn->pRight, &pIn->pRight, ppLast);
+ }else{
+ *ppLast = pIn;
+ }
+ assert( (*ppLast)->pRight==0 );
+}
+
+
+/*
+** Convert a sorted list of elements (connected by pRight) into a binary
+** tree with a depth of iDepth. A depth of 1 means the tree contains a single
+** node taken from the head of *ppList. A depth of 2 means a tree with
+** three nodes. And so forth.
+**
+** Use as many entries from the input list as required and update the
+** *ppList to point to the unused elements of the list. If the input
+** list contains too few elements, then construct an incomplete tree
+** and leave *ppList set to NULL.
+**
+** Return a pointer to the root of the constructed binary tree.
+*/
+static struct RowSetEntry *rowSetNDeepTree(
+ struct RowSetEntry **ppList,
+ int iDepth
+){
+ struct RowSetEntry *p; /* Root of the new tree */
+ struct RowSetEntry *pLeft; /* Left subtree */
+ if( *ppList==0 ){ /*OPTIMIZATION-IF-TRUE*/
+ /* Prevent unnecessary deep recursion when we run out of entries */
+ return 0;
+ }
+ if( iDepth>1 ){ /*OPTIMIZATION-IF-TRUE*/
+ /* This branch causes a *balanced* tree to be generated. A valid tree
+ ** is still generated without this branch, but the tree is wildly
+ ** unbalanced and inefficient. */
+ pLeft = rowSetNDeepTree(ppList, iDepth-1);
+ p = *ppList;
+ if( p==0 ){ /*OPTIMIZATION-IF-FALSE*/
+ /* It is safe to always return here, but the resulting tree
+ ** would be unbalanced */
+ return pLeft;
+ }
+ p->pLeft = pLeft;
+ *ppList = p->pRight;
+ p->pRight = rowSetNDeepTree(ppList, iDepth-1);
+ }else{
+ p = *ppList;
+ *ppList = p->pRight;
+ p->pLeft = p->pRight = 0;
+ }
+ return p;
+}
+
+/*
+** Convert a sorted list of elements into a binary tree. Make the tree
+** as deep as it needs to be in order to contain the entire list.
+*/
+static struct RowSetEntry *rowSetListToTree(struct RowSetEntry *pList){
+ int iDepth; /* Depth of the tree so far */
+ struct RowSetEntry *p; /* Current tree root */
+ struct RowSetEntry *pLeft; /* Left subtree */
+
+ assert( pList!=0 );
+ p = pList;
+ pList = p->pRight;
+ p->pLeft = p->pRight = 0;
+ for(iDepth=1; pList; iDepth++){
+ pLeft = p;
+ p = pList;
+ pList = p->pRight;
+ p->pLeft = pLeft;
+ p->pRight = rowSetNDeepTree(&pList, iDepth);
+ }
+ return p;
+}
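+
+/*
+** For example, a fully-populated tree of depth 3 holds 2^3-1 = 7 entries:
+** the loop above consumes one entry to seed the depth-1 tree, two more
+** entries to grow to depth 2, and four more to grow to depth 3. An input
+** list that runs out part-way simply yields an incomplete final subtree.
+*/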
+
+/*
+** Extract the smallest element from the RowSet.
+** Write the element into *pRowid. Return 1 on success. Return
+** 0 if the RowSet is already empty.
+**
+** After this routine has been called, the sqlite3RowSetInsert()
+** routine may not be called again.
+**
+** This routine may not be called after sqlite3RowSetTest() has
+** been used. Older versions of RowSet allowed that, but as the
+** capability was not used by the code generator, it was removed
+** for code economy.
+*/
+SQLITE_PRIVATE int sqlite3RowSetNext(RowSet *p, i64 *pRowid){
+ assert( p!=0 );
+  assert( p->pForest==0 );      /* Cannot be used with sqlite3RowSetTest() */
+
+ /* Merge the forest into a single sorted list on first call */
+ if( (p->rsFlags & ROWSET_NEXT)==0 ){ /*OPTIMIZATION-IF-FALSE*/
+ if( (p->rsFlags & ROWSET_SORTED)==0 ){ /*OPTIMIZATION-IF-FALSE*/
+ p->pEntry = rowSetEntrySort(p->pEntry);
+ }
+ p->rsFlags |= ROWSET_SORTED|ROWSET_NEXT;
+ }
+
+ /* Return the next entry on the list */
+ if( p->pEntry ){
+ *pRowid = p->pEntry->v;
+ p->pEntry = p->pEntry->pRight;
+ if( p->pEntry==0 ){ /*OPTIMIZATION-IF-TRUE*/
+ /* Free memory immediately, rather than waiting on sqlite3_finalize() */
+ sqlite3RowSetClear(p);
+ }
+ return 1;
+ }else{
+ return 0;
+ }
+}
+
+/*
+** Check to see if element iRowid was inserted into the rowset as
+** part of any insert batch prior to iBatch. Return 1 or 0.
+**
+** If this is the first test of a new batch and if there exist entries
+** on pRowSet->pEntry, then sort those entries into the forest at
+** pRowSet->pForest so that they can be tested.
+*/
+SQLITE_PRIVATE int sqlite3RowSetTest(RowSet *pRowSet, int iBatch, sqlite3_int64 iRowid){
+ struct RowSetEntry *p, *pTree;
+
+ /* This routine is never called after sqlite3RowSetNext() */
+ assert( pRowSet!=0 && (pRowSet->rsFlags & ROWSET_NEXT)==0 );
+
+ /* Sort entries into the forest on the first test of a new batch.
+ ** To save unnecessary work, only do this when the batch number changes.
+ */
+ if( iBatch!=pRowSet->iBatch ){ /*OPTIMIZATION-IF-FALSE*/
+ p = pRowSet->pEntry;
+ if( p ){
+ struct RowSetEntry **ppPrevTree = &pRowSet->pForest;
+ if( (pRowSet->rsFlags & ROWSET_SORTED)==0 ){ /*OPTIMIZATION-IF-FALSE*/
+ /* Only sort the current set of entries if they need it */
+ p = rowSetEntrySort(p);
+ }
+ for(pTree = pRowSet->pForest; pTree; pTree=pTree->pRight){
+ ppPrevTree = &pTree->pRight;
+ if( pTree->pLeft==0 ){
+ pTree->pLeft = rowSetListToTree(p);
+ break;
+ }else{
+ struct RowSetEntry *pAux, *pTail;
+ rowSetTreeToList(pTree->pLeft, &pAux, &pTail);
+ pTree->pLeft = 0;
+ p = rowSetEntryMerge(pAux, p);
+ }
+ }
+ if( pTree==0 ){
+ *ppPrevTree = pTree = rowSetEntryAlloc(pRowSet);
+ if( pTree ){
+ pTree->v = 0;
+ pTree->pRight = 0;
+ pTree->pLeft = rowSetListToTree(p);
+ }
+ }
+ pRowSet->pEntry = 0;
+ pRowSet->pLast = 0;
+ pRowSet->rsFlags |= ROWSET_SORTED;
+ }
+ pRowSet->iBatch = iBatch;
+ }
+
+ /* Test to see if the iRowid value appears anywhere in the forest.
+ ** Return 1 if it does and 0 if not.
+ */
+ for(pTree = pRowSet->pForest; pTree; pTree=pTree->pRight){
+ p = pTree->pLeft;
+ while( p ){
+ if( p->v<iRowid ){
+ p = p->pRight;
+ }else if( p->v>iRowid ){
+ p = p->pLeft;
+ }else{
+ return 1;
+ }
+ }
+ }
+ return 0;
+}
+
+/************** End of rowset.c **********************************************/
+/************** Begin file pager.c *******************************************/
+/*
+** 2001 September 15
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+** This is the implementation of the page cache subsystem or "pager".
+**
+** The pager is used to access a database disk file. It implements
+** atomic commit and rollback through the use of a journal file that
+** is separate from the database file. The pager also implements file
+** locking to prevent two processes from writing the same database
+** file simultaneously, or one process from reading the database while
+** another is writing.
+*/
+#ifndef SQLITE_OMIT_DISKIO
+/* #include "sqliteInt.h" */
+/************** Include wal.h in the middle of pager.c ***********************/
+/************** Begin file wal.h *********************************************/
+/*
+** 2010 February 1
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+** This header file defines the interface to the write-ahead logging
+** system. Refer to the comments below and the header comment attached to
+** the implementation of each function in wal.c for further details.
+*/
+
+#ifndef SQLITE_WAL_H
+#define SQLITE_WAL_H
+
+/* #include "sqliteInt.h" */
+
+/* Macros for extracting appropriate sync flags for either transaction
+** commits (WAL_SYNC_FLAGS(X)) or for checkpoint ops (CKPT_SYNC_FLAGS(X)):
+*/
+#define WAL_SYNC_FLAGS(X) ((X)&0x03)
+#define CKPT_SYNC_FLAGS(X) (((X)>>2)&0x03)
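+
+/*
+** For example, a combined value of 0x0B carries SQLITE_SYNC_FULL (0x03)
+** in the low bits for transaction commits and SQLITE_SYNC_NORMAL (0x02)
+** in the upper bits for checkpoints: WAL_SYNC_FLAGS(0x0B)==0x03 and
+** CKPT_SYNC_FLAGS(0x0B)==0x02.
+*/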
+
+#ifdef SQLITE_OMIT_WAL
+# define sqlite3WalOpen(x,y,z) 0
+# define sqlite3WalLimit(x,y)
+# define sqlite3WalClose(v,w,x,y,z) 0
+# define sqlite3WalBeginReadTransaction(y,z) 0
+# define sqlite3WalEndReadTransaction(z)
+# define sqlite3WalDbsize(y) 0
+# define sqlite3WalBeginWriteTransaction(y) 0
+# define sqlite3WalEndWriteTransaction(x) 0
+# define sqlite3WalUndo(x,y,z) 0
+# define sqlite3WalSavepoint(y,z)
+# define sqlite3WalSavepointUndo(y,z) 0
+# define sqlite3WalFrames(u,v,w,x,y,z) 0
+# define sqlite3WalCheckpoint(q,r,s,t,u,v,w,x,y,z) 0
+# define sqlite3WalCallback(z) 0
+# define sqlite3WalExclusiveMode(y,z) 0
+# define sqlite3WalHeapMemory(z) 0
+# define sqlite3WalFramesize(z) 0
+# define sqlite3WalFindFrame(x,y,z) 0
+# define sqlite3WalFile(x) 0
+#else
+
+#define WAL_SAVEPOINT_NDATA 4
+
+/* Connection to a write-ahead log (WAL) file.
+** There is one object of this type for each pager.
+*/
+typedef struct Wal Wal;
+
+/* Open and close a connection to a write-ahead log. */
+SQLITE_PRIVATE int sqlite3WalOpen(sqlite3_vfs*, sqlite3_file*, const char *, int, i64, Wal**);
+SQLITE_PRIVATE int sqlite3WalClose(Wal *pWal, sqlite3*, int sync_flags, int, u8 *);
+
+/* Set the limiting size of a WAL file. */
+SQLITE_PRIVATE void sqlite3WalLimit(Wal*, i64);
+
+/* Used by readers to open (lock) and close (unlock) a snapshot. A
+** snapshot is like a read-transaction. It is the state of the database
+** at an instant in time. sqlite3WalBeginReadTransaction() gets a read lock
+** and preserves the current state even if other threads or processes
+** write to or checkpoint the WAL. sqlite3WalEndReadTransaction() closes the
+** transaction and releases the lock.
+*/
+SQLITE_PRIVATE int sqlite3WalBeginReadTransaction(Wal *pWal, int *);
+SQLITE_PRIVATE void sqlite3WalEndReadTransaction(Wal *pWal);
+
+/* Read a page from the write-ahead log, if it is present. */
+SQLITE_PRIVATE int sqlite3WalFindFrame(Wal *, Pgno, u32 *);
+SQLITE_PRIVATE int sqlite3WalReadFrame(Wal *, u32, int, u8 *);
+
+/* If the WAL is not empty, return the size of the database. */
+SQLITE_PRIVATE Pgno sqlite3WalDbsize(Wal *pWal);
+
+/* Obtain or release the WRITER lock. */
+SQLITE_PRIVATE int sqlite3WalBeginWriteTransaction(Wal *pWal);
+SQLITE_PRIVATE int sqlite3WalEndWriteTransaction(Wal *pWal);
+
+/* Undo any frames written (but not committed) to the log */
+SQLITE_PRIVATE int sqlite3WalUndo(Wal *pWal, int (*xUndo)(void *, Pgno), void *pUndoCtx);
+
+/* Record the current (uncommitted) write position of the WAL in the
+** aWalData[] array */
+SQLITE_PRIVATE void sqlite3WalSavepoint(Wal *pWal, u32 *aWalData);
+
+/* Move the write position of the WAL back to iFrame. Called in
+** response to a ROLLBACK TO command. */
+SQLITE_PRIVATE int sqlite3WalSavepointUndo(Wal *pWal, u32 *aWalData);
+
+/* Write a frame or frames to the log. */
+SQLITE_PRIVATE int sqlite3WalFrames(Wal *pWal, int, PgHdr *, Pgno, int, int);
+
+/* Copy pages from the log to the database file */
+SQLITE_PRIVATE int sqlite3WalCheckpoint(
+ Wal *pWal, /* Write-ahead log connection */
+ sqlite3 *db, /* Check this handle's interrupt flag */
+ int eMode, /* One of PASSIVE, FULL and RESTART */
+ int (*xBusy)(void*), /* Function to call when busy */
+  void *pBusyArg,                 /* Context argument for xBusy */
+  int sync_flags,                 /* Flags to sync db file with (or 0) */
+  int nBuf,                       /* Size of buffer zBuf */
+ u8 *zBuf, /* Temporary buffer to use */
+ int *pnLog, /* OUT: Number of frames in WAL */
+ int *pnCkpt /* OUT: Number of backfilled frames in WAL */
+);
+
+/* Return the value to pass to a sqlite3_wal_hook callback, the
+** number of frames in the WAL at the point of the last commit since
+** sqlite3WalCallback() was called. If no commits have occurred since
+** the last call, then return 0.
+*/
+SQLITE_PRIVATE int sqlite3WalCallback(Wal *pWal);
+
+/* Tell the wal layer that an EXCLUSIVE lock has been obtained (or released)
+** by the pager layer on the database file.
+*/
+SQLITE_PRIVATE int sqlite3WalExclusiveMode(Wal *pWal, int op);
+
+/* Return true if the argument is non-NULL and the WAL module is using
+** heap-memory for the wal-index. Otherwise, if the argument is NULL or the
+** WAL module is using shared-memory, return false.
+*/
+SQLITE_PRIVATE int sqlite3WalHeapMemory(Wal *pWal);
+
+#ifdef SQLITE_ENABLE_SNAPSHOT
+SQLITE_PRIVATE int sqlite3WalSnapshotGet(Wal *pWal, sqlite3_snapshot **ppSnapshot);
+SQLITE_PRIVATE void sqlite3WalSnapshotOpen(Wal *pWal, sqlite3_snapshot *pSnapshot);
+SQLITE_PRIVATE int sqlite3WalSnapshotRecover(Wal *pWal);
+SQLITE_PRIVATE int sqlite3WalSnapshotCheck(Wal *pWal, sqlite3_snapshot *pSnapshot);
+SQLITE_PRIVATE void sqlite3WalSnapshotUnlock(Wal *pWal);
+#endif
+
+#ifdef SQLITE_ENABLE_ZIPVFS
+/* If the WAL file is not empty, return the number of bytes of content
+** stored in each frame (i.e. the db page-size when the WAL was created).
+*/
+SQLITE_PRIVATE int sqlite3WalFramesize(Wal *pWal);
+#endif
+
+/* Return the sqlite3_file object for the WAL file */
+SQLITE_PRIVATE sqlite3_file *sqlite3WalFile(Wal *pWal);
+
+#ifdef SQLITE_ENABLE_SETLK_TIMEOUT
+SQLITE_PRIVATE int sqlite3WalWriteLock(Wal *pWal, int bLock);
+SQLITE_PRIVATE void sqlite3WalDb(Wal *pWal, sqlite3 *db);
+#endif
+
+#endif /* ifndef SQLITE_OMIT_WAL */
+#endif /* SQLITE_WAL_H */
+
+/************** End of wal.h *************************************************/
+/************** Continuing where we left off in pager.c **********************/
+
+
+/******************* NOTES ON THE DESIGN OF THE PAGER ************************
+**
+** This comment block describes invariants that hold when using a rollback
+** journal. These invariants do not apply for journal_mode=WAL,
+** journal_mode=MEMORY, or journal_mode=OFF.
+**
+** Within this comment block, a page is deemed to have been synced
+** automatically as soon as it is written when PRAGMA synchronous=OFF.
+** Otherwise, the page is not synced until the xSync method of the VFS
+** is called successfully on the file containing the page.
+**
+** Definition: A page of the database file is said to be "overwriteable" if
+** one or more of the following are true about the page:
+**
+** (a) The original content of the page as it was at the beginning of
+** the transaction has been written into the rollback journal and
+** synced.
+**
+** (b) The page was a freelist leaf page at the start of the transaction.
+**
+** (c) The page number is greater than the largest page that existed in
+** the database file at the start of the transaction.
+**
+** (1) A page of the database file is never overwritten unless one of the
+** following is true:
+**
+** (a) The page and all other pages on the same sector are overwriteable.
+**
+** (b) The atomic page write optimization is enabled, and the entire
+** transaction other than the update of the transaction sequence
+** number consists of a single page change.
+**
+** (2) The content of a page written into the rollback journal exactly matches
+** both the content in the database when the rollback journal was written
+** and the content in the database at the beginning of the current
+** transaction.
+**
+** (3) Writes to the database file are an integer multiple of the page size
+** in length and are aligned on a page boundary.
+**
+** (4) Reads from the database file are either aligned on a page boundary and
+** an integer multiple of the page size in length or are taken from the
+** first 100 bytes of the database file.
+**
+** (5) All writes to the database file are synced prior to the rollback journal
+** being deleted, truncated, or zeroed.
+**
+** (6) If a super-journal file is used, then all writes to the database file
+** are synced prior to the super-journal being deleted.
+**
+** Definition: Two databases (or the same database at two points in time)
+** are said to be "logically equivalent" if they give the same answer to
+** all queries. Note in particular the content of freelist leaf
+** pages can be changed arbitrarily without affecting the logical equivalence
+** of the database.
+**
+** (7) At any time, if any subset, including the empty set and the total set,
+** of the unsynced changes to a rollback journal are removed and the
+** journal is rolled back, the resulting database file will be logically
+** equivalent to the database file at the beginning of the transaction.
+**
+** (8) When a transaction is rolled back, the xTruncate method of the VFS
+** is called to restore the database file to the same size it was at
+** the beginning of the transaction. (In some VFSes, the xTruncate
+** method is a no-op, but that does not change the fact that SQLite will
+** invoke it.)
+**
+** (9) Whenever the database file is modified, at least one bit in the range
+** of bytes from 24 through 39 inclusive will be changed prior to releasing
+** the EXCLUSIVE lock, thus signaling other connections on the same
+** database to flush their caches.
+**
+** (10) The pattern of bits in bytes 24 through 39 shall not repeat in less
+** than one billion transactions.
+**
+** (11) A database file is well-formed at the beginning and at the conclusion
+** of every transaction.
+**
+** (12) An EXCLUSIVE lock is held on the database file when writing to
+** the database file.
+**
+** (13) A SHARED lock is held on the database file while reading any
+** content out of the database file.
+**
+******************************************************************************/
+
+/*
+** Macros for troubleshooting. Normally turned off
+*/
+#if 0
+int sqlite3PagerTrace=1; /* True to enable tracing */
+#define sqlite3DebugPrintf printf
+#define PAGERTRACE(X) if( sqlite3PagerTrace ){ sqlite3DebugPrintf X; }
+#else
+#define PAGERTRACE(X)
+#endif
+
+/*
+** The following two macros are used within the PAGERTRACE() macro above
+** to print out file-descriptors.
+**
+** PAGERID() takes a pointer to a Pager struct as its argument. The
+** associated file-descriptor is returned. FILEHANDLEID() takes an sqlite3_file
+** struct as its argument.
+*/
+#define PAGERID(p) (SQLITE_PTR_TO_INT(p->fd))
+#define FILEHANDLEID(fd) (SQLITE_PTR_TO_INT(fd))
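+
+/*
+** The doubled parentheses at each PAGERTRACE() call site are what allow
+** a variable-length printf() argument list to pass through the single
+** macro parameter X, for example:
+**
+**   PAGERTRACE(("COMMIT %d\n", PAGERID(pPager)));
+*/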
+
+/*
+** The Pager.eState variable stores the current 'state' of a pager. A
+** pager may be in any one of the seven states shown in the following
+** state diagram.
+**
+** OPEN <------+------+
+** | | |
+** V | |
+** +---------> READER-------+ |
+** | | |
+** | V |
+** |<-------WRITER_LOCKED------> ERROR
+** | | ^
+** | V |
+** |<------WRITER_CACHEMOD-------->|
+** | | |
+** | V |
+** |<-------WRITER_DBMOD---------->|
+** | | |
+** | V |
+** +<------WRITER_FINISHED-------->+
+**
+**
+** List of state transitions and the C [function] that performs each:
+**
+** OPEN -> READER [sqlite3PagerSharedLock]
+** READER -> OPEN [pager_unlock]
+**
+** READER -> WRITER_LOCKED [sqlite3PagerBegin]
+** WRITER_LOCKED -> WRITER_CACHEMOD [pager_open_journal]
+** WRITER_CACHEMOD -> WRITER_DBMOD [syncJournal]
+** WRITER_DBMOD -> WRITER_FINISHED [sqlite3PagerCommitPhaseOne]
+** WRITER_*** -> READER [pager_end_transaction]
+**
+** WRITER_*** -> ERROR [pager_error]
+** ERROR -> OPEN [pager_unlock]
+**
+**
+** OPEN:
+**
+** The pager starts up in this state. Nothing is guaranteed in this
+** state - the file may or may not be locked and the database size is
+** unknown. The database may not be read or written.
+**
+** * No read or write transaction is active.
+** * Any lock, or no lock at all, may be held on the database file.
+** * The dbSize, dbOrigSize and dbFileSize variables may not be trusted.
+**
+** READER:
+**
+** In this state all the requirements for reading the database in
+** rollback (non-WAL) mode are met. Unless the pager is (or recently
+** was) in exclusive-locking mode, a user-level read transaction is
+** open. The database size is known in this state.
+**
+** A connection running with locking_mode=normal enters this state when
+** it opens a read-transaction on the database and returns to state
+** OPEN after the read-transaction is completed. However a connection
+** running in locking_mode=exclusive (including temp databases) remains in
+** this state even after the read-transaction is closed. The only way
+** a locking_mode=exclusive connection can transition from READER to OPEN
+** is via the ERROR state (see below).
+**
+** * A read transaction may be active (but a write-transaction cannot).
+** * A SHARED or greater lock is held on the database file.
+** * The dbSize variable may be trusted (even if a user-level read
+** transaction is not active). The dbOrigSize and dbFileSize variables
+** may not be trusted at this point.
+** * If the database is a WAL database, then the WAL connection is open.
+** * Even if a read-transaction is not open, it is guaranteed that
+** there is no hot-journal in the file-system.
+**
+** WRITER_LOCKED:
+**
+** The pager moves to this state from READER when a write-transaction
+** is first opened on the database. In WRITER_LOCKED state, all locks
+** required to start a write-transaction are held, but no actual
+** modifications to the cache or database have taken place.
+**
+** In rollback mode, a RESERVED or (if the transaction was opened with
+** BEGIN EXCLUSIVE) EXCLUSIVE lock is obtained on the database file when
+** moving to this state, but the journal file is not written to or opened
+** in this state. If the transaction is committed or rolled back while
+** in WRITER_LOCKED state, all that is required is to unlock the database
+** file.
+**
+** In WAL mode, WalBeginWriteTransaction() is called to lock the log file.
+** If the connection is running with locking_mode=exclusive, an attempt
+** is made to obtain an EXCLUSIVE lock on the database file.
+**
+** * A write transaction is active.
+** * If the connection is open in rollback-mode, a RESERVED or greater
+** lock is held on the database file.
+** * If the connection is open in WAL-mode, a WAL write transaction
+** is open (i.e. sqlite3WalBeginWriteTransaction() has been successfully
+** called).
+** * The dbSize, dbOrigSize and dbFileSize variables are all valid.
+** * The contents of the pager cache have not been modified.
+** * The journal file may or may not be open.
+** * Nothing (not even the first header) has been written to the journal.
+**
+** WRITER_CACHEMOD:
+**
+** A pager moves from WRITER_LOCKED state to this state when a page is
+** first modified by the upper layer. In rollback mode the journal file
+** is opened (if it is not already open) and a header written to the
+** start of it. The database file on disk has not been modified.
+**
+** * A write transaction is active.
+** * A RESERVED or greater lock is held on the database file.
+** * The journal file is open and the first header has been written
+** to it, but the header has not been synced to disk.
+** * The contents of the page cache have been modified.
+**
+** WRITER_DBMOD:
+**
+** The pager transitions from WRITER_CACHEMOD into WRITER_DBMOD state
+** when it modifies the contents of the database file. WAL connections
+** never enter this state (since they do not modify the database file,
+** just the log file).
+**
+** * A write transaction is active.
+** * An EXCLUSIVE or greater lock is held on the database file.
+** * The journal file is open and the first header has been written
+** and synced to disk.
+** * The contents of the page cache have been modified (and possibly
+** written to disk).
+**
+** WRITER_FINISHED:
+**
+** It is not possible for a WAL connection to enter this state.
+**
+** A rollback-mode pager changes to WRITER_FINISHED state from WRITER_DBMOD
+** state after the entire transaction has been successfully written into the
+** database file. In this state the transaction may be committed simply
+** by finalizing the journal file. Once in WRITER_FINISHED state, it is
+** not possible to modify the database further. At this point, the upper
+** layer must either commit or rollback the transaction.
+**
+** * A write transaction is active.
+** * An EXCLUSIVE or greater lock is held on the database file.
+** * All writing and syncing of journal and database data has finished.
+** If no error occurred, all that remains is to finalize the journal to
+** commit the transaction. If an error did occur, the caller will need
+** to rollback the transaction.
+**
+** ERROR:
+**
+** The ERROR state is entered when an IO or disk-full error (including
+** SQLITE_IOERR_NOMEM) occurs at a point in the code that makes it
+** difficult to be sure that the in-memory pager state (cache contents,
+** db size etc.) is consistent with the contents of the file-system.
+**
+** Temporary pager files may enter the ERROR state, but in-memory pagers
+** cannot.
+**
+** For example, if an IO error occurs while performing a rollback,
+** the contents of the page-cache may be left in an inconsistent state.
+** At this point it would be dangerous to change back to READER state
+** (as usually happens after a rollback). Any subsequent readers might
+** report database corruption (due to the inconsistent cache), and if
+** they upgrade to writers, they may inadvertently corrupt the database
+** file. To avoid this hazard, the pager switches into the ERROR state
+** instead of READER following such an error.
+**
+** Once it has entered the ERROR state, any attempt to use the pager
+** to read or write data returns an error. Eventually, once all
+** outstanding transactions have been abandoned, the pager is able to
+** transition back to OPEN state, discarding the contents of the
+** page-cache and any other in-memory state at the same time. Everything
+** is reloaded from disk (and, if necessary, hot-journal rollback performed)
+** when a read-transaction is next opened on the pager (transitioning
+** the pager into READER state). At that point the system has recovered
+** from the error.
+**
+** Specifically, the pager jumps into the ERROR state if:
+**
+** 1. An error occurs while attempting a rollback. This happens in
+** function sqlite3PagerRollback().
+**
+** 2. An error occurs while attempting to finalize a journal file
+** following a commit in function sqlite3PagerCommitPhaseTwo().
+**
+** 3. An error occurs while attempting to write to the journal or
+** database file in function pagerStress() in order to free up
+** memory.
+**
+** In other cases, the error is returned to the b-tree layer. The b-tree
+** layer then attempts a rollback operation. If the error condition
+** persists, the pager enters the ERROR state via condition (1) above.
+**
+** Condition (3) is necessary because it can be triggered by a read-only
+** statement executed within a transaction. In this case, if the error
+** code were simply returned to the user, the b-tree layer would not
+** automatically attempt a rollback, as it assumes that an error in a
+** read-only statement cannot leave the pager in an internally inconsistent
+** state.
+**
+** * The Pager.errCode variable is set to something other than SQLITE_OK.
+** * There are one or more outstanding references to pages (after the
+** last reference is dropped the pager should move back to OPEN state).
+** * The pager is not an in-memory pager.
+**
+**
+** Notes:
+**
+** * A pager is never in WRITER_DBMOD or WRITER_FINISHED state if the
+** connection is open in WAL mode. A WAL connection is always in one
+** of the first four states.
+**
+** * Normally, a connection open in exclusive mode is never in PAGER_OPEN
+** state. There are two exceptions: immediately after exclusive-mode has
+** been turned on (and before any read or write transactions are
+** executed), and when the pager is leaving the "error state".
+**
+** * See also: assert_pager_state().
+*/
+#define PAGER_OPEN 0
+#define PAGER_READER 1
+#define PAGER_WRITER_LOCKED 2
+#define PAGER_WRITER_CACHEMOD 3
+#define PAGER_WRITER_DBMOD 4
+#define PAGER_WRITER_FINISHED 5
+#define PAGER_ERROR 6
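+
+/*
+** For example, a simple UPDATE in rollback mode walks the whole chain:
+** OPEN -> READER when the shared lock is taken, READER -> WRITER_LOCKED
+** at BEGIN, WRITER_LOCKED -> WRITER_CACHEMOD when the first page is
+** changed in cache, WRITER_CACHEMOD -> WRITER_DBMOD when the database
+** file itself is first written, WRITER_DBMOD -> WRITER_FINISHED once all
+** content is written, and finally back to READER as the journal is
+** finalized.
+*/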
+
+/*
+** The Pager.eLock variable is almost always set to one of the
+** following locking-states, according to the lock currently held on
+** the database file: NO_LOCK, SHARED_LOCK, RESERVED_LOCK or EXCLUSIVE_LOCK.
+** This variable is kept up to date as locks are taken and released by
+** the pagerLockDb() and pagerUnlockDb() wrappers.
+**
+** If the VFS xLock() or xUnlock() returns an error other than SQLITE_BUSY
+** (i.e. one of the SQLITE_IOERR subtypes), it is not clear whether or not
+** the operation was successful. In these circumstances pagerLockDb() and
+** pagerUnlockDb() take a conservative approach - eLock is always updated
+** when unlocking the file, and only updated when locking the file if the
+** VFS call is successful. This way, the Pager.eLock variable may be set
+** to a less exclusive (lower) value than the lock that is actually held
+** at the system level, but it is never set to a more exclusive value.
+**
+** This is usually safe. If an xUnlock fails or appears to fail, there may
+** be a few redundant xLock() calls or a lock may be held for longer than
+** required, but nothing really goes wrong.
+**
+** The exception is when the database file is unlocked as the pager moves
+** from ERROR to OPEN state. At this point there may be a hot-journal file
+** in the file-system that needs to be rolled back (as part of an OPEN->SHARED
+** transition, by the same pager or any other). If the call to xUnlock()
+** fails at this point and the pager is left holding an EXCLUSIVE lock, this
+** can confuse the xCheckReservedLock() call made later as part
+** of hot-journal detection.
+**
+** xCheckReservedLock() is defined as returning true "if there is a RESERVED
+** lock held by this process or any others". So xCheckReservedLock may
+** return true because the caller itself is holding an EXCLUSIVE lock (but
+** doesn't know it because of a previous error in xUnlock). If this happens
+** a hot-journal may be mistaken for a journal being created by an active
+** transaction in another process, causing SQLite to read from the database
+** without rolling it back.
+**
+** To work around this, if a call to xUnlock() fails when unlocking the
+** database in the ERROR state, Pager.eLock is set to UNKNOWN_LOCK. It
+** is only changed back to a real locking state after a successful call
+** to xLock(EXCLUSIVE). Also, the code to do the OPEN->SHARED state transition
+** omits the check for a hot-journal if Pager.eLock is set to UNKNOWN_LOCK.
+** Instead, it assumes a hot-journal exists and obtains an EXCLUSIVE
+** lock on the database file before attempting to roll it back. See function
+** PagerSharedLock() for more detail.
+**
+** Pager.eLock may only be set to UNKNOWN_LOCK when the pager is in
+** PAGER_OPEN state.
+*/
+#define UNKNOWN_LOCK (EXCLUSIVE_LOCK+1)
+
+/*
+** The maximum allowed sector size. 64KiB. If the xSectorSize() method
+** returns a value larger than this, then MAX_SECTOR_SIZE is used instead.
+** This could conceivably cause corruption following a power failure on
+** such a system. This is currently an undocumented limit.
+*/
+#define MAX_SECTOR_SIZE 0x10000
+
+
+/*
+** An instance of the following structure is allocated for each active
+** savepoint and statement transaction in the system. All such structures
+** are stored in the Pager.aSavepoint[] array, which is allocated and
+** resized using sqlite3Realloc().
+**
+** When a savepoint is created, the PagerSavepoint.iHdrOffset field is
+** set to 0. If a journal-header is written into the main journal while
+** the savepoint is active, then iHdrOffset is set to the byte offset
+** immediately following the last journal record written into the main
+** journal before the journal-header. This is required during savepoint
+** rollback (see pagerPlaybackSavepoint()).
+*/
+typedef struct PagerSavepoint PagerSavepoint;
+struct PagerSavepoint {
+ i64 iOffset; /* Starting offset in main journal */
+ i64 iHdrOffset; /* See above */
+ Bitvec *pInSavepoint; /* Set of pages in this savepoint */
+ Pgno nOrig; /* Original number of pages in file */
+ Pgno iSubRec; /* Index of first record in sub-journal */
+#ifndef SQLITE_OMIT_WAL
+ u32 aWalData[WAL_SAVEPOINT_NDATA]; /* WAL savepoint context */
+#endif
+};
+
+/*
+** Bits of the Pager.doNotSpill flag. See further description below.
+*/
+#define SPILLFLAG_OFF 0x01 /* Never spill cache. Set via pragma */
+#define SPILLFLAG_ROLLBACK    0x02 /* Currently rolling back, so do not spill */
+#define SPILLFLAG_NOSYNC 0x04 /* Spill is ok, but do not sync */
+
+/*
+** An open page cache is an instance of struct Pager. A description of
+** some of the more important member variables follows:
+**
+** eState
+**
+** The current 'state' of the pager object. See the comment and state
+** diagram above for a description of the pager state.
+**
+** eLock
+**
+** For a real on-disk database, the current lock held on the database file -
+** NO_LOCK, SHARED_LOCK, RESERVED_LOCK or EXCLUSIVE_LOCK.
+**
+** For a temporary or in-memory database (neither of which require any
+** locks), this variable is always set to EXCLUSIVE_LOCK. Since such
+** databases always have Pager.exclusiveMode==1, this tricks the pager
+** logic into thinking that it already has all the locks it will ever
+** need (and no reason to release them).
+**
+** In some (obscure) circumstances, this variable may also be set to
+** UNKNOWN_LOCK. See the comment above the #define of UNKNOWN_LOCK for
+** details.
+**
+** changeCountDone
+**
+** This boolean variable is used to make sure that the change-counter
+** (the 4-byte header field at byte offset 24 of the database file) is
+** not updated more often than necessary.
+**
+** It is set to true when the change-counter field is updated, which
+** can only happen if an exclusive lock is held on the database file.
+** It is cleared (set to false) whenever an exclusive lock is
+** relinquished on the database file. Each time a transaction is committed,
+** the changeCountDone flag is inspected. If it is true, the work of
+** updating the change-counter is omitted for the current transaction.
+**
+** This mechanism means that when running in exclusive mode, a connection
+** need only update the change-counter once, for the first transaction
+** committed.
+**
+** setSuper
+**
+** When PagerCommitPhaseOne() is called to commit a transaction, it may
+** (or may not) specify a super-journal name to be written into the
+** journal file before it is synced to disk.
+**
+** Whether or not a journal file contains a super-journal pointer affects
+** the way in which the journal file is finalized after the transaction is
+** committed or rolled back when running in "journal_mode=PERSIST" mode.
+** If a journal file does not contain a super-journal pointer, it is
+** finalized by overwriting the first journal header with zeroes. If
+** it does contain a super-journal pointer the journal file is finalized
+** by truncating it to zero bytes, just as if the connection were
+** running in "journal_mode=truncate" mode.
+**
+** Journal files that contain super-journal pointers cannot be finalized
+** simply by overwriting the first journal-header with zeroes, as the
+** super-journal pointer could interfere with hot-journal rollback of any
+** subsequently interrupted transaction that reuses the journal file.
+**
+** The flag is cleared as soon as the journal file is finalized (either
+** by PagerCommitPhaseTwo or PagerRollback). If an IO error prevents the
+** journal file from being successfully finalized, the setSuper flag
+** is cleared anyway (and the pager will move to ERROR state).
+**
+** doNotSpill
+**
+** This variable controls the behavior of cache-spills (calls made by
+** the pcache module to the pagerStress() routine to write cached data
+** to the file-system in order to free up memory).
+**
+** When bits SPILLFLAG_OFF or SPILLFLAG_ROLLBACK of doNotSpill are set,
+** writing to the database from pagerStress() is disabled altogether.
+** The SPILLFLAG_ROLLBACK bit is set in one very obscure case that comes
+** up during savepoint rollback: the pcache module must allocate a new
+** page, and spilling is disabled to prevent the journal file from being
+** written while it is being traversed by code in pager_playback(). The
+** SPILLFLAG_OFF case is a user preference.
+**
+** If the SPILLFLAG_NOSYNC bit is set, writing to the database from
+** pagerStress() is permitted, but syncing the journal file is not.
+** This flag is set by sqlite3PagerWrite() when the file-system sector-size
+** is larger than the database page-size in order to prevent a journal sync
+** from happening in between the journalling of two pages on the same sector.
+**
+** subjInMemory
+**
+** This is a boolean variable. If true, then any required sub-journal
+** is opened as an in-memory journal file. If false, then in-memory
+** sub-journals are only used for in-memory pager files.
+**
+** This variable is updated by the upper layer each time a new
+** write-transaction is opened.
+**
+** dbSize, dbOrigSize, dbFileSize
+**
+** Variable dbSize is set to the number of pages in the database file.
+** It is valid in PAGER_READER and higher states (all states except for
+** OPEN and ERROR).
+**
+** dbSize is set based on the size of the database file, which may be
+** larger than the size of the database (the value stored at offset
+** 28 of the database header by the btree). If the size of the file
+** is not an integer multiple of the page-size, the value stored in
+** dbSize is rounded down (i.e. a 5KB file with 2K page-size has dbSize==2).
+** Except, any file that is greater than 0 bytes in size is considered
+** to have at least one page. (i.e. a 1KB file with 2K page-size leads
+** to dbSize==1).
+**
+** During a write-transaction, if pages with page-numbers greater than
+** dbSize are modified in the cache, dbSize is updated accordingly.
+** Similarly, if the database is truncated using PagerTruncateImage(),
+** dbSize is updated.
+**
+** Variables dbOrigSize and dbFileSize are valid in states
+** PAGER_WRITER_LOCKED and higher. dbOrigSize is a copy of the dbSize
+** variable at the start of the transaction. It is used during rollback,
+** and to determine whether or not pages need to be journalled before
+** being modified.
+**
+** Throughout a write-transaction, dbFileSize contains the size of
+** the file on disk in pages. It is set to a copy of dbSize when the
+** write-transaction is first opened, and updated when VFS calls are made
+** to write or truncate the database file on disk.
+**
+** The only reason the dbFileSize variable is required is to suppress
+** unnecessary calls to xTruncate() after committing a transaction. If,
+** when a transaction is committed, the dbFileSize variable indicates
+** that the database file is larger than the database image (Pager.dbSize),
+** pager_truncate() is called. The pager_truncate() call uses xFilesize()
+** to measure the database file on disk, and then truncates it if required.
+** dbFileSize is not used when rolling back a transaction. In this case
+** pager_truncate() is called unconditionally (which means there may be
+** a call to xFilesize() that is not strictly required). In either case,
+** pager_truncate() may cause the file to become smaller or larger.
+**
+** dbHintSize
+**
+** The dbHintSize variable is used to limit the number of calls made to
+** the VFS xFileControl(FCNTL_SIZE_HINT) method.
+**
+** dbHintSize is set to a copy of the dbSize variable when a
+** write-transaction is opened (at the same time as dbFileSize and
+** dbOrigSize). If the xFileControl(FCNTL_SIZE_HINT) method is called,
+** dbHintSize is increased to the number of pages that correspond to the
+** size-hint passed to the method call. See pager_write_pagelist() for
+** details.
+**
+** errCode
+**
+** The Pager.errCode variable is only ever used in PAGER_ERROR state. It
+** is set to zero in all other states. In PAGER_ERROR state, Pager.errCode
+** is always set to SQLITE_FULL, SQLITE_IOERR or one of the SQLITE_IOERR_XXX
+** sub-codes.
+**
+** syncFlags, walSyncFlags
+**
+** syncFlags is either SQLITE_SYNC_NORMAL (0x02) or SQLITE_SYNC_FULL (0x03).
+** syncFlags is used for rollback mode. walSyncFlags is used for WAL mode
+** and contains the flags used to sync checkpoint operations in bits 0x04
+** and 0x08, and the flags used to sync transaction commits to the WAL
+** file in the lower two bits, matching the WAL_SYNC_FLAGS() and
+** CKPT_SYNC_FLAGS() macros defined in wal.h. In other words, to get the
+** correct sync flags for checkpoint operations, use ((walSyncFlags>>2)&0x03)
+** and to get the correct sync flags for transaction commit, use
+** (walSyncFlags&0x03). Note that with synchronous=NORMAL in WAL mode,
+** transaction commit is not synced, meaning that the 0x01 and 0x02 bits
+** are both zero.
+*/
+struct Pager {
+ sqlite3_vfs *pVfs; /* OS functions to use for IO */
+ u8 exclusiveMode; /* Boolean. True if locking_mode==EXCLUSIVE */
+ u8 journalMode; /* One of the PAGER_JOURNALMODE_* values */
+ u8 useJournal; /* Use a rollback journal on this file */
+ u8 noSync; /* Do not sync the journal if true */
+ u8 fullSync; /* Do extra syncs of the journal for robustness */
+ u8 extraSync; /* sync directory after journal delete */
+  u8 syncFlags;               /* SYNC_NORMAL or SYNC_FULL (if not noSync) */
+ u8 walSyncFlags; /* See description above */
+ u8 tempFile; /* zFilename is a temporary or immutable file */
+ u8 noLock; /* Do not lock (except in WAL mode) */
+ u8 readOnly; /* True for a read-only database */
+ u8 memDb; /* True to inhibit all file I/O */
+
+ /**************************************************************************
+ ** The following block contains those class members that change during
+ ** routine operation. Class members not in this block are either fixed
+ ** when the pager is first created or else only change when there is a
+ ** significant mode change (such as changing the page_size, locking_mode,
+ ** or the journal_mode). From another view, these class members describe
+ ** the "state" of the pager, while other class members describe the
+ ** "configuration" of the pager.
+ */
+ u8 eState; /* Pager state (OPEN, READER, WRITER_LOCKED..) */
+ u8 eLock; /* Current lock held on database file */
+ u8 changeCountDone; /* Set after incrementing the change-counter */
+ u8 setSuper; /* Super-jrnl name is written into jrnl */
+ u8 doNotSpill; /* Do not spill the cache when non-zero */
+ u8 subjInMemory; /* True to use in-memory sub-journals */
+ u8 bUseFetch; /* True to use xFetch() */
+ u8 hasHeldSharedLock; /* True if a shared lock has ever been held */
+ Pgno dbSize; /* Number of pages in the database */
+ Pgno dbOrigSize; /* dbSize before the current transaction */
+ Pgno dbFileSize; /* Number of pages in the database file */
+ Pgno dbHintSize; /* Value passed to FCNTL_SIZE_HINT call */
+ int errCode; /* One of several kinds of errors */
+ int nRec; /* Pages journalled since last j-header written */
+ u32 cksumInit; /* Quasi-random value added to every checksum */
+ u32 nSubRec; /* Number of records written to sub-journal */
+ Bitvec *pInJournal; /* One bit for each page in the database file */
+ sqlite3_file *fd; /* File descriptor for database */
+ sqlite3_file *jfd; /* File descriptor for main journal */
+ sqlite3_file *sjfd; /* File descriptor for sub-journal */
+ i64 journalOff; /* Current write offset in the journal file */
+ i64 journalHdr; /* Byte offset to previous journal header */
+ sqlite3_backup *pBackup; /* Pointer to list of ongoing backup processes */
+ PagerSavepoint *aSavepoint; /* Array of active savepoints */
+ int nSavepoint; /* Number of elements in aSavepoint[] */
+ u32 iDataVersion; /* Changes whenever database content changes */
+ char dbFileVers[16]; /* Changes whenever database file changes */
+
+ int nMmapOut; /* Number of mmap pages currently outstanding */
+ sqlite3_int64 szMmap; /* Desired maximum mmap size */
+ PgHdr *pMmapFreelist; /* List of free mmap page headers (pDirty) */
+ /*
+ ** End of the routinely-changing class members
+ ***************************************************************************/
+
+ u16 nExtra; /* Add this many bytes to each in-memory page */
+ i16 nReserve; /* Number of unused bytes at end of each page */
+ u32 vfsFlags; /* Flags for sqlite3_vfs.xOpen() */
+ u32 sectorSize; /* Assumed sector size during rollback */
+ int pageSize; /* Number of bytes in a page */
+ Pgno mxPgno; /* Maximum allowed size of the database */
+ i64 journalSizeLimit; /* Size limit for persistent journal files */
+ char *zFilename; /* Name of the database file */
+ char *zJournal; /* Name of the journal file */
+ int (*xBusyHandler)(void*); /* Function to call when busy */
+ void *pBusyHandlerArg; /* Context argument for xBusyHandler */
+ int aStat[4]; /* Total cache hits, misses, writes, spills */
+#ifdef SQLITE_TEST
+ int nRead; /* Database pages read */
+#endif
+ void (*xReiniter)(DbPage*); /* Call this routine when reloading pages */
+  int (*xGet)(Pager*,Pgno,DbPage**,int); /* Routine to fetch a page */
+ char *pTmpSpace; /* Pager.pageSize bytes of space for tmp use */
+ PCache *pPCache; /* Pointer to page cache object */
+#ifndef SQLITE_OMIT_WAL
+ Wal *pWal; /* Write-ahead log used by "journal_mode=wal" */
+ char *zWal; /* File name for write-ahead log */
+#endif
+};
+
+/*
+** Indexes for use with Pager.aStat[]. The Pager.aStat[] array contains
+** the values accessed by passing SQLITE_DBSTATUS_CACHE_HIT, CACHE_MISS
+** or CACHE_WRITE to sqlite3_db_status().
+*/
+#define PAGER_STAT_HIT 0
+#define PAGER_STAT_MISS 1
+#define PAGER_STAT_WRITE 2
+#define PAGER_STAT_SPILL 3
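+
+/*
+** A minimal sketch (inside "#if 0", never compiled) of how an application
+** reads these counters through the public sqlite3_db_status() interface,
+** assuming an open database connection "db":
+*/
+#if 0
+static void printCacheStats(sqlite3 *db){
+  int nCur, nHi;
+  /* For the CACHE_HIT/MISS/WRITE opcodes the high-water mark is not
+  ** meaningful and nHi is simply set to zero. */
+  sqlite3_db_status(db, SQLITE_DBSTATUS_CACHE_HIT, &nCur, &nHi, 0);
+  printf("cache hits:   %d\n", nCur);
+  sqlite3_db_status(db, SQLITE_DBSTATUS_CACHE_MISS, &nCur, &nHi, 0);
+  printf("cache misses: %d\n", nCur);
+}
+#endif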
+
+/*
+** The following global variables hold counters used for
+** testing purposes only. These variables do not exist in
+** a non-testing build. These variables are not thread-safe.
+*/
+#ifdef SQLITE_TEST
+SQLITE_API int sqlite3_pager_readdb_count = 0; /* Number of full pages read from DB */
+SQLITE_API int sqlite3_pager_writedb_count = 0; /* Number of full pages written to DB */
+SQLITE_API int sqlite3_pager_writej_count = 0; /* Number of pages written to journal */
+# define PAGER_INCR(v) v++
+#else
+# define PAGER_INCR(v)
+#endif
+
+
+
+/*
+** Journal files begin with the following magic string. The data
+** was obtained from /dev/random. It is used only as a sanity check.
+**
+** Since version 2.8.0, the journal format contains additional sanity
+** checking information. If the power fails while the journal is being
+** written, semi-random garbage data might appear in the journal
+** file after power is restored. If an attempt is then made
+** to roll the journal back, the database could be corrupted. The additional
+** sanity checking data is an attempt to discover the garbage in the
+** journal and ignore it.
+**
+** The sanity checking information for the new journal format consists
+** of a 32-bit checksum on each page of data. The checksum covers both
+** the page number and the pPager->pageSize bytes of data for the page.
+** This cksum is initialized to a 32-bit random value that appears in the
+** journal file right after the header. The random initializer is important,
+** because garbage data that appears at the end of a journal is likely
+** data that was once in other files that have now been deleted. If the
+** garbage data came from an obsolete journal file, the checksums might
+** be correct. But by initializing the checksum to a random value which
+** is different for every journal, we minimize that risk.
+*/
+static const unsigned char aJournalMagic[] = {
+ 0xd9, 0xd5, 0x05, 0xf9, 0x20, 0xa1, 0x63, 0xd7,
+};
+
+/*
+** The size of each page record in the journal is given by
+** the following macro.
+*/
+#define JOURNAL_PG_SZ(pPager) ((pPager->pageSize) + 8)
+
+/*
+** The journal header size for this pager. This is usually the same
+** size as a single disk sector. See also setSectorSize().
+*/
+#define JOURNAL_HDR_SZ(pPager) (pPager->sectorSize)
+
+/*
+** The macro MEMDB is true if we are dealing with an in-memory database.
+** We do this as a macro so that if the SQLITE_OMIT_MEMORYDB macro is set,
+** the value of MEMDB will be a constant and the compiler will optimize
+** out code that would never execute.
+*/
+#ifdef SQLITE_OMIT_MEMORYDB
+# define MEMDB 0
+#else
+# define MEMDB pPager->memDb
+#endif
+
+/*
+** The macro USEFETCH is true if we are allowed to use the xFetch and xUnfetch
+** interfaces to access the database using memory-mapped I/O.
+*/
+#if SQLITE_MAX_MMAP_SIZE>0
+# define USEFETCH(x) ((x)->bUseFetch)
+#else
+# define USEFETCH(x) 0
+#endif
+
+/*
+** The argument to this macro is a file descriptor (type sqlite3_file*).
+** Return 0 if it is not open, or 1 if it is.
+**
+** This is so that expressions can be written as:
+**
+** if( isOpen(pPager->jfd) ){ ...
+**
+** instead of
+**
+** if( pPager->jfd->pMethods ){ ...
+*/
+#define isOpen(pFd) ((pFd)->pMethods!=0)
+
+#ifdef SQLITE_DIRECT_OVERFLOW_READ
+/*
+** Return true if page pgno can be read directly from the database file
+** by the b-tree layer. This is the case if:
+**
+** * the database file is open,
+** * there are no dirty pages in the cache, and
+** * the desired page is not currently in the wal file.
+*/
+SQLITE_PRIVATE int sqlite3PagerDirectReadOk(Pager *pPager, Pgno pgno){
+ if( pPager->fd->pMethods==0 ) return 0;
+ if( sqlite3PCacheIsDirty(pPager->pPCache) ) return 0;
+#ifndef SQLITE_OMIT_WAL
+ if( pPager->pWal ){
+ u32 iRead = 0;
+ int rc;
+ rc = sqlite3WalFindFrame(pPager->pWal, pgno, &iRead);
+ return (rc==SQLITE_OK && iRead==0);
+ }
+#endif
+ return 1;
+}
+#endif
+
+#ifndef SQLITE_OMIT_WAL
+# define pagerUseWal(x) ((x)->pWal!=0)
+#else
+# define pagerUseWal(x) 0
+# define pagerRollbackWal(x) 0
+# define pagerWalFrames(v,w,x,y) 0
+# define pagerOpenWalIfPresent(z) SQLITE_OK
+# define pagerBeginReadTransaction(z) SQLITE_OK
+#endif
+
+#ifndef NDEBUG
+/*
+** Usage:
+**
+** assert( assert_pager_state(pPager) );
+**
+** This function runs many asserts to try to find inconsistencies in
+** the internal state of the Pager object.
+*/
+static int assert_pager_state(Pager *p){
+ Pager *pPager = p;
+
+ /* State must be valid. */
+ assert( p->eState==PAGER_OPEN
+ || p->eState==PAGER_READER
+ || p->eState==PAGER_WRITER_LOCKED
+ || p->eState==PAGER_WRITER_CACHEMOD
+ || p->eState==PAGER_WRITER_DBMOD
+ || p->eState==PAGER_WRITER_FINISHED
+ || p->eState==PAGER_ERROR
+ );
+
+ /* Regardless of the current state, a temp-file connection always behaves
+ ** as if it has an exclusive lock on the database file. It never updates
+ ** the change-counter field, so the changeCountDone flag is always set.
+ */
+ assert( p->tempFile==0 || p->eLock==EXCLUSIVE_LOCK );
+ assert( p->tempFile==0 || pPager->changeCountDone );
+
+ /* If the useJournal flag is clear, the journal-mode must be "OFF".
+ ** And if the journal-mode is "OFF", the journal file must not be open.
+ */
+ assert( p->journalMode==PAGER_JOURNALMODE_OFF || p->useJournal );
+ assert( p->journalMode!=PAGER_JOURNALMODE_OFF || !isOpen(p->jfd) );
+
+  /* Check that MEMDB implies noSync and an in-memory journal. Since
+  ** this means an in-memory pager performs no IO at all, it cannot encounter
+  ** either SQLITE_IOERR or SQLITE_FULL during rollback or while finalizing
+  ** a journal file (although the in-memory journal implementation may
+  ** return SQLITE_IOERR_NOMEM while the journal file is being written). It
+  ** is therefore not possible for an in-memory pager to enter the ERROR
+  ** state.
+ */
+ if( MEMDB ){
+ assert( !isOpen(p->fd) );
+ assert( p->noSync );
+ assert( p->journalMode==PAGER_JOURNALMODE_OFF
+ || p->journalMode==PAGER_JOURNALMODE_MEMORY
+ );
+ assert( p->eState!=PAGER_ERROR && p->eState!=PAGER_OPEN );
+ assert( pagerUseWal(p)==0 );
+ }
+
+ /* If changeCountDone is set, a RESERVED lock or greater must be held
+ ** on the file.
+ */
+ assert( pPager->changeCountDone==0 || pPager->eLock>=RESERVED_LOCK );
+ assert( p->eLock!=PENDING_LOCK );
+
+ switch( p->eState ){
+ case PAGER_OPEN:
+ assert( !MEMDB );
+ assert( pPager->errCode==SQLITE_OK );
+ assert( sqlite3PcacheRefCount(pPager->pPCache)==0 || pPager->tempFile );
+ break;
+
+ case PAGER_READER:
+ assert( pPager->errCode==SQLITE_OK );
+ assert( p->eLock!=UNKNOWN_LOCK );
+ assert( p->eLock>=SHARED_LOCK );
+ break;
+
+ case PAGER_WRITER_LOCKED:
+ assert( p->eLock!=UNKNOWN_LOCK );
+ assert( pPager->errCode==SQLITE_OK );
+ if( !pagerUseWal(pPager) ){
+ assert( p->eLock>=RESERVED_LOCK );
+ }
+ assert( pPager->dbSize==pPager->dbOrigSize );
+ assert( pPager->dbOrigSize==pPager->dbFileSize );
+ assert( pPager->dbOrigSize==pPager->dbHintSize );
+ assert( pPager->setSuper==0 );
+ break;
+
+ case PAGER_WRITER_CACHEMOD:
+ assert( p->eLock!=UNKNOWN_LOCK );
+ assert( pPager->errCode==SQLITE_OK );
+ if( !pagerUseWal(pPager) ){
+      /* It is possible, if journal_mode=wal here, that neither the
+      ** journal file nor the WAL file is open. This happens during
+ ** a rollback transaction that switches from journal_mode=off
+ ** to journal_mode=wal.
+ */
+ assert( p->eLock>=RESERVED_LOCK );
+ assert( isOpen(p->jfd)
+ || p->journalMode==PAGER_JOURNALMODE_OFF
+ || p->journalMode==PAGER_JOURNALMODE_WAL
+ );
+ }
+ assert( pPager->dbOrigSize==pPager->dbFileSize );
+ assert( pPager->dbOrigSize==pPager->dbHintSize );
+ break;
+
+ case PAGER_WRITER_DBMOD:
+ assert( p->eLock==EXCLUSIVE_LOCK );
+ assert( pPager->errCode==SQLITE_OK );
+ assert( !pagerUseWal(pPager) );
+ assert( p->eLock>=EXCLUSIVE_LOCK );
+ assert( isOpen(p->jfd)
+ || p->journalMode==PAGER_JOURNALMODE_OFF
+ || p->journalMode==PAGER_JOURNALMODE_WAL
+ || (sqlite3OsDeviceCharacteristics(p->fd)&SQLITE_IOCAP_BATCH_ATOMIC)
+ );
+ assert( pPager->dbOrigSize<=pPager->dbHintSize );
+ break;
+
+ case PAGER_WRITER_FINISHED:
+ assert( p->eLock==EXCLUSIVE_LOCK );
+ assert( pPager->errCode==SQLITE_OK );
+ assert( !pagerUseWal(pPager) );
+ assert( isOpen(p->jfd)
+ || p->journalMode==PAGER_JOURNALMODE_OFF
+ || p->journalMode==PAGER_JOURNALMODE_WAL
+ || (sqlite3OsDeviceCharacteristics(p->fd)&SQLITE_IOCAP_BATCH_ATOMIC)
+ );
+ break;
+
+ case PAGER_ERROR:
+ /* There must be at least one outstanding reference to the pager if
+ ** in ERROR state. Otherwise the pager should have already dropped
+ ** back to OPEN state.
+ */
+ assert( pPager->errCode!=SQLITE_OK );
+ assert( sqlite3PcacheRefCount(pPager->pPCache)>0 || pPager->tempFile );
+ break;
+ }
+
+ return 1;
+}
+#endif /* ifndef NDEBUG */
+
+#ifdef SQLITE_DEBUG
+/*
+** Return a pointer to a human readable string in a static buffer
+** containing the state of the Pager object passed as an argument. This
+** is intended to be used within debuggers. For example, as an alternative
+** to "print *pPager" in gdb:
+**
+** (gdb) printf "%s", print_pager_state(pPager)
+**
+** This routine has external linkage in order to suppress compiler warnings
+** about an unused function. It is enclosed within SQLITE_DEBUG and so does
+** not appear in normal builds.
+*/
+char *print_pager_state(Pager *p){
+ static char zRet[1024];
+
+ sqlite3_snprintf(1024, zRet,
+ "Filename: %s\n"
+ "State: %s errCode=%d\n"
+ "Lock: %s\n"
+ "Locking mode: locking_mode=%s\n"
+ "Journal mode: journal_mode=%s\n"
+ "Backing store: tempFile=%d memDb=%d useJournal=%d\n"
+ "Journal: journalOff=%lld journalHdr=%lld\n"
+ "Size: dbsize=%d dbOrigSize=%d dbFileSize=%d\n"
+ , p->zFilename
+ , p->eState==PAGER_OPEN ? "OPEN" :
+ p->eState==PAGER_READER ? "READER" :
+ p->eState==PAGER_WRITER_LOCKED ? "WRITER_LOCKED" :
+ p->eState==PAGER_WRITER_CACHEMOD ? "WRITER_CACHEMOD" :
+ p->eState==PAGER_WRITER_DBMOD ? "WRITER_DBMOD" :
+ p->eState==PAGER_WRITER_FINISHED ? "WRITER_FINISHED" :
+ p->eState==PAGER_ERROR ? "ERROR" : "?error?"
+ , (int)p->errCode
+ , p->eLock==NO_LOCK ? "NO_LOCK" :
+ p->eLock==RESERVED_LOCK ? "RESERVED" :
+ p->eLock==EXCLUSIVE_LOCK ? "EXCLUSIVE" :
+ p->eLock==SHARED_LOCK ? "SHARED" :
+ p->eLock==UNKNOWN_LOCK ? "UNKNOWN" : "?error?"
+ , p->exclusiveMode ? "exclusive" : "normal"
+ , p->journalMode==PAGER_JOURNALMODE_MEMORY ? "memory" :
+ p->journalMode==PAGER_JOURNALMODE_OFF ? "off" :
+ p->journalMode==PAGER_JOURNALMODE_DELETE ? "delete" :
+ p->journalMode==PAGER_JOURNALMODE_PERSIST ? "persist" :
+ p->journalMode==PAGER_JOURNALMODE_TRUNCATE ? "truncate" :
+ p->journalMode==PAGER_JOURNALMODE_WAL ? "wal" : "?error?"
+ , (int)p->tempFile, (int)p->memDb, (int)p->useJournal
+ , p->journalOff, p->journalHdr
+ , (int)p->dbSize, (int)p->dbOrigSize, (int)p->dbFileSize
+ );
+
+ return zRet;
+}
+#endif
+
+/* Forward references to the various page getters */
+static int getPageNormal(Pager*,Pgno,DbPage**,int);
+static int getPageError(Pager*,Pgno,DbPage**,int);
+#if SQLITE_MAX_MMAP_SIZE>0
+static int getPageMMap(Pager*,Pgno,DbPage**,int);
+#endif
+
+/*
+** Set the Pager.xGet method for the appropriate routine used to fetch
+** content from the pager.
+*/
+static void setGetterMethod(Pager *pPager){
+ if( pPager->errCode ){
+ pPager->xGet = getPageError;
+#if SQLITE_MAX_MMAP_SIZE>0
+ }else if( USEFETCH(pPager) ){
+ pPager->xGet = getPageMMap;
+#endif /* SQLITE_MAX_MMAP_SIZE>0 */
+ }else{
+ pPager->xGet = getPageNormal;
+ }
+}
+
+/*
+** Return true if it is necessary to write page *pPg into the sub-journal.
+** A page needs to be written into the sub-journal if there exists one
+** or more open savepoints for which:
+**
+** * The page-number is less than or equal to PagerSavepoint.nOrig, and
+** * The bit corresponding to the page-number is not set in
+** PagerSavepoint.pInSavepoint.
+*/
+static int subjRequiresPage(PgHdr *pPg){
+ Pager *pPager = pPg->pPager;
+ PagerSavepoint *p;
+ Pgno pgno = pPg->pgno;
+ int i;
+ for(i=0; i<pPager->nSavepoint; i++){
+ p = &pPager->aSavepoint[i];
+ if( p->nOrig>=pgno && 0==sqlite3BitvecTestNotNull(p->pInSavepoint, pgno) ){
+ return 1;
+ }
+ }
+ return 0;
+}
+
+#ifdef SQLITE_DEBUG
+/*
+** Return true if the page is already in the journal file.
+*/
+static int pageInJournal(Pager *pPager, PgHdr *pPg){
+ return sqlite3BitvecTest(pPager->pInJournal, pPg->pgno);
+}
+#endif
+
+/*
+** Read a 32-bit integer from the given file descriptor. Store the integer
+** that is read in *pRes. Return SQLITE_OK if everything worked, or an
+** error code if something goes wrong.
+**
+** All values are stored on disk as big-endian.
+*/
+static int read32bits(sqlite3_file *fd, i64 offset, u32 *pRes){
+ unsigned char ac[4];
+ int rc = sqlite3OsRead(fd, ac, sizeof(ac), offset);
+ if( rc==SQLITE_OK ){
+ *pRes = sqlite3Get4byte(ac);
+ }
+ return rc;
+}
+
+/*
+** Write a 32-bit integer into a string buffer in big-endian byte order.
+*/
+#define put32bits(A,B) sqlite3Put4byte((u8*)A,B)
+
+
+/*
+** Write a 32-bit integer into the given file descriptor. Return SQLITE_OK
+** on success or an error code if something goes wrong.
+*/
+static int write32bits(sqlite3_file *fd, i64 offset, u32 val){
+ char ac[4];
+ put32bits(ac, val);
+ return sqlite3OsWrite(fd, ac, 4, offset);
+}
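+
+/* Illustrative sketch (not part of the original pager code): the
+** big-endian byte order that read32bits() and write32bits() rely on,
+** spelled out with plain shifts. sqlite3Get4byte()/sqlite3Put4byte()
+** are assumed to behave like these helpers. Compiled out with #if 0.
+*/
+#if 0
+#include <assert.h>
+static unsigned int get4byteDemo(const unsigned char *p){
+  return ((unsigned int)p[0]<<24) | ((unsigned int)p[1]<<16)
+       | ((unsigned int)p[2]<<8)  |  (unsigned int)p[3];
+}
+static void put4byteDemo(unsigned char *p, unsigned int v){
+  p[0] = (unsigned char)(v>>24);  /* most significant byte first */
+  p[1] = (unsigned char)(v>>16);
+  p[2] = (unsigned char)(v>>8);
+  p[3] = (unsigned char)(v);
+}
+static void endianDemo(void){
+  unsigned char a[4];
+  put4byteDemo(a, 0x01020304);
+  assert( a[0]==0x01 && a[3]==0x04 );
+  assert( get4byteDemo(a)==0x01020304 );
+}
+#endif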
+
+/*
+** Unlock the database file to level eLock, which must be either NO_LOCK
+** or SHARED_LOCK. Regardless of whether or not the call to xUnlock()
+** succeeds, set the Pager.eLock variable to match the (attempted) new lock.
+**
+** Except, if Pager.eLock is set to UNKNOWN_LOCK when this function is
+** called, do not modify it. See the comment above the #define of
+** UNKNOWN_LOCK for an explanation of this.
+*/
+static int pagerUnlockDb(Pager *pPager, int eLock){
+ int rc = SQLITE_OK;
+
+ assert( !pPager->exclusiveMode || pPager->eLock==eLock );
+ assert( eLock==NO_LOCK || eLock==SHARED_LOCK );
+ assert( eLock!=NO_LOCK || pagerUseWal(pPager)==0 );
+ if( isOpen(pPager->fd) ){
+ assert( pPager->eLock>=eLock );
+ rc = pPager->noLock ? SQLITE_OK : sqlite3OsUnlock(pPager->fd, eLock);
+ if( pPager->eLock!=UNKNOWN_LOCK ){
+ pPager->eLock = (u8)eLock;
+ }
+ IOTRACE(("UNLOCK %p %d\n", pPager, eLock))
+ }
+ pPager->changeCountDone = pPager->tempFile; /* ticket fb3b3024ea238d5c */
+ return rc;
+}
+
+/*
+** Lock the database file to level eLock, which must be either SHARED_LOCK,
+** RESERVED_LOCK or EXCLUSIVE_LOCK. If the caller is successful, set the
+** Pager.eLock variable to the new locking state.
+**
+** Except, if Pager.eLock is set to UNKNOWN_LOCK when this function is
+** called, do not modify it unless the new locking state is EXCLUSIVE_LOCK.
+** See the comment above the #define of UNKNOWN_LOCK for an explanation
+** of this.
+*/
+static int pagerLockDb(Pager *pPager, int eLock){
+ int rc = SQLITE_OK;
+
+ assert( eLock==SHARED_LOCK || eLock==RESERVED_LOCK || eLock==EXCLUSIVE_LOCK );
+ if( pPager->eLock<eLock || pPager->eLock==UNKNOWN_LOCK ){
+ rc = pPager->noLock ? SQLITE_OK : sqlite3OsLock(pPager->fd, eLock);
+ if( rc==SQLITE_OK && (pPager->eLock!=UNKNOWN_LOCK||eLock==EXCLUSIVE_LOCK) ){
+ pPager->eLock = (u8)eLock;
+ IOTRACE(("LOCK %p %d\n", pPager, eLock))
+ }
+ }
+ return rc;
+}
+
+/*
+** This function determines whether or not the atomic-write or
+** atomic-batch-write optimizations can be used with this pager. The
+** atomic-write optimization can be used if:
+**
+** (a) the value returned by OsDeviceCharacteristics() indicates that
+** a database page may be written atomically, and
+** (b) the value returned by OsSectorSize() is less than or equal
+** to the page size.
+**
+** If it can be used, then the value returned is the size of the journal
+** file when it contains rollback data for exactly one page.
+**
+** The atomic-batch-write optimization can be used if OsDeviceCharacteristics()
+** returns a value with the SQLITE_IOCAP_BATCH_ATOMIC bit set. -1 is
+** returned in this case.
+**
+** If neither optimization can be used, 0 is returned.
+*/
+static int jrnlBufferSize(Pager *pPager){
+ assert( !MEMDB );
+
+#if defined(SQLITE_ENABLE_ATOMIC_WRITE) \
+ || defined(SQLITE_ENABLE_BATCH_ATOMIC_WRITE)
+ int dc; /* Device characteristics */
+
+ assert( isOpen(pPager->fd) );
+ dc = sqlite3OsDeviceCharacteristics(pPager->fd);
+#else
+ UNUSED_PARAMETER(pPager);
+#endif
+
+#ifdef SQLITE_ENABLE_BATCH_ATOMIC_WRITE
+ if( pPager->dbSize>0 && (dc&SQLITE_IOCAP_BATCH_ATOMIC) ){
+ return -1;
+ }
+#endif
+
+#ifdef SQLITE_ENABLE_ATOMIC_WRITE
+ {
+ int nSector = pPager->sectorSize;
+ int szPage = pPager->pageSize;
+
+ assert(SQLITE_IOCAP_ATOMIC512==(512>>8));
+ assert(SQLITE_IOCAP_ATOMIC64K==(65536>>8));
+ if( 0==(dc&(SQLITE_IOCAP_ATOMIC|(szPage>>8)) || nSector>szPage) ){
+ return 0;
+ }
+ }
+
+ return JOURNAL_HDR_SZ(pPager) + JOURNAL_PG_SZ(pPager);
+#endif
+
+ return 0;
+}
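+
+/* Illustrative sketch (not part of the original pager code): how a
+** hypothetical caller might interpret the three possible results of
+** jrnlBufferSize(), per the contract described above. Compiled out
+** with #if 0.
+*/
+#if 0
+static void jrnlBufferSizeDemo(Pager *pPager){
+  int n = jrnlBufferSize(pPager);
+  if( n<0 ){
+    /* SQLITE_IOCAP_BATCH_ATOMIC: the batch-atomic-write path applies */
+  }else if( n==0 ){
+    /* Neither optimization is available: use an ordinary journal */
+  }else{
+    /* Atomic-write: n is the journal size for a one-page transaction */
+  }
+}
+#endif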
+
+/*
+** If SQLITE_CHECK_PAGES is defined then we do some sanity checking
+** on the cache using a hash function. This is used for testing
+** and debugging only.
+*/
+#ifdef SQLITE_CHECK_PAGES
+/*
+** Return a 32-bit hash of the page data for pPage.
+*/
+static u32 pager_datahash(int nByte, unsigned char *pData){
+ u32 hash = 0;
+ int i;
+ for(i=0; i<nByte; i++){
+ hash = (hash*1039) + pData[i];
+ }
+ return hash;
+}
+static u32 pager_pagehash(PgHdr *pPage){
+ return pager_datahash(pPage->pPager->pageSize, (unsigned char *)pPage->pData);
+}
+static void pager_set_pagehash(PgHdr *pPage){
+ pPage->pageHash = pager_pagehash(pPage);
+}
+
+/*
+** The CHECK_PAGE macro takes a PgHdr* as an argument. If SQLITE_CHECK_PAGES
+** is defined, and NDEBUG is not defined, an assert() statement checks
+** that the page is either dirty or still matches the calculated page-hash.
+*/
+#define CHECK_PAGE(x) checkPage(x)
+static void checkPage(PgHdr *pPg){
+ Pager *pPager = pPg->pPager;
+ assert( pPager->eState!=PAGER_ERROR );
+ assert( (pPg->flags&PGHDR_DIRTY) || pPg->pageHash==pager_pagehash(pPg) );
+}
+
+#else
+#define pager_datahash(X,Y) 0
+#define pager_pagehash(X) 0
+#define pager_set_pagehash(X)
+#define CHECK_PAGE(x)
+#endif /* SQLITE_CHECK_PAGES */
+
+/*
+** When this is called the journal file for pager pPager must be open.
+** This function attempts to read a super-journal file name from the
+** end of the file and, if successful, copies it into memory supplied
+** by the caller. See comments above writeSuperJournal() for the format
+** used to store a super-journal file name at the end of a journal file.
+**
+** zSuper must point to a buffer of at least nSuper bytes allocated by
+** the caller. This should be sqlite3_vfs.mxPathname+1 (to ensure there is
+** enough space to write the super-journal name). If the super-journal
+** name in the journal is longer than nSuper bytes (including a
+** nul-terminator), then this is handled as if no super-journal name
+** were present in the journal.
+**
+** If a super-journal file name is present at the end of the journal
+** file, then it is copied into the buffer pointed to by zSuper. A
+** nul-terminator byte is appended to the buffer following the
+** super-journal file name.
+**
+** If it is determined that no super-journal file name is present,
+** zSuper[0] is set to 0 and SQLITE_OK is returned.
+**
+** If an error occurs while reading from the journal file, an SQLite
+** error code is returned.
+*/
+static int readSuperJournal(sqlite3_file *pJrnl, char *zSuper, u32 nSuper){
+ int rc; /* Return code */
+ u32 len; /* Length in bytes of super-journal name */
+ i64 szJ; /* Total size in bytes of journal file pJrnl */
+ u32 cksum; /* MJ checksum value read from journal */
+ u32 u; /* Unsigned loop counter */
+ unsigned char aMagic[8]; /* A buffer to hold the magic header */
+ zSuper[0] = '\0';
+
+ if( SQLITE_OK!=(rc = sqlite3OsFileSize(pJrnl, &szJ))
+ || szJ<16
+ || SQLITE_OK!=(rc = read32bits(pJrnl, szJ-16, &len))
+ || len>=nSuper
+ || len>szJ-16
+ || len==0
+ || SQLITE_OK!=(rc = read32bits(pJrnl, szJ-12, &cksum))
+ || SQLITE_OK!=(rc = sqlite3OsRead(pJrnl, aMagic, 8, szJ-8))
+ || memcmp(aMagic, aJournalMagic, 8)
+ || SQLITE_OK!=(rc = sqlite3OsRead(pJrnl, zSuper, len, szJ-16-len))
+ ){
+ return rc;
+ }
+
+ /* See if the checksum matches the super-journal name */
+ for(u=0; u<len; u++){
+ cksum -= zSuper[u];
+ }
+ if( cksum ){
+ /* If the checksum doesn't add up, then one or more of the disk sectors
+    ** containing the super-journal filename is corrupted. In that case
+    ** the journal should definitely be rolled back, so just return
+    ** SQLITE_OK and report an empty (nul) super-journal filename.
+ */
+ len = 0;
+ }
+ zSuper[len] = '\0';
+ zSuper[len+1] = '\0';
+
+ return SQLITE_OK;
+}
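+
+/* Illustrative sketch (not part of the original pager code): a
+** hypothetical caller of readSuperJournal(). The buffer sizing follows
+** the contract described above (sqlite3_vfs.mxPathname+1, plus one byte
+** for the second nul written on success). Compiled out with #if 0.
+*/
+#if 0
+static int superJournalNameDemo(sqlite3_vfs *pVfs, sqlite3_file *pJrnl){
+  int rc;
+  char *zSuper = sqlite3_malloc(pVfs->mxPathname + 2);
+  if( zSuper==0 ) return SQLITE_NOMEM;
+  rc = readSuperJournal(pJrnl, zSuper, (u32)(pVfs->mxPathname + 1));
+  if( rc==SQLITE_OK && zSuper[0] ){
+    /* zSuper now holds a nul-terminated super-journal name */
+  }
+  sqlite3_free(zSuper);
+  return rc;
+}
+#endif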
+
+/*
+** Return the offset of the sector boundary at or immediately
+** following the value in pPager->journalOff, assuming a sector
+** size of pPager->sectorSize bytes.
+**
+** i.e. for a sector size of 512:
+**
+** Pager.journalOff Return value
+** ---------------------------------------
+** 0 0
+** 512 512
+** 100 512
+** 2000 2048
+**
+*/
+static i64 journalHdrOffset(Pager *pPager){
+ i64 offset = 0;
+ i64 c = pPager->journalOff;
+ if( c ){
+ offset = ((c-1)/JOURNAL_HDR_SZ(pPager) + 1) * JOURNAL_HDR_SZ(pPager);
+ }
+ assert( offset%JOURNAL_HDR_SZ(pPager)==0 );
+ assert( offset>=c );
+ assert( (offset-c)<JOURNAL_HDR_SZ(pPager) );
+ return offset;
+}
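+
+/* Illustrative sketch (not part of the original pager code): the same
+** round-up-to-sector computation in isolation, checked against the
+** values from the table above for a 512-byte sector. Compiled out with
+** #if 0.
+*/
+#if 0
+#include <assert.h>
+static long long roundToSectorDemo(long long c, long long szSector){
+  return c ? ((c-1)/szSector + 1)*szSector : 0;
+}
+static void journalHdrOffsetDemo(void){
+  assert( roundToSectorDemo(0, 512)==0 );
+  assert( roundToSectorDemo(512, 512)==512 );
+  assert( roundToSectorDemo(100, 512)==512 );
+  assert( roundToSectorDemo(2000, 512)==2048 );
+}
+#endif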
+
+/*
+** The journal file must be open when this function is called.
+**
+** This function is a no-op if the journal file has not been written to
+** within the current transaction (i.e. if Pager.journalOff==0).
+**
+** If doTruncate is non-zero or the Pager.journalSizeLimit variable is
+** set to 0, then truncate the journal file to zero bytes in size. Otherwise,
+** zero the 28-byte header at the start of the journal file. In either case,
+** if the pager is not in no-sync mode, sync the journal file immediately
+** after writing or truncating it.
+**
+** If Pager.journalSizeLimit is set to a positive, non-zero value, and
+** following the truncation or zeroing described above the size of the
+** journal file in bytes is larger than this value, then truncate the
+** journal file to Pager.journalSizeLimit bytes. The journal file does
+** not need to be synced following this operation.
+**
+** If an IO error occurs, abandon processing and return the IO error code.
+** Otherwise, return SQLITE_OK.
+*/
+static int zeroJournalHdr(Pager *pPager, int doTruncate){
+ int rc = SQLITE_OK; /* Return code */
+ assert( isOpen(pPager->jfd) );
+ assert( !sqlite3JournalIsInMemory(pPager->jfd) );
+ if( pPager->journalOff ){
+ const i64 iLimit = pPager->journalSizeLimit; /* Local cache of jsl */
+
+ IOTRACE(("JZEROHDR %p\n", pPager))
+ if( doTruncate || iLimit==0 ){
+ rc = sqlite3OsTruncate(pPager->jfd, 0);
+ }else{
+ static const char zeroHdr[28] = {0};
+ rc = sqlite3OsWrite(pPager->jfd, zeroHdr, sizeof(zeroHdr), 0);
+ }
+ if( rc==SQLITE_OK && !pPager->noSync ){
+ rc = sqlite3OsSync(pPager->jfd, SQLITE_SYNC_DATAONLY|pPager->syncFlags);
+ }
+
+ /* At this point the transaction is committed but the write lock
+ ** is still held on the file. If there is a size limit configured for
+ ** the persistent journal and the journal file currently consumes more
+ ** space than that limit allows for, truncate it now. There is no need
+ ** to sync the file following this operation.
+ */
+ if( rc==SQLITE_OK && iLimit>0 ){
+ i64 sz;
+ rc = sqlite3OsFileSize(pPager->jfd, &sz);
+ if( rc==SQLITE_OK && sz>iLimit ){
+ rc = sqlite3OsTruncate(pPager->jfd, iLimit);
+ }
+ }
+ }
+ return rc;
+}
+
+/*
+** The journal file must be open when this routine is called. A journal
+** header (JOURNAL_HDR_SZ bytes) is written into the journal file at the
+** current location.
+**
+** The format for the journal header is as follows:
+** - 8 bytes: Magic identifying journal format.
+** - 4 bytes: Number of records in journal, or -1 if no-sync mode is on.
+** - 4 bytes: Random checksum initializer for the page checksums.
+** - 4 bytes: Initial database page count.
+** - 4 bytes: Sector size used by the process that wrote this journal.
+** - 4 bytes: Database page size.
+**
+** Followed by (JOURNAL_HDR_SZ - 28) bytes of unused space.
+*/
+static int writeJournalHdr(Pager *pPager){
+ int rc = SQLITE_OK; /* Return code */
+ char *zHeader = pPager->pTmpSpace; /* Temporary space used to build header */
+ u32 nHeader = (u32)pPager->pageSize;/* Size of buffer pointed to by zHeader */
+ u32 nWrite; /* Bytes of header sector written */
+ int ii; /* Loop counter */
+
+ assert( isOpen(pPager->jfd) ); /* Journal file must be open. */
+
+ if( nHeader>JOURNAL_HDR_SZ(pPager) ){
+ nHeader = JOURNAL_HDR_SZ(pPager);
+ }
+
+ /* If there are active savepoints and any of them were created
+ ** since the most recent journal header was written, update the
+ ** PagerSavepoint.iHdrOffset fields now.
+ */
+ for(ii=0; ii<pPager->nSavepoint; ii++){
+ if( pPager->aSavepoint[ii].iHdrOffset==0 ){
+ pPager->aSavepoint[ii].iHdrOffset = pPager->journalOff;
+ }
+ }
+
+ pPager->journalHdr = pPager->journalOff = journalHdrOffset(pPager);
+
+ /*
+ ** Write the nRec Field - the number of page records that follow this
+ ** journal header. Normally, zero is written to this value at this time.
+ ** After the records are added to the journal (and the journal synced,
+ ** if in full-sync mode), the zero is overwritten with the true number
+ ** of records (see syncJournal()).
+ **
+ ** A faster alternative is to write 0xFFFFFFFF to the nRec field. When
+ ** reading the journal this value tells SQLite to assume that the
+ ** rest of the journal file contains valid page records. This assumption
+  ** is dangerous, as if a failure occurred whilst writing to the journal
+  ** file, it may contain some garbage data. There are two scenarios
+ ** where this risk can be ignored:
+ **
+ ** * When the pager is in no-sync mode. Corruption can follow a
+ ** power failure in this case anyway.
+ **
+ ** * When the SQLITE_IOCAP_SAFE_APPEND flag is set. This guarantees
+ ** that garbage data is never appended to the journal file.
+ */
+ assert( isOpen(pPager->fd) || pPager->noSync );
+ if( pPager->noSync || (pPager->journalMode==PAGER_JOURNALMODE_MEMORY)
+ || (sqlite3OsDeviceCharacteristics(pPager->fd)&SQLITE_IOCAP_SAFE_APPEND)
+ ){
+ memcpy(zHeader, aJournalMagic, sizeof(aJournalMagic));
+ put32bits(&zHeader[sizeof(aJournalMagic)], 0xffffffff);
+ }else{
+ memset(zHeader, 0, sizeof(aJournalMagic)+4);
+ }
+
+ /* The random check-hash initializer */
+ sqlite3_randomness(sizeof(pPager->cksumInit), &pPager->cksumInit);
+ put32bits(&zHeader[sizeof(aJournalMagic)+4], pPager->cksumInit);
+ /* The initial database size */
+ put32bits(&zHeader[sizeof(aJournalMagic)+8], pPager->dbOrigSize);
+ /* The assumed sector size for this process */
+ put32bits(&zHeader[sizeof(aJournalMagic)+12], pPager->sectorSize);
+
+ /* The page size */
+ put32bits(&zHeader[sizeof(aJournalMagic)+16], pPager->pageSize);
+
+ /* Initializing the tail of the buffer is not necessary. Everything
+  ** works fine if the following memset() is omitted. But initializing
+ ** the memory prevents valgrind from complaining, so we are willing to
+ ** take the performance hit.
+ */
+ memset(&zHeader[sizeof(aJournalMagic)+20], 0,
+ nHeader-(sizeof(aJournalMagic)+20));
+
+ /* In theory, it is only necessary to write the 28 bytes that the
+ ** journal header consumes to the journal file here. Then increment the
+ ** Pager.journalOff variable by JOURNAL_HDR_SZ so that the next
+ ** record is written to the following sector (leaving a gap in the file
+ ** that will be implicitly filled in by the OS).
+ **
+ ** However it has been discovered that on some systems this pattern can
+ ** be significantly slower than contiguously writing data to the file,
+ ** even if that means explicitly writing data to the block of
+ ** (JOURNAL_HDR_SZ - 28) bytes that will not be used. So that is what
+ ** is done.
+ **
+ ** The loop is required here in case the sector-size is larger than the
+ ** database page size. Since the zHeader buffer is only Pager.pageSize
+ ** bytes in size, more than one call to sqlite3OsWrite() may be required
+ ** to populate the entire journal header sector.
+ */
+ for(nWrite=0; rc==SQLITE_OK&&nWrite<JOURNAL_HDR_SZ(pPager); nWrite+=nHeader){
+ IOTRACE(("JHDR %p %lld %d\n", pPager, pPager->journalHdr, nHeader))
+ rc = sqlite3OsWrite(pPager->jfd, zHeader, nHeader, pPager->journalOff);
+ assert( pPager->journalHdr <= pPager->journalOff );
+ pPager->journalOff += nHeader;
+ }
+
+ return rc;
+}
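+
+/* Illustrative sketch (not part of the original pager code): the byte
+** offsets of the fields written by writeJournalHdr() above, relative to
+** the start of the header sector (sizeof(aJournalMagic)==8). Compiled
+** out with #if 0.
+*/
+#if 0
+#include <assert.h>
+static void journalHdrLayoutDemo(void){
+  int ofstMagic      = 0;   /*  8 bytes: magic */
+  int ofstNRec       = 8;   /*  4 bytes: nRec, or 0xffffffff in no-sync */
+  int ofstCksumInit  = 12;  /*  4 bytes: random checksum initializer */
+  int ofstDbOrigSize = 16;  /*  4 bytes: initial database page count */
+  int ofstSectorSize = 20;  /*  4 bytes: sector size */
+  int ofstPageSize   = 24;  /*  4 bytes: page size */
+  /* Exactly the 28 bytes that zeroJournalHdr() zeroes in PERSIST mode */
+  assert( ofstPageSize+4==28 );
+  (void)ofstMagic; (void)ofstNRec; (void)ofstCksumInit;
+  (void)ofstDbOrigSize; (void)ofstSectorSize;
+}
+#endif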
+
+/*
+** The journal file must be open when this is called. A journal header
+** (JOURNAL_HDR_SZ bytes) is read from the current location in the journal
+** file. The current location in the journal file is given by
+** pPager->journalOff. See comments above function writeJournalHdr() for
+** a description of the journal header format.
+**
+** If the header is read successfully, *pNRec is set to the number of
+** page records following this header and *pDbSize is set to the size of the
+** database before the transaction began, in pages. Also, pPager->cksumInit
+** is set to the value read from the journal header. SQLITE_OK is returned
+** in this case.
+**
+** If the journal header appears to be corrupted, SQLITE_DONE is
+** returned and *pNRec and *pDbSize are undefined. If JOURNAL_HDR_SZ bytes
+** cannot be read from the journal file an error code is returned.
+*/
+static int readJournalHdr(
+ Pager *pPager, /* Pager object */
+  int isHot,                   /* True if reading a hot journal */
+ i64 journalSize, /* Size of the open journal file in bytes */
+ u32 *pNRec, /* OUT: Value read from the nRec field */
+ u32 *pDbSize /* OUT: Value of original database size field */
+){
+ int rc; /* Return code */
+ unsigned char aMagic[8]; /* A buffer to hold the magic header */
+ i64 iHdrOff; /* Offset of journal header being read */
+
+ assert( isOpen(pPager->jfd) ); /* Journal file must be open. */
+
+ /* Advance Pager.journalOff to the start of the next sector. If the
+ ** journal file is too small for there to be a header stored at this
+ ** point, return SQLITE_DONE.
+ */
+ pPager->journalOff = journalHdrOffset(pPager);
+ if( pPager->journalOff+JOURNAL_HDR_SZ(pPager) > journalSize ){
+ return SQLITE_DONE;
+ }
+ iHdrOff = pPager->journalOff;
+
+ /* Read in the first 8 bytes of the journal header. If they do not match
+ ** the magic string found at the start of each journal header, return
+ ** SQLITE_DONE. If an IO error occurs, return an error code. Otherwise,
+ ** proceed.
+ */
+ if( isHot || iHdrOff!=pPager->journalHdr ){
+ rc = sqlite3OsRead(pPager->jfd, aMagic, sizeof(aMagic), iHdrOff);
+ if( rc ){
+ return rc;
+ }
+ if( memcmp(aMagic, aJournalMagic, sizeof(aMagic))!=0 ){
+ return SQLITE_DONE;
+ }
+ }
+
+ /* Read the first three 32-bit fields of the journal header: The nRec
+ ** field, the checksum-initializer and the database size at the start
+ ** of the transaction. Return an error code if anything goes wrong.
+ */
+ if( SQLITE_OK!=(rc = read32bits(pPager->jfd, iHdrOff+8, pNRec))
+ || SQLITE_OK!=(rc = read32bits(pPager->jfd, iHdrOff+12, &pPager->cksumInit))
+ || SQLITE_OK!=(rc = read32bits(pPager->jfd, iHdrOff+16, pDbSize))
+ ){
+ return rc;
+ }
+
+ if( pPager->journalOff==0 ){
+ u32 iPageSize; /* Page-size field of journal header */
+ u32 iSectorSize; /* Sector-size field of journal header */
+
+ /* Read the page-size and sector-size journal header fields. */
+ if( SQLITE_OK!=(rc = read32bits(pPager->jfd, iHdrOff+20, &iSectorSize))
+ || SQLITE_OK!=(rc = read32bits(pPager->jfd, iHdrOff+24, &iPageSize))
+ ){
+ return rc;
+ }
+
+ /* Versions of SQLite prior to 3.5.8 set the page-size field of the
+ ** journal header to zero. In this case, assume that the Pager.pageSize
+ ** variable is already set to the correct page size.
+ */
+ if( iPageSize==0 ){
+ iPageSize = pPager->pageSize;
+ }
+
+ /* Check that the values read from the page-size and sector-size fields
+    ** are within range. To be 'in range', the page-size must be a power
+    ** of two greater than or equal to 512, the sector-size a power of
+    ** two greater than or equal to 32, and neither value may be greater
+    ** than its respective compile time maximum limit.
+ */
+ if( iPageSize<512 || iSectorSize<32
+ || iPageSize>SQLITE_MAX_PAGE_SIZE || iSectorSize>MAX_SECTOR_SIZE
+ || ((iPageSize-1)&iPageSize)!=0 || ((iSectorSize-1)&iSectorSize)!=0
+ ){
+      /* If either the page-size or sector-size in the journal-header is
+ ** invalid, then the process that wrote the journal-header must have
+ ** crashed before the header was synced. In this case stop reading
+ ** the journal file here.
+ */
+ return SQLITE_DONE;
+ }
+
+ /* Update the page-size to match the value read from the journal.
+ ** Use a testcase() macro to make sure that malloc failure within
+ ** PagerSetPagesize() is tested.
+ */
+ rc = sqlite3PagerSetPagesize(pPager, &iPageSize, -1);
+ testcase( rc!=SQLITE_OK );
+
+ /* Update the assumed sector-size to match the value used by
+ ** the process that created this journal. If this journal was
+ ** created by a process other than this one, then this routine
+ ** is being called from within pager_playback(). The local value
+ ** of Pager.sectorSize is restored at the end of that routine.
+ */
+ pPager->sectorSize = iSectorSize;
+ }
+
+ pPager->journalOff += JOURNAL_HDR_SZ(pPager);
+ return rc;
+}
+
+
+/*
+** Write the supplied super-journal name into the journal file for pager
+** pPager at the current location. The super-journal name must be the last
+** thing written to a journal file. If the pager is in full-sync mode, the
+** journal file descriptor is advanced to the next sector boundary before
+** anything is written. The format is:
+**
+** + 4 bytes: PAGER_MJ_PGNO.
+** + N bytes: super-journal filename in utf-8.
+** + 4 bytes: N (length of super-journal name in bytes, no nul-terminator).
+** + 4 bytes: super-journal name checksum.
+** + 8 bytes: aJournalMagic[].
+**
+** The super-journal page checksum is the sum of the bytes in the super-journal
+** name, where each byte is interpreted as a signed 8-bit integer.
+**
+** If zSuper is a NULL pointer (as it is for a transaction involving only
+** a single database), this call is a no-op.
+*/
+static int writeSuperJournal(Pager *pPager, const char *zSuper){
+ int rc; /* Return code */
+ int nSuper; /* Length of string zSuper */
+ i64 iHdrOff; /* Offset of header in journal file */
+ i64 jrnlSize; /* Size of journal file on disk */
+ u32 cksum = 0; /* Checksum of string zSuper */
+
+ assert( pPager->setSuper==0 );
+ assert( !pagerUseWal(pPager) );
+
+ if( !zSuper
+ || pPager->journalMode==PAGER_JOURNALMODE_MEMORY
+ || !isOpen(pPager->jfd)
+ ){
+ return SQLITE_OK;
+ }
+ pPager->setSuper = 1;
+ assert( pPager->journalHdr <= pPager->journalOff );
+
+ /* Calculate the length in bytes and the checksum of zSuper */
+ for(nSuper=0; zSuper[nSuper]; nSuper++){
+ cksum += zSuper[nSuper];
+ }
+
+ /* If in full-sync mode, advance to the next disk sector before writing
+ ** the super-journal name. This is in case the previous page written to
+ ** the journal has already been synced.
+ */
+ if( pPager->fullSync ){
+ pPager->journalOff = journalHdrOffset(pPager);
+ }
+ iHdrOff = pPager->journalOff;
+
+ /* Write the super-journal data to the end of the journal file. If
+ ** an error occurs, return the error code to the caller.
+ */
+ if( (0 != (rc = write32bits(pPager->jfd, iHdrOff, PAGER_MJ_PGNO(pPager))))
+ || (0 != (rc = sqlite3OsWrite(pPager->jfd, zSuper, nSuper, iHdrOff+4)))
+ || (0 != (rc = write32bits(pPager->jfd, iHdrOff+4+nSuper, nSuper)))
+ || (0 != (rc = write32bits(pPager->jfd, iHdrOff+4+nSuper+4, cksum)))
+ || (0 != (rc = sqlite3OsWrite(pPager->jfd, aJournalMagic, 8,
+ iHdrOff+4+nSuper+8)))
+ ){
+ return rc;
+ }
+ pPager->journalOff += (nSuper+20);
+
+  /* If the pager is in persistent-journal mode, then the physical
+ ** journal-file may extend past the end of the super-journal name
+ ** and 8 bytes of magic data just written to the file. This is
+ ** dangerous because the code to rollback a hot-journal file
+ ** will not be able to find the super-journal name to determine
+ ** whether or not the journal is hot.
+ **
+ ** Easiest thing to do in this scenario is to truncate the journal
+ ** file to the required size.
+ */
+ if( SQLITE_OK==(rc = sqlite3OsFileSize(pPager->jfd, &jrnlSize))
+ && jrnlSize>pPager->journalOff
+ ){
+ rc = sqlite3OsTruncate(pPager->jfd, pPager->journalOff);
+ }
+ return rc;
+}
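+
+/* Illustrative sketch (not part of the original pager code): the size of
+** the record writeSuperJournal() appends, and the additive checksum that
+** readSuperJournal() later subtracts back out. The example name is
+** invented. Compiled out with #if 0.
+*/
+#if 0
+#include <assert.h>
+static void superJournalRecordDemo(void){
+  const char *zSuper = "/home/bill/a.db-mj0123";
+  unsigned int cksum = 0;
+  int n, i;
+  for(n=0; zSuper[n]; n++){ cksum += zSuper[n]; }
+  /* 4 (PAGER_MJ_PGNO) + n (name) + 4 (length) + 4 (cksum) + 8 (magic),
+  ** matching "pPager->journalOff += (nSuper+20)" above. */
+  assert( 4 + n + 4 + 4 + 8 == n + 20 );
+  /* readSuperJournal() verifies the name by subtracting each byte */
+  for(i=0; i<n; i++){ cksum -= zSuper[i]; }
+  assert( cksum==0 );
+}
+#endif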
+
+/*
+** Discard the entire contents of the in-memory page-cache.
+*/
+static void pager_reset(Pager *pPager){
+ pPager->iDataVersion++;
+ sqlite3BackupRestart(pPager->pBackup);
+ sqlite3PcacheClear(pPager->pPCache);
+}
+
+/*
+** Return the pPager->iDataVersion value
+*/
+SQLITE_PRIVATE u32 sqlite3PagerDataVersion(Pager *pPager){
+ return pPager->iDataVersion;
+}
+
+/*
+** Free all structures in the Pager.aSavepoint[] array and set both
+** Pager.aSavepoint and Pager.nSavepoint to zero. Close the sub-journal
+** if it is open and the pager is not in exclusive mode.
+*/
+static void releaseAllSavepoints(Pager *pPager){
+ int ii; /* Iterator for looping through Pager.aSavepoint */
+ for(ii=0; ii<pPager->nSavepoint; ii++){
+ sqlite3BitvecDestroy(pPager->aSavepoint[ii].pInSavepoint);
+ }
+ if( !pPager->exclusiveMode || sqlite3JournalIsInMemory(pPager->sjfd) ){
+ sqlite3OsClose(pPager->sjfd);
+ }
+ sqlite3_free(pPager->aSavepoint);
+ pPager->aSavepoint = 0;
+ pPager->nSavepoint = 0;
+ pPager->nSubRec = 0;
+}
+
+/*
+** Set the bit number pgno in the PagerSavepoint.pInSavepoint
+** bitvecs of all open savepoints. Return SQLITE_OK if successful
+** or SQLITE_NOMEM if a malloc failure occurs.
+*/
+static int addToSavepointBitvecs(Pager *pPager, Pgno pgno){
+ int ii; /* Loop counter */
+ int rc = SQLITE_OK; /* Result code */
+
+ for(ii=0; ii<pPager->nSavepoint; ii++){
+ PagerSavepoint *p = &pPager->aSavepoint[ii];
+ if( pgno<=p->nOrig ){
+ rc |= sqlite3BitvecSet(p->pInSavepoint, pgno);
+ testcase( rc==SQLITE_NOMEM );
+ assert( rc==SQLITE_OK || rc==SQLITE_NOMEM );
+ }
+ }
+ return rc;
+}
+
+/*
+** This function is a no-op if the pager is in exclusive mode and not
+** in the ERROR state. Otherwise, it switches the pager to PAGER_OPEN
+** state.
+**
+** If the pager is not in exclusive-access mode, the database file is
+** completely unlocked. If the file is unlocked and the file-system does
+** not exhibit the UNDELETABLE_WHEN_OPEN property, the journal file is
+** closed (if it is open).
+**
+** If the pager is in ERROR state when this function is called, the
+** contents of the pager cache are discarded before switching back to
+** the OPEN state. Regardless of whether the pager is in exclusive-mode
+** or not, any journal file left in the file-system will be treated
+** as a hot-journal and rolled back the next time a read-transaction
+** is opened (by this or by any other connection).
+*/
+static void pager_unlock(Pager *pPager){
+
+ assert( pPager->eState==PAGER_READER
+ || pPager->eState==PAGER_OPEN
+ || pPager->eState==PAGER_ERROR
+ );
+
+ sqlite3BitvecDestroy(pPager->pInJournal);
+ pPager->pInJournal = 0;
+ releaseAllSavepoints(pPager);
+
+ if( pagerUseWal(pPager) ){
+ assert( !isOpen(pPager->jfd) );
+ sqlite3WalEndReadTransaction(pPager->pWal);
+ pPager->eState = PAGER_OPEN;
+ }else if( !pPager->exclusiveMode ){
+ int rc; /* Error code returned by pagerUnlockDb() */
+ int iDc = isOpen(pPager->fd)?sqlite3OsDeviceCharacteristics(pPager->fd):0;
+
+    /* If the operating system supports deletion of open files, then
+ ** close the journal file when dropping the database lock. Otherwise
+ ** another connection with journal_mode=delete might delete the file
+ ** out from under us.
+ */
+ assert( (PAGER_JOURNALMODE_MEMORY & 5)!=1 );
+ assert( (PAGER_JOURNALMODE_OFF & 5)!=1 );
+ assert( (PAGER_JOURNALMODE_WAL & 5)!=1 );
+ assert( (PAGER_JOURNALMODE_DELETE & 5)!=1 );
+ assert( (PAGER_JOURNALMODE_TRUNCATE & 5)==1 );
+ assert( (PAGER_JOURNALMODE_PERSIST & 5)==1 );
+ if( 0==(iDc & SQLITE_IOCAP_UNDELETABLE_WHEN_OPEN)
+ || 1!=(pPager->journalMode & 5)
+ ){
+ sqlite3OsClose(pPager->jfd);
+ }
+
+ /* If the pager is in the ERROR state and the call to unlock the database
+ ** file fails, set the current lock to UNKNOWN_LOCK. See the comment
+ ** above the #define for UNKNOWN_LOCK for an explanation of why this
+ ** is necessary.
+ */
+ rc = pagerUnlockDb(pPager, NO_LOCK);
+ if( rc!=SQLITE_OK && pPager->eState==PAGER_ERROR ){
+ pPager->eLock = UNKNOWN_LOCK;
+ }
+
+ /* The pager state may be changed from PAGER_ERROR to PAGER_OPEN here
+ ** without clearing the error code. This is intentional - the error
+ ** code is cleared and the cache reset in the block below.
+ */
+ assert( pPager->errCode || pPager->eState!=PAGER_ERROR );
+ pPager->eState = PAGER_OPEN;
+ }
+
+ /* If Pager.errCode is set, the contents of the pager cache cannot be
+ ** trusted. Now that there are no outstanding references to the pager,
+ ** it can safely move back to PAGER_OPEN state. This happens in both
+ ** normal and exclusive-locking mode.
+ */
+ assert( pPager->errCode==SQLITE_OK || !MEMDB );
+ if( pPager->errCode ){
+ if( pPager->tempFile==0 ){
+ pager_reset(pPager);
+ pPager->changeCountDone = 0;
+ pPager->eState = PAGER_OPEN;
+ }else{
+ pPager->eState = (isOpen(pPager->jfd) ? PAGER_OPEN : PAGER_READER);
+ }
+ if( USEFETCH(pPager) ) sqlite3OsUnfetch(pPager->fd, 0, 0);
+ pPager->errCode = SQLITE_OK;
+ setGetterMethod(pPager);
+ }
+
+ pPager->journalOff = 0;
+ pPager->journalHdr = 0;
+ pPager->setSuper = 0;
+}
+
+/*
+** This function is called whenever an IOERR or FULL error that requires
+** the pager to transition into the ERROR state may have occurred.
+** The first argument is a pointer to the pager structure, the second
+** the error-code about to be returned by a pager API function. The
+** value returned is a copy of the second argument to this function.
+**
+** If the second argument is SQLITE_FULL, SQLITE_IOERR or one of the
+** IOERR sub-codes, the pager enters the ERROR state and the error code
+** is stored in Pager.errCode. While the pager remains in the ERROR state,
+** all major API calls on the Pager will immediately return Pager.errCode.
+**
+** The ERROR state indicates that the contents of the pager-cache
+** cannot be trusted. This state can be cleared by completely discarding
+** the contents of the pager-cache. If a transaction was active when
+** the persistent error occurred, then the rollback journal may need
+** to be replayed to restore the contents of the database file (as if
+** it were a hot-journal).
+*/
+static int pager_error(Pager *pPager, int rc){
+ int rc2 = rc & 0xff;
+ assert( rc==SQLITE_OK || !MEMDB );
+ assert(
+ pPager->errCode==SQLITE_FULL ||
+ pPager->errCode==SQLITE_OK ||
+ (pPager->errCode & 0xff)==SQLITE_IOERR
+ );
+ if( rc2==SQLITE_FULL || rc2==SQLITE_IOERR ){
+ pPager->errCode = rc;
+ pPager->eState = PAGER_ERROR;
+ setGetterMethod(pPager);
+ }
+ return rc;
+}
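+
+/* Illustrative sketch (not part of the original pager code): why
+** pager_error() masks with 0xff. Extended result codes keep the primary
+** code in their low byte, so the IOERR sub-codes all map back to
+** SQLITE_IOERR. Compiled out with #if 0.
+*/
+#if 0
+#include <assert.h>
+static void errorMaskDemo(void){
+  assert( (SQLITE_IOERR_FSYNC & 0xff)==SQLITE_IOERR );
+  assert( (SQLITE_IOERR_NOMEM & 0xff)==SQLITE_IOERR );
+  assert( (SQLITE_FULL & 0xff)==SQLITE_FULL );
+}
+#endif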
+
+static int pager_truncate(Pager *pPager, Pgno nPage);
+
+/*
+** The write transaction open on pPager is being committed (bCommit==1)
+** or rolled back (bCommit==0).
+**
+** Return TRUE if and only if all dirty pages should be flushed to disk.
+**
+** Rules:
+**
+** * For non-TEMP databases, always sync to disk. This is necessary
+** for transactions to be durable.
+**
+** * Sync TEMP database only on a COMMIT (not a ROLLBACK) when the backing
+** file has been created already (via a spill on pagerStress()) and
+** when the number of dirty pages in memory exceeds 25% of the total
+** cache size.
+*/
+static int pagerFlushOnCommit(Pager *pPager, int bCommit){
+ if( pPager->tempFile==0 ) return 1;
+ if( !bCommit ) return 0;
+ if( !isOpen(pPager->fd) ) return 0;
+ return (sqlite3PCachePercentDirty(pPager->pPCache)>=25);
+}
+
+/*
+** This routine ends a transaction. A transaction is usually ended by
+** either a COMMIT or a ROLLBACK operation. This routine may be called
+** after rollback of a hot-journal, or if an error occurs while opening
+** the journal file or writing the very first journal-header of a
+** database transaction.
+**
+** This routine is never called in PAGER_ERROR state. If it is called
+** in PAGER_NONE or PAGER_SHARED state and the lock held is less
+** exclusive than a RESERVED lock, it is a no-op.
+**
+** Otherwise, any active savepoints are released.
+**
+** If the journal file is open, then it is "finalized". Once a journal
+** file has been finalized it is not possible to use it to roll back a
+** transaction. Nor will it be considered to be a hot-journal by this
+** or any other database connection. Exactly how a journal is finalized
+** depends on whether or not the pager is running in exclusive mode and
+** the current journal-mode (Pager.journalMode value), as follows:
+**
+** journalMode==MEMORY
+** Journal file descriptor is simply closed. This destroys an
+** in-memory journal.
+**
+** journalMode==TRUNCATE
+** Journal file is truncated to zero bytes in size.
+**
+** journalMode==PERSIST
+** The first 28 bytes of the journal file are zeroed. This invalidates
+** the first journal header in the file, and hence the entire journal
+** file. An invalid journal file cannot be rolled back.
+**
+** journalMode==DELETE
+** The journal file is closed and deleted using sqlite3OsDelete().
+**
+** If the pager is running in exclusive mode, this method of finalizing
+** the journal file is never used. Instead, if the journalMode is
+** DELETE and the pager is in exclusive mode, the method described under
+** journalMode==PERSIST is used instead.
+**
+** After the journal is finalized, the pager moves to PAGER_READER state.
+** If running in non-exclusive rollback mode, the lock on the file is
+** downgraded to a SHARED_LOCK.
+**
+** SQLITE_OK is returned if no error occurs. If an error occurs during
+** any of the IO operations to finalize the journal file or unlock the
+** database then the IO error code is returned to the user. If the
+** operation to finalize the journal file fails, then the code still
+** tries to unlock the database file if not in exclusive mode. If the
+** unlock operation fails as well, then the first error code related
+** to the first error encountered (the journal finalization one) is
+** returned.
+*/
+static int pager_end_transaction(Pager *pPager, int hasSuper, int bCommit){
+ int rc = SQLITE_OK; /* Error code from journal finalization operation */
+ int rc2 = SQLITE_OK; /* Error code from db file unlock operation */
+
+ /* Do nothing if the pager does not have an open write transaction
+ ** or at least a RESERVED lock. This function may be called when there
+ ** is no write-transaction active but a RESERVED or greater lock is
+ ** held under two circumstances:
+ **
+ ** 1. After a successful hot-journal rollback, it is called with
+  **      eState==PAGER_OPEN and eLock==EXCLUSIVE_LOCK.
+ **
+ ** 2. If a connection with locking_mode=exclusive holding an EXCLUSIVE
+ ** lock switches back to locking_mode=normal and then executes a
+ ** read-transaction, this function is called with eState==PAGER_READER
+ ** and eLock==EXCLUSIVE_LOCK when the read-transaction is closed.
+ */
+ assert( assert_pager_state(pPager) );
+ assert( pPager->eState!=PAGER_ERROR );
+ if( pPager->eState<PAGER_WRITER_LOCKED && pPager->eLock<RESERVED_LOCK ){
+ return SQLITE_OK;
+ }
+
+ releaseAllSavepoints(pPager);
+ assert( isOpen(pPager->jfd) || pPager->pInJournal==0
+ || (sqlite3OsDeviceCharacteristics(pPager->fd)&SQLITE_IOCAP_BATCH_ATOMIC)
+ );
+ if( isOpen(pPager->jfd) ){
+ assert( !pagerUseWal(pPager) );
+
+ /* Finalize the journal file. */
+ if( sqlite3JournalIsInMemory(pPager->jfd) ){
+ /* assert( pPager->journalMode==PAGER_JOURNALMODE_MEMORY ); */
+ sqlite3OsClose(pPager->jfd);
+ }else if( pPager->journalMode==PAGER_JOURNALMODE_TRUNCATE ){
+ if( pPager->journalOff==0 ){
+ rc = SQLITE_OK;
+ }else{
+ rc = sqlite3OsTruncate(pPager->jfd, 0);
+ if( rc==SQLITE_OK && pPager->fullSync ){
+ /* Make sure the new file size is written into the inode right away.
+ ** Otherwise the journal might resurrect following a power loss and
+ ** cause the last transaction to roll back. See
+ ** https://bugzilla.mozilla.org/show_bug.cgi?id=1072773
+ */
+ rc = sqlite3OsSync(pPager->jfd, pPager->syncFlags);
+ }
+ }
+ pPager->journalOff = 0;
+ }else if( pPager->journalMode==PAGER_JOURNALMODE_PERSIST
+ || (pPager->exclusiveMode && pPager->journalMode!=PAGER_JOURNALMODE_WAL)
+ ){
+ rc = zeroJournalHdr(pPager, hasSuper||pPager->tempFile);
+ pPager->journalOff = 0;
+ }else{
+ /* This branch may be executed with Pager.journalMode==MEMORY if
+ ** a hot-journal was just rolled back. In this case the journal
+ ** file should be closed and deleted. If this connection writes to
+ ** the database file, it will do so using an in-memory journal.
+ */
+ int bDelete = !pPager->tempFile;
+ assert( sqlite3JournalIsInMemory(pPager->jfd)==0 );
+ assert( pPager->journalMode==PAGER_JOURNALMODE_DELETE
+ || pPager->journalMode==PAGER_JOURNALMODE_MEMORY
+ || pPager->journalMode==PAGER_JOURNALMODE_WAL
+ );
+ sqlite3OsClose(pPager->jfd);
+ if( bDelete ){
+ rc = sqlite3OsDelete(pPager->pVfs, pPager->zJournal, pPager->extraSync);
+ }
+ }
+ }
+
+#ifdef SQLITE_CHECK_PAGES
+ sqlite3PcacheIterateDirty(pPager->pPCache, pager_set_pagehash);
+ if( pPager->dbSize==0 && sqlite3PcacheRefCount(pPager->pPCache)>0 ){
+ PgHdr *p = sqlite3PagerLookup(pPager, 1);
+ if( p ){
+ p->pageHash = 0;
+ sqlite3PagerUnrefNotNull(p);
+ }
+ }
+#endif
+
+ sqlite3BitvecDestroy(pPager->pInJournal);
+ pPager->pInJournal = 0;
+ pPager->nRec = 0;
+ if( rc==SQLITE_OK ){
+ if( MEMDB || pagerFlushOnCommit(pPager, bCommit) ){
+ sqlite3PcacheCleanAll(pPager->pPCache);
+ }else{
+ sqlite3PcacheClearWritable(pPager->pPCache);
+ }
+ sqlite3PcacheTruncate(pPager->pPCache, pPager->dbSize);
+ }
+
+ if( pagerUseWal(pPager) ){
+ /* Drop the WAL write-lock, if any. Also, if the connection was in
+ ** locking_mode=exclusive mode but is no longer, drop the EXCLUSIVE
+ ** lock held on the database file.
+ */
+ rc2 = sqlite3WalEndWriteTransaction(pPager->pWal);
+ assert( rc2==SQLITE_OK );
+ }else if( rc==SQLITE_OK && bCommit && pPager->dbFileSize>pPager->dbSize ){
+ /* This branch is taken when committing a transaction in rollback-journal
+ ** mode if the database file on disk is larger than the database image.
+ ** At this point the journal has been finalized and the transaction
+ ** successfully committed, but the EXCLUSIVE lock is still held on the
+ ** file. So it is safe to truncate the database file to its minimum
+ ** required size. */
+ assert( pPager->eLock==EXCLUSIVE_LOCK );
+ rc = pager_truncate(pPager, pPager->dbSize);
+ }
+
+ if( rc==SQLITE_OK && bCommit ){
+ rc = sqlite3OsFileControl(pPager->fd, SQLITE_FCNTL_COMMIT_PHASETWO, 0);
+ if( rc==SQLITE_NOTFOUND ) rc = SQLITE_OK;
+ }
+
+ if( !pPager->exclusiveMode
+ && (!pagerUseWal(pPager) || sqlite3WalExclusiveMode(pPager->pWal, 0))
+ ){
+ rc2 = pagerUnlockDb(pPager, SHARED_LOCK);
+ }
+ pPager->eState = PAGER_READER;
+ pPager->setSuper = 0;
+
+ return (rc==SQLITE_OK?rc2:rc);
+}
+
+/*
+** Execute a rollback if a transaction is active and unlock the
+** database file.
+**
+** If the pager has already entered the ERROR state, do not attempt
+** the rollback at this time. Instead, pager_unlock() is called. The
+** call to pager_unlock() will discard all in-memory pages, unlock
+** the database file and move the pager back to OPEN state. If this
+** means that there is a hot-journal left in the file-system, the next
+** connection to obtain a shared lock on the pager (which may be this one)
+** will roll it back.
+**
+** If the pager has not already entered the ERROR state, but an IO or
+** malloc error occurs during a rollback, then this will itself cause
+** the pager to enter the ERROR state, which is then cleared by the
+** call to pager_unlock(), as described above.
+*/
+static void pagerUnlockAndRollback(Pager *pPager){
+ if( pPager->eState!=PAGER_ERROR && pPager->eState!=PAGER_OPEN ){
+ assert( assert_pager_state(pPager) );
+ if( pPager->eState>=PAGER_WRITER_LOCKED ){
+ sqlite3BeginBenignMalloc();
+ sqlite3PagerRollback(pPager);
+ sqlite3EndBenignMalloc();
+ }else if( !pPager->exclusiveMode ){
+ assert( pPager->eState==PAGER_READER );
+ pager_end_transaction(pPager, 0, 0);
+ }
+ }
+ pager_unlock(pPager);
+}
+
+/*
+** Parameter aData must point to a buffer of pPager->pageSize bytes
+** of data. Compute and return a checksum based on the contents of the
+** page of data and the current value of pPager->cksumInit.
+**
+** This is not a real checksum. It is really just the sum of the
+** random initial value (pPager->cksumInit) and every 200th byte
+** of the page data, starting with byte offset (pPager->pageSize%200).
+** Each byte is interpreted as an 8-bit unsigned integer.
+**
+** Changing the formula used to compute this checksum results in an
+** incompatible journal file format.
+**
+** If journal corruption occurs due to a power failure, the most likely
+** scenario is that one end or the other of the record will be changed.
+** It is much less likely that the two ends of the journal record will be
+** correct and the middle be corrupt. Thus, this "checksum" scheme,
+** though fast and simple, catches the most likely kind of corruption.
+*/
+static u32 pager_cksum(Pager *pPager, const u8 *aData){
+ u32 cksum = pPager->cksumInit; /* Checksum value to return */
+ int i = pPager->pageSize-200; /* Loop counter */
+ while( i>0 ){
+ cksum += aData[i];
+ i -= 200;
+ }
+ return cksum;
+}
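+
+/* Illustrative sketch (not part of the original pager code): which byte
+** offsets the loop in pager_cksum() actually visits for an assumed
+** 1024-byte page. Compiled out with #if 0.
+*/
+#if 0
+#include <assert.h>
+static void pagerCksumDemo(void){
+  int pageSize = 1024;
+  int i = pageSize - 200;   /* same start as pager_cksum(): offset 824 */
+  int last = -1;
+  while( i>0 ){             /* visits 824, 624, 424, 224, 24 */
+    last = i;
+    i -= 200;
+  }
+  /* The final offset visited is pageSize%200, as the comment states */
+  assert( last==pageSize%200 && last==24 );
+}
+#endif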
+
+/*
+** Read a single page from either the journal file (if isMainJrnl==1) or
+** from the sub-journal (if isMainJrnl==0) and playback that page.
+** The page begins at offset *pOffset into the file. The *pOffset
+** value is increased to the start of the next page in the journal.
+**
+** The main rollback journal uses checksums - the statement journal does
+** not.
+**
+** If the page number of the page record read from the (sub-)journal file
+** is greater than the current value of Pager.dbSize, then playback is
+** skipped and SQLITE_OK is returned.
+**
+** If pDone is not NULL, then it is a record of pages that have already
+** been played back. If the page at *pOffset has already been played back
+** (if the corresponding pDone bit is set) then skip the playback.
+** Make sure the pDone bit corresponding to the *pOffset page is set
+** prior to returning.
+**
+** If the page record is successfully read from the (sub-)journal file
+** and played back, then SQLITE_OK is returned. If an IO error occurs
+** while reading the record from the (sub-)journal file or while writing
+** to the database file, then the IO error code is returned. If data
+** is successfully read from the (sub-)journal file but appears to be
+** corrupted, SQLITE_DONE is returned. Data is considered corrupted in
+** two circumstances:
+**
+** * If the record page-number is illegal (0 or PAGER_MJ_PGNO), or
+** * If the record is being rolled back from the main journal file
+** and the checksum field does not match the record content.
+**
+** Neither of these two scenarios are possible during a savepoint rollback.
+**
+** If this is a savepoint rollback, then memory may have to be dynamically
+** allocated by this function. If this is the case and an allocation fails,
+** SQLITE_NOMEM is returned.
+*/
+static int pager_playback_one_page(
+ Pager *pPager, /* The pager being played back */
+ i64 *pOffset, /* Offset of record to playback */
+ Bitvec *pDone, /* Bitvec of pages already played back */
+ int isMainJrnl, /* 1 -> main journal. 0 -> sub-journal. */
+ int isSavepnt /* True for a savepoint rollback */
+){
+ int rc;
+ PgHdr *pPg; /* An existing page in the cache */
+ Pgno pgno; /* The page number of a page in journal */
+ u32 cksum; /* Checksum used for sanity checking */
+ char *aData; /* Temporary storage for the page */
+ sqlite3_file *jfd; /* The file descriptor for the journal file */
+ int isSynced; /* True if journal page is synced */
+
+ assert( (isMainJrnl&~1)==0 ); /* isMainJrnl is 0 or 1 */
+ assert( (isSavepnt&~1)==0 ); /* isSavepnt is 0 or 1 */
+ assert( isMainJrnl || pDone ); /* pDone always used on sub-journals */
+ assert( isSavepnt || pDone==0 ); /* pDone never used on non-savepoint */
+
+ aData = pPager->pTmpSpace;
+ assert( aData ); /* Temp storage must have already been allocated */
+ assert( pagerUseWal(pPager)==0 || (!isMainJrnl && isSavepnt) );
+
+ /* Either the state is greater than PAGER_WRITER_CACHEMOD (a transaction
+ ** or savepoint rollback done at the request of the caller) or this is
+ ** a hot-journal rollback. If it is a hot-journal rollback, the pager
+ ** is in state OPEN and holds an EXCLUSIVE lock. Hot-journal rollback
+ ** only reads from the main journal, not the sub-journal.
+ */
+ assert( pPager->eState>=PAGER_WRITER_CACHEMOD
+ || (pPager->eState==PAGER_OPEN && pPager->eLock==EXCLUSIVE_LOCK)
+ );
+ assert( pPager->eState>=PAGER_WRITER_CACHEMOD || isMainJrnl );
+
+ /* Read the page number and page data from the journal or sub-journal
+ ** file. Return an error code to the caller if an IO error occurs.
+ */
+ jfd = isMainJrnl ? pPager->jfd : pPager->sjfd;
+ rc = read32bits(jfd, *pOffset, &pgno);
+ if( rc!=SQLITE_OK ) return rc;
+ rc = sqlite3OsRead(jfd, (u8*)aData, pPager->pageSize, (*pOffset)+4);
+ if( rc!=SQLITE_OK ) return rc;
+ *pOffset += pPager->pageSize + 4 + isMainJrnl*4;
+
+  /* Sanity checking on the page. This is more important than I originally
+ ** thought. If a power failure occurs while the journal is being written,
+ ** it could cause invalid data to be written into the journal. We need to
+ ** detect this invalid data (with high probability) and ignore it.
+ */
+ if( pgno==0 || pgno==PAGER_MJ_PGNO(pPager) ){
+ assert( !isSavepnt );
+ return SQLITE_DONE;
+ }
+ if( pgno>(Pgno)pPager->dbSize || sqlite3BitvecTest(pDone, pgno) ){
+ return SQLITE_OK;
+ }
+ if( isMainJrnl ){
+ rc = read32bits(jfd, (*pOffset)-4, &cksum);
+ if( rc ) return rc;
+ if( !isSavepnt && pager_cksum(pPager, (u8*)aData)!=cksum ){
+ return SQLITE_DONE;
+ }
+ }
+
+ /* If this page has already been played back before during the current
+ ** rollback, then don't bother to play it back again.
+ */
+ if( pDone && (rc = sqlite3BitvecSet(pDone, pgno))!=SQLITE_OK ){
+ return rc;
+ }
+
+ /* When playing back page 1, restore the nReserve setting
+ */
+ if( pgno==1 && pPager->nReserve!=((u8*)aData)[20] ){
+ pPager->nReserve = ((u8*)aData)[20];
+ }
+
+ /* If the pager is in CACHEMOD state, then there must be a copy of this
+ ** page in the pager cache. In this case just update the pager cache,
+ ** not the database file. The page is left marked dirty in this case.
+ **
+ ** An exception to the above rule: If the database is in no-sync mode
+ ** and a page is moved during an incremental vacuum then the page may
+ ** not be in the pager cache. Later: if a malloc() or IO error occurs
+ ** during a Movepage() call, then the page may not be in the cache
+ ** either. So the condition described in the above paragraph is not
+ ** assert()able.
+ **
+ ** If in WRITER_DBMOD, WRITER_FINISHED or OPEN state, then we update the
+ ** pager cache if it exists and the main file. The page is then marked
+ ** not dirty. Since this code is only executed in PAGER_OPEN state for
+ ** a hot-journal rollback, it is guaranteed that the page-cache is empty
+ ** if the pager is in OPEN state.
+ **
+ ** Ticket #1171: The statement journal might contain page content that is
+ ** different from the page content at the start of the transaction.
+ ** This occurs when a page is changed prior to the start of a statement
+ ** then changed again within the statement. When rolling back such a
+ ** statement we must not write to the original database unless we know
+  ** for certain that the original page contents are synced into the main rollback
+ ** journal. Otherwise, a power loss might leave modified data in the
+ ** database file without an entry in the rollback journal that can
+  ** restore the database to its original form. Two conditions must be
+  ** met before writing to the database file: (1) the database must be
+  ** locked, and (2) we must know that the original page content is fully
+  ** synced in the main journal, either because the page is not in cache
+  ** or else the page is marked as needSync==0.
+ **
+ ** 2008-04-14: When attempting to vacuum a corrupt database file, it
+ ** is possible to fail a statement on a database that does not yet exist.
+ ** Do not attempt to write if database file has never been opened.
+ */
+ if( pagerUseWal(pPager) ){
+ pPg = 0;
+ }else{
+ pPg = sqlite3PagerLookup(pPager, pgno);
+ }
+ assert( pPg || !MEMDB );
+ assert( pPager->eState!=PAGER_OPEN || pPg==0 || pPager->tempFile );
+ PAGERTRACE(("PLAYBACK %d page %d hash(%08x) %s\n",
+ PAGERID(pPager), pgno, pager_datahash(pPager->pageSize, (u8*)aData),
+ (isMainJrnl?"main-journal":"sub-journal")
+ ));
+ if( isMainJrnl ){
+ isSynced = pPager->noSync || (*pOffset <= pPager->journalHdr);
+ }else{
+ isSynced = (pPg==0 || 0==(pPg->flags & PGHDR_NEED_SYNC));
+ }
+ if( isOpen(pPager->fd)
+ && (pPager->eState>=PAGER_WRITER_DBMOD || pPager->eState==PAGER_OPEN)
+ && isSynced
+ ){
+ i64 ofst = (pgno-1)*(i64)pPager->pageSize;
+ testcase( !isSavepnt && pPg!=0 && (pPg->flags&PGHDR_NEED_SYNC)!=0 );
+ assert( !pagerUseWal(pPager) );
+
+ /* Write the data read from the journal back into the database file.
+ ** This is usually safe even for an encrypted database - as the data
+ ** was encrypted before it was written to the journal file. The exception
+ ** is if the data was just read from an in-memory sub-journal. In that
+ ** case it must be encrypted here before it is copied into the database
+ ** file. */
+ rc = sqlite3OsWrite(pPager->fd, (u8 *)aData, pPager->pageSize, ofst);
+
+ if( pgno>pPager->dbFileSize ){
+ pPager->dbFileSize = pgno;
+ }
+ if( pPager->pBackup ){
+ sqlite3BackupUpdate(pPager->pBackup, pgno, (u8*)aData);
+ }
+ }else if( !isMainJrnl && pPg==0 ){
+ /* If this is a rollback of a savepoint and data was not written to
+ ** the database and the page is not in-memory, there is a potential
+ ** problem. When the page is next fetched by the b-tree layer, it
+ ** will be read from the database file, which may or may not be
+ ** current.
+ **
+ ** There are a couple of different ways this can happen. All are quite
+ ** obscure. When running in synchronous mode, this can only happen
+ ** if the page is on the free-list at the start of the transaction, then
+ ** populated, then moved using sqlite3PagerMovepage().
+ **
+ ** The solution is to add an in-memory page to the cache containing
+ ** the data just read from the sub-journal. Mark the page as dirty
+ ** and if the pager requires a journal-sync, then mark the page as
+ ** requiring a journal-sync before it is written.
+ */
+ assert( isSavepnt );
+ assert( (pPager->doNotSpill & SPILLFLAG_ROLLBACK)==0 );
+ pPager->doNotSpill |= SPILLFLAG_ROLLBACK;
+ rc = sqlite3PagerGet(pPager, pgno, &pPg, 1);
+ assert( (pPager->doNotSpill & SPILLFLAG_ROLLBACK)!=0 );
+ pPager->doNotSpill &= ~SPILLFLAG_ROLLBACK;
+ if( rc!=SQLITE_OK ) return rc;
+ sqlite3PcacheMakeDirty(pPg);
+ }
+ if( pPg ){
+ /* No page should ever be explicitly rolled back that is in use, except
+ ** for page 1 which is held in use in order to keep the lock on the
+ ** database active. However such a page may be rolled back as a result
+ ** of an internal error resulting in an automatic call to
+ ** sqlite3PagerRollback().
+ */
+ void *pData;
+ pData = pPg->pData;
+ memcpy(pData, (u8*)aData, pPager->pageSize);
+ pPager->xReiniter(pPg);
+    /* It used to be that sqlite3PcacheMakeClean(pPg) was called here.  But
+    ** that call was dangerous and had no detectable benefit since the cache
+    ** is normally cleaned by sqlite3PcacheCleanAll() after rollback, so the
+    ** call has been removed. */
+ pager_set_pagehash(pPg);
+
+ /* If this was page 1, then restore the value of Pager.dbFileVers.
+ ** Do this before any decoding. */
+ if( pgno==1 ){
+ memcpy(&pPager->dbFileVers, &((u8*)pData)[24],sizeof(pPager->dbFileVers));
+ }
+ sqlite3PcacheRelease(pPg);
+ }
+ return rc;
+}
+
+/*
+** Parameter zSuper is the name of a super-journal file. A single journal
+** file that referred to the super-journal file has just been rolled back.
+** This routine checks if it is possible to delete the super-journal file,
+** and does so if it is.
+**
+** Argument zSuper may point to Pager.pTmpSpace. So that buffer is not
+** available for use within this function.
+**
+** When a super-journal file is created, it is populated with the names
+** of all of its child journals, one after another, formatted as utf-8
+** encoded text. The end of each child journal file name is marked with a
+** nul-terminator byte (0x00). i.e. the entire contents of a super-journal
+** file for a transaction involving two databases might be:
+**
+** "/home/bill/a.db-journal\x00/home/bill/b.db-journal\x00"
+**
+** A super-journal file may only be deleted once all of its child
+** journals have been rolled back.
+**
+** This function reads the contents of the super-journal file into
+** memory and loops through each of the child journal names. For
+** each child journal, it checks:
+**
+**      * whether the child journal exists, and if so
+**      * whether the child journal contains a reference to super-journal
+**        file zSuper
+**
+** If a child journal can be found that matches both of the criteria
+** above, this function returns without doing anything. Otherwise, if
+** no such child journal can be found, file zSuper is deleted from
+** the file-system using sqlite3OsDelete().
+**
+** If an IO error occurs within this function, an error code is returned. This
+** function allocates memory by calling sqlite3Malloc(). If an allocation
+** fails, SQLITE_NOMEM is returned. Otherwise, if no IO or malloc errors
+** occur, SQLITE_OK is returned.
+**
+** TODO: This function allocates a single block of memory to load
+** the entire contents of the super-journal file. This could be
+** a couple of kilobytes or so - potentially larger than the page
+** size.
+*/
+static int pager_delsuper(Pager *pPager, const char *zSuper){
+ sqlite3_vfs *pVfs = pPager->pVfs;
+ int rc; /* Return code */
+ sqlite3_file *pSuper; /* Malloc'd super-journal file descriptor */
+ sqlite3_file *pJournal; /* Malloc'd child-journal file descriptor */
+ char *zSuperJournal = 0; /* Contents of super-journal file */
+ i64 nSuperJournal; /* Size of super-journal file */
+ char *zJournal; /* Pointer to one journal within MJ file */
+ char *zSuperPtr; /* Space to hold super-journal filename */
+ int nSuperPtr; /* Amount of space allocated to zSuperPtr[] */
+
+ /* Allocate space for both the pJournal and pSuper file descriptors.
+ ** If successful, open the super-journal file for reading.
+ */
+ pSuper = (sqlite3_file *)sqlite3MallocZero(pVfs->szOsFile * 2);
+ if( !pSuper ){
+ rc = SQLITE_NOMEM_BKPT;
+ pJournal = 0;
+ }else{
+ const int flags = (SQLITE_OPEN_READONLY|SQLITE_OPEN_SUPER_JOURNAL);
+ rc = sqlite3OsOpen(pVfs, zSuper, pSuper, flags, 0);
+ pJournal = (sqlite3_file *)(((u8 *)pSuper) + pVfs->szOsFile);
+ }
+ if( rc!=SQLITE_OK ) goto delsuper_out;
+
+ /* Load the entire super-journal file into space obtained from
+ ** sqlite3_malloc() and pointed to by zSuperJournal. Also obtain
+ ** sufficient space (in zSuperPtr) to hold the names of super-journal
+ ** files extracted from regular rollback-journals.
+ */
+ rc = sqlite3OsFileSize(pSuper, &nSuperJournal);
+ if( rc!=SQLITE_OK ) goto delsuper_out;
+ nSuperPtr = pVfs->mxPathname+1;
+ zSuperJournal = sqlite3Malloc(nSuperJournal + nSuperPtr + 2);
+ if( !zSuperJournal ){
+ rc = SQLITE_NOMEM_BKPT;
+ goto delsuper_out;
+ }
+ zSuperPtr = &zSuperJournal[nSuperJournal+2];
+ rc = sqlite3OsRead(pSuper, zSuperJournal, (int)nSuperJournal, 0);
+ if( rc!=SQLITE_OK ) goto delsuper_out;
+ zSuperJournal[nSuperJournal] = 0;
+ zSuperJournal[nSuperJournal+1] = 0;
+
+ zJournal = zSuperJournal;
+ while( (zJournal-zSuperJournal)<nSuperJournal ){
+ int exists;
+ rc = sqlite3OsAccess(pVfs, zJournal, SQLITE_ACCESS_EXISTS, &exists);
+ if( rc!=SQLITE_OK ){
+ goto delsuper_out;
+ }
+ if( exists ){
+ /* One of the journals pointed to by the super-journal exists.
+ ** Open it and check if it points at the super-journal. If
+ ** so, return without deleting the super-journal file.
+ ** NB: zJournal is really a MAIN_JOURNAL. But call it a
+ ** SUPER_JOURNAL here so that the VFS will not send the zJournal
+ ** name into sqlite3_database_file_object().
+ */
+ int c;
+ int flags = (SQLITE_OPEN_READONLY|SQLITE_OPEN_SUPER_JOURNAL);
+ rc = sqlite3OsOpen(pVfs, zJournal, pJournal, flags, 0);
+ if( rc!=SQLITE_OK ){
+ goto delsuper_out;
+ }
+
+ rc = readSuperJournal(pJournal, zSuperPtr, nSuperPtr);
+ sqlite3OsClose(pJournal);
+ if( rc!=SQLITE_OK ){
+ goto delsuper_out;
+ }
+
+ c = zSuperPtr[0]!=0 && strcmp(zSuperPtr, zSuper)==0;
+ if( c ){
+ /* We have a match. Do not delete the super-journal file. */
+ goto delsuper_out;
+ }
+ }
+ zJournal += (sqlite3Strlen30(zJournal)+1);
+ }
+
+ sqlite3OsClose(pSuper);
+ rc = sqlite3OsDelete(pVfs, zSuper, 0);
+
+delsuper_out:
+ sqlite3_free(zSuperJournal);
+ if( pSuper ){
+ sqlite3OsClose(pSuper);
+ assert( !isOpen(pJournal) );
+ sqlite3_free(pSuper);
+ }
+ return rc;
+}
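+
+/* The sketch below is illustrative only and is not part of SQLite or of
+** this change: it isolates the nul-separated name walk performed by
+** pager_delsuper() above. The function name is hypothetical and the
+** code is kept out of the build.
+*/
+#if 0
+static void walkNulSeparatedNames(const char *zBuf, i64 nBuf){
+  const char *z = zBuf;                    /* Current name */
+  while( (z-zBuf)<nBuf ){
+    /* z points at one nul-terminated child journal name */
+    z += sqlite3Strlen30(z) + 1;           /* Step past the name and its nul */
+  }
+}
+#endif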
+
+
+/*
+** This function is used to change the actual size of the database
+** file in the file-system. This only happens when committing a transaction,
+** or rolling back a transaction (including rolling back a hot-journal).
+**
+** If the main database file is not open, or the pager is not in either
+** DBMOD or OPEN state, this function is a no-op. Otherwise, the size
+** of the file is changed to nPage pages (nPage*pPager->pageSize bytes).
+** If the file on disk is currently larger than nPage pages, then use the VFS
+** xTruncate() method to truncate it.
+**
+** Or, it might be the case that the file on disk is smaller than
+** nPage pages. Some operating system implementations can get confused if
+** you try to truncate a file to some size that is larger than it
+** currently is, so detect this case and write a single zero byte to
+** the end of the new file instead.
+**
+** If successful, return SQLITE_OK. If an IO error occurs while modifying
+** the database file, return the error code to the caller.
+*/
+static int pager_truncate(Pager *pPager, Pgno nPage){
+ int rc = SQLITE_OK;
+ assert( pPager->eState!=PAGER_ERROR );
+ assert( pPager->eState!=PAGER_READER );
+
+ if( isOpen(pPager->fd)
+ && (pPager->eState>=PAGER_WRITER_DBMOD || pPager->eState==PAGER_OPEN)
+ ){
+ i64 currentSize, newSize;
+ int szPage = pPager->pageSize;
+ assert( pPager->eLock==EXCLUSIVE_LOCK );
+ /* TODO: Is it safe to use Pager.dbFileSize here? */
+ rc = sqlite3OsFileSize(pPager->fd, &currentSize);
+ newSize = szPage*(i64)nPage;
+ if( rc==SQLITE_OK && currentSize!=newSize ){
+ if( currentSize>newSize ){
+ rc = sqlite3OsTruncate(pPager->fd, newSize);
+ }else if( (currentSize+szPage)<=newSize ){
+ char *pTmp = pPager->pTmpSpace;
+ memset(pTmp, 0, szPage);
+ testcase( (newSize-szPage) == currentSize );
+ testcase( (newSize-szPage) > currentSize );
+ rc = sqlite3OsWrite(pPager->fd, pTmp, szPage, newSize-szPage);
+ }
+ if( rc==SQLITE_OK ){
+ pPager->dbFileSize = nPage;
+ }
+ }
+ }
+ return rc;
+}
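+
+/* Illustrative sketch only (not part of SQLite or of this change): the
+** truncate-or-extend decision made by pager_truncate() above, in
+** isolation. setFileSize() is a hypothetical name; aZero is assumed to
+** point at szPage bytes of zeroes.
+*/
+#if 0
+static int setFileSize(sqlite3_file *fd, i64 cur, i64 want,
+                       char *aZero, int szPage){
+  if( cur>want ) return sqlite3OsTruncate(fd, want);
+  if( cur+szPage<=want ){
+    /* Grow by writing one zeroed page that ends at the desired size,
+    ** since some OS layers mishandle truncating a file upward. */
+    return sqlite3OsWrite(fd, aZero, szPage, want-szPage);
+  }
+  return SQLITE_OK;   /* Already within one page of the target */
+}
+#endif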
+
+/*
+** Return a sanitized version of the sector-size of OS file pFile. The
+** return value is guaranteed to lie between 32 and MAX_SECTOR_SIZE.
+*/
+SQLITE_PRIVATE int sqlite3SectorSize(sqlite3_file *pFile){
+ int iRet = sqlite3OsSectorSize(pFile);
+ if( iRet<32 ){
+ iRet = 512;
+ }else if( iRet>MAX_SECTOR_SIZE ){
+ assert( MAX_SECTOR_SIZE>=512 );
+ iRet = MAX_SECTOR_SIZE;
+ }
+ return iRet;
+}
+
+/*
+** Set the value of the Pager.sectorSize variable for the given
+** pager based on the value returned by the xSectorSize method
+** of the open database file. The sector size will be used
+** to determine the size and alignment of journal header and
+** super-journal pointers within created journal files.
+**
+** For temporary files the effective sector size is always 512 bytes.
+**
+** Otherwise, for non-temporary files, the effective sector size is
+** the value returned by the xSectorSize() method, forced to 512 if
+** it is less than 32, or rounded down to MAX_SECTOR_SIZE if it
+** is greater than MAX_SECTOR_SIZE.
+**
+** If the file has the SQLITE_IOCAP_POWERSAFE_OVERWRITE property, then set
+** the effective sector size to its minimum value (512). The purpose of
+** pPager->sectorSize is to define the "blast radius" of bytes that
+** might change if a crash occurs while writing to a single byte in
+** that range. But with POWERSAFE_OVERWRITE, the blast radius is zero
+** (that is what POWERSAFE_OVERWRITE means), so we minimize the sector
+** size. For backwards compatibility of the rollback journal file format,
+** we cannot reduce the effective sector size below 512.
+*/
+static void setSectorSize(Pager *pPager){
+ assert( isOpen(pPager->fd) || pPager->tempFile );
+
+ if( pPager->tempFile
+ || (sqlite3OsDeviceCharacteristics(pPager->fd) &
+ SQLITE_IOCAP_POWERSAFE_OVERWRITE)!=0
+ ){
+ /* Sector size doesn't matter for temporary files. Also, the file
+ ** may not have been opened yet, in which case the OsSectorSize()
+ ** call will segfault. */
+ pPager->sectorSize = 512;
+ }else{
+ pPager->sectorSize = sqlite3SectorSize(pPager->fd);
+ }
+}
+
+/*
+** Playback the journal and thus restore the database file to
+** the state it was in before we started making changes.
+**
+** The journal file format is as follows:
+**
+** (1) 8 byte prefix. A copy of aJournalMagic[].
+** (2) 4 byte big-endian integer which is the number of valid page records
+** in the journal. If this value is 0xffffffff, then compute the
+** number of page records from the journal size.
+** (3) 4 byte big-endian integer which is the initial value for the
+** sanity checksum.
+** (4) 4 byte integer which is the number of pages to truncate the
+** database to during a rollback.
+** (5) 4 byte big-endian integer which is the sector size. The header
+** is this many bytes in size.
+** (6) 4 byte big-endian integer which is the page size.
+** (7) zero padding out to the next sector size.
+**     (8)  Zero or more page instances, each as follows:
+** + 4 byte page number.
+** + pPager->pageSize bytes of data.
+** + 4 byte checksum
+**
+** When we speak of the journal header, we mean the first 7 items above.
+** Each entry in the journal is an instance of the 8th item.
+**
+** Call the value from item (2) above "nRec".  nRec is the number of
+** valid page entries in the journal. In most cases, you can compute the
+** value of nRec from the size of the journal file. But if a power
+** failure occurred while the journal was being written, it could be the
+** case that the size of the journal file had already been increased but
+** the extra entries had not yet made it safely to disk. In such a case,
+** the value of nRec computed from the file size would be too large. For
+** that reason, we always use the nRec value in the header.
+**
+** If the nRec value is 0xffffffff it means that nRec should be computed
+** from the file size. This value is used when the user selects the
+** no-sync option for the journal. A power failure could lead to corruption
+** in this case. But for things like temporary tables (which will be
+** deleted when the power is restored) we don't care.
+**
+** If the file opened as the journal file is not a well-formed
+** journal file then all pages up to the first corrupted page are rolled
+** back (or no pages if the journal header is corrupted). The journal file
+** is then deleted and SQLITE_OK returned, just as if no corruption had
+** been encountered.
+**
+** If an I/O or malloc() error occurs, the journal-file is not deleted
+** and an error code is returned.
+**
+** The isHot parameter indicates that we are trying to rollback a journal
+** that might be a hot journal. Or, it could be that the journal is
+** preserved because of JOURNALMODE_PERSIST or JOURNALMODE_TRUNCATE.
+** If the journal really is hot, reset the pager cache prior to rolling
+** back any content. If the journal is merely persistent, no reset is
+** needed.
+*/
+static int pager_playback(Pager *pPager, int isHot){
+ sqlite3_vfs *pVfs = pPager->pVfs;
+ i64 szJ; /* Size of the journal file in bytes */
+ u32 nRec; /* Number of Records in the journal */
+ u32 u; /* Unsigned loop counter */
+ Pgno mxPg = 0; /* Size of the original file in pages */
+ int rc; /* Result code of a subroutine */
+ int res = 1; /* Value returned by sqlite3OsAccess() */
+ char *zSuper = 0; /* Name of super-journal file if any */
+  int needPagerReset;            /* True to reset pager prior to first page rollback */
+ int nPlayback = 0; /* Total number of pages restored from journal */
+ u32 savedPageSize = pPager->pageSize;
+
+ /* Figure out how many records are in the journal. Abort early if
+ ** the journal is empty.
+ */
+ assert( isOpen(pPager->jfd) );
+ rc = sqlite3OsFileSize(pPager->jfd, &szJ);
+ if( rc!=SQLITE_OK ){
+ goto end_playback;
+ }
+
+ /* Read the super-journal name from the journal, if it is present.
+ ** If a super-journal file name is specified, but the file is not
+ ** present on disk, then the journal is not hot and does not need to be
+ ** played back.
+ **
+ ** TODO: Technically the following is an error because it assumes that
+ ** buffer Pager.pTmpSpace is (mxPathname+1) bytes or larger. i.e. that
+ ** (pPager->pageSize >= pPager->pVfs->mxPathname+1). Using os_unix.c,
+ ** mxPathname is 512, which is the same as the minimum allowable value
+ ** for pageSize.
+ */
+ zSuper = pPager->pTmpSpace;
+ rc = readSuperJournal(pPager->jfd, zSuper, pPager->pVfs->mxPathname+1);
+ if( rc==SQLITE_OK && zSuper[0] ){
+ rc = sqlite3OsAccess(pVfs, zSuper, SQLITE_ACCESS_EXISTS, &res);
+ }
+ zSuper = 0;
+ if( rc!=SQLITE_OK || !res ){
+ goto end_playback;
+ }
+ pPager->journalOff = 0;
+ needPagerReset = isHot;
+
+ /* This loop terminates either when a readJournalHdr() or
+ ** pager_playback_one_page() call returns SQLITE_DONE or an IO error
+ ** occurs.
+ */
+ while( 1 ){
+ /* Read the next journal header from the journal file. If there are
+ ** not enough bytes left in the journal file for a complete header, or
+ ** it is corrupted, then a process must have failed while writing it.
+ ** This indicates nothing more needs to be rolled back.
+ */
+ rc = readJournalHdr(pPager, isHot, szJ, &nRec, &mxPg);
+ if( rc!=SQLITE_OK ){
+ if( rc==SQLITE_DONE ){
+ rc = SQLITE_OK;
+ }
+ goto end_playback;
+ }
+
+ /* If nRec is 0xffffffff, then this journal was created by a process
+ ** working in no-sync mode. This means that the rest of the journal
+    ** file consists of pages; there are no more journal headers. Compute
+ ** the value of nRec based on this assumption.
+ */
+ if( nRec==0xffffffff ){
+ assert( pPager->journalOff==JOURNAL_HDR_SZ(pPager) );
+ nRec = (int)((szJ - JOURNAL_HDR_SZ(pPager))/JOURNAL_PG_SZ(pPager));
+ }
+
+ /* If nRec is 0 and this rollback is of a transaction created by this
+ ** process and if this is the final header in the journal, then it means
+ ** that this part of the journal was being filled but has not yet been
+ ** synced to disk. Compute the number of pages based on the remaining
+ ** size of the file.
+ **
+ ** The third term of the test was added to fix ticket #2565.
+ ** When rolling back a hot journal, nRec==0 always means that the next
+ ** chunk of the journal contains zero pages to be rolled back. But
+ ** when doing a ROLLBACK and the nRec==0 chunk is the last chunk in
+ ** the journal, it means that the journal might contain additional
+ ** pages that need to be rolled back and that the number of pages
+ ** should be computed based on the journal file size.
+ */
+ if( nRec==0 && !isHot &&
+ pPager->journalHdr+JOURNAL_HDR_SZ(pPager)==pPager->journalOff ){
+ nRec = (int)((szJ - pPager->journalOff) / JOURNAL_PG_SZ(pPager));
+ }
+
+ /* If this is the first header read from the journal, truncate the
+ ** database file back to its original size.
+ */
+ if( pPager->journalOff==JOURNAL_HDR_SZ(pPager) ){
+ rc = pager_truncate(pPager, mxPg);
+ if( rc!=SQLITE_OK ){
+ goto end_playback;
+ }
+ pPager->dbSize = mxPg;
+ }
+
+ /* Copy original pages out of the journal and back into the
+ ** database file and/or page cache.
+ */
+ for(u=0; u<nRec; u++){
+ if( needPagerReset ){
+ pager_reset(pPager);
+ needPagerReset = 0;
+ }
+ rc = pager_playback_one_page(pPager,&pPager->journalOff,0,1,0);
+ if( rc==SQLITE_OK ){
+ nPlayback++;
+ }else{
+ if( rc==SQLITE_DONE ){
+ pPager->journalOff = szJ;
+ break;
+ }else if( rc==SQLITE_IOERR_SHORT_READ ){
+ /* If the journal has been truncated, simply stop reading and
+ ** processing the journal. This might happen if the journal was
+ ** not completely written and synced prior to a crash. In that
+ ** case, the database should have never been written in the
+ ** first place so it is OK to simply abandon the rollback. */
+ rc = SQLITE_OK;
+ goto end_playback;
+ }else{
+ /* If we are unable to rollback, quit and return the error
+ ** code. This will cause the pager to enter the error state
+ ** so that no further harm will be done. Perhaps the next
+ ** process to come along will be able to rollback the database.
+ */
+ goto end_playback;
+ }
+ }
+ }
+ }
+ /*NOTREACHED*/
+ assert( 0 );
+
+end_playback:
+ if( rc==SQLITE_OK ){
+ rc = sqlite3PagerSetPagesize(pPager, &savedPageSize, -1);
+ }
+ /* Following a rollback, the database file should be back in its original
+ ** state prior to the start of the transaction, so invoke the
+ ** SQLITE_FCNTL_DB_UNCHANGED file-control method to disable the
+ ** assertion that the transaction counter was modified.
+ */
+#ifdef SQLITE_DEBUG
+ sqlite3OsFileControlHint(pPager->fd,SQLITE_FCNTL_DB_UNCHANGED,0);
+#endif
+
+ /* If this playback is happening automatically as a result of an IO or
+ ** malloc error that occurred after the change-counter was updated but
+ ** before the transaction was committed, then the change-counter
+ ** modification may just have been reverted. If this happens in exclusive
+ ** mode, then subsequent transactions performed by the connection will not
+ ** update the change-counter at all. This may lead to cache inconsistency
+ ** problems for other processes at some point in the future. So, just
+ ** in case this has happened, clear the changeCountDone flag now.
+ */
+ pPager->changeCountDone = pPager->tempFile;
+
+ if( rc==SQLITE_OK ){
+ zSuper = pPager->pTmpSpace;
+ rc = readSuperJournal(pPager->jfd, zSuper, pPager->pVfs->mxPathname+1);
+ testcase( rc!=SQLITE_OK );
+ }
+ if( rc==SQLITE_OK
+ && (pPager->eState>=PAGER_WRITER_DBMOD || pPager->eState==PAGER_OPEN)
+ ){
+ rc = sqlite3PagerSync(pPager, 0);
+ }
+ if( rc==SQLITE_OK ){
+ rc = pager_end_transaction(pPager, zSuper[0]!='\0', 0);
+ testcase( rc!=SQLITE_OK );
+ }
+ if( rc==SQLITE_OK && zSuper[0] && res ){
+ /* If there was a super-journal and this routine will return success,
+ ** see if it is possible to delete the super-journal.
+ */
+ rc = pager_delsuper(pPager, zSuper);
+ testcase( rc!=SQLITE_OK );
+ }
+ if( isHot && nPlayback ){
+ sqlite3_log(SQLITE_NOTICE_RECOVER_ROLLBACK, "recovered %d pages from %s",
+ nPlayback, pPager->zJournal);
+ }
+
+ /* The Pager.sectorSize variable may have been updated while rolling
+ ** back a journal created by a process with a different sector size
+ ** value. Reset it to the correct value for this process.
+ */
+ setSectorSize(pPager);
+ return rc;
+}
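+
+/* Illustrative sketch only (not part of SQLite or of this change):
+** decoding the fixed journal-header fields laid out in items (1)-(6)
+** above pager_playback(). The names here are hypothetical; the real
+** decoding lives in readJournalHdr().
+*/
+#if 0
+static u32 get4be(const u8 *p){   /* Read a 4-byte big-endian integer */
+  return ((u32)p[0]<<24) | ((u32)p[1]<<16) | ((u32)p[2]<<8) | (u32)p[3];
+}
+static void decodeJournalHdr(
+  const u8 *aHdr,      /* At least 28 bytes at the start of a header */
+  u32 *pnRec,          /* OUT: number of page records (0xffffffff => compute) */
+  u32 *pCksumInit,     /* OUT: initial value for the sanity checksum */
+  u32 *pnOrigPage,     /* OUT: truncate the database to this many pages */
+  u32 *pSectorSize,    /* OUT: the header occupies one sector of this size */
+  u32 *pPageSize       /* OUT: page size used by the journal */
+){
+  /* aHdr[0..7] holds a copy of aJournalMagic[] */
+  *pnRec       = get4be(&aHdr[8]);
+  *pCksumInit  = get4be(&aHdr[12]);
+  *pnOrigPage  = get4be(&aHdr[16]);
+  *pSectorSize = get4be(&aHdr[20]);
+  *pPageSize   = get4be(&aHdr[24]);
+}
+#endif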
+
+
+/*
+** Read the content for page pPg out of the database file (or out of
+** the WAL if that is where the most recent copy is found) into
+** pPg->pData. A shared lock or greater must be held on the database
+** file before this function is called.
+**
+** If page 1 is read, then the value of Pager.dbFileVers[] is set to
+** the value read from the database file.
+**
+** If an IO error occurs, then the IO error is returned to the caller.
+** Otherwise, SQLITE_OK is returned.
+*/
+static int readDbPage(PgHdr *pPg){
+ Pager *pPager = pPg->pPager; /* Pager object associated with page pPg */
+ int rc = SQLITE_OK; /* Return code */
+
+#ifndef SQLITE_OMIT_WAL
+ u32 iFrame = 0; /* Frame of WAL containing pgno */
+
+ assert( pPager->eState>=PAGER_READER && !MEMDB );
+ assert( isOpen(pPager->fd) );
+
+ if( pagerUseWal(pPager) ){
+ rc = sqlite3WalFindFrame(pPager->pWal, pPg->pgno, &iFrame);
+ if( rc ) return rc;
+ }
+ if( iFrame ){
+ rc = sqlite3WalReadFrame(pPager->pWal, iFrame,pPager->pageSize,pPg->pData);
+ }else
+#endif
+ {
+ i64 iOffset = (pPg->pgno-1)*(i64)pPager->pageSize;
+ rc = sqlite3OsRead(pPager->fd, pPg->pData, pPager->pageSize, iOffset);
+ if( rc==SQLITE_IOERR_SHORT_READ ){
+ rc = SQLITE_OK;
+ }
+ }
+
+ if( pPg->pgno==1 ){
+ if( rc ){
+ /* If the read is unsuccessful, set the dbFileVers[] to something
+ ** that will never be a valid file version. dbFileVers[] is a copy
+ ** of bytes 24..39 of the database. Bytes 28..31 should always be
+      ** zero or the size of the database in pages. Bytes 32..35 and 36..39
+ ** should be page numbers which are never 0xffffffff. So filling
+ ** pPager->dbFileVers[] with all 0xff bytes should suffice.
+ **
+ ** For an encrypted database, the situation is more complex: bytes
+ ** 24..39 of the database are white noise. But the probability of
+ ** white noise equaling 16 bytes of 0xff is vanishingly small so
+ ** we should still be ok.
+ */
+ memset(pPager->dbFileVers, 0xff, sizeof(pPager->dbFileVers));
+ }else{
+ u8 *dbFileVers = &((u8*)pPg->pData)[24];
+ memcpy(&pPager->dbFileVers, dbFileVers, sizeof(pPager->dbFileVers));
+ }
+ }
+ PAGER_INCR(sqlite3_pager_readdb_count);
+ PAGER_INCR(pPager->nRead);
+ IOTRACE(("PGIN %p %d\n", pPager, pPg->pgno));
+ PAGERTRACE(("FETCH %d page %d hash(%08x)\n",
+ PAGERID(pPager), pPg->pgno, pager_pagehash(pPg)));
+
+ return rc;
+}
+
+/*
+** Update the value of the change-counter at offsets 24 and 92 in
+** the header and the sqlite version number at offset 96.
+**
+** This is an unconditional update. See also the pager_incr_changecounter()
+** routine which only updates the change-counter if the update is actually
+** needed, as determined by the pPager->changeCountDone state variable.
+*/
+static void pager_write_changecounter(PgHdr *pPg){
+ u32 change_counter;
+
+ /* Increment the value just read and write it back to byte 24. */
+ change_counter = sqlite3Get4byte((u8*)pPg->pPager->dbFileVers)+1;
+ put32bits(((char*)pPg->pData)+24, change_counter);
+
+ /* Also store the SQLite version number in bytes 96..99 and in
+ ** bytes 92..95 store the change counter for which the version number
+ ** is valid. */
+ put32bits(((char*)pPg->pData)+92, change_counter);
+ put32bits(((char*)pPg->pData)+96, SQLITE_VERSION_NUMBER);
+}
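+
+/* Illustrative sketch only (not part of SQLite or of this change): a
+** consequence of the layout maintained above is that the version number
+** at offset 96 describes the writer of the file only while the
+** "version-valid-for" counter at offset 92 still matches the change
+** counter at offset 24. The function name is hypothetical.
+*/
+#if 0
+static int versionNumberIsCurrent(const u8 *aPage1){
+  return sqlite3Get4byte(&aPage1[92])==sqlite3Get4byte(&aPage1[24]);
+}
+#endif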
+
+#ifndef SQLITE_OMIT_WAL
+/*
+** This function is invoked once for each page that has already been
+** written into the log file when a WAL transaction is rolled back.
+** Parameter iPg is the page number of said page. The pCtx argument
+** is actually a pointer to the Pager structure.
+**
+** If page iPg is present in the cache, and has no outstanding references,
+** it is discarded. Otherwise, if there are one or more outstanding
+** references, the page content is reloaded from the database. If such a
+** reload is required and fails, an SQLite error code is returned.
+** Otherwise, SQLITE_OK.
+*/
+static int pagerUndoCallback(void *pCtx, Pgno iPg){
+ int rc = SQLITE_OK;
+ Pager *pPager = (Pager *)pCtx;
+ PgHdr *pPg;
+
+ assert( pagerUseWal(pPager) );
+ pPg = sqlite3PagerLookup(pPager, iPg);
+ if( pPg ){
+ if( sqlite3PcachePageRefcount(pPg)==1 ){
+ sqlite3PcacheDrop(pPg);
+ }else{
+ rc = readDbPage(pPg);
+ if( rc==SQLITE_OK ){
+ pPager->xReiniter(pPg);
+ }
+ sqlite3PagerUnrefNotNull(pPg);
+ }
+ }
+
+ /* Normally, if a transaction is rolled back, any backup processes are
+ ** updated as data is copied out of the rollback journal and into the
+ ** database. This is not generally possible with a WAL database, as
+ ** rollback involves simply truncating the log file. Therefore, if one
+ ** or more frames have already been written to the log (and therefore
+ ** also copied into the backup databases) as part of this transaction,
+ ** the backups must be restarted.
+ */
+ sqlite3BackupRestart(pPager->pBackup);
+
+ return rc;
+}
+
+/*
+** This function is called to rollback a transaction on a WAL database.
+*/
+static int pagerRollbackWal(Pager *pPager){
+ int rc; /* Return Code */
+ PgHdr *pList; /* List of dirty pages to revert */
+
+ /* For all pages in the cache that are currently dirty or have already
+ ** been written (but not committed) to the log file, do one of the
+ ** following:
+ **
+ ** + Discard the cached page (if refcount==0), or
+ ** + Reload page content from the database (if refcount>0).
+ */
+ pPager->dbSize = pPager->dbOrigSize;
+ rc = sqlite3WalUndo(pPager->pWal, pagerUndoCallback, (void *)pPager);
+ pList = sqlite3PcacheDirtyList(pPager->pPCache);
+ while( pList && rc==SQLITE_OK ){
+ PgHdr *pNext = pList->pDirty;
+ rc = pagerUndoCallback((void *)pPager, pList->pgno);
+ pList = pNext;
+ }
+
+ return rc;
+}
+
+/*
+** This function is a wrapper around sqlite3WalFrames(). As well as logging
+** the contents of the list of pages headed by pList (connected by pDirty),
+** this function notifies any active backup processes that the pages have
+** changed.
+**
+** The list of pages passed into this routine is always sorted by page number.
+** Hence, if page 1 appears anywhere on the list, it will be the first page.
+*/
+static int pagerWalFrames(
+ Pager *pPager, /* Pager object */
+ PgHdr *pList, /* List of frames to log */
+ Pgno nTruncate, /* Database size after this commit */
+ int isCommit /* True if this is a commit */
+){
+ int rc; /* Return code */
+ int nList; /* Number of pages in pList */
+ PgHdr *p; /* For looping over pages */
+
+ assert( pPager->pWal );
+ assert( pList );
+#ifdef SQLITE_DEBUG
+  /* Verify that the page list is in ascending order */
+ for(p=pList; p && p->pDirty; p=p->pDirty){
+ assert( p->pgno < p->pDirty->pgno );
+ }
+#endif
+
+ assert( pList->pDirty==0 || isCommit );
+ if( isCommit ){
+ /* If a WAL transaction is being committed, there is no point in writing
+ ** any pages with page numbers greater than nTruncate into the WAL file.
+ ** They will never be read by any client. So remove them from the pDirty
+ ** list here. */
+ PgHdr **ppNext = &pList;
+ nList = 0;
+ for(p=pList; (*ppNext = p)!=0; p=p->pDirty){
+ if( p->pgno<=nTruncate ){
+ ppNext = &p->pDirty;
+ nList++;
+ }
+ }
+ assert( pList );
+ }else{
+ nList = 1;
+ }
+ pPager->aStat[PAGER_STAT_WRITE] += nList;
+
+ if( pList->pgno==1 ) pager_write_changecounter(pList);
+ rc = sqlite3WalFrames(pPager->pWal,
+ pPager->pageSize, pList, nTruncate, isCommit, pPager->walSyncFlags
+ );
+ if( rc==SQLITE_OK && pPager->pBackup ){
+ for(p=pList; p; p=p->pDirty){
+ sqlite3BackupUpdate(pPager->pBackup, p->pgno, (u8 *)p->pData);
+ }
+ }
+
+#ifdef SQLITE_CHECK_PAGES
+ pList = sqlite3PcacheDirtyList(pPager->pPCache);
+ for(p=pList; p; p=p->pDirty){
+ pager_set_pagehash(p);
+ }
+#endif
+
+ return rc;
+}
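+
+/* Illustrative sketch only (not part of SQLite or of this change): the
+** pointer-to-pointer idiom used by pagerWalFrames() above to unlink
+** entries from a singly linked list in one pass. The function name is
+** hypothetical.
+*/
+#if 0
+static PgHdr *keepPagesUpTo(PgHdr *pList, Pgno nMax){
+  PgHdr **ppNext = &pList;   /* Location that holds the next kept link */
+  PgHdr *p;
+  for(p=pList; (*ppNext = p)!=0; p=p->pDirty){
+    if( p->pgno<=nMax ) ppNext = &p->pDirty;   /* Keep p; advance link */
+  }
+  return pList;              /* Skipped entries have been spliced out */
+}
+#endif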
+
+/*
+** Begin a read transaction on the WAL.
+**
+** This routine used to be called "pagerOpenSnapshot()" because it essentially
+** makes a snapshot of the database at the current point in time and preserves
+** that snapshot for use by the reader in spite of concurrent changes by
+** other writers or checkpointers.
+*/
+static int pagerBeginReadTransaction(Pager *pPager){
+ int rc; /* Return code */
+ int changed = 0; /* True if cache must be reset */
+
+ assert( pagerUseWal(pPager) );
+ assert( pPager->eState==PAGER_OPEN || pPager->eState==PAGER_READER );
+
+ /* sqlite3WalEndReadTransaction() was not called for the previous
+ ** transaction in locking_mode=EXCLUSIVE. So call it now. If we
+ ** are in locking_mode=NORMAL and EndRead() was previously called,
+ ** the duplicate call is harmless.
+ */
+ sqlite3WalEndReadTransaction(pPager->pWal);
+
+ rc = sqlite3WalBeginReadTransaction(pPager->pWal, &changed);
+ if( rc!=SQLITE_OK || changed ){
+ pager_reset(pPager);
+ if( USEFETCH(pPager) ) sqlite3OsUnfetch(pPager->fd, 0, 0);
+ }
+
+ return rc;
+}
+#endif
+
+/*
+** This function is called as part of the transition from PAGER_OPEN
+** to PAGER_READER state to determine the size of the database file
+** in pages (assuming the page size currently stored in Pager.pageSize).
+**
+** If no error occurs, SQLITE_OK is returned and the size of the database
+** in pages is stored in *pnPage. Otherwise, an error code (perhaps
+** SQLITE_IOERR_FSTAT) is returned and *pnPage is left unmodified.
+*/
+static int pagerPagecount(Pager *pPager, Pgno *pnPage){
+ Pgno nPage; /* Value to return via *pnPage */
+
+ /* Query the WAL sub-system for the database size. The WalDbsize()
+ ** function returns zero if the WAL is not open (i.e. Pager.pWal==0), or
+ ** if the database size is not available. The database size is not
+ ** available from the WAL sub-system if the log file is empty or
+ ** contains no valid committed transactions.
+ */
+ assert( pPager->eState==PAGER_OPEN );
+ assert( pPager->eLock>=SHARED_LOCK );
+ assert( isOpen(pPager->fd) );
+ assert( pPager->tempFile==0 );
+ nPage = sqlite3WalDbsize(pPager->pWal);
+
+ /* If the number of pages in the database is not available from the
+ ** WAL sub-system, determine the page count based on the size of
+ ** the database file. If the size of the database file is not an
+ ** integer multiple of the page-size, round up the result.
+ */
+ if( nPage==0 && ALWAYS(isOpen(pPager->fd)) ){
+ i64 n = 0; /* Size of db file in bytes */
+ int rc = sqlite3OsFileSize(pPager->fd, &n);
+ if( rc!=SQLITE_OK ){
+ return rc;
+ }
+ nPage = (Pgno)((n+pPager->pageSize-1) / pPager->pageSize);
+ }
+
+ /* If the current number of pages in the file is greater than the
+  ** configured maximum page number, increase the allowed limit so
+ ** that the file can be read.
+ */
+ if( nPage>pPager->mxPgno ){
+ pPager->mxPgno = (Pgno)nPage;
+ }
+
+ *pnPage = nPage;
+ return SQLITE_OK;
+}
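+
+/* Illustrative sketch only (not part of SQLite or of this change): the
+** round-up division used by pagerPagecount() above when the file size is
+** not an exact multiple of the page size. The function name is
+** hypothetical.
+*/
+#if 0
+static Pgno bytesToPages(i64 nByte, int szPage){
+  return (Pgno)((nByte + szPage - 1) / szPage);   /* Partial page counts as 1 */
+}
+#endif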
+
+#ifndef SQLITE_OMIT_WAL
+/*
+** Check if the *-wal file that corresponds to the database opened by pPager
+** exists if the database is not empty, or verify that the *-wal file does
+** not exist (by deleting it) if the database file is empty.
+**
+** If the database is not empty and the *-wal file exists, open the pager
+** in WAL mode. If the database is empty or if no *-wal file exists and
+** if no error occurs, make sure Pager.journalMode is not set to
+** PAGER_JOURNALMODE_WAL.
+**
+** Return SQLITE_OK or an error code.
+**
+** The caller must hold a SHARED lock on the database file to call this
+** function. Because an EXCLUSIVE lock on the db file is required to delete
+** a WAL on a non-empty database, this ensures there is no race condition
+** between the xAccess() below and an xDelete() being executed by some
+** other connection.
+*/
+static int pagerOpenWalIfPresent(Pager *pPager){
+ int rc = SQLITE_OK;
+ assert( pPager->eState==PAGER_OPEN );
+ assert( pPager->eLock>=SHARED_LOCK );
+
+ if( !pPager->tempFile ){
+ int isWal; /* True if WAL file exists */
+ rc = sqlite3OsAccess(
+ pPager->pVfs, pPager->zWal, SQLITE_ACCESS_EXISTS, &isWal
+ );
+ if( rc==SQLITE_OK ){
+ if( isWal ){
+ Pgno nPage; /* Size of the database file */
+
+ rc = pagerPagecount(pPager, &nPage);
+ if( rc ) return rc;
+ if( nPage==0 ){
+ rc = sqlite3OsDelete(pPager->pVfs, pPager->zWal, 0);
+ }else{
+ testcase( sqlite3PcachePagecount(pPager->pPCache)==0 );
+ rc = sqlite3PagerOpenWal(pPager, 0);
+ }
+ }else if( pPager->journalMode==PAGER_JOURNALMODE_WAL ){
+ pPager->journalMode = PAGER_JOURNALMODE_DELETE;
+ }
+ }
+ }
+ return rc;
+}
+#endif
+
+/*
+** Playback savepoint pSavepoint. Or, if pSavepoint==NULL, then playback
+** the entire main journal file. The case pSavepoint==NULL occurs when
+** a ROLLBACK TO command is invoked on a SAVEPOINT that is a transaction
+** savepoint.
+**
+** When pSavepoint is not NULL (meaning a non-transaction savepoint is
+** being rolled back), then the rollback consists of up to three stages,
+** performed in the order specified:
+**
+** * Pages are played back from the main journal starting at byte
+** offset PagerSavepoint.iOffset and continuing to
+** PagerSavepoint.iHdrOffset, or to the end of the main journal
+** file if PagerSavepoint.iHdrOffset is zero.
+**
+** * If PagerSavepoint.iHdrOffset is not zero, then pages are played
+** back starting from the journal header immediately following
+** PagerSavepoint.iHdrOffset to the end of the main journal file.
+**
+** * Pages are then played back from the sub-journal file, starting
+**     with record PagerSavepoint.iSubRec and continuing to the end of
+** the journal file.
+**
+** Throughout the rollback process, each time a page is rolled back, the
+** corresponding bit is set in a bitvec structure (variable pDone in the
+** implementation below). This is used to ensure that a page is only
+** rolled back the first time it is encountered in either journal.
+**
+** If pSavepoint is NULL, then pages are only played back from the main
+** journal file. There is no need for a bitvec in this case.
+**
+** In either case, before playback commences the Pager.dbSize variable
+** is reset to the value that it held at the start of the savepoint
+** (or transaction). No page with a page-number greater than this value
+** is played back. If one is encountered it is simply skipped.
+*/
+static int pagerPlaybackSavepoint(Pager *pPager, PagerSavepoint *pSavepoint){
+ i64 szJ; /* Effective size of the main journal */
+ i64 iHdrOff; /* End of first segment of main-journal records */
+ int rc = SQLITE_OK; /* Return code */
+ Bitvec *pDone = 0; /* Bitvec to ensure pages played back only once */
+
+ assert( pPager->eState!=PAGER_ERROR );
+ assert( pPager->eState>=PAGER_WRITER_LOCKED );
+
+ /* Allocate a bitvec to use to store the set of pages rolled back */
+ if( pSavepoint ){
+ pDone = sqlite3BitvecCreate(pSavepoint->nOrig);
+ if( !pDone ){
+ return SQLITE_NOMEM_BKPT;
+ }
+ }
+
+ /* Set the database size back to the value it was before the savepoint
+ ** being reverted was opened.
+ */
+ pPager->dbSize = pSavepoint ? pSavepoint->nOrig : pPager->dbOrigSize;
+ pPager->changeCountDone = pPager->tempFile;
+
+ if( !pSavepoint && pagerUseWal(pPager) ){
+ return pagerRollbackWal(pPager);
+ }
+
+ /* Use pPager->journalOff as the effective size of the main rollback
+ ** journal. The actual file might be larger than this in
+ ** PAGER_JOURNALMODE_TRUNCATE or PAGER_JOURNALMODE_PERSIST. But anything
+ ** past pPager->journalOff is off-limits to us.
+ */
+ szJ = pPager->journalOff;
+ assert( pagerUseWal(pPager)==0 || szJ==0 );
+
+ /* Begin by rolling back records from the main journal starting at
+ ** PagerSavepoint.iOffset and continuing to the next journal header.
+ ** There might be records in the main journal that have a page number
+ ** greater than the current database size (pPager->dbSize) but those
+ ** will be skipped automatically. Pages are added to pDone as they
+ ** are played back.
+ */
+ if( pSavepoint && !pagerUseWal(pPager) ){
+ iHdrOff = pSavepoint->iHdrOffset ? pSavepoint->iHdrOffset : szJ;
+ pPager->journalOff = pSavepoint->iOffset;
+ while( rc==SQLITE_OK && pPager->journalOff<iHdrOff ){
+ rc = pager_playback_one_page(pPager, &pPager->journalOff, pDone, 1, 1);
+ }
+ assert( rc!=SQLITE_DONE );
+ }else{
+ pPager->journalOff = 0;
+ }
+
+ /* Continue rolling back records out of the main journal starting at
+ ** the first journal header seen and continuing until the effective end
+ ** of the main journal file. Continue to skip out-of-range pages and
+ ** continue adding pages rolled back to pDone.
+ */
+ while( rc==SQLITE_OK && pPager->journalOff<szJ ){
+ u32 ii; /* Loop counter */
+ u32 nJRec = 0; /* Number of Journal Records */
+ u32 dummy;
+ rc = readJournalHdr(pPager, 0, szJ, &nJRec, &dummy);
+ assert( rc!=SQLITE_DONE );
+
+ /*
+ ** The "pPager->journalHdr+JOURNAL_HDR_SZ(pPager)==pPager->journalOff"
+ ** test is related to ticket #2565. See the discussion in the
+ ** pager_playback() function for additional information.
+ */
+ if( nJRec==0
+ && pPager->journalHdr+JOURNAL_HDR_SZ(pPager)==pPager->journalOff
+ ){
+ nJRec = (u32)((szJ - pPager->journalOff)/JOURNAL_PG_SZ(pPager));
+ }
+ for(ii=0; rc==SQLITE_OK && ii<nJRec && pPager->journalOff<szJ; ii++){
+ rc = pager_playback_one_page(pPager, &pPager->journalOff, pDone, 1, 1);
+ }
+ assert( rc!=SQLITE_DONE );
+ }
+ assert( rc!=SQLITE_OK || pPager->journalOff>=szJ );
+
+  /* Finally, roll back pages from the sub-journal.  Pages that were
+ ** previously rolled back out of the main journal (and are hence in pDone)
+ ** will be skipped. Out-of-range pages are also skipped.
+ */
+ if( pSavepoint ){
+ u32 ii; /* Loop counter */
+ i64 offset = (i64)pSavepoint->iSubRec*(4+pPager->pageSize);
+
+ if( pagerUseWal(pPager) ){
+ rc = sqlite3WalSavepointUndo(pPager->pWal, pSavepoint->aWalData);
+ }
+ for(ii=pSavepoint->iSubRec; rc==SQLITE_OK && ii<pPager->nSubRec; ii++){
+ assert( offset==(i64)ii*(4+pPager->pageSize) );
+ rc = pager_playback_one_page(pPager, &offset, pDone, 0, 1);
+ }
+ assert( rc!=SQLITE_DONE );
+ }
+
+ sqlite3BitvecDestroy(pDone);
+ if( rc==SQLITE_OK ){
+ pPager->journalOff = szJ;
+ }
+
+ return rc;
+}
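+
+/* Illustrative sketch only (not part of SQLite or of this change): the
+** bitvec-based play-back-once guard used during the savepoint rollback
+** above, factored out under a hypothetical name. A page is rolled back
+** only the first time it is seen in either journal.
+*/
+#if 0
+static int playbackOnce(Bitvec *pDone, Pgno pgno, int *pSkip){
+  if( pDone==0 ){ *pSkip = 0; return SQLITE_OK; }       /* No dedup in use */
+  if( sqlite3BitvecTest(pDone, pgno) ){ *pSkip = 1; return SQLITE_OK; }
+  *pSkip = 0;                                           /* First sighting */
+  return sqlite3BitvecSet(pDone, pgno);                 /* Record it */
+}
+#endif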
+
+/*
+** Change the maximum number of in-memory pages that are allowed
+** before attempting to recycle clean and unused pages.
+*/
+SQLITE_PRIVATE void sqlite3PagerSetCachesize(Pager *pPager, int mxPage){
+ sqlite3PcacheSetCachesize(pPager->pPCache, mxPage);
+}
+
+/*
+** Change the maximum number of in-memory pages that are allowed
+** before attempting to spill pages to journal.
+*/
+SQLITE_PRIVATE int sqlite3PagerSetSpillsize(Pager *pPager, int mxPage){
+ return sqlite3PcacheSetSpillsize(pPager->pPCache, mxPage);
+}
+
+/*
+** Invoke SQLITE_FCNTL_MMAP_SIZE based on the current value of szMmap.
+*/
+static void pagerFixMaplimit(Pager *pPager){
+#if SQLITE_MAX_MMAP_SIZE>0
+ sqlite3_file *fd = pPager->fd;
+ if( isOpen(fd) && fd->pMethods->iVersion>=3 ){
+ sqlite3_int64 sz;
+ sz = pPager->szMmap;
+ pPager->bUseFetch = (sz>0);
+ setGetterMethod(pPager);
+ sqlite3OsFileControlHint(pPager->fd, SQLITE_FCNTL_MMAP_SIZE, &sz);
+ }
+#endif
+}
+
+/*
+** Change the maximum size of any memory mapping made of the database file.
+*/
+SQLITE_PRIVATE void sqlite3PagerSetMmapLimit(Pager *pPager, sqlite3_int64 szMmap){
+ pPager->szMmap = szMmap;
+ pagerFixMaplimit(pPager);
+}
+
+/*
+** Free as much memory as possible from the pager.
+*/
+SQLITE_PRIVATE void sqlite3PagerShrink(Pager *pPager){
+ sqlite3PcacheShrink(pPager->pPCache);
+}
+
+/*
+** Adjust settings of the pager to those specified in the pgFlags parameter.
+**
+** The "level" in pgFlags & PAGER_SYNCHRONOUS_MASK sets the robustness
+** of the database to damage due to OS crashes or power failures by
+** changing the number of sync()s when writing the journals.
+** There are four levels:
+**
+** OFF sqlite3OsSync() is never called. This is the default
+** for temporary and transient files.
+**
+** NORMAL The journal is synced once before writes begin on the
+** database. This is normally adequate protection, but
+** it is theoretically possible, though very unlikely,
+**           that an inopportune power failure could leave the journal
+** in a state which would cause damage to the database
+** when it is rolled back.
+**
+** FULL The journal is synced twice before writes begin on the
+** database (with some additional information - the nRec field
+** of the journal header - being written in between the two
+** syncs). If we assume that writing a
+** single disk sector is atomic, then this mode provides
+** assurance that the journal will not be corrupted to the
+** point of causing damage to the database during rollback.
+**
+**    EXTRA  This is like FULL except that it also syncs the directory
+** that contains the rollback journal after the rollback
+** journal is unlinked.
+**
+** The above is for a rollback-journal mode. For WAL mode, OFF continues
+** to mean that no syncs ever occur. NORMAL means that the WAL is synced
+** prior to the start of checkpoint and that the database file is synced
+** at the conclusion of the checkpoint if the entire content of the WAL
+** was written back into the database. But no sync operations occur for
+** an ordinary commit in NORMAL mode with WAL. FULL means that the WAL
+** file is synced following each commit operation, in addition to the
+** syncs associated with NORMAL. There is no difference between FULL
+** and EXTRA for WAL mode.
+**
+** Do not confuse synchronous=FULL with SQLITE_SYNC_FULL. The
+** SQLITE_SYNC_FULL macro means to use the MacOSX-style full-fsync
+** using fcntl(F_FULLFSYNC). SQLITE_SYNC_NORMAL means to do an
+** ordinary fsync() call. There is no difference between SQLITE_SYNC_FULL
+** and SQLITE_SYNC_NORMAL on platforms other than MacOSX. But the
+** synchronous=FULL versus synchronous=NORMAL setting determines when
+** the xSync primitive is called and is relevant to all platforms.
+**
+** Numeric values associated with these states are OFF==1, NORMAL==2,
+** and FULL==3.
+*/
+#ifndef SQLITE_OMIT_PAGER_PRAGMAS
+SQLITE_PRIVATE void sqlite3PagerSetFlags(
+ Pager *pPager, /* The pager to set safety level for */
+ unsigned pgFlags /* Various flags */
+){
+ unsigned level = pgFlags & PAGER_SYNCHRONOUS_MASK;
+ if( pPager->tempFile ){
+ pPager->noSync = 1;
+ pPager->fullSync = 0;
+ pPager->extraSync = 0;
+ }else{
+ pPager->noSync = level==PAGER_SYNCHRONOUS_OFF ?1:0;
+ pPager->fullSync = level>=PAGER_SYNCHRONOUS_FULL ?1:0;
+ pPager->extraSync = level==PAGER_SYNCHRONOUS_EXTRA ?1:0;
+ }
+ if( pPager->noSync ){
+ pPager->syncFlags = 0;
+ }else if( pgFlags & PAGER_FULLFSYNC ){
+ pPager->syncFlags = SQLITE_SYNC_FULL;
+ }else{
+ pPager->syncFlags = SQLITE_SYNC_NORMAL;
+ }
+ pPager->walSyncFlags = (pPager->syncFlags<<2);
+ if( pPager->fullSync ){
+ pPager->walSyncFlags |= pPager->syncFlags;
+ }
+ if( (pgFlags & PAGER_CKPT_FULLFSYNC) && !pPager->noSync ){
+ pPager->walSyncFlags |= (SQLITE_SYNC_FULL<<2);
+ }
+ if( pgFlags & PAGER_CACHESPILL ){
+ pPager->doNotSpill &= ~SPILLFLAG_OFF;
+ }else{
+ pPager->doNotSpill |= SPILLFLAG_OFF;
+ }
+}
+#endif
+
+/*
+** The following global variable is incremented whenever the library
+** attempts to open a temporary file. This information is used for
+** testing and analysis only.
+*/
+#ifdef SQLITE_TEST
+SQLITE_API int sqlite3_opentemp_count = 0;
+#endif
+
+/*
+** Open a temporary file.
+**
+** Write the file descriptor into *pFile. Return SQLITE_OK on success
+** or some other error code if we fail. The OS will automatically
+** delete the temporary file when it is closed.
+**
+** The flags passed to the VFS layer xOpen() call are those specified
+** by parameter vfsFlags ORed with the following:
+**
+** SQLITE_OPEN_READWRITE
+** SQLITE_OPEN_CREATE
+** SQLITE_OPEN_EXCLUSIVE
+** SQLITE_OPEN_DELETEONCLOSE
+*/
+static int pagerOpentemp(
+ Pager *pPager, /* The pager object */
+ sqlite3_file *pFile, /* Write the file descriptor here */
+ int vfsFlags /* Flags passed through to the VFS */
+){
+ int rc; /* Return code */
+
+#ifdef SQLITE_TEST
+ sqlite3_opentemp_count++; /* Used for testing and analysis only */
+#endif
+
+ vfsFlags |= SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE |
+ SQLITE_OPEN_EXCLUSIVE | SQLITE_OPEN_DELETEONCLOSE;
+ rc = sqlite3OsOpen(pPager->pVfs, 0, pFile, vfsFlags, 0);
+ assert( rc!=SQLITE_OK || isOpen(pFile) );
+ return rc;
+}
+
+/*
+** Set the busy handler function.
+**
+** The pager invokes the busy-handler if sqlite3OsLock() returns
+** SQLITE_BUSY when trying to upgrade from no-lock to a SHARED lock,
+** or when trying to upgrade from a RESERVED lock to an EXCLUSIVE
+** lock. It does *not* invoke the busy handler when upgrading from
+** SHARED to RESERVED, or when upgrading from SHARED to EXCLUSIVE
+** (which occurs during hot-journal rollback). Summary:
+**
+** Transition | Invokes xBusyHandler
+** --------------------------------------------------------
+** NO_LOCK -> SHARED_LOCK | Yes
+** SHARED_LOCK -> RESERVED_LOCK | No
+** SHARED_LOCK -> EXCLUSIVE_LOCK | No
+** RESERVED_LOCK -> EXCLUSIVE_LOCK | Yes
+**
+** If the busy-handler callback returns non-zero, the lock is
+** retried. If it returns zero, then the SQLITE_BUSY error is
+** returned to the caller of the pager API function.
+*/
+SQLITE_PRIVATE void sqlite3PagerSetBusyHandler(
+ Pager *pPager, /* Pager object */
+ int (*xBusyHandler)(void *), /* Pointer to busy-handler function */
+ void *pBusyHandlerArg /* Argument to pass to xBusyHandler */
+){
+ void **ap;
+ pPager->xBusyHandler = xBusyHandler;
+ pPager->pBusyHandlerArg = pBusyHandlerArg;
+ ap = (void **)&pPager->xBusyHandler;
+ assert( ((int(*)(void *))(ap[0]))==xBusyHandler );
+ assert( ap[1]==pBusyHandlerArg );
+ sqlite3OsFileControlHint(pPager->fd, SQLITE_FCNTL_BUSYHANDLER, (void *)ap);
+}
+
+/*
+** Change the page size used by the Pager object. The new page size
+** is passed in *pPageSize.
+**
+** If the pager is in the error state when this function is called, it
+** is a no-op. The value returned is the error state error code (i.e.
+** one of SQLITE_IOERR, an SQLITE_IOERR_xxx sub-code or SQLITE_FULL).
+**
+** Otherwise, if all of the following are true:
+**
+** * the new page size (value of *pPageSize) is valid (a power
+** of two between 512 and SQLITE_MAX_PAGE_SIZE, inclusive), and
+**
+** * there are no outstanding page references, and
+**
+** * the database is either not an in-memory database or it is
+** an in-memory database that currently consists of zero pages.
+**
+** then the pager object page size is set to *pPageSize.
+**
+** If the page size is changed, then this function uses sqlite3PageMalloc()
+** to obtain a new Pager.pTmpSpace buffer. If this allocation attempt
+** fails, SQLITE_NOMEM is returned and the page size remains unchanged.
+** In all other cases, SQLITE_OK is returned.
+**
+** If the page size is not changed, either because one of the enumerated
+** conditions above is not true, the pager was in error state when this
+** function was called, or because the memory allocation attempt failed,
+** then *pPageSize is set to the old, retained page size before returning.
+*/
+SQLITE_PRIVATE int sqlite3PagerSetPagesize(Pager *pPager, u32 *pPageSize, int nReserve){
+ int rc = SQLITE_OK;
+
+ /* It is not possible to do a full assert_pager_state() here, as this
+ ** function may be called from within PagerOpen(), before the state
+ ** of the Pager object is internally consistent.
+ **
+ ** At one point this function returned an error if the pager was in
+ ** PAGER_ERROR state. But since PAGER_ERROR state guarantees that
+ ** there is at least one outstanding page reference, this function
+ ** is a no-op for that case anyhow.
+ */
+
+ u32 pageSize = *pPageSize;
+ assert( pageSize==0 || (pageSize>=512 && pageSize<=SQLITE_MAX_PAGE_SIZE) );
+ if( (pPager->memDb==0 || pPager->dbSize==0)
+ && sqlite3PcacheRefCount(pPager->pPCache)==0
+ && pageSize && pageSize!=(u32)pPager->pageSize
+ ){
+ char *pNew = NULL; /* New temp space */
+ i64 nByte = 0;
+
+ if( pPager->eState>PAGER_OPEN && isOpen(pPager->fd) ){
+ rc = sqlite3OsFileSize(pPager->fd, &nByte);
+ }
+ if( rc==SQLITE_OK ){
+ /* 8 bytes of zeroed overrun space is sufficient so that the b-tree
+ * cell header parser will never run off the end of the allocation */
+ pNew = (char *)sqlite3PageMalloc(pageSize+8);
+ if( !pNew ){
+ rc = SQLITE_NOMEM_BKPT;
+ }else{
+ memset(pNew+pageSize, 0, 8);
+ }
+ }
+
+ if( rc==SQLITE_OK ){
+ pager_reset(pPager);
+ rc = sqlite3PcacheSetPageSize(pPager->pPCache, pageSize);
+ }
+ if( rc==SQLITE_OK ){
+ sqlite3PageFree(pPager->pTmpSpace);
+ pPager->pTmpSpace = pNew;
+ pPager->dbSize = (Pgno)((nByte+pageSize-1)/pageSize);
+ pPager->pageSize = pageSize;
+ }else{
+ sqlite3PageFree(pNew);
+ }
+ }
+
+ *pPageSize = pPager->pageSize;
+ if( rc==SQLITE_OK ){
+ if( nReserve<0 ) nReserve = pPager->nReserve;
+ assert( nReserve>=0 && nReserve<1000 );
+ pPager->nReserve = (i16)nReserve;
+ pagerFixMaplimit(pPager);
+ }
+ return rc;
+}
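+
+/* Illustrative sketch only (not part of SQLite or of this change):
+** because *pPageSize is both input and output above, a caller can
+** request a size and then observe the size actually in effect. The
+** wrapper name is hypothetical.
+*/
+#if 0
+static u32 requestPageSize(Pager *pPager, u32 want){
+  u32 pgsz = want;
+  (void)sqlite3PagerSetPagesize(pPager, &pgsz, -1);  /* -1: keep nReserve */
+  return pgsz;        /* Still the old size if the change was refused */
+}
+#endif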
+
+/*
+** Return a pointer to the "temporary page" buffer held internally
+** by the pager. This is a buffer that is big enough to hold the
+** entire content of a database page. This buffer is used internally
+** during rollback and will be overwritten whenever a rollback
+** occurs. But other modules are free to use it too, as long as
+** no rollbacks are happening.
+*/
+SQLITE_PRIVATE void *sqlite3PagerTempSpace(Pager *pPager){
+ return pPager->pTmpSpace;
+}
+
+/*
+** Attempt to set the maximum database page count if mxPage is positive.
+** Make no changes if mxPage is zero or negative. And never reduce the
+** maximum page count below the current size of the database.
+**
+** Regardless of mxPage, return the current maximum page count.
+*/
+SQLITE_PRIVATE Pgno sqlite3PagerMaxPageCount(Pager *pPager, Pgno mxPage){
+ if( mxPage>0 ){
+ pPager->mxPgno = mxPage;
+ }
+ assert( pPager->eState!=PAGER_OPEN ); /* Called only by OP_MaxPgcnt */
+ /* assert( pPager->mxPgno>=pPager->dbSize ); */
+ /* OP_MaxPgcnt ensures that the parameter passed to this function is not
+ ** less than the total number of valid pages in the database. But this
+ ** may be less than Pager.dbSize, and so the assert() above is not valid */
+ return pPager->mxPgno;
+}
+
+/*
+** The following set of routines are used to disable the simulated
+** I/O error mechanism. These routines are used to avoid simulated
+** errors in places where we do not care about errors.
+**
+** Unless -DSQLITE_TEST=1 is used, these routines are all no-ops
+** and generate no code.
+*/
+#ifdef SQLITE_TEST
+SQLITE_API extern int sqlite3_io_error_pending;
+SQLITE_API extern int sqlite3_io_error_hit;
+static int saved_cnt;
+void disable_simulated_io_errors(void){
+ saved_cnt = sqlite3_io_error_pending;
+ sqlite3_io_error_pending = -1;
+}
+void enable_simulated_io_errors(void){
+ sqlite3_io_error_pending = saved_cnt;
+}
+#else
+# define disable_simulated_io_errors()
+# define enable_simulated_io_errors()
+#endif
+
+/*
+** Read the first N bytes from the beginning of the file into memory
+** that pDest points to.
+**
+** If the pager was opened on a transient file (zFilename==""), or
+** opened on a file less than N bytes in size, the output buffer is
+** zeroed and SQLITE_OK returned. The rationale for this is that this
+** function is used to read database headers, and a new transient or
+** zero-sized database has a header that consists entirely of zeroes.
+**
+** If any IO error apart from SQLITE_IOERR_SHORT_READ is encountered,
+** the error code is returned to the caller and the contents of the
+** output buffer are undefined.
+*/
+SQLITE_PRIVATE int sqlite3PagerReadFileheader(Pager *pPager, int N, unsigned char *pDest){
+ int rc = SQLITE_OK;
+ memset(pDest, 0, N);
+ assert( isOpen(pPager->fd) || pPager->tempFile );
+
+ /* This routine is only called by btree immediately after creating
+ ** the Pager object. There has not been an opportunity to transition
+ ** to WAL mode yet.
+ */
+ assert( !pagerUseWal(pPager) );
+
+ if( isOpen(pPager->fd) ){
+ IOTRACE(("DBHDR %p 0 %d\n", pPager, N))
+ rc = sqlite3OsRead(pPager->fd, pDest, N, 0);
+ if( rc==SQLITE_IOERR_SHORT_READ ){
+ rc = SQLITE_OK;
+ }
+ }
+ return rc;
+}
+
+/*
+** This function may only be called when a read-transaction is open on
+** the pager. It returns the total number of pages in the database.
+**
+** However, if the file is between 1 and <page-size> bytes in size, then
+** this is considered a 1-page file.
+*/
+SQLITE_PRIVATE void sqlite3PagerPagecount(Pager *pPager, int *pnPage){
+ assert( pPager->eState>=PAGER_READER );
+ assert( pPager->eState!=PAGER_WRITER_FINISHED );
+ *pnPage = (int)pPager->dbSize;
+}
+
+
+/*
+** Try to obtain a lock of type locktype on the database file. If
+** a similar or greater lock is already held, this function is a no-op
+** (returning SQLITE_OK immediately).
+**
+** Otherwise, attempt to obtain the lock using sqlite3OsLock(). Invoke
+** the busy callback if the lock is currently not available. Repeat
+** until the busy callback returns false or until the attempt to
+** obtain the lock succeeds.
+**
+** Return SQLITE_OK on success and an error code if we cannot obtain
+** the lock. If the lock is obtained successfully, set the Pager.state
+** variable to locktype before returning.
+*/
+static int pager_wait_on_lock(Pager *pPager, int locktype){
+ int rc; /* Return code */
+
+ /* Check that this is either a no-op (because the requested lock is
+ ** already held), or one of the transitions that the busy-handler
+ ** may be invoked during, according to the comment above
+  ** sqlite3PagerSetBusyHandler().
+ */
+ assert( (pPager->eLock>=locktype)
+ || (pPager->eLock==NO_LOCK && locktype==SHARED_LOCK)
+ || (pPager->eLock==RESERVED_LOCK && locktype==EXCLUSIVE_LOCK)
+ );
+
+ do {
+ rc = pagerLockDb(pPager, locktype);
+ }while( rc==SQLITE_BUSY && pPager->xBusyHandler(pPager->pBusyHandlerArg) );
+ return rc;
+}
+
+/*
+** Function assertTruncateConstraint(pPager) checks that one of the
+** following is true for all dirty pages currently in the page-cache:
+**
+** a) The page number is less than or equal to the size of the
+** current database image, in pages, OR
+**
+** b) if the page content were written at this time, it would not
+** be necessary to write the current content out to the sub-journal
+** (as determined by function subjRequiresPage()).
+**
+** If the condition asserted by this function were not true, and the
+** dirty page were to be discarded from the cache via the pagerStress()
+** routine, pagerStress() would not write the current page content to
+** the database file. If a savepoint transaction were rolled back after
+** this happened, the correct behavior would be to restore the current
+** content of the page. However, since this content is not present in either
+** the database file or the portion of the rollback journal and
+** sub-journal rolled back, the content could not be restored and the
+** database image would become corrupt. It is therefore fortunate that
+** this circumstance cannot arise.
+*/
+#if defined(SQLITE_DEBUG)
+static void assertTruncateConstraintCb(PgHdr *pPg){
+ assert( pPg->flags&PGHDR_DIRTY );
+ assert( !subjRequiresPage(pPg) || pPg->pgno<=pPg->pPager->dbSize );
+}
+static void assertTruncateConstraint(Pager *pPager){
+ sqlite3PcacheIterateDirty(pPager->pPCache, assertTruncateConstraintCb);
+}
+#else
+# define assertTruncateConstraint(pPager)
+#endif
+
+/*
+** Truncate the in-memory database file image to nPage pages. This
+** function does not actually modify the database file on disk. It
+** just sets the internal state of the pager object so that the
+** truncation will be done when the current transaction is committed.
+**
+** This function is only called right before committing a transaction.
+** Once this function has been called, the transaction must either be
+** rolled back or committed. It is not safe to call this function and
+** then continue writing to the database.
+*/
+SQLITE_PRIVATE void sqlite3PagerTruncateImage(Pager *pPager, Pgno nPage){
+ assert( pPager->dbSize>=nPage );
+ assert( pPager->eState>=PAGER_WRITER_CACHEMOD );
+ pPager->dbSize = nPage;
+
+ /* At one point the code here called assertTruncateConstraint() to
+ ** ensure that all pages being truncated away by this operation are,
+ ** if one or more savepoints are open, present in the savepoint
+ ** journal so that they can be restored if the savepoint is rolled
+ ** back. This is no longer necessary as this function is now only
+ ** called right before committing a transaction. So although the
+ ** Pager object may still have open savepoints (Pager.nSavepoint!=0),
+ ** they cannot be rolled back. So the assertTruncateConstraint() call
+ ** is no longer correct. */
+}
+
+
+/*
+** This function is called before attempting a hot-journal rollback. It
+** syncs the journal file to disk, then sets pPager->journalHdr to the
+** size of the journal file so that the pager_playback() routine knows
+** that the entire journal file has been synced.
+**
+** Syncing a hot-journal to disk before attempting to roll it back ensures
+** that if a power-failure occurs during the rollback, the process that
+** attempts rollback following system recovery sees the same journal
+** content as this process.
+**
+** If everything goes as planned, SQLITE_OK is returned. Otherwise,
+** an SQLite error code is returned.
+*/
+static int pagerSyncHotJournal(Pager *pPager){
+ int rc = SQLITE_OK;
+ if( !pPager->noSync ){
+ rc = sqlite3OsSync(pPager->jfd, SQLITE_SYNC_NORMAL);
+ }
+ if( rc==SQLITE_OK ){
+ rc = sqlite3OsFileSize(pPager->jfd, &pPager->journalHdr);
+ }
+ return rc;
+}
+
+#if SQLITE_MAX_MMAP_SIZE>0
+/*
+** Obtain a reference to a memory mapped page object for page number pgno.
+** The new object will use the pointer pData, obtained from xFetch().
+** If successful, set *ppPage to point to the new page reference
+** and return SQLITE_OK. Otherwise, return an SQLite error code and set
+** *ppPage to zero.
+**
+** Page references obtained by calling this function should be released
+** by calling pagerReleaseMapPage().
+*/
+static int pagerAcquireMapPage(
+ Pager *pPager, /* Pager object */
+ Pgno pgno, /* Page number */
+ void *pData, /* xFetch()'d data for this page */
+ PgHdr **ppPage /* OUT: Acquired page object */
+){
+ PgHdr *p; /* Memory mapped page to return */
+
+ if( pPager->pMmapFreelist ){
+ *ppPage = p = pPager->pMmapFreelist;
+ pPager->pMmapFreelist = p->pDirty;
+ p->pDirty = 0;
+ assert( pPager->nExtra>=8 );
+ memset(p->pExtra, 0, 8);
+ }else{
+ *ppPage = p = (PgHdr *)sqlite3MallocZero(sizeof(PgHdr) + pPager->nExtra);
+ if( p==0 ){
+ sqlite3OsUnfetch(pPager->fd, (i64)(pgno-1) * pPager->pageSize, pData);
+ return SQLITE_NOMEM_BKPT;
+ }
+ p->pExtra = (void *)&p[1];
+ p->flags = PGHDR_MMAP;
+ p->nRef = 1;
+ p->pPager = pPager;
+ }
+
+ assert( p->pExtra==(void *)&p[1] );
+ assert( p->pPage==0 );
+ assert( p->flags==PGHDR_MMAP );
+ assert( p->pPager==pPager );
+ assert( p->nRef==1 );
+
+ p->pgno = pgno;
+ p->pData = pData;
+ pPager->nMmapOut++;
+
+ return SQLITE_OK;
+}
+#endif
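+
+/* For context: the pager only hands out memory-mapped page objects when
+** mmap I/O is enabled. At the application level that is normally done with
+** the "mmap_size" pragma; a hedged usage sketch (the 256MiB limit is an
+** illustrative value):
+**
+**   sqlite3_exec(db, "PRAGMA mmap_size=268435456;", 0, 0, 0);
+*/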
+
+/*
+** Release a reference to page pPg. pPg must have been returned by an
+** earlier call to pagerAcquireMapPage().
+*/
+static void pagerReleaseMapPage(PgHdr *pPg){
+ Pager *pPager = pPg->pPager;
+ pPager->nMmapOut--;
+ pPg->pDirty = pPager->pMmapFreelist;
+ pPager->pMmapFreelist = pPg;
+
+ assert( pPager->fd->pMethods->iVersion>=3 );
+ sqlite3OsUnfetch(pPager->fd, (i64)(pPg->pgno-1)*pPager->pageSize, pPg->pData);
+}
+
+/*
+** Free all PgHdr objects stored in the Pager.pMmapFreelist list.
+*/
+static void pagerFreeMapHdrs(Pager *pPager){
+ PgHdr *p;
+ PgHdr *pNext;
+ for(p=pPager->pMmapFreelist; p; p=pNext){
+ pNext = p->pDirty;
+ sqlite3_free(p);
+ }
+}
+
+/* Verify that the database file has not been deleted or renamed out from
+** under the pager. Return SQLITE_OK if the database is still where it ought
+** to be on disk. Return non-zero (SQLITE_READONLY_DBMOVED or some other error
+** code from sqlite3OsAccess()) if the database has gone missing.
+*/
+static int databaseIsUnmoved(Pager *pPager){
+ int bHasMoved = 0;
+ int rc;
+
+ if( pPager->tempFile ) return SQLITE_OK;
+ if( pPager->dbSize==0 ) return SQLITE_OK;
+ assert( pPager->zFilename && pPager->zFilename[0] );
+ rc = sqlite3OsFileControl(pPager->fd, SQLITE_FCNTL_HAS_MOVED, &bHasMoved);
+ if( rc==SQLITE_NOTFOUND ){
+    /* If the HAS_MOVED file-control is unimplemented, assume that the file
+    ** has not been moved. That is the historical behavior of SQLite: prior
+    ** to version 3.8.3, it never checked. */
+ rc = SQLITE_OK;
+ }else if( rc==SQLITE_OK && bHasMoved ){
+ rc = SQLITE_READONLY_DBMOVED;
+ }
+ return rc;
+}
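+
+/* Applications can perform the same check through the public API. A minimal
+** sketch, assuming the schema name "main" and an open handle db:
+**
+**   int bMoved = 0;
+**   int rc = sqlite3_file_control(db, "main", SQLITE_FCNTL_HAS_MOVED, &bMoved);
+**   if( rc==SQLITE_OK && bMoved ){
+**     // the database file has been renamed or deleted since it was opened
+**   }
+*/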
+
+
+/*
+** Shutdown the page cache. Free all memory and close all files.
+**
+** If a transaction is in progress when this routine is called, that
+** transaction is rolled back. All outstanding pages are invalidated
+** and their memory is freed. Any attempt to use a page associated
+** with this page cache after this function returns will likely
+** result in a coredump.
+**
+** This function always succeeds. If a transaction is active, an attempt
+** is made to roll it back. If an error occurs during the rollback,
+** a hot journal may be left in the filesystem, but no error is returned
+** to the caller.
+*/
+SQLITE_PRIVATE int sqlite3PagerClose(Pager *pPager, sqlite3 *db){
+ u8 *pTmp = (u8*)pPager->pTmpSpace;
+ assert( db || pagerUseWal(pPager)==0 );
+ assert( assert_pager_state(pPager) );
+ disable_simulated_io_errors();
+ sqlite3BeginBenignMalloc();
+ pagerFreeMapHdrs(pPager);
+ /* pPager->errCode = 0; */
+ pPager->exclusiveMode = 0;
+#ifndef SQLITE_OMIT_WAL
+ {
+ u8 *a = 0;
+ assert( db || pPager->pWal==0 );
+ if( db && 0==(db->flags & SQLITE_NoCkptOnClose)
+ && SQLITE_OK==databaseIsUnmoved(pPager)
+ ){
+ a = pTmp;
+ }
+ sqlite3WalClose(pPager->pWal, db, pPager->walSyncFlags, pPager->pageSize,a);
+ pPager->pWal = 0;
+ }
+#endif
+ pager_reset(pPager);
+ if( MEMDB ){
+ pager_unlock(pPager);
+ }else{
+ /* If it is open, sync the journal file before calling UnlockAndRollback.
+ ** If this is not done, then an unsynced portion of the open journal
+ ** file may be played back into the database. If a power failure occurs
+ ** while this is happening, the database could become corrupt.
+ **
+ ** If an error occurs while trying to sync the journal, shift the pager
+ ** into the ERROR state. This causes UnlockAndRollback to unlock the
+ ** database and close the journal file without attempting to roll it
+ ** back or finalize it. The next database user will have to do hot-journal
+ ** rollback before accessing the database file.
+ */
+ if( isOpen(pPager->jfd) ){
+ pager_error(pPager, pagerSyncHotJournal(pPager));
+ }
+ pagerUnlockAndRollback(pPager);
+ }
+ sqlite3EndBenignMalloc();
+ enable_simulated_io_errors();
+ PAGERTRACE(("CLOSE %d\n", PAGERID(pPager)));
+ IOTRACE(("CLOSE %p\n", pPager))
+ sqlite3OsClose(pPager->jfd);
+ sqlite3OsClose(pPager->fd);
+ sqlite3PageFree(pTmp);
+ sqlite3PcacheClose(pPager->pPCache);
+ assert( !pPager->aSavepoint && !pPager->pInJournal );
+ assert( !isOpen(pPager->jfd) && !isOpen(pPager->sjfd) );
+
+ sqlite3_free(pPager);
+ return SQLITE_OK;
+}
+
+#if !defined(NDEBUG) || defined(SQLITE_TEST)
+/*
+** Return the page number for page pPg.
+*/
+SQLITE_PRIVATE Pgno sqlite3PagerPagenumber(DbPage *pPg){
+ return pPg->pgno;
+}
+#endif
+
+/*
+** Increment the reference count for page pPg.
+*/
+SQLITE_PRIVATE void sqlite3PagerRef(DbPage *pPg){
+ sqlite3PcacheRef(pPg);
+}
+
+/*
+** Sync the journal. In other words, make sure all the pages that have
+** been written to the journal have actually reached the surface of the
+** disk and can be restored in the event of a hot-journal rollback.
+**
+** If the Pager.noSync flag is set, then this function is a no-op.
+** Otherwise, the actions required depend on the journal-mode and the
+** device characteristics of the file-system, as follows:
+**
+** * If the journal file is an in-memory journal file, no action need
+** be taken.
+**
+** * Otherwise, if the device does not support the SAFE_APPEND property,
+** then the nRec field of the most recently written journal header
+** is updated to contain the number of journal records that have
+** been written following it. If the pager is operating in full-sync
+** mode, then the journal file is synced before this field is updated.
+**
+**    * If the device does not support the SEQUENTIAL property, then the
+**      journal file is synced.
+**
+** Or, in pseudo-code:
+**
+** if( NOT <in-memory journal> ){
+** if( NOT SAFE_APPEND ){
+** if( <full-sync mode> ) xSync(<journal file>);
+** <update nRec field>
+** }
+** if( NOT SEQUENTIAL ) xSync(<journal file>);
+** }
+**
+** If successful, this routine clears the PGHDR_NEED_SYNC flag of every
+** page currently held in memory before returning SQLITE_OK. If an IO
+** error is encountered, then the IO error code is returned to the caller.
+*/
+static int syncJournal(Pager *pPager, int newHdr){
+ int rc; /* Return code */
+
+ assert( pPager->eState==PAGER_WRITER_CACHEMOD
+ || pPager->eState==PAGER_WRITER_DBMOD
+ );
+ assert( assert_pager_state(pPager) );
+ assert( !pagerUseWal(pPager) );
+
+ rc = sqlite3PagerExclusiveLock(pPager);
+ if( rc!=SQLITE_OK ) return rc;
+
+ if( !pPager->noSync ){
+ assert( !pPager->tempFile );
+ if( isOpen(pPager->jfd) && pPager->journalMode!=PAGER_JOURNALMODE_MEMORY ){
+ const int iDc = sqlite3OsDeviceCharacteristics(pPager->fd);
+ assert( isOpen(pPager->jfd) );
+
+ if( 0==(iDc&SQLITE_IOCAP_SAFE_APPEND) ){
+ /* This block deals with an obscure problem. If the last connection
+ ** that wrote to this database was operating in persistent-journal
+ ** mode, then the journal file may at this point actually be larger
+ ** than Pager.journalOff bytes. If the next thing in the journal
+ ** file happens to be a journal-header (written as part of the
+ ** previous connection's transaction), and a crash or power-failure
+ ** occurs after nRec is updated but before this connection writes
+ ** anything else to the journal file (or commits/rolls back its
+ ** transaction), then SQLite may become confused when doing the
+ ** hot-journal rollback following recovery. It may roll back all
+      ** of this connection's data, then proceed to rolling back the old,
+ ** out-of-date data that follows it. Database corruption.
+ **
+ ** To work around this, if the journal file does appear to contain
+ ** a valid header following Pager.journalOff, then write a 0x00
+ ** byte to the start of it to prevent it from being recognized.
+ **
+ ** Variable iNextHdrOffset is set to the offset at which this
+ ** problematic header will occur, if it exists. aMagic is used
+ ** as a temporary buffer to inspect the first couple of bytes of
+ ** the potential journal header.
+ */
+ i64 iNextHdrOffset;
+ u8 aMagic[8];
+ u8 zHeader[sizeof(aJournalMagic)+4];
+
+ memcpy(zHeader, aJournalMagic, sizeof(aJournalMagic));
+ put32bits(&zHeader[sizeof(aJournalMagic)], pPager->nRec);
+
+ iNextHdrOffset = journalHdrOffset(pPager);
+ rc = sqlite3OsRead(pPager->jfd, aMagic, 8, iNextHdrOffset);
+ if( rc==SQLITE_OK && 0==memcmp(aMagic, aJournalMagic, 8) ){
+ static const u8 zerobyte = 0;
+ rc = sqlite3OsWrite(pPager->jfd, &zerobyte, 1, iNextHdrOffset);
+ }
+ if( rc!=SQLITE_OK && rc!=SQLITE_IOERR_SHORT_READ ){
+ return rc;
+ }
+
+ /* Write the nRec value into the journal file header. If in
+ ** full-synchronous mode, sync the journal first. This ensures that
+ ** all data has really hit the disk before nRec is updated to mark
+ ** it as a candidate for rollback.
+ **
+ ** This is not required if the persistent media supports the
+ ** SAFE_APPEND property. Because in this case it is not possible
+ ** for garbage data to be appended to the file, the nRec field
+ ** is populated with 0xFFFFFFFF when the journal header is written
+ ** and never needs to be updated.
+ */
+ if( pPager->fullSync && 0==(iDc&SQLITE_IOCAP_SEQUENTIAL) ){
+ PAGERTRACE(("SYNC journal of %d\n", PAGERID(pPager)));
+ IOTRACE(("JSYNC %p\n", pPager))
+ rc = sqlite3OsSync(pPager->jfd, pPager->syncFlags);
+ if( rc!=SQLITE_OK ) return rc;
+ }
+ IOTRACE(("JHDR %p %lld\n", pPager, pPager->journalHdr));
+ rc = sqlite3OsWrite(
+ pPager->jfd, zHeader, sizeof(zHeader), pPager->journalHdr
+ );
+ if( rc!=SQLITE_OK ) return rc;
+ }
+ if( 0==(iDc&SQLITE_IOCAP_SEQUENTIAL) ){
+ PAGERTRACE(("SYNC journal of %d\n", PAGERID(pPager)));
+ IOTRACE(("JSYNC %p\n", pPager))
+ rc = sqlite3OsSync(pPager->jfd, pPager->syncFlags|
+ (pPager->syncFlags==SQLITE_SYNC_FULL?SQLITE_SYNC_DATAONLY:0)
+ );
+ if( rc!=SQLITE_OK ) return rc;
+ }
+
+ pPager->journalHdr = pPager->journalOff;
+ if( newHdr && 0==(iDc&SQLITE_IOCAP_SAFE_APPEND) ){
+ pPager->nRec = 0;
+ rc = writeJournalHdr(pPager);
+ if( rc!=SQLITE_OK ) return rc;
+ }
+ }else{
+ pPager->journalHdr = pPager->journalOff;
+ }
+ }
+
+ /* Unless the pager is in noSync mode, the journal file was just
+ ** successfully synced. Either way, clear the PGHDR_NEED_SYNC flag on
+ ** all pages.
+ */
+ sqlite3PcacheClearSyncFlags(pPager->pPCache);
+ pPager->eState = PAGER_WRITER_DBMOD;
+ assert( assert_pager_state(pPager) );
+ return SQLITE_OK;
+}
+
+/*
+** The argument is the first in a linked list of dirty pages connected
+** by the PgHdr.pDirty pointer. This function writes each one of the
+** in-memory pages in the list to the database file. The argument may
+** be NULL, representing an empty list. In this case this function is
+** a no-op.
+**
+** The pager must hold at least a RESERVED lock when this function
+** is called. Before writing anything to the database file, this lock
+** is upgraded to an EXCLUSIVE lock. If the lock cannot be obtained,
+** SQLITE_BUSY is returned and no data is written to the database file.
+**
+** If the pager is a temp-file pager and the actual file-system file
+** is not yet open, it is created and opened before any data is
+** written out.
+**
+** Once the lock has been upgraded and, if necessary, the file opened,
+** the pages are written out to the database file in list order. Writing
+** a page is skipped if it meets either of the following criteria:
+**
+** * The page number is greater than Pager.dbSize, or
+** * The PGHDR_DONT_WRITE flag is set on the page.
+**
+** If writing out a page causes the database file to grow, Pager.dbFileSize
+** is updated accordingly. If page 1 is written out, then the value cached
+** in Pager.dbFileVers[] is updated to match the new value stored in
+** the database file.
+**
+** If everything is successful, SQLITE_OK is returned. If an IO error
+** occurs, an IO error code is returned. Or, if the EXCLUSIVE lock cannot
+** be obtained, SQLITE_BUSY is returned.
+*/
+static int pager_write_pagelist(Pager *pPager, PgHdr *pList){
+ int rc = SQLITE_OK; /* Return code */
+
+ /* This function is only called for rollback pagers in WRITER_DBMOD state. */
+ assert( !pagerUseWal(pPager) );
+ assert( pPager->tempFile || pPager->eState==PAGER_WRITER_DBMOD );
+ assert( pPager->eLock==EXCLUSIVE_LOCK );
+ assert( isOpen(pPager->fd) || pList->pDirty==0 );
+
+  /* If the file is a temp-file and has not yet been opened, open it now.
+  ** It is not possible for rc to be other than SQLITE_OK if this branch
+  ** is taken, as pager_wait_on_lock() is a no-op for temp-files.
+ */
+ if( !isOpen(pPager->fd) ){
+ assert( pPager->tempFile && rc==SQLITE_OK );
+ rc = pagerOpentemp(pPager, pPager->fd, pPager->vfsFlags);
+ }
+
+ /* Before the first write, give the VFS a hint of what the final
+ ** file size will be.
+ */
+ assert( rc!=SQLITE_OK || isOpen(pPager->fd) );
+ if( rc==SQLITE_OK
+ && pPager->dbHintSize<pPager->dbSize
+ && (pList->pDirty || pList->pgno>pPager->dbHintSize)
+ ){
+ sqlite3_int64 szFile = pPager->pageSize * (sqlite3_int64)pPager->dbSize;
+ sqlite3OsFileControlHint(pPager->fd, SQLITE_FCNTL_SIZE_HINT, &szFile);
+ pPager->dbHintSize = pPager->dbSize;
+ }
+
+ while( rc==SQLITE_OK && pList ){
+ Pgno pgno = pList->pgno;
+
+ /* If there are dirty pages in the page cache with page numbers greater
+ ** than Pager.dbSize, this means sqlite3PagerTruncateImage() was called to
+ ** make the file smaller (presumably by auto-vacuum code). Do not write
+ ** any such pages to the file.
+ **
+ ** Also, do not write out any page that has the PGHDR_DONT_WRITE flag
+ ** set (set by sqlite3PagerDontWrite()).
+ */
+ if( pgno<=pPager->dbSize && 0==(pList->flags&PGHDR_DONT_WRITE) ){
+ i64 offset = (pgno-1)*(i64)pPager->pageSize; /* Offset to write */
+ char *pData; /* Data to write */
+
+ assert( (pList->flags&PGHDR_NEED_SYNC)==0 );
+ if( pList->pgno==1 ) pager_write_changecounter(pList);
+
+ pData = pList->pData;
+
+ /* Write out the page data. */
+ rc = sqlite3OsWrite(pPager->fd, pData, pPager->pageSize, offset);
+
+ /* If page 1 was just written, update Pager.dbFileVers to match
+ ** the value now stored in the database file. If writing this
+ ** page caused the database file to grow, update dbFileSize.
+ */
+ if( pgno==1 ){
+ memcpy(&pPager->dbFileVers, &pData[24], sizeof(pPager->dbFileVers));
+ }
+ if( pgno>pPager->dbFileSize ){
+ pPager->dbFileSize = pgno;
+ }
+ pPager->aStat[PAGER_STAT_WRITE]++;
+
+ /* Update any backup objects copying the contents of this pager. */
+ sqlite3BackupUpdate(pPager->pBackup, pgno, (u8*)pList->pData);
+
+ PAGERTRACE(("STORE %d page %d hash(%08x)\n",
+ PAGERID(pPager), pgno, pager_pagehash(pList)));
+ IOTRACE(("PGOUT %p %d\n", pPager, pgno));
+ PAGER_INCR(sqlite3_pager_writedb_count);
+ }else{
+ PAGERTRACE(("NOSTORE %d page %d\n", PAGERID(pPager), pgno));
+ }
+ pager_set_pagehash(pList);
+ pList = pList->pDirty;
+ }
+
+ return rc;
+}
+
+/*
+** Ensure that the sub-journal file is open. If it is already open, this
+** function is a no-op.
+**
+** SQLITE_OK is returned if everything goes according to plan. An
+** SQLITE_IOERR_XXX error code is returned if a call to sqlite3OsOpen()
+** fails.
+*/
+static int openSubJournal(Pager *pPager){
+ int rc = SQLITE_OK;
+ if( !isOpen(pPager->sjfd) ){
+ const int flags = SQLITE_OPEN_SUBJOURNAL | SQLITE_OPEN_READWRITE
+ | SQLITE_OPEN_CREATE | SQLITE_OPEN_EXCLUSIVE
+ | SQLITE_OPEN_DELETEONCLOSE;
+ int nStmtSpill = sqlite3Config.nStmtSpill;
+ if( pPager->journalMode==PAGER_JOURNALMODE_MEMORY || pPager->subjInMemory ){
+ nStmtSpill = -1;
+ }
+ rc = sqlite3JournalOpen(pPager->pVfs, 0, pPager->sjfd, flags, nStmtSpill);
+ }
+ return rc;
+}
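+
+/* Note: sqlite3Config.nStmtSpill used above is set through the public
+** SQLITE_CONFIG_STMTJRNL_SPILL configuration option. A hedged usage sketch
+** (the 64KiB threshold is an illustrative value; sqlite3_config() must be
+** called before the library is initialized):
+**
+**   sqlite3_config(SQLITE_CONFIG_STMTJRNL_SPILL, 64*1024);
+*/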
+
+/*
+** Append a record of the current state of page pPg to the sub-journal.
+**
+** If successful, set the bit corresponding to pPg->pgno in the bitvecs
+** for all open savepoints before returning.
+**
+** This function returns SQLITE_OK if everything is successful, an IO
+** error code if the attempt to write to the sub-journal fails, or
+** SQLITE_NOMEM if a malloc fails while setting a bit in a savepoint
+** bitvec.
+*/
+static int subjournalPage(PgHdr *pPg){
+ int rc = SQLITE_OK;
+ Pager *pPager = pPg->pPager;
+ if( pPager->journalMode!=PAGER_JOURNALMODE_OFF ){
+
+ /* Open the sub-journal, if it has not already been opened */
+ assert( pPager->useJournal );
+ assert( isOpen(pPager->jfd) || pagerUseWal(pPager) );
+ assert( isOpen(pPager->sjfd) || pPager->nSubRec==0 );
+ assert( pagerUseWal(pPager)
+ || pageInJournal(pPager, pPg)
+ || pPg->pgno>pPager->dbOrigSize
+ );
+ rc = openSubJournal(pPager);
+
+ /* If the sub-journal was opened successfully (or was already open),
+ ** write the journal record into the file. */
+ if( rc==SQLITE_OK ){
+ void *pData = pPg->pData;
+ i64 offset = (i64)pPager->nSubRec*(4+pPager->pageSize);
+ char *pData2;
+ pData2 = pData;
+ PAGERTRACE(("STMT-JOURNAL %d page %d\n", PAGERID(pPager), pPg->pgno));
+ rc = write32bits(pPager->sjfd, offset, pPg->pgno);
+ if( rc==SQLITE_OK ){
+ rc = sqlite3OsWrite(pPager->sjfd, pData2, pPager->pageSize, offset+4);
+ }
+ }
+ }
+ if( rc==SQLITE_OK ){
+ pPager->nSubRec++;
+ assert( pPager->nSavepoint>0 );
+ rc = addToSavepointBitvecs(pPager, pPg->pgno);
+ }
+ return rc;
+}
+static int subjournalPageIfRequired(PgHdr *pPg){
+ if( subjRequiresPage(pPg) ){
+ return subjournalPage(pPg);
+ }else{
+ return SQLITE_OK;
+ }
+}
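+
+/* The sub-journal record written by subjournalPage() is simply a 4-byte
+** big-endian page number followed by one page image. A hedged sketch of
+** reading record iRec back (the helper name is illustrative and not part
+** of SQLite; error handling is minimal):
+**
+**   static int readSubjRecord(sqlite3_file *sjfd, int szPage, int iRec,
+**                             u32 *pPgno, void *pBuf){
+**     i64 off = (i64)iRec*(4+szPage);
+**     u8 a[4];
+**     int rc = sqlite3OsRead(sjfd, a, 4, off);             // page number
+**     if( rc==SQLITE_OK ){
+**       *pPgno = ((u32)a[0]<<24)|(a[1]<<16)|(a[2]<<8)|a[3];
+**       rc = sqlite3OsRead(sjfd, pBuf, szPage, off+4);     // page image
+**     }
+**     return rc;
+**   }
+*/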
+
+/*
+** This function is called by the pcache layer when it has reached some
+** soft memory limit. The first argument is a pointer to a Pager object
+** (cast as a void*). The pager is always 'purgeable' (not an in-memory
+** database). The second argument is a reference to a page that is
+** currently dirty but has no outstanding references. The page
+** is always associated with the Pager object passed as the first
+** argument.
+**
+** The job of this function is to make pPg clean by writing its contents
+** out to the database file, if possible. This may involve syncing the
+** journal file.
+**
+** If successful, sqlite3PcacheMakeClean() is called on the page and
+** SQLITE_OK returned. If an IO error occurs while trying to make the
+** page clean, the IO error code is returned. If the page cannot be
+** made clean for some other reason, but no error occurs, then SQLITE_OK
+** is returned but sqlite3PcacheMakeClean() is not called.
+*/
+static int pagerStress(void *p, PgHdr *pPg){
+ Pager *pPager = (Pager *)p;
+ int rc = SQLITE_OK;
+
+ assert( pPg->pPager==pPager );
+ assert( pPg->flags&PGHDR_DIRTY );
+
+  /* The doNotSpill NOSYNC bit is set during times when doing a sync of
+  ** the journal (and adding a new header) is not allowed. This occurs
+  ** during calls to sqlite3PagerWrite() while trying to journal multiple
+  ** pages belonging to the same sector.
+  **
+  ** The doNotSpill ROLLBACK and OFF bits inhibit all cache spilling
+  ** regardless of whether or not a sync is required. They are set during
+  ** a rollback or by user request, respectively.
+ **
+ ** Spilling is also prohibited when in an error state since that could
+ ** lead to database corruption. In the current implementation it
+ ** is impossible for sqlite3PcacheFetch() to be called with createFlag==3
+ ** while in the error state, hence it is impossible for this routine to
+ ** be called in the error state. Nevertheless, we include a NEVER()
+ ** test for the error state as a safeguard against future changes.
+ */
+ if( NEVER(pPager->errCode) ) return SQLITE_OK;
+ testcase( pPager->doNotSpill & SPILLFLAG_ROLLBACK );
+ testcase( pPager->doNotSpill & SPILLFLAG_OFF );
+ testcase( pPager->doNotSpill & SPILLFLAG_NOSYNC );
+ if( pPager->doNotSpill
+ && ((pPager->doNotSpill & (SPILLFLAG_ROLLBACK|SPILLFLAG_OFF))!=0
+ || (pPg->flags & PGHDR_NEED_SYNC)!=0)
+ ){
+ return SQLITE_OK;
+ }
+
+ pPager->aStat[PAGER_STAT_SPILL]++;
+ pPg->pDirty = 0;
+ if( pagerUseWal(pPager) ){
+ /* Write a single frame for this page to the log. */
+ rc = subjournalPageIfRequired(pPg);
+ if( rc==SQLITE_OK ){
+ rc = pagerWalFrames(pPager, pPg, 0, 0);
+ }
+ }else{
+
+#ifdef SQLITE_ENABLE_BATCH_ATOMIC_WRITE
+ if( pPager->tempFile==0 ){
+ rc = sqlite3JournalCreate(pPager->jfd);
+ if( rc!=SQLITE_OK ) return pager_error(pPager, rc);
+ }
+#endif
+
+ /* Sync the journal file if required. */
+ if( pPg->flags&PGHDR_NEED_SYNC
+ || pPager->eState==PAGER_WRITER_CACHEMOD
+ ){
+ rc = syncJournal(pPager, 1);
+ }
+
+ /* Write the contents of the page out to the database file. */
+ if( rc==SQLITE_OK ){
+ assert( (pPg->flags&PGHDR_NEED_SYNC)==0 );
+ rc = pager_write_pagelist(pPager, pPg);
+ }
+ }
+
+ /* Mark the page as clean. */
+ if( rc==SQLITE_OK ){
+ PAGERTRACE(("STRESS %d page %d\n", PAGERID(pPager), pPg->pgno));
+ sqlite3PcacheMakeClean(pPg);
+ }
+
+ return pager_error(pPager, rc);
+}
+
+/*
+** Flush all unreferenced dirty pages to disk.
+*/
+SQLITE_PRIVATE int sqlite3PagerFlush(Pager *pPager){
+ int rc = pPager->errCode;
+ if( !MEMDB ){
+ PgHdr *pList = sqlite3PcacheDirtyList(pPager->pPCache);
+ assert( assert_pager_state(pPager) );
+ while( rc==SQLITE_OK && pList ){
+ PgHdr *pNext = pList->pDirty;
+ if( pList->nRef==0 ){
+ rc = pagerStress((void*)pPager, pList);
+ }
+ pList = pNext;
+ }
+ }
+
+ return rc;
+}
+
+/*
+** Allocate and initialize a new Pager object and put a pointer to it
+** in *ppPager. The pager should eventually be freed by passing it
+** to sqlite3PagerClose().
+**
+** The zFilename argument is the path to the database file to open.
+** If zFilename is NULL then a randomly-named temporary file is created
+** and used as the file to be cached. Temporary files are deleted
+** automatically when they are closed. If zFilename is ":memory:" then
+** all information is held in cache. It is never written to disk.
+** This can be used to implement an in-memory database.
+**
+** The nExtra parameter specifies the number of bytes of space allocated
+** along with each page reference. This space is available to the user
+** via the sqlite3PagerGetExtra() API. When a new page is allocated, the
+** first 8 bytes of this space are zeroed but the remainder is uninitialized.
+** (The extra space is used by btree as the MemPage object.)
+**
+** The flags argument is used to specify properties that affect the
+** operation of the pager. It should be passed some bitwise combination
+** of the PAGER_* flags.
+**
+** The vfsFlags parameter is a bitmask to pass to the flags parameter
+** of the xOpen() method of the supplied VFS when opening files.
+**
+** If the pager object is allocated and the specified file opened
+** successfully, SQLITE_OK is returned and *ppPager set to point to
+** the new pager object. If an error occurs, *ppPager is set to NULL
+** and an error code is returned. This function may return SQLITE_NOMEM
+** (sqlite3Malloc() is used to allocate memory), SQLITE_CANTOPEN or
+** various SQLITE_IO_XXX errors.
+*/
+SQLITE_PRIVATE int sqlite3PagerOpen(
+ sqlite3_vfs *pVfs, /* The virtual file system to use */
+ Pager **ppPager, /* OUT: Return the Pager structure here */
+ const char *zFilename, /* Name of the database file to open */
+  int nExtra,              /* Extra bytes appended to each in-memory page */
+ int flags, /* flags controlling this file */
+ int vfsFlags, /* flags passed through to sqlite3_vfs.xOpen() */
+ void (*xReinit)(DbPage*) /* Function to reinitialize pages */
+){
+ u8 *pPtr;
+ Pager *pPager = 0; /* Pager object to allocate and return */
+ int rc = SQLITE_OK; /* Return code */
+ int tempFile = 0; /* True for temp files (incl. in-memory files) */
+ int memDb = 0; /* True if this is an in-memory file */
+#ifdef SQLITE_ENABLE_DESERIALIZE
+ int memJM = 0; /* Memory journal mode */
+#else
+# define memJM 0
+#endif
+ int readOnly = 0; /* True if this is a read-only file */
+ int journalFileSize; /* Bytes to allocate for each journal fd */
+ char *zPathname = 0; /* Full path to database file */
+ int nPathname = 0; /* Number of bytes in zPathname */
+ int useJournal = (flags & PAGER_OMIT_JOURNAL)==0; /* False to omit journal */
+ int pcacheSize = sqlite3PcacheSize(); /* Bytes to allocate for PCache */
+ u32 szPageDflt = SQLITE_DEFAULT_PAGE_SIZE; /* Default page size */
+ const char *zUri = 0; /* URI args to copy */
+ int nUriByte = 1; /* Number of bytes of URI args at *zUri */
+ int nUri = 0; /* Number of URI parameters */
+
+ /* Figure out how much space is required for each journal file-handle
+ ** (there are two of them, the main journal and the sub-journal). */
+ journalFileSize = ROUND8(sqlite3JournalSize(pVfs));
+
+ /* Set the output variable to NULL in case an error occurs. */
+ *ppPager = 0;
+
+#ifndef SQLITE_OMIT_MEMORYDB
+ if( flags & PAGER_MEMORY ){
+ memDb = 1;
+ if( zFilename && zFilename[0] ){
+ zPathname = sqlite3DbStrDup(0, zFilename);
+ if( zPathname==0 ) return SQLITE_NOMEM_BKPT;
+ nPathname = sqlite3Strlen30(zPathname);
+ zFilename = 0;
+ }
+ }
+#endif
+
+ /* Compute and store the full pathname in an allocated buffer pointed
+ ** to by zPathname, length nPathname. Or, if this is a temporary file,
+ ** leave both nPathname and zPathname set to 0.
+ */
+ if( zFilename && zFilename[0] ){
+ const char *z;
+ nPathname = pVfs->mxPathname+1;
+ zPathname = sqlite3DbMallocRaw(0, nPathname*2);
+ if( zPathname==0 ){
+ return SQLITE_NOMEM_BKPT;
+ }
+ zPathname[0] = 0; /* Make sure initialized even if FullPathname() fails */
+ rc = sqlite3OsFullPathname(pVfs, zFilename, nPathname, zPathname);
+ if( rc!=SQLITE_OK ){
+ if( rc==SQLITE_OK_SYMLINK ){
+ if( vfsFlags & SQLITE_OPEN_NOFOLLOW ){
+ rc = SQLITE_CANTOPEN_SYMLINK;
+ }else{
+ rc = SQLITE_OK;
+ }
+ }
+ }
+ nPathname = sqlite3Strlen30(zPathname);
+ z = zUri = &zFilename[sqlite3Strlen30(zFilename)+1];
+ while( *z ){
+ z += strlen(z)+1;
+ z += strlen(z)+1;
+ nUri++;
+ }
+ nUriByte = (int)(&z[1] - zUri);
+ assert( nUriByte>=1 );
+ if( rc==SQLITE_OK && nPathname+8>pVfs->mxPathname ){
+ /* This branch is taken when the journal path required by
+ ** the database being opened will be more than pVfs->mxPathname
+ ** bytes in length. This means the database cannot be opened,
+ ** as it will not be possible to open the journal file or even
+ ** check for a hot-journal before reading.
+ */
+ rc = SQLITE_CANTOPEN_BKPT;
+ }
+ if( rc!=SQLITE_OK ){
+ sqlite3DbFree(0, zPathname);
+ return rc;
+ }
+ }
+
+ /* Allocate memory for the Pager structure, PCache object, the
+ ** three file descriptors, the database file name and the journal
+ ** file name. The layout in memory is as follows:
+ **
+ ** Pager object (sizeof(Pager) bytes)
+ ** PCache object (sqlite3PcacheSize() bytes)
+ ** Database file handle (pVfs->szOsFile bytes)
+ ** Sub-journal file handle (journalFileSize bytes)
+ ** Main journal file handle (journalFileSize bytes)
+ ** Ptr back to the Pager (sizeof(Pager*) bytes)
+ ** \0\0\0\0 database prefix (4 bytes)
+ ** Database file name (nPathname+1 bytes)
+ ** URI query parameters (nUriByte bytes)
+ ** Journal filename (nPathname+8+1 bytes)
+ ** WAL filename (nPathname+4+1 bytes)
+ ** \0\0\0 terminator (3 bytes)
+ **
+ ** Some 3rd-party software, over which we have no control, depends on
+ ** the specific order of the filenames and the \0 separators between them
+ ** so that it can (for example) find the database filename given the WAL
+ ** filename without using the sqlite3_filename_database() API. This is a
+ ** misuse of SQLite and a bug in the 3rd-party software, but the 3rd-party
+ ** software is in widespread use, so we try to avoid changing the filename
+ ** order and formatting if possible. In particular, the details of the
+ ** filename format expected by 3rd-party software should be as follows:
+ **
+ ** - Main Database Path
+ ** - \0
+ ** - Multiple URI components consisting of:
+ ** - Key
+ ** - \0
+ ** - Value
+ ** - \0
+ ** - \0
+ ** - Journal Path
+ ** - \0
+ ** - WAL Path (zWALName)
+ ** - \0
+ **
+ ** The sqlite3_create_filename() interface and the databaseFilename() utility
+ ** that is used by sqlite3_filename_database() and kin also depend on the
+ ** specific formatting and order of the various filenames, so if the format
+ ** changes here, be sure to change it there as well.
+ */
+ pPtr = (u8 *)sqlite3MallocZero(
+ ROUND8(sizeof(*pPager)) + /* Pager structure */
+ ROUND8(pcacheSize) + /* PCache object */
+ ROUND8(pVfs->szOsFile) + /* The main db file */
+ journalFileSize * 2 + /* The two journal files */
+ sizeof(pPager) + /* Space to hold a pointer */
+ 4 + /* Database prefix */
+ nPathname + 1 + /* database filename */
+ nUriByte + /* query parameters */
+ nPathname + 8 + 1 + /* Journal filename */
+#ifndef SQLITE_OMIT_WAL
+ nPathname + 4 + 1 + /* WAL filename */
+#endif
+ 3 /* Terminator */
+ );
+ assert( EIGHT_BYTE_ALIGNMENT(SQLITE_INT_TO_PTR(journalFileSize)) );
+ if( !pPtr ){
+ sqlite3DbFree(0, zPathname);
+ return SQLITE_NOMEM_BKPT;
+ }
+ pPager = (Pager*)pPtr; pPtr += ROUND8(sizeof(*pPager));
+ pPager->pPCache = (PCache*)pPtr; pPtr += ROUND8(pcacheSize);
+ pPager->fd = (sqlite3_file*)pPtr; pPtr += ROUND8(pVfs->szOsFile);
+ pPager->sjfd = (sqlite3_file*)pPtr; pPtr += journalFileSize;
+ pPager->jfd = (sqlite3_file*)pPtr; pPtr += journalFileSize;
+ assert( EIGHT_BYTE_ALIGNMENT(pPager->jfd) );
+ memcpy(pPtr, &pPager, sizeof(pPager)); pPtr += sizeof(pPager);
+
+  /* Fill in the Pager.zFilename and Pager.zQueryParam fields */
+ pPtr += 4; /* Skip zero prefix */
+ pPager->zFilename = (char*)pPtr;
+ if( nPathname>0 ){
+ memcpy(pPtr, zPathname, nPathname); pPtr += nPathname + 1;
+ if( zUri ){
+ memcpy(pPtr, zUri, nUriByte); pPtr += nUriByte;
+ }else{
+ pPtr++;
+ }
+ }
+
+
+ /* Fill in Pager.zJournal */
+ if( nPathname>0 ){
+ pPager->zJournal = (char*)pPtr;
+ memcpy(pPtr, zPathname, nPathname); pPtr += nPathname;
+ memcpy(pPtr, "-journal",8); pPtr += 8 + 1;
+#ifdef SQLITE_ENABLE_8_3_NAMES
+ sqlite3FileSuffix3(zFilename,pPager->zJournal);
+ pPtr = (u8*)(pPager->zJournal + sqlite3Strlen30(pPager->zJournal)+1);
+#endif
+ }else{
+ pPager->zJournal = 0;
+ }
+
+#ifndef SQLITE_OMIT_WAL
+ /* Fill in Pager.zWal */
+ if( nPathname>0 ){
+ pPager->zWal = (char*)pPtr;
+ memcpy(pPtr, zPathname, nPathname); pPtr += nPathname;
+ memcpy(pPtr, "-wal", 4); pPtr += 4 + 1;
+#ifdef SQLITE_ENABLE_8_3_NAMES
+ sqlite3FileSuffix3(zFilename, pPager->zWal);
+ pPtr = (u8*)(pPager->zWal + sqlite3Strlen30(pPager->zWal)+1);
+#endif
+ }else{
+ pPager->zWal = 0;
+ }
+#endif
+
+ if( nPathname ) sqlite3DbFree(0, zPathname);
+ pPager->pVfs = pVfs;
+ pPager->vfsFlags = vfsFlags;
+
+ /* Open the pager file.
+ */
+ if( zFilename && zFilename[0] ){
+ int fout = 0; /* VFS flags returned by xOpen() */
+ rc = sqlite3OsOpen(pVfs, pPager->zFilename, pPager->fd, vfsFlags, &fout);
+ assert( !memDb );
+#ifdef SQLITE_ENABLE_DESERIALIZE
+ memJM = (fout&SQLITE_OPEN_MEMORY)!=0;
+#endif
+ readOnly = (fout&SQLITE_OPEN_READONLY)!=0;
+
+ /* If the file was successfully opened for read/write access,
+ ** choose a default page size in case we have to create the
+ ** database file. The default page size is the maximum of:
+ **
+ ** + SQLITE_DEFAULT_PAGE_SIZE,
+ ** + The value returned by sqlite3OsSectorSize()
+ ** + The largest page size that can be written atomically.
+ */
+ if( rc==SQLITE_OK ){
+ int iDc = sqlite3OsDeviceCharacteristics(pPager->fd);
+ if( !readOnly ){
+ setSectorSize(pPager);
+ assert(SQLITE_DEFAULT_PAGE_SIZE<=SQLITE_MAX_DEFAULT_PAGE_SIZE);
+ if( szPageDflt<pPager->sectorSize ){
+ if( pPager->sectorSize>SQLITE_MAX_DEFAULT_PAGE_SIZE ){
+ szPageDflt = SQLITE_MAX_DEFAULT_PAGE_SIZE;
+ }else{
+ szPageDflt = (u32)pPager->sectorSize;
+ }
+ }
+#ifdef SQLITE_ENABLE_ATOMIC_WRITE
+ {
+ int ii;
+ assert(SQLITE_IOCAP_ATOMIC512==(512>>8));
+ assert(SQLITE_IOCAP_ATOMIC64K==(65536>>8));
+ assert(SQLITE_MAX_DEFAULT_PAGE_SIZE<=65536);
+ for(ii=szPageDflt; ii<=SQLITE_MAX_DEFAULT_PAGE_SIZE; ii=ii*2){
+ if( iDc&(SQLITE_IOCAP_ATOMIC|(ii>>8)) ){
+ szPageDflt = ii;
+ }
+ }
+ }
+#endif
+ }
+ pPager->noLock = sqlite3_uri_boolean(pPager->zFilename, "nolock", 0);
+ if( (iDc & SQLITE_IOCAP_IMMUTABLE)!=0
+ || sqlite3_uri_boolean(pPager->zFilename, "immutable", 0) ){
+ vfsFlags |= SQLITE_OPEN_READONLY;
+ goto act_like_temp_file;
+ }
+ }
+ }else{
+ /* If a temporary file is requested, it is not opened immediately.
+ ** In this case we accept the default page size and delay actually
+ ** opening the file until the first call to OsWrite().
+ **
+ ** This branch is also run for an in-memory database. An in-memory
+ ** database is the same as a temp-file that is never written out to
+ ** disk and uses an in-memory rollback journal.
+ **
+ ** This branch also runs for files marked as immutable.
+ */
+act_like_temp_file:
+ tempFile = 1;
+ pPager->eState = PAGER_READER; /* Pretend we already have a lock */
+ pPager->eLock = EXCLUSIVE_LOCK; /* Pretend we are in EXCLUSIVE mode */
+ pPager->noLock = 1; /* Do no locking */
+ readOnly = (vfsFlags&SQLITE_OPEN_READONLY);
+ }
+
+ /* The following call to PagerSetPagesize() serves to set the value of
+ ** Pager.pageSize and to allocate the Pager.pTmpSpace buffer.
+ */
+ if( rc==SQLITE_OK ){
+ assert( pPager->memDb==0 );
+ rc = sqlite3PagerSetPagesize(pPager, &szPageDflt, -1);
+ testcase( rc!=SQLITE_OK );
+ }
+
+ /* Initialize the PCache object. */
+ if( rc==SQLITE_OK ){
+ nExtra = ROUND8(nExtra);
+ assert( nExtra>=8 && nExtra<1000 );
+ rc = sqlite3PcacheOpen(szPageDflt, nExtra, !memDb,
+ !memDb?pagerStress:0, (void *)pPager, pPager->pPCache);
+ }
+
+ /* If an error occurred above, free the Pager structure and close the file.
+ */
+ if( rc!=SQLITE_OK ){
+ sqlite3OsClose(pPager->fd);
+ sqlite3PageFree(pPager->pTmpSpace);
+ sqlite3_free(pPager);
+ return rc;
+ }
+
+ PAGERTRACE(("OPEN %d %s\n", FILEHANDLEID(pPager->fd), pPager->zFilename));
+ IOTRACE(("OPEN %p %s\n", pPager, pPager->zFilename))
+
+ pPager->useJournal = (u8)useJournal;
+ /* pPager->stmtOpen = 0; */
+ /* pPager->stmtInUse = 0; */
+ /* pPager->nRef = 0; */
+ /* pPager->stmtSize = 0; */
+ /* pPager->stmtJSize = 0; */
+ /* pPager->nPage = 0; */
+ pPager->mxPgno = SQLITE_MAX_PAGE_COUNT;
+ /* pPager->state = PAGER_UNLOCK; */
+ /* pPager->errMask = 0; */
+ pPager->tempFile = (u8)tempFile;
+ assert( tempFile==PAGER_LOCKINGMODE_NORMAL
+ || tempFile==PAGER_LOCKINGMODE_EXCLUSIVE );
+ assert( PAGER_LOCKINGMODE_EXCLUSIVE==1 );
+ pPager->exclusiveMode = (u8)tempFile;
+ pPager->changeCountDone = pPager->tempFile;
+ pPager->memDb = (u8)memDb;
+ pPager->readOnly = (u8)readOnly;
+ assert( useJournal || pPager->tempFile );
+ pPager->noSync = pPager->tempFile;
+ if( pPager->noSync ){
+ assert( pPager->fullSync==0 );
+ assert( pPager->extraSync==0 );
+ assert( pPager->syncFlags==0 );
+ assert( pPager->walSyncFlags==0 );
+ }else{
+ pPager->fullSync = 1;
+ pPager->extraSync = 0;
+ pPager->syncFlags = SQLITE_SYNC_NORMAL;
+ pPager->walSyncFlags = SQLITE_SYNC_NORMAL | (SQLITE_SYNC_NORMAL<<2);
+ }
+ /* pPager->pFirst = 0; */
+ /* pPager->pFirstSynced = 0; */
+ /* pPager->pLast = 0; */
+ pPager->nExtra = (u16)nExtra;
+ pPager->journalSizeLimit = SQLITE_DEFAULT_JOURNAL_SIZE_LIMIT;
+ assert( isOpen(pPager->fd) || tempFile );
+ setSectorSize(pPager);
+ if( !useJournal ){
+ pPager->journalMode = PAGER_JOURNALMODE_OFF;
+ }else if( memDb || memJM ){
+ pPager->journalMode = PAGER_JOURNALMODE_MEMORY;
+ }
+ /* pPager->xBusyHandler = 0; */
+ /* pPager->pBusyHandlerArg = 0; */
+ pPager->xReiniter = xReinit;
+ setGetterMethod(pPager);
+ /* memset(pPager->aHash, 0, sizeof(pPager->aHash)); */
+ /* pPager->szMmap = SQLITE_DEFAULT_MMAP_SIZE // will be set by btree.c */
+
+ *ppPager = pPager;
+ return SQLITE_OK;
+}
+
+/*
+** Return the sqlite3_file for the main database given the name
+** of the corresponding WAL or journal file, as passed into
+** xOpen.
+*/
+SQLITE_API sqlite3_file *sqlite3_database_file_object(const char *zName){
+ Pager *pPager;
+ while( zName[-1]!=0 || zName[-2]!=0 || zName[-3]!=0 || zName[-4]!=0 ){
+ zName--;
+ }
+ pPager = *(Pager**)(zName - 4 - sizeof(Pager*));
+ return pPager->fd;
+}
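+
+/* The pointer arithmetic above depends on the exact buffer layout built by
+** sqlite3PagerOpen(). Code outside of SQLite should not re-implement it;
+** the supported way to map a journal or WAL filename back to its database
+** is the public sqlite3_filename_database() interface. A hedged sketch, as
+** it might appear inside a custom VFS xOpen (zName is assumed to be the
+** name passed to xOpen for a journal or WAL file):
+**
+**   const char *zDb = sqlite3_filename_database(zName);
+**   sqlite3_file *pDbFd = sqlite3_database_file_object(zName);
+*/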
+
+
+/*
+** This function is called after transitioning from PAGER_UNLOCK to
+** PAGER_SHARED state. It tests if there is a hot journal present in
+** the file-system for the given pager. A hot journal is one that
+** needs to be played back. According to this function, a hot-journal
+** file exists if the following criteria are met:
+**
+** * The journal file exists in the file system, and
+** * No process holds a RESERVED or greater lock on the database file, and
+** * The database file itself is greater than 0 bytes in size, and
+** * The first byte of the journal file exists and is not 0x00.
+**
+** If the current size of the database file is 0 but a journal file
+** exists, that is probably an old journal left over from a prior
+** database with the same name. In this case the journal file is
+** just deleted using OsDelete, *pExists is set to 0 and SQLITE_OK
+** is returned.
+**
+** This routine does not check if there is a super-journal filename
+** at the end of the file. If there is, and that super-journal file
+** does not exist, then the journal file is not really hot. In this
+** case this routine will return a false-positive. The pager_playback()
+** routine will discover that the journal file is not really hot and
+** will not roll it back.
+**
+** If a hot-journal file is found to exist, *pExists is set to 1 and
+** SQLITE_OK returned. If no hot-journal file is present, *pExists is
+** set to 0 and SQLITE_OK returned. If an IO error occurs while trying
+** to determine whether or not a hot-journal file exists, the IO error
+** code is returned and the value of *pExists is undefined.
+*/
+static int hasHotJournal(Pager *pPager, int *pExists){
+ sqlite3_vfs * const pVfs = pPager->pVfs;
+ int rc = SQLITE_OK; /* Return code */
+ int exists = 1; /* True if a journal file is present */
+ int jrnlOpen = !!isOpen(pPager->jfd);
+
+ assert( pPager->useJournal );
+ assert( isOpen(pPager->fd) );
+ assert( pPager->eState==PAGER_OPEN );
+
+ assert( jrnlOpen==0 || ( sqlite3OsDeviceCharacteristics(pPager->jfd) &
+ SQLITE_IOCAP_UNDELETABLE_WHEN_OPEN
+ ));
+
+ *pExists = 0;
+ if( !jrnlOpen ){
+ rc = sqlite3OsAccess(pVfs, pPager->zJournal, SQLITE_ACCESS_EXISTS, &exists);
+ }
+ if( rc==SQLITE_OK && exists ){
+ int locked = 0; /* True if some process holds a RESERVED lock */
+
+    /* Race condition here: Another process might have been holding
+ ** the RESERVED lock and have a journal open at the sqlite3OsAccess()
+ ** call above, but then delete the journal and drop the lock before
+ ** we get to the following sqlite3OsCheckReservedLock() call. If that
+ ** is the case, this routine might think there is a hot journal when
+ ** in fact there is none. This results in a false-positive which will
+ ** be dealt with by the playback routine. Ticket #3883.
+ */
+ rc = sqlite3OsCheckReservedLock(pPager->fd, &locked);
+ if( rc==SQLITE_OK && !locked ){
+ Pgno nPage; /* Number of pages in database file */
+
+ assert( pPager->tempFile==0 );
+ rc = pagerPagecount(pPager, &nPage);
+ if( rc==SQLITE_OK ){
+ /* If the database is zero pages in size, that means that either (1) the
+ ** journal is a remnant from a prior database with the same name where
+ ** the database file but not the journal was deleted, or (2) the initial
+ ** transaction that populates a new database is being rolled back.
+ ** In either case, the journal file can be deleted. However, take care
+ ** not to delete the journal file if it is already open due to
+ ** journal_mode=PERSIST.
+ */
+ if( nPage==0 && !jrnlOpen ){
+ sqlite3BeginBenignMalloc();
+ if( pagerLockDb(pPager, RESERVED_LOCK)==SQLITE_OK ){
+ sqlite3OsDelete(pVfs, pPager->zJournal, 0);
+ if( !pPager->exclusiveMode ) pagerUnlockDb(pPager, SHARED_LOCK);
+ }
+ sqlite3EndBenignMalloc();
+ }else{
+ /* The journal file exists and no other connection has a reserved
+ ** or greater lock on the database file. Now check that there is
+          ** at least one non-zero byte at the start of the journal file.
+ ** If there is, then we consider this journal to be hot. If not,
+ ** it can be ignored.
+ */
+ if( !jrnlOpen ){
+ int f = SQLITE_OPEN_READONLY|SQLITE_OPEN_MAIN_JOURNAL;
+ rc = sqlite3OsOpen(pVfs, pPager->zJournal, pPager->jfd, f, &f);
+ }
+ if( rc==SQLITE_OK ){
+ u8 first = 0;
+ rc = sqlite3OsRead(pPager->jfd, (void *)&first, 1, 0);
+ if( rc==SQLITE_IOERR_SHORT_READ ){
+ rc = SQLITE_OK;
+ }
+ if( !jrnlOpen ){
+ sqlite3OsClose(pPager->jfd);
+ }
+ *pExists = (first!=0);
+ }else if( rc==SQLITE_CANTOPEN ){
+ /* If we cannot open the rollback journal file in order to see if
+ ** it has a zero header, that might be due to an I/O error, or
+ ** it might be due to the race condition described above and in
+ ** ticket #3883. Either way, assume that the journal is hot.
+ ** This might be a false positive. But if it is, then the
+ ** automatic journal playback and recovery mechanism will deal
+          ** with it under an EXCLUSIVE lock, where we do not need to
+          ** worry so much about race conditions.
+ */
+ *pExists = 1;
+ rc = SQLITE_OK;
+ }
+ }
+ }
+ }
+ }
+
+ return rc;
+}
+
+/*
+** This function is called to obtain a shared lock on the database file.
+** It is illegal to call sqlite3PagerGet() until after this function
+** has been successfully called. If a shared-lock is already held when
+** this function is called, it is a no-op.
+**
+** The following operations are also performed by this function.
+**
+** 1) If the pager is currently in PAGER_OPEN state (no lock held
+** on the database file), then an attempt is made to obtain a
+** SHARED lock on the database file. Immediately after obtaining
+** the SHARED lock, the file-system is checked for a hot-journal,
+** which is played back if present. Following any hot-journal
+** rollback, the contents of the cache are validated by checking
+** the 'change-counter' field of the database file header and
+** discarded if they are found to be invalid.
+**
+** 2) If the pager is running in exclusive-mode, and there are currently
+**      no outstanding references to any pages, and the pager is in the
+**      error state, then an attempt is made to clear the error state by
+**      discarding the contents of the page cache and rolling back any
+**      open journal file.
+**
+** If everything is successful, SQLITE_OK is returned. If an IO error
+** occurs while locking the database, checking for a hot-journal file or
+** rolling back a journal file, the IO error code is returned.
+*/
+SQLITE_PRIVATE int sqlite3PagerSharedLock(Pager *pPager){
+ int rc = SQLITE_OK; /* Return code */
+
+ /* This routine is only called from b-tree and only when there are no
+ ** outstanding pages. This implies that the pager state should either
+ ** be OPEN or READER. READER is only possible if the pager is or was in
+ ** exclusive access mode. */
+ assert( sqlite3PcacheRefCount(pPager->pPCache)==0 );
+ assert( assert_pager_state(pPager) );
+ assert( pPager->eState==PAGER_OPEN || pPager->eState==PAGER_READER );
+ assert( pPager->errCode==SQLITE_OK );
+
+ if( !pagerUseWal(pPager) && pPager->eState==PAGER_OPEN ){
+ int bHotJournal = 1; /* True if there exists a hot journal-file */
+
+ assert( !MEMDB );
+ assert( pPager->tempFile==0 || pPager->eLock==EXCLUSIVE_LOCK );
+
+ rc = pager_wait_on_lock(pPager, SHARED_LOCK);
+ if( rc!=SQLITE_OK ){
+ assert( pPager->eLock==NO_LOCK || pPager->eLock==UNKNOWN_LOCK );
+ goto failed;
+ }
+
+ /* If a journal file exists, and there is no RESERVED lock on the
+ ** database file, then it either needs to be played back or deleted.
+ */
+ if( pPager->eLock<=SHARED_LOCK ){
+ rc = hasHotJournal(pPager, &bHotJournal);
+ }
+ if( rc!=SQLITE_OK ){
+ goto failed;
+ }
+ if( bHotJournal ){
+ if( pPager->readOnly ){
+ rc = SQLITE_READONLY_ROLLBACK;
+ goto failed;
+ }
+
+ /* Get an EXCLUSIVE lock on the database file. At this point it is
+ ** important that a RESERVED lock is not obtained on the way to the
+ ** EXCLUSIVE lock. If it were, another process might open the
+ ** database file, detect the RESERVED lock, and conclude that the
+ ** database is safe to read while this process is still rolling the
+ ** hot-journal back.
+ **
+ ** Because the intermediate RESERVED lock is not requested, any
+ ** other process attempting to access the database file will get to
+ ** this point in the code and fail to obtain its own EXCLUSIVE lock
+ ** on the database file.
+ **
+ ** Unless the pager is in locking_mode=exclusive mode, the lock is
+ ** downgraded to SHARED_LOCK before this function returns.
+ */
+ rc = pagerLockDb(pPager, EXCLUSIVE_LOCK);
+ if( rc!=SQLITE_OK ){
+ goto failed;
+ }
+
+ /* If it is not already open and the file exists on disk, open the
+ ** journal for read/write access. Write access is required because
+ ** in exclusive-access mode the file descriptor will be kept open
+ ** and possibly used for a transaction later on. Also, write-access
+ ** is usually required to finalize the journal in journal_mode=persist
+ ** mode (and also for journal_mode=truncate on some systems).
+ **
+ ** If the journal does not exist, it usually means that some
+ ** other connection managed to get in and roll it back before
+ ** this connection obtained the exclusive lock above. Or, it
+ ** may mean that the pager was in the error-state when this
+ ** function was called and the journal file does not exist.
+ */
+ if( !isOpen(pPager->jfd) ){
+ sqlite3_vfs * const pVfs = pPager->pVfs;
+ int bExists; /* True if journal file exists */
+ rc = sqlite3OsAccess(
+ pVfs, pPager->zJournal, SQLITE_ACCESS_EXISTS, &bExists);
+ if( rc==SQLITE_OK && bExists ){
+ int fout = 0;
+ int f = SQLITE_OPEN_READWRITE|SQLITE_OPEN_MAIN_JOURNAL;
+ assert( !pPager->tempFile );
+ rc = sqlite3OsOpen(pVfs, pPager->zJournal, pPager->jfd, f, &fout);
+ assert( rc!=SQLITE_OK || isOpen(pPager->jfd) );
+ if( rc==SQLITE_OK && fout&SQLITE_OPEN_READONLY ){
+ rc = SQLITE_CANTOPEN_BKPT;
+ sqlite3OsClose(pPager->jfd);
+ }
+ }
+ }
+
+      /* Play back and delete the journal. Drop the database write
+ ** lock and reacquire the read lock. Purge the cache before
+ ** playing back the hot-journal so that we don't end up with
+ ** an inconsistent cache. Sync the hot journal before playing
+ ** it back since the process that crashed and left the hot journal
+ ** probably did not sync it and we are required to always sync
+ ** the journal before playing it back.
+ */
+ if( isOpen(pPager->jfd) ){
+ assert( rc==SQLITE_OK );
+ rc = pagerSyncHotJournal(pPager);
+ if( rc==SQLITE_OK ){
+ rc = pager_playback(pPager, !pPager->tempFile);
+ pPager->eState = PAGER_OPEN;
+ }
+ }else if( !pPager->exclusiveMode ){
+ pagerUnlockDb(pPager, SHARED_LOCK);
+ }
+
+ if( rc!=SQLITE_OK ){
+ /* This branch is taken if an error occurs while trying to open
+ ** or roll back a hot-journal while holding an EXCLUSIVE lock. The
+ ** pager_unlock() routine will be called before returning to unlock
+ ** the file. If the unlock attempt fails, then Pager.eLock must be
+ ** set to UNKNOWN_LOCK (see the comment above the #define for
+ ** UNKNOWN_LOCK above for an explanation).
+ **
+ ** In order to get pager_unlock() to do this, set Pager.eState to
+ ** PAGER_ERROR now. This is not actually counted as a transition
+ ** to ERROR state in the state diagram at the top of this file,
+ ** since we know that the same call to pager_unlock() will very
+ ** shortly transition the pager object to the OPEN state. Calling
+ ** assert_pager_state() would fail now, as it should not be possible
+ ** to be in ERROR state when there are zero outstanding page
+ ** references.
+ */
+ pager_error(pPager, rc);
+ goto failed;
+ }
+
+ assert( pPager->eState==PAGER_OPEN );
+ assert( (pPager->eLock==SHARED_LOCK)
+ || (pPager->exclusiveMode && pPager->eLock>SHARED_LOCK)
+ );
+ }
+
+ if( !pPager->tempFile && pPager->hasHeldSharedLock ){
+    /* The shared-lock has just been acquired, so check to
+    ** see if the database has been modified. If the database has changed,
+    ** flush the cache. The hasHeldSharedLock flag prevents this from
+    ** occurring on the very first access to a file, in order to save a
+    ** single unnecessary sqlite3OsRead() call at start-up.
+    **
+    ** Database changes are detected by looking at 16 bytes beginning
+    ** at offset 24 into the file. The first 4 of these 16 bytes are
+    ** a 32-bit counter that is incremented with each change. The
+    ** other bytes change randomly with each file change when
+    ** a codec is in use.
+ **
+ ** There is a vanishingly small chance that a change will not be
+ ** detected. The chance of an undetected change is so small that
+ ** it can be neglected.
+ */
+ char dbFileVers[sizeof(pPager->dbFileVers)];
+
+ IOTRACE(("CKVERS %p %d\n", pPager, sizeof(dbFileVers)));
+ rc = sqlite3OsRead(pPager->fd, &dbFileVers, sizeof(dbFileVers), 24);
+ if( rc!=SQLITE_OK ){
+ if( rc!=SQLITE_IOERR_SHORT_READ ){
+ goto failed;
+ }
+ memset(dbFileVers, 0, sizeof(dbFileVers));
+ }
+
+ if( memcmp(pPager->dbFileVers, dbFileVers, sizeof(dbFileVers))!=0 ){
+ pager_reset(pPager);
+
+ /* Unmap the database file. It is possible that external processes
+ ** may have truncated the database file and then extended it back
+ ** to its original size while this process was not holding a lock.
+ ** In this case there may exist a Pager.pMap mapping that appears
+ ** to be the right size but is not actually valid. Avoid this
+ ** possibility by unmapping the db here. */
+ if( USEFETCH(pPager) ){
+ sqlite3OsUnfetch(pPager->fd, 0, 0);
+ }
+ }
+ }
+
+ /* If there is a WAL file in the file-system, open this database in WAL
+ ** mode. Otherwise, the following function call is a no-op.
+ */
+ rc = pagerOpenWalIfPresent(pPager);
+#ifndef SQLITE_OMIT_WAL
+ assert( pPager->pWal==0 || rc==SQLITE_OK );
+#endif
+ }
+
+ if( pagerUseWal(pPager) ){
+ assert( rc==SQLITE_OK );
+ rc = pagerBeginReadTransaction(pPager);
+ }
+
+ if( pPager->tempFile==0 && pPager->eState==PAGER_OPEN && rc==SQLITE_OK ){
+ rc = pagerPagecount(pPager, &pPager->dbSize);
+ }
+
+ failed:
+ if( rc!=SQLITE_OK ){
+ assert( !MEMDB );
+ pager_unlock(pPager);
+ assert( pPager->eState==PAGER_OPEN );
+ }else{
+ pPager->eState = PAGER_READER;
+ pPager->hasHeldSharedLock = 1;
+ }
+ return rc;
+}
+
+/*
+** If the reference count has reached zero, rollback any active
+** transaction and unlock the pager.
+**
+** Except, in locking_mode=EXCLUSIVE, when there is nothing in
+** the rollback journal, the unlock is not performed and there is
+** nothing to roll back, so this routine is a no-op.
+*/
+static void pagerUnlockIfUnused(Pager *pPager){
+ if( sqlite3PcacheRefCount(pPager->pPCache)==0 ){
+ assert( pPager->nMmapOut==0 ); /* because page1 is never memory mapped */
+ pagerUnlockAndRollback(pPager);
+ }
+}
+
+/*
+** The page getter methods each try to acquire a reference to a
+** page with page number pgno. If the requested reference is
+** successfully obtained, it is copied to *ppPage and SQLITE_OK returned.
+**
+** There are different implementations of the getter method depending
+** on the current state of the pager.
+**
+** getPageNormal() -- The normal getter
+** getPageError() -- Used if the pager is in an error state
+** getPageMmap() -- Used if memory-mapped I/O is enabled
+**
+** If the requested page is already in the cache, it is returned.
+** Otherwise, a new page object is allocated and populated with data
+** read from the database file. In some cases, the pcache module may
+** choose not to allocate a new page object and may reuse an existing
+** object with no outstanding references.
+**
+** The extra data appended to a page is always initialized to zeros the
+** first time a page is loaded into memory. If the page requested is
+** already in the cache when this function is called, then the extra
+** data is left as it was when the page object was last used.
+**
+** If the database image is smaller than the requested page or if
+** the flags parameter contains the PAGER_GET_NOCONTENT bit and the
+** requested page is not already stored in the cache, then no
+** actual disk read occurs. In this case the memory image of the
+** page is initialized to all zeros.
+**
+** If PAGER_GET_NOCONTENT is true, it means that we do not care about
+** the contents of the page. This occurs in two scenarios:
+**
+** a) When reading a free-list leaf page from the database, and
+**
+** b) When a savepoint is being rolled back and we need to load
+** a new page into the cache to be filled with the data read
+** from the savepoint journal.
+**
+** If PAGER_GET_NOCONTENT is true, then the data returned is zeroed instead
+** of being read from the database. Additionally, the bits corresponding
+** to pgno in Pager.pInJournal (bitvec of pages already written to the
+** journal file) and the PagerSavepoint.pInSavepoint bitvecs of any open
+** savepoints are set. This means if the page is made writable at any
+** point in the future, using a call to sqlite3PagerWrite(), its contents
+** will not be journaled. This saves IO.
+**
+** The acquisition might fail for several reasons. In all cases,
+** an appropriate error code is returned and *ppPage is set to NULL.
+**
+** See also sqlite3PagerLookup(). Both this routine and Lookup() attempt
+** to find a page in the in-memory cache first. If the page is not already
+** in memory, this routine goes to disk to read it in whereas Lookup()
+** just returns 0. This routine acquires a read-lock the first time it
+** has to go to disk, and could also play back an old journal if necessary.
+** Since Lookup() never goes to disk, it never has to deal with locks
+** or journal files.
+*/
+static int getPageNormal(
+ Pager *pPager, /* The pager open on the database file */
+ Pgno pgno, /* Page number to fetch */
+ DbPage **ppPage, /* Write a pointer to the page here */
+ int flags /* PAGER_GET_XXX flags */
+){
+ int rc = SQLITE_OK;
+ PgHdr *pPg;
+ u8 noContent; /* True if PAGER_GET_NOCONTENT is set */
+ sqlite3_pcache_page *pBase;
+
+ assert( pPager->errCode==SQLITE_OK );
+ assert( pPager->eState>=PAGER_READER );
+ assert( assert_pager_state(pPager) );
+ assert( pPager->hasHeldSharedLock==1 );
+
+ if( pgno==0 ) return SQLITE_CORRUPT_BKPT;
+ pBase = sqlite3PcacheFetch(pPager->pPCache, pgno, 3);
+ if( pBase==0 ){
+ pPg = 0;
+ rc = sqlite3PcacheFetchStress(pPager->pPCache, pgno, &pBase);
+ if( rc!=SQLITE_OK ) goto pager_acquire_err;
+ if( pBase==0 ){
+ rc = SQLITE_NOMEM_BKPT;
+ goto pager_acquire_err;
+ }
+ }
+ pPg = *ppPage = sqlite3PcacheFetchFinish(pPager->pPCache, pgno, pBase);
+ assert( pPg==(*ppPage) );
+ assert( pPg->pgno==pgno );
+ assert( pPg->pPager==pPager || pPg->pPager==0 );
+
+ noContent = (flags & PAGER_GET_NOCONTENT)!=0;
+ if( pPg->pPager && !noContent ){
+ /* In this case the pcache already contains an initialized copy of
+ ** the page. Return without further ado. */
+ assert( pgno!=PAGER_MJ_PGNO(pPager) );
+ pPager->aStat[PAGER_STAT_HIT]++;
+ return SQLITE_OK;
+
+ }else{
+ /* The pager cache has created a new page. Its content needs to
+ ** be initialized. But first some error checks:
+ **
+ ** (*) obsolete. Was: maximum page number is 2^31
+ ** (2) Never try to fetch the locking page
+ */
+ if( pgno==PAGER_MJ_PGNO(pPager) ){
+ rc = SQLITE_CORRUPT_BKPT;
+ goto pager_acquire_err;
+ }
+
+ pPg->pPager = pPager;
+
+ assert( !isOpen(pPager->fd) || !MEMDB );
+ if( !isOpen(pPager->fd) || pPager->dbSize<pgno || noContent ){
+ if( pgno>pPager->mxPgno ){
+ rc = SQLITE_FULL;
+ goto pager_acquire_err;
+ }
+ if( noContent ){
+ /* Failure to set the bits in the InJournal bit-vectors is benign.
+ ** It merely means that we might do some extra work to journal a
+ ** page that does not need to be journaled. Nevertheless, be sure
+ ** to test the case where a malloc error occurs while trying to set
+ ** a bit in a bit vector.
+ */
+ sqlite3BeginBenignMalloc();
+ if( pgno<=pPager->dbOrigSize ){
+ TESTONLY( rc = ) sqlite3BitvecSet(pPager->pInJournal, pgno);
+ testcase( rc==SQLITE_NOMEM );
+ }
+ TESTONLY( rc = ) addToSavepointBitvecs(pPager, pgno);
+ testcase( rc==SQLITE_NOMEM );
+ sqlite3EndBenignMalloc();
+ }
+ memset(pPg->pData, 0, pPager->pageSize);
+ IOTRACE(("ZERO %p %d\n", pPager, pgno));
+ }else{
+ assert( pPg->pPager==pPager );
+ pPager->aStat[PAGER_STAT_MISS]++;
+ rc = readDbPage(pPg);
+ if( rc!=SQLITE_OK ){
+ goto pager_acquire_err;
+ }
+ }
+ pager_set_pagehash(pPg);
+ }
+ return SQLITE_OK;
+
+pager_acquire_err:
+ assert( rc!=SQLITE_OK );
+ if( pPg ){
+ sqlite3PcacheDrop(pPg);
+ }
+ pagerUnlockIfUnused(pPager);
+ *ppPage = 0;
+ return rc;
+}
+
+#if SQLITE_MAX_MMAP_SIZE>0
+/* The page getter for when memory-mapped I/O is enabled */
+static int getPageMMap(
+ Pager *pPager, /* The pager open on the database file */
+ Pgno pgno, /* Page number to fetch */
+ DbPage **ppPage, /* Write a pointer to the page here */
+ int flags /* PAGER_GET_XXX flags */
+){
+ int rc = SQLITE_OK;
+ PgHdr *pPg = 0;
+ u32 iFrame = 0; /* Frame to read from WAL file */
+
+ /* It is acceptable to use a read-only (mmap) page for any page except
+ ** page 1 if there is no write-transaction open or the PAGER_GET_READONLY
+ ** flag was specified by the caller. And so long as the db is not a
+ ** temporary or in-memory database. */
+ const int bMmapOk = (pgno>1
+ && (pPager->eState==PAGER_READER || (flags & PAGER_GET_READONLY))
+ );
+
+ assert( USEFETCH(pPager) );
+
+ /* Optimization note: Adding the "pgno<=1" term before "pgno==0" here
+ ** allows the compiler optimizer to reuse the results of the "pgno>1"
+ ** test in the previous statement, and avoid testing pgno==0 in the
+ ** common case where pgno is large. */
+ if( pgno<=1 && pgno==0 ){
+ return SQLITE_CORRUPT_BKPT;
+ }
+ assert( pPager->eState>=PAGER_READER );
+ assert( assert_pager_state(pPager) );
+ assert( pPager->hasHeldSharedLock==1 );
+ assert( pPager->errCode==SQLITE_OK );
+
+ if( bMmapOk && pagerUseWal(pPager) ){
+ rc = sqlite3WalFindFrame(pPager->pWal, pgno, &iFrame);
+ if( rc!=SQLITE_OK ){
+ *ppPage = 0;
+ return rc;
+ }
+ }
+ if( bMmapOk && iFrame==0 ){
+ void *pData = 0;
+ rc = sqlite3OsFetch(pPager->fd,
+ (i64)(pgno-1) * pPager->pageSize, pPager->pageSize, &pData
+ );
+ if( rc==SQLITE_OK && pData ){
+ if( pPager->eState>PAGER_READER || pPager->tempFile ){
+ pPg = sqlite3PagerLookup(pPager, pgno);
+ }
+ if( pPg==0 ){
+ rc = pagerAcquireMapPage(pPager, pgno, pData, &pPg);
+ }else{
+ sqlite3OsUnfetch(pPager->fd, (i64)(pgno-1)*pPager->pageSize, pData);
+ }
+ if( pPg ){
+ assert( rc==SQLITE_OK );
+ *ppPage = pPg;
+ return SQLITE_OK;
+ }
+ }
+ if( rc!=SQLITE_OK ){
+ *ppPage = 0;
+ return rc;
+ }
+ }
+ return getPageNormal(pPager, pgno, ppPage, flags);
+}
+#endif /* SQLITE_MAX_MMAP_SIZE>0 */
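+
+/* Byte-offset convention used by getPageMMap() above: page numbers are
+** 1-based, so page pgno begins at file offset (pgno-1)*pageSize. For
+** example, with a 4096-byte page size, page 3 starts at byte 8192.
+** (Worked example for illustration.)
+*/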
+
+/* The page getter method for when the pager is in an error state */
+static int getPageError(
+ Pager *pPager, /* The pager open on the database file */
+ Pgno pgno, /* Page number to fetch */
+ DbPage **ppPage, /* Write a pointer to the page here */
+ int flags /* PAGER_GET_XXX flags */
+){
+ UNUSED_PARAMETER(pgno);
+ UNUSED_PARAMETER(flags);
+ assert( pPager->errCode!=SQLITE_OK );
+ *ppPage = 0;
+ return pPager->errCode;
+}
+
+
+/* Dispatch all page fetch requests to the appropriate getter method.
+*/
+SQLITE_PRIVATE int sqlite3PagerGet(
+ Pager *pPager, /* The pager open on the database file */
+ Pgno pgno, /* Page number to fetch */
+ DbPage **ppPage, /* Write a pointer to the page here */
+ int flags /* PAGER_GET_XXX flags */
+){
+ return pPager->xGet(pPager, pgno, ppPage, flags);
+}
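+
+/* Illustrative caller-side sketch of the getter contract described above:
+** every successful sqlite3PagerGet() must be balanced by exactly one
+** unref, and on failure *ppPage is always zeroed. The helper below is
+** hypothetical and guarded by "#if 0" because it is exposition only,
+** not part of the pager.
+*/
+#if 0
+static int examplePeekPage(Pager *pPager, Pgno pgno){
+  DbPage *pPage = 0;
+  int rc = sqlite3PagerGet(pPager, pgno, &pPage, 0);
+  if( rc==SQLITE_OK ){
+    /* pPage->pData holds pPager->pageSize bytes of page content here */
+    sqlite3PagerUnrefNotNull(pPage);  /* balance the reference */
+  }else{
+    assert( pPage==0 );
+  }
+  return rc;
+}
+#endif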
+
+/*
+** Acquire a page if it is already in the in-memory cache. Do
+** not read the page from disk. Return a pointer to the page,
+** or 0 if the page is not in cache.
+**
+** See also sqlite3PagerGet(). The difference between this routine
+** and sqlite3PagerGet() is that _get() will go to the disk and read
+** in the page if the page is not already in cache. This routine
+** returns NULL if the page is not in cache or if a disk I/O error
+** has ever happened.
+*/
+SQLITE_PRIVATE DbPage *sqlite3PagerLookup(Pager *pPager, Pgno pgno){
+ sqlite3_pcache_page *pPage;
+ assert( pPager!=0 );
+ assert( pgno!=0 );
+ assert( pPager->pPCache!=0 );
+ pPage = sqlite3PcacheFetch(pPager->pPCache, pgno, 0);
+ assert( pPage==0 || pPager->hasHeldSharedLock );
+ if( pPage==0 ) return 0;
+ return sqlite3PcacheFetchFinish(pPager->pPCache, pgno, pPage);
+}
+
+/*
+** Release a page reference.
+**
+** The sqlite3PagerUnref() and sqlite3PagerUnrefNotNull() routines may only
+** be used if we know that the page being released is not the last page.
+** The btree layer always holds page1 open until the end, so these first
+** two routines can be used to release any page other than BtShared.pPage1.
+**
+** Use sqlite3PagerUnrefPageOne() to release page1. This latter routine
+** checks the total number of outstanding pages and if the number of
+** pages reaches zero it drops the database lock.
+*/
+SQLITE_PRIVATE void sqlite3PagerUnrefNotNull(DbPage *pPg){
+ TESTONLY( Pager *pPager = pPg->pPager; )
+ assert( pPg!=0 );
+ if( pPg->flags & PGHDR_MMAP ){
+ assert( pPg->pgno!=1 ); /* Page1 is never memory mapped */
+ pagerReleaseMapPage(pPg);
+ }else{
+ sqlite3PcacheRelease(pPg);
+ }
+ /* Do not use this routine to release the last reference to page1 */
+ assert( sqlite3PcacheRefCount(pPager->pPCache)>0 );
+}
+SQLITE_PRIVATE void sqlite3PagerUnref(DbPage *pPg){
+ if( pPg ) sqlite3PagerUnrefNotNull(pPg);
+}
+SQLITE_PRIVATE void sqlite3PagerUnrefPageOne(DbPage *pPg){
+ Pager *pPager;
+ assert( pPg!=0 );
+ assert( pPg->pgno==1 );
+ assert( (pPg->flags & PGHDR_MMAP)==0 ); /* Page1 is never memory mapped */
+ pPager = pPg->pPager;
+ sqlite3PcacheRelease(pPg);
+ pagerUnlockIfUnused(pPager);
+}
+
+/*
+** This function is called at the start of every write transaction.
+** There must already be a RESERVED or EXCLUSIVE lock on the database
+** file when this routine is called.
+**
+** Open the journal file for pager pPager and write a journal header
+** to the start of it. If there are active savepoints, open the sub-journal
+** as well. This function is only used when the journal file is being
+** opened to write a rollback log for a transaction. It is not used
+** when opening a hot journal file to roll it back.
+**
+** If the journal file is already open (as it may be in exclusive mode),
+** then this function just writes a journal header to the start of the
+** already open file.
+**
+** Whether or not the journal file is opened by this function, the
+** Pager.pInJournal bitvec structure is allocated.
+**
+** Return SQLITE_OK if everything is successful. Otherwise, return
+** SQLITE_NOMEM if the attempt to allocate Pager.pInJournal fails, or
+** an IO error code if opening or writing the journal file fails.
+*/
+static int pager_open_journal(Pager *pPager){
+ int rc = SQLITE_OK; /* Return code */
+ sqlite3_vfs * const pVfs = pPager->pVfs; /* Local cache of vfs pointer */
+
+ assert( pPager->eState==PAGER_WRITER_LOCKED );
+ assert( assert_pager_state(pPager) );
+ assert( pPager->pInJournal==0 );
+
+ /* If already in the error state, this function is a no-op. But on
+ ** the other hand, this routine is never called if we are already in
+ ** an error state. */
+ if( NEVER(pPager->errCode) ) return pPager->errCode;
+
+ if( !pagerUseWal(pPager) && pPager->journalMode!=PAGER_JOURNALMODE_OFF ){
+ pPager->pInJournal = sqlite3BitvecCreate(pPager->dbSize);
+ if( pPager->pInJournal==0 ){
+ return SQLITE_NOMEM_BKPT;
+ }
+
+ /* Open the journal file if it is not already open. */
+ if( !isOpen(pPager->jfd) ){
+ if( pPager->journalMode==PAGER_JOURNALMODE_MEMORY ){
+ sqlite3MemJournalOpen(pPager->jfd);
+ }else{
+ int flags = SQLITE_OPEN_READWRITE|SQLITE_OPEN_CREATE;
+ int nSpill;
+
+ if( pPager->tempFile ){
+ flags |= (SQLITE_OPEN_DELETEONCLOSE|SQLITE_OPEN_TEMP_JOURNAL);
+ nSpill = sqlite3Config.nStmtSpill;
+ }else{
+ flags |= SQLITE_OPEN_MAIN_JOURNAL;
+ nSpill = jrnlBufferSize(pPager);
+ }
+
+ /* Verify that the database still has the same name as it did when
+ ** it was originally opened. */
+ rc = databaseIsUnmoved(pPager);
+ if( rc==SQLITE_OK ){
+ rc = sqlite3JournalOpen(
+ pVfs, pPager->zJournal, pPager->jfd, flags, nSpill
+ );
+ }
+ }
+ assert( rc!=SQLITE_OK || isOpen(pPager->jfd) );
+ }
+
+ /* Write the first journal header to the journal file and open
+ ** the sub-journal if necessary.
+ */
+ if( rc==SQLITE_OK ){
+ /* TODO: Check if all of these are really required. */
+ pPager->nRec = 0;
+ pPager->journalOff = 0;
+ pPager->setSuper = 0;
+ pPager->journalHdr = 0;
+ rc = writeJournalHdr(pPager);
+ }
+ }
+
+ if( rc!=SQLITE_OK ){
+ sqlite3BitvecDestroy(pPager->pInJournal);
+ pPager->pInJournal = 0;
+ }else{
+ assert( pPager->eState==PAGER_WRITER_LOCKED );
+ pPager->eState = PAGER_WRITER_CACHEMOD;
+ }
+
+ return rc;
+}
+
+/*
+** Begin a write-transaction on the specified pager object. If a
+** write-transaction has already been opened, this function is a no-op.
+**
+** If the exFlag argument is false, then acquire at least a RESERVED
+** lock on the database file. If exFlag is true, then acquire at least
+** an EXCLUSIVE lock. If such a lock is already held, no locking
+** functions need be called.
+**
+** If the subjInMemory argument is non-zero, then any sub-journal opened
+** within this transaction will be opened as an in-memory file. This
+** has no effect if the sub-journal is already opened (as it may be when
+** running in exclusive mode) or if the transaction does not require a
+** sub-journal. If the subjInMemory argument is zero, then any required
+** sub-journal is implemented in-memory if pPager is an in-memory database,
+** or using a temporary file otherwise.
+*/
+SQLITE_PRIVATE int sqlite3PagerBegin(Pager *pPager, int exFlag, int subjInMemory){
+ int rc = SQLITE_OK;
+
+ if( pPager->errCode ) return pPager->errCode;
+ assert( pPager->eState>=PAGER_READER && pPager->eState<PAGER_ERROR );
+ pPager->subjInMemory = (u8)subjInMemory;
+
+ if( ALWAYS(pPager->eState==PAGER_READER) ){
+ assert( pPager->pInJournal==0 );
+
+ if( pagerUseWal(pPager) ){
+ /* If the pager is configured to use locking_mode=exclusive, and an
+ ** exclusive lock on the database is not already held, obtain it now.
+ */
+ if( pPager->exclusiveMode && sqlite3WalExclusiveMode(pPager->pWal, -1) ){
+ rc = pagerLockDb(pPager, EXCLUSIVE_LOCK);
+ if( rc!=SQLITE_OK ){
+ return rc;
+ }
+ (void)sqlite3WalExclusiveMode(pPager->pWal, 1);
+ }
+
+ /* Grab the write lock on the log file. If successful, upgrade to
+ ** PAGER_RESERVED state. Otherwise, return an error code to the caller.
+ ** The busy-handler is not invoked if another connection already
+ ** holds the write-lock. If possible, the upper layer will call it.
+ */
+ rc = sqlite3WalBeginWriteTransaction(pPager->pWal);
+ }else{
+ /* Obtain a RESERVED lock on the database file. If the exFlag parameter
+ ** is true, then immediately upgrade this to an EXCLUSIVE lock. The
+ ** busy-handler callback can be used when upgrading to the EXCLUSIVE
+ ** lock, but not when obtaining the RESERVED lock.
+ */
+ rc = pagerLockDb(pPager, RESERVED_LOCK);
+ if( rc==SQLITE_OK && exFlag ){
+ rc = pager_wait_on_lock(pPager, EXCLUSIVE_LOCK);
+ }
+ }
+
+ if( rc==SQLITE_OK ){
+ /* Change to WRITER_LOCKED state.
+ **
+ ** WAL mode sets Pager.eState to PAGER_WRITER_LOCKED or CACHEMOD
+ ** when it has an open transaction, but never to DBMOD or FINISHED.
+ ** This is because in those states the code to roll back savepoint
+ ** transactions may copy data from the sub-journal into the database
+ ** file as well as into the page cache, which would be incorrect in
+ ** WAL mode.
+ */
+ pPager->eState = PAGER_WRITER_LOCKED;
+ pPager->dbHintSize = pPager->dbSize;
+ pPager->dbFileSize = pPager->dbSize;
+ pPager->dbOrigSize = pPager->dbSize;
+ pPager->journalOff = 0;
+ }
+
+ assert( rc==SQLITE_OK || pPager->eState==PAGER_READER );
+ assert( rc!=SQLITE_OK || pPager->eState==PAGER_WRITER_LOCKED );
+ assert( assert_pager_state(pPager) );
+ }
+
+ PAGERTRACE(("TRANSACTION %d\n", PAGERID(pPager)));
+ return rc;
+}
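+
+/* Summary sketch of the write-path state transitions driven by the
+** routines above and below (the authoritative state diagram appears
+** near the top of this pager module):
+**
+**   PAGER_READER          --sqlite3PagerBegin()-->          PAGER_WRITER_LOCKED
+**   PAGER_WRITER_LOCKED   --pager_open_journal()-->         PAGER_WRITER_CACHEMOD
+**   PAGER_WRITER_CACHEMOD --first write to the db file-->   PAGER_WRITER_DBMOD
+**   PAGER_WRITER_DBMOD    --sqlite3PagerCommitPhaseOne()--> PAGER_WRITER_FINISHED
+*/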
+
+/*
+** Write page pPg onto the end of the rollback journal.
+*/
+static SQLITE_NOINLINE int pagerAddPageToRollbackJournal(PgHdr *pPg){
+ Pager *pPager = pPg->pPager;
+ int rc;
+ u32 cksum;
+ char *pData2;
+ i64 iOff = pPager->journalOff;
+
+ /* We should never write to the journal file the page that
+ ** contains the database locks. The following assert verifies
+ ** that we do not. */
+ assert( pPg->pgno!=PAGER_MJ_PGNO(pPager) );
+
+ assert( pPager->journalHdr<=pPager->journalOff );
+ pData2 = pPg->pData;
+ cksum = pager_cksum(pPager, (u8*)pData2);
+
+ /* Even if an IO or diskfull error occurs while journalling the
+ ** page below, set the need-sync flag for the page.
+ ** Otherwise, when the transaction is rolled back, the logic in
+ ** playback_one_page() will think that the page needs to be restored
+ ** in the database file. And if an IO error occurs while doing so,
+ ** then corruption may follow.
+ */
+ pPg->flags |= PGHDR_NEED_SYNC;
+
+ rc = write32bits(pPager->jfd, iOff, pPg->pgno);
+ if( rc!=SQLITE_OK ) return rc;
+ rc = sqlite3OsWrite(pPager->jfd, pData2, pPager->pageSize, iOff+4);
+ if( rc!=SQLITE_OK ) return rc;
+ rc = write32bits(pPager->jfd, iOff+pPager->pageSize+4, cksum);
+ if( rc!=SQLITE_OK ) return rc;
+
+ IOTRACE(("JOUT %p %d %lld %d\n", pPager, pPg->pgno,
+ pPager->journalOff, pPager->pageSize));
+ PAGER_INCR(sqlite3_pager_writej_count);
+ PAGERTRACE(("JOURNAL %d page %d needSync=%d hash(%08x)\n",
+ PAGERID(pPager), pPg->pgno,
+ ((pPg->flags&PGHDR_NEED_SYNC)?1:0), pager_pagehash(pPg)));
+
+ pPager->journalOff += 8 + pPager->pageSize;
+ pPager->nRec++;
+ assert( pPager->pInJournal!=0 );
+ rc = sqlite3BitvecSet(pPager->pInJournal, pPg->pgno);
+ testcase( rc==SQLITE_NOMEM );
+ assert( rc==SQLITE_OK || rc==SQLITE_NOMEM );
+ rc |= addToSavepointBitvecs(pPager, pPg->pgno);
+ assert( rc==SQLITE_OK || rc==SQLITE_NOMEM );
+ return rc;
+}
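+
+/* Layout of the journal record written above, derived from the three
+** write calls (offsets relative to the record start iOff):
+**
+**    bytes 0..3                 page number (big-endian)
+**    bytes 4..pageSize+3        page content
+**    bytes pageSize+4..+7       checksum of the page content
+**
+** Each record therefore occupies 8+pageSize bytes, matching the
+** "journalOff += 8 + pageSize" above. E.g. pageSize==4096 gives a
+** 4104-byte record. (Worked example for illustration.)
+*/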
+
+/*
+** Mark a single data page as writeable. The page is written into the
+** main journal or sub-journal as required. If the page is written into
+** one of the journals, the corresponding bit is set in the
+** Pager.pInJournal bitvec and the PagerSavepoint.pInSavepoint bitvecs
+** of any open savepoints as appropriate.
+*/
+static int pager_write(PgHdr *pPg){
+ Pager *pPager = pPg->pPager;
+ int rc = SQLITE_OK;
+
+ /* This routine is not called unless a write-transaction has already
+ ** been started. The journal file may or may not be open at this point.
+ ** It is never called in the ERROR state.
+ */
+ assert( pPager->eState==PAGER_WRITER_LOCKED
+ || pPager->eState==PAGER_WRITER_CACHEMOD
+ || pPager->eState==PAGER_WRITER_DBMOD
+ );
+ assert( assert_pager_state(pPager) );
+ assert( pPager->errCode==0 );
+ assert( pPager->readOnly==0 );
+ CHECK_PAGE(pPg);
+
+ /* The journal file needs to be opened. Higher level routines have already
+ ** obtained the necessary locks to begin the write-transaction, but the
+ ** rollback journal might not yet be open. Open it now if this is the case.
+ **
+ ** This is done before calling sqlite3PcacheMakeDirty() on the page.
+ ** Otherwise, if it were done after calling sqlite3PcacheMakeDirty(), then
+ ** an error might occur and the pager would end up in WRITER_LOCKED state
+ ** with pages marked as dirty in the cache.
+ */
+ if( pPager->eState==PAGER_WRITER_LOCKED ){
+ rc = pager_open_journal(pPager);
+ if( rc!=SQLITE_OK ) return rc;
+ }
+ assert( pPager->eState>=PAGER_WRITER_CACHEMOD );
+ assert( assert_pager_state(pPager) );
+
+ /* Mark the page that is about to be modified as dirty. */
+ sqlite3PcacheMakeDirty(pPg);
+
+ /* If a rollback journal is in use, then make sure the page that is about
+ ** to change is in the rollback journal, or if the page is a new page off
+ ** the end of the file, make sure it is marked as PGHDR_NEED_SYNC.
+ */
+ assert( (pPager->pInJournal!=0) == isOpen(pPager->jfd) );
+ if( pPager->pInJournal!=0
+ && sqlite3BitvecTestNotNull(pPager->pInJournal, pPg->pgno)==0
+ ){
+ assert( pagerUseWal(pPager)==0 );
+ if( pPg->pgno<=pPager->dbOrigSize ){
+ rc = pagerAddPageToRollbackJournal(pPg);
+ if( rc!=SQLITE_OK ){
+ return rc;
+ }
+ }else{
+ if( pPager->eState!=PAGER_WRITER_DBMOD ){
+ pPg->flags |= PGHDR_NEED_SYNC;
+ }
+ PAGERTRACE(("APPEND %d page %d needSync=%d\n",
+ PAGERID(pPager), pPg->pgno,
+ ((pPg->flags&PGHDR_NEED_SYNC)?1:0)));
+ }
+ }
+
+ /* The PGHDR_DIRTY bit is set above when the page was added to the dirty-list
+ ** and before writing the page into the rollback journal. Wait until now,
+ ** after the page has been successfully journalled, before setting the
+ ** PGHDR_WRITEABLE bit that indicates that the page can be safely modified.
+ */
+ pPg->flags |= PGHDR_WRITEABLE;
+
+ /* If the statement journal is open and the page is not in it,
+ ** then write the page into the statement journal.
+ */
+ if( pPager->nSavepoint>0 ){
+ rc = subjournalPageIfRequired(pPg);
+ }
+
+ /* Update the database size and return. */
+ if( pPager->dbSize<pPg->pgno ){
+ pPager->dbSize = pPg->pgno;
+ }
+ return rc;
+}
+
+/*
+** This is a variant of sqlite3PagerWrite() that runs when the sector size
+** is larger than the page size. SQLite makes the (reasonable) assumption that
+** all bytes of a sector are written together by hardware. Hence, all bytes of
+** a sector need to be journalled in case of a power loss in the middle of
+** a write.
+**
+** Usually, the sector size is less than or equal to the page size, in which
+** case pages can be individually written. This routine only runs in the
+** exceptional case where the page size is smaller than the sector size.
+*/
+static SQLITE_NOINLINE int pagerWriteLargeSector(PgHdr *pPg){
+ int rc = SQLITE_OK; /* Return code */
+ Pgno nPageCount; /* Total number of pages in database file */
+ Pgno pg1; /* First page of the sector pPg is located on. */
+ int nPage = 0; /* Number of pages starting at pg1 to journal */
+ int ii; /* Loop counter */
+ int needSync = 0; /* True if any page has PGHDR_NEED_SYNC */
+ Pager *pPager = pPg->pPager; /* The pager that owns pPg */
+ Pgno nPagePerSector = (pPager->sectorSize/pPager->pageSize);
+
+ /* Set the doNotSpill NOSYNC bit to 1. This is because we cannot allow
+ ** a journal header to be written between the pages journaled by
+ ** this function.
+ */
+ assert( !MEMDB );
+ assert( (pPager->doNotSpill & SPILLFLAG_NOSYNC)==0 );
+ pPager->doNotSpill |= SPILLFLAG_NOSYNC;
+
+ /* This trick assumes that both the page-size and sector-size are
+ ** an integer power of 2. It sets variable pg1 to the identifier
+ ** of the first page of the sector pPg is located on.
+ */
+ pg1 = ((pPg->pgno-1) & ~(nPagePerSector-1)) + 1;
+
+ nPageCount = pPager->dbSize;
+ if( pPg->pgno>nPageCount ){
+ nPage = (pPg->pgno - pg1)+1;
+ }else if( (pg1+nPagePerSector-1)>nPageCount ){
+ nPage = nPageCount+1-pg1;
+ }else{
+ nPage = nPagePerSector;
+ }
+ assert(nPage>0);
+ assert(pg1<=pPg->pgno);
+ assert((pg1+nPage)>pPg->pgno);
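+
+ /* Worked example of the arithmetic above (illustrative values only):
+ ** pageSize=1024 and sectorSize=4096 give nPagePerSector=4. For
+ ** pPg->pgno==6: pg1 = ((6-1) & ~3) + 1 = 5, so the sector spans pages
+ ** 5..8. If the database holds only 6 pages (nPageCount==6), then
+ ** nPage = 6+1-5 = 2 and just pages 5 and 6 are considered below.
+ */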
+
+ for(ii=0; ii<nPage && rc==SQLITE_OK; ii++){
+ Pgno pg = pg1+ii;
+ PgHdr *pPage;
+ if( pg==pPg->pgno || !sqlite3BitvecTest(pPager->pInJournal, pg) ){
+ if( pg!=PAGER_MJ_PGNO(pPager) ){
+ rc = sqlite3PagerGet(pPager, pg, &pPage, 0);
+ if( rc==SQLITE_OK ){
+ rc = pager_write(pPage);
+ if( pPage->flags&PGHDR_NEED_SYNC ){
+ needSync = 1;
+ }
+ sqlite3PagerUnrefNotNull(pPage);
+ }
+ }
+ }else if( (pPage = sqlite3PagerLookup(pPager, pg))!=0 ){
+ if( pPage->flags&PGHDR_NEED_SYNC ){
+ needSync = 1;
+ }
+ sqlite3PagerUnrefNotNull(pPage);
+ }
+ }
+
+ /* If the PGHDR_NEED_SYNC flag is set for any of the nPage pages
+ ** starting at pg1, then it needs to be set for all of them. Because
+ ** writing to any of these nPage pages may damage the others, the
+ ** journal file must contain sync()ed copies of all of them
+ ** before any of them can be written out to the database file.
+ */
+ if( rc==SQLITE_OK && needSync ){
+ assert( !MEMDB );
+ for(ii=0; ii<nPage; ii++){
+ PgHdr *pPage = sqlite3PagerLookup(pPager, pg1+ii);
+ if( pPage ){
+ pPage->flags |= PGHDR_NEED_SYNC;
+ sqlite3PagerUnrefNotNull(pPage);
+ }
+ }
+ }
+
+ assert( (pPager->doNotSpill & SPILLFLAG_NOSYNC)!=0 );
+ pPager->doNotSpill &= ~SPILLFLAG_NOSYNC;
+ return rc;
+}
+
+/*
+** Mark a data page as writeable. This routine must be called before
+** making changes to a page. The caller must check the return value
+** of this function and be careful not to change any page data unless
+** this routine returns SQLITE_OK.
+**
+** The difference between this function and pager_write() is that this
+** function also deals with the special case where 2 or more pages
+** fit on a single disk sector. In this case all co-resident pages
+** must have been written to the journal file before returning.
+**
+** If an error occurs, SQLITE_NOMEM or an IO error code is returned
+** as appropriate. Otherwise, SQLITE_OK.
+*/
+SQLITE_PRIVATE int sqlite3PagerWrite(PgHdr *pPg){
+ Pager *pPager = pPg->pPager;
+ assert( (pPg->flags & PGHDR_MMAP)==0 );
+ assert( pPager->eState>=PAGER_WRITER_LOCKED );
+ assert( assert_pager_state(pPager) );
+ if( (pPg->flags & PGHDR_WRITEABLE)!=0 && pPager->dbSize>=pPg->pgno ){
+ if( pPager->nSavepoint ) return subjournalPageIfRequired(pPg);
+ return SQLITE_OK;
+ }else if( pPager->errCode ){
+ return pPager->errCode;
+ }else if( pPager->sectorSize > (u32)pPager->pageSize ){
+ assert( pPager->tempFile==0 );
+ return pagerWriteLargeSector(pPg);
+ }else{
+ return pager_write(pPg);
+ }
+}
+
+/*
+** Return TRUE if the page given in the argument was previously passed
+** to sqlite3PagerWrite(). In other words, return TRUE if it is ok
+** to change the content of the page.
+*/
+#ifndef NDEBUG
+SQLITE_PRIVATE int sqlite3PagerIswriteable(DbPage *pPg){
+ return pPg->flags & PGHDR_WRITEABLE;
+}
+#endif
+
+/*
+** A call to this routine tells the pager that it is not necessary to
+** write the information on page pPg back to the disk, even though
+** that page might be marked as dirty. This happens, for example, when
+** the page has been added as a leaf of the freelist and so its
+** content no longer matters.
+**
+** The overlying software layer calls this routine when all of the data
+** on the given page is unused. The pager marks the page as clean so
+** that it does not get written to disk.
+**
+** Tests show that this optimization can quadruple the speed of large
+** DELETE operations.
+**
+** This optimization cannot be used with a temp-file, as the page may
+** have been dirty at the start of the transaction. In that case, if
+** memory pressure forces page pPg out of the cache, the data does need
+** to be written out to disk so that it may be read back in if the
+** current transaction is rolled back.
+*/
+SQLITE_PRIVATE void sqlite3PagerDontWrite(PgHdr *pPg){
+ Pager *pPager = pPg->pPager;
+ if( !pPager->tempFile && (pPg->flags&PGHDR_DIRTY) && pPager->nSavepoint==0 ){
+ PAGERTRACE(("DONT_WRITE page %d of %d\n", pPg->pgno, PAGERID(pPager)));
+ IOTRACE(("CLEAN %p %d\n", pPager, pPg->pgno))
+ pPg->flags |= PGHDR_DONT_WRITE;
+ pPg->flags &= ~PGHDR_WRITEABLE;
+ testcase( pPg->flags & PGHDR_NEED_SYNC );
+ pager_set_pagehash(pPg);
+ }
+}
+
+/*
+** This routine is called to increment the value of the database file
+** change-counter, stored as a 4-byte big-endian integer starting at
+** byte offset 24 of the pager file. The secondary change counter at
+** 92 is also updated, as is the SQLite version number at offset 96.
+**
+** But this only happens if the pPager->changeCountDone flag is false.
+** To avoid excess churning of page 1, the update only happens once.
+** See also the pager_write_changecounter() routine that does an
+** unconditional update of the change counters.
+**
+** If the isDirectMode flag is zero, then this is done by calling
+** sqlite3PagerWrite() on page 1, then modifying the contents of the
+** page data. In this case the file will be updated when the current
+** transaction is committed.
+**
+** The isDirectMode flag may only be non-zero if the library was compiled
+** with the SQLITE_ENABLE_ATOMIC_WRITE macro defined. In this case,
+** if isDirect is non-zero, then the database file is updated directly
+** by writing an updated version of page 1 using a call to the
+** sqlite3OsWrite() function.
+*/
+static int pager_incr_changecounter(Pager *pPager, int isDirectMode){
+ int rc = SQLITE_OK;
+
+ assert( pPager->eState==PAGER_WRITER_CACHEMOD
+ || pPager->eState==PAGER_WRITER_DBMOD
+ );
+ assert( assert_pager_state(pPager) );
+
+ /* Declare and initialize constant integer 'isDirect'. If the
+ ** atomic-write optimization is enabled in this build, then isDirect
+ ** is initialized to the value passed as the isDirectMode parameter
+ ** to this function. Otherwise, it is always set to zero.
+ **
+ ** The idea is that if the atomic-write optimization is not
+ ** enabled at compile time, the compiler can omit the tests of
+ ** 'isDirect' below, as well as the block enclosed in the
+ ** "if( isDirect )" condition.
+ */
+#ifndef SQLITE_ENABLE_ATOMIC_WRITE
+# define DIRECT_MODE 0
+ assert( isDirectMode==0 );
+ UNUSED_PARAMETER(isDirectMode);
+#else
+# define DIRECT_MODE isDirectMode
+#endif
+
+ if( !pPager->changeCountDone && ALWAYS(pPager->dbSize>0) ){
+ PgHdr *pPgHdr; /* Reference to page 1 */
+
+ assert( !pPager->tempFile && isOpen(pPager->fd) );
+
+ /* Open page 1 of the file for writing. */
+ rc = sqlite3PagerGet(pPager, 1, &pPgHdr, 0);
+ assert( pPgHdr==0 || rc==SQLITE_OK );
+
+ /* If page one was fetched successfully, and this function is not
+ ** operating in direct-mode, make page 1 writable. When not in
+ ** direct mode, page 1 is always held in cache and hence the PagerGet()
+ ** above is always successful - hence the ALWAYS on rc==SQLITE_OK.
+ */
+ if( !DIRECT_MODE && ALWAYS(rc==SQLITE_OK) ){
+ rc = sqlite3PagerWrite(pPgHdr);
+ }
+
+ if( rc==SQLITE_OK ){
+ /* Actually do the update of the change counter */
+ pager_write_changecounter(pPgHdr);
+
+ /* If running in direct mode, write the contents of page 1 to the file. */
+ if( DIRECT_MODE ){
+ const void *zBuf;
+ assert( pPager->dbFileSize>0 );
+ zBuf = pPgHdr->pData;
+ if( rc==SQLITE_OK ){
+ rc = sqlite3OsWrite(pPager->fd, zBuf, pPager->pageSize, 0);
+ pPager->aStat[PAGER_STAT_WRITE]++;
+ }
+ if( rc==SQLITE_OK ){
+ /* Update the pager's copy of the change-counter. Otherwise, the
+ ** next time a read transaction is opened the cache will be
+ ** flushed (as the change-counter values will not match). */
+ const void *pCopy = (const void *)&((const char *)zBuf)[24];
+ memcpy(&pPager->dbFileVers, pCopy, sizeof(pPager->dbFileVers));
+ pPager->changeCountDone = 1;
+ }
+ }else{
+ pPager->changeCountDone = 1;
+ }
+ }
+
+ /* Release the page reference. */
+ sqlite3PagerUnref(pPgHdr);
+ }
+ return rc;
+}
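+
+/* For reference, the page-1 header fields involved in the change-counter
+** update (big-endian values at fixed offsets in the database header):
+**
+**    offset 24   file change counter
+**    offset 92   version-valid-for number (the secondary counter)
+**    offset 96   SQLITE_VERSION_NUMBER of the library that wrote the file
+**
+** The 16-byte snapshot copied from &zBuf[24] above is retained in
+** Pager.dbFileVers so that a later read transaction can detect whether
+** some other connection modified the file.
+*/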
+
+/*
+** Sync the database file to disk. This is a no-op for in-memory databases
+** or pagers that have the Pager.noSync flag set.
+**
+** If successful, or if called on a pager for which it is a no-op, this
+** function returns SQLITE_OK. Otherwise, an IO error code is returned.
+*/
+SQLITE_PRIVATE int sqlite3PagerSync(Pager *pPager, const char *zSuper){
+ int rc = SQLITE_OK;
+ void *pArg = (void*)zSuper;
+ rc = sqlite3OsFileControl(pPager->fd, SQLITE_FCNTL_SYNC, pArg);
+ if( rc==SQLITE_NOTFOUND ) rc = SQLITE_OK;
+ if( rc==SQLITE_OK && !pPager->noSync ){
+ assert( !MEMDB );
+ rc = sqlite3OsSync(pPager->fd, pPager->syncFlags);
+ }
+ return rc;
+}
+
+/*
+** This function may only be called while a write-transaction is active
+** in rollback (journal) mode. If the connection is in WAL mode, this call
+** is a no-op.
+** Otherwise, if the connection does not already have an EXCLUSIVE lock on
+** the database file, an attempt is made to obtain one.
+**
+** If the EXCLUSIVE lock is already held or the attempt to obtain it is
+** successful, or the connection is in WAL mode, SQLITE_OK is returned.
+** Otherwise, either SQLITE_BUSY or an SQLITE_IOERR_XXX error code is
+** returned.
+*/
+SQLITE_PRIVATE int sqlite3PagerExclusiveLock(Pager *pPager){
+ int rc = pPager->errCode;
+ assert( assert_pager_state(pPager) );
+ if( rc==SQLITE_OK ){
+ assert( pPager->eState==PAGER_WRITER_CACHEMOD
+ || pPager->eState==PAGER_WRITER_DBMOD
+ || pPager->eState==PAGER_WRITER_LOCKED
+ );
+ assert( assert_pager_state(pPager) );
+ if( 0==pagerUseWal(pPager) ){
+ rc = pager_wait_on_lock(pPager, EXCLUSIVE_LOCK);
+ }
+ }
+ return rc;
+}
+
+/*
+** Sync the database file for the pager pPager. zSuper points to the name
+** of a super-journal file that should be written into the individual
+** journal file. zSuper may be NULL, which is interpreted as no
+** super-journal (a single database transaction).
+**
+** This routine ensures that:
+**
+** * The database file change-counter is updated,
+** * the journal is synced (unless the atomic-write optimization is used),
+** * all dirty pages are written to the database file,
+** * the database file is truncated (if required), and
+** * the database file is synced.
+**
+** The only thing that remains to commit the transaction is to finalize
+** (delete, truncate or zero the first part of) the journal file (or
+** delete the super-journal file if specified).
+**
+** Note that if zSuper==NULL, this does not overwrite a previous value
+** passed to an sqlite3PagerCommitPhaseOne() call.
+**
+** If the final parameter - noSync - is true, then the database file itself
+** is not synced. The caller must call sqlite3PagerSync() directly to
+** sync the database file before calling CommitPhaseTwo() to delete the
+** journal file in this case.
+*/
+SQLITE_PRIVATE int sqlite3PagerCommitPhaseOne(
+ Pager *pPager, /* Pager object */
+ const char *zSuper, /* If not NULL, the super-journal name */
+ int noSync /* True to omit the xSync on the db file */
+){
+ int rc = SQLITE_OK; /* Return code */
+
+ assert( pPager->eState==PAGER_WRITER_LOCKED
+ || pPager->eState==PAGER_WRITER_CACHEMOD
+ || pPager->eState==PAGER_WRITER_DBMOD
+ || pPager->eState==PAGER_ERROR
+ );
+ assert( assert_pager_state(pPager) );
+
+ /* If a prior error occurred, report that error again. */
+ if( NEVER(pPager->errCode) ) return pPager->errCode;
+
+ /* Provide the ability to easily simulate an I/O error during testing */
+ if( sqlite3FaultSim(400) ) return SQLITE_IOERR;
+
+ PAGERTRACE(("DATABASE SYNC: File=%s zSuper=%s nSize=%d\n",
+ pPager->zFilename, zSuper, pPager->dbSize));
+
+ /* If no database changes have been made, return early. */
+ if( pPager->eState<PAGER_WRITER_CACHEMOD ) return SQLITE_OK;
+
+ assert( MEMDB==0 || pPager->tempFile );
+ assert( isOpen(pPager->fd) || pPager->tempFile );
+ if( 0==pagerFlushOnCommit(pPager, 1) ){
+ /* If this is an in-memory db, or no pages have been written to, or this
+ ** function has already been called, it is mostly a no-op. However, any
+ ** backup in progress needs to be restarted. */
+ sqlite3BackupRestart(pPager->pBackup);
+ }else{
+ PgHdr *pList;
+ if( pagerUseWal(pPager) ){
+ PgHdr *pPageOne = 0;
+ pList = sqlite3PcacheDirtyList(pPager->pPCache);
+ if( pList==0 ){
+ /* Must have at least one page for the WAL commit flag.
+ ** Ticket [2d1a5c67dfc2363e44f29d9bbd57f] 2011-05-18 */
+ rc = sqlite3PagerGet(pPager, 1, &pPageOne, 0);
+ pList = pPageOne;
+ pList->pDirty = 0;
+ }
+ assert( rc==SQLITE_OK );
+ if( ALWAYS(pList) ){
+ rc = pagerWalFrames(pPager, pList, pPager->dbSize, 1);
+ }
+ sqlite3PagerUnref(pPageOne);
+ if( rc==SQLITE_OK ){
+ sqlite3PcacheCleanAll(pPager->pPCache);
+ }
+ }else{
+ /* The bBatch boolean is true if the batch-atomic-write commit method
+ ** should be used. No rollback journal is created if batch-atomic-write
+ ** is enabled.
+ */
+#ifdef SQLITE_ENABLE_BATCH_ATOMIC_WRITE
+ sqlite3_file *fd = pPager->fd;
+ int bBatch = zSuper==0 /* An SQLITE_IOCAP_BATCH_ATOMIC commit */
+ && (sqlite3OsDeviceCharacteristics(fd) & SQLITE_IOCAP_BATCH_ATOMIC)
+ && !pPager->noSync
+ && sqlite3JournalIsInMemory(pPager->jfd);
+#else
+# define bBatch 0
+#endif
+
+#ifdef SQLITE_ENABLE_ATOMIC_WRITE
+ /* The following block updates the change-counter. Exactly how it
+ ** does this depends on whether or not the atomic-update optimization
+ ** was enabled at compile time, and if this transaction meets the
+ ** runtime criteria to use the operation:
+ **
+ ** * The file-system supports the atomic-write property for
+ ** blocks of size page-size, and
+ ** * This commit is not part of a multi-file transaction, and
+ ** * Exactly one page has been modified and stored in the journal file.
+ **
+ ** If the optimization was not enabled at compile time, then the
+ ** pager_incr_changecounter() function is called to update the change
+ ** counter in 'indirect-mode'. If the optimization is compiled in but
+ ** is not applicable to this transaction, call sqlite3JournalCreate()
+ ** to make sure the journal file has actually been created, then call
+ ** pager_incr_changecounter() to update the change-counter in indirect
+ ** mode.
+ **
+ ** Otherwise, if the optimization is both enabled and applicable,
+ ** then call pager_incr_changecounter() to update the change-counter
+ ** in 'direct' mode. In this case the journal file will never be
+ ** created for this transaction.
+ */
+ if( bBatch==0 ){
+ PgHdr *pPg;
+ assert( isOpen(pPager->jfd)
+ || pPager->journalMode==PAGER_JOURNALMODE_OFF
+ || pPager->journalMode==PAGER_JOURNALMODE_WAL
+ );
+ if( !zSuper && isOpen(pPager->jfd)
+ && pPager->journalOff==jrnlBufferSize(pPager)
+ && pPager->dbSize>=pPager->dbOrigSize
+ && (!(pPg = sqlite3PcacheDirtyList(pPager->pPCache)) || 0==pPg->pDirty)
+ ){
+ /* Update the db file change counter via the direct-write method. The
+ ** following call will modify the in-memory representation of page 1
+ ** to include the updated change counter and then write page 1
+ ** directly to the database file. Because of the atomic-write
+ ** property of the host file-system, this is safe.
+ */
+ rc = pager_incr_changecounter(pPager, 1);
+ }else{
+ rc = sqlite3JournalCreate(pPager->jfd);
+ if( rc==SQLITE_OK ){
+ rc = pager_incr_changecounter(pPager, 0);
+ }
+ }
+ }
+#else /* SQLITE_ENABLE_ATOMIC_WRITE */
+#ifdef SQLITE_ENABLE_BATCH_ATOMIC_WRITE
+ if( zSuper ){
+ rc = sqlite3JournalCreate(pPager->jfd);
+ if( rc!=SQLITE_OK ) goto commit_phase_one_exit;
+ assert( bBatch==0 );
+ }
+#endif
+ rc = pager_incr_changecounter(pPager, 0);
+#endif /* !SQLITE_ENABLE_ATOMIC_WRITE */
+ if( rc!=SQLITE_OK ) goto commit_phase_one_exit;
+
+ /* Write the super-journal name into the journal file. If a
+ ** super-journal file name has already been written to the journal file,
+ ** or if zSuper is NULL (no super-journal), then this call is a no-op.
+ */
+ rc = writeSuperJournal(pPager, zSuper);
+ if( rc!=SQLITE_OK ) goto commit_phase_one_exit;
+
+ /* Sync the journal file and write all dirty pages to the database.
+ ** If the atomic-update optimization is being used, this sync will not
+ ** create the journal file or perform any real IO.
+ **
+ ** Because the change-counter page was just modified, unless the
+ ** atomic-update optimization is used it is almost certain that the
+ ** journal requires a sync here. However, in locking_mode=exclusive
+ ** on a system under memory pressure it is just possible that this is
+ ** not the case. In this case it is likely enough that the redundant
+ ** xSync() call will be changed to a no-op by the OS anyhow.
+ */
+ rc = syncJournal(pPager, 0);
+ if( rc!=SQLITE_OK ) goto commit_phase_one_exit;
+
+ pList = sqlite3PcacheDirtyList(pPager->pPCache);
+#ifdef SQLITE_ENABLE_BATCH_ATOMIC_WRITE
+ if( bBatch ){
+ rc = sqlite3OsFileControl(fd, SQLITE_FCNTL_BEGIN_ATOMIC_WRITE, 0);
+ if( rc==SQLITE_OK ){
+ rc = pager_write_pagelist(pPager, pList);
+ if( rc==SQLITE_OK ){
+ rc = sqlite3OsFileControl(fd, SQLITE_FCNTL_COMMIT_ATOMIC_WRITE, 0);
+ }
+ if( rc!=SQLITE_OK ){
+ sqlite3OsFileControlHint(fd, SQLITE_FCNTL_ROLLBACK_ATOMIC_WRITE, 0);
+ }
+ }
+
+ if( (rc&0xFF)==SQLITE_IOERR && rc!=SQLITE_IOERR_NOMEM ){
+ rc = sqlite3JournalCreate(pPager->jfd);
+ if( rc!=SQLITE_OK ){
+ sqlite3OsClose(pPager->jfd);
+ goto commit_phase_one_exit;
+ }
+ bBatch = 0;
+ }else{
+ sqlite3OsClose(pPager->jfd);
+ }
+ }
+#endif /* SQLITE_ENABLE_BATCH_ATOMIC_WRITE */
+
+ if( bBatch==0 ){
+ rc = pager_write_pagelist(pPager, pList);
+ }
+ if( rc!=SQLITE_OK ){
+ assert( rc!=SQLITE_IOERR_BLOCKED );
+ goto commit_phase_one_exit;
+ }
+ sqlite3PcacheCleanAll(pPager->pPCache);
+
+ /* If the file on disk is smaller than the database image, use
+ ** pager_truncate to grow the file here. This can happen if the database
+ ** image was extended as part of the current transaction and then the
+ ** last page in the db image moved to the free-list. In this case the
+ ** last page is never written out to disk, leaving the database file
+ ** undersized. Fix this now if it is the case. */
+ if( pPager->dbSize>pPager->dbFileSize ){
+ Pgno nNew = pPager->dbSize - (pPager->dbSize==PAGER_MJ_PGNO(pPager));
+ assert( pPager->eState==PAGER_WRITER_DBMOD );
+ rc = pager_truncate(pPager, nNew);
+ if( rc!=SQLITE_OK ) goto commit_phase_one_exit;
+ }
+
+ /* Finally, sync the database file. */
+ if( !noSync ){
+ rc = sqlite3PagerSync(pPager, zSuper);
+ }
+ IOTRACE(("DBSYNC %p\n", pPager))
+ }
+ }
+
+commit_phase_one_exit:
+ if( rc==SQLITE_OK && !pagerUseWal(pPager) ){
+ pPager->eState = PAGER_WRITER_FINISHED;
+ }
+ return rc;
+}
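+
+/* Illustrative sketch of how an upper layer might drive the two commit
+** phases implemented here (error handling abbreviated; the helper is
+** hypothetical and guarded by "#if 0" because it is exposition only,
+** not part of the pager):
+*/
+#if 0
+static int exampleCommit(Pager *pPager, const char *zSuper){
+  int rc = sqlite3PagerCommitPhaseOne(pPager, zSuper, 0);
+  if( rc==SQLITE_OK ){
+    rc = sqlite3PagerCommitPhaseTwo(pPager);  /* finalize the journal */
+  }
+  if( rc!=SQLITE_OK ){
+    sqlite3PagerRollback(pPager);  /* revert if either phase failed */
+  }
+  return rc;
+}
+#endif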
+
+
+/*
+** When this function is called, the database file has been completely
+** updated to reflect the changes made by the current transaction and
+** synced to disk. The journal file still exists in the file-system
+** though, and if a failure occurs at this point it will eventually
+** be used as a hot-journal and the current transaction rolled back.
+**
+** This function finalizes the journal file, either by deleting,
+** truncating or partially zeroing it, so that it cannot be used
+** for hot-journal rollback. Once this is done the transaction is
+** irrevocably committed.
+**
+** If an error occurs, an IO error code is returned and the pager
+** moves into the error state. Otherwise, SQLITE_OK is returned.
+*/
+SQLITE_PRIVATE int sqlite3PagerCommitPhaseTwo(Pager *pPager){
+ int rc = SQLITE_OK; /* Return code */
+
+ /* This routine should not be called if a prior error has occurred.
+ ** But if (due to a coding error elsewhere in the system) it does get
+ ** called, just return the same error code without doing anything. */
+ if( NEVER(pPager->errCode) ) return pPager->errCode;
+ pPager->iDataVersion++;
+
+ assert( pPager->eState==PAGER_WRITER_LOCKED
+ || pPager->eState==PAGER_WRITER_FINISHED
+ || (pagerUseWal(pPager) && pPager->eState==PAGER_WRITER_CACHEMOD)
+ );
+ assert( assert_pager_state(pPager) );
+
+ /* An optimization. If the database was not actually modified during
+ ** this transaction, and the pager is running in exclusive-mode with
+ ** persistent journals, then this function is a no-op.
+ **
+ ** The start of the journal file currently contains a single journal
+ ** header with the nRec field set to 0. If such a journal is used as
+ ** a hot-journal during hot-journal rollback, 0 changes will be made
+ ** to the database file. So there is no need to zero the journal
+ ** header. Since the pager is in exclusive mode, there is no need
+ ** to drop any locks either.
+ */
+ if( pPager->eState==PAGER_WRITER_LOCKED
+ && pPager->exclusiveMode
+ && pPager->journalMode==PAGER_JOURNALMODE_PERSIST
+ ){
+ assert( pPager->journalOff==JOURNAL_HDR_SZ(pPager) || !pPager->journalOff );
+ pPager->eState = PAGER_READER;
+ return SQLITE_OK;
+ }
+
+ PAGERTRACE(("COMMIT %d\n", PAGERID(pPager)));
+ rc = pager_end_transaction(pPager, pPager->setSuper, 1);
+ return pager_error(pPager, rc);
+}
+
+/*
+** If a write transaction is open, then all changes made within the
+** transaction are reverted and the current write-transaction is closed.
+** The pager falls back to PAGER_READER state if successful, or PAGER_ERROR
+** state if an error occurs.
+**
+** If the pager is already in PAGER_ERROR state when this function is called,
+** it returns Pager.errCode immediately. No work is performed in this case.
+**
+** Otherwise, in rollback mode, this function performs two functions:
+**
+** 1) It rolls back the journal file, restoring all database file and
+** in-memory cache pages to the state they were in when the transaction
+** was opened, and
+**
+** 2) It finalizes the journal file, so that it is not used for hot
+** rollback at any point in the future.
+**
+** Finalization of the journal file (task 2) is only performed if the
+** rollback is successful.
+**
+** In WAL mode, all cache-entries containing data modified within the
+** current transaction are either expelled from the cache or reverted to
+** their pre-transaction state by re-reading data from the database or
+** WAL files. The WAL transaction is then closed.
+*/
+SQLITE_PRIVATE int sqlite3PagerRollback(Pager *pPager){
+ int rc = SQLITE_OK; /* Return code */
+ PAGERTRACE(("ROLLBACK %d\n", PAGERID(pPager)));
+
+ /* PagerRollback() is a no-op if called in READER or OPEN state. If
+ ** the pager is already in the ERROR state, the rollback is not
+ ** attempted here. Instead, the error code is returned to the caller.
+ */
+ assert( assert_pager_state(pPager) );
+ if( pPager->eState==PAGER_ERROR ) return pPager->errCode;
+ if( pPager->eState<=PAGER_READER ) return SQLITE_OK;
+
+ if( pagerUseWal(pPager) ){
+ int rc2;
+ rc = sqlite3PagerSavepoint(pPager, SAVEPOINT_ROLLBACK, -1);
+ rc2 = pager_end_transaction(pPager, pPager->setSuper, 0);
+ if( rc==SQLITE_OK ) rc = rc2;
+ }else if( !isOpen(pPager->jfd) || pPager->eState==PAGER_WRITER_LOCKED ){
+ int eState = pPager->eState;
+ rc = pager_end_transaction(pPager, 0, 0);
+ if( !MEMDB && eState>PAGER_WRITER_LOCKED ){
+ /* This can happen using journal_mode=off. Move the pager to the error
+ ** state to indicate that the contents of the cache may not be trusted.
+ ** Any active readers will get SQLITE_ABORT.
+ */
+ pPager->errCode = SQLITE_ABORT;
+ pPager->eState = PAGER_ERROR;
+ setGetterMethod(pPager);
+ return rc;
+ }
+ }else{
+ rc = pager_playback(pPager, 0);
+ }
+
+ assert( pPager->eState==PAGER_READER || rc!=SQLITE_OK );
+ assert( rc==SQLITE_OK || rc==SQLITE_FULL || rc==SQLITE_CORRUPT
+ || rc==SQLITE_NOMEM || (rc&0xFF)==SQLITE_IOERR
+ || rc==SQLITE_CANTOPEN
+ );
+
+ /* If an error occurs during a ROLLBACK, we can no longer trust the pager
+ ** cache. So call pager_error() on the way out to make any error persistent.
+ */
+ return pager_error(pPager, rc);
+}
+
+/*
+** Return TRUE if the database file is opened read-only. Return FALSE
+** if the database is (in theory) writable.
+*/
+SQLITE_PRIVATE u8 sqlite3PagerIsreadonly(Pager *pPager){
+ return pPager->readOnly;
+}
+
+#ifdef SQLITE_DEBUG
+/*
+** Return the sum of the reference counts for all pages held by pPager.
+*/
+SQLITE_PRIVATE int sqlite3PagerRefcount(Pager *pPager){
+ return sqlite3PcacheRefCount(pPager->pPCache);
+}
+#endif
+
+/*
+** Return the approximate number of bytes of memory currently
+** used by the pager and its associated cache.
+*/
+SQLITE_PRIVATE int sqlite3PagerMemUsed(Pager *pPager){
+ int perPageSize = pPager->pageSize + pPager->nExtra + sizeof(PgHdr)
+ + 5*sizeof(void*);
+ return perPageSize*sqlite3PcachePagecount(pPager->pPCache)
+ + sqlite3MallocSize(pPager)
+ + pPager->pageSize;
+}
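+
+/* Worked example (illustrative values only): with pageSize==4096,
+** nExtra==136 and 8-byte pointers, perPageSize is approximately
+** 4096 + 136 + sizeof(PgHdr) + 40 bytes per cached page, so a cache of
+** 100 pages accounts for a little over 400KB before the pager struct
+** itself and the final pPager->pageSize term are added in.
+*/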
+
+/*
+** Return the number of references to the specified page.
+*/
+SQLITE_PRIVATE int sqlite3PagerPageRefcount(DbPage *pPage){
+ return sqlite3PcachePageRefcount(pPage);
+}
+
+#ifdef SQLITE_TEST
+/*
+** This routine is used for testing and analysis only.
+*/
+SQLITE_PRIVATE int *sqlite3PagerStats(Pager *pPager){
+ static int a[11];
+ a[0] = sqlite3PcacheRefCount(pPager->pPCache);
+ a[1] = sqlite3PcachePagecount(pPager->pPCache);
+ a[2] = sqlite3PcacheGetCachesize(pPager->pPCache);
+ a[3] = pPager->eState==PAGER_OPEN ? -1 : (int) pPager->dbSize;
+ a[4] = pPager->eState;
+ a[5] = pPager->errCode;
+ a[6] = pPager->aStat[PAGER_STAT_HIT];
+ a[7] = pPager->aStat[PAGER_STAT_MISS];
+ a[8] = 0; /* Used to be pPager->nOvfl */
+ a[9] = pPager->nRead;
+ a[10] = pPager->aStat[PAGER_STAT_WRITE];
+ return a;
+}
+#endif
+
+/*
+** Parameter eStat must be one of SQLITE_DBSTATUS_CACHE_HIT, _MISS, _WRITE,
+** or _WRITE+1. The SQLITE_DBSTATUS_CACHE_WRITE+1 case is a translation
+** of SQLITE_DBSTATUS_CACHE_SPILL. The _SPILL case is not contiguous because
+** it was added later.
+**
+** Before returning, *pnVal is incremented by the
+** current cache hit or miss count, according to the value of eStat. If the
+** reset parameter is non-zero, the cache hit or miss count is zeroed before
+** returning.
+*/
+SQLITE_PRIVATE void sqlite3PagerCacheStat(Pager *pPager, int eStat, int reset, int *pnVal){
+
+ assert( eStat==SQLITE_DBSTATUS_CACHE_HIT
+ || eStat==SQLITE_DBSTATUS_CACHE_MISS
+ || eStat==SQLITE_DBSTATUS_CACHE_WRITE
+ || eStat==SQLITE_DBSTATUS_CACHE_WRITE+1
+ );
+
+ assert( SQLITE_DBSTATUS_CACHE_HIT+1==SQLITE_DBSTATUS_CACHE_MISS );
+ assert( SQLITE_DBSTATUS_CACHE_HIT+2==SQLITE_DBSTATUS_CACHE_WRITE );
+ assert( PAGER_STAT_HIT==0 && PAGER_STAT_MISS==1
+ && PAGER_STAT_WRITE==2 && PAGER_STAT_SPILL==3 );
+
+ eStat -= SQLITE_DBSTATUS_CACHE_HIT;
+ *pnVal += pPager->aStat[eStat];
+ if( reset ){
+ pPager->aStat[eStat] = 0;
+ }
+}
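+
+/* The subtraction above relies on the identities checked by the asserts;
+** the resulting mapping is:
+**
+**    SQLITE_DBSTATUS_CACHE_HIT      ->  aStat[PAGER_STAT_HIT]    (0)
+**    SQLITE_DBSTATUS_CACHE_MISS     ->  aStat[PAGER_STAT_MISS]   (1)
+**    SQLITE_DBSTATUS_CACHE_WRITE    ->  aStat[PAGER_STAT_WRITE]  (2)
+**    SQLITE_DBSTATUS_CACHE_WRITE+1  ->  aStat[PAGER_STAT_SPILL]  (3)
+*/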
+
+/*
+** Return true if this is an in-memory or temp-file backed pager.
+*/
+SQLITE_PRIVATE int sqlite3PagerIsMemdb(Pager *pPager){
+ return pPager->tempFile;
+}
+
+/*
+** Check that there are at least nSavepoint savepoints open. If there are
+** currently fewer than nSavepoint open, then open one or more savepoints
+** to make up the difference. If the number of savepoints is already
+** equal to nSavepoint, then this function is a no-op.
+**
+** If a memory allocation fails, SQLITE_NOMEM is returned. If an error
+** occurs while opening the sub-journal file, then an IO error code is
+** returned. Otherwise, SQLITE_OK.
+*/
+static SQLITE_NOINLINE int pagerOpenSavepoint(Pager *pPager, int nSavepoint){
+ int rc = SQLITE_OK; /* Return code */
+ int nCurrent = pPager->nSavepoint; /* Current number of savepoints */
+ int ii; /* Iterator variable */
+ PagerSavepoint *aNew; /* New Pager.aSavepoint array */
+
+ assert( pPager->eState>=PAGER_WRITER_LOCKED );
+ assert( assert_pager_state(pPager) );
+ assert( nSavepoint>nCurrent && pPager->useJournal );
+
+ /* Grow the Pager.aSavepoint array using realloc(). Return SQLITE_NOMEM
+ ** if the allocation fails. Otherwise, zero the new portion in case a
+ ** malloc failure occurs while populating it in the for(...) loop below.
+ */
+ aNew = (PagerSavepoint *)sqlite3Realloc(
+ pPager->aSavepoint, sizeof(PagerSavepoint)*nSavepoint
+ );
+ if( !aNew ){
+ return SQLITE_NOMEM_BKPT;
+ }
+ memset(&aNew[nCurrent], 0, (nSavepoint-nCurrent) * sizeof(PagerSavepoint));
+ pPager->aSavepoint = aNew;
+
+ /* Populate the PagerSavepoint structures just allocated. */
+ for(ii=nCurrent; ii<nSavepoint; ii++){
+ aNew[ii].nOrig = pPager->dbSize;
+ if( isOpen(pPager->jfd) && pPager->journalOff>0 ){
+ aNew[ii].iOffset = pPager->journalOff;
+ }else{
+ aNew[ii].iOffset = JOURNAL_HDR_SZ(pPager);
+ }
+ aNew[ii].iSubRec = pPager->nSubRec;
+ aNew[ii].pInSavepoint = sqlite3BitvecCreate(pPager->dbSize);
+ if( !aNew[ii].pInSavepoint ){
+ return SQLITE_NOMEM_BKPT;
+ }
+ if( pagerUseWal(pPager) ){
+ sqlite3WalSavepoint(pPager->pWal, aNew[ii].aWalData);
+ }
+ pPager->nSavepoint = ii+1;
+ }
+ assert( pPager->nSavepoint==nSavepoint );
+ assertTruncateConstraint(pPager);
+ return rc;
+}
+SQLITE_PRIVATE int sqlite3PagerOpenSavepoint(Pager *pPager, int nSavepoint){
+ assert( pPager->eState>=PAGER_WRITER_LOCKED );
+ assert( assert_pager_state(pPager) );
+
+ if( nSavepoint>pPager->nSavepoint && pPager->useJournal ){
+ return pagerOpenSavepoint(pPager, nSavepoint);
+ }else{
+ return SQLITE_OK;
+ }
+}
+
+
+/*
+** This function is called to rollback or release (commit) a savepoint.
+** The savepoint to release or rollback need not be the most recently
+** created savepoint.
+**
+** Parameter op is always either SAVEPOINT_ROLLBACK or SAVEPOINT_RELEASE.
+** If it is SAVEPOINT_RELEASE, then release and destroy the savepoint with
+** index iSavepoint. If it is SAVEPOINT_ROLLBACK, then rollback all changes
+** that have occurred since the specified savepoint was created.
+**
+** The savepoint to rollback or release is identified by parameter
+** iSavepoint. A value of 0 means to operate on the outermost savepoint
+** (the first created). A value of (Pager.nSavepoint-1) means operate
+** on the most recently created savepoint. If iSavepoint is greater than
+** (Pager.nSavepoint-1), then this function is a no-op.
+**
+** If a negative value is passed to this function, then the current
+** transaction is rolled back. This is different to calling
+** sqlite3PagerRollback() because this function does not terminate
+** the transaction or unlock the database; it just restores the
+** contents of the database to its original state.
+**
+** In any case, all savepoints with an index greater than iSavepoint
+** are destroyed. If this is a release operation (op==SAVEPOINT_RELEASE),
+** then savepoint iSavepoint is also destroyed.
+**
+** This function may return SQLITE_NOMEM if a memory allocation fails,
+** or an IO error code if an IO error occurs while rolling back a
+** savepoint. If no errors occur, SQLITE_OK is returned.
+*/
+SQLITE_PRIVATE int sqlite3PagerSavepoint(Pager *pPager, int op, int iSavepoint){
+ int rc = pPager->errCode;
+
+#ifdef SQLITE_ENABLE_ZIPVFS
+ if( op==SAVEPOINT_RELEASE ) rc = SQLITE_OK;
+#endif
+
+ assert( op==SAVEPOINT_RELEASE || op==SAVEPOINT_ROLLBACK );
+ assert( iSavepoint>=0 || op==SAVEPOINT_ROLLBACK );
+
+ if( rc==SQLITE_OK && iSavepoint<pPager->nSavepoint ){
+ int ii; /* Iterator variable */
+ int nNew; /* Number of remaining savepoints after this op. */
+
+ /* Figure out how many savepoints will still be active after this
+ ** operation. Store this value in nNew. Then free resources associated
+ ** with any savepoints that are destroyed by this operation.
+ */
+ nNew = iSavepoint + (( op==SAVEPOINT_RELEASE ) ? 0 : 1);
+ for(ii=nNew; ii<pPager->nSavepoint; ii++){
+ sqlite3BitvecDestroy(pPager->aSavepoint[ii].pInSavepoint);
+ }
+ pPager->nSavepoint = nNew;
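+
+ /* Worked example (illustrative values): with nSavepoint==3 and
+ ** iSavepoint==1, a RELEASE yields nNew==1 (savepoints at indexes 1 and
+ ** 2 are destroyed, savepoint 0 survives), while a ROLLBACK yields
+ ** nNew==2 (only index 2 is destroyed; savepoint 1 is then played back
+ ** below).
+ */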
+
+ /* If this is a release of the outermost savepoint, truncate
+ ** the sub-journal to zero bytes in size. */
+ if( op==SAVEPOINT_RELEASE ){
+ if( nNew==0 && isOpen(pPager->sjfd) ){
+ /* Only truncate if it is an in-memory sub-journal. */
+ if( sqlite3JournalIsInMemory(pPager->sjfd) ){
+ rc = sqlite3OsTruncate(pPager->sjfd, 0);
+ assert( rc==SQLITE_OK );
+ }
+ pPager->nSubRec = 0;
+ }
+ }
+ /* Else this is a rollback operation, play back the specified savepoint.
+ ** If this is a temp-file, it is possible that the journal file has
+ ** not yet been opened. In this case there have been no changes to
+ ** the database file, so the playback operation can be skipped.
+ */
+ else if( pagerUseWal(pPager) || isOpen(pPager->jfd) ){
+ PagerSavepoint *pSavepoint = (nNew==0)?0:&pPager->aSavepoint[nNew-1];
+ rc = pagerPlaybackSavepoint(pPager, pSavepoint);
+ assert(rc!=SQLITE_DONE);
+ }
+
+#ifdef SQLITE_ENABLE_ZIPVFS
+ /* If the cache has been modified but the savepoint cannot be rolled
+ ** back because journal_mode=off, put the pager in the error state. This way,
+ ** if the VFS used by this pager includes ZipVFS, the entire transaction
+ ** can be rolled back at the ZipVFS level. */
+ else if(
+ pPager->journalMode==PAGER_JOURNALMODE_OFF
+ && pPager->eState>=PAGER_WRITER_CACHEMOD
+ ){
+ pPager->errCode = SQLITE_ABORT;
+ pPager->eState = PAGER_ERROR;
+ setGetterMethod(pPager);
+ }
+#endif
+ }
+
+ return rc;
+}
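+
+/* Mapping from SQL savepoint statements to this interface (sketch; the
+** actual calls arrive via the VDBE and btree layers):
+**
+**    SAVEPOINT one      ->  sqlite3PagerOpenSavepoint(pPager, n)
+**    RELEASE one        ->  sqlite3PagerSavepoint(pPager, SAVEPOINT_RELEASE, i)
+**    ROLLBACK TO one    ->  sqlite3PagerSavepoint(pPager, SAVEPOINT_ROLLBACK, i)
+*/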
+
+/*
+** Return the full pathname of the database file.
+**
+** Except, if the pager is in-memory only, then return an empty string if
+** nullIfMemDb is true. This routine is called with nullIfMemDb==1 when
+** used to report the filename to the user, for compatibility with legacy
+** behavior. But when the Btree needs to know the filename for matching to
+** shared cache, it uses nullIfMemDb==0 so that in-memory databases can
+** participate in shared-cache.
+**
+** The return value to this routine is always safe to use with
+** sqlite3_uri_parameter() and sqlite3_filename_database() and friends.
+*/
+SQLITE_PRIVATE const char *sqlite3PagerFilename(const Pager *pPager, int nullIfMemDb){
+ static const char zFake[8] = { 0, 0, 0, 0, 0, 0, 0, 0 };
+ return (nullIfMemDb && pPager->memDb) ? &zFake[4] : pPager->zFilename;
+}
+
+/*
+** Return the VFS structure for the pager.
+*/
+SQLITE_PRIVATE sqlite3_vfs *sqlite3PagerVfs(Pager *pPager){
+ return pPager->pVfs;
+}
+
+/*
+** Return the file handle for the database file associated
+** with the pager. This might return NULL if the file has
+** not yet been opened.
+*/
+SQLITE_PRIVATE sqlite3_file *sqlite3PagerFile(Pager *pPager){
+ return pPager->fd;
+}
+
+/*
+** Return the file handle for the journal file (if it exists).
+** This will be either the rollback journal or the WAL file.
+*/
+SQLITE_PRIVATE sqlite3_file *sqlite3PagerJrnlFile(Pager *pPager){
+#if SQLITE_OMIT_WAL
+ return pPager->jfd;
+#else
+ return pPager->pWal ? sqlite3WalFile(pPager->pWal) : pPager->jfd;
+#endif
+}
+
+/*
+** Return the full pathname of the journal file.
+*/
+SQLITE_PRIVATE const char *sqlite3PagerJournalname(Pager *pPager){
+ return pPager->zJournal;
+}
+
+#ifndef SQLITE_OMIT_AUTOVACUUM
+/*
+** Move the page pPg to location pgno in the file.
+**
+** There must be no references to the page previously located at
+** pgno (which we call pPgOld) though that page is allowed to be
+** in cache. If the page previously located at pgno is not already
+** in the rollback journal, it is not put there by this routine.
+**
+** References to the page pPg remain valid. Updating any
+** meta-data associated with pPg (i.e. data stored in the nExtra bytes
+** allocated along with the page) is the responsibility of the caller.
+**
+** A transaction must be active when this routine is called. It used to be
+** required that a statement transaction was not active, but this restriction
+** has been removed (CREATE INDEX needs to move a page when a statement
+** transaction is active).
+**
+** If the fourth argument, isCommit, is non-zero, then this page is being
+** moved as part of a database reorganization just before the transaction
+** is being committed. In this case, it is guaranteed that the database page
+** pPg refers to will not be written to again within this transaction.
+**
+** This function may return SQLITE_NOMEM or an IO error code if an error
+** occurs. Otherwise, it returns SQLITE_OK.
+*/
+SQLITE_PRIVATE int sqlite3PagerMovepage(Pager *pPager, DbPage *pPg, Pgno pgno, int isCommit){
+ PgHdr *pPgOld; /* The page being overwritten. */
+ Pgno needSyncPgno = 0; /* Old value of pPg->pgno, if sync is required */
+ int rc; /* Return code */
+ Pgno origPgno; /* The original page number */
+
+ assert( pPg->nRef>0 );
+ assert( pPager->eState==PAGER_WRITER_CACHEMOD
+ || pPager->eState==PAGER_WRITER_DBMOD
+ );
+ assert( assert_pager_state(pPager) );
+
+ /* In order to be able to rollback, an in-memory database must journal
+ ** the page we are moving from.
+ */
+ assert( pPager->tempFile || !MEMDB );
+ if( pPager->tempFile ){
+ rc = sqlite3PagerWrite(pPg);
+ if( rc ) return rc;
+ }
+
+ /* If the page being moved is dirty and has not been saved by the latest
+ ** savepoint, then save the current contents of the page into the
+ ** sub-journal now. This is required to handle the following scenario:
+ **
+ ** BEGIN;
+ ** <journal page X, then modify it in memory>
+ ** SAVEPOINT one;
+ ** <Move page X to location Y>
+ ** ROLLBACK TO one;
+ **
+ ** If page X were not written to the sub-journal here, it would not
+ ** be possible to restore its contents when the "ROLLBACK TO one"
+ ** statement is processed.
+ **
+ ** subjournalPage() may need to allocate space to store pPg->pgno into
+ ** one or more savepoint bitvecs. This is the reason this function
+ ** may return SQLITE_NOMEM.
+ */
+ if( (pPg->flags & PGHDR_DIRTY)!=0
+ && SQLITE_OK!=(rc = subjournalPageIfRequired(pPg))
+ ){
+ return rc;
+ }
+
+ PAGERTRACE(("MOVE %d page %d (needSync=%d) moves to %d\n",
+ PAGERID(pPager), pPg->pgno, (pPg->flags&PGHDR_NEED_SYNC)?1:0, pgno));
+ IOTRACE(("MOVE %p %d %d\n", pPager, pPg->pgno, pgno))
+
+ /* If the journal needs to be sync()ed before page pPg->pgno can
+ ** be written to, store pPg->pgno in local variable needSyncPgno.
+ **
+ ** If the isCommit flag is set, there is no need to remember that
+ ** the journal needs to be sync()ed before database page pPg->pgno
+ ** can be written to. The caller has already promised not to write to it.
+ */
+ if( (pPg->flags&PGHDR_NEED_SYNC) && !isCommit ){
+ needSyncPgno = pPg->pgno;
+ assert( pPager->journalMode==PAGER_JOURNALMODE_OFF ||
+ pageInJournal(pPager, pPg) || pPg->pgno>pPager->dbOrigSize );
+ assert( pPg->flags&PGHDR_DIRTY );
+ }
+
+ /* If the cache contains a page with page-number pgno, remove it
+ ** from its hash chain. Also, if the PGHDR_NEED_SYNC flag was set for
+ ** page pgno before the 'move' operation, it needs to be retained
+ ** for the page moved there.
+ */
+ pPg->flags &= ~PGHDR_NEED_SYNC;
+ pPgOld = sqlite3PagerLookup(pPager, pgno);
+ assert( !pPgOld || pPgOld->nRef==1 || CORRUPT_DB );
+ if( pPgOld ){
+ if( pPgOld->nRef>1 ){
+ sqlite3PagerUnrefNotNull(pPgOld);
+ return SQLITE_CORRUPT_BKPT;
+ }
+ pPg->flags |= (pPgOld->flags&PGHDR_NEED_SYNC);
+ if( pPager->tempFile ){
+ /* Do not discard pages from an in-memory database since we might
+ ** need to rollback later. Just move the page out of the way. */
+ sqlite3PcacheMove(pPgOld, pPager->dbSize+1);
+ }else{
+ sqlite3PcacheDrop(pPgOld);
+ }
+ }
+
+ origPgno = pPg->pgno;
+ sqlite3PcacheMove(pPg, pgno);
+ sqlite3PcacheMakeDirty(pPg);
+
+ /* For an in-memory database, make sure the original page continues
+ ** to exist, in case the transaction needs to roll back. Use pPgOld
+ ** as the original page since it has already been allocated.
+ */
+ if( pPager->tempFile && pPgOld ){
+ sqlite3PcacheMove(pPgOld, origPgno);
+ sqlite3PagerUnrefNotNull(pPgOld);
+ }
+
+ if( needSyncPgno ){
+ /* If needSyncPgno is non-zero, then the journal file needs to be
+ ** sync()ed before any data is written to database file page needSyncPgno.
+ ** Currently, no such page exists in the page-cache and the
+ ** "is journaled" bitvec flag has been set. This needs to be remedied by
+ ** loading the page into the pager-cache and setting the PGHDR_NEED_SYNC
+ ** flag.
+ **
+ ** If the attempt to load the page into the page-cache fails, (due
+ ** to a malloc() or IO failure), clear the bit in the pInJournal[]
+ ** array. Otherwise, if the page is loaded and written again in
+ ** this transaction, it may be written to the database file before
+ ** it is synced into the journal file. This way, it may end up in
+ ** the journal file twice, but that is not a problem.
+ */
+ PgHdr *pPgHdr;
+ rc = sqlite3PagerGet(pPager, needSyncPgno, &pPgHdr, 0);
+ if( rc!=SQLITE_OK ){
+ if( needSyncPgno<=pPager->dbOrigSize ){
+ assert( pPager->pTmpSpace!=0 );
+ sqlite3BitvecClear(pPager->pInJournal, needSyncPgno, pPager->pTmpSpace);
+ }
+ return rc;
+ }
+ pPgHdr->flags |= PGHDR_NEED_SYNC;
+ sqlite3PcacheMakeDirty(pPgHdr);
+ sqlite3PagerUnrefNotNull(pPgHdr);
+ }
+
+ return SQLITE_OK;
+}
+#endif
+
+/*
+** The page handle passed as the first argument refers to a dirty page
+** with a page number other than iNew. This function changes the page's
+** page number to iNew and sets the value of the PgHdr.flags field to
+** the value passed as the third parameter.
+*/
+SQLITE_PRIVATE void sqlite3PagerRekey(DbPage *pPg, Pgno iNew, u16 flags){
+ assert( pPg->pgno!=iNew );
+ pPg->flags = flags;
+ sqlite3PcacheMove(pPg, iNew);
+}
+
+/*
+** Return a pointer to the data for the specified page.
+*/
+SQLITE_PRIVATE void *sqlite3PagerGetData(DbPage *pPg){
+ assert( pPg->nRef>0 || pPg->pPager->memDb );
+ return pPg->pData;
+}
+
+/*
+** Return a pointer to the Pager.nExtra bytes of "extra" space
+** allocated along with the specified page.
+*/
+SQLITE_PRIVATE void *sqlite3PagerGetExtra(DbPage *pPg){
+ return pPg->pExtra;
+}
+
+/*
+** Get/set the locking-mode for this pager. Parameter eMode must be one
+** of PAGER_LOCKINGMODE_QUERY, PAGER_LOCKINGMODE_NORMAL or
+** PAGER_LOCKINGMODE_EXCLUSIVE. If the parameter is not _QUERY, then
+** the locking-mode is set to the value specified.
+**
+** The returned value is either PAGER_LOCKINGMODE_NORMAL or
+** PAGER_LOCKINGMODE_EXCLUSIVE, indicating the current (possibly updated)
+** locking-mode.
+*/
+SQLITE_PRIVATE int sqlite3PagerLockingMode(Pager *pPager, int eMode){
+ assert( eMode==PAGER_LOCKINGMODE_QUERY
+ || eMode==PAGER_LOCKINGMODE_NORMAL
+ || eMode==PAGER_LOCKINGMODE_EXCLUSIVE );
+ assert( PAGER_LOCKINGMODE_QUERY<0 );
+ assert( PAGER_LOCKINGMODE_NORMAL>=0 && PAGER_LOCKINGMODE_EXCLUSIVE>=0 );
+ assert( pPager->exclusiveMode || 0==sqlite3WalHeapMemory(pPager->pWal) );
+ if( eMode>=0 && !pPager->tempFile && !sqlite3WalHeapMemory(pPager->pWal) ){
+ pPager->exclusiveMode = (u8)eMode;
+ }
+ return (int)pPager->exclusiveMode;
+}
+
+/*
+** Set the journal-mode for this pager. Parameter eMode must be one of:
+**
+** PAGER_JOURNALMODE_DELETE
+** PAGER_JOURNALMODE_TRUNCATE
+** PAGER_JOURNALMODE_PERSIST
+** PAGER_JOURNALMODE_OFF
+** PAGER_JOURNALMODE_MEMORY
+** PAGER_JOURNALMODE_WAL
+**
+** The journalmode is set to the value specified if the change is allowed.
+** The change may be disallowed for the following reasons:
+**
+** * An in-memory database can only have its journal_mode set to _OFF
+** or _MEMORY.
+**
+** * Temporary databases cannot have _WAL journalmode.
+**
+** The returned value indicates the current (possibly updated) journal-mode.
+*/
+SQLITE_PRIVATE int sqlite3PagerSetJournalMode(Pager *pPager, int eMode){
+ u8 eOld = pPager->journalMode; /* Prior journalmode */
+
+ /* The eMode parameter is always valid */
+ assert( eMode==PAGER_JOURNALMODE_DELETE
+ || eMode==PAGER_JOURNALMODE_TRUNCATE
+ || eMode==PAGER_JOURNALMODE_PERSIST
+ || eMode==PAGER_JOURNALMODE_OFF
+ || eMode==PAGER_JOURNALMODE_WAL
+ || eMode==PAGER_JOURNALMODE_MEMORY );
+
+ /* This routine is only called from the OP_JournalMode opcode, and
+ ** the logic there will never allow a temporary file to be changed
+ ** to WAL mode.
+ */
+ assert( pPager->tempFile==0 || eMode!=PAGER_JOURNALMODE_WAL );
+
+ /* Do not allow the journalmode of an in-memory database to be set to
+ ** anything other than MEMORY or OFF
+ */
+ if( MEMDB ){
+ assert( eOld==PAGER_JOURNALMODE_MEMORY || eOld==PAGER_JOURNALMODE_OFF );
+ if( eMode!=PAGER_JOURNALMODE_MEMORY && eMode!=PAGER_JOURNALMODE_OFF ){
+ eMode = eOld;
+ }
+ }
+
+ if( eMode!=eOld ){
+
+ /* Change the journal mode. */
+ assert( pPager->eState!=PAGER_ERROR );
+ pPager->journalMode = (u8)eMode;
+
+ /* When transitioning from TRUNCATE or PERSIST to any other journal
+ ** mode except WAL, unless the pager is in locking_mode=exclusive mode,
+ ** delete the journal file.
+ */
+ assert( (PAGER_JOURNALMODE_TRUNCATE & 5)==1 );
+ assert( (PAGER_JOURNALMODE_PERSIST & 5)==1 );
+ assert( (PAGER_JOURNALMODE_DELETE & 5)==0 );
+ assert( (PAGER_JOURNALMODE_MEMORY & 5)==4 );
+ assert( (PAGER_JOURNALMODE_OFF & 5)==0 );
+ assert( (PAGER_JOURNALMODE_WAL & 5)==5 );
+
+ assert( isOpen(pPager->fd) || pPager->exclusiveMode );
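+ /* Per the assert()s above, (eOld & 5)==1 holds only for TRUNCATE and
+ ** PERSIST, while (eMode & 1)==0 holds only for DELETE, OFF and MEMORY,
+ ** so the test below matches exactly the transitions described in the
+ ** comment above. */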
+ if( !pPager->exclusiveMode && (eOld & 5)==1 && (eMode & 1)==0 ){
+
+ /* In this case we would like to delete the journal file. If it is
+ ** not possible, then that is not a problem. Deleting the journal file
+ ** here is an optimization only.
+ **
+ ** Before deleting the journal file, obtain a RESERVED lock on the
+ ** database file. This ensures that the journal file is not deleted
+ ** while it is in use by some other client.
+ */
+ sqlite3OsClose(pPager->jfd);
+ if( pPager->eLock>=RESERVED_LOCK ){
+ sqlite3OsDelete(pPager->pVfs, pPager->zJournal, 0);
+ }else{
+ int rc = SQLITE_OK;
+ int state = pPager->eState;
+ assert( state==PAGER_OPEN || state==PAGER_READER );
+ if( state==PAGER_OPEN ){
+ rc = sqlite3PagerSharedLock(pPager);
+ }
+ if( pPager->eState==PAGER_READER ){
+ assert( rc==SQLITE_OK );
+ rc = pagerLockDb(pPager, RESERVED_LOCK);
+ }
+ if( rc==SQLITE_OK ){
+ sqlite3OsDelete(pPager->pVfs, pPager->zJournal, 0);
+ }
+ if( rc==SQLITE_OK && state==PAGER_READER ){
+ pagerUnlockDb(pPager, SHARED_LOCK);
+ }else if( state==PAGER_OPEN ){
+ pager_unlock(pPager);
+ }
+ assert( state==pPager->eState );
+ }
+ }else if( eMode==PAGER_JOURNALMODE_OFF ){
+ sqlite3OsClose(pPager->jfd);
+ }
+ }
+
+ /* Return the new journal mode */
+ return (int)pPager->journalMode;
+}
+
+/*
+** Return the current journal mode.
+*/
+SQLITE_PRIVATE int sqlite3PagerGetJournalMode(Pager *pPager){
+ return (int)pPager->journalMode;
+}
+
+/*
+** Return TRUE if the pager is in a state where it is OK to change the
+** journalmode. Journalmode changes can only happen when the database
+** is unmodified.
+*/
+SQLITE_PRIVATE int sqlite3PagerOkToChangeJournalMode(Pager *pPager){
+ assert( assert_pager_state(pPager) );
+ if( pPager->eState>=PAGER_WRITER_CACHEMOD ) return 0;
+ if( NEVER(isOpen(pPager->jfd) && pPager->journalOff>0) ) return 0;
+ return 1;
+}
+
+/*
+** Get/set the size-limit used for persistent journal files.
+**
+** Setting the size limit to -1 means no limit is enforced.
+** An attempt to set a limit smaller than -1 is a no-op.
+*/
+SQLITE_PRIVATE i64 sqlite3PagerJournalSizeLimit(Pager *pPager, i64 iLimit){
+ if( iLimit>=-1 ){
+ pPager->journalSizeLimit = iLimit;
+ sqlite3WalLimit(pPager->pWal, iLimit);
+ }
+ return pPager->journalSizeLimit;
+}
+
+/*
+** Return a pointer to the pPager->pBackup variable. The backup module
+** in backup.c maintains the content of this variable. This module
+** uses it opaquely as an argument to sqlite3BackupRestart() and
+** sqlite3BackupUpdate() only.
+*/
+SQLITE_PRIVATE sqlite3_backup **sqlite3PagerBackupPtr(Pager *pPager){
+ return &pPager->pBackup;
+}
+
+#ifndef SQLITE_OMIT_VACUUM
+/*
+** Unless this is an in-memory or temporary database, clear the pager cache.
+*/
+SQLITE_PRIVATE void sqlite3PagerClearCache(Pager *pPager){
+ assert( MEMDB==0 || pPager->tempFile );
+ if( pPager->tempFile==0 ) pager_reset(pPager);
+}
+#endif
+
+
+#ifndef SQLITE_OMIT_WAL
+/*
+** This function is called when the user invokes "PRAGMA wal_checkpoint",
+** "PRAGMA wal_blocking_checkpoint" or calls the sqlite3_wal_checkpoint()
+** or wal_blocking_checkpoint() API functions.
+**
+** Parameter eMode is one of SQLITE_CHECKPOINT_PASSIVE, FULL, RESTART
+** or TRUNCATE.
+*/
+SQLITE_PRIVATE int sqlite3PagerCheckpoint(
+ Pager *pPager, /* Checkpoint on this pager */
+ sqlite3 *db, /* Db handle used to check for interrupts */
+ int eMode, /* Type of checkpoint */
+ int *pnLog, /* OUT: Final number of frames in log */
+ int *pnCkpt /* OUT: Final number of checkpointed frames */
+){
+ int rc = SQLITE_OK;
+ if( pPager->pWal ){
+ rc = sqlite3WalCheckpoint(pPager->pWal, db, eMode,
+ (eMode==SQLITE_CHECKPOINT_PASSIVE ? 0 : pPager->xBusyHandler),
+ pPager->pBusyHandlerArg,
+ pPager->walSyncFlags, pPager->pageSize, (u8 *)pPager->pTmpSpace,
+ pnLog, pnCkpt
+ );
+ }
+ return rc;
+}
+
+SQLITE_PRIVATE int sqlite3PagerWalCallback(Pager *pPager){
+ return sqlite3WalCallback(pPager->pWal);
+}
+
+/*
+** Return true if the underlying VFS for the given pager supports the
+** primitives necessary for write-ahead logging.
+*/
+SQLITE_PRIVATE int sqlite3PagerWalSupported(Pager *pPager){
+ const sqlite3_io_methods *pMethods = pPager->fd->pMethods;
+ if( pPager->noLock ) return 0;
+ return pPager->exclusiveMode || (pMethods->iVersion>=2 && pMethods->xShmMap);
+}
+
+/*
+** Attempt to take an exclusive lock on the database file. If a PENDING lock
+** is obtained instead, immediately release it.
+*/
+static int pagerExclusiveLock(Pager *pPager){
+ int rc; /* Return code */
+
+ assert( pPager->eLock==SHARED_LOCK || pPager->eLock==EXCLUSIVE_LOCK );
+ rc = pagerLockDb(pPager, EXCLUSIVE_LOCK);
+ if( rc!=SQLITE_OK ){
+ /* If the attempt to grab the exclusive lock failed, release the
+ ** pending lock that may have been obtained instead. */
+ pagerUnlockDb(pPager, SHARED_LOCK);
+ }
+
+ return rc;
+}
+
+/*
+** Call sqlite3WalOpen() to open the WAL handle. If the pager is in
+** exclusive-locking mode when this function is called, take an EXCLUSIVE
+** lock on the database file and use heap-memory to store the wal-index
+** in. Otherwise, use the normal shared-memory.
+*/
+static int pagerOpenWal(Pager *pPager){
+ int rc = SQLITE_OK;
+
+ assert( pPager->pWal==0 && pPager->tempFile==0 );
+ assert( pPager->eLock==SHARED_LOCK || pPager->eLock==EXCLUSIVE_LOCK );
+
+ /* If the pager is already in exclusive-mode, the WAL module will use
+ ** heap-memory for the wal-index instead of the VFS shared-memory
+ ** implementation. Take the exclusive lock now, before opening the WAL
+ ** file, to make sure this is safe.
+ */
+ if( pPager->exclusiveMode ){
+ rc = pagerExclusiveLock(pPager);
+ }
+
+ /* Open the connection to the log file. If this operation fails,
+ ** (e.g. due to malloc() failure), return an error code.
+ */
+ if( rc==SQLITE_OK ){
+ rc = sqlite3WalOpen(pPager->pVfs,
+ pPager->fd, pPager->zWal, pPager->exclusiveMode,
+ pPager->journalSizeLimit, &pPager->pWal
+ );
+ }
+ pagerFixMaplimit(pPager);
+
+ return rc;
+}
+
+
+/*
+** The caller must be holding a SHARED lock on the database file to call
+** this function.
+**
+** If the pager passed as the first argument is open on a real database
+** file (not a temp file or an in-memory database), and the WAL file
+** is not already open, make an attempt to open it now. If successful,
+** return SQLITE_OK. If an error occurs or the VFS used by the pager does
+** not support the xShmXXX() methods, return an error code. *pbOpen is
+** not modified in either case.
+**
+** If the pager is open on a temp-file (or in-memory database), or if
+** the WAL file is already open, set *pbOpen to 1 and return SQLITE_OK
+** without doing anything.
+*/
+SQLITE_PRIVATE int sqlite3PagerOpenWal(
+ Pager *pPager, /* Pager object */
+ int *pbOpen /* OUT: Set to true if call is a no-op */
+){
+ int rc = SQLITE_OK; /* Return code */
+
+ assert( assert_pager_state(pPager) );
+ assert( pPager->eState==PAGER_OPEN || pbOpen );
+ assert( pPager->eState==PAGER_READER || !pbOpen );
+ assert( pbOpen==0 || *pbOpen==0 );
+ assert( pbOpen!=0 || (!pPager->tempFile && !pPager->pWal) );
+
+ if( !pPager->tempFile && !pPager->pWal ){
+ if( !sqlite3PagerWalSupported(pPager) ) return SQLITE_CANTOPEN;
+
+ /* Close any rollback journal previously open */
+ sqlite3OsClose(pPager->jfd);
+
+ rc = pagerOpenWal(pPager);
+ if( rc==SQLITE_OK ){
+ pPager->journalMode = PAGER_JOURNALMODE_WAL;
+ pPager->eState = PAGER_OPEN;
+ }
+ }else{
+ *pbOpen = 1;
+ }
+
+ return rc;
+}
+
+/*
+** This function is called to close the connection to the log file prior
+** to switching from WAL to rollback mode.
+**
+** Before closing the log file, this function attempts to take an
+** EXCLUSIVE lock on the database file. If this cannot be obtained, an
+** error (SQLITE_BUSY) is returned and the log connection is not closed.
+** If successful, the EXCLUSIVE lock is not released before returning.
+*/
+SQLITE_PRIVATE int sqlite3PagerCloseWal(Pager *pPager, sqlite3 *db){
+ int rc = SQLITE_OK;
+
+ assert( pPager->journalMode==PAGER_JOURNALMODE_WAL );
+
+ /* If the log file is not already open, but does exist in the file-system,
+ ** it may need to be checkpointed before the connection can switch to
+ ** rollback mode. Open it now so this can happen.
+ */
+ if( !pPager->pWal ){
+ int logexists = 0;
+ rc = pagerLockDb(pPager, SHARED_LOCK);
+ if( rc==SQLITE_OK ){
+ rc = sqlite3OsAccess(
+ pPager->pVfs, pPager->zWal, SQLITE_ACCESS_EXISTS, &logexists
+ );
+ }
+ if( rc==SQLITE_OK && logexists ){
+ rc = pagerOpenWal(pPager);
+ }
+ }
+
+ /* Checkpoint and close the log. Because an EXCLUSIVE lock is held on
+ ** the database file, the log and log-summary files will be deleted.
+ */
+ if( rc==SQLITE_OK && pPager->pWal ){
+ rc = pagerExclusiveLock(pPager);
+ if( rc==SQLITE_OK ){
+ rc = sqlite3WalClose(pPager->pWal, db, pPager->walSyncFlags,
+ pPager->pageSize, (u8*)pPager->pTmpSpace);
+ pPager->pWal = 0;
+ pagerFixMaplimit(pPager);
+ if( rc && !pPager->exclusiveMode ) pagerUnlockDb(pPager, SHARED_LOCK);
+ }
+ }
+ return rc;
+}
+
+#ifdef SQLITE_ENABLE_SETLK_TIMEOUT
+/*
+** If pager pPager is a wal-mode database not in exclusive locking mode,
+** invoke the sqlite3WalWriteLock() function on the associated Wal object
+** with the same db and bLock parameters as were passed to this function.
+** Return an SQLite error code if an error occurs, or SQLITE_OK otherwise.
+*/
+SQLITE_PRIVATE int sqlite3PagerWalWriteLock(Pager *pPager, int bLock){
+ int rc = SQLITE_OK;
+ if( pagerUseWal(pPager) && pPager->exclusiveMode==0 ){
+ rc = sqlite3WalWriteLock(pPager->pWal, bLock);
+ }
+ return rc;
+}
+
+/*
+** Set the database handle used by the wal layer to determine if
+** blocking locks are required.
+*/
+SQLITE_PRIVATE void sqlite3PagerWalDb(Pager *pPager, sqlite3 *db){
+ if( pagerUseWal(pPager) ){
+ sqlite3WalDb(pPager->pWal, db);
+ }
+}
+#endif
+
+#ifdef SQLITE_ENABLE_SNAPSHOT
+/*
+** If this is a WAL database, obtain a snapshot handle for the snapshot
+** currently open. Otherwise, return an error.
+*/
+SQLITE_PRIVATE int sqlite3PagerSnapshotGet(Pager *pPager, sqlite3_snapshot **ppSnapshot){
+ int rc = SQLITE_ERROR;
+ if( pPager->pWal ){
+ rc = sqlite3WalSnapshotGet(pPager->pWal, ppSnapshot);
+ }
+ return rc;
+}
+
+/*
+** If this is a WAL database, store a pointer to pSnapshot. Next time a
+** read transaction is opened, attempt to read from the snapshot it
+** identifies. If this is not a WAL database, return an error.
+*/
+SQLITE_PRIVATE int sqlite3PagerSnapshotOpen(
+ Pager *pPager,
+ sqlite3_snapshot *pSnapshot
+){
+ int rc = SQLITE_OK;
+ if( pPager->pWal ){
+ sqlite3WalSnapshotOpen(pPager->pWal, pSnapshot);
+ }else{
+ rc = SQLITE_ERROR;
+ }
+ return rc;
+}
+
+/*
+** If this is a WAL database, call sqlite3WalSnapshotRecover(). If this
+** is not a WAL database, return an error.
+*/
+SQLITE_PRIVATE int sqlite3PagerSnapshotRecover(Pager *pPager){
+ int rc;
+ if( pPager->pWal ){
+ rc = sqlite3WalSnapshotRecover(pPager->pWal);
+ }else{
+ rc = SQLITE_ERROR;
+ }
+ return rc;
+}
+
+/*
+** The caller currently has a read transaction open on the database.
+** If this is not a WAL database, SQLITE_ERROR is returned. Otherwise,
+** this function takes a SHARED lock on the CHECKPOINTER slot and then
+** checks if the snapshot passed as the second argument is still
+** available. If so, SQLITE_OK is returned.
+**
+** If the snapshot is not available, SQLITE_ERROR is returned. Or, if
+** the CHECKPOINTER lock cannot be obtained, SQLITE_BUSY. If any error
+** occurs (any value other than SQLITE_OK is returned), the CHECKPOINTER
+** lock is released before returning.
+*/
+SQLITE_PRIVATE int sqlite3PagerSnapshotCheck(Pager *pPager, sqlite3_snapshot *pSnapshot){
+ int rc;
+ if( pPager->pWal ){
+ rc = sqlite3WalSnapshotCheck(pPager->pWal, pSnapshot);
+ }else{
+ rc = SQLITE_ERROR;
+ }
+ return rc;
+}
+
+/*
+** Release a lock obtained by an earlier successful call to
+** sqlite3PagerSnapshotCheck().
+*/
+SQLITE_PRIVATE void sqlite3PagerSnapshotUnlock(Pager *pPager){
+ assert( pPager->pWal );
+ sqlite3WalSnapshotUnlock(pPager->pWal);
+}
+
+#endif /* SQLITE_ENABLE_SNAPSHOT */
+#endif /* !SQLITE_OMIT_WAL */
+
+#ifdef SQLITE_ENABLE_ZIPVFS
+/*
+** A read-lock must be held on the pager when this function is called. If
+** the pager is in WAL mode and the WAL file currently contains one or more
+** frames, return the size in bytes of the page images stored within the
+** WAL frames. Otherwise, if this is not a WAL database or the WAL file
+** is empty, return 0.
+*/
+SQLITE_PRIVATE int sqlite3PagerWalFramesize(Pager *pPager){
+ assert( pPager->eState>=PAGER_READER );
+ return sqlite3WalFramesize(pPager->pWal);
+}
+#endif
+
+#endif /* SQLITE_OMIT_DISKIO */
+
+/************** End of pager.c ***********************************************/
+/************** Begin file wal.c *********************************************/
+/*
+** 2010 February 1
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+**
+** This file contains the implementation of a write-ahead log (WAL) used in
+** "journal_mode=WAL" mode.
+**
+** WRITE-AHEAD LOG (WAL) FILE FORMAT
+**
+** A WAL file consists of a header followed by zero or more "frames".
+** Each frame records the revised content of a single page from the
+** database file. All changes to the database are recorded by writing
+** frames into the WAL. Transactions commit when a frame is written that
+** contains a commit marker. A single WAL can and usually does record
+** multiple transactions. Periodically, the content of the WAL is
+** transferred back into the database file in an operation called a
+** "checkpoint".
+**
+** A single WAL file can be used multiple times. In other words, the
+** WAL can fill up with frames and then be checkpointed and then new
+** frames can overwrite the old ones. A WAL always grows from beginning
+** toward the end. Checksums and counters attached to each frame are
+** used to determine which frames within the WAL are valid and which
+** are leftovers from prior checkpoints.
+**
+** The WAL header is 32 bytes in size and consists of the following eight
+** big-endian 32-bit unsigned integer values:
+**
+** 0: Magic number. 0x377f0682 or 0x377f0683
+** 4: File format version. Currently 3007000
+** 8: Database page size. Example: 1024
+** 12: Checkpoint sequence number
+** 16: Salt-1, random integer incremented with each checkpoint
+** 20: Salt-2, a different random integer changing with each ckpt
+** 24: Checksum-1 (first part of checksum for first 24 bytes of header).
+** 28: Checksum-2 (second part of checksum for first 24 bytes of header).
+**
+** Immediately following the wal-header are zero or more frames. Each
+** frame consists of a 24-byte frame-header followed by a <page-size> bytes
+** of page data. The frame-header is six big-endian 32-bit unsigned
+** integer values, as follows:
+**
+** 0: Page number.
+** 4: For commit records, the size of the database image in pages
+** after the commit. For all other records, zero.
+** 8: Salt-1 (copied from the header)
+** 12: Salt-2 (copied from the header)
+** 16: Checksum-1.
+** 20: Checksum-2.
+**
+** A frame is considered valid if and only if the following conditions are
+** true:
+**
+** (1) The salt-1 and salt-2 values in the frame-header match
+** salt values in the wal-header
+**
+** (2) The checksum values in the final 8 bytes of the frame-header
+** exactly match the checksum computed consecutively on the
+** WAL header and the first 8 bytes and the content of all frames
+** up to and including the current frame.
+**
+** The checksum is computed using 32-bit big-endian integers if the
+** magic number in the first 4 bytes of the WAL is 0x377f0683 and it
+** is computed using little-endian if the magic number is 0x377f0682.
+** The checksum values are always stored in the frame header in a
+** big-endian format regardless of which byte order is used to compute
+** the checksum. The checksum is computed by interpreting the input as
+** an even number of unsigned 32-bit integers: x[0] through x[N]. The
+** algorithm used for the checksum is as follows:
+**
+** for i from 0 to n-1 step 2:
+** s0 += x[i] + s1;
+** s1 += x[i+1] + s0;
+** endfor
+**
+** Note that s0 and s1 are both weighted checksums using fibonacci weights
+** in reverse order (the largest fibonacci weight occurs on the first element
+** of the sequence being summed.) The s1 value spans all 32-bit
+** terms of the sequence whereas s0 omits the final term.
+**
+** On a checkpoint, the WAL is first VFS.xSync-ed, then valid content of the
+** WAL is transferred into the database, then the database is VFS.xSync-ed.
+** The VFS.xSync operations serve as write barriers - all writes launched
+** before the xSync must complete before any write that launches after the
+** xSync begins.
+**
+** After each checkpoint, the salt-1 value is incremented and the salt-2
+** value is randomized. This prevents old and new frames in the WAL from
+** being considered valid at the same time and being checkpointed together
+** following a crash.
+**
+** READER ALGORITHM
+**
+** To read a page from the database (call it page number P), a reader
+** first checks the WAL to see if it contains page P. If so, then the
+** last valid instance of page P that is followed by a commit frame
+** or is a commit frame itself becomes the value read. If the WAL
+** contains no copies of page P that are valid and which are a commit
+** frame or are followed by a commit frame, then page P is read from
+** the database file.
+**
+** To start a read transaction, the reader records the index of the last
+** valid frame in the WAL. The reader uses this recorded "mxFrame" value
+** for all subsequent read operations. New transactions can be appended
+** to the WAL, but as long as the reader uses its original mxFrame value
+** and ignores the newly appended content, it will see a consistent snapshot
+** of the database from a single point in time. This technique allows
+** multiple concurrent readers to view different versions of the database
+** content simultaneously.
+**
+** The reader algorithm in the previous paragraphs works correctly, but
+** because frames for page P can appear anywhere within the WAL, the
+** reader has to scan the entire WAL looking for page P frames. If the
+** WAL is large (multiple megabytes is typical) that scan can be slow,
+** and read performance suffers. To overcome this problem, a separate
+** data structure called the wal-index is maintained to expedite the
+** search for frames of a particular page.
+**
+** WAL-INDEX FORMAT
+**
+** Conceptually, the wal-index is shared memory, though VFS implementations
+** might choose to implement the wal-index using a mmapped file. Because
+** the wal-index is shared memory, SQLite does not support journal_mode=WAL
+** on a network filesystem. All users of the database must be able to
+** share memory.
+**
+** In the default unix and windows implementation, the wal-index is a mmapped
+** file whose name is the database name with a "-shm" suffix added. For that
+** reason, the wal-index is sometimes called the "shm" file.
+**
+** The wal-index is transient. After a crash, the wal-index can (and should)
+** be reconstructed from the original WAL file. In fact, the VFS is required
+** to either truncate or zero the header of the wal-index when the last
+** connection to it closes. Because the wal-index is transient, it can
+** use an architecture-specific format; it does not have to be cross-platform.
+** Hence, unlike the database and WAL file formats which store all values
+** as big endian, the wal-index can store multi-byte values in the native
+** byte order of the host computer.
+**
+** The purpose of the wal-index is to answer this question quickly: Given
+** a page number P and a maximum frame index M, return the index of the
+** last frame for page P in the WAL before frame M, or return
+** NULL if there are no frames for page P in the WAL prior to M.
+**
+** The wal-index consists of a header region, followed by one or
+** more index blocks.
+**
+** The wal-index header contains the total number of frames within the WAL
+** in the mxFrame field.
+**
+** Each index block except for the first contains information on
+** HASHTABLE_NPAGE frames. The first index block contains information on
+** HASHTABLE_NPAGE_ONE frames. The values of HASHTABLE_NPAGE_ONE and
+** HASHTABLE_NPAGE are selected so that together the wal-index header and
+** first index block are the same size as all other index blocks in the
+** wal-index.
+**
+** Each index block contains two sections, a page-mapping that contains the
+** database page number associated with each wal frame, and a hash-table
+** that allows readers to query an index block for a specific page number.
+** The page-mapping is an array of HASHTABLE_NPAGE (or HASHTABLE_NPAGE_ONE
+** for the first index block) 32-bit page numbers. The first entry in the
+** first index-block contains the database page number corresponding to the
+** first frame in the WAL file. The first entry in the second index block
+** in the WAL file corresponds to the (HASHTABLE_NPAGE_ONE+1)th frame in
+** the log, and so on.
+**
+** The last index block in a wal-index usually contains less than the full
+** complement of HASHTABLE_NPAGE (or HASHTABLE_NPAGE_ONE) page-numbers,
+** depending on the contents of the WAL file. This does not change the
+** allocated size of the page-mapping array - the page-mapping array merely
+** contains unused entries.
+**
+** Even without using the hash table, the last frame for page P
+** can be found by scanning the page-mapping sections of each index block
+** starting with the last index block and moving toward the first, and
+** within each index block, starting at the end and moving toward the
+** beginning. The first entry that equals P corresponds to the frame
+** holding the content for that page.
+**
+** The hash table consists of HASHTABLE_NSLOT 16-bit unsigned integers.
+** HASHTABLE_NSLOT = 2*HASHTABLE_NPAGE, and there is one entry in the
+** hash table for each page number in the mapping section, so the hash
+** table is never more than half full. The expected number of collisions
+** prior to finding a match is 1. Each entry of the hash table is an
+** 1-based index of an entry in the mapping section of the same
+** index block. Let K be the 1-based index of the largest entry in
+** the mapping section. (For index blocks other than the last, K will
+** always be exactly HASHTABLE_NPAGE (4096) and for the last index block
+** K will be (mxFrame%HASHTABLE_NPAGE).) Unused slots of the hash table
+** contain a value of 0.
+**
+** To look for page P in the hash table, first compute a hash iKey on
+** P as follows:
+**
+** iKey = (P * 383) % HASHTABLE_NSLOT
+**
+** Then start scanning entries of the hash table, starting with iKey
+** (wrapping around to the beginning when the end of the hash table is
+** reached) until an unused hash slot is found. Let the first unused slot
+** be at index iUnused. (iUnused might be less than iKey if there was
+** wrap-around.) Because the hash table is never more than half full,
+** the search is guaranteed to eventually hit an unused entry. Let
+** iMax be the value between iKey and iUnused, closest to iUnused,
+** where aHash[iMax]==P. If there is no iMax entry (if there exists
+** no hash slot such that aHash[i]==P) then page P is not in the
+** current index block. Otherwise the iMax-th mapping entry of the
+** current index block corresponds to the last entry that references
+** page P.
+**
+** A hash search begins with the last index block and moves toward the
+** first index block, looking for entries corresponding to page P. On
+** average, only two or three slots in each index block need to be
+** examined in order to either find the last entry for page P, or to
+** establish that no such entry exists in the block. Each index block
+** holds over 4000 entries. So two or three index blocks are sufficient
+** to cover a typical 10 megabyte WAL file, assuming 1K pages. 8 or 10
+** comparisons (on average) suffice to either locate a frame in the
+** WAL or to establish that the frame does not exist in the WAL. This
+** is much faster than scanning the entire 10MB WAL.
+**
+** Note that entries are added in order of increasing K. Hence, one
+** reader might be using some value K0 and a second reader that started
+** at a later time (after additional transactions were added to the WAL
+** and to the wal-index) might be using a different value K1, where K1>K0.
+** Both readers can use the same hash table and mapping section to get
+** the correct result. There may be entries in the hash table with
+** K>K0 but to the first reader, those entries will appear to be unused
+** slots in the hash table and so the first reader will get an answer as
+** if no values greater than K0 had ever been inserted into the hash table
+** in the first place - which is what reader one wants. Meanwhile, the
+** second reader using K1 will see additional values that were inserted
+** later, which is exactly what reader two wants.
+**
+** When a rollback occurs, the value of K is decreased. Hash table entries
+** that correspond to frames greater than the new K value are removed
+** from the hash table at this point.
+*/
+#ifndef SQLITE_OMIT_WAL
+
+/* #include "wal.h" */
+
+/*
+** Trace output macros
+*/
+#if defined(SQLITE_TEST) && defined(SQLITE_DEBUG)
+SQLITE_PRIVATE int sqlite3WalTrace = 0;
+# define WALTRACE(X) if(sqlite3WalTrace) sqlite3DebugPrintf X
+#else
+# define WALTRACE(X)
+#endif
+
+/*
+** The maximum (and only) versions of the wal and wal-index formats
+** that may be interpreted by this version of SQLite.
+**
+** If a client begins recovering a WAL file and finds that (a) the checksum
+** values in the wal-header are correct and (b) the version field is not
+** WAL_MAX_VERSION, recovery fails and SQLite returns SQLITE_CANTOPEN.
+**
+** Similarly, if a client successfully reads a wal-index header (i.e. the
+** checksum test is successful) and finds that the version field is not
+** WALINDEX_MAX_VERSION, then no read-transaction is opened and SQLite
+** returns SQLITE_CANTOPEN.
+*/
+#define WAL_MAX_VERSION 3007000
+#define WALINDEX_MAX_VERSION 3007000
+
+/*
+** Index numbers for various locking bytes. WAL_NREADER is the number
+** of available reader locks and should be at least 3. The default
+** is SQLITE_SHM_NLOCK==8 and WAL_NREADER==5.
+**
+** Technically, the various VFSes are free to implement these locks however
+** they see fit. However, compatibility is encouraged so that VFSes can
+** interoperate. The standard implementation used on both unix and windows
+** is for the index number to indicate a byte offset into the
+** WalCkptInfo.aLock[] array in the wal-index header. In other words, all
+** locks are on the shm file. The WALINDEX_LOCK_OFFSET constant (which
+** should be 120) is the location in the shm file for the first locking
+** byte.
+*/
+#define WAL_WRITE_LOCK 0
+#define WAL_ALL_BUT_WRITE 1
+#define WAL_CKPT_LOCK 1
+#define WAL_RECOVER_LOCK 2
+#define WAL_READ_LOCK(I) (3+(I))
+#define WAL_NREADER (SQLITE_SHM_NLOCK-3)
+
+
+/* Object declarations */
+typedef struct WalIndexHdr WalIndexHdr;
+typedef struct WalIterator WalIterator;
+typedef struct WalCkptInfo WalCkptInfo;
+
+
+/*
+** The following object holds a copy of the wal-index header content.
+**
+** The actual header in the wal-index consists of two copies of this
+** object followed by one instance of the WalCkptInfo object.
+** For all versions of SQLite through 3.10.0 and probably beyond,
+** the locking bytes (WalCkptInfo.aLock) start at offset 120 and
+** the total header size is 136 bytes.
+**
+** The szPage value can be any power of 2 between 512 and 32768, inclusive.
+** Or it can be 1 to represent a 65536-byte page. The latter case was
+** added in 3.7.1 when support for 64K pages was added.
+*/
+struct WalIndexHdr {
+ u32 iVersion; /* Wal-index version */
+ u32 unused; /* Unused (padding) field */
+ u32 iChange; /* Counter incremented each transaction */
+ u8 isInit; /* 1 when initialized */
+ u8 bigEndCksum; /* True if checksums in WAL are big-endian */
+ u16 szPage; /* Database page size in bytes. 1==64K */
+ u32 mxFrame; /* Index of last valid frame in the WAL */
+ u32 nPage; /* Size of database in pages */
+ u32 aFrameCksum[2]; /* Checksum of last frame in log */
+ u32 aSalt[2]; /* Two salt values copied from WAL header */
+ u32 aCksum[2]; /* Checksum over all prior fields */
+};
+
+/*
+** A copy of the following object occurs in the wal-index immediately
+** following the second copy of the WalIndexHdr. This object stores
+** information used by checkpoint.
+**
+** nBackfill is the number of frames in the WAL that have been written
+** back into the database. (We call the act of moving content from WAL to
+** database "backfilling".) The nBackfill number is never greater than
+** WalIndexHdr.mxFrame. nBackfill can only be increased by threads
+** holding the WAL_CKPT_LOCK lock (which includes a recovery thread).
+** However, a WAL_WRITE_LOCK thread can move the value of nBackfill from
+** mxFrame back to zero when the WAL is reset.
+**
+** nBackfillAttempted is the largest value of nBackfill that a checkpoint
+** has attempted to achieve. Normally nBackfill==nBackfillAttempted; however,
+** nBackfillAttempted is set before any backfilling is done and the
+** nBackfill is only set after all backfilling completes. So if a checkpoint
+** crashes, nBackfillAttempted might be larger than nBackfill. The
+** WalIndexHdr.mxFrame must never be less than nBackfillAttempted.
+**
+** The aLock[] field is a set of bytes used for locking. These bytes should
+** never be read or written.
+**
+** There is one entry in aReadMark[] for each reader lock. If a reader
+** holds read-lock K, then the value in aReadMark[K] is no greater than
+** the mxFrame for that reader. The value READMARK_NOT_USED (0xffffffff)
+** for any aReadMark[] means that entry is unused. aReadMark[0] is
+** a special case; its value is never used and it exists as a place-holder
+** to avoid having to offset aReadMark[] indexes by one. Readers holding
+** WAL_READ_LOCK(0) always ignore the entire WAL and read all content
+** directly from the database.
+**
+** The value of aReadMark[K] may only be changed by a thread that
+** is holding an exclusive lock on WAL_READ_LOCK(K). Thus, the value of
+** aReadMark[K] cannot be changed while a reader is using that mark,
+** since the reader will be holding a shared lock on WAL_READ_LOCK(K).
+**
+** The checkpointer may only transfer frames from WAL to database where
+** the frame numbers are less than or equal to every aReadMark[] that is
+** in use (that is, every aReadMark[j] for which there is a corresponding
+** WAL_READ_LOCK(j)). New readers (usually) pick the aReadMark[] with the
+** largest value and will increase an unused aReadMark[] to mxFrame if there
+** is not already an aReadMark[] equal to mxFrame. The exception to the
+** previous sentence is when nBackfill equals mxFrame (meaning that everything
+** in the WAL has been backfilled into the database) then new readers
+** will choose aReadMark[0], which has value 0, and hence such readers will
+** get all their content directly from the database file and ignore
+** the WAL.
+**
+** Writers normally append new frames to the end of the WAL. However,
+** if nBackfill equals mxFrame (meaning that all WAL content has been
+** written back into the database) and if no readers are using the WAL
+** (in other words, if there are no WAL_READ_LOCK(i) where i>0) then
+** the writer will first "reset" the WAL back to the beginning and start
+** writing new content beginning at frame 1.
+**
+** We assume that 32-bit loads are atomic and so no locks are needed in
+** order to read from any aReadMark[] entries.
+*/
+struct WalCkptInfo {
+ u32 nBackfill; /* Number of WAL frames backfilled into DB */
+ u32 aReadMark[WAL_NREADER]; /* Reader marks */
+ u8 aLock[SQLITE_SHM_NLOCK]; /* Reserved space for locks */
+ u32 nBackfillAttempted; /* WAL frames perhaps written, or maybe not */
+ u32 notUsed0; /* Available for future enhancements */
+};
+#define READMARK_NOT_USED 0xffffffff
+
+
+/* A block of WALINDEX_LOCK_RESERVED bytes beginning at
+** WALINDEX_LOCK_OFFSET is reserved for locks. Since some systems
+** only support mandatory file-locks, we do not read or write data
+** from the region of the file on which locks are applied.
+*/
+#define WALINDEX_LOCK_OFFSET (sizeof(WalIndexHdr)*2+offsetof(WalCkptInfo,aLock))
+#define WALINDEX_HDR_SIZE (sizeof(WalIndexHdr)*2+sizeof(WalCkptInfo))
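+
+/* Worked out from the structures above, assuming the natural field
+** layout with no padding: sizeof(WalIndexHdr)==48 and, with
+** WAL_NREADER==5, offsetof(WalCkptInfo,aLock)==24 and
+** sizeof(WalCkptInfo)==40. Hence WALINDEX_LOCK_OFFSET==2*48+24==120 and
+** WALINDEX_HDR_SIZE==2*48+40==136, matching the offsets quoted in the
+** WalIndexHdr comment above.
+*/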
+
+/* Size of header before each frame in wal */
+#define WAL_FRAME_HDRSIZE 24
+
+/* Size of write ahead log header, including checksum. */
+#define WAL_HDRSIZE 32
+
+/* WAL magic value. Either this value, or the same value with the least
+** significant bit also set (WAL_MAGIC | 0x00000001) is stored in 32-bit
+** big-endian format in the first 4 bytes of a WAL file.
+**
+** If the LSB is set, then the checksums for each frame within the WAL
+** file are calculated by treating all data as an array of 32-bit
+** big-endian words. Otherwise, they are calculated by interpreting
+** all data as 32-bit little-endian words.
+*/
+#define WAL_MAGIC 0x377f0682
+
+/*
+** Return the offset of frame iFrame in the write-ahead log file,
+** assuming a database page size of szPage bytes. The offset returned
+** is to the start of the write-ahead log frame-header.
+*/
+#define walFrameOffset(iFrame, szPage) ( \
+ WAL_HDRSIZE + ((iFrame)-1)*(i64)((szPage)+WAL_FRAME_HDRSIZE) \
+)
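+
+/* For example, with szPage==4096: walFrameOffset(1,4096)==32, so the
+** first frame begins immediately after the 32-byte WAL header, and
+** walFrameOffset(2,4096)==32+(4096+24)==4152, since each frame occupies
+** a 24-byte frame-header plus one page of data.
+*/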
+
+/*
+** An open write-ahead log file is represented by an instance of the
+** following object.
+*/
+struct Wal {
+ sqlite3_vfs *pVfs; /* The VFS used to create pDbFd */
+ sqlite3_file *pDbFd; /* File handle for the database file */
+ sqlite3_file *pWalFd; /* File handle for WAL file */
+ u32 iCallback; /* Value to pass to log callback (or 0) */
+ i64 mxWalSize; /* Truncate WAL to this size upon reset */
+ int nWiData; /* Size of array apWiData */
+ int szFirstBlock; /* Size of first block written to WAL file */
+ volatile u32 **apWiData; /* Pointer to wal-index content in memory */
+ u32 szPage; /* Database page size */
+ i16 readLock; /* Which read lock is being held. -1 for none */
+ u8 syncFlags; /* Flags to use to sync header writes */
+ u8 exclusiveMode; /* Non-zero if connection is in exclusive mode */
+ u8 writeLock; /* True if in a write transaction */
+ u8 ckptLock; /* True if holding a checkpoint lock */
+ u8 readOnly; /* WAL_RDWR, WAL_RDONLY, or WAL_SHM_RDONLY */
+ u8 truncateOnCommit; /* True to truncate WAL file on commit */
+ u8 syncHeader; /* Fsync the WAL header if true */
+ u8 padToSectorBoundary; /* Pad transactions out to the next sector */
+ u8 bShmUnreliable; /* SHM content is read-only and unreliable */
+ WalIndexHdr hdr; /* Wal-index header for current transaction */
+ u32 minFrame; /* Ignore wal frames before this one */
+ u32 iReCksum; /* On commit, recalculate checksums from here */
+ const char *zWalName; /* Name of WAL file */
+ u32 nCkpt; /* Checkpoint sequence counter in the wal-header */
+#ifdef SQLITE_DEBUG
+ u8 lockError; /* True if a locking error has occurred */
+#endif
+#ifdef SQLITE_ENABLE_SNAPSHOT
+ WalIndexHdr *pSnapshot; /* Start transaction here if not NULL */
+#endif
+#ifdef SQLITE_ENABLE_SETLK_TIMEOUT
+ sqlite3 *db;
+#endif
+};
+
+/*
+** Candidate values for Wal.exclusiveMode.
+*/
+#define WAL_NORMAL_MODE 0
+#define WAL_EXCLUSIVE_MODE 1
+#define WAL_HEAPMEMORY_MODE 2
+
+/*
+** Possible values for WAL.readOnly
+*/
+#define WAL_RDWR 0 /* Normal read/write connection */
+#define WAL_RDONLY 1 /* The WAL file is readonly */
+#define WAL_SHM_RDONLY 2 /* The SHM file is readonly */
+
+/*
+** Each page of the wal-index mapping contains a hash-table made up of
+** an array of HASHTABLE_NSLOT elements of the following type.
+*/
+typedef u16 ht_slot;
+
+/*
+** This structure is used to implement an iterator that loops through
+** all frames in the WAL in database page order. Where two or more frames
+** correspond to the same database page, the iterator visits only the
+** frame most recently written to the WAL (in other words, the frame with
+** the largest index).
+**
+** The internals of this structure are only accessed by:
+**
+** walIteratorInit() - Create a new iterator,
+** walIteratorNext() - Step an iterator,
+** walIteratorFree() - Free an iterator.
+**
+** This functionality is used by the checkpoint code (see walCheckpoint()).
+*/
+struct WalIterator {
+ u32 iPrior; /* Last result returned from the iterator */
+ int nSegment; /* Number of entries in aSegment[] */
+ struct WalSegment {
+ int iNext; /* Next slot in aIndex[] not yet returned */
+ ht_slot *aIndex; /* i0, i1, i2... such that aPgno[iN] ascend */
+ u32 *aPgno; /* Array of page numbers. */
+ int nEntry; /* Nr. of entries in aPgno[] and aIndex[] */
+ int iZero; /* Frame number associated with aPgno[0] */
+ } aSegment[1]; /* One for every 32KB page in the wal-index */
+};
+
+/*
+** Define the parameters of the hash tables in the wal-index file. There
+** is a hash-table following every HASHTABLE_NPAGE page numbers in the
+** wal-index.
+**
+** Changing any of these constants will alter the wal-index format and
+** create incompatibilities.
+*/
+#define HASHTABLE_NPAGE 4096 /* Must be power of 2 */
+#define HASHTABLE_HASH_1 383 /* Should be prime */
+#define HASHTABLE_NSLOT (HASHTABLE_NPAGE*2) /* Must be a power of 2 */
+
+/*
+** The block of page numbers associated with the first hash-table in a
+** wal-index is smaller than usual. This is so that there is a complete
+** hash-table on each aligned 32KB page of the wal-index.
+*/
+#define HASHTABLE_NPAGE_ONE (HASHTABLE_NPAGE - (WALINDEX_HDR_SIZE/sizeof(u32)))
+
+/* The wal-index is divided into pages of WALINDEX_PGSZ bytes each. */
+#define WALINDEX_PGSZ ( \
+ sizeof(ht_slot)*HASHTABLE_NSLOT + HASHTABLE_NPAGE*sizeof(u32) \
+)
+
+/*
+** Obtain a pointer to the iPage'th page of the wal-index. The wal-index
+** is broken into pages of WALINDEX_PGSZ bytes. Wal-index pages are
+** numbered from zero.
+**
+** If the wal-index is currently smaller than iPage pages then the size
+** of the wal-index might be increased, but only if it is safe to do
+** so. It is safe to enlarge the wal-index if pWal->writeLock is true
+** or pWal->exclusiveMode==WAL_HEAPMEMORY_MODE.
+**
+** If this call is successful, *ppPage is set to point to the wal-index
+** page and SQLITE_OK is returned. If an error (an OOM or VFS error) occurs,
+** then an SQLite error code is returned and *ppPage is set to 0.
+*/
+static SQLITE_NOINLINE int walIndexPageRealloc(
+ Wal *pWal, /* The WAL context */
+ int iPage, /* The page we seek */
+ volatile u32 **ppPage /* Write the page pointer here */
+){
+ int rc = SQLITE_OK;
+
+ /* Enlarge the pWal->apWiData[] array if required */
+ if( pWal->nWiData<=iPage ){
+ sqlite3_int64 nByte = sizeof(u32*)*(iPage+1);
+ volatile u32 **apNew;
+ apNew = (volatile u32 **)sqlite3Realloc((void *)pWal->apWiData, nByte);
+ if( !apNew ){
+ *ppPage = 0;
+ return SQLITE_NOMEM_BKPT;
+ }
+ memset((void*)&apNew[pWal->nWiData], 0,
+ sizeof(u32*)*(iPage+1-pWal->nWiData));
+ pWal->apWiData = apNew;
+ pWal->nWiData = iPage+1;
+ }
+
+ /* Request a pointer to the required page from the VFS */
+ assert( pWal->apWiData[iPage]==0 );
+ if( pWal->exclusiveMode==WAL_HEAPMEMORY_MODE ){
+ pWal->apWiData[iPage] = (u32 volatile *)sqlite3MallocZero(WALINDEX_PGSZ);
+ if( !pWal->apWiData[iPage] ) rc = SQLITE_NOMEM_BKPT;
+ }else{
+ rc = sqlite3OsShmMap(pWal->pDbFd, iPage, WALINDEX_PGSZ,
+ pWal->writeLock, (void volatile **)&pWal->apWiData[iPage]
+ );
+ assert( pWal->apWiData[iPage]!=0 || rc!=SQLITE_OK || pWal->writeLock==0 );
+ testcase( pWal->apWiData[iPage]==0 && rc==SQLITE_OK );
+ if( rc==SQLITE_OK ){
+ if( iPage>0 && sqlite3FaultSim(600) ) rc = SQLITE_NOMEM;
+ }else if( (rc&0xff)==SQLITE_READONLY ){
+ pWal->readOnly |= WAL_SHM_RDONLY;
+ if( rc==SQLITE_READONLY ){
+ rc = SQLITE_OK;
+ }
+ }
+ }
+
+ *ppPage = pWal->apWiData[iPage];
+ assert( iPage==0 || *ppPage || rc!=SQLITE_OK );
+ return rc;
+}
+static int walIndexPage(
+ Wal *pWal, /* The WAL context */
+ int iPage, /* The page we seek */
+ volatile u32 **ppPage /* Write the page pointer here */
+){
+ if( pWal->nWiData<=iPage || (*ppPage = pWal->apWiData[iPage])==0 ){
+ return walIndexPageRealloc(pWal, iPage, ppPage);
+ }
+ return SQLITE_OK;
+}
+
+/*
+** Return a pointer to the WalCkptInfo structure in the wal-index.
+*/
+static volatile WalCkptInfo *walCkptInfo(Wal *pWal){
+ assert( pWal->nWiData>0 && pWal->apWiData[0] );
+ return (volatile WalCkptInfo*)&(pWal->apWiData[0][sizeof(WalIndexHdr)/2]);
+}
+
+/*
+** Return a pointer to the WalIndexHdr structure in the wal-index.
+*/
+static volatile WalIndexHdr *walIndexHdr(Wal *pWal){
+ assert( pWal->nWiData>0 && pWal->apWiData[0] );
+ return (volatile WalIndexHdr*)pWal->apWiData[0];
+}
+
+/*
+** The argument to this macro must be of type u32. On a little-endian
+** architecture, it returns the u32 value that results from interpreting
+** the 4 bytes as a big-endian value. On a big-endian architecture, it
+** returns the value that would be produced by interpreting the 4 bytes
+** of the input value as a little-endian integer.
+*/
+#define BYTESWAP32(x) ( \
+ (((x)&0x000000FF)<<24) + (((x)&0x0000FF00)<<8) \
+ + (((x)&0x00FF0000)>>8) + (((x)&0xFF000000)>>24) \
+)
+
+/*
+** Generate or extend an 8 byte checksum based on the data in
+** array aByte[] and the initial values of aIn[0] and aIn[1] (or
+** initial values of 0 and 0 if aIn==NULL).
+**
+** The checksum is written back into aOut[] before returning.
+**
+** nByte must be a positive multiple of 8.
+*/
+static void walChecksumBytes(
+ int nativeCksum, /* True for native byte-order, false for non-native */
+ u8 *a, /* Content to be checksummed */
+ int nByte, /* Bytes of content in a[]. Must be a multiple of 8. */
+ const u32 *aIn, /* Initial checksum value input */
+ u32 *aOut /* OUT: Final checksum value output */
+){
+ u32 s1, s2;
+ u32 *aData = (u32 *)a;
+ u32 *aEnd = (u32 *)&a[nByte];
+
+ if( aIn ){
+ s1 = aIn[0];
+ s2 = aIn[1];
+ }else{
+ s1 = s2 = 0;
+ }
+
+ assert( nByte>=8 );
+ assert( (nByte&0x00000007)==0 );
+ assert( nByte<=65536 );
+
+ if( nativeCksum ){
+ do {
+ s1 += *aData++ + s2;
+ s2 += *aData++ + s1;
+ }while( aData<aEnd );
+ }else{
+ do {
+ s1 += BYTESWAP32(aData[0]) + s2;
+ s2 += BYTESWAP32(aData[1]) + s1;
+ aData += 2;
+ }while( aData<aEnd );
+ }
+
+ aOut[0] = s1;
+ aOut[1] = s2;
+}
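+
+/* Note that checksums chain: callers seed aIn[] with the running
+** (s1,s2) pair, so the WAL header is checksummed first and each frame
+** then extends the sum with the first 8 bytes of its frame-header
+** followed by its page data - see walEncodeFrame() and walDecodeFrame()
+** below. This realizes the cumulative frame-validity rule (2) in the
+** file header comment.
+*/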
+
+/*
+** If there is the possibility of concurrent access to the SHM file
+** from multiple threads and/or processes, then do a memory barrier.
+*/
+static void walShmBarrier(Wal *pWal){
+ if( pWal->exclusiveMode!=WAL_HEAPMEMORY_MODE ){
+ sqlite3OsShmBarrier(pWal->pDbFd);
+ }
+}
+
+/*
+** Add SQLITE_NO_TSAN as part of the return-type of a function
+** definition as a hint that the function contains constructs that
+** might give false-positive TSAN warnings.
+**
+** See tag-20200519-1.
+*/
+#if defined(__clang__) && !defined(SQLITE_NO_TSAN)
+# define SQLITE_NO_TSAN __attribute__((no_sanitize_thread))
+#else
+# define SQLITE_NO_TSAN
+#endif
+
+/*
+** Write the header information in pWal->hdr into the wal-index.
+**
+** The checksum on pWal->hdr is updated before it is written.
+*/
+static SQLITE_NO_TSAN void walIndexWriteHdr(Wal *pWal){
+ volatile WalIndexHdr *aHdr = walIndexHdr(pWal);
+ const int nCksum = offsetof(WalIndexHdr, aCksum);
+
+ assert( pWal->writeLock );
+ pWal->hdr.isInit = 1;
+ pWal->hdr.iVersion = WALINDEX_MAX_VERSION;
+ walChecksumBytes(1, (u8*)&pWal->hdr, nCksum, 0, pWal->hdr.aCksum);
+ /* Possible TSAN false-positive. See tag-20200519-1 */
+ memcpy((void*)&aHdr[1], (const void*)&pWal->hdr, sizeof(WalIndexHdr));
+ walShmBarrier(pWal);
+ memcpy((void*)&aHdr[0], (const void*)&pWal->hdr, sizeof(WalIndexHdr));
+}
+
+/*
+** This function encodes a single frame header and writes it to a buffer
+** supplied by the caller. A frame-header is made up of a series of
+** 4-byte big-endian integers, as follows:
+**
+** 0: Page number.
+** 4: For commit records, the size of the database image in pages
+** after the commit. For all other records, zero.
+** 8: Salt-1 (copied from the wal-header)
+** 12: Salt-2 (copied from the wal-header)
+** 16: Checksum-1.
+** 20: Checksum-2.
+*/
+static void walEncodeFrame(
+ Wal *pWal, /* The write-ahead log */
+ u32 iPage, /* Database page number for frame */
+ u32 nTruncate, /* New db size (or 0 for non-commit frames) */
+ u8 *aData, /* Pointer to page data */
+ u8 *aFrame /* OUT: Write encoded frame here */
+){
+ int nativeCksum; /* True for native byte-order checksums */
+ u32 *aCksum = pWal->hdr.aFrameCksum;
+ assert( WAL_FRAME_HDRSIZE==24 );
+ sqlite3Put4byte(&aFrame[0], iPage);
+ sqlite3Put4byte(&aFrame[4], nTruncate);
+ if( pWal->iReCksum==0 ){
+ memcpy(&aFrame[8], pWal->hdr.aSalt, 8);
+
+ nativeCksum = (pWal->hdr.bigEndCksum==SQLITE_BIGENDIAN);
+ walChecksumBytes(nativeCksum, aFrame, 8, aCksum, aCksum);
+ walChecksumBytes(nativeCksum, aData, pWal->szPage, aCksum, aCksum);
+
+ sqlite3Put4byte(&aFrame[16], aCksum[0]);
+ sqlite3Put4byte(&aFrame[20], aCksum[1]);
+ }else{
+ memset(&aFrame[8], 0, 16);
+ }
+}
+
+/*
+** Check to see if the frame with header in aFrame[] and content
+** in aData[] is valid. If it is a valid frame, fill *piPage and
+** *pnTruncate and return true. Return false if the frame is not valid.
+*/
+static int walDecodeFrame(
+ Wal *pWal, /* The write-ahead log */
+ u32 *piPage, /* OUT: Database page number for frame */
+ u32 *pnTruncate, /* OUT: New db size (or 0 if not commit) */
+ u8 *aData, /* Pointer to page data (for checksum) */
+ u8 *aFrame /* Frame data */
+){
+ int nativeCksum; /* True for native byte-order checksums */
+ u32 *aCksum = pWal->hdr.aFrameCksum;
+ u32 pgno; /* Page number of the frame */
+ assert( WAL_FRAME_HDRSIZE==24 );
+
+ /* A frame is only valid if the salt values in the frame-header
+ ** match the salt values in the wal-header.
+ */
+ if( memcmp(&pWal->hdr.aSalt, &aFrame[8], 8)!=0 ){
+ return 0;
+ }
+
+ /* A frame is only valid if the page number is greater than zero.
+ */
+ pgno = sqlite3Get4byte(&aFrame[0]);
+ if( pgno==0 ){
+ return 0;
+ }
+
+ /* A frame is only valid if a checksum of the WAL header,
+ ** all prior frames, the first 8 bytes of this frame-header,
+ ** and the frame-data matches the checksum in the last 8
+ ** bytes of this frame-header.
+ */
+ nativeCksum = (pWal->hdr.bigEndCksum==SQLITE_BIGENDIAN);
+ walChecksumBytes(nativeCksum, aFrame, 8, aCksum, aCksum);
+ walChecksumBytes(nativeCksum, aData, pWal->szPage, aCksum, aCksum);
+ if( aCksum[0]!=sqlite3Get4byte(&aFrame[16])
+ || aCksum[1]!=sqlite3Get4byte(&aFrame[20])
+ ){
+ /* Checksum failed. */
+ return 0;
+ }
+
+ /* If we reach this point, the frame is valid. Return the page number
+ ** and the new database size.
+ */
+ *piPage = pgno;
+ *pnTruncate = sqlite3Get4byte(&aFrame[4]);
+ return 1;
+}
+
+
+#if defined(SQLITE_TEST) && defined(SQLITE_DEBUG)
+/*
+** Names of locks. This routine is used to provide debugging output and is not
+** a part of an ordinary build.
+*/
+static const char *walLockName(int lockIdx){
+ if( lockIdx==WAL_WRITE_LOCK ){
+ return "WRITE-LOCK";
+ }else if( lockIdx==WAL_CKPT_LOCK ){
+ return "CKPT-LOCK";
+ }else if( lockIdx==WAL_RECOVER_LOCK ){
+ return "RECOVER-LOCK";
+ }else{
+ static char zName[15];
+ sqlite3_snprintf(sizeof(zName), zName, "READ-LOCK[%d]",
+ lockIdx-WAL_READ_LOCK(0));
+ return zName;
+ }
+}
+#endif /* defined(SQLITE_TEST) && defined(SQLITE_DEBUG) */
+
+
+/*
+** Set or release locks on the WAL. Locks are either shared or exclusive.
+** A lock cannot be moved directly between shared and exclusive - it must go
+** through the unlocked state first.
+**
+** In locking_mode=EXCLUSIVE, all of these routines become no-ops.
+*/
+static int walLockShared(Wal *pWal, int lockIdx){
+ int rc;
+ if( pWal->exclusiveMode ) return SQLITE_OK;
+ rc = sqlite3OsShmLock(pWal->pDbFd, lockIdx, 1,
+ SQLITE_SHM_LOCK | SQLITE_SHM_SHARED);
+ WALTRACE(("WAL%p: acquire SHARED-%s %s\n", pWal,
+ walLockName(lockIdx), rc ? "failed" : "ok"));
+ VVA_ONLY( pWal->lockError = (u8)(rc!=SQLITE_OK && (rc&0xFF)!=SQLITE_BUSY); )
+ return rc;
+}
+static void walUnlockShared(Wal *pWal, int lockIdx){
+ if( pWal->exclusiveMode ) return;
+ (void)sqlite3OsShmLock(pWal->pDbFd, lockIdx, 1,
+ SQLITE_SHM_UNLOCK | SQLITE_SHM_SHARED);
+ WALTRACE(("WAL%p: release SHARED-%s\n", pWal, walLockName(lockIdx)));
+}
+static int walLockExclusive(Wal *pWal, int lockIdx, int n){
+ int rc;
+ if( pWal->exclusiveMode ) return SQLITE_OK;
+ rc = sqlite3OsShmLock(pWal->pDbFd, lockIdx, n,
+ SQLITE_SHM_LOCK | SQLITE_SHM_EXCLUSIVE);
+ WALTRACE(("WAL%p: acquire EXCLUSIVE-%s cnt=%d %s\n", pWal,
+ walLockName(lockIdx), n, rc ? "failed" : "ok"));
+ VVA_ONLY( pWal->lockError = (u8)(rc!=SQLITE_OK && (rc&0xFF)!=SQLITE_BUSY); )
+ return rc;
+}
+static void walUnlockExclusive(Wal *pWal, int lockIdx, int n){
+ if( pWal->exclusiveMode ) return;
+ (void)sqlite3OsShmLock(pWal->pDbFd, lockIdx, n,
+ SQLITE_SHM_UNLOCK | SQLITE_SHM_EXCLUSIVE);
+ WALTRACE(("WAL%p: release EXCLUSIVE-%s cnt=%d\n", pWal,
+ walLockName(lockIdx), n));
+}
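+
+/* Illustrative sketch (comment only, not compiled): because a lock cannot
+** move directly between shared and exclusive, a caller wanting to upgrade
+** must pass through the unlocked state first:
+**
+**   if( walLockShared(pWal, WAL_CKPT_LOCK)==SQLITE_OK ){
+**     walUnlockShared(pWal, WAL_CKPT_LOCK);           .. back to unlocked
+**     rc = walLockExclusive(pWal, WAL_CKPT_LOCK, 1);  .. now exclusive
+**   }
+*/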
+
+/*
+** Compute a hash on a page number. The resulting hash value must land
+** between 0 and (HASHTABLE_NSLOT-1). The walHashNext() function advances
+** the hash to the next value in the event of a collision.
+*/
+static int walHash(u32 iPage){
+ assert( iPage>0 );
+ assert( (HASHTABLE_NSLOT & (HASHTABLE_NSLOT-1))==0 );
+ return (iPage*HASHTABLE_HASH_1) & (HASHTABLE_NSLOT-1);
+}
+static int walNextHash(int iPriorHash){
+ return (iPriorHash+1)&(HASHTABLE_NSLOT-1);
+}
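+
+/* Worked example, assuming the values defined earlier in this file
+** (HASHTABLE_NPAGE==4096, hence HASHTABLE_NSLOT==8192, and
+** HASHTABLE_HASH_1==383):
+**
+**   walHash(1)  == (1*383)  & 8191 == 383
+**   walHash(22) == (22*383) & 8191 == 8426 & 8191 == 234
+**
+** On a collision, walNextHash() probes the next slot: 384, 385, ...,
+** wrapping back to 0 after slot 8191.
+*/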
+
+/*
+** An instance of the WalHashLoc object is used to describe the location
+** of a page hash table in the wal-index. This becomes the return value
+** from walHashGet().
+*/
+typedef struct WalHashLoc WalHashLoc;
+struct WalHashLoc {
+ volatile ht_slot *aHash; /* Start of the wal-index hash table */
+ volatile u32 *aPgno; /* aPgno[1] is the page of first frame indexed */
+  u32 iZero;                      /* One less than the first indexed frame number */
+};
+
+/*
+** Return pointers to the hash table and page number array stored on
+** page iHash of the wal-index. The wal-index is broken into 32KB pages
+** numbered starting from 0.
+**
+** Set output variable pLoc->aHash to point to the start of the hash table
+** in the wal-index file. Set pLoc->iZero to one less than the frame
+** number of the first frame indexed by this hash table. If a
+** slot in the hash table is set to N, it refers to frame number
+** (pLoc->iZero+N) in the log.
+**
+** Finally, set pLoc->aPgno so that pLoc->aPgno[1] is the page number of the
+** first frame indexed by the hash table, frame (pLoc->iZero+1).
+*/
+static int walHashGet(
+ Wal *pWal, /* WAL handle */
+ int iHash, /* Find the iHash'th table */
+ WalHashLoc *pLoc /* OUT: Hash table location */
+){
+ int rc; /* Return code */
+
+ rc = walIndexPage(pWal, iHash, &pLoc->aPgno);
+ assert( rc==SQLITE_OK || iHash>0 );
+
+ if( rc==SQLITE_OK ){
+ pLoc->aHash = (volatile ht_slot *)&pLoc->aPgno[HASHTABLE_NPAGE];
+ if( iHash==0 ){
+ pLoc->aPgno = &pLoc->aPgno[WALINDEX_HDR_SIZE/sizeof(u32)];
+ pLoc->iZero = 0;
+ }else{
+ pLoc->iZero = HASHTABLE_NPAGE_ONE + (iHash-1)*HASHTABLE_NPAGE;
+ }
+ pLoc->aPgno = &pLoc->aPgno[-1];
+ }
+ return rc;
+}
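+
+/* Sketch of the layout that walHashGet() decodes, assuming
+** WALINDEX_HDR_SIZE==136 (asserted below in sqlite3WalOpen()) and
+** HASHTABLE_NPAGE==4096, so the first page has HASHTABLE_NPAGE_ONE
+** == 4096 - 136/4 == 4062 usable page-number slots:
+**
+**   wal-index page 0:   [136-byte header][aPgno: 4062 u32][aHash: 8192 slots]
+**   wal-index page N>0: [aPgno: 4096 u32][aHash: 8192 slots]
+**
+** The pLoc->aPgno[-1] adjustment above makes aPgno[] 1-indexed, so that
+** aPgno[K] is the database page stored by frame (pLoc->iZero+K).
+*/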
+
+/*
+** Return the number of the wal-index page that contains the hash-table
+** and page-number array that contain entries corresponding to WAL frame
+** iFrame. The wal-index is broken up into 32KB pages. Wal-index pages
+** are numbered starting from 0.
+*/
+static int walFramePage(u32 iFrame){
+ int iHash = (iFrame+HASHTABLE_NPAGE-HASHTABLE_NPAGE_ONE-1) / HASHTABLE_NPAGE;
+ assert( (iHash==0 || iFrame>HASHTABLE_NPAGE_ONE)
+ && (iHash>=1 || iFrame<=HASHTABLE_NPAGE_ONE)
+ && (iHash<=1 || iFrame>(HASHTABLE_NPAGE_ONE+HASHTABLE_NPAGE))
+ && (iHash>=2 || iFrame<=HASHTABLE_NPAGE_ONE+HASHTABLE_NPAGE)
+ && (iHash<=2 || iFrame>(HASHTABLE_NPAGE_ONE+2*HASHTABLE_NPAGE))
+ );
+ assert( iHash>=0 );
+ return iHash;
+}
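+
+/* Worked example, again assuming HASHTABLE_NPAGE==4096 and
+** HASHTABLE_NPAGE_ONE==4062: the formula reduces to (iFrame+33)/4096, so
+**
+**   frames    1 ..  4062  ->  wal-index page 0
+**   frames 4063 ..  8158  ->  wal-index page 1
+**   frames 8159 .. 12254  ->  wal-index page 2
+*/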
+
+/*
+** Return the page number associated with frame iFrame in this WAL.
+*/
+static u32 walFramePgno(Wal *pWal, u32 iFrame){
+ int iHash = walFramePage(iFrame);
+ if( iHash==0 ){
+ return pWal->apWiData[0][WALINDEX_HDR_SIZE/sizeof(u32) + iFrame - 1];
+ }
+ return pWal->apWiData[iHash][(iFrame-1-HASHTABLE_NPAGE_ONE)%HASHTABLE_NPAGE];
+}
+
+/*
+** Remove entries from the hash table that point to WAL slots greater
+** than pWal->hdr.mxFrame.
+**
+** This function is called whenever pWal->hdr.mxFrame is decreased due
+** to a rollback or savepoint.
+**
+** At most only the hash table containing pWal->hdr.mxFrame needs to be
+** updated. Any later hash tables will be automatically cleared when
+** pWal->hdr.mxFrame advances to the point where those hash tables are
+** actually needed.
+*/
+static void walCleanupHash(Wal *pWal){
+ WalHashLoc sLoc; /* Hash table location */
+ int iLimit = 0; /* Zero values greater than this */
+ int nByte; /* Number of bytes to zero in aPgno[] */
+ int i; /* Used to iterate through aHash[] */
+  int rc;                         /* Return code from walHashGet() */
+
+ assert( pWal->writeLock );
+ testcase( pWal->hdr.mxFrame==HASHTABLE_NPAGE_ONE-1 );
+ testcase( pWal->hdr.mxFrame==HASHTABLE_NPAGE_ONE );
+ testcase( pWal->hdr.mxFrame==HASHTABLE_NPAGE_ONE+1 );
+
+ if( pWal->hdr.mxFrame==0 ) return;
+
+ /* Obtain pointers to the hash-table and page-number array containing
+ ** the entry that corresponds to frame pWal->hdr.mxFrame. It is guaranteed
+  ** that the page on which said hash-table and array reside is already mapped. (1)
+ */
+ assert( pWal->nWiData>walFramePage(pWal->hdr.mxFrame) );
+ assert( pWal->apWiData[walFramePage(pWal->hdr.mxFrame)] );
+ rc = walHashGet(pWal, walFramePage(pWal->hdr.mxFrame), &sLoc);
+ if( NEVER(rc) ) return; /* Defense-in-depth, in case (1) above is wrong */
+
+ /* Zero all hash-table entries that correspond to frame numbers greater
+ ** than pWal->hdr.mxFrame.
+ */
+ iLimit = pWal->hdr.mxFrame - sLoc.iZero;
+ assert( iLimit>0 );
+ for(i=0; i<HASHTABLE_NSLOT; i++){
+ if( sLoc.aHash[i]>iLimit ){
+ sLoc.aHash[i] = 0;
+ }
+ }
+
+ /* Zero the entries in the aPgno array that correspond to frames with
+ ** frame numbers greater than pWal->hdr.mxFrame.
+ */
+ nByte = (int)((char *)sLoc.aHash - (char *)&sLoc.aPgno[iLimit+1]);
+ memset((void *)&sLoc.aPgno[iLimit+1], 0, nByte);
+
+#ifdef SQLITE_ENABLE_EXPENSIVE_ASSERT
+  /* Verify that every entry in the mapping region is still reachable
+ ** via the hash table even after the cleanup.
+ */
+ if( iLimit ){
+ int j; /* Loop counter */
+ int iKey; /* Hash key */
+ for(j=1; j<=iLimit; j++){
+ for(iKey=walHash(sLoc.aPgno[j]);sLoc.aHash[iKey];iKey=walNextHash(iKey)){
+ if( sLoc.aHash[iKey]==j ) break;
+ }
+ assert( sLoc.aHash[iKey]==j );
+ }
+ }
+#endif /* SQLITE_ENABLE_EXPENSIVE_ASSERT */
+}
+
+
+/*
+** Set an entry in the wal-index that will map database page number
+** pPage into WAL frame iFrame.
+*/
+static int walIndexAppend(Wal *pWal, u32 iFrame, u32 iPage){
+ int rc; /* Return code */
+ WalHashLoc sLoc; /* Wal-index hash table location */
+
+ rc = walHashGet(pWal, walFramePage(iFrame), &sLoc);
+
+ /* Assuming the wal-index file was successfully mapped, populate the
+ ** page number array and hash table entry.
+ */
+ if( rc==SQLITE_OK ){
+ int iKey; /* Hash table key */
+ int idx; /* Value to write to hash-table slot */
+ int nCollide; /* Number of hash collisions */
+
+ idx = iFrame - sLoc.iZero;
+ assert( idx <= HASHTABLE_NSLOT/2 + 1 );
+
+ /* If this is the first entry to be added to this hash-table, zero the
+ ** entire hash table and aPgno[] array before proceeding.
+ */
+ if( idx==1 ){
+ int nByte = (int)((u8 *)&sLoc.aHash[HASHTABLE_NSLOT]
+ - (u8 *)&sLoc.aPgno[1]);
+ memset((void*)&sLoc.aPgno[1], 0, nByte);
+ }
+
+ /* If the entry in aPgno[] is already set, then the previous writer
+ ** must have exited unexpectedly in the middle of a transaction (after
+ ** writing one or more dirty pages to the WAL to free up memory).
+    ** Remove the remnants of that writer's uncommitted transaction from
+ ** the hash-table before writing any new entries.
+ */
+ if( sLoc.aPgno[idx] ){
+ walCleanupHash(pWal);
+ assert( !sLoc.aPgno[idx] );
+ }
+
+ /* Write the aPgno[] array entry and the hash-table slot. */
+ nCollide = idx;
+ for(iKey=walHash(iPage); sLoc.aHash[iKey]; iKey=walNextHash(iKey)){
+ if( (nCollide--)==0 ) return SQLITE_CORRUPT_BKPT;
+ }
+ sLoc.aPgno[idx] = iPage;
+ AtomicStore(&sLoc.aHash[iKey], (ht_slot)idx);
+
+#ifdef SQLITE_ENABLE_EXPENSIVE_ASSERT
+ /* Verify that the number of entries in the hash table exactly equals
+ ** the number of entries in the mapping region.
+ */
+ {
+ int i; /* Loop counter */
+ int nEntry = 0; /* Number of entries in the hash table */
+ for(i=0; i<HASHTABLE_NSLOT; i++){ if( sLoc.aHash[i] ) nEntry++; }
+ assert( nEntry==idx );
+ }
+
+    /* Verify that every entry in the mapping region is reachable
+ ** via the hash table. This turns out to be a really, really expensive
+ ** thing to check, so only do this occasionally - not on every
+ ** iteration.
+ */
+ if( (idx&0x3ff)==0 ){
+ int i; /* Loop counter */
+ for(i=1; i<=idx; i++){
+ for(iKey=walHash(sLoc.aPgno[i]);
+ sLoc.aHash[iKey];
+ iKey=walNextHash(iKey)){
+ if( sLoc.aHash[iKey]==i ) break;
+ }
+ assert( sLoc.aHash[iKey]==i );
+ }
+ }
+#endif /* SQLITE_ENABLE_EXPENSIVE_ASSERT */
+ }
+
+
+ return rc;
+}
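+
+/* Probe-sequence sketch: suppose frames 5 and 9 both write database page
+** 22, so both hash to walHash(22). Frame 5 is appended first and occupies
+** that slot; when frame 9 is appended the slot is non-zero, so the
+** walNextHash() loop above advances until it finds a free slot. A reader
+** probing the same chain (see sqlite3WalFindFrame() elsewhere in this
+** file) scans the full chain and keeps the newest matching frame, so it
+** correctly prefers frame 9 over frame 5.
+*/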
+
+
+/*
+** Recover the wal-index by reading the write-ahead log file.
+**
+** This routine first tries to establish an exclusive lock on the
+** wal-index to prevent other threads/processes from doing anything
+** with the WAL or wal-index while recovery is running. The
+** WAL_RECOVER_LOCK is also held so that other threads will know
+** that this thread is running recovery. If unable to establish
+** the necessary locks, this routine returns SQLITE_BUSY.
+*/
+static int walIndexRecover(Wal *pWal){
+ int rc; /* Return Code */
+ i64 nSize; /* Size of log file */
+ u32 aFrameCksum[2] = {0, 0};
+ int iLock; /* Lock offset to lock for checkpoint */
+
+  /* Obtain an exclusive lock on all bytes in the locking range not already
+ ** locked by the caller. The caller is guaranteed to have locked the
+ ** WAL_WRITE_LOCK byte, and may have also locked the WAL_CKPT_LOCK byte.
+ ** If successful, the same bytes that are locked here are unlocked before
+ ** this function returns.
+ */
+ assert( pWal->ckptLock==1 || pWal->ckptLock==0 );
+ assert( WAL_ALL_BUT_WRITE==WAL_WRITE_LOCK+1 );
+ assert( WAL_CKPT_LOCK==WAL_ALL_BUT_WRITE );
+ assert( pWal->writeLock );
+ iLock = WAL_ALL_BUT_WRITE + pWal->ckptLock;
+ rc = walLockExclusive(pWal, iLock, WAL_READ_LOCK(0)-iLock);
+ if( rc ){
+ return rc;
+ }
+
+ WALTRACE(("WAL%p: recovery begin...\n", pWal));
+
+ memset(&pWal->hdr, 0, sizeof(WalIndexHdr));
+
+ rc = sqlite3OsFileSize(pWal->pWalFd, &nSize);
+ if( rc!=SQLITE_OK ){
+ goto recovery_error;
+ }
+
+ if( nSize>WAL_HDRSIZE ){
+ u8 aBuf[WAL_HDRSIZE]; /* Buffer to load WAL header into */
+ u32 *aPrivate = 0; /* Heap copy of *-shm hash being populated */
+ u8 *aFrame = 0; /* Malloc'd buffer to load entire frame */
+ int szFrame; /* Number of bytes in buffer aFrame[] */
+ u8 *aData; /* Pointer to data part of aFrame buffer */
+ int szPage; /* Page size according to the log */
+ u32 magic; /* Magic value read from WAL header */
+    u32 version;                  /* Version number read from WAL header */
+ int isValid; /* True if this frame is valid */
+ u32 iPg; /* Current 32KB wal-index page */
+ u32 iLastFrame; /* Last frame in wal, based on nSize alone */
+
+ /* Read in the WAL header. */
+ rc = sqlite3OsRead(pWal->pWalFd, aBuf, WAL_HDRSIZE, 0);
+ if( rc!=SQLITE_OK ){
+ goto recovery_error;
+ }
+
+ /* If the database page size is not a power of two, or is greater than
+ ** SQLITE_MAX_PAGE_SIZE, conclude that the WAL file contains no valid
+ ** data. Similarly, if the 'magic' value is invalid, ignore the whole
+ ** WAL file.
+ */
+ magic = sqlite3Get4byte(&aBuf[0]);
+ szPage = sqlite3Get4byte(&aBuf[8]);
+ if( (magic&0xFFFFFFFE)!=WAL_MAGIC
+ || szPage&(szPage-1)
+ || szPage>SQLITE_MAX_PAGE_SIZE
+ || szPage<512
+ ){
+ goto finished;
+ }
+ pWal->hdr.bigEndCksum = (u8)(magic&0x00000001);
+ pWal->szPage = szPage;
+ pWal->nCkpt = sqlite3Get4byte(&aBuf[12]);
+ memcpy(&pWal->hdr.aSalt, &aBuf[16], 8);
+
+ /* Verify that the WAL header checksum is correct */
+ walChecksumBytes(pWal->hdr.bigEndCksum==SQLITE_BIGENDIAN,
+ aBuf, WAL_HDRSIZE-2*4, 0, pWal->hdr.aFrameCksum
+ );
+ if( pWal->hdr.aFrameCksum[0]!=sqlite3Get4byte(&aBuf[24])
+ || pWal->hdr.aFrameCksum[1]!=sqlite3Get4byte(&aBuf[28])
+ ){
+ goto finished;
+ }
+
+    /* Verify that the version number on the WAL format is one that we
+ ** are able to understand */
+ version = sqlite3Get4byte(&aBuf[4]);
+ if( version!=WAL_MAX_VERSION ){
+ rc = SQLITE_CANTOPEN_BKPT;
+ goto finished;
+ }
+
+ /* Malloc a buffer to read frames into. */
+ szFrame = szPage + WAL_FRAME_HDRSIZE;
+ aFrame = (u8 *)sqlite3_malloc64(szFrame + WALINDEX_PGSZ);
+ if( !aFrame ){
+ rc = SQLITE_NOMEM_BKPT;
+ goto recovery_error;
+ }
+ aData = &aFrame[WAL_FRAME_HDRSIZE];
+ aPrivate = (u32*)&aData[szPage];
+
+ /* Read all frames from the log file. */
+ iLastFrame = (nSize - WAL_HDRSIZE) / szFrame;
+ for(iPg=0; iPg<=(u32)walFramePage(iLastFrame); iPg++){
+ u32 *aShare;
+ u32 iFrame; /* Index of last frame read */
+ u32 iLast = MIN(iLastFrame, HASHTABLE_NPAGE_ONE+iPg*HASHTABLE_NPAGE);
+ u32 iFirst = 1 + (iPg==0?0:HASHTABLE_NPAGE_ONE+(iPg-1)*HASHTABLE_NPAGE);
+ u32 nHdr, nHdr32;
+ rc = walIndexPage(pWal, iPg, (volatile u32**)&aShare);
+ if( rc ) break;
+ pWal->apWiData[iPg] = aPrivate;
+
+ for(iFrame=iFirst; iFrame<=iLast; iFrame++){
+ i64 iOffset = walFrameOffset(iFrame, szPage);
+ u32 pgno; /* Database page number for frame */
+ u32 nTruncate; /* dbsize field from frame header */
+
+ /* Read and decode the next log frame. */
+ rc = sqlite3OsRead(pWal->pWalFd, aFrame, szFrame, iOffset);
+ if( rc!=SQLITE_OK ) break;
+ isValid = walDecodeFrame(pWal, &pgno, &nTruncate, aData, aFrame);
+ if( !isValid ) break;
+ rc = walIndexAppend(pWal, iFrame, pgno);
+ if( NEVER(rc!=SQLITE_OK) ) break;
+
+ /* If nTruncate is non-zero, this is a commit record. */
+ if( nTruncate ){
+ pWal->hdr.mxFrame = iFrame;
+ pWal->hdr.nPage = nTruncate;
+ pWal->hdr.szPage = (u16)((szPage&0xff00) | (szPage>>16));
+ testcase( szPage<=32768 );
+ testcase( szPage>=65536 );
+ aFrameCksum[0] = pWal->hdr.aFrameCksum[0];
+ aFrameCksum[1] = pWal->hdr.aFrameCksum[1];
+ }
+ }
+ pWal->apWiData[iPg] = aShare;
+ nHdr = (iPg==0 ? WALINDEX_HDR_SIZE : 0);
+ nHdr32 = nHdr / sizeof(u32);
+#ifndef SQLITE_SAFER_WALINDEX_RECOVERY
+ /* Memcpy() should work fine here, on all reasonable implementations.
+ ** Technically, memcpy() might change the destination to some
+ ** intermediate value before setting to the final value, and that might
+ ** cause a concurrent reader to malfunction. Memcpy() is allowed to
+ ** do that, according to the spec, but no memcpy() implementation that
+ ** we know of actually does that, which is why we say that memcpy()
+ ** is safe for this. Memcpy() is certainly a lot faster.
+ */
+ memcpy(&aShare[nHdr32], &aPrivate[nHdr32], WALINDEX_PGSZ-nHdr);
+#else
+ /* In the event that some platform is found for which memcpy()
+ ** changes the destination to some intermediate value before
+ ** setting the final value, this alternative copy routine is
+ ** provided.
+ */
+ {
+ int i;
+ for(i=nHdr32; i<WALINDEX_PGSZ/sizeof(u32); i++){
+ if( aShare[i]!=aPrivate[i] ){
+ /* Atomic memory operations are not required here because if
+ ** the value needs to be changed, that means it is not being
+ ** accessed concurrently. */
+ aShare[i] = aPrivate[i];
+ }
+ }
+ }
+#endif
+ if( iFrame<=iLast ) break;
+ }
+
+ sqlite3_free(aFrame);
+ }
+
+finished:
+ if( rc==SQLITE_OK ){
+ volatile WalCkptInfo *pInfo;
+ int i;
+ pWal->hdr.aFrameCksum[0] = aFrameCksum[0];
+ pWal->hdr.aFrameCksum[1] = aFrameCksum[1];
+ walIndexWriteHdr(pWal);
+
+ /* Reset the checkpoint-header. This is safe because this thread is
+ ** currently holding locks that exclude all other writers and
+ ** checkpointers. Then set the values of read-mark slots 1 through N.
+ */
+ pInfo = walCkptInfo(pWal);
+ pInfo->nBackfill = 0;
+ pInfo->nBackfillAttempted = pWal->hdr.mxFrame;
+ pInfo->aReadMark[0] = 0;
+ for(i=1; i<WAL_NREADER; i++){
+ rc = walLockExclusive(pWal, WAL_READ_LOCK(i), 1);
+ if( rc==SQLITE_OK ){
+ if( i==1 && pWal->hdr.mxFrame ){
+ pInfo->aReadMark[i] = pWal->hdr.mxFrame;
+ }else{
+ pInfo->aReadMark[i] = READMARK_NOT_USED;
+ }
+ walUnlockExclusive(pWal, WAL_READ_LOCK(i), 1);
+ }else if( rc!=SQLITE_BUSY ){
+ goto recovery_error;
+ }
+ }
+
+ /* If more than one frame was recovered from the log file, report an
+ ** event via sqlite3_log(). This is to help with identifying performance
+ ** problems caused by applications routinely shutting down without
+ ** checkpointing the log file.
+ */
+ if( pWal->hdr.nPage ){
+ sqlite3_log(SQLITE_NOTICE_RECOVER_WAL,
+ "recovered %d frames from WAL file %s",
+ pWal->hdr.mxFrame, pWal->zWalName
+ );
+ }
+ }
+
+recovery_error:
+ WALTRACE(("WAL%p: recovery %s\n", pWal, rc ? "failed" : "ok"));
+ walUnlockExclusive(pWal, iLock, WAL_READ_LOCK(0)-iLock);
+ return rc;
+}
+
+/*
+** Close an open wal-index.
+*/
+static void walIndexClose(Wal *pWal, int isDelete){
+ if( pWal->exclusiveMode==WAL_HEAPMEMORY_MODE || pWal->bShmUnreliable ){
+ int i;
+ for(i=0; i<pWal->nWiData; i++){
+ sqlite3_free((void *)pWal->apWiData[i]);
+ pWal->apWiData[i] = 0;
+ }
+ }
+ if( pWal->exclusiveMode!=WAL_HEAPMEMORY_MODE ){
+ sqlite3OsShmUnmap(pWal->pDbFd, isDelete);
+ }
+}
+
+/*
+** Open a connection to the WAL file zWalName. The database file must
+** already be opened on connection pDbFd. The buffer that zWalName points
+** to must remain valid for the lifetime of the returned Wal* handle.
+**
+** A SHARED lock should be held on the database file when this function
+** is called. The purpose of this SHARED lock is to prevent any other
+** client from unlinking the WAL or wal-index file. If another process
+** were to do this just after this client opened one of these files, the
+** system would be badly broken.
+**
+** If the log file is successfully opened, SQLITE_OK is returned and
+** *ppWal is set to point to a new WAL handle. If an error occurs,
+** an SQLite error code is returned and *ppWal is left unmodified.
+*/
+SQLITE_PRIVATE int sqlite3WalOpen(
+ sqlite3_vfs *pVfs, /* vfs module to open wal and wal-index */
+ sqlite3_file *pDbFd, /* The open database file */
+ const char *zWalName, /* Name of the WAL file */
+ int bNoShm, /* True to run in heap-memory mode */
+ i64 mxWalSize, /* Truncate WAL to this size on reset */
+ Wal **ppWal /* OUT: Allocated Wal handle */
+){
+ int rc; /* Return Code */
+ Wal *pRet; /* Object to allocate and return */
+ int flags; /* Flags passed to OsOpen() */
+
+ assert( zWalName && zWalName[0] );
+ assert( pDbFd );
+
+ /* In the amalgamation, the os_unix.c and os_win.c source files come before
+ ** this source file. Verify that the #defines of the locking byte offsets
+ ** in os_unix.c and os_win.c agree with the WALINDEX_LOCK_OFFSET value.
+ ** For that matter, if the lock offset ever changes from its initial design
+ ** value of 120, we need to know that so there is an assert() to check it.
+ */
+ assert( 120==WALINDEX_LOCK_OFFSET );
+ assert( 136==WALINDEX_HDR_SIZE );
+#ifdef WIN_SHM_BASE
+ assert( WIN_SHM_BASE==WALINDEX_LOCK_OFFSET );
+#endif
+#ifdef UNIX_SHM_BASE
+ assert( UNIX_SHM_BASE==WALINDEX_LOCK_OFFSET );
+#endif
+
+
+ /* Allocate an instance of struct Wal to return. */
+ *ppWal = 0;
+ pRet = (Wal*)sqlite3MallocZero(sizeof(Wal) + pVfs->szOsFile);
+ if( !pRet ){
+ return SQLITE_NOMEM_BKPT;
+ }
+
+ pRet->pVfs = pVfs;
+ pRet->pWalFd = (sqlite3_file *)&pRet[1];
+ pRet->pDbFd = pDbFd;
+ pRet->readLock = -1;
+ pRet->mxWalSize = mxWalSize;
+ pRet->zWalName = zWalName;
+ pRet->syncHeader = 1;
+ pRet->padToSectorBoundary = 1;
+ pRet->exclusiveMode = (bNoShm ? WAL_HEAPMEMORY_MODE: WAL_NORMAL_MODE);
+
+ /* Open file handle on the write-ahead log file. */
+ flags = (SQLITE_OPEN_READWRITE|SQLITE_OPEN_CREATE|SQLITE_OPEN_WAL);
+ rc = sqlite3OsOpen(pVfs, zWalName, pRet->pWalFd, flags, &flags);
+ if( rc==SQLITE_OK && flags&SQLITE_OPEN_READONLY ){
+ pRet->readOnly = WAL_RDONLY;
+ }
+
+ if( rc!=SQLITE_OK ){
+ walIndexClose(pRet, 0);
+ sqlite3OsClose(pRet->pWalFd);
+ sqlite3_free(pRet);
+ }else{
+ int iDC = sqlite3OsDeviceCharacteristics(pDbFd);
+ if( iDC & SQLITE_IOCAP_SEQUENTIAL ){ pRet->syncHeader = 0; }
+ if( iDC & SQLITE_IOCAP_POWERSAFE_OVERWRITE ){
+ pRet->padToSectorBoundary = 0;
+ }
+ *ppWal = pRet;
+    WALTRACE(("WAL%p: opened\n", pRet));
+ }
+ return rc;
+}
+
+/*
+** Change the size to which the WAL file is truncated on each reset.
+*/
+SQLITE_PRIVATE void sqlite3WalLimit(Wal *pWal, i64 iLimit){
+ if( pWal ) pWal->mxWalSize = iLimit;
+}
+
+/*
+** Find the smallest page number out of all pages held in the WAL that
+** has not been returned by any prior invocation of this method on the
+** same WalIterator object. Write into *piFrame the frame index where
+** that page was last written into the WAL. Write into *piPage the page
+** number.
+**
+** Return 0 on success. If there are no pages in the WAL with a page
+** number larger than *piPage, then return 1.
+*/
+static int walIteratorNext(
+ WalIterator *p, /* Iterator */
+ u32 *piPage, /* OUT: The page number of the next page */
+ u32 *piFrame /* OUT: Wal frame index of next page */
+){
+ u32 iMin; /* Result pgno must be greater than iMin */
+ u32 iRet = 0xFFFFFFFF; /* 0xffffffff is never a valid page number */
+ int i; /* For looping through segments */
+
+ iMin = p->iPrior;
+ assert( iMin<0xffffffff );
+ for(i=p->nSegment-1; i>=0; i--){
+ struct WalSegment *pSegment = &p->aSegment[i];
+ while( pSegment->iNext<pSegment->nEntry ){
+ u32 iPg = pSegment->aPgno[pSegment->aIndex[pSegment->iNext]];
+ if( iPg>iMin ){
+ if( iPg<iRet ){
+ iRet = iPg;
+ *piFrame = pSegment->iZero + pSegment->aIndex[pSegment->iNext];
+ }
+ break;
+ }
+ pSegment->iNext++;
+ }
+ }
+
+ *piPage = p->iPrior = iRet;
+ return (iRet==0xFFFFFFFF);
+}
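+
+/* Worked example: given two segments whose sorted page lists are
+** segment 0 == {3, 7} and segment 1 == {5, 7}, successive calls return
+** pages 3, 5 and then 7. For page 7, which appears in both segments, the
+** scan from the highest segment downward reports the frame from segment 1:
+** the later occurrence in the WAL wins, as a reader requires.
+*/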
+
+/*
+** This function merges two sorted lists into a single sorted list.
+**
+** aLeft[] and aRight[] are arrays of indices. The sort key is
+** aContent[aLeft[]] and aContent[aRight[]]. Upon entry, the following
+** is guaranteed for all J<K:
+**
+** aContent[aLeft[J]] < aContent[aLeft[K]]
+** aContent[aRight[J]] < aContent[aRight[K]]
+**
+** This routine overwrites aRight[] with a new (probably longer) sequence
+** of indices such that the aRight[] contains every index that appears in
+** either aLeft[] or the old aRight[] and such that the second condition
+** above is still met.
+**
+** The aContent[aLeft[X]] values will be unique for all X. And the
+** aContent[aRight[X]] values will be unique too. But there might be
+** one or more combinations of X and Y such that
+**
+** aLeft[X]!=aRight[Y] && aContent[aLeft[X]] == aContent[aRight[Y]]
+**
+** When that happens, omit the aLeft[X] and use the aRight[Y] index.
+*/
+static void walMerge(
+ const u32 *aContent, /* Pages in wal - keys for the sort */
+ ht_slot *aLeft, /* IN: Left hand input list */
+ int nLeft, /* IN: Elements in array *paLeft */
+ ht_slot **paRight, /* IN/OUT: Right hand input list */
+ int *pnRight, /* IN/OUT: Elements in *paRight */
+ ht_slot *aTmp /* Temporary buffer */
+){
+ int iLeft = 0; /* Current index in aLeft */
+ int iRight = 0; /* Current index in aRight */
+ int iOut = 0; /* Current index in output buffer */
+ int nRight = *pnRight;
+ ht_slot *aRight = *paRight;
+
+ assert( nLeft>0 && nRight>0 );
+ while( iRight<nRight || iLeft<nLeft ){
+ ht_slot logpage;
+ Pgno dbpage;
+
+ if( (iLeft<nLeft)
+ && (iRight>=nRight || aContent[aLeft[iLeft]]<aContent[aRight[iRight]])
+ ){
+ logpage = aLeft[iLeft++];
+ }else{
+ logpage = aRight[iRight++];
+ }
+ dbpage = aContent[logpage];
+
+ aTmp[iOut++] = logpage;
+ if( iLeft<nLeft && aContent[aLeft[iLeft]]==dbpage ) iLeft++;
+
+ assert( iLeft>=nLeft || aContent[aLeft[iLeft]]>dbpage );
+ assert( iRight>=nRight || aContent[aRight[iRight]]>dbpage );
+ }
+
+ *paRight = aLeft;
+ *pnRight = iOut;
+ memcpy(aLeft, aTmp, sizeof(aTmp[0])*iOut);
+}
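+
+/* Worked example of walMerge(). Assume:
+**
+**   aContent[] = { 3, 7, 5, 7, 9 }
+**   aLeft[]    = { 0, 1 }        keys 3, 7
+**   aRight[]   = { 2, 3, 4 }     keys 5, 7, 9
+**
+** The merge emits indices in key order: 0 (key 3), then 2 (key 5), then
+** for the duplicate key 7 it takes aRight's index 3 and skips aLeft's
+** index 1, and finally 4 (key 9). On return, *paRight points at the
+** buffer holding { 0, 2, 3, 4 } and *pnRight==4.
+*/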
+
+/*
+** Sort the elements in list aList using aContent[] as the sort key.
+** Remove elements with duplicate keys, preferring to keep the
+** larger aList[] values.
+**
+** The aList[] entries are indices into aContent[]. The values in
+** aList[] are to be sorted so that for all J<K:
+**
+** aContent[aList[J]] < aContent[aList[K]]
+**
+** For any X and Y such that
+**
+** aContent[aList[X]] == aContent[aList[Y]]
+**
+** Keep the larger of the two values aList[X] and aList[Y] and discard
+** the smaller.
+*/
+static void walMergesort(
+ const u32 *aContent, /* Pages in wal */
+ ht_slot *aBuffer, /* Buffer of at least *pnList items to use */
+ ht_slot *aList, /* IN/OUT: List to sort */
+ int *pnList /* IN/OUT: Number of elements in aList[] */
+){
+ struct Sublist {
+ int nList; /* Number of elements in aList */
+ ht_slot *aList; /* Pointer to sub-list content */
+ };
+
+ const int nList = *pnList; /* Size of input list */
+ int nMerge = 0; /* Number of elements in list aMerge */
+ ht_slot *aMerge = 0; /* List to be merged */
+ int iList; /* Index into input list */
+ u32 iSub = 0; /* Index into aSub array */
+ struct Sublist aSub[13]; /* Array of sub-lists */
+
+ memset(aSub, 0, sizeof(aSub));
+ assert( nList<=HASHTABLE_NPAGE && nList>0 );
+ assert( HASHTABLE_NPAGE==(1<<(ArraySize(aSub)-1)) );
+
+ for(iList=0; iList<nList; iList++){
+ nMerge = 1;
+ aMerge = &aList[iList];
+ for(iSub=0; iList & (1<<iSub); iSub++){
+ struct Sublist *p;
+ assert( iSub<ArraySize(aSub) );
+ p = &aSub[iSub];
+ assert( p->aList && p->nList<=(1<<iSub) );
+ assert( p->aList==&aList[iList&~((2<<iSub)-1)] );
+ walMerge(aContent, p->aList, p->nList, &aMerge, &nMerge, aBuffer);
+ }
+ aSub[iSub].aList = aMerge;
+ aSub[iSub].nList = nMerge;
+ }
+
+ for(iSub++; iSub<ArraySize(aSub); iSub++){
+ if( nList & (1<<iSub) ){
+ struct Sublist *p;
+ assert( iSub<ArraySize(aSub) );
+ p = &aSub[iSub];
+ assert( p->nList<=(1<<iSub) );
+ assert( p->aList==&aList[nList&~((2<<iSub)-1)] );
+ walMerge(aContent, p->aList, p->nList, &aMerge, &nMerge, aBuffer);
+ }
+ }
+ assert( aMerge==aList );
+ *pnList = nMerge;
+
+#ifdef SQLITE_DEBUG
+ {
+ int i;
+ for(i=1; i<*pnList; i++){
+ assert( aContent[aList[i]] > aContent[aList[i-1]] );
+ }
+ }
+#endif
+}
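+
+/* The first loop above is a bottom-up merge-sort that treats iList as a
+** binary counter: each set bit of iList owns one sorted run of 2^bit
+** entries in aSub[]. For example, after consuming 6 elements (binary 110),
+** aSub[1] holds a sorted run of 2 entries and aSub[2] a run of 4. The
+** second loop then merges the leftover runs from the low bit upward.
+*/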
+
+/*
+** Free an iterator allocated by walIteratorInit().
+*/
+static void walIteratorFree(WalIterator *p){
+ sqlite3_free(p);
+}
+
+/*
+** Construct a WalIterator object that can be used to loop over all
+** pages in the WAL following frame nBackfill in ascending order. Frames
+** nBackfill or earlier may be included - excluding them is an optimization
+** only. The caller must hold the checkpoint lock.
+**
+** On success, make *pp point to the newly allocated WalIterator object
+** and return SQLITE_OK. Otherwise, return an error code. If this routine
+** returns an error, the value of *pp is undefined.
+**
+** The calling routine should invoke walIteratorFree() to destroy the
+** WalIterator object when it has finished with it.
+*/
+static int walIteratorInit(Wal *pWal, u32 nBackfill, WalIterator **pp){
+ WalIterator *p; /* Return value */
+ int nSegment; /* Number of segments to merge */
+ u32 iLast; /* Last frame in log */
+ sqlite3_int64 nByte; /* Number of bytes to allocate */
+ int i; /* Iterator variable */
+ ht_slot *aTmp; /* Temp space used by merge-sort */
+ int rc = SQLITE_OK; /* Return Code */
+
+ /* This routine only runs while holding the checkpoint lock. And
+ ** it only runs if there is actually content in the log (mxFrame>0).
+ */
+ assert( pWal->ckptLock && pWal->hdr.mxFrame>0 );
+ iLast = pWal->hdr.mxFrame;
+
+ /* Allocate space for the WalIterator object. */
+ nSegment = walFramePage(iLast) + 1;
+ nByte = sizeof(WalIterator)
+ + (nSegment-1)*sizeof(struct WalSegment)
+ + iLast*sizeof(ht_slot);
+ p = (WalIterator *)sqlite3_malloc64(nByte);
+ if( !p ){
+ return SQLITE_NOMEM_BKPT;
+ }
+ memset(p, 0, nByte);
+ p->nSegment = nSegment;
+
+ /* Allocate temporary space used by the merge-sort routine. This block
+ ** of memory will be freed before this function returns.
+ */
+ aTmp = (ht_slot *)sqlite3_malloc64(
+ sizeof(ht_slot) * (iLast>HASHTABLE_NPAGE?HASHTABLE_NPAGE:iLast)
+ );
+ if( !aTmp ){
+ rc = SQLITE_NOMEM_BKPT;
+ }
+
+ for(i=walFramePage(nBackfill+1); rc==SQLITE_OK && i<nSegment; i++){
+ WalHashLoc sLoc;
+
+ rc = walHashGet(pWal, i, &sLoc);
+ if( rc==SQLITE_OK ){
+ int j; /* Counter variable */
+ int nEntry; /* Number of entries in this segment */
+ ht_slot *aIndex; /* Sorted index for this segment */
+
+ sLoc.aPgno++;
+ if( (i+1)==nSegment ){
+ nEntry = (int)(iLast - sLoc.iZero);
+ }else{
+ nEntry = (int)((u32*)sLoc.aHash - (u32*)sLoc.aPgno);
+ }
+ aIndex = &((ht_slot *)&p->aSegment[p->nSegment])[sLoc.iZero];
+ sLoc.iZero++;
+
+ for(j=0; j<nEntry; j++){
+ aIndex[j] = (ht_slot)j;
+ }
+ walMergesort((u32 *)sLoc.aPgno, aTmp, aIndex, &nEntry);
+ p->aSegment[i].iZero = sLoc.iZero;
+ p->aSegment[i].nEntry = nEntry;
+ p->aSegment[i].aIndex = aIndex;
+ p->aSegment[i].aPgno = (u32 *)sLoc.aPgno;
+ }
+ }
+ sqlite3_free(aTmp);
+
+ if( rc!=SQLITE_OK ){
+ walIteratorFree(p);
+ p = 0;
+ }
+ *pp = p;
+ return rc;
+}
+
+#ifdef SQLITE_ENABLE_SETLK_TIMEOUT
+/*
+** Attempt to enable blocking locks. Blocking locks are enabled only if (a)
+** they are supported by the VFS, and (b) the database handle is configured
+** with a busy-timeout. Return 1 if blocking locks are successfully enabled,
+** or 0 otherwise.
+*/
+static int walEnableBlocking(Wal *pWal){
+ int res = 0;
+ if( pWal->db ){
+ int tmout = pWal->db->busyTimeout;
+ if( tmout ){
+ int rc;
+ rc = sqlite3OsFileControl(
+ pWal->pDbFd, SQLITE_FCNTL_LOCK_TIMEOUT, (void*)&tmout
+ );
+ res = (rc==SQLITE_OK);
+ }
+ }
+ return res;
+}
+
+/*
+** Disable blocking locks.
+*/
+static void walDisableBlocking(Wal *pWal){
+ int tmout = 0;
+ sqlite3OsFileControl(pWal->pDbFd, SQLITE_FCNTL_LOCK_TIMEOUT, (void*)&tmout);
+}
+
+/*
+** If parameter bLock is true, attempt to enable blocking locks, take
+** the WRITER lock, and then disable blocking locks. If blocking locks
+** cannot be enabled, no attempt to obtain the WRITER lock is made. Return
+** an SQLite error code if an error occurs, or SQLITE_OK otherwise. It is not
+** an error if blocking locks cannot be enabled.
+**
+** If the bLock parameter is false and the WRITER lock is held, release it.
+*/
+SQLITE_PRIVATE int sqlite3WalWriteLock(Wal *pWal, int bLock){
+ int rc = SQLITE_OK;
+ assert( pWal->readLock<0 || bLock==0 );
+ if( bLock ){
+ assert( pWal->db );
+ if( walEnableBlocking(pWal) ){
+ rc = walLockExclusive(pWal, WAL_WRITE_LOCK, 1);
+ if( rc==SQLITE_OK ){
+ pWal->writeLock = 1;
+ }
+ walDisableBlocking(pWal);
+ }
+ }else if( pWal->writeLock ){
+ walUnlockExclusive(pWal, WAL_WRITE_LOCK, 1);
+ pWal->writeLock = 0;
+ }
+ return rc;
+}
+
+/*
+** Set the database handle used to determine if blocking locks are required.
+*/
+SQLITE_PRIVATE void sqlite3WalDb(Wal *pWal, sqlite3 *db){
+ pWal->db = db;
+}
+
+/*
+** Take an exclusive WRITE lock. Blocking if so configured.
+*/
+static int walLockWriter(Wal *pWal){
+ int rc;
+ walEnableBlocking(pWal);
+ rc = walLockExclusive(pWal, WAL_WRITE_LOCK, 1);
+ walDisableBlocking(pWal);
+ return rc;
+}
+#else
+# define walEnableBlocking(x) 0
+# define walDisableBlocking(x)
+# define walLockWriter(pWal) walLockExclusive((pWal), WAL_WRITE_LOCK, 1)
+# define sqlite3WalDb(pWal, db)
+#endif /* ifdef SQLITE_ENABLE_SETLK_TIMEOUT */
+
+
+/*
+** Attempt to obtain the exclusive WAL lock defined by parameters lockIdx and
+** n. If the attempt fails and parameter xBusy is not NULL, then it is a
+** busy-handler function. Invoke it and retry the lock until either the
+** lock is successfully obtained or the busy-handler returns 0.
+*/
+static int walBusyLock(
+ Wal *pWal, /* WAL connection */
+ int (*xBusy)(void*), /* Function to call when busy */
+ void *pBusyArg, /* Context argument for xBusyHandler */
+ int lockIdx, /* Offset of first byte to lock */
+ int n /* Number of bytes to lock */
+){
+ int rc;
+ do {
+ rc = walLockExclusive(pWal, lockIdx, n);
+ }while( xBusy && rc==SQLITE_BUSY && xBusy(pBusyArg) );
+#ifdef SQLITE_ENABLE_SETLK_TIMEOUT
+ if( rc==SQLITE_BUSY_TIMEOUT ){
+ walDisableBlocking(pWal);
+ rc = SQLITE_BUSY;
+ }
+#endif
+ return rc;
+}
+
+/*
+** The cache of the wal-index header must be valid to call this function.
+** Return the page-size in bytes used by the database.
+*/
+static int walPagesize(Wal *pWal){
+ return (pWal->hdr.szPage&0xfe00) + ((pWal->hdr.szPage&0x0001)<<16);
+}
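+
+/* The 16-bit hdr.szPage field encodes page sizes of 512..65536 bytes.
+** The writer stores (szPage&0xff00)|(szPage>>16); this routine reverses
+** that mapping. Worked examples:
+**
+**   szPage 4096 (0x1000) -> stored 0x1000 -> (0x1000&0xfe00) + 0 == 4096
+**   szPage 65536         -> stored 0x0001 -> 0 + (1<<16)        == 65536
+*/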
+
+/*
+** The following is guaranteed when this function is called:
+**
+** a) the WRITER lock is held,
+** b) the entire log file has been checkpointed, and
+** c) any existing readers are reading exclusively from the database
+** file - there are no readers that may attempt to read a frame from
+** the log file.
+**
+** This function updates the shared-memory structures so that the next
+** client to write to the database (which may be this one) does so by
+** writing frames into the start of the log file.
+**
+** The value of parameter salt1 is used as the aSalt[1] value in the
+** new wal-index header. It should be passed a pseudo-random value (i.e.
+** one obtained from sqlite3_randomness()).
+*/
+static void walRestartHdr(Wal *pWal, u32 salt1){
+ volatile WalCkptInfo *pInfo = walCkptInfo(pWal);
+ int i; /* Loop counter */
+ u32 *aSalt = pWal->hdr.aSalt; /* Big-endian salt values */
+ pWal->nCkpt++;
+ pWal->hdr.mxFrame = 0;
+ sqlite3Put4byte((u8*)&aSalt[0], 1 + sqlite3Get4byte((u8*)&aSalt[0]));
+ memcpy(&pWal->hdr.aSalt[1], &salt1, 4);
+ walIndexWriteHdr(pWal);
+ AtomicStore(&pInfo->nBackfill, 0);
+ pInfo->nBackfillAttempted = 0;
+ pInfo->aReadMark[1] = 0;
+ for(i=2; i<WAL_NREADER; i++) pInfo->aReadMark[i] = READMARK_NOT_USED;
+ assert( pInfo->aReadMark[0]==0 );
+}
+
+/*
+** Copy as much content as we can from the WAL back into the database file
+** in response to an sqlite3_wal_checkpoint() request or the equivalent.
+**
+** The amount of information copied from WAL to database might be limited
+** by active readers. This routine will never overwrite a database page
+** that a concurrent reader might be using.
+**
+** All I/O barrier operations (a.k.a fsyncs) occur in this routine when
+** SQLite is in WAL-mode in synchronous=NORMAL. That means that if
+** checkpoints are always run by a background thread or background
+** process, foreground threads will never block on a lengthy fsync call.
+**
+** Fsync is called on the WAL before writing content out of the WAL and
+** into the database. This ensures that the new content is persistent
+** in the WAL and can be recovered following a power-loss or hard reset.
+**
+** Fsync is also called on the database file if (and only if) the entire
+** WAL content is copied into the database file. This second fsync makes
+** it safe to delete the WAL since the new content will persist in the
+** database file.
+**
+** This routine uses and updates the nBackfill field of the wal-index header.
+** This is the only routine that will increase the value of nBackfill.
+** (A WAL reset or recovery will revert nBackfill to zero, but not increase
+** its value.)
+**
+** The caller must be holding sufficient locks to ensure that no other
+** checkpoint is running (in any other thread or process) at the same
+** time.
+*/
+static int walCheckpoint(
+ Wal *pWal, /* Wal connection */
+ sqlite3 *db, /* Check for interrupts on this handle */
+ int eMode, /* One of PASSIVE, FULL or RESTART */
+ int (*xBusy)(void*), /* Function to call when busy */
+ void *pBusyArg, /* Context argument for xBusyHandler */
+ int sync_flags, /* Flags for OsSync() (or 0) */
+ u8 *zBuf /* Temporary buffer to use */
+){
+ int rc = SQLITE_OK; /* Return code */
+ int szPage; /* Database page-size */
+ WalIterator *pIter = 0; /* Wal iterator context */
+ u32 iDbpage = 0; /* Next database page to write */
+ u32 iFrame = 0; /* Wal frame containing data for iDbpage */
+ u32 mxSafeFrame; /* Max frame that can be backfilled */
+ u32 mxPage; /* Max database page to write */
+ int i; /* Loop counter */
+ volatile WalCkptInfo *pInfo; /* The checkpoint status information */
+
+ szPage = walPagesize(pWal);
+ testcase( szPage<=32768 );
+ testcase( szPage>=65536 );
+ pInfo = walCkptInfo(pWal);
+ if( pInfo->nBackfill<pWal->hdr.mxFrame ){
+
+ /* EVIDENCE-OF: R-62920-47450 The busy-handler callback is never invoked
+ ** in the SQLITE_CHECKPOINT_PASSIVE mode. */
+ assert( eMode!=SQLITE_CHECKPOINT_PASSIVE || xBusy==0 );
+
+ /* Compute in mxSafeFrame the index of the last frame of the WAL that is
+ ** safe to write into the database. Frames beyond mxSafeFrame might
+ ** overwrite database pages that are in use by active readers and thus
+ ** cannot be backfilled from the WAL.
+ */
+ mxSafeFrame = pWal->hdr.mxFrame;
+ mxPage = pWal->hdr.nPage;
+ for(i=1; i<WAL_NREADER; i++){
+ u32 y = AtomicLoad(pInfo->aReadMark+i);
+ if( mxSafeFrame>y ){
+ assert( y<=pWal->hdr.mxFrame );
+ rc = walBusyLock(pWal, xBusy, pBusyArg, WAL_READ_LOCK(i), 1);
+ if( rc==SQLITE_OK ){
+ u32 iMark = (i==1 ? mxSafeFrame : READMARK_NOT_USED);
+ AtomicStore(pInfo->aReadMark+i, iMark);
+ walUnlockExclusive(pWal, WAL_READ_LOCK(i), 1);
+ }else if( rc==SQLITE_BUSY ){
+ mxSafeFrame = y;
+ xBusy = 0;
+ }else{
+ goto walcheckpoint_out;
+ }
+ }
+ }
+
+ /* Allocate the iterator */
+ if( pInfo->nBackfill<mxSafeFrame ){
+ rc = walIteratorInit(pWal, pInfo->nBackfill, &pIter);
+ assert( rc==SQLITE_OK || pIter==0 );
+ }
+
+ if( pIter
+ && (rc = walBusyLock(pWal,xBusy,pBusyArg,WAL_READ_LOCK(0),1))==SQLITE_OK
+ ){
+ u32 nBackfill = pInfo->nBackfill;
+
+ pInfo->nBackfillAttempted = mxSafeFrame;
+
+ /* Sync the WAL to disk */
+ rc = sqlite3OsSync(pWal->pWalFd, CKPT_SYNC_FLAGS(sync_flags));
+
+ /* If the database may grow as a result of this checkpoint, hint
+ ** about the eventual size of the db file to the VFS layer.
+ */
+ if( rc==SQLITE_OK ){
+ i64 nReq = ((i64)mxPage * szPage);
+ i64 nSize; /* Current size of database file */
+ sqlite3OsFileControl(pWal->pDbFd, SQLITE_FCNTL_CKPT_START, 0);
+ rc = sqlite3OsFileSize(pWal->pDbFd, &nSize);
+ if( rc==SQLITE_OK && nSize<nReq ){
+ if( (nSize+65536+(i64)pWal->hdr.mxFrame*szPage)<nReq ){
+ /* If the size of the final database is larger than the current
+ ** database plus the amount of data in the wal file, plus the
+ ** maximum size of the pending-byte page (65536 bytes), then
+            ** there must be corruption somewhere. */
+ rc = SQLITE_CORRUPT_BKPT;
+ }else{
+ sqlite3OsFileControlHint(pWal->pDbFd, SQLITE_FCNTL_SIZE_HINT,&nReq);
+ }
+ }
+
+ }
+
+ /* Iterate through the contents of the WAL, copying data to the db file */
+ while( rc==SQLITE_OK && 0==walIteratorNext(pIter, &iDbpage, &iFrame) ){
+ i64 iOffset;
+ assert( walFramePgno(pWal, iFrame)==iDbpage );
+ if( AtomicLoad(&db->u1.isInterrupted) ){
+ rc = db->mallocFailed ? SQLITE_NOMEM_BKPT : SQLITE_INTERRUPT;
+ break;
+ }
+ if( iFrame<=nBackfill || iFrame>mxSafeFrame || iDbpage>mxPage ){
+ continue;
+ }
+ iOffset = walFrameOffset(iFrame, szPage) + WAL_FRAME_HDRSIZE;
+ /* testcase( IS_BIG_INT(iOffset) ); // requires a 4GiB WAL file */
+ rc = sqlite3OsRead(pWal->pWalFd, zBuf, szPage, iOffset);
+ if( rc!=SQLITE_OK ) break;
+ iOffset = (iDbpage-1)*(i64)szPage;
+ testcase( IS_BIG_INT(iOffset) );
+ rc = sqlite3OsWrite(pWal->pDbFd, zBuf, szPage, iOffset);
+ if( rc!=SQLITE_OK ) break;
+ }
+ sqlite3OsFileControl(pWal->pDbFd, SQLITE_FCNTL_CKPT_DONE, 0);
+
+ /* If work was actually accomplished... */
+ if( rc==SQLITE_OK ){
+ if( mxSafeFrame==walIndexHdr(pWal)->mxFrame ){
+ i64 szDb = pWal->hdr.nPage*(i64)szPage;
+ testcase( IS_BIG_INT(szDb) );
+ rc = sqlite3OsTruncate(pWal->pDbFd, szDb);
+ if( rc==SQLITE_OK ){
+ rc = sqlite3OsSync(pWal->pDbFd, CKPT_SYNC_FLAGS(sync_flags));
+ }
+ }
+ if( rc==SQLITE_OK ){
+ AtomicStore(&pInfo->nBackfill, mxSafeFrame);
+ }
+ }
+
+ /* Release the reader lock held while backfilling */
+ walUnlockExclusive(pWal, WAL_READ_LOCK(0), 1);
+ }
+
+ if( rc==SQLITE_BUSY ){
+ /* Reset the return code so as not to report a checkpoint failure
+ ** just because there are active readers. */
+ rc = SQLITE_OK;
+ }
+ }
+
+ /* If this is an SQLITE_CHECKPOINT_RESTART or TRUNCATE operation, and the
+ ** entire wal file has been copied into the database file, then block
+ ** until all readers have finished using the wal file. This ensures that
+ ** the next process to write to the database restarts the wal file.
+ */
+ if( rc==SQLITE_OK && eMode!=SQLITE_CHECKPOINT_PASSIVE ){
+ assert( pWal->writeLock );
+ if( pInfo->nBackfill<pWal->hdr.mxFrame ){
+ rc = SQLITE_BUSY;
+ }else if( eMode>=SQLITE_CHECKPOINT_RESTART ){
+ u32 salt1;
+ sqlite3_randomness(4, &salt1);
+ assert( pInfo->nBackfill==pWal->hdr.mxFrame );
+ rc = walBusyLock(pWal, xBusy, pBusyArg, WAL_READ_LOCK(1), WAL_NREADER-1);
+ if( rc==SQLITE_OK ){
+ if( eMode==SQLITE_CHECKPOINT_TRUNCATE ){
+ /* IMPLEMENTATION-OF: R-44699-57140 This mode works the same way as
+ ** SQLITE_CHECKPOINT_RESTART with the addition that it also
+ ** truncates the log file to zero bytes just prior to a
+ ** successful return.
+ **
+ ** In theory, it might be safe to do this without updating the
+ ** wal-index header in shared memory, as all subsequent reader or
+ ** writer clients should see that the entire log file has been
+ ** checkpointed and behave accordingly. This seems unsafe though,
+ ** as it would leave the system in a state where the contents of
+ ** the wal-index header do not match the contents of the
+ ** file-system. To avoid this, update the wal-index header to
+ ** indicate that the log file contains zero valid frames. */
+ walRestartHdr(pWal, salt1);
+ rc = sqlite3OsTruncate(pWal->pWalFd, 0);
+ }
+ walUnlockExclusive(pWal, WAL_READ_LOCK(1), WAL_NREADER-1);
+ }
+ }
+ }
+
+ walcheckpoint_out:
+ walIteratorFree(pIter);
+ return rc;
+}
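+
+/* Minimal application-level sketch of how the checkpoint modes handled
+** above are reached through the public API. Kept under "#if 0" as an
+** illustration only; error handling omitted.
+*/
+#if 0
+static void example_checkpoint(sqlite3 *db){
+  int nLog = 0;    /* OUT: total frames currently in the WAL */
+  int nCkpt = 0;   /* OUT: frames copied into the database */
+  /* PASSIVE never invokes the busy-handler; RESTART blocks until readers
+  ** move off the old WAL; TRUNCATE additionally truncates the WAL file
+  ** to zero bytes on success, as implemented above. */
+  sqlite3_wal_checkpoint_v2(db, "main", SQLITE_CHECKPOINT_TRUNCATE,
+                            &nLog, &nCkpt);
+}
+#endif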
+
+/*
+** If the WAL file is currently larger than nMax bytes in size, truncate
+** it to exactly nMax bytes. If an error occurs while doing so, ignore it.
+*/
+static void walLimitSize(Wal *pWal, i64 nMax){
+ i64 sz;
+ int rx;
+ sqlite3BeginBenignMalloc();
+ rx = sqlite3OsFileSize(pWal->pWalFd, &sz);
+ if( rx==SQLITE_OK && (sz > nMax ) ){
+ rx = sqlite3OsTruncate(pWal->pWalFd, nMax);
+ }
+ sqlite3EndBenignMalloc();
+ if( rx ){
+ sqlite3_log(rx, "cannot limit WAL size: %s", pWal->zWalName);
+ }
+}
+
+/*
+** Close a connection to a log file.
+*/
+SQLITE_PRIVATE int sqlite3WalClose(
+ Wal *pWal, /* Wal to close */
+ sqlite3 *db, /* For interrupt flag */
+ int sync_flags, /* Flags to pass to OsSync() (or 0) */
+ int nBuf,
+ u8 *zBuf /* Buffer of at least nBuf bytes */
+){
+ int rc = SQLITE_OK;
+ if( pWal ){
+ int isDelete = 0; /* True to unlink wal and wal-index files */
+
+ /* If an EXCLUSIVE lock can be obtained on the database file (using the
+    ** ordinary, rollback-mode locking methods), this guarantees that the
+ ** connection associated with this log file is the only connection to
+ ** the database. In this case checkpoint the database and unlink both
+ ** the wal and wal-index files.
+ **
+ ** The EXCLUSIVE lock is not released before returning.
+ */
+ if( zBuf!=0
+ && SQLITE_OK==(rc = sqlite3OsLock(pWal->pDbFd, SQLITE_LOCK_EXCLUSIVE))
+ ){
+ if( pWal->exclusiveMode==WAL_NORMAL_MODE ){
+ pWal->exclusiveMode = WAL_EXCLUSIVE_MODE;
+ }
+ rc = sqlite3WalCheckpoint(pWal, db,
+ SQLITE_CHECKPOINT_PASSIVE, 0, 0, sync_flags, nBuf, zBuf, 0, 0
+ );
+ if( rc==SQLITE_OK ){
+ int bPersist = -1;
+ sqlite3OsFileControlHint(
+ pWal->pDbFd, SQLITE_FCNTL_PERSIST_WAL, &bPersist
+ );
+ if( bPersist!=1 ){
+ /* Try to delete the WAL file if the checkpoint completed and
+          ** fsynced (rc==SQLITE_OK) and if we are not in persistent-wal
+ ** mode (!bPersist) */
+ isDelete = 1;
+ }else if( pWal->mxWalSize>=0 ){
+ /* Try to truncate the WAL file to zero bytes if the checkpoint
+ ** completed and fsynced (rc==SQLITE_OK) and we are in persistent
+ ** WAL mode (bPersist) and if the PRAGMA journal_size_limit is a
+ ** non-negative value (pWal->mxWalSize>=0). Note that we truncate
+ ** to zero bytes as truncating to the journal_size_limit might
+ ** leave a corrupt WAL file on disk. */
+ walLimitSize(pWal, 0);
+ }
+ }
+ }
+
+ walIndexClose(pWal, isDelete);
+ sqlite3OsClose(pWal->pWalFd);
+ if( isDelete ){
+ sqlite3BeginBenignMalloc();
+ sqlite3OsDelete(pWal->pVfs, pWal->zWalName, 0);
+ sqlite3EndBenignMalloc();
+ }
+ WALTRACE(("WAL%p: closed\n", pWal));
+ sqlite3_free((void *)pWal->apWiData);
+ sqlite3_free(pWal);
+ }
+ return rc;
+}
+
+/*
+** Try to read the wal-index header. Return 0 on success and 1 if
+** there is a problem.
+**
+** The wal-index is in shared memory. Another thread or process might
+** be writing the header at the same time this procedure is trying to
+** read it, which might result in inconsistency. A dirty read is detected
+** by verifying that both copies of the header are the same and also by
+** a checksum on the header.
+**
+** If and only if the read is consistent and the header is different from
+** pWal->hdr, then pWal->hdr is updated to the content of the new header
+** and *pChanged is set to 1.
+**
+** If the checksum cannot be verified return non-zero. If the header
+** is read successfully and the checksum verified, return zero.
+*/
+static SQLITE_NO_TSAN int walIndexTryHdr(Wal *pWal, int *pChanged){
+ u32 aCksum[2]; /* Checksum on the header content */
+ WalIndexHdr h1, h2; /* Two copies of the header content */
+ WalIndexHdr volatile *aHdr; /* Header in shared memory */
+
+ /* The first page of the wal-index must be mapped at this point. */
+ assert( pWal->nWiData>0 && pWal->apWiData[0] );
+
+ /* Read the header. This might happen concurrently with a write to the
+  ** same area of shared memory on a different CPU of an SMP system,
+ ** meaning it is possible that an inconsistent snapshot is read
+ ** from the file. If this happens, return non-zero.
+ **
+ ** tag-20200519-1:
+ ** There are two copies of the header at the beginning of the wal-index.
+ ** When reading, read [0] first then [1]. Writes are in the reverse order.
+ ** Memory barriers are used to prevent the compiler or the hardware from
+ ** reordering the reads and writes. TSAN and similar tools can sometimes
+ ** give false-positive warnings about these accesses because the tools do not
+ ** account for the double-read and the memory barrier. The use of mutexes
+ ** here would be problematic as the memory being accessed is potentially
+  ** shared among multiple processes and not all mutex implementations work
+ ** reliably in that environment.
+ */
+ aHdr = walIndexHdr(pWal);
+ memcpy(&h1, (void *)&aHdr[0], sizeof(h1)); /* Possible TSAN false-positive */
+ walShmBarrier(pWal);
+ memcpy(&h2, (void *)&aHdr[1], sizeof(h2));
+
+ if( memcmp(&h1, &h2, sizeof(h1))!=0 ){
+ return 1; /* Dirty read */
+ }
+ if( h1.isInit==0 ){
+ return 1; /* Malformed header - probably all zeros */
+ }
+ walChecksumBytes(1, (u8*)&h1, sizeof(h1)-sizeof(h1.aCksum), 0, aCksum);
+ if( aCksum[0]!=h1.aCksum[0] || aCksum[1]!=h1.aCksum[1] ){
+ return 1; /* Checksum does not match */
+ }
+
+ if( memcmp(&pWal->hdr, &h1, sizeof(WalIndexHdr)) ){
+ *pChanged = 1;
+ memcpy(&pWal->hdr, &h1, sizeof(WalIndexHdr));
+ pWal->szPage = (pWal->hdr.szPage&0xfe00) + ((pWal->hdr.szPage&0x0001)<<16);
+ testcase( pWal->szPage<=32768 );
+ testcase( pWal->szPage>=65536 );
+ }
+
+ /* The header was successfully read. Return zero. */
+ return 0;
+}
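+
+/* Writer-side counterpart of tag-20200519-1: walIndexWriteHdr() updates
+** the two header copies in the opposite order to the reads above, so a
+** torn update is always detectable by the memcmp():
+**
+**   writer:  write aHdr[1];  barrier;  write aHdr[0]
+**   reader:  read  aHdr[0];  barrier;  read  aHdr[1];  compare
+**
+** If a read overlaps a write, the two copies differ and the caller
+** retries rather than trusting the snapshot.
+*/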
+
+/*
+** This is the value that walTryBeginRead returns when it needs to
+** be retried.
+*/
+#define WAL_RETRY (-1)
+
+/*
+** Read the wal-index header from the wal-index and into pWal->hdr.
+** If the wal-header appears to be corrupt, try to reconstruct the
+** wal-index from the WAL before returning.
+**
+** Set *pChanged to 1 if the wal-index header value in pWal->hdr is
+** changed by this operation. If pWal->hdr is unchanged, set *pChanged
+** to 0.
+**
+** If the wal-index header is successfully read, return SQLITE_OK.
+** Otherwise an SQLite error code.
+*/
+static int walIndexReadHdr(Wal *pWal, int *pChanged){
+ int rc; /* Return code */
+ int badHdr; /* True if a header read failed */
+ volatile u32 *page0; /* Chunk of wal-index containing header */
+
+ /* Ensure that page 0 of the wal-index (the page that contains the
+ ** wal-index header) is mapped. Return early if an error occurs here.
+ */
+ assert( pChanged );
+ rc = walIndexPage(pWal, 0, &page0);
+ if( rc!=SQLITE_OK ){
+ assert( rc!=SQLITE_READONLY ); /* READONLY changed to OK in walIndexPage */
+ if( rc==SQLITE_READONLY_CANTINIT ){
+ /* The SQLITE_READONLY_CANTINIT return means that the shared-memory
+ ** was openable but is not writable, and this thread is unable to
+ ** confirm that another write-capable connection has the shared-memory
+ ** open, and hence the content of the shared-memory is unreliable,
+ ** since the shared-memory might be inconsistent with the WAL file
+ ** and there is no writer on hand to fix it. */
+ assert( page0==0 );
+ assert( pWal->writeLock==0 );
+ assert( pWal->readOnly & WAL_SHM_RDONLY );
+ pWal->bShmUnreliable = 1;
+ pWal->exclusiveMode = WAL_HEAPMEMORY_MODE;
+ *pChanged = 1;
+ }else{
+ return rc; /* Any other non-OK return is just an error */
+ }
+ }else{
+ /* page0 can be NULL if the SHM is zero bytes in size and pWal->writeLock
+ ** is zero, which prevents the SHM from growing */
+ testcase( page0!=0 );
+ }
+ assert( page0!=0 || pWal->writeLock==0 );
+
+ /* If the first page of the wal-index has been mapped, try to read the
+ ** wal-index header immediately, without holding any lock. This usually
+ ** works, but may fail if the wal-index header is corrupt or currently
+ ** being modified by another thread or process.
+ */
+ badHdr = (page0 ? walIndexTryHdr(pWal, pChanged) : 1);
+
+ /* If the first attempt failed, it might have been due to a race
+ ** with a writer. So get a WRITE lock and try again.
+ */
+ if( badHdr ){
+ if( pWal->bShmUnreliable==0 && (pWal->readOnly & WAL_SHM_RDONLY) ){
+ if( SQLITE_OK==(rc = walLockShared(pWal, WAL_WRITE_LOCK)) ){
+ walUnlockShared(pWal, WAL_WRITE_LOCK);
+ rc = SQLITE_READONLY_RECOVERY;
+ }
+ }else{
+ int bWriteLock = pWal->writeLock;
+ if( bWriteLock || SQLITE_OK==(rc = walLockWriter(pWal)) ){
+ pWal->writeLock = 1;
+ if( SQLITE_OK==(rc = walIndexPage(pWal, 0, &page0)) ){
+ badHdr = walIndexTryHdr(pWal, pChanged);
+ if( badHdr ){
+ /* If the wal-index header is still malformed even while holding
+ ** a WRITE lock, it can only mean that the header is corrupted and
+ ** needs to be reconstructed. So run recovery to do exactly that.
+ */
+ rc = walIndexRecover(pWal);
+ *pChanged = 1;
+ }
+ }
+ if( bWriteLock==0 ){
+ pWal->writeLock = 0;
+ walUnlockExclusive(pWal, WAL_WRITE_LOCK, 1);
+ }
+ }
+ }
+ }
+
+ /* If the header is read successfully, check the version number to make
+ ** sure the wal-index was not constructed with some future format that
+ ** this version of SQLite cannot understand.
+ */
+ if( badHdr==0 && pWal->hdr.iVersion!=WALINDEX_MAX_VERSION ){
+ rc = SQLITE_CANTOPEN_BKPT;
+ }
+ if( pWal->bShmUnreliable ){
+ if( rc!=SQLITE_OK ){
+ walIndexClose(pWal, 0);
+ pWal->bShmUnreliable = 0;
+ assert( pWal->nWiData>0 && pWal->apWiData[0]==0 );
+ /* walIndexRecover() might have returned SHORT_READ if a concurrent
+ ** writer truncated the WAL out from under it. If that happens, it
+ ** indicates that a writer has fixed the SHM file for us, so retry */
+ if( rc==SQLITE_IOERR_SHORT_READ ) rc = WAL_RETRY;
+ }
+ pWal->exclusiveMode = WAL_NORMAL_MODE;
+ }
+
+ return rc;
+}
+
+/*
+** Open a transaction in a connection where the shared-memory is read-only
+** and where we cannot verify that there is a separate write-capable connection
+** on hand to keep the shared-memory up-to-date with the WAL file.
+**
+** This can happen, for example, when the shared-memory is implemented by
+** memory-mapping a *-shm file, where a prior writer has shut down and
+** left the *-shm file on disk, and now the present connection is trying
+** to use that database but lacks write permission on the *-shm file.
+** Other scenarios are also possible, depending on the VFS implementation.
+**
+** Precondition:
+**
+** The *-wal file has been read and an appropriate wal-index has been
+** constructed in pWal->apWiData[] using heap memory instead of shared
+** memory.
+**
+** If this function returns SQLITE_OK, then the read transaction has
+** been successfully opened. In this case output variable (*pChanged)
+** is set to true before returning if the caller should discard the
+** contents of the page cache before proceeding. Or, if it returns
+** WAL_RETRY, then the heap memory wal-index has been discarded and
+** the caller should retry opening the read transaction from the
+** beginning (including attempting to map the *-shm file).
+**
+** If an error occurs, an SQLite error code is returned.
+*/
+static int walBeginShmUnreliable(Wal *pWal, int *pChanged){
+ i64 szWal; /* Size of wal file on disk in bytes */
+ i64 iOffset; /* Current offset when reading wal file */
+ u8 aBuf[WAL_HDRSIZE]; /* Buffer to load WAL header into */
+ u8 *aFrame = 0; /* Malloc'd buffer to load entire frame */
+ int szFrame; /* Number of bytes in buffer aFrame[] */
+ u8 *aData; /* Pointer to data part of aFrame buffer */
+ volatile void *pDummy; /* Dummy argument for xShmMap */
+ int rc; /* Return code */
+ u32 aSaveCksum[2]; /* Saved copy of pWal->hdr.aFrameCksum */
+
+ assert( pWal->bShmUnreliable );
+ assert( pWal->readOnly & WAL_SHM_RDONLY );
+ assert( pWal->nWiData>0 && pWal->apWiData[0] );
+
+ /* Take WAL_READ_LOCK(0). This has the effect of preventing any
+ ** writers from running a checkpoint, but does not stop them
+ ** from running recovery. */
+ rc = walLockShared(pWal, WAL_READ_LOCK(0));
+ if( rc!=SQLITE_OK ){
+ if( rc==SQLITE_BUSY ) rc = WAL_RETRY;
+ goto begin_unreliable_shm_out;
+ }
+ pWal->readLock = 0;
+
+ /* Check to see if a separate writer has attached to the shared-memory area,
+ ** thus making the shared-memory "reliable" again. Do this by invoking
+ ** the xShmMap() routine of the VFS and looking to see if the return
+ ** is SQLITE_READONLY instead of SQLITE_READONLY_CANTINIT.
+ **
+ ** If the shared-memory is now "reliable" return WAL_RETRY, which will
+ ** cause the heap-memory WAL-index to be discarded and the actual
+ ** shared memory to be used in its place.
+ **
+ ** This step is important because, even though this connection is holding
+ ** the WAL_READ_LOCK(0) which prevents a checkpoint, a writer might
+ ** have already checkpointed the WAL file and, while the current
+ ** is active, wrap the WAL and start overwriting frames that this
+ ** process wants to use.
+ **
+ ** Once sqlite3OsShmMap() has been called for an sqlite3_file and has
+ ** returned any SQLITE_READONLY value, it must return only SQLITE_READONLY
+ ** or SQLITE_READONLY_CANTINIT or some error for all subsequent invocations,
+ ** even if some external agent does a "chmod" to make the shared-memory
+ ** writable by us, until sqlite3OsShmUnmap() has been called.
+ ** This is a requirement on the VFS implementation.
+ */
+ rc = sqlite3OsShmMap(pWal->pDbFd, 0, WALINDEX_PGSZ, 0, &pDummy);
+ assert( rc!=SQLITE_OK ); /* SQLITE_OK not possible for read-only connection */
+ if( rc!=SQLITE_READONLY_CANTINIT ){
+ rc = (rc==SQLITE_READONLY ? WAL_RETRY : rc);
+ goto begin_unreliable_shm_out;
+ }
+
+ /* We reach this point only if the real shared-memory is still unreliable.
+ ** Assume the in-memory WAL-index substitute is correct and load it
+ ** into pWal->hdr.
+ */
+ memcpy(&pWal->hdr, (void*)walIndexHdr(pWal), sizeof(WalIndexHdr));
+
+ /* Make sure some writer hasn't come in and changed the WAL file out
+ ** from under us, then disconnected, while we were not looking.
+ */
+ rc = sqlite3OsFileSize(pWal->pWalFd, &szWal);
+ if( rc!=SQLITE_OK ){
+ goto begin_unreliable_shm_out;
+ }
+ if( szWal<WAL_HDRSIZE ){
+ /* If the wal file is too small to contain a wal-header and the
+ ** wal-index header has mxFrame==0, then it must be safe to proceed
+ ** reading the database file only. However, the page cache cannot
+ ** be trusted, as a read/write connection may have connected, written
+ ** the db, run a checkpoint, truncated the wal file and disconnected
+ ** since this client's last read transaction. */
+ *pChanged = 1;
+ rc = (pWal->hdr.mxFrame==0 ? SQLITE_OK : WAL_RETRY);
+ goto begin_unreliable_shm_out;
+ }
+
+ /* Check the salt keys at the start of the wal file still match. */
+ rc = sqlite3OsRead(pWal->pWalFd, aBuf, WAL_HDRSIZE, 0);
+ if( rc!=SQLITE_OK ){
+ goto begin_unreliable_shm_out;
+ }
+ if( memcmp(&pWal->hdr.aSalt, &aBuf[16], 8) ){
+ /* Some writer has wrapped the WAL file while we were not looking.
+ ** Return WAL_RETRY which will cause the in-memory WAL-index to be
+ ** rebuilt. */
+ rc = WAL_RETRY;
+ goto begin_unreliable_shm_out;
+ }
+
+ /* Allocate a buffer to read frames into */
+ szFrame = pWal->hdr.szPage + WAL_FRAME_HDRSIZE;
+ aFrame = (u8 *)sqlite3_malloc64(szFrame);
+ if( aFrame==0 ){
+ rc = SQLITE_NOMEM_BKPT;
+ goto begin_unreliable_shm_out;
+ }
+ aData = &aFrame[WAL_FRAME_HDRSIZE];
+
+ /* Check to see if a complete transaction has been appended to the
+ ** wal file since the heap-memory wal-index was created. If so, the
+ ** heap-memory wal-index is discarded and WAL_RETRY returned to
+ ** the caller. */
+ aSaveCksum[0] = pWal->hdr.aFrameCksum[0];
+ aSaveCksum[1] = pWal->hdr.aFrameCksum[1];
+ for(iOffset=walFrameOffset(pWal->hdr.mxFrame+1, pWal->hdr.szPage);
+ iOffset+szFrame<=szWal;
+ iOffset+=szFrame
+ ){
+ u32 pgno; /* Database page number for frame */
+ u32 nTruncate; /* dbsize field from frame header */
+
+ /* Read and decode the next log frame. */
+ rc = sqlite3OsRead(pWal->pWalFd, aFrame, szFrame, iOffset);
+ if( rc!=SQLITE_OK ) break;
+ if( !walDecodeFrame(pWal, &pgno, &nTruncate, aData, aFrame) ) break;
+
+ /* If nTruncate is non-zero, then a complete transaction has been
+ ** appended to this wal file. Set rc to WAL_RETRY and break out of
+ ** the loop. */
+ if( nTruncate ){
+ rc = WAL_RETRY;
+ break;
+ }
+ }
+ pWal->hdr.aFrameCksum[0] = aSaveCksum[0];
+ pWal->hdr.aFrameCksum[1] = aSaveCksum[1];
+
+ begin_unreliable_shm_out:
+ sqlite3_free(aFrame);
+ if( rc!=SQLITE_OK ){
+ int i;
+ for(i=0; i<pWal->nWiData; i++){
+ sqlite3_free((void*)pWal->apWiData[i]);
+ pWal->apWiData[i] = 0;
+ }
+ pWal->bShmUnreliable = 0;
+ sqlite3WalEndReadTransaction(pWal);
+ *pChanged = 1;
+ }
+ return rc;
+}
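+
+/* A minimal sketch (not compiled into the build) of the salt check
+** performed above. The two 32-bit salt values live at bytes 16-23 of
+** the 32-byte WAL file header; any mismatch with the copy cached in
+** the wal-index header means a writer has reset the WAL since the
+** heap-memory wal-index was built.
+*/
+#if 0
+static int walSaltsMatchSketch(const u8 *aWalHdr, const WalIndexHdr *pHdr){
+  /* Byte-for-byte comparison, exactly as the memcmp() above does */
+  return memcmp(&pHdr->aSalt, &aWalHdr[16], 8)==0;
+}
+#endif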
+
+/*
+** Attempt to start a read transaction. This might fail due to a race or
+** other transient condition. When that happens, it returns WAL_RETRY to
+** indicate to the caller that it is safe to retry immediately.
+**
+** On success return SQLITE_OK. On a permanent failure (such as an
+** I/O error or an SQLITE_BUSY because another process is running
+** recovery) return a positive error code.
+**
+** The useWal parameter is true to force the use of the WAL and disable
+** the case where the WAL is bypassed because it has been completely
+** checkpointed. If useWal==0 then this routine calls walIndexReadHdr()
+** to make a copy of the wal-index header into pWal->hdr. If the
+** wal-index header has changed, *pChanged is set to 1 (as an indication
+** to the caller that the local page cache is obsolete and needs to be
+** flushed.) When useWal==1, the wal-index header is assumed to already
+** be loaded and the pChanged parameter is unused.
+**
+** The caller must set the cnt parameter to the number of prior calls to
+** this routine during the current read attempt that returned WAL_RETRY.
+** This routine will start taking more aggressive measures to clear the
+** race conditions after multiple WAL_RETRY returns, and after an excessive
+** number of errors will ultimately return SQLITE_PROTOCOL. The
+** SQLITE_PROTOCOL return indicates that some other process has gone rogue
+** and is not honoring the locking protocol. There is a vanishingly small
+** chance that SQLITE_PROTOCOL could be returned because of a run of really
+** bad luck when there is lots of contention for the wal-index, but that
+** possibility is so small that it can be safely neglected, we believe.
+**
+** On success, this routine obtains a read lock on
+** WAL_READ_LOCK(pWal->readLock). The pWal->readLock integer is
+** in the range 0 <= pWal->readLock < WAL_NREADER. If pWal->readLock==(-1)
+** that means the Wal does not hold any read lock. The reader must not
+** access any database page that is modified by a WAL frame up to and
+** including frame number aReadMark[pWal->readLock]. The reader will
+** use WAL frames up to and including pWal->hdr.mxFrame if pWal->readLock>0.
+** Or if pWal->readLock==0, then the reader will ignore the WAL
+** completely and get all content directly from the database file.
+** If the useWal parameter is 1 then the WAL will never be ignored and
+** this routine will always set pWal->readLock>0 on success.
+** When the read transaction is completed, the caller must release the
+** lock on WAL_READ_LOCK(pWal->readLock) and set pWal->readLock to -1.
+**
+** This routine uses the nBackfill and aReadMark[] fields of the header
+** to select a particular WAL_READ_LOCK() that strives to let the
+** checkpoint process do as much work as possible. This routine might
+** update values of the aReadMark[] array in the header, but if it does
+** so it takes care to hold an exclusive lock on the corresponding
+** WAL_READ_LOCK() while changing values.
+*/
+static int walTryBeginRead(Wal *pWal, int *pChanged, int useWal, int cnt){
+ volatile WalCkptInfo *pInfo; /* Checkpoint information in wal-index */
+ u32 mxReadMark; /* Largest aReadMark[] value */
+ int mxI; /* Index of largest aReadMark[] value */
+ int i; /* Loop counter */
+ int rc = SQLITE_OK; /* Return code */
+ u32 mxFrame; /* Wal frame to lock to */
+
+ assert( pWal->readLock<0 ); /* Not currently locked */
+
+ /* useWal may only be set for read/write connections */
+ assert( (pWal->readOnly & WAL_SHM_RDONLY)==0 || useWal==0 );
+
+ /* Take steps to avoid spinning forever if there is a protocol error.
+ **
+ ** Circumstances that cause a RETRY should only last for the briefest
+ ** intervals of time. No I/O or other system calls are done while the
+ ** locks are held, so the locks should not be held for very long. But
+ ** if we are unlucky, another process that is holding a lock might get
+ ** paged out or take a page-fault that is time-consuming to resolve,
+ ** during the few nanoseconds that it is holding the lock. In that case,
+ ** it might take longer than normal for the lock to free.
+ **
+ ** After 5 RETRYs, we begin calling sqlite3OsSleep(). The first few
+ ** calls to sqlite3OsSleep() have a delay of 1 microsecond. Really this
+ ** is more of a scheduler yield than an actual delay. But on the 10th
+ ** and subsequent retries, the delays start becoming longer and longer,
+ ** so that on the 100th (and last) RETRY we delay for 323 milliseconds.
+ ** The total delay time before giving up is less than 10 seconds.
+ */
+ if( cnt>5 ){
+ int nDelay = 1; /* Pause time in microseconds */
+ if( cnt>100 ){
+ VVA_ONLY( pWal->lockError = 1; )
+ return SQLITE_PROTOCOL;
+ }
+ if( cnt>=10 ) nDelay = (cnt-9)*(cnt-9)*39;
+ sqlite3OsSleep(pWal->pVfs, nDelay);
+ }
+
+ if( !useWal ){
+ assert( rc==SQLITE_OK );
+ if( pWal->bShmUnreliable==0 ){
+ rc = walIndexReadHdr(pWal, pChanged);
+ }
+ if( rc==SQLITE_BUSY ){
+ /* If there is not a recovery running in another thread or process
+ ** then convert BUSY errors to WAL_RETRY. If recovery is known to
+ ** be running, convert BUSY to BUSY_RECOVERY. There is a race here
+ ** which might cause WAL_RETRY to be returned even if BUSY_RECOVERY
+ ** would be technically correct. But the race is benign since with
+ ** WAL_RETRY this routine will be called again and will probably be
+ ** right on the second iteration.
+ */
+ if( pWal->apWiData[0]==0 ){
+ /* This branch is taken when the xShmMap() method returns SQLITE_BUSY.
+ ** We assume this is a transient condition, so return WAL_RETRY. The
+ ** xShmMap() implementation used by the default unix and win32 VFS
+ ** modules may return SQLITE_BUSY due to a race condition in the
+ ** code that determines whether or not the shared-memory region
+ ** must be zeroed before the requested page is returned.
+ */
+ rc = WAL_RETRY;
+ }else if( SQLITE_OK==(rc = walLockShared(pWal, WAL_RECOVER_LOCK)) ){
+ walUnlockShared(pWal, WAL_RECOVER_LOCK);
+ rc = WAL_RETRY;
+ }else if( rc==SQLITE_BUSY ){
+ rc = SQLITE_BUSY_RECOVERY;
+ }
+ }
+ if( rc!=SQLITE_OK ){
+ return rc;
+ }else if( pWal->bShmUnreliable ){
+ return walBeginShmUnreliable(pWal, pChanged);
+ }
+ }
+
+ assert( pWal->nWiData>0 );
+ assert( pWal->apWiData[0]!=0 );
+ pInfo = walCkptInfo(pWal);
+ if( !useWal && AtomicLoad(&pInfo->nBackfill)==pWal->hdr.mxFrame
+#ifdef SQLITE_ENABLE_SNAPSHOT
+ && (pWal->pSnapshot==0 || pWal->hdr.mxFrame==0)
+#endif
+ ){
+ /* The WAL has been completely backfilled (or it is empty)
+ ** and can be safely ignored.
+ */
+ rc = walLockShared(pWal, WAL_READ_LOCK(0));
+ walShmBarrier(pWal);
+ if( rc==SQLITE_OK ){
+ if( memcmp((void *)walIndexHdr(pWal), &pWal->hdr, sizeof(WalIndexHdr)) ){
+ /* It is not safe to allow the reader to continue here if frames
+ ** may have been appended to the log before READ_LOCK(0) was obtained.
+ ** When holding READ_LOCK(0), the reader ignores the entire log file,
+ ** which implies that the database file contains a trustworthy
+ ** snapshot. Since holding READ_LOCK(0) prevents a checkpoint from
+ ** happening, this is usually correct.
+ **
+ ** However, if frames have been appended to the log (or if the log
+ ** has been wrapped and rewritten, for that matter) before the
+ ** READ_LOCK(0) is obtained, that is not necessarily true. A
+ ** checkpointer may have started to backfill the appended frames but
+ ** crashed before it finished, leaving a corrupt image in the
+ ** database file.
+ */
+ walUnlockShared(pWal, WAL_READ_LOCK(0));
+ return WAL_RETRY;
+ }
+ pWal->readLock = 0;
+ return SQLITE_OK;
+ }else if( rc!=SQLITE_BUSY ){
+ return rc;
+ }
+ }
+
+ /* If we get this far, it means that the reader will want to use
+ ** the WAL to get at content from recent commits. The job now is
+ ** to select one of the aReadMark[] entries that is closest to
+ ** but not exceeding pWal->hdr.mxFrame and lock that entry.
+ */
+ mxReadMark = 0;
+ mxI = 0;
+ mxFrame = pWal->hdr.mxFrame;
+#ifdef SQLITE_ENABLE_SNAPSHOT
+ if( pWal->pSnapshot && pWal->pSnapshot->mxFrame<mxFrame ){
+ mxFrame = pWal->pSnapshot->mxFrame;
+ }
+#endif
+ for(i=1; i<WAL_NREADER; i++){
+ u32 thisMark = AtomicLoad(pInfo->aReadMark+i);
+ if( mxReadMark<=thisMark && thisMark<=mxFrame ){
+ assert( thisMark!=READMARK_NOT_USED );
+ mxReadMark = thisMark;
+ mxI = i;
+ }
+ }
+ if( (pWal->readOnly & WAL_SHM_RDONLY)==0
+ && (mxReadMark<mxFrame || mxI==0)
+ ){
+ for(i=1; i<WAL_NREADER; i++){
+ rc = walLockExclusive(pWal, WAL_READ_LOCK(i), 1);
+ if( rc==SQLITE_OK ){
+ AtomicStore(pInfo->aReadMark+i,mxFrame);
+ mxReadMark = mxFrame;
+ mxI = i;
+ walUnlockExclusive(pWal, WAL_READ_LOCK(i), 1);
+ break;
+ }else if( rc!=SQLITE_BUSY ){
+ return rc;
+ }
+ }
+ }
+ if( mxI==0 ){
+ assert( rc==SQLITE_BUSY || (pWal->readOnly & WAL_SHM_RDONLY)!=0 );
+ return rc==SQLITE_BUSY ? WAL_RETRY : SQLITE_READONLY_CANTINIT;
+ }
+
+ rc = walLockShared(pWal, WAL_READ_LOCK(mxI));
+ if( rc ){
+ return rc==SQLITE_BUSY ? WAL_RETRY : rc;
+ }
+ /* Now that the read-lock has been obtained, check that neither the
+ ** value in the aReadMark[] array nor the contents of the wal-index
+ ** header have changed.
+ **
+ ** It is necessary to check that the wal-index header did not change
+ ** between the time it was read and when the shared-lock on
+ ** WAL_READ_LOCK(mxI) was obtained, to account for the possibility
+ ** that the log file may have been wrapped by a writer, or that frames
+ ** that occur later in the log than pWal->hdr.mxFrame may have been
+ ** copied into the database by a checkpointer. If either of these things
+ ** happened, then reading the database with the current value of
+ ** pWal->hdr.mxFrame risks reading a corrupted snapshot. So, retry
+ ** instead.
+ **
+ ** Before checking that the live wal-index header has not changed
+ ** since it was read, set Wal.minFrame to the first frame in the wal
+ ** file that has not yet been checkpointed. This client will not need
+ ** to read any frames earlier than minFrame from the wal file - they
+ ** can be safely read directly from the database file.
+ **
+ ** Because a ShmBarrier() call is made between taking the copy of
+ ** nBackfill and checking that the wal-header in shared-memory still
+ ** matches the one cached in pWal->hdr, it is guaranteed that the
+ ** checkpointer that set nBackfill was not working with a wal-index
+ ** header newer than that cached in pWal->hdr. If it were, that could
+ ** cause a problem. The checkpointer could omit to checkpoint
+ ** a version of page X that lies before pWal->minFrame (call that version
+ ** A) on the basis that there is a newer version (version B) of the same
+ ** page later in the wal file. But if version B happens to lie past
+ ** frame pWal->hdr.mxFrame, then the client would incorrectly assume
+ ** that it can read version A from the database file. However, since
+ ** we can guarantee that the checkpointer that set nBackfill could not
+ ** see any pages past pWal->hdr.mxFrame, this problem does not come up.
+ */
+ pWal->minFrame = AtomicLoad(&pInfo->nBackfill)+1;
+ walShmBarrier(pWal);
+ if( AtomicLoad(pInfo->aReadMark+mxI)!=mxReadMark
+ || memcmp((void *)walIndexHdr(pWal), &pWal->hdr, sizeof(WalIndexHdr))
+ ){
+ walUnlockShared(pWal, WAL_READ_LOCK(mxI));
+ return WAL_RETRY;
+ }else{
+ assert( mxReadMark<=pWal->hdr.mxFrame );
+ pWal->readLock = (i16)mxI;
+ }
+ return rc;
+}
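+
+/* A standalone sketch (not compiled) of the retry/backoff schedule
+** implemented above: no delay for the first 5 retries, a 1 microsecond
+** scheduler yield for retries 6-9, then a quadratic ramp reaching
+** 91*91*39 = 322959 microseconds (about 323 ms) on the 100th and final
+** retry, for a little under 10 seconds in total before SQLITE_PROTOCOL.
+*/
+#if 0
+static int walRetryDelayUsecSketch(int cnt){  /* cnt = prior WAL_RETRYs */
+  if( cnt<=5 ) return 0;                      /* retry immediately */
+  if( cnt<10 ) return 1;                      /* in effect, a yield */
+  return (cnt-9)*(cnt-9)*39;                  /* quadratic backoff */
+}
+#endif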
+
+#ifdef SQLITE_ENABLE_SNAPSHOT
+/*
+** Attempt to reduce the value of the WalCkptInfo.nBackfillAttempted
+** variable so that older snapshots can be accessed. To do this, loop
+** through all wal frames from nBackfillAttempted to (nBackfill+1),
+** comparing their content to the corresponding page in the database
+** file, if any. Set nBackfillAttempted to the frame number of the
+** first frame for which the wal file content matches the db file.
+**
+** This is only really safe if the file-system is such that any page
+** writes made by earlier checkpointers were atomic operations, which
+** is not always true. It is also possible that nBackfillAttempted
+** may be left set to a value larger than expected, if a wal frame
+** contains content that is a duplicate of an earlier version of the same
+** page.
+**
+** SQLITE_OK is returned if successful, or an SQLite error code if an
+** error occurs. It is not an error if nBackfillAttempted cannot be
+** decreased at all.
+*/
+SQLITE_PRIVATE int sqlite3WalSnapshotRecover(Wal *pWal){
+ int rc;
+
+ assert( pWal->readLock>=0 );
+ rc = walLockExclusive(pWal, WAL_CKPT_LOCK, 1);
+ if( rc==SQLITE_OK ){
+ volatile WalCkptInfo *pInfo = walCkptInfo(pWal);
+ int szPage = (int)pWal->szPage;
+ i64 szDb; /* Size of db file in bytes */
+
+ rc = sqlite3OsFileSize(pWal->pDbFd, &szDb);
+ if( rc==SQLITE_OK ){
+ void *pBuf1 = sqlite3_malloc(szPage);
+ void *pBuf2 = sqlite3_malloc(szPage);
+ if( pBuf1==0 || pBuf2==0 ){
+ rc = SQLITE_NOMEM;
+ }else{
+ u32 i = pInfo->nBackfillAttempted;
+ for(i=pInfo->nBackfillAttempted; i>AtomicLoad(&pInfo->nBackfill); i--){
+ WalHashLoc sLoc; /* Hash table location */
+ u32 pgno; /* Page number in db file */
+ i64 iDbOff; /* Offset of db file entry */
+ i64 iWalOff; /* Offset of wal file entry */
+
+ rc = walHashGet(pWal, walFramePage(i), &sLoc);
+ if( rc!=SQLITE_OK ) break;
+ pgno = sLoc.aPgno[i-sLoc.iZero];
+ iDbOff = (i64)(pgno-1) * szPage;
+
+ if( iDbOff+szPage<=szDb ){
+ iWalOff = walFrameOffset(i, szPage) + WAL_FRAME_HDRSIZE;
+ rc = sqlite3OsRead(pWal->pWalFd, pBuf1, szPage, iWalOff);
+
+ if( rc==SQLITE_OK ){
+ rc = sqlite3OsRead(pWal->pDbFd, pBuf2, szPage, iDbOff);
+ }
+
+ if( rc!=SQLITE_OK || 0==memcmp(pBuf1, pBuf2, szPage) ){
+ break;
+ }
+ }
+
+ pInfo->nBackfillAttempted = i-1;
+ }
+ }
+
+ sqlite3_free(pBuf1);
+ sqlite3_free(pBuf2);
+ }
+ walUnlockExclusive(pWal, WAL_CKPT_LOCK, 1);
+ }
+
+ return rc;
+}
+#endif /* SQLITE_ENABLE_SNAPSHOT */
+
+/*
+** Begin a read transaction on the database.
+**
+** This routine used to be called sqlite3OpenSnapshot() and with good reason:
+** it takes a snapshot of the state of the WAL and wal-index for the current
+** instant in time. The current thread will continue to use this snapshot.
+** Other threads might append new content to the WAL and wal-index but
+** that extra content is ignored by the current thread.
+**
+** If the database contents have changed since the previous read
+** transaction, then *pChanged is set to 1 before returning. The
+** Pager layer will use this to know that its cache is stale and
+** needs to be flushed.
+*/
+SQLITE_PRIVATE int sqlite3WalBeginReadTransaction(Wal *pWal, int *pChanged){
+ int rc; /* Return code */
+ int cnt = 0; /* Number of TryBeginRead attempts */
+#ifdef SQLITE_ENABLE_SNAPSHOT
+ int bChanged = 0;
+ WalIndexHdr *pSnapshot = pWal->pSnapshot;
+#endif
+
+ assert( pWal->ckptLock==0 );
+
+#ifdef SQLITE_ENABLE_SNAPSHOT
+ if( pSnapshot ){
+ if( memcmp(pSnapshot, &pWal->hdr, sizeof(WalIndexHdr))!=0 ){
+ bChanged = 1;
+ }
+
+ /* It is possible that there is a checkpointer thread running
+ ** concurrent with this code. If this is the case, it may be that the
+ ** checkpointer has already determined that it will checkpoint
+ ** snapshot X, where X is later in the wal file than pSnapshot, but
+ ** has not yet set the pInfo->nBackfillAttempted variable to indicate
+ ** its intent. To avoid the race condition this leads to, ensure that
+ ** there is no checkpointer process by taking a shared CKPT lock
+ ** before checking pInfo->nBackfillAttempted. */
+ (void)walEnableBlocking(pWal);
+ rc = walLockShared(pWal, WAL_CKPT_LOCK);
+ walDisableBlocking(pWal);
+
+ if( rc!=SQLITE_OK ){
+ return rc;
+ }
+ pWal->ckptLock = 1;
+ }
+#endif
+
+ do{
+ rc = walTryBeginRead(pWal, pChanged, 0, ++cnt);
+ }while( rc==WAL_RETRY );
+ testcase( (rc&0xff)==SQLITE_BUSY );
+ testcase( (rc&0xff)==SQLITE_IOERR );
+ testcase( rc==SQLITE_PROTOCOL );
+ testcase( rc==SQLITE_OK );
+
+#ifdef SQLITE_ENABLE_SNAPSHOT
+ if( rc==SQLITE_OK ){
+ if( pSnapshot && memcmp(pSnapshot, &pWal->hdr, sizeof(WalIndexHdr))!=0 ){
+ /* At this point the client has a lock on an aReadMark[] slot holding
+ ** a value equal to or smaller than pSnapshot->mxFrame, but pWal->hdr
+ ** is populated with the wal-index header corresponding to the head
+ ** of the wal file. Verify that pSnapshot is still valid before
+ ** continuing. Reasons why pSnapshot might no longer be valid:
+ **
+ ** (1) The WAL file has been reset since the snapshot was taken.
+ ** In this case, the salt will have changed.
+ **
+ ** (2) A checkpoint has been attempted that wrote frames past
+ ** pSnapshot->mxFrame into the database file. Note that the
+ ** checkpoint need not have completed for this to cause problems.
+ */
+ volatile WalCkptInfo *pInfo = walCkptInfo(pWal);
+
+ assert( pWal->readLock>0 || pWal->hdr.mxFrame==0 );
+ assert( pInfo->aReadMark[pWal->readLock]<=pSnapshot->mxFrame );
+
+ /* Check that the wal file has not been wrapped. Assuming that it has
+ ** not, also check that no checkpointer has attempted to checkpoint any
+ ** frames beyond pSnapshot->mxFrame. If either of these conditions are
+ ** true, return SQLITE_ERROR_SNAPSHOT. Otherwise, overwrite pWal->hdr
+ ** with *pSnapshot and set *pChanged as appropriate for opening the
+ ** snapshot. */
+ if( !memcmp(pSnapshot->aSalt, pWal->hdr.aSalt, sizeof(pWal->hdr.aSalt))
+ && pSnapshot->mxFrame>=pInfo->nBackfillAttempted
+ ){
+ assert( pWal->readLock>0 );
+ memcpy(&pWal->hdr, pSnapshot, sizeof(WalIndexHdr));
+ *pChanged = bChanged;
+ }else{
+ rc = SQLITE_ERROR_SNAPSHOT;
+ }
+
+ /* A client using a non-current snapshot may not ignore any frames
+ ** from the start of the wal file. This is because, for a system
+ ** where (minFrame < iSnapshot < maxFrame), a checkpointer may
+ ** have omitted to checkpoint a frame earlier than minFrame in
+ ** the file because there exists a frame after iSnapshot that
+ ** is the same database page. */
+ pWal->minFrame = 1;
+
+ if( rc!=SQLITE_OK ){
+ sqlite3WalEndReadTransaction(pWal);
+ }
+ }
+ }
+
+ /* Release the shared CKPT lock obtained above. */
+ if( pWal->ckptLock ){
+ assert( pSnapshot );
+ walUnlockShared(pWal, WAL_CKPT_LOCK);
+ pWal->ckptLock = 0;
+ }
+#endif
+ return rc;
+}
+
+/*
+** Finish with a read transaction. All this does is release the
+** read-lock.
+*/
+SQLITE_PRIVATE void sqlite3WalEndReadTransaction(Wal *pWal){
+ sqlite3WalEndWriteTransaction(pWal);
+ if( pWal->readLock>=0 ){
+ walUnlockShared(pWal, WAL_READ_LOCK(pWal->readLock));
+ pWal->readLock = -1;
+ }
+}
+
+/*
+** Search the wal file for page pgno. If found, set *piRead to the frame that
+** contains the page. Otherwise, if pgno is not in the wal file, set *piRead
+** to zero.
+**
+** Return SQLITE_OK if successful, or an error code if an error occurs. If an
+** error does occur, the final value of *piRead is undefined.
+*/
+SQLITE_PRIVATE int sqlite3WalFindFrame(
+ Wal *pWal, /* WAL handle */
+ Pgno pgno, /* Database page number to read data for */
+ u32 *piRead /* OUT: Frame number (or zero) */
+){
+ u32 iRead = 0; /* If !=0, WAL frame to return data from */
+ u32 iLast = pWal->hdr.mxFrame; /* Last page in WAL for this reader */
+ int iHash; /* Used to loop through N hash tables */
+ int iMinHash;
+
+ /* This routine is only called from within a read transaction. */
+ assert( pWal->readLock>=0 || pWal->lockError );
+
+ /* If the "last page" field of the wal-index header snapshot is 0, then
+ ** no data will be read from the wal under any circumstances. Return early
+ ** in this case as an optimization. Likewise, if pWal->readLock==0,
+ ** then the WAL is ignored by the reader so return early, as if the
+ ** WAL were empty.
+ */
+ if( iLast==0 || (pWal->readLock==0 && pWal->bShmUnreliable==0) ){
+ *piRead = 0;
+ return SQLITE_OK;
+ }
+
+ /* Search the hash table or tables for an entry matching page number
+ ** pgno. Each iteration of the following for() loop searches one
+ ** hash table (each hash table indexes up to HASHTABLE_NPAGE frames).
+ **
+ ** This code might run concurrently to the code in walIndexAppend()
+ ** that adds entries to the wal-index (and possibly to this hash
+ ** table). This means the value just read from the hash
+ ** slot (aHash[iKey]) may have been added before or after the
+ ** current read transaction was opened. Values added after the
+ ** read transaction was opened may have been written incorrectly -
+ ** i.e. these slots may contain garbage data. However, we assume
+ ** that any slots written before the current read transaction was
+ ** opened remain unmodified.
+ **
+ ** For the reasons above, the if(...) condition featured in the inner
+ ** loop of the following block is more stringent than would be required
+ ** if we had exclusive access to the hash-table:
+ **
+ ** (aPgno[iFrame]==pgno):
+ ** This condition filters out normal hash-table collisions.
+ **
+ ** (iFrame<=iLast):
+ ** This condition filters out entries that were added to the hash
+ ** table after the current read-transaction had started.
+ */
+ iMinHash = walFramePage(pWal->minFrame);
+ for(iHash=walFramePage(iLast); iHash>=iMinHash; iHash--){
+ WalHashLoc sLoc; /* Hash table location */
+ int iKey; /* Hash slot index */
+ int nCollide; /* Number of hash collisions remaining */
+ int rc; /* Error code */
+ u32 iH;
+
+ rc = walHashGet(pWal, iHash, &sLoc);
+ if( rc!=SQLITE_OK ){
+ return rc;
+ }
+ nCollide = HASHTABLE_NSLOT;
+ iKey = walHash(pgno);
+ while( (iH = AtomicLoad(&sLoc.aHash[iKey]))!=0 ){
+ u32 iFrame = iH + sLoc.iZero;
+ if( iFrame<=iLast && iFrame>=pWal->minFrame && sLoc.aPgno[iH]==pgno ){
+ assert( iFrame>iRead || CORRUPT_DB );
+ iRead = iFrame;
+ }
+ if( (nCollide--)==0 ){
+ return SQLITE_CORRUPT_BKPT;
+ }
+ iKey = walNextHash(iKey);
+ }
+ if( iRead ) break;
+ }
+
+#ifdef SQLITE_ENABLE_EXPENSIVE_ASSERT
+ /* If expensive assert() statements are available, do a linear search
+ ** of the wal-index file content. Make sure the results agree with the
+ ** result obtained using the hash indexes above. */
+ {
+ u32 iRead2 = 0;
+ u32 iTest;
+ assert( pWal->bShmUnreliable || pWal->minFrame>0 );
+ for(iTest=iLast; iTest>=pWal->minFrame && iTest>0; iTest--){
+ if( walFramePgno(pWal, iTest)==pgno ){
+ iRead2 = iTest;
+ break;
+ }
+ }
+ assert( iRead==iRead2 );
+ }
+#endif
+
+ *piRead = iRead;
+ return SQLITE_OK;
+}
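+
+/* A condensed sketch (not compiled) of the probe sequence used above:
+** open addressing over a fixed-size table in which a zero slot ends
+** the probe and visiting more than HASHTABLE_NSLOT slots signals
+** corruption. The real loop above additionally rejects frames below
+** pWal->minFrame; constants and types are those defined earlier in
+** this file.
+*/
+#if 0
+static u32 walLookupSketch(volatile ht_slot *aHash, volatile u32 *aPgno,
+                           u32 iZero, Pgno pgno, u32 iLast){
+  u32 iBest = 0;                       /* Latest matching frame so far */
+  int nCollide = HASHTABLE_NSLOT;      /* Upper bound on probe length */
+  int iKey = walHash(pgno);            /* First slot for this page */
+  u32 iH;
+  while( (iH = aHash[iKey])!=0 ){
+    u32 iFrame = iH + iZero;           /* Slot value -> frame number */
+    if( iFrame<=iLast && aPgno[iH]==pgno ) iBest = iFrame;
+    if( (nCollide--)==0 ) return 0;    /* Table full: corrupt */
+    iKey = walNextHash(iKey);          /* Next slot in the sequence */
+  }
+  return iBest;                        /* 0 if pgno is not in the WAL */
+}
+#endif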
+
+/*
+** Read the contents of frame iRead from the wal file into buffer pOut
+** (which is nOut bytes in size). Return SQLITE_OK if successful, or an
+** error code otherwise.
+*/
+SQLITE_PRIVATE int sqlite3WalReadFrame(
+ Wal *pWal, /* WAL handle */
+ u32 iRead, /* Frame to read */
+ int nOut, /* Size of buffer pOut in bytes */
+ u8 *pOut /* Buffer to write page data to */
+){
+ int sz;
+ i64 iOffset;
+ sz = pWal->hdr.szPage;
+ sz = (sz&0xfe00) + ((sz&0x0001)<<16);
+ testcase( sz<=32768 );
+ testcase( sz>=65536 );
+ iOffset = walFrameOffset(iRead, sz) + WAL_FRAME_HDRSIZE;
+ /* testcase( IS_BIG_INT(iOffset) ); // requires a 4GiB WAL */
+ return sqlite3OsRead(pWal->pWalFd, pOut, (nOut>sz ? sz : nOut), iOffset);
+}
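+
+/* Worked example (not compiled): the 16-bit szPage field of the
+** wal-index header, as stored by sqlite3WalFrames() below, holds the
+** page size directly for 512..32768 and the value 1 for 65536, which
+** does not fit in 16 bits. The expression above inverts that mapping:
+**
+**     szPage   encoded u16   decoded
+**       512      0x0200         512
+**      4096      0x1000        4096
+**     65536      0x0001       65536
+*/
+#if 0
+static u32 walDecodeSzPageSketch(u16 enc){
+  return (enc & 0xfe00) + ((enc & 0x0001)<<16);   /* same as above */
+}
+#endif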
+
+/*
+** Return the size of the database in pages (or zero, if unknown).
+*/
+SQLITE_PRIVATE Pgno sqlite3WalDbsize(Wal *pWal){
+ if( pWal && ALWAYS(pWal->readLock>=0) ){
+ return pWal->hdr.nPage;
+ }
+ return 0;
+}
+
+
+/*
+** This function starts a write transaction on the WAL.
+**
+** A read transaction must have already been started by a prior call
+** to sqlite3WalBeginReadTransaction().
+**
+** If another thread or process has written into the database since
+** the read transaction was started, then it is not possible for this
+** thread to write as doing so would cause a fork. So this routine
+** returns SQLITE_BUSY in that case and no write transaction is started.
+**
+** There can only be a single writer active at a time.
+*/
+SQLITE_PRIVATE int sqlite3WalBeginWriteTransaction(Wal *pWal){
+ int rc;
+
+#ifdef SQLITE_ENABLE_SETLK_TIMEOUT
+ /* If the write-lock is already held, then it was obtained before the
+ ** read-transaction was even opened, making this call a no-op.
+ ** Return early. */
+ if( pWal->writeLock ){
+ assert( !memcmp(&pWal->hdr,(void *)walIndexHdr(pWal),sizeof(WalIndexHdr)) );
+ return SQLITE_OK;
+ }
+#endif
+
+ /* Cannot start a write transaction without first holding a read
+ ** transaction. */
+ assert( pWal->readLock>=0 );
+ assert( pWal->writeLock==0 && pWal->iReCksum==0 );
+
+ if( pWal->readOnly ){
+ return SQLITE_READONLY;
+ }
+
+ /* Only one writer allowed at a time. Get the write lock. Return
+ ** SQLITE_BUSY if unable.
+ */
+ rc = walLockExclusive(pWal, WAL_WRITE_LOCK, 1);
+ if( rc ){
+ return rc;
+ }
+ pWal->writeLock = 1;
+
+ /* If another connection has written to the database file since the
+ ** time the read transaction on this connection was started, then
+ ** the write is disallowed.
+ */
+ if( memcmp(&pWal->hdr, (void *)walIndexHdr(pWal), sizeof(WalIndexHdr))!=0 ){
+ walUnlockExclusive(pWal, WAL_WRITE_LOCK, 1);
+ pWal->writeLock = 0;
+ rc = SQLITE_BUSY_SNAPSHOT;
+ }
+
+ return rc;
+}
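+
+/* Application-level sketch (not compiled) of the SQLITE_BUSY_SNAPSHOT
+** case rejected above: a deferred transaction that reads and then
+** tries to write must be rerun if another connection committed in the
+** interim. "db" is assumed to be an open handle on a WAL-mode database
+** and table "t" is hypothetical.
+*/
+#if 0
+  sqlite3_exec(db, "BEGIN", 0, 0, 0);   /* read txn opens on 1st read */
+  /* ... SELECTs against a fixed snapshot ... */
+  rc = sqlite3_exec(db, "UPDATE t SET x=x+1", 0, 0, 0);
+  if( rc==SQLITE_BUSY ){
+    /* Reported as SQLITE_BUSY_SNAPSHOT when extended result codes are
+    ** enabled. The snapshot is stale: roll back and rerun the whole
+    ** transaction against the latest snapshot. */
+    sqlite3_exec(db, "ROLLBACK", 0, 0, 0);
+  }
+#endif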
+
+/*
+** End a write transaction. The commit has already been done. This
+** routine merely releases the lock.
+*/
+SQLITE_PRIVATE int sqlite3WalEndWriteTransaction(Wal *pWal){
+ if( pWal->writeLock ){
+ walUnlockExclusive(pWal, WAL_WRITE_LOCK, 1);
+ pWal->writeLock = 0;
+ pWal->iReCksum = 0;
+ pWal->truncateOnCommit = 0;
+ }
+ return SQLITE_OK;
+}
+
+/*
+** If any data has been written (but not committed) to the log file, this
+** function moves the write-pointer back to the start of the transaction.
+**
+** Additionally, the callback function is invoked for each frame written
+** to the WAL since the start of the transaction. If the callback returns
+** other than SQLITE_OK, it is not invoked again and the error code is
+** returned to the caller.
+**
+** Otherwise, if the callback function does not return an error, this
+** function returns SQLITE_OK.
+*/
+SQLITE_PRIVATE int sqlite3WalUndo(Wal *pWal, int (*xUndo)(void *, Pgno), void *pUndoCtx){
+ int rc = SQLITE_OK;
+ if( ALWAYS(pWal->writeLock) ){
+ Pgno iMax = pWal->hdr.mxFrame;
+ Pgno iFrame;
+
+ /* Restore the client's cache of the wal-index header to the state it
+ ** was in before the client began writing to the database.
+ */
+ memcpy(&pWal->hdr, (void *)walIndexHdr(pWal), sizeof(WalIndexHdr));
+
+ for(iFrame=pWal->hdr.mxFrame+1;
+ ALWAYS(rc==SQLITE_OK) && iFrame<=iMax;
+ iFrame++
+ ){
+ /* This call cannot fail. Unless the page for which the page number
+ ** is passed as the second argument is both (a) in the cache and
+ ** (b) holding an outstanding reference, xUndo is either a no-op
+ ** (if (a) is false) or simply expels the page from the cache (if (b)
+ ** is false).
+ **
+ ** If the upper layer is doing a rollback, it is guaranteed that there
+ ** are no outstanding references to any page other than page 1. And
+ ** page 1 is never written to the log until the transaction is
+ ** committed. As a result, the call to xUndo cannot fail.
+ */
+ assert( walFramePgno(pWal, iFrame)!=1 );
+ rc = xUndo(pUndoCtx, walFramePgno(pWal, iFrame));
+ }
+ if( iMax!=pWal->hdr.mxFrame ) walCleanupHash(pWal);
+ }
+ return rc;
+}
+
+/*
+** Argument aWalData must point to an array of WAL_SAVEPOINT_NDATA u32
+** values. This function populates the array with values required to
+** "rollback" the write position of the WAL handle back to the current
+** point in the event of a savepoint rollback (via WalSavepointUndo()).
+*/
+SQLITE_PRIVATE void sqlite3WalSavepoint(Wal *pWal, u32 *aWalData){
+ assert( pWal->writeLock );
+ aWalData[0] = pWal->hdr.mxFrame;
+ aWalData[1] = pWal->hdr.aFrameCksum[0];
+ aWalData[2] = pWal->hdr.aFrameCksum[1];
+ aWalData[3] = pWal->nCkpt;
+}
+
+/*
+** Move the write position of the WAL back to the point identified by
+** the values in the aWalData[] array. aWalData must point to an array
+** of WAL_SAVEPOINT_NDATA u32 values that has been previously populated
+** by a call to WalSavepoint().
+*/
+SQLITE_PRIVATE int sqlite3WalSavepointUndo(Wal *pWal, u32 *aWalData){
+ int rc = SQLITE_OK;
+
+ assert( pWal->writeLock );
+ assert( aWalData[3]!=pWal->nCkpt || aWalData[0]<=pWal->hdr.mxFrame );
+
+ if( aWalData[3]!=pWal->nCkpt ){
+ /* This savepoint was opened immediately after the write-transaction
+ ** was started. Right after that, the writer decided to wrap around
+ ** to the start of the log. Update the savepoint values to match.
+ */
+ aWalData[0] = 0;
+ aWalData[3] = pWal->nCkpt;
+ }
+
+ if( aWalData[0]<pWal->hdr.mxFrame ){
+ pWal->hdr.mxFrame = aWalData[0];
+ pWal->hdr.aFrameCksum[0] = aWalData[1];
+ pWal->hdr.aFrameCksum[1] = aWalData[2];
+ walCleanupHash(pWal);
+ }
+
+ return rc;
+}
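+
+/* Sketch (not compiled) of how the two routines above pair up inside
+** a write transaction: capture the write position when a savepoint is
+** opened and restore it on "ROLLBACK TO".
+*/
+#if 0
+  u32 aWalData[WAL_SAVEPOINT_NDATA];         /* Four u32 values */
+  sqlite3WalSavepoint(pWal, aWalData);       /* On SAVEPOINT */
+  /* ... frames appended by sqlite3WalFrames() ... */
+  sqlite3WalSavepointUndo(pWal, aWalData);   /* On ROLLBACK TO */
+#endif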
+
+/*
+** This function is called just before writing a set of frames to the log
+** file (see sqlite3WalFrames()). It checks to see if, instead of appending
+** to the current log file, it is possible to overwrite the start of the
+** existing log file with the new frames (i.e. "reset" the log). If so,
+** it sets pWal->hdr.mxFrame to 0. Otherwise, pWal->hdr.mxFrame is left
+** unchanged.
+**
+** SQLITE_OK is returned if no error is encountered (regardless of whether
+** or not pWal->hdr.mxFrame is modified). An SQLite error code is returned
+** if an error occurs.
+*/
+static int walRestartLog(Wal *pWal){
+ int rc = SQLITE_OK;
+ int cnt;
+
+ if( pWal->readLock==0 ){
+ volatile WalCkptInfo *pInfo = walCkptInfo(pWal);
+ assert( pInfo->nBackfill==pWal->hdr.mxFrame );
+ if( pInfo->nBackfill>0 ){
+ u32 salt1;
+ sqlite3_randomness(4, &salt1);
+ rc = walLockExclusive(pWal, WAL_READ_LOCK(1), WAL_NREADER-1);
+ if( rc==SQLITE_OK ){
+ /* If all readers are using WAL_READ_LOCK(0) (in other words if no
+ ** readers are currently using the WAL), then this transaction's
+ ** frames will overwrite the start of the existing log. Update the
+ ** wal-index header to reflect this.
+ **
+ ** In theory it would be OK to update the cache of the header only
+ ** at this point. But updating the actual wal-index header is also
+ ** safe and means there is no special case for sqlite3WalUndo()
+ ** to handle if this transaction is rolled back. */
+ walRestartHdr(pWal, salt1);
+ walUnlockExclusive(pWal, WAL_READ_LOCK(1), WAL_NREADER-1);
+ }else if( rc!=SQLITE_BUSY ){
+ return rc;
+ }
+ }
+ walUnlockShared(pWal, WAL_READ_LOCK(0));
+ pWal->readLock = -1;
+ cnt = 0;
+ do{
+ int notUsed;
+ rc = walTryBeginRead(pWal, &notUsed, 1, ++cnt);
+ }while( rc==WAL_RETRY );
+ assert( (rc&0xff)!=SQLITE_BUSY ); /* BUSY not possible when useWal==1 */
+ testcase( (rc&0xff)==SQLITE_IOERR );
+ testcase( rc==SQLITE_PROTOCOL );
+ testcase( rc==SQLITE_OK );
+ }
+ return rc;
+}
+
+/*
+** Information about the current state of the WAL file and where
+** the next fsync should occur - passed from sqlite3WalFrames() into
+** walWriteToLog().
+*/
+typedef struct WalWriter {
+ Wal *pWal; /* The complete WAL information */
+ sqlite3_file *pFd; /* The WAL file to which we write */
+ sqlite3_int64 iSyncPoint; /* Fsync at this offset */
+ int syncFlags; /* Flags for the fsync */
+ int szPage; /* Size of one page */
+} WalWriter;
+
+/*
+** Write iAmt bytes of content into the WAL file beginning at iOffset.
+** Do a sync when crossing the p->iSyncPoint boundary.
+**
+** In other words, if iSyncPoint is in between iOffset and iOffset+iAmt,
+** first write the part before iSyncPoint, then sync, then write the
+** rest.
+*/
+static int walWriteToLog(
+ WalWriter *p, /* WAL to write to */
+ void *pContent, /* Content to be written */
+ int iAmt, /* Number of bytes to write */
+ sqlite3_int64 iOffset /* Start writing at this offset */
+){
+ int rc;
+ if( iOffset<p->iSyncPoint && iOffset+iAmt>=p->iSyncPoint ){
+ int iFirstAmt = (int)(p->iSyncPoint - iOffset);
+ rc = sqlite3OsWrite(p->pFd, pContent, iFirstAmt, iOffset);
+ if( rc ) return rc;
+ iOffset += iFirstAmt;
+ iAmt -= iFirstAmt;
+ pContent = (void*)(iFirstAmt + (char*)pContent);
+ assert( WAL_SYNC_FLAGS(p->syncFlags)!=0 );
+ rc = sqlite3OsSync(p->pFd, WAL_SYNC_FLAGS(p->syncFlags));
+ if( iAmt==0 || rc ) return rc;
+ }
+ rc = sqlite3OsWrite(p->pFd, pContent, iAmt, iOffset);
+ return rc;
+}
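+
+/* Worked example (not compiled): with p->iSyncPoint==8192, a call with
+** iOffset==8000 and iAmt==500 first writes the 192 bytes up to the
+** boundary, syncs, then writes the remaining 308 bytes, so everything
+** before the sync point is durable before any byte after it reaches
+** the file.
+*/
+#if 0
+  int iFirstAmt = (int)(8192 - 8000);   /* == 192, written before sync */
+  int iRest     = 500 - iFirstAmt;      /* == 308, written after sync  */
+#endif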
+
+/*
+** Write out a single frame of the WAL
+*/
+static int walWriteOneFrame(
+ WalWriter *p, /* Where to write the frame */
+ PgHdr *pPage, /* The page of the frame to be written */
+ int nTruncate, /* The commit flag. Usually 0. >0 for commit */
+ sqlite3_int64 iOffset /* Byte offset at which to write */
+){
+ int rc; /* Result code from subfunctions */
+ void *pData; /* Data actually written */
+ u8 aFrame[WAL_FRAME_HDRSIZE]; /* Buffer to assemble frame-header in */
+ pData = pPage->pData;
+ walEncodeFrame(p->pWal, pPage->pgno, nTruncate, pData, aFrame);
+ rc = walWriteToLog(p, aFrame, sizeof(aFrame), iOffset);
+ if( rc ) return rc;
+ /* Write the page data */
+ rc = walWriteToLog(p, pData, p->szPage, iOffset+sizeof(aFrame));
+ return rc;
+}
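+
+/* For reference, the 24-byte frame-header assembled by walEncodeFrame()
+** is laid out as follows per the WAL file format (all values
+** big-endian on disk; the struct is an illustrative sketch only and
+** is not compiled):
+**
+**    OFFSET SIZE  DESCRIPTION
+**       0     4   Page number of the frame
+**       4     4   For commit records, the database size in pages
+**                 after the commit; zero for all other frames
+**       8     8   Copy of the two salt values in the wal header
+**      16     8   Running checksum through this frame
+*/
+#if 0
+struct WalFrameHdrSketch {
+  u32 pgno;        /* Database page number */
+  u32 nTruncate;   /* Commit flag / new database size */
+  u32 aSalt[2];    /* Must match the wal header salts */
+  u32 aCksum[2];   /* Cumulative checksum up to this frame */
+};
+#endif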
+
+/*
+** This function is called as part of committing a transaction within which
+** one or more frames have been overwritten. It updates the checksums for
+** all frames written to the wal file by the current transaction starting
+** with the earliest to have been overwritten.
+**
+** SQLITE_OK is returned if successful, or an SQLite error code otherwise.
+*/
+static int walRewriteChecksums(Wal *pWal, u32 iLast){
+ const int szPage = pWal->szPage;/* Database page size */
+ int rc = SQLITE_OK; /* Return code */
+ u8 *aBuf; /* Buffer to load data from wal file into */
+ u8 aFrame[WAL_FRAME_HDRSIZE]; /* Buffer to assemble frame-headers in */
+ u32 iRead; /* Next frame to read from wal file */
+ i64 iCksumOff;
+
+ aBuf = sqlite3_malloc(szPage + WAL_FRAME_HDRSIZE);
+ if( aBuf==0 ) return SQLITE_NOMEM_BKPT;
+
+ /* Find the checksum values to use as input for recalculating the
+ ** first checksum. If the first frame is frame 1 (implying that the current
+ ** transaction restarted the wal file), these values must be read from the
+ ** wal-file header. Otherwise, read them from the frame header of the
+ ** previous frame. */
+ assert( pWal->iReCksum>0 );
+ if( pWal->iReCksum==1 ){
+ iCksumOff = 24;
+ }else{
+ iCksumOff = walFrameOffset(pWal->iReCksum-1, szPage) + 16;
+ }
+ rc = sqlite3OsRead(pWal->pWalFd, aBuf, sizeof(u32)*2, iCksumOff);
+ pWal->hdr.aFrameCksum[0] = sqlite3Get4byte(aBuf);
+ pWal->hdr.aFrameCksum[1] = sqlite3Get4byte(&aBuf[sizeof(u32)]);
+
+ iRead = pWal->iReCksum;
+ pWal->iReCksum = 0;
+ for(; rc==SQLITE_OK && iRead<=iLast; iRead++){
+ i64 iOff = walFrameOffset(iRead, szPage);
+ rc = sqlite3OsRead(pWal->pWalFd, aBuf, szPage+WAL_FRAME_HDRSIZE, iOff);
+ if( rc==SQLITE_OK ){
+ u32 iPgno, nDbSize;
+ iPgno = sqlite3Get4byte(aBuf);
+ nDbSize = sqlite3Get4byte(&aBuf[4]);
+
+ walEncodeFrame(pWal, iPgno, nDbSize, &aBuf[WAL_FRAME_HDRSIZE], aFrame);
+ rc = sqlite3OsWrite(pWal->pWalFd, aFrame, sizeof(aFrame), iOff);
+ }
+ }
+
+ sqlite3_free(aBuf);
+ return rc;
+}
+
+/*
+** Write a set of frames to the log. The caller must hold the write-lock
+** on the log file (obtained using sqlite3WalBeginWriteTransaction()).
+*/
+SQLITE_PRIVATE int sqlite3WalFrames(
+ Wal *pWal, /* Wal handle to write to */
+ int szPage, /* Database page-size in bytes */
+ PgHdr *pList, /* List of dirty pages to write */
+ Pgno nTruncate, /* Database size after this commit */
+ int isCommit, /* True if this is a commit */
+ int sync_flags /* Flags to pass to OsSync() (or 0) */
+){
+ int rc; /* Used to catch return codes */
+ u32 iFrame; /* Next frame address */
+ PgHdr *p; /* Iterator to run through pList with. */
+ PgHdr *pLast = 0; /* Last frame in list */
+ int nExtra = 0; /* Number of extra copies of last page */
+ int szFrame; /* The size of a single frame */
+ i64 iOffset; /* Next byte to write in WAL file */
+ WalWriter w; /* The writer */
+ u32 iFirst = 0; /* First frame that may be overwritten */
+ WalIndexHdr *pLive; /* Pointer to shared header */
+
+ assert( pList );
+ assert( pWal->writeLock );
+
+ /* If this frame set completes a transaction, then nTruncate>0. If
+ ** nTruncate==0 then this frame set does not complete the transaction. */
+ assert( (isCommit!=0)==(nTruncate!=0) );
+
+#if defined(SQLITE_TEST) && defined(SQLITE_DEBUG)
+ { int cnt; for(cnt=0, p=pList; p; p=p->pDirty, cnt++){}
+ WALTRACE(("WAL%p: frame write begin. %d frames. mxFrame=%d. %s\n",
+ pWal, cnt, pWal->hdr.mxFrame, isCommit ? "Commit" : "Spill"));
+ }
+#endif
+
+ pLive = (WalIndexHdr*)walIndexHdr(pWal);
+ if( memcmp(&pWal->hdr, (void *)pLive, sizeof(WalIndexHdr))!=0 ){
+ iFirst = pLive->mxFrame+1;
+ }
+
+ /* See if it is possible to write these frames into the start of the
+ ** log file, instead of appending to it at pWal->hdr.mxFrame.
+ */
+ if( SQLITE_OK!=(rc = walRestartLog(pWal)) ){
+ return rc;
+ }
+
+ /* If this is the first frame written into the log, write the WAL
+ ** header to the start of the WAL file. See comments at the top of
+ ** this source file for a description of the WAL header format.
+ */
+ iFrame = pWal->hdr.mxFrame;
+ if( iFrame==0 ){
+ u8 aWalHdr[WAL_HDRSIZE]; /* Buffer to assemble wal-header in */
+ u32 aCksum[2]; /* Checksum for wal-header */
+
+ sqlite3Put4byte(&aWalHdr[0], (WAL_MAGIC | SQLITE_BIGENDIAN));
+ sqlite3Put4byte(&aWalHdr[4], WAL_MAX_VERSION);
+ sqlite3Put4byte(&aWalHdr[8], szPage);
+ sqlite3Put4byte(&aWalHdr[12], pWal->nCkpt);
+ if( pWal->nCkpt==0 ) sqlite3_randomness(8, pWal->hdr.aSalt);
+ memcpy(&aWalHdr[16], pWal->hdr.aSalt, 8);
+ walChecksumBytes(1, aWalHdr, WAL_HDRSIZE-2*4, 0, aCksum);
+ sqlite3Put4byte(&aWalHdr[24], aCksum[0]);
+ sqlite3Put4byte(&aWalHdr[28], aCksum[1]);
+
+ pWal->szPage = szPage;
+ pWal->hdr.bigEndCksum = SQLITE_BIGENDIAN;
+ pWal->hdr.aFrameCksum[0] = aCksum[0];
+ pWal->hdr.aFrameCksum[1] = aCksum[1];
+ pWal->truncateOnCommit = 1;
+
+ rc = sqlite3OsWrite(pWal->pWalFd, aWalHdr, sizeof(aWalHdr), 0);
+ WALTRACE(("WAL%p: wal-header write %s\n", pWal, rc ? "failed" : "ok"));
+ if( rc!=SQLITE_OK ){
+ return rc;
+ }
+
+ /* Sync the header (unless SQLITE_IOCAP_SEQUENTIAL is true or unless
+ ** all syncing is turned off by PRAGMA synchronous=OFF). Otherwise
+ ** an out-of-order write following a WAL restart could result in
+ ** database corruption. See the ticket:
+ **
+ ** https://sqlite.org/src/info/ff5be73dee
+ */
+ if( pWal->syncHeader ){
+ rc = sqlite3OsSync(pWal->pWalFd, CKPT_SYNC_FLAGS(sync_flags));
+ if( rc ) return rc;
+ }
+ }
+ assert( (int)pWal->szPage==szPage );
+
+ /* Setup information needed to write frames into the WAL */
+ w.pWal = pWal;
+ w.pFd = pWal->pWalFd;
+ w.iSyncPoint = 0;
+ w.syncFlags = sync_flags;
+ w.szPage = szPage;
+ iOffset = walFrameOffset(iFrame+1, szPage);
+ szFrame = szPage + WAL_FRAME_HDRSIZE;
+
+ /* Write all frames into the log file exactly once */
+ for(p=pList; p; p=p->pDirty){
+ int nDbSize; /* 0 normally. Positive == commit flag */
+
+ /* Check if this page has already been written into the wal file by
+ ** the current transaction. If so, overwrite the existing frame and
+ ** set Wal.iReCksum to the frame number of the earliest overwritten
+ ** frame - indicating that checksums must be recomputed from that
+ ** frame onward when the transaction is committed. */
+ if( iFirst && (p->pDirty || isCommit==0) ){
+ u32 iWrite = 0;
+ VVA_ONLY(rc =) sqlite3WalFindFrame(pWal, p->pgno, &iWrite);
+ assert( rc==SQLITE_OK || iWrite==0 );
+ if( iWrite>=iFirst ){
+ i64 iOff = walFrameOffset(iWrite, szPage) + WAL_FRAME_HDRSIZE;
+ void *pData;
+ if( pWal->iReCksum==0 || iWrite<pWal->iReCksum ){
+ pWal->iReCksum = iWrite;
+ }
+ pData = p->pData;
+ rc = sqlite3OsWrite(pWal->pWalFd, pData, szPage, iOff);
+ if( rc ) return rc;
+ p->flags &= ~PGHDR_WAL_APPEND;
+ continue;
+ }
+ }
+
+ iFrame++;
+ assert( iOffset==walFrameOffset(iFrame, szPage) );
+ nDbSize = (isCommit && p->pDirty==0) ? nTruncate : 0;
+ rc = walWriteOneFrame(&w, p, nDbSize, iOffset);
+ if( rc ) return rc;
+ pLast = p;
+ iOffset += szFrame;
+ p->flags |= PGHDR_WAL_APPEND;
+ }
+
+ /* Recalculate checksums within the wal file if required. */
+ if( isCommit && pWal->iReCksum ){
+ rc = walRewriteChecksums(pWal, iFrame);
+ if( rc ) return rc;
+ }
+
+ /* If this is the end of a transaction, then we might need to pad
+ ** the transaction and/or sync the WAL file.
+ **
+ ** Padding and syncing only occur if this set of frames completes a
+ ** transaction and if PRAGMA synchronous=FULL. If synchronous==NORMAL
+ ** or synchronous==OFF, then no padding or syncing are needed.
+ **
+ ** If SQLITE_IOCAP_POWERSAFE_OVERWRITE is defined, then padding is not
+ ** needed and only the sync is done. If padding is needed, then the
+ ** final frame is repeated (with its commit mark) until the next sector
+ ** boundary is crossed. Only the part of the WAL prior to the last
+ ** sector boundary is synced; the part of the last frame that extends
+ ** past the sector boundary is written after the sync.
+ */
+ if( isCommit && WAL_SYNC_FLAGS(sync_flags)!=0 ){
+ int bSync = 1;
+ if( pWal->padToSectorBoundary ){
+ int sectorSize = sqlite3SectorSize(pWal->pWalFd);
+ w.iSyncPoint = ((iOffset+sectorSize-1)/sectorSize)*sectorSize;
+ bSync = (w.iSyncPoint==iOffset);
+ testcase( bSync );
+ while( iOffset<w.iSyncPoint ){
+ rc = walWriteOneFrame(&w, pLast, nTruncate, iOffset);
+ if( rc ) return rc;
+ iOffset += szFrame;
+ nExtra++;
+ assert( pLast!=0 );
+ }
+ }
+ if( bSync ){
+ assert( rc==SQLITE_OK );
+ rc = sqlite3OsSync(w.pFd, WAL_SYNC_FLAGS(sync_flags));
+ }
+ }
+
+ /* If this frame set completes the first transaction in the WAL and
+ ** if PRAGMA journal_size_limit is set, then truncate the WAL to the
+ ** journal size limit, if possible.
+ */
+ if( isCommit && pWal->truncateOnCommit && pWal->mxWalSize>=0 ){
+ i64 sz = pWal->mxWalSize;
+ if( walFrameOffset(iFrame+nExtra+1, szPage)>pWal->mxWalSize ){
+ sz = walFrameOffset(iFrame+nExtra+1, szPage);
+ }
+ walLimitSize(pWal, sz);
+ pWal->truncateOnCommit = 0;
+ }
+
+ /* Append data to the wal-index. It is not necessary to lock the
+ ** wal-index to do this as the SQLITE_SHM_WRITE lock held on the wal-index
+ ** guarantees that there are no other writers, and no data that may
+ ** be in use by existing readers is being overwritten.
+ */
+ iFrame = pWal->hdr.mxFrame;
+ for(p=pList; p && rc==SQLITE_OK; p=p->pDirty){
+ if( (p->flags & PGHDR_WAL_APPEND)==0 ) continue;
+ iFrame++;
+ rc = walIndexAppend(pWal, iFrame, p->pgno);
+ }
+ assert( pLast!=0 || nExtra==0 );
+ while( rc==SQLITE_OK && nExtra>0 ){
+ iFrame++;
+ nExtra--;
+ rc = walIndexAppend(pWal, iFrame, pLast->pgno);
+ }
+
+ if( rc==SQLITE_OK ){
+ /* Update the private copy of the header. */
+ pWal->hdr.szPage = (u16)((szPage&0xff00) | (szPage>>16));
+ testcase( szPage<=32768 );
+ testcase( szPage>=65536 );
+ pWal->hdr.mxFrame = iFrame;
+ if( isCommit ){
+ pWal->hdr.iChange++;
+ pWal->hdr.nPage = nTruncate;
+ }
+ /* If this is a commit, update the wal-index header too. */
+ if( isCommit ){
+ walIndexWriteHdr(pWal);
+ pWal->iCallback = iFrame;
+ }
+ }
+
+ WALTRACE(("WAL%p: frame write %s\n", pWal, rc ? "failed" : "ok"));
+ return rc;
+}
+
+/*
+** This routine is called to implement sqlite3_wal_checkpoint() and
+** related interfaces.
+**
+** Obtain a CHECKPOINT lock and then backfill as much information as
+** we can from WAL into the database.
+**
+** If parameter xBusy is not NULL, it is a pointer to a busy-handler
+** callback. In this case this function runs a blocking checkpoint.
+*/
+SQLITE_PRIVATE int sqlite3WalCheckpoint(
+ Wal *pWal, /* Wal connection */
+ sqlite3 *db, /* Check this handle's interrupt flag */
+ int eMode, /* PASSIVE, FULL, RESTART, or TRUNCATE */
+ int (*xBusy)(void*), /* Function to call when busy */
+ void *pBusyArg, /* Context argument for xBusyHandler */
+ int sync_flags, /* Flags to sync db file with (or 0) */
+ int nBuf, /* Size of temporary buffer */
+ u8 *zBuf, /* Temporary buffer to use */
+ int *pnLog, /* OUT: Number of frames in WAL */
+ int *pnCkpt /* OUT: Number of backfilled frames in WAL */
+){
+ int rc; /* Return code */
+ int isChanged = 0; /* True if a new wal-index header is loaded */
+ int eMode2 = eMode; /* Mode to pass to walCheckpoint() */
+ int (*xBusy2)(void*) = xBusy; /* Busy handler for eMode2 */
+
+ assert( pWal->ckptLock==0 );
+ assert( pWal->writeLock==0 );
+
+ /* EVIDENCE-OF: R-62920-47450 The busy-handler callback is never invoked
+ ** in the SQLITE_CHECKPOINT_PASSIVE mode. */
+ assert( eMode!=SQLITE_CHECKPOINT_PASSIVE || xBusy==0 );
+
+ if( pWal->readOnly ) return SQLITE_READONLY;
+ WALTRACE(("WAL%p: checkpoint begins\n", pWal));
+
+ /* Enable blocking locks, if possible. If blocking locks are successfully
+ ** enabled, set xBusy2=0 so that the busy-handler is never invoked. */
+ sqlite3WalDb(pWal, db);
+ (void)walEnableBlocking(pWal);
+
+ /* IMPLEMENTATION-OF: R-62028-47212 All calls obtain an exclusive
+ ** "checkpoint" lock on the database file.
+ ** EVIDENCE-OF: R-10421-19736 If any other process is running a
+ ** checkpoint operation at the same time, the lock cannot be obtained and
+ ** SQLITE_BUSY is returned.
+ ** EVIDENCE-OF: R-53820-33897 Even if there is a busy-handler configured,
+ ** it will not be invoked in this case.
+ */
+ rc = walLockExclusive(pWal, WAL_CKPT_LOCK, 1);
+ testcase( rc==SQLITE_BUSY );
+ testcase( rc!=SQLITE_OK && xBusy2!=0 );
+ if( rc==SQLITE_OK ){
+ pWal->ckptLock = 1;
+
+ /* IMPLEMENTATION-OF: R-59782-36818 The SQLITE_CHECKPOINT_FULL, RESTART and
+ ** TRUNCATE modes also obtain the exclusive "writer" lock on the database
+ ** file.
+ **
+ ** EVIDENCE-OF: R-60642-04082 If the writer lock cannot be obtained
+ ** immediately, and a busy-handler is configured, it is invoked and the
+ ** writer lock retried until either the busy-handler returns 0 or the
+ ** lock is successfully obtained.
+ */
+ if( eMode!=SQLITE_CHECKPOINT_PASSIVE ){
+ rc = walBusyLock(pWal, xBusy2, pBusyArg, WAL_WRITE_LOCK, 1);
+ if( rc==SQLITE_OK ){
+ pWal->writeLock = 1;
+ }else if( rc==SQLITE_BUSY ){
+ eMode2 = SQLITE_CHECKPOINT_PASSIVE;
+ xBusy2 = 0;
+ rc = SQLITE_OK;
+ }
+ }
+ }
+
+ /* Read the wal-index header. */
+ if( rc==SQLITE_OK ){
+ walDisableBlocking(pWal);
+ rc = walIndexReadHdr(pWal, &isChanged);
+ (void)walEnableBlocking(pWal);
+ if( isChanged && pWal->pDbFd->pMethods->iVersion>=3 ){
+ sqlite3OsUnfetch(pWal->pDbFd, 0, 0);
+ }
+ }
+
+ /* Copy data from the log to the database file. */
+ if( rc==SQLITE_OK ){
+ if( pWal->hdr.mxFrame && walPagesize(pWal)!=nBuf ){
+ rc = SQLITE_CORRUPT_BKPT;
+ }else{
+ rc = walCheckpoint(pWal, db, eMode2, xBusy2, pBusyArg, sync_flags, zBuf);
+ }
+
+ /* If no error occurred, set the output variables. */
+ if( rc==SQLITE_OK || rc==SQLITE_BUSY ){
+ if( pnLog ) *pnLog = (int)pWal->hdr.mxFrame;
+ if( pnCkpt ) *pnCkpt = (int)(walCkptInfo(pWal)->nBackfill);
+ }
+ }
+
+ if( isChanged ){
+ /* If a new wal-index header was loaded before the checkpoint was
+ ** performed, then the pager-cache associated with pWal is now
+ ** out of date. So zero the cached wal-index header to ensure that
+ ** next time the pager opens a snapshot on this database it knows that
+ ** the cache needs to be reset.
+ */
+ memset(&pWal->hdr, 0, sizeof(WalIndexHdr));
+ }
+
+ walDisableBlocking(pWal);
+ sqlite3WalDb(pWal, 0);
+
+ /* Release the locks. */
+ sqlite3WalEndWriteTransaction(pWal);
+ if( pWal->ckptLock ){
+ walUnlockExclusive(pWal, WAL_CKPT_LOCK, 1);
+ pWal->ckptLock = 0;
+ }
+ WALTRACE(("WAL%p: checkpoint %s\n", pWal, rc ? "failed" : "ok"));
+#ifdef SQLITE_ENABLE_SETLK_TIMEOUT
+ if( rc==SQLITE_BUSY_TIMEOUT ) rc = SQLITE_BUSY;
+#endif
+ return (rc==SQLITE_OK && eMode!=eMode2 ? SQLITE_BUSY : rc);
+}
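+
+/* A minimal sketch (not compiled) of driving the routine above through
+** the public API. "db" is assumed to be an open handle on a WAL-mode
+** database.
+*/
+#if 0
+  int nLog = 0, nCkpt = 0;
+  int rc = sqlite3_wal_checkpoint_v2(db, "main",
+               SQLITE_CHECKPOINT_TRUNCATE, &nLog, &nCkpt);
+  /* On success, nLog is the number of frames in the WAL and nCkpt the
+  ** number of those frames that have been backfilled. */
+#endif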
+
+/* Return the value to pass to a sqlite3_wal_hook callback, the
+** number of frames in the WAL at the point of the last commit since
+** sqlite3WalCallback() was called. If no commits have occurred since
+** the last call, then return 0.
+*/
+SQLITE_PRIVATE int sqlite3WalCallback(Wal *pWal){
+ u32 ret = 0;
+ if( pWal ){
+ ret = pWal->iCallback;
+ pWal->iCallback = 0;
+ }
+ return (int)ret;
+}
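+
+/* Sketch (not compiled) of the consumer of this value: a wal-hook that
+** checkpoints once the log passes a threshold, much like the hook that
+** sqlite3_wal_autocheckpoint() installs internally. The threshold of
+** 1000 frames is this sketch's assumption.
+*/
+#if 0
+static int xWalHookSketch(void *pArg, sqlite3 *db, const char *zDb, int nFrame){
+  if( nFrame>=1000 ){
+    sqlite3_wal_checkpoint_v2(db, zDb, SQLITE_CHECKPOINT_PASSIVE, 0, 0);
+  }
+  return SQLITE_OK;
+}
+/* Registered with: sqlite3_wal_hook(db, xWalHookSketch, 0); */
+#endif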
+
+/*
+** This function is called to change the WAL subsystem into or out
+** of locking_mode=EXCLUSIVE.
+**
+** If op is zero, then attempt to change from locking_mode=EXCLUSIVE
+** into locking_mode=NORMAL. This means that we must acquire a lock
+** on the pWal->readLock byte. If the WAL is already in locking_mode=NORMAL
+** or if the acquisition of the lock fails, then return 0. If the
+** transition out of exclusive-mode is successful, return 1. This
+** operation must occur while the pager is still holding the exclusive
+** lock on the main database file.
+**
+** If op is one, then change from locking_mode=NORMAL into
+** locking_mode=EXCLUSIVE. This means that the pWal->readLock must
+** be released. Return 1 if the transition is made and 0 if the
+** WAL is already in exclusive-locking mode - meaning that this
+** routine is a no-op. The pager must already hold the exclusive lock
+** on the main database file before invoking this operation.
+**
+** If op is negative, then do a dry-run of the op==1 case but do
+** not actually change anything. The pager uses this to see if it
+** should acquire the database exclusive lock prior to invoking
+** the op==1 case.
+*/
+SQLITE_PRIVATE int sqlite3WalExclusiveMode(Wal *pWal, int op){
+ int rc;
+ assert( pWal->writeLock==0 );
+ assert( pWal->exclusiveMode!=WAL_HEAPMEMORY_MODE || op==-1 );
+
+ /* pWal->readLock is usually set, but might be -1 if there was a
+ ** prior error while attempting to acquire a read-lock. This cannot
+ ** happen if the connection is actually in exclusive mode (as no xShmLock
+ ** locks are taken in this case). Nor should the pager attempt to
+ ** upgrade to exclusive-mode following such an error.
+ */
+ assert( pWal->readLock>=0 || pWal->lockError );
+ assert( pWal->readLock>=0 || (op<=0 && pWal->exclusiveMode==0) );
+
+ if( op==0 ){
+ if( pWal->exclusiveMode!=WAL_NORMAL_MODE ){
+ pWal->exclusiveMode = WAL_NORMAL_MODE;
+ if( walLockShared(pWal, WAL_READ_LOCK(pWal->readLock))!=SQLITE_OK ){
+ pWal->exclusiveMode = WAL_EXCLUSIVE_MODE;
+ }
+ rc = pWal->exclusiveMode==WAL_NORMAL_MODE;
+ }else{
+ /* Already in locking_mode=NORMAL */
+ rc = 0;
+ }
+ }else if( op>0 ){
+ assert( pWal->exclusiveMode==WAL_NORMAL_MODE );
+ assert( pWal->readLock>=0 );
+ walUnlockShared(pWal, WAL_READ_LOCK(pWal->readLock));
+ pWal->exclusiveMode = WAL_EXCLUSIVE_MODE;
+ rc = 1;
+ }else{
+ rc = pWal->exclusiveMode==WAL_NORMAL_MODE;
+ }
+ return rc;
+}
+
+/*
+** Return true if the argument is non-NULL and the WAL module is using
+** heap-memory for the wal-index. Otherwise, if the argument is NULL or the
+** WAL module is using shared-memory, return false.
+*/
+SQLITE_PRIVATE int sqlite3WalHeapMemory(Wal *pWal){
+ return (pWal && pWal->exclusiveMode==WAL_HEAPMEMORY_MODE );
+}
+
+#ifdef SQLITE_ENABLE_SNAPSHOT
+/* Create a snapshot object. The content of a snapshot is opaque to
+** every other subsystem, so the WAL module can put whatever it needs
+** in the object.
+*/
+SQLITE_PRIVATE int sqlite3WalSnapshotGet(Wal *pWal, sqlite3_snapshot **ppSnapshot){
+ int rc = SQLITE_OK;
+ WalIndexHdr *pRet;
+ static const u32 aZero[4] = { 0, 0, 0, 0 };
+
+ assert( pWal->readLock>=0 && pWal->writeLock==0 );
+
+ if( memcmp(&pWal->hdr.aFrameCksum[0],aZero,16)==0 ){
+ *ppSnapshot = 0;
+ return SQLITE_ERROR;
+ }
+ pRet = (WalIndexHdr*)sqlite3_malloc(sizeof(WalIndexHdr));
+ if( pRet==0 ){
+ rc = SQLITE_NOMEM_BKPT;
+ }else{
+ memcpy(pRet, &pWal->hdr, sizeof(WalIndexHdr));
+ *ppSnapshot = (sqlite3_snapshot*)pRet;
+ }
+
+ return rc;
+}
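+
+/* Sketch (not compiled) of the public interface built on the routine
+** above: capture a snapshot inside one read transaction, then reopen
+** the same snapshot in a later one. "db" is an open handle on a
+** WAL-mode database.
+*/
+#if 0
+  sqlite3_snapshot *pSnap = 0;
+  sqlite3_exec(db, "BEGIN", 0, 0, 0);
+  sqlite3_exec(db, "SELECT 1 FROM sqlite_master", 0, 0, 0); /* open read txn */
+  rc = sqlite3_snapshot_get(db, "main", &pSnap);
+  sqlite3_exec(db, "COMMIT", 0, 0, 0);
+
+  /* ... later: read the database as of the captured snapshot ... */
+  sqlite3_exec(db, "BEGIN", 0, 0, 0);
+  rc = sqlite3_snapshot_open(db, "main", pSnap);
+  /* SELECTs now see the old snapshot until COMMIT or ROLLBACK */
+  sqlite3_snapshot_free(pSnap);
+#endif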
+
+/* Try to open the snapshot pSnapshot when the next read-transaction starts
+*/
+SQLITE_PRIVATE void sqlite3WalSnapshotOpen(
+ Wal *pWal,
+ sqlite3_snapshot *pSnapshot
+){
+ pWal->pSnapshot = (WalIndexHdr*)pSnapshot;
+}
+
+/*
+** Return a +ve value if snapshot p1 is newer than p2. A -ve value if
+** p1 is older than p2 and zero if p1 and p2 are the same snapshot.
+*/
+SQLITE_API int sqlite3_snapshot_cmp(sqlite3_snapshot *p1, sqlite3_snapshot *p2){
+ WalIndexHdr *pHdr1 = (WalIndexHdr*)p1;
+ WalIndexHdr *pHdr2 = (WalIndexHdr*)p2;
+
+ /* aSalt[0] is a copy of the value stored in the wal file header. It
+ ** is incremented each time the wal file is restarted. */
+ if( pHdr1->aSalt[0]<pHdr2->aSalt[0] ) return -1;
+ if( pHdr1->aSalt[0]>pHdr2->aSalt[0] ) return +1;
+ if( pHdr1->mxFrame<pHdr2->mxFrame ) return -1;
+ if( pHdr1->mxFrame>pHdr2->mxFrame ) return +1;
+ return 0;
+}
+
+/*
+** The caller currently has a read transaction open on the database.
+** This function takes a SHARED lock on the CHECKPOINTER slot and then
+** checks if the snapshot passed as the second argument is still
+** available. If so, SQLITE_OK is returned.
+**
+** If the snapshot is not available, SQLITE_ERROR_SNAPSHOT is returned.
+** Or, if the CHECKPOINTER lock cannot be obtained, SQLITE_BUSY. If any
+** error occurs (any value other than SQLITE_OK is returned), the
+** CHECKPOINTER lock is released before returning.
+*/
+SQLITE_PRIVATE int sqlite3WalSnapshotCheck(Wal *pWal, sqlite3_snapshot *pSnapshot){
+ int rc;
+ rc = walLockShared(pWal, WAL_CKPT_LOCK);
+ if( rc==SQLITE_OK ){
+ WalIndexHdr *pNew = (WalIndexHdr*)pSnapshot;
+ if( memcmp(pNew->aSalt, pWal->hdr.aSalt, sizeof(pWal->hdr.aSalt))
+ || pNew->mxFrame<walCkptInfo(pWal)->nBackfillAttempted
+ ){
+ rc = SQLITE_ERROR_SNAPSHOT;
+ walUnlockShared(pWal, WAL_CKPT_LOCK);
+ }
+ }
+ return rc;
+}
+
+/*
+** Release a lock obtained by an earlier successful call to
+** sqlite3WalSnapshotCheck().
+*/
+SQLITE_PRIVATE void sqlite3WalSnapshotUnlock(Wal *pWal){
+ assert( pWal );
+ walUnlockShared(pWal, WAL_CKPT_LOCK);
+}
+
+
+#endif /* SQLITE_ENABLE_SNAPSHOT */
+
+#ifdef SQLITE_ENABLE_ZIPVFS
+/*
+** If the argument is not NULL, it points to a Wal object that holds a
+** read-lock. This function returns the database page-size if it is known,
+** or zero if it is not (or if pWal is NULL).
+*/
+SQLITE_PRIVATE int sqlite3WalFramesize(Wal *pWal){
+ assert( pWal==0 || pWal->readLock>=0 );
+ return (pWal ? pWal->szPage : 0);
+}
+#endif
+
+/* Return the sqlite3_file object for the WAL file
+*/
+SQLITE_PRIVATE sqlite3_file *sqlite3WalFile(Wal *pWal){
+ return pWal->pWalFd;
+}
+
+#endif /* #ifndef SQLITE_OMIT_WAL */
+
+/************** End of wal.c *************************************************/
+/************** Begin file btmutex.c *****************************************/
+/*
+** 2007 August 27
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+**
+** This file contains code used to implement mutexes on Btree objects.
+** This code really belongs in btree.c. But btree.c is getting too
+** big and we want to break it down some. This packaged seemed like
+** a good breakout.
+*/
+/************** Include btreeInt.h in the middle of btmutex.c ****************/
+/************** Begin file btreeInt.h ****************************************/
+/*
+** 2004 April 6
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+** This file implements an external (disk-based) database using BTrees.
+** For a detailed discussion of BTrees, refer to
+**
+** Donald E. Knuth, THE ART OF COMPUTER PROGRAMMING, Volume 3:
+** "Sorting And Searching", pages 473-480. Addison-Wesley
+** Publishing Company, Reading, Massachusetts.
+**
+** The basic idea is that each page of the file contains N database
+** entries and N+1 pointers to subpages.
+**
+** ----------------------------------------------------------------
+** | Ptr(0) | Key(0) | Ptr(1) | Key(1) | ... | Key(N-1) | Ptr(N) |
+** ----------------------------------------------------------------
+**
+** All of the keys on the page that Ptr(0) points to have values less
+** than Key(0). All of the keys on page Ptr(1) and its subpages have
+** values greater than Key(0) and less than Key(1). All of the keys
+** on Ptr(N) and its subpages have values greater than Key(N-1). And
+** so forth.
+**
+** Finding a particular key requires reading O(log(M)) pages from the
+** disk where M is the number of entries in the tree.
+**
+** In this implementation, a single file can hold one or more separate
+** BTrees. Each BTree is identified by the index of its root page. The
+** key and data for any entry are combined to form the "payload". A
+** fixed amount of payload can be carried directly on the database
+** page. If the payload is larger than the preset amount then surplus
+** bytes are stored on overflow pages. The payload for an entry
+** and the preceding pointer are combined to form a "Cell". Each
+** page has a small header which contains the Ptr(N) pointer and other
+** information such as the size of key and data.
+**
+** FORMAT DETAILS
+**
+** The file is divided into pages. The first page is called page 1,
+** the second is page 2, and so forth. A page number of zero indicates
+** "no such page". The page size can be any power of 2 between 512 and 65536.
+** Each page can be either a btree page, a freelist page, an overflow
+** page, or a pointer-map page.
+**
+** The first page is always a btree page. The first 100 bytes of the first
+** page contain a special header (the "file header") that describes the file.
+** The format of the file header is as follows:
+**
+** OFFSET SIZE DESCRIPTION
+** 0 16 Header string: "SQLite format 3\000"
+** 16 2 Page size in bytes. (1 means 65536)
+** 18 1 File format write version
+** 19 1 File format read version
+** 20 1 Bytes of unused space at the end of each page
+** 21 1 Max embedded payload fraction (must be 64)
+** 22 1 Min embedded payload fraction (must be 32)
+** 23 1 Min leaf payload fraction (must be 32)
+** 24 4 File change counter
+** 28 4 Reserved for future use
+** 32 4 First freelist page
+** 36 4 Number of freelist pages in the file
+** 40 60 15 4-byte meta values passed to higher layers
+**
+** 40 4 Schema cookie
+** 44 4 File format of schema layer
+** 48 4 Size of page cache
+** 52 4 Largest root-page (auto/incr_vacuum)
+** 56 4 1=UTF-8 2=UTF16le 3=UTF16be
+** 60 4 User version
+** 64 4 Incremental vacuum mode
+** 68 4 Application-ID
+** 72 20 unused
+** 92 4 The version-valid-for number
+** 96 4 SQLITE_VERSION_NUMBER
+**
+** All of the integer values are big-endian (most significant byte first).
+** (An illustrative sketch that reads these header fields appears after
+** the SQLITE_FILE_HEADER definition below.)
+**
+** The file change counter is incremented when the database is changed.
+** This counter allows other processes to know when the file has changed
+** and thus when they need to flush their cache.
+**
+** The max embedded payload fraction is the amount of the total usable
+** space in a page that can be consumed by a single cell for standard
+** B-tree (non-LEAFDATA) tables. A value of 255 means 100%. The default
+** is to limit the maximum cell size so that at least 4 cells will fit
+** on one page. Thus the default max embedded payload fraction is 64.
+**
+** If the payload for a cell is larger than the max payload, then extra
+** payload is spilled to overflow pages. Once an overflow page is allocated,
+** as many bytes as possible are moved into the overflow pages without letting
+** the cell size drop below the min embedded payload fraction.
+**
+** The min leaf payload fraction is like the min embedded payload fraction
+** except that it applies to leaf nodes in a LEAFDATA tree. The maximum
+** payload fraction for a LEAFDATA tree is always 100% (or 255) and it
+** is not specified in the header.
+**
+** Each btree page is divided into three sections: the header, the
+** cell pointer array, and the cell content area. Page 1 also has a 100-byte
+** file header that occurs before the page header.
+**
+** |----------------|
+** | file header | 100 bytes. Page 1 only.
+** |----------------|
+** | page header | 8 bytes for leaves. 12 bytes for interior nodes
+** |----------------|
+** | cell pointer | | 2 bytes per cell. Sorted order.
+** | array | | Grows downward
+** | | v
+** |----------------|
+** | unallocated |
+** | space |
+** |----------------| ^ Grows upwards
+** | cell content | | Arbitrary order interspersed with freeblocks.
+** | area | | and free space fragments.
+** |----------------|
+**
+** The page header looks like this:
+**
+** OFFSET SIZE DESCRIPTION
+** 0 1 Flags. 1: intkey, 2: zerodata, 4: leafdata, 8: leaf
+** 1 2 byte offset to the first freeblock
+** 3 2 number of cells on this page
+** 5 2 first byte of the cell content area
+** 7 1 number of fragmented free bytes
+** 8 4 Right child (the Ptr(N) value). Omitted on leaves.
+**
+** The flags define the format of this btree page. The leaf flag means that
+** this page has no children. The zerodata flag means that this page carries
+** only keys and no data. The intkey flag means that the key is an integer
+** which is stored in the key size entry of the cell header rather than in
+** the payload area. (An illustrative page-header decoder appears after
+** the PTF_* flag definitions below.)
+**
+** The cell pointer array begins on the first byte after the page header.
+** The cell pointer array contains zero or more 2-byte numbers which are
+** offsets from the beginning of the page to the cell content in the cell
+** content area. The cell pointers occur in sorted order. The system strives
+** to keep free space after the last cell pointer so that new cells can
+** be easily added without having to defragment the page.
+**
+** Cell content is stored at the very end of the page and grows toward the
+** beginning of the page.
+**
+** Unused space within the cell content area is collected into a linked list of
+** freeblocks. Each freeblock is at least 4 bytes in size. The byte offset
+** to the first freeblock is given in the header. Freeblocks occur in
+** increasing order. Because a freeblock must be at least 4 bytes in size,
+** any group of 3 or fewer unused bytes in the cell content area cannot
+** exist on the freeblock chain. A group of 3 or fewer free bytes is called
+** a fragment. The total number of bytes in all fragments is recorded
+** in the page header at offset 7. Each freeblock begins with a 4-byte
+** header:
+**
+** SIZE DESCRIPTION
+** 2 Byte offset of the next freeblock
+** 2 Bytes in this freeblock
+**
+** Cells are of variable length. Cells are stored in the cell content area at
+** the end of the page. Pointers to the cells are in the cell pointer array
+** that immediately follows the page header. Cells are not necessarily
+** contiguous or in order, but cell pointers are contiguous and in order.
+**
+** Cell content makes use of variable length integers. A variable
+** length integer is 1 to 9 bytes where the lower 7 bits of each
+** byte are used. The integer consists of all bytes that have bit 8 set and
+** the first byte with bit 8 clear. The most significant byte of the integer
+** appears first. A variable-length integer may not be more than 9 bytes long.
+** As a special case, all 8 bits of the 9th byte are used as data. This
+** allows a 64-bit integer to be encoded in 9 bytes.
+**
+** 0x00 becomes 0x00000000
+** 0x7f becomes 0x0000007f
+** 0x81 0x00 becomes 0x00000080
+** 0x82 0x00 becomes 0x00000100
+** 0x80 0x7f becomes 0x0000007f
+** 0x81 0x91 0xd1 0xac 0x78 becomes 0x12345678
+** 0x81 0x81 0x81 0x81 0x01 becomes 0x10204081
+**
+** Variable length integers are used for rowids and to hold the number of
+** bytes of key and data in a btree cell. (An illustrative decoder sketch
+** appears just after this comment.)
+**
+** The content of a cell looks like this:
+**
+** SIZE DESCRIPTION
+** 4 Page number of the left child. Omitted if leaf flag is set.
+** var Number of bytes of data. Omitted if the zerodata flag is set.
+** var Number of bytes of key. Or the key itself if intkey flag is set.
+** * Payload
+** 4 First page of the overflow chain. Omitted if no overflow
+**
+** Overflow pages form a linked list. Each page except the last is completely
+** filled with data (pagesize - 4 bytes). The last page can have as little
+** as 1 byte of data.
+**
+** SIZE DESCRIPTION
+** 4 Page number of next overflow page
+** * Data
+**
+** Freelist pages come in two subtypes: trunk pages and leaf pages. The
+** file header points to the first in a linked list of trunk pages. Each trunk
+** page points to multiple leaf pages. The content of a leaf page is
+** unspecified. A trunk page looks like this:
+**
+** SIZE DESCRIPTION
+** 4 Page number of next trunk page
+** 4 Number of leaf pointers on this page
+** * zero or more page numbers of leaves
+*/
+/* #include "sqliteInt.h" */
+
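+/* The variable-length integer format described in the header comment
+** above can be decoded with a short loop. The sketch below is purely
+** illustrative (the real decoder in this amalgamation is
+** sqlite3GetVarint(); the name demoGetVarint is made up) and the block
+** is compiled out.
+*/
+#if 0
+static int demoGetVarint(const unsigned char *p, unsigned long long *pVal){
+  unsigned long long v = 0;
+  int i;
+  for(i=0; i<8; i++){
+    v = (v<<7) | (p[i] & 0x7f);        /* lower 7 bits of each byte */
+    if( (p[i] & 0x80)==0 ){            /* first byte with bit 8 clear ends it */
+      *pVal = v;
+      return i+1;                      /* number of bytes consumed */
+    }
+  }
+  *pVal = (v<<8) | p[8];               /* 9th byte: all 8 bits are data,  */
+  return 9;                            /* giving 56+8 = 64 bits in total  */
+}
+#endif
+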
+
+/* The following value is the maximum cell size assuming a maximum page
+** size given above.
+*/
+#define MX_CELL_SIZE(pBt) ((int)(pBt->pageSize-8))
+
+/* The maximum number of cells on a single page of the database. This
+** assumes a minimum cell size of 6 bytes (4 bytes for the cell itself
+** plus 2 bytes for the cell's entry in the cell pointer array). Such
+** small cells will be rare, but they are possible.
+*/
+#define MX_CELL(pBt) ((pBt->pageSize-8)/6)
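+
+/* Worked numbers, added for illustration only (not used by the code):
+** with a 4096-byte page, MX_CELL_SIZE evaluates to 4096-8 = 4088 and
+** MX_CELL to (4096-8)/6 = 681.
+*/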
+
+/* Forward declarations */
+typedef struct MemPage MemPage;
+typedef struct BtLock BtLock;
+typedef struct CellInfo CellInfo;
+
+/*
+** This is a magic string that appears at the beginning of every
+** SQLite database in order to identify the file as a real database.
+**
+** You can change this value at compile-time by specifying a
+** -DSQLITE_FILE_HEADER="..." on the compiler command-line. The
+** header must be exactly 16 bytes including the zero-terminator so
+** the string itself should be 15 characters long. If you change
+** the header, then your custom library will not be able to read
+** databases generated by the standard tools and the standard tools
+** will not be able to read databases created by your custom library.
+*/
+#ifndef SQLITE_FILE_HEADER /* 123456789 123456 */
+# define SQLITE_FILE_HEADER "SQLite format 3"
+#endif
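+
+/* Illustrative sketch (not part of SQLite; the function name is made up
+** and the block is compiled out): validating the magic string and reading
+** the big-endian page size at offset 16 of the file header described in
+** the btreeInt.h header comment. memcmp() is assumed available via the
+** headers this amalgamation already includes.
+*/
+#if 0
+static int demoReadPageSize(const unsigned char *aHdr){ /* >=18 header bytes */
+  unsigned int pgsz;
+  if( memcmp(aHdr, SQLITE_FILE_HEADER, 16)!=0 ) return -1; /* not a database */
+  pgsz = (aHdr[16]<<8) | aHdr[17];  /* big-endian, most significant byte first */
+  if( pgsz==1 ) pgsz = 65536;       /* 65536 does not fit in 2 bytes, so it is stored as 1 */
+  return (int)pgsz;
+}
+#endif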
+
+/*
+** Page type flags. An ORed combination of these flags appears as the
+** first byte of the on-disk image of every BTree page.
+*/
+#define PTF_INTKEY 0x01
+#define PTF_ZERODATA 0x02
+#define PTF_LEAFDATA 0x04
+#define PTF_LEAF 0x08
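+
+/* Illustrative sketch (not part of SQLite; the function name is made up
+** and the block is compiled out): decoding the page header described in
+** the btreeInt.h header comment and totalling free space by walking the
+** freeblock chain.
+*/
+#if 0
+static int demoPageFreeBytes(const unsigned char *aPg){
+  /* aPg points at a page other than page 1, so the page header is at
+  ** offset 0 and freeblock offsets index directly into aPg. Assumes a
+  ** well-formed page; the real code guards against corrupt chains. */
+  int iFree = (aPg[1]<<8) | aPg[2];   /* offset of first freeblock, 0 if none */
+  int nFree = aPg[7];                 /* start with the fragmented free bytes */
+  if( (aPg[0] & PTF_LEAF)==0 ){
+    /* interior pages have a 12-byte header ending in the right-child pgno */
+    unsigned int rightChild = ((unsigned int)aPg[8]<<24) | (aPg[9]<<16)
+                            | (aPg[10]<<8) | aPg[11];
+    (void)rightChild;
+  }
+  while( iFree ){
+    nFree += (aPg[iFree+2]<<8) | aPg[iFree+3];  /* bytes in this freeblock */
+    iFree  = (aPg[iFree]<<8)   | aPg[iFree+1];  /* next freeblock, 0 at end */
+  }
+  return nFree;
+}
+#endif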
+
+/*
+** An instance of this object stores information about a single database
+** page that has been loaded into memory. The information in this object
+** is derived from the raw on-disk page content.
+**
+** As each database page is loaded into memory, the pager allocates an
+** instance of this object and zeros the first 8 bytes. (This is the
+** "extra" information associated with each page of the pager.)
+**
+** Access to all fields of this structure is controlled by the mutex
+** stored in MemPage.pBt->mutex.
+*/
+struct MemPage {
+ u8 isInit; /* True if previously initialized. MUST BE FIRST! */
+ u8 bBusy; /* Prevent endless loops on corrupt database files */
+ u8 intKey; /* True for table b-trees; false for index b-trees */
+ u8 intKeyLeaf; /* True if the leaf of an intKey table */
+ Pgno pgno; /* Page number for this page */
+ /* Only the first 8 bytes (above) are zeroed by pager.c when a new page
+ ** is allocated. All fields that follow must be initialized before use */
+ u8 leaf; /* True if a leaf page */
+ u8 hdrOffset; /* 100 for page 1. 0 otherwise */
+ u8 childPtrSize; /* 0 if leaf==1. 4 if leaf==0 */
+ u8 max1bytePayload; /* min(maxLocal,127) */
+ u8 nOverflow; /* Number of overflow cell bodies in aCell[] */
+ u16 maxLocal; /* Copy of BtShared.maxLocal or BtShared.maxLeaf */
+ u16 minLocal; /* Copy of BtShared.minLocal or BtShared.minLeaf */
+ u16 cellOffset; /* Index in aData of first cell pointer */
+ int nFree; /* Number of free bytes on the page. -1 for unknown */
+ u16 nCell; /* Number of cells on this page, local and ovfl */
+ u16 maskPage; /* Mask for page offset */
+ u16 aiOvfl[4]; /* Insert the i-th overflow cell before the aiOvfl[i]-th
+ ** non-overflow cell */
+ u8 *apOvfl[4]; /* Pointers to the body of overflow cells */
+ BtShared *pBt; /* Pointer to BtShared that this page is part of */
+ u8 *aData; /* Pointer to disk image of the page data */
+ u8 *aDataEnd; /* One byte past the end of usable data */
+ u8 *aCellIdx; /* The cell index area */
+ u8 *aDataOfst; /* Same as aData for leaves. aData+4 for interior */
+ DbPage *pDbPage; /* Pager page handle */
+ u16 (*xCellSize)(MemPage*,u8*); /* cellSizePtr method */
+ void (*xParseCell)(MemPage*,u8*,CellInfo*); /* btreeParseCell method */
+};
+
+/*
+** A linked list of the following structures is stored at BtShared.pLock.
+** Locks are added (or upgraded from READ_LOCK to WRITE_LOCK) when a cursor
+** is opened on the table with root page BtShared.iTable. Locks are removed
+** from this list when a transaction is committed or rolled back, or when
+** a btree handle is closed.
+*/
+struct BtLock {
+ Btree *pBtree; /* Btree handle holding this lock */
+ Pgno iTable; /* Root page of table */
+ u8 eLock; /* READ_LOCK or WRITE_LOCK */
+ BtLock *pNext; /* Next in BtShared.pLock list */
+};
+
+/* Candidate values for BtLock.eLock */
+#define READ_LOCK 1
+#define WRITE_LOCK 2
+
+/* A Btree handle
+**
+** A database connection contains a pointer to an instance of
+** this object for every database file that it has open. This structure
+** is opaque to the database connection. The database connection cannot
+** see the internals of this structure and only deals with pointers to
+** this structure.
+**
+** For some database files, the same underlying database cache might be
+** shared between multiple connections. In that case, each connection
+** has its own instance of this object. But each instance of this object
+** points to the same BtShared object. The database cache and the
+** schema associated with the database file are all contained within
+** the BtShared object.
+**
+** All fields in this structure are accessed under sqlite3.mutex.
+** The pBt pointer itself may not be changed while there exist cursors
+** in the referenced BtShared that point back to this Btree since those
+** cursors have to go through this Btree to find their BtShared and
+** they often do so without holding sqlite3.mutex.
+*/
+struct Btree {
+ sqlite3 *db; /* The database connection holding this btree */
+ BtShared *pBt; /* Sharable content of this btree */
+ u8 inTrans; /* TRANS_NONE, TRANS_READ or TRANS_WRITE */
+ u8 sharable; /* True if we can share pBt with another db */
+ u8 locked; /* True if db currently has pBt locked */
+ u8 hasIncrblobCur; /* True if there are one or more Incrblob cursors */
+ int wantToLock; /* Number of nested calls to sqlite3BtreeEnter() */
+ int nBackup; /* Number of backup operations reading this btree */
+ u32 iDataVersion; /* Combines with pBt->pPager->iDataVersion */
+ Btree *pNext; /* List of other sharable Btrees from the same db */
+ Btree *pPrev; /* Back pointer of the same list */
+#ifndef SQLITE_OMIT_SHARED_CACHE
+ BtLock lock; /* Object used to lock page 1 */
+#endif
+};
+
+/*
+** Btree.inTrans may take one of the following values.
+**
+** If the shared-cache feature is enabled, there may be multiple users
+** of the Btree structure. At most one of these may open a write transaction,
+** but any number may have active read transactions.
+*/
+#define TRANS_NONE 0
+#define TRANS_READ 1
+#define TRANS_WRITE 2
+
+/*
+** An instance of this object represents a single database file.
+**
+** A single database file can be in use at the same time by two
+** or more database connections. When two or more connections are
+** sharing the same database file, each connection has its own
+** private Btree object for the file and each of those Btrees points
+** to this one BtShared object. BtShared.nRef is the number of
+** connections currently sharing this database file.
+**
+** Fields in this structure are accessed under the BtShared.mutex
+** mutex, except for nRef and pNext which are accessed under the
+** global SQLITE_MUTEX_STATIC_MAIN mutex. The pPager field
+** may not be modified once it is initially set as long as nRef>0.
+** The pSchema field may be set once under BtShared.mutex and
+** thereafter is unchanged as long as nRef>0.
+**
+** isPending:
+**
+** If a BtShared client fails to obtain a write-lock on a database
+** table (because there exists one or more read-locks on the table),
+** the shared-cache enters 'pending-lock' state and isPending is
+** set to true.
+**
+** The shared-cache leaves the 'pending-lock' state when either of
+** the following occur:
+**
+** 1) The current writer (BtShared.pWriter) concludes its transaction, OR
+** 2) The number of locks held by other connections drops to zero.
+**
+** While in the 'pending-lock' state, no connection may start a new
+** transaction.
+**
+** This feature is included to help prevent writer-starvation.
+*/
+struct BtShared {
+ Pager *pPager; /* The page cache */
+ sqlite3 *db; /* Database connection currently using this Btree */
+ BtCursor *pCursor; /* A list of all open cursors */
+ MemPage *pPage1; /* First page of the database */
+ u8 openFlags; /* Flags to sqlite3BtreeOpen() */
+#ifndef SQLITE_OMIT_AUTOVACUUM
+ u8 autoVacuum; /* True if auto-vacuum is enabled */
+ u8 incrVacuum; /* True if incr-vacuum is enabled */
+ u8 bDoTruncate; /* True to truncate db on commit */
+#endif
+ u8 inTransaction; /* Transaction state */
+ u8 max1bytePayload; /* Maximum first byte of cell for a 1-byte payload */
+ u8 nReserveWanted; /* Desired number of extra bytes per page */
+ u16 btsFlags; /* Boolean parameters. See BTS_* macros below */
+ u16 maxLocal; /* Maximum local payload in non-LEAFDATA tables */
+ u16 minLocal; /* Minimum local payload in non-LEAFDATA tables */
+ u16 maxLeaf; /* Maximum local payload in a LEAFDATA table */
+ u16 minLeaf; /* Minimum local payload in a LEAFDATA table */
+ u32 pageSize; /* Total number of bytes on a page */
+ u32 usableSize; /* Number of usable bytes on each page */
+ int nTransaction; /* Number of open transactions (read + write) */
+ u32 nPage; /* Number of pages in the database */
+ void *pSchema; /* Pointer to space allocated by sqlite3BtreeSchema() */
+ void (*xFreeSchema)(void*); /* Destructor for BtShared.pSchema */
+ sqlite3_mutex *mutex; /* Non-recursive mutex required to access this object */
+ Bitvec *pHasContent; /* Set of pages moved to free-list this transaction */
+#ifndef SQLITE_OMIT_SHARED_CACHE
+ int nRef; /* Number of references to this structure */
+ BtShared *pNext; /* Next on a list of sharable BtShared structs */
+ BtLock *pLock; /* List of locks held on this shared-btree struct */
+ Btree *pWriter; /* Btree with currently open write transaction */
+#endif
+ u8 *pTmpSpace; /* Temp space sufficient to hold a single cell */
+};
+
+/*
+** Allowed values for BtShared.btsFlags
+*/
+#define BTS_READ_ONLY 0x0001 /* Underlying file is readonly */
+#define BTS_PAGESIZE_FIXED 0x0002 /* Page size can no longer be changed */
+#define BTS_SECURE_DELETE 0x0004 /* PRAGMA secure_delete is enabled */
+#define BTS_OVERWRITE 0x0008 /* Overwrite deleted content with zeros */
+#define BTS_FAST_SECURE 0x000c /* Combination of the previous two */
+#define BTS_INITIALLY_EMPTY 0x0010 /* Database was empty at trans start */
+#define BTS_NO_WAL 0x0020 /* Do not open write-ahead-log files */
+#define BTS_EXCLUSIVE 0x0040 /* pWriter has an exclusive lock */
+#define BTS_PENDING 0x0080 /* Waiting for read-locks to clear */
+
+/*
+** An instance of the following structure is used to hold information
+** about a cell. The parseCellPtr() function fills in this structure
+** based on information extracted from the raw disk page.
+*/
+struct CellInfo {
+ i64 nKey; /* The key for INTKEY tables, or nPayload otherwise */
+ u8 *pPayload; /* Pointer to the start of payload */
+ u32 nPayload; /* Bytes of payload */
+ u16 nLocal; /* Amount of payload held locally, not on overflow */
+ u16 nSize; /* Size of the cell content on the main b-tree page */
+};
+
+/*
+** Maximum depth of an SQLite B-Tree structure. Any B-Tree deeper than
+** this will be declared corrupt. This value is calculated based on a
+** maximum database size of 2^31 pages, a minimum fanout of 2 for the
+** root node, and 3 for all other internal nodes.
+**
+** If a tree that appears to be taller than this is encountered, it is
+** assumed that the database is corrupt.
+*/
+#define BTCURSOR_MAX_DEPTH 20
+
+/*
+** A cursor is a pointer to a particular entry within a particular
+** b-tree within a database file.
+**
+** The entry is identified by its MemPage and the index in
+** MemPage.aCell[] of the entry.
+**
+** A single database file can be shared by two or more database connections,
+** but cursors cannot be shared. Each cursor is associated with a
+** particular database connection identified by BtCursor.pBtree.db.
+**
+** Fields in this structure are accessed under the BtShared.mutex
+** found at self->pBt->mutex.
+**
+** skipNext meaning:
+** The meaning of skipNext depends on the value of eState:
+**
+** eState Meaning of skipNext
+** VALID skipNext is meaningless and is ignored
+** INVALID skipNext is meaningless and is ignored
+** SKIPNEXT sqlite3BtreeNext() is a no-op if skipNext>0 and
+** sqlite3BtreePrevious() is a no-op if skipNext<0.
+** REQUIRESEEK restoreCursorPosition() restores the cursor to
+** eState=SKIPNEXT if skipNext!=0
+** FAULT skipNext holds the cursor fault error code.
+*/
+struct BtCursor {
+ u8 eState; /* One of the CURSOR_XXX constants (see below) */
+ u8 curFlags; /* zero or more BTCF_* flags defined below */
+ u8 curPagerFlags; /* Flags to send to sqlite3PagerGet() */
+ u8 hints; /* As configured by CursorSetHints() */
+ int skipNext; /* Prev() is noop if negative. Next() is noop if positive.
+ ** Error code if eState==CURSOR_FAULT */
+ Btree *pBtree; /* The Btree to which this cursor belongs */
+ Pgno *aOverflow; /* Cache of overflow page locations */
+ void *pKey; /* Saved key giving the cursor's last known position */
+ /* All fields above are zeroed when the cursor is allocated. See
+ ** sqlite3BtreeCursorZero(). Fields that follow must be manually
+ ** initialized. */
+#define BTCURSOR_FIRST_UNINIT pBt /* Name of first uninitialized field */
+ BtShared *pBt; /* The BtShared this cursor points to */
+ BtCursor *pNext; /* Forms a linked list of all cursors */
+ CellInfo info; /* A parse of the cell we are pointing at */
+ i64 nKey; /* Size of pKey, or last integer key */
+ Pgno pgnoRoot; /* The root page of this tree */
+ i8 iPage; /* Index of current page in apPage */
+ u8 curIntKey; /* Value of apPage[0]->intKey */
+ u16 ix; /* Current index for apPage[iPage] */
+ u16 aiIdx[BTCURSOR_MAX_DEPTH-1]; /* Current index in apPage[i] */
+ struct KeyInfo *pKeyInfo; /* Arg passed to comparison function */
+ MemPage *pPage; /* Current page */
+ MemPage *apPage[BTCURSOR_MAX_DEPTH-1]; /* Stack of parents of current page */
+};
+
+/*
+** Legal values for BtCursor.curFlags
+*/
+#define BTCF_WriteFlag 0x01 /* True if a write cursor */
+#define BTCF_ValidNKey 0x02 /* True if info.nKey is valid */
+#define BTCF_ValidOvfl 0x04 /* True if aOverflow is valid */
+#define BTCF_AtLast 0x08 /* Cursor is pointing at the last entry */
+#define BTCF_Incrblob 0x10 /* True if an incremental I/O handle */
+#define BTCF_Multiple 0x20 /* Maybe another cursor on the same btree */
+#define BTCF_Pinned 0x40 /* Cursor is busy and cannot be moved */
+
+/*
+** Potential values for BtCursor.eState.
+**
+** CURSOR_INVALID:
+** Cursor does not point to a valid entry. This can happen (for example)
+** because the table is empty or because BtreeCursorFirst() has not been
+** called.
+**
+** CURSOR_VALID:
+** Cursor points to a valid entry. getPayload() etc. may be called.
+**
+** CURSOR_SKIPNEXT:
+** Cursor is valid except that the Cursor.skipNext field is non-zero
+** indicating that the next sqlite3BtreeNext() or sqlite3BtreePrevious()
+** operation should be a no-op.
+**
+** CURSOR_REQUIRESEEK:
+** The table that this cursor was opened on still exists, but has been
+** modified since the cursor was last used. The cursor position is saved
+** in variables BtCursor.pKey and BtCursor.nKey. When a cursor is in
+** this state, restoreCursorPosition() can be called to attempt to
+** seek the cursor to the saved position.
+**
+** CURSOR_FAULT:
+** An unrecoverable error (an I/O error or a malloc failure) has occurred
+** on a different connection that shares the BtShared cache with this
+** cursor. The error has left the cache in an inconsistent state.
+** Do nothing else with this cursor. Any attempt to use the cursor
+** should return the error code stored in BtCursor.skipNext
+*/
+#define CURSOR_VALID 0
+#define CURSOR_INVALID 1
+#define CURSOR_SKIPNEXT 2
+#define CURSOR_REQUIRESEEK 3
+#define CURSOR_FAULT 4
+
+/*
+** The database page the PENDING_BYTE occupies. This page is never used.
+*/
+# define PENDING_BYTE_PAGE(pBt) PAGER_MJ_PGNO(pBt)
+
+/*
+** These macros define the location of the pointer-map entry for a
+** database page. The first argument to each is the number of usable
+** bytes on each page of the database (often 1024). The second is the
+** page number to look up in the pointer map.
+**
+** PTRMAP_PAGENO returns the database page number of the pointer-map
+** page that stores the required pointer. PTRMAP_PTROFFSET returns
+** the offset of the requested map entry.
+**
+** If the pgno argument passed to PTRMAP_PAGENO is a pointer-map page,
+** then pgno is returned. So (pgno==PTRMAP_PAGENO(pgsz, pgno)) can be
+** used to test if pgno is a pointer-map page. PTRMAP_ISPAGE implements
+** this test.
+*/
+#define PTRMAP_PAGENO(pBt, pgno) ptrmapPageno(pBt, pgno)
+#define PTRMAP_PTROFFSET(pgptrmap, pgno) (5*(pgno-pgptrmap-1))
+#define PTRMAP_ISPAGE(pBt, pgno) (PTRMAP_PAGENO((pBt),(pgno))==(pgno))
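+
+/* Illustrative sketch (not part of SQLite; the function name is made up
+** and the block is compiled out): locating the 5-byte pointer-map entry
+** for page pgno using the macros above, assuming pgno is not itself a
+** pointer-map page. ptrmapPageno() is defined later in this file.
+*/
+#if 0
+static void demoPtrmapLocate(BtShared *pBt, Pgno pgno){
+  Pgno iPtrmap = PTRMAP_PAGENO(pBt, pgno);        /* page holding the entry */
+  int iOffset  = PTRMAP_PTROFFSET(iPtrmap, pgno); /* byte offset within it */
+  assert( !PTRMAP_ISPAGE(pBt, pgno) );  /* pointer-map pages have no entry */
+  (void)iOffset;
+}
+#endif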
+
+/*
+** The pointer map is a lookup table that identifies the parent page for
+** each child page in the database file. The parent page is the page that
+** contains a pointer to the child. Every page in the database has
+** 0 or 1 parent pages. (In this context 'database page' refers
+** to any page that is not part of the pointer map itself.) Each pointer map
+** entry consists of a single byte 'type' and a 4 byte parent page number.
+** The PTRMAP_XXX identifiers below are the valid types.
+**
+** The purpose of the pointer map is to facilitate moving pages from one
+** position in the file to another as part of autovacuum. When a page
+** is moved, the pointer in its parent must be updated to point to the
+** new location. The pointer map is used to locate the parent page quickly.
+**
+** PTRMAP_ROOTPAGE: The database page is a root-page. The page-number is not
+** used in this case.
+**
+** PTRMAP_FREEPAGE: The database page is an unused (free) page. The page-number
+** is not used in this case.
+**
+** PTRMAP_OVERFLOW1: The database page is the first page in a list of
+** overflow pages. The page number identifies the page that
+** contains the cell with a pointer to this overflow page.
+**
+** PTRMAP_OVERFLOW2: The database page is the second or later page in a list of
+** overflow pages. The page-number identifies the previous
+** page in the overflow page list.
+**
+** PTRMAP_BTREE: The database page is a non-root btree page. The page number
+** identifies the parent page in the btree.
+*/
+#define PTRMAP_ROOTPAGE 1
+#define PTRMAP_FREEPAGE 2
+#define PTRMAP_OVERFLOW1 3
+#define PTRMAP_OVERFLOW2 4
+#define PTRMAP_BTREE 5
+
+/* A bunch of assert() statements to check the transaction state variables
+** of handle p (type Btree*) are internally consistent.
+*/
+#define btreeIntegrity(p) \
+ assert( p->pBt->inTransaction!=TRANS_NONE || p->pBt->nTransaction==0 ); \
+ assert( p->pBt->inTransaction>=p->inTrans );
+
+
+/*
+** The ISAUTOVACUUM macro is used within balance_nonroot() to determine
+** if the database supports auto-vacuum or not. Because it is used
+** within an expression that is an argument to another macro
+** (sqliteMallocRaw), it is not possible to use conditional compilation.
+** So, this macro is defined instead.
+*/
+#ifndef SQLITE_OMIT_AUTOVACUUM
+#define ISAUTOVACUUM (pBt->autoVacuum)
+#else
+#define ISAUTOVACUUM 0
+#endif
+
+
+/*
+** This structure is passed around through all the sanity checking routines
+** in order to keep track of some global state information.
+**
+** The aRef[] array is allocated so that there is 1 bit for each page in
+** the database. As the integrity-check proceeds, for each page used in
+** the database the corresponding bit is set. This allows integrity-check to
+** detect pages that are used twice and orphaned pages (both of which
+** indicate corruption).
+*/
+typedef struct IntegrityCk IntegrityCk;
+struct IntegrityCk {
+ BtShared *pBt; /* The tree being checked */
+ Pager *pPager; /* The associated pager. Also accessible by pBt->pPager */
+ u8 *aPgRef; /* 1 bit per page in the db (see above) */
+ Pgno nPage; /* Number of pages in the database */
+ int mxErr; /* Stop accumulating errors when this reaches zero */
+ int nErr; /* Number of messages written to zErrMsg so far */
+ int bOomFault; /* A memory allocation error has occurred */
+ const char *zPfx; /* Error message prefix */
+ Pgno v1; /* Value for first %u substitution in zPfx */
+ int v2; /* Value for second %d substitution in zPfx */
+ StrAccum errMsg; /* Accumulate the error message text here */
+ u32 *heap; /* Min-heap used for analyzing cell coverage */
+ sqlite3 *db; /* Database connection running the check */
+};
+
+/*
+** Routines to read and write two- and four-byte big-endian integer values.
+*/
+#define get2byte(x) ((x)[0]<<8 | (x)[1])
+#define put2byte(p,v) ((p)[0] = (u8)((v)>>8), (p)[1] = (u8)(v))
+#define get4byte sqlite3Get4byte
+#define put4byte sqlite3Put4byte
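+
+/* Illustrative round-trip (not part of SQLite; the function name is made
+** up and the block is compiled out): the macros above operate on
+** big-endian byte arrays, most significant byte first.
+*/
+#if 0
+static void demoTwoByteRoundTrip(void){
+  u8 a[2];
+  put2byte(a, 0x1234);               /* a[0]==0x12, a[1]==0x34 */
+  assert( get2byte(a)==0x1234 );
+}
+#endif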
+
+/*
+** get2byteAligned(), unlike get2byte(), requires that its argument point to a
+** two-byte aligned address. get2byteAligned() is only used for accessing the
+** cell addresses in a btree header.
+*/
+#if SQLITE_BYTEORDER==4321
+# define get2byteAligned(x) (*(u16*)(x))
+#elif SQLITE_BYTEORDER==1234 && GCC_VERSION>=4008000
+# define get2byteAligned(x) __builtin_bswap16(*(u16*)(x))
+#elif SQLITE_BYTEORDER==1234 && MSVC_VERSION>=1300
+# define get2byteAligned(x) _byteswap_ushort(*(u16*)(x))
+#else
+# define get2byteAligned(x) ((x)[0]<<8 | (x)[1])
+#endif
+
+/************** End of btreeInt.h ********************************************/
+/************** Continuing where we left off in btmutex.c ********************/
+#ifndef SQLITE_OMIT_SHARED_CACHE
+#if SQLITE_THREADSAFE
+
+/*
+** Obtain the BtShared mutex associated with B-Tree handle p. Also,
+** set BtShared.db to the database handle associated with p and the
+** p->locked boolean to true.
+*/
+static void lockBtreeMutex(Btree *p){
+ assert( p->locked==0 );
+ assert( sqlite3_mutex_notheld(p->pBt->mutex) );
+ assert( sqlite3_mutex_held(p->db->mutex) );
+
+ sqlite3_mutex_enter(p->pBt->mutex);
+ p->pBt->db = p->db;
+ p->locked = 1;
+}
+
+/*
+** Release the BtShared mutex associated with B-Tree handle p and
+** clear the p->locked boolean.
+*/
+static void SQLITE_NOINLINE unlockBtreeMutex(Btree *p){
+ BtShared *pBt = p->pBt;
+ assert( p->locked==1 );
+ assert( sqlite3_mutex_held(pBt->mutex) );
+ assert( sqlite3_mutex_held(p->db->mutex) );
+ assert( p->db==pBt->db );
+
+ sqlite3_mutex_leave(pBt->mutex);
+ p->locked = 0;
+}
+
+/* Forward reference */
+static void SQLITE_NOINLINE btreeLockCarefully(Btree *p);
+
+/*
+** Enter a mutex on the given BTree object.
+**
+** If the object is not sharable, then no mutex is ever required
+** and this routine is a no-op. The underlying mutex is non-recursive.
+** But we keep a reference count in Btree.wantToLock so the behavior
+** of this interface is recursive.
+**
+** To avoid deadlocks, multiple Btrees are locked in the same order
+** by all database connections. The p->pNext is a list of other
+** Btrees belonging to the same database connection as the p Btree
+** which need to be locked after p. If we cannot get a lock on
+** p, then first unlock all of the others on p->pNext, then wait
+** for the lock to become available on p, then relock all of the
+** subsequent Btrees that desire a lock.
+*/
+SQLITE_PRIVATE void sqlite3BtreeEnter(Btree *p){
+ /* Some basic sanity checking on the Btree. The list of Btrees
+ ** connected by pNext and pPrev should be in sorted order by
+ ** Btree.pBt value. All elements of the list should belong to
+ ** the same connection. Only shared Btrees are on the list. */
+ assert( p->pNext==0 || p->pNext->pBt>p->pBt );
+ assert( p->pPrev==0 || p->pPrev->pBt<p->pBt );
+ assert( p->pNext==0 || p->pNext->db==p->db );
+ assert( p->pPrev==0 || p->pPrev->db==p->db );
+ assert( p->sharable || (p->pNext==0 && p->pPrev==0) );
+
+ /* Check for locking consistency */
+ assert( !p->locked || p->wantToLock>0 );
+ assert( p->sharable || p->wantToLock==0 );
+
+ /* We should already hold a lock on the database connection */
+ assert( sqlite3_mutex_held(p->db->mutex) );
+
+ /* Unless the database is sharable and unlocked, BtShared.db
+ ** should already be set correctly. */
+ assert( (p->locked==0 && p->sharable) || p->pBt->db==p->db );
+
+ if( !p->sharable ) return;
+ p->wantToLock++;
+ if( p->locked ) return;
+ btreeLockCarefully(p);
+}
+
+/* This is a helper function for sqlite3BtreeEnter(). By moving
+** complex, but seldom used, logic out of sqlite3BtreeEnter() and
+** into this routine, we avoid unnecessary stack pointer changes
+** and thus help the sqlite3BtreeEnter() routine to run much faster
+** in the common case.
+*/
+static void SQLITE_NOINLINE btreeLockCarefully(Btree *p){
+ Btree *pLater;
+
+ /* In most cases, we should be able to acquire the lock we
+ ** want without having to go through the ascending lock
+ ** procedure that follows. Just be sure not to block.
+ */
+ if( sqlite3_mutex_try(p->pBt->mutex)==SQLITE_OK ){
+ p->pBt->db = p->db;
+ p->locked = 1;
+ return;
+ }
+
+ /* To avoid deadlock, first release all locks with a larger
+ ** BtShared address. Then acquire our lock. Then reacquire
+ ** the other BtShared locks that we used to hold in ascending
+ ** order.
+ */
+ for(pLater=p->pNext; pLater; pLater=pLater->pNext){
+ assert( pLater->sharable );
+ assert( pLater->pNext==0 || pLater->pNext->pBt>pLater->pBt );
+ assert( !pLater->locked || pLater->wantToLock>0 );
+ if( pLater->locked ){
+ unlockBtreeMutex(pLater);
+ }
+ }
+ lockBtreeMutex(p);
+ for(pLater=p->pNext; pLater; pLater=pLater->pNext){
+ if( pLater->wantToLock ){
+ lockBtreeMutex(pLater);
+ }
+ }
+}
+
+
+/*
+** Exit the recursive mutex on a Btree.
+*/
+SQLITE_PRIVATE void sqlite3BtreeLeave(Btree *p){
+ assert( sqlite3_mutex_held(p->db->mutex) );
+ if( p->sharable ){
+ assert( p->wantToLock>0 );
+ p->wantToLock--;
+ if( p->wantToLock==0 ){
+ unlockBtreeMutex(p);
+ }
+ }
+}
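+
+/* Illustrative nesting (not part of SQLite; the function name is made up
+** and the block is compiled out): although the underlying BtShared mutex
+** is non-recursive, the Btree.wantToLock counter makes sqlite3BtreeEnter()
+** and sqlite3BtreeLeave() behave recursively for a sharable Btree, so
+** nested calls simply need to balance. For non-sharable handles both
+** calls are no-ops.
+*/
+#if 0
+static void demoNestedEnter(Btree *p){
+  sqlite3BtreeEnter(p);   /* wantToLock: 0 -> 1, mutex acquired          */
+  sqlite3BtreeEnter(p);   /* wantToLock: 1 -> 2, already locked, no wait */
+  sqlite3BtreeLeave(p);   /* wantToLock: 2 -> 1, mutex still held        */
+  sqlite3BtreeLeave(p);   /* wantToLock: 1 -> 0, mutex released          */
+}
+#endif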
+
+#ifndef NDEBUG
+/*
+** Return true if the BtShared mutex is held on the btree, or if the
+** B-Tree is not marked as sharable.
+**
+** This routine is used only from within assert() statements.
+*/
+SQLITE_PRIVATE int sqlite3BtreeHoldsMutex(Btree *p){
+ assert( p->sharable==0 || p->locked==0 || p->wantToLock>0 );
+ assert( p->sharable==0 || p->locked==0 || p->db==p->pBt->db );
+ assert( p->sharable==0 || p->locked==0 || sqlite3_mutex_held(p->pBt->mutex) );
+ assert( p->sharable==0 || p->locked==0 || sqlite3_mutex_held(p->db->mutex) );
+
+ return (p->sharable==0 || p->locked);
+}
+#endif
+
+
+/*
+** Enter the mutex on every Btree associated with a database
+** connection. This is needed (for example) prior to parsing
+** a statement since we will be comparing table and column names
+** against all schemas and we do not want those schemas being
+** reset out from under us.
+**
+** There is a corresponding leave-all procedure.
+**
+** Enter the mutexes in ascending order by BtShared pointer address
+** to avoid the possibility of deadlock when two threads with
+** two or more btrees in common both try to lock all their btrees
+** at the same instant.
+*/
+static void SQLITE_NOINLINE btreeEnterAll(sqlite3 *db){
+ int i;
+ int skipOk = 1;
+ Btree *p;
+ assert( sqlite3_mutex_held(db->mutex) );
+ for(i=0; i<db->nDb; i++){
+ p = db->aDb[i].pBt;
+ if( p && p->sharable ){
+ sqlite3BtreeEnter(p);
+ skipOk = 0;
+ }
+ }
+ db->noSharedCache = skipOk;
+}
+SQLITE_PRIVATE void sqlite3BtreeEnterAll(sqlite3 *db){
+ if( db->noSharedCache==0 ) btreeEnterAll(db);
+}
+static void SQLITE_NOINLINE btreeLeaveAll(sqlite3 *db){
+ int i;
+ Btree *p;
+ assert( sqlite3_mutex_held(db->mutex) );
+ for(i=0; i<db->nDb; i++){
+ p = db->aDb[i].pBt;
+ if( p ) sqlite3BtreeLeave(p);
+ }
+}
+SQLITE_PRIVATE void sqlite3BtreeLeaveAll(sqlite3 *db){
+ if( db->noSharedCache==0 ) btreeLeaveAll(db);
+}
+
+#ifndef NDEBUG
+/*
+** Return true if the current thread holds the database connection
+** mutex and all required BtShared mutexes.
+**
+** This routine is used inside assert() statements only.
+*/
+SQLITE_PRIVATE int sqlite3BtreeHoldsAllMutexes(sqlite3 *db){
+ int i;
+ if( !sqlite3_mutex_held(db->mutex) ){
+ return 0;
+ }
+ for(i=0; i<db->nDb; i++){
+ Btree *p;
+ p = db->aDb[i].pBt;
+ if( p && p->sharable &&
+ (p->wantToLock==0 || !sqlite3_mutex_held(p->pBt->mutex)) ){
+ return 0;
+ }
+ }
+ return 1;
+}
+#endif /* NDEBUG */
+
+#ifndef NDEBUG
+/*
+** Return true if the correct mutexes are held for accessing the
+** db->aDb[iDb].pSchema structure. The mutexes required for schema
+** access are:
+**
+** (1) The mutex on db
+** (2) if iDb!=1, then the mutex on db->aDb[iDb].pBt.
+**
+** If pSchema is not NULL, then iDb is computed from pSchema and
+** db using sqlite3SchemaToIndex().
+*/
+SQLITE_PRIVATE int sqlite3SchemaMutexHeld(sqlite3 *db, int iDb, Schema *pSchema){
+ Btree *p;
+ assert( db!=0 );
+ if( pSchema ) iDb = sqlite3SchemaToIndex(db, pSchema);
+ assert( iDb>=0 && iDb<db->nDb );
+ if( !sqlite3_mutex_held(db->mutex) ) return 0;
+ if( iDb==1 ) return 1;
+ p = db->aDb[iDb].pBt;
+ assert( p!=0 );
+ return p->sharable==0 || p->locked==1;
+}
+#endif /* NDEBUG */
+
+#else /* SQLITE_THREADSAFE>0 above. SQLITE_THREADSAFE==0 below */
+/*
+** The following are special cases for mutex enter routines for use
+** in single threaded applications that use shared cache. Except for
+** these two routines, all mutex operations are no-ops in that case and
+** are null #defines in btree.h.
+**
+** If shared cache is disabled, then all btree mutex routines, including
+** the ones below, are no-ops and are null #defines in btree.h.
+*/
+
+SQLITE_PRIVATE void sqlite3BtreeEnter(Btree *p){
+ p->pBt->db = p->db;
+}
+SQLITE_PRIVATE void sqlite3BtreeEnterAll(sqlite3 *db){
+ int i;
+ for(i=0; i<db->nDb; i++){
+ Btree *p = db->aDb[i].pBt;
+ if( p ){
+ p->pBt->db = p->db;
+ }
+ }
+}
+#endif /* if SQLITE_THREADSAFE */
+
+#ifndef SQLITE_OMIT_INCRBLOB
+/*
+** Enter a mutex on a Btree given a cursor owned by that Btree.
+**
+** These entry points are used by incremental I/O only. Enter() is required
+** any time OMIT_SHARED_CACHE is not defined, regardless of whether or not
+** the build is threadsafe. Leave() is only required by threadsafe builds.
+*/
+SQLITE_PRIVATE void sqlite3BtreeEnterCursor(BtCursor *pCur){
+ sqlite3BtreeEnter(pCur->pBtree);
+}
+# if SQLITE_THREADSAFE
+SQLITE_PRIVATE void sqlite3BtreeLeaveCursor(BtCursor *pCur){
+ sqlite3BtreeLeave(pCur->pBtree);
+}
+# endif
+#endif /* ifndef SQLITE_OMIT_INCRBLOB */
+
+#endif /* ifndef SQLITE_OMIT_SHARED_CACHE */
+
+/************** End of btmutex.c *********************************************/
+/************** Begin file btree.c *******************************************/
+/*
+** 2004 April 6
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+** This file implements an external (disk-based) database using BTrees.
+** See the header comment on "btreeInt.h" for additional information.
+** Including a description of file format and an overview of operation.
+*/
+/* #include "btreeInt.h" */
+
+/*
+** The header string that appears at the beginning of every
+** SQLite database.
+*/
+static const char zMagicHeader[] = SQLITE_FILE_HEADER;
+
+/*
+** Set this global variable to 1 to enable tracing using the TRACE
+** macro.
+*/
+#if 0
+int sqlite3BtreeTrace=1; /* True to enable tracing */
+# define TRACE(X) if(sqlite3BtreeTrace){printf X;fflush(stdout);}
+#else
+# define TRACE(X)
+#endif
+
+/*
+** Extract a 2-byte big-endian integer from an array of unsigned bytes.
+** But if the value is zero, make it 65536.
+**
+** This routine is used to extract the "offset to cell content area" value
+** from the header of a btree page. If the page size is 65536 and the page
+** is empty, the offset should be 65536, but the 2-byte value stores zero.
+** This routine makes the necessary adjustment to 65536.
+*/
+#define get2byteNotZero(X) (((((int)get2byte(X))-1)&0xffff)+1)
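+
+/* Illustrative values (not part of SQLite; the function name is made up
+** and the block is compiled out): get2byteNotZero() maps the stored value
+** 0 to 65536 and leaves non-zero values unchanged.
+*/
+#if 0
+static void demoGet2byteNotZero(void){
+  static const u8 zero[2]  = {0x00, 0x00};
+  static const u8 fourK[2] = {0x10, 0x00};       /* 4096 */
+  assert( get2byteNotZero(zero)==65536 );
+  assert( get2byteNotZero(fourK)==4096 );
+}
+#endif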
+
+/*
+** Values passed as the 5th argument to allocateBtreePage()
+*/
+#define BTALLOC_ANY 0 /* Allocate any page */
+#define BTALLOC_EXACT 1 /* Allocate exact page if possible */
+#define BTALLOC_LE 2 /* Allocate any page <= the parameter */
+
+/*
+** Macro IfNotOmitAV(x) returns (x) if SQLITE_OMIT_AUTOVACUUM is not
+** defined, or 0 if it is. For example:
+**
+** bIncrVacuum = IfNotOmitAV(pBtShared->incrVacuum);
+*/
+#ifndef SQLITE_OMIT_AUTOVACUUM
+#define IfNotOmitAV(expr) (expr)
+#else
+#define IfNotOmitAV(expr) 0
+#endif
+
+#ifndef SQLITE_OMIT_SHARED_CACHE
+/*
+** A list of BtShared objects that are eligible for participation
+** in shared cache. This variable has file scope during normal builds,
+** but the test harness needs to access it so we make it global for
+** test builds.
+**
+** Access to this variable is protected by SQLITE_MUTEX_STATIC_MAIN.
+*/
+#ifdef SQLITE_TEST
+SQLITE_PRIVATE BtShared *SQLITE_WSD sqlite3SharedCacheList = 0;
+#else
+static BtShared *SQLITE_WSD sqlite3SharedCacheList = 0;
+#endif
+#endif /* SQLITE_OMIT_SHARED_CACHE */
+
+#ifndef SQLITE_OMIT_SHARED_CACHE
+/*
+** Enable or disable the shared pager and schema features.
+**
+** This routine has no effect on existing database connections.
+** The shared cache setting affects only future calls to
+** sqlite3_open(), sqlite3_open16(), or sqlite3_open_v2().
+*/
+SQLITE_API int sqlite3_enable_shared_cache(int enable){
+ sqlite3GlobalConfig.sharedCacheEnabled = enable;
+ return SQLITE_OK;
+}
+#endif
+
+
+
+#ifdef SQLITE_OMIT_SHARED_CACHE
+ /*
+ ** The functions querySharedCacheTableLock(), setSharedCacheTableLock(),
+ ** and clearAllSharedCacheTableLocks()
+ ** manipulate entries in the BtShared.pLock linked list used to store
+ ** shared-cache table level locks. If the library is compiled with the
+ ** shared-cache feature disabled, then there is only ever one user
+ ** of each BtShared structure and so this locking is not necessary.
+ ** So define the lock related functions as no-ops.
+ */
+ #define querySharedCacheTableLock(a,b,c) SQLITE_OK
+ #define setSharedCacheTableLock(a,b,c) SQLITE_OK
+ #define clearAllSharedCacheTableLocks(a)
+ #define downgradeAllSharedCacheTableLocks(a)
+ #define hasSharedCacheTableLock(a,b,c,d) 1
+ #define hasReadConflicts(a, b) 0
+#endif
+
+/*
+** Implementation of the SQLITE_CORRUPT_PAGE() macro. Takes a single
+** (MemPage*) as an argument. The (MemPage*) must not be NULL.
+**
+** If SQLITE_DEBUG is not defined, then this macro is equivalent to
+** SQLITE_CORRUPT_BKPT. Or, if SQLITE_DEBUG is set, then the log message
+** normally produced as a side-effect of SQLITE_CORRUPT_BKPT is augmented
+** with the page number and filename associated with the (MemPage*).
+*/
+#ifdef SQLITE_DEBUG
+int corruptPageError(int lineno, MemPage *p){
+ char *zMsg;
+ sqlite3BeginBenignMalloc();
+ zMsg = sqlite3_mprintf("database corruption page %d of %s",
+ (int)p->pgno, sqlite3PagerFilename(p->pBt->pPager, 0)
+ );
+ sqlite3EndBenignMalloc();
+ if( zMsg ){
+ sqlite3ReportError(SQLITE_CORRUPT, lineno, zMsg);
+ }
+ sqlite3_free(zMsg);
+ return SQLITE_CORRUPT_BKPT;
+}
+# define SQLITE_CORRUPT_PAGE(pMemPage) corruptPageError(__LINE__, pMemPage)
+#else
+# define SQLITE_CORRUPT_PAGE(pMemPage) SQLITE_CORRUPT_PGNO(pMemPage->pgno)
+#endif
+
+#ifndef SQLITE_OMIT_SHARED_CACHE
+
+#ifdef SQLITE_DEBUG
+/*
+**** This function is only used as part of an assert() statement. ****
+**
+** Check to see if pBtree holds the required locks to read or write to the
+** table with root page iRoot. Return 1 if it does and 0 if not.
+**
+** For example, when writing to a table with root-page iRoot via
+** Btree connection pBtree:
+**
+** assert( hasSharedCacheTableLock(pBtree, iRoot, 0, WRITE_LOCK) );
+**
+** When writing to an index that resides in a sharable database, the
+** caller should have first obtained a lock specifying the root page of
+** the corresponding table. This makes things a bit more complicated,
+** as this module treats each table as a separate structure. To determine
+** the table corresponding to the index being written, this
+** function has to search through the database schema.
+**
+** Instead of a lock on the table/index rooted at page iRoot, the caller may
+** hold a write-lock on the schema table (root page 1). This is also
+** acceptable.
+*/
+static int hasSharedCacheTableLock(
+ Btree *pBtree, /* Handle that must hold lock */
+ Pgno iRoot, /* Root page of b-tree */
+ int isIndex, /* True if iRoot is the root of an index b-tree */
+ int eLockType /* Required lock type (READ_LOCK or WRITE_LOCK) */
+){
+ Schema *pSchema = (Schema *)pBtree->pBt->pSchema;
+ Pgno iTab = 0;
+ BtLock *pLock;
+
+ /* If this database is not sharable, or if the client is reading
+ ** and has the read-uncommitted flag set, then no lock is required.
+ ** Return true immediately.
+ */
+ if( (pBtree->sharable==0)
+ || (eLockType==READ_LOCK && (pBtree->db->flags & SQLITE_ReadUncommit))
+ ){
+ return 1;
+ }
+
+ /* If the client is reading or writing an index and the schema is
+ ** not loaded, then it is too difficult to actually check to see if
+ ** the correct locks are held. So do not bother - just return true.
+ ** This case does not come up very often anyhow.
+ */
+ if( isIndex && (!pSchema || (pSchema->schemaFlags&DB_SchemaLoaded)==0) ){
+ return 1;
+ }
+
+ /* Figure out the root-page that the lock should be held on. For table
+ ** b-trees, this is just the root page of the b-tree being read or
+ ** written. For index b-trees, it is the root page of the associated
+ ** table. */
+ if( isIndex ){
+ HashElem *p;
+ int bSeen = 0;
+ for(p=sqliteHashFirst(&pSchema->idxHash); p; p=sqliteHashNext(p)){
+ Index *pIdx = (Index *)sqliteHashData(p);
+ if( pIdx->tnum==(int)iRoot ){
+ if( bSeen ){
+ /* Two or more indexes share the same root page. There must
+ ** be imposter tables. So just return true. The assert is not
+ ** useful in that case. */
+ return 1;
+ }
+ iTab = pIdx->pTable->tnum;
+ bSeen = 1;
+ }
+ }
+ }else{
+ iTab = iRoot;
+ }
+
+ /* Search for the required lock. Either a write-lock on root-page iTab, a
+ ** write-lock on the schema table, or (if the client is reading) a
+ ** read-lock on iTab will suffice. Return 1 if any of these are found. */
+ for(pLock=pBtree->pBt->pLock; pLock; pLock=pLock->pNext){
+ if( pLock->pBtree==pBtree
+ && (pLock->iTable==iTab || (pLock->eLock==WRITE_LOCK && pLock->iTable==1))
+ && pLock->eLock>=eLockType
+ ){
+ return 1;
+ }
+ }
+
+ /* Failed to find the required lock. */
+ return 0;
+}
+#endif /* SQLITE_DEBUG */
+
+#ifdef SQLITE_DEBUG
+/*
+**** This function may be used as part of assert() statements only. ****
+**
+** Return true if it would be illegal for pBtree to write into the
+** table or index rooted at iRoot because other shared connections are
+** simultaneously reading that same table or index.
+**
+** It is illegal for pBtree to write if some other Btree object that
+** shares the same BtShared object is currently reading or writing
+** the iRoot table. Except, if the other Btree object has the
+** read-uncommitted flag set, then it is OK for the other object to
+** have a read cursor.
+**
+** For example, before writing to any part of the table or index
+** rooted at page iRoot, one should call:
+**
+** assert( !hasReadConflicts(pBtree, iRoot) );
+*/
+static int hasReadConflicts(Btree *pBtree, Pgno iRoot){
+ BtCursor *p;
+ for(p=pBtree->pBt->pCursor; p; p=p->pNext){
+ if( p->pgnoRoot==iRoot
+ && p->pBtree!=pBtree
+ && 0==(p->pBtree->db->flags & SQLITE_ReadUncommit)
+ ){
+ return 1;
+ }
+ }
+ return 0;
+}
+#endif /* #ifdef SQLITE_DEBUG */
+
+/*
+** Query to see if Btree handle p may obtain a lock of type eLock
+** (READ_LOCK or WRITE_LOCK) on the table with root-page iTab. Return
+** SQLITE_OK if the lock may be obtained (by calling
+** setSharedCacheTableLock()), or SQLITE_LOCKED if not.
+*/
+static int querySharedCacheTableLock(Btree *p, Pgno iTab, u8 eLock){
+ BtShared *pBt = p->pBt;
+ BtLock *pIter;
+
+ assert( sqlite3BtreeHoldsMutex(p) );
+ assert( eLock==READ_LOCK || eLock==WRITE_LOCK );
+ assert( p->db!=0 );
+ assert( !(p->db->flags&SQLITE_ReadUncommit)||eLock==WRITE_LOCK||iTab==1 );
+
+ /* If requesting a write-lock, then the Btree must have an open write
+ ** transaction on this file. And, obviously, for this to be so there
+ ** must be an open write transaction on the file itself.
+ */
+ assert( eLock==READ_LOCK || (p==pBt->pWriter && p->inTrans==TRANS_WRITE) );
+ assert( eLock==READ_LOCK || pBt->inTransaction==TRANS_WRITE );
+
+ /* This routine is a no-op if the shared-cache is not enabled */
+ if( !p->sharable ){
+ return SQLITE_OK;
+ }
+
+ /* If some other connection is holding an exclusive lock, the
+ ** requested lock may not be obtained.
+ */
+ if( pBt->pWriter!=p && (pBt->btsFlags & BTS_EXCLUSIVE)!=0 ){
+ sqlite3ConnectionBlocked(p->db, pBt->pWriter->db);
+ return SQLITE_LOCKED_SHAREDCACHE;
+ }
+
+ for(pIter=pBt->pLock; pIter; pIter=pIter->pNext){
+ /* The condition (pIter->eLock!=eLock) in the following if(...)
+ ** statement is a simplification of:
+ **
+ ** (eLock==WRITE_LOCK || pIter->eLock==WRITE_LOCK)
+ **
+ ** since we know that if eLock==WRITE_LOCK, then no other connection
+ ** may hold a WRITE_LOCK on any table in this file (since there can
+ ** only be a single writer).
+ */
+ assert( pIter->eLock==READ_LOCK || pIter->eLock==WRITE_LOCK );
+ assert( eLock==READ_LOCK || pIter->pBtree==p || pIter->eLock==READ_LOCK);
+ if( pIter->pBtree!=p && pIter->iTable==iTab && pIter->eLock!=eLock ){
+ sqlite3ConnectionBlocked(p->db, pIter->pBtree->db);
+ if( eLock==WRITE_LOCK ){
+ assert( p==pBt->pWriter );
+ pBt->btsFlags |= BTS_PENDING;
+ }
+ return SQLITE_LOCKED_SHAREDCACHE;
+ }
+ }
+ return SQLITE_OK;
+}
+#endif /* !SQLITE_OMIT_SHARED_CACHE */
+
+#ifndef SQLITE_OMIT_SHARED_CACHE
+/*
+** Add a lock on the table with root-page iTable to the shared-btree used
+** by Btree handle p. Parameter eLock must be either READ_LOCK or
+** WRITE_LOCK.
+**
+** This function assumes the following:
+**
+** (a) The specified Btree object p is connected to a sharable
+** database (one with the BtShared.sharable flag set), and
+**
+** (b) No other Btree objects hold a lock that conflicts
+** with the requested lock (i.e. querySharedCacheTableLock() has
+** already been called and returned SQLITE_OK).
+**
+** SQLITE_OK is returned if the lock is added successfully. SQLITE_NOMEM
+** is returned if a malloc attempt fails.
+*/
+static int setSharedCacheTableLock(Btree *p, Pgno iTable, u8 eLock){
+ BtShared *pBt = p->pBt;
+ BtLock *pLock = 0;
+ BtLock *pIter;
+
+ assert( sqlite3BtreeHoldsMutex(p) );
+ assert( eLock==READ_LOCK || eLock==WRITE_LOCK );
+ assert( p->db!=0 );
+
+ /* A connection with the read-uncommitted flag set will never try to
+ ** obtain a read-lock using this function. The only read-lock obtained
+ ** by a connection in read-uncommitted mode is on the sqlite_schema
+ ** table, and that lock is obtained in BtreeBeginTrans(). */
+ assert( 0==(p->db->flags&SQLITE_ReadUncommit) || eLock==WRITE_LOCK );
+
+ /* This function should only be called on a sharable b-tree after it
+ ** has been determined that no other b-tree holds a conflicting lock. */
+ assert( p->sharable );
+ assert( SQLITE_OK==querySharedCacheTableLock(p, iTable, eLock) );
+
+ /* First search the list for an existing lock on this table. */
+ for(pIter=pBt->pLock; pIter; pIter=pIter->pNext){
+ if( pIter->iTable==iTable && pIter->pBtree==p ){
+ pLock = pIter;
+ break;
+ }
+ }
+
+ /* If the above search did not find a BtLock struct associating Btree p
+ ** with table iTable, allocate one and link it into the list.
+ */
+ if( !pLock ){
+ pLock = (BtLock *)sqlite3MallocZero(sizeof(BtLock));
+ if( !pLock ){
+ return SQLITE_NOMEM_BKPT;
+ }
+ pLock->iTable = iTable;
+ pLock->pBtree = p;
+ pLock->pNext = pBt->pLock;
+ pBt->pLock = pLock;
+ }
+
+ /* Set the BtLock.eLock variable to the maximum of the current lock
+ ** and the requested lock. This means if a write-lock was already held
+ ** and a read-lock requested, we don't incorrectly downgrade the lock.
+ */
+ assert( WRITE_LOCK>READ_LOCK );
+ if( eLock>pLock->eLock ){
+ pLock->eLock = eLock;
+ }
+
+ return SQLITE_OK;
+}
+#endif /* !SQLITE_OMIT_SHARED_CACHE */
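+
+/* Illustrative pairing (not part of SQLite; the function name is made up
+** and the block is compiled out): per the comments above, a shared-cache
+** table lock is taken by first querying for conflicts and then, only on
+** SQLITE_OK, recording the lock.
+*/
+#if 0
+static int demoObtainTableLock(Btree *p, Pgno iTab, u8 eLock){
+  int rc = querySharedCacheTableLock(p, iTab, eLock);
+  if( rc==SQLITE_OK ){
+    rc = setSharedCacheTableLock(p, iTab, eLock); /* may return SQLITE_NOMEM */
+  }
+  return rc;
+}
+#endif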
+
+#ifndef SQLITE_OMIT_SHARED_CACHE
+/*
+** Release all the table locks (locks obtained via calls to
+** the setSharedCacheTableLock() procedure) held by Btree object p.
+**
+** This function assumes that Btree p has an open read or write
+** transaction. If it does not, then the BTS_PENDING flag
+** may be incorrectly cleared.
+*/
+static void clearAllSharedCacheTableLocks(Btree *p){
+ BtShared *pBt = p->pBt;
+ BtLock **ppIter = &pBt->pLock;
+
+ assert( sqlite3BtreeHoldsMutex(p) );
+ assert( p->sharable || 0==*ppIter );
+ assert( p->inTrans>0 );
+
+ while( *ppIter ){
+ BtLock *pLock = *ppIter;
+ assert( (pBt->btsFlags & BTS_EXCLUSIVE)==0 || pBt->pWriter==pLock->pBtree );
+ assert( pLock->pBtree->inTrans>=pLock->eLock );
+ if( pLock->pBtree==p ){
+ *ppIter = pLock->pNext;
+ assert( pLock->iTable!=1 || pLock==&p->lock );
+ if( pLock->iTable!=1 ){
+ sqlite3_free(pLock);
+ }
+ }else{
+ ppIter = &pLock->pNext;
+ }
+ }
+
+ assert( (pBt->btsFlags & BTS_PENDING)==0 || pBt->pWriter );
+ if( pBt->pWriter==p ){
+ pBt->pWriter = 0;
+ pBt->btsFlags &= ~(BTS_EXCLUSIVE|BTS_PENDING);
+ }else if( pBt->nTransaction==2 ){
+ /* This function is called when Btree p is concluding its
+ ** transaction. If there currently exists a writer, and p is not
+ ** that writer, then the number of locks held by connections other
+ ** than the writer must be about to drop to zero. In this case
+ ** set the BTS_PENDING flag to 0.
+ **
+ ** If there is not currently a writer, then BTS_PENDING must
+ ** be zero already. So this next line is harmless in that case.
+ */
+ pBt->btsFlags &= ~BTS_PENDING;
+ }
+}
+
+/*
+** This function changes all write-locks held by Btree p into read-locks.
+*/
+static void downgradeAllSharedCacheTableLocks(Btree *p){
+ BtShared *pBt = p->pBt;
+ if( pBt->pWriter==p ){
+ BtLock *pLock;
+ pBt->pWriter = 0;
+ pBt->btsFlags &= ~(BTS_EXCLUSIVE|BTS_PENDING);
+ for(pLock=pBt->pLock; pLock; pLock=pLock->pNext){
+ assert( pLock->eLock==READ_LOCK || pLock->pBtree==p );
+ pLock->eLock = READ_LOCK;
+ }
+ }
+}
+
+#endif /* SQLITE_OMIT_SHARED_CACHE */
+
+static void releasePage(MemPage *pPage); /* Forward reference */
+static void releasePageOne(MemPage *pPage); /* Forward reference */
+static void releasePageNotNull(MemPage *pPage); /* Forward reference */
+
+/*
+***** This routine is used inside assert() only *****
+**
+** Verify that the cursor holds the mutex on its BtShared
+*/
+#ifdef SQLITE_DEBUG
+static int cursorHoldsMutex(BtCursor *p){
+ return sqlite3_mutex_held(p->pBt->mutex);
+}
+
+/* Verify that the cursor and the BtShared agree about what is the current
+** database connection. This is important in shared-cache mode. If the database
+** connection pointers get out-of-sync, it is possible for routines like
+** btreeInitPage() to reference a stale connection pointer that references
+** a connection that has already been closed. This routine is used inside assert()
+** statements only and for the purpose of double-checking that the btree code
+** does keep the database connection pointers up-to-date.
+*/
+static int cursorOwnsBtShared(BtCursor *p){
+ assert( cursorHoldsMutex(p) );
+ return (p->pBtree->db==p->pBt->db);
+}
+#endif
+
+/*
+** Invalidate the overflow cache of the cursor passed as the first argument.
+*/
+#define invalidateOverflowCache(pCur) (pCur->curFlags &= ~BTCF_ValidOvfl)
+
+/*
+** Invalidate the overflow page-list cache for all cursors opened
+** on the shared btree structure pBt.
+*/
+static void invalidateAllOverflowCache(BtShared *pBt){
+ BtCursor *p;
+ assert( sqlite3_mutex_held(pBt->mutex) );
+ for(p=pBt->pCursor; p; p=p->pNext){
+ invalidateOverflowCache(p);
+ }
+}
+
+#ifndef SQLITE_OMIT_INCRBLOB
+/*
+** This function is called before modifying the contents of a table
+** to invalidate any incrblob cursors that are open on the
+** row or one of the rows being modified.
+**
+** If argument isClearTable is true, then the entire contents of the
+** table is about to be deleted. In this case invalidate all incrblob
+** cursors open on any row within the table with root-page pgnoRoot.
+**
+** Otherwise, if argument isClearTable is false, then the row with
+** rowid iRow is being replaced or deleted. In this case invalidate
+** only those incrblob cursors open on that specific row.
+*/
+static void invalidateIncrblobCursors(
+ Btree *pBtree, /* The database file to check */
+ Pgno pgnoRoot, /* The table that might be changing */
+ i64 iRow, /* The rowid that might be changing */
+ int isClearTable /* True if all rows are being deleted */
+){
+ BtCursor *p;
+ if( pBtree->hasIncrblobCur==0 ) return;
+ assert( sqlite3BtreeHoldsMutex(pBtree) );
+ pBtree->hasIncrblobCur = 0;
+ for(p=pBtree->pBt->pCursor; p; p=p->pNext){
+ if( (p->curFlags & BTCF_Incrblob)!=0 ){
+ pBtree->hasIncrblobCur = 1;
+ if( p->pgnoRoot==pgnoRoot && (isClearTable || p->info.nKey==iRow) ){
+ p->eState = CURSOR_INVALID;
+ }
+ }
+ }
+}
+
+#else
+ /* Stub function when INCRBLOB is omitted */
+ #define invalidateIncrblobCursors(w,x,y,z)
+#endif /* SQLITE_OMIT_INCRBLOB */
+
+/*
+** Set bit pgno of the BtShared.pHasContent bitvec. This is called
+** when a page that previously contained data becomes a free-list leaf
+** page.
+**
+** The BtShared.pHasContent bitvec exists to work around an obscure
+** bug caused by the interaction of two useful IO optimizations surrounding
+** free-list leaf pages:
+**
+** 1) When all data is deleted from a page and the page becomes
+** a free-list leaf page, the page is not written to the database
+** (as free-list leaf pages contain no meaningful data). Sometimes
+** such a page is not even journalled (as it will not be modified,
+** why bother journalling it?).
+**
+** 2) When a free-list leaf page is reused, its content is not read
+** from the database or written to the journal file (why should it
+** be, if it is not at all meaningful?).
+**
+** By themselves, these optimizations work fine and provide a handy
+** performance boost to bulk delete or insert operations. However, if
+** a page is moved to the free-list and then reused within the same
+** transaction, a problem comes up. If the page is not journalled when
+** it is moved to the free-list and it is also not journalled when it
+** is extracted from the free-list and reused, then the original data
+** may be lost. In the event of a rollback, it may not be possible
+** to restore the database to its original configuration.
+**
+** The solution is the BtShared.pHasContent bitvec. Whenever a page is
+** moved to become a free-list leaf page, the corresponding bit is
+** set in the bitvec. Whenever a leaf page is extracted from the free-list,
+** optimization 2 above is omitted if the corresponding bit is already
+** set in BtShared.pHasContent. The contents of the bitvec are cleared
+** at the end of every transaction.
+*/
+static int btreeSetHasContent(BtShared *pBt, Pgno pgno){
+ int rc = SQLITE_OK;
+ if( !pBt->pHasContent ){
+ assert( pgno<=pBt->nPage );
+ pBt->pHasContent = sqlite3BitvecCreate(pBt->nPage);
+ if( !pBt->pHasContent ){
+ rc = SQLITE_NOMEM_BKPT;
+ }
+ }
+ if( rc==SQLITE_OK && pgno<=sqlite3BitvecSize(pBt->pHasContent) ){
+ rc = sqlite3BitvecSet(pBt->pHasContent, pgno);
+ }
+ return rc;
+}
+
+/*
+** Query the BtShared.pHasContent vector.
+**
+** This function is called when a free-list leaf page is removed from the
+** free-list for reuse. It returns false if it is safe to retrieve the
+** page from the pager layer with the 'no-content' flag set. True otherwise.
+*/
+static int btreeGetHasContent(BtShared *pBt, Pgno pgno){
+ Bitvec *p = pBt->pHasContent;
+ return p && (pgno>sqlite3BitvecSize(p) || sqlite3BitvecTestNotNull(p, pgno));
+}
+
+/*
+** Clear (destroy) the BtShared.pHasContent bitvec. This should be
+** invoked at the conclusion of each write-transaction.
+*/
+static void btreeClearHasContent(BtShared *pBt){
+ sqlite3BitvecDestroy(pBt->pHasContent);
+ pBt->pHasContent = 0;
+}
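+
+/*
+** A minimal sketch of the pHasContent lifecycle described above, using a
+** hypothetical page number on an imaginary BtShared object (illustrative
+** only; this exact call sequence does not appear in the code):
+**
+**   int rc = btreeSetHasContent(pBt, 7);  /* page 7 becomes a free-list leaf */
+**   if( rc==SQLITE_OK && btreeGetHasContent(pBt, 7) ){
+**     /* page 7 is being reused within the same transaction: skip the
+**     ** no-content optimization and read/journal the page normally */
+**   }
+**   btreeClearHasContent(pBt);            /* at the end of the transaction */
+*/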
+
+/*
+** Release all of the apPage[] pages for a cursor.
+*/
+static void btreeReleaseAllCursorPages(BtCursor *pCur){
+ int i;
+ if( pCur->iPage>=0 ){
+ for(i=0; i<pCur->iPage; i++){
+ releasePageNotNull(pCur->apPage[i]);
+ }
+ releasePageNotNull(pCur->pPage);
+ pCur->iPage = -1;
+ }
+}
+
+/*
+** The cursor passed as the only argument must point to a valid entry
+** when this function is called (i.e. have eState==CURSOR_VALID). This
+** function saves the current cursor key in variables pCur->nKey and
+** pCur->pKey. SQLITE_OK is returned if successful or an SQLite error
+** code otherwise.
+**
+** If the cursor is open on an intkey table, then the integer key
+** (the rowid) is stored in pCur->nKey and pCur->pKey is left set to
+** NULL. If the cursor is open on a non-intkey table, then pCur->pKey is
+** set to point to a malloced buffer pCur->nKey bytes in size containing
+** the key.
+*/
+static int saveCursorKey(BtCursor *pCur){
+ int rc = SQLITE_OK;
+ assert( CURSOR_VALID==pCur->eState );
+ assert( 0==pCur->pKey );
+ assert( cursorHoldsMutex(pCur) );
+
+ if( pCur->curIntKey ){
+ /* Only the rowid is required for a table btree */
+ pCur->nKey = sqlite3BtreeIntegerKey(pCur);
+ }else{
+ /* For an index btree, save the complete key content. It is possible
+ ** that the current key is corrupt. In that case, it is possible that
+ ** the sqlite3VdbeRecordUnpack() function may overread the buffer by
+ ** up to the size of 1 varint plus 1 8-byte value when the cursor
+ ** position is restored. Hence the 17 bytes of padding allocated
+ ** below. */
+ void *pKey;
+ pCur->nKey = sqlite3BtreePayloadSize(pCur);
+ pKey = sqlite3Malloc( pCur->nKey + 9 + 8 );
+ if( pKey ){
+ rc = sqlite3BtreePayload(pCur, 0, (int)pCur->nKey, pKey);
+ if( rc==SQLITE_OK ){
+ memset(((u8*)pKey)+pCur->nKey, 0, 9+8);
+ pCur->pKey = pKey;
+ }else{
+ sqlite3_free(pKey);
+ }
+ }else{
+ rc = SQLITE_NOMEM_BKPT;
+ }
+ }
+ assert( !pCur->curIntKey || !pCur->pKey );
+ return rc;
+}
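+
+/*
+** A worked example of the padding arithmetic above: the largest varint is
+** 9 bytes and the largest fixed-size serial value is 8 bytes, so the
+** 9+8==17 zeroed bytes written after pCur->nKey guarantee that an
+** overread of one varint plus one 8-byte value during record unpacking
+** stays inside the allocation even when the saved key is corrupt.
+*/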
+
+/*
+** Save the current cursor position in the variables BtCursor.nKey
+** and BtCursor.pKey. The cursor's state is set to CURSOR_REQUIRESEEK.
+**
+** The caller must ensure that the cursor is valid (has eState==CURSOR_VALID)
+** prior to calling this routine.
+*/
+static int saveCursorPosition(BtCursor *pCur){
+ int rc;
+
+ assert( CURSOR_VALID==pCur->eState || CURSOR_SKIPNEXT==pCur->eState );
+ assert( 0==pCur->pKey );
+ assert( cursorHoldsMutex(pCur) );
+
+ if( pCur->curFlags & BTCF_Pinned ){
+ return SQLITE_CONSTRAINT_PINNED;
+ }
+ if( pCur->eState==CURSOR_SKIPNEXT ){
+ pCur->eState = CURSOR_VALID;
+ }else{
+ pCur->skipNext = 0;
+ }
+
+ rc = saveCursorKey(pCur);
+ if( rc==SQLITE_OK ){
+ btreeReleaseAllCursorPages(pCur);
+ pCur->eState = CURSOR_REQUIRESEEK;
+ }
+
+ pCur->curFlags &= ~(BTCF_ValidNKey|BTCF_ValidOvfl|BTCF_AtLast);
+ return rc;
+}
+
+/* Forward reference */
+static int SQLITE_NOINLINE saveCursorsOnList(BtCursor*,Pgno,BtCursor*);
+
+/*
+** Save the positions of all cursors (except pExcept) that are open on
+** the table with root-page iRoot. "Saving the cursor position" means that
+** the location in the btree is remembered in such a way that it can be
+** moved back to the same spot after the btree has been modified. This
+** routine is called just before cursor pExcept is used to modify the
+** table, for example in BtreeDelete() or BtreeInsert().
+**
+** If there are two or more cursors on the same btree, then all such
+** cursors should have their BTCF_Multiple flag set. The btreeCursor()
+** routine enforces that rule. This routine only needs to be called in
+** the uncommon case when pExcept has the BTCF_Multiple flag set.
+**
+** If pExcept!=NULL and if no other cursors are found on the same root-page,
+** then the BTCF_Multiple flag on pExcept is cleared, to avoid another
+** pointless call to this routine.
+**
+** Implementation note: This routine merely checks to see if any cursors
+** need to be saved. It calls out to saveCursorsOnList() in the (unusual)
+** event that cursors are in need of being saved.
+*/
+static int saveAllCursors(BtShared *pBt, Pgno iRoot, BtCursor *pExcept){
+ BtCursor *p;
+ assert( sqlite3_mutex_held(pBt->mutex) );
+ assert( pExcept==0 || pExcept->pBt==pBt );
+ for(p=pBt->pCursor; p; p=p->pNext){
+ if( p!=pExcept && (0==iRoot || p->pgnoRoot==iRoot) ) break;
+ }
+ if( p ) return saveCursorsOnList(p, iRoot, pExcept);
+ if( pExcept ) pExcept->curFlags &= ~BTCF_Multiple;
+ return SQLITE_OK;
+}
+
+/* This helper routine to saveAllCursors does the actual work of saving
+** the cursors if and when a cursor is found that actually requires saving.
+** The common case is that no cursors need to be saved, so this routine is
+** broken out from its caller to avoid unnecessary stack pointer movement.
+*/
+static int SQLITE_NOINLINE saveCursorsOnList(
+ BtCursor *p, /* The first cursor that needs saving */
+ Pgno iRoot, /* Only save cursor with this iRoot. Save all if zero */
+ BtCursor *pExcept /* Do not save this cursor */
+){
+ do{
+ if( p!=pExcept && (0==iRoot || p->pgnoRoot==iRoot) ){
+ if( p->eState==CURSOR_VALID || p->eState==CURSOR_SKIPNEXT ){
+ int rc = saveCursorPosition(p);
+ if( SQLITE_OK!=rc ){
+ return rc;
+ }
+ }else{
+ testcase( p->iPage>=0 );
+ btreeReleaseAllCursorPages(p);
+ }
+ }
+ p = p->pNext;
+ }while( p );
+ return SQLITE_OK;
+}
+
+/*
+** Clear the current cursor position.
+*/
+SQLITE_PRIVATE void sqlite3BtreeClearCursor(BtCursor *pCur){
+ assert( cursorHoldsMutex(pCur) );
+ sqlite3_free(pCur->pKey);
+ pCur->pKey = 0;
+ pCur->eState = CURSOR_INVALID;
+}
+
+/*
+** In this version of BtreeMoveto, pKey is a packed index record
+** such as is generated by the OP_MakeRecord opcode. Unpack the
+** record and then call BtreeMovetoUnpacked() to do the work.
+*/
+static int btreeMoveto(
+ BtCursor *pCur, /* Cursor open on the btree to be searched */
+ const void *pKey, /* Packed key if the btree is an index */
+ i64 nKey, /* Integer key for tables. Size of pKey for indices */
+ int bias, /* Bias search to the high end */
+ int *pRes /* Write search results here */
+){
+ int rc; /* Status code */
+ UnpackedRecord *pIdxKey; /* Unpacked index key */
+
+ if( pKey ){
+ KeyInfo *pKeyInfo = pCur->pKeyInfo;
+ assert( nKey==(i64)(int)nKey );
+ pIdxKey = sqlite3VdbeAllocUnpackedRecord(pKeyInfo);
+ if( pIdxKey==0 ) return SQLITE_NOMEM_BKPT;
+ sqlite3VdbeRecordUnpack(pKeyInfo, (int)nKey, pKey, pIdxKey);
+ if( pIdxKey->nField==0 || pIdxKey->nField>pKeyInfo->nAllField ){
+ rc = SQLITE_CORRUPT_BKPT;
+ goto moveto_done;
+ }
+ }else{
+ pIdxKey = 0;
+ }
+ rc = sqlite3BtreeMovetoUnpacked(pCur, pIdxKey, nKey, bias, pRes);
+moveto_done:
+ if( pIdxKey ){
+ sqlite3DbFree(pCur->pKeyInfo->db, pIdxKey);
+ }
+ return rc;
+}
+
+/*
+** Restore the cursor to the position it was in (or as close to as possible)
+** when saveCursorPosition() was called. Note that this call deletes the
+** saved position info stored by saveCursorPosition(), so there can be
+** at most one effective restoreCursorPosition() call after each
+** saveCursorPosition().
+*/
+static int btreeRestoreCursorPosition(BtCursor *pCur){
+ int rc;
+ int skipNext = 0;
+ assert( cursorOwnsBtShared(pCur) );
+ assert( pCur->eState>=CURSOR_REQUIRESEEK );
+ if( pCur->eState==CURSOR_FAULT ){
+ return pCur->skipNext;
+ }
+ pCur->eState = CURSOR_INVALID;
+ if( sqlite3FaultSim(410) ){
+ rc = SQLITE_IOERR;
+ }else{
+ rc = btreeMoveto(pCur, pCur->pKey, pCur->nKey, 0, &skipNext);
+ }
+ if( rc==SQLITE_OK ){
+ sqlite3_free(pCur->pKey);
+ pCur->pKey = 0;
+ assert( pCur->eState==CURSOR_VALID || pCur->eState==CURSOR_INVALID );
+ if( skipNext ) pCur->skipNext = skipNext;
+ if( pCur->skipNext && pCur->eState==CURSOR_VALID ){
+ pCur->eState = CURSOR_SKIPNEXT;
+ }
+ }
+ return rc;
+}
+
+#define restoreCursorPosition(p) \
+ (p->eState>=CURSOR_REQUIRESEEK ? \
+ btreeRestoreCursorPosition(p) : \
+ SQLITE_OK)
+
+/*
+** Determine whether or not a cursor has moved from the position where
+** it was last placed, or has been invalidated for any other reason.
+** Cursors can move when the row they are pointing at is deleted out
+** from under them, for example. Cursor might also move if a btree
+** is rebalanced.
+**
+** Calling this routine with a NULL cursor pointer returns false.
+**
+** Use the separate sqlite3BtreeCursorRestore() routine to restore a cursor
+** back to where it ought to be if this routine returns true.
+*/
+SQLITE_PRIVATE int sqlite3BtreeCursorHasMoved(BtCursor *pCur){
+ assert( EIGHT_BYTE_ALIGNMENT(pCur)
+ || pCur==sqlite3BtreeFakeValidCursor() );
+ assert( offsetof(BtCursor, eState)==0 );
+ assert( sizeof(pCur->eState)==1 );
+ return CURSOR_VALID != *(u8*)pCur;
+}
+
+/*
+** Return a pointer to a fake BtCursor object that will always answer
+** false to the sqlite3BtreeCursorHasMoved() routine above. The fake
+** cursor returned must not be used with any other Btree interface.
+*/
+SQLITE_PRIVATE BtCursor *sqlite3BtreeFakeValidCursor(void){
+ static u8 fakeCursor = CURSOR_VALID;
+ assert( offsetof(BtCursor, eState)==0 );
+ return (BtCursor*)&fakeCursor;
+}
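+
+/*
+** Why a single static u8 can stand in for a whole BtCursor: because
+** offsetof(BtCursor, eState)==0 and sizeof(eState)==1, the expression
+** *(u8*)pCur in sqlite3BtreeCursorHasMoved() reads only the first byte
+** of the object. An illustrative check (not part of the original code):
+**
+**   BtCursor *pFake = sqlite3BtreeFakeValidCursor();
+**   assert( sqlite3BtreeCursorHasMoved(pFake)==0 );  /* always "not moved" */
+*/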
+
+/*
+** This routine restores a cursor back to its original position after it
+** has been moved by some outside activity (such as a btree rebalance or
+** a row having been deleted out from under the cursor).
+**
+** On success, the *pDifferentRow parameter is false if the cursor is left
+** pointing at exactly the same row. *pDifferentRow is true if the row the
+** cursor was pointing to has been deleted, forcing the cursor to point to
+** some nearby row.
+**
+** This routine should only be called for a cursor that just returned
+** TRUE from sqlite3BtreeCursorHasMoved().
+*/
+SQLITE_PRIVATE int sqlite3BtreeCursorRestore(BtCursor *pCur, int *pDifferentRow){
+ int rc;
+
+ assert( pCur!=0 );
+ assert( pCur->eState!=CURSOR_VALID );
+ rc = restoreCursorPosition(pCur);
+ if( rc ){
+ *pDifferentRow = 1;
+ return rc;
+ }
+ if( pCur->eState!=CURSOR_VALID ){
+ *pDifferentRow = 1;
+ }else{
+ *pDifferentRow = 0;
+ }
+ return SQLITE_OK;
+}
+
+#ifdef SQLITE_ENABLE_CURSOR_HINTS
+/*
+** Provide hints to the cursor. The particular hint given (and the type
+** and number of the varargs parameters) is determined by the eHintType
+** parameter. See the definitions of the BTREE_HINT_* macros for details.
+*/
+SQLITE_PRIVATE void sqlite3BtreeCursorHint(BtCursor *pCur, int eHintType, ...){
+ /* Used only by systems that substitute their own storage engine */
+}
+#endif
+
+/*
+** Provide flag hints to the cursor.
+*/
+SQLITE_PRIVATE void sqlite3BtreeCursorHintFlags(BtCursor *pCur, unsigned x){
+ assert( x==BTREE_SEEK_EQ || x==BTREE_BULKLOAD || x==0 );
+ pCur->hints = x;
+}
+
+
+#ifndef SQLITE_OMIT_AUTOVACUUM
+/*
+** Given a page number of a regular database page, return the page
+** number for the pointer-map page that contains the entry for the
+** input page number.
+**
+** Return 0 (not a valid page) for pgno==1 since there is
+** no pointer map associated with page 1. The integrity_check logic
+** requires that ptrmapPageno(*,1)!=1.
+*/
+static Pgno ptrmapPageno(BtShared *pBt, Pgno pgno){
+ int nPagesPerMapPage;
+ Pgno iPtrMap, ret;
+ assert( sqlite3_mutex_held(pBt->mutex) );
+ if( pgno<2 ) return 0;
+ nPagesPerMapPage = (pBt->usableSize/5)+1;
+ iPtrMap = (pgno-2)/nPagesPerMapPage;
+ ret = (iPtrMap*nPagesPerMapPage) + 2;
+ if( ret==PENDING_BYTE_PAGE(pBt) ){
+ ret++;
+ }
+ return ret;
+}
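+
+/*
+** A worked example of the arithmetic above, assuming a hypothetical
+** database with usableSize==4096 and no PENDING_BYTE collision:
+**
+**   nPagesPerMapPage = 4096/5 + 1 = 820    (5 bytes per ptrmap entry)
+**   ptrmapPageno(pBt, 1000):  iPtrMap = (1000-2)/820 = 1
+**                             ret     = 1*820 + 2   = 822
+**
+** So the entry for page 1000 lives on pointer-map page 822.
+*/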
+
+/*
+** Write an entry into the pointer map.
+**
+** This routine updates the pointer map entry for page number 'key'
+** so that it maps to type 'eType' and parent page number 'pgno'.
+**
+** If *pRC is initially non-zero (non-SQLITE_OK) then this routine is
+** a no-op. If an error occurs, the appropriate error code is written
+** into *pRC.
+*/
+static void ptrmapPut(BtShared *pBt, Pgno key, u8 eType, Pgno parent, int *pRC){
+ DbPage *pDbPage; /* The pointer map page */
+ u8 *pPtrmap; /* The pointer map data */
+ Pgno iPtrmap; /* The pointer map page number */
+ int offset; /* Offset in pointer map page */
+ int rc; /* Return code from subfunctions */
+
+ if( *pRC ) return;
+
+ assert( sqlite3_mutex_held(pBt->mutex) );
+ /* The super-journal page number must never be used as a pointer map page */
+ assert( 0==PTRMAP_ISPAGE(pBt, PENDING_BYTE_PAGE(pBt)) );
+
+ assert( pBt->autoVacuum );
+ if( key==0 ){
+ *pRC = SQLITE_CORRUPT_BKPT;
+ return;
+ }
+ iPtrmap = PTRMAP_PAGENO(pBt, key);
+ rc = sqlite3PagerGet(pBt->pPager, iPtrmap, &pDbPage, 0);
+ if( rc!=SQLITE_OK ){
+ *pRC = rc;
+ return;
+ }
+ if( ((char*)sqlite3PagerGetExtra(pDbPage))[0]!=0 ){
+ /* The first byte of the extra data is the MemPage.isInit byte.
+ ** If that byte is set, it means this page is also being used
+ ** as a btree page. */
+ *pRC = SQLITE_CORRUPT_BKPT;
+ goto ptrmap_exit;
+ }
+ offset = PTRMAP_PTROFFSET(iPtrmap, key);
+ if( offset<0 ){
+ *pRC = SQLITE_CORRUPT_BKPT;
+ goto ptrmap_exit;
+ }
+ assert( offset <= (int)pBt->usableSize-5 );
+ pPtrmap = (u8 *)sqlite3PagerGetData(pDbPage);
+
+ if( eType!=pPtrmap[offset] || get4byte(&pPtrmap[offset+1])!=parent ){
+ TRACE(("PTRMAP_UPDATE: %d->(%d,%d)\n", key, eType, parent));
+ *pRC= rc = sqlite3PagerWrite(pDbPage);
+ if( rc==SQLITE_OK ){
+ pPtrmap[offset] = eType;
+ put4byte(&pPtrmap[offset+1], parent);
+ }
+ }
+
+ptrmap_exit:
+ sqlite3PagerUnref(pDbPage);
+}
+
+/*
+** Read an entry from the pointer map.
+**
+** This routine retrieves the pointer map entry for page 'key', writing
+** the type and parent page number to *pEType and *pPgno respectively.
+** An error code is returned if something goes wrong, otherwise SQLITE_OK.
+*/
+static int ptrmapGet(BtShared *pBt, Pgno key, u8 *pEType, Pgno *pPgno){
+ DbPage *pDbPage; /* The pointer map page */
+ int iPtrmap; /* Pointer map page index */
+ u8 *pPtrmap; /* Pointer map page data */
+ int offset; /* Offset of entry in pointer map */
+ int rc;
+
+ assert( sqlite3_mutex_held(pBt->mutex) );
+
+ iPtrmap = PTRMAP_PAGENO(pBt, key);
+ rc = sqlite3PagerGet(pBt->pPager, iPtrmap, &pDbPage, 0);
+ if( rc!=0 ){
+ return rc;
+ }
+ pPtrmap = (u8 *)sqlite3PagerGetData(pDbPage);
+
+ offset = PTRMAP_PTROFFSET(iPtrmap, key);
+ if( offset<0 ){
+ sqlite3PagerUnref(pDbPage);
+ return SQLITE_CORRUPT_BKPT;
+ }
+ assert( offset <= (int)pBt->usableSize-5 );
+ assert( pEType!=0 );
+ *pEType = pPtrmap[offset];
+ if( pPgno ) *pPgno = get4byte(&pPtrmap[offset+1]);
+
+ sqlite3PagerUnref(pDbPage);
+ if( *pEType<1 || *pEType>5 ) return SQLITE_CORRUPT_PGNO(iPtrmap);
+ return SQLITE_OK;
+}
+
+#else /* if defined SQLITE_OMIT_AUTOVACUUM */
+ #define ptrmapPut(w,x,y,z,rc)
+ #define ptrmapGet(w,x,y,z) SQLITE_OK
+ #define ptrmapPutOvflPtr(x, y, z, rc)
+#endif
+
+/*
+** Given a btree page and a cell index (0 means the first cell on
+** the page, 1 means the second cell, and so forth) return a pointer
+** to the cell content.
+**
+** findCellPastPtr() does the same except it skips past the initial
+** 4-byte child pointer found on interior pages, if there is one.
+**
+** This routine works only for pages that do not contain overflow cells.
+*/
+#define findCell(P,I) \
+ ((P)->aData + ((P)->maskPage & get2byteAligned(&(P)->aCellIdx[2*(I)])))
+#define findCellPastPtr(P,I) \
+ ((P)->aDataOfst + ((P)->maskPage & get2byteAligned(&(P)->aCellIdx[2*(I)])))
+
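+/* A worked example of findCell(), assuming a hypothetical 4096-byte leaf
+** page whose first cell-pointer slot holds the value 0x03F0: the
+** expression get2byteAligned(&aCellIdx[0]) yields 0x03F0, the mask
+** maskPage==pageSize-1==0x0FFF leaves that value unchanged, and
+** findCell(P,0) therefore returns aData+0x03F0, the first byte of that
+** cell's content.
+*/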
+
+/*
+** This is common tail processing for btreeParseCellPtr() and
+** btreeParseCellPtrIndex() for the case when the cell does not fit entirely
+** on a single B-tree page. Make necessary adjustments to the CellInfo
+** structure.
+*/
+static SQLITE_NOINLINE void btreeParseCellAdjustSizeForOverflow(
+ MemPage *pPage, /* Page containing the cell */
+ u8 *pCell, /* Pointer to the cell text. */
+ CellInfo *pInfo /* Fill in this structure */
+){
+ /* If the payload will not fit completely on the local page, we have
+ ** to decide how much to store locally and how much to spill onto
+ ** overflow pages. The strategy is to minimize the amount of unused
+ ** space on overflow pages while keeping the amount of local storage
+ ** in between minLocal and maxLocal.
+ **
+ ** Warning: changing the way overflow payload is distributed in any
+ ** way will result in an incompatible file format.
+ */
+ int minLocal; /* Minimum amount of payload held locally */
+ int maxLocal; /* Maximum amount of payload held locally */
+ int surplus; /* Overflow payload available for local storage */
+
+ minLocal = pPage->minLocal;
+ maxLocal = pPage->maxLocal;
+ surplus = minLocal + (pInfo->nPayload - minLocal)%(pPage->pBt->usableSize-4);
+ testcase( surplus==maxLocal );
+ testcase( surplus==maxLocal+1 );
+ if( surplus <= maxLocal ){
+ pInfo->nLocal = (u16)surplus;
+ }else{
+ pInfo->nLocal = (u16)minLocal;
+ }
+ pInfo->nSize = (u16)(&pInfo->pPayload[pInfo->nLocal] - pCell) + 4;
+}
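+
+/*
+** A worked example of the surplus computation above, using hypothetical
+** page parameters (not SQLite's real minLocal/maxLocal formulas): let
+** minLocal==100, maxLocal==200, usableSize==1024, and nPayload==3000.
+**
+**   surplus = 100 + (3000-100) % (1024-4)
+**           = 100 + 2900 % 1020
+**           = 100 + 860 = 960
+**
+** Since 960 > maxLocal, nLocal is set to minLocal (100) and the other
+** 2900 payload bytes spill onto overflow pages.
+*/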
+
+/*
+** The following routines are implementations of the MemPage.xParseCell()
+** method.
+**
+** Parse a cell content block and fill in the CellInfo structure.
+**
+** btreeParseCellPtr() => table btree leaf nodes
+** btreeParseCellNoPayload() => table btree internal nodes
+** btreeParseCellPtrIndex() => index btree nodes
+**
+** There is also a wrapper function btreeParseCell() that works for
+** all MemPage types and that references the cell by index rather than
+** by pointer.
+*/
+static void btreeParseCellPtrNoPayload(
+ MemPage *pPage, /* Page containing the cell */
+ u8 *pCell, /* Pointer to the cell text. */
+ CellInfo *pInfo /* Fill in this structure */
+){
+ assert( sqlite3_mutex_held(pPage->pBt->mutex) );
+ assert( pPage->leaf==0 );
+ assert( pPage->childPtrSize==4 );
+#ifndef SQLITE_DEBUG
+ UNUSED_PARAMETER(pPage);
+#endif
+ pInfo->nSize = 4 + getVarint(&pCell[4], (u64*)&pInfo->nKey);
+ pInfo->nPayload = 0;
+ pInfo->nLocal = 0;
+ pInfo->pPayload = 0;
+ return;
+}
+static void btreeParseCellPtr(
+ MemPage *pPage, /* Page containing the cell */
+ u8 *pCell, /* Pointer to the cell text. */
+ CellInfo *pInfo /* Fill in this structure */
+){
+ u8 *pIter; /* For scanning through pCell */
+ u32 nPayload; /* Number of bytes of cell payload */
+ u64 iKey; /* Extracted Key value */
+
+ assert( sqlite3_mutex_held(pPage->pBt->mutex) );
+ assert( pPage->leaf==0 || pPage->leaf==1 );
+ assert( pPage->intKeyLeaf );
+ assert( pPage->childPtrSize==0 );
+ pIter = pCell;
+
+ /* The next block of code is equivalent to:
+ **
+ ** pIter += getVarint32(pIter, nPayload);
+ **
+ ** The code is inlined to avoid a function call.
+ */
+ nPayload = *pIter;
+ if( nPayload>=0x80 ){
+ u8 *pEnd = &pIter[8];
+ nPayload &= 0x7f;
+ do{
+ nPayload = (nPayload<<7) | (*++pIter & 0x7f);
+ }while( (*pIter)>=0x80 && pIter<pEnd );
+ }
+ pIter++;
+
+ /* The next block of code is equivalent to:
+ **
+ ** pIter += getVarint(pIter, (u64*)&pInfo->nKey);
+ **
+ ** The code is inlined to avoid a function call.
+ */
+ iKey = *pIter;
+ if( iKey>=0x80 ){
+ u8 *pEnd = &pIter[7];
+ iKey &= 0x7f;
+ while(1){
+ iKey = (iKey<<7) | (*++pIter & 0x7f);
+ if( (*pIter)<0x80 ) break;
+ if( pIter>=pEnd ){
+ iKey = (iKey<<8) | *++pIter;
+ break;
+ }
+ }
+ }
+ pIter++;
+
+ pInfo->nKey = *(i64*)&iKey;
+ pInfo->nPayload = nPayload;
+ pInfo->pPayload = pIter;
+ testcase( nPayload==pPage->maxLocal );
+ testcase( nPayload==pPage->maxLocal+1 );
+ if( nPayload<=pPage->maxLocal ){
+ /* This is the (easy) common case where the entire payload fits
+ ** on the local page. No overflow is required.
+ */
+ pInfo->nSize = nPayload + (u16)(pIter - pCell);
+ if( pInfo->nSize<4 ) pInfo->nSize = 4;
+ pInfo->nLocal = (u16)nPayload;
+ }else{
+ btreeParseCellAdjustSizeForOverflow(pPage, pCell, pInfo);
+ }
+}
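+
+/*
+** A worked example of the inlined varint decode above: for the byte
+** sequence 0x81 0x7f the first byte has its high (continuation) bit set,
+** so the loop runs once and computes (0x01<<7)|0x7f == 255. Decoding
+** normally stops at the first byte whose high bit is clear; the pEnd
+** bound caps how many bytes can be consumed (a key varint may legally
+** run to 9 bytes, with the 9th byte contributing a full 8 bits).
+*/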
+static void btreeParseCellPtrIndex(
+ MemPage *pPage, /* Page containing the cell */
+ u8 *pCell, /* Pointer to the cell text. */
+ CellInfo *pInfo /* Fill in this structure */
+){
+ u8 *pIter; /* For scanning through pCell */
+ u32 nPayload; /* Number of bytes of cell payload */
+
+ assert( sqlite3_mutex_held(pPage->pBt->mutex) );
+ assert( pPage->leaf==0 || pPage->leaf==1 );
+ assert( pPage->intKeyLeaf==0 );
+ pIter = pCell + pPage->childPtrSize;
+ nPayload = *pIter;
+ if( nPayload>=0x80 ){
+ u8 *pEnd = &pIter[8];
+ nPayload &= 0x7f;
+ do{
+ nPayload = (nPayload<<7) | (*++pIter & 0x7f);
+ }while( *(pIter)>=0x80 && pIter<pEnd );
+ }
+ pIter++;
+ pInfo->nKey = nPayload;
+ pInfo->nPayload = nPayload;
+ pInfo->pPayload = pIter;
+ testcase( nPayload==pPage->maxLocal );
+ testcase( nPayload==pPage->maxLocal+1 );
+ if( nPayload<=pPage->maxLocal ){
+ /* This is the (easy) common case where the entire payload fits
+ ** on the local page. No overflow is required.
+ */
+ pInfo->nSize = nPayload + (u16)(pIter - pCell);
+ if( pInfo->nSize<4 ) pInfo->nSize = 4;
+ pInfo->nLocal = (u16)nPayload;
+ }else{
+ btreeParseCellAdjustSizeForOverflow(pPage, pCell, pInfo);
+ }
+}
+static void btreeParseCell(
+ MemPage *pPage, /* Page containing the cell */
+ int iCell, /* The cell index. First cell is 0 */
+ CellInfo *pInfo /* Fill in this structure */
+){
+ pPage->xParseCell(pPage, findCell(pPage, iCell), pInfo);
+}
+
+/*
+** The following routines are implementations of the MemPage.xCellSize
+** method.
+**
+** Compute the total number of bytes that a Cell needs in the cell
+** data area of the btree-page. The return number includes the cell
+** data header and the local payload, but not any overflow page or
+** the space used by the cell pointer.
+**
+** cellSizePtrNoPayload() => table internal nodes
+** cellSizePtr() => all index nodes & table leaf nodes
+*/
+static u16 cellSizePtr(MemPage *pPage, u8 *pCell){
+ u8 *pIter = pCell + pPage->childPtrSize; /* For looping over bytes of pCell */
+ u8 *pEnd; /* End mark for a varint */
+ u32 nSize; /* Size value to return */
+
+#ifdef SQLITE_DEBUG
+ /* The value returned by this function should always be the same as
+ ** the (CellInfo.nSize) value found by doing a full parse of the
+ ** cell. If SQLITE_DEBUG is defined, an assert() at the bottom of
+ ** this function verifies that this invariant is not violated. */
+ CellInfo debuginfo;
+ pPage->xParseCell(pPage, pCell, &debuginfo);
+#endif
+
+ nSize = *pIter;
+ if( nSize>=0x80 ){
+ pEnd = &pIter[8];
+ nSize &= 0x7f;
+ do{
+ nSize = (nSize<<7) | (*++pIter & 0x7f);
+ }while( *(pIter)>=0x80 && pIter<pEnd );
+ }
+ pIter++;
+ if( pPage->intKey ){
+ /* pIter now points at the 64-bit integer key value, a variable length
+ ** integer. The following block moves pIter to point at the first byte
+ ** past the end of the key value. */
+ pEnd = &pIter[9];
+ while( (*pIter++)&0x80 && pIter<pEnd );
+ }
+ testcase( nSize==pPage->maxLocal );
+ testcase( nSize==pPage->maxLocal+1 );
+ if( nSize<=pPage->maxLocal ){
+ nSize += (u32)(pIter - pCell);
+ if( nSize<4 ) nSize = 4;
+ }else{
+ int minLocal = pPage->minLocal;
+ nSize = minLocal + (nSize - minLocal) % (pPage->pBt->usableSize - 4);
+ testcase( nSize==pPage->maxLocal );
+ testcase( nSize==pPage->maxLocal+1 );
+ if( nSize>pPage->maxLocal ){
+ nSize = minLocal;
+ }
+ nSize += 4 + (u16)(pIter - pCell);
+ }
+ assert( nSize==debuginfo.nSize || CORRUPT_DB );
+ return (u16)nSize;
+}
+static u16 cellSizePtrNoPayload(MemPage *pPage, u8 *pCell){
+ u8 *pIter = pCell + 4; /* For looping over bytes of pCell */
+ u8 *pEnd; /* End mark for a varint */
+
+#ifdef SQLITE_DEBUG
+ /* The value returned by this function should always be the same as
+ ** the (CellInfo.nSize) value found by doing a full parse of the
+ ** cell. If SQLITE_DEBUG is defined, an assert() at the bottom of
+ ** this function verifies that this invariant is not violated. */
+ CellInfo debuginfo;
+ pPage->xParseCell(pPage, pCell, &debuginfo);
+#else
+ UNUSED_PARAMETER(pPage);
+#endif
+
+ assert( pPage->childPtrSize==4 );
+ pEnd = pIter + 9;
+ while( (*pIter++)&0x80 && pIter<pEnd );
+ assert( debuginfo.nSize==(u16)(pIter - pCell) || CORRUPT_DB );
+ return (u16)(pIter - pCell);
+}
+
+
+#ifdef SQLITE_DEBUG
+/* This variation on cellSizePtr() is used inside of assert() statements
+** only. */
+static u16 cellSize(MemPage *pPage, int iCell){
+ return pPage->xCellSize(pPage, findCell(pPage, iCell));
+}
+#endif
+
+#ifndef SQLITE_OMIT_AUTOVACUUM
+/*
+** The cell pCell is currently part of page pSrc but will ultimately be part
+** of pPage. (pSrc and pPage are often the same.) If pCell contains a
+** pointer to an overflow page, insert an entry into the pointer-map for
+** the overflow page that will be valid after pCell has been moved to pPage.
+*/
+static void ptrmapPutOvflPtr(MemPage *pPage, MemPage *pSrc, u8 *pCell,int *pRC){
+ CellInfo info;
+ if( *pRC ) return;
+ assert( pCell!=0 );
+ pPage->xParseCell(pPage, pCell, &info);
+ if( info.nLocal<info.nPayload ){
+ Pgno ovfl;
+ if( SQLITE_WITHIN(pSrc->aDataEnd, pCell, pCell+info.nLocal) ){
+ testcase( pSrc!=pPage );
+ *pRC = SQLITE_CORRUPT_BKPT;
+ return;
+ }
+ ovfl = get4byte(&pCell[info.nSize-4]);
+ ptrmapPut(pPage->pBt, ovfl, PTRMAP_OVERFLOW1, pPage->pgno, pRC);
+ }
+}
+#endif
+
+
+/*
+** Defragment the page given. This routine reorganizes cells within the
+** page so that there are no free-blocks on the free-block list.
+**
+** Parameter nMaxFrag is the maximum amount of fragmented space that may be
+** present in the page after this routine returns.
+**
+** EVIDENCE-OF: R-44582-60138 SQLite may from time to time reorganize a
+** b-tree page so that there are no freeblocks or fragment bytes, all
+** unused bytes are contained in the unallocated space region, and all
+** cells are packed tightly at the end of the page.
+*/
+static int defragmentPage(MemPage *pPage, int nMaxFrag){
+ int i; /* Loop counter */
+ int pc; /* Address of the i-th cell */
+ int hdr; /* Offset to the page header */
+ int size; /* Size of a cell */
+ int usableSize; /* Number of usable bytes on a page */
+ int cellOffset; /* Offset to the cell pointer array */
+ int cbrk; /* Offset to the cell content area */
+ int nCell; /* Number of cells on the page */
+ unsigned char *data; /* The page data */
+ unsigned char *temp; /* Temp area for cell content */
+ unsigned char *src; /* Source of content */
+ int iCellFirst; /* First allowable cell index */
+ int iCellLast; /* Last possible cell index */
+
+ assert( sqlite3PagerIswriteable(pPage->pDbPage) );
+ assert( pPage->pBt!=0 );
+ assert( pPage->pBt->usableSize <= SQLITE_MAX_PAGE_SIZE );
+ assert( pPage->nOverflow==0 );
+ assert( sqlite3_mutex_held(pPage->pBt->mutex) );
+ temp = 0;
+ src = data = pPage->aData;
+ hdr = pPage->hdrOffset;
+ cellOffset = pPage->cellOffset;
+ nCell = pPage->nCell;
+ assert( nCell==get2byte(&data[hdr+3]) || CORRUPT_DB );
+ iCellFirst = cellOffset + 2*nCell;
+ usableSize = pPage->pBt->usableSize;
+
+ /* This block handles pages with two or fewer free blocks and nMaxFrag
+ ** or fewer fragmented bytes. In this case it is faster to move the
+ ** two (or one) blocks of cells using memmove() and add the required
+ ** offsets to each pointer in the cell-pointer array than it is to
+ ** reconstruct the entire page. */
+ if( (int)data[hdr+7]<=nMaxFrag ){
+ int iFree = get2byte(&data[hdr+1]);
+ if( iFree>usableSize-4 ) return SQLITE_CORRUPT_PAGE(pPage);
+ if( iFree ){
+ int iFree2 = get2byte(&data[iFree]);
+ if( iFree2>usableSize-4 ) return SQLITE_CORRUPT_PAGE(pPage);
+ if( 0==iFree2 || (data[iFree2]==0 && data[iFree2+1]==0) ){
+ u8 *pEnd = &data[cellOffset + nCell*2];
+ u8 *pAddr;
+ int sz2 = 0;
+ int sz = get2byte(&data[iFree+2]);
+ int top = get2byte(&data[hdr+5]);
+ if( top>=iFree ){
+ return SQLITE_CORRUPT_PAGE(pPage);
+ }
+ if( iFree2 ){
+ if( iFree+sz>iFree2 ) return SQLITE_CORRUPT_PAGE(pPage);
+ sz2 = get2byte(&data[iFree2+2]);
+ if( iFree2+sz2 > usableSize ) return SQLITE_CORRUPT_PAGE(pPage);
+ memmove(&data[iFree+sz+sz2], &data[iFree+sz], iFree2-(iFree+sz));
+ sz += sz2;
+ }else if( NEVER(iFree+sz>usableSize) ){
+ return SQLITE_CORRUPT_PAGE(pPage);
+ }
+
+ cbrk = top+sz;
+ assert( cbrk+(iFree-top) <= usableSize );
+ memmove(&data[cbrk], &data[top], iFree-top);
+ for(pAddr=&data[cellOffset]; pAddr<pEnd; pAddr+=2){
+ pc = get2byte(pAddr);
+ if( pc<iFree ){ put2byte(pAddr, pc+sz); }
+ else if( pc<iFree2 ){ put2byte(pAddr, pc+sz2); }
+ }
+ goto defragment_out;
+ }
+ }
+ }
+
+ cbrk = usableSize;
+ iCellLast = usableSize - 4;
+ for(i=0; i<nCell; i++){
+ u8 *pAddr; /* The i-th cell pointer */
+ pAddr = &data[cellOffset + i*2];
+ pc = get2byte(pAddr);
+ testcase( pc==iCellFirst );
+ testcase( pc==iCellLast );
+ /* These conditions have already been verified in btreeInitPage()
+ ** if PRAGMA cell_size_check=ON.
+ */
+ if( pc<iCellFirst || pc>iCellLast ){
+ return SQLITE_CORRUPT_PAGE(pPage);
+ }
+ assert( pc>=iCellFirst && pc<=iCellLast );
+ size = pPage->xCellSize(pPage, &src[pc]);
+ cbrk -= size;
+ if( cbrk<iCellFirst || pc+size>usableSize ){
+ return SQLITE_CORRUPT_PAGE(pPage);
+ }
+ assert( cbrk+size<=usableSize && cbrk>=iCellFirst );
+ testcase( cbrk+size==usableSize );
+ testcase( pc+size==usableSize );
+ put2byte(pAddr, cbrk);
+ if( temp==0 ){
+ int x;
+ if( cbrk==pc ) continue;
+ temp = sqlite3PagerTempSpace(pPage->pBt->pPager);
+ x = get2byte(&data[hdr+5]);
+ memcpy(&temp[x], &data[x], (cbrk+size) - x);
+ src = temp;
+ }
+ memcpy(&data[cbrk], &src[pc], size);
+ }
+ data[hdr+7] = 0;
+
+ defragment_out:
+ assert( pPage->nFree>=0 );
+ if( data[hdr+7]+cbrk-iCellFirst!=pPage->nFree ){
+ return SQLITE_CORRUPT_PAGE(pPage);
+ }
+ assert( cbrk>=iCellFirst );
+ put2byte(&data[hdr+5], cbrk);
+ data[hdr+1] = 0;
+ data[hdr+2] = 0;
+ memset(&data[iCellFirst], 0, cbrk-iCellFirst);
+ assert( sqlite3PagerIswriteable(pPage->pDbPage) );
+ return SQLITE_OK;
+}
+
+/*
+** Search the free-list on page pPg for space to store a cell nByte bytes in
+** size. If one can be found, return a pointer to the space and remove it
+** from the free-list.
+**
+** If no suitable space can be found on the free-list, return NULL.
+**
+** This function may detect corruption within pPg. If corruption is
+** detected then *pRc is set to SQLITE_CORRUPT and NULL is returned.
+**
+** Slots on the free list that are between 1 and 3 bytes larger than nByte
+** will be ignored if adding the extra space to the fragmentation count
+** causes the fragmentation count to exceed 60.
+*/
+static u8 *pageFindSlot(MemPage *pPg, int nByte, int *pRc){
+ const int hdr = pPg->hdrOffset; /* Offset to page header */
+ u8 * const aData = pPg->aData; /* Page data */
+ int iAddr = hdr + 1; /* Address of ptr to pc */
+ int pc = get2byte(&aData[iAddr]); /* Address of a free slot */
+ int x; /* Excess size of the slot */
+ int maxPC = pPg->pBt->usableSize - nByte; /* Max address for a usable slot */
+ int size; /* Size of the free slot */
+
+ assert( pc>0 );
+ while( pc<=maxPC ){
+ /* EVIDENCE-OF: R-22710-53328 The third and fourth bytes of each
+ ** freeblock form a big-endian integer which is the size of the freeblock
+ ** in bytes, including the 4-byte header. */
+ size = get2byte(&aData[pc+2]);
+ if( (x = size - nByte)>=0 ){
+ testcase( x==4 );
+ testcase( x==3 );
+ if( x<4 ){
+ /* EVIDENCE-OF: R-11498-58022 In a well-formed b-tree page, the total
+ ** number of bytes in fragments may not exceed 60. */
+ if( aData[hdr+7]>57 ) return 0;
+
+ /* Remove the slot from the free-list. Update the number of
+ ** fragmented bytes within the page. */
+ memcpy(&aData[iAddr], &aData[pc], 2);
+ aData[hdr+7] += (u8)x;
+ }else if( x+pc > maxPC ){
+ /* This slot extends off the end of the usable part of the page */
+ *pRc = SQLITE_CORRUPT_PAGE(pPg);
+ return 0;
+ }else{
+ /* The slot remains on the free-list. Reduce its size to account
+ ** for the portion used by the new allocation. */
+ put2byte(&aData[pc+2], x);
+ }
+ return &aData[pc + x];
+ }
+ iAddr = pc;
+ pc = get2byte(&aData[pc]);
+ if( pc<=iAddr+size ){
+ if( pc ){
+ /* The next slot in the chain is not past the end of the current slot */
+ *pRc = SQLITE_CORRUPT_PAGE(pPg);
+ }
+ return 0;
+ }
+ }
+ if( pc>maxPC+nByte-4 ){
+ /* The free slot chain extends off the end of the page */
+ *pRc = SQLITE_CORRUPT_PAGE(pPg);
+ }
+ return 0;
+}
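+
+/*
+** A worked example of the slot-selection rules above: a request for
+** nByte==10 against a 12-byte free slot leaves x==2 excess bytes. Since
+** x<4, the whole slot is removed from the free-list and the 2 spare
+** bytes are added to the fragment counter at aData[hdr+7] -- unless that
+** counter already exceeds 57, in which case the search gives up and
+** returns NULL. With a 20-byte slot instead, x==10, so the slot stays on
+** the list shrunk to 10 bytes, and &aData[pc+10] is returned for the new
+** allocation.
+*/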
+
+/*
+** Allocate nByte bytes of space from within the B-Tree page passed
+** as the first argument. Write into *pIdx the index into pPage->aData[]
+** of the first byte of allocated space. Return either SQLITE_OK or
+** an error code (usually SQLITE_CORRUPT).
+**
+** The caller guarantees that there is sufficient space to make the
+** allocation. This routine might need to defragment in order to bring
+** all the space together, however. This routine will avoid using
+** the first two bytes past the cell pointer area since presumably this
+** allocation is being made in order to insert a new cell, so we will
+** also end up needing a new cell pointer.
+*/
+static int allocateSpace(MemPage *pPage, int nByte, int *pIdx){
+ const int hdr = pPage->hdrOffset; /* Local cache of pPage->hdrOffset */
+ u8 * const data = pPage->aData; /* Local cache of pPage->aData */
+ int top; /* First byte of cell content area */
+ int rc = SQLITE_OK; /* Integer return code */
+ int gap; /* First byte of gap between cell pointers and cell content */
+
+ assert( sqlite3PagerIswriteable(pPage->pDbPage) );
+ assert( pPage->pBt );
+ assert( sqlite3_mutex_held(pPage->pBt->mutex) );
+ assert( nByte>=0 ); /* Minimum cell size is 4 */
+ assert( pPage->nFree>=nByte );
+ assert( pPage->nOverflow==0 );
+ assert( nByte < (int)(pPage->pBt->usableSize-8) );
+
+ assert( pPage->cellOffset == hdr + 12 - 4*pPage->leaf );
+ gap = pPage->cellOffset + 2*pPage->nCell;
+ assert( gap<=65536 );
+ /* EVIDENCE-OF: R-29356-02391 If the database uses a 65536-byte page size
+ ** and the reserved space is zero (the usual value for reserved space)
+ ** then the cell content offset of an empty page wants to be 65536.
+ ** However, that integer is too large to be stored in a 2-byte unsigned
+ ** integer, so a value of 0 is used in its place. */
+ top = get2byte(&data[hdr+5]);
+ assert( top<=(int)pPage->pBt->usableSize ); /* by btreeComputeFreeSpace() */
+ if( gap>top ){
+ if( top==0 && pPage->pBt->usableSize==65536 ){
+ top = 65536;
+ }else{
+ return SQLITE_CORRUPT_PAGE(pPage);
+ }
+ }
+
+ /* If there is enough space between gap and top for one more cell pointer,
+ ** and if the freelist is not empty, then search the
+ ** freelist looking for a slot big enough to satisfy the request.
+ */
+ testcase( gap+2==top );
+ testcase( gap+1==top );
+ testcase( gap==top );
+ if( (data[hdr+2] || data[hdr+1]) && gap+2<=top ){
+ u8 *pSpace = pageFindSlot(pPage, nByte, &rc);
+ if( pSpace ){
+ int g2;
+ assert( pSpace+nByte<=data+pPage->pBt->usableSize );
+ *pIdx = g2 = (int)(pSpace-data);
+ if( NEVER(g2<=gap) ){
+ return SQLITE_CORRUPT_PAGE(pPage);
+ }else{
+ return SQLITE_OK;
+ }
+ }else if( rc ){
+ return rc;
+ }
+ }
+
+ /* The request could not be fulfilled using a freelist slot. Check
+ ** to see if defragmentation is necessary.
+ */
+ testcase( gap+2+nByte==top );
+ if( gap+2+nByte>top ){
+ assert( pPage->nCell>0 || CORRUPT_DB );
+ assert( pPage->nFree>=0 );
+ rc = defragmentPage(pPage, MIN(4, pPage->nFree - (2+nByte)));
+ if( rc ) return rc;
+ top = get2byteNotZero(&data[hdr+5]);
+ assert( gap+2+nByte<=top );
+ }
+
+
+ /* Allocate memory from the gap in between the cell pointer array
+ ** and the cell content area. The btreeComputeFreeSpace() call has already
+ ** validated the freelist. Given that the freelist is valid, there
+ ** is no way that the allocation can extend off the end of the page.
+ ** The assert() below verifies the previous sentence.
+ */
+ top -= nByte;
+ put2byte(&data[hdr+5], top);
+ assert( top+nByte <= (int)pPage->pBt->usableSize );
+ *pIdx = top;
+ return SQLITE_OK;
+}
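+
+/*
+** A worked example of the gap/top arithmetic above, on a hypothetical
+** 1024-byte leaf page with hdr==0, 10 cells, an empty freelist, and the
+** cell content area starting at top==900: gap = cellOffset + 2*10 =
+** 8+20 = 28. A request for nByte==50 needs gap+2+nByte == 80 <= top, so
+** no defragmentation is required; top drops to 850, that value is
+** written back into the header at offset 5, and *pIdx is set to 850.
+*/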
+
+/*
+** Return a section of the pPage->aData to the freelist.
+** The first byte of the new free block is pPage->aData[iStart]
+** and the size of the block is iSize bytes.
+**
+** Adjacent freeblocks are coalesced.
+**
+** Even though the freeblock list was checked by btreeComputeFreeSpace(),
+** that routine will not detect overlap between cells or freeblocks. Nor
+** does it detect cells or freeblocks that encroach into the reserved bytes
+** at the end of the page. So do additional corruption checks inside this
+** routine and return SQLITE_CORRUPT if any problems are found.
+*/
+static int freeSpace(MemPage *pPage, u16 iStart, u16 iSize){
+ u16 iPtr; /* Address of ptr to next freeblock */
+ u16 iFreeBlk; /* Address of the next freeblock */
+ u8 hdr; /* Offset to the page header: 0 or 100 */
+ u8 nFrag = 0; /* Reduction in fragmentation */
+ u16 iOrigSize = iSize; /* Original value of iSize */
+ u16 x; /* Offset to cell content area */
+ u32 iEnd = iStart + iSize; /* First byte past the iStart buffer */
+ unsigned char *data = pPage->aData; /* Page content */
+
+ assert( pPage->pBt!=0 );
+ assert( sqlite3PagerIswriteable(pPage->pDbPage) );
+ assert( CORRUPT_DB || iStart>=pPage->hdrOffset+6+pPage->childPtrSize );
+ assert( CORRUPT_DB || iEnd <= pPage->pBt->usableSize );
+ assert( sqlite3_mutex_held(pPage->pBt->mutex) );
+ assert( iSize>=4 ); /* Minimum cell size is 4 */
+ assert( iStart<=pPage->pBt->usableSize-4 );
+
+ /* The list of freeblocks must be in ascending order. Find the
+ ** spot on the list where iStart should be inserted.
+ */
+ hdr = pPage->hdrOffset;
+ iPtr = hdr + 1;
+ if( data[iPtr+1]==0 && data[iPtr]==0 ){
+ iFreeBlk = 0; /* Shortcut for the case when the freelist is empty */
+ }else{
+ while( (iFreeBlk = get2byte(&data[iPtr]))<iStart ){
+ if( iFreeBlk<iPtr+4 ){
+ if( iFreeBlk==0 ) break; /* TH3: corrupt082.100 */
+ return SQLITE_CORRUPT_PAGE(pPage);
+ }
+ iPtr = iFreeBlk;
+ }
+ if( iFreeBlk>pPage->pBt->usableSize-4 ){ /* TH3: corrupt081.100 */
+ return SQLITE_CORRUPT_PAGE(pPage);
+ }
+ assert( iFreeBlk>iPtr || iFreeBlk==0 );
+
+ /* At this point:
+ ** iFreeBlk: First freeblock after iStart, or zero if none
+ ** iPtr: The address of a pointer to iFreeBlk
+ **
+ ** Check to see if iFreeBlk should be coalesced onto the end of iStart.
+ */
+ if( iFreeBlk && iEnd+3>=iFreeBlk ){
+ nFrag = iFreeBlk - iEnd;
+ if( iEnd>iFreeBlk ) return SQLITE_CORRUPT_PAGE(pPage);
+ iEnd = iFreeBlk + get2byte(&data[iFreeBlk+2]);
+ if( iEnd > pPage->pBt->usableSize ){
+ return SQLITE_CORRUPT_PAGE(pPage);
+ }
+ iSize = iEnd - iStart;
+ iFreeBlk = get2byte(&data[iFreeBlk]);
+ }
+
+ /* If iPtr is another freeblock (that is, if iPtr is not the freelist
+ ** pointer in the page header) then check to see if iStart should be
+ ** coalesced onto the end of iPtr.
+ */
+ if( iPtr>hdr+1 ){
+ int iPtrEnd = iPtr + get2byte(&data[iPtr+2]);
+ if( iPtrEnd+3>=iStart ){
+ if( iPtrEnd>iStart ) return SQLITE_CORRUPT_PAGE(pPage);
+ nFrag += iStart - iPtrEnd;
+ iSize = iEnd - iPtr;
+ iStart = iPtr;
+ }
+ }
+ if( nFrag>data[hdr+7] ) return SQLITE_CORRUPT_PAGE(pPage);
+ data[hdr+7] -= nFrag;
+ }
+ x = get2byte(&data[hdr+5]);
+ if( iStart<=x ){
+ /* The new freeblock is at the beginning of the cell content area,
+ ** so just extend the cell content area rather than create another
+ ** freelist entry */
+ if( iStart<x ) return SQLITE_CORRUPT_PAGE(pPage);
+ if( iPtr!=hdr+1 ) return SQLITE_CORRUPT_PAGE(pPage);
+ put2byte(&data[hdr+1], iFreeBlk);
+ put2byte(&data[hdr+5], iEnd);
+ }else{
+ /* Insert the new freeblock into the freelist */
+ put2byte(&data[iPtr], iStart);
+ }
+ if( pPage->pBt->btsFlags & BTS_FAST_SECURE ){
+ /* Overwrite deleted information with zeros when the secure_delete
+ ** option is enabled */
+ memset(&data[iStart], 0, iSize);
+ }
+ put2byte(&data[iStart], iFreeBlk);
+ put2byte(&data[iStart+2], iSize);
+ pPage->nFree += iOrigSize;
+ return SQLITE_OK;
+}
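+
+/*
+** A worked example of the coalescing logic above: suppose a hypothetical
+** page has one freeblock at offset 200 of size 20, and freeSpace() is
+** called with iStart==180, iSize==20. Then iEnd==200, the test
+** iEnd+3>=iFreeBlk succeeds with nFrag==0, and the two blocks merge into
+** a single freeblock at offset 180 of size 40 (iEnd becomes 220). If
+** instead iStart+iSize reached only 198, nFrag would be 2 and those two
+** formerly-fragmented bytes would be subtracted from the fragment
+** counter at data[hdr+7] as they are absorbed into the merged block.
+*/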
+
+/*
+** Decode the flags byte (the first byte of the header) for a page
+** and initialize fields of the MemPage structure accordingly.
+**
+** Only the following combinations are supported. Anything different
+** indicates a corrupt database file:
+**
+** PTF_ZERODATA
+** PTF_ZERODATA | PTF_LEAF
+** PTF_LEAFDATA | PTF_INTKEY
+** PTF_LEAFDATA | PTF_INTKEY | PTF_LEAF
+*/
+static int decodeFlags(MemPage *pPage, int flagByte){
+ BtShared *pBt; /* A copy of pPage->pBt */
+
+ assert( pPage->hdrOffset==(pPage->pgno==1 ? 100 : 0) );
+ assert( sqlite3_mutex_held(pPage->pBt->mutex) );
+ pPage->leaf = (u8)(flagByte>>3); assert( PTF_LEAF == 1<<3 );
+ flagByte &= ~PTF_LEAF;
+ pPage->childPtrSize = 4-4*pPage->leaf;
+ pPage->xCellSize = cellSizePtr;
+ pBt = pPage->pBt;
+ if( flagByte==(PTF_LEAFDATA | PTF_INTKEY) ){
+ /* EVIDENCE-OF: R-07291-35328 A value of 5 (0x05) means the page is an
+ ** interior table b-tree page. */
+ assert( (PTF_LEAFDATA|PTF_INTKEY)==5 );
+ /* EVIDENCE-OF: R-26900-09176 A value of 13 (0x0d) means the page is a
+ ** leaf table b-tree page. */
+ assert( (PTF_LEAFDATA|PTF_INTKEY|PTF_LEAF)==13 );
+ pPage->intKey = 1;
+ if( pPage->leaf ){
+ pPage->intKeyLeaf = 1;
+ pPage->xParseCell = btreeParseCellPtr;
+ }else{
+ pPage->intKeyLeaf = 0;
+ pPage->xCellSize = cellSizePtrNoPayload;
+ pPage->xParseCell = btreeParseCellPtrNoPayload;
+ }
+ pPage->maxLocal = pBt->maxLeaf;
+ pPage->minLocal = pBt->minLeaf;
+ }else if( flagByte==PTF_ZERODATA ){
+ /* EVIDENCE-OF: R-43316-37308 A value of 2 (0x02) means the page is an
+ ** interior index b-tree page. */
+ assert( (PTF_ZERODATA)==2 );
+ /* EVIDENCE-OF: R-59615-42828 A value of 10 (0x0a) means the page is a
+ ** leaf index b-tree page. */
+ assert( (PTF_ZERODATA|PTF_LEAF)==10 );
+ pPage->intKey = 0;
+ pPage->intKeyLeaf = 0;
+ pPage->xParseCell = btreeParseCellPtrIndex;
+ pPage->maxLocal = pBt->maxLocal;
+ pPage->minLocal = pBt->minLocal;
+ }else{
+ /* EVIDENCE-OF: R-47608-56469 Any other value for the b-tree page type is
+ ** an error. */
+ return SQLITE_CORRUPT_PAGE(pPage);
+ }
+ pPage->max1bytePayload = pBt->max1bytePayload;
+ return SQLITE_OK;
+}
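+
+/*
+** The four legal flag bytes, restated as numbers for quick reference:
+**
+**    2  (PTF_ZERODATA)                      interior index b-tree page
+**    5  (PTF_LEAFDATA|PTF_INTKEY)           interior table b-tree page
+**   10  (PTF_ZERODATA|PTF_LEAF)             leaf index b-tree page
+**   13  (PTF_LEAFDATA|PTF_INTKEY|PTF_LEAF)  leaf table b-tree page
+**
+** Any other value causes decodeFlags() to report corruption.
+*/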
+
+/*
+** Compute the amount of freespace on the page. In other words, fill
+** in the pPage->nFree field.
+*/
+static int btreeComputeFreeSpace(MemPage *pPage){
+ int pc; /* Address of a freeblock within pPage->aData[] */
+ u8 hdr; /* Offset to beginning of page header */
+ u8 *data; /* Equal to pPage->aData */
+ int usableSize; /* Amount of usable space on each page */
+ int nFree; /* Number of unused bytes on the page */
+ int top; /* First byte of the cell content area */
+ int iCellFirst; /* First allowable cell or freeblock offset */
+ int iCellLast; /* Last possible cell or freeblock offset */
+
+ assert( pPage->pBt!=0 );
+ assert( pPage->pBt->db!=0 );
+ assert( sqlite3_mutex_held(pPage->pBt->mutex) );
+ assert( pPage->pgno==sqlite3PagerPagenumber(pPage->pDbPage) );
+ assert( pPage == sqlite3PagerGetExtra(pPage->pDbPage) );
+ assert( pPage->aData == sqlite3PagerGetData(pPage->pDbPage) );
+ assert( pPage->isInit==1 );
+ assert( pPage->nFree<0 );
+
+ usableSize = pPage->pBt->usableSize;
+ hdr = pPage->hdrOffset;
+ data = pPage->aData;
+ /* EVIDENCE-OF: R-58015-48175 The two-byte integer at offset 5 designates
+ ** the start of the cell content area. A zero value for this integer is
+ ** interpreted as 65536. */
+ top = get2byteNotZero(&data[hdr+5]);
+ iCellFirst = hdr + 8 + pPage->childPtrSize + 2*pPage->nCell;
+ iCellLast = usableSize - 4;
+
+ /* Compute the total free space on the page
+ ** EVIDENCE-OF: R-23588-34450 The two-byte integer at offset 1 gives the
+ ** start of the first freeblock on the page, or is zero if there are no
+ ** freeblocks. */
+ pc = get2byte(&data[hdr+1]);
+ nFree = data[hdr+7] + top; /* Init nFree to non-freeblock free space */
+ if( pc>0 ){
+ u32 next, size;
+ if( pc<top ){
+ /* EVIDENCE-OF: R-55530-52930 In a well-formed b-tree page, there will
+ ** always be at least one cell before the first freeblock.
+ */
+ return SQLITE_CORRUPT_PAGE(pPage);
+ }
+ while( 1 ){
+ if( pc>iCellLast ){
+ /* Freeblock off the end of the page */
+ return SQLITE_CORRUPT_PAGE(pPage);
+ }
+ next = get2byte(&data[pc]);
+ size = get2byte(&data[pc+2]);
+ nFree = nFree + size;
+ if( next<=pc+size+3 ) break;
+ pc = next;
+ }
+ if( next>0 ){
+ /* Freeblock not in ascending order */
+ return SQLITE_CORRUPT_PAGE(pPage);
+ }
+ if( pc+size>(unsigned int)usableSize ){
+ /* Last freeblock extends past page end */
+ return SQLITE_CORRUPT_PAGE(pPage);
+ }
+ }
+
+ /* At this point, nFree contains the sum of the offset to the start
+ ** of the cell-content area plus the number of free bytes within
+ ** the cell-content area. If this is greater than the usable-size
+ ** of the page, then the page must be corrupted. This check also
+ ** serves to verify that the offset to the start of the cell-content
+ ** area, according to the page header, lies within the page.
+ */
+ if( nFree>usableSize || nFree<iCellFirst ){
+ return SQLITE_CORRUPT_PAGE(pPage);
+ }
+ pPage->nFree = (u16)(nFree - iCellFirst);
+ return SQLITE_OK;
+}
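+
+/*
+** A worked example of the free-space computation above, on a hypothetical
+** 1024-byte leaf page with hdr==0: say the cell content area starts at
+** top==900, the fragment counter data[hdr+7] is 4, and a single
+** freeblock of size 30 exists. Then nFree = 4 + 900 + 30 = 934. With 10
+** cells, iCellFirst = 0+8+0+20 = 28 (childPtrSize is 0 on a leaf), so
+** pPage->nFree is set to 934-28 == 906 usable free bytes.
+*/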
+
+/*
+** Do additional sanity check after btreeInitPage() if
+** PRAGMA cell_size_check=ON
+*/
+static SQLITE_NOINLINE int btreeCellSizeCheck(MemPage *pPage){
+ int iCellFirst; /* First allowable cell or freeblock offset */
+ int iCellLast; /* Last possible cell or freeblock offset */
+ int i; /* Index into the cell pointer array */
+ int sz; /* Size of a cell */
+ int pc; /* Address of a freeblock within pPage->aData[] */
+ u8 *data; /* Equal to pPage->aData */
+ int usableSize; /* Maximum usable space on the page */
+ int cellOffset; /* Start of cell content area */
+
+ iCellFirst = pPage->cellOffset + 2*pPage->nCell;
+ usableSize = pPage->pBt->usableSize;
+ iCellLast = usableSize - 4;
+ data = pPage->aData;
+ cellOffset = pPage->cellOffset;
+ if( !pPage->leaf ) iCellLast--;
+ for(i=0; i<pPage->nCell; i++){
+ pc = get2byteAligned(&data[cellOffset+i*2]);
+ testcase( pc==iCellFirst );
+ testcase( pc==iCellLast );
+ if( pc<iCellFirst || pc>iCellLast ){
+ return SQLITE_CORRUPT_PAGE(pPage);
+ }
+ sz = pPage->xCellSize(pPage, &data[pc]);
+ testcase( pc+sz==usableSize );
+ if( pc+sz>usableSize ){
+ return SQLITE_CORRUPT_PAGE(pPage);
+ }
+ }
+ return SQLITE_OK;
+}
+
+/*
+** Initialize the auxiliary information for a disk block.
+**
+** Return SQLITE_OK on success. If we see that the page does
+** not contain a well-formed database page, then return
+** SQLITE_CORRUPT. Note that a return of SQLITE_OK does not
+** guarantee that the page is well-formed. It only shows that
+** we failed to detect any corruption.
+*/
+static int btreeInitPage(MemPage *pPage){
+ u8 *data; /* Equal to pPage->aData */
+ BtShared *pBt; /* The main btree structure */
+
+ assert( pPage->pBt!=0 );
+ assert( pPage->pBt->db!=0 );
+ assert( sqlite3_mutex_held(pPage->pBt->mutex) );
+ assert( pPage->pgno==sqlite3PagerPagenumber(pPage->pDbPage) );
+ assert( pPage == sqlite3PagerGetExtra(pPage->pDbPage) );
+ assert( pPage->aData == sqlite3PagerGetData(pPage->pDbPage) );
+ assert( pPage->isInit==0 );
+
+ pBt = pPage->pBt;
+ data = pPage->aData + pPage->hdrOffset;
+ /* EVIDENCE-OF: R-28594-02890 The one-byte flag at offset 0 indicating
+ ** the b-tree page type. */
+ if( decodeFlags(pPage, data[0]) ){
+ return SQLITE_CORRUPT_PAGE(pPage);
+ }
+ assert( pBt->pageSize>=512 && pBt->pageSize<=65536 );
+ pPage->maskPage = (u16)(pBt->pageSize - 1);
+ pPage->nOverflow = 0;
+ pPage->cellOffset = pPage->hdrOffset + 8 + pPage->childPtrSize;
+ pPage->aCellIdx = data + pPage->childPtrSize + 8;
+ pPage->aDataEnd = pPage->aData + pBt->usableSize;
+ pPage->aDataOfst = pPage->aData + pPage->childPtrSize;
+ /* EVIDENCE-OF: R-37002-32774 The two-byte integer at offset 3 gives the
+ ** number of cells on the page. */
+ pPage->nCell = get2byte(&data[3]);
+ if( pPage->nCell>MX_CELL(pBt) ){
+ /* Too many cells for a single page. The page must be corrupt */
+ return SQLITE_CORRUPT_PAGE(pPage);
+ }
+ testcase( pPage->nCell==MX_CELL(pBt) );
+ /* EVIDENCE-OF: R-24089-57979 If a page contains no cells (which is only
+ ** possible for a root page of a table that contains no rows) then the
+ ** offset to the cell content area will equal the page size minus the
+ ** bytes of reserved space. */
+ assert( pPage->nCell>0
+ || get2byteNotZero(&data[5])==(int)pBt->usableSize
+ || CORRUPT_DB );
+ pPage->nFree = -1; /* Indicate that this value is not yet computed */
+ pPage->isInit = 1;
+ if( pBt->db->flags & SQLITE_CellSizeCk ){
+ return btreeCellSizeCheck(pPage);
+ }
+ return SQLITE_OK;
+}
+
+/*
+** Set up a raw page so that it looks like a database page holding
+** no entries.
+*/
+static void zeroPage(MemPage *pPage, int flags){
+ unsigned char *data = pPage->aData;
+ BtShared *pBt = pPage->pBt;
+ u8 hdr = pPage->hdrOffset;
+ u16 first;
+
+ assert( sqlite3PagerPagenumber(pPage->pDbPage)==pPage->pgno );
+ assert( sqlite3PagerGetExtra(pPage->pDbPage) == (void*)pPage );
+ assert( sqlite3PagerGetData(pPage->pDbPage) == data );
+ assert( sqlite3PagerIswriteable(pPage->pDbPage) );
+ assert( sqlite3_mutex_held(pBt->mutex) );
+ if( pBt->btsFlags & BTS_FAST_SECURE ){
+ memset(&data[hdr], 0, pBt->usableSize - hdr);
+ }
+ data[hdr] = (char)flags;
+ first = hdr + ((flags&PTF_LEAF)==0 ? 12 : 8);
+ memset(&data[hdr+1], 0, 4);
+ data[hdr+7] = 0;
+ put2byte(&data[hdr+5], pBt->usableSize);
+ pPage->nFree = (u16)(pBt->usableSize - first);
+ decodeFlags(pPage, flags);
+ pPage->cellOffset = first;
+ pPage->aDataEnd = &data[pBt->usableSize];
+ pPage->aCellIdx = &data[first];
+ pPage->aDataOfst = &data[pPage->childPtrSize];
+ pPage->nOverflow = 0;
+ assert( pBt->pageSize>=512 && pBt->pageSize<=65536 );
+ pPage->maskPage = (u16)(pBt->pageSize - 1);
+ pPage->nCell = 0;
+ pPage->isInit = 1;
+}
+
+
+/*
+** Convert a DbPage obtained from the pager into a MemPage used by
+** the btree layer.
+*/
+static MemPage *btreePageFromDbPage(DbPage *pDbPage, Pgno pgno, BtShared *pBt){
+ MemPage *pPage = (MemPage*)sqlite3PagerGetExtra(pDbPage);
+ if( pgno!=pPage->pgno ){
+ pPage->aData = sqlite3PagerGetData(pDbPage);
+ pPage->pDbPage = pDbPage;
+ pPage->pBt = pBt;
+ pPage->pgno = pgno;
+ pPage->hdrOffset = pgno==1 ? 100 : 0;
+ }
+ assert( pPage->aData==sqlite3PagerGetData(pDbPage) );
+ return pPage;
+}
+
+/*
+** Get a page from the pager. Initialize the MemPage.pBt and
+** MemPage.aData elements if needed. See also: btreeGetUnusedPage().
+**
+** If the PAGER_GET_NOCONTENT flag is set, it means that we do not care
+** about the content of the page at this time. So do not go to the disk
+** to fetch the content. Just fill in the content with zeros for now.
+** If in the future we call sqlite3PagerWrite() on this page, that
+** means we have started to be concerned about content and the disk
+** read should occur at that point.
+*/
+static int btreeGetPage(
+ BtShared *pBt, /* The btree */
+ Pgno pgno, /* Number of the page to fetch */
+ MemPage **ppPage, /* Return the page in this parameter */
+ int flags /* PAGER_GET_NOCONTENT or PAGER_GET_READONLY */
+){
+ int rc;
+ DbPage *pDbPage;
+
+ assert( flags==0 || flags==PAGER_GET_NOCONTENT || flags==PAGER_GET_READONLY );
+ assert( sqlite3_mutex_held(pBt->mutex) );
+ rc = sqlite3PagerGet(pBt->pPager, pgno, (DbPage**)&pDbPage, flags);
+ if( rc ) return rc;
+ *ppPage = btreePageFromDbPage(pDbPage, pgno, pBt);
+ return SQLITE_OK;
+}
+
+/*
+** Retrieve a page from the pager cache. If the requested page is not
+** already in the pager cache return NULL. Initialize the MemPage.pBt and
+** MemPage.aData elements if needed.
+*/
+static MemPage *btreePageLookup(BtShared *pBt, Pgno pgno){
+ DbPage *pDbPage;
+ assert( sqlite3_mutex_held(pBt->mutex) );
+ pDbPage = sqlite3PagerLookup(pBt->pPager, pgno);
+ if( pDbPage ){
+ return btreePageFromDbPage(pDbPage, pgno, pBt);
+ }
+ return 0;
+}
+
+/*
+** Return the size of the database file in pages. If there is any kind of
+** error, return ((unsigned int)-1).
+*/
+static Pgno btreePagecount(BtShared *pBt){
+ return pBt->nPage;
+}
+SQLITE_PRIVATE Pgno sqlite3BtreeLastPage(Btree *p){
+ assert( sqlite3BtreeHoldsMutex(p) );
+ return btreePagecount(p->pBt);
+}
+
+/*
+** Get a page from the pager and initialize it.
+**
+** If pCur!=0 then the page is being fetched as part of a moveToChild()
+** call. Do additional sanity checking on the page in this case.
+** And if the fetch fails, this routine must decrement pCur->iPage.
+**
+** The page is fetched as read-write unless pCur is not NULL and is
+** a read-only cursor.
+**
+** If an error occurs, then *ppPage is undefined. It
+** may remain unchanged, or it may be set to an invalid value.
+*/
+static int getAndInitPage(
+ BtShared *pBt, /* The database file */
+ Pgno pgno, /* Number of the page to get */
+ MemPage **ppPage, /* Write the page pointer here */
+ BtCursor *pCur, /* Cursor to receive the page, or NULL */
+ int bReadOnly /* True for a read-only page */
+){
+ int rc;
+ DbPage *pDbPage;
+ assert( sqlite3_mutex_held(pBt->mutex) );
+ assert( pCur==0 || ppPage==&pCur->pPage );
+ assert( pCur==0 || bReadOnly==pCur->curPagerFlags );
+ assert( pCur==0 || pCur->iPage>0 );
+
+ if( pgno>btreePagecount(pBt) ){
+ rc = SQLITE_CORRUPT_BKPT;
+ goto getAndInitPage_error1;
+ }
+ rc = sqlite3PagerGet(pBt->pPager, pgno, (DbPage**)&pDbPage, bReadOnly);
+ if( rc ){
+ goto getAndInitPage_error1;
+ }
+ *ppPage = (MemPage*)sqlite3PagerGetExtra(pDbPage);
+ if( (*ppPage)->isInit==0 ){
+ btreePageFromDbPage(pDbPage, pgno, pBt);
+ rc = btreeInitPage(*ppPage);
+ if( rc!=SQLITE_OK ){
+ goto getAndInitPage_error2;
+ }
+ }
+ assert( (*ppPage)->pgno==pgno );
+ assert( (*ppPage)->aData==sqlite3PagerGetData(pDbPage) );
+
+ /* If obtaining a child page for a cursor, we must verify that the page is
+ ** compatible with the root page. */
+ if( pCur && ((*ppPage)->nCell<1 || (*ppPage)->intKey!=pCur->curIntKey) ){
+ rc = SQLITE_CORRUPT_PGNO(pgno);
+ goto getAndInitPage_error2;
+ }
+ return SQLITE_OK;
+
+getAndInitPage_error2:
+ releasePage(*ppPage);
+getAndInitPage_error1:
+ if( pCur ){
+ pCur->iPage--;
+ pCur->pPage = pCur->apPage[pCur->iPage];
+ }
+ testcase( pgno==0 );
+ assert( pgno!=0 || rc==SQLITE_CORRUPT );
+ return rc;
+}
+
+/*
+** Release a MemPage. This should be called once for each prior
+** call to btreeGetPage.
+**
+** Page1 is a special case and must be released using releasePageOne().
+*/
+static void releasePageNotNull(MemPage *pPage){
+ assert( pPage->aData );
+ assert( pPage->pBt );
+ assert( pPage->pDbPage!=0 );
+ assert( sqlite3PagerGetExtra(pPage->pDbPage) == (void*)pPage );
+ assert( sqlite3PagerGetData(pPage->pDbPage)==pPage->aData );
+ assert( sqlite3_mutex_held(pPage->pBt->mutex) );
+ sqlite3PagerUnrefNotNull(pPage->pDbPage);
+}
+static void releasePage(MemPage *pPage){
+ if( pPage ) releasePageNotNull(pPage);
+}
+static void releasePageOne(MemPage *pPage){
+ assert( pPage!=0 );
+ assert( pPage->aData );
+ assert( pPage->pBt );
+ assert( pPage->pDbPage!=0 );
+ assert( sqlite3PagerGetExtra(pPage->pDbPage) == (void*)pPage );
+ assert( sqlite3PagerGetData(pPage->pDbPage)==pPage->aData );
+ assert( sqlite3_mutex_held(pPage->pBt->mutex) );
+ sqlite3PagerUnrefPageOne(pPage->pDbPage);
+}
+
+/*
+** Get an unused page.
+**
+** This works just like btreeGetPage() with the addition:
+**
+** * If the page is already in use for some other purpose, immediately
+** release it and return an SQLITE_CORRUPT error.
+** * Make sure the isInit flag is clear
+*/
+static int btreeGetUnusedPage(
+ BtShared *pBt, /* The btree */
+ Pgno pgno, /* Number of the page to fetch */
+ MemPage **ppPage, /* Return the page in this parameter */
+ int flags /* PAGER_GET_NOCONTENT or PAGER_GET_READONLY */
+){
+ int rc = btreeGetPage(pBt, pgno, ppPage, flags);
+ if( rc==SQLITE_OK ){
+ if( sqlite3PagerPageRefcount((*ppPage)->pDbPage)>1 ){
+ releasePage(*ppPage);
+ *ppPage = 0;
+ return SQLITE_CORRUPT_BKPT;
+ }
+ (*ppPage)->isInit = 0;
+ }else{
+ *ppPage = 0;
+ }
+ return rc;
+}
+
+
+/*
+** During a rollback, when the pager reloads information into the cache
+** so that the cache is restored to its original state at the start of
+** the transaction, for each page restored this routine is called.
+**
+** This routine needs to reset the extra data section at the end of the
+** page to agree with the restored data.
+*/
+static void pageReinit(DbPage *pData){
+ MemPage *pPage;
+ pPage = (MemPage *)sqlite3PagerGetExtra(pData);
+ assert( sqlite3PagerPageRefcount(pData)>0 );
+ if( pPage->isInit ){
+ assert( sqlite3_mutex_held(pPage->pBt->mutex) );
+ pPage->isInit = 0;
+ if( sqlite3PagerPageRefcount(pData)>1 ){
+ /* pPage might not be a btree page; it might be an overflow page
+ ** or ptrmap page or a free page. In those cases, the following
+ ** call to btreeInitPage() will likely return SQLITE_CORRUPT.
+ ** But no harm is done by this. And it is very important that
+ ** btreeInitPage() be called on every btree page so we make
+ ** the call for every page that comes in for re-initing. */
+ btreeInitPage(pPage);
+ }
+ }
+}
+
+/*
+** Invoke the busy handler for a btree.
+*/
+static int btreeInvokeBusyHandler(void *pArg){
+ BtShared *pBt = (BtShared*)pArg;
+ assert( pBt->db );
+ assert( sqlite3_mutex_held(pBt->db->mutex) );
+ return sqlite3InvokeBusyHandler(&pBt->db->busyHandler);
+}
+
+/*
+** Open a database file.
+**
+** zFilename is the name of the database file. If zFilename is NULL
+** then an ephemeral database is created. The ephemeral database might
+** be exclusively in memory, or it might use a disk-based memory cache.
+** Either way, the ephemeral database will be automatically deleted
+** when sqlite3BtreeClose() is called.
+**
+** If zFilename is ":memory:" then an in-memory database is created
+** that is automatically destroyed when it is closed.
+**
+** The "flags" parameter is a bitmask that might contain bits like
+** BTREE_OMIT_JOURNAL and/or BTREE_MEMORY.
+**
+** If the database is already opened in the same database connection
+** and we are in shared cache mode, then the open will fail with an
+** SQLITE_CONSTRAINT error. We cannot allow two or more BtShared
+** objects in the same database connection since doing so will lead
+** to problems with locking.
+*/
+SQLITE_PRIVATE int sqlite3BtreeOpen(
+ sqlite3_vfs *pVfs, /* VFS to use for this b-tree */
+ const char *zFilename, /* Name of the file containing the BTree database */
+ sqlite3 *db, /* Associated database handle */
+ Btree **ppBtree, /* Pointer to new Btree object written here */
+ int flags, /* Options */
+ int vfsFlags /* Flags passed through to sqlite3_vfs.xOpen() */
+){
+ BtShared *pBt = 0; /* Shared part of btree structure */
+ Btree *p; /* Handle to return */
+ sqlite3_mutex *mutexOpen = 0; /* Prevents a race condition. Ticket #3537 */
+ int rc = SQLITE_OK; /* Result code from this function */
+ u8 nReserve; /* Byte of unused space on each page */
+ unsigned char zDbHeader[100]; /* Database header content */
+
+ /* True if opening an ephemeral, temporary database */
+ const int isTempDb = zFilename==0 || zFilename[0]==0;
+
+ /* Set the variable isMemdb to true for an in-memory database, or
+ ** false for a file-based database.
+ */
+#ifdef SQLITE_OMIT_MEMORYDB
+ const int isMemdb = 0;
+#else
+ const int isMemdb = (zFilename && strcmp(zFilename, ":memory:")==0)
+ || (isTempDb && sqlite3TempInMemory(db))
+ || (vfsFlags & SQLITE_OPEN_MEMORY)!=0;
+#endif
+
+ assert( db!=0 );
+ assert( pVfs!=0 );
+ assert( sqlite3_mutex_held(db->mutex) );
+ assert( (flags&0xff)==flags ); /* flags fit in 8 bits */
+
+ /* Only a BTREE_SINGLE database can be BTREE_UNORDERED */
+ assert( (flags & BTREE_UNORDERED)==0 || (flags & BTREE_SINGLE)!=0 );
+
+ /* A BTREE_SINGLE database is always a temporary and/or ephemeral */
+ assert( (flags & BTREE_SINGLE)==0 || isTempDb );
+
+ if( isMemdb ){
+ flags |= BTREE_MEMORY;
+ }
+ if( (vfsFlags & SQLITE_OPEN_MAIN_DB)!=0 && (isMemdb || isTempDb) ){
+ vfsFlags = (vfsFlags & ~SQLITE_OPEN_MAIN_DB) | SQLITE_OPEN_TEMP_DB;
+ }
+ p = sqlite3MallocZero(sizeof(Btree));
+ if( !p ){
+ return SQLITE_NOMEM_BKPT;
+ }
+ p->inTrans = TRANS_NONE;
+ p->db = db;
+#ifndef SQLITE_OMIT_SHARED_CACHE
+ p->lock.pBtree = p;
+ p->lock.iTable = 1;
+#endif
+
+#if !defined(SQLITE_OMIT_SHARED_CACHE) && !defined(SQLITE_OMIT_DISKIO)
+ /*
+ ** If this Btree is a candidate for shared cache, try to find an
+ ** existing BtShared object that we can share with
+ */
+ if( isTempDb==0 && (isMemdb==0 || (vfsFlags&SQLITE_OPEN_URI)!=0) ){
+ if( vfsFlags & SQLITE_OPEN_SHAREDCACHE ){
+ int nFilename = sqlite3Strlen30(zFilename)+1;
+ int nFullPathname = pVfs->mxPathname+1;
+ char *zFullPathname = sqlite3Malloc(MAX(nFullPathname,nFilename));
+ MUTEX_LOGIC( sqlite3_mutex *mutexShared; )
+
+ p->sharable = 1;
+ if( !zFullPathname ){
+ sqlite3_free(p);
+ return SQLITE_NOMEM_BKPT;
+ }
+ if( isMemdb ){
+ memcpy(zFullPathname, zFilename, nFilename);
+ }else{
+ rc = sqlite3OsFullPathname(pVfs, zFilename,
+ nFullPathname, zFullPathname);
+ if( rc ){
+ if( rc==SQLITE_OK_SYMLINK ){
+ rc = SQLITE_OK;
+ }else{
+ sqlite3_free(zFullPathname);
+ sqlite3_free(p);
+ return rc;
+ }
+ }
+ }
+#if SQLITE_THREADSAFE
+ mutexOpen = sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_OPEN);
+ sqlite3_mutex_enter(mutexOpen);
+ mutexShared = sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MAIN);
+ sqlite3_mutex_enter(mutexShared);
+#endif
+ for(pBt=GLOBAL(BtShared*,sqlite3SharedCacheList); pBt; pBt=pBt->pNext){
+ assert( pBt->nRef>0 );
+ if( 0==strcmp(zFullPathname, sqlite3PagerFilename(pBt->pPager, 0))
+ && sqlite3PagerVfs(pBt->pPager)==pVfs ){
+ int iDb;
+ for(iDb=db->nDb-1; iDb>=0; iDb--){
+ Btree *pExisting = db->aDb[iDb].pBt;
+ if( pExisting && pExisting->pBt==pBt ){
+ sqlite3_mutex_leave(mutexShared);
+ sqlite3_mutex_leave(mutexOpen);
+ sqlite3_free(zFullPathname);
+ sqlite3_free(p);
+ return SQLITE_CONSTRAINT;
+ }
+ }
+ p->pBt = pBt;
+ pBt->nRef++;
+ break;
+ }
+ }
+ sqlite3_mutex_leave(mutexShared);
+ sqlite3_free(zFullPathname);
+ }
+#ifdef SQLITE_DEBUG
+ else{
+ /* In debug mode, we mark all persistent databases as sharable
+ ** even when they are not. This exercises the locking code and
+ ** gives more opportunity for asserts(sqlite3_mutex_held())
+ ** statements to find locking problems.
+ */
+ p->sharable = 1;
+ }
+#endif
+ }
+#endif
+ if( pBt==0 ){
+ /*
+ ** The following asserts make sure that structures used by the btree are
+ ** the right size. This is to guard against size changes that result
+ ** when compiling on a different architecture.
+ */
+ assert( sizeof(i64)==8 );
+ assert( sizeof(u64)==8 );
+ assert( sizeof(u32)==4 );
+ assert( sizeof(u16)==2 );
+ assert( sizeof(Pgno)==4 );
+
+ pBt = sqlite3MallocZero( sizeof(*pBt) );
+ if( pBt==0 ){
+ rc = SQLITE_NOMEM_BKPT;
+ goto btree_open_out;
+ }
+ rc = sqlite3PagerOpen(pVfs, &pBt->pPager, zFilename,
+ sizeof(MemPage), flags, vfsFlags, pageReinit);
+ if( rc==SQLITE_OK ){
+ sqlite3PagerSetMmapLimit(pBt->pPager, db->szMmap);
+ rc = sqlite3PagerReadFileheader(pBt->pPager,sizeof(zDbHeader),zDbHeader);
+ }
+ if( rc!=SQLITE_OK ){
+ goto btree_open_out;
+ }
+ pBt->openFlags = (u8)flags;
+ pBt->db = db;
+ sqlite3PagerSetBusyHandler(pBt->pPager, btreeInvokeBusyHandler, pBt);
+ p->pBt = pBt;
+
+ pBt->pCursor = 0;
+ pBt->pPage1 = 0;
+ if( sqlite3PagerIsreadonly(pBt->pPager) ) pBt->btsFlags |= BTS_READ_ONLY;
+#if defined(SQLITE_SECURE_DELETE)
+ pBt->btsFlags |= BTS_SECURE_DELETE;
+#elif defined(SQLITE_FAST_SECURE_DELETE)
+ pBt->btsFlags |= BTS_OVERWRITE;
+#endif
+ /* EVIDENCE-OF: R-51873-39618 The page size for a database file is
+ ** determined by the 2-byte integer located at an offset of 16 bytes from
+ ** the beginning of the database file. */
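+ /* Note: this expression also decodes the 65536-byte page size, which
+ ** is stored as 0x00 0x01: (0x00<<8)|(0x01<<16)==65536. For every other
+ ** valid (power-of-two) size, byte 17 is zero. */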
+ pBt->pageSize = (zDbHeader[16]<<8) | (zDbHeader[17]<<16);
+ if( pBt->pageSize<512 || pBt->pageSize>SQLITE_MAX_PAGE_SIZE
+ || ((pBt->pageSize-1)&pBt->pageSize)!=0 ){
+ pBt->pageSize = 0;
+#ifndef SQLITE_OMIT_AUTOVACUUM
+ /* If the magic name ":memory:" will create an in-memory database, then
+ ** leave the autoVacuum mode at 0 (do not auto-vacuum), even if
+ ** SQLITE_DEFAULT_AUTOVACUUM is true. On the other hand, if
+ ** SQLITE_OMIT_MEMORYDB has been defined, then ":memory:" is just a
+ ** regular file-name. In this case the auto-vacuum applies as per normal.
+ */
+ if( zFilename && !isMemdb ){
+ pBt->autoVacuum = (SQLITE_DEFAULT_AUTOVACUUM ? 1 : 0);
+ pBt->incrVacuum = (SQLITE_DEFAULT_AUTOVACUUM==2 ? 1 : 0);
+ }
+#endif
+ nReserve = 0;
+ }else{
+ /* EVIDENCE-OF: R-37497-42412 The size of the reserved region is
+ ** determined by the one-byte unsigned integer found at an offset of 20
+ ** into the database file header. */
+ nReserve = zDbHeader[20];
+ pBt->btsFlags |= BTS_PAGESIZE_FIXED;
+#ifndef SQLITE_OMIT_AUTOVACUUM
+ pBt->autoVacuum = (get4byte(&zDbHeader[36 + 4*4])?1:0);
+ pBt->incrVacuum = (get4byte(&zDbHeader[36 + 7*4])?1:0);
+#endif
+ }
+ rc = sqlite3PagerSetPagesize(pBt->pPager, &pBt->pageSize, nReserve);
+ if( rc ) goto btree_open_out;
+ pBt->usableSize = pBt->pageSize - nReserve;
+ assert( (pBt->pageSize & 7)==0 ); /* 8-byte alignment of pageSize */
+
+#if !defined(SQLITE_OMIT_SHARED_CACHE) && !defined(SQLITE_OMIT_DISKIO)
+ /* Add the new BtShared object to the linked list of sharable BtShareds.
+ */
+ pBt->nRef = 1;
+ if( p->sharable ){
+ MUTEX_LOGIC( sqlite3_mutex *mutexShared; )
+ MUTEX_LOGIC( mutexShared = sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MAIN);)
+ if( SQLITE_THREADSAFE && sqlite3GlobalConfig.bCoreMutex ){
+ pBt->mutex = sqlite3MutexAlloc(SQLITE_MUTEX_FAST);
+ if( pBt->mutex==0 ){
+ rc = SQLITE_NOMEM_BKPT;
+ goto btree_open_out;
+ }
+ }
+ sqlite3_mutex_enter(mutexShared);
+ pBt->pNext = GLOBAL(BtShared*,sqlite3SharedCacheList);
+ GLOBAL(BtShared*,sqlite3SharedCacheList) = pBt;
+ sqlite3_mutex_leave(mutexShared);
+ }
+#endif
+ }
+
+#if !defined(SQLITE_OMIT_SHARED_CACHE) && !defined(SQLITE_OMIT_DISKIO)
+ /* If the new Btree uses a sharable pBtShared, then link the new
+ ** Btree into the list of all sharable Btrees for the same connection.
+ ** The list is kept in ascending order by pBt address.
+ */
+ if( p->sharable ){
+ int i;
+ Btree *pSib;
+ for(i=0; i<db->nDb; i++){
+ if( (pSib = db->aDb[i].pBt)!=0 && pSib->sharable ){
+ while( pSib->pPrev ){ pSib = pSib->pPrev; }
+ if( (uptr)p->pBt<(uptr)pSib->pBt ){
+ p->pNext = pSib;
+ p->pPrev = 0;
+ pSib->pPrev = p;
+ }else{
+ while( pSib->pNext && (uptr)pSib->pNext->pBt<(uptr)p->pBt ){
+ pSib = pSib->pNext;
+ }
+ p->pNext = pSib->pNext;
+ p->pPrev = pSib;
+ if( p->pNext ){
+ p->pNext->pPrev = p;
+ }
+ pSib->pNext = p;
+ }
+ break;
+ }
+ }
+ }
+#endif
+ *ppBtree = p;
+
+btree_open_out:
+ if( rc!=SQLITE_OK ){
+ if( pBt && pBt->pPager ){
+ sqlite3PagerClose(pBt->pPager, 0);
+ }
+ sqlite3_free(pBt);
+ sqlite3_free(p);
+ *ppBtree = 0;
+ }else{
+ sqlite3_file *pFile;
+
+ /* If the B-Tree was successfully opened, set the pager-cache size to the
+ ** default value. Except, when opening on an existing shared pager-cache,
+ ** do not change the pager-cache size.
+ */
+ if( sqlite3BtreeSchema(p, 0, 0)==0 ){
+ sqlite3PagerSetCachesize(p->pBt->pPager, SQLITE_DEFAULT_CACHE_SIZE);
+ }
+
+ pFile = sqlite3PagerFile(pBt->pPager);
+ if( pFile->pMethods ){
+ sqlite3OsFileControlHint(pFile, SQLITE_FCNTL_PDB, (void*)&pBt->db);
+ }
+ }
+ if( mutexOpen ){
+ assert( sqlite3_mutex_held(mutexOpen) );
+ sqlite3_mutex_leave(mutexOpen);
+ }
+ assert( rc!=SQLITE_OK || sqlite3BtreeConnectionCount(*ppBtree)>0 );
+ return rc;
+}
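+
+/* Hedged usage sketch (hypothetical, not upstream code): opening an
+** ephemeral btree through the interface above. A NULL filename selects an
+** ephemeral database; BTREE_SINGLE is permitted here because the database
+** is temporary, per the asserts above. The helper name and the exact flag
+** combination are assumptions for illustration only.
+*/
+#if 0
+static int exampleOpenEphemeral(sqlite3 *db, Btree **ppBt){
+  return sqlite3BtreeOpen(db->pVfs, 0, db, ppBt,
+                          BTREE_OMIT_JOURNAL | BTREE_SINGLE,
+                          SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE |
+                          SQLITE_OPEN_TEMP_DB);
+}
+#endif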
+
+/*
+** Decrement the BtShared.nRef counter. When it reaches zero,
+** remove the BtShared structure from the sharing list. Return
+** true if the BtShared.nRef counter reaches zero and return
+** false if it is still positive.
+*/
+static int removeFromSharingList(BtShared *pBt){
+#ifndef SQLITE_OMIT_SHARED_CACHE
+ MUTEX_LOGIC( sqlite3_mutex *pMainMtx; )
+ BtShared *pList;
+ int removed = 0;
+
+ assert( sqlite3_mutex_notheld(pBt->mutex) );
+ MUTEX_LOGIC( pMainMtx = sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MAIN); )
+ sqlite3_mutex_enter(pMainMtx);
+ pBt->nRef--;
+ if( pBt->nRef<=0 ){
+ if( GLOBAL(BtShared*,sqlite3SharedCacheList)==pBt ){
+ GLOBAL(BtShared*,sqlite3SharedCacheList) = pBt->pNext;
+ }else{
+ pList = GLOBAL(BtShared*,sqlite3SharedCacheList);
+ while( ALWAYS(pList) && pList->pNext!=pBt ){
+ pList=pList->pNext;
+ }
+ if( ALWAYS(pList) ){
+ pList->pNext = pBt->pNext;
+ }
+ }
+ if( SQLITE_THREADSAFE ){
+ sqlite3_mutex_free(pBt->mutex);
+ }
+ removed = 1;
+ }
+ sqlite3_mutex_leave(pMainMtx);
+ return removed;
+#else
+ return 1;
+#endif
+}
+
+/*
+** Make sure pBt->pTmpSpace points to an allocation of
+** MX_CELL_SIZE(pBt) bytes with a 4-byte prefix for a left-child
+** pointer.
+*/
+static void allocateTempSpace(BtShared *pBt){
+ if( !pBt->pTmpSpace ){
+ pBt->pTmpSpace = sqlite3PageMalloc( pBt->pageSize );
+
+ /* One of the uses of pBt->pTmpSpace is to format cells before
+ ** inserting them into a leaf page (function fillInCell()). If
+ ** a cell is less than 4 bytes in size, it is rounded up to 4 bytes
+ ** by the various routines that manipulate binary cells. Which
+ ** can mean that fillInCell() only initializes the first 2 or 3
+ ** bytes of pTmpSpace, but that the first 4 bytes are copied from
+ ** it into a database page. This is not actually a problem, but it
+ ** does cause a valgrind error when the 1 or 2 bytes of uninitialized
+ ** data are passed to the write() system call. So to avoid this error,
+ ** zero the first 4 bytes of temp space here.
+ **
+ ** Also: Provide four bytes of initialized space before the
+ ** beginning of pTmpSpace as an area available to prepend the
+ ** left-child pointer to the beginning of a cell.
+ */
+ if( pBt->pTmpSpace ){
+ memset(pBt->pTmpSpace, 0, 8);
+ pBt->pTmpSpace += 4;
+ }
+ }
+}
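+
+/* Layout sketch for the allocation above (illustrative):
+**
+**                      pTmpSpace
+**                      |
+**   [ 4 zeroed bytes ][ at least MX_CELL_SIZE(pBt) bytes for cells ]
+**
+** The stored pointer is advanced past the 4-byte prefix so that a
+** left-child pointer can later be prepended at pTmpSpace[-4] without a
+** second allocation.
+*/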
+
+/*
+** Free the pBt->pTmpSpace allocation
+*/
+static void freeTempSpace(BtShared *pBt){
+ if( pBt->pTmpSpace ){
+ pBt->pTmpSpace -= 4;
+ sqlite3PageFree(pBt->pTmpSpace);
+ pBt->pTmpSpace = 0;
+ }
+}
+
+/*
+** Close an open database and invalidate all cursors.
+*/
+SQLITE_PRIVATE int sqlite3BtreeClose(Btree *p){
+ BtShared *pBt = p->pBt;
+ BtCursor *pCur;
+
+ /* Close all cursors opened via this handle. */
+ assert( sqlite3_mutex_held(p->db->mutex) );
+ sqlite3BtreeEnter(p);
+ pCur = pBt->pCursor;
+ while( pCur ){
+ BtCursor *pTmp = pCur;
+ pCur = pCur->pNext;
+ if( pTmp->pBtree==p ){
+ sqlite3BtreeCloseCursor(pTmp);
+ }
+ }
+
+ /* Rollback any active transaction and free the handle structure.
+ ** The call to sqlite3BtreeRollback() drops any table-locks held by
+ ** this handle.
+ */
+ sqlite3BtreeRollback(p, SQLITE_OK, 0);
+ sqlite3BtreeLeave(p);
+
+ /* If there are still other outstanding references to the shared-btree
+ ** structure, return now. The remainder of this procedure cleans
+ ** up the shared-btree.
+ */
+ assert( p->wantToLock==0 && p->locked==0 );
+ if( !p->sharable || removeFromSharingList(pBt) ){
+ /* The pBt is no longer on the sharing list, so we can access
+ ** it without having to hold the mutex.
+ **
+ ** Clean out and delete the BtShared object.
+ */
+ assert( !pBt->pCursor );
+ sqlite3PagerClose(pBt->pPager, p->db);
+ if( pBt->xFreeSchema && pBt->pSchema ){
+ pBt->xFreeSchema(pBt->pSchema);
+ }
+ sqlite3DbFree(0, pBt->pSchema);
+ freeTempSpace(pBt);
+ sqlite3_free(pBt);
+ }
+
+#ifndef SQLITE_OMIT_SHARED_CACHE
+ assert( p->wantToLock==0 );
+ assert( p->locked==0 );
+ if( p->pPrev ) p->pPrev->pNext = p->pNext;
+ if( p->pNext ) p->pNext->pPrev = p->pPrev;
+#endif
+
+ sqlite3_free(p);
+ return SQLITE_OK;
+}
+
+/*
+** Change the "soft" limit on the number of pages in the cache.
+** Unused and unmodified pages will be recycled when the number of
+** pages in the cache exceeds this soft limit. But the size of the
+** cache is allowed to grow larger than this limit if it contains
+** dirty pages or pages still in active use.
+*/
+SQLITE_PRIVATE int sqlite3BtreeSetCacheSize(Btree *p, int mxPage){
+ BtShared *pBt = p->pBt;
+ assert( sqlite3_mutex_held(p->db->mutex) );
+ sqlite3BtreeEnter(p);
+ sqlite3PagerSetCachesize(pBt->pPager, mxPage);
+ sqlite3BtreeLeave(p);
+ return SQLITE_OK;
+}
+
+/*
+** Change the "spill" limit on the number of pages in the cache.
+** If the number of pages exceeds this limit during a write transaction,
+** the pager might attempt to "spill" pages to the journal early in
+** order to free up memory.
+**
+** The value returned is the current spill size. If zero is passed
+** as an argument, no changes are made to the spill size setting, so
+** using mxPage of 0 is a way to query the current spill size.
+*/
+SQLITE_PRIVATE int sqlite3BtreeSetSpillSize(Btree *p, int mxPage){
+ BtShared *pBt = p->pBt;
+ int res;
+ assert( sqlite3_mutex_held(p->db->mutex) );
+ sqlite3BtreeEnter(p);
+ res = sqlite3PagerSetSpillsize(pBt->pPager, mxPage);
+ sqlite3BtreeLeave(p);
+ return res;
+}
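+
+/* Hedged usage sketch (hypothetical caller): because an mxPage of zero
+** leaves the setting untouched, the routine above doubles as a query.
+*/
+#if 0
+static int exampleQuerySpillSize(Btree *p){
+  return sqlite3BtreeSetSpillSize(p, 0); /* read without modifying */
+}
+#endif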
+
+#if SQLITE_MAX_MMAP_SIZE>0
+/*
+** Change the limit on the amount of the database file that may be
+** memory mapped.
+*/
+SQLITE_PRIVATE int sqlite3BtreeSetMmapLimit(Btree *p, sqlite3_int64 szMmap){
+ BtShared *pBt = p->pBt;
+ assert( sqlite3_mutex_held(p->db->mutex) );
+ sqlite3BtreeEnter(p);
+ sqlite3PagerSetMmapLimit(pBt->pPager, szMmap);
+ sqlite3BtreeLeave(p);
+ return SQLITE_OK;
+}
+#endif /* SQLITE_MAX_MMAP_SIZE>0 */
+
+/*
+** Change the way data is synced to disk in order to increase or decrease
+** how well the database resists damage due to OS crashes and power
+** failures. Level 1 is the same as asynchronous (no syncs() occur and
+** there is a high probability of damage). Level 2 is the default; there
+** is a very low but non-zero probability of damage. Level 3 reduces the
+** probability of damage to near zero, but at a cost in write performance.
+*/
+#ifndef SQLITE_OMIT_PAGER_PRAGMAS
+SQLITE_PRIVATE int sqlite3BtreeSetPagerFlags(
+ Btree *p, /* The btree to set the safety level on */
+ unsigned pgFlags /* Various PAGER_* flags */
+){
+ BtShared *pBt = p->pBt;
+ assert( sqlite3_mutex_held(p->db->mutex) );
+ sqlite3BtreeEnter(p);
+ sqlite3PagerSetFlags(pBt->pPager, pgFlags);
+ sqlite3BtreeLeave(p);
+ return SQLITE_OK;
+}
+#endif
+
+/*
+** Change the default page size and the number of reserved bytes per page.
+** Or, if the page size has already been fixed, return SQLITE_READONLY
+** without changing anything.
+**
+** The page size must be a power of 2 between 512 and 65536. If the page
+** size supplied does not meet this constraint then the page size is not
+** changed.
+**
+** Page sizes are constrained to be a power of two so that the region
+** of the database file used for locking (beginning at PENDING_BYTE,
+** the first byte past the 1GB boundary, 0x40000000) needs to occur
+** at the beginning of a page.
+**
+** If parameter nReserve is less than zero, then the number of reserved
+** bytes per page is left unchanged.
+**
+** If iFix!=0 then the BTS_PAGESIZE_FIXED flag is set so that the page size
+** and autovacuum mode can no longer be changed.
+*/
+SQLITE_PRIVATE int sqlite3BtreeSetPageSize(Btree *p, int pageSize, int nReserve, int iFix){
+ int rc = SQLITE_OK;
+ int x;
+ BtShared *pBt = p->pBt;
+ assert( nReserve>=0 && nReserve<=255 );
+ sqlite3BtreeEnter(p);
+ pBt->nReserveWanted = nReserve;
+ x = pBt->pageSize - pBt->usableSize;
+ if( nReserve<x ) nReserve = x;
+ if( pBt->btsFlags & BTS_PAGESIZE_FIXED ){
+ sqlite3BtreeLeave(p);
+ return SQLITE_READONLY;
+ }
+ assert( nReserve>=0 && nReserve<=255 );
+ if( pageSize>=512 && pageSize<=SQLITE_MAX_PAGE_SIZE &&
+ ((pageSize-1)&pageSize)==0 ){
+ assert( (pageSize & 7)==0 );
+ assert( !pBt->pCursor );
+ pBt->pageSize = (u32)pageSize;
+ freeTempSpace(pBt);
+ }
+ rc = sqlite3PagerSetPagesize(pBt->pPager, &pBt->pageSize, nReserve);
+ pBt->usableSize = pBt->pageSize - (u16)nReserve;
+ if( iFix ) pBt->btsFlags |= BTS_PAGESIZE_FIXED;
+ sqlite3BtreeLeave(p);
+ return rc;
+}
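+
+/* Worked example of the validity test above (illustrative): a value is a
+** power of two exactly when it shares no set bits with its predecessor.
+**
+**   pageSize = 4096 = 0x1000: (4096-1)&4096 = 0x0FFF & 0x1000 = 0 -> accepted
+**   pageSize = 4000 = 0x0FA0: (4000-1)&4000 = 0x0F9F & 0x0FA0 = 0x0F80
+**                                             -> rejected, size unchanged
+*/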
+
+/*
+** Return the currently defined page size
+*/
+SQLITE_PRIVATE int sqlite3BtreeGetPageSize(Btree *p){
+ return p->pBt->pageSize;
+}
+
+/*
+** This function is similar to sqlite3BtreeGetReserve(), except that it
+** may only be called if it is guaranteed that the b-tree mutex is already
+** held.
+**
+** This is useful in one special case in the backup API code where it is
+** known that the shared b-tree mutex is held, but the mutex on the
+** database handle that owns *p is not. In this case if sqlite3BtreeEnter()
+** were to be called, it might collide with some other operation on the
+** database handle that owns *p, causing undefined behavior.
+*/
+SQLITE_PRIVATE int sqlite3BtreeGetReserveNoMutex(Btree *p){
+ int n;
+ assert( sqlite3_mutex_held(p->pBt->mutex) );
+ n = p->pBt->pageSize - p->pBt->usableSize;
+ return n;
+}
+
+/*
+** Return the number of bytes of space at the end of every page that
+** are intentionally left unused. This is the "reserved" space that is
+** sometimes used by extensions.
+**
+** The value returned is the larger of the current reserve size and
+** the latest reserve size requested by SQLITE_FILECTRL_RESERVE_BYTES.
+** The amount of reserve can only grow - never shrink.
+*/
+SQLITE_PRIVATE int sqlite3BtreeGetRequestedReserve(Btree *p){
+ int n1, n2;
+ sqlite3BtreeEnter(p);
+ n1 = (int)p->pBt->nReserveWanted;
+ n2 = sqlite3BtreeGetReserveNoMutex(p);
+ sqlite3BtreeLeave(p);
+ return n1>n2 ? n1 : n2;
+}
+
+
+/*
+** Set the maximum page count for a database if mxPage is positive.
+** No changes are made if mxPage is 0 or negative.
+** Regardless of the value of mxPage, return the maximum page count.
+*/
+SQLITE_PRIVATE Pgno sqlite3BtreeMaxPageCount(Btree *p, Pgno mxPage){
+ Pgno n;
+ sqlite3BtreeEnter(p);
+ n = sqlite3PagerMaxPageCount(p->pBt->pPager, mxPage);
+ sqlite3BtreeLeave(p);
+ return n;
+}
+
+/*
+** Change the values for the BTS_SECURE_DELETE and BTS_OVERWRITE flags:
+**
+** newFlag==0 Both BTS_SECURE_DELETE and BTS_OVERWRITE are cleared
+** newFlag==1 BTS_SECURE_DELETE set and BTS_OVERWRITE is cleared
+** newFlag==2 BTS_SECURE_DELETE cleared and BTS_OVERWRITE is set
+** newFlag==(-1) No changes
+**
+** This routine acts as a query if newFlag is less than zero
+**
+** With BTS_OVERWRITE set, deleted content is overwritten by zeros, but
+** freelist leaf pages are not written back to the database. Thus in-page
+** deleted content is cleared, but freelist deleted content is not.
+**
+** With BTS_SECURE_DELETE, operation is like BTS_OVERWRITE with the addition
+** that freelist leaf pages are written back into the database, increasing
+** the amount of disk I/O.
+*/
+SQLITE_PRIVATE int sqlite3BtreeSecureDelete(Btree *p, int newFlag){
+ int b;
+ if( p==0 ) return 0;
+ sqlite3BtreeEnter(p);
+ assert( BTS_OVERWRITE==BTS_SECURE_DELETE*2 );
+ assert( BTS_FAST_SECURE==(BTS_OVERWRITE|BTS_SECURE_DELETE) );
+ if( newFlag>=0 ){
+ p->pBt->btsFlags &= ~BTS_FAST_SECURE;
+ p->pBt->btsFlags |= BTS_SECURE_DELETE*newFlag;
+ }
+ b = (p->pBt->btsFlags & BTS_FAST_SECURE)/BTS_SECURE_DELETE;
+ sqlite3BtreeLeave(p);
+ return b;
+}
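+
+/* Worked mapping for the flag arithmetic above (illustrative). Because
+** BTS_OVERWRITE==BTS_SECURE_DELETE*2, multiplying newFlag by
+** BTS_SECURE_DELETE selects the right bit, and dividing the stored bits
+** by BTS_SECURE_DELETE recovers the query value:
+**
+**   newFlag   bits set in btsFlags    value returned
+**   -------   --------------------    --------------
+**      0      (neither)                     0
+**      1      BTS_SECURE_DELETE             1
+**      2      BTS_OVERWRITE                 2
+*/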
+
+/*
+** Change the 'auto-vacuum' property of the database. If the 'autoVacuum'
+** parameter is non-zero, then auto-vacuum mode is enabled. If zero, it
+** is disabled. The default value for the auto-vacuum property is
+** determined by the SQLITE_DEFAULT_AUTOVACUUM macro.
+*/
+SQLITE_PRIVATE int sqlite3BtreeSetAutoVacuum(Btree *p, int autoVacuum){
+#ifdef SQLITE_OMIT_AUTOVACUUM
+ return SQLITE_READONLY;
+#else
+ BtShared *pBt = p->pBt;
+ int rc = SQLITE_OK;
+ u8 av = (u8)autoVacuum;
+
+ sqlite3BtreeEnter(p);
+ if( (pBt->btsFlags & BTS_PAGESIZE_FIXED)!=0 && (av ?1:0)!=pBt->autoVacuum ){
+ rc = SQLITE_READONLY;
+ }else{
+ pBt->autoVacuum = av ?1:0;
+ pBt->incrVacuum = av==2 ?1:0;
+ }
+ sqlite3BtreeLeave(p);
+ return rc;
+#endif
+}
+
+/*
+** Return the value of the 'auto-vacuum' property. If auto-vacuum is
+** enabled, 1 is returned; otherwise 0.
+*/
+SQLITE_PRIVATE int sqlite3BtreeGetAutoVacuum(Btree *p){
+#ifdef SQLITE_OMIT_AUTOVACUUM
+ return BTREE_AUTOVACUUM_NONE;
+#else
+ int rc;
+ sqlite3BtreeEnter(p);
+ rc = (
+ (!p->pBt->autoVacuum)?BTREE_AUTOVACUUM_NONE:
+ (!p->pBt->incrVacuum)?BTREE_AUTOVACUUM_FULL:
+ BTREE_AUTOVACUUM_INCR
+ );
+ sqlite3BtreeLeave(p);
+ return rc;
+#endif
+}
+
+/*
+** If the user has not set the safety-level for this database connection
+** using "PRAGMA synchronous", and if the safety-level is not already
+** set to the value passed to this function as the second parameter,
+** set it so.
+*/
+#if SQLITE_DEFAULT_SYNCHRONOUS!=SQLITE_DEFAULT_WAL_SYNCHRONOUS \
+ && !defined(SQLITE_OMIT_WAL)
+static void setDefaultSyncFlag(BtShared *pBt, u8 safety_level){
+ sqlite3 *db;
+ Db *pDb;
+ if( (db=pBt->db)!=0 && (pDb=db->aDb)!=0 ){
+ while( pDb->pBt==0 || pDb->pBt->pBt!=pBt ){ pDb++; }
+ if( pDb->bSyncSet==0
+ && pDb->safety_level!=safety_level
+ && pDb!=&db->aDb[1]
+ ){
+ pDb->safety_level = safety_level;
+ sqlite3PagerSetFlags(pBt->pPager,
+ pDb->safety_level | (db->flags & PAGER_FLAGS_MASK));
+ }
+ }
+}
+#else
+# define setDefaultSyncFlag(pBt,safety_level)
+#endif
+
+/* Forward declaration */
+static int newDatabase(BtShared*);
+
+
+/*
+** Get a reference to pPage1 of the database file. This will
+** also acquire a readlock on that file.
+**
+** SQLITE_OK is returned on success. If the file is not a
+** well-formed database file, then SQLITE_CORRUPT is returned.
+** SQLITE_BUSY is returned if the database is locked. SQLITE_NOMEM
+** is returned if we run out of memory.
+*/
+static int lockBtree(BtShared *pBt){
+ int rc; /* Result code from subfunctions */
+ MemPage *pPage1; /* Page 1 of the database file */
+ u32 nPage; /* Number of pages in the database */
+ u32 nPageFile = 0; /* Number of pages in the database file */
+ u32 nPageHeader; /* Number of pages in the database according to hdr */
+
+ assert( sqlite3_mutex_held(pBt->mutex) );
+ assert( pBt->pPage1==0 );
+ rc = sqlite3PagerSharedLock(pBt->pPager);
+ if( rc!=SQLITE_OK ) return rc;
+ rc = btreeGetPage(pBt, 1, &pPage1, 0);
+ if( rc!=SQLITE_OK ) return rc;
+
+ /* Do some checking to help ensure the file we opened really is
+ ** a valid database file.
+ */
+ nPage = nPageHeader = get4byte(28+(u8*)pPage1->aData);
+ sqlite3PagerPagecount(pBt->pPager, (int*)&nPageFile);
+ if( nPage==0 || memcmp(24+(u8*)pPage1->aData, 92+(u8*)pPage1->aData,4)!=0 ){
+ nPage = nPageFile;
+ }
+ if( (pBt->db->flags & SQLITE_ResetDatabase)!=0 ){
+ nPage = 0;
+ }
+ if( nPage>0 ){
+ u32 pageSize;
+ u32 usableSize;
+ u8 *page1 = pPage1->aData;
+ rc = SQLITE_NOTADB;
+ /* EVIDENCE-OF: R-43737-39999 Every valid SQLite database file begins
+ ** with the following 16 bytes (in hex): 53 51 4c 69 74 65 20 66 6f 72 6d
+ ** 61 74 20 33 00. */
+ if( memcmp(page1, zMagicHeader, 16)!=0 ){
+ goto page1_init_failed;
+ }
+
+#ifdef SQLITE_OMIT_WAL
+ if( page1[18]>1 ){
+ pBt->btsFlags |= BTS_READ_ONLY;
+ }
+ if( page1[19]>1 ){
+ goto page1_init_failed;
+ }
+#else
+ if( page1[18]>2 ){
+ pBt->btsFlags |= BTS_READ_ONLY;
+ }
+ if( page1[19]>2 ){
+ goto page1_init_failed;
+ }
+
+ /* If the write version is set to 2, this database should be accessed
+ ** in WAL mode. If the log is not already open, open it now. Then
+ ** return SQLITE_OK without populating BtShared.pPage1.
+ ** The caller detects this and calls this function again. This is
+ ** required as the version of page 1 currently in the page1 buffer
+ ** may not be the latest version - there may be a newer one in the log
+ ** file.
+ */
+ if( page1[19]==2 && (pBt->btsFlags & BTS_NO_WAL)==0 ){
+ int isOpen = 0;
+ rc = sqlite3PagerOpenWal(pBt->pPager, &isOpen);
+ if( rc!=SQLITE_OK ){
+ goto page1_init_failed;
+ }else{
+ setDefaultSyncFlag(pBt, SQLITE_DEFAULT_WAL_SYNCHRONOUS+1);
+ if( isOpen==0 ){
+ releasePageOne(pPage1);
+ return SQLITE_OK;
+ }
+ }
+ rc = SQLITE_NOTADB;
+ }else{
+ setDefaultSyncFlag(pBt, SQLITE_DEFAULT_SYNCHRONOUS+1);
+ }
+#endif
+
+ /* EVIDENCE-OF: R-15465-20813 The maximum and minimum embedded payload
+ ** fractions and the leaf payload fraction values must be 64, 32, and 32.
+ **
+ ** The original design allowed these amounts to vary, but as of
+ ** version 3.6.0, we require them to be fixed.
+ */
+ if( memcmp(&page1[21], "\100\040\040",3)!=0 ){
+ goto page1_init_failed;
+ }
+ /* EVIDENCE-OF: R-51873-39618 The page size for a database file is
+ ** determined by the 2-byte integer located at an offset of 16 bytes from
+ ** the beginning of the database file. */
+ pageSize = (page1[16]<<8) | (page1[17]<<16);
+ /* EVIDENCE-OF: R-25008-21688 The size of a page is a power of two
+ ** between 512 and 65536 inclusive. */
+ if( ((pageSize-1)&pageSize)!=0
+ || pageSize>SQLITE_MAX_PAGE_SIZE
+ || pageSize<=256
+ ){
+ goto page1_init_failed;
+ }
+ pBt->btsFlags |= BTS_PAGESIZE_FIXED;
+ assert( (pageSize & 7)==0 );
+ /* EVIDENCE-OF: R-59310-51205 The "reserved space" size in the 1-byte
+ ** integer at offset 20 is the number of bytes of space at the end of
+ ** each page to reserve for extensions.
+ **
+ ** EVIDENCE-OF: R-37497-42412 The size of the reserved region is
+ ** determined by the one-byte unsigned integer found at an offset of 20
+ ** into the database file header. */
+ usableSize = pageSize - page1[20];
+ if( (u32)pageSize!=pBt->pageSize ){
+ /* After reading the first page of the database assuming a page size
+ ** of BtShared.pageSize, we have discovered that the page-size is
+ ** actually pageSize. Unlock the database, leave pBt->pPage1 at
+ ** zero and return SQLITE_OK. The caller will call this function
+ ** again with the correct page-size.
+ */
+ releasePageOne(pPage1);
+ pBt->usableSize = usableSize;
+ pBt->pageSize = pageSize;
+ freeTempSpace(pBt);
+ rc = sqlite3PagerSetPagesize(pBt->pPager, &pBt->pageSize,
+ pageSize-usableSize);
+ return rc;
+ }
+ if( sqlite3WritableSchema(pBt->db)==0 && nPage>nPageFile ){
+ rc = SQLITE_CORRUPT_BKPT;
+ goto page1_init_failed;
+ }
+ /* EVIDENCE-OF: R-28312-64704 However, the usable size is not allowed to
+ ** be less than 480. In other words, if the page size is 512, then the
+ ** reserved space size cannot exceed 32. */
+ if( usableSize<480 ){
+ goto page1_init_failed;
+ }
+ pBt->pageSize = pageSize;
+ pBt->usableSize = usableSize;
+#ifndef SQLITE_OMIT_AUTOVACUUM
+ pBt->autoVacuum = (get4byte(&page1[36 + 4*4])?1:0);
+ pBt->incrVacuum = (get4byte(&page1[36 + 7*4])?1:0);
+#endif
+ }
+
+ /* maxLocal is the maximum amount of payload to store locally for
+ ** a cell. Make sure it is small enough so that at least minFanout
+ ** cells will fit on one page. We assume a 10-byte page header.
+ ** Besides the payload, the cell must store:
+ ** 2-byte pointer to the cell
+ ** 4-byte child pointer
+ ** 9-byte nKey value
+ ** 4-byte nData value
+ ** 4-byte overflow page pointer
+ ** So a cell consists of a 2-byte pointer, a header which is as much as
+ ** 17 bytes long, 0 to N bytes of payload, and an optional 4 byte overflow
+ ** page pointer.
+ */
+ pBt->maxLocal = (u16)((pBt->usableSize-12)*64/255 - 23);
+ pBt->minLocal = (u16)((pBt->usableSize-12)*32/255 - 23);
+ pBt->maxLeaf = (u16)(pBt->usableSize - 35);
+ pBt->minLeaf = (u16)((pBt->usableSize-12)*32/255 - 23);
+ if( pBt->maxLocal>127 ){
+ pBt->max1bytePayload = 127;
+ }else{
+ pBt->max1bytePayload = (u8)pBt->maxLocal;
+ }
+ assert( pBt->maxLeaf + 23 <= MX_CELL_SIZE(pBt) );
+ pBt->pPage1 = pPage1;
+ pBt->nPage = nPage;
+ return SQLITE_OK;
+
+page1_init_failed:
+ releasePageOne(pPage1);
+ pBt->pPage1 = 0;
+ return rc;
+}
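+
+/* Worked example for the payload limits computed above (illustrative),
+** assuming a 4096-byte page with no reserved bytes (usableSize==4096):
+**
+**   maxLocal = (4096-12)*64/255 - 23 = 1025 - 23 = 1002
+**   minLocal = (4096-12)*32/255 - 23 =  512 - 23 =  489
+**   maxLeaf  =  4096 - 35                        = 4061
+**   minLeaf  = (4096-12)*32/255 - 23             =  489
+**
+** Since maxLocal>127, max1bytePayload is capped at 127.
+*/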
+
+#ifndef NDEBUG
+/*
+** Return the number of cursors open on pBt. This is for use
+** in assert() expressions, so it is only compiled if NDEBUG is not
+** defined.
+**
+** Only write cursors are counted if wrOnly is true. If wrOnly is
+** false then all cursors are counted.
+**
+** For the purposes of this routine, a cursor is any cursor that
+** is capable of reading or writing to the database. Cursors that
+** have been tripped into the CURSOR_FAULT state are not counted.
+*/
+static int countValidCursors(BtShared *pBt, int wrOnly){
+ BtCursor *pCur;
+ int r = 0;
+ for(pCur=pBt->pCursor; pCur; pCur=pCur->pNext){
+ if( (wrOnly==0 || (pCur->curFlags & BTCF_WriteFlag)!=0)
+ && pCur->eState!=CURSOR_FAULT ) r++;
+ }
+ return r;
+}
+#endif
+
+/*
+** If there are no outstanding cursors and we are not in the middle
+** of a transaction but there is a read lock on the database, then
+** this routine unrefs the first page of the database file which
+** has the effect of releasing the read lock.
+**
+** If there is a transaction in progress, this routine is a no-op.
+*/
+static void unlockBtreeIfUnused(BtShared *pBt){
+ assert( sqlite3_mutex_held(pBt->mutex) );
+ assert( countValidCursors(pBt,0)==0 || pBt->inTransaction>TRANS_NONE );
+ if( pBt->inTransaction==TRANS_NONE && pBt->pPage1!=0 ){
+ MemPage *pPage1 = pBt->pPage1;
+ assert( pPage1->aData );
+ assert( sqlite3PagerRefcount(pBt->pPager)==1 );
+ pBt->pPage1 = 0;
+ releasePageOne(pPage1);
+ }
+}
+
+/*
+** If pBt points to an empty file then convert that empty file
+** into a new empty database by initializing the first page of
+** the database.
+*/
+static int newDatabase(BtShared *pBt){
+ MemPage *pP1;
+ unsigned char *data;
+ int rc;
+
+ assert( sqlite3_mutex_held(pBt->mutex) );
+ if( pBt->nPage>0 ){
+ return SQLITE_OK;
+ }
+ pP1 = pBt->pPage1;
+ assert( pP1!=0 );
+ data = pP1->aData;
+ rc = sqlite3PagerWrite(pP1->pDbPage);
+ if( rc ) return rc;
+ memcpy(data, zMagicHeader, sizeof(zMagicHeader));
+ assert( sizeof(zMagicHeader)==16 );
+ data[16] = (u8)((pBt->pageSize>>8)&0xff);
+ data[17] = (u8)((pBt->pageSize>>16)&0xff);
+ data[18] = 1;
+ data[19] = 1;
+ assert( pBt->usableSize<=pBt->pageSize && pBt->usableSize+255>=pBt->pageSize);
+ data[20] = (u8)(pBt->pageSize - pBt->usableSize);
+ data[21] = 64;
+ data[22] = 32;
+ data[23] = 32;
+ memset(&data[24], 0, 100-24);
+ zeroPage(pP1, PTF_INTKEY|PTF_LEAF|PTF_LEAFDATA );
+ pBt->btsFlags |= BTS_PAGESIZE_FIXED;
+#ifndef SQLITE_OMIT_AUTOVACUUM
+ assert( pBt->autoVacuum==1 || pBt->autoVacuum==0 );
+ assert( pBt->incrVacuum==1 || pBt->incrVacuum==0 );
+ put4byte(&data[36 + 4*4], pBt->autoVacuum);
+ put4byte(&data[36 + 7*4], pBt->incrVacuum);
+#endif
+ pBt->nPage = 1;
+ data[31] = 1;
+ return SQLITE_OK;
+}
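+
+/* Summary of the header bytes written above (illustrative). Offsets are
+** into page 1; example values assume a 4096-byte page with no reserved
+** space:
+**
+**   offset  0..15   magic string "SQLite format 3\000"
+**   offset 16..17   page size, big-endian: 0x10 0x00
+**   offset 18..19   write/read format versions: 1, 1 (rollback journal)
+**   offset 20       reserved bytes per page: 0
+**   offset 21..23   payload fractions: 64, 32, 32 (fixed values)
+**   offset 28..31   database size in pages: 1 (via data[31]=1 above)
+*/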
+
+/*
+** Initialize the first page of the database file (creating a database
+** consisting of a single page and no schema objects). Return SQLITE_OK
+** if successful, or an SQLite error code otherwise.
+*/
+SQLITE_PRIVATE int sqlite3BtreeNewDb(Btree *p){
+ int rc;
+ sqlite3BtreeEnter(p);
+ p->pBt->nPage = 0;
+ rc = newDatabase(p->pBt);
+ sqlite3BtreeLeave(p);
+ return rc;
+}
+
+/*
+** Attempt to start a new transaction. A write-transaction
+** is started if the second argument is nonzero, otherwise a read-
+** transaction. If the second argument is 2 or more, an exclusive
+** transaction is started, meaning that no other process is allowed
+** to access the database. A preexisting transaction may not be
+** upgraded to exclusive by calling this routine a second time - the
+** exclusivity flag only works for a new transaction.
+**
+** A write-transaction must be started before attempting any
+** changes to the database. None of the following routines
+** will work unless a transaction is started first:
+**
+** sqlite3BtreeCreateTable()
+** sqlite3BtreeCreateIndex()
+** sqlite3BtreeClearTable()
+** sqlite3BtreeDropTable()
+** sqlite3BtreeInsert()
+** sqlite3BtreeDelete()
+** sqlite3BtreeUpdateMeta()
+**
+** If an initial attempt to acquire the lock fails because of lock contention
+** and the database was previously unlocked, then invoke the busy handler
+** if there is one. But if there was previously a read-lock, do not
+** invoke the busy handler - just return SQLITE_BUSY. SQLITE_BUSY is
+** returned when there is already a read-lock in order to avoid a deadlock.
+**
+** Suppose there are two processes A and B. A has a read lock and B has
+** a reserved lock. B tries to promote to exclusive but is blocked because
+** of A's read lock. A tries to promote to reserved but is blocked by B.
+** One or the other of the two processes must give way or there can be
+** no progress. By returning SQLITE_BUSY and not invoking the busy callback
+** when A already has a read lock, we encourage A to give up and let B
+** proceed.
+*/
+SQLITE_PRIVATE int sqlite3BtreeBeginTrans(Btree *p, int wrflag, int *pSchemaVersion){
+ BtShared *pBt = p->pBt;
+ Pager *pPager = pBt->pPager;
+ int rc = SQLITE_OK;
+
+ sqlite3BtreeEnter(p);
+ btreeIntegrity(p);
+
+ /* If the btree is already in a write-transaction, or it
+ ** is already in a read-transaction and a read-transaction
+ ** is requested, this is a no-op.
+ */
+ if( p->inTrans==TRANS_WRITE || (p->inTrans==TRANS_READ && !wrflag) ){
+ goto trans_begun;
+ }
+ assert( pBt->inTransaction==TRANS_WRITE || IfNotOmitAV(pBt->bDoTruncate)==0 );
+
+ if( (p->db->flags & SQLITE_ResetDatabase)
+ && sqlite3PagerIsreadonly(pPager)==0
+ ){
+ pBt->btsFlags &= ~BTS_READ_ONLY;
+ }
+
+ /* Write transactions are not possible on a read-only database */
+ if( (pBt->btsFlags & BTS_READ_ONLY)!=0 && wrflag ){
+ rc = SQLITE_READONLY;
+ goto trans_begun;
+ }
+
+#ifndef SQLITE_OMIT_SHARED_CACHE
+ {
+ sqlite3 *pBlock = 0;
+ /* If another database handle has already opened a write transaction
+ ** on this shared-btree structure and a second write transaction is
+ ** requested, return SQLITE_LOCKED.
+ */
+ if( (wrflag && pBt->inTransaction==TRANS_WRITE)
+ || (pBt->btsFlags & BTS_PENDING)!=0
+ ){
+ pBlock = pBt->pWriter->db;
+ }else if( wrflag>1 ){
+ BtLock *pIter;
+ for(pIter=pBt->pLock; pIter; pIter=pIter->pNext){
+ if( pIter->pBtree!=p ){
+ pBlock = pIter->pBtree->db;
+ break;
+ }
+ }
+ }
+ if( pBlock ){
+ sqlite3ConnectionBlocked(p->db, pBlock);
+ rc = SQLITE_LOCKED_SHAREDCACHE;
+ goto trans_begun;
+ }
+ }
+#endif
+
+ /* Any read-only or read-write transaction implies a read-lock on
+ ** page 1. So if some other shared-cache client already has a write-lock
+ ** on page 1, the transaction cannot be opened. */
+ rc = querySharedCacheTableLock(p, SCHEMA_ROOT, READ_LOCK);
+ if( SQLITE_OK!=rc ) goto trans_begun;
+
+ pBt->btsFlags &= ~BTS_INITIALLY_EMPTY;
+ if( pBt->nPage==0 ) pBt->btsFlags |= BTS_INITIALLY_EMPTY;
+ do {
+ sqlite3PagerWalDb(pPager, p->db);
+
+#ifdef SQLITE_ENABLE_SETLK_TIMEOUT
+ /* If transitioning from no transaction directly to a write transaction,
+ ** block for the WRITER lock first if possible. */
+ if( pBt->pPage1==0 && wrflag ){
+ assert( pBt->inTransaction==TRANS_NONE );
+ rc = sqlite3PagerWalWriteLock(pPager, 1);
+ if( rc!=SQLITE_BUSY && rc!=SQLITE_OK ) break;
+ }
+#endif
+
+ /* Call lockBtree() until either pBt->pPage1 is populated or
+ ** lockBtree() returns something other than SQLITE_OK. lockBtree()
+ ** may return SQLITE_OK but leave pBt->pPage1 set to 0 if after
+ ** reading page 1 it discovers that the page-size of the database
+ ** file is not pBt->pageSize. In this case lockBtree() will update
+ ** pBt->pageSize to the page-size of the file on disk.
+ */
+ while( pBt->pPage1==0 && SQLITE_OK==(rc = lockBtree(pBt)) );
+
+ if( rc==SQLITE_OK && wrflag ){
+ if( (pBt->btsFlags & BTS_READ_ONLY)!=0 ){
+ rc = SQLITE_READONLY;
+ }else{
+ rc = sqlite3PagerBegin(pPager, wrflag>1, sqlite3TempInMemory(p->db));
+ if( rc==SQLITE_OK ){
+ rc = newDatabase(pBt);
+ }else if( rc==SQLITE_BUSY_SNAPSHOT && pBt->inTransaction==TRANS_NONE ){
+ /* if there was no transaction opened when this function was
+ ** called and SQLITE_BUSY_SNAPSHOT is returned, change the error
+ ** code to SQLITE_BUSY. */
+ rc = SQLITE_BUSY;
+ }
+ }
+ }
+
+ if( rc!=SQLITE_OK ){
+ (void)sqlite3PagerWalWriteLock(pPager, 0);
+ unlockBtreeIfUnused(pBt);
+ }
+ }while( (rc&0xFF)==SQLITE_BUSY && pBt->inTransaction==TRANS_NONE &&
+ btreeInvokeBusyHandler(pBt) );
+ sqlite3PagerWalDb(pPager, 0);
+#ifdef SQLITE_ENABLE_SETLK_TIMEOUT
+ if( rc==SQLITE_BUSY_TIMEOUT ) rc = SQLITE_BUSY;
+#endif
+
+ if( rc==SQLITE_OK ){
+ if( p->inTrans==TRANS_NONE ){
+ pBt->nTransaction++;
+#ifndef SQLITE_OMIT_SHARED_CACHE
+ if( p->sharable ){
+ assert( p->lock.pBtree==p && p->lock.iTable==1 );
+ p->lock.eLock = READ_LOCK;
+ p->lock.pNext = pBt->pLock;
+ pBt->pLock = &p->lock;
+ }
+#endif
+ }
+ p->inTrans = (wrflag?TRANS_WRITE:TRANS_READ);
+ if( p->inTrans>pBt->inTransaction ){
+ pBt->inTransaction = p->inTrans;
+ }
+ if( wrflag ){
+ MemPage *pPage1 = pBt->pPage1;
+#ifndef SQLITE_OMIT_SHARED_CACHE
+ assert( !pBt->pWriter );
+ pBt->pWriter = p;
+ pBt->btsFlags &= ~BTS_EXCLUSIVE;
+ if( wrflag>1 ) pBt->btsFlags |= BTS_EXCLUSIVE;
+#endif
+
+ /* If the db-size header field is incorrect (as it may be if an old
+ ** client has been writing the database file), update it now. Doing
+ ** this sooner rather than later means the database can safely
+ ** re-read its size from page 1 if a savepoint or transaction
+ ** rollback occurs within the transaction.
+ */
+ if( pBt->nPage!=get4byte(&pPage1->aData[28]) ){
+ rc = sqlite3PagerWrite(pPage1->pDbPage);
+ if( rc==SQLITE_OK ){
+ put4byte(&pPage1->aData[28], pBt->nPage);
+ }
+ }
+ }
+ }
+
+trans_begun:
+ if( rc==SQLITE_OK ){
+ if( pSchemaVersion ){
+ *pSchemaVersion = get4byte(&pBt->pPage1->aData[40]);
+ }
+ if( wrflag ){
+ /* This call makes sure that the pager has the correct number of
+ ** open savepoints. If the second parameter is greater than 0 and
+ ** the sub-journal is not already open, then it will be opened here.
+ */
+ rc = sqlite3PagerOpenSavepoint(pPager, p->db->nSavepoint);
+ }
+ }
+
+ btreeIntegrity(p);
+ sqlite3BtreeLeave(p);
+ return rc;
+}
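+
+/* Hedged usage sketch (hypothetical caller): the wrflag argument selects
+** the transaction type described in the comment block above.
+*/
+#if 0
+static int exampleBeginWrite(Btree *p, int *pSchemaVersion){
+  /* 0 = read transaction; 1 = write transaction; 2+ = exclusive */
+  return sqlite3BtreeBeginTrans(p, 1, pSchemaVersion);
+}
+#endif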
+
+#ifndef SQLITE_OMIT_AUTOVACUUM
+
+/*
+** Set the pointer-map entries for all children of page pPage. Also, if
+** pPage contains cells that point to overflow pages, set the pointer
+** map entries for the overflow pages as well.
+*/
+static int setChildPtrmaps(MemPage *pPage){
+ int i; /* Counter variable */
+ int nCell; /* Number of cells in page pPage */
+ int rc; /* Return code */
+ BtShared *pBt = pPage->pBt;
+ Pgno pgno = pPage->pgno;
+
+ assert( sqlite3_mutex_held(pPage->pBt->mutex) );
+ rc = pPage->isInit ? SQLITE_OK : btreeInitPage(pPage);
+ if( rc!=SQLITE_OK ) return rc;
+ nCell = pPage->nCell;
+
+ for(i=0; i<nCell; i++){
+ u8 *pCell = findCell(pPage, i);
+
+ ptrmapPutOvflPtr(pPage, pPage, pCell, &rc);
+
+ if( !pPage->leaf ){
+ Pgno childPgno = get4byte(pCell);
+ ptrmapPut(pBt, childPgno, PTRMAP_BTREE, pgno, &rc);
+ }
+ }
+
+ if( !pPage->leaf ){
+ Pgno childPgno = get4byte(&pPage->aData[pPage->hdrOffset+8]);
+ ptrmapPut(pBt, childPgno, PTRMAP_BTREE, pgno, &rc);
+ }
+
+ return rc;
+}
+
+/*
+** Somewhere on pPage is a pointer to page iFrom. Modify this pointer so
+** that it points to iTo. Parameter eType describes the type of pointer to
+** be modified, as follows:
+**
+** PTRMAP_BTREE: pPage is a btree-page. The pointer points at a child
+** page of pPage.
+**
+** PTRMAP_OVERFLOW1: pPage is a btree-page. The pointer points at an overflow
+** page pointed to by one of the cells on pPage.
+**
+** PTRMAP_OVERFLOW2: pPage is an overflow-page. The pointer points at the next
+** overflow page in the list.
+*/
+static int modifyPagePointer(MemPage *pPage, Pgno iFrom, Pgno iTo, u8 eType){
+ assert( sqlite3_mutex_held(pPage->pBt->mutex) );
+ assert( sqlite3PagerIswriteable(pPage->pDbPage) );
+ if( eType==PTRMAP_OVERFLOW2 ){
+ /* The pointer is always the first 4 bytes of the page in this case. */
+ if( get4byte(pPage->aData)!=iFrom ){
+ return SQLITE_CORRUPT_PAGE(pPage);
+ }
+ put4byte(pPage->aData, iTo);
+ }else{
+ int i;
+ int nCell;
+ int rc;
+
+ rc = pPage->isInit ? SQLITE_OK : btreeInitPage(pPage);
+ if( rc ) return rc;
+ nCell = pPage->nCell;
+
+ for(i=0; i<nCell; i++){
+ u8 *pCell = findCell(pPage, i);
+ if( eType==PTRMAP_OVERFLOW1 ){
+ CellInfo info;
+ pPage->xParseCell(pPage, pCell, &info);
+ if( info.nLocal<info.nPayload ){
+ if( pCell+info.nSize > pPage->aData+pPage->pBt->usableSize ){
+ return SQLITE_CORRUPT_PAGE(pPage);
+ }
+ if( iFrom==get4byte(pCell+info.nSize-4) ){
+ put4byte(pCell+info.nSize-4, iTo);
+ break;
+ }
+ }
+ }else{
+ if( get4byte(pCell)==iFrom ){
+ put4byte(pCell, iTo);
+ break;
+ }
+ }
+ }
+
+ if( i==nCell ){
+ if( eType!=PTRMAP_BTREE ||
+ get4byte(&pPage->aData[pPage->hdrOffset+8])!=iFrom ){
+ return SQLITE_CORRUPT_PAGE(pPage);
+ }
+ put4byte(&pPage->aData[pPage->hdrOffset+8], iTo);
+ }
+ }
+ return SQLITE_OK;
+}
+
+
+/*
+** Move the open database page pDbPage to location iFreePage in the
+** database. The pDbPage reference remains valid.
+**
+** The isCommit flag indicates that there is no need to remember that
+** the journal needs to be sync()ed before database page pDbPage->pgno
+** can be written to. The caller has already promised not to write to that
+** page.
+*/
+static int relocatePage(
+ BtShared *pBt, /* Btree */
+ MemPage *pDbPage, /* Open page to move */
+ u8 eType, /* Pointer map 'type' entry for pDbPage */
+ Pgno iPtrPage, /* Pointer map 'page-no' entry for pDbPage */
+ Pgno iFreePage, /* The location to move pDbPage to */
+ int isCommit /* isCommit flag passed to sqlite3PagerMovepage */
+){
+ MemPage *pPtrPage; /* The page that contains a pointer to pDbPage */
+ Pgno iDbPage = pDbPage->pgno;
+ Pager *pPager = pBt->pPager;
+ int rc;
+
+ assert( eType==PTRMAP_OVERFLOW2 || eType==PTRMAP_OVERFLOW1 ||
+ eType==PTRMAP_BTREE || eType==PTRMAP_ROOTPAGE );
+ assert( sqlite3_mutex_held(pBt->mutex) );
+ assert( pDbPage->pBt==pBt );
+ if( iDbPage<3 ) return SQLITE_CORRUPT_BKPT;
+
+ /* Move page iDbPage from its current location to page number iFreePage */
+ TRACE(("AUTOVACUUM: Moving %d to free page %d (ptr page %d type %d)\n",
+ iDbPage, iFreePage, iPtrPage, eType));
+ rc = sqlite3PagerMovepage(pPager, pDbPage->pDbPage, iFreePage, isCommit);
+ if( rc!=SQLITE_OK ){
+ return rc;
+ }
+ pDbPage->pgno = iFreePage;
+
+ /* If pDbPage was a btree-page, then it may have child pages and/or cells
+ ** that point to overflow pages. The pointer map entries for all these
+ ** pages need to be changed.
+ **
+ ** If pDbPage is an overflow page, then the first 4 bytes may store a
+ ** pointer to a subsequent overflow page. If this is the case, then
+ ** the pointer map needs to be updated for the subsequent overflow page.
+ */
+ if( eType==PTRMAP_BTREE || eType==PTRMAP_ROOTPAGE ){
+ rc = setChildPtrmaps(pDbPage);
+ if( rc!=SQLITE_OK ){
+ return rc;
+ }
+ }else{
+ Pgno nextOvfl = get4byte(pDbPage->aData);
+ if( nextOvfl!=0 ){
+ ptrmapPut(pBt, nextOvfl, PTRMAP_OVERFLOW2, iFreePage, &rc);
+ if( rc!=SQLITE_OK ){
+ return rc;
+ }
+ }
+ }
+
+ /* Fix the database pointer on page iPtrPage that pointed at iDbPage so
+ ** that it points at iFreePage. Also fix the pointer map entry for
+ ** iPtrPage.
+ */
+ if( eType!=PTRMAP_ROOTPAGE ){
+ rc = btreeGetPage(pBt, iPtrPage, &pPtrPage, 0);
+ if( rc!=SQLITE_OK ){
+ return rc;
+ }
+ rc = sqlite3PagerWrite(pPtrPage->pDbPage);
+ if( rc!=SQLITE_OK ){
+ releasePage(pPtrPage);
+ return rc;
+ }
+ rc = modifyPagePointer(pPtrPage, iDbPage, iFreePage, eType);
+ releasePage(pPtrPage);
+ if( rc==SQLITE_OK ){
+ ptrmapPut(pBt, iFreePage, eType, iPtrPage, &rc);
+ }
+ }
+ return rc;
+}
+
+/* Forward declaration required by incrVacuumStep(). */
+static int allocateBtreePage(BtShared *, MemPage **, Pgno *, Pgno, u8);
+
+/*
+** Perform a single step of an incremental-vacuum. If successful, return
+** SQLITE_OK. If there is no work to do (and therefore no point in
+** calling this function again), return SQLITE_DONE. Or, if an error
+** occurs, return some other error code.
+**
+** More specifically, this function attempts to re-organize the database so
+** that the last page of the file currently in use is no longer in use.
+**
+** Parameter nFin is the number of pages that this database would contain
+** were this function called until it returns SQLITE_DONE.
+**
+** If the bCommit parameter is non-zero, this function assumes that the
+** caller will keep calling incrVacuumStep() until it returns SQLITE_DONE
+** or an error. bCommit is passed true for an auto-vacuum-on-commit
+** operation, or false for an incremental vacuum.
+*/
+static int incrVacuumStep(BtShared *pBt, Pgno nFin, Pgno iLastPg, int bCommit){
+ Pgno nFreeList; /* Number of pages still on the free-list */
+ int rc;
+
+ assert( sqlite3_mutex_held(pBt->mutex) );
+ assert( iLastPg>nFin );
+
+ if( !PTRMAP_ISPAGE(pBt, iLastPg) && iLastPg!=PENDING_BYTE_PAGE(pBt) ){
+ u8 eType;
+ Pgno iPtrPage;
+
+ nFreeList = get4byte(&pBt->pPage1->aData[36]);
+ if( nFreeList==0 ){
+ return SQLITE_DONE;
+ }
+
+ rc = ptrmapGet(pBt, iLastPg, &eType, &iPtrPage);
+ if( rc!=SQLITE_OK ){
+ return rc;
+ }
+ if( eType==PTRMAP_ROOTPAGE ){
+ return SQLITE_CORRUPT_BKPT;
+ }
+
+ if( eType==PTRMAP_FREEPAGE ){
+ if( bCommit==0 ){
+ /* Remove the page from the file's free-list. This is not required
+ ** if bCommit is non-zero. In that case, the free-list will be
+ ** truncated to zero after this function returns, so it doesn't
+ ** matter if it still contains some garbage entries.
+ */
+ Pgno iFreePg;
+ MemPage *pFreePg;
+ rc = allocateBtreePage(pBt, &pFreePg, &iFreePg, iLastPg, BTALLOC_EXACT);
+ if( rc!=SQLITE_OK ){
+ return rc;
+ }
+ assert( iFreePg==iLastPg );
+ releasePage(pFreePg);
+ }
+ } else {
+ Pgno iFreePg; /* Index of free page to move pLastPg to */
+ MemPage *pLastPg;
+ u8 eMode = BTALLOC_ANY; /* Mode parameter for allocateBtreePage() */
+ Pgno iNear = 0; /* nearby parameter for allocateBtreePage() */
+
+ rc = btreeGetPage(pBt, iLastPg, &pLastPg, 0);
+ if( rc!=SQLITE_OK ){
+ return rc;
+ }
+
+ /* If bCommit is zero, this loop runs exactly once and page pLastPg
+ ** is swapped with the first free page pulled off the free list.
+ **
+ ** On the other hand, if bCommit is greater than zero, then keep
+ ** looping until a free-page located within the first nFin pages
+ ** of the file is found.
+ */
+ if( bCommit==0 ){
+ eMode = BTALLOC_LE;
+ iNear = nFin;
+ }
+ do {
+ MemPage *pFreePg;
+ rc = allocateBtreePage(pBt, &pFreePg, &iFreePg, iNear, eMode);
+ if( rc!=SQLITE_OK ){
+ releasePage(pLastPg);
+ return rc;
+ }
+ releasePage(pFreePg);
+ }while( bCommit && iFreePg>nFin );
+ assert( iFreePg<iLastPg );
+
+ rc = relocatePage(pBt, pLastPg, eType, iPtrPage, iFreePg, bCommit);
+ releasePage(pLastPg);
+ if( rc!=SQLITE_OK ){
+ return rc;
+ }
+ }
+ }
+
+ if( bCommit==0 ){
+ do {
+ iLastPg--;
+ }while( iLastPg==PENDING_BYTE_PAGE(pBt) || PTRMAP_ISPAGE(pBt, iLastPg) );
+ pBt->bDoTruncate = 1;
+ pBt->nPage = iLastPg;
+ }
+ return SQLITE_OK;
+}
+
+/*
+** The database opened by the first argument is an auto-vacuum database
+** nOrig pages in size containing nFree free pages. Return the expected
+** size of the database in pages following an auto-vacuum operation.
+*/
+static Pgno finalDbSize(BtShared *pBt, Pgno nOrig, Pgno nFree){
+ int nEntry; /* Number of entries on one ptrmap page */
+ Pgno nPtrmap; /* Number of PtrMap pages to be freed */
+ Pgno nFin; /* Return value */
+
+ nEntry = pBt->usableSize/5;
+ nPtrmap = (nFree-nOrig+PTRMAP_PAGENO(pBt, nOrig)+nEntry)/nEntry;
+ nFin = nOrig - nFree - nPtrmap;
+ if( nOrig>PENDING_BYTE_PAGE(pBt) && nFin<PENDING_BYTE_PAGE(pBt) ){
+ nFin--;
+ }
+ while( PTRMAP_ISPAGE(pBt, nFin) || nFin==PENDING_BYTE_PAGE(pBt) ){
+ nFin--;
+ }
+
+ return nFin;
+}
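+
+/* Worked note for the computation above (illustrative). Each pointer-map
+** entry is 5 bytes (a type byte plus a 4-byte parent page number), so with
+** usableSize==4096 one ptrmap page covers nEntry==819 pages. Removing
+** nFree pages from the tail of the file also frees the ptrmap pages that
+** covered only the truncated region (nPtrmap), and the final loop steps
+** nFin back over any ptrmap or pending-byte page, since neither may be
+** the last page of a well-formed database.
+*/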
+
+/*
+** A write-transaction must be opened before calling this function.
+** It performs a single unit of work towards an incremental vacuum.
+**
+** If the incremental vacuum is finished after this function has run,
+** SQLITE_DONE is returned. If it is not finished, but no error occurred,
+** SQLITE_OK is returned. Otherwise an SQLite error code.
+*/
+SQLITE_PRIVATE int sqlite3BtreeIncrVacuum(Btree *p){
+ int rc;
+ BtShared *pBt = p->pBt;
+
+ sqlite3BtreeEnter(p);
+ assert( pBt->inTransaction==TRANS_WRITE && p->inTrans==TRANS_WRITE );
+ if( !pBt->autoVacuum ){
+ rc = SQLITE_DONE;
+ }else{
+ Pgno nOrig = btreePagecount(pBt);
+ Pgno nFree = get4byte(&pBt->pPage1->aData[36]);
+ Pgno nFin = finalDbSize(pBt, nOrig, nFree);
+
+ if( nOrig<nFin || nFree>=nOrig ){
+ rc = SQLITE_CORRUPT_BKPT;
+ }else if( nFree>0 ){
+ rc = saveAllCursors(pBt, 0, 0);
+ if( rc==SQLITE_OK ){
+ invalidateAllOverflowCache(pBt);
+ rc = incrVacuumStep(pBt, nFin, nOrig, 0);
+ }
+ if( rc==SQLITE_OK ){
+ rc = sqlite3PagerWrite(pBt->pPage1->pDbPage);
+ put4byte(&pBt->pPage1->aData[28], pBt->nPage);
+ }
+ }else{
+ rc = SQLITE_DONE;
+ }
+ }
+ sqlite3BtreeLeave(p);
+ return rc;
+}
+
+/*
+** This routine is called prior to sqlite3PagerCommit when a transaction
+** is committed for an auto-vacuum database.
+**
+** If SQLITE_OK is returned, the database has been reorganized so that
+** only the first pBt->nPage pages remain in use, and pBt->bDoTruncate
+** is set so that the file is truncated to that size during the commit
+** process.
+*/
+static int autoVacuumCommit(BtShared *pBt){
+ int rc = SQLITE_OK;
+ Pager *pPager = pBt->pPager;
+ VVA_ONLY( int nRef = sqlite3PagerRefcount(pPager); )
+
+ assert( sqlite3_mutex_held(pBt->mutex) );
+ invalidateAllOverflowCache(pBt);
+ assert(pBt->autoVacuum);
+ if( !pBt->incrVacuum ){
+ Pgno nFin; /* Number of pages in database after autovacuuming */
+ Pgno nFree; /* Number of pages on the freelist initially */
+ Pgno iFree; /* The next page to be freed */
+ Pgno nOrig; /* Database size before freeing */
+
+ nOrig = btreePagecount(pBt);
+ if( PTRMAP_ISPAGE(pBt, nOrig) || nOrig==PENDING_BYTE_PAGE(pBt) ){
+ /* It is not possible to create a database for which the final page
+ ** is either a pointer-map page or the pending-byte page. If one
+ ** is encountered, this indicates corruption.
+ */
+ return SQLITE_CORRUPT_BKPT;
+ }
+
+ nFree = get4byte(&pBt->pPage1->aData[36]);
+ nFin = finalDbSize(pBt, nOrig, nFree);
+ if( nFin>nOrig ) return SQLITE_CORRUPT_BKPT;
+ if( nFin<nOrig ){
+ rc = saveAllCursors(pBt, 0, 0);
+ }
+ for(iFree=nOrig; iFree>nFin && rc==SQLITE_OK; iFree--){
+ rc = incrVacuumStep(pBt, nFin, iFree, 1);
+ }
+ if( (rc==SQLITE_DONE || rc==SQLITE_OK) && nFree>0 ){
+ rc = sqlite3PagerWrite(pBt->pPage1->pDbPage);
+ put4byte(&pBt->pPage1->aData[32], 0);
+ put4byte(&pBt->pPage1->aData[36], 0);
+ put4byte(&pBt->pPage1->aData[28], nFin);
+ pBt->bDoTruncate = 1;
+ pBt->nPage = nFin;
+ }
+ if( rc!=SQLITE_OK ){
+ sqlite3PagerRollback(pPager);
+ }
+ }
+
+ assert( nRef>=sqlite3PagerRefcount(pPager) );
+ return rc;
+}
+
+#else /* ifndef SQLITE_OMIT_AUTOVACUUM */
+# define setChildPtrmaps(x) SQLITE_OK
+#endif
+
+/*
+** This routine does the first phase of a two-phase commit. This routine
+** causes a rollback journal to be created (if it does not already exist)
+** and populated with enough information so that if a power loss occurs
+** the database can be restored to its original state by playing back
+** the journal. Then the contents of the journal are flushed out to
+** the disk. After the journal is safely on oxide, the changes to the
+** database are written into the database file and flushed to oxide.
+** At the end of this call, the rollback journal still exists on the
+** disk and we are still holding all locks, so the transaction has not
+** committed. See sqlite3BtreeCommitPhaseTwo() for the second phase of the
+** commit process.
+**
+** This call is a no-op if no write-transaction is currently active on pBt.
+**
+** Otherwise, sync the database file for the btree pBt. zSuperJrnl points to
+** the name of a super-journal file that should be written into the
+** individual journal file, or is NULL, indicating no super-journal file
+** (single database transaction).
+**
+** When this is called, the super-journal should already have been
+** created, populated with this journal pointer and synced to disk.
+**
+** Once this routine has returned, the only thing required to commit
+** the write-transaction for this database file is to delete the journal.
+*/
+SQLITE_PRIVATE int sqlite3BtreeCommitPhaseOne(Btree *p, const char *zSuperJrnl){
+ int rc = SQLITE_OK;
+ if( p->inTrans==TRANS_WRITE ){
+ BtShared *pBt = p->pBt;
+ sqlite3BtreeEnter(p);
+#ifndef SQLITE_OMIT_AUTOVACUUM
+ if( pBt->autoVacuum ){
+ rc = autoVacuumCommit(pBt);
+ if( rc!=SQLITE_OK ){
+ sqlite3BtreeLeave(p);
+ return rc;
+ }
+ }
+ if( pBt->bDoTruncate ){
+ sqlite3PagerTruncateImage(pBt->pPager, pBt->nPage);
+ }
+#endif
+ rc = sqlite3PagerCommitPhaseOne(pBt->pPager, zSuperJrnl, 0);
+ sqlite3BtreeLeave(p);
+ }
+ return rc;
+}
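+
+/*
+** Illustrative sketch added by the editor (not part of the original
+** source): for a multi-file transaction the upper layer runs phase one
+** on every b-tree with the shared super-journal name, commits by deleting
+** the super-journal file, and only then runs phase two with bCleanup!=0.
+** The helper below is hypothetical and never compiled.
+*/
+#if 0
+static int exampleTwoPhaseCommit(Btree **apBt, int nBt, const char *zSuper){
+  int i, rc = SQLITE_OK;
+  for(i=0; i<nBt && rc==SQLITE_OK; i++){
+    rc = sqlite3BtreeCommitPhaseOne(apBt[i], zSuper);
+  }
+  /* ...deleting the super-journal here is the actual commit point... */
+  for(i=0; i<nBt && rc==SQLITE_OK; i++){
+    rc = sqlite3BtreeCommitPhaseTwo(apBt[i], 1);
+  }
+  return rc;
+}
+#endif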
+
+/*
+** This function is called from both BtreeCommitPhaseTwo() and BtreeRollback()
+** at the conclusion of a transaction.
+*/
+static void btreeEndTransaction(Btree *p){
+ BtShared *pBt = p->pBt;
+ sqlite3 *db = p->db;
+ assert( sqlite3BtreeHoldsMutex(p) );
+
+#ifndef SQLITE_OMIT_AUTOVACUUM
+ pBt->bDoTruncate = 0;
+#endif
+ if( p->inTrans>TRANS_NONE && db->nVdbeRead>1 ){
+ /* If there are other active statements that belong to this database
+ ** handle, downgrade to a read-only transaction. The other statements
+ ** may still be reading from the database. */
+ downgradeAllSharedCacheTableLocks(p);
+ p->inTrans = TRANS_READ;
+ }else{
+ /* If the handle had any kind of transaction open, decrement the
+ ** transaction count of the shared btree. If the transaction count
+ ** reaches 0, set the shared state to TRANS_NONE. The unlockBtreeIfUnused()
+ ** call below will unlock the pager. */
+ if( p->inTrans!=TRANS_NONE ){
+ clearAllSharedCacheTableLocks(p);
+ pBt->nTransaction--;
+ if( 0==pBt->nTransaction ){
+ pBt->inTransaction = TRANS_NONE;
+ }
+ }
+
+ /* Set the current transaction state to TRANS_NONE and unlock the
+ ** pager if this call closed the only read or write transaction. */
+ p->inTrans = TRANS_NONE;
+ unlockBtreeIfUnused(pBt);
+ }
+
+ btreeIntegrity(p);
+}
+
+/*
+** Commit the transaction currently in progress.
+**
+** This routine implements the second phase of a 2-phase commit. The
+** sqlite3BtreeCommitPhaseOne() routine does the first phase and should
+** be invoked prior to calling this routine. The sqlite3BtreeCommitPhaseOne()
+** routine did all the work of writing information out to disk and flushing the
+** contents so that they are written onto the disk platter. All this
+** routine has to do is delete or truncate or zero the header in the
+** rollback journal (which causes the transaction to commit) and
+** drop locks.
+**
+** Normally, if an error occurs while the pager layer is attempting to
+** finalize the underlying journal file, this function returns an error and
+** the upper layer will attempt a rollback. However, if the second argument
+** is non-zero then this b-tree transaction is part of a multi-file
+** transaction. In this case, the transaction has already been committed
+** (by deleting a super-journal file) and the caller will ignore this
+** function's return code. So, even if an error occurs in the pager layer,
+** reset the b-tree object's internal state to indicate that the write
+** transaction has been closed. This is quite safe, as the pager will have
+** transitioned to the error state.
+**
+** This will release the write lock on the database file. If there
+** are no active cursors, it also releases the read lock.
+*/
+SQLITE_PRIVATE int sqlite3BtreeCommitPhaseTwo(Btree *p, int bCleanup){
+
+ if( p->inTrans==TRANS_NONE ) return SQLITE_OK;
+ sqlite3BtreeEnter(p);
+ btreeIntegrity(p);
+
+  /* If the handle has a write-transaction open, commit the shared-btree's
+ ** transaction and set the shared state to TRANS_READ.
+ */
+ if( p->inTrans==TRANS_WRITE ){
+ int rc;
+ BtShared *pBt = p->pBt;
+ assert( pBt->inTransaction==TRANS_WRITE );
+ assert( pBt->nTransaction>0 );
+ rc = sqlite3PagerCommitPhaseTwo(pBt->pPager);
+ if( rc!=SQLITE_OK && bCleanup==0 ){
+ sqlite3BtreeLeave(p);
+ return rc;
+ }
+ p->iDataVersion--; /* Compensate for pPager->iDataVersion++; */
+ pBt->inTransaction = TRANS_READ;
+ btreeClearHasContent(pBt);
+ }
+
+ btreeEndTransaction(p);
+ sqlite3BtreeLeave(p);
+ return SQLITE_OK;
+}
+
+/*
+** Do both phases of a commit.
+*/
+SQLITE_PRIVATE int sqlite3BtreeCommit(Btree *p){
+ int rc;
+ sqlite3BtreeEnter(p);
+ rc = sqlite3BtreeCommitPhaseOne(p, 0);
+ if( rc==SQLITE_OK ){
+ rc = sqlite3BtreeCommitPhaseTwo(p, 0);
+ }
+ sqlite3BtreeLeave(p);
+ return rc;
+}
+
+/*
+** This routine sets the state to CURSOR_FAULT and the error
+** code to errCode for every cursor on any BtShared that pBtree
+** references. Or if the writeOnly flag is set to 1, then only
+** trip write cursors and leave read cursors unchanged.
+**
+** Every cursor is a candidate to be tripped, including cursors
+** that belong to other database connections that happen to be
+** sharing the cache with pBtree.
+**
+** This routine gets called when a rollback occurs. If the writeOnly
+** flag is true, then only write-cursors need be tripped - read-only
+** cursors save their current positions so that they may continue
+** following the rollback. Or, if writeOnly is false, all cursors are
+** tripped. In general, writeOnly is false if the transaction being
+** rolled back modified the database schema. In this case b-tree root
+** pages may be moved or deleted from the database altogether, making
+** it unsafe for read cursors to continue.
+**
+** If the writeOnly flag is true and an error is encountered while
+** saving the current position of a read-only cursor, all cursors,
+** including all read-cursors are tripped.
+**
+** SQLITE_OK is returned if successful, or if an error occurs while
+** saving a cursor position, an SQLite error code.
+*/
+SQLITE_PRIVATE int sqlite3BtreeTripAllCursors(Btree *pBtree, int errCode, int writeOnly){
+ BtCursor *p;
+ int rc = SQLITE_OK;
+
+ assert( (writeOnly==0 || writeOnly==1) && BTCF_WriteFlag==1 );
+ if( pBtree ){
+ sqlite3BtreeEnter(pBtree);
+ for(p=pBtree->pBt->pCursor; p; p=p->pNext){
+ if( writeOnly && (p->curFlags & BTCF_WriteFlag)==0 ){
+ if( p->eState==CURSOR_VALID || p->eState==CURSOR_SKIPNEXT ){
+ rc = saveCursorPosition(p);
+ if( rc!=SQLITE_OK ){
+ (void)sqlite3BtreeTripAllCursors(pBtree, rc, 0);
+ break;
+ }
+ }
+ }else{
+ sqlite3BtreeClearCursor(p);
+ p->eState = CURSOR_FAULT;
+ p->skipNext = errCode;
+ }
+ btreeReleaseAllCursorPages(p);
+ }
+ sqlite3BtreeLeave(pBtree);
+ }
+ return rc;
+}
+
+/*
+** Set the pBt->nPage field correctly, according to the current
+** state of the database. Assume pBt->pPage1 is valid.
+*/
+static void btreeSetNPage(BtShared *pBt, MemPage *pPage1){
+ int nPage = get4byte(&pPage1->aData[28]);
+ testcase( nPage==0 );
+ if( nPage==0 ) sqlite3PagerPagecount(pBt->pPager, &nPage);
+ testcase( pBt->nPage!=nPage );
+ pBt->nPage = nPage;
+}
+
+/*
+** Rollback the transaction in progress.
+**
+** If tripCode is not SQLITE_OK then cursors will be invalidated (tripped).
+** Only write cursors are tripped if writeOnly is true but all cursors are
+** tripped if writeOnly is false. Any attempt to use
+** a tripped cursor will result in an error.
+**
+** This will release the write lock on the database file. If there
+** are no active cursors, it also releases the read lock.
+*/
+SQLITE_PRIVATE int sqlite3BtreeRollback(Btree *p, int tripCode, int writeOnly){
+ int rc;
+ BtShared *pBt = p->pBt;
+ MemPage *pPage1;
+
+ assert( writeOnly==1 || writeOnly==0 );
+ assert( tripCode==SQLITE_ABORT_ROLLBACK || tripCode==SQLITE_OK );
+ sqlite3BtreeEnter(p);
+ if( tripCode==SQLITE_OK ){
+ rc = tripCode = saveAllCursors(pBt, 0, 0);
+ if( rc ) writeOnly = 0;
+ }else{
+ rc = SQLITE_OK;
+ }
+ if( tripCode ){
+ int rc2 = sqlite3BtreeTripAllCursors(p, tripCode, writeOnly);
+ assert( rc==SQLITE_OK || (writeOnly==0 && rc2==SQLITE_OK) );
+ if( rc2!=SQLITE_OK ) rc = rc2;
+ }
+ btreeIntegrity(p);
+
+ if( p->inTrans==TRANS_WRITE ){
+ int rc2;
+
+ assert( TRANS_WRITE==pBt->inTransaction );
+ rc2 = sqlite3PagerRollback(pBt->pPager);
+ if( rc2!=SQLITE_OK ){
+ rc = rc2;
+ }
+
+ /* The rollback may have destroyed the pPage1->aData value. So
+ ** call btreeGetPage() on page 1 again to make
+ ** sure pPage1->aData is set correctly. */
+ if( btreeGetPage(pBt, 1, &pPage1, 0)==SQLITE_OK ){
+ btreeSetNPage(pBt, pPage1);
+ releasePageOne(pPage1);
+ }
+ assert( countValidCursors(pBt, 1)==0 );
+ pBt->inTransaction = TRANS_READ;
+ btreeClearHasContent(pBt);
+ }
+
+ btreeEndTransaction(p);
+ sqlite3BtreeLeave(p);
+ return rc;
+}
+
+/*
+** Start a statement subtransaction. The subtransaction can be rolled
+** back independently of the main transaction. You must start a transaction
+** before starting a subtransaction. The subtransaction is ended automatically
+** if the main transaction commits or rolls back.
+**
+** Statement subtransactions are used around individual SQL statements
+** that are contained within a BEGIN...COMMIT block. If a constraint
+** error occurs within the statement, the effect of that one statement
+** can be rolled back without having to rollback the entire transaction.
+**
+** A statement sub-transaction is implemented as an anonymous savepoint. The
+** value passed as the second parameter is the total number of savepoints,
+** including the new anonymous savepoint, open on the B-Tree. i.e. if there
+** are no active savepoints and no other statement-transactions open,
+** iStatement is 1. This anonymous savepoint can be released or rolled back
+** using the sqlite3BtreeSavepoint() function.
+*/
+SQLITE_PRIVATE int sqlite3BtreeBeginStmt(Btree *p, int iStatement){
+ int rc;
+ BtShared *pBt = p->pBt;
+ sqlite3BtreeEnter(p);
+ assert( p->inTrans==TRANS_WRITE );
+ assert( (pBt->btsFlags & BTS_READ_ONLY)==0 );
+ assert( iStatement>0 );
+ assert( iStatement>p->db->nSavepoint );
+ assert( pBt->inTransaction==TRANS_WRITE );
+ /* At the pager level, a statement transaction is a savepoint with
+ ** an index greater than all savepoints created explicitly using
+ ** SQL statements. It is illegal to open, release or rollback any
+ ** such savepoints while the statement transaction savepoint is active.
+ */
+ rc = sqlite3PagerOpenSavepoint(pBt->pPager, iStatement);
+ sqlite3BtreeLeave(p);
+ return rc;
+}
+
+/*
+** The second argument to this function, op, is always SAVEPOINT_ROLLBACK
+** or SAVEPOINT_RELEASE. This function either releases or rolls back the
+** savepoint identified by parameter iSavepoint, depending on the value
+** of op.
+**
+** Normally, iSavepoint is greater than or equal to zero. However, if op is
+** SAVEPOINT_ROLLBACK, then iSavepoint may also be -1. In this case the
+** contents of the entire transaction are rolled back. This is different
+** from a normal transaction rollback, as no locks are released and the
+** transaction remains open.
+*/
+SQLITE_PRIVATE int sqlite3BtreeSavepoint(Btree *p, int op, int iSavepoint){
+ int rc = SQLITE_OK;
+ if( p && p->inTrans==TRANS_WRITE ){
+ BtShared *pBt = p->pBt;
+ assert( op==SAVEPOINT_RELEASE || op==SAVEPOINT_ROLLBACK );
+ assert( iSavepoint>=0 || (iSavepoint==-1 && op==SAVEPOINT_ROLLBACK) );
+ sqlite3BtreeEnter(p);
+ if( op==SAVEPOINT_ROLLBACK ){
+ rc = saveAllCursors(pBt, 0, 0);
+ }
+ if( rc==SQLITE_OK ){
+ rc = sqlite3PagerSavepoint(pBt->pPager, op, iSavepoint);
+ }
+ if( rc==SQLITE_OK ){
+ if( iSavepoint<0 && (pBt->btsFlags & BTS_INITIALLY_EMPTY)!=0 ){
+ pBt->nPage = 0;
+ }
+ rc = newDatabase(pBt);
+ btreeSetNPage(pBt, pBt->pPage1);
+
+ /* pBt->nPage might be zero if the database was corrupt when
+ ** the transaction was started. Otherwise, it must be at least 1. */
+ assert( CORRUPT_DB || pBt->nPage>0 );
+ }
+ sqlite3BtreeLeave(p);
+ }
+ return rc;
+}
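+
+/*
+** Illustrative sketch added by the editor (not part of the original
+** source): a statement subtransaction opened with sqlite3BtreeBeginStmt()
+** is later released or rolled back through sqlite3BtreeSavepoint(). The
+** iStmt-1 index follows the convention that savepoint indexes at the
+** pager level are zero-based. The helper is hypothetical, never compiled.
+*/
+#if 0
+static int exampleStmtSubtransaction(Btree *p, int iStmt, int bCommit){
+  int rc = sqlite3BtreeBeginStmt(p, iStmt);  /* open anonymous savepoint */
+  if( rc==SQLITE_OK ){
+    /* ...perform the statement's b-tree writes here... */
+    rc = sqlite3BtreeSavepoint(p,
+             bCommit ? SAVEPOINT_RELEASE : SAVEPOINT_ROLLBACK, iStmt-1);
+  }
+  return rc;
+}
+#endif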
+
+/*
+** Create a new cursor for the BTree whose root is on the page
+** iTable. If a read-only cursor is requested, it is assumed that
+** the caller already has at least a read-only transaction open
+** on the database. If a write-cursor is requested, then
+** the caller is assumed to have an open write transaction.
+**
+** If the BTREE_WRCSR bit of wrFlag is clear, then the cursor can only
+** be used for reading. If the BTREE_WRCSR bit is set, then the cursor
+** can be used for reading or for writing if other conditions for writing
+** are also met. These are the conditions that must be met in order
+** for writing to be allowed:
+**
+** 1: The cursor must have been opened with wrFlag containing BTREE_WRCSR
+**
+** 2: Other database connections that share the same pager cache
+** but which are not in the READ_UNCOMMITTED state may not have
+** cursors open with wrFlag==0 on the same table. Otherwise
+** the changes made by this write cursor would be visible to
+** the read cursors in the other database connection.
+**
+** 3: The database must be writable (not on read-only media)
+**
+** 4: There must be an active transaction.
+**
+** The BTREE_FORDELETE bit of wrFlag may optionally be set if BTREE_WRCSR
+** is set. If FORDELETE is set, that is a hint to the implementation that
+** this cursor will only be used to seek to and delete entries of an index
+** as part of a larger DELETE statement. The FORDELETE hint is not used by
+** this implementation. But in a hypothetical alternative storage engine
+** in which index entries are automatically deleted when corresponding table
+** rows are deleted, the FORDELETE flag is a hint that all SEEK and DELETE
+** operations on this cursor can be no-ops and all READ operations can
+** return a null row (2-bytes: 0x01 0x00).
+**
+** No checking is done to make sure that page iTable really is the
+** root page of a b-tree. If it is not, then the cursor acquired
+** will not work correctly.
+**
+** It is assumed that sqlite3BtreeCursorZero() has been called
+** on pCur to initialize the memory space prior to invoking this routine.
+*/
+static int btreeCursor(
+ Btree *p, /* The btree */
+ Pgno iTable, /* Root page of table to open */
+ int wrFlag, /* 1 to write. 0 read-only */
+ struct KeyInfo *pKeyInfo, /* First arg to comparison function */
+ BtCursor *pCur /* Space for new cursor */
+){
+ BtShared *pBt = p->pBt; /* Shared b-tree handle */
+  BtCursor *pX;              /* Loops over all other cursors */
+
+ assert( sqlite3BtreeHoldsMutex(p) );
+ assert( wrFlag==0
+ || wrFlag==BTREE_WRCSR
+ || wrFlag==(BTREE_WRCSR|BTREE_FORDELETE)
+ );
+
+ /* The following assert statements verify that if this is a sharable
+ ** b-tree database, the connection is holding the required table locks,
+ ** and that no other connection has any open cursor that conflicts with
+ ** this lock. The iTable<1 term disables the check for corrupt schemas. */
+ assert( hasSharedCacheTableLock(p, iTable, pKeyInfo!=0, (wrFlag?2:1))
+ || iTable<1 );
+ assert( wrFlag==0 || !hasReadConflicts(p, iTable) );
+
+ /* Assert that the caller has opened the required transaction. */
+ assert( p->inTrans>TRANS_NONE );
+ assert( wrFlag==0 || p->inTrans==TRANS_WRITE );
+ assert( pBt->pPage1 && pBt->pPage1->aData );
+ assert( wrFlag==0 || (pBt->btsFlags & BTS_READ_ONLY)==0 );
+
+ if( wrFlag ){
+ allocateTempSpace(pBt);
+ if( pBt->pTmpSpace==0 ) return SQLITE_NOMEM_BKPT;
+ }
+ if( iTable<=1 ){
+ if( iTable<1 ){
+ return SQLITE_CORRUPT_BKPT;
+ }else if( btreePagecount(pBt)==0 ){
+ assert( wrFlag==0 );
+ iTable = 0;
+ }
+ }
+
+ /* Now that no other errors can occur, finish filling in the BtCursor
+ ** variables and link the cursor into the BtShared list. */
+ pCur->pgnoRoot = iTable;
+ pCur->iPage = -1;
+ pCur->pKeyInfo = pKeyInfo;
+ pCur->pBtree = p;
+ pCur->pBt = pBt;
+ pCur->curFlags = wrFlag ? BTCF_WriteFlag : 0;
+ pCur->curPagerFlags = wrFlag ? 0 : PAGER_GET_READONLY;
+ /* If there are two or more cursors on the same btree, then all such
+ ** cursors *must* have the BTCF_Multiple flag set. */
+ for(pX=pBt->pCursor; pX; pX=pX->pNext){
+ if( pX->pgnoRoot==iTable ){
+ pX->curFlags |= BTCF_Multiple;
+ pCur->curFlags |= BTCF_Multiple;
+ }
+ }
+ pCur->pNext = pBt->pCursor;
+ pBt->pCursor = pCur;
+ pCur->eState = CURSOR_INVALID;
+ return SQLITE_OK;
+}
+static int btreeCursorWithLock(
+ Btree *p, /* The btree */
+ Pgno iTable, /* Root page of table to open */
+ int wrFlag, /* 1 to write. 0 read-only */
+ struct KeyInfo *pKeyInfo, /* First arg to comparison function */
+ BtCursor *pCur /* Space for new cursor */
+){
+ int rc;
+ sqlite3BtreeEnter(p);
+ rc = btreeCursor(p, iTable, wrFlag, pKeyInfo, pCur);
+ sqlite3BtreeLeave(p);
+ return rc;
+}
+SQLITE_PRIVATE int sqlite3BtreeCursor(
+ Btree *p, /* The btree */
+ Pgno iTable, /* Root page of table to open */
+ int wrFlag, /* 1 to write. 0 read-only */
+ struct KeyInfo *pKeyInfo, /* First arg to xCompare() */
+ BtCursor *pCur /* Write new cursor here */
+){
+ if( p->sharable ){
+ return btreeCursorWithLock(p, iTable, wrFlag, pKeyInfo, pCur);
+ }else{
+ return btreeCursor(p, iTable, wrFlag, pKeyInfo, pCur);
+ }
+}
+
+/*
+** Return the size of a BtCursor object in bytes.
+**
+** This interface is needed so that users of cursors can preallocate
+** sufficient storage to hold a cursor. The BtCursor object is opaque
+** to users so they cannot do the sizeof() themselves - they must call
+** this routine.
+*/
+SQLITE_PRIVATE int sqlite3BtreeCursorSize(void){
+ return ROUND8(sizeof(BtCursor));
+}
+
+/*
+** Initialize memory that will be converted into a BtCursor object.
+**
+** The simple approach here would be to memset() the entire object
+** to zero. But it turns out that the apPage[] and aiIdx[] arrays
+** do not need to be zeroed and they are large, so we can save a lot
+** of run-time by skipping the initialization of those elements.
+*/
+SQLITE_PRIVATE void sqlite3BtreeCursorZero(BtCursor *p){
+ memset(p, 0, offsetof(BtCursor, BTCURSOR_FIRST_UNINIT));
+}
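+
+/*
+** Illustrative sketch added by the editor (not part of the original
+** source): because BtCursor is opaque, callers size the allocation with
+** sqlite3BtreeCursorSize(), clear it with sqlite3BtreeCursorZero(), and
+** then open it with sqlite3BtreeCursor(). The helper below is
+** hypothetical and never compiled.
+*/
+#if 0
+static int exampleOpenReadCursor(Btree *p, Pgno iTable, BtCursor **ppCur){
+  BtCursor *pCur = (BtCursor*)sqlite3Malloc(sqlite3BtreeCursorSize());
+  if( pCur==0 ) return SQLITE_NOMEM;
+  sqlite3BtreeCursorZero(pCur);
+  *ppCur = pCur;
+  return sqlite3BtreeCursor(p, iTable, 0, 0, pCur); /* read-only, table */
+}
+#endif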
+
+/*
+** Close a cursor. The read lock on the database file is released
+** when the last cursor is closed.
+*/
+SQLITE_PRIVATE int sqlite3BtreeCloseCursor(BtCursor *pCur){
+ Btree *pBtree = pCur->pBtree;
+ if( pBtree ){
+ BtShared *pBt = pCur->pBt;
+ sqlite3BtreeEnter(pBtree);
+ assert( pBt->pCursor!=0 );
+ if( pBt->pCursor==pCur ){
+ pBt->pCursor = pCur->pNext;
+ }else{
+ BtCursor *pPrev = pBt->pCursor;
+ do{
+ if( pPrev->pNext==pCur ){
+ pPrev->pNext = pCur->pNext;
+ break;
+ }
+ pPrev = pPrev->pNext;
+ }while( ALWAYS(pPrev) );
+ }
+ btreeReleaseAllCursorPages(pCur);
+ unlockBtreeIfUnused(pBt);
+ sqlite3_free(pCur->aOverflow);
+ sqlite3_free(pCur->pKey);
+ sqlite3BtreeLeave(pBtree);
+ pCur->pBtree = 0;
+ }
+ return SQLITE_OK;
+}
+
+/*
+** Make sure the BtCursor* given in the argument has a valid
+** BtCursor.info structure. If it is not already valid, call
+** btreeParseCell() to fill it in.
+**
+** BtCursor.info is a cache of the information in the current cell.
+** Using this cache reduces the number of calls to btreeParseCell().
+*/
+#ifndef NDEBUG
+ static int cellInfoEqual(CellInfo *a, CellInfo *b){
+ if( a->nKey!=b->nKey ) return 0;
+ if( a->pPayload!=b->pPayload ) return 0;
+ if( a->nPayload!=b->nPayload ) return 0;
+ if( a->nLocal!=b->nLocal ) return 0;
+ if( a->nSize!=b->nSize ) return 0;
+ return 1;
+ }
+ static void assertCellInfo(BtCursor *pCur){
+ CellInfo info;
+ memset(&info, 0, sizeof(info));
+ btreeParseCell(pCur->pPage, pCur->ix, &info);
+ assert( CORRUPT_DB || cellInfoEqual(&info, &pCur->info) );
+ }
+#else
+ #define assertCellInfo(x)
+#endif
+static SQLITE_NOINLINE void getCellInfo(BtCursor *pCur){
+ if( pCur->info.nSize==0 ){
+ pCur->curFlags |= BTCF_ValidNKey;
+ btreeParseCell(pCur->pPage,pCur->ix,&pCur->info);
+ }else{
+ assertCellInfo(pCur);
+ }
+}
+
+#ifndef NDEBUG /* The next routine used only within assert() statements */
+/*
+** Return true if the given BtCursor is valid. A valid cursor is one
+** that is currently pointing to a row in a (non-empty) table.
+** This verification routine is used only within assert() statements.
+*/
+SQLITE_PRIVATE int sqlite3BtreeCursorIsValid(BtCursor *pCur){
+ return pCur && pCur->eState==CURSOR_VALID;
+}
+#endif /* NDEBUG */
+SQLITE_PRIVATE int sqlite3BtreeCursorIsValidNN(BtCursor *pCur){
+ assert( pCur!=0 );
+ return pCur->eState==CURSOR_VALID;
+}
+
+/*
+** Return the value of the integer key or "rowid" for a table btree.
+** This routine is only valid for a cursor that is pointing into an
+** ordinary table btree. If the cursor points to an index btree or
+** is invalid, the result of this routine is undefined.
+*/
+SQLITE_PRIVATE i64 sqlite3BtreeIntegerKey(BtCursor *pCur){
+ assert( cursorHoldsMutex(pCur) );
+ assert( pCur->eState==CURSOR_VALID );
+ assert( pCur->curIntKey );
+ getCellInfo(pCur);
+ return pCur->info.nKey;
+}
+
+/*
+** Pin or unpin a cursor.
+*/
+SQLITE_PRIVATE void sqlite3BtreeCursorPin(BtCursor *pCur){
+ assert( (pCur->curFlags & BTCF_Pinned)==0 );
+ pCur->curFlags |= BTCF_Pinned;
+}
+SQLITE_PRIVATE void sqlite3BtreeCursorUnpin(BtCursor *pCur){
+ assert( (pCur->curFlags & BTCF_Pinned)!=0 );
+ pCur->curFlags &= ~BTCF_Pinned;
+}
+
+#ifdef SQLITE_ENABLE_OFFSET_SQL_FUNC
+/*
+** Return the offset into the database file for the start of the
+** payload to which the cursor is pointing.
+*/
+SQLITE_PRIVATE i64 sqlite3BtreeOffset(BtCursor *pCur){
+ assert( cursorHoldsMutex(pCur) );
+ assert( pCur->eState==CURSOR_VALID );
+ getCellInfo(pCur);
+ return (i64)pCur->pBt->pageSize*((i64)pCur->pPage->pgno - 1) +
+ (i64)(pCur->info.pPayload - pCur->pPage->aData);
+}
+#endif /* SQLITE_ENABLE_OFFSET_SQL_FUNC */
+
+/*
+** Return the number of bytes of payload for the entry that pCur is
+** currently pointing to. For table btrees, this will be the amount
+** of data. For index btrees, this will be the size of the key.
+**
+** The caller must guarantee that the cursor is pointing to a non-NULL
+** valid entry. In other words, the calling procedure must guarantee
+** that the cursor has Cursor.eState==CURSOR_VALID.
+*/
+SQLITE_PRIVATE u32 sqlite3BtreePayloadSize(BtCursor *pCur){
+ assert( cursorHoldsMutex(pCur) );
+ assert( pCur->eState==CURSOR_VALID );
+ getCellInfo(pCur);
+ return pCur->info.nPayload;
+}
+
+/*
+** Return an upper bound on the size of any record for the table
+** that the cursor is pointing into.
+**
+** This is an optimization. Everything will still work if this
+** routine always returns 2147483647 (which is the largest record
+** that SQLite can handle) or more. But returning a smaller value might
+** prevent large memory allocations when trying to interpret a
+** corrupt database.
+**
+** The current implementation merely returns the size of the underlying
+** database file.
+*/
+SQLITE_PRIVATE sqlite3_int64 sqlite3BtreeMaxRecordSize(BtCursor *pCur){
+ assert( cursorHoldsMutex(pCur) );
+ assert( pCur->eState==CURSOR_VALID );
+ return pCur->pBt->pageSize * (sqlite3_int64)pCur->pBt->nPage;
+}
+
+/*
+** Given the page number of an overflow page in the database (parameter
+** ovfl), this function finds the page number of the next page in the
+** linked list of overflow pages. If possible, it uses the auto-vacuum
+** pointer-map data instead of reading the content of page ovfl to do so.
+**
+** If an error occurs an SQLite error code is returned. Otherwise:
+**
+** The page number of the next overflow page in the linked list is
+** written to *pPgnoNext. If page ovfl is the last page in its linked
+** list, *pPgnoNext is set to zero.
+**
+** If ppPage is not NULL, and a reference to the MemPage object corresponding
+** to page number ovfl was obtained, then *ppPage is set to point to that
+** reference. It is the responsibility of the caller to call releasePage()
+** on *ppPage to free the reference. If no reference was obtained (because
+** the pointer-map was used to obtain the value for *pPgnoNext), then
+** *ppPage is set to zero.
+*/
+static int getOverflowPage(
+ BtShared *pBt, /* The database file */
+ Pgno ovfl, /* Current overflow page number */
+ MemPage **ppPage, /* OUT: MemPage handle (may be NULL) */
+ Pgno *pPgnoNext /* OUT: Next overflow page number */
+){
+ Pgno next = 0;
+ MemPage *pPage = 0;
+ int rc = SQLITE_OK;
+
+ assert( sqlite3_mutex_held(pBt->mutex) );
+ assert(pPgnoNext);
+
+#ifndef SQLITE_OMIT_AUTOVACUUM
+ /* Try to find the next page in the overflow list using the
+ ** autovacuum pointer-map pages. Guess that the next page in
+ ** the overflow list is page number (ovfl+1). If that guess turns
+ ** out to be wrong, fall back to loading the data of page
+ ** number ovfl to determine the next page number.
+ */
+ if( pBt->autoVacuum ){
+ Pgno pgno;
+ Pgno iGuess = ovfl+1;
+ u8 eType;
+
+ while( PTRMAP_ISPAGE(pBt, iGuess) || iGuess==PENDING_BYTE_PAGE(pBt) ){
+ iGuess++;
+ }
+
+ if( iGuess<=btreePagecount(pBt) ){
+ rc = ptrmapGet(pBt, iGuess, &eType, &pgno);
+ if( rc==SQLITE_OK && eType==PTRMAP_OVERFLOW2 && pgno==ovfl ){
+ next = iGuess;
+ rc = SQLITE_DONE;
+ }
+ }
+ }
+#endif
+
+ assert( next==0 || rc==SQLITE_DONE );
+ if( rc==SQLITE_OK ){
+ rc = btreeGetPage(pBt, ovfl, &pPage, (ppPage==0) ? PAGER_GET_READONLY : 0);
+ assert( rc==SQLITE_OK || pPage==0 );
+ if( rc==SQLITE_OK ){
+ next = get4byte(pPage->aData);
+ }
+ }
+
+ *pPgnoNext = next;
+ if( ppPage ){
+ *ppPage = pPage;
+ }else{
+ releasePage(pPage);
+ }
+ return (rc==SQLITE_DONE ? SQLITE_OK : rc);
+}
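+
+/*
+** Editor's illustrative note (not part of the original source): if
+** ovfl==5 and page 6 is neither a pointer-map page nor the pending-byte
+** page, iGuess stays 6. If ptrmapGet() then reports eType==PTRMAP_OVERFLOW2
+** with parent page 5 for it, page 6 is known to follow page 5 in the
+** chain and no page content needs to be read at all.
+*/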
+
+/*
+** Copy data from a buffer to a page, or from a page to a buffer.
+**
+** pPayload is a pointer to data stored on database page pDbPage.
+** If argument eOp is false, then nByte bytes of data are copied
+** from pPayload to the buffer pointed at by pBuf. If eOp is true,
+** then sqlite3PagerWrite() is called on pDbPage and nByte bytes
+** of data are copied from the buffer pBuf to pPayload.
+**
+** SQLITE_OK is returned on success, otherwise an error code.
+*/
+static int copyPayload(
+ void *pPayload, /* Pointer to page data */
+ void *pBuf, /* Pointer to buffer */
+ int nByte, /* Number of bytes to copy */
+ int eOp, /* 0 -> copy from page, 1 -> copy to page */
+ DbPage *pDbPage /* Page containing pPayload */
+){
+ if( eOp ){
+ /* Copy data from buffer to page (a write operation) */
+ int rc = sqlite3PagerWrite(pDbPage);
+ if( rc!=SQLITE_OK ){
+ return rc;
+ }
+ memcpy(pPayload, pBuf, nByte);
+ }else{
+ /* Copy data from page to buffer (a read operation) */
+ memcpy(pBuf, pPayload, nByte);
+ }
+ return SQLITE_OK;
+}
+
+/*
+** This function is used to read or overwrite payload information
+** for the entry that the pCur cursor is pointing to. The eOp
+** argument is interpreted as follows:
+**
+** 0: The operation is a read. Populate the overflow cache.
+** 1: The operation is a write. Populate the overflow cache.
+**
+** A total of "amt" bytes are read or written beginning at "offset".
+** Data is read to or from the buffer pBuf.
+**
+** The content being read or written might appear on the main page
+** or be scattered out on multiple overflow pages.
+**
+** If the current cursor entry uses one or more overflow pages
+** this function may allocate space for and lazily populate
+** the overflow page-list cache array (BtCursor.aOverflow).
+** Subsequent calls use this cache to make seeking to the supplied offset
+** more efficient.
+**
+** Once an overflow page-list cache has been allocated, it must be
+** invalidated if some other cursor writes to the same table, or if
+** the cursor is moved to a different row. Additionally, in auto-vacuum
+** mode, the following events may invalidate an overflow page-list cache.
+**
+** * An incremental vacuum,
+** * A commit in auto_vacuum="full" mode,
+** * Creating a table (may require moving an overflow page).
+*/
+static int accessPayload(
+ BtCursor *pCur, /* Cursor pointing to entry to read from */
+ u32 offset, /* Begin reading this far into payload */
+ u32 amt, /* Read this many bytes */
+ unsigned char *pBuf, /* Write the bytes into this buffer */
+ int eOp /* zero to read. non-zero to write. */
+){
+ unsigned char *aPayload;
+ int rc = SQLITE_OK;
+ int iIdx = 0;
+ MemPage *pPage = pCur->pPage; /* Btree page of current entry */
+ BtShared *pBt = pCur->pBt; /* Btree this cursor belongs to */
+#ifdef SQLITE_DIRECT_OVERFLOW_READ
+ unsigned char * const pBufStart = pBuf; /* Start of original out buffer */
+#endif
+
+ assert( pPage );
+ assert( eOp==0 || eOp==1 );
+ assert( pCur->eState==CURSOR_VALID );
+ assert( pCur->ix<pPage->nCell );
+ assert( cursorHoldsMutex(pCur) );
+
+ getCellInfo(pCur);
+ aPayload = pCur->info.pPayload;
+ assert( offset+amt <= pCur->info.nPayload );
+
+ assert( aPayload > pPage->aData );
+ if( (uptr)(aPayload - pPage->aData) > (pBt->usableSize - pCur->info.nLocal) ){
+ /* Trying to read or write past the end of the data is an error. The
+ ** conditional above is really:
+ ** &aPayload[pCur->info.nLocal] > &pPage->aData[pBt->usableSize]
+ ** but is recast into its current form to avoid integer overflow problems
+ */
+ return SQLITE_CORRUPT_PAGE(pPage);
+ }
+
+ /* Check if data must be read/written to/from the btree page itself. */
+ if( offset<pCur->info.nLocal ){
+ int a = amt;
+ if( a+offset>pCur->info.nLocal ){
+ a = pCur->info.nLocal - offset;
+ }
+ rc = copyPayload(&aPayload[offset], pBuf, a, eOp, pPage->pDbPage);
+ offset = 0;
+ pBuf += a;
+ amt -= a;
+ }else{
+ offset -= pCur->info.nLocal;
+ }
+
+
+ if( rc==SQLITE_OK && amt>0 ){
+ const u32 ovflSize = pBt->usableSize - 4; /* Bytes content per ovfl page */
+ Pgno nextPage;
+
+ nextPage = get4byte(&aPayload[pCur->info.nLocal]);
+
+ /* If the BtCursor.aOverflow[] has not been allocated, allocate it now.
+ **
+ ** The aOverflow[] array is sized at one entry for each overflow page
+ ** in the overflow chain. The page number of the first overflow page is
+ ** stored in aOverflow[0], etc. A value of 0 in the aOverflow[] array
+ ** means "not yet known" (the cache is lazily populated).
+ */
+ if( (pCur->curFlags & BTCF_ValidOvfl)==0 ){
+ int nOvfl = (pCur->info.nPayload-pCur->info.nLocal+ovflSize-1)/ovflSize;
+ if( pCur->aOverflow==0
+ || nOvfl*(int)sizeof(Pgno) > sqlite3MallocSize(pCur->aOverflow)
+ ){
+ Pgno *aNew = (Pgno*)sqlite3Realloc(
+ pCur->aOverflow, nOvfl*2*sizeof(Pgno)
+ );
+ if( aNew==0 ){
+ return SQLITE_NOMEM_BKPT;
+ }else{
+ pCur->aOverflow = aNew;
+ }
+ }
+ memset(pCur->aOverflow, 0, nOvfl*sizeof(Pgno));
+ pCur->curFlags |= BTCF_ValidOvfl;
+ }else{
+ /* If the overflow page-list cache has been allocated and the
+ ** entry for the first required overflow page is valid, skip
+ ** directly to it.
+ */
+ if( pCur->aOverflow[offset/ovflSize] ){
+ iIdx = (offset/ovflSize);
+ nextPage = pCur->aOverflow[iIdx];
+ offset = (offset%ovflSize);
+ }
+ }
+
+ assert( rc==SQLITE_OK && amt>0 );
+ while( nextPage ){
+ /* If required, populate the overflow page-list cache. */
+ if( nextPage > pBt->nPage ) return SQLITE_CORRUPT_BKPT;
+ assert( pCur->aOverflow[iIdx]==0
+ || pCur->aOverflow[iIdx]==nextPage
+ || CORRUPT_DB );
+ pCur->aOverflow[iIdx] = nextPage;
+
+ if( offset>=ovflSize ){
+ /* The only reason to read this page is to obtain the page
+ ** number for the next page in the overflow chain. The page
+        ** data is not required. So first try to look up the overflow
+ ** page-list cache, if any, then fall back to the getOverflowPage()
+ ** function.
+ */
+ assert( pCur->curFlags & BTCF_ValidOvfl );
+ assert( pCur->pBtree->db==pBt->db );
+ if( pCur->aOverflow[iIdx+1] ){
+ nextPage = pCur->aOverflow[iIdx+1];
+ }else{
+ rc = getOverflowPage(pBt, nextPage, 0, &nextPage);
+ }
+ offset -= ovflSize;
+ }else{
+ /* Need to read this page properly. It contains some of the
+ ** range of data that is being read (eOp==0) or written (eOp!=0).
+ */
+ int a = amt;
+ if( a + offset > ovflSize ){
+ a = ovflSize - offset;
+ }
+
+#ifdef SQLITE_DIRECT_OVERFLOW_READ
+ /* If all the following are true:
+ **
+ ** 1) this is a read operation, and
+ ** 2) data is required from the start of this overflow page, and
+ ** 3) there are no dirty pages in the page-cache
+ ** 4) the database is file-backed, and
+ ** 5) the page is not in the WAL file
+ ** 6) at least 4 bytes have already been read into the output buffer
+ **
+ ** then data can be read directly from the database file into the
+ ** output buffer, bypassing the page-cache altogether. This speeds
+ ** up loading large records that span many overflow pages.
+ */
+ if( eOp==0 /* (1) */
+ && offset==0 /* (2) */
+ && sqlite3PagerDirectReadOk(pBt->pPager, nextPage) /* (3,4,5) */
+ && &pBuf[-4]>=pBufStart /* (6) */
+ ){
+ sqlite3_file *fd = sqlite3PagerFile(pBt->pPager);
+ u8 aSave[4];
+ u8 *aWrite = &pBuf[-4];
+ assert( aWrite>=pBufStart ); /* due to (6) */
+ memcpy(aSave, aWrite, 4);
+ rc = sqlite3OsRead(fd, aWrite, a+4, (i64)pBt->pageSize*(nextPage-1));
+ if( rc && nextPage>pBt->nPage ) rc = SQLITE_CORRUPT_BKPT;
+ nextPage = get4byte(aWrite);
+ memcpy(aWrite, aSave, 4);
+ }else
+#endif
+
+ {
+ DbPage *pDbPage;
+ rc = sqlite3PagerGet(pBt->pPager, nextPage, &pDbPage,
+ (eOp==0 ? PAGER_GET_READONLY : 0)
+ );
+ if( rc==SQLITE_OK ){
+ aPayload = sqlite3PagerGetData(pDbPage);
+ nextPage = get4byte(aPayload);
+ rc = copyPayload(&aPayload[offset+4], pBuf, a, eOp, pDbPage);
+ sqlite3PagerUnref(pDbPage);
+ offset = 0;
+ }
+ }
+ amt -= a;
+ if( amt==0 ) return rc;
+ pBuf += a;
+ }
+ if( rc ) break;
+ iIdx++;
+ }
+ }
+
+ if( rc==SQLITE_OK && amt>0 ){
+ /* Overflow chain ends prematurely */
+ return SQLITE_CORRUPT_PAGE(pPage);
+ }
+ return rc;
+}
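+
+/*
+** Editor's illustrative note (not part of the original source): with a
+** usable page size of 1024 bytes, ovflSize==1020. A request beginning
+** 2500 bytes into the overflow content starts in the page recorded at
+** aOverflow[2500/1020]==aOverflow[2], at intra-page offset 2500%1020==460,
+** so a warm cache lets the seek jump straight to the third overflow page
+** instead of walking the chain from its head.
+*/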
+
+/*
+** Read part of the payload for the row at which cursor pCur is currently
+** pointing. "amt" bytes will be transferred into pBuf[]. The transfer
+** begins at "offset".
+**
+** pCur can be pointing to either a table or an index b-tree.
+** If pointing to a table btree, then the content section is read. If
+** pCur is pointing to an index b-tree then the key section is read.
+**
+** For sqlite3BtreePayload(), the caller must ensure that pCur is pointing
+** to a valid row in the table. For sqlite3BtreePayloadChecked(), the
+** cursor might be invalid or might need to be restored before being read.
+**
+** Return SQLITE_OK on success or an error code if anything goes
+** wrong. An error is returned if "offset+amt" is larger than
+** the available payload.
+*/
+SQLITE_PRIVATE int sqlite3BtreePayload(BtCursor *pCur, u32 offset, u32 amt, void *pBuf){
+ assert( cursorHoldsMutex(pCur) );
+ assert( pCur->eState==CURSOR_VALID );
+ assert( pCur->iPage>=0 && pCur->pPage );
+ assert( pCur->ix<pCur->pPage->nCell );
+ return accessPayload(pCur, offset, amt, (unsigned char*)pBuf, 0);
+}
+
+/*
+** This variant of sqlite3BtreePayload() works even if the cursor is not
+** in the CURSOR_VALID state. It is only used by the sqlite3_blob_read()
+** interface.
+*/
+#ifndef SQLITE_OMIT_INCRBLOB
+static SQLITE_NOINLINE int accessPayloadChecked(
+ BtCursor *pCur,
+ u32 offset,
+ u32 amt,
+ void *pBuf
+){
+ int rc;
+ if ( pCur->eState==CURSOR_INVALID ){
+ return SQLITE_ABORT;
+ }
+ assert( cursorOwnsBtShared(pCur) );
+ rc = btreeRestoreCursorPosition(pCur);
+ return rc ? rc : accessPayload(pCur, offset, amt, pBuf, 0);
+}
+SQLITE_PRIVATE int sqlite3BtreePayloadChecked(BtCursor *pCur, u32 offset, u32 amt, void *pBuf){
+ if( pCur->eState==CURSOR_VALID ){
+ assert( cursorOwnsBtShared(pCur) );
+ return accessPayload(pCur, offset, amt, pBuf, 0);
+ }else{
+ return accessPayloadChecked(pCur, offset, amt, pBuf);
+ }
+}
+#endif /* SQLITE_OMIT_INCRBLOB */
+
+/*
+** Return a pointer to payload information from the entry that the
+** pCur cursor is pointing to. The pointer is to the beginning of
+** the key for index btrees (pPage->intKey==0) and is the data for
+** table btrees (pPage->intKey==1). The number of bytes of available
+** key/data is written into *pAmt. If *pAmt==0, then the value
+** returned will not be a valid pointer.
+**
+** This routine is an optimization. It is common for the entire key
+** and data to fit on the local page and for there to be no overflow
+** pages. When that is so, this routine can be used to access the
+** key and data without making a copy. If the key and/or data spills
+** onto overflow pages, then accessPayload() must be used to reassemble
+** the key/data and copy it into a preallocated buffer.
+**
+** The pointer returned by this routine looks directly into the cached
+** page of the database. The data might change or move the next time
+** any btree routine is called.
+*/
+static const void *fetchPayload(
+ BtCursor *pCur, /* Cursor pointing to entry to read from */
+ u32 *pAmt /* Write the number of available bytes here */
+){
+ int amt;
+ assert( pCur!=0 && pCur->iPage>=0 && pCur->pPage);
+ assert( pCur->eState==CURSOR_VALID );
+ assert( sqlite3_mutex_held(pCur->pBtree->db->mutex) );
+ assert( cursorOwnsBtShared(pCur) );
+ assert( pCur->ix<pCur->pPage->nCell );
+ assert( pCur->info.nSize>0 );
+ assert( pCur->info.pPayload>pCur->pPage->aData || CORRUPT_DB );
+  assert( pCur->info.pPayload<pCur->pPage->aDataEnd || CORRUPT_DB );
+ amt = pCur->info.nLocal;
+ if( amt>(int)(pCur->pPage->aDataEnd - pCur->info.pPayload) ){
+ /* There is too little space on the page for the expected amount
+ ** of local content. Database must be corrupt. */
+ assert( CORRUPT_DB );
+ amt = MAX(0, (int)(pCur->pPage->aDataEnd - pCur->info.pPayload));
+ }
+ *pAmt = (u32)amt;
+ return (void*)pCur->info.pPayload;
+}
+
+
+/*
+** For the entry that cursor pCur is pointing to, return as
+** many bytes of the key or data as are available on the local
+** b-tree page. Write the number of available bytes into *pAmt.
+**
+** The pointer returned is ephemeral. The key/data may move
+** or be destroyed on the next call to any Btree routine,
+** including calls from other threads against the same cache.
+** Hence, a mutex on the BtShared should be held prior to calling
+** this routine.
+**
+** This routine is used to get quick access to key and data
+** in the common case where no overflow pages are used.
+*/
+SQLITE_PRIVATE const void *sqlite3BtreePayloadFetch(BtCursor *pCur, u32 *pAmt){
+ return fetchPayload(pCur, pAmt);
+}
+
+
+/*
+** Move the cursor down to a new child page. The newPgno argument is the
+** page number of the child page to move to.
+**
+** This function returns SQLITE_CORRUPT if the page-header flags field of
+** the new child page does not match the flags field of the parent (i.e.
+** if an intkey page appears to be the parent of a non-intkey page, or
+** vice-versa).
+*/
+static int moveToChild(BtCursor *pCur, u32 newPgno){
+ BtShared *pBt = pCur->pBt;
+
+ assert( cursorOwnsBtShared(pCur) );
+ assert( pCur->eState==CURSOR_VALID );
+ assert( pCur->iPage<BTCURSOR_MAX_DEPTH );
+ assert( pCur->iPage>=0 );
+ if( pCur->iPage>=(BTCURSOR_MAX_DEPTH-1) ){
+ return SQLITE_CORRUPT_BKPT;
+ }
+ pCur->info.nSize = 0;
+ pCur->curFlags &= ~(BTCF_ValidNKey|BTCF_ValidOvfl);
+ pCur->aiIdx[pCur->iPage] = pCur->ix;
+ pCur->apPage[pCur->iPage] = pCur->pPage;
+ pCur->ix = 0;
+ pCur->iPage++;
+ return getAndInitPage(pBt, newPgno, &pCur->pPage, pCur, pCur->curPagerFlags);
+}
+
+#ifdef SQLITE_DEBUG
+/*
+** Page pParent is an internal (non-leaf) tree page. This function
+** asserts that page number iChild is the left-child of the iIdx'th
+** cell in page pParent. Or, if iIdx is equal to the total number of
+** cells in pParent, that page number iChild is the right-child of
+** the page.
+*/
+static void assertParentIndex(MemPage *pParent, int iIdx, Pgno iChild){
+ if( CORRUPT_DB ) return; /* The conditions tested below might not be true
+ ** in a corrupt database */
+ assert( iIdx<=pParent->nCell );
+ if( iIdx==pParent->nCell ){
+ assert( get4byte(&pParent->aData[pParent->hdrOffset+8])==iChild );
+ }else{
+ assert( get4byte(findCell(pParent, iIdx))==iChild );
+ }
+}
+#else
+# define assertParentIndex(x,y,z)
+#endif
+
+/*
+** Move the cursor up to the parent page.
+**
+** pCur->idx is set to the cell index that contains the pointer
+** to the page we are coming from. If we are coming from the
+** right-most child page then pCur->idx is set to one more than
+** the largest cell index.
+*/
+static void moveToParent(BtCursor *pCur){
+ MemPage *pLeaf;
+ assert( cursorOwnsBtShared(pCur) );
+ assert( pCur->eState==CURSOR_VALID );
+ assert( pCur->iPage>0 );
+ assert( pCur->pPage );
+ assertParentIndex(
+ pCur->apPage[pCur->iPage-1],
+ pCur->aiIdx[pCur->iPage-1],
+ pCur->pPage->pgno
+ );
+ testcase( pCur->aiIdx[pCur->iPage-1] > pCur->apPage[pCur->iPage-1]->nCell );
+ pCur->info.nSize = 0;
+ pCur->curFlags &= ~(BTCF_ValidNKey|BTCF_ValidOvfl);
+ pCur->ix = pCur->aiIdx[pCur->iPage-1];
+ pLeaf = pCur->pPage;
+ pCur->pPage = pCur->apPage[--pCur->iPage];
+ releasePageNotNull(pLeaf);
+}
+
+/*
+** Move the cursor to point to the root page of its b-tree structure.
+**
+** If the table has a virtual root page, then the cursor is moved to point
+** to the virtual root page instead of the actual root page. A table has a
+** virtual root page when the actual root page contains no cells and a
+** single child page. This can only happen with the table rooted at page 1.
+**
+** If the b-tree structure is empty, the cursor state is set to
+** CURSOR_INVALID and this routine returns SQLITE_EMPTY. Otherwise,
+** the cursor is set to point to the first cell located on the root
+** (or virtual root) page and the cursor state is set to CURSOR_VALID.
+**
+** If this function returns successfully, it may be assumed that the
+** page-header flags indicate that the [virtual] root-page is the expected
+** kind of b-tree page (i.e. if when opening the cursor the caller did not
+** specify a KeyInfo structure the flags byte is set to 0x05 or 0x0D,
+** indicating a table b-tree, or if the caller did specify a KeyInfo
+** structure the flags byte is set to 0x02 or 0x0A, indicating an index
+** b-tree).
+*/
+static int moveToRoot(BtCursor *pCur){
+ MemPage *pRoot;
+ int rc = SQLITE_OK;
+
+ assert( cursorOwnsBtShared(pCur) );
+ assert( CURSOR_INVALID < CURSOR_REQUIRESEEK );
+ assert( CURSOR_VALID < CURSOR_REQUIRESEEK );
+ assert( CURSOR_FAULT > CURSOR_REQUIRESEEK );
+ assert( pCur->eState < CURSOR_REQUIRESEEK || pCur->iPage<0 );
+ assert( pCur->pgnoRoot>0 || pCur->iPage<0 );
+
+ if( pCur->iPage>=0 ){
+ if( pCur->iPage ){
+ releasePageNotNull(pCur->pPage);
+ while( --pCur->iPage ){
+ releasePageNotNull(pCur->apPage[pCur->iPage]);
+ }
+ pCur->pPage = pCur->apPage[0];
+ goto skip_init;
+ }
+ }else if( pCur->pgnoRoot==0 ){
+ pCur->eState = CURSOR_INVALID;
+ return SQLITE_EMPTY;
+ }else{
+ assert( pCur->iPage==(-1) );
+ if( pCur->eState>=CURSOR_REQUIRESEEK ){
+ if( pCur->eState==CURSOR_FAULT ){
+ assert( pCur->skipNext!=SQLITE_OK );
+ return pCur->skipNext;
+ }
+ sqlite3BtreeClearCursor(pCur);
+ }
+ rc = getAndInitPage(pCur->pBtree->pBt, pCur->pgnoRoot, &pCur->pPage,
+ 0, pCur->curPagerFlags);
+ if( rc!=SQLITE_OK ){
+ pCur->eState = CURSOR_INVALID;
+ return rc;
+ }
+ pCur->iPage = 0;
+ pCur->curIntKey = pCur->pPage->intKey;
+ }
+ pRoot = pCur->pPage;
+ assert( pRoot->pgno==pCur->pgnoRoot );
+
+ /* If pCur->pKeyInfo is not NULL, then the caller that opened this cursor
+ ** expected to open it on an index b-tree. Otherwise, if pKeyInfo is
+ ** NULL, the caller expects a table b-tree. If this is not the case,
+ ** return an SQLITE_CORRUPT error.
+ **
+ ** Earlier versions of SQLite assumed that this test could not fail
+ ** if the root page was already loaded when this function was called (i.e.
+ ** if pCur->iPage>=0). But this is not so if the database is corrupted
+ ** in such a way that page pRoot is linked into a second b-tree table
+ ** (or the freelist). */
+ assert( pRoot->intKey==1 || pRoot->intKey==0 );
+ if( pRoot->isInit==0 || (pCur->pKeyInfo==0)!=pRoot->intKey ){
+ return SQLITE_CORRUPT_PAGE(pCur->pPage);
+ }
+
+skip_init:
+ pCur->ix = 0;
+ pCur->info.nSize = 0;
+ pCur->curFlags &= ~(BTCF_AtLast|BTCF_ValidNKey|BTCF_ValidOvfl);
+
+ pRoot = pCur->pPage;
+ if( pRoot->nCell>0 ){
+ pCur->eState = CURSOR_VALID;
+ }else if( !pRoot->leaf ){
+ Pgno subpage;
+ if( pRoot->pgno!=1 ) return SQLITE_CORRUPT_BKPT;
+ subpage = get4byte(&pRoot->aData[pRoot->hdrOffset+8]);
+ pCur->eState = CURSOR_VALID;
+ rc = moveToChild(pCur, subpage);
+ }else{
+ pCur->eState = CURSOR_INVALID;
+ rc = SQLITE_EMPTY;
+ }
+ return rc;
+}
+
+/*
+** Move the cursor down to the left-most leaf entry beneath the
+** entry to which it is currently pointing.
+**
+** The left-most leaf is the one with the smallest key - the first
+** in ascending order.
+*/
+static int moveToLeftmost(BtCursor *pCur){
+ Pgno pgno;
+ int rc = SQLITE_OK;
+ MemPage *pPage;
+
+ assert( cursorOwnsBtShared(pCur) );
+ assert( pCur->eState==CURSOR_VALID );
+ while( rc==SQLITE_OK && !(pPage = pCur->pPage)->leaf ){
+ assert( pCur->ix<pPage->nCell );
+ pgno = get4byte(findCell(pPage, pCur->ix));
+ rc = moveToChild(pCur, pgno);
+ }
+ return rc;
+}
+
+/*
+** Move the cursor down to the right-most leaf entry beneath the
+** page to which it is currently pointing. Notice the difference
+** between moveToLeftmost() and moveToRightmost(). moveToLeftmost()
+** finds the left-most entry beneath the *entry* whereas moveToRightmost()
+** finds the right-most entry beneath the *page*.
+**
+** The right-most entry is the one with the largest key - the last
+** key in ascending order.
+*/
+static int moveToRightmost(BtCursor *pCur){
+ Pgno pgno;
+ int rc = SQLITE_OK;
+ MemPage *pPage = 0;
+
+ assert( cursorOwnsBtShared(pCur) );
+ assert( pCur->eState==CURSOR_VALID );
+ while( !(pPage = pCur->pPage)->leaf ){
+ pgno = get4byte(&pPage->aData[pPage->hdrOffset+8]);
+ pCur->ix = pPage->nCell;
+ rc = moveToChild(pCur, pgno);
+ if( rc ) return rc;
+ }
+ pCur->ix = pPage->nCell-1;
+ assert( pCur->info.nSize==0 );
+ assert( (pCur->curFlags & BTCF_ValidNKey)==0 );
+ return SQLITE_OK;
+}
+
+/* Move the cursor to the first entry in the table. Return SQLITE_OK
+** on success. Set *pRes to 0 if the cursor actually points to something
+** or set *pRes to 1 if the table is empty.
+*/
+SQLITE_PRIVATE int sqlite3BtreeFirst(BtCursor *pCur, int *pRes){
+ int rc;
+
+ assert( cursorOwnsBtShared(pCur) );
+ assert( sqlite3_mutex_held(pCur->pBtree->db->mutex) );
+ rc = moveToRoot(pCur);
+ if( rc==SQLITE_OK ){
+ assert( pCur->pPage->nCell>0 );
+ *pRes = 0;
+ rc = moveToLeftmost(pCur);
+ }else if( rc==SQLITE_EMPTY ){
+ assert( pCur->pgnoRoot==0 || pCur->pPage->nCell==0 );
+ *pRes = 1;
+ rc = SQLITE_OK;
+ }
+ return rc;
+}
+
+/* Move the cursor to the last entry in the table. Return SQLITE_OK
+** on success. Set *pRes to 0 if the cursor actually points to something
+** or set *pRes to 1 if the table is empty.
+*/
+SQLITE_PRIVATE int sqlite3BtreeLast(BtCursor *pCur, int *pRes){
+ int rc;
+
+ assert( cursorOwnsBtShared(pCur) );
+ assert( sqlite3_mutex_held(pCur->pBtree->db->mutex) );
+
+ /* If the cursor already points to the last entry, this is a no-op. */
+ if( CURSOR_VALID==pCur->eState && (pCur->curFlags & BTCF_AtLast)!=0 ){
+#ifdef SQLITE_DEBUG
+ /* This block serves to assert() that the cursor really does point
+ ** to the last entry in the b-tree. */
+ int ii;
+ for(ii=0; ii<pCur->iPage; ii++){
+ assert( pCur->aiIdx[ii]==pCur->apPage[ii]->nCell );
+ }
+ assert( pCur->ix==pCur->pPage->nCell-1 );
+ assert( pCur->pPage->leaf );
+#endif
+ *pRes = 0;
+ return SQLITE_OK;
+ }
+
+ rc = moveToRoot(pCur);
+ if( rc==SQLITE_OK ){
+ assert( pCur->eState==CURSOR_VALID );
+ *pRes = 0;
+ rc = moveToRightmost(pCur);
+ if( rc==SQLITE_OK ){
+ pCur->curFlags |= BTCF_AtLast;
+ }else{
+ pCur->curFlags &= ~BTCF_AtLast;
+ }
+ }else if( rc==SQLITE_EMPTY ){
+ assert( pCur->pgnoRoot==0 || pCur->pPage->nCell==0 );
+ *pRes = 1;
+ rc = SQLITE_OK;
+ }
+ return rc;
+}
+
+/* Move the cursor so that it points to an entry near the key
+** specified by pIdxKey or intKey. Return a success code.
+**
+** For INTKEY tables, the intKey parameter is used. pIdxKey
+** must be NULL. For index tables, pIdxKey is used and intKey
+** is ignored.
+**
+** If an exact match is not found, then the cursor is always
+** left pointing at a leaf page which would hold the entry if it
+** were present. The cursor might point to an entry that comes
+** before or after the key.
+**
+** An integer is written into *pRes which is the result of
+** comparing the key with the entry to which the cursor is
+** pointing. The meaning of the integer written into
+** *pRes is as follows:
+**
+** *pRes<0 The cursor is left pointing at an entry that
+** is smaller than intKey/pIdxKey or if the table is empty
+**                  and the cursor is therefore left pointing at nothing.
+**
+** *pRes==0 The cursor is left pointing at an entry that
+** exactly matches intKey/pIdxKey.
+**
+** *pRes>0 The cursor is left pointing at an entry that
+** is larger than intKey/pIdxKey.
+**
+** For index tables, the pIdxKey->eqSeen field is set to 1 if there
+** exists an entry in the table that exactly matches pIdxKey.
+*/
+SQLITE_PRIVATE int sqlite3BtreeMovetoUnpacked(
+ BtCursor *pCur, /* The cursor to be moved */
+ UnpackedRecord *pIdxKey, /* Unpacked index key */
+ i64 intKey, /* The table key */
+ int biasRight, /* If true, bias the search to the high end */
+ int *pRes /* Write search results here */
+){
+ int rc;
+ RecordCompare xRecordCompare;
+
+ assert( cursorOwnsBtShared(pCur) );
+ assert( sqlite3_mutex_held(pCur->pBtree->db->mutex) );
+ assert( pRes );
+ assert( (pIdxKey==0)==(pCur->pKeyInfo==0) );
+ assert( pCur->eState!=CURSOR_VALID || (pIdxKey==0)==(pCur->curIntKey!=0) );
+
+ /* If the cursor is already positioned at the point we are trying
+ ** to move to, then just return without doing any work */
+ if( pIdxKey==0
+ && pCur->eState==CURSOR_VALID && (pCur->curFlags & BTCF_ValidNKey)!=0
+ ){
+ if( pCur->info.nKey==intKey ){
+ *pRes = 0;
+ return SQLITE_OK;
+ }
+ if( pCur->info.nKey<intKey ){
+ if( (pCur->curFlags & BTCF_AtLast)!=0 ){
+ *pRes = -1;
+ return SQLITE_OK;
+ }
+ /* If the requested key is one more than the previous key, then
+ ** try to get there using sqlite3BtreeNext() rather than a full
+ ** binary search. This is an optimization only. The correct answer
+    ** is still obtained without this case, only a little more slowly. */
+ if( pCur->info.nKey+1==intKey ){
+ *pRes = 0;
+ rc = sqlite3BtreeNext(pCur, 0);
+ if( rc==SQLITE_OK ){
+ getCellInfo(pCur);
+ if( pCur->info.nKey==intKey ){
+ return SQLITE_OK;
+ }
+ }else if( rc==SQLITE_DONE ){
+ rc = SQLITE_OK;
+ }else{
+ return rc;
+ }
+ }
+ }
+ }
+
+ if( pIdxKey ){
+ xRecordCompare = sqlite3VdbeFindCompare(pIdxKey);
+ pIdxKey->errCode = 0;
+ assert( pIdxKey->default_rc==1
+ || pIdxKey->default_rc==0
+ || pIdxKey->default_rc==-1
+ );
+ }else{
+ xRecordCompare = 0; /* All keys are integers */
+ }
+
+ rc = moveToRoot(pCur);
+ if( rc ){
+ if( rc==SQLITE_EMPTY ){
+ assert( pCur->pgnoRoot==0 || pCur->pPage->nCell==0 );
+ *pRes = -1;
+ return SQLITE_OK;
+ }
+ return rc;
+ }
+ assert( pCur->pPage );
+ assert( pCur->pPage->isInit );
+ assert( pCur->eState==CURSOR_VALID );
+ assert( pCur->pPage->nCell > 0 );
+ assert( pCur->iPage==0 || pCur->apPage[0]->intKey==pCur->curIntKey );
+ assert( pCur->curIntKey || pIdxKey );
+ for(;;){
+ int lwr, upr, idx, c;
+ Pgno chldPg;
+ MemPage *pPage = pCur->pPage;
+ u8 *pCell; /* Pointer to current cell in pPage */
+
+ /* pPage->nCell must be greater than zero. If this is the root-page
+    ** the cursor would have been INVALID above and this for(;;) loop would
+    ** not have run. If this is not the root-page, then the moveToChild() routine
+ ** would have already detected db corruption. Similarly, pPage must
+ ** be the right kind (index or table) of b-tree page. Otherwise
+ ** a moveToChild() or moveToRoot() call would have detected corruption. */
+ assert( pPage->nCell>0 );
+ assert( pPage->intKey==(pIdxKey==0) );
+ lwr = 0;
+ upr = pPage->nCell-1;
+ assert( biasRight==0 || biasRight==1 );
+ idx = upr>>(1-biasRight); /* idx = biasRight ? upr : (lwr+upr)/2; */
+ pCur->ix = (u16)idx;
+ if( xRecordCompare==0 ){
+ for(;;){
+ i64 nCellKey;
+ pCell = findCellPastPtr(pPage, idx);
+ if( pPage->intKeyLeaf ){
+ while( 0x80 <= *(pCell++) ){
+ if( pCell>=pPage->aDataEnd ){
+ return SQLITE_CORRUPT_PAGE(pPage);
+ }
+ }
+ }
+ getVarint(pCell, (u64*)&nCellKey);
+ if( nCellKey<intKey ){
+ lwr = idx+1;
+ if( lwr>upr ){ c = -1; break; }
+ }else if( nCellKey>intKey ){
+ upr = idx-1;
+ if( lwr>upr ){ c = +1; break; }
+ }else{
+ assert( nCellKey==intKey );
+ pCur->ix = (u16)idx;
+ if( !pPage->leaf ){
+ lwr = idx;
+ goto moveto_next_layer;
+ }else{
+ pCur->curFlags |= BTCF_ValidNKey;
+ pCur->info.nKey = nCellKey;
+ pCur->info.nSize = 0;
+ *pRes = 0;
+ return SQLITE_OK;
+ }
+ }
+ assert( lwr+upr>=0 );
+ idx = (lwr+upr)>>1; /* idx = (lwr+upr)/2; */
+ }
+ }else{
+ for(;;){
+ int nCell; /* Size of the pCell cell in bytes */
+ pCell = findCellPastPtr(pPage, idx);
+
+ /* The maximum supported page-size is 65536 bytes. This means that
+ ** the maximum number of record bytes stored on an index B-Tree
+ ** page is less than 16384 bytes and may be stored as a 2-byte
+ ** varint. This information is used to attempt to avoid parsing
+ ** the entire cell by checking for the cases where the record is
+ ** stored entirely within the b-tree page by inspecting the first
+ ** 2 bytes of the cell.
+ */
+ nCell = pCell[0];
+ if( nCell<=pPage->max1bytePayload ){
+ /* This branch runs if the record-size field of the cell is a
+ ** single byte varint and the record fits entirely on the main
+ ** b-tree page. */
+ testcase( pCell+nCell+1==pPage->aDataEnd );
+ c = xRecordCompare(nCell, (void*)&pCell[1], pIdxKey);
+ }else if( !(pCell[1] & 0x80)
+ && (nCell = ((nCell&0x7f)<<7) + pCell[1])<=pPage->maxLocal
+ ){
+ /* The record-size field is a 2 byte varint and the record
+ ** fits entirely on the main b-tree page. */
+ testcase( pCell+nCell+2==pPage->aDataEnd );
+ c = xRecordCompare(nCell, (void*)&pCell[2], pIdxKey);
+ }else{
+ /* The record flows over onto one or more overflow pages. In
+ ** this case the whole cell needs to be parsed, a buffer allocated
+ ** and accessPayload() used to retrieve the record into the
+ ** buffer before VdbeRecordCompare() can be called.
+ **
+ ** If the record is corrupt, the xRecordCompare routine may read
+ ** up to two varints past the end of the buffer. An extra 18
+ ** bytes of padding is allocated at the end of the buffer in
+ ** case this happens. */
+ void *pCellKey;
+ u8 * const pCellBody = pCell - pPage->childPtrSize;
+ const int nOverrun = 18; /* Size of the overrun padding */
+ pPage->xParseCell(pPage, pCellBody, &pCur->info);
+ nCell = (int)pCur->info.nKey;
+ testcase( nCell<0 ); /* True if key size is 2^32 or more */
+ testcase( nCell==0 ); /* Invalid key size: 0x80 0x80 0x00 */
+ testcase( nCell==1 ); /* Invalid key size: 0x80 0x80 0x01 */
+ testcase( nCell==2 ); /* Minimum legal index key size */
+ if( nCell<2 || nCell/pCur->pBt->usableSize>pCur->pBt->nPage ){
+ rc = SQLITE_CORRUPT_PAGE(pPage);
+ goto moveto_finish;
+ }
+ pCellKey = sqlite3Malloc( nCell+nOverrun );
+ if( pCellKey==0 ){
+ rc = SQLITE_NOMEM_BKPT;
+ goto moveto_finish;
+ }
+ pCur->ix = (u16)idx;
+ rc = accessPayload(pCur, 0, nCell, (unsigned char*)pCellKey, 0);
+ memset(((u8*)pCellKey)+nCell,0,nOverrun); /* Fix uninit warnings */
+ pCur->curFlags &= ~BTCF_ValidOvfl;
+ if( rc ){
+ sqlite3_free(pCellKey);
+ goto moveto_finish;
+ }
+ c = sqlite3VdbeRecordCompare(nCell, pCellKey, pIdxKey);
+ sqlite3_free(pCellKey);
+ }
+ assert(
+ (pIdxKey->errCode!=SQLITE_CORRUPT || c==0)
+ && (pIdxKey->errCode!=SQLITE_NOMEM || pCur->pBtree->db->mallocFailed)
+ );
+ if( c<0 ){
+ lwr = idx+1;
+ }else if( c>0 ){
+ upr = idx-1;
+ }else{
+ assert( c==0 );
+ *pRes = 0;
+ rc = SQLITE_OK;
+ pCur->ix = (u16)idx;
+ if( pIdxKey->errCode ) rc = SQLITE_CORRUPT_BKPT;
+ goto moveto_finish;
+ }
+ if( lwr>upr ) break;
+ assert( lwr+upr>=0 );
+ idx = (lwr+upr)>>1; /* idx = (lwr+upr)/2 */
+ }
+ }
+ assert( lwr==upr+1 || (pPage->intKey && !pPage->leaf) );
+ assert( pPage->isInit );
+ if( pPage->leaf ){
+ assert( pCur->ix<pCur->pPage->nCell );
+ pCur->ix = (u16)idx;
+ *pRes = c;
+ rc = SQLITE_OK;
+ goto moveto_finish;
+ }
+moveto_next_layer:
+ if( lwr>=pPage->nCell ){
+ chldPg = get4byte(&pPage->aData[pPage->hdrOffset+8]);
+ }else{
+ chldPg = get4byte(findCell(pPage, lwr));
+ }
+ pCur->ix = (u16)lwr;
+ rc = moveToChild(pCur, chldPg);
+ if( rc ) break;
+ }
+moveto_finish:
+ pCur->info.nSize = 0;
+ assert( (pCur->curFlags & BTCF_ValidOvfl)==0 );
+ return rc;
+}
+
+
+/*
+** Return TRUE if the cursor is not pointing at an entry of the table.
+**
+** TRUE will be returned after a call to sqlite3BtreeNext() moves
+** past the last entry in the table or sqlite3BtreePrevious() moves past
+** the first entry. TRUE is also returned if the table is empty.
+*/
+SQLITE_PRIVATE int sqlite3BtreeEof(BtCursor *pCur){
+ /* TODO: What if the cursor is in CURSOR_REQUIRESEEK but all table entries
+ ** have been deleted? This API will need to change to return an error code
+ ** as well as the boolean result value.
+ */
+ return (CURSOR_VALID!=pCur->eState);
+}
+
+/*
+** Return an estimate for the number of rows in the table that pCur is
+** pointing to. Return a negative number if no estimate is currently
+** available.
+*/
+SQLITE_PRIVATE i64 sqlite3BtreeRowCountEst(BtCursor *pCur){
+ i64 n;
+ u8 i;
+
+ assert( cursorOwnsBtShared(pCur) );
+ assert( sqlite3_mutex_held(pCur->pBtree->db->mutex) );
+
+ /* Currently this interface is only called by the OP_IfSmaller
+  ** opcode, and in that case the cursor will always be valid and
+ ** will always point to a leaf node. */
+ if( NEVER(pCur->eState!=CURSOR_VALID) ) return -1;
+ if( NEVER(pCur->pPage->leaf==0) ) return -1;
+
+ n = pCur->pPage->nCell;
+ for(i=0; i<pCur->iPage; i++){
+ n *= pCur->apPage[i]->nCell;
+ }
+ return n;
+}
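+
+/* Illustrative example of the estimate above (hypothetical cell counts):
+** if the cursor sits on a leaf holding 30 cells and the path from the
+** root passes through pages holding 50 and 45 cells, the loop computes
+** 30*50*45 == 67500 as the row-count estimate.
+*/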
+
+/*
+** Advance the cursor to the next entry in the database.
+** Return value:
+**
+** SQLITE_OK success
+** SQLITE_DONE cursor is already pointing at the last element
+** otherwise some kind of error occurred
+**
+** The main entry point is sqlite3BtreeNext(). That routine is optimized
+** for the common case of merely incrementing the cell counter BtCursor.aiIdx
+** to the next cell on the current page. The (slower) btreeNext() helper
+** routine is called when it is necessary to move to a different page or
+** to restore the cursor.
+**
+** If bit 0x01 of the F argument in sqlite3BtreeNext(C,F) is 1, then the
+** cursor corresponds to an SQL index and this routine could have been
+** skipped if the SQL index had been a unique index. The F argument
+** is a hint to the implementation. The native SQLite btree implementation
+** does not use this hint, but COMDB2 does.
+*/
+static SQLITE_NOINLINE int btreeNext(BtCursor *pCur){
+ int rc;
+ int idx;
+ MemPage *pPage;
+
+ assert( cursorOwnsBtShared(pCur) );
+ if( pCur->eState!=CURSOR_VALID ){
+ assert( (pCur->curFlags & BTCF_ValidOvfl)==0 );
+ rc = restoreCursorPosition(pCur);
+ if( rc!=SQLITE_OK ){
+ return rc;
+ }
+ if( CURSOR_INVALID==pCur->eState ){
+ return SQLITE_DONE;
+ }
+ if( pCur->eState==CURSOR_SKIPNEXT ){
+ pCur->eState = CURSOR_VALID;
+ if( pCur->skipNext>0 ) return SQLITE_OK;
+ }
+ }
+
+ pPage = pCur->pPage;
+ idx = ++pCur->ix;
+ if( !pPage->isInit ){
+ /* The only known way for this to happen is for there to be a
+ ** recursive SQL function that does a DELETE operation as part of a
+ ** SELECT which deletes content out from under an active cursor
+ ** in a corrupt database file where the table being DELETE-ed from
+ ** has pages in common with the table being queried. See TH3
+ ** module cov1/btree78.test testcase 220 (2018-06-08) for an
+ ** example. */
+ return SQLITE_CORRUPT_BKPT;
+ }
+
+ /* If the database file is corrupt, it is possible for the value of idx
+ ** to be invalid here. This can only occur if a second cursor modifies
+ ** the page while cursor pCur is holding a reference to it. Which can
+ ** only happen if the database is corrupt in such a way as to link the
+ ** page into more than one b-tree structure.
+ **
+  ** Update 2019-12-23: this appears to no longer be possible after the
+ ** addition of anotherValidCursor() condition on balance_deeper(). */
+ harmless( idx>pPage->nCell );
+
+ if( idx>=pPage->nCell ){
+ if( !pPage->leaf ){
+ rc = moveToChild(pCur, get4byte(&pPage->aData[pPage->hdrOffset+8]));
+ if( rc ) return rc;
+ return moveToLeftmost(pCur);
+ }
+ do{
+ if( pCur->iPage==0 ){
+ pCur->eState = CURSOR_INVALID;
+ return SQLITE_DONE;
+ }
+ moveToParent(pCur);
+ pPage = pCur->pPage;
+ }while( pCur->ix>=pPage->nCell );
+ if( pPage->intKey ){
+ return sqlite3BtreeNext(pCur, 0);
+ }else{
+ return SQLITE_OK;
+ }
+ }
+ if( pPage->leaf ){
+ return SQLITE_OK;
+ }else{
+ return moveToLeftmost(pCur);
+ }
+}
+SQLITE_PRIVATE int sqlite3BtreeNext(BtCursor *pCur, int flags){
+ MemPage *pPage;
+ UNUSED_PARAMETER( flags ); /* Used in COMDB2 but not native SQLite */
+ assert( cursorOwnsBtShared(pCur) );
+ assert( flags==0 || flags==1 );
+ pCur->info.nSize = 0;
+ pCur->curFlags &= ~(BTCF_ValidNKey|BTCF_ValidOvfl);
+ if( pCur->eState!=CURSOR_VALID ) return btreeNext(pCur);
+ pPage = pCur->pPage;
+ if( (++pCur->ix)>=pPage->nCell ){
+ pCur->ix--;
+ return btreeNext(pCur);
+ }
+ if( pPage->leaf ){
+ return SQLITE_OK;
+ }else{
+ return moveToLeftmost(pCur);
+ }
+}
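+
+/* Illustrative sketch of a forward scan built on the interfaces above
+** (hypothetical caller code, not part of this file):
+**
+**     int res;
+**     int rc = sqlite3BtreeFirst(pCur, &res);
+**     while( rc==SQLITE_OK && res==0 ){
+**       ...process the entry under pCur...
+**       rc = sqlite3BtreeNext(pCur, 0);
+**       if( rc==SQLITE_DONE ){ rc = SQLITE_OK; break; }
+**     }
+*/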
+
+/*
+** Step the cursor back to the previous entry in the database.
+** Return values:
+**
+** SQLITE_OK success
+** SQLITE_DONE the cursor is already on the first element of the table
+** otherwise some kind of error occurred
+**
+** The main entry point is sqlite3BtreePrevious(). That routine is optimized
+** for the common case of merely decrementing the cell counter BtCursor.aiIdx
+** to the previous cell on the current page. The (slower) btreePrevious()
+** helper routine is called when it is necessary to move to a different page
+** or to restore the cursor.
+**
+** If bit 0x01 of the F argument to sqlite3BtreePrevious(C,F) is 1, then
+** the cursor corresponds to an SQL index and this routine could have been
+** skipped if the SQL index had been a unique index. The F argument is a
+** hint to the implementation. The native SQLite btree implementation does not
+** use this hint, but COMDB2 does.
+*/
+static SQLITE_NOINLINE int btreePrevious(BtCursor *pCur){
+ int rc;
+ MemPage *pPage;
+
+ assert( cursorOwnsBtShared(pCur) );
+ assert( (pCur->curFlags & (BTCF_AtLast|BTCF_ValidOvfl|BTCF_ValidNKey))==0 );
+ assert( pCur->info.nSize==0 );
+ if( pCur->eState!=CURSOR_VALID ){
+ rc = restoreCursorPosition(pCur);
+ if( rc!=SQLITE_OK ){
+ return rc;
+ }
+ if( CURSOR_INVALID==pCur->eState ){
+ return SQLITE_DONE;
+ }
+ if( CURSOR_SKIPNEXT==pCur->eState ){
+ pCur->eState = CURSOR_VALID;
+ if( pCur->skipNext<0 ) return SQLITE_OK;
+ }
+ }
+
+ pPage = pCur->pPage;
+ assert( pPage->isInit );
+ if( !pPage->leaf ){
+ int idx = pCur->ix;
+ rc = moveToChild(pCur, get4byte(findCell(pPage, idx)));
+ if( rc ) return rc;
+ rc = moveToRightmost(pCur);
+ }else{
+ while( pCur->ix==0 ){
+ if( pCur->iPage==0 ){
+ pCur->eState = CURSOR_INVALID;
+ return SQLITE_DONE;
+ }
+ moveToParent(pCur);
+ }
+ assert( pCur->info.nSize==0 );
+ assert( (pCur->curFlags & (BTCF_ValidOvfl))==0 );
+
+ pCur->ix--;
+ pPage = pCur->pPage;
+ if( pPage->intKey && !pPage->leaf ){
+ rc = sqlite3BtreePrevious(pCur, 0);
+ }else{
+ rc = SQLITE_OK;
+ }
+ }
+ return rc;
+}
+SQLITE_PRIVATE int sqlite3BtreePrevious(BtCursor *pCur, int flags){
+ assert( cursorOwnsBtShared(pCur) );
+ assert( flags==0 || flags==1 );
+ UNUSED_PARAMETER( flags ); /* Used in COMDB2 but not native SQLite */
+ pCur->curFlags &= ~(BTCF_AtLast|BTCF_ValidOvfl|BTCF_ValidNKey);
+ pCur->info.nSize = 0;
+ if( pCur->eState!=CURSOR_VALID
+ || pCur->ix==0
+ || pCur->pPage->leaf==0
+ ){
+ return btreePrevious(pCur);
+ }
+ pCur->ix--;
+ return SQLITE_OK;
+}
+
+/*
+** Allocate a new page from the database file.
+**
+** The new page is marked as dirty. (In other words, sqlite3PagerWrite()
+** has already been called on the new page.) The new page has also
+** been referenced and the calling routine is responsible for calling
+** sqlite3PagerUnref() on the new page when it is done.
+**
+** SQLITE_OK is returned on success. Any other return value indicates
+** an error. *ppPage is set to NULL in the event of an error.
+**
+** If the "nearby" parameter is not 0, then an effort is made to
+** locate a page close to the page number "nearby". This can be used in an
+** attempt to keep related pages close to each other in the database file,
+** which in turn can make database access faster.
+**
+** If the eMode parameter is BTALLOC_EXACT and the nearby page exists
+** anywhere on the free-list, then it is guaranteed to be returned. If
+** eMode is BTALLOC_LE then the page returned will be less than or equal
+** to nearby if any such page exists. If eMode is BTALLOC_ANY then there
+** are no restrictions on which page is returned.
+*/
+static int allocateBtreePage(
+ BtShared *pBt, /* The btree */
+ MemPage **ppPage, /* Store pointer to the allocated page here */
+ Pgno *pPgno, /* Store the page number here */
+ Pgno nearby, /* Search for a page near this one */
+  u8 eMode               /* BTALLOC_EXACT, BTALLOC_LE, or BTALLOC_ANY */
+){
+ MemPage *pPage1;
+ int rc;
+ u32 n; /* Number of pages on the freelist */
+ u32 k; /* Number of leaves on the trunk of the freelist */
+ MemPage *pTrunk = 0;
+ MemPage *pPrevTrunk = 0;
+ Pgno mxPage; /* Total size of the database file */
+
+ assert( sqlite3_mutex_held(pBt->mutex) );
+ assert( eMode==BTALLOC_ANY || (nearby>0 && IfNotOmitAV(pBt->autoVacuum)) );
+ pPage1 = pBt->pPage1;
+ mxPage = btreePagecount(pBt);
+ /* EVIDENCE-OF: R-05119-02637 The 4-byte big-endian integer at offset 36
+  ** stores the total number of pages on the freelist. */
+ n = get4byte(&pPage1->aData[36]);
+ testcase( n==mxPage-1 );
+ if( n>=mxPage ){
+ return SQLITE_CORRUPT_BKPT;
+ }
+ if( n>0 ){
+ /* There are pages on the freelist. Reuse one of those pages. */
+ Pgno iTrunk;
+ u8 searchList = 0; /* If the free-list must be searched for 'nearby' */
+ u32 nSearch = 0; /* Count of the number of search attempts */
+
+ /* If eMode==BTALLOC_EXACT and a query of the pointer-map
+ ** shows that the page 'nearby' is somewhere on the free-list, then
+  ** the entire free-list will be searched for that page.
+ */
+#ifndef SQLITE_OMIT_AUTOVACUUM
+ if( eMode==BTALLOC_EXACT ){
+ if( nearby<=mxPage ){
+ u8 eType;
+ assert( nearby>0 );
+ assert( pBt->autoVacuum );
+ rc = ptrmapGet(pBt, nearby, &eType, 0);
+ if( rc ) return rc;
+ if( eType==PTRMAP_FREEPAGE ){
+ searchList = 1;
+ }
+ }
+ }else if( eMode==BTALLOC_LE ){
+ searchList = 1;
+ }
+#endif
+
+ /* Decrement the free-list count by 1. Set iTrunk to the index of the
+  ** first free-list trunk page. pPrevTrunk is initially NULL.
+ */
+ rc = sqlite3PagerWrite(pPage1->pDbPage);
+ if( rc ) return rc;
+ put4byte(&pPage1->aData[36], n-1);
+
+ /* The code within this loop is run only once if the 'searchList' variable
+ ** is not true. Otherwise, it runs once for each trunk-page on the
+ ** free-list until the page 'nearby' is located (eMode==BTALLOC_EXACT)
+    ** or until a page less than 'nearby' is located (eMode==BTALLOC_LE).
+ */
+ do {
+ pPrevTrunk = pTrunk;
+ if( pPrevTrunk ){
+ /* EVIDENCE-OF: R-01506-11053 The first integer on a freelist trunk page
+ ** is the page number of the next freelist trunk page in the list or
+ ** zero if this is the last freelist trunk page. */
+ iTrunk = get4byte(&pPrevTrunk->aData[0]);
+ }else{
+ /* EVIDENCE-OF: R-59841-13798 The 4-byte big-endian integer at offset 32
+ ** stores the page number of the first page of the freelist, or zero if
+ ** the freelist is empty. */
+ iTrunk = get4byte(&pPage1->aData[32]);
+ }
+ testcase( iTrunk==mxPage );
+ if( iTrunk>mxPage || nSearch++ > n ){
+ rc = SQLITE_CORRUPT_PGNO(pPrevTrunk ? pPrevTrunk->pgno : 1);
+ }else{
+ rc = btreeGetUnusedPage(pBt, iTrunk, &pTrunk, 0);
+ }
+ if( rc ){
+ pTrunk = 0;
+ goto end_allocate_page;
+ }
+ assert( pTrunk!=0 );
+ assert( pTrunk->aData!=0 );
+ /* EVIDENCE-OF: R-13523-04394 The second integer on a freelist trunk page
+ ** is the number of leaf page pointers to follow. */
+ k = get4byte(&pTrunk->aData[4]);
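+      /* For reference, a freelist trunk page is laid out as follows,
+      ** with all integers 4-byte big-endian:
+      **
+      **     bytes 0..3        page number of the next trunk page (0 if none)
+      **     bytes 4..7        number k of leaf pointers that follow
+      **     bytes 8..8+4*k-1  page numbers of k free leaf pages
+      */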
+ if( k==0 && !searchList ){
+ /* The trunk has no leaves and the list is not being searched.
+ ** So extract the trunk page itself and use it as the newly
+ ** allocated page */
+ assert( pPrevTrunk==0 );
+ rc = sqlite3PagerWrite(pTrunk->pDbPage);
+ if( rc ){
+ goto end_allocate_page;
+ }
+ *pPgno = iTrunk;
+ memcpy(&pPage1->aData[32], &pTrunk->aData[0], 4);
+ *ppPage = pTrunk;
+ pTrunk = 0;
+ TRACE(("ALLOCATE: %d trunk - %d free pages left\n", *pPgno, n-1));
+ }else if( k>(u32)(pBt->usableSize/4 - 2) ){
+ /* Value of k is out of range. Database corruption */
+ rc = SQLITE_CORRUPT_PGNO(iTrunk);
+ goto end_allocate_page;
+#ifndef SQLITE_OMIT_AUTOVACUUM
+ }else if( searchList
+ && (nearby==iTrunk || (iTrunk<nearby && eMode==BTALLOC_LE))
+ ){
+ /* The list is being searched and this trunk page is the page
+ ** to allocate, regardless of whether it has leaves.
+ */
+ *pPgno = iTrunk;
+ *ppPage = pTrunk;
+ searchList = 0;
+ rc = sqlite3PagerWrite(pTrunk->pDbPage);
+ if( rc ){
+ goto end_allocate_page;
+ }
+ if( k==0 ){
+ if( !pPrevTrunk ){
+ memcpy(&pPage1->aData[32], &pTrunk->aData[0], 4);
+ }else{
+ rc = sqlite3PagerWrite(pPrevTrunk->pDbPage);
+ if( rc!=SQLITE_OK ){
+ goto end_allocate_page;
+ }
+ memcpy(&pPrevTrunk->aData[0], &pTrunk->aData[0], 4);
+ }
+ }else{
+ /* The trunk page is required by the caller but it contains
+ ** pointers to free-list leaves. The first leaf becomes a trunk
+ ** page in this case.
+ */
+ MemPage *pNewTrunk;
+ Pgno iNewTrunk = get4byte(&pTrunk->aData[8]);
+ if( iNewTrunk>mxPage ){
+ rc = SQLITE_CORRUPT_PGNO(iTrunk);
+ goto end_allocate_page;
+ }
+ testcase( iNewTrunk==mxPage );
+ rc = btreeGetUnusedPage(pBt, iNewTrunk, &pNewTrunk, 0);
+ if( rc!=SQLITE_OK ){
+ goto end_allocate_page;
+ }
+ rc = sqlite3PagerWrite(pNewTrunk->pDbPage);
+ if( rc!=SQLITE_OK ){
+ releasePage(pNewTrunk);
+ goto end_allocate_page;
+ }
+ memcpy(&pNewTrunk->aData[0], &pTrunk->aData[0], 4);
+ put4byte(&pNewTrunk->aData[4], k-1);
+ memcpy(&pNewTrunk->aData[8], &pTrunk->aData[12], (k-1)*4);
+ releasePage(pNewTrunk);
+ if( !pPrevTrunk ){
+ assert( sqlite3PagerIswriteable(pPage1->pDbPage) );
+ put4byte(&pPage1->aData[32], iNewTrunk);
+ }else{
+ rc = sqlite3PagerWrite(pPrevTrunk->pDbPage);
+ if( rc ){
+ goto end_allocate_page;
+ }
+ put4byte(&pPrevTrunk->aData[0], iNewTrunk);
+ }
+ }
+ pTrunk = 0;
+ TRACE(("ALLOCATE: %d trunk - %d free pages left\n", *pPgno, n-1));
+#endif
+ }else if( k>0 ){
+ /* Extract a leaf from the trunk */
+ u32 closest;
+ Pgno iPage;
+ unsigned char *aData = pTrunk->aData;
+ if( nearby>0 ){
+ u32 i;
+ closest = 0;
+ if( eMode==BTALLOC_LE ){
+ for(i=0; i<k; i++){
+ iPage = get4byte(&aData[8+i*4]);
+ if( iPage<=nearby ){
+ closest = i;
+ break;
+ }
+ }
+ }else{
+ int dist;
+ dist = sqlite3AbsInt32(get4byte(&aData[8]) - nearby);
+ for(i=1; i<k; i++){
+ int d2 = sqlite3AbsInt32(get4byte(&aData[8+i*4]) - nearby);
+ if( d2<dist ){
+ closest = i;
+ dist = d2;
+ }
+ }
+ }
+ }else{
+ closest = 0;
+ }
+
+ iPage = get4byte(&aData[8+closest*4]);
+ testcase( iPage==mxPage );
+ if( iPage>mxPage ){
+ rc = SQLITE_CORRUPT_PGNO(iTrunk);
+ goto end_allocate_page;
+ }
+ testcase( iPage==mxPage );
+ if( !searchList
+ || (iPage==nearby || (iPage<nearby && eMode==BTALLOC_LE))
+ ){
+ int noContent;
+ *pPgno = iPage;
+ TRACE(("ALLOCATE: %d was leaf %d of %d on trunk %d"
+ ": %d more free pages\n",
+ *pPgno, closest+1, k, pTrunk->pgno, n-1));
+ rc = sqlite3PagerWrite(pTrunk->pDbPage);
+ if( rc ) goto end_allocate_page;
+ if( closest<k-1 ){
+ memcpy(&aData[8+closest*4], &aData[4+k*4], 4);
+ }
+ put4byte(&aData[4], k-1);
+ noContent = !btreeGetHasContent(pBt, *pPgno)? PAGER_GET_NOCONTENT : 0;
+ rc = btreeGetUnusedPage(pBt, *pPgno, ppPage, noContent);
+ if( rc==SQLITE_OK ){
+ rc = sqlite3PagerWrite((*ppPage)->pDbPage);
+ if( rc!=SQLITE_OK ){
+ releasePage(*ppPage);
+ *ppPage = 0;
+ }
+ }
+ searchList = 0;
+ }
+ }
+ releasePage(pPrevTrunk);
+ pPrevTrunk = 0;
+ }while( searchList );
+ }else{
+ /* There are no pages on the freelist, so append a new page to the
+ ** database image.
+ **
+ ** Normally, new pages allocated by this block can be requested from the
+ ** pager layer with the 'no-content' flag set. This prevents the pager
+    ** from trying to read the page's content from disk. However, if the
+ ** current transaction has already run one or more incremental-vacuum
+ ** steps, then the page we are about to allocate may contain content
+ ** that is required in the event of a rollback. In this case, do
+ ** not set the no-content flag. This causes the pager to load and journal
+ ** the current page content before overwriting it.
+ **
+ ** Note that the pager will not actually attempt to load or journal
+ ** content for any page that really does lie past the end of the database
+ ** file on disk. So the effects of disabling the no-content optimization
+ ** here are confined to those pages that lie between the end of the
+ ** database image and the end of the database file.
+ */
+ int bNoContent = (0==IfNotOmitAV(pBt->bDoTruncate))? PAGER_GET_NOCONTENT:0;
+
+ rc = sqlite3PagerWrite(pBt->pPage1->pDbPage);
+ if( rc ) return rc;
+ pBt->nPage++;
+ if( pBt->nPage==PENDING_BYTE_PAGE(pBt) ) pBt->nPage++;
+
+#ifndef SQLITE_OMIT_AUTOVACUUM
+ if( pBt->autoVacuum && PTRMAP_ISPAGE(pBt, pBt->nPage) ){
+ /* If *pPgno refers to a pointer-map page, allocate two new pages
+ ** at the end of the file instead of one. The first allocated page
+ ** becomes a new pointer-map page, the second is used by the caller.
+ */
+ MemPage *pPg = 0;
+ TRACE(("ALLOCATE: %d from end of file (pointer-map page)\n", pBt->nPage));
+ assert( pBt->nPage!=PENDING_BYTE_PAGE(pBt) );
+ rc = btreeGetUnusedPage(pBt, pBt->nPage, &pPg, bNoContent);
+ if( rc==SQLITE_OK ){
+ rc = sqlite3PagerWrite(pPg->pDbPage);
+ releasePage(pPg);
+ }
+ if( rc ) return rc;
+ pBt->nPage++;
+ if( pBt->nPage==PENDING_BYTE_PAGE(pBt) ){ pBt->nPage++; }
+ }
+#endif
+ put4byte(28 + (u8*)pBt->pPage1->aData, pBt->nPage);
+ *pPgno = pBt->nPage;
+
+ assert( *pPgno!=PENDING_BYTE_PAGE(pBt) );
+ rc = btreeGetUnusedPage(pBt, *pPgno, ppPage, bNoContent);
+ if( rc ) return rc;
+ rc = sqlite3PagerWrite((*ppPage)->pDbPage);
+ if( rc!=SQLITE_OK ){
+ releasePage(*ppPage);
+ *ppPage = 0;
+ }
+ TRACE(("ALLOCATE: %d from end of file\n", *pPgno));
+ }
+
+ assert( CORRUPT_DB || *pPgno!=PENDING_BYTE_PAGE(pBt) );
+
+end_allocate_page:
+ releasePage(pTrunk);
+ releasePage(pPrevTrunk);
+ assert( rc!=SQLITE_OK || sqlite3PagerPageRefcount((*ppPage)->pDbPage)<=1 );
+ assert( rc!=SQLITE_OK || (*ppPage)->isInit==0 );
+ return rc;
+}
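+
+/* Illustrative sketch of a BTALLOC_EXACT request (hypothetical caller
+** code, not part of this file):
+**
+**     MemPage *pFreePg = 0;
+**     Pgno iFreePg = 0;
+**     rc = allocateBtreePage(pBt, &pFreePg, &iFreePg, iWanted, BTALLOC_EXACT);
+**     if( rc==SQLITE_OK ){
+**       assert( iFreePg==iWanted );
+**       releasePage(pFreePg);
+**     }
+**
+** The assert() holds because, per the contract above, a BTALLOC_EXACT
+** request for a page that is on the free-list always returns that page.
+*/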
+
+/*
+** This function is used to add page iPage to the database file free-list.
+** It is assumed that the page is not already a part of the free-list.
+**
+** The value passed as the second argument to this function is optional.
+** If the caller happens to have a pointer to the MemPage object
+** corresponding to page iPage handy, it may pass it as the second value.
+** Otherwise, it may pass NULL.
+**
+** If a pointer to a MemPage object is passed as the second argument,
+** its reference count is not altered by this function.
+*/
+static int freePage2(BtShared *pBt, MemPage *pMemPage, Pgno iPage){
+ MemPage *pTrunk = 0; /* Free-list trunk page */
+ Pgno iTrunk = 0; /* Page number of free-list trunk page */
+ MemPage *pPage1 = pBt->pPage1; /* Local reference to page 1 */
+ MemPage *pPage; /* Page being freed. May be NULL. */
+ int rc; /* Return Code */
+ u32 nFree; /* Initial number of pages on free-list */
+
+ assert( sqlite3_mutex_held(pBt->mutex) );
+ assert( CORRUPT_DB || iPage>1 );
+ assert( !pMemPage || pMemPage->pgno==iPage );
+
+ if( iPage<2 || iPage>pBt->nPage ){
+ return SQLITE_CORRUPT_BKPT;
+ }
+ if( pMemPage ){
+ pPage = pMemPage;
+ sqlite3PagerRef(pPage->pDbPage);
+ }else{
+ pPage = btreePageLookup(pBt, iPage);
+ }
+
+ /* Increment the free page count on pPage1 */
+ rc = sqlite3PagerWrite(pPage1->pDbPage);
+ if( rc ) goto freepage_out;
+ nFree = get4byte(&pPage1->aData[36]);
+ put4byte(&pPage1->aData[36], nFree+1);
+
+ if( pBt->btsFlags & BTS_SECURE_DELETE ){
+ /* If the secure_delete option is enabled, then
+ ** always fully overwrite deleted information with zeros.
+ */
+ if( (!pPage && ((rc = btreeGetPage(pBt, iPage, &pPage, 0))!=0) )
+ || ((rc = sqlite3PagerWrite(pPage->pDbPage))!=0)
+ ){
+ goto freepage_out;
+ }
+ memset(pPage->aData, 0, pPage->pBt->pageSize);
+ }
+
+ /* If the database supports auto-vacuum, write an entry in the pointer-map
+ ** to indicate that the page is free.
+ */
+ if( ISAUTOVACUUM ){
+ ptrmapPut(pBt, iPage, PTRMAP_FREEPAGE, 0, &rc);
+ if( rc ) goto freepage_out;
+ }
+
+ /* Now manipulate the actual database free-list structure. There are two
+ ** possibilities. If the free-list is currently empty, or if the first
+ ** trunk page in the free-list is full, then this page will become a
+ ** new free-list trunk page. Otherwise, it will become a leaf of the
+ ** first trunk page in the current free-list. This block tests if it
+ ** is possible to add the page as a new free-list leaf.
+ */
+ if( nFree!=0 ){
+ u32 nLeaf; /* Initial number of leaf cells on trunk page */
+
+ iTrunk = get4byte(&pPage1->aData[32]);
+ if( iTrunk>btreePagecount(pBt) ){
+ rc = SQLITE_CORRUPT_BKPT;
+ goto freepage_out;
+ }
+ rc = btreeGetPage(pBt, iTrunk, &pTrunk, 0);
+ if( rc!=SQLITE_OK ){
+ goto freepage_out;
+ }
+
+ nLeaf = get4byte(&pTrunk->aData[4]);
+ assert( pBt->usableSize>32 );
+ if( nLeaf > (u32)pBt->usableSize/4 - 2 ){
+ rc = SQLITE_CORRUPT_BKPT;
+ goto freepage_out;
+ }
+ if( nLeaf < (u32)pBt->usableSize/4 - 8 ){
+ /* In this case there is room on the trunk page to insert the page
+ ** being freed as a new leaf.
+ **
+ ** Note that the trunk page is not really full until it contains
+ ** usableSize/4 - 2 entries, not usableSize/4 - 8 entries as we have
+ ** coded. But due to a coding error in versions of SQLite prior to
+ ** 3.6.0, databases with freelist trunk pages holding more than
+ ** usableSize/4 - 8 entries will be reported as corrupt. In order
+ ** to maintain backwards compatibility with older versions of SQLite,
+ ** we will continue to restrict the number of entries to usableSize/4 - 8
+ ** for now. At some point in the future (once everyone has upgraded
+ ** to 3.6.0 or later) we should consider fixing the conditional above
+ ** to read "usableSize/4-2" instead of "usableSize/4-8".
+ **
+ ** EVIDENCE-OF: R-19920-11576 However, newer versions of SQLite still
+ ** avoid using the last six entries in the freelist trunk page array in
+ ** order that database files created by newer versions of SQLite can be
+ ** read by older versions of SQLite.
+ */
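+      /* Worked example (hypothetical page size): with usableSize==1024 a
+      ** trunk page can legally describe 1024/4 - 2 == 254 leaves, but for
+      ** the compatibility reasons above no new leaf is added once nLeaf
+      ** reaches 1024/4 - 8 == 248. */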
+ rc = sqlite3PagerWrite(pTrunk->pDbPage);
+ if( rc==SQLITE_OK ){
+ put4byte(&pTrunk->aData[4], nLeaf+1);
+ put4byte(&pTrunk->aData[8+nLeaf*4], iPage);
+ if( pPage && (pBt->btsFlags & BTS_SECURE_DELETE)==0 ){
+ sqlite3PagerDontWrite(pPage->pDbPage);
+ }
+ rc = btreeSetHasContent(pBt, iPage);
+ }
+ TRACE(("FREE-PAGE: %d leaf on trunk page %d\n",pPage->pgno,pTrunk->pgno));
+ goto freepage_out;
+ }
+ }
+
+  /* If control flows to this point, then it was not possible to add the
+  ** page being freed as a leaf page of the first trunk in the free-list.
+ ** Possibly because the free-list is empty, or possibly because the
+ ** first trunk in the free-list is full. Either way, the page being freed
+ ** will become the new first trunk page in the free-list.
+ */
+ if( pPage==0 && SQLITE_OK!=(rc = btreeGetPage(pBt, iPage, &pPage, 0)) ){
+ goto freepage_out;
+ }
+ rc = sqlite3PagerWrite(pPage->pDbPage);
+ if( rc!=SQLITE_OK ){
+ goto freepage_out;
+ }
+ put4byte(pPage->aData, iTrunk);
+ put4byte(&pPage->aData[4], 0);
+ put4byte(&pPage1->aData[32], iPage);
+ TRACE(("FREE-PAGE: %d new trunk page replacing %d\n", pPage->pgno, iTrunk));
+
+freepage_out:
+ if( pPage ){
+ pPage->isInit = 0;
+ }
+ releasePage(pPage);
+ releasePage(pTrunk);
+ return rc;
+}
+static void freePage(MemPage *pPage, int *pRC){
+ if( (*pRC)==SQLITE_OK ){
+ *pRC = freePage2(pPage->pBt, pPage, pPage->pgno);
+ }
+}
+
+/*
+** Free any overflow pages associated with the given Cell. Store
+** size information about the cell in pInfo.
+*/
+static int clearCell(
+ MemPage *pPage, /* The page that contains the Cell */
+ unsigned char *pCell, /* First byte of the Cell */
+ CellInfo *pInfo /* Size information about the cell */
+){
+ BtShared *pBt;
+ Pgno ovflPgno;
+ int rc;
+ int nOvfl;
+ u32 ovflPageSize;
+
+ assert( sqlite3_mutex_held(pPage->pBt->mutex) );
+ pPage->xParseCell(pPage, pCell, pInfo);
+ if( pInfo->nLocal==pInfo->nPayload ){
+ return SQLITE_OK; /* No overflow pages. Return without doing anything */
+ }
+ testcase( pCell + pInfo->nSize == pPage->aDataEnd );
+ testcase( pCell + (pInfo->nSize-1) == pPage->aDataEnd );
+ if( pCell + pInfo->nSize > pPage->aDataEnd ){
+ /* Cell extends past end of page */
+ return SQLITE_CORRUPT_PAGE(pPage);
+ }
+ ovflPgno = get4byte(pCell + pInfo->nSize - 4);
+ pBt = pPage->pBt;
+ assert( pBt->usableSize > 4 );
+ ovflPageSize = pBt->usableSize - 4;
+ nOvfl = (pInfo->nPayload - pInfo->nLocal + ovflPageSize - 1)/ovflPageSize;
+ assert( nOvfl>0 ||
+ (CORRUPT_DB && (pInfo->nPayload + ovflPageSize)<ovflPageSize)
+ );
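+  /* Worked example of the computation above (hypothetical sizes): with
+  ** usableSize==1024 each overflow page carries ovflPageSize==1020
+  ** payload bytes, so a cell with nPayload==2500 and nLocal==100 needs
+  **
+  **     nOvfl = (2400 + 1019) / 1020 == 3
+  **
+  ** overflow pages, holding 1020, 1020 and 360 bytes respectively. */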
+ while( nOvfl-- ){
+ Pgno iNext = 0;
+ MemPage *pOvfl = 0;
+ if( ovflPgno<2 || ovflPgno>btreePagecount(pBt) ){
+ /* 0 is not a legal page number and page 1 cannot be an
+ ** overflow page. Therefore if ovflPgno<2 or past the end of the
+ ** file the database must be corrupt. */
+ return SQLITE_CORRUPT_BKPT;
+ }
+ if( nOvfl ){
+ rc = getOverflowPage(pBt, ovflPgno, &pOvfl, &iNext);
+ if( rc ) return rc;
+ }
+
+ if( ( pOvfl || ((pOvfl = btreePageLookup(pBt, ovflPgno))!=0) )
+ && sqlite3PagerPageRefcount(pOvfl->pDbPage)!=1
+ ){
+ /* There is no reason any cursor should have an outstanding reference
+ ** to an overflow page belonging to a cell that is being deleted/updated.
+ ** So if there exists more than one reference to this page, then it
+ ** must not really be an overflow page and the database must be corrupt.
+ ** It is helpful to detect this before calling freePage2(), as
+ ** freePage2() may zero the page contents if secure-delete mode is
+ ** enabled. If this 'overflow' page happens to be a page that the
+ ** caller is iterating through or using in some other way, this
+ ** can be problematic.
+ */
+ rc = SQLITE_CORRUPT_BKPT;
+ }else{
+ rc = freePage2(pBt, pOvfl, ovflPgno);
+ }
+
+ if( pOvfl ){
+ sqlite3PagerUnref(pOvfl->pDbPage);
+ }
+ if( rc ) return rc;
+ ovflPgno = iNext;
+ }
+ return SQLITE_OK;
+}
+
+/*
+** Create the byte sequence used to represent a cell on page pPage
+** and write that byte sequence into pCell[]. Overflow pages are
+** allocated and filled in as necessary. The calling procedure
+** is responsible for making sure sufficient space has been allocated
+** for pCell[].
+**
+** Note that pCell does not necessarily need to point to the pPage->aData
+** area. pCell might point to some temporary storage. The cell will
+** be constructed in this temporary area then copied into pPage->aData
+** later.
+*/
+static int fillInCell(
+ MemPage *pPage, /* The page that contains the cell */
+ unsigned char *pCell, /* Complete text of the cell */
+ const BtreePayload *pX, /* Payload with which to construct the cell */
+ int *pnSize /* Write cell size here */
+){
+ int nPayload;
+ const u8 *pSrc;
+ int nSrc, n, rc, mn;
+ int spaceLeft;
+ MemPage *pToRelease;
+ unsigned char *pPrior;
+ unsigned char *pPayload;
+ BtShared *pBt;
+ Pgno pgnoOvfl;
+ int nHeader;
+
+ assert( sqlite3_mutex_held(pPage->pBt->mutex) );
+
+ /* pPage is not necessarily writeable since pCell might be auxiliary
+ ** buffer space that is separate from the pPage buffer area */
+ assert( pCell<pPage->aData || pCell>=&pPage->aData[pPage->pBt->pageSize]
+ || sqlite3PagerIswriteable(pPage->pDbPage) );
+
+ /* Fill in the header. */
+ nHeader = pPage->childPtrSize;
+ if( pPage->intKey ){
+ nPayload = pX->nData + pX->nZero;
+ pSrc = pX->pData;
+ nSrc = pX->nData;
+ assert( pPage->intKeyLeaf ); /* fillInCell() only called for leaves */
+ nHeader += putVarint32(&pCell[nHeader], nPayload);
+ nHeader += putVarint(&pCell[nHeader], *(u64*)&pX->nKey);
+ }else{
+ assert( pX->nKey<=0x7fffffff && pX->pKey!=0 );
+ nSrc = nPayload = (int)pX->nKey;
+ pSrc = pX->pKey;
+ nHeader += putVarint32(&pCell[nHeader], nPayload);
+ }
+
+ /* Fill in the payload */
+ pPayload = &pCell[nHeader];
+ if( nPayload<=pPage->maxLocal ){
+ /* This is the common case where everything fits on the btree page
+ ** and no overflow pages are required. */
+ n = nHeader + nPayload;
+ testcase( n==3 );
+ testcase( n==4 );
+ if( n<4 ) n = 4;
+ *pnSize = n;
+ assert( nSrc<=nPayload );
+ testcase( nSrc<nPayload );
+ memcpy(pPayload, pSrc, nSrc);
+ memset(pPayload+nSrc, 0, nPayload-nSrc);
+ return SQLITE_OK;
+ }
+
+ /* If we reach this point, it means that some of the content will need
+ ** to spill onto overflow pages.
+ */
+ mn = pPage->minLocal;
+ n = mn + (nPayload - mn) % (pPage->pBt->usableSize - 4);
+ testcase( n==pPage->maxLocal );
+ testcase( n==pPage->maxLocal+1 );
+ if( n > pPage->maxLocal ) n = mn;
+ spaceLeft = n;
+ *pnSize = n + nHeader + 4;
+ pPrior = &pCell[nHeader+n];
+ pToRelease = 0;
+ pgnoOvfl = 0;
+ pBt = pPage->pBt;
+
+ /* At this point variables should be set as follows:
+ **
+ ** nPayload Total payload size in bytes
+ ** pPayload Begin writing payload here
+ ** spaceLeft Space available at pPayload. If nPayload>spaceLeft,
+ ** that means content must spill into overflow pages.
+ ** *pnSize Size of the local cell (not counting overflow pages)
+ ** pPrior Where to write the pgno of the first overflow page
+ **
+ ** Use a call to btreeParseCellPtr() to verify that the values above
+ ** were computed correctly.
+ */
+#ifdef SQLITE_DEBUG
+ {
+ CellInfo info;
+ pPage->xParseCell(pPage, pCell, &info);
+ assert( nHeader==(int)(info.pPayload - pCell) );
+ assert( info.nKey==pX->nKey );
+ assert( *pnSize == info.nSize );
+ assert( spaceLeft == info.nLocal );
+ }
+#endif
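+
+  /* Worked example of the split above (hypothetical values; the real
+  ** minLocal/maxLocal are derived from the page size and btree type).
+  ** Assume usableSize==1024, minLocal==100, maxLocal==230 and
+  ** nPayload==500.  Then
+  **
+  **     n = 100 + (500-100) % (1024-4) == 500  >  maxLocal
+  **
+  ** so n falls back to minLocal: 100 payload bytes stay in the local
+  ** cell and the remaining 400 spill onto overflow pages of 1020 usable
+  ** bytes each.  When the modulo result does land within maxLocal, the
+  ** overflow chain is filled exactly, with no wasted space on its last
+  ** page. */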
+
+ /* Write the payload into the local Cell and any extra into overflow pages */
+ while( 1 ){
+ n = nPayload;
+ if( n>spaceLeft ) n = spaceLeft;
+
+    /* If pToRelease is not zero then pPayload points into the data area
+ ** of pToRelease. Make sure pToRelease is still writeable. */
+ assert( pToRelease==0 || sqlite3PagerIswriteable(pToRelease->pDbPage) );
+
+ /* If pPayload is part of the data area of pPage, then make sure pPage
+ ** is still writeable */
+ assert( pPayload<pPage->aData || pPayload>=&pPage->aData[pBt->pageSize]
+ || sqlite3PagerIswriteable(pPage->pDbPage) );
+
+ if( nSrc>=n ){
+ memcpy(pPayload, pSrc, n);
+ }else if( nSrc>0 ){
+ n = nSrc;
+ memcpy(pPayload, pSrc, n);
+ }else{
+ memset(pPayload, 0, n);
+ }
+ nPayload -= n;
+ if( nPayload<=0 ) break;
+ pPayload += n;
+ pSrc += n;
+ nSrc -= n;
+ spaceLeft -= n;
+ if( spaceLeft==0 ){
+ MemPage *pOvfl = 0;
+#ifndef SQLITE_OMIT_AUTOVACUUM
+ Pgno pgnoPtrmap = pgnoOvfl; /* Overflow page pointer-map entry page */
+ if( pBt->autoVacuum ){
+ do{
+ pgnoOvfl++;
+ } while(
+ PTRMAP_ISPAGE(pBt, pgnoOvfl) || pgnoOvfl==PENDING_BYTE_PAGE(pBt)
+ );
+ }
+#endif
+ rc = allocateBtreePage(pBt, &pOvfl, &pgnoOvfl, pgnoOvfl, 0);
+#ifndef SQLITE_OMIT_AUTOVACUUM
+ /* If the database supports auto-vacuum, and the second or subsequent
+ ** overflow page is being allocated, add an entry to the pointer-map
+ ** for that page now.
+ **
+ ** If this is the first overflow page, then write a partial entry
+ ** to the pointer-map. If we write nothing to this pointer-map slot,
+ ** then the optimistic overflow chain processing in clearCell()
+ ** may misinterpret the uninitialized values and delete the
+ ** wrong pages from the database.
+ */
+ if( pBt->autoVacuum && rc==SQLITE_OK ){
+ u8 eType = (pgnoPtrmap?PTRMAP_OVERFLOW2:PTRMAP_OVERFLOW1);
+ ptrmapPut(pBt, pgnoOvfl, eType, pgnoPtrmap, &rc);
+ if( rc ){
+ releasePage(pOvfl);
+ }
+ }
+#endif
+ if( rc ){
+ releasePage(pToRelease);
+ return rc;
+ }
+
+      /* If pToRelease is not zero then pPrior points into the data area
+ ** of pToRelease. Make sure pToRelease is still writeable. */
+ assert( pToRelease==0 || sqlite3PagerIswriteable(pToRelease->pDbPage) );
+
+ /* If pPrior is part of the data area of pPage, then make sure pPage
+ ** is still writeable */
+ assert( pPrior<pPage->aData || pPrior>=&pPage->aData[pBt->pageSize]
+ || sqlite3PagerIswriteable(pPage->pDbPage) );
+
+ put4byte(pPrior, pgnoOvfl);
+ releasePage(pToRelease);
+ pToRelease = pOvfl;
+ pPrior = pOvfl->aData;
+ put4byte(pPrior, 0);
+ pPayload = &pOvfl->aData[4];
+ spaceLeft = pBt->usableSize - 4;
+ }
+ }
+ releasePage(pToRelease);
+ return SQLITE_OK;
+}
+
+/*
+** Remove the i-th cell from pPage. This routine affects pPage only.
+** The cell content is not freed or deallocated. It is assumed that
+** the cell content has been copied someplace else. This routine just
+** removes the reference to the cell from pPage.
+**
+** "sz" must be the number of bytes in the cell.
+*/
+static void dropCell(MemPage *pPage, int idx, int sz, int *pRC){
+ u32 pc; /* Offset to cell content of cell being deleted */
+ u8 *data; /* pPage->aData */
+ u8 *ptr; /* Used to move bytes around within data[] */
+ int rc; /* The return code */
+  int hdr;        /* Beginning of the header: 0 on most pages, 100 on page 1 */
+
+ if( *pRC ) return;
+ assert( idx>=0 && idx<pPage->nCell );
+ assert( CORRUPT_DB || sz==cellSize(pPage, idx) );
+ assert( sqlite3PagerIswriteable(pPage->pDbPage) );
+ assert( sqlite3_mutex_held(pPage->pBt->mutex) );
+ assert( pPage->nFree>=0 );
+ data = pPage->aData;
+ ptr = &pPage->aCellIdx[2*idx];
+ pc = get2byte(ptr);
+ hdr = pPage->hdrOffset;
+ testcase( pc==get2byte(&data[hdr+5]) );
+ testcase( pc+sz==pPage->pBt->usableSize );
+ if( pc+sz > pPage->pBt->usableSize ){
+ *pRC = SQLITE_CORRUPT_BKPT;
+ return;
+ }
+ rc = freeSpace(pPage, pc, sz);
+ if( rc ){
+ *pRC = rc;
+ return;
+ }
+ pPage->nCell--;
+ if( pPage->nCell==0 ){
+ memset(&data[hdr+1], 0, 4);
+ data[hdr+7] = 0;
+ put2byte(&data[hdr+5], pPage->pBt->usableSize);
+ pPage->nFree = pPage->pBt->usableSize - pPage->hdrOffset
+ - pPage->childPtrSize - 8;
+ }else{
+ memmove(ptr, ptr+2, 2*(pPage->nCell - idx));
+ put2byte(&data[hdr+3], pPage->nCell);
+ pPage->nFree += 2;
+ }
+}
+
+/*
+** Insert a new cell on pPage at cell index "i". pCell points to the
+** content of the cell.
+**
+** If the cell content will fit on the page, then put it there. If it
+** will not fit, then make a copy of the cell content into pTemp if
+** pTemp is not null. Regardless of pTemp, allocate a new entry
+** in pPage->apOvfl[] and make it point to the cell content (either
+** in pTemp or the original pCell) and also record its index.
+** Allocating a new entry in pPage->apOvfl[] implies that
+** pPage->nOverflow is incremented.
+**
+** *pRC must be SQLITE_OK when this routine is called.
+*/
+static void insertCell(
+ MemPage *pPage, /* Page into which we are copying */
+ int i, /* New cell becomes the i-th cell of the page */
+ u8 *pCell, /* Content of the new cell */
+ int sz, /* Bytes of content in pCell */
+ u8 *pTemp, /* Temp storage space for pCell, if needed */
+ Pgno iChild, /* If non-zero, replace first 4 bytes with this value */
+ int *pRC /* Read and write return code from here */
+){
+ int idx = 0; /* Where to write new cell content in data[] */
+ int j; /* Loop counter */
+ u8 *data; /* The content of the whole page */
+  u8 *pIns;                    /* Point in pPage->aCellIdx[] where the new cell pointer goes */
+
+ assert( *pRC==SQLITE_OK );
+ assert( i>=0 && i<=pPage->nCell+pPage->nOverflow );
+ assert( MX_CELL(pPage->pBt)<=10921 );
+ assert( pPage->nCell<=MX_CELL(pPage->pBt) || CORRUPT_DB );
+ assert( pPage->nOverflow<=ArraySize(pPage->apOvfl) );
+ assert( ArraySize(pPage->apOvfl)==ArraySize(pPage->aiOvfl) );
+ assert( sqlite3_mutex_held(pPage->pBt->mutex) );
+ assert( sz==pPage->xCellSize(pPage, pCell) || CORRUPT_DB );
+ assert( pPage->nFree>=0 );
+ if( pPage->nOverflow || sz+2>pPage->nFree ){
+ if( pTemp ){
+ memcpy(pTemp, pCell, sz);
+ pCell = pTemp;
+ }
+ if( iChild ){
+ put4byte(pCell, iChild);
+ }
+ j = pPage->nOverflow++;
+ /* Comparison against ArraySize-1 since we hold back one extra slot
+    ** as a contingency. In other words, we never need more than 3 overflow
+ ** slots but 4 are allocated, just to be safe. */
+ assert( j < ArraySize(pPage->apOvfl)-1 );
+ pPage->apOvfl[j] = pCell;
+ pPage->aiOvfl[j] = (u16)i;
+
+ /* When multiple overflows occur, they are always sequential and in
+    ** sorted order. These invariants arise because multiple overflows can
+ ** only occur when inserting divider cells into the parent page during
+ ** balancing, and the dividers are adjacent and sorted.
+ */
+ assert( j==0 || pPage->aiOvfl[j-1]<(u16)i ); /* Overflows in sorted order */
+ assert( j==0 || i==pPage->aiOvfl[j-1]+1 ); /* Overflows are sequential */
+ }else{
+ int rc = sqlite3PagerWrite(pPage->pDbPage);
+ if( rc!=SQLITE_OK ){
+ *pRC = rc;
+ return;
+ }
+ assert( sqlite3PagerIswriteable(pPage->pDbPage) );
+ data = pPage->aData;
+ assert( &data[pPage->cellOffset]==pPage->aCellIdx );
+ rc = allocateSpace(pPage, sz, &idx);
+ if( rc ){ *pRC = rc; return; }
+ /* The allocateSpace() routine guarantees the following properties
+ ** if it returns successfully */
+ assert( idx >= 0 );
+ assert( idx >= pPage->cellOffset+2*pPage->nCell+2 || CORRUPT_DB );
+ assert( idx+sz <= (int)pPage->pBt->usableSize );
+ pPage->nFree -= (u16)(2 + sz);
+ if( iChild ){
+ /* In a corrupt database where an entry in the cell index section of
+ ** a btree page has a value of 3 or less, the pCell value might point
+ ** as many as 4 bytes in front of the start of the aData buffer for
+ ** the source page. Make sure this does not cause problems by not
+ ** reading the first 4 bytes */
+ memcpy(&data[idx+4], pCell+4, sz-4);
+ put4byte(&data[idx], iChild);
+ }else{
+ memcpy(&data[idx], pCell, sz);
+ }
+ pIns = pPage->aCellIdx + i*2;
+ memmove(pIns+2, pIns, 2*(pPage->nCell - i));
+ put2byte(pIns, idx);
+ pPage->nCell++;
+ /* increment the cell count */
+ if( (++data[pPage->hdrOffset+4])==0 ) data[pPage->hdrOffset+3]++;
+ assert( get2byte(&data[pPage->hdrOffset+3])==pPage->nCell || CORRUPT_DB );
+#ifndef SQLITE_OMIT_AUTOVACUUM
+ if( pPage->pBt->autoVacuum ){
+ /* The cell may contain a pointer to an overflow page. If so, write
+ ** the entry for the overflow page into the pointer map.
+ */
+ ptrmapPutOvflPtr(pPage, pPage, pCell, pRC);
+ }
+#endif
+ }
+}
+
+/*
+** The following parameters determine how many adjacent pages get involved
+** in a balancing operation. NN is the number of neighbors on either side
+** of the page that participate in the balancing operation. NB is the
+** total number of pages that participate, including the target page and
+** NN neighbors on either side.
+**
+** The minimum value of NN is 1 (of course). Increasing NN above 1
+** (to 2 or 3) gives a modest improvement in SELECT and DELETE performance
+** in exchange for a larger degradation in INSERT and UPDATE performance.
+** The value of NN appears to give the best results overall.
+**
+** (Later:) The description above makes it seem as if these values are
+** tunable - as if you could change them and recompile and it would all work.
+** But that is unlikely. NB has been 3 since the inception of SQLite and
+** we have never tested any other value.
+*/
+#define NN 1 /* Number of neighbors on either side of pPage */
+#define NB 3 /* (NN*2+1): Total pages involved in the balance */
+
+/*
+** A CellArray object contains a cache of pointers and sizes for a
+** consecutive sequence of cells that might be held on multiple pages.
+**
+** The cells in this array are the divider cell or cells from the pParent
+** page plus up to three child pages. There are a total of nCell cells.
+**
+** pRef is a pointer to one of the pages that contributes cells. This is
+** used to access information such as MemPage.intKey and MemPage.pBt->pageSize
+** which should be common to all pages that contribute cells to this array.
+**
+** apCell[] and szCell[] hold, respectively, pointers to the start of each
+** cell and the size of each cell. Some of the apCell[] pointers might refer
+** to overflow cells. In other words, some apCell[] pointers might not point
+** into the content area of the pages.
+**
+** A szCell[] of zero means the size of that cell has not yet been computed.
+**
+** The cells come from as many as four different pages:
+**
+** -----------
+** | Parent |
+** -----------
+** / | \
+** / | \
+** --------- --------- ---------
+** |Child-1| |Child-2| |Child-3|
+** --------- --------- ---------
+**
+** For an index btree, the order of cells in the array is:
+**
+** 1. All cells from Child-1 in order
+** 2. The first divider cell from Parent
+** 3. All cells from Child-2 in order
+** 4. The second divider cell from Parent
+** 5. All cells from Child-3 in order
+**
+** For a table-btree (with rowids), items 2 and 4 are empty because
+** content exists only in leaves and there are no divider cells.
+**
+** For an index btree, the apEnd[] array holds pointers to the end of the page
+** for Child-1, the Parent, Child-2, the Parent (again), and Child-3,
+** respectively. The ixNx[] array holds the number of cells contained in
+** each of these 5 stages, and all stages to the left. Hence:
+**
+** ixNx[0] = Number of cells in Child-1.
+** ixNx[1] = Number of cells in Child-1 plus 1 for first divider.
+** ixNx[2] = Number of cells in Child-1 and Child-2 + 1 for 1st divider.
+** ixNx[3] = Number of cells in Child-1 and Child-2 + both divider cells
+** ixNx[4] = Total number of cells.
+**
+** For a table-btree, the concept is similar, except only apEnd[0]..apEnd[2]
+** are used and they point to the leaf pages only, and the ixNx values are:
+**
+** ixNx[0] = Number of cells in Child-1.
+** ixNx[1] = Number of cells in Child-1 and Child-2.
+** ixNx[2] = Total number of cells.
+**
+** Sometimes when deleting, a child page can have zero cells. In those
+** cases, ixNx[] entries with higher indexes, and the corresponding apEnd[]
+** entries, shift down. The end result is that each ixNx[] entry should
+** be larger than the previous one.
+*/
+typedef struct CellArray CellArray;
+struct CellArray {
+ int nCell; /* Number of cells in apCell[] */
+ MemPage *pRef; /* Reference page */
+  u8 **apCell;           /* All cells being balanced */
+ u16 *szCell; /* Local size of all cells in apCell[] */
+ u8 *apEnd[NB*2]; /* MemPage.aDataEnd values */
+  int ixNx[NB*2];        /* Index at which we move to the next apEnd[] */
+};
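+
+/* Illustrative ixNx[] contents (hypothetical cell counts): for an index
+** btree balance over children holding 10, 12 and 9 cells plus the two
+** divider cells from the parent, the stages described above give
+**
+**     ixNx[0] == 10      (Child-1)
+**     ixNx[1] == 11      (plus the first divider)
+**     ixNx[2] == 23      (plus Child-2)
+**     ixNx[3] == 24      (plus the second divider)
+**     ixNx[4] == 33      (plus Child-3; the total cell count)
+*/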
+
+/*
+** Make sure the cell sizes at idx, idx+1, ..., idx+N-1 have been
+** computed.
+*/
+static void populateCellCache(CellArray *p, int idx, int N){
+ assert( idx>=0 && idx+N<=p->nCell );
+ while( N>0 ){
+ assert( p->apCell[idx]!=0 );
+ if( p->szCell[idx]==0 ){
+ p->szCell[idx] = p->pRef->xCellSize(p->pRef, p->apCell[idx]);
+ }else{
+ assert( CORRUPT_DB ||
+ p->szCell[idx]==p->pRef->xCellSize(p->pRef, p->apCell[idx]) );
+ }
+ idx++;
+ N--;
+ }
+}
+
+/*
+** Return the size of the Nth element of the cell array
+*/
+static SQLITE_NOINLINE u16 computeCellSize(CellArray *p, int N){
+ assert( N>=0 && N<p->nCell );
+ assert( p->szCell[N]==0 );
+ p->szCell[N] = p->pRef->xCellSize(p->pRef, p->apCell[N]);
+ return p->szCell[N];
+}
+static u16 cachedCellSize(CellArray *p, int N){
+ assert( N>=0 && N<p->nCell );
+ if( p->szCell[N] ) return p->szCell[N];
+ return computeCellSize(p, N);
+}
+
+/*
+** Array apCell[] contains pointers to nCell b-tree page cells. The
+** szCell[] array contains the size in bytes of each cell. This function
+** replaces the current contents of page pPg with the contents of the cell
+** array.
+**
+** Some of the cells in apCell[] may currently be stored in pPg. This
+** function works around this by making a copy of any
+** such cells before overwriting the page data.
+**
+** The MemPage.nFree field is invalidated by this function. It is the
+** responsibility of the caller to set it correctly.
+*/
+static int rebuildPage(
+ CellArray *pCArray, /* Content to be added to page pPg */
+ int iFirst, /* First cell in pCArray to use */
+ int nCell, /* Final number of cells on page */
+ MemPage *pPg /* The page to be reconstructed */
+){
+ const int hdr = pPg->hdrOffset; /* Offset of header on pPg */
+ u8 * const aData = pPg->aData; /* Pointer to data for pPg */
+ const int usableSize = pPg->pBt->usableSize;
+ u8 * const pEnd = &aData[usableSize];
+ int i = iFirst; /* Which cell to copy from pCArray*/
+ u32 j; /* Start of cell content area */
+ int iEnd = i+nCell; /* Loop terminator */
+ u8 *pCellptr = pPg->aCellIdx;
+ u8 *pTmp = sqlite3PagerTempSpace(pPg->pBt->pPager);
+ u8 *pData;
+ int k; /* Current slot in pCArray->apEnd[] */
+ u8 *pSrcEnd; /* Current pCArray->apEnd[k] value */
+
+ assert( i<iEnd );
+ j = get2byte(&aData[hdr+5]);
+ if( NEVER(j>(u32)usableSize) ){ j = 0; }
+ memcpy(&pTmp[j], &aData[j], usableSize - j);
+
+ for(k=0; pCArray->ixNx[k]<=i && ALWAYS(k<NB*2); k++){}
+ pSrcEnd = pCArray->apEnd[k];
+
+ pData = pEnd;
+ while( 1/*exit by break*/ ){
+ u8 *pCell = pCArray->apCell[i];
+ u16 sz = pCArray->szCell[i];
+ assert( sz>0 );
+ if( SQLITE_WITHIN(pCell,aData,pEnd) ){
+ if( ((uptr)(pCell+sz))>(uptr)pEnd ) return SQLITE_CORRUPT_BKPT;
+ pCell = &pTmp[pCell - aData];
+ }else if( (uptr)(pCell+sz)>(uptr)pSrcEnd
+ && (uptr)(pCell)<(uptr)pSrcEnd
+ ){
+ return SQLITE_CORRUPT_BKPT;
+ }
+
+ pData -= sz;
+ put2byte(pCellptr, (pData - aData));
+ pCellptr += 2;
+ if( pData < pCellptr ) return SQLITE_CORRUPT_BKPT;
+ memcpy(pData, pCell, sz);
+ assert( sz==pPg->xCellSize(pPg, pCell) || CORRUPT_DB );
+    testcase( sz!=pPg->xCellSize(pPg,pCell) );
+ i++;
+ if( i>=iEnd ) break;
+ if( pCArray->ixNx[k]<=i ){
+ k++;
+ pSrcEnd = pCArray->apEnd[k];
+ }
+ }
+
+ /* The pPg->nFree field is now set incorrectly. The caller will fix it. */
+ pPg->nCell = nCell;
+ pPg->nOverflow = 0;
+
+ put2byte(&aData[hdr+1], 0);
+ put2byte(&aData[hdr+3], pPg->nCell);
+ put2byte(&aData[hdr+5], pData - aData);
+ aData[hdr+7] = 0x00;
+ return SQLITE_OK;
+}
+
+/*
+** The pCArray object contains pointers to b-tree cells and the cell sizes.
+** This function attempts to add the cells stored in the array to page pPg.
+** If it cannot (because the page needs to be defragmented before the cells
+** will fit), non-zero is returned. Otherwise, if the cells are added
+** successfully, zero is returned.
+**
+** Argument pCellptr points to the first entry in the cell-pointer array
+** (part of page pPg) to populate. After cell apCell[0] is written to the
+** page body, a 16-bit offset is written to pCellptr. And so on, for each
+** cell in the array. It is the responsibility of the caller to ensure
+** that it is safe to overwrite this part of the cell-pointer array.
+**
+** When this function is called, *ppData points to the start of the
+** content area on page pPg. If the size of the content area is extended,
+** *ppData is updated to point to the new start of the content area
+** before returning.
+**
+** Finally, argument pBegin points to the byte immediately following the
+** end of the space required by this page for the cell-pointer area (for
+** all cells - not just those inserted by the current call). If the content
+** area must be extended to before this point in order to accommodate all
+** cells in apCell[], then the cells do not fit and non-zero is returned.
+*/
+static int pageInsertArray(
+ MemPage *pPg, /* Page to add cells to */
+ u8 *pBegin, /* End of cell-pointer array */
+ u8 **ppData, /* IN/OUT: Page content-area pointer */
+ u8 *pCellptr, /* Pointer to cell-pointer area */
+ int iFirst, /* Index of first cell to add */
+ int nCell, /* Number of cells to add to pPg */
+ CellArray *pCArray /* Array of cells */
+){
+ int i = iFirst; /* Loop counter - cell index to insert */
+ u8 *aData = pPg->aData; /* Complete page */
+ u8 *pData = *ppData; /* Content area. A subset of aData[] */
+ int iEnd = iFirst + nCell; /* End of loop. One past last cell to ins */
+ int k; /* Current slot in pCArray->apEnd[] */
+ u8 *pEnd; /* Maximum extent of cell data */
+ assert( CORRUPT_DB || pPg->hdrOffset==0 ); /* Never called on page 1 */
+ if( iEnd<=iFirst ) return 0;
+ for(k=0; pCArray->ixNx[k]<=i && ALWAYS(k<NB*2); k++){}
+ pEnd = pCArray->apEnd[k];
+ while( 1 /*Exit by break*/ ){
+ int sz, rc;
+ u8 *pSlot;
+ assert( pCArray->szCell[i]!=0 );
+ sz = pCArray->szCell[i];
+ if( (aData[1]==0 && aData[2]==0) || (pSlot = pageFindSlot(pPg,sz,&rc))==0 ){
+ if( (pData - pBegin)<sz ) return 1;
+ pData -= sz;
+ pSlot = pData;
+ }
+ /* pSlot and pCArray->apCell[i] will never overlap on a well-formed
+ ** database. But they might for a corrupt database. Hence use memmove()
+    ** since memcpy() sends SIGABRT with overlapping buffers on OpenBSD */
+ assert( (pSlot+sz)<=pCArray->apCell[i]
+ || pSlot>=(pCArray->apCell[i]+sz)
+ || CORRUPT_DB );
+ if( (uptr)(pCArray->apCell[i]+sz)>(uptr)pEnd
+ && (uptr)(pCArray->apCell[i])<(uptr)pEnd
+ ){
+ assert( CORRUPT_DB );
+ (void)SQLITE_CORRUPT_BKPT;
+ return 1;
+ }
+ memmove(pSlot, pCArray->apCell[i], sz);
+ put2byte(pCellptr, (pSlot - aData));
+ pCellptr += 2;
+ i++;
+ if( i>=iEnd ) break;
+ if( pCArray->ixNx[k]<=i ){
+ k++;
+ pEnd = pCArray->apEnd[k];
+ }
+ }
+ *ppData = pData;
+ return 0;
+}
+
+/*
+** The pCArray object contains pointers to b-tree cells and their sizes.
+**
+** This function adds the space associated with each cell in the array
+** that is currently stored within the body of pPg to the pPg free-list.
+** The cell-pointers and other fields of the page are not updated.
+**
+** This function returns the total number of cells added to the free-list.
+*/
+static int pageFreeArray(
+ MemPage *pPg, /* Page to edit */
+ int iFirst, /* First cell to delete */
+ int nCell, /* Cells to delete */
+ CellArray *pCArray /* Array of cells */
+){
+ u8 * const aData = pPg->aData;
+ u8 * const pEnd = &aData[pPg->pBt->usableSize];
+ u8 * const pStart = &aData[pPg->hdrOffset + 8 + pPg->childPtrSize];
+ int nRet = 0;
+ int i;
+ int iEnd = iFirst + nCell;
+ u8 *pFree = 0;
+ int szFree = 0;
+
+ for(i=iFirst; i<iEnd; i++){
+ u8 *pCell = pCArray->apCell[i];
+ if( SQLITE_WITHIN(pCell, pStart, pEnd) ){
+ int sz;
+ /* No need to use cachedCellSize() here. The sizes of all cells that
+      ** are to be freed have already been computed while deciding which
+ ** cells need freeing */
+ sz = pCArray->szCell[i]; assert( sz>0 );
+ if( pFree!=(pCell + sz) ){
+ if( pFree ){
+ assert( pFree>aData && (pFree - aData)<65536 );
+ freeSpace(pPg, (u16)(pFree - aData), szFree);
+ }
+ pFree = pCell;
+ szFree = sz;
+ if( pFree+sz>pEnd ) return 0;
+ }else{
+ pFree = pCell;
+ szFree += sz;
+ }
+ nRet++;
+ }
+ }
+ if( pFree ){
+ assert( pFree>aData && (pFree - aData)<65536 );
+ freeSpace(pPg, (u16)(pFree - aData), szFree);
+ }
+ return nRet;
+}
+
+/*
+** pCArray contains pointers to and sizes of all cells in the page being
+** balanced. The current page, pPg, has pPg->nCell cells starting with
+** pCArray->apCell[iOld]. After balancing, this page should hold nNew cells
+** starting at apCell[iNew].
+**
+** This routine makes the necessary adjustments to pPg so that it contains
+** the correct cells after being balanced.
+**
+** The pPg->nFree field is invalid when this function returns. It is the
+** responsibility of the caller to set it correctly.
+*/
+static int editPage(
+ MemPage *pPg, /* Edit this page */
+ int iOld, /* Index of first cell currently on page */
+ int iNew, /* Index of new first cell on page */
+ int nNew, /* Final number of cells on page */
+ CellArray *pCArray /* Array of cells and sizes */
+){
+ u8 * const aData = pPg->aData;
+ const int hdr = pPg->hdrOffset;
+ u8 *pBegin = &pPg->aCellIdx[nNew * 2];
+ int nCell = pPg->nCell; /* Cells stored on pPg */
+ u8 *pData;
+ u8 *pCellptr;
+ int i;
+ int iOldEnd = iOld + pPg->nCell + pPg->nOverflow;
+ int iNewEnd = iNew + nNew;
+
+#ifdef SQLITE_DEBUG
+ u8 *pTmp = sqlite3PagerTempSpace(pPg->pBt->pPager);
+ memcpy(pTmp, aData, pPg->pBt->usableSize);
+#endif
+
+ /* Remove cells from the start and end of the page */
+ assert( nCell>=0 );
+ if( iOld<iNew ){
+ int nShift = pageFreeArray(pPg, iOld, iNew-iOld, pCArray);
+ if( NEVER(nShift>nCell) ) return SQLITE_CORRUPT_BKPT;
+ memmove(pPg->aCellIdx, &pPg->aCellIdx[nShift*2], nCell*2);
+ nCell -= nShift;
+ }
+ if( iNewEnd < iOldEnd ){
+ int nTail = pageFreeArray(pPg, iNewEnd, iOldEnd - iNewEnd, pCArray);
+ assert( nCell>=nTail );
+ nCell -= nTail;
+ }
+
+ pData = &aData[get2byteNotZero(&aData[hdr+5])];
+ if( pData<pBegin ) goto editpage_fail;
+
+ /* Add cells to the start of the page */
+ if( iNew<iOld ){
+ int nAdd = MIN(nNew,iOld-iNew);
+ assert( (iOld-iNew)<nNew || nCell==0 || CORRUPT_DB );
+ assert( nAdd>=0 );
+ pCellptr = pPg->aCellIdx;
+ memmove(&pCellptr[nAdd*2], pCellptr, nCell*2);
+ if( pageInsertArray(
+ pPg, pBegin, &pData, pCellptr,
+ iNew, nAdd, pCArray
+ ) ) goto editpage_fail;
+ nCell += nAdd;
+ }
+
+ /* Add any overflow cells */
+ for(i=0; i<pPg->nOverflow; i++){
+ int iCell = (iOld + pPg->aiOvfl[i]) - iNew;
+ if( iCell>=0 && iCell<nNew ){
+ pCellptr = &pPg->aCellIdx[iCell * 2];
+ if( nCell>iCell ){
+ memmove(&pCellptr[2], pCellptr, (nCell - iCell) * 2);
+ }
+ nCell++;
+ cachedCellSize(pCArray, iCell+iNew);
+ if( pageInsertArray(
+ pPg, pBegin, &pData, pCellptr,
+ iCell+iNew, 1, pCArray
+ ) ) goto editpage_fail;
+ }
+ }
+
+ /* Append cells to the end of the page */
+ assert( nCell>=0 );
+ pCellptr = &pPg->aCellIdx[nCell*2];
+ if( pageInsertArray(
+ pPg, pBegin, &pData, pCellptr,
+ iNew+nCell, nNew-nCell, pCArray
+ ) ) goto editpage_fail;
+
+ pPg->nCell = nNew;
+ pPg->nOverflow = 0;
+
+ put2byte(&aData[hdr+3], pPg->nCell);
+ put2byte(&aData[hdr+5], pData - aData);
+
+#ifdef SQLITE_DEBUG
+ for(i=0; i<nNew && !CORRUPT_DB; i++){
+ u8 *pCell = pCArray->apCell[i+iNew];
+ int iOff = get2byteAligned(&pPg->aCellIdx[i*2]);
+ if( SQLITE_WITHIN(pCell, aData, &aData[pPg->pBt->usableSize]) ){
+ pCell = &pTmp[pCell - aData];
+ }
+ assert( 0==memcmp(pCell, &aData[iOff],
+ pCArray->pRef->xCellSize(pCArray->pRef, pCArray->apCell[i+iNew])) );
+ }
+#endif
+
+ return SQLITE_OK;
+ editpage_fail:
+ /* Unable to edit this page. Rebuild it from scratch instead. */
+ populateCellCache(pCArray, iNew, nNew);
+ return rebuildPage(pCArray, iNew, nNew, pPg);
+}
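+
+/* Worked editPage() example (hypothetical indices): suppose pPg holds
+** cells 5..14 of pCArray (iOld==5, pPg->nCell==10, no overflow cells)
+** and after balancing must hold cells 8..19 (iNew==8, nNew==12).  The
+** code above frees cells 5..7 from the front, keeps cells 8..14 in
+** place, and appends cells 15..19, falling back to rebuildPage() only
+** if the page would first need to be defragmented.
+*/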
+
+
+#ifndef SQLITE_OMIT_QUICKBALANCE
+/*
+** This version of balance() handles the common special case where
+** a new entry is being inserted on the extreme right-end of the
+** tree, in other words, when the new entry will become the largest
+** entry in the tree.
+**
+** Instead of trying to balance the 3 right-most leaf pages, just add
+** a new page to the right-hand side and put the one new entry in
+** that page. This leaves the right side of the tree somewhat
+** unbalanced. But odds are that we will be inserting new entries
+** at the end soon afterwards so the nearly empty page will quickly
+** fill up. On average.
+**
+** pPage is the leaf page which is the right-most page in the tree.
+** pParent is its parent. pPage must have a single overflow entry
+** which is also the right-most entry on the page.
+**
+** The pSpace buffer is used to store a temporary copy of the divider
+** cell that will be inserted into pParent. Such a cell consists of a 4
+** byte page number followed by a variable length integer. In other
+** words, at most 13 bytes. Hence the pSpace buffer must be at
+** least 13 bytes in size.
+*/
+static int balance_quick(MemPage *pParent, MemPage *pPage, u8 *pSpace){
+ BtShared *const pBt = pPage->pBt; /* B-Tree Database */
+ MemPage *pNew; /* Newly allocated page */
+ int rc; /* Return Code */
+ Pgno pgnoNew; /* Page number of pNew */
+
+ assert( sqlite3_mutex_held(pPage->pBt->mutex) );
+ assert( sqlite3PagerIswriteable(pParent->pDbPage) );
+ assert( pPage->nOverflow==1 );
+
+ if( pPage->nCell==0 ) return SQLITE_CORRUPT_BKPT; /* dbfuzz001.test */
+ assert( pPage->nFree>=0 );
+ assert( pParent->nFree>=0 );
+
+ /* Allocate a new page. This page will become the right-sibling of
+ ** pPage. Make the parent page writable, so that the new divider cell
+ ** may be inserted. If both these operations are successful, proceed.
+ */
+ rc = allocateBtreePage(pBt, &pNew, &pgnoNew, 0, 0);
+
+ if( rc==SQLITE_OK ){
+
+ u8 *pOut = &pSpace[4];
+ u8 *pCell = pPage->apOvfl[0];
+ u16 szCell = pPage->xCellSize(pPage, pCell);
+ u8 *pStop;
+ CellArray b;
+
+ assert( sqlite3PagerIswriteable(pNew->pDbPage) );
+ assert( CORRUPT_DB || pPage->aData[0]==(PTF_INTKEY|PTF_LEAFDATA|PTF_LEAF) );
+ zeroPage(pNew, PTF_INTKEY|PTF_LEAFDATA|PTF_LEAF);
+ b.nCell = 1;
+ b.pRef = pPage;
+ b.apCell = &pCell;
+ b.szCell = &szCell;
+ b.apEnd[0] = pPage->aDataEnd;
+ b.ixNx[0] = 2;
+ rc = rebuildPage(&b, 0, 1, pNew);
+ if( NEVER(rc) ){
+ releasePage(pNew);
+ return rc;
+ }
+ pNew->nFree = pBt->usableSize - pNew->cellOffset - 2 - szCell;
+
+ /* If this is an auto-vacuum database, update the pointer map
+ ** with entries for the new page, and any pointer from the
+ ** cell on the page to an overflow page. If either of these
+ ** operations fails, the return code is set, but the contents
+    ** of the parent page are still manipulated by the code below.
+    ** That is OK; at this point the parent page is guaranteed to
+ ** be marked as dirty. Returning an error code will cause a
+ ** rollback, undoing any changes made to the parent page.
+ */
+ if( ISAUTOVACUUM ){
+ ptrmapPut(pBt, pgnoNew, PTRMAP_BTREE, pParent->pgno, &rc);
+ if( szCell>pNew->minLocal ){
+ ptrmapPutOvflPtr(pNew, pNew, pCell, &rc);
+ }
+ }
+
+ /* Create a divider cell to insert into pParent. The divider cell
+ ** consists of a 4-byte page number (the page number of pPage) and
+ ** a variable length key value (which must be the same value as the
+ ** largest key on pPage).
+ **
+ ** To find the largest key value on pPage, first find the right-most
+ ** cell on pPage. The first two fields of this cell are the
+ ** record-length (a variable length integer at most 32-bits in size)
+ ** and the key value (a variable length integer, may have any value).
+ ** The first of the while(...) loops below skips over the record-length
+ ** field. The second while(...) loop copies the key value from the
+ ** cell on pPage into the pSpace buffer.
+ */
+ pCell = findCell(pPage, pPage->nCell-1);
+ pStop = &pCell[9];
+ while( (*(pCell++)&0x80) && pCell<pStop );
+ pStop = &pCell[9];
+ while( ((*(pOut++) = *(pCell++))&0x80) && pCell<pStop );
+
+ /* Insert the new divider cell into pParent. */
+ if( rc==SQLITE_OK ){
+ insertCell(pParent, pParent->nCell, pSpace, (int)(pOut-pSpace),
+ 0, pPage->pgno, &rc);
+ }
+
+ /* Set the right-child pointer of pParent to point to the new page. */
+ put4byte(&pParent->aData[pParent->hdrOffset+8], pgnoNew);
+
+ /* Release the reference to the new page. */
+ releasePage(pNew);
+ }
+
+ return rc;
+}
+#endif /* SQLITE_OMIT_QUICKBALANCE */
+
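+#if 0
+/* Illustrative sketch, not compiled into the build: decode the varint
+** format that the two while(...) loops in balance_quick() skip over and
+** copy. Bytes contribute 7 bits each while their high bit is set; a
+** ninth byte, when present, contributes all 8 bits. This is why a
+** divider cell is at most 4 (page number) + 9 (varint) == 13 bytes.
+** The function name is an assumption made up for this example. */
+static u64 exampleGetVarint(const u8 *p){
+  u64 v = 0;
+  int i;
+  for(i=0; i<8 && (p[i] & 0x80); i++){
+    v = (v<<7) | (p[i] & 0x7f);   /* high bit set: more bytes follow */
+  }
+  if( i==8 ){
+    v = (v<<8) | p[8];            /* ninth byte uses all 8 bits */
+  }else{
+    v = (v<<7) | p[i];            /* final byte has its high bit clear */
+  }
+  return v;
+}
+#endif
+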
+#if 0
+/*
+** This function does not contribute anything to the operation of SQLite.
+** It is sometimes activated temporarily while debugging code responsible
+** for setting pointer-map entries.
+*/
+static int ptrmapCheckPages(MemPage **apPage, int nPage){
+ int i, j;
+ for(i=0; i<nPage; i++){
+ Pgno n;
+ u8 e;
+ MemPage *pPage = apPage[i];
+ BtShared *pBt = pPage->pBt;
+ assert( pPage->isInit );
+
+ for(j=0; j<pPage->nCell; j++){
+ CellInfo info;
+ u8 *z;
+
+ z = findCell(pPage, j);
+ pPage->xParseCell(pPage, z, &info);
+ if( info.nLocal<info.nPayload ){
+ Pgno ovfl = get4byte(&z[info.nSize-4]);
+ ptrmapGet(pBt, ovfl, &e, &n);
+ assert( n==pPage->pgno && e==PTRMAP_OVERFLOW1 );
+ }
+ if( !pPage->leaf ){
+ Pgno child = get4byte(z);
+ ptrmapGet(pBt, child, &e, &n);
+ assert( n==pPage->pgno && e==PTRMAP_BTREE );
+ }
+ }
+ if( !pPage->leaf ){
+ Pgno child = get4byte(&pPage->aData[pPage->hdrOffset+8]);
+ ptrmapGet(pBt, child, &e, &n);
+ assert( n==pPage->pgno && e==PTRMAP_BTREE );
+ }
+ }
+ return 1;
+}
+#endif
+
+/*
+** This function is used to copy the contents of the b-tree node stored
+** on page pFrom to page pTo. If page pFrom was not a leaf page, then
+** the pointer-map entries for each child page are updated so that the
+** parent page stored in the pointer map is page pTo. If pFrom contained
+** any cells with overflow page pointers, then the corresponding pointer
+** map entries are also updated so that the parent page is page pTo.
+**
+** If pFrom is currently carrying any overflow cells (entries in the
+** MemPage.apOvfl[] array), they are not copied to pTo.
+**
+** Before returning, page pTo is reinitialized using btreeInitPage().
+**
+** The performance of this function is not critical. It is only used by
+** the balance_shallower() and balance_deeper() procedures, neither of
+** which are called often under normal circumstances.
+*/
+static void copyNodeContent(MemPage *pFrom, MemPage *pTo, int *pRC){
+ if( (*pRC)==SQLITE_OK ){
+ BtShared * const pBt = pFrom->pBt;
+ u8 * const aFrom = pFrom->aData;
+ u8 * const aTo = pTo->aData;
+ int const iFromHdr = pFrom->hdrOffset;
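+ /* Page 1 carries the 100-byte database file header, so when copying
+ ** into page 1 the b-tree header starts at offset 100, not 0. */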
+ int const iToHdr = ((pTo->pgno==1) ? 100 : 0);
+ int rc;
+ int iData;
+
+
+ assert( pFrom->isInit );
+ assert( pFrom->nFree>=iToHdr );
+ assert( get2byte(&aFrom[iFromHdr+5]) <= (int)pBt->usableSize );
+
+ /* Copy the b-tree node content from page pFrom to page pTo. */
+ iData = get2byte(&aFrom[iFromHdr+5]);
+ memcpy(&aTo[iData], &aFrom[iData], pBt->usableSize-iData);
+ memcpy(&aTo[iToHdr], &aFrom[iFromHdr], pFrom->cellOffset + 2*pFrom->nCell);
+
+ /* Reinitialize page pTo so that the contents of the MemPage structure
+ ** match the new data. The initialization of pTo can actually fail under
+ ** fairly obscure circumstances, even though it is a copy of initialized
+ ** page pFrom.
+ */
+ pTo->isInit = 0;
+ rc = btreeInitPage(pTo);
+ if( rc==SQLITE_OK ) rc = btreeComputeFreeSpace(pTo);
+ if( rc!=SQLITE_OK ){
+ *pRC = rc;
+ return;
+ }
+
+ /* If this is an auto-vacuum database, update the pointer-map entries
+ ** for any b-tree or overflow pages that pTo now contains the pointers to.
+ */
+ if( ISAUTOVACUUM ){
+ *pRC = setChildPtrmaps(pTo);
+ }
+ }
+}
+
+/*
+** This routine redistributes cells on the iParentIdx'th child of pParent
+** (hereafter "the page") and up to 2 siblings so that all pages have about the
+** same amount of free space. Usually a single sibling on either side of the
+** page are used in the balancing, though both siblings might come from one
+** side if the page is the first or last child of its parent. If the page
+** has fewer than 2 siblings (something which can only happen if the page
+** is a root page or a child of a root page) then all available siblings
+** participate in the balancing.
+**
+** The number of siblings of the page might be increased or decreased by
+** one or two in an effort to keep pages nearly full but not over full.
+**
+** Note that when this routine is called, some of the cells on the page
+** might not actually be stored in MemPage.aData[]. This can happen
+** if the page is overfull. This routine ensures that all cells allocated
+** to the page and its siblings fit into MemPage.aData[] before returning.
+**
+** In the course of balancing the page and its siblings, cells may be
+** inserted into or removed from the parent page (pParent). Doing so
+** may cause the parent page to become overfull or underfull. If this
+** happens, it is the responsibility of the caller to invoke the correct
+** balancing routine to fix this problem (see the balance() routine).
+**
+** If this routine fails for any reason, it might leave the database
+** in a corrupted state. So if this routine fails, the database should
+** be rolled back.
+**
+** The third argument to this function, aOvflSpace, is a pointer to a
+** buffer big enough to hold one page. If while inserting cells into the parent
+** page (pParent) the parent page becomes overfull, this buffer is
+** used to store the parent's overflow cells. Because this function inserts
+** a maximum of four divider cells into the parent page, and the maximum
+** size of a cell stored within an internal node is always less than 1/4
+** of the page-size, the aOvflSpace[] buffer is guaranteed to be large
+** enough for all overflow cells.
+**
+** If aOvflSpace is set to a null pointer, this function returns
+** SQLITE_NOMEM.
+*/
+static int balance_nonroot(
+ MemPage *pParent, /* Parent page of siblings being balanced */
+ int iParentIdx, /* Index of "the page" in pParent */
+ u8 *aOvflSpace, /* page-size bytes of space for parent ovfl */
+ int isRoot, /* True if pParent is a root-page */
+ int bBulk /* True if this call is part of a bulk load */
+){
+ BtShared *pBt; /* The whole database */
+ int nMaxCells = 0; /* Allocated size of apCell, szCell, aFrom. */
+ int nNew = 0; /* Number of pages in apNew[] */
+ int nOld; /* Number of pages in apOld[] */
+ int i, j, k; /* Loop counters */
+ int nxDiv; /* Next divider slot in pParent->aCell[] */
+ int rc = SQLITE_OK; /* The return code */
+ u16 leafCorrection; /* 4 if pPage is a leaf. 0 if not */
+ int leafData; /* True if pPage is a leaf of a LEAFDATA tree */
+ int usableSpace; /* Bytes in pPage beyond the header */
+ int pageFlags; /* Value of pPage->aData[0] */
+ int iSpace1 = 0; /* First unused byte of aSpace1[] */
+ int iOvflSpace = 0; /* First unused byte of aOvflSpace[] */
+ int szScratch; /* Size of scratch memory requested */
+ MemPage *apOld[NB]; /* pPage and up to two siblings */
+ MemPage *apNew[NB+2]; /* pPage and up to NB siblings after balancing */
+ u8 *pRight; /* Location in parent of right-sibling pointer */
+ u8 *apDiv[NB-1]; /* Divider cells in pParent */
+ int cntNew[NB+2]; /* Index in b.apCell[] of cell after i-th page */
+ int cntOld[NB+2]; /* Old index in b.apCell[] */
+ int szNew[NB+2]; /* Combined size of cells placed on i-th page */
+ u8 *aSpace1; /* Space for copies of divider cells */
+ Pgno pgno; /* Temp var to store a page number in */
+ u8 abDone[NB+2]; /* True after i'th new page is populated */
+ Pgno aPgno[NB+2]; /* Page numbers of new pages before shuffling */
+ Pgno aPgOrder[NB+2]; /* Copy of aPgno[] used for sorting pages */
+ u16 aPgFlags[NB+2]; /* flags field of new pages before shuffling */
+ CellArray b; /* Parsed information on cells being balanced */
+
+ memset(abDone, 0, sizeof(abDone));
+ b.nCell = 0;
+ b.apCell = 0;
+ pBt = pParent->pBt;
+ assert( sqlite3_mutex_held(pBt->mutex) );
+ assert( sqlite3PagerIswriteable(pParent->pDbPage) );
+
+ /* At this point pParent may have at most one overflow cell. And if
+ ** this overflow cell is present, it must be the cell with
+ ** index iParentIdx. This scenario comes about when this function
+ ** is called (indirectly) from sqlite3BtreeDelete().
+ */
+ assert( pParent->nOverflow==0 || pParent->nOverflow==1 );
+ assert( pParent->nOverflow==0 || pParent->aiOvfl[0]==iParentIdx );
+
+ if( !aOvflSpace ){
+ return SQLITE_NOMEM_BKPT;
+ }
+ assert( pParent->nFree>=0 );
+
+ /* Find the sibling pages to balance. Also locate the cells in pParent
+ ** that divide the siblings. An attempt is made to find NN siblings on
+ ** either side of pPage. More siblings are taken from one side, however,
+ ** if there are fewer than NN siblings on the other side. If pParent
+ ** has NB or fewer children then all children of pParent are taken.
+ **
+ ** This loop also drops the divider cells from the parent page. This
+ ** way, the remainder of the function does not have to deal with any
+ ** overflow cells in the parent page, since if any existed they will
+ ** have already been removed.
+ */
+ i = pParent->nOverflow + pParent->nCell;
+ if( i<2 ){
+ nxDiv = 0;
+ }else{
+ assert( bBulk==0 || bBulk==1 );
+ if( iParentIdx==0 ){
+ nxDiv = 0;
+ }else if( iParentIdx==i ){
+ nxDiv = i-2+bBulk;
+ }else{
+ nxDiv = iParentIdx-1;
+ }
+ i = 2-bBulk;
+ }
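+ /* For example, when balancing the right-most child (iParentIdx==i)
+ ** with bBulk==0, nxDiv becomes i-2, so the page and its two left-hand
+ ** siblings take part in the balance. */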
+ nOld = i+1;
+ if( (i+nxDiv-pParent->nOverflow)==pParent->nCell ){
+ pRight = &pParent->aData[pParent->hdrOffset+8];
+ }else{
+ pRight = findCell(pParent, i+nxDiv-pParent->nOverflow);
+ }
+ pgno = get4byte(pRight);
+ while( 1 ){
+ rc = getAndInitPage(pBt, pgno, &apOld[i], 0, 0);
+ if( rc ){
+ memset(apOld, 0, (i+1)*sizeof(MemPage*));
+ goto balance_cleanup;
+ }
+ if( apOld[i]->nFree<0 ){
+ rc = btreeComputeFreeSpace(apOld[i]);
+ if( rc ){
+ memset(apOld, 0, (i)*sizeof(MemPage*));
+ goto balance_cleanup;
+ }
+ }
+ if( (i--)==0 ) break;
+
+ if( pParent->nOverflow && i+nxDiv==pParent->aiOvfl[0] ){
+ apDiv[i] = pParent->apOvfl[0];
+ pgno = get4byte(apDiv[i]);
+ szNew[i] = pParent->xCellSize(pParent, apDiv[i]);
+ pParent->nOverflow = 0;
+ }else{
+ apDiv[i] = findCell(pParent, i+nxDiv-pParent->nOverflow);
+ pgno = get4byte(apDiv[i]);
+ szNew[i] = pParent->xCellSize(pParent, apDiv[i]);
+
+ /* Drop the cell from the parent page. apDiv[i] still points to
+ ** the cell within the parent, even though it has been dropped.
+ ** This is safe because dropping a cell only overwrites the first
+ ** four bytes of it, and this function does not need the first
+ ** four bytes of the divider cell. So the pointer is safe to use
+ ** later on.
+ **
+ ** But not if we are in secure-delete mode. In secure-delete mode,
+ ** the dropCell() routine will overwrite the entire cell with zeroes.
+ ** In this case, temporarily copy the cell into the aOvflSpace[]
+ ** buffer. It will be copied out again as soon as the aSpace[] buffer
+ ** is allocated. */
+ if( pBt->btsFlags & BTS_FAST_SECURE ){
+ int iOff;
+
+ iOff = SQLITE_PTR_TO_INT(apDiv[i]) - SQLITE_PTR_TO_INT(pParent->aData);
+ if( (iOff+szNew[i])>(int)pBt->usableSize ){
+ rc = SQLITE_CORRUPT_BKPT;
+ memset(apOld, 0, (i+1)*sizeof(MemPage*));
+ goto balance_cleanup;
+ }else{
+ memcpy(&aOvflSpace[iOff], apDiv[i], szNew[i]);
+ apDiv[i] = &aOvflSpace[apDiv[i]-pParent->aData];
+ }
+ }
+ dropCell(pParent, i+nxDiv-pParent->nOverflow, szNew[i], &rc);
+ }
+ }
+
+ /* Make nMaxCells a multiple of 4 in order to preserve 8-byte
+ ** alignment */
+ nMaxCells = nOld*(MX_CELL(pBt) + ArraySize(pParent->apOvfl));
+ nMaxCells = (nMaxCells + 3)&~3;
+
+ /*
+ ** Allocate space for memory structures
+ */
+ szScratch =
+ nMaxCells*sizeof(u8*) /* b.apCell */
+ + nMaxCells*sizeof(u16) /* b.szCell */
+ + pBt->pageSize; /* aSpace1 */
+
+ assert( szScratch<=7*(int)pBt->pageSize );
+ b.apCell = sqlite3StackAllocRaw(0, szScratch );
+ if( b.apCell==0 ){
+ rc = SQLITE_NOMEM_BKPT;
+ goto balance_cleanup;
+ }
+ b.szCell = (u16*)&b.apCell[nMaxCells];
+ aSpace1 = (u8*)&b.szCell[nMaxCells];
+ assert( EIGHT_BYTE_ALIGNMENT(aSpace1) );
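+ /* Scratch layout: [nMaxCells u8* apCell][nMaxCells u16 szCell][aSpace1].
+ ** Rounding nMaxCells up to a multiple of 4 above (e.g. 5 -> 8) makes
+ ** both prefixes a multiple of 8 bytes, keeping aSpace1 8-byte aligned. */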
+
+ /*
+ ** Load pointers to all cells on sibling pages and the divider cells
+ ** into the local b.apCell[] array. Make copies of the divider cells
+ ** into space obtained from aSpace1[]. The divider cells have already
+ ** been removed from pParent.
+ **
+ ** If the siblings are on leaf pages, then the child pointers of the
+ ** divider cells are stripped from the cells before they are copied
+ ** into aSpace1[]. In this way, all cells in b.apCell[] are without
+ ** child pointers. If siblings are not leaves, then all cells in
+ ** b.apCell[] include child pointers. Either way, all cells in b.apCell[]
+ ** are alike.
+ **
+ ** leafCorrection: 4 if pPage is a leaf. 0 if pPage is not a leaf.
+ ** leafData: 1 if pPage holds key+data and pParent holds only keys.
+ */
+ b.pRef = apOld[0];
+ leafCorrection = b.pRef->leaf*4;
+ leafData = b.pRef->intKeyLeaf;
+ for(i=0; i<nOld; i++){
+ MemPage *pOld = apOld[i];
+ int limit = pOld->nCell;
+ u8 *aData = pOld->aData;
+ u16 maskPage = pOld->maskPage;
+ u8 *piCell = aData + pOld->cellOffset;
+ u8 *piEnd;
+ VVA_ONLY( int nCellAtStart = b.nCell; )
+
+ /* Verify that all sibling pages are of the same "type" (table-leaf,
+ ** table-interior, index-leaf, or index-interior).
+ */
+ if( pOld->aData[0]!=apOld[0]->aData[0] ){
+ rc = SQLITE_CORRUPT_BKPT;
+ goto balance_cleanup;
+ }
+
+ /* Load b.apCell[] with pointers to all cells in pOld. If pOld
+ ** contains overflow cells, include them in the b.apCell[] array
+ ** in the correct spot.
+ **
+ ** Note that when there are multiple overflow cells, it is always the
+ ** case that they are sequential and adjacent. This invariant arises
+ ** because multiple overflows can only occur when inserting divider
+ ** cells into a parent on a prior balance, and divider cells are always
+ ** adjacent and are inserted in order. There is an assert() tagged
+ ** with "NOTE 1" in the overflow cell insertion loop to prove this
+ ** invariant.
+ **
+ ** This must be done in advance. Once the balance starts, the cell
+ ** offset section of the btree page will be overwritten and we will no
+ ** longer be able to find the cells if a pointer to each cell is not saved
+ ** first.
+ */
+ memset(&b.szCell[b.nCell], 0, sizeof(b.szCell[0])*(limit+pOld->nOverflow));
+ if( pOld->nOverflow>0 ){
+ if( NEVER(limit<pOld->aiOvfl[0]) ){
+ rc = SQLITE_CORRUPT_BKPT;
+ goto balance_cleanup;
+ }
+ limit = pOld->aiOvfl[0];
+ for(j=0; j<limit; j++){
+ b.apCell[b.nCell] = aData + (maskPage & get2byteAligned(piCell));
+ piCell += 2;
+ b.nCell++;
+ }
+ for(k=0; k<pOld->nOverflow; k++){
+ assert( k==0 || pOld->aiOvfl[k-1]+1==pOld->aiOvfl[k] );/* NOTE 1 */
+ b.apCell[b.nCell] = pOld->apOvfl[k];
+ b.nCell++;
+ }
+ }
+ piEnd = aData + pOld->cellOffset + 2*pOld->nCell;
+ while( piCell<piEnd ){
+ assert( b.nCell<nMaxCells );
+ b.apCell[b.nCell] = aData + (maskPage & get2byteAligned(piCell));
+ piCell += 2;
+ b.nCell++;
+ }
+ assert( (b.nCell-nCellAtStart)==(pOld->nCell+pOld->nOverflow) );
+
+ cntOld[i] = b.nCell;
+ if( i<nOld-1 && !leafData){
+ u16 sz = (u16)szNew[i];
+ u8 *pTemp;
+ assert( b.nCell<nMaxCells );
+ b.szCell[b.nCell] = sz;
+ pTemp = &aSpace1[iSpace1];
+ iSpace1 += sz;
+ assert( sz<=pBt->maxLocal+23 );
+ assert( iSpace1 <= (int)pBt->pageSize );
+ memcpy(pTemp, apDiv[i], sz);
+ b.apCell[b.nCell] = pTemp+leafCorrection;
+ assert( leafCorrection==0 || leafCorrection==4 );
+ b.szCell[b.nCell] = b.szCell[b.nCell] - leafCorrection;
+ if( !pOld->leaf ){
+ assert( leafCorrection==0 );
+ assert( pOld->hdrOffset==0 );
+ /* The right pointer of the child page pOld becomes the left
+ ** pointer of the divider cell */
+ memcpy(b.apCell[b.nCell], &pOld->aData[8], 4);
+ }else{
+ assert( leafCorrection==4 );
+ while( b.szCell[b.nCell]<4 ){
+ /* Do not allow any cells smaller than 4 bytes. If a smaller cell
+ ** does exist, pad it with 0x00 bytes. */
+ assert( b.szCell[b.nCell]==3 || CORRUPT_DB );
+ assert( b.apCell[b.nCell]==&aSpace1[iSpace1-3] || CORRUPT_DB );
+ aSpace1[iSpace1++] = 0x00;
+ b.szCell[b.nCell]++;
+ }
+ }
+ b.nCell++;
+ }
+ }
+
+ /*
+ ** Figure out the number of pages needed to hold all b.nCell cells.
+ ** Store this number in "k". Also compute szNew[] which is the total
+ ** size of all cells on the i-th page and cntNew[] which is the index
+ ** in b.apCell[] of the cell that divides page i from page i+1.
+ ** cntNew[k] should equal b.nCell.
+ **
+ ** Values computed by this block:
+ **
+ ** k: The total number of sibling pages
+ ** szNew[i]: Space used on the i-th sibling page.
+ ** cntNew[i]: Index in b.apCell[] and b.szCell[] for the first cell to
+ ** the right of the i-th sibling page.
+ ** usableSpace: Number of bytes of space available on each sibling.
+ **
+ */
+ usableSpace = pBt->usableSize - 12 + leafCorrection;
+ for(i=k=0; i<nOld; i++, k++){
+ MemPage *p = apOld[i];
+ b.apEnd[k] = p->aDataEnd;
+ b.ixNx[k] = cntOld[i];
+ if( k && b.ixNx[k]==b.ixNx[k-1] ){
+ k--; /* Omit b.ixNx[] entry for child pages with no cells */
+ }
+ if( !leafData ){
+ k++;
+ b.apEnd[k] = pParent->aDataEnd;
+ b.ixNx[k] = cntOld[i]+1;
+ }
+ assert( p->nFree>=0 );
+ szNew[i] = usableSpace - p->nFree;
+ for(j=0; j<p->nOverflow; j++){
+ szNew[i] += 2 + p->xCellSize(p, p->apOvfl[j]);
+ }
+ cntNew[i] = cntOld[i];
+ }
+ k = nOld;
+ for(i=0; i<k; i++){
+ int sz;
+ while( szNew[i]>usableSpace ){
+ if( i+1>=k ){
+ k = i+2;
+ if( k>NB+2 ){ rc = SQLITE_CORRUPT_BKPT; goto balance_cleanup; }
+ szNew[k-1] = 0;
+ cntNew[k-1] = b.nCell;
+ }
+ sz = 2 + cachedCellSize(&b, cntNew[i]-1);
+ szNew[i] -= sz;
+ if( !leafData ){
+ if( cntNew[i]<b.nCell ){
+ sz = 2 + cachedCellSize(&b, cntNew[i]);
+ }else{
+ sz = 0;
+ }
+ }
+ szNew[i+1] += sz;
+ cntNew[i]--;
+ }
+ while( cntNew[i]<b.nCell ){
+ sz = 2 + cachedCellSize(&b, cntNew[i]);
+ if( szNew[i]+sz>usableSpace ) break;
+ szNew[i] += sz;
+ cntNew[i]++;
+ if( !leafData ){
+ if( cntNew[i]<b.nCell ){
+ sz = 2 + cachedCellSize(&b, cntNew[i]);
+ }else{
+ sz = 0;
+ }
+ }
+ szNew[i+1] -= sz;
+ }
+ if( cntNew[i]>=b.nCell ){
+ k = i+1;
+ }else if( cntNew[i] <= (i>0 ? cntNew[i-1] : 0) ){
+ rc = SQLITE_CORRUPT_BKPT;
+ goto balance_cleanup;
+ }
+ }
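+ /* Rough illustration (cell counts assumed for the example): with ten
+ ** equal-sized cells and room for four per page, the loop above leaves
+ ** k==3 and cntNew[] == {4,8,10}: pages are filled left to right, so
+ ** the right-most sibling may end up nearly empty. */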
+
+ /*
+ ** The packing computed by the previous block is biased toward the siblings
+ ** on the left side (siblings with smaller keys). The left siblings are
+ ** always nearly full, while the right-most sibling might be nearly empty.
+ ** The next block of code attempts to adjust the packing of siblings to
+ ** get a better balance.
+ **
+ ** This adjustment is more than an optimization. The packing above might
+ ** be so out of balance as to be illegal. For example, the right-most
+ ** sibling might be completely empty. This adjustment is not optional.
+ */
+ for(i=k-1; i>0; i--){
+ int szRight = szNew[i]; /* Size of sibling on the right */
+ int szLeft = szNew[i-1]; /* Size of sibling on the left */
+ int r; /* Index of right-most cell in left sibling */
+ int d; /* Index of first cell to the left of right sibling */
+
+ r = cntNew[i-1] - 1;
+ d = r + 1 - leafData;
+ (void)cachedCellSize(&b, d);
+ do{
+ assert( d<nMaxCells );
+ assert( r<nMaxCells );
+ (void)cachedCellSize(&b, r);
+ if( szRight!=0
+ && (bBulk || szRight+b.szCell[d]+2 > szLeft-(b.szCell[r]+(i==k-1?0:2)))){
+ break;
+ }
+ szRight += b.szCell[d] + 2;
+ szLeft -= b.szCell[r] + 2;
+ cntNew[i-1] = r;
+ r--;
+ d--;
+ }while( r>=0 );
+ szNew[i] = szRight;
+ szNew[i-1] = szLeft;
+ if( cntNew[i-1] <= (i>1 ? cntNew[i-2] : 0) ){
+ rc = SQLITE_CORRUPT_BKPT;
+ goto balance_cleanup;
+ }
+ }
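+ /* Continuing the rough illustration: working from the right-most pair
+ ** of siblings leftward, cells are moved off the end of the left
+ ** sibling into the right one until the two are about the same size,
+ ** e.g. cntNew[] {4,8,10} becomes {4,7,10}. */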
+
+ /* Sanity check: For a non-corrupt database file one of the following
+ ** must be true:
+ ** (1) We found one or more cells (cntNew[0]>0), or
+ ** (2) pPage is a virtual root page. A virtual root page arises when
+ ** the real root page is page 1 and we are the only child of
+ ** that page.
+ */
+ assert( cntNew[0]>0 || (pParent->pgno==1 && pParent->nCell==0) || CORRUPT_DB);
+ TRACE(("BALANCE: old: %d(nc=%d) %d(nc=%d) %d(nc=%d)\n",
+ apOld[0]->pgno, apOld[0]->nCell,
+ nOld>=2 ? apOld[1]->pgno : 0, nOld>=2 ? apOld[1]->nCell : 0,
+ nOld>=3 ? apOld[2]->pgno : 0, nOld>=3 ? apOld[2]->nCell : 0
+ ));
+
+ /*
+ ** Allocate k new pages. Reuse old pages where possible.
+ */
+ pageFlags = apOld[0]->aData[0];
+ for(i=0; i<k; i++){
+ MemPage *pNew;
+ if( i<nOld ){
+ pNew = apNew[i] = apOld[i];
+ apOld[i] = 0;
+ rc = sqlite3PagerWrite(pNew->pDbPage);
+ nNew++;
+ if( rc ) goto balance_cleanup;
+ }else{
+ assert( i>0 );
+ rc = allocateBtreePage(pBt, &pNew, &pgno, (bBulk ? 1 : pgno), 0);
+ if( rc ) goto balance_cleanup;
+ zeroPage(pNew, pageFlags);
+ apNew[i] = pNew;
+ nNew++;
+ cntOld[i] = b.nCell;
+
+ /* Set the pointer-map entry for the new sibling page. */
+ if( ISAUTOVACUUM ){
+ ptrmapPut(pBt, pNew->pgno, PTRMAP_BTREE, pParent->pgno, &rc);
+ if( rc!=SQLITE_OK ){
+ goto balance_cleanup;
+ }
+ }
+ }
+ }
+
+ /*
+ ** Reassign page numbers so that the new pages are in ascending order.
+ ** This helps to keep entries in the disk file in order so that a scan
+ ** of the table is closer to a linear scan through the file. That in turn
+ ** helps the operating system to deliver pages from the disk more rapidly.
+ **
+ ** An O(n^2) insertion sort algorithm is used, but since n is never more
+ ** than (NB+2) (a small constant), that should not be a problem.
+ **
+ ** When NB==3, this one optimization makes the database about 25% faster
+ ** for large insertions and deletions.
+ */
+ for(i=0; i<nNew; i++){
+ aPgOrder[i] = aPgno[i] = apNew[i]->pgno;
+ aPgFlags[i] = apNew[i]->pDbPage->flags;
+ for(j=0; j<i; j++){
+ if( aPgno[j]==aPgno[i] ){
+ /* This branch is taken if the set of sibling pages somehow contains
+ ** duplicate entries. This can happen if the database is corrupt.
+ ** It would be simpler to detect this as part of the loop below, but
+ ** we do the detection here in order to avoid populating the pager
+ ** cache with two separate objects associated with the same
+ ** page number. */
+ assert( CORRUPT_DB );
+ rc = SQLITE_CORRUPT_BKPT;
+ goto balance_cleanup;
+ }
+ }
+ }
+ for(i=0; i<nNew; i++){
+ int iBest = 0; /* aPgno[] index of page number to use */
+ for(j=1; j<nNew; j++){
+ if( aPgOrder[j]<aPgOrder[iBest] ) iBest = j;
+ }
+ pgno = aPgOrder[iBest];
+ aPgOrder[iBest] = 0xffffffff;
+ if( iBest!=i ){
+ if( iBest>i ){
+ sqlite3PagerRekey(apNew[iBest]->pDbPage, pBt->nPage+iBest+1, 0);
+ }
+ sqlite3PagerRekey(apNew[i]->pDbPage, pgno, aPgFlags[iBest]);
+ apNew[i]->pgno = pgno;
+ }
+ }
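+ /* Example (page numbers assumed): if the new siblings were allocated
+ ** page numbers {7,3,5}, each pass selects the smallest remaining
+ ** number, so apNew[0..2] end up as {3,5,7}. A page that currently
+ ** occupies a wanted number is first parked at a number past the end
+ ** of the file (pBt->nPage+iBest+1) to free the slot. */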
+
+ TRACE(("BALANCE: new: %d(%d nc=%d) %d(%d nc=%d) %d(%d nc=%d) "
+ "%d(%d nc=%d) %d(%d nc=%d)\n",
+ apNew[0]->pgno, szNew[0], cntNew[0],
+ nNew>=2 ? apNew[1]->pgno : 0, nNew>=2 ? szNew[1] : 0,
+ nNew>=2 ? cntNew[1] - cntNew[0] - !leafData : 0,
+ nNew>=3 ? apNew[2]->pgno : 0, nNew>=3 ? szNew[2] : 0,
+ nNew>=3 ? cntNew[2] - cntNew[1] - !leafData : 0,
+ nNew>=4 ? apNew[3]->pgno : 0, nNew>=4 ? szNew[3] : 0,
+ nNew>=4 ? cntNew[3] - cntNew[2] - !leafData : 0,
+ nNew>=5 ? apNew[4]->pgno : 0, nNew>=5 ? szNew[4] : 0,
+ nNew>=5 ? cntNew[4] - cntNew[3] - !leafData : 0
+ ));
+
+ assert( sqlite3PagerIswriteable(pParent->pDbPage) );
+ assert( nNew>=1 && nNew<=ArraySize(apNew) );
+ assert( apNew[nNew-1]!=0 );
+ put4byte(pRight, apNew[nNew-1]->pgno);
+
+ /* If the sibling pages are not leaves, ensure that the right-child pointer
+ ** of the right-most new sibling page is set to the value that was
+ ** originally in the same field of the right-most old sibling page. */
+ if( (pageFlags & PTF_LEAF)==0 && nOld!=nNew ){
+ MemPage *pOld = (nNew>nOld ? apNew : apOld)[nOld-1];
+ memcpy(&apNew[nNew-1]->aData[8], &pOld->aData[8], 4);
+ }
+
+ /* Make any required updates to pointer map entries associated with
+ ** cells stored on sibling pages following the balance operation. Pointer
+ ** map entries associated with divider cells are set by the insertCell()
+ ** routine. The associated pointer map entries are:
+ **
+ ** a) if the cell contains a reference to an overflow chain, the
+ ** entry associated with the first page in the overflow chain, and
+ **
+ ** b) if the sibling pages are not leaves, the child page associated
+ ** with the cell.
+ **
+ ** If the sibling pages are not leaves, then the pointer map entry
+ ** associated with the right-child of each sibling may also need to be
+ ** updated. This happens below, after the sibling pages have been
+ ** populated, not here.
+ */
+ if( ISAUTOVACUUM ){
+ MemPage *pOld;
+ MemPage *pNew = pOld = apNew[0];
+ int cntOldNext = pNew->nCell + pNew->nOverflow;
+ int iNew = 0;
+ int iOld = 0;
+
+ for(i=0; i<b.nCell; i++){
+ u8 *pCell = b.apCell[i];
+ while( i==cntOldNext ){
+ iOld++;
+ assert( iOld<nNew || iOld<nOld );
+ assert( iOld>=0 && iOld<NB );
+ pOld = iOld<nNew ? apNew[iOld] : apOld[iOld];
+ cntOldNext += pOld->nCell + pOld->nOverflow + !leafData;
+ }
+ if( i==cntNew[iNew] ){
+ pNew = apNew[++iNew];
+ if( !leafData ) continue;
+ }
+
+ /* Cell pCell is destined for new sibling page pNew. Originally, it
+ ** was either part of sibling page iOld (possibly an overflow cell),
+ ** or else the divider cell to the left of sibling page iOld. So,
+ ** if sibling page iOld had the same page number as pNew, and if
+ ** pCell really was a part of sibling page iOld (not a divider or
+ ** overflow cell), we can skip updating the pointer map entries. */
+ if( iOld>=nNew
+ || pNew->pgno!=aPgno[iOld]
+ || !SQLITE_WITHIN(pCell,pOld->aData,pOld->aDataEnd)
+ ){
+ if( !leafCorrection ){
+ ptrmapPut(pBt, get4byte(pCell), PTRMAP_BTREE, pNew->pgno, &rc);
+ }
+ if( cachedCellSize(&b,i)>pNew->minLocal ){
+ ptrmapPutOvflPtr(pNew, pOld, pCell, &rc);
+ }
+ if( rc ) goto balance_cleanup;
+ }
+ }
+ }
+
+ /* Insert new divider cells into pParent. */
+ for(i=0; i<nNew-1; i++){
+ u8 *pCell;
+ u8 *pTemp;
+ int sz;
+ MemPage *pNew = apNew[i];
+ j = cntNew[i];
+
+ assert( j<nMaxCells );
+ assert( b.apCell[j]!=0 );
+ pCell = b.apCell[j];
+ sz = b.szCell[j] + leafCorrection;
+ pTemp = &aOvflSpace[iOvflSpace];
+ if( !pNew->leaf ){
+ memcpy(&pNew->aData[8], pCell, 4);
+ }else if( leafData ){
+ /* If the tree is a leaf-data tree, and the siblings are leaves,
+ ** then there is no divider cell in b.apCell[]. Instead, the divider
+ ** cell consists only of the integer key for the right-most cell of
+ ** the sibling page assembled above.
+ */
+ CellInfo info;
+ j--;
+ pNew->xParseCell(pNew, b.apCell[j], &info);
+ pCell = pTemp;
+ sz = 4 + putVarint(&pCell[4], info.nKey);
+ pTemp = 0;
+ }else{
+ pCell -= 4;
+ /* Obscure case for non-leaf-data trees: If the cell at pCell was
+ ** previously stored on a leaf node, and its reported size was 4
+ ** bytes, then it may actually be smaller than this
+ ** (see btreeParseCellPtr(), 4 bytes is the minimum size of
+ ** any cell). But it is important to pass the correct size to
+ ** insertCell(), so reparse the cell now.
+ **
+ ** This can only happen for b-trees used to evaluate "IN (SELECT ...)"
+ ** and WITHOUT ROWID tables with exactly one column which is the
+ ** primary key.
+ */
+ if( b.szCell[j]==4 ){
+ assert(leafCorrection==4);
+ sz = pParent->xCellSize(pParent, pCell);
+ }
+ }
+ iOvflSpace += sz;
+ assert( sz<=pBt->maxLocal+23 );
+ assert( iOvflSpace <= (int)pBt->pageSize );
+ insertCell(pParent, nxDiv+i, pCell, sz, pTemp, pNew->pgno, &rc);
+ if( rc!=SQLITE_OK ) goto balance_cleanup;
+ assert( sqlite3PagerIswriteable(pParent->pDbPage) );
+ }
+
+ /* Now update the actual sibling pages. The order in which they are updated
+ ** is important, as this code needs to avoid disrupting any page from which
+ ** cells may still need to be read. In practice, this means:
+ **
+ ** (1) If cells are moving left (from apNew[iPg] to apNew[iPg-1])
+ ** then it is not safe to update page apNew[iPg] until after
+ ** the left-hand sibling apNew[iPg-1] has been updated.
+ **
+ ** (2) If cells are moving right (from apNew[iPg] to apNew[iPg+1])
+ ** then it is not safe to update page apNew[iPg] until after
+ ** the right-hand sibling apNew[iPg+1] has been updated.
+ **
+ ** If neither of the above apply, the page is safe to update.
+ **
+ ** The iPg value in the following loop starts at nNew-1, goes down
+ ** to 0, then back up to nNew-1 again, thus making two passes over
+ ** the pages. On the initial downward pass, only condition (1) above
+ ** needs to be tested because (2) will always be true from the previous
+ ** step. On the upward pass, both conditions are always true, so the
+ ** upwards pass simply processes pages that were missed on the downward
+ ** pass.
+ */
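+ /* For example, with nNew==3 the loop below visits iPg in the order
+ ** 2,1,0,1,2; pages skipped on the downward pass (because a left-hand
+ ** sibling still needed to read cells from them) are handled on the way
+ ** back up, and abDone[] guarantees each page is edited exactly once. */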
+ for(i=1-nNew; i<nNew; i++){
+ int iPg = i<0 ? -i : i;
+ assert( iPg>=0 && iPg<nNew );
+ if( abDone[iPg] ) continue; /* Skip pages already processed */
+ if( i>=0 /* On the upwards pass, or... */
+ || cntOld[iPg-1]>=cntNew[iPg-1] /* Condition (1) is true */
+ ){
+ int iNew;
+ int iOld;
+ int nNewCell;
+
+ /* Verify condition (1): If cells are moving left, update iPg
+ ** only after iPg-1 has already been updated. */
+ assert( iPg==0 || cntOld[iPg-1]>=cntNew[iPg-1] || abDone[iPg-1] );
+
+ /* Verify condition (2): If cells are moving right, update iPg
+ ** only after iPg+1 has already been updated. */
+ assert( cntNew[iPg]>=cntOld[iPg] || abDone[iPg+1] );
+
+ if( iPg==0 ){
+ iNew = iOld = 0;
+ nNewCell = cntNew[0];
+ }else{
+ iOld = iPg<nOld ? (cntOld[iPg-1] + !leafData) : b.nCell;
+ iNew = cntNew[iPg-1] + !leafData;
+ nNewCell = cntNew[iPg] - iNew;
+ }
+
+ rc = editPage(apNew[iPg], iOld, iNew, nNewCell, &b);
+ if( rc ) goto balance_cleanup;
+ abDone[iPg]++;
+ apNew[iPg]->nFree = usableSpace-szNew[iPg];
+ assert( apNew[iPg]->nOverflow==0 );
+ assert( apNew[iPg]->nCell==nNewCell );
+ }
+ }
+
+ /* All pages have been processed exactly once */
+ assert( memcmp(abDone, "\01\01\01\01\01", nNew)==0 );
+
+ assert( nOld>0 );
+ assert( nNew>0 );
+
+ if( isRoot && pParent->nCell==0 && pParent->hdrOffset<=apNew[0]->nFree ){
+ /* The root page of the b-tree now contains no cells. The only sibling
+ ** page is the right-child of the parent. Copy the contents of the
+ ** child page into the parent, decreasing the overall height of the
+ ** b-tree structure by one. This is described as the "balance-shallower"
+ ** sub-algorithm in some documentation.
+ **
+ ** If this is an auto-vacuum database, the call to copyNodeContent()
+ ** sets all pointer-map entries corresponding to database image pages
+ ** for which the pointer is stored within the content being copied.
+ **
+ ** It is critical that the child page be defragmented before being
+ ** copied into the parent, because if the parent is page 1 then it will
+ ** be smaller than the child due to the database header, and so all the
+ ** free space needs to be up front.
+ */
+ assert( nNew==1 || CORRUPT_DB );
+ rc = defragmentPage(apNew[0], -1);
+ testcase( rc!=SQLITE_OK );
+ assert( apNew[0]->nFree ==
+ (get2byteNotZero(&apNew[0]->aData[5]) - apNew[0]->cellOffset
+ - apNew[0]->nCell*2)
+ || rc!=SQLITE_OK
+ );
+ copyNodeContent(apNew[0], pParent, &rc);
+ freePage(apNew[0], &rc);
+ }else if( ISAUTOVACUUM && !leafCorrection ){
+ /* Fix the pointer map entries associated with the right-child of each
+ ** sibling page. All other pointer map entries have already been taken
+ ** care of. */
+ for(i=0; i<nNew; i++){
+ u32 key = get4byte(&apNew[i]->aData[8]);
+ ptrmapPut(pBt, key, PTRMAP_BTREE, apNew[i]->pgno, &rc);
+ }
+ }
+
+ assert( pParent->isInit );
+ TRACE(("BALANCE: finished: old=%d new=%d cells=%d\n",
+ nOld, nNew, b.nCell));
+
+ /* Free any old pages that were not reused as new pages.
+ */
+ for(i=nNew; i<nOld; i++){
+ freePage(apOld[i], &rc);
+ }
+
+#if 0
+ if( ISAUTOVACUUM && rc==SQLITE_OK && apNew[0]->isInit ){
+ /* The ptrmapCheckPages() contains assert() statements that verify that
+ ** all pointer map pages are set correctly. This is helpful while
+ ** debugging. This is usually disabled because a corrupt database may
+ ** cause an assert() statement to fail. */
+ ptrmapCheckPages(apNew, nNew);
+ ptrmapCheckPages(&pParent, 1);
+ }
+#endif
+
+ /*
+ ** Cleanup before returning.
+ */
+balance_cleanup:
+ sqlite3StackFree(0, b.apCell);
+ for(i=0; i<nOld; i++){
+ releasePage(apOld[i]);
+ }
+ for(i=0; i<nNew; i++){
+ releasePage(apNew[i]);
+ }
+
+ return rc;
+}
+
+
+/*
+** This function is called when the root page of a b-tree structure is
+** overfull (has one or more overflow pages).
+**
+** A new child page is allocated and the contents of the current root
+** page, including overflow cells, are copied into the child. The root
+** page is then overwritten to make it an empty page with the right-child
+** pointer pointing to the new page.
+**
+** Before returning, all pointer-map entries corresponding to pages
+** that the new child-page now contains pointers to are updated. The
+** entry corresponding to the new right-child pointer of the root
+** page is also updated.
+**
+** If successful, *ppChild is set to contain a reference to the child
+** page and SQLITE_OK is returned. In this case the caller is required
+** to call releasePage() on *ppChild exactly once. If an error occurs,
+** an error code is returned and *ppChild is set to 0.
+*/
+static int balance_deeper(MemPage *pRoot, MemPage **ppChild){
+ int rc; /* Return value from subprocedures */
+ MemPage *pChild = 0; /* Pointer to a new child page */
+ Pgno pgnoChild = 0; /* Page number of the new child page */
+ BtShared *pBt = pRoot->pBt; /* The BTree */
+
+ assert( pRoot->nOverflow>0 );
+ assert( sqlite3_mutex_held(pBt->mutex) );
+
+ /* Make pRoot, the root page of the b-tree, writable. Allocate a new
+ ** page that will become the new right-child of pRoot. Copy the contents
+ ** of the node stored on pRoot into the new child page.
+ */
+ rc = sqlite3PagerWrite(pRoot->pDbPage);
+ if( rc==SQLITE_OK ){
+ rc = allocateBtreePage(pBt,&pChild,&pgnoChild,pRoot->pgno,0);
+ copyNodeContent(pRoot, pChild, &rc);
+ if( ISAUTOVACUUM ){
+ ptrmapPut(pBt, pgnoChild, PTRMAP_BTREE, pRoot->pgno, &rc);
+ }
+ }
+ if( rc ){
+ *ppChild = 0;
+ releasePage(pChild);
+ return rc;
+ }
+ assert( sqlite3PagerIswriteable(pChild->pDbPage) );
+ assert( sqlite3PagerIswriteable(pRoot->pDbPage) );
+ assert( pChild->nCell==pRoot->nCell || CORRUPT_DB );
+
+ TRACE(("BALANCE: copy root %d into %d\n", pRoot->pgno, pChild->pgno));
+
+ /* Copy the overflow cells from pRoot to pChild */
+ memcpy(pChild->aiOvfl, pRoot->aiOvfl,
+ pRoot->nOverflow*sizeof(pRoot->aiOvfl[0]));
+ memcpy(pChild->apOvfl, pRoot->apOvfl,
+ pRoot->nOverflow*sizeof(pRoot->apOvfl[0]));
+ pChild->nOverflow = pRoot->nOverflow;
+
+ /* Zero the contents of pRoot. Then install pChild as the right-child. */
+ zeroPage(pRoot, pChild->aData[0] & ~PTF_LEAF);
+ put4byte(&pRoot->aData[pRoot->hdrOffset+8], pgnoChild);
+
+ *ppChild = pChild;
+ return SQLITE_OK;
+}
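+/* Illustrative before/after view of balance_deeper() (sketch only):
+**
+**    before:   pRoot (overfull)        after:   pRoot (empty)
+**                                                 |
+**                                               pChild (overfull)
+**
+** The tree is now one level deeper; the next iteration of the loop in
+** balance() splits the overfull child via balance_nonroot(). */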
+
+/*
+** Return SQLITE_CORRUPT if any cursor other than pCur is currently valid
+** on the same B-tree as pCur.
+**
+** This can happen if a database is corrupt, with two or more SQL tables
+** pointing to the same b-tree. If an insert occurs on one SQL table
+** and causes a BEFORE TRIGGER to do a secondary insert on the other SQL
+** table linked to the same b-tree, and if that secondary insert causes a
+** rebalance, the rebalance can change content out from under the cursor
+** on the first SQL table, violating invariants on the first insert.
+*/
+static int anotherValidCursor(BtCursor *pCur){
+ BtCursor *pOther;
+ for(pOther=pCur->pBt->pCursor; pOther; pOther=pOther->pNext){
+ if( pOther!=pCur
+ && pOther->eState==CURSOR_VALID
+ && pOther->pPage==pCur->pPage
+ ){
+ return SQLITE_CORRUPT_BKPT;
+ }
+ }
+ return SQLITE_OK;
+}
+
+/*
+** The page that pCur currently points to has just been modified in
+** some way. This function figures out if this modification means the
+** tree needs to be balanced, and if so calls the appropriate balancing
+** routine. Balancing routines are:
+**
+** balance_quick()
+** balance_deeper()
+** balance_nonroot()
+*/
+static int balance(BtCursor *pCur){
+ int rc = SQLITE_OK;
+ const int nMin = pCur->pBt->usableSize * 2 / 3;
+ u8 aBalanceQuickSpace[13];
+ u8 *pFree = 0;
+
+ VVA_ONLY( int balance_quick_called = 0 );
+ VVA_ONLY( int balance_deeper_called = 0 );
+
+ do {
+ int iPage;
+ MemPage *pPage = pCur->pPage;
+
+ if( NEVER(pPage->nFree<0) && btreeComputeFreeSpace(pPage) ) break;
+ if( pPage->nOverflow==0 && pPage->nFree<=nMin ){
+ break;
+ }else if( (iPage = pCur->iPage)==0 ){
+ if( pPage->nOverflow && (rc = anotherValidCursor(pCur))==SQLITE_OK ){
+ /* The root page of the b-tree is overfull. In this case call the
+ ** balance_deeper() function to create a new child for the root-page
+ ** and copy the current contents of the root-page to it. The
+ ** next iteration of the do-loop will balance the child page.
+ */
+ assert( balance_deeper_called==0 );
+ VVA_ONLY( balance_deeper_called++ );
+ rc = balance_deeper(pPage, &pCur->apPage[1]);
+ if( rc==SQLITE_OK ){
+ pCur->iPage = 1;
+ pCur->ix = 0;
+ pCur->aiIdx[0] = 0;
+ pCur->apPage[0] = pPage;
+ pCur->pPage = pCur->apPage[1];
+ assert( pCur->pPage->nOverflow );
+ }
+ }else{
+ break;
+ }
+ }else{
+ MemPage * const pParent = pCur->apPage[iPage-1];
+ int const iIdx = pCur->aiIdx[iPage-1];
+
+ rc = sqlite3PagerWrite(pParent->pDbPage);
+ if( rc==SQLITE_OK && pParent->nFree<0 ){
+ rc = btreeComputeFreeSpace(pParent);
+ }
+ if( rc==SQLITE_OK ){
+#ifndef SQLITE_OMIT_QUICKBALANCE
+ if( pPage->intKeyLeaf
+ && pPage->nOverflow==1
+ && pPage->aiOvfl[0]==pPage->nCell
+ && pParent->pgno!=1
+ && pParent->nCell==iIdx
+ ){
+ /* Call balance_quick() to create a new sibling of pPage on which
+ ** to store the overflow cell. balance_quick() inserts a new cell
+ ** into pParent, which may cause pParent overflow. If this
+ ** happens, the next iteration of the do-loop will balance pParent
+ ** using either balance_nonroot() or balance_deeper(). Until this
+ ** happens, the overflow cell is stored in the aBalanceQuickSpace[]
+ ** buffer.
+ **
+ ** The purpose of the following assert() is to check that only a
+ ** single call to balance_quick() is made for each call to this
+ ** function. If this were not verified, a subtle bug involving reuse
+ ** of the aBalanceQuickSpace[] might sneak in.
+ */
+ assert( balance_quick_called==0 );
+ VVA_ONLY( balance_quick_called++ );
+ rc = balance_quick(pParent, pPage, aBalanceQuickSpace);
+ }else
+#endif
+ {
+ /* In this case, call balance_nonroot() to redistribute cells
+ ** between pPage and up to 2 of its sibling pages. This involves
+ ** modifying the contents of pParent, which may cause pParent to
+ ** become overfull or underfull. The next iteration of the do-loop
+ ** will balance the parent page to correct this.
+ **
+ ** If the parent page becomes overfull, the overflow cell or cells
+ ** are stored in the pSpace buffer allocated immediately below.
+ ** A subsequent iteration of the do-loop will deal with this by
+ ** calling balance_nonroot() (balance_deeper() may be called first,
+ ** but it doesn't deal with overflow cells - just moves them to a
+ ** different page). Once this subsequent call to balance_nonroot()
+ ** has completed, it is safe to release the pSpace buffer used by
+ ** the previous call, as the overflow cell data will have been
+ ** copied either into the body of a database page or into the new
+ ** pSpace buffer passed to the latter call to balance_nonroot().
+ */
+ u8 *pSpace = sqlite3PageMalloc(pCur->pBt->pageSize);
+ rc = balance_nonroot(pParent, iIdx, pSpace, iPage==1,
+ pCur->hints&BTREE_BULKLOAD);
+ if( pFree ){
+ /* If pFree is not NULL, it points to the pSpace buffer used
+ ** by a previous call to balance_nonroot(). Its contents are
+ ** now stored either on real database pages or within the
+ ** new pSpace buffer, so it may be safely freed here. */
+ sqlite3PageFree(pFree);
+ }
+
+ /* The pSpace buffer will be freed after the next call to
+ ** balance_nonroot(), or just before this function returns, whichever
+ ** comes first. */
+ pFree = pSpace;
+ }
+ }
+
+ pPage->nOverflow = 0;
+
+ /* The next iteration of the do-loop balances the parent page. */
+ releasePage(pPage);
+ pCur->iPage--;
+ assert( pCur->iPage>=0 );
+ pCur->pPage = pCur->apPage[pCur->iPage];
+ }
+ }while( rc==SQLITE_OK );
+
+ if( pFree ){
+ sqlite3PageFree(pFree);
+ }
+ return rc;
+}
+
+/* Overwrite content from pX into pDest. Only do the write if the
+** content is different from what is already there.
+*/
+static int btreeOverwriteContent(
+ MemPage *pPage, /* MemPage on which writing will occur */
+ u8 *pDest, /* Pointer to the place to start writing */
+ const BtreePayload *pX, /* Source of data to write */
+ int iOffset, /* Offset of first byte to write */
+ int iAmt /* Number of bytes to be written */
+){
+ int nData = pX->nData - iOffset;
+ if( nData<=0 ){
+ /* Overwriting with zeros */
+ int i;
+ for(i=0; i<iAmt && pDest[i]==0; i++){}
+ if( i<iAmt ){
+ int rc = sqlite3PagerWrite(pPage->pDbPage);
+ if( rc ) return rc;
+ memset(pDest + i, 0, iAmt - i);
+ }
+ }else{
+ if( nData<iAmt ){
+ /* Mixed read data and zeros at the end. Make a recursive call
+ ** to write the zeros then fall through to write the real data */
+ int rc = btreeOverwriteContent(pPage, pDest+nData, pX, iOffset+nData,
+ iAmt-nData);
+ if( rc ) return rc;
+ iAmt = nData;
+ }
+ if( memcmp(pDest, ((u8*)pX->pData) + iOffset, iAmt)!=0 ){
+ int rc = sqlite3PagerWrite(pPage->pDbPage);
+ if( rc ) return rc;
+ /* In a corrupt database, it is possible for the source and destination
+ ** buffers to overlap. This is harmless since the database is already
+ ** corrupt but it does cause valgrind and ASAN warnings. So use
+ ** memmove(). */
+ memmove(pDest, ((u8*)pX->pData) + iOffset, iAmt);
+ }
+ }
+ return SQLITE_OK;
+}
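+/* Worked example of the mixed case above (sizes assumed): with iAmt==10
+** and nData==6, the recursive call zero-fills the last 4 bytes starting
+** at pDest+6, then the caller falls through and memmove()s the 6 real
+** data bytes to pDest. */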
+
+/*
+** Overwrite the cell that cursor pCur is pointing to with fresh content
+** contained in pX.
+*/
+static int btreeOverwriteCell(BtCursor *pCur, const BtreePayload *pX){
+ int iOffset; /* Next byte of pX->pData to write */
+ int nTotal = pX->nData + pX->nZero; /* Total bytes to write */
+ int rc; /* Return code */
+ MemPage *pPage = pCur->pPage; /* Page being written */
+ BtShared *pBt; /* Btree */
+ Pgno ovflPgno; /* Next overflow page to write */
+ u32 ovflPageSize; /* Size to write on overflow page */
+
+ if( pCur->info.pPayload + pCur->info.nLocal > pPage->aDataEnd
+ || pCur->info.pPayload < pPage->aData + pPage->cellOffset
+ ){
+ return SQLITE_CORRUPT_BKPT;
+ }
+ /* Overwrite the local portion first */
+ rc = btreeOverwriteContent(pPage, pCur->info.pPayload, pX,
+ 0, pCur->info.nLocal);
+ if( rc ) return rc;
+ if( pCur->info.nLocal==nTotal ) return SQLITE_OK;
+
+ /* Now overwrite the overflow pages */
+ iOffset = pCur->info.nLocal;
+ assert( nTotal>=0 );
+ assert( iOffset>=0 );
+ ovflPgno = get4byte(pCur->info.pPayload + iOffset);
+ pBt = pPage->pBt;
+ ovflPageSize = pBt->usableSize - 4;
+ do{
+ rc = btreeGetPage(pBt, ovflPgno, &pPage, 0);
+ if( rc ) return rc;
+ if( sqlite3PagerPageRefcount(pPage->pDbPage)!=1 ){
+ rc = SQLITE_CORRUPT_BKPT;
+ }else{
+ if( iOffset+ovflPageSize<(u32)nTotal ){
+ ovflPgno = get4byte(pPage->aData);
+ }else{
+ ovflPageSize = nTotal - iOffset;
+ }
+ rc = btreeOverwriteContent(pPage, pPage->aData+4, pX,
+ iOffset, ovflPageSize);
+ }
+ sqlite3PagerUnref(pPage->pDbPage);
+ if( rc ) return rc;
+ iOffset += ovflPageSize;
+ }while( iOffset<nTotal );
+ return SQLITE_OK;
+}
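+/* Overflow page layout relied on above: each overflow page begins with
+** the 4-byte page number of the next page in the chain, followed by up
+** to usableSize-4 bytes of payload; hence the loop follows
+** get4byte(pPage->aData) and writes at pPage->aData+4. */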
+
+
+/*
+** Insert a new record into the BTree. The content of the new record
+** is described by the pX object. The pCur cursor is used only to
+** define what table the record should be inserted into, and is left
+** pointing at a random location.
+**
+** For a table btree (used for rowid tables), only the pX.nKey value of
+** the key is used. The pX.pKey value must be NULL. The pX.nKey is the
+** rowid or INTEGER PRIMARY KEY of the row. The pX.nData,pData,nZero fields
+** hold the content of the row.
+**
+** For an index btree (used for indexes and WITHOUT ROWID tables), the
+** key is an arbitrary byte sequence stored in pX.pKey,nKey. The
+** pX.pData,nData,nZero fields must be zero.
+**
+** If the seekResult parameter is non-zero, then a successful call to
+** MovetoUnpacked() to seek cursor pCur to (pKey,nKey) has already
+** been performed. In other words, if seekResult!=0 then the cursor
+** is currently pointing to a cell that will be adjacent to the cell
+** to be inserted. If seekResult<0 then pCur points to a cell that is
+** smaller than (pKey,nKey). If seekResult>0 then pCur points to a cell
+** that is larger than (pKey,nKey).
+**
+** If seekResult==0, that means pCur is pointing at some unknown location.
+** In that case, this routine must seek the cursor to the correct insertion
+** point for (pKey,nKey) before doing the insertion. For index btrees,
+** if pX->nMem is non-zero, then pX->aMem contains pointers to the unpacked
+** key values and pX->aMem can be used instead of pX->pKey to avoid having
+** to decode the key.
+*/
+SQLITE_PRIVATE int sqlite3BtreeInsert(
+ BtCursor *pCur, /* Insert data into the table of this cursor */
+ const BtreePayload *pX, /* Content of the row to be inserted */
+ int flags, /* True if this is likely an append */
+ int seekResult /* Result of prior MovetoUnpacked() call */
+){
+ int rc;
+ int loc = seekResult; /* -1: before desired location +1: after */
+ int szNew = 0;
+ int idx;
+ MemPage *pPage;
+ Btree *p = pCur->pBtree;
+ BtShared *pBt = p->pBt;
+ unsigned char *oldCell;
+ unsigned char *newCell = 0;
+
+ assert( (flags & (BTREE_SAVEPOSITION|BTREE_APPEND))==flags );
+
+ if( pCur->eState==CURSOR_FAULT ){
+ assert( pCur->skipNext!=SQLITE_OK );
+ return pCur->skipNext;
+ }
+
+ assert( cursorOwnsBtShared(pCur) );
+ assert( (pCur->curFlags & BTCF_WriteFlag)!=0
+ && pBt->inTransaction==TRANS_WRITE
+ && (pBt->btsFlags & BTS_READ_ONLY)==0 );
+ assert( hasSharedCacheTableLock(p, pCur->pgnoRoot, pCur->pKeyInfo!=0, 2) );
+
+ /* Assert that the caller has been consistent. If this cursor was opened
+ ** expecting an index b-tree, then the caller should be inserting blob
+ ** keys with no associated data. If the cursor was opened expecting an
+ ** intkey table, the caller should be inserting integer keys with a
+ ** blob of associated data. */
+ assert( (pX->pKey==0)==(pCur->pKeyInfo==0) );
+
+ /* Save the positions of any other cursors open on this table.
+ **
+ ** In some cases, the call to btreeMoveto() below is a no-op. For
+ ** example, when inserting data into a table with auto-generated integer
+ ** keys, the VDBE layer invokes sqlite3BtreeLast() to figure out the
+ ** integer key to use. It then calls this function to actually insert the
+ ** data into the intkey B-Tree. In this case btreeMoveto() recognizes
+ ** that the cursor is already where it needs to be and returns without
+ ** doing any work. To avoid thwarting these optimizations, it is important
+ ** not to clear the cursor here.
+ */
+ if( pCur->curFlags & BTCF_Multiple ){
+ rc = saveAllCursors(pBt, pCur->pgnoRoot, pCur);
+ if( rc ) return rc;
+ }
+
+ if( pCur->pKeyInfo==0 ){
+ assert( pX->pKey==0 );
+ /* If this is an insert into a table b-tree, invalidate any incrblob
+ ** cursors open on the row being replaced */
+ invalidateIncrblobCursors(p, pCur->pgnoRoot, pX->nKey, 0);
+
+ /* If BTREE_SAVEPOSITION is set, the cursor must already be pointing
+ ** to a row with the same key as the new entry being inserted.
+ */
+#ifdef SQLITE_DEBUG
+ if( flags & BTREE_SAVEPOSITION ){
+ assert( pCur->curFlags & BTCF_ValidNKey );
+ assert( pX->nKey==pCur->info.nKey );
+ assert( loc==0 );
+ }
+#endif
+
+ /* On the other hand, BTREE_SAVEPOSITION==0 does not imply
+ ** that the cursor is not pointing to a row to be overwritten.
+ ** So do a complete check.
+ */
+ if( (pCur->curFlags&BTCF_ValidNKey)!=0 && pX->nKey==pCur->info.nKey ){
+ /* The cursor is pointing to the entry that is to be
+ ** overwritten */
+ assert( pX->nData>=0 && pX->nZero>=0 );
+ if( pCur->info.nSize!=0
+ && pCur->info.nPayload==(u32)pX->nData+pX->nZero
+ ){
+ /* New entry is the same size as the old. Do an overwrite */
+ return btreeOverwriteCell(pCur, pX);
+ }
+ assert( loc==0 );
+ }else if( loc==0 ){
+ /* The cursor is *not* pointing to the cell to be overwritten, nor
+ ** to an adjacent cell. Move the cursor so that it is pointing either
+ ** to the cell to be overwritten or an adjacent cell.
+ */
+ rc = sqlite3BtreeMovetoUnpacked(pCur, 0, pX->nKey, flags!=0, &loc);
+ if( rc ) return rc;
+ }
+ }else{
+ /* This is an index or a WITHOUT ROWID table */
+
+ /* If BTREE_SAVEPOSITION is set, the cursor must already be pointing
+ ** to a row with the same key as the new entry being inserted.
+ */
+ assert( (flags & BTREE_SAVEPOSITION)==0 || loc==0 );
+
+ /* If the cursor is not already pointing either to the cell to be
+ ** overwritten or, when a new cell is being inserted, to an immediately
+ ** adjacent cell, then move the cursor so that it is.
+ */
+ if( loc==0 && (flags & BTREE_SAVEPOSITION)==0 ){
+ if( pX->nMem ){
+ UnpackedRecord r;
+ r.pKeyInfo = pCur->pKeyInfo;
+ r.aMem = pX->aMem;
+ r.nField = pX->nMem;
+ r.default_rc = 0;
+ r.errCode = 0;
+ r.r1 = 0;
+ r.r2 = 0;
+ r.eqSeen = 0;
+ rc = sqlite3BtreeMovetoUnpacked(pCur, &r, 0, flags!=0, &loc);
+ }else{
+ rc = btreeMoveto(pCur, pX->pKey, pX->nKey, flags!=0, &loc);
+ }
+ if( rc ) return rc;
+ }
+
+ /* If the cursor is currently pointing to an entry to be overwritten
+ ** and the new content is the same as the old, then use the
+ ** overwrite optimization.
+ */
+ if( loc==0 ){
+ getCellInfo(pCur);
+ if( pCur->info.nKey==pX->nKey ){
+ BtreePayload x2;
+ x2.pData = pX->pKey;
+ x2.nData = pX->nKey;
+ x2.nZero = 0;
+ return btreeOverwriteCell(pCur, &x2);
+ }
+ }
+
+ }
+ assert( pCur->eState==CURSOR_VALID
+ || (pCur->eState==CURSOR_INVALID && loc)
+ || CORRUPT_DB );
+
+ pPage = pCur->pPage;
+ assert( pPage->intKey || pX->nKey>=0 );
+ assert( pPage->leaf || !pPage->intKey );
+ if( pPage->nFree<0 ){
+ if( pCur->eState>CURSOR_INVALID ){
+ rc = SQLITE_CORRUPT_BKPT;
+ }else{
+ rc = btreeComputeFreeSpace(pPage);
+ }
+ if( rc ) return rc;
+ }
+
+ TRACE(("INSERT: table=%d nkey=%lld ndata=%d page=%d %s\n",
+ pCur->pgnoRoot, pX->nKey, pX->nData, pPage->pgno,
+ loc==0 ? "overwrite" : "new entry"));
+ assert( pPage->isInit );
+ newCell = pBt->pTmpSpace;
+ assert( newCell!=0 );
+ rc = fillInCell(pPage, newCell, pX, &szNew);
+ if( rc ) goto end_insert;
+ assert( szNew==pPage->xCellSize(pPage, newCell) );
+ assert( szNew <= MX_CELL_SIZE(pBt) );
+ idx = pCur->ix;
+ if( loc==0 ){
+ CellInfo info;
+ assert( idx<pPage->nCell );
+ rc = sqlite3PagerWrite(pPage->pDbPage);
+ if( rc ){
+ goto end_insert;
+ }
+ oldCell = findCell(pPage, idx);
+ if( !pPage->leaf ){
+ memcpy(newCell, oldCell, 4);
+ }
+ rc = clearCell(pPage, oldCell, &info);
+ testcase( pCur->curFlags & BTCF_ValidOvfl );
+ invalidateOverflowCache(pCur);
+ if( info.nSize==szNew && info.nLocal==info.nPayload
+ && (!ISAUTOVACUUM || szNew<pPage->minLocal)
+ ){
+ /* Overwrite the old cell with the new if they are the same size.
+ ** We could also try to do this if the old cell is smaller, then add
+ ** the leftover space to the free list. But experiments show that
+ ** doing that is no faster than skipping this optimization and just
+ ** calling dropCell() and insertCell().
+ **
+ ** This optimization cannot be used on an autovacuum database if the
+ ** new entry uses overflow pages, as the insertCell() call below is
+ ** necessary to add the PTRMAP_OVERFLOW1 pointer-map entry. */
+ assert( rc==SQLITE_OK ); /* clearCell never fails when nLocal==nPayload */
+ if( oldCell < pPage->aData+pPage->hdrOffset+10 ){
+ return SQLITE_CORRUPT_BKPT;
+ }
+ if( oldCell+szNew > pPage->aDataEnd ){
+ return SQLITE_CORRUPT_BKPT;
+ }
+ memcpy(oldCell, newCell, szNew);
+ return SQLITE_OK;
+ }
+ dropCell(pPage, idx, info.nSize, &rc);
+ if( rc ) goto end_insert;
+ }else if( loc<0 && pPage->nCell>0 ){
+ assert( pPage->leaf );
+ idx = ++pCur->ix;
+ pCur->curFlags &= ~BTCF_ValidNKey;
+ }else{
+ assert( pPage->leaf );
+ }
+ insertCell(pPage, idx, newCell, szNew, 0, 0, &rc);
+ assert( pPage->nOverflow==0 || rc==SQLITE_OK );
+ assert( rc!=SQLITE_OK || pPage->nCell>0 || pPage->nOverflow>0 );
+
+ /* If no error has occurred and pPage has an overflow cell, call balance()
+ ** to redistribute the cells within the tree. Since balance() may move
+ ** the cursor, zero the BtCursor.info.nSize and BTCF_ValidNKey
+ ** variables.
+ **
+ ** Previous versions of SQLite called moveToRoot() to move the cursor
+ ** back to the root page as balance() used to invalidate the contents
+ ** of BtCursor.apPage[] and BtCursor.aiIdx[]. Instead of doing that,
+ ** set the cursor state to "invalid". This makes common insert operations
+ ** slightly faster.
+ **
+ ** There is a subtle but important optimization here too. When inserting
+ ** multiple records into an intkey b-tree using a single cursor (as can
+ ** happen while processing an "INSERT INTO ... SELECT" statement), it
+ ** is advantageous to leave the cursor pointing to the last entry in
+ ** the b-tree if possible. If the cursor is left pointing to the last
+ ** entry in the table, and the next row inserted has an integer key
+ ** larger than the largest existing key, it is possible to insert the
+ ** row without seeking the cursor. This can be a big performance boost.
+ */
+ pCur->info.nSize = 0;
+ if( pPage->nOverflow ){
+ assert( rc==SQLITE_OK );
+ pCur->curFlags &= ~(BTCF_ValidNKey);
+ rc = balance(pCur);
+
+ /* Must make sure nOverflow is reset to zero even if the balance()
+ ** fails. Internal data structure corruption will result otherwise.
+ ** Also, set the cursor state to invalid. This stops saveCursorPosition()
+ ** from trying to save the current position of the cursor. */
+ pCur->pPage->nOverflow = 0;
+ pCur->eState = CURSOR_INVALID;
+ if( (flags & BTREE_SAVEPOSITION) && rc==SQLITE_OK ){
+ btreeReleaseAllCursorPages(pCur);
+ if( pCur->pKeyInfo ){
+ assert( pCur->pKey==0 );
+ pCur->pKey = sqlite3Malloc( pX->nKey );
+ if( pCur->pKey==0 ){
+ rc = SQLITE_NOMEM;
+ }else{
+ memcpy(pCur->pKey, pX->pKey, pX->nKey);
+ }
+ }
+ pCur->eState = CURSOR_REQUIRESEEK;
+ pCur->nKey = pX->nKey;
+ }
+ }
+ assert( pCur->iPage<0 || pCur->pPage->nOverflow==0 );
+
+end_insert:
+ return rc;
+}
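+#if 0
+/* Illustrative sketch, not compiled: how a caller might fill in a
+** BtreePayload for a rowid-table insert, per the contract described
+** above. The function name, cursor setup and record bytes are
+** assumptions made up for this example. */
+static int exampleRowidInsert(BtCursor *pCur, i64 iRow,
+                              const void *aRec, int nRec){
+  BtreePayload x;
+  memset(&x, 0, sizeof(x));
+  x.nKey = iRow;     /* rowid; x.pKey stays NULL for a table b-tree */
+  x.pData = aRec;    /* row content */
+  x.nData = nRec;
+  return sqlite3BtreeInsert(pCur, &x, 0, 0);
+}
+#endif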
+
+/*
+** Delete the entry that the cursor is pointing to.
+**
+** If the BTREE_SAVEPOSITION bit of the flags parameter is zero, then
+** the cursor is left pointing at an arbitrary location after the delete.
+** But if that bit is set, then the cursor is left in a state such that
+** the next call to BtreeNext() or BtreePrev() moves it to the same row
+** as it would have been on if the call to BtreeDelete() had been omitted.
+**
+** The BTREE_AUXDELETE bit of flags indicates that this is one of several
+** deletes
+** associated with a single table entry and its indexes. Only one of those
+** deletes is considered the "primary" delete. The primary delete occurs
+** on a cursor that is not a BTREE_FORDELETE cursor. All but one delete
+** operation on non-FORDELETE cursors is tagged with the AUXDELETE flag.
+** The BTREE_AUXDELETE bit is a hint that is not used by this implementation,
+** but which might be used by alternative storage engines.
+*/
+SQLITE_PRIVATE int sqlite3BtreeDelete(BtCursor *pCur, u8 flags){
+ Btree *p = pCur->pBtree;
+ BtShared *pBt = p->pBt;
+ int rc; /* Return code */
+ MemPage *pPage; /* Page to delete cell from */
+ unsigned char *pCell; /* Pointer to cell to delete */
+ int iCellIdx; /* Index of cell to delete */
+ int iCellDepth; /* Depth of node containing pCell */
+ CellInfo info; /* Size of the cell being deleted */
+ int bSkipnext = 0; /* Leaf cursor in SKIPNEXT state */
+ u8 bPreserve = flags & BTREE_SAVEPOSITION; /* Keep cursor valid */
+
+ assert( cursorOwnsBtShared(pCur) );
+ assert( pBt->inTransaction==TRANS_WRITE );
+ assert( (pBt->btsFlags & BTS_READ_ONLY)==0 );
+ assert( pCur->curFlags & BTCF_WriteFlag );
+ assert( hasSharedCacheTableLock(p, pCur->pgnoRoot, pCur->pKeyInfo!=0, 2) );
+ assert( !hasReadConflicts(p, pCur->pgnoRoot) );
+ assert( (flags & ~(BTREE_SAVEPOSITION | BTREE_AUXDELETE))==0 );
+ if( pCur->eState==CURSOR_REQUIRESEEK ){
+ rc = btreeRestoreCursorPosition(pCur);
+ if( rc ) return rc;
+ }
+ assert( pCur->eState==CURSOR_VALID );
+
+ iCellDepth = pCur->iPage;
+ iCellIdx = pCur->ix;
+ pPage = pCur->pPage;
+ pCell = findCell(pPage, iCellIdx);
+ if( pPage->nFree<0 && btreeComputeFreeSpace(pPage) ) return SQLITE_CORRUPT;
+
+ /* If the bPreserve flag is set to true, then the cursor position must
+ ** be preserved following this delete operation. If the current delete
+ ** will cause a b-tree rebalance, then this is done by saving the cursor
+ ** key and leaving the cursor in CURSOR_REQUIRESEEK state before
+ ** returning.
+ **
+ ** Or, if the current delete will not cause a rebalance, then the cursor
+ ** will be left in CURSOR_SKIPNEXT state pointing to the entry immediately
+ ** before or after the deleted entry. In this case set bSkipnext to true. */
+ if( bPreserve ){
+ if( !pPage->leaf
+ || (pPage->nFree+cellSizePtr(pPage,pCell)+2)>(int)(pBt->usableSize*2/3)
+ || pPage->nCell==1 /* See dbfuzz001.test for a test case */
+ ){
+ /* A b-tree rebalance will be required after deleting this entry.
+ ** Save the cursor key. */
+ rc = saveCursorKey(pCur);
+ if( rc ) return rc;
+ }else{
+ bSkipnext = 1;
+ }
+ }
+
+ /* If the page containing the entry to delete is not a leaf page, move
+ ** the cursor to the largest entry in the tree that is smaller than
+ ** the entry being deleted. This cell will replace the cell being deleted
+ ** from the internal node. The 'previous' entry is used for this instead
+ ** of the 'next' entry, as the previous entry is always a part of the
+ ** sub-tree headed by the child page of the cell being deleted. This makes
+ ** balancing the tree following the delete operation easier. */
+ if( !pPage->leaf ){
+ rc = sqlite3BtreePrevious(pCur, 0);
+ assert( rc!=SQLITE_DONE );
+ if( rc ) return rc;
+ }
+
+ /* Save the positions of any other cursors open on this table before
+ ** making any modifications. */
+ if( pCur->curFlags & BTCF_Multiple ){
+ rc = saveAllCursors(pBt, pCur->pgnoRoot, pCur);
+ if( rc ) return rc;
+ }
+
+ /* If this is a delete operation to remove a row from a table b-tree,
+ ** invalidate any incrblob cursors open on the row being deleted. */
+ if( pCur->pKeyInfo==0 ){
+ invalidateIncrblobCursors(p, pCur->pgnoRoot, pCur->info.nKey, 0);
+ }
+
+ /* Make the page containing the entry to be deleted writable. Then free any
+ ** overflow pages associated with the entry and finally remove the cell
+ ** itself from within the page. */
+ rc = sqlite3PagerWrite(pPage->pDbPage);
+ if( rc ) return rc;
+ rc = clearCell(pPage, pCell, &info);
+ dropCell(pPage, iCellIdx, info.nSize, &rc);
+ if( rc ) return rc;
+
+ /* If the cell deleted was not located on a leaf page, then the cursor
+ ** is currently pointing to the largest entry in the sub-tree headed
+ ** by the child-page of the cell that was just deleted from an internal
+ ** node. The cell from the leaf node needs to be moved to the internal
+ ** node to replace the deleted cell. */
+ if( !pPage->leaf ){
+ MemPage *pLeaf = pCur->pPage;
+ int nCell;
+ Pgno n;
+ unsigned char *pTmp;
+
+ if( pLeaf->nFree<0 ){
+ rc = btreeComputeFreeSpace(pLeaf);
+ if( rc ) return rc;
+ }
+ if( iCellDepth<pCur->iPage-1 ){
+ n = pCur->apPage[iCellDepth+1]->pgno;
+ }else{
+ n = pCur->pPage->pgno;
+ }
+ pCell = findCell(pLeaf, pLeaf->nCell-1);
+ if( pCell<&pLeaf->aData[4] ) return SQLITE_CORRUPT_BKPT;
+ nCell = pLeaf->xCellSize(pLeaf, pCell);
+ assert( MX_CELL_SIZE(pBt) >= nCell );
+ pTmp = pBt->pTmpSpace;
+ assert( pTmp!=0 );
+ rc = sqlite3PagerWrite(pLeaf->pDbPage);
+ if( rc==SQLITE_OK ){
+ insertCell(pPage, iCellIdx, pCell-4, nCell+4, pTmp, n, &rc);
+ }
+ dropCell(pLeaf, pLeaf->nCell-1, nCell, &rc);
+ if( rc ) return rc;
+ }
+
+ /* Balance the tree. If the entry deleted was located on a leaf page,
+ ** then the cursor still points to that page. In this case the first
+ ** call to balance() repairs the tree, and the if(...) condition is
+ ** never true.
+ **
+ ** Otherwise, if the entry deleted was on an internal node page, then
+ ** pCur is pointing to the leaf page from which a cell was removed to
+ ** replace the cell deleted from the internal node. This is slightly
+ ** tricky as the leaf node may be underfull, and the internal node may
+ ** be either under or overfull. In this case run the balancing algorithm
+ ** on the leaf node first. If the balance proceeds far enough up the
+ ** tree that we can be sure that any problem in the internal node has
+ ** been corrected, so be it. Otherwise, after balancing the leaf node,
+ ** walk the cursor up the tree to the internal node and balance it as
+ ** well. */
+ rc = balance(pCur);
+ if( rc==SQLITE_OK && pCur->iPage>iCellDepth ){
+ releasePageNotNull(pCur->pPage);
+ pCur->iPage--;
+ while( pCur->iPage>iCellDepth ){
+ releasePage(pCur->apPage[pCur->iPage--]);
+ }
+ pCur->pPage = pCur->apPage[pCur->iPage];
+ rc = balance(pCur);
+ }
+
+ if( rc==SQLITE_OK ){
+ if( bSkipnext ){
+ assert( bPreserve && (pCur->iPage==iCellDepth || CORRUPT_DB) );
+ assert( pPage==pCur->pPage || CORRUPT_DB );
+ assert( (pPage->nCell>0 || CORRUPT_DB) && iCellIdx<=pPage->nCell );
+ pCur->eState = CURSOR_SKIPNEXT;
+ if( iCellIdx>=pPage->nCell ){
+ pCur->skipNext = -1;
+ pCur->ix = pPage->nCell-1;
+ }else{
+ pCur->skipNext = 1;
+ }
+ }else{
+ rc = moveToRoot(pCur);
+ if( bPreserve ){
+ btreeReleaseAllCursorPages(pCur);
+ pCur->eState = CURSOR_REQUIRESEEK;
+ }
+ if( rc==SQLITE_EMPTY ) rc = SQLITE_OK;
+ }
+ }
+ return rc;
+}
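+
+/* A minimal usage sketch for the flags above (hypothetical caller code,
+** assuming a valid write cursor pCur positioned on a row):
+**
+**   rc = sqlite3BtreeDelete(pCur, BTREE_SAVEPOSITION);
+**   if( rc==SQLITE_OK ){
+**     rc = sqlite3BtreeNext(pCur, 0);  /- lands on the row following the
+**                                         one deleted -/
+**   }
+**
+** Without BTREE_SAVEPOSITION the cursor may be left at an arbitrary
+** location after the delete. */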
+
+/*
+** Create a new BTree table. Write into *piTable the page
+** number for the root page of the new table.
+**
+** The type of table is determined by the flags parameter. Only the
+** following values of flags are currently in use. Other values for
+** flags might not work:
+**
+** BTREE_INTKEY|BTREE_LEAFDATA Used for SQL tables with rowid keys
+** BTREE_ZERODATA Used for SQL indices
+*/
+static int btreeCreateTable(Btree *p, Pgno *piTable, int createTabFlags){
+ BtShared *pBt = p->pBt;
+ MemPage *pRoot;
+ Pgno pgnoRoot;
+ int rc;
+ int ptfFlags; /* Page-type flags for the root page of new table */
+
+ assert( sqlite3BtreeHoldsMutex(p) );
+ assert( pBt->inTransaction==TRANS_WRITE );
+ assert( (pBt->btsFlags & BTS_READ_ONLY)==0 );
+
+#ifdef SQLITE_OMIT_AUTOVACUUM
+ rc = allocateBtreePage(pBt, &pRoot, &pgnoRoot, 1, 0);
+ if( rc ){
+ return rc;
+ }
+#else
+ if( pBt->autoVacuum ){
+ Pgno pgnoMove; /* Move a page here to make room for the root-page */
+ MemPage *pPageMove; /* The page to move to. */
+
+ /* Creating a new table may require moving an existing database page
+ ** to make room for the new table's root page. In case this page turns
+ ** out to be an overflow page, delete all overflow page-map caches
+ ** held by open cursors.
+ */
+ invalidateAllOverflowCache(pBt);
+
+ /* Read the value of meta[3] from the database to determine where the
+ ** root page of the new table should go. meta[3] is the largest root-page
+ ** created so far, so the new root-page is (meta[3]+1).
+ */
+ sqlite3BtreeGetMeta(p, BTREE_LARGEST_ROOT_PAGE, &pgnoRoot);
+ if( pgnoRoot>btreePagecount(pBt) ){
+ return SQLITE_CORRUPT_BKPT;
+ }
+ pgnoRoot++;
+
+ /* The new root-page may not be allocated on a pointer-map page, or the
+ ** PENDING_BYTE page.
+ */
+ while( pgnoRoot==PTRMAP_PAGENO(pBt, pgnoRoot) ||
+ pgnoRoot==PENDING_BYTE_PAGE(pBt) ){
+ pgnoRoot++;
+ }
+ assert( pgnoRoot>=3 );
+
+ /* Allocate a page. The page that currently resides at pgnoRoot will
+ ** be moved to the allocated page (unless the allocated page happens
+ ** to reside at pgnoRoot).
+ */
+ rc = allocateBtreePage(pBt, &pPageMove, &pgnoMove, pgnoRoot, BTALLOC_EXACT);
+ if( rc!=SQLITE_OK ){
+ return rc;
+ }
+
+ if( pgnoMove!=pgnoRoot ){
+ /* pgnoRoot is the page that will be used for the root-page of
+ ** the new table (assuming an error did not occur). But we were
+ ** allocated pgnoMove. If required (i.e. if it was not allocated
+ ** by extending the file), the current page at position pgnoMove
+ ** is already journaled.
+ */
+ u8 eType = 0;
+ Pgno iPtrPage = 0;
+
+ /* Save the positions of any open cursors. This is required in
+ ** case they are holding a reference to an xFetch reference
+ ** corresponding to page pgnoRoot. */
+ rc = saveAllCursors(pBt, 0, 0);
+ releasePage(pPageMove);
+ if( rc!=SQLITE_OK ){
+ return rc;
+ }
+
+ /* Move the page currently at pgnoRoot to pgnoMove. */
+ rc = btreeGetPage(pBt, pgnoRoot, &pRoot, 0);
+ if( rc!=SQLITE_OK ){
+ return rc;
+ }
+ rc = ptrmapGet(pBt, pgnoRoot, &eType, &iPtrPage);
+ if( eType==PTRMAP_ROOTPAGE || eType==PTRMAP_FREEPAGE ){
+ rc = SQLITE_CORRUPT_BKPT;
+ }
+ if( rc!=SQLITE_OK ){
+ releasePage(pRoot);
+ return rc;
+ }
+ assert( eType!=PTRMAP_ROOTPAGE );
+ assert( eType!=PTRMAP_FREEPAGE );
+ rc = relocatePage(pBt, pRoot, eType, iPtrPage, pgnoMove, 0);
+ releasePage(pRoot);
+
+ /* Obtain the page at pgnoRoot */
+ if( rc!=SQLITE_OK ){
+ return rc;
+ }
+ rc = btreeGetPage(pBt, pgnoRoot, &pRoot, 0);
+ if( rc!=SQLITE_OK ){
+ return rc;
+ }
+ rc = sqlite3PagerWrite(pRoot->pDbPage);
+ if( rc!=SQLITE_OK ){
+ releasePage(pRoot);
+ return rc;
+ }
+ }else{
+ pRoot = pPageMove;
+ }
+
+ /* Update the pointer-map and meta-data with the new root-page number. */
+ ptrmapPut(pBt, pgnoRoot, PTRMAP_ROOTPAGE, 0, &rc);
+ if( rc ){
+ releasePage(pRoot);
+ return rc;
+ }
+
+ /* When the new root page was allocated, page 1 was made writable in
+ ** order either to increase the database filesize, or to decrement the
+ ** freelist count. Hence, the sqlite3BtreeUpdateMeta() call cannot fail.
+ */
+ assert( sqlite3PagerIswriteable(pBt->pPage1->pDbPage) );
+ rc = sqlite3BtreeUpdateMeta(p, 4, pgnoRoot);
+ if( NEVER(rc) ){
+ releasePage(pRoot);
+ return rc;
+ }
+
+ }else{
+ rc = allocateBtreePage(pBt, &pRoot, &pgnoRoot, 1, 0);
+ if( rc ) return rc;
+ }
+#endif
+ assert( sqlite3PagerIswriteable(pRoot->pDbPage) );
+ if( createTabFlags & BTREE_INTKEY ){
+ ptfFlags = PTF_INTKEY | PTF_LEAFDATA | PTF_LEAF;
+ }else{
+ ptfFlags = PTF_ZERODATA | PTF_LEAF;
+ }
+ zeroPage(pRoot, ptfFlags);
+ sqlite3PagerUnref(pRoot->pDbPage);
+ assert( (pBt->openFlags & BTREE_SINGLE)==0 || pgnoRoot==2 );
+ *piTable = pgnoRoot;
+ return SQLITE_OK;
+}
+SQLITE_PRIVATE int sqlite3BtreeCreateTable(Btree *p, Pgno *piTable, int flags){
+ int rc;
+ sqlite3BtreeEnter(p);
+ rc = btreeCreateTable(p, piTable, flags);
+ sqlite3BtreeLeave(p);
+ return rc;
+}
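+
+/* A minimal usage sketch (hypothetical caller code): create a rowid-keyed
+** table b-tree and record its root page number.
+**
+**   Pgno pgnoRoot;
+**   rc = sqlite3BtreeCreateTable(p, &pgnoRoot, BTREE_INTKEY|BTREE_LEAFDATA);
+**
+** For an index b-tree the BTREE_ZERODATA flag would be used instead, as
+** listed in the comment above btreeCreateTable(). */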
+
+/*
+** Erase the given database page and all its children. Return
+** the page to the freelist.
+*/
+static int clearDatabasePage(
+ BtShared *pBt, /* The BTree that contains the table */
+ Pgno pgno, /* Page number to clear */
+ int freePageFlag, /* Deallocate page if true */
+ int *pnChange /* Add number of Cells freed to this counter */
+){
+ MemPage *pPage;
+ int rc;
+ unsigned char *pCell;
+ int i;
+ int hdr;
+ CellInfo info;
+
+ assert( sqlite3_mutex_held(pBt->mutex) );
+ if( pgno>btreePagecount(pBt) ){
+ return SQLITE_CORRUPT_BKPT;
+ }
+ rc = getAndInitPage(pBt, pgno, &pPage, 0, 0);
+ if( rc ) return rc;
+ if( pPage->bBusy ){
+ rc = SQLITE_CORRUPT_BKPT;
+ goto cleardatabasepage_out;
+ }
+ pPage->bBusy = 1;
+ hdr = pPage->hdrOffset;
+ for(i=0; i<pPage->nCell; i++){
+ pCell = findCell(pPage, i);
+ if( !pPage->leaf ){
+ rc = clearDatabasePage(pBt, get4byte(pCell), 1, pnChange);
+ if( rc ) goto cleardatabasepage_out;
+ }
+ rc = clearCell(pPage, pCell, &info);
+ if( rc ) goto cleardatabasepage_out;
+ }
+ if( !pPage->leaf ){
+ rc = clearDatabasePage(pBt, get4byte(&pPage->aData[hdr+8]), 1, pnChange);
+ if( rc ) goto cleardatabasepage_out;
+ }else if( pnChange ){
+ assert( pPage->intKey || CORRUPT_DB );
+ testcase( !pPage->intKey );
+ *pnChange += pPage->nCell;
+ }
+ if( freePageFlag ){
+ freePage(pPage, &rc);
+ }else if( (rc = sqlite3PagerWrite(pPage->pDbPage))==0 ){
+ zeroPage(pPage, pPage->aData[hdr] | PTF_LEAF);
+ }
+
+cleardatabasepage_out:
+ pPage->bBusy = 0;
+ releasePage(pPage);
+ return rc;
+}
+
+/*
+** Delete all information from a single table in the database. iTable is
+** the page number of the root of the table. After this routine returns,
+** the root page is empty, but still exists.
+**
+** This routine will fail with SQLITE_LOCKED if there are any open
+** read cursors on the table. Open write cursors are moved to the
+** root of the table.
+**
+** If pnChange is not NULL, then table iTable must be an intkey table. The
+** integer value pointed to by pnChange is incremented by the number of
+** entries in the table.
+*/
+SQLITE_PRIVATE int sqlite3BtreeClearTable(Btree *p, int iTable, int *pnChange){
+ int rc;
+ BtShared *pBt = p->pBt;
+ sqlite3BtreeEnter(p);
+ assert( p->inTrans==TRANS_WRITE );
+
+ rc = saveAllCursors(pBt, (Pgno)iTable, 0);
+
+ if( SQLITE_OK==rc ){
+ /* Invalidate all incrblob cursors open on table iTable (assuming iTable
+ ** is the root of a table b-tree - if it is not, the following call is
+ ** a no-op). */
+ invalidateIncrblobCursors(p, (Pgno)iTable, 0, 1);
+ rc = clearDatabasePage(pBt, (Pgno)iTable, 0, pnChange);
+ }
+ sqlite3BtreeLeave(p);
+ return rc;
+}
+
+/*
+** Delete all information from the single table that pCur is open on.
+**
+** This routine only works for pCur on an ephemeral table.
+*/
+SQLITE_PRIVATE int sqlite3BtreeClearTableOfCursor(BtCursor *pCur){
+ return sqlite3BtreeClearTable(pCur->pBtree, pCur->pgnoRoot, 0);
+}
+
+/*
+** Erase all information in a table and add the root of the table to
+** the freelist. Except, the root of the principal table (the one on
+** page 1) is never added to the freelist.
+**
+** This routine will fail with SQLITE_LOCKED if there are any open
+** cursors on the table.
+**
+** If AUTOVACUUM is enabled and the page at iTable is not the last
+** root page in the database file, then the last root page
+** in the database file is moved into the slot formerly occupied by
+** iTable, and the slot formerly occupied by the last root page
+** is added to the freelist instead of iTable. In this way, all
+** root pages are kept at the beginning of the database file, which
+** is necessary for AUTOVACUUM to work right. *piMoved is set to the
+** page number that used to be the last root page in the file before
+** the move. If no page gets moved, *piMoved is set to 0.
+** The last root page is recorded in meta[3] and the value of
+** meta[3] is updated by this procedure.
+*/
+static int btreeDropTable(Btree *p, Pgno iTable, int *piMoved){
+ int rc;
+ MemPage *pPage = 0;
+ BtShared *pBt = p->pBt;
+
+ assert( sqlite3BtreeHoldsMutex(p) );
+ assert( p->inTrans==TRANS_WRITE );
+ assert( iTable>=2 );
+ if( iTable>btreePagecount(pBt) ){
+ return SQLITE_CORRUPT_BKPT;
+ }
+
+ rc = btreeGetPage(pBt, (Pgno)iTable, &pPage, 0);
+ if( rc ) return rc;
+ rc = sqlite3BtreeClearTable(p, iTable, 0);
+ if( rc ){
+ releasePage(pPage);
+ return rc;
+ }
+
+ *piMoved = 0;
+
+#ifdef SQLITE_OMIT_AUTOVACUUM
+ freePage(pPage, &rc);
+ releasePage(pPage);
+#else
+ if( pBt->autoVacuum ){
+ Pgno maxRootPgno;
+ sqlite3BtreeGetMeta(p, BTREE_LARGEST_ROOT_PAGE, &maxRootPgno);
+
+ if( iTable==maxRootPgno ){
+ /* If the table being dropped is the table with the largest root-page
+ ** number in the database, put the root page on the free list.
+ */
+ freePage(pPage, &rc);
+ releasePage(pPage);
+ if( rc!=SQLITE_OK ){
+ return rc;
+ }
+ }else{
+ /* The table being dropped does not have the largest root-page
+ ** number in the database. So move the page that does into the
+ ** gap left by the deleted root-page.
+ */
+ MemPage *pMove;
+ releasePage(pPage);
+ rc = btreeGetPage(pBt, maxRootPgno, &pMove, 0);
+ if( rc!=SQLITE_OK ){
+ return rc;
+ }
+ rc = relocatePage(pBt, pMove, PTRMAP_ROOTPAGE, 0, iTable, 0);
+ releasePage(pMove);
+ if( rc!=SQLITE_OK ){
+ return rc;
+ }
+ pMove = 0;
+ rc = btreeGetPage(pBt, maxRootPgno, &pMove, 0);
+ freePage(pMove, &rc);
+ releasePage(pMove);
+ if( rc!=SQLITE_OK ){
+ return rc;
+ }
+ *piMoved = maxRootPgno;
+ }
+
+ /* Set the new 'max-root-page' value in the database header. This
+ ** is the old value less one, less one more if that happens to
+ ** be a pointer-map page, less one again if that is the
+ ** PENDING_BYTE_PAGE.
+ */
+ maxRootPgno--;
+ while( maxRootPgno==PENDING_BYTE_PAGE(pBt)
+ || PTRMAP_ISPAGE(pBt, maxRootPgno) ){
+ maxRootPgno--;
+ }
+ assert( maxRootPgno!=PENDING_BYTE_PAGE(pBt) );
+
+ rc = sqlite3BtreeUpdateMeta(p, 4, maxRootPgno);
+ }else{
+ freePage(pPage, &rc);
+ releasePage(pPage);
+ }
+#endif
+ return rc;
+}
+SQLITE_PRIVATE int sqlite3BtreeDropTable(Btree *p, int iTable, int *piMoved){
+ int rc;
+ sqlite3BtreeEnter(p);
+ rc = btreeDropTable(p, iTable, piMoved);
+ sqlite3BtreeLeave(p);
+ return rc;
+}
+
+
+/*
+** This function may only be called if the b-tree connection already
+** has a read or write transaction open on the database.
+**
+** Read the meta-information out of a database file. Meta[0]
+** is the number of free pages currently in the database. Meta[1]
+** through meta[15] are available for use by higher layers. Meta[0]
+** is read-only, the others are read/write.
+**
+** The schema layer numbers meta values differently. At the schema
+** layer (and the SetCookie and ReadCookie opcodes) the number of
+** free pages is not visible. So Cookie[0] is the same as Meta[1].
+**
+** This routine treats Meta[BTREE_DATA_VERSION] as a special case. Instead
+** of reading the value out of the header, it instead loads the "DataVersion"
+** from the pager. The BTREE_DATA_VERSION value is not actually stored in the
+** database file. It is a number computed by the pager. But its access
+** pattern is the same as header meta values, and so it is convenient to
+** read it from this routine.
+*/
+SQLITE_PRIVATE void sqlite3BtreeGetMeta(Btree *p, int idx, u32 *pMeta){
+ BtShared *pBt = p->pBt;
+
+ sqlite3BtreeEnter(p);
+ assert( p->inTrans>TRANS_NONE );
+ assert( SQLITE_OK==querySharedCacheTableLock(p, SCHEMA_ROOT, READ_LOCK) );
+ assert( pBt->pPage1 );
+ assert( idx>=0 && idx<=15 );
+
+ if( idx==BTREE_DATA_VERSION ){
+ *pMeta = sqlite3PagerDataVersion(pBt->pPager) + p->iDataVersion;
+ }else{
+ *pMeta = get4byte(&pBt->pPage1->aData[36 + idx*4]);
+ }
+
+ /* If auto-vacuum is disabled in this build and this is an auto-vacuum
+ ** database, mark the database as read-only. */
+#ifdef SQLITE_OMIT_AUTOVACUUM
+ if( idx==BTREE_LARGEST_ROOT_PAGE && *pMeta>0 ){
+ pBt->btsFlags |= BTS_READ_ONLY;
+ }
+#endif
+
+ sqlite3BtreeLeave(p);
+}
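+
+/* Worked example (illustrative): meta[idx] occupies the 4 bytes at offset
+** 36+idx*4 of page 1, so reading BTREE_LARGEST_ROOT_PAGE (idx==4) decodes
+** the big-endian value at bytes 52..55 of the database header:
+**
+**   u32 mx;
+**   sqlite3BtreeGetMeta(p, BTREE_LARGEST_ROOT_PAGE, &mx);
+**   /- equivalent, for idx!=BTREE_DATA_VERSION, to:
+**      mx = get4byte(&pBt->pPage1->aData[36 + 4*4]);  -/
+*/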
+
+/*
+** Write meta-information back into the database. Meta[0] is
+** read-only and may not be written.
+*/
+SQLITE_PRIVATE int sqlite3BtreeUpdateMeta(Btree *p, int idx, u32 iMeta){
+ BtShared *pBt = p->pBt;
+ unsigned char *pP1;
+ int rc;
+ assert( idx>=1 && idx<=15 );
+ sqlite3BtreeEnter(p);
+ assert( p->inTrans==TRANS_WRITE );
+ assert( pBt->pPage1!=0 );
+ pP1 = pBt->pPage1->aData;
+ rc = sqlite3PagerWrite(pBt->pPage1->pDbPage);
+ if( rc==SQLITE_OK ){
+ put4byte(&pP1[36 + idx*4], iMeta);
+#ifndef SQLITE_OMIT_AUTOVACUUM
+ if( idx==BTREE_INCR_VACUUM ){
+ assert( pBt->autoVacuum || iMeta==0 );
+ assert( iMeta==0 || iMeta==1 );
+ pBt->incrVacuum = (u8)iMeta;
+ }
+#endif
+ }
+ sqlite3BtreeLeave(p);
+ return rc;
+}
+
+/*
+** The first argument, pCur, is a cursor opened on some b-tree. Count the
+** number of entries in the b-tree and write the result to *pnEntry.
+**
+** SQLITE_OK is returned if the operation is successfully executed.
+** Otherwise, if an error is encountered (i.e. an IO error or database
+** corruption) an SQLite error code is returned.
+*/
+SQLITE_PRIVATE int sqlite3BtreeCount(sqlite3 *db, BtCursor *pCur, i64 *pnEntry){
+ i64 nEntry = 0; /* Value to return in *pnEntry */
+ int rc; /* Return code */
+
+ rc = moveToRoot(pCur);
+ if( rc==SQLITE_EMPTY ){
+ *pnEntry = 0;
+ return SQLITE_OK;
+ }
+
+ /* Unless an error occurs, the following loop runs one iteration for each
+ ** page in the B-Tree structure (not including overflow pages).
+ */
+ while( rc==SQLITE_OK && !AtomicLoad(&db->u1.isInterrupted) ){
+ int iIdx; /* Index of child node in parent */
+ MemPage *pPage; /* Current page of the b-tree */
+
+ /* If this is a leaf page or the tree is not an int-key tree, then
+ ** this page contains countable entries. Increment the entry counter
+ ** accordingly.
+ */
+ pPage = pCur->pPage;
+ if( pPage->leaf || !pPage->intKey ){
+ nEntry += pPage->nCell;
+ }
+
+ /* pPage is a leaf node. This loop navigates the cursor up the tree
+ ** so that it points to the interior cell that is the parent of
+ ** the next page in the tree that has not yet been visited. The
+ ** pCur->aiIdx[pCur->iPage] value is set to the index of the parent cell
+ ** of the page, or to the number of cells in the page if the next page
+ ** to visit is the right-child of its parent.
+ **
+ ** If all pages in the tree have been visited, return SQLITE_OK to the
+ ** caller.
+ */
+ if( pPage->leaf ){
+ do {
+ if( pCur->iPage==0 ){
+ /* All pages of the b-tree have been visited. Return successfully. */
+ *pnEntry = nEntry;
+ return moveToRoot(pCur);
+ }
+ moveToParent(pCur);
+ }while ( pCur->ix>=pCur->pPage->nCell );
+
+ pCur->ix++;
+ pPage = pCur->pPage;
+ }
+
+ /* Descend to the child node of the cell that the cursor currently
+ ** points at. This is the right-child if (iIdx==pPage->nCell).
+ */
+ iIdx = pCur->ix;
+ if( iIdx==pPage->nCell ){
+ rc = moveToChild(pCur, get4byte(&pPage->aData[pPage->hdrOffset+8]));
+ }else{
+ rc = moveToChild(pCur, get4byte(findCell(pPage, iIdx)));
+ }
+ }
+
+ /* An error has occurred. Return an error code. */
+ return rc;
+}
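+
+/* A minimal usage sketch (hypothetical caller code), e.g. when computing
+** a full-table "SELECT count(*)":
+**
+**   i64 nRow;
+**   rc = sqlite3BtreeCount(db, pCur, &nRow);
+**   /- on SQLITE_OK, nRow holds the number of entries in the b-tree -/
+*/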
+
+/*
+** Return the pager associated with a BTree. This routine is used for
+** testing and debugging only.
+*/
+SQLITE_PRIVATE Pager *sqlite3BtreePager(Btree *p){
+ return p->pBt->pPager;
+}
+
+#ifndef SQLITE_OMIT_INTEGRITY_CHECK
+/*
+** Append a message to the error message string.
+*/
+static void checkAppendMsg(
+ IntegrityCk *pCheck,
+ const char *zFormat,
+ ...
+){
+ va_list ap;
+ if( !pCheck->mxErr ) return;
+ pCheck->mxErr--;
+ pCheck->nErr++;
+ va_start(ap, zFormat);
+ if( pCheck->errMsg.nChar ){
+ sqlite3_str_append(&pCheck->errMsg, "\n", 1);
+ }
+ if( pCheck->zPfx ){
+ sqlite3_str_appendf(&pCheck->errMsg, pCheck->zPfx, pCheck->v1, pCheck->v2);
+ }
+ sqlite3_str_vappendf(&pCheck->errMsg, zFormat, ap);
+ va_end(ap);
+ if( pCheck->errMsg.accError==SQLITE_NOMEM ){
+ pCheck->bOomFault = 1;
+ }
+}
+#endif /* SQLITE_OMIT_INTEGRITY_CHECK */
+
+#ifndef SQLITE_OMIT_INTEGRITY_CHECK
+
+/*
+** Return non-zero if the bit in the IntegrityCk.aPgRef[] array that
+** corresponds to page iPg is already set.
+*/
+static int getPageReferenced(IntegrityCk *pCheck, Pgno iPg){
+ assert( iPg<=pCheck->nPage && sizeof(pCheck->aPgRef[0])==1 );
+ return (pCheck->aPgRef[iPg/8] & (1 << (iPg & 0x07)));
+}
+
+/*
+** Set the bit in the IntegrityCk.aPgRef[] array that corresponds to page iPg.
+*/
+static void setPageReferenced(IntegrityCk *pCheck, Pgno iPg){
+ assert( iPg<=pCheck->nPage && sizeof(pCheck->aPgRef[0])==1 );
+ pCheck->aPgRef[iPg/8] |= (1 << (iPg & 0x07));
+}
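+
+/* Illustrative example of the bitmap addressing used above: page iPg maps
+** to byte iPg/8 of aPgRef[] and to bit (iPg & 0x07) within that byte. For
+** instance, page 19 is tracked by bit 3 of aPgRef[2], since 19/8==2 and
+** 19&0x07==3. */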
+
+
+/*
+** Add 1 to the reference count for page iPage. If this is the second
+** reference to the page, add an error message to pCheck->zErrMsg.
+** Return 1 if there are 2 or more references to the page and 0 if
+** this is the first reference to the page.
+**
+** Also check that the page number is in bounds.
+*/
+static int checkRef(IntegrityCk *pCheck, Pgno iPage){
+ if( iPage>pCheck->nPage || iPage==0 ){
+ checkAppendMsg(pCheck, "invalid page number %d", iPage);
+ return 1;
+ }
+ if( getPageReferenced(pCheck, iPage) ){
+ checkAppendMsg(pCheck, "2nd reference to page %d", iPage);
+ return 1;
+ }
+ if( AtomicLoad(&pCheck->db->u1.isInterrupted) ) return 1;
+ setPageReferenced(pCheck, iPage);
+ return 0;
+}
+
+#ifndef SQLITE_OMIT_AUTOVACUUM
+/*
+** Check that the entry in the pointer-map for page iChild maps to
+** page iParent, pointer type ptrType. If not, append an error message
+** to pCheck.
+*/
+static void checkPtrmap(
+ IntegrityCk *pCheck, /* Integrity check context */
+ Pgno iChild, /* Child page number */
+ u8 eType, /* Expected pointer map type */
+ Pgno iParent /* Expected pointer map parent page number */
+){
+ int rc;
+ u8 ePtrmapType;
+ Pgno iPtrmapParent;
+
+ rc = ptrmapGet(pCheck->pBt, iChild, &ePtrmapType, &iPtrmapParent);
+ if( rc!=SQLITE_OK ){
+ if( rc==SQLITE_NOMEM || rc==SQLITE_IOERR_NOMEM ) pCheck->bOomFault = 1;
+ checkAppendMsg(pCheck, "Failed to read ptrmap key=%d", iChild);
+ return;
+ }
+
+ if( ePtrmapType!=eType || iPtrmapParent!=iParent ){
+ checkAppendMsg(pCheck,
+ "Bad ptr map entry key=%d expected=(%d,%d) got=(%d,%d)",
+ iChild, eType, iParent, ePtrmapType, iPtrmapParent);
+ }
+}
+#endif
+
+/*
+** Check the integrity of the freelist or of an overflow page list.
+** Verify that the number of pages on the list is N.
+*/
+static void checkList(
+ IntegrityCk *pCheck, /* Integrity checking context */
+ int isFreeList, /* True for a freelist. False for overflow page list */
+ Pgno iPage, /* Page number for first page in the list */
+ u32 N /* Expected number of pages in the list */
+){
+ int i;
+ u32 expected = N;
+ int nErrAtStart = pCheck->nErr;
+ while( iPage!=0 && pCheck->mxErr ){
+ DbPage *pOvflPage;
+ unsigned char *pOvflData;
+ if( checkRef(pCheck, iPage) ) break;
+ N--;
+ if( sqlite3PagerGet(pCheck->pPager, (Pgno)iPage, &pOvflPage, 0) ){
+ checkAppendMsg(pCheck, "failed to get page %d", iPage);
+ break;
+ }
+ pOvflData = (unsigned char *)sqlite3PagerGetData(pOvflPage);
+ if( isFreeList ){
+ u32 n = (u32)get4byte(&pOvflData[4]);
+#ifndef SQLITE_OMIT_AUTOVACUUM
+ if( pCheck->pBt->autoVacuum ){
+ checkPtrmap(pCheck, iPage, PTRMAP_FREEPAGE, 0);
+ }
+#endif
+ if( n>pCheck->pBt->usableSize/4-2 ){
+ checkAppendMsg(pCheck,
+ "freelist leaf count too big on page %d", iPage);
+ N--;
+ }else{
+ for(i=0; i<(int)n; i++){
+ Pgno iFreePage = get4byte(&pOvflData[8+i*4]);
+#ifndef SQLITE_OMIT_AUTOVACUUM
+ if( pCheck->pBt->autoVacuum ){
+ checkPtrmap(pCheck, iFreePage, PTRMAP_FREEPAGE, 0);
+ }
+#endif
+ checkRef(pCheck, iFreePage);
+ }
+ N -= n;
+ }
+ }
+#ifndef SQLITE_OMIT_AUTOVACUUM
+ else{
+ /* If this database supports auto-vacuum and iPage is not the last
+ ** page in this overflow list, check that the pointer-map entry for
+ ** the following page matches iPage.
+ */
+ if( pCheck->pBt->autoVacuum && N>0 ){
+ i = get4byte(pOvflData);
+ checkPtrmap(pCheck, i, PTRMAP_OVERFLOW2, iPage);
+ }
+ }
+#endif
+ iPage = get4byte(pOvflData);
+ sqlite3PagerUnref(pOvflPage);
+ }
+ if( N && nErrAtStart==pCheck->nErr ){
+ checkAppendMsg(pCheck,
+ "%s is %d but should be %d",
+ isFreeList ? "size" : "overflow list length",
+ expected-N, expected);
+ }
+}
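+
+/* Sketch of the freelist trunk-page layout that checkList() walks when
+** isFreeList is true (offsets as read by the code above):
+**
+**   bytes 0..3    page number of the next trunk page (0 if none)
+**   bytes 4..7    number n of leaf page pointers on this trunk
+**   bytes 8..     n big-endian 4-byte leaf page numbers
+**
+** Overflow-list pages use only bytes 0..3, as a pointer to the next
+** overflow page in the chain. */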
+#endif /* SQLITE_OMIT_INTEGRITY_CHECK */
+
+/*
+** An implementation of a min-heap.
+**
+** aHeap[0] is the number of elements on the heap. aHeap[1] is the
+** root element. The daughter nodes of aHeap[N] are aHeap[N*2]
+** and aHeap[N*2+1].
+**
+** The heap property is this: Every node is less than or equal to both
+** of its daughter nodes. A consequence of the heap property is that the
+** root node aHeap[1] is always the minimum value currently in the heap.
+**
+** The btreeHeapInsert() routine inserts an unsigned 32-bit number onto
+** the heap, preserving the heap property. The btreeHeapPull() routine
+** removes the root element from the heap (the minimum value in the heap)
+** and then moves other nodes around as necessary to preserve the heap
+** property.
+**
+** This heap is used for cell overlap and coverage testing. Each u32
+** entry represents the span of a cell or freeblock on a btree page.
+** The upper 16 bits are the index of the first byte of a range and the
+** lower 16 bits are the index of the last byte of that range.
+*/
+static void btreeHeapInsert(u32 *aHeap, u32 x){
+ u32 j, i = ++aHeap[0];
+ aHeap[i] = x;
+ while( (j = i/2)>0 && aHeap[j]>aHeap[i] ){
+ x = aHeap[j];
+ aHeap[j] = aHeap[i];
+ aHeap[i] = x;
+ i = j;
+ }
+}
+static int btreeHeapPull(u32 *aHeap, u32 *pOut){
+ u32 j, i, x;
+ if( (x = aHeap[0])==0 ) return 0;
+ *pOut = aHeap[1];
+ aHeap[1] = aHeap[x];
+ aHeap[x] = 0xffffffff;
+ aHeap[0]--;
+ i = 1;
+ while( (j = i*2)<=aHeap[0] ){
+ if( aHeap[j]>aHeap[j+1] ) j++;
+ if( aHeap[i]<aHeap[j] ) break;
+ x = aHeap[i];
+ aHeap[i] = aHeap[j];
+ aHeap[j] = x;
+ i = j;
+ }
+ return 1;
+}
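+
+/* Worked example (illustrative): a cell occupying bytes 100..149 of a page
+** is encoded as the single u32 (100<<16)|149 == 0x00640095. Because the
+** start address sits in the upper 16 bits, pulling entries from the
+** min-heap yields the spans in order of increasing start address, which is
+** what the coverage check in checkTreePage() relies on. */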
+
+#ifndef SQLITE_OMIT_INTEGRITY_CHECK
+/*
+** Do various sanity checks on a single page of a tree. Return
+** the tree depth. Leaf pages return 0. Parents of leaf pages
+** return 1, and so forth.
+**
+** These checks are done:
+**
+** 1. Make sure that cells and freeblocks do not overlap
+** but combine to completely cover the page.
+** 2. Make sure integer cell keys are in order.
+** 3. Check the integrity of overflow pages.
+** 4. Recursively call checkTreePage on all children.
+** 5. Verify that the depth of all children is the same.
+*/
+static int checkTreePage(
+ IntegrityCk *pCheck, /* Context for the sanity check */
+ Pgno iPage, /* Page number of the page to check */
+ i64 *piMinKey, /* Write minimum integer primary key here */
+ i64 maxKey /* Error if integer primary key greater than this */
+){
+ MemPage *pPage = 0; /* The page being analyzed */
+ int i; /* Loop counter */
+ int rc; /* Result code from subroutine call */
+ int depth = -1, d2; /* Depth of a subtree */
+ int pgno; /* Page number */
+ int nFrag; /* Number of fragmented bytes on the page */
+ int hdr; /* Offset to the page header */
+ int cellStart; /* Offset to the start of the cell pointer array */
+ int nCell; /* Number of cells */
+ int doCoverageCheck = 1; /* True if cell coverage checking should be done */
+ int keyCanBeEqual = 1; /* True if IPK can be equal to maxKey
+ ** False if IPK must be strictly less than maxKey */
+ u8 *data; /* Page content */
+ u8 *pCell; /* Cell content */
+ u8 *pCellIdx; /* Next element of the cell pointer array */
+ BtShared *pBt; /* The BtShared object that owns pPage */
+ u32 pc; /* Address of a cell */
+ u32 usableSize; /* Usable size of the page */
+ u32 contentOffset; /* Offset to the start of the cell content area */
+ u32 *heap = 0; /* Min-heap used for checking cell coverage */
+ u32 x, prev = 0; /* Next and previous entry on the min-heap */
+ const char *saved_zPfx = pCheck->zPfx;
+ int saved_v1 = pCheck->v1;
+ int saved_v2 = pCheck->v2;
+ u8 savedIsInit = 0;
+
+ /* Check that the page exists
+ */
+ pBt = pCheck->pBt;
+ usableSize = pBt->usableSize;
+ if( iPage==0 ) return 0;
+ if( checkRef(pCheck, iPage) ) return 0;
+ pCheck->zPfx = "Page %u: ";
+ pCheck->v1 = iPage;
+ if( (rc = btreeGetPage(pBt, iPage, &pPage, 0))!=0 ){
+ checkAppendMsg(pCheck,
+ "unable to get the page. error code=%d", rc);
+ goto end_of_check;
+ }
+
+ /* Clear MemPage.isInit to make sure the corruption detection code in
+ ** btreeInitPage() is executed. */
+ savedIsInit = pPage->isInit;
+ pPage->isInit = 0;
+ if( (rc = btreeInitPage(pPage))!=0 ){
+ assert( rc==SQLITE_CORRUPT ); /* The only possible error from InitPage */
+ checkAppendMsg(pCheck,
+ "btreeInitPage() returns error code %d", rc);
+ goto end_of_check;
+ }
+ if( (rc = btreeComputeFreeSpace(pPage))!=0 ){
+ assert( rc==SQLITE_CORRUPT );
+ checkAppendMsg(pCheck, "free space corruption", rc);
+ goto end_of_check;
+ }
+ data = pPage->aData;
+ hdr = pPage->hdrOffset;
+
+ /* Set up for cell analysis */
+ pCheck->zPfx = "On tree page %u cell %d: ";
+ contentOffset = get2byteNotZero(&data[hdr+5]);
+ assert( contentOffset<=usableSize ); /* Enforced by btreeInitPage() */
+
+ /* EVIDENCE-OF: R-37002-32774 The two-byte integer at offset 3 gives the
+ ** number of cells on the page. */
+ nCell = get2byte(&data[hdr+3]);
+ assert( pPage->nCell==nCell );
+
+ /* EVIDENCE-OF: R-23882-45353 The cell pointer array of a b-tree page
+ ** immediately follows the b-tree page header. */
+ cellStart = hdr + 12 - 4*pPage->leaf;
+ assert( pPage->aCellIdx==&data[cellStart] );
+ pCellIdx = &data[cellStart + 2*(nCell-1)];
+
+ if( !pPage->leaf ){
+ /* Analyze the right-child page of internal pages */
+ pgno = get4byte(&data[hdr+8]);
+#ifndef SQLITE_OMIT_AUTOVACUUM
+ if( pBt->autoVacuum ){
+ pCheck->zPfx = "On page %u at right child: ";
+ checkPtrmap(pCheck, pgno, PTRMAP_BTREE, iPage);
+ }
+#endif
+ depth = checkTreePage(pCheck, pgno, &maxKey, maxKey);
+ keyCanBeEqual = 0;
+ }else{
+ /* For leaf pages, the coverage check will occur in the same loop
+ ** as the other cell checks, so initialize the heap. */
+ heap = pCheck->heap;
+ heap[0] = 0;
+ }
+
+ /* EVIDENCE-OF: R-02776-14802 The cell pointer array consists of K 2-byte
+ ** integer offsets to the cell contents. */
+ for(i=nCell-1; i>=0 && pCheck->mxErr; i--){
+ CellInfo info;
+
+ /* Check cell size */
+ pCheck->v2 = i;
+ assert( pCellIdx==&data[cellStart + i*2] );
+ pc = get2byteAligned(pCellIdx);
+ pCellIdx -= 2;
+ if( pc<contentOffset || pc>usableSize-4 ){
+ checkAppendMsg(pCheck, "Offset %d out of range %d..%d",
+ pc, contentOffset, usableSize-4);
+ doCoverageCheck = 0;
+ continue;
+ }
+ pCell = &data[pc];
+ pPage->xParseCell(pPage, pCell, &info);
+ if( pc+info.nSize>usableSize ){
+ checkAppendMsg(pCheck, "Extends off end of page");
+ doCoverageCheck = 0;
+ continue;
+ }
+
+ /* Check for integer primary key out of range */
+ if( pPage->intKey ){
+ if( keyCanBeEqual ? (info.nKey > maxKey) : (info.nKey >= maxKey) ){
+ checkAppendMsg(pCheck, "Rowid %lld out of order", info.nKey);
+ }
+ maxKey = info.nKey;
+ keyCanBeEqual = 0; /* Only the first key on the page may ==maxKey */
+ }
+
+ /* Check the content overflow list */
+ if( info.nPayload>info.nLocal ){
+ u32 nPage; /* Number of pages on the overflow chain */
+ Pgno pgnoOvfl; /* First page of the overflow chain */
+ assert( pc + info.nSize - 4 <= usableSize );
+ nPage = (info.nPayload - info.nLocal + usableSize - 5)/(usableSize - 4);
+ pgnoOvfl = get4byte(&pCell[info.nSize - 4]);
+#ifndef SQLITE_OMIT_AUTOVACUUM
+ if( pBt->autoVacuum ){
+ checkPtrmap(pCheck, pgnoOvfl, PTRMAP_OVERFLOW1, iPage);
+ }
+#endif
+ checkList(pCheck, 0, pgnoOvfl, nPage);
+ }
+
+ if( !pPage->leaf ){
+ /* Check sanity of left child page for internal pages */
+ pgno = get4byte(pCell);
+#ifndef SQLITE_OMIT_AUTOVACUUM
+ if( pBt->autoVacuum ){
+ checkPtrmap(pCheck, pgno, PTRMAP_BTREE, iPage);
+ }
+#endif
+ d2 = checkTreePage(pCheck, pgno, &maxKey, maxKey);
+ keyCanBeEqual = 0;
+ if( d2!=depth ){
+ checkAppendMsg(pCheck, "Child page depth differs");
+ depth = d2;
+ }
+ }else{
+ /* Populate the coverage-checking heap for leaf pages */
+ btreeHeapInsert(heap, (pc<<16)|(pc+info.nSize-1));
+ }
+ }
+ *piMinKey = maxKey;
+
+ /* Check for complete coverage of the page
+ */
+ pCheck->zPfx = 0;
+ if( doCoverageCheck && pCheck->mxErr>0 ){
+ /* For leaf pages, the min-heap has already been initialized and the
+ ** cells have already been inserted. But for internal pages, that has
+ ** not yet been done, so do it now */
+ if( !pPage->leaf ){
+ heap = pCheck->heap;
+ heap[0] = 0;
+ for(i=nCell-1; i>=0; i--){
+ u32 size;
+ pc = get2byteAligned(&data[cellStart+i*2]);
+ size = pPage->xCellSize(pPage, &data[pc]);
+ btreeHeapInsert(heap, (pc<<16)|(pc+size-1));
+ }
+ }
+ /* Add the freeblocks to the min-heap
+ **
+ ** EVIDENCE-OF: R-20690-50594 The second field of the b-tree page header
+ ** is the offset of the first freeblock, or zero if there are no
+ ** freeblocks on the page.
+ */
+ i = get2byte(&data[hdr+1]);
+ while( i>0 ){
+ int size, j;
+ assert( (u32)i<=usableSize-4 ); /* Enforced by btreeComputeFreeSpace() */
+ size = get2byte(&data[i+2]);
+ assert( (u32)(i+size)<=usableSize ); /* due to btreeComputeFreeSpace() */
+ btreeHeapInsert(heap, (((u32)i)<<16)|(i+size-1));
+ /* EVIDENCE-OF: R-58208-19414 The first 2 bytes of a freeblock are a
+ ** big-endian integer which is the offset in the b-tree page of the next
+ ** freeblock in the chain, or zero if the freeblock is the last on the
+ ** chain. */
+ j = get2byte(&data[i]);
+ /* EVIDENCE-OF: R-06866-39125 Freeblocks are always connected in order of
+ ** increasing offset. */
+ assert( j==0 || j>i+size ); /* Enforced by btreeComputeFreeSpace() */
+ assert( (u32)j<=usableSize-4 ); /* Enforced by btreeComputeFreeSpace() */
+ i = j;
+ }
+ /* Analyze the min-heap looking for overlap between cells and/or
+ ** freeblocks, and counting the number of untracked bytes in nFrag.
+ **
+ ** Each min-heap entry is of the form: (start_address<<16)|end_address.
+ ** There is an implied first entry that covers the page header, the cell
+ ** pointer index, and the gap between the cell pointer index and the start
+ ** of cell content.
+ **
+ ** The loop below pulls entries from the min-heap in order and compares
+ ** the start_address against the previous end_address. If there is an
+ ** overlap, that means bytes are used multiple times. If there is a gap,
+ ** that gap is added to the fragmentation count.
+ */
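+ /* Worked example (illustrative): with usableSize==1024 and
+ ** contentOffset==100, pulling the spans (100..499) and (600..1023)
+ ** reports no overlap, charges the uncovered bytes 500..599 to nFrag
+ ** (nFrag==100), and the final adjustment below adds nothing because
+ ** the last span ends at usableSize-1. */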
+ nFrag = 0;
+ prev = contentOffset - 1; /* Implied first min-heap entry */
+ while( btreeHeapPull(heap,&x) ){
+ if( (prev&0xffff)>=(x>>16) ){
+ checkAppendMsg(pCheck,
+ "Multiple uses for byte %u of page %u", x>>16, iPage);
+ break;
+ }else{
+ nFrag += (x>>16) - (prev&0xffff) - 1;
+ prev = x;
+ }
+ }
+ nFrag += usableSize - (prev&0xffff) - 1;
+ /* EVIDENCE-OF: R-43263-13491 The total number of bytes in all fragments
+ ** is stored in the fifth field of the b-tree page header.
+ ** EVIDENCE-OF: R-07161-27322 The one-byte integer at offset 7 gives the
+ ** number of fragmented free bytes within the cell content area.
+ */
+ if( heap[0]==0 && nFrag!=data[hdr+7] ){
+ checkAppendMsg(pCheck,
+ "Fragmentation of %d bytes reported as %d on page %u",
+ nFrag, data[hdr+7], iPage);
+ }
+ }
+
+end_of_check:
+ if( !doCoverageCheck ) pPage->isInit = savedIsInit;
+ releasePage(pPage);
+ pCheck->zPfx = saved_zPfx;
+ pCheck->v1 = saved_v1;
+ pCheck->v2 = saved_v2;
+ return depth+1;
+}
+#endif /* SQLITE_OMIT_INTEGRITY_CHECK */
+
+#ifndef SQLITE_OMIT_INTEGRITY_CHECK
+/*
+** This routine does a complete check of the given BTree file. aRoot[] is
+** an array of page numbers where each page number is the root page of
+** a table. nRoot is the number of entries in aRoot.
+**
+** A read-only or read-write transaction must be opened before calling
+** this function.
+**
+** Write the number of errors seen in *pnErr. Except for some memory
+** allocation errors, an error message held in memory obtained from
+** malloc is returned if *pnErr is non-zero. If *pnErr==0 then NULL is
+** returned. If a memory allocation error occurs, NULL is returned.
+**
+** If the first entry in aRoot[] is 0, that indicates that the list of
+** root pages is incomplete. This is a "partial integrity-check". This
+** happens when performing an integrity check on a single table. The
+** zero is skipped, of course. But in addition, the freelist checks
+** and the checks to make sure every page is referenced are also skipped,
+** since obviously it is not possible to know which pages are covered by
+** the unverified btrees. Except, if aRoot[1] is 1, then the freelist
+** checks are still performed.
+*/
+SQLITE_PRIVATE char *sqlite3BtreeIntegrityCheck(
+ sqlite3 *db, /* Database connection that is running the check */
+ Btree *p, /* The btree to be checked */
+ Pgno *aRoot, /* An array of root page numbers for individual trees */
+ int nRoot, /* Number of entries in aRoot[] */
+ int mxErr, /* Stop reporting errors after this many */
+ int *pnErr /* Write number of errors seen to this variable */
+){
+ Pgno i;
+ IntegrityCk sCheck;
+ BtShared *pBt = p->pBt;
+ u64 savedDbFlags = pBt->db->flags;
+ char zErr[100];
+ int bPartial = 0; /* True if not checking all btrees */
+ int bCkFreelist = 1; /* True to scan the freelist */
+ VVA_ONLY( int nRef );
+ assert( nRoot>0 );
+
+ /* aRoot[0]==0 means this is a partial check */
+ if( aRoot[0]==0 ){
+ assert( nRoot>1 );
+ bPartial = 1;
+ if( aRoot[1]!=1 ) bCkFreelist = 0;
+ }
+
+ sqlite3BtreeEnter(p);
+ assert( p->inTrans>TRANS_NONE && pBt->inTransaction>TRANS_NONE );
+ VVA_ONLY( nRef = sqlite3PagerRefcount(pBt->pPager) );
+ assert( nRef>=0 );
+ sCheck.db = db;
+ sCheck.pBt = pBt;
+ sCheck.pPager = pBt->pPager;
+ sCheck.nPage = btreePagecount(sCheck.pBt);
+ sCheck.mxErr = mxErr;
+ sCheck.nErr = 0;
+ sCheck.bOomFault = 0;
+ sCheck.zPfx = 0;
+ sCheck.v1 = 0;
+ sCheck.v2 = 0;
+ sCheck.aPgRef = 0;
+ sCheck.heap = 0;
+ sqlite3StrAccumInit(&sCheck.errMsg, 0, zErr, sizeof(zErr), SQLITE_MAX_LENGTH);
+ sCheck.errMsg.printfFlags = SQLITE_PRINTF_INTERNAL;
+ if( sCheck.nPage==0 ){
+ goto integrity_ck_cleanup;
+ }
+
+ sCheck.aPgRef = sqlite3MallocZero((sCheck.nPage / 8)+ 1);
+ if( !sCheck.aPgRef ){
+ sCheck.bOomFault = 1;
+ goto integrity_ck_cleanup;
+ }
+ sCheck.heap = (u32*)sqlite3PageMalloc( pBt->pageSize );
+ if( sCheck.heap==0 ){
+ sCheck.bOomFault = 1;
+ goto integrity_ck_cleanup;
+ }
+
+ i = PENDING_BYTE_PAGE(pBt);
+ if( i<=sCheck.nPage ) setPageReferenced(&sCheck, i);
+
+ /* Check the integrity of the freelist
+ */
+ if( bCkFreelist ){
+ sCheck.zPfx = "Main freelist: ";
+ checkList(&sCheck, 1, get4byte(&pBt->pPage1->aData[32]),
+ get4byte(&pBt->pPage1->aData[36]));
+ sCheck.zPfx = 0;
+ }
+
+ /* Check all the tables.
+ */
+#ifndef SQLITE_OMIT_AUTOVACUUM
+ if( !bPartial ){
+ if( pBt->autoVacuum ){
+ Pgno mx = 0;
+ Pgno mxInHdr;
+ for(i=0; (int)i<nRoot; i++) if( mx<aRoot[i] ) mx = aRoot[i];
+ mxInHdr = get4byte(&pBt->pPage1->aData[52]);
+ if( mx!=mxInHdr ){
+ checkAppendMsg(&sCheck,
+ "max rootpage (%d) disagrees with header (%d)",
+ mx, mxInHdr
+ );
+ }
+ }else if( get4byte(&pBt->pPage1->aData[64])!=0 ){
+ checkAppendMsg(&sCheck,
+ "incremental_vacuum enabled with a max rootpage of zero"
+ );
+ }
+ }
+#endif
+ testcase( pBt->db->flags & SQLITE_CellSizeCk );
+ pBt->db->flags &= ~(u64)SQLITE_CellSizeCk;
+ for(i=0; (int)i<nRoot && sCheck.mxErr; i++){
+ i64 notUsed;
+ if( aRoot[i]==0 ) continue;
+#ifndef SQLITE_OMIT_AUTOVACUUM
+ if( pBt->autoVacuum && aRoot[i]>1 && !bPartial ){
+ checkPtrmap(&sCheck, aRoot[i], PTRMAP_ROOTPAGE, 0);
+ }
+#endif
+ checkTreePage(&sCheck, aRoot[i], &notUsed, LARGEST_INT64);
+ }
+ pBt->db->flags = savedDbFlags;
+
+ /* Make sure every page in the file is referenced
+ */
+ if( !bPartial ){
+ for(i=1; i<=sCheck.nPage && sCheck.mxErr; i++){
+#ifdef SQLITE_OMIT_AUTOVACUUM
+ if( getPageReferenced(&sCheck, i)==0 ){
+ checkAppendMsg(&sCheck, "Page %d is never used", i);
+ }
+#else
+ /* If the database supports auto-vacuum, make sure no tables contain
+ ** references to pointer-map pages.
+ */
+ if( getPageReferenced(&sCheck, i)==0 &&
+ (PTRMAP_PAGENO(pBt, i)!=i || !pBt->autoVacuum) ){
+ checkAppendMsg(&sCheck, "Page %d is never used", i);
+ }
+ if( getPageReferenced(&sCheck, i)!=0 &&
+ (PTRMAP_PAGENO(pBt, i)==i && pBt->autoVacuum) ){
+ checkAppendMsg(&sCheck, "Pointer map page %d is referenced", i);
+ }
+#endif
+ }
+ }
+
+ /* Clean up and report errors.
+ */
+integrity_ck_cleanup:
+ sqlite3PageFree(sCheck.heap);
+ sqlite3_free(sCheck.aPgRef);
+ if( sCheck.bOomFault ){
+ sqlite3_str_reset(&sCheck.errMsg);
+ sCheck.nErr++;
+ }
+ *pnErr = sCheck.nErr;
+ if( sCheck.nErr==0 ) sqlite3_str_reset(&sCheck.errMsg);
+ /* Make sure this analysis did not leave any unref() pages. */
+ assert( nRef==sqlite3PagerRefcount(pBt->pPager) );
+ sqlite3BtreeLeave(p);
+ return sqlite3StrAccumFinish(&sCheck.errMsg);
+}
+#endif /* SQLITE_OMIT_INTEGRITY_CHECK */
+
+/*
+** Return the full pathname of the underlying database file. Return
+** an empty string if the database is in-memory or a TEMP database.
+**
+** The pager filename is invariant as long as the pager is
+** open so it is safe to access without the BtShared mutex.
+*/
+SQLITE_PRIVATE const char *sqlite3BtreeGetFilename(Btree *p){
+ assert( p->pBt->pPager!=0 );
+ return sqlite3PagerFilename(p->pBt->pPager, 1);
+}
+
+/*
+** Return the pathname of the journal file for this database. The return
+** value of this routine is the same regardless of whether the journal file
+** has been created or not.
+**
+** The pager journal filename is invariant as long as the pager is
+** open so it is safe to access without the BtShared mutex.
+*/
+SQLITE_PRIVATE const char *sqlite3BtreeGetJournalname(Btree *p){
+ assert( p->pBt->pPager!=0 );
+ return sqlite3PagerJournalname(p->pBt->pPager);
+}
+
+/*
+** Return non-zero if a write transaction is active.
+*/
+SQLITE_PRIVATE int sqlite3BtreeIsInTrans(Btree *p){
+ assert( p==0 || sqlite3_mutex_held(p->db->mutex) );
+ return (p && (p->inTrans==TRANS_WRITE));
+}
+
+#ifndef SQLITE_OMIT_WAL
+/*
+** Run a checkpoint on the Btree passed as the first argument.
+**
+** Return SQLITE_LOCKED if this or any other connection has an open
+** transaction on the shared-cache the argument Btree is connected to.
+**
+** Parameter eMode is one of SQLITE_CHECKPOINT_PASSIVE, FULL or RESTART.
+*/
+SQLITE_PRIVATE int sqlite3BtreeCheckpoint(Btree *p, int eMode, int *pnLog, int *pnCkpt){
+ int rc = SQLITE_OK;
+ if( p ){
+ BtShared *pBt = p->pBt;
+ sqlite3BtreeEnter(p);
+ if( pBt->inTransaction!=TRANS_NONE ){
+ rc = SQLITE_LOCKED;
+ }else{
+ rc = sqlite3PagerCheckpoint(pBt->pPager, p->db, eMode, pnLog, pnCkpt);
+ }
+ sqlite3BtreeLeave(p);
+ }
+ return rc;
+}
+#endif
+
+/*
+** Return non-zero if a read (or write) transaction is active.
+*/
+SQLITE_PRIVATE int sqlite3BtreeIsInReadTrans(Btree *p){
+ assert( p );
+ assert( sqlite3_mutex_held(p->db->mutex) );
+ return p->inTrans!=TRANS_NONE;
+}
+
+SQLITE_PRIVATE int sqlite3BtreeIsInBackup(Btree *p){
+ assert( p );
+ assert( sqlite3_mutex_held(p->db->mutex) );
+ return p->nBackup!=0;
+}
+
+/*
+** This function returns a pointer to a blob of memory associated with
+** a single shared-btree. The memory is used by client code for its own
+** purposes (for example, to store a high-level schema associated with
+** the shared-btree). The btree layer manages reference counting issues.
+**
+** The first time this is called on a shared-btree, nBytes bytes of memory
+** are allocated, zeroed, and returned to the caller. For each subsequent
+** call the nBytes parameter is ignored and a pointer to the same blob
+** of memory returned.
+**
+** If the nBytes parameter is 0 and the blob of memory has not yet been
+** allocated, a null pointer is returned. If the blob has already been
+** allocated, it is returned as normal.
+**
+** Just before the shared-btree is closed, the function passed as the
+** xFree argument when the memory allocation was made is invoked on the
+** blob of allocated memory. The xFree function should not call sqlite3_free()
+** on the memory, the btree layer does that.
+*/
+SQLITE_PRIVATE void *sqlite3BtreeSchema(Btree *p, int nBytes, void(*xFree)(void *)){
+ BtShared *pBt = p->pBt;
+ sqlite3BtreeEnter(p);
+ if( !pBt->pSchema && nBytes ){
+ pBt->pSchema = sqlite3DbMallocZero(0, nBytes);
+ pBt->xFreeSchema = xFree;
+ }
+ sqlite3BtreeLeave(p);
+ return pBt->pSchema;
+}
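+
+/* A minimal usage sketch (hypothetical caller code): the first call
+** allocates and zeroes the blob, later calls return the same pointer.
+**
+**   MySchema *pS = (MySchema*)sqlite3BtreeSchema(p, sizeof(MySchema), freeFn);
+**
+** Here MySchema and freeFn stand in for whatever structure and destructor
+** the client layer uses; freeFn is invoked on the blob just before the
+** shared-btree is closed. */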
+
+/*
+** Return SQLITE_LOCKED_SHAREDCACHE if another user of the same shared
+** btree as the argument handle holds an exclusive lock on the
+** sqlite_schema table. Otherwise SQLITE_OK.
+*/
+SQLITE_PRIVATE int sqlite3BtreeSchemaLocked(Btree *p){
+ int rc;
+ assert( sqlite3_mutex_held(p->db->mutex) );
+ sqlite3BtreeEnter(p);
+ rc = querySharedCacheTableLock(p, SCHEMA_ROOT, READ_LOCK);
+ assert( rc==SQLITE_OK || rc==SQLITE_LOCKED_SHAREDCACHE );
+ sqlite3BtreeLeave(p);
+ return rc;
+}
+
+
+#ifndef SQLITE_OMIT_SHARED_CACHE
+/*
+** Obtain a lock on the table whose root page is iTab. The
+** lock is a write lock if isWritelock is true or a read lock
+** if it is false.
+*/
+SQLITE_PRIVATE int sqlite3BtreeLockTable(Btree *p, int iTab, u8 isWriteLock){
+ int rc = SQLITE_OK;
+ assert( p->inTrans!=TRANS_NONE );
+ if( p->sharable ){
+ u8 lockType = READ_LOCK + isWriteLock;
+ assert( READ_LOCK+1==WRITE_LOCK );
+ assert( isWriteLock==0 || isWriteLock==1 );
+
+ sqlite3BtreeEnter(p);
+ rc = querySharedCacheTableLock(p, iTab, lockType);
+ if( rc==SQLITE_OK ){
+ rc = setSharedCacheTableLock(p, iTab, lockType);
+ }
+ sqlite3BtreeLeave(p);
+ }
+ return rc;
+}
+#endif
+
+#ifndef SQLITE_OMIT_INCRBLOB
+/*
+** Argument pCsr must be a cursor opened for writing on an
+** INTKEY table currently pointing at a valid table entry.
+** This function modifies the data stored as part of that entry.
+**
+** Only the data content may be modified; it is not possible to
+** change the length of the data stored. If this function is called with
+** parameters that attempt to write past the end of the existing data,
+** no modifications are made and SQLITE_CORRUPT is returned.
+*/
+SQLITE_PRIVATE int sqlite3BtreePutData(BtCursor *pCsr, u32 offset, u32 amt, void *z){
+ int rc;
+ assert( cursorOwnsBtShared(pCsr) );
+ assert( sqlite3_mutex_held(pCsr->pBtree->db->mutex) );
+ assert( pCsr->curFlags & BTCF_Incrblob );
+
+ rc = restoreCursorPosition(pCsr);
+ if( rc!=SQLITE_OK ){
+ return rc;
+ }
+ assert( pCsr->eState!=CURSOR_REQUIRESEEK );
+ if( pCsr->eState!=CURSOR_VALID ){
+ return SQLITE_ABORT;
+ }
+
+ /* Save the positions of all other cursors open on this table. This is
+ ** required in case any of them are holding references to an xFetch
+ ** version of the b-tree page modified by the accessPayload call below.
+ **
+ ** Note that pCsr must be open on an INTKEY table. saveCursorPosition()
+ ** (and hence saveAllCursors()) cannot fail on a BTREE_INTKEY table, so
+ ** saveAllCursors() can only return SQLITE_OK here.
+ */
+ VVA_ONLY(rc =) saveAllCursors(pCsr->pBt, pCsr->pgnoRoot, pCsr);
+ assert( rc==SQLITE_OK );
+
+ /* Check some assumptions:
+ ** (a) the cursor is open for writing,
+ ** (b) there is a read/write transaction open,
+ ** (c) the connection holds a write-lock on the table (if required),
+ ** (d) there are no conflicting read-locks, and
+ ** (e) the cursor points at a valid row of an intKey table.
+ */
+ if( (pCsr->curFlags & BTCF_WriteFlag)==0 ){
+ return SQLITE_READONLY;
+ }
+ assert( (pCsr->pBt->btsFlags & BTS_READ_ONLY)==0
+ && pCsr->pBt->inTransaction==TRANS_WRITE );
+ assert( hasSharedCacheTableLock(pCsr->pBtree, pCsr->pgnoRoot, 0, 2) );
+ assert( !hasReadConflicts(pCsr->pBtree, pCsr->pgnoRoot) );
+ assert( pCsr->pPage->intKey );
+
+ return accessPayload(pCsr, offset, amt, (unsigned char *)z, 1);
+}
+
+/*
+** Mark this cursor as an incremental blob cursor.
+*/
+SQLITE_PRIVATE void sqlite3BtreeIncrblobCursor(BtCursor *pCur){
+ pCur->curFlags |= BTCF_Incrblob;
+ pCur->pBtree->hasIncrblobCur = 1;
+}
+#endif
+
+/*
+** Set both the "read version" (single byte at byte offset 18) and
+** "write version" (single byte at byte offset 19) fields in the database
+** header to iVersion.
+*/
+SQLITE_PRIVATE int sqlite3BtreeSetVersion(Btree *pBtree, int iVersion){
+ BtShared *pBt = pBtree->pBt;
+ int rc; /* Return code */
+
+ assert( iVersion==1 || iVersion==2 );
+
+ /* If setting the version fields to 1, do not automatically open the
+ ** WAL connection, even if the version fields are currently set to 2.
+ */
+ pBt->btsFlags &= ~BTS_NO_WAL;
+ if( iVersion==1 ) pBt->btsFlags |= BTS_NO_WAL;
+
+ rc = sqlite3BtreeBeginTrans(pBtree, 0, 0);
+ if( rc==SQLITE_OK ){
+ u8 *aData = pBt->pPage1->aData;
+ if( aData[18]!=(u8)iVersion || aData[19]!=(u8)iVersion ){
+ rc = sqlite3BtreeBeginTrans(pBtree, 2, 0);
+ if( rc==SQLITE_OK ){
+ rc = sqlite3PagerWrite(pBt->pPage1->pDbPage);
+ if( rc==SQLITE_OK ){
+ aData[18] = (u8)iVersion;
+ aData[19] = (u8)iVersion;
+ }
+ }
+ }
+ }
+
+ pBt->btsFlags &= ~BTS_NO_WAL;
+ return rc;
+}
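+
+/* Illustrative note: read/write version 1 marks a legacy (rollback-journal)
+** database while 2 marks a WAL-capable one, so a hypothetical caller would
+** switch a database into WAL mode with:
+**
+**   rc = sqlite3BtreeSetVersion(pBtree, 2);
+*/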
+
+/*
+** Return true if the cursor has a hint specified. This routine is
+** only used from within assert() statements.
+*/
+SQLITE_PRIVATE int sqlite3BtreeCursorHasHint(BtCursor *pCsr, unsigned int mask){
+ return (pCsr->hints & mask)!=0;
+}
+
+/*
+** Return true if the given Btree is read-only.
+*/
+SQLITE_PRIVATE int sqlite3BtreeIsReadonly(Btree *p){
+ return (p->pBt->btsFlags & BTS_READ_ONLY)!=0;
+}
+
+/*
+** Return the size of the header added to each page by this module.
+*/
+SQLITE_PRIVATE int sqlite3HeaderSizeBtree(void){ return ROUND8(sizeof(MemPage)); }
+
+#if !defined(SQLITE_OMIT_SHARED_CACHE)
+/*
+** Return true if the Btree passed as the only argument is sharable.
+*/
+SQLITE_PRIVATE int sqlite3BtreeSharable(Btree *p){
+ return p->sharable;
+}
+
+/*
+** Return the number of connections to the BtShared object accessed by
+** the Btree handle passed as the only argument. For private caches
+** this is always 1. For shared caches it may be 1 or greater.
+*/
+SQLITE_PRIVATE int sqlite3BtreeConnectionCount(Btree *p){
+ testcase( p->sharable );
+ return p->pBt->nRef;
+}
+#endif
+
+/************** End of btree.c ***********************************************/
+/************** Begin file backup.c ******************************************/
+/*
+** 2009 January 28
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+** This file contains the implementation of the sqlite3_backup_XXX()
+** API functions and the related features.
+*/
+/* #include "sqliteInt.h" */
+/* #include "btreeInt.h" */
+
+/*
+** Structure allocated for each backup operation.
+*/
+struct sqlite3_backup {
+ sqlite3* pDestDb; /* Destination database handle */
+ Btree *pDest; /* Destination b-tree file */
+ u32 iDestSchema; /* Original schema cookie in destination */
+ int bDestLocked; /* True once a write-transaction is open on pDest */
+
+ Pgno iNext; /* Page number of the next source page to copy */
+ sqlite3* pSrcDb; /* Source database handle */
+ Btree *pSrc; /* Source b-tree file */
+
+ int rc; /* Backup process error code */
+
+ /* These two variables are set by every call to backup_step(). They are
+ ** read by calls to backup_remaining() and backup_pagecount().
+ */
+ Pgno nRemaining; /* Number of pages left to copy */
+ Pgno nPagecount; /* Total number of pages to copy */
+
+ int isAttached; /* True once backup has been registered with pager */
+ sqlite3_backup *pNext; /* Next backup associated with source pager */
+};
+
+/*
+** THREAD SAFETY NOTES:
+**
+** Once it has been created using backup_init(), a single sqlite3_backup
+** structure may be accessed via two groups of thread-safe entry points:
+**
+** * Via the sqlite3_backup_XXX() API functions backup_step() and
+** backup_finish(). Both these functions obtain the source database
+** handle mutex and the mutex associated with the source BtShared
+** structure, in that order.
+**
+** * Via the BackupUpdate() and BackupRestart() functions, which are
+** invoked by the pager layer to report various state changes in
+** the page cache associated with the source database. The mutex
+** associated with the source database BtShared structure will always
+** be held when either of these functions are invoked.
+**
+** The other sqlite3_backup_XXX() API functions, backup_remaining() and
+** backup_pagecount() are not thread-safe functions. If they are called
+** while some other thread is calling backup_step() or backup_finish(),
+** the values returned may be invalid. There is no way for a call to
+** BackupUpdate() or BackupRestart() to interfere with backup_remaining()
+** or backup_pagecount().
+**
+** Depending on the SQLite configuration, the database handles and/or
+** the Btree objects may have their own mutexes that require locking.
+** Non-sharable Btrees (in-memory databases, for example) do not have
+** associated mutexes.
+*/
+
+/*
+** Return a pointer corresponding to database zDb (i.e. "main", "temp")
+** in connection handle pDb. If such a database cannot be found, return
+** a NULL pointer and write an error message to pErrorDb.
+**
+** If the "temp" database is requested, it may need to be opened by this
+** function. If an error occurs while doing so, return 0 and write an
+** error message to pErrorDb.
+*/
+static Btree *findBtree(sqlite3 *pErrorDb, sqlite3 *pDb, const char *zDb){
+ int i = sqlite3FindDbName(pDb, zDb);
+
+ if( i==1 ){
+ Parse sParse;
+ int rc = 0;
+ memset(&sParse, 0, sizeof(sParse));
+ sParse.db = pDb;
+ if( sqlite3OpenTempDatabase(&sParse) ){
+ sqlite3ErrorWithMsg(pErrorDb, sParse.rc, "%s", sParse.zErrMsg);
+ rc = SQLITE_ERROR;
+ }
+ sqlite3DbFree(pErrorDb, sParse.zErrMsg);
+ sqlite3ParserReset(&sParse);
+ if( rc ){
+ return 0;
+ }
+ }
+
+ if( i<0 ){
+ sqlite3ErrorWithMsg(pErrorDb, SQLITE_ERROR, "unknown database %s", zDb);
+ return 0;
+ }
+
+ return pDb->aDb[i].pBt;
+}
+
+/*
+** Attempt to set the page size of the destination to match the page size
+** of the source.
+*/
+static int setDestPgsz(sqlite3_backup *p){
+ int rc;
+ rc = sqlite3BtreeSetPageSize(p->pDest,sqlite3BtreeGetPageSize(p->pSrc),0,0);
+ return rc;
+}
+
+/*
+** Check that there is no open read-transaction on the b-tree passed as the
+** second argument. If there is not, return SQLITE_OK. Otherwise, if there
+** is an open read-transaction, return SQLITE_ERROR and leave an error
+** message in database handle db.
+*/
+static int checkReadTransaction(sqlite3 *db, Btree *p){
+ if( sqlite3BtreeIsInReadTrans(p) ){
+ sqlite3ErrorWithMsg(db, SQLITE_ERROR, "destination database is in use");
+ return SQLITE_ERROR;
+ }
+ return SQLITE_OK;
+}
+
+/*
+** Create an sqlite3_backup process to copy the contents of zSrcDb from
+** connection handle pSrcDb to zDestDb in pDestDb. If successful, return
+** a pointer to the new sqlite3_backup object.
+**
+** If an error occurs, NULL is returned and an error code and error message
+** are stored in database handle pDestDb.
+*/
+SQLITE_API sqlite3_backup *sqlite3_backup_init(
+ sqlite3* pDestDb, /* Database to write to */
+ const char *zDestDb, /* Name of database within pDestDb */
+ sqlite3* pSrcDb, /* Database connection to read from */
+ const char *zSrcDb /* Name of database within pSrcDb */
+){
+ sqlite3_backup *p; /* Value to return */
+
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( !sqlite3SafetyCheckOk(pSrcDb)||!sqlite3SafetyCheckOk(pDestDb) ){
+ (void)SQLITE_MISUSE_BKPT;
+ return 0;
+ }
+#endif
+
+ /* Lock the source database handle. The destination database
+ ** handle is not locked in this routine, but it is locked in
+ ** sqlite3_backup_step(). The user is required to ensure that no
+ ** other thread accesses the destination handle for the duration
+ ** of the backup operation. Any attempt to use the destination
+ ** database connection while a backup is in progress may cause
+ ** a malfunction or a deadlock.
+ */
+ sqlite3_mutex_enter(pSrcDb->mutex);
+ sqlite3_mutex_enter(pDestDb->mutex);
+
+ if( pSrcDb==pDestDb ){
+ sqlite3ErrorWithMsg(
+ pDestDb, SQLITE_ERROR, "source and destination must be distinct"
+ );
+ p = 0;
+ }else {
+ /* Allocate space for a new sqlite3_backup object...
+ ** EVIDENCE-OF: R-64852-21591 The sqlite3_backup object is created by a
+ ** call to sqlite3_backup_init() and is destroyed by a call to
+ ** sqlite3_backup_finish(). */
+ p = (sqlite3_backup *)sqlite3MallocZero(sizeof(sqlite3_backup));
+ if( !p ){
+ sqlite3Error(pDestDb, SQLITE_NOMEM_BKPT);
+ }
+ }
+
+ /* If the allocation succeeded, populate the new object. */
+ if( p ){
+ p->pSrc = findBtree(pDestDb, pSrcDb, zSrcDb);
+ p->pDest = findBtree(pDestDb, pDestDb, zDestDb);
+ p->pDestDb = pDestDb;
+ p->pSrcDb = pSrcDb;
+ p->iNext = 1;
+ p->isAttached = 0;
+
+ if( 0==p->pSrc || 0==p->pDest
+ || checkReadTransaction(pDestDb, p->pDest)!=SQLITE_OK
+ ){
+ /* One (or both) of the named databases did not exist or an OOM
+ ** error was hit. Or there is a transaction open on the destination
+ ** database. The error has already been written into the pDestDb
+ ** handle. All that is left to do here is free the sqlite3_backup
+ ** structure. */
+ sqlite3_free(p);
+ p = 0;
+ }
+ }
+ if( p ){
+ p->pSrc->nBackup++;
+ }
+
+ sqlite3_mutex_leave(pDestDb->mutex);
+ sqlite3_mutex_leave(pSrcDb->mutex);
+ return p;
+}
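+
+/* A minimal usage sketch for the API implemented above. The wrapper
+** name copyDatabase() and the handles pTo/pFrom are hypothetical
+** caller-side names; only the sqlite3_backup_* and sqlite3_errcode()
+** calls are part of the real interface.
+**
+**   int copyDatabase(sqlite3 *pTo, sqlite3 *pFrom){
+**     sqlite3_backup *p = sqlite3_backup_init(pTo, "main", pFrom, "main");
+**     if( p==0 ) return sqlite3_errcode(pTo);
+**     sqlite3_backup_step(p, -1);         (a negative count copies all)
+**     return sqlite3_backup_finish(p);    (SQLITE_OK on success)
+**   }
+*/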
+
+/*
+** Argument rc is an SQLite error code. Return true if this error is
+** considered fatal if encountered during a backup operation. All errors
+** are considered fatal except for SQLITE_BUSY and SQLITE_LOCKED.
+*/
+static int isFatalError(int rc){
+ return (rc!=SQLITE_OK && rc!=SQLITE_BUSY && ALWAYS(rc!=SQLITE_LOCKED));
+}
+
+/*
+** Parameter zSrcData points to a buffer containing the data for
+** page iSrcPg from the source database. Copy this data into the
+** destination database.
+*/
+static int backupOnePage(
+ sqlite3_backup *p, /* Backup handle */
+ Pgno iSrcPg, /* Source database page to back up */
+ const u8 *zSrcData, /* Source database page data */
+ int bUpdate /* True for an update, false otherwise */
+){
+ Pager * const pDestPager = sqlite3BtreePager(p->pDest);
+ const int nSrcPgsz = sqlite3BtreeGetPageSize(p->pSrc);
+ int nDestPgsz = sqlite3BtreeGetPageSize(p->pDest);
+ const int nCopy = MIN(nSrcPgsz, nDestPgsz);
+ const i64 iEnd = (i64)iSrcPg*(i64)nSrcPgsz;
+ int rc = SQLITE_OK;
+ i64 iOff;
+
+ assert( sqlite3BtreeGetReserveNoMutex(p->pSrc)>=0 );
+ assert( p->bDestLocked );
+ assert( !isFatalError(p->rc) );
+ assert( iSrcPg!=PENDING_BYTE_PAGE(p->pSrc->pBt) );
+ assert( zSrcData );
+
+ /* Catch the case where the destination is an in-memory database and the
+ ** page sizes of the source and destination differ.
+ */
+ if( nSrcPgsz!=nDestPgsz && sqlite3PagerIsMemdb(pDestPager) ){
+ rc = SQLITE_READONLY;
+ }
+
+ /* This loop runs once for each destination page spanned by the source
+ ** page. For each iteration, variable iOff is set to the byte offset
+ ** of the destination page.
+ */
+ for(iOff=iEnd-(i64)nSrcPgsz; rc==SQLITE_OK && iOff<iEnd; iOff+=nDestPgsz){
+ DbPage *pDestPg = 0;
+ Pgno iDest = (Pgno)(iOff/nDestPgsz)+1;
+ if( iDest==PENDING_BYTE_PAGE(p->pDest->pBt) ) continue;
+ if( SQLITE_OK==(rc = sqlite3PagerGet(pDestPager, iDest, &pDestPg, 0))
+ && SQLITE_OK==(rc = sqlite3PagerWrite(pDestPg))
+ ){
+ const u8 *zIn = &zSrcData[iOff%nSrcPgsz];
+ u8 *zDestData = sqlite3PagerGetData(pDestPg);
+ u8 *zOut = &zDestData[iOff%nDestPgsz];
+
+ /* Copy the data from the source page into the destination page.
+ ** Then clear the Btree layer MemPage.isInit flag. Both this module
+ ** and the pager code use this trick (clearing the first byte
+ ** of the page 'extra' space to invalidate the Btree layer's
+ ** cached parse of the page). MemPage.isInit is marked
+ ** "MUST BE FIRST" for this purpose.
+ */
+ memcpy(zOut, zIn, nCopy);
+ ((u8 *)sqlite3PagerGetExtra(pDestPg))[0] = 0;
+ if( iOff==0 && bUpdate==0 ){
+ sqlite3Put4byte(&zOut[28], sqlite3BtreeLastPage(p->pSrc));
+ }
+ }
+ sqlite3PagerUnref(pDestPg);
+ }
+
+ return rc;
+}
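+
+/* A worked example of the mapping performed by the loop above, assuming
+** a 4096 byte source page size and a 1024 byte destination page size:
+** for source page 2 (bytes 4096..8191 of the database image), iEnd is
+** 8192 and iOff runs through 4096, 5120, 6144 and 7168, so the loop
+** writes destination pages 5, 6, 7 and 8, copying one 1024 byte
+** quarter (nCopy = MIN(4096,1024)) of the source page into each.
+*/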
+
+/*
+** If pFile is currently larger than iSize bytes, then truncate it to
+** exactly iSize bytes. If pFile is not larger than iSize bytes, then
+** this function is a no-op.
+**
+** Return SQLITE_OK if everything is successful, or an SQLite error
+** code if an error occurs.
+*/
+static int backupTruncateFile(sqlite3_file *pFile, i64 iSize){
+ i64 iCurrent;
+ int rc = sqlite3OsFileSize(pFile, &iCurrent);
+ if( rc==SQLITE_OK && iCurrent>iSize ){
+ rc = sqlite3OsTruncate(pFile, iSize);
+ }
+ return rc;
+}
+
+/*
+** Register this backup object with the associated source pager for
+** callbacks when pages are changed or the cache invalidated.
+*/
+static void attachBackupObject(sqlite3_backup *p){
+ sqlite3_backup **pp;
+ assert( sqlite3BtreeHoldsMutex(p->pSrc) );
+ pp = sqlite3PagerBackupPtr(sqlite3BtreePager(p->pSrc));
+ p->pNext = *pp;
+ *pp = p;
+ p->isAttached = 1;
+}
+
+/*
+** Copy nPage pages from the source b-tree to the destination.
+*/
+SQLITE_API int sqlite3_backup_step(sqlite3_backup *p, int nPage){
+ int rc;
+ int destMode; /* Destination journal mode */
+ int pgszSrc = 0; /* Source page size */
+ int pgszDest = 0; /* Destination page size */
+
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( p==0 ) return SQLITE_MISUSE_BKPT;
+#endif
+ sqlite3_mutex_enter(p->pSrcDb->mutex);
+ sqlite3BtreeEnter(p->pSrc);
+ if( p->pDestDb ){
+ sqlite3_mutex_enter(p->pDestDb->mutex);
+ }
+
+ rc = p->rc;
+ if( !isFatalError(rc) ){
+ Pager * const pSrcPager = sqlite3BtreePager(p->pSrc); /* Source pager */
+ Pager * const pDestPager = sqlite3BtreePager(p->pDest); /* Dest pager */
+ int ii; /* Iterator variable */
+ int nSrcPage = -1; /* Size of source db in pages */
+ int bCloseTrans = 0; /* True if src db requires unlocking */
+
+ /* If the source pager is currently in a write-transaction, return
+ ** SQLITE_BUSY immediately.
+ */
+ if( p->pDestDb && p->pSrc->pBt->inTransaction==TRANS_WRITE ){
+ rc = SQLITE_BUSY;
+ }else{
+ rc = SQLITE_OK;
+ }
+
+ /* If there is no open read-transaction on the source database, open
+ ** one now. If a transaction is opened here, then it will be closed
+ ** before this function exits.
+ */
+ if( rc==SQLITE_OK && 0==sqlite3BtreeIsInReadTrans(p->pSrc) ){
+ rc = sqlite3BtreeBeginTrans(p->pSrc, 0, 0);
+ bCloseTrans = 1;
+ }
+
+ /* If the destination database has not yet been locked (i.e. if this
+ ** is the first call to backup_step() for the current backup operation),
+ ** try to set its page size to the same as the source database. This
+ ** is especially important on ZipVFS systems, as in that case it is
+ ** not possible to create a database file that uses one page size by
+ ** writing to it with another. */
+ if( p->bDestLocked==0 && rc==SQLITE_OK && setDestPgsz(p)==SQLITE_NOMEM ){
+ rc = SQLITE_NOMEM;
+ }
+
+ /* Lock the destination database, if it is not locked already. */
+ if( SQLITE_OK==rc && p->bDestLocked==0
+ && SQLITE_OK==(rc = sqlite3BtreeBeginTrans(p->pDest, 2,
+ (int*)&p->iDestSchema))
+ ){
+ p->bDestLocked = 1;
+ }
+
+ /* Do not allow backup if the destination database is in WAL mode
+ ** and the page sizes are different between source and destination */
+ pgszSrc = sqlite3BtreeGetPageSize(p->pSrc);
+ pgszDest = sqlite3BtreeGetPageSize(p->pDest);
+ destMode = sqlite3PagerGetJournalMode(sqlite3BtreePager(p->pDest));
+ if( SQLITE_OK==rc && destMode==PAGER_JOURNALMODE_WAL && pgszSrc!=pgszDest ){
+ rc = SQLITE_READONLY;
+ }
+
+ /* Now that there is a read-lock on the source database, query the
+ ** source pager for the number of pages in the database.
+ */
+ nSrcPage = (int)sqlite3BtreeLastPage(p->pSrc);
+ assert( nSrcPage>=0 );
+ for(ii=0; (nPage<0 || ii<nPage) && p->iNext<=(Pgno)nSrcPage && !rc; ii++){
+ const Pgno iSrcPg = p->iNext; /* Source page number */
+ if( iSrcPg!=PENDING_BYTE_PAGE(p->pSrc->pBt) ){
+ DbPage *pSrcPg; /* Source page object */
+ rc = sqlite3PagerGet(pSrcPager, iSrcPg, &pSrcPg,PAGER_GET_READONLY);
+ if( rc==SQLITE_OK ){
+ rc = backupOnePage(p, iSrcPg, sqlite3PagerGetData(pSrcPg), 0);
+ sqlite3PagerUnref(pSrcPg);
+ }
+ }
+ p->iNext++;
+ }
+ if( rc==SQLITE_OK ){
+ p->nPagecount = nSrcPage;
+ p->nRemaining = nSrcPage+1-p->iNext;
+ if( p->iNext>(Pgno)nSrcPage ){
+ rc = SQLITE_DONE;
+ }else if( !p->isAttached ){
+ attachBackupObject(p);
+ }
+ }
+
+ /* Update the schema version field in the destination database. This
+ ** is to make sure that the schema-version really does change in
+ ** the case where the source and destination databases have the
+ ** same schema version.
+ */
+ if( rc==SQLITE_DONE ){
+ if( nSrcPage==0 ){
+ rc = sqlite3BtreeNewDb(p->pDest);
+ nSrcPage = 1;
+ }
+ if( rc==SQLITE_OK || rc==SQLITE_DONE ){
+ rc = sqlite3BtreeUpdateMeta(p->pDest,1,p->iDestSchema+1);
+ }
+ if( rc==SQLITE_OK ){
+ if( p->pDestDb ){
+ sqlite3ResetAllSchemasOfConnection(p->pDestDb);
+ }
+ if( destMode==PAGER_JOURNALMODE_WAL ){
+ rc = sqlite3BtreeSetVersion(p->pDest, 2);
+ }
+ }
+ if( rc==SQLITE_OK ){
+ int nDestTruncate;
+ /* Set nDestTruncate to the final number of pages in the destination
+ ** database. The complication here is that the destination page
+ ** size may be different from the source page size.
+ **
+ ** If the source page size is smaller than the destination page size,
+ ** round up. In this case the call to sqlite3OsTruncate() below will
+ ** fix the size of the file. However it is important to call
+ ** sqlite3PagerTruncateImage() here so that any pages in the
+ ** destination file that lie beyond the nDestTruncate page mark are
+ ** journalled by PagerCommitPhaseOne() before they are destroyed
+ ** by the file truncation.
+ */
+ assert( pgszSrc==sqlite3BtreeGetPageSize(p->pSrc) );
+ assert( pgszDest==sqlite3BtreeGetPageSize(p->pDest) );
+ if( pgszSrc<pgszDest ){
+ int ratio = pgszDest/pgszSrc;
+ nDestTruncate = (nSrcPage+ratio-1)/ratio;
+ if( nDestTruncate==(int)PENDING_BYTE_PAGE(p->pDest->pBt) ){
+ nDestTruncate--;
+ }
+ }else{
+ nDestTruncate = nSrcPage * (pgszSrc/pgszDest);
+ }
+ assert( nDestTruncate>0 );
+
+ if( pgszSrc<pgszDest ){
+ /* If the source page-size is smaller than the destination page-size,
+ ** two extra things may need to happen:
+ **
+ ** * The destination may need to be truncated, and
+ **
+ ** * Data stored on the pages immediately following the
+ ** pending-byte page in the source database may need to be
+ ** copied into the destination database.
+ */
+ const i64 iSize = (i64)pgszSrc * (i64)nSrcPage;
+ sqlite3_file * const pFile = sqlite3PagerFile(pDestPager);
+ Pgno iPg;
+ int nDstPage;
+ i64 iOff;
+ i64 iEnd;
+
+ assert( pFile );
+ assert( nDestTruncate==0
+ || (i64)nDestTruncate*(i64)pgszDest >= iSize || (
+ nDestTruncate==(int)(PENDING_BYTE_PAGE(p->pDest->pBt)-1)
+ && iSize>=PENDING_BYTE && iSize<=PENDING_BYTE+pgszDest
+ ));
+
+ /* This block ensures that all data required to recreate the original
+ ** database has been stored in the journal for pDestPager and the
+ ** journal synced to disk. So at this point we may safely modify
+ ** the database file in any way, knowing that if a power failure
+ ** occurs, the original database will be reconstructed from the
+ ** journal file. */
+ sqlite3PagerPagecount(pDestPager, &nDstPage);
+ for(iPg=nDestTruncate; rc==SQLITE_OK && iPg<=(Pgno)nDstPage; iPg++){
+ if( iPg!=PENDING_BYTE_PAGE(p->pDest->pBt) ){
+ DbPage *pPg;
+ rc = sqlite3PagerGet(pDestPager, iPg, &pPg, 0);
+ if( rc==SQLITE_OK ){
+ rc = sqlite3PagerWrite(pPg);
+ sqlite3PagerUnref(pPg);
+ }
+ }
+ }
+ if( rc==SQLITE_OK ){
+ rc = sqlite3PagerCommitPhaseOne(pDestPager, 0, 1);
+ }
+
+ /* Write the extra pages and truncate the database file as required */
+ iEnd = MIN(PENDING_BYTE + pgszDest, iSize);
+ for(
+ iOff=PENDING_BYTE+pgszSrc;
+ rc==SQLITE_OK && iOff<iEnd;
+ iOff+=pgszSrc
+ ){
+ PgHdr *pSrcPg = 0;
+ const Pgno iSrcPg = (Pgno)((iOff/pgszSrc)+1);
+ rc = sqlite3PagerGet(pSrcPager, iSrcPg, &pSrcPg, 0);
+ if( rc==SQLITE_OK ){
+ u8 *zData = sqlite3PagerGetData(pSrcPg);
+ rc = sqlite3OsWrite(pFile, zData, pgszSrc, iOff);
+ }
+ sqlite3PagerUnref(pSrcPg);
+ }
+ if( rc==SQLITE_OK ){
+ rc = backupTruncateFile(pFile, iSize);
+ }
+
+ /* Sync the database file to disk. */
+ if( rc==SQLITE_OK ){
+ rc = sqlite3PagerSync(pDestPager, 0);
+ }
+ }else{
+ sqlite3PagerTruncateImage(pDestPager, nDestTruncate);
+ rc = sqlite3PagerCommitPhaseOne(pDestPager, 0, 0);
+ }
+
+ /* Finish committing the transaction to the destination database. */
+ if( SQLITE_OK==rc
+ && SQLITE_OK==(rc = sqlite3BtreeCommitPhaseTwo(p->pDest, 0))
+ ){
+ rc = SQLITE_DONE;
+ }
+ }
+ }
+
+ /* If bCloseTrans is true, then this function opened a read transaction
+ ** on the source database. Close the read transaction here. There is
+ ** no need to check the return values of the btree methods here, as
+ ** "committing" a read-only transaction cannot fail.
+ */
+ if( bCloseTrans ){
+ TESTONLY( int rc2 );
+ TESTONLY( rc2 = ) sqlite3BtreeCommitPhaseOne(p->pSrc, 0);
+ TESTONLY( rc2 |= ) sqlite3BtreeCommitPhaseTwo(p->pSrc, 0);
+ assert( rc2==SQLITE_OK );
+ }
+
+ if( rc==SQLITE_IOERR_NOMEM ){
+ rc = SQLITE_NOMEM_BKPT;
+ }
+ p->rc = rc;
+ }
+ if( p->pDestDb ){
+ sqlite3_mutex_leave(p->pDestDb->mutex);
+ }
+ sqlite3BtreeLeave(p->pSrc);
+ sqlite3_mutex_leave(p->pSrcDb->mutex);
+ return rc;
+}
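+
+/* An incremental usage sketch for the routine above (the batch size of
+** 5 pages and the 250ms pause are arbitrary caller choices): because
+** backup_step() opens and closes its own read-transaction on the source
+** when none is already open, copying in small batches lets other
+** connections to the source database make progress between calls.
+**
+**   do{
+**     rc = sqlite3_backup_step(p, 5);
+**     if( rc==SQLITE_OK || rc==SQLITE_BUSY || rc==SQLITE_LOCKED ){
+**       sqlite3_sleep(250);
+**     }
+**   }while( rc==SQLITE_OK || rc==SQLITE_BUSY || rc==SQLITE_LOCKED );
+*/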
+
+/*
+** Release all resources associated with an sqlite3_backup* handle.
+*/
+SQLITE_API int sqlite3_backup_finish(sqlite3_backup *p){
+ sqlite3_backup **pp; /* Ptr to head of pagers backup list */
+ sqlite3 *pSrcDb; /* Source database connection */
+ int rc; /* Value to return */
+
+ /* Enter the mutexes */
+ if( p==0 ) return SQLITE_OK;
+ pSrcDb = p->pSrcDb;
+ sqlite3_mutex_enter(pSrcDb->mutex);
+ sqlite3BtreeEnter(p->pSrc);
+ if( p->pDestDb ){
+ sqlite3_mutex_enter(p->pDestDb->mutex);
+ }
+
+ /* Detach this backup from the source pager. */
+ if( p->pDestDb ){
+ p->pSrc->nBackup--;
+ }
+ if( p->isAttached ){
+ pp = sqlite3PagerBackupPtr(sqlite3BtreePager(p->pSrc));
+ assert( pp!=0 );
+ while( *pp!=p ){
+ pp = &(*pp)->pNext;
+ assert( pp!=0 );
+ }
+ *pp = p->pNext;
+ }
+
+ /* If a transaction is still open on the Btree, roll it back. */
+ sqlite3BtreeRollback(p->pDest, SQLITE_OK, 0);
+
+ /* Set the error code of the destination database handle. */
+ rc = (p->rc==SQLITE_DONE) ? SQLITE_OK : p->rc;
+ if( p->pDestDb ){
+ sqlite3Error(p->pDestDb, rc);
+
+ /* Exit the mutexes and free the backup context structure. */
+ sqlite3LeaveMutexAndCloseZombie(p->pDestDb);
+ }
+ sqlite3BtreeLeave(p->pSrc);
+ if( p->pDestDb ){
+ /* EVIDENCE-OF: R-64852-21591 The sqlite3_backup object is created by a
+ ** call to sqlite3_backup_init() and is destroyed by a call to
+ ** sqlite3_backup_finish(). */
+ sqlite3_free(p);
+ }
+ sqlite3LeaveMutexAndCloseZombie(pSrcDb);
+ return rc;
+}
+
+/*
+** Return the number of pages still to be backed up as of the most recent
+** call to sqlite3_backup_step().
+*/
+SQLITE_API int sqlite3_backup_remaining(sqlite3_backup *p){
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( p==0 ){
+ (void)SQLITE_MISUSE_BKPT;
+ return 0;
+ }
+#endif
+ return p->nRemaining;
+}
+
+/*
+** Return the total number of pages in the source database as of the most
+** recent call to sqlite3_backup_step().
+*/
+SQLITE_API int sqlite3_backup_pagecount(sqlite3_backup *p){
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( p==0 ){
+ (void)SQLITE_MISUSE_BKPT;
+ return 0;
+ }
+#endif
+ return p->nPagecount;
+}
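+
+/* For example, a caller can combine the two accessors above into a
+** rough progress percentage after each call to sqlite3_backup_step()
+** (p is the hypothetical backup handle):
+**
+**   int nTotal = sqlite3_backup_pagecount(p);
+**   int nDone = nTotal - sqlite3_backup_remaining(p);
+**   int pct = nTotal>0 ? (100*nDone)/nTotal : 0;
+**
+** With 1000 total pages and 250 still to copy, pct is 75.
+*/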
+
+/*
+** This function is called after the contents of page iPage of the
+** source database have been modified. If page iPage has already been
+** copied into the destination database, then the data written to the
+** destination is now invalidated. The destination copy of iPage needs
+** to be updated with the new data before the backup operation is
+** complete.
+**
+** It is assumed that the mutex associated with the BtShared object
+** corresponding to the source database is held when this function is
+** called.
+*/
+static SQLITE_NOINLINE void backupUpdate(
+ sqlite3_backup *p,
+ Pgno iPage,
+ const u8 *aData
+){
+ assert( p!=0 );
+ do{
+ assert( sqlite3_mutex_held(p->pSrc->pBt->mutex) );
+ if( !isFatalError(p->rc) && iPage<p->iNext ){
+ /* The backup process p has already copied page iPage. But now it
+ ** has been modified by a transaction on the source pager. Copy
+ ** the new data into the backup.
+ */
+ int rc;
+ assert( p->pDestDb );
+ sqlite3_mutex_enter(p->pDestDb->mutex);
+ rc = backupOnePage(p, iPage, aData, 1);
+ sqlite3_mutex_leave(p->pDestDb->mutex);
+ assert( rc!=SQLITE_BUSY && rc!=SQLITE_LOCKED );
+ if( rc!=SQLITE_OK ){
+ p->rc = rc;
+ }
+ }
+ }while( (p = p->pNext)!=0 );
+}
+SQLITE_PRIVATE void sqlite3BackupUpdate(sqlite3_backup *pBackup, Pgno iPage, const u8 *aData){
+ if( pBackup ) backupUpdate(pBackup, iPage, aData);
+}
+
+/*
+** Restart the backup process. This is called when the pager layer
+** detects that the database has been modified by an external database
+** connection. In this case there is no way of knowing which of the
+** pages that have been copied into the destination database are still
+** valid and which are not, so the entire process needs to be restarted.
+**
+** It is assumed that the mutex associated with the BtShared object
+** corresponding to the source database is held when this function is
+** called.
+*/
+SQLITE_PRIVATE void sqlite3BackupRestart(sqlite3_backup *pBackup){
+ sqlite3_backup *p; /* Iterator variable */
+ for(p=pBackup; p; p=p->pNext){
+ assert( sqlite3_mutex_held(p->pSrc->pBt->mutex) );
+ p->iNext = 1;
+ }
+}
+
+#ifndef SQLITE_OMIT_VACUUM
+/*
+** Copy the complete content of pFrom into pTo. A transaction
+** must be active for both files.
+**
+** The size of file pTo may be reduced by this operation. If anything
+** goes wrong, the transaction on pTo is rolled back. If successful, the
+** transaction is committed before returning.
+*/
+SQLITE_PRIVATE int sqlite3BtreeCopyFile(Btree *pTo, Btree *pFrom){
+ int rc;
+ sqlite3_file *pFd; /* File descriptor for database pTo */
+ sqlite3_backup b;
+ sqlite3BtreeEnter(pTo);
+ sqlite3BtreeEnter(pFrom);
+
+ assert( sqlite3BtreeIsInTrans(pTo) );
+ pFd = sqlite3PagerFile(sqlite3BtreePager(pTo));
+ if( pFd->pMethods ){
+ i64 nByte = sqlite3BtreeGetPageSize(pFrom)*(i64)sqlite3BtreeLastPage(pFrom);
+ rc = sqlite3OsFileControl(pFd, SQLITE_FCNTL_OVERWRITE, &nByte);
+ if( rc==SQLITE_NOTFOUND ) rc = SQLITE_OK;
+ if( rc ) goto copy_finished;
+ }
+
+ /* Set up an sqlite3_backup object. sqlite3_backup.pDestDb must be set
+ ** to 0. This is used by the implementations of sqlite3_backup_step()
+ ** and sqlite3_backup_finish() to detect that they are being called
+ ** from this function, not directly by the user.
+ */
+ memset(&b, 0, sizeof(b));
+ b.pSrcDb = pFrom->db;
+ b.pSrc = pFrom;
+ b.pDest = pTo;
+ b.iNext = 1;
+
+ /* 0x7FFFFFFF is the hard limit for the number of pages in a database
+ ** file. By passing this as the number of pages to copy to
+ ** sqlite3_backup_step(), we can guarantee that the copy finishes
+ ** within a single call (unless an error occurs). The assert() statement
+ ** checks this assumption - (b.rc) should be set to either SQLITE_DONE
+ ** or an error code. */
+ sqlite3_backup_step(&b, 0x7FFFFFFF);
+ assert( b.rc!=SQLITE_OK );
+
+ rc = sqlite3_backup_finish(&b);
+ if( rc==SQLITE_OK ){
+ pTo->pBt->btsFlags &= ~BTS_PAGESIZE_FIXED;
+ }else{
+ sqlite3PagerClearCache(sqlite3BtreePager(b.pDest));
+ }
+
+ assert( sqlite3BtreeIsInTrans(pTo)==0 );
+copy_finished:
+ sqlite3BtreeLeave(pFrom);
+ sqlite3BtreeLeave(pTo);
+ return rc;
+}
+#endif /* SQLITE_OMIT_VACUUM */
+
+/************** End of backup.c **********************************************/
+/************** Begin file vdbemem.c *****************************************/
+/*
+** 2004 May 26
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+**
+** This file contains code used to manipulate the "Mem" structure. A "Mem"
+** stores a single value in the VDBE. Mem is an opaque structure visible
+** only within the VDBE. Interface routines refer to a Mem using the
+** name sqlite3_value.
+*/
+/* #include "sqliteInt.h" */
+/* #include "vdbeInt.h" */
+
+/* True if X is a power of two. 0 is considered a power of two here.
+** In other words, return true if X has at most one bit set.
+*/
+#define ISPOWEROF2(X) (((X)&((X)-1))==0)
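+/* For example, ISPOWEROF2(8) is true because 8&7==0, and ISPOWEROF2(0)
+** is true by the convention above, but ISPOWEROF2(6) is false because
+** 6&5==4. */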
+
+#ifdef SQLITE_DEBUG
+/*
+** Check invariants on a Mem object.
+**
+** This routine is intended for use inside of assert() statements, like
+** this: assert( sqlite3VdbeCheckMemInvariants(pMem) );
+*/
+SQLITE_PRIVATE int sqlite3VdbeCheckMemInvariants(Mem *p){
+ /* If MEM_Dyn is set then Mem.xDel!=0.
+ ** Mem.xDel might not be initialized if MEM_Dyn is clear.
+ */
+ assert( (p->flags & MEM_Dyn)==0 || p->xDel!=0 );
+
+ /* MEM_Dyn may only be set if Mem.szMalloc==0. In this way we
+ ** ensure that if Mem.szMalloc>0 then it is safe to do
+ ** Mem.z = Mem.zMalloc without having to check Mem.flags&MEM_Dyn.
+ ** That saves a few cycles in inner loops. */
+ assert( (p->flags & MEM_Dyn)==0 || p->szMalloc==0 );
+
+ /* Cannot have more than one of MEM_Int, MEM_Real, or MEM_IntReal */
+ assert( ISPOWEROF2(p->flags & (MEM_Int|MEM_Real|MEM_IntReal)) );
+
+ if( p->flags & MEM_Null ){
+ /* Cannot be both MEM_Null and some other type */
+ assert( (p->flags & (MEM_Int|MEM_Real|MEM_Str|MEM_Blob|MEM_Agg))==0 );
+
+ /* If MEM_Null is set, then either the value is a pure NULL (the usual
+ ** case) or it is a pointer set using sqlite3_bind_pointer() or
+ ** sqlite3_result_pointer(). If a pointer, then MEM_Term must also be
+ ** set.
+ */
+ if( (p->flags & (MEM_Term|MEM_Subtype))==(MEM_Term|MEM_Subtype) ){
+ /* This is a pointer type. There may be a flag to indicate what to
+ ** do with the pointer. */
+ assert( ((p->flags&MEM_Dyn)!=0 ? 1 : 0) +
+ ((p->flags&MEM_Ephem)!=0 ? 1 : 0) +
+ ((p->flags&MEM_Static)!=0 ? 1 : 0) <= 1 );
+
+ /* No other bits set */
+ assert( (p->flags & ~(MEM_Null|MEM_Term|MEM_Subtype|MEM_FromBind
+ |MEM_Dyn|MEM_Ephem|MEM_Static))==0 );
+ }else{
+ /* A pure NULL might have other flags, such as MEM_Static, MEM_Dyn,
+ ** MEM_Ephem, MEM_Cleared, or MEM_Subtype */
+ }
+ }else{
+ /* The MEM_Cleared bit is only allowed on NULLs */
+ assert( (p->flags & MEM_Cleared)==0 );
+ }
+
+ /* The szMalloc field holds the correct memory allocation size */
+ assert( p->szMalloc==0
+ || p->szMalloc==sqlite3DbMallocSize(p->db,p->zMalloc) );
+
+ /* If p holds a string or blob, the Mem.z must point to exactly
+ ** one of the following:
+ **
+ ** (1) Memory in Mem.zMalloc and managed by the Mem object
+ ** (2) Memory to be freed using Mem.xDel
+ ** (3) An ephemeral string or blob
+ ** (4) A static string or blob
+ */
+ if( (p->flags & (MEM_Str|MEM_Blob)) && p->n>0 ){
+ assert(
+ ((p->szMalloc>0 && p->z==p->zMalloc)? 1 : 0) +
+ ((p->flags&MEM_Dyn)!=0 ? 1 : 0) +
+ ((p->flags&MEM_Ephem)!=0 ? 1 : 0) +
+ ((p->flags&MEM_Static)!=0 ? 1 : 0) == 1
+ );
+ }
+ return 1;
+}
+#endif
+
+/*
+** Render a Mem object which is one of MEM_Int, MEM_Real, or MEM_IntReal
+** into a buffer.
+*/
+static void vdbeMemRenderNum(int sz, char *zBuf, Mem *p){
+ StrAccum acc;
+ assert( p->flags & (MEM_Int|MEM_Real|MEM_IntReal) );
+ assert( sz>22 );
+ if( p->flags & MEM_Int ){
+#if GCC_VERSION>=7000000
+ /* Work-around for GCC bug
+ ** https://gcc.gnu.org/bugzilla/show_bug.cgi?id=96270 */
+ i64 x;
+ assert( (p->flags&MEM_Int)*2==sizeof(x) );
+ memcpy(&x, (char*)&p->u, (p->flags&MEM_Int)*2);
+ sqlite3Int64ToText(x, zBuf);
+#else
+ sqlite3Int64ToText(p->u.i, zBuf);
+#endif
+ }else{
+ sqlite3StrAccumInit(&acc, 0, zBuf, sz, 0);
+ sqlite3_str_appendf(&acc, "%!.15g",
+ (p->flags & MEM_IntReal)!=0 ? (double)p->u.i : p->u.r);
+ assert( acc.zText==zBuf && acc.mxAlloc<=0 );
+ zBuf[acc.nChar] = 0; /* Fast version of sqlite3StrAccumFinish(&acc) */
+ }
+}
+
+#ifdef SQLITE_DEBUG
+/*
+** Validity checks on pMem. pMem holds a string.
+**
+** (1) Check that string value of pMem agrees with its integer or real value.
+** (2) Check that the string is correctly zero terminated
+**
+** A single int or real value always converts to the same string. But
+** many different strings can be converted into the same int or real.
+** If a table contains a numeric value and an index is based on the
+** corresponding string value, then it is important that the string be
+** derived from the numeric value, not the other way around, to ensure
+** that the index and table are consistent. See ticket
+** https://www.sqlite.org/src/info/343634942dd54ab (2018-01-31) for
+** an example.
+**
+** This routine looks at pMem to verify that if it has both a numeric
+** representation and a string representation then the string rep has
+** been derived from the numeric and not the other way around. It returns
+** true if everything is ok and false if there is a problem.
+**
+** This routine is for use inside of assert() statements only.
+*/
+SQLITE_PRIVATE int sqlite3VdbeMemValidStrRep(Mem *p){
+ char zBuf[100];
+ char *z;
+ int i, j, incr;
+ if( (p->flags & MEM_Str)==0 ) return 1;
+ if( p->flags & MEM_Term ){
+ /* Ensure that the string is properly zero-terminated. Pay particular
+ ** attention to the case where p->n is odd */
+ if( p->szMalloc>0 && p->z==p->zMalloc ){
+ assert( p->enc==SQLITE_UTF8 || p->szMalloc >= ((p->n+1)&~1)+2 );
+ assert( p->enc!=SQLITE_UTF8 || p->szMalloc >= p->n+1 );
+ }
+ assert( p->z[p->n]==0 );
+ assert( p->enc==SQLITE_UTF8 || p->z[(p->n+1)&~1]==0 );
+ assert( p->enc==SQLITE_UTF8 || p->z[((p->n+1)&~1)+1]==0 );
+ }
+ if( (p->flags & (MEM_Int|MEM_Real|MEM_IntReal))==0 ) return 1;
+ vdbeMemRenderNum(sizeof(zBuf), zBuf, p);
+ z = p->z;
+ i = j = 0;
+ incr = 1;
+ if( p->enc!=SQLITE_UTF8 ){
+ incr = 2;
+ if( p->enc==SQLITE_UTF16BE ) z++;
+ }
+ while( zBuf[j] ){
+ if( zBuf[j++]!=z[i] ) return 0;
+ i += incr;
+ }
+ return 1;
+}
+#endif /* SQLITE_DEBUG */
+
+/*
+** If pMem is an object with a valid string representation, this routine
+** ensures the internal encoding for the string representation is
+** 'desiredEnc', one of SQLITE_UTF8, SQLITE_UTF16LE or SQLITE_UTF16BE.
+**
+** If pMem is not a string object, or the encoding of the string
+** representation is already stored using the requested encoding, then this
+** routine is a no-op.
+**
+** SQLITE_OK is returned if the conversion is successful (or not required).
+** SQLITE_NOMEM may be returned if a malloc() fails during conversion
+** between formats.
+*/
+SQLITE_PRIVATE int sqlite3VdbeChangeEncoding(Mem *pMem, int desiredEnc){
+#ifndef SQLITE_OMIT_UTF16
+ int rc;
+#endif
+ assert( !sqlite3VdbeMemIsRowSet(pMem) );
+ assert( desiredEnc==SQLITE_UTF8 || desiredEnc==SQLITE_UTF16LE
+ || desiredEnc==SQLITE_UTF16BE );
+ if( !(pMem->flags&MEM_Str) || pMem->enc==desiredEnc ){
+ return SQLITE_OK;
+ }
+ assert( pMem->db==0 || sqlite3_mutex_held(pMem->db->mutex) );
+#ifdef SQLITE_OMIT_UTF16
+ return SQLITE_ERROR;
+#else
+
+ /* MemTranslate() may return SQLITE_OK or SQLITE_NOMEM. If NOMEM is returned,
+ ** then the encoding of the value may not have changed.
+ */
+ rc = sqlite3VdbeMemTranslate(pMem, (u8)desiredEnc);
+ assert(rc==SQLITE_OK || rc==SQLITE_NOMEM);
+ assert(rc==SQLITE_OK || pMem->enc!=desiredEnc);
+ assert(rc==SQLITE_NOMEM || pMem->enc==desiredEnc);
+ return rc;
+#endif
+}
+
+/*
+** Make sure pMem->z points to a writable allocation of at least n bytes.
+**
+** If the bPreserve argument is true, then the content of pMem->z is
+** copied into the new allocation. pMem must be either a string or
+** blob if bPreserve is true. If bPreserve is false, any prior content
+** in pMem->z is discarded.
+*/
+SQLITE_PRIVATE SQLITE_NOINLINE int sqlite3VdbeMemGrow(Mem *pMem, int n, int bPreserve){
+ assert( sqlite3VdbeCheckMemInvariants(pMem) );
+ assert( !sqlite3VdbeMemIsRowSet(pMem) );
+ testcase( pMem->db==0 );
+
+ /* If the bPreserve flag is set to true, then the memory cell must already
+ ** contain a valid string or blob value. */
+ assert( bPreserve==0 || pMem->flags&(MEM_Blob|MEM_Str) );
+ testcase( bPreserve && pMem->z==0 );
+
+ assert( pMem->szMalloc==0
+ || pMem->szMalloc==sqlite3DbMallocSize(pMem->db, pMem->zMalloc) );
+ if( pMem->szMalloc>0 && bPreserve && pMem->z==pMem->zMalloc ){
+ if( pMem->db ){
+ pMem->z = pMem->zMalloc = sqlite3DbReallocOrFree(pMem->db, pMem->z, n);
+ }else{
+ pMem->zMalloc = sqlite3Realloc(pMem->z, n);
+ if( pMem->zMalloc==0 ) sqlite3_free(pMem->z);
+ pMem->z = pMem->zMalloc;
+ }
+ bPreserve = 0;
+ }else{
+ if( pMem->szMalloc>0 ) sqlite3DbFreeNN(pMem->db, pMem->zMalloc);
+ pMem->zMalloc = sqlite3DbMallocRaw(pMem->db, n);
+ }
+ if( pMem->zMalloc==0 ){
+ sqlite3VdbeMemSetNull(pMem);
+ pMem->z = 0;
+ pMem->szMalloc = 0;
+ return SQLITE_NOMEM_BKPT;
+ }else{
+ pMem->szMalloc = sqlite3DbMallocSize(pMem->db, pMem->zMalloc);
+ }
+
+ if( bPreserve && pMem->z ){
+ assert( pMem->z!=pMem->zMalloc );
+ memcpy(pMem->zMalloc, pMem->z, pMem->n);
+ }
+ if( (pMem->flags&MEM_Dyn)!=0 ){
+ assert( pMem->xDel!=0 && pMem->xDel!=SQLITE_DYNAMIC );
+ pMem->xDel((void *)(pMem->z));
+ }
+
+ pMem->z = pMem->zMalloc;
+ pMem->flags &= ~(MEM_Dyn|MEM_Ephem|MEM_Static);
+ return SQLITE_OK;
+}
+
+/*
+** Change the pMem->zMalloc allocation to be at least szNew bytes.
+** If pMem->zMalloc already meets or exceeds the requested size, this
+** routine is a no-op.
+**
+** Any prior string or blob content in the pMem object may be discarded.
+** The pMem->xDel destructor is called, if it exists. Though MEM_Str
+** and MEM_Blob values may be discarded, MEM_Int, MEM_Real, MEM_IntReal,
+** and MEM_Null values are preserved.
+**
+** Return SQLITE_OK on success or an error code (probably SQLITE_NOMEM)
+** if unable to complete the resizing.
+*/
+SQLITE_PRIVATE int sqlite3VdbeMemClearAndResize(Mem *pMem, int szNew){
+ assert( CORRUPT_DB || szNew>0 );
+ assert( (pMem->flags & MEM_Dyn)==0 || pMem->szMalloc==0 );
+ if( pMem->szMalloc<szNew ){
+ return sqlite3VdbeMemGrow(pMem, szNew, 0);
+ }
+ assert( (pMem->flags & MEM_Dyn)==0 );
+ pMem->z = pMem->zMalloc;
+ pMem->flags &= (MEM_Null|MEM_Int|MEM_Real|MEM_IntReal);
+ return SQLITE_OK;
+}
+
+/*
+** It is already known that pMem contains an unterminated string.
+** Add the zero terminator.
+**
+** Three bytes of zero are added. In this way, there is guaranteed
+** to be a double-zero byte at an even byte boundary in order to
+** terminate a UTF16 string, even if the initial size of the buffer
+** is an odd number of bytes.
+*/
+static SQLITE_NOINLINE int vdbeMemAddTerminator(Mem *pMem){
+ if( sqlite3VdbeMemGrow(pMem, pMem->n+3, 1) ){
+ return SQLITE_NOMEM_BKPT;
+ }
+ pMem->z[pMem->n] = 0;
+ pMem->z[pMem->n+1] = 0;
+ pMem->z[pMem->n+2] = 0;
+ pMem->flags |= MEM_Term;
+ return SQLITE_OK;
+}
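+
+/* For example, if pMem->n==5 the buffer is grown to at least 8 bytes
+** and bytes 5, 6 and 7 are zeroed. A UTF-16 reader scanning 16-bit
+** units then finds an all-zero unit at the even offset 6, so the
+** string is terminated even though the payload length is odd.
+*/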
+
+/*
+** Change pMem so that its MEM_Str or MEM_Blob value is stored in
+** Mem.zMalloc, where it can be safely written.
+**
+** Return SQLITE_OK on success or SQLITE_NOMEM if malloc fails.
+*/
+SQLITE_PRIVATE int sqlite3VdbeMemMakeWriteable(Mem *pMem){
+ assert( pMem->db==0 || sqlite3_mutex_held(pMem->db->mutex) );
+ assert( !sqlite3VdbeMemIsRowSet(pMem) );
+ if( (pMem->flags & (MEM_Str|MEM_Blob))!=0 ){
+ if( ExpandBlob(pMem) ) return SQLITE_NOMEM;
+ if( pMem->szMalloc==0 || pMem->z!=pMem->zMalloc ){
+ int rc = vdbeMemAddTerminator(pMem);
+ if( rc ) return rc;
+ }
+ }
+ pMem->flags &= ~MEM_Ephem;
+#ifdef SQLITE_DEBUG
+ pMem->pScopyFrom = 0;
+#endif
+
+ return SQLITE_OK;
+}
+
+/*
+** If the given Mem* has a zero-filled tail, turn it into an ordinary
+** blob stored in dynamically allocated space.
+*/
+#ifndef SQLITE_OMIT_INCRBLOB
+SQLITE_PRIVATE int sqlite3VdbeMemExpandBlob(Mem *pMem){
+ int nByte;
+ assert( pMem->flags & MEM_Zero );
+ assert( (pMem->flags&MEM_Blob)!=0 || MemNullNochng(pMem) );
+ testcase( sqlite3_value_nochange(pMem) );
+ assert( !sqlite3VdbeMemIsRowSet(pMem) );
+ assert( pMem->db==0 || sqlite3_mutex_held(pMem->db->mutex) );
+
+ /* Set nByte to the number of bytes required to store the expanded blob. */
+ nByte = pMem->n + pMem->u.nZero;
+ if( nByte<=0 ){
+ if( (pMem->flags & MEM_Blob)==0 ) return SQLITE_OK;
+ nByte = 1;
+ }
+ if( sqlite3VdbeMemGrow(pMem, nByte, 1) ){
+ return SQLITE_NOMEM_BKPT;
+ }
+
+ memset(&pMem->z[pMem->n], 0, pMem->u.nZero);
+ pMem->n += pMem->u.nZero;
+ pMem->flags &= ~(MEM_Zero|MEM_Term);
+ return SQLITE_OK;
+}
+#endif
+
+/*
+** Make sure the given Mem is \u0000 terminated.
+*/
+SQLITE_PRIVATE int sqlite3VdbeMemNulTerminate(Mem *pMem){
+ assert( pMem->db==0 || sqlite3_mutex_held(pMem->db->mutex) );
+ testcase( (pMem->flags & (MEM_Term|MEM_Str))==(MEM_Term|MEM_Str) );
+ testcase( (pMem->flags & (MEM_Term|MEM_Str))==0 );
+ if( (pMem->flags & (MEM_Term|MEM_Str))!=MEM_Str ){
+ return SQLITE_OK; /* Nothing to do */
+ }else{
+ return vdbeMemAddTerminator(pMem);
+ }
+}
+
+/*
+** Add MEM_Str to the set of representations for the given Mem. This
+** routine is only called if pMem is a number of some kind, not a NULL
+** or a BLOB.
+**
+** Existing representations MEM_Int, MEM_Real, or MEM_IntReal are invalidated
+** if bForce is true but are retained if bForce is false.
+**
+** A MEM_Null value will never be passed to this function. This function is
+** used for converting values to text for returning to the user (i.e. via
+** sqlite3_value_text()), or for ensuring that values to be used as btree
+** keys are strings. In the former case a NULL pointer is returned to
+** the user and the latter is an internal programming error.
+*/
+SQLITE_PRIVATE int sqlite3VdbeMemStringify(Mem *pMem, u8 enc, u8 bForce){
+ const int nByte = 32;
+
+ assert( pMem->db==0 || sqlite3_mutex_held(pMem->db->mutex) );
+ assert( !(pMem->flags&MEM_Zero) );
+ assert( !(pMem->flags&(MEM_Str|MEM_Blob)) );
+ assert( pMem->flags&(MEM_Int|MEM_Real|MEM_IntReal) );
+ assert( !sqlite3VdbeMemIsRowSet(pMem) );
+ assert( EIGHT_BYTE_ALIGNMENT(pMem) );
+
+
+ if( sqlite3VdbeMemClearAndResize(pMem, nByte) ){
+ pMem->enc = 0;
+ return SQLITE_NOMEM_BKPT;
+ }
+
+ vdbeMemRenderNum(nByte, pMem->z, pMem);
+ assert( pMem->z!=0 );
+ pMem->n = sqlite3Strlen30NN(pMem->z);
+ pMem->enc = SQLITE_UTF8;
+ pMem->flags |= MEM_Str|MEM_Term;
+ if( bForce ) pMem->flags &= ~(MEM_Int|MEM_Real|MEM_IntReal);
+ sqlite3VdbeChangeEncoding(pMem, enc);
+ return SQLITE_OK;
+}
+
+/*
+** Memory cell pMem contains the context of an aggregate function.
+** This routine calls the finalize method for that function. The
+** result of the aggregate is stored back into pMem.
+**
+** Return SQLITE_ERROR if the finalizer reports an error. SQLITE_OK
+** otherwise.
+*/
+SQLITE_PRIVATE int sqlite3VdbeMemFinalize(Mem *pMem, FuncDef *pFunc){
+ sqlite3_context ctx;
+ Mem t;
+ assert( pFunc!=0 );
+ assert( pFunc->xFinalize!=0 );
+ assert( (pMem->flags & MEM_Null)!=0 || pFunc==pMem->u.pDef );
+ assert( pMem->db==0 || sqlite3_mutex_held(pMem->db->mutex) );
+ memset(&ctx, 0, sizeof(ctx));
+ memset(&t, 0, sizeof(t));
+ t.flags = MEM_Null;
+ t.db = pMem->db;
+ ctx.pOut = &t;
+ ctx.pMem = pMem;
+ ctx.pFunc = pFunc;
+ pFunc->xFinalize(&ctx); /* IMP: R-24505-23230 */
+ assert( (pMem->flags & MEM_Dyn)==0 );
+ if( pMem->szMalloc>0 ) sqlite3DbFreeNN(pMem->db, pMem->zMalloc);
+ memcpy(pMem, &t, sizeof(t));
+ return ctx.isError;
+}
+
+/*
+** Memory cell pAccum contains the context of an aggregate function.
+** This routine calls the xValue method for that function and stores
+** the results in memory cell pMem.
+**
+** SQLITE_ERROR is returned if xValue() reports an error. SQLITE_OK
+** otherwise.
+*/
+#ifndef SQLITE_OMIT_WINDOWFUNC
+SQLITE_PRIVATE int sqlite3VdbeMemAggValue(Mem *pAccum, Mem *pOut, FuncDef *pFunc){
+ sqlite3_context ctx;
+ assert( pFunc!=0 );
+ assert( pFunc->xValue!=0 );
+ assert( (pAccum->flags & MEM_Null)!=0 || pFunc==pAccum->u.pDef );
+ assert( pAccum->db==0 || sqlite3_mutex_held(pAccum->db->mutex) );
+ memset(&ctx, 0, sizeof(ctx));
+ sqlite3VdbeMemSetNull(pOut);
+ ctx.pOut = pOut;
+ ctx.pMem = pAccum;
+ ctx.pFunc = pFunc;
+ pFunc->xValue(&ctx);
+ return ctx.isError;
+}
+#endif /* SQLITE_OMIT_WINDOWFUNC */
+
+/*
+** If the memory cell contains a value that must be freed by
+** invoking the external callback in Mem.xDel, then this routine
+** will free that value. It also sets Mem.flags to MEM_Null.
+**
+** This is a helper routine for sqlite3VdbeMemSetNull() and
+** for sqlite3VdbeMemRelease(). Use those other routines as the
+** entry point for releasing Mem resources.
+*/
+static SQLITE_NOINLINE void vdbeMemClearExternAndSetNull(Mem *p){
+ assert( p->db==0 || sqlite3_mutex_held(p->db->mutex) );
+ assert( VdbeMemDynamic(p) );
+ if( p->flags&MEM_Agg ){
+ sqlite3VdbeMemFinalize(p, p->u.pDef);
+ assert( (p->flags & MEM_Agg)==0 );
+ testcase( p->flags & MEM_Dyn );
+ }
+ if( p->flags&MEM_Dyn ){
+ assert( p->xDel!=SQLITE_DYNAMIC && p->xDel!=0 );
+ p->xDel((void *)p->z);
+ }
+ p->flags = MEM_Null;
+}
+
+/*
+** Release memory held by the Mem p, both external memory cleared
+** by p->xDel and memory in p->zMalloc.
+**
+** This is a helper routine invoked by sqlite3VdbeMemRelease() in
+** the unusual case where there really is memory in p that needs
+** to be freed.
+*/
+static SQLITE_NOINLINE void vdbeMemClear(Mem *p){
+ if( VdbeMemDynamic(p) ){
+ vdbeMemClearExternAndSetNull(p);
+ }
+ if( p->szMalloc ){
+ sqlite3DbFreeNN(p->db, p->zMalloc);
+ p->szMalloc = 0;
+ }
+ p->z = 0;
+}
+
+/*
+** Release any memory resources held by the Mem. Both the memory that is
+** freed by Mem.xDel and the Mem.zMalloc allocation are freed.
+**
+** Use this routine to clean up prior to abandoning a Mem, or to
+** reset a Mem back to its minimum memory utilization.
+**
+** Use sqlite3VdbeMemSetNull() to release just the Mem.xDel space
+** prior to inserting new content into the Mem.
+*/
+SQLITE_PRIVATE void sqlite3VdbeMemRelease(Mem *p){
+ assert( sqlite3VdbeCheckMemInvariants(p) );
+ if( VdbeMemDynamic(p) || p->szMalloc ){
+ vdbeMemClear(p);
+ }
+}
+
+/*
+** Convert a 64-bit IEEE double into a 64-bit signed integer.
+** If the double is out of range of a 64-bit signed integer then
+** return the closest available 64-bit signed integer.
+*/
+static SQLITE_NOINLINE i64 doubleToInt64(double r){
+#ifdef SQLITE_OMIT_FLOATING_POINT
+ /* When floating-point is omitted, double and int64 are the same thing */
+ return r;
+#else
+ /*
+ ** Many compilers we encounter do not define constants for the
+ ** minimum and maximum 64-bit integers, or they define them
+ ** inconsistently. And many do not understand the "LL" notation.
+ ** So we define our own static constants here using nothing
+ ** larger than a 32-bit integer constant.
+ */
+ static const i64 maxInt = LARGEST_INT64;
+ static const i64 minInt = SMALLEST_INT64;
+
+ if( r<=(double)minInt ){
+ return minInt;
+ }else if( r>=(double)maxInt ){
+ return maxInt;
+ }else{
+ return (i64)r;
+ }
+#endif
+}
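+
+/* For example (assuming 64-bit IEEE doubles): doubleToInt64(1e19)
+** returns LARGEST_INT64 because 1e19 is larger than any 64-bit signed
+** integer, doubleToInt64(-1e19) returns SMALLEST_INT64, and
+** doubleToInt64(3.7) truncates toward zero and returns 3.
+*/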
+
+/*
+** Return some kind of integer value which is the best we can do
+** at representing the value that *pMem describes as an integer.
+** If pMem is an integer, then the value is exact. If pMem is
+** a floating-point then the value returned is the integer part.
+** If pMem is a string or blob, then we make an attempt to convert
+** it into an integer and return that. If pMem represents an
+** SQL-NULL value, return 0.
+**
+** If pMem represents a string value, its encoding might be changed.
+*/
+static SQLITE_NOINLINE i64 memIntValue(Mem *pMem){
+ i64 value = 0;
+ sqlite3Atoi64(pMem->z, &value, pMem->n, pMem->enc);
+ return value;
+}
+SQLITE_PRIVATE i64 sqlite3VdbeIntValue(Mem *pMem){
+ int flags;
+ assert( pMem->db==0 || sqlite3_mutex_held(pMem->db->mutex) );
+ assert( EIGHT_BYTE_ALIGNMENT(pMem) );
+ flags = pMem->flags;
+ if( flags & (MEM_Int|MEM_IntReal) ){
+ testcase( flags & MEM_IntReal );
+ return pMem->u.i;
+ }else if( flags & MEM_Real ){
+ return doubleToInt64(pMem->u.r);
+ }else if( (flags & (MEM_Str|MEM_Blob))!=0 && pMem->z!=0 ){
+ return memIntValue(pMem);
+ }else{
+ return 0;
+ }
+}
+
+/*
+** Return the best representation of pMem that we can get into a
+** double. If pMem is already a double or an integer, return its
+** value. If it is a string or blob, try to convert it to a double.
+** If it is a NULL, return 0.0.
+*/
+static SQLITE_NOINLINE double memRealValue(Mem *pMem){
+ /* (double)0 In case of SQLITE_OMIT_FLOATING_POINT... */
+ double val = (double)0;
+ sqlite3AtoF(pMem->z, &val, pMem->n, pMem->enc);
+ return val;
+}
+SQLITE_PRIVATE double sqlite3VdbeRealValue(Mem *pMem){
+ assert( pMem->db==0 || sqlite3_mutex_held(pMem->db->mutex) );
+ assert( EIGHT_BYTE_ALIGNMENT(pMem) );
+ if( pMem->flags & MEM_Real ){
+ return pMem->u.r;
+ }else if( pMem->flags & (MEM_Int|MEM_IntReal) ){
+ testcase( pMem->flags & MEM_IntReal );
+ return (double)pMem->u.i;
+ }else if( pMem->flags & (MEM_Str|MEM_Blob) ){
+ return memRealValue(pMem);
+ }else{
+ /* (double)0 In case of SQLITE_OMIT_FLOATING_POINT... */
+ return (double)0;
+ }
+}
+
+/*
+** Return 1 if pMem represents true, and return 0 if pMem represents false.
+** Return the value ifNull if pMem is NULL.
+*/
+SQLITE_PRIVATE int sqlite3VdbeBooleanValue(Mem *pMem, int ifNull){
+ testcase( pMem->flags & MEM_IntReal );
+ if( pMem->flags & (MEM_Int|MEM_IntReal) ) return pMem->u.i!=0;
+ if( pMem->flags & MEM_Null ) return ifNull;
+ return sqlite3VdbeRealValue(pMem)!=0.0;
+}
+
+/*
+** The MEM structure is already a MEM_Real. Try to also make it a
+** MEM_Int if we can.
+*/
+SQLITE_PRIVATE void sqlite3VdbeIntegerAffinity(Mem *pMem){
+ i64 ix;
+ assert( pMem->flags & MEM_Real );
+ assert( !sqlite3VdbeMemIsRowSet(pMem) );
+ assert( pMem->db==0 || sqlite3_mutex_held(pMem->db->mutex) );
+ assert( EIGHT_BYTE_ALIGNMENT(pMem) );
+
+ ix = doubleToInt64(pMem->u.r);
+
+ /* Only mark the value as an integer if
+ **
+ ** (1) the round-trip conversion real->int->real is a no-op, and
+ ** (2) The integer is neither the largest nor the smallest
+ ** possible integer (ticket #3922)
+ **
+ ** The second and third terms in the following conditional enforce
+ ** the second condition under the assumption that addition overflow causes
+ ** values to wrap around.
+ */
+ if( pMem->u.r==ix && ix>SMALLEST_INT64 && ix<LARGEST_INT64 ){
+ pMem->u.i = ix;
+ MemSetTypeFlag(pMem, MEM_Int);
+ }
+}
+
+/*
+** Convert pMem to type integer. Invalidate any prior representations.
+*/
+SQLITE_PRIVATE int sqlite3VdbeMemIntegerify(Mem *pMem){
+ assert( pMem->db==0 || sqlite3_mutex_held(pMem->db->mutex) );
+ assert( !sqlite3VdbeMemIsRowSet(pMem) );
+ assert( EIGHT_BYTE_ALIGNMENT(pMem) );
+
+ pMem->u.i = sqlite3VdbeIntValue(pMem);
+ MemSetTypeFlag(pMem, MEM_Int);
+ return SQLITE_OK;
+}
+
+/*
+** Convert pMem so that it is of type MEM_Real.
+** Invalidate any prior representations.
+*/
+SQLITE_PRIVATE int sqlite3VdbeMemRealify(Mem *pMem){
+ assert( pMem->db==0 || sqlite3_mutex_held(pMem->db->mutex) );
+ assert( EIGHT_BYTE_ALIGNMENT(pMem) );
+
+ pMem->u.r = sqlite3VdbeRealValue(pMem);
+ MemSetTypeFlag(pMem, MEM_Real);
+ return SQLITE_OK;
+}
+
+/* Compare a floating point value to an integer. Return true if the two
+** values are the same within the precision of the floating point value.
+**
+** This function assumes that i was obtained by assignment from r1.
+**
+** For some versions of GCC on 32-bit machines, if you do the more obvious
+** comparison of "r1==(double)i" you sometimes get an answer of false even
+** though the r1 and (double)i values are bit-for-bit the same.
+*/
+SQLITE_PRIVATE int sqlite3RealSameAsInt(double r1, sqlite3_int64 i){
+ double r2 = (double)i;
+ return r1==0.0
+ || (memcmp(&r1, &r2, sizeof(r1))==0
+ && i >= -2251799813685248LL && i < 2251799813685248LL);
+}
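+
+/* The magic constant above is 2^51. An IEEE 754 double carries a
+** 53-bit significand, so every integer with magnitude below 2^51 is
+** exactly representable as a double, with two bits to spare. Within
+** that range the memcmp() comparison cannot be confused by rounding
+** in the i64-to-double conversion.
+*/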
+
+/*
+** Convert pMem so that it has type MEM_Real or MEM_Int.
+** Invalidate any prior representations.
+**
+** Every effort is made to force the conversion, even if the input
+** is a string that does not look completely like a number. Convert
+** as much of the string as we can and ignore the rest.
+*/
+SQLITE_PRIVATE int sqlite3VdbeMemNumerify(Mem *pMem){
+ testcase( pMem->flags & MEM_Int );
+ testcase( pMem->flags & MEM_Real );
+ testcase( pMem->flags & MEM_IntReal );
+ testcase( pMem->flags & MEM_Null );
+ if( (pMem->flags & (MEM_Int|MEM_Real|MEM_IntReal|MEM_Null))==0 ){
+ int rc;
+ sqlite3_int64 ix;
+ assert( (pMem->flags & (MEM_Blob|MEM_Str))!=0 );
+ assert( pMem->db==0 || sqlite3_mutex_held(pMem->db->mutex) );
+ rc = sqlite3AtoF(pMem->z, &pMem->u.r, pMem->n, pMem->enc);
+ if( ((rc==0 || rc==1) && sqlite3Atoi64(pMem->z, &ix, pMem->n, pMem->enc)<=1)
+ || sqlite3RealSameAsInt(pMem->u.r, (ix = (i64)pMem->u.r))
+ ){
+ pMem->u.i = ix;
+ MemSetTypeFlag(pMem, MEM_Int);
+ }else{
+ MemSetTypeFlag(pMem, MEM_Real);
+ }
+ }
+ assert( (pMem->flags & (MEM_Int|MEM_Real|MEM_IntReal|MEM_Null))!=0 );
+ pMem->flags &= ~(MEM_Str|MEM_Blob|MEM_Zero);
+ return SQLITE_OK;
+}
+
+/*
+** Cast the datatype of the value in pMem according to the affinity
+** "aff". Casting is different from applying affinity in that a cast
+** is forced. In other words, the value is converted into the desired
+** affinity even if that results in loss of data. This routine is
+** used (for example) to implement the SQL "cast()" operator.
+*/
+SQLITE_PRIVATE int sqlite3VdbeMemCast(Mem *pMem, u8 aff, u8 encoding){
+ if( pMem->flags & MEM_Null ) return SQLITE_OK;
+ switch( aff ){
+ case SQLITE_AFF_BLOB: { /* Really a cast to BLOB */
+ if( (pMem->flags & MEM_Blob)==0 ){
+ sqlite3ValueApplyAffinity(pMem, SQLITE_AFF_TEXT, encoding);
+ assert( pMem->flags & MEM_Str || pMem->db->mallocFailed );
+ if( pMem->flags & MEM_Str ) MemSetTypeFlag(pMem, MEM_Blob);
+ }else{
+ pMem->flags &= ~(MEM_TypeMask&~MEM_Blob);
+ }
+ break;
+ }
+ case SQLITE_AFF_NUMERIC: {
+ sqlite3VdbeMemNumerify(pMem);
+ break;
+ }
+ case SQLITE_AFF_INTEGER: {
+ sqlite3VdbeMemIntegerify(pMem);
+ break;
+ }
+ case SQLITE_AFF_REAL: {
+ sqlite3VdbeMemRealify(pMem);
+ break;
+ }
+ default: {
+ assert( aff==SQLITE_AFF_TEXT );
+ assert( MEM_Str==(MEM_Blob>>3) );
+ pMem->flags |= (pMem->flags&MEM_Blob)>>3;
+ sqlite3ValueApplyAffinity(pMem, SQLITE_AFF_TEXT, encoding);
+ assert( pMem->flags & MEM_Str || pMem->db->mallocFailed );
+ pMem->flags &= ~(MEM_Int|MEM_Real|MEM_IntReal|MEM_Blob|MEM_Zero);
+ return sqlite3VdbeChangeEncoding(pMem, encoding);
+ }
+ }
+ return SQLITE_OK;
+}
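+
+/* For example, applying this routine with aff==SQLITE_AFF_INTEGER to a
+** Mem holding the text '123xyz' leaves it holding the integer 123: the
+** sqlite3VdbeMemIntegerify() call converts the numeric prefix and
+** ignores the rest, matching the SQL expression
+** CAST('123xyz' AS INTEGER).
+*/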
+
+/*
+** Initialize bulk memory to be a consistent Mem object.
+**
+** The minimum amount of initialization feasible is performed.
+*/
+SQLITE_PRIVATE void sqlite3VdbeMemInit(Mem *pMem, sqlite3 *db, u16 flags){
+ assert( (flags & ~MEM_TypeMask)==0 );
+ pMem->flags = flags;
+ pMem->db = db;
+ pMem->szMalloc = 0;
+}
+
+
+/*
+** Delete any previous value and set the value stored in *pMem to NULL.
+**
+** This routine calls the Mem.xDel destructor to dispose of values that
+** require the destructor. But it preserves the Mem.zMalloc memory allocation.
+** To free all resources, use sqlite3VdbeMemRelease(), which both calls this
+** routine to invoke the destructor and deallocates Mem.zMalloc.
+**
+** Use this routine to reset the Mem prior to inserting a new value.
+**
+** Use sqlite3VdbeMemRelease() to completely erase the Mem prior to abandoning it.
+*/
+SQLITE_PRIVATE void sqlite3VdbeMemSetNull(Mem *pMem){
+ if( VdbeMemDynamic(pMem) ){
+ vdbeMemClearExternAndSetNull(pMem);
+ }else{
+ pMem->flags = MEM_Null;
+ }
+}
+SQLITE_PRIVATE void sqlite3ValueSetNull(sqlite3_value *p){
+ sqlite3VdbeMemSetNull((Mem*)p);
+}
+
+/*
+** Delete any previous value and set the value to be a BLOB of length
+** n containing all zeros.
+*/
+SQLITE_PRIVATE void sqlite3VdbeMemSetZeroBlob(Mem *pMem, int n){
+ sqlite3VdbeMemRelease(pMem);
+ pMem->flags = MEM_Blob|MEM_Zero;
+ pMem->n = 0;
+ if( n<0 ) n = 0;
+ pMem->u.nZero = n;
+ pMem->enc = SQLITE_UTF8;
+ pMem->z = 0;
+}
+
+/*
+** The pMem is known to contain content that needs to be destroyed prior
+** to a value change. So invoke the destructor, then set the value to
+** a 64-bit integer.
+*/
+static SQLITE_NOINLINE void vdbeReleaseAndSetInt64(Mem *pMem, i64 val){
+ sqlite3VdbeMemSetNull(pMem);
+ pMem->u.i = val;
+ pMem->flags = MEM_Int;
+}
+
+/*
+** Delete any previous value and set the value stored in *pMem to val,
+** manifest type INTEGER.
+*/
+SQLITE_PRIVATE void sqlite3VdbeMemSetInt64(Mem *pMem, i64 val){
+ if( VdbeMemDynamic(pMem) ){
+ vdbeReleaseAndSetInt64(pMem, val);
+ }else{
+ pMem->u.i = val;
+ pMem->flags = MEM_Int;
+ }
+}
+
+/* A no-op destructor */
+SQLITE_PRIVATE void sqlite3NoopDestructor(void *p){ UNUSED_PARAMETER(p); }
+
+/*
+** The value stored in *pMem must already be a NULL. Attach a pointer,
+** its type name, and an optional destructor to that NULL value.
+*/
+SQLITE_PRIVATE void sqlite3VdbeMemSetPointer(
+ Mem *pMem,
+ void *pPtr,
+ const char *zPType,
+ void (*xDestructor)(void*)
+){
+ assert( pMem->flags==MEM_Null );
+ pMem->u.zPType = zPType ? zPType : "";
+ pMem->z = pPtr;
+ pMem->flags = MEM_Null|MEM_Dyn|MEM_Subtype|MEM_Term;
+ pMem->eSubtype = 'p';
+ pMem->xDel = xDestructor ? xDestructor : sqlite3NoopDestructor;
+}
+
+#ifndef SQLITE_OMIT_FLOATING_POINT
+/*
+** Delete any previous value and set the value stored in *pMem to val,
+** manifest type REAL.
+*/
+SQLITE_PRIVATE void sqlite3VdbeMemSetDouble(Mem *pMem, double val){
+ sqlite3VdbeMemSetNull(pMem);
+ if( !sqlite3IsNaN(val) ){
+ pMem->u.r = val;
+ pMem->flags = MEM_Real;
+ }
+}
+#endif
+
+#ifdef SQLITE_DEBUG
+/*
+** Return true if the Mem holds a RowSet object. This routine is intended
+** for use inside of assert() statements.
+*/
+SQLITE_PRIVATE int sqlite3VdbeMemIsRowSet(const Mem *pMem){
+ return (pMem->flags&(MEM_Blob|MEM_Dyn))==(MEM_Blob|MEM_Dyn)
+ && pMem->xDel==sqlite3RowSetDelete;
+}
+#endif
+
+/*
+** Delete any previous value and set the value of pMem to be an
+** empty boolean index.
+**
+** Return SQLITE_OK on success and SQLITE_NOMEM if a memory allocation
+** error occurs.
+*/
+SQLITE_PRIVATE int sqlite3VdbeMemSetRowSet(Mem *pMem){
+ sqlite3 *db = pMem->db;
+ RowSet *p;
+ assert( db!=0 );
+ assert( !sqlite3VdbeMemIsRowSet(pMem) );
+ sqlite3VdbeMemRelease(pMem);
+ p = sqlite3RowSetInit(db);
+ if( p==0 ) return SQLITE_NOMEM;
+ pMem->z = (char*)p;
+ pMem->flags = MEM_Blob|MEM_Dyn;
+ pMem->xDel = sqlite3RowSetDelete;
+ return SQLITE_OK;
+}
+
+/*
+** Return true if the Mem object contains a TEXT or BLOB that is
+** too large - whose size exceeds the SQLITE_LIMIT_LENGTH limit.
+*/
+SQLITE_PRIVATE int sqlite3VdbeMemTooBig(Mem *p){
+ assert( p->db!=0 );
+ if( p->flags & (MEM_Str|MEM_Blob) ){
+ int n = p->n;
+ if( p->flags & MEM_Zero ){
+ n += p->u.nZero;
+ }
+ return n>p->db->aLimit[SQLITE_LIMIT_LENGTH];
+ }
+ return 0;
+}
+
+#ifdef SQLITE_DEBUG
+/*
+** This routine prepares a memory cell for modification by breaking
+** its link to a shallow copy and by marking any current shallow
+** copies of this cell as invalid.
+**
+** This is used for testing and debugging only - to help ensure that shallow
+** copies (created by OP_SCopy) are not misused.
+*/
+SQLITE_PRIVATE void sqlite3VdbeMemAboutToChange(Vdbe *pVdbe, Mem *pMem){
+ int i;
+ Mem *pX;
+ for(i=1, pX=pVdbe->aMem+1; i<pVdbe->nMem; i++, pX++){
+ if( pX->pScopyFrom==pMem ){
+ u16 mFlags;
+ if( pVdbe->db->flags & SQLITE_VdbeTrace ){
+ sqlite3DebugPrintf("Invalidate R[%d] due to change in R[%d]\n",
+ (int)(pX - pVdbe->aMem), (int)(pMem - pVdbe->aMem));
+ }
+ /* If pX is marked as a shallow copy of pMem, then try to verify that
+ ** no significant changes have been made to pX since the OP_SCopy.
+ ** A significant change would indicate a missed call to this
+ ** function for pX. Minor changes, such as adding or removing a
+ ** dual type, are allowed, as long as the underlying value is the
+ ** same. */
+ mFlags = pMem->flags & pX->flags & pX->mScopyFlags;
+ assert( (mFlags&(MEM_Int|MEM_IntReal))==0 || pMem->u.i==pX->u.i );
+
+ /* pMem is the register that is changing. But also mark pX as
+ ** undefined so that we can quickly detect the shallow-copy error */
+ pX->flags = MEM_Undefined;
+ pX->pScopyFrom = 0;
+ }
+ }
+ pMem->pScopyFrom = 0;
+}
+#endif /* SQLITE_DEBUG */
+
+/*
+** Make a shallow copy of pFrom into pTo. Prior contents of
+** pTo are freed. The pFrom->z field is not duplicated. If
+** pFrom->z is used, then pTo->z points to the same thing as pFrom->z
+** and flags gets srcType (either MEM_Ephem or MEM_Static).
+*/
+static SQLITE_NOINLINE void vdbeClrCopy(Mem *pTo, const Mem *pFrom, int eType){
+ vdbeMemClearExternAndSetNull(pTo);
+ assert( !VdbeMemDynamic(pTo) );
+ sqlite3VdbeMemShallowCopy(pTo, pFrom, eType);
+}
+SQLITE_PRIVATE void sqlite3VdbeMemShallowCopy(Mem *pTo, const Mem *pFrom, int srcType){
+ assert( !sqlite3VdbeMemIsRowSet(pFrom) );
+ assert( pTo->db==pFrom->db );
+ if( VdbeMemDynamic(pTo) ){ vdbeClrCopy(pTo,pFrom,srcType); return; }
+ memcpy(pTo, pFrom, MEMCELLSIZE);
+ if( (pFrom->flags&MEM_Static)==0 ){
+ pTo->flags &= ~(MEM_Dyn|MEM_Static|MEM_Ephem);
+ assert( srcType==MEM_Ephem || srcType==MEM_Static );
+ pTo->flags |= srcType;
+ }
+}
+
+/*
+** Make a full copy of pFrom into pTo. Prior contents of pTo are
+** freed before the copy is made.
+*/
+SQLITE_PRIVATE int sqlite3VdbeMemCopy(Mem *pTo, const Mem *pFrom){
+ int rc = SQLITE_OK;
+
+ assert( !sqlite3VdbeMemIsRowSet(pFrom) );
+ if( VdbeMemDynamic(pTo) ) vdbeMemClearExternAndSetNull(pTo);
+ memcpy(pTo, pFrom, MEMCELLSIZE);
+ pTo->flags &= ~MEM_Dyn;
+ if( pTo->flags&(MEM_Str|MEM_Blob) ){
+ if( 0==(pFrom->flags&MEM_Static) ){
+ pTo->flags |= MEM_Ephem;
+ rc = sqlite3VdbeMemMakeWriteable(pTo);
+ }
+ }
+
+ return rc;
+}
+
+/*
+** Transfer the contents of pFrom to pTo. Any existing value in pTo is
+** freed. If pFrom contains ephemeral data, a copy is made.
+**
+** pFrom contains an SQL NULL when this routine returns.
+*/
+SQLITE_PRIVATE void sqlite3VdbeMemMove(Mem *pTo, Mem *pFrom){
+ assert( pFrom->db==0 || sqlite3_mutex_held(pFrom->db->mutex) );
+ assert( pTo->db==0 || sqlite3_mutex_held(pTo->db->mutex) );
+ assert( pFrom->db==0 || pTo->db==0 || pFrom->db==pTo->db );
+
+ sqlite3VdbeMemRelease(pTo);
+ memcpy(pTo, pFrom, sizeof(Mem));
+ pFrom->flags = MEM_Null;
+ pFrom->szMalloc = 0;
+}
+
+/*
+** Change the value of a Mem to be a string or a BLOB.
+**
+** The memory management strategy depends on the value of the xDel
+** parameter. If the value passed is SQLITE_TRANSIENT, then the
+** string is copied into a (possibly existing) buffer managed by the
+** Mem structure. Otherwise, any existing buffer is freed and the
+** pointer copied.
+**
+** If the string is too large (if it exceeds the SQLITE_LIMIT_LENGTH
+** size limit) then no memory allocation occurs. If the string can be
+** stored without allocating memory, then it is. If a memory allocation
+** is required to store the string, then the value of pMem is unchanged. In
+** either case, SQLITE_TOOBIG is returned.
+*/
+SQLITE_PRIVATE int sqlite3VdbeMemSetStr(
+ Mem *pMem, /* Memory cell to set to string value */
+ const char *z, /* String pointer */
+ int n, /* Bytes in string, or negative */
+ u8 enc, /* Encoding of z. 0 for BLOBs */
+ void (*xDel)(void*) /* Destructor function */
+){
+ int nByte = n; /* New value for pMem->n */
+ int iLimit; /* Maximum allowed string or blob size */
+ u16 flags = 0; /* New value for pMem->flags */
+
+ assert( pMem->db==0 || sqlite3_mutex_held(pMem->db->mutex) );
+ assert( !sqlite3VdbeMemIsRowSet(pMem) );
+
+ /* If z is a NULL pointer, set pMem to contain an SQL NULL. */
+ if( !z ){
+ sqlite3VdbeMemSetNull(pMem);
+ return SQLITE_OK;
+ }
+
+ if( pMem->db ){
+ iLimit = pMem->db->aLimit[SQLITE_LIMIT_LENGTH];
+ }else{
+ iLimit = SQLITE_MAX_LENGTH;
+ }
+ flags = (enc==0?MEM_Blob:MEM_Str);
+ if( nByte<0 ){
+ assert( enc!=0 );
+ if( enc==SQLITE_UTF8 ){
+ nByte = 0x7fffffff & (int)strlen(z);
+ }else{
+ for(nByte=0; nByte<=iLimit && (z[nByte] | z[nByte+1]); nByte+=2){}
+ }
+ flags |= MEM_Term;
+ }
+
+ /* The following block sets the new values of Mem.z and Mem.xDel. It
+ ** also sets a flag in local variable "flags" to indicate the memory
+ ** management (one of MEM_Dyn or MEM_Static).
+ */
+ if( xDel==SQLITE_TRANSIENT ){
+ u32 nAlloc = nByte;
+ if( flags&MEM_Term ){
+ nAlloc += (enc==SQLITE_UTF8?1:2);
+ }
+ if( nByte>iLimit ){
+ return sqlite3ErrorToParser(pMem->db, SQLITE_TOOBIG);
+ }
+ testcase( nAlloc==0 );
+ testcase( nAlloc==31 );
+ testcase( nAlloc==32 );
+ if( sqlite3VdbeMemClearAndResize(pMem, (int)MAX(nAlloc,32)) ){
+ return SQLITE_NOMEM_BKPT;
+ }
+ memcpy(pMem->z, z, nAlloc);
+ }else{
+ sqlite3VdbeMemRelease(pMem);
+ pMem->z = (char *)z;
+ if( xDel==SQLITE_DYNAMIC ){
+ pMem->zMalloc = pMem->z;
+ pMem->szMalloc = sqlite3DbMallocSize(pMem->db, pMem->zMalloc);
+ }else{
+ pMem->xDel = xDel;
+ flags |= ((xDel==SQLITE_STATIC)?MEM_Static:MEM_Dyn);
+ }
+ }
+
+ pMem->n = nByte;
+ pMem->flags = flags;
+ if( enc ){
+ pMem->enc = enc;
+#ifdef SQLITE_ENABLE_SESSION
+ }else if( pMem->db==0 ){
+ pMem->enc = SQLITE_UTF8;
+#endif
+ }else{
+ assert( pMem->db!=0 );
+ pMem->enc = ENC(pMem->db);
+ }
+
+#ifndef SQLITE_OMIT_UTF16
+ if( enc>SQLITE_UTF8 && sqlite3VdbeMemHandleBom(pMem) ){
+ return SQLITE_NOMEM_BKPT;
+ }
+#endif
+
+ if( nByte>iLimit ){
+ return SQLITE_TOOBIG;
+ }
+
+ return SQLITE_OK;
+}
+
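+/* To contrast the xDel strategies described above, consider these
+** hypothetical calls (zHeap stands for any buffer obtained from
+** sqlite3DbMalloc()):
+**
+**   sqlite3VdbeMemSetStr(pMem, "abc", 3, SQLITE_UTF8, SQLITE_TRANSIENT);
+**     -- "abc" is copied into a buffer owned and managed by pMem
+**
+**   sqlite3VdbeMemSetStr(pMem, zHeap, -1, SQLITE_UTF8, SQLITE_DYNAMIC);
+**     -- pMem takes ownership of zHeap and will free it itself
+**
+**   sqlite3VdbeMemSetStr(pMem, "abc", 3, SQLITE_UTF8, SQLITE_STATIC);
+**     -- pMem points at the constant; no copy is made and no
+**        destructor is registered (MEM_Static)
+*/
+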
+/*
+** Move data out of a btree key or data field and into a Mem structure.
+** The data is payload from the entry that pCur is currently pointing
+** to. offset and amt determine what portion of the data or key to retrieve.
+** The result is written into the pMem element.
+**
+** The pMem object must have been initialized. This routine will use
+** pMem->zMalloc to hold the content from the btree, if possible. New
+** pMem->zMalloc space will be allocated if necessary. The calling routine
+** is responsible for making sure that the pMem object is eventually
+** destroyed.
+**
+** If this routine fails for any reason (malloc returns NULL or the disk
+** cannot be read) then pMem is left in an inconsistent state.
+*/
+SQLITE_PRIVATE int sqlite3VdbeMemFromBtree(
+ BtCursor *pCur, /* Cursor pointing at record to retrieve. */
+ u32 offset, /* Offset from the start of data to return bytes from. */
+ u32 amt, /* Number of bytes to return. */
+ Mem *pMem /* OUT: Return data in this Mem structure. */
+){
+ int rc;
+ pMem->flags = MEM_Null;
+ if( sqlite3BtreeMaxRecordSize(pCur)<offset+amt ){
+ return SQLITE_CORRUPT_BKPT;
+ }
+ if( SQLITE_OK==(rc = sqlite3VdbeMemClearAndResize(pMem, amt+1)) ){
+ rc = sqlite3BtreePayload(pCur, offset, amt, pMem->z);
+ if( rc==SQLITE_OK ){
+ pMem->z[amt] = 0; /* Overrun area used when reading malformed records */
+ pMem->flags = MEM_Blob;
+ pMem->n = (int)amt;
+ }else{
+ sqlite3VdbeMemRelease(pMem);
+ }
+ }
+ return rc;
+}
+SQLITE_PRIVATE int sqlite3VdbeMemFromBtreeZeroOffset(
+ BtCursor *pCur, /* Cursor pointing at record to retrieve. */
+ u32 amt, /* Number of bytes to return. */
+ Mem *pMem /* OUT: Return data in this Mem structure. */
+){
+ u32 available = 0; /* Number of bytes available on the local btree page */
+ int rc = SQLITE_OK; /* Return code */
+
+ assert( sqlite3BtreeCursorIsValid(pCur) );
+ assert( !VdbeMemDynamic(pMem) );
+
+ /* Note: the call to sqlite3BtreePayloadFetch() below asserts that
+ ** both the BtShared and database handle mutexes are held. */
+ assert( !sqlite3VdbeMemIsRowSet(pMem) );
+ pMem->z = (char *)sqlite3BtreePayloadFetch(pCur, &available);
+ assert( pMem->z!=0 );
+
+ if( amt<=available ){
+ pMem->flags = MEM_Blob|MEM_Ephem;
+ pMem->n = (int)amt;
+ }else{
+ rc = sqlite3VdbeMemFromBtree(pCur, 0, amt, pMem);
+ }
+
+ return rc;
+}
+
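+/* A hypothetical caller of the routine above (sketch only; nPayload is
+** an illustrative byte count):
+**
+**   Mem m;  // zeroed, with m.db set, before the call
+**   rc = sqlite3VdbeMemFromBtreeZeroOffset(pCur, nPayload, &m);
+**
+** On the fast path m.z points directly into the btree page (MEM_Ephem),
+** so the value must be copied (e.g. via sqlite3VdbeMemMakeWriteable())
+** before the cursor is moved. Only when amt exceeds the locally stored
+** portion of the payload does sqlite3VdbeMemFromBtree() allocate and copy.
+*/
+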
+/*
+** The pVal argument is known to be a value other than NULL.
+** Convert it into a string with encoding enc and return a pointer
+** to a zero-terminated version of that string.
+*/
+static SQLITE_NOINLINE const void *valueToText(sqlite3_value* pVal, u8 enc){
+ assert( pVal!=0 );
+ assert( pVal->db==0 || sqlite3_mutex_held(pVal->db->mutex) );
+ assert( (enc&3)==(enc&~SQLITE_UTF16_ALIGNED) );
+ assert( !sqlite3VdbeMemIsRowSet(pVal) );
+ assert( (pVal->flags & (MEM_Null))==0 );
+ if( pVal->flags & (MEM_Blob|MEM_Str) ){
+ if( ExpandBlob(pVal) ) return 0;
+ pVal->flags |= MEM_Str;
+ if( pVal->enc != (enc & ~SQLITE_UTF16_ALIGNED) ){
+ sqlite3VdbeChangeEncoding(pVal, enc & ~SQLITE_UTF16_ALIGNED);
+ }
+ if( (enc & SQLITE_UTF16_ALIGNED)!=0 && 1==(1&SQLITE_PTR_TO_INT(pVal->z)) ){
+ assert( (pVal->flags & (MEM_Ephem|MEM_Static))!=0 );
+ if( sqlite3VdbeMemMakeWriteable(pVal)!=SQLITE_OK ){
+ return 0;
+ }
+ }
+ sqlite3VdbeMemNulTerminate(pVal); /* IMP: R-31275-44060 */
+ }else{
+ sqlite3VdbeMemStringify(pVal, enc, 0);
+ assert( 0==(1&SQLITE_PTR_TO_INT(pVal->z)) );
+ }
+ assert(pVal->enc==(enc & ~SQLITE_UTF16_ALIGNED) || pVal->db==0
+ || pVal->db->mallocFailed );
+ if( pVal->enc==(enc & ~SQLITE_UTF16_ALIGNED) ){
+ assert( sqlite3VdbeMemValidStrRep(pVal) );
+ return pVal->z;
+ }else{
+ return 0;
+ }
+}
+
+/* This function is only available internally; it is not part of the
+** external API. It works in a similar way to sqlite3_value_text(),
+** except the data returned is in the encoding specified by the second
+** parameter, which must be one of SQLITE_UTF16BE, SQLITE_UTF16LE or
+** SQLITE_UTF8.
+**
+** (2006-02-16:) The enc value can be or-ed with SQLITE_UTF16_ALIGNED.
+** If that is the case, then the result must be aligned on an even byte
+** boundary.
+*/
+SQLITE_PRIVATE const void *sqlite3ValueText(sqlite3_value* pVal, u8 enc){
+ if( !pVal ) return 0;
+ assert( pVal->db==0 || sqlite3_mutex_held(pVal->db->mutex) );
+ assert( (enc&3)==(enc&~SQLITE_UTF16_ALIGNED) );
+ assert( !sqlite3VdbeMemIsRowSet(pVal) );
+ if( (pVal->flags&(MEM_Str|MEM_Term))==(MEM_Str|MEM_Term) && pVal->enc==enc ){
+ assert( sqlite3VdbeMemValidStrRep(pVal) );
+ return pVal->z;
+ }
+ if( pVal->flags&MEM_Null ){
+ return 0;
+ }
+ return valueToText(pVal, enc);
+}
+
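+/* For example (hypothetical, sketch only), to obtain a UTF-16LE string
+** that is guaranteed to begin on an even address a caller could write:
+**
+**   const void *z =
+**       sqlite3ValueText(pVal, SQLITE_UTF16LE|SQLITE_UTF16_ALIGNED);
+**
+** If the current buffer happens to be odd-aligned, valueToText() makes a
+** writable (and therefore aligned) copy first; 0 is returned on OOM.
+*/
+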
+/*
+** Create a new sqlite3_value object.
+*/
+SQLITE_PRIVATE sqlite3_value *sqlite3ValueNew(sqlite3 *db){
+ Mem *p = sqlite3DbMallocZero(db, sizeof(*p));
+ if( p ){
+ p->flags = MEM_Null;
+ p->db = db;
+ }
+ return p;
+}
+
+/*
+** Context object passed by sqlite3Stat4ProbeSetValue() through to
+** valueNew(). See comments above valueNew() for details.
+*/
+struct ValueNewStat4Ctx {
+ Parse *pParse;
+ Index *pIdx;
+ UnpackedRecord **ppRec;
+ int iVal;
+};
+
+/*
+** Allocate and return a pointer to a new sqlite3_value object. If
+** the second argument to this function is NULL, the object is allocated
+** by calling sqlite3ValueNew().
+**
+** Otherwise, if the second argument is not NULL, then this function is
+** being called indirectly by sqlite3Stat4ProbeSetValue(). If it has not
+** already been allocated, allocate the UnpackedRecord structure that
+** that function will return to its caller here. Then return a pointer to
+** an sqlite3_value within the UnpackedRecord.a[] array.
+*/
+static sqlite3_value *valueNew(sqlite3 *db, struct ValueNewStat4Ctx *p){
+#ifdef SQLITE_ENABLE_STAT4
+ if( p ){
+ UnpackedRecord *pRec = p->ppRec[0];
+
+ if( pRec==0 ){
+ Index *pIdx = p->pIdx; /* Index being probed */
+ int nByte; /* Bytes of space to allocate */
+ int i; /* Counter variable */
+ int nCol = pIdx->nColumn; /* Number of index columns including rowid */
+
+ nByte = sizeof(Mem) * nCol + ROUND8(sizeof(UnpackedRecord));
+ pRec = (UnpackedRecord*)sqlite3DbMallocZero(db, nByte);
+ if( pRec ){
+ pRec->pKeyInfo = sqlite3KeyInfoOfIndex(p->pParse, pIdx);
+ if( pRec->pKeyInfo ){
+ assert( pRec->pKeyInfo->nAllField==nCol );
+ assert( pRec->pKeyInfo->enc==ENC(db) );
+ pRec->aMem = (Mem *)((u8*)pRec + ROUND8(sizeof(UnpackedRecord)));
+ for(i=0; i<nCol; i++){
+ pRec->aMem[i].flags = MEM_Null;
+ pRec->aMem[i].db = db;
+ }
+ }else{
+ sqlite3DbFreeNN(db, pRec);
+ pRec = 0;
+ }
+ }
+ if( pRec==0 ) return 0;
+ p->ppRec[0] = pRec;
+ }
+
+ pRec->nField = p->iVal+1;
+ return &pRec->aMem[p->iVal];
+ }
+#else
+ UNUSED_PARAMETER(p);
+#endif /* defined(SQLITE_ENABLE_STAT4) */
+ return sqlite3ValueNew(db);
+}
+
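+/* The single allocation made above for the STAT4 case is laid out as
+** follows (a sketch of the nByte arithmetic):
+**
+**   pRec ------> +------------------------------+
+**                | UnpackedRecord (ROUND8-ed)   |
+**   pRec->aMem > +------------------------------+
+**                | Mem[0] ... Mem[nCol-1]       |
+**                +------------------------------+
+*/
+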
+/*
+** The expression object indicated by the second argument is guaranteed
+** to be a scalar SQL function. If
+**
+** * all function arguments are SQL literals,
+** * one of the SQLITE_FUNC_CONSTANT or _SLOCHNG function flags is set, and
+** * the SQLITE_FUNC_NEEDCOLL function flag is not set,
+**
+** then this routine attempts to invoke the SQL function. Assuming no
+** error occurs, output parameter (*ppVal) is set to point to a value
+** object containing the result before returning SQLITE_OK.
+**
+** Affinity aff is applied to the result of the function before returning.
+** If the result is a text value, the sqlite3_value object uses encoding
+** enc.
+**
+** If the conditions above are not met, this function returns SQLITE_OK
+** and sets (*ppVal) to NULL. Or, if an error occurs, (*ppVal) is set to
+** NULL and an SQLite error code returned.
+*/
+#ifdef SQLITE_ENABLE_STAT4
+static int valueFromFunction(
+ sqlite3 *db, /* The database connection */
+ Expr *p, /* The expression to evaluate */
+ u8 enc, /* Encoding to use */
+ u8 aff, /* Affinity to use */
+ sqlite3_value **ppVal, /* Write the new value here */
+ struct ValueNewStat4Ctx *pCtx /* Second argument for valueNew() */
+){
+ sqlite3_context ctx; /* Context object for function invocation */
+ sqlite3_value **apVal = 0; /* Function arguments */
+ int nVal = 0; /* Size of apVal[] array */
+ FuncDef *pFunc = 0; /* Function definition */
+ sqlite3_value *pVal = 0; /* New value */
+ int rc = SQLITE_OK; /* Return code */
+ ExprList *pList = 0; /* Function arguments */
+ int i; /* Iterator variable */
+
+ assert( pCtx!=0 );
+ assert( (p->flags & EP_TokenOnly)==0 );
+ pList = p->x.pList;
+ if( pList ) nVal = pList->nExpr;
+ pFunc = sqlite3FindFunction(db, p->u.zToken, nVal, enc, 0);
+ assert( pFunc );
+ if( (pFunc->funcFlags & (SQLITE_FUNC_CONSTANT|SQLITE_FUNC_SLOCHNG))==0
+ || (pFunc->funcFlags & SQLITE_FUNC_NEEDCOLL)
+ ){
+ return SQLITE_OK;
+ }
+
+ if( pList ){
+ apVal = (sqlite3_value**)sqlite3DbMallocZero(db, sizeof(apVal[0]) * nVal);
+ if( apVal==0 ){
+ rc = SQLITE_NOMEM_BKPT;
+ goto value_from_function_out;
+ }
+ for(i=0; i<nVal; i++){
+ rc = sqlite3ValueFromExpr(db, pList->a[i].pExpr, enc, aff, &apVal[i]);
+ if( apVal[i]==0 || rc!=SQLITE_OK ) goto value_from_function_out;
+ }
+ }
+
+ pVal = valueNew(db, pCtx);
+ if( pVal==0 ){
+ rc = SQLITE_NOMEM_BKPT;
+ goto value_from_function_out;
+ }
+
+ assert( pCtx->pParse->rc==SQLITE_OK );
+ memset(&ctx, 0, sizeof(ctx));
+ ctx.pOut = pVal;
+ ctx.pFunc = pFunc;
+ pFunc->xSFunc(&ctx, nVal, apVal);
+ if( ctx.isError ){
+ rc = ctx.isError;
+ sqlite3ErrorMsg(pCtx->pParse, "%s", sqlite3_value_text(pVal));
+ }else{
+ sqlite3ValueApplyAffinity(pVal, aff, SQLITE_UTF8);
+ assert( rc==SQLITE_OK );
+ rc = sqlite3VdbeChangeEncoding(pVal, enc);
+ if( rc==SQLITE_OK && sqlite3VdbeMemTooBig(pVal) ){
+ rc = SQLITE_TOOBIG;
+ pCtx->pParse->nErr++;
+ }
+ }
+ pCtx->pParse->rc = rc;
+
+ value_from_function_out:
+ if( rc!=SQLITE_OK ){
+ pVal = 0;
+ }
+ if( apVal ){
+ for(i=0; i<nVal; i++){
+ sqlite3ValueFree(apVal[i]);
+ }
+ sqlite3DbFreeNN(db, apVal);
+ }
+
+ *ppVal = pVal;
+ return rc;
+}
+#else
+# define valueFromFunction(a,b,c,d,e,f) SQLITE_OK
+#endif /* defined(SQLITE_ENABLE_STAT4) */
+
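+/* For example (hypothetical SQL, sketch only): in a query such as
+**
+**   SELECT * FROM t1 WHERE x > abs(-5);
+**
+** abs() carries SQLITE_FUNC_CONSTANT and its only argument is a literal,
+** so valueFromFunction() can fold abs(-5) into the value 5 for use in a
+** STAT4 probe. A non-deterministic call such as random(), or a function
+** that needs collation, is skipped and *ppVal is left at NULL.
+*/
+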
+/*
+** Extract a value from the supplied expression in the manner described
+** above sqlite3ValueFromExpr(). Allocate the sqlite3_value object
+** using valueNew().
+**
+** If pCtx is NULL and an error occurs after the sqlite3_value object
+** has been allocated, it is freed before returning. Or, if pCtx is not
+** NULL, it is assumed that the caller will free any allocated object
+** in all cases.
+*/
+static int valueFromExpr(
+ sqlite3 *db, /* The database connection */
+ Expr *pExpr, /* The expression to evaluate */
+ u8 enc, /* Encoding to use */
+ u8 affinity, /* Affinity to use */
+ sqlite3_value **ppVal, /* Write the new value here */
+ struct ValueNewStat4Ctx *pCtx /* Second argument for valueNew() */
+){
+ int op;
+ char *zVal = 0;
+ sqlite3_value *pVal = 0;
+ int negInt = 1;
+ const char *zNeg = "";
+ int rc = SQLITE_OK;
+
+ assert( pExpr!=0 );
+ while( (op = pExpr->op)==TK_UPLUS || op==TK_SPAN ) pExpr = pExpr->pLeft;
+#if defined(SQLITE_ENABLE_STAT4)
+ if( op==TK_REGISTER ) op = pExpr->op2;
+#else
+ if( NEVER(op==TK_REGISTER) ) op = pExpr->op2;
+#endif
+
+ /* Compressed expressions only appear when parsing the DEFAULT clause
+ ** on a table column definition, and hence only when pCtx==0. This
+ ** check ensures that an EP_TokenOnly expression is never passed down
+ ** into valueFromFunction(). */
+ assert( (pExpr->flags & EP_TokenOnly)==0 || pCtx==0 );
+
+ if( op==TK_CAST ){
+ u8 aff = sqlite3AffinityType(pExpr->u.zToken,0);
+ rc = valueFromExpr(db, pExpr->pLeft, enc, aff, ppVal, pCtx);
+ testcase( rc!=SQLITE_OK );
+ if( *ppVal ){
+ sqlite3VdbeMemCast(*ppVal, aff, SQLITE_UTF8);
+ sqlite3ValueApplyAffinity(*ppVal, affinity, SQLITE_UTF8);
+ }
+ return rc;
+ }
+
+ /* Handle negative integers in a single step. This is needed in the
+ ** case when the value is -9223372036854775808.
+ */
+ if( op==TK_UMINUS
+ && (pExpr->pLeft->op==TK_INTEGER || pExpr->pLeft->op==TK_FLOAT) ){
+ pExpr = pExpr->pLeft;
+ op = pExpr->op;
+ negInt = -1;
+ zNeg = "-";
+ }
+
+ if( op==TK_STRING || op==TK_FLOAT || op==TK_INTEGER ){
+ pVal = valueNew(db, pCtx);
+ if( pVal==0 ) goto no_mem;
+ if( ExprHasProperty(pExpr, EP_IntValue) ){
+ sqlite3VdbeMemSetInt64(pVal, (i64)pExpr->u.iValue*negInt);
+ }else{
+ zVal = sqlite3MPrintf(db, "%s%s", zNeg, pExpr->u.zToken);
+ if( zVal==0 ) goto no_mem;
+ sqlite3ValueSetStr(pVal, -1, zVal, SQLITE_UTF8, SQLITE_DYNAMIC);
+ }
+ if( (op==TK_INTEGER || op==TK_FLOAT ) && affinity==SQLITE_AFF_BLOB ){
+ sqlite3ValueApplyAffinity(pVal, SQLITE_AFF_NUMERIC, SQLITE_UTF8);
+ }else{
+ sqlite3ValueApplyAffinity(pVal, affinity, SQLITE_UTF8);
+ }
+ assert( (pVal->flags & MEM_IntReal)==0 );
+ if( pVal->flags & (MEM_Int|MEM_IntReal|MEM_Real) ){
+ testcase( pVal->flags & MEM_Int );
+ testcase( pVal->flags & MEM_Real );
+ pVal->flags &= ~MEM_Str;
+ }
+ if( enc!=SQLITE_UTF8 ){
+ rc = sqlite3VdbeChangeEncoding(pVal, enc);
+ }
+ }else if( op==TK_UMINUS ) {
+ /* This branch happens for multiple negative signs. Ex: -(-5) */
+ if( SQLITE_OK==valueFromExpr(db,pExpr->pLeft,enc,affinity,&pVal,pCtx)
+ && pVal!=0
+ ){
+ sqlite3VdbeMemNumerify(pVal);
+ if( pVal->flags & MEM_Real ){
+ pVal->u.r = -pVal->u.r;
+ }else if( pVal->u.i==SMALLEST_INT64 ){
+#ifndef SQLITE_OMIT_FLOATING_POINT
+ pVal->u.r = -(double)SMALLEST_INT64;
+#else
+ pVal->u.r = LARGEST_INT64;
+#endif
+ MemSetTypeFlag(pVal, MEM_Real);
+ }else{
+ pVal->u.i = -pVal->u.i;
+ }
+ sqlite3ValueApplyAffinity(pVal, affinity, enc);
+ }
+ }else if( op==TK_NULL ){
+ pVal = valueNew(db, pCtx);
+ if( pVal==0 ) goto no_mem;
+ sqlite3VdbeMemSetNull(pVal);
+ }
+#ifndef SQLITE_OMIT_BLOB_LITERAL
+ else if( op==TK_BLOB ){
+ int nVal;
+ assert( pExpr->u.zToken[0]=='x' || pExpr->u.zToken[0]=='X' );
+ assert( pExpr->u.zToken[1]=='\'' );
+ pVal = valueNew(db, pCtx);
+ if( !pVal ) goto no_mem;
+ zVal = &pExpr->u.zToken[2];
+ nVal = sqlite3Strlen30(zVal)-1;
+ assert( zVal[nVal]=='\'' );
+ sqlite3VdbeMemSetStr(pVal, sqlite3HexToBlob(db, zVal, nVal), nVal/2,
+ 0, SQLITE_DYNAMIC);
+ }
+#endif
+#ifdef SQLITE_ENABLE_STAT4
+ else if( op==TK_FUNCTION && pCtx!=0 ){
+ rc = valueFromFunction(db, pExpr, enc, affinity, &pVal, pCtx);
+ }
+#endif
+ else if( op==TK_TRUEFALSE ){
+ pVal = valueNew(db, pCtx);
+ if( pVal ){
+ pVal->flags = MEM_Int;
+ pVal->u.i = pExpr->u.zToken[4]==0;
+ }
+ }
+
+ *ppVal = pVal;
+ return rc;
+
+no_mem:
+#ifdef SQLITE_ENABLE_STAT4
+ if( pCtx==0 || pCtx->pParse->nErr==0 )
+#endif
+ sqlite3OomFault(db);
+ sqlite3DbFree(db, zVal);
+ assert( *ppVal==0 );
+#ifdef SQLITE_ENABLE_STAT4
+ if( pCtx==0 ) sqlite3ValueFree(pVal);
+#else
+ assert( pCtx==0 ); sqlite3ValueFree(pVal);
+#endif
+ return SQLITE_NOMEM_BKPT;
+}
+
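+/* By way of example, the literal forms handled above map as follows:
+**
+**   'abc'        ->  TK_STRING     MEM_Str
+**   5, 5.1       ->  TK_INTEGER, TK_FLOAT
+**   -5           ->  TK_UMINUS wrapping TK_INTEGER (negated in one step)
+**   x'414243'    ->  TK_BLOB       MEM_Blob (hex text decoded to bytes)
+**   NULL         ->  TK_NULL       MEM_Null
+**   TRUE, FALSE  ->  TK_TRUEFALSE  integer 1 or 0
+*/
+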
+/*
+** Create a new sqlite3_value object, containing the value of pExpr.
+**
+** This only works for very simple expressions that consist of one constant
+** token (e.g. "5", "5.1", "'a string'"). If the expression can
+** be converted directly into a value, then the value is allocated and
+** a pointer written to *ppVal. The caller is responsible for deallocating
+** the value by passing it to sqlite3ValueFree() later on. If the expression
+** cannot be converted to a value, then *ppVal is set to NULL.
+*/
+SQLITE_PRIVATE int sqlite3ValueFromExpr(
+ sqlite3 *db, /* The database connection */
+ Expr *pExpr, /* The expression to evaluate */
+ u8 enc, /* Encoding to use */
+ u8 affinity, /* Affinity to use */
+ sqlite3_value **ppVal /* Write the new value here */
+){
+ return pExpr ? valueFromExpr(db, pExpr, enc, affinity, ppVal, 0) : 0;
+}
+
+#ifdef SQLITE_ENABLE_STAT4
+/*
+** Attempt to extract a value from pExpr and use it to construct *ppVal.
+**
+** If pAlloc is not NULL, then an UnpackedRecord object is created for
+** pAlloc if one does not exist and the new value is added to the
+** UnpackedRecord object.
+**
+** A value is extracted in the following cases:
+**
+** * (pExpr==0). In this case the value is assumed to be an SQL NULL,
+**
+** * The expression is a bound variable, and this is a reprepare, or
+**
+** * The expression is a literal value.
+**
+** On success, *ppVal is made to point to the extracted value. The caller
+** is responsible for ensuring that the value is eventually freed.
+*/
+static int stat4ValueFromExpr(
+ Parse *pParse, /* Parse context */
+ Expr *pExpr, /* The expression to extract a value from */
+ u8 affinity, /* Affinity to use */
+ struct ValueNewStat4Ctx *pAlloc,/* How to allocate space. Or NULL */
+ sqlite3_value **ppVal /* OUT: New value object (or NULL) */
+){
+ int rc = SQLITE_OK;
+ sqlite3_value *pVal = 0;
+ sqlite3 *db = pParse->db;
+
+ /* Skip over any TK_COLLATE nodes */
+ pExpr = sqlite3ExprSkipCollate(pExpr);
+
+ assert( pExpr==0 || pExpr->op!=TK_REGISTER || pExpr->op2!=TK_VARIABLE );
+ if( !pExpr ){
+ pVal = valueNew(db, pAlloc);
+ if( pVal ){
+ sqlite3VdbeMemSetNull((Mem*)pVal);
+ }
+ }else if( pExpr->op==TK_VARIABLE && (db->flags & SQLITE_EnableQPSG)==0 ){
+ Vdbe *v;
+ int iBindVar = pExpr->iColumn;
+ sqlite3VdbeSetVarmask(pParse->pVdbe, iBindVar);
+ if( (v = pParse->pReprepare)!=0 ){
+ pVal = valueNew(db, pAlloc);
+ if( pVal ){
+ rc = sqlite3VdbeMemCopy((Mem*)pVal, &v->aVar[iBindVar-1]);
+ sqlite3ValueApplyAffinity(pVal, affinity, ENC(db));
+ pVal->db = pParse->db;
+ }
+ }
+ }else{
+ rc = valueFromExpr(db, pExpr, ENC(db), affinity, &pVal, pAlloc);
+ }
+
+ assert( pVal==0 || pVal->db==db );
+ *ppVal = pVal;
+ return rc;
+}
+
+/*
+** This function is used to allocate and populate UnpackedRecord
+** structures intended to be compared against sample index keys stored
+** in the sqlite_stat4 table.
+**
+** A single call to this function populates zero or more fields of the
+** record starting with field iVal (fields are numbered from left to
+** right starting with 0). A single field is populated if:
+**
+** * (pExpr==0). In this case the value is assumed to be an SQL NULL,
+**
+** * The expression is a bound variable, and this is a reprepare, or
+**
+** * The sqlite3ValueFromExpr() function is able to extract a value
+** from the expression (i.e. the expression is a literal value).
+**
+** Or, if pExpr is a TK_VECTOR, one field is populated for each of the
+** vector components that match either of the two latter criteria listed
+** above.
+**
+** Before any value is appended to the record, the affinity of the
+** corresponding column within index pIdx is applied to it. Before
+** this function returns, output parameter *pnExtract is set to the
+** number of values appended to the record.
+**
+** When this function is called, *ppRec must either point to an object
+** allocated by an earlier call to this function, or must be NULL. If it
+** is NULL and a value can be successfully extracted, a new UnpackedRecord
+** is allocated (and *ppRec set to point to it) before returning.
+**
+** Unless an error is encountered, SQLITE_OK is returned. It is not an
+** error if a value cannot be extracted from pExpr. If an error does
+** occur, an SQLite error code is returned.
+*/
+SQLITE_PRIVATE int sqlite3Stat4ProbeSetValue(
+ Parse *pParse, /* Parse context */
+ Index *pIdx, /* Index being probed */
+ UnpackedRecord **ppRec, /* IN/OUT: Probe record */
+ Expr *pExpr, /* The expression to extract a value from */
+ int nElem, /* Maximum number of values to append */
+ int iVal, /* Array element to populate */
+ int *pnExtract /* OUT: Values appended to the record */
+){
+ int rc = SQLITE_OK;
+ int nExtract = 0;
+
+ if( pExpr==0 || pExpr->op!=TK_SELECT ){
+ int i;
+ struct ValueNewStat4Ctx alloc;
+
+ alloc.pParse = pParse;
+ alloc.pIdx = pIdx;
+ alloc.ppRec = ppRec;
+
+ for(i=0; i<nElem; i++){
+ sqlite3_value *pVal = 0;
+ Expr *pElem = (pExpr ? sqlite3VectorFieldSubexpr(pExpr, i) : 0);
+ u8 aff = sqlite3IndexColumnAffinity(pParse->db, pIdx, iVal+i);
+ alloc.iVal = iVal+i;
+ rc = stat4ValueFromExpr(pParse, pElem, aff, &alloc, &pVal);
+ if( !pVal ) break;
+ nExtract++;
+ }
+ }
+
+ *pnExtract = nExtract;
+ return rc;
+}
+
+/*
+** Attempt to extract a value from expression pExpr using the methods
+** as described for sqlite3Stat4ProbeSetValue() above.
+**
+** If successful, set *ppVal to point to a new value object and return
+** SQLITE_OK. If no value can be extracted, but no other error occurs
+** (e.g. OOM), return SQLITE_OK and set *ppVal to NULL. Or, if an error
+** does occur, return an SQLite error code. The final value of *ppVal
+** is undefined in this case.
+*/
+SQLITE_PRIVATE int sqlite3Stat4ValueFromExpr(
+ Parse *pParse, /* Parse context */
+ Expr *pExpr, /* The expression to extract a value from */
+ u8 affinity, /* Affinity to use */
+ sqlite3_value **ppVal /* OUT: New value object (or NULL) */
+){
+ return stat4ValueFromExpr(pParse, pExpr, affinity, 0, ppVal);
+}
+
+/*
+** Extract the iCol-th column from the nRec-byte record in pRec. Write
+** the column value into *ppVal. If *ppVal is initially NULL then a new
+** sqlite3_value object is allocated.
+**
+** If *ppVal is initially NULL then the caller is responsible for
+** ensuring that the value written into *ppVal is eventually freed.
+*/
+SQLITE_PRIVATE int sqlite3Stat4Column(
+ sqlite3 *db, /* Database handle */
+ const void *pRec, /* Pointer to buffer containing record */
+ int nRec, /* Size of buffer pRec in bytes */
+ int iCol, /* Column to extract */
+ sqlite3_value **ppVal /* OUT: Extracted value */
+){
+ u32 t = 0; /* a column type code */
+ int nHdr; /* Size of the header in the record */
+ int iHdr; /* Next unread header byte */
+ int iField; /* Next unread data byte */
+ int szField = 0; /* Size of the current data field */
+ int i; /* Column index */
+ u8 *a = (u8*)pRec; /* Typecast byte array */
+ Mem *pMem = *ppVal; /* Write result into this Mem object */
+
+ assert( iCol>0 );
+ iHdr = getVarint32(a, nHdr);
+ if( nHdr>nRec || iHdr>=nHdr ) return SQLITE_CORRUPT_BKPT;
+ iField = nHdr;
+ for(i=0; i<=iCol; i++){
+ iHdr += getVarint32(&a[iHdr], t);
+ testcase( iHdr==nHdr );
+ testcase( iHdr==nHdr+1 );
+ if( iHdr>nHdr ) return SQLITE_CORRUPT_BKPT;
+ szField = sqlite3VdbeSerialTypeLen(t);
+ iField += szField;
+ }
+ testcase( iField==nRec );
+ testcase( iField==nRec+1 );
+ if( iField>nRec ) return SQLITE_CORRUPT_BKPT;
+ if( pMem==0 ){
+ pMem = *ppVal = sqlite3ValueNew(db);
+ if( pMem==0 ) return SQLITE_NOMEM_BKPT;
+ }
+ sqlite3VdbeSerialGet(&a[iField-szField], t, pMem);
+ pMem->enc = ENC(db);
+ return SQLITE_OK;
+}
+
+/*
+** Unless it is NULL, the argument must be an UnpackedRecord object returned
+** by an earlier call to sqlite3Stat4ProbeSetValue(). This call deletes
+** the object.
+*/
+SQLITE_PRIVATE void sqlite3Stat4ProbeFree(UnpackedRecord *pRec){
+ if( pRec ){
+ int i;
+ int nCol = pRec->pKeyInfo->nAllField;
+ Mem *aMem = pRec->aMem;
+ sqlite3 *db = aMem[0].db;
+ for(i=0; i<nCol; i++){
+ sqlite3VdbeMemRelease(&aMem[i]);
+ }
+ sqlite3KeyInfoUnref(pRec->pKeyInfo);
+ sqlite3DbFreeNN(db, pRec);
+ }
+}
+#endif /* ifdef SQLITE_ENABLE_STAT4 */
+
+/*
+** Change the string value of an sqlite3_value object
+*/
+SQLITE_PRIVATE void sqlite3ValueSetStr(
+ sqlite3_value *v, /* Value to be set */
+ int n, /* Length of string z */
+ const void *z, /* Text of the new string */
+ u8 enc, /* Encoding to use */
+ void (*xDel)(void*) /* Destructor for the string */
+){
+ if( v ) sqlite3VdbeMemSetStr((Mem *)v, z, n, enc, xDel);
+}
+
+/*
+** Free an sqlite3_value object
+*/
+SQLITE_PRIVATE void sqlite3ValueFree(sqlite3_value *v){
+ if( !v ) return;
+ sqlite3VdbeMemRelease((Mem *)v);
+ sqlite3DbFreeNN(((Mem*)v)->db, v);
+}
+
+/*
+** The sqlite3ValueBytes() routine returns the number of bytes in the
+** sqlite3_value object assuming that it uses the encoding "enc".
+** The valueBytes() routine is a helper function.
+*/
+static SQLITE_NOINLINE int valueBytes(sqlite3_value *pVal, u8 enc){
+ return valueToText(pVal, enc)!=0 ? pVal->n : 0;
+}
+SQLITE_PRIVATE int sqlite3ValueBytes(sqlite3_value *pVal, u8 enc){
+ Mem *p = (Mem*)pVal;
+ assert( (p->flags & MEM_Null)==0 || (p->flags & (MEM_Str|MEM_Blob))==0 );
+ if( (p->flags & MEM_Str)!=0 && pVal->enc==enc ){
+ return p->n;
+ }
+ if( (p->flags & MEM_Blob)!=0 ){
+ if( p->flags & MEM_Zero ){
+ return p->n + p->u.nZero;
+ }else{
+ return p->n;
+ }
+ }
+ if( p->flags & MEM_Null ) return 0;
+ return valueBytes(pVal, enc);
+}
+
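+/* For example, a value created by zeroblob(100) carries MEM_Blob|MEM_Zero
+** with p->n==0 and p->u.nZero==100, so sqlite3ValueBytes() reports 100
+** bytes without ever materializing the zero-filled tail.
+*/
+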
+/************** End of vdbemem.c *********************************************/
+/************** Begin file vdbeaux.c *****************************************/
+/*
+** 2003 September 6
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+** This file contains code used for creating, destroying, and populating
+** a VDBE (or an "sqlite3_stmt" as it is known to the outside world.)
+*/
+/* #include "sqliteInt.h" */
+/* #include "vdbeInt.h" */
+
+/* Forward references */
+static void freeEphemeralFunction(sqlite3 *db, FuncDef *pDef);
+static void vdbeFreeOpArray(sqlite3 *, Op *, int);
+
+/*
+** Create a new virtual database engine.
+*/
+SQLITE_PRIVATE Vdbe *sqlite3VdbeCreate(Parse *pParse){
+ sqlite3 *db = pParse->db;
+ Vdbe *p;
+ p = sqlite3DbMallocRawNN(db, sizeof(Vdbe) );
+ if( p==0 ) return 0;
+ memset(&p->aOp, 0, sizeof(Vdbe)-offsetof(Vdbe,aOp));
+ p->db = db;
+ if( db->pVdbe ){
+ db->pVdbe->pPrev = p;
+ }
+ p->pNext = db->pVdbe;
+ p->pPrev = 0;
+ db->pVdbe = p;
+ p->magic = VDBE_MAGIC_INIT;
+ p->pParse = pParse;
+ pParse->pVdbe = p;
+ assert( pParse->aLabel==0 );
+ assert( pParse->nLabel==0 );
+ assert( p->nOpAlloc==0 );
+ assert( pParse->szOpAlloc==0 );
+ sqlite3VdbeAddOp2(p, OP_Init, 0, 1);
+ return p;
+}
+
+/*
+** Return the Parse object that owns a Vdbe object.
+*/
+SQLITE_PRIVATE Parse *sqlite3VdbeParser(Vdbe *p){
+ return p->pParse;
+}
+
+/*
+** Change the error string stored in Vdbe.zErrMsg
+*/
+SQLITE_PRIVATE void sqlite3VdbeError(Vdbe *p, const char *zFormat, ...){
+ va_list ap;
+ sqlite3DbFree(p->db, p->zErrMsg);
+ va_start(ap, zFormat);
+ p->zErrMsg = sqlite3VMPrintf(p->db, zFormat, ap);
+ va_end(ap);
+}
+
+/*
+** Remember the SQL string for a prepared statement.
+*/
+SQLITE_PRIVATE void sqlite3VdbeSetSql(Vdbe *p, const char *z, int n, u8 prepFlags){
+ if( p==0 ) return;
+ p->prepFlags = prepFlags;
+ if( (prepFlags & SQLITE_PREPARE_SAVESQL)==0 ){
+ p->expmask = 0;
+ }
+ assert( p->zSql==0 );
+ p->zSql = sqlite3DbStrNDup(p->db, z, n);
+}
+
+#ifdef SQLITE_ENABLE_NORMALIZE
+/*
+** Add a new element to the Vdbe->pDblStr list.
+*/
+SQLITE_PRIVATE void sqlite3VdbeAddDblquoteStr(sqlite3 *db, Vdbe *p, const char *z){
+ if( p ){
+ int n = sqlite3Strlen30(z);
+ DblquoteStr *pStr = sqlite3DbMallocRawNN(db,
+ sizeof(*pStr)+n+1-sizeof(pStr->z));
+ if( pStr ){
+ pStr->pNextStr = p->pDblStr;
+ p->pDblStr = pStr;
+ memcpy(pStr->z, z, n+1);
+ }
+ }
+}
+#endif
+
+#ifdef SQLITE_ENABLE_NORMALIZE
+/*
+** zId of length nId is a double-quoted identifier. Check to see if
+** that identifier is really used as a string literal.
+*/
+SQLITE_PRIVATE int sqlite3VdbeUsesDoubleQuotedString(
+ Vdbe *pVdbe, /* The prepared statement */
+ const char *zId /* The double-quoted identifier, already dequoted */
+){
+ DblquoteStr *pStr;
+ assert( zId!=0 );
+ if( pVdbe->pDblStr==0 ) return 0;
+ for(pStr=pVdbe->pDblStr; pStr; pStr=pStr->pNextStr){
+ if( strcmp(zId, pStr->z)==0 ) return 1;
+ }
+ return 0;
+}
+#endif
+
+/*
+** Swap all content between two VDBE structures.
+*/
+SQLITE_PRIVATE void sqlite3VdbeSwap(Vdbe *pA, Vdbe *pB){
+ Vdbe tmp, *pTmp;
+ char *zTmp;
+ assert( pA->db==pB->db );
+ tmp = *pA;
+ *pA = *pB;
+ *pB = tmp;
+ pTmp = pA->pNext;
+ pA->pNext = pB->pNext;
+ pB->pNext = pTmp;
+ pTmp = pA->pPrev;
+ pA->pPrev = pB->pPrev;
+ pB->pPrev = pTmp;
+ zTmp = pA->zSql;
+ pA->zSql = pB->zSql;
+ pB->zSql = zTmp;
+#ifdef SQLITE_ENABLE_NORMALIZE
+ zTmp = pA->zNormSql;
+ pA->zNormSql = pB->zNormSql;
+ pB->zNormSql = zTmp;
+#endif
+ pB->expmask = pA->expmask;
+ pB->prepFlags = pA->prepFlags;
+ memcpy(pB->aCounter, pA->aCounter, sizeof(pB->aCounter));
+ pB->aCounter[SQLITE_STMTSTATUS_REPREPARE]++;
+}
+
+/*
+** Resize the Vdbe.aOp array so that it is at least nOp elements larger
+** than its current size. nOp is guaranteed to be less than or equal
+** to 1024/sizeof(Op).
+**
+** If an out-of-memory error occurs while resizing the array, return
+** SQLITE_NOMEM. In this case Vdbe.aOp and Vdbe.nOpAlloc remain
+** unchanged (this is so that any opcodes already allocated can be
+** correctly deallocated along with the rest of the Vdbe).
+*/
+static int growOpArray(Vdbe *v, int nOp){
+ VdbeOp *pNew;
+ Parse *p = v->pParse;
+
+ /* The SQLITE_TEST_REALLOC_STRESS compile-time option is designed to force
+ ** more frequent reallocs and hence provide more opportunities for
+ ** simulated OOM faults. SQLITE_TEST_REALLOC_STRESS is generally used
+ ** during testing only. With SQLITE_TEST_REALLOC_STRESS grow the op array
+** by the minimum amount required until the size reaches 512. Normal
+ ** operation (without SQLITE_TEST_REALLOC_STRESS) is to double the current
+ ** size of the op array or add 1KB of space, whichever is smaller. */
+#ifdef SQLITE_TEST_REALLOC_STRESS
+ sqlite3_int64 nNew = (v->nOpAlloc>=512 ? 2*(sqlite3_int64)v->nOpAlloc
+ : (sqlite3_int64)v->nOpAlloc+nOp);
+#else
+ sqlite3_int64 nNew = (v->nOpAlloc ? 2*(sqlite3_int64)v->nOpAlloc
+ : (sqlite3_int64)(1024/sizeof(Op)));
+ UNUSED_PARAMETER(nOp);
+#endif
+
+ /* Ensure that the size of a VDBE does not grow too large */
+ if( nNew > p->db->aLimit[SQLITE_LIMIT_VDBE_OP] ){
+ sqlite3OomFault(p->db);
+ return SQLITE_NOMEM;
+ }
+
+ assert( nOp<=(1024/sizeof(Op)) );
+ assert( nNew>=(v->nOpAlloc+nOp) );
+ pNew = sqlite3DbRealloc(p->db, v->aOp, nNew*sizeof(Op));
+ if( pNew ){
+ p->szOpAlloc = sqlite3DbMallocSize(p->db, pNew);
+ v->nOpAlloc = p->szOpAlloc/sizeof(Op);
+ v->aOp = pNew;
+ }
+ return (pNew ? SQLITE_OK : SQLITE_NOMEM_BKPT);
+}
+
+#ifdef SQLITE_DEBUG
+/* This routine is just a convenient place to set a breakpoint that will
+** fire after each opcode is inserted and displayed using
+** "PRAGMA vdbe_addoptrace=on". Parameters "pc" (program counter) and
+** pOp are available to make the breakpoint conditional.
+**
+** Other useful labels for breakpoints include:
+** test_trace_breakpoint(pc,pOp)
+** sqlite3CorruptError(lineno)
+** sqlite3MisuseError(lineno)
+** sqlite3CantopenError(lineno)
+*/
+static void test_addop_breakpoint(int pc, Op *pOp){
+ static int n = 0;
+ n++;
+}
+#endif
+
+/*
+** Add a new instruction to the list of instructions current in the
+** VDBE. Return the address of the new instruction.
+**
+** Parameters:
+**
+** p Pointer to the VDBE
+**
+** op The opcode for this instruction
+**
+** p1, p2, p3 Operands
+**
+** Use the sqlite3VdbeResolveLabel() function to fix an address and
+** the sqlite3VdbeChangeP4() function to change the value of the P4
+** operand.
+*/
+static SQLITE_NOINLINE int growOp3(Vdbe *p, int op, int p1, int p2, int p3){
+ assert( p->nOpAlloc<=p->nOp );
+ if( growOpArray(p, 1) ) return 1;
+ assert( p->nOpAlloc>p->nOp );
+ return sqlite3VdbeAddOp3(p, op, p1, p2, p3);
+}
+SQLITE_PRIVATE int sqlite3VdbeAddOp3(Vdbe *p, int op, int p1, int p2, int p3){
+ int i;
+ VdbeOp *pOp;
+
+ i = p->nOp;
+ assert( p->magic==VDBE_MAGIC_INIT );
+ assert( op>=0 && op<0xff );
+ if( p->nOpAlloc<=i ){
+ return growOp3(p, op, p1, p2, p3);
+ }
+ p->nOp++;
+ pOp = &p->aOp[i];
+ pOp->opcode = (u8)op;
+ pOp->p5 = 0;
+ pOp->p1 = p1;
+ pOp->p2 = p2;
+ pOp->p3 = p3;
+ pOp->p4.p = 0;
+ pOp->p4type = P4_NOTUSED;
+#ifdef SQLITE_ENABLE_EXPLAIN_COMMENTS
+ pOp->zComment = 0;
+#endif
+#ifdef SQLITE_DEBUG
+ if( p->db->flags & SQLITE_VdbeAddopTrace ){
+ sqlite3VdbePrintOp(0, i, &p->aOp[i]);
+ test_addop_breakpoint(i, &p->aOp[i]);
+ }
+#endif
+#ifdef VDBE_PROFILE
+ pOp->cycles = 0;
+ pOp->cnt = 0;
+#endif
+#ifdef SQLITE_VDBE_COVERAGE
+ pOp->iSrcLine = 0;
+#endif
+ return i;
+}
+SQLITE_PRIVATE int sqlite3VdbeAddOp0(Vdbe *p, int op){
+ return sqlite3VdbeAddOp3(p, op, 0, 0, 0);
+}
+SQLITE_PRIVATE int sqlite3VdbeAddOp1(Vdbe *p, int op, int p1){
+ return sqlite3VdbeAddOp3(p, op, p1, 0, 0);
+}
+SQLITE_PRIVATE int sqlite3VdbeAddOp2(Vdbe *p, int op, int p1, int p2){
+ return sqlite3VdbeAddOp3(p, op, p1, p2, 0);
+}
+
+/* Generate code for an unconditional jump to instruction iDest
+*/
+SQLITE_PRIVATE int sqlite3VdbeGoto(Vdbe *p, int iDest){
+ return sqlite3VdbeAddOp3(p, OP_Goto, 0, iDest, 0);
+}
+
+/* Generate code to cause the string zStr to be loaded into
+** register iDest
+*/
+SQLITE_PRIVATE int sqlite3VdbeLoadString(Vdbe *p, int iDest, const char *zStr){
+ return sqlite3VdbeAddOp4(p, OP_String8, 0, iDest, 0, zStr, 0);
+}
+
+/*
+** Generate code that initializes multiple registers to string or integer
+** constants. The registers begin with iDest and increase consecutively.
+** One register is initialized for each character in zTypes[]. For each
+** "s" character in zTypes[], the register is initialized to the string
+** argument if that argument is not NULL, or to NULL (via OP_Null) if it
+** is a null pointer. For each "i" character in zTypes[], the register is
+** initialized to an integer.
+**
+** If the input string does not end with "X" then an OP_ResultRow instruction
+** is generated for the values inserted.
+*/
+SQLITE_PRIVATE void sqlite3VdbeMultiLoad(Vdbe *p, int iDest, const char *zTypes, ...){
+ va_list ap;
+ int i;
+ char c;
+ va_start(ap, zTypes);
+ for(i=0; (c = zTypes[i])!=0; i++){
+ if( c=='s' ){
+ const char *z = va_arg(ap, const char*);
+ sqlite3VdbeAddOp4(p, z==0 ? OP_Null : OP_String8, 0, iDest+i, 0, z, 0);
+ }else if( c=='i' ){
+ sqlite3VdbeAddOp2(p, OP_Integer, va_arg(ap, int), iDest+i);
+ }else{
+ goto skip_op_resultrow;
+ }
+ }
+ sqlite3VdbeAddOp2(p, OP_ResultRow, iDest, i);
+skip_op_resultrow:
+ va_end(ap);
+}
+
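+/* A hypothetical call (sketch only):
+**
+**   sqlite3VdbeMultiLoad(v, 1, "iis", 0, 3, zName);
+**
+** loads the integer 0 into register 1, the integer 3 into register 2 and
+** the string zName into register 3, then emits OP_ResultRow for registers
+** 1..3 because the type string does not end in "X".
+*/
+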
+/*
+** Add an opcode that includes the p4 value as a pointer.
+*/
+SQLITE_PRIVATE int sqlite3VdbeAddOp4(
+ Vdbe *p, /* Add the opcode to this VM */
+ int op, /* The new opcode */
+ int p1, /* The P1 operand */
+ int p2, /* The P2 operand */
+ int p3, /* The P3 operand */
+ const char *zP4, /* The P4 operand */
+ int p4type /* P4 operand type */
+){
+ int addr = sqlite3VdbeAddOp3(p, op, p1, p2, p3);
+ sqlite3VdbeChangeP4(p, addr, zP4, p4type);
+ return addr;
+}
+
+/*
+** Add an OP_Function or OP_PureFunc opcode.
+**
+** The eCallCtx argument is information (typically taken from Expr.op2)
+** that describes the calling context of the function. 0, the usual case,
+** means a general function call. NC_IsCheck means called by a check
+** constraint, NC_IdxExpr means called as part of an index expression,
+** NC_PartIdx means called from the WHERE clause of a partial index, and
+** NC_GenCol means called while computing a generated column value.
+*/
+SQLITE_PRIVATE int sqlite3VdbeAddFunctionCall(
+ Parse *pParse, /* Parsing context */
+ int p1, /* Constant argument mask */
+ int p2, /* First argument register */
+ int p3, /* Register into which results are written */
+ int nArg, /* Number of argument */
+ const FuncDef *pFunc, /* The function to be invoked */
+ int eCallCtx /* Calling context */
+){
+ Vdbe *v = pParse->pVdbe;
+ int nByte;
+ int addr;
+ sqlite3_context *pCtx;
+ assert( v );
+ nByte = sizeof(*pCtx) + (nArg-1)*sizeof(sqlite3_value*);
+ pCtx = sqlite3DbMallocRawNN(pParse->db, nByte);
+ if( pCtx==0 ){
+ assert( pParse->db->mallocFailed );
+ freeEphemeralFunction(pParse->db, (FuncDef*)pFunc);
+ return 0;
+ }
+ pCtx->pOut = 0;
+ pCtx->pFunc = (FuncDef*)pFunc;
+ pCtx->pVdbe = 0;
+ pCtx->isError = 0;
+ pCtx->argc = nArg;
+ pCtx->iOp = sqlite3VdbeCurrentAddr(v);
+ addr = sqlite3VdbeAddOp4(v, eCallCtx ? OP_PureFunc : OP_Function,
+ p1, p2, p3, (char*)pCtx, P4_FUNCCTX);
+ sqlite3VdbeChangeP5(v, eCallCtx & NC_SelfRef);
+ return addr;
+}
+
+/*
+** Add an opcode that includes the p4 value with a P4_INT64 or
+** P4_REAL type.
+*/
+SQLITE_PRIVATE int sqlite3VdbeAddOp4Dup8(
+ Vdbe *p, /* Add the opcode to this VM */
+ int op, /* The new opcode */
+ int p1, /* The P1 operand */
+ int p2, /* The P2 operand */
+ int p3, /* The P3 operand */
+ const u8 *zP4, /* The P4 operand */
+ int p4type /* P4 operand type */
+){
+ char *p4copy = sqlite3DbMallocRawNN(sqlite3VdbeDb(p), 8);
+ if( p4copy ) memcpy(p4copy, zP4, 8);
+ return sqlite3VdbeAddOp4(p, op, p1, p2, p3, p4copy, p4type);
+}
+
+#ifndef SQLITE_OMIT_EXPLAIN
+/*
+** Return the address of the current EXPLAIN QUERY PLAN baseline.
+** 0 means "none".
+*/
+SQLITE_PRIVATE int sqlite3VdbeExplainParent(Parse *pParse){
+ VdbeOp *pOp;
+ if( pParse->addrExplain==0 ) return 0;
+ pOp = sqlite3VdbeGetOp(pParse->pVdbe, pParse->addrExplain);
+ return pOp->p2;
+}
+
+/*
+** Set a debugger breakpoint on the following routine in order to
+** monitor the EXPLAIN QUERY PLAN code generation.
+*/
+#if defined(SQLITE_DEBUG)
+SQLITE_PRIVATE void sqlite3ExplainBreakpoint(const char *z1, const char *z2){
+ (void)z1;
+ (void)z2;
+}
+#endif
+
+/*
+** Add a new OP_Explain opcode.
+**
+** If the bPush flag is true, then make this opcode the parent for
+** subsequent Explains until sqlite3VdbeExplainPop() is called.
+*/
+SQLITE_PRIVATE void sqlite3VdbeExplain(Parse *pParse, u8 bPush, const char *zFmt, ...){
+#ifndef SQLITE_DEBUG
+ /* Always include the OP_Explain opcodes if SQLITE_DEBUG is defined.
+ ** But omit them (for performance) during production builds */
+ if( pParse->explain==2 )
+#endif
+ {
+ char *zMsg;
+ Vdbe *v;
+ va_list ap;
+ int iThis;
+ va_start(ap, zFmt);
+ zMsg = sqlite3VMPrintf(pParse->db, zFmt, ap);
+ va_end(ap);
+ v = pParse->pVdbe;
+ iThis = v->nOp;
+ sqlite3VdbeAddOp4(v, OP_Explain, iThis, pParse->addrExplain, 0,
+ zMsg, P4_DYNAMIC);
+ sqlite3ExplainBreakpoint(bPush?"PUSH":"", sqlite3VdbeGetOp(v,-1)->p4.z);
+ if( bPush ){
+ pParse->addrExplain = iThis;
+ }
+ }
+}
+
+/*
+** Pop the EXPLAIN QUERY PLAN stack one level.
+*/
+SQLITE_PRIVATE void sqlite3VdbeExplainPop(Parse *pParse){
+ sqlite3ExplainBreakpoint("POP", 0);
+ pParse->addrExplain = sqlite3VdbeExplainParent(pParse);
+}
+#endif /* SQLITE_OMIT_EXPLAIN */
+
+/*
+** Add an OP_ParseSchema opcode. This routine is broken out from
+** sqlite3VdbeAddOp4() since it also needs to mark all btrees
+** as having been used.
+**
+** The zWhere string must have been obtained from sqlite3_malloc().
+** This routine will take ownership of the allocated memory.
+*/
+SQLITE_PRIVATE void sqlite3VdbeAddParseSchemaOp(Vdbe *p, int iDb, char *zWhere){
+ int j;
+ sqlite3VdbeAddOp4(p, OP_ParseSchema, iDb, 0, 0, zWhere, P4_DYNAMIC);
+ for(j=0; j<p->db->nDb; j++) sqlite3VdbeUsesBtree(p, j);
+}
+
+/*
+** Add an opcode that includes the p4 value as an integer.
+*/
+SQLITE_PRIVATE int sqlite3VdbeAddOp4Int(
+ Vdbe *p, /* Add the opcode to this VM */
+ int op, /* The new opcode */
+ int p1, /* The P1 operand */
+ int p2, /* The P2 operand */
+ int p3, /* The P3 operand */
+ int p4 /* The P4 operand as an integer */
+){
+ int addr = sqlite3VdbeAddOp3(p, op, p1, p2, p3);
+ if( p->db->mallocFailed==0 ){
+ VdbeOp *pOp = &p->aOp[addr];
+ pOp->p4type = P4_INT32;
+ pOp->p4.i = p4;
+ }
+ return addr;
+}
+
+/* Insert the end of a co-routine
+*/
+SQLITE_PRIVATE void sqlite3VdbeEndCoroutine(Vdbe *v, int regYield){
+ sqlite3VdbeAddOp1(v, OP_EndCoroutine, regYield);
+
+ /* Clear the temporary register cache, thereby ensuring that each
+ ** co-routine has its own independent set of registers, because co-routines
+ ** might expect their registers to be preserved across an OP_Yield, and
+ ** that could cause problems if two or more co-routines are using the same
+ ** temporary register.
+ */
+ v->pParse->nTempReg = 0;
+ v->pParse->nRangeReg = 0;
+}
+
+/*
+** Create a new symbolic label for an instruction that has yet to be
+** coded. The symbolic label is really just a negative number. The
+** label can be used as the P2 value of an operation. Later, when
+** the label is resolved to a specific address, the VDBE will scan
+** through its operation list and change all values of P2 which match
+** the label into the resolved address.
+**
+** The VDBE knows that a P2 value is a label because labels are
+** always negative and P2 values are supposed to be non-negative.
+** Hence, a negative P2 value is a label that has yet to be resolved.
+** (Later:) This is only true for opcodes that have the OPFLG_JUMP
+** property.
+**
+** Variable usage notes:
+**
+** Parse.aLabel[x] Stores the address that the x-th label resolves
+** into. For testing (SQLITE_DEBUG), unresolved
+** labels store -1, but that is not required.
+** Parse.nLabelAlloc Number of slots allocated to Parse.aLabel[]
+** Parse.nLabel The *negative* of the number of labels that have
+** been issued. The negative is stored because
+** that gives a performance improvement over storing
+** the equivalent positive value.
+*/
+SQLITE_PRIVATE int sqlite3VdbeMakeLabel(Parse *pParse){
+ return --pParse->nLabel;
+}
+
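+/* A typical label lifecycle, by way of example (sketch only):
+**
+**   int lbl = sqlite3VdbeMakeLabel(pParse);  // lbl is negative
+**   sqlite3VdbeAddOp2(v, OP_If, reg, lbl);   // forward jump to lbl
+**   ... opcodes for the fall-through case ...
+**   sqlite3VdbeResolveLabel(v, lbl);         // lbl = next address
+**
+** resolveP2Values() later rewrites the negative P2 value into the
+** resolved address.
+*/
+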
+/*
+** Resolve label "x" to be the address of the next instruction to
+** be inserted. The parameter "x" must have been obtained from
+** a prior call to sqlite3VdbeMakeLabel().
+*/
+static SQLITE_NOINLINE void resizeResolveLabel(Parse *p, Vdbe *v, int j){
+ int nNewSize = 10 - p->nLabel;
+ p->aLabel = sqlite3DbReallocOrFree(p->db, p->aLabel,
+ nNewSize*sizeof(p->aLabel[0]));
+ if( p->aLabel==0 ){
+ p->nLabelAlloc = 0;
+ }else{
+#ifdef SQLITE_DEBUG
+ int i;
+ for(i=p->nLabelAlloc; i<nNewSize; i++) p->aLabel[i] = -1;
+#endif
+ p->nLabelAlloc = nNewSize;
+ p->aLabel[j] = v->nOp;
+ }
+}
+SQLITE_PRIVATE void sqlite3VdbeResolveLabel(Vdbe *v, int x){
+ Parse *p = v->pParse;
+ int j = ADDR(x);
+ assert( v->magic==VDBE_MAGIC_INIT );
+ assert( j<-p->nLabel );
+ assert( j>=0 );
+#ifdef SQLITE_DEBUG
+ if( p->db->flags & SQLITE_VdbeAddopTrace ){
+ printf("RESOLVE LABEL %d to %d\n", x, v->nOp);
+ }
+#endif
+ if( p->nLabelAlloc + p->nLabel < 0 ){
+ resizeResolveLabel(p,v,j);
+ }else{
+ assert( p->aLabel[j]==(-1) ); /* Labels may only be resolved once */
+ p->aLabel[j] = v->nOp;
+ }
+}
+
+/*
+** Mark the VDBE as one that can only be run one time.
+*/
+SQLITE_PRIVATE void sqlite3VdbeRunOnlyOnce(Vdbe *p){
+ p->runOnlyOnce = 1;
+}
+
+/*
+** Mark the VDBE as one that can only be run multiple times.
+*/
+SQLITE_PRIVATE void sqlite3VdbeReusable(Vdbe *p){
+ p->runOnlyOnce = 0;
+}
+
+#ifdef SQLITE_DEBUG /* sqlite3AssertMayAbort() logic */
+
+/*
+** The following type and function are used to iterate through all opcodes
+** in a Vdbe main program and each of the sub-programs (triggers) it may
+** invoke directly or indirectly. It should be used as follows:
+**
+** Op *pOp;
+** VdbeOpIter sIter;
+**
+** memset(&sIter, 0, sizeof(sIter));
+** sIter.v = v; // v is of type Vdbe*
+** while( (pOp = opIterNext(&sIter)) ){
+** // Do something with pOp
+** }
+** sqlite3DbFree(v->db, sIter.apSub);
+**
+*/
+typedef struct VdbeOpIter VdbeOpIter;
+struct VdbeOpIter {
+ Vdbe *v; /* Vdbe to iterate through the opcodes of */
+ SubProgram **apSub; /* Array of subprograms */
+ int nSub; /* Number of entries in apSub */
+ int iAddr; /* Address of next instruction to return */
+ int iSub; /* 0 = main program, 1 = first sub-program etc. */
+};
+static Op *opIterNext(VdbeOpIter *p){
+ Vdbe *v = p->v;
+ Op *pRet = 0;
+ Op *aOp;
+ int nOp;
+
+ if( p->iSub<=p->nSub ){
+
+ if( p->iSub==0 ){
+ aOp = v->aOp;
+ nOp = v->nOp;
+ }else{
+ aOp = p->apSub[p->iSub-1]->aOp;
+ nOp = p->apSub[p->iSub-1]->nOp;
+ }
+ assert( p->iAddr<nOp );
+
+ pRet = &aOp[p->iAddr];
+ p->iAddr++;
+ if( p->iAddr==nOp ){
+ p->iSub++;
+ p->iAddr = 0;
+ }
+
+ if( pRet->p4type==P4_SUBPROGRAM ){
+ int nByte = (p->nSub+1)*sizeof(SubProgram*);
+ int j;
+ for(j=0; j<p->nSub; j++){
+ if( p->apSub[j]==pRet->p4.pProgram ) break;
+ }
+ if( j==p->nSub ){
+ p->apSub = sqlite3DbReallocOrFree(v->db, p->apSub, nByte);
+ if( !p->apSub ){
+ pRet = 0;
+ }else{
+ p->apSub[p->nSub++] = pRet->p4.pProgram;
+ }
+ }
+ }
+ }
+
+ return pRet;
+}
+
+/*
+** Check if the program stored in the VM associated with pParse may
+** throw an ABORT exception (causing the statement, but not the entire
+** transaction, to be rolled back). This condition is true if the main
+** program or any sub-programs contain any of the following:
+**
+** * OP_Halt with P1=SQLITE_CONSTRAINT and P2=OE_Abort.
+** * OP_HaltIfNull with P1=SQLITE_CONSTRAINT and P2=OE_Abort.
+** * OP_Destroy
+** * OP_VUpdate
+** * OP_VCreate
+** * OP_VRename
+** * OP_FkCounter with P2==0 (immediate foreign key constraint)
+** * OP_CreateBtree/BTREE_INTKEY and OP_InitCoroutine
+** (for CREATE TABLE AS SELECT ...)
+**
+** Then check that the value of Parse.mayAbort is true if an
+** ABORT may be thrown, or false otherwise. Return true if they
+** match, or false otherwise. This function is intended to be used as
+** part of an assert statement in the compiler. Similar to:
+**
+** assert( sqlite3VdbeAssertMayAbort(pParse->pVdbe, pParse->mayAbort) );
+*/
+SQLITE_PRIVATE int sqlite3VdbeAssertMayAbort(Vdbe *v, int mayAbort){
+ int hasAbort = 0;
+ int hasFkCounter = 0;
+ int hasCreateTable = 0;
+ int hasCreateIndex = 0;
+ int hasInitCoroutine = 0;
+ Op *pOp;
+ VdbeOpIter sIter;
+ memset(&sIter, 0, sizeof(sIter));
+ sIter.v = v;
+
+ while( (pOp = opIterNext(&sIter))!=0 ){
+ int opcode = pOp->opcode;
+ if( opcode==OP_Destroy || opcode==OP_VUpdate || opcode==OP_VRename
+ || opcode==OP_VDestroy
+ || opcode==OP_VCreate
+ || (opcode==OP_ParseSchema && pOp->p4.z==0)
+ || ((opcode==OP_Halt || opcode==OP_HaltIfNull)
+ && ((pOp->p1)!=SQLITE_OK && pOp->p2==OE_Abort))
+ ){
+ hasAbort = 1;
+ break;
+ }
+ if( opcode==OP_CreateBtree && pOp->p3==BTREE_INTKEY ) hasCreateTable = 1;
+ if( mayAbort ){
+ /* hasCreateIndex may also be set for some DELETE statements that use
+ ** OP_Clear. So this routine may end up returning true in the case
+ ** where a "DELETE FROM tbl" has a statement-journal but does not
+ ** require one. This is not so bad - it is an inefficiency, not a bug. */
+ if( opcode==OP_CreateBtree && pOp->p3==BTREE_BLOBKEY ) hasCreateIndex = 1;
+ if( opcode==OP_Clear ) hasCreateIndex = 1;
+ }
+ if( opcode==OP_InitCoroutine ) hasInitCoroutine = 1;
+#ifndef SQLITE_OMIT_FOREIGN_KEY
+ if( opcode==OP_FkCounter && pOp->p1==0 && pOp->p2==1 ){
+ hasFkCounter = 1;
+ }
+#endif
+ }
+ sqlite3DbFree(v->db, sIter.apSub);
+
+ /* Return true if hasAbort==mayAbort. Or if a malloc failure occurred.
+ ** If malloc failed, then the while() loop above may not have iterated
+ ** through all opcodes and hasAbort may be set incorrectly. Return
+ ** true for this case to prevent the assert() in the callers frame
+ ** from failing. */
+ return ( v->db->mallocFailed || hasAbort==mayAbort || hasFkCounter
+ || (hasCreateTable && hasInitCoroutine) || hasCreateIndex
+ );
+}
+#endif /* SQLITE_DEBUG - the sqlite3AssertMayAbort() function */
+
+#ifdef SQLITE_DEBUG
+/*
+** Increment the nWrite counter in the VDBE if the cursor is not an
+** ephemeral cursor, or if the cursor argument is NULL.
+*/
+SQLITE_PRIVATE void sqlite3VdbeIncrWriteCounter(Vdbe *p, VdbeCursor *pC){
+ if( pC==0
+ || (pC->eCurType!=CURTYPE_SORTER
+ && pC->eCurType!=CURTYPE_PSEUDO
+ && !pC->isEphemeral)
+ ){
+ p->nWrite++;
+ }
+}
+#endif
+
+#ifdef SQLITE_DEBUG
+/*
+** Assert if an Abort at this point in time might result in a corrupt
+** database.
+*/
+SQLITE_PRIVATE void sqlite3VdbeAssertAbortable(Vdbe *p){
+ assert( p->nWrite==0 || p->usesStmtJournal );
+}
+#endif
+
+/*
+** This routine is called after all opcodes have been inserted. It loops
+** through all the opcodes and fixes up some details.
+**
+** (1) For each jump instruction with a negative P2 value (a label)
+** resolve the P2 value to an actual address.
+**
+** (2) Compute the maximum number of arguments used by any SQL function
+** and store that value in *pMaxFuncArgs.
+**
+** (3) Update the Vdbe.readOnly and Vdbe.bIsReader flags to accurately
+** indicate what the prepared statement actually does.
+**
+** (4) Initialize the p4.xAdvance pointer on opcodes that use it.
+**
+** (5) Reclaim the memory allocated for storing labels.
+**
+** This routine will only function correctly if the mkopcodeh.tcl generator
+** script numbers the opcodes correctly. Changes to this routine must be
+** coordinated with changes to mkopcodeh.tcl.
+*/
+static void resolveP2Values(Vdbe *p, int *pMaxFuncArgs){
+ int nMaxArgs = *pMaxFuncArgs;
+ Op *pOp;
+ Parse *pParse = p->pParse;
+ int *aLabel = pParse->aLabel;
+ p->readOnly = 1;
+ p->bIsReader = 0;
+ pOp = &p->aOp[p->nOp-1];
+ while(1){
+
+ /* Only JUMP opcodes and the short list of special opcodes in the switch
+ ** below need to be considered. The mkopcodeh.tcl generator script groups
+ ** all these opcodes together near the front of the opcode list. Skip
+ ** any opcode that does not need processing by virtue of the fact that
+ ** it is larger than SQLITE_MX_JUMP_OPCODE, as a performance optimization.
+ */
+ if( pOp->opcode<=SQLITE_MX_JUMP_OPCODE ){
+ /* NOTE: Be sure to update mkopcodeh.tcl when adding or removing
+ ** cases from this switch! */
+ switch( pOp->opcode ){
+ case OP_Transaction: {
+ if( pOp->p2!=0 ) p->readOnly = 0;
+ /* no break */ deliberate_fall_through
+ }
+ case OP_AutoCommit:
+ case OP_Savepoint: {
+ p->bIsReader = 1;
+ break;
+ }
+#ifndef SQLITE_OMIT_WAL
+ case OP_Checkpoint:
+#endif
+ case OP_Vacuum:
+ case OP_JournalMode: {
+ p->readOnly = 0;
+ p->bIsReader = 1;
+ break;
+ }
+ case OP_Next:
+ case OP_SorterNext: {
+ pOp->p4.xAdvance = sqlite3BtreeNext;
+ pOp->p4type = P4_ADVANCE;
+ /* The code generator never codes any of these opcodes as a jump
+ ** to a label. They are always coded as a jump backwards to a
+ ** known address */
+ assert( pOp->p2>=0 );
+ break;
+ }
+ case OP_Prev: {
+ pOp->p4.xAdvance = sqlite3BtreePrevious;
+ pOp->p4type = P4_ADVANCE;
+ /* The code generator never codes any of these opcodes as a jump
+ ** to a label. They are always coded as a jump backwards to a
+ ** known address */
+ assert( pOp->p2>=0 );
+ break;
+ }
+#ifndef SQLITE_OMIT_VIRTUALTABLE
+ case OP_VUpdate: {
+ if( pOp->p2>nMaxArgs ) nMaxArgs = pOp->p2;
+ break;
+ }
+ case OP_VFilter: {
+ int n;
+ assert( (pOp - p->aOp) >= 3 );
+ assert( pOp[-1].opcode==OP_Integer );
+ n = pOp[-1].p1;
+ if( n>nMaxArgs ) nMaxArgs = n;
+ /* Fall through into the default case */
+ /* no break */ deliberate_fall_through
+ }
+#endif
+ default: {
+ if( pOp->p2<0 ){
+ /* The mkopcodeh.tcl script has so arranged things that the only
+ ** non-jump opcodes less than SQLITE_MX_JUMP_OPCODE are guaranteed to
+ ** have non-negative values for P2. */
+ assert( (sqlite3OpcodeProperty[pOp->opcode] & OPFLG_JUMP)!=0 );
+ assert( ADDR(pOp->p2)<-pParse->nLabel );
+ pOp->p2 = aLabel[ADDR(pOp->p2)];
+ }
+ break;
+ }
+ }
+ /* The mkopcodeh.tcl script has so arranged things that the only
+ ** non-jump opcodes less than SQLITE_MX_JUMP_OPCODE are guaranteed to
+ ** have non-negative values for P2. */
+ assert( (sqlite3OpcodeProperty[pOp->opcode]&OPFLG_JUMP)==0 || pOp->p2>=0);
+ }
+ if( pOp==p->aOp ) break;
+ pOp--;
+ }
+ sqlite3DbFree(p->db, pParse->aLabel);
+ pParse->aLabel = 0;
+ pParse->nLabel = 0;
+ *pMaxFuncArgs = nMaxArgs;
+ assert( p->bIsReader!=0 || DbMaskAllZero(p->btreeMask) );
+}
+
+/*
+** Return the address of the next instruction to be inserted.
+*/
+SQLITE_PRIVATE int sqlite3VdbeCurrentAddr(Vdbe *p){
+ assert( p->magic==VDBE_MAGIC_INIT );
+ return p->nOp;
+}
+
+/*
+** Verify that at least N opcode slots are available in p without
+** having to malloc for more space (except when compiled using
+** SQLITE_TEST_REALLOC_STRESS). This interface is used during testing
+** to verify that certain calls to sqlite3VdbeAddOpList() can never
+** fail due to an OOM fault and hence that the return value from
+** sqlite3VdbeAddOpList() will always be non-NULL.
+*/
+#if defined(SQLITE_DEBUG) && !defined(SQLITE_TEST_REALLOC_STRESS)
+SQLITE_PRIVATE void sqlite3VdbeVerifyNoMallocRequired(Vdbe *p, int N){
+ assert( p->nOp + N <= p->nOpAlloc );
+}
+#endif
+
+/*
+** Verify that the VM passed as the only argument does not contain
+** an OP_ResultRow opcode. Fail an assert() if it does. This is used
+** by code in pragma.c to ensure that the implementation of certain
+** pragmas comports with the flags specified in the mkpragmatab.tcl
+** script.
+*/
+#if defined(SQLITE_DEBUG) && !defined(SQLITE_TEST_REALLOC_STRESS)
+SQLITE_PRIVATE void sqlite3VdbeVerifyNoResultRow(Vdbe *p){
+ int i;
+ for(i=0; i<p->nOp; i++){
+ assert( p->aOp[i].opcode!=OP_ResultRow );
+ }
+}
+#endif
+
+/*
+** Generate code (a single OP_Abortable opcode) that will
+** verify that the VDBE program can safely call Abort in the current
+** context.
+*/
+#if defined(SQLITE_DEBUG)
+SQLITE_PRIVATE void sqlite3VdbeVerifyAbortable(Vdbe *p, int onError){
+ if( onError==OE_Abort ) sqlite3VdbeAddOp0(p, OP_Abortable);
+}
+#endif
+
+/*
+** This function returns a pointer to the array of opcodes associated with
+** the Vdbe passed as the first argument. It is the caller's responsibility
+** to arrange for the returned array to be eventually freed using the
+** vdbeFreeOpArray() function.
+**
+** Before returning, *pnOp is set to the number of entries in the returned
+** array. Also, *pnMaxArg is set to the larger of its current value and
+** the number of entries in the Vdbe.apArg[] array required to execute the
+** returned program.
+*/
+SQLITE_PRIVATE VdbeOp *sqlite3VdbeTakeOpArray(Vdbe *p, int *pnOp, int *pnMaxArg){
+ VdbeOp *aOp = p->aOp;
+ assert( aOp && !p->db->mallocFailed );
+
+ /* Check that sqlite3VdbeUsesBtree() was not called on this VM */
+ assert( DbMaskAllZero(p->btreeMask) );
+
+ resolveP2Values(p, pnMaxArg);
+ *pnOp = p->nOp;
+ p->aOp = 0;
+ return aOp;
+}
+
+/*
+** Add a whole list of operations to the operation stack. Return a
+** pointer to the first operation inserted.
+**
+** Non-zero P2 arguments to jump instructions are automatically adjusted
+** so that the jump target is relative to the first operation inserted.
+*/
+SQLITE_PRIVATE VdbeOp *sqlite3VdbeAddOpList(
+ Vdbe *p, /* Add opcodes to the prepared statement */
+ int nOp, /* Number of opcodes to add */
+ VdbeOpList const *aOp, /* The opcodes to be added */
+ int iLineno /* Source-file line number of first opcode */
+){
+ int i;
+ VdbeOp *pOut, *pFirst;
+ assert( nOp>0 );
+ assert( p->magic==VDBE_MAGIC_INIT );
+ if( p->nOp + nOp > p->nOpAlloc && growOpArray(p, nOp) ){
+ return 0;
+ }
+ pFirst = pOut = &p->aOp[p->nOp];
+ for(i=0; i<nOp; i++, aOp++, pOut++){
+ pOut->opcode = aOp->opcode;
+ pOut->p1 = aOp->p1;
+ pOut->p2 = aOp->p2;
+ assert( aOp->p2>=0 );
+ if( (sqlite3OpcodeProperty[aOp->opcode] & OPFLG_JUMP)!=0 && aOp->p2>0 ){
+ pOut->p2 += p->nOp;
+ }
+ pOut->p3 = aOp->p3;
+ pOut->p4type = P4_NOTUSED;
+ pOut->p4.p = 0;
+ pOut->p5 = 0;
+#ifdef SQLITE_ENABLE_EXPLAIN_COMMENTS
+ pOut->zComment = 0;
+#endif
+#ifdef SQLITE_VDBE_COVERAGE
+ pOut->iSrcLine = iLineno+i;
+#else
+ (void)iLineno;
+#endif
+#ifdef SQLITE_DEBUG
+ if( p->db->flags & SQLITE_VdbeAddopTrace ){
+ sqlite3VdbePrintOp(0, i+p->nOp, &p->aOp[i+p->nOp]);
+ }
+#endif
+ }
+ p->nOp += nOp;
+ return pFirst;
+}
+
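+/* A hypothetical use (sketch only; the opcode choice and the cookie
+** value are illustrative):
+**
+**   static const VdbeOpList setCookie[] = {
+**     { OP_Transaction, 0, 1, 0},  // 0
+**     { OP_SetCookie,   0, 0, 0},  // 1
+**   };
+**   VdbeOp *aOp = sqlite3VdbeAddOpList(v, ArraySize(setCookie),
+**                                      setCookie, 0);
+**   if( aOp ) aOp[1].p2 = iNewCookieValue;
+**
+** Any jump targets inside the list are relative to the start of the list
+** and are shifted by p->nOp as the opcodes are copied in.
+*/
+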
+#if defined(SQLITE_ENABLE_STMT_SCANSTATUS)
+/*
+** Add an entry to the array of counters managed by sqlite3_stmt_scanstatus().
+*/
+SQLITE_PRIVATE void sqlite3VdbeScanStatus(
+ Vdbe *p, /* VM to add scanstatus() to */
+ int addrExplain, /* Address of OP_Explain (or 0) */
+ int addrLoop, /* Address of loop counter */
+ int addrVisit, /* Address of rows visited counter */
+ LogEst nEst, /* Estimated number of output rows */
+ const char *zName /* Name of table or index being scanned */
+){
+ sqlite3_int64 nByte = (p->nScan+1) * sizeof(ScanStatus);
+ ScanStatus *aNew;
+ aNew = (ScanStatus*)sqlite3DbRealloc(p->db, p->aScan, nByte);
+ if( aNew ){
+ ScanStatus *pNew = &aNew[p->nScan++];
+ pNew->addrExplain = addrExplain;
+ pNew->addrLoop = addrLoop;
+ pNew->addrVisit = addrVisit;
+ pNew->nEst = nEst;
+ pNew->zName = sqlite3DbStrDup(p->db, zName);
+ p->aScan = aNew;
+ }
+}
+#endif
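+/* Illustrative sketch (hypothetical names and addresses): after coding a
+** scan loop, a code generator would record it for sqlite3_stmt_scanstatus()
+** roughly as follows, where addrRewind and addrNext are the addresses of
+** the loop-counter and rows-visited opcodes noted during loop generation:
+**
+**   sqlite3VdbeScanStatus(v, addrExplain, addrRewind, addrNext,
+**                         nEstimatedRows, "idx_example");
+*/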
+
+
+/*
+** Change the value of the opcode, or P1, P2, P3, or P5 operands
+** for a specific instruction.
+*/
+SQLITE_PRIVATE void sqlite3VdbeChangeOpcode(Vdbe *p, int addr, u8 iNewOpcode){
+ sqlite3VdbeGetOp(p,addr)->opcode = iNewOpcode;
+}
+SQLITE_PRIVATE void sqlite3VdbeChangeP1(Vdbe *p, int addr, int val){
+ sqlite3VdbeGetOp(p,addr)->p1 = val;
+}
+SQLITE_PRIVATE void sqlite3VdbeChangeP2(Vdbe *p, int addr, int val){
+ sqlite3VdbeGetOp(p,addr)->p2 = val;
+}
+SQLITE_PRIVATE void sqlite3VdbeChangeP3(Vdbe *p, int addr, int val){
+ sqlite3VdbeGetOp(p,addr)->p3 = val;
+}
+SQLITE_PRIVATE void sqlite3VdbeChangeP5(Vdbe *p, u16 p5){
+ assert( p->nOp>0 || p->db->mallocFailed );
+ if( p->nOp>0 ) p->aOp[p->nOp-1].p5 = p5;
+}
+
+/*
+** Change the P2 operand of instruction addr so that it points to
+** the address of the next instruction to be coded.
+*/
+SQLITE_PRIVATE void sqlite3VdbeJumpHere(Vdbe *p, int addr){
+ sqlite3VdbeChangeP2(p, addr, p->nOp);
+}
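+/* Illustrative sketch of the usual backpatch pattern (hypothetical code):
+**
+**   int addrOnce = sqlite3VdbeAddOp0(v, OP_Once);  -- P2 not yet known
+**   ...generate the run-once body...
+**   sqlite3VdbeJumpHere(v, addrOnce);  -- OP_Once now jumps past the body
+*/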
+
+/*
+** Change the P2 operand of the jump instruction at addr so that
+** the jump lands on the next opcode. Or if the jump instruction was
+** the previous opcode (and is thus a no-op) then simply back up
+** the next instruction counter by one slot so that the jump is
+** overwritten by the next inserted opcode.
+**
+** This routine is an optimization of sqlite3VdbeJumpHere() that
+** strives to omit useless byte-code like this:
+**
+** 7 Once 0 8 0
+** 8 ...
+*/
+SQLITE_PRIVATE void sqlite3VdbeJumpHereOrPopInst(Vdbe *p, int addr){
+ if( addr==p->nOp-1 ){
+ assert( p->aOp[addr].opcode==OP_Once
+ || p->aOp[addr].opcode==OP_If
+ || p->aOp[addr].opcode==OP_FkIfZero );
+ assert( p->aOp[addr].p4type==0 );
+#ifdef SQLITE_VDBE_COVERAGE
+ sqlite3VdbeGetOp(p,-1)->iSrcLine = 0; /* Erase VdbeCoverage() macros */
+#endif
+ p->nOp--;
+ }else{
+ sqlite3VdbeChangeP2(p, addr, p->nOp);
+ }
+}
+
+
+/*
+** If the input FuncDef structure is ephemeral, then free it. If
+** the FuncDef is not ephemeral, then do nothing.
+*/
+static void freeEphemeralFunction(sqlite3 *db, FuncDef *pDef){
+ if( (pDef->funcFlags & SQLITE_FUNC_EPHEM)!=0 ){
+ sqlite3DbFreeNN(db, pDef);
+ }
+}
+
+/*
+** Delete a P4 value if necessary.
+*/
+static SQLITE_NOINLINE void freeP4Mem(sqlite3 *db, Mem *p){
+ if( p->szMalloc ) sqlite3DbFree(db, p->zMalloc);
+ sqlite3DbFreeNN(db, p);
+}
+static SQLITE_NOINLINE void freeP4FuncCtx(sqlite3 *db, sqlite3_context *p){
+ freeEphemeralFunction(db, p->pFunc);
+ sqlite3DbFreeNN(db, p);
+}
+static void freeP4(sqlite3 *db, int p4type, void *p4){
+ assert( db );
+ switch( p4type ){
+ case P4_FUNCCTX: {
+ freeP4FuncCtx(db, (sqlite3_context*)p4);
+ break;
+ }
+ case P4_REAL:
+ case P4_INT64:
+ case P4_DYNAMIC:
+ case P4_DYNBLOB:
+ case P4_INTARRAY: {
+ sqlite3DbFree(db, p4);
+ break;
+ }
+ case P4_KEYINFO: {
+ if( db->pnBytesFreed==0 ) sqlite3KeyInfoUnref((KeyInfo*)p4);
+ break;
+ }
+#ifdef SQLITE_ENABLE_CURSOR_HINTS
+ case P4_EXPR: {
+ sqlite3ExprDelete(db, (Expr*)p4);
+ break;
+ }
+#endif
+ case P4_FUNCDEF: {
+ freeEphemeralFunction(db, (FuncDef*)p4);
+ break;
+ }
+ case P4_MEM: {
+ if( db->pnBytesFreed==0 ){
+ sqlite3ValueFree((sqlite3_value*)p4);
+ }else{
+ freeP4Mem(db, (Mem*)p4);
+ }
+ break;
+ }
+ case P4_VTAB : {
+ if( db->pnBytesFreed==0 ) sqlite3VtabUnlock((VTable *)p4);
+ break;
+ }
+ }
+}
+
+/*
+** Free the space allocated for aOp and any p4 values allocated for the
+** opcodes contained within. If aOp is not NULL it is assumed to contain
+** nOp entries.
+*/
+static void vdbeFreeOpArray(sqlite3 *db, Op *aOp, int nOp){
+ if( aOp ){
+ Op *pOp;
+ for(pOp=&aOp[nOp-1]; pOp>=aOp; pOp--){
+ if( pOp->p4type <= P4_FREE_IF_LE ) freeP4(db, pOp->p4type, pOp->p4.p);
+#ifdef SQLITE_ENABLE_EXPLAIN_COMMENTS
+ sqlite3DbFree(db, pOp->zComment);
+#endif
+ }
+ sqlite3DbFreeNN(db, aOp);
+ }
+}
+
+/*
+** Link the SubProgram object passed as the second argument into the linked
+** list at Vdbe.pSubProgram. This list is used to delete all sub-program
+** objects when the VM is no longer required.
+*/
+SQLITE_PRIVATE void sqlite3VdbeLinkSubProgram(Vdbe *pVdbe, SubProgram *p){
+ p->pNext = pVdbe->pProgram;
+ pVdbe->pProgram = p;
+}
+
+/*
+** Return true if the given Vdbe has any SubPrograms.
+*/
+SQLITE_PRIVATE int sqlite3VdbeHasSubProgram(Vdbe *pVdbe){
+ return pVdbe->pProgram!=0;
+}
+
+/*
+** Change the opcode at addr into OP_Noop
+*/
+SQLITE_PRIVATE int sqlite3VdbeChangeToNoop(Vdbe *p, int addr){
+ VdbeOp *pOp;
+ if( p->db->mallocFailed ) return 0;
+ assert( addr>=0 && addr<p->nOp );
+ pOp = &p->aOp[addr];
+ freeP4(p->db, pOp->p4type, pOp->p4.p);
+ pOp->p4type = P4_NOTUSED;
+ pOp->p4.z = 0;
+ pOp->opcode = OP_Noop;
+ return 1;
+}
+
+/*
+** If the last opcode is "op" and it is not a jump destination,
+** then remove it. Return true if and only if an opcode was removed.
+*/
+SQLITE_PRIVATE int sqlite3VdbeDeletePriorOpcode(Vdbe *p, u8 op){
+ if( p->nOp>0 && p->aOp[p->nOp-1].opcode==op ){
+ return sqlite3VdbeChangeToNoop(p, p->nOp-1);
+ }else{
+ return 0;
+ }
+}
+
+#ifdef SQLITE_DEBUG
+/*
+** Generate an OP_ReleaseReg opcode to indicate that a range of
+** registers, except any identified by mask, are no longer in use.
+*/
+SQLITE_PRIVATE void sqlite3VdbeReleaseRegisters(
+ Parse *pParse, /* Parsing context */
+ int iFirst, /* Index of first register to be released */
+ int N, /* Number of registers to release */
+ u32 mask, /* Mask of registers to NOT release */
+ int bUndefine /* If true, mark registers as undefined */
+){
+ if( N==0 ) return;
+ assert( pParse->pVdbe );
+ assert( iFirst>=1 );
+ assert( iFirst+N-1<=pParse->nMem );
+ if( N<=31 && mask!=0 ){
+ while( N>0 && (mask&1)!=0 ){
+ mask >>= 1;
+ iFirst++;
+ N--;
+ }
+ while( N>0 && N<=32 && (mask & MASKBIT32(N-1))!=0 ){
+ mask &= ~MASKBIT32(N-1);
+ N--;
+ }
+ }
+ if( N>0 ){
+ sqlite3VdbeAddOp3(pParse->pVdbe, OP_ReleaseReg, iFirst, N, *(int*)&mask);
+ if( bUndefine ) sqlite3VdbeChangeP5(pParse->pVdbe, 1);
+ }
+}
+#endif /* SQLITE_DEBUG */
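+/* Worked example (hypothetical values): to mark registers 5..9 as unused
+** while keeping register 7 live, a caller would pass iFirst=5, N=5 and
+** mask=MASKBIT32(2), because register 7 is offset 2 from iFirst and the
+** mask bits are counted relative to iFirst.
+*/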
+
+
+/*
+** Change the value of the P4 operand for a specific instruction.
+** This routine is useful when a large program is loaded from a
+** static array using sqlite3VdbeAddOpList but we want to make a
+** few minor changes to the program.
+**
+** If n>=0 then the P4 operand is dynamic, meaning that a copy of
+** the string is made into memory obtained from sqlite3_malloc().
+** A value of n==0 means copy bytes of zP4 up to and including the
+** first null byte. If n>0 then copy n+1 bytes of zP4.
+**
+** Other values of n (P4_STATIC, P4_COLLSEQ etc.) indicate that zP4 points
+** to a string or structure that is guaranteed to exist for the lifetime of
+** the Vdbe. In these cases we can just copy the pointer.
+**
+** If addr<0 then change P4 on the most recently inserted instruction.
+*/
+static void SQLITE_NOINLINE vdbeChangeP4Full(
+ Vdbe *p,
+ Op *pOp,
+ const char *zP4,
+ int n
+){
+ if( pOp->p4type ){
+ freeP4(p->db, pOp->p4type, pOp->p4.p);
+ pOp->p4type = 0;
+ pOp->p4.p = 0;
+ }
+ if( n<0 ){
+ sqlite3VdbeChangeP4(p, (int)(pOp - p->aOp), zP4, n);
+ }else{
+ if( n==0 ) n = sqlite3Strlen30(zP4);
+ pOp->p4.z = sqlite3DbStrNDup(p->db, zP4, n);
+ pOp->p4type = P4_DYNAMIC;
+ }
+}
+SQLITE_PRIVATE void sqlite3VdbeChangeP4(Vdbe *p, int addr, const char *zP4, int n){
+ Op *pOp;
+ sqlite3 *db;
+ assert( p!=0 );
+ db = p->db;
+ assert( p->magic==VDBE_MAGIC_INIT );
+ assert( p->aOp!=0 || db->mallocFailed );
+ if( db->mallocFailed ){
+ if( n!=P4_VTAB ) freeP4(db, n, (void*)*(char**)&zP4);
+ return;
+ }
+ assert( p->nOp>0 );
+ assert( addr<p->nOp );
+ if( addr<0 ){
+ addr = p->nOp - 1;
+ }
+ pOp = &p->aOp[addr];
+ if( n>=0 || pOp->p4type ){
+ vdbeChangeP4Full(p, pOp, zP4, n);
+ return;
+ }
+ if( n==P4_INT32 ){
+ /* Note: this cast is safe, because the original value was an int
+ ** that was cast to a (const char *). */
+ pOp->p4.i = SQLITE_PTR_TO_INT(zP4);
+ pOp->p4type = P4_INT32;
+ }else if( zP4!=0 ){
+ assert( n<0 );
+ pOp->p4.p = (void*)zP4;
+ pOp->p4type = (signed char)n;
+ if( n==P4_VTAB ) sqlite3VtabLock((VTable*)zP4);
+ }
+}
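+/* Illustrative sketch (hypothetical values) of the common calling patterns:
+** a copied string, a borrowed static string, and an inline integer:
+**
+**   sqlite3VdbeChangeP4(v, -1, zColName, 0);          -- copy to first NUL
+**   sqlite3VdbeChangeP4(v, -1, "BINARY", P4_STATIC);  -- borrow the pointer
+**   sqlite3VdbeChangeP4(v, -1, SQLITE_INT_TO_PTR(n), P4_INT32);
+*/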
+
+/*
+** Change the P4 operand of the most recently coded instruction
+** to the value defined by the arguments. This is a high-speed
+** version of sqlite3VdbeChangeP4().
+**
+** The P4 operand must not have been previously defined. And the new
+** P4 must not be P4_INT32. Use sqlite3VdbeChangeP4() in either of
+** those cases.
+*/
+SQLITE_PRIVATE void sqlite3VdbeAppendP4(Vdbe *p, void *pP4, int n){
+ VdbeOp *pOp;
+ assert( n!=P4_INT32 && n!=P4_VTAB );
+ assert( n<=0 );
+ if( p->db->mallocFailed ){
+ freeP4(p->db, n, pP4);
+ }else{
+ assert( pP4!=0 );
+ assert( p->nOp>0 );
+ pOp = &p->aOp[p->nOp-1];
+ assert( pOp->p4type==P4_NOTUSED );
+ pOp->p4type = n;
+ pOp->p4.p = pP4;
+ }
+}
+
+/*
+** Set the P4 on the most recently added opcode to the KeyInfo for the
+** index given.
+*/
+SQLITE_PRIVATE void sqlite3VdbeSetP4KeyInfo(Parse *pParse, Index *pIdx){
+ Vdbe *v = pParse->pVdbe;
+ KeyInfo *pKeyInfo;
+ assert( v!=0 );
+ assert( pIdx!=0 );
+ pKeyInfo = sqlite3KeyInfoOfIndex(pParse, pIdx);
+ if( pKeyInfo ) sqlite3VdbeAppendP4(v, pKeyInfo, P4_KEYINFO);
+}
+
+#ifdef SQLITE_ENABLE_EXPLAIN_COMMENTS
+/*
+** Change the comment on the most recently coded instruction. Or
+** insert a No-op and add the comment to that new instruction. This
+** makes the code easier to read during debugging. None of this happens
+** in a production build.
+*/
+static void vdbeVComment(Vdbe *p, const char *zFormat, va_list ap){
+ assert( p->nOp>0 || p->aOp==0 );
+ assert( p->aOp==0 || p->aOp[p->nOp-1].zComment==0 || p->db->mallocFailed
+ || p->pParse->nErr>0 );
+ if( p->nOp ){
+ assert( p->aOp );
+ sqlite3DbFree(p->db, p->aOp[p->nOp-1].zComment);
+ p->aOp[p->nOp-1].zComment = sqlite3VMPrintf(p->db, zFormat, ap);
+ }
+}
+SQLITE_PRIVATE void sqlite3VdbeComment(Vdbe *p, const char *zFormat, ...){
+ va_list ap;
+ if( p ){
+ va_start(ap, zFormat);
+ vdbeVComment(p, zFormat, ap);
+ va_end(ap);
+ }
+}
+SQLITE_PRIVATE void sqlite3VdbeNoopComment(Vdbe *p, const char *zFormat, ...){
+ va_list ap;
+ if( p ){
+ sqlite3VdbeAddOp0(p, OP_Noop);
+ va_start(ap, zFormat);
+ vdbeVComment(p, zFormat, ap);
+ va_end(ap);
+ }
+}
+#endif /* SQLITE_ENABLE_EXPLAIN_COMMENTS */
+
+#ifdef SQLITE_VDBE_COVERAGE
+/*
+** Set the value of the iSrcLine field for the previously coded instruction.
+*/
+SQLITE_PRIVATE void sqlite3VdbeSetLineNumber(Vdbe *v, int iLine){
+ sqlite3VdbeGetOp(v,-1)->iSrcLine = iLine;
+}
+#endif /* SQLITE_VDBE_COVERAGE */
+
+/*
+** Return the opcode for a given address. If the address is -1, then
+** return the most recently inserted opcode.
+**
+** If a memory allocation error has occurred prior to the calling of this
+** routine, then a pointer to a dummy VdbeOp will be returned. That opcode
+** is readable but not writable, though it is cast to a writable value.
+** The return of a dummy opcode allows the caller to continue functioning
+** after an OOM fault without having to check to see if the return from
+** this routine is a valid pointer. But because the dummy.opcode is 0,
+** dummy will never be written to. This is verified by code inspection and
+** by running with Valgrind.
+*/
+SQLITE_PRIVATE VdbeOp *sqlite3VdbeGetOp(Vdbe *p, int addr){
+ /* C89 specifies that the constant "dummy" will be initialized to all
+ ** zeros, which is correct. MSVC generates a warning, nevertheless. */
+ static VdbeOp dummy; /* Ignore the MSVC warning about no initializer */
+ assert( p->magic==VDBE_MAGIC_INIT );
+ if( addr<0 ){
+ addr = p->nOp - 1;
+ }
+ assert( (addr>=0 && addr<p->nOp) || p->db->mallocFailed );
+ if( p->db->mallocFailed ){
+ return (VdbeOp*)&dummy;
+ }else{
+ return &p->aOp[addr];
+ }
+}
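+/* Illustrative sketch (hypothetical code): a caller can inspect the most
+** recently coded opcode without a NULL check, e.g.
+**
+**   if( sqlite3VdbeGetOp(v, -1)->opcode==OP_Noop ){ ... }
+**
+** and the read remains well-defined after an OOM because the pointer then
+** refers to the all-zero dummy opcode.
+*/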
+
+#if defined(SQLITE_ENABLE_EXPLAIN_COMMENTS)
+/*
+** Return an integer value for one of the parameters to the opcode pOp
+** determined by character c.
+*/
+static int translateP(char c, const Op *pOp){
+ if( c=='1' ) return pOp->p1;
+ if( c=='2' ) return pOp->p2;
+ if( c=='3' ) return pOp->p3;
+ if( c=='4' ) return pOp->p4.i;
+ return pOp->p5;
+}
+
+/*
+** Compute a string for the "comment" field of a VDBE opcode listing.
+**
+** The Synopsis: field in comments in the vdbe.c source file gets converted
+** to an extra string that is appended to the sqlite3OpcodeName(). In the
+** absence of other comments, this synopsis becomes the comment on the opcode.
+** Some translation occurs:
+**
+** "PX" -> "r[X]"
+** "PX@PY" -> "r[X..X+Y-1]" or "r[x]" if y is 0 or 1
+** "PX@PY+1" -> "r[X..X+Y]" or "r[x]" if y is 0
+** "PY..PY" -> "r[X..Y]" or "r[x]" if y<=x
+*/
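+/* Worked example (hypothetical opcode values): a synopsis reading
+** "r[P3]=r[P1@P2]" on an instruction with p1=2, p2=3, p3=9 is rendered
+** as "r[9]=r[2..4]".
+*/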
+SQLITE_PRIVATE char *sqlite3VdbeDisplayComment(
+ sqlite3 *db, /* Optional - Oom error reporting only */
+ const Op *pOp, /* The opcode to be commented */
+ const char *zP4 /* Previously obtained value for P4 */
+){
+ const char *zOpName;
+ const char *zSynopsis;
+ int nOpName;
+ int ii;
+ char zAlt[50];
+ StrAccum x;
+
+ sqlite3StrAccumInit(&x, 0, 0, 0, SQLITE_MAX_LENGTH);
+ zOpName = sqlite3OpcodeName(pOp->opcode);
+ nOpName = sqlite3Strlen30(zOpName);
+ if( zOpName[nOpName+1] ){
+ int seenCom = 0;
+ char c;
+ zSynopsis = zOpName += nOpName + 1;
+ if( strncmp(zSynopsis,"IF ",3)==0 ){
+ if( pOp->p5 & SQLITE_STOREP2 ){
+ sqlite3_snprintf(sizeof(zAlt), zAlt, "r[P2] = (%s)", zSynopsis+3);
+ }else{
+ sqlite3_snprintf(sizeof(zAlt), zAlt, "if %s goto P2", zSynopsis+3);
+ }
+ zSynopsis = zAlt;
+ }
+ for(ii=0; (c = zSynopsis[ii])!=0; ii++){
+ if( c=='P' ){
+ c = zSynopsis[++ii];
+ if( c=='4' ){
+ sqlite3_str_appendall(&x, zP4);
+ }else if( c=='X' ){
+ sqlite3_str_appendall(&x, pOp->zComment);
+ seenCom = 1;
+ }else{
+ int v1 = translateP(c, pOp);
+ int v2;
+ if( strncmp(zSynopsis+ii+1, "@P", 2)==0 ){
+ ii += 3;
+ v2 = translateP(zSynopsis[ii], pOp);
+ if( strncmp(zSynopsis+ii+1,"+1",2)==0 ){
+ ii += 2;
+ v2++;
+ }
+ if( v2<2 ){
+ sqlite3_str_appendf(&x, "%d", v1);
+ }else{
+ sqlite3_str_appendf(&x, "%d..%d", v1, v1+v2-1);
+ }
+ }else if( strncmp(zSynopsis+ii+1, "@NP", 3)==0 ){
+ sqlite3_context *pCtx = pOp->p4.pCtx;
+ if( pOp->p4type!=P4_FUNCCTX || pCtx->argc==1 ){
+ sqlite3_str_appendf(&x, "%d", v1);
+ }else if( pCtx->argc>1 ){
+ sqlite3_str_appendf(&x, "%d..%d", v1, v1+pCtx->argc-1);
+ }else{
+ assert( x.nChar>2 );
+ x.nChar -= 2;
+ ii++;
+ }
+ ii += 3;
+ }else{
+ sqlite3_str_appendf(&x, "%d", v1);
+ if( strncmp(zSynopsis+ii+1, "..P3", 4)==0 && pOp->p3==0 ){
+ ii += 4;
+ }
+ }
+ }
+ }else{
+ sqlite3_str_appendchar(&x, 1, c);
+ }
+ }
+ if( !seenCom && pOp->zComment ){
+ sqlite3_str_appendf(&x, "; %s", pOp->zComment);
+ }
+ }else if( pOp->zComment ){
+ sqlite3_str_appendall(&x, pOp->zComment);
+ }
+ if( (x.accError & SQLITE_NOMEM)!=0 && db!=0 ){
+ sqlite3OomFault(db);
+ }
+ return sqlite3StrAccumFinish(&x);
+}
+#endif /* SQLITE_ENABLE_EXPLAIN_COMMENTS */
+
+#if VDBE_DISPLAY_P4 && defined(SQLITE_ENABLE_CURSOR_HINTS)
+/*
+** Translate the P4.pExpr value for an OP_CursorHint opcode into text
+** that can be displayed in the P4 column of EXPLAIN output.
+*/
+static void displayP4Expr(StrAccum *p, Expr *pExpr){
+ const char *zOp = 0;
+ switch( pExpr->op ){
+ case TK_STRING:
+ sqlite3_str_appendf(p, "%Q", pExpr->u.zToken);
+ break;
+ case TK_INTEGER:
+ sqlite3_str_appendf(p, "%d", pExpr->u.iValue);
+ break;
+ case TK_NULL:
+ sqlite3_str_appendf(p, "NULL");
+ break;
+ case TK_REGISTER: {
+ sqlite3_str_appendf(p, "r[%d]", pExpr->iTable);
+ break;
+ }
+ case TK_COLUMN: {
+ if( pExpr->iColumn<0 ){
+ sqlite3_str_appendf(p, "rowid");
+ }else{
+ sqlite3_str_appendf(p, "c%d", (int)pExpr->iColumn);
+ }
+ break;
+ }
+ case TK_LT: zOp = "LT"; break;
+ case TK_LE: zOp = "LE"; break;
+ case TK_GT: zOp = "GT"; break;
+ case TK_GE: zOp = "GE"; break;
+ case TK_NE: zOp = "NE"; break;
+ case TK_EQ: zOp = "EQ"; break;
+ case TK_IS: zOp = "IS"; break;
+ case TK_ISNOT: zOp = "ISNOT"; break;
+ case TK_AND: zOp = "AND"; break;
+ case TK_OR: zOp = "OR"; break;
+ case TK_PLUS: zOp = "ADD"; break;
+ case TK_STAR: zOp = "MUL"; break;
+ case TK_MINUS: zOp = "SUB"; break;
+ case TK_REM: zOp = "REM"; break;
+ case TK_BITAND: zOp = "BITAND"; break;
+ case TK_BITOR: zOp = "BITOR"; break;
+ case TK_SLASH: zOp = "DIV"; break;
+ case TK_LSHIFT: zOp = "LSHIFT"; break;
+ case TK_RSHIFT: zOp = "RSHIFT"; break;
+ case TK_CONCAT: zOp = "CONCAT"; break;
+ case TK_UMINUS: zOp = "MINUS"; break;
+ case TK_UPLUS: zOp = "PLUS"; break;
+ case TK_BITNOT: zOp = "BITNOT"; break;
+ case TK_NOT: zOp = "NOT"; break;
+ case TK_ISNULL: zOp = "ISNULL"; break;
+ case TK_NOTNULL: zOp = "NOTNULL"; break;
+
+ default:
+ sqlite3_str_appendf(p, "%s", "expr");
+ break;
+ }
+
+ if( zOp ){
+ sqlite3_str_appendf(p, "%s(", zOp);
+ displayP4Expr(p, pExpr->pLeft);
+ if( pExpr->pRight ){
+ sqlite3_str_append(p, ",", 1);
+ displayP4Expr(p, pExpr->pRight);
+ }
+ sqlite3_str_append(p, ")", 1);
+ }
+}
+#endif /* VDBE_DISPLAY_P4 && defined(SQLITE_ENABLE_CURSOR_HINTS) */
+
+
+#if VDBE_DISPLAY_P4
+/*
+** Compute a string that describes the P4 parameter for an opcode.
+** The string is obtained from sqlite3_malloc() and must be freed by the
+** caller using sqlite3_free().
+*/
+SQLITE_PRIVATE char *sqlite3VdbeDisplayP4(sqlite3 *db, Op *pOp){
+ char *zP4 = 0;
+ StrAccum x;
+
+ sqlite3StrAccumInit(&x, 0, 0, 0, SQLITE_MAX_LENGTH);
+ switch( pOp->p4type ){
+ case P4_KEYINFO: {
+ int j;
+ KeyInfo *pKeyInfo = pOp->p4.pKeyInfo;
+ assert( pKeyInfo->aSortFlags!=0 );
+ sqlite3_str_appendf(&x, "k(%d", pKeyInfo->nKeyField);
+ for(j=0; j<pKeyInfo->nKeyField; j++){
+ CollSeq *pColl = pKeyInfo->aColl[j];
+ const char *zColl = pColl ? pColl->zName : "";
+ if( strcmp(zColl, "BINARY")==0 ) zColl = "B";
+ sqlite3_str_appendf(&x, ",%s%s%s",
+ (pKeyInfo->aSortFlags[j] & KEYINFO_ORDER_DESC) ? "-" : "",
+ (pKeyInfo->aSortFlags[j] & KEYINFO_ORDER_BIGNULL)? "N." : "",
+ zColl);
+ }
+ sqlite3_str_append(&x, ")", 1);
+ break;
+ }
+#ifdef SQLITE_ENABLE_CURSOR_HINTS
+ case P4_EXPR: {
+ displayP4Expr(&x, pOp->p4.pExpr);
+ break;
+ }
+#endif
+ case P4_COLLSEQ: {
+ static const char *const encnames[] = {"?", "8", "16LE", "16BE"};
+ CollSeq *pColl = pOp->p4.pColl;
+ assert( pColl->enc>=0 && pColl->enc<4 );
+ sqlite3_str_appendf(&x, "%.18s-%s", pColl->zName,
+ encnames[pColl->enc]);
+ break;
+ }
+ case P4_FUNCDEF: {
+ FuncDef *pDef = pOp->p4.pFunc;
+ sqlite3_str_appendf(&x, "%s(%d)", pDef->zName, pDef->nArg);
+ break;
+ }
+ case P4_FUNCCTX: {
+ FuncDef *pDef = pOp->p4.pCtx->pFunc;
+ sqlite3_str_appendf(&x, "%s(%d)", pDef->zName, pDef->nArg);
+ break;
+ }
+ case P4_INT64: {
+ sqlite3_str_appendf(&x, "%lld", *pOp->p4.pI64);
+ break;
+ }
+ case P4_INT32: {
+ sqlite3_str_appendf(&x, "%d", pOp->p4.i);
+ break;
+ }
+ case P4_REAL: {
+ sqlite3_str_appendf(&x, "%.16g", *pOp->p4.pReal);
+ break;
+ }
+ case P4_MEM: {
+ Mem *pMem = pOp->p4.pMem;
+ if( pMem->flags & MEM_Str ){
+ zP4 = pMem->z;
+ }else if( pMem->flags & (MEM_Int|MEM_IntReal) ){
+ sqlite3_str_appendf(&x, "%lld", pMem->u.i);
+ }else if( pMem->flags & MEM_Real ){
+ sqlite3_str_appendf(&x, "%.16g", pMem->u.r);
+ }else if( pMem->flags & MEM_Null ){
+ zP4 = "NULL";
+ }else{
+ assert( pMem->flags & MEM_Blob );
+ zP4 = "(blob)";
+ }
+ break;
+ }
+#ifndef SQLITE_OMIT_VIRTUALTABLE
+ case P4_VTAB: {
+ sqlite3_vtab *pVtab = pOp->p4.pVtab->pVtab;
+ sqlite3_str_appendf(&x, "vtab:%p", pVtab);
+ break;
+ }
+#endif
+ case P4_INTARRAY: {
+ u32 i;
+ u32 *ai = pOp->p4.ai;
+ u32 n = ai[0]; /* The first element of an INTARRAY is always the
+ ** count of the number of elements to follow */
+ for(i=1; i<=n; i++){
+ sqlite3_str_appendf(&x, "%c%u", (i==1 ? '[' : ','), ai[i]);
+ }
+ sqlite3_str_append(&x, "]", 1);
+ break;
+ }
+ case P4_SUBPROGRAM: {
+ zP4 = "program";
+ break;
+ }
+ case P4_DYNBLOB:
+ case P4_ADVANCE: {
+ break;
+ }
+ case P4_TABLE: {
+ zP4 = pOp->p4.pTab->zName;
+ break;
+ }
+ default: {
+ zP4 = pOp->p4.z;
+ }
+ }
+ if( zP4 ) sqlite3_str_appendall(&x, zP4);
+ if( (x.accError & SQLITE_NOMEM)!=0 ){
+ sqlite3OomFault(db);
+ }
+ return sqlite3StrAccumFinish(&x);
+}
+#endif /* VDBE_DISPLAY_P4 */
+
+/*
+** Declare to the Vdbe that the BTree object at db->aDb[i] is used.
+**
+** The prepared statements need to know in advance the complete set of
+** attached databases that will be used. A mask of these databases
+** is maintained in p->btreeMask. The p->lockMask value is the subset of
+** p->btreeMask of databases that will require a lock.
+*/
+SQLITE_PRIVATE void sqlite3VdbeUsesBtree(Vdbe *p, int i){
+ assert( i>=0 && i<p->db->nDb && i<(int)sizeof(yDbMask)*8 );
+ assert( i<(int)sizeof(p->btreeMask)*8 );
+ DbMaskSet(p->btreeMask, i);
+ if( i!=1 && sqlite3BtreeSharable(p->db->aDb[i].pBt) ){
+ DbMaskSet(p->lockMask, i);
+ }
+}
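+/* Illustrative sketch (hypothetical): code generators invoke this routine
+** whenever they emit an opcode that touches database iDb, for example:
+**
+**   sqlite3VdbeAddOp3(v, OP_OpenRead, iCur, iRootPage, iDb);
+**   sqlite3VdbeUsesBtree(v, iDb);
+**
+** so that sqlite3VdbeEnter() below knows which BtShared mutexes to take.
+*/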
+
+#if !defined(SQLITE_OMIT_SHARED_CACHE)
+/*
+** If SQLite is compiled to support shared-cache mode and to be threadsafe,
+** this routine obtains the mutex associated with each BtShared structure
+** that may be accessed by the VM passed as an argument. In doing so it also
+** sets the BtShared.db member of each of the BtShared structures, ensuring
+** that the correct busy-handler callback is invoked if required.
+**
+** If SQLite is not threadsafe but does support shared-cache mode, then
+** sqlite3BtreeEnter() is invoked to set the BtShared.db variables
+** of all of BtShared structures accessible via the database handle
+** associated with the VM.
+**
+** If SQLite is not threadsafe and does not support shared-cache mode, this
+** function is a no-op.
+**
+** The p->btreeMask field is a bitmask of all btrees that the prepared
+** statement p will ever use. Let N be the number of bits in p->btreeMask
+** corresponding to btrees that use shared cache. Then the runtime of
+** this routine is N*N. But as N is rarely more than 1, this should not
+** be a problem.
+*/
+SQLITE_PRIVATE void sqlite3VdbeEnter(Vdbe *p){
+ int i;
+ sqlite3 *db;
+ Db *aDb;
+ int nDb;
+ if( DbMaskAllZero(p->lockMask) ) return; /* The common case */
+ db = p->db;
+ aDb = db->aDb;
+ nDb = db->nDb;
+ for(i=0; i<nDb; i++){
+ if( i!=1 && DbMaskTest(p->lockMask,i) && ALWAYS(aDb[i].pBt!=0) ){
+ sqlite3BtreeEnter(aDb[i].pBt);
+ }
+ }
+}
+#endif
+
+#if !defined(SQLITE_OMIT_SHARED_CACHE) && SQLITE_THREADSAFE>0
+/*
+** Unlock all of the btrees previously locked by a call to sqlite3VdbeEnter().
+*/
+static SQLITE_NOINLINE void vdbeLeave(Vdbe *p){
+ int i;
+ sqlite3 *db;
+ Db *aDb;
+ int nDb;
+ db = p->db;
+ aDb = db->aDb;
+ nDb = db->nDb;
+ for(i=0; i<nDb; i++){
+ if( i!=1 && DbMaskTest(p->lockMask,i) && ALWAYS(aDb[i].pBt!=0) ){
+ sqlite3BtreeLeave(aDb[i].pBt);
+ }
+ }
+}
+SQLITE_PRIVATE void sqlite3VdbeLeave(Vdbe *p){
+ if( DbMaskAllZero(p->lockMask) ) return; /* The common case */
+ vdbeLeave(p);
+}
+#endif
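+/* Illustrative usage pattern (hypothetical): the VM brackets each burst of
+** btree work with
+**
+**   sqlite3VdbeEnter(p);
+**   ...execute opcodes that touch shared-cache btrees...
+**   sqlite3VdbeLeave(p);
+**
+** Both calls return immediately when p->lockMask is zero, the common case.
+*/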
+
+#if defined(VDBE_PROFILE) || defined(SQLITE_DEBUG)
+/*
+** Print a single opcode. This routine is used for debugging only.
+*/
+SQLITE_PRIVATE void sqlite3VdbePrintOp(FILE *pOut, int pc, VdbeOp *pOp){
+ char *zP4;
+ char *zCom;
+ sqlite3 dummyDb;
+ static const char *zFormat1 = "%4d %-13s %4d %4d %4d %-13s %.2X %s\n";
+ if( pOut==0 ) pOut = stdout;
+ sqlite3BeginBenignMalloc();
+ dummyDb.mallocFailed = 1;
+ zP4 = sqlite3VdbeDisplayP4(&dummyDb, pOp);
+#ifdef SQLITE_ENABLE_EXPLAIN_COMMENTS
+ zCom = sqlite3VdbeDisplayComment(0, pOp, zP4);
+#else
+ zCom = 0;
+#endif
+ /* NB: The sqlite3OpcodeName() function is implemented by code created
+ ** by the mkopcodeh.awk and mkopcodec.awk scripts which extract the
+ ** information from the vdbe.c source text */
+ fprintf(pOut, zFormat1, pc,
+ sqlite3OpcodeName(pOp->opcode), pOp->p1, pOp->p2, pOp->p3,
+ zP4 ? zP4 : "", pOp->p5,
+ zCom ? zCom : ""
+ );
+ fflush(pOut);
+ sqlite3_free(zP4);
+ sqlite3_free(zCom);
+ sqlite3EndBenignMalloc();
+}
+#endif
+
+/*
+** Initialize an array of N Mem elements.
+*/
+static void initMemArray(Mem *p, int N, sqlite3 *db, u16 flags){
+ while( (N--)>0 ){
+ p->db = db;
+ p->flags = flags;
+ p->szMalloc = 0;
+#ifdef SQLITE_DEBUG
+ p->pScopyFrom = 0;
+#endif
+ p++;
+ }
+}
+
+/*
+** Release an array of N Mem elements
+*/
+static void releaseMemArray(Mem *p, int N){
+ if( p && N ){
+ Mem *pEnd = &p[N];
+ sqlite3 *db = p->db;
+ if( db->pnBytesFreed ){
+ do{
+ if( p->szMalloc ) sqlite3DbFree(db, p->zMalloc);
+ }while( (++p)<pEnd );
+ return;
+ }
+ do{
+ assert( (&p[1])==pEnd || p[0].db==p[1].db );
+ assert( sqlite3VdbeCheckMemInvariants(p) );
+
+ /* This block is really an inlined version of sqlite3VdbeMemRelease()
+ ** that takes advantage of the fact that the memory cell value is
+ ** being set to NULL after releasing any dynamic resources.
+ **
+ ** The justification for duplicating code is that according to
+ ** callgrind, this causes a certain test case to hit the CPU 4.7
+ ** percent less (x86 linux, gcc version 4.1.2, -O6) than if
+ ** sqlite3MemRelease() were called from here. With -O2, this jumps
+ ** to 6.6 percent. The test case is inserting 1000 rows into a table
+ ** with no indexes using a single prepared INSERT statement, bind()
+ ** and reset(). Inserts are grouped into a transaction.
+ */
+ testcase( p->flags & MEM_Agg );
+ testcase( p->flags & MEM_Dyn );
+ testcase( p->xDel==sqlite3VdbeFrameMemDel );
+ if( p->flags&(MEM_Agg|MEM_Dyn) ){
+ sqlite3VdbeMemRelease(p);
+ }else if( p->szMalloc ){
+ sqlite3DbFreeNN(db, p->zMalloc);
+ p->szMalloc = 0;
+ }
+
+ p->flags = MEM_Undefined;
+ }while( (++p)<pEnd );
+ }
+}
+
+#ifdef SQLITE_DEBUG
+/*
+** Verify that pFrame is a valid VdbeFrame pointer. Return true if it is
+** and false if something is wrong.
+**
+** This routine is intended for use inside of assert() statements only.
+*/
+SQLITE_PRIVATE int sqlite3VdbeFrameIsValid(VdbeFrame *pFrame){
+ if( pFrame->iFrameMagic!=SQLITE_FRAME_MAGIC ) return 0;
+ return 1;
+}
+#endif
+
+
+/*
+** This is a destructor on a Mem object (which is really an sqlite3_value)
+** that deletes the Frame object that is attached to it as a blob.
+**
+** This routine does not delete the Frame right away. It merely adds the
+** frame to a list of frames to be deleted when the Vdbe halts.
+*/
+SQLITE_PRIVATE void sqlite3VdbeFrameMemDel(void *pArg){
+ VdbeFrame *pFrame = (VdbeFrame*)pArg;
+ assert( sqlite3VdbeFrameIsValid(pFrame) );
+ pFrame->pParent = pFrame->v->pDelFrame;
+ pFrame->v->pDelFrame = pFrame;
+}
+
+#if defined(SQLITE_ENABLE_BYTECODE_VTAB) || !defined(SQLITE_OMIT_EXPLAIN)
+/*
+** Locate the next opcode to be displayed in EXPLAIN or EXPLAIN
+** QUERY PLAN output.
+**
+** Return SQLITE_ROW on success. Return SQLITE_DONE if there are no
+** more opcodes to be displayed.
+*/
+SQLITE_PRIVATE int sqlite3VdbeNextOpcode(
+ Vdbe *p, /* The statement being explained */
+ Mem *pSub, /* Storage for keeping track of subprogram nesting */
+ int eMode, /* 0: normal. 1: EQP. 2: TablesUsed */
+ int *piPc, /* IN/OUT: Current rowid. Overwritten with next rowid */
+ int *piAddr, /* OUT: Write index into (*paOp)[] here */
+ Op **paOp /* OUT: Write the opcode array here */
+){
+ int nRow; /* Stop when row count reaches this */
+ int nSub = 0; /* Number of sub-vdbes seen so far */
+ SubProgram **apSub = 0; /* Array of sub-vdbes */
+ int i; /* Next instruction address */
+ int rc = SQLITE_OK; /* Result code */
+ Op *aOp = 0; /* Opcode array */
+ int iPc; /* Rowid. Copy of value in *piPc */
+
+ /* When the number of output rows reaches nRow, that means the
+ ** listing has finished and sqlite3_step() should return SQLITE_DONE.
+ ** nRow is the sum of the number of rows in the main program, plus
+ ** the sum of the number of rows in all trigger subprograms encountered
+ ** so far. The nRow value will increase as new trigger subprograms are
+ ** encountered, but p->pc will eventually catch up to nRow.
+ */
+ nRow = p->nOp;
+ if( pSub!=0 ){
+ if( pSub->flags&MEM_Blob ){
+ /* pSub is initially NULL. It is initialized to a BLOB by
+ ** the P4_SUBPROGRAM processing logic below */
+ nSub = pSub->n/sizeof(Vdbe*);
+ apSub = (SubProgram **)pSub->z;
+ }
+ for(i=0; i<nSub; i++){
+ nRow += apSub[i]->nOp;
+ }
+ }
+ iPc = *piPc;
+ while(1){ /* Loop exits via break */
+ i = iPc++;
+ if( i>=nRow ){
+ p->rc = SQLITE_OK;
+ rc = SQLITE_DONE;
+ break;
+ }
+ if( i<p->nOp ){
+ /* The rowid is small enough that we are still in the
+ ** main program. */
+ aOp = p->aOp;
+ }else{
+ /* We are currently listing subprograms. Figure out which one and
+ ** pick up the appropriate opcode. */
+ int j;
+ i -= p->nOp;
+ assert( apSub!=0 );
+ assert( nSub>0 );
+ for(j=0; i>=apSub[j]->nOp; j++){
+ i -= apSub[j]->nOp;
+ assert( i<apSub[j]->nOp || j+1<nSub );
+ }
+ aOp = apSub[j]->aOp;
+ }
+
+ /* When an OP_Program opcode is encountered (the only opcode that has
+ ** a P4_SUBPROGRAM argument), expand the size of the array of subprograms
+ ** kept in p->aMem[9].z to hold the new program - assuming this subprogram
+ ** has not already been seen.
+ */
+ if( pSub!=0 && aOp[i].p4type==P4_SUBPROGRAM ){
+ int nByte = (nSub+1)*sizeof(SubProgram*);
+ int j;
+ for(j=0; j<nSub; j++){
+ if( apSub[j]==aOp[i].p4.pProgram ) break;
+ }
+ if( j==nSub ){
+ p->rc = sqlite3VdbeMemGrow(pSub, nByte, nSub!=0);
+ if( p->rc!=SQLITE_OK ){
+ rc = SQLITE_ERROR;
+ break;
+ }
+ apSub = (SubProgram **)pSub->z;
+ apSub[nSub++] = aOp[i].p4.pProgram;
+ MemSetTypeFlag(pSub, MEM_Blob);
+ pSub->n = nSub*sizeof(SubProgram*);
+ nRow += aOp[i].p4.pProgram->nOp;
+ }
+ }
+ if( eMode==0 ) break;
+#ifdef SQLITE_ENABLE_BYTECODE_VTAB
+ if( eMode==2 ){
+ Op *pOp = aOp + i;
+ if( pOp->opcode==OP_OpenRead ) break;
+ if( pOp->opcode==OP_OpenWrite && (pOp->p5 & OPFLAG_P2ISREG)==0 ) break;
+ if( pOp->opcode==OP_ReopenIdx ) break;
+ }else
+#endif
+ {
+ assert( eMode==1 );
+ if( aOp[i].opcode==OP_Explain ) break;
+ if( aOp[i].opcode==OP_Init && iPc>1 ) break;
+ }
+ }
+ *piPc = iPc;
+ *piAddr = i;
+ *paOp = aOp;
+ return rc;
+}
+#endif /* SQLITE_ENABLE_BYTECODE_VTAB || !SQLITE_OMIT_EXPLAIN */
+
+
+/*
+** Delete a VdbeFrame object and its contents. VdbeFrame objects are
+** allocated by the OP_Program opcode in sqlite3VdbeExec().
+*/
+SQLITE_PRIVATE void sqlite3VdbeFrameDelete(VdbeFrame *p){
+ int i;
+ Mem *aMem = VdbeFrameMem(p);
+ VdbeCursor **apCsr = (VdbeCursor **)&aMem[p->nChildMem];
+ assert( sqlite3VdbeFrameIsValid(p) );
+ for(i=0; i<p->nChildCsr; i++){
+ sqlite3VdbeFreeCursor(p->v, apCsr[i]);
+ }
+ releaseMemArray(aMem, p->nChildMem);
+ sqlite3VdbeDeleteAuxData(p->v->db, &p->pAuxData, -1, 0);
+ sqlite3DbFree(p->v->db, p);
+}
+
+#ifndef SQLITE_OMIT_EXPLAIN
+/*
+** Give a listing of the program in the virtual machine.
+**
+** The interface is the same as sqlite3VdbeExec(). But instead of
+** running the code, it invokes the callback once for each instruction.
+** This feature is used to implement "EXPLAIN".
+**
+** When p->explain==1, each instruction is listed. When
+** p->explain==2, only OP_Explain instructions are listed and these
+** are shown in a different format. p->explain==2 is used to implement
+** EXPLAIN QUERY PLAN.
+** 2018-04-24: In p->explain==2 mode, the OP_Init opcodes of triggers
+** are also shown, so that the boundaries between the main program and
+** each trigger are clear.
+**
+** When p->explain==1, first the main program is listed, then each of
+** the trigger subprograms are listed one by one.
+*/
+SQLITE_PRIVATE int sqlite3VdbeList(
+ Vdbe *p /* The VDBE */
+){
+ Mem *pSub = 0; /* Memory cell hold array of subprogs */
+ sqlite3 *db = p->db; /* The database connection */
+ int i; /* Loop counter */
+ int rc = SQLITE_OK; /* Return code */
+ Mem *pMem = &p->aMem[1]; /* First Mem of result set */
+ int bListSubprogs = (p->explain==1 || (db->flags & SQLITE_TriggerEQP)!=0);
+ Op *aOp; /* Array of opcodes */
+ Op *pOp; /* Current opcode */
+
+ assert( p->explain );
+ assert( p->magic==VDBE_MAGIC_RUN );
+ assert( p->rc==SQLITE_OK || p->rc==SQLITE_BUSY || p->rc==SQLITE_NOMEM );
+
+ /* Even though this routine does not use dynamic strings for
+ ** the result, result columns may become dynamic if the user calls
+ ** sqlite3_column_text16(), causing a translation to UTF-16 encoding.
+ */
+ releaseMemArray(pMem, 8);
+ p->pResultSet = 0;
+
+ if( p->rc==SQLITE_NOMEM ){
+ /* This happens if a malloc() inside a call to sqlite3_column_text() or
+ ** sqlite3_column_text16() failed. */
+ sqlite3OomFault(db);
+ return SQLITE_ERROR;
+ }
+
+ if( bListSubprogs ){
+ /* The first 8 memory cells are used for the result set. So we will
+ ** commandeer the 9th cell to use as storage for an array of pointers
+ ** to trigger subprograms. The VDBE is guaranteed to have at least 9
+ ** cells. */
+ assert( p->nMem>9 );
+ pSub = &p->aMem[9];
+ }else{
+ pSub = 0;
+ }
+
+ /* Figure out which opcode is next to display */
+ rc = sqlite3VdbeNextOpcode(p, pSub, p->explain==2, &p->pc, &i, &aOp);
+
+ if( rc==SQLITE_OK ){
+ pOp = aOp + i;
+ if( AtomicLoad(&db->u1.isInterrupted) ){
+ p->rc = SQLITE_INTERRUPT;
+ rc = SQLITE_ERROR;
+ sqlite3VdbeError(p, sqlite3ErrStr(p->rc));
+ }else{
+ char *zP4 = sqlite3VdbeDisplayP4(db, pOp);
+ if( p->explain==2 ){
+ sqlite3VdbeMemSetInt64(pMem, pOp->p1);
+ sqlite3VdbeMemSetInt64(pMem+1, pOp->p2);
+ sqlite3VdbeMemSetInt64(pMem+2, pOp->p3);
+ sqlite3VdbeMemSetStr(pMem+3, zP4, -1, SQLITE_UTF8, sqlite3_free);
+ p->nResColumn = 4;
+ }else{
+ sqlite3VdbeMemSetInt64(pMem+0, i);
+ sqlite3VdbeMemSetStr(pMem+1, (char*)sqlite3OpcodeName(pOp->opcode),
+ -1, SQLITE_UTF8, SQLITE_STATIC);
+ sqlite3VdbeMemSetInt64(pMem+2, pOp->p1);
+ sqlite3VdbeMemSetInt64(pMem+3, pOp->p2);
+ sqlite3VdbeMemSetInt64(pMem+4, pOp->p3);
+ /* pMem+5 for p4 is done last */
+ sqlite3VdbeMemSetInt64(pMem+6, pOp->p5);
+#ifdef SQLITE_ENABLE_EXPLAIN_COMMENTS
+ {
+ char *zCom = sqlite3VdbeDisplayComment(db, pOp, zP4);
+ sqlite3VdbeMemSetStr(pMem+7, zCom, -1, SQLITE_UTF8, sqlite3_free);
+ }
+#else
+ sqlite3VdbeMemSetNull(pMem+7);
+#endif
+ sqlite3VdbeMemSetStr(pMem+5, zP4, -1, SQLITE_UTF8, sqlite3_free);
+ p->nResColumn = 8;
+ }
+ p->pResultSet = pMem;
+ if( db->mallocFailed ){
+ p->rc = SQLITE_NOMEM;
+ rc = SQLITE_ERROR;
+ }else{
+ p->rc = SQLITE_OK;
+ rc = SQLITE_ROW;
+ }
+ }
+ }
+ return rc;
+}
+#endif /* SQLITE_OMIT_EXPLAIN */
+
+#ifdef SQLITE_DEBUG
+/*
+** Print the SQL that was used to generate a VDBE program.
+*/
+SQLITE_PRIVATE void sqlite3VdbePrintSql(Vdbe *p){
+ const char *z = 0;
+ if( p->zSql ){
+ z = p->zSql;
+ }else if( p->nOp>=1 ){
+ const VdbeOp *pOp = &p->aOp[0];
+ if( pOp->opcode==OP_Init && pOp->p4.z!=0 ){
+ z = pOp->p4.z;
+ while( sqlite3Isspace(*z) ) z++;
+ }
+ }
+ if( z ) printf("SQL: [%s]\n", z);
+}
+#endif
+
+#if !defined(SQLITE_OMIT_TRACE) && defined(SQLITE_ENABLE_IOTRACE)
+/*
+** Print an IOTRACE message showing SQL content.
+*/
+SQLITE_PRIVATE void sqlite3VdbeIOTraceSql(Vdbe *p){
+ int nOp = p->nOp;
+ VdbeOp *pOp;
+ if( sqlite3IoTrace==0 ) return;
+ if( nOp<1 ) return;
+ pOp = &p->aOp[0];
+ if( pOp->opcode==OP_Init && pOp->p4.z!=0 ){
+ int i, j;
+ char z[1000];
+ sqlite3_snprintf(sizeof(z), z, "%s", pOp->p4.z);
+ for(i=0; sqlite3Isspace(z[i]); i++){}
+ for(j=0; z[i]; i++){
+ if( sqlite3Isspace(z[i]) ){
+ if( z[i-1]!=' ' ){
+ z[j++] = ' ';
+ }
+ }else{
+ z[j++] = z[i];
+ }
+ }
+ z[j] = 0;
+ sqlite3IoTrace("SQL %s\n", z);
+ }
+}
+#endif /* !SQLITE_OMIT_TRACE && SQLITE_ENABLE_IOTRACE */
+
+/* An instance of this object describes bulk memory available for use
+** by subcomponents of a prepared statement. Space is allocated out
+** of a ReusableSpace object by the allocSpace() routine below.
+*/
+struct ReusableSpace {
+ u8 *pSpace; /* Available memory */
+ sqlite3_int64 nFree; /* Bytes of available memory */
+ sqlite3_int64 nNeeded; /* Total bytes that could not be allocated */
+};
+
+/* Try to allocate nByte bytes of 8-byte aligned bulk memory for pBuf
+** from the ReusableSpace object. Return a pointer to the allocated
+** memory on success. If insufficient memory is available in the
+** ReusableSpace object, increase the ReusableSpace.nNeeded
+** value by the amount needed and return NULL.
+**
+** If pBuf is not initially NULL, that means that the memory has already
+** been allocated by a prior call to this routine, so just return a copy
+** of pBuf and leave ReusableSpace unchanged.
+**
+** This allocator is employed to repurpose unused slots at the end of the
+** opcode array of a prepared statement for other memory needs of that
+** statement.
+*/
+static void *allocSpace(
+ struct ReusableSpace *p, /* Bulk memory available for allocation */
+ void *pBuf, /* Pointer to a prior allocation */
+ sqlite3_int64 nByte /* Bytes of memory needed */
+){
+ assert( EIGHT_BYTE_ALIGNMENT(p->pSpace) );
+ if( pBuf==0 ){
+ nByte = ROUND8(nByte);
+ if( nByte <= p->nFree ){
+ p->nFree -= nByte;
+ pBuf = &p->pSpace[p->nFree];
+ }else{
+ p->nNeeded += nByte;
+ }
+ }
+ assert( EIGHT_BYTE_ALIGNMENT(pBuf) );
+ return pBuf;
+}
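+/* Illustrative sketch of the two-pass idiom used by sqlite3VdbeMakeReady()
+** below (shapes and sizes hypothetical):
+**
+**   x.nNeeded = 0;
+**   pA = allocSpace(&x, 0, nA);      -- pass 1: carve from the array tail
+**   pB = allocSpace(&x, 0, nB);
+**   if( x.nNeeded ){                 -- the tail was too small
+**     x.pSpace = sqlite3DbMallocRawNN(db, x.nNeeded);
+**     x.nFree = x.nNeeded;
+**     pA = allocSpace(&x, pA, nA);   -- pass 2: only NULLs are re-satisfied
+**     pB = allocSpace(&x, pB, nB);
+**   }
+*/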
+
+/*
+** Rewind the VDBE back to the beginning in preparation for
+** running it.
+*/
+SQLITE_PRIVATE void sqlite3VdbeRewind(Vdbe *p){
+#if defined(SQLITE_DEBUG) || defined(VDBE_PROFILE)
+ int i;
+#endif
+ assert( p!=0 );
+ assert( p->magic==VDBE_MAGIC_INIT || p->magic==VDBE_MAGIC_RESET );
+
+ /* There should be at least one opcode.
+ */
+ assert( p->nOp>0 );
+
+ /* Set the magic to VDBE_MAGIC_RUN sooner rather than later. */
+ p->magic = VDBE_MAGIC_RUN;
+
+#ifdef SQLITE_DEBUG
+ for(i=0; i<p->nMem; i++){
+ assert( p->aMem[i].db==p->db );
+ }
+#endif
+ p->pc = -1;
+ p->rc = SQLITE_OK;
+ p->errorAction = OE_Abort;
+ p->nChange = 0;
+ p->cacheCtr = 1;
+ p->minWriteFileFormat = 255;
+ p->iStatement = 0;
+ p->nFkConstraint = 0;
+#ifdef VDBE_PROFILE
+ for(i=0; i<p->nOp; i++){
+ p->aOp[i].cnt = 0;
+ p->aOp[i].cycles = 0;
+ }
+#endif
+}
+
+/*
+** Prepare a virtual machine for execution for the first time after
+** creating the virtual machine. This involves things such
+** as allocating registers and initializing the program counter.
+** After the VDBE has been prepped, it can be executed by one or more
+** calls to sqlite3VdbeExec().
+**
+** This function may be called exactly once on each virtual machine.
+** After this routine is called the VM has been "packaged" and is ready
+** to run. After this routine is called, further calls to
+** sqlite3VdbeAddOp() functions are prohibited. This routine disconnects
+** the Vdbe from the Parse object that helped generate it so that the
+** Vdbe becomes an independent entity and the Parse object can be
+** destroyed.
+**
+** Use the sqlite3VdbeRewind() procedure to restore a virtual machine back
+** to its initial state after it has been run.
+*/
+SQLITE_PRIVATE void sqlite3VdbeMakeReady(
+ Vdbe *p, /* The VDBE */
+ Parse *pParse /* Parsing context */
+){
+ sqlite3 *db; /* The database connection */
+ int nVar; /* Number of parameters */
+ int nMem; /* Number of VM memory registers */
+ int nCursor; /* Number of cursors required */
+ int nArg; /* Number of arguments in subprograms */
+ int n; /* Loop counter */
+ struct ReusableSpace x; /* Reusable bulk memory */
+
+ assert( p!=0 );
+ assert( p->nOp>0 );
+ assert( pParse!=0 );
+ assert( p->magic==VDBE_MAGIC_INIT );
+ assert( pParse==p->pParse );
+ db = p->db;
+ assert( db->mallocFailed==0 );
+ nVar = pParse->nVar;
+ nMem = pParse->nMem;
+ nCursor = pParse->nTab;
+ nArg = pParse->nMaxArg;
+
+ /* Each cursor uses a memory cell. The first cursor (cursor 0) can
+ ** use aMem[0] which is not otherwise used by the VDBE program. Allocate
+ ** space at the end of aMem[] for cursors 1 and greater.
+ ** See also: allocateCursor().
+ */
+ nMem += nCursor;
+ if( nCursor==0 && nMem>0 ) nMem++; /* Space for aMem[0] even if not used */
+
+ /* Figure out how much reusable memory is available at the end of the
+ ** opcode array. This extra memory will be reallocated for other elements
+ ** of the prepared statement.
+ */
+ n = ROUND8(sizeof(Op)*p->nOp); /* Bytes of opcode memory used */
+ x.pSpace = &((u8*)p->aOp)[n]; /* Unused opcode memory */
+ assert( EIGHT_BYTE_ALIGNMENT(x.pSpace) );
+ x.nFree = ROUNDDOWN8(pParse->szOpAlloc - n); /* Bytes of unused memory */
+ assert( x.nFree>=0 );
+ assert( EIGHT_BYTE_ALIGNMENT(&x.pSpace[x.nFree]) );
+
+ resolveP2Values(p, &nArg);
+ p->usesStmtJournal = (u8)(pParse->isMultiWrite && pParse->mayAbort);
+ if( pParse->explain ){
+ static const char * const azColName[] = {
+ "addr", "opcode", "p1", "p2", "p3", "p4", "p5", "comment",
+ "id", "parent", "notused", "detail"
+ };
+ int iFirst, mx, i;
+ if( nMem<10 ) nMem = 10;
+ p->explain = pParse->explain;
+ if( pParse->explain==2 ){
+ sqlite3VdbeSetNumCols(p, 4);
+ iFirst = 8;
+ mx = 12;
+ }else{
+ sqlite3VdbeSetNumCols(p, 8);
+ iFirst = 0;
+ mx = 8;
+ }
+ for(i=iFirst; i<mx; i++){
+ sqlite3VdbeSetColName(p, i-iFirst, COLNAME_NAME,
+ azColName[i], SQLITE_STATIC);
+ }
+ }
+ p->expired = 0;
+
+ /* Memory for registers, parameters, cursor, etc, is allocated in one or two
+ ** passes. On the first pass, we try to reuse unused memory at the
+ ** end of the opcode array. If we are unable to satisfy all memory
+ ** requirements by reusing the opcode array tail, then the second
+ ** pass will fill in the remainder using a fresh memory allocation.
+ **
+ ** This two-pass approach reuses as much memory as possible from the
+ ** leftover space at the end of the opcode array, which can significantly
+ ** reduce the amount of memory held by a prepared statement.
+ */
+ x.nNeeded = 0;
+ p->aMem = allocSpace(&x, 0, nMem*sizeof(Mem));
+ p->aVar = allocSpace(&x, 0, nVar*sizeof(Mem));
+ p->apArg = allocSpace(&x, 0, nArg*sizeof(Mem*));
+ p->apCsr = allocSpace(&x, 0, nCursor*sizeof(VdbeCursor*));
+#ifdef SQLITE_ENABLE_STMT_SCANSTATUS
+ p->anExec = allocSpace(&x, 0, p->nOp*sizeof(i64));
+#endif
+ if( x.nNeeded ){
+ x.pSpace = p->pFree = sqlite3DbMallocRawNN(db, x.nNeeded);
+ x.nFree = x.nNeeded;
+ if( !db->mallocFailed ){
+ p->aMem = allocSpace(&x, p->aMem, nMem*sizeof(Mem));
+ p->aVar = allocSpace(&x, p->aVar, nVar*sizeof(Mem));
+ p->apArg = allocSpace(&x, p->apArg, nArg*sizeof(Mem*));
+ p->apCsr = allocSpace(&x, p->apCsr, nCursor*sizeof(VdbeCursor*));
+#ifdef SQLITE_ENABLE_STMT_SCANSTATUS
+ p->anExec = allocSpace(&x, p->anExec, p->nOp*sizeof(i64));
+#endif
+ }
+ }
+
+ p->pVList = pParse->pVList;
+ pParse->pVList = 0;
+ if( db->mallocFailed ){
+ p->nVar = 0;
+ p->nCursor = 0;
+ p->nMem = 0;
+ }else{
+ p->nCursor = nCursor;
+ p->nVar = (ynVar)nVar;
+ initMemArray(p->aVar, nVar, db, MEM_Null);
+ p->nMem = nMem;
+ initMemArray(p->aMem, nMem, db, MEM_Undefined);
+ memset(p->apCsr, 0, nCursor*sizeof(VdbeCursor*));
+#ifdef SQLITE_ENABLE_STMT_SCANSTATUS
+ memset(p->anExec, 0, p->nOp*sizeof(i64));
+#endif
+ }
+ sqlite3VdbeRewind(p);
+}
+
+/*
+** Close a VDBE cursor and release all the resources that cursor
+** happens to hold.
+*/
+SQLITE_PRIVATE void sqlite3VdbeFreeCursor(Vdbe *p, VdbeCursor *pCx){
+ if( pCx==0 ){
+ return;
+ }
+ assert( pCx->pBtx==0 || pCx->eCurType==CURTYPE_BTREE );
+ switch( pCx->eCurType ){
+ case CURTYPE_SORTER: {
+ sqlite3VdbeSorterClose(p->db, pCx);
+ break;
+ }
+ case CURTYPE_BTREE: {
+ if( pCx->isEphemeral ){
+ if( pCx->pBtx ) sqlite3BtreeClose(pCx->pBtx);
+ /* The pCx->uc.pCursor, if it exists, will be closed automatically
+ ** by the call above. */
+ }else{
+ assert( pCx->uc.pCursor!=0 );
+ sqlite3BtreeCloseCursor(pCx->uc.pCursor);
+ }
+ break;
+ }
+#ifndef SQLITE_OMIT_VIRTUALTABLE
+ case CURTYPE_VTAB: {
+ sqlite3_vtab_cursor *pVCur = pCx->uc.pVCur;
+ const sqlite3_module *pModule = pVCur->pVtab->pModule;
+ assert( pVCur->pVtab->nRef>0 );
+ pVCur->pVtab->nRef--;
+ pModule->xClose(pVCur);
+ break;
+ }
+#endif
+ }
+}
+
+/*
+** Close all cursors in the current frame.
+*/
+static void closeCursorsInFrame(Vdbe *p){
+ if( p->apCsr ){
+ int i;
+ for(i=0; i<p->nCursor; i++){
+ VdbeCursor *pC = p->apCsr[i];
+ if( pC ){
+ sqlite3VdbeFreeCursor(p, pC);
+ p->apCsr[i] = 0;
+ }
+ }
+ }
+}
+
+/*
+** Copy the values stored in the VdbeFrame structure to its Vdbe. This
+** is used, for example, when a trigger sub-program is halted to restore
+** control to the main program.
+*/
+SQLITE_PRIVATE int sqlite3VdbeFrameRestore(VdbeFrame *pFrame){
+ Vdbe *v = pFrame->v;
+ closeCursorsInFrame(v);
+#ifdef SQLITE_ENABLE_STMT_SCANSTATUS
+ v->anExec = pFrame->anExec;
+#endif
+ v->aOp = pFrame->aOp;
+ v->nOp = pFrame->nOp;
+ v->aMem = pFrame->aMem;
+ v->nMem = pFrame->nMem;
+ v->apCsr = pFrame->apCsr;
+ v->nCursor = pFrame->nCursor;
+ v->db->lastRowid = pFrame->lastRowid;
+ v->nChange = pFrame->nChange;
+ v->db->nChange = pFrame->nDbChange;
+ sqlite3VdbeDeleteAuxData(v->db, &v->pAuxData, -1, 0);
+ v->pAuxData = pFrame->pAuxData;
+ pFrame->pAuxData = 0;
+ return pFrame->pc;
+}
+
+/*
+** Close all cursors.
+**
+** Also release any dynamic memory held by the VM in the Vdbe.aMem memory
+** cell array. This is necessary as the memory cell array may contain
+** pointers to VdbeFrame objects, which may in turn contain pointers to
+** open cursors.
+*/
+static void closeAllCursors(Vdbe *p){
+ if( p->pFrame ){
+ VdbeFrame *pFrame;
+ for(pFrame=p->pFrame; pFrame->pParent; pFrame=pFrame->pParent);
+ sqlite3VdbeFrameRestore(pFrame);
+ p->pFrame = 0;
+ p->nFrame = 0;
+ }
+ assert( p->nFrame==0 );
+ closeCursorsInFrame(p);
+ if( p->aMem ){
+ releaseMemArray(p->aMem, p->nMem);
+ }
+ while( p->pDelFrame ){
+ VdbeFrame *pDel = p->pDelFrame;
+ p->pDelFrame = pDel->pParent;
+ sqlite3VdbeFrameDelete(pDel);
+ }
+
+ /* Delete any auxdata allocations made by the VM */
+ if( p->pAuxData ) sqlite3VdbeDeleteAuxData(p->db, &p->pAuxData, -1, 0);
+ assert( p->pAuxData==0 );
+}
+
+/*
+** Set the number of result columns that will be returned by this SQL
+** statement. This is now set at compile time, rather than during
+** execution of the vdbe program so that sqlite3_column_count() can
+** be called on an SQL statement before sqlite3_step().
+*/
+SQLITE_PRIVATE void sqlite3VdbeSetNumCols(Vdbe *p, int nResColumn){
+ int n;
+ sqlite3 *db = p->db;
+
+ if( p->nResColumn ){
+ releaseMemArray(p->aColName, p->nResColumn*COLNAME_N);
+ sqlite3DbFree(db, p->aColName);
+ }
+ n = nResColumn*COLNAME_N;
+ p->nResColumn = (u16)nResColumn;
+ p->aColName = (Mem*)sqlite3DbMallocRawNN(db, sizeof(Mem)*n );
+ if( p->aColName==0 ) return;
+ initMemArray(p->aColName, n, db, MEM_Null);
+}
+
+/*
+** Set the name of the idx'th column to be returned by the SQL statement.
+** zName must be a pointer to a nul terminated string.
+**
+** This call must be made after a call to sqlite3VdbeSetNumCols().
+**
+** The final parameter, xDel, must be one of SQLITE_DYNAMIC, SQLITE_STATIC
+** or SQLITE_TRANSIENT. If it is SQLITE_DYNAMIC, then the buffer pointed
+** to by zName will be freed by sqlite3DbFree() when the vdbe is destroyed.
+*/
+SQLITE_PRIVATE int sqlite3VdbeSetColName(
+ Vdbe *p, /* Vdbe being configured */
+ int idx, /* Index of column zName applies to */
+ int var, /* One of the COLNAME_* constants */
+ const char *zName, /* Pointer to buffer containing name */
+ void (*xDel)(void*) /* Memory management strategy for zName */
+){
+ int rc;
+ Mem *pColName;
+ assert( idx<p->nResColumn );
+ assert( var<COLNAME_N );
+ if( p->db->mallocFailed ){
+ assert( !zName || xDel!=SQLITE_DYNAMIC );
+ return SQLITE_NOMEM_BKPT;
+ }
+ assert( p->aColName!=0 );
+ pColName = &(p->aColName[idx+var*p->nResColumn]);
+ rc = sqlite3VdbeMemSetStr(pColName, zName, -1, SQLITE_UTF8, xDel);
+ assert( rc!=0 || !zName || (pColName->flags&MEM_Term)!=0 );
+ return rc;
+}
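+/* Illustrative sketch (hypothetical column names): a two-column result set
+** is typically configured as
+**
+**   sqlite3VdbeSetNumCols(v, 2);
+**   sqlite3VdbeSetColName(v, 0, COLNAME_NAME, "seq", SQLITE_STATIC);
+**   sqlite3VdbeSetColName(v, 1, COLNAME_NAME, "name", SQLITE_STATIC);
+*/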
+
+/*
+** A read or write transaction may or may not be active on database handle
+** db. If a transaction is active, commit it. If there is a
+** write-transaction spanning more than one database file, this routine
+** takes care of the super-journal trickery.
+*/
+static int vdbeCommit(sqlite3 *db, Vdbe *p){
+ int i;
+ int nTrans = 0; /* Number of databases with an active write-transaction
+ ** that are candidates for a two-phase commit using a
+ ** super-journal */
+ int rc = SQLITE_OK;
+ int needXcommit = 0;
+
+#ifdef SQLITE_OMIT_VIRTUALTABLE
+ /* With this option, sqlite3VtabSync() is defined to be simply
+ ** SQLITE_OK so p is not used.
+ */
+ UNUSED_PARAMETER(p);
+#endif
+
+ /* Before doing anything else, call the xSync() callback for any
+ ** virtual module tables written in this transaction. This has to
+ ** be done before determining whether a super-journal file is
+ ** required, as an xSync() callback may add an attached database
+ ** to the transaction.
+ */
+ rc = sqlite3VtabSync(db, p);
+
+ /* This loop determines (a) if the commit hook should be invoked and
+ ** (b) how many database files have open write transactions, not
+ ** including the temp database. (b) is important because if more than
+ ** one database file has an open write transaction, a super-journal
+ ** file is required for an atomic commit.
+ */
+ for(i=0; rc==SQLITE_OK && i<db->nDb; i++){
+ Btree *pBt = db->aDb[i].pBt;
+ if( sqlite3BtreeIsInTrans(pBt) ){
+ /* Whether or not a database might need a super-journal depends upon
+ ** its journal mode (among other things). This matrix determines which
+ ** journal modes use a super-journal and which do not */
+ static const u8 aMJNeeded[] = {
+ /* DELETE */ 1,
+ /* PERSIST */ 1,
+ /* OFF */ 0,
+ /* TRUNCATE */ 1,
+ /* MEMORY */ 0,
+ /* WAL */ 0
+ };
+ Pager *pPager; /* Pager associated with pBt */
+ needXcommit = 1;
+ sqlite3BtreeEnter(pBt);
+ pPager = sqlite3BtreePager(pBt);
+ if( db->aDb[i].safety_level!=PAGER_SYNCHRONOUS_OFF
+ && aMJNeeded[sqlite3PagerGetJournalMode(pPager)]
+ && sqlite3PagerIsMemdb(pPager)==0
+ ){
+ assert( i!=1 );
+ nTrans++;
+ }
+ rc = sqlite3PagerExclusiveLock(pPager);
+ sqlite3BtreeLeave(pBt);
+ }
+ }
+ if( rc!=SQLITE_OK ){
+ return rc;
+ }
+
+ /* If there are any write-transactions at all, invoke the commit hook */
+ if( needXcommit && db->xCommitCallback ){
+ rc = db->xCommitCallback(db->pCommitArg);
+ if( rc ){
+ return SQLITE_CONSTRAINT_COMMITHOOK;
+ }
+ }
+
+ /* The simple case - no more than one database file (not counting the
+ ** TEMP database) has a transaction active. There is no need for the
+ ** super-journal.
+ **
+ ** If the return value of sqlite3BtreeGetFilename() is a zero length
+ ** string, it means the main database is :memory: or a temp file. In
+ ** that case we do not support atomic multi-file commits, so use the
+ ** simple case then too.
+ */
+ if( 0==sqlite3Strlen30(sqlite3BtreeGetFilename(db->aDb[0].pBt))
+ || nTrans<=1
+ ){
+ for(i=0; rc==SQLITE_OK && i<db->nDb; i++){
+ Btree *pBt = db->aDb[i].pBt;
+ if( pBt ){
+ rc = sqlite3BtreeCommitPhaseOne(pBt, 0);
+ }
+ }
+
+ /* Do the commit only if all databases successfully complete phase 1.
+ ** If one of the BtreeCommitPhaseOne() calls fails, this indicates an
+ ** IO error while deleting or truncating a journal file. It is unlikely,
+ ** but could happen. In this case abandon processing and return the error.
+ */
+ for(i=0; rc==SQLITE_OK && i<db->nDb; i++){
+ Btree *pBt = db->aDb[i].pBt;
+ if( pBt ){
+ rc = sqlite3BtreeCommitPhaseTwo(pBt, 0);
+ }
+ }
+ if( rc==SQLITE_OK ){
+ sqlite3VtabCommit(db);
+ }
+ }
+
+ /* The complex case - There is a multi-file write-transaction active.
+ ** This requires a super-journal file to ensure the transaction is
+ ** committed atomically.
+ */
+#ifndef SQLITE_OMIT_DISKIO
+ else{
+ sqlite3_vfs *pVfs = db->pVfs;
+ char *zSuper = 0; /* File-name for the super-journal */
+ char const *zMainFile = sqlite3BtreeGetFilename(db->aDb[0].pBt);
+ sqlite3_file *pSuperJrnl = 0;
+ i64 offset = 0;
+ int res;
+ int retryCount = 0;
+ int nMainFile;
+
+ /* Select a super-journal file name */
+ nMainFile = sqlite3Strlen30(zMainFile);
+ zSuper = sqlite3MPrintf(db, "%.4c%s%.16c", 0,zMainFile,0);
+ if( zSuper==0 ) return SQLITE_NOMEM_BKPT;
+ zSuper += 4;
+ do {
+ u32 iRandom;
+ if( retryCount ){
+ if( retryCount>100 ){
+ sqlite3_log(SQLITE_FULL, "MJ delete: %s", zSuper);
+ sqlite3OsDelete(pVfs, zSuper, 0);
+ break;
+ }else if( retryCount==1 ){
+ sqlite3_log(SQLITE_FULL, "MJ collide: %s", zSuper);
+ }
+ }
+ retryCount++;
+ sqlite3_randomness(sizeof(iRandom), &iRandom);
+ sqlite3_snprintf(13, &zSuper[nMainFile], "-mj%06X9%02X",
+ (iRandom>>8)&0xffffff, iRandom&0xff);
+ /* The antepenultimate character of the super-journal name must
+ ** be "9" to avoid name collisions when using 8+3 filenames. */
+ assert( zSuper[sqlite3Strlen30(zSuper)-3]=='9' );
+ sqlite3FileSuffix3(zMainFile, zSuper);
+ rc = sqlite3OsAccess(pVfs, zSuper, SQLITE_ACCESS_EXISTS, &res);
+ }while( rc==SQLITE_OK && res );
+ if( rc==SQLITE_OK ){
+ /* Open the super-journal. */
+ rc = sqlite3OsOpenMalloc(pVfs, zSuper, &pSuperJrnl,
+ SQLITE_OPEN_READWRITE|SQLITE_OPEN_CREATE|
+ SQLITE_OPEN_EXCLUSIVE|SQLITE_OPEN_SUPER_JOURNAL, 0
+ );
+ }
+ if( rc!=SQLITE_OK ){
+ sqlite3DbFree(db, zSuper-4);
+ return rc;
+ }
+
+ /* Write the name of each database file in the transaction into the new
+ ** super-journal file. If an error occurs at this point close
+ ** and delete the super-journal file. All the individual journal files
+ ** still have 'null' as the super-journal pointer, so they will roll
+ ** back independently if a failure occurs.
+ */
+ for(i=0; i<db->nDb; i++){
+ Btree *pBt = db->aDb[i].pBt;
+ if( sqlite3BtreeIsInTrans(pBt) ){
+ char const *zFile = sqlite3BtreeGetJournalname(pBt);
+ if( zFile==0 ){
+ continue; /* Ignore TEMP and :memory: databases */
+ }
+ assert( zFile[0]!=0 );
+ rc = sqlite3OsWrite(pSuperJrnl, zFile, sqlite3Strlen30(zFile)+1,offset);
+ offset += sqlite3Strlen30(zFile)+1;
+ if( rc!=SQLITE_OK ){
+ sqlite3OsCloseFree(pSuperJrnl);
+ sqlite3OsDelete(pVfs, zSuper, 0);
+ sqlite3DbFree(db, zSuper-4);
+ return rc;
+ }
+ }
+ }
+
+ /* Sync the super-journal file. If the IOCAP_SEQUENTIAL device
+ ** flag is set this is not required.
+ */
+ if( 0==(sqlite3OsDeviceCharacteristics(pSuperJrnl)&SQLITE_IOCAP_SEQUENTIAL)
+ && SQLITE_OK!=(rc = sqlite3OsSync(pSuperJrnl, SQLITE_SYNC_NORMAL))
+ ){
+ sqlite3OsCloseFree(pSuperJrnl);
+ sqlite3OsDelete(pVfs, zSuper, 0);
+ sqlite3DbFree(db, zSuper-4);
+ return rc;
+ }
+
+ /* Sync all the db files involved in the transaction. The same call
+ ** sets the super-journal pointer in each individual journal. If
+ ** an error occurs here, do not delete the super-journal file.
+ **
+ ** If the error occurs during the first call to
+ ** sqlite3BtreeCommitPhaseOne(), then there is a chance that the
+ ** super-journal file will be orphaned. But we cannot delete it,
+ ** in case the super-journal file name was written into the journal
+ ** file before the failure occurred.
+ */
+ for(i=0; rc==SQLITE_OK && i<db->nDb; i++){
+ Btree *pBt = db->aDb[i].pBt;
+ if( pBt ){
+ rc = sqlite3BtreeCommitPhaseOne(pBt, zSuper);
+ }
+ }
+ sqlite3OsCloseFree(pSuperJrnl);
+ assert( rc!=SQLITE_BUSY );
+ if( rc!=SQLITE_OK ){
+ sqlite3DbFree(db, zSuper-4);
+ return rc;
+ }
+
+ /* Delete the super-journal file. This commits the transaction. After
+ ** doing this the directory is synced again before any individual
+ ** transaction files are deleted.
+ */
+ rc = sqlite3OsDelete(pVfs, zSuper, 1);
+ sqlite3DbFree(db, zSuper-4);
+ zSuper = 0;
+ if( rc ){
+ return rc;
+ }
+
+ /* All files and directories have already been synced, so the following
+ ** calls to sqlite3BtreeCommitPhaseTwo() are only closing files and
+ ** deleting or truncating journals. If something goes wrong while
+ ** this is happening we don't really care. The integrity of the
+ ** transaction is already guaranteed, but some stray 'cold' journals
+ ** may be lying around. Returning an error code won't help matters.
+ */
+ disable_simulated_io_errors();
+ sqlite3BeginBenignMalloc();
+ for(i=0; i<db->nDb; i++){
+ Btree *pBt = db->aDb[i].pBt;
+ if( pBt ){
+ sqlite3BtreeCommitPhaseTwo(pBt, 1);
+ }
+ }
+ sqlite3EndBenignMalloc();
+ enable_simulated_io_errors();
+
+ sqlite3VtabCommit(db);
+ }
+#endif
+
+ return rc;
+}
+
+/*
+** This routine checks that the sqlite3.nVdbeActive count variable
+** matches the number of VDBEs in the list sqlite3.pVdbe that are
+** currently active. An assertion fails if the two counts do not match.
+** This is an internal self-check only - it is not an essential processing
+** step.
+**
+** This is a no-op if NDEBUG is defined.
+*/
+#ifndef NDEBUG
+static void checkActiveVdbeCnt(sqlite3 *db){
+ Vdbe *p;
+ int cnt = 0;
+ int nWrite = 0;
+ int nRead = 0;
+ p = db->pVdbe;
+ while( p ){
+ if( sqlite3_stmt_busy((sqlite3_stmt*)p) ){
+ cnt++;
+ if( p->readOnly==0 ) nWrite++;
+ if( p->bIsReader ) nRead++;
+ }
+ p = p->pNext;
+ }
+ assert( cnt==db->nVdbeActive );
+ assert( nWrite==db->nVdbeWrite );
+ assert( nRead==db->nVdbeRead );
+}
+#else
+#define checkActiveVdbeCnt(x)
+#endif
+
+/*
+** If the Vdbe passed as the first argument opened a statement-transaction,
+** close it now. Argument eOp must be either SAVEPOINT_ROLLBACK or
+** SAVEPOINT_RELEASE. If it is SAVEPOINT_ROLLBACK, then the statement
+** transaction is rolled back. If eOp is SAVEPOINT_RELEASE, then the
+** statement transaction is committed.
+**
+** If an IO error occurs, an SQLITE_IOERR_XXX error code is returned.
+** Otherwise SQLITE_OK.
+*/
+static SQLITE_NOINLINE int vdbeCloseStatement(Vdbe *p, int eOp){
+ sqlite3 *const db = p->db;
+ int rc = SQLITE_OK;
+ int i;
+ const int iSavepoint = p->iStatement-1;
+
+ assert( eOp==SAVEPOINT_ROLLBACK || eOp==SAVEPOINT_RELEASE);
+ assert( db->nStatement>0 );
+ assert( p->iStatement==(db->nStatement+db->nSavepoint) );
+
+ for(i=0; i<db->nDb; i++){
+ int rc2 = SQLITE_OK;
+ Btree *pBt = db->aDb[i].pBt;
+ if( pBt ){
+ if( eOp==SAVEPOINT_ROLLBACK ){
+ rc2 = sqlite3BtreeSavepoint(pBt, SAVEPOINT_ROLLBACK, iSavepoint);
+ }
+ if( rc2==SQLITE_OK ){
+ rc2 = sqlite3BtreeSavepoint(pBt, SAVEPOINT_RELEASE, iSavepoint);
+ }
+ if( rc==SQLITE_OK ){
+ rc = rc2;
+ }
+ }
+ }
+ db->nStatement--;
+ p->iStatement = 0;
+
+ if( rc==SQLITE_OK ){
+ if( eOp==SAVEPOINT_ROLLBACK ){
+ rc = sqlite3VtabSavepoint(db, SAVEPOINT_ROLLBACK, iSavepoint);
+ }
+ if( rc==SQLITE_OK ){
+ rc = sqlite3VtabSavepoint(db, SAVEPOINT_RELEASE, iSavepoint);
+ }
+ }
+
+ /* If the statement transaction is being rolled back, also restore the
+  ** database handle's deferred constraint counters to the values they had when
+ ** the statement transaction was opened. */
+ if( eOp==SAVEPOINT_ROLLBACK ){
+ db->nDeferredCons = p->nStmtDefCons;
+ db->nDeferredImmCons = p->nStmtDefImmCons;
+ }
+ return rc;
+}
+SQLITE_PRIVATE int sqlite3VdbeCloseStatement(Vdbe *p, int eOp){
+ if( p->db->nStatement && p->iStatement ){
+ return vdbeCloseStatement(p, eOp);
+ }
+ return SQLITE_OK;
+}
+
+
+/*
+** This function is called when a transaction opened by the database
+** handle associated with the VM passed as an argument is about to be
+** committed. If there are outstanding deferred foreign key constraint
+** violations, return SQLITE_ERROR. Otherwise, SQLITE_OK.
+**
+** If there are outstanding FK violations, this function sets the result
+** of the VM to SQLITE_CONSTRAINT_FOREIGNKEY, writes an error message to
+** it, and returns SQLITE_ERROR.
+*/
+#ifndef SQLITE_OMIT_FOREIGN_KEY
+SQLITE_PRIVATE int sqlite3VdbeCheckFk(Vdbe *p, int deferred){
+ sqlite3 *db = p->db;
+ if( (deferred && (db->nDeferredCons+db->nDeferredImmCons)>0)
+ || (!deferred && p->nFkConstraint>0)
+ ){
+ p->rc = SQLITE_CONSTRAINT_FOREIGNKEY;
+ p->errorAction = OE_Abort;
+ sqlite3VdbeError(p, "FOREIGN KEY constraint failed");
+ return SQLITE_ERROR;
+ }
+ return SQLITE_OK;
+}
+#endif
+
+/*
+** This routine is called when a VDBE tries to halt.  If the VDBE
+** has made changes and is in autocommit mode, then commit those
+** changes. If a rollback is needed, then do the rollback.
+**
+** This routine is the only way to move the state of a VM from
+** VDBE_MAGIC_RUN to VDBE_MAGIC_HALT.  It is harmless to
+** call this on a VM that is in the VDBE_MAGIC_HALT state.
+**
+** Return an error code. If the commit could not complete because of
+** lock contention, return SQLITE_BUSY. If SQLITE_BUSY is returned, it
+** means the close did not happen and needs to be repeated.
+*/
+SQLITE_PRIVATE int sqlite3VdbeHalt(Vdbe *p){
+ int rc; /* Used to store transient return codes */
+ sqlite3 *db = p->db;
+
+ /* This function contains the logic that determines if a statement or
+ ** transaction will be committed or rolled back as a result of the
+ ** execution of this virtual machine.
+ **
+ ** If any of the following errors occur:
+ **
+ ** SQLITE_NOMEM
+ ** SQLITE_IOERR
+ ** SQLITE_FULL
+ ** SQLITE_INTERRUPT
+ **
+ ** Then the internal cache might have been left in an inconsistent
+  ** state.  We need to roll back the statement transaction, if there is
+ ** one, or the complete transaction if there is no statement transaction.
+ */
+
+ if( p->magic!=VDBE_MAGIC_RUN ){
+ return SQLITE_OK;
+ }
+ if( db->mallocFailed ){
+ p->rc = SQLITE_NOMEM_BKPT;
+ }
+ closeAllCursors(p);
+ checkActiveVdbeCnt(db);
+
+ /* No commit or rollback needed if the program never started or if the
+ ** SQL statement does not read or write a database file. */
+ if( p->pc>=0 && p->bIsReader ){
+ int mrc; /* Primary error code from p->rc */
+ int eStatementOp = 0;
+ int isSpecialError; /* Set to true if a 'special' error */
+
+ /* Lock all btrees used by the statement */
+ sqlite3VdbeEnter(p);
+
+ /* Check for one of the special errors */
+ mrc = p->rc & 0xff;
+ isSpecialError = mrc==SQLITE_NOMEM || mrc==SQLITE_IOERR
+ || mrc==SQLITE_INTERRUPT || mrc==SQLITE_FULL;
+ if( isSpecialError ){
+ /* If the query was read-only and the error code is SQLITE_INTERRUPT,
+ ** no rollback is necessary. Otherwise, at least a savepoint
+ ** transaction must be rolled back to restore the database to a
+ ** consistent state.
+ **
+ ** Even if the statement is read-only, it is important to perform
+ ** a statement or transaction rollback operation. If the error
+ ** occurred while writing to the journal, sub-journal or database
+ ** file as part of an effort to free up cache space (see function
+ ** pagerStress() in pager.c), the rollback is required to restore
+ ** the pager to a consistent state.
+ */
+ if( !p->readOnly || mrc!=SQLITE_INTERRUPT ){
+ if( (mrc==SQLITE_NOMEM || mrc==SQLITE_FULL) && p->usesStmtJournal ){
+ eStatementOp = SAVEPOINT_ROLLBACK;
+ }else{
+ /* We are forced to roll back the active transaction. Before doing
+ ** so, abort any other statements this handle currently has active.
+ */
+ sqlite3RollbackAll(db, SQLITE_ABORT_ROLLBACK);
+ sqlite3CloseSavepoints(db);
+ db->autoCommit = 1;
+ p->nChange = 0;
+ }
+ }
+ }
+
+ /* Check for immediate foreign key violations. */
+ if( p->rc==SQLITE_OK || (p->errorAction==OE_Fail && !isSpecialError) ){
+ sqlite3VdbeCheckFk(p, 0);
+ }
+
+ /* If the auto-commit flag is set and this is the only active writer
+ ** VM, then we do either a commit or rollback of the current transaction.
+ **
+ ** Note: This block also runs if one of the special errors handled
+ ** above has occurred.
+ */
+ if( !sqlite3VtabInSync(db)
+ && db->autoCommit
+ && db->nVdbeWrite==(p->readOnly==0)
+ ){
+ if( p->rc==SQLITE_OK || (p->errorAction==OE_Fail && !isSpecialError) ){
+ rc = sqlite3VdbeCheckFk(p, 1);
+ if( rc!=SQLITE_OK ){
+ if( NEVER(p->readOnly) ){
+ sqlite3VdbeLeave(p);
+ return SQLITE_ERROR;
+ }
+ rc = SQLITE_CONSTRAINT_FOREIGNKEY;
+ }else{
+ /* The auto-commit flag is true, the vdbe program was successful
+ ** or hit an 'OR FAIL' constraint and there are no deferred foreign
+ ** key constraints to hold up the transaction. This means a commit
+ ** is required. */
+ rc = vdbeCommit(db, p);
+ }
+ if( rc==SQLITE_BUSY && p->readOnly ){
+ sqlite3VdbeLeave(p);
+ return SQLITE_BUSY;
+ }else if( rc!=SQLITE_OK ){
+ p->rc = rc;
+ sqlite3RollbackAll(db, SQLITE_OK);
+ p->nChange = 0;
+ }else{
+ db->nDeferredCons = 0;
+ db->nDeferredImmCons = 0;
+ db->flags &= ~(u64)SQLITE_DeferFKs;
+ sqlite3CommitInternalChanges(db);
+ }
+ }else{
+ sqlite3RollbackAll(db, SQLITE_OK);
+ p->nChange = 0;
+ }
+ db->nStatement = 0;
+ }else if( eStatementOp==0 ){
+ if( p->rc==SQLITE_OK || p->errorAction==OE_Fail ){
+ eStatementOp = SAVEPOINT_RELEASE;
+ }else if( p->errorAction==OE_Abort ){
+ eStatementOp = SAVEPOINT_ROLLBACK;
+ }else{
+ sqlite3RollbackAll(db, SQLITE_ABORT_ROLLBACK);
+ sqlite3CloseSavepoints(db);
+ db->autoCommit = 1;
+ p->nChange = 0;
+ }
+ }
+
+ /* If eStatementOp is non-zero, then a statement transaction needs to
+ ** be committed or rolled back. Call sqlite3VdbeCloseStatement() to
+ ** do so. If this operation returns an error, and the current statement
+    ** error code is SQLITE_OK or SQLITE_CONSTRAINT, then promote the
+    ** error returned by sqlite3VdbeCloseStatement() to be the current
+    ** statement error code.
+ */
+ if( eStatementOp ){
+ rc = sqlite3VdbeCloseStatement(p, eStatementOp);
+ if( rc ){
+ if( p->rc==SQLITE_OK || (p->rc&0xff)==SQLITE_CONSTRAINT ){
+ p->rc = rc;
+ sqlite3DbFree(db, p->zErrMsg);
+ p->zErrMsg = 0;
+ }
+ sqlite3RollbackAll(db, SQLITE_ABORT_ROLLBACK);
+ sqlite3CloseSavepoints(db);
+ db->autoCommit = 1;
+ p->nChange = 0;
+ }
+ }
+
+ /* If this was an INSERT, UPDATE or DELETE and no statement transaction
+ ** has been rolled back, update the database connection change-counter.
+ */
+ if( p->changeCntOn ){
+ if( eStatementOp!=SAVEPOINT_ROLLBACK ){
+ sqlite3VdbeSetChanges(db, p->nChange);
+ }else{
+ sqlite3VdbeSetChanges(db, 0);
+ }
+ p->nChange = 0;
+ }
+
+ /* Release the locks */
+ sqlite3VdbeLeave(p);
+ }
+
+ /* We have successfully halted and closed the VM. Record this fact. */
+ if( p->pc>=0 ){
+ db->nVdbeActive--;
+ if( !p->readOnly ) db->nVdbeWrite--;
+ if( p->bIsReader ) db->nVdbeRead--;
+ assert( db->nVdbeActive>=db->nVdbeRead );
+ assert( db->nVdbeRead>=db->nVdbeWrite );
+ assert( db->nVdbeWrite>=0 );
+ }
+ p->magic = VDBE_MAGIC_HALT;
+ checkActiveVdbeCnt(db);
+ if( db->mallocFailed ){
+ p->rc = SQLITE_NOMEM_BKPT;
+ }
+
+ /* If the auto-commit flag is set to true, then any locks that were held
+ ** by connection db have now been released. Call sqlite3ConnectionUnlocked()
+ ** to invoke any required unlock-notify callbacks.
+ */
+ if( db->autoCommit ){
+ sqlite3ConnectionUnlocked(db);
+ }
+
+ assert( db->nVdbeActive>0 || db->autoCommit==0 || db->nStatement==0 );
+ return (p->rc==SQLITE_BUSY ? SQLITE_BUSY : SQLITE_OK);
+}
+
+
+/*
+** Each VDBE holds the result of the most recent sqlite3_step() call
+** in p->rc. This routine sets that result back to SQLITE_OK.
+*/
+SQLITE_PRIVATE void sqlite3VdbeResetStepResult(Vdbe *p){
+ p->rc = SQLITE_OK;
+}
+
+/*
+** Copy the error code and error message belonging to the VDBE passed
+** as the first argument to its database handle (so that they will be
+** returned by calls to sqlite3_errcode() and sqlite3_errmsg()).
+**
+** This function does not clear the VDBE error code or message, just
+** copies them to the database handle.
+*/
+SQLITE_PRIVATE int sqlite3VdbeTransferError(Vdbe *p){
+ sqlite3 *db = p->db;
+ int rc = p->rc;
+ if( p->zErrMsg ){
+ db->bBenignMalloc++;
+ sqlite3BeginBenignMalloc();
+ if( db->pErr==0 ) db->pErr = sqlite3ValueNew(db);
+ sqlite3ValueSetStr(db->pErr, -1, p->zErrMsg, SQLITE_UTF8, SQLITE_TRANSIENT);
+ sqlite3EndBenignMalloc();
+ db->bBenignMalloc--;
+ }else if( db->pErr ){
+ sqlite3ValueSetNull(db->pErr);
+ }
+ db->errCode = rc;
+ return rc;
+}
+
+#ifdef SQLITE_ENABLE_SQLLOG
+/*
+** If an SQLITE_CONFIG_SQLLOG hook is registered and the VM has been run,
+** invoke it.
+*/
+static void vdbeInvokeSqllog(Vdbe *v){
+ if( sqlite3GlobalConfig.xSqllog && v->rc==SQLITE_OK && v->zSql && v->pc>=0 ){
+ char *zExpanded = sqlite3VdbeExpandSql(v, v->zSql);
+ assert( v->db->init.busy==0 );
+ if( zExpanded ){
+ sqlite3GlobalConfig.xSqllog(
+ sqlite3GlobalConfig.pSqllogArg, v->db, zExpanded, 1
+ );
+ sqlite3DbFree(v->db, zExpanded);
+ }
+ }
+}
+#else
+# define vdbeInvokeSqllog(x)
+#endif
+
+/*
+** Clean up a VDBE after execution but do not delete the VDBE just yet.
+** Copy any error message into the database handle. Return the result code.
+**
+** After this routine is run, the VDBE should be ready to be executed
+** again.
+**
+** To look at it another way, this routine resets the state of the
+** virtual machine from VDBE_MAGIC_RUN or VDBE_MAGIC_HALT back to
+** VDBE_MAGIC_RESET.
+*/
+SQLITE_PRIVATE int sqlite3VdbeReset(Vdbe *p){
+#if defined(SQLITE_DEBUG) || defined(VDBE_PROFILE)
+ int i;
+#endif
+
+ sqlite3 *db;
+ db = p->db;
+
+ /* If the VM did not run to completion or if it encountered an
+ ** error, then it might not have been halted properly. So halt
+ ** it now.
+ */
+ sqlite3VdbeHalt(p);
+
+ /* If the VDBE has been run even partially, then transfer the error code
+ ** and error message from the VDBE into the main database structure. But
+ ** if the VDBE has just been set to run but has not actually executed any
+ ** instructions yet, leave the main database error information unchanged.
+ */
+ if( p->pc>=0 ){
+ vdbeInvokeSqllog(p);
+ if( db->pErr || p->zErrMsg ){
+ sqlite3VdbeTransferError(p);
+ }else{
+ db->errCode = p->rc;
+ }
+ if( p->runOnlyOnce ) p->expired = 1;
+ }else if( p->rc && p->expired ){
+ /* The expired flag was set on the VDBE before the first call
+ ** to sqlite3_step(). For consistency (since sqlite3_step() was
+ ** called), set the database error in this case as well.
+ */
+ sqlite3ErrorWithMsg(db, p->rc, p->zErrMsg ? "%s" : 0, p->zErrMsg);
+ }
+
+ /* Reset register contents and reclaim error message memory.
+ */
+#ifdef SQLITE_DEBUG
+ /* Execute assert() statements to ensure that the Vdbe.apCsr[] and
+ ** Vdbe.aMem[] arrays have already been cleaned up. */
+ if( p->apCsr ) for(i=0; i<p->nCursor; i++) assert( p->apCsr[i]==0 );
+ if( p->aMem ){
+ for(i=0; i<p->nMem; i++) assert( p->aMem[i].flags==MEM_Undefined );
+ }
+#endif
+ if( p->zErrMsg ){
+ sqlite3DbFree(db, p->zErrMsg);
+ p->zErrMsg = 0;
+ }
+ p->pResultSet = 0;
+#ifdef SQLITE_DEBUG
+ p->nWrite = 0;
+#endif
+
+ /* Save profiling information from this VDBE run.
+ */
+#ifdef VDBE_PROFILE
+ {
+ FILE *out = fopen("vdbe_profile.out", "a");
+ if( out ){
+ fprintf(out, "---- ");
+ for(i=0; i<p->nOp; i++){
+ fprintf(out, "%02x", p->aOp[i].opcode);
+ }
+ fprintf(out, "\n");
+ if( p->zSql ){
+ char c, pc = 0;
+ fprintf(out, "-- ");
+ for(i=0; (c = p->zSql[i])!=0; i++){
+ if( pc=='\n' ) fprintf(out, "-- ");
+ putc(c, out);
+ pc = c;
+ }
+ if( pc!='\n' ) fprintf(out, "\n");
+ }
+ for(i=0; i<p->nOp; i++){
+ char zHdr[100];
+ sqlite3_snprintf(sizeof(zHdr), zHdr, "%6u %12llu %8llu ",
+ p->aOp[i].cnt,
+ p->aOp[i].cycles,
+ p->aOp[i].cnt>0 ? p->aOp[i].cycles/p->aOp[i].cnt : 0
+ );
+ fprintf(out, "%s", zHdr);
+ sqlite3VdbePrintOp(out, i, &p->aOp[i]);
+ }
+ fclose(out);
+ }
+ }
+#endif
+ p->magic = VDBE_MAGIC_RESET;
+ return p->rc & db->errMask;
+}
+
+/*
+** Clean up and delete a VDBE after execution. Return an integer which is
+** the result code.  Any error message text is copied into the
+** database handle.
+*/
+SQLITE_PRIVATE int sqlite3VdbeFinalize(Vdbe *p){
+ int rc = SQLITE_OK;
+ if( p->magic==VDBE_MAGIC_RUN || p->magic==VDBE_MAGIC_HALT ){
+ rc = sqlite3VdbeReset(p);
+ assert( (rc & p->db->errMask)==rc );
+ }
+ sqlite3VdbeDelete(p);
+ return rc;
+}
+
+/*
+** If parameter iOp is less than zero, then invoke the destructor for
+** all auxiliary data pointers in the list *pp passed as the
+** second argument.
+**
+** Or, if iOp is greater than or equal to zero, then the destructor is
+** only invoked for those auxiliary data pointers created by the user
+** function invoked by the OP_Function opcode at instruction iOp of
+** the VM, and only then if:
+**
+** * the associated function parameter is the 32nd or later (counting
+** from left to right), or
+**
+** * the corresponding bit in argument mask is clear (where the first
+** function parameter corresponds to bit 0 etc.).
+*/
+SQLITE_PRIVATE void sqlite3VdbeDeleteAuxData(sqlite3 *db, AuxData **pp, int iOp, int mask){
+ while( *pp ){
+ AuxData *pAux = *pp;
+ if( (iOp<0)
+ || (pAux->iAuxOp==iOp
+ && pAux->iAuxArg>=0
+ && (pAux->iAuxArg>31 || !(mask & MASKBIT32(pAux->iAuxArg))))
+ ){
+ testcase( pAux->iAuxArg==31 );
+ if( pAux->xDeleteAux ){
+ pAux->xDeleteAux(pAux->pAux);
+ }
+ *pp = pAux->pNextAux;
+ sqlite3DbFree(db, pAux);
+ }else{
+ pp= &pAux->pNextAux;
+ }
+ }
+}
+
+/*
+** Free all memory associated with the Vdbe passed as the second argument,
+** except for the object itself, which is preserved.
+**
+** The difference between this function and sqlite3VdbeDelete() is that
+** VdbeDelete() also unlinks the Vdbe from the list of VMs associated with
+** the database connection and frees the object itself.
+*/
+SQLITE_PRIVATE void sqlite3VdbeClearObject(sqlite3 *db, Vdbe *p){
+ SubProgram *pSub, *pNext;
+ assert( p->db==0 || p->db==db );
+ releaseMemArray(p->aColName, p->nResColumn*COLNAME_N);
+ for(pSub=p->pProgram; pSub; pSub=pNext){
+ pNext = pSub->pNext;
+ vdbeFreeOpArray(db, pSub->aOp, pSub->nOp);
+ sqlite3DbFree(db, pSub);
+ }
+ if( p->magic!=VDBE_MAGIC_INIT ){
+ releaseMemArray(p->aVar, p->nVar);
+ sqlite3DbFree(db, p->pVList);
+ sqlite3DbFree(db, p->pFree);
+ }
+ vdbeFreeOpArray(db, p->aOp, p->nOp);
+ sqlite3DbFree(db, p->aColName);
+ sqlite3DbFree(db, p->zSql);
+#ifdef SQLITE_ENABLE_NORMALIZE
+ sqlite3DbFree(db, p->zNormSql);
+ {
+ DblquoteStr *pThis, *pNext;
+ for(pThis=p->pDblStr; pThis; pThis=pNext){
+ pNext = pThis->pNextStr;
+ sqlite3DbFree(db, pThis);
+ }
+ }
+#endif
+#ifdef SQLITE_ENABLE_STMT_SCANSTATUS
+ {
+ int i;
+ for(i=0; i<p->nScan; i++){
+ sqlite3DbFree(db, p->aScan[i].zName);
+ }
+ sqlite3DbFree(db, p->aScan);
+ }
+#endif
+}
+
+/*
+** Delete an entire VDBE.
+*/
+SQLITE_PRIVATE void sqlite3VdbeDelete(Vdbe *p){
+ sqlite3 *db;
+
+ assert( p!=0 );
+ db = p->db;
+ assert( sqlite3_mutex_held(db->mutex) );
+ sqlite3VdbeClearObject(db, p);
+ if( p->pPrev ){
+ p->pPrev->pNext = p->pNext;
+ }else{
+ assert( db->pVdbe==p );
+ db->pVdbe = p->pNext;
+ }
+ if( p->pNext ){
+ p->pNext->pPrev = p->pPrev;
+ }
+ p->magic = VDBE_MAGIC_DEAD;
+ p->db = 0;
+ sqlite3DbFreeNN(db, p);
+}
+
+/*
+** The cursor "p" has a pending seek operation that has not yet been
+** carried out. Seek the cursor now. If an error occurs, return
+** the appropriate error code.
+*/
+SQLITE_PRIVATE int SQLITE_NOINLINE sqlite3VdbeFinishMoveto(VdbeCursor *p){
+ int res, rc;
+#ifdef SQLITE_TEST
+ extern int sqlite3_search_count;
+#endif
+ assert( p->deferredMoveto );
+ assert( p->isTable );
+ assert( p->eCurType==CURTYPE_BTREE );
+ rc = sqlite3BtreeMovetoUnpacked(p->uc.pCursor, 0, p->movetoTarget, 0, &res);
+ if( rc ) return rc;
+ if( res!=0 ) return SQLITE_CORRUPT_BKPT;
+#ifdef SQLITE_TEST
+ sqlite3_search_count++;
+#endif
+ p->deferredMoveto = 0;
+ p->cacheStatus = CACHE_STALE;
+ return SQLITE_OK;
+}
+
+/*
+** Something has moved cursor "p" out of place. Maybe the row it was
+** pointed to was deleted out from under it. Or maybe the btree was
+** rebalanced. Whatever the cause, try to restore "p" to the place it
+** is supposed to be pointing. If the row was deleted out from under the
+** cursor, set the cursor to point to a NULL row.
+*/
+static int SQLITE_NOINLINE handleMovedCursor(VdbeCursor *p){
+ int isDifferentRow, rc;
+ assert( p->eCurType==CURTYPE_BTREE );
+ assert( p->uc.pCursor!=0 );
+ assert( sqlite3BtreeCursorHasMoved(p->uc.pCursor) );
+ rc = sqlite3BtreeCursorRestore(p->uc.pCursor, &isDifferentRow);
+ p->cacheStatus = CACHE_STALE;
+ if( isDifferentRow ) p->nullRow = 1;
+ return rc;
+}
+
+/*
+** Check to ensure that the cursor is valid. Restore the cursor
+** if need be. Return any I/O error from the restore operation.
+*/
+SQLITE_PRIVATE int sqlite3VdbeCursorRestore(VdbeCursor *p){
+ assert( p->eCurType==CURTYPE_BTREE );
+ if( sqlite3BtreeCursorHasMoved(p->uc.pCursor) ){
+ return handleMovedCursor(p);
+ }
+ return SQLITE_OK;
+}
+
+/*
+** Make sure the cursor p is ready to read or write the row to which it
+** was last positioned. Return an error code if an OOM fault or I/O error
+** prevents us from positioning the cursor to its correct position.
+**
+** If a MoveTo operation is pending on the given cursor, then do that
+** MoveTo now. If no move is pending, check to see if the row has been
+** deleted out from under the cursor and if it has, mark the row as
+** a NULL row.
+**
+** If the cursor is already pointing to the correct row and that row has
+** not been deleted out from under the cursor, then this routine is a no-op.
+*/
+SQLITE_PRIVATE int sqlite3VdbeCursorMoveto(VdbeCursor **pp, u32 *piCol){
+ VdbeCursor *p = *pp;
+ assert( p->eCurType==CURTYPE_BTREE || p->eCurType==CURTYPE_PSEUDO );
+ if( p->deferredMoveto ){
+ u32 iMap;
+ if( p->aAltMap && (iMap = p->aAltMap[1+*piCol])>0 && !p->nullRow ){
+ *pp = p->pAltCursor;
+ *piCol = iMap - 1;
+ return SQLITE_OK;
+ }
+ return sqlite3VdbeFinishMoveto(p);
+ }
+ if( sqlite3BtreeCursorHasMoved(p->uc.pCursor) ){
+ return handleMovedCursor(p);
+ }
+ return SQLITE_OK;
+}
+
+/*
+** The following functions:
+**
+** sqlite3VdbeSerialType()
+** sqlite3VdbeSerialTypeLen()
+** sqlite3VdbeSerialLen()
+** sqlite3VdbeSerialPut()
+** sqlite3VdbeSerialGet()
+**
+** encapsulate the code that serializes values for storage in SQLite
+** data and index records. Each serialized value consists of a
+** 'serial-type' and a blob of data. The serial type is an 8-byte unsigned
+** integer, stored as a varint.
+**
+** In an SQLite index record, the serial type is stored directly before
+** the blob of data that it corresponds to. In a table record, all serial
+** types are stored at the start of the record, and the blobs of data at
+** the end. Hence these functions allow the caller to handle the
+** serial-type and data blob separately.
+**
+** The following table describes the various storage classes for data:
+**
+** serial type bytes of data type
+** -------------- --------------- ---------------
+** 0 0 NULL
+** 1 1 signed integer
+** 2 2 signed integer
+** 3 3 signed integer
+** 4 4 signed integer
+** 5 6 signed integer
+** 6 8 signed integer
+** 7 8 IEEE float
+** 8 0 Integer constant 0
+** 9 0 Integer constant 1
+** 10,11 reserved for expansion
+** N>=12 and even (N-12)/2 BLOB
+** N>=13 and odd (N-13)/2 text
+**
+** The 8 and 9 types were added in 3.3.0, file format 4. Prior versions
+** of SQLite will not understand those serial types.
+*/
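+/* Editorial sketch (not part of the original source, wrapped in "#if 0"
+** so it is never compiled): a worked example of the text/blob serial-type
+** arithmetic from the table above.
+*/
+#if 0
+static void serialTypeTableExample(void){
+  int nText = 3;                   /* e.g. the 3-byte text value "abc" */
+  u32 t = 2*(u32)nText + 13;       /* odd serial types N>=13 are text */
+  assert( t==19 );                 /* "abc" is stored with serial type 19 */
+  assert( (t-13)/2==(u32)nText );  /* a reader recovers the length */
+  assert( (t & 1)==1 );            /* odd => text; even N>=12 => BLOB */
+}
+#endif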
+
+#if 0 /* Inlined into the OP_MakeRecord opcode */
+/*
+** Return the serial-type for the value stored in pMem.
+**
+** This routine might convert a large MEM_IntReal value into MEM_Real.
+**
+** 2019-07-11: The primary user of this subroutine was the OP_MakeRecord
+** opcode in the byte-code engine. But by moving this routine in-line, we
+** can omit some redundant tests and make that opcode a lot faster. So
+** this routine is now only used by the STAT3 logic and STAT3 support has
+** ended. The code is kept here for historical reference only.
+*/
+SQLITE_PRIVATE u32 sqlite3VdbeSerialType(Mem *pMem, int file_format, u32 *pLen){
+ int flags = pMem->flags;
+ u32 n;
+
+ assert( pLen!=0 );
+ if( flags&MEM_Null ){
+ *pLen = 0;
+ return 0;
+ }
+ if( flags&(MEM_Int|MEM_IntReal) ){
+ /* Figure out whether to use 1, 2, 4, 6 or 8 bytes. */
+# define MAX_6BYTE ((((i64)0x00008000)<<32)-1)
+ i64 i = pMem->u.i;
+ u64 u;
+ testcase( flags & MEM_Int );
+ testcase( flags & MEM_IntReal );
+ if( i<0 ){
+ u = ~i;
+ }else{
+ u = i;
+ }
+ if( u<=127 ){
+ if( (i&1)==i && file_format>=4 ){
+ *pLen = 0;
+ return 8+(u32)u;
+ }else{
+ *pLen = 1;
+ return 1;
+ }
+ }
+ if( u<=32767 ){ *pLen = 2; return 2; }
+ if( u<=8388607 ){ *pLen = 3; return 3; }
+ if( u<=2147483647 ){ *pLen = 4; return 4; }
+ if( u<=MAX_6BYTE ){ *pLen = 6; return 5; }
+ *pLen = 8;
+ if( flags&MEM_IntReal ){
+ /* If the value is IntReal and is going to take up 8 bytes to store
+ ** as an integer, then we might as well make it an 8-byte floating
+ ** point value */
+ pMem->u.r = (double)pMem->u.i;
+ pMem->flags &= ~MEM_IntReal;
+ pMem->flags |= MEM_Real;
+ return 7;
+ }
+ return 6;
+ }
+ if( flags&MEM_Real ){
+ *pLen = 8;
+ return 7;
+ }
+ assert( pMem->db->mallocFailed || flags&(MEM_Str|MEM_Blob) );
+ assert( pMem->n>=0 );
+ n = (u32)pMem->n;
+ if( flags & MEM_Zero ){
+ n += pMem->u.nZero;
+ }
+ *pLen = n;
+ return ((n*2) + 12 + ((flags&MEM_Str)!=0));
+}
+#endif /* inlined into OP_MakeRecord */
+
+/*
+** The sizes for serial types less than 128
+*/
+static const u8 sqlite3SmallTypeSizes[] = {
+ /* 0 1 2 3 4 5 6 7 8 9 */
+/* 0 */ 0, 1, 2, 3, 4, 6, 8, 8, 0, 0,
+/* 10 */ 0, 0, 0, 0, 1, 1, 2, 2, 3, 3,
+/* 20 */ 4, 4, 5, 5, 6, 6, 7, 7, 8, 8,
+/* 30 */ 9, 9, 10, 10, 11, 11, 12, 12, 13, 13,
+/* 40 */ 14, 14, 15, 15, 16, 16, 17, 17, 18, 18,
+/* 50 */ 19, 19, 20, 20, 21, 21, 22, 22, 23, 23,
+/* 60 */ 24, 24, 25, 25, 26, 26, 27, 27, 28, 28,
+/* 70 */ 29, 29, 30, 30, 31, 31, 32, 32, 33, 33,
+/* 80 */ 34, 34, 35, 35, 36, 36, 37, 37, 38, 38,
+/* 90 */ 39, 39, 40, 40, 41, 41, 42, 42, 43, 43,
+/* 100 */ 44, 44, 45, 45, 46, 46, 47, 47, 48, 48,
+/* 110 */ 49, 49, 50, 50, 51, 51, 52, 52, 53, 53,
+/* 120 */ 54, 54, 55, 55, 56, 56, 57, 57
+};
+
+/*
+** Return the length of the data corresponding to the supplied serial-type.
+*/
+SQLITE_PRIVATE u32 sqlite3VdbeSerialTypeLen(u32 serial_type){
+ if( serial_type>=128 ){
+ return (serial_type-12)/2;
+ }else{
+ assert( serial_type<12
+ || sqlite3SmallTypeSizes[serial_type]==(serial_type - 12)/2 );
+ return sqlite3SmallTypeSizes[serial_type];
+ }
+}
+SQLITE_PRIVATE u8 sqlite3VdbeOneByteSerialTypeLen(u8 serial_type){
+ assert( serial_type<128 );
+ return sqlite3SmallTypeSizes[serial_type];
+}
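+/* Usage sketch (editorial illustration only, never compiled): both length
+** helpers agree with the (N-13)/2 rule for the 3-byte text type 19.
+*/
+#if 0
+static void serialTypeLenExample(void){
+  assert( sqlite3VdbeSerialTypeLen(19)==3 );         /* (19-13)/2 == 3 */
+  assert( sqlite3VdbeOneByteSerialTypeLen(19)==3 );  /* table lookup */
+}
+#endif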
+
+/*
+** If we are on an architecture with mixed-endian floating
+** points (ex: ARM7) then swap the lower 4 bytes with the
+** upper 4 bytes. Return the result.
+**
+** For most architectures, this is a no-op.
+**
+** (later): It is reported to me that the mixed-endian problem
+** on ARM7 is an issue with GCC, not with the ARM7 chip. It seems
+** that early versions of GCC stored the two words of a 64-bit
+** float in the wrong order. And that error has been propagated
+** ever since. The blame is not necessarily with GCC, though.
+** GCC might have just been copying the problem from a prior compiler.
+** I am also told that newer versions of GCC that follow a different
+** ABI get the byte order right.
+**
+** Developers using SQLite on an ARM7 should compile and run their
+** application using -DSQLITE_DEBUG=1 at least once. With DEBUG
+** enabled, some asserts below will ensure that the byte order of
+** floating point values is correct.
+**
+** (2007-08-30) Frank van Vugt has studied this problem closely
+** and has sent his findings to the SQLite developers.  Frank
+** writes that some Linux kernels offer floating point hardware
+** emulation that uses only 32-bit mantissas instead of the full
+** 48 bits required by the IEEE standard.  (This is the
+** CONFIG_FPE_FASTFPE option.) On such systems, floating point
+** byte swapping becomes very complicated. To avoid problems,
+** the necessary byte swapping is carried out using a 64-bit integer
+** rather than a 64-bit float. Frank assures us that the code here
+** works for him. We, the developers, have no way to independently
+** verify this, but Frank seems to know what he is talking about
+** so we trust him.
+*/
+#ifdef SQLITE_MIXED_ENDIAN_64BIT_FLOAT
+static u64 floatSwap(u64 in){
+ union {
+ u64 r;
+ u32 i[2];
+ } u;
+ u32 t;
+
+ u.r = in;
+ t = u.i[0];
+ u.i[0] = u.i[1];
+ u.i[1] = t;
+ return u.r;
+}
+# define swapMixedEndianFloat(X) X = floatSwap(X)
+#else
+# define swapMixedEndianFloat(X)
+#endif
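+/* Editorial sketch (never compiled): on a build where
+** SQLITE_MIXED_ENDIAN_64BIT_FLOAT is defined, swapMixedEndianFloat()
+** exchanges the two 32-bit halves of the 64-bit value; elsewhere it is
+** a no-op and the assert below would not hold.
+*/
+#if 0
+static void floatSwapExample(void){
+  u64 v = 0x0102030405060708ULL;
+  swapMixedEndianFloat(v);
+  assert( v==0x0506070801020304ULL );
+}
+#endif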
+
+/*
+** Write the serialized data blob for the value stored in pMem into
+** buf.  The caller is responsible for allocating enough space in
+** buf[] to hold the entire field, exclusive of the pMem->u.nZero
+** bytes for a MEM_Zero value.
+**
+** Return the number of bytes actually written into buf[]. The number
+** of bytes in the zero-filled tail is included in the return value only
+** if those bytes were zeroed in buf[].
+*/
+SQLITE_PRIVATE u32 sqlite3VdbeSerialPut(u8 *buf, Mem *pMem, u32 serial_type){
+ u32 len;
+
+ /* Integer and Real */
+ if( serial_type<=7 && serial_type>0 ){
+ u64 v;
+ u32 i;
+ if( serial_type==7 ){
+ assert( sizeof(v)==sizeof(pMem->u.r) );
+ memcpy(&v, &pMem->u.r, sizeof(v));
+ swapMixedEndianFloat(v);
+ }else{
+ v = pMem->u.i;
+ }
+ len = i = sqlite3SmallTypeSizes[serial_type];
+ assert( i>0 );
+ do{
+ buf[--i] = (u8)(v&0xFF);
+ v >>= 8;
+ }while( i );
+ return len;
+ }
+
+ /* String or blob */
+ if( serial_type>=12 ){
+ assert( pMem->n + ((pMem->flags & MEM_Zero)?pMem->u.nZero:0)
+ == (int)sqlite3VdbeSerialTypeLen(serial_type) );
+ len = pMem->n;
+ if( len>0 ) memcpy(buf, pMem->z, len);
+ return len;
+ }
+
+ /* NULL or constants 0 or 1 */
+ return 0;
+}
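+/* Usage sketch (editorial illustration only, never compiled): serialize
+** the integer 0x0102 using serial type 2, which writes two big-endian
+** payload bytes.
+*/
+#if 0
+static void serialPutExample(void){
+  Mem m;
+  u8 buf[8];
+  memset(&m, 0, sizeof(m));
+  m.flags = MEM_Int;
+  m.u.i = 0x0102;
+  assert( sqlite3VdbeSerialPut(buf, &m, 2)==2 );
+  assert( buf[0]==0x01 && buf[1]==0x02 );  /* big-endian order */
+}
+#endif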
+
+/* Input "x" is a sequence of unsigned characters that represent a
+** big-endian integer.  Return the equivalent native integer.
+*/
+#define ONE_BYTE_INT(x) ((i8)(x)[0])
+#define TWO_BYTE_INT(x) (256*(i8)((x)[0])|(x)[1])
+#define THREE_BYTE_INT(x) (65536*(i8)((x)[0])|((x)[1]<<8)|(x)[2])
+#define FOUR_BYTE_UINT(x) (((u32)(x)[0]<<24)|((x)[1]<<16)|((x)[2]<<8)|(x)[3])
+#define FOUR_BYTE_INT(x) (16777216*(i8)((x)[0])|((x)[1]<<16)|((x)[2]<<8)|(x)[3])
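+/* Editorial sketch (never compiled): the (i8) cast of the high byte is
+** what gives these macros their sign extension.
+*/
+#if 0
+static void bigEndianMacroExample(void){
+  static const unsigned char neg[2] = { 0xff, 0xff };
+  static const unsigned char pos[2] = { 0x01, 0x00 };
+  assert( TWO_BYTE_INT(neg)==-1 );   /* 0xffff sign-extends to -1 */
+  assert( TWO_BYTE_INT(pos)==256 );  /* 0x0100 is 256 */
+}
+#endif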
+
+/*
+** Deserialize the data blob pointed to by buf as serial type serial_type
+** and store the result in pMem. Return the number of bytes read.
+**
+** This function is implemented as two separate routines for performance.
+** The few cases that require local variables are broken out into a separate
+** routine so that in most cases the overhead of moving the stack pointer
+** is avoided.
+*/
+static u32 serialGet(
+ const unsigned char *buf, /* Buffer to deserialize from */
+ u32 serial_type, /* Serial type to deserialize */
+ Mem *pMem /* Memory cell to write value into */
+){
+ u64 x = FOUR_BYTE_UINT(buf);
+ u32 y = FOUR_BYTE_UINT(buf+4);
+ x = (x<<32) + y;
+ if( serial_type==6 ){
+ /* EVIDENCE-OF: R-29851-52272 Value is a big-endian 64-bit
+ ** twos-complement integer. */
+ pMem->u.i = *(i64*)&x;
+ pMem->flags = MEM_Int;
+ testcase( pMem->u.i<0 );
+ }else{
+ /* EVIDENCE-OF: R-57343-49114 Value is a big-endian IEEE 754-2008 64-bit
+ ** floating point number. */
+#if !defined(NDEBUG) && !defined(SQLITE_OMIT_FLOATING_POINT)
+ /* Verify that integers and floating point values use the same
+ ** byte order. Or, that if SQLITE_MIXED_ENDIAN_64BIT_FLOAT is
+ ** defined that 64-bit floating point values really are mixed
+ ** endian.
+ */
+ static const u64 t1 = ((u64)0x3ff00000)<<32;
+ static const double r1 = 1.0;
+ u64 t2 = t1;
+ swapMixedEndianFloat(t2);
+ assert( sizeof(r1)==sizeof(t2) && memcmp(&r1, &t2, sizeof(r1))==0 );
+#endif
+ assert( sizeof(x)==8 && sizeof(pMem->u.r)==8 );
+ swapMixedEndianFloat(x);
+ memcpy(&pMem->u.r, &x, sizeof(x));
+ pMem->flags = IsNaN(x) ? MEM_Null : MEM_Real;
+ }
+ return 8;
+}
+SQLITE_PRIVATE u32 sqlite3VdbeSerialGet(
+ const unsigned char *buf, /* Buffer to deserialize from */
+ u32 serial_type, /* Serial type to deserialize */
+ Mem *pMem /* Memory cell to write value into */
+){
+ switch( serial_type ){
+ case 10: { /* Internal use only: NULL with virtual table
+ ** UPDATE no-change flag set */
+ pMem->flags = MEM_Null|MEM_Zero;
+ pMem->n = 0;
+ pMem->u.nZero = 0;
+ break;
+ }
+ case 11: /* Reserved for future use */
+ case 0: { /* Null */
+ /* EVIDENCE-OF: R-24078-09375 Value is a NULL. */
+ pMem->flags = MEM_Null;
+ break;
+ }
+ case 1: {
+ /* EVIDENCE-OF: R-44885-25196 Value is an 8-bit twos-complement
+ ** integer. */
+ pMem->u.i = ONE_BYTE_INT(buf);
+ pMem->flags = MEM_Int;
+ testcase( pMem->u.i<0 );
+ return 1;
+ }
+ case 2: { /* 2-byte signed integer */
+ /* EVIDENCE-OF: R-49794-35026 Value is a big-endian 16-bit
+ ** twos-complement integer. */
+ pMem->u.i = TWO_BYTE_INT(buf);
+ pMem->flags = MEM_Int;
+ testcase( pMem->u.i<0 );
+ return 2;
+ }
+ case 3: { /* 3-byte signed integer */
+ /* EVIDENCE-OF: R-37839-54301 Value is a big-endian 24-bit
+ ** twos-complement integer. */
+ pMem->u.i = THREE_BYTE_INT(buf);
+ pMem->flags = MEM_Int;
+ testcase( pMem->u.i<0 );
+ return 3;
+ }
+ case 4: { /* 4-byte signed integer */
+ /* EVIDENCE-OF: R-01849-26079 Value is a big-endian 32-bit
+ ** twos-complement integer. */
+ pMem->u.i = FOUR_BYTE_INT(buf);
+#ifdef __HP_cc
+ /* Work around a sign-extension bug in the HP compiler for HP/UX */
+ if( buf[0]&0x80 ) pMem->u.i |= 0xffffffff80000000LL;
+#endif
+ pMem->flags = MEM_Int;
+ testcase( pMem->u.i<0 );
+ return 4;
+ }
+ case 5: { /* 6-byte signed integer */
+ /* EVIDENCE-OF: R-50385-09674 Value is a big-endian 48-bit
+ ** twos-complement integer. */
+ pMem->u.i = FOUR_BYTE_UINT(buf+2) + (((i64)1)<<32)*TWO_BYTE_INT(buf);
+ pMem->flags = MEM_Int;
+ testcase( pMem->u.i<0 );
+ return 6;
+ }
+ case 6: /* 8-byte signed integer */
+ case 7: { /* IEEE floating point */
+ /* These use local variables, so do them in a separate routine
+ ** to avoid having to move the frame pointer in the common case */
+ return serialGet(buf,serial_type,pMem);
+ }
+ case 8: /* Integer 0 */
+ case 9: { /* Integer 1 */
+ /* EVIDENCE-OF: R-12976-22893 Value is the integer 0. */
+ /* EVIDENCE-OF: R-18143-12121 Value is the integer 1. */
+ pMem->u.i = serial_type-8;
+ pMem->flags = MEM_Int;
+ return 0;
+ }
+ default: {
+ /* EVIDENCE-OF: R-14606-31564 Value is a BLOB that is (N-12)/2 bytes in
+ ** length.
+ ** EVIDENCE-OF: R-28401-00140 Value is a string in the text encoding and
+ ** (N-13)/2 bytes in length. */
+ static const u16 aFlag[] = { MEM_Blob|MEM_Ephem, MEM_Str|MEM_Ephem };
+ pMem->z = (char *)buf;
+ pMem->n = (serial_type-12)/2;
+ pMem->flags = aFlag[serial_type&1];
+ return pMem->n;
+ }
+ }
+ return 0;
+}
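+/* Usage sketch (editorial illustration only, never compiled): decode a
+** 2-byte big-endian integer (serial type 2) back into a Mem cell.
+*/
+#if 0
+static void serialGetExample(void){
+  static const unsigned char buf[2] = { 0x01, 0x02 };
+  Mem m;
+  memset(&m, 0, sizeof(m));
+  assert( sqlite3VdbeSerialGet(buf, 2, &m)==2 );
+  assert( (m.flags & MEM_Int)!=0 && m.u.i==0x0102 );
+}
+#endif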
+/*
+** This routine is used to allocate sufficient space for an UnpackedRecord
+** structure large enough to be used with sqlite3VdbeRecordUnpack() if
+** the first argument is a pointer to KeyInfo structure pKeyInfo.
+**
+** The space is allocated using sqlite3DbMallocRaw().  The returned
+** object should eventually be freed by the caller using
+** sqlite3DbFree().
+**
+** If an OOM error occurs, NULL is returned.
+*/
+SQLITE_PRIVATE UnpackedRecord *sqlite3VdbeAllocUnpackedRecord(
+ KeyInfo *pKeyInfo /* Description of the record */
+){
+ UnpackedRecord *p; /* Unpacked record to return */
+ int nByte; /* Number of bytes required for *p */
+ nByte = ROUND8(sizeof(UnpackedRecord)) + sizeof(Mem)*(pKeyInfo->nKeyField+1);
+ p = (UnpackedRecord *)sqlite3DbMallocRaw(pKeyInfo->db, nByte);
+ if( !p ) return 0;
+ p->aMem = (Mem*)&((char*)p)[ROUND8(sizeof(UnpackedRecord))];
+ assert( pKeyInfo->aSortFlags!=0 );
+ p->pKeyInfo = pKeyInfo;
+ p->nField = pKeyInfo->nKeyField + 1;
+ return p;
+}
+
+/*
+** Given the nKey-byte encoding of a record in pKey[], populate the
+** UnpackedRecord structure indicated by the fourth argument with the
+** contents of the decoded record.
+*/
+SQLITE_PRIVATE void sqlite3VdbeRecordUnpack(
+ KeyInfo *pKeyInfo, /* Information about the record format */
+ int nKey, /* Size of the binary record */
+ const void *pKey, /* The binary record */
+ UnpackedRecord *p /* Populate this structure before returning. */
+){
+ const unsigned char *aKey = (const unsigned char *)pKey;
+ u32 d;
+ u32 idx; /* Offset in aKey[] to read from */
+ u16 u; /* Unsigned loop counter */
+ u32 szHdr;
+ Mem *pMem = p->aMem;
+
+ p->default_rc = 0;
+ assert( EIGHT_BYTE_ALIGNMENT(pMem) );
+ idx = getVarint32(aKey, szHdr);
+ d = szHdr;
+ u = 0;
+ while( idx<szHdr && d<=(u32)nKey ){
+ u32 serial_type;
+
+ idx += getVarint32(&aKey[idx], serial_type);
+ pMem->enc = pKeyInfo->enc;
+ pMem->db = pKeyInfo->db;
+ /* pMem->flags = 0; // sqlite3VdbeSerialGet() will set this for us */
+ pMem->szMalloc = 0;
+ pMem->z = 0;
+ d += sqlite3VdbeSerialGet(&aKey[d], serial_type, pMem);
+ pMem++;
+ if( (++u)>=p->nField ) break;
+ }
+ if( d>(u32)nKey && u ){
+ assert( CORRUPT_DB );
+ /* In a corrupt record entry, the last pMem might have been set up using
+ ** uninitialized memory. Overwrite its value with NULL, to prevent
+ ** warnings from MSAN. */
+ sqlite3VdbeMemSetNull(pMem-1);
+ }
+ assert( u<=pKeyInfo->nKeyField + 1 );
+ p->nField = u;
+}
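+/* Usage sketch (editorial illustration only, never compiled; assumes that
+** pKeyInfo, nKey and pKey describe a valid record): allocate, unpack and
+** release an UnpackedRecord.
+*/
+#if 0
+static void unpackRecordExample(KeyInfo *pKeyInfo, int nKey, const void *pKey){
+  UnpackedRecord *p = sqlite3VdbeAllocUnpackedRecord(pKeyInfo);
+  if( p ){
+    sqlite3VdbeRecordUnpack(pKeyInfo, nKey, pKey, p);
+    /* ... examine p->aMem[0 .. p->nField-1] here ... */
+    sqlite3DbFree(pKeyInfo->db, p);
+  }
+}
+#endif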
+
+#ifdef SQLITE_DEBUG
+/*
+** This function compares two index or table record keys in the same way
+** as the sqlite3VdbeRecordCompare() routine. Unlike VdbeRecordCompare(),
+** this function deserializes and compares values using the
+** sqlite3VdbeSerialGet() and sqlite3MemCompare() functions. It is used
+** in assert() statements to ensure that the optimized code in
+** sqlite3VdbeRecordCompare() returns results consistent with these primitives.
+**
+** Return true if the result of comparison is equivalent to desiredResult.
+** Return false if there is a disagreement.
+*/
+static int vdbeRecordCompareDebug(
+ int nKey1, const void *pKey1, /* Left key */
+ const UnpackedRecord *pPKey2, /* Right key */
+ int desiredResult /* Correct answer */
+){
+ u32 d1; /* Offset into aKey[] of next data element */
+ u32 idx1; /* Offset into aKey[] of next header element */
+ u32 szHdr1; /* Number of bytes in header */
+ int i = 0;
+ int rc = 0;
+ const unsigned char *aKey1 = (const unsigned char *)pKey1;
+ KeyInfo *pKeyInfo;
+ Mem mem1;
+
+ pKeyInfo = pPKey2->pKeyInfo;
+ if( pKeyInfo->db==0 ) return 1;
+ mem1.enc = pKeyInfo->enc;
+ mem1.db = pKeyInfo->db;
+ /* mem1.flags = 0; // Will be initialized by sqlite3VdbeSerialGet() */
+ VVA_ONLY( mem1.szMalloc = 0; ) /* Only needed by assert() statements */
+
+ /* Compilers may complain that mem1.u.i is potentially uninitialized.
+ ** We could initialize it, as shown here, to silence those complaints.
+ ** But in fact, mem1.u.i will never actually be used uninitialized, and doing
+ ** the unnecessary initialization has a measurable negative performance
+ ** impact, since this routine is a very high runner. And so, we choose
+ ** to ignore the compiler warnings and leave this variable uninitialized.
+ */
+ /* mem1.u.i = 0; // not needed, here to silence compiler warning */
+
+ idx1 = getVarint32(aKey1, szHdr1);
+ if( szHdr1>98307 ) return SQLITE_CORRUPT;
+ d1 = szHdr1;
+ assert( pKeyInfo->nAllField>=pPKey2->nField || CORRUPT_DB );
+ assert( pKeyInfo->aSortFlags!=0 );
+ assert( pKeyInfo->nKeyField>0 );
+ assert( idx1<=szHdr1 || CORRUPT_DB );
+ do{
+ u32 serial_type1;
+
+ /* Read the serial types for the next element in each key. */
+ idx1 += getVarint32( aKey1+idx1, serial_type1 );
+
+ /* Verify that there is enough key space remaining to avoid
+ ** a buffer overread. The "d1+serial_type1+2" subexpression will
+ ** always be greater than or equal to the amount of required key space.
+ ** Use that approximation to avoid the more expensive call to
+ ** sqlite3VdbeSerialTypeLen() in the common case.
+ */
+ if( d1+(u64)serial_type1+2>(u64)nKey1
+ && d1+(u64)sqlite3VdbeSerialTypeLen(serial_type1)>(u64)nKey1
+ ){
+ break;
+ }
+
+ /* Extract the values to be compared.
+ */
+ d1 += sqlite3VdbeSerialGet(&aKey1[d1], serial_type1, &mem1);
+
+ /* Do the comparison
+ */
+ rc = sqlite3MemCompare(&mem1, &pPKey2->aMem[i],
+ pKeyInfo->nAllField>i ? pKeyInfo->aColl[i] : 0);
+ if( rc!=0 ){
+ assert( mem1.szMalloc==0 ); /* See comment below */
+ if( (pKeyInfo->aSortFlags[i] & KEYINFO_ORDER_BIGNULL)
+ && ((mem1.flags & MEM_Null) || (pPKey2->aMem[i].flags & MEM_Null))
+ ){
+ rc = -rc;
+ }
+ if( pKeyInfo->aSortFlags[i] & KEYINFO_ORDER_DESC ){
+ rc = -rc; /* Invert the result for DESC sort order. */
+ }
+ goto debugCompareEnd;
+ }
+ i++;
+ }while( idx1<szHdr1 && i<pPKey2->nField );
+
+ /* No memory allocation is ever used on mem1. Prove this using
+ ** the following assert(). If the assert() fails, it indicates a
+ ** memory leak and a need to call sqlite3VdbeMemRelease(&mem1).
+ */
+ assert( mem1.szMalloc==0 );
+
+ /* rc==0 here means that one of the keys ran out of fields and
+ ** all the fields up to that point were equal. Return the default_rc
+ ** value. */
+ rc = pPKey2->default_rc;
+
+debugCompareEnd:
+ if( desiredResult==0 && rc==0 ) return 1;
+ if( desiredResult<0 && rc<0 ) return 1;
+ if( desiredResult>0 && rc>0 ) return 1;
+ if( CORRUPT_DB ) return 1;
+ if( pKeyInfo->db->mallocFailed ) return 1;
+ return 0;
+}
+#endif
+
+#ifdef SQLITE_DEBUG
+/*
+** Count the number of fields (a.k.a. columns) in the record given by
+** pKey,nKey.  Then verify that this count is less than or equal to the
+** limit given by pKeyInfo->nAllField.
+**
+** If this constraint is not satisfied, it means that the high-speed
+** vdbeRecordCompareInt() and vdbeRecordCompareString() routines will
+** not work correctly. If this assert() ever fires, it probably means
+** that the KeyInfo.nKeyField or KeyInfo.nAllField values were computed
+** incorrectly.
+*/
+static void vdbeAssertFieldCountWithinLimits(
+ int nKey, const void *pKey, /* The record to verify */
+ const KeyInfo *pKeyInfo /* Compare size with this KeyInfo */
+){
+ int nField = 0;
+ u32 szHdr;
+ u32 idx;
+ u32 notUsed;
+ const unsigned char *aKey = (const unsigned char*)pKey;
+
+ if( CORRUPT_DB ) return;
+ idx = getVarint32(aKey, szHdr);
+ assert( nKey>=0 );
+ assert( szHdr<=(u32)nKey );
+ while( idx<szHdr ){
+ idx += getVarint32(aKey+idx, notUsed);
+ nField++;
+ }
+ assert( nField <= pKeyInfo->nAllField );
+}
+#else
+# define vdbeAssertFieldCountWithinLimits(A,B,C)
+#endif
+
+/*
+** Both *pMem1 and *pMem2 contain string values. Compare the two values
+** using the collation sequence pColl.  As usual, return a negative, zero
+** or positive value if *pMem1 is less than, equal to or greater than
+** *pMem2, respectively. Similar in spirit to "rc = (*pMem1) - (*pMem2);".
+*/
+static int vdbeCompareMemString(
+ const Mem *pMem1,
+ const Mem *pMem2,
+ const CollSeq *pColl,
+ u8 *prcErr /* If an OOM occurs, set to SQLITE_NOMEM */
+){
+ if( pMem1->enc==pColl->enc ){
+ /* The strings are already in the correct encoding. Call the
+ ** comparison function directly */
+ return pColl->xCmp(pColl->pUser,pMem1->n,pMem1->z,pMem2->n,pMem2->z);
+ }else{
+ int rc;
+ const void *v1, *v2;
+ Mem c1;
+ Mem c2;
+ sqlite3VdbeMemInit(&c1, pMem1->db, MEM_Null);
+ sqlite3VdbeMemInit(&c2, pMem1->db, MEM_Null);
+ sqlite3VdbeMemShallowCopy(&c1, pMem1, MEM_Ephem);
+ sqlite3VdbeMemShallowCopy(&c2, pMem2, MEM_Ephem);
+ v1 = sqlite3ValueText((sqlite3_value*)&c1, pColl->enc);
+ v2 = sqlite3ValueText((sqlite3_value*)&c2, pColl->enc);
+ if( (v1==0 || v2==0) ){
+ if( prcErr ) *prcErr = SQLITE_NOMEM_BKPT;
+ rc = 0;
+ }else{
+ rc = pColl->xCmp(pColl->pUser, c1.n, v1, c2.n, v2);
+ }
+ sqlite3VdbeMemRelease(&c1);
+ sqlite3VdbeMemRelease(&c2);
+ return rc;
+ }
+}
+
+/*
+** The input z, of length n bytes, is the content of a Blob that is not
+** marked with MEM_Zero.  Return true if it could be a zero-blob (i.e.
+** if every byte of the content is zero).
+*/
+static int isAllZero(const char *z, int n){
+ int i;
+ for(i=0; i<n; i++){
+ if( z[i] ) return 0;
+ }
+ return 1;
+}
+
+/*
+** Compare two blobs. Return negative, zero, or positive if the first
+** is less than, equal to, or greater than the second, respectively.
+** If one blob is a prefix of the other, then the shorter is the lesser.
+*/
+SQLITE_PRIVATE SQLITE_NOINLINE int sqlite3BlobCompare(const Mem *pB1, const Mem *pB2){
+ int c;
+ int n1 = pB1->n;
+ int n2 = pB2->n;
+
+ /* It is possible to have a Blob value that has some non-zero content
+ ** followed by zero content. But that only comes up for Blobs formed
+ ** by the OP_MakeRecord opcode, and such Blobs never get passed into
+ ** sqlite3MemCompare(). */
+ assert( (pB1->flags & MEM_Zero)==0 || n1==0 );
+ assert( (pB2->flags & MEM_Zero)==0 || n2==0 );
+
+ if( (pB1->flags|pB2->flags) & MEM_Zero ){
+ if( pB1->flags & pB2->flags & MEM_Zero ){
+ return pB1->u.nZero - pB2->u.nZero;
+ }else if( pB1->flags & MEM_Zero ){
+ if( !isAllZero(pB2->z, pB2->n) ) return -1;
+ return pB1->u.nZero - n2;
+ }else{
+ if( !isAllZero(pB1->z, pB1->n) ) return +1;
+ return n1 - pB2->u.nZero;
+ }
+ }
+ c = memcmp(pB1->z, pB2->z, n1>n2 ? n2 : n1);
+ if( c ) return c;
+ return n1 - n2;
+}
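+/* Editorial sketch (never compiled): "ab" is a prefix of "abc", so the
+** shorter blob compares as the lesser.
+*/
+#if 0
+static void blobCompareExample(void){
+  Mem b1, b2;
+  memset(&b1, 0, sizeof(b1));
+  memset(&b2, 0, sizeof(b2));
+  b1.flags = MEM_Blob;  b1.z = "ab";   b1.n = 2;
+  b2.flags = MEM_Blob;  b2.z = "abc";  b2.n = 3;
+  assert( sqlite3BlobCompare(&b1, &b2)<0 );
+}
+#endif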
+
+/*
+** Do a comparison between a 64-bit signed integer and a 64-bit floating-point
+** number. Return negative, zero, or positive if the first (i64) is less than,
+** equal to, or greater than the second (double).
+*/
+static int sqlite3IntFloatCompare(i64 i, double r){
+ if( sizeof(LONGDOUBLE_TYPE)>8 ){
+ LONGDOUBLE_TYPE x = (LONGDOUBLE_TYPE)i;
+ if( x<r ) return -1;
+ if( x>r ) return +1;
+ return 0;
+ }else{
+ i64 y;
+ double s;
+ if( r<-9223372036854775808.0 ) return +1;
+ if( r>=9223372036854775808.0 ) return -1;
+ y = (i64)r;
+ if( i<y ) return -1;
+ if( i>y ) return +1;
+ s = (double)i;
+ if( s<r ) return -1;
+ if( s>r ) return +1;
+ return 0;
+ }
+}
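+/* Editorial sketch (never compiled): the comparison is exact even though
+** one operand is an integer and the other a double.
+*/
+#if 0
+static void intFloatCompareExample(void){
+  assert( sqlite3IntFloatCompare(10, 10.5)<0 );
+  assert( sqlite3IntFloatCompare(10, 9.5)>0 );
+  assert( sqlite3IntFloatCompare(10, 10.0)==0 );
+}
+#endif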
+
+/*
+** Compare the values contained by the two memory cells, returning
+** negative, zero or positive if pMem1 is less than, equal to, or greater
+** than pMem2. Sorting order is NULLs first, followed by numbers (integers
+** and reals) sorted numerically, followed by text ordered by the collating
+** sequence pColl and finally blobs ordered by memcmp().
+**
+** Two NULL values are considered equal by this function.
+*/
+SQLITE_PRIVATE int sqlite3MemCompare(const Mem *pMem1, const Mem *pMem2, const CollSeq *pColl){
+ int f1, f2;
+ int combined_flags;
+
+ f1 = pMem1->flags;
+ f2 = pMem2->flags;
+ combined_flags = f1|f2;
+ assert( !sqlite3VdbeMemIsRowSet(pMem1) && !sqlite3VdbeMemIsRowSet(pMem2) );
+
+ /* If one value is NULL, it is less than the other. If both values
+ ** are NULL, return 0.
+ */
+ if( combined_flags&MEM_Null ){
+ return (f2&MEM_Null) - (f1&MEM_Null);
+ }
+
+ /* At least one of the two values is a number
+ */
+ if( combined_flags&(MEM_Int|MEM_Real|MEM_IntReal) ){
+ testcase( combined_flags & MEM_Int );
+ testcase( combined_flags & MEM_Real );
+ testcase( combined_flags & MEM_IntReal );
+ if( (f1 & f2 & (MEM_Int|MEM_IntReal))!=0 ){
+ testcase( f1 & f2 & MEM_Int );
+ testcase( f1 & f2 & MEM_IntReal );
+ if( pMem1->u.i < pMem2->u.i ) return -1;
+ if( pMem1->u.i > pMem2->u.i ) return +1;
+ return 0;
+ }
+ if( (f1 & f2 & MEM_Real)!=0 ){
+ if( pMem1->u.r < pMem2->u.r ) return -1;
+ if( pMem1->u.r > pMem2->u.r ) return +1;
+ return 0;
+ }
+ if( (f1&(MEM_Int|MEM_IntReal))!=0 ){
+ testcase( f1 & MEM_Int );
+ testcase( f1 & MEM_IntReal );
+ if( (f2&MEM_Real)!=0 ){
+ return sqlite3IntFloatCompare(pMem1->u.i, pMem2->u.r);
+ }else if( (f2&(MEM_Int|MEM_IntReal))!=0 ){
+ if( pMem1->u.i < pMem2->u.i ) return -1;
+ if( pMem1->u.i > pMem2->u.i ) return +1;
+ return 0;
+ }else{
+ return -1;
+ }
+ }
+ if( (f1&MEM_Real)!=0 ){
+ if( (f2&(MEM_Int|MEM_IntReal))!=0 ){
+ testcase( f2 & MEM_Int );
+ testcase( f2 & MEM_IntReal );
+ return -sqlite3IntFloatCompare(pMem2->u.i, pMem1->u.r);
+ }else{
+ return -1;
+ }
+ }
+ return +1;
+ }
+
+ /* If one value is a string and the other is a blob, the string is less.
+ ** If both are strings, compare using the collating functions.
+ */
+ if( combined_flags&MEM_Str ){
+ if( (f1 & MEM_Str)==0 ){
+ return 1;
+ }
+ if( (f2 & MEM_Str)==0 ){
+ return -1;
+ }
+
+ assert( pMem1->enc==pMem2->enc || pMem1->db->mallocFailed );
+ assert( pMem1->enc==SQLITE_UTF8 ||
+ pMem1->enc==SQLITE_UTF16LE || pMem1->enc==SQLITE_UTF16BE );
+
+ /* The collation sequence must be defined at this point, even if
+ ** the user deletes the collation sequence after the vdbe program is
+ ** compiled (this was not always the case).
+ */
+ assert( !pColl || pColl->xCmp );
+
+ if( pColl ){
+ return vdbeCompareMemString(pMem1, pMem2, pColl, 0);
+ }
+ /* If a NULL pointer was passed as the collate function, fall through
+ ** to the blob case and use memcmp(). */
+ }
+
+ /* Both values must be blobs. Compare using memcmp(). */
+ return sqlite3BlobCompare(pMem1, pMem2);
+}
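+/* Editorial sketch (never compiled): NULL sorts before any number, so a
+** MEM_Null cell compares as less than a MEM_Int cell.
+*/
+#if 0
+static void memCompareExample(void){
+  Mem a, b;
+  memset(&a, 0, sizeof(a));
+  memset(&b, 0, sizeof(b));
+  a.flags = MEM_Null;
+  b.flags = MEM_Int;  b.u.i = 42;
+  assert( sqlite3MemCompare(&a, &b, 0)<0 );
+}
+#endif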
+
+
+/*
+** The first argument passed to this function is a serial-type that
+** corresponds to an integer - all values between 1 and 9 inclusive
+** except 7. The second points to a buffer containing an integer value
+** serialized according to serial_type. This function deserializes
+** and returns the value.
+*/
+static i64 vdbeRecordDecodeInt(u32 serial_type, const u8 *aKey){
+ u32 y;
+ assert( CORRUPT_DB || (serial_type>=1 && serial_type<=9 && serial_type!=7) );
+ switch( serial_type ){
+ case 0:
+ case 1:
+ testcase( aKey[0]&0x80 );
+ return ONE_BYTE_INT(aKey);
+ case 2:
+ testcase( aKey[0]&0x80 );
+ return TWO_BYTE_INT(aKey);
+ case 3:
+ testcase( aKey[0]&0x80 );
+ return THREE_BYTE_INT(aKey);
+ case 4: {
+ testcase( aKey[0]&0x80 );
+ y = FOUR_BYTE_UINT(aKey);
+ return (i64)*(int*)&y;
+ }
+ case 5: {
+ testcase( aKey[0]&0x80 );
+ return FOUR_BYTE_UINT(aKey+2) + (((i64)1)<<32)*TWO_BYTE_INT(aKey);
+ }
+ case 6: {
+ u64 x = FOUR_BYTE_UINT(aKey);
+ testcase( aKey[0]&0x80 );
+ x = (x<<32) | FOUR_BYTE_UINT(aKey+4);
+ return (i64)*(i64*)&x;
+ }
+ }
+
+ return (serial_type - 8);
+}
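+/* Editorial sketch (never compiled): serial type 1 reads one payload byte
+** with sign extension, while types 8 and 9 encode the constants 0 and 1
+** and read no payload at all.
+*/
+#if 0
+static void recordDecodeIntExample(void){
+  static const u8 aPayload[1] = { 0xfe };
+  assert( vdbeRecordDecodeInt(1, aPayload)==-2 );  /* (i8)0xfe == -2 */
+  assert( vdbeRecordDecodeInt(8, aPayload)==0 );   /* constant 0 */
+  assert( vdbeRecordDecodeInt(9, aPayload)==1 );   /* constant 1 */
+}
+#endif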
+
+/*
+** This function compares the two table rows or index records
+** specified by {nKey1, pKey1} and pPKey2. It returns a negative, zero
+** or positive integer if key1 is less than, equal to or
+** greater than key2. The {nKey1, pKey1} key must be a blob
+** created by the OP_MakeRecord opcode of the VDBE. The pPKey2
+** key must be a parsed key such as obtained from
+** sqlite3VdbeParseRecord.
+**
+** If argument bSkip is non-zero, it is assumed that the caller has already
+** determined that the first fields of the keys are equal.
+**
+** Key1 and Key2 do not have to contain the same number of fields. If all
+** fields that appear in both keys are equal, then pPKey2->default_rc is
+** returned.
+**
+** If database corruption is discovered, set pPKey2->errCode to
+** SQLITE_CORRUPT and return 0. If an OOM error is encountered,
+** pPKey2->errCode is set to SQLITE_NOMEM and, if the database handle
+** (pPKey2->pKeyInfo->db) is not NULL, its malloc-failed flag is set.
+*/
+SQLITE_PRIVATE int sqlite3VdbeRecordCompareWithSkip(
+ int nKey1, const void *pKey1, /* Left key */
+ UnpackedRecord *pPKey2, /* Right key */
+ int bSkip /* If true, skip the first field */
+){
+ u32 d1; /* Offset into aKey[] of next data element */
+ int i; /* Index of next field to compare */
+ u32 szHdr1; /* Size of record header in bytes */
+ u32 idx1; /* Offset of first type in header */
+ int rc = 0; /* Return value */
+ Mem *pRhs = pPKey2->aMem; /* Next field of pPKey2 to compare */
+ KeyInfo *pKeyInfo;
+ const unsigned char *aKey1 = (const unsigned char *)pKey1;
+ Mem mem1;
+
+ /* If bSkip is true, then the caller has already determined that the first
+ ** two elements in the keys are equal. Fix the various stack variables so
+ ** that this routine begins comparing at the second field. */
+ if( bSkip ){
+ u32 s1;
+ idx1 = 1 + getVarint32(&aKey1[1], s1);
+ szHdr1 = aKey1[0];
+ d1 = szHdr1 + sqlite3VdbeSerialTypeLen(s1);
+ i = 1;
+ pRhs++;
+ }else{
+ idx1 = getVarint32(aKey1, szHdr1);
+ d1 = szHdr1;
+ i = 0;
+ }
+ if( d1>(unsigned)nKey1 ){
+ pPKey2->errCode = (u8)SQLITE_CORRUPT_BKPT;
+ return 0; /* Corruption */
+ }
+
+ VVA_ONLY( mem1.szMalloc = 0; ) /* Only needed by assert() statements */
+ assert( pPKey2->pKeyInfo->nAllField>=pPKey2->nField
+ || CORRUPT_DB );
+ assert( pPKey2->pKeyInfo->aSortFlags!=0 );
+ assert( pPKey2->pKeyInfo->nKeyField>0 );
+ assert( idx1<=szHdr1 || CORRUPT_DB );
+ do{
+ u32 serial_type;
+
+ /* RHS is an integer */
+ if( pRhs->flags & (MEM_Int|MEM_IntReal) ){
+ testcase( pRhs->flags & MEM_Int );
+ testcase( pRhs->flags & MEM_IntReal );
+ serial_type = aKey1[idx1];
+ testcase( serial_type==12 );
+ if( serial_type>=10 ){
+ rc = +1;
+ }else if( serial_type==0 ){
+ rc = -1;
+ }else if( serial_type==7 ){
+ sqlite3VdbeSerialGet(&aKey1[d1], serial_type, &mem1);
+ rc = -sqlite3IntFloatCompare(pRhs->u.i, mem1.u.r);
+ }else{
+ i64 lhs = vdbeRecordDecodeInt(serial_type, &aKey1[d1]);
+ i64 rhs = pRhs->u.i;
+ if( lhs<rhs ){
+ rc = -1;
+ }else if( lhs>rhs ){
+ rc = +1;
+ }
+ }
+ }
+
+ /* RHS is real */
+ else if( pRhs->flags & MEM_Real ){
+ serial_type = aKey1[idx1];
+ if( serial_type>=10 ){
+ /* Serial types 12 or greater are strings and blobs (greater than
+ ** numbers). Types 10 and 11 are currently "reserved for future
+ ** use", so it doesn't really matter what the results of comparing
+      ** them to numeric values are. */
+ rc = +1;
+ }else if( serial_type==0 ){
+ rc = -1;
+ }else{
+ sqlite3VdbeSerialGet(&aKey1[d1], serial_type, &mem1);
+ if( serial_type==7 ){
+ if( mem1.u.r<pRhs->u.r ){
+ rc = -1;
+ }else if( mem1.u.r>pRhs->u.r ){
+ rc = +1;
+ }
+ }else{
+ rc = sqlite3IntFloatCompare(mem1.u.i, pRhs->u.r);
+ }
+ }
+ }
+
+ /* RHS is a string */
+ else if( pRhs->flags & MEM_Str ){
+ getVarint32NR(&aKey1[idx1], serial_type);
+ testcase( serial_type==12 );
+ if( serial_type<12 ){
+ rc = -1;
+ }else if( !(serial_type & 0x01) ){
+ rc = +1;
+ }else{
+ mem1.n = (serial_type - 12) / 2;
+ testcase( (d1+mem1.n)==(unsigned)nKey1 );
+ testcase( (d1+mem1.n+1)==(unsigned)nKey1 );
+ if( (d1+mem1.n) > (unsigned)nKey1
+ || (pKeyInfo = pPKey2->pKeyInfo)->nAllField<=i
+ ){
+ pPKey2->errCode = (u8)SQLITE_CORRUPT_BKPT;
+ return 0; /* Corruption */
+ }else if( pKeyInfo->aColl[i] ){
+ mem1.enc = pKeyInfo->enc;
+ mem1.db = pKeyInfo->db;
+ mem1.flags = MEM_Str;
+ mem1.z = (char*)&aKey1[d1];
+ rc = vdbeCompareMemString(
+ &mem1, pRhs, pKeyInfo->aColl[i], &pPKey2->errCode
+ );
+ }else{
+ int nCmp = MIN(mem1.n, pRhs->n);
+ rc = memcmp(&aKey1[d1], pRhs->z, nCmp);
+ if( rc==0 ) rc = mem1.n - pRhs->n;
+ }
+ }
+ }
+
+ /* RHS is a blob */
+ else if( pRhs->flags & MEM_Blob ){
+ assert( (pRhs->flags & MEM_Zero)==0 || pRhs->n==0 );
+ getVarint32NR(&aKey1[idx1], serial_type);
+ testcase( serial_type==12 );
+ if( serial_type<12 || (serial_type & 0x01) ){
+ rc = -1;
+ }else{
+ int nStr = (serial_type - 12) / 2;
+ testcase( (d1+nStr)==(unsigned)nKey1 );
+ testcase( (d1+nStr+1)==(unsigned)nKey1 );
+ if( (d1+nStr) > (unsigned)nKey1 ){
+ pPKey2->errCode = (u8)SQLITE_CORRUPT_BKPT;
+ return 0; /* Corruption */
+ }else if( pRhs->flags & MEM_Zero ){
+ if( !isAllZero((const char*)&aKey1[d1],nStr) ){
+ rc = 1;
+ }else{
+ rc = nStr - pRhs->u.nZero;
+ }
+ }else{
+ int nCmp = MIN(nStr, pRhs->n);
+ rc = memcmp(&aKey1[d1], pRhs->z, nCmp);
+ if( rc==0 ) rc = nStr - pRhs->n;
+ }
+ }
+ }
+
+ /* RHS is null */
+ else{
+ serial_type = aKey1[idx1];
+ rc = (serial_type!=0);
+ }
+
+ if( rc!=0 ){
+ int sortFlags = pPKey2->pKeyInfo->aSortFlags[i];
+ if( sortFlags ){
+ if( (sortFlags & KEYINFO_ORDER_BIGNULL)==0
+ || ((sortFlags & KEYINFO_ORDER_DESC)
+ !=(serial_type==0 || (pRhs->flags&MEM_Null)))
+ ){
+ rc = -rc;
+ }
+ }
+ assert( vdbeRecordCompareDebug(nKey1, pKey1, pPKey2, rc) );
+ assert( mem1.szMalloc==0 ); /* See comment below */
+ return rc;
+ }
+
+ i++;
+ if( i==pPKey2->nField ) break;
+ pRhs++;
+ d1 += sqlite3VdbeSerialTypeLen(serial_type);
+ idx1 += sqlite3VarintLen(serial_type);
+ }while( idx1<(unsigned)szHdr1 && d1<=(unsigned)nKey1 );
+
+ /* No memory allocation is ever used on mem1. Prove this using
+ ** the following assert(). If the assert() fails, it indicates a
+ ** memory leak and a need to call sqlite3VdbeMemRelease(&mem1). */
+ assert( mem1.szMalloc==0 );
+
+ /* rc==0 here means that one or both of the keys ran out of fields and
+ ** all the fields up to that point were equal. Return the default_rc
+ ** value. */
+ assert( CORRUPT_DB
+ || vdbeRecordCompareDebug(nKey1, pKey1, pPKey2, pPKey2->default_rc)
+ || pPKey2->pKeyInfo->db->mallocFailed
+ );
+ pPKey2->eqSeen = 1;
+ return pPKey2->default_rc;
+}
+SQLITE_PRIVATE int sqlite3VdbeRecordCompare(
+ int nKey1, const void *pKey1, /* Left key */
+ UnpackedRecord *pPKey2 /* Right key */
+){
+ return sqlite3VdbeRecordCompareWithSkip(nKey1, pKey1, pPKey2, 0);
+}
+
+
+/*
+** This function is an optimized version of sqlite3VdbeRecordCompare()
+** for use when (a) the first field of pPKey2 is an integer, and (b) the
+** size-of-header varint at the start of (pKey1/nKey1) fits in a single
+** byte (i.e. is less than 128).
+**
+** To avoid concerns about buffer overreads, this routine is only used
+** on schemas where the maximum valid header size is 63 bytes or less.
+*/
+static int vdbeRecordCompareInt(
+ int nKey1, const void *pKey1, /* Left key */
+ UnpackedRecord *pPKey2 /* Right key */
+){
+ const u8 *aKey = &((const u8*)pKey1)[*(const u8*)pKey1 & 0x3F];
+ int serial_type = ((const u8*)pKey1)[1];
+ int res;
+ u32 y;
+ u64 x;
+ i64 v;
+ i64 lhs;
+
+ vdbeAssertFieldCountWithinLimits(nKey1, pKey1, pPKey2->pKeyInfo);
+ assert( (*(u8*)pKey1)<=0x3F || CORRUPT_DB );
+ switch( serial_type ){
+ case 1: { /* 1-byte signed integer */
+ lhs = ONE_BYTE_INT(aKey);
+ testcase( lhs<0 );
+ break;
+ }
+ case 2: { /* 2-byte signed integer */
+ lhs = TWO_BYTE_INT(aKey);
+ testcase( lhs<0 );
+ break;
+ }
+ case 3: { /* 3-byte signed integer */
+ lhs = THREE_BYTE_INT(aKey);
+ testcase( lhs<0 );
+ break;
+ }
+ case 4: { /* 4-byte signed integer */
+ y = FOUR_BYTE_UINT(aKey);
+ lhs = (i64)*(int*)&y;
+ testcase( lhs<0 );
+ break;
+ }
+ case 5: { /* 6-byte signed integer */
+ lhs = FOUR_BYTE_UINT(aKey+2) + (((i64)1)<<32)*TWO_BYTE_INT(aKey);
+ testcase( lhs<0 );
+ break;
+ }
+ case 6: { /* 8-byte signed integer */
+ x = FOUR_BYTE_UINT(aKey);
+ x = (x<<32) | FOUR_BYTE_UINT(aKey+4);
+ lhs = *(i64*)&x;
+ testcase( lhs<0 );
+ break;
+ }
+ case 8:
+ lhs = 0;
+ break;
+ case 9:
+ lhs = 1;
+ break;
+
+ /* This case could be removed without changing the results of running
+ ** this code. Including it causes gcc to generate a faster switch
+ ** statement (since the range of switch targets now starts at zero and
+ ** is contiguous) but does not cause any duplicate code to be generated
+ ** (as gcc is clever enough to combine the two like cases). Other
+ ** compilers might be similar. */
+ case 0: case 7:
+ return sqlite3VdbeRecordCompare(nKey1, pKey1, pPKey2);
+
+ default:
+ return sqlite3VdbeRecordCompare(nKey1, pKey1, pPKey2);
+ }
+
+ v = pPKey2->aMem[0].u.i;
+ if( v>lhs ){
+ res = pPKey2->r1;
+ }else if( v<lhs ){
+ res = pPKey2->r2;
+ }else if( pPKey2->nField>1 ){
+ /* The first fields of the two keys are equal. Compare the trailing
+ ** fields. */
+ res = sqlite3VdbeRecordCompareWithSkip(nKey1, pKey1, pPKey2, 1);
+ }else{
+ /* The first fields of the two keys are equal and there are no trailing
+ ** fields. Return pPKey2->default_rc in this case. */
+ res = pPKey2->default_rc;
+ pPKey2->eqSeen = 1;
+ }
+
+ assert( vdbeRecordCompareDebug(nKey1, pKey1, pPKey2, res) );
+ return res;
+}
+
+/*
+** This function is an optimized version of sqlite3VdbeRecordCompare()
+** that may be used when (a) the first field of pPKey2 is a string, (b) the
+** first field uses the collation sequence BINARY, and (c) the
+** size-of-header varint at the start of (pKey1/nKey1) fits in a single byte.
+*/
+static int vdbeRecordCompareString(
+ int nKey1, const void *pKey1, /* Left key */
+ UnpackedRecord *pPKey2 /* Right key */
+){
+ const u8 *aKey1 = (const u8*)pKey1;
+ int serial_type;
+ int res;
+
+ assert( pPKey2->aMem[0].flags & MEM_Str );
+ vdbeAssertFieldCountWithinLimits(nKey1, pKey1, pPKey2->pKeyInfo);
+ serial_type = (u8)(aKey1[1]);
+ if( serial_type >= 0x80 ){
+ sqlite3GetVarint32(&aKey1[1], (u32*)&serial_type);
+ }
+ if( serial_type<12 ){
+ res = pPKey2->r1; /* (pKey1/nKey1) is a number or a null */
+ }else if( !(serial_type & 0x01) ){
+ res = pPKey2->r2; /* (pKey1/nKey1) is a blob */
+ }else{
+ int nCmp;
+ int nStr;
+ int szHdr = aKey1[0];
+
+ nStr = (serial_type-12) / 2;
+ if( (szHdr + nStr) > nKey1 ){
+ pPKey2->errCode = (u8)SQLITE_CORRUPT_BKPT;
+ return 0; /* Corruption */
+ }
+ nCmp = MIN( pPKey2->aMem[0].n, nStr );
+ res = memcmp(&aKey1[szHdr], pPKey2->aMem[0].z, nCmp);
+
+ if( res>0 ){
+ res = pPKey2->r2;
+ }else if( res<0 ){
+ res = pPKey2->r1;
+ }else{
+ res = nStr - pPKey2->aMem[0].n;
+ if( res==0 ){
+ if( pPKey2->nField>1 ){
+ res = sqlite3VdbeRecordCompareWithSkip(nKey1, pKey1, pPKey2, 1);
+ }else{
+ res = pPKey2->default_rc;
+ pPKey2->eqSeen = 1;
+ }
+ }else if( res>0 ){
+ res = pPKey2->r2;
+ }else{
+ res = pPKey2->r1;
+ }
+ }
+ }
+
+ assert( vdbeRecordCompareDebug(nKey1, pKey1, pPKey2, res)
+ || CORRUPT_DB
+ || pPKey2->pKeyInfo->db->mallocFailed
+ );
+ return res;
+}
+
+/*
+** Return a pointer to an sqlite3VdbeRecordCompare() compatible function
+** suitable for comparing serialized records to the unpacked record passed
+** as the only argument.
+*/
+SQLITE_PRIVATE RecordCompare sqlite3VdbeFindCompare(UnpackedRecord *p){
+  /* vdbeRecordCompareInt() and vdbeRecordCompareString() both assume
+  ** that the size-of-header varint that occurs at the start of each record
+  ** fits in a single byte (i.e. is 127 or less). vdbeRecordCompareInt()
+  ** also assumes that it is safe to overread a buffer by at least the
+  ** maximum possible legal header size plus 8 bytes. Because there is
+  ** guaranteed to be at least 74 (but not 136) bytes of padding following
+  ** each buffer passed to vdbeRecordCompareInt(), it is convenient to
+  ** limit the size of the header to 64 bytes in cases where the first field
+  ** is an integer.
+ **
+ ** The easiest way to enforce this limit is to consider only records with
+ ** 13 fields or less. If the first field is an integer, the maximum legal
+ ** header size is (12*5 + 1 + 1) bytes. */
+ if( p->pKeyInfo->nAllField<=13 ){
+ int flags = p->aMem[0].flags;
+ if( p->pKeyInfo->aSortFlags[0] ){
+ if( p->pKeyInfo->aSortFlags[0] & KEYINFO_ORDER_BIGNULL ){
+ return sqlite3VdbeRecordCompare;
+ }
+ p->r1 = 1;
+ p->r2 = -1;
+ }else{
+ p->r1 = -1;
+ p->r2 = 1;
+ }
+ if( (flags & MEM_Int) ){
+ return vdbeRecordCompareInt;
+ }
+ testcase( flags & MEM_Real );
+ testcase( flags & MEM_Null );
+ testcase( flags & MEM_Blob );
+ if( (flags & (MEM_Real|MEM_IntReal|MEM_Null|MEM_Blob))==0
+ && p->pKeyInfo->aColl[0]==0
+ ){
+ assert( flags & MEM_Str );
+ return vdbeRecordCompareString;
+ }
+ }
+
+ return sqlite3VdbeRecordCompare;
+}
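+
+/*
+** A sketch of how the function pointer returned above is typically used
+** by the btree search code (illustrative only; names abbreviated):
+**
+**     UnpackedRecord *pIdxKey = ...;                  unpacked search key
+**     RecordCompare xCompare = sqlite3VdbeFindCompare(pIdxKey);
+**     ...
+**     c = xCompare(nCell, pCellKey, pIdxKey);         one serialized cell
+**
+** The comparator is selected once per search, so the specialization
+** tests above are not repeated for every cell that is visited.
+*/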
+
+/*
+** pCur points at an index entry created using the OP_MakeRecord opcode.
+** Read the rowid (the last field in the record) and store it in *rowid.
+** Return SQLITE_OK if everything works, or an error code otherwise.
+**
+** pCur might be pointing to text obtained from a corrupt database file.
+** So the content cannot be trusted. Do appropriate checks on the content.
+*/
+SQLITE_PRIVATE int sqlite3VdbeIdxRowid(sqlite3 *db, BtCursor *pCur, i64 *rowid){
+ i64 nCellKey = 0;
+ int rc;
+ u32 szHdr; /* Size of the header */
+ u32 typeRowid; /* Serial type of the rowid */
+ u32 lenRowid; /* Size of the rowid */
+ Mem m, v;
+
+  /* Get the size of the index entry.  Only index entries of less
+  ** than 2GiB are supported - anything larger must be database corruption.
+  ** Any corruption is detected in sqlite3BtreeParseCellPtr(), though, so
+  ** this code can safely assume that nCellKey fits in 32 bits.
+  */
+ assert( sqlite3BtreeCursorIsValid(pCur) );
+ nCellKey = sqlite3BtreePayloadSize(pCur);
+ assert( (nCellKey & SQLITE_MAX_U32)==(u64)nCellKey );
+
+ /* Read in the complete content of the index entry */
+ sqlite3VdbeMemInit(&m, db, 0);
+ rc = sqlite3VdbeMemFromBtreeZeroOffset(pCur, (u32)nCellKey, &m);
+ if( rc ){
+ return rc;
+ }
+
+ /* The index entry must begin with a header size */
+ getVarint32NR((u8*)m.z, szHdr);
+ testcase( szHdr==3 );
+ testcase( szHdr==m.n );
+ testcase( szHdr>0x7fffffff );
+ assert( m.n>=0 );
+ if( unlikely(szHdr<3 || szHdr>(unsigned)m.n) ){
+ goto idx_rowid_corruption;
+ }
+
+ /* The last field of the index should be an integer - the ROWID.
+ ** Verify that the last entry really is an integer. */
+ getVarint32NR((u8*)&m.z[szHdr-1], typeRowid);
+ testcase( typeRowid==1 );
+ testcase( typeRowid==2 );
+ testcase( typeRowid==3 );
+ testcase( typeRowid==4 );
+ testcase( typeRowid==5 );
+ testcase( typeRowid==6 );
+ testcase( typeRowid==8 );
+ testcase( typeRowid==9 );
+ if( unlikely(typeRowid<1 || typeRowid>9 || typeRowid==7) ){
+ goto idx_rowid_corruption;
+ }
+ lenRowid = sqlite3SmallTypeSizes[typeRowid];
+ testcase( (u32)m.n==szHdr+lenRowid );
+ if( unlikely((u32)m.n<szHdr+lenRowid) ){
+ goto idx_rowid_corruption;
+ }
+
+ /* Fetch the integer off the end of the index record */
+ sqlite3VdbeSerialGet((u8*)&m.z[m.n-lenRowid], typeRowid, &v);
+ *rowid = v.u.i;
+ sqlite3VdbeMemRelease(&m);
+ return SQLITE_OK;
+
+ /* Jump here if database corruption is detected after m has been
+ ** allocated. Free the m object and return SQLITE_CORRUPT. */
+idx_rowid_corruption:
+ testcase( m.szMalloc!=0 );
+ sqlite3VdbeMemRelease(&m);
+ return SQLITE_CORRUPT_BKPT;
+}
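+
+/*
+** For reference, the record layout that the routine above walks is:
+**
+**     header:  [szHdr varint][serial-type 1]...[serial-type N]
+**     body:    [field 1 data]...[field N data]
+**
+** The rowid is field N, so its serial type is the final varint in the
+** header and its data occupies the last lenRowid bytes of the body.
+*/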
+
+/*
+** Compare the key of the index entry that cursor pC is pointing to against
+** the key string in pUnpacked. Write into *pRes a number
+** that is negative, zero, or positive if pC is less than, equal to,
+** or greater than pUnpacked. Return SQLITE_OK on success.
+**
+** pUnpacked is either created without a rowid or is truncated so that it
+** omits the rowid at the end. The rowid at the end of the index entry
+** is ignored as well. Hence, this routine only compares the prefixes
+** of the keys prior to the final rowid, not the entire key.
+*/
+SQLITE_PRIVATE int sqlite3VdbeIdxKeyCompare(
+ sqlite3 *db, /* Database connection */
+ VdbeCursor *pC, /* The cursor to compare against */
+ UnpackedRecord *pUnpacked, /* Unpacked version of key */
+ int *res /* Write the comparison result here */
+){
+ i64 nCellKey = 0;
+ int rc;
+ BtCursor *pCur;
+ Mem m;
+
+ assert( pC->eCurType==CURTYPE_BTREE );
+ pCur = pC->uc.pCursor;
+ assert( sqlite3BtreeCursorIsValid(pCur) );
+ nCellKey = sqlite3BtreePayloadSize(pCur);
+ /* nCellKey will always be between 0 and 0xffffffff because of the way
+ ** that btreeParseCellPtr() and sqlite3GetVarint32() are implemented */
+ if( nCellKey<=0 || nCellKey>0x7fffffff ){
+ *res = 0;
+ return SQLITE_CORRUPT_BKPT;
+ }
+ sqlite3VdbeMemInit(&m, db, 0);
+ rc = sqlite3VdbeMemFromBtreeZeroOffset(pCur, (u32)nCellKey, &m);
+ if( rc ){
+ return rc;
+ }
+ *res = sqlite3VdbeRecordCompareWithSkip(m.n, m.z, pUnpacked, 0);
+ sqlite3VdbeMemRelease(&m);
+ return SQLITE_OK;
+}
+
+/*
+** This routine sets the value to be returned by subsequent calls to
+** sqlite3_changes() on the database handle 'db'.
+*/
+SQLITE_PRIVATE void sqlite3VdbeSetChanges(sqlite3 *db, int nChange){
+ assert( sqlite3_mutex_held(db->mutex) );
+ db->nChange = nChange;
+ db->nTotalChange += nChange;
+}
+
+/*
+** Set a flag in the vdbe to update the change counter when it is finalised
+** or reset.
+*/
+SQLITE_PRIVATE void sqlite3VdbeCountChanges(Vdbe *v){
+ v->changeCntOn = 1;
+}
+
+/*
+** Mark every prepared statement associated with a database connection
+** as expired.
+**
+** An expired statement means that recompilation of the statement is
+** recommended.  Statements expire when things happen that make their
+** programs obsolete. Removing user-defined functions or collating
+** sequences, or changing an authorization function are the types of
+** things that make prepared statements obsolete.
+**
+** If iCode is 1, then expiration is advisory. The statement should
+** be reprepared before being restarted, but if it is already running
+** it is allowed to run to completion.
+**
+** Internally, this function just sets the Vdbe.expired flag on all
+** prepared statements. The flag is set to 1 for an immediate expiration
+** and set to 2 for an advisory expiration.
+*/
+SQLITE_PRIVATE void sqlite3ExpirePreparedStatements(sqlite3 *db, int iCode){
+ Vdbe *p;
+ for(p = db->pVdbe; p; p=p->pNext){
+ p->expired = iCode+1;
+ }
+}
+
+/*
+** Return the database associated with the Vdbe.
+*/
+SQLITE_PRIVATE sqlite3 *sqlite3VdbeDb(Vdbe *v){
+ return v->db;
+}
+
+/*
+** Return the SQLITE_PREPARE flags for a Vdbe.
+*/
+SQLITE_PRIVATE u8 sqlite3VdbePrepareFlags(Vdbe *v){
+ return v->prepFlags;
+}
+
+/*
+** Return a pointer to an sqlite3_value structure containing the value
+** bound to parameter iVar of VM v. Except, if the value is an SQL NULL,
+** return 0 instead. Unless it is NULL, apply affinity aff (one of the
+** SQLITE_AFF_* constants) to the value before returning it.
+**
+** The returned value must be freed by the caller using sqlite3ValueFree().
+*/
+SQLITE_PRIVATE sqlite3_value *sqlite3VdbeGetBoundValue(Vdbe *v, int iVar, u8 aff){
+ assert( iVar>0 );
+ if( v ){
+ Mem *pMem = &v->aVar[iVar-1];
+ assert( (v->db->flags & SQLITE_EnableQPSG)==0 );
+ if( 0==(pMem->flags & MEM_Null) ){
+ sqlite3_value *pRet = sqlite3ValueNew(v->db);
+ if( pRet ){
+ sqlite3VdbeMemCopy((Mem *)pRet, pMem);
+ sqlite3ValueApplyAffinity(pRet, aff, SQLITE_UTF8);
+ }
+ return pRet;
+ }
+ }
+ return 0;
+}
+
+/*
+** Configure SQL variable iVar so that binding a new value to it signals
+** to sqlite3_reoptimize() that re-preparing the statement may result
+** in a better query plan.
+*/
+SQLITE_PRIVATE void sqlite3VdbeSetVarmask(Vdbe *v, int iVar){
+ assert( iVar>0 );
+ assert( (v->db->flags & SQLITE_EnableQPSG)==0 );
+ if( iVar>=32 ){
+ v->expmask |= 0x80000000;
+ }else{
+ v->expmask |= ((u32)1 << (iVar-1));
+ }
+}
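+
+/*
+** Worked example: marking variable ?3 sets bit 2 (expmask |= 0x00000004).
+** Variables numbered 32 and higher all collapse onto the single overflow
+** bit 0x80000000, so rebinding any high-numbered parameter conservatively
+** expires the statement.
+*/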
+
+/*
+** Cause a function to throw an error if it was called from OP_PureFunc
+** rather than OP_Function.
+**
+** OP_PureFunc means that the function must be deterministic, and should
+** throw an error if it is given inputs that would make it non-deterministic.
+** This routine is invoked by date/time functions that use non-deterministic
+** features such as 'now'.
+*/
+SQLITE_PRIVATE int sqlite3NotPureFunc(sqlite3_context *pCtx){
+ const VdbeOp *pOp;
+#ifdef SQLITE_ENABLE_STAT4
+ if( pCtx->pVdbe==0 ) return 1;
+#endif
+ pOp = pCtx->pVdbe->aOp + pCtx->iOp;
+ if( pOp->opcode==OP_PureFunc ){
+ const char *zContext;
+ char *zMsg;
+ if( pOp->p5 & NC_IsCheck ){
+ zContext = "a CHECK constraint";
+ }else if( pOp->p5 & NC_GenCol ){
+ zContext = "a generated column";
+ }else{
+ zContext = "an index";
+ }
+ zMsg = sqlite3_mprintf("non-deterministic use of %s() in %s",
+ pCtx->pFunc->zName, zContext);
+ sqlite3_result_error(pCtx, zMsg, -1);
+ sqlite3_free(zMsg);
+ return 0;
+ }
+ return 1;
+}
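+
+/*
+** For example (illustrative SQL): given
+**
+**     CREATE TABLE t1(d TEXT CHECK( d > date('now') ));
+**
+** a subsequent INSERT into t1 fails with "non-deterministic use of
+** date() in a CHECK constraint", because the CHECK expression is
+** evaluated using OP_PureFunc rather than OP_Function.
+*/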
+
+#ifndef SQLITE_OMIT_VIRTUALTABLE
+/*
+** Transfer error message text from an sqlite3_vtab.zErrMsg (text stored
+** in memory obtained from sqlite3_malloc) into a Vdbe.zErrMsg (text stored
+** in memory obtained from sqlite3DbMalloc).
+*/
+SQLITE_PRIVATE void sqlite3VtabImportErrmsg(Vdbe *p, sqlite3_vtab *pVtab){
+ if( pVtab->zErrMsg ){
+ sqlite3 *db = p->db;
+ sqlite3DbFree(db, p->zErrMsg);
+ p->zErrMsg = sqlite3DbStrDup(db, pVtab->zErrMsg);
+ sqlite3_free(pVtab->zErrMsg);
+ pVtab->zErrMsg = 0;
+ }
+}
+#endif /* SQLITE_OMIT_VIRTUALTABLE */
+
+#ifdef SQLITE_ENABLE_PREUPDATE_HOOK
+
+/*
+** If the second argument is not NULL, release any allocations associated
+** with the memory cells in the p->aMem[] array. Also free the UnpackedRecord
+** structure itself, using sqlite3DbFree().
+**
+** This function is used to free UnpackedRecord structures allocated by
+** the vdbeUnpackRecord() function found in vdbeapi.c.
+*/
+static void vdbeFreeUnpacked(sqlite3 *db, int nField, UnpackedRecord *p){
+ if( p ){
+ int i;
+ for(i=0; i<nField; i++){
+ Mem *pMem = &p->aMem[i];
+ if( pMem->zMalloc ) sqlite3VdbeMemRelease(pMem);
+ }
+ sqlite3DbFreeNN(db, p);
+ }
+}
+#endif /* SQLITE_ENABLE_PREUPDATE_HOOK */
+
+#ifdef SQLITE_ENABLE_PREUPDATE_HOOK
+/*
+** Invoke the pre-update hook. If this is an UPDATE or DELETE pre-update call,
+** then the cursor passed as the second argument should point to the row about
+** to be updated or deleted. If the application calls sqlite3_preupdate_old(),
+** the required value will be read from the row the cursor points to.
+*/
+SQLITE_PRIVATE void sqlite3VdbePreUpdateHook(
+ Vdbe *v, /* Vdbe pre-update hook is invoked by */
+ VdbeCursor *pCsr, /* Cursor to grab old.* values from */
+ int op, /* SQLITE_INSERT, UPDATE or DELETE */
+ const char *zDb, /* Database name */
+ Table *pTab, /* Modified table */
+ i64 iKey1, /* Initial key value */
+ int iReg /* Register for new.* record */
+){
+ sqlite3 *db = v->db;
+ i64 iKey2;
+ PreUpdate preupdate;
+ const char *zTbl = pTab->zName;
+ static const u8 fakeSortOrder = 0;
+
+ assert( db->pPreUpdate==0 );
+ memset(&preupdate, 0, sizeof(PreUpdate));
+ if( HasRowid(pTab)==0 ){
+ iKey1 = iKey2 = 0;
+ preupdate.pPk = sqlite3PrimaryKeyIndex(pTab);
+ }else{
+ if( op==SQLITE_UPDATE ){
+ iKey2 = v->aMem[iReg].u.i;
+ }else{
+ iKey2 = iKey1;
+ }
+ }
+
+ assert( pCsr->nField==pTab->nCol
+ || (pCsr->nField==pTab->nCol+1 && op==SQLITE_DELETE && iReg==-1)
+ );
+
+ preupdate.v = v;
+ preupdate.pCsr = pCsr;
+ preupdate.op = op;
+ preupdate.iNewReg = iReg;
+ preupdate.keyinfo.db = db;
+ preupdate.keyinfo.enc = ENC(db);
+ preupdate.keyinfo.nKeyField = pTab->nCol;
+ preupdate.keyinfo.aSortFlags = (u8*)&fakeSortOrder;
+ preupdate.iKey1 = iKey1;
+ preupdate.iKey2 = iKey2;
+ preupdate.pTab = pTab;
+
+ db->pPreUpdate = &preupdate;
+ db->xPreUpdateCallback(db->pPreUpdateArg, db, op, zDb, zTbl, iKey1, iKey2);
+ db->pPreUpdate = 0;
+ sqlite3DbFree(db, preupdate.aRecord);
+ vdbeFreeUnpacked(db, preupdate.keyinfo.nKeyField+1, preupdate.pUnpacked);
+ vdbeFreeUnpacked(db, preupdate.keyinfo.nKeyField+1, preupdate.pNewUnpacked);
+ if( preupdate.aNew ){
+ int i;
+ for(i=0; i<pCsr->nField; i++){
+ sqlite3VdbeMemRelease(&preupdate.aNew[i]);
+ }
+ sqlite3DbFreeNN(db, preupdate.aNew);
+ }
+}
+#endif /* SQLITE_ENABLE_PREUPDATE_HOOK */
+
+/************** End of vdbeaux.c *********************************************/
+/************** Begin file vdbeapi.c *****************************************/
+/*
+** 2004 May 26
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+**
+** This file contains code use to implement APIs that are part of the
+** VDBE.
+*/
+/* #include "sqliteInt.h" */
+/* #include "vdbeInt.h" */
+
+#ifndef SQLITE_OMIT_DEPRECATED
+/*
+** Return TRUE (non-zero) if the statement supplied as an argument needs
+** to be recompiled. A statement needs to be recompiled whenever the
+** execution environment changes in a way that would alter the program
+** that sqlite3_prepare() generates. For example, if new functions or
+** collating sequences are registered or if an authorizer function is
+** added or changed.
+*/
+SQLITE_API int sqlite3_expired(sqlite3_stmt *pStmt){
+ Vdbe *p = (Vdbe*)pStmt;
+ return p==0 || p->expired;
+}
+#endif
+
+/*
+** Check on a Vdbe to make sure it has not been finalized. Log
+** an error and return true if it has been finalized (or is otherwise
+** invalid). Return false if it is ok.
+*/
+static int vdbeSafety(Vdbe *p){
+ if( p->db==0 ){
+ sqlite3_log(SQLITE_MISUSE, "API called with finalized prepared statement");
+ return 1;
+ }else{
+ return 0;
+ }
+}
+static int vdbeSafetyNotNull(Vdbe *p){
+ if( p==0 ){
+ sqlite3_log(SQLITE_MISUSE, "API called with NULL prepared statement");
+ return 1;
+ }else{
+ return vdbeSafety(p);
+ }
+}
+
+#ifndef SQLITE_OMIT_TRACE
+/*
+** Invoke the profile callback. This routine is only called if we already
+** know that the profile callback is defined and needs to be invoked.
+*/
+static SQLITE_NOINLINE void invokeProfileCallback(sqlite3 *db, Vdbe *p){
+ sqlite3_int64 iNow;
+ sqlite3_int64 iElapse;
+ assert( p->startTime>0 );
+ assert( (db->mTrace & (SQLITE_TRACE_PROFILE|SQLITE_TRACE_XPROFILE))!=0 );
+ assert( db->init.busy==0 );
+ assert( p->zSql!=0 );
+ sqlite3OsCurrentTimeInt64(db->pVfs, &iNow);
+ iElapse = (iNow - p->startTime)*1000000;
+#ifndef SQLITE_OMIT_DEPRECATED
+ if( db->xProfile ){
+ db->xProfile(db->pProfileArg, p->zSql, iElapse);
+ }
+#endif
+ if( db->mTrace & SQLITE_TRACE_PROFILE ){
+ db->trace.xV2(SQLITE_TRACE_PROFILE, db->pTraceArg, p, (void*)&iElapse);
+ }
+ p->startTime = 0;
+}
+/*
+** The checkProfileCallback(DB,P) macro checks to see if a profile callback
+** is needed, and it invokes the callback if it is needed.
+*/
+# define checkProfileCallback(DB,P) \
+ if( ((P)->startTime)>0 ){ invokeProfileCallback(DB,P); }
+#else
+# define checkProfileCallback(DB,P) /*no-op*/
+#endif
+
+/*
+** The following routine destroys a virtual machine that is created by
+** the sqlite3_prepare() routine. The integer returned is an SQLITE_*
+** success/failure code that describes the result of executing the virtual
+** machine.
+**
+** This routine sets the error code and string returned by
+** sqlite3_errcode(), sqlite3_errmsg() and sqlite3_errmsg16().
+*/
+SQLITE_API int sqlite3_finalize(sqlite3_stmt *pStmt){
+ int rc;
+ if( pStmt==0 ){
+ /* IMPLEMENTATION-OF: R-57228-12904 Invoking sqlite3_finalize() on a NULL
+ ** pointer is a harmless no-op. */
+ rc = SQLITE_OK;
+ }else{
+ Vdbe *v = (Vdbe*)pStmt;
+ sqlite3 *db = v->db;
+ if( vdbeSafety(v) ) return SQLITE_MISUSE_BKPT;
+ sqlite3_mutex_enter(db->mutex);
+ checkProfileCallback(db, v);
+ rc = sqlite3VdbeFinalize(v);
+ rc = sqlite3ApiExit(db, rc);
+ sqlite3LeaveMutexAndCloseZombie(db);
+ }
+ return rc;
+}
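+
+/*
+** Typical statement lifecycle, for illustration (error handling omitted;
+** "db" is an open database connection):
+**
+**     sqlite3_stmt *pStmt;
+**     sqlite3_prepare_v2(db, "SELECT x FROM t1", -1, &pStmt, 0);
+**     while( sqlite3_step(pStmt)==SQLITE_ROW ){
+**       int x = sqlite3_column_int(pStmt, 0);
+**       ...
+**     }
+**     sqlite3_finalize(pStmt);    finalizing a NULL pointer is a no-op
+*/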
+
+/*
+** Terminate the current execution of an SQL statement and reset it
+** back to its starting state so that it can be reused. A success code from
+** the prior execution is returned.
+**
+** This routine sets the error code and string returned by
+** sqlite3_errcode(), sqlite3_errmsg() and sqlite3_errmsg16().
+*/
+SQLITE_API int sqlite3_reset(sqlite3_stmt *pStmt){
+ int rc;
+ if( pStmt==0 ){
+ rc = SQLITE_OK;
+ }else{
+ Vdbe *v = (Vdbe*)pStmt;
+ sqlite3 *db = v->db;
+ sqlite3_mutex_enter(db->mutex);
+ checkProfileCallback(db, v);
+ rc = sqlite3VdbeReset(v);
+ sqlite3VdbeRewind(v);
+ assert( (rc & (db->errMask))==rc );
+ rc = sqlite3ApiExit(db, rc);
+ sqlite3_mutex_leave(db->mutex);
+ }
+ return rc;
+}
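+
+/*
+** For illustration, reset is what makes a prepared statement reusable:
+**
+**     sqlite3_bind_int(pStmt, 1, 10);
+**     sqlite3_step(pStmt);
+**     sqlite3_reset(pStmt);
+**     sqlite3_bind_int(pStmt, 1, 20);
+**     sqlite3_step(pStmt);
+**
+** Note that sqlite3_reset() does not clear existing bindings; use
+** sqlite3_clear_bindings() below for that.
+*/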
+
+/*
+** Set all the parameters in the compiled SQL statement to NULL.
+*/
+SQLITE_API int sqlite3_clear_bindings(sqlite3_stmt *pStmt){
+ int i;
+ int rc = SQLITE_OK;
+ Vdbe *p = (Vdbe*)pStmt;
+#if SQLITE_THREADSAFE
+ sqlite3_mutex *mutex = ((Vdbe*)pStmt)->db->mutex;
+#endif
+ sqlite3_mutex_enter(mutex);
+ for(i=0; i<p->nVar; i++){
+ sqlite3VdbeMemRelease(&p->aVar[i]);
+ p->aVar[i].flags = MEM_Null;
+ }
+ assert( (p->prepFlags & SQLITE_PREPARE_SAVESQL)!=0 || p->expmask==0 );
+ if( p->expmask ){
+ p->expired = 1;
+ }
+ sqlite3_mutex_leave(mutex);
+ return rc;
+}
+
+
+/**************************** sqlite3_value_ *******************************
+** The following routines extract information from a Mem or sqlite3_value
+** structure.
+*/
+SQLITE_API const void *sqlite3_value_blob(sqlite3_value *pVal){
+ Mem *p = (Mem*)pVal;
+ if( p->flags & (MEM_Blob|MEM_Str) ){
+ if( ExpandBlob(p)!=SQLITE_OK ){
+ assert( p->flags==MEM_Null && p->z==0 );
+ return 0;
+ }
+ p->flags |= MEM_Blob;
+ return p->n ? p->z : 0;
+ }else{
+ return sqlite3_value_text(pVal);
+ }
+}
+SQLITE_API int sqlite3_value_bytes(sqlite3_value *pVal){
+ return sqlite3ValueBytes(pVal, SQLITE_UTF8);
+}
+SQLITE_API int sqlite3_value_bytes16(sqlite3_value *pVal){
+ return sqlite3ValueBytes(pVal, SQLITE_UTF16NATIVE);
+}
+SQLITE_API double sqlite3_value_double(sqlite3_value *pVal){
+ return sqlite3VdbeRealValue((Mem*)pVal);
+}
+SQLITE_API int sqlite3_value_int(sqlite3_value *pVal){
+ return (int)sqlite3VdbeIntValue((Mem*)pVal);
+}
+SQLITE_API sqlite_int64 sqlite3_value_int64(sqlite3_value *pVal){
+ return sqlite3VdbeIntValue((Mem*)pVal);
+}
+SQLITE_API unsigned int sqlite3_value_subtype(sqlite3_value *pVal){
+ Mem *pMem = (Mem*)pVal;
+ return ((pMem->flags & MEM_Subtype) ? pMem->eSubtype : 0);
+}
+SQLITE_API void *sqlite3_value_pointer(sqlite3_value *pVal, const char *zPType){
+ Mem *p = (Mem*)pVal;
+ if( (p->flags&(MEM_TypeMask|MEM_Term|MEM_Subtype)) ==
+ (MEM_Null|MEM_Term|MEM_Subtype)
+ && zPType!=0
+ && p->eSubtype=='p'
+ && strcmp(p->u.zPType, zPType)==0
+ ){
+ return (void*)p->z;
+ }else{
+ return 0;
+ }
+}
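+
+/*
+** Sketch of the pointer-passing interface that the checks above implement
+** (the type tag "mytype" and object MyObj are hypothetical):
+**
+**     sqlite3_bind_pointer(pStmt, 1, pObj, "mytype", 0);
+**     ...
+**     MyObj *p = (MyObj*)sqlite3_value_pointer(argv[0], "mytype");
+**
+** Through every other interface the value reads as an SQL NULL; the
+** pointer is only recoverable when the zPType strings match.
+*/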
+SQLITE_API const unsigned char *sqlite3_value_text(sqlite3_value *pVal){
+ return (const unsigned char *)sqlite3ValueText(pVal, SQLITE_UTF8);
+}
+#ifndef SQLITE_OMIT_UTF16
+SQLITE_API const void *sqlite3_value_text16(sqlite3_value* pVal){
+ return sqlite3ValueText(pVal, SQLITE_UTF16NATIVE);
+}
+SQLITE_API const void *sqlite3_value_text16be(sqlite3_value *pVal){
+ return sqlite3ValueText(pVal, SQLITE_UTF16BE);
+}
+SQLITE_API const void *sqlite3_value_text16le(sqlite3_value *pVal){
+ return sqlite3ValueText(pVal, SQLITE_UTF16LE);
+}
+#endif /* SQLITE_OMIT_UTF16 */
+/* EVIDENCE-OF: R-12793-43283 Every value in SQLite has one of five
+** fundamental datatypes: 64-bit signed integer, 64-bit IEEE floating
+** point number, string, BLOB, or NULL.
+*/
+SQLITE_API int sqlite3_value_type(sqlite3_value* pVal){
+ static const u8 aType[] = {
+ SQLITE_BLOB, /* 0x00 (not possible) */
+ SQLITE_NULL, /* 0x01 NULL */
+ SQLITE_TEXT, /* 0x02 TEXT */
+ SQLITE_NULL, /* 0x03 (not possible) */
+ SQLITE_INTEGER, /* 0x04 INTEGER */
+ SQLITE_NULL, /* 0x05 (not possible) */
+ SQLITE_INTEGER, /* 0x06 INTEGER + TEXT */
+ SQLITE_NULL, /* 0x07 (not possible) */
+ SQLITE_FLOAT, /* 0x08 FLOAT */
+ SQLITE_NULL, /* 0x09 (not possible) */
+ SQLITE_FLOAT, /* 0x0a FLOAT + TEXT */
+ SQLITE_NULL, /* 0x0b (not possible) */
+ SQLITE_INTEGER, /* 0x0c (not possible) */
+ SQLITE_NULL, /* 0x0d (not possible) */
+ SQLITE_INTEGER, /* 0x0e (not possible) */
+ SQLITE_NULL, /* 0x0f (not possible) */
+ SQLITE_BLOB, /* 0x10 BLOB */
+ SQLITE_NULL, /* 0x11 (not possible) */
+ SQLITE_TEXT, /* 0x12 (not possible) */
+ SQLITE_NULL, /* 0x13 (not possible) */
+ SQLITE_INTEGER, /* 0x14 INTEGER + BLOB */
+ SQLITE_NULL, /* 0x15 (not possible) */
+ SQLITE_INTEGER, /* 0x16 (not possible) */
+ SQLITE_NULL, /* 0x17 (not possible) */
+ SQLITE_FLOAT, /* 0x18 FLOAT + BLOB */
+ SQLITE_NULL, /* 0x19 (not possible) */
+ SQLITE_FLOAT, /* 0x1a (not possible) */
+ SQLITE_NULL, /* 0x1b (not possible) */
+ SQLITE_INTEGER, /* 0x1c (not possible) */
+ SQLITE_NULL, /* 0x1d (not possible) */
+ SQLITE_INTEGER, /* 0x1e (not possible) */
+ SQLITE_NULL, /* 0x1f (not possible) */
+ SQLITE_FLOAT, /* 0x20 INTREAL */
+ SQLITE_NULL, /* 0x21 (not possible) */
+ SQLITE_TEXT, /* 0x22 INTREAL + TEXT */
+ SQLITE_NULL, /* 0x23 (not possible) */
+ SQLITE_FLOAT, /* 0x24 (not possible) */
+ SQLITE_NULL, /* 0x25 (not possible) */
+ SQLITE_FLOAT, /* 0x26 (not possible) */
+ SQLITE_NULL, /* 0x27 (not possible) */
+ SQLITE_FLOAT, /* 0x28 (not possible) */
+ SQLITE_NULL, /* 0x29 (not possible) */
+ SQLITE_FLOAT, /* 0x2a (not possible) */
+ SQLITE_NULL, /* 0x2b (not possible) */
+ SQLITE_FLOAT, /* 0x2c (not possible) */
+ SQLITE_NULL, /* 0x2d (not possible) */
+ SQLITE_FLOAT, /* 0x2e (not possible) */
+ SQLITE_NULL, /* 0x2f (not possible) */
+ SQLITE_BLOB, /* 0x30 (not possible) */
+ SQLITE_NULL, /* 0x31 (not possible) */
+ SQLITE_TEXT, /* 0x32 (not possible) */
+ SQLITE_NULL, /* 0x33 (not possible) */
+ SQLITE_FLOAT, /* 0x34 (not possible) */
+ SQLITE_NULL, /* 0x35 (not possible) */
+ SQLITE_FLOAT, /* 0x36 (not possible) */
+ SQLITE_NULL, /* 0x37 (not possible) */
+ SQLITE_FLOAT, /* 0x38 (not possible) */
+ SQLITE_NULL, /* 0x39 (not possible) */
+ SQLITE_FLOAT, /* 0x3a (not possible) */
+ SQLITE_NULL, /* 0x3b (not possible) */
+ SQLITE_FLOAT, /* 0x3c (not possible) */
+ SQLITE_NULL, /* 0x3d (not possible) */
+ SQLITE_FLOAT, /* 0x3e (not possible) */
+ SQLITE_NULL, /* 0x3f (not possible) */
+ };
+#ifdef SQLITE_DEBUG
+ {
+ int eType = SQLITE_BLOB;
+ if( pVal->flags & MEM_Null ){
+ eType = SQLITE_NULL;
+ }else if( pVal->flags & (MEM_Real|MEM_IntReal) ){
+ eType = SQLITE_FLOAT;
+ }else if( pVal->flags & MEM_Int ){
+ eType = SQLITE_INTEGER;
+ }else if( pVal->flags & MEM_Str ){
+ eType = SQLITE_TEXT;
+ }
+ assert( eType == aType[pVal->flags&MEM_AffMask] );
+ }
+#endif
+ return aType[pVal->flags&MEM_AffMask];
+}
+
+/* Return true if a parameter to xUpdate represents an unchanged column */
+SQLITE_API int sqlite3_value_nochange(sqlite3_value *pVal){
+ return (pVal->flags&(MEM_Null|MEM_Zero))==(MEM_Null|MEM_Zero);
+}
+
+/* Return true if a parameter value originated from an sqlite3_bind() */
+SQLITE_API int sqlite3_value_frombind(sqlite3_value *pVal){
+ return (pVal->flags&MEM_FromBind)!=0;
+}
+
+/* Make a copy of an sqlite3_value object
+*/
+SQLITE_API sqlite3_value *sqlite3_value_dup(const sqlite3_value *pOrig){
+ sqlite3_value *pNew;
+ if( pOrig==0 ) return 0;
+ pNew = sqlite3_malloc( sizeof(*pNew) );
+ if( pNew==0 ) return 0;
+ memset(pNew, 0, sizeof(*pNew));
+ memcpy(pNew, pOrig, MEMCELLSIZE);
+ pNew->flags &= ~MEM_Dyn;
+ pNew->db = 0;
+ if( pNew->flags&(MEM_Str|MEM_Blob) ){
+ pNew->flags &= ~(MEM_Static|MEM_Dyn);
+ pNew->flags |= MEM_Ephem;
+ if( sqlite3VdbeMemMakeWriteable(pNew)!=SQLITE_OK ){
+ sqlite3ValueFree(pNew);
+ pNew = 0;
+ }
+ }
+ return pNew;
+}
+
+/* Destroy an sqlite3_value object previously obtained from
+** sqlite3_value_dup().
+*/
+SQLITE_API void sqlite3_value_free(sqlite3_value *pOld){
+ sqlite3ValueFree(pOld);
+}
+
+
+/**************************** sqlite3_result_ *******************************
+** The following routines are used by user-defined functions to specify
+** the function result.
+**
+** The setResultStrOrError() function calls sqlite3VdbeMemSetStr() to store
+** the result as a string or blob.  If the string or blob is too large, it
+** sets the error code to SQLITE_TOOBIG instead.
+**
+** The invokeValueDestructor(P,X) routine invokes destructor function X()
+** on value P when P is not going to be used and needs to be destroyed.
+*/
+static void setResultStrOrError(
+ sqlite3_context *pCtx, /* Function context */
+ const char *z, /* String pointer */
+ int n, /* Bytes in string, or negative */
+ u8 enc, /* Encoding of z. 0 for BLOBs */
+ void (*xDel)(void*) /* Destructor function */
+){
+ if( sqlite3VdbeMemSetStr(pCtx->pOut, z, n, enc, xDel)==SQLITE_TOOBIG ){
+ sqlite3_result_error_toobig(pCtx);
+ }
+}
+static int invokeValueDestructor(
+ const void *p, /* Value to destroy */
+ void (*xDel)(void*), /* The destructor */
+  sqlite3_context *pCtx   /* Set a SQLITE_TOOBIG error if not NULL */
+){
+ assert( xDel!=SQLITE_DYNAMIC );
+ if( xDel==0 ){
+ /* noop */
+ }else if( xDel==SQLITE_TRANSIENT ){
+ /* noop */
+ }else{
+ xDel((void*)p);
+ }
+ if( pCtx ) sqlite3_result_error_toobig(pCtx);
+ return SQLITE_TOOBIG;
+}
+SQLITE_API void sqlite3_result_blob(
+ sqlite3_context *pCtx,
+ const void *z,
+ int n,
+ void (*xDel)(void *)
+){
+ assert( n>=0 );
+ assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) );
+ setResultStrOrError(pCtx, z, n, 0, xDel);
+}
+SQLITE_API void sqlite3_result_blob64(
+ sqlite3_context *pCtx,
+ const void *z,
+ sqlite3_uint64 n,
+ void (*xDel)(void *)
+){
+ assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) );
+ assert( xDel!=SQLITE_DYNAMIC );
+ if( n>0x7fffffff ){
+ (void)invokeValueDestructor(z, xDel, pCtx);
+ }else{
+ setResultStrOrError(pCtx, z, (int)n, 0, xDel);
+ }
+}
+SQLITE_API void sqlite3_result_double(sqlite3_context *pCtx, double rVal){
+ assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) );
+ sqlite3VdbeMemSetDouble(pCtx->pOut, rVal);
+}
+SQLITE_API void sqlite3_result_error(sqlite3_context *pCtx, const char *z, int n){
+ assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) );
+ pCtx->isError = SQLITE_ERROR;
+ sqlite3VdbeMemSetStr(pCtx->pOut, z, n, SQLITE_UTF8, SQLITE_TRANSIENT);
+}
+#ifndef SQLITE_OMIT_UTF16
+SQLITE_API void sqlite3_result_error16(sqlite3_context *pCtx, const void *z, int n){
+ assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) );
+ pCtx->isError = SQLITE_ERROR;
+ sqlite3VdbeMemSetStr(pCtx->pOut, z, n, SQLITE_UTF16NATIVE, SQLITE_TRANSIENT);
+}
+#endif
+SQLITE_API void sqlite3_result_int(sqlite3_context *pCtx, int iVal){
+ assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) );
+ sqlite3VdbeMemSetInt64(pCtx->pOut, (i64)iVal);
+}
+SQLITE_API void sqlite3_result_int64(sqlite3_context *pCtx, i64 iVal){
+ assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) );
+ sqlite3VdbeMemSetInt64(pCtx->pOut, iVal);
+}
+SQLITE_API void sqlite3_result_null(sqlite3_context *pCtx){
+ assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) );
+ sqlite3VdbeMemSetNull(pCtx->pOut);
+}
+SQLITE_API void sqlite3_result_pointer(
+ sqlite3_context *pCtx,
+ void *pPtr,
+ const char *zPType,
+ void (*xDestructor)(void*)
+){
+ Mem *pOut = pCtx->pOut;
+ assert( sqlite3_mutex_held(pOut->db->mutex) );
+ sqlite3VdbeMemRelease(pOut);
+ pOut->flags = MEM_Null;
+ sqlite3VdbeMemSetPointer(pOut, pPtr, zPType, xDestructor);
+}
+SQLITE_API void sqlite3_result_subtype(sqlite3_context *pCtx, unsigned int eSubtype){
+ Mem *pOut = pCtx->pOut;
+ assert( sqlite3_mutex_held(pOut->db->mutex) );
+ pOut->eSubtype = eSubtype & 0xff;
+ pOut->flags |= MEM_Subtype;
+}
+SQLITE_API void sqlite3_result_text(
+ sqlite3_context *pCtx,
+ const char *z,
+ int n,
+ void (*xDel)(void *)
+){
+ assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) );
+ setResultStrOrError(pCtx, z, n, SQLITE_UTF8, xDel);
+}
+SQLITE_API void sqlite3_result_text64(
+ sqlite3_context *pCtx,
+ const char *z,
+ sqlite3_uint64 n,
+ void (*xDel)(void *),
+ unsigned char enc
+){
+ assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) );
+ assert( xDel!=SQLITE_DYNAMIC );
+ if( enc==SQLITE_UTF16 ) enc = SQLITE_UTF16NATIVE;
+ if( n>0x7fffffff ){
+ (void)invokeValueDestructor(z, xDel, pCtx);
+ }else{
+ setResultStrOrError(pCtx, z, (int)n, enc, xDel);
+ }
+}
+#ifndef SQLITE_OMIT_UTF16
+SQLITE_API void sqlite3_result_text16(
+ sqlite3_context *pCtx,
+ const void *z,
+ int n,
+ void (*xDel)(void *)
+){
+ assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) );
+ setResultStrOrError(pCtx, z, n, SQLITE_UTF16NATIVE, xDel);
+}
+SQLITE_API void sqlite3_result_text16be(
+ sqlite3_context *pCtx,
+ const void *z,
+ int n,
+ void (*xDel)(void *)
+){
+ assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) );
+ setResultStrOrError(pCtx, z, n, SQLITE_UTF16BE, xDel);
+}
+SQLITE_API void sqlite3_result_text16le(
+ sqlite3_context *pCtx,
+ const void *z,
+ int n,
+ void (*xDel)(void *)
+){
+ assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) );
+ setResultStrOrError(pCtx, z, n, SQLITE_UTF16LE, xDel);
+}
+#endif /* SQLITE_OMIT_UTF16 */
+SQLITE_API void sqlite3_result_value(sqlite3_context *pCtx, sqlite3_value *pValue){
+ assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) );
+ sqlite3VdbeMemCopy(pCtx->pOut, pValue);
+}
+SQLITE_API void sqlite3_result_zeroblob(sqlite3_context *pCtx, int n){
+ assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) );
+ sqlite3VdbeMemSetZeroBlob(pCtx->pOut, n);
+}
+SQLITE_API int sqlite3_result_zeroblob64(sqlite3_context *pCtx, u64 n){
+ Mem *pOut = pCtx->pOut;
+ assert( sqlite3_mutex_held(pOut->db->mutex) );
+ if( n>(u64)pOut->db->aLimit[SQLITE_LIMIT_LENGTH] ){
+ return SQLITE_TOOBIG;
+ }
+ sqlite3VdbeMemSetZeroBlob(pCtx->pOut, (int)n);
+ return SQLITE_OK;
+}
+SQLITE_API void sqlite3_result_error_code(sqlite3_context *pCtx, int errCode){
+ pCtx->isError = errCode ? errCode : -1;
+#ifdef SQLITE_DEBUG
+ if( pCtx->pVdbe ) pCtx->pVdbe->rcApp = errCode;
+#endif
+ if( pCtx->pOut->flags & MEM_Null ){
+ sqlite3VdbeMemSetStr(pCtx->pOut, sqlite3ErrStr(errCode), -1,
+ SQLITE_UTF8, SQLITE_STATIC);
+ }
+}
+
+/* Force an SQLITE_TOOBIG error. */
+SQLITE_API void sqlite3_result_error_toobig(sqlite3_context *pCtx){
+ assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) );
+ pCtx->isError = SQLITE_TOOBIG;
+ sqlite3VdbeMemSetStr(pCtx->pOut, "string or blob too big", -1,
+ SQLITE_UTF8, SQLITE_STATIC);
+}
+
+/* An SQLITE_NOMEM error. */
+SQLITE_API void sqlite3_result_error_nomem(sqlite3_context *pCtx){
+ assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) );
+ sqlite3VdbeMemSetNull(pCtx->pOut);
+ pCtx->isError = SQLITE_NOMEM_BKPT;
+ sqlite3OomFault(pCtx->pOut->db);
+}
+
+#ifndef SQLITE_UNTESTABLE
+/* Force the INT64 value currently stored as the result to be
+** a MEM_IntReal value. See the SQLITE_TESTCTRL_RESULT_INTREAL
+** test-control.
+*/
+SQLITE_PRIVATE void sqlite3ResultIntReal(sqlite3_context *pCtx){
+ assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) );
+ if( pCtx->pOut->flags & MEM_Int ){
+ pCtx->pOut->flags &= ~MEM_Int;
+ pCtx->pOut->flags |= MEM_IntReal;
+ }
+}
+#endif
+
+
+/*
+** This function is called after a transaction has been committed. It
+** invokes callbacks registered with sqlite3_wal_hook() as required.
+*/
+static int doWalCallbacks(sqlite3 *db){
+ int rc = SQLITE_OK;
+#ifndef SQLITE_OMIT_WAL
+ int i;
+ for(i=0; i<db->nDb; i++){
+ Btree *pBt = db->aDb[i].pBt;
+ if( pBt ){
+ int nEntry;
+ sqlite3BtreeEnter(pBt);
+ nEntry = sqlite3PagerWalCallback(sqlite3BtreePager(pBt));
+ sqlite3BtreeLeave(pBt);
+ if( nEntry>0 && db->xWalCallback && rc==SQLITE_OK ){
+ rc = db->xWalCallback(db->pWalArg, db, db->aDb[i].zDbSName, nEntry);
+ }
+ }
+ }
+#endif
+ return rc;
+}
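+
+/*
+** A minimal sketch of a hook that the loop above would invoke
+** (walHook is a hypothetical callback, registered via sqlite3_wal_hook()):
+**
+**     static int walHook(void *pArg, sqlite3 *db, const char *zDb, int nFrame){
+**       if( nFrame>=1000 ){
+**         sqlite3_wal_checkpoint_v2(db, zDb, SQLITE_CHECKPOINT_PASSIVE, 0, 0);
+**       }
+**       return SQLITE_OK;
+**     }
+**     ...
+**     sqlite3_wal_hook(db, walHook, 0);
+*/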
+
+
+/*
+** Execute the statement pStmt, either until a row of data is ready, the
+** statement is completely executed, or an error occurs.
+**
+** This routine implements the bulk of the logic behind the sqlite3_step()
+** API. The only thing omitted is the automatic recompile if a
+** schema change has occurred. That detail is handled by the
+** outer sqlite3_step() wrapper procedure.
+*/
+static int sqlite3Step(Vdbe *p){
+ sqlite3 *db;
+ int rc;
+
+ assert(p);
+ if( p->magic!=VDBE_MAGIC_RUN ){
+ /* We used to require that sqlite3_reset() be called before retrying
+ ** sqlite3_step() after any error or after SQLITE_DONE. But beginning
+ ** with version 3.7.0, we changed this so that sqlite3_reset() would
+ ** be called automatically instead of throwing the SQLITE_MISUSE error.
+ ** This "automatic-reset" change is not technically an incompatibility,
+ ** since any application that receives an SQLITE_MISUSE is broken by
+ ** definition.
+ **
+ ** Nevertheless, some published applications that were originally written
+    ** for version 3.6.23 or earlier do in fact depend on SQLITE_MISUSE
+    ** being returned, and those were broken by the automatic-reset change. As
+    ** a work-around, the SQLITE_OMIT_AUTORESET compile-time option restores
+    ** the legacy behavior of returning SQLITE_MISUSE for cases where the
+ ** previous sqlite3_step() returned something other than a SQLITE_LOCKED
+ ** or SQLITE_BUSY error.
+ */
+#ifdef SQLITE_OMIT_AUTORESET
+ if( (rc = p->rc&0xff)==SQLITE_BUSY || rc==SQLITE_LOCKED ){
+ sqlite3_reset((sqlite3_stmt*)p);
+ }else{
+ return SQLITE_MISUSE_BKPT;
+ }
+#else
+ sqlite3_reset((sqlite3_stmt*)p);
+#endif
+ }
+
+ /* Check that malloc() has not failed. If it has, return early. */
+ db = p->db;
+ if( db->mallocFailed ){
+ p->rc = SQLITE_NOMEM;
+ return SQLITE_NOMEM_BKPT;
+ }
+
+ if( p->pc<0 && p->expired ){
+ p->rc = SQLITE_SCHEMA;
+ rc = SQLITE_ERROR;
+ if( (p->prepFlags & SQLITE_PREPARE_SAVESQL)!=0 ){
+ /* If this statement was prepared using saved SQL and an
+ ** error has occurred, then return the error code in p->rc to the
+ ** caller. Set the error code in the database handle to the same value.
+ */
+ rc = sqlite3VdbeTransferError(p);
+ }
+ goto end_of_step;
+ }
+ if( p->pc<0 ){
+ /* If there are no other statements currently running, then
+ ** reset the interrupt flag. This prevents a call to sqlite3_interrupt
+ ** from interrupting a statement that has not yet started.
+ */
+ if( db->nVdbeActive==0 ){
+ AtomicStore(&db->u1.isInterrupted, 0);
+ }
+
+ assert( db->nVdbeWrite>0 || db->autoCommit==0
+ || (db->nDeferredCons==0 && db->nDeferredImmCons==0)
+ );
+
+#ifndef SQLITE_OMIT_TRACE
+ if( (db->mTrace & (SQLITE_TRACE_PROFILE|SQLITE_TRACE_XPROFILE))!=0
+ && !db->init.busy && p->zSql ){
+ sqlite3OsCurrentTimeInt64(db->pVfs, &p->startTime);
+ }else{
+ assert( p->startTime==0 );
+ }
+#endif
+
+ db->nVdbeActive++;
+ if( p->readOnly==0 ) db->nVdbeWrite++;
+ if( p->bIsReader ) db->nVdbeRead++;
+ p->pc = 0;
+ }
+#ifdef SQLITE_DEBUG
+ p->rcApp = SQLITE_OK;
+#endif
+#ifndef SQLITE_OMIT_EXPLAIN
+ if( p->explain ){
+ rc = sqlite3VdbeList(p);
+ }else
+#endif /* SQLITE_OMIT_EXPLAIN */
+ {
+ db->nVdbeExec++;
+ rc = sqlite3VdbeExec(p);
+ db->nVdbeExec--;
+ }
+
+ if( rc!=SQLITE_ROW ){
+#ifndef SQLITE_OMIT_TRACE
+ /* If the statement completed successfully, invoke the profile callback */
+ checkProfileCallback(db, p);
+#endif
+
+ if( rc==SQLITE_DONE && db->autoCommit ){
+ assert( p->rc==SQLITE_OK );
+ p->rc = doWalCallbacks(db);
+ if( p->rc!=SQLITE_OK ){
+ rc = SQLITE_ERROR;
+ }
+ }else if( rc!=SQLITE_DONE && (p->prepFlags & SQLITE_PREPARE_SAVESQL)!=0 ){
+ /* If this statement was prepared using saved SQL and an
+ ** error has occurred, then return the error code in p->rc to the
+ ** caller. Set the error code in the database handle to the same value.
+ */
+ rc = sqlite3VdbeTransferError(p);
+ }
+ }
+
+ db->errCode = rc;
+ if( SQLITE_NOMEM==sqlite3ApiExit(p->db, p->rc) ){
+ p->rc = SQLITE_NOMEM_BKPT;
+ if( (p->prepFlags & SQLITE_PREPARE_SAVESQL)!=0 ) rc = p->rc;
+ }
+end_of_step:
+  /* There are only a limited number of result codes allowed from
+  ** statements prepared using the legacy sqlite3_prepare() interface */
+ assert( (p->prepFlags & SQLITE_PREPARE_SAVESQL)!=0
+ || rc==SQLITE_ROW || rc==SQLITE_DONE || rc==SQLITE_ERROR
+ || (rc&0xff)==SQLITE_BUSY || rc==SQLITE_MISUSE
+ );
+ return (rc&db->errMask);
+}
+
+/*
+** This is the top-level implementation of sqlite3_step(). Call
+** sqlite3Step() to do most of the work. If a schema error occurs,
+** call sqlite3Reprepare() and try again.
+*/
+SQLITE_API int sqlite3_step(sqlite3_stmt *pStmt){
+ int rc = SQLITE_OK; /* Result from sqlite3Step() */
+ Vdbe *v = (Vdbe*)pStmt; /* the prepared statement */
+ int cnt = 0; /* Counter to prevent infinite loop of reprepares */
+ sqlite3 *db; /* The database connection */
+
+ if( vdbeSafetyNotNull(v) ){
+ return SQLITE_MISUSE_BKPT;
+ }
+ db = v->db;
+ sqlite3_mutex_enter(db->mutex);
+ v->doingRerun = 0;
+ while( (rc = sqlite3Step(v))==SQLITE_SCHEMA
+ && cnt++ < SQLITE_MAX_SCHEMA_RETRY ){
+ int savedPc = v->pc;
+ rc = sqlite3Reprepare(v);
+ if( rc!=SQLITE_OK ){
+      /* This case occurs after failing to recompile an SQL statement.
+ ** The error message from the SQL compiler has already been loaded
+ ** into the database handle. This block copies the error message
+ ** from the database handle into the statement and sets the statement
+ ** program counter to 0 to ensure that when the statement is
+ ** finalized or reset the parser error message is available via
+ ** sqlite3_errmsg() and sqlite3_errcode().
+ */
+ const char *zErr = (const char *)sqlite3_value_text(db->pErr);
+ sqlite3DbFree(db, v->zErrMsg);
+ if( !db->mallocFailed ){
+ v->zErrMsg = sqlite3DbStrDup(db, zErr);
+ v->rc = rc = sqlite3ApiExit(db, rc);
+ } else {
+ v->zErrMsg = 0;
+ v->rc = rc = SQLITE_NOMEM_BKPT;
+ }
+ break;
+ }
+ sqlite3_reset(pStmt);
+ if( savedPc>=0 ) v->doingRerun = 1;
+ assert( v->expired==0 );
+ }
+ sqlite3_mutex_leave(db->mutex);
+ return rc;
+}
+
+
+/*
+** Extract the user data from a sqlite3_context structure and return a
+** pointer to it.
+*/
+SQLITE_API void *sqlite3_user_data(sqlite3_context *p){
+ assert( p && p->pFunc );
+ return p->pFunc->pUserData;
+}
+
+/*
+** Extract the database handle from a sqlite3_context structure and
+** return a pointer to it.
+**
+** IMPLEMENTATION-OF: R-46798-50301 The sqlite3_context_db_handle() interface
+** returns a copy of the pointer to the database connection (the 1st
+** parameter) of the sqlite3_create_function() and
+** sqlite3_create_function16() routines that originally registered the
+** application defined function.
+*/
+SQLITE_API sqlite3 *sqlite3_context_db_handle(sqlite3_context *p){
+ assert( p && p->pOut );
+ return p->pOut->db;
+}
+
+/*
+** If this routine is invoked from within an xColumn method of a virtual
+** table, then it returns true if and only if the call is during an
+** UPDATE operation and the value of the column will not be modified
+** by the UPDATE.
+**
+** If this routine is called from any context other than within the
+** xColumn method of a virtual table, then the return value is meaningless
+** and arbitrary.
+**
+** Virtual table implementations might use this routine to optimize their
+** performance by substituting a NULL result, or some other light-weight
+** value, as a signal to the xUpdate routine that the column is unchanged.
+*/
+SQLITE_API int sqlite3_vtab_nochange(sqlite3_context *p){
+ assert( p );
+ return sqlite3_value_nochange(p->pOut);
+}
+
+/*
+** Return the current time for a statement. If the current time
+** is requested more than once within the same run of a single prepared
+** statement, the exact same time is returned for each invocation regardless
+** of the amount of time that elapses between invocations. In other words,
+** the time returned is always the time of the first call.
+*/
+SQLITE_PRIVATE sqlite3_int64 sqlite3StmtCurrentTime(sqlite3_context *p){
+ int rc;
+#ifndef SQLITE_ENABLE_STAT4
+ sqlite3_int64 *piTime = &p->pVdbe->iCurrentTime;
+ assert( p->pVdbe!=0 );
+#else
+ sqlite3_int64 iTime = 0;
+ sqlite3_int64 *piTime = p->pVdbe!=0 ? &p->pVdbe->iCurrentTime : &iTime;
+#endif
+ if( *piTime==0 ){
+ rc = sqlite3OsCurrentTimeInt64(p->pOut->db->pVfs, piTime);
+ if( rc ) *piTime = 0;
+ }
+ return *piTime;
+}
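+
+/*
+** Consequence, by example: in a single run of
+**
+**     SELECT datetime('now'), datetime('now');
+**
+** both calls observe the same frozen time, regardless of how much wall
+** clock time elapses between their evaluations.
+*/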
+
+/*
+** Create a new aggregate context for p and return a pointer to
+** its pMem->z element.
+*/
+static SQLITE_NOINLINE void *createAggContext(sqlite3_context *p, int nByte){
+ Mem *pMem = p->pMem;
+ assert( (pMem->flags & MEM_Agg)==0 );
+ if( nByte<=0 ){
+ sqlite3VdbeMemSetNull(pMem);
+ pMem->z = 0;
+ }else{
+ sqlite3VdbeMemClearAndResize(pMem, nByte);
+ pMem->flags = MEM_Agg;
+ pMem->u.pDef = p->pFunc;
+ if( pMem->z ){
+ memset(pMem->z, 0, nByte);
+ }
+ }
+ return (void*)pMem->z;
+}
+
+/*
+** Allocate or return the aggregate context for a user function. A new
+** context is allocated on the first call. Subsequent calls return the
+** same context that was returned on prior calls.
+*/
+SQLITE_API void *sqlite3_aggregate_context(sqlite3_context *p, int nByte){
+ assert( p && p->pFunc && p->pFunc->xFinalize );
+ assert( sqlite3_mutex_held(p->pOut->db->mutex) );
+ testcase( nByte<0 );
+ if( (p->pMem->flags & MEM_Agg)==0 ){
+ return createAggContext(p, nByte);
+ }else{
+ return (void*)p->pMem->z;
+ }
+}
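+
+/*
+** Sketch of the intended usage from inside an aggregate implementation
+** (a hypothetical COUNT-like aggregate):
+**
+**     typedef struct CntCtx { sqlite3_int64 n; } CntCtx;
+**     static void cntStep(sqlite3_context *ctx, int argc, sqlite3_value **argv){
+**       CntCtx *p = (CntCtx*)sqlite3_aggregate_context(ctx, sizeof(CntCtx));
+**       if( p ) p->n++;
+**     }
+**     static void cntFinal(sqlite3_context *ctx){
+**       CntCtx *p = (CntCtx*)sqlite3_aggregate_context(ctx, 0);
+**       sqlite3_result_int64(ctx, p ? p->n : 0);
+**     }
+**
+** Passing nByte==0 from xFinalize returns any existing context without
+** allocating a new one, or NULL if xStep was never called.
+*/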
+
+/*
+** Return the auxiliary data pointer, if any, for the iArg'th argument to
+** the user-function defined by pCtx.
+**
+** The left-most argument is 0.
+**
+** Undocumented behavior: If iArg is negative then access a cache of
+** auxiliary data pointers that is available to all functions within a
+** single prepared statement. The iArg values must match.
+*/
+SQLITE_API void *sqlite3_get_auxdata(sqlite3_context *pCtx, int iArg){
+ AuxData *pAuxData;
+
+ assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) );
+#ifdef SQLITE_ENABLE_STAT4
+ if( pCtx->pVdbe==0 ) return 0;
+#else
+ assert( pCtx->pVdbe!=0 );
+#endif
+ for(pAuxData=pCtx->pVdbe->pAuxData; pAuxData; pAuxData=pAuxData->pNextAux){
+ if( pAuxData->iAuxArg==iArg && (pAuxData->iAuxOp==pCtx->iOp || iArg<0) ){
+ return pAuxData->pAux;
+ }
+ }
+ return 0;
+}
+
+/*
+** Set the auxiliary data pointer and delete function, for the iArg'th
+** argument to the user-function defined by pCtx. Any previous value is
+** deleted by calling the delete function specified when it was set.
+**
+** The left-most argument is 0.
+**
+** Undocumented behavior: If iArg is negative then make the data available
+** to all functions within the current prepared statement using iArg as an
+** access code.
+*/
+SQLITE_API void sqlite3_set_auxdata(
+ sqlite3_context *pCtx,
+ int iArg,
+ void *pAux,
+ void (*xDelete)(void*)
+){
+ AuxData *pAuxData;
+ Vdbe *pVdbe = pCtx->pVdbe;
+
+ assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) );
+#ifdef SQLITE_ENABLE_STAT4
+ if( pVdbe==0 ) goto failed;
+#else
+ assert( pVdbe!=0 );
+#endif
+
+ for(pAuxData=pVdbe->pAuxData; pAuxData; pAuxData=pAuxData->pNextAux){
+ if( pAuxData->iAuxArg==iArg && (pAuxData->iAuxOp==pCtx->iOp || iArg<0) ){
+ break;
+ }
+ }
+ if( pAuxData==0 ){
+ pAuxData = sqlite3DbMallocZero(pVdbe->db, sizeof(AuxData));
+ if( !pAuxData ) goto failed;
+ pAuxData->iAuxOp = pCtx->iOp;
+ pAuxData->iAuxArg = iArg;
+ pAuxData->pNextAux = pVdbe->pAuxData;
+ pVdbe->pAuxData = pAuxData;
+ if( pCtx->isError==0 ) pCtx->isError = -1;
+ }else if( pAuxData->xDeleteAux ){
+ pAuxData->xDeleteAux(pAuxData->pAux);
+ }
+
+ pAuxData->pAux = pAux;
+ pAuxData->xDeleteAux = xDelete;
+ return;
+
+failed:
+ if( xDelete ){
+ xDelete(pAux);
+ }
+}
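+
+/*
+** Sketch of the classic use of the auxdata interfaces: caching a compiled
+** pattern across rows (ReCompiled, re_compile() and re_free() are
+** hypothetical helpers, not part of this file):
+**
+**     static void regexpFunc(sqlite3_context *ctx, int argc, sqlite3_value **argv){
+**       ReCompiled *pRe = sqlite3_get_auxdata(ctx, 0);
+**       if( pRe==0 ){
+**         pRe = re_compile(sqlite3_value_text(argv[0]));
+**         sqlite3_set_auxdata(ctx, 0, pRe, re_free);
+**       }
+**       ...
+**     }
+**
+** If the pattern argument is a constant in the SQL text, the compiled
+** form survives across rows; otherwise the destructor may run at once.
+*/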
+
+#ifndef SQLITE_OMIT_DEPRECATED
+/*
+** Return the number of times the Step function of an aggregate has been
+** called.
+**
+** This function is deprecated. Do not use it for new code. It is
+** provided only to avoid breaking legacy code.  New aggregate function
+** implementations should keep their own counts within their aggregate
+** context.
+*/
+SQLITE_API int sqlite3_aggregate_count(sqlite3_context *p){
+ assert( p && p->pMem && p->pFunc && p->pFunc->xFinalize );
+ return p->pMem->n;
+}
+#endif
+
+/*
+** Return the number of columns in the result set for the statement pStmt.
+*/
+SQLITE_API int sqlite3_column_count(sqlite3_stmt *pStmt){
+ Vdbe *pVm = (Vdbe *)pStmt;
+ return pVm ? pVm->nResColumn : 0;
+}
+
+/*
+** Return the number of values available from the current row of the
+** currently executing statement pStmt.
+*/
+SQLITE_API int sqlite3_data_count(sqlite3_stmt *pStmt){
+ Vdbe *pVm = (Vdbe *)pStmt;
+ if( pVm==0 || pVm->pResultSet==0 ) return 0;
+ return pVm->nResColumn;
+}
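+
+/*
+** The distinction, by example: for "SELECT a, b FROM t1",
+** sqlite3_column_count() returns 2 at any time, while sqlite3_data_count()
+** returns 2 only while the statement is sitting on a row (immediately
+** after sqlite3_step() has returned SQLITE_ROW) and 0 otherwise.
+*/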
+
+/*
+** Return a pointer to static memory containing an SQL NULL value.
+*/
+static const Mem *columnNullValue(void){
+ /* Even though the Mem structure contains an element
+ ** of type i64, on certain architectures (x86) with certain compiler
+ ** switches (-Os), gcc may align this Mem object on a 4-byte boundary
+ ** instead of an 8-byte one. This all works fine, except that when
+ ** running with SQLITE_DEBUG defined the SQLite code sometimes assert()s
+ ** that a Mem structure is located on an 8-byte boundary. To prevent
+ ** these assert()s from failing, when building with SQLITE_DEBUG defined
+ ** using gcc, we force nullMem to be 8-byte aligned using the magical
+ ** __attribute__((aligned(8))) macro. */
+ static const Mem nullMem
+#if defined(SQLITE_DEBUG) && defined(__GNUC__)
+ __attribute__((aligned(8)))
+#endif
+ = {
+ /* .u = */ {0},
+ /* .flags = */ (u16)MEM_Null,
+ /* .enc = */ (u8)0,
+ /* .eSubtype = */ (u8)0,
+ /* .n = */ (int)0,
+ /* .z = */ (char*)0,
+ /* .zMalloc = */ (char*)0,
+ /* .szMalloc = */ (int)0,
+ /* .uTemp = */ (u32)0,
+ /* .db = */ (sqlite3*)0,
+ /* .xDel = */ (void(*)(void*))0,
+#ifdef SQLITE_DEBUG
+ /* .pScopyFrom = */ (Mem*)0,
+ /* .mScopyFlags= */ 0,
+#endif
+ };
+ return &nullMem;
+}
+
+/*
+** Check to see if column iCol of the given statement is valid. If
+** it is, return a pointer to the Mem for the value of that column.
+** If iCol is not valid, return a pointer to a Mem which has a value
+** of NULL.
+*/
+static Mem *columnMem(sqlite3_stmt *pStmt, int i){
+ Vdbe *pVm;
+ Mem *pOut;
+
+ pVm = (Vdbe *)pStmt;
+ if( pVm==0 ) return (Mem*)columnNullValue();
+ assert( pVm->db );
+ sqlite3_mutex_enter(pVm->db->mutex);
+ if( pVm->pResultSet!=0 && i<pVm->nResColumn && i>=0 ){
+ pOut = &pVm->pResultSet[i];
+ }else{
+ sqlite3Error(pVm->db, SQLITE_RANGE);
+ pOut = (Mem*)columnNullValue();
+ }
+ return pOut;
+}
+
+/*
+** This function is called after invoking an sqlite3_value_XXX function on a
+** column value (i.e. a value returned by evaluating an SQL expression in the
+** select list of a SELECT statement) that may cause a malloc() failure. If
+** malloc() has failed, the thread's mallocFailed flag is cleared and the
+** result code of statement pStmt is set to SQLITE_NOMEM.
+**
+** Specifically, this is called from within:
+**
+** sqlite3_column_int()
+** sqlite3_column_int64()
+** sqlite3_column_text()
+** sqlite3_column_text16()
+** sqlite3_column_real()
+** sqlite3_column_bytes()
+** sqlite3_column_bytes16()
+**     sqlite3_column_blob()
+*/
+static void columnMallocFailure(sqlite3_stmt *pStmt)
+{
+ /* If malloc() failed during an encoding conversion within an
+ ** sqlite3_column_XXX API, then set the return code of the statement to
+  ** SQLITE_NOMEM. The next call to sqlite3_step() (if any) will return
+  ** SQLITE_ERROR and sqlite3_finalize() will return SQLITE_NOMEM.
+ */
+ Vdbe *p = (Vdbe *)pStmt;
+ if( p ){
+ assert( p->db!=0 );
+ assert( sqlite3_mutex_held(p->db->mutex) );
+ p->rc = sqlite3ApiExit(p->db, p->rc);
+ sqlite3_mutex_leave(p->db->mutex);
+ }
+}
+
+/**************************** sqlite3_column_ *******************************
+** The following routines are used to access elements of the current row
+** in the result set.
+*/
+SQLITE_API const void *sqlite3_column_blob(sqlite3_stmt *pStmt, int i){
+ const void *val;
+ val = sqlite3_value_blob( columnMem(pStmt,i) );
+ /* Even though there is no encoding conversion, value_blob() might
+ ** need to call malloc() to expand the result of a zeroblob()
+ ** expression.
+ */
+ columnMallocFailure(pStmt);
+ return val;
+}
+SQLITE_API int sqlite3_column_bytes(sqlite3_stmt *pStmt, int i){
+ int val = sqlite3_value_bytes( columnMem(pStmt,i) );
+ columnMallocFailure(pStmt);
+ return val;
+}
+SQLITE_API int sqlite3_column_bytes16(sqlite3_stmt *pStmt, int i){
+ int val = sqlite3_value_bytes16( columnMem(pStmt,i) );
+ columnMallocFailure(pStmt);
+ return val;
+}
+SQLITE_API double sqlite3_column_double(sqlite3_stmt *pStmt, int i){
+ double val = sqlite3_value_double( columnMem(pStmt,i) );
+ columnMallocFailure(pStmt);
+ return val;
+}
+SQLITE_API int sqlite3_column_int(sqlite3_stmt *pStmt, int i){
+ int val = sqlite3_value_int( columnMem(pStmt,i) );
+ columnMallocFailure(pStmt);
+ return val;
+}
+SQLITE_API sqlite_int64 sqlite3_column_int64(sqlite3_stmt *pStmt, int i){
+ sqlite_int64 val = sqlite3_value_int64( columnMem(pStmt,i) );
+ columnMallocFailure(pStmt);
+ return val;
+}
+SQLITE_API const unsigned char *sqlite3_column_text(sqlite3_stmt *pStmt, int i){
+ const unsigned char *val = sqlite3_value_text( columnMem(pStmt,i) );
+ columnMallocFailure(pStmt);
+ return val;
+}
+SQLITE_API sqlite3_value *sqlite3_column_value(sqlite3_stmt *pStmt, int i){
+ Mem *pOut = columnMem(pStmt, i);
+ if( pOut->flags&MEM_Static ){
+ pOut->flags &= ~MEM_Static;
+ pOut->flags |= MEM_Ephem;
+ }
+ columnMallocFailure(pStmt);
+ return (sqlite3_value *)pOut;
+}
+#ifndef SQLITE_OMIT_UTF16
+SQLITE_API const void *sqlite3_column_text16(sqlite3_stmt *pStmt, int i){
+ const void *val = sqlite3_value_text16( columnMem(pStmt,i) );
+ columnMallocFailure(pStmt);
+ return val;
+}
+#endif /* SQLITE_OMIT_UTF16 */
+SQLITE_API int sqlite3_column_type(sqlite3_stmt *pStmt, int i){
+ int iType = sqlite3_value_type( columnMem(pStmt,i) );
+ columnMallocFailure(pStmt);
+ return iType;
+}
+
+/*
+** Convert the N-th element of pStmt->aColName[] into a string using
+** sqlite3_value_text() or sqlite3_value_text16(), as selected by the
+** useUtf16 parameter, and return that string. If N is out of range,
+** return 0.
+**
+** There are up to 5 names for each column. useType determines which
+** name is returned. Here are the names:
+**
+** 0 The column name as it should be displayed for output
+** 1 The datatype name for the column
+** 2 The name of the database that the column derives from
+** 3 The name of the table that the column derives from
+** 4 The name of the table column that the result column derives from
+**
+** If the result is not a simple column reference (if it is an expression
+** or a constant) then useTypes 2, 3, and 4 return NULL.
+*/
+static const void *columnName(
+ sqlite3_stmt *pStmt, /* The statement */
+ int N, /* Which column to get the name for */
+ int useUtf16, /* True to return the name as UTF16 */
+ int useType /* What type of name */
+){
+ const void *ret;
+ Vdbe *p;
+ int n;
+ sqlite3 *db;
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( pStmt==0 ){
+ (void)SQLITE_MISUSE_BKPT;
+ return 0;
+ }
+#endif
+ ret = 0;
+ p = (Vdbe *)pStmt;
+ db = p->db;
+ assert( db!=0 );
+ n = sqlite3_column_count(pStmt);
+ if( N<n && N>=0 ){
+ N += useType*n;
+ sqlite3_mutex_enter(db->mutex);
+ assert( db->mallocFailed==0 );
+#ifndef SQLITE_OMIT_UTF16
+ if( useUtf16 ){
+ ret = sqlite3_value_text16((sqlite3_value*)&p->aColName[N]);
+ }else
+#endif
+ {
+ ret = sqlite3_value_text((sqlite3_value*)&p->aColName[N]);
+ }
+ /* A malloc may have failed inside of the _text() call. If this
+ ** is the case, clear the mallocFailed flag and return NULL.
+ */
+ if( db->mallocFailed ){
+ sqlite3OomClear(db);
+ ret = 0;
+ }
+ sqlite3_mutex_leave(db->mutex);
+ }
+ return ret;
+}
+
+/*
+** Return the name of the Nth column of the result set returned by SQL
+** statement pStmt.
+*/
+SQLITE_API const char *sqlite3_column_name(sqlite3_stmt *pStmt, int N){
+ return columnName(pStmt, N, 0, COLNAME_NAME);
+}
+#ifndef SQLITE_OMIT_UTF16
+SQLITE_API const void *sqlite3_column_name16(sqlite3_stmt *pStmt, int N){
+ return columnName(pStmt, N, 1, COLNAME_NAME);
+}
+#endif
+
+/*
+** Constraint: If you have ENABLE_COLUMN_METADATA then you must
+** not define OMIT_DECLTYPE.
+*/
+#if defined(SQLITE_OMIT_DECLTYPE) && defined(SQLITE_ENABLE_COLUMN_METADATA)
+# error "Must not define both SQLITE_OMIT_DECLTYPE \
+ and SQLITE_ENABLE_COLUMN_METADATA"
+#endif
+
+#ifndef SQLITE_OMIT_DECLTYPE
+/*
+** Return the column declaration type (if applicable) of the Nth column
+** of the result set of SQL statement pStmt.
+*/
+SQLITE_API const char *sqlite3_column_decltype(sqlite3_stmt *pStmt, int N){
+ return columnName(pStmt, N, 0, COLNAME_DECLTYPE);
+}
+#ifndef SQLITE_OMIT_UTF16
+SQLITE_API const void *sqlite3_column_decltype16(sqlite3_stmt *pStmt, int N){
+ return columnName(pStmt, N, 1, COLNAME_DECLTYPE);
+}
+#endif /* SQLITE_OMIT_UTF16 */
+#endif /* SQLITE_OMIT_DECLTYPE */
+
+#ifdef SQLITE_ENABLE_COLUMN_METADATA
+/*
+** Return the name of the database from which a result column derives.
+** NULL is returned if the result column is an expression or constant or
+** anything else which is not an unambiguous reference to a database column.
+*/
+SQLITE_API const char *sqlite3_column_database_name(sqlite3_stmt *pStmt, int N){
+ return columnName(pStmt, N, 0, COLNAME_DATABASE);
+}
+#ifndef SQLITE_OMIT_UTF16
+SQLITE_API const void *sqlite3_column_database_name16(sqlite3_stmt *pStmt, int N){
+ return columnName(pStmt, N, 1, COLNAME_DATABASE);
+}
+#endif /* SQLITE_OMIT_UTF16 */
+
+/*
+** Return the name of the table from which a result column derives.
+** NULL is returned if the result column is an expression or constant or
+** anything else which is not an unambiguous reference to a database column.
+*/
+SQLITE_API const char *sqlite3_column_table_name(sqlite3_stmt *pStmt, int N){
+ return columnName(pStmt, N, 0, COLNAME_TABLE);
+}
+#ifndef SQLITE_OMIT_UTF16
+SQLITE_API const void *sqlite3_column_table_name16(sqlite3_stmt *pStmt, int N){
+ return columnName(pStmt, N, 1, COLNAME_TABLE);
+}
+#endif /* SQLITE_OMIT_UTF16 */
+
+/*
+** Return the name of the table column from which a result column derives.
+** NULL is returned if the result column is an expression or constant or
+** anything else which is not an unambiguous reference to a database column.
+*/
+SQLITE_API const char *sqlite3_column_origin_name(sqlite3_stmt *pStmt, int N){
+ return columnName(pStmt, N, 0, COLNAME_COLUMN);
+}
+#ifndef SQLITE_OMIT_UTF16
+SQLITE_API const void *sqlite3_column_origin_name16(sqlite3_stmt *pStmt, int N){
+ return columnName(pStmt, N, 1, COLNAME_COLUMN);
+}
+#endif /* SQLITE_OMIT_UTF16 */
+#endif /* SQLITE_ENABLE_COLUMN_METADATA */
+
+
+/******************************* sqlite3_bind_ ***************************
+**
+** Routines used to attach values to wildcards in a compiled SQL statement.
+*/
+/*
+** Unbind the value bound to variable i in virtual machine p. This is
+** the same as binding a NULL value to the column. If the "i" parameter is
+** out of range, then SQLITE_RANGE is returned. Otherwise SQLITE_OK.
+**
+** A successful evaluation of this routine acquires the mutex on p.
+** The mutex is released if any kind of error occurs.
+**
+** The error code stored in database p->db is overwritten with the return
+** value in any case.
+*/
+static int vdbeUnbind(Vdbe *p, int i){
+ Mem *pVar;
+ if( vdbeSafetyNotNull(p) ){
+ return SQLITE_MISUSE_BKPT;
+ }
+ sqlite3_mutex_enter(p->db->mutex);
+ if( p->magic!=VDBE_MAGIC_RUN || p->pc>=0 ){
+ sqlite3Error(p->db, SQLITE_MISUSE);
+ sqlite3_mutex_leave(p->db->mutex);
+ sqlite3_log(SQLITE_MISUSE,
+ "bind on a busy prepared statement: [%s]", p->zSql);
+ return SQLITE_MISUSE_BKPT;
+ }
+ if( i<1 || i>p->nVar ){
+ sqlite3Error(p->db, SQLITE_RANGE);
+ sqlite3_mutex_leave(p->db->mutex);
+ return SQLITE_RANGE;
+ }
+ i--;
+ pVar = &p->aVar[i];
+ sqlite3VdbeMemRelease(pVar);
+ pVar->flags = MEM_Null;
+ p->db->errCode = SQLITE_OK;
+
+ /* If the bit corresponding to this variable in Vdbe.expmask is set, then
+ ** binding a new value to this variable invalidates the current query plan.
+ **
+ ** IMPLEMENTATION-OF: R-57496-20354 If the specific value bound to a host
+ ** parameter in the WHERE clause might influence the choice of query plan
+ ** for a statement, then the statement will be automatically recompiled,
+ ** as if there had been a schema change, on the first sqlite3_step() call
+ ** following any change to the bindings of that parameter.
+ */
+ assert( (p->prepFlags & SQLITE_PREPARE_SAVESQL)!=0 || p->expmask==0 );
+ if( p->expmask!=0 && (p->expmask & (i>=31 ? 0x80000000 : (u32)1<<i))!=0 ){
+ p->expired = 1;
+ }
+ return SQLITE_OK;
+}
+
+/*
+** Bind a text or BLOB value.
+*/
+static int bindText(
+ sqlite3_stmt *pStmt, /* The statement to bind against */
+ int i, /* Index of the parameter to bind */
+ const void *zData, /* Pointer to the data to be bound */
+ int nData, /* Number of bytes of data to be bound */
+ void (*xDel)(void*), /* Destructor for the data */
+ u8 encoding /* Encoding for the data */
+){
+ Vdbe *p = (Vdbe *)pStmt;
+ Mem *pVar;
+ int rc;
+
+ rc = vdbeUnbind(p, i);
+ if( rc==SQLITE_OK ){
+ if( zData!=0 ){
+ pVar = &p->aVar[i-1];
+ rc = sqlite3VdbeMemSetStr(pVar, zData, nData, encoding, xDel);
+ if( rc==SQLITE_OK && encoding!=0 ){
+ rc = sqlite3VdbeChangeEncoding(pVar, ENC(p->db));
+ }
+ if( rc ){
+ sqlite3Error(p->db, rc);
+ rc = sqlite3ApiExit(p->db, rc);
+ }
+ }
+ sqlite3_mutex_leave(p->db->mutex);
+ }else if( xDel!=SQLITE_STATIC && xDel!=SQLITE_TRANSIENT ){
+ xDel((void*)zData);
+ }
+ return rc;
+}
+
+
+/*
+** Bind a blob value to an SQL statement variable.
+*/
+SQLITE_API int sqlite3_bind_blob(
+ sqlite3_stmt *pStmt,
+ int i,
+ const void *zData,
+ int nData,
+ void (*xDel)(void*)
+){
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( nData<0 ) return SQLITE_MISUSE_BKPT;
+#endif
+ return bindText(pStmt, i, zData, nData, xDel, 0);
+}
+SQLITE_API int sqlite3_bind_blob64(
+ sqlite3_stmt *pStmt,
+ int i,
+ const void *zData,
+ sqlite3_uint64 nData,
+ void (*xDel)(void*)
+){
+ assert( xDel!=SQLITE_DYNAMIC );
+ if( nData>0x7fffffff ){
+ return invokeValueDestructor(zData, xDel, 0);
+ }else{
+ return bindText(pStmt, i, zData, (int)nData, xDel, 0);
+ }
+}
+SQLITE_API int sqlite3_bind_double(sqlite3_stmt *pStmt, int i, double rValue){
+ int rc;
+ Vdbe *p = (Vdbe *)pStmt;
+ rc = vdbeUnbind(p, i);
+ if( rc==SQLITE_OK ){
+ sqlite3VdbeMemSetDouble(&p->aVar[i-1], rValue);
+ sqlite3_mutex_leave(p->db->mutex);
+ }
+ return rc;
+}
+SQLITE_API int sqlite3_bind_int(sqlite3_stmt *p, int i, int iValue){
+ return sqlite3_bind_int64(p, i, (i64)iValue);
+}
+SQLITE_API int sqlite3_bind_int64(sqlite3_stmt *pStmt, int i, sqlite_int64 iValue){
+ int rc;
+ Vdbe *p = (Vdbe *)pStmt;
+ rc = vdbeUnbind(p, i);
+ if( rc==SQLITE_OK ){
+ sqlite3VdbeMemSetInt64(&p->aVar[i-1], iValue);
+ sqlite3_mutex_leave(p->db->mutex);
+ }
+ return rc;
+}
+SQLITE_API int sqlite3_bind_null(sqlite3_stmt *pStmt, int i){
+ int rc;
+ Vdbe *p = (Vdbe*)pStmt;
+ rc = vdbeUnbind(p, i);
+ if( rc==SQLITE_OK ){
+ sqlite3_mutex_leave(p->db->mutex);
+ }
+ return rc;
+}
+SQLITE_API int sqlite3_bind_pointer(
+ sqlite3_stmt *pStmt,
+ int i,
+ void *pPtr,
+ const char *zPTtype,
+ void (*xDestructor)(void*)
+){
+ int rc;
+ Vdbe *p = (Vdbe*)pStmt;
+ rc = vdbeUnbind(p, i);
+ if( rc==SQLITE_OK ){
+ sqlite3VdbeMemSetPointer(&p->aVar[i-1], pPtr, zPTtype, xDestructor);
+ sqlite3_mutex_leave(p->db->mutex);
+ }else if( xDestructor ){
+ xDestructor(pPtr);
+ }
+ return rc;
+}
+SQLITE_API int sqlite3_bind_text(
+ sqlite3_stmt *pStmt,
+ int i,
+ const char *zData,
+ int nData,
+ void (*xDel)(void*)
+){
+ return bindText(pStmt, i, zData, nData, xDel, SQLITE_UTF8);
+}
+SQLITE_API int sqlite3_bind_text64(
+ sqlite3_stmt *pStmt,
+ int i,
+ const char *zData,
+ sqlite3_uint64 nData,
+ void (*xDel)(void*),
+ unsigned char enc
+){
+ assert( xDel!=SQLITE_DYNAMIC );
+ if( nData>0x7fffffff ){
+ return invokeValueDestructor(zData, xDel, 0);
+ }else{
+ if( enc==SQLITE_UTF16 ) enc = SQLITE_UTF16NATIVE;
+ return bindText(pStmt, i, zData, (int)nData, xDel, enc);
+ }
+}
+#ifndef SQLITE_OMIT_UTF16
+SQLITE_API int sqlite3_bind_text16(
+ sqlite3_stmt *pStmt,
+ int i,
+ const void *zData,
+ int nData,
+ void (*xDel)(void*)
+){
+ return bindText(pStmt, i, zData, nData, xDel, SQLITE_UTF16NATIVE);
+}
+#endif /* SQLITE_OMIT_UTF16 */
+SQLITE_API int sqlite3_bind_value(sqlite3_stmt *pStmt, int i, const sqlite3_value *pValue){
+ int rc;
+ switch( sqlite3_value_type((sqlite3_value*)pValue) ){
+ case SQLITE_INTEGER: {
+ rc = sqlite3_bind_int64(pStmt, i, pValue->u.i);
+ break;
+ }
+ case SQLITE_FLOAT: {
+ rc = sqlite3_bind_double(pStmt, i, pValue->u.r);
+ break;
+ }
+ case SQLITE_BLOB: {
+ if( pValue->flags & MEM_Zero ){
+ rc = sqlite3_bind_zeroblob(pStmt, i, pValue->u.nZero);
+ }else{
+ rc = sqlite3_bind_blob(pStmt, i, pValue->z, pValue->n,SQLITE_TRANSIENT);
+ }
+ break;
+ }
+ case SQLITE_TEXT: {
+ rc = bindText(pStmt,i, pValue->z, pValue->n, SQLITE_TRANSIENT,
+ pValue->enc);
+ break;
+ }
+ default: {
+ rc = sqlite3_bind_null(pStmt, i);
+ break;
+ }
+ }
+ return rc;
+}
+SQLITE_API int sqlite3_bind_zeroblob(sqlite3_stmt *pStmt, int i, int n){
+ int rc;
+ Vdbe *p = (Vdbe *)pStmt;
+ rc = vdbeUnbind(p, i);
+ if( rc==SQLITE_OK ){
+ sqlite3VdbeMemSetZeroBlob(&p->aVar[i-1], n);
+ sqlite3_mutex_leave(p->db->mutex);
+ }
+ return rc;
+}
+SQLITE_API int sqlite3_bind_zeroblob64(sqlite3_stmt *pStmt, int i, sqlite3_uint64 n){
+ int rc;
+ Vdbe *p = (Vdbe *)pStmt;
+ sqlite3_mutex_enter(p->db->mutex);
+ if( n>(u64)p->db->aLimit[SQLITE_LIMIT_LENGTH] ){
+ rc = SQLITE_TOOBIG;
+ }else{
+ assert( (n & 0x7FFFFFFF)==n );
+ rc = sqlite3_bind_zeroblob(pStmt, i, n);
+ }
+ rc = sqlite3ApiExit(p->db, rc);
+ sqlite3_mutex_leave(p->db->mutex);
+ return rc;
+}
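+
+/* Illustrative sketch -- not part of the upstream SQLite amalgamation and
+** compiled out with #if 0: binding values of several types to a statement
+** such as "INSERT INTO t(a,b,c) VALUES(?1,?2,?3)". Parameter indexes are
+** 1-based. SQLITE_TRANSIENT makes SQLite copy the text immediately;
+** SQLITE_STATIC would instead promise that the buffer outlives the binding.
+*/
+#if 0
+static int bindRow(sqlite3_stmt *pStmt, const char *zName, double rScore){
+  int rc = sqlite3_bind_text(pStmt, 1, zName, -1, SQLITE_TRANSIENT);
+  if( rc==SQLITE_OK ) rc = sqlite3_bind_double(pStmt, 2, rScore);
+  if( rc==SQLITE_OK ) rc = sqlite3_bind_null(pStmt, 3);
+  return rc;  /* SQLITE_OK, or an error code such as SQLITE_RANGE */
+}
+#endif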
+
+/*
+** Return the number of wildcards that can be potentially bound to.
+** This routine is added to support DBD::SQLite.
+*/
+SQLITE_API int sqlite3_bind_parameter_count(sqlite3_stmt *pStmt){
+ Vdbe *p = (Vdbe*)pStmt;
+ return p ? p->nVar : 0;
+}
+
+/*
+** Return the name of a wildcard parameter. Return NULL if the index
+** is out of range or if the wildcard is unnamed.
+**
+** The result is always UTF-8.
+*/
+SQLITE_API const char *sqlite3_bind_parameter_name(sqlite3_stmt *pStmt, int i){
+ Vdbe *p = (Vdbe*)pStmt;
+ if( p==0 ) return 0;
+ return sqlite3VListNumToName(p->pVList, i);
+}
+
+/*
+** Given a wildcard parameter name, return the index of the variable
+** with that name. If there is no variable with the given name,
+** return 0.
+*/
+SQLITE_PRIVATE int sqlite3VdbeParameterIndex(Vdbe *p, const char *zName, int nName){
+ if( p==0 || zName==0 ) return 0;
+ return sqlite3VListNameToNum(p->pVList, zName, nName);
+}
+SQLITE_API int sqlite3_bind_parameter_index(sqlite3_stmt *pStmt, const char *zName){
+ return sqlite3VdbeParameterIndex((Vdbe*)pStmt, zName, sqlite3Strlen30(zName));
+}
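+
+/* Illustrative sketch -- not part of the upstream SQLite amalgamation and
+** compiled out with #if 0: resolving a named parameter such as ":name" to
+** its 1-based index before binding. A zero return from
+** sqlite3_bind_parameter_index() means the statement has no such parameter.
+*/
+#if 0
+static int bindByName(sqlite3_stmt *pStmt, const char *zParam,
+                      const char *zValue){
+  int idx = sqlite3_bind_parameter_index(pStmt, zParam);  /* e.g. ":name" */
+  if( idx==0 ) return SQLITE_RANGE;  /* No parameter with that name */
+  return sqlite3_bind_text(pStmt, idx, zValue, -1, SQLITE_TRANSIENT);
+}
+#endif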
+
+/*
+** Transfer all bindings from the first statement over to the second.
+*/
+SQLITE_PRIVATE int sqlite3TransferBindings(sqlite3_stmt *pFromStmt, sqlite3_stmt *pToStmt){
+ Vdbe *pFrom = (Vdbe*)pFromStmt;
+ Vdbe *pTo = (Vdbe*)pToStmt;
+ int i;
+ assert( pTo->db==pFrom->db );
+ assert( pTo->nVar==pFrom->nVar );
+ sqlite3_mutex_enter(pTo->db->mutex);
+ for(i=0; i<pFrom->nVar; i++){
+ sqlite3VdbeMemMove(&pTo->aVar[i], &pFrom->aVar[i]);
+ }
+ sqlite3_mutex_leave(pTo->db->mutex);
+ return SQLITE_OK;
+}
+
+#ifndef SQLITE_OMIT_DEPRECATED
+/*
+** Deprecated external interface. Internal/core SQLite code
+** should call sqlite3TransferBindings.
+**
+** It is misuse to call this routine with statements from different
+** database connections. But as this is a deprecated interface, we
+** will not bother to check for that condition.
+**
+** If the two statements contain a different number of bindings, then
+** an SQLITE_ERROR is returned. Nothing else can go wrong, so otherwise
+** SQLITE_OK is returned.
+*/
+SQLITE_API int sqlite3_transfer_bindings(sqlite3_stmt *pFromStmt, sqlite3_stmt *pToStmt){
+ Vdbe *pFrom = (Vdbe*)pFromStmt;
+ Vdbe *pTo = (Vdbe*)pToStmt;
+ if( pFrom->nVar!=pTo->nVar ){
+ return SQLITE_ERROR;
+ }
+ assert( (pTo->prepFlags & SQLITE_PREPARE_SAVESQL)!=0 || pTo->expmask==0 );
+ if( pTo->expmask ){
+ pTo->expired = 1;
+ }
+ assert( (pFrom->prepFlags & SQLITE_PREPARE_SAVESQL)!=0 || pFrom->expmask==0 );
+ if( pFrom->expmask ){
+ pFrom->expired = 1;
+ }
+ return sqlite3TransferBindings(pFromStmt, pToStmt);
+}
+#endif
+
+/*
+** Return the sqlite3* database handle to which the prepared statement given
+** in the argument belongs. This is the same database handle that was
+** the first argument to the sqlite3_prepare() that was used to create
+** the statement in the first place.
+*/
+SQLITE_API sqlite3 *sqlite3_db_handle(sqlite3_stmt *pStmt){
+ return pStmt ? ((Vdbe*)pStmt)->db : 0;
+}
+
+/*
+** Return true if the prepared statement is guaranteed to not modify the
+** database.
+*/
+SQLITE_API int sqlite3_stmt_readonly(sqlite3_stmt *pStmt){
+ return pStmt ? ((Vdbe*)pStmt)->readOnly : 1;
+}
+
+/*
+** Return 1 if the statement is an EXPLAIN and return 2 if the
+** statement is an EXPLAIN QUERY PLAN
+*/
+SQLITE_API int sqlite3_stmt_isexplain(sqlite3_stmt *pStmt){
+ return pStmt ? ((Vdbe*)pStmt)->explain : 0;
+}
+
+/*
+** Return true if the prepared statement is in need of being reset.
+*/
+SQLITE_API int sqlite3_stmt_busy(sqlite3_stmt *pStmt){
+ Vdbe *v = (Vdbe*)pStmt;
+ return v!=0 && v->magic==VDBE_MAGIC_RUN && v->pc>=0;
+}
+
+/*
+** Return a pointer to the next prepared statement after pStmt associated
+** with database connection pDb. If pStmt is NULL, return the first
+** prepared statement for the database connection. Return NULL if there
+** are no more.
+*/
+SQLITE_API sqlite3_stmt *sqlite3_next_stmt(sqlite3 *pDb, sqlite3_stmt *pStmt){
+ sqlite3_stmt *pNext;
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( !sqlite3SafetyCheckOk(pDb) ){
+ (void)SQLITE_MISUSE_BKPT;
+ return 0;
+ }
+#endif
+ sqlite3_mutex_enter(pDb->mutex);
+ if( pStmt==0 ){
+ pNext = (sqlite3_stmt*)pDb->pVdbe;
+ }else{
+ pNext = (sqlite3_stmt*)((Vdbe*)pStmt)->pNext;
+ }
+ sqlite3_mutex_leave(pDb->mutex);
+ return pNext;
+}
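+
+/* Illustrative sketch -- not part of the upstream SQLite amalgamation and
+** compiled out with #if 0: the canonical use of sqlite3_next_stmt() is to
+** finalize every outstanding prepared statement before closing a
+** connection. Always restart from NULL after finalizing, since finalizing
+** a statement unlinks it from the list being traversed.
+*/
+#if 0
+static void finalizeAll(sqlite3 *db){
+  sqlite3_stmt *pStmt;
+  while( (pStmt = sqlite3_next_stmt(db, 0))!=0 ){
+    sqlite3_finalize(pStmt);
+  }
+}
+#endif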
+
+/*
+** Return the value of a status counter for a prepared statement
+*/
+SQLITE_API int sqlite3_stmt_status(sqlite3_stmt *pStmt, int op, int resetFlag){
+ Vdbe *pVdbe = (Vdbe*)pStmt;
+ u32 v;
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( !pStmt
+ || (op!=SQLITE_STMTSTATUS_MEMUSED && (op<0||op>=ArraySize(pVdbe->aCounter)))
+ ){
+ (void)SQLITE_MISUSE_BKPT;
+ return 0;
+ }
+#endif
+ if( op==SQLITE_STMTSTATUS_MEMUSED ){
+ sqlite3 *db = pVdbe->db;
+ sqlite3_mutex_enter(db->mutex);
+ v = 0;
+ db->pnBytesFreed = (int*)&v;
+ sqlite3VdbeClearObject(db, pVdbe);
+ sqlite3DbFree(db, pVdbe);
+ db->pnBytesFreed = 0;
+ sqlite3_mutex_leave(db->mutex);
+ }else{
+ v = pVdbe->aCounter[op];
+ if( resetFlag ) pVdbe->aCounter[op] = 0;
+ }
+ return (int)v;
+}
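+
+/* Illustrative sketch -- not part of the upstream SQLite amalgamation and
+** compiled out with #if 0: reading statement status counters to spot
+** statements that perform full table scans or sorts. Passing a non-zero
+** resetFlag zeroes the counter after it has been read. Assumes <stdio.h>.
+*/
+#if 0
+static void reportStmtStats(sqlite3_stmt *pStmt){
+  int nScan = sqlite3_stmt_status(pStmt, SQLITE_STMTSTATUS_FULLSCAN_STEP, 0);
+  int nSort = sqlite3_stmt_status(pStmt, SQLITE_STMTSTATUS_SORT, 1);
+  printf("full-scan steps: %d, sorts since last reset: %d\n", nScan, nSort);
+}
+#endif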
+
+/*
+** Return the SQL associated with a prepared statement
+*/
+SQLITE_API const char *sqlite3_sql(sqlite3_stmt *pStmt){
+ Vdbe *p = (Vdbe *)pStmt;
+ return p ? p->zSql : 0;
+}
+
+/*
+** Return the SQL associated with a prepared statement with
+** bound parameters expanded. Space to hold the returned string is
+** obtained from sqlite3_malloc(). The caller is responsible for
+** freeing the returned string by passing it to sqlite3_free().
+**
+** The SQLITE_TRACE_SIZE_LIMIT puts an upper bound on the size of
+** expanded bound parameters.
+*/
+SQLITE_API char *sqlite3_expanded_sql(sqlite3_stmt *pStmt){
+#ifdef SQLITE_OMIT_TRACE
+ return 0;
+#else
+ char *z = 0;
+ const char *zSql = sqlite3_sql(pStmt);
+ if( zSql ){
+ Vdbe *p = (Vdbe *)pStmt;
+ sqlite3_mutex_enter(p->db->mutex);
+ z = sqlite3VdbeExpandSql(p, zSql);
+ sqlite3_mutex_leave(p->db->mutex);
+ }
+ return z;
+#endif
+}
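+
+/* Illustrative sketch -- not part of the upstream SQLite amalgamation and
+** compiled out with #if 0: logging each statement with its bindings
+** expanded, via an SQLITE_TRACE_STMT callback registered with
+** sqlite3_trace_v2(). The string returned by sqlite3_expanded_sql() must
+** be released with sqlite3_free(). Assumes <stdio.h>.
+*/
+#if 0
+static int traceCallback(unsigned type, void *pCtx, void *pP, void *pX){
+  (void)pCtx; (void)pX;
+  if( type==SQLITE_TRACE_STMT ){
+    char *zExpanded = sqlite3_expanded_sql((sqlite3_stmt*)pP);
+    if( zExpanded ){
+      fprintf(stderr, "SQL: %s\n", zExpanded);
+      sqlite3_free(zExpanded);
+    }
+  }
+  return 0;
+}
+/* Registration: sqlite3_trace_v2(db, SQLITE_TRACE_STMT, traceCallback, 0); */
+#endif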
+
+#ifdef SQLITE_ENABLE_NORMALIZE
+/*
+** Return the normalized SQL associated with a prepared statement.
+*/
+SQLITE_API const char *sqlite3_normalized_sql(sqlite3_stmt *pStmt){
+ Vdbe *p = (Vdbe *)pStmt;
+ if( p==0 ) return 0;
+ if( p->zNormSql==0 && ALWAYS(p->zSql!=0) ){
+ sqlite3_mutex_enter(p->db->mutex);
+ p->zNormSql = sqlite3Normalize(p, p->zSql);
+ sqlite3_mutex_leave(p->db->mutex);
+ }
+ return p->zNormSql;
+}
+#endif /* SQLITE_ENABLE_NORMALIZE */
+
+#ifdef SQLITE_ENABLE_PREUPDATE_HOOK
+/*
+** Allocate and populate an UnpackedRecord structure based on the serialized
+** record in nKey/pKey. Return a pointer to the new UnpackedRecord structure
+** if successful, or a NULL pointer if an OOM error is encountered.
+*/
+static UnpackedRecord *vdbeUnpackRecord(
+ KeyInfo *pKeyInfo,
+ int nKey,
+ const void *pKey
+){
+ UnpackedRecord *pRet; /* Return value */
+
+ pRet = sqlite3VdbeAllocUnpackedRecord(pKeyInfo);
+ if( pRet ){
+ memset(pRet->aMem, 0, sizeof(Mem)*(pKeyInfo->nKeyField+1));
+ sqlite3VdbeRecordUnpack(pKeyInfo, nKey, pKey, pRet);
+ }
+ return pRet;
+}
+
+/*
+** This function is called from within a pre-update callback to retrieve
+** a field of the row currently being updated or deleted.
+*/
+SQLITE_API int sqlite3_preupdate_old(sqlite3 *db, int iIdx, sqlite3_value **ppValue){
+ PreUpdate *p = db->pPreUpdate;
+ Mem *pMem;
+ int rc = SQLITE_OK;
+
+ /* Test that this call is being made from within an SQLITE_DELETE or
+ ** SQLITE_UPDATE pre-update callback, and that iIdx is within range. */
+ if( !p || p->op==SQLITE_INSERT ){
+ rc = SQLITE_MISUSE_BKPT;
+ goto preupdate_old_out;
+ }
+ if( p->pPk ){
+ iIdx = sqlite3TableColumnToIndex(p->pPk, iIdx);
+ }
+ if( iIdx>=p->pCsr->nField || iIdx<0 ){
+ rc = SQLITE_RANGE;
+ goto preupdate_old_out;
+ }
+
+ /* If the old.* record has not yet been loaded into memory, do so now. */
+ if( p->pUnpacked==0 ){
+ u32 nRec;
+ u8 *aRec;
+
+ nRec = sqlite3BtreePayloadSize(p->pCsr->uc.pCursor);
+ aRec = sqlite3DbMallocRaw(db, nRec);
+ if( !aRec ) goto preupdate_old_out;
+ rc = sqlite3BtreePayload(p->pCsr->uc.pCursor, 0, nRec, aRec);
+ if( rc==SQLITE_OK ){
+ p->pUnpacked = vdbeUnpackRecord(&p->keyinfo, nRec, aRec);
+ if( !p->pUnpacked ) rc = SQLITE_NOMEM;
+ }
+ if( rc!=SQLITE_OK ){
+ sqlite3DbFree(db, aRec);
+ goto preupdate_old_out;
+ }
+ p->aRecord = aRec;
+ }
+
+ pMem = *ppValue = &p->pUnpacked->aMem[iIdx];
+ if( iIdx==p->pTab->iPKey ){
+ sqlite3VdbeMemSetInt64(pMem, p->iKey1);
+ }else if( iIdx>=p->pUnpacked->nField ){
+ *ppValue = (sqlite3_value *)columnNullValue();
+ }else if( p->pTab->aCol[iIdx].affinity==SQLITE_AFF_REAL ){
+ if( pMem->flags & (MEM_Int|MEM_IntReal) ){
+ testcase( pMem->flags & MEM_Int );
+ testcase( pMem->flags & MEM_IntReal );
+ sqlite3VdbeMemRealify(pMem);
+ }
+ }
+
+ preupdate_old_out:
+ sqlite3Error(db, rc);
+ return sqlite3ApiExit(db, rc);
+}
+#endif /* SQLITE_ENABLE_PREUPDATE_HOOK */
+
+#ifdef SQLITE_ENABLE_PREUPDATE_HOOK
+/*
+** This function is called from within a pre-update callback to retrieve
+** the number of columns in the row being updated, deleted or inserted.
+*/
+SQLITE_API int sqlite3_preupdate_count(sqlite3 *db){
+ PreUpdate *p = db->pPreUpdate;
+ return (p ? p->keyinfo.nKeyField : 0);
+}
+#endif /* SQLITE_ENABLE_PREUPDATE_HOOK */
+
+#ifdef SQLITE_ENABLE_PREUPDATE_HOOK
+/*
+** This function is designed to be called from within a pre-update callback
+** only. It returns zero if the change that caused the callback was made
+** immediately by a user SQL statement. Or, if the change was made by a
+** trigger program, it returns the number of trigger programs currently
+** on the stack (1 for a top-level trigger, 2 for a trigger fired by a
+** top-level trigger etc.).
+**
+** For the purposes of the previous paragraph, a foreign key CASCADE, SET NULL
+** or SET DEFAULT action is considered a trigger.
+*/
+SQLITE_API int sqlite3_preupdate_depth(sqlite3 *db){
+ PreUpdate *p = db->pPreUpdate;
+ return (p ? p->v->nFrame : 0);
+}
+#endif /* SQLITE_ENABLE_PREUPDATE_HOOK */
+
+#ifdef SQLITE_ENABLE_PREUPDATE_HOOK
+/*
+** This function is called from within a pre-update callback to retrieve
+** a field of the row currently being updated or inserted.
+*/
+SQLITE_API int sqlite3_preupdate_new(sqlite3 *db, int iIdx, sqlite3_value **ppValue){
+ PreUpdate *p = db->pPreUpdate;
+ int rc = SQLITE_OK;
+ Mem *pMem;
+
+ if( !p || p->op==SQLITE_DELETE ){
+ rc = SQLITE_MISUSE_BKPT;
+ goto preupdate_new_out;
+ }
+ if( p->pPk && p->op!=SQLITE_UPDATE ){
+ iIdx = sqlite3TableColumnToIndex(p->pPk, iIdx);
+ }
+ if( iIdx>=p->pCsr->nField || iIdx<0 ){
+ rc = SQLITE_RANGE;
+ goto preupdate_new_out;
+ }
+
+ if( p->op==SQLITE_INSERT ){
+ /* For an INSERT, memory cell p->iNewReg contains the serialized record
+ ** that is being inserted. Deserialize it. */
+ UnpackedRecord *pUnpack = p->pNewUnpacked;
+ if( !pUnpack ){
+ Mem *pData = &p->v->aMem[p->iNewReg];
+ rc = ExpandBlob(pData);
+ if( rc!=SQLITE_OK ) goto preupdate_new_out;
+ pUnpack = vdbeUnpackRecord(&p->keyinfo, pData->n, pData->z);
+ if( !pUnpack ){
+ rc = SQLITE_NOMEM;
+ goto preupdate_new_out;
+ }
+ p->pNewUnpacked = pUnpack;
+ }
+ pMem = &pUnpack->aMem[iIdx];
+ if( iIdx==p->pTab->iPKey ){
+ sqlite3VdbeMemSetInt64(pMem, p->iKey2);
+ }else if( iIdx>=pUnpack->nField ){
+ pMem = (sqlite3_value *)columnNullValue();
+ }
+ }else{
+ /* For an UPDATE, memory cell (p->iNewReg+1+iIdx) contains the required
+ ** value. Make a copy of the cell contents and return a pointer to it.
+ ** It is not safe to return a pointer to the memory cell itself as the
+ ** caller may modify the value text encoding.
+ */
+ assert( p->op==SQLITE_UPDATE );
+ if( !p->aNew ){
+ p->aNew = (Mem *)sqlite3DbMallocZero(db, sizeof(Mem) * p->pCsr->nField);
+ if( !p->aNew ){
+ rc = SQLITE_NOMEM;
+ goto preupdate_new_out;
+ }
+ }
+ assert( iIdx>=0 && iIdx<p->pCsr->nField );
+ pMem = &p->aNew[iIdx];
+ if( pMem->flags==0 ){
+ if( iIdx==p->pTab->iPKey ){
+ sqlite3VdbeMemSetInt64(pMem, p->iKey2);
+ }else{
+ rc = sqlite3VdbeMemCopy(pMem, &p->v->aMem[p->iNewReg+1+iIdx]);
+ if( rc!=SQLITE_OK ) goto preupdate_new_out;
+ }
+ }
+ }
+ *ppValue = pMem;
+
+ preupdate_new_out:
+ sqlite3Error(db, rc);
+ return sqlite3ApiExit(db, rc);
+}
+#endif /* SQLITE_ENABLE_PREUPDATE_HOOK */
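+
+/* Illustrative sketch -- not part of the upstream SQLite amalgamation and
+** compiled out with #if 0: a pre-update hook that prints the old and new
+** text of column 0 for every UPDATE. Requires a build with
+** SQLITE_ENABLE_PREUPDATE_HOOK. The sqlite3_value objects returned by
+** sqlite3_preupdate_old()/_new() are owned by SQLite and are valid only
+** for the duration of the callback. Assumes <stdio.h>.
+*/
+#if 0
+static void onPreUpdate(
+  void *pCtx, sqlite3 *db, int op,
+  const char *zDb, const char *zTab,
+  sqlite3_int64 iKey1, sqlite3_int64 iKey2
+){
+  (void)pCtx; (void)iKey1; (void)iKey2;
+  if( op==SQLITE_UPDATE && sqlite3_preupdate_count(db)>0 ){
+    sqlite3_value *pOld = 0, *pNew = 0;
+    if( sqlite3_preupdate_old(db, 0, &pOld)==SQLITE_OK
+     && sqlite3_preupdate_new(db, 0, &pNew)==SQLITE_OK ){
+      const char *zOld = (const char*)sqlite3_value_text(pOld);
+      const char *zNew = (const char*)sqlite3_value_text(pNew);
+      fprintf(stderr, "%s.%s col0: %s -> %s\n", zDb, zTab,
+              zOld ? zOld : "NULL", zNew ? zNew : "NULL");
+    }
+  }
+}
+/* Registration: sqlite3_preupdate_hook(db, onPreUpdate, 0); */
+#endif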
+
+#ifdef SQLITE_ENABLE_STMT_SCANSTATUS
+/*
+** Return status data for a single loop within query pStmt.
+*/
+SQLITE_API int sqlite3_stmt_scanstatus(
+ sqlite3_stmt *pStmt, /* Prepared statement being queried */
+ int idx, /* Index of loop to report on */
+ int iScanStatusOp, /* Which metric to return */
+ void *pOut /* OUT: Write the answer here */
+){
+ Vdbe *p = (Vdbe*)pStmt;
+ ScanStatus *pScan;
+ if( idx<0 || idx>=p->nScan ) return 1;
+ pScan = &p->aScan[idx];
+ switch( iScanStatusOp ){
+ case SQLITE_SCANSTAT_NLOOP: {
+ *(sqlite3_int64*)pOut = p->anExec[pScan->addrLoop];
+ break;
+ }
+ case SQLITE_SCANSTAT_NVISIT: {
+ *(sqlite3_int64*)pOut = p->anExec[pScan->addrVisit];
+ break;
+ }
+ case SQLITE_SCANSTAT_EST: {
+ double r = 1.0;
+ LogEst x = pScan->nEst;
+ while( x<100 ){
+ x += 10;
+ r *= 0.5;
+ }
+ *(double*)pOut = r*sqlite3LogEstToInt(x);
+ break;
+ }
+ case SQLITE_SCANSTAT_NAME: {
+ *(const char**)pOut = pScan->zName;
+ break;
+ }
+ case SQLITE_SCANSTAT_EXPLAIN: {
+ if( pScan->addrExplain ){
+ *(const char**)pOut = p->aOp[ pScan->addrExplain ].p4.z;
+ }else{
+ *(const char**)pOut = 0;
+ }
+ break;
+ }
+ case SQLITE_SCANSTAT_SELECTID: {
+ if( pScan->addrExplain ){
+ *(int*)pOut = p->aOp[ pScan->addrExplain ].p1;
+ }else{
+ *(int*)pOut = -1;
+ }
+ break;
+ }
+ default: {
+ return 1;
+ }
+ }
+ return 0;
+}
+
+/*
+** Zero all counters associated with the sqlite3_stmt_scanstatus() data.
+*/
+SQLITE_API void sqlite3_stmt_scanstatus_reset(sqlite3_stmt *pStmt){
+ Vdbe *p = (Vdbe*)pStmt;
+ memset(p->anExec, 0, p->nOp * sizeof(i64));
+}
+#endif /* SQLITE_ENABLE_STMT_SCANSTATUS */
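+
+/* Illustrative sketch -- not part of the upstream SQLite amalgamation and
+** compiled out with #if 0: walking every loop in a statement's plan and
+** printing its name and counters. sqlite3_stmt_scanstatus() returns
+** non-zero once idx runs past the last loop. Requires a build with
+** SQLITE_ENABLE_STMT_SCANSTATUS. Assumes <stdio.h>.
+*/
+#if 0
+static void dumpScanStatus(sqlite3_stmt *pStmt){
+  int idx;
+  for(idx=0; ; idx++){
+    const char *zName = 0;
+    sqlite3_int64 nLoop = 0, nVisit = 0;
+    if( sqlite3_stmt_scanstatus(pStmt, idx, SQLITE_SCANSTAT_NAME, &zName) ){
+      break;  /* idx is past the last loop */
+    }
+    sqlite3_stmt_scanstatus(pStmt, idx, SQLITE_SCANSTAT_NLOOP, &nLoop);
+    sqlite3_stmt_scanstatus(pStmt, idx, SQLITE_SCANSTAT_NVISIT, &nVisit);
+    printf("loop %d (%s): run %lld times, %lld rows visited\n",
+           idx, zName ? zName : "?", nLoop, nVisit);
+  }
+}
+#endif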
+
+/************** End of vdbeapi.c *********************************************/
+/************** Begin file vdbetrace.c ***************************************/
+/*
+** 2009 November 25
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+**
+** This file contains code used to insert the values of host parameters
+** (aka "wildcards") into the SQL text output by sqlite3_trace().
+**
+** The Vdbe parse-tree explainer is also found here.
+*/
+/* #include "sqliteInt.h" */
+/* #include "vdbeInt.h" */
+
+#ifndef SQLITE_OMIT_TRACE
+
+/*
+** zSql is a zero-terminated string of UTF-8 SQL text. Return the number of
+** bytes in this text up to but excluding the first character in
+** a host parameter. If the text contains no host parameters, return
+** the total number of bytes in the text.
+*/
+static int findNextHostParameter(const char *zSql, int *pnToken){
+ int tokenType;
+ int nTotal = 0;
+ int n;
+
+ *pnToken = 0;
+ while( zSql[0] ){
+ n = sqlite3GetToken((u8*)zSql, &tokenType);
+ assert( n>0 && tokenType!=TK_ILLEGAL );
+ if( tokenType==TK_VARIABLE ){
+ *pnToken = n;
+ break;
+ }
+ nTotal += n;
+ zSql += n;
+ }
+ return nTotal;
+}
+
+/*
+** This function returns a pointer to a nul-terminated string in memory
+** obtained from sqlite3DbMalloc(). If sqlite3.nVdbeExec is 1, then the
+** string contains a copy of zRawSql but with host parameters expanded to
+** their current bindings. Or, if sqlite3.nVdbeExec is greater than 1,
+** then the returned string holds a copy of zRawSql with "-- " prepended
+** to each line of text.
+**
+** If the SQLITE_TRACE_SIZE_LIMIT macro is defined to an integer, then
+** long strings and blobs are truncated to that many bytes. This
+** can be used to prevent unreasonably large trace strings when dealing
+** with large (multi-megabyte) strings and blobs.
+**
+** The calling function is responsible for making sure the memory returned
+** is eventually freed.
+**
+** ALGORITHM: Scan the input string looking for host parameters in any of
+** these forms: ?, ?N, $A, @A, :A. Take care to avoid text within
+** string literals, quoted identifier names, and comments. For text forms,
+** the host parameter index is found by scanning the prepared
+** statement for the corresponding OP_Variable opcode. Once the host
+** parameter index is known, locate the value in p->aVar[]. Then render
+** the value as a literal in place of the host parameter name.
+*/
+SQLITE_PRIVATE char *sqlite3VdbeExpandSql(
+ Vdbe *p, /* The prepared statement being evaluated */
+ const char *zRawSql /* Raw text of the SQL statement */
+){
+ sqlite3 *db; /* The database connection */
+ int idx = 0; /* Index of a host parameter */
+ int nextIndex = 1; /* Index of next ? host parameter */
+ int n; /* Length of a token prefix */
+ int nToken; /* Length of the parameter token */
+ int i; /* Loop counter */
+ Mem *pVar; /* Value of a host parameter */
+ StrAccum out; /* Accumulate the output here */
+#ifndef SQLITE_OMIT_UTF16
+ Mem utf8; /* Used to convert UTF16 into UTF8 for display */
+#endif
+ char zBase[100]; /* Initial working space */
+
+ db = p->db;
+ sqlite3StrAccumInit(&out, 0, zBase, sizeof(zBase),
+ db->aLimit[SQLITE_LIMIT_LENGTH]);
+ if( db->nVdbeExec>1 ){
+ while( *zRawSql ){
+ const char *zStart = zRawSql;
+ while( *(zRawSql++)!='\n' && *zRawSql );
+ sqlite3_str_append(&out, "-- ", 3);
+ assert( (zRawSql - zStart) > 0 );
+ sqlite3_str_append(&out, zStart, (int)(zRawSql-zStart));
+ }
+ }else if( p->nVar==0 ){
+ sqlite3_str_append(&out, zRawSql, sqlite3Strlen30(zRawSql));
+ }else{
+ while( zRawSql[0] ){
+ n = findNextHostParameter(zRawSql, &nToken);
+ assert( n>0 );
+ sqlite3_str_append(&out, zRawSql, n);
+ zRawSql += n;
+ assert( zRawSql[0] || nToken==0 );
+ if( nToken==0 ) break;
+ if( zRawSql[0]=='?' ){
+ if( nToken>1 ){
+ assert( sqlite3Isdigit(zRawSql[1]) );
+ sqlite3GetInt32(&zRawSql[1], &idx);
+ }else{
+ idx = nextIndex;
+ }
+ }else{
+ assert( zRawSql[0]==':' || zRawSql[0]=='$' ||
+ zRawSql[0]=='@' || zRawSql[0]=='#' );
+ testcase( zRawSql[0]==':' );
+ testcase( zRawSql[0]=='$' );
+ testcase( zRawSql[0]=='@' );
+ testcase( zRawSql[0]=='#' );
+ idx = sqlite3VdbeParameterIndex(p, zRawSql, nToken);
+ assert( idx>0 );
+ }
+ zRawSql += nToken;
+ nextIndex = idx + 1;
+ assert( idx>0 && idx<=p->nVar );
+ pVar = &p->aVar[idx-1];
+ if( pVar->flags & MEM_Null ){
+ sqlite3_str_append(&out, "NULL", 4);
+ }else if( pVar->flags & (MEM_Int|MEM_IntReal) ){
+ sqlite3_str_appendf(&out, "%lld", pVar->u.i);
+ }else if( pVar->flags & MEM_Real ){
+ sqlite3_str_appendf(&out, "%!.15g", pVar->u.r);
+ }else if( pVar->flags & MEM_Str ){
+ int nOut; /* Number of bytes of the string text to include in output */
+#ifndef SQLITE_OMIT_UTF16
+ u8 enc = ENC(db);
+ if( enc!=SQLITE_UTF8 ){
+ memset(&utf8, 0, sizeof(utf8));
+ utf8.db = db;
+ sqlite3VdbeMemSetStr(&utf8, pVar->z, pVar->n, enc, SQLITE_STATIC);
+ if( SQLITE_NOMEM==sqlite3VdbeChangeEncoding(&utf8, SQLITE_UTF8) ){
+ out.accError = SQLITE_NOMEM;
+ out.nAlloc = 0;
+ }
+ pVar = &utf8;
+ }
+#endif
+ nOut = pVar->n;
+#ifdef SQLITE_TRACE_SIZE_LIMIT
+ if( nOut>SQLITE_TRACE_SIZE_LIMIT ){
+ nOut = SQLITE_TRACE_SIZE_LIMIT;
+ while( nOut<pVar->n && (pVar->z[nOut]&0xc0)==0x80 ){ nOut++; }
+ }
+#endif
+ sqlite3_str_appendf(&out, "'%.*q'", nOut, pVar->z);
+#ifdef SQLITE_TRACE_SIZE_LIMIT
+ if( nOut<pVar->n ){
+ sqlite3_str_appendf(&out, "/*+%d bytes*/", pVar->n-nOut);
+ }
+#endif
+#ifndef SQLITE_OMIT_UTF16
+ if( enc!=SQLITE_UTF8 ) sqlite3VdbeMemRelease(&utf8);
+#endif
+ }else if( pVar->flags & MEM_Zero ){
+ sqlite3_str_appendf(&out, "zeroblob(%d)", pVar->u.nZero);
+ }else{
+ int nOut; /* Number of bytes of the blob to include in output */
+ assert( pVar->flags & MEM_Blob );
+ sqlite3_str_append(&out, "x'", 2);
+ nOut = pVar->n;
+#ifdef SQLITE_TRACE_SIZE_LIMIT
+ if( nOut>SQLITE_TRACE_SIZE_LIMIT ) nOut = SQLITE_TRACE_SIZE_LIMIT;
+#endif
+ for(i=0; i<nOut; i++){
+ sqlite3_str_appendf(&out, "%02x", pVar->z[i]&0xff);
+ }
+ sqlite3_str_append(&out, "'", 1);
+#ifdef SQLITE_TRACE_SIZE_LIMIT
+ if( nOut<pVar->n ){
+ sqlite3_str_appendf(&out, "/*+%d bytes*/", pVar->n-nOut);
+ }
+#endif
+ }
+ }
+ }
+ if( out.accError ) sqlite3_str_reset(&out);
+ return sqlite3StrAccumFinish(&out);
+}
+
+#endif /* #ifndef SQLITE_OMIT_TRACE */
+
+/************** End of vdbetrace.c *******************************************/
+/************** Begin file vdbe.c ********************************************/
+/*
+** 2001 September 15
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+** The code in this file implements the function that runs the
+** bytecode of a prepared statement.
+**
+** Various scripts scan this source file in order to generate HTML
+** documentation, header files, or other derived files. The formatting
+** of the code in this file is, therefore, important. See other comments
+** in this file for details. If in doubt, do not deviate from existing
+** commenting and indentation practices when changing or adding code.
+*/
+/* #include "sqliteInt.h" */
+/* #include "vdbeInt.h" */
+
+/*
+** Invoke this macro on memory cells just prior to changing the
+** value of the cell. This macro verifies that shallow copies are
+** not misused. A shallow copy of a string or blob just copies a
+** pointer to the string or blob, not the content. If the original
+** is changed while the copy is still in use, the string or blob might
+** be changed out from under the copy. This macro verifies that nothing
+** like that ever happens.
+*/
+#ifdef SQLITE_DEBUG
+# define memAboutToChange(P,M) sqlite3VdbeMemAboutToChange(P,M)
+#else
+# define memAboutToChange(P,M)
+#endif
+
+/*
+** The following global variable is incremented every time a cursor
+** moves, either by the OP_SeekXX, OP_Next, or OP_Prev opcodes. The test
+** procedures use this information to make sure that indices are
+** working correctly. This variable has no function other than to
+** help verify the correct operation of the library.
+*/
+#ifdef SQLITE_TEST
+SQLITE_API int sqlite3_search_count = 0;
+#endif
+
+/*
+** When this global variable is positive, it gets decremented once before
+** each instruction in the VDBE. When it reaches zero, the u1.isInterrupted
+** field of the sqlite3 structure is set in order to simulate an interrupt.
+**
+** This facility is used for testing purposes only. It does not function
+** in an ordinary build.
+*/
+#ifdef SQLITE_TEST
+SQLITE_API int sqlite3_interrupt_count = 0;
+#endif
+
+/*
+** The next global variable is incremented each time the OP_Sort opcode
+** is executed. The test procedures use this information to make sure that
+** sorting is occurring or not occurring at appropriate times. This variable
+** has no function other than to help verify the correct operation of the
+** library.
+*/
+#ifdef SQLITE_TEST
+SQLITE_API int sqlite3_sort_count = 0;
+#endif
+
+/*
+** The next global variable records the size of the largest MEM_Blob
+** or MEM_Str that has been used by a VDBE opcode. The test procedures
+** use this information to make sure that the zero-blob functionality
+** is working correctly. This variable has no function other than to
+** help verify the correct operation of the library.
+*/
+#ifdef SQLITE_TEST
+SQLITE_API int sqlite3_max_blobsize = 0;
+static void updateMaxBlobsize(Mem *p){
+ if( (p->flags & (MEM_Str|MEM_Blob))!=0 && p->n>sqlite3_max_blobsize ){
+ sqlite3_max_blobsize = p->n;
+ }
+}
+#endif
+
+/*
+** This macro evaluates to true if either the update hook or the preupdate
+** hook is enabled for database connection DB.
+*/
+#ifdef SQLITE_ENABLE_PREUPDATE_HOOK
+# define HAS_UPDATE_HOOK(DB) ((DB)->xPreUpdateCallback||(DB)->xUpdateCallback)
+#else
+# define HAS_UPDATE_HOOK(DB) ((DB)->xUpdateCallback)
+#endif
+
+/*
+** The next global variable is incremented each time the OP_Found opcode
+** is executed. This is used to test whether or not the foreign key
+** operation implemented using OP_FkIsZero is working. This variable
+** has no function other than to help verify the correct operation of the
+** library.
+*/
+#ifdef SQLITE_TEST
+SQLITE_API int sqlite3_found_count = 0;
+#endif
+
+/*
+** Test a register to see if it exceeds the current maximum blob size.
+** If it does, record the new maximum blob size.
+*/
+#if defined(SQLITE_TEST) && !defined(SQLITE_UNTESTABLE)
+# define UPDATE_MAX_BLOBSIZE(P) updateMaxBlobsize(P)
+#else
+# define UPDATE_MAX_BLOBSIZE(P)
+#endif
+
+#ifdef SQLITE_DEBUG
+/* This routine provides a convenient place to set a breakpoint during
+** tracing with PRAGMA vdbe_trace=on. The breakpoint fires right after
+** each opcode is printed. Variables "pc" (program counter) and pOp are
+** available to add conditionals to the breakpoint. GDB example:
+**
+** break test_trace_breakpoint if pc=22
+**
+** Other useful labels for breakpoints include:
+** test_addop_breakpoint(pc,pOp)
+** sqlite3CorruptError(lineno)
+** sqlite3MisuseError(lineno)
+** sqlite3CantopenError(lineno)
+*/
+static void test_trace_breakpoint(int pc, Op *pOp, Vdbe *v){
+ static int n = 0;
+ n++;
+}
+#endif
+
+/*
+** Invoke the VDBE coverage callback, if that callback is defined. This
+** feature is used for test suite validation only and does not appear in
+** production builds.
+**
+** M is the type of branch. I is the direction taken for this instance of
+** the branch.
+**
+** M: 2 - two-way branch (I=0: fall-thru 1: jump )
+** 3 - two-way + NULL (I=0: fall-thru 1: jump 2: NULL )
+** 4 - OP_Jump (I=0: jump p1 1: jump p2 2: jump p3)
+**
+** In other words, if M is 2, then I is either 0 (for fall-through) or
+** 1 (for when the branch is taken). If M is 3, then I is 0 for an
+** ordinary fall-through, I is 1 if the branch was taken, and I is 2
+** if the result of comparison is NULL. For M=3, I=2 the jump may or
+** may not be taken, depending on the SQLITE_JUMPIFNULL flags in p5.
+** When M is 4, that means that an OP_Jump is being run. I is 0, 1, or 2
+** depending on whether the operands are less than, equal, or greater than.
+**
+** iSrcLine is the source code line (from the __LINE__ macro) that
+** generated the VDBE instruction combined with flag bits. The source
+** code line number is in the lower 24 bits of iSrcLine and the upper
+** 8 bits are flags. The lower three bits of the flags indicate
+** values for I that should never occur. For example, if the branch is
+** always taken, the flags should be 0x05 since the fall-through and
+** alternate branch are never taken. If a branch is never taken then
+** flags should be 0x06 since only the fall-through approach is allowed.
+**
+** Bit 0x08 of the flags indicates an OP_Jump opcode that is only
+** interested in equal or not-equal. In other words, I==0 and I==2
+** should be treated as equivalent.
+**
+** Since only a line number is retained, not the filename, this macro
+** only works for amalgamation builds. But that is ok, since these macros
+** should be no-ops except for special builds used to measure test coverage.
+*/
+#if !defined(SQLITE_VDBE_COVERAGE)
+# define VdbeBranchTaken(I,M)
+#else
+# define VdbeBranchTaken(I,M) vdbeTakeBranch(pOp->iSrcLine,I,M)
+ static void vdbeTakeBranch(u32 iSrcLine, u8 I, u8 M){
+ u8 mNever;
+ assert( I<=2 ); /* 0: fall through, 1: taken, 2: alternate taken */
+ assert( M<=4 ); /* 2: two-way branch, 3: three-way branch, 4: OP_Jump */
+ assert( I<M ); /* I can only be 2 if M is 3 or 4 */
+    /* Transform I from an integer [0,1,2] into a bitmask of [1,2,4] */
+ I = 1<<I;
+ /* The upper 8 bits of iSrcLine are flags. The lower three bits of
+ ** the flags indicate directions that the branch can never go. If
+ ** a branch really does go in one of those directions, assert right
+ ** away. */
+ mNever = iSrcLine >> 24;
+ assert( (I & mNever)==0 );
+ if( sqlite3GlobalConfig.xVdbeBranch==0 ) return; /*NO_TEST*/
+ /* Invoke the branch coverage callback with three arguments:
+ ** iSrcLine - the line number of the VdbeCoverage() macro, with
+ ** flags removed.
+    **    I        - Mask of bits 0x07 indicating which cases are
+ ** fulfilled by this instance of the jump. 0x01 means
+ ** fall-thru, 0x02 means taken, 0x04 means NULL. Any
+ ** impossible cases (ex: if the comparison is never NULL)
+ ** are filled in automatically so that the coverage
+ ** measurement logic does not flag those impossible cases
+ ** as missed coverage.
+ ** M - Type of jump. Same as M argument above
+ */
+ I |= mNever;
+ if( M==2 ) I |= 0x04;
+ if( M==4 ){
+ I |= 0x08;
+ if( (mNever&0x08)!=0 && (I&0x05)!=0) I |= 0x05; /*NO_TEST*/
+ }
+ sqlite3GlobalConfig.xVdbeBranch(sqlite3GlobalConfig.pVdbeBranchArg,
+ iSrcLine&0xffffff, I, M);
+ }
+#endif
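+
+/* Illustrative sketch -- not part of the upstream SQLite amalgamation and
+** compiled out with #if 0: how the iSrcLine value described above packs a
+** source line number and the "never taken" flags into a single u32, per
+** the comment preceding VdbeBranchTaken(). Assumes <stdio.h>.
+*/
+#if 0
+static void decodeISrcLine(u32 iSrcLine){
+  unsigned int lineno = iSrcLine & 0xffffff;  /* Lower 24 bits: __LINE__ */
+  unsigned int mNever = iSrcLine >> 24;       /* Upper 8 bits: flag bits */
+  printf("line %u: impossible-direction mask 0x%02x\n", lineno, mNever);
+}
+#endif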
+
+/*
+** An ephemeral string value (signified by the MEM_Ephem flag) contains
+** a pointer to a dynamically allocated string where some other entity
+** is responsible for deallocating that string. Because the register
+** does not control the string, it might be deleted without the register
+** knowing it.
+**
+** This routine converts an ephemeral string into a dynamically allocated
+** string that the register itself controls. In other words, it
+** converts an MEM_Ephem string into a string with P.z==P.zMalloc.
+*/
+#define Deephemeralize(P) \
+ if( ((P)->flags&MEM_Ephem)!=0 \
+ && sqlite3VdbeMemMakeWriteable(P) ){ goto no_mem;}
+
+/* Return true if the cursor was opened using the OP_OpenSorter opcode. */
+#define isSorter(x) ((x)->eCurType==CURTYPE_SORTER)
+
+/*
+** Allocate VdbeCursor number iCur. Return a pointer to it. Return NULL
+** if we run out of memory.
+*/
+static VdbeCursor *allocateCursor(
+ Vdbe *p, /* The virtual machine */
+ int iCur, /* Index of the new VdbeCursor */
+ int nField, /* Number of fields in the table or index */
+ int iDb, /* Database the cursor belongs to, or -1 */
+ u8 eCurType /* Type of the new cursor */
+){
+ /* Find the memory cell that will be used to store the blob of memory
+ ** required for this VdbeCursor structure. It is convenient to use a
+ ** vdbe memory cell to manage the memory allocation required for a
+ ** VdbeCursor structure for the following reasons:
+ **
+ ** * Sometimes cursor numbers are used for a couple of different
+ ** purposes in a vdbe program. The different uses might require
+ ** different sized allocations. Memory cells provide growable
+ ** allocations.
+ **
+ ** * When using ENABLE_MEMORY_MANAGEMENT, memory cell buffers can
+ ** be freed lazily via the sqlite3_release_memory() API. This
+ ** minimizes the number of malloc calls made by the system.
+ **
+ ** The memory cell for cursor 0 is aMem[0]. The rest are allocated from
+ ** the top of the register space. Cursor 1 is at Mem[p->nMem-1].
+ ** Cursor 2 is at Mem[p->nMem-2]. And so forth.
+ */
+ Mem *pMem = iCur>0 ? &p->aMem[p->nMem-iCur] : p->aMem;
+
+ int nByte;
+ VdbeCursor *pCx = 0;
+ nByte =
+ ROUND8(sizeof(VdbeCursor)) + 2*sizeof(u32)*nField +
+ (eCurType==CURTYPE_BTREE?sqlite3BtreeCursorSize():0);
+
+ assert( iCur>=0 && iCur<p->nCursor );
+ if( p->apCsr[iCur] ){ /*OPTIMIZATION-IF-FALSE*/
+ /* Before calling sqlite3VdbeFreeCursor(), ensure the isEphemeral flag
+ ** is clear. Otherwise, if this is an ephemeral cursor created by
+ ** OP_OpenDup, the cursor will not be closed and will still be part
+ ** of a BtShared.pCursor list. */
+ if( p->apCsr[iCur]->pBtx==0 ) p->apCsr[iCur]->isEphemeral = 0;
+ sqlite3VdbeFreeCursor(p, p->apCsr[iCur]);
+ p->apCsr[iCur] = 0;
+ }
+ if( SQLITE_OK==sqlite3VdbeMemClearAndResize(pMem, nByte) ){
+ p->apCsr[iCur] = pCx = (VdbeCursor*)pMem->z;
+ memset(pCx, 0, offsetof(VdbeCursor,pAltCursor));
+ pCx->eCurType = eCurType;
+ pCx->iDb = iDb;
+ pCx->nField = nField;
+ pCx->aOffset = &pCx->aType[nField];
+ if( eCurType==CURTYPE_BTREE ){
+ pCx->uc.pCursor = (BtCursor*)
+ &pMem->z[ROUND8(sizeof(VdbeCursor))+2*sizeof(u32)*nField];
+ sqlite3BtreeCursorZero(pCx->uc.pCursor);
+ }
+ }
+ return pCx;
+}
+
+/*
+** The string in pRec is known to look like an integer and to have a
+** floating point value of rValue. Return true and set *piValue to the
+** integer value if the string is in range to be an integer. Otherwise,
+** return false.
+*/
+static int alsoAnInt(Mem *pRec, double rValue, i64 *piValue){
+ i64 iValue = (double)rValue;
+ if( sqlite3RealSameAsInt(rValue,iValue) ){
+ *piValue = iValue;
+ return 1;
+ }
+ return 0==sqlite3Atoi64(pRec->z, piValue, pRec->n, pRec->enc);
+}
+
+/*
+** Try to convert a value into a numeric representation if we can
+** do so without loss of information. In other words, if the string
+** looks like a number, convert it into a number. If it does not
+** look like a number, leave it alone.
+**
+** If the bTryForInt flag is true, then extra effort is made to give
+** an integer representation. Strings that look like floating point
+** values but which have no fractional component (example: '48.00')
+** will have a MEM_Int representation when bTryForInt is true.
+**
+** If bTryForInt is false, then if the input string contains a decimal
+** point or exponential notation, the result is only MEM_Real, even
+** if there is an exact integer representation of the quantity.
+*/
+static void applyNumericAffinity(Mem *pRec, int bTryForInt){
+ double rValue;
+ u8 enc = pRec->enc;
+ int rc;
+ assert( (pRec->flags & (MEM_Str|MEM_Int|MEM_Real|MEM_IntReal))==MEM_Str );
+ rc = sqlite3AtoF(pRec->z, &rValue, pRec->n, enc);
+ if( rc<=0 ) return;
+ if( rc==1 && alsoAnInt(pRec, rValue, &pRec->u.i) ){
+ pRec->flags |= MEM_Int;
+ }else{
+ pRec->u.r = rValue;
+ pRec->flags |= MEM_Real;
+ if( bTryForInt ) sqlite3VdbeIntegerAffinity(pRec);
+ }
+ /* TEXT->NUMERIC is many->one. Hence, it is important to invalidate the
+ ** string representation after computing a numeric equivalent, because the
+ ** string representation might not be the canonical representation for the
+ ** numeric value. Ticket [343634942dd54ab57b7024] 2018-01-31. */
+ pRec->flags &= ~MEM_Str;
+}
+
+/*
+** Processing is determined by the affinity parameter:
+**
+** SQLITE_AFF_INTEGER:
+** SQLITE_AFF_REAL:
+** SQLITE_AFF_NUMERIC:
+** Try to convert pRec to an integer representation or a
+** floating-point representation if an integer representation
+** is not possible. Note that the integer representation is
+** always preferred, even if the affinity is REAL, because
+** an integer representation is more space efficient on disk.
+**
+** SQLITE_AFF_TEXT:
+** Convert pRec to a text representation.
+**
+** SQLITE_AFF_BLOB:
+** SQLITE_AFF_NONE:
+** No-op. pRec is unchanged.
+*/
+static void applyAffinity(
+ Mem *pRec, /* The value to apply affinity to */
+ char affinity, /* The affinity to be applied */
+ u8 enc /* Use this text encoding */
+){
+ if( affinity>=SQLITE_AFF_NUMERIC ){
+ assert( affinity==SQLITE_AFF_INTEGER || affinity==SQLITE_AFF_REAL
+ || affinity==SQLITE_AFF_NUMERIC );
+ if( (pRec->flags & MEM_Int)==0 ){ /*OPTIMIZATION-IF-FALSE*/
+ if( (pRec->flags & MEM_Real)==0 ){
+ if( pRec->flags & MEM_Str ) applyNumericAffinity(pRec,1);
+ }else{
+ sqlite3VdbeIntegerAffinity(pRec);
+ }
+ }
+ }else if( affinity==SQLITE_AFF_TEXT ){
+ /* Only attempt the conversion to TEXT if there is an integer or real
+ ** representation (blob and NULL do not get converted) but no string
+ ** representation. It would be harmless to repeat the conversion if
+ ** there is already a string rep, but it is pointless to waste those
+ ** CPU cycles. */
+ if( 0==(pRec->flags&MEM_Str) ){ /*OPTIMIZATION-IF-FALSE*/
+ if( (pRec->flags&(MEM_Real|MEM_Int|MEM_IntReal)) ){
+ testcase( pRec->flags & MEM_Int );
+ testcase( pRec->flags & MEM_Real );
+ testcase( pRec->flags & MEM_IntReal );
+ sqlite3VdbeMemStringify(pRec, enc, 1);
+ }
+ }
+ pRec->flags &= ~(MEM_Real|MEM_Int|MEM_IntReal);
+ }
+}
+
+/*
+** Try to convert the type of a function argument or a result column
+** into a numeric representation. Use either INTEGER or REAL whichever
+** is appropriate. But only do the conversion if it is possible without
+** loss of information and return the revised type of the argument.
+*/
+SQLITE_API int sqlite3_value_numeric_type(sqlite3_value *pVal){
+ int eType = sqlite3_value_type(pVal);
+ if( eType==SQLITE_TEXT ){
+ Mem *pMem = (Mem*)pVal;
+ applyNumericAffinity(pMem, 0);
+ eType = sqlite3_value_type(pVal);
+ }
+ return eType;
+}
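+
+/* Illustrative sketch -- not part of the upstream SQLite amalgamation and
+** compiled out with #if 0: an application-defined SQL function that uses
+** sqlite3_value_numeric_type() so that numeric text such as '21' is
+** accepted alongside true numeric arguments, without information loss.
+*/
+#if 0
+static void doubleIt(sqlite3_context *ctx, int argc, sqlite3_value **argv){
+  (void)argc;
+  switch( sqlite3_value_numeric_type(argv[0]) ){
+    case SQLITE_INTEGER:
+      sqlite3_result_int64(ctx, 2*sqlite3_value_int64(argv[0]));
+      break;
+    case SQLITE_FLOAT:
+      sqlite3_result_double(ctx, 2.0*sqlite3_value_double(argv[0]));
+      break;
+    default:
+      sqlite3_result_error(ctx, "doubleIt() requires a numeric argument", -1);
+      break;
+  }
+}
+#endif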
+
+/*
+** Exported version of applyAffinity(). This one works on sqlite3_value*,
+** not the internal Mem* type.
+*/
+SQLITE_PRIVATE void sqlite3ValueApplyAffinity(
+ sqlite3_value *pVal,
+ u8 affinity,
+ u8 enc
+){
+ applyAffinity((Mem *)pVal, affinity, enc);
+}
+
+/*
+** pMem currently only holds a string type (or maybe a BLOB that we can
+** interpret as a string if we want to). Compute its corresponding
+** numeric type, if it has one. Set the pMem->u.r and pMem->u.i fields
+** accordingly.
+*/
+static u16 SQLITE_NOINLINE computeNumericType(Mem *pMem){
+ int rc;
+ sqlite3_int64 ix;
+ assert( (pMem->flags & (MEM_Int|MEM_Real|MEM_IntReal))==0 );
+ assert( (pMem->flags & (MEM_Str|MEM_Blob))!=0 );
+ ExpandBlob(pMem);
+ rc = sqlite3AtoF(pMem->z, &pMem->u.r, pMem->n, pMem->enc);
+ if( rc<=0 ){
+ if( rc==0 && sqlite3Atoi64(pMem->z, &ix, pMem->n, pMem->enc)<=1 ){
+ pMem->u.i = ix;
+ return MEM_Int;
+ }else{
+ return MEM_Real;
+ }
+ }else if( rc==1 && sqlite3Atoi64(pMem->z, &ix, pMem->n, pMem->enc)==0 ){
+ pMem->u.i = ix;
+ return MEM_Int;
+ }
+ return MEM_Real;
+}
+
+/*
+** Return the numeric type for pMem, either MEM_Int or MEM_Real or both or
+** none.
+**
+** Unlike applyNumericAffinity(), this routine does not modify pMem->flags.
+** But it does set pMem->u.r and pMem->u.i appropriately.
+*/
+static u16 numericType(Mem *pMem){
+ if( pMem->flags & (MEM_Int|MEM_Real|MEM_IntReal) ){
+ testcase( pMem->flags & MEM_Int );
+ testcase( pMem->flags & MEM_Real );
+ testcase( pMem->flags & MEM_IntReal );
+ return pMem->flags & (MEM_Int|MEM_Real|MEM_IntReal);
+ }
+ if( pMem->flags & (MEM_Str|MEM_Blob) ){
+ testcase( pMem->flags & MEM_Str );
+ testcase( pMem->flags & MEM_Blob );
+ return computeNumericType(pMem);
+ }
+ return 0;
+}
+
+#ifdef SQLITE_DEBUG
+/*
+** Write a nice string representation of the contents of cell pMem
+** into buffer zBuf, length nBuf.
+*/
+SQLITE_PRIVATE void sqlite3VdbeMemPrettyPrint(Mem *pMem, StrAccum *pStr){
+ int f = pMem->flags;
+ static const char *const encnames[] = {"(X)", "(8)", "(16LE)", "(16BE)"};
+ if( f&MEM_Blob ){
+ int i;
+ char c;
+ if( f & MEM_Dyn ){
+ c = 'z';
+ assert( (f & (MEM_Static|MEM_Ephem))==0 );
+ }else if( f & MEM_Static ){
+ c = 't';
+ assert( (f & (MEM_Dyn|MEM_Ephem))==0 );
+ }else if( f & MEM_Ephem ){
+ c = 'e';
+ assert( (f & (MEM_Static|MEM_Dyn))==0 );
+ }else{
+ c = 's';
+ }
+ sqlite3_str_appendf(pStr, "%cx[", c);
+ for(i=0; i<25 && i<pMem->n; i++){
+ sqlite3_str_appendf(pStr, "%02X", ((int)pMem->z[i] & 0xFF));
+ }
+ sqlite3_str_appendf(pStr, "|");
+ for(i=0; i<25 && i<pMem->n; i++){
+ char z = pMem->z[i];
+ sqlite3_str_appendchar(pStr, 1, (z<32||z>126)?'.':z);
+ }
+ sqlite3_str_appendf(pStr,"]");
+ if( f & MEM_Zero ){
+ sqlite3_str_appendf(pStr, "+%dz",pMem->u.nZero);
+ }
+ }else if( f & MEM_Str ){
+ int j;
+ u8 c;
+ if( f & MEM_Dyn ){
+ c = 'z';
+ assert( (f & (MEM_Static|MEM_Ephem))==0 );
+ }else if( f & MEM_Static ){
+ c = 't';
+ assert( (f & (MEM_Dyn|MEM_Ephem))==0 );
+ }else if( f & MEM_Ephem ){
+ c = 'e';
+ assert( (f & (MEM_Static|MEM_Dyn))==0 );
+ }else{
+ c = 's';
+ }
+ sqlite3_str_appendf(pStr, " %c%d[", c, pMem->n);
+ for(j=0; j<25 && j<pMem->n; j++){
+ c = pMem->z[j];
+ sqlite3_str_appendchar(pStr, 1, (c>=0x20&&c<=0x7f) ? c : '.');
+ }
+ sqlite3_str_appendf(pStr, "]%s", encnames[pMem->enc]);
+ }
+}
+#endif
+
+#ifdef SQLITE_DEBUG
+/*
+** Print the value of a register for tracing purposes:
+*/
+static void memTracePrint(Mem *p){
+ if( p->flags & MEM_Undefined ){
+ printf(" undefined");
+ }else if( p->flags & MEM_Null ){
+ printf(p->flags & MEM_Zero ? " NULL-nochng" : " NULL");
+ }else if( (p->flags & (MEM_Int|MEM_Str))==(MEM_Int|MEM_Str) ){
+ printf(" si:%lld", p->u.i);
+ }else if( (p->flags & (MEM_IntReal))!=0 ){
+ printf(" ir:%lld", p->u.i);
+ }else if( p->flags & MEM_Int ){
+ printf(" i:%lld", p->u.i);
+#ifndef SQLITE_OMIT_FLOATING_POINT
+ }else if( p->flags & MEM_Real ){
+ printf(" r:%.17g", p->u.r);
+#endif
+ }else if( sqlite3VdbeMemIsRowSet(p) ){
+ printf(" (rowset)");
+ }else{
+ StrAccum acc;
+ char zBuf[1000];
+ sqlite3StrAccumInit(&acc, 0, zBuf, sizeof(zBuf), 0);
+ sqlite3VdbeMemPrettyPrint(p, &acc);
+ printf(" %s", sqlite3StrAccumFinish(&acc));
+ }
+ if( p->flags & MEM_Subtype ) printf(" subtype=0x%02x", p->eSubtype);
+}
+static void registerTrace(int iReg, Mem *p){
+ printf("R[%d] = ", iReg);
+ memTracePrint(p);
+ if( p->pScopyFrom ){
+ printf(" <== R[%d]", (int)(p->pScopyFrom - &p[-iReg]));
+ }
+ printf("\n");
+ sqlite3VdbeCheckMemInvariants(p);
+}
+#endif
+
+#ifdef SQLITE_DEBUG
+/*
+** Show the values of all registers in the virtual machine. Used for
+** interactive debugging.
+*/
+SQLITE_PRIVATE void sqlite3VdbeRegisterDump(Vdbe *v){
+ int i;
+ for(i=1; i<v->nMem; i++) registerTrace(i, v->aMem+i);
+}
+#endif /* SQLITE_DEBUG */
+
+
+#ifdef SQLITE_DEBUG
+# define REGISTER_TRACE(R,M) if(db->flags&SQLITE_VdbeTrace)registerTrace(R,M)
+#else
+# define REGISTER_TRACE(R,M)
+#endif
+
+
+#ifdef VDBE_PROFILE
+
+/*
+** hwtime.h contains inline assembler code for implementing
+** high-performance timing routines.
+*/
+/************** Include hwtime.h in the middle of vdbe.c *********************/
+/************** Begin file hwtime.h ******************************************/
+/*
+** 2008 May 27
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+******************************************************************************
+**
+** This file contains inline asm code for retrieving "high-performance"
+** counters for x86 and x86_64 class CPUs.
+*/
+#ifndef SQLITE_HWTIME_H
+#define SQLITE_HWTIME_H
+
+/*
+** The following routine only works on pentium-class (or newer) processors.
+** It uses the RDTSC opcode to read the cycle count value out of the
+** processor and returns that value. This can be used for high-res
+** profiling.
+*/
+#if !defined(__STRICT_ANSI__) && \
+ (defined(__GNUC__) || defined(_MSC_VER)) && \
+ (defined(i386) || defined(__i386__) || defined(_M_IX86))
+
+ #if defined(__GNUC__)
+
+ __inline__ sqlite_uint64 sqlite3Hwtime(void){
+ unsigned int lo, hi;
+ __asm__ __volatile__ ("rdtsc" : "=a" (lo), "=d" (hi));
+ return (sqlite_uint64)hi << 32 | lo;
+ }
+
+ #elif defined(_MSC_VER)
+
+ __declspec(naked) __inline sqlite_uint64 __cdecl sqlite3Hwtime(void){
+ __asm {
+ rdtsc
+ ret ; return value at EDX:EAX
+ }
+ }
+
+ #endif
+
+#elif !defined(__STRICT_ANSI__) && (defined(__GNUC__) && defined(__x86_64__))
+
+ __inline__ sqlite_uint64 sqlite3Hwtime(void){
+ unsigned long val;
+ __asm__ __volatile__ ("rdtsc" : "=A" (val));
+ return val;
+ }
+
+#elif !defined(__STRICT_ANSI__) && (defined(__GNUC__) && defined(__ppc__))
+
+ __inline__ sqlite_uint64 sqlite3Hwtime(void){
+ unsigned long long retval;
+ unsigned long junk;
+ __asm__ __volatile__ ("\n\
+ 1: mftbu %1\n\
+ mftb %L0\n\
+ mftbu %0\n\
+ cmpw %0,%1\n\
+ bne 1b"
+ : "=r" (retval), "=r" (junk));
+ return retval;
+ }
+
+#else
+
+ /*
+ ** asm() is needed for hardware timing support. Without asm(),
+ ** disable the sqlite3Hwtime() routine.
+ **
+ ** sqlite3Hwtime() is only used for some obscure debugging
+ ** and analysis configurations, not in any deliverable, so this
+ ** should not be a great loss.
+ */
+SQLITE_PRIVATE sqlite_uint64 sqlite3Hwtime(void){ return ((sqlite_uint64)0); }
+
+#endif
+
+#endif /* !defined(SQLITE_HWTIME_H) */
+
+/************** End of hwtime.h **********************************************/
+/************** Continuing where we left off in vdbe.c ***********************/
+
+#endif
+
+#ifndef NDEBUG
+/*
+** This function is only called from within an assert() expression. It
+** checks that the sqlite3.nSavepoint variable is correctly set to
+** the number of non-transaction savepoints currently in the
+** linked list starting at sqlite3.pSavepoint.
+**
+** Usage:
+**
+** assert( checkSavepointCount(db) );
+*/
+static int checkSavepointCount(sqlite3 *db){
+ int n = 0;
+ Savepoint *p;
+ for(p=db->pSavepoint; p; p=p->pNext) n++;
+ assert( n==(db->nSavepoint + db->isTransactionSavepoint) );
+ return 1;
+}
+#endif
+
+/*
+** Return the register of pOp->p2 after first preparing it to be
+** overwritten with an integer value.
+*/
+static SQLITE_NOINLINE Mem *out2PrereleaseWithClear(Mem *pOut){
+ sqlite3VdbeMemSetNull(pOut);
+ pOut->flags = MEM_Int;
+ return pOut;
+}
+static Mem *out2Prerelease(Vdbe *p, VdbeOp *pOp){
+ Mem *pOut;
+ assert( pOp->p2>0 );
+ assert( pOp->p2<=(p->nMem+1 - p->nCursor) );
+ pOut = &p->aMem[pOp->p2];
+ memAboutToChange(p, pOut);
+ if( VdbeMemDynamic(pOut) ){ /*OPTIMIZATION-IF-FALSE*/
+ return out2PrereleaseWithClear(pOut);
+ }else{
+ pOut->flags = MEM_Int;
+ return pOut;
+ }
+}
+
+
+/*
+** Execute as much of a VDBE program as we can.
+** This is the core of sqlite3_step().
+*/
+SQLITE_PRIVATE int sqlite3VdbeExec(
+ Vdbe *p /* The VDBE */
+){
+ Op *aOp = p->aOp; /* Copy of p->aOp */
+ Op *pOp = aOp; /* Current operation */
+#if defined(SQLITE_DEBUG) || defined(VDBE_PROFILE)
+ Op *pOrigOp; /* Value of pOp at the top of the loop */
+#endif
+#ifdef SQLITE_DEBUG
+ int nExtraDelete = 0; /* Verifies FORDELETE and AUXDELETE flags */
+#endif
+ int rc = SQLITE_OK; /* Value to return */
+ sqlite3 *db = p->db; /* The database */
+ u8 resetSchemaOnFault = 0; /* Reset schema after an error if positive */
+ u8 encoding = ENC(db); /* The database encoding */
+ int iCompare = 0; /* Result of last comparison */
+ u64 nVmStep = 0; /* Number of virtual machine steps */
+#ifndef SQLITE_OMIT_PROGRESS_CALLBACK
+ u64 nProgressLimit; /* Invoke xProgress() when nVmStep reaches this */
+#endif
+ Mem *aMem = p->aMem; /* Copy of p->aMem */
+ Mem *pIn1 = 0; /* 1st input operand */
+ Mem *pIn2 = 0; /* 2nd input operand */
+ Mem *pIn3 = 0; /* 3rd input operand */
+ Mem *pOut = 0; /* Output operand */
+#ifdef VDBE_PROFILE
+ u64 start; /* CPU clock count at start of opcode */
+#endif
+ /*** INSERT STACK UNION HERE ***/
+
+ assert( p->magic==VDBE_MAGIC_RUN ); /* sqlite3_step() verifies this */
+ sqlite3VdbeEnter(p);
+#ifndef SQLITE_OMIT_PROGRESS_CALLBACK
+ if( db->xProgress ){
+ u32 iPrior = p->aCounter[SQLITE_STMTSTATUS_VM_STEP];
+ assert( 0 < db->nProgressOps );
+ nProgressLimit = db->nProgressOps - (iPrior % db->nProgressOps);
+ }else{
+ nProgressLimit = LARGEST_UINT64;
+ }
+#endif
+ if( p->rc==SQLITE_NOMEM ){
+ /* This happens if a malloc() inside a call to sqlite3_column_text() or
+ ** sqlite3_column_text16() failed. */
+ goto no_mem;
+ }
+ assert( p->rc==SQLITE_OK || (p->rc&0xff)==SQLITE_BUSY );
+ testcase( p->rc!=SQLITE_OK );
+ p->rc = SQLITE_OK;
+ assert( p->bIsReader || p->readOnly!=0 );
+ p->iCurrentTime = 0;
+ assert( p->explain==0 );
+ p->pResultSet = 0;
+ db->busyHandler.nBusy = 0;
+ if( AtomicLoad(&db->u1.isInterrupted) ) goto abort_due_to_interrupt;
+ sqlite3VdbeIOTraceSql(p);
+#ifdef SQLITE_DEBUG
+ sqlite3BeginBenignMalloc();
+ if( p->pc==0
+ && (p->db->flags & (SQLITE_VdbeListing|SQLITE_VdbeEQP|SQLITE_VdbeTrace))!=0
+ ){
+ int i;
+ int once = 1;
+ sqlite3VdbePrintSql(p);
+ if( p->db->flags & SQLITE_VdbeListing ){
+ printf("VDBE Program Listing:\n");
+ for(i=0; i<p->nOp; i++){
+ sqlite3VdbePrintOp(stdout, i, &aOp[i]);
+ }
+ }
+ if( p->db->flags & SQLITE_VdbeEQP ){
+ for(i=0; i<p->nOp; i++){
+ if( aOp[i].opcode==OP_Explain ){
+ if( once ) printf("VDBE Query Plan:\n");
+ printf("%s\n", aOp[i].p4.z);
+ once = 0;
+ }
+ }
+ }
+ if( p->db->flags & SQLITE_VdbeTrace ) printf("VDBE Trace:\n");
+ }
+ sqlite3EndBenignMalloc();
+#endif
+ for(pOp=&aOp[p->pc]; 1; pOp++){
+ /* Errors are detected by individual opcodes, with an immediate
+    ** jump to abort_due_to_error. */
+ assert( rc==SQLITE_OK );
+
+ assert( pOp>=aOp && pOp<&aOp[p->nOp]);
+#ifdef VDBE_PROFILE
+ start = sqlite3NProfileCnt ? sqlite3NProfileCnt : sqlite3Hwtime();
+#endif
+ nVmStep++;
+#ifdef SQLITE_ENABLE_STMT_SCANSTATUS
+ if( p->anExec ) p->anExec[(int)(pOp-aOp)]++;
+#endif
+
+ /* Only allow tracing if SQLITE_DEBUG is defined.
+ */
+#ifdef SQLITE_DEBUG
+ if( db->flags & SQLITE_VdbeTrace ){
+ sqlite3VdbePrintOp(stdout, (int)(pOp - aOp), pOp);
+ test_trace_breakpoint((int)(pOp - aOp),pOp,p);
+ }
+#endif
+
+
+ /* Check to see if we need to simulate an interrupt. This only happens
+ ** if we have a special test build.
+ */
+#ifdef SQLITE_TEST
+ if( sqlite3_interrupt_count>0 ){
+ sqlite3_interrupt_count--;
+ if( sqlite3_interrupt_count==0 ){
+ sqlite3_interrupt(db);
+ }
+ }
+#endif
+
+ /* Sanity checking on other operands */
+#ifdef SQLITE_DEBUG
+ {
+ u8 opProperty = sqlite3OpcodeProperty[pOp->opcode];
+ if( (opProperty & OPFLG_IN1)!=0 ){
+ assert( pOp->p1>0 );
+ assert( pOp->p1<=(p->nMem+1 - p->nCursor) );
+ assert( memIsValid(&aMem[pOp->p1]) );
+ assert( sqlite3VdbeCheckMemInvariants(&aMem[pOp->p1]) );
+ REGISTER_TRACE(pOp->p1, &aMem[pOp->p1]);
+ }
+ if( (opProperty & OPFLG_IN2)!=0 ){
+ assert( pOp->p2>0 );
+ assert( pOp->p2<=(p->nMem+1 - p->nCursor) );
+ assert( memIsValid(&aMem[pOp->p2]) );
+ assert( sqlite3VdbeCheckMemInvariants(&aMem[pOp->p2]) );
+ REGISTER_TRACE(pOp->p2, &aMem[pOp->p2]);
+ }
+ if( (opProperty & OPFLG_IN3)!=0 ){
+ assert( pOp->p3>0 );
+ assert( pOp->p3<=(p->nMem+1 - p->nCursor) );
+ assert( memIsValid(&aMem[pOp->p3]) );
+ assert( sqlite3VdbeCheckMemInvariants(&aMem[pOp->p3]) );
+ REGISTER_TRACE(pOp->p3, &aMem[pOp->p3]);
+ }
+ if( (opProperty & OPFLG_OUT2)!=0 ){
+ assert( pOp->p2>0 );
+ assert( pOp->p2<=(p->nMem+1 - p->nCursor) );
+ memAboutToChange(p, &aMem[pOp->p2]);
+ }
+ if( (opProperty & OPFLG_OUT3)!=0 ){
+ assert( pOp->p3>0 );
+ assert( pOp->p3<=(p->nMem+1 - p->nCursor) );
+ memAboutToChange(p, &aMem[pOp->p3]);
+ }
+ }
+#endif
+#if defined(SQLITE_DEBUG) || defined(VDBE_PROFILE)
+ pOrigOp = pOp;
+#endif
+
+ switch( pOp->opcode ){
+
+/*****************************************************************************
+** What follows is a massive switch statement where each case implements a
+** separate instruction in the virtual machine. If we follow the usual
+** indentation conventions, each case should be indented by 6 spaces. But
+** that is a lot of wasted space on the left margin. So the code within
+** the switch statement will break with convention and be flush-left. Another
+** big comment (similar to this one) will mark the point in the code where
+** we transition back to normal indentation.
+**
+** The formatting of each case is important. The makefile for SQLite
+** generates two C files "opcodes.h" and "opcodes.c" by scanning this
+** file looking for lines that begin with "case OP_". The opcodes.h files
+** will be filled with #defines that give unique integer values to each
+** opcode and the opcodes.c file is filled with an array of strings where
+** each string is the symbolic name for the corresponding opcode. If the
+** case statement is followed by a comment of the form "/# same as ... #/"
+** that comment is used to determine the particular value of the opcode.
+**
+** Other keywords in the comment that follows each case are used to
+** construct the OPFLG_INITIALIZER value that initializes opcodeProperty[].
+** Keywords include: in1, in2, in3, out2, out3. See
+** the mkopcodeh.awk script for additional information.
+**
+** Documentation about VDBE opcodes is generated by scanning this file
+** for lines that contain "Opcode:". That line and all subsequent
+** comment lines are used in the generation of the opcode.html documentation
+** file.
+**
+** SUMMARY:
+**
+** Formatting is important to scripts that scan this file.
+** Do not deviate from the formatting style currently in use.
+**
+*****************************************************************************/
+
+/* Opcode: Goto * P2 * * *
+**
+** An unconditional jump to address P2.
+** The next instruction executed will be
+** the one at index P2 from the beginning of
+** the program.
+**
+** The P1 parameter is not actually used by this opcode. However, it
+** is sometimes set to 1 instead of 0 as a hint to the command-line shell
+** that this Goto is the bottom of a loop and that the lines from P2 down
+** to the current line should be indented for EXPLAIN output.
+*/
+case OP_Goto: { /* jump */
+
+#ifdef SQLITE_DEBUG
+  /* In debugging mode, when the p5 flag is set on an OP_Goto, that
+  ** means we should really jump back to the preceding OP_ReleaseReg
+ ** instruction. */
+ if( pOp->p5 ){
+ assert( pOp->p2 < (int)(pOp - aOp) );
+ assert( pOp->p2 > 1 );
+ pOp = &aOp[pOp->p2 - 2];
+ assert( pOp[1].opcode==OP_ReleaseReg );
+ goto check_for_interrupt;
+ }
+#endif
+
+jump_to_p2_and_check_for_interrupt:
+ pOp = &aOp[pOp->p2 - 1];
+
+ /* Opcodes that are used as the bottom of a loop (OP_Next, OP_Prev,
+ ** OP_VNext, or OP_SorterNext) all jump here upon
+ ** completion. Check to see if sqlite3_interrupt() has been called
+ ** or if the progress callback needs to be invoked.
+ **
+ ** This code uses unstructured "goto" statements and does not look clean.
+ ** But that is not due to sloppy coding habits. The code is written this
+ ** way for performance, to avoid having to run the interrupt and progress
+ ** checks on every opcode. This helps sqlite3_step() to run about 1.5%
+ ** faster according to "valgrind --tool=cachegrind" */
+check_for_interrupt:
+ if( AtomicLoad(&db->u1.isInterrupted) ) goto abort_due_to_interrupt;
+#ifndef SQLITE_OMIT_PROGRESS_CALLBACK
+ /* Call the progress callback if it is configured and the required number
+ ** of VDBE ops have been executed (either since this invocation of
+ ** sqlite3VdbeExec() or since last time the progress callback was called).
+ ** If the progress callback returns non-zero, exit the virtual machine with
+ ** a return code SQLITE_ABORT.
+ */
+ while( nVmStep>=nProgressLimit && db->xProgress!=0 ){
+ assert( db->nProgressOps!=0 );
+ nProgressLimit += db->nProgressOps;
+ if( db->xProgress(db->pProgressArg) ){
+ nProgressLimit = LARGEST_UINT64;
+ rc = SQLITE_INTERRUPT;
+ goto abort_due_to_error;
+ }
+ }
+#endif
+
+ break;
+}
+
+/* Opcode: Gosub P1 P2 * * *
+**
+** Write the current address onto register P1
+** and then jump to address P2.
+*/
+case OP_Gosub: { /* jump */
+ assert( pOp->p1>0 && pOp->p1<=(p->nMem+1 - p->nCursor) );
+ pIn1 = &aMem[pOp->p1];
+ assert( VdbeMemDynamic(pIn1)==0 );
+ memAboutToChange(p, pIn1);
+ pIn1->flags = MEM_Int;
+ pIn1->u.i = (int)(pOp-aOp);
+ REGISTER_TRACE(pOp->p1, pIn1);
+
+ /* Most jump operations do a goto to this spot in order to update
+ ** the pOp pointer. */
+jump_to_p2:
+ pOp = &aOp[pOp->p2 - 1];
+ break;
+}
+
+/* Opcode: Return P1 * * * *
+**
+** Jump to the next instruction after the address in register P1. After
+** the jump, register P1 becomes undefined.
+*/
+case OP_Return: { /* in1 */
+ pIn1 = &aMem[pOp->p1];
+ assert( pIn1->flags==MEM_Int );
+ pOp = &aOp[pIn1->u.i];
+ pIn1->flags = MEM_Undefined;
+ break;
+}
+
+/* Opcode: InitCoroutine P1 P2 P3 * *
+**
+** Set up register P1 so that it will Yield to the coroutine
+** located at address P3.
+**
+** If P2!=0 then the coroutine implementation immediately follows
+** this opcode. So jump over the coroutine implementation to
+** address P2.
+**
+** See also: EndCoroutine
+*/
+case OP_InitCoroutine: { /* jump */
+ assert( pOp->p1>0 && pOp->p1<=(p->nMem+1 - p->nCursor) );
+ assert( pOp->p2>=0 && pOp->p2<p->nOp );
+ assert( pOp->p3>=0 && pOp->p3<p->nOp );
+ pOut = &aMem[pOp->p1];
+ assert( !VdbeMemDynamic(pOut) );
+ pOut->u.i = pOp->p3 - 1;
+ pOut->flags = MEM_Int;
+ if( pOp->p2 ) goto jump_to_p2;
+ break;
+}
+
+/* Opcode: EndCoroutine P1 * * * *
+**
+** The instruction at the address in register P1 is a Yield.
+** Jump to the P2 parameter of that Yield.
+** After the jump, register P1 becomes undefined.
+**
+** See also: InitCoroutine
+*/
+case OP_EndCoroutine: { /* in1 */
+ VdbeOp *pCaller;
+ pIn1 = &aMem[pOp->p1];
+ assert( pIn1->flags==MEM_Int );
+ assert( pIn1->u.i>=0 && pIn1->u.i<p->nOp );
+ pCaller = &aOp[pIn1->u.i];
+ assert( pCaller->opcode==OP_Yield );
+ assert( pCaller->p2>=0 && pCaller->p2<p->nOp );
+ pOp = &aOp[pCaller->p2 - 1];
+ pIn1->flags = MEM_Undefined;
+ break;
+}
+
+/* Opcode: Yield P1 P2 * * *
+**
+** Swap the program counter with the value in register P1. This
+** has the effect of yielding to a coroutine.
+**
+** If the coroutine that is launched by this instruction ends with
+** Yield or Return then continue to the next instruction. But if
+** the coroutine launched by this instruction ends with
+** EndCoroutine, then jump to P2 rather than continuing with the
+** next instruction.
+**
+** See also: InitCoroutine
+*/
+case OP_Yield: { /* in1, jump */
+ int pcDest;
+ pIn1 = &aMem[pOp->p1];
+ assert( VdbeMemDynamic(pIn1)==0 );
+ pIn1->flags = MEM_Int;
+ pcDest = (int)pIn1->u.i;
+ pIn1->u.i = (int)(pOp - aOp);
+ REGISTER_TRACE(pOp->p1, pIn1);
+ pOp = &aOp[pcDest];
+ break;
+}
+
+/* Opcode: HaltIfNull P1 P2 P3 P4 P5
+** Synopsis: if r[P3]=null halt
+**
+** Check the value in register P3. If it is NULL then Halt using
+** parameters P1, P2, and P4 as if this were a Halt instruction. If the
+** value in register P3 is not NULL, then this routine is a no-op.
+** The P5 parameter should be 1.
+*/
+case OP_HaltIfNull: { /* in3 */
+ pIn3 = &aMem[pOp->p3];
+#ifdef SQLITE_DEBUG
+ if( pOp->p2==OE_Abort ){ sqlite3VdbeAssertAbortable(p); }
+#endif
+ if( (pIn3->flags & MEM_Null)==0 ) break;
+ /* Fall through into OP_Halt */
+ /* no break */ deliberate_fall_through
+}
+
+/* Opcode: Halt P1 P2 * P4 P5
+**
+** Exit immediately. All open cursors, etc are closed
+** automatically.
+**
+** P1 is the result code returned by sqlite3_exec(), sqlite3_reset(),
+** or sqlite3_finalize(). For a normal halt, this should be SQLITE_OK (0).
+** For errors, it can be some other value. If P1!=0 then P2 will determine
+** whether or not to rollback the current transaction. Do not rollback
+** if P2==OE_Fail. Do the rollback if P2==OE_Rollback. If P2==OE_Abort,
+** then back out all changes that have occurred during this execution of the
+** VDBE, but do not rollback the transaction.
+**
+** If P4 is not null then it is an error message string.
+**
+** P5 is a value between 0 and 4, inclusive, that modifies the P4 string.
+**
+** 0: (no change)
+**    1: NOT NULL constraint failed: P4
+** 2: UNIQUE constraint failed: P4
+** 3: CHECK constraint failed: P4
+** 4: FOREIGN KEY constraint failed: P4
+**
+** If P5 is not zero and P4 is NULL, then everything after the ":" is
+** omitted.
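+**
+** For example, with P5==2 and P4 set to the (illustrative) string "t1.x",
+** the constructed error message is "UNIQUE constraint failed: t1.x".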
+**
+** There is an implied "Halt 0 0 0" instruction inserted at the very end of
+** every program. So a jump past the last instruction of the program
+** is the same as executing Halt.
+*/
+case OP_Halt: {
+ VdbeFrame *pFrame;
+ int pcx;
+
+ pcx = (int)(pOp - aOp);
+#ifdef SQLITE_DEBUG
+ if( pOp->p2==OE_Abort ){ sqlite3VdbeAssertAbortable(p); }
+#endif
+ if( pOp->p1==SQLITE_OK && p->pFrame ){
+ /* Halt the sub-program. Return control to the parent frame. */
+ pFrame = p->pFrame;
+ p->pFrame = pFrame->pParent;
+ p->nFrame--;
+ sqlite3VdbeSetChanges(db, p->nChange);
+ pcx = sqlite3VdbeFrameRestore(pFrame);
+ if( pOp->p2==OE_Ignore ){
+ /* Instruction pcx is the OP_Program that invoked the sub-program
+      ** currently being halted. If the p2 operand of this OP_Halt
+ ** instruction is set to OE_Ignore, then the sub-program is throwing
+ ** an IGNORE exception. In this case jump to the address specified
+ ** as the p2 of the calling OP_Program. */
+ pcx = p->aOp[pcx].p2-1;
+ }
+ aOp = p->aOp;
+ aMem = p->aMem;
+ pOp = &aOp[pcx];
+ break;
+ }
+ p->rc = pOp->p1;
+ p->errorAction = (u8)pOp->p2;
+ p->pc = pcx;
+ assert( pOp->p5<=4 );
+ if( p->rc ){
+ if( pOp->p5 ){
+ static const char * const azType[] = { "NOT NULL", "UNIQUE", "CHECK",
+ "FOREIGN KEY" };
+ testcase( pOp->p5==1 );
+ testcase( pOp->p5==2 );
+ testcase( pOp->p5==3 );
+ testcase( pOp->p5==4 );
+ sqlite3VdbeError(p, "%s constraint failed", azType[pOp->p5-1]);
+ if( pOp->p4.z ){
+ p->zErrMsg = sqlite3MPrintf(db, "%z: %s", p->zErrMsg, pOp->p4.z);
+ }
+ }else{
+ sqlite3VdbeError(p, "%s", pOp->p4.z);
+ }
+ sqlite3_log(pOp->p1, "abort at %d in [%s]: %s", pcx, p->zSql, p->zErrMsg);
+ }
+ rc = sqlite3VdbeHalt(p);
+ assert( rc==SQLITE_BUSY || rc==SQLITE_OK || rc==SQLITE_ERROR );
+ if( rc==SQLITE_BUSY ){
+ p->rc = SQLITE_BUSY;
+ }else{
+ assert( rc==SQLITE_OK || (p->rc&0xff)==SQLITE_CONSTRAINT );
+ assert( rc==SQLITE_OK || db->nDeferredCons>0 || db->nDeferredImmCons>0 );
+ rc = p->rc ? SQLITE_ERROR : SQLITE_DONE;
+ }
+ goto vdbe_return;
+}
+
+/* Opcode: Integer P1 P2 * * *
+** Synopsis: r[P2]=P1
+**
+** The 32-bit integer value P1 is written into register P2.
+*/
+case OP_Integer: { /* out2 */
+ pOut = out2Prerelease(p, pOp);
+ pOut->u.i = pOp->p1;
+ break;
+}
+
+/* Opcode: Int64 * P2 * P4 *
+** Synopsis: r[P2]=P4
+**
+** P4 is a pointer to a 64-bit integer value.
+** Write that value into register P2.
+*/
+case OP_Int64: { /* out2 */
+ pOut = out2Prerelease(p, pOp);
+ assert( pOp->p4.pI64!=0 );
+ pOut->u.i = *pOp->p4.pI64;
+ break;
+}
+
+#ifndef SQLITE_OMIT_FLOATING_POINT
+/* Opcode: Real * P2 * P4 *
+** Synopsis: r[P2]=P4
+**
+** P4 is a pointer to a 64-bit floating point value.
+** Write that value into register P2.
+*/
+case OP_Real: { /* same as TK_FLOAT, out2 */
+ pOut = out2Prerelease(p, pOp);
+ pOut->flags = MEM_Real;
+ assert( !sqlite3IsNaN(*pOp->p4.pReal) );
+ pOut->u.r = *pOp->p4.pReal;
+ break;
+}
+#endif
+
+/* Opcode: String8 * P2 * P4 *
+** Synopsis: r[P2]='P4'
+**
+** P4 points to a nul terminated UTF-8 string. This opcode is transformed
+** into a String opcode before it is executed for the first time. During
+** this transformation, the length of string P4 is computed and stored
+** as the P1 parameter.
+*/
+case OP_String8: { /* same as TK_STRING, out2 */
+ assert( pOp->p4.z!=0 );
+ pOut = out2Prerelease(p, pOp);
+ pOp->p1 = sqlite3Strlen30(pOp->p4.z);
+
+#ifndef SQLITE_OMIT_UTF16
+ if( encoding!=SQLITE_UTF8 ){
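+    /* The database encoding is UTF-16.  Convert the UTF-8 literal once,
+    ** then cache the converted text back into P4 so that later runs of
+    ** this (by then OP_String) instruction skip the conversion. */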
+ rc = sqlite3VdbeMemSetStr(pOut, pOp->p4.z, -1, SQLITE_UTF8, SQLITE_STATIC);
+ assert( rc==SQLITE_OK || rc==SQLITE_TOOBIG );
+ if( rc ) goto too_big;
+ if( SQLITE_OK!=sqlite3VdbeChangeEncoding(pOut, encoding) ) goto no_mem;
+ assert( pOut->szMalloc>0 && pOut->zMalloc==pOut->z );
+ assert( VdbeMemDynamic(pOut)==0 );
+ pOut->szMalloc = 0;
+ pOut->flags |= MEM_Static;
+ if( pOp->p4type==P4_DYNAMIC ){
+ sqlite3DbFree(db, pOp->p4.z);
+ }
+ pOp->p4type = P4_DYNAMIC;
+ pOp->p4.z = pOut->z;
+ pOp->p1 = pOut->n;
+ }
+#endif
+ if( pOp->p1>db->aLimit[SQLITE_LIMIT_LENGTH] ){
+ goto too_big;
+ }
+ pOp->opcode = OP_String;
+ assert( rc==SQLITE_OK );
+ /* Fall through to the next case, OP_String */
+ /* no break */ deliberate_fall_through
+}
+
+/* Opcode: String P1 P2 P3 P4 P5
+** Synopsis: r[P2]='P4' (len=P1)
+**
+** The string value P4 of length P1 (bytes) is stored in register P2.
+**
+** If P3 is not zero and the content of register P3 is equal to P5, then
+** the datatype of the register P2 is converted to BLOB. The content is
+** the same sequence of bytes, it is merely interpreted as a BLOB instead
+** of a string, as if it had been CAST. In other words:
+**
+** if( P3!=0 and reg[P3]==P5 ) reg[P2] := CAST(reg[P2] as BLOB)
+*/
+case OP_String: { /* out2 */
+ assert( pOp->p4.z!=0 );
+ pOut = out2Prerelease(p, pOp);
+ pOut->flags = MEM_Str|MEM_Static|MEM_Term;
+ pOut->z = pOp->p4.z;
+ pOut->n = pOp->p1;
+ pOut->enc = encoding;
+ UPDATE_MAX_BLOBSIZE(pOut);
+#ifndef SQLITE_LIKE_DOESNT_MATCH_BLOBS
+ if( pOp->p3>0 ){
+ assert( pOp->p3<=(p->nMem+1 - p->nCursor) );
+ pIn3 = &aMem[pOp->p3];
+ assert( pIn3->flags & MEM_Int );
+ if( pIn3->u.i==pOp->p5 ) pOut->flags = MEM_Blob|MEM_Static|MEM_Term;
+ }
+#endif
+ break;
+}
+
+/* Opcode: Null P1 P2 P3 * *
+** Synopsis: r[P2..P3]=NULL
+**
+** Write a NULL into register P2. If P3 is greater than P2, then also write
+** NULL into register P3 and every register in between P2 and P3. If P3
+** is less than P2 (typically P3 is zero) then only register P2 is
+** set to NULL.
+**
+** If the P1 value is non-zero, then also set the MEM_Cleared flag so that
+** NULL values will not compare equal even if SQLITE_NULLEQ is set on
+** OP_Ne or OP_Eq.
+*/
+case OP_Null: { /* out2 */
+ int cnt;
+ u16 nullFlag;
+ pOut = out2Prerelease(p, pOp);
+ cnt = pOp->p3-pOp->p2;
+ assert( pOp->p3<=(p->nMem+1 - p->nCursor) );
+ pOut->flags = nullFlag = pOp->p1 ? (MEM_Null|MEM_Cleared) : MEM_Null;
+ pOut->n = 0;
+#ifdef SQLITE_DEBUG
+ pOut->uTemp = 0;
+#endif
+ while( cnt>0 ){
+ pOut++;
+ memAboutToChange(p, pOut);
+ sqlite3VdbeMemSetNull(pOut);
+ pOut->flags = nullFlag;
+ pOut->n = 0;
+ cnt--;
+ }
+ break;
+}
+
+/* Opcode: SoftNull P1 * * * *
+** Synopsis: r[P1]=NULL
+**
+** Set register P1 to have the value NULL as seen by the OP_MakeRecord
+** instruction, but do not free any string or blob memory associated with
+** the register, so that if the value was a string or blob that was
+** previously copied using OP_SCopy, the copies will continue to be valid.
+*/
+case OP_SoftNull: {
+ assert( pOp->p1>0 && pOp->p1<=(p->nMem+1 - p->nCursor) );
+ pOut = &aMem[pOp->p1];
+ pOut->flags = (pOut->flags&~(MEM_Undefined|MEM_AffMask))|MEM_Null;
+ break;
+}
+
+/* Opcode: Blob P1 P2 * P4 *
+** Synopsis: r[P2]=P4 (len=P1)
+**
+** P4 points to a blob of data P1 bytes long. Store this
+** blob in register P2.
+*/
+case OP_Blob: { /* out2 */
+ assert( pOp->p1 <= SQLITE_MAX_LENGTH );
+ pOut = out2Prerelease(p, pOp);
+ sqlite3VdbeMemSetStr(pOut, pOp->p4.z, pOp->p1, 0, 0);
+ pOut->enc = encoding;
+ UPDATE_MAX_BLOBSIZE(pOut);
+ break;
+}
+
+/* Opcode: Variable P1 P2 * P4 *
+** Synopsis: r[P2]=parameter(P1,P4)
+**
+** Transfer the value of bound parameter P1 into register P2.
+**
+** If the parameter is named, then its name appears in P4.
+** The P4 value is used by sqlite3_bind_parameter_name().
+*/
+case OP_Variable: { /* out2 */
+ Mem *pVar; /* Value being transferred */
+
+ assert( pOp->p1>0 && pOp->p1<=p->nVar );
+ assert( pOp->p4.z==0 || pOp->p4.z==sqlite3VListNumToName(p->pVList,pOp->p1) );
+ pVar = &p->aVar[pOp->p1 - 1];
+ if( sqlite3VdbeMemTooBig(pVar) ){
+ goto too_big;
+ }
+ pOut = &aMem[pOp->p2];
+ if( VdbeMemDynamic(pOut) ) sqlite3VdbeMemSetNull(pOut);
+ memcpy(pOut, pVar, MEMCELLSIZE);
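+  /* The copy shares the bound value's payload rather than owning it, so
+  ** clear the ownership flags and mark the result static. */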
+ pOut->flags &= ~(MEM_Dyn|MEM_Ephem);
+ pOut->flags |= MEM_Static|MEM_FromBind;
+ UPDATE_MAX_BLOBSIZE(pOut);
+ break;
+}
+
+/* Opcode: Move P1 P2 P3 * *
+** Synopsis: r[P2@P3]=r[P1@P3]
+**
+** Move the P3 values in registers P1..P1+P3-1 over into
+** registers P2..P2+P3-1. Registers P1..P1+P3-1 are
+** left holding a NULL. It is an error for register ranges
+** P1..P1+P3-1 and P2..P2+P3-1 to overlap. It is an error
+** for P3 to be less than 1.
+*/
+case OP_Move: {
+ int n; /* Number of registers left to copy */
+ int p1; /* Register to copy from */
+ int p2; /* Register to copy to */
+
+ n = pOp->p3;
+ p1 = pOp->p1;
+ p2 = pOp->p2;
+ assert( n>0 && p1>0 && p2>0 );
+ assert( p1+n<=p2 || p2+n<=p1 );
+
+ pIn1 = &aMem[p1];
+ pOut = &aMem[p2];
+ do{
+ assert( pOut<=&aMem[(p->nMem+1 - p->nCursor)] );
+ assert( pIn1<=&aMem[(p->nMem+1 - p->nCursor)] );
+ assert( memIsValid(pIn1) );
+ memAboutToChange(p, pOut);
+ sqlite3VdbeMemMove(pOut, pIn1);
+#ifdef SQLITE_DEBUG
+ pIn1->pScopyFrom = 0;
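+    /* Registers that were shallow copies of pIn1 must now point their
+    ** pScopyFrom at pOut instead, since the value has moved there. */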
+ { int i;
+ for(i=1; i<p->nMem; i++){
+ if( aMem[i].pScopyFrom==pIn1 ){
+ aMem[i].pScopyFrom = pOut;
+ }
+ }
+ }
+#endif
+ Deephemeralize(pOut);
+ REGISTER_TRACE(p2++, pOut);
+ pIn1++;
+ pOut++;
+ }while( --n );
+ break;
+}
+
+/* Opcode: Copy P1 P2 P3 * *
+** Synopsis: r[P2@P3+1]=r[P1@P3+1]
+**
+** Make a copy of registers P1..P1+P3 into registers P2..P2+P3.
+**
+** This instruction makes a deep copy of the value. A duplicate
+** is made of any string or blob constant. See also OP_SCopy.
+*/
+case OP_Copy: {
+ int n;
+
+ n = pOp->p3;
+ pIn1 = &aMem[pOp->p1];
+ pOut = &aMem[pOp->p2];
+ assert( pOut!=pIn1 );
+ while( 1 ){
+ memAboutToChange(p, pOut);
+ sqlite3VdbeMemShallowCopy(pOut, pIn1, MEM_Ephem);
+ Deephemeralize(pOut);
+#ifdef SQLITE_DEBUG
+ pOut->pScopyFrom = 0;
+#endif
+ REGISTER_TRACE(pOp->p2+pOp->p3-n, pOut);
+ if( (n--)==0 ) break;
+ pOut++;
+ pIn1++;
+ }
+ break;
+}
+
+/* Opcode: SCopy P1 P2 * * *
+** Synopsis: r[P2]=r[P1]
+**
+** Make a shallow copy of register P1 into register P2.
+**
+** This instruction makes a shallow copy of the value. If the value
+** is a string or blob, then the copy is only a pointer to the
+** original and hence if the original changes so will the copy.
+** Worse, if the original is deallocated, the copy becomes invalid.
+** Thus the program must guarantee that the original will not change
+** during the lifetime of the copy. Use OP_Copy to make a complete
+** copy.
+*/
+case OP_SCopy: { /* out2 */
+ pIn1 = &aMem[pOp->p1];
+ pOut = &aMem[pOp->p2];
+ assert( pOut!=pIn1 );
+ sqlite3VdbeMemShallowCopy(pOut, pIn1, MEM_Ephem);
+#ifdef SQLITE_DEBUG
+ pOut->pScopyFrom = pIn1;
+ pOut->mScopyFlags = pIn1->flags;
+#endif
+ break;
+}
+
+/* Opcode: IntCopy P1 P2 * * *
+** Synopsis: r[P2]=r[P1]
+**
+** Transfer the integer value held in register P1 into register P2.
+**
+** This is an optimized version of SCopy that works only for integer
+** values.
+*/
+case OP_IntCopy: { /* out2 */
+ pIn1 = &aMem[pOp->p1];
+ assert( (pIn1->flags & MEM_Int)!=0 );
+ pOut = &aMem[pOp->p2];
+ sqlite3VdbeMemSetInt64(pOut, pIn1->u.i);
+ break;
+}
+
+/* Opcode: ResultRow P1 P2 * * *
+** Synopsis: output=r[P1@P2]
+**
+** The registers P1 through P1+P2-1 contain a single row of
+** results. This opcode causes the sqlite3_step() call to terminate
+** with an SQLITE_ROW return code and it sets up the sqlite3_stmt
+** structure to provide access to the r(P1)..r(P1+P2-1) values as
+** the result row.
+*/
+case OP_ResultRow: {
+ Mem *pMem;
+ int i;
+ assert( p->nResColumn==pOp->p2 );
+ assert( pOp->p1>0 );
+ assert( pOp->p1+pOp->p2<=(p->nMem+1 - p->nCursor)+1 );
+
+ /* If this statement has violated immediate foreign key constraints, do
+ ** not return the number of rows modified. And do not RELEASE the statement
+ ** transaction. It needs to be rolled back. */
+ if( SQLITE_OK!=(rc = sqlite3VdbeCheckFk(p, 0)) ){
+ assert( db->flags&SQLITE_CountRows );
+ assert( p->usesStmtJournal );
+ goto abort_due_to_error;
+ }
+
+ /* If the SQLITE_CountRows flag is set in sqlite3.flags mask, then
+ ** DML statements invoke this opcode to return the number of rows
+ ** modified to the user. This is the only way that a VM that
+ ** opens a statement transaction may invoke this opcode.
+ **
+ ** In case this is such a statement, close any statement transaction
+ ** opened by this VM before returning control to the user. This is to
+ ** ensure that statement-transactions are always nested, not overlapping.
+ ** If the open statement-transaction is not closed here, then the user
+ ** may step another VM that opens its own statement transaction. This
+ ** may lead to overlapping statement transactions.
+ **
+ ** The statement transaction is never a top-level transaction. Hence
+ ** the RELEASE call below can never fail.
+ */
+ assert( p->iStatement==0 || db->flags&SQLITE_CountRows );
+ rc = sqlite3VdbeCloseStatement(p, SAVEPOINT_RELEASE);
+ assert( rc==SQLITE_OK );
+
+ /* Invalidate all ephemeral cursor row caches */
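+  /* cacheCtr is always odd; adding 2 preserves that invariant while
+  ** guaranteeing the new value differs from every cursor's cacheStatus. */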
+ p->cacheCtr = (p->cacheCtr + 2)|1;
+
+ /* Make sure the results of the current row are \000 terminated
+ ** and have an assigned type. The results are de-ephemeralized as
+ ** a side effect.
+ */
+ pMem = p->pResultSet = &aMem[pOp->p1];
+ for(i=0; i<pOp->p2; i++){
+ assert( memIsValid(&pMem[i]) );
+ Deephemeralize(&pMem[i]);
+ assert( (pMem[i].flags & MEM_Ephem)==0
+ || (pMem[i].flags & (MEM_Str|MEM_Blob))==0 );
+ sqlite3VdbeMemNulTerminate(&pMem[i]);
+ REGISTER_TRACE(pOp->p1+i, &pMem[i]);
+#ifdef SQLITE_DEBUG
+ /* The registers in the result will not be used again when the
+ ** prepared statement restarts. This is because sqlite3_column()
+    ** APIs might have caused type conversions or made other changes to
+ ** the register values. Therefore, we can go ahead and break any
+ ** OP_SCopy dependencies. */
+ pMem[i].pScopyFrom = 0;
+#endif
+ }
+ if( db->mallocFailed ) goto no_mem;
+
+ if( db->mTrace & SQLITE_TRACE_ROW ){
+ db->trace.xV2(SQLITE_TRACE_ROW, db->pTraceArg, p, 0);
+ }
+
+
+ /* Return SQLITE_ROW
+ */
+ p->pc = (int)(pOp - aOp) + 1;
+ rc = SQLITE_ROW;
+ goto vdbe_return;
+}
+
+/* Opcode: Concat P1 P2 P3 * *
+** Synopsis: r[P3]=r[P2]+r[P1]
+**
+** Add the text in register P1 onto the end of the text in
+** register P2 and store the result in register P3.
+** If either the P1 or P2 text is NULL then store NULL in P3.
+**
+** P3 = P2 || P1
+**
+** It is illegal for P1 and P3 to be the same register. Sometimes,
+** if P3 is the same register as P2, the implementation is able
+** to avoid a memcpy().
+*/
+case OP_Concat: { /* same as TK_CONCAT, in1, in2, out3 */
+ i64 nByte; /* Total size of the output string or blob */
+ u16 flags1; /* Initial flags for P1 */
+ u16 flags2; /* Initial flags for P2 */
+
+ pIn1 = &aMem[pOp->p1];
+ pIn2 = &aMem[pOp->p2];
+ pOut = &aMem[pOp->p3];
+ testcase( pOut==pIn2 );
+ assert( pIn1!=pOut );
+ flags1 = pIn1->flags;
+ testcase( flags1 & MEM_Null );
+ testcase( pIn2->flags & MEM_Null );
+ if( (flags1 | pIn2->flags) & MEM_Null ){
+ sqlite3VdbeMemSetNull(pOut);
+ break;
+ }
+ if( (flags1 & (MEM_Str|MEM_Blob))==0 ){
+ if( sqlite3VdbeMemStringify(pIn1,encoding,0) ) goto no_mem;
+ flags1 = pIn1->flags & ~MEM_Str;
+ }else if( (flags1 & MEM_Zero)!=0 ){
+ if( sqlite3VdbeMemExpandBlob(pIn1) ) goto no_mem;
+ flags1 = pIn1->flags & ~MEM_Str;
+ }
+ flags2 = pIn2->flags;
+ if( (flags2 & (MEM_Str|MEM_Blob))==0 ){
+ if( sqlite3VdbeMemStringify(pIn2,encoding,0) ) goto no_mem;
+ flags2 = pIn2->flags & ~MEM_Str;
+ }else if( (flags2 & MEM_Zero)!=0 ){
+ if( sqlite3VdbeMemExpandBlob(pIn2) ) goto no_mem;
+ flags2 = pIn2->flags & ~MEM_Str;
+ }
+ nByte = pIn1->n + pIn2->n;
+ if( nByte>db->aLimit[SQLITE_LIMIT_LENGTH] ){
+ goto too_big;
+ }
+ if( sqlite3VdbeMemGrow(pOut, (int)nByte+3, pOut==pIn2) ){
+ goto no_mem;
+ }
+ MemSetTypeFlag(pOut, MEM_Str);
+ if( pOut!=pIn2 ){
+ memcpy(pOut->z, pIn2->z, pIn2->n);
+ assert( (pIn2->flags & MEM_Dyn) == (flags2 & MEM_Dyn) );
+ pIn2->flags = flags2;
+ }
+ memcpy(&pOut->z[pIn2->n], pIn1->z, pIn1->n);
+ assert( (pIn1->flags & MEM_Dyn) == (flags1 & MEM_Dyn) );
+ pIn1->flags = flags1;
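+  /* Write three NUL bytes of terminator so the result is terminated for
+  ** both UTF-8 and UTF-16 encodings; the buffer was grown to nByte+3
+  ** above to make room for them. */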
+ pOut->z[nByte]=0;
+ pOut->z[nByte+1] = 0;
+ pOut->z[nByte+2] = 0;
+ pOut->flags |= MEM_Term;
+ pOut->n = (int)nByte;
+ pOut->enc = encoding;
+ UPDATE_MAX_BLOBSIZE(pOut);
+ break;
+}
+
+/* Opcode: Add P1 P2 P3 * *
+** Synopsis: r[P3]=r[P1]+r[P2]
+**
+** Add the value in register P1 to the value in register P2
+** and store the result in register P3.
+** If either input is NULL, the result is NULL.
+*/
+/* Opcode: Multiply P1 P2 P3 * *
+** Synopsis: r[P3]=r[P1]*r[P2]
+**
+** Multiply the value in register P1 by the value in register P2
+** and store the result in register P3.
+** If either input is NULL, the result is NULL.
+*/
+/* Opcode: Subtract P1 P2 P3 * *
+** Synopsis: r[P3]=r[P2]-r[P1]
+**
+** Subtract the value in register P1 from the value in register P2
+** and store the result in register P3.
+** If either input is NULL, the result is NULL.
+*/
+/* Opcode: Divide P1 P2 P3 * *
+** Synopsis: r[P3]=r[P2]/r[P1]
+**
+** Divide the value in register P2 by the value in register P1
+** and store the result in register P3 (P3=P2/P1). If the value in
+** register P1 is zero, then the result is NULL. If either input is
+** NULL, the result is NULL.
+*/
+/* Opcode: Remainder P1 P2 P3 * *
+** Synopsis: r[P3]=r[P2]%r[P1]
+**
+** Compute the remainder after integer register P2 is divided by
+** register P1 and store the result in register P3.
+** If the value in register P1 is zero the result is NULL.
+** If either operand is NULL, the result is NULL.
+*/
+case OP_Add: /* same as TK_PLUS, in1, in2, out3 */
+case OP_Subtract: /* same as TK_MINUS, in1, in2, out3 */
+case OP_Multiply: /* same as TK_STAR, in1, in2, out3 */
+case OP_Divide: /* same as TK_SLASH, in1, in2, out3 */
+case OP_Remainder: { /* same as TK_REM, in1, in2, out3 */
+ u16 flags; /* Combined MEM_* flags from both inputs */
+ u16 type1; /* Numeric type of left operand */
+ u16 type2; /* Numeric type of right operand */
+ i64 iA; /* Integer value of left operand */
+ i64 iB; /* Integer value of right operand */
+ double rA; /* Real value of left operand */
+ double rB; /* Real value of right operand */
+
+ pIn1 = &aMem[pOp->p1];
+ type1 = numericType(pIn1);
+ pIn2 = &aMem[pOp->p2];
+ type2 = numericType(pIn2);
+ pOut = &aMem[pOp->p3];
+ flags = pIn1->flags | pIn2->flags;
+ if( (type1 & type2 & MEM_Int)!=0 ){
+ iA = pIn1->u.i;
+ iB = pIn2->u.i;
+ switch( pOp->opcode ){
+ case OP_Add: if( sqlite3AddInt64(&iB,iA) ) goto fp_math; break;
+ case OP_Subtract: if( sqlite3SubInt64(&iB,iA) ) goto fp_math; break;
+ case OP_Multiply: if( sqlite3MulInt64(&iB,iA) ) goto fp_math; break;
+ case OP_Divide: {
+ if( iA==0 ) goto arithmetic_result_is_null;
+ if( iA==-1 && iB==SMALLEST_INT64 ) goto fp_math;
+ iB /= iA;
+ break;
+ }
+ default: {
+ if( iA==0 ) goto arithmetic_result_is_null;
+ if( iA==-1 ) iA = 1;
+ iB %= iA;
+ break;
+ }
+ }
+ pOut->u.i = iB;
+ MemSetTypeFlag(pOut, MEM_Int);
+ }else if( (flags & MEM_Null)!=0 ){
+ goto arithmetic_result_is_null;
+ }else{
+fp_math:
+ rA = sqlite3VdbeRealValue(pIn1);
+ rB = sqlite3VdbeRealValue(pIn2);
+ switch( pOp->opcode ){
+ case OP_Add: rB += rA; break;
+ case OP_Subtract: rB -= rA; break;
+ case OP_Multiply: rB *= rA; break;
+ case OP_Divide: {
+ /* (double)0 In case of SQLITE_OMIT_FLOATING_POINT... */
+ if( rA==(double)0 ) goto arithmetic_result_is_null;
+ rB /= rA;
+ break;
+ }
+ default: {
+ iA = sqlite3VdbeIntValue(pIn1);
+ iB = sqlite3VdbeIntValue(pIn2);
+ if( iA==0 ) goto arithmetic_result_is_null;
+ if( iA==-1 ) iA = 1;
+ rB = (double)(iB % iA);
+ break;
+ }
+ }
+#ifdef SQLITE_OMIT_FLOATING_POINT
+ pOut->u.i = rB;
+ MemSetTypeFlag(pOut, MEM_Int);
+#else
+ if( sqlite3IsNaN(rB) ){
+ goto arithmetic_result_is_null;
+ }
+ pOut->u.r = rB;
+ MemSetTypeFlag(pOut, MEM_Real);
+#endif
+ }
+ break;
+
+arithmetic_result_is_null:
+ sqlite3VdbeMemSetNull(pOut);
+ break;
+}
+
+/* Opcode: CollSeq P1 * * P4 *
+**
+** P4 is a pointer to a CollSeq object. If the next call to a user function
+** or aggregate calls sqlite3GetFuncCollSeq(), this collation sequence will
+** be returned. This is used by the built-in min(), max() and nullif()
+** functions.
+**
+** If P1 is not zero, then it is a register that a subsequent min() or
+** max() aggregate will set to 1 if the current row is not the minimum or
+** maximum. The P1 register is initialized to 0 by this instruction.
+**
+** The interface used by the implementation of the aforementioned functions
+** to retrieve the collation sequence set by this opcode is not available
+** publicly. Only built-in functions have access to this feature.
+*/
+case OP_CollSeq: {
+ assert( pOp->p4type==P4_COLLSEQ );
+ if( pOp->p1 ){
+ sqlite3VdbeMemSetInt64(&aMem[pOp->p1], 0);
+ }
+ break;
+}
+
+/* Opcode: BitAnd P1 P2 P3 * *
+** Synopsis: r[P3]=r[P1]&r[P2]
+**
+** Take the bit-wise AND of the values in register P1 and P2 and
+** store the result in register P3.
+** If either input is NULL, the result is NULL.
+*/
+/* Opcode: BitOr P1 P2 P3 * *
+** Synopsis: r[P3]=r[P1]|r[P2]
+**
+** Take the bit-wise OR of the values in register P1 and P2 and
+** store the result in register P3.
+** If either input is NULL, the result is NULL.
+*/
+/* Opcode: ShiftLeft P1 P2 P3 * *
+** Synopsis: r[P3]=r[P2]<<r[P1]
+**
+** Shift the integer value in register P2 to the left by the
+** number of bits specified by the integer in register P1.
+** Store the result in register P3.
+** If either input is NULL, the result is NULL.
+*/
+/* Opcode: ShiftRight P1 P2 P3 * *
+** Synopsis: r[P3]=r[P2]>>r[P1]
+**
+** Shift the integer value in register P2 to the right by the
+** number of bits specified by the integer in register P1.
+** Store the result in register P3.
+** If either input is NULL, the result is NULL.
+*/
+case OP_BitAnd: /* same as TK_BITAND, in1, in2, out3 */
+case OP_BitOr: /* same as TK_BITOR, in1, in2, out3 */
+case OP_ShiftLeft: /* same as TK_LSHIFT, in1, in2, out3 */
+case OP_ShiftRight: { /* same as TK_RSHIFT, in1, in2, out3 */
+ i64 iA;
+ u64 uA;
+ i64 iB;
+ u8 op;
+
+ pIn1 = &aMem[pOp->p1];
+ pIn2 = &aMem[pOp->p2];
+ pOut = &aMem[pOp->p3];
+ if( (pIn1->flags | pIn2->flags) & MEM_Null ){
+ sqlite3VdbeMemSetNull(pOut);
+ break;
+ }
+ iA = sqlite3VdbeIntValue(pIn2);
+ iB = sqlite3VdbeIntValue(pIn1);
+ op = pOp->opcode;
+ if( op==OP_BitAnd ){
+ iA &= iB;
+ }else if( op==OP_BitOr ){
+ iA |= iB;
+ }else if( iB!=0 ){
+ assert( op==OP_ShiftRight || op==OP_ShiftLeft );
+
+ /* If shifting by a negative amount, shift in the other direction */
+ if( iB<0 ){
+ assert( OP_ShiftRight==OP_ShiftLeft+1 );
+ op = 2*OP_ShiftLeft + 1 - op;
+ iB = iB>(-64) ? -iB : 64;
+ }
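+    /* E.g. a right-shift by -3 becomes a left-shift by 3; any shift
+    ** magnitude of 64 or more is clamped to 64 and handled below. */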
+
+ if( iB>=64 ){
+ iA = (iA>=0 || op==OP_ShiftLeft) ? 0 : -1;
+ }else{
+ memcpy(&uA, &iA, sizeof(uA));
+ if( op==OP_ShiftLeft ){
+ uA <<= iB;
+ }else{
+ uA >>= iB;
+ /* Sign-extend on a right shift of a negative number */
+ if( iA<0 ) uA |= ((((u64)0xffffffff)<<32)|0xffffffff) << (64-iB);
+ }
+ memcpy(&iA, &uA, sizeof(iA));
+ }
+ }
+ pOut->u.i = iA;
+ MemSetTypeFlag(pOut, MEM_Int);
+ break;
+}
+
+/* Opcode: AddImm P1 P2 * * *
+** Synopsis: r[P1]=r[P1]+P2
+**
+** Add the constant P2 to the value in register P1.
+** The result is always an integer.
+**
+** To force any register to be an integer, just add 0.
+*/
+case OP_AddImm: { /* in1 */
+ pIn1 = &aMem[pOp->p1];
+ memAboutToChange(p, pIn1);
+ sqlite3VdbeMemIntegerify(pIn1);
+ pIn1->u.i += pOp->p2;
+ break;
+}
+
+/* Opcode: MustBeInt P1 P2 * * *
+**
+** Force the value in register P1 to be an integer. If the value
+** in P1 is not an integer and cannot be converted into an integer
+** without data loss, then jump immediately to P2, or if P2==0
+** raise an SQLITE_MISMATCH exception.
+*/
+case OP_MustBeInt: { /* jump, in1 */
+ pIn1 = &aMem[pOp->p1];
+ if( (pIn1->flags & MEM_Int)==0 ){
+ applyAffinity(pIn1, SQLITE_AFF_NUMERIC, encoding);
+ if( (pIn1->flags & MEM_Int)==0 ){
+ VdbeBranchTaken(1, 2);
+ if( pOp->p2==0 ){
+ rc = SQLITE_MISMATCH;
+ goto abort_due_to_error;
+ }else{
+ goto jump_to_p2;
+ }
+ }
+ }
+ VdbeBranchTaken(0, 2);
+ MemSetTypeFlag(pIn1, MEM_Int);
+ break;
+}
+
+#ifndef SQLITE_OMIT_FLOATING_POINT
+/* Opcode: RealAffinity P1 * * * *
+**
+** If register P1 holds an integer, convert it to a real value.
+**
+** This opcode is used when extracting information from a column that
+** has REAL affinity. Such column values may still be stored as
+** integers, for space efficiency, but after extraction we want them
+** to have only a real value.
+*/
+case OP_RealAffinity: { /* in1 */
+ pIn1 = &aMem[pOp->p1];
+ if( pIn1->flags & (MEM_Int|MEM_IntReal) ){
+ testcase( pIn1->flags & MEM_Int );
+ testcase( pIn1->flags & MEM_IntReal );
+ sqlite3VdbeMemRealify(pIn1);
+ REGISTER_TRACE(pOp->p1, pIn1);
+ }
+ break;
+}
+#endif
+
+#ifndef SQLITE_OMIT_CAST
+/* Opcode: Cast P1 P2 * * *
+** Synopsis: affinity(r[P1])
+**
+** Force the value in register P1 to be the type defined by P2.
+**
+** <ul>
+** <li> P2=='A' &rarr; BLOB
+** <li> P2=='B' &rarr; TEXT
+** <li> P2=='C' &rarr; NUMERIC
+** <li> P2=='D' &rarr; INTEGER
+** <li> P2=='E' &rarr; REAL
+** </ul>
+**
+** A NULL value is not changed by this routine. It remains NULL.
+*/
+case OP_Cast: { /* in1 */
+ assert( pOp->p2>=SQLITE_AFF_BLOB && pOp->p2<=SQLITE_AFF_REAL );
+ testcase( pOp->p2==SQLITE_AFF_TEXT );
+ testcase( pOp->p2==SQLITE_AFF_BLOB );
+ testcase( pOp->p2==SQLITE_AFF_NUMERIC );
+ testcase( pOp->p2==SQLITE_AFF_INTEGER );
+ testcase( pOp->p2==SQLITE_AFF_REAL );
+ pIn1 = &aMem[pOp->p1];
+ memAboutToChange(p, pIn1);
+ rc = ExpandBlob(pIn1);
+ if( rc ) goto abort_due_to_error;
+ rc = sqlite3VdbeMemCast(pIn1, pOp->p2, encoding);
+ if( rc ) goto abort_due_to_error;
+ UPDATE_MAX_BLOBSIZE(pIn1);
+ REGISTER_TRACE(pOp->p1, pIn1);
+ break;
+}
+#endif /* SQLITE_OMIT_CAST */
+
+/* Opcode: Eq P1 P2 P3 P4 P5
+** Synopsis: IF r[P3]==r[P1]
+**
+** Compare the values in register P1 and P3. If reg(P3)==reg(P1) then
+** jump to address P2. Or if the SQLITE_STOREP2 flag is set in P5, then
+** store the result of comparison in register P2.
+**
+** The SQLITE_AFF_MASK portion of P5 must be an affinity character -
+** SQLITE_AFF_TEXT, SQLITE_AFF_INTEGER, and so forth. An attempt is made
+** to coerce both inputs according to this affinity before the
+** comparison is made. If the SQLITE_AFF_MASK is 0x00, then numeric
+** affinity is used. Note that the affinity conversions are stored
+** back into the input registers P1 and P3. So this opcode can cause
+** persistent changes to registers P1 and P3.
+**
+** Once any conversions have taken place, and neither value is NULL,
+** the values are compared. If both values are blobs then memcmp() is
+** used to determine the results of the comparison. If both values
+** are text, then the appropriate collating function specified in
+** P4 is used to do the comparison. If P4 is not specified then
+** memcmp() is used to compare text strings. If both values are
+** numeric, then a numeric comparison is used. If the two values
+** are of different types, then numbers are considered less than
+** strings and strings are considered less than blobs.
+**
+** If SQLITE_NULLEQ is set in P5 then the result of comparison is always either
+** true or false and is never NULL. If both operands are NULL then the result
+** of comparison is true. If either operand is NULL then the result is false.
+** If neither operand is NULL the result is the same as it would be if
+** the SQLITE_NULLEQ flag were omitted from P5.
+**
+** If both SQLITE_STOREP2 and SQLITE_KEEPNULL flags are set then the
+** content of r[P2] is only changed if the new value is NULL or 0 (false).
+** In other words, a prior r[P2] value will not be overwritten by 1 (true).
+*/
+/* Opcode: Ne P1 P2 P3 P4 P5
+** Synopsis: IF r[P3]!=r[P1]
+**
+** This works just like the Eq opcode except that the jump is taken if
+** the operands in registers P1 and P3 are not equal. See the Eq opcode for
+** additional information.
+**
+** If both SQLITE_STOREP2 and SQLITE_KEEPNULL flags are set then the
+** content of r[P2] is only changed if the new value is NULL or 1 (true).
+** In other words, a prior r[P2] value will not be overwritten by 0 (false).
+*/
+/* Opcode: Lt P1 P2 P3 P4 P5
+** Synopsis: IF r[P3]<r[P1]
+**
+** Compare the values in register P1 and P3. If reg(P3)<reg(P1) then
+** jump to address P2. Or if the SQLITE_STOREP2 flag is set in P5 store
+** the result of comparison (0 or 1 or NULL) into register P2.
+**
+** If the SQLITE_JUMPIFNULL bit of P5 is set and either reg(P1) or
+** reg(P3) is NULL then take the jump. If the SQLITE_JUMPIFNULL
+** bit is clear then fall through if either operand is NULL.
+**
+** The SQLITE_AFF_MASK portion of P5 must be an affinity character -
+** SQLITE_AFF_TEXT, SQLITE_AFF_INTEGER, and so forth. An attempt is made
+** to coerce both inputs according to this affinity before the
+** comparison is made. If the SQLITE_AFF_MASK is 0x00, then numeric
+** affinity is used. Note that the affinity conversions are stored
+** back into the input registers P1 and P3. So this opcode can cause
+** persistent changes to registers P1 and P3.
+**
+** Once any conversions have taken place, and neither value is NULL,
+** the values are compared. If both values are blobs then memcmp() is
+** used to determine the results of the comparison. If both values
+** are text, then the appropriate collating function specified in
+** P4 is used to do the comparison. If P4 is not specified then
+** memcmp() is used to compare text strings. If both values are
+** numeric, then a numeric comparison is used. If the two values
+** are of different types, then numbers are considered less than
+** strings and strings are considered less than blobs.
+*/
+/* Opcode: Le P1 P2 P3 P4 P5
+** Synopsis: IF r[P3]<=r[P1]
+**
+** This works just like the Lt opcode except that the jump is taken if
+** the content of register P3 is less than or equal to the content of
+** register P1. See the Lt opcode for additional information.
+*/
+/* Opcode: Gt P1 P2 P3 P4 P5
+** Synopsis: IF r[P3]>r[P1]
+**
+** This works just like the Lt opcode except that the jump is taken if
+** the content of register P3 is greater than the content of
+** register P1. See the Lt opcode for additional information.
+*/
+/* Opcode: Ge P1 P2 P3 P4 P5
+** Synopsis: IF r[P3]>=r[P1]
+**
+** This works just like the Lt opcode except that the jump is taken if
+** the content of register P3 is greater than or equal to the content of
+** register P1. See the Lt opcode for additional information.
+*/
+case OP_Eq: /* same as TK_EQ, jump, in1, in3 */
+case OP_Ne: /* same as TK_NE, jump, in1, in3 */
+case OP_Lt: /* same as TK_LT, jump, in1, in3 */
+case OP_Le: /* same as TK_LE, jump, in1, in3 */
+case OP_Gt: /* same as TK_GT, jump, in1, in3 */
+case OP_Ge: { /* same as TK_GE, jump, in1, in3 */
+ int res, res2; /* Result of the comparison of pIn1 against pIn3 */
+ char affinity; /* Affinity to use for comparison */
+ u16 flags1; /* Copy of initial value of pIn1->flags */
+ u16 flags3; /* Copy of initial value of pIn3->flags */
+
+ pIn1 = &aMem[pOp->p1];
+ pIn3 = &aMem[pOp->p3];
+ flags1 = pIn1->flags;
+ flags3 = pIn3->flags;
+ if( (flags1 | flags3)&MEM_Null ){
+ /* One or both operands are NULL */
+ if( pOp->p5 & SQLITE_NULLEQ ){
+ /* If SQLITE_NULLEQ is set (which will only happen if the operator is
+ ** OP_Eq or OP_Ne) then take the jump or not depending on whether
+ ** or not both operands are null.
+ */
+ assert( (flags1 & MEM_Cleared)==0 );
+ assert( (pOp->p5 & SQLITE_JUMPIFNULL)==0 || CORRUPT_DB );
+ testcase( (pOp->p5 & SQLITE_JUMPIFNULL)!=0 );
+ if( (flags1&flags3&MEM_Null)!=0
+ && (flags3&MEM_Cleared)==0
+ ){
+ res = 0; /* Operands are equal */
+ }else{
+ res = ((flags3 & MEM_Null) ? -1 : +1); /* Operands are not equal */
+ }
+ }else{
+      /* SQLITE_NULLEQ is clear and at least one operand is NULL,
+      ** so the result is always NULL.
+ ** The jump is taken if the SQLITE_JUMPIFNULL bit is set.
+ */
+ if( pOp->p5 & SQLITE_STOREP2 ){
+ pOut = &aMem[pOp->p2];
+ iCompare = 1; /* Operands are not equal */
+ memAboutToChange(p, pOut);
+ MemSetTypeFlag(pOut, MEM_Null);
+ REGISTER_TRACE(pOp->p2, pOut);
+ }else{
+ VdbeBranchTaken(2,3);
+ if( pOp->p5 & SQLITE_JUMPIFNULL ){
+ goto jump_to_p2;
+ }
+ }
+ break;
+ }
+ }else{
+ /* Neither operand is NULL. Do a comparison. */
+ affinity = pOp->p5 & SQLITE_AFF_MASK;
+ if( affinity>=SQLITE_AFF_NUMERIC ){
+ if( (flags1 | flags3)&MEM_Str ){
+ if( (flags1 & (MEM_Int|MEM_IntReal|MEM_Real|MEM_Str))==MEM_Str ){
+ applyNumericAffinity(pIn1,0);
+ testcase( flags3==pIn3->flags );
+ flags3 = pIn3->flags;
+ }
+ if( (flags3 & (MEM_Int|MEM_IntReal|MEM_Real|MEM_Str))==MEM_Str ){
+ applyNumericAffinity(pIn3,0);
+ }
+ }
+ /* Handle the common case of integer comparison here, as an
+ ** optimization, to avoid a call to sqlite3MemCompare() */
+ if( (pIn1->flags & pIn3->flags & MEM_Int)!=0 ){
+ if( pIn3->u.i > pIn1->u.i ){ res = +1; goto compare_op; }
+ if( pIn3->u.i < pIn1->u.i ){ res = -1; goto compare_op; }
+ res = 0;
+ goto compare_op;
+ }
+ }else if( affinity==SQLITE_AFF_TEXT ){
+ if( (flags1 & MEM_Str)==0 && (flags1&(MEM_Int|MEM_Real|MEM_IntReal))!=0 ){
+ testcase( pIn1->flags & MEM_Int );
+ testcase( pIn1->flags & MEM_Real );
+ testcase( pIn1->flags & MEM_IntReal );
+ sqlite3VdbeMemStringify(pIn1, encoding, 1);
+ testcase( (flags1&MEM_Dyn) != (pIn1->flags&MEM_Dyn) );
+ flags1 = (pIn1->flags & ~MEM_TypeMask) | (flags1 & MEM_TypeMask);
+ if( NEVER(pIn1==pIn3) ) flags3 = flags1 | MEM_Str;
+ }
+ if( (flags3 & MEM_Str)==0 && (flags3&(MEM_Int|MEM_Real|MEM_IntReal))!=0 ){
+ testcase( pIn3->flags & MEM_Int );
+ testcase( pIn3->flags & MEM_Real );
+ testcase( pIn3->flags & MEM_IntReal );
+ sqlite3VdbeMemStringify(pIn3, encoding, 1);
+ testcase( (flags3&MEM_Dyn) != (pIn3->flags&MEM_Dyn) );
+ flags3 = (pIn3->flags & ~MEM_TypeMask) | (flags3 & MEM_TypeMask);
+ }
+ }
+ assert( pOp->p4type==P4_COLLSEQ || pOp->p4.pColl==0 );
+ res = sqlite3MemCompare(pIn3, pIn1, pOp->p4.pColl);
+ }
+compare_op:
+  /* At this point, res is negative, zero, or positive if reg[P3] is
+  ** less than, equal to, or greater than reg[P1], respectively. Compute
+ ** the answer to this operator in res2, depending on what the comparison
+ ** operator actually is. The next block of code depends on the fact
+ ** that the 6 comparison operators are consecutive integers in this
+ ** order: NE, EQ, GT, LE, LT, GE */
+ assert( OP_Eq==OP_Ne+1 ); assert( OP_Gt==OP_Ne+2 ); assert( OP_Le==OP_Ne+3 );
+ assert( OP_Lt==OP_Ne+4 ); assert( OP_Ge==OP_Ne+5 );
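+  /* E.g. when res<0 (reg[P3]<reg[P1]) and the operator is OP_Le, the
+  ** answer is aLTb[OP_Le-OP_Ne]==1, so the branch is taken. */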
+ if( res<0 ){ /* ne, eq, gt, le, lt, ge */
+ static const unsigned char aLTb[] = { 1, 0, 0, 1, 1, 0 };
+ res2 = aLTb[pOp->opcode - OP_Ne];
+ }else if( res==0 ){
+ static const unsigned char aEQb[] = { 0, 1, 0, 1, 0, 1 };
+ res2 = aEQb[pOp->opcode - OP_Ne];
+ }else{
+ static const unsigned char aGTb[] = { 1, 0, 1, 0, 0, 1 };
+ res2 = aGTb[pOp->opcode - OP_Ne];
+ }
+
+ /* Undo any changes made by applyAffinity() to the input registers. */
+ assert( (pIn3->flags & MEM_Dyn) == (flags3 & MEM_Dyn) );
+ pIn3->flags = flags3;
+ assert( (pIn1->flags & MEM_Dyn) == (flags1 & MEM_Dyn) );
+ pIn1->flags = flags1;
+
+ if( pOp->p5 & SQLITE_STOREP2 ){
+ pOut = &aMem[pOp->p2];
+ iCompare = res;
+ if( (pOp->p5 & SQLITE_KEEPNULL)!=0 ){
+ /* The KEEPNULL flag prevents OP_Eq from overwriting a NULL with 1
+ ** and prevents OP_Ne from overwriting NULL with 0. This flag
+ ** is only used in contexts where either:
+ ** (1) op==OP_Eq && (r[P2]==NULL || r[P2]==0)
+ ** (2) op==OP_Ne && (r[P2]==NULL || r[P2]==1)
+ ** Therefore it is not necessary to check the content of r[P2] for
+ ** NULL. */
+ assert( pOp->opcode==OP_Ne || pOp->opcode==OP_Eq );
+ assert( res2==0 || res2==1 );
+ testcase( res2==0 && pOp->opcode==OP_Eq );
+ testcase( res2==1 && pOp->opcode==OP_Eq );
+ testcase( res2==0 && pOp->opcode==OP_Ne );
+ testcase( res2==1 && pOp->opcode==OP_Ne );
+ if( (pOp->opcode==OP_Eq)==res2 ) break;
+ }
+ memAboutToChange(p, pOut);
+ MemSetTypeFlag(pOut, MEM_Int);
+ pOut->u.i = res2;
+ REGISTER_TRACE(pOp->p2, pOut);
+ }else{
+ VdbeBranchTaken(res2!=0, (pOp->p5 & SQLITE_NULLEQ)?2:3);
+ if( res2 ){
+ goto jump_to_p2;
+ }
+ }
+ break;
+}
+
+/* Opcode: ElseNotEq * P2 * * *
+**
+** This opcode must follow an OP_Lt or OP_Gt comparison operator. There
+** can be zero or more OP_ReleaseReg opcodes intervening, but no other
+** opcodes are allowed to occur between this instruction and the previous
+** OP_Lt or OP_Gt. Furthermore, the prior OP_Lt or OP_Gt must have the
+** SQLITE_STOREP2 bit set in the P5 field.
+**
+** If result of an OP_Eq comparison on the same two operands as the
+** prior OP_Lt or OP_Gt would have been NULL or false (0), then
+** jump to P2. If the result of an OP_Eq comparison on the two previous
+** operands would have been true (1), then fall through.
+*/
+case OP_ElseNotEq: { /* same as TK_ESCAPE, jump */
+
+#ifdef SQLITE_DEBUG
+ /* Verify the preconditions of this opcode - that it follows an OP_Lt or
+ ** OP_Gt with the SQLITE_STOREP2 flag set, with zero or more intervening
+ ** OP_ReleaseReg opcodes */
+ int iAddr;
+ for(iAddr = (int)(pOp - aOp) - 1; ALWAYS(iAddr>=0); iAddr--){
+ if( aOp[iAddr].opcode==OP_ReleaseReg ) continue;
+ assert( aOp[iAddr].opcode==OP_Lt || aOp[iAddr].opcode==OP_Gt );
+ assert( aOp[iAddr].p5 & SQLITE_STOREP2 );
+ break;
+ }
+#endif /* SQLITE_DEBUG */
+ VdbeBranchTaken(iCompare!=0, 2);
+ if( iCompare!=0 ) goto jump_to_p2;
+ break;
+}
+
+
+/* Opcode: Permutation * * * P4 *
+**
+** Set the permutation used by the OP_Compare operator in the next
+** instruction. The permutation is stored in the P4 operand.
+**
+** The permutation is only valid until the next OP_Compare that has
+** the OPFLAG_PERMUTE bit set in P5. Typically the OP_Permutation should
+** occur immediately prior to the OP_Compare.
+**
+** The first integer in the P4 integer array is the length of the array
+** and does not become part of the permutation.
+*/
+case OP_Permutation: {
+ assert( pOp->p4type==P4_INTARRAY );
+ assert( pOp->p4.ai );
+ assert( pOp[1].opcode==OP_Compare );
+ assert( pOp[1].p5 & OPFLAG_PERMUTE );
+ break;
+}
+
+/* Opcode: Compare P1 P2 P3 P4 P5
+** Synopsis: r[P1@P3] <-> r[P2@P3]
+**
+** Compare two vectors of registers in reg(P1)..reg(P1+P3-1) (call this
+** vector "A") and in reg(P2)..reg(P2+P3-1) ("B"). Save the result of
+** the comparison for use by the next OP_Jump instruction.
+**
+** If P5 has the OPFLAG_PERMUTE bit set, then the order of comparison is
+** determined by the most recent OP_Permutation operator. If the
+** OPFLAG_PERMUTE bit is clear, then registers are compared in sequential
+** order.
+**
+** P4 is a KeyInfo structure that defines collating sequences and sort
+** orders for the comparison. The permutation applies to registers
+** only. The KeyInfo elements are used sequentially.
+**
+** The comparison is a sort comparison, so NULLs compare equal,
+** NULLs are less than numbers, numbers are less than strings,
+** and strings are less than blobs.
+*/
+case OP_Compare: {
+ int n;
+ int i;
+ int p1;
+ int p2;
+ const KeyInfo *pKeyInfo;
+ u32 idx;
+ CollSeq *pColl; /* Collating sequence to use on this term */
+ int bRev; /* True for DESCENDING sort order */
+ u32 *aPermute; /* The permutation */
+
+ if( (pOp->p5 & OPFLAG_PERMUTE)==0 ){
+ aPermute = 0;
+ }else{
+ assert( pOp>aOp );
+ assert( pOp[-1].opcode==OP_Permutation );
+ assert( pOp[-1].p4type==P4_INTARRAY );
+ aPermute = pOp[-1].p4.ai + 1;
+ assert( aPermute!=0 );
+ }
+ n = pOp->p3;
+ pKeyInfo = pOp->p4.pKeyInfo;
+ assert( n>0 );
+ assert( pKeyInfo!=0 );
+ p1 = pOp->p1;
+ p2 = pOp->p2;
+#ifdef SQLITE_DEBUG
+ if( aPermute ){
+ int k, mx = 0;
+ for(k=0; k<n; k++) if( aPermute[k]>(u32)mx ) mx = aPermute[k];
+ assert( p1>0 && p1+mx<=(p->nMem+1 - p->nCursor)+1 );
+ assert( p2>0 && p2+mx<=(p->nMem+1 - p->nCursor)+1 );
+ }else{
+ assert( p1>0 && p1+n<=(p->nMem+1 - p->nCursor)+1 );
+ assert( p2>0 && p2+n<=(p->nMem+1 - p->nCursor)+1 );
+ }
+#endif /* SQLITE_DEBUG */
+ for(i=0; i<n; i++){
+ idx = aPermute ? aPermute[i] : (u32)i;
+ assert( memIsValid(&aMem[p1+idx]) );
+ assert( memIsValid(&aMem[p2+idx]) );
+ REGISTER_TRACE(p1+idx, &aMem[p1+idx]);
+ REGISTER_TRACE(p2+idx, &aMem[p2+idx]);
+ assert( i<pKeyInfo->nKeyField );
+ pColl = pKeyInfo->aColl[i];
+ bRev = (pKeyInfo->aSortFlags[i] & KEYINFO_ORDER_DESC);
+ iCompare = sqlite3MemCompare(&aMem[p1+idx], &aMem[p2+idx], pColl);
+ if( iCompare ){
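+      /* With KEYINFO_ORDER_BIGNULL, NULLs sort as if larger than every
+      ** non-NULL value, so invert the result when either operand is
+      ** NULL. */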
+ if( (pKeyInfo->aSortFlags[i] & KEYINFO_ORDER_BIGNULL)
+ && ((aMem[p1+idx].flags & MEM_Null) || (aMem[p2+idx].flags & MEM_Null))
+ ){
+ iCompare = -iCompare;
+ }
+ if( bRev ) iCompare = -iCompare;
+ break;
+ }
+ }
+ break;
+}
+
+/* Opcode: Jump P1 P2 P3 * *
+**
+** Jump to the instruction at address P1, P2, or P3 depending on whether
+** in the most recent OP_Compare instruction the P1 vector was less than,
+** equal to, or greater than the P2 vector, respectively.
+*/
+case OP_Jump: { /* jump */
+ if( iCompare<0 ){
+ VdbeBranchTaken(0,4); pOp = &aOp[pOp->p1 - 1];
+ }else if( iCompare==0 ){
+ VdbeBranchTaken(1,4); pOp = &aOp[pOp->p2 - 1];
+ }else{
+ VdbeBranchTaken(2,4); pOp = &aOp[pOp->p3 - 1];
+ }
+ break;
+}
+
+/* Opcode: And P1 P2 P3 * *
+** Synopsis: r[P3]=(r[P1] && r[P2])
+**
+** Take the logical AND of the values in registers P1 and P2 and
+** write the result into register P3.
+**
+** If either P1 or P2 is 0 (false) then the result is 0 even if
+** the other input is NULL. A NULL and true or two NULLs give
+** a NULL output.
+*/
+/* Opcode: Or P1 P2 P3 * *
+** Synopsis: r[P3]=(r[P1] || r[P2])
+**
+** Take the logical OR of the values in register P1 and P2 and
+** store the answer in register P3.
+**
+** If either P1 or P2 is nonzero (true) then the result is 1 (true)
+** even if the other input is NULL. A NULL and false or two NULLs
+** give a NULL output.
+*/
+case OP_And: /* same as TK_AND, in1, in2, out3 */
+case OP_Or: { /* same as TK_OR, in1, in2, out3 */
+ int v1; /* Left operand: 0==FALSE, 1==TRUE, 2==UNKNOWN or NULL */
+ int v2; /* Right operand: 0==FALSE, 1==TRUE, 2==UNKNOWN or NULL */
+
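+  /* and_logic[] and or_logic[] below encode SQL three-valued logic as
+  ** 3x3 truth tables indexed by v1*3+v2, with 0==FALSE, 1==TRUE, and
+  ** 2==NULL.  E.g. FALSE AND NULL is FALSE (and_logic[2]==0) while
+  ** FALSE OR NULL is NULL (or_logic[2]==2). */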
+ v1 = sqlite3VdbeBooleanValue(&aMem[pOp->p1], 2);
+ v2 = sqlite3VdbeBooleanValue(&aMem[pOp->p2], 2);
+ if( pOp->opcode==OP_And ){
+ static const unsigned char and_logic[] = { 0, 0, 0, 0, 1, 2, 0, 2, 2 };
+ v1 = and_logic[v1*3+v2];
+ }else{
+ static const unsigned char or_logic[] = { 0, 1, 2, 1, 1, 1, 2, 1, 2 };
+ v1 = or_logic[v1*3+v2];
+ }
+ pOut = &aMem[pOp->p3];
+ if( v1==2 ){
+ MemSetTypeFlag(pOut, MEM_Null);
+ }else{
+ pOut->u.i = v1;
+ MemSetTypeFlag(pOut, MEM_Int);
+ }
+ break;
+}
+
+/* Opcode: IsTrue P1 P2 P3 P4 *
+** Synopsis: r[P2] = coalesce(r[P1]==TRUE,P3) ^ P4
+**
+** This opcode implements the IS TRUE, IS FALSE, IS NOT TRUE, and
+** IS NOT FALSE operators.
+**
+** Interpret the value in register P1 as a boolean value. Store that
+** boolean (a 0 or 1) in register P2. Or if the value in register P1 is
+** NULL, then P3 is stored in register P2. Invert the answer if P4
+** is 1.
+**
+** The logic is summarized like this:
+**
+** <ul>
+** <li> If P3==0 and P4==0 then r[P2] := r[P1] IS TRUE
+** <li> If P3==1 and P4==1 then r[P2] := r[P1] IS FALSE
+** <li> If P3==0 and P4==1 then r[P2] := r[P1] IS NOT TRUE
+** <li> If P3==1 and P4==0 then r[P2] := r[P1] IS NOT FALSE
+** </ul>
+*/
+case OP_IsTrue: { /* in1, out2 */
+ assert( pOp->p4type==P4_INT32 );
+ assert( pOp->p4.i==0 || pOp->p4.i==1 );
+ assert( pOp->p3==0 || pOp->p3==1 );
+ sqlite3VdbeMemSetInt64(&aMem[pOp->p2],
+ sqlite3VdbeBooleanValue(&aMem[pOp->p1], pOp->p3) ^ pOp->p4.i);
+ break;
+}
+
+/* Opcode: Not P1 P2 * * *
+** Synopsis: r[P2]= !r[P1]
+**
+** Interpret the value in register P1 as a boolean value. Store the
+** boolean complement in register P2. If the value in register P1 is
+** NULL, then a NULL is stored in P2.
+*/
+case OP_Not: { /* same as TK_NOT, in1, out2 */
+ pIn1 = &aMem[pOp->p1];
+ pOut = &aMem[pOp->p2];
+ if( (pIn1->flags & MEM_Null)==0 ){
+ sqlite3VdbeMemSetInt64(pOut, !sqlite3VdbeBooleanValue(pIn1,0));
+ }else{
+ sqlite3VdbeMemSetNull(pOut);
+ }
+ break;
+}
+
+/* Opcode: BitNot P1 P2 * * *
+** Synopsis: r[P2]= ~r[P1]
+**
+** Interpret the content of register P1 as an integer. Store the
+** ones-complement of the P1 value into register P2. If P1 holds
+** a NULL then store a NULL in P2.
+*/
+case OP_BitNot: { /* same as TK_BITNOT, in1, out2 */
+ pIn1 = &aMem[pOp->p1];
+ pOut = &aMem[pOp->p2];
+ sqlite3VdbeMemSetNull(pOut);
+ if( (pIn1->flags & MEM_Null)==0 ){
+ pOut->flags = MEM_Int;
+ pOut->u.i = ~sqlite3VdbeIntValue(pIn1);
+ }
+ break;
+}
+
+/* Opcode: Once P1 P2 * * *
+**
+** Fall through to the next instruction the first time this opcode is
+** encountered on each invocation of the byte-code program. Jump to P2
+** on the second and all subsequent encounters during the same invocation.
+**
+** Top-level programs determine first invocation by comparing the P1
+** operand against the P1 operand on the OP_Init opcode at the beginning
+** of the program. If the P1 values differ, then fall through and make
+** the P1 of this opcode equal to the P1 of OP_Init. If P1 values are
+** the same then take the jump.
+**
+** For subprograms, there is a bitmask in the VdbeFrame that determines
+** whether or not the jump should be taken. The bitmask is necessary
+** because the self-altering code trick does not work for recursive
+** triggers.
+*/
+case OP_Once: { /* jump */
+ u32 iAddr; /* Address of this instruction */
+ assert( p->aOp[0].opcode==OP_Init );
+ if( p->pFrame ){
+ iAddr = (int)(pOp - p->aOp);
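+    /* One bit per instruction in the frame's aOnce[] bitmask: byte
+    ** iAddr/8, bit iAddr%8.  A set bit means this OP_Once has already
+    ** run during the current invocation of the sub-program. */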
+ if( (p->pFrame->aOnce[iAddr/8] & (1<<(iAddr & 7)))!=0 ){
+ VdbeBranchTaken(1, 2);
+ goto jump_to_p2;
+ }
+ p->pFrame->aOnce[iAddr/8] |= 1<<(iAddr & 7);
+ }else{
+ if( p->aOp[0].p1==pOp->p1 ){
+ VdbeBranchTaken(1, 2);
+ goto jump_to_p2;
+ }
+ }
+ VdbeBranchTaken(0, 2);
+ pOp->p1 = p->aOp[0].p1;
+ break;
+}
+
+/* Opcode: If P1 P2 P3 * *
+**
+** Jump to P2 if the value in register P1 is true. The value
+** is considered true if it is numeric and non-zero. If the value
+** in P1 is NULL then take the jump if and only if P3 is non-zero.
+*/
+case OP_If: { /* jump, in1 */
+ int c;
+ c = sqlite3VdbeBooleanValue(&aMem[pOp->p1], pOp->p3);
+ VdbeBranchTaken(c!=0, 2);
+ if( c ) goto jump_to_p2;
+ break;
+}
+
+/* Opcode: IfNot P1 P2 P3 * *
+**
+** Jump to P2 if the value in register P1 is False. The value
+** is considered false if it has a numeric value of zero. If the value
+** in P1 is NULL then take the jump if and only if P3 is non-zero.
+*/
+case OP_IfNot: { /* jump, in1 */
+ int c;
+ c = !sqlite3VdbeBooleanValue(&aMem[pOp->p1], !pOp->p3);
+ VdbeBranchTaken(c!=0, 2);
+ if( c ) goto jump_to_p2;
+ break;
+}
+
+/* Opcode: IsNull P1 P2 * * *
+** Synopsis: if r[P1]==NULL goto P2
+**
+** Jump to P2 if the value in register P1 is NULL.
+*/
+case OP_IsNull: { /* same as TK_ISNULL, jump, in1 */
+ pIn1 = &aMem[pOp->p1];
+ VdbeBranchTaken( (pIn1->flags & MEM_Null)!=0, 2);
+ if( (pIn1->flags & MEM_Null)!=0 ){
+ goto jump_to_p2;
+ }
+ break;
+}
+
+/* Opcode: NotNull P1 P2 * * *
+** Synopsis: if r[P1]!=NULL goto P2
+**
+** Jump to P2 if the value in register P1 is not NULL.
+*/
+case OP_NotNull: { /* same as TK_NOTNULL, jump, in1 */
+ pIn1 = &aMem[pOp->p1];
+ VdbeBranchTaken( (pIn1->flags & MEM_Null)==0, 2);
+ if( (pIn1->flags & MEM_Null)==0 ){
+ goto jump_to_p2;
+ }
+ break;
+}
+
+/* Opcode: IfNullRow P1 P2 P3 * *
+** Synopsis: if P1.nullRow then r[P3]=NULL, goto P2
+**
+** Check the cursor P1 to see if it is currently pointing at a NULL row.
+** If it is, then set register P3 to NULL and jump immediately to P2.
+** If P1 is not on a NULL row, then fall through without making any
+** changes.
+*/
+case OP_IfNullRow: { /* jump */
+ assert( pOp->p1>=0 && pOp->p1<p->nCursor );
+ assert( p->apCsr[pOp->p1]!=0 );
+ if( p->apCsr[pOp->p1]->nullRow ){
+ sqlite3VdbeMemSetNull(aMem + pOp->p3);
+ goto jump_to_p2;
+ }
+ break;
+}
+
+#ifdef SQLITE_ENABLE_OFFSET_SQL_FUNC
+/* Opcode: Offset P1 P2 P3 * *
+** Synopsis: r[P3] = sqlite_offset(P1)
+**
+** Store in register r[P3] the byte offset into the database file that is the
+** start of the payload for the record at which cursor P1 is currently
+** pointing.
+**
+** P2 is the column number for the argument to the sqlite_offset() function.
+** This opcode does not use P2 itself, but the P2 value is used by the
+** code generator. The P1, P2, and P3 operands to this opcode are the
+** same as for OP_Column.
+**
+** This opcode is only available if SQLite is compiled with the
+** -DSQLITE_ENABLE_OFFSET_SQL_FUNC option.
+*/
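+/* For example (editorial sketch):
+**
+**        SELECT sqlite_offset(x) FROM t1;
+**
+** would reach this opcode with cursor P1 open on t1 and P3 naming the
+** register that receives the byte offset. */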
+case OP_Offset: { /* out3 */
+ VdbeCursor *pC; /* The VDBE cursor */
+ assert( pOp->p1>=0 && pOp->p1<p->nCursor );
+ pC = p->apCsr[pOp->p1];
+ pOut = &p->aMem[pOp->p3];
+ if( NEVER(pC==0) || pC->eCurType!=CURTYPE_BTREE ){
+ sqlite3VdbeMemSetNull(pOut);
+ }else{
+ sqlite3VdbeMemSetInt64(pOut, sqlite3BtreeOffset(pC->uc.pCursor));
+ }
+ break;
+}
+#endif /* SQLITE_ENABLE_OFFSET_SQL_FUNC */
+
+/* Opcode: Column P1 P2 P3 P4 P5
+** Synopsis: r[P3]=PX
+**
+** Interpret the data that cursor P1 points to as a structure built using
+** the MakeRecord instruction. (See the MakeRecord opcode for additional
+** information about the format of the data.) Extract the P2-th column
+** from this record. If there are fewer than (P2+1)
+** values in the record, extract a NULL.
+**
+** The value extracted is stored in register P3.
+**
+** If the record contains fewer than (P2+1) fields, then extract a NULL. Or,
+** if the P4 argument is a P4_MEM use the value of the P4 argument as
+** the result.
+**
+** If the OPFLAG_LENGTHARG and OPFLAG_TYPEOFARG bits are set on P5 then
+** the result is guaranteed to only be used as the argument of a length()
+** or typeof() function, respectively. The loading of large blobs can be
+** skipped for length() and all content loading can be skipped for typeof().
+*/
+case OP_Column: {
+ u32 p2; /* column number to retrieve */
+ VdbeCursor *pC; /* The VDBE cursor */
+ BtCursor *pCrsr; /* The BTree cursor */
+ u32 *aOffset; /* aOffset[i] is offset to start of data for i-th column */
+ int len; /* The length of the serialized data for the column */
+ int i; /* Loop counter */
+ Mem *pDest; /* Where to write the extracted value */
+ Mem sMem; /* For storing the record being decoded */
+ const u8 *zData; /* Part of the record being decoded */
+ const u8 *zHdr; /* Next unparsed byte of the header */
+ const u8 *zEndHdr; /* Pointer to first byte after the header */
+ u64 offset64; /* 64-bit offset */
+ u32 t; /* A type code from the record header */
+ Mem *pReg; /* PseudoTable input register */
+
+ assert( pOp->p1>=0 && pOp->p1<p->nCursor );
+ pC = p->apCsr[pOp->p1];
+ assert( pC!=0 );
+ p2 = (u32)pOp->p2;
+
+  /* If the cursor cache is stale (meaning it is not currently pointing at
+ ** the correct row) then bring it up-to-date by doing the necessary
+ ** B-Tree seek. */
+ rc = sqlite3VdbeCursorMoveto(&pC, &p2);
+ if( rc ) goto abort_due_to_error;
+
+ assert( pOp->p3>0 && pOp->p3<=(p->nMem+1 - p->nCursor) );
+ pDest = &aMem[pOp->p3];
+ memAboutToChange(p, pDest);
+ assert( pC!=0 );
+ assert( p2<(u32)pC->nField );
+ aOffset = pC->aOffset;
+ assert( pC->eCurType!=CURTYPE_VTAB );
+ assert( pC->eCurType!=CURTYPE_PSEUDO || pC->nullRow );
+ assert( pC->eCurType!=CURTYPE_SORTER );
+
+ if( pC->cacheStatus!=p->cacheCtr ){ /*OPTIMIZATION-IF-FALSE*/
+ if( pC->nullRow ){
+ if( pC->eCurType==CURTYPE_PSEUDO ){
+        /* For the special case of a pseudo-cursor, the seekResult field
+ ** identifies the register that holds the record */
+ assert( pC->seekResult>0 );
+ pReg = &aMem[pC->seekResult];
+ assert( pReg->flags & MEM_Blob );
+ assert( memIsValid(pReg) );
+ pC->payloadSize = pC->szRow = pReg->n;
+ pC->aRow = (u8*)pReg->z;
+ }else{
+ sqlite3VdbeMemSetNull(pDest);
+ goto op_column_out;
+ }
+ }else{
+ pCrsr = pC->uc.pCursor;
+ assert( pC->eCurType==CURTYPE_BTREE );
+ assert( pCrsr );
+ assert( sqlite3BtreeCursorIsValid(pCrsr) );
+ pC->payloadSize = sqlite3BtreePayloadSize(pCrsr);
+ pC->aRow = sqlite3BtreePayloadFetch(pCrsr, &pC->szRow);
+ assert( pC->szRow<=pC->payloadSize );
+ assert( pC->szRow<=65536 ); /* Maximum page size is 64KiB */
+ if( pC->payloadSize > (u32)db->aLimit[SQLITE_LIMIT_LENGTH] ){
+ goto too_big;
+ }
+ }
+ pC->cacheStatus = p->cacheCtr;
+ pC->iHdrOffset = getVarint32(pC->aRow, aOffset[0]);
+ pC->nHdrParsed = 0;
+
+ if( pC->szRow<aOffset[0] ){ /*OPTIMIZATION-IF-FALSE*/
+ /* pC->aRow does not have to hold the entire row, but it does at least
+ ** need to cover the header of the record. If pC->aRow does not contain
+ ** the complete header, then set it to zero, forcing the header to be
+ ** dynamically allocated. */
+ pC->aRow = 0;
+ pC->szRow = 0;
+
+ /* Make sure a corrupt database has not given us an oversize header.
+ ** Do this now to avoid an oversize memory allocation.
+ **
+ ** Type entries can be between 1 and 5 bytes each. But 4 and 5 byte
+ ** types use so much data space that there can only be 4096 and 32 of
+ ** them, respectively. So the maximum header length results from a
+ ** 3-byte type for each of the maximum of 32768 columns plus three
+ ** extra bytes for the header length itself. 32768*3 + 3 = 98307.
+ */
+ if( aOffset[0] > 98307 || aOffset[0] > pC->payloadSize ){
+ goto op_column_corrupt;
+ }
+ }else{
+ /* This is an optimization. By skipping over the first few tests
+ ** (ex: pC->nHdrParsed<=p2) in the next section, we achieve a
+ ** measurable performance gain.
+ **
+ ** This branch is taken even if aOffset[0]==0. Such a record is never
+ ** generated by SQLite, and could be considered corruption, but we
+ ** accept it for historical reasons. When aOffset[0]==0, the code this
+ ** branch jumps to reads past the end of the record, but never more
+ ** than a few bytes. Even if the record occurs at the end of the page
+ ** content area, the "page header" comes after the page content and so
+ ** this overread is harmless. Similar overreads can occur for a corrupt
+ ** database file.
+ */
+ zData = pC->aRow;
+ assert( pC->nHdrParsed<=p2 ); /* Conditional skipped */
+ testcase( aOffset[0]==0 );
+ goto op_column_read_header;
+ }
+ }
+
+ /* Make sure at least the first p2+1 entries of the header have been
+ ** parsed and valid information is in aOffset[] and pC->aType[].
+ */
+ if( pC->nHdrParsed<=p2 ){
+ /* If there is more header available for parsing in the record, try
+ ** to extract additional fields up through the p2+1-th field
+ */
+ if( pC->iHdrOffset<aOffset[0] ){
+ /* Make sure zData points to enough of the record to cover the header. */
+ if( pC->aRow==0 ){
+ memset(&sMem, 0, sizeof(sMem));
+ rc = sqlite3VdbeMemFromBtreeZeroOffset(pC->uc.pCursor,aOffset[0],&sMem);
+ if( rc!=SQLITE_OK ) goto abort_due_to_error;
+ zData = (u8*)sMem.z;
+ }else{
+ zData = pC->aRow;
+ }
+
+ /* Fill in pC->aType[i] and aOffset[i] values through the p2-th field. */
+ op_column_read_header:
+ i = pC->nHdrParsed;
+ offset64 = aOffset[i];
+ zHdr = zData + pC->iHdrOffset;
+ zEndHdr = zData + aOffset[0];
+ testcase( zHdr>=zEndHdr );
+ do{
+ if( (pC->aType[i] = t = zHdr[0])<0x80 ){
+ zHdr++;
+ offset64 += sqlite3VdbeOneByteSerialTypeLen(t);
+ }else{
+ zHdr += sqlite3GetVarint32(zHdr, &t);
+ pC->aType[i] = t;
+ offset64 += sqlite3VdbeSerialTypeLen(t);
+ }
+ aOffset[++i] = (u32)(offset64 & 0xffffffff);
+ }while( (u32)i<=p2 && zHdr<zEndHdr );
+
+ /* The record is corrupt if any of the following are true:
+ ** (1) the bytes of the header extend past the declared header size
+ ** (2) the entire header was used but not all data was used
+ ** (3) the end of the data extends beyond the end of the record.
+ */
+ if( (zHdr>=zEndHdr && (zHdr>zEndHdr || offset64!=pC->payloadSize))
+ || (offset64 > pC->payloadSize)
+ ){
+ if( aOffset[0]==0 ){
+ i = 0;
+ zHdr = zEndHdr;
+ }else{
+ if( pC->aRow==0 ) sqlite3VdbeMemRelease(&sMem);
+ goto op_column_corrupt;
+ }
+ }
+
+ pC->nHdrParsed = i;
+ pC->iHdrOffset = (u32)(zHdr - zData);
+ if( pC->aRow==0 ) sqlite3VdbeMemRelease(&sMem);
+ }else{
+ t = 0;
+ }
+
+ /* If after trying to extract new entries from the header, nHdrParsed is
+    ** still not up to p2, that means the record has fewer than (p2+1)
+    ** columns. So the result will be either the default value or a NULL.
+ */
+ if( pC->nHdrParsed<=p2 ){
+ if( pOp->p4type==P4_MEM ){
+ sqlite3VdbeMemShallowCopy(pDest, pOp->p4.pMem, MEM_Static);
+ }else{
+ sqlite3VdbeMemSetNull(pDest);
+ }
+ goto op_column_out;
+ }
+ }else{
+ t = pC->aType[p2];
+ }
+
+ /* Extract the content for the p2+1-th column. Control can only
+ ** reach this point if aOffset[p2], aOffset[p2+1], and pC->aType[p2] are
+ ** all valid.
+ */
+ assert( p2<pC->nHdrParsed );
+ assert( rc==SQLITE_OK );
+ assert( sqlite3VdbeCheckMemInvariants(pDest) );
+ if( VdbeMemDynamic(pDest) ){
+ sqlite3VdbeMemSetNull(pDest);
+ }
+ assert( t==pC->aType[p2] );
+ if( pC->szRow>=aOffset[p2+1] ){
+ /* This is the common case where the desired content fits on the original
+ ** page - where the content is not on an overflow page */
+ zData = pC->aRow + aOffset[p2];
+ if( t<12 ){
+ sqlite3VdbeSerialGet(zData, t, pDest);
+ }else{
+ /* If the column value is a string, we need a persistent value, not
+ ** a MEM_Ephem value. This branch is a fast short-cut that is equivalent
+ ** to calling sqlite3VdbeSerialGet() and sqlite3VdbeDeephemeralize().
+ */
+ static const u16 aFlag[] = { MEM_Blob, MEM_Str|MEM_Term };
+ pDest->n = len = (t-12)/2;
+ pDest->enc = encoding;
+ if( pDest->szMalloc < len+2 ){
+ pDest->flags = MEM_Null;
+ if( sqlite3VdbeMemGrow(pDest, len+2, 0) ) goto no_mem;
+ }else{
+ pDest->z = pDest->zMalloc;
+ }
+ memcpy(pDest->z, zData, len);
+ pDest->z[len] = 0;
+ pDest->z[len+1] = 0;
+ pDest->flags = aFlag[t&1];
+ }
+ }else{
+ pDest->enc = encoding;
+ /* This branch happens only when content is on overflow pages */
+ if( ((pOp->p5 & (OPFLAG_LENGTHARG|OPFLAG_TYPEOFARG))!=0
+ && ((t>=12 && (t&1)==0) || (pOp->p5 & OPFLAG_TYPEOFARG)!=0))
+ || (len = sqlite3VdbeSerialTypeLen(t))==0
+ ){
+ /* Content is irrelevant for
+ ** 1. the typeof() function,
+ ** 2. the length(X) function if X is a blob, and
+ ** 3. if the content length is zero.
+ ** So we might as well use bogus content rather than reading
+ ** content from disk.
+ **
+ ** Although sqlite3VdbeSerialGet() may read at most 8 bytes from the
+ ** buffer passed to it, debugging function VdbeMemPrettyPrint() may
+ ** read more. Use the global constant sqlite3CtypeMap[] as the array,
+ ** as that array is 256 bytes long (plenty for VdbeMemPrettyPrint())
+ ** and it begins with a bunch of zeros.
+ */
+ sqlite3VdbeSerialGet((u8*)sqlite3CtypeMap, t, pDest);
+ }else{
+ rc = sqlite3VdbeMemFromBtree(pC->uc.pCursor, aOffset[p2], len, pDest);
+ if( rc!=SQLITE_OK ) goto abort_due_to_error;
+ sqlite3VdbeSerialGet((const u8*)pDest->z, t, pDest);
+ pDest->flags &= ~MEM_Ephem;
+ }
+ }
+
+op_column_out:
+ UPDATE_MAX_BLOBSIZE(pDest);
+ REGISTER_TRACE(pOp->p3, pDest);
+ break;
+
+op_column_corrupt:
+ if( aOp[0].p3>0 ){
+ pOp = &aOp[aOp[0].p3-1];
+ break;
+ }else{
+ rc = SQLITE_CORRUPT_BKPT;
+ goto abort_due_to_error;
+ }
+}
+
+/* Opcode: Affinity P1 P2 * P4 *
+** Synopsis: affinity(r[P1@P2])
+**
+** Apply affinities to a range of P2 registers starting with P1.
+**
+** P4 is a string that is P2 characters long. The N-th character of the
+** string indicates the column affinity that should be used for the N-th
+** memory cell in the range.
+*/
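+/* For example (editorial sketch; the affinity letters are the values of
+** the SQLITE_AFF_* macros, 'B' for TEXT and 'D' for INTEGER in this source
+** tree): an INSERT into "CREATE TABLE t1(a TEXT, b INTEGER)" might apply
+** affinities to its two input registers with P4 = "BD". */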
+case OP_Affinity: {
+ const char *zAffinity; /* The affinity to be applied */
+
+ zAffinity = pOp->p4.z;
+ assert( zAffinity!=0 );
+ assert( pOp->p2>0 );
+ assert( zAffinity[pOp->p2]==0 );
+ pIn1 = &aMem[pOp->p1];
+ while( 1 /*exit-by-break*/ ){
+ assert( pIn1 <= &p->aMem[(p->nMem+1 - p->nCursor)] );
+ assert( zAffinity[0]==SQLITE_AFF_NONE || memIsValid(pIn1) );
+ applyAffinity(pIn1, zAffinity[0], encoding);
+ if( zAffinity[0]==SQLITE_AFF_REAL && (pIn1->flags & MEM_Int)!=0 ){
+      /* When applying REAL affinity, if the result is still a MEM_Int
+      ** that fits in 6 bytes, then change the type to MEM_IntReal
+ ** so that we keep the high-resolution integer value but know that
+ ** the type really wants to be REAL. */
+ testcase( pIn1->u.i==140737488355328LL );
+ testcase( pIn1->u.i==140737488355327LL );
+ testcase( pIn1->u.i==-140737488355328LL );
+ testcase( pIn1->u.i==-140737488355329LL );
+ if( pIn1->u.i<=140737488355327LL && pIn1->u.i>=-140737488355328LL ){
+ pIn1->flags |= MEM_IntReal;
+ pIn1->flags &= ~MEM_Int;
+ }else{
+ pIn1->u.r = (double)pIn1->u.i;
+ pIn1->flags |= MEM_Real;
+ pIn1->flags &= ~MEM_Int;
+ }
+ }
+ REGISTER_TRACE((int)(pIn1-aMem), pIn1);
+ zAffinity++;
+ if( zAffinity[0]==0 ) break;
+ pIn1++;
+ }
+ break;
+}
+
+/* Opcode: MakeRecord P1 P2 P3 P4 *
+** Synopsis: r[P3]=mkrec(r[P1@P2])
+**
+** Convert P2 registers beginning with P1 into the [record format]
+** for use as a data record in a database table or as a key
+** in an index. The OP_Column opcode can decode the record later.
+**
+** P4 may be a string that is P2 characters long. The N-th character of the
+** string indicates the column affinity that should be used for the N-th
+** field of the index key.
+**
+** The mapping from character to affinity is given by the SQLITE_AFF_
+** macros defined in sqliteInt.h.
+**
+** If P4 is NULL then all index fields have the affinity BLOB.
+**
+** The meaning of P5 depends on whether or not the SQLITE_ENABLE_NULL_TRIM
+** compile-time option is enabled:
+**
+** * If SQLITE_ENABLE_NULL_TRIM is enabled, then the P5 is the index
+** of the right-most table that can be null-trimmed.
+**
+** * If SQLITE_ENABLE_NULL_TRIM is omitted, then P5 has the value
+** OPFLAG_NOCHNG_MAGIC if the OP_MakeRecord opcode is allowed to
+** accept no-change records with serial_type 10. This value is
+** only used inside an assert() and does not affect the end result.
+*/
+case OP_MakeRecord: {
+ Mem *pRec; /* The new record */
+ u64 nData; /* Number of bytes of data space */
+ int nHdr; /* Number of bytes of header space */
+ i64 nByte; /* Data space required for this record */
+ i64 nZero; /* Number of zero bytes at the end of the record */
+ int nVarint; /* Number of bytes in a varint */
+ u32 serial_type; /* Type field */
+ Mem *pData0; /* First field to be combined into the record */
+ Mem *pLast; /* Last field of the record */
+ int nField; /* Number of fields in the record */
+ char *zAffinity; /* The affinity string for the record */
+ int file_format; /* File format to use for encoding */
+ u32 len; /* Length of a field */
+ u8 *zHdr; /* Where to write next byte of the header */
+ u8 *zPayload; /* Where to write next byte of the payload */
+
+ /* Assuming the record contains N fields, the record format looks
+ ** like this:
+ **
+ ** ------------------------------------------------------------------------
+ ** | hdr-size | type 0 | type 1 | ... | type N-1 | data0 | ... | data N-1 |
+ ** ------------------------------------------------------------------------
+ **
+ ** Data(0) is taken from register P1. Data(1) comes from register P1+1
+ ** and so forth.
+ **
+ ** Each type field is a varint representing the serial type of the
+ ** corresponding data element (see sqlite3VdbeSerialType()). The
+ ** hdr-size field is also a varint which is the offset from the beginning
+ ** of the record to data0.
+ */
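+  /* Worked example (editorial, assuming file format 4 or later): the two
+  ** values (1, 'hello') encode as
+  **
+  **        03 09 17 68 65 6c 6c 6f
+  **        |  |  |  '---"hello"---'
+  **        |  |  '--- serial type 23 = 2*5+13: 5-byte text
+  **        |  '------ serial type 9: integer constant 1, no data bytes
+  **        '--------- header size, 3 bytes including this varint
+  */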
+ nData = 0; /* Number of bytes of data space */
+ nHdr = 0; /* Number of bytes of header space */
+ nZero = 0; /* Number of zero bytes at the end of the record */
+ nField = pOp->p1;
+ zAffinity = pOp->p4.z;
+ assert( nField>0 && pOp->p2>0 && pOp->p2+nField<=(p->nMem+1 - p->nCursor)+1 );
+ pData0 = &aMem[nField];
+ nField = pOp->p2;
+ pLast = &pData0[nField-1];
+ file_format = p->minWriteFileFormat;
+
+ /* Identify the output register */
+ assert( pOp->p3<pOp->p1 || pOp->p3>=pOp->p1+pOp->p2 );
+ pOut = &aMem[pOp->p3];
+ memAboutToChange(p, pOut);
+
+ /* Apply the requested affinity to all inputs
+ */
+ assert( pData0<=pLast );
+ if( zAffinity ){
+ pRec = pData0;
+ do{
+ applyAffinity(pRec, zAffinity[0], encoding);
+ if( zAffinity[0]==SQLITE_AFF_REAL && (pRec->flags & MEM_Int) ){
+ pRec->flags |= MEM_IntReal;
+ pRec->flags &= ~(MEM_Int);
+ }
+ REGISTER_TRACE((int)(pRec-aMem), pRec);
+ zAffinity++;
+ pRec++;
+ assert( zAffinity[0]==0 || pRec<=pLast );
+ }while( zAffinity[0] );
+ }
+
+#ifdef SQLITE_ENABLE_NULL_TRIM
+ /* NULLs can be safely trimmed from the end of the record, as long as
+  ** the schema format is 2 or more and none of the omitted columns
+ ** have a non-NULL default value. Also, the record must be left with
+ ** at least one field. If P5>0 then it will be one more than the
+ ** index of the right-most column with a non-NULL default value */
+ if( pOp->p5 ){
+ while( (pLast->flags & MEM_Null)!=0 && nField>pOp->p5 ){
+ pLast--;
+ nField--;
+ }
+ }
+#endif
+
+ /* Loop through the elements that will make up the record to figure
+ ** out how much space is required for the new record. After this loop,
+ ** the Mem.uTemp field of each term should hold the serial-type that will
+ ** be used for that term in the generated record:
+ **
+ ** Mem.uTemp value type
+ ** --------------- ---------------
+ ** 0 NULL
+ ** 1 1-byte signed integer
+ ** 2 2-byte signed integer
+ ** 3 3-byte signed integer
+ ** 4 4-byte signed integer
+ ** 5 6-byte signed integer
+ ** 6 8-byte signed integer
+ ** 7 IEEE float
+ ** 8 Integer constant 0
+ ** 9 Integer constant 1
+ ** 10,11 reserved for expansion
+ ** N>=12 and even BLOB
+ ** N>=13 and odd text
+ **
+ ** The following additional values are computed:
+ ** nHdr Number of bytes needed for the record header
+ ** nData Number of bytes of data space needed for the record
+ ** nZero Zero bytes at the end of the record
+ */
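+  /* For instance (editorial): the integer 1000 satisfies uu<=32767, so it
+  ** is stored as serial type 2 using the two big-endian bytes 0x03 0xE8,
+  ** while 0 and 1 in newer file formats become serial types 8 and 9 and
+  ** occupy no data bytes at all. */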
+ pRec = pLast;
+ do{
+ assert( memIsValid(pRec) );
+ if( pRec->flags & MEM_Null ){
+ if( pRec->flags & MEM_Zero ){
+ /* Values with MEM_Null and MEM_Zero are created by xColumn virtual
+ ** table methods that never invoke sqlite3_result_xxxxx() while
+ ** computing an unchanging column value in an UPDATE statement.
+ ** Give such values a special internal-use-only serial-type of 10
+ ** so that they can be passed through to xUpdate and have
+ ** a true sqlite3_value_nochange(). */
+#ifndef SQLITE_ENABLE_NULL_TRIM
+ assert( pOp->p5==OPFLAG_NOCHNG_MAGIC || CORRUPT_DB );
+#endif
+ pRec->uTemp = 10;
+ }else{
+ pRec->uTemp = 0;
+ }
+ nHdr++;
+ }else if( pRec->flags & (MEM_Int|MEM_IntReal) ){
+ /* Figure out whether to use 1, 2, 4, 6 or 8 bytes. */
+ i64 i = pRec->u.i;
+ u64 uu;
+ testcase( pRec->flags & MEM_Int );
+ testcase( pRec->flags & MEM_IntReal );
+ if( i<0 ){
+ uu = ~i;
+ }else{
+ uu = i;
+ }
+ nHdr++;
+ testcase( uu==127 ); testcase( uu==128 );
+ testcase( uu==32767 ); testcase( uu==32768 );
+ testcase( uu==8388607 ); testcase( uu==8388608 );
+ testcase( uu==2147483647 ); testcase( uu==2147483648 );
+ testcase( uu==140737488355327LL ); testcase( uu==140737488355328LL );
+ if( uu<=127 ){
+ if( (i&1)==i && file_format>=4 ){
+ pRec->uTemp = 8+(u32)uu;
+ }else{
+ nData++;
+ pRec->uTemp = 1;
+ }
+ }else if( uu<=32767 ){
+ nData += 2;
+ pRec->uTemp = 2;
+ }else if( uu<=8388607 ){
+ nData += 3;
+ pRec->uTemp = 3;
+ }else if( uu<=2147483647 ){
+ nData += 4;
+ pRec->uTemp = 4;
+ }else if( uu<=140737488355327LL ){
+ nData += 6;
+ pRec->uTemp = 5;
+ }else{
+ nData += 8;
+ if( pRec->flags & MEM_IntReal ){
+ /* If the value is IntReal and is going to take up 8 bytes to store
+ ** as an integer, then we might as well make it an 8-byte floating
+ ** point value */
+ pRec->u.r = (double)pRec->u.i;
+ pRec->flags &= ~MEM_IntReal;
+ pRec->flags |= MEM_Real;
+ pRec->uTemp = 7;
+ }else{
+ pRec->uTemp = 6;
+ }
+ }
+ }else if( pRec->flags & MEM_Real ){
+ nHdr++;
+ nData += 8;
+ pRec->uTemp = 7;
+ }else{
+ assert( db->mallocFailed || pRec->flags&(MEM_Str|MEM_Blob) );
+ assert( pRec->n>=0 );
+ len = (u32)pRec->n;
+ serial_type = (len*2) + 12 + ((pRec->flags & MEM_Str)!=0);
+ if( pRec->flags & MEM_Zero ){
+ serial_type += pRec->u.nZero*2;
+ if( nData ){
+ if( sqlite3VdbeMemExpandBlob(pRec) ) goto no_mem;
+ len += pRec->u.nZero;
+ }else{
+ nZero += pRec->u.nZero;
+ }
+ }
+ nData += len;
+ nHdr += sqlite3VarintLen(serial_type);
+ pRec->uTemp = serial_type;
+ }
+ if( pRec==pData0 ) break;
+ pRec--;
+ }while(1);
+
+ /* EVIDENCE-OF: R-22564-11647 The header begins with a single varint
+ ** which determines the total number of bytes in the header. The varint
+ ** value is the size of the header in bytes including the size varint
+ ** itself. */
+ testcase( nHdr==126 );
+ testcase( nHdr==127 );
+ if( nHdr<=126 ){
+ /* The common case */
+ nHdr += 1;
+ }else{
+ /* Rare case of a really large header */
+ nVarint = sqlite3VarintLen(nHdr);
+ nHdr += nVarint;
+ if( nVarint<sqlite3VarintLen(nHdr) ) nHdr++;
+ }
+ nByte = nHdr+nData;
+
+ /* Make sure the output register has a buffer large enough to store
+ ** the new record. The output register (pOp->p3) is not allowed to
+ ** be one of the input registers (because the following call to
+ ** sqlite3VdbeMemClearAndResize() could clobber the value before it is used).
+ */
+ if( nByte+nZero<=pOut->szMalloc ){
+ /* The output register is already large enough to hold the record.
+ ** No error checks or buffer enlargement is required */
+ pOut->z = pOut->zMalloc;
+ }else{
+ /* Need to make sure that the output is not too big and then enlarge
+ ** the output register to hold the full result */
+ if( nByte+nZero>db->aLimit[SQLITE_LIMIT_LENGTH] ){
+ goto too_big;
+ }
+ if( sqlite3VdbeMemClearAndResize(pOut, (int)nByte) ){
+ goto no_mem;
+ }
+ }
+ pOut->n = (int)nByte;
+ pOut->flags = MEM_Blob;
+ if( nZero ){
+ pOut->u.nZero = nZero;
+ pOut->flags |= MEM_Zero;
+ }
+ UPDATE_MAX_BLOBSIZE(pOut);
+ zHdr = (u8 *)pOut->z;
+ zPayload = zHdr + nHdr;
+
+ /* Write the record */
+ zHdr += putVarint32(zHdr, nHdr);
+ assert( pData0<=pLast );
+ pRec = pData0;
+ do{
+ serial_type = pRec->uTemp;
+ /* EVIDENCE-OF: R-06529-47362 Following the size varint are one or more
+ ** additional varints, one per column. */
+ zHdr += putVarint32(zHdr, serial_type); /* serial type */
+ /* EVIDENCE-OF: R-64536-51728 The values for each column in the record
+ ** immediately follow the header. */
+ zPayload += sqlite3VdbeSerialPut(zPayload, pRec, serial_type); /* content */
+ }while( (++pRec)<=pLast );
+ assert( nHdr==(int)(zHdr - (u8*)pOut->z) );
+ assert( nByte==(int)(zPayload - (u8*)pOut->z) );
+
+ assert( pOp->p3>0 && pOp->p3<=(p->nMem+1 - p->nCursor) );
+ REGISTER_TRACE(pOp->p3, pOut);
+ break;
+}
+
+/* Opcode: Count P1 P2 P3 * *
+** Synopsis: r[P2]=count()
+**
+** Store the number of entries (an integer value) in the table or index
+** opened by cursor P1 in register P2.
+**
+** If P3==0, then an exact count is obtained, which involves visiting
+** every btree page of the table. But if P3 is non-zero, an estimate
+** is returned based on the current cursor position.
+*/
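+/* For example (editorial sketch): an exact "SELECT count(*) FROM t1" on a
+** plain table is typically implemented with this opcode and P3==0. */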
+case OP_Count: { /* out2 */
+ i64 nEntry;
+ BtCursor *pCrsr;
+
+ assert( p->apCsr[pOp->p1]->eCurType==CURTYPE_BTREE );
+ pCrsr = p->apCsr[pOp->p1]->uc.pCursor;
+ assert( pCrsr );
+ if( pOp->p3 ){
+ nEntry = sqlite3BtreeRowCountEst(pCrsr);
+ }else{
+ nEntry = 0; /* Not needed. Only used to silence a warning. */
+ rc = sqlite3BtreeCount(db, pCrsr, &nEntry);
+ if( rc ) goto abort_due_to_error;
+ }
+ pOut = out2Prerelease(p, pOp);
+ pOut->u.i = nEntry;
+ goto check_for_interrupt;
+}
+
+/* Opcode: Savepoint P1 * * P4 *
+**
+** Open, release or rollback the savepoint named by parameter P4, depending
+** on the value of P1. To open a new savepoint set P1==0 (SAVEPOINT_BEGIN).
+** To release (commit) an existing savepoint set P1==1 (SAVEPOINT_RELEASE).
+** To rollback an existing savepoint set P1==2 (SAVEPOINT_ROLLBACK).
+*/
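+/* Roughly speaking (editorial sketch): "SAVEPOINT s1" compiles to this
+** opcode with P1==SAVEPOINT_BEGIN, "RELEASE s1" with P1==SAVEPOINT_RELEASE,
+** and "ROLLBACK TO s1" with P1==SAVEPOINT_ROLLBACK, with P4 holding the
+** name "s1" in each case. */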
+case OP_Savepoint: {
+ int p1; /* Value of P1 operand */
+ char *zName; /* Name of savepoint */
+ int nName;
+ Savepoint *pNew;
+ Savepoint *pSavepoint;
+ Savepoint *pTmp;
+ int iSavepoint;
+ int ii;
+
+ p1 = pOp->p1;
+ zName = pOp->p4.z;
+
+ /* Assert that the p1 parameter is valid. Also that if there is no open
+ ** transaction, then there cannot be any savepoints.
+ */
+ assert( db->pSavepoint==0 || db->autoCommit==0 );
+ assert( p1==SAVEPOINT_BEGIN||p1==SAVEPOINT_RELEASE||p1==SAVEPOINT_ROLLBACK );
+ assert( db->pSavepoint || db->isTransactionSavepoint==0 );
+ assert( checkSavepointCount(db) );
+ assert( p->bIsReader );
+
+ if( p1==SAVEPOINT_BEGIN ){
+ if( db->nVdbeWrite>0 ){
+ /* A new savepoint cannot be created if there are active write
+ ** statements (i.e. open read/write incremental blob handles).
+ */
+ sqlite3VdbeError(p, "cannot open savepoint - SQL statements in progress");
+ rc = SQLITE_BUSY;
+ }else{
+ nName = sqlite3Strlen30(zName);
+
+#ifndef SQLITE_OMIT_VIRTUALTABLE
+ /* This call is Ok even if this savepoint is actually a transaction
+      ** savepoint (and therefore should not prompt xSavepoint() callbacks).
+ ** If this is a transaction savepoint being opened, it is guaranteed
+ ** that the db->aVTrans[] array is empty. */
+ assert( db->autoCommit==0 || db->nVTrans==0 );
+ rc = sqlite3VtabSavepoint(db, SAVEPOINT_BEGIN,
+ db->nStatement+db->nSavepoint);
+ if( rc!=SQLITE_OK ) goto abort_due_to_error;
+#endif
+
+ /* Create a new savepoint structure. */
+ pNew = sqlite3DbMallocRawNN(db, sizeof(Savepoint)+nName+1);
+ if( pNew ){
+ pNew->zName = (char *)&pNew[1];
+ memcpy(pNew->zName, zName, nName+1);
+
+ /* If there is no open transaction, then mark this as a special
+ ** "transaction savepoint". */
+ if( db->autoCommit ){
+ db->autoCommit = 0;
+ db->isTransactionSavepoint = 1;
+ }else{
+ db->nSavepoint++;
+ }
+
+ /* Link the new savepoint into the database handle's list. */
+ pNew->pNext = db->pSavepoint;
+ db->pSavepoint = pNew;
+ pNew->nDeferredCons = db->nDeferredCons;
+ pNew->nDeferredImmCons = db->nDeferredImmCons;
+ }
+ }
+ }else{
+ assert( p1==SAVEPOINT_RELEASE || p1==SAVEPOINT_ROLLBACK );
+ iSavepoint = 0;
+
+    /* Find the named savepoint. If there is no such savepoint, then an
+    ** error is returned to the user. */
+ for(
+ pSavepoint = db->pSavepoint;
+ pSavepoint && sqlite3StrICmp(pSavepoint->zName, zName);
+ pSavepoint = pSavepoint->pNext
+ ){
+ iSavepoint++;
+ }
+ if( !pSavepoint ){
+ sqlite3VdbeError(p, "no such savepoint: %s", zName);
+ rc = SQLITE_ERROR;
+ }else if( db->nVdbeWrite>0 && p1==SAVEPOINT_RELEASE ){
+ /* It is not possible to release (commit) a savepoint if there are
+ ** active write statements.
+ */
+ sqlite3VdbeError(p, "cannot release savepoint - "
+ "SQL statements in progress");
+ rc = SQLITE_BUSY;
+ }else{
+
+ /* Determine whether or not this is a transaction savepoint. If so,
+ ** and this is a RELEASE command, then the current transaction
+ ** is committed.
+ */
+ int isTransaction = pSavepoint->pNext==0 && db->isTransactionSavepoint;
+ if( isTransaction && p1==SAVEPOINT_RELEASE ){
+ if( (rc = sqlite3VdbeCheckFk(p, 1))!=SQLITE_OK ){
+ goto vdbe_return;
+ }
+ db->autoCommit = 1;
+ if( sqlite3VdbeHalt(p)==SQLITE_BUSY ){
+ p->pc = (int)(pOp - aOp);
+ db->autoCommit = 0;
+ p->rc = rc = SQLITE_BUSY;
+ goto vdbe_return;
+ }
+ rc = p->rc;
+ if( rc ){
+ db->autoCommit = 0;
+ }else{
+ db->isTransactionSavepoint = 0;
+ }
+ }else{
+ int isSchemaChange;
+ iSavepoint = db->nSavepoint - iSavepoint - 1;
+ if( p1==SAVEPOINT_ROLLBACK ){
+ isSchemaChange = (db->mDbFlags & DBFLAG_SchemaChange)!=0;
+ for(ii=0; ii<db->nDb; ii++){
+ rc = sqlite3BtreeTripAllCursors(db->aDb[ii].pBt,
+ SQLITE_ABORT_ROLLBACK,
+ isSchemaChange==0);
+ if( rc!=SQLITE_OK ) goto abort_due_to_error;
+ }
+ }else{
+ assert( p1==SAVEPOINT_RELEASE );
+ isSchemaChange = 0;
+ }
+ for(ii=0; ii<db->nDb; ii++){
+ rc = sqlite3BtreeSavepoint(db->aDb[ii].pBt, p1, iSavepoint);
+ if( rc!=SQLITE_OK ){
+ goto abort_due_to_error;
+ }
+ }
+ if( isSchemaChange ){
+ sqlite3ExpirePreparedStatements(db, 0);
+ sqlite3ResetAllSchemasOfConnection(db);
+ db->mDbFlags |= DBFLAG_SchemaChange;
+ }
+ }
+ if( rc ) goto abort_due_to_error;
+
+ /* Regardless of whether this is a RELEASE or ROLLBACK, destroy all
+ ** savepoints nested inside of the savepoint being operated on. */
+ while( db->pSavepoint!=pSavepoint ){
+ pTmp = db->pSavepoint;
+ db->pSavepoint = pTmp->pNext;
+ sqlite3DbFree(db, pTmp);
+ db->nSavepoint--;
+ }
+
+ /* If it is a RELEASE, then destroy the savepoint being operated on
+ ** too. If it is a ROLLBACK TO, then set the number of deferred
+ ** constraint violations present in the database to the value stored
+ ** when the savepoint was created. */
+ if( p1==SAVEPOINT_RELEASE ){
+ assert( pSavepoint==db->pSavepoint );
+ db->pSavepoint = pSavepoint->pNext;
+ sqlite3DbFree(db, pSavepoint);
+ if( !isTransaction ){
+ db->nSavepoint--;
+ }
+ }else{
+ assert( p1==SAVEPOINT_ROLLBACK );
+ db->nDeferredCons = pSavepoint->nDeferredCons;
+ db->nDeferredImmCons = pSavepoint->nDeferredImmCons;
+ }
+
+ if( !isTransaction || p1==SAVEPOINT_ROLLBACK ){
+ rc = sqlite3VtabSavepoint(db, p1, iSavepoint);
+ if( rc!=SQLITE_OK ) goto abort_due_to_error;
+ }
+ }
+ }
+ if( rc ) goto abort_due_to_error;
+
+ break;
+}
+
+/* Opcode: AutoCommit P1 P2 * * *
+**
+** Set the database auto-commit flag to P1 (1 or 0). If P2 is true, roll
+** back any currently active btree transactions. If there are any active
+** VMs (apart from this one), then a ROLLBACK fails. A COMMIT fails if
+** there are active writing VMs or active VMs that use shared cache.
+**
+** This instruction causes the VM to halt.
+*/
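+/* Roughly speaking (editorial sketch): "BEGIN" compiles to AutoCommit 0 0,
+** "COMMIT" to AutoCommit 1 0, and "ROLLBACK" to AutoCommit 1 1. */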
+case OP_AutoCommit: {
+ int desiredAutoCommit;
+ int iRollback;
+
+ desiredAutoCommit = pOp->p1;
+ iRollback = pOp->p2;
+ assert( desiredAutoCommit==1 || desiredAutoCommit==0 );
+ assert( desiredAutoCommit==1 || iRollback==0 );
+ assert( db->nVdbeActive>0 ); /* At least this one VM is active */
+ assert( p->bIsReader );
+
+ if( desiredAutoCommit!=db->autoCommit ){
+ if( iRollback ){
+ assert( desiredAutoCommit==1 );
+ sqlite3RollbackAll(db, SQLITE_ABORT_ROLLBACK);
+ db->autoCommit = 1;
+ }else if( desiredAutoCommit && db->nVdbeWrite>0 ){
+ /* If this instruction implements a COMMIT and other VMs are writing
+      ** then return an error indicating that the other VMs must complete first.
+ */
+ sqlite3VdbeError(p, "cannot commit transaction - "
+ "SQL statements in progress");
+ rc = SQLITE_BUSY;
+ goto abort_due_to_error;
+ }else if( (rc = sqlite3VdbeCheckFk(p, 1))!=SQLITE_OK ){
+ goto vdbe_return;
+ }else{
+ db->autoCommit = (u8)desiredAutoCommit;
+ }
+ if( sqlite3VdbeHalt(p)==SQLITE_BUSY ){
+ p->pc = (int)(pOp - aOp);
+ db->autoCommit = (u8)(1-desiredAutoCommit);
+ p->rc = rc = SQLITE_BUSY;
+ goto vdbe_return;
+ }
+ sqlite3CloseSavepoints(db);
+ if( p->rc==SQLITE_OK ){
+ rc = SQLITE_DONE;
+ }else{
+ rc = SQLITE_ERROR;
+ }
+ goto vdbe_return;
+ }else{
+ sqlite3VdbeError(p,
+ (!desiredAutoCommit)?"cannot start a transaction within a transaction":(
+ (iRollback)?"cannot rollback - no transaction is active":
+ "cannot commit - no transaction is active"));
+
+ rc = SQLITE_ERROR;
+ goto abort_due_to_error;
+ }
+ /*NOTREACHED*/ assert(0);
+}
+
+/* Opcode: Transaction P1 P2 P3 P4 P5
+**
+** Begin a transaction on database P1 if a transaction is not already
+** active.
+** If P2 is non-zero, then a write-transaction is started, or if a
+** read-transaction is already active, it is upgraded to a write-transaction.
+** If P2 is zero, then a read-transaction is started.
+**
+** P1 is the index of the database file on which the transaction is
+** started. Index 0 is the main database file and index 1 is the
+** file used for temporary tables. Indices of 2 or more are used for
+** attached databases.
+**
+** If a write-transaction is started and the Vdbe.usesStmtJournal flag is
+** true (this flag is set if the Vdbe may modify more than one row and may
+** throw an ABORT exception), a statement transaction may also be opened.
+** More specifically, a statement transaction is opened iff the database
+** connection is currently not in autocommit mode, or if there are other
+** active statements. A statement transaction allows the changes made by this
+** VDBE to be rolled back after an error without having to roll back the
+** entire transaction. If no error is encountered, the statement transaction
+** will automatically commit when the VDBE halts.
+**
+** If P5!=0 then this opcode also checks the schema cookie against P3
+** and the schema generation counter against P4.
+** The cookie changes its value whenever the database schema changes.
+** This operation is used to detect that the cookie has changed
+** and that the current process needs to reread the schema. If the schema
+** cookie in P3 differs from the schema cookie in the database header or
+** if the schema generation counter in P4 differs from the current
+** generation counter, then an SQLITE_SCHEMA error is raised and execution
+** halts. The sqlite3_step() wrapper function might then reprepare the
+** statement and rerun it from the beginning.
+*/
+case OP_Transaction: {
+ Btree *pBt;
+ int iMeta = 0;
+
+ assert( p->bIsReader );
+ assert( p->readOnly==0 || pOp->p2==0 );
+ assert( pOp->p1>=0 && pOp->p1<db->nDb );
+ assert( DbMaskTest(p->btreeMask, pOp->p1) );
+ if( pOp->p2 && (db->flags & SQLITE_QueryOnly)!=0 ){
+ rc = SQLITE_READONLY;
+ goto abort_due_to_error;
+ }
+ pBt = db->aDb[pOp->p1].pBt;
+
+ if( pBt ){
+ rc = sqlite3BtreeBeginTrans(pBt, pOp->p2, &iMeta);
+ testcase( rc==SQLITE_BUSY_SNAPSHOT );
+ testcase( rc==SQLITE_BUSY_RECOVERY );
+ if( rc!=SQLITE_OK ){
+ if( (rc&0xff)==SQLITE_BUSY ){
+ p->pc = (int)(pOp - aOp);
+ p->rc = rc;
+ goto vdbe_return;
+ }
+ goto abort_due_to_error;
+ }
+
+ if( p->usesStmtJournal
+ && pOp->p2
+ && (db->autoCommit==0 || db->nVdbeRead>1)
+ ){
+ assert( sqlite3BtreeIsInTrans(pBt) );
+ if( p->iStatement==0 ){
+ assert( db->nStatement>=0 && db->nSavepoint>=0 );
+ db->nStatement++;
+ p->iStatement = db->nSavepoint + db->nStatement;
+ }
+
+ rc = sqlite3VtabSavepoint(db, SAVEPOINT_BEGIN, p->iStatement-1);
+ if( rc==SQLITE_OK ){
+ rc = sqlite3BtreeBeginStmt(pBt, p->iStatement);
+ }
+
+    /* Store the current value of the database handle's deferred constraint
+ ** counter. If the statement transaction needs to be rolled back,
+ ** the value of this counter needs to be restored too. */
+ p->nStmtDefCons = db->nDeferredCons;
+ p->nStmtDefImmCons = db->nDeferredImmCons;
+ }
+ }
+ assert( pOp->p5==0 || pOp->p4type==P4_INT32 );
+ if( pOp->p5
+ && (iMeta!=pOp->p3
+ || db->aDb[pOp->p1].pSchema->iGeneration!=pOp->p4.i)
+ ){
+ /*
+ ** IMPLEMENTATION-OF: R-03189-51135 As each SQL statement runs, the schema
+ ** version is checked to ensure that the schema has not changed since the
+ ** SQL statement was prepared.
+ */
+ sqlite3DbFree(db, p->zErrMsg);
+ p->zErrMsg = sqlite3DbStrDup(db, "database schema has changed");
+ /* If the schema-cookie from the database file matches the cookie
+ ** stored with the in-memory representation of the schema, do
+ ** not reload the schema from the database file.
+ **
+ ** If virtual-tables are in use, this is not just an optimization.
+ ** Often, v-tables store their data in other SQLite tables, which
+ ** are queried from within xNext() and other v-table methods using
+ ** prepared queries. If such a query is out-of-date, we do not want to
+ ** discard the database schema, as the user code implementing the
+ ** v-table would have to be ready for the sqlite3_vtab structure itself
+ ** to be invalidated whenever sqlite3_step() is called from within
+ ** a v-table method.
+ */
+ if( db->aDb[pOp->p1].pSchema->schema_cookie!=iMeta ){
+ sqlite3ResetOneSchema(db, pOp->p1);
+ }
+ p->expired = 1;
+ rc = SQLITE_SCHEMA;
+ }
+ if( rc ) goto abort_due_to_error;
+ break;
+}
+
+/* Opcode: ReadCookie P1 P2 P3 * *
+**
+** Read cookie number P3 from database P1 and write it into register P2.
+** P3==1 is the schema version. P3==2 is the database format.
+** P3==3 is the recommended pager cache size, and so forth. P1==0 is
+** the main database file and P1==1 is the database file used to store
+** temporary tables.
+**
+** There must be a read-lock on the database (either a transaction
+** must be started or there must be an open cursor) before
+** executing this instruction.
+*/
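+/* For example (editorial sketch): "PRAGMA schema_version" reads cookie
+** number 1, BTREE_SCHEMA_VERSION, through this opcode. */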
+case OP_ReadCookie: { /* out2 */
+ int iMeta;
+ int iDb;
+ int iCookie;
+
+ assert( p->bIsReader );
+ iDb = pOp->p1;
+ iCookie = pOp->p3;
+ assert( pOp->p3<SQLITE_N_BTREE_META );
+ assert( iDb>=0 && iDb<db->nDb );
+ assert( db->aDb[iDb].pBt!=0 );
+ assert( DbMaskTest(p->btreeMask, iDb) );
+
+ sqlite3BtreeGetMeta(db->aDb[iDb].pBt, iCookie, (u32 *)&iMeta);
+ pOut = out2Prerelease(p, pOp);
+ pOut->u.i = iMeta;
+ break;
+}
+
+/* Opcode: SetCookie P1 P2 P3 * P5
+**
+** Write the integer value P3 into cookie number P2 of database P1.
+** P2==1 is the schema version. P2==2 is the database format.
+** P2==3 is the recommended pager cache
+** size, and so forth. P1==0 is the main database file and P1==1 is the
+** database file used to store temporary tables.
+**
+** A transaction must be started before executing this opcode.
+**
+** If P2 is the SCHEMA_VERSION cookie (cookie number 1) then the internal
+** schema version is set to P3-P5. The "PRAGMA schema_version=N" statement
+** has P5 set to 1, so that the internal schema version will be different
+** from the database schema version, resulting in a schema reset.
+*/
+case OP_SetCookie: {
+ Db *pDb;
+
+ sqlite3VdbeIncrWriteCounter(p, 0);
+ assert( pOp->p2<SQLITE_N_BTREE_META );
+ assert( pOp->p1>=0 && pOp->p1<db->nDb );
+ assert( DbMaskTest(p->btreeMask, pOp->p1) );
+ assert( p->readOnly==0 );
+ pDb = &db->aDb[pOp->p1];
+ assert( pDb->pBt!=0 );
+ assert( sqlite3SchemaMutexHeld(db, pOp->p1, 0) );
+ /* See note about index shifting on OP_ReadCookie */
+ rc = sqlite3BtreeUpdateMeta(pDb->pBt, pOp->p2, pOp->p3);
+ if( pOp->p2==BTREE_SCHEMA_VERSION ){
+ /* When the schema cookie changes, record the new cookie internally */
+ pDb->pSchema->schema_cookie = pOp->p3 - pOp->p5;
+ db->mDbFlags |= DBFLAG_SchemaChange;
+ }else if( pOp->p2==BTREE_FILE_FORMAT ){
+ /* Record changes in the file format */
+ pDb->pSchema->file_format = pOp->p3;
+ }
+ if( pOp->p1==1 ){
+ /* Invalidate all prepared statements whenever the TEMP database
+ ** schema is changed. Ticket #1644 */
+ sqlite3ExpirePreparedStatements(db, 0);
+ p->expired = 0;
+ }
+ if( rc ) goto abort_due_to_error;
+ break;
+}
+
+/* Opcode: OpenRead P1 P2 P3 P4 P5
+** Synopsis: root=P2 iDb=P3
+**
+** Open a read-only cursor for the database table whose root page is
+** P2 in a database file. The database file is determined by P3.
+** P3==0 means the main database, P3==1 means the database used for
+** temporary tables, and P3>1 means use the corresponding attached
+** database. Give the new cursor an identifier of P1. The P1
+** values need not be contiguous but all P1 values should be small integers.
+** It is an error for P1 to be negative.
+**
+** Allowed P5 bits:
+** <ul>
+** <li> <b>0x02 OPFLAG_SEEKEQ</b>: This cursor will only be used for
+** equality lookups (implemented as a pair of opcodes OP_SeekGE/OP_IdxGT
+** or OP_SeekLE/OP_IdxLT)
+** </ul>
+**
+** The P4 value may be either an integer (P4_INT32) or a pointer to
+** a KeyInfo structure (P4_KEYINFO). If it is a pointer to a KeyInfo
+** object, then the table being opened must be an [index b-tree] where the
+** KeyInfo object defines the content and collating
+** sequence of that index b-tree. Otherwise, if P4 is an integer
+** value, then the table being opened must be a [table b-tree] with a
+** number of columns no less than the value of P4.
+**
+** See also: OpenWrite, ReopenIdx
+*/
+/* Opcode: ReopenIdx P1 P2 P3 P4 P5
+** Synopsis: root=P2 iDb=P3
+**
+** The ReopenIdx opcode works like OP_OpenRead except that it first
+** checks to see if the cursor on P1 is already open on the same
+** b-tree and if it is this opcode becomes a no-op. In other words,
+** if the cursor is already open, do not reopen it.
+**
+** The ReopenIdx opcode may only be used with P5==0 or P5==OPFLAG_SEEKEQ
+** and with P4 being a P4_KEYINFO object. Furthermore, the P3 value must
+** be the same as every other ReopenIdx or OpenRead for the same cursor
+** number.
+**
+** Allowed P5 bits:
+** <ul>
+** <li> <b>0x02 OPFLAG_SEEKEQ</b>: This cursor will only be used for
+** equality lookups (implemented as a pair of opcodes OP_SeekGE/OP_IdxGT
+** or OP_SeekLE/OP_IdxLT)
+** </ul>
+**
+** See also: OP_OpenRead, OP_OpenWrite
+*/
+/* Opcode: OpenWrite P1 P2 P3 P4 P5
+** Synopsis: root=P2 iDb=P3
+**
+** Open a read/write cursor named P1 on the table or index whose root
+** page is P2 (or whose root page is held in register P2 if the
+** OPFLAG_P2ISREG bit is set in P5 - see below).
+**
+** The P4 value may be either an integer (P4_INT32) or a pointer to
+** a KeyInfo structure (P4_KEYINFO). If it is a pointer to a KeyInfo
+** object, then the table being opened must be an [index b-tree] where the
+** KeyInfo object defines the content and collating
+** sequence of that index b-tree. Otherwise, if P4 is an integer
+** value, then the table being opened must be a [table b-tree] with a
+** number of columns no less than the value of P4.
+**
+** Allowed P5 bits:
+** <ul>
+** <li> <b>0x02 OPFLAG_SEEKEQ</b>: This cursor will only be used for
+** equality lookups (implemented as a pair of opcodes OP_SeekGE/OP_IdxGT
+** or OP_SeekLE/OP_IdxLT)
+** <li> <b>0x08 OPFLAG_FORDELETE</b>: This cursor is used only to seek
+** and subsequently delete entries in an index btree. This is a
+** hint to the storage engine that the storage engine is allowed to
+** ignore. The hint is not used by the official SQLite b*tree storage
+** engine, but is used by COMDB2.
+** <li> <b>0x10 OPFLAG_P2ISREG</b>: Use the content of register P2
+** as the root page, not the value of P2 itself.
+** </ul>
+**
+** This instruction works like OpenRead except that it opens the cursor
+** in read/write mode.
+**
+** See also: OP_OpenRead, OP_ReopenIdx
+*/
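+/* As a sketch (editorial): a simple "SELECT * FROM t1" typically opens its
+** table cursor with OP_OpenRead, where P2 is the root page of t1 taken from
+** the schema and P4 (P4_INT32) is the number of columns to be read. */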
+case OP_ReopenIdx: {
+ int nField;
+ KeyInfo *pKeyInfo;
+ u32 p2;
+ int iDb;
+ int wrFlag;
+ Btree *pX;
+ VdbeCursor *pCur;
+ Db *pDb;
+
+ assert( pOp->p5==0 || pOp->p5==OPFLAG_SEEKEQ );
+ assert( pOp->p4type==P4_KEYINFO );
+ pCur = p->apCsr[pOp->p1];
+ if( pCur && pCur->pgnoRoot==(u32)pOp->p2 ){
+ assert( pCur->iDb==pOp->p3 ); /* Guaranteed by the code generator */
+ goto open_cursor_set_hints;
+ }
+ /* If the cursor is not currently open or is open on a different
+ ** index, then fall through into OP_OpenRead to force a reopen */
+case OP_OpenRead:
+case OP_OpenWrite:
+
+ assert( pOp->opcode==OP_OpenWrite || pOp->p5==0 || pOp->p5==OPFLAG_SEEKEQ );
+ assert( p->bIsReader );
+ assert( pOp->opcode==OP_OpenRead || pOp->opcode==OP_ReopenIdx
+ || p->readOnly==0 );
+
+ if( p->expired==1 ){
+ rc = SQLITE_ABORT_ROLLBACK;
+ goto abort_due_to_error;
+ }
+
+ nField = 0;
+ pKeyInfo = 0;
+ p2 = (u32)pOp->p2;
+ iDb = pOp->p3;
+ assert( iDb>=0 && iDb<db->nDb );
+ assert( DbMaskTest(p->btreeMask, iDb) );
+ pDb = &db->aDb[iDb];
+ pX = pDb->pBt;
+ assert( pX!=0 );
+ if( pOp->opcode==OP_OpenWrite ){
+ assert( OPFLAG_FORDELETE==BTREE_FORDELETE );
+ wrFlag = BTREE_WRCSR | (pOp->p5 & OPFLAG_FORDELETE);
+ assert( sqlite3SchemaMutexHeld(db, iDb, 0) );
+ if( pDb->pSchema->file_format < p->minWriteFileFormat ){
+ p->minWriteFileFormat = pDb->pSchema->file_format;
+ }
+ }else{
+ wrFlag = 0;
+ }
+ if( pOp->p5 & OPFLAG_P2ISREG ){
+ assert( p2>0 );
+ assert( p2<=(u32)(p->nMem+1 - p->nCursor) );
+ assert( pOp->opcode==OP_OpenWrite );
+ pIn2 = &aMem[p2];
+ assert( memIsValid(pIn2) );
+ assert( (pIn2->flags & MEM_Int)!=0 );
+ sqlite3VdbeMemIntegerify(pIn2);
+ p2 = (int)pIn2->u.i;
+ /* The p2 value always comes from a prior OP_CreateBtree opcode and
+ ** that opcode will always set the p2 value to 2 or more or else fail.
+ ** If there were a failure, the prepared statement would have halted
+ ** before reaching this instruction. */
+ assert( p2>=2 );
+ }
+ if( pOp->p4type==P4_KEYINFO ){
+ pKeyInfo = pOp->p4.pKeyInfo;
+ assert( pKeyInfo->enc==ENC(db) );
+ assert( pKeyInfo->db==db );
+ nField = pKeyInfo->nAllField;
+ }else if( pOp->p4type==P4_INT32 ){
+ nField = pOp->p4.i;
+ }
+ assert( pOp->p1>=0 );
+ assert( nField>=0 );
+ testcase( nField==0 ); /* Table with INTEGER PRIMARY KEY and nothing else */
+ pCur = allocateCursor(p, pOp->p1, nField, iDb, CURTYPE_BTREE);
+ if( pCur==0 ) goto no_mem;
+ pCur->nullRow = 1;
+ pCur->isOrdered = 1;
+ pCur->pgnoRoot = p2;
+#ifdef SQLITE_DEBUG
+ pCur->wrFlag = wrFlag;
+#endif
+ rc = sqlite3BtreeCursor(pX, p2, wrFlag, pKeyInfo, pCur->uc.pCursor);
+ pCur->pKeyInfo = pKeyInfo;
+ /* Set the VdbeCursor.isTable variable. Previous versions of
+ ** SQLite used to check if the root-page flags were sane at this point
+ ** and report database corruption if they were not, but this check has
+ ** since moved into the btree layer. */
+ pCur->isTable = pOp->p4type!=P4_KEYINFO;
+
+open_cursor_set_hints:
+ assert( OPFLAG_BULKCSR==BTREE_BULKLOAD );
+ assert( OPFLAG_SEEKEQ==BTREE_SEEK_EQ );
+ testcase( pOp->p5 & OPFLAG_BULKCSR );
+  testcase( pOp->p5 & OPFLAG_SEEKEQ );
+ sqlite3BtreeCursorHintFlags(pCur->uc.pCursor,
+ (pOp->p5 & (OPFLAG_BULKCSR|OPFLAG_SEEKEQ)));
+ if( rc ) goto abort_due_to_error;
+ break;
+}
+
+/* Opcode: OpenDup P1 P2 * * *
+**
+** Open a new cursor P1 that points to the same ephemeral table as
+** cursor P2. The P2 cursor must have been opened by a prior OP_OpenEphemeral
+** opcode. Only ephemeral cursors may be duplicated.
+**
+** Duplicate ephemeral cursors are used for self-joins of materialized views.
+*/
+case OP_OpenDup: {
+ VdbeCursor *pOrig; /* The original cursor to be duplicated */
+ VdbeCursor *pCx; /* The new cursor */
+
+ pOrig = p->apCsr[pOp->p2];
+ assert( pOrig );
+ assert( pOrig->pBtx!=0 ); /* Only ephemeral cursors can be duplicated */
+
+ pCx = allocateCursor(p, pOp->p1, pOrig->nField, -1, CURTYPE_BTREE);
+ if( pCx==0 ) goto no_mem;
+ pCx->nullRow = 1;
+ pCx->isEphemeral = 1;
+ pCx->pKeyInfo = pOrig->pKeyInfo;
+ pCx->isTable = pOrig->isTable;
+ pCx->pgnoRoot = pOrig->pgnoRoot;
+ pCx->isOrdered = pOrig->isOrdered;
+ rc = sqlite3BtreeCursor(pOrig->pBtx, pCx->pgnoRoot, BTREE_WRCSR,
+ pCx->pKeyInfo, pCx->uc.pCursor);
+ /* The sqlite3BtreeCursor() routine can only fail for the first cursor
+ ** opened for a database. Since there is already an open cursor when this
+  ** opcode is run, sqlite3BtreeCursor() cannot fail. */
+ assert( rc==SQLITE_OK );
+ break;
+}
+
+
+/* Opcode: OpenEphemeral P1 P2 * P4 P5
+** Synopsis: nColumn=P2
+**
+** Open a new cursor P1 to a transient table.
+** The cursor is always opened read/write even if
+** the main database is read-only. The ephemeral
+** table is deleted automatically when the cursor is closed.
+**
+** If the cursor P1 is already opened on an ephemeral table, the table
+** is cleared (all content is erased).
+**
+** P2 is the number of columns in the ephemeral table.
+** The cursor points to a BTree table if P4==0 and to a BTree index
+** if P4 is not 0. If P4 is not NULL, it points to a KeyInfo structure
+** that defines the format of keys in the index.
+**
+** The P5 parameter can be a mask of the BTREE_* flags defined
+** in btree.h. These flags control aspects of the operation of
+** the btree. The BTREE_OMIT_JOURNAL and BTREE_SINGLE flags are
+** added automatically.
+*/
+/* Opcode: OpenAutoindex P1 P2 * P4 *
+** Synopsis: nColumn=P2
+**
+** This opcode works the same as OP_OpenEphemeral. It has a
+** different name to distinguish its use. Tables created
+** by this opcode will be used for automatically created transient
+** indices in joins.
+*/
+case OP_OpenAutoindex:
+case OP_OpenEphemeral: {
+ VdbeCursor *pCx;
+ KeyInfo *pKeyInfo;
+
+ static const int vfsFlags =
+ SQLITE_OPEN_READWRITE |
+ SQLITE_OPEN_CREATE |
+ SQLITE_OPEN_EXCLUSIVE |
+ SQLITE_OPEN_DELETEONCLOSE |
+ SQLITE_OPEN_TRANSIENT_DB;
+ assert( pOp->p1>=0 );
+ assert( pOp->p2>=0 );
+ pCx = p->apCsr[pOp->p1];
+ if( pCx && pCx->pBtx ){
+    /* If the ephemeral table is already open, erase all existing content
+ ** so that the table is empty again, rather than creating a new table. */
+ assert( pCx->isEphemeral );
+ pCx->seqCount = 0;
+ pCx->cacheStatus = CACHE_STALE;
+ rc = sqlite3BtreeClearTable(pCx->pBtx, pCx->pgnoRoot, 0);
+ }else{
+ pCx = allocateCursor(p, pOp->p1, pOp->p2, -1, CURTYPE_BTREE);
+ if( pCx==0 ) goto no_mem;
+ pCx->isEphemeral = 1;
+ rc = sqlite3BtreeOpen(db->pVfs, 0, db, &pCx->pBtx,
+ BTREE_OMIT_JOURNAL | BTREE_SINGLE | pOp->p5,
+ vfsFlags);
+ if( rc==SQLITE_OK ){
+ rc = sqlite3BtreeBeginTrans(pCx->pBtx, 1, 0);
+ }
+ if( rc==SQLITE_OK ){
+ /* If a transient index is required, create it by calling
+ ** sqlite3BtreeCreateTable() with the BTREE_BLOBKEY flag before
+ ** opening it. If a transient table is required, just use the
+      ** automatically created table with root-page 1 (a BLOB_INTKEY table).
+ */
+ if( (pCx->pKeyInfo = pKeyInfo = pOp->p4.pKeyInfo)!=0 ){
+ assert( pOp->p4type==P4_KEYINFO );
+ rc = sqlite3BtreeCreateTable(pCx->pBtx, &pCx->pgnoRoot,
+ BTREE_BLOBKEY | pOp->p5);
+ if( rc==SQLITE_OK ){
+ assert( pCx->pgnoRoot==SCHEMA_ROOT+1 );
+ assert( pKeyInfo->db==db );
+ assert( pKeyInfo->enc==ENC(db) );
+ rc = sqlite3BtreeCursor(pCx->pBtx, pCx->pgnoRoot, BTREE_WRCSR,
+ pKeyInfo, pCx->uc.pCursor);
+ }
+ pCx->isTable = 0;
+ }else{
+ pCx->pgnoRoot = SCHEMA_ROOT;
+ rc = sqlite3BtreeCursor(pCx->pBtx, SCHEMA_ROOT, BTREE_WRCSR,
+ 0, pCx->uc.pCursor);
+ pCx->isTable = 1;
+ }
+ }
+ pCx->isOrdered = (pOp->p5!=BTREE_UNORDERED);
+ }
+ if( rc ) goto abort_due_to_error;
+ pCx->nullRow = 1;
+ break;
+}
+
+/* Opcode: SorterOpen P1 P2 P3 P4 *
+**
+** This opcode works like OP_OpenEphemeral except that it opens
+** a transient index that is specifically designed to sort large
+** tables using an external merge-sort algorithm.
+**
+** If argument P3 is non-zero, then it indicates that the sorter may
+** assume that a stable sort considering the first P3 fields of each
+** key is sufficient to produce the required results.
+*/
+case OP_SorterOpen: {
+ VdbeCursor *pCx;
+
+ assert( pOp->p1>=0 );
+ assert( pOp->p2>=0 );
+ pCx = allocateCursor(p, pOp->p1, pOp->p2, -1, CURTYPE_SORTER);
+ if( pCx==0 ) goto no_mem;
+ pCx->pKeyInfo = pOp->p4.pKeyInfo;
+ assert( pCx->pKeyInfo->db==db );
+ assert( pCx->pKeyInfo->enc==ENC(db) );
+ rc = sqlite3VdbeSorterInit(db, pOp->p3, pCx);
+ if( rc ) goto abort_due_to_error;
+ break;
+}
+
+/* Opcode: SequenceTest P1 P2 * * *
+** Synopsis: if( cursor[P1].ctr++ == 0 ) pc = P2
+**
+** P1 is a sorter cursor. If the sequence counter is currently zero, jump
+** to P2. Regardless of whether or not the jump is taken, increment the
+** sequence value.
+*/
+case OP_SequenceTest: {
+ VdbeCursor *pC;
+ assert( pOp->p1>=0 && pOp->p1<p->nCursor );
+ pC = p->apCsr[pOp->p1];
+ assert( isSorter(pC) );
+ if( (pC->seqCount++)==0 ){
+ goto jump_to_p2;
+ }
+ break;
+}
+
+/* Opcode: OpenPseudo P1 P2 P3 * *
+** Synopsis: P3 columns in r[P2]
+**
+** Open a new cursor that points to a fake table that contains a single
+** row of data. The content of that one row is the content of memory
+** register P2. In other words, cursor P1 becomes an alias for the
+** MEM_Blob content contained in register P2.
+**
+** A pseudo-table created by this opcode is used to hold a single
+** row output from the sorter so that the row can be decomposed into
+** individual columns using the OP_Column opcode. The OP_Column opcode
+** is the only cursor opcode that works with a pseudo-table.
+**
+** P3 is the number of fields in the records that will be stored by
+** the pseudo-table.
+*/
+case OP_OpenPseudo: {
+ VdbeCursor *pCx;
+
+ assert( pOp->p1>=0 );
+ assert( pOp->p3>=0 );
+ pCx = allocateCursor(p, pOp->p1, pOp->p3, -1, CURTYPE_PSEUDO);
+ if( pCx==0 ) goto no_mem;
+ pCx->nullRow = 1;
+ pCx->seekResult = pOp->p2;
+ pCx->isTable = 1;
+ /* Give this pseudo-cursor a fake BtCursor pointer so that pCx
+ ** can be safely passed to sqlite3VdbeCursorMoveto(). This avoids a test
+ ** for pCx->eCurType==CURTYPE_BTREE inside of sqlite3VdbeCursorMoveto()
+ ** which is a performance optimization */
+ pCx->uc.pCursor = sqlite3BtreeFakeValidCursor();
+ assert( pOp->p5==0 );
+ break;
+}
+
+/* Opcode: Close P1 * * * *
+**
+** Close a cursor previously opened as P1. If P1 is not
+** currently open, this instruction is a no-op.
+*/
+case OP_Close: {
+ assert( pOp->p1>=0 && pOp->p1<p->nCursor );
+ sqlite3VdbeFreeCursor(p, p->apCsr[pOp->p1]);
+ p->apCsr[pOp->p1] = 0;
+ break;
+}
+
+#ifdef SQLITE_ENABLE_COLUMN_USED_MASK
+/* Opcode: ColumnsUsed P1 * * P4 *
+**
+** This opcode (which only exists if SQLite was compiled with
+** SQLITE_ENABLE_COLUMN_USED_MASK) identifies which columns of the
+** table or index for cursor P1 are used. P4 is a 64-bit integer
+** (P4_INT64) in which the first 63 bits are one for each of the
+** first 63 columns of the table or index that are actually used
+** by the cursor. The high-order bit is set if any column after
+** the 63rd is used.
+*/
+case OP_ColumnsUsed: {
+ VdbeCursor *pC;
+ pC = p->apCsr[pOp->p1];
+ assert( pC->eCurType==CURTYPE_BTREE );
+ pC->maskUsed = *(u64*)pOp->p4.pI64;
+ break;
+}
+#endif
+
+/* Opcode: SeekGE P1 P2 P3 P4 *
+** Synopsis: key=r[P3@P4]
+**
+** If cursor P1 refers to an SQL table (B-Tree that uses integer keys),
+** use the value in register P3 as the key. If cursor P1 refers
+** to an SQL index, then P3 is the first in an array of P4 registers
+** that are used as an unpacked index key.
+**
+** Reposition cursor P1 so that it points to the smallest entry that
+** is greater than or equal to the key value. If there are no records
+** greater than or equal to the key and P2 is not zero, then jump to P2.
+**
+** If the cursor P1 was opened using the OPFLAG_SEEKEQ flag, then this
+** opcode will either land on a record that exactly matches the key, or
+** else it will cause a jump to P2. When the cursor is OPFLAG_SEEKEQ,
+** this opcode must be followed by an IdxGT opcode with the same arguments.
+** The IdxGT opcode will be skipped if this opcode succeeds, but the
+** IdxGT opcode will be used on subsequent loop iterations. The
+** OPFLAG_SEEKEQ flag is a hint to the btree layer to say that this
+** is an equality search.
+**
+** This opcode leaves the cursor configured to move in forward order,
+** from the beginning toward the end. In other words, the cursor is
+** configured to use Next, not Prev.
+**
+** See also: Found, NotFound, SeekLt, SeekGt, SeekLe
+*/
+/* Opcode: SeekGT P1 P2 P3 P4 *
+** Synopsis: key=r[P3@P4]
+**
+** If cursor P1 refers to an SQL table (B-Tree that uses integer keys),
+** use the value in register P3 as a key. If cursor P1 refers
+** to an SQL index, then P3 is the first in an array of P4 registers
+** that are used as an unpacked index key.
+**
+** Reposition cursor P1 so that it points to the smallest entry that
+** is greater than the key value. If there are no records greater than
+** the key and P2 is not zero, then jump to P2.
+**
+** This opcode leaves the cursor configured to move in forward order,
+** from the beginning toward the end. In other words, the cursor is
+** configured to use Next, not Prev.
+**
+** See also: Found, NotFound, SeekLt, SeekGe, SeekLe
+*/
+/* Opcode: SeekLT P1 P2 P3 P4 *
+** Synopsis: key=r[P3@P4]
+**
+** If cursor P1 refers to an SQL table (B-Tree that uses integer keys),
+** use the value in register P3 as a key. If cursor P1 refers
+** to an SQL index, then P3 is the first in an array of P4 registers
+** that are used as an unpacked index key.
+**
+** Reposition cursor P1 so that it points to the largest entry that
+** is less than the key value. If there are no records less than
+** the key and P2 is not zero, then jump to P2.
+**
+** This opcode leaves the cursor configured to move in reverse order,
+** from the end toward the beginning. In other words, the cursor is
+** configured to use Prev, not Next.
+**
+** See also: Found, NotFound, SeekGt, SeekGe, SeekLe
+*/
+/* Opcode: SeekLE P1 P2 P3 P4 *
+** Synopsis: key=r[P3@P4]
+**
+** If cursor P1 refers to an SQL table (B-Tree that uses integer keys),
+** use the value in register P3 as a key. If cursor P1 refers
+** to an SQL index, then P3 is the first in an array of P4 registers
+** that are used as an unpacked index key.
+**
+** Reposition cursor P1 so that it points to the largest entry that
+** is less than or equal to the key value. If there are no records
+** less than or equal to the key and P2 is not zero, then jump to P2.
+**
+** This opcode leaves the cursor configured to move in reverse order,
+** from the end toward the beginning. In other words, the cursor is
+** configured to use Prev, not Next.
+**
+** If the cursor P1 was opened using the OPFLAG_SEEKEQ flag, then this
+** opcode will either land on a record that exactly matches the key, or
+** else it will cause a jump to P2. When the cursor is OPFLAG_SEEKEQ,
+** this opcode must be followed by an IdxLT opcode with the same arguments.
+** The IdxLT opcode will be skipped if this opcode succeeds, but the
+** IdxLT opcode will be used on subsequent loop iterations. The
+** OPFLAG_SEEKEQ flag is a hint to the btree layer to say that this
+** is an equality search.
+**
+** See also: Found, NotFound, SeekGt, SeekGe, SeekLt
+*/
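+/* For example (editorial sketch): a scan constrained by "rowid >= 10" is
+** typically coded as OP_SeekGE with the integer 10 in r[P3], leaving the
+** cursor on the first row having rowid>=10, or jumping to P2 when the
+** table holds no such row. */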
+case OP_SeekLT: /* jump, in3, group */
+case OP_SeekLE: /* jump, in3, group */
+case OP_SeekGE: /* jump, in3, group */
+case OP_SeekGT: { /* jump, in3, group */
+ int res; /* Comparison result */
+ int oc; /* Opcode */
+ VdbeCursor *pC; /* The cursor to seek */
+ UnpackedRecord r; /* The key to seek for */
+ int nField; /* Number of columns or fields in the key */
+ i64 iKey; /* The rowid we are to seek to */
+ int eqOnly; /* Only interested in == results */
+
+ assert( pOp->p1>=0 && pOp->p1<p->nCursor );
+ assert( pOp->p2!=0 );
+ pC = p->apCsr[pOp->p1];
+ assert( pC!=0 );
+ assert( pC->eCurType==CURTYPE_BTREE );
+ assert( OP_SeekLE == OP_SeekLT+1 );
+ assert( OP_SeekGE == OP_SeekLT+2 );
+ assert( OP_SeekGT == OP_SeekLT+3 );
+ assert( pC->isOrdered );
+ assert( pC->uc.pCursor!=0 );
+ oc = pOp->opcode;
+ eqOnly = 0;
+ pC->nullRow = 0;
+#ifdef SQLITE_DEBUG
+ pC->seekOp = pOp->opcode;
+#endif
+
+ pC->deferredMoveto = 0;
+ pC->cacheStatus = CACHE_STALE;
+ if( pC->isTable ){
+ u16 flags3, newType;
+ /* The OPFLAG_SEEKEQ/BTREE_SEEK_EQ flag is only set on index cursors */
+ assert( sqlite3BtreeCursorHasHint(pC->uc.pCursor, BTREE_SEEK_EQ)==0
+ || CORRUPT_DB );
+
+ /* The input value in P3 might be of any type: integer, real, string,
+ ** blob, or NULL. But it needs to be an integer before we can do
+ ** the seek, so convert it. */
+ pIn3 = &aMem[pOp->p3];
+ flags3 = pIn3->flags;
+ if( (flags3 & (MEM_Int|MEM_Real|MEM_IntReal|MEM_Str))==MEM_Str ){
+ applyNumericAffinity(pIn3, 0);
+ }
+ iKey = sqlite3VdbeIntValue(pIn3); /* Get the integer key value */
+ newType = pIn3->flags; /* Record the type after applying numeric affinity */
+ pIn3->flags = flags3; /* But convert the type back to its original */
+
+ /* If the P3 value could not be converted into an integer without
+ ** loss of information, then special processing is required... */
+ if( (newType & (MEM_Int|MEM_IntReal))==0 ){
+ if( (newType & MEM_Real)==0 ){
+ if( (newType & MEM_Null) || oc>=OP_SeekGE ){
+ VdbeBranchTaken(1,2);
+ goto jump_to_p2;
+ }else{
+ rc = sqlite3BtreeLast(pC->uc.pCursor, &res);
+ if( rc!=SQLITE_OK ) goto abort_due_to_error;
+ goto seek_not_found;
+ }
+ }else
+
+ /* If the approximation iKey is larger than the actual real search
+ ** term, substitute >= for > and < for <=. e.g. if the search term
+  ** is 4.9 and the integer approximation is 5:
+ **
+ ** (x > 4.9) -> (x >= 5)
+ ** (x <= 4.9) -> (x < 5)
+ */
+ if( pIn3->u.r<(double)iKey ){
+ assert( OP_SeekGE==(OP_SeekGT-1) );
+ assert( OP_SeekLT==(OP_SeekLE-1) );
+ assert( (OP_SeekLE & 0x0001)==(OP_SeekGT & 0x0001) );
+ if( (oc & 0x0001)==(OP_SeekGT & 0x0001) ) oc--;
+ }
+
+ /* If the approximation iKey is smaller than the actual real search
+ ** term, substitute <= for < and > for >=. */
+ else if( pIn3->u.r>(double)iKey ){
+ assert( OP_SeekLE==(OP_SeekLT+1) );
+ assert( OP_SeekGT==(OP_SeekGE+1) );
+ assert( (OP_SeekLT & 0x0001)==(OP_SeekGE & 0x0001) );
+ if( (oc & 0x0001)==(OP_SeekLT & 0x0001) ) oc++;
+ }
+ }
+ rc = sqlite3BtreeMovetoUnpacked(pC->uc.pCursor, 0, (u64)iKey, 0, &res);
+ pC->movetoTarget = iKey; /* Used by OP_Delete */
+ if( rc!=SQLITE_OK ){
+ goto abort_due_to_error;
+ }
+ }else{
+ /* For a cursor with the OPFLAG_SEEKEQ/BTREE_SEEK_EQ hint, only the
+ ** OP_SeekGE and OP_SeekLE opcodes are allowed, and these must be
+ ** immediately followed by an OP_IdxGT or OP_IdxLT opcode, respectively,
+ ** with the same key.
+ */
+ if( sqlite3BtreeCursorHasHint(pC->uc.pCursor, BTREE_SEEK_EQ) ){
+ eqOnly = 1;
+ assert( pOp->opcode==OP_SeekGE || pOp->opcode==OP_SeekLE );
+ assert( pOp[1].opcode==OP_IdxLT || pOp[1].opcode==OP_IdxGT );
+ assert( pOp->opcode==OP_SeekGE || pOp[1].opcode==OP_IdxLT );
+ assert( pOp->opcode==OP_SeekLE || pOp[1].opcode==OP_IdxGT );
+ assert( pOp[1].p1==pOp[0].p1 );
+ assert( pOp[1].p2==pOp[0].p2 );
+ assert( pOp[1].p3==pOp[0].p3 );
+ assert( pOp[1].p4.i==pOp[0].p4.i );
+ }
+
+ nField = pOp->p4.i;
+ assert( pOp->p4type==P4_INT32 );
+ assert( nField>0 );
+ r.pKeyInfo = pC->pKeyInfo;
+ r.nField = (u16)nField;
+
+ /* The next line of code computes as follows, only faster:
+ ** if( oc==OP_SeekGT || oc==OP_SeekLE ){
+ ** r.default_rc = -1;
+ ** }else{
+ ** r.default_rc = +1;
+ ** }
+ */
+ r.default_rc = ((1 & (oc - OP_SeekLT)) ? -1 : +1);
+ assert( oc!=OP_SeekGT || r.default_rc==-1 );
+ assert( oc!=OP_SeekLE || r.default_rc==-1 );
+ assert( oc!=OP_SeekGE || r.default_rc==+1 );
+ assert( oc!=OP_SeekLT || r.default_rc==+1 );
+
+ r.aMem = &aMem[pOp->p3];
+#ifdef SQLITE_DEBUG
+ { int i; for(i=0; i<r.nField; i++) assert( memIsValid(&r.aMem[i]) ); }
+#endif
+ r.eqSeen = 0;
+ rc = sqlite3BtreeMovetoUnpacked(pC->uc.pCursor, &r, 0, 0, &res);
+ if( rc!=SQLITE_OK ){
+ goto abort_due_to_error;
+ }
+ if( eqOnly && r.eqSeen==0 ){
+ assert( res!=0 );
+ goto seek_not_found;
+ }
+ }
+#ifdef SQLITE_TEST
+ sqlite3_search_count++;
+#endif
+ if( oc>=OP_SeekGE ){ assert( oc==OP_SeekGE || oc==OP_SeekGT );
+ if( res<0 || (res==0 && oc==OP_SeekGT) ){
+ res = 0;
+ rc = sqlite3BtreeNext(pC->uc.pCursor, 0);
+ if( rc!=SQLITE_OK ){
+ if( rc==SQLITE_DONE ){
+ rc = SQLITE_OK;
+ res = 1;
+ }else{
+ goto abort_due_to_error;
+ }
+ }
+ }else{
+ res = 0;
+ }
+ }else{
+ assert( oc==OP_SeekLT || oc==OP_SeekLE );
+ if( res>0 || (res==0 && oc==OP_SeekLT) ){
+ res = 0;
+ rc = sqlite3BtreePrevious(pC->uc.pCursor, 0);
+ if( rc!=SQLITE_OK ){
+ if( rc==SQLITE_DONE ){
+ rc = SQLITE_OK;
+ res = 1;
+ }else{
+ goto abort_due_to_error;
+ }
+ }
+ }else{
+ /* res might be negative because the table is empty. Check to
+ ** see if this is the case.
+ */
+ res = sqlite3BtreeEof(pC->uc.pCursor);
+ }
+ }
+seek_not_found:
+ assert( pOp->p2>0 );
+ VdbeBranchTaken(res!=0,2);
+ if( res ){
+ goto jump_to_p2;
+ }else if( eqOnly ){
+ assert( pOp[1].opcode==OP_IdxLT || pOp[1].opcode==OP_IdxGT );
+    pOp++; /* Skip the OP_IdxLT or OP_IdxGT that follows */
+ }
+ break;
+}
+
+/* Opcode: SeekHit P1 P2 * * *
+** Synopsis: seekHit=P2
+**
+** Set the seekHit flag on cursor P1 to the value in P2.
+** The seekHit flag is used by the IfNoHope opcode.
+**
+** P1 must be a valid b-tree cursor. P2 must be a boolean value,
+** either 0 or 1.
+*/
+case OP_SeekHit: {
+ VdbeCursor *pC;
+ assert( pOp->p1>=0 && pOp->p1<p->nCursor );
+ pC = p->apCsr[pOp->p1];
+ assert( pC!=0 );
+ assert( pOp->p2==0 || pOp->p2==1 );
+ pC->seekHit = pOp->p2 & 1;
+ break;
+}
+
+/* Opcode: IfNotOpen P1 P2 * * *
+** Synopsis: if( !csr[P1] ) goto P2
+**
+** If cursor P1 is not open, jump to instruction P2. Otherwise, fall through.
+*/
+case OP_IfNotOpen: { /* jump */
+ assert( pOp->p1>=0 && pOp->p1<p->nCursor );
+ VdbeBranchTaken(p->apCsr[pOp->p1]==0, 2);
+ if( !p->apCsr[pOp->p1] ){
+ goto jump_to_p2_and_check_for_interrupt;
+ }
+ break;
+}
+
+/* Opcode: Found P1 P2 P3 P4 *
+** Synopsis: key=r[P3@P4]
+**
+** If P4==0 then register P3 holds a blob constructed by MakeRecord. If
+** P4>0 then register P3 is the first of P4 registers that form an unpacked
+** record.
+**
+** Cursor P1 is on an index btree. If the record identified by P3 and P4
+** is a prefix of any entry in P1 then a jump is made to P2 and
+** P1 is left pointing at the matching entry.
+**
+** This operation leaves the cursor in a state where it can be
+** advanced in the forward direction. The Next instruction will work,
+** but not the Prev instruction.
+**
+** See also: NotFound, NoConflict, NotExists, SeekGe
+*/
+/* Opcode: NotFound P1 P2 P3 P4 *
+** Synopsis: key=r[P3@P4]
+**
+** If P4==0 then register P3 holds a blob constructed by MakeRecord. If
+** P4>0 then register P3 is the first of P4 registers that form an unpacked
+** record.
+**
+** Cursor P1 is on an index btree. If the record identified by P3 and P4
+** is not the prefix of any entry in P1 then a jump is made to P2. If P1
+** does contain an entry whose prefix matches the P3/P4 record then control
+** falls through to the next instruction and P1 is left pointing at the
+** matching entry.
+**
+** This operation leaves the cursor in a state where it cannot be
+** advanced in either direction. In other words, the Next and Prev
+** opcodes do not work after this operation.
+**
+** See also: Found, NotExists, NoConflict, IfNoHope
+*/
+/* Opcode: IfNoHope P1 P2 P3 P4 *
+** Synopsis: key=r[P3@P4]
+**
+** Register P3 is the first of P4 registers that form an unpacked
+** record.
+**
+** Cursor P1 is on an index btree. If the seekHit flag is set on P1, then
+** this opcode is a no-op. But if the seekHit flag of P1 is clear, then
+** check to see if there is any entry in P1 that matches the
+** prefix identified by P3 and P4. If no entry matches the prefix,
+** jump to P2. Otherwise fall through.
+**
+** This opcode behaves like OP_NotFound if the seekHit
+** flag is clear and it behaves like OP_Noop if the seekHit flag is set.
+**
+** This opcode is used in IN clause processing for a multi-column key.
+** If an IN clause is attached to an element of the key other than the
+** left-most element, and if there are no matches on the most recent
+** seek over the whole key, then it might be that one of the key elements
+** to the left is prohibiting a match, and hence there is "no hope" of
+** any match regardless of how many IN clause elements are checked.
+** In such a case, we abandon the IN clause search early, using this
+** opcode. The opcode name comes from the fact that the
+** jump is taken if there is "no hope" of achieving a match.
+**
+** See also: NotFound, SeekHit
+*/
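+/* A rough sketch of how this might be used, assuming a two-column index
+** probed by "a=?1 AND b IN (...)". The hint is cleared with SeekHit before
+** the IN loop, and each later iteration is guarded:
+**
+**     SeekHit   1, 0                 -- clear the hint
+**     ...
+**     IfNoHope  1, next_b, r[1], 2   -- skip work if no (a,b) can match
+**
+** so that once a whole-key seek has failed, the remaining IN elements are
+** abandoned early. The exact emission pattern belongs to the code
+** generator; operands here are illustrative only.
+*/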
+/* Opcode: NoConflict P1 P2 P3 P4 *
+** Synopsis: key=r[P3@P4]
+**
+** If P4==0 then register P3 holds a blob constructed by MakeRecord. If
+** P4>0 then register P3 is the first of P4 registers that form an unpacked
+** record.
+**
+** Cursor P1 is on an index btree. If the record identified by P3 and P4
+** contains any NULL value, jump immediately to P2. If all terms of the
+** record are not-NULL then a check is done to determine if any row in the
+** P1 index btree has a matching key prefix. If there are no matches, jump
+** immediately to P2. If there is a match, fall through and leave the P1
+** cursor pointing to the matching row.
+**
+** This opcode is similar to OP_NotFound with the exception that the
+** branch is always taken if any part of the search key input is NULL.
+**
+** This operation leaves the cursor in a state where it cannot be
+** advanced in either direction. In other words, the Next and Prev
+** opcodes do not work after this operation.
+**
+** See also: NotFound, Found, NotExists
+*/
+case OP_IfNoHope: { /* jump, in3 */
+ VdbeCursor *pC;
+ assert( pOp->p1>=0 && pOp->p1<p->nCursor );
+ pC = p->apCsr[pOp->p1];
+ assert( pC!=0 );
+ if( pC->seekHit ) break;
+ /* Fall through into OP_NotFound */
+ /* no break */ deliberate_fall_through
+}
+case OP_NoConflict: /* jump, in3 */
+case OP_NotFound: /* jump, in3 */
+case OP_Found: { /* jump, in3 */
+ int alreadyExists;
+ int takeJump;
+ int ii;
+ VdbeCursor *pC;
+ int res;
+ UnpackedRecord *pFree;
+ UnpackedRecord *pIdxKey;
+ UnpackedRecord r;
+
+#ifdef SQLITE_TEST
+ if( pOp->opcode!=OP_NoConflict ) sqlite3_found_count++;
+#endif
+
+ assert( pOp->p1>=0 && pOp->p1<p->nCursor );
+ assert( pOp->p4type==P4_INT32 );
+ pC = p->apCsr[pOp->p1];
+ assert( pC!=0 );
+#ifdef SQLITE_DEBUG
+ pC->seekOp = pOp->opcode;
+#endif
+ pIn3 = &aMem[pOp->p3];
+ assert( pC->eCurType==CURTYPE_BTREE );
+ assert( pC->uc.pCursor!=0 );
+ assert( pC->isTable==0 );
+ if( pOp->p4.i>0 ){
+ r.pKeyInfo = pC->pKeyInfo;
+ r.nField = (u16)pOp->p4.i;
+ r.aMem = pIn3;
+#ifdef SQLITE_DEBUG
+ for(ii=0; ii<r.nField; ii++){
+ assert( memIsValid(&r.aMem[ii]) );
+ assert( (r.aMem[ii].flags & MEM_Zero)==0 || r.aMem[ii].n==0 );
+ if( ii ) REGISTER_TRACE(pOp->p3+ii, &r.aMem[ii]);
+ }
+#endif
+ pIdxKey = &r;
+ pFree = 0;
+ }else{
+ assert( pIn3->flags & MEM_Blob );
+ rc = ExpandBlob(pIn3);
+ assert( rc==SQLITE_OK || rc==SQLITE_NOMEM );
+ if( rc ) goto no_mem;
+ pFree = pIdxKey = sqlite3VdbeAllocUnpackedRecord(pC->pKeyInfo);
+ if( pIdxKey==0 ) goto no_mem;
+ sqlite3VdbeRecordUnpack(pC->pKeyInfo, pIn3->n, pIn3->z, pIdxKey);
+ }
+ pIdxKey->default_rc = 0;
+ takeJump = 0;
+ if( pOp->opcode==OP_NoConflict ){
+ /* For the OP_NoConflict opcode, take the jump if any of the
+ ** input fields are NULL, since any key with a NULL will not
+ ** conflict */
+ for(ii=0; ii<pIdxKey->nField; ii++){
+ if( pIdxKey->aMem[ii].flags & MEM_Null ){
+ takeJump = 1;
+ break;
+ }
+ }
+ }
+ rc = sqlite3BtreeMovetoUnpacked(pC->uc.pCursor, pIdxKey, 0, 0, &res);
+ if( pFree ) sqlite3DbFreeNN(db, pFree);
+ if( rc!=SQLITE_OK ){
+ goto abort_due_to_error;
+ }
+ pC->seekResult = res;
+ alreadyExists = (res==0);
+ pC->nullRow = 1-alreadyExists;
+ pC->deferredMoveto = 0;
+ pC->cacheStatus = CACHE_STALE;
+ if( pOp->opcode==OP_Found ){
+ VdbeBranchTaken(alreadyExists!=0,2);
+ if( alreadyExists ) goto jump_to_p2;
+ }else{
+ VdbeBranchTaken(takeJump||alreadyExists==0,2);
+ if( takeJump || !alreadyExists ) goto jump_to_p2;
+ }
+ break;
+}
+
+/* Opcode: SeekRowid P1 P2 P3 * *
+** Synopsis: intkey=r[P3]
+**
+** P1 is the index of a cursor open on an SQL table btree (with integer
+** keys). If register P3 does not contain an integer or if P1 does not
+** contain a record with rowid P3 then jump immediately to P2.
+** Or, if P2 is 0, raise an SQLITE_CORRUPT error. If P1 does contain
+** a record with rowid P3 then
+** leave the cursor pointing at that record and fall through to the next
+** instruction.
+**
+** The OP_NotExists opcode performs the same operation, but with OP_NotExists
+** the P3 register must be guaranteed to contain an integer value. With this
+** opcode, register P3 might not contain an integer.
+**
+** The OP_NotFound opcode performs the same operation on index btrees
+** (with arbitrary multi-value keys).
+**
+** This opcode leaves the cursor in a state where it cannot be advanced
+** in either direction. In other words, the Next and Prev opcodes will
+** not work following this opcode.
+**
+** See also: Found, NotFound, NoConflict, NotExists
+*/
+/* Opcode: NotExists P1 P2 P3 * *
+** Synopsis: intkey=r[P3]
+**
+** P1 is the index of a cursor open on an SQL table btree (with integer
+** keys). P3 is an integer rowid. If P1 does not contain a record with
+** rowid P3 then jump immediately to P2. Or, if P2 is 0, raise an
+** SQLITE_CORRUPT error. If P1 does contain a record with rowid P3 then
+** leave the cursor pointing at that record and fall through to the next
+** instruction.
+**
+** The OP_SeekRowid opcode performs the same operation but also allows the
+** P3 register to contain a non-integer value, in which case the jump is
+** always taken. This opcode requires that P3 always contain an integer.
+**
+** The OP_NotFound opcode performs the same operation on index btrees
+** (with arbitrary multi-value keys).
+**
+** This opcode leaves the cursor in a state where it cannot be advanced
+** in either direction. In other words, the Next and Prev opcodes will
+** not work following this opcode.
+**
+** See also: Found, NotFound, NoConflict, SeekRowid
+*/
+case OP_SeekRowid: { /* jump, in3 */
+ VdbeCursor *pC;
+ BtCursor *pCrsr;
+ int res;
+ u64 iKey;
+
+ pIn3 = &aMem[pOp->p3];
+ testcase( pIn3->flags & MEM_Int );
+ testcase( pIn3->flags & MEM_IntReal );
+ testcase( pIn3->flags & MEM_Real );
+ testcase( (pIn3->flags & (MEM_Str|MEM_Int))==MEM_Str );
+ if( (pIn3->flags & (MEM_Int|MEM_IntReal))==0 ){
+    /* If pIn3 does not contain an integer, compute iKey as the
+ ** integer value of pIn3. Jump to P2 if pIn3 cannot be converted
+ ** into an integer without loss of information. Take care to avoid
+ ** changing the datatype of pIn3, however, as it is used by other
+ ** parts of the prepared statement. */
+ Mem x = pIn3[0];
+ applyAffinity(&x, SQLITE_AFF_NUMERIC, encoding);
+ if( (x.flags & MEM_Int)==0 ) goto jump_to_p2;
+ iKey = x.u.i;
+ goto notExistsWithKey;
+ }
+ /* Fall through into OP_NotExists */
+ /* no break */ deliberate_fall_through
+case OP_NotExists: /* jump, in3 */
+ pIn3 = &aMem[pOp->p3];
+ assert( (pIn3->flags & MEM_Int)!=0 || pOp->opcode==OP_SeekRowid );
+ assert( pOp->p1>=0 && pOp->p1<p->nCursor );
+ iKey = pIn3->u.i;
+notExistsWithKey:
+ pC = p->apCsr[pOp->p1];
+ assert( pC!=0 );
+#ifdef SQLITE_DEBUG
+ if( pOp->opcode==OP_SeekRowid ) pC->seekOp = OP_SeekRowid;
+#endif
+ assert( pC->isTable );
+ assert( pC->eCurType==CURTYPE_BTREE );
+ pCrsr = pC->uc.pCursor;
+ assert( pCrsr!=0 );
+ res = 0;
+ rc = sqlite3BtreeMovetoUnpacked(pCrsr, 0, iKey, 0, &res);
+ assert( rc==SQLITE_OK || res==0 );
+ pC->movetoTarget = iKey; /* Used by OP_Delete */
+ pC->nullRow = 0;
+ pC->cacheStatus = CACHE_STALE;
+ pC->deferredMoveto = 0;
+ VdbeBranchTaken(res!=0,2);
+ pC->seekResult = res;
+ if( res!=0 ){
+ assert( rc==SQLITE_OK );
+ if( pOp->p2==0 ){
+ rc = SQLITE_CORRUPT_BKPT;
+ }else{
+ goto jump_to_p2;
+ }
+ }
+ if( rc ) goto abort_due_to_error;
+ break;
+}
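+
+/* A minimal example of OP_SeekRowid in context: a statement such as
+**
+**     SELECT * FROM t WHERE rowid=?1;
+**
+** typically compiles to something like
+**
+**     SeekRowid  0, done, r[1]   -- jump to done if no row has rowid r[1]
+**     Column     0, 0, r[2]      -- otherwise read the located row
+**
+** with cursor and register numbers illustrative only.
+*/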
+
+/* Opcode: Sequence P1 P2 * * *
+** Synopsis: r[P2]=cursor[P1].ctr++
+**
+** Find the next available sequence number for cursor P1.
+** Write the sequence number into register P2.
+** The sequence number on the cursor is incremented after this
+** instruction.
+*/
+case OP_Sequence: { /* out2 */
+ assert( pOp->p1>=0 && pOp->p1<p->nCursor );
+ assert( p->apCsr[pOp->p1]!=0 );
+ assert( p->apCsr[pOp->p1]->eCurType!=CURTYPE_VTAB );
+ pOut = out2Prerelease(p, pOp);
+ pOut->u.i = p->apCsr[pOp->p1]->seqCount++;
+ break;
+}
+
+
+/* Opcode: NewRowid P1 P2 P3 * *
+** Synopsis: r[P2]=rowid
+**
+** Get a new integer record number (a.k.a "rowid") used as the key to a table.
+** The record number is not previously used as a key in the database
+** table that cursor P1 points to. The new record number is written
+** to register P2.
+**
+** If P3>0 then P3 is a register in the root frame of this VDBE that holds
+** the largest previously generated record number. No new record numbers are
+** allowed to be less than this value. When this value reaches its maximum,
+** an SQLITE_FULL error is generated. The P3 register is updated with the
+** generated record number. This P3 mechanism is used to help implement the
+** AUTOINCREMENT feature.
+*/
+case OP_NewRowid: { /* out2 */
+ i64 v; /* The new rowid */
+ VdbeCursor *pC; /* Cursor of table to get the new rowid */
+ int res; /* Result of an sqlite3BtreeLast() */
+ int cnt; /* Counter to limit the number of searches */
+ Mem *pMem; /* Register holding largest rowid for AUTOINCREMENT */
+ VdbeFrame *pFrame; /* Root frame of VDBE */
+
+ v = 0;
+ res = 0;
+ pOut = out2Prerelease(p, pOp);
+ assert( pOp->p1>=0 && pOp->p1<p->nCursor );
+ pC = p->apCsr[pOp->p1];
+ assert( pC!=0 );
+ assert( pC->isTable );
+ assert( pC->eCurType==CURTYPE_BTREE );
+ assert( pC->uc.pCursor!=0 );
+ {
+ /* The next rowid or record number (different terms for the same
+ ** thing) is obtained in a two-step algorithm.
+ **
+ ** First we attempt to find the largest existing rowid and add one
+ ** to that. But if the largest existing rowid is already the maximum
+ ** positive integer, we have to fall through to the second
+    ** probabilistic algorithm.
+ **
+ ** The second algorithm is to select a rowid at random and see if
+ ** it already exists in the table. If it does not exist, we have
+ ** succeeded. If the random rowid does exist, we select a new one
+ ** and try again, up to 100 times.
+ */
+ assert( pC->isTable );
+
+#ifdef SQLITE_32BIT_ROWID
+# define MAX_ROWID 0x7fffffff
+#else
+ /* Some compilers complain about constants of the form 0x7fffffffffffffff.
+ ** Others complain about 0x7ffffffffffffffffLL. The following macro seems
+ ** to provide the constant while making all compilers happy.
+ */
+# define MAX_ROWID (i64)( (((u64)0x7fffffff)<<32) | (u64)0xffffffff )
+#endif
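+
+  /* In effect, the 64-bit form above evaluates to
+  ** 0x7fffffffffffffff == 9223372036854775807, the largest signed 64-bit
+  ** integer, while SQLITE_32BIT_ROWID caps it at 0x7fffffff == 2147483647.
+  */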
+
+ if( !pC->useRandomRowid ){
+ rc = sqlite3BtreeLast(pC->uc.pCursor, &res);
+ if( rc!=SQLITE_OK ){
+ goto abort_due_to_error;
+ }
+ if( res ){
+ v = 1; /* IMP: R-61914-48074 */
+ }else{
+ assert( sqlite3BtreeCursorIsValid(pC->uc.pCursor) );
+ v = sqlite3BtreeIntegerKey(pC->uc.pCursor);
+ if( v>=MAX_ROWID ){
+ pC->useRandomRowid = 1;
+ }else{
+ v++; /* IMP: R-29538-34987 */
+ }
+ }
+ }
+
+#ifndef SQLITE_OMIT_AUTOINCREMENT
+ if( pOp->p3 ){
+ /* Assert that P3 is a valid memory cell. */
+ assert( pOp->p3>0 );
+ if( p->pFrame ){
+ for(pFrame=p->pFrame; pFrame->pParent; pFrame=pFrame->pParent);
+ /* Assert that P3 is a valid memory cell. */
+ assert( pOp->p3<=pFrame->nMem );
+ pMem = &pFrame->aMem[pOp->p3];
+ }else{
+ /* Assert that P3 is a valid memory cell. */
+ assert( pOp->p3<=(p->nMem+1 - p->nCursor) );
+ pMem = &aMem[pOp->p3];
+ memAboutToChange(p, pMem);
+ }
+ assert( memIsValid(pMem) );
+
+ REGISTER_TRACE(pOp->p3, pMem);
+ sqlite3VdbeMemIntegerify(pMem);
+ assert( (pMem->flags & MEM_Int)!=0 ); /* mem(P3) holds an integer */
+ if( pMem->u.i==MAX_ROWID || pC->useRandomRowid ){
+ rc = SQLITE_FULL; /* IMP: R-17817-00630 */
+ goto abort_due_to_error;
+ }
+ if( v<pMem->u.i+1 ){
+ v = pMem->u.i + 1;
+ }
+ pMem->u.i = v;
+ }
+#endif
+ if( pC->useRandomRowid ){
+ /* IMPLEMENTATION-OF: R-07677-41881 If the largest ROWID is equal to the
+ ** largest possible integer (9223372036854775807) then the database
+ ** engine starts picking positive candidate ROWIDs at random until
+ ** it finds one that is not previously used. */
+ assert( pOp->p3==0 ); /* We cannot be in random rowid mode if this is
+ ** an AUTOINCREMENT table. */
+ cnt = 0;
+ do{
+ sqlite3_randomness(sizeof(v), &v);
+ v &= (MAX_ROWID>>1); v++; /* Ensure that v is greater than zero */
+ }while( ((rc = sqlite3BtreeMovetoUnpacked(pC->uc.pCursor, 0, (u64)v,
+ 0, &res))==SQLITE_OK)
+ && (res==0)
+ && (++cnt<100));
+ if( rc ) goto abort_due_to_error;
+ if( res==0 ){
+ rc = SQLITE_FULL; /* IMP: R-38219-53002 */
+ goto abort_due_to_error;
+ }
+ assert( v>0 ); /* EV: R-40812-03570 */
+ }
+ pC->deferredMoveto = 0;
+ pC->cacheStatus = CACHE_STALE;
+ }
+ pOut->u.i = v;
+ break;
+}
+
+/* Opcode: Insert P1 P2 P3 P4 P5
+** Synopsis: intkey=r[P3] data=r[P2]
+**
+** Write an entry into the table of cursor P1. A new entry is
+** created if it doesn't already exist or the data for an existing
+** entry is overwritten. The data is the MEM_Blob value stored in register
+** number P2. The key is stored in register P3. The key must
+** be a MEM_Int.
+**
+** If the OPFLAG_NCHANGE flag of P5 is set, then the row change count is
+** incremented (otherwise not). If the OPFLAG_LASTROWID flag of P5 is set,
+** then rowid is stored for subsequent return by the
+** sqlite3_last_insert_rowid() function (otherwise it is unmodified).
+**
+** If the OPFLAG_USESEEKRESULT flag of P5 is set, the implementation might
+** run faster by avoiding an unnecessary seek on cursor P1. However,
+** the OPFLAG_USESEEKRESULT flag must only be set if there have been no prior
+** seeks on the cursor or if the most recent seek used a key equal to P3.
+**
+** If the OPFLAG_ISUPDATE flag is set, then this opcode is part of an
+** UPDATE operation. Otherwise (if the flag is clear) then this opcode
+** is part of an INSERT operation. The difference is only important to
+** the update hook.
+**
+** Parameter P4 may point to a Table structure, or may be NULL. If it is
+** not NULL, then the update-hook (sqlite3.xUpdateCallback) is invoked
+** following a successful insert.
+**
+** (WARNING/TODO: If P1 is a pseudo-cursor and P2 is dynamically
+** allocated, then ownership of P2 is transferred to the pseudo-cursor
+** and register P2 becomes ephemeral. If the cursor is changed, the
+** value of register P2 will then change. Make sure this does not
+** cause any problems.)
+**
+** This instruction only works on tables. The equivalent instruction
+** for indices is OP_IdxInsert.
+*/
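+/* A rough sketch of a simple rowid-table INSERT as the code generator
+** might render it; all operand numbers are illustrative only:
+**
+**     NewRowid    0, r[1]           -- choose an unused rowid
+**     MakeRecord  2, 3, r[5]        -- pack r[2..4] into a record in r[5]
+**     Insert      0, r[5], r[1]     -- write payload r[5] under key r[1]
+*/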
+case OP_Insert: {
+ Mem *pData; /* MEM cell holding data for the record to be inserted */
+ Mem *pKey; /* MEM cell holding key for the record */
+ VdbeCursor *pC; /* Cursor to table into which insert is written */
+ int seekResult; /* Result of prior seek or 0 if no USESEEKRESULT flag */
+ const char *zDb; /* database name - used by the update hook */
+ Table *pTab; /* Table structure - used by update and pre-update hooks */
+ BtreePayload x; /* Payload to be inserted */
+
+ pData = &aMem[pOp->p2];
+ assert( pOp->p1>=0 && pOp->p1<p->nCursor );
+ assert( memIsValid(pData) );
+ pC = p->apCsr[pOp->p1];
+ assert( pC!=0 );
+ assert( pC->eCurType==CURTYPE_BTREE );
+ assert( pC->deferredMoveto==0 );
+ assert( pC->uc.pCursor!=0 );
+ assert( (pOp->p5 & OPFLAG_ISNOOP) || pC->isTable );
+ assert( pOp->p4type==P4_TABLE || pOp->p4type>=P4_STATIC );
+ REGISTER_TRACE(pOp->p2, pData);
+ sqlite3VdbeIncrWriteCounter(p, pC);
+
+ pKey = &aMem[pOp->p3];
+ assert( pKey->flags & MEM_Int );
+ assert( memIsValid(pKey) );
+ REGISTER_TRACE(pOp->p3, pKey);
+ x.nKey = pKey->u.i;
+
+ if( pOp->p4type==P4_TABLE && HAS_UPDATE_HOOK(db) ){
+ assert( pC->iDb>=0 );
+ zDb = db->aDb[pC->iDb].zDbSName;
+ pTab = pOp->p4.pTab;
+ assert( (pOp->p5 & OPFLAG_ISNOOP) || HasRowid(pTab) );
+ }else{
+ pTab = 0;
+ zDb = 0; /* Not needed. Silence a compiler warning. */
+ }
+
+#ifdef SQLITE_ENABLE_PREUPDATE_HOOK
+ /* Invoke the pre-update hook, if any */
+ if( pTab ){
+ if( db->xPreUpdateCallback && !(pOp->p5 & OPFLAG_ISUPDATE) ){
+ sqlite3VdbePreUpdateHook(p, pC, SQLITE_INSERT, zDb, pTab, x.nKey,pOp->p2);
+ }
+ if( db->xUpdateCallback==0 || pTab->aCol==0 ){
+ /* Prevent post-update hook from running in cases when it should not */
+ pTab = 0;
+ }
+ }
+ if( pOp->p5 & OPFLAG_ISNOOP ) break;
+#endif
+
+ if( pOp->p5 & OPFLAG_NCHANGE ) p->nChange++;
+ if( pOp->p5 & OPFLAG_LASTROWID ) db->lastRowid = x.nKey;
+ assert( pData->flags & (MEM_Blob|MEM_Str) );
+ x.pData = pData->z;
+ x.nData = pData->n;
+ seekResult = ((pOp->p5 & OPFLAG_USESEEKRESULT) ? pC->seekResult : 0);
+ if( pData->flags & MEM_Zero ){
+ x.nZero = pData->u.nZero;
+ }else{
+ x.nZero = 0;
+ }
+ x.pKey = 0;
+ rc = sqlite3BtreeInsert(pC->uc.pCursor, &x,
+ (pOp->p5 & (OPFLAG_APPEND|OPFLAG_SAVEPOSITION)), seekResult
+ );
+ pC->deferredMoveto = 0;
+ pC->cacheStatus = CACHE_STALE;
+
+ /* Invoke the update-hook if required. */
+ if( rc ) goto abort_due_to_error;
+ if( pTab ){
+ assert( db->xUpdateCallback!=0 );
+ assert( pTab->aCol!=0 );
+ db->xUpdateCallback(db->pUpdateArg,
+ (pOp->p5 & OPFLAG_ISUPDATE) ? SQLITE_UPDATE : SQLITE_INSERT,
+ zDb, pTab->zName, x.nKey);
+ }
+ break;
+}
+
+/* Opcode: Delete P1 P2 P3 P4 P5
+**
+** Delete the record at which the P1 cursor is currently pointing.
+**
+** If the OPFLAG_SAVEPOSITION bit of the P5 parameter is set, then
+** the cursor will be left pointing at either the next or the previous
+** record in the table. If it is left pointing at the next record, then
+** the next Next instruction will be a no-op. As a result, in this case
+** it is ok to delete a record from within a Next loop. If the
+** OPFLAG_SAVEPOSITION bit of P5 is clear, then the cursor will be
+** left in an undefined state.
+**
+** If the OPFLAG_AUXDELETE bit is set on P5, that indicates that this
+** delete is one of several associated with deleting a table row and all its
+** associated index entries. Exactly one of those deletes is the "primary"
+** delete. The others are all on OPFLAG_FORDELETE cursors or else are
+** marked with the AUXDELETE flag.
+**
+** If the OPFLAG_NCHANGE flag of P2 (NB: P2 not P5) is set, then the row
+** change count is incremented (otherwise not).
+**
+** P1 must not be a pseudo-table. It has to be a real table with
+** multiple rows.
+**
+** If P4 is not NULL then it points to a Table object. In this case either
+** the update or pre-update hook, or both, may be invoked. The P1 cursor must
+** have been positioned using OP_NotFound prior to invoking this opcode in
+** this case. Specifically, if one is configured, the pre-update hook is
+** invoked if P4 is not NULL. The update-hook is invoked if one is configured,
+** P4 is not NULL, and the OPFLAG_NCHANGE flag is set in P2.
+**
+** If the OPFLAG_ISUPDATE flag is set in P2, then P3 contains the address
+** of the memory cell that contains the value that the rowid of the row will
+** be set to by the update.
+*/
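+/* A rough sketch of "DELETE FROM t WHERE rowid=?1" as the code generator
+** might render it; operand numbers are illustrative only:
+**
+**     SeekRowid  0, done, r[1]      -- position the cursor, or skip
+**     Delete     0, OPFLAG_NCHANGE, 0, <Table t>
+**   done:
+*/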
+case OP_Delete: {
+ VdbeCursor *pC;
+ const char *zDb;
+ Table *pTab;
+ int opflags;
+
+ opflags = pOp->p2;
+ assert( pOp->p1>=0 && pOp->p1<p->nCursor );
+ pC = p->apCsr[pOp->p1];
+ assert( pC!=0 );
+ assert( pC->eCurType==CURTYPE_BTREE );
+ assert( pC->uc.pCursor!=0 );
+ assert( pC->deferredMoveto==0 );
+ sqlite3VdbeIncrWriteCounter(p, pC);
+
+#ifdef SQLITE_DEBUG
+ if( pOp->p4type==P4_TABLE
+ && HasRowid(pOp->p4.pTab)
+ && pOp->p5==0
+ && sqlite3BtreeCursorIsValidNN(pC->uc.pCursor)
+ ){
+ /* If p5 is zero, the seek operation that positioned the cursor prior to
+ ** OP_Delete will have also set the pC->movetoTarget field to the rowid of
+ ** the row that is being deleted */
+ i64 iKey = sqlite3BtreeIntegerKey(pC->uc.pCursor);
+ assert( CORRUPT_DB || pC->movetoTarget==iKey );
+ }
+#endif
+
+ /* If the update-hook or pre-update-hook will be invoked, set zDb to
+  ** the name of the db to pass to it. Also set local pTab to a copy
+ ** of p4.pTab. Finally, if p5 is true, indicating that this cursor was
+ ** last moved with OP_Next or OP_Prev, not Seek or NotFound, set
+ ** VdbeCursor.movetoTarget to the current rowid. */
+ if( pOp->p4type==P4_TABLE && HAS_UPDATE_HOOK(db) ){
+ assert( pC->iDb>=0 );
+ assert( pOp->p4.pTab!=0 );
+ zDb = db->aDb[pC->iDb].zDbSName;
+ pTab = pOp->p4.pTab;
+ if( (pOp->p5 & OPFLAG_SAVEPOSITION)!=0 && pC->isTable ){
+ pC->movetoTarget = sqlite3BtreeIntegerKey(pC->uc.pCursor);
+ }
+ }else{
+ zDb = 0; /* Not needed. Silence a compiler warning. */
+ pTab = 0; /* Not needed. Silence a compiler warning. */
+ }
+
+#ifdef SQLITE_ENABLE_PREUPDATE_HOOK
+ /* Invoke the pre-update-hook if required. */
+ if( db->xPreUpdateCallback && pOp->p4.pTab ){
+ assert( !(opflags & OPFLAG_ISUPDATE)
+ || HasRowid(pTab)==0
+ || (aMem[pOp->p3].flags & MEM_Int)
+ );
+ sqlite3VdbePreUpdateHook(p, pC,
+ (opflags & OPFLAG_ISUPDATE) ? SQLITE_UPDATE : SQLITE_DELETE,
+ zDb, pTab, pC->movetoTarget,
+ pOp->p3
+ );
+ }
+ if( opflags & OPFLAG_ISNOOP ) break;
+#endif
+
+  /* Only flags that can be set are SAVEPOSITION and AUXDELETE */
+ assert( (pOp->p5 & ~(OPFLAG_SAVEPOSITION|OPFLAG_AUXDELETE))==0 );
+ assert( OPFLAG_SAVEPOSITION==BTREE_SAVEPOSITION );
+ assert( OPFLAG_AUXDELETE==BTREE_AUXDELETE );
+
+#ifdef SQLITE_DEBUG
+ if( p->pFrame==0 ){
+ if( pC->isEphemeral==0
+ && (pOp->p5 & OPFLAG_AUXDELETE)==0
+ && (pC->wrFlag & OPFLAG_FORDELETE)==0
+ ){
+ nExtraDelete++;
+ }
+ if( pOp->p2 & OPFLAG_NCHANGE ){
+ nExtraDelete--;
+ }
+ }
+#endif
+
+ rc = sqlite3BtreeDelete(pC->uc.pCursor, pOp->p5);
+ pC->cacheStatus = CACHE_STALE;
+ pC->seekResult = 0;
+ if( rc ) goto abort_due_to_error;
+
+ /* Invoke the update-hook if required. */
+ if( opflags & OPFLAG_NCHANGE ){
+ p->nChange++;
+ if( db->xUpdateCallback && HasRowid(pTab) ){
+ db->xUpdateCallback(db->pUpdateArg, SQLITE_DELETE, zDb, pTab->zName,
+ pC->movetoTarget);
+ assert( pC->iDb>=0 );
+ }
+ }
+
+ break;
+}
+/* Opcode: ResetCount * * * * *
+**
+** The value of the change counter is copied to the database handle
+** change counter (returned by subsequent calls to sqlite3_changes()).
+** Then the VM's internal change counter is reset to 0.
+** This is used by trigger programs.
+*/
+case OP_ResetCount: {
+ sqlite3VdbeSetChanges(db, p->nChange);
+ p->nChange = 0;
+ break;
+}
+
+/* Opcode: SorterCompare P1 P2 P3 P4
+** Synopsis: if key(P1)!=trim(r[P3],P4) goto P2
+**
+** P1 is a sorter cursor. This instruction compares a prefix of the
+** record blob in register P3 against a prefix of the entry that
+** the sorter cursor currently points to. Only the first P4 fields
+** of r[P3] and the sorter record are compared.
+**
+** If either P3 or the sorter contains a NULL in one of their significant
+** fields (not counting the P4 fields at the end which are ignored) then
+** the comparison is assumed to be equal.
+**
+** Fall through to next instruction if the two records compare equal to
+** each other. Jump to P2 if they are different.
+*/
+case OP_SorterCompare: {
+ VdbeCursor *pC;
+ int res;
+ int nKeyCol;
+
+ pC = p->apCsr[pOp->p1];
+ assert( isSorter(pC) );
+ assert( pOp->p4type==P4_INT32 );
+ pIn3 = &aMem[pOp->p3];
+ nKeyCol = pOp->p4.i;
+ res = 0;
+ rc = sqlite3VdbeSorterCompare(pC, pIn3, nKeyCol, &res);
+ VdbeBranchTaken(res!=0,2);
+ if( rc ) goto abort_due_to_error;
+ if( res ) goto jump_to_p2;
+ break;
+}
+
+/* Opcode: SorterData P1 P2 P3 * *
+** Synopsis: r[P2]=data
+**
+** Write into register P2 the current sorter data for sorter cursor P1.
+** Then clear the column header cache on cursor P3.
+**
+** This opcode is normally used to move a record out of the sorter and into
+** a register that is the source for a pseudo-table cursor created using
+** OpenPseudo. That pseudo-table cursor is the one that is identified by
+** parameter P3. Clearing the P3 column cache as part of this opcode saves
+** us from having to issue a separate NullRow instruction to clear that cache.
+*/
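+/* A rough sketch of the sorter read-out loop that this opcode belongs
+** to; operand numbers are illustrative only:
+**
+**     SorterSort  1, empty          -- finish sorting, jump if no rows
+**   top:
+**     SorterData  1, r[2], 2        -- sorted record into r[2]; clears
+**                                   -- the column cache on cursor 2
+**     Column      2, 0, r[3]        -- decode fields via the pseudo-table
+**     ...
+**     SorterNext  1, top
+**   empty:
+*/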
+case OP_SorterData: {
+ VdbeCursor *pC;
+
+ pOut = &aMem[pOp->p2];
+ pC = p->apCsr[pOp->p1];
+ assert( isSorter(pC) );
+ rc = sqlite3VdbeSorterRowkey(pC, pOut);
+ assert( rc!=SQLITE_OK || (pOut->flags & MEM_Blob) );
+ assert( pOp->p1>=0 && pOp->p1<p->nCursor );
+ if( rc ) goto abort_due_to_error;
+ p->apCsr[pOp->p3]->cacheStatus = CACHE_STALE;
+ break;
+}
+
+/* Opcode: RowData P1 P2 P3 * *
+** Synopsis: r[P2]=data
+**
+** Write into register P2 the complete row content for the row at
+** which cursor P1 is currently pointing.
+** There is no interpretation of the data.
+** It is just copied onto the P2 register exactly as
+** it is found in the database file.
+**
+** If cursor P1 is an index, then the content is the key of the row.
+** If cursor P1 is a table, then the content extracted is the data.
+**
+** The P1 cursor must be pointing to a valid row (not a NULL row)
+** of a real table, not a pseudo-table.
+**
+** If P3!=0 then this opcode is allowed to make an ephemeral pointer
+** into the database page. That means that the content of the output
+** register will be invalidated as soon as the cursor moves - including
+** moves caused by other cursors that "save" the current cursor's
+** position in order that they can write to the same table. If P3==0
+** then a copy of the data is made into memory. P3!=0 is faster, but
+** P3==0 is safer.
+**
+** If P3!=0 then the content of the P2 register is unsuitable for use
+** in OP_Result and any OP_Result will invalidate the P2 register content.
+** The P2 register content is invalidated by opcodes like OP_Function or
+** by any use of another cursor pointing to the same table.
+*/
+case OP_RowData: {
+ VdbeCursor *pC;
+ BtCursor *pCrsr;
+ u32 n;
+
+ pOut = out2Prerelease(p, pOp);
+
+ assert( pOp->p1>=0 && pOp->p1<p->nCursor );
+ pC = p->apCsr[pOp->p1];
+ assert( pC!=0 );
+ assert( pC->eCurType==CURTYPE_BTREE );
+ assert( isSorter(pC)==0 );
+ assert( pC->nullRow==0 );
+ assert( pC->uc.pCursor!=0 );
+ pCrsr = pC->uc.pCursor;
+
+ /* The OP_RowData opcodes always follow OP_NotExists or
+  ** OP_SeekRowid or OP_Rewind/OP_Next with no intervening instructions
+  ** that might invalidate the cursor.
+  ** If this were not the case, one of the following assert()s
+ ** would fail. Should this ever change (because of changes in the code
+ ** generator) then the fix would be to insert a call to
+ ** sqlite3VdbeCursorMoveto().
+ */
+ assert( pC->deferredMoveto==0 );
+ assert( sqlite3BtreeCursorIsValid(pCrsr) );
+
+ n = sqlite3BtreePayloadSize(pCrsr);
+ if( n>(u32)db->aLimit[SQLITE_LIMIT_LENGTH] ){
+ goto too_big;
+ }
+ testcase( n==0 );
+ rc = sqlite3VdbeMemFromBtreeZeroOffset(pCrsr, n, pOut);
+ if( rc ) goto abort_due_to_error;
+ if( !pOp->p3 ) Deephemeralize(pOut);
+ UPDATE_MAX_BLOBSIZE(pOut);
+ REGISTER_TRACE(pOp->p2, pOut);
+ break;
+}
+
+/* Opcode: Rowid P1 P2 * * *
+** Synopsis: r[P2]=rowid
+**
+** Store in register P2 an integer which is the key of the table entry that
+** P1 is currently pointing to.
+**
+** P1 can be either an ordinary table or a virtual table. There used to
+** be a separate OP_VRowid opcode for use with virtual tables, but this
+** one opcode now works for both table types.
+*/
+case OP_Rowid: { /* out2 */
+ VdbeCursor *pC;
+ i64 v;
+ sqlite3_vtab *pVtab;
+ const sqlite3_module *pModule;
+
+ pOut = out2Prerelease(p, pOp);
+ assert( pOp->p1>=0 && pOp->p1<p->nCursor );
+ pC = p->apCsr[pOp->p1];
+ assert( pC!=0 );
+ assert( pC->eCurType!=CURTYPE_PSEUDO || pC->nullRow );
+ if( pC->nullRow ){
+ pOut->flags = MEM_Null;
+ break;
+ }else if( pC->deferredMoveto ){
+ v = pC->movetoTarget;
+#ifndef SQLITE_OMIT_VIRTUALTABLE
+ }else if( pC->eCurType==CURTYPE_VTAB ){
+ assert( pC->uc.pVCur!=0 );
+ pVtab = pC->uc.pVCur->pVtab;
+ pModule = pVtab->pModule;
+ assert( pModule->xRowid );
+ rc = pModule->xRowid(pC->uc.pVCur, &v);
+ sqlite3VtabImportErrmsg(p, pVtab);
+ if( rc ) goto abort_due_to_error;
+#endif /* SQLITE_OMIT_VIRTUALTABLE */
+ }else{
+ assert( pC->eCurType==CURTYPE_BTREE );
+ assert( pC->uc.pCursor!=0 );
+ rc = sqlite3VdbeCursorRestore(pC);
+ if( rc ) goto abort_due_to_error;
+ if( pC->nullRow ){
+ pOut->flags = MEM_Null;
+ break;
+ }
+ v = sqlite3BtreeIntegerKey(pC->uc.pCursor);
+ }
+ pOut->u.i = v;
+ break;
+}
+
+/* Opcode: NullRow P1 * * * *
+**
+** Move the cursor P1 to a null row. Any OP_Column operations
+** that occur while the cursor is on the null row will always
+** write a NULL.
+*/
+case OP_NullRow: {
+ VdbeCursor *pC;
+
+ assert( pOp->p1>=0 && pOp->p1<p->nCursor );
+ pC = p->apCsr[pOp->p1];
+ assert( pC!=0 );
+ pC->nullRow = 1;
+ pC->cacheStatus = CACHE_STALE;
+ if( pC->eCurType==CURTYPE_BTREE ){
+ assert( pC->uc.pCursor!=0 );
+ sqlite3BtreeClearCursor(pC->uc.pCursor);
+ }
+#ifdef SQLITE_DEBUG
+ if( pC->seekOp==0 ) pC->seekOp = OP_NullRow;
+#endif
+ break;
+}
+
+/* Opcode: SeekEnd P1 * * * *
+**
+** Position cursor P1 at the end of the btree for the purpose of
+** appending a new entry onto the btree.
+**
+** It is assumed that the cursor is used only for appending and so
+** if the cursor is valid, then the cursor must already be pointing
+** at the end of the btree and so no changes are made to
+** the cursor.
+*/
+/* Opcode: Last P1 P2 * * *
+**
+** The next use of the Rowid or Column or Prev instruction for P1
+** will refer to the last entry in the database table or index.
+** If the table or index is empty and P2>0, then jump immediately to P2.
+** If P2 is 0 or if the table or index is not empty, fall through
+** to the following instruction.
+**
+** This opcode leaves the cursor configured to move in reverse order,
+** from the end toward the beginning. In other words, the cursor is
+** configured to use Prev, not Next.
+*/
+case OP_SeekEnd:
+case OP_Last: { /* jump */
+ VdbeCursor *pC;
+ BtCursor *pCrsr;
+ int res;
+
+ assert( pOp->p1>=0 && pOp->p1<p->nCursor );
+ pC = p->apCsr[pOp->p1];
+ assert( pC!=0 );
+ assert( pC->eCurType==CURTYPE_BTREE );
+ pCrsr = pC->uc.pCursor;
+ res = 0;
+ assert( pCrsr!=0 );
+#ifdef SQLITE_DEBUG
+ pC->seekOp = pOp->opcode;
+#endif
+ if( pOp->opcode==OP_SeekEnd ){
+ assert( pOp->p2==0 );
+ pC->seekResult = -1;
+ if( sqlite3BtreeCursorIsValidNN(pCrsr) ){
+ break;
+ }
+ }
+ rc = sqlite3BtreeLast(pCrsr, &res);
+ pC->nullRow = (u8)res;
+ pC->deferredMoveto = 0;
+ pC->cacheStatus = CACHE_STALE;
+ if( rc ) goto abort_due_to_error;
+ if( pOp->p2>0 ){
+ VdbeBranchTaken(res!=0,2);
+ if( res ) goto jump_to_p2;
+ }
+ break;
+}
+
+/* Opcode: IfSmaller P1 P2 P3 * *
+**
+** Estimate the number of rows in the table P1. Jump to P2 if that
+** estimate is less than approximately 2**(0.1*P3).
+*/
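+/* P3 is compared against a LogEst: sqlite3LogEst(n) is approximately
+** 10*log2(n), so the jump is taken when the estimated row count is below
+** roughly 2**(P3/10). For example, P3==33 corresponds to about 2**3.3,
+** i.e. roughly 10 rows.
+*/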
+case OP_IfSmaller: { /* jump */
+ VdbeCursor *pC;
+ BtCursor *pCrsr;
+ int res;
+ i64 sz;
+
+ assert( pOp->p1>=0 && pOp->p1<p->nCursor );
+ pC = p->apCsr[pOp->p1];
+ assert( pC!=0 );
+ pCrsr = pC->uc.pCursor;
+ assert( pCrsr );
+ rc = sqlite3BtreeFirst(pCrsr, &res);
+ if( rc ) goto abort_due_to_error;
+ if( res==0 ){
+ sz = sqlite3BtreeRowCountEst(pCrsr);
+ if( ALWAYS(sz>=0) && sqlite3LogEst((u64)sz)<pOp->p3 ) res = 1;
+ }
+ VdbeBranchTaken(res!=0,2);
+ if( res ) goto jump_to_p2;
+ break;
+}
+
+
+/* Opcode: SorterSort P1 P2 * * *
+**
+** After all records have been inserted into the Sorter object
+** identified by P1, invoke this opcode to actually do the sorting.
+** Jump to P2 if there are no records to be sorted.
+**
+** This opcode is an alias for OP_Sort and OP_Rewind that is used
+** for Sorter objects.
+*/
+/* Opcode: Sort P1 P2 * * *
+**
+** This opcode does exactly the same thing as OP_Rewind except that
+** it increments an undocumented global variable used for testing.
+**
+** Sorting is accomplished by writing records into a sorting index,
+** then rewinding that index and playing it back from beginning to
+** end. We use the OP_Sort opcode instead of OP_Rewind to do the
+** rewinding so that the global variable will be incremented and
+** regression tests can determine whether or not the optimizer is
+** correctly optimizing out sorts.
+*/
+case OP_SorterSort: /* jump */
+case OP_Sort: { /* jump */
+#ifdef SQLITE_TEST
+ sqlite3_sort_count++;
+ sqlite3_search_count--;
+#endif
+ p->aCounter[SQLITE_STMTSTATUS_SORT]++;
+ /* Fall through into OP_Rewind */
+ /* no break */ deliberate_fall_through
+}
+/* Opcode: Rewind P1 P2 * * *
+**
+** The next use of the Rowid or Column or Next instruction for P1
+** will refer to the first entry in the database table or index.
+** If the table or index is empty, jump immediately to P2.
+** If the table or index is not empty, fall through to the following
+** instruction.
+**
+** This opcode leaves the cursor configured to move in forward order,
+** from the beginning toward the end. In other words, the cursor is
+** configured to use Next, not Prev.
+*/
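+/* The canonical full-scan loop built from Rewind and Next, with
+** illustrative operand numbers:
+**
+**     Rewind  0, empty          -- go to the first row; jump if empty
+**   top:
+**     Column  0, 0, r[1]        -- read from the current row
+**     ...
+**     Next    0, top            -- advance; loop while rows remain
+**   empty:
+*/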
+case OP_Rewind: { /* jump */
+ VdbeCursor *pC;
+ BtCursor *pCrsr;
+ int res;
+
+ assert( pOp->p1>=0 && pOp->p1<p->nCursor );
+ assert( pOp->p5==0 );
+ pC = p->apCsr[pOp->p1];
+ assert( pC!=0 );
+ assert( isSorter(pC)==(pOp->opcode==OP_SorterSort) );
+ res = 1;
+#ifdef SQLITE_DEBUG
+ pC->seekOp = OP_Rewind;
+#endif
+ if( isSorter(pC) ){
+ rc = sqlite3VdbeSorterRewind(pC, &res);
+ }else{
+ assert( pC->eCurType==CURTYPE_BTREE );
+ pCrsr = pC->uc.pCursor;
+ assert( pCrsr );
+ rc = sqlite3BtreeFirst(pCrsr, &res);
+ pC->deferredMoveto = 0;
+ pC->cacheStatus = CACHE_STALE;
+ }
+ if( rc ) goto abort_due_to_error;
+ pC->nullRow = (u8)res;
+ assert( pOp->p2>0 && pOp->p2<p->nOp );
+ VdbeBranchTaken(res!=0,2);
+ if( res ) goto jump_to_p2;
+ break;
+}
+
+/* Opcode: Next P1 P2 P3 P4 P5
+**
+** Advance cursor P1 so that it points to the next key/data pair in its
+** table or index. If there are no more key/value pairs then fall through
+** to the following instruction. But if the cursor advance was successful,
+** jump immediately to P2.
+**
+** The Next opcode is only valid following a SeekGT, SeekGE, or
+** OP_Rewind opcode used to position the cursor. Next is not allowed
+** to follow SeekLT, SeekLE, or OP_Last.
+**
+** The P1 cursor must be for a real table, not a pseudo-table. P1 must have
+** been opened prior to this opcode or the program will segfault.
+**
+** The P3 value is a hint to the btree implementation. If P3==1, that
+** means P1 is an SQL index and that this instruction could have been
+** omitted if that index had been unique. P3 is usually 0. P3 is
+** always either 0 or 1.
+**
+** P4 is always of type P4_ADVANCE. The function pointer points to
+** sqlite3BtreeNext().
+**
+** If P5 is positive and the jump is taken, then event counter
+** number P5-1 in the prepared statement is incremented.
+**
+** See also: Prev
+*/
+/* Opcode: Prev P1 P2 P3 P4 P5
+**
+** Back up cursor P1 so that it points to the previous key/data pair in its
+** table or index. If there are no previous key/value pairs then fall through
+** to the following instruction. But if the cursor backup was successful,
+** jump immediately to P2.
+**
+** The Prev opcode is only valid following a SeekLT, SeekLE, or
+** OP_Last opcode used to position the cursor. Prev is not allowed
+** to follow SeekGT, SeekGE, or OP_Rewind.
+**
+** The P1 cursor must be for a real table, not a pseudo-table. If P1 is
+** not open then the behavior is undefined.
+**
+** The P3 value is a hint to the btree implementation. If P3==1, that
+** means P1 is an SQL index and that this instruction could have been
+** omitted if that index had been unique. P3 is usually 0. P3 is
+** always either 0 or 1.
+**
+** P4 is always of type P4_ADVANCE. The function pointer points to
+** sqlite3BtreePrevious().
+**
+** If P5 is positive and the jump is taken, then event counter
+** number P5-1 in the prepared statement is incremented.
+*/
+/* Opcode: SorterNext P1 P2 * * P5
+**
+** This opcode works just like OP_Next except that P1 must be a
+** sorter object for which the OP_SorterSort opcode has been
+** invoked. This opcode advances the cursor to the next sorted
+** record, or jumps to P2 if there are no more sorted records.
+*/
+case OP_SorterNext: { /* jump */
+ VdbeCursor *pC;
+
+ pC = p->apCsr[pOp->p1];
+ assert( isSorter(pC) );
+ rc = sqlite3VdbeSorterNext(db, pC);
+ goto next_tail;
+case OP_Prev: /* jump */
+case OP_Next: /* jump */
+ assert( pOp->p1>=0 && pOp->p1<p->nCursor );
+ assert( pOp->p5<ArraySize(p->aCounter) );
+ pC = p->apCsr[pOp->p1];
+ assert( pC!=0 );
+ assert( pC->deferredMoveto==0 );
+ assert( pC->eCurType==CURTYPE_BTREE );
+ assert( pOp->opcode!=OP_Next || pOp->p4.xAdvance==sqlite3BtreeNext );
+ assert( pOp->opcode!=OP_Prev || pOp->p4.xAdvance==sqlite3BtreePrevious );
+
+ /* The Next opcode is only used after SeekGT, SeekGE, Rewind, and Found.
+ ** The Prev opcode is only used after SeekLT, SeekLE, and Last. */
+ assert( pOp->opcode!=OP_Next
+ || pC->seekOp==OP_SeekGT || pC->seekOp==OP_SeekGE
+ || pC->seekOp==OP_Rewind || pC->seekOp==OP_Found
+ || pC->seekOp==OP_NullRow|| pC->seekOp==OP_SeekRowid
+ || pC->seekOp==OP_IfNoHope);
+ assert( pOp->opcode!=OP_Prev
+ || pC->seekOp==OP_SeekLT || pC->seekOp==OP_SeekLE
+ || pC->seekOp==OP_Last || pC->seekOp==OP_IfNoHope
+ || pC->seekOp==OP_NullRow);
+
+ rc = pOp->p4.xAdvance(pC->uc.pCursor, pOp->p3);
+next_tail:
+ pC->cacheStatus = CACHE_STALE;
+ VdbeBranchTaken(rc==SQLITE_OK,2);
+ if( rc==SQLITE_OK ){
+ pC->nullRow = 0;
+ p->aCounter[pOp->p5]++;
+#ifdef SQLITE_TEST
+ sqlite3_search_count++;
+#endif
+ goto jump_to_p2_and_check_for_interrupt;
+ }
+ if( rc!=SQLITE_DONE ) goto abort_due_to_error;
+ rc = SQLITE_OK;
+ pC->nullRow = 1;
+ goto check_for_interrupt;
+}
+
+/* Opcode: IdxInsert P1 P2 P3 P4 P5
+** Synopsis: key=r[P2]
+**
+** Register P2 holds an SQL index key made using the
+** MakeRecord instructions. This opcode writes that key
+** into the index P1. Data for the entry is nil.
+**
+** If P4 is not zero, then it is the number of values in the unpacked
+** key of reg(P2). In that case, P3 is the index of the first register
+** for the unpacked key. The availability of the unpacked key can sometimes
+** be an optimization.
+**
+** If P5 has the OPFLAG_APPEND bit set, that is a hint to the b-tree layer
+** that this insert is likely to be an append.
+**
+** If P5 has the OPFLAG_NCHANGE bit set, then the change counter is
+** incremented by this instruction. If the OPFLAG_NCHANGE bit is clear,
+** then the change counter is unchanged.
+**
+** If the OPFLAG_USESEEKRESULT flag of P5 is set, the implementation might
+** run faster by avoiding an unnecessary seek on cursor P1. However,
+** the OPFLAG_USESEEKRESULT flag must only be set if there have been no prior
+** seeks on the cursor or if the most recent seek used a key equivalent
+** to P2.
+**
+** This instruction only works for indices. The equivalent instruction
+** for tables is OP_Insert.
+*/
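+/* A rough sketch of the index-maintenance step emitted alongside an
+** INSERT into a table with one index; operands are illustrative only:
+**
+**     MakeRecord  2, 2, r[9]        -- key = indexed column(s) + rowid
+**     IdxInsert   2, r[9], 2, 2     -- add that key to index cursor 2
+*/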
+case OP_IdxInsert: { /* in2 */
+ VdbeCursor *pC;
+ BtreePayload x;
+
+ assert( pOp->p1>=0 && pOp->p1<p->nCursor );
+ pC = p->apCsr[pOp->p1];
+ sqlite3VdbeIncrWriteCounter(p, pC);
+ assert( pC!=0 );
+ assert( !isSorter(pC) );
+ pIn2 = &aMem[pOp->p2];
+ assert( pIn2->flags & MEM_Blob );
+ if( pOp->p5 & OPFLAG_NCHANGE ) p->nChange++;
+ assert( pC->eCurType==CURTYPE_BTREE );
+ assert( pC->isTable==0 );
+ rc = ExpandBlob(pIn2);
+ if( rc ) goto abort_due_to_error;
+ x.nKey = pIn2->n;
+ x.pKey = pIn2->z;
+ x.aMem = aMem + pOp->p3;
+ x.nMem = (u16)pOp->p4.i;
+ rc = sqlite3BtreeInsert(pC->uc.pCursor, &x,
+ (pOp->p5 & (OPFLAG_APPEND|OPFLAG_SAVEPOSITION)),
+ ((pOp->p5 & OPFLAG_USESEEKRESULT) ? pC->seekResult : 0)
+ );
+ assert( pC->deferredMoveto==0 );
+ pC->cacheStatus = CACHE_STALE;
+ if( rc) goto abort_due_to_error;
+ break;
+}
+
+/* Opcode: SorterInsert P1 P2 * * *
+** Synopsis: key=r[P2]
+**
+** Register P2 holds an SQL index key made using the
+** MakeRecord instructions. This opcode writes that key
+** into the sorter P1. Data for the entry is nil.
+*/
+case OP_SorterInsert: { /* in2 */
+ VdbeCursor *pC;
+
+ assert( pOp->p1>=0 && pOp->p1<p->nCursor );
+ pC = p->apCsr[pOp->p1];
+ sqlite3VdbeIncrWriteCounter(p, pC);
+ assert( pC!=0 );
+ assert( isSorter(pC) );
+ pIn2 = &aMem[pOp->p2];
+ assert( pIn2->flags & MEM_Blob );
+ assert( pC->isTable==0 );
+ rc = ExpandBlob(pIn2);
+ if( rc ) goto abort_due_to_error;
+ rc = sqlite3VdbeSorterWrite(pC, pIn2);
+ if( rc) goto abort_due_to_error;
+ break;
+}
+
+/* Opcode: IdxDelete P1 P2 P3 * P5
+** Synopsis: key=r[P2@P3]
+**
+** The content of the P3 registers starting at register P2 forms
+** an unpacked index key. This opcode removes that entry from the
+** index opened by cursor P1.
+**
+** If P5 is not zero, then raise an SQLITE_CORRUPT_INDEX error
+** if no matching index entry is found. This happens when running
+** an UPDATE or DELETE statement and the index entry to be updated
+** or deleted is not found. For some uses of IdxDelete
+** (example: the EXCEPT operator) it does not matter that no matching
+** entry is found. For those cases, P5 is zero.
+*/
+case OP_IdxDelete: {
+ VdbeCursor *pC;
+ BtCursor *pCrsr;
+ int res;
+ UnpackedRecord r;
+
+ assert( pOp->p3>0 );
+ assert( pOp->p2>0 && pOp->p2+pOp->p3<=(p->nMem+1 - p->nCursor)+1 );
+ assert( pOp->p1>=0 && pOp->p1<p->nCursor );
+ pC = p->apCsr[pOp->p1];
+ assert( pC!=0 );
+ assert( pC->eCurType==CURTYPE_BTREE );
+ sqlite3VdbeIncrWriteCounter(p, pC);
+ pCrsr = pC->uc.pCursor;
+ assert( pCrsr!=0 );
+ r.pKeyInfo = pC->pKeyInfo;
+ r.nField = (u16)pOp->p3;
+ r.default_rc = 0;
+ r.aMem = &aMem[pOp->p2];
+ rc = sqlite3BtreeMovetoUnpacked(pCrsr, &r, 0, 0, &res);
+ if( rc ) goto abort_due_to_error;
+ if( res==0 ){
+ rc = sqlite3BtreeDelete(pCrsr, BTREE_AUXDELETE);
+ if( rc ) goto abort_due_to_error;
+ }else if( pOp->p5 ){
+ rc = SQLITE_CORRUPT_INDEX;
+ goto abort_due_to_error;
+ }
+ assert( pC->deferredMoveto==0 );
+ pC->cacheStatus = CACHE_STALE;
+ pC->seekResult = 0;
+ break;
+}
+
+/* Opcode: DeferredSeek P1 * P3 P4 *
+** Synopsis: Move P3 to P1.rowid if needed
+**
+** P1 is an open index cursor and P3 is a cursor on the corresponding
+** table. This opcode does a deferred seek of the P3 table cursor
+** to the row that corresponds to the current row of P1.
+**
+** This is a deferred seek. Nothing actually happens until
+** the cursor is used to read a record. That way, if no reads
+** occur, no unnecessary I/O happens.
+**
+** P4 may be an array of integers (type P4_INTARRAY) containing
+** one entry for each column in the P3 table. If array entry a(i)
+** is non-zero, then reading column a(i)-1 from cursor P3 is
+** equivalent to performing the deferred seek and then reading column i
+** from P1. This information is stored in P3 and used to redirect
+** reads against P3 over to P1, thus possibly avoiding the need to
+** seek and read cursor P3.
+*/
+/* Opcode: IdxRowid P1 P2 * * *
+** Synopsis: r[P2]=rowid
+**
+** Write into register P2 an integer which is the last entry in the record at
+** the end of the index key pointed to by cursor P1. This integer should be
+** the rowid of the table entry to which this index entry points.
+**
+** See also: Rowid, MakeRecord.
+*/
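+/* A rough sketch of an index-driven scan that relies on the deferred
+** seek; operand numbers are illustrative only:
+**
+**     SeekGE        2, done, r[1], 1  -- position the index cursor
+**   top:
+**     DeferredSeek  2, 0, 1           -- note the rowid for table cursor 1
+**     Column        1, 3, r[2]        -- first table read completes the seek
+**     ...
+**     Next          2, top
+**   done:
+*/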
+case OP_DeferredSeek:
+case OP_IdxRowid: { /* out2 */
+ VdbeCursor *pC; /* The P1 index cursor */
+ VdbeCursor *pTabCur; /* The P2 table cursor (OP_DeferredSeek only) */
+ i64 rowid; /* Rowid that P1 current points to */
+
+ assert( pOp->p1>=0 && pOp->p1<p->nCursor );
+ pC = p->apCsr[pOp->p1];
+ assert( pC!=0 );
+ assert( pC->eCurType==CURTYPE_BTREE );
+ assert( pC->uc.pCursor!=0 );
+ assert( pC->isTable==0 );
+ assert( pC->deferredMoveto==0 );
+ assert( !pC->nullRow || pOp->opcode==OP_IdxRowid );
+
+ /* The IdxRowid and Seek opcodes are combined because of the commonality
+ ** of sqlite3VdbeCursorRestore() and sqlite3VdbeIdxRowid(). */
+ rc = sqlite3VdbeCursorRestore(pC);
+
+  /* sqlite3VdbeCursorRestore() can only fail if the record has been deleted
+  ** out from under the cursor. That will never happen for an IdxRowid
+ ** or Seek opcode */
+ if( NEVER(rc!=SQLITE_OK) ) goto abort_due_to_error;
+
+ if( !pC->nullRow ){
+ rowid = 0; /* Not needed. Only used to silence a warning. */
+ rc = sqlite3VdbeIdxRowid(db, pC->uc.pCursor, &rowid);
+ if( rc!=SQLITE_OK ){
+ goto abort_due_to_error;
+ }
+ if( pOp->opcode==OP_DeferredSeek ){
+ assert( pOp->p3>=0 && pOp->p3<p->nCursor );
+ pTabCur = p->apCsr[pOp->p3];
+ assert( pTabCur!=0 );
+ assert( pTabCur->eCurType==CURTYPE_BTREE );
+ assert( pTabCur->uc.pCursor!=0 );
+ assert( pTabCur->isTable );
+ pTabCur->nullRow = 0;
+ pTabCur->movetoTarget = rowid;
+ pTabCur->deferredMoveto = 1;
+ assert( pOp->p4type==P4_INTARRAY || pOp->p4.ai==0 );
+ pTabCur->aAltMap = pOp->p4.ai;
+ pTabCur->pAltCursor = pC;
+ }else{
+ pOut = out2Prerelease(p, pOp);
+ pOut->u.i = rowid;
+ }
+ }else{
+ assert( pOp->opcode==OP_IdxRowid );
+ sqlite3VdbeMemSetNull(&aMem[pOp->p2]);
+ }
+ break;
+}
+
+/* Opcode: FinishSeek P1 * * * *
+**
+** If cursor P1 was previously moved via OP_DeferredSeek, complete that
+** seek operation now, without further delay. If the cursor seek has
+** already occurred, this instruction is a no-op.
+*/
+case OP_FinishSeek: {
+ VdbeCursor *pC; /* The P1 index cursor */
+
+ assert( pOp->p1>=0 && pOp->p1<p->nCursor );
+ pC = p->apCsr[pOp->p1];
+ if( pC->deferredMoveto ){
+ rc = sqlite3VdbeFinishMoveto(pC);
+ if( rc ) goto abort_due_to_error;
+ }
+ break;
+}
+
+/* Opcode: IdxGE P1 P2 P3 P4 P5
+** Synopsis: key=r[P3@P4]
+**
+** The P4 register values beginning with P3 form an unpacked index
+** key that omits the PRIMARY KEY. Compare this key value against the index
+** that P1 is currently pointing to, ignoring the PRIMARY KEY or ROWID
+** fields at the end.
+**
+** If the P1 index entry is greater than or equal to the key value
+** then jump to P2. Otherwise fall through to the next instruction.
+*/
+/* Opcode: IdxGT P1 P2 P3 P4 P5
+** Synopsis: key=r[P3@P4]
+**
+** The P4 register values beginning with P3 form an unpacked index
+** key that omits the PRIMARY KEY. Compare this key value against the index
+** that P1 is currently pointing to, ignoring the PRIMARY KEY or ROWID
+** fields at the end.
+**
+** If the P1 index entry is greater than the key value
+** then jump to P2. Otherwise fall through to the next instruction.
+*/
+/* Opcode: IdxLT P1 P2 P3 P4 P5
+** Synopsis: key=r[P3@P4]
+**
+** The P4 register values beginning with P3 form an unpacked index
+** key that omits the PRIMARY KEY or ROWID. Compare this key value against
+** the index that P1 is currently pointing to, ignoring the PRIMARY KEY or
+** ROWID on the P1 index.
+**
+** If the P1 index entry is less than the key value then jump to P2.
+** Otherwise fall through to the next instruction.
+*/
+/* Opcode: IdxLE P1 P2 P3 P4 P5
+** Synopsis: key=r[P3@P4]
+**
+** The P4 register values beginning with P3 form an unpacked index
+** key that omits the PRIMARY KEY or ROWID. Compare this key value against
+** the index that P1 is currently pointing to, ignoring the PRIMARY KEY or
+** ROWID on the P1 index.
+**
+** If the P1 index entry is less than or equal to the key value then jump
+** to P2. Otherwise fall through to the next instruction.
+*/
+case OP_IdxLE: /* jump */
+case OP_IdxGT: /* jump */
+case OP_IdxLT: /* jump */
+case OP_IdxGE: { /* jump */
+ VdbeCursor *pC;
+ int res;
+ UnpackedRecord r;
+
+ assert( pOp->p1>=0 && pOp->p1<p->nCursor );
+ pC = p->apCsr[pOp->p1];
+ assert( pC!=0 );
+ assert( pC->isOrdered );
+ assert( pC->eCurType==CURTYPE_BTREE );
+ assert( pC->uc.pCursor!=0);
+ assert( pC->deferredMoveto==0 );
+ assert( pOp->p5==0 || pOp->p5==1 );
+ assert( pOp->p4type==P4_INT32 );
+ r.pKeyInfo = pC->pKeyInfo;
+ r.nField = (u16)pOp->p4.i;
+ if( pOp->opcode<OP_IdxLT ){
+ assert( pOp->opcode==OP_IdxLE || pOp->opcode==OP_IdxGT );
+ r.default_rc = -1;
+ }else{
+ assert( pOp->opcode==OP_IdxGE || pOp->opcode==OP_IdxLT );
+ r.default_rc = 0;
+ }
+ r.aMem = &aMem[pOp->p3];
+#ifdef SQLITE_DEBUG
+ {
+ int i;
+ for(i=0; i<r.nField; i++){
+ assert( memIsValid(&r.aMem[i]) );
+ REGISTER_TRACE(pOp->p3+i, &aMem[pOp->p3+i]);
+ }
+ }
+#endif
+ res = 0; /* Not needed. Only used to silence a warning. */
+ rc = sqlite3VdbeIdxKeyCompare(db, pC, &r, &res);
+ assert( (OP_IdxLE&1)==(OP_IdxLT&1) && (OP_IdxGE&1)==(OP_IdxGT&1) );
+ if( (pOp->opcode&1)==(OP_IdxLT&1) ){
+ assert( pOp->opcode==OP_IdxLE || pOp->opcode==OP_IdxLT );
+ res = -res;
+ }else{
+ assert( pOp->opcode==OP_IdxGE || pOp->opcode==OP_IdxGT );
+ res++;
+ }
+ VdbeBranchTaken(res>0,2);
+ if( rc ) goto abort_due_to_error;
+ if( res>0 ) goto jump_to_p2;
+ break;
+}
+
+/* Opcode: Destroy P1 P2 P3 * *
+**
+** Delete an entire database table or index whose root page in the database
+** file is given by P1.
+**
+** The table being destroyed is in the main database file if P3==0. If
+** P3==1 then the table to be destroyed is in the auxiliary database file
+** that is used to store tables created using CREATE TEMPORARY TABLE.
+**
+** If AUTOVACUUM is enabled then it is possible that another root page
+** might be moved into the newly deleted root page in order to keep all
+** root pages contiguous at the beginning of the database. The former
+** value of the root page that moved - its value before the move occurred -
+** is stored in register P2. If no page movement was required (because the
+** table being dropped was already the last one in the database) then a
+** zero is stored in register P2. If AUTOVACUUM is disabled then a zero
+** is stored in register P2.
+**
+** This opcode throws an error if there are any active reader VMs when
+** it is invoked. This is done to avoid the difficulty associated with
+** updating existing cursors when a root page is moved in an AUTOVACUUM
+** database. This error is thrown even if the database is not an AUTOVACUUM
+** db in order to avoid introducing an incompatibility between autovacuum
+** and non-autovacuum modes.
+**
+** See also: Clear
+*/
+case OP_Destroy: { /* out2 */
+ int iMoved;
+ int iDb;
+
+ sqlite3VdbeIncrWriteCounter(p, 0);
+ assert( p->readOnly==0 );
+ assert( pOp->p1>1 );
+ pOut = out2Prerelease(p, pOp);
+ pOut->flags = MEM_Null;
+ if( db->nVdbeRead > db->nVDestroy+1 ){
+ rc = SQLITE_LOCKED;
+ p->errorAction = OE_Abort;
+ goto abort_due_to_error;
+ }else{
+ iDb = pOp->p3;
+ assert( DbMaskTest(p->btreeMask, iDb) );
+ iMoved = 0; /* Not needed. Only to silence a warning. */
+ rc = sqlite3BtreeDropTable(db->aDb[iDb].pBt, pOp->p1, &iMoved);
+ pOut->flags = MEM_Int;
+ pOut->u.i = iMoved;
+ if( rc ) goto abort_due_to_error;
+#ifndef SQLITE_OMIT_AUTOVACUUM
+ if( iMoved!=0 ){
+ sqlite3RootPageMoved(db, iDb, iMoved, pOp->p1);
+ /* All OP_Destroy operations occur on the same btree */
+ assert( resetSchemaOnFault==0 || resetSchemaOnFault==iDb+1 );
+ resetSchemaOnFault = iDb+1;
+ }
+#endif
+ }
+ break;
+}
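+
+/* Example: a "DROP TABLE t1;" statement typically emits an OP_Destroy
+** for the root page of t1 and for the root page of each of its indexes,
+** followed by OP_DropTable to purge the in-memory schema entry. The
+** moved-page value left in register P2 lets the schema be corrected
+** when AUTOVACUUM relocates another root page.
+*/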
+
+/* Opcode: Clear P1 P2 P3
+**
+** Delete all contents of the database table or index whose root page
+** in the database file is given by P1. But, unlike Destroy, do not
+** remove the table or index from the database file.
+**
+** The table being cleared is in the main database file if P2==0. If
+** P2==1 then the table to be cleared is in the auxiliary database file
+** that is used to store tables created using CREATE TEMPORARY TABLE.
+**
+** If the P3 value is non-zero, then the table referred to must be an
+** intkey table (an SQL table, not an index). In this case the row change
+** count is incremented by the number of rows in the table being cleared.
+** If P3 is greater than zero, then the value stored in register P3 is
+** also incremented by the number of rows in the table being cleared.
+**
+** See also: Destroy
+*/
+case OP_Clear: {
+ int nChange;
+
+ sqlite3VdbeIncrWriteCounter(p, 0);
+ nChange = 0;
+ assert( p->readOnly==0 );
+ assert( DbMaskTest(p->btreeMask, pOp->p2) );
+ rc = sqlite3BtreeClearTable(
+ db->aDb[pOp->p2].pBt, (u32)pOp->p1, (pOp->p3 ? &nChange : 0)
+ );
+ if( pOp->p3 ){
+ p->nChange += nChange;
+ if( pOp->p3>0 ){
+ assert( memIsValid(&aMem[pOp->p3]) );
+ memAboutToChange(p, &aMem[pOp->p3]);
+ aMem[pOp->p3].u.i += nChange;
+ }
+ }
+ if( rc ) goto abort_due_to_error;
+ break;
+}
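+
+/* Example: a "DELETE FROM t1;" with no WHERE clause can often be
+** implemented as a single OP_Clear on the root page of t1 (the
+** "truncate" optimization) instead of a row-by-row delete. Whether the
+** optimization applies depends on triggers, foreign keys, and
+** compile-time options.
+*/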
+
+/* Opcode: ResetSorter P1 * * * *
+**
+** Delete all contents from the ephemeral table or sorter
+** that is open on cursor P1.
+**
+** This opcode only works for cursors used for sorting and
+** opened with OP_OpenEphemeral or OP_SorterOpen.
+*/
+case OP_ResetSorter: {
+ VdbeCursor *pC;
+
+ assert( pOp->p1>=0 && pOp->p1<p->nCursor );
+ pC = p->apCsr[pOp->p1];
+ assert( pC!=0 );
+ if( isSorter(pC) ){
+ sqlite3VdbeSorterReset(db, pC->uc.pSorter);
+ }else{
+ assert( pC->eCurType==CURTYPE_BTREE );
+ assert( pC->isEphemeral );
+ rc = sqlite3BtreeClearTableOfCursor(pC->uc.pCursor);
+ if( rc ) goto abort_due_to_error;
+ }
+ break;
+}
+
+/* Opcode: CreateBtree P1 P2 P3 * *
+** Synopsis: r[P2]=root iDb=P1 flags=P3
+**
+** Allocate a new b-tree in the main database file if P1==0 or in the
+** TEMP database file if P1==1 or in an attached database if
+** P1>1. The P3 argument must be 1 (BTREE_INTKEY) for a rowid table;
+** it must be 2 (BTREE_BLOBKEY) for an index or WITHOUT ROWID table.
+** The root page number of the new b-tree is stored in register P2.
+*/
+case OP_CreateBtree: { /* out2 */
+ Pgno pgno;
+ Db *pDb;
+
+ sqlite3VdbeIncrWriteCounter(p, 0);
+ pOut = out2Prerelease(p, pOp);
+ pgno = 0;
+ assert( pOp->p3==BTREE_INTKEY || pOp->p3==BTREE_BLOBKEY );
+ assert( pOp->p1>=0 && pOp->p1<db->nDb );
+ assert( DbMaskTest(p->btreeMask, pOp->p1) );
+ assert( p->readOnly==0 );
+ pDb = &db->aDb[pOp->p1];
+ assert( pDb->pBt!=0 );
+ rc = sqlite3BtreeCreateTable(pDb->pBt, &pgno, pOp->p3);
+ if( rc ) goto abort_due_to_error;
+ pOut->u.i = pgno;
+ break;
+}
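+
+/* Example: "CREATE TABLE t1(a,b);" typically allocates a new
+** BTREE_INTKEY btree through this opcode, while "CREATE INDEX i1 ON
+** t1(a);" or a WITHOUT ROWID table allocates a BTREE_BLOBKEY btree.
+** The root page number left in register P2 is subsequently written
+** into the schema table.
+*/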
+
+/* Opcode: SqlExec * * * P4 *
+**
+** Run the SQL statement or statements specified in the P4 string.
+*/
+case OP_SqlExec: {
+ sqlite3VdbeIncrWriteCounter(p, 0);
+ db->nSqlExec++;
+ rc = sqlite3_exec(db, pOp->p4.z, 0, 0, 0);
+ db->nSqlExec--;
+ if( rc ) goto abort_due_to_error;
+ break;
+}
+
+/* Opcode: ParseSchema P1 * * P4 *
+**
+** Read and parse all entries from the schema table of database P1
+** that match the WHERE clause P4. If P4 is a NULL pointer, then the
+** entire schema for P1 is reparsed.
+**
+** This opcode invokes the parser to create a new virtual machine,
+** then runs the new virtual machine. It is thus a re-entrant opcode.
+*/
+case OP_ParseSchema: {
+ int iDb;
+ const char *zSchema;
+ char *zSql;
+ InitData initData;
+
+ /* Any prepared statement that invokes this opcode will hold mutexes
+ ** on every btree. This is a prerequisite for invoking
+ ** sqlite3InitCallback().
+ */
+#ifdef SQLITE_DEBUG
+ for(iDb=0; iDb<db->nDb; iDb++){
+ assert( iDb==1 || sqlite3BtreeHoldsMutex(db->aDb[iDb].pBt) );
+ }
+#endif
+
+ iDb = pOp->p1;
+ assert( iDb>=0 && iDb<db->nDb );
+ assert( DbHasProperty(db, iDb, DB_SchemaLoaded) );
+
+#ifndef SQLITE_OMIT_ALTERTABLE
+ if( pOp->p4.z==0 ){
+ sqlite3SchemaClear(db->aDb[iDb].pSchema);
+ db->mDbFlags &= ~DBFLAG_SchemaKnownOk;
+ rc = sqlite3InitOne(db, iDb, &p->zErrMsg, INITFLAG_AlterTable);
+ db->mDbFlags |= DBFLAG_SchemaChange;
+ p->expired = 0;
+ }else
+#endif
+ {
+ zSchema = DFLT_SCHEMA_TABLE;
+ initData.db = db;
+ initData.iDb = iDb;
+ initData.pzErrMsg = &p->zErrMsg;
+ initData.mInitFlags = 0;
+ initData.mxPage = sqlite3BtreeLastPage(db->aDb[iDb].pBt);
+ zSql = sqlite3MPrintf(db,
+ "SELECT*FROM\"%w\".%s WHERE %s ORDER BY rowid",
+ db->aDb[iDb].zDbSName, zSchema, pOp->p4.z);
+ if( zSql==0 ){
+ rc = SQLITE_NOMEM_BKPT;
+ }else{
+ assert( db->init.busy==0 );
+ db->init.busy = 1;
+ initData.rc = SQLITE_OK;
+ initData.nInitRow = 0;
+ assert( !db->mallocFailed );
+ rc = sqlite3_exec(db, zSql, sqlite3InitCallback, &initData, 0);
+ if( rc==SQLITE_OK ) rc = initData.rc;
+ if( rc==SQLITE_OK && initData.nInitRow==0 ){
+ /* The OP_ParseSchema opcode with a non-NULL P4 argument should parse
+ ** at least one SQL statement. Any less than that indicates that
+ ** the sqlite_schema table is corrupt. */
+ rc = SQLITE_CORRUPT_BKPT;
+ }
+ sqlite3DbFreeNN(db, zSql);
+ db->init.busy = 0;
+ }
+ }
+ if( rc ){
+ sqlite3ResetAllSchemasOfConnection(db);
+ if( rc==SQLITE_NOMEM ){
+ goto no_mem;
+ }
+ goto abort_due_to_error;
+ }
+ break;
+}
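+
+/* Example: after "CREATE TABLE t1(a);" the new schema row must be read
+** back and parsed, so an OP_ParseSchema is coded with a P4 WHERE clause
+** roughly of the form "tbl_name='t1' AND type!='trigger'" (the exact
+** text varies by statement and version), so that only the matching
+** schema rows are parsed.
+*/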
+
+#if !defined(SQLITE_OMIT_ANALYZE)
+/* Opcode: LoadAnalysis P1 * * * *
+**
+** Read the sqlite_stat1 table for database P1 and load the content
+** of that table into the internal index hash table. This will cause
+** the analysis to be used when preparing all subsequent queries.
+*/
+case OP_LoadAnalysis: {
+ assert( pOp->p1>=0 && pOp->p1<db->nDb );
+ rc = sqlite3AnalysisLoad(db, pOp->p1);
+ if( rc ) goto abort_due_to_error;
+ break;
+}
+#endif /* !defined(SQLITE_OMIT_ANALYZE) */
+
+/* Opcode: DropTable P1 * * P4 *
+**
+** Remove the internal (in-memory) data structures that describe
+** the table named P4 in database P1. This is called after a table
+** is dropped from disk (using the Destroy opcode) in order to keep
+** the internal representation of the
+** schema consistent with what is on disk.
+*/
+case OP_DropTable: {
+ sqlite3VdbeIncrWriteCounter(p, 0);
+ sqlite3UnlinkAndDeleteTable(db, pOp->p1, pOp->p4.z);
+ break;
+}
+
+/* Opcode: DropIndex P1 * * P4 *
+**
+** Remove the internal (in-memory) data structures that describe
+** the index named P4 in database P1. This is called after an index
+** is dropped from disk (using the Destroy opcode)
+** in order to keep the internal representation of the
+** schema consistent with what is on disk.
+*/
+case OP_DropIndex: {
+ sqlite3VdbeIncrWriteCounter(p, 0);
+ sqlite3UnlinkAndDeleteIndex(db, pOp->p1, pOp->p4.z);
+ break;
+}
+
+/* Opcode: DropTrigger P1 * * P4 *
+**
+** Remove the internal (in-memory) data structures that describe
+** the trigger named P4 in database P1. This is called after a trigger
+** is dropped from disk (using the Destroy opcode) in order to keep
+** the internal representation of the
+** schema consistent with what is on disk.
+*/
+case OP_DropTrigger: {
+ sqlite3VdbeIncrWriteCounter(p, 0);
+ sqlite3UnlinkAndDeleteTrigger(db, pOp->p1, pOp->p4.z);
+ break;
+}
+
+
+#ifndef SQLITE_OMIT_INTEGRITY_CHECK
+/* Opcode: IntegrityCk P1 P2 P3 P4 P5
+**
+** Do an analysis of the currently open database. Store in
+** register P1 the text of an error message describing any problems.
+** If no problems are found, store a NULL in register P1.
+**
+** The register P3 contains one less than the maximum number of allowed errors.
+** At most reg(P3)+1 errors will be reported.
+** In other words, the analysis stops as soon as reg(P3)+1 errors are
+** seen. Reg(P3) is updated with the number of errors remaining.
+**
+** The root page numbers of all tables in the database are integers
+** stored in the P4_INTARRAY argument.
+**
+** If P5 is not zero, the check is done on the auxiliary database
+** file, not the main database file.
+**
+** This opcode is used to implement the integrity_check pragma.
+*/
+case OP_IntegrityCk: {
+ int nRoot; /* Number of tables to check. (Number of root pages.) */
+ Pgno *aRoot; /* Array of rootpage numbers for tables to be checked */
+ int nErr; /* Number of errors reported */
+ char *z; /* Text of the error report */
+ Mem *pnErr; /* Register keeping track of errors remaining */
+
+ assert( p->bIsReader );
+ nRoot = pOp->p2;
+ aRoot = pOp->p4.ai;
+ assert( nRoot>0 );
+ assert( aRoot[0]==(Pgno)nRoot );
+ assert( pOp->p3>0 && pOp->p3<=(p->nMem+1 - p->nCursor) );
+ pnErr = &aMem[pOp->p3];
+ assert( (pnErr->flags & MEM_Int)!=0 );
+ assert( (pnErr->flags & (MEM_Str|MEM_Blob))==0 );
+ pIn1 = &aMem[pOp->p1];
+ assert( pOp->p5<db->nDb );
+ assert( DbMaskTest(p->btreeMask, pOp->p5) );
+ z = sqlite3BtreeIntegrityCheck(db, db->aDb[pOp->p5].pBt, &aRoot[1], nRoot,
+ (int)pnErr->u.i+1, &nErr);
+ sqlite3VdbeMemSetNull(pIn1);
+ if( nErr==0 ){
+ assert( z==0 );
+ }else if( z==0 ){
+ goto no_mem;
+ }else{
+ pnErr->u.i -= nErr-1;
+ sqlite3VdbeMemSetStr(pIn1, z, -1, SQLITE_UTF8, sqlite3_free);
+ }
+ UPDATE_MAX_BLOBSIZE(pIn1);
+ sqlite3VdbeChangeEncoding(pIn1, encoding);
+ goto check_for_interrupt;
+}
+#endif /* SQLITE_OMIT_INTEGRITY_CHECK */
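+
+/* Example: "PRAGMA integrity_check(10);" typically compiles to an
+** OP_IntegrityCk whose error-limit register reg(P3) has been
+** initialized from the pragma argument, so that at most 10 problems
+** are reported before the analysis stops.
+*/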
+
+/* Opcode: RowSetAdd P1 P2 * * *
+** Synopsis: rowset(P1)=r[P2]
+**
+** Insert the integer value held by register P2 into a RowSet object
+** held in register P1.
+**
+** An assertion fails if P2 is not an integer.
+*/
+case OP_RowSetAdd: { /* in1, in2 */
+ pIn1 = &aMem[pOp->p1];
+ pIn2 = &aMem[pOp->p2];
+ assert( (pIn2->flags & MEM_Int)!=0 );
+ if( (pIn1->flags & MEM_Blob)==0 ){
+ if( sqlite3VdbeMemSetRowSet(pIn1) ) goto no_mem;
+ }
+ assert( sqlite3VdbeMemIsRowSet(pIn1) );
+ sqlite3RowSetInsert((RowSet*)pIn1->z, pIn2->u.i);
+ break;
+}
+
+/* Opcode: RowSetRead P1 P2 P3 * *
+** Synopsis: r[P3]=rowset(P1)
+**
+** Extract the smallest value from the RowSet object in P1
+** and put that value into register P3.
+** Or, if RowSet object P1 is initially empty, leave P3
+** unchanged and jump to instruction P2.
+*/
+case OP_RowSetRead: { /* jump, in1, out3 */
+ i64 val;
+
+ pIn1 = &aMem[pOp->p1];
+ assert( (pIn1->flags & MEM_Blob)==0 || sqlite3VdbeMemIsRowSet(pIn1) );
+ if( (pIn1->flags & MEM_Blob)==0
+ || sqlite3RowSetNext((RowSet*)pIn1->z, &val)==0
+ ){
+ /* The boolean index is empty */
+ sqlite3VdbeMemSetNull(pIn1);
+ VdbeBranchTaken(1,2);
+ goto jump_to_p2_and_check_for_interrupt;
+ }else{
+ /* A value was pulled from the index */
+ VdbeBranchTaken(0,2);
+ sqlite3VdbeMemSetInt64(&aMem[pOp->p3], val);
+ }
+ goto check_for_interrupt;
+}
+
+/* Opcode: RowSetTest P1 P2 P3 P4
+** Synopsis: if r[P3] in rowset(P1) goto P2
+**
+** Register P3 is assumed to hold a 64-bit integer value. If register P1
+** contains a RowSet object and that RowSet object contains
+** the value held in P3, jump to register P2. Otherwise, insert the
+** integer in P3 into the RowSet and continue on to the
+** next opcode.
+**
+** The RowSet object is optimized for the case where sets of integers
+** are inserted in distinct phases, where each set contains no duplicates.
+** Each set is identified by a unique P4 value. The first set
+** must have P4==0, the final set must have P4==-1, and all other sets
+** must have P4>0.
+**
+** This allows optimizations: (a) when P4==0 there is no need to test
+** the RowSet object for P3, as it is guaranteed not to contain it,
+** (b) when P4==-1 there is no need to insert the value, as it will
+** never be tested for, and (c) when a value that is part of set X is
+** inserted, there is no need to search to see if the same value was
+** previously inserted as part of set X (only if it was previously
+** inserted as part of some other set).
+*/
+case OP_RowSetTest: { /* jump, in1, in3 */
+ int iSet;
+ int exists;
+
+ pIn1 = &aMem[pOp->p1];
+ pIn3 = &aMem[pOp->p3];
+ iSet = pOp->p4.i;
+ assert( pIn3->flags&MEM_Int );
+
+ /* If there is anything other than a rowset object in memory cell P1,
+ ** delete it now and initialize P1 with an empty rowset
+ */
+ if( (pIn1->flags & MEM_Blob)==0 ){
+ if( sqlite3VdbeMemSetRowSet(pIn1) ) goto no_mem;
+ }
+ assert( sqlite3VdbeMemIsRowSet(pIn1) );
+ assert( pOp->p4type==P4_INT32 );
+ assert( iSet==-1 || iSet>=0 );
+ if( iSet ){
+ exists = sqlite3RowSetTest((RowSet*)pIn1->z, iSet, pIn3->u.i);
+ VdbeBranchTaken(exists!=0,2);
+ if( exists ) goto jump_to_p2;
+ }
+ if( iSet>=0 ){
+ sqlite3RowSetInsert((RowSet*)pIn1->z, pIn3->u.i);
+ }
+ break;
+}
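+
+/* Example: a multi-index OR query such as
+**
+**     SELECT * FROM t1 WHERE a=5 OR b=7;
+**
+** may gather rowids from the index on "a" as one insertion phase and
+** rowids from the index on "b" as another. No duplicates occur within
+** a phase, so OP_RowSetTest only needs to test each candidate against
+** the rowids inserted during earlier phases.
+*/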
+
+
+#ifndef SQLITE_OMIT_TRIGGER
+
+/* Opcode: Program P1 P2 P3 P4 P5
+**
+** Execute the trigger program passed as P4 (type P4_SUBPROGRAM).
+**
+** P1 contains the address of the memory cell that contains the first memory
+** cell in an array of values used as arguments to the sub-program. P2
+** contains the address to jump to if the sub-program throws an IGNORE
+** exception using the RAISE() function. Register P3 contains the address
+** of a memory cell in this (the parent) VM that is used to allocate the
+** memory required by the sub-vdbe at runtime.
+**
+** P4 is a pointer to the VM containing the trigger program.
+**
+** If P5 is non-zero and the same trigger program is already running in
+** an outer frame, this opcode is a no-op; this is how recursive trigger
+** invocation is suppressed when "PRAGMA recursive_triggers" is off.
+*/
+case OP_Program: { /* jump */
+ int nMem; /* Number of memory registers for sub-program */
+ int nByte; /* Bytes of runtime space required for sub-program */
+ Mem *pRt; /* Register to allocate runtime space */
+ Mem *pMem; /* Used to iterate through memory cells */
+ Mem *pEnd; /* Last memory cell in new array */
+ VdbeFrame *pFrame; /* New vdbe frame to execute in */
+ SubProgram *pProgram; /* Sub-program to execute */
+ void *t; /* Token identifying trigger */
+
+ pProgram = pOp->p4.pProgram;
+ pRt = &aMem[pOp->p3];
+ assert( pProgram->nOp>0 );
+
+  /* If the p5 flag is set, then recursive invocation of triggers is
+ ** disabled for backwards compatibility (p5 is set if this sub-program
+ ** is really a trigger, not a foreign key action, and the flag set
+ ** and cleared by the "PRAGMA recursive_triggers" command is clear).
+ **
+ ** It is recursive invocation of triggers, at the SQL level, that is
+ ** disabled. In some cases a single trigger may generate more than one
+ ** SubProgram (if the trigger may be executed with more than one different
+ ** ON CONFLICT algorithm). SubProgram structures associated with a
+ ** single trigger all have the same value for the SubProgram.token
+ ** variable. */
+ if( pOp->p5 ){
+ t = pProgram->token;
+ for(pFrame=p->pFrame; pFrame && pFrame->token!=t; pFrame=pFrame->pParent);
+ if( pFrame ) break;
+ }
+
+ if( p->nFrame>=db->aLimit[SQLITE_LIMIT_TRIGGER_DEPTH] ){
+ rc = SQLITE_ERROR;
+ sqlite3VdbeError(p, "too many levels of trigger recursion");
+ goto abort_due_to_error;
+ }
+
+ /* Register pRt is used to store the memory required to save the state
+ ** of the current program, and the memory required at runtime to execute
+ ** the trigger program. If this trigger has been fired before, then pRt
+ ** is already allocated. Otherwise, it must be initialized. */
+ if( (pRt->flags&MEM_Blob)==0 ){
+ /* SubProgram.nMem is set to the number of memory cells used by the
+ ** program stored in SubProgram.aOp. As well as these, one memory
+ ** cell is required for each cursor used by the program. Set local
+ ** variable nMem (and later, VdbeFrame.nChildMem) to this value.
+ */
+ nMem = pProgram->nMem + pProgram->nCsr;
+ assert( nMem>0 );
+ if( pProgram->nCsr==0 ) nMem++;
+ nByte = ROUND8(sizeof(VdbeFrame))
+ + nMem * sizeof(Mem)
+ + pProgram->nCsr * sizeof(VdbeCursor*)
+ + (pProgram->nOp + 7)/8;
+ pFrame = sqlite3DbMallocZero(db, nByte);
+ if( !pFrame ){
+ goto no_mem;
+ }
+ sqlite3VdbeMemRelease(pRt);
+ pRt->flags = MEM_Blob|MEM_Dyn;
+ pRt->z = (char*)pFrame;
+ pRt->n = nByte;
+ pRt->xDel = sqlite3VdbeFrameMemDel;
+
+ pFrame->v = p;
+ pFrame->nChildMem = nMem;
+ pFrame->nChildCsr = pProgram->nCsr;
+ pFrame->pc = (int)(pOp - aOp);
+ pFrame->aMem = p->aMem;
+ pFrame->nMem = p->nMem;
+ pFrame->apCsr = p->apCsr;
+ pFrame->nCursor = p->nCursor;
+ pFrame->aOp = p->aOp;
+ pFrame->nOp = p->nOp;
+ pFrame->token = pProgram->token;
+#ifdef SQLITE_ENABLE_STMT_SCANSTATUS
+ pFrame->anExec = p->anExec;
+#endif
+#ifdef SQLITE_DEBUG
+ pFrame->iFrameMagic = SQLITE_FRAME_MAGIC;
+#endif
+
+ pEnd = &VdbeFrameMem(pFrame)[pFrame->nChildMem];
+ for(pMem=VdbeFrameMem(pFrame); pMem!=pEnd; pMem++){
+ pMem->flags = MEM_Undefined;
+ pMem->db = db;
+ }
+ }else{
+ pFrame = (VdbeFrame*)pRt->z;
+ assert( pRt->xDel==sqlite3VdbeFrameMemDel );
+ assert( pProgram->nMem+pProgram->nCsr==pFrame->nChildMem
+ || (pProgram->nCsr==0 && pProgram->nMem+1==pFrame->nChildMem) );
+ assert( pProgram->nCsr==pFrame->nChildCsr );
+ assert( (int)(pOp - aOp)==pFrame->pc );
+ }
+
+ p->nFrame++;
+ pFrame->pParent = p->pFrame;
+ pFrame->lastRowid = db->lastRowid;
+ pFrame->nChange = p->nChange;
+ pFrame->nDbChange = p->db->nChange;
+ assert( pFrame->pAuxData==0 );
+ pFrame->pAuxData = p->pAuxData;
+ p->pAuxData = 0;
+ p->nChange = 0;
+ p->pFrame = pFrame;
+ p->aMem = aMem = VdbeFrameMem(pFrame);
+ p->nMem = pFrame->nChildMem;
+ p->nCursor = (u16)pFrame->nChildCsr;
+ p->apCsr = (VdbeCursor **)&aMem[p->nMem];
+ pFrame->aOnce = (u8*)&p->apCsr[pProgram->nCsr];
+ memset(pFrame->aOnce, 0, (pProgram->nOp + 7)/8);
+ p->aOp = aOp = pProgram->aOp;
+ p->nOp = pProgram->nOp;
+#ifdef SQLITE_ENABLE_STMT_SCANSTATUS
+ p->anExec = 0;
+#endif
+#ifdef SQLITE_DEBUG
+ /* Verify that second and subsequent executions of the same trigger do not
+ ** try to reuse register values from the first use. */
+ {
+ int i;
+ for(i=0; i<p->nMem; i++){
+ aMem[i].pScopyFrom = 0; /* Prevent false-positive AboutToChange() errs */
+ MemSetTypeFlag(&aMem[i], MEM_Undefined); /* Fault if this reg is reused */
+ }
+ }
+#endif
+ pOp = &aOp[-1];
+ goto check_for_interrupt;
+}
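+
+/* Example: given "CREATE TRIGGER r1 AFTER INSERT ON t1 BEGIN ... END;",
+** each INSERT into t1 typically runs the trigger body as a sub-program
+** through this opcode. Under the default "PRAGMA recursive_triggers"
+** setting (off), P5 is set, so if the body of r1 itself inserts into
+** t1 the frame search above finds the same token already on the stack
+** and the nested invocation is skipped.
+*/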
+
+/* Opcode: Param P1 P2 * * *
+**
+** This opcode is only ever present in sub-programs called via the
+** OP_Program instruction. Copy a value currently stored in a memory
+** cell of the calling (parent) frame to cell P2 in the current frame's
+** address space. This is used by trigger programs to access the new.*
+** and old.* values.
+**
+** The address of the cell in the parent frame is determined by adding
+** the value of the P1 argument to the value of the P1 argument to the
+** calling OP_Program instruction.
+*/
+case OP_Param: { /* out2 */
+ VdbeFrame *pFrame;
+ Mem *pIn;
+ pOut = out2Prerelease(p, pOp);
+ pFrame = p->pFrame;
+ pIn = &pFrame->aMem[pOp->p1 + pFrame->aOp[pFrame->pc].p1];
+ sqlite3VdbeMemShallowCopy(pOut, pIn, MEM_Ephem);
+ break;
+}
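+
+/* Example: inside an UPDATE trigger, a reference such as "new.x" is
+** typically compiled to an OP_Param that reads the corresponding
+** register of the parent frame, since the new.* and old.* values live
+** in the calling VM's register array rather than in the sub-program's
+** own registers.
+*/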
+
+#endif /* #ifndef SQLITE_OMIT_TRIGGER */
+
+#ifndef SQLITE_OMIT_FOREIGN_KEY
+/* Opcode: FkCounter P1 P2 * * *
+** Synopsis: fkctr[P1]+=P2
+**
+** Increment a "constraint counter" by P2 (P2 may be negative or positive).
+** If P1 is non-zero, the database constraint counter is incremented
+** (deferred foreign key constraints). Otherwise, if P1 is zero, the
+** statement counter is incremented (immediate foreign key constraints).
+*/
+case OP_FkCounter: {
+ if( db->flags & SQLITE_DeferFKs ){
+ db->nDeferredImmCons += pOp->p2;
+ }else if( pOp->p1 ){
+ db->nDeferredCons += pOp->p2;
+ }else{
+ p->nFkConstraint += pOp->p2;
+ }
+ break;
+}
+
+/* Opcode: FkIfZero P1 P2 * * *
+** Synopsis: if fkctr[P1]==0 goto P2
+**
+** This opcode tests if a foreign key constraint-counter is currently zero.
+** If so, jump to instruction P2. Otherwise, fall through to the next
+** instruction.
+**
+** If P1 is non-zero, then the jump is taken if the database constraint-counter
+** is zero (the one that counts deferred constraint violations). If P1 is
+** zero, the jump is taken if the statement constraint-counter is zero
+** (immediate foreign key constraint violations).
+*/
+case OP_FkIfZero: { /* jump */
+ if( pOp->p1 ){
+ VdbeBranchTaken(db->nDeferredCons==0 && db->nDeferredImmCons==0, 2);
+ if( db->nDeferredCons==0 && db->nDeferredImmCons==0 ) goto jump_to_p2;
+ }else{
+ VdbeBranchTaken(p->nFkConstraint==0 && db->nDeferredImmCons==0, 2);
+ if( p->nFkConstraint==0 && db->nDeferredImmCons==0 ) goto jump_to_p2;
+ }
+ break;
+}
+#endif /* #ifndef SQLITE_OMIT_FOREIGN_KEY */
+
+#ifndef SQLITE_OMIT_AUTOINCREMENT
+/* Opcode: MemMax P1 P2 * * *
+** Synopsis: r[P1]=max(r[P1],r[P2])
+**
+** P1 is a register in the root frame of this VM (the root frame is
+** different from the current frame if this instruction is being executed
+** within a sub-program). Set the value of register P1 to the maximum of
+** its current value and the value in register P2.
+**
+** This instruction throws an error if the memory cell is not initially
+** an integer.
+*/
+case OP_MemMax: { /* in2 */
+ VdbeFrame *pFrame;
+ if( p->pFrame ){
+ for(pFrame=p->pFrame; pFrame->pParent; pFrame=pFrame->pParent);
+ pIn1 = &pFrame->aMem[pOp->p1];
+ }else{
+ pIn1 = &aMem[pOp->p1];
+ }
+ assert( memIsValid(pIn1) );
+ sqlite3VdbeMemIntegerify(pIn1);
+ pIn2 = &aMem[pOp->p2];
+ sqlite3VdbeMemIntegerify(pIn2);
+ if( pIn1->u.i<pIn2->u.i){
+ pIn1->u.i = pIn2->u.i;
+ }
+ break;
+}
+#endif /* SQLITE_OMIT_AUTOINCREMENT */
+
+/* Opcode: IfPos P1 P2 P3 * *
+** Synopsis: if r[P1]>0 then r[P1]-=P3, goto P2
+**
+** Register P1 must contain an integer.
+** If the value of register P1 is 1 or greater, subtract P3 from the
+** value in P1 and jump to P2.
+**
+** If the initial value of register P1 is less than 1, then the
+** value is unchanged and control passes through to the next instruction.
+*/
+case OP_IfPos: { /* jump, in1 */
+ pIn1 = &aMem[pOp->p1];
+ assert( pIn1->flags&MEM_Int );
+ VdbeBranchTaken( pIn1->u.i>0, 2);
+ if( pIn1->u.i>0 ){
+ pIn1->u.i -= pOp->p3;
+ goto jump_to_p2;
+ }
+ break;
+}
+
+/* Opcode: OffsetLimit P1 P2 P3 * *
+** Synopsis: if r[P1]>0 then r[P2]=r[P1]+max(0,r[P3]) else r[P2]=(-1)
+**
+** This opcode performs a commonly used computation associated with
+** LIMIT and OFFSET processing. r[P1] holds the limit counter. r[P3]
+** holds the offset counter. The opcode computes the combined value
+** of the LIMIT and OFFSET and stores that value in r[P2]. The r[P2]
+** value computed is the total number of rows that will need to be
+** visited in order to complete the query.
+**
+** If r[P3] is zero or negative, that means there is no OFFSET
+** and r[P2] is set to be the value of the LIMIT, r[P1].
+**
+** if r[P1] is zero or negative, that means there is no LIMIT
+** and r[P2] is set to -1.
+**
+** Otherwise, r[P2] is set to the sum of r[P1] and r[P3].
+*/
+case OP_OffsetLimit: { /* in1, out2, in3 */
+ i64 x;
+ pIn1 = &aMem[pOp->p1];
+ pIn3 = &aMem[pOp->p3];
+ pOut = out2Prerelease(p, pOp);
+ assert( pIn1->flags & MEM_Int );
+ assert( pIn3->flags & MEM_Int );
+ x = pIn1->u.i;
+ if( x<=0 || sqlite3AddInt64(&x, pIn3->u.i>0?pIn3->u.i:0) ){
+ /* If the LIMIT is less than or equal to zero, loop forever. This
+ ** is documented. But also, if the LIMIT+OFFSET exceeds 2^63 then
+ ** also loop forever. This is undocumented. In fact, one could argue
+ ** that the loop should terminate. But assuming 1 billion iterations
+ ** per second (far exceeding the capabilities of any current hardware)
+ ** it would take nearly 300 years to actually reach the limit. So
+ ** looping forever is a reasonable approximation. */
+ pOut->u.i = -1;
+ }else{
+ pOut->u.i = x;
+ }
+ break;
+}
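+
+/* Example: for "... LIMIT 10 OFFSET 5" the counters are r[P1]=10 and
+** r[P3]=5, so this opcode stores r[P2]=15, the total number of rows the
+** query loop must visit. For "... LIMIT -1 OFFSET 5" (no limit),
+** r[P1]<=0 and r[P2] is set to -1, meaning "loop forever".
+*/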
+
+/* Opcode: IfNotZero P1 P2 * * *
+** Synopsis: if r[P1]!=0 then r[P1]--, goto P2
+**
+** Register P1 must contain an integer. If the content of register P1 is
+** initially greater than zero, then decrement the value in register P1.
+** If it is non-zero (negative or positive) then also jump to P2.
+** If register P1 is initially zero, leave it unchanged and fall through.
+*/
+case OP_IfNotZero: { /* jump, in1 */
+ pIn1 = &aMem[pOp->p1];
+ assert( pIn1->flags&MEM_Int );
+ VdbeBranchTaken(pIn1->u.i<0, 2);
+ if( pIn1->u.i ){
+ if( pIn1->u.i>0 ) pIn1->u.i--;
+ goto jump_to_p2;
+ }
+ break;
+}
+
+/* Opcode: DecrJumpZero P1 P2 * * *
+** Synopsis: if (--r[P1])==0 goto P2
+**
+** Register P1 must hold an integer. Decrement the value in P1
+** and jump to P2 if the new value is exactly zero.
+*/
+case OP_DecrJumpZero: { /* jump, in1 */
+ pIn1 = &aMem[pOp->p1];
+ assert( pIn1->flags&MEM_Int );
+ if( pIn1->u.i>SMALLEST_INT64 ) pIn1->u.i--;
+ VdbeBranchTaken(pIn1->u.i==0, 2);
+ if( pIn1->u.i==0 ) goto jump_to_p2;
+ break;
+}
+
+
+/* Opcode: AggStep * P2 P3 P4 P5
+** Synopsis: accum=r[P3] step(r[P2@P5])
+**
+** Execute the xStep function for an aggregate.
+** The function has P5 arguments. P4 is a pointer to the
+** FuncDef structure that specifies the function. Register P3 is the
+** accumulator.
+**
+** The P5 arguments are taken from register P2 and its
+** successors.
+*/
+/* Opcode: AggInverse * P2 P3 P4 P5
+** Synopsis: accum=r[P3] inverse(r[P2@P5])
+**
+** Execute the xInverse function for an aggregate.
+** The function has P5 arguments. P4 is a pointer to the
+** FuncDef structure that specifies the function. Register P3 is the
+** accumulator.
+**
+** The P5 arguments are taken from register P2 and its
+** successors.
+*/
+/* Opcode: AggStep1 P1 P2 P3 P4 P5
+** Synopsis: accum=r[P3] step(r[P2@P5])
+**
+** Execute the xStep (if P1==0) or xInverse (if P1!=0) function for an
+** aggregate. The function has P5 arguments. P4 is a pointer to the
+** FuncDef structure that specifies the function. Register P3 is the
+** accumulator.
+**
+** The P5 arguments are taken from register P2 and its
+** successors.
+**
+** This opcode is initially coded as OP_AggStep0. On first evaluation,
+** the FuncDef stored in P4 is converted into an sqlite3_context and
+** the opcode is changed. In this way, the initialization of the
+** sqlite3_context only happens once, instead of on each call to the
+** step function.
+*/
+case OP_AggInverse:
+case OP_AggStep: {
+ int n;
+ sqlite3_context *pCtx;
+
+ assert( pOp->p4type==P4_FUNCDEF );
+ n = pOp->p5;
+ assert( pOp->p3>0 && pOp->p3<=(p->nMem+1 - p->nCursor) );
+ assert( n==0 || (pOp->p2>0 && pOp->p2+n<=(p->nMem+1 - p->nCursor)+1) );
+ assert( pOp->p3<pOp->p2 || pOp->p3>=pOp->p2+n );
+ pCtx = sqlite3DbMallocRawNN(db, n*sizeof(sqlite3_value*) +
+ (sizeof(pCtx[0]) + sizeof(Mem) - sizeof(sqlite3_value*)));
+ if( pCtx==0 ) goto no_mem;
+ pCtx->pMem = 0;
+ pCtx->pOut = (Mem*)&(pCtx->argv[n]);
+ sqlite3VdbeMemInit(pCtx->pOut, db, MEM_Null);
+ pCtx->pFunc = pOp->p4.pFunc;
+ pCtx->iOp = (int)(pOp - aOp);
+ pCtx->pVdbe = p;
+ pCtx->skipFlag = 0;
+ pCtx->isError = 0;
+ pCtx->argc = n;
+ pOp->p4type = P4_FUNCCTX;
+ pOp->p4.pCtx = pCtx;
+
+ /* OP_AggInverse must have P1==1 and OP_AggStep must have P1==0 */
+ assert( pOp->p1==(pOp->opcode==OP_AggInverse) );
+
+ pOp->opcode = OP_AggStep1;
+ /* Fall through into OP_AggStep */
+ /* no break */ deliberate_fall_through
+}
+case OP_AggStep1: {
+ int i;
+ sqlite3_context *pCtx;
+ Mem *pMem;
+
+ assert( pOp->p4type==P4_FUNCCTX );
+ pCtx = pOp->p4.pCtx;
+ pMem = &aMem[pOp->p3];
+
+#ifdef SQLITE_DEBUG
+ if( pOp->p1 ){
+ /* This is an OP_AggInverse call. Verify that xStep has always
+ ** been called at least once prior to any xInverse call. */
+ assert( pMem->uTemp==0x1122e0e3 );
+ }else{
+ /* This is an OP_AggStep call. Mark it as such. */
+ pMem->uTemp = 0x1122e0e3;
+ }
+#endif
+
+ /* If this function is inside of a trigger, the register array in aMem[]
+ ** might change from one evaluation to the next. The next block of code
+ ** checks to see if the register array has changed, and if so it
+  ** reinitializes the relevant parts of the sqlite3_context object */
+ if( pCtx->pMem != pMem ){
+ pCtx->pMem = pMem;
+ for(i=pCtx->argc-1; i>=0; i--) pCtx->argv[i] = &aMem[pOp->p2+i];
+ }
+
+#ifdef SQLITE_DEBUG
+ for(i=0; i<pCtx->argc; i++){
+ assert( memIsValid(pCtx->argv[i]) );
+ REGISTER_TRACE(pOp->p2+i, pCtx->argv[i]);
+ }
+#endif
+
+ pMem->n++;
+ assert( pCtx->pOut->flags==MEM_Null );
+ assert( pCtx->isError==0 );
+ assert( pCtx->skipFlag==0 );
+#ifndef SQLITE_OMIT_WINDOWFUNC
+ if( pOp->p1 ){
+ (pCtx->pFunc->xInverse)(pCtx,pCtx->argc,pCtx->argv);
+ }else
+#endif
+ (pCtx->pFunc->xSFunc)(pCtx,pCtx->argc,pCtx->argv); /* IMP: R-24505-23230 */
+
+ if( pCtx->isError ){
+ if( pCtx->isError>0 ){
+ sqlite3VdbeError(p, "%s", sqlite3_value_text(pCtx->pOut));
+ rc = pCtx->isError;
+ }
+ if( pCtx->skipFlag ){
+ assert( pOp[-1].opcode==OP_CollSeq );
+ i = pOp[-1].p1;
+ if( i ) sqlite3VdbeMemSetInt64(&aMem[i], 1);
+ pCtx->skipFlag = 0;
+ }
+ sqlite3VdbeMemRelease(pCtx->pOut);
+ pCtx->pOut->flags = MEM_Null;
+ pCtx->isError = 0;
+ if( rc ) goto abort_due_to_error;
+ }
+ assert( pCtx->pOut->flags==MEM_Null );
+ assert( pCtx->skipFlag==0 );
+ break;
+}
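+
+/* Example: for "SELECT sum(a) FROM t1;" the generated loop typically
+** invokes this opcode once per row to call the xStep of sum(). For a
+** sliding window such as "sum(a) OVER (ROWS 2 PRECEDING)",
+** OP_AggInverse is additionally used to call xInverse as rows fall out
+** of the front of the window frame.
+*/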
+
+/* Opcode: AggFinal P1 P2 * P4 *
+** Synopsis: accum=r[P1] N=P2
+**
+** P1 is the memory location that is the accumulator for an aggregate
+** or window function. Execute the finalizer function
+** for an aggregate and store the result in P1.
+**
+** P2 is the number of arguments that the step function takes and
+** P4 is a pointer to the FuncDef for this function. The P2
+** argument is not used by this opcode. It is only there to disambiguate
+** functions that can take varying numbers of arguments. The
+** P4 argument is only needed for the case where
+** the step function was not previously called.
+*/
+/* Opcode: AggValue * P2 P3 P4 *
+** Synopsis: r[P3]=value N=P2
+**
+** Invoke the xValue() function and store the result in register P3.
+**
+** P2 is the number of arguments that the step function takes and
+** P4 is a pointer to the FuncDef for this function. The P2
+** argument is not used by this opcode. It is only there to disambiguate
+** functions that can take varying numbers of arguments. The
+** P4 argument is only needed for the case where
+** the step function was not previously called.
+*/
+case OP_AggValue:
+case OP_AggFinal: {
+ Mem *pMem;
+ assert( pOp->p1>0 && pOp->p1<=(p->nMem+1 - p->nCursor) );
+ assert( pOp->p3==0 || pOp->opcode==OP_AggValue );
+ pMem = &aMem[pOp->p1];
+ assert( (pMem->flags & ~(MEM_Null|MEM_Agg))==0 );
+#ifndef SQLITE_OMIT_WINDOWFUNC
+ if( pOp->p3 ){
+ memAboutToChange(p, &aMem[pOp->p3]);
+ rc = sqlite3VdbeMemAggValue(pMem, &aMem[pOp->p3], pOp->p4.pFunc);
+ pMem = &aMem[pOp->p3];
+ }else
+#endif
+ {
+ rc = sqlite3VdbeMemFinalize(pMem, pOp->p4.pFunc);
+ }
+
+ if( rc ){
+ sqlite3VdbeError(p, "%s", sqlite3_value_text(pMem));
+ goto abort_due_to_error;
+ }
+ sqlite3VdbeChangeEncoding(pMem, encoding);
+ UPDATE_MAX_BLOBSIZE(pMem);
+ if( sqlite3VdbeMemTooBig(pMem) ){
+ goto too_big;
+ }
+ break;
+}
+
+#ifndef SQLITE_OMIT_WAL
+/* Opcode: Checkpoint P1 P2 P3 * *
+**
+** Checkpoint database P1. This is a no-op if P1 is not currently in
+** WAL mode. Parameter P2 is one of SQLITE_CHECKPOINT_PASSIVE, FULL,
+** RESTART, or TRUNCATE. Write 1 into mem[P3] if the checkpoint returns
+** SQLITE_BUSY, or 0 if it does not. Write the number of pages in the
+** WAL after the checkpoint into mem[P3+1] and the number of pages
+** in the WAL that have been checkpointed after the checkpoint
+** completes into mem[P3+2]. However on an error, mem[P3+1] and
+** mem[P3+2] are initialized to -1.
+*/
+case OP_Checkpoint: {
+ int i; /* Loop counter */
+ int aRes[3]; /* Results */
+ Mem *pMem; /* Write results here */
+
+ assert( p->readOnly==0 );
+ aRes[0] = 0;
+ aRes[1] = aRes[2] = -1;
+ assert( pOp->p2==SQLITE_CHECKPOINT_PASSIVE
+ || pOp->p2==SQLITE_CHECKPOINT_FULL
+ || pOp->p2==SQLITE_CHECKPOINT_RESTART
+ || pOp->p2==SQLITE_CHECKPOINT_TRUNCATE
+ );
+ rc = sqlite3Checkpoint(db, pOp->p1, pOp->p2, &aRes[1], &aRes[2]);
+ if( rc ){
+ if( rc!=SQLITE_BUSY ) goto abort_due_to_error;
+ rc = SQLITE_OK;
+ aRes[0] = 1;
+ }
+ for(i=0, pMem = &aMem[pOp->p3]; i<3; i++, pMem++){
+ sqlite3VdbeMemSetInt64(pMem, (i64)aRes[i]);
+ }
+ break;
+}
+#endif
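+
+/* Example: "PRAGMA wal_checkpoint(FULL);" typically compiles to an
+** OP_Checkpoint with P2==SQLITE_CHECKPOINT_FULL, and the three values
+** written to mem[P3..P3+2] (busy flag, pages in the WAL, pages
+** checkpointed) become the row that the pragma returns.
+*/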
+
+#ifndef SQLITE_OMIT_PRAGMA
+/* Opcode: JournalMode P1 P2 P3 * *
+**
+** Change the journal mode of database P1 to P3. P3 must be one of the
+** PAGER_JOURNALMODE_XXX values. If changing between the various rollback
+** modes (delete, truncate, persist, off and memory), this is a simple
+** operation. No IO is required.
+**
+** If changing into or out of WAL mode the procedure is more complicated.
+**
+** Write a string containing the final journal-mode to register P2.
+*/
+case OP_JournalMode: { /* out2 */
+ Btree *pBt; /* Btree to change journal mode of */
+ Pager *pPager; /* Pager associated with pBt */
+ int eNew; /* New journal mode */
+ int eOld; /* The old journal mode */
+#ifndef SQLITE_OMIT_WAL
+ const char *zFilename; /* Name of database file for pPager */
+#endif
+
+ pOut = out2Prerelease(p, pOp);
+ eNew = pOp->p3;
+ assert( eNew==PAGER_JOURNALMODE_DELETE
+ || eNew==PAGER_JOURNALMODE_TRUNCATE
+ || eNew==PAGER_JOURNALMODE_PERSIST
+ || eNew==PAGER_JOURNALMODE_OFF
+ || eNew==PAGER_JOURNALMODE_MEMORY
+ || eNew==PAGER_JOURNALMODE_WAL
+ || eNew==PAGER_JOURNALMODE_QUERY
+ );
+ assert( pOp->p1>=0 && pOp->p1<db->nDb );
+ assert( p->readOnly==0 );
+
+ pBt = db->aDb[pOp->p1].pBt;
+ pPager = sqlite3BtreePager(pBt);
+ eOld = sqlite3PagerGetJournalMode(pPager);
+ if( eNew==PAGER_JOURNALMODE_QUERY ) eNew = eOld;
+ if( !sqlite3PagerOkToChangeJournalMode(pPager) ) eNew = eOld;
+
+#ifndef SQLITE_OMIT_WAL
+ zFilename = sqlite3PagerFilename(pPager, 1);
+
+ /* Do not allow a transition to journal_mode=WAL for a database
+ ** in temporary storage or if the VFS does not support shared memory
+ */
+ if( eNew==PAGER_JOURNALMODE_WAL
+ && (sqlite3Strlen30(zFilename)==0 /* Temp file */
+ || !sqlite3PagerWalSupported(pPager)) /* No shared-memory support */
+ ){
+ eNew = eOld;
+ }
+
+ if( (eNew!=eOld)
+ && (eOld==PAGER_JOURNALMODE_WAL || eNew==PAGER_JOURNALMODE_WAL)
+ ){
+ if( !db->autoCommit || db->nVdbeRead>1 ){
+ rc = SQLITE_ERROR;
+ sqlite3VdbeError(p,
+ "cannot change %s wal mode from within a transaction",
+ (eNew==PAGER_JOURNALMODE_WAL ? "into" : "out of")
+ );
+ goto abort_due_to_error;
+ }else{
+
+ if( eOld==PAGER_JOURNALMODE_WAL ){
+ /* If leaving WAL mode, close the log file. If successful, the call
+ ** to PagerCloseWal() checkpoints and deletes the write-ahead-log
+ ** file. An EXCLUSIVE lock may still be held on the database file
+ ** after a successful return.
+ */
+ rc = sqlite3PagerCloseWal(pPager, db);
+ if( rc==SQLITE_OK ){
+ sqlite3PagerSetJournalMode(pPager, eNew);
+ }
+ }else if( eOld==PAGER_JOURNALMODE_MEMORY ){
+ /* Cannot transition directly from MEMORY to WAL. Use mode OFF
+ ** as an intermediate */
+ sqlite3PagerSetJournalMode(pPager, PAGER_JOURNALMODE_OFF);
+ }
+
+ /* Open a transaction on the database file. Regardless of the journal
+ ** mode, this transaction always uses a rollback journal.
+ */
+ assert( sqlite3BtreeIsInTrans(pBt)==0 );
+ if( rc==SQLITE_OK ){
+ rc = sqlite3BtreeSetVersion(pBt, (eNew==PAGER_JOURNALMODE_WAL ? 2 : 1));
+ }
+ }
+ }
+#endif /* ifndef SQLITE_OMIT_WAL */
+
+ if( rc ) eNew = eOld;
+ eNew = sqlite3PagerSetJournalMode(pPager, eNew);
+
+ pOut->flags = MEM_Str|MEM_Static|MEM_Term;
+ pOut->z = (char *)sqlite3JournalModename(eNew);
+ pOut->n = sqlite3Strlen30(pOut->z);
+ pOut->enc = SQLITE_UTF8;
+ sqlite3VdbeChangeEncoding(pOut, encoding);
+ if( rc ) goto abort_due_to_error;
+ break;
+}
+#endif /* SQLITE_OMIT_PRAGMA */
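+
+/* Example: "PRAGMA journal_mode=WAL;" compiles to an OP_JournalMode
+** with P3==PAGER_JOURNALMODE_WAL. The string left in register P2
+** ("wal", "delete", and so forth) is the single-column row returned by
+** the pragma, reporting the journal mode actually in effect.
+*/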
+
+#if !defined(SQLITE_OMIT_VACUUM) && !defined(SQLITE_OMIT_ATTACH)
+/* Opcode: Vacuum P1 P2 * * *
+**
+** Vacuum the entire database P1. P1 is 0 for "main", and 2 or more
+** for an attached database. The "temp" database may not be vacuumed.
+**
+** If P2 is not zero, then it is a register holding a string which is
+** the file into which the result of vacuum should be written. When
+** P2 is zero, the vacuum overwrites the original database.
+*/
+case OP_Vacuum: {
+ assert( p->readOnly==0 );
+ rc = sqlite3RunVacuum(&p->zErrMsg, db, pOp->p1,
+ pOp->p2 ? &aMem[pOp->p2] : 0);
+ if( rc ) goto abort_due_to_error;
+ break;
+}
+#endif
+
+#if !defined(SQLITE_OMIT_AUTOVACUUM)
+/* Opcode: IncrVacuum P1 P2 * * *
+**
+** Perform a single step of the incremental vacuum procedure on
+** the P1 database. If the vacuum has finished, jump to instruction
+** P2. Otherwise, fall through to the next instruction.
+*/
+case OP_IncrVacuum: { /* jump */
+ Btree *pBt;
+
+ assert( pOp->p1>=0 && pOp->p1<db->nDb );
+ assert( DbMaskTest(p->btreeMask, pOp->p1) );
+ assert( p->readOnly==0 );
+ pBt = db->aDb[pOp->p1].pBt;
+ rc = sqlite3BtreeIncrVacuum(pBt);
+ VdbeBranchTaken(rc==SQLITE_DONE,2);
+ if( rc ){
+ if( rc!=SQLITE_DONE ) goto abort_due_to_error;
+ rc = SQLITE_OK;
+ goto jump_to_p2;
+ }
+ break;
+}
+#endif
+
+/* Opcode: Expire P1 P2 * * *
+**
+** Cause precompiled statements to expire. When an expired statement
+** is executed using sqlite3_step() it will either automatically
+** reprepare itself (if it was originally created using sqlite3_prepare_v2())
+** or it will fail with SQLITE_SCHEMA.
+**
+** If P1 is 0, then all SQL statements become expired. If P1 is non-zero,
+** then only the currently executing statement is expired.
+**
+** If P2 is 0, then SQL statements are expired immediately. If P2 is 1,
+** then running SQL statements are allowed to continue to run to completion.
+** The P2==1 case occurs when a CREATE INDEX or similar schema change happens
+** that might help the statement run faster but which does not affect the
+** correctness of operation.
+*/
+case OP_Expire: {
+ assert( pOp->p2==0 || pOp->p2==1 );
+ if( !pOp->p1 ){
+ sqlite3ExpirePreparedStatements(db, pOp->p2);
+ }else{
+ p->expired = pOp->p2+1;
+ }
+ break;
+}
+
+/* Opcode: CursorLock P1 * * * *
+**
+** Lock the btree to which cursor P1 is pointing so that the btree cannot be
+** written by an other cursor.
+*/
+case OP_CursorLock: {
+ VdbeCursor *pC;
+ assert( pOp->p1>=0 && pOp->p1<p->nCursor );
+ pC = p->apCsr[pOp->p1];
+ assert( pC!=0 );
+ assert( pC->eCurType==CURTYPE_BTREE );
+ sqlite3BtreeCursorPin(pC->uc.pCursor);
+ break;
+}
+
+/* Opcode: CursorUnlock P1 * * * *
+**
+** Unlock the btree to which cursor P1 is pointing so that it can be
+** written by other cursors.
+*/
+case OP_CursorUnlock: {
+ VdbeCursor *pC;
+ assert( pOp->p1>=0 && pOp->p1<p->nCursor );
+ pC = p->apCsr[pOp->p1];
+ assert( pC!=0 );
+ assert( pC->eCurType==CURTYPE_BTREE );
+ sqlite3BtreeCursorUnpin(pC->uc.pCursor);
+ break;
+}
+
+#ifndef SQLITE_OMIT_SHARED_CACHE
+/* Opcode: TableLock P1 P2 P3 P4 *
+** Synopsis: iDb=P1 root=P2 write=P3
+**
+** Obtain a lock on a particular table. This instruction is only used when
+** the shared-cache feature is enabled.
+**
+** P1 is the index of the database in sqlite3.aDb[] of the database
+** on which the lock is acquired. A read lock is obtained if P3==0 or
+** a write lock if P3==1.
+**
+** P2 contains the root-page of the table to lock.
+**
+** P4 contains a pointer to the name of the table being locked. This is only
+** used to generate an error message if the lock cannot be obtained.
+*/
+case OP_TableLock: {
+ u8 isWriteLock = (u8)pOp->p3;
+ if( isWriteLock || 0==(db->flags&SQLITE_ReadUncommit) ){
+ int p1 = pOp->p1;
+ assert( p1>=0 && p1<db->nDb );
+ assert( DbMaskTest(p->btreeMask, p1) );
+ assert( isWriteLock==0 || isWriteLock==1 );
+ rc = sqlite3BtreeLockTable(db->aDb[p1].pBt, pOp->p2, isWriteLock);
+ if( rc ){
+ if( (rc&0xFF)==SQLITE_LOCKED ){
+ const char *z = pOp->p4.z;
+ sqlite3VdbeError(p, "database table is locked: %s", z);
+ }
+ goto abort_due_to_error;
+ }
+ }
+ break;
+}
+#endif /* SQLITE_OMIT_SHARED_CACHE */
+
+#ifndef SQLITE_OMIT_VIRTUALTABLE
+/* Opcode: VBegin * * * P4 *
+**
+** P4 may be a pointer to an sqlite3_vtab structure. If so, call the
+** xBegin method for that table.
+**
+** Also, whether or not P4 is set, check that this is not being called from
+** within a callback to a virtual table xSync() method. If it is, the error
+** code will be set to SQLITE_LOCKED.
+*/
+case OP_VBegin: {
+ VTable *pVTab;
+ pVTab = pOp->p4.pVtab;
+ rc = sqlite3VtabBegin(db, pVTab);
+ if( pVTab ) sqlite3VtabImportErrmsg(p, pVTab->pVtab);
+ if( rc ) goto abort_due_to_error;
+ break;
+}
+#endif /* SQLITE_OMIT_VIRTUALTABLE */
+
+#ifndef SQLITE_OMIT_VIRTUALTABLE
+/* Opcode: VCreate P1 P2 * * *
+**
+** P2 is a register that holds the name of a virtual table in database
+** P1. Call the xCreate method for that table.
+*/
+case OP_VCreate: {
+ Mem sMem; /* For storing the record being decoded */
+ const char *zTab; /* Name of the virtual table */
+
+ memset(&sMem, 0, sizeof(sMem));
+ sMem.db = db;
+  /* Because P2 is always a static string, it is impossible for
+  ** sqlite3VdbeMemCopy() to fail */
+ assert( (aMem[pOp->p2].flags & MEM_Str)!=0 );
+ assert( (aMem[pOp->p2].flags & MEM_Static)!=0 );
+ rc = sqlite3VdbeMemCopy(&sMem, &aMem[pOp->p2]);
+ assert( rc==SQLITE_OK );
+ zTab = (const char*)sqlite3_value_text(&sMem);
+ assert( zTab || db->mallocFailed );
+ if( zTab ){
+ rc = sqlite3VtabCallCreate(db, pOp->p1, zTab, &p->zErrMsg);
+ }
+ sqlite3VdbeMemRelease(&sMem);
+ if( rc ) goto abort_due_to_error;
+ break;
+}
+#endif /* SQLITE_OMIT_VIRTUALTABLE */
+
+#ifndef SQLITE_OMIT_VIRTUALTABLE
+/* Opcode: VDestroy P1 * * P4 *
+**
+** P4 is the name of a virtual table in database P1. Call the xDestroy method
+** of that table.
+*/
+case OP_VDestroy: {
+ db->nVDestroy++;
+ rc = sqlite3VtabCallDestroy(db, pOp->p1, pOp->p4.z);
+ db->nVDestroy--;
+ assert( p->errorAction==OE_Abort && p->usesStmtJournal );
+ if( rc ) goto abort_due_to_error;
+ break;
+}
+#endif /* SQLITE_OMIT_VIRTUALTABLE */
+
+#ifndef SQLITE_OMIT_VIRTUALTABLE
+/* Opcode: VOpen P1 * * P4 *
+**
+** P4 is a pointer to a virtual table object, an sqlite3_vtab structure.
+** P1 is a cursor number. This opcode opens a cursor to the virtual
+** table and stores that cursor in P1.
+*/
+case OP_VOpen: {
+ VdbeCursor *pCur;
+ sqlite3_vtab_cursor *pVCur;
+ sqlite3_vtab *pVtab;
+ const sqlite3_module *pModule;
+
+ assert( p->bIsReader );
+ pCur = 0;
+ pVCur = 0;
+ pVtab = pOp->p4.pVtab->pVtab;
+ if( pVtab==0 || NEVER(pVtab->pModule==0) ){
+ rc = SQLITE_LOCKED;
+ goto abort_due_to_error;
+ }
+ pModule = pVtab->pModule;
+ rc = pModule->xOpen(pVtab, &pVCur);
+ sqlite3VtabImportErrmsg(p, pVtab);
+ if( rc ) goto abort_due_to_error;
+
+ /* Initialize sqlite3_vtab_cursor base class */
+ pVCur->pVtab = pVtab;
+
+ /* Initialize vdbe cursor object */
+ pCur = allocateCursor(p, pOp->p1, 0, -1, CURTYPE_VTAB);
+ if( pCur ){
+ pCur->uc.pVCur = pVCur;
+ pVtab->nRef++;
+ }else{
+ assert( db->mallocFailed );
+ pModule->xClose(pVCur);
+ goto no_mem;
+ }
+ break;
+}
+#endif /* SQLITE_OMIT_VIRTUALTABLE */
+
+#ifndef SQLITE_OMIT_VIRTUALTABLE
+/* Opcode: VFilter P1 P2 P3 P4 *
+** Synopsis: iplan=r[P3] zplan='P4'
+**
+** P1 is a cursor opened using VOpen. P2 is an address to jump to if
+** the filtered result set is empty.
+**
+** P4 is either NULL or a string that was generated by the xBestIndex
+** method of the module. The interpretation of the P4 string is left
+** to the module implementation.
+**
+** This opcode invokes the xFilter method on the virtual table specified
+** by P1. The integer query plan parameter to xFilter is stored in register
+** P3. Register P3+1 stores the argc parameter to be passed to the
+** xFilter method. Registers P3+2..P3+1+argc are the argc
+** additional parameters which are passed to
+** xFilter as argv. Register P3+2 becomes argv[0] when passed to xFilter.
+**
+** A jump is made to P2 if the result set after filtering would be empty.
+*/
+case OP_VFilter: { /* jump */
+ int nArg;
+ int iQuery;
+ const sqlite3_module *pModule;
+ Mem *pQuery;
+ Mem *pArgc;
+ sqlite3_vtab_cursor *pVCur;
+ sqlite3_vtab *pVtab;
+ VdbeCursor *pCur;
+ int res;
+ int i;
+ Mem **apArg;
+
+ pQuery = &aMem[pOp->p3];
+ pArgc = &pQuery[1];
+ pCur = p->apCsr[pOp->p1];
+ assert( memIsValid(pQuery) );
+ REGISTER_TRACE(pOp->p3, pQuery);
+ assert( pCur->eCurType==CURTYPE_VTAB );
+ pVCur = pCur->uc.pVCur;
+ pVtab = pVCur->pVtab;
+ pModule = pVtab->pModule;
+
+ /* Grab the index number and argc parameters */
+ assert( (pQuery->flags&MEM_Int)!=0 && pArgc->flags==MEM_Int );
+ nArg = (int)pArgc->u.i;
+ iQuery = (int)pQuery->u.i;
+
+ /* Invoke the xFilter method */
+ res = 0;
+ apArg = p->apArg;
+ for(i = 0; i<nArg; i++){
+ apArg[i] = &pArgc[i+1];
+ }
+ rc = pModule->xFilter(pVCur, iQuery, pOp->p4.z, nArg, apArg);
+ sqlite3VtabImportErrmsg(p, pVtab);
+ if( rc ) goto abort_due_to_error;
+ res = pModule->xEof(pVCur);
+ pCur->nullRow = 0;
+ VdbeBranchTaken(res!=0,2);
+ if( res ) goto jump_to_p2;
+ break;
+}
+#endif /* SQLITE_OMIT_VIRTUALTABLE */
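+
+/* Example: if xBestIndex selected two constraints for a virtual table
+** scan, the compiled code loads the chosen idxNum into r[P3], the value
+** 2 into r[P3+1], and the two constraint values into r[P3+2] and
+** r[P3+3]; this opcode then hands them to xFilter as argv[0] and
+** argv[1].
+*/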
+
+#ifndef SQLITE_OMIT_VIRTUALTABLE
+/* Opcode: VColumn P1 P2 P3 * P5
+** Synopsis: r[P3]=vcolumn(P2)
+**
+** Store in register P3 the value of the P2-th column of
+** the current row of the virtual-table of cursor P1.
+**
+** If the VColumn opcode is being used to fetch the value of
+** an unchanging column during an UPDATE operation, then the P5
+** value is OPFLAG_NOCHNG. This will cause the sqlite3_vtab_nochange()
+** function to return true inside the xColumn method of the virtual
+** table implementation. The P5 column might also contain other
+** bits (OPFLAG_LENGTHARG or OPFLAG_TYPEOFARG) but those bits are
+** unused by OP_VColumn.
+*/
+case OP_VColumn: {
+ sqlite3_vtab *pVtab;
+ const sqlite3_module *pModule;
+ Mem *pDest;
+ sqlite3_context sContext;
+
+ VdbeCursor *pCur = p->apCsr[pOp->p1];
+ assert( pCur->eCurType==CURTYPE_VTAB );
+ assert( pOp->p3>0 && pOp->p3<=(p->nMem+1 - p->nCursor) );
+ pDest = &aMem[pOp->p3];
+ memAboutToChange(p, pDest);
+ if( pCur->nullRow ){
+ sqlite3VdbeMemSetNull(pDest);
+ break;
+ }
+ pVtab = pCur->uc.pVCur->pVtab;
+ pModule = pVtab->pModule;
+ assert( pModule->xColumn );
+ memset(&sContext, 0, sizeof(sContext));
+ sContext.pOut = pDest;
+ assert( pOp->p5==OPFLAG_NOCHNG || pOp->p5==0 );
+ if( pOp->p5 & OPFLAG_NOCHNG ){
+ sqlite3VdbeMemSetNull(pDest);
+ pDest->flags = MEM_Null|MEM_Zero;
+ pDest->u.nZero = 0;
+ }else{
+ MemSetTypeFlag(pDest, MEM_Null);
+ }
+ rc = pModule->xColumn(pCur->uc.pVCur, &sContext, pOp->p2);
+ sqlite3VtabImportErrmsg(p, pVtab);
+ if( sContext.isError>0 ){
+ sqlite3VdbeError(p, "%s", sqlite3_value_text(pDest));
+ rc = sContext.isError;
+ }
+ sqlite3VdbeChangeEncoding(pDest, encoding);
+ REGISTER_TRACE(pOp->p3, pDest);
+ UPDATE_MAX_BLOBSIZE(pDest);
+
+ if( sqlite3VdbeMemTooBig(pDest) ){
+ goto too_big;
+ }
+ if( rc ) goto abort_due_to_error;
+ break;
+}
+#endif /* SQLITE_OMIT_VIRTUALTABLE */
+
+#ifndef SQLITE_OMIT_VIRTUALTABLE
+/* Opcode: VNext P1 P2 * * *
+**
+** Advance virtual table P1 to the next row in its result set and
+** jump to instruction P2. Or, if the virtual table has reached
+** the end of its result set, then fall through to the next instruction.
+*/
+case OP_VNext: { /* jump */
+ sqlite3_vtab *pVtab;
+ const sqlite3_module *pModule;
+ int res;
+ VdbeCursor *pCur;
+
+ res = 0;
+ pCur = p->apCsr[pOp->p1];
+ assert( pCur->eCurType==CURTYPE_VTAB );
+ if( pCur->nullRow ){
+ break;
+ }
+ pVtab = pCur->uc.pVCur->pVtab;
+ pModule = pVtab->pModule;
+ assert( pModule->xNext );
+
+ /* Invoke the xNext() method of the module. There is no way for the
+ ** underlying implementation to return an error if one occurs during
+ ** xNext(). Instead, if an error occurs, true is returned (indicating that
+  ** data is available) and the error code is returned when xColumn or
+  ** some other method is next invoked on the same virtual table cursor.
+ */
+ rc = pModule->xNext(pCur->uc.pVCur);
+ sqlite3VtabImportErrmsg(p, pVtab);
+ if( rc ) goto abort_due_to_error;
+ res = pModule->xEof(pCur->uc.pVCur);
+ VdbeBranchTaken(!res,2);
+ if( !res ){
+ /* If there is data, jump to P2 */
+ goto jump_to_p2_and_check_for_interrupt;
+ }
+ goto check_for_interrupt;
+}
+#endif /* SQLITE_OMIT_VIRTUALTABLE */
+
+#ifndef SQLITE_OMIT_VIRTUALTABLE
+/* Opcode: VRename P1 * * P4 *
+**
+** P4 is a pointer to a virtual table object, an sqlite3_vtab structure.
+** This opcode invokes the corresponding xRename method. The value
+** in register P1 is passed as the zName argument to the xRename method.
+*/
+case OP_VRename: {
+ sqlite3_vtab *pVtab;
+ Mem *pName;
+ int isLegacy;
+
+ isLegacy = (db->flags & SQLITE_LegacyAlter);
+ db->flags |= SQLITE_LegacyAlter;
+ pVtab = pOp->p4.pVtab->pVtab;
+ pName = &aMem[pOp->p1];
+ assert( pVtab->pModule->xRename );
+ assert( memIsValid(pName) );
+ assert( p->readOnly==0 );
+ REGISTER_TRACE(pOp->p1, pName);
+ assert( pName->flags & MEM_Str );
+ testcase( pName->enc==SQLITE_UTF8 );
+ testcase( pName->enc==SQLITE_UTF16BE );
+ testcase( pName->enc==SQLITE_UTF16LE );
+ rc = sqlite3VdbeChangeEncoding(pName, SQLITE_UTF8);
+ if( rc ) goto abort_due_to_error;
+ rc = pVtab->pModule->xRename(pVtab, pName->z);
+ if( isLegacy==0 ) db->flags &= ~(u64)SQLITE_LegacyAlter;
+ sqlite3VtabImportErrmsg(p, pVtab);
+ p->expired = 0;
+ if( rc ) goto abort_due_to_error;
+ break;
+}
+#endif
+
+#ifndef SQLITE_OMIT_VIRTUALTABLE
+/* Opcode: VUpdate P1 P2 P3 P4 P5
+** Synopsis: data=r[P3@P2]
+**
+** P4 is a pointer to a virtual table object, an sqlite3_vtab structure.
+** This opcode invokes the corresponding xUpdate method. The P2 values
+** in contiguous memory cells starting at P3 are passed to the xUpdate
+** invocation. The value in register (P3+P2-1) corresponds to the
+** P2-th element of the argv array passed to xUpdate.
+**
+** The xUpdate method will do a DELETE or an INSERT or both.
+** The argv[0] element (which corresponds to memory cell P3)
+** is the rowid of a row to delete. If argv[0] is NULL then no
+** deletion occurs. The argv[1] element is the rowid of the new
+** row. This can be NULL to have the virtual table select the new
+** rowid for itself. The subsequent elements in the array are
+** the values of columns in the new row.
+**
+** If P2==1 then no insert is performed. argv[0] is the rowid of
+** a row to delete.
+**
+** P1 is a boolean flag. If it is set to true and the xUpdate call
+** is successful, then the value returned by sqlite3_last_insert_rowid()
+** is set to the value of the rowid for the row just inserted.
+**
+** P5 is the error actions (OE_Replace, OE_Fail, OE_Ignore, etc) to
+** apply in the case of a constraint failure on an insert or update.
+*/
+case OP_VUpdate: {
+ sqlite3_vtab *pVtab;
+ const sqlite3_module *pModule;
+ int nArg;
+ int i;
+ sqlite_int64 rowid;
+ Mem **apArg;
+ Mem *pX;
+
+ assert( pOp->p2==1 || pOp->p5==OE_Fail || pOp->p5==OE_Rollback
+ || pOp->p5==OE_Abort || pOp->p5==OE_Ignore || pOp->p5==OE_Replace
+ );
+ assert( p->readOnly==0 );
+ if( db->mallocFailed ) goto no_mem;
+ sqlite3VdbeIncrWriteCounter(p, 0);
+ pVtab = pOp->p4.pVtab->pVtab;
+ if( pVtab==0 || NEVER(pVtab->pModule==0) ){
+ rc = SQLITE_LOCKED;
+ goto abort_due_to_error;
+ }
+ pModule = pVtab->pModule;
+ nArg = pOp->p2;
+ assert( pOp->p4type==P4_VTAB );
+ if( ALWAYS(pModule->xUpdate) ){
+ u8 vtabOnConflict = db->vtabOnConflict;
+ apArg = p->apArg;
+ pX = &aMem[pOp->p3];
+ for(i=0; i<nArg; i++){
+ assert( memIsValid(pX) );
+ memAboutToChange(p, pX);
+ apArg[i] = pX;
+ pX++;
+ }
+ db->vtabOnConflict = pOp->p5;
+ rc = pModule->xUpdate(pVtab, nArg, apArg, &rowid);
+ db->vtabOnConflict = vtabOnConflict;
+ sqlite3VtabImportErrmsg(p, pVtab);
+ if( rc==SQLITE_OK && pOp->p1 ){
+ assert( nArg>1 && apArg[0] && (apArg[0]->flags&MEM_Null) );
+ db->lastRowid = rowid;
+ }
+ if( (rc&0xff)==SQLITE_CONSTRAINT && pOp->p4.pVtab->bConstraint ){
+ if( pOp->p5==OE_Ignore ){
+ rc = SQLITE_OK;
+ }else{
+ p->errorAction = ((pOp->p5==OE_Replace) ? OE_Abort : pOp->p5);
+ }
+ }else{
+ p->nChange++;
+ }
+ if( rc ) goto abort_due_to_error;
+ }
+ break;
+}
+#endif /* SQLITE_OMIT_VIRTUALTABLE */
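+
+/* Example: for a DELETE against a virtual table, P2==1 and argv[0]
+** holds the rowid to delete. For an INSERT, argv[0] is NULL, argv[1]
+** is the new rowid (or NULL to let the table choose one), and the
+** remaining cells hold the new column values, so inserting into a
+** table with N columns uses P2==N+2.
+*/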
+
+#ifndef SQLITE_OMIT_PAGER_PRAGMAS
+/* Opcode: Pagecount P1 P2 * * *
+**
+** Write the current number of pages in database P1 to memory cell P2.
+*/
+case OP_Pagecount: { /* out2 */
+ pOut = out2Prerelease(p, pOp);
+ pOut->u.i = sqlite3BtreeLastPage(db->aDb[pOp->p1].pBt);
+ break;
+}
+#endif
+
+
+#ifndef SQLITE_OMIT_PAGER_PRAGMAS
+/* Opcode: MaxPgcnt P1 P2 P3 * *
+**
+** Try to set the maximum page count for database P1 to the value in P3.
+** Do not let the maximum page count fall below the current page count and
+** do not change the maximum page count value if P3==0.
+**
+** Store the maximum page count after the change in register P2.
+*/
+case OP_MaxPgcnt: { /* out2 */
+ unsigned int newMax;
+ Btree *pBt;
+
+ pOut = out2Prerelease(p, pOp);
+ pBt = db->aDb[pOp->p1].pBt;
+ newMax = 0;
+ if( pOp->p3 ){
+ newMax = sqlite3BtreeLastPage(pBt);
+ if( newMax < (unsigned)pOp->p3 ) newMax = (unsigned)pOp->p3;
+ }
+ pOut->u.i = sqlite3BtreeMaxPageCount(pBt, newMax);
+ break;
+}
+#endif
+
+/* Opcode: Function P1 P2 P3 P4 *
+** Synopsis: r[P3]=func(r[P2@NP])
+**
+** Invoke a user function (P4 is a pointer to an sqlite3_context object that
+** contains a pointer to the function to be run) with arguments taken
+** from register P2 and successors. The number of arguments is in
+** the sqlite3_context object that P4 points to.
+** The result of the function is stored
+** in register P3. Register P3 must not be one of the function inputs.
+**
+** P1 is a 32-bit bitmask indicating whether or not each argument to the
+** function was determined to be constant at compile time. If the first
+** argument was constant then bit 0 of P1 is set. This is used to determine
+** whether meta data associated with a user function argument using the
+** sqlite3_set_auxdata() API may be safely retained until the next
+** invocation of this opcode.
+**
+** See also: AggStep, AggFinal, PureFunc
+*/
+/* Opcode: PureFunc P1 P2 P3 P4 *
+** Synopsis: r[P3]=func(r[P2@NP])
+**
+** Invoke a user function (P4 is a pointer to an sqlite3_context object that
+** contains a pointer to the function to be run) with arguments taken
+** from register P2 and successors. The number of arguments is in
+** the sqlite3_context object that P4 points to.
+** The result of the function is stored
+** in register P3. Register P3 must not be one of the function inputs.
+**
+** P1 is a 32-bit bitmask indicating whether or not each argument to the
+** function was determined to be constant at compile time. If the first
+** argument was constant then bit 0 of P1 is set. This is used to determine
+** whether meta data associated with a user function argument using the
+** sqlite3_set_auxdata() API may be safely retained until the next
+** invocation of this opcode.
+**
+** This opcode works exactly like OP_Function. The only difference is in
+** its name. This opcode is used in places where the function must be
+** purely non-deterministic. Some built-in date/time functions can be
+** either determinitic of non-deterministic, depending on their arguments.
+** When those function are used in a non-deterministic way, they will check
+** to see if they were called using OP_PureFunc instead of OP_Function, and
+** if they were, they throw an error.
+**
+** See also: AggStep, AggFinal, Function
+*/
+case OP_PureFunc: /* group */
+case OP_Function: { /* group */
+ int i;
+ sqlite3_context *pCtx;
+
+ assert( pOp->p4type==P4_FUNCCTX );
+ pCtx = pOp->p4.pCtx;
+
+ /* If this function is inside of a trigger, the register array in aMem[]
+ ** might change from one evaluation to the next. The next block of code
+ ** checks to see if the register array has changed, and if so it
+ ** reinitializes the relevant parts of the sqlite3_context object */
+ pOut = &aMem[pOp->p3];
+ if( pCtx->pOut != pOut ){
+ pCtx->pVdbe = p;
+ pCtx->pOut = pOut;
+ for(i=pCtx->argc-1; i>=0; i--) pCtx->argv[i] = &aMem[pOp->p2+i];
+ }
+ assert( pCtx->pVdbe==p );
+
+ memAboutToChange(p, pOut);
+#ifdef SQLITE_DEBUG
+ for(i=0; i<pCtx->argc; i++){
+ assert( memIsValid(pCtx->argv[i]) );
+ REGISTER_TRACE(pOp->p2+i, pCtx->argv[i]);
+ }
+#endif
+ MemSetTypeFlag(pOut, MEM_Null);
+ assert( pCtx->isError==0 );
+ (*pCtx->pFunc->xSFunc)(pCtx, pCtx->argc, pCtx->argv);/* IMP: R-24505-23230 */
+
+ /* If the function returned an error, throw an exception */
+ if( pCtx->isError ){
+ if( pCtx->isError>0 ){
+ sqlite3VdbeError(p, "%s", sqlite3_value_text(pOut));
+ rc = pCtx->isError;
+ }
+ sqlite3VdbeDeleteAuxData(db, &p->pAuxData, pCtx->iOp, pOp->p1);
+ pCtx->isError = 0;
+ if( rc ) goto abort_due_to_error;
+ }
+
+ /* Copy the result of the function into register P3 */
+ if( pOut->flags & (MEM_Str|MEM_Blob) ){
+ sqlite3VdbeChangeEncoding(pOut, encoding);
+ if( sqlite3VdbeMemTooBig(pOut) ) goto too_big;
+ }
+
+ REGISTER_TRACE(pOp->p3, pOut);
+ UPDATE_MAX_BLOBSIZE(pOut);
+ break;
+}
+
+/* Opcode: Trace P1 P2 * P4 *
+**
+** Write P4 on the statement trace output if statement tracing is
+** enabled.
+**
+** Operand P1 must be 0x7fffffff and P2 must be positive.
+*/
+/* Opcode: Init P1 P2 P3 P4 *
+** Synopsis: Start at P2
+**
+** Programs contain a single instance of this opcode as the very first
+** opcode.
+**
+** If tracing is enabled (by the sqlite3_trace() interface), then
+** the UTF-8 string contained in P4 is emitted on the trace callback.
+** Or if P4 is blank, use the string returned by sqlite3_sql().
+**
+** If P2 is not zero, jump to instruction P2.
+**
+** Increment the value of P1 so that OP_Once opcodes will jump the
+** first time they are evaluated for this run.
+**
+** If P3 is not zero, then it is an address to jump to if an SQLITE_CORRUPT
+** error is encountered.
+*/
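+/* Illustrative sketch of a callback that this opcode drives. With
+** sqlite3_trace_v2() (the interface whose SQLITE_TRACE_STMT mask is tested
+** in the code below), the X argument is the unexpanded SQL, or a "--"
+** comment when the statement runs inside a trigger, as the EVIDENCE-OF
+** assert() below documents:
+**
+**   static int xTrace(unsigned type, void *pCtx, void *P, void *X){
+**     if( type==SQLITE_TRACE_STMT ){
+**       const char *zSql = (const char*)X;
+**       char *zFull = (zSql[0]=='-' && zSql[1]=='-')
+**                   ? 0 : sqlite3_expanded_sql((sqlite3_stmt*)P);
+**       fprintf(stderr, "SQL: %s\n", zFull ? zFull : zSql);
+**       sqlite3_free(zFull);  /* sqlite3_expanded_sql() output is malloced */
+**     }
+**     return 0;
+**   }
+**   ...
+**   sqlite3_trace_v2(db, SQLITE_TRACE_STMT, xTrace, 0);
+*/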
+case OP_Trace:
+case OP_Init: { /* jump */
+ int i;
+#ifndef SQLITE_OMIT_TRACE
+ char *zTrace;
+#endif
+
+ /* If the P4 argument is not NULL, then it must be an SQL comment string.
+ ** The "--" string is broken up to prevent false-positives with srcck1.c.
+ **
+ ** This assert() provides evidence for:
+ ** EVIDENCE-OF: R-50676-09860 The callback can compute the same text that
+ ** would have been returned by the legacy sqlite3_trace() interface by
+ ** using the X argument when X begins with "--" and invoking
+ ** sqlite3_expanded_sql(P) otherwise.
+ */
+ assert( pOp->p4.z==0 || strncmp(pOp->p4.z, "-" "- ", 3)==0 );
+
+ /* OP_Init is always instruction 0 */
+ assert( pOp==p->aOp || pOp->opcode==OP_Trace );
+
+#ifndef SQLITE_OMIT_TRACE
+ if( (db->mTrace & (SQLITE_TRACE_STMT|SQLITE_TRACE_LEGACY))!=0
+ && !p->doingRerun
+ && (zTrace = (pOp->p4.z ? pOp->p4.z : p->zSql))!=0
+ ){
+#ifndef SQLITE_OMIT_DEPRECATED
+ if( db->mTrace & SQLITE_TRACE_LEGACY ){
+ char *z = sqlite3VdbeExpandSql(p, zTrace);
+ db->trace.xLegacy(db->pTraceArg, z);
+ sqlite3_free(z);
+ }else
+#endif
+ if( db->nVdbeExec>1 ){
+ char *z = sqlite3MPrintf(db, "-- %s", zTrace);
+ (void)db->trace.xV2(SQLITE_TRACE_STMT, db->pTraceArg, p, z);
+ sqlite3DbFree(db, z);
+ }else{
+ (void)db->trace.xV2(SQLITE_TRACE_STMT, db->pTraceArg, p, zTrace);
+ }
+ }
+#ifdef SQLITE_USE_FCNTL_TRACE
+ zTrace = (pOp->p4.z ? pOp->p4.z : p->zSql);
+ if( zTrace ){
+ int j;
+ for(j=0; j<db->nDb; j++){
+ if( DbMaskTest(p->btreeMask, j)==0 ) continue;
+ sqlite3_file_control(db, db->aDb[j].zDbSName, SQLITE_FCNTL_TRACE, zTrace);
+ }
+ }
+#endif /* SQLITE_USE_FCNTL_TRACE */
+#ifdef SQLITE_DEBUG
+ if( (db->flags & SQLITE_SqlTrace)!=0
+ && (zTrace = (pOp->p4.z ? pOp->p4.z : p->zSql))!=0
+ ){
+ sqlite3DebugPrintf("SQL-trace: %s\n", zTrace);
+ }
+#endif /* SQLITE_DEBUG */
+#endif /* SQLITE_OMIT_TRACE */
+ assert( pOp->p2>0 );
+ if( pOp->p1>=sqlite3GlobalConfig.iOnceResetThreshold ){
+ if( pOp->opcode==OP_Trace ) break;
+ for(i=1; i<p->nOp; i++){
+ if( p->aOp[i].opcode==OP_Once ) p->aOp[i].p1 = 0;
+ }
+ pOp->p1 = 0;
+ }
+ pOp->p1++;
+ p->aCounter[SQLITE_STMTSTATUS_RUN]++;
+ goto jump_to_p2;
+}
+
+#ifdef SQLITE_ENABLE_CURSOR_HINTS
+/* Opcode: CursorHint P1 * * P4 *
+**
+** Provide a hint to cursor P1 that it only needs to return rows that
+** satisfy the Expr in P4. TK_REGISTER terms in the P4 expression refer
+** to values currently held in registers. TK_COLUMN terms in the P4
+** expression refer to columns in the b-tree to which cursor P1 is pointing.
+*/
+case OP_CursorHint: {
+ VdbeCursor *pC;
+
+ assert( pOp->p1>=0 && pOp->p1<p->nCursor );
+ assert( pOp->p4type==P4_EXPR );
+ pC = p->apCsr[pOp->p1];
+ if( pC ){
+ assert( pC->eCurType==CURTYPE_BTREE );
+ sqlite3BtreeCursorHint(pC->uc.pCursor, BTREE_HINT_RANGE,
+ pOp->p4.pExpr, aMem);
+ }
+ break;
+}
+#endif /* SQLITE_ENABLE_CURSOR_HINTS */
+
+#ifdef SQLITE_DEBUG
+/* Opcode: Abortable * * * * *
+**
+** Verify that an Abort can happen. Assert if an Abort at this point
+** might cause database corruption. This opcode only appears in debugging
+** builds.
+**
+** An Abort is safe if either there have been no writes, or if there is
+** an active statement journal.
+*/
+case OP_Abortable: {
+ sqlite3VdbeAssertAbortable(p);
+ break;
+}
+#endif
+
+#ifdef SQLITE_DEBUG
+/* Opcode: ReleaseReg P1 P2 P3 * P5
+** Synopsis: release r[P1@P2] mask P3
+**
+** Release registers from service. Any content that was in the
+** registers is unreliable after this opcode completes.
+**
+** The registers released will be the P2 registers starting at P1,
+** except that if bit ii of P3 is set, register P1+ii is not released.
+** In other words, P3 is a mask of registers to preserve.
+**
+** Releasing a register clears the Mem.pScopyFrom pointer. That means
+** that if the content of the released register was set using OP_SCopy,
+** a change to the value of the source register for the OP_SCopy will no longer
+** generate an assertion fault in sqlite3VdbeMemAboutToChange().
+**
+** If P5 is set, then all released registers have their type set
+** to MEM_Undefined so that any subsequent attempt to read the released
+** register (before it is reinitialized) will generate an assertion fault.
+**
+** P5 ought to be set on every call to this opcode.
+** However, there are places where the code generator will release registers
+** before they are used, under the (valid) assumption that the registers
+** will not be reallocated for some other purpose before they are used and
+** hence are safe to release.
+**
+** This opcode is only available in testing and debugging builds. It is
+** not generated for release builds. The purpose of this opcode is to help
+** validate the generated bytecode. This opcode does not actually contribute
+** to computing an answer.
+*/
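+/* A worked example of the operands described above: with P1=5, P2=3 and
+** P3=0x02 (binary 010), registers r[5] and r[7] are released while r[6]
+** (bit 1 of the P3 mask) is preserved. If P5 is also set, r[5] and r[7]
+** additionally have their types set to MEM_Undefined.
+*/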
+case OP_ReleaseReg: {
+ Mem *pMem;
+ int i;
+ u32 constMask;
+ assert( pOp->p1>0 );
+ assert( pOp->p1+pOp->p2<=(p->nMem+1 - p->nCursor)+1 );
+ pMem = &aMem[pOp->p1];
+ constMask = pOp->p3;
+ for(i=0; i<pOp->p2; i++, pMem++){
+ if( i>=32 || (constMask & MASKBIT32(i))==0 ){
+ pMem->pScopyFrom = 0;
+ if( i<32 && pOp->p5 ) MemSetTypeFlag(pMem, MEM_Undefined);
+ }
+ }
+ break;
+}
+#endif
+
+/* Opcode: Noop * * * * *
+**
+** Do nothing. This instruction is often useful as a jump
+** destination.
+*/
+/*
+** The magic Explain opcode is only inserted when explain==2 (which
+** is to say when the EXPLAIN QUERY PLAN syntax is used).
+** This opcode records information from the optimizer. It is
+** the same as a no-op. This opcode never appears in a real VM program.
+*/
+default: { /* This is really OP_Noop, OP_Explain */
+ assert( pOp->opcode==OP_Noop || pOp->opcode==OP_Explain );
+
+ break;
+}
+
+/*****************************************************************************
+** The cases of the switch statement above this line should all be indented
+** by 6 spaces. But the left-most 6 spaces have been removed to improve the
+** readability. From this point on down, the normal indentation rules are
+** restored.
+*****************************************************************************/
+ }
+
+#ifdef VDBE_PROFILE
+ {
+ u64 endTime = sqlite3NProfileCnt ? sqlite3NProfileCnt : sqlite3Hwtime();
+ if( endTime>start ) pOrigOp->cycles += endTime - start;
+ pOrigOp->cnt++;
+ }
+#endif
+
+ /* The following code adds nothing to the actual functionality
+ ** of the program. It is only here for testing and debugging.
+ ** On the other hand, it does burn CPU cycles every time through
+ ** the evaluator loop. So we can leave it out when NDEBUG is defined.
+ */
+#ifndef NDEBUG
+ assert( pOp>=&aOp[-1] && pOp<&aOp[p->nOp-1] );
+
+#ifdef SQLITE_DEBUG
+ if( db->flags & SQLITE_VdbeTrace ){
+ u8 opProperty = sqlite3OpcodeProperty[pOrigOp->opcode];
+ if( rc!=0 ) printf("rc=%d\n",rc);
+ if( opProperty & (OPFLG_OUT2) ){
+ registerTrace(pOrigOp->p2, &aMem[pOrigOp->p2]);
+ }
+ if( opProperty & OPFLG_OUT3 ){
+ registerTrace(pOrigOp->p3, &aMem[pOrigOp->p3]);
+ }
+ if( opProperty==0xff ){
+ /* Never happens. This code exists to avoid a harmless linkage
+ ** warning about sqlite3VdbeRegisterDump() being defined but not
+ ** used. */
+ sqlite3VdbeRegisterDump(p);
+ }
+ }
+#endif /* SQLITE_DEBUG */
+#endif /* NDEBUG */
+ } /* The end of the for(;;) loop that loops through opcodes */
+
+ /* If we reach this point, it means that execution is finished with
+ ** an error of some kind.
+ */
+abort_due_to_error:
+ if( db->mallocFailed ) rc = SQLITE_NOMEM_BKPT;
+ assert( rc );
+ if( p->zErrMsg==0 && rc!=SQLITE_IOERR_NOMEM ){
+ sqlite3VdbeError(p, "%s", sqlite3ErrStr(rc));
+ }
+ p->rc = rc;
+ sqlite3SystemError(db, rc);
+ testcase( sqlite3GlobalConfig.xLog!=0 );
+ sqlite3_log(rc, "statement aborts at %d: [%s] %s",
+ (int)(pOp - aOp), p->zSql, p->zErrMsg);
+ sqlite3VdbeHalt(p);
+ if( rc==SQLITE_IOERR_NOMEM ) sqlite3OomFault(db);
+ rc = SQLITE_ERROR;
+ if( resetSchemaOnFault>0 ){
+ sqlite3ResetOneSchema(db, resetSchemaOnFault-1);
+ }
+
+ /* This is the only way out of this procedure. We have to
+ ** release the mutexes on btrees that were acquired at the
+ ** top. */
+vdbe_return:
+#ifndef SQLITE_OMIT_PROGRESS_CALLBACK
+ while( nVmStep>=nProgressLimit && db->xProgress!=0 ){
+ nProgressLimit += db->nProgressOps;
+ if( db->xProgress(db->pProgressArg) ){
+ nProgressLimit = LARGEST_UINT64;
+ rc = SQLITE_INTERRUPT;
+ goto abort_due_to_error;
+ }
+ }
+#endif
+ p->aCounter[SQLITE_STMTSTATUS_VM_STEP] += (int)nVmStep;
+ sqlite3VdbeLeave(p);
+ assert( rc!=SQLITE_OK || nExtraDelete==0
+ || sqlite3_strlike("DELETE%",p->zSql,0)!=0
+ );
+ return rc;
+
+ /* Jump to here if a string or blob larger than SQLITE_MAX_LENGTH
+ ** is encountered.
+ */
+too_big:
+ sqlite3VdbeError(p, "string or blob too big");
+ rc = SQLITE_TOOBIG;
+ goto abort_due_to_error;
+
+ /* Jump to here if a malloc() fails.
+ */
+no_mem:
+ sqlite3OomFault(db);
+ sqlite3VdbeError(p, "out of memory");
+ rc = SQLITE_NOMEM_BKPT;
+ goto abort_due_to_error;
+
+ /* Jump to here if the sqlite3_interrupt() API sets the interrupt
+ ** flag.
+ */
+abort_due_to_interrupt:
+ assert( AtomicLoad(&db->u1.isInterrupted) );
+ rc = SQLITE_INTERRUPT;
+ goto abort_due_to_error;
+}
+
+
+/************** End of vdbe.c ************************************************/
+/************** Begin file vdbeblob.c ****************************************/
+/*
+** 2007 May 1
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+**
+** This file contains code used to implement incremental BLOB I/O.
+*/
+
+/* #include "sqliteInt.h" */
+/* #include "vdbeInt.h" */
+
+#ifndef SQLITE_OMIT_INCRBLOB
+
+/*
+** Valid sqlite3_blob* handles point to Incrblob structures.
+*/
+typedef struct Incrblob Incrblob;
+struct Incrblob {
+ int nByte; /* Size of open blob, in bytes */
+ int iOffset; /* Byte offset of blob in cursor data */
+ u16 iCol; /* Table column this handle is open on */
+ BtCursor *pCsr; /* Cursor pointing at blob row */
+ sqlite3_stmt *pStmt; /* Statement holding cursor open */
+ sqlite3 *db; /* The associated database */
+ char *zDb; /* Database name */
+ Table *pTab; /* Table object */
+};
+
+
+/*
+** This function is used by both blob_open() and blob_reopen(). It seeks
+** the b-tree cursor associated with blob handle p to point to row iRow.
+** If successful, SQLITE_OK is returned and subsequent calls to
+** sqlite3_blob_read() or sqlite3_blob_write() access the specified row.
+**
+** If an error occurs, or if the specified row does not exist or does not
+** contain a value of type TEXT or BLOB in the column nominated when the
+** blob handle was opened, then an error code is returned and *pzErr may
+** be set to point to a buffer containing an error message. It is the
+** responsibility of the caller to free the error message buffer using
+** sqlite3DbFree().
+**
+** If an error does occur, then the b-tree cursor is closed. All subsequent
+** calls to sqlite3_blob_read(), blob_write() or blob_reopen() will
+** immediately return SQLITE_ABORT.
+*/
+static int blobSeekToRow(Incrblob *p, sqlite3_int64 iRow, char **pzErr){
+ int rc; /* Error code */
+ char *zErr = 0; /* Error message */
+ Vdbe *v = (Vdbe *)p->pStmt;
+
+ /* Set the value of register r[1] in the SQL statement to integer iRow.
+ ** This is done directly as a performance optimization
+ */
+ v->aMem[1].flags = MEM_Int;
+ v->aMem[1].u.i = iRow;
+
+ /* If the statement has been run before (and is paused at the OP_ResultRow)
+ ** then back it up to the point where it does the OP_NotExists. This could
+ ** have been done with an extra OP_Goto, but simply setting the program
+ ** counter is faster. */
+ if( v->pc>4 ){
+ v->pc = 4;
+ assert( v->aOp[v->pc].opcode==OP_NotExists );
+ rc = sqlite3VdbeExec(v);
+ }else{
+ rc = sqlite3_step(p->pStmt);
+ }
+ if( rc==SQLITE_ROW ){
+ VdbeCursor *pC = v->apCsr[0];
+ u32 type = pC->nHdrParsed>p->iCol ? pC->aType[p->iCol] : 0;
+ testcase( pC->nHdrParsed==p->iCol );
+ testcase( pC->nHdrParsed==p->iCol+1 );
+ if( type<12 ){
+ zErr = sqlite3MPrintf(p->db, "cannot open value of type %s",
+ type==0?"null": type==7?"real": "integer"
+ );
+ rc = SQLITE_ERROR;
+ sqlite3_finalize(p->pStmt);
+ p->pStmt = 0;
+ }else{
+ p->iOffset = pC->aType[p->iCol + pC->nField];
+ p->nByte = sqlite3VdbeSerialTypeLen(type);
+ p->pCsr = pC->uc.pCursor;
+ sqlite3BtreeIncrblobCursor(p->pCsr);
+ }
+ }
+
+ if( rc==SQLITE_ROW ){
+ rc = SQLITE_OK;
+ }else if( p->pStmt ){
+ rc = sqlite3_finalize(p->pStmt);
+ p->pStmt = 0;
+ if( rc==SQLITE_OK ){
+ zErr = sqlite3MPrintf(p->db, "no such rowid: %lld", iRow);
+ rc = SQLITE_ERROR;
+ }else{
+ zErr = sqlite3MPrintf(p->db, "%s", sqlite3_errmsg(p->db));
+ }
+ }
+
+ assert( rc!=SQLITE_OK || zErr==0 );
+ assert( rc!=SQLITE_ROW && rc!=SQLITE_DONE );
+
+ *pzErr = zErr;
+ return rc;
+}
+
+/*
+** Open a blob handle.
+*/
+SQLITE_API int sqlite3_blob_open(
+ sqlite3* db, /* The database connection */
+ const char *zDb, /* The attached database containing the blob */
+ const char *zTable, /* The table containing the blob */
+ const char *zColumn, /* The column containing the blob */
+ sqlite_int64 iRow, /* The row containing the blob */
+ int wrFlag, /* True -> read/write access, false -> read-only */
+ sqlite3_blob **ppBlob /* Handle for accessing the blob returned here */
+){
+ int nAttempt = 0;
+ int iCol; /* Index of zColumn in row-record */
+ int rc = SQLITE_OK;
+ char *zErr = 0;
+ Table *pTab;
+ Incrblob *pBlob = 0;
+ Parse sParse;
+
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( ppBlob==0 ){
+ return SQLITE_MISUSE_BKPT;
+ }
+#endif
+ *ppBlob = 0;
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( !sqlite3SafetyCheckOk(db) || zTable==0 ){
+ return SQLITE_MISUSE_BKPT;
+ }
+#endif
+ wrFlag = !!wrFlag; /* wrFlag = (wrFlag ? 1 : 0); */
+
+ sqlite3_mutex_enter(db->mutex);
+
+ pBlob = (Incrblob *)sqlite3DbMallocZero(db, sizeof(Incrblob));
+ do {
+ memset(&sParse, 0, sizeof(Parse));
+ if( !pBlob ) goto blob_open_out;
+ sParse.db = db;
+ sqlite3DbFree(db, zErr);
+ zErr = 0;
+
+ sqlite3BtreeEnterAll(db);
+ pTab = sqlite3LocateTable(&sParse, 0, zTable, zDb);
+ if( pTab && IsVirtual(pTab) ){
+ pTab = 0;
+ sqlite3ErrorMsg(&sParse, "cannot open virtual table: %s", zTable);
+ }
+ if( pTab && !HasRowid(pTab) ){
+ pTab = 0;
+ sqlite3ErrorMsg(&sParse, "cannot open table without rowid: %s", zTable);
+ }
+#ifndef SQLITE_OMIT_VIEW
+ if( pTab && pTab->pSelect ){
+ pTab = 0;
+ sqlite3ErrorMsg(&sParse, "cannot open view: %s", zTable);
+ }
+#endif
+ if( !pTab ){
+ if( sParse.zErrMsg ){
+ sqlite3DbFree(db, zErr);
+ zErr = sParse.zErrMsg;
+ sParse.zErrMsg = 0;
+ }
+ rc = SQLITE_ERROR;
+ sqlite3BtreeLeaveAll(db);
+ goto blob_open_out;
+ }
+ pBlob->pTab = pTab;
+ pBlob->zDb = db->aDb[sqlite3SchemaToIndex(db, pTab->pSchema)].zDbSName;
+
+ /* Now search pTab for the exact column. */
+ for(iCol=0; iCol<pTab->nCol; iCol++) {
+ if( sqlite3StrICmp(pTab->aCol[iCol].zName, zColumn)==0 ){
+ break;
+ }
+ }
+ if( iCol==pTab->nCol ){
+ sqlite3DbFree(db, zErr);
+ zErr = sqlite3MPrintf(db, "no such column: \"%s\"", zColumn);
+ rc = SQLITE_ERROR;
+ sqlite3BtreeLeaveAll(db);
+ goto blob_open_out;
+ }
+
+ /* If the value is being opened for writing, check that the
+ ** column is not indexed, and that it is not part of a foreign key.
+ */
+ if( wrFlag ){
+ const char *zFault = 0;
+ Index *pIdx;
+#ifndef SQLITE_OMIT_FOREIGN_KEY
+ if( db->flags&SQLITE_ForeignKeys ){
+ /* Check that the column is not part of an FK child key definition. It
+ ** is not necessary to check if it is part of a parent key, as parent
+ ** key columns must be indexed. The check below will pick up this
+ ** case. */
+ FKey *pFKey;
+ for(pFKey=pTab->pFKey; pFKey; pFKey=pFKey->pNextFrom){
+ int j;
+ for(j=0; j<pFKey->nCol; j++){
+ if( pFKey->aCol[j].iFrom==iCol ){
+ zFault = "foreign key";
+ }
+ }
+ }
+ }
+#endif
+ for(pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext){
+ int j;
+ for(j=0; j<pIdx->nKeyCol; j++){
+ /* FIXME: Be smarter about indexes that use expressions */
+ if( pIdx->aiColumn[j]==iCol || pIdx->aiColumn[j]==XN_EXPR ){
+ zFault = "indexed";
+ }
+ }
+ }
+ if( zFault ){
+ sqlite3DbFree(db, zErr);
+ zErr = sqlite3MPrintf(db, "cannot open %s column for writing", zFault);
+ rc = SQLITE_ERROR;
+ sqlite3BtreeLeaveAll(db);
+ goto blob_open_out;
+ }
+ }
+
+ pBlob->pStmt = (sqlite3_stmt *)sqlite3VdbeCreate(&sParse);
+ assert( pBlob->pStmt || db->mallocFailed );
+ if( pBlob->pStmt ){
+
+ /* This VDBE program seeks a btree cursor to the identified
+ ** db/table/row entry. The reason for using a vdbe program instead
+ ** of writing code to use the b-tree layer directly is that the
+ ** vdbe program will take advantage of the various transaction,
+ ** locking and error handling infrastructure built into the vdbe.
+ **
+ ** After seeking the cursor, the vdbe executes an OP_ResultRow.
+ ** Code external to the Vdbe then "borrows" the b-tree cursor and
+ ** uses it to implement the blob_read(), blob_write() and
+ ** blob_bytes() functions.
+ **
+ ** The sqlite3_blob_close() function finalizes the vdbe program,
+ ** which closes the b-tree cursor and (possibly) commits the
+ ** transaction.
+ */
+ static const int iLn = VDBE_OFFSET_LINENO(2);
+ static const VdbeOpList openBlob[] = {
+ {OP_TableLock, 0, 0, 0}, /* 0: Acquire a read or write lock */
+ {OP_OpenRead, 0, 0, 0}, /* 1: Open a cursor */
+ /* blobSeekToRow() will initialize r[1] to the desired rowid */
+ {OP_NotExists, 0, 5, 1}, /* 2: Seek the cursor to rowid=r[1] */
+ {OP_Column, 0, 0, 1}, /* 3 */
+ {OP_ResultRow, 1, 0, 0}, /* 4 */
+ {OP_Halt, 0, 0, 0}, /* 5 */
+ };
+ Vdbe *v = (Vdbe *)pBlob->pStmt;
+ int iDb = sqlite3SchemaToIndex(db, pTab->pSchema);
+ VdbeOp *aOp;
+
+ sqlite3VdbeAddOp4Int(v, OP_Transaction, iDb, wrFlag,
+ pTab->pSchema->schema_cookie,
+ pTab->pSchema->iGeneration);
+ sqlite3VdbeChangeP5(v, 1);
+ assert( sqlite3VdbeCurrentAddr(v)==2 || db->mallocFailed );
+ aOp = sqlite3VdbeAddOpList(v, ArraySize(openBlob), openBlob, iLn);
+
+ /* Make sure a mutex is held on the table to be accessed */
+ sqlite3VdbeUsesBtree(v, iDb);
+
+ if( db->mallocFailed==0 ){
+ assert( aOp!=0 );
+ /* Configure the OP_TableLock instruction */
+#ifdef SQLITE_OMIT_SHARED_CACHE
+ aOp[0].opcode = OP_Noop;
+#else
+ aOp[0].p1 = iDb;
+ aOp[0].p2 = pTab->tnum;
+ aOp[0].p3 = wrFlag;
+ sqlite3VdbeChangeP4(v, 2, pTab->zName, P4_TRANSIENT);
+ }
+ if( db->mallocFailed==0 ){
+#endif
+
+ /* Remove either the OP_OpenWrite or OpenRead. Set the P2
+ ** parameter of the other to pTab->tnum. */
+ if( wrFlag ) aOp[1].opcode = OP_OpenWrite;
+ aOp[1].p2 = pTab->tnum;
+ aOp[1].p3 = iDb;
+
+ /* Configure the number of columns. Configure the cursor to
+ ** think that the table has one more column than it really
+ ** does. An OP_Column to retrieve this imaginary column will
+ ** always return an SQL NULL. This is useful because it means
+ ** we can invoke OP_Column to fill in the vdbe cursor's type
+ ** and offset cache without causing any IO.
+ */
+ aOp[1].p4type = P4_INT32;
+ aOp[1].p4.i = pTab->nCol+1;
+ aOp[3].p2 = pTab->nCol;
+
+ sParse.nVar = 0;
+ sParse.nMem = 1;
+ sParse.nTab = 1;
+ sqlite3VdbeMakeReady(v, &sParse);
+ }
+ }
+
+ pBlob->iCol = iCol;
+ pBlob->db = db;
+ sqlite3BtreeLeaveAll(db);
+ if( db->mallocFailed ){
+ goto blob_open_out;
+ }
+ rc = blobSeekToRow(pBlob, iRow, &zErr);
+ } while( (++nAttempt)<SQLITE_MAX_SCHEMA_RETRY && rc==SQLITE_SCHEMA );
+
+blob_open_out:
+ if( rc==SQLITE_OK && db->mallocFailed==0 ){
+ *ppBlob = (sqlite3_blob *)pBlob;
+ }else{
+ if( pBlob && pBlob->pStmt ) sqlite3VdbeFinalize((Vdbe *)pBlob->pStmt);
+ sqlite3DbFree(db, pBlob);
+ }
+ sqlite3ErrorWithMsg(db, rc, (zErr ? "%s" : 0), zErr);
+ sqlite3DbFree(db, zErr);
+ sqlite3ParserReset(&sParse);
+ rc = sqlite3ApiExit(db, rc);
+ sqlite3_mutex_leave(db->mutex);
+ return rc;
+}
+
+/*
+** Close a blob handle that was previously created using
+** sqlite3_blob_open().
+*/
+SQLITE_API int sqlite3_blob_close(sqlite3_blob *pBlob){
+ Incrblob *p = (Incrblob *)pBlob;
+ int rc;
+ sqlite3 *db;
+
+ if( p ){
+ sqlite3_stmt *pStmt = p->pStmt;
+ db = p->db;
+ sqlite3_mutex_enter(db->mutex);
+ sqlite3DbFree(db, p);
+ sqlite3_mutex_leave(db->mutex);
+ rc = sqlite3_finalize(pStmt);
+ }else{
+ rc = SQLITE_OK;
+ }
+ return rc;
+}
+
+/*
+** Perform a read or write operation on a blob
+*/
+static int blobReadWrite(
+ sqlite3_blob *pBlob,
+ void *z,
+ int n,
+ int iOffset,
+ int (*xCall)(BtCursor*, u32, u32, void*)
+){
+ int rc;
+ Incrblob *p = (Incrblob *)pBlob;
+ Vdbe *v;
+ sqlite3 *db;
+
+ if( p==0 ) return SQLITE_MISUSE_BKPT;
+ db = p->db;
+ sqlite3_mutex_enter(db->mutex);
+ v = (Vdbe*)p->pStmt;
+
+ if( n<0 || iOffset<0 || ((sqlite3_int64)iOffset+n)>p->nByte ){
+ /* Request is out of range. Return a transient error. */
+ rc = SQLITE_ERROR;
+ }else if( v==0 ){
+ /* If there is no statement handle, then the blob-handle has
+ ** already been invalidated. Return SQLITE_ABORT in this case.
+ */
+ rc = SQLITE_ABORT;
+ }else{
+ /* Call either BtreeData() or BtreePutData(). If SQLITE_ABORT is
+ ** returned, clean-up the statement handle.
+ */
+ assert( db == v->db );
+ sqlite3BtreeEnterCursor(p->pCsr);
+
+#ifdef SQLITE_ENABLE_PREUPDATE_HOOK
+ if( xCall==sqlite3BtreePutData && db->xPreUpdateCallback ){
+ /* If a pre-update hook is registered and this is a write cursor,
+ ** invoke it here.
+ **
+ ** TODO: The preupdate-hook is passed SQLITE_DELETE, even though this
+ ** operation should really be an SQLITE_UPDATE. This is probably
+ ** incorrect, but is convenient because at this point the new.* values
+ ** are not easily obtainable. And for the sessions module, an
+ ** SQLITE_UPDATE where the PK columns do not change is handled in the
+ ** same way as an SQLITE_DELETE (the SQLITE_DELETE code is actually
+ ** slightly more efficient). Since you cannot write to a PK column
+ ** using the incremental-blob API, this works. For the sessions module
+ ** anyhow.
+ */
+ sqlite3_int64 iKey;
+ iKey = sqlite3BtreeIntegerKey(p->pCsr);
+ sqlite3VdbePreUpdateHook(
+ v, v->apCsr[0], SQLITE_DELETE, p->zDb, p->pTab, iKey, -1
+ );
+ }
+#endif
+
+ rc = xCall(p->pCsr, iOffset+p->iOffset, n, z);
+ sqlite3BtreeLeaveCursor(p->pCsr);
+ if( rc==SQLITE_ABORT ){
+ sqlite3VdbeFinalize(v);
+ p->pStmt = 0;
+ }else{
+ v->rc = rc;
+ }
+ }
+ sqlite3Error(db, rc);
+ rc = sqlite3ApiExit(db, rc);
+ sqlite3_mutex_leave(db->mutex);
+ return rc;
+}
+
+/*
+** Read data from a blob handle.
+*/
+SQLITE_API int sqlite3_blob_read(sqlite3_blob *pBlob, void *z, int n, int iOffset){
+ return blobReadWrite(pBlob, z, n, iOffset, sqlite3BtreePayloadChecked);
+}
+
+/*
+** Write data to a blob handle.
+*/
+SQLITE_API int sqlite3_blob_write(sqlite3_blob *pBlob, const void *z, int n, int iOffset){
+ return blobReadWrite(pBlob, (void *)z, n, iOffset, sqlite3BtreePutData);
+}
+
+/*
+** Query a blob handle for the size of the data.
+**
+** The Incrblob.nByte field is fixed for the lifetime of the Incrblob
+** so no mutex is required for access.
+*/
+SQLITE_API int sqlite3_blob_bytes(sqlite3_blob *pBlob){
+ Incrblob *p = (Incrblob *)pBlob;
+ return (p && p->pStmt) ? p->nByte : 0;
+}
+
+/*
+** Move an existing blob handle to point to a different row of the same
+** database table.
+**
+** If an error occurs, or if the specified row does not exist or does not
+** contain a blob or text value, then an error code is returned and the
+** database handle error code and message are set. If this happens, then all
+** subsequent calls to sqlite3_blob_xxx() functions (except blob_close())
+** immediately return SQLITE_ABORT.
+*/
+SQLITE_API int sqlite3_blob_reopen(sqlite3_blob *pBlob, sqlite3_int64 iRow){
+ int rc;
+ Incrblob *p = (Incrblob *)pBlob;
+ sqlite3 *db;
+
+ if( p==0 ) return SQLITE_MISUSE_BKPT;
+ db = p->db;
+ sqlite3_mutex_enter(db->mutex);
+
+ if( p->pStmt==0 ){
+ /* If there is no statement handle, then the blob-handle has
+ ** already been invalidated. Return SQLITE_ABORT in this case.
+ */
+ rc = SQLITE_ABORT;
+ }else{
+ char *zErr;
+ rc = blobSeekToRow(p, iRow, &zErr);
+ if( rc!=SQLITE_OK ){
+ sqlite3ErrorWithMsg(db, rc, (zErr ? "%s" : 0), zErr);
+ sqlite3DbFree(db, zErr);
+ }
+ assert( rc!=SQLITE_SCHEMA );
+ }
+
+ rc = sqlite3ApiExit(db, rc);
+ assert( rc==SQLITE_OK || p->pStmt==0 );
+ sqlite3_mutex_leave(db->mutex);
+ return rc;
+}
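+
+/* Illustrative sketch of how the API implemented in this file is typically
+** used (error handling abbreviated; the "docs"/"body" table and column
+** names are hypothetical):
+**
+**   sqlite3_blob *pBlob;
+**   int rc = sqlite3_blob_open(db, "main", "docs", "body", iRow, 0, &pBlob);
+**   if( rc==SQLITE_OK ){
+**     int n = sqlite3_blob_bytes(pBlob);
+**     char *buf = sqlite3_malloc(n);
+**     if( buf ){
+**       rc = sqlite3_blob_read(pBlob, buf, n, 0); /* whole value, offset 0 */
+**       /* ... use buf ... */
+**       sqlite3_free(buf);
+**     }
+**     rc = sqlite3_blob_close(pBlob);
+**   }
+*/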
+
+#endif /* #ifndef SQLITE_OMIT_INCRBLOB */
+
+/************** End of vdbeblob.c ********************************************/
+/************** Begin file vdbesort.c ****************************************/
+/*
+** 2011-07-09
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+** This file contains code for the VdbeSorter object, used in concert with
+** a VdbeCursor to sort large numbers of keys for CREATE INDEX statements
+** or by SELECT statements with ORDER BY clauses that cannot be satisfied
+** using indexes and without LIMIT clauses.
+**
+** The VdbeSorter object implements a multi-threaded external merge sort
+** algorithm that is efficient even if the number of elements being sorted
+** exceeds the available memory.
+**
+** Here is the (internal, non-API) interface between this module and the
+** rest of the SQLite system:
+**
+** sqlite3VdbeSorterInit() Create a new VdbeSorter object.
+**
+** sqlite3VdbeSorterWrite() Add a single new row to the VdbeSorter
+** object. The row is a binary blob in the
+** OP_MakeRecord format that contains both
+** the ORDER BY key columns and result columns
+** in the case of a SELECT w/ ORDER BY, or
+** the complete record for an index entry
+** in the case of a CREATE INDEX.
+**
+** sqlite3VdbeSorterRewind() Sort all content previously added.
+** Position the read cursor on the
+** first sorted element.
+**
+** sqlite3VdbeSorterNext() Advance the read cursor to the next sorted
+** element.
+**
+** sqlite3VdbeSorterRowkey() Return the complete binary blob for the
+** row currently under the read cursor.
+**
+** sqlite3VdbeSorterCompare() Compare the binary blob for the row
+** currently under the read cursor against
+** another binary blob X and report if
+** X is strictly less than the read cursor.
+** Used to enforce uniqueness in a
+** CREATE UNIQUE INDEX statement.
+**
+** sqlite3VdbeSorterClose() Close the VdbeSorter object and reclaim
+** all resources.
+**
+** sqlite3VdbeSorterReset() Refurbish the VdbeSorter for reuse. This
+** is like Close() followed by Init() only
+** much faster.
+**
+** The interfaces above must be called in a particular order. Write() can
+** only occur in between Init()/Reset() and Rewind(). Next(), Rowkey(), and
+** Compare() can only occur in between Rewind() and Close()/Reset(). i.e.
+**
+** Init()
+** for each record: Write()
+** Rewind()
+** Rowkey()/Compare()
+** Next()
+** Close()
+**
+** Algorithm:
+**
+** Records passed to the sorter via calls to Write() are initially held
+** unsorted in main memory. Assuming the amount of memory used never exceeds
+** a threshold, when Rewind() is called the set of records is sorted using
+** an in-memory merge sort. In this case, no temporary files are required
+** and subsequent calls to Rowkey(), Next() and Compare() read records
+** directly from main memory.
+**
+** If the amount of space used to store records in main memory exceeds the
+** threshold, then the set of records currently in memory are sorted and
+** written to a temporary file in "Packed Memory Array" (PMA) format.
+** A PMA created at this point is known as a "level-0 PMA". Higher levels
+** of PMAs may be created by merging existing PMAs together - for example
+** merging two or more level-0 PMAs together creates a level-1 PMA.
+**
+** The threshold for the amount of main memory to use before flushing
+** records to a PMA is roughly the same as the limit configured for the
+** page-cache of the main database. Specifically, the threshold is set to
+** the value returned by "PRAGMA main.page_size" multiplied by
+** that returned by "PRAGMA main.cache_size", in bytes.
+**
+** If the sorter is running in single-threaded mode, then all PMAs generated
+** are appended to a single temporary file. Or, if the sorter is running in
+** multi-threaded mode then up to (N+1) temporary files may be opened, where
+** N is the configured number of worker threads. In this case, instead of
+** sorting the records and writing the PMA to a temporary file itself, the
+** calling thread usually launches a worker thread to do so. Except, if
+** there are already N worker threads running, the main thread does the work
+** itself.
+**
+** The sorter is running in multi-threaded mode if (a) the library was built
+** with pre-processor symbol SQLITE_MAX_WORKER_THREADS set to a value greater
+** than zero, and (b) worker threads have been enabled at runtime by calling
+** "PRAGMA threads=N" with some value of N greater than 0.
+**
+** When Rewind() is called, any data remaining in memory is flushed to a
+** final PMA. So at this point the data is stored in some number of sorted
+** PMAs within temporary files on disk.
+**
+** If there are fewer than SORTER_MAX_MERGE_COUNT PMAs in total and the
+** sorter is running in single-threaded mode, then these PMAs are merged
+** incrementally as keys are retrieved from the sorter by the VDBE. The
+** MergeEngine object, described in further detail below, performs this
+** merge.
+**
+** Or, if running in multi-threaded mode, then a background thread is
+** launched to merge the existing PMAs. Once the background thread has
+** merged T bytes of data into a single sorted PMA, the main thread
+** begins reading keys from that PMA while the background thread proceeds
+** with merging the next T bytes of data. And so on.
+**
+** Parameter T is set to half the value of the memory threshold used
+** by Write() above to determine when to create a new PMA.
+**
+** If there are more than SORTER_MAX_MERGE_COUNT PMAs in total when
+** Rewind() is called, then a hierarchy of incremental-merges is used.
+** First, T bytes of data from the first SORTER_MAX_MERGE_COUNT PMAs on
+** disk are merged together. Then T bytes of data from the second set, and
+** so on, such that no operation ever merges more than SORTER_MAX_MERGE_COUNT
+** PMAs at a time. This is done to improve locality.
+**
+** If running in multi-threaded mode and there are more than
+** SORTER_MAX_MERGE_COUNT PMAs on disk when Rewind() is called, then more
+** than one background thread may be created. Specifically, there may be
+** one background thread for each temporary file on disk, and one background
+** thread to merge the output of each of the others to a single PMA for
+** the main thread to read from.
+*/
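+/* A worked example of the flush threshold described above: with
+** "PRAGMA main.page_size" = 4096 and "PRAGMA main.cache_size" = 2000,
+** roughly 4096*2000 = 8,192,000 bytes (about 8 MiB) of records accumulate
+** in memory before being sorted and flushed to a level-0 PMA.
+*/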
+/* #include "sqliteInt.h" */
+/* #include "vdbeInt.h" */
+
+/*
+** If SQLITE_DEBUG_SORTER_THREADS is defined, this module outputs various
+** messages to stderr that may be helpful in understanding the performance
+** characteristics of the sorter in multi-threaded mode.
+*/
+#if 0
+# define SQLITE_DEBUG_SORTER_THREADS 1
+#endif
+
+/*
+** Hard-coded maximum amount of data to accumulate in memory before flushing
+** to a level 0 PMA. The purpose of this limit is to prevent various integer
+** overflows. 512MiB.
+*/
+#define SQLITE_MAX_PMASZ (1<<29)
+
+/*
+** Private objects used by the sorter
+*/
+typedef struct MergeEngine MergeEngine; /* Merge PMAs together */
+typedef struct PmaReader PmaReader; /* Incrementally read one PMA */
+typedef struct PmaWriter PmaWriter; /* Incrementally write one PMA */
+typedef struct SorterRecord SorterRecord; /* A record being sorted */
+typedef struct SortSubtask SortSubtask; /* A sub-task in the sort process */
+typedef struct SorterFile SorterFile; /* Temporary file object wrapper */
+typedef struct SorterList SorterList; /* In-memory list of records */
+typedef struct IncrMerger IncrMerger; /* Read & merge multiple PMAs */
+
+/*
+** A container for a temp file handle and the current amount of data
+** stored in the file.
+*/
+struct SorterFile {
+ sqlite3_file *pFd; /* File handle */
+ i64 iEof; /* Bytes of data stored in pFd */
+};
+
+/*
+** An in-memory list of objects to be sorted.
+**
+** If aMemory==0 then each object is allocated separately and the objects
+** are connected using SorterRecord.u.pNext. If aMemory!=0 then all objects
+** are stored in the aMemory[] bulk memory, one right after the other, and
+** are connected using SorterRecord.u.iNext.
+*/
+struct SorterList {
+ SorterRecord *pList; /* Linked list of records */
+ u8 *aMemory; /* If non-NULL, bulk memory to hold pList */
+ int szPMA; /* Size of pList as PMA in bytes */
+};
+
+/*
+** The MergeEngine object is used to combine two or more smaller PMAs into
+** one big PMA using a merge operation. Separate PMAs all need to be
+** combined into one big PMA in order to be able to step through the sorted
+** records in order.
+**
+** The aReadr[] array contains a PmaReader object for each of the PMAs being
+** merged. An aReadr[] object either points to a valid key or else is at EOF.
+** ("EOF" means "End Of File". When aReadr[] is at EOF there is no more data.)
+** For the purposes of the paragraphs below, we assume that the array is
+** actually N elements in size, where N is the smallest power of 2 greater
+** to or equal to the number of PMAs being merged. The extra aReadr[] elements
+** are treated as if they are empty (always at EOF).
+**
+** The aTree[] array is also N elements in size. The value of N is stored in
+** the MergeEngine.nTree variable.
+**
+** The final (N/2) elements of aTree[] contain the results of comparing
+** pairs of PMA keys together. Element i contains the result of
+** comparing aReadr[2*i-N] and aReadr[2*i-N+1]. Whichever key is smaller, the
+** aTree element is set to the index of it.
+**
+** For the purposes of this comparison, EOF is considered greater than any
+** other key value. If the keys are equal (only possible with two EOF
+** values), it doesn't matter which index is stored.
+**
+** The (N/4) elements of aTree[] that precede the final (N/2) described
+** above contain the index of the smallest of each block of 4 PmaReaders,
+** and so on, so that aTree[1] contains the index of the PmaReader that
+** currently points to the smallest key value. aTree[0] is unused.
+**
+** Example:
+**
+** aReadr[0] -> Banana
+** aReadr[1] -> Feijoa
+** aReadr[2] -> Elderberry
+** aReadr[3] -> Currant
+** aReadr[4] -> Grapefruit
+** aReadr[5] -> Apple
+** aReadr[6] -> Durian
+** aReadr[7] -> EOF
+**
+** aTree[] = { X, 5, 0, 5, 0, 3, 5, 6 }
+**
+** The current element is "Apple" (the value of the key indicated by
+** PmaReader 5). When the Next() operation is invoked, PmaReader 5 will
+** be advanced to the next key in its segment. Say the next key is
+** "Eggplant":
+**
+** aReadr[5] -> Eggplant
+**
+** The contents of aTree[] are updated first by comparing the new PmaReader
+** 5 key to the current key of PmaReader 4 (still "Grapefruit"). The PmaReader
+** 5 value is still smaller, so aTree[6] is set to 5. And so on up the tree.
+** The value of PmaReader 6 - "Durian" - is now smaller than that of PmaReader
+** 5, so aTree[3] is set to 6. Key 0 is smaller than key 6 (Banana<Durian),
+** so the value written into element 1 of the array is 0. As follows:
+**
+** aTree[] = { X, 0, 0, 6, 0, 3, 5, 6 }
+**
+** In other words, each time we advance to the next sorter element, log2(N)
+** key comparison operations are required, where N is the number of segments
+** being merged (rounded up to the next power of 2).
+*/
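+/* A sketch (pseudo-code, not the actual implementation later in this file)
+** of the aTree[] update that the example above walks through. After the
+** winning PmaReader iWin is advanced, its leaf comparison is redone and
+** the result is re-fought up to the root in log2(N) steps:
+**
+**   i = (N + iWin)/2;                  // leaf node for reader iWin
+**   aTree[i] = index of the smaller of aReadr[2*i-N] and aReadr[2*i-N+1];
+**   for(i=i/2; i>0; i=i/2){            // internal nodes up to aTree[1]
+**     aTree[i] = smaller (by key) of readers aTree[2*i] and aTree[2*i+1];
+**   }
+*/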
+struct MergeEngine {
+ int nTree; /* Used size of aTree/aReadr (power of 2) */
+ SortSubtask *pTask; /* Used by this thread only */
+ int *aTree; /* Current state of incremental merge */
+ PmaReader *aReadr; /* Array of PmaReaders to merge data from */
+};
+
+/*
+** This object represents a single thread of control in a sort operation.
+** Exactly VdbeSorter.nTask instances of this object are allocated
+** as part of each VdbeSorter object. Instances are never allocated any
+** other way. VdbeSorter.nTask is set to the number of worker threads allowed
+** (see SQLITE_CONFIG_WORKER_THREADS) plus one (the main thread). Thus for
+** single-threaded operation, there is exactly one instance of this object
+** and for multi-threaded operation there are two or more instances.
+**
+** Essentially, this structure contains all those fields of the VdbeSorter
+** structure for which each thread requires a separate instance. For example,
+** each thread requires its own UnpackedRecord object to unpack records
+** as part of comparison operations.
+**
+** Before a background thread is launched, variable bDone is set to 0. Then,
+** right before it exits, the thread itself sets bDone to 1. This is used for
+** two purposes:
+**
+** 1. When flushing the contents of memory to a level-0 PMA on disk, to
+** attempt to select a SortSubtask for which there is not already an
+** active background thread (since doing so causes the main thread
+** to block until it finishes).
+**
+** 2. If SQLITE_DEBUG_SORTER_THREADS is defined, to determine if a call
+** to sqlite3ThreadJoin() is likely to block. Cases that are likely to
+** block provoke debugging output.
+**
+** In both cases, the effects of the main thread seeing (bDone==0) even
+** after the thread has finished are not dire. So we don't worry about
+** memory barriers and such here.
+*/
+typedef int (*SorterCompare)(SortSubtask*,int*,const void*,int,const void*,int);
+struct SortSubtask {
+ SQLiteThread *pThread; /* Background thread, if any */
+ int bDone; /* Set if thread is finished but not joined */
+ VdbeSorter *pSorter; /* Sorter that owns this sub-task */
+ UnpackedRecord *pUnpacked; /* Space to unpack a record */
+ SorterList list; /* List for thread to write to a PMA */
+ int nPMA; /* Number of PMAs currently in file */
+ SorterCompare xCompare; /* Compare function to use */
+ SorterFile file; /* Temp file for level-0 PMAs */
+ SorterFile file2; /* Space for other PMAs */
+};
+
+
+/*
+** Main sorter structure. A single instance of this is allocated for each
+** sorter cursor created by the VDBE.
+**
+** mxKeysize:
+** As records are added to the sorter by calls to sqlite3VdbeSorterWrite(),
+** this variable is updated so as to be set to the size on disk of the
+** largest record in the sorter.
+*/
+struct VdbeSorter {
+ int mnPmaSize; /* Minimum PMA size, in bytes */
+ int mxPmaSize; /* Maximum PMA size, in bytes. 0==no limit */
+ int mxKeysize; /* Largest serialized key seen so far */
+ int pgsz; /* Main database page size */
+ PmaReader *pReader; /* Read data from here after Rewind() */
+ MergeEngine *pMerger; /* Or here, if bUseThreads==0 */
+ sqlite3 *db; /* Database connection */
+ KeyInfo *pKeyInfo; /* How to compare records */
+ UnpackedRecord *pUnpacked; /* Used by VdbeSorterCompare() */
+ SorterList list; /* List of in-memory records */
+ int iMemory; /* Offset of free space in list.aMemory */
+ int nMemory; /* Size of list.aMemory allocation in bytes */
+ u8 bUsePMA; /* True if one or more PMAs created */
+ u8 bUseThreads; /* True to use background threads */
+ u8 iPrev; /* Previous thread used to flush PMA */
+ u8 nTask; /* Size of aTask[] array */
+ u8 typeMask;
+ SortSubtask aTask[1]; /* One or more subtasks */
+};
+
+#define SORTER_TYPE_INTEGER 0x01
+#define SORTER_TYPE_TEXT 0x02
+
+/*
+** An instance of the following object is used to read records out of a
+** PMA, in sorted order. The next key to be read is cached in nKey/aKey.
+** aKey might point into aMap or into aBuffer. If neither of those locations
+** contains a contiguous representation of the key, then aAlloc is allocated
+** and the key is copied into aAlloc and aKey is made to point to aAlloc.
+**
+** pFd==0 at EOF.
+*/
+struct PmaReader {
+ i64 iReadOff; /* Current read offset */
+ i64 iEof; /* 1 byte past EOF for this PmaReader */
+ int nAlloc; /* Bytes of space at aAlloc */
+ int nKey; /* Number of bytes in key */
+ sqlite3_file *pFd; /* File handle we are reading from */
+ u8 *aAlloc; /* Space for aKey if aBuffer and aMap won't work */
+ u8 *aKey; /* Pointer to current key */
+ u8 *aBuffer; /* Current read buffer */
+ int nBuffer; /* Size of read buffer in bytes */
+ u8 *aMap; /* Pointer to mapping of entire file */
+ IncrMerger *pIncr; /* Incremental merger */
+};
+
+/*
+** Normally, a PmaReader object iterates through an existing PMA stored
+** within a temp file. However, if the PmaReader.pIncr variable points to
+** an object of the following type, it may be used to iterate/merge through
+** multiple PMAs simultaneously.
+**
+** There are two types of IncrMerger object - single (bUseThread==0) and
+** multi-threaded (bUseThread==1).
+**
+** A multi-threaded IncrMerger object uses two temporary files - aFile[0]
+** and aFile[1]. Neither file is allowed to grow to more than mxSz bytes in
+** size. When the IncrMerger is initialized, it reads enough data from
+** pMerger to populate aFile[0]. It then sets variables within the
+** corresponding PmaReader object to read from that file and kicks off
+** a background thread to populate aFile[1] with the next mxSz bytes of
+** sorted record data from pMerger.
+**
+** When the PmaReader reaches the end of aFile[0], it blocks until the
+** background thread has finished populating aFile[1]. It then exchanges
+** the contents of the aFile[0] and aFile[1] variables within this structure,
+** sets the PmaReader fields to read from the new aFile[0] and kicks off
+** another background thread to populate the new aFile[1]. And so on, until
+** the contents of pMerger are exhausted.
+**
+** A single-threaded IncrMerger does not open any temporary files of its
+** own. Instead, it has exclusive access to mxSz bytes of space beginning
+** at offset iStartOff of file pTask->file2. And instead of using a
+** background thread to prepare data for the PmaReader, with a single
+** threaded IncrMerger the allocated part of pTask->file2 is "refilled" with
+** keys from pMerger by the calling thread whenever the PmaReader runs out
+** of data.
+*/
+struct IncrMerger {
+ SortSubtask *pTask; /* Task that owns this merger */
+ MergeEngine *pMerger; /* Merge engine thread reads data from */
+ i64 iStartOff; /* Offset to start writing file at */
+ int mxSz; /* Maximum bytes of data to store */
+ int bEof; /* Set to true when merge is finished */
+ int bUseThread; /* True to use a bg thread for this object */
+ SorterFile aFile[2]; /* aFile[0] for reading, [1] for writing */
+};
+
+/*
+** An instance of this object is used for writing a PMA.
+**
+** The PMA is written one record at a time. Each record is of an arbitrary
+** size. But I/O is more efficient if it occurs in page-sized blocks where
+** each block is aligned on a page boundary. This object caches writes to
+** the PMA so that aligned, page-size blocks are written.
+*/
+struct PmaWriter {
+ int eFWErr; /* Non-zero if in an error state */
+ u8 *aBuffer; /* Pointer to write buffer */
+ int nBuffer; /* Size of write buffer in bytes */
+ int iBufStart; /* First byte of buffer to write */
+ int iBufEnd; /* Last byte of buffer to write */
+ i64 iWriteOff; /* Offset of start of buffer in file */
+ sqlite3_file *pFd; /* File handle to write to */
+};
+
+/*
+** This object is the header on a single record while that record is being
+** held in memory and prior to being written out as part of a PMA.
+**
+** How the linked list is connected depends on how memory is being managed
+** by this module. If using a separate allocation for each in-memory record
+** (VdbeSorter.list.aMemory==0), then the list is always connected using the
+** SorterRecord.u.pNext pointers.
+**
+** Or, if using the single large allocation method (VdbeSorter.list.aMemory!=0),
+** then while records are being accumulated the list is linked using the
+** SorterRecord.u.iNext offset. This is because the aMemory[] array may
+** be sqlite3Realloc()ed while records are being accumulated. Once the VM
+** has finished passing records to the sorter, or when the in-memory buffer
+** is full, the list is sorted. As part of the sorting process, it is
+** converted to use the SorterRecord.u.pNext pointers. See function
+** vdbeSorterSort() for details.
+*/
+struct SorterRecord {
+ int nVal; /* Size of the record in bytes */
+ union {
+ SorterRecord *pNext; /* Pointer to next record in list */
+ int iNext; /* Offset within aMemory of next record */
+ } u;
+ /* The data for the record immediately follows this header */
+};
+
+/* Return a pointer to the buffer containing the record data for SorterRecord
+** object p. Should be used as if:
+**
+** void *SRVAL(SorterRecord *p) { return (void*)&p[1]; }
+*/
+#define SRVAL(p) ((void*)((SorterRecord*)(p) + 1))
+
+
+/* Maximum number of PMAs that a single MergeEngine can merge */
+#define SORTER_MAX_MERGE_COUNT 16
+
+static int vdbeIncrSwap(IncrMerger*);
+static void vdbeIncrFree(IncrMerger *);
+
+/*
+** Free all memory belonging to the PmaReader object passed as the
+** argument. All structure fields are set to zero before returning.
+*/
+static void vdbePmaReaderClear(PmaReader *pReadr){
+ sqlite3_free(pReadr->aAlloc);
+ sqlite3_free(pReadr->aBuffer);
+ if( pReadr->aMap ) sqlite3OsUnfetch(pReadr->pFd, 0, pReadr->aMap);
+ vdbeIncrFree(pReadr->pIncr);
+ memset(pReadr, 0, sizeof(PmaReader));
+}
+
+/*
+** Read the next nByte bytes of data from the PMA p.
+** If successful, set *ppOut to point to a buffer containing the data
+** and return SQLITE_OK. Otherwise, if an error occurs, return an SQLite
+** error code.
+**
+** The buffer returned in *ppOut is only valid until the
+** next call to this function.
+*/
+static int vdbePmaReadBlob(
+ PmaReader *p, /* PmaReader from which to take the blob */
+ int nByte, /* Bytes of data to read */
+ u8 **ppOut /* OUT: Pointer to buffer containing data */
+){
+ int iBuf; /* Offset within buffer to read from */
+ int nAvail; /* Bytes of data available in buffer */
+
+ if( p->aMap ){
+ *ppOut = &p->aMap[p->iReadOff];
+ p->iReadOff += nByte;
+ return SQLITE_OK;
+ }
+
+ assert( p->aBuffer );
+
+ /* If there is no more data to be read from the buffer, read the next
+ ** p->nBuffer bytes of data from the file into it. Or, if there are less
+ ** than p->nBuffer bytes remaining in the PMA, read all remaining data. */
+ iBuf = p->iReadOff % p->nBuffer;
+ if( iBuf==0 ){
+ int nRead; /* Bytes to read from disk */
+ int rc; /* sqlite3OsRead() return code */
+
+ /* Determine how many bytes of data to read. */
+ if( (p->iEof - p->iReadOff) > (i64)p->nBuffer ){
+ nRead = p->nBuffer;
+ }else{
+ nRead = (int)(p->iEof - p->iReadOff);
+ }
+ assert( nRead>0 );
+
+ /* Read data from the file. Return early if an error occurs. */
+ rc = sqlite3OsRead(p->pFd, p->aBuffer, nRead, p->iReadOff);
+ assert( rc!=SQLITE_IOERR_SHORT_READ );
+ if( rc!=SQLITE_OK ) return rc;
+ }
+ nAvail = p->nBuffer - iBuf;
+
+ if( nByte<=nAvail ){
+ /* The requested data is available in the in-memory buffer. In this
+ ** case there is no need to make a copy of the data, just return a
+ ** pointer into the buffer to the caller. */
+ *ppOut = &p->aBuffer[iBuf];
+ p->iReadOff += nByte;
+ }else{
+ /* The requested data is not all available in the in-memory buffer.
+ ** In this case, allocate space at p->aAlloc[] to copy the requested
+ ** range into. Then return a copy of pointer p->aAlloc to the caller. */
+ int nRem; /* Bytes remaining to copy */
+
+ /* Extend the p->aAlloc[] allocation if required. */
+ if( p->nAlloc<nByte ){
+ u8 *aNew;
+ sqlite3_int64 nNew = MAX(128, 2*(sqlite3_int64)p->nAlloc);
+ while( nByte>nNew ) nNew = nNew*2;
+ aNew = sqlite3Realloc(p->aAlloc, nNew);
+ if( !aNew ) return SQLITE_NOMEM_BKPT;
+ p->nAlloc = nNew;
+ p->aAlloc = aNew;
+ }
+
+ /* Copy as much data as is available in the buffer into the start of
+ ** p->aAlloc[]. */
+ memcpy(p->aAlloc, &p->aBuffer[iBuf], nAvail);
+ p->iReadOff += nAvail;
+ nRem = nByte - nAvail;
+
+ /* The following loop copies up to p->nBuffer bytes per iteration into
+ ** the p->aAlloc[] buffer. */
+ while( nRem>0 ){
+ int rc; /* vdbePmaReadBlob() return code */
+ int nCopy; /* Number of bytes to copy */
+ u8 *aNext; /* Pointer to buffer to copy data from */
+
+ nCopy = nRem;
+ if( nRem>p->nBuffer ) nCopy = p->nBuffer;
+ rc = vdbePmaReadBlob(p, nCopy, &aNext);
+ if( rc!=SQLITE_OK ) return rc;
+ assert( aNext!=p->aAlloc );
+ memcpy(&p->aAlloc[nByte - nRem], aNext, nCopy);
+ nRem -= nCopy;
+ }
+
+ *ppOut = p->aAlloc;
+ }
+
+ return SQLITE_OK;
+}
+
+/*
+** Read a varint from the stream of data accessed by p. Set *pnOut to
+** the value read.
+*/
+static int vdbePmaReadVarint(PmaReader *p, u64 *pnOut){
+ int iBuf;
+
+ if( p->aMap ){
+ p->iReadOff += sqlite3GetVarint(&p->aMap[p->iReadOff], pnOut);
+ }else{
+ iBuf = p->iReadOff % p->nBuffer;
+ if( iBuf && (p->nBuffer-iBuf)>=9 ){
+ p->iReadOff += sqlite3GetVarint(&p->aBuffer[iBuf], pnOut);
+ }else{
+ u8 aVarint[16], *a;
+ int i = 0, rc;
+ do{
+ rc = vdbePmaReadBlob(p, 1, &a);
+ if( rc ) return rc;
+ aVarint[(i++)&0xf] = a[0];
+ }while( (a[0]&0x80)!=0 );
+ sqlite3GetVarint(aVarint, pnOut);
+ }
+ }
+
+ return SQLITE_OK;
+}
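+
+/* The varints read above use the SQLite record format: big-endian, seven
+** bits per byte, with the high bit of each byte set when another byte
+** follows (a ninth byte, when present, contributes all eight bits).
+** For example:
+**
+**   0x7f            -> 127
+**   0x81 0x00       -> 128    ((1<<7) | 0x00)
+**   0x82 0xac 0x41  -> 38465  ((2<<14) | (0x2c<<7) | 0x41)
+*/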
+
+/*
+** Attempt to memory map file pFile. If successful, set *pp to point to the
+** new mapping and return SQLITE_OK. If the mapping is not attempted
+** (because the file is too large or the VFS layer is configured not to use
+** mmap), return SQLITE_OK and set *pp to NULL.
+**
+** Or, if an error occurs, return an SQLite error code. The final value of
+** *pp is undefined in this case.
+*/
+static int vdbeSorterMapFile(SortSubtask *pTask, SorterFile *pFile, u8 **pp){
+ int rc = SQLITE_OK;
+ if( pFile->iEof<=(i64)(pTask->pSorter->db->nMaxSorterMmap) ){
+ sqlite3_file *pFd = pFile->pFd;
+ if( pFd->pMethods->iVersion>=3 ){
+ rc = sqlite3OsFetch(pFd, 0, (int)pFile->iEof, (void**)pp);
+ testcase( rc!=SQLITE_OK );
+ }
+ }
+ return rc;
+}
+
+/*
+** Attach PmaReader pReadr to file pFile (if it is not already attached to
+** that file) and seek it to offset iOff within the file. Return SQLITE_OK
+** if successful, or an SQLite error code if an error occurs.
+*/
+static int vdbePmaReaderSeek(
+ SortSubtask *pTask, /* Task context */
+ PmaReader *pReadr, /* Reader whose cursor is to be moved */
+ SorterFile *pFile, /* Sorter file to read from */
+ i64 iOff /* Offset in pFile */
+){
+ int rc = SQLITE_OK;
+
+ assert( pReadr->pIncr==0 || pReadr->pIncr->bEof==0 );
+
+ if( sqlite3FaultSim(201) ) return SQLITE_IOERR_READ;
+ if( pReadr->aMap ){
+ sqlite3OsUnfetch(pReadr->pFd, 0, pReadr->aMap);
+ pReadr->aMap = 0;
+ }
+ pReadr->iReadOff = iOff;
+ pReadr->iEof = pFile->iEof;
+ pReadr->pFd = pFile->pFd;
+
+ rc = vdbeSorterMapFile(pTask, pFile, &pReadr->aMap);
+ if( rc==SQLITE_OK && pReadr->aMap==0 ){
+ int pgsz = pTask->pSorter->pgsz;
+ int iBuf = pReadr->iReadOff % pgsz;
+ if( pReadr->aBuffer==0 ){
+ pReadr->aBuffer = (u8*)sqlite3Malloc(pgsz);
+ if( pReadr->aBuffer==0 ) rc = SQLITE_NOMEM_BKPT;
+ pReadr->nBuffer = pgsz;
+ }
+ if( rc==SQLITE_OK && iBuf ){
+ int nRead = pgsz - iBuf;
+ if( (pReadr->iReadOff + nRead) > pReadr->iEof ){
+ nRead = (int)(pReadr->iEof - pReadr->iReadOff);
+ }
+ rc = sqlite3OsRead(
+ pReadr->pFd, &pReadr->aBuffer[iBuf], nRead, pReadr->iReadOff
+ );
+ testcase( rc!=SQLITE_OK );
+ }
+ }
+
+ return rc;
+}
+
+/*
+** Advance PmaReader pReadr to the next key in its PMA. Return SQLITE_OK if
+** no error occurs, or an SQLite error code if one does.
+*/
+static int vdbePmaReaderNext(PmaReader *pReadr){
+ int rc = SQLITE_OK; /* Return Code */
+ u64 nRec = 0; /* Size of record in bytes */
+
+
+ if( pReadr->iReadOff>=pReadr->iEof ){
+ IncrMerger *pIncr = pReadr->pIncr;
+ int bEof = 1;
+ if( pIncr ){
+ rc = vdbeIncrSwap(pIncr);
+ if( rc==SQLITE_OK && pIncr->bEof==0 ){
+ rc = vdbePmaReaderSeek(
+ pIncr->pTask, pReadr, &pIncr->aFile[0], pIncr->iStartOff
+ );
+ bEof = 0;
+ }
+ }
+
+ if( bEof ){
+ /* This is an EOF condition */
+ vdbePmaReaderClear(pReadr);
+ testcase( rc!=SQLITE_OK );
+ return rc;
+ }
+ }
+
+ if( rc==SQLITE_OK ){
+ rc = vdbePmaReadVarint(pReadr, &nRec);
+ }
+ if( rc==SQLITE_OK ){
+ pReadr->nKey = (int)nRec;
+ rc = vdbePmaReadBlob(pReadr, (int)nRec, &pReadr->aKey);
+ testcase( rc!=SQLITE_OK );
+ }
+
+ return rc;
+}
+
+/*
+** Initialize PmaReader pReadr to scan through the PMA stored in file pFile
+** starting at offset iStart and ending at offset iEof-1. This function
+** leaves the PmaReader pointing to the first key in the PMA (or EOF if the
+** PMA is empty).
+**
+** If the pnByte parameter is NULL, then it is assumed that the file
+** contains a single PMA, and that that PMA omits the initial length varint.
+*/
+static int vdbePmaReaderInit(
+ SortSubtask *pTask, /* Task context */
+ SorterFile *pFile, /* Sorter file to read from */
+ i64 iStart, /* Start offset in pFile */
+ PmaReader *pReadr, /* PmaReader to populate */
+ i64 *pnByte /* IN/OUT: Increment this value by PMA size */
+){
+ int rc;
+
+ assert( pFile->iEof>iStart );
+ assert( pReadr->aAlloc==0 && pReadr->nAlloc==0 );
+ assert( pReadr->aBuffer==0 );
+ assert( pReadr->aMap==0 );
+
+ rc = vdbePmaReaderSeek(pTask, pReadr, pFile, iStart);
+ if( rc==SQLITE_OK ){
+ u64 nByte = 0; /* Size of PMA in bytes */
+ rc = vdbePmaReadVarint(pReadr, &nByte);
+ pReadr->iEof = pReadr->iReadOff + nByte;
+ *pnByte += nByte;
+ }
+
+ if( rc==SQLITE_OK ){
+ rc = vdbePmaReaderNext(pReadr);
+ }
+ return rc;
+}
+
+/*
+** A version of vdbeSorterCompare() that assumes that it has already been
+** determined that the first field of key1 is equal to the first field of
+** key2.
+*/
+static int vdbeSorterCompareTail(
+ SortSubtask *pTask, /* Subtask context (for pKeyInfo) */
+ int *pbKey2Cached, /* True if pTask->pUnpacked is pKey2 */
+ const void *pKey1, int nKey1, /* Left side of comparison */
+ const void *pKey2, int nKey2 /* Right side of comparison */
+){
+ UnpackedRecord *r2 = pTask->pUnpacked;
+ if( *pbKey2Cached==0 ){
+ sqlite3VdbeRecordUnpack(pTask->pSorter->pKeyInfo, nKey2, pKey2, r2);
+ *pbKey2Cached = 1;
+ }
+ return sqlite3VdbeRecordCompareWithSkip(nKey1, pKey1, r2, 1);
+}
+
+/*
+** Compare key1 (buffer pKey1, size nKey1 bytes) with key2 (buffer pKey2,
+** size nKey2 bytes). Use (pTask->pKeyInfo) for the collation sequences
+** used by the comparison. Return the result of the comparison.
+**
+** If IN/OUT parameter *pbKey2Cached is true when this function is called,
+** it is assumed that (pTask->pUnpacked) contains the unpacked version
+** of key2. If it is false, (pTask->pUnpacked) is populated with the unpacked
+** version of key2 and *pbKey2Cached set to true before returning.
+**
+** If an OOM error is encountered, (pTask->pUnpacked->error_rc) is set
+** to SQLITE_NOMEM.
+*/
+static int vdbeSorterCompare(
+ SortSubtask *pTask, /* Subtask context (for pKeyInfo) */
+ int *pbKey2Cached, /* True if pTask->pUnpacked is pKey2 */
+ const void *pKey1, int nKey1, /* Left side of comparison */
+ const void *pKey2, int nKey2 /* Right side of comparison */
+){
+ UnpackedRecord *r2 = pTask->pUnpacked;
+ if( !*pbKey2Cached ){
+ sqlite3VdbeRecordUnpack(pTask->pSorter->pKeyInfo, nKey2, pKey2, r2);
+ *pbKey2Cached = 1;
+ }
+ return sqlite3VdbeRecordCompare(nKey1, pKey1, r2);
+}
+
+/*
+** A specially optimized version of vdbeSorterCompare() that assumes that
+** the first field of each key is a TEXT value and that the collation
+** sequence to compare them with is BINARY.
+*/
+static int vdbeSorterCompareText(
+ SortSubtask *pTask, /* Subtask context (for pKeyInfo) */
+ int *pbKey2Cached, /* True if pTask->pUnpacked is pKey2 */
+ const void *pKey1, int nKey1, /* Left side of comparison */
+ const void *pKey2, int nKey2 /* Right side of comparison */
+){
+ const u8 * const p1 = (const u8 * const)pKey1;
+ const u8 * const p2 = (const u8 * const)pKey2;
+ const u8 * const v1 = &p1[ p1[0] ]; /* Pointer to value 1 */
+ const u8 * const v2 = &p2[ p2[0] ]; /* Pointer to value 2 */
+
+ int n1;
+ int n2;
+ int res;
+
+ getVarint32NR(&p1[1], n1);
+ getVarint32NR(&p2[1], n2);
+ res = memcmp(v1, v2, (MIN(n1, n2) - 13)/2);
+ if( res==0 ){
+ res = n1 - n2;
+ }
+
+ if( res==0 ){
+ if( pTask->pSorter->pKeyInfo->nKeyField>1 ){
+ res = vdbeSorterCompareTail(
+ pTask, pbKey2Cached, pKey1, nKey1, pKey2, nKey2
+ );
+ }
+ }else{
+ assert( !(pTask->pSorter->pKeyInfo->aSortFlags[0]&KEYINFO_ORDER_BIGNULL) );
+ if( pTask->pSorter->pKeyInfo->aSortFlags[0] ){
+ res = res * -1;
+ }
+ }
+
+ return res;
+}
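+
+/* Worked example for the comparator above, assuming the standard record
+** format: p1[0] is the (one-byte) header size, and a text value of n bytes
+** has the odd serial type (n*2)+13. So n1 and n2 above are serial types,
+** not byte counts, and (MIN(n1,n2)-13)/2 recovers the length of the
+** shorter string. Comparing 'abc' (serial type 19) with 'abcd' (serial
+** type 21): memcmp() covers (19-13)/2 = 3 bytes and returns 0, then
+** res = 19-21 < 0, so the shorter string sorts first under BINARY.
+*/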
+
+/*
+** A specially optimized version of vdbeSorterCompare() that assumes that
+** the first field of each key is an INTEGER value.
+*/
+static int vdbeSorterCompareInt(
+ SortSubtask *pTask, /* Subtask context (for pKeyInfo) */
+ int *pbKey2Cached, /* True if pTask->pUnpacked is pKey2 */
+ const void *pKey1, int nKey1, /* Left side of comparison */
+ const void *pKey2, int nKey2 /* Right side of comparison */
+){
+ const u8 * const p1 = (const u8 * const)pKey1;
+ const u8 * const p2 = (const u8 * const)pKey2;
+ const int s1 = p1[1]; /* Left hand serial type */
+ const int s2 = p2[1]; /* Right hand serial type */
+ const u8 * const v1 = &p1[ p1[0] ]; /* Pointer to value 1 */
+ const u8 * const v2 = &p2[ p2[0] ]; /* Pointer to value 2 */
+ int res; /* Return value */
+
+ assert( (s1>0 && s1<7) || s1==8 || s1==9 );
+ assert( (s2>0 && s2<7) || s2==8 || s2==9 );
+
+ if( s1==s2 ){
+    /* The two values have the same serial type and hence the same size.
+    ** Compare them byte by byte using memcmp(). */
+ static const u8 aLen[] = {0, 1, 2, 3, 4, 6, 8, 0, 0, 0 };
+ const u8 n = aLen[s1];
+ int i;
+ res = 0;
+ for(i=0; i<n; i++){
+ if( (res = v1[i] - v2[i])!=0 ){
+ if( ((v1[0] ^ v2[0]) & 0x80)!=0 ){
+ res = v1[0] & 0x80 ? -1 : +1;
+ }
+ break;
+ }
+ }
+ }else if( s1>7 && s2>7 ){
+ res = s1 - s2;
+ }else{
+ if( s2>7 ){
+ res = +1;
+ }else if( s1>7 ){
+ res = -1;
+ }else{
+ res = s1 - s2;
+ }
+ assert( res!=0 );
+
+ if( res>0 ){
+ if( *v1 & 0x80 ) res = -1;
+ }else{
+ if( *v2 & 0x80 ) res = +1;
+ }
+ }
+
+ if( res==0 ){
+ if( pTask->pSorter->pKeyInfo->nKeyField>1 ){
+ res = vdbeSorterCompareTail(
+ pTask, pbKey2Cached, pKey1, nKey1, pKey2, nKey2
+ );
+ }
+ }else if( pTask->pSorter->pKeyInfo->aSortFlags[0] ){
+ assert( !(pTask->pSorter->pKeyInfo->aSortFlags[0]&KEYINFO_ORDER_BIGNULL) );
+ res = res * -1;
+ }
+
+ return res;
+}
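+
+/* Worked example for the comparator above, assuming the standard record
+** serial types: an integer field is stored big-endian twos-complement
+** with serial type 1..6 (1, 2, 3, 4, 6 or 8 bytes - see aLen[] above),
+** while serial types 8 and 9 encode the constants 0 and 1 with no data
+** bytes at all. Comparing 1000 (type 2, bytes 0x03 0xE8) with 70000
+** (type 3, bytes 0x01 0x11 0x70): s1!=s2 and both types are <=7, so
+** res = 2-3 = -1; the high bit of v2[0] (0x01) is clear, so 70000 is not
+** negative, res stays -1 and 1000 correctly sorts first.
+*/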
+
+/*
+** Initialize the temporary index cursor just opened as a sorter cursor.
+**
+** Usually, the sorter module uses the value of (pCsr->pKeyInfo->nKeyField)
+** to determine the number of fields that should be compared from the
+** records being sorted. However, if the value passed as argument nField
+** is non-zero and the sorter is able to guarantee a stable sort, nField
+** is used instead. This is used when sorting records for a CREATE INDEX
+** statement. In this case, keys are always delivered to the sorter in
+** order of the primary key, which happens to make up the final part
+** of the records being sorted. So if the sort is stable, there is never
+** any reason to compare PK fields and they can be ignored for a small
+** performance boost.
+**
+** The sorter can guarantee a stable sort when running in single-threaded
+** mode, but not in multi-threaded mode.
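+**
+** For example, for "CREATE INDEX i1 ON t1(a,b)" the records reaching the
+** sorter are (a,b,rowid) with nField==2. In single-threaded (stable) mode,
+** records that tie on (a,b) keep their delivery order, which is already
+** rowid order, so the trailing rowid field never needs to be compared.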
+**
+** SQLITE_OK is returned if successful, or an SQLite error code otherwise.
+*/
+SQLITE_PRIVATE int sqlite3VdbeSorterInit(
+ sqlite3 *db, /* Database connection (for malloc()) */
+ int nField, /* Number of key fields in each record */
+ VdbeCursor *pCsr /* Cursor that holds the new sorter */
+){
+ int pgsz; /* Page size of main database */
+ int i; /* Used to iterate through aTask[] */
+ VdbeSorter *pSorter; /* The new sorter */
+ KeyInfo *pKeyInfo; /* Copy of pCsr->pKeyInfo with db==0 */
+ int szKeyInfo; /* Size of pCsr->pKeyInfo in bytes */
+ int sz; /* Size of pSorter in bytes */
+ int rc = SQLITE_OK;
+#if SQLITE_MAX_WORKER_THREADS==0
+# define nWorker 0
+#else
+ int nWorker;
+#endif
+
+ /* Initialize the upper limit on the number of worker threads */
+#if SQLITE_MAX_WORKER_THREADS>0
+ if( sqlite3TempInMemory(db) || sqlite3GlobalConfig.bCoreMutex==0 ){
+ nWorker = 0;
+ }else{
+ nWorker = db->aLimit[SQLITE_LIMIT_WORKER_THREADS];
+ }
+#endif
+
+ /* Do not allow the total number of threads (main thread + all workers)
+ ** to exceed the maximum merge count */
+#if SQLITE_MAX_WORKER_THREADS>=SORTER_MAX_MERGE_COUNT
+ if( nWorker>=SORTER_MAX_MERGE_COUNT ){
+ nWorker = SORTER_MAX_MERGE_COUNT-1;
+ }
+#endif
+
+ assert( pCsr->pKeyInfo && pCsr->pBtx==0 );
+ assert( pCsr->eCurType==CURTYPE_SORTER );
+ szKeyInfo = sizeof(KeyInfo) + (pCsr->pKeyInfo->nKeyField-1)*sizeof(CollSeq*);
+ sz = sizeof(VdbeSorter) + nWorker * sizeof(SortSubtask);
+
+ pSorter = (VdbeSorter*)sqlite3DbMallocZero(db, sz + szKeyInfo);
+ pCsr->uc.pSorter = pSorter;
+ if( pSorter==0 ){
+ rc = SQLITE_NOMEM_BKPT;
+ }else{
+ pSorter->pKeyInfo = pKeyInfo = (KeyInfo*)((u8*)pSorter + sz);
+ memcpy(pKeyInfo, pCsr->pKeyInfo, szKeyInfo);
+ pKeyInfo->db = 0;
+ if( nField && nWorker==0 ){
+ pKeyInfo->nKeyField = nField;
+ }
+ pSorter->pgsz = pgsz = sqlite3BtreeGetPageSize(db->aDb[0].pBt);
+ pSorter->nTask = nWorker + 1;
+ pSorter->iPrev = (u8)(nWorker - 1);
+ pSorter->bUseThreads = (pSorter->nTask>1);
+ pSorter->db = db;
+ for(i=0; i<pSorter->nTask; i++){
+ SortSubtask *pTask = &pSorter->aTask[i];
+ pTask->pSorter = pSorter;
+ }
+
+ if( !sqlite3TempInMemory(db) ){
+ i64 mxCache; /* Cache size in bytes*/
+ u32 szPma = sqlite3GlobalConfig.szPma;
+ pSorter->mnPmaSize = szPma * pgsz;
+
+ mxCache = db->aDb[0].pSchema->cache_size;
+ if( mxCache<0 ){
+ /* A negative cache-size value C indicates that the cache is abs(C)
+ ** KiB in size. */
+ mxCache = mxCache * -1024;
+ }else{
+ mxCache = mxCache * pgsz;
+ }
+ mxCache = MIN(mxCache, SQLITE_MAX_PMASZ);
+ pSorter->mxPmaSize = MAX(pSorter->mnPmaSize, (int)mxCache);
+
+ /* Avoid large memory allocations if the application has requested
+ ** SQLITE_CONFIG_SMALL_MALLOC. */
+ if( sqlite3GlobalConfig.bSmallMalloc==0 ){
+ assert( pSorter->iMemory==0 );
+ pSorter->nMemory = pgsz;
+ pSorter->list.aMemory = (u8*)sqlite3Malloc(pgsz);
+ if( !pSorter->list.aMemory ) rc = SQLITE_NOMEM_BKPT;
+ }
+ }
+
+ if( pKeyInfo->nAllField<13
+ && (pKeyInfo->aColl[0]==0 || pKeyInfo->aColl[0]==db->pDfltColl)
+ && (pKeyInfo->aSortFlags[0] & KEYINFO_ORDER_BIGNULL)==0
+ ){
+ pSorter->typeMask = SORTER_TYPE_INTEGER | SORTER_TYPE_TEXT;
+ }
+ }
+
+ return rc;
+}
+#undef nWorker /* Defined at the top of this function */
+
+/*
+** Free the list of sorted records starting at pRecord.
+*/
+static void vdbeSorterRecordFree(sqlite3 *db, SorterRecord *pRecord){
+ SorterRecord *p;
+ SorterRecord *pNext;
+ for(p=pRecord; p; p=pNext){
+ pNext = p->u.pNext;
+ sqlite3DbFree(db, p);
+ }
+}
+
+/*
+** Free all resources owned by the object indicated by argument pTask. All
+** fields of *pTask are zeroed before returning.
+*/
+static void vdbeSortSubtaskCleanup(sqlite3 *db, SortSubtask *pTask){
+ sqlite3DbFree(db, pTask->pUnpacked);
+#if SQLITE_MAX_WORKER_THREADS>0
+ /* pTask->list.aMemory can only be non-zero if it was handed memory
+  ** from the main thread. That only occurs when SQLITE_MAX_WORKER_THREADS>0 */
+ if( pTask->list.aMemory ){
+ sqlite3_free(pTask->list.aMemory);
+ }else
+#endif
+ {
+ assert( pTask->list.aMemory==0 );
+ vdbeSorterRecordFree(0, pTask->list.pList);
+ }
+ if( pTask->file.pFd ){
+ sqlite3OsCloseFree(pTask->file.pFd);
+ }
+ if( pTask->file2.pFd ){
+ sqlite3OsCloseFree(pTask->file2.pFd);
+ }
+ memset(pTask, 0, sizeof(SortSubtask));
+}
+
+#ifdef SQLITE_DEBUG_SORTER_THREADS
+static void vdbeSorterWorkDebug(SortSubtask *pTask, const char *zEvent){
+ i64 t;
+ int iTask = (pTask - pTask->pSorter->aTask);
+ sqlite3OsCurrentTimeInt64(pTask->pSorter->db->pVfs, &t);
+ fprintf(stderr, "%lld:%d %s\n", t, iTask, zEvent);
+}
+static void vdbeSorterRewindDebug(const char *zEvent){
+ i64 t;
+ sqlite3OsCurrentTimeInt64(sqlite3_vfs_find(0), &t);
+ fprintf(stderr, "%lld:X %s\n", t, zEvent);
+}
+static void vdbeSorterPopulateDebug(
+ SortSubtask *pTask,
+ const char *zEvent
+){
+ i64 t;
+ int iTask = (pTask - pTask->pSorter->aTask);
+ sqlite3OsCurrentTimeInt64(pTask->pSorter->db->pVfs, &t);
+ fprintf(stderr, "%lld:bg%d %s\n", t, iTask, zEvent);
+}
+static void vdbeSorterBlockDebug(
+ SortSubtask *pTask,
+ int bBlocked,
+ const char *zEvent
+){
+ if( bBlocked ){
+ i64 t;
+ sqlite3OsCurrentTimeInt64(pTask->pSorter->db->pVfs, &t);
+ fprintf(stderr, "%lld:main %s\n", t, zEvent);
+ }
+}
+#else
+# define vdbeSorterWorkDebug(x,y)
+# define vdbeSorterRewindDebug(y)
+# define vdbeSorterPopulateDebug(x,y)
+# define vdbeSorterBlockDebug(x,y,z)
+#endif
+
+#if SQLITE_MAX_WORKER_THREADS>0
+/*
+** Join thread pTask->thread.
+*/
+static int vdbeSorterJoinThread(SortSubtask *pTask){
+ int rc = SQLITE_OK;
+ if( pTask->pThread ){
+#ifdef SQLITE_DEBUG_SORTER_THREADS
+ int bDone = pTask->bDone;
+#endif
+ void *pRet = SQLITE_INT_TO_PTR(SQLITE_ERROR);
+ vdbeSorterBlockDebug(pTask, !bDone, "enter");
+ (void)sqlite3ThreadJoin(pTask->pThread, &pRet);
+ vdbeSorterBlockDebug(pTask, !bDone, "exit");
+ rc = SQLITE_PTR_TO_INT(pRet);
+ assert( pTask->bDone==1 );
+ pTask->bDone = 0;
+ pTask->pThread = 0;
+ }
+ return rc;
+}
+
+/*
+** Launch a background thread to run xTask(pIn).
+*/
+static int vdbeSorterCreateThread(
+ SortSubtask *pTask, /* Thread will use this task object */
+ void *(*xTask)(void*), /* Routine to run in a separate thread */
+ void *pIn /* Argument passed into xTask() */
+){
+ assert( pTask->pThread==0 && pTask->bDone==0 );
+ return sqlite3ThreadCreate(&pTask->pThread, xTask, pIn);
+}
+
+/*
+** Join all outstanding threads launched by SorterWrite() to create
+** level-0 PMAs.
+*/
+static int vdbeSorterJoinAll(VdbeSorter *pSorter, int rcin){
+ int rc = rcin;
+ int i;
+
+ /* This function is always called by the main user thread.
+ **
+ ** If this function is being called after SorterRewind() has been called,
+ ** it is possible that thread pSorter->aTask[pSorter->nTask-1].pThread
+  ** is currently attempting to join one of the other threads. To avoid a race
+ ** condition where this thread also attempts to join the same object, join
+ ** thread pSorter->aTask[pSorter->nTask-1].pThread first. */
+ for(i=pSorter->nTask-1; i>=0; i--){
+ SortSubtask *pTask = &pSorter->aTask[i];
+ int rc2 = vdbeSorterJoinThread(pTask);
+ if( rc==SQLITE_OK ) rc = rc2;
+ }
+ return rc;
+}
+#else
+# define vdbeSorterJoinAll(x,rcin) (rcin)
+# define vdbeSorterJoinThread(pTask) SQLITE_OK
+#endif
+
+/*
+** Allocate a new MergeEngine object capable of handling up to
+** nReader PmaReader inputs.
+**
+** nReader is automatically rounded up to the next power of two.
+** nReader may not exceed SORTER_MAX_MERGE_COUNT even after rounding up.
+*/
+static MergeEngine *vdbeMergeEngineNew(int nReader){
+ int N = 2; /* Smallest power of two >= nReader */
+ int nByte; /* Total bytes of space to allocate */
+ MergeEngine *pNew; /* Pointer to allocated object to return */
+
+ assert( nReader<=SORTER_MAX_MERGE_COUNT );
+
+ while( N<nReader ) N += N;
+ nByte = sizeof(MergeEngine) + N * (sizeof(int) + sizeof(PmaReader));
+
+ pNew = sqlite3FaultSim(100) ? 0 : (MergeEngine*)sqlite3MallocZero(nByte);
+ if( pNew ){
+ pNew->nTree = N;
+ pNew->pTask = 0;
+ pNew->aReadr = (PmaReader*)&pNew[1];
+ pNew->aTree = (int*)&pNew->aReadr[N];
+ }
+ return pNew;
+}
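+
+/* For example, a request for nReader==5 rounds up to N==8 and the single
+** allocation is laid out as:
+**
+**     MergeEngine | PmaReader aReadr[8] | int aTree[8]
+**
+** The three unused readers keep pFd==0, and a reader with pFd==0 always
+** loses its comparisons, which is how the tournament tree tolerates an
+** input count that is not a power of two.
+*/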
+
+/*
+** Free the MergeEngine object passed as the only argument.
+*/
+static void vdbeMergeEngineFree(MergeEngine *pMerger){
+ int i;
+ if( pMerger ){
+ for(i=0; i<pMerger->nTree; i++){
+ vdbePmaReaderClear(&pMerger->aReadr[i]);
+ }
+ }
+ sqlite3_free(pMerger);
+}
+
+/*
+** Free all resources associated with the IncrMerger object indicated by
+** the first argument.
+*/
+static void vdbeIncrFree(IncrMerger *pIncr){
+ if( pIncr ){
+#if SQLITE_MAX_WORKER_THREADS>0
+ if( pIncr->bUseThread ){
+ vdbeSorterJoinThread(pIncr->pTask);
+ if( pIncr->aFile[0].pFd ) sqlite3OsCloseFree(pIncr->aFile[0].pFd);
+ if( pIncr->aFile[1].pFd ) sqlite3OsCloseFree(pIncr->aFile[1].pFd);
+ }
+#endif
+ vdbeMergeEngineFree(pIncr->pMerger);
+ sqlite3_free(pIncr);
+ }
+}
+
+/*
+** Reset a sorting cursor back to its original empty state.
+*/
+SQLITE_PRIVATE void sqlite3VdbeSorterReset(sqlite3 *db, VdbeSorter *pSorter){
+ int i;
+ (void)vdbeSorterJoinAll(pSorter, SQLITE_OK);
+ assert( pSorter->bUseThreads || pSorter->pReader==0 );
+#if SQLITE_MAX_WORKER_THREADS>0
+ if( pSorter->pReader ){
+ vdbePmaReaderClear(pSorter->pReader);
+ sqlite3DbFree(db, pSorter->pReader);
+ pSorter->pReader = 0;
+ }
+#endif
+ vdbeMergeEngineFree(pSorter->pMerger);
+ pSorter->pMerger = 0;
+ for(i=0; i<pSorter->nTask; i++){
+ SortSubtask *pTask = &pSorter->aTask[i];
+ vdbeSortSubtaskCleanup(db, pTask);
+ pTask->pSorter = pSorter;
+ }
+ if( pSorter->list.aMemory==0 ){
+ vdbeSorterRecordFree(0, pSorter->list.pList);
+ }
+ pSorter->list.pList = 0;
+ pSorter->list.szPMA = 0;
+ pSorter->bUsePMA = 0;
+ pSorter->iMemory = 0;
+ pSorter->mxKeysize = 0;
+ sqlite3DbFree(db, pSorter->pUnpacked);
+ pSorter->pUnpacked = 0;
+}
+
+/*
+** Free any cursor components allocated by sqlite3VdbeSorterXXX routines.
+*/
+SQLITE_PRIVATE void sqlite3VdbeSorterClose(sqlite3 *db, VdbeCursor *pCsr){
+ VdbeSorter *pSorter;
+ assert( pCsr->eCurType==CURTYPE_SORTER );
+ pSorter = pCsr->uc.pSorter;
+ if( pSorter ){
+ sqlite3VdbeSorterReset(db, pSorter);
+ sqlite3_free(pSorter->list.aMemory);
+ sqlite3DbFree(db, pSorter);
+ pCsr->uc.pSorter = 0;
+ }
+}
+
+#if SQLITE_MAX_MMAP_SIZE>0
+/*
+** Argument pFd is a file-handle open on a temporary file. The file
+** is guaranteed to be nByte bytes or smaller in size. This function
+** attempts to extend the file to nByte bytes in size and to ensure that
+** the VFS has memory mapped it.
+**
+** Whether or not the file does end up memory mapped of course depends on
+** the specific VFS implementation.
+*/
+static void vdbeSorterExtendFile(sqlite3 *db, sqlite3_file *pFd, i64 nByte){
+ if( nByte<=(i64)(db->nMaxSorterMmap) && pFd->pMethods->iVersion>=3 ){
+ void *p = 0;
+ int chunksize = 4*1024;
+ sqlite3OsFileControlHint(pFd, SQLITE_FCNTL_CHUNK_SIZE, &chunksize);
+ sqlite3OsFileControlHint(pFd, SQLITE_FCNTL_SIZE_HINT, &nByte);
+ sqlite3OsFetch(pFd, 0, (int)nByte, &p);
+ sqlite3OsUnfetch(pFd, 0, p);
+ }
+}
+#else
+# define vdbeSorterExtendFile(x,y,z)
+#endif
+
+/*
+** Allocate space for a file-handle and open a temporary file. If successful,
+** set *ppFd to point to the malloc'd file-handle and return SQLITE_OK.
+** Otherwise, set *ppFd to 0 and return an SQLite error code.
+*/
+static int vdbeSorterOpenTempFile(
+ sqlite3 *db, /* Database handle doing sort */
+ i64 nExtend, /* Attempt to extend file to this size */
+ sqlite3_file **ppFd
+){
+ int rc;
+ if( sqlite3FaultSim(202) ) return SQLITE_IOERR_ACCESS;
+ rc = sqlite3OsOpenMalloc(db->pVfs, 0, ppFd,
+ SQLITE_OPEN_TEMP_JOURNAL |
+ SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE |
+ SQLITE_OPEN_EXCLUSIVE | SQLITE_OPEN_DELETEONCLOSE, &rc
+ );
+ if( rc==SQLITE_OK ){
+ i64 max = SQLITE_MAX_MMAP_SIZE;
+ sqlite3OsFileControlHint(*ppFd, SQLITE_FCNTL_MMAP_SIZE, (void*)&max);
+ if( nExtend>0 ){
+ vdbeSorterExtendFile(db, *ppFd, nExtend);
+ }
+ }
+ return rc;
+}
+
+/*
+** If it has not already been allocated, allocate the UnpackedRecord
+** structure at pTask->pUnpacked. Return SQLITE_OK if successful (or
+** if no allocation was required), or SQLITE_NOMEM otherwise.
+*/
+static int vdbeSortAllocUnpacked(SortSubtask *pTask){
+ if( pTask->pUnpacked==0 ){
+ pTask->pUnpacked = sqlite3VdbeAllocUnpackedRecord(pTask->pSorter->pKeyInfo);
+ if( pTask->pUnpacked==0 ) return SQLITE_NOMEM_BKPT;
+ pTask->pUnpacked->nField = pTask->pSorter->pKeyInfo->nKeyField;
+ pTask->pUnpacked->errCode = 0;
+ }
+ return SQLITE_OK;
+}
+
+
+/*
+** Merge the two sorted lists p1 and p2 into a single list.
+*/
+static SorterRecord *vdbeSorterMerge(
+ SortSubtask *pTask, /* Calling thread context */
+ SorterRecord *p1, /* First list to merge */
+ SorterRecord *p2 /* Second list to merge */
+){
+ SorterRecord *pFinal = 0;
+ SorterRecord **pp = &pFinal;
+ int bCached = 0;
+
+ assert( p1!=0 && p2!=0 );
+ for(;;){
+ int res;
+ res = pTask->xCompare(
+ pTask, &bCached, SRVAL(p1), p1->nVal, SRVAL(p2), p2->nVal
+ );
+
+ if( res<=0 ){
+ *pp = p1;
+ pp = &p1->u.pNext;
+ p1 = p1->u.pNext;
+ if( p1==0 ){
+ *pp = p2;
+ break;
+ }
+ }else{
+ *pp = p2;
+ pp = &p2->u.pNext;
+ p2 = p2->u.pNext;
+ bCached = 0;
+ if( p2==0 ){
+ *pp = p1;
+ break;
+ }
+ }
+ }
+ return pFinal;
+}
+
+/*
+** Return the SorterCompare function to compare values collected by the
+** sorter object passed as the only argument.
+*/
+static SorterCompare vdbeSorterGetCompare(VdbeSorter *p){
+ if( p->typeMask==SORTER_TYPE_INTEGER ){
+ return vdbeSorterCompareInt;
+ }else if( p->typeMask==SORTER_TYPE_TEXT ){
+ return vdbeSorterCompareText;
+ }
+ return vdbeSorterCompare;
+}
+
+/*
+** Sort the linked list of records headed at pTask->pList. Return
+** SQLITE_OK if successful, or an SQLite error code (i.e. SQLITE_NOMEM) if
+** an error occurs.
+*/
+static int vdbeSorterSort(SortSubtask *pTask, SorterList *pList){
+ int i;
+ SorterRecord *p;
+ int rc;
+ SorterRecord *aSlot[64];
+
+ rc = vdbeSortAllocUnpacked(pTask);
+ if( rc!=SQLITE_OK ) return rc;
+
+ p = pList->pList;
+ pTask->xCompare = vdbeSorterGetCompare(pTask->pSorter);
+ memset(aSlot, 0, sizeof(aSlot));
+
+ while( p ){
+ SorterRecord *pNext;
+ if( pList->aMemory ){
+ if( (u8*)p==pList->aMemory ){
+ pNext = 0;
+ }else{
+ assert( p->u.iNext<sqlite3MallocSize(pList->aMemory) );
+ pNext = (SorterRecord*)&pList->aMemory[p->u.iNext];
+ }
+ }else{
+ pNext = p->u.pNext;
+ }
+
+ p->u.pNext = 0;
+ for(i=0; aSlot[i]; i++){
+ p = vdbeSorterMerge(pTask, p, aSlot[i]);
+ aSlot[i] = 0;
+ }
+ aSlot[i] = p;
+ p = pNext;
+ }
+
+ p = 0;
+ for(i=0; i<ArraySize(aSlot); i++){
+ if( aSlot[i]==0 ) continue;
+ p = p ? vdbeSorterMerge(pTask, p, aSlot[i]) : aSlot[i];
+ }
+ pList->pList = p;
+
+ assert( pTask->pUnpacked->errCode==SQLITE_OK
+ || pTask->pUnpacked->errCode==SQLITE_NOMEM
+ );
+ return pTask->pUnpacked->errCode;
+}
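+
+/* The slot loop above is a bottom-up merge sort: aSlot[i] is either NULL
+** or a sorted list of exactly 2^i records, and adding one more record
+** behaves like incrementing a binary counter, merging equal-sized lists
+** as the carry propagates. A minimal self-contained sketch of the same
+** scheme on a plain integer list follows; the Ex* names are invented for
+** illustration and the block is compiled out.
+*/
+#if 0
+typedef struct ExNode ExNode;
+struct ExNode { int v; ExNode *pNext; };
+
+/* Merge two sorted lists into a single sorted list */
+static ExNode *exMerge(ExNode *p1, ExNode *p2){
+  ExNode head;
+  ExNode *pTail = &head;
+  while( p1 && p2 ){
+    if( p1->v<=p2->v ){ pTail->pNext = p1; p1 = p1->pNext; }
+    else              { pTail->pNext = p2; p2 = p2->pNext; }
+    pTail = pTail->pNext;
+  }
+  pTail->pNext = p1 ? p1 : p2;
+  return head.pNext;
+}
+
+/* Sort list p using the same aSlot[] binary-counter scheme as above */
+static ExNode *exSort(ExNode *p){
+  ExNode *aSlot[64];
+  int i;
+  memset(aSlot, 0, sizeof(aSlot));
+  while( p ){
+    ExNode *pNext = p->pNext;
+    p->pNext = 0;
+    for(i=0; aSlot[i]; i++){
+      p = exMerge(p, aSlot[i]);   /* "carry" into the next slot */
+      aSlot[i] = 0;
+    }
+    aSlot[i] = p;
+    p = pNext;
+  }
+  p = 0;
+  for(i=0; i<64; i++){
+    if( aSlot[i] ) p = p ? exMerge(p, aSlot[i]) : aSlot[i];
+  }
+  return p;
+}
+#endif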
+
+/*
+** Initialize a PMA-writer object.
+*/
+static void vdbePmaWriterInit(
+ sqlite3_file *pFd, /* File handle to write to */
+ PmaWriter *p, /* Object to populate */
+ int nBuf, /* Buffer size */
+ i64 iStart /* Offset of pFd to begin writing at */
+){
+ memset(p, 0, sizeof(PmaWriter));
+ p->aBuffer = (u8*)sqlite3Malloc(nBuf);
+ if( !p->aBuffer ){
+ p->eFWErr = SQLITE_NOMEM_BKPT;
+ }else{
+ p->iBufEnd = p->iBufStart = (iStart % nBuf);
+ p->iWriteOff = iStart - p->iBufStart;
+ p->nBuffer = nBuf;
+ p->pFd = pFd;
+ }
+}
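+
+/* For example, with nBuf==4096 and iStart==10000: iBufStart and iBufEnd
+** both start at 10000%4096 == 1808 and iWriteOff at 8192, so the first
+** flush writes at file offset 8192+1808 == 10000 and every later flush is
+** aligned to a whole 4096-byte buffer.
+*/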
+
+/*
+** Write nData bytes of data to the PMA. If an error occurs, the error
+** code is stored in PmaWriter.eFWErr and further writes become no-ops;
+** the error is reported when vdbePmaWriterFinish() is called.
+*/
+static void vdbePmaWriteBlob(PmaWriter *p, u8 *pData, int nData){
+ int nRem = nData;
+ while( nRem>0 && p->eFWErr==0 ){
+ int nCopy = nRem;
+ if( nCopy>(p->nBuffer - p->iBufEnd) ){
+ nCopy = p->nBuffer - p->iBufEnd;
+ }
+
+ memcpy(&p->aBuffer[p->iBufEnd], &pData[nData-nRem], nCopy);
+ p->iBufEnd += nCopy;
+ if( p->iBufEnd==p->nBuffer ){
+ p->eFWErr = sqlite3OsWrite(p->pFd,
+ &p->aBuffer[p->iBufStart], p->iBufEnd - p->iBufStart,
+ p->iWriteOff + p->iBufStart
+ );
+ p->iBufStart = p->iBufEnd = 0;
+ p->iWriteOff += p->nBuffer;
+ }
+ assert( p->iBufEnd<p->nBuffer );
+
+ nRem -= nCopy;
+ }
+}
+
+/*
+** Flush any buffered data to disk and clean up the PMA-writer object.
+** The results of using the PMA-writer after this call are undefined.
+** Return SQLITE_OK if flushing the buffered data succeeds or is not
+** required. Otherwise, return an SQLite error code.
+**
+** Before returning, set *piEof to the offset immediately following the
+** last byte written to the file.
+*/
+static int vdbePmaWriterFinish(PmaWriter *p, i64 *piEof){
+ int rc;
+ if( p->eFWErr==0 && ALWAYS(p->aBuffer) && p->iBufEnd>p->iBufStart ){
+ p->eFWErr = sqlite3OsWrite(p->pFd,
+ &p->aBuffer[p->iBufStart], p->iBufEnd - p->iBufStart,
+ p->iWriteOff + p->iBufStart
+ );
+ }
+ *piEof = (p->iWriteOff + p->iBufEnd);
+ sqlite3_free(p->aBuffer);
+ rc = p->eFWErr;
+ memset(p, 0, sizeof(PmaWriter));
+ return rc;
+}
+
+/*
+** Write value iVal encoded as a varint to the PMA. As with
+** vdbePmaWriteBlob(), any error is recorded in PmaWriter.eFWErr.
+*/
+static void vdbePmaWriteVarint(PmaWriter *p, u64 iVal){
+ int nByte;
+ u8 aByte[10];
+ nByte = sqlite3PutVarint(aByte, iVal);
+ vdbePmaWriteBlob(p, aByte, nByte);
+}
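+
+/* Varints here use SQLite's usual big-endian base-128 encoding: each byte
+** carries 7 payload bits, the high bit is set on every byte except the
+** last, and at most 9 bytes are used. For example, 100 encodes as the
+** single byte 0x64, while 300 encodes as the two bytes 0x82 0x2C
+** ((2<<7)|44 == 300).
+*/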
+
+/*
+** Write the current contents of in-memory linked-list pList to a level-0
+** PMA in the temp file belonging to sub-task pTask. Return SQLITE_OK if
+** successful, or an SQLite error code otherwise.
+**
+** The format of a PMA is:
+**
+** * A varint. This varint contains the total number of bytes of content
+** in the PMA (not including the varint itself).
+**
+** * One or more records packed end-to-end in order of ascending keys.
+** Each record consists of a varint followed by a blob of data (the
+** key). The varint is the number of bytes in the blob of data.
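+**
+** For example, a PMA holding the two keys "abc" (3 bytes) and "wxyz"
+** (4 bytes) occupies 9 bytes of content and is written as:
+**
+**     0x09   0x03 'a' 'b' 'c'   0x04 'w' 'x' 'y' 'z'
+**
+** i.e. a length varint of 9 followed by each record's own length varint
+** and key bytes, packed end-to-end.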
+*/
+static int vdbeSorterListToPMA(SortSubtask *pTask, SorterList *pList){
+ sqlite3 *db = pTask->pSorter->db;
+ int rc = SQLITE_OK; /* Return code */
+ PmaWriter writer; /* Object used to write to the file */
+
+#ifdef SQLITE_DEBUG
+ /* Set iSz to the expected size of file pTask->file after writing the PMA.
+ ** This is used by an assert() statement at the end of this function. */
+ i64 iSz = pList->szPMA + sqlite3VarintLen(pList->szPMA) + pTask->file.iEof;
+#endif
+
+ vdbeSorterWorkDebug(pTask, "enter");
+ memset(&writer, 0, sizeof(PmaWriter));
+ assert( pList->szPMA>0 );
+
+ /* If the first temporary PMA file has not been opened, open it now. */
+ if( pTask->file.pFd==0 ){
+ rc = vdbeSorterOpenTempFile(db, 0, &pTask->file.pFd);
+ assert( rc!=SQLITE_OK || pTask->file.pFd );
+ assert( pTask->file.iEof==0 );
+ assert( pTask->nPMA==0 );
+ }
+
+ /* Try to get the file to memory map */
+ if( rc==SQLITE_OK ){
+ vdbeSorterExtendFile(db, pTask->file.pFd, pTask->file.iEof+pList->szPMA+9);
+ }
+
+ /* Sort the list */
+ if( rc==SQLITE_OK ){
+ rc = vdbeSorterSort(pTask, pList);
+ }
+
+ if( rc==SQLITE_OK ){
+ SorterRecord *p;
+ SorterRecord *pNext = 0;
+
+ vdbePmaWriterInit(pTask->file.pFd, &writer, pTask->pSorter->pgsz,
+ pTask->file.iEof);
+ pTask->nPMA++;
+ vdbePmaWriteVarint(&writer, pList->szPMA);
+ for(p=pList->pList; p; p=pNext){
+ pNext = p->u.pNext;
+ vdbePmaWriteVarint(&writer, p->nVal);
+ vdbePmaWriteBlob(&writer, SRVAL(p), p->nVal);
+ if( pList->aMemory==0 ) sqlite3_free(p);
+ }
+ pList->pList = p;
+ rc = vdbePmaWriterFinish(&writer, &pTask->file.iEof);
+ }
+
+ vdbeSorterWorkDebug(pTask, "exit");
+ assert( rc!=SQLITE_OK || pList->pList==0 );
+ assert( rc!=SQLITE_OK || pTask->file.iEof==iSz );
+ return rc;
+}
+
+/*
+** Advance the MergeEngine to its next entry.
+** Set *pbEof to true if there is no next entry because
+** the MergeEngine has reached the end of all its inputs.
+**
+** Return SQLITE_OK if successful or an error code if an error occurs.
+*/
+static int vdbeMergeEngineStep(
+ MergeEngine *pMerger, /* The merge engine to advance to the next row */
+ int *pbEof /* Set TRUE at EOF. Set false for more content */
+){
+ int rc;
+ int iPrev = pMerger->aTree[1];/* Index of PmaReader to advance */
+ SortSubtask *pTask = pMerger->pTask;
+
+ /* Advance the current PmaReader */
+ rc = vdbePmaReaderNext(&pMerger->aReadr[iPrev]);
+
+ /* Update contents of aTree[] */
+ if( rc==SQLITE_OK ){
+ int i; /* Index of aTree[] to recalculate */
+ PmaReader *pReadr1; /* First PmaReader to compare */
+ PmaReader *pReadr2; /* Second PmaReader to compare */
+ int bCached = 0;
+
+ /* Find the first two PmaReaders to compare. The one that was just
+ ** advanced (iPrev) and the one next to it in the array. */
+ pReadr1 = &pMerger->aReadr[(iPrev & 0xFFFE)];
+ pReadr2 = &pMerger->aReadr[(iPrev | 0x0001)];
+
+ for(i=(pMerger->nTree+iPrev)/2; i>0; i=i/2){
+ /* Compare pReadr1 and pReadr2. Store the result in variable iRes. */
+ int iRes;
+ if( pReadr1->pFd==0 ){
+ iRes = +1;
+ }else if( pReadr2->pFd==0 ){
+ iRes = -1;
+ }else{
+ iRes = pTask->xCompare(pTask, &bCached,
+ pReadr1->aKey, pReadr1->nKey, pReadr2->aKey, pReadr2->nKey
+ );
+ }
+
+      /* If pReadr1 contained the smaller value, set aTree[i] to its index.
+      ** Then set pReadr2 to the next PmaReader to compare to pReadr1. The
+      ** key cached in pTask->pUnpacked (if any) was unpacked from the old
+      ** pReadr2, so bCached is cleared.
+      **
+      ** Alternatively, if pReadr2 contains the smaller of the two values,
+      ** set aTree[i] to its index and update pReadr1. Because pReadr2 is
+      ** unchanged, the key cached in pTask->pUnpacked may still describe
+      ** it, allowing some of the comparisons above it in the tree to skip
+      ** decoding pReadr2 again.
+ **
+ ** If the two values were equal, then the value from the oldest
+ ** PMA should be considered smaller. The VdbeSorter.aReadr[] array
+ ** is sorted from oldest to newest, so pReadr1 contains older values
+ ** than pReadr2 iff (pReadr1<pReadr2). */
+ if( iRes<0 || (iRes==0 && pReadr1<pReadr2) ){
+ pMerger->aTree[i] = (int)(pReadr1 - pMerger->aReadr);
+ pReadr2 = &pMerger->aReadr[ pMerger->aTree[i ^ 0x0001] ];
+ bCached = 0;
+ }else{
+ if( pReadr1->pFd ) bCached = 0;
+ pMerger->aTree[i] = (int)(pReadr2 - pMerger->aReadr);
+ pReadr1 = &pMerger->aReadr[ pMerger->aTree[i ^ 0x0001] ];
+ }
+ }
+ *pbEof = (pMerger->aReadr[pMerger->aTree[1]].pFd==0);
+ }
+
+ return (rc==SQLITE_OK ? pTask->pUnpacked->errCode : rc);
+}
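+
+/* Sketch of the repair pass above for a 4-reader tournament tree: aTree[1]
+** holds the index of the overall smallest key, while aTree[2] and aTree[3]
+** hold the winners of the two leaf contests:
+**
+**               aTree[1]
+**              /        \
+**        aTree[2]      aTree[3]
+**        /      \      /      \
+**   aReadr[0]  [1]  aReadr[2]  [3]
+**
+** After the winner (say aReadr[2]) is advanced, only the contests on its
+** path to the root are replayed: first against its sibling aReadr[3], then
+** against the winner recorded on the other side of the root. The bCached
+** flag lets those replays reuse the single unpacked key cached in
+** pTask->pUnpacked.
+*/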
+
+#if SQLITE_MAX_WORKER_THREADS>0
+/*
+** The main routine for background threads that write level-0 PMAs.
+*/
+static void *vdbeSorterFlushThread(void *pCtx){
+ SortSubtask *pTask = (SortSubtask*)pCtx;
+ int rc; /* Return code */
+ assert( pTask->bDone==0 );
+ rc = vdbeSorterListToPMA(pTask, &pTask->list);
+ pTask->bDone = 1;
+ return SQLITE_INT_TO_PTR(rc);
+}
+#endif /* SQLITE_MAX_WORKER_THREADS>0 */
+
+/*
+** Flush the current contents of VdbeSorter.list to a new PMA, possibly
+** using a background thread.
+*/
+static int vdbeSorterFlushPMA(VdbeSorter *pSorter){
+#if SQLITE_MAX_WORKER_THREADS==0
+ pSorter->bUsePMA = 1;
+ return vdbeSorterListToPMA(&pSorter->aTask[0], &pSorter->list);
+#else
+ int rc = SQLITE_OK;
+ int i;
+ SortSubtask *pTask = 0; /* Thread context used to create new PMA */
+ int nWorker = (pSorter->nTask-1);
+
+ /* Set the flag to indicate that at least one PMA has been written.
+ ** Or will be, anyhow. */
+ pSorter->bUsePMA = 1;
+
+ /* Select a sub-task to sort and flush the current list of in-memory
+ ** records to disk. If the sorter is running in multi-threaded mode,
+ ** round-robin between the first (pSorter->nTask-1) tasks. Except, if
+  ** the background thread from a sub-task's previous turn is still running,
+ ** skip it. If the first (pSorter->nTask-1) sub-tasks are all still busy,
+ ** fall back to using the final sub-task. The first (pSorter->nTask-1)
+  ** sub-tasks are preferred as they use background threads - the final
+ ** sub-task uses the main thread. */
+ for(i=0; i<nWorker; i++){
+ int iTest = (pSorter->iPrev + i + 1) % nWorker;
+ pTask = &pSorter->aTask[iTest];
+ if( pTask->bDone ){
+ rc = vdbeSorterJoinThread(pTask);
+ }
+ if( rc!=SQLITE_OK || pTask->pThread==0 ) break;
+ }
+
+ if( rc==SQLITE_OK ){
+ if( i==nWorker ){
+ /* Use the foreground thread for this operation */
+ rc = vdbeSorterListToPMA(&pSorter->aTask[nWorker], &pSorter->list);
+ }else{
+ /* Launch a background thread for this operation */
+ u8 *aMem;
+ void *pCtx;
+
+ assert( pTask!=0 );
+ assert( pTask->pThread==0 && pTask->bDone==0 );
+ assert( pTask->list.pList==0 );
+ assert( pTask->list.aMemory==0 || pSorter->list.aMemory!=0 );
+
+ aMem = pTask->list.aMemory;
+ pCtx = (void*)pTask;
+ pSorter->iPrev = (u8)(pTask - pSorter->aTask);
+ pTask->list = pSorter->list;
+ pSorter->list.pList = 0;
+ pSorter->list.szPMA = 0;
+ if( aMem ){
+ pSorter->list.aMemory = aMem;
+ pSorter->nMemory = sqlite3MallocSize(aMem);
+ }else if( pSorter->list.aMemory ){
+ pSorter->list.aMemory = sqlite3Malloc(pSorter->nMemory);
+ if( !pSorter->list.aMemory ) return SQLITE_NOMEM_BKPT;
+ }
+
+ rc = vdbeSorterCreateThread(pTask, vdbeSorterFlushThread, pCtx);
+ }
+ }
+
+ return rc;
+#endif /* SQLITE_MAX_WORKER_THREADS!=0 */
+}
+
+/*
+** Add a record to the sorter.
+*/
+SQLITE_PRIVATE int sqlite3VdbeSorterWrite(
+ const VdbeCursor *pCsr, /* Sorter cursor */
+ Mem *pVal /* Memory cell containing record */
+){
+ VdbeSorter *pSorter;
+ int rc = SQLITE_OK; /* Return Code */
+ SorterRecord *pNew; /* New list element */
+ int bFlush; /* True to flush contents of memory to PMA */
+ int nReq; /* Bytes of memory required */
+ int nPMA; /* Bytes of PMA space required */
+ int t; /* serial type of first record field */
+
+ assert( pCsr->eCurType==CURTYPE_SORTER );
+ pSorter = pCsr->uc.pSorter;
+ getVarint32NR((const u8*)&pVal->z[1], t);
+ if( t>0 && t<10 && t!=7 ){
+ pSorter->typeMask &= SORTER_TYPE_INTEGER;
+ }else if( t>10 && (t & 0x01) ){
+ pSorter->typeMask &= SORTER_TYPE_TEXT;
+ }else{
+ pSorter->typeMask = 0;
+ }
+
+ assert( pSorter );
+
+ /* Figure out whether or not the current contents of memory should be
+ ** flushed to a PMA before continuing. If so, do so.
+ **
+  ** If using the single large allocation mode (pSorter->list.aMemory!=0), then
+ ** flush the contents of memory to a new PMA if (a) at least one value is
+ ** already in memory and (b) the new value will not fit in memory.
+ **
+ ** Or, if using separate allocations for each record, flush the contents
+ ** of memory to a PMA if either of the following are true:
+ **
+ ** * The total memory allocated for the in-memory list is greater
+ ** than (page-size * cache-size), or
+ **
+ ** * The total memory allocated for the in-memory list is greater
+ ** than (page-size * 10) and sqlite3HeapNearlyFull() returns true.
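+  **
+  ** As a rough illustration, assuming the usual szPma default of 250 and
+  ** a 4096-byte page size: mnPmaSize is 250*4096 (about 1MB), and a
+  ** cache-size setting of -2000 (i.e. 2000 KiB) gives an mxPmaSize of
+  ** about 2MB, so roughly that much record data accumulates in memory
+  ** between flushes.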
+ */
+ nReq = pVal->n + sizeof(SorterRecord);
+ nPMA = pVal->n + sqlite3VarintLen(pVal->n);
+ if( pSorter->mxPmaSize ){
+ if( pSorter->list.aMemory ){
+ bFlush = pSorter->iMemory && (pSorter->iMemory+nReq) > pSorter->mxPmaSize;
+ }else{
+ bFlush = (
+ (pSorter->list.szPMA > pSorter->mxPmaSize)
+ || (pSorter->list.szPMA > pSorter->mnPmaSize && sqlite3HeapNearlyFull())
+ );
+ }
+ if( bFlush ){
+ rc = vdbeSorterFlushPMA(pSorter);
+ pSorter->list.szPMA = 0;
+ pSorter->iMemory = 0;
+ assert( rc!=SQLITE_OK || pSorter->list.pList==0 );
+ }
+ }
+
+ pSorter->list.szPMA += nPMA;
+ if( nPMA>pSorter->mxKeysize ){
+ pSorter->mxKeysize = nPMA;
+ }
+
+ if( pSorter->list.aMemory ){
+ int nMin = pSorter->iMemory + nReq;
+
+ if( nMin>pSorter->nMemory ){
+ u8 *aNew;
+ sqlite3_int64 nNew = 2 * (sqlite3_int64)pSorter->nMemory;
+ int iListOff = -1;
+ if( pSorter->list.pList ){
+ iListOff = (u8*)pSorter->list.pList - pSorter->list.aMemory;
+ }
+ while( nNew < nMin ) nNew = nNew*2;
+ if( nNew > pSorter->mxPmaSize ) nNew = pSorter->mxPmaSize;
+ if( nNew < nMin ) nNew = nMin;
+ aNew = sqlite3Realloc(pSorter->list.aMemory, nNew);
+ if( !aNew ) return SQLITE_NOMEM_BKPT;
+ if( iListOff>=0 ){
+ pSorter->list.pList = (SorterRecord*)&aNew[iListOff];
+ }
+ pSorter->list.aMemory = aNew;
+ pSorter->nMemory = nNew;
+ }
+
+ pNew = (SorterRecord*)&pSorter->list.aMemory[pSorter->iMemory];
+ pSorter->iMemory += ROUND8(nReq);
+ if( pSorter->list.pList ){
+ pNew->u.iNext = (int)((u8*)(pSorter->list.pList) - pSorter->list.aMemory);
+ }
+ }else{
+ pNew = (SorterRecord *)sqlite3Malloc(nReq);
+ if( pNew==0 ){
+ return SQLITE_NOMEM_BKPT;
+ }
+ pNew->u.pNext = pSorter->list.pList;
+ }
+
+ memcpy(SRVAL(pNew), pVal->z, pVal->n);
+ pNew->nVal = pVal->n;
+ pSorter->list.pList = pNew;
+
+ return rc;
+}
+
+/*
+** Read keys from pIncr->pMerger and populate pIncr->aFile[1]. The format
+** of the data stored in aFile[1] is the same as that used by regular PMAs,
+** except that the number-of-bytes varint is omitted from the start.
+*/
+static int vdbeIncrPopulate(IncrMerger *pIncr){
+ int rc = SQLITE_OK;
+ int rc2;
+ i64 iStart = pIncr->iStartOff;
+ SorterFile *pOut = &pIncr->aFile[1];
+ SortSubtask *pTask = pIncr->pTask;
+ MergeEngine *pMerger = pIncr->pMerger;
+ PmaWriter writer;
+ assert( pIncr->bEof==0 );
+
+ vdbeSorterPopulateDebug(pTask, "enter");
+
+ vdbePmaWriterInit(pOut->pFd, &writer, pTask->pSorter->pgsz, iStart);
+ while( rc==SQLITE_OK ){
+ int dummy;
+ PmaReader *pReader = &pMerger->aReadr[ pMerger->aTree[1] ];
+ int nKey = pReader->nKey;
+ i64 iEof = writer.iWriteOff + writer.iBufEnd;
+
+ /* Check if the output file is full or if the input has been exhausted.
+ ** In either case exit the loop. */
+ if( pReader->pFd==0 ) break;
+ if( (iEof + nKey + sqlite3VarintLen(nKey))>(iStart + pIncr->mxSz) ) break;
+
+ /* Write the next key to the output. */
+ vdbePmaWriteVarint(&writer, nKey);
+ vdbePmaWriteBlob(&writer, pReader->aKey, nKey);
+ assert( pIncr->pMerger->pTask==pTask );
+ rc = vdbeMergeEngineStep(pIncr->pMerger, &dummy);
+ }
+
+ rc2 = vdbePmaWriterFinish(&writer, &pOut->iEof);
+ if( rc==SQLITE_OK ) rc = rc2;
+ vdbeSorterPopulateDebug(pTask, "exit");
+ return rc;
+}
+
+#if SQLITE_MAX_WORKER_THREADS>0
+/*
+** The main routine for background threads that populate aFile[1] of
+** multi-threaded IncrMerger objects.
+*/
+static void *vdbeIncrPopulateThread(void *pCtx){
+ IncrMerger *pIncr = (IncrMerger*)pCtx;
+ void *pRet = SQLITE_INT_TO_PTR( vdbeIncrPopulate(pIncr) );
+ pIncr->pTask->bDone = 1;
+ return pRet;
+}
+
+/*
+** Launch a background thread to populate aFile[1] of pIncr.
+*/
+static int vdbeIncrBgPopulate(IncrMerger *pIncr){
+ void *p = (void*)pIncr;
+ assert( pIncr->bUseThread );
+ return vdbeSorterCreateThread(pIncr->pTask, vdbeIncrPopulateThread, p);
+}
+#endif
+
+/*
+** This function is called when the PmaReader corresponding to pIncr has
+** finished reading the contents of aFile[0]. Its purpose is to "refill"
+** aFile[0] so that the PmaReader can start rereading it from the
+** beginning.
+**
+** For single-threaded objects, this is accomplished by literally reading
+** keys from pIncr->pMerger and repopulating aFile[0].
+**
+** For multi-threaded objects, all that is required is to wait until the
+** background thread is finished (if it is not already) and then swap
+** aFile[0] and aFile[1] in place. If the contents of pMerger have not
+** been exhausted, this function also launches a new background thread
+** to populate the new aFile[1].
+**
+** SQLITE_OK is returned on success, or an SQLite error code otherwise.
+*/
+static int vdbeIncrSwap(IncrMerger *pIncr){
+ int rc = SQLITE_OK;
+
+#if SQLITE_MAX_WORKER_THREADS>0
+ if( pIncr->bUseThread ){
+ rc = vdbeSorterJoinThread(pIncr->pTask);
+
+ if( rc==SQLITE_OK ){
+ SorterFile f0 = pIncr->aFile[0];
+ pIncr->aFile[0] = pIncr->aFile[1];
+ pIncr->aFile[1] = f0;
+ }
+
+ if( rc==SQLITE_OK ){
+ if( pIncr->aFile[0].iEof==pIncr->iStartOff ){
+ pIncr->bEof = 1;
+ }else{
+ rc = vdbeIncrBgPopulate(pIncr);
+ }
+ }
+ }else
+#endif
+ {
+ rc = vdbeIncrPopulate(pIncr);
+ pIncr->aFile[0] = pIncr->aFile[1];
+ if( pIncr->aFile[0].iEof==pIncr->iStartOff ){
+ pIncr->bEof = 1;
+ }
+ }
+
+ return rc;
+}
+
+/*
+** Allocate a new IncrMerger object to read data from pMerger and store a
+** pointer to it in *ppOut.
+**
+** If an OOM condition is encountered, set *ppOut to NULL, free the
+** pMerger argument and return SQLITE_NOMEM.
+*/
+static int vdbeIncrMergerNew(
+ SortSubtask *pTask, /* The thread that will be using the new IncrMerger */
+ MergeEngine *pMerger, /* The MergeEngine that the IncrMerger will control */
+ IncrMerger **ppOut /* Write the new IncrMerger here */
+){
+ int rc = SQLITE_OK;
+ IncrMerger *pIncr = *ppOut = (IncrMerger*)
+ (sqlite3FaultSim(100) ? 0 : sqlite3MallocZero(sizeof(*pIncr)));
+ if( pIncr ){
+ pIncr->pMerger = pMerger;
+ pIncr->pTask = pTask;
+ pIncr->mxSz = MAX(pTask->pSorter->mxKeysize+9,pTask->pSorter->mxPmaSize/2);
+ pTask->file2.iEof += pIncr->mxSz;
+ }else{
+ vdbeMergeEngineFree(pMerger);
+ rc = SQLITE_NOMEM_BKPT;
+ }
+ return rc;
+}
+
+#if SQLITE_MAX_WORKER_THREADS>0
+/*
+** Set the "use-threads" flag on object pIncr.
+*/
+static void vdbeIncrMergerSetThreads(IncrMerger *pIncr){
+ pIncr->bUseThread = 1;
+ pIncr->pTask->file2.iEof -= pIncr->mxSz;
+}
+#endif /* SQLITE_MAX_WORKER_THREADS>0 */
+
+
+
+/*
+** Recompute pMerger->aTree[iOut] by comparing the next keys on the
+** two PmaReaders that feed that entry. Neither of the PmaReaders
+** are advanced. This routine merely does the comparison.
+*/
+static void vdbeMergeEngineCompare(
+ MergeEngine *pMerger, /* Merge engine containing PmaReaders to compare */
+ int iOut /* Store the result in pMerger->aTree[iOut] */
+){
+ int i1;
+ int i2;
+ int iRes;
+ PmaReader *p1;
+ PmaReader *p2;
+
+ assert( iOut<pMerger->nTree && iOut>0 );
+
+ if( iOut>=(pMerger->nTree/2) ){
+ i1 = (iOut - pMerger->nTree/2) * 2;
+ i2 = i1 + 1;
+ }else{
+ i1 = pMerger->aTree[iOut*2];
+ i2 = pMerger->aTree[iOut*2+1];
+ }
+
+ p1 = &pMerger->aReadr[i1];
+ p2 = &pMerger->aReadr[i2];
+
+ if( p1->pFd==0 ){
+ iRes = i2;
+ }else if( p2->pFd==0 ){
+ iRes = i1;
+ }else{
+ SortSubtask *pTask = pMerger->pTask;
+ int bCached = 0;
+ int res;
+ assert( pTask->pUnpacked!=0 ); /* from vdbeSortSubtaskMain() */
+ res = pTask->xCompare(
+ pTask, &bCached, p1->aKey, p1->nKey, p2->aKey, p2->nKey
+ );
+ if( res<=0 ){
+ iRes = i1;
+ }else{
+ iRes = i2;
+ }
+ }
+
+ pMerger->aTree[iOut] = iRes;
+}
+
+/*
+** Allowed values for the eMode parameter to vdbeMergeEngineInit()
+** and vdbePmaReaderIncrMergeInit().
+**
+** Only INCRINIT_NORMAL is valid in single-threaded builds (when
+** SQLITE_MAX_WORKER_THREADS==0). The other values are only used
+** when there exists one or more separate worker threads.
+*/
+#define INCRINIT_NORMAL 0
+#define INCRINIT_TASK 1
+#define INCRINIT_ROOT 2
+
+/*
+** Forward reference required as the vdbeIncrMergeInit() and
+** vdbePmaReaderIncrInit() routines are called mutually recursively when
+** building a merge tree.
+*/
+static int vdbePmaReaderIncrInit(PmaReader *pReadr, int eMode);
+
+/*
+** Initialize the MergeEngine object passed as the second argument. Once this
+** function returns, the first key of merged data may be read from the
+** MergeEngine object in the usual fashion.
+**
+** If argument eMode is INCRINIT_ROOT, then it is assumed that any IncrMerge
+** objects attached to the PmaReader objects that the merger reads from have
+** already been populated, but that they have not yet populated aFile[0] and
+** set the PmaReader objects up to read from it. In this case all that is
+** required is to call vdbePmaReaderNext() on each PmaReader to point it at
+** its first key.
+**
+** Otherwise, if eMode is any value other than INCRINIT_ROOT, then use
+** vdbePmaReaderIncrMergeInit() to initialize each PmaReader that feeds data
+** to pMerger.
+**
+** SQLITE_OK is returned if successful, or an SQLite error code otherwise.
+*/
+static int vdbeMergeEngineInit(
+ SortSubtask *pTask, /* Thread that will run pMerger */
+ MergeEngine *pMerger, /* MergeEngine to initialize */
+ int eMode /* One of the INCRINIT_XXX constants */
+){
+ int rc = SQLITE_OK; /* Return code */
+ int i; /* For looping over PmaReader objects */
+ int nTree; /* Number of subtrees to merge */
+
+ /* Failure to allocate the merge would have been detected prior to
+ ** invoking this routine */
+ assert( pMerger!=0 );
+
+ /* eMode is always INCRINIT_NORMAL in single-threaded mode */
+ assert( SQLITE_MAX_WORKER_THREADS>0 || eMode==INCRINIT_NORMAL );
+
+ /* Verify that the MergeEngine is assigned to a single thread */
+ assert( pMerger->pTask==0 );
+ pMerger->pTask = pTask;
+
+ nTree = pMerger->nTree;
+ for(i=0; i<nTree; i++){
+ if( SQLITE_MAX_WORKER_THREADS>0 && eMode==INCRINIT_ROOT ){
+      /* PmaReaders should normally be initialized in order, since if they
+      ** are reading from the same temp file this makes for more linear file IO.
+ ** However, in the INCRINIT_ROOT case, if PmaReader aReadr[nTask-1] is
+ ** in use it will block the vdbePmaReaderNext() call while it uses
+ ** the main thread to fill its buffer. So calling PmaReaderNext()
+ ** on this PmaReader before any of the multi-threaded PmaReaders takes
+ ** better advantage of multi-processor hardware. */
+ rc = vdbePmaReaderNext(&pMerger->aReadr[nTree-i-1]);
+ }else{
+ rc = vdbePmaReaderIncrInit(&pMerger->aReadr[i], INCRINIT_NORMAL);
+ }
+ if( rc!=SQLITE_OK ) return rc;
+ }
+
+ for(i=pMerger->nTree-1; i>0; i--){
+ vdbeMergeEngineCompare(pMerger, i);
+ }
+ return pTask->pUnpacked->errCode;
+}
+
+/*
+** The PmaReader passed as the first argument is guaranteed to be an
+** incremental-reader (pReadr->pIncr!=0). This function serves to open
+** and/or initialize the temp file related fields of the IncrMerge
+** object at (pReadr->pIncr).
+**
+** If argument eMode is set to INCRINIT_NORMAL, then all PmaReaders
+** in the sub-tree headed by pReadr are also initialized. Data is then
+** loaded into the buffers belonging to pReadr and it is set to point to
+** the first key in its range.
+**
+** If argument eMode is set to INCRINIT_TASK, then pReadr is guaranteed
+** to be a multi-threaded PmaReader and this function is being called in a
+** background thread. In this case all PmaReaders in the sub-tree are
+** initialized as for INCRINIT_NORMAL and the aFile[1] buffer belonging to
+** pReadr is populated. However, pReadr itself is not set up to point
+** to its first key. A call to vdbePmaReaderNext() is still required to do
+** that.
+**
+** The reason this function does not call vdbePmaReaderNext() immediately
+** in the INCRINIT_TASK case is that vdbePmaReaderNext() assumes that it has
+** to block on thread (pTask->thread) before accessing aFile[1]. But, since
+** this entire function is being run by thread (pTask->thread), that will
+** lead to the current background thread attempting to join itself.
+**
+** Finally, if argument eMode is set to INCRINIT_ROOT, it may be assumed
+** that pReadr->pIncr is a multi-threaded IncrMerger object, and that all
+** child-trees have already been initialized using IncrInit(INCRINIT_TASK).
+** In this case vdbePmaReaderNext() is called on all child PmaReaders and
+** the current PmaReader is set to point to the first key in its range.
+**
+** SQLITE_OK is returned if successful, or an SQLite error code otherwise.
+*/
+static int vdbePmaReaderIncrMergeInit(PmaReader *pReadr, int eMode){
+ int rc = SQLITE_OK;
+ IncrMerger *pIncr = pReadr->pIncr;
+ SortSubtask *pTask = pIncr->pTask;
+ sqlite3 *db = pTask->pSorter->db;
+
+ /* eMode is always INCRINIT_NORMAL in single-threaded mode */
+ assert( SQLITE_MAX_WORKER_THREADS>0 || eMode==INCRINIT_NORMAL );
+
+ rc = vdbeMergeEngineInit(pTask, pIncr->pMerger, eMode);
+
+  /* Set up the required files for pIncr. A multi-threaded IncrMerge object
+ ** requires two temp files to itself, whereas a single-threaded object
+ ** only requires a region of pTask->file2. */
+ if( rc==SQLITE_OK ){
+ int mxSz = pIncr->mxSz;
+#if SQLITE_MAX_WORKER_THREADS>0
+ if( pIncr->bUseThread ){
+ rc = vdbeSorterOpenTempFile(db, mxSz, &pIncr->aFile[0].pFd);
+ if( rc==SQLITE_OK ){
+ rc = vdbeSorterOpenTempFile(db, mxSz, &pIncr->aFile[1].pFd);
+ }
+ }else
+#endif
+ /*if( !pIncr->bUseThread )*/{
+ if( pTask->file2.pFd==0 ){
+ assert( pTask->file2.iEof>0 );
+ rc = vdbeSorterOpenTempFile(db, pTask->file2.iEof, &pTask->file2.pFd);
+ pTask->file2.iEof = 0;
+ }
+ if( rc==SQLITE_OK ){
+ pIncr->aFile[1].pFd = pTask->file2.pFd;
+ pIncr->iStartOff = pTask->file2.iEof;
+ pTask->file2.iEof += mxSz;
+ }
+ }
+ }
+
+#if SQLITE_MAX_WORKER_THREADS>0
+ if( rc==SQLITE_OK && pIncr->bUseThread ){
+ /* Use the current thread to populate aFile[1], even though this
+ ** PmaReader is multi-threaded. If this is an INCRINIT_TASK object,
+ ** then this function is already running in background thread
+ ** pIncr->pTask->thread.
+ **
+ ** If this is the INCRINIT_ROOT object, then it is running in the
+ ** main VDBE thread. But that is Ok, as that thread cannot return
+ ** control to the VDBE or proceed with anything useful until the
+ ** first results are ready from this merger object anyway.
+ */
+ assert( eMode==INCRINIT_ROOT || eMode==INCRINIT_TASK );
+ rc = vdbeIncrPopulate(pIncr);
+ }
+#endif
+
+ if( rc==SQLITE_OK && (SQLITE_MAX_WORKER_THREADS==0 || eMode!=INCRINIT_TASK) ){
+ rc = vdbePmaReaderNext(pReadr);
+ }
+
+ return rc;
+}
+
+#if SQLITE_MAX_WORKER_THREADS>0
+/*
+** The main routine for vdbePmaReaderIncrMergeInit() operations run in
+** background threads.
+*/
+static void *vdbePmaReaderBgIncrInit(void *pCtx){
+ PmaReader *pReader = (PmaReader*)pCtx;
+ void *pRet = SQLITE_INT_TO_PTR(
+ vdbePmaReaderIncrMergeInit(pReader,INCRINIT_TASK)
+ );
+ pReader->pIncr->pTask->bDone = 1;
+ return pRet;
+}
+#endif
+
+/*
+** If the PmaReader passed as the first argument is not an incremental-reader
+** (if pReadr->pIncr==0), then this function is a no-op. Otherwise, it invokes
+** the vdbePmaReaderIncrMergeInit() function with the parameters passed to
+** this routine to initialize the incremental merge.
+**
+** If the IncrMerger object is multi-threaded (IncrMerger.bUseThread==1),
+** then a background thread is launched to call vdbePmaReaderIncrMergeInit().
+** Or, if the IncrMerger is single threaded, the same function is called
+** using the current thread.
+*/
+static int vdbePmaReaderIncrInit(PmaReader *pReadr, int eMode){
+ IncrMerger *pIncr = pReadr->pIncr; /* Incremental merger */
+ int rc = SQLITE_OK; /* Return code */
+ if( pIncr ){
+#if SQLITE_MAX_WORKER_THREADS>0
+ assert( pIncr->bUseThread==0 || eMode==INCRINIT_TASK );
+ if( pIncr->bUseThread ){
+ void *pCtx = (void*)pReadr;
+ rc = vdbeSorterCreateThread(pIncr->pTask, vdbePmaReaderBgIncrInit, pCtx);
+ }else
+#endif
+ {
+ rc = vdbePmaReaderIncrMergeInit(pReadr, eMode);
+ }
+ }
+ return rc;
+}
+
+/*
+** Allocate a new MergeEngine object to merge the contents of nPMA level-0
+** PMAs from pTask->file. If no error occurs, set *ppOut to point to
+** the new object and return SQLITE_OK. Or, if an error does occur, set *ppOut
+** to NULL and return an SQLite error code.
+**
+** When this function is called, *piOffset is set to the offset of the
+** first PMA to read from pTask->file. Assuming no error occurs, it is
+** set to the offset immediately following the last byte of the last
+** PMA before returning. If an error does occur, then the final value of
+** *piOffset is undefined.
+*/
+static int vdbeMergeEngineLevel0(
+ SortSubtask *pTask, /* Sorter task to read from */
+ int nPMA, /* Number of PMAs to read */
+ i64 *piOffset, /* IN/OUT: Readr offset in pTask->file */
+ MergeEngine **ppOut /* OUT: New merge-engine */
+){
+ MergeEngine *pNew; /* Merge engine to return */
+ i64 iOff = *piOffset;
+ int i;
+ int rc = SQLITE_OK;
+
+ *ppOut = pNew = vdbeMergeEngineNew(nPMA);
+ if( pNew==0 ) rc = SQLITE_NOMEM_BKPT;
+
+ for(i=0; i<nPMA && rc==SQLITE_OK; i++){
+ i64 nDummy = 0;
+ PmaReader *pReadr = &pNew->aReadr[i];
+ rc = vdbePmaReaderInit(pTask, &pTask->file, iOff, pReadr, &nDummy);
+ iOff = pReadr->iEof;
+ }
+
+ if( rc!=SQLITE_OK ){
+ vdbeMergeEngineFree(pNew);
+ *ppOut = 0;
+ }
+ *piOffset = iOff;
+ return rc;
+}
+
+/*
+** Return the depth of a tree comprising nPMA PMAs, assuming a fanout of
+** SORTER_MAX_MERGE_COUNT. The returned value does not include leaf nodes.
+**
+** i.e.
+**
+** nPMA<=16 -> TreeDepth() == 0
+** nPMA<=256 -> TreeDepth() == 1
+** nPMA<=65536 -> TreeDepth() == 2
+*/
+static int vdbeSorterTreeDepth(int nPMA){
+ int nDepth = 0;
+ i64 nDiv = SORTER_MAX_MERGE_COUNT;
+ while( nDiv < (i64)nPMA ){
+ nDiv = nDiv * SORTER_MAX_MERGE_COUNT;
+ nDepth++;
+ }
+ return nDepth;
+}
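+
+/* For example, with the usual SORTER_MAX_MERGE_COUNT of 16 and nPMA==300:
+** nDiv runs 16 -> 256 -> 4096, incrementing nDepth twice before
+** 4096 >= 300, so a depth-2 tree (a root whose children are themselves
+** 16-way mergers) is required.
+*/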
+
+/*
+** pRoot is the root of an incremental merge-tree with depth nDepth (according
+** to vdbeSorterTreeDepth()). pLeaf is the iSeq'th leaf to be added to the
+** tree, counting from zero. This function adds pLeaf to the tree.
+**
+** If successful, SQLITE_OK is returned. If an error occurs, an SQLite error
+** code is returned and pLeaf is freed.
+*/
+static int vdbeSorterAddToTree(
+ SortSubtask *pTask, /* Task context */
+ int nDepth, /* Depth of tree according to TreeDepth() */
+ int iSeq, /* Sequence number of leaf within tree */
+ MergeEngine *pRoot, /* Root of tree */
+ MergeEngine *pLeaf /* Leaf to add to tree */
+){
+ int rc = SQLITE_OK;
+ int nDiv = 1;
+ int i;
+ MergeEngine *p = pRoot;
+ IncrMerger *pIncr;
+
+ rc = vdbeIncrMergerNew(pTask, pLeaf, &pIncr);
+
+ for(i=1; i<nDepth; i++){
+ nDiv = nDiv * SORTER_MAX_MERGE_COUNT;
+ }
+
+ for(i=1; i<nDepth && rc==SQLITE_OK; i++){
+ int iIter = (iSeq / nDiv) % SORTER_MAX_MERGE_COUNT;
+ PmaReader *pReadr = &p->aReadr[iIter];
+
+ if( pReadr->pIncr==0 ){
+ MergeEngine *pNew = vdbeMergeEngineNew(SORTER_MAX_MERGE_COUNT);
+ if( pNew==0 ){
+ rc = SQLITE_NOMEM_BKPT;
+ }else{
+ rc = vdbeIncrMergerNew(pTask, pNew, &pReadr->pIncr);
+ }
+ }
+ if( rc==SQLITE_OK ){
+ p = pReadr->pIncr->pMerger;
+ nDiv = nDiv / SORTER_MAX_MERGE_COUNT;
+ }
+ }
+
+ if( rc==SQLITE_OK ){
+ p->aReadr[iSeq % SORTER_MAX_MERGE_COUNT].pIncr = pIncr;
+ }else{
+ vdbeIncrFree(pIncr);
+ }
+ return rc;
+}
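+
+/* For example, with SORTER_MAX_MERGE_COUNT==16 and nDepth==2, leaf number
+** iSeq==37 starts with nDiv==16, descends into root child (37/16)%16 == 2
+** (creating that child's IncrMerger on first use) and is attached at slot
+** 37%16 == 5 of that child. Leaves 32..47 therefore share root child 2 -
+** sixteen leaves per child, exactly matching the fan-out.
+*/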
+
+/*
+** This function is called as part of a SorterRewind() operation on a sorter
+** that has already written two or more level-0 PMAs to one or more temp
+** files. It builds a tree of MergeEngine/IncrMerger/PmaReader objects that
+** can be used to incrementally merge all PMAs on disk.
+**
+** If successful, SQLITE_OK is returned and *ppOut set to point to the
+** MergeEngine object at the root of the tree before returning. Or, if an
+** error occurs, an SQLite error code is returned and the final value
+** of *ppOut is undefined.
+*/
+static int vdbeSorterMergeTreeBuild(
+  VdbeSorter *pSorter,            /* The sorter object to build a tree for */
+ MergeEngine **ppOut /* Write the MergeEngine here */
+){
+ MergeEngine *pMain = 0;
+ int rc = SQLITE_OK;
+ int iTask;
+
+#if SQLITE_MAX_WORKER_THREADS>0
+ /* If the sorter uses more than one task, then create the top-level
+ ** MergeEngine here. This MergeEngine will read data from exactly
+ ** one PmaReader per sub-task. */
+ assert( pSorter->bUseThreads || pSorter->nTask==1 );
+ if( pSorter->nTask>1 ){
+ pMain = vdbeMergeEngineNew(pSorter->nTask);
+ if( pMain==0 ) rc = SQLITE_NOMEM_BKPT;
+ }
+#endif
+
+ for(iTask=0; rc==SQLITE_OK && iTask<pSorter->nTask; iTask++){
+ SortSubtask *pTask = &pSorter->aTask[iTask];
+ assert( pTask->nPMA>0 || SQLITE_MAX_WORKER_THREADS>0 );
+ if( SQLITE_MAX_WORKER_THREADS==0 || pTask->nPMA ){
+ MergeEngine *pRoot = 0; /* Root node of tree for this task */
+ int nDepth = vdbeSorterTreeDepth(pTask->nPMA);
+ i64 iReadOff = 0;
+
+ if( pTask->nPMA<=SORTER_MAX_MERGE_COUNT ){
+ rc = vdbeMergeEngineLevel0(pTask, pTask->nPMA, &iReadOff, &pRoot);
+ }else{
+ int i;
+ int iSeq = 0;
+ pRoot = vdbeMergeEngineNew(SORTER_MAX_MERGE_COUNT);
+ if( pRoot==0 ) rc = SQLITE_NOMEM_BKPT;
+ for(i=0; i<pTask->nPMA && rc==SQLITE_OK; i += SORTER_MAX_MERGE_COUNT){
+ MergeEngine *pMerger = 0; /* New level-0 PMA merger */
+ int nReader; /* Number of level-0 PMAs to merge */
+
+ nReader = MIN(pTask->nPMA - i, SORTER_MAX_MERGE_COUNT);
+ rc = vdbeMergeEngineLevel0(pTask, nReader, &iReadOff, &pMerger);
+ if( rc==SQLITE_OK ){
+ rc = vdbeSorterAddToTree(pTask, nDepth, iSeq++, pRoot, pMerger);
+ }
+ }
+ }
+
+ if( rc==SQLITE_OK ){
+#if SQLITE_MAX_WORKER_THREADS>0
+ if( pMain!=0 ){
+ rc = vdbeIncrMergerNew(pTask, pRoot, &pMain->aReadr[iTask].pIncr);
+ }else
+#endif
+ {
+ assert( pMain==0 );
+ pMain = pRoot;
+ }
+ }else{
+ vdbeMergeEngineFree(pRoot);
+ }
+ }
+ }
+
+ if( rc!=SQLITE_OK ){
+ vdbeMergeEngineFree(pMain);
+ pMain = 0;
+ }
+ *ppOut = pMain;
+ return rc;
+}
+
+/*
+** This function is called as part of an sqlite3VdbeSorterRewind() operation
+** on a sorter that has written two or more PMAs to temporary files. It sets
+** up either VdbeSorter.pMerger (for single-threaded sorters) or pReader
+** (for multi-threaded sorters) so that it can be used to iterate through
+** all records stored in the sorter.
+**
+** SQLITE_OK is returned if successful, or an SQLite error code otherwise.
+*/
+static int vdbeSorterSetupMerge(VdbeSorter *pSorter){
+ int rc; /* Return code */
+ SortSubtask *pTask0 = &pSorter->aTask[0];
+ MergeEngine *pMain = 0;
+#if SQLITE_MAX_WORKER_THREADS
+ sqlite3 *db = pTask0->pSorter->db;
+ int i;
+ SorterCompare xCompare = vdbeSorterGetCompare(pSorter);
+ for(i=0; i<pSorter->nTask; i++){
+ pSorter->aTask[i].xCompare = xCompare;
+ }
+#endif
+
+ rc = vdbeSorterMergeTreeBuild(pSorter, &pMain);
+ if( rc==SQLITE_OK ){
+#if SQLITE_MAX_WORKER_THREADS
+ assert( pSorter->bUseThreads==0 || pSorter->nTask>1 );
+ if( pSorter->bUseThreads ){
+ int iTask;
+ PmaReader *pReadr = 0;
+ SortSubtask *pLast = &pSorter->aTask[pSorter->nTask-1];
+ rc = vdbeSortAllocUnpacked(pLast);
+ if( rc==SQLITE_OK ){
+ pReadr = (PmaReader*)sqlite3DbMallocZero(db, sizeof(PmaReader));
+ pSorter->pReader = pReadr;
+ if( pReadr==0 ) rc = SQLITE_NOMEM_BKPT;
+ }
+ if( rc==SQLITE_OK ){
+ rc = vdbeIncrMergerNew(pLast, pMain, &pReadr->pIncr);
+ if( rc==SQLITE_OK ){
+ vdbeIncrMergerSetThreads(pReadr->pIncr);
+ for(iTask=0; iTask<(pSorter->nTask-1); iTask++){
+ IncrMerger *pIncr;
+ if( (pIncr = pMain->aReadr[iTask].pIncr) ){
+ vdbeIncrMergerSetThreads(pIncr);
+ assert( pIncr->pTask!=pLast );
+ }
+ }
+ for(iTask=0; rc==SQLITE_OK && iTask<pSorter->nTask; iTask++){
+ /* Check that:
+ **
+ ** a) The incremental merge object is configured to use the
+ ** right task, and
+ ** b) If it is using task (nTask-1), it is configured to run
+ ** in single-threaded mode. This is important, as the
+ ** root merge (INCRINIT_ROOT) will be using the same task
+ ** object.
+ */
+ PmaReader *p = &pMain->aReadr[iTask];
+ assert( p->pIncr==0 || (
+ (p->pIncr->pTask==&pSorter->aTask[iTask]) /* a */
+ && (iTask!=pSorter->nTask-1 || p->pIncr->bUseThread==0) /* b */
+ ));
+ rc = vdbePmaReaderIncrInit(p, INCRINIT_TASK);
+ }
+ }
+ pMain = 0;
+ }
+ if( rc==SQLITE_OK ){
+ rc = vdbePmaReaderIncrMergeInit(pReadr, INCRINIT_ROOT);
+ }
+ }else
+#endif
+ {
+ rc = vdbeMergeEngineInit(pTask0, pMain, INCRINIT_NORMAL);
+ pSorter->pMerger = pMain;
+ pMain = 0;
+ }
+ }
+
+ if( rc!=SQLITE_OK ){
+ vdbeMergeEngineFree(pMain);
+ }
+ return rc;
+}
+
+
+/*
+** Once the sorter has been populated by calls to sqlite3VdbeSorterWrite,
+** this function is called to prepare for iterating through the records
+** in sorted order.
+*/
+SQLITE_PRIVATE int sqlite3VdbeSorterRewind(const VdbeCursor *pCsr, int *pbEof){
+ VdbeSorter *pSorter;
+ int rc = SQLITE_OK; /* Return code */
+
+ assert( pCsr->eCurType==CURTYPE_SORTER );
+ pSorter = pCsr->uc.pSorter;
+ assert( pSorter );
+
+ /* If no data has been written to disk, then do not do so now. Instead,
+  ** sort the VdbeSorter.list records. The vdbe layer will read data directly
+ ** from the in-memory list. */
+ if( pSorter->bUsePMA==0 ){
+ if( pSorter->list.pList ){
+ *pbEof = 0;
+ rc = vdbeSorterSort(&pSorter->aTask[0], &pSorter->list);
+ }else{
+ *pbEof = 1;
+ }
+ return rc;
+ }
+
+  /* Write the current in-memory list to a PMA. Whenever the
+  ** VdbeSorterWrite() function flushes the contents of memory to disk, it
+  ** always creates a new list consisting of a single key immediately
+  ** afterwards. So the list is never empty at this point. */
+ assert( pSorter->list.pList );
+ rc = vdbeSorterFlushPMA(pSorter);
+
+ /* Join all threads */
+ rc = vdbeSorterJoinAll(pSorter, rc);
+
+ vdbeSorterRewindDebug("rewind");
+
+ /* Assuming no errors have occurred, set up a merger structure to
+ ** incrementally read and merge all remaining PMAs. */
+ assert( pSorter->pReader==0 );
+ if( rc==SQLITE_OK ){
+ rc = vdbeSorterSetupMerge(pSorter);
+ *pbEof = 0;
+ }
+
+ vdbeSorterRewindDebug("rewinddone");
+ return rc;
+}
+
+/*
+** Advance to the next element in the sorter. Return value:
+**
+** SQLITE_OK success
+** SQLITE_DONE end of data
+** otherwise some kind of error.
+*/
+SQLITE_PRIVATE int sqlite3VdbeSorterNext(sqlite3 *db, const VdbeCursor *pCsr){
+ VdbeSorter *pSorter;
+ int rc; /* Return code */
+
+ assert( pCsr->eCurType==CURTYPE_SORTER );
+ pSorter = pCsr->uc.pSorter;
+ assert( pSorter->bUsePMA || (pSorter->pReader==0 && pSorter->pMerger==0) );
+ if( pSorter->bUsePMA ){
+ assert( pSorter->pReader==0 || pSorter->pMerger==0 );
+ assert( pSorter->bUseThreads==0 || pSorter->pReader );
+ assert( pSorter->bUseThreads==1 || pSorter->pMerger );
+#if SQLITE_MAX_WORKER_THREADS>0
+ if( pSorter->bUseThreads ){
+ rc = vdbePmaReaderNext(pSorter->pReader);
+ if( rc==SQLITE_OK && pSorter->pReader->pFd==0 ) rc = SQLITE_DONE;
+ }else
+#endif
+ /*if( !pSorter->bUseThreads )*/ {
+ int res = 0;
+ assert( pSorter->pMerger!=0 );
+ assert( pSorter->pMerger->pTask==(&pSorter->aTask[0]) );
+ rc = vdbeMergeEngineStep(pSorter->pMerger, &res);
+ if( rc==SQLITE_OK && res ) rc = SQLITE_DONE;
+ }
+ }else{
+ SorterRecord *pFree = pSorter->list.pList;
+ pSorter->list.pList = pFree->u.pNext;
+ pFree->u.pNext = 0;
+ if( pSorter->list.aMemory==0 ) vdbeSorterRecordFree(db, pFree);
+ rc = pSorter->list.pList ? SQLITE_OK : SQLITE_DONE;
+ }
+ return rc;
+}
+
+/*
+** Return a pointer to a buffer owned by the sorter that contains the
+** current key.
+*/
+static void *vdbeSorterRowkey(
+ const VdbeSorter *pSorter, /* Sorter object */
+ int *pnKey /* OUT: Size of current key in bytes */
+){
+ void *pKey;
+ if( pSorter->bUsePMA ){
+ PmaReader *pReader;
+#if SQLITE_MAX_WORKER_THREADS>0
+ if( pSorter->bUseThreads ){
+ pReader = pSorter->pReader;
+ }else
+#endif
+ /*if( !pSorter->bUseThreads )*/{
+ pReader = &pSorter->pMerger->aReadr[pSorter->pMerger->aTree[1]];
+ }
+ *pnKey = pReader->nKey;
+ pKey = pReader->aKey;
+ }else{
+ *pnKey = pSorter->list.pList->nVal;
+ pKey = SRVAL(pSorter->list.pList);
+ }
+ return pKey;
+}
+
+/*
+** Copy the current sorter key into the memory cell pOut.
+*/
+SQLITE_PRIVATE int sqlite3VdbeSorterRowkey(const VdbeCursor *pCsr, Mem *pOut){
+ VdbeSorter *pSorter;
+ void *pKey; int nKey; /* Sorter key to copy into pOut */
+
+ assert( pCsr->eCurType==CURTYPE_SORTER );
+ pSorter = pCsr->uc.pSorter;
+ pKey = vdbeSorterRowkey(pSorter, &nKey);
+ if( sqlite3VdbeMemClearAndResize(pOut, nKey) ){
+ return SQLITE_NOMEM_BKPT;
+ }
+ pOut->n = nKey;
+ MemSetTypeFlag(pOut, MEM_Blob);
+ memcpy(pOut->z, pKey, nKey);
+
+ return SQLITE_OK;
+}
+
+/*
+** Compare the key in memory cell pVal with the key that the sorter cursor
+** (passed as the first argument) currently points to. For the purposes of
+** the comparison, ignore the rowid field at the end of each record.
+**
+** If the sorter cursor key contains any NULL values, consider it to be
+** less than pVal, even if pVal also contains NULL values.
+**
+** If an error occurs, return an SQLite error code (i.e. SQLITE_NOMEM).
+** Otherwise, set *pRes to a negative, zero or positive value if the
+** key in pVal is smaller than, equal to or larger than the current sorter
+** key.
+**
+** This routine forms the core of the OP_SorterCompare opcode, which in
+** turn is used to verify uniqueness when constructing a UNIQUE INDEX.
+*/
+SQLITE_PRIVATE int sqlite3VdbeSorterCompare(
+ const VdbeCursor *pCsr, /* Sorter cursor */
+ Mem *pVal, /* Value to compare to current sorter key */
+ int nKeyCol, /* Compare this many columns */
+ int *pRes /* OUT: Result of comparison */
+){
+ VdbeSorter *pSorter;
+ UnpackedRecord *r2;
+ KeyInfo *pKeyInfo;
+ int i;
+ void *pKey; int nKey; /* Sorter key to compare pVal with */
+
+ assert( pCsr->eCurType==CURTYPE_SORTER );
+ pSorter = pCsr->uc.pSorter;
+ r2 = pSorter->pUnpacked;
+ pKeyInfo = pCsr->pKeyInfo;
+ if( r2==0 ){
+ r2 = pSorter->pUnpacked = sqlite3VdbeAllocUnpackedRecord(pKeyInfo);
+ if( r2==0 ) return SQLITE_NOMEM_BKPT;
+ r2->nField = nKeyCol;
+ }
+ assert( r2->nField==nKeyCol );
+
+ pKey = vdbeSorterRowkey(pSorter, &nKey);
+ sqlite3VdbeRecordUnpack(pKeyInfo, nKey, pKey, r2);
+ for(i=0; i<nKeyCol; i++){
+ if( r2->aMem[i].flags & MEM_Null ){
+ *pRes = -1;
+ return SQLITE_OK;
+ }
+ }
+
+ *pRes = sqlite3VdbeRecordCompare(pVal->n, pVal->z, r2);
+ return SQLITE_OK;
+}
+
+/************** End of vdbesort.c ********************************************/
+/************** Begin file vdbevtab.c ****************************************/
+/*
+** 2020-03-23
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+**
+** This file implements virtual-tables for examining the bytecode content
+** of a prepared statement.
+*/
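+/* Editor's note -- illustrative usage, assuming a build compiled with
+** SQLITE_ENABLE_BYTECODE_VTAB (names follow the schemas declared in
+** bytecodevtabConnect() below):
+**
+**   SELECT addr, opcode, p1, p2, p3 FROM bytecode('SELECT * FROM t1');
+**   SELECT type, schema, name, wr FROM tables_used('SELECT * FROM t1');
+*/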
+/* #include "sqliteInt.h" */
+#if defined(SQLITE_ENABLE_BYTECODE_VTAB) && !defined(SQLITE_OMIT_VIRTUALTABLE)
+/* #include "vdbeInt.h" */
+
+/* An instance of the bytecode() table-valued function.
+*/
+typedef struct bytecodevtab bytecodevtab;
+struct bytecodevtab {
+ sqlite3_vtab base; /* Base class - must be first */
+ sqlite3 *db; /* Database connection */
+ int bTablesUsed; /* 2 for tables_used(). 0 for bytecode(). */
+};
+
+/* A cursor for scanning through the bytecode
+*/
+typedef struct bytecodevtab_cursor bytecodevtab_cursor;
+struct bytecodevtab_cursor {
+ sqlite3_vtab_cursor base; /* Base class - must be first */
+ sqlite3_stmt *pStmt; /* The statement whose bytecode is displayed */
+ int iRowid; /* The rowid of the output table */
+ int iAddr; /* Address */
+  int needFinalize;            /* Cursor owns pStmt and must finalize it */
+ int showSubprograms; /* Provide a listing of subprograms */
+ Op *aOp; /* Operand array */
+ char *zP4; /* Rendered P4 value */
+ const char *zType; /* tables_used.type */
+ const char *zSchema; /* tables_used.schema */
+ const char *zName; /* tables_used.name */
+ Mem sub; /* Subprograms */
+};
+
+/*
+** Create a new bytecode() table-valued function.
+*/
+static int bytecodevtabConnect(
+ sqlite3 *db,
+ void *pAux,
+ int argc, const char *const*argv,
+ sqlite3_vtab **ppVtab,
+ char **pzErr
+){
+ bytecodevtab *pNew;
+ int rc;
+ int isTabUsed = pAux!=0;
+ const char *azSchema[2] = {
+ /* bytecode() schema */
+ "CREATE TABLE x("
+ "addr INT,"
+ "opcode TEXT,"
+ "p1 INT,"
+ "p2 INT,"
+ "p3 INT,"
+ "p4 TEXT,"
+ "p5 INT,"
+ "comment TEXT,"
+ "subprog TEXT,"
+ "stmt HIDDEN"
+ ");",
+
+ /* Tables_used() schema */
+ "CREATE TABLE x("
+ "type TEXT,"
+ "schema TEXT,"
+ "name TEXT,"
+ "wr INT,"
+ "subprog TEXT,"
+ "stmt HIDDEN"
+ ");"
+ };
+
+ rc = sqlite3_declare_vtab(db, azSchema[isTabUsed]);
+ if( rc==SQLITE_OK ){
+ pNew = sqlite3_malloc( sizeof(*pNew) );
+ *ppVtab = (sqlite3_vtab*)pNew;
+ if( pNew==0 ) return SQLITE_NOMEM;
+ memset(pNew, 0, sizeof(*pNew));
+ pNew->db = db;
+ pNew->bTablesUsed = isTabUsed*2;
+ }
+ return rc;
+}
+
+/*
+** This method is the destructor for bytecodevtab objects.
+*/
+static int bytecodevtabDisconnect(sqlite3_vtab *pVtab){
+ bytecodevtab *p = (bytecodevtab*)pVtab;
+ sqlite3_free(p);
+ return SQLITE_OK;
+}
+
+/*
+** Constructor for a new bytecodevtab_cursor object.
+*/
+static int bytecodevtabOpen(sqlite3_vtab *p, sqlite3_vtab_cursor **ppCursor){
+ bytecodevtab *pVTab = (bytecodevtab*)p;
+ bytecodevtab_cursor *pCur;
+ pCur = sqlite3_malloc( sizeof(*pCur) );
+ if( pCur==0 ) return SQLITE_NOMEM;
+ memset(pCur, 0, sizeof(*pCur));
+ sqlite3VdbeMemInit(&pCur->sub, pVTab->db, 1);
+ *ppCursor = &pCur->base;
+ return SQLITE_OK;
+}
+
+/*
+** Clear all internal content from a bytecodevtab cursor.
+*/
+static void bytecodevtabCursorClear(bytecodevtab_cursor *pCur){
+ sqlite3_free(pCur->zP4);
+ pCur->zP4 = 0;
+ sqlite3VdbeMemRelease(&pCur->sub);
+ sqlite3VdbeMemSetNull(&pCur->sub);
+ if( pCur->needFinalize ){
+ sqlite3_finalize(pCur->pStmt);
+ }
+ pCur->pStmt = 0;
+ pCur->needFinalize = 0;
+ pCur->zType = 0;
+ pCur->zSchema = 0;
+ pCur->zName = 0;
+}
+
+/*
+** Destructor for a bytecodevtab_cursor.
+*/
+static int bytecodevtabClose(sqlite3_vtab_cursor *cur){
+ bytecodevtab_cursor *pCur = (bytecodevtab_cursor*)cur;
+ bytecodevtabCursorClear(pCur);
+ sqlite3_free(pCur);
+ return SQLITE_OK;
+}
+
+
+/*
+** Advance a bytecodevtab_cursor to its next row of output.
+*/
+static int bytecodevtabNext(sqlite3_vtab_cursor *cur){
+ bytecodevtab_cursor *pCur = (bytecodevtab_cursor*)cur;
+ bytecodevtab *pTab = (bytecodevtab*)cur->pVtab;
+ int rc;
+ if( pCur->zP4 ){
+ sqlite3_free(pCur->zP4);
+ pCur->zP4 = 0;
+ }
+ if( pCur->zName ){
+ pCur->zName = 0;
+ pCur->zType = 0;
+ pCur->zSchema = 0;
+ }
+ rc = sqlite3VdbeNextOpcode(
+ (Vdbe*)pCur->pStmt,
+ pCur->showSubprograms ? &pCur->sub : 0,
+ pTab->bTablesUsed,
+ &pCur->iRowid,
+ &pCur->iAddr,
+ &pCur->aOp);
+ if( rc!=SQLITE_OK ){
+ sqlite3VdbeMemSetNull(&pCur->sub);
+ pCur->aOp = 0;
+ }
+ return SQLITE_OK;
+}
+
+/*
+** Return TRUE if the cursor has been moved off of the last
+** row of output.
+*/
+static int bytecodevtabEof(sqlite3_vtab_cursor *cur){
+ bytecodevtab_cursor *pCur = (bytecodevtab_cursor*)cur;
+ return pCur->aOp==0;
+}
+
+/*
+** Return values of columns for the row at which the bytecodevtab_cursor
+** is currently pointing.
+*/
+static int bytecodevtabColumn(
+ sqlite3_vtab_cursor *cur, /* The cursor */
+ sqlite3_context *ctx, /* First argument to sqlite3_result_...() */
+ int i /* Which column to return */
+){
+ bytecodevtab_cursor *pCur = (bytecodevtab_cursor*)cur;
+ bytecodevtab *pVTab = (bytecodevtab*)cur->pVtab;
+ Op *pOp = pCur->aOp + pCur->iAddr;
+ if( pVTab->bTablesUsed ){
+ if( i==4 ){
+ i = 8;
+ }else{
+ if( i<=2 && pCur->zType==0 ){
+ Schema *pSchema;
+ HashElem *k;
+ int iDb = pOp->p3;
+ Pgno iRoot = (Pgno)pOp->p2;
+ sqlite3 *db = pVTab->db;
+ pSchema = db->aDb[iDb].pSchema;
+ pCur->zSchema = db->aDb[iDb].zDbSName;
+ for(k=sqliteHashFirst(&pSchema->tblHash); k; k=sqliteHashNext(k)){
+ Table *pTab = (Table*)sqliteHashData(k);
+ if( !IsVirtual(pTab) && pTab->tnum==iRoot ){
+ pCur->zName = pTab->zName;
+ pCur->zType = "table";
+ break;
+ }
+ }
+ if( pCur->zName==0 ){
+ for(k=sqliteHashFirst(&pSchema->idxHash); k; k=sqliteHashNext(k)){
+ Index *pIdx = (Index*)sqliteHashData(k);
+ if( pIdx->tnum==iRoot ){
+ pCur->zName = pIdx->zName;
+ pCur->zType = "index";
+ }
+ }
+ }
+ }
+ i += 10;
+ }
+ }
+ switch( i ){
+ case 0: /* addr */
+ sqlite3_result_int(ctx, pCur->iAddr);
+ break;
+ case 1: /* opcode */
+ sqlite3_result_text(ctx, (char*)sqlite3OpcodeName(pOp->opcode),
+ -1, SQLITE_STATIC);
+ break;
+ case 2: /* p1 */
+ sqlite3_result_int(ctx, pOp->p1);
+ break;
+ case 3: /* p2 */
+ sqlite3_result_int(ctx, pOp->p2);
+ break;
+ case 4: /* p3 */
+ sqlite3_result_int(ctx, pOp->p3);
+ break;
+ case 5: /* p4 */
+ case 7: /* comment */
+ if( pCur->zP4==0 ){
+ pCur->zP4 = sqlite3VdbeDisplayP4(pVTab->db, pOp);
+ }
+ if( i==5 ){
+ sqlite3_result_text(ctx, pCur->zP4, -1, SQLITE_STATIC);
+ }else{
+#ifdef SQLITE_ENABLE_EXPLAIN_COMMENTS
+ char *zCom = sqlite3VdbeDisplayComment(pVTab->db, pOp, pCur->zP4);
+ sqlite3_result_text(ctx, zCom, -1, sqlite3_free);
+#endif
+ }
+ break;
+ case 6: /* p5 */
+ sqlite3_result_int(ctx, pOp->p5);
+ break;
+ case 8: { /* subprog */
+ Op *aOp = pCur->aOp;
+ assert( aOp[0].opcode==OP_Init );
+ assert( aOp[0].p4.z==0 || strncmp(aOp[0].p4.z,"-" "- ",3)==0 );
+ if( pCur->iRowid==pCur->iAddr+1 ){
+ break; /* Result is NULL for the main program */
+ }else if( aOp[0].p4.z!=0 ){
+ sqlite3_result_text(ctx, aOp[0].p4.z+3, -1, SQLITE_STATIC);
+ }else{
+ sqlite3_result_text(ctx, "(FK)", 4, SQLITE_STATIC);
+ }
+ break;
+ }
+ case 10: /* tables_used.type */
+ sqlite3_result_text(ctx, pCur->zType, -1, SQLITE_STATIC);
+ break;
+ case 11: /* tables_used.schema */
+ sqlite3_result_text(ctx, pCur->zSchema, -1, SQLITE_STATIC);
+ break;
+ case 12: /* tables_used.name */
+ sqlite3_result_text(ctx, pCur->zName, -1, SQLITE_STATIC);
+ break;
+ case 13: /* tables_used.wr */
+ sqlite3_result_int(ctx, pOp->opcode==OP_OpenWrite);
+ break;
+ }
+ return SQLITE_OK;
+}
+
+/*
+** Return the rowid for the current row. In this implementation, the
+** rowid is simply a counter that increments once for each row of output.
+*/
+static int bytecodevtabRowid(sqlite3_vtab_cursor *cur, sqlite_int64 *pRowid){
+ bytecodevtab_cursor *pCur = (bytecodevtab_cursor*)cur;
+ *pRowid = pCur->iRowid;
+ return SQLITE_OK;
+}
+
+/*
+** Initialize a cursor.
+**
+** idxNum==0 means show all subprograms
+** idxNum==1 means show only the main bytecode and omit subprograms.
+*/
+static int bytecodevtabFilter(
+ sqlite3_vtab_cursor *pVtabCursor,
+ int idxNum, const char *idxStr,
+ int argc, sqlite3_value **argv
+){
+ bytecodevtab_cursor *pCur = (bytecodevtab_cursor *)pVtabCursor;
+ bytecodevtab *pVTab = (bytecodevtab *)pVtabCursor->pVtab;
+ int rc = SQLITE_OK;
+
+ bytecodevtabCursorClear(pCur);
+ pCur->iRowid = 0;
+ pCur->iAddr = 0;
+ pCur->showSubprograms = idxNum==0;
+ assert( argc==1 );
+ if( sqlite3_value_type(argv[0])==SQLITE_TEXT ){
+ const char *zSql = (const char*)sqlite3_value_text(argv[0]);
+ if( zSql==0 ){
+ rc = SQLITE_NOMEM;
+ }else{
+ rc = sqlite3_prepare_v2(pVTab->db, zSql, -1, &pCur->pStmt, 0);
+ pCur->needFinalize = 1;
+ }
+ }else{
+ pCur->pStmt = (sqlite3_stmt*)sqlite3_value_pointer(argv[0],"stmt-pointer");
+ }
+ if( pCur->pStmt==0 ){
+ pVTab->base.zErrMsg = sqlite3_mprintf(
+ "argument to %s() is not a valid SQL statement",
+ pVTab->bTablesUsed ? "tables_used" : "bytecode"
+ );
+ rc = SQLITE_ERROR;
+ }else{
+ bytecodevtabNext(pVtabCursor);
+ }
+ return rc;
+}
+
+/*
+** We must have a single stmt=? constraint that will be passed through
+** into the xFilter method. If there is no valid stmt=? constraint,
+** then return an SQLITE_CONSTRAINT error.
+*/
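+/* Editor's illustration: the constraint is satisfied by the table-valued
+** function form, e.g.
+**
+**   SELECT * FROM bytecode('SELECT 1');
+**
+** whereas "SELECT * FROM bytecode;" supplies no stmt=? constraint and so
+** fails to compile. */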
+static int bytecodevtabBestIndex(
+ sqlite3_vtab *tab,
+ sqlite3_index_info *pIdxInfo
+){
+ int i;
+ int rc = SQLITE_CONSTRAINT;
+ struct sqlite3_index_constraint *p;
+ bytecodevtab *pVTab = (bytecodevtab*)tab;
+ int iBaseCol = pVTab->bTablesUsed ? 4 : 8;
+ pIdxInfo->estimatedCost = (double)100;
+ pIdxInfo->estimatedRows = 100;
+ pIdxInfo->idxNum = 0;
+ for(i=0, p=pIdxInfo->aConstraint; i<pIdxInfo->nConstraint; i++, p++){
+ if( p->usable==0 ) continue;
+ if( p->op==SQLITE_INDEX_CONSTRAINT_EQ && p->iColumn==iBaseCol+1 ){
+ rc = SQLITE_OK;
+ pIdxInfo->aConstraintUsage[i].omit = 1;
+ pIdxInfo->aConstraintUsage[i].argvIndex = 1;
+ }
+ if( p->op==SQLITE_INDEX_CONSTRAINT_ISNULL && p->iColumn==iBaseCol ){
+ pIdxInfo->aConstraintUsage[i].omit = 1;
+ pIdxInfo->idxNum = 1;
+ }
+ }
+ return rc;
+}
+
+/*
+** The following structure defines all the methods for the
+** virtual table.
+*/
+static sqlite3_module bytecodevtabModule = {
+ /* iVersion */ 0,
+ /* xCreate */ 0,
+ /* xConnect */ bytecodevtabConnect,
+ /* xBestIndex */ bytecodevtabBestIndex,
+ /* xDisconnect */ bytecodevtabDisconnect,
+ /* xDestroy */ 0,
+ /* xOpen */ bytecodevtabOpen,
+ /* xClose */ bytecodevtabClose,
+ /* xFilter */ bytecodevtabFilter,
+ /* xNext */ bytecodevtabNext,
+ /* xEof */ bytecodevtabEof,
+ /* xColumn */ bytecodevtabColumn,
+ /* xRowid */ bytecodevtabRowid,
+ /* xUpdate */ 0,
+ /* xBegin */ 0,
+ /* xSync */ 0,
+ /* xCommit */ 0,
+ /* xRollback */ 0,
+ /* xFindMethod */ 0,
+ /* xRename */ 0,
+ /* xSavepoint */ 0,
+ /* xRelease */ 0,
+ /* xRollbackTo */ 0,
+ /* xShadowName */ 0
+};
+
+
+SQLITE_PRIVATE int sqlite3VdbeBytecodeVtabInit(sqlite3 *db){
+ int rc;
+ rc = sqlite3_create_module(db, "bytecode", &bytecodevtabModule, 0);
+ if( rc==SQLITE_OK ){
+ rc = sqlite3_create_module(db, "tables_used", &bytecodevtabModule, &db);
+ }
+ return rc;
+}
+#elif defined(SQLITE_ENABLE_BYTECODE_VTAB)
+SQLITE_PRIVATE int sqlite3VdbeBytecodeVtabInit(sqlite3 *db){ return SQLITE_OK; }
+#endif /* SQLITE_ENABLE_BYTECODE_VTAB */
+
+/************** End of vdbevtab.c ********************************************/
+/************** Begin file memjournal.c **************************************/
+/*
+** 2008 October 7
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+**
+** This file contains code used to implement an in-memory rollback journal.
+** The in-memory rollback journal is used to journal transactions for
+** ":memory:" databases and when the journal_mode=MEMORY pragma is used.
+**
+** Update: The in-memory journal is also used to temporarily cache
+** smaller journals that are not critical for power-loss recovery.
+** For example, statement journals that are not too big will be held
+** entirely in memory, thus reducing the number of file I/O calls, and
+** more importantly, reducing temporary file creation events. If these
+** journals become too large for memory, they are spilled to disk. But
+** in the common case, they are usually small and no file I/O needs to
+** occur.
+*/
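+/* Editor's illustration: this code path is reached, for example, after
+**
+**   PRAGMA journal_mode=MEMORY;
+**
+** or whenever a statement journal stays below its spill threshold. */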
+/* #include "sqliteInt.h" */
+
+/* Forward references to internal structures */
+typedef struct MemJournal MemJournal;
+typedef struct FilePoint FilePoint;
+typedef struct FileChunk FileChunk;
+
+/*
+** The rollback journal is composed of a linked list of these structures.
+**
+** The zChunk array is always at least 8 bytes in size - usually much more.
+** Its actual size is stored in the MemJournal.nChunkSize variable.
+*/
+struct FileChunk {
+ FileChunk *pNext; /* Next chunk in the journal */
+ u8 zChunk[8]; /* Content of this chunk */
+};
+
+/*
+** By default, allocate this many bytes of memory for each FileChunk object.
+*/
+#define MEMJOURNAL_DFLT_FILECHUNKSIZE 1024
+
+/*
+** For chunk size nChunkSize, return the number of bytes that should
+** be allocated for each FileChunk structure.
+*/
+#define fileChunkSize(nChunkSize) (sizeof(FileChunk) + ((nChunkSize)-8))
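+/* Editor's note: e.g. sqlite3JournalOpen() below picks nChunkSize as
+** 8 + MEMJOURNAL_DFLT_FILECHUNKSIZE - sizeof(FileChunk), so that
+** fileChunkSize(nChunkSize)==MEMJOURNAL_DFLT_FILECHUNKSIZE (1024) exactly. */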
+
+/*
+** An instance of this object serves as a cursor into the rollback journal.
+** The cursor can be either for reading or writing.
+*/
+struct FilePoint {
+ sqlite3_int64 iOffset; /* Offset from the beginning of the file */
+ FileChunk *pChunk; /* Specific chunk into which cursor points */
+};
+
+/*
+** This structure is a subclass of sqlite3_file. Each open memory-journal
+** is an instance of this class.
+*/
+struct MemJournal {
+ const sqlite3_io_methods *pMethod; /* Parent class. MUST BE FIRST */
+ int nChunkSize; /* In-memory chunk-size */
+
+ int nSpill; /* Bytes of data before flushing */
+ int nSize; /* Bytes of data currently in memory */
+ FileChunk *pFirst; /* Head of in-memory chunk-list */
+ FilePoint endpoint; /* Pointer to the end of the file */
+ FilePoint readpoint; /* Pointer to the end of the last xRead() */
+
+ int flags; /* xOpen flags */
+ sqlite3_vfs *pVfs; /* The "real" underlying VFS */
+ const char *zJournal; /* Name of the journal file */
+};
+
+/*
+** Read data from the in-memory journal file. This is the implementation
+** of the sqlite3_io_methods.xRead method.
+*/
+static int memjrnlRead(
+ sqlite3_file *pJfd, /* The journal file from which to read */
+ void *zBuf, /* Put the results here */
+ int iAmt, /* Number of bytes to read */
+ sqlite_int64 iOfst /* Begin reading at this offset */
+){
+ MemJournal *p = (MemJournal *)pJfd;
+ u8 *zOut = zBuf;
+ int nRead = iAmt;
+ int iChunkOffset;
+ FileChunk *pChunk;
+
+ if( (iAmt+iOfst)>p->endpoint.iOffset ){
+ return SQLITE_IOERR_SHORT_READ;
+ }
+ assert( p->readpoint.iOffset==0 || p->readpoint.pChunk!=0 );
+ if( p->readpoint.iOffset!=iOfst || iOfst==0 ){
+ sqlite3_int64 iOff = 0;
+ for(pChunk=p->pFirst;
+ ALWAYS(pChunk) && (iOff+p->nChunkSize)<=iOfst;
+ pChunk=pChunk->pNext
+ ){
+ iOff += p->nChunkSize;
+ }
+ }else{
+ pChunk = p->readpoint.pChunk;
+ assert( pChunk!=0 );
+ }
+
+ iChunkOffset = (int)(iOfst%p->nChunkSize);
+ do {
+ int iSpace = p->nChunkSize - iChunkOffset;
+ int nCopy = MIN(nRead, (p->nChunkSize - iChunkOffset));
+ memcpy(zOut, (u8*)pChunk->zChunk + iChunkOffset, nCopy);
+ zOut += nCopy;
+ nRead -= iSpace;
+ iChunkOffset = 0;
+ } while( nRead>=0 && (pChunk=pChunk->pNext)!=0 && nRead>0 );
+ p->readpoint.iOffset = pChunk ? iOfst+iAmt : 0;
+ p->readpoint.pChunk = pChunk;
+
+ return SQLITE_OK;
+}
+
+/*
+** Free the list of FileChunk structures headed at MemJournal.pFirst.
+*/
+static void memjrnlFreeChunks(MemJournal *p){
+ FileChunk *pIter;
+ FileChunk *pNext;
+ for(pIter=p->pFirst; pIter; pIter=pNext){
+ pNext = pIter->pNext;
+ sqlite3_free(pIter);
+ }
+ p->pFirst = 0;
+}
+
+/*
+** Flush the contents of memory to a real file on disk.
+*/
+static int memjrnlCreateFile(MemJournal *p){
+ int rc;
+ sqlite3_file *pReal = (sqlite3_file*)p;
+ MemJournal copy = *p;
+
+ memset(p, 0, sizeof(MemJournal));
+ rc = sqlite3OsOpen(copy.pVfs, copy.zJournal, pReal, copy.flags, 0);
+ if( rc==SQLITE_OK ){
+ int nChunk = copy.nChunkSize;
+ i64 iOff = 0;
+ FileChunk *pIter;
+ for(pIter=copy.pFirst; pIter; pIter=pIter->pNext){
+ if( iOff + nChunk > copy.endpoint.iOffset ){
+ nChunk = copy.endpoint.iOffset - iOff;
+ }
+ rc = sqlite3OsWrite(pReal, (u8*)pIter->zChunk, nChunk, iOff);
+ if( rc ) break;
+ iOff += nChunk;
+ }
+ if( rc==SQLITE_OK ){
+ /* No error has occurred. Free the in-memory buffers. */
+ memjrnlFreeChunks(&copy);
+ }
+ }
+ if( rc!=SQLITE_OK ){
+ /* If an error occurred while creating or writing to the file, restore
+ ** the original before returning. This way, SQLite uses the in-memory
+ ** journal data to roll back changes made to the internal page-cache
+ ** before this function was called. */
+ sqlite3OsClose(pReal);
+ *p = copy;
+ }
+ return rc;
+}
+
+
+/*
+** Write data to the file.
+*/
+static int memjrnlWrite(
+ sqlite3_file *pJfd, /* The journal file into which to write */
+ const void *zBuf, /* Take data to be written from here */
+ int iAmt, /* Number of bytes to write */
+ sqlite_int64 iOfst /* Begin writing at this offset into the file */
+){
+ MemJournal *p = (MemJournal *)pJfd;
+ int nWrite = iAmt;
+ u8 *zWrite = (u8 *)zBuf;
+
+ /* If the file should be created now, create it and write the new data
+ ** into the file on disk. */
+ if( p->nSpill>0 && (iAmt+iOfst)>p->nSpill ){
+ int rc = memjrnlCreateFile(p);
+ if( rc==SQLITE_OK ){
+ rc = sqlite3OsWrite(pJfd, zBuf, iAmt, iOfst);
+ }
+ return rc;
+ }
+
+ /* If the contents of this write should be stored in memory */
+ else{
+ /* An in-memory journal file should only ever be appended to. Random
+ ** access writes are not required. The only exception to this is when
+ ** the in-memory journal is being used by a connection using the
+ ** atomic-write optimization. In this case the first 28 bytes of the
+ ** journal file may be written as part of committing the transaction. */
+ assert( iOfst==p->endpoint.iOffset || iOfst==0 );
+#if defined(SQLITE_ENABLE_ATOMIC_WRITE) \
+ || defined(SQLITE_ENABLE_BATCH_ATOMIC_WRITE)
+ if( iOfst==0 && p->pFirst ){
+ assert( p->nChunkSize>iAmt );
+ memcpy((u8*)p->pFirst->zChunk, zBuf, iAmt);
+ }else
+#else
+ assert( iOfst>0 || p->pFirst==0 );
+#endif
+ {
+ while( nWrite>0 ){
+ FileChunk *pChunk = p->endpoint.pChunk;
+ int iChunkOffset = (int)(p->endpoint.iOffset%p->nChunkSize);
+ int iSpace = MIN(nWrite, p->nChunkSize - iChunkOffset);
+
+ if( iChunkOffset==0 ){
+ /* New chunk is required to extend the file. */
+ FileChunk *pNew = sqlite3_malloc(fileChunkSize(p->nChunkSize));
+ if( !pNew ){
+ return SQLITE_IOERR_NOMEM_BKPT;
+ }
+ pNew->pNext = 0;
+ if( pChunk ){
+ assert( p->pFirst );
+ pChunk->pNext = pNew;
+ }else{
+ assert( !p->pFirst );
+ p->pFirst = pNew;
+ }
+ p->endpoint.pChunk = pNew;
+ }
+
+ memcpy((u8*)p->endpoint.pChunk->zChunk + iChunkOffset, zWrite, iSpace);
+ zWrite += iSpace;
+ nWrite -= iSpace;
+ p->endpoint.iOffset += iSpace;
+ }
+ p->nSize = iAmt + iOfst;
+ }
+ }
+
+ return SQLITE_OK;
+}
+
+/*
+** Truncate the file.
+**
+** If the journal file is already on disk, truncate it there. Or, if it
+** is still in main memory but is being truncated to zero bytes in size,
+** simply release the in-memory chunks and reset the journal to empty.
+*/
+static int memjrnlTruncate(sqlite3_file *pJfd, sqlite_int64 size){
+ MemJournal *p = (MemJournal *)pJfd;
+ if( ALWAYS(size==0) ){
+ memjrnlFreeChunks(p);
+ p->nSize = 0;
+ p->endpoint.pChunk = 0;
+ p->endpoint.iOffset = 0;
+ p->readpoint.pChunk = 0;
+ p->readpoint.iOffset = 0;
+ }
+ return SQLITE_OK;
+}
+
+/*
+** Close the file.
+*/
+static int memjrnlClose(sqlite3_file *pJfd){
+ MemJournal *p = (MemJournal *)pJfd;
+ memjrnlFreeChunks(p);
+ return SQLITE_OK;
+}
+
+/*
+** Sync the file.
+**
+** If the real file has been created, call its xSync method. Otherwise,
+** syncing an in-memory journal is a no-op.
+*/
+static int memjrnlSync(sqlite3_file *pJfd, int flags){
+ UNUSED_PARAMETER2(pJfd, flags);
+ return SQLITE_OK;
+}
+
+/*
+** Query the size of the file in bytes.
+*/
+static int memjrnlFileSize(sqlite3_file *pJfd, sqlite_int64 *pSize){
+ MemJournal *p = (MemJournal *)pJfd;
+ *pSize = (sqlite_int64) p->endpoint.iOffset;
+ return SQLITE_OK;
+}
+
+/*
+** Table of methods for MemJournal sqlite3_file object.
+*/
+static const struct sqlite3_io_methods MemJournalMethods = {
+ 1, /* iVersion */
+ memjrnlClose, /* xClose */
+ memjrnlRead, /* xRead */
+ memjrnlWrite, /* xWrite */
+ memjrnlTruncate, /* xTruncate */
+ memjrnlSync, /* xSync */
+ memjrnlFileSize, /* xFileSize */
+ 0, /* xLock */
+ 0, /* xUnlock */
+ 0, /* xCheckReservedLock */
+ 0, /* xFileControl */
+ 0, /* xSectorSize */
+ 0, /* xDeviceCharacteristics */
+ 0, /* xShmMap */
+ 0, /* xShmLock */
+ 0, /* xShmBarrier */
+ 0, /* xShmUnmap */
+ 0, /* xFetch */
+ 0 /* xUnfetch */
+};
+
+/*
+** Open a journal file.
+**
+** The behaviour of the journal file depends on the value of parameter
+** nSpill. If nSpill is 0, then the journal file is always created and
+** accessed using the underlying VFS. If nSpill is less than zero, then
+** all content is always stored in main-memory. Finally, if nSpill is a
+** positive value, then the journal file is initially created in-memory
+** but may be flushed to disk later on. In this case the journal file is
+** flushed to disk either when it grows larger than nSpill bytes in size,
+** or when sqlite3JournalCreate() is called.
+*/
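+/* Editor's sketch of the three modes (the argument values shown are
+** hypothetical):
+**
+**   sqlite3JournalOpen(pVfs, zName, pJfd, flags, 0);    - real file via VFS
+**   sqlite3JournalOpen(pVfs, zName, pJfd, flags, -1);   - memory only
+**   sqlite3JournalOpen(pVfs, zName, pJfd, flags, 8192); - spill past 8KB
+*/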
+SQLITE_PRIVATE int sqlite3JournalOpen(
+ sqlite3_vfs *pVfs, /* The VFS to use for actual file I/O */
+ const char *zName, /* Name of the journal file */
+ sqlite3_file *pJfd, /* Preallocated, blank file handle */
+ int flags, /* Opening flags */
+ int nSpill /* Bytes buffered before opening the file */
+){
+ MemJournal *p = (MemJournal*)pJfd;
+
+ /* Zero the file-handle object. If nSpill was passed zero, initialize
+ ** it using the sqlite3OsOpen() function of the underlying VFS. In this
+ ** case none of the code in this module is executed as a result of calls
+ ** made on the journal file-handle. */
+ memset(p, 0, sizeof(MemJournal));
+ if( nSpill==0 ){
+ return sqlite3OsOpen(pVfs, zName, pJfd, flags, 0);
+ }
+
+ if( nSpill>0 ){
+ p->nChunkSize = nSpill;
+ }else{
+ p->nChunkSize = 8 + MEMJOURNAL_DFLT_FILECHUNKSIZE - sizeof(FileChunk);
+ assert( MEMJOURNAL_DFLT_FILECHUNKSIZE==fileChunkSize(p->nChunkSize) );
+ }
+
+ pJfd->pMethods = (const sqlite3_io_methods*)&MemJournalMethods;
+ p->nSpill = nSpill;
+ p->flags = flags;
+ p->zJournal = zName;
+ p->pVfs = pVfs;
+ return SQLITE_OK;
+}
+
+/*
+** Open an in-memory journal file.
+*/
+SQLITE_PRIVATE void sqlite3MemJournalOpen(sqlite3_file *pJfd){
+ sqlite3JournalOpen(0, 0, pJfd, 0, -1);
+}
+
+#if defined(SQLITE_ENABLE_ATOMIC_WRITE) \
+ || defined(SQLITE_ENABLE_BATCH_ATOMIC_WRITE)
+/*
+** If the argument p points to a MemJournal structure that is not an
+** in-memory-only journal file (i.e. one that was opened with a positive
+** nSpill parameter or as SQLITE_OPEN_MAIN_JOURNAL), and the underlying
+** file has not yet been created, create it now.
+*/
+SQLITE_PRIVATE int sqlite3JournalCreate(sqlite3_file *pJfd){
+ int rc = SQLITE_OK;
+ MemJournal *p = (MemJournal*)pJfd;
+ if( pJfd->pMethods==&MemJournalMethods && (
+#ifdef SQLITE_ENABLE_ATOMIC_WRITE
+ p->nSpill>0
+#else
+  /* While this appears not to be possible without ATOMIC_WRITE, the
+ ** paths are complex, so it seems prudent to leave the test in as
+ ** a NEVER(), in case our analysis is subtly flawed. */
+ NEVER(p->nSpill>0)
+#endif
+#ifdef SQLITE_ENABLE_BATCH_ATOMIC_WRITE
+ || (p->flags & SQLITE_OPEN_MAIN_JOURNAL)
+#endif
+ )){
+ rc = memjrnlCreateFile(p);
+ }
+ return rc;
+}
+#endif
+
+/*
+** The file-handle passed as the only argument is open on a journal file.
+** Return true if this "journal file" is currently stored in heap memory,
+** or false otherwise.
+*/
+SQLITE_PRIVATE int sqlite3JournalIsInMemory(sqlite3_file *p){
+ return p->pMethods==&MemJournalMethods;
+}
+
+/*
+** Return the number of bytes required to store a JournalFile that uses vfs
+** pVfs to create the underlying on-disk files.
+*/
+SQLITE_PRIVATE int sqlite3JournalSize(sqlite3_vfs *pVfs){
+ return MAX(pVfs->szOsFile, (int)sizeof(MemJournal));
+}
+
+/************** End of memjournal.c ******************************************/
+/************** Begin file walker.c ******************************************/
+/*
+** 2008 August 16
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+** This file contains routines used for walking the parser tree for
+** an SQL statement.
+*/
+/* #include "sqliteInt.h" */
+/* #include <stdlib.h> */
+/* #include <string.h> */
+
+
+#if !defined(SQLITE_OMIT_WINDOWFUNC)
+/*
+** Walk all expressions linked into the list of Window objects passed
+** as the second argument.
+*/
+static int walkWindowList(Walker *pWalker, Window *pList){
+ Window *pWin;
+ for(pWin=pList; pWin; pWin=pWin->pNextWin){
+ int rc;
+ rc = sqlite3WalkExprList(pWalker, pWin->pOrderBy);
+ if( rc ) return WRC_Abort;
+ rc = sqlite3WalkExprList(pWalker, pWin->pPartition);
+ if( rc ) return WRC_Abort;
+ rc = sqlite3WalkExpr(pWalker, pWin->pFilter);
+ if( rc ) return WRC_Abort;
+
+ /* The next two are purely for calls to sqlite3RenameExprUnmap()
+ ** within sqlite3WindowOffsetExpr(). Because of constraints imposed
+ ** by sqlite3WindowOffsetExpr(), they can never fail. The results do
+ ** not matter anyhow. */
+ rc = sqlite3WalkExpr(pWalker, pWin->pStart);
+ if( NEVER(rc) ) return WRC_Abort;
+ rc = sqlite3WalkExpr(pWalker, pWin->pEnd);
+ if( NEVER(rc) ) return WRC_Abort;
+ }
+ return WRC_Continue;
+}
+#endif
+
+/*
+** Walk an expression tree. Invoke the callback once for each node
+** of the expression, while descending. (In other words, the callback
+** is invoked before visiting children.)
+**
+** The return value from the callback should be one of the WRC_*
+** constants to specify how to proceed with the walk.
+**
+** WRC_Continue Continue descending down the tree.
+**
+** WRC_Prune Do not descend into child nodes, but allow
+** the walk to continue with sibling nodes.
+**
+** WRC_Abort Do no more callbacks. Unwind the stack and
+** return from the top-level walk call.
+**
+** The return value from this routine is WRC_Abort to abandon the tree walk
+** and WRC_Continue to continue.
+*/
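+/* Editor's sketch of a caller-supplied callback (hypothetical, not part
+** of SQLite itself): a walk that prunes everything beneath CASE
+** expressions could use
+**
+**   static int pruneCase(Walker *pWalker, Expr *pExpr){
+**     return pExpr->op==TK_CASE ? WRC_Prune : WRC_Continue;
+**   }
+**
+** installed via w.xExprCallback = pruneCase before sqlite3WalkExpr(&w,p).
+*/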
+static SQLITE_NOINLINE int walkExpr(Walker *pWalker, Expr *pExpr){
+ int rc;
+ testcase( ExprHasProperty(pExpr, EP_TokenOnly) );
+ testcase( ExprHasProperty(pExpr, EP_Reduced) );
+ while(1){
+ rc = pWalker->xExprCallback(pWalker, pExpr);
+ if( rc ) return rc & WRC_Abort;
+ if( !ExprHasProperty(pExpr,(EP_TokenOnly|EP_Leaf)) ){
+ assert( pExpr->x.pList==0 || pExpr->pRight==0 );
+ if( pExpr->pLeft && walkExpr(pWalker, pExpr->pLeft) ) return WRC_Abort;
+ if( pExpr->pRight ){
+ assert( !ExprHasProperty(pExpr, EP_WinFunc) );
+ pExpr = pExpr->pRight;
+ continue;
+ }else if( ExprHasProperty(pExpr, EP_xIsSelect) ){
+ assert( !ExprHasProperty(pExpr, EP_WinFunc) );
+ if( sqlite3WalkSelect(pWalker, pExpr->x.pSelect) ) return WRC_Abort;
+ }else{
+ if( pExpr->x.pList ){
+ if( sqlite3WalkExprList(pWalker, pExpr->x.pList) ) return WRC_Abort;
+ }
+#ifndef SQLITE_OMIT_WINDOWFUNC
+ if( ExprHasProperty(pExpr, EP_WinFunc) ){
+ if( walkWindowList(pWalker, pExpr->y.pWin) ) return WRC_Abort;
+ }
+#endif
+ }
+ }
+ break;
+ }
+ return WRC_Continue;
+}
+SQLITE_PRIVATE int sqlite3WalkExpr(Walker *pWalker, Expr *pExpr){
+ return pExpr ? walkExpr(pWalker,pExpr) : WRC_Continue;
+}
+
+/*
+** Call sqlite3WalkExpr() for every expression in list p or until
+** an abort request is seen.
+*/
+SQLITE_PRIVATE int sqlite3WalkExprList(Walker *pWalker, ExprList *p){
+ int i;
+ struct ExprList_item *pItem;
+ if( p ){
+ for(i=p->nExpr, pItem=p->a; i>0; i--, pItem++){
+ if( sqlite3WalkExpr(pWalker, pItem->pExpr) ) return WRC_Abort;
+ }
+ }
+ return WRC_Continue;
+}
+
+/*
+** Walk all expressions associated with SELECT statement p. Do
+** not invoke the SELECT callback on p, but do (of course) invoke
+** any expr callbacks and SELECT callbacks that come from subqueries.
+** Return WRC_Abort or WRC_Continue.
+*/
+SQLITE_PRIVATE int sqlite3WalkSelectExpr(Walker *pWalker, Select *p){
+ if( sqlite3WalkExprList(pWalker, p->pEList) ) return WRC_Abort;
+ if( sqlite3WalkExpr(pWalker, p->pWhere) ) return WRC_Abort;
+ if( sqlite3WalkExprList(pWalker, p->pGroupBy) ) return WRC_Abort;
+ if( sqlite3WalkExpr(pWalker, p->pHaving) ) return WRC_Abort;
+ if( sqlite3WalkExprList(pWalker, p->pOrderBy) ) return WRC_Abort;
+ if( sqlite3WalkExpr(pWalker, p->pLimit) ) return WRC_Abort;
+#if !defined(SQLITE_OMIT_WINDOWFUNC) && !defined(SQLITE_OMIT_ALTERTABLE)
+ {
+ Parse *pParse = pWalker->pParse;
+ if( pParse && IN_RENAME_OBJECT ){
+ /* The following may return WRC_Abort if there are unresolvable
+ ** symbols (e.g. a table that does not exist) in a window definition. */
+ int rc = walkWindowList(pWalker, p->pWinDefn);
+ return rc;
+ }
+ }
+#endif
+ return WRC_Continue;
+}
+
+/*
+** Walk the parse trees associated with all subqueries in the
+** FROM clause of SELECT statement p. Do not invoke the select
+** callback on p, but do invoke it on each FROM clause subquery
+** and on any subqueries further down in the tree. Return
+** WRC_Abort or WRC_Continue;
+*/
+SQLITE_PRIVATE int sqlite3WalkSelectFrom(Walker *pWalker, Select *p){
+ SrcList *pSrc;
+ int i;
+ struct SrcList_item *pItem;
+
+ pSrc = p->pSrc;
+ if( pSrc ){
+ for(i=pSrc->nSrc, pItem=pSrc->a; i>0; i--, pItem++){
+ if( pItem->pSelect && sqlite3WalkSelect(pWalker, pItem->pSelect) ){
+ return WRC_Abort;
+ }
+ if( pItem->fg.isTabFunc
+ && sqlite3WalkExprList(pWalker, pItem->u1.pFuncArg)
+ ){
+ return WRC_Abort;
+ }
+ }
+ }
+ return WRC_Continue;
+}
+
+/*
+** Call sqlite3WalkExpr() for every expression in Select statement p.
+** Invoke sqlite3WalkSelect() for subqueries in the FROM clause and
+** on the compound select chain, p->pPrior.
+**
+** If it is not NULL, the xSelectCallback() callback is invoked before
+** the walk of the expressions and FROM clause. The xSelectCallback2()
+** method is invoked following the walk of the expressions and FROM clause,
+** but only if xSelectCallback and xSelectCallback2 are both non-NULL
+** and if the walks of the expressions and FROM clause both return
+** WRC_Continue.
+**
+** Return WRC_Continue under normal conditions. Return WRC_Abort if
+** there is an abort request.
+**
+** If the Walker does not have an xSelectCallback() then this routine
+** is a no-op returning WRC_Continue.
+*/
+SQLITE_PRIVATE int sqlite3WalkSelect(Walker *pWalker, Select *p){
+ int rc;
+ if( p==0 ) return WRC_Continue;
+ if( pWalker->xSelectCallback==0 ) return WRC_Continue;
+ do{
+ rc = pWalker->xSelectCallback(pWalker, p);
+ if( rc ) return rc & WRC_Abort;
+ if( sqlite3WalkSelectExpr(pWalker, p)
+ || sqlite3WalkSelectFrom(pWalker, p)
+ ){
+ return WRC_Abort;
+ }
+ if( pWalker->xSelectCallback2 ){
+ pWalker->xSelectCallback2(pWalker, p);
+ }
+ p = p->pPrior;
+ }while( p!=0 );
+ return WRC_Continue;
+}
+
+/* Increase the walkerDepth when entering a subquery, and
+** decrease it when leaving the subquery.
+*/
+SQLITE_PRIVATE int sqlite3WalkerDepthIncrease(Walker *pWalker, Select *pSelect){
+ UNUSED_PARAMETER(pSelect);
+ pWalker->walkerDepth++;
+ return WRC_Continue;
+}
+SQLITE_PRIVATE void sqlite3WalkerDepthDecrease(Walker *pWalker, Select *pSelect){
+ UNUSED_PARAMETER(pSelect);
+ pWalker->walkerDepth--;
+}
+
+
+/*
+** No-op routine for the parse-tree walker.
+**
+** When this routine is the Walker.xExprCallback then expression trees
+** are walked without any actions being taken at each node. Presumably,
+** when this routine is used for Walker.xExprCallback then
+** Walker.xSelectCallback is set to do something useful for every
+** subquery in the parser tree.
+*/
+SQLITE_PRIVATE int sqlite3ExprWalkNoop(Walker *NotUsed, Expr *NotUsed2){
+ UNUSED_PARAMETER2(NotUsed, NotUsed2);
+ return WRC_Continue;
+}
+
+/*
+** No-op routine for the parse-tree walker for SELECT statements.
+*/
+SQLITE_PRIVATE int sqlite3SelectWalkNoop(Walker *NotUsed, Select *NotUsed2){
+ UNUSED_PARAMETER2(NotUsed, NotUsed2);
+ return WRC_Continue;
+}
+
+/************** End of walker.c **********************************************/
+/************** Begin file resolve.c *****************************************/
+/*
+** 2008 August 18
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+**
+** This file contains routines used for walking the parser tree and
+** resolving all identifiers by associating them with a particular
+** table and column.
+*/
+/* #include "sqliteInt.h" */
+
+/*
+** Magic table number to mean the EXCLUDED table in an UPSERT statement.
+*/
+#define EXCLUDED_TABLE_NUMBER 2
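+/* For example (editor's illustration), in:
+**
+**   INSERT INTO t(a,b) VALUES(1,2)
+**     ON CONFLICT(a) DO UPDATE SET b=excluded.b;
+**
+** references to "excluded.b" resolve against this magic table number. */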
+
+/*
+** Walk the expression tree pExpr and increase the aggregate function
+** depth (the Expr.op2 field) by N on every TK_AGG_FUNCTION node.
+** This needs to occur when copying a TK_AGG_FUNCTION node from an
+** outer query into an inner subquery.
+**
+** incrAggFunctionDepth(pExpr,n) is the main routine. incrAggDepth(..)
+** is a helper function - a callback for the tree walker.
+**
+** See also the sqlite3WindowExtraAggFuncDepth() routine in window.c
+*/
+static int incrAggDepth(Walker *pWalker, Expr *pExpr){
+ if( pExpr->op==TK_AGG_FUNCTION ) pExpr->op2 += pWalker->u.n;
+ return WRC_Continue;
+}
+static void incrAggFunctionDepth(Expr *pExpr, int N){
+ if( N>0 ){
+ Walker w;
+ memset(&w, 0, sizeof(w));
+ w.xExprCallback = incrAggDepth;
+ w.u.n = N;
+ sqlite3WalkExpr(&w, pExpr);
+ }
+}
+
+/*
+** Turn the pExpr expression into an alias for the iCol-th column of the
+** result set in pEList.
+**
+** If the reference is followed by a COLLATE operator, then make sure
+** the COLLATE operator is preserved. For example:
+**
+** SELECT a+b, c+d FROM t1 ORDER BY 1 COLLATE nocase;
+**
+** Should be transformed into:
+**
+** SELECT a+b, c+d FROM t1 ORDER BY (a+b) COLLATE nocase;
+**
+** The nSubquery parameter specifies how many levels of subquery the
+** alias is removed from the original expression. The usual value is
+** zero but it might be more if the alias is contained within a subquery
+** of the original expression. The Expr.op2 field of TK_AGG_FUNCTION
+** structures must be increased by the nSubquery amount.
+*/
+static void resolveAlias(
+ Parse *pParse, /* Parsing context */
+ ExprList *pEList, /* A result set */
+ int iCol, /* A column in the result set. 0..pEList->nExpr-1 */
+ Expr *pExpr, /* Transform this into an alias to the result set */
+ const char *zType, /* "GROUP" or "ORDER" or "" */
+ int nSubquery /* Number of subqueries that the label is moving */
+){
+ Expr *pOrig; /* The iCol-th column of the result set */
+ Expr *pDup; /* Copy of pOrig */
+ sqlite3 *db; /* The database connection */
+
+ assert( iCol>=0 && iCol<pEList->nExpr );
+ pOrig = pEList->a[iCol].pExpr;
+ assert( pOrig!=0 );
+ db = pParse->db;
+ pDup = sqlite3ExprDup(db, pOrig, 0);
+ if( pDup!=0 ){
+ if( zType[0]!='G' ) incrAggFunctionDepth(pDup, nSubquery);
+ if( pExpr->op==TK_COLLATE ){
+ pDup = sqlite3ExprAddCollateString(pParse, pDup, pExpr->u.zToken);
+ }
+
+ /* Before calling sqlite3ExprDelete(), set the EP_Static flag. This
+ ** prevents ExprDelete() from deleting the Expr structure itself,
+ ** allowing it to be repopulated by the memcpy() on the following line.
+ ** The pExpr->u.zToken might point into memory that will be freed by the
+ ** sqlite3DbFree(db, pDup) on the last line of this block, so be sure to
+ ** make a copy of the token before doing the sqlite3DbFree().
+ */
+ ExprSetProperty(pExpr, EP_Static);
+ sqlite3ExprDelete(db, pExpr);
+ memcpy(pExpr, pDup, sizeof(*pExpr));
+ if( !ExprHasProperty(pExpr, EP_IntValue) && pExpr->u.zToken!=0 ){
+ assert( (pExpr->flags & (EP_Reduced|EP_TokenOnly))==0 );
+ pExpr->u.zToken = sqlite3DbStrDup(db, pExpr->u.zToken);
+ pExpr->flags |= EP_MemToken;
+ }
+ if( ExprHasProperty(pExpr, EP_WinFunc) ){
+ if( pExpr->y.pWin!=0 ){
+ pExpr->y.pWin->pOwner = pExpr;
+ }else{
+ assert( db->mallocFailed );
+ }
+ }
+ sqlite3DbFree(db, pDup);
+ }
+ ExprSetProperty(pExpr, EP_Alias);
+}
+
+
+/*
+** Return TRUE if the name zCol occurs anywhere in the USING clause.
+**
+** Return FALSE if the USING clause is NULL or if it does not contain
+** zCol.
+*/
+static int nameInUsingClause(IdList *pUsing, const char *zCol){
+ if( pUsing ){
+ int k;
+ for(k=0; k<pUsing->nId; k++){
+ if( sqlite3StrICmp(pUsing->a[k].zName, zCol)==0 ) return 1;
+ }
+ }
+ return 0;
+}
+
+/*
+** Subqueries store the original database, table and column names for their
+** result sets in ExprList.a[].zSpan, in the form "DATABASE.TABLE.COLUMN".
+** Check to see if the zSpan given to this routine matches the zDb, zTab,
+** and zCol. If any of zDb, zTab, and zCol are NULL then those fields will
+** match anything.
+*/
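+/* Editor's illustration: a zSpan of "main.t1.a" is matched by the
+** (zDb,zTab,zCol) triples ("main","t1","a"), (0,"t1","a") or (0,0,"a"). */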
+SQLITE_PRIVATE int sqlite3MatchEName(
+ const struct ExprList_item *pItem,
+ const char *zCol,
+ const char *zTab,
+ const char *zDb
+){
+ int n;
+ const char *zSpan;
+ if( pItem->eEName!=ENAME_TAB ) return 0;
+ zSpan = pItem->zEName;
+ for(n=0; ALWAYS(zSpan[n]) && zSpan[n]!='.'; n++){}
+ if( zDb && (sqlite3StrNICmp(zSpan, zDb, n)!=0 || zDb[n]!=0) ){
+ return 0;
+ }
+ zSpan += n+1;
+ for(n=0; ALWAYS(zSpan[n]) && zSpan[n]!='.'; n++){}
+ if( zTab && (sqlite3StrNICmp(zSpan, zTab, n)!=0 || zTab[n]!=0) ){
+ return 0;
+ }
+ zSpan += n+1;
+ if( zCol && sqlite3StrICmp(zSpan, zCol)!=0 ){
+ return 0;
+ }
+ return 1;
+}
+
+/*
+** Return TRUE if the double-quoted string mis-feature should be supported.
+*/
+static int areDoubleQuotedStringsEnabled(sqlite3 *db, NameContext *pTopNC){
+  if( db->init.busy ) return 1;   /* Always supported for legacy schemas */
+ if( pTopNC->ncFlags & NC_IsDDL ){
+ /* Currently parsing a DDL statement */
+ if( sqlite3WritableSchema(db) && (db->flags & SQLITE_DqsDML)!=0 ){
+ return 1;
+ }
+ return (db->flags & SQLITE_DqsDDL)!=0;
+ }else{
+ /* Currently parsing a DML statement */
+ return (db->flags & SQLITE_DqsDML)!=0;
+ }
+}
+
+/*
+** The argument is guaranteed to be a non-NULL Expr node of type TK_COLUMN.
+** return the appropriate colUsed mask.
+*/
+SQLITE_PRIVATE Bitmask sqlite3ExprColUsed(Expr *pExpr){
+ int n;
+ Table *pExTab;
+
+ n = pExpr->iColumn;
+ pExTab = pExpr->y.pTab;
+ assert( pExTab!=0 );
+ if( (pExTab->tabFlags & TF_HasGenerated)!=0
+ && (pExTab->aCol[n].colFlags & COLFLAG_GENERATED)!=0
+ ){
+ testcase( pExTab->nCol==BMS-1 );
+ testcase( pExTab->nCol==BMS );
+ return pExTab->nCol>=BMS ? ALLBITS : MASKBIT(pExTab->nCol)-1;
+ }else{
+ testcase( n==BMS-1 );
+ testcase( n==BMS );
+ if( n>=BMS ) n = BMS-1;
+ return ((Bitmask)1)<<n;
+ }
+}
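+/* Editor's example: a reference to column 2 of an ordinary table yields
+** MASKBIT(2)==0x4. Columns numbered BMS-1 or higher all map to the single
+** high-order bit MASKBIT(BMS-1), and a reference to a generated column
+** marks every column of its table as used. */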
+
+/*
+** Given the name of a column of the form X.Y.Z or Y.Z or just Z, look up
+** that name in the set of source tables in pSrcList and make the pExpr
+** expression node refer back to that source column. The following changes
+** are made to pExpr:
+**
+** pExpr->iDb Set the index in db->aDb[] of the database X
+** (even if X is implied).
+** pExpr->iTable Set to the cursor number for the table obtained
+** from pSrcList.
+** pExpr->y.pTab Points to the Table structure of X.Y (even if
+** X and/or Y are implied.)
+** pExpr->iColumn Set to the column number within the table.
+** pExpr->op Set to TK_COLUMN.
+** pExpr->pLeft Any expression this points to is deleted
+** pExpr->pRight Any expression this points to is deleted.
+**
+** The zDb variable is the name of the database (the "X"). This value may be
+** NULL meaning that name is of the form Y.Z or Z. Any available database
+** can be used. The zTable variable is the name of the table (the "Y"). This
+** value can be NULL if zDb is also NULL. If zTable is NULL it
+** means that the form of the name is Z and that columns from any table
+** can be used.
+**
+** If the name cannot be resolved unambiguously, leave an error message
+** in pParse and return WRC_Abort. Return WRC_Prune on success.
+*/
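+/* Editor's illustration: resolving "a" in "SELECT a FROM t1" (assuming t1
+** has an ordinary column "a") sets pExpr->op=TK_COLUMN, pExpr->y.pTab to
+** the Table for t1, pExpr->iTable to t1's cursor number, and
+** pExpr->iColumn to the index of "a" (-1 if "a" is the INTEGER PRIMARY
+** KEY). */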
+static int lookupName(
+ Parse *pParse, /* The parsing context */
+ const char *zDb, /* Name of the database containing table, or NULL */
+ const char *zTab, /* Name of table containing column, or NULL */
+ const char *zCol, /* Name of the column. */
+ NameContext *pNC, /* The name context used to resolve the name */
+ Expr *pExpr /* Make this EXPR node point to the selected column */
+){
+ int i, j; /* Loop counters */
+ int cnt = 0; /* Number of matching column names */
+ int cntTab = 0; /* Number of matching table names */
+ int nSubquery = 0; /* How many levels of subquery */
+ sqlite3 *db = pParse->db; /* The database connection */
+ struct SrcList_item *pItem; /* Use for looping over pSrcList items */
+ struct SrcList_item *pMatch = 0; /* The matching pSrcList item */
+ NameContext *pTopNC = pNC; /* First namecontext in the list */
+ Schema *pSchema = 0; /* Schema of the expression */
+ int eNewExprOp = TK_COLUMN; /* New value for pExpr->op on success */
+  Table *pTab = 0;                 /* Table holding the row */
+ Column *pCol; /* A column of pTab */
+
+ assert( pNC ); /* the name context cannot be NULL. */
+ assert( zCol ); /* The Z in X.Y.Z cannot be NULL */
+ assert( !ExprHasProperty(pExpr, EP_TokenOnly|EP_Reduced) );
+
+ /* Initialize the node to no-match */
+ pExpr->iTable = -1;
+ ExprSetVVAProperty(pExpr, EP_NoReduce);
+
+ /* Translate the schema name in zDb into a pointer to the corresponding
+ ** schema. If not found, pSchema will remain NULL and nothing will match
+  ** resulting in an appropriate error message toward the end of this routine.
+ */
+ if( zDb ){
+ testcase( pNC->ncFlags & NC_PartIdx );
+ testcase( pNC->ncFlags & NC_IsCheck );
+ if( (pNC->ncFlags & (NC_PartIdx|NC_IsCheck))!=0 ){
+ /* Silently ignore database qualifiers inside CHECK constraints and
+ ** partial indices. Do not raise errors because that might break
+ ** legacy and because it does not hurt anything to just ignore the
+ ** database name. */
+ zDb = 0;
+ }else{
+ for(i=0; i<db->nDb; i++){
+ assert( db->aDb[i].zDbSName );
+ if( sqlite3StrICmp(db->aDb[i].zDbSName,zDb)==0 ){
+ pSchema = db->aDb[i].pSchema;
+ break;
+ }
+ }
+ if( i==db->nDb && sqlite3StrICmp("main", zDb)==0 ){
+ /* This branch is taken when the main database has been renamed
+ ** using SQLITE_DBCONFIG_MAINDBNAME. */
+ pSchema = db->aDb[0].pSchema;
+ zDb = db->aDb[0].zDbSName;
+ }
+ }
+ }
+
+ /* Start at the inner-most context and move outward until a match is found */
+ assert( pNC && cnt==0 );
+ do{
+ ExprList *pEList;
+ SrcList *pSrcList = pNC->pSrcList;
+
+ if( pSrcList ){
+ for(i=0, pItem=pSrcList->a; i<pSrcList->nSrc; i++, pItem++){
+ u8 hCol;
+ pTab = pItem->pTab;
+ assert( pTab!=0 && pTab->zName!=0 );
+ assert( pTab->nCol>0 );
+ if( pItem->pSelect && (pItem->pSelect->selFlags & SF_NestedFrom)!=0 ){
+ int hit = 0;
+ pEList = pItem->pSelect->pEList;
+ for(j=0; j<pEList->nExpr; j++){
+ if( sqlite3MatchEName(&pEList->a[j], zCol, zTab, zDb) ){
+ cnt++;
+ cntTab = 2;
+ pMatch = pItem;
+ pExpr->iColumn = j;
+ hit = 1;
+ }
+ }
+ if( hit || zTab==0 ) continue;
+ }
+ if( zDb && pTab->pSchema!=pSchema ){
+ continue;
+ }
+ if( zTab ){
+ const char *zTabName = pItem->zAlias ? pItem->zAlias : pTab->zName;
+ assert( zTabName!=0 );
+ if( sqlite3StrICmp(zTabName, zTab)!=0 ){
+ continue;
+ }
+ if( IN_RENAME_OBJECT && pItem->zAlias ){
+ sqlite3RenameTokenRemap(pParse, 0, (void*)&pExpr->y.pTab);
+ }
+ }
+ if( 0==(cntTab++) ){
+ pMatch = pItem;
+ }
+ hCol = sqlite3StrIHash(zCol);
+ for(j=0, pCol=pTab->aCol; j<pTab->nCol; j++, pCol++){
+ if( pCol->hName==hCol && sqlite3StrICmp(pCol->zName, zCol)==0 ){
+ /* If there has been exactly one prior match and this match
+ ** is for the right-hand table of a NATURAL JOIN or is in a
+ ** USING clause, then skip this match.
+ */
+ if( cnt==1 ){
+ if( pItem->fg.jointype & JT_NATURAL ) continue;
+ if( nameInUsingClause(pItem->pUsing, zCol) ) continue;
+ }
+ cnt++;
+ pMatch = pItem;
+ /* Substitute the rowid (column -1) for the INTEGER PRIMARY KEY */
+ pExpr->iColumn = j==pTab->iPKey ? -1 : (i16)j;
+ break;
+ }
+ }
+ }
+ if( pMatch ){
+ pExpr->iTable = pMatch->iCursor;
+ pExpr->y.pTab = pMatch->pTab;
+ /* RIGHT JOIN not (yet) supported */
+ assert( (pMatch->fg.jointype & JT_RIGHT)==0 );
+ if( (pMatch->fg.jointype & JT_LEFT)!=0 ){
+ ExprSetProperty(pExpr, EP_CanBeNull);
+ }
+ pSchema = pExpr->y.pTab->pSchema;
+ }
+ } /* if( pSrcList ) */
+
+#if !defined(SQLITE_OMIT_TRIGGER) || !defined(SQLITE_OMIT_UPSERT)
+ /* If we have not already resolved the name, then maybe
+ ** it is a new.* or old.* trigger argument reference. Or
+ ** maybe it is an excluded.* from an upsert.
+ */
+ if( zDb==0 && zTab!=0 && cntTab==0 ){
+ pTab = 0;
+#ifndef SQLITE_OMIT_TRIGGER
+ if( pParse->pTriggerTab!=0 ){
+ int op = pParse->eTriggerOp;
+ assert( op==TK_DELETE || op==TK_UPDATE || op==TK_INSERT );
+ if( op!=TK_DELETE && sqlite3StrICmp("new",zTab) == 0 ){
+ pExpr->iTable = 1;
+ pTab = pParse->pTriggerTab;
+ }else if( op!=TK_INSERT && sqlite3StrICmp("old",zTab)==0 ){
+ pExpr->iTable = 0;
+ pTab = pParse->pTriggerTab;
+ }
+ }
+#endif /* SQLITE_OMIT_TRIGGER */
+#ifndef SQLITE_OMIT_UPSERT
+ if( (pNC->ncFlags & NC_UUpsert)!=0 ){
+ Upsert *pUpsert = pNC->uNC.pUpsert;
+ if( pUpsert && sqlite3StrICmp("excluded",zTab)==0 ){
+ pTab = pUpsert->pUpsertSrc->a[0].pTab;
+ pExpr->iTable = EXCLUDED_TABLE_NUMBER;
+ }
+ }
+#endif /* SQLITE_OMIT_UPSERT */
+
+ if( pTab ){
+ int iCol;
+ u8 hCol = sqlite3StrIHash(zCol);
+ pSchema = pTab->pSchema;
+ cntTab++;
+ for(iCol=0, pCol=pTab->aCol; iCol<pTab->nCol; iCol++, pCol++){
+ if( pCol->hName==hCol && sqlite3StrICmp(pCol->zName, zCol)==0 ){
+ if( iCol==pTab->iPKey ){
+ iCol = -1;
+ }
+ break;
+ }
+ }
+ if( iCol>=pTab->nCol && sqlite3IsRowid(zCol) && VisibleRowid(pTab) ){
+ /* IMP: R-51414-32910 */
+ iCol = -1;
+ }
+ if( iCol<pTab->nCol ){
+ cnt++;
+#ifndef SQLITE_OMIT_UPSERT
+ if( pExpr->iTable==EXCLUDED_TABLE_NUMBER ){
+ testcase( iCol==(-1) );
+ if( IN_RENAME_OBJECT ){
+ pExpr->iColumn = iCol;
+ pExpr->y.pTab = pTab;
+ eNewExprOp = TK_COLUMN;
+ }else{
+ pExpr->iTable = pNC->uNC.pUpsert->regData +
+ sqlite3TableColumnToStorage(pTab, iCol);
+ eNewExprOp = TK_REGISTER;
+ ExprSetProperty(pExpr, EP_Alias);
+ }
+ }else
+#endif /* SQLITE_OMIT_UPSERT */
+ {
+#ifndef SQLITE_OMIT_TRIGGER
+ if( iCol<0 ){
+ pExpr->affExpr = SQLITE_AFF_INTEGER;
+ }else if( pExpr->iTable==0 ){
+ testcase( iCol==31 );
+ testcase( iCol==32 );
+ pParse->oldmask |= (iCol>=32 ? 0xffffffff : (((u32)1)<<iCol));
+ }else{
+ testcase( iCol==31 );
+ testcase( iCol==32 );
+ pParse->newmask |= (iCol>=32 ? 0xffffffff : (((u32)1)<<iCol));
+ }
+ pExpr->y.pTab = pTab;
+ pExpr->iColumn = (i16)iCol;
+ eNewExprOp = TK_TRIGGER;
+#endif /* SQLITE_OMIT_TRIGGER */
+ }
+ }
+ }
+ }
+#endif /* !defined(SQLITE_OMIT_TRIGGER) || !defined(SQLITE_OMIT_UPSERT) */
+
+ /*
+ ** Perhaps the name is a reference to the ROWID
+ */
+ if( cnt==0
+ && cntTab==1
+ && pMatch
+ && (pNC->ncFlags & (NC_IdxExpr|NC_GenCol))==0
+ && sqlite3IsRowid(zCol)
+ && VisibleRowid(pMatch->pTab)
+ ){
+ cnt = 1;
+ pExpr->iColumn = -1;
+ pExpr->affExpr = SQLITE_AFF_INTEGER;
+ }
+
+ /*
+ ** If the input is of the form Z (not Y.Z or X.Y.Z) then the name Z
+  ** might refer to a result-set alias. This happens, for example, when
+ ** we are resolving names in the WHERE clause of the following command:
+ **
+ ** SELECT a+b AS x FROM table WHERE x<10;
+ **
+ ** In cases like this, replace pExpr with a copy of the expression that
+ ** forms the result set entry ("a+b" in the example) and return immediately.
+ ** Note that the expression in the result set should have already been
+ ** resolved by the time the WHERE clause is resolved.
+ **
+ ** The ability to use an output result-set column in the WHERE, GROUP BY,
+ ** or HAVING clauses, or as part of a larger expression in the ORDER BY
+ ** clause is not standard SQL. This is a (goofy) SQLite extension, that
+ ** is supported for backwards compatibility only. Hence, we issue a warning
+ ** on sqlite3_log() whenever the capability is used.
+ */
+ if( (pNC->ncFlags & NC_UEList)!=0
+ && cnt==0
+ && zTab==0
+ ){
+ pEList = pNC->uNC.pEList;
+ assert( pEList!=0 );
+ for(j=0; j<pEList->nExpr; j++){
+ char *zAs = pEList->a[j].zEName;
+ if( pEList->a[j].eEName==ENAME_NAME
+ && sqlite3_stricmp(zAs, zCol)==0
+ ){
+ Expr *pOrig;
+ assert( pExpr->pLeft==0 && pExpr->pRight==0 );
+ assert( pExpr->x.pList==0 );
+ assert( pExpr->x.pSelect==0 );
+ pOrig = pEList->a[j].pExpr;
+ if( (pNC->ncFlags&NC_AllowAgg)==0 && ExprHasProperty(pOrig, EP_Agg) ){
+ sqlite3ErrorMsg(pParse, "misuse of aliased aggregate %s", zAs);
+ return WRC_Abort;
+ }
+ if( ExprHasProperty(pOrig, EP_Win)
+ && ((pNC->ncFlags&NC_AllowWin)==0 || pNC!=pTopNC )
+ ){
+ sqlite3ErrorMsg(pParse, "misuse of aliased window function %s",zAs);
+ return WRC_Abort;
+ }
+ if( sqlite3ExprVectorSize(pOrig)!=1 ){
+ sqlite3ErrorMsg(pParse, "row value misused");
+ return WRC_Abort;
+ }
+ resolveAlias(pParse, pEList, j, pExpr, "", nSubquery);
+ cnt = 1;
+ pMatch = 0;
+ assert( zTab==0 && zDb==0 );
+ if( IN_RENAME_OBJECT ){
+ sqlite3RenameTokenRemap(pParse, 0, (void*)pExpr);
+ }
+ goto lookupname_end;
+ }
+ }
+ }
+
+ /* Advance to the next name context. The loop will exit when either
+ ** we have a match (cnt>0) or when we run out of name contexts.
+ */
+ if( cnt ) break;
+ pNC = pNC->pNext;
+ nSubquery++;
+ }while( pNC );
+
+
+ /*
+ ** If X and Y are NULL (in other words if only the column name Z is
+ ** supplied) and the value of Z is enclosed in double-quotes, then
+ ** Z is a string literal if it doesn't match any column names. In that
+ ** case, we need to return right away and not make any changes to
+ ** pExpr.
+ **
+ ** Because no reference was made to outer contexts, the pNC->nRef
+ ** fields are not changed in any context.
+ */
+ if( cnt==0 && zTab==0 ){
+ assert( pExpr->op==TK_ID );
+ if( ExprHasProperty(pExpr,EP_DblQuoted)
+ && areDoubleQuotedStringsEnabled(db, pTopNC)
+ ){
+ /* If a double-quoted identifier does not match any known column name,
+ ** then treat it as a string.
+ **
+ ** This hack was added in the early days of SQLite in a misguided attempt
+ ** to be compatible with MySQL 3.x, which used double-quotes for strings.
+ ** I now sorely regret putting in this hack. The effect of this hack is
+ ** that misspelled identifier names are silently converted into strings
+ ** rather than causing an error, to the frustration of countless
+ ** programmers. To all those frustrated programmers, my apologies.
+ **
+ ** Someday, I hope to get rid of this hack. Unfortunately there is
+ ** a huge amount of legacy SQL that uses it. So for now, we just
+ ** issue a warning.
+ */
+ sqlite3_log(SQLITE_WARNING,
+ "double-quoted string literal: \"%w\"", zCol);
+#ifdef SQLITE_ENABLE_NORMALIZE
+ sqlite3VdbeAddDblquoteStr(db, pParse->pVdbe, zCol);
+#endif
+ pExpr->op = TK_STRING;
+ pExpr->y.pTab = 0;
+ return WRC_Prune;
+ }
+ if( sqlite3ExprIdToTrueFalse(pExpr) ){
+ return WRC_Prune;
+ }
+ }
+
+ /*
+  ** cnt==0 means there was no match. cnt>1 means there were two or
+ ** more matches. Either way, we have an error.
+ */
+ if( cnt!=1 ){
+ const char *zErr;
+ zErr = cnt==0 ? "no such column" : "ambiguous column name";
+ if( zDb ){
+ sqlite3ErrorMsg(pParse, "%s: %s.%s.%s", zErr, zDb, zTab, zCol);
+ }else if( zTab ){
+ sqlite3ErrorMsg(pParse, "%s: %s.%s", zErr, zTab, zCol);
+ }else{
+ sqlite3ErrorMsg(pParse, "%s: %s", zErr, zCol);
+ }
+ pParse->checkSchema = 1;
+ pTopNC->nErr++;
+ }
+
+ /* If a column from a table in pSrcList is referenced, then record
+ ** this fact in the pSrcList.a[].colUsed bitmask. Column 0 causes
+ ** bit 0 to be set. Column 1 sets bit 1. And so forth. Bit 63 is
+ ** set if the 63rd or any subsequent column is used.
+ **
+ ** The colUsed mask is an optimization used to help determine if an
+ ** index is a covering index. The correct answer is still obtained
+ ** if the mask contains extra set bits. However, it is important to
+ ** avoid setting bits beyond the maximum column number of the table.
+ ** (See ticket [b92e5e8ec2cdbaa1]).
+ **
+ ** If a generated column is referenced, set bits for every column
+ ** of the table.
+ */
+ if( pExpr->iColumn>=0 && pMatch!=0 ){
+ pMatch->colUsed |= sqlite3ExprColUsed(pExpr);
+ }
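+  /* For example (an illustrative sketch): a reference to the third
+  ** column (iColumn==2) ORs MASKBIT(2)==0x4 into colUsed, while a
+  ** reference to column 70 of a wide table sets bit 63, the shared
+  ** overflow bit for columns 63 and beyond. */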
+
+ /* Clean up and return
+ */
+ sqlite3ExprDelete(db, pExpr->pLeft);
+ pExpr->pLeft = 0;
+ sqlite3ExprDelete(db, pExpr->pRight);
+ pExpr->pRight = 0;
+ pExpr->op = eNewExprOp;
+ ExprSetProperty(pExpr, EP_Leaf);
+lookupname_end:
+ if( cnt==1 ){
+ assert( pNC!=0 );
+ if( !ExprHasProperty(pExpr, EP_Alias) ){
+ sqlite3AuthRead(pParse, pExpr, pSchema, pNC->pSrcList);
+ }
+ /* Increment the nRef value on all name contexts from TopNC up to
+ ** the point where the name matched. */
+ for(;;){
+ assert( pTopNC!=0 );
+ pTopNC->nRef++;
+ if( pTopNC==pNC ) break;
+ pTopNC = pTopNC->pNext;
+ }
+ return WRC_Prune;
+ } else {
+ return WRC_Abort;
+ }
+}
+
+/*
+** Allocate and return a pointer to an expression to load the column iCol
+** from datasource iSrc in SrcList pSrc.
+*/
+SQLITE_PRIVATE Expr *sqlite3CreateColumnExpr(sqlite3 *db, SrcList *pSrc, int iSrc, int iCol){
+ Expr *p = sqlite3ExprAlloc(db, TK_COLUMN, 0, 0);
+ if( p ){
+ struct SrcList_item *pItem = &pSrc->a[iSrc];
+ Table *pTab = p->y.pTab = pItem->pTab;
+ p->iTable = pItem->iCursor;
+ if( p->y.pTab->iPKey==iCol ){
+ p->iColumn = -1;
+ }else{
+ p->iColumn = (ynVar)iCol;
+ if( (pTab->tabFlags & TF_HasGenerated)!=0
+ && (pTab->aCol[iCol].colFlags & COLFLAG_GENERATED)!=0
+ ){
+ testcase( pTab->nCol==63 );
+ testcase( pTab->nCol==64 );
+ pItem->colUsed = pTab->nCol>=64 ? ALLBITS : MASKBIT(pTab->nCol)-1;
+ }else{
+ testcase( iCol==BMS );
+ testcase( iCol==BMS-1 );
+ pItem->colUsed |= ((Bitmask)1)<<(iCol>=BMS ? BMS-1 : iCol);
+ }
+ }
+ }
+ return p;
+}
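+
+/* Usage sketch (hypothetical arguments): sqlite3CreateColumnExpr(db, pSrc,
+** 0, 2) builds a TK_COLUMN Expr that reads the third column of the first
+** FROM-clause term and records that use in pSrc->a[0].colUsed.
+*/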
+
+/*
+** Report an error that an expression is not valid for some set of
+** pNC->ncFlags values determined by validMask.
+**
+** static void notValid(
+** Parse *pParse, // Leave error message here
+** NameContext *pNC, // The name context
+** const char *zMsg, // Type of error
+** int validMask, // Set of contexts for which prohibited
+** Expr *pExpr // Invalidate this expression on error
+** ){...}
+**
+** As an optimization, since the conditional is almost always false
+** (because errors are rare), the conditional is moved outside of the
+** function call using a macro.
+*/
+static void notValidImpl(
+ Parse *pParse, /* Leave error message here */
+ NameContext *pNC, /* The name context */
+ const char *zMsg, /* Type of error */
+ Expr *pExpr /* Invalidate this expression on error */
+){
+ const char *zIn = "partial index WHERE clauses";
+ if( pNC->ncFlags & NC_IdxExpr ) zIn = "index expressions";
+#ifndef SQLITE_OMIT_CHECK
+ else if( pNC->ncFlags & NC_IsCheck ) zIn = "CHECK constraints";
+#endif
+#ifndef SQLITE_OMIT_GENERATED_COLUMNS
+ else if( pNC->ncFlags & NC_GenCol ) zIn = "generated columns";
+#endif
+ sqlite3ErrorMsg(pParse, "%s prohibited in %s", zMsg, zIn);
+ if( pExpr ) pExpr->op = TK_NULL;
+}
+#define sqlite3ResolveNotValid(P,N,M,X,E) \
+ assert( ((X)&~(NC_IsCheck|NC_PartIdx|NC_IdxExpr|NC_GenCol))==0 ); \
+ if( ((N)->ncFlags & (X))!=0 ) notValidImpl(P,N,M,E);
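+
+/* For example (an illustrative sketch), the subquery below is rejected
+** through this mechanism with the message "subqueries prohibited in
+** CHECK constraints":
+**
+**    CREATE TABLE t1(x, CHECK( x IN (SELECT 1) ));
+*/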
+
+/*
+** Expression p should encode a floating point value between 0.0 and 1.0.
+** Return 134217728 (2^27) times this value. Or return -1 if p is not a
+** floating point value between 0.0 and 1.0.
+*/
+static int exprProbability(Expr *p){
+ double r = -1.0;
+ if( p->op!=TK_FLOAT ) return -1;
+ sqlite3AtoF(p->u.zToken, &r, sqlite3Strlen30(p->u.zToken), SQLITE_UTF8);
+ assert( r>=0.0 );
+ if( r>1.0 ) return -1;
+ return (int)(r*134217728.0);
+}
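+
+/* For example (a sketch): in likelihood(x, 0.5) the second argument
+** reaches exprProbability() as a TK_FLOAT token and the routine returns
+** (int)(0.5*134217728.0) == 67108864.
+*/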
+
+/*
+** This routine is a callback for sqlite3WalkExpr().
+**
+** Resolve symbolic names into TK_COLUMN operators for the current
+** node in the expression tree. Return 0 to continue the search down
+** the tree or 2 to abort the tree walk.
+**
+** This routine also does error checking and name resolution for
+** function names. The operator for aggregate functions is changed
+** to TK_AGG_FUNCTION.
+*/
+static int resolveExprStep(Walker *pWalker, Expr *pExpr){
+ NameContext *pNC;
+ Parse *pParse;
+
+ pNC = pWalker->u.pNC;
+ assert( pNC!=0 );
+ pParse = pNC->pParse;
+ assert( pParse==pWalker->pParse );
+
+#ifndef NDEBUG
+ if( pNC->pSrcList && pNC->pSrcList->nAlloc>0 ){
+ SrcList *pSrcList = pNC->pSrcList;
+ int i;
+ for(i=0; i<pNC->pSrcList->nSrc; i++){
+ assert( pSrcList->a[i].iCursor>=0 && pSrcList->a[i].iCursor<pParse->nTab);
+ }
+ }
+#endif
+ switch( pExpr->op ){
+
+ /* The special operator TK_ROW means use the rowid for the first
+ ** column in the FROM clause. This is used by the LIMIT and ORDER BY
+ ** clause processing on UPDATE and DELETE statements, and by
+ ** UPDATE ... FROM statement processing.
+ */
+ case TK_ROW: {
+ SrcList *pSrcList = pNC->pSrcList;
+ struct SrcList_item *pItem;
+ assert( pSrcList && pSrcList->nSrc>=1 );
+ pItem = pSrcList->a;
+ pExpr->op = TK_COLUMN;
+ pExpr->y.pTab = pItem->pTab;
+ pExpr->iTable = pItem->iCursor;
+ pExpr->iColumn--;
+ pExpr->affExpr = SQLITE_AFF_INTEGER;
+ break;
+ }
+
+ /* A column name: ID
+ ** Or table name and column name: ID.ID
+ ** Or a database, table and column: ID.ID.ID
+ **
+    ** The TK_ID and TK_DOT cases are combined so that there will only
+ ** be one call to lookupName(). Then the compiler will in-line
+ ** lookupName() for a size reduction and performance increase.
+ */
+ case TK_ID:
+ case TK_DOT: {
+ const char *zColumn;
+ const char *zTable;
+ const char *zDb;
+ Expr *pRight;
+
+ if( pExpr->op==TK_ID ){
+ zDb = 0;
+ zTable = 0;
+ zColumn = pExpr->u.zToken;
+ }else{
+ Expr *pLeft = pExpr->pLeft;
+ testcase( pNC->ncFlags & NC_IdxExpr );
+ testcase( pNC->ncFlags & NC_GenCol );
+ sqlite3ResolveNotValid(pParse, pNC, "the \".\" operator",
+ NC_IdxExpr|NC_GenCol, 0);
+ pRight = pExpr->pRight;
+ if( pRight->op==TK_ID ){
+ zDb = 0;
+ }else{
+ assert( pRight->op==TK_DOT );
+ zDb = pLeft->u.zToken;
+ pLeft = pRight->pLeft;
+ pRight = pRight->pRight;
+ }
+ zTable = pLeft->u.zToken;
+ zColumn = pRight->u.zToken;
+ if( IN_RENAME_OBJECT ){
+ sqlite3RenameTokenRemap(pParse, (void*)pExpr, (void*)pRight);
+ sqlite3RenameTokenRemap(pParse, (void*)&pExpr->y.pTab, (void*)pLeft);
+ }
+ }
+ return lookupName(pParse, zDb, zTable, zColumn, pNC, pExpr);
+ }
+
+ /* Resolve function names
+ */
+ case TK_FUNCTION: {
+ ExprList *pList = pExpr->x.pList; /* The argument list */
+ int n = pList ? pList->nExpr : 0; /* Number of arguments */
+ int no_such_func = 0; /* True if no such function exists */
+ int wrong_num_args = 0; /* True if wrong number of arguments */
+ int is_agg = 0; /* True if is an aggregate function */
+ int nId; /* Number of characters in function name */
+ const char *zId; /* The function name. */
+ FuncDef *pDef; /* Information about the function */
+ u8 enc = ENC(pParse->db); /* The database encoding */
+ int savedAllowFlags = (pNC->ncFlags & (NC_AllowAgg | NC_AllowWin));
+#ifndef SQLITE_OMIT_WINDOWFUNC
+ Window *pWin = (IsWindowFunc(pExpr) ? pExpr->y.pWin : 0);
+#endif
+ assert( !ExprHasProperty(pExpr, EP_xIsSelect) );
+ zId = pExpr->u.zToken;
+ nId = sqlite3Strlen30(zId);
+ pDef = sqlite3FindFunction(pParse->db, zId, n, enc, 0);
+ if( pDef==0 ){
+ pDef = sqlite3FindFunction(pParse->db, zId, -2, enc, 0);
+ if( pDef==0 ){
+ no_such_func = 1;
+ }else{
+ wrong_num_args = 1;
+ }
+ }else{
+ is_agg = pDef->xFinalize!=0;
+ if( pDef->funcFlags & SQLITE_FUNC_UNLIKELY ){
+ ExprSetProperty(pExpr, EP_Unlikely);
+ if( n==2 ){
+ pExpr->iTable = exprProbability(pList->a[1].pExpr);
+ if( pExpr->iTable<0 ){
+ sqlite3ErrorMsg(pParse,
+ "second argument to likelihood() must be a "
+ "constant between 0.0 and 1.0");
+ pNC->nErr++;
+ }
+ }else{
+ /* EVIDENCE-OF: R-61304-29449 The unlikely(X) function is
+ ** equivalent to likelihood(X, 0.0625).
+ ** EVIDENCE-OF: R-01283-11636 The unlikely(X) function is
+ ** short-hand for likelihood(X,0.0625).
+ ** EVIDENCE-OF: R-36850-34127 The likely(X) function is short-hand
+ ** for likelihood(X,0.9375).
+ ** EVIDENCE-OF: R-53436-40973 The likely(X) function is equivalent
+ ** to likelihood(X,0.9375). */
+ /* TUNING: unlikely() probability is 0.0625. likely() is 0.9375 */
+ pExpr->iTable = pDef->zName[0]=='u' ? 8388608 : 125829120;
+ }
+ }
+#ifndef SQLITE_OMIT_AUTHORIZATION
+ {
+ int auth = sqlite3AuthCheck(pParse, SQLITE_FUNCTION, 0,pDef->zName,0);
+ if( auth!=SQLITE_OK ){
+ if( auth==SQLITE_DENY ){
+ sqlite3ErrorMsg(pParse, "not authorized to use function: %s",
+ pDef->zName);
+ pNC->nErr++;
+ }
+ pExpr->op = TK_NULL;
+ return WRC_Prune;
+ }
+ }
+#endif
+ if( pDef->funcFlags & (SQLITE_FUNC_CONSTANT|SQLITE_FUNC_SLOCHNG) ){
+ /* For the purposes of the EP_ConstFunc flag, date and time
+ ** functions and other functions that change slowly are considered
+ ** constant because they are constant for the duration of one query.
+ ** This allows them to be factored out of inner loops. */
+ ExprSetProperty(pExpr,EP_ConstFunc);
+ }
+ if( (pDef->funcFlags & SQLITE_FUNC_CONSTANT)==0 ){
+ /* Clearly non-deterministic functions like random(), but also
+ ** date/time functions that use 'now', and other functions like
+ ** sqlite_version() that might change over time cannot be used
+ ** in an index or generated column. Curiously, they can be used
+ ** in a CHECK constraint. SQLServer, MySQL, and PostgreSQL all
+        ** allow this. */
+ sqlite3ResolveNotValid(pParse, pNC, "non-deterministic functions",
+ NC_IdxExpr|NC_PartIdx|NC_GenCol, 0);
+ }else{
+ assert( (NC_SelfRef & 0xff)==NC_SelfRef ); /* Must fit in 8 bits */
+ pExpr->op2 = pNC->ncFlags & NC_SelfRef;
+ if( pNC->ncFlags & NC_FromDDL ) ExprSetProperty(pExpr, EP_FromDDL);
+ }
+ if( (pDef->funcFlags & SQLITE_FUNC_INTERNAL)!=0
+ && pParse->nested==0
+ && (pParse->db->mDbFlags & DBFLAG_InternalFunc)==0
+ ){
+ /* Internal-use-only functions are disallowed unless the
+ ** SQL is being compiled using sqlite3NestedParse() or
+        ** the SQLITE_TESTCTRL_INTERNAL_FUNCTIONS test-control has been
+        ** used to activate internal functions for testing purposes */
+ no_such_func = 1;
+ pDef = 0;
+ }else
+ if( (pDef->funcFlags & (SQLITE_FUNC_DIRECT|SQLITE_FUNC_UNSAFE))!=0
+ && !IN_RENAME_OBJECT
+ ){
+ sqlite3ExprFunctionUsable(pParse, pExpr, pDef);
+ }
+ }
+
+ if( 0==IN_RENAME_OBJECT ){
+#ifndef SQLITE_OMIT_WINDOWFUNC
+ assert( is_agg==0 || (pDef->funcFlags & SQLITE_FUNC_MINMAX)
+ || (pDef->xValue==0 && pDef->xInverse==0)
+ || (pDef->xValue && pDef->xInverse && pDef->xSFunc && pDef->xFinalize)
+ );
+ if( pDef && pDef->xValue==0 && pWin ){
+ sqlite3ErrorMsg(pParse,
+ "%.*s() may not be used as a window function", nId, zId
+ );
+ pNC->nErr++;
+ }else if(
+ (is_agg && (pNC->ncFlags & NC_AllowAgg)==0)
+ || (is_agg && (pDef->funcFlags&SQLITE_FUNC_WINDOW) && !pWin)
+ || (is_agg && pWin && (pNC->ncFlags & NC_AllowWin)==0)
+ ){
+ const char *zType;
+ if( (pDef->funcFlags & SQLITE_FUNC_WINDOW) || pWin ){
+ zType = "window";
+ }else{
+ zType = "aggregate";
+ }
+ sqlite3ErrorMsg(pParse, "misuse of %s function %.*s()",zType,nId,zId);
+ pNC->nErr++;
+ is_agg = 0;
+ }
+#else
+ if( (is_agg && (pNC->ncFlags & NC_AllowAgg)==0) ){
+ sqlite3ErrorMsg(pParse,"misuse of aggregate function %.*s()",nId,zId);
+ pNC->nErr++;
+ is_agg = 0;
+ }
+#endif
+ else if( no_such_func && pParse->db->init.busy==0
+#ifdef SQLITE_ENABLE_UNKNOWN_SQL_FUNCTION
+ && pParse->explain==0
+#endif
+ ){
+ sqlite3ErrorMsg(pParse, "no such function: %.*s", nId, zId);
+ pNC->nErr++;
+ }else if( wrong_num_args ){
+ sqlite3ErrorMsg(pParse,"wrong number of arguments to function %.*s()",
+ nId, zId);
+ pNC->nErr++;
+ }
+#ifndef SQLITE_OMIT_WINDOWFUNC
+ else if( is_agg==0 && ExprHasProperty(pExpr, EP_WinFunc) ){
+ sqlite3ErrorMsg(pParse,
+ "FILTER may not be used with non-aggregate %.*s()",
+ nId, zId
+ );
+ pNC->nErr++;
+ }
+#endif
+ if( is_agg ){
+ /* Window functions may not be arguments of aggregate functions.
+ ** Or arguments of other window functions. But aggregate functions
+ ** may be arguments for window functions. */
+#ifndef SQLITE_OMIT_WINDOWFUNC
+ pNC->ncFlags &= ~(NC_AllowWin | (!pWin ? NC_AllowAgg : 0));
+#else
+ pNC->ncFlags &= ~NC_AllowAgg;
+#endif
+ }
+ }
+#ifndef SQLITE_OMIT_WINDOWFUNC
+ else if( ExprHasProperty(pExpr, EP_WinFunc) ){
+ is_agg = 1;
+ }
+#endif
+ sqlite3WalkExprList(pWalker, pList);
+ if( is_agg ){
+#ifndef SQLITE_OMIT_WINDOWFUNC
+ if( pWin ){
+ Select *pSel = pNC->pWinSelect;
+ assert( pWin==pExpr->y.pWin );
+ if( IN_RENAME_OBJECT==0 ){
+ sqlite3WindowUpdate(pParse, pSel ? pSel->pWinDefn : 0, pWin, pDef);
+ }
+ sqlite3WalkExprList(pWalker, pWin->pPartition);
+ sqlite3WalkExprList(pWalker, pWin->pOrderBy);
+ sqlite3WalkExpr(pWalker, pWin->pFilter);
+ sqlite3WindowLink(pSel, pWin);
+ pNC->ncFlags |= NC_HasWin;
+ }else
+#endif /* SQLITE_OMIT_WINDOWFUNC */
+ {
+ NameContext *pNC2 = pNC;
+ pExpr->op = TK_AGG_FUNCTION;
+ pExpr->op2 = 0;
+#ifndef SQLITE_OMIT_WINDOWFUNC
+ if( ExprHasProperty(pExpr, EP_WinFunc) ){
+ sqlite3WalkExpr(pWalker, pExpr->y.pWin->pFilter);
+ }
+#endif
+ while( pNC2 && !sqlite3FunctionUsesThisSrc(pExpr, pNC2->pSrcList) ){
+ pExpr->op2++;
+ pNC2 = pNC2->pNext;
+ }
+ assert( pDef!=0 || IN_RENAME_OBJECT );
+ if( pNC2 && pDef ){
+ assert( SQLITE_FUNC_MINMAX==NC_MinMaxAgg );
+ testcase( (pDef->funcFlags & SQLITE_FUNC_MINMAX)!=0 );
+ pNC2->ncFlags |= NC_HasAgg | (pDef->funcFlags & SQLITE_FUNC_MINMAX);
+
+ }
+ }
+ pNC->ncFlags |= savedAllowFlags;
+ }
+ /* FIX ME: Compute pExpr->affinity based on the expected return
+ ** type of the function
+ */
+ return WRC_Prune;
+ }
+#ifndef SQLITE_OMIT_SUBQUERY
+ case TK_SELECT:
+ case TK_EXISTS: testcase( pExpr->op==TK_EXISTS );
+#endif
+ case TK_IN: {
+ testcase( pExpr->op==TK_IN );
+ if( ExprHasProperty(pExpr, EP_xIsSelect) ){
+ int nRef = pNC->nRef;
+ testcase( pNC->ncFlags & NC_IsCheck );
+ testcase( pNC->ncFlags & NC_PartIdx );
+ testcase( pNC->ncFlags & NC_IdxExpr );
+ testcase( pNC->ncFlags & NC_GenCol );
+ sqlite3ResolveNotValid(pParse, pNC, "subqueries",
+ NC_IsCheck|NC_PartIdx|NC_IdxExpr|NC_GenCol, pExpr);
+ sqlite3WalkSelect(pWalker, pExpr->x.pSelect);
+ assert( pNC->nRef>=nRef );
+ if( nRef!=pNC->nRef ){
+ ExprSetProperty(pExpr, EP_VarSelect);
+ pNC->ncFlags |= NC_VarSelect;
+ }
+ }
+ break;
+ }
+ case TK_VARIABLE: {
+ testcase( pNC->ncFlags & NC_IsCheck );
+ testcase( pNC->ncFlags & NC_PartIdx );
+ testcase( pNC->ncFlags & NC_IdxExpr );
+ testcase( pNC->ncFlags & NC_GenCol );
+ sqlite3ResolveNotValid(pParse, pNC, "parameters",
+ NC_IsCheck|NC_PartIdx|NC_IdxExpr|NC_GenCol, pExpr);
+ break;
+ }
+ case TK_IS:
+ case TK_ISNOT: {
+ Expr *pRight = sqlite3ExprSkipCollateAndLikely(pExpr->pRight);
+ assert( !ExprHasProperty(pExpr, EP_Reduced) );
+ /* Handle special cases of "x IS TRUE", "x IS FALSE", "x IS NOT TRUE",
+ ** and "x IS NOT FALSE". */
+ if( pRight && pRight->op==TK_ID ){
+ int rc = resolveExprStep(pWalker, pRight);
+ if( rc==WRC_Abort ) return WRC_Abort;
+ if( pRight->op==TK_TRUEFALSE ){
+ pExpr->op2 = pExpr->op;
+ pExpr->op = TK_TRUTH;
+ return WRC_Continue;
+ }
+ }
+ /* no break */ deliberate_fall_through
+ }
+ case TK_BETWEEN:
+ case TK_EQ:
+ case TK_NE:
+ case TK_LT:
+ case TK_LE:
+ case TK_GT:
+ case TK_GE: {
+ int nLeft, nRight;
+ if( pParse->db->mallocFailed ) break;
+ assert( pExpr->pLeft!=0 );
+ nLeft = sqlite3ExprVectorSize(pExpr->pLeft);
+ if( pExpr->op==TK_BETWEEN ){
+ nRight = sqlite3ExprVectorSize(pExpr->x.pList->a[0].pExpr);
+ if( nRight==nLeft ){
+ nRight = sqlite3ExprVectorSize(pExpr->x.pList->a[1].pExpr);
+ }
+ }else{
+ assert( pExpr->pRight!=0 );
+ nRight = sqlite3ExprVectorSize(pExpr->pRight);
+ }
+ if( nLeft!=nRight ){
+ testcase( pExpr->op==TK_EQ );
+ testcase( pExpr->op==TK_NE );
+ testcase( pExpr->op==TK_LT );
+ testcase( pExpr->op==TK_LE );
+ testcase( pExpr->op==TK_GT );
+ testcase( pExpr->op==TK_GE );
+ testcase( pExpr->op==TK_IS );
+ testcase( pExpr->op==TK_ISNOT );
+ testcase( pExpr->op==TK_BETWEEN );
+ sqlite3ErrorMsg(pParse, "row value misused");
+ }
+ break;
+ }
+ }
+ return (pParse->nErr || pParse->db->mallocFailed) ? WRC_Abort : WRC_Continue;
+}
+
+/*
+** pEList is a list of expressions which are really the result set of
+** a SELECT statement. pE is a term in an ORDER BY or GROUP BY clause.
+** This routine checks to see if pE is a simple identifier which corresponds
+** to the AS-name of one of the terms of the expression list. If it is,
+** this routine returns an integer between 1 and N where N is the number of
+** elements in pEList, corresponding to the matching entry. If there is
+** no match, or if pE is not a simple identifier, then this routine
+** returns 0.
+**
+** pEList has been resolved. pE has not.
+*/
+static int resolveAsName(
+ Parse *pParse, /* Parsing context for error messages */
+ ExprList *pEList, /* List of expressions to scan */
+ Expr *pE /* Expression we are trying to match */
+){
+ int i; /* Loop counter */
+
+ UNUSED_PARAMETER(pParse);
+
+ if( pE->op==TK_ID ){
+ char *zCol = pE->u.zToken;
+ for(i=0; i<pEList->nExpr; i++){
+ if( pEList->a[i].eEName==ENAME_NAME
+ && sqlite3_stricmp(pEList->a[i].zEName, zCol)==0
+ ){
+ return i+1;
+ }
+ }
+ }
+ return 0;
+}
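+
+/* Example (a sketch over a hypothetical schema): in
+**
+**    SELECT a+b AS total FROM t1 ORDER BY total;
+**
+** the ORDER BY term "total" matches the AS-name of the first result
+** column, so resolveAsName() returns 1.
+*/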
+
+/*
+** pE is a pointer to an expression which is a single term in the
+** ORDER BY of a compound SELECT. The expression has not been
+** name resolved.
+**
+** At the point this routine is called, we already know that the
+** ORDER BY term is not an integer index into the result set. That
+** case is handled by the calling routine.
+**
+** Attempt to match pE against result set columns in the left-most
+** SELECT statement. Return the index i of the matching column,
+** as an indication to the caller that it should sort by the i-th column.
+** The left-most column is 1. In other words, the value returned is the
+** same integer value that would be used in the SQL statement to indicate
+** the column.
+**
+** If there is no match, return 0. Return -1 if an error occurs.
+*/
+static int resolveOrderByTermToExprList(
+ Parse *pParse, /* Parsing context for error messages */
+ Select *pSelect, /* The SELECT statement with the ORDER BY clause */
+ Expr *pE /* The specific ORDER BY term */
+){
+ int i; /* Loop counter */
+ ExprList *pEList; /* The columns of the result set */
+ NameContext nc; /* Name context for resolving pE */
+ sqlite3 *db; /* Database connection */
+ int rc; /* Return code from subprocedures */
+ u8 savedSuppErr; /* Saved value of db->suppressErr */
+
+ assert( sqlite3ExprIsInteger(pE, &i)==0 );
+ pEList = pSelect->pEList;
+
+ /* Resolve all names in the ORDER BY term expression
+ */
+ memset(&nc, 0, sizeof(nc));
+ nc.pParse = pParse;
+ nc.pSrcList = pSelect->pSrc;
+ nc.uNC.pEList = pEList;
+ nc.ncFlags = NC_AllowAgg|NC_UEList;
+ nc.nErr = 0;
+ db = pParse->db;
+ savedSuppErr = db->suppressErr;
+ if( IN_RENAME_OBJECT==0 ) db->suppressErr = 1;
+ rc = sqlite3ResolveExprNames(&nc, pE);
+ db->suppressErr = savedSuppErr;
+ if( rc ) return 0;
+
+ /* Try to match the ORDER BY expression against an expression
+  ** in the result set. Return a 1-based index of the matching
+ ** result-set entry.
+ */
+ for(i=0; i<pEList->nExpr; i++){
+ if( sqlite3ExprCompare(0, pEList->a[i].pExpr, pE, -1)<2 ){
+ return i+1;
+ }
+ }
+
+ /* If no match, return 0. */
+ return 0;
+}
+
+/*
+** Generate an ORDER BY or GROUP BY term out-of-range error.
+*/
+static void resolveOutOfRangeError(
+ Parse *pParse, /* The error context into which to write the error */
+ const char *zType, /* "ORDER" or "GROUP" */
+ int i, /* The index (1-based) of the term out of range */
+ int mx /* Largest permissible value of i */
+){
+ sqlite3ErrorMsg(pParse,
+ "%r %s BY term out of range - should be "
+ "between 1 and %d", i, zType, mx);
+}
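+
+/* For example (a sketch): "SELECT x FROM t1 ORDER BY 2" on a one-column
+** result set reports "1st ORDER BY term out of range - should be
+** between 1 and 1".
+*/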
+
+/*
+** Analyze the ORDER BY clause in a compound SELECT statement. Modify
+** each term of the ORDER BY clause to be a constant integer between 1
+** and N where N is the number of columns in the compound SELECT.
+**
+** ORDER BY terms that are already an integer between 1 and N are
+** unmodified. ORDER BY terms that are integers outside the range of
+** 1 through N generate an error. ORDER BY terms that are expressions
+** are matched against result set expressions of the compound SELECT
+** beginning with the left-most SELECT and working toward the right.
+** At the first match, the ORDER BY expression is transformed into
+** the integer column number.
+**
+** Return the number of errors seen.
+*/
+static int resolveCompoundOrderBy(
+ Parse *pParse, /* Parsing context. Leave error messages here */
+ Select *pSelect /* The SELECT statement containing the ORDER BY */
+){
+ int i;
+ ExprList *pOrderBy;
+ ExprList *pEList;
+ sqlite3 *db;
+ int moreToDo = 1;
+
+ pOrderBy = pSelect->pOrderBy;
+ if( pOrderBy==0 ) return 0;
+ db = pParse->db;
+ if( pOrderBy->nExpr>db->aLimit[SQLITE_LIMIT_COLUMN] ){
+ sqlite3ErrorMsg(pParse, "too many terms in ORDER BY clause");
+ return 1;
+ }
+ for(i=0; i<pOrderBy->nExpr; i++){
+ pOrderBy->a[i].done = 0;
+ }
+ pSelect->pNext = 0;
+ while( pSelect->pPrior ){
+ pSelect->pPrior->pNext = pSelect;
+ pSelect = pSelect->pPrior;
+ }
+ while( pSelect && moreToDo ){
+ struct ExprList_item *pItem;
+ moreToDo = 0;
+ pEList = pSelect->pEList;
+ assert( pEList!=0 );
+ for(i=0, pItem=pOrderBy->a; i<pOrderBy->nExpr; i++, pItem++){
+ int iCol = -1;
+ Expr *pE, *pDup;
+ if( pItem->done ) continue;
+ pE = sqlite3ExprSkipCollateAndLikely(pItem->pExpr);
+ if( sqlite3ExprIsInteger(pE, &iCol) ){
+ if( iCol<=0 || iCol>pEList->nExpr ){
+ resolveOutOfRangeError(pParse, "ORDER", i+1, pEList->nExpr);
+ return 1;
+ }
+ }else{
+ iCol = resolveAsName(pParse, pEList, pE);
+ if( iCol==0 ){
+ /* Now test if expression pE matches one of the values returned
+ ** by pSelect. In the usual case this is done by duplicating the
+ ** expression, resolving any symbols in it, and then comparing
+ ** it against each expression returned by the SELECT statement.
+ ** Once the comparisons are finished, the duplicate expression
+ ** is deleted.
+ **
+ ** Or, if this is running as part of an ALTER TABLE operation,
+ ** resolve the symbols in the actual expression, not a duplicate.
+ ** And, if one of the comparisons is successful, leave the expression
+ ** as is instead of transforming it to an integer as in the usual
+ ** case. This allows the code in alter.c to modify column
+          ** references within the ORDER BY expression as required. */
+ if( IN_RENAME_OBJECT ){
+ pDup = pE;
+ }else{
+ pDup = sqlite3ExprDup(db, pE, 0);
+ }
+ if( !db->mallocFailed ){
+ assert(pDup);
+ iCol = resolveOrderByTermToExprList(pParse, pSelect, pDup);
+ }
+ if( !IN_RENAME_OBJECT ){
+ sqlite3ExprDelete(db, pDup);
+ }
+ }
+ }
+ if( iCol>0 ){
+ /* Convert the ORDER BY term into an integer column number iCol,
+ ** taking care to preserve the COLLATE clause if it exists */
+ if( !IN_RENAME_OBJECT ){
+ Expr *pNew = sqlite3Expr(db, TK_INTEGER, 0);
+ if( pNew==0 ) return 1;
+ pNew->flags |= EP_IntValue;
+ pNew->u.iValue = iCol;
+ if( pItem->pExpr==pE ){
+ pItem->pExpr = pNew;
+ }else{
+ Expr *pParent = pItem->pExpr;
+ assert( pParent->op==TK_COLLATE );
+ while( pParent->pLeft->op==TK_COLLATE ) pParent = pParent->pLeft;
+ assert( pParent->pLeft==pE );
+ pParent->pLeft = pNew;
+ }
+ sqlite3ExprDelete(db, pE);
+ pItem->u.x.iOrderByCol = (u16)iCol;
+ }
+ pItem->done = 1;
+ }else{
+ moreToDo = 1;
+ }
+ }
+ pSelect = pSelect->pNext;
+ }
+ for(i=0; i<pOrderBy->nExpr; i++){
+ if( pOrderBy->a[i].done==0 ){
+ sqlite3ErrorMsg(pParse, "%r ORDER BY term does not match any "
+ "column in the result set", i+1);
+ return 1;
+ }
+ }
+ return 0;
+}
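+
+/* Example (a sketch over hypothetical tables): in
+**
+**    SELECT a FROM t1 UNION SELECT b FROM t2 ORDER BY a;
+**
+** the term "a" matches the first result column of the left-most SELECT,
+** so the ORDER BY term is converted into the integer 1.
+*/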
+
+/*
+** Check every term in the ORDER BY or GROUP BY clause pOrderBy of
+** the SELECT statement pSelect. If any term is a reference to a
+** result set expression (as determined by the ExprList.a.u.x.iOrderByCol
+** field) then convert that term into a copy of the corresponding result set
+** column.
+**
+** If any errors are detected, add an error message to pParse and
+** return non-zero. Return zero if no errors are seen.
+*/
+SQLITE_PRIVATE int sqlite3ResolveOrderGroupBy(
+ Parse *pParse, /* Parsing context. Leave error messages here */
+ Select *pSelect, /* The SELECT statement containing the clause */
+ ExprList *pOrderBy, /* The ORDER BY or GROUP BY clause to be processed */
+ const char *zType /* "ORDER" or "GROUP" */
+){
+ int i;
+ sqlite3 *db = pParse->db;
+ ExprList *pEList;
+ struct ExprList_item *pItem;
+
+ if( pOrderBy==0 || pParse->db->mallocFailed || IN_RENAME_OBJECT ) return 0;
+ if( pOrderBy->nExpr>db->aLimit[SQLITE_LIMIT_COLUMN] ){
+ sqlite3ErrorMsg(pParse, "too many terms in %s BY clause", zType);
+ return 1;
+ }
+ pEList = pSelect->pEList;
+ assert( pEList!=0 ); /* sqlite3SelectNew() guarantees this */
+ for(i=0, pItem=pOrderBy->a; i<pOrderBy->nExpr; i++, pItem++){
+ if( pItem->u.x.iOrderByCol ){
+ if( pItem->u.x.iOrderByCol>pEList->nExpr ){
+ resolveOutOfRangeError(pParse, zType, i+1, pEList->nExpr);
+ return 1;
+ }
+ resolveAlias(pParse, pEList, pItem->u.x.iOrderByCol-1, pItem->pExpr,
+ zType,0);
+ }
+ }
+ return 0;
+}
+
+#ifndef SQLITE_OMIT_WINDOWFUNC
+/*
+** Walker callback for windowRemoveExprFromSelect().
+*/
+static int resolveRemoveWindowsCb(Walker *pWalker, Expr *pExpr){
+ UNUSED_PARAMETER(pWalker);
+ if( ExprHasProperty(pExpr, EP_WinFunc) ){
+ Window *pWin = pExpr->y.pWin;
+ sqlite3WindowUnlinkFromSelect(pWin);
+ }
+ return WRC_Continue;
+}
+
+/*
+** Remove any Window objects owned by the expression pExpr from the
+** Select.pWin list of Select object pSelect.
+*/
+static void windowRemoveExprFromSelect(Select *pSelect, Expr *pExpr){
+ if( pSelect->pWin ){
+ Walker sWalker;
+ memset(&sWalker, 0, sizeof(Walker));
+ sWalker.xExprCallback = resolveRemoveWindowsCb;
+ sWalker.u.pSelect = pSelect;
+ sqlite3WalkExpr(&sWalker, pExpr);
+ }
+}
+#else
+# define windowRemoveExprFromSelect(a, b)
+#endif /* SQLITE_OMIT_WINDOWFUNC */
+
+/*
+** pOrderBy is an ORDER BY or GROUP BY clause in SELECT statement pSelect.
+** The Name context of the SELECT statement is pNC. zType is either
+** "ORDER" or "GROUP" depending on which type of clause pOrderBy is.
+**
+** This routine resolves each term of the clause into an expression.
+** If the order-by term is an integer I between 1 and N (where N is the
+** number of columns in the result set of the SELECT) then the expression
+** in the resolution is a copy of the I-th result-set expression. If
+** the order-by term is an identifier that corresponds to the AS-name of
+** a result-set expression, then the term resolves to a copy of the
+** result-set expression. Otherwise, the expression is resolved in
+** the usual way - using sqlite3ResolveExprNames().
+**
+** This routine returns the number of errors. If errors occur, then
+** an appropriate error message might be left in pParse. (OOM errors
+** excepted.)
+*/
+static int resolveOrderGroupBy(
+ NameContext *pNC, /* The name context of the SELECT statement */
+ Select *pSelect, /* The SELECT statement holding pOrderBy */
+ ExprList *pOrderBy, /* An ORDER BY or GROUP BY clause to resolve */
+ const char *zType /* Either "ORDER" or "GROUP", as appropriate */
+){
+ int i, j; /* Loop counters */
+ int iCol; /* Column number */
+ struct ExprList_item *pItem; /* A term of the ORDER BY clause */
+ Parse *pParse; /* Parsing context */
+ int nResult; /* Number of terms in the result set */
+
+ if( pOrderBy==0 ) return 0;
+ nResult = pSelect->pEList->nExpr;
+ pParse = pNC->pParse;
+ for(i=0, pItem=pOrderBy->a; i<pOrderBy->nExpr; i++, pItem++){
+ Expr *pE = pItem->pExpr;
+ Expr *pE2 = sqlite3ExprSkipCollateAndLikely(pE);
+ if( zType[0]!='G' ){
+ iCol = resolveAsName(pParse, pSelect->pEList, pE2);
+ if( iCol>0 ){
+ /* If an AS-name match is found, mark this ORDER BY column as being
+ ** a copy of the iCol-th result-set column. The subsequent call to
+ ** sqlite3ResolveOrderGroupBy() will convert the expression to a
+ ** copy of the iCol-th result-set expression. */
+ pItem->u.x.iOrderByCol = (u16)iCol;
+ continue;
+ }
+ }
+ if( sqlite3ExprIsInteger(pE2, &iCol) ){
+ /* The ORDER BY term is an integer constant. Again, set the column
+ ** number so that sqlite3ResolveOrderGroupBy() will convert the
+ ** order-by term to a copy of the result-set expression */
+ if( iCol<1 || iCol>0xffff ){
+ resolveOutOfRangeError(pParse, zType, i+1, nResult);
+ return 1;
+ }
+ pItem->u.x.iOrderByCol = (u16)iCol;
+ continue;
+ }
+
+ /* Otherwise, treat the ORDER BY term as an ordinary expression */
+ pItem->u.x.iOrderByCol = 0;
+ if( sqlite3ResolveExprNames(pNC, pE) ){
+ return 1;
+ }
+ for(j=0; j<pSelect->pEList->nExpr; j++){
+ if( sqlite3ExprCompare(0, pE, pSelect->pEList->a[j].pExpr, -1)==0 ){
+        /* Since this expression is being changed into a reference
+ ** to an identical expression in the result set, remove all Window
+ ** objects belonging to the expression from the Select.pWin list. */
+ windowRemoveExprFromSelect(pSelect, pE);
+ pItem->u.x.iOrderByCol = j+1;
+ }
+ }
+ }
+ return sqlite3ResolveOrderGroupBy(pParse, pSelect, pOrderBy, zType);
+}
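+
+/* Example (a sketch): in "SELECT a, b FROM t1 ORDER BY 2" the integer
+** term is range-checked here and iOrderByCol is set to 2, so the final
+** call to sqlite3ResolveOrderGroupBy() replaces the term with a copy of
+** the result-set expression "b".
+*/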
+
+/*
+** Resolve names in the SELECT statement p and all of its descendants.
+*/
+static int resolveSelectStep(Walker *pWalker, Select *p){
+ NameContext *pOuterNC; /* Context that contains this SELECT */
+ NameContext sNC; /* Name context of this SELECT */
+ int isCompound; /* True if p is a compound select */
+ int nCompound; /* Number of compound terms processed so far */
+ Parse *pParse; /* Parsing context */
+ int i; /* Loop counter */
+ ExprList *pGroupBy; /* The GROUP BY clause */
+ Select *pLeftmost; /* Left-most of SELECT of a compound */
+ sqlite3 *db; /* Database connection */
+
+
+ assert( p!=0 );
+ if( p->selFlags & SF_Resolved ){
+ return WRC_Prune;
+ }
+ pOuterNC = pWalker->u.pNC;
+ pParse = pWalker->pParse;
+ db = pParse->db;
+
+ /* Normally sqlite3SelectExpand() will be called first and will have
+ ** already expanded this SELECT. However, if this is a subquery within
+ ** an expression, sqlite3ResolveExprNames() will be called without a
+ ** prior call to sqlite3SelectExpand(). When that happens, let
+ ** sqlite3SelectPrep() do all of the processing for this SELECT.
+ ** sqlite3SelectPrep() will invoke both sqlite3SelectExpand() and
+ ** this routine in the correct order.
+ */
+ if( (p->selFlags & SF_Expanded)==0 ){
+ sqlite3SelectPrep(pParse, p, pOuterNC);
+ return (pParse->nErr || db->mallocFailed) ? WRC_Abort : WRC_Prune;
+ }
+
+ isCompound = p->pPrior!=0;
+ nCompound = 0;
+ pLeftmost = p;
+ while( p ){
+ assert( (p->selFlags & SF_Expanded)!=0 );
+ assert( (p->selFlags & SF_Resolved)==0 );
+ p->selFlags |= SF_Resolved;
+
+ /* Resolve the expressions in the LIMIT and OFFSET clauses. These
+ ** are not allowed to refer to any names, so pass an empty NameContext.
+ */
+ memset(&sNC, 0, sizeof(sNC));
+ sNC.pParse = pParse;
+ sNC.pWinSelect = p;
+ if( sqlite3ResolveExprNames(&sNC, p->pLimit) ){
+ return WRC_Abort;
+ }
+
+    /* If the SF_Converted flag is set, then this Select object was
+    ** created by the convertCompoundSelectToSubquery() function.
+ ** In this case the ORDER BY clause (p->pOrderBy) should be resolved
+ ** as if it were part of the sub-query, not the parent. This block
+ ** moves the pOrderBy down to the sub-query. It will be moved back
+ ** after the names have been resolved. */
+ if( p->selFlags & SF_Converted ){
+ Select *pSub = p->pSrc->a[0].pSelect;
+ assert( p->pSrc->nSrc==1 && p->pOrderBy );
+ assert( pSub->pPrior && pSub->pOrderBy==0 );
+ pSub->pOrderBy = p->pOrderBy;
+ p->pOrderBy = 0;
+ }
+
+ /* Recursively resolve names in all subqueries
+ */
+ for(i=0; i<p->pSrc->nSrc; i++){
+ struct SrcList_item *pItem = &p->pSrc->a[i];
+ if( pItem->pSelect && (pItem->pSelect->selFlags & SF_Resolved)==0 ){
+ NameContext *pNC; /* Used to iterate name contexts */
+ int nRef = 0; /* Refcount for pOuterNC and outer contexts */
+ const char *zSavedContext = pParse->zAuthContext;
+
+ /* Count the total number of references to pOuterNC and all of its
+ ** parent contexts. After resolving references to expressions in
+ ** pItem->pSelect, check if this value has changed. If so, then
+ ** SELECT statement pItem->pSelect must be correlated. Set the
+ ** pItem->fg.isCorrelated flag if this is the case. */
+ for(pNC=pOuterNC; pNC; pNC=pNC->pNext) nRef += pNC->nRef;
+
+ if( pItem->zName ) pParse->zAuthContext = pItem->zName;
+ sqlite3ResolveSelectNames(pParse, pItem->pSelect, pOuterNC);
+ pParse->zAuthContext = zSavedContext;
+ if( pParse->nErr || db->mallocFailed ) return WRC_Abort;
+
+ for(pNC=pOuterNC; pNC; pNC=pNC->pNext) nRef -= pNC->nRef;
+ assert( pItem->fg.isCorrelated==0 && nRef<=0 );
+ pItem->fg.isCorrelated = (nRef!=0);
+ }
+ }
+
+ /* Set up the local name-context to pass to sqlite3ResolveExprNames() to
+ ** resolve the result-set expression list.
+ */
+ sNC.ncFlags = NC_AllowAgg|NC_AllowWin;
+ sNC.pSrcList = p->pSrc;
+ sNC.pNext = pOuterNC;
+
+ /* Resolve names in the result set. */
+ if( sqlite3ResolveExprListNames(&sNC, p->pEList) ) return WRC_Abort;
+ sNC.ncFlags &= ~NC_AllowWin;
+
+ /* If there are no aggregate functions in the result-set, and no GROUP BY
+ ** expression, do not allow aggregates in any of the other expressions.
+ */
+ assert( (p->selFlags & SF_Aggregate)==0 );
+ pGroupBy = p->pGroupBy;
+ if( pGroupBy || (sNC.ncFlags & NC_HasAgg)!=0 ){
+ assert( NC_MinMaxAgg==SF_MinMaxAgg );
+ p->selFlags |= SF_Aggregate | (sNC.ncFlags&NC_MinMaxAgg);
+ }else{
+ sNC.ncFlags &= ~NC_AllowAgg;
+ }
+
+ /* If a HAVING clause is present, then there must be a GROUP BY clause.
+ */
+ if( p->pHaving && !pGroupBy ){
+ sqlite3ErrorMsg(pParse, "a GROUP BY clause is required before HAVING");
+ return WRC_Abort;
+ }
+
+ /* Add the output column list to the name-context before parsing the
+ ** other expressions in the SELECT statement. This is so that
+ ** expressions in the WHERE clause (etc.) can refer to expressions by
+ ** aliases in the result set.
+ **
+ ** Minor point: If this is the case, then the expression will be
+ ** re-evaluated for each reference to it.
+ */
+ assert( (sNC.ncFlags & (NC_UAggInfo|NC_UUpsert))==0 );
+ sNC.uNC.pEList = p->pEList;
+ sNC.ncFlags |= NC_UEList;
+ if( sqlite3ResolveExprNames(&sNC, p->pHaving) ) return WRC_Abort;
+ if( sqlite3ResolveExprNames(&sNC, p->pWhere) ) return WRC_Abort;
+
+ /* Resolve names in table-valued-function arguments */
+ for(i=0; i<p->pSrc->nSrc; i++){
+ struct SrcList_item *pItem = &p->pSrc->a[i];
+ if( pItem->fg.isTabFunc
+ && sqlite3ResolveExprListNames(&sNC, pItem->u1.pFuncArg)
+ ){
+ return WRC_Abort;
+ }
+ }
+
+ /* The ORDER BY and GROUP BY clauses may not refer to terms in
+ ** outer queries
+ */
+ sNC.pNext = 0;
+ sNC.ncFlags |= NC_AllowAgg|NC_AllowWin;
+
+ /* If this is a converted compound query, move the ORDER BY clause from
+ ** the sub-query back to the parent query. At this point each term
+ ** within the ORDER BY clause has been transformed to an integer value.
+ ** These integers will be replaced by copies of the corresponding result
+ ** set expressions by the call to resolveOrderGroupBy() below. */
+ if( p->selFlags & SF_Converted ){
+ Select *pSub = p->pSrc->a[0].pSelect;
+ p->pOrderBy = pSub->pOrderBy;
+ pSub->pOrderBy = 0;
+ }
+
+ /* Process the ORDER BY clause for singleton SELECT statements.
+    ** The ORDER BY clause for compound SELECT statements is handled
+ ** below, after all of the result-sets for all of the elements of
+ ** the compound have been resolved.
+ **
+ ** If there is an ORDER BY clause on a term of a compound-select other
+ ** than the right-most term, then that is a syntax error. But the error
+ ** is not detected until much later, and so we need to go ahead and
+ ** resolve those symbols on the incorrect ORDER BY for consistency.
+ */
+ if( isCompound<=nCompound /* Defer right-most ORDER BY of a compound */
+ && resolveOrderGroupBy(&sNC, p, p->pOrderBy, "ORDER")
+ ){
+ return WRC_Abort;
+ }
+ if( db->mallocFailed ){
+ return WRC_Abort;
+ }
+ sNC.ncFlags &= ~NC_AllowWin;
+
+ /* Resolve the GROUP BY clause. At the same time, make sure
+ ** the GROUP BY clause does not contain aggregate functions.
+ */
+ if( pGroupBy ){
+ struct ExprList_item *pItem;
+
+ if( resolveOrderGroupBy(&sNC, p, pGroupBy, "GROUP") || db->mallocFailed ){
+ return WRC_Abort;
+ }
+ for(i=0, pItem=pGroupBy->a; i<pGroupBy->nExpr; i++, pItem++){
+ if( ExprHasProperty(pItem->pExpr, EP_Agg) ){
+ sqlite3ErrorMsg(pParse, "aggregate functions are not allowed in "
+ "the GROUP BY clause");
+ return WRC_Abort;
+ }
+ }
+ }
+
+#ifndef SQLITE_OMIT_WINDOWFUNC
+ if( IN_RENAME_OBJECT ){
+ Window *pWin;
+ for(pWin=p->pWinDefn; pWin; pWin=pWin->pNextWin){
+ if( sqlite3ResolveExprListNames(&sNC, pWin->pOrderBy)
+ || sqlite3ResolveExprListNames(&sNC, pWin->pPartition)
+ ){
+ return WRC_Abort;
+ }
+ }
+ }
+#endif
+
+ /* If this is part of a compound SELECT, check that it has the right
+ ** number of expressions in the select list. */
+ if( p->pNext && p->pEList->nExpr!=p->pNext->pEList->nExpr ){
+ sqlite3SelectWrongNumTermsError(pParse, p->pNext);
+ return WRC_Abort;
+ }
+
+ /* Advance to the next term of the compound
+ */
+ p = p->pPrior;
+ nCompound++;
+ }
+
+ /* Resolve the ORDER BY on a compound SELECT after all terms of
+ ** the compound have been resolved.
+ */
+ if( isCompound && resolveCompoundOrderBy(pParse, pLeftmost) ){
+ return WRC_Abort;
+ }
+
+ return WRC_Prune;
+}
+
+/*
+** This routine walks an expression tree and resolves references to
+** table columns and result-set columns. At the same time, do error
+** checking on function usage and set a flag if any aggregate functions
+** are seen.
+**
+** To resolve table columns references we look for nodes (or subtrees) of the
+** form X.Y.Z or Y.Z or just Z where
+**
+** X: The name of a database. Ex: "main" or "temp" or
+** the symbolic name assigned to an ATTACH-ed database.
+**
+** Y: The name of a table in a FROM clause. Or in a trigger
+** one of the special names "old" or "new".
+**
+** Z: The name of a column in table Y.
+**
+** The node at the root of the subtree is modified as follows:
+**
+** Expr.op Changed to TK_COLUMN
+** Expr.pTab Points to the Table object for X.Y
+** Expr.iColumn The column index in X.Y. -1 for the rowid.
+** Expr.iTable The VDBE cursor number for X.Y
+**
+**
+** To resolve result-set references, look for expression nodes of the
+** form Z (with no X and Y prefix) where the Z matches the right-hand
+** side of an AS clause in the result-set of a SELECT. The Z expression
+** is replaced by a copy of the left-hand side of the result-set expression.
+** Table-name and function resolution occurs on the substituted expression
+** tree. For example, in:
+**
+** SELECT a+b AS x, c+d AS y FROM t1 ORDER BY x;
+**
+** The "x" term of the order by is replaced by "a+b" to render:
+**
+** SELECT a+b AS x, c+d AS y FROM t1 ORDER BY a+b;
+**
+** Function calls are checked to make sure that the function is
+** defined and that the correct number of arguments are specified.
+** If the function is an aggregate function, then the NC_HasAgg flag is
+** set and the opcode is changed from TK_FUNCTION to TK_AGG_FUNCTION.
+** If an expression contains aggregate functions then the EP_Agg
+** property on the expression is set.
+**
+** An error message is left in pParse if anything is amiss. The number
+** of errors is returned.
+*/
+SQLITE_PRIVATE int sqlite3ResolveExprNames(
+ NameContext *pNC, /* Namespace to resolve expressions in. */
+ Expr *pExpr /* The expression to be analyzed. */
+){
+ int savedHasAgg;
+ Walker w;
+
+ if( pExpr==0 ) return SQLITE_OK;
+ savedHasAgg = pNC->ncFlags & (NC_HasAgg|NC_MinMaxAgg|NC_HasWin);
+ pNC->ncFlags &= ~(NC_HasAgg|NC_MinMaxAgg|NC_HasWin);
+ w.pParse = pNC->pParse;
+ w.xExprCallback = resolveExprStep;
+ w.xSelectCallback = resolveSelectStep;
+ w.xSelectCallback2 = 0;
+ w.u.pNC = pNC;
+#if SQLITE_MAX_EXPR_DEPTH>0
+ w.pParse->nHeight += pExpr->nHeight;
+ if( sqlite3ExprCheckHeight(w.pParse, w.pParse->nHeight) ){
+ return SQLITE_ERROR;
+ }
+#endif
+ sqlite3WalkExpr(&w, pExpr);
+#if SQLITE_MAX_EXPR_DEPTH>0
+ w.pParse->nHeight -= pExpr->nHeight;
+#endif
+ assert( EP_Agg==NC_HasAgg );
+ assert( EP_Win==NC_HasWin );
+ testcase( pNC->ncFlags & NC_HasAgg );
+ testcase( pNC->ncFlags & NC_HasWin );
+ ExprSetProperty(pExpr, pNC->ncFlags & (NC_HasAgg|NC_HasWin) );
+ pNC->ncFlags |= savedHasAgg;
+ return pNC->nErr>0 || w.pParse->nErr>0;
+}
+
+/*
+** Resolve all names for all expressions in an expression list. This is
+** just like sqlite3ResolveExprNames() except that it works for an expression
+** list rather than a single expression.
+*/
+SQLITE_PRIVATE int sqlite3ResolveExprListNames(
+ NameContext *pNC, /* Namespace to resolve expressions in. */
+ ExprList *pList /* The expression list to be analyzed. */
+){
+ int i;
+ int savedHasAgg = 0;
+ Walker w;
+ if( pList==0 ) return WRC_Continue;
+ w.pParse = pNC->pParse;
+ w.xExprCallback = resolveExprStep;
+ w.xSelectCallback = resolveSelectStep;
+ w.xSelectCallback2 = 0;
+ w.u.pNC = pNC;
+ savedHasAgg = pNC->ncFlags & (NC_HasAgg|NC_MinMaxAgg|NC_HasWin);
+ pNC->ncFlags &= ~(NC_HasAgg|NC_MinMaxAgg|NC_HasWin);
+ for(i=0; i<pList->nExpr; i++){
+ Expr *pExpr = pList->a[i].pExpr;
+ if( pExpr==0 ) continue;
+#if SQLITE_MAX_EXPR_DEPTH>0
+ w.pParse->nHeight += pExpr->nHeight;
+ if( sqlite3ExprCheckHeight(w.pParse, w.pParse->nHeight) ){
+ return WRC_Abort;
+ }
+#endif
+ sqlite3WalkExpr(&w, pExpr);
+#if SQLITE_MAX_EXPR_DEPTH>0
+ w.pParse->nHeight -= pExpr->nHeight;
+#endif
+ assert( EP_Agg==NC_HasAgg );
+ assert( EP_Win==NC_HasWin );
+ testcase( pNC->ncFlags & NC_HasAgg );
+ testcase( pNC->ncFlags & NC_HasWin );
+ if( pNC->ncFlags & (NC_HasAgg|NC_MinMaxAgg|NC_HasWin) ){
+ ExprSetProperty(pExpr, pNC->ncFlags & (NC_HasAgg|NC_HasWin) );
+ savedHasAgg |= pNC->ncFlags & (NC_HasAgg|NC_MinMaxAgg|NC_HasWin);
+ pNC->ncFlags &= ~(NC_HasAgg|NC_MinMaxAgg|NC_HasWin);
+ }
+ if( pNC->nErr>0 || w.pParse->nErr>0 ) return WRC_Abort;
+ }
+ pNC->ncFlags |= savedHasAgg;
+ return WRC_Continue;
+}
+
+/*
+** Resolve all names in all expressions of a SELECT and in all
+** descendants of the SELECT, including compounds off of p->pPrior,
+** subqueries in expressions, and subqueries used as FROM clause
+** terms.
+**
+** See sqlite3ResolveExprNames() for a description of the kinds of
+** transformations that occur.
+**
+** All SELECT statements should have been expanded using
+** sqlite3SelectExpand() prior to invoking this routine.
+*/
+SQLITE_PRIVATE void sqlite3ResolveSelectNames(
+ Parse *pParse, /* The parser context */
+ Select *p, /* The SELECT statement being coded. */
+ NameContext *pOuterNC /* Name context for parent SELECT statement */
+){
+ Walker w;
+
+ assert( p!=0 );
+ w.xExprCallback = resolveExprStep;
+ w.xSelectCallback = resolveSelectStep;
+ w.xSelectCallback2 = 0;
+ w.pParse = pParse;
+ w.u.pNC = pOuterNC;
+ sqlite3WalkSelect(&w, p);
+}
+
+/*
+** Resolve names in expressions that can only reference a single table
+** or which cannot reference any tables at all. Examples:
+**
+** "type" flag
+** ------------
+** (1) CHECK constraints NC_IsCheck
+** (2) WHERE clauses on partial indices NC_PartIdx
+** (3) Expressions in indexes on expressions NC_IdxExpr
+** (4) Expression arguments to VACUUM INTO. 0
+** (5) GENERATED ALWAYS as expressions NC_GenCol
+**
+** In all cases except (4), the Expr.iTable value for Expr.op==TK_COLUMN
+** nodes of the expression is set to -1 and the Expr.iColumn value is
+** set to the column number. In case (4), TK_COLUMN nodes cause an error.
+**
+** Any errors cause an error message to be set in pParse.
+*/
+SQLITE_PRIVATE int sqlite3ResolveSelfReference(
+ Parse *pParse, /* Parsing context */
+ Table *pTab, /* The table being referenced, or NULL */
+ int type, /* NC_IsCheck, NC_PartIdx, NC_IdxExpr, NC_GenCol, or 0 */
+ Expr *pExpr, /* Expression to resolve. May be NULL. */
+ ExprList *pList /* Expression list to resolve. May be NULL. */
+){
+ SrcList sSrc; /* Fake SrcList for pParse->pNewTable */
+ NameContext sNC; /* Name context for pParse->pNewTable */
+ int rc;
+
+ assert( type==0 || pTab!=0 );
+ assert( type==NC_IsCheck || type==NC_PartIdx || type==NC_IdxExpr
+ || type==NC_GenCol || pTab==0 );
+ memset(&sNC, 0, sizeof(sNC));
+ memset(&sSrc, 0, sizeof(sSrc));
+ if( pTab ){
+ sSrc.nSrc = 1;
+ sSrc.a[0].zName = pTab->zName;
+ sSrc.a[0].pTab = pTab;
+ sSrc.a[0].iCursor = -1;
+ if( pTab->pSchema!=pParse->db->aDb[1].pSchema ){
+ /* Cause EP_FromDDL to be set on TK_FUNCTION nodes of non-TEMP
+ ** schema elements */
+ type |= NC_FromDDL;
+ }
+ }
+ sNC.pParse = pParse;
+ sNC.pSrcList = &sSrc;
+ sNC.ncFlags = type | NC_IsDDL;
+ if( (rc = sqlite3ResolveExprNames(&sNC, pExpr))!=SQLITE_OK ) return rc;
+ if( pList ) rc = sqlite3ResolveExprListNames(&sNC, pList);
+ return rc;
+}
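+
+/* Usage sketch (hypothetical schema): while parsing
+**
+**    CREATE TABLE t1(a INT, CHECK(a>0));
+**
+** the CHECK expression is resolved by a call equivalent to
+** sqlite3ResolveSelfReference(pParse, pTab, NC_IsCheck, pExpr, 0), after
+** which the TK_COLUMN node for "a" has iTable==-1 and iColumn==0.
+*/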
+
+/************** End of resolve.c *********************************************/
+/************** Begin file expr.c ********************************************/
+/*
+** 2001 September 15
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+** This file contains routines used for analyzing expressions and
+** for generating VDBE code that evaluates expressions in SQLite.
+*/
+/* #include "sqliteInt.h" */
+
+/* Forward declarations */
+static void exprCodeBetween(Parse*,Expr*,int,void(*)(Parse*,Expr*,int,int),int);
+static int exprCodeVector(Parse *pParse, Expr *p, int *piToFree);
+
+/*
+** Return the affinity character for a single column of a table.
+*/
+SQLITE_PRIVATE char sqlite3TableColumnAffinity(Table *pTab, int iCol){
+ assert( iCol<pTab->nCol );
+ return iCol>=0 ? pTab->aCol[iCol].affinity : SQLITE_AFF_INTEGER;
+}
+
+/*
+** Return the 'affinity' of the expression pExpr if any.
+**
+** If pExpr is a column, a reference to a column via an 'AS' alias,
+** or a sub-select with a column as the return value, then the
+** affinity of that column is returned. Otherwise, 0x00 is returned,
+** indicating no affinity for the expression.
+**
+** i.e. the WHERE clause expressions in the following statements all
+** have an affinity:
+**
+** CREATE TABLE t1(a);
+** SELECT * FROM t1 WHERE a;
+** SELECT a AS b FROM t1 WHERE b;
+** SELECT * FROM t1 WHERE (select a from t1);
+*/
+SQLITE_PRIVATE char sqlite3ExprAffinity(const Expr *pExpr){
+ int op;
+ while( ExprHasProperty(pExpr, EP_Skip) ){
+ assert( pExpr->op==TK_COLLATE || pExpr->op==TK_IF_NULL_ROW );
+ pExpr = pExpr->pLeft;
+ assert( pExpr!=0 );
+ }
+ op = pExpr->op;
+ if( op==TK_SELECT ){
+ assert( pExpr->flags&EP_xIsSelect );
+ assert( pExpr->x.pSelect!=0 );
+ assert( pExpr->x.pSelect->pEList!=0 );
+ assert( pExpr->x.pSelect->pEList->a[0].pExpr!=0 );
+ return sqlite3ExprAffinity(pExpr->x.pSelect->pEList->a[0].pExpr);
+ }
+ if( op==TK_REGISTER ) op = pExpr->op2;
+#ifndef SQLITE_OMIT_CAST
+ if( op==TK_CAST ){
+ assert( !ExprHasProperty(pExpr, EP_IntValue) );
+ return sqlite3AffinityType(pExpr->u.zToken, 0);
+ }
+#endif
+ if( (op==TK_AGG_COLUMN || op==TK_COLUMN) && pExpr->y.pTab ){
+ return sqlite3TableColumnAffinity(pExpr->y.pTab, pExpr->iColumn);
+ }
+ if( op==TK_SELECT_COLUMN ){
+ assert( pExpr->pLeft->flags&EP_xIsSelect );
+ return sqlite3ExprAffinity(
+ pExpr->pLeft->x.pSelect->pEList->a[pExpr->iColumn].pExpr
+ );
+ }
+ if( op==TK_VECTOR ){
+ return sqlite3ExprAffinity(pExpr->x.pList->a[0].pExpr);
+ }
+ return pExpr->affExpr;
+}
+
+/*
+** Set the collating sequence for expression pExpr to be the collating
+** sequence named by pToken. Return a pointer to a new Expr node that
+** implements the COLLATE operator.
+**
+** If a memory allocation error occurs, that fact is recorded in pParse->db
+** and the pExpr parameter is returned unchanged.
+*/
+SQLITE_PRIVATE Expr *sqlite3ExprAddCollateToken(
+ Parse *pParse, /* Parsing context */
+ Expr *pExpr, /* Add the "COLLATE" clause to this expression */
+ const Token *pCollName, /* Name of collating sequence */
+ int dequote /* True to dequote pCollName */
+){
+ if( pCollName->n>0 ){
+ Expr *pNew = sqlite3ExprAlloc(pParse->db, TK_COLLATE, pCollName, dequote);
+ if( pNew ){
+ pNew->pLeft = pExpr;
+ pNew->flags |= EP_Collate|EP_Skip;
+ pExpr = pNew;
+ }
+ }
+ return pExpr;
+}
+SQLITE_PRIVATE Expr *sqlite3ExprAddCollateString(Parse *pParse, Expr *pExpr, const char *zC){
+ Token s;
+ assert( zC!=0 );
+ sqlite3TokenInit(&s, (char*)zC);
+ return sqlite3ExprAddCollateToken(pParse, pExpr, &s, 0);
+}
+
+/*
+** Skip over any TK_COLLATE operators.
+*/
+SQLITE_PRIVATE Expr *sqlite3ExprSkipCollate(Expr *pExpr){
+ while( pExpr && ExprHasProperty(pExpr, EP_Skip) ){
+ assert( pExpr->op==TK_COLLATE || pExpr->op==TK_IF_NULL_ROW );
+ pExpr = pExpr->pLeft;
+ }
+ return pExpr;
+}
+
+/*
+** Skip over any TK_COLLATE operators and/or any unlikely()
+** or likelihood() or likely() functions at the root of an
+** expression.
+*/
+SQLITE_PRIVATE Expr *sqlite3ExprSkipCollateAndLikely(Expr *pExpr){
+ while( pExpr && ExprHasProperty(pExpr, EP_Skip|EP_Unlikely) ){
+ if( ExprHasProperty(pExpr, EP_Unlikely) ){
+ assert( !ExprHasProperty(pExpr, EP_xIsSelect) );
+ assert( pExpr->x.pList->nExpr>0 );
+ assert( pExpr->op==TK_FUNCTION );
+ pExpr = pExpr->x.pList->a[0].pExpr;
+ }else{
+ assert( pExpr->op==TK_COLLATE || pExpr->op==TK_IF_NULL_ROW );
+ pExpr = pExpr->pLeft;
+ }
+ }
+ return pExpr;
+}
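+
+/* Example (a sketch): given the expression tree for
+** likely(a COLLATE nocase), this routine skips the likely() wrapper and
+** then the TK_COLLATE node, returning the Expr node for "a".
+*/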
+
+/*
+** Return the collation sequence for the expression pExpr. If
+** there is no defined collating sequence, return NULL.
+**
+** See also: sqlite3ExprNNCollSeq()
+**
+** The sqlite3ExprNNCollSeq() routine works the same except that it returns the
+** default collation if pExpr has no defined collation.
+**
+** The collating sequence might be determined by a COLLATE operator
+** or by the presence of a column with a defined collating sequence.
+** COLLATE operators take first precedence. Left operands take
+** precedence over right operands.
+*/
+SQLITE_PRIVATE CollSeq *sqlite3ExprCollSeq(Parse *pParse, const Expr *pExpr){
+ sqlite3 *db = pParse->db;
+ CollSeq *pColl = 0;
+ const Expr *p = pExpr;
+ while( p ){
+ int op = p->op;
+ if( op==TK_REGISTER ) op = p->op2;
+ if( (op==TK_AGG_COLUMN || op==TK_COLUMN || op==TK_TRIGGER)
+ && p->y.pTab!=0
+ ){
+ /* op==TK_REGISTER && p->y.pTab!=0 happens when pExpr was originally
+ ** a TK_COLUMN but was previously evaluated and cached in a register */
+ int j = p->iColumn;
+ if( j>=0 ){
+ const char *zColl = p->y.pTab->aCol[j].zColl;
+ pColl = sqlite3FindCollSeq(db, ENC(db), zColl, 0);
+ }
+ break;
+ }
+ if( op==TK_CAST || op==TK_UPLUS ){
+ p = p->pLeft;
+ continue;
+ }
+ if( op==TK_VECTOR ){
+ p = p->x.pList->a[0].pExpr;
+ continue;
+ }
+ if( op==TK_COLLATE ){
+ pColl = sqlite3GetCollSeq(pParse, ENC(db), 0, p->u.zToken);
+ break;
+ }
+ if( p->flags & EP_Collate ){
+ if( p->pLeft && (p->pLeft->flags & EP_Collate)!=0 ){
+ p = p->pLeft;
+ }else{
+ Expr *pNext = p->pRight;
+ /* The Expr.x union is never used at the same time as Expr.pRight */
+ assert( p->x.pList==0 || p->pRight==0 );
+ if( p->x.pList!=0
+ && !db->mallocFailed
+ && ALWAYS(!ExprHasProperty(p, EP_xIsSelect))
+ ){
+ int i;
+ for(i=0; ALWAYS(i<p->x.pList->nExpr); i++){
+ if( ExprHasProperty(p->x.pList->a[i].pExpr, EP_Collate) ){
+ pNext = p->x.pList->a[i].pExpr;
+ break;
+ }
+ }
+ }
+ p = pNext;
+ }
+ }else{
+ break;
+ }
+ }
+ if( sqlite3CheckCollSeq(pParse, pColl) ){
+ pColl = 0;
+ }
+ return pColl;
+}
+
+/*
+** Return the collation sequence for the expression pExpr. If
+** there is no defined collating sequence, return a pointer to the
+** default collation sequence.
+**
+** See also: sqlite3ExprCollSeq()
+**
+** The sqlite3ExprCollSeq() routine works the same except that it
+** returns NULL if there is no defined collation.
+*/
+SQLITE_PRIVATE CollSeq *sqlite3ExprNNCollSeq(Parse *pParse, const Expr *pExpr){
+ CollSeq *p = sqlite3ExprCollSeq(pParse, pExpr);
+ if( p==0 ) p = pParse->db->pDfltColl;
+ assert( p!=0 );
+ return p;
+}
+
+/*
+** Return TRUE if the two expressions have equivalent collating sequences.
+*/
+SQLITE_PRIVATE int sqlite3ExprCollSeqMatch(Parse *pParse, const Expr *pE1, const Expr *pE2){
+ CollSeq *pColl1 = sqlite3ExprNNCollSeq(pParse, pE1);
+ CollSeq *pColl2 = sqlite3ExprNNCollSeq(pParse, pE2);
+ return sqlite3StrICmp(pColl1->zName, pColl2->zName)==0;
+}
+
+/*
+** pExpr is an operand of a comparison operator. aff2 is the
+** type affinity of the other operand. This routine returns the
+** type affinity that should be used for the comparison operator.
+*/
+SQLITE_PRIVATE char sqlite3CompareAffinity(const Expr *pExpr, char aff2){
+ char aff1 = sqlite3ExprAffinity(pExpr);
+ if( aff1>SQLITE_AFF_NONE && aff2>SQLITE_AFF_NONE ){
+ /* Both sides of the comparison are columns. If one has numeric
+ ** affinity, use that. Otherwise use no affinity.
+ */
+ if( sqlite3IsNumericAffinity(aff1) || sqlite3IsNumericAffinity(aff2) ){
+ return SQLITE_AFF_NUMERIC;
+ }else{
+ return SQLITE_AFF_BLOB;
+ }
+ }else{
+    /* One side is a column, the other is not. Use the column's affinity. */
+ assert( aff1<=SQLITE_AFF_NONE || aff2<=SQLITE_AFF_NONE );
+ return (aff1<=SQLITE_AFF_NONE ? aff2 : aff1) | SQLITE_AFF_NONE;
+ }
+}
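+
+/* Example (a sketch): if pExpr is a TEXT-affinity column and aff2 is
+** SQLITE_AFF_INTEGER, both sides have an affinity and one is numeric,
+** so SQLITE_AFF_NUMERIC is returned and the comparison is performed
+** numerically.
+*/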
+
+/*
+** pExpr is a comparison operator. Return the type affinity that should
+** be applied to both operands prior to doing the comparison.
+*/
+static char comparisonAffinity(const Expr *pExpr){
+ char aff;
+ assert( pExpr->op==TK_EQ || pExpr->op==TK_IN || pExpr->op==TK_LT ||
+ pExpr->op==TK_GT || pExpr->op==TK_GE || pExpr->op==TK_LE ||
+ pExpr->op==TK_NE || pExpr->op==TK_IS || pExpr->op==TK_ISNOT );
+ assert( pExpr->pLeft );
+ aff = sqlite3ExprAffinity(pExpr->pLeft);
+ if( pExpr->pRight ){
+ aff = sqlite3CompareAffinity(pExpr->pRight, aff);
+ }else if( ExprHasProperty(pExpr, EP_xIsSelect) ){
+ aff = sqlite3CompareAffinity(pExpr->x.pSelect->pEList->a[0].pExpr, aff);
+ }else if( aff==0 ){
+ aff = SQLITE_AFF_BLOB;
+ }
+ return aff;
+}
+
+/*
+** pExpr is a comparison expression, e.g. '=', '<', IN(...) etc.
+** idx_affinity is the affinity of an indexed column. Return true
+** if the index with affinity idx_affinity may be used to implement
+** the comparison in pExpr.
+*/
+SQLITE_PRIVATE int sqlite3IndexAffinityOk(const Expr *pExpr, char idx_affinity){
+ char aff = comparisonAffinity(pExpr);
+ if( aff<SQLITE_AFF_TEXT ){
+ return 1;
+ }
+ if( aff==SQLITE_AFF_TEXT ){
+ return idx_affinity==SQLITE_AFF_TEXT;
+ }
+ return sqlite3IsNumericAffinity(idx_affinity);
+}
+
+/*
+** Return the P5 value that should be used for a binary comparison
+** opcode (OP_Eq, OP_Ge etc.) used to compare pExpr1 and pExpr2.
+*/
+static u8 binaryCompareP5(
+ const Expr *pExpr1, /* Left operand */
+ const Expr *pExpr2, /* Right operand */
+ int jumpIfNull /* Extra flags added to P5 */
+){
+ u8 aff = (char)sqlite3ExprAffinity(pExpr2);
+ aff = (u8)sqlite3CompareAffinity(pExpr1, aff) | (u8)jumpIfNull;
+ return aff;
+}
+
+/*
+** Return a pointer to the collation sequence that should be used by
+** a binary comparison operator comparing pLeft and pRight.
+**
+** If the left hand expression has a collating sequence type, then it is
+** used. Otherwise the collation sequence for the right hand expression
+** is used, or the default (BINARY) if neither expression has a collating
+** type.
+**
+** Argument pRight (but not pLeft) may be a null pointer. In this case,
+** it is not considered.
+*/
+SQLITE_PRIVATE CollSeq *sqlite3BinaryCompareCollSeq(
+ Parse *pParse,
+ const Expr *pLeft,
+ const Expr *pRight
+){
+ CollSeq *pColl;
+ assert( pLeft );
+ if( pLeft->flags & EP_Collate ){
+ pColl = sqlite3ExprCollSeq(pParse, pLeft);
+ }else if( pRight && (pRight->flags & EP_Collate)!=0 ){
+ pColl = sqlite3ExprCollSeq(pParse, pRight);
+ }else{
+ pColl = sqlite3ExprCollSeq(pParse, pLeft);
+ if( !pColl ){
+ pColl = sqlite3ExprCollSeq(pParse, pRight);
+ }
+ }
+ return pColl;
+}
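+
+/* Example (a sketch): for "a COLLATE nocase = b COLLATE rtrim" the
+** left-hand COLLATE operator takes precedence, so the comparison uses
+** the nocase collating sequence.
+*/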
+
+/* Expression p is a comparison operator. Return a collation sequence
+** appropriate for the comparison operator.
+**
+** This is normally just a wrapper around sqlite3BinaryCompareCollSeq().
+** However, if the OP_Commuted flag is set, then the order of the operands
+** is reversed in the sqlite3BinaryCompareCollSeq() call so that the
+** correct collating sequence is found.
+*/
+SQLITE_PRIVATE CollSeq *sqlite3ExprCompareCollSeq(Parse *pParse, const Expr *p){
+ if( ExprHasProperty(p, EP_Commuted) ){
+ return sqlite3BinaryCompareCollSeq(pParse, p->pRight, p->pLeft);
+ }else{
+ return sqlite3BinaryCompareCollSeq(pParse, p->pLeft, p->pRight);
+ }
+}
+
+/*
+** Generate code for a comparison operator.
+*/
+static int codeCompare(
+ Parse *pParse, /* The parsing (and code generating) context */
+ Expr *pLeft, /* The left operand */
+ Expr *pRight, /* The right operand */
+ int opcode, /* The comparison opcode */
+ int in1, int in2, /* Register holding operands */
+ int dest, /* Jump here if true. */
+ int jumpIfNull, /* If true, jump if either operand is NULL */
+ int isCommuted /* The comparison has been commuted */
+){
+ int p5;
+ int addr;
+ CollSeq *p4;
+
+ if( pParse->nErr ) return 0;
+ if( isCommuted ){
+ p4 = sqlite3BinaryCompareCollSeq(pParse, pRight, pLeft);
+ }else{
+ p4 = sqlite3BinaryCompareCollSeq(pParse, pLeft, pRight);
+ }
+ p5 = binaryCompareP5(pLeft, pRight, jumpIfNull);
+ addr = sqlite3VdbeAddOp4(pParse->pVdbe, opcode, in2, dest, in1,
+ (void*)p4, P4_COLLSEQ);
+ sqlite3VdbeChangeP5(pParse->pVdbe, (u8)p5);
+ return addr;
+}
+
+/*
+** Return true if expression pExpr is a vector, or false otherwise.
+**
+** A vector is defined as any expression that results in two or more
+** columns of result. Every TK_VECTOR node is a vector because the
+** parser will not generate a TK_VECTOR with fewer than two entries.
+** But a TK_SELECT might be either a vector or a scalar. It is only
+** considered a vector if it has two or more result columns.
+*/
+SQLITE_PRIVATE int sqlite3ExprIsVector(Expr *pExpr){
+ return sqlite3ExprVectorSize(pExpr)>1;
+}
+
+/*
+** If the expression passed as the only argument is of type TK_VECTOR
+** return the number of expressions in the vector. Or, if the expression
+** is a sub-select, return the number of columns in the sub-select. For
+** any other type of expression, return 1.
+*/
+SQLITE_PRIVATE int sqlite3ExprVectorSize(Expr *pExpr){
+ u8 op = pExpr->op;
+ if( op==TK_REGISTER ) op = pExpr->op2;
+ if( op==TK_VECTOR ){
+ return pExpr->x.pList->nExpr;
+ }else if( op==TK_SELECT ){
+ return pExpr->x.pSelect->pEList->nExpr;
+ }else{
+ return 1;
+ }
+}
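+
+/* For illustration: "(a,b,c)" is a TK_VECTOR of size 3, the subquery
+** "(SELECT x, y FROM t)" is a TK_SELECT of size 2, and a scalar
+** expression such as "a+1" has a vector size of 1.
+*/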
+
+/*
+** Return a pointer to a subexpression of pVector that is the i-th
+** column of the vector (numbered starting with 0). The caller must
+** ensure that i is within range.
+**
+** If pVector is really a scalar (and "scalar" here includes subqueries
+** that return a single column!) then return pVector unmodified.
+**
+** pVector retains ownership of the returned subexpression.
+**
+** If the vector is a (SELECT ...) then the expression returned is
+** just the expression for the i-th term of the result set, and may
+** not be ready for evaluation because the table cursor has not yet
+** been positioned.
+*/
+SQLITE_PRIVATE Expr *sqlite3VectorFieldSubexpr(Expr *pVector, int i){
+ assert( i<sqlite3ExprVectorSize(pVector) );
+ if( sqlite3ExprIsVector(pVector) ){
+ assert( pVector->op2==0 || pVector->op==TK_REGISTER );
+ if( pVector->op==TK_SELECT || pVector->op2==TK_SELECT ){
+ return pVector->x.pSelect->pEList->a[i].pExpr;
+ }else{
+ return pVector->x.pList->a[i].pExpr;
+ }
+ }
+ return pVector;
+}
+
+/*
+** Compute and return a new Expr object which when passed to
+** sqlite3ExprCode() will generate all necessary code to compute
+** the iField-th column of the vector expression pVector.
+**
+** It is ok for pVector to be a scalar (as long as iField==0).
+** In that case, this routine works like sqlite3ExprDup().
+**
+** The caller owns the returned Expr object and is responsible for
+** ensuring that the returned value eventually gets freed.
+**
+** The caller retains ownership of pVector. If pVector is a TK_SELECT,
+** then the returned object will reference pVector and so pVector must remain
+** valid for the life of the returned object. If pVector is a TK_VECTOR
+** or a scalar expression, then it can be deleted as soon as this routine
+** returns.
+**
+** A trick to cause a TK_SELECT pVector to be deleted together with
+** the returned Expr object is to attach the pVector to the pRight field
+** of the returned TK_SELECT_COLUMN Expr object.
+*/
+SQLITE_PRIVATE Expr *sqlite3ExprForVectorField(
+ Parse *pParse, /* Parsing context */
+ Expr *pVector, /* The vector. List of expressions or a sub-SELECT */
+ int iField /* Which column of the vector to return */
+){
+ Expr *pRet;
+ if( pVector->op==TK_SELECT ){
+ assert( pVector->flags & EP_xIsSelect );
+ /* The TK_SELECT_COLUMN Expr node:
+ **
+ ** pLeft: pVector containing TK_SELECT. Not deleted.
+ ** pRight: not used. But recursively deleted.
+ ** iColumn: Index of a column in pVector
+ ** iTable: 0 or the number of columns on the LHS of an assignment
+    **   pLeft->iTable: First in an array of registers holding the result, or 0
+ ** if the result is not yet computed.
+ **
+ ** sqlite3ExprDelete() specifically skips the recursive delete of
+ ** pLeft on TK_SELECT_COLUMN nodes. But pRight is followed, so pVector
+ ** can be attached to pRight to cause this node to take ownership of
+ ** pVector. Typically there will be multiple TK_SELECT_COLUMN nodes
+ ** with the same pLeft pointer to the pVector, but only one of them
+ ** will own the pVector.
+ */
+ pRet = sqlite3PExpr(pParse, TK_SELECT_COLUMN, 0, 0);
+ if( pRet ){
+ pRet->iColumn = iField;
+ pRet->pLeft = pVector;
+ }
+ assert( pRet==0 || pRet->iTable==0 );
+ }else{
+ if( pVector->op==TK_VECTOR ) pVector = pVector->x.pList->a[iField].pExpr;
+ pRet = sqlite3ExprDup(pParse->db, pVector, 0);
+ sqlite3RenameTokenRemap(pParse, pRet, pVector);
+ }
+ return pRet;
+}
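+
+/* For illustration: compiling the hypothetical UPDATE clause
+** "(a,b) = (SELECT x,y FROM t)" creates two TK_SELECT_COLUMN nodes that
+** share the same pLeft pointer to the subquery.  Attaching the subquery
+** to the pRight of one of those nodes (as sqlite3ExprListAppendVector()
+** does below) makes that node the owner, so the subquery is freed
+** exactly once.
+*/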
+
+/*
+** If expression pExpr is of type TK_SELECT, generate code to evaluate
+** it. Return the register in which the result is stored (or, if the
+** sub-select returns more than one column, the first in an array
+** of registers in which the result is stored).
+**
+** If pExpr is not a TK_SELECT expression, return 0.
+*/
+static int exprCodeSubselect(Parse *pParse, Expr *pExpr){
+ int reg = 0;
+#ifndef SQLITE_OMIT_SUBQUERY
+ if( pExpr->op==TK_SELECT ){
+ reg = sqlite3CodeSubselect(pParse, pExpr);
+ }
+#endif
+ return reg;
+}
+
+/*
+** Argument pVector points to a vector expression - either a TK_VECTOR
+** or TK_SELECT that returns more than one column. This function returns
+** the register number of a register that contains the value of
+** element iField of the vector.
+**
+** If pVector is a TK_SELECT expression, then code for it must have
+** already been generated using the exprCodeSubselect() routine. In this
+** case parameter regSelect should be the first in an array of registers
+** containing the results of the sub-select.
+**
+** If pVector is of type TK_VECTOR, then code for the requested field
+** is generated. In this case (*pRegFree) may be set to the number of
+** a temporary register to be freed by the caller before returning.
+**
+** Before returning, output parameter (*ppExpr) is set to point to the
+** Expr object corresponding to element iField of the vector.
+*/
+static int exprVectorRegister(
+ Parse *pParse, /* Parse context */
+ Expr *pVector, /* Vector to extract element from */
+ int iField, /* Field to extract from pVector */
+ int regSelect, /* First in array of registers */
+ Expr **ppExpr, /* OUT: Expression element */
+ int *pRegFree /* OUT: Temp register to free */
+){
+ u8 op = pVector->op;
+ assert( op==TK_VECTOR || op==TK_REGISTER || op==TK_SELECT );
+ if( op==TK_REGISTER ){
+ *ppExpr = sqlite3VectorFieldSubexpr(pVector, iField);
+ return pVector->iTable+iField;
+ }
+ if( op==TK_SELECT ){
+ *ppExpr = pVector->x.pSelect->pEList->a[iField].pExpr;
+ return regSelect+iField;
+ }
+ *ppExpr = pVector->x.pList->a[iField].pExpr;
+ return sqlite3ExprCodeTemp(pParse, *ppExpr, pRegFree);
+}
+
+/*
+** Expression pExpr is a comparison between two vector values. Compute
+** the result of the comparison (1, 0, or NULL) and write that
+** result into register dest.
+**
+** The caller must satisfy the following preconditions:
+**
+** if pExpr->op==TK_IS: op==TK_EQ and p5==SQLITE_NULLEQ
+** if pExpr->op==TK_ISNOT: op==TK_NE and p5==SQLITE_NULLEQ
+** otherwise: op==pExpr->op and p5==0
+*/
+static void codeVectorCompare(
+ Parse *pParse, /* Code generator context */
+ Expr *pExpr, /* The comparison operation */
+ int dest, /* Write results into this register */
+ u8 op, /* Comparison operator */
+ u8 p5 /* SQLITE_NULLEQ or zero */
+){
+ Vdbe *v = pParse->pVdbe;
+ Expr *pLeft = pExpr->pLeft;
+ Expr *pRight = pExpr->pRight;
+ int nLeft = sqlite3ExprVectorSize(pLeft);
+ int i;
+ int regLeft = 0;
+ int regRight = 0;
+ u8 opx = op;
+ int addrDone = sqlite3VdbeMakeLabel(pParse);
+ int isCommuted = ExprHasProperty(pExpr,EP_Commuted);
+
+ assert( !ExprHasVVAProperty(pExpr,EP_Immutable) );
+ if( pParse->nErr ) return;
+ if( nLeft!=sqlite3ExprVectorSize(pRight) ){
+ sqlite3ErrorMsg(pParse, "row value misused");
+ return;
+ }
+ assert( pExpr->op==TK_EQ || pExpr->op==TK_NE
+ || pExpr->op==TK_IS || pExpr->op==TK_ISNOT
+ || pExpr->op==TK_LT || pExpr->op==TK_GT
+ || pExpr->op==TK_LE || pExpr->op==TK_GE
+ );
+ assert( pExpr->op==op || (pExpr->op==TK_IS && op==TK_EQ)
+ || (pExpr->op==TK_ISNOT && op==TK_NE) );
+ assert( p5==0 || pExpr->op!=op );
+ assert( p5==SQLITE_NULLEQ || pExpr->op==op );
+
+ p5 |= SQLITE_STOREP2;
+ if( opx==TK_LE ) opx = TK_LT;
+ if( opx==TK_GE ) opx = TK_GT;
+
+ regLeft = exprCodeSubselect(pParse, pLeft);
+ regRight = exprCodeSubselect(pParse, pRight);
+
+ for(i=0; 1 /*Loop exits by "break"*/; i++){
+ int regFree1 = 0, regFree2 = 0;
+ Expr *pL, *pR;
+ int r1, r2;
+ assert( i>=0 && i<nLeft );
+ r1 = exprVectorRegister(pParse, pLeft, i, regLeft, &pL, &regFree1);
+ r2 = exprVectorRegister(pParse, pRight, i, regRight, &pR, &regFree2);
+ codeCompare(pParse, pL, pR, opx, r1, r2, dest, p5, isCommuted);
+ testcase(op==OP_Lt); VdbeCoverageIf(v,op==OP_Lt);
+ testcase(op==OP_Le); VdbeCoverageIf(v,op==OP_Le);
+ testcase(op==OP_Gt); VdbeCoverageIf(v,op==OP_Gt);
+ testcase(op==OP_Ge); VdbeCoverageIf(v,op==OP_Ge);
+ testcase(op==OP_Eq); VdbeCoverageIf(v,op==OP_Eq);
+ testcase(op==OP_Ne); VdbeCoverageIf(v,op==OP_Ne);
+ sqlite3ReleaseTempReg(pParse, regFree1);
+ sqlite3ReleaseTempReg(pParse, regFree2);
+ if( i==nLeft-1 ){
+ break;
+ }
+ if( opx==TK_EQ ){
+ sqlite3VdbeAddOp2(v, OP_IfNot, dest, addrDone); VdbeCoverage(v);
+ p5 |= SQLITE_KEEPNULL;
+ }else if( opx==TK_NE ){
+ sqlite3VdbeAddOp2(v, OP_If, dest, addrDone); VdbeCoverage(v);
+ p5 |= SQLITE_KEEPNULL;
+ }else{
+ assert( op==TK_LT || op==TK_GT || op==TK_LE || op==TK_GE );
+ sqlite3VdbeAddOp2(v, OP_ElseNotEq, 0, addrDone);
+ VdbeCoverageIf(v, op==TK_LT);
+ VdbeCoverageIf(v, op==TK_GT);
+ VdbeCoverageIf(v, op==TK_LE);
+ VdbeCoverageIf(v, op==TK_GE);
+ if( i==nLeft-2 ) opx = op;
+ }
+ }
+ sqlite3VdbeResolveLabel(v, addrDone);
+}
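+
+/* For illustration: the row-value comparison "(a,b) < (x,y)" is
+** equivalent to "a<x OR (a=x AND b<y)".  The loop above compares one
+** element pair at a time, and OP_ElseNotEq skips ahead to addrDone
+** unless the previous pair compared equal.  For "<=" and ">=" only the
+** final element pair uses the non-strict operator; all earlier pairs
+** use "<" or ">".
+*/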
+
+#if SQLITE_MAX_EXPR_DEPTH>0
+/*
+** Check that argument nHeight is less than or equal to the maximum
+** expression depth allowed. If it is not, leave an error message in
+** pParse.
+*/
+SQLITE_PRIVATE int sqlite3ExprCheckHeight(Parse *pParse, int nHeight){
+ int rc = SQLITE_OK;
+ int mxHeight = pParse->db->aLimit[SQLITE_LIMIT_EXPR_DEPTH];
+ if( nHeight>mxHeight ){
+ sqlite3ErrorMsg(pParse,
+ "Expression tree is too large (maximum depth %d)", mxHeight
+ );
+ rc = SQLITE_ERROR;
+ }
+ return rc;
+}
+
+/* The following three functions, heightOfExpr(), heightOfExprList()
+** and heightOfSelect(), are used to determine the maximum height
+** of any expression tree referenced by the structure passed as the
+** first argument.
+**
+** If this maximum height is greater than the current value pointed
+** to by pnHeight, the second parameter, then set *pnHeight to that
+** value.
+*/
+static void heightOfExpr(Expr *p, int *pnHeight){
+ if( p ){
+ if( p->nHeight>*pnHeight ){
+ *pnHeight = p->nHeight;
+ }
+ }
+}
+static void heightOfExprList(ExprList *p, int *pnHeight){
+ if( p ){
+ int i;
+ for(i=0; i<p->nExpr; i++){
+ heightOfExpr(p->a[i].pExpr, pnHeight);
+ }
+ }
+}
+static void heightOfSelect(Select *pSelect, int *pnHeight){
+ Select *p;
+ for(p=pSelect; p; p=p->pPrior){
+ heightOfExpr(p->pWhere, pnHeight);
+ heightOfExpr(p->pHaving, pnHeight);
+ heightOfExpr(p->pLimit, pnHeight);
+ heightOfExprList(p->pEList, pnHeight);
+ heightOfExprList(p->pGroupBy, pnHeight);
+ heightOfExprList(p->pOrderBy, pnHeight);
+ }
+}
+
+/*
+** Set the Expr.nHeight variable in the structure passed as an
+** argument. An expression with no children and no Expr.x.pList or
+** Expr.x.pSelect member has a height of 1. Any other expression has
+** a height equal to the maximum height of any Expr it references,
+** plus one.
+**
+** Also propagate EP_Propagate flags up from Expr.x.pList to Expr.flags,
+** if appropriate.
+*/
+static void exprSetHeight(Expr *p){
+ int nHeight = 0;
+ heightOfExpr(p->pLeft, &nHeight);
+ heightOfExpr(p->pRight, &nHeight);
+ if( ExprHasProperty(p, EP_xIsSelect) ){
+ heightOfSelect(p->x.pSelect, &nHeight);
+ }else if( p->x.pList ){
+ heightOfExprList(p->x.pList, &nHeight);
+ p->flags |= EP_Propagate & sqlite3ExprListFlags(p->x.pList);
+ }
+ p->nHeight = nHeight + 1;
+}
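+
+/* For illustration: in the expression "a + b*c" the leaves "a", "b"
+** and "c" each have height 1, the "*" node has height 2, and the "+"
+** root has height 3.
+*/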
+
+/*
+** Set the Expr.nHeight variable using the exprSetHeight() function. If
+** the height is greater than the maximum allowed expression depth,
+** leave an error in pParse.
+**
+** Also propagate all EP_Propagate flags from the Expr.x.pList into
+** Expr.flags.
+*/
+SQLITE_PRIVATE void sqlite3ExprSetHeightAndFlags(Parse *pParse, Expr *p){
+ if( pParse->nErr ) return;
+ exprSetHeight(p);
+ sqlite3ExprCheckHeight(pParse, p->nHeight);
+}
+
+/*
+** Return the maximum height of any expression tree referenced
+** by the select statement passed as an argument.
+*/
+SQLITE_PRIVATE int sqlite3SelectExprHeight(Select *p){
+ int nHeight = 0;
+ heightOfSelect(p, &nHeight);
+ return nHeight;
+}
+#else /* ABOVE: Height enforcement enabled. BELOW: Height enforcement off */
+/*
+** Propagate all EP_Propagate flags from the Expr.x.pList into
+** Expr.flags.
+*/
+SQLITE_PRIVATE void sqlite3ExprSetHeightAndFlags(Parse *pParse, Expr *p){
+ if( p && p->x.pList && !ExprHasProperty(p, EP_xIsSelect) ){
+ p->flags |= EP_Propagate & sqlite3ExprListFlags(p->x.pList);
+ }
+}
+#define exprSetHeight(y)
+#endif /* SQLITE_MAX_EXPR_DEPTH>0 */
+
+/*
+** This routine is the core allocator for Expr nodes.
+**
+** Construct a new expression node and return a pointer to it. Memory
+** for this node and for the pToken argument is a single allocation
+** obtained from sqlite3DbMalloc(). The calling function
+** is responsible for making sure the node eventually gets freed.
+**
+** If dequote is true, then the token (if it exists) is dequoted.
+** If dequote is false, no dequoting is performed. The dequote
+** parameter is ignored if pToken is NULL or if the token does not
+** appear to be quoted. If the quotes were of the form "..." (double-quotes)
+** then the EP_DblQuoted flag is set on the expression node.
+**
+** Special case: If op==TK_INTEGER and pToken points to a string that
+** can be translated into a 32-bit integer, then the token is not
+** stored in u.zToken. Instead, the integer value is written
+** into u.iValue and the EP_IntValue flag is set. No extra storage
+** is allocated to hold the integer text and the dequote flag is ignored.
+*/
+SQLITE_PRIVATE Expr *sqlite3ExprAlloc(
+ sqlite3 *db, /* Handle for sqlite3DbMallocRawNN() */
+ int op, /* Expression opcode */
+ const Token *pToken, /* Token argument. Might be NULL */
+ int dequote /* True to dequote */
+){
+ Expr *pNew;
+ int nExtra = 0;
+ int iValue = 0;
+
+ assert( db!=0 );
+ if( pToken ){
+ if( op!=TK_INTEGER || pToken->z==0
+ || sqlite3GetInt32(pToken->z, &iValue)==0 ){
+ nExtra = pToken->n+1;
+ assert( iValue>=0 );
+ }
+ }
+ pNew = sqlite3DbMallocRawNN(db, sizeof(Expr)+nExtra);
+ if( pNew ){
+ memset(pNew, 0, sizeof(Expr));
+ pNew->op = (u8)op;
+ pNew->iAgg = -1;
+ if( pToken ){
+ if( nExtra==0 ){
+ pNew->flags |= EP_IntValue|EP_Leaf|(iValue?EP_IsTrue:EP_IsFalse);
+ pNew->u.iValue = iValue;
+ }else{
+ pNew->u.zToken = (char*)&pNew[1];
+ assert( pToken->z!=0 || pToken->n==0 );
+ if( pToken->n ) memcpy(pNew->u.zToken, pToken->z, pToken->n);
+ pNew->u.zToken[pToken->n] = 0;
+ if( dequote && sqlite3Isquote(pNew->u.zToken[0]) ){
+ sqlite3DequoteExpr(pNew);
+ }
+ }
+ }
+#if SQLITE_MAX_EXPR_DEPTH>0
+ pNew->nHeight = 1;
+#endif
+ }
+ return pNew;
+}
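+
+/* For illustration: allocating a TK_INTEGER node from the token "123"
+** takes the EP_IntValue path, storing 123 in u.iValue with no extra
+** string storage.  The token "9999999999" does not fit in 32 bits, so
+** it is kept as text in u.zToken instead.
+*/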
+
+/*
+** Allocate a new expression node from a zero-terminated token that has
+** already been dequoted.
+*/
+SQLITE_PRIVATE Expr *sqlite3Expr(
+ sqlite3 *db, /* Handle for sqlite3DbMallocZero() (may be null) */
+ int op, /* Expression opcode */
+ const char *zToken /* Token argument. Might be NULL */
+){
+ Token x;
+ x.z = zToken;
+ x.n = sqlite3Strlen30(zToken);
+ return sqlite3ExprAlloc(db, op, &x, 0);
+}
+
+/*
+** Attach subtrees pLeft and pRight to the Expr node pRoot.
+**
+** If pRoot==NULL that means that a memory allocation error has occurred.
+** In that case, delete the subtrees pLeft and pRight.
+*/
+SQLITE_PRIVATE void sqlite3ExprAttachSubtrees(
+ sqlite3 *db,
+ Expr *pRoot,
+ Expr *pLeft,
+ Expr *pRight
+){
+ if( pRoot==0 ){
+ assert( db->mallocFailed );
+ sqlite3ExprDelete(db, pLeft);
+ sqlite3ExprDelete(db, pRight);
+ }else{
+ if( pRight ){
+ pRoot->pRight = pRight;
+ pRoot->flags |= EP_Propagate & pRight->flags;
+ }
+ if( pLeft ){
+ pRoot->pLeft = pLeft;
+ pRoot->flags |= EP_Propagate & pLeft->flags;
+ }
+ exprSetHeight(pRoot);
+ }
+}
+
+/*
+** Allocate an Expr node which joins as many as two subtrees.
+**
+** One or both of the subtrees can be NULL. Return a pointer to the new
+** Expr node. Or, if an OOM error occurs, set pParse->db->mallocFailed,
+** free the subtrees and return NULL.
+*/
+SQLITE_PRIVATE Expr *sqlite3PExpr(
+ Parse *pParse, /* Parsing context */
+ int op, /* Expression opcode */
+ Expr *pLeft, /* Left operand */
+ Expr *pRight /* Right operand */
+){
+ Expr *p;
+ p = sqlite3DbMallocRawNN(pParse->db, sizeof(Expr));
+ if( p ){
+ memset(p, 0, sizeof(Expr));
+ p->op = op & 0xff;
+ p->iAgg = -1;
+ sqlite3ExprAttachSubtrees(pParse->db, p, pLeft, pRight);
+ sqlite3ExprCheckHeight(pParse, p->nHeight);
+ }else{
+ sqlite3ExprDelete(pParse->db, pLeft);
+ sqlite3ExprDelete(pParse->db, pRight);
+ }
+ return p;
+}
+
+/*
+** Add pSelect to the Expr.x.pSelect field. Or, if pExpr is NULL (due
+** to a memory allocation failure) then delete the pSelect object.
+*/
+SQLITE_PRIVATE void sqlite3PExprAddSelect(Parse *pParse, Expr *pExpr, Select *pSelect){
+ if( pExpr ){
+ pExpr->x.pSelect = pSelect;
+ ExprSetProperty(pExpr, EP_xIsSelect|EP_Subquery);
+ sqlite3ExprSetHeightAndFlags(pParse, pExpr);
+ }else{
+ assert( pParse->db->mallocFailed );
+ sqlite3SelectDelete(pParse->db, pSelect);
+ }
+}
+
+
+/*
+** Join two expressions using an AND operator. If either expression is
+** NULL, then just return the other expression.
+**
+** If one side or the other of the AND is known to be false, then instead
+** of returning an AND expression, just return a constant expression with
+** a value of false.
+*/
+SQLITE_PRIVATE Expr *sqlite3ExprAnd(Parse *pParse, Expr *pLeft, Expr *pRight){
+ sqlite3 *db = pParse->db;
+ if( pLeft==0 ){
+ return pRight;
+ }else if( pRight==0 ){
+ return pLeft;
+ }else if( (ExprAlwaysFalse(pLeft) || ExprAlwaysFalse(pRight))
+ && !IN_RENAME_OBJECT
+ ){
+ sqlite3ExprDelete(db, pLeft);
+ sqlite3ExprDelete(db, pRight);
+ return sqlite3Expr(db, TK_INTEGER, "0");
+ }else{
+ return sqlite3PExpr(pParse, TK_AND, pLeft, pRight);
+ }
+}
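+
+/* For illustration: ANDing "x>1" with a NULL right-hand expression
+** returns "x>1" unchanged, while ANDing "x>1" with an always-false term
+** such as "0" deletes both operands and returns the constant "0"
+** (except during ALTER TABLE RENAME processing).
+*/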
+
+/*
+** Construct a new expression node for a function with multiple
+** arguments.
+*/
+SQLITE_PRIVATE Expr *sqlite3ExprFunction(
+ Parse *pParse, /* Parsing context */
+ ExprList *pList, /* Argument list */
+ Token *pToken, /* Name of the function */
+ int eDistinct /* SF_Distinct or SF_ALL or 0 */
+){
+ Expr *pNew;
+ sqlite3 *db = pParse->db;
+ assert( pToken );
+ pNew = sqlite3ExprAlloc(db, TK_FUNCTION, pToken, 1);
+ if( pNew==0 ){
+ sqlite3ExprListDelete(db, pList); /* Avoid memory leak when malloc fails */
+ return 0;
+ }
+ if( pList && pList->nExpr > pParse->db->aLimit[SQLITE_LIMIT_FUNCTION_ARG] ){
+ sqlite3ErrorMsg(pParse, "too many arguments on function %T", pToken);
+ }
+ pNew->x.pList = pList;
+ ExprSetProperty(pNew, EP_HasFunc);
+ assert( !ExprHasProperty(pNew, EP_xIsSelect) );
+ sqlite3ExprSetHeightAndFlags(pParse, pNew);
+ if( eDistinct==SF_Distinct ) ExprSetProperty(pNew, EP_Distinct);
+ return pNew;
+}
+
+/*
+** Check to see if a function is usable according to current access
+** rules:
+**
+** SQLITE_FUNC_DIRECT - Only usable from top-level SQL
+**
+** SQLITE_FUNC_UNSAFE - Usable if TRUSTED_SCHEMA or from
+** top-level SQL
+**
+** If the function is not usable, create an error.
+*/
+SQLITE_PRIVATE void sqlite3ExprFunctionUsable(
+ Parse *pParse, /* Parsing and code generating context */
+ Expr *pExpr, /* The function invocation */
+ FuncDef *pDef /* The function being invoked */
+){
+ assert( !IN_RENAME_OBJECT );
+ assert( (pDef->funcFlags & (SQLITE_FUNC_DIRECT|SQLITE_FUNC_UNSAFE))!=0 );
+ if( ExprHasProperty(pExpr, EP_FromDDL) ){
+ if( (pDef->funcFlags & SQLITE_FUNC_DIRECT)!=0
+ || (pParse->db->flags & SQLITE_TrustedSchema)==0
+ ){
+      /* Functions are prohibited in triggers and views if:
+ ** (1) tagged with SQLITE_DIRECTONLY
+ ** (2) not tagged with SQLITE_INNOCUOUS (which means it
+ ** is tagged with SQLITE_FUNC_UNSAFE) and
+ ** SQLITE_DBCONFIG_TRUSTED_SCHEMA is off (meaning
+ ** that the schema is possibly tainted).
+ */
+ sqlite3ErrorMsg(pParse, "unsafe use of %s()", pDef->zName);
+ }
+ }
+}
+
+/*
+** Assign a variable number to an expression that encodes a wildcard
+** in the original SQL statement.
+**
+** Wildcards consisting of a single "?" are assigned the next sequential
+** variable number.
+**
+** Wildcards of the form "?nnn" are assigned the number "nnn". We make
+** sure "nnn" is not too big to avoid a denial of service attack when
+** the SQL statement comes from an external source.
+**
+** Wildcards of the form ":aaa", "@aaa", or "$aaa" are assigned the same number
+** as the previous instance of the same wildcard. Or if this is the first
+** instance of the wildcard, the next sequential variable number is
+** assigned.
+*/
+SQLITE_PRIVATE void sqlite3ExprAssignVarNumber(Parse *pParse, Expr *pExpr, u32 n){
+ sqlite3 *db = pParse->db;
+ const char *z;
+ ynVar x;
+
+ if( pExpr==0 ) return;
+ assert( !ExprHasProperty(pExpr, EP_IntValue|EP_Reduced|EP_TokenOnly) );
+ z = pExpr->u.zToken;
+ assert( z!=0 );
+ assert( z[0]!=0 );
+ assert( n==(u32)sqlite3Strlen30(z) );
+ if( z[1]==0 ){
+ /* Wildcard of the form "?". Assign the next variable number */
+ assert( z[0]=='?' );
+ x = (ynVar)(++pParse->nVar);
+ }else{
+ int doAdd = 0;
+ if( z[0]=='?' ){
+ /* Wildcard of the form "?nnn". Convert "nnn" to an integer and
+ ** use it as the variable number */
+ i64 i;
+ int bOk;
+ if( n==2 ){ /*OPTIMIZATION-IF-TRUE*/
+ i = z[1]-'0'; /* The common case of ?N for a single digit N */
+ bOk = 1;
+ }else{
+ bOk = 0==sqlite3Atoi64(&z[1], &i, n-1, SQLITE_UTF8);
+ }
+ testcase( i==0 );
+ testcase( i==1 );
+ testcase( i==db->aLimit[SQLITE_LIMIT_VARIABLE_NUMBER]-1 );
+ testcase( i==db->aLimit[SQLITE_LIMIT_VARIABLE_NUMBER] );
+ if( bOk==0 || i<1 || i>db->aLimit[SQLITE_LIMIT_VARIABLE_NUMBER] ){
+ sqlite3ErrorMsg(pParse, "variable number must be between ?1 and ?%d",
+ db->aLimit[SQLITE_LIMIT_VARIABLE_NUMBER]);
+ return;
+ }
+ x = (ynVar)i;
+ if( x>pParse->nVar ){
+ pParse->nVar = (int)x;
+ doAdd = 1;
+ }else if( sqlite3VListNumToName(pParse->pVList, x)==0 ){
+ doAdd = 1;
+ }
+ }else{
+ /* Wildcards like ":aaa", "$aaa" or "@aaa". Reuse the same variable
+      ** number as the prior appearance of the same name, or if the name
+      ** has never appeared before, assign the next sequential variable
+      ** number.
+      */
+ x = (ynVar)sqlite3VListNameToNum(pParse->pVList, z, n);
+ if( x==0 ){
+ x = (ynVar)(++pParse->nVar);
+ doAdd = 1;
+ }
+ }
+ if( doAdd ){
+ pParse->pVList = sqlite3VListAdd(db, pParse->pVList, z, n, x);
+ }
+ }
+ pExpr->iColumn = x;
+ if( x>db->aLimit[SQLITE_LIMIT_VARIABLE_NUMBER] ){
+ sqlite3ErrorMsg(pParse, "too many SQL variables");
+ }
+}
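+
+/* For illustration: in the hypothetical statement "SELECT ?, ?5, :a, ?"
+** the wildcards receive variable numbers 1, 5, 6 and 7 in that order.
+** The first "?" takes the next sequential number, "?5" raises nVar to
+** 5, ":a" is a new name and takes 6, and the final "?" takes 7.
+*/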
+
+/*
+** Recursively delete an expression tree.
+*/
+static SQLITE_NOINLINE void sqlite3ExprDeleteNN(sqlite3 *db, Expr *p){
+ assert( p!=0 );
+ /* Sanity check: Assert that the IntValue is non-negative if it exists */
+ assert( !ExprHasProperty(p, EP_IntValue) || p->u.iValue>=0 );
+
+ assert( !ExprHasProperty(p, EP_WinFunc) || p->y.pWin!=0 || db->mallocFailed );
+ assert( p->op!=TK_FUNCTION || ExprHasProperty(p, EP_TokenOnly|EP_Reduced)
+ || p->y.pWin==0 || ExprHasProperty(p, EP_WinFunc) );
+#ifdef SQLITE_DEBUG
+ if( ExprHasProperty(p, EP_Leaf) && !ExprHasProperty(p, EP_TokenOnly) ){
+ assert( p->pLeft==0 );
+ assert( p->pRight==0 );
+ assert( p->x.pSelect==0 );
+ }
+#endif
+ if( !ExprHasProperty(p, (EP_TokenOnly|EP_Leaf)) ){
+ /* The Expr.x union is never used at the same time as Expr.pRight */
+ assert( p->x.pList==0 || p->pRight==0 );
+ if( p->pLeft && p->op!=TK_SELECT_COLUMN ) sqlite3ExprDeleteNN(db, p->pLeft);
+ if( p->pRight ){
+ assert( !ExprHasProperty(p, EP_WinFunc) );
+ sqlite3ExprDeleteNN(db, p->pRight);
+ }else if( ExprHasProperty(p, EP_xIsSelect) ){
+ assert( !ExprHasProperty(p, EP_WinFunc) );
+ sqlite3SelectDelete(db, p->x.pSelect);
+ }else{
+ sqlite3ExprListDelete(db, p->x.pList);
+#ifndef SQLITE_OMIT_WINDOWFUNC
+ if( ExprHasProperty(p, EP_WinFunc) ){
+ sqlite3WindowDelete(db, p->y.pWin);
+ }
+#endif
+ }
+ }
+ if( ExprHasProperty(p, EP_MemToken) ) sqlite3DbFree(db, p->u.zToken);
+ if( !ExprHasProperty(p, EP_Static) ){
+ sqlite3DbFreeNN(db, p);
+ }
+}
+SQLITE_PRIVATE void sqlite3ExprDelete(sqlite3 *db, Expr *p){
+ if( p ) sqlite3ExprDeleteNN(db, p);
+}
+
+/* Invoke sqlite3RenameExprUnmap() and sqlite3ExprDelete() on the
+** expression.
+*/
+SQLITE_PRIVATE void sqlite3ExprUnmapAndDelete(Parse *pParse, Expr *p){
+ if( p ){
+ if( IN_RENAME_OBJECT ){
+ sqlite3RenameExprUnmap(pParse, p);
+ }
+ sqlite3ExprDeleteNN(pParse->db, p);
+ }
+}
+
+/*
+** Return the number of bytes allocated for the expression structure
+** passed as the first argument. This is always one of EXPR_FULLSIZE,
+** EXPR_REDUCEDSIZE or EXPR_TOKENONLYSIZE.
+*/
+static int exprStructSize(Expr *p){
+ if( ExprHasProperty(p, EP_TokenOnly) ) return EXPR_TOKENONLYSIZE;
+ if( ExprHasProperty(p, EP_Reduced) ) return EXPR_REDUCEDSIZE;
+ return EXPR_FULLSIZE;
+}
+
+/*
+** The dupedExpr*Size() routines each return the number of bytes required
+** to store a copy of an expression or expression tree. They differ in
+** how much of the tree is measured.
+**
+** dupedExprStructSize() Size of only the Expr structure
+** dupedExprNodeSize() Size of Expr + space for token
+** dupedExprSize() Expr + token + subtree components
+**
+***************************************************************************
+**
+** The dupedExprStructSize() function returns two values OR-ed together:
+** (1) the space required for a copy of the Expr structure only and
+** (2) the EP_xxx flags that indicate what the structure size should be.
+** The return value is always one of:
+**
+** EXPR_FULLSIZE
+** EXPR_REDUCEDSIZE | EP_Reduced
+** EXPR_TOKENONLYSIZE | EP_TokenOnly
+**
+** The size of the structure can be found by masking the return value
+** of this routine with 0xfff. The flags can be found by masking the
+** return value with EP_Reduced|EP_TokenOnly.
+**
+** Note that with flags==EXPRDUP_REDUCE, this routine works on full-size
+** (unreduced) Expr objects as they are originally constructed by the parser.
+** During expression analysis, extra information is computed and moved into
+** later parts of the Expr object, and that extra information might get
+** chopped off if the expression is reduced. Note also that it does not work
+** to make an EXPRDUP_REDUCE copy of a reduced expression. It is only legal
+** to reduce a pristine expression tree from the parser. The implementation
+** of dupedExprStructSize() contains multiple assert() statements that attempt
+** to enforce this constraint.
+*/
+static int dupedExprStructSize(Expr *p, int flags){
+ int nSize;
+ assert( flags==EXPRDUP_REDUCE || flags==0 ); /* Only one flag value allowed */
+ assert( EXPR_FULLSIZE<=0xfff );
+ assert( (0xfff & (EP_Reduced|EP_TokenOnly))==0 );
+ if( 0==flags || p->op==TK_SELECT_COLUMN
+#ifndef SQLITE_OMIT_WINDOWFUNC
+ || ExprHasProperty(p, EP_WinFunc)
+#endif
+ ){
+ nSize = EXPR_FULLSIZE;
+ }else{
+ assert( !ExprHasProperty(p, EP_TokenOnly|EP_Reduced) );
+ assert( !ExprHasProperty(p, EP_FromJoin) );
+ assert( !ExprHasProperty(p, EP_MemToken) );
+ assert( !ExprHasVVAProperty(p, EP_NoReduce) );
+ if( p->pLeft || p->x.pList ){
+ nSize = EXPR_REDUCEDSIZE | EP_Reduced;
+ }else{
+ assert( p->pRight==0 );
+ nSize = EXPR_TOKENONLYSIZE | EP_TokenOnly;
+ }
+ }
+ return nSize;
+}
+
+/*
+** This function returns the space in bytes required to store the copy
+** of the Expr structure and a copy of the Expr.u.zToken string (if that
+** string is defined.)
+*/
+static int dupedExprNodeSize(Expr *p, int flags){
+ int nByte = dupedExprStructSize(p, flags) & 0xfff;
+ if( !ExprHasProperty(p, EP_IntValue) && p->u.zToken ){
+ nByte += sqlite3Strlen30NN(p->u.zToken)+1;
+ }
+ return ROUND8(nByte);
+}
+
+/*
+** Return the number of bytes required to create a duplicate of the
+** expression passed as the first argument. The second argument is a
+** mask containing EXPRDUP_XXX flags.
+**
+** The value returned includes space to create a copy of the Expr struct
+** itself and the buffer referred to by Expr.u.zToken, if any.
+**
+** If the EXPRDUP_REDUCE flag is set, then the return value includes
+** space to duplicate all Expr nodes in the tree formed by Expr.pLeft
+** and Expr.pRight variables (but not for any structures pointed to or
+** descended from the Expr.x.pList or Expr.x.pSelect variables).
+*/
+static int dupedExprSize(Expr *p, int flags){
+ int nByte = 0;
+ if( p ){
+ nByte = dupedExprNodeSize(p, flags);
+ if( flags&EXPRDUP_REDUCE ){
+ nByte += dupedExprSize(p->pLeft, flags) + dupedExprSize(p->pRight, flags);
+ }
+ }
+ return nByte;
+}
+
+/*
+** This function is similar to sqlite3ExprDup(), except that if pzBuffer
+** is not NULL then *pzBuffer is assumed to point to a buffer large enough
+** to store the copy of expression p, the copies of p->u.zToken
+** (if applicable), and the copies of the p->pLeft and p->pRight expressions,
+** if any. Before returning, *pzBuffer is set to the first byte past the
+** portion of the buffer copied into by this function.
+*/
+static Expr *exprDup(sqlite3 *db, Expr *p, int dupFlags, u8 **pzBuffer){
+ Expr *pNew; /* Value to return */
+ u8 *zAlloc; /* Memory space from which to build Expr object */
+ u32 staticFlag; /* EP_Static if space not obtained from malloc */
+
+ assert( db!=0 );
+ assert( p );
+ assert( dupFlags==0 || dupFlags==EXPRDUP_REDUCE );
+ assert( pzBuffer==0 || dupFlags==EXPRDUP_REDUCE );
+
+ /* Figure out where to write the new Expr structure. */
+ if( pzBuffer ){
+ zAlloc = *pzBuffer;
+ staticFlag = EP_Static;
+ }else{
+ zAlloc = sqlite3DbMallocRawNN(db, dupedExprSize(p, dupFlags));
+ staticFlag = 0;
+ }
+ pNew = (Expr *)zAlloc;
+
+ if( pNew ){
+ /* Set nNewSize to the size allocated for the structure pointed to
+ ** by pNew. This is either EXPR_FULLSIZE, EXPR_REDUCEDSIZE or
+ ** EXPR_TOKENONLYSIZE. nToken is set to the number of bytes consumed
+ ** by the copy of the p->u.zToken string (if any).
+ */
+ const unsigned nStructSize = dupedExprStructSize(p, dupFlags);
+ const int nNewSize = nStructSize & 0xfff;
+ int nToken;
+ if( !ExprHasProperty(p, EP_IntValue) && p->u.zToken ){
+ nToken = sqlite3Strlen30(p->u.zToken) + 1;
+ }else{
+ nToken = 0;
+ }
+ if( dupFlags ){
+ assert( ExprHasProperty(p, EP_Reduced)==0 );
+ memcpy(zAlloc, p, nNewSize);
+ }else{
+ u32 nSize = (u32)exprStructSize(p);
+ memcpy(zAlloc, p, nSize);
+ if( nSize<EXPR_FULLSIZE ){
+ memset(&zAlloc[nSize], 0, EXPR_FULLSIZE-nSize);
+ }
+ }
+
+ /* Set the EP_Reduced, EP_TokenOnly, and EP_Static flags appropriately. */
+ pNew->flags &= ~(EP_Reduced|EP_TokenOnly|EP_Static|EP_MemToken);
+ pNew->flags |= nStructSize & (EP_Reduced|EP_TokenOnly);
+ pNew->flags |= staticFlag;
+ ExprClearVVAProperties(pNew);
+ if( dupFlags ){
+ ExprSetVVAProperty(pNew, EP_Immutable);
+ }
+
+ /* Copy the p->u.zToken string, if any. */
+ if( nToken ){
+ char *zToken = pNew->u.zToken = (char*)&zAlloc[nNewSize];
+ memcpy(zToken, p->u.zToken, nToken);
+ }
+
+ if( 0==((p->flags|pNew->flags) & (EP_TokenOnly|EP_Leaf)) ){
+ /* Fill in the pNew->x.pSelect or pNew->x.pList member. */
+ if( ExprHasProperty(p, EP_xIsSelect) ){
+ pNew->x.pSelect = sqlite3SelectDup(db, p->x.pSelect, dupFlags);
+ }else{
+ pNew->x.pList = sqlite3ExprListDup(db, p->x.pList, dupFlags);
+ }
+ }
+
+ /* Fill in pNew->pLeft and pNew->pRight. */
+ if( ExprHasProperty(pNew, EP_Reduced|EP_TokenOnly|EP_WinFunc) ){
+ zAlloc += dupedExprNodeSize(p, dupFlags);
+ if( !ExprHasProperty(pNew, EP_TokenOnly|EP_Leaf) ){
+ pNew->pLeft = p->pLeft ?
+ exprDup(db, p->pLeft, EXPRDUP_REDUCE, &zAlloc) : 0;
+ pNew->pRight = p->pRight ?
+ exprDup(db, p->pRight, EXPRDUP_REDUCE, &zAlloc) : 0;
+ }
+#ifndef SQLITE_OMIT_WINDOWFUNC
+ if( ExprHasProperty(p, EP_WinFunc) ){
+ pNew->y.pWin = sqlite3WindowDup(db, pNew, p->y.pWin);
+ assert( ExprHasProperty(pNew, EP_WinFunc) );
+ }
+#endif /* SQLITE_OMIT_WINDOWFUNC */
+ if( pzBuffer ){
+ *pzBuffer = zAlloc;
+ }
+ }else{
+ if( !ExprHasProperty(p, EP_TokenOnly|EP_Leaf) ){
+ if( pNew->op==TK_SELECT_COLUMN ){
+ pNew->pLeft = p->pLeft;
+ assert( p->iColumn==0 || p->pRight==0 );
+ assert( p->pRight==0 || p->pRight==p->pLeft );
+ }else{
+ pNew->pLeft = sqlite3ExprDup(db, p->pLeft, 0);
+ }
+ pNew->pRight = sqlite3ExprDup(db, p->pRight, 0);
+ }
+ }
+ }
+ return pNew;
+}
+
+/*
+** Create and return a deep copy of the object passed as the second
+** argument. If an OOM condition is encountered, NULL is returned
+** and the db->mallocFailed flag set.
+*/
+#ifndef SQLITE_OMIT_CTE
+static With *withDup(sqlite3 *db, With *p){
+ With *pRet = 0;
+ if( p ){
+ sqlite3_int64 nByte = sizeof(*p) + sizeof(p->a[0]) * (p->nCte-1);
+ pRet = sqlite3DbMallocZero(db, nByte);
+ if( pRet ){
+ int i;
+ pRet->nCte = p->nCte;
+ for(i=0; i<p->nCte; i++){
+ pRet->a[i].pSelect = sqlite3SelectDup(db, p->a[i].pSelect, 0);
+ pRet->a[i].pCols = sqlite3ExprListDup(db, p->a[i].pCols, 0);
+ pRet->a[i].zName = sqlite3DbStrDup(db, p->a[i].zName);
+ }
+ }
+ }
+ return pRet;
+}
+#else
+# define withDup(x,y) 0
+#endif
+
+#ifndef SQLITE_OMIT_WINDOWFUNC
+/*
+** The gatherSelectWindows() procedure and its helper routine
+** gatherSelectWindowsCallback() are used to scan all the expressions
+** an a newly duplicated SELECT statement and gather all of the Window
+** objects found there, assembling them onto the linked list at Select->pWin.
+*/
+static int gatherSelectWindowsCallback(Walker *pWalker, Expr *pExpr){
+ if( pExpr->op==TK_FUNCTION && ExprHasProperty(pExpr, EP_WinFunc) ){
+ Select *pSelect = pWalker->u.pSelect;
+ Window *pWin = pExpr->y.pWin;
+ assert( pWin );
+ assert( IsWindowFunc(pExpr) );
+ assert( pWin->ppThis==0 );
+ sqlite3WindowLink(pSelect, pWin);
+ }
+ return WRC_Continue;
+}
+static int gatherSelectWindowsSelectCallback(Walker *pWalker, Select *p){
+ return p==pWalker->u.pSelect ? WRC_Continue : WRC_Prune;
+}
+static void gatherSelectWindows(Select *p){
+ Walker w;
+ w.xExprCallback = gatherSelectWindowsCallback;
+ w.xSelectCallback = gatherSelectWindowsSelectCallback;
+ w.xSelectCallback2 = 0;
+ w.pParse = 0;
+ w.u.pSelect = p;
+ sqlite3WalkSelect(&w, p);
+}
+#endif
+
+
+/*
+** The following group of routines make deep copies of expressions,
+** expression lists, ID lists, and select statements. The copies can
+** be deleted (by being passed to their respective ...Delete() routines)
+** without affecting the originals.
+**
+** The expression list, ID, and source lists returned by sqlite3ExprListDup(),
+** sqlite3IdListDup(), and sqlite3SrcListDup() cannot be further expanded
+** by subsequent calls to sqlite*ListAppend() routines.
+**
+** Any tables that the SrcList might point to are not duplicated.
+**
+** The flags parameter contains a combination of the EXPRDUP_XXX flags.
+** If the EXPRDUP_REDUCE flag is set, then the structure returned is a
+** truncated version of the usual Expr structure that will be stored as
+** part of the in-memory representation of the database schema.
+*/
+SQLITE_PRIVATE Expr *sqlite3ExprDup(sqlite3 *db, Expr *p, int flags){
+ assert( flags==0 || flags==EXPRDUP_REDUCE );
+ return p ? exprDup(db, p, flags, 0) : 0;
+}
+SQLITE_PRIVATE ExprList *sqlite3ExprListDup(sqlite3 *db, ExprList *p, int flags){
+ ExprList *pNew;
+ struct ExprList_item *pItem, *pOldItem;
+ int i;
+ Expr *pPriorSelectCol = 0;
+ assert( db!=0 );
+ if( p==0 ) return 0;
+ pNew = sqlite3DbMallocRawNN(db, sqlite3DbMallocSize(db, p));
+ if( pNew==0 ) return 0;
+ pNew->nExpr = p->nExpr;
+ pItem = pNew->a;
+ pOldItem = p->a;
+ for(i=0; i<p->nExpr; i++, pItem++, pOldItem++){
+ Expr *pOldExpr = pOldItem->pExpr;
+ Expr *pNewExpr;
+ pItem->pExpr = sqlite3ExprDup(db, pOldExpr, flags);
+ if( pOldExpr
+ && pOldExpr->op==TK_SELECT_COLUMN
+ && (pNewExpr = pItem->pExpr)!=0
+ ){
+ assert( pNewExpr->iColumn==0 || i>0 );
+ if( pNewExpr->iColumn==0 ){
+ assert( pOldExpr->pLeft==pOldExpr->pRight );
+ pPriorSelectCol = pNewExpr->pLeft = pNewExpr->pRight;
+ }else{
+ assert( i>0 );
+ assert( pItem[-1].pExpr!=0 );
+ assert( pNewExpr->iColumn==pItem[-1].pExpr->iColumn+1 );
+ assert( pPriorSelectCol==pItem[-1].pExpr->pLeft );
+ pNewExpr->pLeft = pPriorSelectCol;
+ }
+ }
+ pItem->zEName = sqlite3DbStrDup(db, pOldItem->zEName);
+ pItem->sortFlags = pOldItem->sortFlags;
+ pItem->eEName = pOldItem->eEName;
+ pItem->done = 0;
+ pItem->bNulls = pOldItem->bNulls;
+ pItem->bSorterRef = pOldItem->bSorterRef;
+ pItem->u = pOldItem->u;
+ }
+ return pNew;
+}
+
+/*
+** If cursors, triggers, views and subqueries are all omitted from
+** the build, then none of the following routines, except for
+** sqlite3SelectDup(), can be called. sqlite3SelectDup() is sometimes
+** called with a NULL argument.
+*/
+#if !defined(SQLITE_OMIT_VIEW) || !defined(SQLITE_OMIT_TRIGGER) \
+ || !defined(SQLITE_OMIT_SUBQUERY)
+SQLITE_PRIVATE SrcList *sqlite3SrcListDup(sqlite3 *db, SrcList *p, int flags){
+ SrcList *pNew;
+ int i;
+ int nByte;
+ assert( db!=0 );
+ if( p==0 ) return 0;
+ nByte = sizeof(*p) + (p->nSrc>0 ? sizeof(p->a[0]) * (p->nSrc-1) : 0);
+ pNew = sqlite3DbMallocRawNN(db, nByte );
+ if( pNew==0 ) return 0;
+ pNew->nSrc = pNew->nAlloc = p->nSrc;
+ for(i=0; i<p->nSrc; i++){
+ struct SrcList_item *pNewItem = &pNew->a[i];
+ struct SrcList_item *pOldItem = &p->a[i];
+ Table *pTab;
+ pNewItem->pSchema = pOldItem->pSchema;
+ pNewItem->zDatabase = sqlite3DbStrDup(db, pOldItem->zDatabase);
+ pNewItem->zName = sqlite3DbStrDup(db, pOldItem->zName);
+ pNewItem->zAlias = sqlite3DbStrDup(db, pOldItem->zAlias);
+ pNewItem->fg = pOldItem->fg;
+ pNewItem->iCursor = pOldItem->iCursor;
+ pNewItem->addrFillSub = pOldItem->addrFillSub;
+ pNewItem->regReturn = pOldItem->regReturn;
+ if( pNewItem->fg.isIndexedBy ){
+ pNewItem->u1.zIndexedBy = sqlite3DbStrDup(db, pOldItem->u1.zIndexedBy);
+ }
+ pNewItem->pIBIndex = pOldItem->pIBIndex;
+ if( pNewItem->fg.isTabFunc ){
+ pNewItem->u1.pFuncArg =
+ sqlite3ExprListDup(db, pOldItem->u1.pFuncArg, flags);
+ }
+ pTab = pNewItem->pTab = pOldItem->pTab;
+ if( pTab ){
+ pTab->nTabRef++;
+ }
+ pNewItem->pSelect = sqlite3SelectDup(db, pOldItem->pSelect, flags);
+ pNewItem->pOn = sqlite3ExprDup(db, pOldItem->pOn, flags);
+ pNewItem->pUsing = sqlite3IdListDup(db, pOldItem->pUsing);
+ pNewItem->colUsed = pOldItem->colUsed;
+ }
+ return pNew;
+}
+SQLITE_PRIVATE IdList *sqlite3IdListDup(sqlite3 *db, IdList *p){
+ IdList *pNew;
+ int i;
+ assert( db!=0 );
+ if( p==0 ) return 0;
+ pNew = sqlite3DbMallocRawNN(db, sizeof(*pNew) );
+ if( pNew==0 ) return 0;
+ pNew->nId = p->nId;
+ pNew->a = sqlite3DbMallocRawNN(db, p->nId*sizeof(p->a[0]) );
+ if( pNew->a==0 ){
+ sqlite3DbFreeNN(db, pNew);
+ return 0;
+ }
+ /* Note that because the size of the allocation for p->a[] is not
+ ** necessarily a power of two, sqlite3IdListAppend() may not be called
+ ** on the duplicate created by this function. */
+ for(i=0; i<p->nId; i++){
+ struct IdList_item *pNewItem = &pNew->a[i];
+ struct IdList_item *pOldItem = &p->a[i];
+ pNewItem->zName = sqlite3DbStrDup(db, pOldItem->zName);
+ pNewItem->idx = pOldItem->idx;
+ }
+ return pNew;
+}
+SQLITE_PRIVATE Select *sqlite3SelectDup(sqlite3 *db, Select *pDup, int flags){
+ Select *pRet = 0;
+ Select *pNext = 0;
+ Select **pp = &pRet;
+ Select *p;
+
+ assert( db!=0 );
+ for(p=pDup; p; p=p->pPrior){
+ Select *pNew = sqlite3DbMallocRawNN(db, sizeof(*p) );
+ if( pNew==0 ) break;
+ pNew->pEList = sqlite3ExprListDup(db, p->pEList, flags);
+ pNew->pSrc = sqlite3SrcListDup(db, p->pSrc, flags);
+ pNew->pWhere = sqlite3ExprDup(db, p->pWhere, flags);
+ pNew->pGroupBy = sqlite3ExprListDup(db, p->pGroupBy, flags);
+ pNew->pHaving = sqlite3ExprDup(db, p->pHaving, flags);
+ pNew->pOrderBy = sqlite3ExprListDup(db, p->pOrderBy, flags);
+ pNew->op = p->op;
+ pNew->pNext = pNext;
+ pNew->pPrior = 0;
+ pNew->pLimit = sqlite3ExprDup(db, p->pLimit, flags);
+ pNew->iLimit = 0;
+ pNew->iOffset = 0;
+ pNew->selFlags = p->selFlags & ~SF_UsesEphemeral;
+ pNew->addrOpenEphm[0] = -1;
+ pNew->addrOpenEphm[1] = -1;
+ pNew->nSelectRow = p->nSelectRow;
+ pNew->pWith = withDup(db, p->pWith);
+#ifndef SQLITE_OMIT_WINDOWFUNC
+ pNew->pWin = 0;
+ pNew->pWinDefn = sqlite3WindowListDup(db, p->pWinDefn);
+ if( p->pWin && db->mallocFailed==0 ) gatherSelectWindows(pNew);
+#endif
+ pNew->selId = p->selId;
+ *pp = pNew;
+ pp = &pNew->pPrior;
+ pNext = pNew;
+ }
+
+ return pRet;
+}
+#else
+SQLITE_PRIVATE Select *sqlite3SelectDup(sqlite3 *db, Select *p, int flags){
+ assert( p==0 );
+ return 0;
+}
+#endif
+
+
+/*
+** Add a new element to the end of an expression list. If pList is
+** initially NULL, then create a new expression list.
+**
+** The pList argument must be either NULL or a pointer to an ExprList
+** obtained from a prior call to sqlite3ExprListAppend(). This routine
+** may not be used with an ExprList obtained from sqlite3ExprListDup().
+** Reason: This routine assumes that the number of slots in pList->a[]
+** is a power of two. That is true of the lists that
+** sqlite3ExprListAppend() returns, but it is not necessarily true of
+** the return value of sqlite3ExprListDup().
+**
+** If a memory allocation error occurs, the entire list is freed and
+** NULL is returned. If non-NULL is returned, then it is guaranteed
+** that the new entry was successfully appended.
+*/
+SQLITE_PRIVATE ExprList *sqlite3ExprListAppend(
+ Parse *pParse, /* Parsing context */
+ ExprList *pList, /* List to which to append. Might be NULL */
+ Expr *pExpr /* Expression to be appended. Might be NULL */
+){
+ struct ExprList_item *pItem;
+ sqlite3 *db = pParse->db;
+ assert( db!=0 );
+ if( pList==0 ){
+ pList = sqlite3DbMallocRawNN(db, sizeof(ExprList) );
+ if( pList==0 ){
+ goto no_mem;
+ }
+ pList->nExpr = 0;
+ }else if( (pList->nExpr & (pList->nExpr-1))==0 ){
+ ExprList *pNew;
+ pNew = sqlite3DbRealloc(db, pList,
+ sizeof(*pList)+(2*(sqlite3_int64)pList->nExpr-1)*sizeof(pList->a[0]));
+ if( pNew==0 ){
+ goto no_mem;
+ }
+ pList = pNew;
+ }
+ pItem = &pList->a[pList->nExpr++];
+ assert( offsetof(struct ExprList_item,zEName)==sizeof(pItem->pExpr) );
+ assert( offsetof(struct ExprList_item,pExpr)==0 );
+ memset(&pItem->zEName,0,sizeof(*pItem)-offsetof(struct ExprList_item,zEName));
+ pItem->pExpr = pExpr;
+ return pList;
+
+no_mem:
+ /* Avoid leaking memory if malloc has failed. */
+ sqlite3ExprDelete(db, pExpr);
+ sqlite3ExprListDelete(db, pList);
+ return 0;
+}
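+
+/* For illustration: the allocation is grown whenever nExpr is a power
+** of two, so the number of slots in pList->a[] doubles through
+** 1, 2, 4, 8, ... .  Appending the fifth element, for example, triggers
+** a realloc from 4 slots to 8.
+*/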
+
+/*
+** pColumns and pExpr form a vector assignment which is part of the SET
+** clause of an UPDATE statement. Like this:
+**
+** (a,b,c) = (expr1,expr2,expr3)
+** Or: (a,b,c) = (SELECT x,y,z FROM ....)
+**
+** For each term of the vector assignment, append new entries to the
+** expression list pList. In the case of a subquery on the RHS, append
+** TK_SELECT_COLUMN expressions.
+*/
+SQLITE_PRIVATE ExprList *sqlite3ExprListAppendVector(
+ Parse *pParse, /* Parsing context */
+ ExprList *pList, /* List to which to append. Might be NULL */
+ IdList *pColumns, /* List of names of LHS of the assignment */
+ Expr *pExpr /* Vector expression to be appended. Might be NULL */
+){
+ sqlite3 *db = pParse->db;
+ int n;
+ int i;
+ int iFirst = pList ? pList->nExpr : 0;
+ /* pColumns can only be NULL due to an OOM but an OOM will cause an
+ ** exit prior to this routine being invoked */
+ if( NEVER(pColumns==0) ) goto vector_append_error;
+ if( pExpr==0 ) goto vector_append_error;
+
+ /* If the RHS is a vector, then we can immediately check to see that
+ ** the size of the RHS and LHS match. But if the RHS is a SELECT,
+ ** wildcards ("*") in the result set of the SELECT must be expanded before
+ ** we can do the size check, so defer the size check until code generation.
+ */
+ if( pExpr->op!=TK_SELECT && pColumns->nId!=(n=sqlite3ExprVectorSize(pExpr)) ){
+ sqlite3ErrorMsg(pParse, "%d columns assigned %d values",
+ pColumns->nId, n);
+ goto vector_append_error;
+ }
+
+ for(i=0; i<pColumns->nId; i++){
+ Expr *pSubExpr = sqlite3ExprForVectorField(pParse, pExpr, i);
+ assert( pSubExpr!=0 || db->mallocFailed );
+ assert( pSubExpr==0 || pSubExpr->iTable==0 );
+ if( pSubExpr==0 ) continue;
+ pSubExpr->iTable = pColumns->nId;
+ pList = sqlite3ExprListAppend(pParse, pList, pSubExpr);
+ if( pList ){
+ assert( pList->nExpr==iFirst+i+1 );
+ pList->a[pList->nExpr-1].zEName = pColumns->a[i].zName;
+ pColumns->a[i].zName = 0;
+ }
+ }
+
+ if( !db->mallocFailed && pExpr->op==TK_SELECT && ALWAYS(pList!=0) ){
+ Expr *pFirst = pList->a[iFirst].pExpr;
+ assert( pFirst!=0 );
+ assert( pFirst->op==TK_SELECT_COLUMN );
+
+ /* Store the SELECT statement in pRight so it will be deleted when
+ ** sqlite3ExprListDelete() is called */
+ pFirst->pRight = pExpr;
+ pExpr = 0;
+
+ /* Remember the size of the LHS in iTable so that we can check that
+ ** the RHS and LHS sizes match during code generation. */
+ pFirst->iTable = pColumns->nId;
+ }
+
+vector_append_error:
+ sqlite3ExprUnmapAndDelete(pParse, pExpr);
+ sqlite3IdListDelete(db, pColumns);
+ return pList;
+}
+
+/*
+** Set the sort order for the last element on the given ExprList.
+*/
+SQLITE_PRIVATE void sqlite3ExprListSetSortOrder(ExprList *p, int iSortOrder, int eNulls){
+ struct ExprList_item *pItem;
+ if( p==0 ) return;
+ assert( p->nExpr>0 );
+
+ assert( SQLITE_SO_UNDEFINED<0 && SQLITE_SO_ASC==0 && SQLITE_SO_DESC>0 );
+ assert( iSortOrder==SQLITE_SO_UNDEFINED
+ || iSortOrder==SQLITE_SO_ASC
+ || iSortOrder==SQLITE_SO_DESC
+ );
+ assert( eNulls==SQLITE_SO_UNDEFINED
+ || eNulls==SQLITE_SO_ASC
+ || eNulls==SQLITE_SO_DESC
+ );
+
+ pItem = &p->a[p->nExpr-1];
+ assert( pItem->bNulls==0 );
+ if( iSortOrder==SQLITE_SO_UNDEFINED ){
+ iSortOrder = SQLITE_SO_ASC;
+ }
+ pItem->sortFlags = (u8)iSortOrder;
+
+ if( eNulls!=SQLITE_SO_UNDEFINED ){
+ pItem->bNulls = 1;
+ if( iSortOrder!=eNulls ){
+ pItem->sortFlags |= KEYINFO_ORDER_BIGNULL;
+ }
+ }
+}
+
+/*
+** Set the ExprList.a[].zEName element of the most recently added item
+** on the expression list.
+**
+** pList might be NULL following an OOM error. But pName should never be
+** NULL. If a memory allocation fails, the pParse->db->mallocFailed flag
+** is set.
+*/
+SQLITE_PRIVATE void sqlite3ExprListSetName(
+ Parse *pParse, /* Parsing context */
+ ExprList *pList, /* List to which to add the span. */
+ Token *pName, /* Name to be added */
+ int dequote /* True to cause the name to be dequoted */
+){
+ assert( pList!=0 || pParse->db->mallocFailed!=0 );
+ assert( pParse->eParseMode!=PARSE_MODE_UNMAP || dequote==0 );
+ if( pList ){
+ struct ExprList_item *pItem;
+ assert( pList->nExpr>0 );
+ pItem = &pList->a[pList->nExpr-1];
+ assert( pItem->zEName==0 );
+ assert( pItem->eEName==ENAME_NAME );
+ pItem->zEName = sqlite3DbStrNDup(pParse->db, pName->z, pName->n);
+ if( dequote ){
+ /* If dequote==0, then pName->z does not point to part of a DDL
+ ** statement handled by the parser. And so no token need be added
+ ** to the token-map. */
+ sqlite3Dequote(pItem->zEName);
+ if( IN_RENAME_OBJECT ){
+ sqlite3RenameTokenMap(pParse, (void*)pItem->zEName, pName);
+ }
+ }
+ }
+}
+
+/*
+** Set the span (stored in ExprList.a[].zEName with eEName==ENAME_SPAN)
+** of the most recently added item on the expression list.
+**
+** pList might be NULL following an OOM error. But zStart and zEnd should
+** never be NULL. If a memory allocation fails, the
+** pParse->db->mallocFailed flag is set.
+*/
+SQLITE_PRIVATE void sqlite3ExprListSetSpan(
+ Parse *pParse, /* Parsing context */
+ ExprList *pList, /* List to which to add the span. */
+ const char *zStart, /* Start of the span */
+ const char *zEnd /* End of the span */
+){
+ sqlite3 *db = pParse->db;
+ assert( pList!=0 || db->mallocFailed!=0 );
+ if( pList ){
+ struct ExprList_item *pItem = &pList->a[pList->nExpr-1];
+ assert( pList->nExpr>0 );
+ if( pItem->zEName==0 ){
+ pItem->zEName = sqlite3DbSpanDup(db, zStart, zEnd);
+ pItem->eEName = ENAME_SPAN;
+ }
+ }
+}
+
+/*
+** If the expression list pEList contains more than iLimit elements,
+** leave an error message in pParse.
+*/
+SQLITE_PRIVATE void sqlite3ExprListCheckLength(
+ Parse *pParse,
+ ExprList *pEList,
+ const char *zObject
+){
+ int mx = pParse->db->aLimit[SQLITE_LIMIT_COLUMN];
+ testcase( pEList && pEList->nExpr==mx );
+ testcase( pEList && pEList->nExpr==mx+1 );
+ if( pEList && pEList->nExpr>mx ){
+ sqlite3ErrorMsg(pParse, "too many columns in %s", zObject);
+ }
+}
+
+/*
+** Delete an entire expression list.
+*/
+static SQLITE_NOINLINE void exprListDeleteNN(sqlite3 *db, ExprList *pList){
+ int i = pList->nExpr;
+ struct ExprList_item *pItem = pList->a;
+ assert( pList->nExpr>0 );
+ do{
+ sqlite3ExprDelete(db, pItem->pExpr);
+ sqlite3DbFree(db, pItem->zEName);
+ pItem++;
+ }while( --i>0 );
+ sqlite3DbFreeNN(db, pList);
+}
+SQLITE_PRIVATE void sqlite3ExprListDelete(sqlite3 *db, ExprList *pList){
+ if( pList ) exprListDeleteNN(db, pList);
+}
+
+/*
+** Return the bitwise-OR of all Expr.flags fields in the given
+** ExprList.
+*/
+SQLITE_PRIVATE u32 sqlite3ExprListFlags(const ExprList *pList){
+ int i;
+ u32 m = 0;
+ assert( pList!=0 );
+ for(i=0; i<pList->nExpr; i++){
+ Expr *pExpr = pList->a[i].pExpr;
+ assert( pExpr!=0 );
+ m |= pExpr->flags;
+ }
+ return m;
+}
+
+/*
+** This is a SELECT-node callback for the expression walker that
+** always "fails". By "fail" in this case, we mean set
+** pWalker->eCode to zero and abort.
+**
+** This callback is used by multiple expression walkers.
+*/
+SQLITE_PRIVATE int sqlite3SelectWalkFail(Walker *pWalker, Select *NotUsed){
+ UNUSED_PARAMETER(NotUsed);
+ pWalker->eCode = 0;
+ return WRC_Abort;
+}
+
+/*
+** Check the input string to see if it is "true" or "false" (in any
+** letter case).
+**
+** If the string is.... Return
+** "true" EP_IsTrue
+** "false" EP_IsFalse
+** anything else 0
+*/
+SQLITE_PRIVATE u32 sqlite3IsTrueOrFalse(const char *zIn){
+ if( sqlite3StrICmp(zIn, "true")==0 ) return EP_IsTrue;
+ if( sqlite3StrICmp(zIn, "false")==0 ) return EP_IsFalse;
+ return 0;
+}
+
+
+/*
+** If the input expression is an ID with the name "true" or "false"
+** then convert it into a TK_TRUEFALSE term. Return non-zero if
+** the conversion happened, and zero if the expression is unaltered.
+*/
+SQLITE_PRIVATE int sqlite3ExprIdToTrueFalse(Expr *pExpr){
+ u32 v;
+ assert( pExpr->op==TK_ID || pExpr->op==TK_STRING );
+ if( !ExprHasProperty(pExpr, EP_Quoted)
+ && (v = sqlite3IsTrueOrFalse(pExpr->u.zToken))!=0
+ ){
+ pExpr->op = TK_TRUEFALSE;
+ ExprSetProperty(pExpr, v);
+ return 1;
+ }
+ return 0;
+}
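+
+/* For illustration: in the expression "y = true" the bare identifier
+** "true" becomes a TK_TRUEFALSE term, but the quoted identifier in
+** 'SELECT "true" FROM t' keeps EP_Quoted and is left alone, so it can
+** still resolve to a column named "true".
+*/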
+
+/*
+** The argument must be a TK_TRUEFALSE Expr node. Return 1 if it is TRUE
+** and 0 if it is FALSE.
+*/
+SQLITE_PRIVATE int sqlite3ExprTruthValue(const Expr *pExpr){
+ pExpr = sqlite3ExprSkipCollate((Expr*)pExpr);
+ assert( pExpr->op==TK_TRUEFALSE );
+ assert( sqlite3StrICmp(pExpr->u.zToken,"true")==0
+ || sqlite3StrICmp(pExpr->u.zToken,"false")==0 );
+ return pExpr->u.zToken[4]==0;
+}
+
+/*
+** If pExpr is an AND or OR expression, try to simplify it by eliminating
+** terms that are always true or false. Return the simplified expression.
+** Or return the original expression if no simplification is possible.
+**
+** Examples:
+**
+** (x<10) AND true => (x<10)
+** (x<10) AND false => false
+** (x<10) AND (y=22 OR false) => (x<10) AND (y=22)
+** (x<10) AND (y=22 OR true) => (x<10)
+** (y=22) OR true => true
+*/
+SQLITE_PRIVATE Expr *sqlite3ExprSimplifiedAndOr(Expr *pExpr){
+ assert( pExpr!=0 );
+ if( pExpr->op==TK_AND || pExpr->op==TK_OR ){
+ Expr *pRight = sqlite3ExprSimplifiedAndOr(pExpr->pRight);
+ Expr *pLeft = sqlite3ExprSimplifiedAndOr(pExpr->pLeft);
+ if( ExprAlwaysTrue(pLeft) || ExprAlwaysFalse(pRight) ){
+ pExpr = pExpr->op==TK_AND ? pRight : pLeft;
+ }else if( ExprAlwaysTrue(pRight) || ExprAlwaysFalse(pLeft) ){
+ pExpr = pExpr->op==TK_AND ? pLeft : pRight;
+ }
+ }
+ return pExpr;
+}
+
+
+/*
+** These routines are Walker callbacks used to check expressions to
+** see if they are "constant" for some definition of constant. The
+** Walker.eCode value determines the type of "constant" we are looking
+** for.
+**
+** These callback routines are used to implement the following:
+**
+** sqlite3ExprIsConstant() pWalker->eCode==1
+** sqlite3ExprIsConstantNotJoin() pWalker->eCode==2
+** sqlite3ExprIsTableConstant() pWalker->eCode==3
+** sqlite3ExprIsConstantOrFunction() pWalker->eCode==4 or 5
+**
+** In all cases, the callbacks set Walker.eCode=0 and abort if the expression
+** is found to not be a constant.
+**
+** sqlite3ExprIsConstantOrFunction() is used for evaluating DEFAULT
+** expressions in a CREATE TABLE statement. The Walker.eCode value is 5
+** when parsing an existing schema out of the sqlite_schema table and 4
+** when processing a new CREATE TABLE statement. A bound parameter raises
+** an error for new statements, but is silently converted
+** to NULL for existing schemas. This allows sqlite_schema tables that
+** contain a bound parameter because they were generated by older versions
+** of SQLite to be parsed by newer versions of SQLite without raising a
+** malformed schema error.
+*/
+static int exprNodeIsConstant(Walker *pWalker, Expr *pExpr){
+
+ /* If pWalker->eCode is 2 then any term of the expression that comes from
+ ** the ON or USING clauses of a left join disqualifies the expression
+ ** from being considered constant. */
+ if( pWalker->eCode==2 && ExprHasProperty(pExpr, EP_FromJoin) ){
+ pWalker->eCode = 0;
+ return WRC_Abort;
+ }
+
+ switch( pExpr->op ){
+ /* Consider functions to be constant if all their arguments are constant
+    ** and either pWalker->eCode is 4 or 5, or the function has the
+ ** SQLITE_FUNC_CONST flag. */
+ case TK_FUNCTION:
+ if( (pWalker->eCode>=4 || ExprHasProperty(pExpr,EP_ConstFunc))
+ && !ExprHasProperty(pExpr, EP_WinFunc)
+ ){
+ if( pWalker->eCode==5 ) ExprSetProperty(pExpr, EP_FromDDL);
+ return WRC_Continue;
+ }else{
+ pWalker->eCode = 0;
+ return WRC_Abort;
+ }
+ case TK_ID:
+ /* Convert "true" or "false" in a DEFAULT clause into the
+ ** appropriate TK_TRUEFALSE operator */
+ if( sqlite3ExprIdToTrueFalse(pExpr) ){
+ return WRC_Prune;
+ }
+ /* no break */ deliberate_fall_through
+ case TK_COLUMN:
+ case TK_AGG_FUNCTION:
+ case TK_AGG_COLUMN:
+ testcase( pExpr->op==TK_ID );
+ testcase( pExpr->op==TK_COLUMN );
+ testcase( pExpr->op==TK_AGG_FUNCTION );
+ testcase( pExpr->op==TK_AGG_COLUMN );
+ if( ExprHasProperty(pExpr, EP_FixedCol) && pWalker->eCode!=2 ){
+ return WRC_Continue;
+ }
+ if( pWalker->eCode==3 && pExpr->iTable==pWalker->u.iCur ){
+ return WRC_Continue;
+ }
+ /* no break */ deliberate_fall_through
+ case TK_IF_NULL_ROW:
+ case TK_REGISTER:
+ case TK_DOT:
+ testcase( pExpr->op==TK_REGISTER );
+ testcase( pExpr->op==TK_IF_NULL_ROW );
+ testcase( pExpr->op==TK_DOT );
+ pWalker->eCode = 0;
+ return WRC_Abort;
+ case TK_VARIABLE:
+ if( pWalker->eCode==5 ){
+ /* Silently convert bound parameters that appear inside of CREATE
+ ** statements into a NULL when parsing the CREATE statement text out
+ ** of the sqlite_schema table */
+ pExpr->op = TK_NULL;
+ }else if( pWalker->eCode==4 ){
+ /* A bound parameter in a CREATE statement that originates from
+ ** sqlite3_prepare() causes an error */
+ pWalker->eCode = 0;
+ return WRC_Abort;
+ }
+ /* no break */ deliberate_fall_through
+ default:
+ testcase( pExpr->op==TK_SELECT ); /* sqlite3SelectWalkFail() disallows */
+ testcase( pExpr->op==TK_EXISTS ); /* sqlite3SelectWalkFail() disallows */
+ return WRC_Continue;
+ }
+}
+static int exprIsConst(Expr *p, int initFlag, int iCur){
+ Walker w;
+ w.eCode = initFlag;
+ w.xExprCallback = exprNodeIsConstant;
+ w.xSelectCallback = sqlite3SelectWalkFail;
+#ifdef SQLITE_DEBUG
+ w.xSelectCallback2 = sqlite3SelectWalkAssert2;
+#endif
+ w.u.iCur = iCur;
+ sqlite3WalkExpr(&w, p);
+ return w.eCode;
+}
+
+/*
+** Walk an expression tree. Return non-zero if the expression is constant
+** and 0 if it involves variables or function calls.
+**
+** For the purposes of this function, a double-quoted string (ex: "abc")
+** is considered a variable but a single-quoted string (ex: 'abc') is
+** a constant.
+*/
+SQLITE_PRIVATE int sqlite3ExprIsConstant(Expr *p){
+ return exprIsConst(p, 1, 0);
+}
+
+/*
+** Walk an expression tree. Return non-zero if
+**
+** (1) the expression is constant, and
+**   (2) the expression does not originate in the ON or USING clause
+** of a LEFT JOIN, and
+** (3) the expression does not contain any EP_FixedCol TK_COLUMN
+** operands created by the constant propagation optimization.
+**
+** When this routine returns true, it indicates that the expression
+** can be added to the pParse->pConstExpr list and evaluated once when
+** the prepared statement starts up. See sqlite3ExprCodeRunJustOnce().
+*/
+SQLITE_PRIVATE int sqlite3ExprIsConstantNotJoin(Expr *p){
+ return exprIsConst(p, 2, 0);
+}
+
+/*
+** Walk an expression tree. Return non-zero if the expression is constant
+** for any single row of the table with cursor iCur. In other words, the
+** expression must not refer to any non-deterministic function nor any
+** table other than iCur.
+*/
+SQLITE_PRIVATE int sqlite3ExprIsTableConstant(Expr *p, int iCur){
+ return exprIsConst(p, 3, iCur);
+}
+
+
+/*
+** sqlite3WalkExpr() callback used by sqlite3ExprIsConstantOrGroupBy().
+*/
+static int exprNodeIsConstantOrGroupBy(Walker *pWalker, Expr *pExpr){
+ ExprList *pGroupBy = pWalker->u.pGroupBy;
+ int i;
+
+ /* Check if pExpr is identical to any GROUP BY term. If so, consider
+ ** it constant. */
+ for(i=0; i<pGroupBy->nExpr; i++){
+ Expr *p = pGroupBy->a[i].pExpr;
+ if( sqlite3ExprCompare(0, pExpr, p, -1)<2 ){
+ CollSeq *pColl = sqlite3ExprNNCollSeq(pWalker->pParse, p);
+ if( sqlite3IsBinary(pColl) ){
+ return WRC_Prune;
+ }
+ }
+ }
+
+ /* Check if pExpr is a sub-select. If so, consider it variable. */
+ if( ExprHasProperty(pExpr, EP_xIsSelect) ){
+ pWalker->eCode = 0;
+ return WRC_Abort;
+ }
+
+ return exprNodeIsConstant(pWalker, pExpr);
+}
+
+/*
+** Walk the expression tree passed as the first argument. Return non-zero
+** if the expression consists entirely of constants or copies of terms
+** in pGroupBy that sort with the BINARY collation sequence.
+**
+** This routine is used to determine if a term of the HAVING clause can
+** be promoted into the WHERE clause. In order for such a promotion to work,
+** the value of the HAVING clause term must be the same for all members of
+** a "group". The requirement that the GROUP BY term must be BINARY
+** assumes that no other collating sequence will have a finer-grained
+** grouping than binary. In other words, (A=B COLLATE binary) implies
+** A=B in every other collating sequence. The requirement that the
+** GROUP BY be BINARY is stricter than necessary. It would also work
+** to promote HAVING clauses that use the same alternative collating
+** sequence as the GROUP BY term, but that is much harder to check,
+** alternative collating sequences are uncommon, and this is only an
+** optimization, so we take the easy way out and simply require the
+** GROUP BY to use the BINARY collating sequence.
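+**
+** For example, in
+**
+**     SELECT a, sum(b) FROM t1 GROUP BY a HAVING a=2 AND sum(b)>5;
+**
+** the term "a=2" is a constant combined with a copy of the GROUP BY
+** term "a", so it can be promoted into the WHERE clause, whereas
+** "sum(b)>5" cannot. If the GROUP BY used NOCASE instead of BINARY,
+** values such as 'abc' and 'ABC' could land in the same group while
+** still comparing differently in a promoted HAVING term, which is why
+** BINARY is required.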
+*/
+SQLITE_PRIVATE int sqlite3ExprIsConstantOrGroupBy(Parse *pParse, Expr *p, ExprList *pGroupBy){
+ Walker w;
+ w.eCode = 1;
+ w.xExprCallback = exprNodeIsConstantOrGroupBy;
+ w.xSelectCallback = 0;
+ w.u.pGroupBy = pGroupBy;
+ w.pParse = pParse;
+ sqlite3WalkExpr(&w, p);
+ return w.eCode;
+}
+
+/*
+** Walk an expression tree for the DEFAULT field of a column definition
+** in a CREATE TABLE statement. Return non-zero if the expression is
+** acceptable for use as a DEFAULT. That is to say, return non-zero if
+** the expression is constant or a function call with constant arguments.
+** Return 0 if there are any variables.
+**
+** isInit is true when parsing from sqlite_schema. isInit is false when
+** processing a new CREATE TABLE statement. When isInit is true, parameters
+** (such as ? or $abc) in the expression are converted into NULL. When
+** isInit is false, parameters raise an error. Parameters should not be
+** allowed in a CREATE TABLE statement, but some legacy versions of SQLite
+** allowed it, so we need to support it when reading sqlite_schema for
+** backwards compatibility.
+**
+** If isInit is true, set EP_FromDDL on every TK_FUNCTION node.
+**
+** For the purposes of this function, a double-quoted string (ex: "abc")
+** is considered a variable but a single-quoted string (ex: 'abc') is
+** a constant.
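+**
+** For example:
+**
+**     CREATE TABLE t1(a, b DEFAULT (datetime('now')));  -- accepted
+**     CREATE TABLE t1(a, b DEFAULT (?1));   -- error when isInit is false;
+**                                           -- becomes NULL when isInit
+**                                           -- is true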
+*/
+SQLITE_PRIVATE int sqlite3ExprIsConstantOrFunction(Expr *p, u8 isInit){
+ assert( isInit==0 || isInit==1 );
+ return exprIsConst(p, 4+isInit, 0);
+}
+
+#ifdef SQLITE_ENABLE_CURSOR_HINTS
+/*
+** Walk an expression tree. Return 1 if the expression contains a
+** subquery of some kind. Return 0 if there are no subqueries.
+*/
+SQLITE_PRIVATE int sqlite3ExprContainsSubquery(Expr *p){
+ Walker w;
+ w.eCode = 1;
+ w.xExprCallback = sqlite3ExprWalkNoop;
+ w.xSelectCallback = sqlite3SelectWalkFail;
+#ifdef SQLITE_DEBUG
+ w.xSelectCallback2 = sqlite3SelectWalkAssert2;
+#endif
+ sqlite3WalkExpr(&w, p);
+ return w.eCode==0;
+}
+#endif
+
+/*
+** If the expression p codes a constant integer that is small enough
+** to fit in a 32-bit integer, return 1 and put the value of the integer
+** in *pValue. If the expression is not an integer or if it is too big
+** to fit in a signed 32-bit integer, return 0 and leave *pValue unchanged.
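+**
+** For example, the literals "123", "+123", and "-123" are all reported
+** as integers here, while "12.0" (not an integer) and "2147483648"
+** (too large for a signed 32-bit value) are not.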
+*/
+SQLITE_PRIVATE int sqlite3ExprIsInteger(Expr *p, int *pValue){
+ int rc = 0;
+ if( NEVER(p==0) ) return 0; /* Used to only happen following an OOM */
+
+ /* If an expression is an integer literal that fits in a signed 32-bit
+ ** integer, then the EP_IntValue flag will have already been set */
+ assert( p->op!=TK_INTEGER || (p->flags & EP_IntValue)!=0
+ || sqlite3GetInt32(p->u.zToken, &rc)==0 );
+
+ if( p->flags & EP_IntValue ){
+ *pValue = p->u.iValue;
+ return 1;
+ }
+ switch( p->op ){
+ case TK_UPLUS: {
+ rc = sqlite3ExprIsInteger(p->pLeft, pValue);
+ break;
+ }
+ case TK_UMINUS: {
+ int v;
+ if( sqlite3ExprIsInteger(p->pLeft, &v) ){
+ assert( v!=(-2147483647-1) );
+ *pValue = -v;
+ rc = 1;
+ }
+ break;
+ }
+ default: break;
+ }
+ return rc;
+}
+
+/*
+** Return FALSE if there is no chance that the expression can be NULL.
+**
+** If the expression might be NULL or if the expression is too complex
+** to tell, return TRUE.
+**
+** This routine is used as an optimization, to skip OP_IsNull opcodes
+** when we know that a value cannot be NULL. Hence, a false positive
+** (returning TRUE when in fact the expression can never be NULL) might
+** be a small performance hit but is otherwise harmless. On the other
+** hand, a false negative (returning FALSE when the result could be NULL)
+** will likely result in an incorrect answer. So when in doubt, return
+** TRUE.
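+**
+** For example, the literal 5 can never be NULL, but a column reference
+** can be NULL unless the column carries a NOT NULL constraint.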
+*/
+SQLITE_PRIVATE int sqlite3ExprCanBeNull(const Expr *p){
+ u8 op;
+ while( p->op==TK_UPLUS || p->op==TK_UMINUS ){
+ p = p->pLeft;
+ }
+ op = p->op;
+ if( op==TK_REGISTER ) op = p->op2;
+ switch( op ){
+ case TK_INTEGER:
+ case TK_STRING:
+ case TK_FLOAT:
+ case TK_BLOB:
+ return 0;
+ case TK_COLUMN:
+ return ExprHasProperty(p, EP_CanBeNull) ||
+ p->y.pTab==0 || /* Reference to column of index on expression */
+ (p->iColumn>=0
+ && ALWAYS(p->y.pTab->aCol!=0) /* Defense against OOM problems */
+ && p->y.pTab->aCol[p->iColumn].notNull==0);
+ default:
+ return 1;
+ }
+}
+
+/*
+** Return TRUE if the given expression is a constant which would be
+** unchanged by OP_Affinity with the affinity given in the second
+** argument.
+**
+** This routine is used to determine if the OP_Affinity operation
+** can be omitted. When in doubt return FALSE. A false negative
+** is harmless. A false positive, however, can result in the wrong
+** answer.
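+**
+** For example, the integer literal 5 is unchanged by NUMERIC affinity,
+** so the OP_Affinity may be omitted, but the text literal '5' would be
+** converted into a number, so the OP_Affinity must remain.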
+*/
+SQLITE_PRIVATE int sqlite3ExprNeedsNoAffinityChange(const Expr *p, char aff){
+ u8 op;
+ int unaryMinus = 0;
+ if( aff==SQLITE_AFF_BLOB ) return 1;
+ while( p->op==TK_UPLUS || p->op==TK_UMINUS ){
+ if( p->op==TK_UMINUS ) unaryMinus = 1;
+ p = p->pLeft;
+ }
+ op = p->op;
+ if( op==TK_REGISTER ) op = p->op2;
+ switch( op ){
+ case TK_INTEGER: {
+ return aff>=SQLITE_AFF_NUMERIC;
+ }
+ case TK_FLOAT: {
+ return aff>=SQLITE_AFF_NUMERIC;
+ }
+ case TK_STRING: {
+ return !unaryMinus && aff==SQLITE_AFF_TEXT;
+ }
+ case TK_BLOB: {
+ return !unaryMinus;
+ }
+ case TK_COLUMN: {
+ assert( p->iTable>=0 ); /* p cannot be part of a CHECK constraint */
+ return aff>=SQLITE_AFF_NUMERIC && p->iColumn<0;
+ }
+ default: {
+ return 0;
+ }
+ }
+}
+
+/*
+** Return TRUE if the given string is a row-id column name.
+*/
+SQLITE_PRIVATE int sqlite3IsRowid(const char *z){
+ if( sqlite3StrICmp(z, "_ROWID_")==0 ) return 1;
+ if( sqlite3StrICmp(z, "ROWID")==0 ) return 1;
+ if( sqlite3StrICmp(z, "OID")==0 ) return 1;
+ return 0;
+}
+
+/*
+** pX is the RHS of an IN operator. If pX is a SELECT statement
+** that can be simplified to a direct table access, then return
+** a pointer to the SELECT statement. If pX is not a SELECT statement,
+** or if the SELECT statement needs to be manifested into a transient
+** table, then return NULL.
+*/
+#ifndef SQLITE_OMIT_SUBQUERY
+static Select *isCandidateForInOpt(Expr *pX){
+ Select *p;
+ SrcList *pSrc;
+ ExprList *pEList;
+ Table *pTab;
+ int i;
+ if( !ExprHasProperty(pX, EP_xIsSelect) ) return 0; /* Not a subquery */
+ if( ExprHasProperty(pX, EP_VarSelect) ) return 0; /* Correlated subq */
+ p = pX->x.pSelect;
+ if( p->pPrior ) return 0; /* Not a compound SELECT */
+ if( p->selFlags & (SF_Distinct|SF_Aggregate) ){
+ testcase( (p->selFlags & (SF_Distinct|SF_Aggregate))==SF_Distinct );
+ testcase( (p->selFlags & (SF_Distinct|SF_Aggregate))==SF_Aggregate );
+ return 0; /* No DISTINCT keyword and no aggregate functions */
+ }
+ assert( p->pGroupBy==0 ); /* Has no GROUP BY clause */
+ if( p->pLimit ) return 0; /* Has no LIMIT clause */
+ if( p->pWhere ) return 0; /* Has no WHERE clause */
+ pSrc = p->pSrc;
+ assert( pSrc!=0 );
+ if( pSrc->nSrc!=1 ) return 0; /* Single term in FROM clause */
+ if( pSrc->a[0].pSelect ) return 0; /* FROM is not a subquery or view */
+ pTab = pSrc->a[0].pTab;
+ assert( pTab!=0 );
+ assert( pTab->pSelect==0 ); /* FROM clause is not a view */
+ if( IsVirtual(pTab) ) return 0; /* FROM clause not a virtual table */
+ pEList = p->pEList;
+ assert( pEList!=0 );
+ /* All SELECT results must be columns. */
+ for(i=0; i<pEList->nExpr; i++){
+ Expr *pRes = pEList->a[i].pExpr;
+ if( pRes->op!=TK_COLUMN ) return 0;
+ assert( pRes->iTable==pSrc->a[0].iCursor ); /* Not a correlated subquery */
+ }
+ return p;
+}
+#endif /* SQLITE_OMIT_SUBQUERY */
+
+#ifndef SQLITE_OMIT_SUBQUERY
+/*
+** Generate code that checks the left-most column of index table iCur to see if
+** it contains any NULL entries. Cause the register at regHasNull to be set
+** to a non-NULL value if iCur contains no NULLs. Cause register regHasNull
+** to be set to NULL if iCur contains one or more NULL values.
+*/
+static void sqlite3SetHasNullFlag(Vdbe *v, int iCur, int regHasNull){
+ int addr1;
+ sqlite3VdbeAddOp2(v, OP_Integer, 0, regHasNull);
+ addr1 = sqlite3VdbeAddOp1(v, OP_Rewind, iCur); VdbeCoverage(v);
+ sqlite3VdbeAddOp3(v, OP_Column, iCur, 0, regHasNull);
+ sqlite3VdbeChangeP5(v, OPFLAG_TYPEOFARG);
+ VdbeComment((v, "first_entry_in(%d)", iCur));
+ sqlite3VdbeJumpHere(v, addr1);
+}
+#endif
+
+
+#ifndef SQLITE_OMIT_SUBQUERY
+/*
+** The argument is an IN operator with a list (not a subquery) on the
+** right-hand side. Return TRUE if that list is constant.
+*/
+static int sqlite3InRhsIsConstant(Expr *pIn){
+ Expr *pLHS;
+ int res;
+ assert( !ExprHasProperty(pIn, EP_xIsSelect) );
+ pLHS = pIn->pLeft;
+ pIn->pLeft = 0;
+ res = sqlite3ExprIsConstant(pIn);
+ pIn->pLeft = pLHS;
+ return res;
+}
+#endif
+
+/*
+** This function is used by the implementation of the IN (...) operator.
+** The pX parameter is the expression on the RHS of the IN operator, which
+** might be either a list of expressions or a subquery.
+**
+** The job of this routine is to find or create a b-tree object that can
+** be used either to test for membership in the RHS set or to iterate through
+** all members of the RHS set, skipping duplicates.
+**
+** A cursor is opened on the b-tree object that is the RHS of the IN operator
+** and pX->iTable is set to the index of that cursor.
+**
+** The returned value of this function indicates the b-tree type, as follows:
+**
+** IN_INDEX_ROWID - The cursor was opened on a database table.
+** IN_INDEX_INDEX_ASC - The cursor was opened on an ascending index.
+** IN_INDEX_INDEX_DESC - The cursor was opened on a descending index.
+** IN_INDEX_EPH - The cursor was opened on a specially created and
+** populated ephemeral table.
+** IN_INDEX_NOOP - No cursor was allocated. The IN operator must be
+** implemented as a sequence of comparisons.
+**
+** An existing b-tree might be used if the RHS expression pX is a simple
+** subquery such as:
+**
+** SELECT <column1>, <column2>... FROM <table>
+**
+** If the RHS of the IN operator is a list or a more complex subquery, then
+** an ephemeral table might need to be generated from the RHS and then
+** pX->iTable made to point to the ephemeral table instead of an
+** existing table.
+**
+** The inFlags parameter must contain, at a minimum, one of the bits
+** IN_INDEX_MEMBERSHIP or IN_INDEX_LOOP but not both. If inFlags contains
+** IN_INDEX_MEMBERSHIP, then the generated table will be used for a fast
+** membership test. When the IN_INDEX_LOOP bit is set, the IN index will
+** be used to loop over all values of the RHS of the IN operator.
+**
+** When IN_INDEX_LOOP is used (and the b-tree will be used to iterate
+** through the set members) then the b-tree must not contain duplicates.
+** An ephemeral table will be created unless the selected columns are guaranteed
+** to be unique - either because it is an INTEGER PRIMARY KEY or due to
+** a UNIQUE constraint or index.
+**
+** When IN_INDEX_MEMBERSHIP is used (and the b-tree will be used
+** for fast set membership tests) then an ephemeral table must
+** be used unless <columns> is a single INTEGER PRIMARY KEY column or an
+** index can be found with the specified <columns> as its left-most columns.
+**
+** If the IN_INDEX_NOOP_OK and IN_INDEX_MEMBERSHIP are both set and
+** if the RHS of the IN operator is a list (not a subquery) then this
+** routine might decide that creating an ephemeral b-tree for membership
+** testing is too expensive and return IN_INDEX_NOOP. In that case, the
+** calling routine should implement the IN operator using a sequence
+** of Eq or Ne comparison operations.
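+**
+** For example, "x IN (1,2)" is typically coded as a pair of comparisons
+** against the values 1 and 2 rather than by building an ephemeral b-tree.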
+**
+** When the b-tree is being used for membership tests, the calling function
+** might need to know whether or not the RHS side of the IN operator
+** contains a NULL. If prRhsHasNull is not a NULL pointer and
+** if there is any chance that the (...) might contain a NULL value at
+** runtime, then a register is allocated and the register number written
+** to *prRhsHasNull. If there is no chance that the (...) contains a
+** NULL value, then *prRhsHasNull is left unchanged.
+**
+** If a register is allocated and its location stored in *prRhsHasNull, then
+** the value in that register will be NULL if the b-tree contains one or more
+** NULL values, and it will be some non-NULL value if the b-tree contains no
+** NULL values.
+**
+** If the aiMap parameter is not NULL, it must point to an array containing
+** one element for each column returned by the SELECT statement on the RHS
+** of the IN(...) operator. The i'th entry of the array is populated with the
+** offset of the index column that matches the i'th column returned by the
+** SELECT. For example, if the expression and selected index are:
+**
+** (?,?,?) IN (SELECT a, b, c FROM t1)
+** CREATE INDEX i1 ON t1(b, c, a);
+**
+** then aiMap[] is populated with {2, 0, 1}.
+*/
+#ifndef SQLITE_OMIT_SUBQUERY
+SQLITE_PRIVATE int sqlite3FindInIndex(
+ Parse *pParse, /* Parsing context */
+ Expr *pX, /* The IN expression */
+ u32 inFlags, /* IN_INDEX_LOOP, _MEMBERSHIP, and/or _NOOP_OK */
+ int *prRhsHasNull, /* Register holding NULL status. See notes */
+ int *aiMap, /* Mapping from Index fields to RHS fields */
+ int *piTab /* OUT: index to use */
+){
+ Select *p; /* SELECT to the right of IN operator */
+ int eType = 0; /* Type of RHS table. IN_INDEX_* */
+ int iTab = pParse->nTab++; /* Cursor of the RHS table */
+ int mustBeUnique; /* True if RHS must be unique */
+ Vdbe *v = sqlite3GetVdbe(pParse); /* Virtual machine being coded */
+
+ assert( pX->op==TK_IN );
+ mustBeUnique = (inFlags & IN_INDEX_LOOP)!=0;
+
+ /* If the RHS of this IN(...) operator is a SELECT, and if it matters
+ ** whether or not the SELECT result contains NULL values, check whether
+ ** or not NULL is actually possible (it may not be, for example, due
+ ** to NOT NULL constraints in the schema). If no NULL values are possible,
+ ** set prRhsHasNull to 0 before continuing. */
+ if( prRhsHasNull && (pX->flags & EP_xIsSelect) ){
+ int i;
+ ExprList *pEList = pX->x.pSelect->pEList;
+ for(i=0; i<pEList->nExpr; i++){
+ if( sqlite3ExprCanBeNull(pEList->a[i].pExpr) ) break;
+ }
+ if( i==pEList->nExpr ){
+ prRhsHasNull = 0;
+ }
+ }
+
+ /* Check to see if an existing table or index can be used to
+ ** satisfy the query. This is preferable to generating a new
+ ** ephemeral table. */
+ if( pParse->nErr==0 && (p = isCandidateForInOpt(pX))!=0 ){
+ sqlite3 *db = pParse->db; /* Database connection */
+ Table *pTab; /* Table <table>. */
+ int iDb; /* Database idx for pTab */
+ ExprList *pEList = p->pEList;
+ int nExpr = pEList->nExpr;
+
+ assert( p->pEList!=0 ); /* Because of isCandidateForInOpt(p) */
+ assert( p->pEList->a[0].pExpr!=0 ); /* Because of isCandidateForInOpt(p) */
+ assert( p->pSrc!=0 ); /* Because of isCandidateForInOpt(p) */
+ pTab = p->pSrc->a[0].pTab;
+
+ /* Code an OP_Transaction and OP_TableLock for <table>. */
+ iDb = sqlite3SchemaToIndex(db, pTab->pSchema);
+ assert( iDb>=0 && iDb<SQLITE_MAX_ATTACHED );
+ sqlite3CodeVerifySchema(pParse, iDb);
+ sqlite3TableLock(pParse, iDb, pTab->tnum, 0, pTab->zName);
+
+ assert(v); /* sqlite3GetVdbe() has always been previously called */
+ if( nExpr==1 && pEList->a[0].pExpr->iColumn<0 ){
+ /* The "x IN (SELECT rowid FROM table)" case */
+ int iAddr = sqlite3VdbeAddOp0(v, OP_Once);
+ VdbeCoverage(v);
+
+ sqlite3OpenTable(pParse, iTab, iDb, pTab, OP_OpenRead);
+ eType = IN_INDEX_ROWID;
+ ExplainQueryPlan((pParse, 0,
+ "USING ROWID SEARCH ON TABLE %s FOR IN-OPERATOR",pTab->zName));
+ sqlite3VdbeJumpHere(v, iAddr);
+ }else{
+ Index *pIdx; /* Iterator variable */
+ int affinity_ok = 1;
+ int i;
+
+ /* Check that the affinity that will be used to perform each
+ ** comparison is the same as the affinity of each column in table
+ ** on the RHS of the IN operator. If it is not, it is not possible to
+ ** use any index of the RHS table. */
+ for(i=0; i<nExpr && affinity_ok; i++){
+ Expr *pLhs = sqlite3VectorFieldSubexpr(pX->pLeft, i);
+ int iCol = pEList->a[i].pExpr->iColumn;
+ char idxaff = sqlite3TableColumnAffinity(pTab,iCol); /* RHS table */
+ char cmpaff = sqlite3CompareAffinity(pLhs, idxaff);
+ testcase( cmpaff==SQLITE_AFF_BLOB );
+ testcase( cmpaff==SQLITE_AFF_TEXT );
+ switch( cmpaff ){
+ case SQLITE_AFF_BLOB:
+ break;
+ case SQLITE_AFF_TEXT:
+ /* sqlite3CompareAffinity() only returns TEXT if one side or the
+ ** other has no affinity and the other side is TEXT. Hence,
+ ** the only way for cmpaff to be TEXT is for idxaff to be TEXT
+ ** and for the term on the LHS of the IN to have no affinity. */
+ assert( idxaff==SQLITE_AFF_TEXT );
+ break;
+ default:
+ affinity_ok = sqlite3IsNumericAffinity(idxaff);
+ }
+ }
+
+ if( affinity_ok ){
+ /* Search for an existing index that will work for this IN operator */
+ for(pIdx=pTab->pIndex; pIdx && eType==0; pIdx=pIdx->pNext){
+ Bitmask colUsed; /* Columns of the index used */
+ Bitmask mCol; /* Mask for the current column */
+ if( pIdx->nColumn<nExpr ) continue;
+ if( pIdx->pPartIdxWhere!=0 ) continue;
+ /* Maximum nColumn is BMS-2, not BMS-1, so that we can compute
+ ** BITMASK(nExpr) without overflowing */
+ testcase( pIdx->nColumn==BMS-2 );
+ testcase( pIdx->nColumn==BMS-1 );
+ if( pIdx->nColumn>=BMS-1 ) continue;
+ if( mustBeUnique ){
+ if( pIdx->nKeyCol>nExpr
+ ||(pIdx->nColumn>nExpr && !IsUniqueIndex(pIdx))
+ ){
+ continue; /* This index is not unique over the IN RHS columns */
+ }
+ }
+
+ colUsed = 0; /* Columns of index used so far */
+ for(i=0; i<nExpr; i++){
+ Expr *pLhs = sqlite3VectorFieldSubexpr(pX->pLeft, i);
+ Expr *pRhs = pEList->a[i].pExpr;
+ CollSeq *pReq = sqlite3BinaryCompareCollSeq(pParse, pLhs, pRhs);
+ int j;
+
+ assert( pReq!=0 || pRhs->iColumn==XN_ROWID || pParse->nErr );
+ for(j=0; j<nExpr; j++){
+ if( pIdx->aiColumn[j]!=pRhs->iColumn ) continue;
+ assert( pIdx->azColl[j] );
+ if( pReq!=0 && sqlite3StrICmp(pReq->zName, pIdx->azColl[j])!=0 ){
+ continue;
+ }
+ break;
+ }
+ if( j==nExpr ) break;
+ mCol = MASKBIT(j);
+ if( mCol & colUsed ) break; /* Each column used only once */
+ colUsed |= mCol;
+ if( aiMap ) aiMap[i] = j;
+ }
+
+ assert( i==nExpr || colUsed!=(MASKBIT(nExpr)-1) );
+ if( colUsed==(MASKBIT(nExpr)-1) ){
+ /* If we reach this point, that means the index pIdx is usable */
+ int iAddr = sqlite3VdbeAddOp0(v, OP_Once); VdbeCoverage(v);
+ ExplainQueryPlan((pParse, 0,
+ "USING INDEX %s FOR IN-OPERATOR",pIdx->zName));
+ sqlite3VdbeAddOp3(v, OP_OpenRead, iTab, pIdx->tnum, iDb);
+ sqlite3VdbeSetP4KeyInfo(pParse, pIdx);
+ VdbeComment((v, "%s", pIdx->zName));
+ assert( IN_INDEX_INDEX_DESC == IN_INDEX_INDEX_ASC+1 );
+ eType = IN_INDEX_INDEX_ASC + pIdx->aSortOrder[0];
+
+ if( prRhsHasNull ){
+#ifdef SQLITE_ENABLE_COLUMN_USED_MASK
+ i64 mask = (1<<nExpr)-1;
+ sqlite3VdbeAddOp4Dup8(v, OP_ColumnsUsed,
+ iTab, 0, 0, (u8*)&mask, P4_INT64);
+#endif
+ *prRhsHasNull = ++pParse->nMem;
+ if( nExpr==1 ){
+ sqlite3SetHasNullFlag(v, iTab, *prRhsHasNull);
+ }
+ }
+ sqlite3VdbeJumpHere(v, iAddr);
+ }
+ } /* End loop over indexes */
+ } /* End if( affinity_ok ) */
+ } /* End if not a rowid index */
+ } /* End attempt to optimize using an index */
+
+ /* If no preexisting index is available for the IN clause
+ ** and IN_INDEX_NOOP is an allowed reply
+ ** and the RHS of the IN operator is a list, not a subquery
+ ** and the RHS is not constant or has two or fewer terms,
+ ** then it is not worth creating an ephemeral table to evaluate
+ ** the IN operator, so return IN_INDEX_NOOP.
+ */
+ if( eType==0
+ && (inFlags & IN_INDEX_NOOP_OK)
+ && !ExprHasProperty(pX, EP_xIsSelect)
+ && (!sqlite3InRhsIsConstant(pX) || pX->x.pList->nExpr<=2)
+ ){
+ eType = IN_INDEX_NOOP;
+ }
+
+ if( eType==0 ){
+ /* Could not find an existing table or index to use as the RHS b-tree.
+ ** We will have to generate an ephemeral table to do the job.
+ */
+ u32 savedNQueryLoop = pParse->nQueryLoop;
+ int rMayHaveNull = 0;
+ eType = IN_INDEX_EPH;
+ if( inFlags & IN_INDEX_LOOP ){
+ pParse->nQueryLoop = 0;
+ }else if( prRhsHasNull ){
+ *prRhsHasNull = rMayHaveNull = ++pParse->nMem;
+ }
+ assert( pX->op==TK_IN );
+ sqlite3CodeRhsOfIN(pParse, pX, iTab);
+ if( rMayHaveNull ){
+ sqlite3SetHasNullFlag(v, iTab, rMayHaveNull);
+ }
+ pParse->nQueryLoop = savedNQueryLoop;
+ }
+
+ if( aiMap && eType!=IN_INDEX_INDEX_ASC && eType!=IN_INDEX_INDEX_DESC ){
+ int i, n;
+ n = sqlite3ExprVectorSize(pX->pLeft);
+ for(i=0; i<n; i++) aiMap[i] = i;
+ }
+ *piTab = iTab;
+ return eType;
+}
+#endif
+
+#ifndef SQLITE_OMIT_SUBQUERY
+/*
+** Argument pExpr is an (?, ?...) IN(...) expression. This
+** function allocates and returns a nul-terminated string containing
+** the affinities to be used for each column of the comparison.
+**
+** It is the responsibility of the caller to ensure that the returned
+** string is eventually freed using sqlite3DbFree().
+*/
+static char *exprINAffinity(Parse *pParse, Expr *pExpr){
+ Expr *pLeft = pExpr->pLeft;
+ int nVal = sqlite3ExprVectorSize(pLeft);
+ Select *pSelect = (pExpr->flags & EP_xIsSelect) ? pExpr->x.pSelect : 0;
+ char *zRet;
+
+ assert( pExpr->op==TK_IN );
+ zRet = sqlite3DbMallocRaw(pParse->db, nVal+1);
+ if( zRet ){
+ int i;
+ for(i=0; i<nVal; i++){
+ Expr *pA = sqlite3VectorFieldSubexpr(pLeft, i);
+ char a = sqlite3ExprAffinity(pA);
+ if( pSelect ){
+ zRet[i] = sqlite3CompareAffinity(pSelect->pEList->a[i].pExpr, a);
+ }else{
+ zRet[i] = a;
+ }
+ }
+ zRet[nVal] = '\0';
+ }
+ return zRet;
+}
+#endif
+
+#ifndef SQLITE_OMIT_SUBQUERY
+/*
+** Load the Parse object passed as the first argument with an error
+** message of the form:
+**
+** "sub-select returns N columns - expected M"
+*/
+SQLITE_PRIVATE void sqlite3SubselectError(Parse *pParse, int nActual, int nExpect){
+ if( pParse->nErr==0 ){
+ const char *zFmt = "sub-select returns %d columns - expected %d";
+ sqlite3ErrorMsg(pParse, zFmt, nActual, nExpect);
+ }
+}
+#endif
+
+/*
+** Expression pExpr is a vector that has been used in a context where
+** it is not permitted. If pExpr is a sub-select vector, this routine
+** loads the Parse object with a message of the form:
+**
+** "sub-select returns N columns - expected 1"
+**
+** Or, if it is a regular scalar vector:
+**
+** "row value misused"
+*/
+SQLITE_PRIVATE void sqlite3VectorErrorMsg(Parse *pParse, Expr *pExpr){
+#ifndef SQLITE_OMIT_SUBQUERY
+ if( pExpr->flags & EP_xIsSelect ){
+ sqlite3SubselectError(pParse, pExpr->x.pSelect->pEList->nExpr, 1);
+ }else
+#endif
+ {
+ sqlite3ErrorMsg(pParse, "row value misused");
+ }
+}
+
+#ifndef SQLITE_OMIT_SUBQUERY
+/*
+** Generate code that will construct an ephemeral table containing all terms
+** in the RHS of an IN operator. The IN operator can be in either of two
+** forms:
+**
+** x IN (4,5,11) -- IN operator with list on right-hand side
+** x IN (SELECT a FROM b) -- IN operator with subquery on the right
+**
+** The pExpr parameter is the IN operator. The cursor number for the
+** constructed ephemeral table is returned. The first time the ephemeral
+** table is computed, the cursor number is also stored in pExpr->iTable,
+** however the cursor number returned might not be the same, as it might
+** have been duplicated using OP_OpenDup.
+**
+** If the LHS expression ("x" in the examples) is a column value, or
+** the SELECT statement returns a column value, then the affinity of that
+** column is used to build the index keys. If both 'x' and the
+** SELECT... statement are columns, then numeric affinity is used
+** if either column has NUMERIC or INTEGER affinity. If neither
+** 'x' nor the SELECT... statement are columns, then numeric affinity
+** is used.
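+**
+** For example, in "x IN (SELECT a FROM t1)" where x is an INTEGER column
+** and a is a TEXT column, numeric affinity is used to build the keys.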
+*/
+SQLITE_PRIVATE void sqlite3CodeRhsOfIN(
+ Parse *pParse, /* Parsing context */
+ Expr *pExpr, /* The IN operator */
+ int iTab /* Use this cursor number */
+){
+ int addrOnce = 0; /* Address of the OP_Once instruction at top */
+ int addr; /* Address of OP_OpenEphemeral instruction */
+ Expr *pLeft; /* the LHS of the IN operator */
+ KeyInfo *pKeyInfo = 0; /* Key information */
+ int nVal; /* Size of vector pLeft */
+ Vdbe *v; /* The prepared statement under construction */
+
+ v = pParse->pVdbe;
+ assert( v!=0 );
+
+ /* The evaluation of the IN must be repeated every time it
+ ** is encountered if any of the following is true:
+ **
+ ** * The right-hand side is a correlated subquery
+ ** * The right-hand side is an expression list containing variables
+ ** * We are inside a trigger
+ **
+ ** If all of the above are false, then we can compute the RHS just once
+ ** and reuse it many times.
+ */
+ if( !ExprHasProperty(pExpr, EP_VarSelect) && pParse->iSelfTab==0 ){
+ /* Reuse of the RHS is allowed */
+ /* If this routine has already been coded, the previous code
+ ** might not have been invoked yet, so invoke it now as a subroutine.
+ */
+ if( ExprHasProperty(pExpr, EP_Subrtn) ){
+ addrOnce = sqlite3VdbeAddOp0(v, OP_Once); VdbeCoverage(v);
+ if( ExprHasProperty(pExpr, EP_xIsSelect) ){
+ ExplainQueryPlan((pParse, 0, "REUSE LIST SUBQUERY %d",
+ pExpr->x.pSelect->selId));
+ }
+ sqlite3VdbeAddOp2(v, OP_Gosub, pExpr->y.sub.regReturn,
+ pExpr->y.sub.iAddr);
+ sqlite3VdbeAddOp2(v, OP_OpenDup, iTab, pExpr->iTable);
+ sqlite3VdbeJumpHere(v, addrOnce);
+ return;
+ }
+
+ /* Begin coding the subroutine */
+ ExprSetProperty(pExpr, EP_Subrtn);
+ assert( !ExprHasProperty(pExpr, EP_TokenOnly|EP_Reduced) );
+ pExpr->y.sub.regReturn = ++pParse->nMem;
+ pExpr->y.sub.iAddr =
+ sqlite3VdbeAddOp2(v, OP_Integer, 0, pExpr->y.sub.regReturn) + 1;
+ VdbeComment((v, "return address"));
+
+ addrOnce = sqlite3VdbeAddOp0(v, OP_Once); VdbeCoverage(v);
+ }
+
+ /* Check to see if this is a vector IN operator */
+ pLeft = pExpr->pLeft;
+ nVal = sqlite3ExprVectorSize(pLeft);
+
+ /* Construct the ephemeral table that will contain the content of
+ ** the RHS of the IN operator.
+ */
+ pExpr->iTable = iTab;
+ addr = sqlite3VdbeAddOp2(v, OP_OpenEphemeral, pExpr->iTable, nVal);
+#ifdef SQLITE_ENABLE_EXPLAIN_COMMENTS
+ if( ExprHasProperty(pExpr, EP_xIsSelect) ){
+ VdbeComment((v, "Result of SELECT %u", pExpr->x.pSelect->selId));
+ }else{
+ VdbeComment((v, "RHS of IN operator"));
+ }
+#endif
+ pKeyInfo = sqlite3KeyInfoAlloc(pParse->db, nVal, 1);
+
+ if( ExprHasProperty(pExpr, EP_xIsSelect) ){
+ /* Case 1: expr IN (SELECT ...)
+ **
+ ** Generate code to write the results of the select into the temporary
+ ** table allocated and opened above.
+ */
+ Select *pSelect = pExpr->x.pSelect;
+ ExprList *pEList = pSelect->pEList;
+
+ ExplainQueryPlan((pParse, 1, "%sLIST SUBQUERY %d",
+ addrOnce?"":"CORRELATED ", pSelect->selId
+ ));
+ /* If the LHS and RHS of the IN operator do not have the same number
+ ** of columns, that error will have been caught long before we reach
+ ** this point. */
+ if( ALWAYS(pEList->nExpr==nVal) ){
+ SelectDest dest;
+ int i;
+ sqlite3SelectDestInit(&dest, SRT_Set, iTab);
+ dest.zAffSdst = exprINAffinity(pParse, pExpr);
+ pSelect->iLimit = 0;
+ testcase( pSelect->selFlags & SF_Distinct );
+ testcase( pKeyInfo==0 ); /* Caused by OOM in sqlite3KeyInfoAlloc() */
+ if( sqlite3Select(pParse, pSelect, &dest) ){
+ sqlite3DbFree(pParse->db, dest.zAffSdst);
+ sqlite3KeyInfoUnref(pKeyInfo);
+ return;
+ }
+ sqlite3DbFree(pParse->db, dest.zAffSdst);
+ assert( pKeyInfo!=0 ); /* OOM will cause exit after sqlite3Select() */
+ assert( pEList!=0 );
+ assert( pEList->nExpr>0 );
+ assert( sqlite3KeyInfoIsWriteable(pKeyInfo) );
+ for(i=0; i<nVal; i++){
+ Expr *p = sqlite3VectorFieldSubexpr(pLeft, i);
+ pKeyInfo->aColl[i] = sqlite3BinaryCompareCollSeq(
+ pParse, p, pEList->a[i].pExpr
+ );
+ }
+ }
+ }else if( ALWAYS(pExpr->x.pList!=0) ){
+ /* Case 2: expr IN (exprlist)
+ **
+ ** For each expression, build an index key from the evaluation and
+ ** store it in the temporary table. If <expr> is a column, then use
+ ** that column's affinity when building index keys. If <expr> is not
+ ** a column, use numeric affinity.
+ */
+ char affinity; /* Affinity of the LHS of the IN */
+ int i;
+ ExprList *pList = pExpr->x.pList;
+ struct ExprList_item *pItem;
+ int r1, r2;
+ affinity = sqlite3ExprAffinity(pLeft);
+ if( affinity<=SQLITE_AFF_NONE ){
+ affinity = SQLITE_AFF_BLOB;
+ }else if( affinity==SQLITE_AFF_REAL ){
+ affinity = SQLITE_AFF_NUMERIC;
+ }
+ if( pKeyInfo ){
+ assert( sqlite3KeyInfoIsWriteable(pKeyInfo) );
+ pKeyInfo->aColl[0] = sqlite3ExprCollSeq(pParse, pExpr->pLeft);
+ }
+
+ /* Loop through each expression in <exprlist>. */
+ r1 = sqlite3GetTempReg(pParse);
+ r2 = sqlite3GetTempReg(pParse);
+ for(i=pList->nExpr, pItem=pList->a; i>0; i--, pItem++){
+ Expr *pE2 = pItem->pExpr;
+
+ /* If the expression is not constant then we will need to
+ ** disable the test that was generated above that makes sure
+ ** this code only executes once, because for a non-constant
+ ** expression we need to rerun this code each time.
+ */
+ if( addrOnce && !sqlite3ExprIsConstant(pE2) ){
+ sqlite3VdbeChangeToNoop(v, addrOnce);
+ ExprClearProperty(pExpr, EP_Subrtn);
+ addrOnce = 0;
+ }
+
+ /* Evaluate the expression and insert it into the temp table */
+ sqlite3ExprCode(pParse, pE2, r1);
+ sqlite3VdbeAddOp4(v, OP_MakeRecord, r1, 1, r2, &affinity, 1);
+ sqlite3VdbeAddOp4Int(v, OP_IdxInsert, iTab, r2, r1, 1);
+ }
+ sqlite3ReleaseTempReg(pParse, r1);
+ sqlite3ReleaseTempReg(pParse, r2);
+ }
+ if( pKeyInfo ){
+ sqlite3VdbeChangeP4(v, addr, (void *)pKeyInfo, P4_KEYINFO);
+ }
+ if( addrOnce ){
+ sqlite3VdbeJumpHere(v, addrOnce);
+ /* Subroutine return */
+ sqlite3VdbeAddOp1(v, OP_Return, pExpr->y.sub.regReturn);
+ sqlite3VdbeChangeP1(v, pExpr->y.sub.iAddr-1, sqlite3VdbeCurrentAddr(v)-1);
+ sqlite3ClearTempRegCache(pParse);
+ }
+}
+#endif /* SQLITE_OMIT_SUBQUERY */
+
+/*
+** Generate code for scalar subqueries used as a subquery expression
+** or EXISTS operator:
+**
+** (SELECT a FROM b) -- subquery
+** EXISTS (SELECT a FROM b) -- EXISTS subquery
+**
+** The pExpr parameter is the SELECT or EXISTS operator to be coded.
+**
+** Return the register that holds the result. For a multi-column SELECT,
+** the result is stored in a contiguous array of registers and the
+** return value is the register of the left-most result column.
+** Return 0 if an error occurs.
+*/
+#ifndef SQLITE_OMIT_SUBQUERY
+SQLITE_PRIVATE int sqlite3CodeSubselect(Parse *pParse, Expr *pExpr){
+ int addrOnce = 0; /* Address of OP_Once at top of subroutine */
+ int rReg = 0; /* Register storing the result */
+ Select *pSel; /* SELECT statement to encode */
+ SelectDest dest; /* How to deal with SELECT result */
+ int nReg; /* Registers to allocate */
+ Expr *pLimit; /* New limit expression */
+
+ Vdbe *v = pParse->pVdbe;
+ assert( v!=0 );
+ testcase( pExpr->op==TK_EXISTS );
+ testcase( pExpr->op==TK_SELECT );
+ assert( pExpr->op==TK_EXISTS || pExpr->op==TK_SELECT );
+ assert( ExprHasProperty(pExpr, EP_xIsSelect) );
+ pSel = pExpr->x.pSelect;
+
+ /* The evaluation of the EXISTS/SELECT must be repeated every time it
+ ** is encountered if any of the following is true:
+ **
+ ** * The right-hand side is a correlated subquery
+ ** * The right-hand side is an expression list containing variables
+ ** * We are inside a trigger
+ **
+ ** If all of the above are false, then we can run this code just once,
+ ** save the results, and reuse the same result on subsequent invocations.
+ */
+ if( !ExprHasProperty(pExpr, EP_VarSelect) ){
+ /* If this routine has already been coded, then invoke it as a
+ ** subroutine. */
+ if( ExprHasProperty(pExpr, EP_Subrtn) ){
+ ExplainQueryPlan((pParse, 0, "REUSE SUBQUERY %d", pSel->selId));
+ sqlite3VdbeAddOp2(v, OP_Gosub, pExpr->y.sub.regReturn,
+ pExpr->y.sub.iAddr);
+ return pExpr->iTable;
+ }
+
+ /* Begin coding the subroutine */
+ ExprSetProperty(pExpr, EP_Subrtn);
+ pExpr->y.sub.regReturn = ++pParse->nMem;
+ pExpr->y.sub.iAddr =
+ sqlite3VdbeAddOp2(v, OP_Integer, 0, pExpr->y.sub.regReturn) + 1;
+ VdbeComment((v, "return address"));
+
+ addrOnce = sqlite3VdbeAddOp0(v, OP_Once); VdbeCoverage(v);
+ }
+
+ /* For a SELECT, generate code to put the values for all columns of
+ ** the first row into an array of registers and return the index of
+ ** the first register.
+ **
+ ** If this is an EXISTS, write an integer 0 (not exists) or 1 (exists)
+ ** into a register and return that register number.
+ **
+ ** In both cases, the query is augmented with "LIMIT 1". Any
+ ** preexisting LIMIT X is rewritten as X<>0 so that the effective
+ ** limit is either 1 or 0.
+ */
+ ExplainQueryPlan((pParse, 1, "%sSCALAR SUBQUERY %d",
+ addrOnce?"":"CORRELATED ", pSel->selId));
+ nReg = pExpr->op==TK_SELECT ? pSel->pEList->nExpr : 1;
+ sqlite3SelectDestInit(&dest, 0, pParse->nMem+1);
+ pParse->nMem += nReg;
+ if( pExpr->op==TK_SELECT ){
+ dest.eDest = SRT_Mem;
+ dest.iSdst = dest.iSDParm;
+ dest.nSdst = nReg;
+ sqlite3VdbeAddOp3(v, OP_Null, 0, dest.iSDParm, dest.iSDParm+nReg-1);
+ VdbeComment((v, "Init subquery result"));
+ }else{
+ dest.eDest = SRT_Exists;
+ sqlite3VdbeAddOp2(v, OP_Integer, 0, dest.iSDParm);
+ VdbeComment((v, "Init EXISTS result"));
+ }
+ if( pSel->pLimit ){
+ /* The subquery already has a limit. If the pre-existing limit is X
+ ** then make the new limit X<>0 so that the new limit is either 1 or 0 */
+ sqlite3 *db = pParse->db;
+ pLimit = sqlite3Expr(db, TK_INTEGER, "0");
+ if( pLimit ){
+ pLimit->affExpr = SQLITE_AFF_NUMERIC;
+ pLimit = sqlite3PExpr(pParse, TK_NE,
+ sqlite3ExprDup(db, pSel->pLimit->pLeft, 0), pLimit);
+ }
+ sqlite3ExprDelete(db, pSel->pLimit->pLeft);
+ pSel->pLimit->pLeft = pLimit;
+ }else{
+ /* If there is no pre-existing limit add a limit of 1 */
+ pLimit = sqlite3Expr(pParse->db, TK_INTEGER, "1");
+ pSel->pLimit = sqlite3PExpr(pParse, TK_LIMIT, pLimit, 0);
+ }
+ pSel->iLimit = 0;
+ if( sqlite3Select(pParse, pSel, &dest) ){
+ return 0;
+ }
+ pExpr->iTable = rReg = dest.iSDParm;
+ ExprSetVVAProperty(pExpr, EP_NoReduce);
+ if( addrOnce ){
+ sqlite3VdbeJumpHere(v, addrOnce);
+
+ /* Subroutine return */
+ sqlite3VdbeAddOp1(v, OP_Return, pExpr->y.sub.regReturn);
+ sqlite3VdbeChangeP1(v, pExpr->y.sub.iAddr-1, sqlite3VdbeCurrentAddr(v)-1);
+ sqlite3ClearTempRegCache(pParse);
+ }
+
+ return rReg;
+}
+#endif /* SQLITE_OMIT_SUBQUERY */
+
+#ifndef SQLITE_OMIT_SUBQUERY
+/*
+** Expr pIn is an IN(...) expression. This function checks that the
+** sub-select on the RHS of the IN() operator has the same number of
+** columns as the vector on the LHS. Or, if the RHS of the IN() is not
+** a sub-query, that the LHS is a vector of size 1.
+*/
+SQLITE_PRIVATE int sqlite3ExprCheckIN(Parse *pParse, Expr *pIn){
+ int nVector = sqlite3ExprVectorSize(pIn->pLeft);
+ if( (pIn->flags & EP_xIsSelect) ){
+ if( nVector!=pIn->x.pSelect->pEList->nExpr ){
+ sqlite3SubselectError(pParse, pIn->x.pSelect->pEList->nExpr, nVector);
+ return 1;
+ }
+ }else if( nVector!=1 ){
+ sqlite3VectorErrorMsg(pParse, pIn->pLeft);
+ return 1;
+ }
+ return 0;
+}
+#endif
+
+#ifndef SQLITE_OMIT_SUBQUERY
+/*
+** Generate code for an IN expression.
+**
+** x IN (SELECT ...)
+** x IN (value, value, ...)
+**
+** The left-hand side (LHS) is a scalar or vector expression. The
+** right-hand side (RHS) is an array of zero or more scalar values, or a
+** subquery. If the RHS is a subquery, the number of result columns must
+** match the number of columns in the vector on the LHS. If the RHS is
+** a list of values, the LHS must be a scalar.
+**
+** The IN operator is true if the LHS value is contained within the RHS.
+** The result is false if the LHS is definitely not in the RHS. The
+** result is NULL if the presence of the LHS in the RHS cannot be
+** determined due to NULLs.
+**
+** This routine generates code that jumps to destIfFalse if the LHS is not
+** contained within the RHS. If due to NULLs we cannot determine if the LHS
+** is contained in the RHS then jump to destIfNull. If the LHS is contained
+** within the RHS then fall through.
+**
+** See the separate in-operator.md documentation file in the canonical
+** SQLite source tree for additional information.
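+**
+** For example, under three-valued logic:
+**
+**     1 IN (1,2)        -- true:  fall through
+**     3 IN (1,2)        -- false: jump to destIfFalse
+**     3 IN (1,NULL)     -- NULL:  jump to destIfNull
+**     NULL IN (1,2)     -- NULL:  jump to destIfNull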
+*/
+static void sqlite3ExprCodeIN(
+ Parse *pParse, /* Parsing and code generating context */
+ Expr *pExpr, /* The IN expression */
+ int destIfFalse, /* Jump here if LHS is not contained in the RHS */
+ int destIfNull /* Jump here if the results are unknown due to NULLs */
+){
+ int rRhsHasNull = 0; /* Register that is true if RHS contains NULL values */
+ int eType; /* Type of the RHS */
+ int rLhs; /* Register(s) holding the LHS values */
+ int rLhsOrig; /* LHS values prior to reordering by aiMap[] */
+ Vdbe *v; /* Statement under construction */
+ int *aiMap = 0; /* Map from vector field to index column */
+ char *zAff = 0; /* Affinity string for comparisons */
+ int nVector; /* Size of vectors for this IN operator */
+ int iDummy; /* Dummy parameter to exprCodeVector() */
+ Expr *pLeft; /* The LHS of the IN operator */
+ int i; /* loop counter */
+ int destStep2; /* Where to jump when NULLs seen in step 2 */
+ int destStep6 = 0; /* Start of code for Step 6 */
+ int addrTruthOp; /* Address of opcode that determines the IN is true */
+ int destNotNull; /* Jump here if a comparison is not true in step 6 */
+ int addrTop; /* Top of the step-6 loop */
+ int iTab = 0; /* Index to use */
+ u8 okConstFactor = pParse->okConstFactor;
+
+ assert( !ExprHasVVAProperty(pExpr,EP_Immutable) );
+ pLeft = pExpr->pLeft;
+ if( sqlite3ExprCheckIN(pParse, pExpr) ) return;
+ zAff = exprINAffinity(pParse, pExpr);
+ nVector = sqlite3ExprVectorSize(pExpr->pLeft);
+ aiMap = (int*)sqlite3DbMallocZero(
+ pParse->db, nVector*(sizeof(int) + sizeof(char)) + 1
+ );
+ if( pParse->db->mallocFailed ) goto sqlite3ExprCodeIN_oom_error;
+
+ /* Attempt to compute the RHS. After this step, if anything other than
+ ** IN_INDEX_NOOP is returned, the table opened with cursor iTab
+ ** contains the values that make up the RHS. If IN_INDEX_NOOP is returned,
+ ** the RHS has not yet been coded. */
+ v = pParse->pVdbe;
+ assert( v!=0 ); /* OOM detected prior to this routine */
+ VdbeNoopComment((v, "begin IN expr"));
+ eType = sqlite3FindInIndex(pParse, pExpr,
+ IN_INDEX_MEMBERSHIP | IN_INDEX_NOOP_OK,
+ destIfFalse==destIfNull ? 0 : &rRhsHasNull,
+ aiMap, &iTab);
+
+ assert( pParse->nErr || nVector==1 || eType==IN_INDEX_EPH
+ || eType==IN_INDEX_INDEX_ASC || eType==IN_INDEX_INDEX_DESC
+ );
+#ifdef SQLITE_DEBUG
+ /* Confirm that aiMap[] contains nVector integer values between 0 and
+ ** nVector-1. */
+ for(i=0; i<nVector; i++){
+ int j, cnt;
+ for(cnt=j=0; j<nVector; j++) if( aiMap[j]==i ) cnt++;
+ assert( cnt==1 );
+ }
+#endif
+
+ /* Code the LHS, the <expr> from "<expr> IN (...)". If the LHS is a
+ ** vector, then it is stored in an array of nVector registers starting
+ ** at r1.
+ **
+ ** sqlite3FindInIndex() might have reordered the fields of the LHS vector
+ ** so that the fields are in the same order as an existing index. The
+ ** aiMap[] array contains a mapping from the original LHS field order to
+ ** the field order that matches the RHS index.
+ **
+ ** Avoid factoring the LHS of the IN(...) expression out of the loop,
+ ** even if it is constant, as OP_Affinity may be used on the register
+ ** by code generated below. */
+ assert( pParse->okConstFactor==okConstFactor );
+ pParse->okConstFactor = 0;
+ rLhsOrig = exprCodeVector(pParse, pLeft, &iDummy);
+ pParse->okConstFactor = okConstFactor;
+ for(i=0; i<nVector && aiMap[i]==i; i++){} /* Are LHS fields reordered? */
+ if( i==nVector ){
+ /* LHS fields are not reordered */
+ rLhs = rLhsOrig;
+ }else{
+ /* Need to reorder the LHS fields according to aiMap */
+ rLhs = sqlite3GetTempRange(pParse, nVector);
+ for(i=0; i<nVector; i++){
+ sqlite3VdbeAddOp3(v, OP_Copy, rLhsOrig+i, rLhs+aiMap[i], 0);
+ }
+ }
+
+ /* If sqlite3FindInIndex() did not find or create an index that is
+ ** suitable for evaluating the IN operator, then evaluate using a
+ ** sequence of comparisons.
+ **
+ ** This is step (1) in the in-operator.md optimized algorithm.
+ */
+ if( eType==IN_INDEX_NOOP ){
+ ExprList *pList = pExpr->x.pList;
+ CollSeq *pColl = sqlite3ExprCollSeq(pParse, pExpr->pLeft);
+ int labelOk = sqlite3VdbeMakeLabel(pParse);
+ int r2, regToFree;
+ int regCkNull = 0;
+ int ii;
+ assert( !ExprHasProperty(pExpr, EP_xIsSelect) );
+ if( destIfNull!=destIfFalse ){
+ regCkNull = sqlite3GetTempReg(pParse);
+ sqlite3VdbeAddOp3(v, OP_BitAnd, rLhs, rLhs, regCkNull);
+ }
+ for(ii=0; ii<pList->nExpr; ii++){
+ r2 = sqlite3ExprCodeTemp(pParse, pList->a[ii].pExpr, &regToFree);
+ if( regCkNull && sqlite3ExprCanBeNull(pList->a[ii].pExpr) ){
+ sqlite3VdbeAddOp3(v, OP_BitAnd, regCkNull, r2, regCkNull);
+ }
+ sqlite3ReleaseTempReg(pParse, regToFree);
+ if( ii<pList->nExpr-1 || destIfNull!=destIfFalse ){
+ int op = rLhs!=r2 ? OP_Eq : OP_NotNull;
+ sqlite3VdbeAddOp4(v, op, rLhs, labelOk, r2,
+ (void*)pColl, P4_COLLSEQ);
+ VdbeCoverageIf(v, ii<pList->nExpr-1 && op==OP_Eq);
+ VdbeCoverageIf(v, ii==pList->nExpr-1 && op==OP_Eq);
+ VdbeCoverageIf(v, ii<pList->nExpr-1 && op==OP_NotNull);
+ VdbeCoverageIf(v, ii==pList->nExpr-1 && op==OP_NotNull);
+ sqlite3VdbeChangeP5(v, zAff[0]);
+ }else{
+ int op = rLhs!=r2 ? OP_Ne : OP_IsNull;
+ assert( destIfNull==destIfFalse );
+ sqlite3VdbeAddOp4(v, op, rLhs, destIfFalse, r2,
+ (void*)pColl, P4_COLLSEQ);
+ VdbeCoverageIf(v, op==OP_Ne);
+ VdbeCoverageIf(v, op==OP_IsNull);
+ sqlite3VdbeChangeP5(v, zAff[0] | SQLITE_JUMPIFNULL);
+ }
+ }
+ if( regCkNull ){
+ sqlite3VdbeAddOp2(v, OP_IsNull, regCkNull, destIfNull); VdbeCoverage(v);
+ sqlite3VdbeGoto(v, destIfFalse);
+ }
+ sqlite3VdbeResolveLabel(v, labelOk);
+ sqlite3ReleaseTempReg(pParse, regCkNull);
+ goto sqlite3ExprCodeIN_finished;
+ }
+
+ /* Step 2: Check to see if the LHS contains any NULL columns. If the
+ ** LHS does contain NULLs then the result must be either FALSE or NULL.
+ ** We will then skip the binary search of the RHS.
+ */
+ if( destIfNull==destIfFalse ){
+ destStep2 = destIfFalse;
+ }else{
+ destStep2 = destStep6 = sqlite3VdbeMakeLabel(pParse);
+ }
+ if( pParse->nErr ) goto sqlite3ExprCodeIN_finished;
+ for(i=0; i<nVector; i++){
+ Expr *p = sqlite3VectorFieldSubexpr(pExpr->pLeft, i);
+ if( sqlite3ExprCanBeNull(p) ){
+ sqlite3VdbeAddOp2(v, OP_IsNull, rLhs+i, destStep2);
+ VdbeCoverage(v);
+ }
+ }
+
+ /* Step 3. The LHS is now known to be non-NULL. Do the binary search
+ ** of the RHS using the LHS as a probe. If found, the result is
+ ** true.
+ */
+ if( eType==IN_INDEX_ROWID ){
+ /* In this case, the RHS is the ROWID of a table b-tree and so we also
+ ** know that the RHS is non-NULL. Hence, we combine steps 3 and 4
+ ** into a single opcode. */
+ sqlite3VdbeAddOp3(v, OP_SeekRowid, iTab, destIfFalse, rLhs);
+ VdbeCoverage(v);
+ addrTruthOp = sqlite3VdbeAddOp0(v, OP_Goto); /* Return True */
+ }else{
+ sqlite3VdbeAddOp4(v, OP_Affinity, rLhs, nVector, 0, zAff, nVector);
+ if( destIfFalse==destIfNull ){
+ /* Combine Step 3 and Step 5 into a single opcode */
+ sqlite3VdbeAddOp4Int(v, OP_NotFound, iTab, destIfFalse,
+ rLhs, nVector); VdbeCoverage(v);
+ goto sqlite3ExprCodeIN_finished;
+ }
+ /* Ordinary Step 3, for the case where FALSE and NULL are distinct */
+ addrTruthOp = sqlite3VdbeAddOp4Int(v, OP_Found, iTab, 0,
+ rLhs, nVector); VdbeCoverage(v);
+ }
+
+ /* Step 4. If the RHS is known to be non-NULL and we did not find
+ ** a match on the search above, then the result must be FALSE.
+ */
+ if( rRhsHasNull && nVector==1 ){
+ sqlite3VdbeAddOp2(v, OP_NotNull, rRhsHasNull, destIfFalse);
+ VdbeCoverage(v);
+ }
+
+ /* Step 5. If we do not care about the difference between NULL and
+ ** FALSE, then just return false.
+ */
+ if( destIfFalse==destIfNull ) sqlite3VdbeGoto(v, destIfFalse);
+
+ /* Step 6: Loop through rows of the RHS. Compare each row to the LHS.
+ ** If any comparison is NULL, then the result is NULL. If all
+ ** comparisons are FALSE then the final result is FALSE.
+ **
+ ** For a scalar LHS, it is sufficient to check just the first row
+ ** of the RHS.
+ */
+ if( destStep6 ) sqlite3VdbeResolveLabel(v, destStep6);
+ addrTop = sqlite3VdbeAddOp2(v, OP_Rewind, iTab, destIfFalse);
+ VdbeCoverage(v);
+ if( nVector>1 ){
+ destNotNull = sqlite3VdbeMakeLabel(pParse);
+ }else{
+ /* For nVector==1, combine steps 6 and 7 by immediately returning
+ ** FALSE if the first comparison is not NULL */
+ destNotNull = destIfFalse;
+ }
+ for(i=0; i<nVector; i++){
+ Expr *p;
+ CollSeq *pColl;
+ int r3 = sqlite3GetTempReg(pParse);
+ p = sqlite3VectorFieldSubexpr(pLeft, i);
+ pColl = sqlite3ExprCollSeq(pParse, p);
+ sqlite3VdbeAddOp3(v, OP_Column, iTab, i, r3);
+ sqlite3VdbeAddOp4(v, OP_Ne, rLhs+i, destNotNull, r3,
+ (void*)pColl, P4_COLLSEQ);
+ VdbeCoverage(v);
+ sqlite3ReleaseTempReg(pParse, r3);
+ }
+ sqlite3VdbeAddOp2(v, OP_Goto, 0, destIfNull);
+ if( nVector>1 ){
+ sqlite3VdbeResolveLabel(v, destNotNull);
+ sqlite3VdbeAddOp2(v, OP_Next, iTab, addrTop+1);
+ VdbeCoverage(v);
+
+ /* Step 7: If we reach this point, we know that the result must
+ ** be false. */
+ sqlite3VdbeAddOp2(v, OP_Goto, 0, destIfFalse);
+ }
+
+ /* Jumps here in order to return true. */
+ sqlite3VdbeJumpHere(v, addrTruthOp);
+
+sqlite3ExprCodeIN_finished:
+ if( rLhs!=rLhsOrig ) sqlite3ReleaseTempReg(pParse, rLhs);
+ VdbeComment((v, "end IN expr"));
+sqlite3ExprCodeIN_oom_error:
+ sqlite3DbFree(pParse->db, aiMap);
+ sqlite3DbFree(pParse->db, zAff);
+}
+#endif /* SQLITE_OMIT_SUBQUERY */
+
+#ifndef SQLITE_OMIT_FLOATING_POINT
+/*
+** Generate an instruction that will put the floating point
+** value described by z[0..n-1] into register iMem.
+**
+** The z[] string will probably not be zero-terminated. But the
+** z[n] character is guaranteed to be something that does not look
+** like the continuation of the number.
+*/
+static void codeReal(Vdbe *v, const char *z, int negateFlag, int iMem){
+ if( ALWAYS(z!=0) ){
+ double value;
+ sqlite3AtoF(z, &value, sqlite3Strlen30(z), SQLITE_UTF8);
+ assert( !sqlite3IsNaN(value) ); /* The new AtoF never returns NaN */
+ if( negateFlag ) value = -value;
+ sqlite3VdbeAddOp4Dup8(v, OP_Real, 0, iMem, 0, (u8*)&value, P4_REAL);
+ }
+}
+#endif
+
+
+/*
+** Generate an instruction that will put the integer described by
+** text z[0..n-1] into register iMem.
+**
+** Expr.u.zToken is always UTF8 and zero-terminated.
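+**
+** For example, -9223372036854775808 still fits in a 64-bit integer and
+** is coded with OP_Int64, while 9223372036854775808 does not fit and is
+** coded as a floating-point value instead (an oversized hex literal,
+** which cannot be rendered as a real, raises an error).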
+*/
+static void codeInteger(Parse *pParse, Expr *pExpr, int negFlag, int iMem){
+ Vdbe *v = pParse->pVdbe;
+ if( pExpr->flags & EP_IntValue ){
+ int i = pExpr->u.iValue;
+ assert( i>=0 );
+ if( negFlag ) i = -i;
+ sqlite3VdbeAddOp2(v, OP_Integer, i, iMem);
+ }else{
+ int c;
+ i64 value;
+ const char *z = pExpr->u.zToken;
+ assert( z!=0 );
+ c = sqlite3DecOrHexToI64(z, &value);
+ if( (c==3 && !negFlag) || (c==2) || (negFlag && value==SMALLEST_INT64)){
+#ifdef SQLITE_OMIT_FLOATING_POINT
+ sqlite3ErrorMsg(pParse, "oversized integer: %s%s", negFlag ? "-" : "", z);
+#else
+#ifndef SQLITE_OMIT_HEX_INTEGER
+ if( sqlite3_strnicmp(z,"0x",2)==0 ){
+ sqlite3ErrorMsg(pParse, "hex literal too big: %s%s", negFlag?"-":"",z);
+ }else
+#endif
+ {
+ codeReal(v, z, negFlag, iMem);
+ }
+#endif
+ }else{
+ if( negFlag ){ value = c==3 ? SMALLEST_INT64 : -value; }
+ sqlite3VdbeAddOp4Dup8(v, OP_Int64, 0, iMem, 0, (u8*)&value, P4_INT64);
+ }
+ }
+}
+
+
+/* Generate code that will load into register regOut a value that is
+** appropriate for the iIdxCol-th column of index pIdx.
+*/
+SQLITE_PRIVATE void sqlite3ExprCodeLoadIndexColumn(
+ Parse *pParse, /* The parsing context */
+ Index *pIdx, /* The index whose column is to be loaded */
+ int iTabCur, /* Cursor pointing to a table row */
+ int iIdxCol, /* The column of the index to be loaded */
+ int regOut /* Store the index column value in this register */
+){
+ i16 iTabCol = pIdx->aiColumn[iIdxCol];
+ if( iTabCol==XN_EXPR ){
+ assert( pIdx->aColExpr );
+ assert( pIdx->aColExpr->nExpr>iIdxCol );
+ pParse->iSelfTab = iTabCur + 1;
+ sqlite3ExprCodeCopy(pParse, pIdx->aColExpr->a[iIdxCol].pExpr, regOut);
+ pParse->iSelfTab = 0;
+ }else{
+ sqlite3ExprCodeGetColumnOfTable(pParse->pVdbe, pIdx->pTable, iTabCur,
+ iTabCol, regOut);
+ }
+}
+
+#ifndef SQLITE_OMIT_GENERATED_COLUMNS
+/*
+** Generate code that will compute the value of generated column pCol
+** and store the result in register regOut
+*/
+SQLITE_PRIVATE void sqlite3ExprCodeGeneratedColumn(
+ Parse *pParse,
+ Column *pCol,
+ int regOut
+){
+ int iAddr;
+ Vdbe *v = pParse->pVdbe;
+ assert( v!=0 );
+ assert( pParse->iSelfTab!=0 );
+ if( pParse->iSelfTab>0 ){
+ iAddr = sqlite3VdbeAddOp3(v, OP_IfNullRow, pParse->iSelfTab-1, 0, regOut);
+ }else{
+ iAddr = 0;
+ }
+ sqlite3ExprCodeCopy(pParse, pCol->pDflt, regOut);
+ if( pCol->affinity>=SQLITE_AFF_TEXT ){
+ sqlite3VdbeAddOp4(v, OP_Affinity, regOut, 1, 0, &pCol->affinity, 1);
+ }
+ if( iAddr ) sqlite3VdbeJumpHere(v, iAddr);
+}
+#endif /* SQLITE_OMIT_GENERATED_COLUMNS */
+
+/*
+** Generate code to extract the value of the iCol-th column of a table.
+*/
+SQLITE_PRIVATE void sqlite3ExprCodeGetColumnOfTable(
+ Vdbe *v, /* The VDBE under construction */
+ Table *pTab, /* The table containing the value */
+ int iTabCur, /* The table cursor. Or the PK cursor for WITHOUT ROWID */
+ int iCol, /* Index of the column to extract */
+ int regOut /* Extract the value into this register */
+){
+ Column *pCol;
+ assert( v!=0 );
+ if( pTab==0 ){
+ sqlite3VdbeAddOp3(v, OP_Column, iTabCur, iCol, regOut);
+ return;
+ }
+ if( iCol<0 || iCol==pTab->iPKey ){
+ sqlite3VdbeAddOp2(v, OP_Rowid, iTabCur, regOut);
+ }else{
+ int op;
+ int x;
+ if( IsVirtual(pTab) ){
+ op = OP_VColumn;
+ x = iCol;
+#ifndef SQLITE_OMIT_GENERATED_COLUMNS
+ }else if( (pCol = &pTab->aCol[iCol])->colFlags & COLFLAG_VIRTUAL ){
+ Parse *pParse = sqlite3VdbeParser(v);
+ if( pCol->colFlags & COLFLAG_BUSY ){
+ sqlite3ErrorMsg(pParse, "generated column loop on \"%s\"", pCol->zName);
+ }else{
+ int savedSelfTab = pParse->iSelfTab;
+ pCol->colFlags |= COLFLAG_BUSY;
+ pParse->iSelfTab = iTabCur+1;
+ sqlite3ExprCodeGeneratedColumn(pParse, pCol, regOut);
+ pParse->iSelfTab = savedSelfTab;
+ pCol->colFlags &= ~COLFLAG_BUSY;
+ }
+ return;
+#endif
+ }else if( !HasRowid(pTab) ){
+ testcase( iCol!=sqlite3TableColumnToStorage(pTab, iCol) );
+ x = sqlite3TableColumnToIndex(sqlite3PrimaryKeyIndex(pTab), iCol);
+ op = OP_Column;
+ }else{
+ x = sqlite3TableColumnToStorage(pTab,iCol);
+ testcase( x!=iCol );
+ op = OP_Column;
+ }
+ sqlite3VdbeAddOp3(v, op, iTabCur, x, regOut);
+ sqlite3ColumnDefault(v, pTab, iCol, regOut);
+ }
+}
+
+/*
+** Generate code that will extract the iColumn-th column from
+** table pTab and store the column value in register iReg.
+**
+** There must be an open cursor to pTab in iTable when this routine
+** is called. If iColumn<0 then code is generated that extracts the rowid.
+*/
+SQLITE_PRIVATE int sqlite3ExprCodeGetColumn(
+ Parse *pParse, /* Parsing and code generating context */
+ Table *pTab, /* Description of the table we are reading from */
+ int iColumn, /* Index of the table column */
+ int iTable, /* The cursor pointing to the table */
+ int iReg, /* Store results here */
+ u8 p5 /* P5 value for OP_Column + FLAGS */
+){
+ assert( pParse->pVdbe!=0 );
+ sqlite3ExprCodeGetColumnOfTable(pParse->pVdbe, pTab, iTable, iColumn, iReg);
+ if( p5 ){
+ VdbeOp *pOp = sqlite3VdbeGetOp(pParse->pVdbe,-1);
+ if( pOp->opcode==OP_Column ) pOp->p5 = p5;
+ }
+ return iReg;
+}
+
+/*
+** Generate code to move content from registers iFrom...iFrom+nReg-1
+** over to iTo..iTo+nReg-1.
+*/
+SQLITE_PRIVATE void sqlite3ExprCodeMove(Parse *pParse, int iFrom, int iTo, int nReg){
+ sqlite3VdbeAddOp3(pParse->pVdbe, OP_Move, iFrom, iTo, nReg);
+}
+
+/*
+** Convert a scalar expression node to a TK_REGISTER referencing
+** register iReg. The caller must ensure that iReg already contains
+** the correct value for the expression.
+*/
+static void exprToRegister(Expr *pExpr, int iReg){
+ Expr *p = sqlite3ExprSkipCollateAndLikely(pExpr);
+ p->op2 = p->op;
+ p->op = TK_REGISTER;
+ p->iTable = iReg;
+ ExprClearProperty(p, EP_Skip);
+}
+
+/*
+** Evaluate an expression (either a vector or a scalar expression) and store
+** the result in contiguous temporary registers. Return the index of
+** the first register used to store the result.
+**
+** If the returned result register is a temporary scalar, then also write
+** that register number into *piFreeable. If the returned result register
+** is not a temporary or if the expression is a vector set *piFreeable
+** to 0.
+*/
+static int exprCodeVector(Parse *pParse, Expr *p, int *piFreeable){
+ int iResult;
+ int nResult = sqlite3ExprVectorSize(p);
+ if( nResult==1 ){
+ iResult = sqlite3ExprCodeTemp(pParse, p, piFreeable);
+ }else{
+ *piFreeable = 0;
+ if( p->op==TK_SELECT ){
+#if SQLITE_OMIT_SUBQUERY
+ iResult = 0;
+#else
+ iResult = sqlite3CodeSubselect(pParse, p);
+#endif
+ }else{
+ int i;
+ iResult = pParse->nMem+1;
+ pParse->nMem += nResult;
+ for(i=0; i<nResult; i++){
+ sqlite3ExprCodeFactorable(pParse, p->x.pList->a[i].pExpr, i+iResult);
+ }
+ }
+ }
+ return iResult;
+}
+
+/*
+** If the last opcode is an OP_Copy, then set the do-not-merge flag (p5)
+** so that a subsequent copy will not be merged into this one.
+*/
+static void setDoNotMergeFlagOnCopy(Vdbe *v){
+ if( sqlite3VdbeGetOp(v, -1)->opcode==OP_Copy ){
+ sqlite3VdbeChangeP5(v, 1); /* Tag trailing OP_Copy as not mergeable */
+ }
+}
+
+/*
+** Generate code to implement special SQL functions that are implemented
+** in-line rather than by using the usual callbacks.
+*/
+static int exprCodeInlineFunction(
+ Parse *pParse, /* Parsing context */
+ ExprList *pFarg, /* List of function arguments */
+ int iFuncId, /* Function ID. One of the INTFUNC_... values */
+ int target /* Store function result in this register */
+){
+ int nFarg;
+ Vdbe *v = pParse->pVdbe;
+ assert( v!=0 );
+ assert( pFarg!=0 );
+ nFarg = pFarg->nExpr;
+ assert( nFarg>0 ); /* All in-line functions have at least one argument */
+ switch( iFuncId ){
+ case INLINEFUNC_coalesce: {
+ /* Attempt a direct implementation of the built-in COALESCE() and
+ ** IFNULL() functions. This avoids unnecessary evaluation of
+ ** arguments past the first non-NULL argument.
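+ ** For example, in COALESCE(a, b/0), the division is never evaluated
+ ** when "a" is non-NULL.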
+ */
+ int endCoalesce = sqlite3VdbeMakeLabel(pParse);
+ int i;
+ assert( nFarg>=2 );
+ sqlite3ExprCode(pParse, pFarg->a[0].pExpr, target);
+ for(i=1; i<nFarg; i++){
+ sqlite3VdbeAddOp2(v, OP_NotNull, target, endCoalesce);
+ VdbeCoverage(v);
+ sqlite3ExprCode(pParse, pFarg->a[i].pExpr, target);
+ }
+ setDoNotMergeFlagOnCopy(v);
+ sqlite3VdbeResolveLabel(v, endCoalesce);
+ break;
+ }
+ case INLINEFUNC_iif: {
+ Expr caseExpr;
+ memset(&caseExpr, 0, sizeof(caseExpr));
+ caseExpr.op = TK_CASE;
+ caseExpr.x.pList = pFarg;
+ return sqlite3ExprCodeTarget(pParse, &caseExpr, target);
+ }
+
+ default: {
+ /* The UNLIKELY() function is a no-op. The result is the value
+ ** of the first argument.
+ */
+ assert( nFarg==1 || nFarg==2 );
+ target = sqlite3ExprCodeTarget(pParse, pFarg->a[0].pExpr, target);
+ break;
+ }
+
+ /***********************************************************************
+ ** Test-only SQL functions that are only usable if enabled
+ ** via SQLITE_TESTCTRL_INTERNAL_FUNCTIONS
+ */
+ case INLINEFUNC_expr_compare: {
+ /* Compare two expressions using sqlite3ExprCompare() */
+ assert( nFarg==2 );
+ sqlite3VdbeAddOp2(v, OP_Integer,
+ sqlite3ExprCompare(0,pFarg->a[0].pExpr, pFarg->a[1].pExpr,-1),
+ target);
+ break;
+ }
+
+ case INLINEFUNC_expr_implies_expr: {
+ /* Compare two expressions using sqlite3ExprImpliesExpr() */
+ assert( nFarg==2 );
+ sqlite3VdbeAddOp2(v, OP_Integer,
+ sqlite3ExprImpliesExpr(pParse,pFarg->a[0].pExpr, pFarg->a[1].pExpr,-1),
+ target);
+ break;
+ }
+
+ case INLINEFUNC_implies_nonnull_row: {
+      /* Result of sqlite3ExprImpliesNonNullRow() */
+ Expr *pA1;
+ assert( nFarg==2 );
+ pA1 = pFarg->a[1].pExpr;
+ if( pA1->op==TK_COLUMN ){
+ sqlite3VdbeAddOp2(v, OP_Integer,
+ sqlite3ExprImpliesNonNullRow(pFarg->a[0].pExpr,pA1->iTable),
+ target);
+ }else{
+ sqlite3VdbeAddOp2(v, OP_Null, 0, target);
+ }
+ break;
+ }
+
+#ifdef SQLITE_DEBUG
+ case INLINEFUNC_affinity: {
+ /* The AFFINITY() function evaluates to a string that describes
+ ** the type affinity of the argument. This is used for testing of
+ ** the SQLite type logic.
+ */
+ const char *azAff[] = { "blob", "text", "numeric", "integer", "real" };
+ char aff;
+ assert( nFarg==1 );
+ aff = sqlite3ExprAffinity(pFarg->a[0].pExpr);
+ sqlite3VdbeLoadString(v, target,
+ (aff<=SQLITE_AFF_NONE) ? "none" : azAff[aff-SQLITE_AFF_BLOB]);
+ break;
+ }
+#endif
+ }
+ return target;
+}
+
+
+/*
+** Generate code into the current Vdbe to evaluate the given
+** expression. Attempt to store the results in register "target".
+** Return the register where results are stored.
+**
+** With this routine, there is no guarantee that results will
+** be stored in target. The result might be stored in some other
+** register if it is convenient to do so. The calling function
+** must check the return code and move the results to the desired
+** register.
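+**
+** See sqlite3ExprCode() below for a caller that forces the result into
+** the requested register by emitting an extra OP_SCopy or OP_Copy
+** whenever the returned register differs from "target".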
+*/
+SQLITE_PRIVATE int sqlite3ExprCodeTarget(Parse *pParse, Expr *pExpr, int target){
+ Vdbe *v = pParse->pVdbe; /* The VM under construction */
+ int op; /* The opcode being coded */
+ int inReg = target; /* Results stored in register inReg */
+ int regFree1 = 0; /* If non-zero free this temporary register */
+ int regFree2 = 0; /* If non-zero free this temporary register */
+ int r1, r2; /* Various register numbers */
+ Expr tempX; /* Temporary expression node */
+ int p5 = 0;
+
+ assert( target>0 && target<=pParse->nMem );
+ assert( v!=0 );
+
+expr_code_doover:
+ if( pExpr==0 ){
+ op = TK_NULL;
+ }else{
+ assert( !ExprHasVVAProperty(pExpr,EP_Immutable) );
+ op = pExpr->op;
+ }
+ switch( op ){
+ case TK_AGG_COLUMN: {
+ AggInfo *pAggInfo = pExpr->pAggInfo;
+ struct AggInfo_col *pCol;
+ assert( pAggInfo!=0 );
+ assert( pExpr->iAgg>=0 && pExpr->iAgg<pAggInfo->nColumn );
+ pCol = &pAggInfo->aCol[pExpr->iAgg];
+ if( !pAggInfo->directMode ){
+ assert( pCol->iMem>0 );
+ return pCol->iMem;
+ }else if( pAggInfo->useSortingIdx ){
+ Table *pTab = pCol->pTab;
+ sqlite3VdbeAddOp3(v, OP_Column, pAggInfo->sortingIdxPTab,
+ pCol->iSorterColumn, target);
+ if( pCol->iColumn<0 ){
+ VdbeComment((v,"%s.rowid",pTab->zName));
+ }else{
+ VdbeComment((v,"%s.%s",pTab->zName,pTab->aCol[pCol->iColumn].zName));
+ if( pTab->aCol[pCol->iColumn].affinity==SQLITE_AFF_REAL ){
+ sqlite3VdbeAddOp1(v, OP_RealAffinity, target);
+ }
+ }
+ return target;
+ }
+ /* Otherwise, fall thru into the TK_COLUMN case */
+ /* no break */ deliberate_fall_through
+ }
+ case TK_COLUMN: {
+ int iTab = pExpr->iTable;
+ int iReg;
+ if( ExprHasProperty(pExpr, EP_FixedCol) ){
+ /* This COLUMN expression is really a constant due to WHERE clause
+ ** constraints, and that constant is coded by the pExpr->pLeft
+        ** expression. However, make sure the constant has the correct
+ ** datatype by applying the Affinity of the table column to the
+ ** constant.
+ */
+ int aff;
+ iReg = sqlite3ExprCodeTarget(pParse, pExpr->pLeft,target);
+ if( pExpr->y.pTab ){
+ aff = sqlite3TableColumnAffinity(pExpr->y.pTab, pExpr->iColumn);
+ }else{
+ aff = pExpr->affExpr;
+ }
+ if( aff>SQLITE_AFF_BLOB ){
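+          /* zAff packs the single-character affinity names for
+          ** TEXT('B'), NUMERIC('C'), INTEGER('D'), and REAL('E'), each
+          ** NUL-terminated, so &zAff[(aff-'B')*2] selects a one-column
+          ** affinity string for the OP_Affinity opcode. */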
+ static const char zAff[] = "B\000C\000D\000E";
+ assert( SQLITE_AFF_BLOB=='A' );
+ assert( SQLITE_AFF_TEXT=='B' );
+ sqlite3VdbeAddOp4(v, OP_Affinity, iReg, 1, 0,
+ &zAff[(aff-'B')*2], P4_STATIC);
+ }
+ return iReg;
+ }
+ if( iTab<0 ){
+ if( pParse->iSelfTab<0 ){
+ /* Other columns in the same row for CHECK constraints or
+          ** generated columns, or for inserting into a partial index.
+ ** The row is unpacked into registers beginning at
+ ** 0-(pParse->iSelfTab). The rowid (if any) is in a register
+ ** immediately prior to the first column.
+ */
+ Column *pCol;
+ Table *pTab = pExpr->y.pTab;
+ int iSrc;
+ int iCol = pExpr->iColumn;
+ assert( pTab!=0 );
+ assert( iCol>=XN_ROWID );
+ assert( iCol<pTab->nCol );
+ if( iCol<0 ){
+ return -1-pParse->iSelfTab;
+ }
+ pCol = pTab->aCol + iCol;
+ testcase( iCol!=sqlite3TableColumnToStorage(pTab,iCol) );
+ iSrc = sqlite3TableColumnToStorage(pTab, iCol) - pParse->iSelfTab;
+#ifndef SQLITE_OMIT_GENERATED_COLUMNS
+ if( pCol->colFlags & COLFLAG_GENERATED ){
+ if( pCol->colFlags & COLFLAG_BUSY ){
+ sqlite3ErrorMsg(pParse, "generated column loop on \"%s\"",
+ pCol->zName);
+ return 0;
+ }
+ pCol->colFlags |= COLFLAG_BUSY;
+ if( pCol->colFlags & COLFLAG_NOTAVAIL ){
+ sqlite3ExprCodeGeneratedColumn(pParse, pCol, iSrc);
+ }
+ pCol->colFlags &= ~(COLFLAG_BUSY|COLFLAG_NOTAVAIL);
+ return iSrc;
+ }else
+#endif /* SQLITE_OMIT_GENERATED_COLUMNS */
+ if( pCol->affinity==SQLITE_AFF_REAL ){
+ sqlite3VdbeAddOp2(v, OP_SCopy, iSrc, target);
+ sqlite3VdbeAddOp1(v, OP_RealAffinity, target);
+ return target;
+ }else{
+ return iSrc;
+ }
+ }else{
+ /* Coding an expression that is part of an index where column names
+ ** in the index refer to the table to which the index belongs */
+ iTab = pParse->iSelfTab - 1;
+ }
+ }
+ iReg = sqlite3ExprCodeGetColumn(pParse, pExpr->y.pTab,
+ pExpr->iColumn, iTab, target,
+ pExpr->op2);
+ if( pExpr->y.pTab==0 && pExpr->affExpr==SQLITE_AFF_REAL ){
+ sqlite3VdbeAddOp1(v, OP_RealAffinity, iReg);
+ }
+ return iReg;
+ }
+ case TK_INTEGER: {
+ codeInteger(pParse, pExpr, 0, target);
+ return target;
+ }
+ case TK_TRUEFALSE: {
+ sqlite3VdbeAddOp2(v, OP_Integer, sqlite3ExprTruthValue(pExpr), target);
+ return target;
+ }
+#ifndef SQLITE_OMIT_FLOATING_POINT
+ case TK_FLOAT: {
+ assert( !ExprHasProperty(pExpr, EP_IntValue) );
+ codeReal(v, pExpr->u.zToken, 0, target);
+ return target;
+ }
+#endif
+ case TK_STRING: {
+ assert( !ExprHasProperty(pExpr, EP_IntValue) );
+ sqlite3VdbeLoadString(v, target, pExpr->u.zToken);
+ return target;
+ }
+ default: {
+ /* Make NULL the default case so that if a bug causes an illegal
+ ** Expr node to be passed into this function, it will be handled
+ ** sanely and not crash. But keep the assert() to bring the problem
+ ** to the attention of the developers. */
+ assert( op==TK_NULL );
+ sqlite3VdbeAddOp2(v, OP_Null, 0, target);
+ return target;
+ }
+#ifndef SQLITE_OMIT_BLOB_LITERAL
+ case TK_BLOB: {
+ int n;
+ const char *z;
+ char *zBlob;
+ assert( !ExprHasProperty(pExpr, EP_IntValue) );
+ assert( pExpr->u.zToken[0]=='x' || pExpr->u.zToken[0]=='X' );
+ assert( pExpr->u.zToken[1]=='\'' );
+ z = &pExpr->u.zToken[2];
+ n = sqlite3Strlen30(z) - 1;
+ assert( z[n]=='\'' );
+ zBlob = sqlite3HexToBlob(sqlite3VdbeDb(v), z, n);
+ sqlite3VdbeAddOp4(v, OP_Blob, n/2, target, 0, zBlob, P4_DYNAMIC);
+ return target;
+ }
+#endif
+ case TK_VARIABLE: {
+ assert( !ExprHasProperty(pExpr, EP_IntValue) );
+ assert( pExpr->u.zToken!=0 );
+ assert( pExpr->u.zToken[0]!=0 );
+ sqlite3VdbeAddOp2(v, OP_Variable, pExpr->iColumn, target);
+ if( pExpr->u.zToken[1]!=0 ){
+ const char *z = sqlite3VListNumToName(pParse->pVList, pExpr->iColumn);
+ assert( pExpr->u.zToken[0]=='?' || (z && !strcmp(pExpr->u.zToken, z)) );
+ pParse->pVList[0] = 0; /* Indicate VList may no longer be enlarged */
+ sqlite3VdbeAppendP4(v, (char*)z, P4_STATIC);
+ }
+ return target;
+ }
+ case TK_REGISTER: {
+ return pExpr->iTable;
+ }
+#ifndef SQLITE_OMIT_CAST
+ case TK_CAST: {
+ /* Expressions of the form: CAST(pLeft AS token) */
+ inReg = sqlite3ExprCodeTarget(pParse, pExpr->pLeft, target);
+ if( inReg!=target ){
+ sqlite3VdbeAddOp2(v, OP_SCopy, inReg, target);
+ inReg = target;
+ }
+ sqlite3VdbeAddOp2(v, OP_Cast, target,
+ sqlite3AffinityType(pExpr->u.zToken, 0));
+ return inReg;
+ }
+#endif /* SQLITE_OMIT_CAST */
+ case TK_IS:
+ case TK_ISNOT:
+ op = (op==TK_IS) ? TK_EQ : TK_NE;
+ p5 = SQLITE_NULLEQ;
+ /* fall-through */
+ case TK_LT:
+ case TK_LE:
+ case TK_GT:
+ case TK_GE:
+ case TK_NE:
+ case TK_EQ: {
+ Expr *pLeft = pExpr->pLeft;
+ if( sqlite3ExprIsVector(pLeft) ){
+ codeVectorCompare(pParse, pExpr, target, op, p5);
+ }else{
+ r1 = sqlite3ExprCodeTemp(pParse, pLeft, &regFree1);
+ r2 = sqlite3ExprCodeTemp(pParse, pExpr->pRight, &regFree2);
+ codeCompare(pParse, pLeft, pExpr->pRight, op,
+ r1, r2, inReg, SQLITE_STOREP2 | p5,
+ ExprHasProperty(pExpr,EP_Commuted));
+ assert(TK_LT==OP_Lt); testcase(op==OP_Lt); VdbeCoverageIf(v,op==OP_Lt);
+ assert(TK_LE==OP_Le); testcase(op==OP_Le); VdbeCoverageIf(v,op==OP_Le);
+ assert(TK_GT==OP_Gt); testcase(op==OP_Gt); VdbeCoverageIf(v,op==OP_Gt);
+ assert(TK_GE==OP_Ge); testcase(op==OP_Ge); VdbeCoverageIf(v,op==OP_Ge);
+ assert(TK_EQ==OP_Eq); testcase(op==OP_Eq); VdbeCoverageIf(v,op==OP_Eq);
+ assert(TK_NE==OP_Ne); testcase(op==OP_Ne); VdbeCoverageIf(v,op==OP_Ne);
+ testcase( regFree1==0 );
+ testcase( regFree2==0 );
+ }
+ break;
+ }
+ case TK_AND:
+ case TK_OR:
+ case TK_PLUS:
+ case TK_STAR:
+ case TK_MINUS:
+ case TK_REM:
+ case TK_BITAND:
+ case TK_BITOR:
+ case TK_SLASH:
+ case TK_LSHIFT:
+ case TK_RSHIFT:
+ case TK_CONCAT: {
+ assert( TK_AND==OP_And ); testcase( op==TK_AND );
+ assert( TK_OR==OP_Or ); testcase( op==TK_OR );
+ assert( TK_PLUS==OP_Add ); testcase( op==TK_PLUS );
+ assert( TK_MINUS==OP_Subtract ); testcase( op==TK_MINUS );
+ assert( TK_REM==OP_Remainder ); testcase( op==TK_REM );
+ assert( TK_BITAND==OP_BitAnd ); testcase( op==TK_BITAND );
+ assert( TK_BITOR==OP_BitOr ); testcase( op==TK_BITOR );
+ assert( TK_SLASH==OP_Divide ); testcase( op==TK_SLASH );
+ assert( TK_LSHIFT==OP_ShiftLeft ); testcase( op==TK_LSHIFT );
+ assert( TK_RSHIFT==OP_ShiftRight ); testcase( op==TK_RSHIFT );
+ assert( TK_CONCAT==OP_Concat ); testcase( op==TK_CONCAT );
+ r1 = sqlite3ExprCodeTemp(pParse, pExpr->pLeft, &regFree1);
+ r2 = sqlite3ExprCodeTemp(pParse, pExpr->pRight, &regFree2);
+ sqlite3VdbeAddOp3(v, op, r2, r1, target);
+ testcase( regFree1==0 );
+ testcase( regFree2==0 );
+ break;
+ }
+ case TK_UMINUS: {
+ Expr *pLeft = pExpr->pLeft;
+ assert( pLeft );
+ if( pLeft->op==TK_INTEGER ){
+ codeInteger(pParse, pLeft, 1, target);
+ return target;
+#ifndef SQLITE_OMIT_FLOATING_POINT
+ }else if( pLeft->op==TK_FLOAT ){
+ assert( !ExprHasProperty(pExpr, EP_IntValue) );
+ codeReal(v, pLeft->u.zToken, 1, target);
+ return target;
+#endif
+ }else{
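+        /* Compute -X as (0 - X): synthesize an integer zero in tempX
+        ** and emit an OP_Subtract. */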
+ tempX.op = TK_INTEGER;
+ tempX.flags = EP_IntValue|EP_TokenOnly;
+ tempX.u.iValue = 0;
+ ExprClearVVAProperties(&tempX);
+ r1 = sqlite3ExprCodeTemp(pParse, &tempX, &regFree1);
+ r2 = sqlite3ExprCodeTemp(pParse, pExpr->pLeft, &regFree2);
+ sqlite3VdbeAddOp3(v, OP_Subtract, r2, r1, target);
+ testcase( regFree2==0 );
+ }
+ break;
+ }
+ case TK_BITNOT:
+ case TK_NOT: {
+ assert( TK_BITNOT==OP_BitNot ); testcase( op==TK_BITNOT );
+ assert( TK_NOT==OP_Not ); testcase( op==TK_NOT );
+ r1 = sqlite3ExprCodeTemp(pParse, pExpr->pLeft, &regFree1);
+ testcase( regFree1==0 );
+ sqlite3VdbeAddOp2(v, op, r1, inReg);
+ break;
+ }
+ case TK_TRUTH: {
+ int isTrue; /* IS TRUE or IS NOT TRUE */
+ int bNormal; /* IS TRUE or IS FALSE */
+ r1 = sqlite3ExprCodeTemp(pParse, pExpr->pLeft, &regFree1);
+ testcase( regFree1==0 );
+ isTrue = sqlite3ExprTruthValue(pExpr->pRight);
+ bNormal = pExpr->op2==TK_IS;
+ testcase( isTrue && bNormal);
+ testcase( !isTrue && bNormal);
+ sqlite3VdbeAddOp4Int(v, OP_IsTrue, r1, inReg, !isTrue, isTrue ^ bNormal);
+ break;
+ }
+ case TK_ISNULL:
+ case TK_NOTNULL: {
+ int addr;
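+      /* Load 1 as the result, then jump over the "load 0" below when
+      ** the IS NULL / NOT NULL test is satisfied. */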
+ assert( TK_ISNULL==OP_IsNull ); testcase( op==TK_ISNULL );
+ assert( TK_NOTNULL==OP_NotNull ); testcase( op==TK_NOTNULL );
+ sqlite3VdbeAddOp2(v, OP_Integer, 1, target);
+ r1 = sqlite3ExprCodeTemp(pParse, pExpr->pLeft, &regFree1);
+ testcase( regFree1==0 );
+ addr = sqlite3VdbeAddOp1(v, op, r1);
+ VdbeCoverageIf(v, op==TK_ISNULL);
+ VdbeCoverageIf(v, op==TK_NOTNULL);
+ sqlite3VdbeAddOp2(v, OP_Integer, 0, target);
+ sqlite3VdbeJumpHere(v, addr);
+ break;
+ }
+ case TK_AGG_FUNCTION: {
+ AggInfo *pInfo = pExpr->pAggInfo;
+ if( pInfo==0
+ || NEVER(pExpr->iAgg<0)
+ || NEVER(pExpr->iAgg>=pInfo->nFunc)
+ ){
+ assert( !ExprHasProperty(pExpr, EP_IntValue) );
+ sqlite3ErrorMsg(pParse, "misuse of aggregate: %s()", pExpr->u.zToken);
+ }else{
+ return pInfo->aFunc[pExpr->iAgg].iMem;
+ }
+ break;
+ }
+ case TK_FUNCTION: {
+ ExprList *pFarg; /* List of function arguments */
+ int nFarg; /* Number of function arguments */
+ FuncDef *pDef; /* The function definition object */
+ const char *zId; /* The function name */
+ u32 constMask = 0; /* Mask of function arguments that are constant */
+ int i; /* Loop counter */
+ sqlite3 *db = pParse->db; /* The database connection */
+ u8 enc = ENC(db); /* The text encoding used by this database */
+ CollSeq *pColl = 0; /* A collating sequence */
+
+#ifndef SQLITE_OMIT_WINDOWFUNC
+ if( ExprHasProperty(pExpr, EP_WinFunc) ){
+ return pExpr->y.pWin->regResult;
+ }
+#endif
+
+ if( ConstFactorOk(pParse) && sqlite3ExprIsConstantNotJoin(pExpr) ){
+ /* SQL functions can be expensive. So try to avoid running them
+ ** multiple times if we know they always give the same result */
+ return sqlite3ExprCodeRunJustOnce(pParse, pExpr, -1);
+ }
+ assert( !ExprHasProperty(pExpr, EP_xIsSelect) );
+ assert( !ExprHasProperty(pExpr, EP_TokenOnly) );
+ pFarg = pExpr->x.pList;
+ nFarg = pFarg ? pFarg->nExpr : 0;
+ assert( !ExprHasProperty(pExpr, EP_IntValue) );
+ zId = pExpr->u.zToken;
+ pDef = sqlite3FindFunction(db, zId, nFarg, enc, 0);
+#ifdef SQLITE_ENABLE_UNKNOWN_SQL_FUNCTION
+ if( pDef==0 && pParse->explain ){
+ pDef = sqlite3FindFunction(db, "unknown", nFarg, enc, 0);
+ }
+#endif
+ if( pDef==0 || pDef->xFinalize!=0 ){
+ sqlite3ErrorMsg(pParse, "unknown function: %s()", zId);
+ break;
+ }
+ if( pDef->funcFlags & SQLITE_FUNC_INLINE ){
+ assert( (pDef->funcFlags & SQLITE_FUNC_UNSAFE)==0 );
+ assert( (pDef->funcFlags & SQLITE_FUNC_DIRECT)==0 );
+ return exprCodeInlineFunction(pParse, pFarg,
+ SQLITE_PTR_TO_INT(pDef->pUserData), target);
+ }else if( pDef->funcFlags & (SQLITE_FUNC_DIRECT|SQLITE_FUNC_UNSAFE) ){
+ sqlite3ExprFunctionUsable(pParse, pExpr, pDef);
+ }
+
+ for(i=0; i<nFarg; i++){
+ if( i<32 && sqlite3ExprIsConstant(pFarg->a[i].pExpr) ){
+ testcase( i==31 );
+ constMask |= MASKBIT32(i);
+ }
+ if( (pDef->funcFlags & SQLITE_FUNC_NEEDCOLL)!=0 && !pColl ){
+ pColl = sqlite3ExprCollSeq(pParse, pFarg->a[i].pExpr);
+ }
+ }
+ if( pFarg ){
+ if( constMask ){
+ r1 = pParse->nMem+1;
+ pParse->nMem += nFarg;
+ }else{
+ r1 = sqlite3GetTempRange(pParse, nFarg);
+ }
+
+ /* For length() and typeof() functions with a column argument,
+        ** set the P5 parameter of the OP_Column opcode to OPFLAG_LENGTHARG
+ ** or OPFLAG_TYPEOFARG respectively, to avoid unnecessary data
+ ** loading.
+ */
+ if( (pDef->funcFlags & (SQLITE_FUNC_LENGTH|SQLITE_FUNC_TYPEOF))!=0 ){
+ u8 exprOp;
+ assert( nFarg==1 );
+ assert( pFarg->a[0].pExpr!=0 );
+ exprOp = pFarg->a[0].pExpr->op;
+ if( exprOp==TK_COLUMN || exprOp==TK_AGG_COLUMN ){
+ assert( SQLITE_FUNC_LENGTH==OPFLAG_LENGTHARG );
+ assert( SQLITE_FUNC_TYPEOF==OPFLAG_TYPEOFARG );
+ testcase( pDef->funcFlags & OPFLAG_LENGTHARG );
+ pFarg->a[0].pExpr->op2 =
+ pDef->funcFlags & (OPFLAG_LENGTHARG|OPFLAG_TYPEOFARG);
+ }
+ }
+
+ sqlite3ExprCodeExprList(pParse, pFarg, r1, 0,
+ SQLITE_ECEL_DUP|SQLITE_ECEL_FACTOR);
+ }else{
+ r1 = 0;
+ }
+#ifndef SQLITE_OMIT_VIRTUALTABLE
+ /* Possibly overload the function if the first argument is
+ ** a virtual table column.
+ **
+ ** For infix functions (LIKE, GLOB, REGEXP, and MATCH) use the
+ ** second argument, not the first, as the argument to test to
+ ** see if it is a column in a virtual table. This is done because
+ ** the left operand of infix functions (the operand we want to
+ ** control overloading) ends up as the second argument to the
+ ** function. The expression "A glob B" is equivalent to
+ ** "glob(B,A). We want to use the A in "A glob B" to test
+ ** for function overloading. But we use the B term in "glob(B,A)".
+ */
+ if( nFarg>=2 && ExprHasProperty(pExpr, EP_InfixFunc) ){
+ pDef = sqlite3VtabOverloadFunction(db, pDef, nFarg, pFarg->a[1].pExpr);
+ }else if( nFarg>0 ){
+ pDef = sqlite3VtabOverloadFunction(db, pDef, nFarg, pFarg->a[0].pExpr);
+ }
+#endif
+ if( pDef->funcFlags & SQLITE_FUNC_NEEDCOLL ){
+ if( !pColl ) pColl = db->pDfltColl;
+ sqlite3VdbeAddOp4(v, OP_CollSeq, 0, 0, 0, (char *)pColl, P4_COLLSEQ);
+ }
+#ifdef SQLITE_ENABLE_OFFSET_SQL_FUNC
+ if( pDef->funcFlags & SQLITE_FUNC_OFFSET ){
+ Expr *pArg = pFarg->a[0].pExpr;
+ if( pArg->op==TK_COLUMN ){
+ sqlite3VdbeAddOp3(v, OP_Offset, pArg->iTable, pArg->iColumn, target);
+ }else{
+ sqlite3VdbeAddOp2(v, OP_Null, 0, target);
+ }
+ }else
+#endif
+ {
+ sqlite3VdbeAddFunctionCall(pParse, constMask, r1, target, nFarg,
+ pDef, pExpr->op2);
+ }
+ if( nFarg ){
+ if( constMask==0 ){
+ sqlite3ReleaseTempRange(pParse, r1, nFarg);
+ }else{
+ sqlite3VdbeReleaseRegisters(pParse, r1, nFarg, constMask, 1);
+ }
+ }
+ return target;
+ }
+#ifndef SQLITE_OMIT_SUBQUERY
+ case TK_EXISTS:
+ case TK_SELECT: {
+ int nCol;
+ testcase( op==TK_EXISTS );
+ testcase( op==TK_SELECT );
+ if( pParse->db->mallocFailed ){
+ return 0;
+ }else if( op==TK_SELECT && (nCol = pExpr->x.pSelect->pEList->nExpr)!=1 ){
+ sqlite3SubselectError(pParse, nCol, 1);
+ }else{
+ return sqlite3CodeSubselect(pParse, pExpr);
+ }
+ break;
+ }
+ case TK_SELECT_COLUMN: {
+ int n;
+ if( pExpr->pLeft->iTable==0 ){
+ pExpr->pLeft->iTable = sqlite3CodeSubselect(pParse, pExpr->pLeft);
+ }
+ assert( pExpr->iTable==0 || pExpr->pLeft->op==TK_SELECT );
+ if( pExpr->iTable!=0
+ && pExpr->iTable!=(n = sqlite3ExprVectorSize(pExpr->pLeft))
+ ){
+ sqlite3ErrorMsg(pParse, "%d columns assigned %d values",
+ pExpr->iTable, n);
+ }
+ return pExpr->pLeft->iTable + pExpr->iColumn;
+ }
+ case TK_IN: {
+ int destIfFalse = sqlite3VdbeMakeLabel(pParse);
+ int destIfNull = sqlite3VdbeMakeLabel(pParse);
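+      /* Start with NULL in target. A true IN falls through and loads 1;
+      ** the OP_AddImm at destIfFalse then leaves that 1 unchanged. A
+      ** false IN jumps to destIfFalse, where OP_AddImm coerces the NULL
+      ** to integer 0. A NULL result jumps past the OP_AddImm so that
+      ** target remains NULL. */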
+ sqlite3VdbeAddOp2(v, OP_Null, 0, target);
+ sqlite3ExprCodeIN(pParse, pExpr, destIfFalse, destIfNull);
+ sqlite3VdbeAddOp2(v, OP_Integer, 1, target);
+ sqlite3VdbeResolveLabel(v, destIfFalse);
+ sqlite3VdbeAddOp2(v, OP_AddImm, target, 0);
+ sqlite3VdbeResolveLabel(v, destIfNull);
+ return target;
+ }
+#endif /* SQLITE_OMIT_SUBQUERY */
+
+
+ /*
+ ** x BETWEEN y AND z
+ **
+ ** This is equivalent to
+ **
+ ** x>=y AND x<=z
+ **
+ ** X is stored in pExpr->pLeft.
+  ** Y is stored in pExpr->x.pList->a[0].pExpr.
+  ** Z is stored in pExpr->x.pList->a[1].pExpr.
+ */
+ case TK_BETWEEN: {
+ exprCodeBetween(pParse, pExpr, target, 0, 0);
+ return target;
+ }
+ case TK_SPAN:
+ case TK_COLLATE:
+ case TK_UPLUS: {
+ pExpr = pExpr->pLeft;
+ goto expr_code_doover; /* 2018-04-28: Prevent deep recursion. OSSFuzz. */
+ }
+
+ case TK_TRIGGER: {
+ /* If the opcode is TK_TRIGGER, then the expression is a reference
+ ** to a column in the new.* or old.* pseudo-tables available to
+ ** trigger programs. In this case Expr.iTable is set to 1 for the
+ ** new.* pseudo-table, or 0 for the old.* pseudo-table. Expr.iColumn
+ ** is set to the column of the pseudo-table to read, or to -1 to
+ ** read the rowid field.
+ **
+ ** The expression is implemented using an OP_Param opcode. The p1
+ ** parameter is set to 0 for an old.rowid reference, or to (i+1)
+ ** to reference another column of the old.* pseudo-table, where
+ ** i is the index of the column. For a new.rowid reference, p1 is
+ ** set to (n+1), where n is the number of columns in each pseudo-table.
+ ** For a reference to any other column in the new.* pseudo-table, p1
+ ** is set to (n+2+i), where n and i are as defined previously. For
+ ** example, if the table on which triggers are being fired is
+ ** declared as:
+ **
+ ** CREATE TABLE t1(a, b);
+ **
+ ** Then p1 is interpreted as follows:
+ **
+  **     p1==0   ->   old.rowid     p1==3   ->   new.rowid
+  **     p1==1   ->   old.a         p1==4   ->   new.a
+  **     p1==2   ->   old.b         p1==5   ->   new.b
+ */
+ Table *pTab = pExpr->y.pTab;
+ int iCol = pExpr->iColumn;
+ int p1 = pExpr->iTable * (pTab->nCol+1) + 1
+ + sqlite3TableColumnToStorage(pTab, iCol);
+
+ assert( pExpr->iTable==0 || pExpr->iTable==1 );
+ assert( iCol>=-1 && iCol<pTab->nCol );
+ assert( pTab->iPKey<0 || iCol!=pTab->iPKey );
+ assert( p1>=0 && p1<(pTab->nCol*2+2) );
+
+ sqlite3VdbeAddOp2(v, OP_Param, p1, target);
+ VdbeComment((v, "r[%d]=%s.%s", target,
+ (pExpr->iTable ? "new" : "old"),
+ (pExpr->iColumn<0 ? "rowid" : pExpr->y.pTab->aCol[iCol].zName)
+ ));
+
+#ifndef SQLITE_OMIT_FLOATING_POINT
+ /* If the column has REAL affinity, it may currently be stored as an
+ ** integer. Use OP_RealAffinity to make sure it is really real.
+ **
+ ** EVIDENCE-OF: R-60985-57662 SQLite will convert the value back to
+ ** floating point when extracting it from the record. */
+ if( iCol>=0 && pTab->aCol[iCol].affinity==SQLITE_AFF_REAL ){
+ sqlite3VdbeAddOp1(v, OP_RealAffinity, target);
+ }
+#endif
+ break;
+ }
+
+ case TK_VECTOR: {
+ sqlite3ErrorMsg(pParse, "row value misused");
+ break;
+ }
+
+ /* TK_IF_NULL_ROW Expr nodes are inserted ahead of expressions
+ ** that derive from the right-hand table of a LEFT JOIN. The
+ ** Expr.iTable value is the table number for the right-hand table.
+ ** The expression is only evaluated if that table is not currently
+ ** on a LEFT JOIN NULL row.
+ */
+ case TK_IF_NULL_ROW: {
+ int addrINR;
+ u8 okConstFactor = pParse->okConstFactor;
+ addrINR = sqlite3VdbeAddOp1(v, OP_IfNullRow, pExpr->iTable);
+ /* Temporarily disable factoring of constant expressions, since
+ ** even though expressions may appear to be constant, they are not
+ ** really constant because they originate from the right-hand side
+ ** of a LEFT JOIN. */
+ pParse->okConstFactor = 0;
+ inReg = sqlite3ExprCodeTarget(pParse, pExpr->pLeft, target);
+ pParse->okConstFactor = okConstFactor;
+ sqlite3VdbeJumpHere(v, addrINR);
+ sqlite3VdbeChangeP3(v, addrINR, inReg);
+ break;
+ }
+
+ /*
+ ** Form A:
+ ** CASE x WHEN e1 THEN r1 WHEN e2 THEN r2 ... WHEN eN THEN rN ELSE y END
+ **
+ ** Form B:
+ ** CASE WHEN e1 THEN r1 WHEN e2 THEN r2 ... WHEN eN THEN rN ELSE y END
+ **
+  ** Form A can be transformed into the equivalent form B as follows:
+ ** CASE WHEN x=e1 THEN r1 WHEN x=e2 THEN r2 ...
+ ** WHEN x=eN THEN rN ELSE y END
+ **
+ ** X (if it exists) is in pExpr->pLeft.
+  ** Y is the last element of pExpr->x.pList if pExpr->x.pList->nExpr is
+  ** odd.  Y is optional:  if the number of elements in x.pList is even,
+  ** then Y is omitted and the "otherwise" result is NULL.
+  ** Ei is in pExpr->x.pList->a[i*2] and Ri is pExpr->x.pList->a[i*2+1].
+ **
+ ** The result of the expression is the Ri for the first matching Ei,
+ ** or if there is no matching Ei, the ELSE term Y, or if there is
+ ** no ELSE term, NULL.
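+  **
+  ** For example,
+  **
+  **     CASE x WHEN 1 THEN 'one' WHEN 2 THEN 'two' ELSE 'many' END
+  **
+  ** is coded just like
+  **
+  **     CASE WHEN x=1 THEN 'one' WHEN x=2 THEN 'two' ELSE 'many' END
+  **
+  ** except that x is evaluated into a register once and that register
+  ** is reused for each comparison.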
+ */
+ case TK_CASE: {
+ int endLabel; /* GOTO label for end of CASE stmt */
+ int nextCase; /* GOTO label for next WHEN clause */
+ int nExpr; /* 2x number of WHEN terms */
+ int i; /* Loop counter */
+ ExprList *pEList; /* List of WHEN terms */
+ struct ExprList_item *aListelem; /* Array of WHEN terms */
+ Expr opCompare; /* The X==Ei expression */
+ Expr *pX; /* The X expression */
+ Expr *pTest = 0; /* X==Ei (form A) or just Ei (form B) */
+ Expr *pDel = 0;
+ sqlite3 *db = pParse->db;
+
+ assert( !ExprHasProperty(pExpr, EP_xIsSelect) && pExpr->x.pList );
+ assert(pExpr->x.pList->nExpr > 0);
+ pEList = pExpr->x.pList;
+ aListelem = pEList->a;
+ nExpr = pEList->nExpr;
+ endLabel = sqlite3VdbeMakeLabel(pParse);
+ if( (pX = pExpr->pLeft)!=0 ){
+ pDel = sqlite3ExprDup(db, pX, 0);
+ if( db->mallocFailed ){
+ sqlite3ExprDelete(db, pDel);
+ break;
+ }
+ testcase( pX->op==TK_COLUMN );
+ exprToRegister(pDel, exprCodeVector(pParse, pDel, &regFree1));
+ testcase( regFree1==0 );
+ memset(&opCompare, 0, sizeof(opCompare));
+ opCompare.op = TK_EQ;
+ opCompare.pLeft = pDel;
+ pTest = &opCompare;
+ /* Ticket b351d95f9cd5ef17e9d9dbae18f5ca8611190001:
+      ** The value in regFree1 might get SCopy-ed into the final result.
+ ** So make sure that the regFree1 register is not reused for other
+ ** purposes and possibly overwritten. */
+ regFree1 = 0;
+ }
+ for(i=0; i<nExpr-1; i=i+2){
+ if( pX ){
+ assert( pTest!=0 );
+ opCompare.pRight = aListelem[i].pExpr;
+ }else{
+ pTest = aListelem[i].pExpr;
+ }
+ nextCase = sqlite3VdbeMakeLabel(pParse);
+ testcase( pTest->op==TK_COLUMN );
+ sqlite3ExprIfFalse(pParse, pTest, nextCase, SQLITE_JUMPIFNULL);
+ testcase( aListelem[i+1].pExpr->op==TK_COLUMN );
+ sqlite3ExprCode(pParse, aListelem[i+1].pExpr, target);
+ sqlite3VdbeGoto(v, endLabel);
+ sqlite3VdbeResolveLabel(v, nextCase);
+ }
+ if( (nExpr&1)!=0 ){
+ sqlite3ExprCode(pParse, pEList->a[nExpr-1].pExpr, target);
+ }else{
+ sqlite3VdbeAddOp2(v, OP_Null, 0, target);
+ }
+ sqlite3ExprDelete(db, pDel);
+ setDoNotMergeFlagOnCopy(v);
+ sqlite3VdbeResolveLabel(v, endLabel);
+ break;
+ }
+#ifndef SQLITE_OMIT_TRIGGER
+ case TK_RAISE: {
+ assert( pExpr->affExpr==OE_Rollback
+ || pExpr->affExpr==OE_Abort
+ || pExpr->affExpr==OE_Fail
+ || pExpr->affExpr==OE_Ignore
+ );
+ if( !pParse->pTriggerTab && !pParse->nested ){
+ sqlite3ErrorMsg(pParse,
+ "RAISE() may only be used within a trigger-program");
+ return 0;
+ }
+ if( pExpr->affExpr==OE_Abort ){
+ sqlite3MayAbort(pParse);
+ }
+ assert( !ExprHasProperty(pExpr, EP_IntValue) );
+ if( pExpr->affExpr==OE_Ignore ){
+ sqlite3VdbeAddOp4(
+ v, OP_Halt, SQLITE_OK, OE_Ignore, 0, pExpr->u.zToken,0);
+ VdbeCoverage(v);
+ }else{
+ sqlite3HaltConstraint(pParse,
+ pParse->pTriggerTab ? SQLITE_CONSTRAINT_TRIGGER : SQLITE_ERROR,
+ pExpr->affExpr, pExpr->u.zToken, 0, 0);
+ }
+
+ break;
+ }
+#endif
+ }
+ sqlite3ReleaseTempReg(pParse, regFree1);
+ sqlite3ReleaseTempReg(pParse, regFree2);
+ return inReg;
+}
+
+/*
+** Generate code that will evaluate expression pExpr just one time
+** per prepared statement execution.
+**
+** If the expression uses functions (that might throw an exception) then
+** guard them with an OP_Once opcode to ensure that the code is only executed
+** once. If no functions are involved, then factor the code out and put it at
+** the end of the prepared statement in the initialization section.
+**
+** If regDest>=0 then the result is always stored in that register and the
+** result is not reusable. If regDest<0 then this routine is free to
+** store the value wherever it wants. The register where the expression
+** is stored is returned. When regDest<0, two identical expressions might
+** code to the same register, if they do not contain function calls and hence
+** are factored out into the initialization section at the end of the
+** prepared statement.
+*/
+SQLITE_PRIVATE int sqlite3ExprCodeRunJustOnce(
+ Parse *pParse, /* Parsing context */
+ Expr *pExpr, /* The expression to code when the VDBE initializes */
+ int regDest /* Store the value in this register */
+){
+ ExprList *p;
+ assert( ConstFactorOk(pParse) );
+ p = pParse->pConstExpr;
+ if( regDest<0 && p ){
+ struct ExprList_item *pItem;
+ int i;
+ for(pItem=p->a, i=p->nExpr; i>0; pItem++, i--){
+ if( pItem->reusable && sqlite3ExprCompare(0,pItem->pExpr,pExpr,-1)==0 ){
+ return pItem->u.iConstExprReg;
+ }
+ }
+ }
+ pExpr = sqlite3ExprDup(pParse->db, pExpr, 0);
+ if( pExpr!=0 && ExprHasProperty(pExpr, EP_HasFunc) ){
+ Vdbe *v = pParse->pVdbe;
+ int addr;
+ assert( v );
+ addr = sqlite3VdbeAddOp0(v, OP_Once); VdbeCoverage(v);
+ pParse->okConstFactor = 0;
+ if( !pParse->db->mallocFailed ){
+ if( regDest<0 ) regDest = ++pParse->nMem;
+ sqlite3ExprCode(pParse, pExpr, regDest);
+ }
+ pParse->okConstFactor = 1;
+ sqlite3ExprDelete(pParse->db, pExpr);
+ sqlite3VdbeJumpHere(v, addr);
+ }else{
+ p = sqlite3ExprListAppend(pParse, p, pExpr);
+ if( p ){
+ struct ExprList_item *pItem = &p->a[p->nExpr-1];
+ pItem->reusable = regDest<0;
+ if( regDest<0 ) regDest = ++pParse->nMem;
+ pItem->u.iConstExprReg = regDest;
+ }
+ pParse->pConstExpr = p;
+ }
+ return regDest;
+}
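+/* Usage sketch: the TK_FUNCTION case of sqlite3ExprCodeTarget() above
+** factors out constant function calls such as upper('abc') with:
+**
+**     if( ConstFactorOk(pParse) && sqlite3ExprIsConstantNotJoin(pExpr) ){
+**       return sqlite3ExprCodeRunJustOnce(pParse, pExpr, -1);
+**     }
+**
+** Because regDest is negative there, a second occurrence of the same
+** constant expression in the statement may be satisfied from the same
+** register via the pItem->reusable search at the top of this routine.
+*/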
+
+/*
+** Generate code to evaluate an expression and store the results
+** into a register. Return the register number where the results
+** are stored.
+**
+** If the register is a temporary register that can be deallocated,
+** then write its number into *pReg. If the result register is not
+** a temporary, then set *pReg to zero.
+**
+** If pExpr is a constant, then this routine might generate this
+** code to fill the register in the initialization section of the
+** VDBE program, in order to factor it out of the evaluation loop.
+*/
+SQLITE_PRIVATE int sqlite3ExprCodeTemp(Parse *pParse, Expr *pExpr, int *pReg){
+ int r2;
+ pExpr = sqlite3ExprSkipCollateAndLikely(pExpr);
+ if( ConstFactorOk(pParse)
+ && pExpr->op!=TK_REGISTER
+ && sqlite3ExprIsConstantNotJoin(pExpr)
+ ){
+ *pReg = 0;
+ r2 = sqlite3ExprCodeRunJustOnce(pParse, pExpr, -1);
+ }else{
+ int r1 = sqlite3GetTempReg(pParse);
+ r2 = sqlite3ExprCodeTarget(pParse, pExpr, r1);
+ if( r2==r1 ){
+ *pReg = r1;
+ }else{
+ sqlite3ReleaseTempReg(pParse, r1);
+ *pReg = 0;
+ }
+ }
+ return r2;
+}
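+/* Typical call pattern, as used throughout this file:
+**
+**     int regFree1 = 0;
+**     int r1 = sqlite3ExprCodeTemp(pParse, pExpr->pLeft, &regFree1);
+**     ...emit opcodes that read register r1...
+**     sqlite3ReleaseTempReg(pParse, regFree1);
+**
+** sqlite3ReleaseTempReg() is a no-op when passed zero, so the caller
+** need not track whether a temporary register was actually allocated.
+*/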
+
+/*
+** Generate code that will evaluate expression pExpr and store the
+** results in register target. The results are guaranteed to appear
+** in register target.
+*/
+SQLITE_PRIVATE void sqlite3ExprCode(Parse *pParse, Expr *pExpr, int target){
+ int inReg;
+
+ assert( pExpr==0 || !ExprHasVVAProperty(pExpr,EP_Immutable) );
+ assert( target>0 && target<=pParse->nMem );
+ assert( pParse->pVdbe!=0 || pParse->db->mallocFailed );
+ if( pParse->pVdbe==0 ) return;
+ inReg = sqlite3ExprCodeTarget(pParse, pExpr, target);
+ if( inReg!=target ){
+ u8 op;
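+    /* A subquery result register may be overwritten when the subquery
+    ** is rerun, so it needs a full OP_Copy; otherwise a shallow
+    ** OP_SCopy suffices. */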
+ if( ExprHasProperty(pExpr,EP_Subquery) ){
+ op = OP_Copy;
+ }else{
+ op = OP_SCopy;
+ }
+ sqlite3VdbeAddOp2(pParse->pVdbe, op, inReg, target);
+ }
+}
+
+/*
+** Make a transient copy of expression pExpr and then code it using
+** sqlite3ExprCode(). This routine works just like sqlite3ExprCode()
+** except that the input expression is guaranteed to be unchanged.
+*/
+SQLITE_PRIVATE void sqlite3ExprCodeCopy(Parse *pParse, Expr *pExpr, int target){
+ sqlite3 *db = pParse->db;
+ pExpr = sqlite3ExprDup(db, pExpr, 0);
+ if( !db->mallocFailed ) sqlite3ExprCode(pParse, pExpr, target);
+ sqlite3ExprDelete(db, pExpr);
+}
+
+/*
+** Generate code that will evaluate expression pExpr and store the
+** results in register target. The results are guaranteed to appear
+** in register target. If the expression is constant, then this routine
+** might choose to code the expression at initialization time.
+*/
+SQLITE_PRIVATE void sqlite3ExprCodeFactorable(Parse *pParse, Expr *pExpr, int target){
+ if( pParse->okConstFactor && sqlite3ExprIsConstantNotJoin(pExpr) ){
+ sqlite3ExprCodeRunJustOnce(pParse, pExpr, target);
+ }else{
+ sqlite3ExprCodeCopy(pParse, pExpr, target);
+ }
+}
+
+/*
+** Generate code that pushes the value of every element of the given
+** expression list into a sequence of registers beginning at target.
+**
+** Return the number of elements evaluated. The number returned will
+** usually be pList->nExpr but might be reduced if the SQLITE_ECEL_OMITREF
+** flag is set.
+**
+** The SQLITE_ECEL_DUP flag prevents the arguments from being
+** filled using OP_SCopy. OP_Copy must be used instead.
+**
+** The SQLITE_ECEL_FACTOR argument allows constant arguments to be
+** factored out into initialization code.
+**
+** The SQLITE_ECEL_REF flag means that expressions in the list with
+** ExprList.a[].u.x.iOrderByCol>0 have already been evaluated and stored
+** in registers at srcReg, and so the value can be copied from there.
+** If SQLITE_ECEL_OMITREF is also set, then the values with u.x.iOrderByCol>0
+** are simply omitted rather than being copied from srcReg.
+*/
+SQLITE_PRIVATE int sqlite3ExprCodeExprList(
+ Parse *pParse, /* Parsing context */
+ ExprList *pList, /* The expression list to be coded */
+ int target, /* Where to write results */
+ int srcReg, /* Source registers if SQLITE_ECEL_REF */
+ u8 flags /* SQLITE_ECEL_* flags */
+){
+ struct ExprList_item *pItem;
+ int i, j, n;
+ u8 copyOp = (flags & SQLITE_ECEL_DUP) ? OP_Copy : OP_SCopy;
+ Vdbe *v = pParse->pVdbe;
+ assert( pList!=0 );
+ assert( target>0 );
+ assert( pParse->pVdbe!=0 ); /* Never gets this far otherwise */
+ n = pList->nExpr;
+ if( !ConstFactorOk(pParse) ) flags &= ~SQLITE_ECEL_FACTOR;
+ for(pItem=pList->a, i=0; i<n; i++, pItem++){
+ Expr *pExpr = pItem->pExpr;
+#ifdef SQLITE_ENABLE_SORTER_REFERENCES
+ if( pItem->bSorterRef ){
+ i--;
+ n--;
+ }else
+#endif
+ if( (flags & SQLITE_ECEL_REF)!=0 && (j = pItem->u.x.iOrderByCol)>0 ){
+ if( flags & SQLITE_ECEL_OMITREF ){
+ i--;
+ n--;
+ }else{
+ sqlite3VdbeAddOp2(v, copyOp, j+srcReg-1, target+i);
+ }
+ }else if( (flags & SQLITE_ECEL_FACTOR)!=0
+ && sqlite3ExprIsConstantNotJoin(pExpr)
+ ){
+ sqlite3ExprCodeRunJustOnce(pParse, pExpr, target+i);
+ }else{
+ int inReg = sqlite3ExprCodeTarget(pParse, pExpr, target+i);
+ if( inReg!=target+i ){
+ VdbeOp *pOp;
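+        /* If the previous opcode is an OP_Copy whose source and
+        ** destination runs end just before inReg and target+i, widen
+        ** that copy by bumping its P3 count rather than emitting a
+        ** new OP_Copy. */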
+ if( copyOp==OP_Copy
+ && (pOp=sqlite3VdbeGetOp(v, -1))->opcode==OP_Copy
+ && pOp->p1+pOp->p3+1==inReg
+ && pOp->p2+pOp->p3+1==target+i
+ && pOp->p5==0 /* The do-not-merge flag must be clear */
+ ){
+ pOp->p3++;
+ }else{
+ sqlite3VdbeAddOp2(v, copyOp, inReg, target+i);
+ }
+ }
+ }
+ }
+ return n;
+}
+
+/*
+** Generate code for a BETWEEN operator.
+**
+** x BETWEEN y AND z
+**
+** The above is equivalent to
+**
+** x>=y AND x<=z
+**
+** Code it as such, taking care to do the common subexpression
+** elimination of x.
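+**
+** That is, the code below builds a transient expression tree
+**
+**            AND
+**           /   \
+**         x>=y   x<=z
+**
+** in which both comparison nodes share a single copy of x that has
+** been evaluated into a register and converted to TK_REGISTER.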
+**
+** The xJump parameter determines details:
+**
+**    NULL:                   Store the boolean result in reg[dest]
+**    sqlite3ExprIfTrue:      Jump to dest if true
+**    sqlite3ExprIfFalse:     Jump to dest if false
+**
+** The jumpIfNull parameter is ignored if xJump is NULL.
+*/
+static void exprCodeBetween(
+ Parse *pParse, /* Parsing and code generating context */
+ Expr *pExpr, /* The BETWEEN expression */
+ int dest, /* Jump destination or storage location */
+ void (*xJump)(Parse*,Expr*,int,int), /* Action to take */
+ int jumpIfNull /* Take the jump if the BETWEEN is NULL */
+){
+ Expr exprAnd; /* The AND operator in x>=y AND x<=z */
+ Expr compLeft; /* The x>=y term */
+ Expr compRight; /* The x<=z term */
+ int regFree1 = 0; /* Temporary use register */
+ Expr *pDel = 0;
+ sqlite3 *db = pParse->db;
+
+ memset(&compLeft, 0, sizeof(Expr));
+ memset(&compRight, 0, sizeof(Expr));
+ memset(&exprAnd, 0, sizeof(Expr));
+
+ assert( !ExprHasProperty(pExpr, EP_xIsSelect) );
+ pDel = sqlite3ExprDup(db, pExpr->pLeft, 0);
+ if( db->mallocFailed==0 ){
+ exprAnd.op = TK_AND;
+ exprAnd.pLeft = &compLeft;
+ exprAnd.pRight = &compRight;
+ compLeft.op = TK_GE;
+ compLeft.pLeft = pDel;
+ compLeft.pRight = pExpr->x.pList->a[0].pExpr;
+ compRight.op = TK_LE;
+ compRight.pLeft = pDel;
+ compRight.pRight = pExpr->x.pList->a[1].pExpr;
+ exprToRegister(pDel, exprCodeVector(pParse, pDel, &regFree1));
+ if( xJump ){
+ xJump(pParse, &exprAnd, dest, jumpIfNull);
+ }else{
+      /* Mark the expression as being from the ON or USING clause of a join
+ ** so that the sqlite3ExprCodeTarget() routine will not attempt to move
+ ** it into the Parse.pConstExpr list. We should use a new bit for this,
+ ** for clarity, but we are out of bits in the Expr.flags field so we
+ ** have to reuse the EP_FromJoin bit. Bummer. */
+ pDel->flags |= EP_FromJoin;
+ sqlite3ExprCodeTarget(pParse, &exprAnd, dest);
+ }
+ sqlite3ReleaseTempReg(pParse, regFree1);
+ }
+ sqlite3ExprDelete(db, pDel);
+
+ /* Ensure adequate test coverage */
+ testcase( xJump==sqlite3ExprIfTrue && jumpIfNull==0 && regFree1==0 );
+ testcase( xJump==sqlite3ExprIfTrue && jumpIfNull==0 && regFree1!=0 );
+ testcase( xJump==sqlite3ExprIfTrue && jumpIfNull!=0 && regFree1==0 );
+ testcase( xJump==sqlite3ExprIfTrue && jumpIfNull!=0 && regFree1!=0 );
+ testcase( xJump==sqlite3ExprIfFalse && jumpIfNull==0 && regFree1==0 );
+ testcase( xJump==sqlite3ExprIfFalse && jumpIfNull==0 && regFree1!=0 );
+ testcase( xJump==sqlite3ExprIfFalse && jumpIfNull!=0 && regFree1==0 );
+ testcase( xJump==sqlite3ExprIfFalse && jumpIfNull!=0 && regFree1!=0 );
+ testcase( xJump==0 );
+}
+
+/*
+** Generate code for a boolean expression such that a jump is made
+** to the label "dest" if the expression is true but execution
+** continues straight thru if the expression is false.
+**
+** If the expression evaluates to NULL (neither true nor false), then
+** take the jump if the jumpIfNull flag is SQLITE_JUMPIFNULL.
+**
+** This code depends on the fact that certain token values (ex: TK_EQ)
+** are the same as opcode values (ex: OP_Eq) that implement the corresponding
+** operation. Special comments in vdbe.c and the mkopcodeh.awk script in
+** the make process cause these values to align. Assert()s in the code
+** below verify that the numbers are aligned correctly.
+*/
+SQLITE_PRIVATE void sqlite3ExprIfTrue(Parse *pParse, Expr *pExpr, int dest, int jumpIfNull){
+ Vdbe *v = pParse->pVdbe;
+ int op = 0;
+ int regFree1 = 0;
+ int regFree2 = 0;
+ int r1, r2;
+
+ assert( jumpIfNull==SQLITE_JUMPIFNULL || jumpIfNull==0 );
+ if( NEVER(v==0) ) return; /* Existence of VDBE checked by caller */
+ if( NEVER(pExpr==0) ) return; /* No way this can happen */
+ assert( !ExprHasVVAProperty(pExpr, EP_Immutable) );
+ op = pExpr->op;
+ switch( op ){
+ case TK_AND:
+ case TK_OR: {
+ Expr *pAlt = sqlite3ExprSimplifiedAndOr(pExpr);
+ if( pAlt!=pExpr ){
+ sqlite3ExprIfTrue(pParse, pAlt, dest, jumpIfNull);
+ }else if( op==TK_AND ){
+ int d2 = sqlite3VdbeMakeLabel(pParse);
+ testcase( jumpIfNull==0 );
+ sqlite3ExprIfFalse(pParse, pExpr->pLeft, d2,
+ jumpIfNull^SQLITE_JUMPIFNULL);
+ sqlite3ExprIfTrue(pParse, pExpr->pRight, dest, jumpIfNull);
+ sqlite3VdbeResolveLabel(v, d2);
+ }else{
+ testcase( jumpIfNull==0 );
+ sqlite3ExprIfTrue(pParse, pExpr->pLeft, dest, jumpIfNull);
+ sqlite3ExprIfTrue(pParse, pExpr->pRight, dest, jumpIfNull);
+ }
+ break;
+ }
+ case TK_NOT: {
+ testcase( jumpIfNull==0 );
+ sqlite3ExprIfFalse(pParse, pExpr->pLeft, dest, jumpIfNull);
+ break;
+ }
+ case TK_TRUTH: {
+ int isNot; /* IS NOT TRUE or IS NOT FALSE */
+ int isTrue; /* IS TRUE or IS NOT TRUE */
+ testcase( jumpIfNull==0 );
+ isNot = pExpr->op2==TK_ISNOT;
+ isTrue = sqlite3ExprTruthValue(pExpr->pRight);
+ testcase( isTrue && isNot );
+ testcase( !isTrue && isNot );
+ if( isTrue ^ isNot ){
+ sqlite3ExprIfTrue(pParse, pExpr->pLeft, dest,
+ isNot ? SQLITE_JUMPIFNULL : 0);
+ }else{
+ sqlite3ExprIfFalse(pParse, pExpr->pLeft, dest,
+ isNot ? SQLITE_JUMPIFNULL : 0);
+ }
+ break;
+ }
+ case TK_IS:
+ case TK_ISNOT:
+ testcase( op==TK_IS );
+ testcase( op==TK_ISNOT );
+ op = (op==TK_IS) ? TK_EQ : TK_NE;
+ jumpIfNull = SQLITE_NULLEQ;
+ /* no break */ deliberate_fall_through
+ case TK_LT:
+ case TK_LE:
+ case TK_GT:
+ case TK_GE:
+ case TK_NE:
+ case TK_EQ: {
+ if( sqlite3ExprIsVector(pExpr->pLeft) ) goto default_expr;
+ testcase( jumpIfNull==0 );
+ r1 = sqlite3ExprCodeTemp(pParse, pExpr->pLeft, &regFree1);
+ r2 = sqlite3ExprCodeTemp(pParse, pExpr->pRight, &regFree2);
+ codeCompare(pParse, pExpr->pLeft, pExpr->pRight, op,
+ r1, r2, dest, jumpIfNull, ExprHasProperty(pExpr,EP_Commuted));
+ assert(TK_LT==OP_Lt); testcase(op==OP_Lt); VdbeCoverageIf(v,op==OP_Lt);
+ assert(TK_LE==OP_Le); testcase(op==OP_Le); VdbeCoverageIf(v,op==OP_Le);
+ assert(TK_GT==OP_Gt); testcase(op==OP_Gt); VdbeCoverageIf(v,op==OP_Gt);
+ assert(TK_GE==OP_Ge); testcase(op==OP_Ge); VdbeCoverageIf(v,op==OP_Ge);
+ assert(TK_EQ==OP_Eq); testcase(op==OP_Eq);
+ VdbeCoverageIf(v, op==OP_Eq && jumpIfNull==SQLITE_NULLEQ);
+ VdbeCoverageIf(v, op==OP_Eq && jumpIfNull!=SQLITE_NULLEQ);
+ assert(TK_NE==OP_Ne); testcase(op==OP_Ne);
+ VdbeCoverageIf(v, op==OP_Ne && jumpIfNull==SQLITE_NULLEQ);
+ VdbeCoverageIf(v, op==OP_Ne && jumpIfNull!=SQLITE_NULLEQ);
+ testcase( regFree1==0 );
+ testcase( regFree2==0 );
+ break;
+ }
+ case TK_ISNULL:
+ case TK_NOTNULL: {
+ assert( TK_ISNULL==OP_IsNull ); testcase( op==TK_ISNULL );
+ assert( TK_NOTNULL==OP_NotNull ); testcase( op==TK_NOTNULL );
+ r1 = sqlite3ExprCodeTemp(pParse, pExpr->pLeft, &regFree1);
+ sqlite3VdbeAddOp2(v, op, r1, dest);
+ VdbeCoverageIf(v, op==TK_ISNULL);
+ VdbeCoverageIf(v, op==TK_NOTNULL);
+ testcase( regFree1==0 );
+ break;
+ }
+ case TK_BETWEEN: {
+ testcase( jumpIfNull==0 );
+ exprCodeBetween(pParse, pExpr, dest, sqlite3ExprIfTrue, jumpIfNull);
+ break;
+ }
+#ifndef SQLITE_OMIT_SUBQUERY
+ case TK_IN: {
+ int destIfFalse = sqlite3VdbeMakeLabel(pParse);
+ int destIfNull = jumpIfNull ? dest : destIfFalse;
+ sqlite3ExprCodeIN(pParse, pExpr, destIfFalse, destIfNull);
+ sqlite3VdbeGoto(v, dest);
+ sqlite3VdbeResolveLabel(v, destIfFalse);
+ break;
+ }
+#endif
+ default: {
+ default_expr:
+ if( ExprAlwaysTrue(pExpr) ){
+ sqlite3VdbeGoto(v, dest);
+ }else if( ExprAlwaysFalse(pExpr) ){
+ /* No-op */
+ }else{
+ r1 = sqlite3ExprCodeTemp(pParse, pExpr, &regFree1);
+ sqlite3VdbeAddOp3(v, OP_If, r1, dest, jumpIfNull!=0);
+ VdbeCoverage(v);
+ testcase( regFree1==0 );
+ testcase( jumpIfNull==0 );
+ }
+ break;
+ }
+ }
+ sqlite3ReleaseTempReg(pParse, regFree1);
+ sqlite3ReleaseTempReg(pParse, regFree2);
+}
+
+/*
+** Generate code for a boolean expression such that a jump is made
+** to the label "dest" if the expression is false but execution
+** continues straight thru if the expression is true.
+**
+** If the expression evaluates to NULL (neither true nor false) then
+** jump if jumpIfNull is SQLITE_JUMPIFNULL or fall through if jumpIfNull
+** is 0.
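+**
+** WHERE-clause terms are typically coded through this routine with
+** jumpIfNull==SQLITE_JUMPIFNULL, so that rows for which a term is NULL
+** are skipped just like rows for which it is false.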
+*/
+SQLITE_PRIVATE void sqlite3ExprIfFalse(Parse *pParse, Expr *pExpr, int dest, int jumpIfNull){
+ Vdbe *v = pParse->pVdbe;
+ int op = 0;
+ int regFree1 = 0;
+ int regFree2 = 0;
+ int r1, r2;
+
+ assert( jumpIfNull==SQLITE_JUMPIFNULL || jumpIfNull==0 );
+ if( NEVER(v==0) ) return; /* Existence of VDBE checked by caller */
+ if( pExpr==0 ) return;
+ assert( !ExprHasVVAProperty(pExpr,EP_Immutable) );
+
+  /* The values of pExpr->op and op are related as follows:
+  **
+  **       pExpr->op              op
+  **       ---------          ----------
+  **       TK_ISNULL          OP_NotNull
+  **       TK_NOTNULL         OP_IsNull
+  **       TK_NE              OP_Eq
+  **       TK_EQ              OP_Ne
+  **       TK_GT              OP_Le
+  **       TK_LE              OP_Gt
+  **       TK_GE              OP_Lt
+  **       TK_LT              OP_Ge
+ **
+ ** For other values of pExpr->op, op is undefined and unused.
+  ** The values of the TK_ and OP_ constants are arranged such that we
+ ** can compute the mapping above using the following expression.
+ ** Assert()s verify that the computation is correct.
+ */
+ op = ((pExpr->op+(TK_ISNULL&1))^1)-(TK_ISNULL&1);
+
+ /* Verify correct alignment of TK_ and OP_ constants
+ */
+ assert( pExpr->op!=TK_ISNULL || op==OP_NotNull );
+ assert( pExpr->op!=TK_NOTNULL || op==OP_IsNull );
+ assert( pExpr->op!=TK_NE || op==OP_Eq );
+ assert( pExpr->op!=TK_EQ || op==OP_Ne );
+ assert( pExpr->op!=TK_LT || op==OP_Ge );
+ assert( pExpr->op!=TK_LE || op==OP_Gt );
+ assert( pExpr->op!=TK_GT || op==OP_Le );
+ assert( pExpr->op!=TK_GE || op==OP_Lt );
+
+ switch( pExpr->op ){
+ case TK_AND:
+ case TK_OR: {
+ Expr *pAlt = sqlite3ExprSimplifiedAndOr(pExpr);
+ if( pAlt!=pExpr ){
+ sqlite3ExprIfFalse(pParse, pAlt, dest, jumpIfNull);
+ }else if( pExpr->op==TK_AND ){
+ testcase( jumpIfNull==0 );
+ sqlite3ExprIfFalse(pParse, pExpr->pLeft, dest, jumpIfNull);
+ sqlite3ExprIfFalse(pParse, pExpr->pRight, dest, jumpIfNull);
+ }else{
+ int d2 = sqlite3VdbeMakeLabel(pParse);
+ testcase( jumpIfNull==0 );
+ sqlite3ExprIfTrue(pParse, pExpr->pLeft, d2,
+ jumpIfNull^SQLITE_JUMPIFNULL);
+ sqlite3ExprIfFalse(pParse, pExpr->pRight, dest, jumpIfNull);
+ sqlite3VdbeResolveLabel(v, d2);
+ }
+ break;
+ }
+ case TK_NOT: {
+ testcase( jumpIfNull==0 );
+ sqlite3ExprIfTrue(pParse, pExpr->pLeft, dest, jumpIfNull);
+ break;
+ }
+ case TK_TRUTH: {
+ int isNot; /* IS NOT TRUE or IS NOT FALSE */
+ int isTrue; /* IS TRUE or IS NOT TRUE */
+ testcase( jumpIfNull==0 );
+ isNot = pExpr->op2==TK_ISNOT;
+ isTrue = sqlite3ExprTruthValue(pExpr->pRight);
+ testcase( isTrue && isNot );
+ testcase( !isTrue && isNot );
+ if( isTrue ^ isNot ){
+ /* IS TRUE and IS NOT FALSE */
+ sqlite3ExprIfFalse(pParse, pExpr->pLeft, dest,
+ isNot ? 0 : SQLITE_JUMPIFNULL);
+
+ }else{
+ /* IS FALSE and IS NOT TRUE */
+ sqlite3ExprIfTrue(pParse, pExpr->pLeft, dest,
+ isNot ? 0 : SQLITE_JUMPIFNULL);
+ }
+ break;
+ }
+ case TK_IS:
+ case TK_ISNOT:
+ testcase( pExpr->op==TK_IS );
+ testcase( pExpr->op==TK_ISNOT );
+ op = (pExpr->op==TK_IS) ? TK_NE : TK_EQ;
+ jumpIfNull = SQLITE_NULLEQ;
+ /* no break */ deliberate_fall_through
+ case TK_LT:
+ case TK_LE:
+ case TK_GT:
+ case TK_GE:
+ case TK_NE:
+ case TK_EQ: {
+ if( sqlite3ExprIsVector(pExpr->pLeft) ) goto default_expr;
+ testcase( jumpIfNull==0 );
+ r1 = sqlite3ExprCodeTemp(pParse, pExpr->pLeft, &regFree1);
+ r2 = sqlite3ExprCodeTemp(pParse, pExpr->pRight, &regFree2);
+ codeCompare(pParse, pExpr->pLeft, pExpr->pRight, op,
+ r1, r2, dest, jumpIfNull,ExprHasProperty(pExpr,EP_Commuted));
+ assert(TK_LT==OP_Lt); testcase(op==OP_Lt); VdbeCoverageIf(v,op==OP_Lt);
+ assert(TK_LE==OP_Le); testcase(op==OP_Le); VdbeCoverageIf(v,op==OP_Le);
+ assert(TK_GT==OP_Gt); testcase(op==OP_Gt); VdbeCoverageIf(v,op==OP_Gt);
+ assert(TK_GE==OP_Ge); testcase(op==OP_Ge); VdbeCoverageIf(v,op==OP_Ge);
+ assert(TK_EQ==OP_Eq); testcase(op==OP_Eq);
+ VdbeCoverageIf(v, op==OP_Eq && jumpIfNull!=SQLITE_NULLEQ);
+ VdbeCoverageIf(v, op==OP_Eq && jumpIfNull==SQLITE_NULLEQ);
+ assert(TK_NE==OP_Ne); testcase(op==OP_Ne);
+ VdbeCoverageIf(v, op==OP_Ne && jumpIfNull!=SQLITE_NULLEQ);
+ VdbeCoverageIf(v, op==OP_Ne && jumpIfNull==SQLITE_NULLEQ);
+ testcase( regFree1==0 );
+ testcase( regFree2==0 );
+ break;
+ }
+ case TK_ISNULL:
+ case TK_NOTNULL: {
+ r1 = sqlite3ExprCodeTemp(pParse, pExpr->pLeft, &regFree1);
+ sqlite3VdbeAddOp2(v, op, r1, dest);
+ testcase( op==TK_ISNULL ); VdbeCoverageIf(v, op==TK_ISNULL);
+ testcase( op==TK_NOTNULL ); VdbeCoverageIf(v, op==TK_NOTNULL);
+ testcase( regFree1==0 );
+ break;
+ }
+ case TK_BETWEEN: {
+ testcase( jumpIfNull==0 );
+ exprCodeBetween(pParse, pExpr, dest, sqlite3ExprIfFalse, jumpIfNull);
+ break;
+ }
+#ifndef SQLITE_OMIT_SUBQUERY
+ case TK_IN: {
+ if( jumpIfNull ){
+ sqlite3ExprCodeIN(pParse, pExpr, dest, dest);
+ }else{
+ int destIfNull = sqlite3VdbeMakeLabel(pParse);
+ sqlite3ExprCodeIN(pParse, pExpr, dest, destIfNull);
+ sqlite3VdbeResolveLabel(v, destIfNull);
+ }
+ break;
+ }
+#endif
+ default: {
+ default_expr:
+ if( ExprAlwaysFalse(pExpr) ){
+ sqlite3VdbeGoto(v, dest);
+ }else if( ExprAlwaysTrue(pExpr) ){
+ /* no-op */
+ }else{
+ r1 = sqlite3ExprCodeTemp(pParse, pExpr, &regFree1);
+ sqlite3VdbeAddOp3(v, OP_IfNot, r1, dest, jumpIfNull!=0);
+ VdbeCoverage(v);
+ testcase( regFree1==0 );
+ testcase( jumpIfNull==0 );
+ }
+ break;
+ }
+ }
+ sqlite3ReleaseTempReg(pParse, regFree1);
+ sqlite3ReleaseTempReg(pParse, regFree2);
+}
+
+/*
+** Like sqlite3ExprIfFalse() except that a copy is made of pExpr before
+** code generation, and that copy is deleted after code generation. This
+** ensures that the original pExpr is unchanged.
+*/
+SQLITE_PRIVATE void sqlite3ExprIfFalseDup(Parse *pParse, Expr *pExpr, int dest,int jumpIfNull){
+ sqlite3 *db = pParse->db;
+ Expr *pCopy = sqlite3ExprDup(db, pExpr, 0);
+ if( db->mallocFailed==0 ){
+ sqlite3ExprIfFalse(pParse, pCopy, dest, jumpIfNull);
+ }
+ sqlite3ExprDelete(db, pCopy);
+}
+
+/*
+** Expression pVar is guaranteed to be an SQL variable. pExpr may be any
+** type of expression.
+**
+** If pExpr is a simple SQL value - an integer, real, string, blob
+** or NULL value - then the VDBE currently being prepared is configured
+** to re-prepare each time a new value is bound to variable pVar.
+**
+** Additionally, if pExpr is a simple SQL value and the value is the
+** same as that currently bound to variable pVar, non-zero is returned.
+** Otherwise, if the values are not the same or if pExpr is not a simple
+** SQL value, zero is returned.
+*/
+static int exprCompareVariable(Parse *pParse, Expr *pVar, Expr *pExpr){
+ int res = 0;
+ int iVar;
+ sqlite3_value *pL, *pR = 0;
+
+ sqlite3ValueFromExpr(pParse->db, pExpr, SQLITE_UTF8, SQLITE_AFF_BLOB, &pR);
+ if( pR ){
+ iVar = pVar->iColumn;
+ sqlite3VdbeSetVarmask(pParse->pVdbe, iVar);
+ pL = sqlite3VdbeGetBoundValue(pParse->pReprepare, iVar, SQLITE_AFF_BLOB);
+ if( pL ){
+ if( sqlite3_value_type(pL)==SQLITE_TEXT ){
+ sqlite3_value_text(pL); /* Make sure the encoding is UTF-8 */
+ }
+ res = 0==sqlite3MemCompare(pL, pR, 0);
+ }
+ sqlite3ValueFree(pR);
+ sqlite3ValueFree(pL);
+ }
+
+ return res;
+}
+
+/*
+** Do a deep comparison of two expression trees. Return 0 if the two
+** expressions are completely identical. Return 1 if they differ only
+** by a COLLATE operator at the top level. Return 2 if there are differences
+** other than the top-level COLLATE operator.
+**
+** If any subelement of pB has Expr.iTable==(-1) then it is allowed
+** to compare equal to an equivalent element in pA with Expr.iTable==iTab.
+**
+** The pA side might be using TK_REGISTER. If that is the case and pB is
+** not using TK_REGISTER but is otherwise equivalent, then still return 0.
+**
+** Sometimes this routine will return 2 even if the two expressions
+** really are equivalent. If we cannot prove that the expressions are
+** identical, we return 2 just to be safe. So if this routine
+** returns 2, then you do not really know for certain if the two
+** expressions are the same. But if you get a 0 or 1 return, then you
+** can be sure the expressions are the same. In the places where
+** this routine is used, it does not hurt to get an extra 2 - that
+** just might result in some slightly slower code. But returning
+** an incorrect 0 or 1 could lead to a malfunction.
+**
+** If pParse is not NULL then TK_VARIABLE terms in pA with bindings in
+** pParse->pReprepare can be matched against literals in pB. The
+** pParse->pVdbe->expmask bitmask is updated for each variable referenced.
+** If pParse is NULL (the normal case) then any TK_VARIABLE term in
+** either pA or pB causes a return value of 2.
+*/
+SQLITE_PRIVATE int sqlite3ExprCompare(Parse *pParse, Expr *pA, Expr *pB, int iTab){
+ u32 combinedFlags;
+ if( pA==0 || pB==0 ){
+ return pB==pA ? 0 : 2;
+ }
+ if( pParse && pA->op==TK_VARIABLE && exprCompareVariable(pParse, pA, pB) ){
+ return 0;
+ }
+ combinedFlags = pA->flags | pB->flags;
+ if( combinedFlags & EP_IntValue ){
+ if( (pA->flags&pB->flags&EP_IntValue)!=0 && pA->u.iValue==pB->u.iValue ){
+ return 0;
+ }
+ return 2;
+ }
+ if( pA->op!=pB->op || pA->op==TK_RAISE ){
+ if( pA->op==TK_COLLATE && sqlite3ExprCompare(pParse, pA->pLeft,pB,iTab)<2 ){
+ return 1;
+ }
+ if( pB->op==TK_COLLATE && sqlite3ExprCompare(pParse, pA,pB->pLeft,iTab)<2 ){
+ return 1;
+ }
+ return 2;
+ }
+ if( pA->op!=TK_COLUMN && pA->op!=TK_AGG_COLUMN && pA->u.zToken ){
+ if( pA->op==TK_FUNCTION || pA->op==TK_AGG_FUNCTION ){
+ if( sqlite3StrICmp(pA->u.zToken,pB->u.zToken)!=0 ) return 2;
+#ifndef SQLITE_OMIT_WINDOWFUNC
+ assert( pA->op==pB->op );
+ if( ExprHasProperty(pA,EP_WinFunc)!=ExprHasProperty(pB,EP_WinFunc) ){
+ return 2;
+ }
+ if( ExprHasProperty(pA,EP_WinFunc) ){
+ if( sqlite3WindowCompare(pParse, pA->y.pWin, pB->y.pWin, 1)!=0 ){
+ return 2;
+ }
+ }
+#endif
+ }else if( pA->op==TK_NULL ){
+ return 0;
+ }else if( pA->op==TK_COLLATE ){
+ if( sqlite3_stricmp(pA->u.zToken,pB->u.zToken)!=0 ) return 2;
+ }else if( ALWAYS(pB->u.zToken!=0) && strcmp(pA->u.zToken,pB->u.zToken)!=0 ){
+ return 2;
+ }
+ }
+ if( (pA->flags & (EP_Distinct|EP_Commuted))
+ != (pB->flags & (EP_Distinct|EP_Commuted)) ) return 2;
+ if( ALWAYS((combinedFlags & EP_TokenOnly)==0) ){
+ if( combinedFlags & EP_xIsSelect ) return 2;
+ if( (combinedFlags & EP_FixedCol)==0
+ && sqlite3ExprCompare(pParse, pA->pLeft, pB->pLeft, iTab) ) return 2;
+ if( sqlite3ExprCompare(pParse, pA->pRight, pB->pRight, iTab) ) return 2;
+ if( sqlite3ExprListCompare(pA->x.pList, pB->x.pList, iTab) ) return 2;
+ if( pA->op!=TK_STRING
+ && pA->op!=TK_TRUEFALSE
+ && ALWAYS((combinedFlags & EP_Reduced)==0)
+ ){
+ if( pA->iColumn!=pB->iColumn ) return 2;
+ if( pA->op2!=pB->op2 && pA->op==TK_TRUTH ) return 2;
+ if( pA->op!=TK_IN && pA->iTable!=pB->iTable && pA->iTable!=iTab ){
+ return 2;
+ }
+ }
+ }
+ return 0;
+}
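+/* Illustrative results (arguments shown as SQL text for readability;
+** real callers pass parsed Expr trees):
+**
+**     a+b   versus  a+b                   -> 0  (identical)
+**     a+b   versus  (a+b) COLLATE nocase  -> 1  (top-level COLLATE only)
+**     a+b   versus  a+c                   -> 2  (different)
+*/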
+
+/*
+** Compare two ExprList objects. Return 0 if they are identical, 1
+** if they are certainly different, or 2 if it is not possible to
+** determine if they are identical or not.
+**
+** If any subelement of pB has Expr.iTable==(-1) then it is allowed
+** to compare equal to an equivalent element in pA with Expr.iTable==iTab.
+**
+** This routine might return non-zero for equivalent ExprLists. The
+** only consequence will be disabled optimizations. But this routine
+** must never return 0 if the two ExprList objects are different, or
+** a malfunction will result.
+**
+** Two NULL pointers are considered to be the same. But a NULL pointer
+** always differs from a non-NULL pointer.
+*/
+SQLITE_PRIVATE int sqlite3ExprListCompare(ExprList *pA, ExprList *pB, int iTab){
+ int i;
+ if( pA==0 && pB==0 ) return 0;
+ if( pA==0 || pB==0 ) return 1;
+ if( pA->nExpr!=pB->nExpr ) return 1;
+ for(i=0; i<pA->nExpr; i++){
+ int res;
+ Expr *pExprA = pA->a[i].pExpr;
+ Expr *pExprB = pB->a[i].pExpr;
+ if( pA->a[i].sortFlags!=pB->a[i].sortFlags ) return 1;
+ if( (res = sqlite3ExprCompare(0, pExprA, pExprB, iTab)) ) return res;
+ }
+ return 0;
+}
+
+/*
+** Like sqlite3ExprCompare() except COLLATE operators at the top-level
+** are ignored.
+*/
+SQLITE_PRIVATE int sqlite3ExprCompareSkip(Expr *pA, Expr *pB, int iTab){
+ return sqlite3ExprCompare(0,
+ sqlite3ExprSkipCollateAndLikely(pA),
+ sqlite3ExprSkipCollateAndLikely(pB),
+ iTab);
+}
+
+/*
+** Return non-zero if Expr p can only be true if pNN is not NULL.
+**
+** Or if seenNot is true, return non-zero if Expr p can only be
+** non-NULL if pNN is not NULL
+*/
+static int exprImpliesNotNull(
+ Parse *pParse, /* Parsing context */
+ Expr *p, /* The expression to be checked */
+ Expr *pNN, /* The expression that is NOT NULL */
+ int iTab, /* Table being evaluated */
+ int seenNot /* Return true only if p can be any non-NULL value */
+){
+ assert( p );
+ assert( pNN );
+ if( sqlite3ExprCompare(pParse, p, pNN, iTab)==0 ){
+ return pNN->op!=TK_NULL;
+ }
+ switch( p->op ){
+ case TK_IN: {
+ if( seenNot && ExprHasProperty(p, EP_xIsSelect) ) return 0;
+ assert( ExprHasProperty(p,EP_xIsSelect)
+ || (p->x.pList!=0 && p->x.pList->nExpr>0) );
+ return exprImpliesNotNull(pParse, p->pLeft, pNN, iTab, 1);
+ }
+ case TK_BETWEEN: {
+ ExprList *pList = p->x.pList;
+ assert( pList!=0 );
+ assert( pList->nExpr==2 );
+ if( seenNot ) return 0;
+ if( exprImpliesNotNull(pParse, pList->a[0].pExpr, pNN, iTab, 1)
+ || exprImpliesNotNull(pParse, pList->a[1].pExpr, pNN, iTab, 1)
+ ){
+ return 1;
+ }
+ return exprImpliesNotNull(pParse, p->pLeft, pNN, iTab, 1);
+ }
+ case TK_EQ:
+ case TK_NE:
+ case TK_LT:
+ case TK_LE:
+ case TK_GT:
+ case TK_GE:
+ case TK_PLUS:
+ case TK_MINUS:
+ case TK_BITOR:
+ case TK_LSHIFT:
+ case TK_RSHIFT:
+ case TK_CONCAT:
+ seenNot = 1;
+ /* no break */ deliberate_fall_through
+ case TK_STAR:
+ case TK_REM:
+ case TK_BITAND:
+ case TK_SLASH: {
+ if( exprImpliesNotNull(pParse, p->pRight, pNN, iTab, seenNot) ) return 1;
+ /* no break */ deliberate_fall_through
+ }
+ case TK_SPAN:
+ case TK_COLLATE:
+ case TK_UPLUS:
+ case TK_UMINUS: {
+ return exprImpliesNotNull(pParse, p->pLeft, pNN, iTab, seenNot);
+ }
+ case TK_TRUTH: {
+ if( seenNot ) return 0;
+ if( p->op2!=TK_IS ) return 0;
+ return exprImpliesNotNull(pParse, p->pLeft, pNN, iTab, 1);
+ }
+ case TK_BITNOT:
+ case TK_NOT: {
+ return exprImpliesNotNull(pParse, p->pLeft, pNN, iTab, 1);
+ }
+ }
+ return 0;
+}
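+/* Example: with p = "x+1" and pNN = "x", the TK_PLUS case checks the
+** right operand (the literal 1, which does not match), then falls
+** through to the recursive test on the left operand, which matches pNN.
+** The routine therefore returns 1: x+1 can be non-NULL only when x is
+** itself non-NULL.
+*/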
+
+/*
+** Return true if we can prove the pE2 will always be true if pE1 is
+** true. Return false if we cannot complete the proof or if pE2 might
+** be false. Examples:
+**
+**     pE1: x==5          pE2: x==5             Result: true
+**     pE1: x>0           pE2: x==5             Result: false
+**     pE1: x=21          pE2: x=21 OR y=43     Result: true
+**     pE1: x!=123        pE2: x IS NOT NULL    Result: true
+**     pE1: x!=?1         pE2: x IS NOT NULL    Result: true
+**     pE1: x IS NULL     pE2: x IS NOT NULL    Result: false
+**     pE1: x IS ?2       pE2: x IS NOT NULL    Result: false
+**
+** When comparing TK_COLUMN nodes between pE1 and pE2, if pE2 has
+** Expr.iTable<0 then assume a table number given by iTab.
+**
+** If pParse is not NULL, then the values of bound variables in pE1 are
+** compared against literal values in pE2 and pParse->pVdbe->expmask is
+** modified to record which bound variables are referenced. If pParse
+** is NULL, then false will be returned if pE1 contains any bound variables.
+**
+** When in doubt, return false. Returning true might give a performance
+** improvement. Returning false might cause a performance reduction, but
+** it will always give the correct answer and is hence always safe.
+*/
+SQLITE_PRIVATE int sqlite3ExprImpliesExpr(Parse *pParse, Expr *pE1, Expr *pE2, int iTab){
+ if( sqlite3ExprCompare(pParse, pE1, pE2, iTab)==0 ){
+ return 1;
+ }
+ if( pE2->op==TK_OR
+ && (sqlite3ExprImpliesExpr(pParse, pE1, pE2->pLeft, iTab)
+ || sqlite3ExprImpliesExpr(pParse, pE1, pE2->pRight, iTab) )
+ ){
+ return 1;
+ }
+ if( pE2->op==TK_NOTNULL
+ && exprImpliesNotNull(pParse, pE1, pE2->pLeft, iTab, 0)
+ ){
+ return 1;
+ }
+ return 0;
+}
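+
+/*
+** Editorial note (an illustrative sketch, not part of the original
+** source): one important user of sqlite3ExprImpliesExpr() is the query
+** planner's test for whether a partial index is usable.  For example,
+** an index created with
+**
+**     CREATE INDEX i1 ON t1(a) WHERE b IS NOT NULL;
+**
+** may be used by a query whose WHERE clause contains the term "b=5",
+** because b=5 implies b IS NOT NULL (see exprImpliesNotNull() above).
+*/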
+
+/*
+** This is the Expr node callback for sqlite3ExprImpliesNonNullRow().
+** If the expression node requires that the table at pWalker->iCur
+** have one or more non-NULL columns, then set pWalker->eCode to 1 and abort.
+**
+** This routine controls an optimization. False positives (setting
+** pWalker->eCode to 1 when it should not be) are deadly, but false
+** negatives (never setting pWalker->eCode) are merely harmless missed
+** optimizations.
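+**
+** For example (an illustrative sketch): with pWalker->u.iCur set to the
+** cursor of table t2, the WHERE term "t2.x=10" sets eCode because that
+** term can never be true when t2.x is NULL, while the term
+** "t2.x IS NULL" does not set eCode.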
+*/
+static int impliesNotNullRow(Walker *pWalker, Expr *pExpr){
+ testcase( pExpr->op==TK_AGG_COLUMN );
+ testcase( pExpr->op==TK_AGG_FUNCTION );
+ if( ExprHasProperty(pExpr, EP_FromJoin) ) return WRC_Prune;
+ switch( pExpr->op ){
+ case TK_ISNOT:
+ case TK_ISNULL:
+ case TK_NOTNULL:
+ case TK_IS:
+ case TK_OR:
+ case TK_VECTOR:
+ case TK_CASE:
+ case TK_IN:
+ case TK_FUNCTION:
+ case TK_TRUTH:
+ testcase( pExpr->op==TK_ISNOT );
+ testcase( pExpr->op==TK_ISNULL );
+ testcase( pExpr->op==TK_NOTNULL );
+ testcase( pExpr->op==TK_IS );
+ testcase( pExpr->op==TK_OR );
+ testcase( pExpr->op==TK_VECTOR );
+ testcase( pExpr->op==TK_CASE );
+ testcase( pExpr->op==TK_IN );
+ testcase( pExpr->op==TK_FUNCTION );
+ testcase( pExpr->op==TK_TRUTH );
+ return WRC_Prune;
+ case TK_COLUMN:
+ if( pWalker->u.iCur==pExpr->iTable ){
+ pWalker->eCode = 1;
+ return WRC_Abort;
+ }
+ return WRC_Prune;
+
+ case TK_AND:
+ if( pWalker->eCode==0 ){
+ sqlite3WalkExpr(pWalker, pExpr->pLeft);
+ if( pWalker->eCode ){
+ pWalker->eCode = 0;
+ sqlite3WalkExpr(pWalker, pExpr->pRight);
+ }
+ }
+ return WRC_Prune;
+
+ case TK_BETWEEN:
+ if( sqlite3WalkExpr(pWalker, pExpr->pLeft)==WRC_Abort ){
+ assert( pWalker->eCode );
+ return WRC_Abort;
+ }
+ return WRC_Prune;
+
+ /* Virtual tables are allowed to use constraints like x=NULL. So
+ ** a term of the form x=y does not prove that y is not null if x
+ ** is the column of a virtual table */
+ case TK_EQ:
+ case TK_NE:
+ case TK_LT:
+ case TK_LE:
+ case TK_GT:
+ case TK_GE: {
+ Expr *pLeft = pExpr->pLeft;
+ Expr *pRight = pExpr->pRight;
+ testcase( pExpr->op==TK_EQ );
+ testcase( pExpr->op==TK_NE );
+ testcase( pExpr->op==TK_LT );
+ testcase( pExpr->op==TK_LE );
+ testcase( pExpr->op==TK_GT );
+ testcase( pExpr->op==TK_GE );
+ /* The y.pTab=0 assignment in wherecode.c always happens after the
+ ** impliesNotNullRow() test */
+ if( (pLeft->op==TK_COLUMN && ALWAYS(pLeft->y.pTab!=0)
+ && IsVirtual(pLeft->y.pTab))
+ || (pRight->op==TK_COLUMN && ALWAYS(pRight->y.pTab!=0)
+ && IsVirtual(pRight->y.pTab))
+ ){
+ return WRC_Prune;
+ }
+ /* no break */ deliberate_fall_through
+ }
+ default:
+ return WRC_Continue;
+ }
+}
+
+/*
+** Return true (non-zero) if expression p can only be true if at least
+** one column of table iTab is non-null. In other words, return true
+** if expression p will always be NULL or false if every column of iTab
+** is NULL.
+**
+** False negatives are acceptable. In other words, it is ok to return
+** zero even if expression p will never be true if every column of iTab
+** is NULL. A false negative is merely a missed optimization opportunity.
+**
+** False positives are not allowed, however. A false positive may result
+** in an incorrect answer.
+**
+** Terms of p that are marked with EP_FromJoin (and hence that come from
+** the ON or USING clauses of LEFT JOINS) are excluded from the analysis.
+**
+** This routine is used to check if a LEFT JOIN can be converted into
+** an ordinary JOIN. The p argument is the WHERE clause. If the WHERE
+** clause requires that some column of the right table of the LEFT JOIN
+** be non-NULL, then the LEFT JOIN can be safely converted into an
+** ordinary join.
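+**
+** For example (a sketch of the conversion this enables):
+**
+**     SELECT * FROM t1 LEFT JOIN t2 ON t1.a=t2.a WHERE t2.b=5;
+**
+** The term t2.b=5 can only be true if t2.b is non-NULL, so rows in
+** which t2 is all-NULL cannot appear in the result, and the LEFT JOIN
+** may be treated as an ordinary JOIN.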
+*/
+SQLITE_PRIVATE int sqlite3ExprImpliesNonNullRow(Expr *p, int iTab){
+ Walker w;
+ p = sqlite3ExprSkipCollateAndLikely(p);
+ if( p==0 ) return 0;
+ if( p->op==TK_NOTNULL ){
+ p = p->pLeft;
+ }else{
+ while( p->op==TK_AND ){
+ if( sqlite3ExprImpliesNonNullRow(p->pLeft, iTab) ) return 1;
+ p = p->pRight;
+ }
+ }
+ w.xExprCallback = impliesNotNullRow;
+ w.xSelectCallback = 0;
+ w.xSelectCallback2 = 0;
+ w.eCode = 0;
+ w.u.iCur = iTab;
+ sqlite3WalkExpr(&w, p);
+ return w.eCode;
+}
+
+/*
+** An instance of the following structure is used by the tree walker
+** to determine if an expression can be evaluated by reference to the
+** index only, without having to do a search for the corresponding
+** table entry. The IdxCover.pIdx field is the index. IdxCover.iCur
+** is the cursor for the table.
+*/
+struct IdxCover {
+ Index *pIdx; /* The index to be tested for coverage */
+ int iCur; /* Cursor number for the table corresponding to the index */
+};
+
+/*
+** Check to see if there are references to columns in table
+** pWalker->u.pIdxCover->iCur that cannot be satisfied using the index
+** pWalker->u.pIdxCover->pIdx.  If such a reference is found, set
+** pWalker->eCode to 1 and abort the walk.
+*/
+static int exprIdxCover(Walker *pWalker, Expr *pExpr){
+ if( pExpr->op==TK_COLUMN
+ && pExpr->iTable==pWalker->u.pIdxCover->iCur
+ && sqlite3TableColumnToIndex(pWalker->u.pIdxCover->pIdx, pExpr->iColumn)<0
+ ){
+ pWalker->eCode = 1;
+ return WRC_Abort;
+ }
+ return WRC_Continue;
+}
+
+/*
+** Determine whether the index pIdx on the table with cursor iCur covers
+** the expression pExpr.  Return true if the index does cover the
+** expression and false if the pExpr expression references table columns
+** that are not found in the index pIdx.
+**
+** An index covering an expression means that the expression can be
+** evaluated using only the index and without having to lookup the
+** corresponding table entry.
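+**
+** For example (illustrative): given "CREATE INDEX i1 ON t1(a,b)", the
+** expression "a+b" is covered by i1, but "a+c" is not, because column c
+** does not appear in the index.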
+*/
+SQLITE_PRIVATE int sqlite3ExprCoveredByIndex(
+ Expr *pExpr, /* The expression to be tested */
+ int iCur, /* The cursor number for the corresponding table */
+ Index *pIdx /* The index that might be used for coverage */
+){
+ Walker w;
+ struct IdxCover xcov;
+ memset(&w, 0, sizeof(w));
+ xcov.iCur = iCur;
+ xcov.pIdx = pIdx;
+ w.xExprCallback = exprIdxCover;
+ w.u.pIdxCover = &xcov;
+ sqlite3WalkExpr(&w, pExpr);
+ return !w.eCode;
+}
+
+
+/*
+** An instance of the following structure is used by the tree walker
+** to count references to table columns in the arguments of an
+** aggregate function, in order to implement the
+** sqlite3FunctionThisSrc() routine.
+*/
+struct SrcCount {
+ SrcList *pSrc; /* One particular FROM clause in a nested query */
+ int iSrcInner; /* Smallest cursor number in this context */
+ int nThis; /* Number of references to columns in pSrcList */
+ int nOther; /* Number of references to columns in other FROM clauses */
+};
+
+/*
+** xSelect callback for sqlite3FunctionUsesThisSrc(). If this is the first
+** SELECT with a FROM clause encountered during this iteration, set
+** SrcCount.iSrcInner to the cursor number of the leftmost object in
+** the FROM clause.
+*/
+static int selectSrcCount(Walker *pWalker, Select *pSel){
+ struct SrcCount *p = pWalker->u.pSrcCount;
+ if( p->iSrcInner==0x7FFFFFFF && ALWAYS(pSel->pSrc) && pSel->pSrc->nSrc ){
+ pWalker->u.pSrcCount->iSrcInner = pSel->pSrc->a[0].iCursor;
+ }
+ return WRC_Continue;
+}
+
+/*
+** Count the number of references to columns.
+*/
+static int exprSrcCount(Walker *pWalker, Expr *pExpr){
+ /* There was once a NEVER() on the second term on the grounds that
+ ** sqlite3FunctionUsesThisSrc() was always called before
+ ** sqlite3ExprAnalyzeAggregates() and so the TK_COLUMNs have not yet
+ ** been converted into TK_AGG_COLUMN. But this is no longer true due
+ ** to window functions - sqlite3WindowRewrite() may now indirectly call
+ ** FunctionUsesThisSrc() when creating a new sub-select. */
+ if( pExpr->op==TK_COLUMN || pExpr->op==TK_AGG_COLUMN ){
+ int i;
+ struct SrcCount *p = pWalker->u.pSrcCount;
+ SrcList *pSrc = p->pSrc;
+ int nSrc = pSrc ? pSrc->nSrc : 0;
+ for(i=0; i<nSrc; i++){
+ if( pExpr->iTable==pSrc->a[i].iCursor ) break;
+ }
+ if( i<nSrc ){
+ p->nThis++;
+ }else if( pExpr->iTable<p->iSrcInner ){
+ /* In a well-formed parse tree (no name resolution errors),
+ ** TK_COLUMN nodes with smaller Expr.iTable values are in an
+ ** outer context. Those are the only ones to count as "other" */
+ p->nOther++;
+ }
+ }
+ return WRC_Continue;
+}
+
+/*
+** Determine if any of the arguments to the pExpr Function reference
+** pSrcList. Return true if they do. Also return true if the function
+** has no arguments or has only constant arguments. Return false if pExpr
+** references columns but not columns of tables found in pSrcList.
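+**
+** For example (a sketch): for the aggregate in
+**
+**     SELECT (SELECT max(t2.x+t1.y) FROM t2) FROM t1;
+**
+** called with pSrcList set to the inner FROM clause (t2), this routine
+** returns true because the argument of max() references column t2.x.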
+*/
+SQLITE_PRIVATE int sqlite3FunctionUsesThisSrc(Expr *pExpr, SrcList *pSrcList){
+ Walker w;
+ struct SrcCount cnt;
+ assert( pExpr->op==TK_AGG_FUNCTION );
+ memset(&w, 0, sizeof(w));
+ w.xExprCallback = exprSrcCount;
+ w.xSelectCallback = selectSrcCount;
+ w.u.pSrcCount = &cnt;
+ cnt.pSrc = pSrcList;
+ cnt.iSrcInner = (pSrcList&&pSrcList->nSrc)?pSrcList->a[0].iCursor:0x7FFFFFFF;
+ cnt.nThis = 0;
+ cnt.nOther = 0;
+ sqlite3WalkExprList(&w, pExpr->x.pList);
+#ifndef SQLITE_OMIT_WINDOWFUNC
+ if( ExprHasProperty(pExpr, EP_WinFunc) ){
+ sqlite3WalkExpr(&w, pExpr->y.pWin->pFilter);
+ }
+#endif
+ return cnt.nThis>0 || cnt.nOther==0;
+}
+
+/*
+** This is a Walker expression node callback.
+**
+** For Expr nodes that contain pAggInfo pointers, make sure the AggInfo
+** object that is referenced does not refer directly to the Expr. If
+** it does, make a copy. This is done because the pExpr argument is
+** subject to change.
+**
+** The copy is stored on pParse->pConstExpr with a register number of 0.
+** This will cause the expression to be deleted automatically when the
+** Parse object is destroyed, but the zero register number means that it
+** will not generate any code in the preamble.
+*/
+static int agginfoPersistExprCb(Walker *pWalker, Expr *pExpr){
+ if( ALWAYS(!ExprHasProperty(pExpr, EP_TokenOnly|EP_Reduced))
+ && pExpr->pAggInfo!=0
+ ){
+ AggInfo *pAggInfo = pExpr->pAggInfo;
+ int iAgg = pExpr->iAgg;
+ Parse *pParse = pWalker->pParse;
+ sqlite3 *db = pParse->db;
+ assert( pExpr->op==TK_AGG_COLUMN || pExpr->op==TK_AGG_FUNCTION );
+ if( pExpr->op==TK_AGG_COLUMN ){
+ assert( iAgg>=0 && iAgg<pAggInfo->nColumn );
+ if( pAggInfo->aCol[iAgg].pCExpr==pExpr ){
+ pExpr = sqlite3ExprDup(db, pExpr, 0);
+ if( pExpr ){
+ pAggInfo->aCol[iAgg].pCExpr = pExpr;
+ pParse->pConstExpr =
+ sqlite3ExprListAppend(pParse, pParse->pConstExpr, pExpr);
+ }
+ }
+ }else{
+ assert( iAgg>=0 && iAgg<pAggInfo->nFunc );
+ if( pAggInfo->aFunc[iAgg].pFExpr==pExpr ){
+ pExpr = sqlite3ExprDup(db, pExpr, 0);
+ if( pExpr ){
+ pAggInfo->aFunc[iAgg].pFExpr = pExpr;
+ pParse->pConstExpr =
+ sqlite3ExprListAppend(pParse, pParse->pConstExpr, pExpr);
+ }
+ }
+ }
+ }
+ return WRC_Continue;
+}
+
+/*
+** Initialize a Walker object so that it will persist AggInfo entries
+** referenced by the tree that is walked.
+*/
+SQLITE_PRIVATE void sqlite3AggInfoPersistWalkerInit(Walker *pWalker, Parse *pParse){
+ memset(pWalker, 0, sizeof(*pWalker));
+ pWalker->pParse = pParse;
+ pWalker->xExprCallback = agginfoPersistExprCb;
+ pWalker->xSelectCallback = sqlite3SelectWalkNoop;
+}
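+
+/*
+** Usage sketch (an editorial illustration, not part of the original
+** source):
+**
+**     Walker w;
+**     sqlite3AggInfoPersistWalkerInit(&w, pParse);
+**     sqlite3WalkExpr(&w, pExpr);
+**
+** After the walk, any AggInfo entry that pointed directly at a node of
+** pExpr has been redirected to a private copy, so pExpr can later be
+** modified or freed without leaving dangling pointers in the AggInfo.
+*/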
+
+/*
+** Add a new element to the pAggInfo->aCol[] array. Return the index of
+** the new element. Return a negative number if malloc fails.
+*/
+static int addAggInfoColumn(sqlite3 *db, AggInfo *pInfo){
+ int i;
+ pInfo->aCol = sqlite3ArrayAllocate(
+ db,
+ pInfo->aCol,
+ sizeof(pInfo->aCol[0]),
+ &pInfo->nColumn,
+ &i
+ );
+ return i;
+}
+
+/*
+** Add a new element to the pAggInfo->aFunc[] array. Return the index of
+** the new element. Return a negative number if malloc fails.
+*/
+static int addAggInfoFunc(sqlite3 *db, AggInfo *pInfo){
+ int i;
+ pInfo->aFunc = sqlite3ArrayAllocate(
+ db,
+ pInfo->aFunc,
+ sizeof(pInfo->aFunc[0]),
+ &pInfo->nFunc,
+ &i
+ );
+ return i;
+}
+
+/*
+** This is the xExprCallback for a tree walker. It is used to
+** implement sqlite3ExprAnalyzeAggregates(). See sqlite3ExprAnalyzeAggregates
+** for additional information.
+*/
+static int analyzeAggregate(Walker *pWalker, Expr *pExpr){
+ int i;
+ NameContext *pNC = pWalker->u.pNC;
+ Parse *pParse = pNC->pParse;
+ SrcList *pSrcList = pNC->pSrcList;
+ AggInfo *pAggInfo = pNC->uNC.pAggInfo;
+
+ assert( pNC->ncFlags & NC_UAggInfo );
+ switch( pExpr->op ){
+ case TK_AGG_COLUMN:
+ case TK_COLUMN: {
+ testcase( pExpr->op==TK_AGG_COLUMN );
+ testcase( pExpr->op==TK_COLUMN );
+ /* Check to see if the column is in one of the tables in the FROM
+ ** clause of the aggregate query */
+ if( ALWAYS(pSrcList!=0) ){
+ struct SrcList_item *pItem = pSrcList->a;
+ for(i=0; i<pSrcList->nSrc; i++, pItem++){
+ struct AggInfo_col *pCol;
+ assert( !ExprHasProperty(pExpr, EP_TokenOnly|EP_Reduced) );
+ if( pExpr->iTable==pItem->iCursor ){
+ /* If we reach this point, it means that pExpr refers to a table
+ ** that is in the FROM clause of the aggregate query.
+ **
+ ** Make an entry for the column in pAggInfo->aCol[] if there
+ ** is not an entry there already.
+ */
+ int k;
+ pCol = pAggInfo->aCol;
+ for(k=0; k<pAggInfo->nColumn; k++, pCol++){
+ if( pCol->iTable==pExpr->iTable &&
+ pCol->iColumn==pExpr->iColumn ){
+ break;
+ }
+ }
+ if( (k>=pAggInfo->nColumn)
+ && (k = addAggInfoColumn(pParse->db, pAggInfo))>=0
+ ){
+ pCol = &pAggInfo->aCol[k];
+ pCol->pTab = pExpr->y.pTab;
+ pCol->iTable = pExpr->iTable;
+ pCol->iColumn = pExpr->iColumn;
+ pCol->iMem = ++pParse->nMem;
+ pCol->iSorterColumn = -1;
+ pCol->pCExpr = pExpr;
+ if( pAggInfo->pGroupBy ){
+ int j, n;
+ ExprList *pGB = pAggInfo->pGroupBy;
+ struct ExprList_item *pTerm = pGB->a;
+ n = pGB->nExpr;
+ for(j=0; j<n; j++, pTerm++){
+ Expr *pE = pTerm->pExpr;
+ if( pE->op==TK_COLUMN && pE->iTable==pExpr->iTable &&
+ pE->iColumn==pExpr->iColumn ){
+ pCol->iSorterColumn = j;
+ break;
+ }
+ }
+ }
+ if( pCol->iSorterColumn<0 ){
+ pCol->iSorterColumn = pAggInfo->nSortingColumn++;
+ }
+ }
+ /* There is now an entry for pExpr in pAggInfo->aCol[] (either
+ ** because it was there before or because we just created it).
+ ** Convert the pExpr to be a TK_AGG_COLUMN referring to that
+ ** pAggInfo->aCol[] entry.
+ */
+ ExprSetVVAProperty(pExpr, EP_NoReduce);
+ pExpr->pAggInfo = pAggInfo;
+ pExpr->op = TK_AGG_COLUMN;
+ pExpr->iAgg = (i16)k;
+ break;
+ } /* endif pExpr->iTable==pItem->iCursor */
+ } /* end loop over pSrcList */
+ }
+ return WRC_Prune;
+ }
+ case TK_AGG_FUNCTION: {
+ if( (pNC->ncFlags & NC_InAggFunc)==0
+ && pWalker->walkerDepth==pExpr->op2
+ ){
+ /* Check to see if pExpr is a duplicate of another aggregate
+ ** function that is already in the pAggInfo structure
+ */
+ struct AggInfo_func *pItem = pAggInfo->aFunc;
+ for(i=0; i<pAggInfo->nFunc; i++, pItem++){
+ if( sqlite3ExprCompare(0, pItem->pFExpr, pExpr, -1)==0 ){
+ break;
+ }
+ }
+ if( i>=pAggInfo->nFunc ){
+ /* pExpr is original. Make a new entry in pAggInfo->aFunc[]
+ */
+ u8 enc = ENC(pParse->db);
+ i = addAggInfoFunc(pParse->db, pAggInfo);
+ if( i>=0 ){
+ assert( !ExprHasProperty(pExpr, EP_xIsSelect) );
+ pItem = &pAggInfo->aFunc[i];
+ pItem->pFExpr = pExpr;
+ pItem->iMem = ++pParse->nMem;
+ assert( !ExprHasProperty(pExpr, EP_IntValue) );
+ pItem->pFunc = sqlite3FindFunction(pParse->db,
+ pExpr->u.zToken,
+ pExpr->x.pList ? pExpr->x.pList->nExpr : 0, enc, 0);
+ if( pExpr->flags & EP_Distinct ){
+ pItem->iDistinct = pParse->nTab++;
+ }else{
+ pItem->iDistinct = -1;
+ }
+ }
+ }
+ /* Make pExpr point to the appropriate pAggInfo->aFunc[] entry
+ */
+ assert( !ExprHasProperty(pExpr, EP_TokenOnly|EP_Reduced) );
+ ExprSetVVAProperty(pExpr, EP_NoReduce);
+ pExpr->iAgg = (i16)i;
+ pExpr->pAggInfo = pAggInfo;
+ return WRC_Prune;
+ }else{
+ return WRC_Continue;
+ }
+ }
+ }
+ return WRC_Continue;
+}
+
+/*
+** Analyze the pExpr expression looking for aggregate functions and
+** for variables that need to be added to the AggInfo object that
+** pNC->pAggInfo points to.  Additional entries are made on the AggInfo
+** object as necessary.
+**
+** This routine should only be called after the expression has been
+** analyzed by sqlite3ResolveExprNames().
+*/
+SQLITE_PRIVATE void sqlite3ExprAnalyzeAggregates(NameContext *pNC, Expr *pExpr){
+ Walker w;
+ w.xExprCallback = analyzeAggregate;
+ w.xSelectCallback = sqlite3WalkerDepthIncrease;
+ w.xSelectCallback2 = sqlite3WalkerDepthDecrease;
+ w.walkerDepth = 0;
+ w.u.pNC = pNC;
+ w.pParse = 0;
+ assert( pNC->pSrcList!=0 );
+ sqlite3WalkExpr(&w, pExpr);
+}
+
+/*
+** Call sqlite3ExprAnalyzeAggregates() for every expression in an
+** expression list.  This routine returns nothing; any errors found
+** during the analysis are recorded in the Parse object referenced
+** by pNC.
+*/
+SQLITE_PRIVATE void sqlite3ExprAnalyzeAggList(NameContext *pNC, ExprList *pList){
+ struct ExprList_item *pItem;
+ int i;
+ if( pList ){
+ for(pItem=pList->a, i=0; i<pList->nExpr; i++, pItem++){
+ sqlite3ExprAnalyzeAggregates(pNC, pItem->pExpr);
+ }
+ }
+}
+
+/*
+** Allocate a single new register for use to hold some intermediate result.
+*/
+SQLITE_PRIVATE int sqlite3GetTempReg(Parse *pParse){
+ if( pParse->nTempReg==0 ){
+ return ++pParse->nMem;
+ }
+ return pParse->aTempReg[--pParse->nTempReg];
+}
+
+/*
+** Deallocate a register, making it available for reuse for some other
+** purpose.
+*/
+SQLITE_PRIVATE void sqlite3ReleaseTempReg(Parse *pParse, int iReg){
+ if( iReg ){
+ sqlite3VdbeReleaseRegisters(pParse, iReg, 1, 0, 0);
+ if( pParse->nTempReg<ArraySize(pParse->aTempReg) ){
+ pParse->aTempReg[pParse->nTempReg++] = iReg;
+ }
+ }
+}
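+
+/*
+** Typical usage of the temporary-register routines (an illustrative
+** sketch, not part of the original source):
+**
+**     int r1 = sqlite3GetTempReg(pParse);
+**     sqlite3ExprCode(pParse, pExpr, r1);
+**     sqlite3ReleaseTempReg(pParse, r1);
+**
+** A released register may be handed out again by a later call to
+** sqlite3GetTempReg().
+*/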
+
+/*
+** Allocate or deallocate a block of nReg consecutive registers.
+*/
+SQLITE_PRIVATE int sqlite3GetTempRange(Parse *pParse, int nReg){
+ int i, n;
+ if( nReg==1 ) return sqlite3GetTempReg(pParse);
+ i = pParse->iRangeReg;
+ n = pParse->nRangeReg;
+ if( nReg<=n ){
+ pParse->iRangeReg += nReg;
+ pParse->nRangeReg -= nReg;
+ }else{
+ i = pParse->nMem+1;
+ pParse->nMem += nReg;
+ }
+ return i;
+}
+SQLITE_PRIVATE void sqlite3ReleaseTempRange(Parse *pParse, int iReg, int nReg){
+ if( nReg==1 ){
+ sqlite3ReleaseTempReg(pParse, iReg);
+ return;
+ }
+ sqlite3VdbeReleaseRegisters(pParse, iReg, nReg, 0, 0);
+ if( nReg>pParse->nRangeReg ){
+ pParse->nRangeReg = nReg;
+ pParse->iRangeReg = iReg;
+ }
+}
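+
+/*
+** Range usage sketch (an editorial illustration, not part of the
+** original source):
+**
+**     int base = sqlite3GetTempRange(pParse, 3);
+**     ...  code that uses registers base, base+1, and base+2  ...
+**     sqlite3ReleaseTempRange(pParse, base, 3);
+**
+** Note that only a single range is cached for reuse: releasing a range
+** replaces the cached range only if the new range is larger.
+*/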
+
+/*
+** Mark all temporary registers as being unavailable for reuse.
+**
+** Always invoke this procedure after coding a subroutine or co-routine
+** that might be invoked from other parts of the code, to ensure that
+** the sub/co-routine does not use registers in common with the code that
+** invokes the sub/co-routine.
+*/
+SQLITE_PRIVATE void sqlite3ClearTempRegCache(Parse *pParse){
+ pParse->nTempReg = 0;
+ pParse->nRangeReg = 0;
+}
+
+/*
+** Validate that no temporary register falls within the range of
+** iFirst..iLast, inclusive.  This routine is only called from within
+** assert() statements.
+*/
+#ifdef SQLITE_DEBUG
+SQLITE_PRIVATE int sqlite3NoTempsInRange(Parse *pParse, int iFirst, int iLast){
+ int i;
+ if( pParse->nRangeReg>0
+ && pParse->iRangeReg+pParse->nRangeReg > iFirst
+ && pParse->iRangeReg <= iLast
+ ){
+ return 0;
+ }
+ for(i=0; i<pParse->nTempReg; i++){
+ if( pParse->aTempReg[i]>=iFirst && pParse->aTempReg[i]<=iLast ){
+ return 0;
+ }
+ }
+ return 1;
+}
+#endif /* SQLITE_DEBUG */
+
+/************** End of expr.c ************************************************/
+/************** Begin file alter.c *******************************************/
+/*
+** 2005 February 15
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+** This file contains C code routines that are used to generate VDBE code
+** that implements the ALTER TABLE command.
+*/
+/* #include "sqliteInt.h" */
+
+/*
+** The code in this file only exists if we are not omitting the
+** ALTER TABLE logic from the build.
+*/
+#ifndef SQLITE_OMIT_ALTERTABLE
+
+/*
+** Parameter pTab is a table that is about to be altered
+** (either with ALTER TABLE ... RENAME TO or ALTER TABLE ... ADD COLUMN).
+** If the table is a system table, this function leaves an error message
+** in pParse->zErrMsg (system tables may not be altered) and returns
+** non-zero.
+**
+** Or, if pTab is not a system table, zero is returned.
+*/
+static int isAlterableTable(Parse *pParse, Table *pTab){
+ if( 0==sqlite3StrNICmp(pTab->zName, "sqlite_", 7)
+#ifndef SQLITE_OMIT_VIRTUALTABLE
+ || ( (pTab->tabFlags & TF_Shadow)!=0
+ && sqlite3ReadOnlyShadowTables(pParse->db)
+ )
+#endif
+ ){
+ sqlite3ErrorMsg(pParse, "table %s may not be altered", pTab->zName);
+ return 1;
+ }
+ return 0;
+}
+
+/*
+** Generate code to verify that the schemas of database zDb and, if
+** bTemp is not true, database "temp", can still be parsed. This is
+** called at the end of the generation of an ALTER TABLE ... RENAME ...
+** statement to ensure that the operation has not rendered any schema
+** objects unusable.
+*/
+static void renameTestSchema(Parse *pParse, const char *zDb, int bTemp){
+ sqlite3NestedParse(pParse,
+ "SELECT 1 "
+ "FROM \"%w\"." DFLT_SCHEMA_TABLE " "
+ "WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X'"
+ " AND sql NOT LIKE 'create virtual%%'"
+ " AND sqlite_rename_test(%Q, sql, type, name, %d)=NULL ",
+ zDb,
+ zDb, bTemp
+ );
+
+ if( bTemp==0 ){
+ sqlite3NestedParse(pParse,
+ "SELECT 1 "
+ "FROM temp." DFLT_SCHEMA_TABLE " "
+ "WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X'"
+ " AND sql NOT LIKE 'create virtual%%'"
+ " AND sqlite_rename_test(%Q, sql, type, name, 1)=NULL ",
+ zDb
+ );
+ }
+}
+
+/*
+** Generate code to reload the schema for database iDb. And, if iDb!=1, for
+** the temp database as well.
+*/
+static void renameReloadSchema(Parse *pParse, int iDb){
+ Vdbe *v = pParse->pVdbe;
+ if( v ){
+ sqlite3ChangeCookie(pParse, iDb);
+ sqlite3VdbeAddParseSchemaOp(pParse->pVdbe, iDb, 0);
+ if( iDb!=1 ) sqlite3VdbeAddParseSchemaOp(pParse->pVdbe, 1, 0);
+ }
+}
+
+/*
+** Generate code to implement the "ALTER TABLE xxx RENAME TO yyy"
+** command.
+*/
+SQLITE_PRIVATE void sqlite3AlterRenameTable(
+ Parse *pParse, /* Parser context. */
+ SrcList *pSrc, /* The table to rename. */
+ Token *pName /* The new table name. */
+){
+ int iDb; /* Database that contains the table */
+ char *zDb; /* Name of database iDb */
+ Table *pTab; /* Table being renamed */
+ char *zName = 0; /* NULL-terminated version of pName */
+ sqlite3 *db = pParse->db; /* Database connection */
+ int nTabName; /* Number of UTF-8 characters in zTabName */
+ const char *zTabName; /* Original name of the table */
+ Vdbe *v;
+ VTable *pVTab = 0; /* Non-zero if this is a v-tab with an xRename() */
+ u32 savedDbFlags; /* Saved value of db->mDbFlags */
+
+ savedDbFlags = db->mDbFlags;
+ if( NEVER(db->mallocFailed) ) goto exit_rename_table;
+ assert( pSrc->nSrc==1 );
+ assert( sqlite3BtreeHoldsAllMutexes(pParse->db) );
+
+ pTab = sqlite3LocateTableItem(pParse, 0, &pSrc->a[0]);
+ if( !pTab ) goto exit_rename_table;
+ iDb = sqlite3SchemaToIndex(pParse->db, pTab->pSchema);
+ zDb = db->aDb[iDb].zDbSName;
+ db->mDbFlags |= DBFLAG_PreferBuiltin;
+
+ /* Get a NULL terminated version of the new table name. */
+ zName = sqlite3NameFromToken(db, pName);
+ if( !zName ) goto exit_rename_table;
+
+ /* Check that a table or index named 'zName' does not already exist
+ ** in database iDb. If so, this is an error.
+ */
+ if( sqlite3FindTable(db, zName, zDb)
+ || sqlite3FindIndex(db, zName, zDb)
+ || sqlite3IsShadowTableOf(db, pTab, zName)
+ ){
+ sqlite3ErrorMsg(pParse,
+ "there is already another table or index with this name: %s", zName);
+ goto exit_rename_table;
+ }
+
+ /* Make sure it is not a system table being altered, or a reserved name
+ ** that the table is being renamed to.
+ */
+ if( SQLITE_OK!=isAlterableTable(pParse, pTab) ){
+ goto exit_rename_table;
+ }
+ if( SQLITE_OK!=sqlite3CheckObjectName(pParse,zName,"table",zName) ){
+ goto exit_rename_table;
+ }
+
+#ifndef SQLITE_OMIT_VIEW
+ if( pTab->pSelect ){
+ sqlite3ErrorMsg(pParse, "view %s may not be altered", pTab->zName);
+ goto exit_rename_table;
+ }
+#endif
+
+#ifndef SQLITE_OMIT_AUTHORIZATION
+ /* Invoke the authorization callback. */
+ if( sqlite3AuthCheck(pParse, SQLITE_ALTER_TABLE, zDb, pTab->zName, 0) ){
+ goto exit_rename_table;
+ }
+#endif
+
+#ifndef SQLITE_OMIT_VIRTUALTABLE
+ if( sqlite3ViewGetColumnNames(pParse, pTab) ){
+ goto exit_rename_table;
+ }
+ if( IsVirtual(pTab) ){
+ pVTab = sqlite3GetVTable(db, pTab);
+ if( pVTab->pVtab->pModule->xRename==0 ){
+ pVTab = 0;
+ }
+ }
+#endif
+
+ /* Begin a transaction for database iDb. Then modify the schema cookie
+ ** (since the ALTER TABLE modifies the schema). Call sqlite3MayAbort(),
+ ** as the scalar functions (e.g. sqlite_rename_table()) invoked by the
+ ** nested SQL may raise an exception. */
+ v = sqlite3GetVdbe(pParse);
+ if( v==0 ){
+ goto exit_rename_table;
+ }
+ sqlite3MayAbort(pParse);
+
+ /* figure out how many UTF-8 characters are in zTabName */
+ zTabName = pTab->zName;
+ nTabName = sqlite3Utf8CharLen(zTabName, -1);
+
+ /* Rewrite all CREATE TABLE, INDEX, TRIGGER or VIEW statements in
+ ** the schema to use the new table name. */
+ sqlite3NestedParse(pParse,
+ "UPDATE \"%w\"." DFLT_SCHEMA_TABLE " SET "
+ "sql = sqlite_rename_table(%Q, type, name, sql, %Q, %Q, %d) "
+ "WHERE (type!='index' OR tbl_name=%Q COLLATE nocase)"
+ "AND name NOT LIKE 'sqliteX_%%' ESCAPE 'X'"
+ , zDb, zDb, zTabName, zName, (iDb==1), zTabName
+ );
+
+ /* Update the tbl_name and name columns of the sqlite_schema table
+ ** as required. */
+ sqlite3NestedParse(pParse,
+ "UPDATE %Q." DFLT_SCHEMA_TABLE " SET "
+ "tbl_name = %Q, "
+ "name = CASE "
+ "WHEN type='table' THEN %Q "
+ "WHEN name LIKE 'sqliteX_autoindex%%' ESCAPE 'X' "
+ " AND type='index' THEN "
+ "'sqlite_autoindex_' || %Q || substr(name,%d+18) "
+ "ELSE name END "
+ "WHERE tbl_name=%Q COLLATE nocase AND "
+ "(type='table' OR type='index' OR type='trigger');",
+ zDb,
+ zName, zName, zName,
+ nTabName, zTabName
+ );
+
+#ifndef SQLITE_OMIT_AUTOINCREMENT
+ /* If the sqlite_sequence table exists in this database, then update
+ ** it with the new table name.
+ */
+ if( sqlite3FindTable(db, "sqlite_sequence", zDb) ){
+ sqlite3NestedParse(pParse,
+ "UPDATE \"%w\".sqlite_sequence set name = %Q WHERE name = %Q",
+ zDb, zName, pTab->zName);
+ }
+#endif
+
+ /* If the table being renamed is not itself part of the temp database,
+ ** edit view and trigger definitions within the temp database
+ ** as required. */
+ if( iDb!=1 ){
+ sqlite3NestedParse(pParse,
+ "UPDATE sqlite_temp_schema SET "
+ "sql = sqlite_rename_table(%Q, type, name, sql, %Q, %Q, 1), "
+ "tbl_name = "
+ "CASE WHEN tbl_name=%Q COLLATE nocase AND "
+ " sqlite_rename_test(%Q, sql, type, name, 1) "
+ "THEN %Q ELSE tbl_name END "
+ "WHERE type IN ('view', 'trigger')"
+ , zDb, zTabName, zName, zTabName, zDb, zName);
+ }
+
+ /* If this is a virtual table, invoke the xRename() function if
+ ** one is defined. The xRename() callback will modify the names
+ ** of any resources used by the v-table implementation (including other
+ ** SQLite tables) that are identified by the name of the virtual table.
+ */
+#ifndef SQLITE_OMIT_VIRTUALTABLE
+ if( pVTab ){
+ int i = ++pParse->nMem;
+ sqlite3VdbeLoadString(v, i, zName);
+ sqlite3VdbeAddOp4(v, OP_VRename, i, 0, 0,(const char*)pVTab, P4_VTAB);
+ }
+#endif
+
+ renameReloadSchema(pParse, iDb);
+ renameTestSchema(pParse, zDb, iDb==1);
+
+exit_rename_table:
+ sqlite3SrcListDelete(db, pSrc);
+ sqlite3DbFree(db, zName);
+ db->mDbFlags = savedDbFlags;
+}
+
+/*
+** Write code that will raise an error if the table described by
+** zDb and zTab is not empty.
+*/
+static void sqlite3ErrorIfNotEmpty(
+ Parse *pParse, /* Parsing context */
+ const char *zDb, /* Schema holding the table */
+ const char *zTab, /* Table to check for empty */
+ const char *zErr /* Error message text */
+){
+ sqlite3NestedParse(pParse,
+ "SELECT raise(ABORT,%Q) FROM \"%w\".\"%w\"",
+ zErr, zDb, zTab
+ );
+}
+
+/*
+** This function is called after an "ALTER TABLE ... ADD" statement
+** has been parsed. Argument pColDef contains the text of the new
+** column definition.
+**
+** The Table structure pParse->pNewTable was extended to include
+** the new column during parsing.
+*/
+SQLITE_PRIVATE void sqlite3AlterFinishAddColumn(Parse *pParse, Token *pColDef){
+ Table *pNew; /* Copy of pParse->pNewTable */
+ Table *pTab; /* Table being altered */
+ int iDb; /* Database number */
+ const char *zDb; /* Database name */
+ const char *zTab; /* Table name */
+ char *zCol; /* Null-terminated column definition */
+ Column *pCol; /* The new column */
+ Expr *pDflt; /* Default value for the new column */
+ sqlite3 *db; /* The database connection */
+ Vdbe *v; /* The prepared statement under construction */
+ int r1; /* Temporary registers */
+
+ db = pParse->db;
+ if( pParse->nErr || db->mallocFailed ) return;
+ pNew = pParse->pNewTable;
+ assert( pNew );
+
+ assert( sqlite3BtreeHoldsAllMutexes(db) );
+ iDb = sqlite3SchemaToIndex(db, pNew->pSchema);
+ zDb = db->aDb[iDb].zDbSName;
+ zTab = &pNew->zName[16]; /* Skip the "sqlite_altertab_" prefix on the name */
+ pCol = &pNew->aCol[pNew->nCol-1];
+ pDflt = pCol->pDflt;
+ pTab = sqlite3FindTable(db, zTab, zDb);
+ assert( pTab );
+
+#ifndef SQLITE_OMIT_AUTHORIZATION
+ /* Invoke the authorization callback. */
+ if( sqlite3AuthCheck(pParse, SQLITE_ALTER_TABLE, zDb, pTab->zName, 0) ){
+ return;
+ }
+#endif
+
+
+ /* Check that the new column is not specified as PRIMARY KEY or UNIQUE.
+ ** If there is a NOT NULL constraint, then the default value for the
+ ** column must not be NULL.
+ */
+ if( pCol->colFlags & COLFLAG_PRIMKEY ){
+ sqlite3ErrorMsg(pParse, "Cannot add a PRIMARY KEY column");
+ return;
+ }
+ if( pNew->pIndex ){
+ sqlite3ErrorMsg(pParse,
+ "Cannot add a UNIQUE column");
+ return;
+ }
+ if( (pCol->colFlags & COLFLAG_GENERATED)==0 ){
+ /* If the default value for the new column was specified with a
+ ** literal NULL, then set pDflt to 0. This simplifies checking
+ ** for an SQL NULL default below.
+ */
+ assert( pDflt==0 || pDflt->op==TK_SPAN );
+ if( pDflt && pDflt->pLeft->op==TK_NULL ){
+ pDflt = 0;
+ }
+ if( (db->flags&SQLITE_ForeignKeys) && pNew->pFKey && pDflt ){
+ sqlite3ErrorIfNotEmpty(pParse, zDb, zTab,
+ "Cannot add a REFERENCES column with non-NULL default value");
+ }
+ if( pCol->notNull && !pDflt ){
+ sqlite3ErrorIfNotEmpty(pParse, zDb, zTab,
+ "Cannot add a NOT NULL column with default value NULL");
+ }
+
+
+ /* Ensure the default expression is something that sqlite3ValueFromExpr()
+ ** can handle (i.e. not CURRENT_TIME etc.)
+ */
+ if( pDflt ){
+ sqlite3_value *pVal = 0;
+ int rc;
+ rc = sqlite3ValueFromExpr(db, pDflt, SQLITE_UTF8, SQLITE_AFF_BLOB, &pVal);
+ assert( rc==SQLITE_OK || rc==SQLITE_NOMEM );
+ if( rc!=SQLITE_OK ){
+ assert( db->mallocFailed == 1 );
+ return;
+ }
+ if( !pVal ){
+ sqlite3ErrorIfNotEmpty(pParse, zDb, zTab,
+ "Cannot add a column with non-constant default");
+ }
+ sqlite3ValueFree(pVal);
+ }
+ }else if( pCol->colFlags & COLFLAG_STORED ){
+ sqlite3ErrorIfNotEmpty(pParse, zDb, zTab, "cannot add a STORED column");
+ }
+
+
+ /* Modify the CREATE TABLE statement. */
+ zCol = sqlite3DbStrNDup(db, (char*)pColDef->z, pColDef->n);
+ if( zCol ){
+ char *zEnd = &zCol[pColDef->n-1];
+ u32 savedDbFlags = db->mDbFlags;
+ while( zEnd>zCol && (*zEnd==';' || sqlite3Isspace(*zEnd)) ){
+ *zEnd-- = '\0';
+ }
+ db->mDbFlags |= DBFLAG_PreferBuiltin;
+ sqlite3NestedParse(pParse,
+ "UPDATE \"%w\"." DFLT_SCHEMA_TABLE " SET "
+ "sql = substr(sql,1,%d) || ', ' || %Q || substr(sql,%d) "
+ "WHERE type = 'table' AND name = %Q",
+ zDb, pNew->addColOffset, zCol, pNew->addColOffset+1,
+ zTab
+ );
+ sqlite3DbFree(db, zCol);
+ db->mDbFlags = savedDbFlags;
+ }
+
+ /* Make sure the schema version is at least 3. But do not upgrade
+ ** from less than 3 to 4, as that will corrupt any preexisting DESC
+ ** index.
+ */
+ v = sqlite3GetVdbe(pParse);
+ if( v ){
+ r1 = sqlite3GetTempReg(pParse);
+ sqlite3VdbeAddOp3(v, OP_ReadCookie, iDb, r1, BTREE_FILE_FORMAT);
+ sqlite3VdbeUsesBtree(v, iDb);
+ sqlite3VdbeAddOp2(v, OP_AddImm, r1, -2);
+ sqlite3VdbeAddOp2(v, OP_IfPos, r1, sqlite3VdbeCurrentAddr(v)+2);
+ VdbeCoverage(v);
+ sqlite3VdbeAddOp3(v, OP_SetCookie, iDb, BTREE_FILE_FORMAT, 3);
+ sqlite3ReleaseTempReg(pParse, r1);
+ }
+
+ /* Reload the table definition */
+ renameReloadSchema(pParse, iDb);
+}
+
+/*
+** This function is called by the parser after the table-name in
+** an "ALTER TABLE <table-name> ADD" statement is parsed. Argument
+** pSrc is the full-name of the table being altered.
+**
+** This routine makes a (partial) copy of the Table structure
+** for the table being altered and sets Parse.pNewTable to point
+** to it. Routines called by the parser as the column definition
+** is parsed (i.e. sqlite3AddColumn()) add the new Column data to
+** the copy. The copy of the Table structure is deleted by tokenize.c
+** after parsing is finished.
+**
+** Routine sqlite3AlterFinishAddColumn() will be called to complete
+** coding the "ALTER TABLE ... ADD" statement.
+*/
+SQLITE_PRIVATE void sqlite3AlterBeginAddColumn(Parse *pParse, SrcList *pSrc){
+ Table *pNew;
+ Table *pTab;
+ int iDb;
+ int i;
+ int nAlloc;
+ sqlite3 *db = pParse->db;
+
+ /* Look up the table being altered. */
+ assert( pParse->pNewTable==0 );
+ assert( sqlite3BtreeHoldsAllMutexes(db) );
+ if( db->mallocFailed ) goto exit_begin_add_column;
+ pTab = sqlite3LocateTableItem(pParse, 0, &pSrc->a[0]);
+ if( !pTab ) goto exit_begin_add_column;
+
+#ifndef SQLITE_OMIT_VIRTUALTABLE
+ if( IsVirtual(pTab) ){
+ sqlite3ErrorMsg(pParse, "virtual tables may not be altered");
+ goto exit_begin_add_column;
+ }
+#endif
+
+ /* Make sure this is not an attempt to ALTER a view. */
+ if( pTab->pSelect ){
+ sqlite3ErrorMsg(pParse, "Cannot add a column to a view");
+ goto exit_begin_add_column;
+ }
+ if( SQLITE_OK!=isAlterableTable(pParse, pTab) ){
+ goto exit_begin_add_column;
+ }
+
+ sqlite3MayAbort(pParse);
+ assert( pTab->addColOffset>0 );
+ iDb = sqlite3SchemaToIndex(db, pTab->pSchema);
+
+ /* Put a copy of the Table struct in Parse.pNewTable for the
+ ** sqlite3AddColumn() function and friends to modify. But modify
+** the name by adding an "sqlite_altertab_" prefix.  By adding this
+** prefix, we ensure that the name will not collide with an existing
+** table because user tables are not allowed to have the "sqlite_"
+** prefix on their names.
+ */
+ pNew = (Table*)sqlite3DbMallocZero(db, sizeof(Table));
+ if( !pNew ) goto exit_begin_add_column;
+ pParse->pNewTable = pNew;
+ pNew->nTabRef = 1;
+ pNew->nCol = pTab->nCol;
+ assert( pNew->nCol>0 );
+ nAlloc = (((pNew->nCol-1)/8)*8)+8;
+ assert( nAlloc>=pNew->nCol && nAlloc%8==0 && nAlloc-pNew->nCol<8 );
+ pNew->aCol = (Column*)sqlite3DbMallocZero(db, sizeof(Column)*nAlloc);
+ pNew->zName = sqlite3MPrintf(db, "sqlite_altertab_%s", pTab->zName);
+ if( !pNew->aCol || !pNew->zName ){
+ assert( db->mallocFailed );
+ goto exit_begin_add_column;
+ }
+ memcpy(pNew->aCol, pTab->aCol, sizeof(Column)*pNew->nCol);
+ for(i=0; i<pNew->nCol; i++){
+ Column *pCol = &pNew->aCol[i];
+ pCol->zName = sqlite3DbStrDup(db, pCol->zName);
+ pCol->hName = sqlite3StrIHash(pCol->zName);
+ pCol->zColl = 0;
+ pCol->pDflt = 0;
+ }
+ pNew->pSchema = db->aDb[iDb].pSchema;
+ pNew->addColOffset = pTab->addColOffset;
+ pNew->nTabRef = 1;
+
+exit_begin_add_column:
+ sqlite3SrcListDelete(db, pSrc);
+ return;
+}
+
+/*
+** Parameter pTab is the subject of an ALTER TABLE ... RENAME COLUMN
+** command. This function checks if the table is a view or virtual
+** table (columns of views or virtual tables may not be renamed). If so,
+** it loads an error message into pParse and returns non-zero.
+**
+** Or, if pTab is not a view or virtual table, zero is returned.
+*/
+#if !defined(SQLITE_OMIT_VIEW) || !defined(SQLITE_OMIT_VIRTUALTABLE)
+static int isRealTable(Parse *pParse, Table *pTab){
+ const char *zType = 0;
+#ifndef SQLITE_OMIT_VIEW
+ if( pTab->pSelect ){
+ zType = "view";
+ }
+#endif
+#ifndef SQLITE_OMIT_VIRTUALTABLE
+ if( IsVirtual(pTab) ){
+ zType = "virtual table";
+ }
+#endif
+ if( zType ){
+ sqlite3ErrorMsg(
+ pParse, "cannot rename columns of %s \"%s\"", zType, pTab->zName
+ );
+ return 1;
+ }
+ return 0;
+}
+#else /* !defined(SQLITE_OMIT_VIEW) || !defined(SQLITE_OMIT_VIRTUALTABLE) */
+# define isRealTable(x,y) (0)
+#endif
+
+/*
+** Handles the following parser reduction:
+**
+** cmd ::= ALTER TABLE pSrc RENAME COLUMN pOld TO pNew
+*/
+SQLITE_PRIVATE void sqlite3AlterRenameColumn(
+ Parse *pParse, /* Parsing context */
+ SrcList *pSrc, /* Table being altered. pSrc->nSrc==1 */
+ Token *pOld, /* Name of column being changed */
+ Token *pNew /* New column name */
+){
+ sqlite3 *db = pParse->db; /* Database connection */
+ Table *pTab; /* Table being updated */
+ int iCol; /* Index of column being renamed */
+ char *zOld = 0; /* Old column name */
+ char *zNew = 0; /* New column name */
+ const char *zDb; /* Name of schema containing the table */
+ int iSchema; /* Index of the schema */
+ int bQuote; /* True to quote the new name */
+
+ /* Locate the table to be altered */
+ pTab = sqlite3LocateTableItem(pParse, 0, &pSrc->a[0]);
+ if( !pTab ) goto exit_rename_column;
+
+ /* Cannot alter a system table */
+ if( SQLITE_OK!=isAlterableTable(pParse, pTab) ) goto exit_rename_column;
+ if( SQLITE_OK!=isRealTable(pParse, pTab) ) goto exit_rename_column;
+
+ /* Which schema holds the table to be altered */
+ iSchema = sqlite3SchemaToIndex(db, pTab->pSchema);
+ assert( iSchema>=0 );
+ zDb = db->aDb[iSchema].zDbSName;
+
+#ifndef SQLITE_OMIT_AUTHORIZATION
+ /* Invoke the authorization callback. */
+ if( sqlite3AuthCheck(pParse, SQLITE_ALTER_TABLE, zDb, pTab->zName, 0) ){
+ goto exit_rename_column;
+ }
+#endif
+
+ /* Make sure the old name really is a column name in the table to be
+ ** altered. Set iCol to be the index of the column being renamed */
+ zOld = sqlite3NameFromToken(db, pOld);
+ if( !zOld ) goto exit_rename_column;
+ for(iCol=0; iCol<pTab->nCol; iCol++){
+ if( 0==sqlite3StrICmp(pTab->aCol[iCol].zName, zOld) ) break;
+ }
+ if( iCol==pTab->nCol ){
+ sqlite3ErrorMsg(pParse, "no such column: \"%s\"", zOld);
+ goto exit_rename_column;
+ }
+
+ /* Do the rename operation using a recursive UPDATE statement that
+ ** uses the sqlite_rename_column() SQL function to compute the new
+ ** CREATE statement text for the sqlite_schema table.
+ */
+ sqlite3MayAbort(pParse);
+ zNew = sqlite3NameFromToken(db, pNew);
+ if( !zNew ) goto exit_rename_column;
+ assert( pNew->n>0 );
+ bQuote = sqlite3Isquote(pNew->z[0]);
+ sqlite3NestedParse(pParse,
+ "UPDATE \"%w\"." DFLT_SCHEMA_TABLE " SET "
+ "sql = sqlite_rename_column(sql, type, name, %Q, %Q, %d, %Q, %d, %d) "
+ "WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' "
+ " AND (type != 'index' OR tbl_name = %Q)"
+ " AND sql NOT LIKE 'create virtual%%'",
+ zDb,
+ zDb, pTab->zName, iCol, zNew, bQuote, iSchema==1,
+ pTab->zName
+ );
+
+ sqlite3NestedParse(pParse,
+ "UPDATE temp." DFLT_SCHEMA_TABLE " SET "
+ "sql = sqlite_rename_column(sql, type, name, %Q, %Q, %d, %Q, %d, 1) "
+ "WHERE type IN ('trigger', 'view')",
+ zDb, pTab->zName, iCol, zNew, bQuote
+ );
+
+ /* Drop and reload the database schema. */
+ renameReloadSchema(pParse, iSchema);
+ renameTestSchema(pParse, zDb, iSchema==1);
+
+ exit_rename_column:
+ sqlite3SrcListDelete(db, pSrc);
+ sqlite3DbFree(db, zOld);
+ sqlite3DbFree(db, zNew);
+ return;
+}
+
+/*
+** Each RenameToken object maps an element of the parse tree into
+** the token that generated that element. The parse tree element
+** might be one of:
+**
+** * A pointer to an Expr that represents an ID
+** * The name of a table column in Column.zName
+**
+** A list of RenameToken objects can be constructed during parsing.
+** Each new object is created by sqlite3RenameTokenMap().
+** As the parse tree is transformed, the sqlite3RenameTokenRemap()
+** routine is used to keep the mapping current.
+**
+** After the parse finishes, the renameTokenFind() routine can be used
+** to look up the actual token value that created some element in
+** the parse tree.
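+**
+** For example (a sketch): while parsing "CREATE TABLE t1(abc TEXT)",
+** the Column.zName string "abc" is mapped to the three-character token
+** "abc" in the input SQL, so that ALTER TABLE RENAME COLUMN can later
+** rewrite exactly that span of the stored CREATE statement.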
+*/
+struct RenameToken {
+ void *p; /* Parse tree element created by token t */
+ Token t; /* The token that created parse tree element p */
+ RenameToken *pNext; /* Next in a list of all RenameToken objects */
+};
+
+/*
+** The context of an ALTER TABLE RENAME COLUMN operation that gets passed
+** down into the Walker.
+*/
+typedef struct RenameCtx RenameCtx;
+struct RenameCtx {
+ RenameToken *pList; /* List of tokens to overwrite */
+ int nList; /* Number of tokens in pList */
+ int iCol; /* Index of column being renamed */
+ Table *pTab; /* Table being ALTERed */
+ const char *zOld; /* Old column name */
+};
+
+#ifdef SQLITE_DEBUG
+/*
+** This function is only for debugging. It performs two tasks:
+**
+** 1. Checks that pointer pPtr does not already appear in the
+** rename-token list.
+**
+** 2. Dereferences each pointer in the rename-token list.
+**
+** The second is most effective when debugging under valgrind or
+** address-sanitizer or similar. If any of these pointers no longer
+** point to valid objects, an exception is raised by the memory-checking
+** tool.
+**
+** The point of this is to prevent comparisons of invalid pointer values.
+** Even though this always seems to work, it is undefined according to the
+** C standard. Example of undefined comparison:
+**
+** sqlite3_free(x);
+** if( x==y ) ...
+**
+** Technically, as x no longer points into a valid object or to the byte
+** following a valid object, it may not be used in comparison operations.
+*/
+static void renameTokenCheckAll(Parse *pParse, void *pPtr){
+ if( pParse->nErr==0 && pParse->db->mallocFailed==0 ){
+ RenameToken *p;
+ u8 i = 0;
+ for(p=pParse->pRename; p; p=p->pNext){
+ if( p->p ){
+ assert( p->p!=pPtr );
+ i += *(u8*)(p->p);
+ }
+ }
+ }
+}
+#else
+# define renameTokenCheckAll(x,y)
+#endif
+
+/*
+** Remember that the parser tree element pPtr was created using
+** the token pToken.
+**
+** In other words, construct a new RenameToken object and add it
+** to the list of RenameToken objects currently being built up
+** in pParse->pRename.
+**
+** The pPtr argument is returned so that this routine can be used
+** with tail recursion in the tokenExpr() routine, for a small performance
+** improvement.
+*/
+SQLITE_PRIVATE void *sqlite3RenameTokenMap(Parse *pParse, void *pPtr, Token *pToken){
+ RenameToken *pNew;
+ assert( pPtr || pParse->db->mallocFailed );
+ renameTokenCheckAll(pParse, pPtr);
+ if( ALWAYS(pParse->eParseMode!=PARSE_MODE_UNMAP) ){
+ pNew = sqlite3DbMallocZero(pParse->db, sizeof(RenameToken));
+ if( pNew ){
+ pNew->p = pPtr;
+ pNew->t = *pToken;
+ pNew->pNext = pParse->pRename;
+ pParse->pRename = pNew;
+ }
+ }
+
+ return pPtr;
+}
+
+/*
+** It is assumed that there is already a RenameToken object associated
+** with parse tree element pFrom. This function remaps the associated token
+** to parse tree element pTo.
+*/
+SQLITE_PRIVATE void sqlite3RenameTokenRemap(Parse *pParse, void *pTo, void *pFrom){
+ RenameToken *p;
+ renameTokenCheckAll(pParse, pTo);
+ for(p=pParse->pRename; p; p=p->pNext){
+ if( p->p==pFrom ){
+ p->p = pTo;
+ break;
+ }
+ }
+}
+
+/*
+** Walker callback used by sqlite3RenameExprUnmap().
+*/
+static int renameUnmapExprCb(Walker *pWalker, Expr *pExpr){
+ Parse *pParse = pWalker->pParse;
+ sqlite3RenameTokenRemap(pParse, 0, (void*)pExpr);
+ return WRC_Continue;
+}
+
+/*
+** Iterate through the Select objects that are part of WITH clauses attached
+** to select statement pSelect.
+*/
+static void renameWalkWith(Walker *pWalker, Select *pSelect){
+ With *pWith = pSelect->pWith;
+ if( pWith ){
+ int i;
+ for(i=0; i<pWith->nCte; i++){
+ Select *p = pWith->a[i].pSelect;
+ NameContext sNC;
+ memset(&sNC, 0, sizeof(sNC));
+ sNC.pParse = pWalker->pParse;
+ sqlite3SelectPrep(sNC.pParse, p, &sNC);
+ sqlite3WalkSelect(pWalker, p);
+ sqlite3RenameExprlistUnmap(pWalker->pParse, pWith->a[i].pCols);
+ }
+ }
+}
+
+/*
+** Unmap all tokens in the IdList object passed as the second argument.
+*/
+static void unmapColumnIdlistNames(
+ Parse *pParse,
+ IdList *pIdList
+){
+ if( pIdList ){
+ int ii;
+ for(ii=0; ii<pIdList->nId; ii++){
+ sqlite3RenameTokenRemap(pParse, 0, (void*)pIdList->a[ii].zName);
+ }
+ }
+}
+
+/*
+** Walker callback used by sqlite3RenameExprUnmap().
+*/
+static int renameUnmapSelectCb(Walker *pWalker, Select *p){
+ Parse *pParse = pWalker->pParse;
+ int i;
+ if( pParse->nErr ) return WRC_Abort;
+ if( NEVER(p->selFlags & SF_View) ) return WRC_Prune;
+ if( ALWAYS(p->pEList) ){
+ ExprList *pList = p->pEList;
+ for(i=0; i<pList->nExpr; i++){
+ if( pList->a[i].zEName && pList->a[i].eEName==ENAME_NAME ){
+ sqlite3RenameTokenRemap(pParse, 0, (void*)pList->a[i].zEName);
+ }
+ }
+ }
+ if( ALWAYS(p->pSrc) ){ /* Every Select has a SrcList, even if it is empty */
+ SrcList *pSrc = p->pSrc;
+ for(i=0; i<pSrc->nSrc; i++){
+ sqlite3RenameTokenRemap(pParse, 0, (void*)pSrc->a[i].zName);
+ if( sqlite3WalkExpr(pWalker, pSrc->a[i].pOn) ) return WRC_Abort;
+ unmapColumnIdlistNames(pParse, pSrc->a[i].pUsing);
+ }
+ }
+
+ renameWalkWith(pWalker, p);
+ return WRC_Continue;
+}
+
+/*
+** Remove all nodes that are part of expression pExpr from the rename list.
+*/
+SQLITE_PRIVATE void sqlite3RenameExprUnmap(Parse *pParse, Expr *pExpr){
+ u8 eMode = pParse->eParseMode;
+ Walker sWalker;
+ memset(&sWalker, 0, sizeof(Walker));
+ sWalker.pParse = pParse;
+ sWalker.xExprCallback = renameUnmapExprCb;
+ sWalker.xSelectCallback = renameUnmapSelectCb;
+ pParse->eParseMode = PARSE_MODE_UNMAP;
+ sqlite3WalkExpr(&sWalker, pExpr);
+ pParse->eParseMode = eMode;
+}
+
+/*
+** Remove all nodes that are part of expression-list pEList from the
+** rename list.
+*/
+SQLITE_PRIVATE void sqlite3RenameExprlistUnmap(Parse *pParse, ExprList *pEList){
+ if( pEList ){
+ int i;
+ Walker sWalker;
+ memset(&sWalker, 0, sizeof(Walker));
+ sWalker.pParse = pParse;
+ sWalker.xExprCallback = renameUnmapExprCb;
+ sqlite3WalkExprList(&sWalker, pEList);
+ for(i=0; i<pEList->nExpr; i++){
+ if( ALWAYS(pEList->a[i].eEName==ENAME_NAME) ){
+ sqlite3RenameTokenRemap(pParse, 0, (void*)pEList->a[i].zEName);
+ }
+ }
+ }
+}
+
+/*
+** Free the list of RenameToken objects given in the second argument
+*/
+static void renameTokenFree(sqlite3 *db, RenameToken *pToken){
+ RenameToken *pNext;
+ RenameToken *p;
+ for(p=pToken; p; p=pNext){
+ pNext = p->pNext;
+ sqlite3DbFree(db, p);
+ }
+}
+
+/*
+** Search the Parse object passed as the first argument for a RenameToken
+** object associated with parse tree element pPtr. If found, remove it
+** from the Parse object and add it to the list maintained by the
+** RenameCtx object passed as the second argument.
+*/
+static void renameTokenFind(Parse *pParse, struct RenameCtx *pCtx, void *pPtr){
+ RenameToken **pp;
+ assert( pPtr!=0 );
+ for(pp=&pParse->pRename; (*pp); pp=&(*pp)->pNext){
+ if( (*pp)->p==pPtr ){
+ RenameToken *pToken = *pp;
+ *pp = pToken->pNext;
+ pToken->pNext = pCtx->pList;
+ pCtx->pList = pToken;
+ pCtx->nList++;
+ break;
+ }
+ }
+}
+
+/*
+** This is a Walker select callback.  It prunes views, descends into
+** WITH clauses, and otherwise does nothing.  A select callback is
+** required because without one, sqlite3WalkExpr() and similar do not
+** descend into sub-select statements.
+*/
+static int renameColumnSelectCb(Walker *pWalker, Select *p){
+ if( p->selFlags & SF_View ) return WRC_Prune;
+ renameWalkWith(pWalker, p);
+ return WRC_Continue;
+}
+
+/*
+** This is a Walker expression callback.
+**
+** For every TK_COLUMN node in the expression tree, search to see
+** if the column being referenced is the column being renamed by an
+** ALTER TABLE statement. If it is, then attach its associated
+** RenameToken object to the list of RenameToken objects being
+** constructed in RenameCtx object at pWalker->u.pRename.
+*/
+static int renameColumnExprCb(Walker *pWalker, Expr *pExpr){
+ RenameCtx *p = pWalker->u.pRename;
+ if( pExpr->op==TK_TRIGGER
+ && pExpr->iColumn==p->iCol
+ && pWalker->pParse->pTriggerTab==p->pTab
+ ){
+ renameTokenFind(pWalker->pParse, p, (void*)pExpr);
+ }else if( pExpr->op==TK_COLUMN
+ && pExpr->iColumn==p->iCol
+ && p->pTab==pExpr->y.pTab
+ ){
+ renameTokenFind(pWalker->pParse, p, (void*)pExpr);
+ }
+ return WRC_Continue;
+}
+
+/*
+** The RenameCtx contains a list of tokens that reference a column that
+** is being renamed by an ALTER TABLE statement. Return the "last"
+** RenameToken in the RenameCtx and remove that RenameToken from the
+** RenameContext. "Last" means the token that appears furthest to the
+** right in the input SQL. Repeated calls to this routine therefore
+** return the column name tokens from right to left, which lets the
+** caller edit the SQL text without invalidating the byte offsets of
+** tokens that have not yet been processed.
+*/
+static RenameToken *renameColumnTokenNext(RenameCtx *pCtx){
+ RenameToken *pBest = pCtx->pList;
+ RenameToken *pToken;
+ RenameToken **pp;
+
+ for(pToken=pBest->pNext; pToken; pToken=pToken->pNext){
+ if( pToken->t.z>pBest->t.z ) pBest = pToken;
+ }
+ for(pp=&pCtx->pList; *pp!=pBest; pp=&(*pp)->pNext);
+ *pp = pBest->pNext;
+
+ return pBest;
+}
+
+/*
+** An error occurred while parsing or otherwise processing a database
+** object (either pParse->pNewTable, pNewIndex or pNewTrigger) as part of an
+** ALTER TABLE RENAME COLUMN program. The error message emitted by the
+** sub-routine is currently stored in pParse->zErrMsg. This function
+** adds context to the error message and then stores it in pCtx.
+*/
+static void renameColumnParseError(
+ sqlite3_context *pCtx,
+ int bPost,
+ sqlite3_value *pType,
+ sqlite3_value *pObject,
+ Parse *pParse
+){
+ const char *zT = (const char*)sqlite3_value_text(pType);
+ const char *zN = (const char*)sqlite3_value_text(pObject);
+ char *zErr;
+
+ zErr = sqlite3_mprintf("error in %s %s%s: %s",
+ zT, zN, (bPost ? " after rename" : ""),
+ pParse->zErrMsg
+ );
+ sqlite3_result_error(pCtx, zErr, -1);
+ sqlite3_free(zErr);
+}
+
+/*
+** For each name in the expression-list pEList (i.e. each
+** pEList->a[i].zName) that matches the string in zOld, extract the
+** corresponding rename-token from Parse object pParse and add it
+** to the RenameCtx pCtx.
+*/
+static void renameColumnElistNames(
+ Parse *pParse,
+ RenameCtx *pCtx,
+ ExprList *pEList,
+ const char *zOld
+){
+ if( pEList ){
+ int i;
+ for(i=0; i<pEList->nExpr; i++){
+ char *zName = pEList->a[i].zEName;
+ if( ALWAYS(pEList->a[i].eEName==ENAME_NAME)
+ && ALWAYS(zName!=0)
+ && 0==sqlite3_stricmp(zName, zOld)
+ ){
+ renameTokenFind(pParse, pCtx, (void*)zName);
+ }
+ }
+ }
+}
+
+/*
+** For each name in the id-list pIdList (i.e. each pIdList->a[i].zName)
+** that matches the string in zOld, extract the corresponding rename-token
+** from Parse object pParse and add it to the RenameCtx pCtx.
+*/
+static void renameColumnIdlistNames(
+ Parse *pParse,
+ RenameCtx *pCtx,
+ IdList *pIdList,
+ const char *zOld
+){
+ if( pIdList ){
+ int i;
+ for(i=0; i<pIdList->nId; i++){
+ char *zName = pIdList->a[i].zName;
+ if( 0==sqlite3_stricmp(zName, zOld) ){
+ renameTokenFind(pParse, pCtx, (void*)zName);
+ }
+ }
+ }
+}
+
+
+/*
+** Parse the SQL statement zSql using Parse object (*p). The Parse object
+** is initialized by this function before it is used.
+*/
+static int renameParseSql(
+ Parse *p, /* Memory to use for Parse object */
+ const char *zDb, /* Name of schema SQL belongs to */
+ sqlite3 *db, /* Database handle */
+ const char *zSql, /* SQL to parse */
+ int bTemp /* True if SQL is from temp schema */
+){
+ int rc;
+ char *zErr = 0;
+
+ db->init.iDb = bTemp ? 1 : sqlite3FindDbName(db, zDb);
+
+ /* Parse the SQL statement passed as the first argument. If no error
+ ** occurs and the parse does not result in a new table, index or
+ ** trigger object, the database must be corrupt. */
+ memset(p, 0, sizeof(Parse));
+ p->eParseMode = PARSE_MODE_RENAME;
+ p->db = db;
+ p->nQueryLoop = 1;
+ rc = sqlite3RunParser(p, zSql, &zErr);
+ assert( p->zErrMsg==0 );
+ assert( rc!=SQLITE_OK || zErr==0 );
+ p->zErrMsg = zErr;
+ if( db->mallocFailed ) rc = SQLITE_NOMEM;
+ if( rc==SQLITE_OK
+ && p->pNewTable==0 && p->pNewIndex==0 && p->pNewTrigger==0
+ ){
+ rc = SQLITE_CORRUPT_BKPT;
+ }
+
+#ifdef SQLITE_DEBUG
+ /* Ensure that all mappings in the Parse.pRename list really do map to
+ ** a part of the input string. */
+ if( rc==SQLITE_OK ){
+ int nSql = sqlite3Strlen30(zSql);
+ RenameToken *pToken;
+ for(pToken=p->pRename; pToken; pToken=pToken->pNext){
+ assert( pToken->t.z>=zSql && &pToken->t.z[pToken->t.n]<=&zSql[nSql] );
+ }
+ }
+#endif
+
+ db->init.iDb = 0;
+ return rc;
+}
+
+/*
+** This function edits SQL statement zSql, replacing each token identified
+** by the linked list pRename with the text of zNew. If argument bQuote is
+** true, then zNew is always quoted first.  If no error occurs, the
+** edited SQL is loaded into context object pCtx as the result.
+**
+** Or, if an error occurs (i.e. an OOM condition), an error is left in
+** pCtx and an SQLite error code returned.
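+**
+** For example (an illustrative sketch): renaming column "a" to "xyz"
+** rewrites
+**
+**     CREATE TABLE t1(a INTEGER, b TEXT)
+** as
+**     CREATE TABLE t1(xyz INTEGER, b TEXT)
+**
+** Each occurrence is located through the RenameToken list rather than
+** by textual search, so identifiers inside string literals or comments
+** are never modified.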
+*/
+static int renameEditSql(
+ sqlite3_context *pCtx, /* Return result here */
+ RenameCtx *pRename, /* Rename context */
+ const char *zSql, /* SQL statement to edit */
+ const char *zNew, /* New token text */
+ int bQuote /* True to always quote token */
+){
+ int nNew = sqlite3Strlen30(zNew);
+ int nSql = sqlite3Strlen30(zSql);
+ sqlite3 *db = sqlite3_context_db_handle(pCtx);
+ int rc = SQLITE_OK;
+ char *zQuot;
+ char *zOut;
+ int nQuot;
+
+ /* Set zQuot to point to a buffer containing a quoted copy of the
+ ** identifier zNew. If the corresponding identifier in the original
+ ** ALTER TABLE statement was quoted (bQuote==1), then set zNew to
+ ** point to zQuot so that all substitutions are made using the
+ ** quoted version of the new column name. */
+ zQuot = sqlite3MPrintf(db, "\"%w\"", zNew);
+ if( zQuot==0 ){
+ return SQLITE_NOMEM;
+ }else{
+ nQuot = sqlite3Strlen30(zQuot);
+ }
+ if( bQuote ){
+ zNew = zQuot;
+ nNew = nQuot;
+ }
+
+ /* At this point pRename->pList contains a list of RenameToken objects
+ ** corresponding to all tokens in the input SQL that must be replaced
+ ** with the new column name. All that remains is to construct and
+ ** return the edited SQL string. */
+ assert( nQuot>=nNew );
+ zOut = sqlite3DbMallocZero(db, nSql + pRename->nList*nQuot + 1);
+ if( zOut ){
+ int nOut = nSql;
+ memcpy(zOut, zSql, nSql);
+ while( pRename->pList ){
+ int iOff; /* Offset of token to replace in zOut */
+ RenameToken *pBest = renameColumnTokenNext(pRename);
+
+ u32 nReplace;
+ const char *zReplace;
+ if( sqlite3IsIdChar(*pBest->t.z) ){
+ nReplace = nNew;
+ zReplace = zNew;
+ }else{
+ nReplace = nQuot;
+ zReplace = zQuot;
+ }
+
+ iOff = pBest->t.z - zSql;
+ if( pBest->t.n!=nReplace ){
+ memmove(&zOut[iOff + nReplace], &zOut[iOff + pBest->t.n],
+ nOut - (iOff + pBest->t.n)
+ );
+ nOut += nReplace - pBest->t.n;
+ zOut[nOut] = '\0';
+ }
+ memcpy(&zOut[iOff], zReplace, nReplace);
+ sqlite3DbFree(db, pBest);
+ }
+
+ sqlite3_result_text(pCtx, zOut, -1, SQLITE_TRANSIENT);
+ sqlite3DbFree(db, zOut);
+ }else{
+ rc = SQLITE_NOMEM;
+ }
+
+ sqlite3_free(zQuot);
+ return rc;
+}
+
+/*
+** Resolve all symbols in the trigger at pParse->pNewTrigger, assuming
+** it was read from the schema of database zDb. Return SQLITE_OK if
+** successful. Otherwise, return an SQLite error code and leave an error
+** message in the Parse object.
+*/
+static int renameResolveTrigger(Parse *pParse){
+ sqlite3 *db = pParse->db;
+ Trigger *pNew = pParse->pNewTrigger;
+ TriggerStep *pStep;
+ NameContext sNC;
+ int rc = SQLITE_OK;
+
+ memset(&sNC, 0, sizeof(sNC));
+ sNC.pParse = pParse;
+ assert( pNew->pTabSchema );
+ pParse->pTriggerTab = sqlite3FindTable(db, pNew->table,
+ db->aDb[sqlite3SchemaToIndex(db, pNew->pTabSchema)].zDbSName
+ );
+ pParse->eTriggerOp = pNew->op;
+ /* ALWAYS() because if the table of the trigger does not exist, the
+ ** error would have been hit before this point */
+ if( ALWAYS(pParse->pTriggerTab) ){
+ rc = sqlite3ViewGetColumnNames(pParse, pParse->pTriggerTab);
+ }
+
+ /* Resolve symbols in WHEN clause */
+ if( rc==SQLITE_OK && pNew->pWhen ){
+ rc = sqlite3ResolveExprNames(&sNC, pNew->pWhen);
+ }
+
+ for(pStep=pNew->step_list; rc==SQLITE_OK && pStep; pStep=pStep->pNext){
+ if( pStep->pSelect ){
+ sqlite3SelectPrep(pParse, pStep->pSelect, &sNC);
+ if( pParse->nErr ) rc = pParse->rc;
+ }
+ if( rc==SQLITE_OK && pStep->zTarget ){
+ SrcList *pSrc = sqlite3TriggerStepSrc(pParse, pStep);
+ if( pSrc ){
+ int i;
+ for(i=0; i<pSrc->nSrc && rc==SQLITE_OK; i++){
+ struct SrcList_item *p = &pSrc->a[i];
+ p->pTab = sqlite3LocateTableItem(pParse, 0, p);
+ p->iCursor = pParse->nTab++;
+ if( p->pTab==0 ){
+ rc = SQLITE_ERROR;
+ }else{
+ p->pTab->nTabRef++;
+ rc = sqlite3ViewGetColumnNames(pParse, p->pTab);
+ }
+ }
+ sNC.pSrcList = pSrc;
+ if( rc==SQLITE_OK && pStep->pWhere ){
+ rc = sqlite3ResolveExprNames(&sNC, pStep->pWhere);
+ }
+ if( rc==SQLITE_OK ){
+ rc = sqlite3ResolveExprListNames(&sNC, pStep->pExprList);
+ }
+ assert( !pStep->pUpsert || (!pStep->pWhere && !pStep->pExprList) );
+ if( pStep->pUpsert ){
+ Upsert *pUpsert = pStep->pUpsert;
+ assert( rc==SQLITE_OK );
+ pUpsert->pUpsertSrc = pSrc;
+ sNC.uNC.pUpsert = pUpsert;
+ sNC.ncFlags = NC_UUpsert;
+ rc = sqlite3ResolveExprListNames(&sNC, pUpsert->pUpsertTarget);
+ if( rc==SQLITE_OK ){
+ ExprList *pUpsertSet = pUpsert->pUpsertSet;
+ rc = sqlite3ResolveExprListNames(&sNC, pUpsertSet);
+ }
+ if( rc==SQLITE_OK ){
+ rc = sqlite3ResolveExprNames(&sNC, pUpsert->pUpsertWhere);
+ }
+ if( rc==SQLITE_OK ){
+ rc = sqlite3ResolveExprNames(&sNC, pUpsert->pUpsertTargetWhere);
+ }
+ sNC.ncFlags = 0;
+ }
+ sNC.pSrcList = 0;
+ sqlite3SrcListDelete(db, pSrc);
+ }else{
+ rc = SQLITE_NOMEM;
+ }
+ }
+ }
+ return rc;
+}
+
+/*
+** Invoke sqlite3WalkExpr() or sqlite3WalkSelect() on all Select or Expr
+** objects that are part of the trigger passed as the second argument.
+*/
+static void renameWalkTrigger(Walker *pWalker, Trigger *pTrigger){
+ TriggerStep *pStep;
+
+ /* Find tokens to edit in WHEN clause */
+ sqlite3WalkExpr(pWalker, pTrigger->pWhen);
+
+ /* Find tokens to edit in trigger steps */
+ for(pStep=pTrigger->step_list; pStep; pStep=pStep->pNext){
+ sqlite3WalkSelect(pWalker, pStep->pSelect);
+ sqlite3WalkExpr(pWalker, pStep->pWhere);
+ sqlite3WalkExprList(pWalker, pStep->pExprList);
+ if( pStep->pUpsert ){
+ Upsert *pUpsert = pStep->pUpsert;
+ sqlite3WalkExprList(pWalker, pUpsert->pUpsertTarget);
+ sqlite3WalkExprList(pWalker, pUpsert->pUpsertSet);
+ sqlite3WalkExpr(pWalker, pUpsert->pUpsertWhere);
+ sqlite3WalkExpr(pWalker, pUpsert->pUpsertTargetWhere);
+ }
+ }
+}
+
+/*
+** Free the contents of Parse object (*pParse). Do not free the memory
+** occupied by the Parse object itself.
+*/
+static void renameParseCleanup(Parse *pParse){
+ sqlite3 *db = pParse->db;
+ Index *pIdx;
+ if( pParse->pVdbe ){
+ sqlite3VdbeFinalize(pParse->pVdbe);
+ }
+ sqlite3DeleteTable(db, pParse->pNewTable);
+ while( (pIdx = pParse->pNewIndex)!=0 ){
+ pParse->pNewIndex = pIdx->pNext;
+ sqlite3FreeIndex(db, pIdx);
+ }
+ sqlite3DeleteTrigger(db, pParse->pNewTrigger);
+ sqlite3DbFree(db, pParse->zErrMsg);
+ renameTokenFree(db, pParse->pRename);
+ sqlite3ParserReset(pParse);
+}
+
+/*
+** SQL function:
+**
+**       sqlite_rename_column(zSql, type, object, zDb, zTable, iCol, zNew,
+**                            bQuote, bTemp)
+**
+** 0. zSql: SQL statement to rewrite
+** 1. type: Type of object ("table", "view" etc.)
+** 2. object: Name of object
+** 3. Database: Database name (e.g. "main")
+** 4. Table: Table name
+** 5. iCol: Index of column to rename
+** 6. zNew: New column name
+** 7. bQuote: Non-zero if the new column name should be quoted.
+** 8. bTemp: True if zSql comes from temp schema
+**
+** Do a column rename operation on the CREATE statement given in zSql.
+** The iCol-th column (left-most is 0) of table zTable is renamed to zNew.
+** The new name should be quoted if bQuote is true.
+**
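+** For example (an illustrative call, not verbatim output of SQLite,
+** with bQuote==0 and bTemp==0):
+**
+**     sqlite_rename_column('CREATE TABLE t1(a,b)', 'table', 't1',
+**                          'main', 't1', 0, 'x', 0, 0)
+**         -> 'CREATE TABLE t1(x,b)'
+**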
+** This function is used internally by the ALTER TABLE RENAME COLUMN command.
+** It is only accessible to SQL created using sqlite3NestedParse(). It is
+** not reachable from ordinary SQL passed into sqlite3_prepare().
+*/
+static void renameColumnFunc(
+ sqlite3_context *context,
+ int NotUsed,
+ sqlite3_value **argv
+){
+ sqlite3 *db = sqlite3_context_db_handle(context);
+ RenameCtx sCtx;
+ const char *zSql = (const char*)sqlite3_value_text(argv[0]);
+ const char *zDb = (const char*)sqlite3_value_text(argv[3]);
+ const char *zTable = (const char*)sqlite3_value_text(argv[4]);
+ int iCol = sqlite3_value_int(argv[5]);
+ const char *zNew = (const char*)sqlite3_value_text(argv[6]);
+ int bQuote = sqlite3_value_int(argv[7]);
+ int bTemp = sqlite3_value_int(argv[8]);
+ const char *zOld;
+ int rc;
+ Parse sParse;
+ Walker sWalker;
+ Index *pIdx;
+ int i;
+ Table *pTab;
+#ifndef SQLITE_OMIT_AUTHORIZATION
+ sqlite3_xauth xAuth = db->xAuth;
+#endif
+
+ UNUSED_PARAMETER(NotUsed);
+ if( zSql==0 ) return;
+ if( zTable==0 ) return;
+ if( zNew==0 ) return;
+ if( iCol<0 ) return;
+ sqlite3BtreeEnterAll(db);
+ pTab = sqlite3FindTable(db, zTable, zDb);
+ if( pTab==0 || iCol>=pTab->nCol ){
+ sqlite3BtreeLeaveAll(db);
+ return;
+ }
+ zOld = pTab->aCol[iCol].zName;
+ memset(&sCtx, 0, sizeof(sCtx));
+ sCtx.iCol = ((iCol==pTab->iPKey) ? -1 : iCol);
+
+#ifndef SQLITE_OMIT_AUTHORIZATION
+ db->xAuth = 0;
+#endif
+ rc = renameParseSql(&sParse, zDb, db, zSql, bTemp);
+
+ /* Find tokens that need to be replaced. */
+ memset(&sWalker, 0, sizeof(Walker));
+ sWalker.pParse = &sParse;
+ sWalker.xExprCallback = renameColumnExprCb;
+ sWalker.xSelectCallback = renameColumnSelectCb;
+ sWalker.u.pRename = &sCtx;
+
+ sCtx.pTab = pTab;
+ if( rc!=SQLITE_OK ) goto renameColumnFunc_done;
+ if( sParse.pNewTable ){
+ Select *pSelect = sParse.pNewTable->pSelect;
+ if( pSelect ){
+ pSelect->selFlags &= ~SF_View;
+ sParse.rc = SQLITE_OK;
+ sqlite3SelectPrep(&sParse, pSelect, 0);
+ rc = (db->mallocFailed ? SQLITE_NOMEM : sParse.rc);
+ if( rc==SQLITE_OK ){
+ sqlite3WalkSelect(&sWalker, pSelect);
+ }
+ if( rc!=SQLITE_OK ) goto renameColumnFunc_done;
+ }else{
+ /* A regular table */
+ int bFKOnly = sqlite3_stricmp(zTable, sParse.pNewTable->zName);
+ FKey *pFKey;
+ assert( sParse.pNewTable->pSelect==0 );
+ sCtx.pTab = sParse.pNewTable;
+ if( bFKOnly==0 ){
+ renameTokenFind(
+ &sParse, &sCtx, (void*)sParse.pNewTable->aCol[iCol].zName
+ );
+ if( sCtx.iCol<0 ){
+ renameTokenFind(&sParse, &sCtx, (void*)&sParse.pNewTable->iPKey);
+ }
+ sqlite3WalkExprList(&sWalker, sParse.pNewTable->pCheck);
+ for(pIdx=sParse.pNewTable->pIndex; pIdx; pIdx=pIdx->pNext){
+ sqlite3WalkExprList(&sWalker, pIdx->aColExpr);
+ }
+ for(pIdx=sParse.pNewIndex; pIdx; pIdx=pIdx->pNext){
+ sqlite3WalkExprList(&sWalker, pIdx->aColExpr);
+ }
+ }
+#ifndef SQLITE_OMIT_GENERATED_COLUMNS
+ for(i=0; i<sParse.pNewTable->nCol; i++){
+ sqlite3WalkExpr(&sWalker, sParse.pNewTable->aCol[i].pDflt);
+ }
+#endif
+
+ for(pFKey=sParse.pNewTable->pFKey; pFKey; pFKey=pFKey->pNextFrom){
+ for(i=0; i<pFKey->nCol; i++){
+ if( bFKOnly==0 && pFKey->aCol[i].iFrom==iCol ){
+ renameTokenFind(&sParse, &sCtx, (void*)&pFKey->aCol[i]);
+ }
+ if( 0==sqlite3_stricmp(pFKey->zTo, zTable)
+ && 0==sqlite3_stricmp(pFKey->aCol[i].zCol, zOld)
+ ){
+ renameTokenFind(&sParse, &sCtx, (void*)pFKey->aCol[i].zCol);
+ }
+ }
+ }
+ }
+ }else if( sParse.pNewIndex ){
+ sqlite3WalkExprList(&sWalker, sParse.pNewIndex->aColExpr);
+ sqlite3WalkExpr(&sWalker, sParse.pNewIndex->pPartIdxWhere);
+ }else{
+ /* A trigger */
+ TriggerStep *pStep;
+ rc = renameResolveTrigger(&sParse);
+ if( rc!=SQLITE_OK ) goto renameColumnFunc_done;
+
+ for(pStep=sParse.pNewTrigger->step_list; pStep; pStep=pStep->pNext){
+ if( pStep->zTarget ){
+ Table *pTarget = sqlite3LocateTable(&sParse, 0, pStep->zTarget, zDb);
+ if( pTarget==pTab ){
+ if( pStep->pUpsert ){
+ ExprList *pUpsertSet = pStep->pUpsert->pUpsertSet;
+ renameColumnElistNames(&sParse, &sCtx, pUpsertSet, zOld);
+ }
+ renameColumnIdlistNames(&sParse, &sCtx, pStep->pIdList, zOld);
+ renameColumnElistNames(&sParse, &sCtx, pStep->pExprList, zOld);
+ }
+ }
+ }
+
+
+ /* Find tokens to edit in UPDATE OF clause */
+ if( sParse.pTriggerTab==pTab ){
+ renameColumnIdlistNames(&sParse, &sCtx,sParse.pNewTrigger->pColumns,zOld);
+ }
+
+ /* Find tokens to edit in various expressions and selects */
+ renameWalkTrigger(&sWalker, sParse.pNewTrigger);
+ }
+
+ assert( rc==SQLITE_OK );
+ rc = renameEditSql(context, &sCtx, zSql, zNew, bQuote);
+
+renameColumnFunc_done:
+ if( rc!=SQLITE_OK ){
+ if( sParse.zErrMsg ){
+ renameColumnParseError(context, 0, argv[1], argv[2], &sParse);
+ }else{
+ sqlite3_result_error_code(context, rc);
+ }
+ }
+
+ renameParseCleanup(&sParse);
+ renameTokenFree(db, sCtx.pList);
+#ifndef SQLITE_OMIT_AUTHORIZATION
+ db->xAuth = xAuth;
+#endif
+ sqlite3BtreeLeaveAll(db);
+}
+
+/*
+** Walker expression callback used by "RENAME TABLE".
+*/
+static int renameTableExprCb(Walker *pWalker, Expr *pExpr){
+ RenameCtx *p = pWalker->u.pRename;
+ if( pExpr->op==TK_COLUMN && p->pTab==pExpr->y.pTab ){
+ renameTokenFind(pWalker->pParse, p, (void*)&pExpr->y.pTab);
+ }
+ return WRC_Continue;
+}
+
+/*
+** Walker select callback used by "RENAME TABLE".
+*/
+static int renameTableSelectCb(Walker *pWalker, Select *pSelect){
+ int i;
+ RenameCtx *p = pWalker->u.pRename;
+ SrcList *pSrc = pSelect->pSrc;
+ if( pSelect->selFlags & SF_View ) return WRC_Prune;
+ if( pSrc==0 ){
+ assert( pWalker->pParse->db->mallocFailed );
+ return WRC_Abort;
+ }
+ for(i=0; i<pSrc->nSrc; i++){
+ struct SrcList_item *pItem = &pSrc->a[i];
+ if( pItem->pTab==p->pTab ){
+ renameTokenFind(pWalker->pParse, p, pItem->zName);
+ }
+ }
+ renameWalkWith(pWalker, pSelect);
+
+ return WRC_Continue;
+}
+
+
+/*
+** This C function implements an SQL user function that is used by SQL code
+** generated by the ALTER TABLE ... RENAME command to modify the definition
+** of any foreign key constraints that use the table being renamed as the
+** parent table. It is passed seven arguments:
+**
+** 0: The database containing the table being renamed.
+** 1: Type of object ("table", "view" etc.)
+** 2: Name of object
+** 3: The complete text of the schema statement being modified,
+** 4: The old name of the table being renamed, and
+** 5: The new name of the table being renamed.
+** 6: True if the schema statement comes from the temp db.
+**
+** It returns the new schema statement. For example:
+**
+**   sqlite_rename_table('main', 'table', 't1',
+**       'CREATE TABLE t1(a REFERENCES t2)', 't2', 't3', 0)
+**           -> 'CREATE TABLE t1(a REFERENCES t3)'
+*/
+static void renameTableFunc(
+ sqlite3_context *context,
+ int NotUsed,
+ sqlite3_value **argv
+){
+ sqlite3 *db = sqlite3_context_db_handle(context);
+ const char *zDb = (const char*)sqlite3_value_text(argv[0]);
+ const char *zInput = (const char*)sqlite3_value_text(argv[3]);
+ const char *zOld = (const char*)sqlite3_value_text(argv[4]);
+ const char *zNew = (const char*)sqlite3_value_text(argv[5]);
+ int bTemp = sqlite3_value_int(argv[6]);
+ UNUSED_PARAMETER(NotUsed);
+
+ if( zInput && zOld && zNew ){
+ Parse sParse;
+ int rc;
+ int bQuote = 1;
+ RenameCtx sCtx;
+ Walker sWalker;
+
+#ifndef SQLITE_OMIT_AUTHORIZATION
+ sqlite3_xauth xAuth = db->xAuth;
+ db->xAuth = 0;
+#endif
+
+ sqlite3BtreeEnterAll(db);
+
+ memset(&sCtx, 0, sizeof(RenameCtx));
+ sCtx.pTab = sqlite3FindTable(db, zOld, zDb);
+ memset(&sWalker, 0, sizeof(Walker));
+ sWalker.pParse = &sParse;
+ sWalker.xExprCallback = renameTableExprCb;
+ sWalker.xSelectCallback = renameTableSelectCb;
+ sWalker.u.pRename = &sCtx;
+
+ rc = renameParseSql(&sParse, zDb, db, zInput, bTemp);
+
+ if( rc==SQLITE_OK ){
+ int isLegacy = (db->flags & SQLITE_LegacyAlter);
+ if( sParse.pNewTable ){
+ Table *pTab = sParse.pNewTable;
+
+ if( pTab->pSelect ){
+ if( isLegacy==0 ){
+ Select *pSelect = pTab->pSelect;
+ NameContext sNC;
+ memset(&sNC, 0, sizeof(sNC));
+ sNC.pParse = &sParse;
+
+ assert( pSelect->selFlags & SF_View );
+ pSelect->selFlags &= ~SF_View;
+ sqlite3SelectPrep(&sParse, pTab->pSelect, &sNC);
+ if( sParse.nErr ){
+ rc = sParse.rc;
+ }else{
+ sqlite3WalkSelect(&sWalker, pTab->pSelect);
+ }
+ }
+ }else{
+ /* Modify any FK definitions to point to the new table. */
+#ifndef SQLITE_OMIT_FOREIGN_KEY
+ if( isLegacy==0 || (db->flags & SQLITE_ForeignKeys) ){
+ FKey *pFKey;
+ for(pFKey=pTab->pFKey; pFKey; pFKey=pFKey->pNextFrom){
+ if( sqlite3_stricmp(pFKey->zTo, zOld)==0 ){
+ renameTokenFind(&sParse, &sCtx, (void*)pFKey->zTo);
+ }
+ }
+ }
+#endif
+
+ /* If this is the table being altered, fix any table refs in CHECK
+ ** expressions. Also update the name that appears right after the
+ ** "CREATE [VIRTUAL] TABLE" bit. */
+ if( sqlite3_stricmp(zOld, pTab->zName)==0 ){
+ sCtx.pTab = pTab;
+ if( isLegacy==0 ){
+ sqlite3WalkExprList(&sWalker, pTab->pCheck);
+ }
+ renameTokenFind(&sParse, &sCtx, pTab->zName);
+ }
+ }
+ }
+
+ else if( sParse.pNewIndex ){
+ renameTokenFind(&sParse, &sCtx, sParse.pNewIndex->zName);
+ if( isLegacy==0 ){
+ sqlite3WalkExpr(&sWalker, sParse.pNewIndex->pPartIdxWhere);
+ }
+ }
+
+#ifndef SQLITE_OMIT_TRIGGER
+ else{
+ Trigger *pTrigger = sParse.pNewTrigger;
+ TriggerStep *pStep;
+ if( 0==sqlite3_stricmp(sParse.pNewTrigger->table, zOld)
+ && sCtx.pTab->pSchema==pTrigger->pTabSchema
+ ){
+ renameTokenFind(&sParse, &sCtx, sParse.pNewTrigger->table);
+ }
+
+ if( isLegacy==0 ){
+ rc = renameResolveTrigger(&sParse);
+ if( rc==SQLITE_OK ){
+ renameWalkTrigger(&sWalker, pTrigger);
+ for(pStep=pTrigger->step_list; pStep; pStep=pStep->pNext){
+ if( pStep->zTarget && 0==sqlite3_stricmp(pStep->zTarget, zOld) ){
+ renameTokenFind(&sParse, &sCtx, pStep->zTarget);
+ }
+ }
+ }
+ }
+ }
+#endif
+ }
+
+ if( rc==SQLITE_OK ){
+ rc = renameEditSql(context, &sCtx, zInput, zNew, bQuote);
+ }
+ if( rc!=SQLITE_OK ){
+ if( sParse.zErrMsg ){
+ renameColumnParseError(context, 0, argv[1], argv[2], &sParse);
+ }else{
+ sqlite3_result_error_code(context, rc);
+ }
+ }
+
+ renameParseCleanup(&sParse);
+ renameTokenFree(db, sCtx.pList);
+ sqlite3BtreeLeaveAll(db);
+#ifndef SQLITE_OMIT_AUTHORIZATION
+ db->xAuth = xAuth;
+#endif
+ }
+
+ return;
+}
+
+/*
+** An SQL user function that checks that there are no parse or symbol
+** resolution problems in a CREATE TRIGGER|TABLE|VIEW|INDEX statement.
+** After an ALTER TABLE .. RENAME operation is performed and the schema
+** reloaded, this function is called on each SQL statement in the schema
+** to ensure that it is still usable.
+**
+** 0: Database name ("main", "temp" etc.).
+** 1: SQL statement.
+** 2: Object type ("view", "table", "trigger" or "index").
+** 3: Object name.
+** 4: True if object is from temp schema.
+**
+** Unless it finds an error, this function normally returns NULL. However, it
+** returns integer value 1 if:
+**
+** * the SQL argument creates a trigger, and
+** * the table that the trigger is attached to is in database zDb.
+*/
+static void renameTableTest(
+ sqlite3_context *context,
+ int NotUsed,
+ sqlite3_value **argv
+){
+ sqlite3 *db = sqlite3_context_db_handle(context);
+ char const *zDb = (const char*)sqlite3_value_text(argv[0]);
+ char const *zInput = (const char*)sqlite3_value_text(argv[1]);
+ int bTemp = sqlite3_value_int(argv[4]);
+ int isLegacy = (db->flags & SQLITE_LegacyAlter);
+
+#ifndef SQLITE_OMIT_AUTHORIZATION
+ sqlite3_xauth xAuth = db->xAuth;
+ db->xAuth = 0;
+#endif
+
+ UNUSED_PARAMETER(NotUsed);
+ if( zDb && zInput ){
+ int rc;
+ Parse sParse;
+ rc = renameParseSql(&sParse, zDb, db, zInput, bTemp);
+ if( rc==SQLITE_OK ){
+ if( isLegacy==0 && sParse.pNewTable && sParse.pNewTable->pSelect ){
+ NameContext sNC;
+ memset(&sNC, 0, sizeof(sNC));
+ sNC.pParse = &sParse;
+ sqlite3SelectPrep(&sParse, sParse.pNewTable->pSelect, &sNC);
+ if( sParse.nErr ) rc = sParse.rc;
+ }
+
+ else if( sParse.pNewTrigger ){
+ if( isLegacy==0 ){
+ rc = renameResolveTrigger(&sParse);
+ }
+ if( rc==SQLITE_OK ){
+ int i1 = sqlite3SchemaToIndex(db, sParse.pNewTrigger->pTabSchema);
+ int i2 = sqlite3FindDbName(db, zDb);
+ if( i1==i2 ) sqlite3_result_int(context, 1);
+ }
+ }
+ }
+
+ if( rc!=SQLITE_OK ){
+ renameColumnParseError(context, 1, argv[2], argv[3], &sParse);
+ }
+ renameParseCleanup(&sParse);
+ }
+
+#ifndef SQLITE_OMIT_AUTHORIZATION
+ db->xAuth = xAuth;
+#endif
+}
+
+/*
+** Register built-in functions used to help implement ALTER TABLE
+*/
+SQLITE_PRIVATE void sqlite3AlterFunctions(void){
+ static FuncDef aAlterTableFuncs[] = {
+ INTERNAL_FUNCTION(sqlite_rename_column, 9, renameColumnFunc),
+ INTERNAL_FUNCTION(sqlite_rename_table, 7, renameTableFunc),
+ INTERNAL_FUNCTION(sqlite_rename_test, 5, renameTableTest),
+ };
+ sqlite3InsertBuiltinFuncs(aAlterTableFuncs, ArraySize(aAlterTableFuncs));
+}
+#endif /* SQLITE_ALTER_TABLE */
+
+/************** End of alter.c ***********************************************/
+/************** Begin file analyze.c *****************************************/
+/*
+** 2005-07-08
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+** This file contains code associated with the ANALYZE command.
+**
+** The ANALYZE command gathers statistics about the content of tables
+** and indices. These statistics are made available to the query planner
+** to help it make better decisions about how to perform queries.
+**
+** The following system tables are or have been supported:
+**
+** CREATE TABLE sqlite_stat1(tbl, idx, stat);
+** CREATE TABLE sqlite_stat2(tbl, idx, sampleno, sample);
+** CREATE TABLE sqlite_stat3(tbl, idx, nEq, nLt, nDLt, sample);
+** CREATE TABLE sqlite_stat4(tbl, idx, nEq, nLt, nDLt, sample);
+**
+** Additional tables might be added in future releases of SQLite.
+** The sqlite_stat2 table is not created or used unless the SQLite version
+** is between 3.6.18 and 3.7.8, inclusive, and unless SQLite is compiled
+** with SQLITE_ENABLE_STAT2. The sqlite_stat2 table is deprecated.
+** The sqlite_stat2 table is superseded by sqlite_stat3, which is only
+** created and used by SQLite versions 3.7.9 through 3.29.0 when
+** SQLITE_ENABLE_STAT3 is defined. The functionality of sqlite_stat3
+** is a superset of sqlite_stat2 and is also now deprecated. The
+** sqlite_stat4 is an enhanced version of sqlite_stat3 and is only
+** available when compiled with SQLITE_ENABLE_STAT4 and in SQLite
+** versions 3.8.1 and later. STAT4 is the only variant that is still
+** supported.
+**
+** For most applications, sqlite_stat1 provides all the statistics required
+** for the query planner to make good choices.
+**
+** Format of sqlite_stat1:
+**
+** There is normally one row per index, with the index identified by the
+** name in the idx column. The tbl column is the name of the table to
+** which the index belongs. In each such row, the stat column will be
+** a string consisting of a list of integers. The first integer in this
+** list is the number of rows in the index. (This is the same as the
+** number of rows in the table, except for partial indices.) The second
+** integer is the average number of rows in the index that have the same
+** value in the first column of the index. The third integer is the average
+** number of rows in the index that have the same value for the first two
+** columns. The N-th integer (for N>1) is the average number of rows in
+** the index which have the same value for the first N-1 columns. For
+** a K-column index, there will be K+1 integers in the stat column. If
+** the index is unique, then the last integer will be 1.
+**
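+** For example (an illustrative two-column index):
+**
+**     CREATE INDEX i1 ON t1(a,b);
+**     -- sqlite_stat1 row: ('t1', 'i1', '100 10 2')
+**
+** The row above would mean that the index holds roughly 100 rows, that
+** about 10 rows share any one value of "a", and that about 2 rows share
+** any one (a,b) pair.
+**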
+** The list of integers in the stat column can optionally be followed
+** by the keyword "unordered". The "unordered" keyword, if it is present,
+** must be separated from the last integer by a single space. If the
+** "unordered" keyword is present, then the query planner assumes that
+** the index is unordered and will not use the index for a range query.
+**
+** If the sqlite_stat1.idx column is NULL, then the sqlite_stat1.stat
+** column contains a single integer which is the (estimated) number of
+** rows in the table identified by sqlite_stat1.tbl.
+**
+** Format of sqlite_stat2:
+**
+** The sqlite_stat2 table is only created and used if SQLite is compiled
+** with SQLITE_ENABLE_STAT2 and if the SQLite version number is between
+** 3.6.18 and 3.7.8. The "stat2" table contains additional information
+** about the distribution of keys within an index. The index is identified by
+** the "idx" column and the "tbl" column is the name of the table to which
+** the index belongs. There are usually 10 rows in the sqlite_stat2
+** table for each index.
+**
+** The sqlite_stat2 entries for an index that have sampleno between 0 and 9
+** inclusive are samples of the left-most key value in the index taken at
+** evenly spaced points along the index. Let the number of samples be S
+** (10 in the standard build) and let C be the number of rows in the index.
+** Then the sampled rows are given by:
+**
+** rownumber = (i*C*2 + C)/(S*2)
+**
+** For i between 0 and S-1. Conceptually, the index space is divided into
+** S uniform buckets and the samples are the middle row from each bucket.
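+**
+** For example (illustrative): with S==10 and C==1000 the sampled rows
+** are (i*2000 + 1000)/20, i.e. rows 50, 150, 250, ..., 950, the middle
+** row of each bucket of 100 rows.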
+**
+** The format for sqlite_stat2 is recorded here for legacy reference. This
+** version of SQLite does not support sqlite_stat2. It neither reads nor
+** writes the sqlite_stat2 table. This version of SQLite only supports
+** sqlite_stat4.
+**
+** Format for sqlite_stat3:
+**
+** The sqlite_stat3 format is a subset of sqlite_stat4. Hence, the
+** sqlite_stat4 format will be described first. Further information
+** about sqlite_stat3 follows the sqlite_stat4 description.
+**
+** Format for sqlite_stat4:
+**
+** As with sqlite_stat2, the sqlite_stat4 table contains histogram data
+** to aid the query planner in choosing good indices based on the values
+** that indexed columns are compared against in the WHERE clauses of
+** queries.
+**
+** The sqlite_stat4 table contains multiple entries for each index.
+** The idx column names the index and the tbl column is the table of the
+** index. If the idx and tbl columns are the same, then the sample is
+** of the INTEGER PRIMARY KEY. The sample column is a blob which is the
+** binary encoding of a key from the index. The nEq column is a
+** list of integers. The first integer is the approximate number
+** of entries in the index whose left-most column exactly matches
+** the left-most column of the sample. The second integer in nEq
+** is the approximate number of entries in the index where the
+** first two columns match the first two columns of the sample.
+** And so forth. nLt is another list of integers that show the approximate
+** number of entries that are strictly less than the sample. The first
+** integer in nLt contains the number of entries in the index where the
+** left-most column is less than the left-most column of the sample.
+** The K-th integer in the nLt entry is the number of index entries
+** where the first K columns are less than the first K columns of the
+** sample. The nDLt column is like nLt except that it contains the
+** number of distinct entries in the index that are less than the
+** sample.
+**
+** There can be an arbitrary number of sqlite_stat4 entries per index.
+** The ANALYZE command will typically generate sqlite_stat4 tables
+** that contain between 10 and 40 samples which are distributed across
+** the key space, though not uniformly, and which include samples with
+** large nEq values.
+**
+** Format for sqlite_stat3 redux:
+**
+** The sqlite_stat3 table is like sqlite_stat4 except that it only
+** looks at the left-most column of the index. The sqlite_stat3.sample
+** column contains the actual value of the left-most column instead
+** of a blob encoding of the complete index key as is found in
+** sqlite_stat4.sample. The nEq, nLt, and nDLt entries of sqlite_stat3
+** all contain just a single integer which is the same as the first
+** integer in the equivalent columns in sqlite_stat4.
+*/
+#ifndef SQLITE_OMIT_ANALYZE
+/* #include "sqliteInt.h" */
+
+#if defined(SQLITE_ENABLE_STAT4)
+# define IsStat4 1
+#else
+# define IsStat4 0
+# undef SQLITE_STAT4_SAMPLES
+# define SQLITE_STAT4_SAMPLES 1
+#endif
+
+/*
+** This routine generates code that opens the sqlite_statN tables.
+** The sqlite_stat1 table is always relevant. sqlite_stat2 and
+** sqlite_stat3 are now obsolete. sqlite_stat4 is only opened when
+** SQLite is compiled with SQLITE_ENABLE_STAT4.
+**
+** If the sqlite_statN tables do not already exist, they are created.
+**
+** Argument zWhere may be a pointer to a buffer containing a table name,
+** or it may be a NULL pointer. If it is not NULL, then all entries in
+** the sqlite_statN tables associated with the named table are deleted.
+** If zWhere==0, then code is generated to delete all stat table entries.
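+**
+** For example (illustrative, assuming the "main" schema): zWhere=="t1"
+** with zWhereType=="tbl" generates code equivalent to the following for
+** each stat table:
+**
+**     DELETE FROM 'main'.sqlite_stat1 WHERE tbl='t1';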
+*/
+static void openStatTable(
+ Parse *pParse, /* Parsing context */
+ int iDb, /* The database we are looking in */
+ int iStatCur, /* Open the sqlite_stat1 table on this cursor */
+ const char *zWhere, /* Delete entries for this table or index */
+ const char *zWhereType /* Either "tbl" or "idx" */
+){
+ static const struct {
+ const char *zName;
+ const char *zCols;
+ } aTable[] = {
+ { "sqlite_stat1", "tbl,idx,stat" },
+#if defined(SQLITE_ENABLE_STAT4)
+ { "sqlite_stat4", "tbl,idx,neq,nlt,ndlt,sample" },
+#else
+ { "sqlite_stat4", 0 },
+#endif
+ { "sqlite_stat3", 0 },
+ };
+ int i;
+ sqlite3 *db = pParse->db;
+ Db *pDb;
+ Vdbe *v = sqlite3GetVdbe(pParse);
+ u32 aRoot[ArraySize(aTable)];
+ u8 aCreateTbl[ArraySize(aTable)];
+#ifdef SQLITE_ENABLE_STAT4
+ const int nToOpen = OptimizationEnabled(db,SQLITE_Stat4) ? 2 : 1;
+#else
+ const int nToOpen = 1;
+#endif
+
+ if( v==0 ) return;
+ assert( sqlite3BtreeHoldsAllMutexes(db) );
+ assert( sqlite3VdbeDb(v)==db );
+ pDb = &db->aDb[iDb];
+
+ /* Create new statistic tables if they do not exist, or clear them
+ ** if they do already exist.
+ */
+ for(i=0; i<ArraySize(aTable); i++){
+ const char *zTab = aTable[i].zName;
+ Table *pStat;
+ aCreateTbl[i] = 0;
+ if( (pStat = sqlite3FindTable(db, zTab, pDb->zDbSName))==0 ){
+ if( i<nToOpen ){
+ /* The sqlite_statN table does not exist. Create it. Note that a
+ ** side-effect of the CREATE TABLE statement is to leave the rootpage
+ ** of the new table in register pParse->regRoot. This is important
+        ** because the OpenWrite opcode below will need it. */
+ sqlite3NestedParse(pParse,
+ "CREATE TABLE %Q.%s(%s)", pDb->zDbSName, zTab, aTable[i].zCols
+ );
+ aRoot[i] = (u32)pParse->regRoot;
+ aCreateTbl[i] = OPFLAG_P2ISREG;
+ }
+ }else{
+ /* The table already exists. If zWhere is not NULL, delete all entries
+ ** associated with the table zWhere. If zWhere is NULL, delete the
+ ** entire contents of the table. */
+ aRoot[i] = pStat->tnum;
+ sqlite3TableLock(pParse, iDb, aRoot[i], 1, zTab);
+ if( zWhere ){
+ sqlite3NestedParse(pParse,
+ "DELETE FROM %Q.%s WHERE %s=%Q",
+ pDb->zDbSName, zTab, zWhereType, zWhere
+ );
+#ifdef SQLITE_ENABLE_PREUPDATE_HOOK
+ }else if( db->xPreUpdateCallback ){
+ sqlite3NestedParse(pParse, "DELETE FROM %Q.%s", pDb->zDbSName, zTab);
+#endif
+ }else{
+ /* The sqlite_stat[134] table already exists. Delete all rows. */
+ sqlite3VdbeAddOp2(v, OP_Clear, (int)aRoot[i], iDb);
+ }
+ }
+ }
+
+ /* Open the sqlite_stat[134] tables for writing. */
+ for(i=0; i<nToOpen; i++){
+ assert( i<ArraySize(aTable) );
+ sqlite3VdbeAddOp4Int(v, OP_OpenWrite, iStatCur+i, (int)aRoot[i], iDb, 3);
+ sqlite3VdbeChangeP5(v, aCreateTbl[i]);
+ VdbeComment((v, aTable[i].zName));
+ }
+}
+
+/*
+** Recommended number of samples for sqlite_stat4
+*/
+#ifndef SQLITE_STAT4_SAMPLES
+# define SQLITE_STAT4_SAMPLES 24
+#endif
+
+/*
+** Three SQL functions - stat_init(), stat_push(), and stat_get() -
+** share an instance of the following structure to hold their state
+** information.
+*/
+typedef struct StatAccum StatAccum;
+typedef struct StatSample StatSample;
+struct StatSample {
+ tRowcnt *anEq; /* sqlite_stat4.nEq */
+ tRowcnt *anDLt; /* sqlite_stat4.nDLt */
+#ifdef SQLITE_ENABLE_STAT4
+ tRowcnt *anLt; /* sqlite_stat4.nLt */
+ union {
+ i64 iRowid; /* Rowid in main table of the key */
+ u8 *aRowid; /* Key for WITHOUT ROWID tables */
+ } u;
+ u32 nRowid; /* Sizeof aRowid[] */
+ u8 isPSample; /* True if a periodic sample */
+ int iCol; /* If !isPSample, the reason for inclusion */
+ u32 iHash; /* Tiebreaker hash */
+#endif
+};
+struct StatAccum {
+ sqlite3 *db; /* Database connection, for malloc() */
+ tRowcnt nEst; /* Estimated number of rows */
+ tRowcnt nRow; /* Number of rows visited so far */
+ int nLimit; /* Analysis row-scan limit */
+ int nCol; /* Number of columns in index + pk/rowid */
+ int nKeyCol; /* Number of index columns w/o the pk/rowid */
+ u8 nSkipAhead; /* Number of times of skip-ahead */
+ StatSample current; /* Current row as a StatSample */
+#ifdef SQLITE_ENABLE_STAT4
+ tRowcnt nPSample; /* How often to do a periodic sample */
+ int mxSample; /* Maximum number of samples to accumulate */
+ u32 iPrn; /* Pseudo-random number used for sampling */
+ StatSample *aBest; /* Array of nCol best samples */
+ int iMin; /* Index in a[] of entry with minimum score */
+ int nSample; /* Current number of samples */
+ int nMaxEqZero; /* Max leading 0 in anEq[] for any a[] entry */
+ int iGet; /* Index of current sample accessed by stat_get() */
+ StatSample *a; /* Array of mxSample StatSample objects */
+#endif
+};
+
+/* Reclaim memory used by a StatSample
+*/
+#ifdef SQLITE_ENABLE_STAT4
+static void sampleClear(sqlite3 *db, StatSample *p){
+ assert( db!=0 );
+ if( p->nRowid ){
+ sqlite3DbFree(db, p->u.aRowid);
+ p->nRowid = 0;
+ }
+}
+#endif
+
+/* Initialize the BLOB value of a ROWID
+*/
+#ifdef SQLITE_ENABLE_STAT4
+static void sampleSetRowid(sqlite3 *db, StatSample *p, int n, const u8 *pData){
+ assert( db!=0 );
+ if( p->nRowid ) sqlite3DbFree(db, p->u.aRowid);
+ p->u.aRowid = sqlite3DbMallocRawNN(db, n);
+ if( p->u.aRowid ){
+ p->nRowid = n;
+ memcpy(p->u.aRowid, pData, n);
+ }else{
+ p->nRowid = 0;
+ }
+}
+#endif
+
+/* Initialize the INTEGER value of a ROWID.
+*/
+#ifdef SQLITE_ENABLE_STAT4
+static void sampleSetRowidInt64(sqlite3 *db, StatSample *p, i64 iRowid){
+ assert( db!=0 );
+ if( p->nRowid ) sqlite3DbFree(db, p->u.aRowid);
+ p->nRowid = 0;
+ p->u.iRowid = iRowid;
+}
+#endif
+
+
+/*
+** Copy the contents of object (*pFrom) into (*pTo).
+*/
+#ifdef SQLITE_ENABLE_STAT4
+static void sampleCopy(StatAccum *p, StatSample *pTo, StatSample *pFrom){
+ pTo->isPSample = pFrom->isPSample;
+ pTo->iCol = pFrom->iCol;
+ pTo->iHash = pFrom->iHash;
+ memcpy(pTo->anEq, pFrom->anEq, sizeof(tRowcnt)*p->nCol);
+ memcpy(pTo->anLt, pFrom->anLt, sizeof(tRowcnt)*p->nCol);
+ memcpy(pTo->anDLt, pFrom->anDLt, sizeof(tRowcnt)*p->nCol);
+ if( pFrom->nRowid ){
+ sampleSetRowid(p->db, pTo, pFrom->nRowid, pFrom->u.aRowid);
+ }else{
+ sampleSetRowidInt64(p->db, pTo, pFrom->u.iRowid);
+ }
+}
+#endif
+
+/*
+** Reclaim all memory of a StatAccum structure.
+*/
+static void statAccumDestructor(void *pOld){
+ StatAccum *p = (StatAccum*)pOld;
+#ifdef SQLITE_ENABLE_STAT4
+ if( p->mxSample ){
+ int i;
+ for(i=0; i<p->nCol; i++) sampleClear(p->db, p->aBest+i);
+ for(i=0; i<p->mxSample; i++) sampleClear(p->db, p->a+i);
+ sampleClear(p->db, &p->current);
+ }
+#endif
+ sqlite3DbFree(p->db, p);
+}
+
+/*
+** Implementation of the stat_init(N,K,C,L) SQL function. The four parameters
+** are:
+** N: The number of columns in the index including the rowid/pk (note 1)
+** K: The number of columns in the index excluding the rowid/pk.
+** C: Estimated number of rows in the index
+** L: A limit on the number of rows to scan, or 0 for no-limit
+**
+** Note 1: In the special case of the covering index that implements a
+** WITHOUT ROWID table, N is the number of PRIMARY KEY columns, not the
+** total number of columns in the table.
+**
+** For indexes on ordinary rowid tables, N==K+1. But for indexes on
+** WITHOUT ROWID tables, N=K+P where P is the number of columns in the
+** PRIMARY KEY of the table. The covering index that implements the
+** original WITHOUT ROWID table has N==K as a special case.
+**
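+** For example (illustrative): for CREATE INDEX i1 ON t1(a,b) on an
+** ordinary rowid table, K==2 and N==3 (columns a, b and the rowid).
+**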
+** This routine allocates the StatAccum object in heap memory. The return
+** value is a pointer to the StatAccum object. The datatype of the
+** return value is BLOB, but it is really just a pointer to the StatAccum
+** object.
+*/
+static void statInit(
+ sqlite3_context *context,
+ int argc,
+ sqlite3_value **argv
+){
+ StatAccum *p;
+ int nCol; /* Number of columns in index being sampled */
+ int nKeyCol; /* Number of key columns */
+ int nColUp; /* nCol rounded up for alignment */
+ int n; /* Bytes of space to allocate */
+ sqlite3 *db = sqlite3_context_db_handle(context); /* Database connection */
+#ifdef SQLITE_ENABLE_STAT4
+ /* Maximum number of samples. 0 if STAT4 data is not collected */
+ int mxSample = OptimizationEnabled(db,SQLITE_Stat4) ?SQLITE_STAT4_SAMPLES :0;
+#endif
+
+  /* Decode the function arguments */
+ UNUSED_PARAMETER(argc);
+ nCol = sqlite3_value_int(argv[0]);
+ assert( nCol>0 );
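+  /* If tRowcnt is a 32-bit type, round nCol up to the next even value
+  ** so that the tRowcnt arrays allocated below stay 8-byte aligned. */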
+ nColUp = sizeof(tRowcnt)<8 ? (nCol+1)&~1 : nCol;
+ nKeyCol = sqlite3_value_int(argv[1]);
+ assert( nKeyCol<=nCol );
+ assert( nKeyCol>0 );
+
+ /* Allocate the space required for the StatAccum object */
+ n = sizeof(*p)
+ + sizeof(tRowcnt)*nColUp /* StatAccum.anEq */
+ + sizeof(tRowcnt)*nColUp; /* StatAccum.anDLt */
+#ifdef SQLITE_ENABLE_STAT4
+ if( mxSample ){
+ n += sizeof(tRowcnt)*nColUp /* StatAccum.anLt */
+ + sizeof(StatSample)*(nCol+mxSample) /* StatAccum.aBest[], a[] */
+ + sizeof(tRowcnt)*3*nColUp*(nCol+mxSample);
+ }
+#endif
+ db = sqlite3_context_db_handle(context);
+ p = sqlite3DbMallocZero(db, n);
+ if( p==0 ){
+ sqlite3_result_error_nomem(context);
+ return;
+ }
+
+ p->db = db;
+ p->nEst = sqlite3_value_int64(argv[2]);
+ p->nRow = 0;
+ p->nLimit = sqlite3_value_int64(argv[3]);
+ p->nCol = nCol;
+ p->nKeyCol = nKeyCol;
+ p->nSkipAhead = 0;
+ p->current.anDLt = (tRowcnt*)&p[1];
+ p->current.anEq = &p->current.anDLt[nColUp];
+
+#ifdef SQLITE_ENABLE_STAT4
+ p->mxSample = p->nLimit==0 ? mxSample : 0;
+ if( mxSample ){
+ u8 *pSpace; /* Allocated space not yet assigned */
+ int i; /* Used to iterate through p->aSample[] */
+
+ p->iGet = -1;
+ p->nPSample = (tRowcnt)(p->nEst/(mxSample/3+1) + 1);
+ p->current.anLt = &p->current.anEq[nColUp];
+ p->iPrn = 0x689e962d*(u32)nCol ^ 0xd0944565*(u32)sqlite3_value_int(argv[2]);
+
+ /* Set up the StatAccum.a[] and aBest[] arrays */
+ p->a = (struct StatSample*)&p->current.anLt[nColUp];
+ p->aBest = &p->a[mxSample];
+ pSpace = (u8*)(&p->a[mxSample+nCol]);
+ for(i=0; i<(mxSample+nCol); i++){
+ p->a[i].anEq = (tRowcnt *)pSpace; pSpace += (sizeof(tRowcnt) * nColUp);
+ p->a[i].anLt = (tRowcnt *)pSpace; pSpace += (sizeof(tRowcnt) * nColUp);
+ p->a[i].anDLt = (tRowcnt *)pSpace; pSpace += (sizeof(tRowcnt) * nColUp);
+ }
+ assert( (pSpace - (u8*)p)==n );
+
+ for(i=0; i<nCol; i++){
+ p->aBest[i].iCol = i;
+ }
+ }
+#endif
+
+ /* Return a pointer to the allocated object to the caller. Note that
+ ** only the pointer (the 2nd parameter) matters. The size of the object
+ ** (given by the 3rd parameter) is never used and can be any positive
+ ** value. */
+ sqlite3_result_blob(context, p, sizeof(*p), statAccumDestructor);
+}
+static const FuncDef statInitFuncdef = {
+ 4, /* nArg */
+ SQLITE_UTF8, /* funcFlags */
+ 0, /* pUserData */
+ 0, /* pNext */
+ statInit, /* xSFunc */
+ 0, /* xFinalize */
+ 0, 0, /* xValue, xInverse */
+ "stat_init", /* zName */
+ {0}
+};
+
+#ifdef SQLITE_ENABLE_STAT4
+/*
+** pNew and pOld are both candidate non-periodic samples selected for
+** the same column (pNew->iCol==pOld->iCol). Ignoring this column and
+** considering only any trailing columns and the sample hash value, this
+** function returns true if sample pNew is to be preferred over pOld.
+** In other words, assuming that the cardinalities of the selected
+** column are equal for pNew and pOld, this decides whether pNew is to
+** be preferred over pOld.
+**
+** This function assumes that for each argument sample, the contents of
+** the anEq[] array from pSample->anEq[pSample->iCol+1] onwards are valid.
+*/
+static int sampleIsBetterPost(
+ StatAccum *pAccum,
+ StatSample *pNew,
+ StatSample *pOld
+){
+ int nCol = pAccum->nCol;
+ int i;
+ assert( pNew->iCol==pOld->iCol );
+ for(i=pNew->iCol+1; i<nCol; i++){
+ if( pNew->anEq[i]>pOld->anEq[i] ) return 1;
+ if( pNew->anEq[i]<pOld->anEq[i] ) return 0;
+ }
+ if( pNew->iHash>pOld->iHash ) return 1;
+ return 0;
+}
+#endif
+
+#ifdef SQLITE_ENABLE_STAT4
+/*
+** Return true if pNew is to be preferred over pOld.
+**
+** This function assumes that for each argument sample, the contents of
+** the anEq[] array from pSample->anEq[pSample->iCol] onwards are valid.
+*/
+static int sampleIsBetter(
+ StatAccum *pAccum,
+ StatSample *pNew,
+ StatSample *pOld
+){
+ tRowcnt nEqNew = pNew->anEq[pNew->iCol];
+ tRowcnt nEqOld = pOld->anEq[pOld->iCol];
+
+ assert( pOld->isPSample==0 && pNew->isPSample==0 );
+ assert( IsStat4 || (pNew->iCol==0 && pOld->iCol==0) );
+
+ if( (nEqNew>nEqOld) ) return 1;
+ if( nEqNew==nEqOld ){
+ if( pNew->iCol<pOld->iCol ) return 1;
+ return (pNew->iCol==pOld->iCol && sampleIsBetterPost(pAccum, pNew, pOld));
+ }
+ return 0;
+}
+
+/*
+** Copy the contents of sample *pNew into the p->a[] array. If necessary,
+** remove the least desirable sample from p->a[] to make room.
+*/
+static void sampleInsert(StatAccum *p, StatSample *pNew, int nEqZero){
+ StatSample *pSample = 0;
+ int i;
+
+ assert( IsStat4 || nEqZero==0 );
+
+ /* StatAccum.nMaxEqZero is set to the maximum number of leading 0
+ ** values in the anEq[] array of any sample in StatAccum.a[]. In
+ ** other words, if nMaxEqZero is n, then it is guaranteed that there
+ ** are no samples with StatSample.anEq[m]==0 for (m>=n). */
+ if( nEqZero>p->nMaxEqZero ){
+ p->nMaxEqZero = nEqZero;
+ }
+ if( pNew->isPSample==0 ){
+ StatSample *pUpgrade = 0;
+ assert( pNew->anEq[pNew->iCol]>0 );
+
+ /* This sample is being added because the prefix that ends in column
+ ** iCol occurs many times in the table. However, if we have already
+ ** added a sample that shares this prefix, there is no need to add
+ ** this one. Instead, upgrade the priority of the highest priority
+ ** existing sample that shares this prefix. */
+ for(i=p->nSample-1; i>=0; i--){
+ StatSample *pOld = &p->a[i];
+ if( pOld->anEq[pNew->iCol]==0 ){
+ if( pOld->isPSample ) return;
+ assert( pOld->iCol>pNew->iCol );
+ assert( sampleIsBetter(p, pNew, pOld) );
+ if( pUpgrade==0 || sampleIsBetter(p, pOld, pUpgrade) ){
+ pUpgrade = pOld;
+ }
+ }
+ }
+ if( pUpgrade ){
+ pUpgrade->iCol = pNew->iCol;
+ pUpgrade->anEq[pUpgrade->iCol] = pNew->anEq[pUpgrade->iCol];
+ goto find_new_min;
+ }
+ }
+
+ /* If necessary, remove sample iMin to make room for the new sample. */
+ if( p->nSample>=p->mxSample ){
+ StatSample *pMin = &p->a[p->iMin];
+ tRowcnt *anEq = pMin->anEq;
+ tRowcnt *anLt = pMin->anLt;
+ tRowcnt *anDLt = pMin->anDLt;
+ sampleClear(p->db, pMin);
+ memmove(pMin, &pMin[1], sizeof(p->a[0])*(p->nSample-p->iMin-1));
+ pSample = &p->a[p->nSample-1];
+ pSample->nRowid = 0;
+ pSample->anEq = anEq;
+ pSample->anDLt = anDLt;
+ pSample->anLt = anLt;
+ p->nSample = p->mxSample-1;
+ }
+
+ /* The "rows less-than" for the rowid column must be greater than that
+ ** for the last sample in the p->a[] array. Otherwise, the samples would
+ ** be out of order. */
+ assert( p->nSample==0
+ || pNew->anLt[p->nCol-1] > p->a[p->nSample-1].anLt[p->nCol-1] );
+
+ /* Insert the new sample */
+ pSample = &p->a[p->nSample];
+ sampleCopy(p, pSample, pNew);
+ p->nSample++;
+
+ /* Zero the first nEqZero entries in the anEq[] array. */
+ memset(pSample->anEq, 0, sizeof(tRowcnt)*nEqZero);
+
+find_new_min:
+ if( p->nSample>=p->mxSample ){
+ int iMin = -1;
+ for(i=0; i<p->mxSample; i++){
+ if( p->a[i].isPSample ) continue;
+ if( iMin<0 || sampleIsBetter(p, &p->a[iMin], &p->a[i]) ){
+ iMin = i;
+ }
+ }
+ assert( iMin>=0 );
+ p->iMin = iMin;
+ }
+}
+#endif /* SQLITE_ENABLE_STAT4 */
+
+#ifdef SQLITE_ENABLE_STAT4
+/*
+** Field iChng of the index being scanned has changed. So at this point
+** p->current contains a sample that reflects the previous row of the
+** index, and the values of anEq[iChng] and all subsequent anEq[]
+** elements are correct.
+*/
+static void samplePushPrevious(StatAccum *p, int iChng){
+ int i;
+
+ /* Check if any samples from the aBest[] array should be pushed
+ ** into IndexSample.a[] at this point. */
+ for(i=(p->nCol-2); i>=iChng; i--){
+ StatSample *pBest = &p->aBest[i];
+ pBest->anEq[i] = p->current.anEq[i];
+ if( p->nSample<p->mxSample || sampleIsBetter(p, pBest, &p->a[p->iMin]) ){
+ sampleInsert(p, pBest, i);
+ }
+ }
+
+ /* Check that no sample contains an anEq[] entry with an index of
+ ** p->nMaxEqZero or greater set to zero. */
+ for(i=p->nSample-1; i>=0; i--){
+ int j;
+ for(j=p->nMaxEqZero; j<p->nCol; j++) assert( p->a[i].anEq[j]>0 );
+ }
+
+ /* Update the anEq[] fields of any samples already collected. */
+ if( iChng<p->nMaxEqZero ){
+ for(i=p->nSample-1; i>=0; i--){
+ int j;
+ for(j=iChng; j<p->nCol; j++){
+ if( p->a[i].anEq[j]==0 ) p->a[i].anEq[j] = p->current.anEq[j];
+ }
+ }
+ p->nMaxEqZero = iChng;
+ }
+}
+#endif /* SQLITE_ENABLE_STAT4 */
+
+/*
+** Implementation of the stat_push SQL function: stat_push(P,C,R)
+** Arguments:
+**
+** P Pointer to the StatAccum object created by stat_init()
+** C Index of left-most column to differ from previous row
+** R Rowid for the current row. Might be a key record for
+** WITHOUT ROWID tables.
+**
+** The purpose of this routine is to collect statistical data and/or
+** samples from the index being analyzed into the StatAccum object.
+** The stat_get() SQL function will be used afterwards to
+** retrieve the information gathered.
+**
+** This SQL function usually returns NULL, but might return an integer
+** if it wants the byte-code to do special processing.
+**
+** The R parameter is only used for STAT4
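+**
+** For example (illustrative): scanning two-column index keys (1,1),
+** (1,2), (2,2) in order invokes stat_push() with C==0, C==1 and C==0
+** respectively: the second key first differs from its predecessor in
+** column 1, and the third key in column 0.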
+*/
+static void statPush(
+ sqlite3_context *context,
+ int argc,
+ sqlite3_value **argv
+){
+ int i;
+
+ /* The three function arguments */
+ StatAccum *p = (StatAccum*)sqlite3_value_blob(argv[0]);
+ int iChng = sqlite3_value_int(argv[1]);
+
+ UNUSED_PARAMETER( argc );
+ UNUSED_PARAMETER( context );
+ assert( p->nCol>0 );
+ assert( iChng<p->nCol );
+
+ if( p->nRow==0 ){
+ /* This is the first call to this function. Do initialization. */
+ for(i=0; i<p->nCol; i++) p->current.anEq[i] = 1;
+ }else{
+ /* Second and subsequent calls get processed here */
+#ifdef SQLITE_ENABLE_STAT4
+ if( p->mxSample ) samplePushPrevious(p, iChng);
+#endif
+
+ /* Update anDLt[], anLt[] and anEq[] to reflect the values that apply
+ ** to the current row of the index. */
+ for(i=0; i<iChng; i++){
+ p->current.anEq[i]++;
+ }
+ for(i=iChng; i<p->nCol; i++){
+ p->current.anDLt[i]++;
+#ifdef SQLITE_ENABLE_STAT4
+ if( p->mxSample ) p->current.anLt[i] += p->current.anEq[i];
+#endif
+ p->current.anEq[i] = 1;
+ }
+ }
+
+ p->nRow++;
+#ifdef SQLITE_ENABLE_STAT4
+ if( p->mxSample ){
+ tRowcnt nLt;
+ if( sqlite3_value_type(argv[2])==SQLITE_INTEGER ){
+ sampleSetRowidInt64(p->db, &p->current, sqlite3_value_int64(argv[2]));
+ }else{
+ sampleSetRowid(p->db, &p->current, sqlite3_value_bytes(argv[2]),
+ sqlite3_value_blob(argv[2]));
+ }
+ p->current.iHash = p->iPrn = p->iPrn*1103515245 + 12345;
+
+ nLt = p->current.anLt[p->nCol-1];
+ /* Check if this is to be a periodic sample. If so, add it. */
+ if( (nLt/p->nPSample)!=(nLt+1)/p->nPSample ){
+ p->current.isPSample = 1;
+ p->current.iCol = 0;
+ sampleInsert(p, &p->current, p->nCol-1);
+ p->current.isPSample = 0;
+ }
+
+ /* Update the aBest[] array. */
+ for(i=0; i<(p->nCol-1); i++){
+ p->current.iCol = i;
+ if( i>=iChng || sampleIsBetterPost(p, &p->current, &p->aBest[i]) ){
+ sampleCopy(p, &p->aBest[i], &p->current);
+ }
+ }
+ }else
+#endif
+ if( p->nLimit && p->nRow>(tRowcnt)p->nLimit*(p->nSkipAhead+1) ){
+ p->nSkipAhead++;
+ sqlite3_result_int(context, p->current.anDLt[0]>0);
+ }
+}
+
+static const FuncDef statPushFuncdef = {
+ 2+IsStat4, /* nArg */
+ SQLITE_UTF8, /* funcFlags */
+ 0, /* pUserData */
+ 0, /* pNext */
+ statPush, /* xSFunc */
+ 0, /* xFinalize */
+ 0, 0, /* xValue, xInverse */
+ "stat_push", /* zName */
+ {0}
+};
+
+#define STAT_GET_STAT1 0 /* "stat" column of stat1 table */
+#define STAT_GET_ROWID 1 /* "rowid" column of stat[34] entry */
+#define STAT_GET_NEQ 2 /* "neq" column of stat[34] entry */
+#define STAT_GET_NLT 3 /* "nlt" column of stat[34] entry */
+#define STAT_GET_NDLT 4 /* "ndlt" column of stat[34] entry */
+
+/*
+** Implementation of the stat_get(P,J) SQL function. This routine is
+** used to query statistical information that has been gathered into
+** the StatAccum object by prior calls to stat_push(). The P parameter
+** has type BLOB but it is really just a pointer to the StatAccum object.
+** The content to be returned is determined by the parameter J
+** which is one of the STAT_GET_xxxx values defined above.
+**
+** The stat_get(P,J) function is not available to generic SQL. It is
+** inserted as part of a manually constructed bytecode program. (See
+** the callStatGet() routine below.) It is guaranteed that the P
+** parameter will always be a pointer to a StatAccum object, never a
+** NULL.
+**
+** If STAT4 is not enabled, then J is always
+** STAT_GET_STAT1 and is hence omitted and this routine becomes
+** a one-parameter function, stat_get(P), that always returns the
+** stat1 table entry information.
+*/
+static void statGet(
+ sqlite3_context *context,
+ int argc,
+ sqlite3_value **argv
+){
+ StatAccum *p = (StatAccum*)sqlite3_value_blob(argv[0]);
+#ifdef SQLITE_ENABLE_STAT4
+ /* STAT4 has a parameter on this routine. */
+ int eCall = sqlite3_value_int(argv[1]);
+ assert( argc==2 );
+ assert( eCall==STAT_GET_STAT1 || eCall==STAT_GET_NEQ
+ || eCall==STAT_GET_ROWID || eCall==STAT_GET_NLT
+ || eCall==STAT_GET_NDLT
+ );
+ assert( eCall==STAT_GET_STAT1 || p->mxSample );
+ if( eCall==STAT_GET_STAT1 )
+#else
+ assert( argc==1 );
+#endif
+ {
+ /* Return the value to store in the "stat" column of the sqlite_stat1
+ ** table for this index.
+ **
+ ** The value is a string composed of a list of integers describing
+ ** the index. The first integer in the list is the total number of
+ ** entries in the index. There is one additional integer in the list
+ ** for each indexed column. This additional integer is an estimate of
+    ** the number of rows matched by an equality query on the index using
+ ** a key with the corresponding number of fields. In other words,
+ ** if the index is on columns (a,b) and the sqlite_stat1 value is
+ ** "100 10 2", then SQLite estimates that:
+ **
+ ** * the index contains 100 rows,
+ ** * "WHERE a=?" matches 10 rows, and
+ ** * "WHERE a=? AND b=?" matches 2 rows.
+ **
+ ** If D is the count of distinct values and K is the total number of
+ ** rows, then each estimate is computed as:
+ **
+ ** I = (K+D-1)/D
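+    **
+    ** For example (illustrative): K==100 rows with D==30 distinct
+    ** values gives I = (100+30-1)/30 = 4, the ceiling of K/D.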
+ */
+ char *z;
+ int i;
+
+ char *zRet = sqlite3MallocZero( (p->nKeyCol+1)*25 );
+ if( zRet==0 ){
+ sqlite3_result_error_nomem(context);
+ return;
+ }
+
+ sqlite3_snprintf(24, zRet, "%llu",
+ p->nSkipAhead ? (u64)p->nEst : (u64)p->nRow);
+ z = zRet + sqlite3Strlen30(zRet);
+ for(i=0; i<p->nKeyCol; i++){
+ u64 nDistinct = p->current.anDLt[i] + 1;
+ u64 iVal = (p->nRow + nDistinct - 1) / nDistinct;
+ sqlite3_snprintf(24, z, " %llu", iVal);
+ z += sqlite3Strlen30(z);
+ assert( p->current.anEq[i] );
+ }
+ assert( z[0]=='\0' && z>zRet );
+
+ sqlite3_result_text(context, zRet, -1, sqlite3_free);
+ }
+#ifdef SQLITE_ENABLE_STAT4
+ else if( eCall==STAT_GET_ROWID ){
+ if( p->iGet<0 ){
+ samplePushPrevious(p, 0);
+ p->iGet = 0;
+ }
+ if( p->iGet<p->nSample ){
+ StatSample *pS = p->a + p->iGet;
+ if( pS->nRowid==0 ){
+ sqlite3_result_int64(context, pS->u.iRowid);
+ }else{
+ sqlite3_result_blob(context, pS->u.aRowid, pS->nRowid,
+ SQLITE_TRANSIENT);
+ }
+ }
+ }else{
+ tRowcnt *aCnt = 0;
+
+ assert( p->iGet<p->nSample );
+ switch( eCall ){
+ case STAT_GET_NEQ: aCnt = p->a[p->iGet].anEq; break;
+ case STAT_GET_NLT: aCnt = p->a[p->iGet].anLt; break;
+ default: {
+ aCnt = p->a[p->iGet].anDLt;
+ p->iGet++;
+ break;
+ }
+ }
+
+ {
+ char *zRet = sqlite3MallocZero(p->nCol * 25);
+ if( zRet==0 ){
+ sqlite3_result_error_nomem(context);
+ }else{
+ int i;
+ char *z = zRet;
+ for(i=0; i<p->nCol; i++){
+ sqlite3_snprintf(24, z, "%llu ", (u64)aCnt[i]);
+ z += sqlite3Strlen30(z);
+ }
+ assert( z[0]=='\0' && z>zRet );
+ z[-1] = '\0';
+ sqlite3_result_text(context, zRet, -1, sqlite3_free);
+ }
+ }
+ }
+#endif /* SQLITE_ENABLE_STAT4 */
+#ifndef SQLITE_DEBUG
+ UNUSED_PARAMETER( argc );
+#endif
+}
+static const FuncDef statGetFuncdef = {
+ 1+IsStat4, /* nArg */
+ SQLITE_UTF8, /* funcFlags */
+ 0, /* pUserData */
+ 0, /* pNext */
+ statGet, /* xSFunc */
+ 0, /* xFinalize */
+ 0, 0, /* xValue, xInverse */
+ "stat_get", /* zName */
+ {0}
+};
+
+static void callStatGet(Parse *pParse, int regStat, int iParam, int regOut){
+#ifdef SQLITE_ENABLE_STAT4
+ sqlite3VdbeAddOp2(pParse->pVdbe, OP_Integer, iParam, regStat+1);
+#elif SQLITE_DEBUG
+ assert( iParam==STAT_GET_STAT1 );
+#else
+ UNUSED_PARAMETER( iParam );
+#endif
+ assert( regOut!=regStat && regOut!=regStat+1 );
+ sqlite3VdbeAddFunctionCall(pParse, 0, regStat, regOut, 1+IsStat4,
+ &statGetFuncdef, 0);
+}
+
+#ifdef SQLITE_ENABLE_EXPLAIN_COMMENTS
+/* Add a comment to the most recent VDBE opcode that is the name
+** of the k-th column of the pIdx index.
+*/
+static void analyzeVdbeCommentIndexWithColumnName(
+ Vdbe *v, /* Prepared statement under construction */
+ Index *pIdx, /* Index whose column is being loaded */
+ int k /* Which column index */
+){
+ int i; /* Index of column in the table */
+ assert( k>=0 && k<pIdx->nColumn );
+ i = pIdx->aiColumn[k];
+ if( NEVER(i==XN_ROWID) ){
+ VdbeComment((v,"%s.rowid",pIdx->zName));
+ }else if( i==XN_EXPR ){
+ VdbeComment((v,"%s.expr(%d)",pIdx->zName, k));
+ }else{
+ VdbeComment((v,"%s.%s", pIdx->zName, pIdx->pTable->aCol[i].zName));
+ }
+}
+#else
+# define analyzeVdbeCommentIndexWithColumnName(a,b,c)
+#endif /* SQLITE_ENABLE_EXPLAIN_COMMENTS */
+
+/*
+** Generate code to do an analysis of all indices associated with
+** a single table.
+*/
+static void analyzeOneTable(
+ Parse *pParse, /* Parser context */
+ Table *pTab, /* Table whose indices are to be analyzed */
+ Index *pOnlyIdx, /* If not NULL, only analyze this one index */
+ int iStatCur, /* Index of VdbeCursor that writes the sqlite_stat1 table */
+ int iMem, /* Available memory locations begin here */
+ int iTab /* Next available cursor */
+){
+ sqlite3 *db = pParse->db; /* Database handle */
+  Index *pIdx;                     /* An index being analyzed */
+ int iIdxCur; /* Cursor open on index being analyzed */
+ int iTabCur; /* Table cursor */
+ Vdbe *v; /* The virtual machine being built up */
+ int i; /* Loop counter */
+ int jZeroRows = -1; /* Jump from here if number of rows is zero */
+ int iDb; /* Index of database containing pTab */
+ u8 needTableCnt = 1; /* True to count the table */
+ int regNewRowid = iMem++; /* Rowid for the inserted record */
+ int regStat = iMem++; /* Register to hold StatAccum object */
+ int regChng = iMem++; /* Index of changed index field */
+ int regRowid = iMem++; /* Rowid argument passed to stat_push() */
+ int regTemp = iMem++; /* Temporary use register */
+ int regTemp2 = iMem++; /* Second temporary use register */
+ int regTabname = iMem++; /* Register containing table name */
+ int regIdxname = iMem++; /* Register containing index name */
+ int regStat1 = iMem++; /* Value for the stat column of sqlite_stat1 */
+ int regPrev = iMem; /* MUST BE LAST (see below) */
+#ifdef SQLITE_ENABLE_PREUPDATE_HOOK
+ Table *pStat1 = 0;
+#endif
+
+ pParse->nMem = MAX(pParse->nMem, iMem);
+ v = sqlite3GetVdbe(pParse);
+ if( v==0 || NEVER(pTab==0) ){
+ return;
+ }
+ if( pTab->tnum==0 ){
+ /* Do not gather statistics on views or virtual tables */
+ return;
+ }
+ if( sqlite3_strlike("sqlite\\_%", pTab->zName, '\\')==0 ){
+ /* Do not gather statistics on system tables */
+ return;
+ }
+ assert( sqlite3BtreeHoldsAllMutexes(db) );
+ iDb = sqlite3SchemaToIndex(db, pTab->pSchema);
+ assert( iDb>=0 );
+ assert( sqlite3SchemaMutexHeld(db, iDb, 0) );
+#ifndef SQLITE_OMIT_AUTHORIZATION
+ if( sqlite3AuthCheck(pParse, SQLITE_ANALYZE, pTab->zName, 0,
+ db->aDb[iDb].zDbSName ) ){
+ return;
+ }
+#endif
+
+#ifdef SQLITE_ENABLE_PREUPDATE_HOOK
+ if( db->xPreUpdateCallback ){
+ pStat1 = (Table*)sqlite3DbMallocZero(db, sizeof(Table) + 13);
+ if( pStat1==0 ) return;
+ pStat1->zName = (char*)&pStat1[1];
+ memcpy(pStat1->zName, "sqlite_stat1", 13);
+ pStat1->nCol = 3;
+ pStat1->iPKey = -1;
+ sqlite3VdbeAddOp4(pParse->pVdbe, OP_Noop, 0, 0, 0,(char*)pStat1,P4_DYNBLOB);
+ }
+#endif
+
+ /* Establish a read-lock on the table at the shared-cache level.
+ ** Open a read-only cursor on the table. Also allocate a cursor number
+ ** to use for scanning indexes (iIdxCur). No index cursor is opened at
+ ** this time though. */
+ sqlite3TableLock(pParse, iDb, pTab->tnum, 0, pTab->zName);
+ iTabCur = iTab++;
+ iIdxCur = iTab++;
+ pParse->nTab = MAX(pParse->nTab, iTab);
+ sqlite3OpenTable(pParse, iTabCur, iDb, pTab, OP_OpenRead);
+ sqlite3VdbeLoadString(v, regTabname, pTab->zName);
+
+ for(pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext){
+ int nCol; /* Number of columns in pIdx. "N" */
+ int addrRewind; /* Address of "OP_Rewind iIdxCur" */
+ int addrNextRow; /* Address of "next_row:" */
+ const char *zIdxName; /* Name of the index */
+ int nColTest; /* Number of columns to test for changes */
+
+ if( pOnlyIdx && pOnlyIdx!=pIdx ) continue;
+ if( pIdx->pPartIdxWhere==0 ) needTableCnt = 0;
+ if( !HasRowid(pTab) && IsPrimaryKeyIndex(pIdx) ){
+ nCol = pIdx->nKeyCol;
+ zIdxName = pTab->zName;
+ nColTest = nCol - 1;
+ }else{
+ nCol = pIdx->nColumn;
+ zIdxName = pIdx->zName;
+ nColTest = pIdx->uniqNotNull ? pIdx->nKeyCol-1 : nCol-1;
+ }
+
+ /* Populate the register containing the index name. */
+ sqlite3VdbeLoadString(v, regIdxname, zIdxName);
+ VdbeComment((v, "Analysis for %s.%s", pTab->zName, zIdxName));
+
+ /*
+ ** Pseudo-code for loop that calls stat_push():
+ **
+ ** Rewind csr
+ ** if eof(csr) goto end_of_scan;
+ ** regChng = 0
+ ** goto chng_addr_0;
+ **
+ ** next_row:
+ ** regChng = 0
+ ** if( idx(0) != regPrev(0) ) goto chng_addr_0
+ ** regChng = 1
+ ** if( idx(1) != regPrev(1) ) goto chng_addr_1
+ ** ...
+ ** regChng = N
+ ** goto chng_addr_N
+ **
+ ** chng_addr_0:
+ ** regPrev(0) = idx(0)
+ ** chng_addr_1:
+ ** regPrev(1) = idx(1)
+ ** ...
+ **
+ ** endDistinctTest:
+ ** regRowid = idx(rowid)
+ ** stat_push(P, regChng, regRowid)
+ ** Next csr
+ ** if !eof(csr) goto next_row;
+ **
+ ** end_of_scan:
+ */
+
+ /* Make sure there are enough memory cells allocated to accommodate
+ ** the regPrev array and a trailing rowid (the rowid slot is required
+ ** when building a record to insert into the sample column of
+    ** the sqlite_stat4 table). */
+ pParse->nMem = MAX(pParse->nMem, regPrev+nColTest);
+
+ /* Open a read-only cursor on the index being analyzed. */
+ assert( iDb==sqlite3SchemaToIndex(db, pIdx->pSchema) );
+ sqlite3VdbeAddOp3(v, OP_OpenRead, iIdxCur, pIdx->tnum, iDb);
+ sqlite3VdbeSetP4KeyInfo(pParse, pIdx);
+ VdbeComment((v, "%s", pIdx->zName));
+
+    /* Invoke the stat_init() function. The arguments are:
+    **
+    ** (1) the number of columns in the index including the rowid
+    ** (or for a WITHOUT ROWID table, the number of PK columns),
+    ** (2) the number of columns in the key without the rowid/pk,
+    ** (3) the estimated number of rows in the index, and
+    ** (4) the limit on the number of rows to scan, or 0 for no limit.
+    */
+ sqlite3VdbeAddOp2(v, OP_Integer, nCol, regStat+1);
+ assert( regRowid==regStat+2 );
+ sqlite3VdbeAddOp2(v, OP_Integer, pIdx->nKeyCol, regRowid);
+#ifdef SQLITE_ENABLE_STAT4
+ if( OptimizationEnabled(db, SQLITE_Stat4) ){
+ sqlite3VdbeAddOp2(v, OP_Count, iIdxCur, regTemp);
+ addrRewind = sqlite3VdbeAddOp1(v, OP_Rewind, iIdxCur);
+ VdbeCoverage(v);
+ }else
+#endif
+ {
+ addrRewind = sqlite3VdbeAddOp1(v, OP_Rewind, iIdxCur);
+ VdbeCoverage(v);
+ sqlite3VdbeAddOp3(v, OP_Count, iIdxCur, regTemp, 1);
+ }
+ assert( regTemp2==regStat+4 );
+ sqlite3VdbeAddOp2(v, OP_Integer, db->nAnalysisLimit, regTemp2);
+ sqlite3VdbeAddFunctionCall(pParse, 0, regStat+1, regStat, 4,
+ &statInitFuncdef, 0);
+
+ /* Implementation of the following:
+ **
+ ** Rewind csr
+ ** if eof(csr) goto end_of_scan;
+ ** regChng = 0
+    ** goto chng_addr_0;
+ **
+ */
+ sqlite3VdbeAddOp2(v, OP_Integer, 0, regChng);
+ addrNextRow = sqlite3VdbeCurrentAddr(v);
+
+ if( nColTest>0 ){
+ int endDistinctTest = sqlite3VdbeMakeLabel(pParse);
+ int *aGotoChng; /* Array of jump instruction addresses */
+ aGotoChng = sqlite3DbMallocRawNN(db, sizeof(int)*nColTest);
+ if( aGotoChng==0 ) continue;
+
+ /*
+ ** next_row:
+ ** regChng = 0
+ ** if( idx(0) != regPrev(0) ) goto chng_addr_0
+ ** regChng = 1
+ ** if( idx(1) != regPrev(1) ) goto chng_addr_1
+ ** ...
+ ** regChng = N
+ ** goto endDistinctTest
+ */
+ sqlite3VdbeAddOp0(v, OP_Goto);
+ addrNextRow = sqlite3VdbeCurrentAddr(v);
+ if( nColTest==1 && pIdx->nKeyCol==1 && IsUniqueIndex(pIdx) ){
+ /* For a single-column UNIQUE index, once we have found a non-NULL
+ ** row, we know that all the rest will be distinct, so skip
+ ** subsequent distinctness tests. */
+ sqlite3VdbeAddOp2(v, OP_NotNull, regPrev, endDistinctTest);
+ VdbeCoverage(v);
+ }
+ for(i=0; i<nColTest; i++){
+ char *pColl = (char*)sqlite3LocateCollSeq(pParse, pIdx->azColl[i]);
+ sqlite3VdbeAddOp2(v, OP_Integer, i, regChng);
+ sqlite3VdbeAddOp3(v, OP_Column, iIdxCur, i, regTemp);
+ analyzeVdbeCommentIndexWithColumnName(v,pIdx,i);
+ aGotoChng[i] =
+ sqlite3VdbeAddOp4(v, OP_Ne, regTemp, 0, regPrev+i, pColl, P4_COLLSEQ);
+ sqlite3VdbeChangeP5(v, SQLITE_NULLEQ);
+ VdbeCoverage(v);
+ }
+ sqlite3VdbeAddOp2(v, OP_Integer, nColTest, regChng);
+ sqlite3VdbeGoto(v, endDistinctTest);
+
+
+ /*
+ ** chng_addr_0:
+ ** regPrev(0) = idx(0)
+ ** chng_addr_1:
+ ** regPrev(1) = idx(1)
+ ** ...
+ */
+ sqlite3VdbeJumpHere(v, addrNextRow-1);
+ for(i=0; i<nColTest; i++){
+ sqlite3VdbeJumpHere(v, aGotoChng[i]);
+ sqlite3VdbeAddOp3(v, OP_Column, iIdxCur, i, regPrev+i);
+ analyzeVdbeCommentIndexWithColumnName(v,pIdx,i);
+ }
+ sqlite3VdbeResolveLabel(v, endDistinctTest);
+ sqlite3DbFree(db, aGotoChng);
+ }
+
+ /*
+ ** chng_addr_N:
+ ** regRowid = idx(rowid) // STAT4 only
+ ** stat_push(P, regChng, regRowid) // 3rd parameter STAT4 only
+ ** Next csr
+ ** if !eof(csr) goto next_row;
+ */
+#ifdef SQLITE_ENABLE_STAT4
+ if( OptimizationEnabled(db, SQLITE_Stat4) ){
+ assert( regRowid==(regStat+2) );
+ if( HasRowid(pTab) ){
+ sqlite3VdbeAddOp2(v, OP_IdxRowid, iIdxCur, regRowid);
+ }else{
+ Index *pPk = sqlite3PrimaryKeyIndex(pIdx->pTable);
+ int j, k, regKey;
+ regKey = sqlite3GetTempRange(pParse, pPk->nKeyCol);
+ for(j=0; j<pPk->nKeyCol; j++){
+ k = sqlite3TableColumnToIndex(pIdx, pPk->aiColumn[j]);
+ assert( k>=0 && k<pIdx->nColumn );
+ sqlite3VdbeAddOp3(v, OP_Column, iIdxCur, k, regKey+j);
+ analyzeVdbeCommentIndexWithColumnName(v,pIdx,k);
+ }
+ sqlite3VdbeAddOp3(v, OP_MakeRecord, regKey, pPk->nKeyCol, regRowid);
+ sqlite3ReleaseTempRange(pParse, regKey, pPk->nKeyCol);
+ }
+ }
+#endif
+ assert( regChng==(regStat+1) );
+ {
+ sqlite3VdbeAddFunctionCall(pParse, 1, regStat, regTemp, 2+IsStat4,
+ &statPushFuncdef, 0);
+ if( db->nAnalysisLimit ){
+ int j1, j2, j3;
+ j1 = sqlite3VdbeAddOp1(v, OP_IsNull, regTemp); VdbeCoverage(v);
+ j2 = sqlite3VdbeAddOp1(v, OP_If, regTemp); VdbeCoverage(v);
+ j3 = sqlite3VdbeAddOp4Int(v, OP_SeekGT, iIdxCur, 0, regPrev, 1);
+ VdbeCoverage(v);
+ sqlite3VdbeJumpHere(v, j1);
+ sqlite3VdbeAddOp2(v, OP_Next, iIdxCur, addrNextRow); VdbeCoverage(v);
+ sqlite3VdbeJumpHere(v, j2);
+ sqlite3VdbeJumpHere(v, j3);
+ }else{
+ sqlite3VdbeAddOp2(v, OP_Next, iIdxCur, addrNextRow); VdbeCoverage(v);
+ }
+ }
+
+ /* Add the entry to the stat1 table. */
+ callStatGet(pParse, regStat, STAT_GET_STAT1, regStat1);
+ assert( "BBB"[0]==SQLITE_AFF_TEXT );
+ sqlite3VdbeAddOp4(v, OP_MakeRecord, regTabname, 3, regTemp, "BBB", 0);
+ sqlite3VdbeAddOp2(v, OP_NewRowid, iStatCur, regNewRowid);
+ sqlite3VdbeAddOp3(v, OP_Insert, iStatCur, regTemp, regNewRowid);
+#ifdef SQLITE_ENABLE_PREUPDATE_HOOK
+ sqlite3VdbeChangeP4(v, -1, (char*)pStat1, P4_TABLE);
+#endif
+ sqlite3VdbeChangeP5(v, OPFLAG_APPEND);
+
+ /* Add the entries to the stat4 table. */
+#ifdef SQLITE_ENABLE_STAT4
+ if( OptimizationEnabled(db, SQLITE_Stat4) && db->nAnalysisLimit==0 ){
+ int regEq = regStat1;
+ int regLt = regStat1+1;
+ int regDLt = regStat1+2;
+ int regSample = regStat1+3;
+ int regCol = regStat1+4;
+ int regSampleRowid = regCol + nCol;
+ int addrNext;
+ int addrIsNull;
+ u8 seekOp = HasRowid(pTab) ? OP_NotExists : OP_NotFound;
+
+ pParse->nMem = MAX(pParse->nMem, regCol+nCol);
+
+ addrNext = sqlite3VdbeCurrentAddr(v);
+ callStatGet(pParse, regStat, STAT_GET_ROWID, regSampleRowid);
+ addrIsNull = sqlite3VdbeAddOp1(v, OP_IsNull, regSampleRowid);
+ VdbeCoverage(v);
+ callStatGet(pParse, regStat, STAT_GET_NEQ, regEq);
+ callStatGet(pParse, regStat, STAT_GET_NLT, regLt);
+ callStatGet(pParse, regStat, STAT_GET_NDLT, regDLt);
+ sqlite3VdbeAddOp4Int(v, seekOp, iTabCur, addrNext, regSampleRowid, 0);
+ VdbeCoverage(v);
+ for(i=0; i<nCol; i++){
+ sqlite3ExprCodeLoadIndexColumn(pParse, pIdx, iTabCur, i, regCol+i);
+ }
+ sqlite3VdbeAddOp3(v, OP_MakeRecord, regCol, nCol, regSample);
+ sqlite3VdbeAddOp3(v, OP_MakeRecord, regTabname, 6, regTemp);
+ sqlite3VdbeAddOp2(v, OP_NewRowid, iStatCur+1, regNewRowid);
+ sqlite3VdbeAddOp3(v, OP_Insert, iStatCur+1, regTemp, regNewRowid);
+ sqlite3VdbeAddOp2(v, OP_Goto, 1, addrNext); /* P1==1 for end-of-loop */
+ sqlite3VdbeJumpHere(v, addrIsNull);
+ }
+#endif /* SQLITE_ENABLE_STAT4 */
+
+ /* End of analysis */
+ sqlite3VdbeJumpHere(v, addrRewind);
+ }
+
+
+ /* Create a single sqlite_stat1 entry containing NULL as the index
+ ** name and the row count as the content.
+ */
+ if( pOnlyIdx==0 && needTableCnt ){
+ VdbeComment((v, "%s", pTab->zName));
+ sqlite3VdbeAddOp2(v, OP_Count, iTabCur, regStat1);
+ jZeroRows = sqlite3VdbeAddOp1(v, OP_IfNot, regStat1); VdbeCoverage(v);
+ sqlite3VdbeAddOp2(v, OP_Null, 0, regIdxname);
+ assert( "BBB"[0]==SQLITE_AFF_TEXT );
+ sqlite3VdbeAddOp4(v, OP_MakeRecord, regTabname, 3, regTemp, "BBB", 0);
+ sqlite3VdbeAddOp2(v, OP_NewRowid, iStatCur, regNewRowid);
+ sqlite3VdbeAddOp3(v, OP_Insert, iStatCur, regTemp, regNewRowid);
+ sqlite3VdbeChangeP5(v, OPFLAG_APPEND);
+#ifdef SQLITE_ENABLE_PREUPDATE_HOOK
+ sqlite3VdbeChangeP4(v, -1, (char*)pStat1, P4_TABLE);
+#endif
+ sqlite3VdbeJumpHere(v, jZeroRows);
+ }
+}
+
+
+/*
+** Generate code that will cause the most recent index analysis to
+** be loaded into internal hash tables where it can be used.
+*/
+static void loadAnalysis(Parse *pParse, int iDb){
+ Vdbe *v = sqlite3GetVdbe(pParse);
+ if( v ){
+ sqlite3VdbeAddOp1(v, OP_LoadAnalysis, iDb);
+ }
+}
+
+/*
+** Generate code that will do an analysis of an entire database
+*/
+static void analyzeDatabase(Parse *pParse, int iDb){
+ sqlite3 *db = pParse->db;
+ Schema *pSchema = db->aDb[iDb].pSchema; /* Schema of database iDb */
+ HashElem *k;
+ int iStatCur;
+ int iMem;
+ int iTab;
+
+ sqlite3BeginWriteOperation(pParse, 0, iDb);
+ iStatCur = pParse->nTab;
+ pParse->nTab += 3;
+ openStatTable(pParse, iDb, iStatCur, 0, 0);
+ iMem = pParse->nMem+1;
+ iTab = pParse->nTab;
+ assert( sqlite3SchemaMutexHeld(db, iDb, 0) );
+ for(k=sqliteHashFirst(&pSchema->tblHash); k; k=sqliteHashNext(k)){
+ Table *pTab = (Table*)sqliteHashData(k);
+ analyzeOneTable(pParse, pTab, 0, iStatCur, iMem, iTab);
+ }
+ loadAnalysis(pParse, iDb);
+}
+
+/*
+** Generate code that will do an analysis of a single table in
+** a database. If pOnlyIdx is not NULL then it is a single index
+** in pTab that should be analyzed.
+*/
+static void analyzeTable(Parse *pParse, Table *pTab, Index *pOnlyIdx){
+ int iDb;
+ int iStatCur;
+
+ assert( pTab!=0 );
+ assert( sqlite3BtreeHoldsAllMutexes(pParse->db) );
+ iDb = sqlite3SchemaToIndex(pParse->db, pTab->pSchema);
+ sqlite3BeginWriteOperation(pParse, 0, iDb);
+ iStatCur = pParse->nTab;
+ pParse->nTab += 3;
+ if( pOnlyIdx ){
+ openStatTable(pParse, iDb, iStatCur, pOnlyIdx->zName, "idx");
+ }else{
+ openStatTable(pParse, iDb, iStatCur, pTab->zName, "tbl");
+ }
+ analyzeOneTable(pParse, pTab, pOnlyIdx, iStatCur,pParse->nMem+1,pParse->nTab);
+ loadAnalysis(pParse, iDb);
+}
+
+/*
+** Generate code for the ANALYZE command. The parser calls this routine
+** when it recognizes an ANALYZE command.
+**
+** ANALYZE -- 1
+** ANALYZE <database> -- 2
+** ANALYZE ?<database>.?<tablename> -- 3
+**
+** Form 1 causes all indices in all attached databases to be analyzed.
+** Form 2 analyzes all indices in the single database named.
+** Form 3 analyzes all indices associated with the named table.
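+**
+** For example (SQL shown for illustration only):
+**
+**    ANALYZE;                 -- form 1
+**    ANALYZE main;            -- form 2
+**    ANALYZE main.t1;         -- form 3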
+*/
+SQLITE_PRIVATE void sqlite3Analyze(Parse *pParse, Token *pName1, Token *pName2){
+ sqlite3 *db = pParse->db;
+ int iDb;
+ int i;
+ char *z, *zDb;
+ Table *pTab;
+ Index *pIdx;
+ Token *pTableName;
+ Vdbe *v;
+
+ /* Read the database schema. If an error occurs, leave an error message
+ ** and code in pParse and return NULL. */
+ assert( sqlite3BtreeHoldsAllMutexes(pParse->db) );
+ if( SQLITE_OK!=sqlite3ReadSchema(pParse) ){
+ return;
+ }
+
+ assert( pName2!=0 || pName1==0 );
+ if( pName1==0 ){
+ /* Form 1: Analyze everything */
+ for(i=0; i<db->nDb; i++){
+ if( i==1 ) continue; /* Do not analyze the TEMP database */
+ analyzeDatabase(pParse, i);
+ }
+ }else if( pName2->n==0 && (iDb = sqlite3FindDb(db, pName1))>=0 ){
+    /* Form 2: Analyze the schema named as the argument */
+ analyzeDatabase(pParse, iDb);
+ }else{
+ /* Form 3: Analyze the table or index named as an argument */
+ iDb = sqlite3TwoPartName(pParse, pName1, pName2, &pTableName);
+ if( iDb>=0 ){
+ zDb = pName2->n ? db->aDb[iDb].zDbSName : 0;
+ z = sqlite3NameFromToken(db, pTableName);
+ if( z ){
+ if( (pIdx = sqlite3FindIndex(db, z, zDb))!=0 ){
+ analyzeTable(pParse, pIdx->pTable, pIdx);
+ }else if( (pTab = sqlite3LocateTable(pParse, 0, z, zDb))!=0 ){
+ analyzeTable(pParse, pTab, 0);
+ }
+ sqlite3DbFree(db, z);
+ }
+ }
+ }
+ if( db->nSqlExec==0 && (v = sqlite3GetVdbe(pParse))!=0 ){
+ sqlite3VdbeAddOp0(v, OP_Expire);
+ }
+}
+
+/*
+** Used to pass information from the analyzer reader through to the
+** callback routine.
+*/
+typedef struct analysisInfo analysisInfo;
+struct analysisInfo {
+ sqlite3 *db;
+ const char *zDatabase;
+};
+
+/*
+** The first argument points to a nul-terminated string containing a
+** list of space separated integers. Read the first nOut of these into
+** the array aOut[].
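+**
+** For example (an illustrative sqlite_stat1 "stat" value), decoding
+** "10000 500 20 sz=600" with nOut==3 stores 10000, 500 and 20 in the
+** output array, after which the trailing "sz=600" token is consumed by
+** the flag handling below and sets Index.szIdxRow.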
+*/
+static void decodeIntArray(
+ char *zIntArray, /* String containing int array to decode */
+ int nOut, /* Number of slots in aOut[] */
+ tRowcnt *aOut, /* Store integers here */
+ LogEst *aLog, /* Or, if aOut==0, here */
+ Index *pIndex /* Handle extra flags for this index, if not NULL */
+){
+ char *z = zIntArray;
+ int c;
+ int i;
+ tRowcnt v;
+
+#ifdef SQLITE_ENABLE_STAT4
+ if( z==0 ) z = "";
+#else
+ assert( z!=0 );
+#endif
+ for(i=0; *z && i<nOut; i++){
+ v = 0;
+ while( (c=z[0])>='0' && c<='9' ){
+ v = v*10 + c - '0';
+ z++;
+ }
+#ifdef SQLITE_ENABLE_STAT4
+ if( aOut ) aOut[i] = v;
+ if( aLog ) aLog[i] = sqlite3LogEst(v);
+#else
+ assert( aOut==0 );
+ UNUSED_PARAMETER(aOut);
+ assert( aLog!=0 );
+ aLog[i] = sqlite3LogEst(v);
+#endif
+ if( *z==' ' ) z++;
+ }
+#ifndef SQLITE_ENABLE_STAT4
+ assert( pIndex!=0 ); {
+#else
+ if( pIndex ){
+#endif
+ pIndex->bUnordered = 0;
+ pIndex->noSkipScan = 0;
+ while( z[0] ){
+ if( sqlite3_strglob("unordered*", z)==0 ){
+ pIndex->bUnordered = 1;
+ }else if( sqlite3_strglob("sz=[0-9]*", z)==0 ){
+ int sz = sqlite3Atoi(z+3);
+ if( sz<2 ) sz = 2;
+ pIndex->szIdxRow = sqlite3LogEst(sz);
+ }else if( sqlite3_strglob("noskipscan*", z)==0 ){
+ pIndex->noSkipScan = 1;
+ }
+#ifdef SQLITE_ENABLE_COSTMULT
+ else if( sqlite3_strglob("costmult=[0-9]*",z)==0 ){
+ pIndex->pTable->costMult = sqlite3LogEst(sqlite3Atoi(z+9));
+ }
+#endif
+ while( z[0]!=0 && z[0]!=' ' ) z++;
+ while( z[0]==' ' ) z++;
+ }
+ }
+}
+
+/*
+** This callback is invoked once for each index when reading the
+** sqlite_stat1 table.
+**
+** argv[0] = name of the table
+** argv[1] = name of the index (might be NULL)
+** argv[2] = results of analysis - one integer for each column
+**
+** Entries for which argv[1]==NULL simply record the number of rows in
+** the table.
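+**
+** For example (illustrative values), the row
+**
+**    ('t1', 'i1', '10000 500 20')
+**
+** says that index i1 holds about 10000 rows, that each distinct value
+** of its first column matches about 500 of them, and that each distinct
+** two-column prefix matches about 20.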
+*/
+static int analysisLoader(void *pData, int argc, char **argv, char **NotUsed){
+ analysisInfo *pInfo = (analysisInfo*)pData;
+ Index *pIndex;
+ Table *pTable;
+ const char *z;
+
+ assert( argc==3 );
+ UNUSED_PARAMETER2(NotUsed, argc);
+
+ if( argv==0 || argv[0]==0 || argv[2]==0 ){
+ return 0;
+ }
+ pTable = sqlite3FindTable(pInfo->db, argv[0], pInfo->zDatabase);
+ if( pTable==0 ){
+ return 0;
+ }
+ if( argv[1]==0 ){
+ pIndex = 0;
+ }else if( sqlite3_stricmp(argv[0],argv[1])==0 ){
+ pIndex = sqlite3PrimaryKeyIndex(pTable);
+ }else{
+ pIndex = sqlite3FindIndex(pInfo->db, argv[1], pInfo->zDatabase);
+ }
+ z = argv[2];
+
+ if( pIndex ){
+ tRowcnt *aiRowEst = 0;
+ int nCol = pIndex->nKeyCol+1;
+#ifdef SQLITE_ENABLE_STAT4
+ /* Index.aiRowEst may already be set here if there are duplicate
+ ** sqlite_stat1 entries for this index. In that case just clobber
+ ** the old data with the new instead of allocating a new array. */
+ if( pIndex->aiRowEst==0 ){
+ pIndex->aiRowEst = (tRowcnt*)sqlite3MallocZero(sizeof(tRowcnt) * nCol);
+ if( pIndex->aiRowEst==0 ) sqlite3OomFault(pInfo->db);
+ }
+ aiRowEst = pIndex->aiRowEst;
+#endif
+ pIndex->bUnordered = 0;
+ decodeIntArray((char*)z, nCol, aiRowEst, pIndex->aiRowLogEst, pIndex);
+ pIndex->hasStat1 = 1;
+ if( pIndex->pPartIdxWhere==0 ){
+ pTable->nRowLogEst = pIndex->aiRowLogEst[0];
+ pTable->tabFlags |= TF_HasStat1;
+ }
+ }else{
+ Index fakeIdx;
+ fakeIdx.szIdxRow = pTable->szTabRow;
+#ifdef SQLITE_ENABLE_COSTMULT
+ fakeIdx.pTable = pTable;
+#endif
+ decodeIntArray((char*)z, 1, 0, &pTable->nRowLogEst, &fakeIdx);
+ pTable->szTabRow = fakeIdx.szIdxRow;
+ pTable->tabFlags |= TF_HasStat1;
+ }
+
+ return 0;
+}
+
+/*
+** If the Index.aSample variable is not NULL, delete the aSample[] array
+** and its contents.
+*/
+SQLITE_PRIVATE void sqlite3DeleteIndexSamples(sqlite3 *db, Index *pIdx){
+#ifdef SQLITE_ENABLE_STAT4
+ if( pIdx->aSample ){
+ int j;
+ for(j=0; j<pIdx->nSample; j++){
+ IndexSample *p = &pIdx->aSample[j];
+ sqlite3DbFree(db, p->p);
+ }
+ sqlite3DbFree(db, pIdx->aSample);
+ }
+ if( db && db->pnBytesFreed==0 ){
+ pIdx->nSample = 0;
+ pIdx->aSample = 0;
+ }
+#else
+ UNUSED_PARAMETER(db);
+ UNUSED_PARAMETER(pIdx);
+#endif /* SQLITE_ENABLE_STAT4 */
+}
+
+#ifdef SQLITE_ENABLE_STAT4
+/*
+** Populate the pIdx->aAvgEq[] array based on the samples currently
+** stored in pIdx->aSample[].
+*/
+static void initAvgEq(Index *pIdx){
+ if( pIdx ){
+ IndexSample *aSample = pIdx->aSample;
+ IndexSample *pFinal = &aSample[pIdx->nSample-1];
+ int iCol;
+ int nCol = 1;
+ if( pIdx->nSampleCol>1 ){
+ /* If this is stat4 data, then calculate aAvgEq[] values for all
+ ** sample columns except the last. The last is always set to 1, as
+ ** once the trailing PK fields are considered all index keys are
+ ** unique. */
+ nCol = pIdx->nSampleCol-1;
+ pIdx->aAvgEq[nCol] = 1;
+ }
+ for(iCol=0; iCol<nCol; iCol++){
+ int nSample = pIdx->nSample;
+ int i; /* Used to iterate through samples */
+ tRowcnt sumEq = 0; /* Sum of the nEq values */
+ tRowcnt avgEq = 0;
+ tRowcnt nRow; /* Number of rows in index */
+ i64 nSum100 = 0; /* Number of terms contributing to sumEq */
+ i64 nDist100; /* Number of distinct values in index */
+
+ if( !pIdx->aiRowEst || iCol>=pIdx->nKeyCol || pIdx->aiRowEst[iCol+1]==0 ){
+ nRow = pFinal->anLt[iCol];
+ nDist100 = (i64)100 * pFinal->anDLt[iCol];
+ nSample--;
+ }else{
+ nRow = pIdx->aiRowEst[0];
+ nDist100 = ((i64)100 * pIdx->aiRowEst[0]) / pIdx->aiRowEst[iCol+1];
+ }
+ pIdx->nRowEst0 = nRow;
+
+ /* Set nSum to the number of distinct (iCol+1) field prefixes that
+ ** occur in the stat4 table for this index. Set sumEq to the sum of
+ ** the nEq values for column iCol for the same set (adding the value
+ ** only once where there exist duplicate prefixes). */
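+      /* Illustrative numbers (not from any real database): with
+      ** nRow=1000, nDist100=10000 (100 distinct values), and 10 sampled
+      ** prefixes accounting for sumEq=100 rows (nSum100=1000), the code
+      ** below computes avgEq = 100*(1000-100)/(10000-1000) = 10 rows
+      ** per unsampled distinct value. */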
+ for(i=0; i<nSample; i++){
+ if( i==(pIdx->nSample-1)
+ || aSample[i].anDLt[iCol]!=aSample[i+1].anDLt[iCol]
+ ){
+ sumEq += aSample[i].anEq[iCol];
+ nSum100 += 100;
+ }
+ }
+
+ if( nDist100>nSum100 && sumEq<nRow ){
+ avgEq = ((i64)100 * (nRow - sumEq))/(nDist100 - nSum100);
+ }
+ if( avgEq==0 ) avgEq = 1;
+ pIdx->aAvgEq[iCol] = avgEq;
+ }
+ }
+}
+
+/*
+** Look up an index by name. Or, if the name of a WITHOUT ROWID table
+** is supplied instead, find the PRIMARY KEY index for that table.
+*/
+static Index *findIndexOrPrimaryKey(
+ sqlite3 *db,
+ const char *zName,
+ const char *zDb
+){
+ Index *pIdx = sqlite3FindIndex(db, zName, zDb);
+ if( pIdx==0 ){
+ Table *pTab = sqlite3FindTable(db, zName, zDb);
+ if( pTab && !HasRowid(pTab) ) pIdx = sqlite3PrimaryKeyIndex(pTab);
+ }
+ return pIdx;
+}
+
+/*
+** Load the content of the sqlite_stat4 table into the relevant
+** Index.aSample[] arrays.
+**
+** Arguments zSql1 and zSql2 must point to SQL statements that return
+** data equivalent to the following:
+**
+** zSql1: SELECT idx,count(*) FROM %Q.sqlite_stat4 GROUP BY idx
+** zSql2: SELECT idx,neq,nlt,ndlt,sample FROM %Q.sqlite_stat4
+**
+** where %Q is replaced with the database name before the SQL is executed.
+*/
+static int loadStatTbl(
+ sqlite3 *db, /* Database handle */
+ const char *zSql1, /* SQL statement 1 (see above) */
+ const char *zSql2, /* SQL statement 2 (see above) */
+ const char *zDb /* Database name (e.g. "main") */
+){
+ int rc; /* Result codes from subroutines */
+ sqlite3_stmt *pStmt = 0; /* An SQL statement being run */
+ char *zSql; /* Text of the SQL statement */
+ Index *pPrevIdx = 0; /* Previous index in the loop */
+ IndexSample *pSample; /* A slot in pIdx->aSample[] */
+
+ assert( db->lookaside.bDisable );
+ zSql = sqlite3MPrintf(db, zSql1, zDb);
+ if( !zSql ){
+ return SQLITE_NOMEM_BKPT;
+ }
+ rc = sqlite3_prepare(db, zSql, -1, &pStmt, 0);
+ sqlite3DbFree(db, zSql);
+ if( rc ) return rc;
+
+ while( sqlite3_step(pStmt)==SQLITE_ROW ){
+ int nIdxCol = 1; /* Number of columns in stat4 records */
+
+ char *zIndex; /* Index name */
+ Index *pIdx; /* Pointer to the index object */
+ int nSample; /* Number of samples */
+ int nByte; /* Bytes of space required */
+    int i;                    /* Loop counter */
+ tRowcnt *pSpace;
+
+ zIndex = (char *)sqlite3_column_text(pStmt, 0);
+ if( zIndex==0 ) continue;
+ nSample = sqlite3_column_int(pStmt, 1);
+ pIdx = findIndexOrPrimaryKey(db, zIndex, zDb);
+ assert( pIdx==0 || pIdx->nSample==0 );
+ if( pIdx==0 ) continue;
+ assert( !HasRowid(pIdx->pTable) || pIdx->nColumn==pIdx->nKeyCol+1 );
+ if( !HasRowid(pIdx->pTable) && IsPrimaryKeyIndex(pIdx) ){
+ nIdxCol = pIdx->nKeyCol;
+ }else{
+ nIdxCol = pIdx->nColumn;
+ }
+ pIdx->nSampleCol = nIdxCol;
+ nByte = sizeof(IndexSample) * nSample;
+ nByte += sizeof(tRowcnt) * nIdxCol * 3 * nSample;
+ nByte += nIdxCol * sizeof(tRowcnt); /* Space for Index.aAvgEq[] */
+
+ pIdx->aSample = sqlite3DbMallocZero(db, nByte);
+ if( pIdx->aSample==0 ){
+ sqlite3_finalize(pStmt);
+ return SQLITE_NOMEM_BKPT;
+ }
+ pSpace = (tRowcnt*)&pIdx->aSample[nSample];
+ pIdx->aAvgEq = pSpace; pSpace += nIdxCol;
+ for(i=0; i<nSample; i++){
+ pIdx->aSample[i].anEq = pSpace; pSpace += nIdxCol;
+ pIdx->aSample[i].anLt = pSpace; pSpace += nIdxCol;
+ pIdx->aSample[i].anDLt = pSpace; pSpace += nIdxCol;
+ }
+ assert( ((u8*)pSpace)-nByte==(u8*)(pIdx->aSample) );
+ }
+ rc = sqlite3_finalize(pStmt);
+ if( rc ) return rc;
+
+ zSql = sqlite3MPrintf(db, zSql2, zDb);
+ if( !zSql ){
+ return SQLITE_NOMEM_BKPT;
+ }
+ rc = sqlite3_prepare(db, zSql, -1, &pStmt, 0);
+ sqlite3DbFree(db, zSql);
+ if( rc ) return rc;
+
+ while( sqlite3_step(pStmt)==SQLITE_ROW ){
+ char *zIndex; /* Index name */
+ Index *pIdx; /* Pointer to the index object */
+ int nCol = 1; /* Number of columns in index */
+
+ zIndex = (char *)sqlite3_column_text(pStmt, 0);
+ if( zIndex==0 ) continue;
+ pIdx = findIndexOrPrimaryKey(db, zIndex, zDb);
+ if( pIdx==0 ) continue;
+    /* pIdx->nSampleCol is non-zero here only if space for the samples
+    ** was allocated while processing the first query above. */
+ nCol = pIdx->nSampleCol;
+ if( pIdx!=pPrevIdx ){
+ initAvgEq(pPrevIdx);
+ pPrevIdx = pIdx;
+ }
+ pSample = &pIdx->aSample[pIdx->nSample];
+ decodeIntArray((char*)sqlite3_column_text(pStmt,1),nCol,pSample->anEq,0,0);
+ decodeIntArray((char*)sqlite3_column_text(pStmt,2),nCol,pSample->anLt,0,0);
+ decodeIntArray((char*)sqlite3_column_text(pStmt,3),nCol,pSample->anDLt,0,0);
+
+    /* Take a copy of the sample. Add two 0x00 bytes at the end of the buffer.
+ ** This is in case the sample record is corrupted. In that case, the
+ ** sqlite3VdbeRecordCompare() may read up to two varints past the
+ ** end of the allocated buffer before it realizes it is dealing with
+ ** a corrupt record. Adding the two 0x00 bytes prevents this from causing
+ ** a buffer overread. */
+ pSample->n = sqlite3_column_bytes(pStmt, 4);
+ pSample->p = sqlite3DbMallocZero(db, pSample->n + 2);
+ if( pSample->p==0 ){
+ sqlite3_finalize(pStmt);
+ return SQLITE_NOMEM_BKPT;
+ }
+ if( pSample->n ){
+ memcpy(pSample->p, sqlite3_column_blob(pStmt, 4), pSample->n);
+ }
+ pIdx->nSample++;
+ }
+ rc = sqlite3_finalize(pStmt);
+ if( rc==SQLITE_OK ) initAvgEq(pPrevIdx);
+ return rc;
+}
+
+/*
+** Load content from the sqlite_stat4 table into
+** the Index.aSample[] arrays of all indices.
+*/
+static int loadStat4(sqlite3 *db, const char *zDb){
+ int rc = SQLITE_OK; /* Result codes from subroutines */
+
+ assert( db->lookaside.bDisable );
+ if( sqlite3FindTable(db, "sqlite_stat4", zDb) ){
+ rc = loadStatTbl(db,
+ "SELECT idx,count(*) FROM %Q.sqlite_stat4 GROUP BY idx",
+ "SELECT idx,neq,nlt,ndlt,sample FROM %Q.sqlite_stat4",
+ zDb
+ );
+ }
+ return rc;
+}
+#endif /* SQLITE_ENABLE_STAT4 */
+
+/*
+** Load the content of the sqlite_stat1 and sqlite_stat4 tables. The
+** contents of sqlite_stat1 are used to populate the Index.aiRowEst[]
+** arrays. The contents of sqlite_stat4 are used to populate the
+** Index.aSample[] arrays.
+**
+** If the sqlite_stat1 table is not present in the database, SQLITE_ERROR
+** is returned. In this case, even if SQLITE_ENABLE_STAT4 was defined
+** during compilation and the sqlite_stat4 table is present, no data is
+** read from it.
+**
+** If SQLITE_ENABLE_STAT4 was defined during compilation and the
+** sqlite_stat4 table is not present in the database, SQLITE_ERROR is
+** returned. However, in this case, data is read from the sqlite_stat1
+** table (if it is present) before returning.
+**
+** If an OOM error occurs, this function always sets db->mallocFailed.
+** This means if the caller does not care about other errors, the return
+** code may be ignored.
+*/
+SQLITE_PRIVATE int sqlite3AnalysisLoad(sqlite3 *db, int iDb){
+ analysisInfo sInfo;
+ HashElem *i;
+ char *zSql;
+ int rc = SQLITE_OK;
+ Schema *pSchema = db->aDb[iDb].pSchema;
+
+ assert( iDb>=0 && iDb<db->nDb );
+ assert( db->aDb[iDb].pBt!=0 );
+
+ /* Clear any prior statistics */
+ assert( sqlite3SchemaMutexHeld(db, iDb, 0) );
+ for(i=sqliteHashFirst(&pSchema->tblHash); i; i=sqliteHashNext(i)){
+ Table *pTab = sqliteHashData(i);
+ pTab->tabFlags &= ~TF_HasStat1;
+ }
+ for(i=sqliteHashFirst(&pSchema->idxHash); i; i=sqliteHashNext(i)){
+ Index *pIdx = sqliteHashData(i);
+ pIdx->hasStat1 = 0;
+#ifdef SQLITE_ENABLE_STAT4
+ sqlite3DeleteIndexSamples(db, pIdx);
+ pIdx->aSample = 0;
+#endif
+ }
+
+ /* Load new statistics out of the sqlite_stat1 table */
+ sInfo.db = db;
+ sInfo.zDatabase = db->aDb[iDb].zDbSName;
+ if( sqlite3FindTable(db, "sqlite_stat1", sInfo.zDatabase)!=0 ){
+ zSql = sqlite3MPrintf(db,
+ "SELECT tbl,idx,stat FROM %Q.sqlite_stat1", sInfo.zDatabase);
+ if( zSql==0 ){
+ rc = SQLITE_NOMEM_BKPT;
+ }else{
+ rc = sqlite3_exec(db, zSql, analysisLoader, &sInfo, 0);
+ sqlite3DbFree(db, zSql);
+ }
+ }
+
+ /* Set appropriate defaults on all indexes not in the sqlite_stat1 table */
+ assert( sqlite3SchemaMutexHeld(db, iDb, 0) );
+ for(i=sqliteHashFirst(&pSchema->idxHash); i; i=sqliteHashNext(i)){
+ Index *pIdx = sqliteHashData(i);
+ if( !pIdx->hasStat1 ) sqlite3DefaultRowEst(pIdx);
+ }
+
+ /* Load the statistics from the sqlite_stat4 table. */
+#ifdef SQLITE_ENABLE_STAT4
+ if( rc==SQLITE_OK ){
+ DisableLookaside;
+ rc = loadStat4(db, sInfo.zDatabase);
+ EnableLookaside;
+ }
+ for(i=sqliteHashFirst(&pSchema->idxHash); i; i=sqliteHashNext(i)){
+ Index *pIdx = sqliteHashData(i);
+ sqlite3_free(pIdx->aiRowEst);
+ pIdx->aiRowEst = 0;
+ }
+#endif
+
+ if( rc==SQLITE_NOMEM ){
+ sqlite3OomFault(db);
+ }
+ return rc;
+}
+
+
+#endif /* SQLITE_OMIT_ANALYZE */
+
+/************** End of analyze.c *********************************************/
+/************** Begin file attach.c ******************************************/
+/*
+** 2003 April 6
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+** This file contains code used to implement the ATTACH and DETACH commands.
+*/
+/* #include "sqliteInt.h" */
+
+#ifndef SQLITE_OMIT_ATTACH
+/*
+** Resolve an expression that was part of an ATTACH or DETACH statement. This
+** is slightly different from resolving a normal SQL expression, because simple
+** identifiers are treated as strings, not possible column names or aliases.
+**
+** i.e. if the parser sees:
+**
+** ATTACH DATABASE abc AS def
+**
+** it treats the two expressions as literal strings 'abc' and 'def' instead of
+** looking for columns of the same name.
+**
+** This only applies to the root node of pExpr, so the statement:
+**
+** ATTACH DATABASE abc||def AS 'db2'
+**
+** will fail because neither abc nor def can be resolved.
+*/
+static int resolveAttachExpr(NameContext *pName, Expr *pExpr)
+{
+ int rc = SQLITE_OK;
+ if( pExpr ){
+ if( pExpr->op!=TK_ID ){
+ rc = sqlite3ResolveExprNames(pName, pExpr);
+ }else{
+ pExpr->op = TK_STRING;
+ }
+ }
+ return rc;
+}
+
+/*
+** Return true if zName points to a name that may be used to refer to
+** database iDb attached to handle db.
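+**
+** For example, sqlite3DbIsNamed(db, 0, "main") is always true, since
+** database 0 answers to "main" even if its schema name has been changed.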
+*/
+SQLITE_PRIVATE int sqlite3DbIsNamed(sqlite3 *db, int iDb, const char *zName){
+ return (
+ sqlite3StrICmp(db->aDb[iDb].zDbSName, zName)==0
+ || (iDb==0 && sqlite3StrICmp("main", zName)==0)
+ );
+}
+
+/*
+** An SQL user-function registered to do the work of an ATTACH statement. The
+** three arguments to the function come directly from an attach statement:
+**
+** ATTACH DATABASE x AS y KEY z
+**
+** SELECT sqlite_attach(x, y, z)
+**
+** If the optional "KEY z" syntax is omitted, an SQL NULL is passed as the
+** third argument.
+**
+** If the db->init.reopenMemdb flag is set, then instead of attaching a
+** new database, close the database on db->init.iDb and reopen it as an
+** empty MemDB.
+*/
+static void attachFunc(
+ sqlite3_context *context,
+ int NotUsed,
+ sqlite3_value **argv
+){
+ int i;
+ int rc = 0;
+ sqlite3 *db = sqlite3_context_db_handle(context);
+ const char *zName;
+ const char *zFile;
+ char *zPath = 0;
+ char *zErr = 0;
+ unsigned int flags;
+ Db *aNew; /* New array of Db pointers */
+ Db *pNew; /* Db object for the newly attached database */
+ char *zErrDyn = 0;
+ sqlite3_vfs *pVfs;
+
+ UNUSED_PARAMETER(NotUsed);
+ zFile = (const char *)sqlite3_value_text(argv[0]);
+ zName = (const char *)sqlite3_value_text(argv[1]);
+ if( zFile==0 ) zFile = "";
+ if( zName==0 ) zName = "";
+
+#ifdef SQLITE_ENABLE_DESERIALIZE
+# define REOPEN_AS_MEMDB(db) (db->init.reopenMemdb)
+#else
+# define REOPEN_AS_MEMDB(db) (0)
+#endif
+
+ if( REOPEN_AS_MEMDB(db) ){
+ /* This is not a real ATTACH. Instead, this routine is being called
+ ** from sqlite3_deserialize() to close database db->init.iDb and
+ ** reopen it as a MemDB */
+ pVfs = sqlite3_vfs_find("memdb");
+ if( pVfs==0 ) return;
+ pNew = &db->aDb[db->init.iDb];
+ if( pNew->pBt ) sqlite3BtreeClose(pNew->pBt);
+ pNew->pBt = 0;
+ pNew->pSchema = 0;
+ rc = sqlite3BtreeOpen(pVfs, "x\0", db, &pNew->pBt, 0, SQLITE_OPEN_MAIN_DB);
+ }else{
+ /* This is a real ATTACH
+ **
+ ** Check for the following errors:
+ **
+ ** * Too many attached databases,
+ ** * Transaction currently open
+ ** * Specified database name already being used.
+ */
+ if( db->nDb>=db->aLimit[SQLITE_LIMIT_ATTACHED]+2 ){
+ zErrDyn = sqlite3MPrintf(db, "too many attached databases - max %d",
+ db->aLimit[SQLITE_LIMIT_ATTACHED]
+ );
+ goto attach_error;
+ }
+ for(i=0; i<db->nDb; i++){
+ assert( zName );
+ if( sqlite3DbIsNamed(db, i, zName) ){
+ zErrDyn = sqlite3MPrintf(db, "database %s is already in use", zName);
+ goto attach_error;
+ }
+ }
+
+ /* Allocate the new entry in the db->aDb[] array and initialize the schema
+ ** hash tables.
+ */
+ if( db->aDb==db->aDbStatic ){
+ aNew = sqlite3DbMallocRawNN(db, sizeof(db->aDb[0])*3 );
+ if( aNew==0 ) return;
+ memcpy(aNew, db->aDb, sizeof(db->aDb[0])*2);
+ }else{
+ aNew = sqlite3DbRealloc(db, db->aDb, sizeof(db->aDb[0])*(db->nDb+1) );
+ if( aNew==0 ) return;
+ }
+ db->aDb = aNew;
+ pNew = &db->aDb[db->nDb];
+ memset(pNew, 0, sizeof(*pNew));
+
+ /* Open the database file. If the btree is successfully opened, use
+ ** it to obtain the database schema. At this point the schema may
+ ** or may not be initialized.
+ */
+ flags = db->openFlags;
+ rc = sqlite3ParseUri(db->pVfs->zName, zFile, &flags, &pVfs, &zPath, &zErr);
+ if( rc!=SQLITE_OK ){
+ if( rc==SQLITE_NOMEM ) sqlite3OomFault(db);
+ sqlite3_result_error(context, zErr, -1);
+ sqlite3_free(zErr);
+ return;
+ }
+ assert( pVfs );
+ flags |= SQLITE_OPEN_MAIN_DB;
+ rc = sqlite3BtreeOpen(pVfs, zPath, db, &pNew->pBt, 0, flags);
+ db->nDb++;
+ pNew->zDbSName = sqlite3DbStrDup(db, zName);
+ }
+ db->noSharedCache = 0;
+ if( rc==SQLITE_CONSTRAINT ){
+ rc = SQLITE_ERROR;
+ zErrDyn = sqlite3MPrintf(db, "database is already attached");
+ }else if( rc==SQLITE_OK ){
+ Pager *pPager;
+ pNew->pSchema = sqlite3SchemaGet(db, pNew->pBt);
+ if( !pNew->pSchema ){
+ rc = SQLITE_NOMEM_BKPT;
+ }else if( pNew->pSchema->file_format && pNew->pSchema->enc!=ENC(db) ){
+ zErrDyn = sqlite3MPrintf(db,
+ "attached databases must use the same text encoding as main database");
+ rc = SQLITE_ERROR;
+ }
+ sqlite3BtreeEnter(pNew->pBt);
+ pPager = sqlite3BtreePager(pNew->pBt);
+ sqlite3PagerLockingMode(pPager, db->dfltLockMode);
+ sqlite3BtreeSecureDelete(pNew->pBt,
+ sqlite3BtreeSecureDelete(db->aDb[0].pBt,-1) );
+#ifndef SQLITE_OMIT_PAGER_PRAGMAS
+ sqlite3BtreeSetPagerFlags(pNew->pBt,
+ PAGER_SYNCHRONOUS_FULL | (db->flags & PAGER_FLAGS_MASK));
+#endif
+ sqlite3BtreeLeave(pNew->pBt);
+ }
+ pNew->safety_level = SQLITE_DEFAULT_SYNCHRONOUS+1;
+ if( rc==SQLITE_OK && pNew->zDbSName==0 ){
+ rc = SQLITE_NOMEM_BKPT;
+ }
+ sqlite3_free_filename( zPath );
+
+ /* If the file was opened successfully, read the schema for the new database.
+ ** If this fails, or if opening the file failed, then close the file and
+ ** remove the entry from the db->aDb[] array. i.e. put everything back the
+ ** way we found it.
+ */
+ if( rc==SQLITE_OK ){
+ sqlite3BtreeEnterAll(db);
+ db->init.iDb = 0;
+ db->mDbFlags &= ~(DBFLAG_SchemaKnownOk);
+ if( !REOPEN_AS_MEMDB(db) ){
+ rc = sqlite3Init(db, &zErrDyn);
+ }
+ sqlite3BtreeLeaveAll(db);
+ assert( zErrDyn==0 || rc!=SQLITE_OK );
+ }
+#ifdef SQLITE_USER_AUTHENTICATION
+ if( rc==SQLITE_OK && !REOPEN_AS_MEMDB(db) ){
+ u8 newAuth = 0;
+ rc = sqlite3UserAuthCheckLogin(db, zName, &newAuth);
+ if( newAuth<db->auth.authLevel ){
+ rc = SQLITE_AUTH_USER;
+ }
+ }
+#endif
+ if( rc ){
+ if( !REOPEN_AS_MEMDB(db) ){
+ int iDb = db->nDb - 1;
+ assert( iDb>=2 );
+ if( db->aDb[iDb].pBt ){
+ sqlite3BtreeClose(db->aDb[iDb].pBt);
+ db->aDb[iDb].pBt = 0;
+ db->aDb[iDb].pSchema = 0;
+ }
+ sqlite3ResetAllSchemasOfConnection(db);
+ db->nDb = iDb;
+ if( rc==SQLITE_NOMEM || rc==SQLITE_IOERR_NOMEM ){
+ sqlite3OomFault(db);
+ sqlite3DbFree(db, zErrDyn);
+ zErrDyn = sqlite3MPrintf(db, "out of memory");
+ }else if( zErrDyn==0 ){
+ zErrDyn = sqlite3MPrintf(db, "unable to open database: %s", zFile);
+ }
+ }
+ goto attach_error;
+ }
+
+ return;
+
+attach_error:
+ /* Return an error if we get here */
+ if( zErrDyn ){
+ sqlite3_result_error(context, zErrDyn, -1);
+ sqlite3DbFree(db, zErrDyn);
+ }
+ if( rc ) sqlite3_result_error_code(context, rc);
+}
+
+/*
+** An SQL user-function registered to do the work of a DETACH statement. The
+** single argument to the function comes directly from a DETACH statement:
+**
+** DETACH DATABASE x
+**
+** SELECT sqlite_detach(x)
+*/
+static void detachFunc(
+ sqlite3_context *context,
+ int NotUsed,
+ sqlite3_value **argv
+){
+ const char *zName = (const char *)sqlite3_value_text(argv[0]);
+ sqlite3 *db = sqlite3_context_db_handle(context);
+ int i;
+ Db *pDb = 0;
+ HashElem *pEntry;
+ char zErr[128];
+
+ UNUSED_PARAMETER(NotUsed);
+
+ if( zName==0 ) zName = "";
+ for(i=0; i<db->nDb; i++){
+ pDb = &db->aDb[i];
+ if( pDb->pBt==0 ) continue;
+ if( sqlite3DbIsNamed(db, i, zName) ) break;
+ }
+
+ if( i>=db->nDb ){
+ sqlite3_snprintf(sizeof(zErr),zErr, "no such database: %s", zName);
+ goto detach_error;
+ }
+ if( i<2 ){
+ sqlite3_snprintf(sizeof(zErr),zErr, "cannot detach database %s", zName);
+ goto detach_error;
+ }
+ if( sqlite3BtreeIsInReadTrans(pDb->pBt) || sqlite3BtreeIsInBackup(pDb->pBt) ){
+ sqlite3_snprintf(sizeof(zErr),zErr, "database %s is locked", zName);
+ goto detach_error;
+ }
+
+ /* If any TEMP triggers reference the schema being detached, move those
+ ** triggers to reference the TEMP schema itself. */
+ assert( db->aDb[1].pSchema );
+ pEntry = sqliteHashFirst(&db->aDb[1].pSchema->trigHash);
+ while( pEntry ){
+ Trigger *pTrig = (Trigger*)sqliteHashData(pEntry);
+ if( pTrig->pTabSchema==pDb->pSchema ){
+ pTrig->pTabSchema = pTrig->pSchema;
+ }
+ pEntry = sqliteHashNext(pEntry);
+ }
+
+ sqlite3BtreeClose(pDb->pBt);
+ pDb->pBt = 0;
+ pDb->pSchema = 0;
+ sqlite3CollapseDatabaseArray(db);
+ return;
+
+detach_error:
+ sqlite3_result_error(context, zErr, -1);
+}
+
+/*
+** This procedure generates VDBE code for a single invocation of either the
+** sqlite_detach() or sqlite_attach() SQL user functions.
+*/
+static void codeAttach(
+ Parse *pParse, /* The parser context */
+ int type, /* Either SQLITE_ATTACH or SQLITE_DETACH */
+ FuncDef const *pFunc,/* FuncDef wrapper for detachFunc() or attachFunc() */
+ Expr *pAuthArg, /* Expression to pass to authorization callback */
+ Expr *pFilename, /* Name of database file */
+ Expr *pDbname, /* Name of the database to use internally */
+ Expr *pKey /* Database key for encryption extension */
+){
+ int rc;
+ NameContext sName;
+ Vdbe *v;
+ sqlite3* db = pParse->db;
+ int regArgs;
+
+ if( pParse->nErr ) goto attach_end;
+ memset(&sName, 0, sizeof(NameContext));
+ sName.pParse = pParse;
+
+ if(
+ SQLITE_OK!=(rc = resolveAttachExpr(&sName, pFilename)) ||
+ SQLITE_OK!=(rc = resolveAttachExpr(&sName, pDbname)) ||
+ SQLITE_OK!=(rc = resolveAttachExpr(&sName, pKey))
+ ){
+ goto attach_end;
+ }
+
+#ifndef SQLITE_OMIT_AUTHORIZATION
+ if( pAuthArg ){
+ char *zAuthArg;
+ if( pAuthArg->op==TK_STRING ){
+ zAuthArg = pAuthArg->u.zToken;
+ }else{
+ zAuthArg = 0;
+ }
+ rc = sqlite3AuthCheck(pParse, type, zAuthArg, 0, 0);
+    if( rc!=SQLITE_OK ){
+ goto attach_end;
+ }
+ }
+#endif /* SQLITE_OMIT_AUTHORIZATION */
+
+
+ v = sqlite3GetVdbe(pParse);
+ regArgs = sqlite3GetTempRange(pParse, 4);
+ sqlite3ExprCode(pParse, pFilename, regArgs);
+ sqlite3ExprCode(pParse, pDbname, regArgs+1);
+ sqlite3ExprCode(pParse, pKey, regArgs+2);
+
+ assert( v || db->mallocFailed );
+ if( v ){
+ sqlite3VdbeAddFunctionCall(pParse, 0, regArgs+3-pFunc->nArg, regArgs+3,
+ pFunc->nArg, pFunc, 0);
+ /* Code an OP_Expire. For an ATTACH statement, set P1 to true (expire this
+ ** statement only). For DETACH, set it to false (expire all existing
+ ** statements).
+ */
+ sqlite3VdbeAddOp1(v, OP_Expire, (type==SQLITE_ATTACH));
+ }
+
+attach_end:
+ sqlite3ExprDelete(db, pFilename);
+ sqlite3ExprDelete(db, pDbname);
+ sqlite3ExprDelete(db, pKey);
+}
+
+/*
+** Called by the parser to compile a DETACH statement.
+**
+** DETACH pDbname
+*/
+SQLITE_PRIVATE void sqlite3Detach(Parse *pParse, Expr *pDbname){
+ static const FuncDef detach_func = {
+ 1, /* nArg */
+ SQLITE_UTF8, /* funcFlags */
+ 0, /* pUserData */
+ 0, /* pNext */
+ detachFunc, /* xSFunc */
+ 0, /* xFinalize */
+ 0, 0, /* xValue, xInverse */
+ "sqlite_detach", /* zName */
+ {0}
+ };
+ codeAttach(pParse, SQLITE_DETACH, &detach_func, pDbname, 0, 0, pDbname);
+}
+
+/*
+** Called by the parser to compile an ATTACH statement.
+**
+** ATTACH p AS pDbname KEY pKey
+*/
+SQLITE_PRIVATE void sqlite3Attach(Parse *pParse, Expr *p, Expr *pDbname, Expr *pKey){
+ static const FuncDef attach_func = {
+ 3, /* nArg */
+ SQLITE_UTF8, /* funcFlags */
+ 0, /* pUserData */
+ 0, /* pNext */
+ attachFunc, /* xSFunc */
+ 0, /* xFinalize */
+ 0, 0, /* xValue, xInverse */
+ "sqlite_attach", /* zName */
+ {0}
+ };
+ codeAttach(pParse, SQLITE_ATTACH, &attach_func, p, p, pDbname, pKey);
+}
+#endif /* SQLITE_OMIT_ATTACH */
+
+/*
+** Initialize a DbFixer structure. This routine must be called prior
+** to passing the structure to one of the sqliteFixAAAA() routines below.
+*/
+SQLITE_PRIVATE void sqlite3FixInit(
+ DbFixer *pFix, /* The fixer to be initialized */
+ Parse *pParse, /* Error messages will be written here */
+ int iDb, /* This is the database that must be used */
+ const char *zType, /* "view", "trigger", or "index" */
+ const Token *pName /* Name of the view, trigger, or index */
+){
+ sqlite3 *db;
+
+ db = pParse->db;
+ assert( db->nDb>iDb );
+ pFix->pParse = pParse;
+ pFix->zDb = db->aDb[iDb].zDbSName;
+ pFix->pSchema = db->aDb[iDb].pSchema;
+ pFix->zType = zType;
+ pFix->pName = pName;
+ pFix->bTemp = (iDb==1);
+}
+
+/*
+** The following set of routines walk through the parse tree and assign
+** a specific database to all table references where the database name
+** was left unspecified in the original SQL statement. The pFix structure
+** must have been initialized by a prior call to sqlite3FixInit().
+**
+** These routines are used to make sure that an index, trigger, or
+** view in one database does not refer to objects in a different database.
+** (Exception: indices, triggers, and views in the TEMP database are
+** allowed to refer to anything.) If a reference is explicitly made
+** to an object in a different database, an error message is added to
+** pParse->zErrMsg and these routines return non-zero. If everything
+** checks out, these routines return 0.
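+**
+** For example, "CREATE VIEW aux.v1 AS SELECT * FROM main.t1" fails with
+** such an error, while the equivalent view created in the TEMP database
+** is allowed to reference main.t1.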
+*/
+SQLITE_PRIVATE int sqlite3FixSrcList(
+ DbFixer *pFix, /* Context of the fixation */
+ SrcList *pList /* The Source list to check and modify */
+){
+ int i;
+ struct SrcList_item *pItem;
+ sqlite3 *db = pFix->pParse->db;
+ int iDb = sqlite3FindDbName(db, pFix->zDb);
+
+ if( NEVER(pList==0) ) return 0;
+
+ for(i=0, pItem=pList->a; i<pList->nSrc; i++, pItem++){
+ if( pFix->bTemp==0 ){
+ if( pItem->zDatabase && iDb!=sqlite3FindDbName(db, pItem->zDatabase) ){
+ sqlite3ErrorMsg(pFix->pParse,
+ "%s %T cannot reference objects in database %s",
+ pFix->zType, pFix->pName, pItem->zDatabase);
+ return 1;
+ }
+ sqlite3DbFree(db, pItem->zDatabase);
+ pItem->zDatabase = 0;
+ pItem->pSchema = pFix->pSchema;
+ pItem->fg.fromDDL = 1;
+ }
+#if !defined(SQLITE_OMIT_VIEW) || !defined(SQLITE_OMIT_TRIGGER)
+ if( sqlite3FixSelect(pFix, pItem->pSelect) ) return 1;
+ if( sqlite3FixExpr(pFix, pItem->pOn) ) return 1;
+#endif
+ if( pItem->fg.isTabFunc && sqlite3FixExprList(pFix, pItem->u1.pFuncArg) ){
+ return 1;
+ }
+ }
+ return 0;
+}
+#if !defined(SQLITE_OMIT_VIEW) || !defined(SQLITE_OMIT_TRIGGER)
+SQLITE_PRIVATE int sqlite3FixSelect(
+ DbFixer *pFix, /* Context of the fixation */
+ Select *pSelect /* The SELECT statement to be fixed to one database */
+){
+ while( pSelect ){
+ if( sqlite3FixExprList(pFix, pSelect->pEList) ){
+ return 1;
+ }
+ if( sqlite3FixSrcList(pFix, pSelect->pSrc) ){
+ return 1;
+ }
+ if( sqlite3FixExpr(pFix, pSelect->pWhere) ){
+ return 1;
+ }
+ if( sqlite3FixExprList(pFix, pSelect->pGroupBy) ){
+ return 1;
+ }
+ if( sqlite3FixExpr(pFix, pSelect->pHaving) ){
+ return 1;
+ }
+ if( sqlite3FixExprList(pFix, pSelect->pOrderBy) ){
+ return 1;
+ }
+ if( sqlite3FixExpr(pFix, pSelect->pLimit) ){
+ return 1;
+ }
+ if( pSelect->pWith ){
+ int i;
+ for(i=0; i<pSelect->pWith->nCte; i++){
+ if( sqlite3FixSelect(pFix, pSelect->pWith->a[i].pSelect) ){
+ return 1;
+ }
+ }
+ }
+ pSelect = pSelect->pPrior;
+ }
+ return 0;
+}
+SQLITE_PRIVATE int sqlite3FixExpr(
+ DbFixer *pFix, /* Context of the fixation */
+ Expr *pExpr /* The expression to be fixed to one database */
+){
+ while( pExpr ){
+ if( !pFix->bTemp ) ExprSetProperty(pExpr, EP_FromDDL);
+ if( pExpr->op==TK_VARIABLE ){
+ if( pFix->pParse->db->init.busy ){
+ pExpr->op = TK_NULL;
+ }else{
+ sqlite3ErrorMsg(pFix->pParse, "%s cannot use variables", pFix->zType);
+ return 1;
+ }
+ }
+ if( ExprHasProperty(pExpr, EP_TokenOnly|EP_Leaf) ) break;
+ if( ExprHasProperty(pExpr, EP_xIsSelect) ){
+ if( sqlite3FixSelect(pFix, pExpr->x.pSelect) ) return 1;
+ }else{
+ if( sqlite3FixExprList(pFix, pExpr->x.pList) ) return 1;
+ }
+ if( sqlite3FixExpr(pFix, pExpr->pRight) ){
+ return 1;
+ }
+ pExpr = pExpr->pLeft;
+ }
+ return 0;
+}
+SQLITE_PRIVATE int sqlite3FixExprList(
+ DbFixer *pFix, /* Context of the fixation */
+ ExprList *pList /* The expression to be fixed to one database */
+){
+ int i;
+ struct ExprList_item *pItem;
+ if( pList==0 ) return 0;
+ for(i=0, pItem=pList->a; i<pList->nExpr; i++, pItem++){
+ if( sqlite3FixExpr(pFix, pItem->pExpr) ){
+ return 1;
+ }
+ }
+ return 0;
+}
+#endif
+
+#ifndef SQLITE_OMIT_TRIGGER
+SQLITE_PRIVATE int sqlite3FixTriggerStep(
+ DbFixer *pFix, /* Context of the fixation */
+  TriggerStep *pStep   /* The trigger step to be fixed to one database */
+){
+ while( pStep ){
+ if( sqlite3FixSelect(pFix, pStep->pSelect) ){
+ return 1;
+ }
+ if( sqlite3FixExpr(pFix, pStep->pWhere) ){
+ return 1;
+ }
+ if( sqlite3FixExprList(pFix, pStep->pExprList) ){
+ return 1;
+ }
+ if( pStep->pFrom && sqlite3FixSrcList(pFix, pStep->pFrom) ){
+ return 1;
+ }
+#ifndef SQLITE_OMIT_UPSERT
+ if( pStep->pUpsert ){
+ Upsert *pUp = pStep->pUpsert;
+ if( sqlite3FixExprList(pFix, pUp->pUpsertTarget)
+ || sqlite3FixExpr(pFix, pUp->pUpsertTargetWhere)
+ || sqlite3FixExprList(pFix, pUp->pUpsertSet)
+ || sqlite3FixExpr(pFix, pUp->pUpsertWhere)
+ ){
+ return 1;
+ }
+ }
+#endif
+ pStep = pStep->pNext;
+ }
+ return 0;
+}
+#endif
+
+/************** End of attach.c **********************************************/
+/************** Begin file auth.c ********************************************/
+/*
+** 2003 January 11
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+** This file contains code used to implement the sqlite3_set_authorizer()
+** API. This facility is an optional feature of the library. Embedded
+** systems that do not need this facility may omit it by recompiling
+** the library with -DSQLITE_OMIT_AUTHORIZATION=1
+*/
+/* #include "sqliteInt.h" */
+
+/*
+** All of the code in this file may be omitted by defining a single
+** macro.
+*/
+#ifndef SQLITE_OMIT_AUTHORIZATION
+
+/*
+** Set or clear the access authorization function.
+**
+** The access authorization function is called during the compilation
+** phase to verify that the user has read and/or write access permission on
+** various fields of the database. The first argument to the auth function
+** is a copy of the 3rd argument to this routine. The second argument
+** to the auth function is one of these constants:
+**
+** SQLITE_CREATE_INDEX
+** SQLITE_CREATE_TABLE
+** SQLITE_CREATE_TEMP_INDEX
+** SQLITE_CREATE_TEMP_TABLE
+** SQLITE_CREATE_TEMP_TRIGGER
+** SQLITE_CREATE_TEMP_VIEW
+** SQLITE_CREATE_TRIGGER
+** SQLITE_CREATE_VIEW
+** SQLITE_DELETE
+** SQLITE_DROP_INDEX
+** SQLITE_DROP_TABLE
+** SQLITE_DROP_TEMP_INDEX
+** SQLITE_DROP_TEMP_TABLE
+** SQLITE_DROP_TEMP_TRIGGER
+** SQLITE_DROP_TEMP_VIEW
+** SQLITE_DROP_TRIGGER
+** SQLITE_DROP_VIEW
+** SQLITE_INSERT
+** SQLITE_PRAGMA
+** SQLITE_READ
+** SQLITE_SELECT
+** SQLITE_TRANSACTION
+** SQLITE_UPDATE
+**
+** The third and fourth arguments to the auth function are the name of
+** the table and the column that are being accessed. The auth function
+** should return either SQLITE_OK, SQLITE_DENY, or SQLITE_IGNORE. If
+** SQLITE_OK is returned, it means that access is allowed. SQLITE_DENY
+** means that the SQL statement will never run - the sqlite3_exec() call
+** will return with an error. SQLITE_IGNORE means that the SQL statement
+** should run but attempts to read the specified column will return NULL
+** and attempts to write the column will be ignored.
+**
+** Setting the auth function to NULL disables this hook. The default
+** setting of the auth function is NULL.
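+**
+** A minimal illustrative callback (a sketch only; the function name is
+** hypothetical and the callback is not part of this library):
+**
+**    static int xAuth(void *pArg, int op, const char *z1, const char *z2,
+**                     const char *zDb, const char *zCtx){
+**      if( op==SQLITE_DELETE ) return SQLITE_DENY;   /* forbid DELETE */
+**      return SQLITE_OK;                             /* allow all else */
+**    }
+**    ... sqlite3_set_authorizer(db, xAuth, 0); ...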
+*/
+SQLITE_API int sqlite3_set_authorizer(
+ sqlite3 *db,
+ int (*xAuth)(void*,int,const char*,const char*,const char*,const char*),
+ void *pArg
+){
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( !sqlite3SafetyCheckOk(db) ) return SQLITE_MISUSE_BKPT;
+#endif
+ sqlite3_mutex_enter(db->mutex);
+ db->xAuth = (sqlite3_xauth)xAuth;
+ db->pAuthArg = pArg;
+ if( db->xAuth ) sqlite3ExpirePreparedStatements(db, 1);
+ sqlite3_mutex_leave(db->mutex);
+ return SQLITE_OK;
+}
+
+/*
+** Write an error message into pParse->zErrMsg that explains that the
+** user-supplied authorization function returned an illegal value.
+*/
+static void sqliteAuthBadReturnCode(Parse *pParse){
+ sqlite3ErrorMsg(pParse, "authorizer malfunction");
+ pParse->rc = SQLITE_ERROR;
+}
+
+/*
+** Invoke the authorization callback for permission to read column zCol from
+** table zTab in database zDb. This function assumes that an authorization
+** callback has been registered (i.e. that sqlite3.xAuth is not NULL).
+**
+** If SQLITE_IGNORE is returned and pExpr is not NULL, then pExpr is changed
+** to an SQL NULL expression. Otherwise, if pExpr is NULL, then SQLITE_IGNORE
+** is treated as SQLITE_DENY. In this case an error is left in pParse.
+*/
+SQLITE_PRIVATE int sqlite3AuthReadCol(
+ Parse *pParse, /* The parser context */
+ const char *zTab, /* Table name */
+ const char *zCol, /* Column name */
+ int iDb /* Index of containing database. */
+){
+ sqlite3 *db = pParse->db; /* Database handle */
+ char *zDb = db->aDb[iDb].zDbSName; /* Schema name of attached database */
+ int rc; /* Auth callback return code */
+
+ if( db->init.busy ) return SQLITE_OK;
+ rc = db->xAuth(db->pAuthArg, SQLITE_READ, zTab,zCol,zDb,pParse->zAuthContext
+#ifdef SQLITE_USER_AUTHENTICATION
+ ,db->auth.zAuthUser
+#endif
+ );
+ if( rc==SQLITE_DENY ){
+ char *z = sqlite3_mprintf("%s.%s", zTab, zCol);
+ if( db->nDb>2 || iDb!=0 ) z = sqlite3_mprintf("%s.%z", zDb, z);
+ sqlite3ErrorMsg(pParse, "access to %z is prohibited", z);
+ pParse->rc = SQLITE_AUTH;
+ }else if( rc!=SQLITE_IGNORE && rc!=SQLITE_OK ){
+ sqliteAuthBadReturnCode(pParse);
+ }
+ return rc;
+}
+
+/*
+** The pExpr should be a TK_COLUMN expression. The table referred to
+** is in pTabList or else it is the NEW or OLD table of a trigger.
+** Check to see if it is OK to read this particular column.
+**
+** If the auth function returns SQLITE_IGNORE, change the TK_COLUMN
+** instruction into a TK_NULL. If the auth function returns SQLITE_DENY,
+** then generate an error.
+*/
+SQLITE_PRIVATE void sqlite3AuthRead(
+ Parse *pParse, /* The parser context */
+ Expr *pExpr, /* The expression to check authorization on */
+ Schema *pSchema, /* The schema of the expression */
+  SrcList *pTabList     /* All tables that pExpr might refer to */
+){
+ sqlite3 *db = pParse->db;
+ Table *pTab = 0; /* The table being read */
+ const char *zCol; /* Name of the column of the table */
+ int iSrc; /* Index in pTabList->a[] of table being read */
+ int iDb; /* The index of the database the expression refers to */
+ int iCol; /* Index of column in table */
+
+ assert( pExpr->op==TK_COLUMN || pExpr->op==TK_TRIGGER );
+ assert( !IN_RENAME_OBJECT || db->xAuth==0 );
+ if( db->xAuth==0 ) return;
+ iDb = sqlite3SchemaToIndex(pParse->db, pSchema);
+ if( iDb<0 ){
+ /* An attempt to read a column out of a subquery or other
+ ** temporary table. */
+ return;
+ }
+
+ if( pExpr->op==TK_TRIGGER ){
+ pTab = pParse->pTriggerTab;
+ }else{
+ assert( pTabList );
+ for(iSrc=0; ALWAYS(iSrc<pTabList->nSrc); iSrc++){
+ if( pExpr->iTable==pTabList->a[iSrc].iCursor ){
+ pTab = pTabList->a[iSrc].pTab;
+ break;
+ }
+ }
+ }
+ iCol = pExpr->iColumn;
+ if( NEVER(pTab==0) ) return;
+
+ if( iCol>=0 ){
+ assert( iCol<pTab->nCol );
+ zCol = pTab->aCol[iCol].zName;
+ }else if( pTab->iPKey>=0 ){
+ assert( pTab->iPKey<pTab->nCol );
+ zCol = pTab->aCol[pTab->iPKey].zName;
+ }else{
+ zCol = "ROWID";
+ }
+ assert( iDb>=0 && iDb<db->nDb );
+ if( SQLITE_IGNORE==sqlite3AuthReadCol(pParse, pTab->zName, zCol, iDb) ){
+ pExpr->op = TK_NULL;
+ }
+}
+
+/*
+** Do an authorization check using the code and arguments given. Return
+** either SQLITE_OK (zero) or SQLITE_IGNORE or SQLITE_DENY. If SQLITE_DENY
+** is returned, then the error count and error message in pParse are
+** modified appropriately.
+*/
+SQLITE_PRIVATE int sqlite3AuthCheck(
+ Parse *pParse,
+ int code,
+ const char *zArg1,
+ const char *zArg2,
+ const char *zArg3
+){
+ sqlite3 *db = pParse->db;
+ int rc;
+
+ /* Don't do any authorization checks if the database is initialising
+ ** or if the parser is being invoked from within sqlite3_declare_vtab.
+ */
+ assert( !IN_RENAME_OBJECT || db->xAuth==0 );
+ if( db->init.busy || IN_SPECIAL_PARSE ){
+ return SQLITE_OK;
+ }
+
+ if( db->xAuth==0 ){
+ return SQLITE_OK;
+ }
+
+ /* EVIDENCE-OF: R-43249-19882 The third through sixth parameters to the
+ ** callback are either NULL pointers or zero-terminated strings that
+ ** contain additional details about the action to be authorized.
+ **
+ ** The following testcase() macros show that any of the 3rd through 6th
+ ** parameters can be either NULL or a string. */
+ testcase( zArg1==0 );
+ testcase( zArg2==0 );
+ testcase( zArg3==0 );
+ testcase( pParse->zAuthContext==0 );
+
+ rc = db->xAuth(db->pAuthArg, code, zArg1, zArg2, zArg3, pParse->zAuthContext
+#ifdef SQLITE_USER_AUTHENTICATION
+ ,db->auth.zAuthUser
+#endif
+ );
+ if( rc==SQLITE_DENY ){
+ sqlite3ErrorMsg(pParse, "not authorized");
+ pParse->rc = SQLITE_AUTH;
+ }else if( rc!=SQLITE_OK && rc!=SQLITE_IGNORE ){
+ rc = SQLITE_DENY;
+ sqliteAuthBadReturnCode(pParse);
+ }
+ return rc;
+}
+
+/*
+** Push an authorization context. After this routine is called, the
+** zArg3 argument to authorization callbacks will be zContext until
+** popped. Or if pParse==0, this routine is a no-op.
+*/
+SQLITE_PRIVATE void sqlite3AuthContextPush(
+ Parse *pParse,
+ AuthContext *pContext,
+ const char *zContext
+){
+ assert( pParse );
+ pContext->pParse = pParse;
+ pContext->zAuthContext = pParse->zAuthContext;
+ pParse->zAuthContext = zContext;
+}
+
+/*
+** Pop an authorization context that was previously pushed
+** by sqlite3AuthContextPush
+*/
+SQLITE_PRIVATE void sqlite3AuthContextPop(AuthContext *pContext){
+ if( pContext->pParse ){
+ pContext->pParse->zAuthContext = pContext->zAuthContext;
+ pContext->pParse = 0;
+ }
+}
+
+#endif /* SQLITE_OMIT_AUTHORIZATION */
+
+/************** End of auth.c ************************************************/
+/************** Begin file build.c *******************************************/
+/*
+** 2001 September 15
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+** This file contains C code routines that are called by the SQLite parser
+** when syntax rules are reduced. The routines in this file handle the
+** following kinds of SQL syntax:
+**
+** CREATE TABLE
+** DROP TABLE
+** CREATE INDEX
+** DROP INDEX
+** creating ID lists
+** BEGIN TRANSACTION
+** COMMIT
+** ROLLBACK
+*/
+/* #include "sqliteInt.h" */
+
+#ifndef SQLITE_OMIT_SHARED_CACHE
+/*
+** The TableLock structure is only used by the sqlite3TableLock() and
+** codeTableLocks() functions.
+*/
+struct TableLock {
+ int iDb; /* The database containing the table to be locked */
+ Pgno iTab; /* The root page of the table to be locked */
+ u8 isWriteLock; /* True for write lock. False for a read lock */
+ const char *zLockName; /* Name of the table */
+};
+
+/*
+** Record the fact that we want to lock a table at run-time.
+**
+** The table to be locked has root page iTab and is found in database iDb.
+** A read or a write lock can be taken depending on isWritelock.
+**
+** This routine just records the fact that the lock is desired. The
+** code to make the lock occur is generated by a later call to
+** codeTableLocks() which occurs during sqlite3FinishCoding().
+*/
+SQLITE_PRIVATE void sqlite3TableLock(
+ Parse *pParse, /* Parsing context */
+ int iDb, /* Index of the database containing the table to lock */
+ Pgno iTab, /* Root page number of the table to be locked */
+ u8 isWriteLock, /* True for a write lock */
+ const char *zName /* Name of the table to be locked */
+){
+ Parse *pToplevel = sqlite3ParseToplevel(pParse);
+ int i;
+ int nBytes;
+ TableLock *p;
+ assert( iDb>=0 );
+
+ if( iDb==1 ) return;
+ if( !sqlite3BtreeSharable(pParse->db->aDb[iDb].pBt) ) return;
+ for(i=0; i<pToplevel->nTableLock; i++){
+ p = &pToplevel->aTableLock[i];
+ if( p->iDb==iDb && p->iTab==iTab ){
+ p->isWriteLock = (p->isWriteLock || isWriteLock);
+ return;
+ }
+ }
+
+ nBytes = sizeof(TableLock) * (pToplevel->nTableLock+1);
+ pToplevel->aTableLock =
+ sqlite3DbReallocOrFree(pToplevel->db, pToplevel->aTableLock, nBytes);
+ if( pToplevel->aTableLock ){
+ p = &pToplevel->aTableLock[pToplevel->nTableLock++];
+ p->iDb = iDb;
+ p->iTab = iTab;
+ p->isWriteLock = isWriteLock;
+ p->zLockName = zName;
+ }else{
+ pToplevel->nTableLock = 0;
+ sqlite3OomFault(pToplevel->db);
+ }
+}
+
+/*
+** Code an OP_TableLock instruction for each table locked by the
+** statement (configured by calls to sqlite3TableLock()).
+*/
+static void codeTableLocks(Parse *pParse){
+ int i;
+ Vdbe *pVdbe;
+
+ pVdbe = sqlite3GetVdbe(pParse);
+ assert( pVdbe!=0 ); /* sqlite3GetVdbe cannot fail: VDBE already allocated */
+
+ for(i=0; i<pParse->nTableLock; i++){
+ TableLock *p = &pParse->aTableLock[i];
+ int p1 = p->iDb;
+ sqlite3VdbeAddOp4(pVdbe, OP_TableLock, p1, p->iTab, p->isWriteLock,
+ p->zLockName, P4_STATIC);
+ }
+}
+#else
+ #define codeTableLocks(x)
+#endif
+
+/*
+** Return TRUE if the given yDbMask object is empty - if it contains no
+** 1 bits. This routine is used by the DbMaskAllZero() and DbMaskNotZero()
+** macros when SQLITE_MAX_ATTACHED is greater than 30.
+*/
+#if SQLITE_MAX_ATTACHED>30
+SQLITE_PRIVATE int sqlite3DbMaskAllZero(yDbMask m){
+ int i;
+ for(i=0; i<sizeof(yDbMask); i++) if( m[i] ) return 0;
+ return 1;
+}
+#endif
+
+/*
+** This routine is called after a single SQL statement has been
+** parsed and a VDBE program to execute that statement has been
+** prepared. This routine puts the finishing touches on the
+** VDBE program and resets the pParse structure for the next
+** parse.
+**
+** Note that if an error occurred, it might be the case that
+** no VDBE code was generated.
+*/
+SQLITE_PRIVATE void sqlite3FinishCoding(Parse *pParse){
+ sqlite3 *db;
+ Vdbe *v;
+
+ assert( pParse->pToplevel==0 );
+ db = pParse->db;
+ if( pParse->nested ) return;
+ if( db->mallocFailed || pParse->nErr ){
+ if( pParse->rc==SQLITE_OK ) pParse->rc = SQLITE_ERROR;
+ return;
+ }
+
+ /* Begin by generating some termination code at the end of the
+ ** vdbe program
+ */
+ v = sqlite3GetVdbe(pParse);
+ assert( !pParse->isMultiWrite
+ || sqlite3VdbeAssertMayAbort(v, pParse->mayAbort));
+ if( v ){
+ sqlite3VdbeAddOp0(v, OP_Halt);
+
+#if SQLITE_USER_AUTHENTICATION
+ if( pParse->nTableLock>0 && db->init.busy==0 ){
+ sqlite3UserAuthInit(db);
+ if( db->auth.authLevel<UAUTH_User ){
+ sqlite3ErrorMsg(pParse, "user not authenticated");
+ pParse->rc = SQLITE_AUTH_USER;
+ return;
+ }
+ }
+#endif
+
+ /* The cookie mask contains one bit for each database file open.
+ ** (Bit 0 is for main, bit 1 is for temp, and so forth.) Bits are
+ ** set for each database that is used. Generate code to start a
+ ** transaction on each used database and to verify the schema cookie
+ ** on each used database.
+ */
+ if( db->mallocFailed==0
+ && (DbMaskNonZero(pParse->cookieMask) || pParse->pConstExpr)
+ ){
+ int iDb, i;
+ assert( sqlite3VdbeGetOp(v, 0)->opcode==OP_Init );
+ sqlite3VdbeJumpHere(v, 0);
+ for(iDb=0; iDb<db->nDb; iDb++){
+ Schema *pSchema;
+ if( DbMaskTest(pParse->cookieMask, iDb)==0 ) continue;
+ sqlite3VdbeUsesBtree(v, iDb);
+ pSchema = db->aDb[iDb].pSchema;
+ sqlite3VdbeAddOp4Int(v,
+ OP_Transaction, /* Opcode */
+ iDb, /* P1 */
+ DbMaskTest(pParse->writeMask,iDb), /* P2 */
+ pSchema->schema_cookie, /* P3 */
+ pSchema->iGeneration /* P4 */
+ );
+ if( db->init.busy==0 ) sqlite3VdbeChangeP5(v, 1);
+ VdbeComment((v,
+ "usesStmtJournal=%d", pParse->mayAbort && pParse->isMultiWrite));
+ }
+#ifndef SQLITE_OMIT_VIRTUALTABLE
+ for(i=0; i<pParse->nVtabLock; i++){
+ char *vtab = (char *)sqlite3GetVTable(db, pParse->apVtabLock[i]);
+ sqlite3VdbeAddOp4(v, OP_VBegin, 0, 0, 0, vtab, P4_VTAB);
+ }
+ pParse->nVtabLock = 0;
+#endif
+
+ /* Once all the cookies have been verified and transactions opened,
+ ** obtain the required table-locks. This is a no-op unless the
+ ** shared-cache feature is enabled.
+ */
+ codeTableLocks(pParse);
+
+ /* Initialize any AUTOINCREMENT data structures required.
+ */
+ sqlite3AutoincrementBegin(pParse);
+
+      /* Code constant expressions that were factored out of inner loops.
+ **
+ ** The pConstExpr list might also contain expressions that we simply
+ ** want to keep around until the Parse object is deleted. Such
+ ** expressions have iConstExprReg==0. Do not generate code for
+ ** those expressions, of course.
+ */
+ if( pParse->pConstExpr ){
+ ExprList *pEL = pParse->pConstExpr;
+ pParse->okConstFactor = 0;
+ for(i=0; i<pEL->nExpr; i++){
+ int iReg = pEL->a[i].u.iConstExprReg;
+ if( iReg>0 ){
+ sqlite3ExprCode(pParse, pEL->a[i].pExpr, iReg);
+ }
+ }
+ }
+
+ /* Finally, jump back to the beginning of the executable code. */
+ sqlite3VdbeGoto(v, 1);
+ }
+ }
+
+
+ /* Get the VDBE program ready for execution
+ */
+ if( v && pParse->nErr==0 && !db->mallocFailed ){
+    /* A minimum of one cursor is required if autoincrement is used.
+    ** See ticket [a696379c1f08866] */
+ assert( pParse->pAinc==0 || pParse->nTab>0 );
+ sqlite3VdbeMakeReady(v, pParse);
+ pParse->rc = SQLITE_DONE;
+ }else{
+ pParse->rc = SQLITE_ERROR;
+ }
+}
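+
+/* Illustrative layout (an informal sketch added for orientation, not
+** part of the original comments): a finished program for a simple
+** statement is arranged roughly as follows:
+**
+**   addr 0:   OP_Init      jumps past the body to the prologue below
+**   addr 1..: ...the main program body...
+**             OP_Halt
+**             OP_Transaction opcodes, cookie checks, table locks,
+**             autoincrement setup (generated above)
+**             OP_Goto 1    jump back to the start of the body
+*/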
+
+/*
+** Run the parser and code generator recursively in order to generate
+** code for the SQL statement given onto the end of the pParse context
+** currently under construction. When the parser is run recursively
+** this way, the final OP_Halt is not appended and other initialization
+** and finalization steps are omitted because those are handled by the
+** outermost parser.
+**
+** Not everything is nestable. This facility is designed to permit
+** INSERT, UPDATE, and DELETE operations against the schema table. Use
+** care if you decide to try to use this routine for some other purposes.
+*/
+SQLITE_PRIVATE void sqlite3NestedParse(Parse *pParse, const char *zFormat, ...){
+ va_list ap;
+ char *zSql;
+ char *zErrMsg = 0;
+ sqlite3 *db = pParse->db;
+ char saveBuf[PARSE_TAIL_SZ];
+
+ if( pParse->nErr ) return;
+ assert( pParse->nested<10 ); /* Nesting should only be of limited depth */
+ va_start(ap, zFormat);
+ zSql = sqlite3VMPrintf(db, zFormat, ap);
+ va_end(ap);
+ if( zSql==0 ){
+ /* This can result either from an OOM or because the formatted string
+ ** exceeds SQLITE_LIMIT_LENGTH. In the latter case, we need to set
+ ** an error */
+ if( !db->mallocFailed ) pParse->rc = SQLITE_TOOBIG;
+ pParse->nErr++;
+ return;
+ }
+ pParse->nested++;
+ memcpy(saveBuf, PARSE_TAIL(pParse), PARSE_TAIL_SZ);
+ memset(PARSE_TAIL(pParse), 0, PARSE_TAIL_SZ);
+ sqlite3RunParser(pParse, zSql, &zErrMsg);
+ sqlite3DbFree(db, zErrMsg);
+ sqlite3DbFree(db, zSql);
+ memcpy(PARSE_TAIL(pParse), saveBuf, PARSE_TAIL_SZ);
+ pParse->nested--;
+}
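+
+/* Usage sketch (illustrative only; this exact format string is an
+** assumption, not a quote of a real call site): callers typically use
+** this routine to rewrite a schema-table row from inside another
+** statement's code generator, e.g.
+**
+**   sqlite3NestedParse(pParse,
+**       "UPDATE %Q." DFLT_SCHEMA_TABLE " SET sql=%Q WHERE name=%Q",
+**       zDb, zNewSql, zName);
+*/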
+
+#if SQLITE_USER_AUTHENTICATION
+/*
+** Return TRUE if zTable is the name of the system table that stores the
+** list of users and their access credentials.
+*/
+SQLITE_PRIVATE int sqlite3UserAuthTable(const char *zTable){
+ return sqlite3_stricmp(zTable, "sqlite_user")==0;
+}
+#endif
+
+/*
+** Locate the in-memory structure that describes a particular database
+** table given the name of that table and (optionally) the name of the
+** database containing the table. Return NULL if not found.
+**
+** If zDatabase is 0, all databases are searched for the table and the
+** first matching table is returned. (No checking for duplicate table
+** names is done.) The search order is TEMP first, then MAIN, then any
+** auxiliary databases added using the ATTACH command.
+**
+** See also sqlite3LocateTable().
+*/
+SQLITE_PRIVATE Table *sqlite3FindTable(sqlite3 *db, const char *zName, const char *zDatabase){
+ Table *p = 0;
+ int i;
+
+ /* All mutexes are required for schema access. Make sure we hold them. */
+ assert( zDatabase!=0 || sqlite3BtreeHoldsAllMutexes(db) );
+#if SQLITE_USER_AUTHENTICATION
+ /* Only the admin user is allowed to know that the sqlite_user table
+ ** exists */
+ if( db->auth.authLevel<UAUTH_Admin && sqlite3UserAuthTable(zName)!=0 ){
+ return 0;
+ }
+#endif
+ if( zDatabase ){
+ for(i=0; i<db->nDb; i++){
+ if( sqlite3StrICmp(zDatabase, db->aDb[i].zDbSName)==0 ) break;
+ }
+ if( i>=db->nDb ){
+ /* No match against the official names. But always match "main"
+ ** to schema 0 as a legacy fallback. */
+ if( sqlite3StrICmp(zDatabase,"main")==0 ){
+ i = 0;
+ }else{
+ return 0;
+ }
+ }
+ p = sqlite3HashFind(&db->aDb[i].pSchema->tblHash, zName);
+ if( p==0 && sqlite3StrNICmp(zName, "sqlite_", 7)==0 ){
+ if( i==1 ){
+ if( sqlite3StrICmp(zName+7, &ALT_TEMP_SCHEMA_TABLE[7])==0
+ || sqlite3StrICmp(zName+7, &ALT_SCHEMA_TABLE[7])==0
+ || sqlite3StrICmp(zName+7, &DFLT_SCHEMA_TABLE[7])==0
+ ){
+ p = sqlite3HashFind(&db->aDb[1].pSchema->tblHash,
+ DFLT_TEMP_SCHEMA_TABLE);
+ }
+ }else{
+ if( sqlite3StrICmp(zName+7, &ALT_SCHEMA_TABLE[7])==0 ){
+ p = sqlite3HashFind(&db->aDb[i].pSchema->tblHash,
+ DFLT_SCHEMA_TABLE);
+ }
+ }
+ }
+ }else{
+ /* Match against TEMP first */
+ p = sqlite3HashFind(&db->aDb[1].pSchema->tblHash, zName);
+ if( p ) return p;
+ /* The main database is second */
+ p = sqlite3HashFind(&db->aDb[0].pSchema->tblHash, zName);
+ if( p ) return p;
+ /* Attached databases are in order of attachment */
+ for(i=2; i<db->nDb; i++){
+ assert( sqlite3SchemaMutexHeld(db, i, 0) );
+ p = sqlite3HashFind(&db->aDb[i].pSchema->tblHash, zName);
+ if( p ) break;
+ }
+ if( p==0 && sqlite3StrNICmp(zName, "sqlite_", 7)==0 ){
+ if( sqlite3StrICmp(zName+7, &ALT_SCHEMA_TABLE[7])==0 ){
+ p = sqlite3HashFind(&db->aDb[0].pSchema->tblHash, DFLT_SCHEMA_TABLE);
+ }else if( sqlite3StrICmp(zName+7, &ALT_TEMP_SCHEMA_TABLE[7])==0 ){
+ p = sqlite3HashFind(&db->aDb[1].pSchema->tblHash,
+ DFLT_TEMP_SCHEMA_TABLE);
+ }
+ }
+ }
+ return p;
+}
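+
+/* Illustrative note (not part of the original comments): with a
+** database attached as "aux", a lookup of "t1" with zDatabase==0
+** probes temp, then main, then aux, in that order. A lookup using the
+** alternative schema-table name (ALT_SCHEMA_TABLE) is redirected by
+** the code above to the entry stored under DFLT_SCHEMA_TABLE.
+*/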
+
+/*
+** Locate the in-memory structure that describes a particular database
+** table given the name of that table and (optionally) the name of the
+** database containing the table. Return NULL if not found. Also leave an
+** error message in pParse->zErrMsg.
+**
+** The difference between this routine and sqlite3FindTable() is that this
+** routine leaves an error message in pParse->zErrMsg where
+** sqlite3FindTable() does not.
+*/
+SQLITE_PRIVATE Table *sqlite3LocateTable(
+ Parse *pParse, /* context in which to report errors */
+ u32 flags, /* LOCATE_VIEW or LOCATE_NOERR */
+ const char *zName, /* Name of the table we are looking for */
+ const char *zDbase /* Name of the database. Might be NULL */
+){
+ Table *p;
+ sqlite3 *db = pParse->db;
+
+ /* Read the database schema. If an error occurs, leave an error message
+ ** and code in pParse and return NULL. */
+ if( (db->mDbFlags & DBFLAG_SchemaKnownOk)==0
+ && SQLITE_OK!=sqlite3ReadSchema(pParse)
+ ){
+ return 0;
+ }
+
+ p = sqlite3FindTable(db, zName, zDbase);
+ if( p==0 ){
+#ifndef SQLITE_OMIT_VIRTUALTABLE
+    /* If zName is not the name of a table in the schema created using
+    ** CREATE, then check to see if it is the name of a virtual table that
+ ** can be an eponymous virtual table. */
+ if( pParse->disableVtab==0 ){
+ Module *pMod = (Module*)sqlite3HashFind(&db->aModule, zName);
+ if( pMod==0 && sqlite3_strnicmp(zName, "pragma_", 7)==0 ){
+ pMod = sqlite3PragmaVtabRegister(db, zName);
+ }
+ if( pMod && sqlite3VtabEponymousTableInit(pParse, pMod) ){
+ return pMod->pEpoTab;
+ }
+ }
+#endif
+ if( flags & LOCATE_NOERR ) return 0;
+ pParse->checkSchema = 1;
+ }else if( IsVirtual(p) && pParse->disableVtab ){
+ p = 0;
+ }
+
+ if( p==0 ){
+ const char *zMsg = flags & LOCATE_VIEW ? "no such view" : "no such table";
+ if( zDbase ){
+ sqlite3ErrorMsg(pParse, "%s: %s.%s", zMsg, zDbase, zName);
+ }else{
+ sqlite3ErrorMsg(pParse, "%s: %s", zMsg, zName);
+ }
+ }
+
+ return p;
+}
+
+/*
+** Locate the table identified by *p.
+**
+** This is a wrapper around sqlite3LocateTable(). The difference between
+** sqlite3LocateTable() and this function is that this function restricts
+** the search to schema (p->pSchema) if it is not NULL. p->pSchema may be
+** non-NULL if it is part of a view or trigger program definition. See
+** sqlite3FixSrcList() for details.
+*/
+SQLITE_PRIVATE Table *sqlite3LocateTableItem(
+ Parse *pParse,
+ u32 flags,
+ struct SrcList_item *p
+){
+ const char *zDb;
+ assert( p->pSchema==0 || p->zDatabase==0 );
+ if( p->pSchema ){
+ int iDb = sqlite3SchemaToIndex(pParse->db, p->pSchema);
+ zDb = pParse->db->aDb[iDb].zDbSName;
+ }else{
+ zDb = p->zDatabase;
+ }
+ return sqlite3LocateTable(pParse, flags, p->zName, zDb);
+}
+
+/*
+** Locate the in-memory structure that describes
+** a particular index given the name of that index
+** and the name of the database that contains the index.
+** Return NULL if not found.
+**
+** If zDatabase is 0, all databases are searched for the
+** index and the first match is returned. (No checking
+** for duplicate index names is done.) The search order is
+** TEMP first, then MAIN, then any auxiliary databases added
+** using the ATTACH command.
+*/
+SQLITE_PRIVATE Index *sqlite3FindIndex(sqlite3 *db, const char *zName, const char *zDb){
+ Index *p = 0;
+ int i;
+ /* All mutexes are required for schema access. Make sure we hold them. */
+ assert( zDb!=0 || sqlite3BtreeHoldsAllMutexes(db) );
+ for(i=OMIT_TEMPDB; i<db->nDb; i++){
+ int j = (i<2) ? i^1 : i; /* Search TEMP before MAIN */
+ Schema *pSchema = db->aDb[j].pSchema;
+ assert( pSchema );
+ if( zDb && sqlite3DbIsNamed(db, j, zDb)==0 ) continue;
+ assert( sqlite3SchemaMutexHeld(db, j, 0) );
+ p = sqlite3HashFind(&pSchema->idxHash, zName);
+ if( p ) break;
+ }
+ return p;
+}
+
+/*
+** Reclaim the memory used by an index
+*/
+SQLITE_PRIVATE void sqlite3FreeIndex(sqlite3 *db, Index *p){
+#ifndef SQLITE_OMIT_ANALYZE
+ sqlite3DeleteIndexSamples(db, p);
+#endif
+ sqlite3ExprDelete(db, p->pPartIdxWhere);
+ sqlite3ExprListDelete(db, p->aColExpr);
+ sqlite3DbFree(db, p->zColAff);
+ if( p->isResized ) sqlite3DbFree(db, (void *)p->azColl);
+#ifdef SQLITE_ENABLE_STAT4
+ sqlite3_free(p->aiRowEst);
+#endif
+ sqlite3DbFree(db, p);
+}
+
+/*
+** For the index called zIdxName which is found in the database iDb,
+** unlink that index from its Table, then remove the index from
+** the index hash table and free all memory structures associated
+** with the index.
+*/
+SQLITE_PRIVATE void sqlite3UnlinkAndDeleteIndex(sqlite3 *db, int iDb, const char *zIdxName){
+ Index *pIndex;
+ Hash *pHash;
+
+ assert( sqlite3SchemaMutexHeld(db, iDb, 0) );
+ pHash = &db->aDb[iDb].pSchema->idxHash;
+ pIndex = sqlite3HashInsert(pHash, zIdxName, 0);
+ if( ALWAYS(pIndex) ){
+ if( pIndex->pTable->pIndex==pIndex ){
+ pIndex->pTable->pIndex = pIndex->pNext;
+ }else{
+ Index *p;
+ /* Justification of ALWAYS(); The index must be on the list of
+ ** indices. */
+ p = pIndex->pTable->pIndex;
+ while( ALWAYS(p) && p->pNext!=pIndex ){ p = p->pNext; }
+ if( ALWAYS(p && p->pNext==pIndex) ){
+ p->pNext = pIndex->pNext;
+ }
+ }
+ sqlite3FreeIndex(db, pIndex);
+ }
+ db->mDbFlags |= DBFLAG_SchemaChange;
+}
+
+/*
+** Look through the list of open database files in db->aDb[] and if
+** any have been closed, remove them from the list. Reallocate the
+** db->aDb[] structure to a smaller size, if possible.
+**
+** Entry 0 (the "main" database) and entry 1 (the "temp" database)
+** are never candidates for being collapsed.
+*/
+SQLITE_PRIVATE void sqlite3CollapseDatabaseArray(sqlite3 *db){
+ int i, j;
+ for(i=j=2; i<db->nDb; i++){
+ struct Db *pDb = &db->aDb[i];
+ if( pDb->pBt==0 ){
+ sqlite3DbFree(db, pDb->zDbSName);
+ pDb->zDbSName = 0;
+ continue;
+ }
+ if( j<i ){
+ db->aDb[j] = db->aDb[i];
+ }
+ j++;
+ }
+ db->nDb = j;
+ if( db->nDb<=2 && db->aDb!=db->aDbStatic ){
+ memcpy(db->aDbStatic, db->aDb, 2*sizeof(db->aDb[0]));
+ sqlite3DbFree(db, db->aDb);
+ db->aDb = db->aDbStatic;
+ }
+}
+
+/*
+** Reset the schema for the database at index iDb. Also reset the
+** TEMP schema. The reset is deferred if db->nSchemaLock is not zero.
+** Deferred resets may be run by calling with iDb<0.
+*/
+SQLITE_PRIVATE void sqlite3ResetOneSchema(sqlite3 *db, int iDb){
+ int i;
+ assert( iDb<db->nDb );
+
+ if( iDb>=0 ){
+ assert( sqlite3SchemaMutexHeld(db, iDb, 0) );
+ DbSetProperty(db, iDb, DB_ResetWanted);
+ DbSetProperty(db, 1, DB_ResetWanted);
+ db->mDbFlags &= ~DBFLAG_SchemaKnownOk;
+ }
+
+ if( db->nSchemaLock==0 ){
+ for(i=0; i<db->nDb; i++){
+ if( DbHasProperty(db, i, DB_ResetWanted) ){
+ sqlite3SchemaClear(db->aDb[i].pSchema);
+ }
+ }
+ }
+}
+
+/*
+** Erase all schema information from all attached databases (including
+** "main" and "temp") for a single database connection.
+*/
+SQLITE_PRIVATE void sqlite3ResetAllSchemasOfConnection(sqlite3 *db){
+ int i;
+ sqlite3BtreeEnterAll(db);
+ for(i=0; i<db->nDb; i++){
+ Db *pDb = &db->aDb[i];
+ if( pDb->pSchema ){
+ if( db->nSchemaLock==0 ){
+ sqlite3SchemaClear(pDb->pSchema);
+ }else{
+ DbSetProperty(db, i, DB_ResetWanted);
+ }
+ }
+ }
+ db->mDbFlags &= ~(DBFLAG_SchemaChange|DBFLAG_SchemaKnownOk);
+ sqlite3VtabUnlockList(db);
+ sqlite3BtreeLeaveAll(db);
+ if( db->nSchemaLock==0 ){
+ sqlite3CollapseDatabaseArray(db);
+ }
+}
+
+/*
+** This routine is called when a commit occurs.
+*/
+SQLITE_PRIVATE void sqlite3CommitInternalChanges(sqlite3 *db){
+ db->mDbFlags &= ~DBFLAG_SchemaChange;
+}
+
+/*
+** Delete memory allocated for the column names of a table or view (the
+** Table.aCol[] array).
+*/
+SQLITE_PRIVATE void sqlite3DeleteColumnNames(sqlite3 *db, Table *pTable){
+ int i;
+ Column *pCol;
+ assert( pTable!=0 );
+ if( (pCol = pTable->aCol)!=0 ){
+ for(i=0; i<pTable->nCol; i++, pCol++){
+ assert( pCol->zName==0 || pCol->hName==sqlite3StrIHash(pCol->zName) );
+ sqlite3DbFree(db, pCol->zName);
+ sqlite3ExprDelete(db, pCol->pDflt);
+ sqlite3DbFree(db, pCol->zColl);
+ }
+ sqlite3DbFree(db, pTable->aCol);
+ }
+}
+
+/*
+** Remove the memory data structures associated with the given
+** Table. No changes are made to disk by this routine.
+**
+** This routine just deletes the data structure. It does not unlink
+** the table data structure from the hash table. But it does destroy
+** memory structures of the indices and foreign keys associated with
+** the table.
+**
+** The db parameter is optional. It is needed if the Table object
+** contains lookaside memory. (Table objects in the schema do not use
+** lookaside memory, but some ephemeral Table objects do.) Or the
+** db parameter can be used with db->pnBytesFreed to measure the memory
+** used by the Table object.
+*/
+static void SQLITE_NOINLINE deleteTable(sqlite3 *db, Table *pTable){
+ Index *pIndex, *pNext;
+
+#ifdef SQLITE_DEBUG
+ /* Record the number of outstanding lookaside allocations in schema Tables
+ ** prior to doing any free() operations. Since schema Tables do not use
+ ** lookaside, this number should not change.
+ **
+ ** If malloc has already failed, it may be that it failed while allocating
+ ** a Table object that was going to be marked ephemeral. So do not check
+ ** that no lookaside memory is used in this case either. */
+ int nLookaside = 0;
+ if( db && !db->mallocFailed && (pTable->tabFlags & TF_Ephemeral)==0 ){
+ nLookaside = sqlite3LookasideUsed(db, 0);
+ }
+#endif
+
+ /* Delete all indices associated with this table. */
+ for(pIndex = pTable->pIndex; pIndex; pIndex=pNext){
+ pNext = pIndex->pNext;
+ assert( pIndex->pSchema==pTable->pSchema
+ || (IsVirtual(pTable) && pIndex->idxType!=SQLITE_IDXTYPE_APPDEF) );
+ if( (db==0 || db->pnBytesFreed==0) && !IsVirtual(pTable) ){
+ char *zName = pIndex->zName;
+ TESTONLY ( Index *pOld = ) sqlite3HashInsert(
+ &pIndex->pSchema->idxHash, zName, 0
+ );
+ assert( db==0 || sqlite3SchemaMutexHeld(db, 0, pIndex->pSchema) );
+ assert( pOld==pIndex || pOld==0 );
+ }
+ sqlite3FreeIndex(db, pIndex);
+ }
+
+ /* Delete any foreign keys attached to this table. */
+ sqlite3FkDelete(db, pTable);
+
+ /* Delete the Table structure itself.
+ */
+ sqlite3DeleteColumnNames(db, pTable);
+ sqlite3DbFree(db, pTable->zName);
+ sqlite3DbFree(db, pTable->zColAff);
+ sqlite3SelectDelete(db, pTable->pSelect);
+ sqlite3ExprListDelete(db, pTable->pCheck);
+#ifndef SQLITE_OMIT_VIRTUALTABLE
+ sqlite3VtabClear(db, pTable);
+#endif
+ sqlite3DbFree(db, pTable);
+
+ /* Verify that no lookaside memory was used by schema tables */
+ assert( nLookaside==0 || nLookaside==sqlite3LookasideUsed(db,0) );
+}
+SQLITE_PRIVATE void sqlite3DeleteTable(sqlite3 *db, Table *pTable){
+ /* Do not delete the table until the reference count reaches zero. */
+ if( !pTable ) return;
+ if( ((!db || db->pnBytesFreed==0) && (--pTable->nTabRef)>0) ) return;
+ deleteTable(db, pTable);
+}
+
+
+/*
+** Unlink the given table from the hash tables and then delete the
+** table structure with all its indices and foreign keys.
+*/
+SQLITE_PRIVATE void sqlite3UnlinkAndDeleteTable(sqlite3 *db, int iDb, const char *zTabName){
+ Table *p;
+ Db *pDb;
+
+ assert( db!=0 );
+ assert( iDb>=0 && iDb<db->nDb );
+ assert( zTabName );
+ assert( sqlite3SchemaMutexHeld(db, iDb, 0) );
+ testcase( zTabName[0]==0 ); /* Zero-length table names are allowed */
+ pDb = &db->aDb[iDb];
+ p = sqlite3HashInsert(&pDb->pSchema->tblHash, zTabName, 0);
+ sqlite3DeleteTable(db, p);
+ db->mDbFlags |= DBFLAG_SchemaChange;
+}
+
+/*
+** Given a token, return a string that consists of the text of that
+** token. Space to hold the returned string
+** is obtained from sqliteMalloc() and must be freed by the calling
+** function.
+**
+** Any quotation marks (ex: "name", 'name', [name], or `name`) that
+** surround the body of the token are removed.
+**
+** Tokens are often just pointers into the original SQL text and so
+** are not \000 terminated and are not persistent. The returned string
+** is \000 terminated and is persistent.
+*/
+SQLITE_PRIVATE char *sqlite3NameFromToken(sqlite3 *db, Token *pName){
+ char *zName;
+ if( pName ){
+ zName = sqlite3DbStrNDup(db, (char*)pName->z, pName->n);
+ sqlite3Dequote(zName);
+ }else{
+ zName = 0;
+ }
+ return zName;
+}
+
+/*
+** Open the sqlite_schema table stored in database number iDb for
+** writing. The table is opened using cursor 0.
+*/
+SQLITE_PRIVATE void sqlite3OpenSchemaTable(Parse *p, int iDb){
+ Vdbe *v = sqlite3GetVdbe(p);
+ sqlite3TableLock(p, iDb, SCHEMA_ROOT, 1, DFLT_SCHEMA_TABLE);
+ sqlite3VdbeAddOp4Int(v, OP_OpenWrite, 0, SCHEMA_ROOT, iDb, 5);
+ if( p->nTab==0 ){
+ p->nTab = 1;
+ }
+}
+
+/*
+** Parameter zName points to a nul-terminated buffer containing the name
+** of a database ("main", "temp" or the name of an attached db). This
+** function returns the index of the named database in db->aDb[], or
+** -1 if the named db cannot be found.
+*/
+SQLITE_PRIVATE int sqlite3FindDbName(sqlite3 *db, const char *zName){
+ int i = -1; /* Database number */
+ if( zName ){
+ Db *pDb;
+ for(i=(db->nDb-1), pDb=&db->aDb[i]; i>=0; i--, pDb--){
+ if( 0==sqlite3_stricmp(pDb->zDbSName, zName) ) break;
+ /* "main" is always an acceptable alias for the primary database
+ ** even if it has been renamed using SQLITE_DBCONFIG_MAINDBNAME. */
+ if( i==0 && 0==sqlite3_stricmp("main", zName) ) break;
+ }
+ }
+ return i;
+}
+
+/*
+** The token *pName contains the name of a database (either "main" or
+** "temp" or the name of an attached db). This routine returns the
+** index of the named database in db->aDb[], or -1 if the named db
+** does not exist.
+*/
+SQLITE_PRIVATE int sqlite3FindDb(sqlite3 *db, Token *pName){
+ int i; /* Database number */
+ char *zName; /* Name we are searching for */
+ zName = sqlite3NameFromToken(db, pName);
+ i = sqlite3FindDbName(db, zName);
+ sqlite3DbFree(db, zName);
+ return i;
+}
+
+/* The table or view or trigger name is passed to this routine via tokens
+** pName1 and pName2. If the table name was fully qualified, for example:
+**
+** CREATE TABLE xxx.yyy (...);
+**
+** Then pName1 is set to "xxx" and pName2 "yyy". On the other hand if
+** the table name is not fully qualified, i.e.:
+**
+** CREATE TABLE yyy(...);
+**
+** Then pName1 is set to "yyy" and pName2 is "".
+**
+** This routine sets the *pUnqual pointer to point at the token (pName1 or
+** pName2) that stores the unqualified table name. The index of the
+** database "xxx" is returned.
+*/
+SQLITE_PRIVATE int sqlite3TwoPartName(
+ Parse *pParse, /* Parsing and code generating context */
+ Token *pName1, /* The "xxx" in the name "xxx.yyy" or "xxx" */
+ Token *pName2, /* The "yyy" in the name "xxx.yyy" */
+ Token **pUnqual /* Write the unqualified object name here */
+){
+ int iDb; /* Database holding the object */
+ sqlite3 *db = pParse->db;
+
+ assert( pName2!=0 );
+ if( pName2->n>0 ){
+ if( db->init.busy ) {
+ sqlite3ErrorMsg(pParse, "corrupt database");
+ return -1;
+ }
+ *pUnqual = pName2;
+ iDb = sqlite3FindDb(db, pName1);
+ if( iDb<0 ){
+ sqlite3ErrorMsg(pParse, "unknown database %T", pName1);
+ return -1;
+ }
+ }else{
+ assert( db->init.iDb==0 || db->init.busy || IN_RENAME_OBJECT
+ || (db->mDbFlags & DBFLAG_Vacuum)!=0);
+ iDb = db->init.iDb;
+ *pUnqual = pName1;
+ }
+ return iDb;
+}
+
+/*
+** True if PRAGMA writable_schema is ON
+*/
+SQLITE_PRIVATE int sqlite3WritableSchema(sqlite3 *db){
+ testcase( (db->flags&(SQLITE_WriteSchema|SQLITE_Defensive))==0 );
+ testcase( (db->flags&(SQLITE_WriteSchema|SQLITE_Defensive))==
+ SQLITE_WriteSchema );
+ testcase( (db->flags&(SQLITE_WriteSchema|SQLITE_Defensive))==
+ SQLITE_Defensive );
+ testcase( (db->flags&(SQLITE_WriteSchema|SQLITE_Defensive))==
+ (SQLITE_WriteSchema|SQLITE_Defensive) );
+ return (db->flags&(SQLITE_WriteSchema|SQLITE_Defensive))==SQLITE_WriteSchema;
+}
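+
+/* Summary (a restatement of the expression above, for quick reference):
+** the result is true only when SQLITE_WriteSchema is set and
+** SQLITE_Defensive is clear; the other three combinations of those two
+** flags all yield false.
+*/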
+
+/*
+** This routine is used to check if the UTF-8 string zName is a legal
+** unqualified name for a new schema object (table, index, view or
+** trigger). All names are legal except those that begin with the string
+** "sqlite_" (in upper, lower or mixed case). This portion of the namespace
+** is reserved for internal use.
+**
+** When parsing the sqlite_schema table, this routine also checks to
+** make sure the "type", "name", and "tbl_name" columns are consistent
+** with the SQL.
+*/
+SQLITE_PRIVATE int sqlite3CheckObjectName(
+ Parse *pParse, /* Parsing context */
+ const char *zName, /* Name of the object to check */
+ const char *zType, /* Type of this object */
+ const char *zTblName /* Parent table name for triggers and indexes */
+){
+ sqlite3 *db = pParse->db;
+ if( sqlite3WritableSchema(db)
+ || db->init.imposterTable
+ || !sqlite3Config.bExtraSchemaChecks
+ ){
+ /* Skip these error checks for writable_schema=ON */
+ return SQLITE_OK;
+ }
+ if( db->init.busy ){
+ if( sqlite3_stricmp(zType, db->init.azInit[0])
+ || sqlite3_stricmp(zName, db->init.azInit[1])
+ || sqlite3_stricmp(zTblName, db->init.azInit[2])
+ ){
+ sqlite3ErrorMsg(pParse, ""); /* corruptSchema() will supply the error */
+ return SQLITE_ERROR;
+ }
+ }else{
+ if( (pParse->nested==0 && 0==sqlite3StrNICmp(zName, "sqlite_", 7))
+ || (sqlite3ReadOnlyShadowTables(db) && sqlite3ShadowTableName(db, zName))
+ ){
+ sqlite3ErrorMsg(pParse, "object name reserved for internal use: %s",
+ zName);
+ return SQLITE_ERROR;
+ }
+
+ }
+ return SQLITE_OK;
+}
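+
+/* Illustrative example (not part of the original comments): with
+** default settings, "CREATE TABLE sqlite_stats(x)" fails with
+** "object name reserved for internal use: sqlite_stats", but the same
+** statement is accepted after "PRAGMA writable_schema=ON" because the
+** checks above are skipped.
+*/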
+
+/*
+** Return the PRIMARY KEY index of a table
+*/
+SQLITE_PRIVATE Index *sqlite3PrimaryKeyIndex(Table *pTab){
+ Index *p;
+ for(p=pTab->pIndex; p && !IsPrimaryKeyIndex(p); p=p->pNext){}
+ return p;
+}
+
+/*
+** Convert a table column number into an index column number. That is,
+** for the column iCol in the table (as defined by the CREATE TABLE statement)
+** find the (first) offset of that column in index pIdx. Or return -1
+** if column iCol is not used in index pIdx.
+*/
+SQLITE_PRIVATE i16 sqlite3TableColumnToIndex(Index *pIdx, i16 iCol){
+ int i;
+ for(i=0; i<pIdx->nColumn; i++){
+ if( iCol==pIdx->aiColumn[i] ) return i;
+ }
+ return -1;
+}
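+
+/* Worked example (illustrative only): for CREATE TABLE t(a,b,c) and
+** CREATE INDEX i1 ON t(c,a), the index key columns are {2,0}, so
+** sqlite3TableColumnToIndex(pIdx,2)==0 and
+** sqlite3TableColumnToIndex(pIdx,0)==1, while
+** sqlite3TableColumnToIndex(pIdx,1)==-1 because "b" is not indexed.
+*/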
+
+#ifndef SQLITE_OMIT_GENERATED_COLUMNS
+/* Convert a storage column number into a table column number.
+**
+** The storage column number (0,1,2,....) is the index of the value
+** as it appears in the record on disk. The true column number
+** is the index (0,1,2,...) of the column in the CREATE TABLE statement.
+**
+** The storage column number is less than the table column number if
+** and only if there are VIRTUAL columns to the left.
+**
+** If SQLITE_OMIT_GENERATED_COLUMNS is defined, this routine is a no-op macro.
+*/
+SQLITE_PRIVATE i16 sqlite3StorageColumnToTable(Table *pTab, i16 iCol){
+ if( pTab->tabFlags & TF_HasVirtual ){
+ int i;
+ for(i=0; i<=iCol; i++){
+ if( pTab->aCol[i].colFlags & COLFLAG_VIRTUAL ) iCol++;
+ }
+ }
+ return iCol;
+}
+#endif
+
+#ifndef SQLITE_OMIT_GENERATED_COLUMNS
+/* Convert a table column number into a storage column number.
+**
+** The storage column number (0,1,2,....) is the index of the value
+** as it appears in the record on disk. Or, if the input column is
+** the N-th virtual column (zero-based) then the storage number is
+** the number of non-virtual columns in the table plus N.
+**
+** The true column number is the index (0,1,2,...) of the column in
+** the CREATE TABLE statement.
+**
+** If the input column is a VIRTUAL column, then it should not appear
+** in storage. But the value sometimes is cached in registers that
+** follow the range of registers used to construct storage. This
+** avoids computing the same VIRTUAL column multiple times, and provides
+** values for use by OP_Param opcodes in triggers. Hence, if the
+** input column is a VIRTUAL column, put it after all the other columns.
+**
+** In the following, N means "normal column", S means STORED, and
+** V means VIRTUAL. Suppose the CREATE TABLE has columns like this:
+**
+** CREATE TABLE ex(N,S,V,N,S,V,N,S,V);
+** -- 0 1 2 3 4 5 6 7 8
+**
+** Then the mapping from this function is as follows:
+**
+** INPUTS: 0 1 2 3 4 5 6 7 8
+** OUTPUTS: 0 1 6 2 3 7 4 5 8
+**
+** So, in other words, this routine shifts all the virtual columns to
+** the end.
+**
+** If SQLITE_OMIT_GENERATED_COLUMNS is defined then there are no virtual
+** columns and this routine is a no-op macro. If pTab does not have any
+** virtual columns, then this routine is a no-op that always returns
+** iCol. If iCol is negative (indicating the ROWID column) then this
+** routine returns iCol unchanged.
+*/
+SQLITE_PRIVATE i16 sqlite3TableColumnToStorage(Table *pTab, i16 iCol){
+ int i;
+ i16 n;
+ assert( iCol<pTab->nCol );
+ if( (pTab->tabFlags & TF_HasVirtual)==0 || iCol<0 ) return iCol;
+ for(i=0, n=0; i<iCol; i++){
+ if( (pTab->aCol[i].colFlags & COLFLAG_VIRTUAL)==0 ) n++;
+ }
+ if( pTab->aCol[i].colFlags & COLFLAG_VIRTUAL ){
+ /* iCol is a virtual column itself */
+ return pTab->nNVCol + i - n;
+ }else{
+ /* iCol is a normal or stored column */
+ return n;
+ }
+}
+#endif
+
+/*
+** Begin constructing a new table representation in memory. This is
+** the first of several action routines that get called in response
+** to a CREATE TABLE statement. In particular, this routine is called
+** after seeing tokens "CREATE" and "TABLE" and the table name. The isTemp
+** flag is true if the table should be stored in the auxiliary database
+** file instead of in the main database file. This is normally the case
+** when the "TEMP" or "TEMPORARY" keyword occurs in between
+** CREATE and TABLE.
+**
+** The new table record is initialized and put in pParse->pNewTable.
+** As more of the CREATE TABLE statement is parsed, additional action
+** routines will be called to add more information to this record.
+** At the end of the CREATE TABLE statement, the sqlite3EndTable() routine
+** is called to complete the construction of the new table record.
+*/
+SQLITE_PRIVATE void sqlite3StartTable(
+ Parse *pParse, /* Parser context */
+ Token *pName1, /* First part of the name of the table or view */
+ Token *pName2, /* Second part of the name of the table or view */
+ int isTemp, /* True if this is a TEMP table */
+ int isView, /* True if this is a VIEW */
+ int isVirtual, /* True if this is a VIRTUAL table */
+ int noErr /* Do nothing if table already exists */
+){
+ Table *pTable;
+ char *zName = 0; /* The name of the new table */
+ sqlite3 *db = pParse->db;
+ Vdbe *v;
+ int iDb; /* Database number to create the table in */
+ Token *pName; /* Unqualified name of the table to create */
+
+ if( db->init.busy && db->init.newTnum==1 ){
+ /* Special case: Parsing the sqlite_schema or sqlite_temp_schema schema */
+ iDb = db->init.iDb;
+ zName = sqlite3DbStrDup(db, SCHEMA_TABLE(iDb));
+ pName = pName1;
+ }else{
+ /* The common case */
+ iDb = sqlite3TwoPartName(pParse, pName1, pName2, &pName);
+ if( iDb<0 ) return;
+ if( !OMIT_TEMPDB && isTemp && pName2->n>0 && iDb!=1 ){
+      /* If creating a temp table, the name may not be qualified, unless
+      ** the database name is "temp" anyway. */
+ sqlite3ErrorMsg(pParse, "temporary table name must be unqualified");
+ return;
+ }
+ if( !OMIT_TEMPDB && isTemp ) iDb = 1;
+ zName = sqlite3NameFromToken(db, pName);
+ if( IN_RENAME_OBJECT ){
+ sqlite3RenameTokenMap(pParse, (void*)zName, pName);
+ }
+ }
+ pParse->sNameToken = *pName;
+ if( zName==0 ) return;
+ if( sqlite3CheckObjectName(pParse, zName, isView?"view":"table", zName) ){
+ goto begin_table_error;
+ }
+ if( db->init.iDb==1 ) isTemp = 1;
+#ifndef SQLITE_OMIT_AUTHORIZATION
+ assert( isTemp==0 || isTemp==1 );
+ assert( isView==0 || isView==1 );
+ {
+ static const u8 aCode[] = {
+ SQLITE_CREATE_TABLE,
+ SQLITE_CREATE_TEMP_TABLE,
+ SQLITE_CREATE_VIEW,
+ SQLITE_CREATE_TEMP_VIEW
+ };
+ char *zDb = db->aDb[iDb].zDbSName;
+ if( sqlite3AuthCheck(pParse, SQLITE_INSERT, SCHEMA_TABLE(isTemp), 0, zDb) ){
+ goto begin_table_error;
+ }
+ if( !isVirtual && sqlite3AuthCheck(pParse, (int)aCode[isTemp+2*isView],
+ zName, 0, zDb) ){
+ goto begin_table_error;
+ }
+ }
+#endif
+
+ /* Make sure the new table name does not collide with an existing
+ ** index or table name in the same database. Issue an error message if
+ ** it does. The exception is if the statement being parsed was passed
+ ** to an sqlite3_declare_vtab() call. In that case only the column names
+ ** and types will be used, so there is no need to test for namespace
+ ** collisions.
+ */
+ if( !IN_SPECIAL_PARSE ){
+ char *zDb = db->aDb[iDb].zDbSName;
+ if( SQLITE_OK!=sqlite3ReadSchema(pParse) ){
+ goto begin_table_error;
+ }
+ pTable = sqlite3FindTable(db, zName, zDb);
+ if( pTable ){
+ if( !noErr ){
+ sqlite3ErrorMsg(pParse, "table %T already exists", pName);
+ }else{
+ assert( !db->init.busy || CORRUPT_DB );
+ sqlite3CodeVerifySchema(pParse, iDb);
+ }
+ goto begin_table_error;
+ }
+ if( sqlite3FindIndex(db, zName, zDb)!=0 ){
+ sqlite3ErrorMsg(pParse, "there is already an index named %s", zName);
+ goto begin_table_error;
+ }
+ }
+
+ pTable = sqlite3DbMallocZero(db, sizeof(Table));
+ if( pTable==0 ){
+ assert( db->mallocFailed );
+ pParse->rc = SQLITE_NOMEM_BKPT;
+ pParse->nErr++;
+ goto begin_table_error;
+ }
+ pTable->zName = zName;
+ pTable->iPKey = -1;
+ pTable->pSchema = db->aDb[iDb].pSchema;
+ pTable->nTabRef = 1;
+#ifdef SQLITE_DEFAULT_ROWEST
+ pTable->nRowLogEst = sqlite3LogEst(SQLITE_DEFAULT_ROWEST);
+#else
+ pTable->nRowLogEst = 200; assert( 200==sqlite3LogEst(1048576) );
+#endif
+ assert( pParse->pNewTable==0 );
+ pParse->pNewTable = pTable;
+
+ /* If this is the magic sqlite_sequence table used by autoincrement,
+ ** then record a pointer to this table in the main database structure
+ ** so that INSERT can find the table easily.
+ */
+#ifndef SQLITE_OMIT_AUTOINCREMENT
+ if( !pParse->nested && strcmp(zName, "sqlite_sequence")==0 ){
+ assert( sqlite3SchemaMutexHeld(db, iDb, 0) );
+ pTable->pSchema->pSeqTab = pTable;
+ }
+#endif
+
+ /* Begin generating the code that will insert the table record into
+ ** the schema table. Note in particular that we must go ahead
+  ** and allocate the record number for the table entry now, before any
+  ** PRIMARY KEY or UNIQUE keywords are parsed. Those keywords will cause
+ ** indices to be created and the table record must come before the
+ ** indices. Hence, the record number for the table must be allocated
+ ** now.
+ */
+ if( !db->init.busy && (v = sqlite3GetVdbe(pParse))!=0 ){
+ int addr1;
+ int fileFormat;
+ int reg1, reg2, reg3;
+ /* nullRow[] is an OP_Record encoding of a row containing 5 NULLs */
+ static const char nullRow[] = { 6, 0, 0, 0, 0, 0 };
+ sqlite3BeginWriteOperation(pParse, 1, iDb);
+
+#ifndef SQLITE_OMIT_VIRTUALTABLE
+ if( isVirtual ){
+ sqlite3VdbeAddOp0(v, OP_VBegin);
+ }
+#endif
+
+ /* If the file format and encoding in the database have not been set,
+ ** set them now.
+ */
+ reg1 = pParse->regRowid = ++pParse->nMem;
+ reg2 = pParse->regRoot = ++pParse->nMem;
+ reg3 = ++pParse->nMem;
+ sqlite3VdbeAddOp3(v, OP_ReadCookie, iDb, reg3, BTREE_FILE_FORMAT);
+ sqlite3VdbeUsesBtree(v, iDb);
+ addr1 = sqlite3VdbeAddOp1(v, OP_If, reg3); VdbeCoverage(v);
+ fileFormat = (db->flags & SQLITE_LegacyFileFmt)!=0 ?
+ 1 : SQLITE_MAX_FILE_FORMAT;
+ sqlite3VdbeAddOp3(v, OP_SetCookie, iDb, BTREE_FILE_FORMAT, fileFormat);
+ sqlite3VdbeAddOp3(v, OP_SetCookie, iDb, BTREE_TEXT_ENCODING, ENC(db));
+ sqlite3VdbeJumpHere(v, addr1);
+
+ /* This just creates a place-holder record in the sqlite_schema table.
+ ** The record created does not contain anything yet. It will be replaced
+ ** by the real entry in code generated at sqlite3EndTable().
+ **
+ ** The rowid for the new entry is left in register pParse->regRowid.
+ ** The root page number of the new table is left in reg pParse->regRoot.
+ ** The rowid and root page number values are needed by the code that
+ ** sqlite3EndTable will generate.
+ */
+#if !defined(SQLITE_OMIT_VIEW) || !defined(SQLITE_OMIT_VIRTUALTABLE)
+ if( isView || isVirtual ){
+ sqlite3VdbeAddOp2(v, OP_Integer, 0, reg2);
+ }else
+#endif
+ {
+ pParse->addrCrTab =
+ sqlite3VdbeAddOp3(v, OP_CreateBtree, iDb, reg2, BTREE_INTKEY);
+ }
+ sqlite3OpenSchemaTable(pParse, iDb);
+ sqlite3VdbeAddOp2(v, OP_NewRowid, 0, reg1);
+ sqlite3VdbeAddOp4(v, OP_Blob, 6, reg3, 0, nullRow, P4_STATIC);
+ sqlite3VdbeAddOp3(v, OP_Insert, 0, reg3, reg1);
+ sqlite3VdbeChangeP5(v, OPFLAG_APPEND);
+ sqlite3VdbeAddOp0(v, OP_Close);
+ }
+
+ /* Normal (non-error) return. */
+ return;
+
+ /* If an error occurs, we jump here */
+begin_table_error:
+ sqlite3DbFree(db, zName);
+ return;
+}
+
+/* Set properties of a table column based on the (magical)
+** name of the column.
+*/
+#if SQLITE_ENABLE_HIDDEN_COLUMNS
+SQLITE_PRIVATE void sqlite3ColumnPropertiesFromName(Table *pTab, Column *pCol){
+ if( sqlite3_strnicmp(pCol->zName, "__hidden__", 10)==0 ){
+ pCol->colFlags |= COLFLAG_HIDDEN;
+ }else if( pTab && pCol!=pTab->aCol && (pCol[-1].colFlags & COLFLAG_HIDDEN) ){
+ pTab->tabFlags |= TF_OOOHidden;
+ }
+}
+#endif
+
+
+/*
+** Add a new column to the table currently being constructed.
+**
+** The parser calls this routine once for each column declaration
+** in a CREATE TABLE statement. sqlite3StartTable() gets called
+** first to get things going. Then this routine is called for each
+** column.
+*/
+SQLITE_PRIVATE void sqlite3AddColumn(Parse *pParse, Token *pName, Token *pType){
+ Table *p;
+ int i;
+ char *z;
+ char *zType;
+ Column *pCol;
+ sqlite3 *db = pParse->db;
+ if( (p = pParse->pNewTable)==0 ) return;
+ if( p->nCol+1>db->aLimit[SQLITE_LIMIT_COLUMN] ){
+ sqlite3ErrorMsg(pParse, "too many columns on %s", p->zName);
+ return;
+ }
+ z = sqlite3DbMallocRaw(db, pName->n + pType->n + 2);
+ if( z==0 ) return;
+ if( IN_RENAME_OBJECT ) sqlite3RenameTokenMap(pParse, (void*)z, pName);
+ memcpy(z, pName->z, pName->n);
+ z[pName->n] = 0;
+ sqlite3Dequote(z);
+ for(i=0; i<p->nCol; i++){
+ if( sqlite3_stricmp(z, p->aCol[i].zName)==0 ){
+ sqlite3ErrorMsg(pParse, "duplicate column name: %s", z);
+ sqlite3DbFree(db, z);
+ return;
+ }
+ }
+ if( (p->nCol & 0x7)==0 ){
+ Column *aNew;
+ aNew = sqlite3DbRealloc(db,p->aCol,(p->nCol+8)*sizeof(p->aCol[0]));
+ if( aNew==0 ){
+ sqlite3DbFree(db, z);
+ return;
+ }
+ p->aCol = aNew;
+ }
+ pCol = &p->aCol[p->nCol];
+ memset(pCol, 0, sizeof(p->aCol[0]));
+ pCol->zName = z;
+ pCol->hName = sqlite3StrIHash(z);
+ sqlite3ColumnPropertiesFromName(p, pCol);
+
+ if( pType->n==0 ){
+ /* If there is no type specified, columns have the default affinity
+ ** 'BLOB' with a default size of 4 bytes. */
+ pCol->affinity = SQLITE_AFF_BLOB;
+ pCol->szEst = 1;
+#ifdef SQLITE_ENABLE_SORTER_REFERENCES
+ if( 4>=sqlite3GlobalConfig.szSorterRef ){
+ pCol->colFlags |= COLFLAG_SORTERREF;
+ }
+#endif
+ }else{
+ zType = z + sqlite3Strlen30(z) + 1;
+ memcpy(zType, pType->z, pType->n);
+ zType[pType->n] = 0;
+ sqlite3Dequote(zType);
+ pCol->affinity = sqlite3AffinityType(zType, pCol);
+ pCol->colFlags |= COLFLAG_HASTYPE;
+ }
+ p->nCol++;
+ p->nNVCol++;
+ pParse->constraintName.n = 0;
+}
+
+/*
+** This routine is called by the parser while in the middle of
+** parsing a CREATE TABLE statement. A "NOT NULL" constraint has
+** been seen on a column. This routine sets the notNull flag on
+** the column currently under construction.
+*/
+SQLITE_PRIVATE void sqlite3AddNotNull(Parse *pParse, int onError){
+ Table *p;
+ Column *pCol;
+ p = pParse->pNewTable;
+ if( p==0 || NEVER(p->nCol<1) ) return;
+ pCol = &p->aCol[p->nCol-1];
+ pCol->notNull = (u8)onError;
+ p->tabFlags |= TF_HasNotNull;
+
+ /* Set the uniqNotNull flag on any UNIQUE or PK indexes already created
+ ** on this column. */
+ if( pCol->colFlags & COLFLAG_UNIQUE ){
+ Index *pIdx;
+ for(pIdx=p->pIndex; pIdx; pIdx=pIdx->pNext){
+ assert( pIdx->nKeyCol==1 && pIdx->onError!=OE_None );
+ if( pIdx->aiColumn[0]==p->nCol-1 ){
+ pIdx->uniqNotNull = 1;
+ }
+ }
+ }
+}
+
+/*
+** Scan the column type name zType (length nType) and return the
+** associated affinity type.
+**
+** This routine does a case-independent search of zType for the
+** substrings in the following table. If one of the substrings is
+** found, the corresponding affinity is returned. If zType contains
+** more than one of the substrings, entries toward the top of
+** the table take priority. For example, if zType is 'BLOBINT',
+** SQLITE_AFF_INTEGER is returned.
+**
+** Substring | Affinity
+** --------------------------------
+** 'INT' | SQLITE_AFF_INTEGER
+** 'CHAR' | SQLITE_AFF_TEXT
+** 'CLOB' | SQLITE_AFF_TEXT
+** 'TEXT' | SQLITE_AFF_TEXT
+** 'BLOB' | SQLITE_AFF_BLOB
+** 'REAL' | SQLITE_AFF_REAL
+** 'FLOA' | SQLITE_AFF_REAL
+** 'DOUB' | SQLITE_AFF_REAL
+**
+** If none of the substrings in the above table are found,
+** SQLITE_AFF_NUMERIC is returned.
+*/
+SQLITE_PRIVATE char sqlite3AffinityType(const char *zIn, Column *pCol){
+ u32 h = 0;
+ char aff = SQLITE_AFF_NUMERIC;
+ const char *zChar = 0;
+
+ assert( zIn!=0 );
+ while( zIn[0] ){
+ h = (h<<8) + sqlite3UpperToLower[(*zIn)&0xff];
+ zIn++;
+ if( h==(('c'<<24)+('h'<<16)+('a'<<8)+'r') ){ /* CHAR */
+ aff = SQLITE_AFF_TEXT;
+ zChar = zIn;
+ }else if( h==(('c'<<24)+('l'<<16)+('o'<<8)+'b') ){ /* CLOB */
+ aff = SQLITE_AFF_TEXT;
+ }else if( h==(('t'<<24)+('e'<<16)+('x'<<8)+'t') ){ /* TEXT */
+ aff = SQLITE_AFF_TEXT;
+ }else if( h==(('b'<<24)+('l'<<16)+('o'<<8)+'b') /* BLOB */
+ && (aff==SQLITE_AFF_NUMERIC || aff==SQLITE_AFF_REAL) ){
+ aff = SQLITE_AFF_BLOB;
+ if( zIn[0]=='(' ) zChar = zIn;
+#ifndef SQLITE_OMIT_FLOATING_POINT
+ }else if( h==(('r'<<24)+('e'<<16)+('a'<<8)+'l') /* REAL */
+ && aff==SQLITE_AFF_NUMERIC ){
+ aff = SQLITE_AFF_REAL;
+ }else if( h==(('f'<<24)+('l'<<16)+('o'<<8)+'a') /* FLOA */
+ && aff==SQLITE_AFF_NUMERIC ){
+ aff = SQLITE_AFF_REAL;
+ }else if( h==(('d'<<24)+('o'<<16)+('u'<<8)+'b') /* DOUB */
+ && aff==SQLITE_AFF_NUMERIC ){
+ aff = SQLITE_AFF_REAL;
+#endif
+ }else if( (h&0x00FFFFFF)==(('i'<<16)+('n'<<8)+'t') ){ /* INT */
+ aff = SQLITE_AFF_INTEGER;
+ break;
+ }
+ }
+
+ /* If pCol is not NULL, store an estimate of the field size. The
+ ** estimate is scaled so that the size of an integer is 1. */
+ if( pCol ){
+ int v = 0; /* default size is approx 4 bytes */
+ if( aff<SQLITE_AFF_NUMERIC ){
+ if( zChar ){
+ while( zChar[0] ){
+ if( sqlite3Isdigit(zChar[0]) ){
+ /* BLOB(k), VARCHAR(k), CHAR(k) -> r=(k/4+1) */
+ sqlite3GetInt32(zChar, &v);
+ break;
+ }
+ zChar++;
+ }
+ }else{
+ v = 16; /* BLOB, TEXT, CLOB -> r=5 (approx 20 bytes)*/
+ }
+ }
+#ifdef SQLITE_ENABLE_SORTER_REFERENCES
+ if( v>=sqlite3GlobalConfig.szSorterRef ){
+ pCol->colFlags |= COLFLAG_SORTERREF;
+ }
+#endif
+ v = v/4 + 1;
+ if( v>255 ) v = 255;
+ pCol->szEst = v;
+ }
+ return aff;
+}
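+
+/* Worked example (illustrative only): for zIn=="VARCHAR(10)" the
+** rolling hash matches the substring 'CHAR', so SQLITE_AFF_TEXT is
+** returned. If pCol is supplied, the "(10)" is parsed as the declared
+** size k=10, giving pCol->szEst = k/4 + 1 = 3.
+*/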
+
+/*
+** The expression is the default value for the most recently added column
+** of the table currently under construction.
+**
+** Default value expressions must be constant. Raise an exception if this
+** is not the case.
+**
+** This routine is called by the parser while in the middle of
+** parsing a CREATE TABLE statement.
+*/
+SQLITE_PRIVATE void sqlite3AddDefaultValue(
+ Parse *pParse, /* Parsing context */
+ Expr *pExpr, /* The parsed expression of the default value */
+ const char *zStart, /* Start of the default value text */
+ const char *zEnd /* First character past end of defaut value text */
+){
+ Table *p;
+ Column *pCol;
+ sqlite3 *db = pParse->db;
+ p = pParse->pNewTable;
+ if( p!=0 ){
+ int isInit = db->init.busy && db->init.iDb!=1;
+ pCol = &(p->aCol[p->nCol-1]);
+ if( !sqlite3ExprIsConstantOrFunction(pExpr, isInit) ){
+ sqlite3ErrorMsg(pParse, "default value of column [%s] is not constant",
+ pCol->zName);
+#ifndef SQLITE_OMIT_GENERATED_COLUMNS
+ }else if( pCol->colFlags & COLFLAG_GENERATED ){
+ testcase( pCol->colFlags & COLFLAG_VIRTUAL );
+ testcase( pCol->colFlags & COLFLAG_STORED );
+ sqlite3ErrorMsg(pParse, "cannot use DEFAULT on a generated column");
+#endif
+ }else{
+ /* A copy of pExpr is used instead of the original, as pExpr contains
+ ** tokens that point to volatile memory.
+ */
+ Expr x;
+ sqlite3ExprDelete(db, pCol->pDflt);
+ memset(&x, 0, sizeof(x));
+ x.op = TK_SPAN;
+ x.u.zToken = sqlite3DbSpanDup(db, zStart, zEnd);
+ x.pLeft = pExpr;
+ x.flags = EP_Skip;
+ pCol->pDflt = sqlite3ExprDup(db, &x, EXPRDUP_REDUCE);
+ sqlite3DbFree(db, x.u.zToken);
+ }
+ }
+ if( IN_RENAME_OBJECT ){
+ sqlite3RenameExprUnmap(pParse, pExpr);
+ }
+ sqlite3ExprDelete(db, pExpr);
+}
+
+/*
+** Backwards Compatibility Hack:
+**
+** Historical versions of SQLite accepted strings as column names in
+** indexes and PRIMARY KEY constraints and in UNIQUE constraints. Example:
+**
+**     CREATE TABLE xyz(a,b,c,d,e,PRIMARY KEY('a'),UNIQUE('b','c' COLLATE trim));
+** CREATE INDEX abc ON xyz('c','d' DESC,'e' COLLATE nocase DESC);
+**
+** This is goofy. But to preserve backwards compatibility we continue to
+** accept it. This routine does the necessary conversion. It converts
+** the expression given in its argument from a TK_STRING into a TK_ID
+** if the expression is just a TK_STRING with an optional COLLATE clause.
+** If the expression is anything other than TK_STRING, the expression is
+** unchanged.
+*/
+static void sqlite3StringToId(Expr *p){
+ if( p->op==TK_STRING ){
+ p->op = TK_ID;
+ }else if( p->op==TK_COLLATE && p->pLeft->op==TK_STRING ){
+ p->pLeft->op = TK_ID;
+ }
+}
+
+/*
+** Tag the given column as being part of the PRIMARY KEY
+*/
+static void makeColumnPartOfPrimaryKey(Parse *pParse, Column *pCol){
+ pCol->colFlags |= COLFLAG_PRIMKEY;
+#ifndef SQLITE_OMIT_GENERATED_COLUMNS
+ if( pCol->colFlags & COLFLAG_GENERATED ){
+ testcase( pCol->colFlags & COLFLAG_VIRTUAL );
+ testcase( pCol->colFlags & COLFLAG_STORED );
+ sqlite3ErrorMsg(pParse,
+ "generated columns cannot be part of the PRIMARY KEY");
+ }
+#endif
+}
+
+/*
+** Designate the PRIMARY KEY for the table. pList is a list of names
+** of columns that form the primary key. If pList is NULL, then the
+** most recently added column of the table is the primary key.
+**
+** A table can have at most one primary key. If the table already has
+** a primary key (and this is the second primary key) then raise an
+** error.
+**
+** If the PRIMARY KEY is on a single column whose datatype is INTEGER,
+** then we will try to use that column as the rowid. Set the Table.iPKey
+** field of the table under construction to be the index of the
+** INTEGER PRIMARY KEY column. Table.iPKey is set to -1 if there is
+** no INTEGER PRIMARY KEY.
+**
+** If the key is not an INTEGER PRIMARY KEY, then create a unique
+** index for the key. No index is created for INTEGER PRIMARY KEYs.
+*/
+SQLITE_PRIVATE void sqlite3AddPrimaryKey(
+ Parse *pParse, /* Parsing context */
+ ExprList *pList, /* List of field names to be indexed */
+ int onError, /* What to do with a uniqueness conflict */
+ int autoInc, /* True if the AUTOINCREMENT keyword is present */
+ int sortOrder /* SQLITE_SO_ASC or SQLITE_SO_DESC */
+){
+ Table *pTab = pParse->pNewTable;
+ Column *pCol = 0;
+ int iCol = -1, i;
+ int nTerm;
+ if( pTab==0 ) goto primary_key_exit;
+ if( pTab->tabFlags & TF_HasPrimaryKey ){
+ sqlite3ErrorMsg(pParse,
+ "table \"%s\" has more than one primary key", pTab->zName);
+ goto primary_key_exit;
+ }
+ pTab->tabFlags |= TF_HasPrimaryKey;
+ if( pList==0 ){
+ iCol = pTab->nCol - 1;
+ pCol = &pTab->aCol[iCol];
+ makeColumnPartOfPrimaryKey(pParse, pCol);
+ nTerm = 1;
+ }else{
+ nTerm = pList->nExpr;
+ for(i=0; i<nTerm; i++){
+ Expr *pCExpr = sqlite3ExprSkipCollate(pList->a[i].pExpr);
+ assert( pCExpr!=0 );
+ sqlite3StringToId(pCExpr);
+ if( pCExpr->op==TK_ID ){
+ const char *zCName = pCExpr->u.zToken;
+ for(iCol=0; iCol<pTab->nCol; iCol++){
+ if( sqlite3StrICmp(zCName, pTab->aCol[iCol].zName)==0 ){
+ pCol = &pTab->aCol[iCol];
+ makeColumnPartOfPrimaryKey(pParse, pCol);
+ break;
+ }
+ }
+ }
+ }
+ }
+ if( nTerm==1
+ && pCol
+ && sqlite3StrICmp(sqlite3ColumnType(pCol,""), "INTEGER")==0
+ && sortOrder!=SQLITE_SO_DESC
+ ){
+ if( IN_RENAME_OBJECT && pList ){
+ Expr *pCExpr = sqlite3ExprSkipCollate(pList->a[0].pExpr);
+ sqlite3RenameTokenRemap(pParse, &pTab->iPKey, pCExpr);
+ }
+ pTab->iPKey = iCol;
+ pTab->keyConf = (u8)onError;
+ assert( autoInc==0 || autoInc==1 );
+ pTab->tabFlags |= autoInc*TF_Autoincrement;
+ if( pList ) pParse->iPkSortOrder = pList->a[0].sortFlags;
+ (void)sqlite3HasExplicitNulls(pParse, pList);
+ }else if( autoInc ){
+#ifndef SQLITE_OMIT_AUTOINCREMENT
+ sqlite3ErrorMsg(pParse, "AUTOINCREMENT is only allowed on an "
+ "INTEGER PRIMARY KEY");
+#endif
+ }else{
+ sqlite3CreateIndex(pParse, 0, 0, 0, pList, onError, 0,
+ 0, sortOrder, 0, SQLITE_IDXTYPE_PRIMARYKEY);
+ pList = 0;
+ }
+
+primary_key_exit:
+ sqlite3ExprListDelete(pParse->db, pList);
+ return;
+}
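+
+/* Illustrative examples (not part of the original comments):
+**
+**   CREATE TABLE t1(a INTEGER PRIMARY KEY);  -- "a" becomes an alias
+**                                            -- for the rowid; no extra
+**                                            -- index is created
+**   CREATE TABLE t2(a INT PRIMARY KEY);      -- "INT" is not exactly
+**                                            -- "INTEGER", so a separate
+**                                            -- unique index is created
+*/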
+
+/*
+** Add a new CHECK constraint to the table currently under construction.
+*/
+SQLITE_PRIVATE void sqlite3AddCheckConstraint(
+ Parse *pParse, /* Parsing context */
+ Expr *pCheckExpr /* The check expression */
+){
+#ifndef SQLITE_OMIT_CHECK
+ Table *pTab = pParse->pNewTable;
+ sqlite3 *db = pParse->db;
+ if( pTab && !IN_DECLARE_VTAB
+ && !sqlite3BtreeIsReadonly(db->aDb[db->init.iDb].pBt)
+ ){
+ pTab->pCheck = sqlite3ExprListAppend(pParse, pTab->pCheck, pCheckExpr);
+ if( pParse->constraintName.n ){
+ sqlite3ExprListSetName(pParse, pTab->pCheck, &pParse->constraintName, 1);
+ }
+ }else
+#endif
+ {
+ sqlite3ExprDelete(pParse->db, pCheckExpr);
+ }
+}
+
+/*
+** Set the collation function of the most recently parsed table column
+** to the CollSeq given.
+*/
+SQLITE_PRIVATE void sqlite3AddCollateType(Parse *pParse, Token *pToken){
+ Table *p;
+ int i;
+ char *zColl; /* Dequoted name of collation sequence */
+ sqlite3 *db;
+
+ if( (p = pParse->pNewTable)==0 ) return;
+ i = p->nCol-1;
+ db = pParse->db;
+ zColl = sqlite3NameFromToken(db, pToken);
+ if( !zColl ) return;
+
+ if( sqlite3LocateCollSeq(pParse, zColl) ){
+ Index *pIdx;
+ sqlite3DbFree(db, p->aCol[i].zColl);
+ p->aCol[i].zColl = zColl;
+
+ /* If the column is declared as "<name> PRIMARY KEY COLLATE <type>",
+ ** then an index may have been created on this column before the
+ ** collation type was added. Correct this if it is the case.
+ */
+ for(pIdx=p->pIndex; pIdx; pIdx=pIdx->pNext){
+ assert( pIdx->nKeyCol==1 );
+ if( pIdx->aiColumn[0]==i ){
+ pIdx->azColl[0] = p->aCol[i].zColl;
+ }
+ }
+ }else{
+ sqlite3DbFree(db, zColl);
+ }
+}
+
+/* Change the most recently parsed column to be a GENERATED ALWAYS AS
+** column.
+*/
+SQLITE_PRIVATE void sqlite3AddGenerated(Parse *pParse, Expr *pExpr, Token *pType){
+#ifndef SQLITE_OMIT_GENERATED_COLUMNS
+ u8 eType = COLFLAG_VIRTUAL;
+ Table *pTab = pParse->pNewTable;
+ Column *pCol;
+ if( pTab==0 ){
+    /* generated column in a CREATE TABLE IF NOT EXISTS that already exists */
+ goto generated_done;
+ }
+ pCol = &(pTab->aCol[pTab->nCol-1]);
+ if( IN_DECLARE_VTAB ){
+ sqlite3ErrorMsg(pParse, "virtual tables cannot use computed columns");
+ goto generated_done;
+ }
+ if( pCol->pDflt ) goto generated_error;
+ if( pType ){
+ if( pType->n==7 && sqlite3StrNICmp("virtual",pType->z,7)==0 ){
+ /* no-op */
+ }else if( pType->n==6 && sqlite3StrNICmp("stored",pType->z,6)==0 ){
+ eType = COLFLAG_STORED;
+ }else{
+ goto generated_error;
+ }
+ }
+ if( eType==COLFLAG_VIRTUAL ) pTab->nNVCol--;
+ pCol->colFlags |= eType;
+ assert( TF_HasVirtual==COLFLAG_VIRTUAL );
+ assert( TF_HasStored==COLFLAG_STORED );
+ pTab->tabFlags |= eType;
+ if( pCol->colFlags & COLFLAG_PRIMKEY ){
+ makeColumnPartOfPrimaryKey(pParse, pCol); /* For the error message */
+ }
+ pCol->pDflt = pExpr;
+ pExpr = 0;
+ goto generated_done;
+
+generated_error:
+ sqlite3ErrorMsg(pParse, "error in generated column \"%s\"",
+ pCol->zName);
+generated_done:
+ sqlite3ExprDelete(pParse->db, pExpr);
+#else
+  /* Throw an error for the GENERATED ALWAYS AS clause if the
+ ** SQLITE_OMIT_GENERATED_COLUMNS compile-time option is used. */
+ sqlite3ErrorMsg(pParse, "generated columns not supported");
+ sqlite3ExprDelete(pParse->db, pExpr);
+#endif
+}
+
+/*
+** Generate code that will increment the schema cookie.
+**
+** The schema cookie is used to determine when the schema for the
+** database changes. After each schema change, the cookie value
+** changes. When a process first reads the schema it records the
+** cookie. Thereafter, whenever it goes to access the database,
+** it checks the cookie to make sure the schema has not changed
+** since it was last read.
+**
+** This plan is not completely bullet-proof. It is possible for
+** the schema to change multiple times and for the cookie to be
+** set back to a prior value. But schema changes are infrequent
+** and the probability of hitting the same cookie value is only
+** 1 chance in 2^32. So we're safe enough.
+**
+** IMPLEMENTATION-OF: R-34230-56049 SQLite automatically increments
+** the schema-version whenever the schema changes.
+*/
+SQLITE_PRIVATE void sqlite3ChangeCookie(Parse *pParse, int iDb){
+ sqlite3 *db = pParse->db;
+ Vdbe *v = pParse->pVdbe;
+ assert( sqlite3SchemaMutexHeld(db, iDb, 0) );
+ sqlite3VdbeAddOp3(v, OP_SetCookie, iDb, BTREE_SCHEMA_VERSION,
+ (int)(1+(unsigned)db->aDb[iDb].pSchema->schema_cookie));
+}
+
+/*
+** Measure the number of characters needed to output the given
+** identifier. The number returned includes any quotes used
+** but does not include the null terminator.
+**
+** The estimate is conservative. It might be larger than what is
+** really needed.
+*/
+static int identLength(const char *z){
+ int n;
+ for(n=0; *z; n++, z++){
+ if( *z=='"' ){ n++; }
+ }
+ return n + 2;
+}
+
+/*
+** The first parameter is a pointer to an output buffer. The second
+** parameter is a pointer to an integer that contains the offset at
+** which to write into the output buffer. This function copies the
+** nul-terminated string pointed to by the third parameter, zSignedIdent,
+** to the specified offset in the buffer and updates *pIdx to refer
+** to the first byte after the last byte written before returning.
+**
+** If the string zSignedIdent consists entirely of alpha-numeric
+** characters, does not begin with a digit and is not an SQL keyword,
+** then it is copied to the output buffer exactly as it is. Otherwise,
+** it is quoted using double-quotes.
+*/
+static void identPut(char *z, int *pIdx, char *zSignedIdent){
+ unsigned char *zIdent = (unsigned char*)zSignedIdent;
+ int i, j, needQuote;
+ i = *pIdx;
+
+ for(j=0; zIdent[j]; j++){
+ if( !sqlite3Isalnum(zIdent[j]) && zIdent[j]!='_' ) break;
+ }
+ needQuote = sqlite3Isdigit(zIdent[0])
+ || sqlite3KeywordCode(zIdent, j)!=TK_ID
+ || zIdent[j]!=0
+ || j==0;
+
+ if( needQuote ) z[i++] = '"';
+ for(j=0; zIdent[j]; j++){
+ z[i++] = zIdent[j];
+ if( zIdent[j]=='"' ) z[i++] = '"';
+ }
+ if( needQuote ) z[i++] = '"';
+ z[i] = 0;
+ *pIdx = i;
+}
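+
+/* Worked example (illustrative only): "abc" is emitted unquoted, while
+** "order" (an SQL keyword), "1st" (leading digit), and "a b" (contains
+** a non-alphanumeric character) are each wrapped in double-quotes,
+** with any embedded '"' characters doubled.
+*/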
+
+/*
+** Generate a CREATE TABLE statement appropriate for the given
+** table. Memory to hold the text of the statement is obtained
+** from sqliteMalloc() and must be freed by the calling function.
+*/
+static char *createTableStmt(sqlite3 *db, Table *p){
+ int i, k, n;
+ char *zStmt;
+ char *zSep, *zSep2, *zEnd;
+ Column *pCol;
+ n = 0;
+ for(pCol = p->aCol, i=0; i<p->nCol; i++, pCol++){
+ n += identLength(pCol->zName) + 5;
+ }
+ n += identLength(p->zName);
+ if( n<50 ){
+ zSep = "";
+ zSep2 = ",";
+ zEnd = ")";
+ }else{
+ zSep = "\n ";
+ zSep2 = ",\n ";
+ zEnd = "\n)";
+ }
+ n += 35 + 6*p->nCol;
+ zStmt = sqlite3DbMallocRaw(0, n);
+ if( zStmt==0 ){
+ sqlite3OomFault(db);
+ return 0;
+ }
+ sqlite3_snprintf(n, zStmt, "CREATE TABLE ");
+ k = sqlite3Strlen30(zStmt);
+ identPut(zStmt, &k, p->zName);
+ zStmt[k++] = '(';
+ for(pCol=p->aCol, i=0; i<p->nCol; i++, pCol++){
+ static const char * const azType[] = {
+ /* SQLITE_AFF_BLOB */ "",
+ /* SQLITE_AFF_TEXT */ " TEXT",
+ /* SQLITE_AFF_NUMERIC */ " NUM",
+ /* SQLITE_AFF_INTEGER */ " INT",
+ /* SQLITE_AFF_REAL */ " REAL"
+ };
+ int len;
+ const char *zType;
+
+ sqlite3_snprintf(n-k, &zStmt[k], zSep);
+ k += sqlite3Strlen30(&zStmt[k]);
+ zSep = zSep2;
+ identPut(zStmt, &k, pCol->zName);
+ assert( pCol->affinity-SQLITE_AFF_BLOB >= 0 );
+ assert( pCol->affinity-SQLITE_AFF_BLOB < ArraySize(azType) );
+ testcase( pCol->affinity==SQLITE_AFF_BLOB );
+ testcase( pCol->affinity==SQLITE_AFF_TEXT );
+ testcase( pCol->affinity==SQLITE_AFF_NUMERIC );
+ testcase( pCol->affinity==SQLITE_AFF_INTEGER );
+ testcase( pCol->affinity==SQLITE_AFF_REAL );
+
+ zType = azType[pCol->affinity - SQLITE_AFF_BLOB];
+ len = sqlite3Strlen30(zType);
+ assert( pCol->affinity==SQLITE_AFF_BLOB
+ || pCol->affinity==sqlite3AffinityType(zType, 0) );
+ memcpy(&zStmt[k], zType, len);
+ k += len;
+ assert( k<=n );
+ }
+ sqlite3_snprintf(n-k, &zStmt[k], "%s", zEnd);
+ return zStmt;
+}
+
+/*
+** Resize an Index object to hold N columns total. Return SQLITE_OK
+** on success and SQLITE_NOMEM on an OOM error.
+*/
+static int resizeIndexObject(sqlite3 *db, Index *pIdx, int N){
+ char *zExtra;
+ int nByte;
+ if( pIdx->nColumn>=N ) return SQLITE_OK;
+ assert( pIdx->isResized==0 );
+ nByte = (sizeof(char*) + sizeof(i16) + 1)*N;
+ zExtra = sqlite3DbMallocZero(db, nByte);
+ if( zExtra==0 ) return SQLITE_NOMEM_BKPT;
+ memcpy(zExtra, pIdx->azColl, sizeof(char*)*pIdx->nColumn);
+ pIdx->azColl = (const char**)zExtra;
+ zExtra += sizeof(char*)*N;
+ memcpy(zExtra, pIdx->aiColumn, sizeof(i16)*pIdx->nColumn);
+ pIdx->aiColumn = (i16*)zExtra;
+ zExtra += sizeof(i16)*N;
+ memcpy(zExtra, pIdx->aSortOrder, pIdx->nColumn);
+ pIdx->aSortOrder = (u8*)zExtra;
+ pIdx->nColumn = N;
+ pIdx->isResized = 1;
+ return SQLITE_OK;
+}
+
+/*
+** Estimate the total row width for a table.
+*/
+static void estimateTableWidth(Table *pTab){
+ unsigned wTable = 0;
+ const Column *pTabCol;
+ int i;
+ for(i=pTab->nCol, pTabCol=pTab->aCol; i>0; i--, pTabCol++){
+ wTable += pTabCol->szEst;
+ }
+ if( pTab->iPKey<0 ) wTable++;
+ pTab->szTabRow = sqlite3LogEst(wTable*4);
+}
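+
+/* Worked example (illustrative only): a rowid table whose three
+** columns have szEst values 1, 1, and 3, and which has no INTEGER
+** PRIMARY KEY (so iPKey<0), gives wTable = 1+1+3+1 = 6 and
+** szTabRow = sqlite3LogEst(24).
+*/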
+
+/*
+** Estimate the average size of a row for an index.
+*/
+static void estimateIndexWidth(Index *pIdx){
+ unsigned wIndex = 0;
+ int i;
+ const Column *aCol = pIdx->pTable->aCol;
+ for(i=0; i<pIdx->nColumn; i++){
+ i16 x = pIdx->aiColumn[i];
+ assert( x<pIdx->pTable->nCol );
+ wIndex += x<0 ? 1 : aCol[pIdx->aiColumn[i]].szEst;
+ }
+ pIdx->szIdxRow = sqlite3LogEst(wIndex*4);
+}
+
+/* Return true if column number x is any of the first nCol entries of aiCol[].
+** This is used to determine if the column number x appears in any of the
+** first nCol entries of an index.
+*/
+static int hasColumn(const i16 *aiCol, int nCol, int x){
+ while( nCol-- > 0 ){
+ assert( aiCol[0]>=0 );
+ if( x==*(aiCol++) ){
+ return 1;
+ }
+ }
+ return 0;
+}
+
+/*
+** Return true if any of the first nKey entries of index pIdx exactly
+** match the iCol-th entry of pPk. pPk is always a WITHOUT ROWID
+** PRIMARY KEY index. pIdx is an index on the same table. pIdx may
+** or may not be the same index as pPk.
+**
+** The first nKey entries of pIdx are guaranteed to be ordinary columns,
+** not a rowid or expression.
+**
+** This routine differs from hasColumn() in that both the column and the
+** collating sequence must match for this routine, but for hasColumn() only
+** the column name must match.
+*/
+static int isDupColumn(Index *pIdx, int nKey, Index *pPk, int iCol){
+ int i, j;
+ assert( nKey<=pIdx->nColumn );
+ assert( iCol<MAX(pPk->nColumn,pPk->nKeyCol) );
+ assert( pPk->idxType==SQLITE_IDXTYPE_PRIMARYKEY );
+ assert( pPk->pTable->tabFlags & TF_WithoutRowid );
+ assert( pPk->pTable==pIdx->pTable );
+ testcase( pPk==pIdx );
+ j = pPk->aiColumn[iCol];
+ assert( j!=XN_ROWID && j!=XN_EXPR );
+ for(i=0; i<nKey; i++){
+ assert( pIdx->aiColumn[i]>=0 || j>=0 );
+ if( pIdx->aiColumn[i]==j
+ && sqlite3StrICmp(pIdx->azColl[i], pPk->azColl[iCol])==0
+ ){
+ return 1;
+ }
+ }
+ return 0;
+}
+
+/* Recompute the colNotIdxed field of the Index.
+**
+** colNotIdxed is a bitmask that has a 0 bit for each indexed column
+** that is within the first 63 columns of the table. The high-order
+** bit of colNotIdxed is always 1. All unindexed columns of the table
+** have a 1 bit.
+**
+** 2019-10-24: For the purpose of this computation, virtual columns are
+** not considered to be covered by the index, even if they are in the
+** index, because we do not trust the logic in whereIndexExprTrans() to be
+** able to find all instances of a reference to the indexed table column
+** and convert them into references to the index. Hence we always want
+** the actual table at hand in order to recompute the virtual column, if
+** necessary.
+**
+** The colNotIdxed mask is AND-ed with the SrcList.a[].colUsed mask
+** to determine if the index is a covering index.
+*/
+static void recomputeColumnsNotIndexed(Index *pIdx){
+ Bitmask m = 0;
+ int j;
+ Table *pTab = pIdx->pTable;
+ for(j=pIdx->nColumn-1; j>=0; j--){
+ int x = pIdx->aiColumn[j];
+ if( x>=0 && (pTab->aCol[x].colFlags & COLFLAG_VIRTUAL)==0 ){
+ testcase( x==BMS-1 );
+ testcase( x==BMS-2 );
+ if( x<BMS-1 ) m |= MASKBIT(x);
+ }
+ }
+ pIdx->colNotIdxed = ~m;
+ assert( (pIdx->colNotIdxed>>63)==1 );
+}
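+
+/* Illustration: with "CREATE TABLE t1(a,b,c)" (columns numbered 0..2)
+** and an index on (c,a), the loop above sets m to
+** MASKBIT(2)|MASKBIT(0), so colNotIdxed ends up with 0-bits only for
+** columns a and c. The rowid tail of the index has aiColumn[]<0 and
+** is skipped, and bit 63 stays set, as the assert() above verifies.
+*/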
+
+/*
+** This routine runs at the end of parsing a CREATE TABLE statement that
+** has a WITHOUT ROWID clause. The job of this routine is to convert both
+** internal schema data structures and the generated VDBE code so that they
+** are appropriate for a WITHOUT ROWID table instead of a rowid table.
+** Changes include:
+**
+** (1) Set all columns of the PRIMARY KEY schema object to be NOT NULL.
+** (2) Convert the P3 parameter of the OP_CreateBtree opcode from
+** BTREE_INTKEY into BTREE_BLOBKEY.
+** (3) Bypass the creation of the sqlite_schema table entry
+** for the PRIMARY KEY as the primary key index is now
+** identified by the sqlite_schema table entry of the table itself.
+** (4) Set the Index.tnum of the PRIMARY KEY Index object in the
+** schema to the rootpage from the main table.
+** (5) Add all table columns to the PRIMARY KEY Index object
+** so that the PRIMARY KEY is a covering index. The surplus
+** columns are part of KeyInfo.nAllField and are not used for
+** sorting or lookup or uniqueness checks.
+** (6) Replace the rowid tail on all automatically generated UNIQUE
+** indices with the PRIMARY KEY columns.
+**
+** For virtual tables, only (1) is performed.
+*/
+static void convertToWithoutRowidTable(Parse *pParse, Table *pTab){
+ Index *pIdx;
+ Index *pPk;
+ int nPk;
+ int nExtra;
+ int i, j;
+ sqlite3 *db = pParse->db;
+ Vdbe *v = pParse->pVdbe;
+
+ /* Mark every PRIMARY KEY column as NOT NULL (except for imposter tables)
+ */
+ if( !db->init.imposterTable ){
+ for(i=0; i<pTab->nCol; i++){
+ if( (pTab->aCol[i].colFlags & COLFLAG_PRIMKEY)!=0 ){
+ pTab->aCol[i].notNull = OE_Abort;
+ }
+ }
+ pTab->tabFlags |= TF_HasNotNull;
+ }
+
+ /* Convert the P3 operand of the OP_CreateBtree opcode from BTREE_INTKEY
+ ** into BTREE_BLOBKEY.
+ */
+ if( pParse->addrCrTab ){
+ assert( v );
+ sqlite3VdbeChangeP3(v, pParse->addrCrTab, BTREE_BLOBKEY);
+ }
+
+ /* Locate the PRIMARY KEY index. Or, if this table was originally
+ ** an INTEGER PRIMARY KEY table, create a new PRIMARY KEY index.
+ */
+ if( pTab->iPKey>=0 ){
+ ExprList *pList;
+ Token ipkToken;
+ sqlite3TokenInit(&ipkToken, pTab->aCol[pTab->iPKey].zName);
+ pList = sqlite3ExprListAppend(pParse, 0,
+ sqlite3ExprAlloc(db, TK_ID, &ipkToken, 0));
+ if( pList==0 ) return;
+ if( IN_RENAME_OBJECT ){
+ sqlite3RenameTokenRemap(pParse, pList->a[0].pExpr, &pTab->iPKey);
+ }
+ pList->a[0].sortFlags = pParse->iPkSortOrder;
+ assert( pParse->pNewTable==pTab );
+ pTab->iPKey = -1;
+ sqlite3CreateIndex(pParse, 0, 0, 0, pList, pTab->keyConf, 0, 0, 0, 0,
+ SQLITE_IDXTYPE_PRIMARYKEY);
+ if( db->mallocFailed || pParse->nErr ) return;
+ pPk = sqlite3PrimaryKeyIndex(pTab);
+ assert( pPk->nKeyCol==1 );
+ }else{
+ pPk = sqlite3PrimaryKeyIndex(pTab);
+ assert( pPk!=0 );
+
+ /*
+ ** Remove all redundant columns from the PRIMARY KEY. For example, change
+ ** "PRIMARY KEY(a,b,a,b,c,b,c,d)" into just "PRIMARY KEY(a,b,c,d)". Later
+ ** code assumes the PRIMARY KEY contains no repeated columns.
+ */
+ for(i=j=1; i<pPk->nKeyCol; i++){
+ if( isDupColumn(pPk, j, pPk, i) ){
+ pPk->nColumn--;
+ }else{
+ testcase( hasColumn(pPk->aiColumn, j, pPk->aiColumn[i]) );
+ pPk->azColl[j] = pPk->azColl[i];
+ pPk->aSortOrder[j] = pPk->aSortOrder[i];
+ pPk->aiColumn[j++] = pPk->aiColumn[i];
+ }
+ }
+ pPk->nKeyCol = j;
+ }
+ assert( pPk!=0 );
+ pPk->isCovering = 1;
+ if( !db->init.imposterTable ) pPk->uniqNotNull = 1;
+ nPk = pPk->nColumn = pPk->nKeyCol;
+
+ /* Bypass the creation of the PRIMARY KEY btree and the sqlite_schema
+ ** table entry. This is only required if currently generating VDBE
+ ** code for a CREATE TABLE (not when parsing one as part of reading
+ ** a database schema). */
+ if( v && pPk->tnum>0 ){
+ assert( db->init.busy==0 );
+ sqlite3VdbeChangeOpcode(v, (int)pPk->tnum, OP_Goto);
+ }
+
+ /* The root page of the PRIMARY KEY is the table root page */
+ pPk->tnum = pTab->tnum;
+
+ /* Update the in-memory representation of all UNIQUE indices by converting
+ ** the final rowid column into one or more columns of the PRIMARY KEY.
+ */
+ for(pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext){
+ int n;
+ if( IsPrimaryKeyIndex(pIdx) ) continue;
+ for(i=n=0; i<nPk; i++){
+ if( !isDupColumn(pIdx, pIdx->nKeyCol, pPk, i) ){
+ testcase( hasColumn(pIdx->aiColumn, pIdx->nKeyCol, pPk->aiColumn[i]) );
+ n++;
+ }
+ }
+ if( n==0 ){
+ /* This index is a superset of the primary key */
+ pIdx->nColumn = pIdx->nKeyCol;
+ continue;
+ }
+ if( resizeIndexObject(db, pIdx, pIdx->nKeyCol+n) ) return;
+ for(i=0, j=pIdx->nKeyCol; i<nPk; i++){
+ if( !isDupColumn(pIdx, pIdx->nKeyCol, pPk, i) ){
+ testcase( hasColumn(pIdx->aiColumn, pIdx->nKeyCol, pPk->aiColumn[i]) );
+ pIdx->aiColumn[j] = pPk->aiColumn[i];
+ pIdx->azColl[j] = pPk->azColl[i];
+ if( pPk->aSortOrder[i] ){
+ /* See ticket https://www.sqlite.org/src/info/bba7b69f9849b5bf */
+ pIdx->bAscKeyBug = 1;
+ }
+ j++;
+ }
+ }
+ assert( pIdx->nColumn>=pIdx->nKeyCol+n );
+ assert( pIdx->nColumn>=j );
+ }
+
+ /* Add all table columns to the PRIMARY KEY index
+ */
+ nExtra = 0;
+ for(i=0; i<pTab->nCol; i++){
+ if( !hasColumn(pPk->aiColumn, nPk, i)
+ && (pTab->aCol[i].colFlags & COLFLAG_VIRTUAL)==0 ) nExtra++;
+ }
+ if( resizeIndexObject(db, pPk, nPk+nExtra) ) return;
+ for(i=0, j=nPk; i<pTab->nCol; i++){
+ if( !hasColumn(pPk->aiColumn, j, i)
+ && (pTab->aCol[i].colFlags & COLFLAG_VIRTUAL)==0
+ ){
+ assert( j<pPk->nColumn );
+ pPk->aiColumn[j] = i;
+ pPk->azColl[j] = sqlite3StrBINARY;
+ j++;
+ }
+ }
+ assert( pPk->nColumn==j );
+ assert( pTab->nNVCol<=j );
+ recomputeColumnsNotIndexed(pPk);
+}
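+
+/* Illustration (hypothetical schema): given
+**
+**     CREATE TABLE t1(a,b,c,d, PRIMARY KEY(a,b), UNIQUE(c)) WITHOUT ROWID;
+**
+** the PRIMARY KEY index keeps nKeyCol==2 for (a,b) but is extended to
+** nColumn==4 by appending c and d, making it a covering index. The
+** automatic UNIQUE(c) index has its rowid tail replaced by the PRIMARY
+** KEY columns, becoming (c,a,b) with nKeyCol==1 and nColumn==3.
+*/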
+
+
+#ifndef SQLITE_OMIT_VIRTUALTABLE
+/*
+** Return true if pTab is a virtual table and zName is a shadow table name
+** for that virtual table.
+*/
+SQLITE_PRIVATE int sqlite3IsShadowTableOf(sqlite3 *db, Table *pTab, const char *zName){
+ int nName; /* Length of zName */
+ Module *pMod; /* Module for the virtual table */
+
+ if( !IsVirtual(pTab) ) return 0;
+ nName = sqlite3Strlen30(pTab->zName);
+ if( sqlite3_strnicmp(zName, pTab->zName, nName)!=0 ) return 0;
+ if( zName[nName]!='_' ) return 0;
+ pMod = (Module*)sqlite3HashFind(&db->aModule, pTab->azModuleArg[0]);
+ if( pMod==0 ) return 0;
+ if( pMod->pModule->iVersion<3 ) return 0;
+ if( pMod->pModule->xShadowName==0 ) return 0;
+ return pMod->pModule->xShadowName(zName+nName+1);
+}
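+
+/* Illustration (FTS5 is assumed here; shadow suffixes are
+** module-specific): if t1 is an FTS5 virtual table, then for
+** zName=="t1_data" the name prefix and the '_' match above and the
+** final call is effectively xShadowName("data"), which the FTS5
+** module answers affirmatively, so "t1_data" is recognized as a
+** shadow table of t1.
+*/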
+#endif /* ifndef SQLITE_OMIT_VIRTUALTABLE */
+
+#ifndef SQLITE_OMIT_VIRTUALTABLE
+/*
+** Return true if zName is a shadow table name in the current database
+** connection.
+**
+** zName is temporarily modified while this routine is running, but is
+** restored to its original value prior to this routine returning.
+*/
+SQLITE_PRIVATE int sqlite3ShadowTableName(sqlite3 *db, const char *zName){
+ char *zTail; /* Pointer to the last "_" in zName */
+ Table *pTab; /* Table that zName is a shadow of */
+ zTail = strrchr(zName, '_');
+ if( zTail==0 ) return 0;
+ *zTail = 0;
+ pTab = sqlite3FindTable(db, zName, 0);
+ *zTail = '_';
+ if( pTab==0 ) return 0;
+ if( !IsVirtual(pTab) ) return 0;
+ return sqlite3IsShadowTableOf(db, pTab, zName);
+}
+#endif /* ifndef SQLITE_OMIT_VIRTUALTABLE */
+
+
+#ifdef SQLITE_DEBUG
+/*
+** Mark all nodes of an expression as EP_Immutable, indicating that
+** they should not be changed. Expressions attached to a table or
+** index definition are tagged this way to help ensure that we do
+** not pass them into code generator routines by mistake.
+*/
+static int markImmutableExprStep(Walker *pWalker, Expr *pExpr){
+ ExprSetVVAProperty(pExpr, EP_Immutable);
+ return WRC_Continue;
+}
+static void markExprListImmutable(ExprList *pList){
+ if( pList ){
+ Walker w;
+ memset(&w, 0, sizeof(w));
+ w.xExprCallback = markImmutableExprStep;
+ w.xSelectCallback = sqlite3SelectWalkNoop;
+ w.xSelectCallback2 = 0;
+ sqlite3WalkExprList(&w, pList);
+ }
+}
+#else
+#define markExprListImmutable(X) /* no-op */
+#endif /* SQLITE_DEBUG */
+
+
+/*
+** This routine is called to report the final ")" that terminates
+** a CREATE TABLE statement.
+**
+** The table structure that other action routines have been building
+** is added to the internal hash tables, assuming no errors have
+** occurred.
+**
+** An entry for the table is made in the schema table on disk, unless
+** this is a temporary table or db->init.busy==1. When db->init.busy==1
+** it means we are reading the sqlite_schema table because we just
+** connected to the database or because the sqlite_schema table has
+** recently changed, so the entry for this table already exists in
+** the sqlite_schema table. We do not want to create it again.
+**
+** If the pSelect argument is not NULL, it means that this routine
+** was called to create a table generated from a
+** "CREATE TABLE ... AS SELECT ..." statement. The column names of
+** the new table will match the result set of the SELECT.
+*/
+SQLITE_PRIVATE void sqlite3EndTable(
+ Parse *pParse, /* Parse context */
+ Token *pCons, /* The ',' token after the last column defn. */
+ Token *pEnd, /* The ')' before options in the CREATE TABLE */
+ u8 tabOpts, /* Extra table options. Usually 0. */
+ Select *pSelect /* Select from a "CREATE ... AS SELECT" */
+){
+ Table *p; /* The new table */
+ sqlite3 *db = pParse->db; /* The database connection */
+ int iDb; /* Database in which the table lives */
+ Index *pIdx; /* An implied index of the table */
+
+ if( pEnd==0 && pSelect==0 ){
+ return;
+ }
+ assert( !db->mallocFailed );
+ p = pParse->pNewTable;
+ if( p==0 ) return;
+
+ if( pSelect==0 && sqlite3ShadowTableName(db, p->zName) ){
+ p->tabFlags |= TF_Shadow;
+ }
+
+ /* If db->init.busy is 1, it means we are reading the SQL off the
+ ** "sqlite_schema" or "sqlite_temp_schema" table on disk, so do not
+ ** write to the disk again. Extract the root page number
+ ** for the table from the db->init.newTnum field. (The page number
+ ** should have been put there by the sqliteOpenCb routine.)
+ **
+ ** If the root page number is 1, that means this is the sqlite_schema
+ ** table itself. So mark it read-only.
+ */
+ if( db->init.busy ){
+ if( pSelect ){
+ sqlite3ErrorMsg(pParse, "");
+ return;
+ }
+ p->tnum = db->init.newTnum;
+ if( p->tnum==1 ) p->tabFlags |= TF_Readonly;
+ }
+
+ assert( (p->tabFlags & TF_HasPrimaryKey)==0
+ || p->iPKey>=0 || sqlite3PrimaryKeyIndex(p)!=0 );
+ assert( (p->tabFlags & TF_HasPrimaryKey)!=0
+ || (p->iPKey<0 && sqlite3PrimaryKeyIndex(p)==0) );
+
+ /* Special processing for WITHOUT ROWID Tables */
+ if( tabOpts & TF_WithoutRowid ){
+ if( (p->tabFlags & TF_Autoincrement) ){
+ sqlite3ErrorMsg(pParse,
+ "AUTOINCREMENT not allowed on WITHOUT ROWID tables");
+ return;
+ }
+ if( (p->tabFlags & TF_HasPrimaryKey)==0 ){
+ sqlite3ErrorMsg(pParse, "PRIMARY KEY missing on table %s", p->zName);
+ return;
+ }
+ p->tabFlags |= TF_WithoutRowid | TF_NoVisibleRowid;
+ convertToWithoutRowidTable(pParse, p);
+ }
+ iDb = sqlite3SchemaToIndex(db, p->pSchema);
+
+#ifndef SQLITE_OMIT_CHECK
+ /* Resolve names in all CHECK constraint expressions.
+ */
+ if( p->pCheck ){
+ sqlite3ResolveSelfReference(pParse, p, NC_IsCheck, 0, p->pCheck);
+ if( pParse->nErr ){
+ /* If errors are seen, delete the CHECK constraints now, else they might
+ ** actually be used if PRAGMA writable_schema=ON is set. */
+ sqlite3ExprListDelete(db, p->pCheck);
+ p->pCheck = 0;
+ }else{
+ markExprListImmutable(p->pCheck);
+ }
+ }
+#endif /* !defined(SQLITE_OMIT_CHECK) */
+#ifndef SQLITE_OMIT_GENERATED_COLUMNS
+ if( p->tabFlags & TF_HasGenerated ){
+ int ii, nNG = 0;
+ testcase( p->tabFlags & TF_HasVirtual );
+ testcase( p->tabFlags & TF_HasStored );
+ for(ii=0; ii<p->nCol; ii++){
+ u32 colFlags = p->aCol[ii].colFlags;
+ if( (colFlags & COLFLAG_GENERATED)!=0 ){
+ Expr *pX = p->aCol[ii].pDflt;
+ testcase( colFlags & COLFLAG_VIRTUAL );
+ testcase( colFlags & COLFLAG_STORED );
+ if( sqlite3ResolveSelfReference(pParse, p, NC_GenCol, pX, 0) ){
+ /* If there are errors in resolving the expression, change the
+ ** expression to a NULL. This prevents code generators that operate
+ ** on the expression from inserting extra parts into the expression
+ ** tree that have been allocated from lookaside memory, which is
+ ** illegal in a schema and will lead to errors or heap corruption
+ ** when the database connection closes. */
+ sqlite3ExprDelete(db, pX);
+ p->aCol[ii].pDflt = sqlite3ExprAlloc(db, TK_NULL, 0, 0);
+ }
+ }else{
+ nNG++;
+ }
+ }
+ if( nNG==0 ){
+ sqlite3ErrorMsg(pParse, "must have at least one non-generated column");
+ return;
+ }
+ }
+#endif
+
+ /* Estimate the average row size for the table and for all implied indices */
+ estimateTableWidth(p);
+ for(pIdx=p->pIndex; pIdx; pIdx=pIdx->pNext){
+ estimateIndexWidth(pIdx);
+ }
+
+ /* If not initializing, then create a record for the new table
+ ** in the schema table of the database.
+ **
+ ** If this is a TEMPORARY table, write the entry into the auxiliary
+ ** file instead of into the main database file.
+ */
+ if( !db->init.busy ){
+ int n;
+ Vdbe *v;
+ char *zType; /* "view" or "table" */
+ char *zType2; /* "VIEW" or "TABLE" */
+ char *zStmt; /* Text of the CREATE TABLE or CREATE VIEW statement */
+
+ v = sqlite3GetVdbe(pParse);
+ if( NEVER(v==0) ) return;
+
+ sqlite3VdbeAddOp1(v, OP_Close, 0);
+
+ /*
+ ** Initialize zType for the new view or table.
+ */
+ if( p->pSelect==0 ){
+ /* A regular table */
+ zType = "table";
+ zType2 = "TABLE";
+#ifndef SQLITE_OMIT_VIEW
+ }else{
+ /* A view */
+ zType = "view";
+ zType2 = "VIEW";
+#endif
+ }
+
+ /* If this is a CREATE TABLE xx AS SELECT ..., execute the SELECT
+ ** statement to populate the new table. The root-page number for the
+ ** new table is in register pParse->regRoot.
+ **
+ ** Once the SELECT has been coded by sqlite3Select(), it is in a
+ ** suitable state to query for the column names and types to be used
+ ** by the new table.
+ **
+ ** A shared-cache write-lock is not required to write to the new table,
+ ** as a schema-lock must have already been obtained to create it. Since
+ ** a schema-lock excludes all other database users, the write-lock would
+ ** be redundant.
+ */
+ if( pSelect ){
+ SelectDest dest; /* Where the SELECT should store results */
+ int regYield; /* Register holding co-routine entry-point */
+ int addrTop; /* Top of the co-routine */
+ int regRec; /* A record to be inserted into the new table */
+ int regRowid; /* Rowid of the next row to insert */
+ int addrInsLoop; /* Top of the loop for inserting rows */
+ Table *pSelTab; /* A table that describes the SELECT results */
+
+ regYield = ++pParse->nMem;
+ regRec = ++pParse->nMem;
+ regRowid = ++pParse->nMem;
+ assert(pParse->nTab==1);
+ sqlite3MayAbort(pParse);
+ sqlite3VdbeAddOp3(v, OP_OpenWrite, 1, pParse->regRoot, iDb);
+ sqlite3VdbeChangeP5(v, OPFLAG_P2ISREG);
+ pParse->nTab = 2;
+ addrTop = sqlite3VdbeCurrentAddr(v) + 1;
+ sqlite3VdbeAddOp3(v, OP_InitCoroutine, regYield, 0, addrTop);
+ if( pParse->nErr ) return;
+ pSelTab = sqlite3ResultSetOfSelect(pParse, pSelect, SQLITE_AFF_BLOB);
+ if( pSelTab==0 ) return;
+ assert( p->aCol==0 );
+ p->nCol = p->nNVCol = pSelTab->nCol;
+ p->aCol = pSelTab->aCol;
+ pSelTab->nCol = 0;
+ pSelTab->aCol = 0;
+ sqlite3DeleteTable(db, pSelTab);
+ sqlite3SelectDestInit(&dest, SRT_Coroutine, regYield);
+ sqlite3Select(pParse, pSelect, &dest);
+ if( pParse->nErr ) return;
+ sqlite3VdbeEndCoroutine(v, regYield);
+ sqlite3VdbeJumpHere(v, addrTop - 1);
+ addrInsLoop = sqlite3VdbeAddOp1(v, OP_Yield, dest.iSDParm);
+ VdbeCoverage(v);
+ sqlite3VdbeAddOp3(v, OP_MakeRecord, dest.iSdst, dest.nSdst, regRec);
+ sqlite3TableAffinity(v, p, 0);
+ sqlite3VdbeAddOp2(v, OP_NewRowid, 1, regRowid);
+ sqlite3VdbeAddOp3(v, OP_Insert, 1, regRec, regRowid);
+ sqlite3VdbeGoto(v, addrInsLoop);
+ sqlite3VdbeJumpHere(v, addrInsLoop);
+ sqlite3VdbeAddOp1(v, OP_Close, 1);
+ }
+
+ /* Compute the complete text of the CREATE statement */
+ if( pSelect ){
+ zStmt = createTableStmt(db, p);
+ }else{
+ Token *pEnd2 = tabOpts ? &pParse->sLastToken : pEnd;
+ n = (int)(pEnd2->z - pParse->sNameToken.z);
+ if( pEnd2->z[0]!=';' ) n += pEnd2->n;
+ zStmt = sqlite3MPrintf(db,
+ "CREATE %s %.*s", zType2, n, pParse->sNameToken.z
+ );
+ }
+
+ /* A slot for the record has already been allocated in the
+ ** schema table. We just need to update that slot with all
+ ** the information we've collected.
+ */
+ sqlite3NestedParse(pParse,
+ "UPDATE %Q." DFLT_SCHEMA_TABLE
+ " SET type='%s', name=%Q, tbl_name=%Q, rootpage=#%d, sql=%Q"
+ " WHERE rowid=#%d",
+ db->aDb[iDb].zDbSName,
+ zType,
+ p->zName,
+ p->zName,
+ pParse->regRoot,
+ zStmt,
+ pParse->regRowid
+ );
+ sqlite3DbFree(db, zStmt);
+ sqlite3ChangeCookie(pParse, iDb);
+
+#ifndef SQLITE_OMIT_AUTOINCREMENT
+ /* Check to see if we need to create an sqlite_sequence table for
+ ** keeping track of autoincrement keys.
+ */
+ if( (p->tabFlags & TF_Autoincrement)!=0 ){
+ Db *pDb = &db->aDb[iDb];
+ assert( sqlite3SchemaMutexHeld(db, iDb, 0) );
+ if( pDb->pSchema->pSeqTab==0 ){
+ sqlite3NestedParse(pParse,
+ "CREATE TABLE %Q.sqlite_sequence(name,seq)",
+ pDb->zDbSName
+ );
+ }
+ }
+#endif
+
+ /* Reparse everything to update our internal data structures */
+ sqlite3VdbeAddParseSchemaOp(v, iDb,
+ sqlite3MPrintf(db, "tbl_name='%q' AND type!='trigger'", p->zName));
+ }
+
+ /* Add the table to the in-memory representation of the database.
+ */
+ if( db->init.busy ){
+ Table *pOld;
+ Schema *pSchema = p->pSchema;
+ assert( sqlite3SchemaMutexHeld(db, iDb, 0) );
+ pOld = sqlite3HashInsert(&pSchema->tblHash, p->zName, p);
+ if( pOld ){
+ assert( p==pOld ); /* Malloc must have failed inside HashInsert() */
+ sqlite3OomFault(db);
+ return;
+ }
+ pParse->pNewTable = 0;
+ db->mDbFlags |= DBFLAG_SchemaChange;
+
+#ifndef SQLITE_OMIT_ALTERTABLE
+ if( !p->pSelect ){
+ const char *zName = (const char *)pParse->sNameToken.z;
+ int nName;
+ assert( !pSelect && pCons && pEnd );
+ if( pCons->z==0 ){
+ pCons = pEnd;
+ }
+ nName = (int)((const char *)pCons->z - zName);
+ p->addColOffset = 13 + sqlite3Utf8CharLen(zName, nName);
+ }
+#endif
+ }
+}
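+
+/* Illustration (hypothetical statement): after "CREATE TABLE t1(a,b)"
+** the nested UPDATE above fills the pre-allocated schema row so that
+** it reads approximately:
+**
+**     type='table', name='t1', tbl_name='t1', rootpage=N,
+**     sql='CREATE TABLE t1(a,b)'
+**
+** where N comes from register pParse->regRoot via the "#NNN" register
+** notation embedded in the nested SQL.
+*/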
+
+#ifndef SQLITE_OMIT_VIEW
+/*
+** The parser calls this routine in order to create a new VIEW
+*/
+SQLITE_PRIVATE void sqlite3CreateView(
+ Parse *pParse, /* The parsing context */
+ Token *pBegin, /* The CREATE token that begins the statement */
+ Token *pName1, /* The token that holds the name of the view */
+ Token *pName2, /* The token that holds the name of the view */
+ ExprList *pCNames, /* Optional list of view column names */
+ Select *pSelect, /* A SELECT statement that will become the new view */
+ int isTemp, /* TRUE for a TEMPORARY view */
+ int noErr /* Suppress error messages if VIEW already exists */
+){
+ Table *p;
+ int n;
+ const char *z;
+ Token sEnd;
+ DbFixer sFix;
+ Token *pName = 0;
+ int iDb;
+ sqlite3 *db = pParse->db;
+
+ if( pParse->nVar>0 ){
+ sqlite3ErrorMsg(pParse, "parameters are not allowed in views");
+ goto create_view_fail;
+ }
+ sqlite3StartTable(pParse, pName1, pName2, isTemp, 1, 0, noErr);
+ p = pParse->pNewTable;
+ if( p==0 || pParse->nErr ) goto create_view_fail;
+ sqlite3TwoPartName(pParse, pName1, pName2, &pName);
+ iDb = sqlite3SchemaToIndex(db, p->pSchema);
+ sqlite3FixInit(&sFix, pParse, iDb, "view", pName);
+ if( sqlite3FixSelect(&sFix, pSelect) ) goto create_view_fail;
+
+ /* Make a copy of the entire SELECT statement that defines the view.
+ ** This will force all the Expr.token.z values to be dynamically
+ ** allocated rather than point to the input string - which means that
+ ** they will persist after the current sqlite3_exec() call returns.
+ */
+ pSelect->selFlags |= SF_View;
+ if( IN_RENAME_OBJECT ){
+ p->pSelect = pSelect;
+ pSelect = 0;
+ }else{
+ p->pSelect = sqlite3SelectDup(db, pSelect, EXPRDUP_REDUCE);
+ }
+ p->pCheck = sqlite3ExprListDup(db, pCNames, EXPRDUP_REDUCE);
+ if( db->mallocFailed ) goto create_view_fail;
+
+ /* Locate the end of the CREATE VIEW statement. Make sEnd point to
+ ** the end.
+ */
+ sEnd = pParse->sLastToken;
+ assert( sEnd.z[0]!=0 || sEnd.n==0 );
+ if( sEnd.z[0]!=';' ){
+ sEnd.z += sEnd.n;
+ }
+ sEnd.n = 0;
+ n = (int)(sEnd.z - pBegin->z);
+ assert( n>0 );
+ z = pBegin->z;
+ while( sqlite3Isspace(z[n-1]) ){ n--; }
+ sEnd.z = &z[n-1];
+ sEnd.n = 1;
+
+ /* Use sqlite3EndTable() to add the view to the schema table */
+ sqlite3EndTable(pParse, 0, &sEnd, 0, 0);
+
+create_view_fail:
+ sqlite3SelectDelete(db, pSelect);
+ if( IN_RENAME_OBJECT ){
+ sqlite3RenameExprlistUnmap(pParse, pCNames);
+ }
+ sqlite3ExprListDelete(db, pCNames);
+ return;
+}
+#endif /* SQLITE_OMIT_VIEW */
+
+#if !defined(SQLITE_OMIT_VIEW) || !defined(SQLITE_OMIT_VIRTUALTABLE)
+/*
+** The Table structure pTable is really a VIEW. Fill in the names of
+** the columns of the view in the pTable structure. Return the number
+** of errors. If an error is seen leave an error message in pParse->zErrMsg.
+*/
+SQLITE_PRIVATE int sqlite3ViewGetColumnNames(Parse *pParse, Table *pTable){
+ Table *pSelTab; /* A fake table from which we get the result set */
+ Select *pSel; /* Copy of the SELECT that implements the view */
+ int nErr = 0; /* Number of errors encountered */
+ int n; /* Temporarily holds the number of cursors assigned */
+ sqlite3 *db = pParse->db; /* Database connection for malloc errors */
+#ifndef SQLITE_OMIT_VIRTUALTABLE
+ int rc;
+#endif
+#ifndef SQLITE_OMIT_AUTHORIZATION
+ sqlite3_xauth xAuth; /* Saved xAuth pointer */
+#endif
+
+ assert( pTable );
+
+#ifndef SQLITE_OMIT_VIRTUALTABLE
+ db->nSchemaLock++;
+ rc = sqlite3VtabCallConnect(pParse, pTable);
+ db->nSchemaLock--;
+ if( rc ){
+ return 1;
+ }
+ if( IsVirtual(pTable) ) return 0;
+#endif
+
+#ifndef SQLITE_OMIT_VIEW
+ /* A positive nCol means the column names for this view are
+ ** already known.
+ */
+ if( pTable->nCol>0 ) return 0;
+
+ /* A negative nCol is a special marker meaning that we are currently
+ ** trying to compute the column names. If we enter this routine with
+ ** a negative nCol, it means two or more views form a loop, like this:
+ **
+ ** CREATE VIEW one AS SELECT * FROM two;
+ ** CREATE VIEW two AS SELECT * FROM one;
+ **
+ ** Actually, the error above is now caught prior to reaching this point.
+ ** But the following test is still important as it does come up
+ ** in the following:
+ **
+ ** CREATE TABLE main.ex1(a);
+ ** CREATE TEMP VIEW ex1 AS SELECT a FROM ex1;
+ ** SELECT * FROM temp.ex1;
+ */
+ if( pTable->nCol<0 ){
+ sqlite3ErrorMsg(pParse, "view %s is circularly defined", pTable->zName);
+ return 1;
+ }
+ assert( pTable->nCol>=0 );
+
+ /* If we get this far, it means we need to compute the column names.
+ ** Note that the call to sqlite3ResultSetOfSelect() will expand any
+ ** "*" elements in the results set of the view and will assign cursors
+ ** to the elements of the FROM clause. But we do not want these changes
+ ** to be permanent. So the computation is done on a copy of the SELECT
+ ** statement that defines the view.
+ */
+ assert( pTable->pSelect );
+ pSel = sqlite3SelectDup(db, pTable->pSelect, 0);
+ if( pSel ){
+ u8 eParseMode = pParse->eParseMode;
+ pParse->eParseMode = PARSE_MODE_NORMAL;
+ n = pParse->nTab;
+ sqlite3SrcListAssignCursors(pParse, pSel->pSrc);
+ pTable->nCol = -1;
+ DisableLookaside;
+#ifndef SQLITE_OMIT_AUTHORIZATION
+ xAuth = db->xAuth;
+ db->xAuth = 0;
+ pSelTab = sqlite3ResultSetOfSelect(pParse, pSel, SQLITE_AFF_NONE);
+ db->xAuth = xAuth;
+#else
+ pSelTab = sqlite3ResultSetOfSelect(pParse, pSel, SQLITE_AFF_NONE);
+#endif
+ pParse->nTab = n;
+ if( pSelTab==0 ){
+ pTable->nCol = 0;
+ nErr++;
+ }else if( pTable->pCheck ){
+ /* CREATE VIEW name(arglist) AS ...
+ ** The names of the columns in the table are taken from
+ ** arglist which is stored in pTable->pCheck. The pCheck field
+ ** normally holds CHECK constraints on an ordinary table, but for
+ ** a VIEW it holds the list of column names.
+ */
+ sqlite3ColumnsFromExprList(pParse, pTable->pCheck,
+ &pTable->nCol, &pTable->aCol);
+ if( db->mallocFailed==0
+ && pParse->nErr==0
+ && pTable->nCol==pSel->pEList->nExpr
+ ){
+ sqlite3SelectAddColumnTypeAndCollation(pParse, pTable, pSel,
+ SQLITE_AFF_NONE);
+ }
+ }else{
+ /* CREATE VIEW name AS... without an argument list. Construct
+ ** the column names from the SELECT statement that defines the view.
+ */
+ assert( pTable->aCol==0 );
+ pTable->nCol = pSelTab->nCol;
+ pTable->aCol = pSelTab->aCol;
+ pSelTab->nCol = 0;
+ pSelTab->aCol = 0;
+ assert( sqlite3SchemaMutexHeld(db, 0, pTable->pSchema) );
+ }
+ pTable->nNVCol = pTable->nCol;
+ sqlite3DeleteTable(db, pSelTab);
+ sqlite3SelectDelete(db, pSel);
+ EnableLookaside;
+ pParse->eParseMode = eParseMode;
+ } else {
+ nErr++;
+ }
+ pTable->pSchema->schemaFlags |= DB_UnresetViews;
+ if( db->mallocFailed ){
+ sqlite3DeleteColumnNames(db, pTable);
+ pTable->aCol = 0;
+ pTable->nCol = 0;
+ }
+#endif /* SQLITE_OMIT_VIEW */
+ return nErr;
+}
+#endif /* !defined(SQLITE_OMIT_VIEW) || !defined(SQLITE_OMIT_VIRTUALTABLE) */
+
+#ifndef SQLITE_OMIT_VIEW
+/*
+** Clear the column names from every VIEW in database idx.
+*/
+static void sqliteViewResetAll(sqlite3 *db, int idx){
+ HashElem *i;
+ assert( sqlite3SchemaMutexHeld(db, idx, 0) );
+ if( !DbHasProperty(db, idx, DB_UnresetViews) ) return;
+ for(i=sqliteHashFirst(&db->aDb[idx].pSchema->tblHash); i;i=sqliteHashNext(i)){
+ Table *pTab = sqliteHashData(i);
+ if( pTab->pSelect ){
+ sqlite3DeleteColumnNames(db, pTab);
+ pTab->aCol = 0;
+ pTab->nCol = 0;
+ }
+ }
+ DbClearProperty(db, idx, DB_UnresetViews);
+}
+#else
+# define sqliteViewResetAll(A,B)
+#endif /* SQLITE_OMIT_VIEW */
+
+/*
+** This function is called by the VDBE to adjust the internal schema
+** used by SQLite when the btree layer moves a table root page. The
+** root-page of a table or index in database iDb has changed from iFrom
+** to iTo.
+**
+** Ticket #1728: The symbol table might still contain information
+** on tables and/or indices that are in the process of being deleted.
+** If you are unlucky, one of those deleted indices or tables might
+** have the same rootpage number as the real table or index that is
+** being moved. So we cannot stop searching after the first match
+** because the first match might be for one of the deleted indices
+** or tables and not the table/index that is actually being moved.
+** We must continue looping until all tables and indices with
+** rootpage==iFrom have been converted to have a rootpage of iTo
+** in order to be certain that we got the right one.
+*/
+#ifndef SQLITE_OMIT_AUTOVACUUM
+SQLITE_PRIVATE void sqlite3RootPageMoved(sqlite3 *db, int iDb, Pgno iFrom, Pgno iTo){
+ HashElem *pElem;
+ Hash *pHash;
+ Db *pDb;
+
+ assert( sqlite3SchemaMutexHeld(db, iDb, 0) );
+ pDb = &db->aDb[iDb];
+ pHash = &pDb->pSchema->tblHash;
+ for(pElem=sqliteHashFirst(pHash); pElem; pElem=sqliteHashNext(pElem)){
+ Table *pTab = sqliteHashData(pElem);
+ if( pTab->tnum==iFrom ){
+ pTab->tnum = iTo;
+ }
+ }
+ pHash = &pDb->pSchema->idxHash;
+ for(pElem=sqliteHashFirst(pHash); pElem; pElem=sqliteHashNext(pElem)){
+ Index *pIdx = sqliteHashData(pElem);
+ if( pIdx->tnum==iFrom ){
+ pIdx->tnum = iTo;
+ }
+ }
+}
+#endif
+
+/*
+** Write code to erase the table with root-page iTable from database iDb.
+** Also write code to modify the sqlite_schema table and internal schema
+** if a root-page of another table is moved by the btree-layer whilst
+** erasing iTable (this can happen with an auto-vacuum database).
+*/
+static void destroyRootPage(Parse *pParse, int iTable, int iDb){
+ Vdbe *v = sqlite3GetVdbe(pParse);
+ int r1 = sqlite3GetTempReg(pParse);
+ if( iTable<2 ) sqlite3ErrorMsg(pParse, "corrupt schema");
+ sqlite3VdbeAddOp3(v, OP_Destroy, iTable, r1, iDb);
+ sqlite3MayAbort(pParse);
+#ifndef SQLITE_OMIT_AUTOVACUUM
+ /* OP_Destroy stores an integer in register r1. If this integer
+ ** is non-zero, then it is the root page number of a table moved to
+ ** location iTable. The following code modifies the sqlite_schema table to
+ ** reflect this.
+ **
+ ** The "#NNN" in the SQL is a special constant that means whatever value
+ ** is in register NNN. See grammar rules associated with the TK_REGISTER
+ ** token for additional information.
+ */
+ sqlite3NestedParse(pParse,
+ "UPDATE %Q." DFLT_SCHEMA_TABLE
+ " SET rootpage=%d WHERE #%d AND rootpage=#%d",
+ pParse->db->aDb[iDb].zDbSName, iTable, r1, r1);
+#endif
+ sqlite3ReleaseTempReg(pParse, r1);
+}
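+
+/* Illustration: suppose iTable==5 is destroyed in an auto-vacuum
+** database and the btree layer fills the vacated slot by relocating
+** the table whose root page was 12. OP_Destroy then leaves 12 in
+** register r1 and the nested UPDATE above rewrites that table's
+** rootpage from 12 to 5. When nothing moves, r1 is zero and the
+** "WHERE #r1" term makes the UPDATE a no-op.
+*/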
+
+/*
+** Write VDBE code to erase table pTab and all associated indices on disk.
+** Code to update the sqlite_schema tables and internal schema definitions
+** in case a root-page belonging to another table is moved by the btree layer
+** is also added (this can happen with an auto-vacuum database).
+*/
+static void destroyTable(Parse *pParse, Table *pTab){
+ /* If the database may be auto-vacuum capable (if SQLITE_OMIT_AUTOVACUUM
+ ** is not defined), then it is important to call OP_Destroy on the
+ ** table and index root-pages in order, starting with the numerically
+ ** largest root-page number. This guarantees that none of the root-pages
+ ** to be destroyed is relocated by an earlier OP_Destroy. i.e. if the
+ ** following were coded:
+ **
+ ** OP_Destroy 4 0
+ ** ...
+ ** OP_Destroy 5 0
+ **
+ ** and root page 5 happened to be the largest root-page number in the
+ ** database, then root page 5 would be moved to page 4 by the
+ ** "OP_Destroy 4 0" opcode. The subsequent "OP_Destroy 5 0" would hit
+ ** a free-list page.
+ */
+ Pgno iTab = pTab->tnum;
+ Pgno iDestroyed = 0;
+
+ while( 1 ){
+ Index *pIdx;
+ Pgno iLargest = 0;
+
+ if( iDestroyed==0 || iTab<iDestroyed ){
+ iLargest = iTab;
+ }
+ for(pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext){
+ Pgno iIdx = pIdx->tnum;
+ assert( pIdx->pSchema==pTab->pSchema );
+ if( (iDestroyed==0 || (iIdx<iDestroyed)) && iIdx>iLargest ){
+ iLargest = iIdx;
+ }
+ }
+ if( iLargest==0 ){
+ return;
+ }else{
+ int iDb = sqlite3SchemaToIndex(pParse->db, pTab->pSchema);
+ assert( iDb>=0 && iDb<pParse->db->nDb );
+ destroyRootPage(pParse, iLargest, iDb);
+ iDestroyed = iLargest;
+ }
+ }
+}
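+
+/* Illustration: if pTab has root page 3 and two indices with root
+** pages 5 and 4, the loop above emits OP_Destroy for 5, then 4, then
+** 3. Working in descending root-page order guarantees that a
+** relocation triggered by one OP_Destroy can never move a page that a
+** later OP_Destroy still needs to target.
+*/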
+
+/*
+** Remove entries from the sqlite_statN tables (for N between 1 and 4)
+** after a DROP INDEX or DROP TABLE command.
+*/
+static void sqlite3ClearStatTables(
+ Parse *pParse, /* The parsing context */
+ int iDb, /* The database number */
+ const char *zType, /* "idx" or "tbl" */
+ const char *zName /* Name of index or table */
+){
+ int i;
+ const char *zDbName = pParse->db->aDb[iDb].zDbSName;
+ for(i=1; i<=4; i++){
+ char zTab[24];
+ sqlite3_snprintf(sizeof(zTab),zTab,"sqlite_stat%d",i);
+ if( sqlite3FindTable(pParse->db, zTab, zDbName) ){
+ sqlite3NestedParse(pParse,
+ "DELETE FROM %Q.%s WHERE %s=%Q",
+ zDbName, zTab, zType, zName
+ );
+ }
+ }
+}
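+
+/* Illustration: after "DROP TABLE t1" in database "main", and assuming
+** only sqlite_stat1 and sqlite_stat4 exist, the loop above generates:
+**
+**     DELETE FROM 'main'.sqlite_stat1 WHERE tbl='t1';
+**     DELETE FROM 'main'.sqlite_stat4 WHERE tbl='t1';
+**
+** For a DROP INDEX the zType parameter is "idx" and the WHERE clause
+** matches the idx column instead.
+*/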
+
+/*
+** Generate code to drop a table.
+*/
+SQLITE_PRIVATE void sqlite3CodeDropTable(Parse *pParse, Table *pTab, int iDb, int isView){
+ Vdbe *v;
+ sqlite3 *db = pParse->db;
+ Trigger *pTrigger;
+ Db *pDb = &db->aDb[iDb];
+
+ v = sqlite3GetVdbe(pParse);
+ assert( v!=0 );
+ sqlite3BeginWriteOperation(pParse, 1, iDb);
+
+#ifndef SQLITE_OMIT_VIRTUALTABLE
+ if( IsVirtual(pTab) ){
+ sqlite3VdbeAddOp0(v, OP_VBegin);
+ }
+#endif
+
+ /* Drop all triggers associated with the table being dropped. Code
+ ** is generated to remove entries from sqlite_schema and/or
+ ** sqlite_temp_schema if required.
+ */
+ pTrigger = sqlite3TriggerList(pParse, pTab);
+ while( pTrigger ){
+ assert( pTrigger->pSchema==pTab->pSchema ||
+ pTrigger->pSchema==db->aDb[1].pSchema );
+ sqlite3DropTriggerPtr(pParse, pTrigger);
+ pTrigger = pTrigger->pNext;
+ }
+
+#ifndef SQLITE_OMIT_AUTOINCREMENT
+ /* Remove any entries of the sqlite_sequence table associated with
+ ** the table being dropped. This is done before the table is dropped
+ ** at the btree level, in case the sqlite_sequence table needs to
+ ** move as a result of the drop (can happen in auto-vacuum mode).
+ */
+ if( pTab->tabFlags & TF_Autoincrement ){
+ sqlite3NestedParse(pParse,
+ "DELETE FROM %Q.sqlite_sequence WHERE name=%Q",
+ pDb->zDbSName, pTab->zName
+ );
+ }
+#endif
+
+ /* Drop all entries in the schema table that refer to the
+ ** table. The program loops through the schema table and deletes
+ ** every row that refers to a table of the same name as the one being
+ ** dropped. Triggers are handled separately because a trigger can be
+ ** created in the temp database that refers to a table in another
+ ** database.
+ */
+ sqlite3NestedParse(pParse,
+ "DELETE FROM %Q." DFLT_SCHEMA_TABLE
+ " WHERE tbl_name=%Q and type!='trigger'",
+ pDb->zDbSName, pTab->zName);
+ if( !isView && !IsVirtual(pTab) ){
+ destroyTable(pParse, pTab);
+ }
+
+ /* Remove the table entry from SQLite's internal schema and modify
+ ** the schema cookie.
+ */
+ if( IsVirtual(pTab) ){
+ sqlite3VdbeAddOp4(v, OP_VDestroy, iDb, 0, 0, pTab->zName, 0);
+ sqlite3MayAbort(pParse);
+ }
+ sqlite3VdbeAddOp4(v, OP_DropTable, iDb, 0, 0, pTab->zName, 0);
+ sqlite3ChangeCookie(pParse, iDb);
+ sqliteViewResetAll(db, iDb);
+}
+
+/*
+** Return TRUE if shadow tables should be read-only in the current
+** context.
+*/
+SQLITE_PRIVATE int sqlite3ReadOnlyShadowTables(sqlite3 *db){
+#ifndef SQLITE_OMIT_VIRTUALTABLE
+ if( (db->flags & SQLITE_Defensive)!=0
+ && db->pVtabCtx==0
+ && db->nVdbeExec==0
+ ){
+ return 1;
+ }
+#endif
+ return 0;
+}
+
+/*
+** Return true if the table pTab may not be dropped.
+*/
+static int tableMayNotBeDropped(sqlite3 *db, Table *pTab){
+ if( sqlite3StrNICmp(pTab->zName, "sqlite_", 7)==0 ){
+ if( sqlite3StrNICmp(pTab->zName+7, "stat", 4)==0 ) return 0;
+ if( sqlite3StrNICmp(pTab->zName+7, "parameters", 10)==0 ) return 0;
+ return 1;
+ }
+ if( (pTab->tabFlags & TF_Shadow)!=0 && sqlite3ReadOnlyShadowTables(db) ){
+ return 1;
+ }
+ return 0;
+}
+
+/*
+** This routine is called to do the work of a DROP TABLE statement.
+** pName is the name of the table to be dropped.
+*/
+SQLITE_PRIVATE void sqlite3DropTable(Parse *pParse, SrcList *pName, int isView, int noErr){
+ Table *pTab;
+ Vdbe *v;
+ sqlite3 *db = pParse->db;
+ int iDb;
+
+ if( db->mallocFailed ){
+ goto exit_drop_table;
+ }
+ assert( pParse->nErr==0 );
+ assert( pName->nSrc==1 );
+ if( sqlite3ReadSchema(pParse) ) goto exit_drop_table;
+ if( noErr ) db->suppressErr++;
+ assert( isView==0 || isView==LOCATE_VIEW );
+ pTab = sqlite3LocateTableItem(pParse, isView, &pName->a[0]);
+ if( noErr ) db->suppressErr--;
+
+ if( pTab==0 ){
+ if( noErr ) sqlite3CodeVerifyNamedSchema(pParse, pName->a[0].zDatabase);
+ goto exit_drop_table;
+ }
+ iDb = sqlite3SchemaToIndex(db, pTab->pSchema);
+ assert( iDb>=0 && iDb<db->nDb );
+
+ /* If pTab is a virtual table, call ViewGetColumnNames() to ensure
+ ** it is initialized.
+ */
+ if( IsVirtual(pTab) && sqlite3ViewGetColumnNames(pParse, pTab) ){
+ goto exit_drop_table;
+ }
+#ifndef SQLITE_OMIT_AUTHORIZATION
+ {
+ int code;
+ const char *zTab = SCHEMA_TABLE(iDb);
+ const char *zDb = db->aDb[iDb].zDbSName;
+ const char *zArg2 = 0;
+ if( sqlite3AuthCheck(pParse, SQLITE_DELETE, zTab, 0, zDb)){
+ goto exit_drop_table;
+ }
+ if( isView ){
+ if( !OMIT_TEMPDB && iDb==1 ){
+ code = SQLITE_DROP_TEMP_VIEW;
+ }else{
+ code = SQLITE_DROP_VIEW;
+ }
+#ifndef SQLITE_OMIT_VIRTUALTABLE
+ }else if( IsVirtual(pTab) ){
+ code = SQLITE_DROP_VTABLE;
+ zArg2 = sqlite3GetVTable(db, pTab)->pMod->zName;
+#endif
+ }else{
+ if( !OMIT_TEMPDB && iDb==1 ){
+ code = SQLITE_DROP_TEMP_TABLE;
+ }else{
+ code = SQLITE_DROP_TABLE;
+ }
+ }
+ if( sqlite3AuthCheck(pParse, code, pTab->zName, zArg2, zDb) ){
+ goto exit_drop_table;
+ }
+ if( sqlite3AuthCheck(pParse, SQLITE_DELETE, pTab->zName, 0, zDb) ){
+ goto exit_drop_table;
+ }
+ }
+#endif
+ if( tableMayNotBeDropped(db, pTab) ){
+ sqlite3ErrorMsg(pParse, "table %s may not be dropped", pTab->zName);
+ goto exit_drop_table;
+ }
+
+#ifndef SQLITE_OMIT_VIEW
+ /* Ensure DROP TABLE is not used on a view, and DROP VIEW is not used
+ ** on a table.
+ */
+ if( isView && pTab->pSelect==0 ){
+ sqlite3ErrorMsg(pParse, "use DROP TABLE to delete table %s", pTab->zName);
+ goto exit_drop_table;
+ }
+ if( !isView && pTab->pSelect ){
+ sqlite3ErrorMsg(pParse, "use DROP VIEW to delete view %s", pTab->zName);
+ goto exit_drop_table;
+ }
+#endif
+
+ /* Generate code to remove the table from the schema table
+ ** on disk.
+ */
+ v = sqlite3GetVdbe(pParse);
+ if( v ){
+ sqlite3BeginWriteOperation(pParse, 1, iDb);
+ if( !isView ){
+ sqlite3ClearStatTables(pParse, iDb, "tbl", pTab->zName);
+ sqlite3FkDropTable(pParse, pName, pTab);
+ }
+ sqlite3CodeDropTable(pParse, pTab, iDb, isView);
+ }
+
+exit_drop_table:
+ sqlite3SrcListDelete(db, pName);
+}
+
+/*
+** This routine is called to create a new foreign key on the table
+** currently under construction. pFromCol determines which columns
+** in the current table point to the foreign key. If pFromCol==0 then
+** connect the key to the last column inserted. pTo is the name of
+** the table referred to (a.k.a the "parent" table). pToCol is a list
+** of tables in the parent pTo table. flags contains all
+** information about the conflict resolution algorithms specified
+** in the ON DELETE, ON UPDATE and ON INSERT clauses.
+**
+** An FKey structure is created and added to the table currently
+** under construction in the pParse->pNewTable field.
+**
+** The foreign key is set for IMMEDIATE processing. A subsequent call
+** to sqlite3DeferForeignKey() might change this to DEFERRED.
+*/
+SQLITE_PRIVATE void sqlite3CreateForeignKey(
+ Parse *pParse, /* Parsing context */
+ ExprList *pFromCol, /* Columns in this table that point to other table */
+ Token *pTo, /* Name of the other table */
+ ExprList *pToCol, /* Columns in the other table */
+ int flags /* Conflict resolution algorithms. */
+){
+ sqlite3 *db = pParse->db;
+#ifndef SQLITE_OMIT_FOREIGN_KEY
+ FKey *pFKey = 0;
+ FKey *pNextTo;
+ Table *p = pParse->pNewTable;
+ int nByte;
+ int i;
+ int nCol;
+ char *z;
+
+ assert( pTo!=0 );
+ if( p==0 || IN_DECLARE_VTAB ) goto fk_end;
+ if( pFromCol==0 ){
+ int iCol = p->nCol-1;
+ if( NEVER(iCol<0) ) goto fk_end;
+ if( pToCol && pToCol->nExpr!=1 ){
+ sqlite3ErrorMsg(pParse, "foreign key on %s"
+ " should reference only one column of table %T",
+ p->aCol[iCol].zName, pTo);
+ goto fk_end;
+ }
+ nCol = 1;
+ }else if( pToCol && pToCol->nExpr!=pFromCol->nExpr ){
+ sqlite3ErrorMsg(pParse,
+ "number of columns in foreign key does not match the number of "
+ "columns in the referenced table");
+ goto fk_end;
+ }else{
+ nCol = pFromCol->nExpr;
+ }
+ nByte = sizeof(*pFKey) + (nCol-1)*sizeof(pFKey->aCol[0]) + pTo->n + 1;
+ if( pToCol ){
+ for(i=0; i<pToCol->nExpr; i++){
+ nByte += sqlite3Strlen30(pToCol->a[i].zEName) + 1;
+ }
+ }
+ pFKey = sqlite3DbMallocZero(db, nByte );
+ if( pFKey==0 ){
+ goto fk_end;
+ }
+ pFKey->pFrom = p;
+ pFKey->pNextFrom = p->pFKey;
+ z = (char*)&pFKey->aCol[nCol];
+ pFKey->zTo = z;
+ if( IN_RENAME_OBJECT ){
+ sqlite3RenameTokenMap(pParse, (void*)z, pTo);
+ }
+ memcpy(z, pTo->z, pTo->n);
+ z[pTo->n] = 0;
+ sqlite3Dequote(z);
+ z += pTo->n+1;
+ pFKey->nCol = nCol;
+ if( pFromCol==0 ){
+ pFKey->aCol[0].iFrom = p->nCol-1;
+ }else{
+ for(i=0; i<nCol; i++){
+ int j;
+ for(j=0; j<p->nCol; j++){
+ if( sqlite3StrICmp(p->aCol[j].zName, pFromCol->a[i].zEName)==0 ){
+ pFKey->aCol[i].iFrom = j;
+ break;
+ }
+ }
+ if( j>=p->nCol ){
+ sqlite3ErrorMsg(pParse,
+ "unknown column \"%s\" in foreign key definition",
+ pFromCol->a[i].zEName);
+ goto fk_end;
+ }
+ if( IN_RENAME_OBJECT ){
+ sqlite3RenameTokenRemap(pParse, &pFKey->aCol[i], pFromCol->a[i].zEName);
+ }
+ }
+ }
+ if( pToCol ){
+ for(i=0; i<nCol; i++){
+ int n = sqlite3Strlen30(pToCol->a[i].zEName);
+ pFKey->aCol[i].zCol = z;
+ if( IN_RENAME_OBJECT ){
+ sqlite3RenameTokenRemap(pParse, z, pToCol->a[i].zEName);
+ }
+ memcpy(z, pToCol->a[i].zEName, n);
+ z[n] = 0;
+ z += n+1;
+ }
+ }
+ pFKey->isDeferred = 0;
+ pFKey->aAction[0] = (u8)(flags & 0xff); /* ON DELETE action */
+ pFKey->aAction[1] = (u8)((flags >> 8 ) & 0xff); /* ON UPDATE action */
+
+ assert( sqlite3SchemaMutexHeld(db, 0, p->pSchema) );
+ pNextTo = (FKey *)sqlite3HashInsert(&p->pSchema->fkeyHash,
+ pFKey->zTo, (void *)pFKey
+ );
+ if( pNextTo==pFKey ){
+ sqlite3OomFault(db);
+ goto fk_end;
+ }
+ if( pNextTo ){
+ assert( pNextTo->pPrevTo==0 );
+ pFKey->pNextTo = pNextTo;
+ pNextTo->pPrevTo = pFKey;
+ }
+
+ /* Link the foreign key to the table as the last step.
+ */
+ p->pFKey = pFKey;
+ pFKey = 0;
+
+fk_end:
+ sqlite3DbFree(db, pFKey);
+#endif /* !defined(SQLITE_OMIT_FOREIGN_KEY) */
+ sqlite3ExprListDelete(db, pFromCol);
+ sqlite3ExprListDelete(db, pToCol);
+}
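+
+/* Illustration: for the hypothetical constraint
+**
+**     FOREIGN KEY(a,b) REFERENCES p(x,y)
+**
+** nCol==2 and the single allocation above holds, in order: the FKey
+** struct with its aCol[2] array, then the nul-terminated strings "p",
+** "x" and "y". pFKey->zTo points at "p" and each aCol[i].zCol points
+** at the matching parent-column name, so the entire key definition is
+** released with a single sqlite3DbFree().
+*/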
+
+/*
+** This routine is called when an INITIALLY IMMEDIATE or INITIALLY DEFERRED
+** clause is seen as part of a foreign key definition. The isDeferred
+** parameter is 1 for INITIALLY DEFERRED and 0 for INITIALLY IMMEDIATE.
+** The behavior of the most recently created foreign key is adjusted
+** accordingly.
+*/
+SQLITE_PRIVATE void sqlite3DeferForeignKey(Parse *pParse, int isDeferred){
+#ifndef SQLITE_OMIT_FOREIGN_KEY
+ Table *pTab;
+ FKey *pFKey;
+ if( (pTab = pParse->pNewTable)==0 || (pFKey = pTab->pFKey)==0 ) return;
+ assert( isDeferred==0 || isDeferred==1 ); /* EV: R-30323-21917 */
+ pFKey->isDeferred = (u8)isDeferred;
+#endif
+}
+
+/*
+** Generate code that will erase and refill index pIndex. This is
+** used to initialize a newly created index or to recompute the
+** content of an index in response to a REINDEX command.
+**
+** If memRootPage is not negative, it means that the index is newly
+** created. The register specified by memRootPage contains the
+** root page number of the index. If memRootPage is negative, then
+** the index already exists and must be cleared before being refilled,
+** and the root page number of the index is taken from pIndex->tnum.
+*/
+static void sqlite3RefillIndex(Parse *pParse, Index *pIndex, int memRootPage){
+ Table *pTab = pIndex->pTable; /* The table that is indexed */
+ int iTab = pParse->nTab++; /* Btree cursor used for pTab */
+ int iIdx = pParse->nTab++; /* Btree cursor used for pIndex */
+ int iSorter; /* Cursor opened by OpenSorter (if in use) */
+ int addr1; /* Address of top of loop */
+ int addr2; /* Address to jump to for next iteration */
+ Pgno tnum; /* Root page of index */
+ int iPartIdxLabel; /* Jump to this label to skip a row */
+ Vdbe *v; /* Generate code into this virtual machine */
+ KeyInfo *pKey; /* KeyInfo for index */
+ int regRecord; /* Register holding assembled index record */
+ sqlite3 *db = pParse->db; /* The database connection */
+ int iDb = sqlite3SchemaToIndex(db, pIndex->pSchema);
+
+#ifndef SQLITE_OMIT_AUTHORIZATION
+ if( sqlite3AuthCheck(pParse, SQLITE_REINDEX, pIndex->zName, 0,
+ db->aDb[iDb].zDbSName ) ){
+ return;
+ }
+#endif
+
+ /* Require a write-lock on the table to perform this operation */
+ sqlite3TableLock(pParse, iDb, pTab->tnum, 1, pTab->zName);
+
+ v = sqlite3GetVdbe(pParse);
+ if( v==0 ) return;
+ if( memRootPage>=0 ){
+ tnum = (Pgno)memRootPage;
+ }else{
+ tnum = pIndex->tnum;
+ }
+ pKey = sqlite3KeyInfoOfIndex(pParse, pIndex);
+ assert( pKey!=0 || db->mallocFailed || pParse->nErr );
+
+ /* Open the sorter cursor if we are to use one. */
+ iSorter = pParse->nTab++;
+ sqlite3VdbeAddOp4(v, OP_SorterOpen, iSorter, 0, pIndex->nKeyCol, (char*)
+ sqlite3KeyInfoRef(pKey), P4_KEYINFO);
+
+ /* Open the table. Loop through all rows of the table, inserting index
+ ** records into the sorter. */
+ sqlite3OpenTable(pParse, iTab, iDb, pTab, OP_OpenRead);
+ addr1 = sqlite3VdbeAddOp2(v, OP_Rewind, iTab, 0); VdbeCoverage(v);
+ regRecord = sqlite3GetTempReg(pParse);
+ sqlite3MultiWrite(pParse);
+
+ sqlite3GenerateIndexKey(pParse,pIndex,iTab,regRecord,0,&iPartIdxLabel,0,0);
+ sqlite3VdbeAddOp2(v, OP_SorterInsert, iSorter, regRecord);
+ sqlite3ResolvePartIdxLabel(pParse, iPartIdxLabel);
+ sqlite3VdbeAddOp2(v, OP_Next, iTab, addr1+1); VdbeCoverage(v);
+ sqlite3VdbeJumpHere(v, addr1);
+ if( memRootPage<0 ) sqlite3VdbeAddOp2(v, OP_Clear, tnum, iDb);
+ sqlite3VdbeAddOp4(v, OP_OpenWrite, iIdx, (int)tnum, iDb,
+ (char *)pKey, P4_KEYINFO);
+ sqlite3VdbeChangeP5(v, OPFLAG_BULKCSR|((memRootPage>=0)?OPFLAG_P2ISREG:0));
+
+ addr1 = sqlite3VdbeAddOp2(v, OP_SorterSort, iSorter, 0); VdbeCoverage(v);
+ if( IsUniqueIndex(pIndex) ){
+ int j2 = sqlite3VdbeGoto(v, 1);
+ addr2 = sqlite3VdbeCurrentAddr(v);
+ sqlite3VdbeVerifyAbortable(v, OE_Abort);
+ sqlite3VdbeAddOp4Int(v, OP_SorterCompare, iSorter, j2, regRecord,
+ pIndex->nKeyCol); VdbeCoverage(v);
+ sqlite3UniqueConstraint(pParse, OE_Abort, pIndex);
+ sqlite3VdbeJumpHere(v, j2);
+ }else{
+ /* Most CREATE INDEX and REINDEX statements that are not UNIQUE cannot
+ ** abort. The exception is if one of the indexed expressions contains a
+ ** user function that throws an exception when it is evaluated. But the
+ ** overhead of adding a statement journal to a CREATE INDEX statement is
+ ** very small (since most of the pages written do not contain content that
+ ** needs to be restored if the statement aborts), so we call
+ ** sqlite3MayAbort() for all CREATE INDEX statements. */
+ sqlite3MayAbort(pParse);
+ addr2 = sqlite3VdbeCurrentAddr(v);
+ }
+ sqlite3VdbeAddOp3(v, OP_SorterData, iSorter, regRecord, iIdx);
+ if( !pIndex->bAscKeyBug ){
+ /* This OP_SeekEnd opcode makes index insert for a REINDEX go much
+ ** faster by avoiding unnecessary seeks. But the optimization does
+ ** not work for UNIQUE constraint indexes on WITHOUT ROWID tables
+ ** with DESC primary keys, since those indexes have their keys in
+ ** a different order from the main table.
+ ** See ticket: https://www.sqlite.org/src/info/bba7b69f9849b5bf
+ */
+ sqlite3VdbeAddOp1(v, OP_SeekEnd, iIdx);
+ }
+ sqlite3VdbeAddOp2(v, OP_IdxInsert, iIdx, regRecord);
+ sqlite3VdbeChangeP5(v, OPFLAG_USESEEKRESULT);
+ sqlite3ReleaseTempReg(pParse, regRecord);
+ sqlite3VdbeAddOp2(v, OP_SorterNext, iSorter, addr2); VdbeCoverage(v);
+ sqlite3VdbeJumpHere(v, addr1);
+
+ sqlite3VdbeAddOp1(v, OP_Close, iTab);
+ sqlite3VdbeAddOp1(v, OP_Close, iIdx);
+ sqlite3VdbeAddOp1(v, OP_Close, iSorter);
+}
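+
+/* Illustration: the code generated above has roughly this shape
+** (schematic only; operands and addresses vary):
+**
+**     SorterOpen iSorter ; OpenRead iTab
+**     Rewind iTab .. { make index key; SorterInsert } .. Next iTab
+**     (Clear the old index root if this is a REINDEX)
+**     OpenWrite iIdx
+**     SorterSort .. { SorterData; uniqueness check; IdxInsert } .. SorterNext
+**     Close iTab ; Close iIdx ; Close iSorter
+*/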
+
+/*
+** Allocate heap space to hold an Index object with nCol columns.
+**
+** Increase the allocation size to provide an extra nExtra bytes
+** of 8-byte aligned space after the Index object and return a
+** pointer to this extra space in *ppExtra.
+*/
+SQLITE_PRIVATE Index *sqlite3AllocateIndexObject(
+ sqlite3 *db, /* Database connection */
+ i16 nCol, /* Total number of columns in the index */
+ int nExtra, /* Number of bytes of extra space to alloc */
+ char **ppExtra /* Pointer to the "extra" space */
+){
+ Index *p; /* Allocated index object */
+ int nByte; /* Bytes of space for Index object + arrays */
+
+ nByte = ROUND8(sizeof(Index)) + /* Index structure */
+ ROUND8(sizeof(char*)*nCol) + /* Index.azColl */
+ ROUND8(sizeof(LogEst)*(nCol+1) + /* Index.aiRowLogEst */
+ sizeof(i16)*nCol + /* Index.aiColumn */
+ sizeof(u8)*nCol); /* Index.aSortOrder */
+ p = sqlite3DbMallocZero(db, nByte + nExtra);
+ if( p ){
+ char *pExtra = ((char*)p)+ROUND8(sizeof(Index));
+ p->azColl = (const char**)pExtra; pExtra += ROUND8(sizeof(char*)*nCol);
+ p->aiRowLogEst = (LogEst*)pExtra; pExtra += sizeof(LogEst)*(nCol+1);
+ p->aiColumn = (i16*)pExtra; pExtra += sizeof(i16)*nCol;
+ p->aSortOrder = (u8*)pExtra;
+ p->nColumn = nCol;
+ p->nKeyCol = nCol - 1;
+ *ppExtra = ((char*)p) + nByte;
+ }
+ return p;
+}
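+
+/* Illustration: for nCol==3 the single allocation above is laid out
+** as the Index struct (rounded to 8 bytes), then azColl[3] (rounded),
+** then one block holding aiRowLogEst[4], aiColumn[3] and aSortOrder[3],
+** followed by the nExtra caller bytes returned through *ppExtra. Note
+** that aiRowLogEst gets nCol+1 slots: entry 0 estimates the total
+** number of rows covered by the index.
+*/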
+
+/*
+** If expression list pList contains an expression that was parsed with
+** an explicit "NULLS FIRST" or "NULLS LAST" clause, leave an error in
+** pParse and return non-zero. Otherwise, return zero.
+*/
+SQLITE_PRIVATE int sqlite3HasExplicitNulls(Parse *pParse, ExprList *pList){
+ if( pList ){
+ int i;
+ for(i=0; i<pList->nExpr; i++){
+ if( pList->a[i].bNulls ){
+ u8 sf = pList->a[i].sortFlags;
+ sqlite3ErrorMsg(pParse, "unsupported use of NULLS %s",
+ (sf==0 || sf==3) ? "FIRST" : "LAST"
+ );
+ return 1;
+ }
+ }
+ }
+ return 0;
+}
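+
+/* Illustration: "CREATE INDEX i1 ON t1(a NULLS LAST)" is rejected here
+** with "unsupported use of NULLS LAST". In the sortFlags test above,
+** the values 0 (ASC) and 3 (DESC|BIGNULL) are the two encodings that
+** correspond to NULLS FIRST; the other two mean NULLS LAST.
+*/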
+
+/*
+** Create a new index for an SQL table. pName1.pName2 is the name of the index
+** and pTblName is the name of the table that is to be indexed. Both will
+** be NULL for a primary key or an index that is created to satisfy a
+** UNIQUE constraint. If pTblName is NULL, use pParse->pNewTable
+** as the table to be indexed. pParse->pNewTable is a table that is
+** currently being constructed by a CREATE TABLE statement.
+**
+** pList is a list of columns to be indexed. pList will be NULL if this
+** is a primary key or unique-constraint on the most recent column added
+** to the table currently under construction.
+*/
+SQLITE_PRIVATE void sqlite3CreateIndex(
+ Parse *pParse, /* All information about this parse */
+ Token *pName1, /* First part of index name. May be NULL */
+ Token *pName2, /* Second part of index name. May be NULL */
+ SrcList *pTblName, /* Table to index. Use pParse->pNewTable if 0 */
+ ExprList *pList, /* A list of columns to be indexed */
+ int onError, /* OE_Abort, OE_Ignore, OE_Replace, or OE_None */
+ Token *pStart, /* The CREATE token that begins this statement */
+ Expr *pPIWhere, /* WHERE clause for partial indices */
+ int sortOrder, /* Sort order of primary key when pList==NULL */
+ int ifNotExist, /* Omit error if index already exists */
+ u8 idxType /* The index type */
+){
+ Table *pTab = 0; /* Table to be indexed */
+ Index *pIndex = 0; /* The index to be created */
+ char *zName = 0; /* Name of the index */
+ int nName; /* Number of characters in zName */
+ int i, j;
+ DbFixer sFix; /* For assigning database names to pTblName */
+ int sortOrderMask; /* 1 to honor DESC in index. 0 to ignore. */
+ sqlite3 *db = pParse->db;
+ Db *pDb; /* The database that contains the table to be indexed */
+ int iDb; /* Index of the database that is being written */
+ Token *pName = 0; /* Unqualified name of the index to create */
+ struct ExprList_item *pListItem; /* For looping over pList */
+ int nExtra = 0; /* Space allocated for zExtra[] */
+ int nExtraCol; /* Number of extra columns needed */
+ char *zExtra = 0; /* Extra space after the Index object */
+ Index *pPk = 0; /* PRIMARY KEY index for WITHOUT ROWID tables */
+
+ if( db->mallocFailed || pParse->nErr>0 ){
+ goto exit_create_index;
+ }
+ if( IN_DECLARE_VTAB && idxType!=SQLITE_IDXTYPE_PRIMARYKEY ){
+ goto exit_create_index;
+ }
+ if( SQLITE_OK!=sqlite3ReadSchema(pParse) ){
+ goto exit_create_index;
+ }
+ if( sqlite3HasExplicitNulls(pParse, pList) ){
+ goto exit_create_index;
+ }
+
+ /*
+ ** Find the table that is to be indexed. Return early if not found.
+ */
+ if( pTblName!=0 ){
+
+ /* Use the two-part index name to determine the database
+ ** to search for the table. 'Fix' the table name to this db
+ ** before looking up the table.
+ */
+ assert( pName1 && pName2 );
+ iDb = sqlite3TwoPartName(pParse, pName1, pName2, &pName);
+ if( iDb<0 ) goto exit_create_index;
+ assert( pName && pName->z );
+
+#ifndef SQLITE_OMIT_TEMPDB
+ /* If the index name was unqualified, check if the table
+ ** is a temp table. If so, set the database to 1. Do not do this
+ ** if initialising a database schema.
+ */
+ if( !db->init.busy ){
+ pTab = sqlite3SrcListLookup(pParse, pTblName);
+ if( pName2->n==0 && pTab && pTab->pSchema==db->aDb[1].pSchema ){
+ iDb = 1;
+ }
+ }
+#endif
+
+ sqlite3FixInit(&sFix, pParse, iDb, "index", pName);
+ if( sqlite3FixSrcList(&sFix, pTblName) ){
+ /* Because the parser constructs pTblName from a single identifier,
+ ** sqlite3FixSrcList can never fail. */
+ assert(0);
+ }
+ pTab = sqlite3LocateTableItem(pParse, 0, &pTblName->a[0]);
+ assert( db->mallocFailed==0 || pTab==0 );
+ if( pTab==0 ) goto exit_create_index;
+ if( iDb==1 && db->aDb[iDb].pSchema!=pTab->pSchema ){
+ sqlite3ErrorMsg(pParse,
+ "cannot create a TEMP index on non-TEMP table \"%s\"",
+ pTab->zName);
+ goto exit_create_index;
+ }
+ if( !HasRowid(pTab) ) pPk = sqlite3PrimaryKeyIndex(pTab);
+ }else{
+ assert( pName==0 );
+ assert( pStart==0 );
+ pTab = pParse->pNewTable;
+ if( !pTab ) goto exit_create_index;
+ iDb = sqlite3SchemaToIndex(db, pTab->pSchema);
+ }
+ pDb = &db->aDb[iDb];
+
+ assert( pTab!=0 );
+ assert( pParse->nErr==0 );
+ if( sqlite3StrNICmp(pTab->zName, "sqlite_", 7)==0
+ && db->init.busy==0
+ && pTblName!=0
+#if SQLITE_USER_AUTHENTICATION
+ && sqlite3UserAuthTable(pTab->zName)==0
+#endif
+ ){
+ sqlite3ErrorMsg(pParse, "table %s may not be indexed", pTab->zName);
+ goto exit_create_index;
+ }
+#ifndef SQLITE_OMIT_VIEW
+ if( pTab->pSelect ){
+ sqlite3ErrorMsg(pParse, "views may not be indexed");
+ goto exit_create_index;
+ }
+#endif
+#ifndef SQLITE_OMIT_VIRTUALTABLE
+ if( IsVirtual(pTab) ){
+ sqlite3ErrorMsg(pParse, "virtual tables may not be indexed");
+ goto exit_create_index;
+ }
+#endif
+
+ /*
+ ** Find the name of the index. Make sure there is not already another
+ ** index or table with the same name.
+ **
+ ** Exception: If we are reading the names of permanent indices from the
+ ** sqlite_schema table (because some other process changed the schema) and
+ ** one of the index names collides with the name of a temporary table or
+ ** index, then we will continue to process this index.
+ **
+ ** If pName==0 it means that we are
+ ** dealing with a primary key or UNIQUE constraint. We have to invent our
+ ** own name.
+ */
+ if( pName ){
+ zName = sqlite3NameFromToken(db, pName);
+ if( zName==0 ) goto exit_create_index;
+ assert( pName->z!=0 );
+ if( SQLITE_OK!=sqlite3CheckObjectName(pParse, zName,"index",pTab->zName) ){
+ goto exit_create_index;
+ }
+ if( !IN_RENAME_OBJECT ){
+ if( !db->init.busy ){
+ if( sqlite3FindTable(db, zName, 0)!=0 ){
+ sqlite3ErrorMsg(pParse, "there is already a table named %s", zName);
+ goto exit_create_index;
+ }
+ }
+ if( sqlite3FindIndex(db, zName, pDb->zDbSName)!=0 ){
+ if( !ifNotExist ){
+ sqlite3ErrorMsg(pParse, "index %s already exists", zName);
+ }else{
+ assert( !db->init.busy );
+ sqlite3CodeVerifySchema(pParse, iDb);
+ }
+ goto exit_create_index;
+ }
+ }
+ }else{
+ int n;
+ Index *pLoop;
+ for(pLoop=pTab->pIndex, n=1; pLoop; pLoop=pLoop->pNext, n++){}
+ zName = sqlite3MPrintf(db, "sqlite_autoindex_%s_%d", pTab->zName, n);
+ if( zName==0 ){
+ goto exit_create_index;
+ }
+
+ /* Automatic index names generated from within sqlite3_declare_vtab()
+ ** must have names that are distinct from normal automatic index names.
+ ** The following statement converts "sqlite_autoindex..." into
+ ** "sqlite_butoindex..." in order to make the names distinct.
+ ** The "vtab_err.test" test demonstrates the need for this statement. */
+ if( IN_SPECIAL_PARSE ) zName[7]++;
+ }
+
+ /* Check for authorization to create an index.
+ */
+#ifndef SQLITE_OMIT_AUTHORIZATION
+ if( !IN_RENAME_OBJECT ){
+ const char *zDb = pDb->zDbSName;
+ if( sqlite3AuthCheck(pParse, SQLITE_INSERT, SCHEMA_TABLE(iDb), 0, zDb) ){
+ goto exit_create_index;
+ }
+ i = SQLITE_CREATE_INDEX;
+ if( !OMIT_TEMPDB && iDb==1 ) i = SQLITE_CREATE_TEMP_INDEX;
+ if( sqlite3AuthCheck(pParse, i, zName, pTab->zName, zDb) ){
+ goto exit_create_index;
+ }
+ }
+#endif
+
+ /* If pList==0, it means this routine was called to make a primary
+ ** key out of the last column added to the table under construction.
+ ** So create a fake list to simulate this.
+ */
+ if( pList==0 ){
+ Token prevCol;
+ Column *pCol = &pTab->aCol[pTab->nCol-1];
+ pCol->colFlags |= COLFLAG_UNIQUE;
+ sqlite3TokenInit(&prevCol, pCol->zName);
+ pList = sqlite3ExprListAppend(pParse, 0,
+ sqlite3ExprAlloc(db, TK_ID, &prevCol, 0));
+ if( pList==0 ) goto exit_create_index;
+ assert( pList->nExpr==1 );
+ sqlite3ExprListSetSortOrder(pList, sortOrder, SQLITE_SO_UNDEFINED);
+ }else{
+ sqlite3ExprListCheckLength(pParse, pList, "index");
+ if( pParse->nErr ) goto exit_create_index;
+ }
+
+ /* Figure out how many bytes of space are required to store explicitly
+ ** specified collation sequence names.
+ */
+ for(i=0; i<pList->nExpr; i++){
+ Expr *pExpr = pList->a[i].pExpr;
+ assert( pExpr!=0 );
+ if( pExpr->op==TK_COLLATE ){
+ nExtra += (1 + sqlite3Strlen30(pExpr->u.zToken));
+ }
+ }
+
+ /*
+ ** Allocate the index structure.
+ */
+ nName = sqlite3Strlen30(zName);
+ nExtraCol = pPk ? pPk->nKeyCol : 1;
+ assert( pList->nExpr + nExtraCol <= 32767 /* Fits in i16 */ );
+ pIndex = sqlite3AllocateIndexObject(db, pList->nExpr + nExtraCol,
+ nName + nExtra + 1, &zExtra);
+ if( db->mallocFailed ){
+ goto exit_create_index;
+ }
+ assert( EIGHT_BYTE_ALIGNMENT(pIndex->aiRowLogEst) );
+ assert( EIGHT_BYTE_ALIGNMENT(pIndex->azColl) );
+ pIndex->zName = zExtra;
+ zExtra += nName + 1;
+ memcpy(pIndex->zName, zName, nName+1);
+ pIndex->pTable = pTab;
+ pIndex->onError = (u8)onError;
+ pIndex->uniqNotNull = onError!=OE_None;
+ pIndex->idxType = idxType;
+ pIndex->pSchema = db->aDb[iDb].pSchema;
+ pIndex->nKeyCol = pList->nExpr;
+ if( pPIWhere ){
+ sqlite3ResolveSelfReference(pParse, pTab, NC_PartIdx, pPIWhere, 0);
+ pIndex->pPartIdxWhere = pPIWhere;
+ pPIWhere = 0;
+ }
+ assert( sqlite3SchemaMutexHeld(db, iDb, 0) );
+
+ /* Check to see if we should honor DESC requests on index columns
+ */
+ if( pDb->pSchema->file_format>=4 ){
+ sortOrderMask = -1; /* Honor DESC */
+ }else{
+ sortOrderMask = 0; /* Ignore DESC */
+ }
+
+ /* Analyze the list of expressions that form the terms of the index and
+ ** report any errors. In the common case where the expression is exactly
+ ** a table column, store that column in aiColumn[]. For general expressions,
+ ** populate pIndex->aColExpr and store XN_EXPR (-2) in aiColumn[].
+ **
+ ** TODO: Issue a warning if two or more columns of the index are identical.
+ ** TODO: Issue a warning if the table primary key is used as part of the
+ ** index key.
+ */
+ pListItem = pList->a;
+ if( IN_RENAME_OBJECT ){
+ pIndex->aColExpr = pList;
+ pList = 0;
+ }
+ for(i=0; i<pIndex->nKeyCol; i++, pListItem++){
+ Expr *pCExpr; /* The i-th index expression */
+ int requestedSortOrder; /* ASC or DESC on the i-th expression */
+ const char *zColl; /* Collation sequence name */
+
+ sqlite3StringToId(pListItem->pExpr);
+ sqlite3ResolveSelfReference(pParse, pTab, NC_IdxExpr, pListItem->pExpr, 0);
+ if( pParse->nErr ) goto exit_create_index;
+ pCExpr = sqlite3ExprSkipCollate(pListItem->pExpr);
+ if( pCExpr->op!=TK_COLUMN ){
+ if( pTab==pParse->pNewTable ){
+ sqlite3ErrorMsg(pParse, "expressions prohibited in PRIMARY KEY and "
+ "UNIQUE constraints");
+ goto exit_create_index;
+ }
+ if( pIndex->aColExpr==0 ){
+ pIndex->aColExpr = pList;
+ pList = 0;
+ }
+ j = XN_EXPR;
+ pIndex->aiColumn[i] = XN_EXPR;
+ pIndex->uniqNotNull = 0;
+ }else{
+ j = pCExpr->iColumn;
+ assert( j<=0x7fff );
+ if( j<0 ){
+ j = pTab->iPKey;
+ }else{
+ if( pTab->aCol[j].notNull==0 ){
+ pIndex->uniqNotNull = 0;
+ }
+ if( pTab->aCol[j].colFlags & COLFLAG_VIRTUAL ){
+ pIndex->bHasVCol = 1;
+ }
+ }
+ pIndex->aiColumn[i] = (i16)j;
+ }
+ zColl = 0;
+ if( pListItem->pExpr->op==TK_COLLATE ){
+ int nColl;
+ zColl = pListItem->pExpr->u.zToken;
+ nColl = sqlite3Strlen30(zColl) + 1;
+ assert( nExtra>=nColl );
+ memcpy(zExtra, zColl, nColl);
+ zColl = zExtra;
+ zExtra += nColl;
+ nExtra -= nColl;
+ }else if( j>=0 ){
+ zColl = pTab->aCol[j].zColl;
+ }
+ if( !zColl ) zColl = sqlite3StrBINARY;
+ if( !db->init.busy && !sqlite3LocateCollSeq(pParse, zColl) ){
+ goto exit_create_index;
+ }
+ pIndex->azColl[i] = zColl;
+ requestedSortOrder = pListItem->sortFlags & sortOrderMask;
+ pIndex->aSortOrder[i] = (u8)requestedSortOrder;
+ }
+
+ /* Append the table key to the end of the index. For WITHOUT ROWID
+ ** tables (when pPk!=0) this will be the declared PRIMARY KEY. For
+ ** normal tables (when pPk==0) this will be the rowid.
+ */
+ if( pPk ){
+ for(j=0; j<pPk->nKeyCol; j++){
+ int x = pPk->aiColumn[j];
+ assert( x>=0 );
+ if( isDupColumn(pIndex, pIndex->nKeyCol, pPk, j) ){
+ pIndex->nColumn--;
+ }else{
+ testcase( hasColumn(pIndex->aiColumn,pIndex->nKeyCol,x) );
+ pIndex->aiColumn[i] = x;
+ pIndex->azColl[i] = pPk->azColl[j];
+ pIndex->aSortOrder[i] = pPk->aSortOrder[j];
+ i++;
+ }
+ }
+ assert( i==pIndex->nColumn );
+ }else{
+ pIndex->aiColumn[i] = XN_ROWID;
+ pIndex->azColl[i] = sqlite3StrBINARY;
+ }
+ sqlite3DefaultRowEst(pIndex);
+ if( pParse->pNewTable==0 ) estimateIndexWidth(pIndex);
+
+ /* If this index contains every column of its table, then mark
+ ** it as a covering index */
+ assert( HasRowid(pTab)
+ || pTab->iPKey<0 || sqlite3TableColumnToIndex(pIndex, pTab->iPKey)>=0 );
+ recomputeColumnsNotIndexed(pIndex);
+ if( pTblName!=0 && pIndex->nColumn>=pTab->nCol ){
+ pIndex->isCovering = 1;
+ for(j=0; j<pTab->nCol; j++){
+ if( j==pTab->iPKey ) continue;
+ if( sqlite3TableColumnToIndex(pIndex,j)>=0 ) continue;
+ pIndex->isCovering = 0;
+ break;
+ }
+ }
+
+ if( pTab==pParse->pNewTable ){
+ /* This routine has been called to create an automatic index as a
+ ** result of a PRIMARY KEY or UNIQUE clause on a column definition, or
+ ** a PRIMARY KEY or UNIQUE clause following the column definitions.
+ ** i.e. one of:
+ **
+ ** CREATE TABLE t(x PRIMARY KEY, y);
+ ** CREATE TABLE t(x, y, UNIQUE(x, y));
+ **
+ ** Either way, check to see if the table already has such an index. If
+ ** so, don't bother creating this one. This only applies to
+ ** automatically created indices. Users can do as they wish with
+ ** explicit indices.
+ **
+    ** Two UNIQUE or PRIMARY KEY constraints are considered equivalent
+    ** (and so the second one is suppressed) even if they have different
+    ** sort orders.
+ **
+ ** If there are different collating sequences or if the columns of
+ ** the constraint occur in different orders, then the constraints are
+ ** considered distinct and both result in separate indices.
+ */
+ Index *pIdx;
+ for(pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext){
+ int k;
+ assert( IsUniqueIndex(pIdx) );
+ assert( pIdx->idxType!=SQLITE_IDXTYPE_APPDEF );
+ assert( IsUniqueIndex(pIndex) );
+
+ if( pIdx->nKeyCol!=pIndex->nKeyCol ) continue;
+ for(k=0; k<pIdx->nKeyCol; k++){
+ const char *z1;
+ const char *z2;
+ assert( pIdx->aiColumn[k]>=0 );
+ if( pIdx->aiColumn[k]!=pIndex->aiColumn[k] ) break;
+ z1 = pIdx->azColl[k];
+ z2 = pIndex->azColl[k];
+ if( sqlite3StrICmp(z1, z2) ) break;
+ }
+ if( k==pIdx->nKeyCol ){
+ if( pIdx->onError!=pIndex->onError ){
+ /* This constraint creates the same index as a previous
+ ** constraint specified somewhere in the CREATE TABLE statement.
+ ** However the ON CONFLICT clauses are different. If both this
+ ** constraint and the previous equivalent constraint have explicit
+ ** ON CONFLICT clauses this is an error. Otherwise, use the
+ ** explicitly specified behavior for the index.
+ */
+ if( !(pIdx->onError==OE_Default || pIndex->onError==OE_Default) ){
+ sqlite3ErrorMsg(pParse,
+ "conflicting ON CONFLICT clauses specified", 0);
+ }
+ if( pIdx->onError==OE_Default ){
+ pIdx->onError = pIndex->onError;
+ }
+ }
+ if( idxType==SQLITE_IDXTYPE_PRIMARYKEY ) pIdx->idxType = idxType;
+ if( IN_RENAME_OBJECT ){
+ pIndex->pNext = pParse->pNewIndex;
+ pParse->pNewIndex = pIndex;
+ pIndex = 0;
+ }
+ goto exit_create_index;
+ }
+ }
+ }
+
+ if( !IN_RENAME_OBJECT ){
+
+ /* Link the new Index structure to its table and to the other
+ ** in-memory database structures.
+ */
+ assert( pParse->nErr==0 );
+ if( db->init.busy ){
+ Index *p;
+ assert( !IN_SPECIAL_PARSE );
+ assert( sqlite3SchemaMutexHeld(db, 0, pIndex->pSchema) );
+ if( pTblName!=0 ){
+ pIndex->tnum = db->init.newTnum;
+ if( sqlite3IndexHasDuplicateRootPage(pIndex) ){
+ sqlite3ErrorMsg(pParse, "invalid rootpage");
+ pParse->rc = SQLITE_CORRUPT_BKPT;
+ goto exit_create_index;
+ }
+ }
+ p = sqlite3HashInsert(&pIndex->pSchema->idxHash,
+ pIndex->zName, pIndex);
+ if( p ){
+ assert( p==pIndex ); /* Malloc must have failed */
+ sqlite3OomFault(db);
+ goto exit_create_index;
+ }
+ db->mDbFlags |= DBFLAG_SchemaChange;
+ }
+
+ /* If this is the initial CREATE INDEX statement (or CREATE TABLE if the
+ ** index is an implied index for a UNIQUE or PRIMARY KEY constraint) then
+ ** emit code to allocate the index rootpage on disk and make an entry for
+ ** the index in the sqlite_schema table and populate the index with
+ ** content. But, do not do this if we are simply reading the sqlite_schema
+ ** table to parse the schema, or if this index is the PRIMARY KEY index
+ ** of a WITHOUT ROWID table.
+ **
+ ** If pTblName==0 it means this index is generated as an implied PRIMARY KEY
+ ** or UNIQUE index in a CREATE TABLE statement. Since the table
+ ** has just been created, it contains no data and the index initialization
+ ** step can be skipped.
+ */
+ else if( HasRowid(pTab) || pTblName!=0 ){
+ Vdbe *v;
+ char *zStmt;
+ int iMem = ++pParse->nMem;
+
+ v = sqlite3GetVdbe(pParse);
+ if( v==0 ) goto exit_create_index;
+
+ sqlite3BeginWriteOperation(pParse, 1, iDb);
+
+ /* Create the rootpage for the index using CreateIndex. But before
+ ** doing so, code a Noop instruction and store its address in
+ ** Index.tnum. This is required in case this index is actually a
+ ** PRIMARY KEY and the table is actually a WITHOUT ROWID table. In
+ ** that case the convertToWithoutRowidTable() routine will replace
+ ** the Noop with a Goto to jump over the VDBE code generated below. */
+ pIndex->tnum = (Pgno)sqlite3VdbeAddOp0(v, OP_Noop);
+ sqlite3VdbeAddOp3(v, OP_CreateBtree, iDb, iMem, BTREE_BLOBKEY);
+
+ /* Gather the complete text of the CREATE INDEX statement into
+ ** the zStmt variable
+ */
+ assert( pName!=0 || pStart==0 );
+ if( pStart ){
+ int n = (int)(pParse->sLastToken.z - pName->z) + pParse->sLastToken.n;
+ if( pName->z[n-1]==';' ) n--;
+ /* A named index with an explicit CREATE INDEX statement */
+ zStmt = sqlite3MPrintf(db, "CREATE%s INDEX %.*s",
+ onError==OE_None ? "" : " UNIQUE", n, pName->z);
+ }else{
+ /* An automatic index created by a PRIMARY KEY or UNIQUE constraint */
+ /* zStmt = sqlite3MPrintf(""); */
+ zStmt = 0;
+ }
+
+ /* Add an entry in sqlite_schema for this index
+ */
+ sqlite3NestedParse(pParse,
+ "INSERT INTO %Q." DFLT_SCHEMA_TABLE " VALUES('index',%Q,%Q,#%d,%Q);",
+ db->aDb[iDb].zDbSName,
+ pIndex->zName,
+ pTab->zName,
+ iMem,
+ zStmt
+ );
+ sqlite3DbFree(db, zStmt);
+
+ /* Fill the index with data and reparse the schema. Code an OP_Expire
+ ** to invalidate all pre-compiled statements.
+ */
+ if( pTblName ){
+ sqlite3RefillIndex(pParse, pIndex, iMem);
+ sqlite3ChangeCookie(pParse, iDb);
+ sqlite3VdbeAddParseSchemaOp(v, iDb,
+ sqlite3MPrintf(db, "name='%q' AND type='index'", pIndex->zName));
+ sqlite3VdbeAddOp2(v, OP_Expire, 0, 1);
+ }
+
+ sqlite3VdbeJumpHere(v, (int)pIndex->tnum);
+ }
+ }
+ if( db->init.busy || pTblName==0 ){
+ pIndex->pNext = pTab->pIndex;
+ pTab->pIndex = pIndex;
+ pIndex = 0;
+ }
+ else if( IN_RENAME_OBJECT ){
+ assert( pParse->pNewIndex==0 );
+ pParse->pNewIndex = pIndex;
+ pIndex = 0;
+ }
+
+ /* Clean up before exiting */
+exit_create_index:
+ if( pIndex ) sqlite3FreeIndex(db, pIndex);
+ if( pTab ){ /* Ensure all REPLACE indexes are at the end of the list */
+ Index **ppFrom = &pTab->pIndex;
+ Index *pThis;
+ for(ppFrom=&pTab->pIndex; (pThis = *ppFrom)!=0; ppFrom=&pThis->pNext){
+ Index *pNext;
+ if( pThis->onError!=OE_Replace ) continue;
+ while( (pNext = pThis->pNext)!=0 && pNext->onError!=OE_Replace ){
+ *ppFrom = pNext;
+ pThis->pNext = pNext->pNext;
+ pNext->pNext = pThis;
+ ppFrom = &pNext->pNext;
+ }
+ break;
+ }
+ }
+ sqlite3ExprDelete(db, pPIWhere);
+ sqlite3ExprListDelete(db, pList);
+ sqlite3SrcListDelete(db, pTblName);
+ sqlite3DbFree(db, zName);
+}
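+
+/* Ordering sketch (derived from the cleanup loop above, using
+** hypothetical index names): given the in-memory list
+**
+**     i1(REPLACE) -> i2(ABORT) -> i3(REPLACE)
+**
+** the loop rewrites it as i2(ABORT) -> i1(REPLACE) -> i3(REPLACE),
+** so every OE_Replace index trails the non-REPLACE ones.
+*/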
+
+/*
+** Fill the Index.aiRowEst[] array with default information - information
+** to be used when we have not run the ANALYZE command.
+**
+** aiRowEst[0] is supposed to contain the number of elements in the index.
+** Since we do not know, guess 1 million. aiRowEst[1] is an estimate of the
+** number of rows in the table that match any particular value of the
+** first column of the index. aiRowEst[2] is an estimate of the number
+** of rows that match any particular combination of the first 2 columns
+** of the index. And so forth. It must always be the case that
+**
+** aiRowEst[N]<=aiRowEst[N-1]
+** aiRowEst[N]>=1
+**
+** Apart from that, we have little to go on besides intuition as to
+** how aiRowEst[] should be initialized. The numbers generated here
+** are based on typical values found in actual indices.
+*/
+SQLITE_PRIVATE void sqlite3DefaultRowEst(Index *pIdx){
+ /* 10, 9, 8, 7, 6 */
+ static const LogEst aVal[] = { 33, 32, 30, 28, 26 };
+ LogEst *a = pIdx->aiRowLogEst;
+ LogEst x;
+ int nCopy = MIN(ArraySize(aVal), pIdx->nKeyCol);
+ int i;
+
+ /* Indexes with default row estimates should not have stat1 data */
+ assert( !pIdx->hasStat1 );
+
+ /* Set the first entry (number of rows in the index) to the estimated
+ ** number of rows in the table, or half the number of rows in the table
+ ** for a partial index.
+ **
+ ** 2020-05-27: If some of the stat data is coming from the sqlite_stat1
+ ** table but other parts we are having to guess at, then do not let the
+ ** estimated number of rows in the table be less than 1000 (LogEst 99).
+ ** Failure to do this can cause the indexes for which we do not have
+ ** stat1 data to be ignored by the query planner.
+ */
+ x = pIdx->pTable->nRowLogEst;
+ assert( 99==sqlite3LogEst(1000) );
+ if( x<99 ){
+ pIdx->pTable->nRowLogEst = x = 99;
+ }
+ if( pIdx->pPartIdxWhere!=0 ) x -= 10; assert( 10==sqlite3LogEst(2) );
+ a[0] = x;
+
+ /* Estimate that a[1] is 10, a[2] is 9, a[3] is 8, a[4] is 7, a[5] is
+ ** 6 and each subsequent value (if any) is 5. */
+ memcpy(&a[1], aVal, nCopy*sizeof(LogEst));
+ for(i=nCopy+1; i<=pIdx->nKeyCol; i++){
+ a[i] = 23; assert( 23==sqlite3LogEst(5) );
+ }
+
+ assert( 0==sqlite3LogEst(1) );
+ if( IsUniqueIndex(pIdx) ) a[pIdx->nKeyCol] = 0;
+}
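+
+/* Illustrative sketch (not part of the build): a LogEst is a base-2
+** logarithm scaled by 10, so the constants used above decode roughly
+** as follows:
+**
+**     LogEst 33  ~  2^3.3  ~   10 rows per distinct 1st-column value
+**     LogEst 23  ~  2^2.3  ~    5 rows
+**     LogEst 99  ~  2^9.9  ~ 1000 rows (the minimum table estimate)
+**
+** A hypothetical decoder, assuming <math.h> is available:
+**
+**     double logEstToValue(LogEst x){ return pow(2.0, x/10.0); }
+*/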
+
+/*
+** This routine will drop an existing named index. This routine
+** implements the DROP INDEX statement.
+*/
+SQLITE_PRIVATE void sqlite3DropIndex(Parse *pParse, SrcList *pName, int ifExists){
+ Index *pIndex;
+ Vdbe *v;
+ sqlite3 *db = pParse->db;
+ int iDb;
+
+ assert( pParse->nErr==0 ); /* Never called with prior errors */
+ if( db->mallocFailed ){
+ goto exit_drop_index;
+ }
+ assert( pName->nSrc==1 );
+ if( SQLITE_OK!=sqlite3ReadSchema(pParse) ){
+ goto exit_drop_index;
+ }
+ pIndex = sqlite3FindIndex(db, pName->a[0].zName, pName->a[0].zDatabase);
+ if( pIndex==0 ){
+ if( !ifExists ){
+ sqlite3ErrorMsg(pParse, "no such index: %S", pName, 0);
+ }else{
+ sqlite3CodeVerifyNamedSchema(pParse, pName->a[0].zDatabase);
+ }
+ pParse->checkSchema = 1;
+ goto exit_drop_index;
+ }
+ if( pIndex->idxType!=SQLITE_IDXTYPE_APPDEF ){
+ sqlite3ErrorMsg(pParse, "index associated with UNIQUE "
+ "or PRIMARY KEY constraint cannot be dropped", 0);
+ goto exit_drop_index;
+ }
+ iDb = sqlite3SchemaToIndex(db, pIndex->pSchema);
+#ifndef SQLITE_OMIT_AUTHORIZATION
+ {
+ int code = SQLITE_DROP_INDEX;
+ Table *pTab = pIndex->pTable;
+ const char *zDb = db->aDb[iDb].zDbSName;
+ const char *zTab = SCHEMA_TABLE(iDb);
+ if( sqlite3AuthCheck(pParse, SQLITE_DELETE, zTab, 0, zDb) ){
+ goto exit_drop_index;
+ }
+ if( !OMIT_TEMPDB && iDb ) code = SQLITE_DROP_TEMP_INDEX;
+ if( sqlite3AuthCheck(pParse, code, pIndex->zName, pTab->zName, zDb) ){
+ goto exit_drop_index;
+ }
+ }
+#endif
+
+  /* Generate code to remove the index and its entry from the schema table */
+ v = sqlite3GetVdbe(pParse);
+ if( v ){
+ sqlite3BeginWriteOperation(pParse, 1, iDb);
+ sqlite3NestedParse(pParse,
+ "DELETE FROM %Q." DFLT_SCHEMA_TABLE " WHERE name=%Q AND type='index'",
+ db->aDb[iDb].zDbSName, pIndex->zName
+ );
+ sqlite3ClearStatTables(pParse, iDb, "idx", pIndex->zName);
+ sqlite3ChangeCookie(pParse, iDb);
+ destroyRootPage(pParse, pIndex->tnum, iDb);
+ sqlite3VdbeAddOp4(v, OP_DropIndex, iDb, 0, 0, pIndex->zName, 0);
+ }
+
+exit_drop_index:
+ sqlite3SrcListDelete(db, pName);
+}
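+
+/* Usage sketch (hypothetical table and index names): only indexes
+** created by an explicit CREATE INDEX statement may be dropped;
+** implied UNIQUE/PRIMARY KEY indexes are rejected above.
+**
+**     CREATE TABLE t1(a UNIQUE);   -- implied index: not droppable
+**     CREATE INDEX i1 ON t1(a);    -- explicit index
+**     DROP INDEX IF EXISTS i1;     -- ok; IF EXISTS suppresses the
+**                                  -- "no such index" error
+*/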
+
+/*
+** pArray is a pointer to an array of objects. Each object in the
+** array is szEntry bytes in size. This routine uses sqlite3DbRealloc()
+** to extend the array so that there is space for a new object at the end.
+**
+** When this function is called, *pnEntry contains the current size of
+** the array (in entries - so the allocation is ((*pnEntry) * szEntry) bytes
+** in total).
+**
+** If the realloc() is successful (i.e. if no OOM condition occurs), the
+** space allocated for the new object is zeroed, *pnEntry updated to
+** reflect the new size of the array and a pointer to the new allocation
+** returned. *pIdx is set to the index of the new array entry in this case.
+**
+** Otherwise, if the realloc() fails, *pIdx is set to -1, *pnEntry remains
+** unchanged and a copy of pArray returned.
+*/
+SQLITE_PRIVATE void *sqlite3ArrayAllocate(
+ sqlite3 *db, /* Connection to notify of malloc failures */
+ void *pArray, /* Array of objects. Might be reallocated */
+ int szEntry, /* Size of each object in the array */
+ int *pnEntry, /* Number of objects currently in use */
+ int *pIdx /* Write the index of a new slot here */
+){
+ char *z;
+ sqlite3_int64 n = *pIdx = *pnEntry;
+ if( (n & (n-1))==0 ){
+ sqlite3_int64 sz = (n==0) ? 1 : 2*n;
+ void *pNew = sqlite3DbRealloc(db, pArray, sz*szEntry);
+ if( pNew==0 ){
+ *pIdx = -1;
+ return pArray;
+ }
+ pArray = pNew;
+ }
+ z = (char*)pArray;
+ memset(&z[n * szEntry], 0, szEntry);
+ ++*pnEntry;
+ return pArray;
+}
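+
+/* Growth sketch: (n & (n-1))==0 is true exactly when n is zero or a
+** power of two, so the realloc above fires only as *pnEntry crosses
+** 0, 1, 2, 4, 8, ... and each time the capacity doubles.  Appending
+** N elements therefore costs O(log N) reallocations rather than N.
+*/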
+
+/*
+** Append a new element to the given IdList. Create a new IdList if
+** need be.
+**
+** A new IdList is returned, or NULL if malloc() fails.
+*/
+SQLITE_PRIVATE IdList *sqlite3IdListAppend(Parse *pParse, IdList *pList, Token *pToken){
+ sqlite3 *db = pParse->db;
+ int i;
+ if( pList==0 ){
+ pList = sqlite3DbMallocZero(db, sizeof(IdList) );
+ if( pList==0 ) return 0;
+ }
+ pList->a = sqlite3ArrayAllocate(
+ db,
+ pList->a,
+ sizeof(pList->a[0]),
+ &pList->nId,
+ &i
+ );
+ if( i<0 ){
+ sqlite3IdListDelete(db, pList);
+ return 0;
+ }
+ pList->a[i].zName = sqlite3NameFromToken(db, pToken);
+ if( IN_RENAME_OBJECT && pList->a[i].zName ){
+ sqlite3RenameTokenMap(pParse, (void*)pList->a[i].zName, pToken);
+ }
+ return pList;
+}
+
+/*
+** Delete an IdList.
+*/
+SQLITE_PRIVATE void sqlite3IdListDelete(sqlite3 *db, IdList *pList){
+ int i;
+ if( pList==0 ) return;
+ for(i=0; i<pList->nId; i++){
+ sqlite3DbFree(db, pList->a[i].zName);
+ }
+ sqlite3DbFree(db, pList->a);
+ sqlite3DbFreeNN(db, pList);
+}
+
+/*
+** Return the index in pList of the identifier named zName. Return -1
+** if not found.
+*/
+SQLITE_PRIVATE int sqlite3IdListIndex(IdList *pList, const char *zName){
+ int i;
+ if( pList==0 ) return -1;
+ for(i=0; i<pList->nId; i++){
+ if( sqlite3StrICmp(pList->a[i].zName, zName)==0 ) return i;
+ }
+ return -1;
+}
+
+/*
+** Maximum size of a SrcList object.
+** The SrcList object is used to represent the FROM clause of a
+** SELECT statement, and the query planner cannot deal with more
+** than 64 tables in a join. So any value larger than 64 here
+** is sufficient for most uses. Smaller values, like say 10, are
+** appropriate for small and memory-limited applications.
+*/
+#ifndef SQLITE_MAX_SRCLIST
+# define SQLITE_MAX_SRCLIST 200
+#endif
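+
+/* For example, a memory-limited build can lower this limit at compile
+** time; the #ifndef guard above is what makes the override possible
+** (the value 10 below is only an illustration):
+**
+**     cc -DSQLITE_MAX_SRCLIST=10 -c sqlite3.c
+*/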
+
+/*
+** Expand the space allocated for the given SrcList object by
+** creating nExtra new slots beginning at iStart. iStart is zero based.
+** New slots are zeroed.
+**
+** For example, suppose a SrcList initially contains two entries: A,B.
+** To append 3 new entries onto the end, do this:
+**
+** sqlite3SrcListEnlarge(db, pSrclist, 3, 2);
+**
+** After the call above it would contain: A, B, nil, nil, nil.
+** If the iStart argument had been 1 instead of 2, then the result
+** would have been: A, nil, nil, nil, B. To prepend the new slots,
+** the iStart value would be 0. The result then would
+** be: nil, nil, nil, A, B.
+**
+** If a memory allocation fails or the SrcList becomes too large, leave
+** the original SrcList unchanged, return NULL, and leave an error message
+** in pParse.
+*/
+SQLITE_PRIVATE SrcList *sqlite3SrcListEnlarge(
+ Parse *pParse, /* Parsing context into which errors are reported */
+ SrcList *pSrc, /* The SrcList to be enlarged */
+ int nExtra, /* Number of new slots to add to pSrc->a[] */
+ int iStart /* Index in pSrc->a[] of first new slot */
+){
+ int i;
+
+ /* Sanity checking on calling parameters */
+ assert( iStart>=0 );
+ assert( nExtra>=1 );
+ assert( pSrc!=0 );
+ assert( iStart<=pSrc->nSrc );
+
+ /* Allocate additional space if needed */
+ if( (u32)pSrc->nSrc+nExtra>pSrc->nAlloc ){
+ SrcList *pNew;
+ sqlite3_int64 nAlloc = 2*(sqlite3_int64)pSrc->nSrc+nExtra;
+ sqlite3 *db = pParse->db;
+
+ if( pSrc->nSrc+nExtra>=SQLITE_MAX_SRCLIST ){
+ sqlite3ErrorMsg(pParse, "too many FROM clause terms, max: %d",
+ SQLITE_MAX_SRCLIST);
+ return 0;
+ }
+ if( nAlloc>SQLITE_MAX_SRCLIST ) nAlloc = SQLITE_MAX_SRCLIST;
+ pNew = sqlite3DbRealloc(db, pSrc,
+ sizeof(*pSrc) + (nAlloc-1)*sizeof(pSrc->a[0]) );
+ if( pNew==0 ){
+ assert( db->mallocFailed );
+ return 0;
+ }
+ pSrc = pNew;
+ pSrc->nAlloc = nAlloc;
+ }
+
+ /* Move existing slots that come after the newly inserted slots
+ ** out of the way */
+ for(i=pSrc->nSrc-1; i>=iStart; i--){
+ pSrc->a[i+nExtra] = pSrc->a[i];
+ }
+ pSrc->nSrc += nExtra;
+
+ /* Zero the newly allocated slots */
+ memset(&pSrc->a[iStart], 0, sizeof(pSrc->a[0])*nExtra);
+ for(i=iStart; i<iStart+nExtra; i++){
+ pSrc->a[i].iCursor = -1;
+ }
+
+ /* Return a pointer to the enlarged SrcList */
+ return pSrc;
+}
+
+
+/*
+** Append a new table name to the given SrcList. Create a new SrcList if
+** need be. A new entry is created in the SrcList even if pTable is NULL.
+**
+** A SrcList is returned, or NULL if there is an OOM error or if the
+** SrcList grows too large.  The returned SrcList might be the same as
+** the SrcList that was input, or it might be a new one.  If an OOM
+** error does occur, then the prior value of pList that is input to
+** this routine is automatically freed.
+**
+** If pDatabase is not null, it means that the table has an optional
+** database name prefix. Like this: "database.table". The pDatabase
+** points to the table name and the pTable points to the database name.
+** The SrcList.a[].zName field is filled with the table name which might
+** come from pTable (if pDatabase is NULL) or from pDatabase.
+** SrcList.a[].zDatabase is filled with the database name from pTable,
+** or with NULL if no database is specified.
+**
+** In other words, if called like this:
+**
+** sqlite3SrcListAppend(D,A,B,0);
+**
+** Then B is a table name and the database name is unspecified. If called
+** like this:
+**
+** sqlite3SrcListAppend(D,A,B,C);
+**
+** Then C is the table name and B is the database name. If C is defined
+** then so is B. In other words, we never have a case where:
+**
+** sqlite3SrcListAppend(D,A,0,C);
+**
+** Both pTable and pDatabase are assumed to be quoted. They are dequoted
+** before being added to the SrcList.
+*/
+SQLITE_PRIVATE SrcList *sqlite3SrcListAppend(
+ Parse *pParse, /* Parsing context, in which errors are reported */
+ SrcList *pList, /* Append to this SrcList. NULL creates a new SrcList */
+ Token *pTable, /* Table to append */
+ Token *pDatabase /* Database of the table */
+){
+ struct SrcList_item *pItem;
+ sqlite3 *db;
+ assert( pDatabase==0 || pTable!=0 ); /* Cannot have C without B */
+ assert( pParse!=0 );
+ assert( pParse->db!=0 );
+ db = pParse->db;
+ if( pList==0 ){
+ pList = sqlite3DbMallocRawNN(pParse->db, sizeof(SrcList) );
+ if( pList==0 ) return 0;
+ pList->nAlloc = 1;
+ pList->nSrc = 1;
+ memset(&pList->a[0], 0, sizeof(pList->a[0]));
+ pList->a[0].iCursor = -1;
+ }else{
+ SrcList *pNew = sqlite3SrcListEnlarge(pParse, pList, 1, pList->nSrc);
+ if( pNew==0 ){
+ sqlite3SrcListDelete(db, pList);
+ return 0;
+ }else{
+ pList = pNew;
+ }
+ }
+ pItem = &pList->a[pList->nSrc-1];
+ if( pDatabase && pDatabase->z==0 ){
+ pDatabase = 0;
+ }
+ if( pDatabase ){
+ pItem->zName = sqlite3NameFromToken(db, pDatabase);
+ pItem->zDatabase = sqlite3NameFromToken(db, pTable);
+ }else{
+ pItem->zName = sqlite3NameFromToken(db, pTable);
+ pItem->zDatabase = 0;
+ }
+ return pList;
+}
+
+/*
+** Assign VdbeCursor index numbers to all tables in a SrcList
+*/
+SQLITE_PRIVATE void sqlite3SrcListAssignCursors(Parse *pParse, SrcList *pList){
+ int i;
+ struct SrcList_item *pItem;
+ assert(pList || pParse->db->mallocFailed );
+ if( pList ){
+ for(i=0, pItem=pList->a; i<pList->nSrc; i++, pItem++){
+ if( pItem->iCursor>=0 ) break;
+ pItem->iCursor = pParse->nTab++;
+ if( pItem->pSelect ){
+ sqlite3SrcListAssignCursors(pParse, pItem->pSelect->pSrc);
+ }
+ }
+ }
+}
+
+/*
+** Delete an entire SrcList including all its substructure.
+*/
+SQLITE_PRIVATE void sqlite3SrcListDelete(sqlite3 *db, SrcList *pList){
+ int i;
+ struct SrcList_item *pItem;
+ if( pList==0 ) return;
+ for(pItem=pList->a, i=0; i<pList->nSrc; i++, pItem++){
+ sqlite3DbFree(db, pItem->zDatabase);
+ sqlite3DbFree(db, pItem->zName);
+ sqlite3DbFree(db, pItem->zAlias);
+ if( pItem->fg.isIndexedBy ) sqlite3DbFree(db, pItem->u1.zIndexedBy);
+ if( pItem->fg.isTabFunc ) sqlite3ExprListDelete(db, pItem->u1.pFuncArg);
+ sqlite3DeleteTable(db, pItem->pTab);
+ sqlite3SelectDelete(db, pItem->pSelect);
+ sqlite3ExprDelete(db, pItem->pOn);
+ sqlite3IdListDelete(db, pItem->pUsing);
+ }
+ sqlite3DbFreeNN(db, pList);
+}
+
+/*
+** This routine is called by the parser to add a new term to the
+** end of a growing FROM clause. The "p" parameter is the part of
+** the FROM clause that has already been constructed. "p" is NULL
+** if this is the first term of the FROM clause. pTable and pDatabase
+** are the name of the table and database named in the FROM clause term.
+** pDatabase is NULL if the database name qualifier is missing - the
+** usual case. If the term has an alias, then pAlias points to the
+** alias token. If the term is a subquery, then pSubquery is the
+** SELECT statement that the subquery encodes. The pTable and
+** pDatabase parameters are NULL for subqueries. The pOn and pUsing
+** parameters are the content of the ON and USING clauses.
+**
+** Return a new SrcList which encodes the FROM clause with the new
+** term added.
+*/
+SQLITE_PRIVATE SrcList *sqlite3SrcListAppendFromTerm(
+ Parse *pParse, /* Parsing context */
+ SrcList *p, /* The left part of the FROM clause already seen */
+ Token *pTable, /* Name of the table to add to the FROM clause */
+ Token *pDatabase, /* Name of the database containing pTable */
+ Token *pAlias, /* The right-hand side of the AS subexpression */
+ Select *pSubquery, /* A subquery used in place of a table name */
+ Expr *pOn, /* The ON clause of a join */
+ IdList *pUsing /* The USING clause of a join */
+){
+ struct SrcList_item *pItem;
+ sqlite3 *db = pParse->db;
+ if( !p && (pOn || pUsing) ){
+ sqlite3ErrorMsg(pParse, "a JOIN clause is required before %s",
+ (pOn ? "ON" : "USING")
+ );
+ goto append_from_error;
+ }
+ p = sqlite3SrcListAppend(pParse, p, pTable, pDatabase);
+ if( p==0 ){
+ goto append_from_error;
+ }
+ assert( p->nSrc>0 );
+ pItem = &p->a[p->nSrc-1];
+ assert( (pTable==0)==(pDatabase==0) );
+ assert( pItem->zName==0 || pDatabase!=0 );
+ if( IN_RENAME_OBJECT && pItem->zName ){
+ Token *pToken = (ALWAYS(pDatabase) && pDatabase->z) ? pDatabase : pTable;
+ sqlite3RenameTokenMap(pParse, pItem->zName, pToken);
+ }
+ assert( pAlias!=0 );
+ if( pAlias->n ){
+ pItem->zAlias = sqlite3NameFromToken(db, pAlias);
+ }
+ pItem->pSelect = pSubquery;
+ pItem->pOn = pOn;
+ pItem->pUsing = pUsing;
+ return p;
+
+ append_from_error:
+ assert( p==0 );
+ sqlite3ExprDelete(db, pOn);
+ sqlite3IdListDelete(db, pUsing);
+ sqlite3SelectDelete(db, pSubquery);
+ return 0;
+}
+
+/*
+** Add an INDEXED BY or NOT INDEXED clause to the most recently added
+** element of the source-list passed as the second argument.
+*/
+SQLITE_PRIVATE void sqlite3SrcListIndexedBy(Parse *pParse, SrcList *p, Token *pIndexedBy){
+ assert( pIndexedBy!=0 );
+ if( p && pIndexedBy->n>0 ){
+ struct SrcList_item *pItem;
+ assert( p->nSrc>0 );
+ pItem = &p->a[p->nSrc-1];
+ assert( pItem->fg.notIndexed==0 );
+ assert( pItem->fg.isIndexedBy==0 );
+ assert( pItem->fg.isTabFunc==0 );
+ if( pIndexedBy->n==1 && !pIndexedBy->z ){
+ /* A "NOT INDEXED" clause was supplied. See parse.y
+ ** construct "indexed_opt" for details. */
+ pItem->fg.notIndexed = 1;
+ }else{
+ pItem->u1.zIndexedBy = sqlite3NameFromToken(pParse->db, pIndexedBy);
+ pItem->fg.isIndexedBy = 1;
+ }
+ }
+}
+
+/*
+** Append the contents of SrcList p2 to SrcList p1 and return the resulting
+** SrcList. Or, if an error occurs, return NULL. In all cases, p1 and p2
+** are deleted by this function.
+*/
+SQLITE_PRIVATE SrcList *sqlite3SrcListAppendList(Parse *pParse, SrcList *p1, SrcList *p2){
+ assert( p1 && p1->nSrc==1 );
+ if( p2 ){
+ SrcList *pNew = sqlite3SrcListEnlarge(pParse, p1, p2->nSrc, 1);
+ if( pNew==0 ){
+ sqlite3SrcListDelete(pParse->db, p2);
+ }else{
+ p1 = pNew;
+ memcpy(&p1->a[1], p2->a, p2->nSrc*sizeof(struct SrcList_item));
+ sqlite3DbFree(pParse->db, p2);
+ }
+ }
+ return p1;
+}
+
+/*
+** Add the list of function arguments to the SrcList entry for a
+** table-valued-function.
+*/
+SQLITE_PRIVATE void sqlite3SrcListFuncArgs(Parse *pParse, SrcList *p, ExprList *pList){
+ if( p ){
+ struct SrcList_item *pItem = &p->a[p->nSrc-1];
+ assert( pItem->fg.notIndexed==0 );
+ assert( pItem->fg.isIndexedBy==0 );
+ assert( pItem->fg.isTabFunc==0 );
+ pItem->u1.pFuncArg = pList;
+ pItem->fg.isTabFunc = 1;
+ }else{
+ sqlite3ExprListDelete(pParse->db, pList);
+ }
+}
+
+/*
+** When building up a FROM clause in the parser, the join operator
+** is initially attached to the left operand. But the code generator
+** expects the join operator to be on the right operand. This routine
+** shifts all join operators from left to right for an entire FROM
+** clause.
+**
+** Example: Suppose the join is like this:
+**
+** A natural cross join B
+**
+** The operator is "natural cross join". The A and B operands are stored
+** in p->a[0] and p->a[1], respectively. The parser initially stores the
+** operator with A. This routine shifts that operator over to B.
+*/
+SQLITE_PRIVATE void sqlite3SrcListShiftJoinType(SrcList *p){
+ if( p ){
+ int i;
+ for(i=p->nSrc-1; i>0; i--){
+ p->a[i].fg.jointype = p->a[i-1].fg.jointype;
+ }
+ p->a[0].fg.jointype = 0;
+ }
+}
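+
+/* Shift sketch: for "A natural cross join B" the parser initially
+** stores the operator bits on a[0] (the A term).  Schematically:
+**
+**     before:  a[0].fg.jointype==NATURAL|CROSS   a[1].fg.jointype==0
+**     after:   a[0].fg.jointype==0               a[1].fg.jointype==NATURAL|CROSS
+*/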
+
+/*
+** Generate VDBE code for a BEGIN statement.
+*/
+SQLITE_PRIVATE void sqlite3BeginTransaction(Parse *pParse, int type){
+ sqlite3 *db;
+ Vdbe *v;
+ int i;
+
+ assert( pParse!=0 );
+ db = pParse->db;
+ assert( db!=0 );
+ if( sqlite3AuthCheck(pParse, SQLITE_TRANSACTION, "BEGIN", 0, 0) ){
+ return;
+ }
+ v = sqlite3GetVdbe(pParse);
+ if( !v ) return;
+ if( type!=TK_DEFERRED ){
+ for(i=0; i<db->nDb; i++){
+ sqlite3VdbeAddOp2(v, OP_Transaction, i, (type==TK_EXCLUSIVE)+1);
+ sqlite3VdbeUsesBtree(v, i);
+ }
+ }
+ sqlite3VdbeAddOp0(v, OP_AutoCommit);
+}
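+
+/* Usage sketch from the public API (hypothetical connection "db";
+** error handling elided).  BEGIN DEFERRED skips the OP_Transaction
+** loop above entirely; IMMEDIATE codes P2==1 (write) and EXCLUSIVE
+** codes P2==2 (exclusive) for every attached database:
+**
+**     sqlite3_exec(db, "BEGIN DEFERRED;",  0, 0, 0);
+**     sqlite3_exec(db, "BEGIN IMMEDIATE;", 0, 0, 0);
+**     sqlite3_exec(db, "BEGIN EXCLUSIVE;", 0, 0, 0);
+*/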
+
+/*
+** Generate VDBE code for a COMMIT or ROLLBACK statement.
+** Code for ROLLBACK is generated if eType==TK_ROLLBACK. Otherwise
+** code is generated for a COMMIT.
+*/
+SQLITE_PRIVATE void sqlite3EndTransaction(Parse *pParse, int eType){
+ Vdbe *v;
+ int isRollback;
+
+ assert( pParse!=0 );
+ assert( pParse->db!=0 );
+ assert( eType==TK_COMMIT || eType==TK_END || eType==TK_ROLLBACK );
+ isRollback = eType==TK_ROLLBACK;
+ if( sqlite3AuthCheck(pParse, SQLITE_TRANSACTION,
+ isRollback ? "ROLLBACK" : "COMMIT", 0, 0) ){
+ return;
+ }
+ v = sqlite3GetVdbe(pParse);
+ if( v ){
+ sqlite3VdbeAddOp2(v, OP_AutoCommit, 1, isRollback);
+ }
+}
+
+/*
+** This function is called by the parser when it parses a command to create,
+** release or rollback an SQL savepoint.
+*/
+SQLITE_PRIVATE void sqlite3Savepoint(Parse *pParse, int op, Token *pName){
+ char *zName = sqlite3NameFromToken(pParse->db, pName);
+ if( zName ){
+ Vdbe *v = sqlite3GetVdbe(pParse);
+#ifndef SQLITE_OMIT_AUTHORIZATION
+ static const char * const az[] = { "BEGIN", "RELEASE", "ROLLBACK" };
+ assert( !SAVEPOINT_BEGIN && SAVEPOINT_RELEASE==1 && SAVEPOINT_ROLLBACK==2 );
+#endif
+ if( !v || sqlite3AuthCheck(pParse, SQLITE_SAVEPOINT, az[op], zName, 0) ){
+ sqlite3DbFree(pParse->db, zName);
+ return;
+ }
+ sqlite3VdbeAddOp4(v, OP_Savepoint, op, 0, 0, zName, P4_DYNAMIC);
+ }
+}
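+
+/* Usage sketch (hypothetical savepoint name "sp1").  The op argument
+** selects among the az[] strings above:
+**
+**     SAVEPOINT sp1;       -- op==SAVEPOINT_BEGIN    (0)
+**     ROLLBACK TO sp1;     -- op==SAVEPOINT_ROLLBACK (2)
+**     RELEASE sp1;         -- op==SAVEPOINT_RELEASE  (1)
+*/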
+
+/*
+** Make sure the TEMP database is open and available for use. Return
+** the number of errors. Leave any error messages in the pParse structure.
+*/
+SQLITE_PRIVATE int sqlite3OpenTempDatabase(Parse *pParse){
+ sqlite3 *db = pParse->db;
+ if( db->aDb[1].pBt==0 && !pParse->explain ){
+ int rc;
+ Btree *pBt;
+ static const int flags =
+ SQLITE_OPEN_READWRITE |
+ SQLITE_OPEN_CREATE |
+ SQLITE_OPEN_EXCLUSIVE |
+ SQLITE_OPEN_DELETEONCLOSE |
+ SQLITE_OPEN_TEMP_DB;
+
+ rc = sqlite3BtreeOpen(db->pVfs, 0, db, &pBt, 0, flags);
+ if( rc!=SQLITE_OK ){
+ sqlite3ErrorMsg(pParse, "unable to open a temporary database "
+ "file for storing temporary tables");
+ pParse->rc = rc;
+ return 1;
+ }
+ db->aDb[1].pBt = pBt;
+ assert( db->aDb[1].pSchema );
+ if( SQLITE_NOMEM==sqlite3BtreeSetPageSize(pBt, db->nextPagesize, 0, 0) ){
+ sqlite3OomFault(db);
+ return 1;
+ }
+ }
+ return 0;
+}
+
+/*
+** Record the fact that the schema cookie will need to be verified
+** for database iDb. The code to actually verify the schema cookie
+** will occur at the end of the top-level VDBE and will be generated
+** later, by sqlite3FinishCoding().
+*/
+SQLITE_PRIVATE void sqlite3CodeVerifySchema(Parse *pParse, int iDb){
+ Parse *pToplevel = sqlite3ParseToplevel(pParse);
+
+ assert( iDb>=0 && iDb<pParse->db->nDb );
+ assert( pParse->db->aDb[iDb].pBt!=0 || iDb==1 );
+ assert( iDb<SQLITE_MAX_ATTACHED+2 );
+ assert( sqlite3SchemaMutexHeld(pParse->db, iDb, 0) );
+ if( DbMaskTest(pToplevel->cookieMask, iDb)==0 ){
+ DbMaskSet(pToplevel->cookieMask, iDb);
+ if( !OMIT_TEMPDB && iDb==1 ){
+ sqlite3OpenTempDatabase(pToplevel);
+ }
+ }
+}
+
+/*
+** If argument zDb is NULL, then call sqlite3CodeVerifySchema() for each
+** attached database. Otherwise, invoke it for the database named zDb only.
+*/
+SQLITE_PRIVATE void sqlite3CodeVerifyNamedSchema(Parse *pParse, const char *zDb){
+ sqlite3 *db = pParse->db;
+ int i;
+ for(i=0; i<db->nDb; i++){
+ Db *pDb = &db->aDb[i];
+ if( pDb->pBt && (!zDb || 0==sqlite3StrICmp(zDb, pDb->zDbSName)) ){
+ sqlite3CodeVerifySchema(pParse, i);
+ }
+ }
+}
+
+/*
+** Generate VDBE code that prepares for doing an operation that
+** might change the database.
+**
+** This routine starts a new transaction if we are not already within
+** a transaction. If we are already within a transaction, then a checkpoint
+** is set if the setStatement parameter is true. A checkpoint should
+** be set for operations that might fail (due to a constraint) part of
+** the way through and which will need to undo some writes without having to
+** rollback the whole transaction. For operations where all constraints
+** can be checked before any changes are made to the database, it is never
+** necessary to undo a write and the checkpoint should not be set.
+*/
+SQLITE_PRIVATE void sqlite3BeginWriteOperation(Parse *pParse, int setStatement, int iDb){
+ Parse *pToplevel = sqlite3ParseToplevel(pParse);
+ sqlite3CodeVerifySchema(pParse, iDb);
+ DbMaskSet(pToplevel->writeMask, iDb);
+ pToplevel->isMultiWrite |= setStatement;
+}
+
+/*
+** Indicate that the statement currently under construction might write
+** more than one entry (example: deleting one row then inserting another,
+** inserting multiple rows in a table, or inserting a row and index entries.)
+** If an abort occurs after some of these writes have completed, then it will
+** be necessary to undo the completed writes.
+*/
+SQLITE_PRIVATE void sqlite3MultiWrite(Parse *pParse){
+ Parse *pToplevel = sqlite3ParseToplevel(pParse);
+ pToplevel->isMultiWrite = 1;
+}
+
+/*
+** The code generator calls this routine if it discovers that it is
+** possible to abort a statement prior to completion. In order to
+** perform this abort without corrupting the database, we need to make
+** sure that the statement is protected by a statement transaction.
+**
+** Technically, we only need to set the mayAbort flag if the
+** isMultiWrite flag was previously set. There is a time dependency
+** such that the abort must occur after the multiwrite. This makes
+** some statements involving the REPLACE conflict resolution algorithm
+** go a little faster. But taking advantage of this time dependency
+** makes it more difficult to prove that the code is correct (in
+** particular, it prevents us from writing an effective
+** implementation of sqlite3AssertMayAbort()) and so we have chosen
+** to take the safe route and skip the optimization.
+*/
+SQLITE_PRIVATE void sqlite3MayAbort(Parse *pParse){
+ Parse *pToplevel = sqlite3ParseToplevel(pParse);
+ pToplevel->mayAbort = 1;
+}
+
+/*
+** Code an OP_Halt that causes the vdbe to return an SQLITE_CONSTRAINT
+** error. The onError parameter determines which (if any) of the statement
+** and/or current transaction is rolled back.
+*/
+SQLITE_PRIVATE void sqlite3HaltConstraint(
+ Parse *pParse, /* Parsing context */
+ int errCode, /* extended error code */
+ int onError, /* Constraint type */
+ char *p4, /* Error message */
+ i8 p4type, /* P4_STATIC or P4_TRANSIENT */
+ u8 p5Errmsg /* P5_ErrMsg type */
+){
+ Vdbe *v = sqlite3GetVdbe(pParse);
+ assert( (errCode&0xff)==SQLITE_CONSTRAINT || pParse->nested );
+ if( onError==OE_Abort ){
+ sqlite3MayAbort(pParse);
+ }
+ sqlite3VdbeAddOp4(v, OP_Halt, errCode, onError, 0, p4, p4type);
+ sqlite3VdbeChangeP5(v, p5Errmsg);
+}
+
+/*
+** Code an OP_Halt due to UNIQUE or PRIMARY KEY constraint violation.
+*/
+SQLITE_PRIVATE void sqlite3UniqueConstraint(
+ Parse *pParse, /* Parsing context */
+ int onError, /* Constraint type */
+ Index *pIdx /* The index that triggers the constraint */
+){
+ char *zErr;
+ int j;
+ StrAccum errMsg;
+ Table *pTab = pIdx->pTable;
+
+ sqlite3StrAccumInit(&errMsg, pParse->db, 0, 0,
+ pParse->db->aLimit[SQLITE_LIMIT_LENGTH]);
+ if( pIdx->aColExpr ){
+ sqlite3_str_appendf(&errMsg, "index '%q'", pIdx->zName);
+ }else{
+ for(j=0; j<pIdx->nKeyCol; j++){
+ char *zCol;
+ assert( pIdx->aiColumn[j]>=0 );
+ zCol = pTab->aCol[pIdx->aiColumn[j]].zName;
+ if( j ) sqlite3_str_append(&errMsg, ", ", 2);
+ sqlite3_str_appendall(&errMsg, pTab->zName);
+ sqlite3_str_append(&errMsg, ".", 1);
+ sqlite3_str_appendall(&errMsg, zCol);
+ }
+ }
+ zErr = sqlite3StrAccumFinish(&errMsg);
+ sqlite3HaltConstraint(pParse,
+ IsPrimaryKeyIndex(pIdx) ? SQLITE_CONSTRAINT_PRIMARYKEY
+ : SQLITE_CONSTRAINT_UNIQUE,
+ onError, zErr, P4_DYNAMIC, P5_ConstraintUnique);
+}
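+
+/* Message sketch (hypothetical schema): for CREATE TABLE t(a,b,
+** UNIQUE(a,b)) a violation builds the column list "t.a, t.b"; an
+** expression index is named instead, e.g. "index 'i1'".  The
+** "UNIQUE constraint failed: " prefix is attached later, when the
+** OP_Halt coded by sqlite3HaltConstraint() fires with
+** P5_ConstraintUnique.
+*/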
+
+
+/*
+** Code an OP_Halt due to non-unique rowid.
+*/
+SQLITE_PRIVATE void sqlite3RowidConstraint(
+ Parse *pParse, /* Parsing context */
+ int onError, /* Conflict resolution algorithm */
+ Table *pTab /* The table with the non-unique rowid */
+){
+ char *zMsg;
+ int rc;
+ if( pTab->iPKey>=0 ){
+ zMsg = sqlite3MPrintf(pParse->db, "%s.%s", pTab->zName,
+ pTab->aCol[pTab->iPKey].zName);
+ rc = SQLITE_CONSTRAINT_PRIMARYKEY;
+ }else{
+ zMsg = sqlite3MPrintf(pParse->db, "%s.rowid", pTab->zName);
+ rc = SQLITE_CONSTRAINT_ROWID;
+ }
+ sqlite3HaltConstraint(pParse, rc, onError, zMsg, P4_DYNAMIC,
+ P5_ConstraintUnique);
+}
+
+/*
+** Check to see if pIndex uses the collating sequence pColl. Return
+** true if it does and false if it does not.
+*/
+#ifndef SQLITE_OMIT_REINDEX
+static int collationMatch(const char *zColl, Index *pIndex){
+ int i;
+ assert( zColl!=0 );
+ for(i=0; i<pIndex->nColumn; i++){
+ const char *z = pIndex->azColl[i];
+ assert( z!=0 || pIndex->aiColumn[i]<0 );
+ if( pIndex->aiColumn[i]>=0 && 0==sqlite3StrICmp(z, zColl) ){
+ return 1;
+ }
+ }
+ return 0;
+}
+#endif
+
+/*
+** Recompute all indices of pTab that use the collating sequence pColl.
+** If pColl==0 then recompute all indices of pTab.
+*/
+#ifndef SQLITE_OMIT_REINDEX
+static void reindexTable(Parse *pParse, Table *pTab, char const *zColl){
+ if( !IsVirtual(pTab) ){
+ Index *pIndex; /* An index associated with pTab */
+
+ for(pIndex=pTab->pIndex; pIndex; pIndex=pIndex->pNext){
+ if( zColl==0 || collationMatch(zColl, pIndex) ){
+ int iDb = sqlite3SchemaToIndex(pParse->db, pTab->pSchema);
+ sqlite3BeginWriteOperation(pParse, 0, iDb);
+ sqlite3RefillIndex(pParse, pIndex, -1);
+ }
+ }
+ }
+}
+#endif
+
+/*
+** Recompute all indices of all tables in all databases where the
+** indices use the collating sequence pColl. If pColl==0 then recompute
+** all indices everywhere.
+*/
+#ifndef SQLITE_OMIT_REINDEX
+static void reindexDatabases(Parse *pParse, char const *zColl){
+ Db *pDb; /* A single database */
+ int iDb; /* The database index number */
+ sqlite3 *db = pParse->db; /* The database connection */
+ HashElem *k; /* For looping over tables in pDb */
+ Table *pTab; /* A table in the database */
+
+ assert( sqlite3BtreeHoldsAllMutexes(db) ); /* Needed for schema access */
+ for(iDb=0, pDb=db->aDb; iDb<db->nDb; iDb++, pDb++){
+ assert( pDb!=0 );
+ for(k=sqliteHashFirst(&pDb->pSchema->tblHash); k; k=sqliteHashNext(k)){
+ pTab = (Table*)sqliteHashData(k);
+ reindexTable(pParse, pTab, zColl);
+ }
+ }
+}
+#endif
+
+/*
+** Generate code for the REINDEX command.
+**
+** REINDEX -- 1
+** REINDEX <collation> -- 2
+** REINDEX ?<database>.?<tablename> -- 3
+** REINDEX ?<database>.?<indexname> -- 4
+**
+** Form 1 causes all indices in all attached databases to be rebuilt.
+** Form 2 rebuilds all indices in all databases that use the named
+** collating function. Forms 3 and 4 rebuild the named index or all
+** indices associated with the named table.
+*/
+#ifndef SQLITE_OMIT_REINDEX
+SQLITE_PRIVATE void sqlite3Reindex(Parse *pParse, Token *pName1, Token *pName2){
+ CollSeq *pColl; /* Collating sequence to be reindexed, or NULL */
+ char *z; /* Name of a table or index */
+ const char *zDb; /* Name of the database */
+ Table *pTab; /* A table in the database */
+ Index *pIndex; /* An index associated with pTab */
+ int iDb; /* The database index number */
+ sqlite3 *db = pParse->db; /* The database connection */
+ Token *pObjName; /* Name of the table or index to be reindexed */
+
+ /* Read the database schema. If an error occurs, leave an error message
+  ** and code in pParse and return. */
+ if( SQLITE_OK!=sqlite3ReadSchema(pParse) ){
+ return;
+ }
+
+ if( pName1==0 ){
+ reindexDatabases(pParse, 0);
+ return;
+ }else if( NEVER(pName2==0) || pName2->z==0 ){
+ char *zColl;
+ assert( pName1->z );
+ zColl = sqlite3NameFromToken(pParse->db, pName1);
+ if( !zColl ) return;
+ pColl = sqlite3FindCollSeq(db, ENC(db), zColl, 0);
+ if( pColl ){
+ reindexDatabases(pParse, zColl);
+ sqlite3DbFree(db, zColl);
+ return;
+ }
+ sqlite3DbFree(db, zColl);
+ }
+ iDb = sqlite3TwoPartName(pParse, pName1, pName2, &pObjName);
+ if( iDb<0 ) return;
+ z = sqlite3NameFromToken(db, pObjName);
+ if( z==0 ) return;
+ zDb = db->aDb[iDb].zDbSName;
+ pTab = sqlite3FindTable(db, z, zDb);
+ if( pTab ){
+ reindexTable(pParse, pTab, 0);
+ sqlite3DbFree(db, z);
+ return;
+ }
+ pIndex = sqlite3FindIndex(db, z, zDb);
+ sqlite3DbFree(db, z);
+ if( pIndex ){
+ sqlite3BeginWriteOperation(pParse, 0, iDb);
+ sqlite3RefillIndex(pParse, pIndex, -1);
+ return;
+ }
+ sqlite3ErrorMsg(pParse, "unable to identify the object to be reindexed");
+}
+#endif
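+
+/* Usage sketch matching the four forms above (hypothetical names):
+**
+**     REINDEX;            -- form 1: every index in every database
+**     REINDEX nocase;     -- form 2: all indices using collation NOCASE
+**     REINDEX main.t1;    -- form 3: all indices on table t1
+**     REINDEX main.i1;    -- form 4: just the index i1
+*/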
+
+/*
+** Return a KeyInfo structure that is appropriate for the given Index.
+**
+** The caller should invoke sqlite3KeyInfoUnref() on the returned object
+** when it has finished using it.
+*/
+SQLITE_PRIVATE KeyInfo *sqlite3KeyInfoOfIndex(Parse *pParse, Index *pIdx){
+ int i;
+ int nCol = pIdx->nColumn;
+ int nKey = pIdx->nKeyCol;
+ KeyInfo *pKey;
+ if( pParse->nErr ) return 0;
+ if( pIdx->uniqNotNull ){
+ pKey = sqlite3KeyInfoAlloc(pParse->db, nKey, nCol-nKey);
+ }else{
+ pKey = sqlite3KeyInfoAlloc(pParse->db, nCol, 0);
+ }
+ if( pKey ){
+ assert( sqlite3KeyInfoIsWriteable(pKey) );
+ for(i=0; i<nCol; i++){
+ const char *zColl = pIdx->azColl[i];
+ pKey->aColl[i] = zColl==sqlite3StrBINARY ? 0 :
+ sqlite3LocateCollSeq(pParse, zColl);
+ pKey->aSortFlags[i] = pIdx->aSortOrder[i];
+ assert( 0==(pKey->aSortFlags[i] & KEYINFO_ORDER_BIGNULL) );
+ }
+ if( pParse->nErr ){
+ assert( pParse->rc==SQLITE_ERROR_MISSING_COLLSEQ );
+ if( pIdx->bNoQuery==0 ){
+      /* Deactivate the index because it contains an unknown collating
+      ** sequence.  The only way to reactivate the index is to reload the
+      ** schema.  Adding the missing collating sequence later does not
+      ** reactivate the index.  The application had the chance to register
+      ** the missing collating sequence using the collation-needed
+      ** callback.  For simplicity, SQLite will not give the application
+      ** a second chance.
+ */
+ pIdx->bNoQuery = 1;
+ pParse->rc = SQLITE_ERROR_RETRY;
+ }
+ sqlite3KeyInfoUnref(pKey);
+ pKey = 0;
+ }
+ }
+ return pKey;
+}
+
+#ifndef SQLITE_OMIT_CTE
+/*
+** This routine is invoked once per CTE by the parser while parsing a
+** WITH clause.
+*/
+SQLITE_PRIVATE With *sqlite3WithAdd(
+ Parse *pParse, /* Parsing context */
+ With *pWith, /* Existing WITH clause, or NULL */
+ Token *pName, /* Name of the common-table */
+ ExprList *pArglist, /* Optional column name list for the table */
+ Select *pQuery /* Query used to initialize the table */
+){
+ sqlite3 *db = pParse->db;
+ With *pNew;
+ char *zName;
+
+ /* Check that the CTE name is unique within this WITH clause. If
+ ** not, store an error in the Parse structure. */
+ zName = sqlite3NameFromToken(pParse->db, pName);
+ if( zName && pWith ){
+ int i;
+ for(i=0; i<pWith->nCte; i++){
+ if( sqlite3StrICmp(zName, pWith->a[i].zName)==0 ){
+ sqlite3ErrorMsg(pParse, "duplicate WITH table name: %s", zName);
+ }
+ }
+ }
+
+ if( pWith ){
+ sqlite3_int64 nByte = sizeof(*pWith) + (sizeof(pWith->a[1]) * pWith->nCte);
+ pNew = sqlite3DbRealloc(db, pWith, nByte);
+ }else{
+ pNew = sqlite3DbMallocZero(db, sizeof(*pWith));
+ }
+ assert( (pNew!=0 && zName!=0) || db->mallocFailed );
+
+ if( db->mallocFailed ){
+ sqlite3ExprListDelete(db, pArglist);
+ sqlite3SelectDelete(db, pQuery);
+ sqlite3DbFree(db, zName);
+ pNew = pWith;
+ }else{
+ pNew->a[pNew->nCte].pSelect = pQuery;
+ pNew->a[pNew->nCte].pCols = pArglist;
+ pNew->a[pNew->nCte].zName = zName;
+ pNew->a[pNew->nCte].zCteErr = 0;
+ pNew->nCte++;
+ }
+
+ return pNew;
+}
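+
+/* Parsing sketch (hypothetical CTE name): each common table expression
+** in a WITH clause produces one call to this routine.  For example,
+**
+**     WITH c(x) AS (SELECT 1) SELECT x FROM c;
+**
+** calls sqlite3WithAdd() once with pName=="c", pArglist==(x) and
+** pQuery==(SELECT 1).
+*/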
+
+/*
+** Free the contents of the With object passed as the second argument.
+*/
+SQLITE_PRIVATE void sqlite3WithDelete(sqlite3 *db, With *pWith){
+ if( pWith ){
+ int i;
+ for(i=0; i<pWith->nCte; i++){
+ struct Cte *pCte = &pWith->a[i];
+ sqlite3ExprListDelete(db, pCte->pCols);
+ sqlite3SelectDelete(db, pCte->pSelect);
+ sqlite3DbFree(db, pCte->zName);
+ }
+ sqlite3DbFree(db, pWith);
+ }
+}
+#endif /* !defined(SQLITE_OMIT_CTE) */
+
+/************** End of build.c ***********************************************/
+/************** Begin file callback.c ****************************************/
+/*
+** 2005 May 23
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+**
+** This file contains functions used to access the internal hash tables
+** of user defined functions and collation sequences.
+*/
+
+/* #include "sqliteInt.h" */
+
+/*
+** Invoke the 'collation needed' callback to request a collation sequence
+** in the encoding enc of name zName.
+*/
+static void callCollNeeded(sqlite3 *db, int enc, const char *zName){
+ assert( !db->xCollNeeded || !db->xCollNeeded16 );
+ if( db->xCollNeeded ){
+ char *zExternal = sqlite3DbStrDup(db, zName);
+ if( !zExternal ) return;
+ db->xCollNeeded(db->pCollNeededArg, db, enc, zExternal);
+ sqlite3DbFree(db, zExternal);
+ }
+#ifndef SQLITE_OMIT_UTF16
+ if( db->xCollNeeded16 ){
+ char const *zExternal;
+ sqlite3_value *pTmp = sqlite3ValueNew(db);
+ sqlite3ValueSetStr(pTmp, -1, zName, SQLITE_UTF8, SQLITE_STATIC);
+ zExternal = sqlite3ValueText(pTmp, SQLITE_UTF16NATIVE);
+ if( zExternal ){
+ db->xCollNeeded16(db->pCollNeededArg, db, (int)ENC(db), zExternal);
+ }
+ sqlite3ValueFree(pTmp);
+ }
+#endif
+}
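+
+/* Registration sketch from the application side (hypothetical callback
+** and comparator names; error handling elided).  This is the callback
+** that the routine above invokes:
+**
+**     static void xNeeded(void *pNotUsed, sqlite3 *db, int enc,
+**                         const char *zName){
+**       sqlite3_create_collation(db, zName, enc, 0, myCompare);
+**     }
+**     ...
+**     sqlite3_collation_needed(db, 0, xNeeded);
+*/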
+
+/*
+** This routine is called if the collation factory fails to deliver a
+** collation function in the best encoding but there may be other versions
+** of this collation function (for other text encodings) available. Use one
+** of these instead if they exist. Avoid a UTF-8 <-> UTF-16 conversion if
+** possible.
+*/
+static int synthCollSeq(sqlite3 *db, CollSeq *pColl){
+ CollSeq *pColl2;
+ char *z = pColl->zName;
+ int i;
+ static const u8 aEnc[] = { SQLITE_UTF16BE, SQLITE_UTF16LE, SQLITE_UTF8 };
+ for(i=0; i<3; i++){
+ pColl2 = sqlite3FindCollSeq(db, aEnc[i], z, 0);
+ if( pColl2->xCmp!=0 ){
+ memcpy(pColl, pColl2, sizeof(CollSeq));
+ pColl->xDel = 0; /* Do not copy the destructor */
+ return SQLITE_OK;
+ }
+ }
+ return SQLITE_ERROR;
+}
+
+/*
+** This routine is called on a collation sequence before it is used to
+** check that it is defined. An undefined collation sequence exists when
+** a database is loaded that contains references to collation sequences
+** that have not been defined by sqlite3_create_collation() etc.
+**
+** If required, this routine calls the 'collation needed' callback to
+** request a definition of the collating sequence. If this doesn't work,
+** an equivalent collating sequence that uses a text encoding different
+** from the main database is substituted, if one is available.
+*/
+SQLITE_PRIVATE int sqlite3CheckCollSeq(Parse *pParse, CollSeq *pColl){
+ if( pColl && pColl->xCmp==0 ){
+ const char *zName = pColl->zName;
+ sqlite3 *db = pParse->db;
+ CollSeq *p = sqlite3GetCollSeq(pParse, ENC(db), pColl, zName);
+ if( !p ){
+ return SQLITE_ERROR;
+ }
+ assert( p==pColl );
+ }
+ return SQLITE_OK;
+}
+
+
+
+/*
+** Locate and return an entry from the db.aCollSeq hash table. If the entry
+** specified by zName is not found and parameter 'create' is
+** true, then create a new entry. Otherwise return NULL.
+**
+** Each pointer stored in the sqlite3.aCollSeq hash table contains an
+** array of three CollSeq structures. The first is the collation sequence
+** preferred for UTF-8, the second UTF-16le, and the third UTF-16be.
+**
+** Stored immediately after the three collation sequences is a copy of
+** the collation sequence name. A pointer to this string is stored in
+** each collation sequence structure.
+*/
+static CollSeq *findCollSeqEntry(
+ sqlite3 *db, /* Database connection */
+ const char *zName, /* Name of the collating sequence */
+ int create /* Create a new entry if true */
+){
+ CollSeq *pColl;
+ pColl = sqlite3HashFind(&db->aCollSeq, zName);
+
+ if( 0==pColl && create ){
+ int nName = sqlite3Strlen30(zName) + 1;
+ pColl = sqlite3DbMallocZero(db, 3*sizeof(*pColl) + nName);
+ if( pColl ){
+ CollSeq *pDel = 0;
+ pColl[0].zName = (char*)&pColl[3];
+ pColl[0].enc = SQLITE_UTF8;
+ pColl[1].zName = (char*)&pColl[3];
+ pColl[1].enc = SQLITE_UTF16LE;
+ pColl[2].zName = (char*)&pColl[3];
+ pColl[2].enc = SQLITE_UTF16BE;
+ memcpy(pColl[0].zName, zName, nName);
+ pDel = sqlite3HashInsert(&db->aCollSeq, pColl[0].zName, pColl);
+
+ /* If a malloc() failure occurred in sqlite3HashInsert(), it will
+ ** return the pColl pointer to be deleted (because it wasn't added
+ ** to the hash table).
+ */
+ assert( pDel==0 || pDel==pColl );
+ if( pDel!=0 ){
+ sqlite3OomFault(db);
+ sqlite3DbFree(db, pDel);
+ pColl = 0;
+ }
+ }
+ }
+ return pColl;
+}
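+
+/* Layout sketch of a single hash entry allocated above:
+**
+**     pColl[0]   CollSeq for SQLITE_UTF8    (enc==1)
+**     pColl[1]   CollSeq for SQLITE_UTF16LE (enc==2)
+**     pColl[2]   CollSeq for SQLITE_UTF16BE (enc==3)
+**     pColl[3]   start of the copied zName (all three zName
+**                fields point here)
+**
+** sqlite3FindCollSeq() below indexes into this array with enc-1.
+*/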
+
+/*
+** Parameter zName points to a zero-terminated UTF-8 encoded string.
+** Return the CollSeq* pointer for the collation sequence named zName
+** for the encoding 'enc' from the database 'db'.
+**
+** If the entry specified is not found and 'create' is true, then create a
+** new entry. Otherwise return NULL.
+**
+** A separate function sqlite3LocateCollSeq() is a wrapper around
+** this routine. sqlite3LocateCollSeq() invokes the collation factory
+** if necessary and generates an error message if the collating sequence
+** cannot be found.
+**
+** See also: sqlite3LocateCollSeq(), sqlite3GetCollSeq()
+*/
+SQLITE_PRIVATE CollSeq *sqlite3FindCollSeq(
+ sqlite3 *db, /* Database connection to search */
+ u8 enc, /* Desired text encoding */
+ const char *zName, /* Name of the collating sequence. Might be NULL */
+ int create /* True to create CollSeq if doesn't already exist */
+){
+ CollSeq *pColl;
+ assert( SQLITE_UTF8==1 && SQLITE_UTF16LE==2 && SQLITE_UTF16BE==3 );
+ assert( enc>=SQLITE_UTF8 && enc<=SQLITE_UTF16BE );
+ if( zName ){
+ pColl = findCollSeqEntry(db, zName, create);
+ if( pColl ) pColl += enc-1;
+ }else{
+ pColl = db->pDfltColl;
+ }
+ return pColl;
+}
+
+/*
+** Change the text encoding for a database connection. This means that
+** the pDfltColl must change as well.
+*/
+SQLITE_PRIVATE void sqlite3SetTextEncoding(sqlite3 *db, u8 enc){
+ assert( enc==SQLITE_UTF8 || enc==SQLITE_UTF16LE || enc==SQLITE_UTF16BE );
+ db->enc = enc;
+ /* EVIDENCE-OF: R-08308-17224 The default collating function for all
+ ** strings is BINARY.
+ */
+ db->pDfltColl = sqlite3FindCollSeq(db, enc, sqlite3StrBINARY, 0);
+}
+
+/*
+** This function is responsible for invoking the collation factory callback
+** or substituting a collation sequence of a different encoding when the
+** requested collation sequence is not available in the desired encoding.
+**
+** If it is not NULL, then pColl must point to the database native encoding
+** collation sequence with name zName.
+**
+** The return value is either the collation sequence to be used in database
+** db for collation type name zName, or NULL if no collation
+** sequence can be found. If no collation is found, leave an error message.
+**
+** See also: sqlite3LocateCollSeq(), sqlite3FindCollSeq()
+*/
+SQLITE_PRIVATE CollSeq *sqlite3GetCollSeq(
+ Parse *pParse, /* Parsing context */
+ u8 enc, /* The desired encoding for the collating sequence */
+ CollSeq *pColl, /* Collating sequence with native encoding, or NULL */
+ const char *zName /* Collating sequence name */
+){
+ CollSeq *p;
+ sqlite3 *db = pParse->db;
+
+ p = pColl;
+ if( !p ){
+ p = sqlite3FindCollSeq(db, enc, zName, 0);
+ }
+ if( !p || !p->xCmp ){
+ /* No collation sequence of this type for this encoding is registered.
+ ** Call the collation factory to see if it can supply us with one.
+ */
+ callCollNeeded(db, enc, zName);
+ p = sqlite3FindCollSeq(db, enc, zName, 0);
+ }
+ if( p && !p->xCmp && synthCollSeq(db, p) ){
+ p = 0;
+ }
+ assert( !p || p->xCmp );
+ if( p==0 ){
+ sqlite3ErrorMsg(pParse, "no such collation sequence: %s", zName);
+ pParse->rc = SQLITE_ERROR_MISSING_COLLSEQ;
+ }
+ return p;
+}
+
+/*
+** This function returns the collation sequence for database native text
+** encoding identified by the string zName.
+**
+** If the requested collation sequence is not available, or not available
+** in the database native encoding, the collation factory is invoked to
+** request it. If the collation factory does not supply such a sequence,
+** and the sequence is available in another text encoding, then that is
+** returned instead.
+**
+** If no versions of the requested collation sequence are available, or
+** another error occurs, NULL is returned and an error message written into
+** pParse.
+**
+** This routine is a wrapper around sqlite3FindCollSeq(). This routine
+** invokes the collation factory if the named collation cannot be found
+** and generates an error message.
+**
+** See also: sqlite3FindCollSeq(), sqlite3GetCollSeq()
+*/
+SQLITE_PRIVATE CollSeq *sqlite3LocateCollSeq(Parse *pParse, const char *zName){
+ sqlite3 *db = pParse->db;
+ u8 enc = ENC(db);
+ u8 initbusy = db->init.busy;
+ CollSeq *pColl;
+
+ pColl = sqlite3FindCollSeq(db, enc, zName, initbusy);
+ if( !initbusy && (!pColl || !pColl->xCmp) ){
+ pColl = sqlite3GetCollSeq(pParse, enc, pColl, zName);
+ }
+
+ return pColl;
+}
+
+/* During the search for the best function definition, this procedure
+** is called to test how well the function passed as the first argument
+** matches the request for a function with nArg arguments in a system
+** that uses encoding enc. The value returned indicates how well the
+** request is matched. A higher value indicates a better match.
+**
+** If nArg is -1 that means to only return a match (non-zero) if p->nArg
+** is also -1. In other words, we are searching for a function that
+** takes a variable number of arguments.
+**
+** If nArg is -2 that means that we are searching for any function
+** regardless of the number of arguments it uses, so return a positive
+** match score for any function that has a non-NULL xSFunc.
+**
+** The returned value is always between 0 and 6, as follows:
+**
+** 0: Not a match.
+** 1: UTF8/16 conversion required and function takes any number of arguments.
+** 2: UTF16 byte order change required and function takes any number of args.
+**  3: encoding matches and function takes any number of arguments.
+** 4: UTF8/16 conversion required - argument count matches exactly
+** 5: UTF16 byte order conversion required - argument count matches exactly
+** 6: Perfect match: encoding and argument count match exactly.
+**
+** If nArg==(-2) then any function with a non-null xSFunc is
+** a perfect match and any function with xSFunc NULL is
+** a non-match.
+*/
+#define FUNC_PERFECT_MATCH 6 /* The score for a perfect match */
+static int matchQuality(
+ FuncDef *p, /* The function we are evaluating for match quality */
+ int nArg, /* Desired number of arguments. (-1)==any */
+ u8 enc /* Desired text encoding */
+){
+ int match;
+ assert( p->nArg>=-1 );
+
+ /* Wrong number of arguments means "no match" */
+ if( p->nArg!=nArg ){
+ if( nArg==(-2) ) return (p->xSFunc==0) ? 0 : FUNC_PERFECT_MATCH;
+ if( p->nArg>=0 ) return 0;
+ }
+
+ /* Give a better score to a function with a specific number of arguments
+  ** than to a function that accepts any number of arguments. */
+ if( p->nArg==nArg ){
+ match = 4;
+ }else{
+ match = 1;
+ }
+
+ /* Bonus points if the text encoding matches */
+ if( enc==(p->funcFlags & SQLITE_FUNC_ENCMASK) ){
+ match += 2; /* Exact encoding match */
+ }else if( (enc & p->funcFlags & 2)!=0 ){
+ match += 1; /* Both are UTF16, but with different byte orders */
+ }
+
+ return match;
+}
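+
+/*
+** Worked example (illustrative, assuming a UTF-8 database): when
+** resolving a two-argument call against a candidate with p->nArg==2 and
+** SQLITE_UTF8 encoding, the argument counts match (match=4) and the
+** encodings match (+2), giving the perfect score of 6. A variadic
+** candidate (p->nArg<0) registered with a UTF-16 encoding would score
+** only 1, so the exact two-argument UTF-8 definition wins.
+*/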
+
+/*
+** Search a FuncDefHash for a function with the given name. Return
+** a pointer to the matching FuncDef if found, or 0 if there is no match.
+*/
+SQLITE_PRIVATE FuncDef *sqlite3FunctionSearch(
+ int h, /* Hash of the name */
+ const char *zFunc /* Name of function */
+){
+ FuncDef *p;
+ for(p=sqlite3BuiltinFunctions.a[h]; p; p=p->u.pHash){
+ if( sqlite3StrICmp(p->zName, zFunc)==0 ){
+ return p;
+ }
+ }
+ return 0;
+}
+
+/*
+** Insert a new FuncDef into a FuncDefHash hash table.
+*/
+SQLITE_PRIVATE void sqlite3InsertBuiltinFuncs(
+ FuncDef *aDef, /* List of global functions to be inserted */
+  int nDef            /* Length of the aDef[] list */
+){
+ int i;
+ for(i=0; i<nDef; i++){
+ FuncDef *pOther;
+ const char *zName = aDef[i].zName;
+ int nName = sqlite3Strlen30(zName);
+ int h = SQLITE_FUNC_HASH(zName[0], nName);
+ assert( zName[0]>='a' && zName[0]<='z' );
+ pOther = sqlite3FunctionSearch(h, zName);
+ if( pOther ){
+ assert( pOther!=&aDef[i] && pOther->pNext!=&aDef[i] );
+ aDef[i].pNext = pOther->pNext;
+ pOther->pNext = &aDef[i];
+ }else{
+ aDef[i].pNext = 0;
+ aDef[i].u.pHash = sqlite3BuiltinFunctions.a[h];
+ sqlite3BuiltinFunctions.a[h] = &aDef[i];
+ }
+ }
+}
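+
+/*
+** Illustrative sketch of the resulting structure: same-named overloads
+** (for example the two- and three-argument forms of substr()) are linked
+** through FuncDef.pNext, while distinct names that hash to the same
+** bucket are linked through FuncDef.u.pHash. sqlite3FunctionSearch()
+** therefore returns the head of the overload chain for a name, and the
+** caller walks pNext to pick the best argument-count match.
+*/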
+
+
+
+/*
+** Locate a user function given a name, a number of arguments and a flag
+** indicating whether the function prefers UTF-16 over UTF-8. Return a
+** pointer to the FuncDef structure that defines that function, or return
+** NULL if the function does not exist.
+**
+** If the createFlag argument is true, then a new (blank) FuncDef
+** structure is created and linked into the "db" structure if no
+** matching function previously existed.
+**
+** If nArg is -2, then the first valid function found is returned. A
+** function is valid if xSFunc is non-zero. The nArg==(-2)
+** case is used to see if zName is a valid function name for some number
+** of arguments. If nArg is -2, then createFlag must be 0.
+**
+** If createFlag is false, then a function with the required name and
+** number of arguments may be returned even if the eTextRep flag does not
+** match that requested.
+*/
+SQLITE_PRIVATE FuncDef *sqlite3FindFunction(
+ sqlite3 *db, /* An open database */
+ const char *zName, /* Name of the function. zero-terminated */
+ int nArg, /* Number of arguments. -1 means any number */
+ u8 enc, /* Preferred text encoding */
+ u8 createFlag /* Create new entry if true and does not otherwise exist */
+){
+ FuncDef *p; /* Iterator variable */
+ FuncDef *pBest = 0; /* Best match found so far */
+ int bestScore = 0; /* Score of best match */
+ int h; /* Hash value */
+ int nName; /* Length of the name */
+
+ assert( nArg>=(-2) );
+ assert( nArg>=(-1) || createFlag==0 );
+ nName = sqlite3Strlen30(zName);
+
+ /* First search for a match amongst the application-defined functions.
+ */
+ p = (FuncDef*)sqlite3HashFind(&db->aFunc, zName);
+ while( p ){
+ int score = matchQuality(p, nArg, enc);
+ if( score>bestScore ){
+ pBest = p;
+ bestScore = score;
+ }
+ p = p->pNext;
+ }
+
+ /* If no match is found, search the built-in functions.
+ **
+ ** If the DBFLAG_PreferBuiltin flag is set, then search the built-in
+ ** functions even if a prior app-defined function was found. And give
+ ** priority to built-in functions.
+ **
+ ** Except, if createFlag is true, that means that we are trying to
+ ** install a new function. Whatever FuncDef structure is returned it will
+ ** have fields overwritten with new information appropriate for the
+ ** new function. But the FuncDefs for built-in functions are read-only.
+ ** So we must not search for built-ins when creating a new function.
+ */
+ if( !createFlag && (pBest==0 || (db->mDbFlags & DBFLAG_PreferBuiltin)!=0) ){
+ bestScore = 0;
+ h = SQLITE_FUNC_HASH(sqlite3UpperToLower[(u8)zName[0]], nName);
+ p = sqlite3FunctionSearch(h, zName);
+ while( p ){
+ int score = matchQuality(p, nArg, enc);
+ if( score>bestScore ){
+ pBest = p;
+ bestScore = score;
+ }
+ p = p->pNext;
+ }
+ }
+
+ /* If the createFlag parameter is true and the search did not reveal an
+ ** exact match for the name, number of arguments and encoding, then add a
+ ** new entry to the hash table and return it.
+ */
+ if( createFlag && bestScore<FUNC_PERFECT_MATCH &&
+ (pBest = sqlite3DbMallocZero(db, sizeof(*pBest)+nName+1))!=0 ){
+ FuncDef *pOther;
+ u8 *z;
+ pBest->zName = (const char*)&pBest[1];
+ pBest->nArg = (u16)nArg;
+ pBest->funcFlags = enc;
+ memcpy((char*)&pBest[1], zName, nName+1);
+ for(z=(u8*)pBest->zName; *z; z++) *z = sqlite3UpperToLower[*z];
+ pOther = (FuncDef*)sqlite3HashInsert(&db->aFunc, pBest->zName, pBest);
+ if( pOther==pBest ){
+ sqlite3DbFree(db, pBest);
+ sqlite3OomFault(db);
+ return 0;
+ }else{
+ pBest->pNext = pOther;
+ }
+ }
+
+ if( pBest && (pBest->xSFunc || createFlag) ){
+ return pBest;
+ }
+ return 0;
+}
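+
+/*
+** Illustrative sketch: if an application registers its own two-argument
+** max() using sqlite3_create_function(), a subsequent lookup with
+** nArg==2 returns the application-defined version in preference to the
+** built-in, because the application hash table is searched first and
+** the built-in table is only consulted when no match was found there
+** (or when DBFLAG_PreferBuiltin is set).
+*/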
+
+/*
+** Free all resources held by the schema structure. The void* argument points
+** at a Schema struct. This function does not call sqlite3DbFree(db, ) on the
+** pointer itself, it just cleans up subsidiary resources (i.e. the contents
+** of the schema hash tables).
+**
+** The Schema.cache_size variable is not cleared.
+*/
+SQLITE_PRIVATE void sqlite3SchemaClear(void *p){
+ Hash temp1;
+ Hash temp2;
+ HashElem *pElem;
+ Schema *pSchema = (Schema *)p;
+
+ temp1 = pSchema->tblHash;
+ temp2 = pSchema->trigHash;
+ sqlite3HashInit(&pSchema->trigHash);
+ sqlite3HashClear(&pSchema->idxHash);
+ for(pElem=sqliteHashFirst(&temp2); pElem; pElem=sqliteHashNext(pElem)){
+ sqlite3DeleteTrigger(0, (Trigger*)sqliteHashData(pElem));
+ }
+ sqlite3HashClear(&temp2);
+ sqlite3HashInit(&pSchema->tblHash);
+ for(pElem=sqliteHashFirst(&temp1); pElem; pElem=sqliteHashNext(pElem)){
+ Table *pTab = sqliteHashData(pElem);
+ sqlite3DeleteTable(0, pTab);
+ }
+ sqlite3HashClear(&temp1);
+ sqlite3HashClear(&pSchema->fkeyHash);
+ pSchema->pSeqTab = 0;
+ if( pSchema->schemaFlags & DB_SchemaLoaded ){
+ pSchema->iGeneration++;
+ }
+ pSchema->schemaFlags &= ~(DB_SchemaLoaded|DB_ResetWanted);
+}
+
+/*
+** Find and return the schema associated with a BTree. Create
+** a new one if necessary.
+*/
+SQLITE_PRIVATE Schema *sqlite3SchemaGet(sqlite3 *db, Btree *pBt){
+ Schema * p;
+ if( pBt ){
+ p = (Schema *)sqlite3BtreeSchema(pBt, sizeof(Schema), sqlite3SchemaClear);
+ }else{
+ p = (Schema *)sqlite3DbMallocZero(0, sizeof(Schema));
+ }
+ if( !p ){
+ sqlite3OomFault(db);
+ }else if ( 0==p->file_format ){
+ sqlite3HashInit(&p->tblHash);
+ sqlite3HashInit(&p->idxHash);
+ sqlite3HashInit(&p->trigHash);
+ sqlite3HashInit(&p->fkeyHash);
+ p->enc = SQLITE_UTF8;
+ }
+ return p;
+}
+
+/************** End of callback.c ********************************************/
+/************** Begin file delete.c ******************************************/
+/*
+** 2001 September 15
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+** This file contains C code routines that are called by the parser
+** in order to generate code for DELETE FROM statements.
+*/
+/* #include "sqliteInt.h" */
+
+/*
+** While a SrcList can in general represent multiple tables and subqueries
+** (as in the FROM clause of a SELECT statement), in this case it contains
+** the name of a single table, as one might find in an INSERT, DELETE,
+** or UPDATE statement. Look up that table in the symbol table and
+** return a pointer. Set an error message and return NULL if the table
+** name is not found or if any other error occurs.
+**
+** The following fields are initialized appropriately in pSrc:
+**
+** pSrc->a[0].pTab Pointer to the Table object
+** pSrc->a[0].pIndex Pointer to the INDEXED BY index, if there is one
+**
+*/
+SQLITE_PRIVATE Table *sqlite3SrcListLookup(Parse *pParse, SrcList *pSrc){
+ struct SrcList_item *pItem = pSrc->a;
+ Table *pTab;
+ assert( pItem && pSrc->nSrc>=1 );
+ pTab = sqlite3LocateTableItem(pParse, 0, pItem);
+ sqlite3DeleteTable(pParse->db, pItem->pTab);
+ pItem->pTab = pTab;
+ if( pTab ){
+ pTab->nTabRef++;
+ }
+ if( sqlite3IndexedByLookup(pParse, pItem) ){
+ pTab = 0;
+ }
+ return pTab;
+}
+
+/* Return true if table pTab is read-only.
+**
+** A table is read-only if any of the following are true:
+**
+** 1) It is a virtual table and no implementation of the xUpdate method
+** has been provided
+**
+**   2) It is a system table (i.e. sqlite_schema), this call is not
+**      part of a nested parse, and the writable_schema pragma has not
+**      been specified
+**
+** 3) The table is a shadow table, the database connection is in
+** defensive mode, and the current sqlite3_prepare()
+** is for a top-level SQL statement.
+*/
+static int tabIsReadOnly(Parse *pParse, Table *pTab){
+ sqlite3 *db;
+ if( IsVirtual(pTab) ){
+ return sqlite3GetVTable(pParse->db, pTab)->pMod->pModule->xUpdate==0;
+ }
+ if( (pTab->tabFlags & (TF_Readonly|TF_Shadow))==0 ) return 0;
+ db = pParse->db;
+ if( (pTab->tabFlags & TF_Readonly)!=0 ){
+ return sqlite3WritableSchema(db)==0 && pParse->nested==0;
+ }
+ assert( pTab->tabFlags & TF_Shadow );
+ return sqlite3ReadOnlyShadowTables(db);
+}
+
+/*
+** Check to make sure the given table is writable. If it is not
+** writable, generate an error message and return 1. If it is
+** writable, return 0.
+*/
+SQLITE_PRIVATE int sqlite3IsReadOnly(Parse *pParse, Table *pTab, int viewOk){
+ if( tabIsReadOnly(pParse, pTab) ){
+ sqlite3ErrorMsg(pParse, "table %s may not be modified", pTab->zName);
+ return 1;
+ }
+#ifndef SQLITE_OMIT_VIEW
+ if( !viewOk && pTab->pSelect ){
+ sqlite3ErrorMsg(pParse,"cannot modify %s because it is a view",pTab->zName);
+ return 1;
+ }
+#endif
+ return 0;
+}
+
+
+#if !defined(SQLITE_OMIT_VIEW) && !defined(SQLITE_OMIT_TRIGGER)
+/*
+** Evaluate a view and store its result in an ephemeral table. The
+** pWhere argument is an optional WHERE clause that restricts the
+** set of rows in the view that are to be added to the ephemeral table.
+*/
+SQLITE_PRIVATE void sqlite3MaterializeView(
+ Parse *pParse, /* Parsing context */
+ Table *pView, /* View definition */
+ Expr *pWhere, /* Optional WHERE clause to be added */
+ ExprList *pOrderBy, /* Optional ORDER BY clause */
+ Expr *pLimit, /* Optional LIMIT clause */
+ int iCur /* Cursor number for ephemeral table */
+){
+ SelectDest dest;
+ Select *pSel;
+ SrcList *pFrom;
+ sqlite3 *db = pParse->db;
+ int iDb = sqlite3SchemaToIndex(db, pView->pSchema);
+ pWhere = sqlite3ExprDup(db, pWhere, 0);
+ pFrom = sqlite3SrcListAppend(pParse, 0, 0, 0);
+ if( pFrom ){
+ assert( pFrom->nSrc==1 );
+ pFrom->a[0].zName = sqlite3DbStrDup(db, pView->zName);
+ pFrom->a[0].zDatabase = sqlite3DbStrDup(db, db->aDb[iDb].zDbSName);
+ assert( pFrom->a[0].pOn==0 );
+ assert( pFrom->a[0].pUsing==0 );
+ }
+ pSel = sqlite3SelectNew(pParse, 0, pFrom, pWhere, 0, 0, pOrderBy,
+ SF_IncludeHidden, pLimit);
+ sqlite3SelectDestInit(&dest, SRT_EphemTab, iCur);
+ sqlite3Select(pParse, pSel, &dest);
+ sqlite3SelectDelete(db, pSel);
+}
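+
+/*
+** Illustrative sketch: given
+**
+**     CREATE VIEW v1 AS SELECT a, b FROM t1;
+**     DELETE FROM v1 WHERE a<10;
+**
+** this routine materializes the equivalent of "SELECT * FROM v1 WHERE
+** a<10" into the ephemeral table opened on cursor iCur, so that the
+** INSTEAD OF DELETE triggers on v1 can then be fired once per row.
+*/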
+#endif /* !defined(SQLITE_OMIT_VIEW) && !defined(SQLITE_OMIT_TRIGGER) */
+
+#if defined(SQLITE_ENABLE_UPDATE_DELETE_LIMIT) && !defined(SQLITE_OMIT_SUBQUERY)
+/*
+** Generate an expression tree to implement the WHERE, ORDER BY,
+** and LIMIT/OFFSET portion of DELETE and UPDATE statements.
+**
+** DELETE FROM table_wxyz WHERE a<5 ORDER BY a LIMIT 1;
+** \__________________________/
+** pLimitWhere (pInClause)
+*/
+SQLITE_PRIVATE Expr *sqlite3LimitWhere(
+ Parse *pParse, /* The parser context */
+ SrcList *pSrc, /* the FROM clause -- which tables to scan */
+ Expr *pWhere, /* The WHERE clause. May be null */
+ ExprList *pOrderBy, /* The ORDER BY clause. May be null */
+ Expr *pLimit, /* The LIMIT clause. May be null */
+ char *zStmtType /* Either DELETE or UPDATE. For err msgs. */
+){
+ sqlite3 *db = pParse->db;
+ Expr *pLhs = NULL; /* LHS of IN(SELECT...) operator */
+ Expr *pInClause = NULL; /* WHERE rowid IN ( select ) */
+  ExprList *pEList = NULL;     /* Expression list containing only pSelectRowid */
+ SrcList *pSelectSrc = NULL; /* SELECT rowid FROM x ... (dup of pSrc) */
+ Select *pSelect = NULL; /* Complete SELECT tree */
+ Table *pTab;
+
+ /* Check that there isn't an ORDER BY without a LIMIT clause.
+ */
+ if( pOrderBy && pLimit==0 ) {
+ sqlite3ErrorMsg(pParse, "ORDER BY without LIMIT on %s", zStmtType);
+ sqlite3ExprDelete(pParse->db, pWhere);
+ sqlite3ExprListDelete(pParse->db, pOrderBy);
+ return 0;
+ }
+
+ /* We only need to generate a select expression if there
+ ** is a limit/offset term to enforce.
+ */
+ if( pLimit == 0 ) {
+ return pWhere;
+ }
+
+ /* Generate a select expression tree to enforce the limit/offset
+ ** term for the DELETE or UPDATE statement. For example:
+ ** DELETE FROM table_a WHERE col1=1 ORDER BY col2 LIMIT 1 OFFSET 1
+ ** becomes:
+ ** DELETE FROM table_a WHERE rowid IN (
+ ** SELECT rowid FROM table_a WHERE col1=1 ORDER BY col2 LIMIT 1 OFFSET 1
+ ** );
+ */
+
+ pTab = pSrc->a[0].pTab;
+ if( HasRowid(pTab) ){
+ pLhs = sqlite3PExpr(pParse, TK_ROW, 0, 0);
+ pEList = sqlite3ExprListAppend(
+ pParse, 0, sqlite3PExpr(pParse, TK_ROW, 0, 0)
+ );
+ }else{
+ Index *pPk = sqlite3PrimaryKeyIndex(pTab);
+ if( pPk->nKeyCol==1 ){
+ const char *zName = pTab->aCol[pPk->aiColumn[0]].zName;
+ pLhs = sqlite3Expr(db, TK_ID, zName);
+ pEList = sqlite3ExprListAppend(pParse, 0, sqlite3Expr(db, TK_ID, zName));
+ }else{
+ int i;
+ for(i=0; i<pPk->nKeyCol; i++){
+ Expr *p = sqlite3Expr(db, TK_ID, pTab->aCol[pPk->aiColumn[i]].zName);
+ pEList = sqlite3ExprListAppend(pParse, pEList, p);
+ }
+ pLhs = sqlite3PExpr(pParse, TK_VECTOR, 0, 0);
+ if( pLhs ){
+ pLhs->x.pList = sqlite3ExprListDup(db, pEList, 0);
+ }
+ }
+ }
+
+ /* duplicate the FROM clause as it is needed by both the DELETE/UPDATE tree
+ ** and the SELECT subtree. */
+ pSrc->a[0].pTab = 0;
+ pSelectSrc = sqlite3SrcListDup(pParse->db, pSrc, 0);
+ pSrc->a[0].pTab = pTab;
+ pSrc->a[0].pIBIndex = 0;
+
+ /* generate the SELECT expression tree. */
+ pSelect = sqlite3SelectNew(pParse, pEList, pSelectSrc, pWhere, 0 ,0,
+ pOrderBy,0,pLimit
+ );
+
+  /* now generate the new WHERE rowid IN clause for the DELETE/UPDATE */
+ pInClause = sqlite3PExpr(pParse, TK_IN, pLhs, 0);
+ sqlite3PExprAddSelect(pParse, pInClause, pSelect);
+ return pInClause;
+}
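+
+/*
+** Illustrative sketch: for a WITHOUT ROWID table with a composite
+** PRIMARY KEY(a,b), the rewrite shown above uses a row-value vector in
+** place of the rowid:
+**
+**     DELETE FROM t2 WHERE c=1 ORDER BY a LIMIT 1
+** becomes:
+**     DELETE FROM t2 WHERE (a,b) IN (
+**        SELECT a,b FROM t2 WHERE c=1 ORDER BY a LIMIT 1
+**     );
+*/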
+#endif /* defined(SQLITE_ENABLE_UPDATE_DELETE_LIMIT) */
+ /* && !defined(SQLITE_OMIT_SUBQUERY) */
+
+/*
+** Generate code for a DELETE FROM statement.
+**
+** DELETE FROM table_wxyz WHERE a<5 AND b NOT NULL;
+** \________/ \________________/
+** pTabList pWhere
+*/
+SQLITE_PRIVATE void sqlite3DeleteFrom(
+ Parse *pParse, /* The parser context */
+ SrcList *pTabList, /* The table from which we should delete things */
+ Expr *pWhere, /* The WHERE clause. May be null */
+ ExprList *pOrderBy, /* ORDER BY clause. May be null */
+ Expr *pLimit /* LIMIT clause. May be null */
+){
+ Vdbe *v; /* The virtual database engine */
+ Table *pTab; /* The table from which records will be deleted */
+ int i; /* Loop counter */
+ WhereInfo *pWInfo; /* Information about the WHERE clause */
+ Index *pIdx; /* For looping over indices of the table */
+ int iTabCur; /* Cursor number for the table */
+ int iDataCur = 0; /* VDBE cursor for the canonical data source */
+ int iIdxCur = 0; /* Cursor number of the first index */
+ int nIdx; /* Number of indices */
+ sqlite3 *db; /* Main database structure */
+ AuthContext sContext; /* Authorization context */
+ NameContext sNC; /* Name context to resolve expressions in */
+ int iDb; /* Database number */
+ int memCnt = 0; /* Memory cell used for change counting */
+ int rcauth; /* Value returned by authorization callback */
+ int eOnePass; /* ONEPASS_OFF or _SINGLE or _MULTI */
+ int aiCurOnePass[2]; /* The write cursors opened by WHERE_ONEPASS */
+ u8 *aToOpen = 0; /* Open cursor iTabCur+j if aToOpen[j] is true */
+ Index *pPk; /* The PRIMARY KEY index on the table */
+ int iPk = 0; /* First of nPk registers holding PRIMARY KEY value */
+ i16 nPk = 1; /* Number of columns in the PRIMARY KEY */
+ int iKey; /* Memory cell holding key of row to be deleted */
+ i16 nKey; /* Number of memory cells in the row key */
+ int iEphCur = 0; /* Ephemeral table holding all primary key values */
+ int iRowSet = 0; /* Register for rowset of rows to delete */
+ int addrBypass = 0; /* Address of jump over the delete logic */
+ int addrLoop = 0; /* Top of the delete loop */
+ int addrEphOpen = 0; /* Instruction to open the Ephemeral table */
+ int bComplex; /* True if there are triggers or FKs or
+ ** subqueries in the WHERE clause */
+
+#ifndef SQLITE_OMIT_TRIGGER
+ int isView; /* True if attempting to delete from a view */
+ Trigger *pTrigger; /* List of table triggers, if required */
+#endif
+
+ memset(&sContext, 0, sizeof(sContext));
+ db = pParse->db;
+ if( pParse->nErr || db->mallocFailed ){
+ goto delete_from_cleanup;
+ }
+ assert( pTabList->nSrc==1 );
+
+
+ /* Locate the table which we want to delete. This table has to be
+ ** put in an SrcList structure because some of the subroutines we
+ ** will be calling are designed to work with multiple tables and expect
+ ** an SrcList* parameter instead of just a Table* parameter.
+ */
+ pTab = sqlite3SrcListLookup(pParse, pTabList);
+ if( pTab==0 ) goto delete_from_cleanup;
+
+ /* Figure out if we have any triggers and if the table being
+ ** deleted from is a view
+ */
+#ifndef SQLITE_OMIT_TRIGGER
+ pTrigger = sqlite3TriggersExist(pParse, pTab, TK_DELETE, 0, 0);
+ isView = pTab->pSelect!=0;
+#else
+# define pTrigger 0
+# define isView 0
+#endif
+ bComplex = pTrigger || sqlite3FkRequired(pParse, pTab, 0, 0);
+#ifdef SQLITE_OMIT_VIEW
+# undef isView
+# define isView 0
+#endif
+
+#ifdef SQLITE_ENABLE_UPDATE_DELETE_LIMIT
+ if( !isView ){
+ pWhere = sqlite3LimitWhere(
+ pParse, pTabList, pWhere, pOrderBy, pLimit, "DELETE"
+ );
+ pOrderBy = 0;
+ pLimit = 0;
+ }
+#endif
+
+ /* If pTab is really a view, make sure it has been initialized.
+ */
+ if( sqlite3ViewGetColumnNames(pParse, pTab) ){
+ goto delete_from_cleanup;
+ }
+
+ if( sqlite3IsReadOnly(pParse, pTab, (pTrigger?1:0)) ){
+ goto delete_from_cleanup;
+ }
+ iDb = sqlite3SchemaToIndex(db, pTab->pSchema);
+ assert( iDb<db->nDb );
+ rcauth = sqlite3AuthCheck(pParse, SQLITE_DELETE, pTab->zName, 0,
+ db->aDb[iDb].zDbSName);
+ assert( rcauth==SQLITE_OK || rcauth==SQLITE_DENY || rcauth==SQLITE_IGNORE );
+ if( rcauth==SQLITE_DENY ){
+ goto delete_from_cleanup;
+ }
+ assert(!isView || pTrigger);
+
+ /* Assign cursor numbers to the table and all its indices.
+ */
+ assert( pTabList->nSrc==1 );
+ iTabCur = pTabList->a[0].iCursor = pParse->nTab++;
+ for(nIdx=0, pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext, nIdx++){
+ pParse->nTab++;
+ }
+
+ /* Start the view context
+ */
+ if( isView ){
+ sqlite3AuthContextPush(pParse, &sContext, pTab->zName);
+ }
+
+ /* Begin generating code.
+ */
+ v = sqlite3GetVdbe(pParse);
+ if( v==0 ){
+ goto delete_from_cleanup;
+ }
+ if( pParse->nested==0 ) sqlite3VdbeCountChanges(v);
+ sqlite3BeginWriteOperation(pParse, bComplex, iDb);
+
+ /* If we are trying to delete from a view, realize that view into
+ ** an ephemeral table.
+ */
+#if !defined(SQLITE_OMIT_VIEW) && !defined(SQLITE_OMIT_TRIGGER)
+ if( isView ){
+ sqlite3MaterializeView(pParse, pTab,
+ pWhere, pOrderBy, pLimit, iTabCur
+ );
+ iDataCur = iIdxCur = iTabCur;
+ pOrderBy = 0;
+ pLimit = 0;
+ }
+#endif
+
+ /* Resolve the column names in the WHERE clause.
+ */
+ memset(&sNC, 0, sizeof(sNC));
+ sNC.pParse = pParse;
+ sNC.pSrcList = pTabList;
+ if( sqlite3ResolveExprNames(&sNC, pWhere) ){
+ goto delete_from_cleanup;
+ }
+
+ /* Initialize the counter of the number of rows deleted, if
+ ** we are counting rows.
+ */
+ if( (db->flags & SQLITE_CountRows)!=0
+ && !pParse->nested
+ && !pParse->pTriggerTab
+ ){
+ memCnt = ++pParse->nMem;
+ sqlite3VdbeAddOp2(v, OP_Integer, 0, memCnt);
+ }
+
+#ifndef SQLITE_OMIT_TRUNCATE_OPTIMIZATION
+ /* Special case: A DELETE without a WHERE clause deletes everything.
+ ** It is easier just to erase the whole table. Prior to version 3.6.5,
+  ** this optimization caused the row change count (the value returned
+  ** by the sqlite3_changes() API function) to be set incorrectly.
+ **
+ ** The "rcauth==SQLITE_OK" terms is the
+ ** IMPLEMENTATION-OF: R-17228-37124 If the action code is SQLITE_DELETE and
+ ** the callback returns SQLITE_IGNORE then the DELETE operation proceeds but
+ ** the truncate optimization is disabled and all rows are deleted
+ ** individually.
+ */
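+  /* Illustrative sketch: for "DELETE FROM t1;" on a table with no
+  ** triggers, foreign keys, or WHERE clause, the code below emits one
+  ** OP_Clear per btree instead of visiting each row, which is why an
+  ** unqualified DELETE on a large table is near-instantaneous. Any
+  ** WHERE clause, even "WHERE 1", forces the row-by-row path below. */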
+ if( rcauth==SQLITE_OK
+ && pWhere==0
+ && !bComplex
+ && !IsVirtual(pTab)
+#ifdef SQLITE_ENABLE_PREUPDATE_HOOK
+ && db->xPreUpdateCallback==0
+#endif
+ ){
+ assert( !isView );
+ sqlite3TableLock(pParse, iDb, pTab->tnum, 1, pTab->zName);
+ if( HasRowid(pTab) ){
+ sqlite3VdbeAddOp4(v, OP_Clear, pTab->tnum, iDb, memCnt ? memCnt : -1,
+ pTab->zName, P4_STATIC);
+ }
+ for(pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext){
+ assert( pIdx->pSchema==pTab->pSchema );
+ sqlite3VdbeAddOp2(v, OP_Clear, pIdx->tnum, iDb);
+ }
+ }else
+#endif /* SQLITE_OMIT_TRUNCATE_OPTIMIZATION */
+ {
+ u16 wcf = WHERE_ONEPASS_DESIRED|WHERE_DUPLICATES_OK|WHERE_SEEK_TABLE;
+ if( sNC.ncFlags & NC_VarSelect ) bComplex = 1;
+ wcf |= (bComplex ? 0 : WHERE_ONEPASS_MULTIROW);
+ if( HasRowid(pTab) ){
+ /* For a rowid table, initialize the RowSet to an empty set */
+ pPk = 0;
+ nPk = 1;
+ iRowSet = ++pParse->nMem;
+ sqlite3VdbeAddOp2(v, OP_Null, 0, iRowSet);
+ }else{
+ /* For a WITHOUT ROWID table, create an ephemeral table used to
+ ** hold all primary keys for rows to be deleted. */
+ pPk = sqlite3PrimaryKeyIndex(pTab);
+ assert( pPk!=0 );
+ nPk = pPk->nKeyCol;
+ iPk = pParse->nMem+1;
+ pParse->nMem += nPk;
+ iEphCur = pParse->nTab++;
+ addrEphOpen = sqlite3VdbeAddOp2(v, OP_OpenEphemeral, iEphCur, nPk);
+ sqlite3VdbeSetP4KeyInfo(pParse, pPk);
+ }
+
+ /* Construct a query to find the rowid or primary key for every row
+ ** to be deleted, based on the WHERE clause. Set variable eOnePass
+ ** to indicate the strategy used to implement this delete:
+ **
+ ** ONEPASS_OFF: Two-pass approach - use a FIFO for rowids/PK values.
+ ** ONEPASS_SINGLE: One-pass approach - at most one row deleted.
+ ** ONEPASS_MULTI: One-pass approach - any number of rows may be deleted.
+ */
+ pWInfo = sqlite3WhereBegin(pParse, pTabList, pWhere, 0, 0, wcf, iTabCur+1);
+ if( pWInfo==0 ) goto delete_from_cleanup;
+ eOnePass = sqlite3WhereOkOnePass(pWInfo, aiCurOnePass);
+ assert( IsVirtual(pTab)==0 || eOnePass!=ONEPASS_MULTI );
+ assert( IsVirtual(pTab) || bComplex || eOnePass!=ONEPASS_OFF );
+ if( eOnePass!=ONEPASS_SINGLE ) sqlite3MultiWrite(pParse);
+
+ /* Keep track of the number of rows to be deleted */
+ if( memCnt ){
+ sqlite3VdbeAddOp2(v, OP_AddImm, memCnt, 1);
+ }
+
+ /* Extract the rowid or primary key for the current row */
+ if( pPk ){
+ for(i=0; i<nPk; i++){
+ assert( pPk->aiColumn[i]>=0 );
+ sqlite3ExprCodeGetColumnOfTable(v, pTab, iTabCur,
+ pPk->aiColumn[i], iPk+i);
+ }
+ iKey = iPk;
+ }else{
+ iKey = ++pParse->nMem;
+ sqlite3ExprCodeGetColumnOfTable(v, pTab, iTabCur, -1, iKey);
+ }
+
+ if( eOnePass!=ONEPASS_OFF ){
+ /* For ONEPASS, no need to store the rowid/primary-key. There is only
+ ** one, so just keep it in its register(s) and fall through to the
+ ** delete code. */
+ nKey = nPk; /* OP_Found will use an unpacked key */
+ aToOpen = sqlite3DbMallocRawNN(db, nIdx+2);
+ if( aToOpen==0 ){
+ sqlite3WhereEnd(pWInfo);
+ goto delete_from_cleanup;
+ }
+ memset(aToOpen, 1, nIdx+1);
+ aToOpen[nIdx+1] = 0;
+ if( aiCurOnePass[0]>=0 ) aToOpen[aiCurOnePass[0]-iTabCur] = 0;
+ if( aiCurOnePass[1]>=0 ) aToOpen[aiCurOnePass[1]-iTabCur] = 0;
+ if( addrEphOpen ) sqlite3VdbeChangeToNoop(v, addrEphOpen);
+ }else{
+ if( pPk ){
+ /* Add the PK key for this row to the temporary table */
+ iKey = ++pParse->nMem;
+ nKey = 0; /* Zero tells OP_Found to use a composite key */
+ sqlite3VdbeAddOp4(v, OP_MakeRecord, iPk, nPk, iKey,
+ sqlite3IndexAffinityStr(pParse->db, pPk), nPk);
+ sqlite3VdbeAddOp4Int(v, OP_IdxInsert, iEphCur, iKey, iPk, nPk);
+ }else{
+ /* Add the rowid of the row to be deleted to the RowSet */
+ nKey = 1; /* OP_DeferredSeek always uses a single rowid */
+ sqlite3VdbeAddOp2(v, OP_RowSetAdd, iRowSet, iKey);
+ }
+ }
+
+ /* If this DELETE cannot use the ONEPASS strategy, this is the
+ ** end of the WHERE loop */
+ if( eOnePass!=ONEPASS_OFF ){
+ addrBypass = sqlite3VdbeMakeLabel(pParse);
+ }else{
+ sqlite3WhereEnd(pWInfo);
+ }
+
+ /* Unless this is a view, open cursors for the table we are
+ ** deleting from and all its indices. If this is a view, then the
+ ** only effect this statement has is to fire the INSTEAD OF
+ ** triggers.
+ */
+ if( !isView ){
+ int iAddrOnce = 0;
+ if( eOnePass==ONEPASS_MULTI ){
+ iAddrOnce = sqlite3VdbeAddOp0(v, OP_Once); VdbeCoverage(v);
+ }
+ testcase( IsVirtual(pTab) );
+ sqlite3OpenTableAndIndices(pParse, pTab, OP_OpenWrite, OPFLAG_FORDELETE,
+ iTabCur, aToOpen, &iDataCur, &iIdxCur);
+ assert( pPk || IsVirtual(pTab) || iDataCur==iTabCur );
+ assert( pPk || IsVirtual(pTab) || iIdxCur==iDataCur+1 );
+ if( eOnePass==ONEPASS_MULTI ){
+ sqlite3VdbeJumpHereOrPopInst(v, iAddrOnce);
+ }
+ }
+
+ /* Set up a loop over the rowids/primary-keys that were found in the
+ ** where-clause loop above.
+ */
+ if( eOnePass!=ONEPASS_OFF ){
+ assert( nKey==nPk ); /* OP_Found will use an unpacked key */
+ if( !IsVirtual(pTab) && aToOpen[iDataCur-iTabCur] ){
+ assert( pPk!=0 || pTab->pSelect!=0 );
+ sqlite3VdbeAddOp4Int(v, OP_NotFound, iDataCur, addrBypass, iKey, nKey);
+ VdbeCoverage(v);
+ }
+ }else if( pPk ){
+ addrLoop = sqlite3VdbeAddOp1(v, OP_Rewind, iEphCur); VdbeCoverage(v);
+ if( IsVirtual(pTab) ){
+ sqlite3VdbeAddOp3(v, OP_Column, iEphCur, 0, iKey);
+ }else{
+ sqlite3VdbeAddOp2(v, OP_RowData, iEphCur, iKey);
+ }
+ assert( nKey==0 ); /* OP_Found will use a composite key */
+ }else{
+ addrLoop = sqlite3VdbeAddOp3(v, OP_RowSetRead, iRowSet, 0, iKey);
+ VdbeCoverage(v);
+ assert( nKey==1 );
+ }
+
+ /* Delete the row */
+#ifndef SQLITE_OMIT_VIRTUALTABLE
+ if( IsVirtual(pTab) ){
+ const char *pVTab = (const char *)sqlite3GetVTable(db, pTab);
+ sqlite3VtabMakeWritable(pParse, pTab);
+ assert( eOnePass==ONEPASS_OFF || eOnePass==ONEPASS_SINGLE );
+ sqlite3MayAbort(pParse);
+ if( eOnePass==ONEPASS_SINGLE ){
+ sqlite3VdbeAddOp1(v, OP_Close, iTabCur);
+ if( sqlite3IsToplevel(pParse) ){
+ pParse->isMultiWrite = 0;
+ }
+ }
+ sqlite3VdbeAddOp4(v, OP_VUpdate, 0, 1, iKey, pVTab, P4_VTAB);
+ sqlite3VdbeChangeP5(v, OE_Abort);
+ }else
+#endif
+ {
+ int count = (pParse->nested==0); /* True to count changes */
+ sqlite3GenerateRowDelete(pParse, pTab, pTrigger, iDataCur, iIdxCur,
+ iKey, nKey, count, OE_Default, eOnePass, aiCurOnePass[1]);
+ }
+
+ /* End of the loop over all rowids/primary-keys. */
+ if( eOnePass!=ONEPASS_OFF ){
+ sqlite3VdbeResolveLabel(v, addrBypass);
+ sqlite3WhereEnd(pWInfo);
+ }else if( pPk ){
+ sqlite3VdbeAddOp2(v, OP_Next, iEphCur, addrLoop+1); VdbeCoverage(v);
+ sqlite3VdbeJumpHere(v, addrLoop);
+ }else{
+ sqlite3VdbeGoto(v, addrLoop);
+ sqlite3VdbeJumpHere(v, addrLoop);
+ }
+ } /* End non-truncate path */
+
+  /* Update the sqlite_sequence table by storing the maximum rowid
+  ** counter values recorded while inserting into
+ ** autoincrement tables.
+ */
+ if( pParse->nested==0 && pParse->pTriggerTab==0 ){
+ sqlite3AutoincrementEnd(pParse);
+ }
+
+ /* Return the number of rows that were deleted. If this routine is
+ ** generating code because of a call to sqlite3NestedParse(), do not
+ ** invoke the callback function.
+ */
+ if( memCnt ){
+ sqlite3VdbeAddOp2(v, OP_ResultRow, memCnt, 1);
+ sqlite3VdbeSetNumCols(v, 1);
+ sqlite3VdbeSetColName(v, 0, COLNAME_NAME, "rows deleted", SQLITE_STATIC);
+ }
+
+delete_from_cleanup:
+ sqlite3AuthContextPop(&sContext);
+ sqlite3SrcListDelete(db, pTabList);
+ sqlite3ExprDelete(db, pWhere);
+#if defined(SQLITE_ENABLE_UPDATE_DELETE_LIMIT)
+ sqlite3ExprListDelete(db, pOrderBy);
+ sqlite3ExprDelete(db, pLimit);
+#endif
+ sqlite3DbFree(db, aToOpen);
+ return;
+}
+/* Make sure "isView" and other macros defined above are undefined. Otherwise
+** they may interfere with compilation of other functions in this file
+** (or in another file, if this file becomes part of the amalgamation). */
+#ifdef isView
+ #undef isView
+#endif
+#ifdef pTrigger
+ #undef pTrigger
+#endif
+
+/*
+** This routine generates VDBE code that causes a single row of a
+** single table to be deleted. Both the original table entry and
+** all indices are removed.
+**
+** Preconditions:
+**
+** 1. iDataCur is an open cursor on the btree that is the canonical data
+** store for the table. (This will be either the table itself,
+** in the case of a rowid table, or the PRIMARY KEY index in the case
+** of a WITHOUT ROWID table.)
+**
+** 2. Read/write cursors for all indices of pTab must be open as
+** cursor number iIdxCur+i for the i-th index.
+**
+** 3. The primary key for the row to be deleted must be stored in a
+** sequence of nPk memory cells starting at iPk. If nPk==0 that means
+** that a search record formed from OP_MakeRecord is contained in the
+** single memory location iPk.
+**
+** eMode:
+** Parameter eMode may be passed either ONEPASS_OFF (0), ONEPASS_SINGLE, or
+** ONEPASS_MULTI. If eMode is not ONEPASS_OFF, then the cursor
+** iDataCur already points to the row to delete. If eMode is ONEPASS_OFF
+** then this function must seek iDataCur to the entry identified by iPk
+** and nPk before reading from it.
+**
+** If eMode is ONEPASS_MULTI, then this call is being made as part
+** of a ONEPASS delete that affects multiple rows. In this case, if
+** iIdxNoSeek is a valid cursor number (>=0) and is not the same as
+** iDataCur, then its position should be preserved following the delete
+** operation. Or, if iIdxNoSeek is not a valid cursor number, the
+** position of iDataCur should be preserved instead.
+**
+** iIdxNoSeek:
+** If iIdxNoSeek is a valid cursor number (>=0) not equal to iDataCur,
+** then it identifies an index cursor (from within array of cursors
+** starting at iIdxCur) that already points to the index entry to be deleted.
+** Except, this optimization is disabled if there are BEFORE triggers since
+** the trigger body might have moved the cursor.
+*/
+SQLITE_PRIVATE void sqlite3GenerateRowDelete(
+ Parse *pParse, /* Parsing context */
+ Table *pTab, /* Table containing the row to be deleted */
+ Trigger *pTrigger, /* List of triggers to (potentially) fire */
+ int iDataCur, /* Cursor from which column data is extracted */
+ int iIdxCur, /* First index cursor */
+ int iPk, /* First memory cell containing the PRIMARY KEY */
+ i16 nPk, /* Number of PRIMARY KEY memory cells */
+ u8 count, /* If non-zero, increment the row change counter */
+ u8 onconf, /* Default ON CONFLICT policy for triggers */
+ u8 eMode, /* ONEPASS_OFF, _SINGLE, or _MULTI. See above */
+ int iIdxNoSeek /* Cursor number of cursor that does not need seeking */
+){
+ Vdbe *v = pParse->pVdbe; /* Vdbe */
+ int iOld = 0; /* First register in OLD.* array */
+ int iLabel; /* Label resolved to end of generated code */
+ u8 opSeek; /* Seek opcode */
+
+ /* Vdbe is guaranteed to have been allocated by this stage. */
+ assert( v );
+ VdbeModuleComment((v, "BEGIN: GenRowDel(%d,%d,%d,%d)",
+ iDataCur, iIdxCur, iPk, (int)nPk));
+
+  /* Seek cursor iDataCur to the row to delete. If this row no longer exists
+ ** (this can happen if a trigger program has already deleted it), do
+ ** not attempt to delete it or fire any DELETE triggers. */
+ iLabel = sqlite3VdbeMakeLabel(pParse);
+ opSeek = HasRowid(pTab) ? OP_NotExists : OP_NotFound;
+ if( eMode==ONEPASS_OFF ){
+ sqlite3VdbeAddOp4Int(v, opSeek, iDataCur, iLabel, iPk, nPk);
+ VdbeCoverageIf(v, opSeek==OP_NotExists);
+ VdbeCoverageIf(v, opSeek==OP_NotFound);
+ }
+
+ /* If there are any triggers to fire, allocate a range of registers to
+ ** use for the old.* references in the triggers. */
+ if( sqlite3FkRequired(pParse, pTab, 0, 0) || pTrigger ){
+ u32 mask; /* Mask of OLD.* columns in use */
+ int iCol; /* Iterator used while populating OLD.* */
+ int addrStart; /* Start of BEFORE trigger programs */
+
+ /* TODO: Could use temporary registers here. Also could attempt to
+ ** avoid copying the contents of the rowid register. */
+ mask = sqlite3TriggerColmask(
+ pParse, pTrigger, 0, 0, TRIGGER_BEFORE|TRIGGER_AFTER, pTab, onconf
+ );
+ mask |= sqlite3FkOldmask(pParse, pTab);
+ iOld = pParse->nMem+1;
+ pParse->nMem += (1 + pTab->nCol);
+
+ /* Populate the OLD.* pseudo-table register array. These values will be
+ ** used by any BEFORE and AFTER triggers that exist. */
+ sqlite3VdbeAddOp2(v, OP_Copy, iPk, iOld);
+ for(iCol=0; iCol<pTab->nCol; iCol++){
+ testcase( mask!=0xffffffff && iCol==31 );
+ testcase( mask!=0xffffffff && iCol==32 );
+ if( mask==0xffffffff || (iCol<=31 && (mask & MASKBIT32(iCol))!=0) ){
+ int kk = sqlite3TableColumnToStorage(pTab, iCol);
+ sqlite3ExprCodeGetColumnOfTable(v, pTab, iDataCur, iCol, iOld+kk+1);
+ }
+ }
+
+ /* Invoke BEFORE DELETE trigger programs. */
+ addrStart = sqlite3VdbeCurrentAddr(v);
+ sqlite3CodeRowTrigger(pParse, pTrigger,
+ TK_DELETE, 0, TRIGGER_BEFORE, pTab, iOld, onconf, iLabel
+ );
+
+ /* If any BEFORE triggers were coded, then seek the cursor to the
+ ** row to be deleted again. It may be that the BEFORE triggers moved
+ ** the cursor or already deleted the row that the cursor was
+ ** pointing to.
+ **
+ ** Also disable the iIdxNoSeek optimization since the BEFORE trigger
+ ** may have moved that cursor.
+ */
+ if( addrStart<sqlite3VdbeCurrentAddr(v) ){
+ sqlite3VdbeAddOp4Int(v, opSeek, iDataCur, iLabel, iPk, nPk);
+ VdbeCoverageIf(v, opSeek==OP_NotExists);
+ VdbeCoverageIf(v, opSeek==OP_NotFound);
+ testcase( iIdxNoSeek>=0 );
+ iIdxNoSeek = -1;
+ }
+
+ /* Do FK processing. This call checks that any FK constraints that
+ ** refer to this table (i.e. constraints attached to other tables)
+ ** are not violated by deleting this row. */
+ sqlite3FkCheck(pParse, pTab, iOld, 0, 0, 0);
+ }
+
+ /* Delete the index and table entries. Skip this step if pTab is really
+ ** a view (in which case the only effect of the DELETE statement is to
+ ** fire the INSTEAD OF triggers).
+ **
+ ** If variable 'count' is non-zero, then this OP_Delete instruction should
+ ** invoke the update-hook. The pre-update-hook, on the other hand should
+ ** be invoked unless table pTab is a system table. The difference is that
+ ** the update-hook is not invoked for rows removed by REPLACE, but the
+ ** pre-update-hook is.
+ */
+ if( pTab->pSelect==0 ){
+ u8 p5 = 0;
+ sqlite3GenerateRowIndexDelete(pParse, pTab, iDataCur, iIdxCur,0,iIdxNoSeek);
+ sqlite3VdbeAddOp2(v, OP_Delete, iDataCur, (count?OPFLAG_NCHANGE:0));
+ if( pParse->nested==0 || 0==sqlite3_stricmp(pTab->zName, "sqlite_stat1") ){
+ sqlite3VdbeAppendP4(v, (char*)pTab, P4_TABLE);
+ }
+ if( eMode!=ONEPASS_OFF ){
+ sqlite3VdbeChangeP5(v, OPFLAG_AUXDELETE);
+ }
+ if( iIdxNoSeek>=0 && iIdxNoSeek!=iDataCur ){
+ sqlite3VdbeAddOp1(v, OP_Delete, iIdxNoSeek);
+ }
+ if( eMode==ONEPASS_MULTI ) p5 |= OPFLAG_SAVEPOSITION;
+ sqlite3VdbeChangeP5(v, p5);
+ }
+
+ /* Do any ON CASCADE, SET NULL or SET DEFAULT operations required to
+ ** handle rows (possibly in other tables) that refer via a foreign key
+ ** to the row just deleted. */
+ sqlite3FkActions(pParse, pTab, 0, iOld, 0, 0);
+
+ /* Invoke AFTER DELETE trigger programs. */
+ sqlite3CodeRowTrigger(pParse, pTrigger,
+ TK_DELETE, 0, TRIGGER_AFTER, pTab, iOld, onconf, iLabel
+ );
+
+ /* Jump here if the row had already been deleted before any BEFORE
+ ** trigger programs were invoked. Or if a trigger program throws a
+ ** RAISE(IGNORE) exception. */
+ sqlite3VdbeResolveLabel(v, iLabel);
+ VdbeModuleComment((v, "END: GenRowDel()"));
+}
+
+/*
+** This routine generates VDBE code that causes the deletion of all
+** index entries associated with a single row of a single table, pTab.
+**
+** Preconditions:
+**
+** 1. A read/write cursor "iDataCur" must be open on the canonical storage
+** btree for the table pTab. (This will be either the table itself
+**      for rowid tables or the primary key index for WITHOUT ROWID
+** tables.)
+**
+** 2. Read/write cursors for all indices of pTab must be open as
+** cursor number iIdxCur+i for the i-th index. (The pTab->pIndex
+** index is the 0-th index.)
+**
+**   3. The "iDataCur" cursor must already be positioned on the row
+**      that is to be deleted.
+*/
+SQLITE_PRIVATE void sqlite3GenerateRowIndexDelete(
+ Parse *pParse, /* Parsing and code generating context */
+ Table *pTab, /* Table containing the row to be deleted */
+ int iDataCur, /* Cursor of table holding data. */
+ int iIdxCur, /* First index cursor */
+ int *aRegIdx, /* Only delete if aRegIdx!=0 && aRegIdx[i]>0 */
+ int iIdxNoSeek /* Do not delete from this cursor */
+){
+ int i; /* Index loop counter */
+ int r1 = -1; /* Register holding an index key */
+ int iPartIdxLabel; /* Jump destination for skipping partial index entries */
+ Index *pIdx; /* Current index */
+ Index *pPrior = 0; /* Prior index */
+ Vdbe *v; /* The prepared statement under construction */
+ Index *pPk; /* PRIMARY KEY index, or NULL for rowid tables */
+
+ v = pParse->pVdbe;
+ pPk = HasRowid(pTab) ? 0 : sqlite3PrimaryKeyIndex(pTab);
+ for(i=0, pIdx=pTab->pIndex; pIdx; i++, pIdx=pIdx->pNext){
+ assert( iIdxCur+i!=iDataCur || pPk==pIdx );
+ if( aRegIdx!=0 && aRegIdx[i]==0 ) continue;
+ if( pIdx==pPk ) continue;
+ if( iIdxCur+i==iIdxNoSeek ) continue;
+ VdbeModuleComment((v, "GenRowIdxDel for %s", pIdx->zName));
+ r1 = sqlite3GenerateIndexKey(pParse, pIdx, iDataCur, 0, 1,
+ &iPartIdxLabel, pPrior, r1);
+ sqlite3VdbeAddOp3(v, OP_IdxDelete, iIdxCur+i, r1,
+ pIdx->uniqNotNull ? pIdx->nKeyCol : pIdx->nColumn);
+ sqlite3VdbeChangeP5(v, 1); /* Cause IdxDelete to error if no entry found */
+ sqlite3ResolvePartIdxLabel(pParse, iPartIdxLabel);
+ pPrior = pIdx;
+ }
+}
+
+/*
+** Generate code that will assemble an index key and store it in register
+** regOut. The key will be for index pIdx, which is an index on pTab.
+** iCur is the index of a cursor open on the pTab table and pointing to
+** the entry that needs indexing. If pTab is a WITHOUT ROWID table, then
+** iCur must be the cursor of the PRIMARY KEY index.
+**
+** Return a register number which is the first in a block of
+** registers that holds the elements of the index key. The
+** block of registers has already been deallocated by the time
+** this routine returns.
+**
+** If piPartIdxLabel is not NULL, fill in *piPartIdxLabel with a label and
+** jump to that label if pIdx is a partial index that should be skipped.
+** The label should be resolved using sqlite3ResolvePartIdxLabel().
+** A partial index should be skipped if its WHERE clause evaluates
+** to false or null. If pIdx is not a partial index, *piPartIdxLabel
+** will be set to zero which is an empty label that is ignored by
+** sqlite3ResolvePartIdxLabel().
+**
+** The pPrior and regPrior parameters are used to implement a cache to
+** avoid unnecessary register loads. If pPrior is not NULL, then it is
+** a pointer to a different index for which an index key has just been
+** computed into register regPrior. If the current pIdx index is generating
+** its key into the same sequence of registers and if pPrior and pIdx share
+** a column in common, then the register corresponding to that column already
+** holds the correct value and the loading of that register is skipped.
+** This optimization is helpful when doing a DELETE or an INTEGRITY_CHECK
+** on a table with multiple indices, and especially with the ROWID or
+** PRIMARY KEY columns of the index.
+*/
+SQLITE_PRIVATE int sqlite3GenerateIndexKey(
+ Parse *pParse, /* Parsing context */
+ Index *pIdx, /* The index for which to generate a key */
+ int iDataCur, /* Cursor number from which to take column data */
+ int regOut, /* Put the new key into this register if not 0 */
+ int prefixOnly, /* Compute only a unique prefix of the key */
+ int *piPartIdxLabel, /* OUT: Jump to this label to skip partial index */
+ Index *pPrior, /* Previously generated index key */
+ int regPrior /* Register holding previous generated key */
+){
+ Vdbe *v = pParse->pVdbe;
+ int j;
+ int regBase;
+ int nCol;
+
+ if( piPartIdxLabel ){
+ if( pIdx->pPartIdxWhere ){
+ *piPartIdxLabel = sqlite3VdbeMakeLabel(pParse);
+ pParse->iSelfTab = iDataCur + 1;
+ sqlite3ExprIfFalseDup(pParse, pIdx->pPartIdxWhere, *piPartIdxLabel,
+ SQLITE_JUMPIFNULL);
+ pParse->iSelfTab = 0;
+ pPrior = 0; /* Ticket a9efb42811fa41ee 2019-11-02;
+ ** pPartIdxWhere may have corrupted regPrior registers */
+ }else{
+ *piPartIdxLabel = 0;
+ }
+ }
+ nCol = (prefixOnly && pIdx->uniqNotNull) ? pIdx->nKeyCol : pIdx->nColumn;
+ regBase = sqlite3GetTempRange(pParse, nCol);
+ if( pPrior && (regBase!=regPrior || pPrior->pPartIdxWhere) ) pPrior = 0;
+ for(j=0; j<nCol; j++){
+ if( pPrior
+ && pPrior->aiColumn[j]==pIdx->aiColumn[j]
+ && pPrior->aiColumn[j]!=XN_EXPR
+ ){
+ /* This column was already computed by the previous index */
+ continue;
+ }
+ sqlite3ExprCodeLoadIndexColumn(pParse, pIdx, iDataCur, j, regBase+j);
+ /* If the column affinity is REAL but the number is an integer, then it
+ ** might be stored in the table as an integer (using a compact
+ ** representation) then converted to REAL by an OP_RealAffinity opcode.
+ ** But we are getting ready to store this value back into an index, where
+    ** it should be converted back to INTEGER again. So omit the
+    ** OP_RealAffinity opcode if it is present. */
+ sqlite3VdbeDeletePriorOpcode(v, OP_RealAffinity);
+ }
+ if( regOut ){
+ sqlite3VdbeAddOp3(v, OP_MakeRecord, regBase, nCol, regOut);
+ if( pIdx->pTable->pSelect ){
+ const char *zAff = sqlite3IndexAffinityStr(pParse->db, pIdx);
+ sqlite3VdbeChangeP4(v, -1, zAff, P4_TRANSIENT);
+ }
+ }
+ sqlite3ReleaseTempRange(pParse, regBase, nCol);
+ return regBase;
+}
+
+/*
+** If a prior call to sqlite3GenerateIndexKey() generated a jump-over label
+** because it was a partial index, then this routine should be called to
+** resolve that label.
+*/
+SQLITE_PRIVATE void sqlite3ResolvePartIdxLabel(Parse *pParse, int iLabel){
+ if( iLabel ){
+ sqlite3VdbeResolveLabel(pParse->pVdbe, iLabel);
+ }
+}
+
+/************** End of delete.c **********************************************/
+/************** Begin file func.c ********************************************/
+/*
+** 2002 February 23
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+** This file contains the C-language implementations for many of the SQL
+** functions of SQLite. (Some functions, and in particular the date and
+** time functions, are implemented separately.)
+*/
+/* #include "sqliteInt.h" */
+/* #include <stdlib.h> */
+/* #include <assert.h> */
+#ifndef SQLITE_OMIT_FLOATING_POINT
+/* #include <math.h> */
+#endif
+/* #include "vdbeInt.h" */
+
+/*
+** Return the collating function associated with a function.
+*/
+static CollSeq *sqlite3GetFuncCollSeq(sqlite3_context *context){
+ VdbeOp *pOp;
+ assert( context->pVdbe!=0 );
+ pOp = &context->pVdbe->aOp[context->iOp-1];
+ assert( pOp->opcode==OP_CollSeq );
+ assert( pOp->p4type==P4_COLLSEQ );
+ return pOp->p4.pColl;
+}
+
+/*
+** Indicate that the accumulator load should be skipped on this
+** iteration of the aggregate loop.
+*/
+static void sqlite3SkipAccumulatorLoad(sqlite3_context *context){
+ assert( context->isError<=0 );
+ context->isError = -1;
+ context->skipFlag = 1;
+}
+
+/*
+** Implementation of the non-aggregate min() and max() functions
+*/
+static void minmaxFunc(
+ sqlite3_context *context,
+ int argc,
+ sqlite3_value **argv
+){
+ int i;
+ int mask; /* 0 for min() or 0xffffffff for max() */
+ int iBest;
+ CollSeq *pColl;
+
+ assert( argc>1 );
+ mask = sqlite3_user_data(context)==0 ? 0 : -1;
+ pColl = sqlite3GetFuncCollSeq(context);
+ assert( pColl );
+ assert( mask==-1 || mask==0 );
+ iBest = 0;
+ if( sqlite3_value_type(argv[0])==SQLITE_NULL ) return;
+ for(i=1; i<argc; i++){
+ if( sqlite3_value_type(argv[i])==SQLITE_NULL ) return;
+ if( (sqlite3MemCompare(argv[iBest], argv[i], pColl)^mask)>=0 ){
+ testcase( mask==0 );
+ iBest = i;
+ }
+ }
+ sqlite3_result_value(context, argv[iBest]);
+}
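+
+/*
+** Illustrative examples (using the default BINARY collation):
+**
+**     SELECT max(3, 1, 2);     -- 3
+**     SELECT min(3, 1, 2);     -- 1
+**     SELECT max(3, NULL, 2);  -- NULL (any NULL argument yields NULL)
+*/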
+
+/*
+** Return the type of the argument.
+*/
+static void typeofFunc(
+ sqlite3_context *context,
+ int NotUsed,
+ sqlite3_value **argv
+){
+ static const char *azType[] = { "integer", "real", "text", "blob", "null" };
+ int i = sqlite3_value_type(argv[0]) - 1;
+ UNUSED_PARAMETER(NotUsed);
+ assert( i>=0 && i<ArraySize(azType) );
+ assert( SQLITE_INTEGER==1 );
+ assert( SQLITE_FLOAT==2 );
+ assert( SQLITE_TEXT==3 );
+ assert( SQLITE_BLOB==4 );
+ assert( SQLITE_NULL==5 );
+ /* EVIDENCE-OF: R-01470-60482 The sqlite3_value_type(V) interface returns
+ ** the datatype code for the initial datatype of the sqlite3_value object
+ ** V. The returned value is one of SQLITE_INTEGER, SQLITE_FLOAT,
+ ** SQLITE_TEXT, SQLITE_BLOB, or SQLITE_NULL. */
+ sqlite3_result_text(context, azType[i], -1, SQLITE_STATIC);
+}
+
+
+/*
+** Implementation of the length() function
+*/
+static void lengthFunc(
+ sqlite3_context *context,
+ int argc,
+ sqlite3_value **argv
+){
+ assert( argc==1 );
+ UNUSED_PARAMETER(argc);
+ switch( sqlite3_value_type(argv[0]) ){
+ case SQLITE_BLOB:
+ case SQLITE_INTEGER:
+ case SQLITE_FLOAT: {
+ sqlite3_result_int(context, sqlite3_value_bytes(argv[0]));
+ break;
+ }
+ case SQLITE_TEXT: {
+ const unsigned char *z = sqlite3_value_text(argv[0]);
+ const unsigned char *z0;
+ unsigned char c;
+ if( z==0 ) return;
+ z0 = z;
+ while( (c = *z)!=0 ){
+ z++;
+ if( c>=0xc0 ){
+ while( (*z & 0xc0)==0x80 ){ z++; z0++; }
+ }
+ }
+ sqlite3_result_int(context, (int)(z-z0));
+ break;
+ }
+ default: {
+ sqlite3_result_null(context);
+ break;
+ }
+ }
+}
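+
+/*
+** Illustrative examples: length() counts UTF-8 characters for text,
+** bytes for blobs, and propagates NULL:
+**
+**     SELECT length('hello');        -- 5
+**     SELECT length(x'0102030405');  -- 5 (bytes)
+**     SELECT length(NULL);           -- NULL
+*/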
+
+/*
+** Implementation of the abs() function.
+**
+** IMP: R-23979-26855 The abs(X) function returns the absolute value of
+** the numeric argument X.
+*/
+static void absFunc(sqlite3_context *context, int argc, sqlite3_value **argv){
+ assert( argc==1 );
+ UNUSED_PARAMETER(argc);
+ switch( sqlite3_value_type(argv[0]) ){
+ case SQLITE_INTEGER: {
+ i64 iVal = sqlite3_value_int64(argv[0]);
+ if( iVal<0 ){
+ if( iVal==SMALLEST_INT64 ){
+ /* IMP: R-31676-45509 If X is the integer -9223372036854775808
+ ** then abs(X) throws an integer overflow error since there is no
+ ** equivalent positive 64-bit two complement value. */
+ sqlite3_result_error(context, "integer overflow", -1);
+ return;
+ }
+ iVal = -iVal;
+ }
+ sqlite3_result_int64(context, iVal);
+ break;
+ }
+ case SQLITE_NULL: {
+ /* IMP: R-37434-19929 Abs(X) returns NULL if X is NULL. */
+ sqlite3_result_null(context);
+ break;
+ }
+ default: {
+ /* Because sqlite3_value_double() returns 0.0 if the argument is not
+ ** something that can be converted into a number, we have:
+ ** IMP: R-01992-00519 Abs(X) returns 0.0 if X is a string or blob
+ ** that cannot be converted to a numeric value.
+ */
+ double rVal = sqlite3_value_double(argv[0]);
+ if( rVal<0 ) rVal = -rVal;
+ sqlite3_result_double(context, rVal);
+ break;
+ }
+ }
+}
+
+/*
+** Implementation of the instr() function.
+**
+** instr(haystack,needle) finds the first occurrence of needle
+** in haystack and returns the number of previous characters plus 1,
+** or 0 if needle does not occur within haystack.
+**
+** If both haystack and needle are BLOBs, then the result is one more than
+** the number of bytes in haystack prior to the first occurrence of needle,
+** or 0 if needle never occurs in haystack.
+*/
+static void instrFunc(
+ sqlite3_context *context,
+ int argc,
+ sqlite3_value **argv
+){
+ const unsigned char *zHaystack;
+ const unsigned char *zNeedle;
+ int nHaystack;
+ int nNeedle;
+ int typeHaystack, typeNeedle;
+ int N = 1;
+ int isText;
+ unsigned char firstChar;
+ sqlite3_value *pC1 = 0;
+ sqlite3_value *pC2 = 0;
+
+ UNUSED_PARAMETER(argc);
+ typeHaystack = sqlite3_value_type(argv[0]);
+ typeNeedle = sqlite3_value_type(argv[1]);
+ if( typeHaystack==SQLITE_NULL || typeNeedle==SQLITE_NULL ) return;
+ nHaystack = sqlite3_value_bytes(argv[0]);
+ nNeedle = sqlite3_value_bytes(argv[1]);
+ if( nNeedle>0 ){
+ if( typeHaystack==SQLITE_BLOB && typeNeedle==SQLITE_BLOB ){
+ zHaystack = sqlite3_value_blob(argv[0]);
+ zNeedle = sqlite3_value_blob(argv[1]);
+ isText = 0;
+ }else if( typeHaystack!=SQLITE_BLOB && typeNeedle!=SQLITE_BLOB ){
+ zHaystack = sqlite3_value_text(argv[0]);
+ zNeedle = sqlite3_value_text(argv[1]);
+ isText = 1;
+ }else{
+ pC1 = sqlite3_value_dup(argv[0]);
+ zHaystack = sqlite3_value_text(pC1);
+ if( zHaystack==0 ) goto endInstrOOM;
+ nHaystack = sqlite3_value_bytes(pC1);
+ pC2 = sqlite3_value_dup(argv[1]);
+ zNeedle = sqlite3_value_text(pC2);
+ if( zNeedle==0 ) goto endInstrOOM;
+ nNeedle = sqlite3_value_bytes(pC2);
+ isText = 1;
+ }
+ if( zNeedle==0 || (nHaystack && zHaystack==0) ) goto endInstrOOM;
+ firstChar = zNeedle[0];
+ while( nNeedle<=nHaystack
+ && (zHaystack[0]!=firstChar || memcmp(zHaystack, zNeedle, nNeedle)!=0)
+ ){
+ N++;
+ do{
+ nHaystack--;
+ zHaystack++;
+ }while( isText && (zHaystack[0]&0xc0)==0x80 );
+ }
+ if( nNeedle>nHaystack ) N = 0;
+ }
+ sqlite3_result_int(context, N);
+endInstr:
+ sqlite3_value_free(pC1);
+ sqlite3_value_free(pC2);
+ return;
+endInstrOOM:
+ sqlite3_result_error_nomem(context);
+ goto endInstr;
+}
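+
+/*
+** Illustrative examples:
+**
+**     SELECT instr('sqlite', 'lit');  -- 3
+**     SELECT instr('sqlite', 'z');    -- 0 (needle never occurs)
+**     SELECT instr('sqlite', '');     -- 1 (empty needle matches at once)
+*/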
+
+/*
+** Implementation of the printf() function.
+*/
+static void printfFunc(
+ sqlite3_context *context,
+ int argc,
+ sqlite3_value **argv
+){
+ PrintfArguments x;
+ StrAccum str;
+ const char *zFormat;
+ int n;
+ sqlite3 *db = sqlite3_context_db_handle(context);
+
+ if( argc>=1 && (zFormat = (const char*)sqlite3_value_text(argv[0]))!=0 ){
+ x.nArg = argc-1;
+ x.nUsed = 0;
+ x.apArg = argv+1;
+ sqlite3StrAccumInit(&str, db, 0, 0, db->aLimit[SQLITE_LIMIT_LENGTH]);
+ str.printfFlags = SQLITE_PRINTF_SQLFUNC;
+ sqlite3_str_appendf(&str, zFormat, &x);
+ n = str.nChar;
+ sqlite3_result_text(context, sqlite3StrAccumFinish(&str), n,
+ SQLITE_DYNAMIC);
+ }
+}
+
+/*
+** Implementation of the substr() function.
+**
+** substr(x,p1,p2) returns p2 characters of x[] beginning with p1.
+** p1 is 1-indexed. So substr(x,1,1) returns the first character
+** of x. If x is text, then we actually count UTF-8 characters.
+** If x is a blob, then we count bytes.
+**
+** If p1 is negative, then the substring begins abs(p1) characters from
+** the end of x[].
+**
+** If p2 is negative, return the p2 characters preceding p1.
+*/
+static void substrFunc(
+ sqlite3_context *context,
+ int argc,
+ sqlite3_value **argv
+){
+ const unsigned char *z;
+ const unsigned char *z2;
+ int len;
+ int p0type;
+ i64 p1, p2;
+ int negP2 = 0;
+
+ assert( argc==3 || argc==2 );
+ if( sqlite3_value_type(argv[1])==SQLITE_NULL
+ || (argc==3 && sqlite3_value_type(argv[2])==SQLITE_NULL)
+ ){
+ return;
+ }
+ p0type = sqlite3_value_type(argv[0]);
+ p1 = sqlite3_value_int(argv[1]);
+ if( p0type==SQLITE_BLOB ){
+ len = sqlite3_value_bytes(argv[0]);
+ z = sqlite3_value_blob(argv[0]);
+ if( z==0 ) return;
+ assert( len==sqlite3_value_bytes(argv[0]) );
+ }else{
+ z = sqlite3_value_text(argv[0]);
+ if( z==0 ) return;
+ len = 0;
+ if( p1<0 ){
+ for(z2=z; *z2; len++){
+ SQLITE_SKIP_UTF8(z2);
+ }
+ }
+ }
+#ifdef SQLITE_SUBSTR_COMPATIBILITY
+  /* If SQLITE_SUBSTR_COMPATIBILITY is defined then substr(X,0,N) works the
+  ** same as substr(X,1,N) - it returns the first N characters of X. This
+ ** is essentially a back-out of the bug-fix in check-in [5fc125d362df4b8]
+ ** from 2009-02-02 for compatibility of applications that exploited the
+ ** old buggy behavior. */
+ if( p1==0 ) p1 = 1; /* <rdar://problem/6778339> */
+#endif
+ if( argc==3 ){
+ p2 = sqlite3_value_int(argv[2]);
+ if( p2<0 ){
+ p2 = -p2;
+ negP2 = 1;
+ }
+ }else{
+ p2 = sqlite3_context_db_handle(context)->aLimit[SQLITE_LIMIT_LENGTH];
+ }
+ if( p1<0 ){
+ p1 += len;
+ if( p1<0 ){
+ p2 += p1;
+ if( p2<0 ) p2 = 0;
+ p1 = 0;
+ }
+ }else if( p1>0 ){
+ p1--;
+ }else if( p2>0 ){
+ p2--;
+ }
+ if( negP2 ){
+ p1 -= p2;
+ if( p1<0 ){
+ p2 += p1;
+ p1 = 0;
+ }
+ }
+ assert( p1>=0 && p2>=0 );
+ if( p0type!=SQLITE_BLOB ){
+ while( *z && p1 ){
+ SQLITE_SKIP_UTF8(z);
+ p1--;
+ }
+ for(z2=z; *z2 && p2; p2--){
+ SQLITE_SKIP_UTF8(z2);
+ }
+ sqlite3_result_text64(context, (char*)z, z2-z, SQLITE_TRANSIENT,
+ SQLITE_UTF8);
+ }else{
+ if( p1+p2>len ){
+ p2 = len-p1;
+ if( p2<0 ) p2 = 0;
+ }
+ sqlite3_result_blob64(context, (char*)&z[p1], (u64)p2, SQLITE_TRANSIENT);
+ }
+}
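+
+/*
+** Illustrative examples:
+**
+**     SELECT substr('sqlite', 2, 3);   -- 'qli'
+**     SELECT substr('sqlite', -2);     -- 'te' (count from the end)
+**     SELECT substr('sqlite', 4, -2);  -- 'ql' (the 2 chars preceding p1)
+*/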
+
+/*
+** Implementation of the round() function
+*/
+#ifndef SQLITE_OMIT_FLOATING_POINT
+static void roundFunc(sqlite3_context *context, int argc, sqlite3_value **argv){
+ int n = 0;
+ double r;
+ char *zBuf;
+ assert( argc==1 || argc==2 );
+ if( argc==2 ){
+ if( SQLITE_NULL==sqlite3_value_type(argv[1]) ) return;
+ n = sqlite3_value_int(argv[1]);
+ if( n>30 ) n = 30;
+ if( n<0 ) n = 0;
+ }
+ if( sqlite3_value_type(argv[0])==SQLITE_NULL ) return;
+ r = sqlite3_value_double(argv[0]);
+  /* If Y==0 and X will fit in a 64-bit int, handle the rounding
+  ** directly; otherwise use printf. */
+ if( r<-4503599627370496.0 || r>+4503599627370496.0 ){
+ /* The value has no fractional part so there is nothing to round */
+ }else if( n==0 ){
+ r = (double)((sqlite_int64)(r+(r<0?-0.5:+0.5)));
+ }else{
+ zBuf = sqlite3_mprintf("%.*f",n,r);
+ if( zBuf==0 ){
+ sqlite3_result_error_nomem(context);
+ return;
+ }
+ sqlite3AtoF(zBuf, &r, sqlite3Strlen30(zBuf), SQLITE_UTF8);
+ sqlite3_free(zBuf);
+ }
+ sqlite3_result_double(context, r);
+}
+#endif
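+
+/*
+** Illustrative examples:
+**
+**     SELECT round(2.567, 2);  -- 2.57
+**     SELECT round(2.5);       -- 3.0 (halfway rounds away from zero)
+**     SELECT round(-2.5);      -- -3.0
+*/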
+
+/*
+** Allocate nByte bytes of space using sqlite3Malloc(). If the
+** allocation fails, call sqlite3_result_error_nomem() to notify
+** the database handle that malloc() has failed and return NULL.
+** If nByte is larger than the maximum string or blob length, then
+** raise an SQLITE_TOOBIG exception and return NULL.
+*/
+static void *contextMalloc(sqlite3_context *context, i64 nByte){
+ char *z;
+ sqlite3 *db = sqlite3_context_db_handle(context);
+ assert( nByte>0 );
+ testcase( nByte==db->aLimit[SQLITE_LIMIT_LENGTH] );
+ testcase( nByte==db->aLimit[SQLITE_LIMIT_LENGTH]+1 );
+ if( nByte>db->aLimit[SQLITE_LIMIT_LENGTH] ){
+ sqlite3_result_error_toobig(context);
+ z = 0;
+ }else{
+ z = sqlite3Malloc(nByte);
+ if( !z ){
+ sqlite3_result_error_nomem(context);
+ }
+ }
+ return z;
+}
+
+/*
+** Implementation of the upper() and lower() SQL functions.
+*/
+static void upperFunc(sqlite3_context *context, int argc, sqlite3_value **argv){
+ char *z1;
+ const char *z2;
+ int i, n;
+ UNUSED_PARAMETER(argc);
+ z2 = (char*)sqlite3_value_text(argv[0]);
+ n = sqlite3_value_bytes(argv[0]);
+ /* Verify that the call to _bytes() does not invalidate the _text() pointer */
+ assert( z2==(char*)sqlite3_value_text(argv[0]) );
+ if( z2 ){
+ z1 = contextMalloc(context, ((i64)n)+1);
+ if( z1 ){
+ for(i=0; i<n; i++){
+ z1[i] = (char)sqlite3Toupper(z2[i]);
+ }
+ sqlite3_result_text(context, z1, n, sqlite3_free);
+ }
+ }
+}
+static void lowerFunc(sqlite3_context *context, int argc, sqlite3_value **argv){
+ char *z1;
+ const char *z2;
+ int i, n;
+ UNUSED_PARAMETER(argc);
+ z2 = (char*)sqlite3_value_text(argv[0]);
+ n = sqlite3_value_bytes(argv[0]);
+ /* Verify that the call to _bytes() does not invalidate the _text() pointer */
+ assert( z2==(char*)sqlite3_value_text(argv[0]) );
+ if( z2 ){
+ z1 = contextMalloc(context, ((i64)n)+1);
+ if( z1 ){
+ for(i=0; i<n; i++){
+ z1[i] = sqlite3Tolower(z2[i]);
+ }
+ sqlite3_result_text(context, z1, n, sqlite3_free);
+ }
+ }
+}
+
+/*
+** Some functions like COALESCE() and IFNULL() and UNLIKELY() are implemented
+** as VDBE code so that unused argument values do not have to be computed.
+** However, we still need some kind of function implementation for these
+** routines in the function table. The noopFunc macro provides this.
+** noopFunc will never be called so it doesn't matter what the implementation
+** is. We might as well use the "version()" function as a substitute.
+*/
+#define noopFunc versionFunc /* Substitute function - never called */
+
+/*
+** Implementation of random(). Return a random integer.
+*/
+static void randomFunc(
+ sqlite3_context *context,
+ int NotUsed,
+ sqlite3_value **NotUsed2
+){
+ sqlite_int64 r;
+ UNUSED_PARAMETER2(NotUsed, NotUsed2);
+ sqlite3_randomness(sizeof(r), &r);
+ if( r<0 ){
+ /* We need to prevent a random number of 0x8000000000000000
+ ** (or -9223372036854775808) since when you do abs() of that
+    ** number you get the same value back again. To do this
+ ** in a way that is testable, mask the sign bit off of negative
+ ** values, resulting in a positive value. Then take the
+ ** 2s complement of that positive value. The end result can
+ ** therefore be no less than -9223372036854775807.
+ */
+ r = -(r & LARGEST_INT64);
+ }
+ sqlite3_result_int64(context, r);
+}
+
+/*
+** Implementation of randomblob(N). Return a random blob
+** that is N bytes long.
+*/
+static void randomBlob(
+ sqlite3_context *context,
+ int argc,
+ sqlite3_value **argv
+){
+ sqlite3_int64 n;
+ unsigned char *p;
+ assert( argc==1 );
+ UNUSED_PARAMETER(argc);
+ n = sqlite3_value_int64(argv[0]);
+ if( n<1 ){
+ n = 1;
+ }
+ p = contextMalloc(context, n);
+ if( p ){
+ sqlite3_randomness(n, p);
+ sqlite3_result_blob(context, (char*)p, n, sqlite3_free);
+ }
+}
+
+/*
+** Implementation of the last_insert_rowid() SQL function. The return
+** value is the same as the sqlite3_last_insert_rowid() API function.
+*/
+static void last_insert_rowid(
+ sqlite3_context *context,
+ int NotUsed,
+ sqlite3_value **NotUsed2
+){
+ sqlite3 *db = sqlite3_context_db_handle(context);
+ UNUSED_PARAMETER2(NotUsed, NotUsed2);
+ /* IMP: R-51513-12026 The last_insert_rowid() SQL function is a
+ ** wrapper around the sqlite3_last_insert_rowid() C/C++ interface
+ ** function. */
+ sqlite3_result_int64(context, sqlite3_last_insert_rowid(db));
+}
+
+/*
+** Implementation of the changes() SQL function.
+**
+** IMP: R-62073-11209 The changes() SQL function is a wrapper
+** around the sqlite3_changes() C/C++ function and hence follows the same
+** rules for counting changes.
+*/
+static void changes(
+ sqlite3_context *context,
+ int NotUsed,
+ sqlite3_value **NotUsed2
+){
+ sqlite3 *db = sqlite3_context_db_handle(context);
+ UNUSED_PARAMETER2(NotUsed, NotUsed2);
+ sqlite3_result_int(context, sqlite3_changes(db));
+}
+
+/*
+** Implementation of the total_changes() SQL function. The return value is
+** the same as the sqlite3_total_changes() API function.
+*/
+static void total_changes(
+ sqlite3_context *context,
+ int NotUsed,
+ sqlite3_value **NotUsed2
+){
+ sqlite3 *db = sqlite3_context_db_handle(context);
+ UNUSED_PARAMETER2(NotUsed, NotUsed2);
+ /* IMP: R-52756-41993 This function is a wrapper around the
+ ** sqlite3_total_changes() C/C++ interface. */
+ sqlite3_result_int(context, sqlite3_total_changes(db));
+}
+
+/*
+** A structure defining how to do GLOB-style comparisons.
+*/
+struct compareInfo {
+ u8 matchAll; /* "*" or "%" */
+ u8 matchOne; /* "?" or "_" */
+ u8 matchSet; /* "[" or 0 */
+ u8 noCase; /* true to ignore case differences */
+};
+
+/*
+** For LIKE and GLOB matching on EBCDIC machines, assume that every
+** character is exactly one byte in size.  Also, provide the Utf8Read()
+** macro for fast reading of the next character in the common case where
+** the next character is ASCII.
+*/
+#if defined(SQLITE_EBCDIC)
+# define sqlite3Utf8Read(A) (*((*A)++))
+# define Utf8Read(A) (*(A++))
+#else
+# define Utf8Read(A) (A[0]<0x80?*(A++):sqlite3Utf8Read(&A))
+#endif
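+
+/* Illustrative sketch of how Utf8Read() is used below: it returns the next
+** code point and advances the pointer, taking the single-byte fast path for
+** ASCII.  For example, with zIn pointing at the UTF-8 bytes for "a", U+00E9,
+** "b":
+**
+**   const u8 *zIn = (const u8*)"a\xC3\xA9b";
+**   u32 c;
+**   while( (c = Utf8Read(zIn))!=0 ){
+**     -- c takes the values 0x61, 0xE9, 0x62 in successive iterations
+**   }
+*/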
+
+static const struct compareInfo globInfo = { '*', '?', '[', 0 };
+/* The correct SQL-92 behavior is for the LIKE operator to ignore
+** case. Thus 'a' LIKE 'A' would be true. */
+static const struct compareInfo likeInfoNorm = { '%', '_', 0, 1 };
+/* If SQLITE_CASE_SENSITIVE_LIKE is defined, then the LIKE operator
+** is case sensitive causing 'a' LIKE 'A' to be false */
+static const struct compareInfo likeInfoAlt = { '%', '_', 0, 0 };
+
+/*
+** Possible error returns from patternMatch()
+*/
+#define SQLITE_MATCH 0
+#define SQLITE_NOMATCH 1
+#define SQLITE_NOWILDCARDMATCH 2
+
+/*
+** Compare two UTF-8 strings for equality where the first string is
+** a GLOB or LIKE expression. Return values:
+**
+** SQLITE_MATCH: Match
+** SQLITE_NOMATCH: No match
+** SQLITE_NOWILDCARDMATCH: No match in spite of having * or % wildcards.
+**
+** Globbing rules:
+**
+** '*' Matches any sequence of zero or more characters.
+**
+** '?' Matches exactly one character.
+**
+** [...] Matches one character from the enclosed list of
+** characters.
+**
+** [^...] Matches one character not in the enclosed list.
+**
+** With the [...] and [^...] matching, a ']' character can be included
+** in the list by making it the first character after '[' or '^'. A
+** range of characters can be specified using '-'. Example:
+** "[a-z]" matches any single lower-case letter. To match a '-', make
+** it the last character in the list.
+**
+** Like matching rules:
+**
+** '%' Matches any sequence of zero or more characters
+**
+**      '_'       Matches any one character
+**
+** Ec Where E is the "esc" character and c is any other
+** character, including '%', '_', and esc, match exactly c.
+**
+** The comments within this routine usually assume glob matching.
+**
+** This routine is usually quick, but can be N**2 in the worst case.
+*/
+static int patternCompare(
+ const u8 *zPattern, /* The glob pattern */
+ const u8 *zString, /* The string to compare against the glob */
+ const struct compareInfo *pInfo, /* Information about how to do the compare */
+ u32 matchOther /* The escape char (LIKE) or '[' (GLOB) */
+){
+ u32 c, c2; /* Next pattern and input string chars */
+ u32 matchOne = pInfo->matchOne; /* "?" or "_" */
+ u32 matchAll = pInfo->matchAll; /* "*" or "%" */
+ u8 noCase = pInfo->noCase; /* True if uppercase==lowercase */
+ const u8 *zEscaped = 0; /* One past the last escaped input char */
+
+ while( (c = Utf8Read(zPattern))!=0 ){
+ if( c==matchAll ){ /* Match "*" */
+ /* Skip over multiple "*" characters in the pattern. If there
+ ** are also "?" characters, skip those as well, but consume a
+ ** single character of the input string for each "?" skipped */
+ while( (c=Utf8Read(zPattern)) == matchAll || c == matchOne ){
+ if( c==matchOne && sqlite3Utf8Read(&zString)==0 ){
+ return SQLITE_NOWILDCARDMATCH;
+ }
+ }
+ if( c==0 ){
+ return SQLITE_MATCH; /* "*" at the end of the pattern matches */
+ }else if( c==matchOther ){
+ if( pInfo->matchSet==0 ){
+ c = sqlite3Utf8Read(&zPattern);
+ if( c==0 ) return SQLITE_NOWILDCARDMATCH;
+ }else{
+ /* "[...]" immediately follows the "*". We have to do a slow
+ ** recursive search in this case, but it is an unusual case. */
+ assert( matchOther<0x80 ); /* '[' is a single-byte character */
+ while( *zString ){
+ int bMatch = patternCompare(&zPattern[-1],zString,pInfo,matchOther);
+ if( bMatch!=SQLITE_NOMATCH ) return bMatch;
+ SQLITE_SKIP_UTF8(zString);
+ }
+ return SQLITE_NOWILDCARDMATCH;
+ }
+ }
+
+ /* At this point variable c contains the first character of the
+ ** pattern string past the "*". Search in the input string for the
+ ** first matching character and recursively continue the match from
+ ** that point.
+ **
+ ** For a case-insensitive search, set variable cx to be the same as
+ ** c but in the other case and search the input string for either
+ ** c or cx.
+ */
+ if( c<=0x80 ){
+ char zStop[3];
+ int bMatch;
+ if( noCase ){
+ zStop[0] = sqlite3Toupper(c);
+ zStop[1] = sqlite3Tolower(c);
+ zStop[2] = 0;
+ }else{
+ zStop[0] = c;
+ zStop[1] = 0;
+ }
+ while(1){
+ zString += strcspn((const char*)zString, zStop);
+ if( zString[0]==0 ) break;
+ zString++;
+ bMatch = patternCompare(zPattern,zString,pInfo,matchOther);
+ if( bMatch!=SQLITE_NOMATCH ) return bMatch;
+ }
+ }else{
+ int bMatch;
+ while( (c2 = Utf8Read(zString))!=0 ){
+ if( c2!=c ) continue;
+ bMatch = patternCompare(zPattern,zString,pInfo,matchOther);
+ if( bMatch!=SQLITE_NOMATCH ) return bMatch;
+ }
+ }
+ return SQLITE_NOWILDCARDMATCH;
+ }
+ if( c==matchOther ){
+ if( pInfo->matchSet==0 ){
+ c = sqlite3Utf8Read(&zPattern);
+ if( c==0 ) return SQLITE_NOMATCH;
+ zEscaped = zPattern;
+ }else{
+ u32 prior_c = 0;
+ int seen = 0;
+ int invert = 0;
+ c = sqlite3Utf8Read(&zString);
+ if( c==0 ) return SQLITE_NOMATCH;
+ c2 = sqlite3Utf8Read(&zPattern);
+ if( c2=='^' ){
+ invert = 1;
+ c2 = sqlite3Utf8Read(&zPattern);
+ }
+ if( c2==']' ){
+ if( c==']' ) seen = 1;
+ c2 = sqlite3Utf8Read(&zPattern);
+ }
+ while( c2 && c2!=']' ){
+ if( c2=='-' && zPattern[0]!=']' && zPattern[0]!=0 && prior_c>0 ){
+ c2 = sqlite3Utf8Read(&zPattern);
+ if( c>=prior_c && c<=c2 ) seen = 1;
+ prior_c = 0;
+ }else{
+ if( c==c2 ){
+ seen = 1;
+ }
+ prior_c = c2;
+ }
+ c2 = sqlite3Utf8Read(&zPattern);
+ }
+ if( c2==0 || (seen ^ invert)==0 ){
+ return SQLITE_NOMATCH;
+ }
+ continue;
+ }
+ }
+ c2 = Utf8Read(zString);
+ if( c==c2 ) continue;
+ if( noCase && sqlite3Tolower(c)==sqlite3Tolower(c2) && c<0x80 && c2<0x80 ){
+ continue;
+ }
+ if( c==matchOne && zPattern!=zEscaped && c2!=0 ) continue;
+ return SQLITE_NOMATCH;
+ }
+ return *zString==0 ? SQLITE_MATCH : SQLITE_NOMATCH;
+}
+
+/*
+** The sqlite3_strglob() interface. Return 0 on a match (like strcmp()) and
+** non-zero if there is no match.
+*/
+SQLITE_API int sqlite3_strglob(const char *zGlobPattern, const char *zString){
+ return patternCompare((u8*)zGlobPattern, (u8*)zString, &globInfo, '[');
+}
+
+/*
+** The sqlite3_strlike() interface. Return 0 on a match and non-zero for
+** a miss - like strcmp().
+*/
+SQLITE_API int sqlite3_strlike(const char *zPattern, const char *zStr, unsigned int esc){
+ return patternCompare((u8*)zPattern, (u8*)zStr, &likeInfoNorm, esc);
+}
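+
+/* Illustrative usage sketch for the two public wrappers above (assuming a
+** standard build): both return 0 on a match, like strcmp():
+**
+**   if( sqlite3_strglob("*.c", "func.c")==0 ){ ... matched ... }
+**   if( sqlite3_strlike("%AB%", "xaby", 0)==0 ){ ... matched, case-blind ... }
+**
+** An esc value of 0 means "no escape character", mirroring a two-argument
+** LIKE expression.
+*/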
+
+/*
+** Count the number of times that the LIKE operator (or GLOB which is
+** just a variation of LIKE) gets called. This is used for testing
+** only.
+*/
+#ifdef SQLITE_TEST
+SQLITE_API int sqlite3_like_count = 0;
+#endif
+
+
+/*
+** Implementation of the like() SQL function. This function implements
+** the built-in LIKE operator.  The first argument to the function is the
+** pattern and the second argument is the string.  So, the SQL statement:
+**
+** A LIKE B
+**
+** is implemented as like(B,A).
+**
+** This same function (with a different compareInfo structure) computes
+** the GLOB operator.
+*/
+static void likeFunc(
+ sqlite3_context *context,
+ int argc,
+ sqlite3_value **argv
+){
+ const unsigned char *zA, *zB;
+ u32 escape;
+ int nPat;
+ sqlite3 *db = sqlite3_context_db_handle(context);
+ struct compareInfo *pInfo = sqlite3_user_data(context);
+ struct compareInfo backupInfo;
+
+#ifdef SQLITE_LIKE_DOESNT_MATCH_BLOBS
+ if( sqlite3_value_type(argv[0])==SQLITE_BLOB
+ || sqlite3_value_type(argv[1])==SQLITE_BLOB
+ ){
+#ifdef SQLITE_TEST
+ sqlite3_like_count++;
+#endif
+ sqlite3_result_int(context, 0);
+ return;
+ }
+#endif
+
+ /* Limit the length of the LIKE or GLOB pattern to avoid problems
+ ** of deep recursion and N*N behavior in patternCompare().
+ */
+ nPat = sqlite3_value_bytes(argv[0]);
+ testcase( nPat==db->aLimit[SQLITE_LIMIT_LIKE_PATTERN_LENGTH] );
+ testcase( nPat==db->aLimit[SQLITE_LIMIT_LIKE_PATTERN_LENGTH]+1 );
+ if( nPat > db->aLimit[SQLITE_LIMIT_LIKE_PATTERN_LENGTH] ){
+ sqlite3_result_error(context, "LIKE or GLOB pattern too complex", -1);
+ return;
+ }
+ if( argc==3 ){
+ /* The escape character string must consist of a single UTF-8 character.
+ ** Otherwise, return an error.
+ */
+ const unsigned char *zEsc = sqlite3_value_text(argv[2]);
+ if( zEsc==0 ) return;
+ if( sqlite3Utf8CharLen((char*)zEsc, -1)!=1 ){
+ sqlite3_result_error(context,
+ "ESCAPE expression must be a single character", -1);
+ return;
+ }
+ escape = sqlite3Utf8Read(&zEsc);
+ if( escape==pInfo->matchAll || escape==pInfo->matchOne ){
+ memcpy(&backupInfo, pInfo, sizeof(backupInfo));
+ pInfo = &backupInfo;
+ if( escape==pInfo->matchAll ) pInfo->matchAll = 0;
+ if( escape==pInfo->matchOne ) pInfo->matchOne = 0;
+ }
+ }else{
+ escape = pInfo->matchSet;
+ }
+ zB = sqlite3_value_text(argv[0]);
+ zA = sqlite3_value_text(argv[1]);
+ if( zA && zB ){
+#ifdef SQLITE_TEST
+ sqlite3_like_count++;
+#endif
+ sqlite3_result_int(context,
+ patternCompare(zB, zA, pInfo, escape)==SQLITE_MATCH);
+ }
+}
+
+/*
+** Implementation of the NULLIF(x,y) function. The result is the first
+** argument if the arguments are different. The result is NULL if the
+** arguments are equal to each other.
+*/
+static void nullifFunc(
+ sqlite3_context *context,
+ int NotUsed,
+ sqlite3_value **argv
+){
+ CollSeq *pColl = sqlite3GetFuncCollSeq(context);
+ UNUSED_PARAMETER(NotUsed);
+ if( sqlite3MemCompare(argv[0], argv[1], pColl)!=0 ){
+ sqlite3_result_value(context, argv[0]);
+ }
+}
+
+/*
+** Implementation of the sqlite_version() function. The result is the version
+** of the SQLite library that is running.
+*/
+static void versionFunc(
+ sqlite3_context *context,
+ int NotUsed,
+ sqlite3_value **NotUsed2
+){
+ UNUSED_PARAMETER2(NotUsed, NotUsed2);
+ /* IMP: R-48699-48617 This function is an SQL wrapper around the
+ ** sqlite3_libversion() C-interface. */
+ sqlite3_result_text(context, sqlite3_libversion(), -1, SQLITE_STATIC);
+}
+
+/*
+** Implementation of the sqlite_source_id() function. The result is a string
+** that identifies the particular version of the source code used to build
+** SQLite.
+*/
+static void sourceidFunc(
+ sqlite3_context *context,
+ int NotUsed,
+ sqlite3_value **NotUsed2
+){
+ UNUSED_PARAMETER2(NotUsed, NotUsed2);
+ /* IMP: R-24470-31136 This function is an SQL wrapper around the
+ ** sqlite3_sourceid() C interface. */
+ sqlite3_result_text(context, sqlite3_sourceid(), -1, SQLITE_STATIC);
+}
+
+/*
+** Implementation of the sqlite_log() function. This is a wrapper around
+** sqlite3_log(). The return value is NULL. The function exists purely for
+** its side-effects.
+*/
+static void errlogFunc(
+ sqlite3_context *context,
+ int argc,
+ sqlite3_value **argv
+){
+ UNUSED_PARAMETER(argc);
+ UNUSED_PARAMETER(context);
+ sqlite3_log(sqlite3_value_int(argv[0]), "%s", sqlite3_value_text(argv[1]));
+}
+
+/*
+** Implementation of the sqlite_compileoption_used() function.
+** The result is an integer that identifies if the compiler option
+** was used to build SQLite.
+*/
+#ifndef SQLITE_OMIT_COMPILEOPTION_DIAGS
+static void compileoptionusedFunc(
+ sqlite3_context *context,
+ int argc,
+ sqlite3_value **argv
+){
+ const char *zOptName;
+ assert( argc==1 );
+ UNUSED_PARAMETER(argc);
+ /* IMP: R-39564-36305 The sqlite_compileoption_used() SQL
+ ** function is a wrapper around the sqlite3_compileoption_used() C/C++
+ ** function.
+ */
+ if( (zOptName = (const char*)sqlite3_value_text(argv[0]))!=0 ){
+ sqlite3_result_int(context, sqlite3_compileoption_used(zOptName));
+ }
+}
+#endif /* SQLITE_OMIT_COMPILEOPTION_DIAGS */
+
+/*
+** Implementation of the sqlite_compileoption_get() function.
+** The result is a string that identifies the compiler options
+** used to build SQLite.
+*/
+#ifndef SQLITE_OMIT_COMPILEOPTION_DIAGS
+static void compileoptiongetFunc(
+ sqlite3_context *context,
+ int argc,
+ sqlite3_value **argv
+){
+ int n;
+ assert( argc==1 );
+ UNUSED_PARAMETER(argc);
+ /* IMP: R-04922-24076 The sqlite_compileoption_get() SQL function
+ ** is a wrapper around the sqlite3_compileoption_get() C/C++ function.
+ */
+ n = sqlite3_value_int(argv[0]);
+ sqlite3_result_text(context, sqlite3_compileoption_get(n), -1, SQLITE_STATIC);
+}
+#endif /* SQLITE_OMIT_COMPILEOPTION_DIAGS */
+
+/* Array for converting from half-bytes (nybbles) into ASCII hex
+** digits. */
+static const char hexdigits[] = {
+ '0', '1', '2', '3', '4', '5', '6', '7',
+ '8', '9', 'A', 'B', 'C', 'D', 'E', 'F'
+};
+
+/*
+** Implementation of the QUOTE() function. This function takes a single
+** argument. If the argument is numeric, the return value is the same as
+** the argument. If the argument is NULL, the return value is the string
+** "NULL". Otherwise, the argument is enclosed in single quotes with
+** single-quote escapes.
+*/
+static void quoteFunc(sqlite3_context *context, int argc, sqlite3_value **argv){
+ assert( argc==1 );
+ UNUSED_PARAMETER(argc);
+ switch( sqlite3_value_type(argv[0]) ){
+ case SQLITE_FLOAT: {
+ double r1, r2;
+ char zBuf[50];
+ r1 = sqlite3_value_double(argv[0]);
+ sqlite3_snprintf(sizeof(zBuf), zBuf, "%!.15g", r1);
+ sqlite3AtoF(zBuf, &r2, 20, SQLITE_UTF8);
+ if( r1!=r2 ){
+ sqlite3_snprintf(sizeof(zBuf), zBuf, "%!.20e", r1);
+ }
+ sqlite3_result_text(context, zBuf, -1, SQLITE_TRANSIENT);
+ break;
+ }
+ case SQLITE_INTEGER: {
+ sqlite3_result_value(context, argv[0]);
+ break;
+ }
+ case SQLITE_BLOB: {
+ char *zText = 0;
+ char const *zBlob = sqlite3_value_blob(argv[0]);
+ int nBlob = sqlite3_value_bytes(argv[0]);
+ assert( zBlob==sqlite3_value_blob(argv[0]) ); /* No encoding change */
+ zText = (char *)contextMalloc(context, (2*(i64)nBlob)+4);
+ if( zText ){
+ int i;
+ for(i=0; i<nBlob; i++){
+ zText[(i*2)+2] = hexdigits[(zBlob[i]>>4)&0x0F];
+ zText[(i*2)+3] = hexdigits[(zBlob[i])&0x0F];
+ }
+ zText[(nBlob*2)+2] = '\'';
+ zText[(nBlob*2)+3] = '\0';
+ zText[0] = 'X';
+ zText[1] = '\'';
+ sqlite3_result_text(context, zText, -1, SQLITE_TRANSIENT);
+ sqlite3_free(zText);
+ }
+ break;
+ }
+ case SQLITE_TEXT: {
+ int i,j;
+ u64 n;
+ const unsigned char *zArg = sqlite3_value_text(argv[0]);
+ char *z;
+
+ if( zArg==0 ) return;
+ for(i=0, n=0; zArg[i]; i++){ if( zArg[i]=='\'' ) n++; }
+ z = contextMalloc(context, ((i64)i)+((i64)n)+3);
+ if( z ){
+ z[0] = '\'';
+ for(i=0, j=1; zArg[i]; i++){
+ z[j++] = zArg[i];
+ if( zArg[i]=='\'' ){
+ z[j++] = '\'';
+ }
+ }
+ z[j++] = '\'';
+ z[j] = 0;
+ sqlite3_result_text(context, z, j, sqlite3_free);
+ }
+ break;
+ }
+ default: {
+ assert( sqlite3_value_type(argv[0])==SQLITE_NULL );
+ sqlite3_result_text(context, "NULL", 4, SQLITE_STATIC);
+ break;
+ }
+ }
+}
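+
+/* Illustrative examples of quote() behavior:
+**
+**   SELECT quote('it''s');   -- 'it''s'   (embedded quote is doubled)
+**   SELECT quote(X'01FF');   -- X'01FF'   (blob rendered as a hex literal)
+**   SELECT quote(NULL);      -- NULL      (the four-character string)
+**   SELECT quote(3.5);       -- 3.5       (numeric values pass through)
+*/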
+
+/*
+** The unicode() function. Return the integer unicode code-point value
+** for the first character of the input string.
+*/
+static void unicodeFunc(
+ sqlite3_context *context,
+ int argc,
+ sqlite3_value **argv
+){
+ const unsigned char *z = sqlite3_value_text(argv[0]);
+ (void)argc;
+ if( z && z[0] ) sqlite3_result_int(context, sqlite3Utf8Read(&z));
+}
+
+/*
+** The char() function takes zero or more arguments, each of which is
+** an integer. It constructs a string where each character of the string
+** is the unicode character for the corresponding integer argument.
+*/
+static void charFunc(
+ sqlite3_context *context,
+ int argc,
+ sqlite3_value **argv
+){
+ unsigned char *z, *zOut;
+ int i;
+ zOut = z = sqlite3_malloc64( argc*4+1 );
+ if( z==0 ){
+ sqlite3_result_error_nomem(context);
+ return;
+ }
+ for(i=0; i<argc; i++){
+ sqlite3_int64 x;
+ unsigned c;
+ x = sqlite3_value_int64(argv[i]);
+ if( x<0 || x>0x10ffff ) x = 0xfffd;
+ c = (unsigned)(x & 0x1fffff);
+ if( c<0x00080 ){
+ *zOut++ = (u8)(c&0xFF);
+ }else if( c<0x00800 ){
+ *zOut++ = 0xC0 + (u8)((c>>6)&0x1F);
+ *zOut++ = 0x80 + (u8)(c & 0x3F);
+ }else if( c<0x10000 ){
+ *zOut++ = 0xE0 + (u8)((c>>12)&0x0F);
+ *zOut++ = 0x80 + (u8)((c>>6) & 0x3F);
+ *zOut++ = 0x80 + (u8)(c & 0x3F);
+ }else{
+ *zOut++ = 0xF0 + (u8)((c>>18) & 0x07);
+ *zOut++ = 0x80 + (u8)((c>>12) & 0x3F);
+ *zOut++ = 0x80 + (u8)((c>>6) & 0x3F);
+ *zOut++ = 0x80 + (u8)(c & 0x3F);
+    }
+ }
+ sqlite3_result_text64(context, (char*)z, zOut-z, sqlite3_free, SQLITE_UTF8);
+}
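+
+/* Illustrative examples of char() behavior: each argument is emitted as one
+** UTF-8 encoded character, with out-of-range values mapped to U+FFFD:
+**
+**   SELECT char(72,105);    -- 'Hi'
+**   SELECT char(8364);      -- Euro sign, encoded as the bytes E2 82 AC
+**   SELECT char(1114112);   -- U+FFFD (0x110000 is past the Unicode range)
+*/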
+
+/*
+** The hex() function. Interpret the argument as a blob. Return
+** a hexadecimal rendering as text.
+*/
+static void hexFunc(
+ sqlite3_context *context,
+ int argc,
+ sqlite3_value **argv
+){
+ int i, n;
+ const unsigned char *pBlob;
+ char *zHex, *z;
+ assert( argc==1 );
+ UNUSED_PARAMETER(argc);
+ pBlob = sqlite3_value_blob(argv[0]);
+ n = sqlite3_value_bytes(argv[0]);
+ assert( pBlob==sqlite3_value_blob(argv[0]) ); /* No encoding change */
+ z = zHex = contextMalloc(context, ((i64)n)*2 + 1);
+ if( zHex ){
+ for(i=0; i<n; i++, pBlob++){
+ unsigned char c = *pBlob;
+ *(z++) = hexdigits[(c>>4)&0xf];
+ *(z++) = hexdigits[c&0xf];
+ }
+ *z = 0;
+ sqlite3_result_text(context, zHex, n*2, sqlite3_free);
+ }
+}
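+
+/* Illustrative examples of hex() behavior; the argument is interpreted as a
+** blob, so text is first reduced to its UTF-8 bytes:
+**
+**   SELECT hex('abc');     -- '616263'
+**   SELECT hex(X'01ff');   -- '01FF'
+*/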
+
+/*
+** The zeroblob(N) function returns a zero-filled blob of size N bytes.
+*/
+static void zeroblobFunc(
+ sqlite3_context *context,
+ int argc,
+ sqlite3_value **argv
+){
+ i64 n;
+ int rc;
+ assert( argc==1 );
+ UNUSED_PARAMETER(argc);
+ n = sqlite3_value_int64(argv[0]);
+ if( n<0 ) n = 0;
+ rc = sqlite3_result_zeroblob64(context, n); /* IMP: R-00293-64994 */
+ if( rc ){
+ sqlite3_result_error_code(context, rc);
+ }
+}
+
+/*
+** The replace() function. Three arguments are all strings: call
+** them A, B, and C. The result is also a string which is derived
+** from A by replacing every occurrence of B with C. The match
+** must be exact. Collating sequences are not used.
+*/
+static void replaceFunc(
+ sqlite3_context *context,
+ int argc,
+ sqlite3_value **argv
+){
+ const unsigned char *zStr; /* The input string A */
+ const unsigned char *zPattern; /* The pattern string B */
+ const unsigned char *zRep; /* The replacement string C */
+ unsigned char *zOut; /* The output */
+ int nStr; /* Size of zStr */
+ int nPattern; /* Size of zPattern */
+ int nRep; /* Size of zRep */
+ i64 nOut; /* Maximum size of zOut */
+ int loopLimit; /* Last zStr[] that might match zPattern[] */
+ int i, j; /* Loop counters */
+  unsigned cntExpand;          /* Number of zOut expansions */
+ sqlite3 *db = sqlite3_context_db_handle(context);
+
+ assert( argc==3 );
+ UNUSED_PARAMETER(argc);
+ zStr = sqlite3_value_text(argv[0]);
+ if( zStr==0 ) return;
+ nStr = sqlite3_value_bytes(argv[0]);
+ assert( zStr==sqlite3_value_text(argv[0]) ); /* No encoding change */
+ zPattern = sqlite3_value_text(argv[1]);
+ if( zPattern==0 ){
+ assert( sqlite3_value_type(argv[1])==SQLITE_NULL
+ || sqlite3_context_db_handle(context)->mallocFailed );
+ return;
+ }
+ if( zPattern[0]==0 ){
+ assert( sqlite3_value_type(argv[1])!=SQLITE_NULL );
+ sqlite3_result_value(context, argv[0]);
+ return;
+ }
+ nPattern = sqlite3_value_bytes(argv[1]);
+ assert( zPattern==sqlite3_value_text(argv[1]) ); /* No encoding change */
+ zRep = sqlite3_value_text(argv[2]);
+ if( zRep==0 ) return;
+ nRep = sqlite3_value_bytes(argv[2]);
+ assert( zRep==sqlite3_value_text(argv[2]) );
+ nOut = nStr + 1;
+ assert( nOut<SQLITE_MAX_LENGTH );
+ zOut = contextMalloc(context, (i64)nOut);
+ if( zOut==0 ){
+ return;
+ }
+ loopLimit = nStr - nPattern;
+ cntExpand = 0;
+ for(i=j=0; i<=loopLimit; i++){
+ if( zStr[i]!=zPattern[0] || memcmp(&zStr[i], zPattern, nPattern) ){
+ zOut[j++] = zStr[i];
+ }else{
+ if( nRep>nPattern ){
+ nOut += nRep - nPattern;
+ testcase( nOut-1==db->aLimit[SQLITE_LIMIT_LENGTH] );
+ testcase( nOut-2==db->aLimit[SQLITE_LIMIT_LENGTH] );
+ if( nOut-1>db->aLimit[SQLITE_LIMIT_LENGTH] ){
+ sqlite3_result_error_toobig(context);
+ sqlite3_free(zOut);
+ return;
+ }
+ cntExpand++;
+ if( (cntExpand&(cntExpand-1))==0 ){
+ /* Grow the size of the output buffer only on substitutions
+ ** whose index is a power of two: 1, 2, 4, 8, 16, 32, ... */
+ u8 *zOld;
+ zOld = zOut;
+ zOut = sqlite3Realloc(zOut, (int)nOut + (nOut - nStr - 1));
+ if( zOut==0 ){
+ sqlite3_result_error_nomem(context);
+ sqlite3_free(zOld);
+ return;
+ }
+ }
+ }
+ memcpy(&zOut[j], zRep, nRep);
+ j += nRep;
+ i += nPattern-1;
+ }
+ }
+ assert( j+nStr-i+1<=nOut );
+ memcpy(&zOut[j], &zStr[i], nStr-i);
+ j += nStr - i;
+ assert( j<=nOut );
+ zOut[j] = 0;
+ sqlite3_result_text(context, (char*)zOut, j, sqlite3_free);
+}
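+
+/* Illustrative examples of replace() behavior; matches are exact and
+** byte-wise, and an empty pattern leaves the input unchanged:
+**
+**   SELECT replace('aaa','a','bb');   -- 'bbbbbb'
+**   SELECT replace('abc','','x');     -- 'abc'
+**
+** Note that when C is longer than B the output buffer above is regrown only
+** on power-of-two substitution counts, keeping the number of reallocations
+** logarithmic in the number of substitutions.
+*/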
+
+/*
+** Implementation of the TRIM(), LTRIM(), and RTRIM() functions.
+** The userdata is 0x1 for left trim, 0x2 for right trim, 0x3 for both.
+*/
+static void trimFunc(
+ sqlite3_context *context,
+ int argc,
+ sqlite3_value **argv
+){
+ const unsigned char *zIn; /* Input string */
+ const unsigned char *zCharSet; /* Set of characters to trim */
+ int nIn; /* Number of bytes in input */
+ int flags; /* 1: trimleft 2: trimright 3: trim */
+ int i; /* Loop counter */
+ unsigned char *aLen = 0; /* Length of each character in zCharSet */
+ unsigned char **azChar = 0; /* Individual characters in zCharSet */
+ int nChar; /* Number of characters in zCharSet */
+
+ if( sqlite3_value_type(argv[0])==SQLITE_NULL ){
+ return;
+ }
+ zIn = sqlite3_value_text(argv[0]);
+ if( zIn==0 ) return;
+ nIn = sqlite3_value_bytes(argv[0]);
+ assert( zIn==sqlite3_value_text(argv[0]) );
+ if( argc==1 ){
+ static const unsigned char lenOne[] = { 1 };
+ static unsigned char * const azOne[] = { (u8*)" " };
+ nChar = 1;
+ aLen = (u8*)lenOne;
+ azChar = (unsigned char **)azOne;
+ zCharSet = 0;
+ }else if( (zCharSet = sqlite3_value_text(argv[1]))==0 ){
+ return;
+ }else{
+ const unsigned char *z;
+ for(z=zCharSet, nChar=0; *z; nChar++){
+ SQLITE_SKIP_UTF8(z);
+ }
+ if( nChar>0 ){
+ azChar = contextMalloc(context, ((i64)nChar)*(sizeof(char*)+1));
+ if( azChar==0 ){
+ return;
+ }
+ aLen = (unsigned char*)&azChar[nChar];
+ for(z=zCharSet, nChar=0; *z; nChar++){
+ azChar[nChar] = (unsigned char *)z;
+ SQLITE_SKIP_UTF8(z);
+ aLen[nChar] = (u8)(z - azChar[nChar]);
+ }
+ }
+ }
+ if( nChar>0 ){
+ flags = SQLITE_PTR_TO_INT(sqlite3_user_data(context));
+ if( flags & 1 ){
+ while( nIn>0 ){
+ int len = 0;
+ for(i=0; i<nChar; i++){
+ len = aLen[i];
+ if( len<=nIn && memcmp(zIn, azChar[i], len)==0 ) break;
+ }
+ if( i>=nChar ) break;
+ zIn += len;
+ nIn -= len;
+ }
+ }
+ if( flags & 2 ){
+ while( nIn>0 ){
+ int len = 0;
+ for(i=0; i<nChar; i++){
+ len = aLen[i];
+ if( len<=nIn && memcmp(&zIn[nIn-len],azChar[i],len)==0 ) break;
+ }
+ if( i>=nChar ) break;
+ nIn -= len;
+ }
+ }
+ if( zCharSet ){
+ sqlite3_free(azChar);
+ }
+ }
+ sqlite3_result_text(context, (char*)zIn, nIn, SQLITE_TRANSIENT);
+}
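+
+/* Illustrative examples of the trim family; the optional second argument is
+** a set of characters to strip, not a substring:
+**
+**   SELECT trim('  hi  ');         -- 'hi'   (default set is a single space)
+**   SELECT ltrim('xyhixy','xy');   -- 'hixy'
+**   SELECT rtrim('xyhixy','yx');   -- 'xyhi'
+*/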
+
+
+#ifdef SQLITE_ENABLE_UNKNOWN_SQL_FUNCTION
+/*
+** The "unknown" function is automatically substituted in place of
+** any unrecognized function name when doing an EXPLAIN or EXPLAIN QUERY PLAN
+** when the SQLITE_ENABLE_UNKNOWN_SQL_FUNCTION compile-time option is used.
+** When the "sqlite3" command-line shell is built with this option,
+** an EXPLAIN or EXPLAIN QUERY PLAN for complex queries
+** involving application-defined functions can be examined in a generic
+** sqlite3 shell.
+*/
+static void unknownFunc(
+ sqlite3_context *context,
+ int argc,
+ sqlite3_value **argv
+){
+  /* no-op */
+  UNUSED_PARAMETER(context);
+  UNUSED_PARAMETER(argc);
+  UNUSED_PARAMETER(argv);
+}
+#endif /*SQLITE_ENABLE_UNKNOWN_SQL_FUNCTION*/
+
+
+/* IMP: R-25361-16150 This function is omitted from SQLite by default. It
+** is only available if the SQLITE_SOUNDEX compile-time option is used
+** when SQLite is built.
+*/
+#ifdef SQLITE_SOUNDEX
+/*
+** Compute the soundex encoding of a word.
+**
+** IMP: R-59782-00072 The soundex(X) function returns a string that is the
+** soundex encoding of the string X.
+*/
+static void soundexFunc(
+ sqlite3_context *context,
+ int argc,
+ sqlite3_value **argv
+){
+ char zResult[8];
+ const u8 *zIn;
+ int i, j;
+ static const unsigned char iCode[] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 1, 2, 3, 0, 1, 2, 0, 0, 2, 2, 4, 5, 5, 0,
+ 1, 2, 6, 2, 3, 0, 1, 0, 2, 0, 2, 0, 0, 0, 0, 0,
+ 0, 0, 1, 2, 3, 0, 1, 2, 0, 0, 2, 2, 4, 5, 5, 0,
+ 1, 2, 6, 2, 3, 0, 1, 0, 2, 0, 2, 0, 0, 0, 0, 0,
+ };
+ assert( argc==1 );
+ zIn = (u8*)sqlite3_value_text(argv[0]);
+ if( zIn==0 ) zIn = (u8*)"";
+ for(i=0; zIn[i] && !sqlite3Isalpha(zIn[i]); i++){}
+ if( zIn[i] ){
+ u8 prevcode = iCode[zIn[i]&0x7f];
+ zResult[0] = sqlite3Toupper(zIn[i]);
+ for(j=1; j<4 && zIn[i]; i++){
+ int code = iCode[zIn[i]&0x7f];
+ if( code>0 ){
+ if( code!=prevcode ){
+ prevcode = code;
+ zResult[j++] = code + '0';
+ }
+ }else{
+ prevcode = 0;
+ }
+ }
+ while( j<4 ){
+ zResult[j++] = '0';
+ }
+ zResult[j] = 0;
+ sqlite3_result_text(context, zResult, 4, SQLITE_TRANSIENT);
+ }else{
+ /* IMP: R-64894-50321 The string "?000" is returned if the argument
+ ** is NULL or contains no ASCII alphabetic characters. */
+ sqlite3_result_text(context, "?000", 4, SQLITE_STATIC);
+ }
+}
+#endif /* SQLITE_SOUNDEX */
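+
+/* Illustrative examples (only meaningful when built with -DSQLITE_SOUNDEX):
+**
+**   SELECT soundex('Robert');   -- 'R163'
+**   SELECT soundex('Rupert');   -- 'R163'  (same code: similar sounds group)
+**   SELECT soundex('12345');    -- '?000'  (no ASCII alphabetic characters)
+*/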
+
+#ifndef SQLITE_OMIT_LOAD_EXTENSION
+/*
+** A function that loads a shared-library extension then returns NULL.
+*/
+static void loadExt(sqlite3_context *context, int argc, sqlite3_value **argv){
+ const char *zFile = (const char *)sqlite3_value_text(argv[0]);
+ const char *zProc;
+ sqlite3 *db = sqlite3_context_db_handle(context);
+ char *zErrMsg = 0;
+
+ /* Disallow the load_extension() SQL function unless the SQLITE_LoadExtFunc
+ ** flag is set. See the sqlite3_enable_load_extension() API.
+ */
+ if( (db->flags & SQLITE_LoadExtFunc)==0 ){
+ sqlite3_result_error(context, "not authorized", -1);
+ return;
+ }
+
+ if( argc==2 ){
+ zProc = (const char *)sqlite3_value_text(argv[1]);
+ }else{
+ zProc = 0;
+ }
+ if( zFile && sqlite3_load_extension(db, zFile, zProc, &zErrMsg) ){
+ sqlite3_result_error(context, zErrMsg, -1);
+ sqlite3_free(zErrMsg);
+ }
+}
+#endif
+
+
+/*
+** An instance of the following structure holds the context of a
+** sum() or avg() aggregate computation.
+*/
+typedef struct SumCtx SumCtx;
+struct SumCtx {
+ double rSum; /* Floating point sum */
+ i64 iSum; /* Integer sum */
+ i64 cnt; /* Number of elements summed */
+ u8 overflow; /* True if integer overflow seen */
+ u8 approx; /* True if non-integer value was input to the sum */
+};
+
+/*
+** Routines used to compute the sum, average, and total.
+**
+** The SUM() function follows the (broken) SQL standard which means
+** that it returns NULL if it sums over no inputs. TOTAL returns
+** 0.0 in that case.  In addition, TOTAL always returns a float whereas
+** SUM might return an integer if it never encounters a floating point
+** value.  TOTAL never fails, but SUM might throw an exception if
+** it overflows an integer.
+*/
+static void sumStep(sqlite3_context *context, int argc, sqlite3_value **argv){
+ SumCtx *p;
+ int type;
+ assert( argc==1 );
+ UNUSED_PARAMETER(argc);
+ p = sqlite3_aggregate_context(context, sizeof(*p));
+ type = sqlite3_value_numeric_type(argv[0]);
+ if( p && type!=SQLITE_NULL ){
+ p->cnt++;
+ if( type==SQLITE_INTEGER ){
+ i64 v = sqlite3_value_int64(argv[0]);
+ p->rSum += v;
+ if( (p->approx|p->overflow)==0 && sqlite3AddInt64(&p->iSum, v) ){
+ p->approx = p->overflow = 1;
+ }
+ }else{
+ p->rSum += sqlite3_value_double(argv[0]);
+ p->approx = 1;
+ }
+ }
+}
+#ifndef SQLITE_OMIT_WINDOWFUNC
+static void sumInverse(sqlite3_context *context, int argc, sqlite3_value**argv){
+ SumCtx *p;
+ int type;
+ assert( argc==1 );
+ UNUSED_PARAMETER(argc);
+ p = sqlite3_aggregate_context(context, sizeof(*p));
+ type = sqlite3_value_numeric_type(argv[0]);
+ /* p is always non-NULL because sumStep() will have been called first
+ ** to initialize it */
+ if( ALWAYS(p) && type!=SQLITE_NULL ){
+ assert( p->cnt>0 );
+ p->cnt--;
+ assert( type==SQLITE_INTEGER || p->approx );
+ if( type==SQLITE_INTEGER && p->approx==0 ){
+ i64 v = sqlite3_value_int64(argv[0]);
+ p->rSum -= v;
+ p->iSum -= v;
+ }else{
+ p->rSum -= sqlite3_value_double(argv[0]);
+ }
+ }
+}
+#else
+# define sumInverse 0
+#endif /* SQLITE_OMIT_WINDOWFUNC */
+static void sumFinalize(sqlite3_context *context){
+ SumCtx *p;
+ p = sqlite3_aggregate_context(context, 0);
+ if( p && p->cnt>0 ){
+ if( p->overflow ){
+ sqlite3_result_error(context,"integer overflow",-1);
+ }else if( p->approx ){
+ sqlite3_result_double(context, p->rSum);
+ }else{
+ sqlite3_result_int64(context, p->iSum);
+ }
+ }
+}
+static void avgFinalize(sqlite3_context *context){
+ SumCtx *p;
+ p = sqlite3_aggregate_context(context, 0);
+ if( p && p->cnt>0 ){
+ sqlite3_result_double(context, p->rSum/(double)p->cnt);
+ }
+}
+static void totalFinalize(sqlite3_context *context){
+ SumCtx *p;
+ p = sqlite3_aggregate_context(context, 0);
+ /* (double)0 In case of SQLITE_OMIT_FLOATING_POINT... */
+ sqlite3_result_double(context, p ? p->rSum : (double)0);
+}
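+
+/* Illustrative example of the SUM()/TOTAL() distinction described above:
+**
+**   CREATE TABLE t(x);
+**   SELECT sum(x), total(x) FROM t;   -- NULL | 0.0  (no rows summed)
+**   INSERT INTO t VALUES(1),(2);
+**   SELECT sum(x), total(x) FROM t;   -- 3 | 3.0     (integer vs. float)
+*/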
+
+/*
+** The following structure keeps track of state information for the
+** count() aggregate function.
+*/
+typedef struct CountCtx CountCtx;
+struct CountCtx {
+ i64 n;
+#ifdef SQLITE_DEBUG
+ int bInverse; /* True if xInverse() ever called */
+#endif
+};
+
+/*
+** Routines to implement the count() aggregate function.
+*/
+static void countStep(sqlite3_context *context, int argc, sqlite3_value **argv){
+ CountCtx *p;
+ p = sqlite3_aggregate_context(context, sizeof(*p));
+ if( (argc==0 || SQLITE_NULL!=sqlite3_value_type(argv[0])) && p ){
+ p->n++;
+ }
+
+#ifndef SQLITE_OMIT_DEPRECATED
+ /* The sqlite3_aggregate_count() function is deprecated. But just to make
+ ** sure it still operates correctly, verify that its count agrees with our
+ ** internal count when using count(*) and when the total count can be
+ ** expressed as a 32-bit integer. */
+ assert( argc==1 || p==0 || p->n>0x7fffffff || p->bInverse
+ || p->n==sqlite3_aggregate_count(context) );
+#endif
+}
+static void countFinalize(sqlite3_context *context){
+ CountCtx *p;
+ p = sqlite3_aggregate_context(context, 0);
+ sqlite3_result_int64(context, p ? p->n : 0);
+}
+#ifndef SQLITE_OMIT_WINDOWFUNC
+static void countInverse(sqlite3_context *ctx, int argc, sqlite3_value **argv){
+ CountCtx *p;
+ p = sqlite3_aggregate_context(ctx, sizeof(*p));
+ /* p is always non-NULL since countStep() will have been called first */
+ if( (argc==0 || SQLITE_NULL!=sqlite3_value_type(argv[0])) && ALWAYS(p) ){
+ p->n--;
+#ifdef SQLITE_DEBUG
+ p->bInverse = 1;
+#endif
+ }
+}
+#else
+# define countInverse 0
+#endif /* SQLITE_OMIT_WINDOWFUNC */
+
+/*
+** Routines to implement min() and max() aggregate functions.
+*/
+static void minmaxStep(
+ sqlite3_context *context,
+ int NotUsed,
+ sqlite3_value **argv
+){
+ Mem *pArg = (Mem *)argv[0];
+ Mem *pBest;
+ UNUSED_PARAMETER(NotUsed);
+
+ pBest = (Mem *)sqlite3_aggregate_context(context, sizeof(*pBest));
+ if( !pBest ) return;
+
+ if( sqlite3_value_type(pArg)==SQLITE_NULL ){
+ if( pBest->flags ) sqlite3SkipAccumulatorLoad(context);
+ }else if( pBest->flags ){
+ int max;
+ int cmp;
+ CollSeq *pColl = sqlite3GetFuncCollSeq(context);
+ /* This step function is used for both the min() and max() aggregates,
+ ** the only difference between the two being that the sense of the
+ ** comparison is inverted. For the max() aggregate, the
+ ** sqlite3_user_data() function returns (void *)-1. For min() it
+ ** returns (void *)db, where db is the sqlite3* database pointer.
+ ** Therefore the next statement sets variable 'max' to 1 for the max()
+ ** aggregate, or 0 for min().
+ */
+ max = sqlite3_user_data(context)!=0;
+ cmp = sqlite3MemCompare(pBest, pArg, pColl);
+ if( (max && cmp<0) || (!max && cmp>0) ){
+ sqlite3VdbeMemCopy(pBest, pArg);
+ }else{
+ sqlite3SkipAccumulatorLoad(context);
+ }
+ }else{
+ pBest->db = sqlite3_context_db_handle(context);
+ sqlite3VdbeMemCopy(pBest, pArg);
+ }
+}
+static void minMaxValueFinalize(sqlite3_context *context, int bValue){
+ sqlite3_value *pRes;
+ pRes = (sqlite3_value *)sqlite3_aggregate_context(context, 0);
+ if( pRes ){
+ if( pRes->flags ){
+ sqlite3_result_value(context, pRes);
+ }
+ if( bValue==0 ) sqlite3VdbeMemRelease(pRes);
+ }
+}
+#ifndef SQLITE_OMIT_WINDOWFUNC
+static void minMaxValue(sqlite3_context *context){
+ minMaxValueFinalize(context, 1);
+}
+#else
+# define minMaxValue 0
+#endif /* SQLITE_OMIT_WINDOWFUNC */
+static void minMaxFinalize(sqlite3_context *context){
+ minMaxValueFinalize(context, 0);
+}
+
+/*
+** group_concat(EXPR, ?SEPARATOR?)
+*/
+static void groupConcatStep(
+ sqlite3_context *context,
+ int argc,
+ sqlite3_value **argv
+){
+ const char *zVal;
+ StrAccum *pAccum;
+ const char *zSep;
+ int nVal, nSep;
+ assert( argc==1 || argc==2 );
+ if( sqlite3_value_type(argv[0])==SQLITE_NULL ) return;
+ pAccum = (StrAccum*)sqlite3_aggregate_context(context, sizeof(*pAccum));
+
+ if( pAccum ){
+ sqlite3 *db = sqlite3_context_db_handle(context);
+ int firstTerm = pAccum->mxAlloc==0;
+ pAccum->mxAlloc = db->aLimit[SQLITE_LIMIT_LENGTH];
+ if( !firstTerm ){
+ if( argc==2 ){
+ zSep = (char*)sqlite3_value_text(argv[1]);
+ nSep = sqlite3_value_bytes(argv[1]);
+ }else{
+ zSep = ",";
+ nSep = 1;
+ }
+ if( zSep ) sqlite3_str_append(pAccum, zSep, nSep);
+ }
+ zVal = (char*)sqlite3_value_text(argv[0]);
+ nVal = sqlite3_value_bytes(argv[0]);
+ if( zVal ) sqlite3_str_append(pAccum, zVal, nVal);
+ }
+}
+#ifndef SQLITE_OMIT_WINDOWFUNC
+static void groupConcatInverse(
+ sqlite3_context *context,
+ int argc,
+ sqlite3_value **argv
+){
+ int n;
+ StrAccum *pAccum;
+ assert( argc==1 || argc==2 );
+ if( sqlite3_value_type(argv[0])==SQLITE_NULL ) return;
+ pAccum = (StrAccum*)sqlite3_aggregate_context(context, sizeof(*pAccum));
+  /* pAccum is always non-NULL since groupConcatStep() will always have
+  ** run first to initialize it */
+ if( ALWAYS(pAccum) ){
+ n = sqlite3_value_bytes(argv[0]);
+ if( argc==2 ){
+ n += sqlite3_value_bytes(argv[1]);
+ }else{
+ n++;
+ }
+ if( n>=(int)pAccum->nChar ){
+ pAccum->nChar = 0;
+ }else{
+ pAccum->nChar -= n;
+ memmove(pAccum->zText, &pAccum->zText[n], pAccum->nChar);
+ }
+ if( pAccum->nChar==0 ) pAccum->mxAlloc = 0;
+ }
+}
+#else
+# define groupConcatInverse 0
+#endif /* SQLITE_OMIT_WINDOWFUNC */
+static void groupConcatFinalize(sqlite3_context *context){
+ StrAccum *pAccum;
+ pAccum = sqlite3_aggregate_context(context, 0);
+ if( pAccum ){
+ if( pAccum->accError==SQLITE_TOOBIG ){
+ sqlite3_result_error_toobig(context);
+ }else if( pAccum->accError==SQLITE_NOMEM ){
+ sqlite3_result_error_nomem(context);
+ }else{
+ sqlite3_result_text(context, sqlite3StrAccumFinish(pAccum), -1,
+ sqlite3_free);
+ }
+ }
+}
+#ifndef SQLITE_OMIT_WINDOWFUNC
+static void groupConcatValue(sqlite3_context *context){
+ sqlite3_str *pAccum;
+ pAccum = (sqlite3_str*)sqlite3_aggregate_context(context, 0);
+ if( pAccum ){
+ if( pAccum->accError==SQLITE_TOOBIG ){
+ sqlite3_result_error_toobig(context);
+ }else if( pAccum->accError==SQLITE_NOMEM ){
+ sqlite3_result_error_nomem(context);
+ }else{
+ const char *zText = sqlite3_str_value(pAccum);
+ sqlite3_result_text(context, zText, -1, SQLITE_TRANSIENT);
+ }
+ }
+}
+#else
+# define groupConcatValue 0
+#endif /* SQLITE_OMIT_WINDOWFUNC */
+
+/*
+** This routine does per-connection function registration. Most
+** of the built-in functions above are part of the global function set.
+** This routine only deals with those that are not global.
+*/
+SQLITE_PRIVATE void sqlite3RegisterPerConnectionBuiltinFunctions(sqlite3 *db){
+ int rc = sqlite3_overload_function(db, "MATCH", 2);
+ assert( rc==SQLITE_NOMEM || rc==SQLITE_OK );
+ if( rc==SQLITE_NOMEM ){
+ sqlite3OomFault(db);
+ }
+}
+
+/*
+** Re-register the built-in LIKE functions. The caseSensitive
+** parameter determines whether or not the LIKE operator is case
+** sensitive.
+*/
+SQLITE_PRIVATE void sqlite3RegisterLikeFunctions(sqlite3 *db, int caseSensitive){
+ struct compareInfo *pInfo;
+ int flags;
+ if( caseSensitive ){
+ pInfo = (struct compareInfo*)&likeInfoAlt;
+ flags = SQLITE_FUNC_LIKE | SQLITE_FUNC_CASE;
+ }else{
+ pInfo = (struct compareInfo*)&likeInfoNorm;
+ flags = SQLITE_FUNC_LIKE;
+ }
+ sqlite3CreateFunc(db, "like", 2, SQLITE_UTF8, pInfo, likeFunc, 0, 0, 0, 0, 0);
+ sqlite3CreateFunc(db, "like", 3, SQLITE_UTF8, pInfo, likeFunc, 0, 0, 0, 0, 0);
+ sqlite3FindFunction(db, "like", 2, SQLITE_UTF8, 0)->funcFlags |= flags;
+ sqlite3FindFunction(db, "like", 3, SQLITE_UTF8, 0)->funcFlags |= flags;
+}
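+
+/* Illustrative note: this routine is what the case_sensitive_like pragma
+** invokes, so at the SQL level (assuming a default build):
+**
+**   PRAGMA case_sensitive_like=ON;    -- 'a' LIKE 'A' now evaluates to 0
+**   PRAGMA case_sensitive_like=OFF;   -- 'a' LIKE 'A' evaluates to 1 again
+*/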
+
+/*
+** pExpr points to an expression which implements a function. If
+** it is appropriate to apply the LIKE optimization to that function
+** then set aWc[0] through aWc[2] to the wildcard characters and the
+** escape character and then return TRUE. If the function is not a
+** LIKE-style function then return FALSE.
+**
+** The expression "a LIKE b ESCAPE c" is only considered a valid LIKE
+** operator if c is a string literal that is exactly one byte in length.
+** That one byte is stored in aWc[3]. aWc[3] is set to zero if there is
+** no ESCAPE clause.
+**
+** *pIsNocase is set to true if uppercase and lowercase are equivalent for
+** the function (default for LIKE). If the function makes the distinction
+** between uppercase and lowercase (as does GLOB) then *pIsNocase is set to
+** false.
+*/
+SQLITE_PRIVATE int sqlite3IsLikeFunction(sqlite3 *db, Expr *pExpr, int *pIsNocase, char *aWc){
+ FuncDef *pDef;
+ int nExpr;
+ if( pExpr->op!=TK_FUNCTION || !pExpr->x.pList ){
+ return 0;
+ }
+ assert( !ExprHasProperty(pExpr, EP_xIsSelect) );
+ nExpr = pExpr->x.pList->nExpr;
+ pDef = sqlite3FindFunction(db, pExpr->u.zToken, nExpr, SQLITE_UTF8, 0);
+#ifdef SQLITE_ENABLE_UNKNOWN_SQL_FUNCTION
+ if( pDef==0 ) return 0;
+#endif
+ if( NEVER(pDef==0) || (pDef->funcFlags & SQLITE_FUNC_LIKE)==0 ){
+ return 0;
+ }
+
+  /* The memcpy() statement assumes that the wildcard characters are
+  ** the first three members of the compareInfo structure.  The
+  ** asserts() that follow verify that assumption.
+ */
+ memcpy(aWc, pDef->pUserData, 3);
+ assert( (char*)&likeInfoAlt == (char*)&likeInfoAlt.matchAll );
+ assert( &((char*)&likeInfoAlt)[1] == (char*)&likeInfoAlt.matchOne );
+ assert( &((char*)&likeInfoAlt)[2] == (char*)&likeInfoAlt.matchSet );
+
+ if( nExpr<3 ){
+ aWc[3] = 0;
+ }else{
+ Expr *pEscape = pExpr->x.pList->a[2].pExpr;
+ char *zEscape;
+ if( pEscape->op!=TK_STRING ) return 0;
+ zEscape = pEscape->u.zToken;
+ if( zEscape[0]==0 || zEscape[1]!=0 ) return 0;
+ if( zEscape[0]==aWc[0] ) return 0;
+ if( zEscape[0]==aWc[1] ) return 0;
+ aWc[3] = zEscape[0];
+ }
+
+ *pIsNocase = (pDef->funcFlags & SQLITE_FUNC_CASE)==0;
+ return 1;
+}
+
+/*
+** All of the FuncDef structures in the aBuiltinFunc[] array below are added
+** to the global function hash table.  This occurs at start-time (as
+** a consequence of calling sqlite3_initialize()).
+**
+** After this routine runs, all of the built-in functions are available
+** for use from SQL.
+*/
+SQLITE_PRIVATE void sqlite3RegisterBuiltinFunctions(void){
+ /*
+ ** The following array holds FuncDef structures for all of the functions
+ ** defined in this file.
+ **
+ ** The array cannot be constant since changes are made to the
+ ** FuncDef.pHash elements at start-time. The elements of this array
+ ** are read-only after initialization is complete.
+ **
+ ** For peak efficiency, put the most frequently used function last.
+ */
+ static FuncDef aBuiltinFunc[] = {
+/***** Functions only available with SQLITE_TESTCTRL_INTERNAL_FUNCTIONS *****/
+ TEST_FUNC(implies_nonnull_row, 2, INLINEFUNC_implies_nonnull_row, 0),
+ TEST_FUNC(expr_compare, 2, INLINEFUNC_expr_compare, 0),
+ TEST_FUNC(expr_implies_expr, 2, INLINEFUNC_expr_implies_expr, 0),
+#ifdef SQLITE_DEBUG
+ TEST_FUNC(affinity, 1, INLINEFUNC_affinity, 0),
+#endif
+/***** Regular functions *****/
+#ifdef SQLITE_SOUNDEX
+ FUNCTION(soundex, 1, 0, 0, soundexFunc ),
+#endif
+#ifndef SQLITE_OMIT_LOAD_EXTENSION
+ SFUNCTION(load_extension, 1, 0, 0, loadExt ),
+ SFUNCTION(load_extension, 2, 0, 0, loadExt ),
+#endif
+#if SQLITE_USER_AUTHENTICATION
+ FUNCTION(sqlite_crypt, 2, 0, 0, sqlite3CryptFunc ),
+#endif
+#ifndef SQLITE_OMIT_COMPILEOPTION_DIAGS
+ DFUNCTION(sqlite_compileoption_used,1, 0, 0, compileoptionusedFunc ),
+ DFUNCTION(sqlite_compileoption_get, 1, 0, 0, compileoptiongetFunc ),
+#endif /* SQLITE_OMIT_COMPILEOPTION_DIAGS */
+ INLINE_FUNC(unlikely, 1, INLINEFUNC_unlikely, SQLITE_FUNC_UNLIKELY),
+ INLINE_FUNC(likelihood, 2, INLINEFUNC_unlikely, SQLITE_FUNC_UNLIKELY),
+ INLINE_FUNC(likely, 1, INLINEFUNC_unlikely, SQLITE_FUNC_UNLIKELY),
+#ifdef SQLITE_ENABLE_OFFSET_SQL_FUNC
+ FUNCTION2(sqlite_offset, 1, 0, 0, noopFunc, SQLITE_FUNC_OFFSET|
+ SQLITE_FUNC_TYPEOF),
+#endif
+ FUNCTION(ltrim, 1, 1, 0, trimFunc ),
+ FUNCTION(ltrim, 2, 1, 0, trimFunc ),
+ FUNCTION(rtrim, 1, 2, 0, trimFunc ),
+ FUNCTION(rtrim, 2, 2, 0, trimFunc ),
+ FUNCTION(trim, 1, 3, 0, trimFunc ),
+ FUNCTION(trim, 2, 3, 0, trimFunc ),
+ FUNCTION(min, -1, 0, 1, minmaxFunc ),
+ FUNCTION(min, 0, 0, 1, 0 ),
+ WAGGREGATE(min, 1, 0, 1, minmaxStep, minMaxFinalize, minMaxValue, 0,
+ SQLITE_FUNC_MINMAX ),
+ FUNCTION(max, -1, 1, 1, minmaxFunc ),
+ FUNCTION(max, 0, 1, 1, 0 ),
+ WAGGREGATE(max, 1, 1, 1, minmaxStep, minMaxFinalize, minMaxValue, 0,
+ SQLITE_FUNC_MINMAX ),
+ FUNCTION2(typeof, 1, 0, 0, typeofFunc, SQLITE_FUNC_TYPEOF),
+ FUNCTION2(length, 1, 0, 0, lengthFunc, SQLITE_FUNC_LENGTH),
+ FUNCTION(instr, 2, 0, 0, instrFunc ),
+ FUNCTION(printf, -1, 0, 0, printfFunc ),
+ FUNCTION(unicode, 1, 0, 0, unicodeFunc ),
+ FUNCTION(char, -1, 0, 0, charFunc ),
+ FUNCTION(abs, 1, 0, 0, absFunc ),
+#ifndef SQLITE_OMIT_FLOATING_POINT
+ FUNCTION(round, 1, 0, 0, roundFunc ),
+ FUNCTION(round, 2, 0, 0, roundFunc ),
+#endif
+ FUNCTION(upper, 1, 0, 0, upperFunc ),
+ FUNCTION(lower, 1, 0, 0, lowerFunc ),
+ FUNCTION(hex, 1, 0, 0, hexFunc ),
+ INLINE_FUNC(ifnull, 2, INLINEFUNC_coalesce, 0 ),
+ VFUNCTION(random, 0, 0, 0, randomFunc ),
+ VFUNCTION(randomblob, 1, 0, 0, randomBlob ),
+ FUNCTION(nullif, 2, 0, 1, nullifFunc ),
+ DFUNCTION(sqlite_version, 0, 0, 0, versionFunc ),
+ DFUNCTION(sqlite_source_id, 0, 0, 0, sourceidFunc ),
+ FUNCTION(sqlite_log, 2, 0, 0, errlogFunc ),
+ FUNCTION(quote, 1, 0, 0, quoteFunc ),
+ VFUNCTION(last_insert_rowid, 0, 0, 0, last_insert_rowid),
+ VFUNCTION(changes, 0, 0, 0, changes ),
+ VFUNCTION(total_changes, 0, 0, 0, total_changes ),
+ FUNCTION(replace, 3, 0, 0, replaceFunc ),
+ FUNCTION(zeroblob, 1, 0, 0, zeroblobFunc ),
+ FUNCTION(substr, 2, 0, 0, substrFunc ),
+ FUNCTION(substr, 3, 0, 0, substrFunc ),
+ WAGGREGATE(sum, 1,0,0, sumStep, sumFinalize, sumFinalize, sumInverse, 0),
+ WAGGREGATE(total, 1,0,0, sumStep,totalFinalize,totalFinalize,sumInverse, 0),
+ WAGGREGATE(avg, 1,0,0, sumStep, avgFinalize, avgFinalize, sumInverse, 0),
+ WAGGREGATE(count, 0,0,0, countStep,
+ countFinalize, countFinalize, countInverse, SQLITE_FUNC_COUNT ),
+ WAGGREGATE(count, 1,0,0, countStep,
+ countFinalize, countFinalize, countInverse, 0 ),
+ WAGGREGATE(group_concat, 1, 0, 0, groupConcatStep,
+ groupConcatFinalize, groupConcatValue, groupConcatInverse, 0),
+ WAGGREGATE(group_concat, 2, 0, 0, groupConcatStep,
+ groupConcatFinalize, groupConcatValue, groupConcatInverse, 0),
+
+ LIKEFUNC(glob, 2, &globInfo, SQLITE_FUNC_LIKE|SQLITE_FUNC_CASE),
+#ifdef SQLITE_CASE_SENSITIVE_LIKE
+ LIKEFUNC(like, 2, &likeInfoAlt, SQLITE_FUNC_LIKE|SQLITE_FUNC_CASE),
+ LIKEFUNC(like, 3, &likeInfoAlt, SQLITE_FUNC_LIKE|SQLITE_FUNC_CASE),
+#else
+ LIKEFUNC(like, 2, &likeInfoNorm, SQLITE_FUNC_LIKE),
+ LIKEFUNC(like, 3, &likeInfoNorm, SQLITE_FUNC_LIKE),
+#endif
+#ifdef SQLITE_ENABLE_UNKNOWN_SQL_FUNCTION
+ FUNCTION(unknown, -1, 0, 0, unknownFunc ),
+#endif
+ FUNCTION(coalesce, 1, 0, 0, 0 ),
+ FUNCTION(coalesce, 0, 0, 0, 0 ),
+ INLINE_FUNC(coalesce, -1, INLINEFUNC_coalesce, 0 ),
+ INLINE_FUNC(iif, 3, INLINEFUNC_iif, 0 ),
+ };
+#ifndef SQLITE_OMIT_ALTERTABLE
+ sqlite3AlterFunctions();
+#endif
+ sqlite3WindowFunctions();
+ sqlite3RegisterDateTimeFunctions();
+ sqlite3InsertBuiltinFuncs(aBuiltinFunc, ArraySize(aBuiltinFunc));
+
+#if 0 /* Enable to print out how the built-in functions are hashed */
+ {
+ int i;
+ FuncDef *p;
+ for(i=0; i<SQLITE_FUNC_HASH_SZ; i++){
+ printf("FUNC-HASH %02d:", i);
+ for(p=sqlite3BuiltinFunctions.a[i]; p; p=p->u.pHash){
+ int n = sqlite3Strlen30(p->zName);
+ int h = p->zName[0] + n;
+ printf(" %s(%d)", p->zName, h);
+ }
+ printf("\n");
+ }
+ }
+#endif
+}
+
+/************** End of func.c ************************************************/
+/************** Begin file fkey.c ********************************************/
+/*
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+** This file contains code used by the compiler to add foreign key
+** support to compiled SQL statements.
+*/
+/* #include "sqliteInt.h" */
+
+#ifndef SQLITE_OMIT_FOREIGN_KEY
+#ifndef SQLITE_OMIT_TRIGGER
+
+/*
+** Deferred and Immediate FKs
+** --------------------------
+**
+** Foreign keys in SQLite come in two flavours: deferred and immediate.
+** If an immediate foreign key constraint is violated,
+** SQLITE_CONSTRAINT_FOREIGNKEY is returned and the current
+** statement transaction is rolled back.  If a
+** deferred foreign key constraint is violated, no action is taken
+** immediately.  However, if the application attempts to commit the
+** transaction before fixing the constraint violation, the attempt fails.
+**
+** Deferred constraints are implemented using a simple counter associated
+** with the database handle. The counter is set to zero each time a
+** database transaction is opened. Each time a statement is executed
+** that causes a foreign key violation, the counter is incremented. Each
+** time a statement is executed that removes an existing violation from
+** the database, the counter is decremented. When the transaction is
+** committed, the commit fails if the current value of the counter is
+** greater than zero. This scheme has two big drawbacks:
+**
+** * When a commit fails due to a deferred foreign key constraint,
+** there is no way to tell which foreign constraint is not satisfied,
+** or which row it is not satisfied for.
+**
+** * If the database contains foreign key violations when the
+** transaction is opened, this may cause the mechanism to malfunction.
+**
+** Despite these problems, this approach is adopted as it seems simpler
+** than the alternatives.
+**
+** INSERT operations:
+**
+** I.1) For each FK for which the table is the child table, search
+** the parent table for a match. If none is found increment the
+** constraint counter.
+**
+** I.2) For each FK for which the table is the parent table,
+** search the child table for rows that correspond to the new
+** row in the parent table. Decrement the counter for each row
+** found (as the constraint is now satisfied).
+**
+** DELETE operations:
+**
+** D.1) For each FK for which the table is the child table,
+** search the parent table for a row that corresponds to the
+** deleted row in the child table. If such a row is not found,
+** decrement the counter.
+**
+** D.2) For each FK for which the table is the parent table, search
+** the child table for rows that correspond to the deleted row
+**        in the parent table.  For each row found, increment the counter.
+**
+** UPDATE operations:
+**
+** An UPDATE command requires that all 4 steps above are taken, but only
+** for FK constraints for which the affected columns are actually
+** modified (values must be compared at runtime).
+**
+** Note that I.1 and D.1 are very similar operations, as are I.2 and D.2.
+** This simplifies the implementation a bit.
+**
+** For the purposes of immediate FK constraints, the OR REPLACE conflict
+** resolution is considered to delete rows before the new row is inserted.
+** If a delete caused by OR REPLACE violates an FK constraint, an exception
+** is thrown, even if the FK constraint would be satisfied after the new
+** row is inserted.
+**
+** Immediate constraints are usually handled similarly. The only difference
+** is that the counter used is stored as part of each individual statement
+** object (struct Vdbe). If, after the statement has run, its immediate
+** constraint counter is greater than zero,
+** it returns SQLITE_CONSTRAINT_FOREIGNKEY
+** and the statement transaction is rolled back. An exception is an INSERT
+** statement that inserts a single row only (no triggers). In this case,
+** instead of using a counter, an exception is thrown immediately if the
+** INSERT violates a foreign key constraint. This is necessary as such
+** an INSERT does not open a statement transaction.
+**
+** TODO: How should dropping a table be handled? How should renaming a
+** table be handled?
+**
+**
+** Query API Notes
+** ---------------
+**
+** Before coding an UPDATE or DELETE row operation, the code-generator
+** for those two operations needs to know whether or not the operation
+** requires any FK processing and, if so, which columns of the original
+** row are required by the FK processing VDBE code (i.e. if FKs were
+** implemented using triggers, which of the old.* columns would be
+** accessed). No information is required by the code-generator before
+** coding an INSERT operation. The functions used by the UPDATE/DELETE
+** generation code to query for this information are:
+**
+** sqlite3FkRequired() - Test to see if FK processing is required.
+** sqlite3FkOldmask() - Query for the set of required old.* columns.
+**
+**
+** Externally accessible module functions
+** --------------------------------------
+**
+** sqlite3FkCheck() - Check for foreign key violations.
+** sqlite3FkActions() - Code triggers for ON UPDATE/ON DELETE actions.
+** sqlite3FkDelete() - Delete an FKey structure.
+*/
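+
+/* A short SQL illustration of the deferred/immediate distinction above
+** (assuming PRAGMA foreign_keys=ON):
+**
+**   CREATE TABLE p(i INTEGER PRIMARY KEY);
+**   CREATE TABLE c1(j REFERENCES p(i));                      -- immediate
+**   CREATE TABLE c2(j REFERENCES p(i)
+**                     DEFERRABLE INITIALLY DEFERRED);        -- deferred
+**
+**   BEGIN;
+**   INSERT INTO c1 VALUES(1);  -- fails at once: SQLITE_CONSTRAINT_FOREIGNKEY
+**   INSERT INTO c2 VALUES(1);  -- succeeds; the deferred counter goes to 1
+**   COMMIT;                    -- fails unless the violation has been fixed
+*/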
+
+/*
+** VDBE Calling Convention
+** -----------------------
+**
+** Example:
+**
+** For the following INSERT statement:
+**
+** CREATE TABLE t1(a, b INTEGER PRIMARY KEY, c);
+** INSERT INTO t1 VALUES(1, 2, 3.1);
+**
+** Register (x): 2 (type integer)
+** Register (x+1): 1 (type integer)
+** Register (x+2): NULL (type NULL)
+** Register (x+3): 3.1 (type real)
+*/
+
+/*
+** A foreign key constraint requires that the key columns in the parent
+** table are collectively subject to a UNIQUE or PRIMARY KEY constraint.
+** Given that pParent is the parent table for foreign key constraint pFKey,
+** search the schema for a unique index on the parent key columns.
+**
+** If successful, zero is returned. If the parent key is an INTEGER PRIMARY
+** KEY column, then output variable *ppIdx is set to NULL. Otherwise, *ppIdx
+** is set to point to the unique index.
+**
+** If the parent key consists of a single column (the foreign key constraint
+** is not a composite foreign key), output variable *paiCol is set to NULL.
+** Otherwise, it is set to point to an allocated array of size N, where
+** N is the number of columns in the parent key. The first element of the
+** array is the index of the child table column that is mapped by the FK
+** constraint to the parent table column stored in the left-most column
+** of index *ppIdx. The second element of the array is the index of the
+** child table column that corresponds to the second left-most column of
+** *ppIdx, and so on.
+**
+** If the required index cannot be found, either because:
+**
+** 1) The named parent key columns do not exist, or
+**
+** 2) The named parent key columns do exist, but are not subject to a
+** UNIQUE or PRIMARY KEY constraint, or
+**
+** 3) No parent key columns were provided explicitly as part of the
+** foreign key definition, and the parent table does not have a
+** PRIMARY KEY, or
+**
+** 4) No parent key columns were provided explicitly as part of the
+** foreign key definition, and the PRIMARY KEY of the parent table
+**      consists of a different number of columns than the child key in
+** the child table.
+**
+** then non-zero is returned, and a "foreign key mismatch" error is loaded
+** into pParse.  If an OOM error occurs, non-zero is returned and the
+** pParse->db->mallocFailed flag is set.
+*/
+SQLITE_PRIVATE int sqlite3FkLocateIndex(
+ Parse *pParse, /* Parse context to store any error in */
+ Table *pParent, /* Parent table of FK constraint pFKey */
+ FKey *pFKey, /* Foreign key to find index for */
+ Index **ppIdx, /* OUT: Unique index on parent table */
+ int **paiCol /* OUT: Map of index columns in pFKey */
+){
+ Index *pIdx = 0; /* Value to return via *ppIdx */
+ int *aiCol = 0; /* Value to return via *paiCol */
+ int nCol = pFKey->nCol; /* Number of columns in parent key */
+ char *zKey = pFKey->aCol[0].zCol; /* Name of left-most parent key column */
+
+ /* The caller is responsible for zeroing output parameters. */
+ assert( ppIdx && *ppIdx==0 );
+ assert( !paiCol || *paiCol==0 );
+ assert( pParse );
+
+ /* If this is a non-composite (single column) foreign key, check if it
+ ** maps to the INTEGER PRIMARY KEY of table pParent. If so, leave *ppIdx
+ ** and *paiCol set to zero and return early.
+ **
+ ** Otherwise, for a composite foreign key (more than one column), allocate
+ ** space for the aiCol array (returned via output parameter *paiCol).
+ ** Non-composite foreign keys do not require the aiCol array.
+ */
+ if( nCol==1 ){
+ /* The FK maps to the IPK if any of the following are true:
+ **
+ ** 1) There is an INTEGER PRIMARY KEY column and the FK is implicitly
+ ** mapped to the primary key of table pParent, or
+ ** 2) The FK is explicitly mapped to a column declared as INTEGER
+ ** PRIMARY KEY.
+ */
+ if( pParent->iPKey>=0 ){
+ if( !zKey ) return 0;
+ if( !sqlite3StrICmp(pParent->aCol[pParent->iPKey].zName, zKey) ) return 0;
+ }
+ }else if( paiCol ){
+ assert( nCol>1 );
+ aiCol = (int *)sqlite3DbMallocRawNN(pParse->db, nCol*sizeof(int));
+ if( !aiCol ) return 1;
+ *paiCol = aiCol;
+ }
+
+ for(pIdx=pParent->pIndex; pIdx; pIdx=pIdx->pNext){
+ if( pIdx->nKeyCol==nCol && IsUniqueIndex(pIdx) && pIdx->pPartIdxWhere==0 ){
+ /* pIdx is a UNIQUE index (or a PRIMARY KEY) and has the right number
+ ** of columns. If each indexed column corresponds to a foreign key
+ ** column of pFKey, then this index is a winner. */
+
+ if( zKey==0 ){
+ /* If zKey is NULL, then this foreign key is implicitly mapped to
+ ** the PRIMARY KEY of table pParent. The PRIMARY KEY index may be
+        ** identified by the IsPrimaryKeyIndex() test below. */
+ if( IsPrimaryKeyIndex(pIdx) ){
+ if( aiCol ){
+ int i;
+ for(i=0; i<nCol; i++) aiCol[i] = pFKey->aCol[i].iFrom;
+ }
+ break;
+ }
+ }else{
+ /* If zKey is non-NULL, then this foreign key was declared to
+ ** map to an explicit list of columns in table pParent. Check if this
+ ** index matches those columns. Also, check that the index uses
+ ** the default collation sequences for each column. */
+ int i, j;
+ for(i=0; i<nCol; i++){
+ i16 iCol = pIdx->aiColumn[i]; /* Index of column in parent tbl */
+ const char *zDfltColl; /* Def. collation for column */
+ char *zIdxCol; /* Name of indexed column */
+
+ if( iCol<0 ) break; /* No foreign keys against expression indexes */
+
+ /* If the index uses a collation sequence that is different from
+ ** the default collation sequence for the column, this index is
+ ** unusable. Bail out early in this case. */
+ zDfltColl = pParent->aCol[iCol].zColl;
+ if( !zDfltColl ) zDfltColl = sqlite3StrBINARY;
+ if( sqlite3StrICmp(pIdx->azColl[i], zDfltColl) ) break;
+
+ zIdxCol = pParent->aCol[iCol].zName;
+ for(j=0; j<nCol; j++){
+ if( sqlite3StrICmp(pFKey->aCol[j].zCol, zIdxCol)==0 ){
+ if( aiCol ) aiCol[i] = pFKey->aCol[j].iFrom;
+ break;
+ }
+ }
+ if( j==nCol ) break;
+ }
+ if( i==nCol ) break; /* pIdx is usable */
+ }
+ }
+ }
+
+ if( !pIdx ){
+ if( !pParse->disableTriggers ){
+ sqlite3ErrorMsg(pParse,
+ "foreign key mismatch - \"%w\" referencing \"%w\"",
+ pFKey->pFrom->zName, pFKey->zTo);
+ }
+ sqlite3DbFree(pParse->db, aiCol);
+ return 1;
+ }
+
+ *ppIdx = pIdx;
+ return 0;
+}
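+
+/* Worked example (hypothetical schema, for illustration only):
+**
+**   CREATE TABLE parent(a, b, UNIQUE(b, a));
+**   CREATE TABLE child(x, y, FOREIGN KEY(y, x) REFERENCES parent(b, a));
+**
+** Here sqlite3FkLocateIndex() sets *ppIdx to the UNIQUE index on
+** parent(b, a), aiCol[0] to the index of child column "y" (the column
+** paired with the left-most index column "b"), and aiCol[1] to the
+** index of child column "x".
+*/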
+
+/*
+** This function is called when a row is inserted into or deleted from the
+** child table of foreign key constraint pFKey. If an SQL UPDATE is executed
+** on the child table of pFKey, this function is invoked twice for each row
+** affected - once to "delete" the old row, and then again to "insert" the
+** new row.
+**
+** Each time it is called, this function generates VDBE code to locate the
+** row in the parent table that corresponds to the row being inserted into
+** or deleted from the child table. If the parent row can be found, no
+** special action is taken. Otherwise, if the parent row can *not* be
+** found in the parent table:
+**
+** Operation | FK type   | Action taken
+** --------------------------------------------------------------------------
+** INSERT      immediate   Increment the "immediate constraint counter".
+**
+** DELETE      immediate   Decrement the "immediate constraint counter".
+**
+** INSERT      deferred    Increment the "deferred constraint counter".
+**
+** DELETE      deferred    Decrement the "deferred constraint counter".
+**
+** These operations are identified in the comment at the top of this file
+** (fkey.c) as "I.1" and "D.1".
+*/
+static void fkLookupParent(
+ Parse *pParse, /* Parse context */
+ int iDb, /* Index of database housing pTab */
+ Table *pTab, /* Parent table of FK pFKey */
+ Index *pIdx, /* Unique index on parent key columns in pTab */
+ FKey *pFKey, /* Foreign key constraint */
+ int *aiCol, /* Map from parent key columns to child table columns */
+ int regData, /* Address of array containing child table row */
+ int nIncr, /* Increment constraint counter by this */
+ int isIgnore /* If true, pretend pTab contains all NULL values */
+){
+ int i; /* Iterator variable */
+ Vdbe *v = sqlite3GetVdbe(pParse); /* Vdbe to add code to */
+ int iCur = pParse->nTab - 1; /* Cursor number to use */
+ int iOk = sqlite3VdbeMakeLabel(pParse); /* jump here if parent key found */
+
+ sqlite3VdbeVerifyAbortable(v,
+ (!pFKey->isDeferred
+ && !(pParse->db->flags & SQLITE_DeferFKs)
+ && !pParse->pToplevel
+ && !pParse->isMultiWrite) ? OE_Abort : OE_Ignore);
+
+ /* If nIncr is less than zero, then check at runtime if there are any
+ ** outstanding constraints to resolve. If there are not, there is no need
+ ** to check if deleting this row resolves any outstanding violations.
+ **
+ ** Check if any of the key columns in the child table row are NULL. If
+ ** any are, then the constraint is considered satisfied. No need to
+ ** search for a matching row in the parent table. */
+ if( nIncr<0 ){
+ sqlite3VdbeAddOp2(v, OP_FkIfZero, pFKey->isDeferred, iOk);
+ VdbeCoverage(v);
+ }
+ for(i=0; i<pFKey->nCol; i++){
+ int iReg = sqlite3TableColumnToStorage(pFKey->pFrom,aiCol[i]) + regData + 1;
+ sqlite3VdbeAddOp2(v, OP_IsNull, iReg, iOk); VdbeCoverage(v);
+ }
+
+ if( isIgnore==0 ){
+ if( pIdx==0 ){
+ /* If pIdx is NULL, then the parent key is the INTEGER PRIMARY KEY
+ ** column of the parent table (table pTab). */
+ int iMustBeInt; /* Address of MustBeInt instruction */
+ int regTemp = sqlite3GetTempReg(pParse);
+
+ /* Invoke MustBeInt to coerce the child key value to an integer (i.e.
+ ** apply the affinity of the parent key). If this fails, then there
+ ** is no matching parent key. Before using MustBeInt, make a copy of
+ ** the value. Otherwise, the value inserted into the child key column
+ ** will have INTEGER affinity applied to it, which may not be correct. */
+ sqlite3VdbeAddOp2(v, OP_SCopy,
+ sqlite3TableColumnToStorage(pFKey->pFrom,aiCol[0])+1+regData, regTemp);
+ iMustBeInt = sqlite3VdbeAddOp2(v, OP_MustBeInt, regTemp, 0);
+ VdbeCoverage(v);
+
+ /* If the parent table is the same as the child table, and we are about
+ ** to increment the constraint-counter (i.e. this is an INSERT operation),
+ ** then check if the row being inserted matches itself. If so, do not
+ ** increment the constraint-counter. */
+ if( pTab==pFKey->pFrom && nIncr==1 ){
+ sqlite3VdbeAddOp3(v, OP_Eq, regData, iOk, regTemp); VdbeCoverage(v);
+ sqlite3VdbeChangeP5(v, SQLITE_NOTNULL);
+ }
+
+ sqlite3OpenTable(pParse, iCur, iDb, pTab, OP_OpenRead);
+ sqlite3VdbeAddOp3(v, OP_NotExists, iCur, 0, regTemp); VdbeCoverage(v);
+ sqlite3VdbeGoto(v, iOk);
+ sqlite3VdbeJumpHere(v, sqlite3VdbeCurrentAddr(v)-2);
+ sqlite3VdbeJumpHere(v, iMustBeInt);
+ sqlite3ReleaseTempReg(pParse, regTemp);
+ }else{
+ int nCol = pFKey->nCol;
+ int regTemp = sqlite3GetTempRange(pParse, nCol);
+ int regRec = sqlite3GetTempReg(pParse);
+
+ sqlite3VdbeAddOp3(v, OP_OpenRead, iCur, pIdx->tnum, iDb);
+ sqlite3VdbeSetP4KeyInfo(pParse, pIdx);
+ for(i=0; i<nCol; i++){
+ sqlite3VdbeAddOp2(v, OP_Copy,
+ sqlite3TableColumnToStorage(pFKey->pFrom, aiCol[i])+1+regData,
+ regTemp+i);
+ }
+
+ /* If the parent table is the same as the child table, and we are about
+ ** to increment the constraint-counter (i.e. this is an INSERT operation),
+ ** then check if the row being inserted matches itself. If so, do not
+ ** increment the constraint-counter.
+ **
+ ** If any of the parent-key values are NULL, then the row cannot match
+ ** itself. So set JUMPIFNULL to make sure we do the OP_Found if any
+ ** of the parent-key values are NULL (at this point it is known that
+ ** none of the child key values are).
+ */
+ if( pTab==pFKey->pFrom && nIncr==1 ){
+ int iJump = sqlite3VdbeCurrentAddr(v) + nCol + 1;
+ for(i=0; i<nCol; i++){
+ int iChild = sqlite3TableColumnToStorage(pFKey->pFrom,aiCol[i])
+ +1+regData;
+ int iParent = 1+regData;
+ iParent += sqlite3TableColumnToStorage(pIdx->pTable,
+ pIdx->aiColumn[i]);
+ assert( pIdx->aiColumn[i]>=0 );
+ assert( aiCol[i]!=pTab->iPKey );
+ if( pIdx->aiColumn[i]==pTab->iPKey ){
+ /* The parent key is a composite key that includes the IPK column */
+ iParent = regData;
+ }
+ sqlite3VdbeAddOp3(v, OP_Ne, iChild, iJump, iParent); VdbeCoverage(v);
+ sqlite3VdbeChangeP5(v, SQLITE_JUMPIFNULL);
+ }
+ sqlite3VdbeGoto(v, iOk);
+ }
+
+ sqlite3VdbeAddOp4(v, OP_MakeRecord, regTemp, nCol, regRec,
+ sqlite3IndexAffinityStr(pParse->db,pIdx), nCol);
+ sqlite3VdbeAddOp4Int(v, OP_Found, iCur, iOk, regRec, 0); VdbeCoverage(v);
+
+ sqlite3ReleaseTempReg(pParse, regRec);
+ sqlite3ReleaseTempRange(pParse, regTemp, nCol);
+ }
+ }
+
+ if( !pFKey->isDeferred && !(pParse->db->flags & SQLITE_DeferFKs)
+ && !pParse->pToplevel
+ && !pParse->isMultiWrite
+ ){
+ /* Special case: If this is an INSERT statement that will insert exactly
+ ** one row into the table, raise a constraint immediately instead of
+ ** incrementing a counter. This is necessary because the VM code being
+ ** generated will not open a statement transaction. */
+ assert( nIncr==1 );
+ sqlite3HaltConstraint(pParse, SQLITE_CONSTRAINT_FOREIGNKEY,
+ OE_Abort, 0, P4_STATIC, P5_ConstraintFK);
+ }else{
+ if( nIncr>0 && pFKey->isDeferred==0 ){
+ sqlite3MayAbort(pParse);
+ }
+ sqlite3VdbeAddOp2(v, OP_FkCounter, pFKey->isDeferred, nIncr);
+ }
+
+ sqlite3VdbeResolveLabel(v, iOk);
+ sqlite3VdbeAddOp1(v, OP_Close, iCur);
+}
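+
+/* Sketch of the code generated above for a simple rowid schema
+** (hypothetical, for illustration only):
+**
+**   CREATE TABLE p(k INTEGER PRIMARY KEY);
+**   CREATE TABLE c(x REFERENCES p(k));
+**   INSERT INTO c VALUES(?);
+**
+** The parent key is the INTEGER PRIMARY KEY, so pIdx is NULL and the
+** VDBE code roughly does: copy the new c.x value, coerce it with
+** OP_MustBeInt, probe table p with OP_NotExists, and jump to iOk if a
+** matching rowid exists; otherwise fall through and increment the
+** constraint counter (operation "I.1").
+*/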
+
+
+/*
+** Return an Expr object that refers to a memory register corresponding
+** to column iCol of table pTab.
+**
+** regBase is the first of an array of registers that contain the data
+** for pTab. regBase itself holds the rowid. regBase+1 holds the first
+** column. regBase+2 holds the second column, and so forth.
+*/
+static Expr *exprTableRegister(
+ Parse *pParse, /* Parsing and code generating context */
+ Table *pTab, /* The table whose content is at r[regBase]... */
+ int regBase, /* Contents of table pTab */
+ i16 iCol /* Which column of pTab is desired */
+){
+ Expr *pExpr;
+ Column *pCol;
+ const char *zColl;
+ sqlite3 *db = pParse->db;
+
+ pExpr = sqlite3Expr(db, TK_REGISTER, 0);
+ if( pExpr ){
+ if( iCol>=0 && iCol!=pTab->iPKey ){
+ pCol = &pTab->aCol[iCol];
+ pExpr->iTable = regBase + sqlite3TableColumnToStorage(pTab,iCol) + 1;
+ pExpr->affExpr = pCol->affinity;
+ zColl = pCol->zColl;
+ if( zColl==0 ) zColl = db->pDfltColl->zName;
+ pExpr = sqlite3ExprAddCollateString(pParse, pExpr, zColl);
+ }else{
+ pExpr->iTable = regBase;
+ pExpr->affExpr = SQLITE_AFF_INTEGER;
+ }
+ }
+ return pExpr;
+}
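+
+/* For example (assumed register numbers, for illustration): with
+** regBase==5 and a two-column table t(a, b), the rowid is in r[5],
+** column "a" in r[6] and column "b" in r[7]. A call with iCol==1
+** returns a TK_REGISTER Expr with iTable==7 that carries the affinity
+** and collation sequence of column "b".
+*/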
+
+/*
+** Return an Expr object that refers to column iCol of table pTab, which
+** is open on cursor iCursor.
+*/
+static Expr *exprTableColumn(
+ sqlite3 *db, /* The database connection */
+ Table *pTab, /* The table whose column is desired */
+ int iCursor, /* The open cursor on the table */
+ i16 iCol /* The column that is wanted */
+){
+ Expr *pExpr = sqlite3Expr(db, TK_COLUMN, 0);
+ if( pExpr ){
+ pExpr->y.pTab = pTab;
+ pExpr->iTable = iCursor;
+ pExpr->iColumn = iCol;
+ }
+ return pExpr;
+}
+
+/*
+** This function is called to generate code executed when a row is deleted
+** from the parent table of foreign key constraint pFKey and, if pFKey is
+** deferred, when a row is inserted into the same table. When generating
+** code for an SQL UPDATE operation, this function may be called twice -
+** once to "delete" the old row and once to "insert" the new row.
+**
+** Parameter nIncr is passed -1 when inserting a row (as this may decrease
+** the number of FK violations in the db) or +1 when deleting one (as this
+** may increase the number of FK constraint problems).
+**
+** The code generated by this function scans through the rows in the child
+** table that correspond to the parent table row being deleted or inserted.
+** For each child row found, one of the following actions is taken:
+**
+** Operation | FK type   | Action taken
+** --------------------------------------------------------------------------
+** DELETE      immediate   Increment the "immediate constraint counter".
+**                         Or, if the ON (UPDATE|DELETE) action is RESTRICT,
+**                         throw a "FOREIGN KEY constraint failed" exception.
+**
+** INSERT      immediate   Decrement the "immediate constraint counter".
+**
+** DELETE      deferred    Increment the "deferred constraint counter".
+**                         Or, if the ON (UPDATE|DELETE) action is RESTRICT,
+**                         throw a "FOREIGN KEY constraint failed" exception.
+**
+** INSERT      deferred    Decrement the "deferred constraint counter".
+**
+** These operations are identified in the comment at the top of this file
+** (fkey.c) as "I.2" and "D.2".
+*/
+static void fkScanChildren(
+ Parse *pParse, /* Parse context */
+ SrcList *pSrc, /* The child table to be scanned */
+ Table *pTab, /* The parent table */
+ Index *pIdx, /* Index on parent covering the foreign key */
+ FKey *pFKey, /* The foreign key linking pSrc to pTab */
+ int *aiCol, /* Map from pIdx cols to child table cols */
+ int regData, /* Parent row data starts here */
+ int nIncr /* Amount to increment deferred counter by */
+){
+ sqlite3 *db = pParse->db; /* Database handle */
+ int i; /* Iterator variable */
+ Expr *pWhere = 0; /* WHERE clause to scan with */
+ NameContext sNameContext; /* Context used to resolve WHERE clause */
+ WhereInfo *pWInfo; /* Context used by sqlite3WhereXXX() */
+ int iFkIfZero = 0; /* Address of OP_FkIfZero */
+ Vdbe *v = sqlite3GetVdbe(pParse);
+
+ assert( pIdx==0 || pIdx->pTable==pTab );
+ assert( pIdx==0 || pIdx->nKeyCol==pFKey->nCol );
+ assert( pIdx!=0 || pFKey->nCol==1 );
+ assert( pIdx!=0 || HasRowid(pTab) );
+
+ if( nIncr<0 ){
+ iFkIfZero = sqlite3VdbeAddOp2(v, OP_FkIfZero, pFKey->isDeferred, 0);
+ VdbeCoverage(v);
+ }
+
+ /* Create an Expr object representing an SQL expression like:
+ **
+ ** <parent-key1> = <child-key1> AND <parent-key2> = <child-key2> ...
+ **
+ ** The collation sequence used for the comparison should be that of
+ ** the parent key columns. The affinity of the parent key column should
+ ** be applied to each child key value before the comparison takes place.
+ */
+ for(i=0; i<pFKey->nCol; i++){
+ Expr *pLeft; /* Value from parent table row */
+ Expr *pRight; /* Column ref to child table */
+ Expr *pEq; /* Expression (pLeft = pRight) */
+ i16 iCol; /* Index of column in child table */
+ const char *zCol; /* Name of column in child table */
+
+ iCol = pIdx ? pIdx->aiColumn[i] : -1;
+ pLeft = exprTableRegister(pParse, pTab, regData, iCol);
+ iCol = aiCol ? aiCol[i] : pFKey->aCol[0].iFrom;
+ assert( iCol>=0 );
+ zCol = pFKey->pFrom->aCol[iCol].zName;
+ pRight = sqlite3Expr(db, TK_ID, zCol);
+ pEq = sqlite3PExpr(pParse, TK_EQ, pLeft, pRight);
+ pWhere = sqlite3ExprAnd(pParse, pWhere, pEq);
+ }
+
+ /* If the child table is the same as the parent table, then add terms
+ ** to the WHERE clause that prevent this entry from being scanned.
+ ** The added WHERE clause terms are like this:
+ **
+ ** $current_rowid!=rowid
+ ** NOT( $current_a==a AND $current_b==b AND ... )
+ **
+ ** The first form is used for rowid tables. The second form is used
+ ** for WITHOUT ROWID tables. In the second form, the *parent* key is
+ ** (a,b,...). Either the parent or primary key could be used to
+ ** uniquely identify the current row, but the parent key is more convenient
+ ** as the required values have already been loaded into registers
+ ** by the caller.
+ */
+ if( pTab==pFKey->pFrom && nIncr>0 ){
+ Expr *pNe; /* Expression (pLeft != pRight) */
+ Expr *pLeft; /* Value from parent table row */
+ Expr *pRight; /* Column ref to child table */
+ if( HasRowid(pTab) ){
+ pLeft = exprTableRegister(pParse, pTab, regData, -1);
+ pRight = exprTableColumn(db, pTab, pSrc->a[0].iCursor, -1);
+ pNe = sqlite3PExpr(pParse, TK_NE, pLeft, pRight);
+ }else{
+ Expr *pEq, *pAll = 0;
+ assert( pIdx!=0 );
+ for(i=0; i<pIdx->nKeyCol; i++){
+ i16 iCol = pIdx->aiColumn[i];
+ assert( iCol>=0 );
+ pLeft = exprTableRegister(pParse, pTab, regData, iCol);
+ pRight = sqlite3Expr(db, TK_ID, pTab->aCol[iCol].zName);
+ pEq = sqlite3PExpr(pParse, TK_IS, pLeft, pRight);
+ pAll = sqlite3ExprAnd(pParse, pAll, pEq);
+ }
+ pNe = sqlite3PExpr(pParse, TK_NOT, pAll, 0);
+ }
+ pWhere = sqlite3ExprAnd(pParse, pWhere, pNe);
+ }
+
+ /* Resolve the references in the WHERE clause. */
+ memset(&sNameContext, 0, sizeof(NameContext));
+ sNameContext.pSrcList = pSrc;
+ sNameContext.pParse = pParse;
+ sqlite3ResolveExprNames(&sNameContext, pWhere);
+
+ /* Create VDBE to loop through the entries in pSrc that match the WHERE
+ ** clause. For each row found, increment either the deferred or immediate
+ ** foreign key constraint counter. */
+ if( pParse->nErr==0 ){
+ pWInfo = sqlite3WhereBegin(pParse, pSrc, pWhere, 0, 0, 0, 0);
+ sqlite3VdbeAddOp2(v, OP_FkCounter, pFKey->isDeferred, nIncr);
+ if( pWInfo ){
+ sqlite3WhereEnd(pWInfo);
+ }
+ }
+
+ /* Clean up the WHERE clause constructed above. */
+ sqlite3ExprDelete(db, pWhere);
+ if( iFkIfZero ){
+ sqlite3VdbeJumpHereOrPopInst(v, iFkIfZero);
+ }
+}
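+
+/* The scan coded above is conceptually the query below (a sketch for a
+** hypothetical composite key; $old.a and $old.b stand for the parent
+** row values already held in registers):
+**
+**   CREATE TABLE p(a, b, PRIMARY KEY(a, b));
+**   CREATE TABLE c(x, y, FOREIGN KEY(x, y) REFERENCES p(a, b));
+**
+**   SELECT ... FROM c WHERE $old.a = x AND $old.b = y;
+**
+** with one OP_FkCounter executed for each matching child row.
+*/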
+
+/*
+** This function returns a linked list of FKey objects (connected by
+** FKey.pNextTo) holding all children of table pTab. For example,
+** given the following schema:
+**
+** CREATE TABLE t1(a PRIMARY KEY);
+** CREATE TABLE t2(b REFERENCES t1(a));
+**
+** Calling this function with table "t1" as an argument returns a pointer
+** to the FKey structure representing the foreign key constraint on table
+** "t2". Calling this function with "t2" as the argument would return a
+** NULL pointer (as there are no FK constraints for which t2 is the parent
+** table).
+*/
+SQLITE_PRIVATE FKey *sqlite3FkReferences(Table *pTab){
+ return (FKey *)sqlite3HashFind(&pTab->pSchema->fkeyHash, pTab->zName);
+}
+
+/*
+** The second argument is a Trigger structure allocated by the
+** fkActionTrigger() routine. This function deletes the Trigger structure
+** and all of its sub-components.
+**
+** The Trigger structure or any of its sub-components may be allocated from
+** the lookaside buffer belonging to database handle dbMem.
+*/
+static void fkTriggerDelete(sqlite3 *dbMem, Trigger *p){
+ if( p ){
+ TriggerStep *pStep = p->step_list;
+ sqlite3ExprDelete(dbMem, pStep->pWhere);
+ sqlite3ExprListDelete(dbMem, pStep->pExprList);
+ sqlite3SelectDelete(dbMem, pStep->pSelect);
+ sqlite3ExprDelete(dbMem, p->pWhen);
+ sqlite3DbFree(dbMem, p);
+ }
+}
+
+/*
+** This function is called to generate code that runs when table pTab is
+** being dropped from the database. The SrcList passed as the second argument
+** to this function contains a single entry guaranteed to resolve to
+** table pTab.
+**
+** Normally, no code is required. However, if either
+**
+** (a) The table is the parent table of a FK constraint, or
+** (b) The table is the child table of a deferred FK constraint and it is
+** determined at runtime that there are outstanding deferred FK
+** constraint violations in the database,
+**
+** then the equivalent of "DELETE FROM <tbl>" is executed before dropping
+** the table from the database. Triggers are disabled while running this
+** DELETE, but foreign key actions are not.
+*/
+SQLITE_PRIVATE void sqlite3FkDropTable(Parse *pParse, SrcList *pName, Table *pTab){
+ sqlite3 *db = pParse->db;
+ if( (db->flags&SQLITE_ForeignKeys) && !IsVirtual(pTab) ){
+ int iSkip = 0;
+ Vdbe *v = sqlite3GetVdbe(pParse);
+
+ assert( v ); /* VDBE has already been allocated */
+ assert( pTab->pSelect==0 ); /* Not a view */
+ if( sqlite3FkReferences(pTab)==0 ){
+ /* Search for a deferred foreign key constraint for which this table
+ ** is the child table. If one cannot be found, return without
+ ** generating any VDBE code. If one can be found, then jump over
+ ** the entire DELETE if there are no outstanding deferred constraints
+ ** when this statement is run. */
+ FKey *p;
+ for(p=pTab->pFKey; p; p=p->pNextFrom){
+ if( p->isDeferred || (db->flags & SQLITE_DeferFKs) ) break;
+ }
+ if( !p ) return;
+ iSkip = sqlite3VdbeMakeLabel(pParse);
+ sqlite3VdbeAddOp2(v, OP_FkIfZero, 1, iSkip); VdbeCoverage(v);
+ }
+
+ pParse->disableTriggers = 1;
+ sqlite3DeleteFrom(pParse, sqlite3SrcListDup(db, pName, 0), 0, 0, 0);
+ pParse->disableTriggers = 0;
+
+ /* If the DELETE has generated immediate foreign key constraint
+ ** violations, halt the VDBE and return an error at this point, before
+ ** any modifications to the schema are made. This is because statement
+ ** transactions are not able to rollback schema changes.
+ **
+ ** If the SQLITE_DeferFKs flag is set, then this is not required, as
+ ** the statement transaction will not be rolled back even if FK
+ ** constraints are violated.
+ */
+ if( (db->flags & SQLITE_DeferFKs)==0 ){
+ sqlite3VdbeVerifyAbortable(v, OE_Abort);
+ sqlite3VdbeAddOp2(v, OP_FkIfZero, 0, sqlite3VdbeCurrentAddr(v)+2);
+ VdbeCoverage(v);
+ sqlite3HaltConstraint(pParse, SQLITE_CONSTRAINT_FOREIGNKEY,
+ OE_Abort, 0, P4_STATIC, P5_ConstraintFK);
+ }
+
+ if( iSkip ){
+ sqlite3VdbeResolveLabel(v, iSkip);
+ }
+ }
+}
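+
+/* For example (hypothetical schema): with
+**
+**   CREATE TABLE p(k PRIMARY KEY);
+**   CREATE TABLE c(x REFERENCES p(k));
+**
+** "DROP TABLE p" first runs the equivalent of "DELETE FROM p", so any
+** rows of c that still reference p raise an immediate foreign key
+** error before the schema is modified.
+*/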
+
+
+/*
+** The second argument points to an FKey object representing a foreign key
+** for which pTab is the child table. An UPDATE statement against pTab
+** is currently being processed. For each column of the table that is
+** actually updated, the corresponding element in the aChange[] array
+** is zero or greater (if a column is unmodified the corresponding element
+** is set to -1). If the rowid column is modified by the UPDATE statement
+** the bChngRowid argument is non-zero.
+**
+** This function returns true if any of the columns that are part of the
+** child key for FK constraint *p are modified.
+*/
+static int fkChildIsModified(
+ Table *pTab, /* Table being updated */
+ FKey *p, /* Foreign key for which pTab is the child */
+ int *aChange, /* Array indicating modified columns */
+ int bChngRowid /* True if rowid is modified by this update */
+){
+ int i;
+ for(i=0; i<p->nCol; i++){
+ int iChildKey = p->aCol[i].iFrom;
+ if( aChange[iChildKey]>=0 ) return 1;
+ if( iChildKey==pTab->iPKey && bChngRowid ) return 1;
+ }
+ return 0;
+}
+
+/*
+** The second argument points to an FKey object representing a foreign key
+** for which pTab is the parent table. An UPDATE statement against pTab
+** is currently being processed. For each column of the table that is
+** actually updated, the corresponding element in the aChange[] array
+** is zero or greater (if a column is unmodified the corresponding element
+** is set to -1). If the rowid column is modified by the UPDATE statement
+** the bChngRowid argument is non-zero.
+**
+** This function returns true if any of the columns that are part of the
+** parent key for FK constraint *p are modified.
+*/
+static int fkParentIsModified(
+ Table *pTab,
+ FKey *p,
+ int *aChange,
+ int bChngRowid
+){
+ int i;
+ for(i=0; i<p->nCol; i++){
+ char *zKey = p->aCol[i].zCol;
+ int iKey;
+ for(iKey=0; iKey<pTab->nCol; iKey++){
+ if( aChange[iKey]>=0 || (iKey==pTab->iPKey && bChngRowid) ){
+ Column *pCol = &pTab->aCol[iKey];
+ if( zKey ){
+ if( 0==sqlite3StrICmp(pCol->zName, zKey) ) return 1;
+ }else if( pCol->colFlags & COLFLAG_PRIMKEY ){
+ return 1;
+ }
+ }
+ }
+ }
+ return 0;
+}
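+
+/* Example of the aChange[] convention shared by both functions above
+** (hypothetical statement): for "UPDATE t SET b = ?" on a table
+** t(a, b, c), aChange[] holds -1 for columns "a" and "c" and a value
+** greater than or equal to zero for "b", so the functions return true
+** only for foreign keys whose child or parent key involves "b".
+*/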
+
+/*
+** Return true if the parser passed as the first argument is being
+** used to code a trigger that is really a "SET NULL" action belonging
+** to foreign key pFKey.
+*/
+static int isSetNullAction(Parse *pParse, FKey *pFKey){
+ Parse *pTop = sqlite3ParseToplevel(pParse);
+ if( pTop->pTriggerPrg ){
+ Trigger *p = pTop->pTriggerPrg->pTrigger;
+ if( (p==pFKey->apTrigger[0] && pFKey->aAction[0]==OE_SetNull)
+ || (p==pFKey->apTrigger[1] && pFKey->aAction[1]==OE_SetNull)
+ ){
+ return 1;
+ }
+ }
+ return 0;
+}
+
+/*
+** This function is called when inserting, deleting or updating a row of
+** table pTab to generate VDBE code to perform foreign key constraint
+** processing for the operation.
+**
+** For a DELETE operation, parameter regOld is passed the index of the
+** first register in an array of (pTab->nCol+1) registers containing the
+** rowid of the row being deleted, followed by each of the column values
+** of the row being deleted, from left to right. Parameter regNew is passed
+** zero in this case.
+**
+** For an INSERT operation, regOld is passed zero and regNew is passed the
+** first register of an array of (pTab->nCol+1) registers containing the new
+** row data.
+**
+** For an UPDATE operation, this function is called twice. Once before
+** the original record is deleted from the table using the calling convention
+** described for DELETE. Then again after the original record is deleted
+** but before the new record is inserted using the INSERT convention.
+*/
+SQLITE_PRIVATE void sqlite3FkCheck(
+ Parse *pParse, /* Parse context */
+ Table *pTab, /* Row is being deleted from this table */
+ int regOld, /* Previous row data is stored here */
+ int regNew, /* New row data is stored here */
+ int *aChange, /* Array indicating UPDATEd columns (or 0) */
+ int bChngRowid /* True if rowid is UPDATEd */
+){
+ sqlite3 *db = pParse->db; /* Database handle */
+ FKey *pFKey; /* Used to iterate through FKs */
+ int iDb; /* Index of database containing pTab */
+ const char *zDb; /* Name of database containing pTab */
+ int isIgnoreErrors = pParse->disableTriggers;
+
+ /* Exactly one of regOld and regNew should be non-zero. */
+ assert( (regOld==0)!=(regNew==0) );
+
+ /* If foreign-keys are disabled, this function is a no-op. */
+ if( (db->flags&SQLITE_ForeignKeys)==0 ) return;
+
+ iDb = sqlite3SchemaToIndex(db, pTab->pSchema);
+ zDb = db->aDb[iDb].zDbSName;
+
+ /* Loop through all the foreign key constraints for which pTab is the
+ ** child table (the table that the foreign key definition is part of). */
+ for(pFKey=pTab->pFKey; pFKey; pFKey=pFKey->pNextFrom){
+ Table *pTo; /* Parent table of foreign key pFKey */
+ Index *pIdx = 0; /* Index on key columns in pTo */
+ int *aiFree = 0;
+ int *aiCol;
+ int iCol;
+ int i;
+ int bIgnore = 0;
+
+ if( aChange
+ && sqlite3_stricmp(pTab->zName, pFKey->zTo)!=0
+ && fkChildIsModified(pTab, pFKey, aChange, bChngRowid)==0
+ ){
+ continue;
+ }
+
+ /* Find the parent table of this foreign key. Also find a unique index
+ ** on the parent key columns in the parent table. If either of these
+ ** schema items cannot be located, set an error in pParse and return
+ ** early. */
+ if( pParse->disableTriggers ){
+ pTo = sqlite3FindTable(db, pFKey->zTo, zDb);
+ }else{
+ pTo = sqlite3LocateTable(pParse, 0, pFKey->zTo, zDb);
+ }
+ if( !pTo || sqlite3FkLocateIndex(pParse, pTo, pFKey, &pIdx, &aiFree) ){
+ assert( isIgnoreErrors==0 || (regOld!=0 && regNew==0) );
+ if( !isIgnoreErrors || db->mallocFailed ) return;
+ if( pTo==0 ){
+ /* If isIgnoreErrors is true, then a table is being dropped. In this
+ ** case SQLite runs a "DELETE FROM xxx" on the table being dropped
+ ** before actually dropping it in order to check FK constraints.
+ ** If the parent table of an FK constraint on the current table is
+ ** missing, behave as if it is empty. i.e. decrement the relevant
+ ** FK counter for each row of the current table with non-NULL keys.
+ */
+ Vdbe *v = sqlite3GetVdbe(pParse);
+ int iJump = sqlite3VdbeCurrentAddr(v) + pFKey->nCol + 1;
+ for(i=0; i<pFKey->nCol; i++){
+ int iFromCol, iReg;
+ iFromCol = pFKey->aCol[i].iFrom;
+ iReg = sqlite3TableColumnToStorage(pFKey->pFrom,iFromCol) + regOld+1;
+ sqlite3VdbeAddOp2(v, OP_IsNull, iReg, iJump); VdbeCoverage(v);
+ }
+ sqlite3VdbeAddOp2(v, OP_FkCounter, pFKey->isDeferred, -1);
+ }
+ continue;
+ }
+ assert( pFKey->nCol==1 || (aiFree && pIdx) );
+
+ if( aiFree ){
+ aiCol = aiFree;
+ }else{
+ iCol = pFKey->aCol[0].iFrom;
+ aiCol = &iCol;
+ }
+ for(i=0; i<pFKey->nCol; i++){
+ if( aiCol[i]==pTab->iPKey ){
+ aiCol[i] = -1;
+ }
+ assert( pIdx==0 || pIdx->aiColumn[i]>=0 );
+#ifndef SQLITE_OMIT_AUTHORIZATION
+ /* Request permission to read the parent key columns. If the
+ ** authorization callback returns SQLITE_IGNORE, behave as if any
+ ** values read from the parent table are NULL. */
+ if( db->xAuth ){
+ int rcauth;
+ char *zCol = pTo->aCol[pIdx ? pIdx->aiColumn[i] : pTo->iPKey].zName;
+ rcauth = sqlite3AuthReadCol(pParse, pTo->zName, zCol, iDb);
+ bIgnore = (rcauth==SQLITE_IGNORE);
+ }
+#endif
+ }
+
+ /* Take a shared-cache advisory read-lock on the parent table. Allocate
+ ** a cursor to use to search the unique index on the parent key columns
+ ** in the parent table. */
+ sqlite3TableLock(pParse, iDb, pTo->tnum, 0, pTo->zName);
+ pParse->nTab++;
+
+ if( regOld!=0 ){
+ /* A row is being removed from the child table. Search for the parent.
+ ** If the parent does not exist, removing the child row resolves an
+ ** outstanding foreign key constraint violation. */
+ fkLookupParent(pParse, iDb, pTo, pIdx, pFKey, aiCol, regOld, -1, bIgnore);
+ }
+ if( regNew!=0 && !isSetNullAction(pParse, pFKey) ){
+ /* A row is being added to the child table. If a parent row cannot
+ ** be found, adding the child row has violated the FK constraint.
+ **
+ ** If this operation is being performed as part of a trigger program
+ ** that is actually a "SET NULL" action belonging to this very
+ ** foreign key, then omit this scan altogether. As all child key
+ ** values are guaranteed to be NULL, it is not possible for adding
+ ** this row to cause an FK violation. */
+ fkLookupParent(pParse, iDb, pTo, pIdx, pFKey, aiCol, regNew, +1, bIgnore);
+ }
+
+ sqlite3DbFree(db, aiFree);
+ }
+
+ /* Loop through all the foreign key constraints that refer to this table.
+ ** (the "child" constraints) */
+ for(pFKey = sqlite3FkReferences(pTab); pFKey; pFKey=pFKey->pNextTo){
+ Index *pIdx = 0; /* Foreign key index for pFKey */
+ SrcList *pSrc;
+ int *aiCol = 0;
+
+ if( aChange && fkParentIsModified(pTab, pFKey, aChange, bChngRowid)==0 ){
+ continue;
+ }
+
+ if( !pFKey->isDeferred && !(db->flags & SQLITE_DeferFKs)
+ && !pParse->pToplevel && !pParse->isMultiWrite
+ ){
+ assert( regOld==0 && regNew!=0 );
+ /* Inserting a single row into a parent table cannot cause (or fix)
+ ** an immediate foreign key violation. So do nothing in this case. */
+ continue;
+ }
+
+ if( sqlite3FkLocateIndex(pParse, pTab, pFKey, &pIdx, &aiCol) ){
+ if( !isIgnoreErrors || db->mallocFailed ) return;
+ continue;
+ }
+ assert( aiCol || pFKey->nCol==1 );
+
+ /* Create a SrcList structure containing the child table. We need the
+ ** child table as a SrcList for sqlite3WhereBegin(). */
+ pSrc = sqlite3SrcListAppend(pParse, 0, 0, 0);
+ if( pSrc ){
+ struct SrcList_item *pItem = pSrc->a;
+ pItem->pTab = pFKey->pFrom;
+ pItem->zName = pFKey->pFrom->zName;
+ pItem->pTab->nTabRef++;
+ pItem->iCursor = pParse->nTab++;
+
+ if( regNew!=0 ){
+ fkScanChildren(pParse, pSrc, pTab, pIdx, pFKey, aiCol, regNew, -1);
+ }
+ if( regOld!=0 ){
+ int eAction = pFKey->aAction[aChange!=0];
+ fkScanChildren(pParse, pSrc, pTab, pIdx, pFKey, aiCol, regOld, 1);
+ /* If this is a deferred FK constraint, or a CASCADE or SET NULL
+ ** action applies, then any foreign key violations caused by
+ ** removing the parent key will be rectified by the action trigger.
+ ** So do not set the "may-abort" flag in this case.
+ **
+ ** Note 1: If the FK is declared "ON UPDATE CASCADE", then the
+ ** may-abort flag will eventually be set on this statement anyway
+ ** (when this function is called as part of processing the UPDATE
+ ** within the action trigger).
+ **
+ ** Note 2: At first glance it may seem like SQLite could simply omit
+ ** all OP_FkCounter related scans when either CASCADE or SET NULL
+ ** applies. The trouble starts if the CASCADE or SET NULL action
+ ** trigger causes other triggers or action rules attached to the
+ ** child table to fire. In these cases the fk constraint counters
+ ** might be set incorrectly if any OP_FkCounter related scans are
+ ** omitted. */
+ if( !pFKey->isDeferred && eAction!=OE_Cascade && eAction!=OE_SetNull ){
+ sqlite3MayAbort(pParse);
+ }
+ }
+ pItem->zName = 0;
+ sqlite3SrcListDelete(db, pSrc);
+ }
+ sqlite3DbFree(db, aiCol);
+ }
+}
+
+#define COLUMN_MASK(x) (((x)>31) ? 0xffffffff : ((u32)1<<(x)))
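+
+/* COLUMN_MASK folds a column number into a 32-bit mask: bits 0..31 map
+** to the first 32 columns and any larger column number saturates to
+** 0xffffffff. For example, COLUMN_MASK(3)==0x08 while
+** COLUMN_MASK(40)==0xffffffff, so tables with more than 32 columns are
+** handled conservatively. */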
+
+/*
+** This function is called before generating code to update or delete a
+** row contained in table pTab.
+*/
+SQLITE_PRIVATE u32 sqlite3FkOldmask(
+ Parse *pParse, /* Parse context */
+ Table *pTab /* Table being modified */
+){
+ u32 mask = 0;
+ if( pParse->db->flags&SQLITE_ForeignKeys ){
+ FKey *p;
+ int i;
+ for(p=pTab->pFKey; p; p=p->pNextFrom){
+ for(i=0; i<p->nCol; i++) mask |= COLUMN_MASK(p->aCol[i].iFrom);
+ }
+ for(p=sqlite3FkReferences(pTab); p; p=p->pNextTo){
+ Index *pIdx = 0;
+ sqlite3FkLocateIndex(pParse, pTab, p, &pIdx, 0);
+ if( pIdx ){
+ for(i=0; i<pIdx->nKeyCol; i++){
+ assert( pIdx->aiColumn[i]>=0 );
+ mask |= COLUMN_MASK(pIdx->aiColumn[i]);
+ }
+ }
+ }
+ }
+ return mask;
+}
+
+
+/*
+** This function is called before generating code to update or delete a
+** row contained in table pTab. If the operation is a DELETE, then
+** parameter aChange is passed a NULL value. For an UPDATE, aChange points
+** to an array of size N, where N is the number of columns in table pTab.
+** If the i'th column is not modified by the UPDATE, then the corresponding
+** entry in the aChange[] array is set to -1. If the column is modified,
+** the value is 0 or greater. Parameter chngRowid is set to true if the
+** UPDATE statement modifies the rowid fields of the table.
+**
+** If any foreign key processing will be required, this function returns
+** non-zero. If there is no foreign key related processing, this function
+** returns zero.
+**
+** For an UPDATE, this function returns 2 if:
+**
+** * There are any FKs for which pTab is the child and the parent table, or
+** * the UPDATE modifies one or more parent keys for which the action is
+** not "NO ACTION" (i.e. is CASCADE, SET DEFAULT or SET NULL).
+**
+** Or, assuming some other foreign key processing is required, 1.
+*/
+SQLITE_PRIVATE int sqlite3FkRequired(
+ Parse *pParse, /* Parse context */
+ Table *pTab, /* Table being modified */
+ int *aChange, /* Non-NULL for UPDATE operations */
+ int chngRowid /* True for UPDATE that affects rowid */
+){
+ int eRet = 0;
+ if( pParse->db->flags&SQLITE_ForeignKeys ){
+ if( !aChange ){
+ /* A DELETE operation. Foreign key processing is required if the
+ ** table in question is either the child or parent table for any
+ ** foreign key constraint. */
+ eRet = (sqlite3FkReferences(pTab) || pTab->pFKey);
+ }else{
+ /* This is an UPDATE. Foreign key processing is only required if the
+ ** operation modifies one or more child or parent key columns. */
+ FKey *p;
+
+ /* Check if any child key columns are being modified. */
+ for(p=pTab->pFKey; p; p=p->pNextFrom){
+ if( 0==sqlite3_stricmp(pTab->zName, p->zTo) ) return 2;
+ if( fkChildIsModified(pTab, p, aChange, chngRowid) ){
+ eRet = 1;
+ }
+ }
+
+ /* Check if any parent key columns are being modified. */
+ for(p=sqlite3FkReferences(pTab); p; p=p->pNextTo){
+ if( fkParentIsModified(pTab, p, aChange, chngRowid) ){
+ if( p->aAction[1]!=OE_None ) return 2;
+ eRet = 1;
+ }
+ }
+ }
+ }
+ return eRet;
+}
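+
+/* Illustration of the return values (hypothetical schema):
+**
+**   CREATE TABLE p(k PRIMARY KEY);
+**   CREATE TABLE c(x REFERENCES p(k) ON UPDATE CASCADE);
+**
+** "DELETE FROM p" returns 1, since p is the parent of a foreign key,
+** while "UPDATE p SET k = ?" returns 2, since a parent key column is
+** modified and the ON UPDATE action is not "NO ACTION".
+*/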
+
+/*
+** This function is called when an UPDATE or DELETE operation is being
+** compiled on table pTab, which is the parent table of foreign-key pFKey.
+** If the current operation is an UPDATE, then the pChanges parameter is
+** passed a pointer to the list of columns being modified. If it is a
+** DELETE, pChanges is passed a NULL pointer.
+**
+** It returns a pointer to a Trigger structure containing a trigger
+** equivalent to the ON UPDATE or ON DELETE action specified by pFKey.
+** If the action is "NO ACTION" or "RESTRICT", then a NULL pointer is
+** returned (these actions require no special handling by the triggers
+** sub-system, code for them is created by fkScanChildren()).
+**
+** For example, if pFKey is the foreign key and pTab is table "p" in
+** the following schema:
+**
+** CREATE TABLE p(pk PRIMARY KEY);
+** CREATE TABLE c(ck REFERENCES p ON DELETE CASCADE);
+**
+** then the returned trigger structure is equivalent to:
+**
+** CREATE TRIGGER ... DELETE ON p BEGIN
+** DELETE FROM c WHERE ck = old.pk;
+** END;
+**
+** The returned pointer is cached as part of the foreign key object. It
+** is eventually freed along with the rest of the foreign key object by
+** sqlite3FkDelete().
+*/
+static Trigger *fkActionTrigger(
+ Parse *pParse, /* Parse context */
+ Table *pTab, /* Table being updated or deleted from */
+ FKey *pFKey, /* Foreign key to get action for */
+ ExprList *pChanges /* Change-list for UPDATE, NULL for DELETE */
+){
+ sqlite3 *db = pParse->db; /* Database handle */
+ int action; /* One of OE_None, OE_Cascade etc. */
+ Trigger *pTrigger; /* Trigger definition to return */
+ int iAction = (pChanges!=0); /* 1 for UPDATE, 0 for DELETE */
+
+ action = pFKey->aAction[iAction];
+ if( action==OE_Restrict && (db->flags & SQLITE_DeferFKs) ){
+ return 0;
+ }
+ pTrigger = pFKey->apTrigger[iAction];
+
+ if( action!=OE_None && !pTrigger ){
+ char const *zFrom; /* Name of child table */
+ int nFrom; /* Length in bytes of zFrom */
+ Index *pIdx = 0; /* Parent key index for this FK */
+ int *aiCol = 0; /* child table cols -> parent key cols */
+ TriggerStep *pStep = 0; /* First (only) step of trigger program */
+ Expr *pWhere = 0; /* WHERE clause of trigger step */
+ ExprList *pList = 0; /* Changes list if ON UPDATE CASCADE */
+ Select *pSelect = 0; /* If RESTRICT, "SELECT RAISE(...)" */
+ int i; /* Iterator variable */
+ Expr *pWhen = 0; /* WHEN clause for the trigger */
+
+ if( sqlite3FkLocateIndex(pParse, pTab, pFKey, &pIdx, &aiCol) ) return 0;
+ assert( aiCol || pFKey->nCol==1 );
+
+ for(i=0; i<pFKey->nCol; i++){
+ Token tOld = { "old", 3 }; /* Literal "old" token */
+ Token tNew = { "new", 3 }; /* Literal "new" token */
+ Token tFromCol; /* Name of column in child table */
+ Token tToCol; /* Name of column in parent table */
+ int iFromCol; /* Idx of column in child table */
+ Expr *pEq; /* tFromCol = OLD.tToCol */
+
+ iFromCol = aiCol ? aiCol[i] : pFKey->aCol[0].iFrom;
+ assert( iFromCol>=0 );
+ assert( pIdx!=0 || (pTab->iPKey>=0 && pTab->iPKey<pTab->nCol) );
+ assert( pIdx==0 || pIdx->aiColumn[i]>=0 );
+ sqlite3TokenInit(&tToCol,
+ pTab->aCol[pIdx ? pIdx->aiColumn[i] : pTab->iPKey].zName);
+ sqlite3TokenInit(&tFromCol, pFKey->pFrom->aCol[iFromCol].zName);
+
+ /* Create the expression "OLD.zToCol = zFromCol". It is important
+ ** that the "OLD.zToCol" term is on the LHS of the = operator, so
+ ** that the affinity and collation sequence associated with the
+ ** parent table are used for the comparison. */
+ pEq = sqlite3PExpr(pParse, TK_EQ,
+ sqlite3PExpr(pParse, TK_DOT,
+ sqlite3ExprAlloc(db, TK_ID, &tOld, 0),
+ sqlite3ExprAlloc(db, TK_ID, &tToCol, 0)),
+ sqlite3ExprAlloc(db, TK_ID, &tFromCol, 0)
+ );
+ pWhere = sqlite3ExprAnd(pParse, pWhere, pEq);
+
+ /* For ON UPDATE, construct the next term of the WHEN clause.
+ ** The final WHEN clause will be like this:
+ **
+ ** WHEN NOT(old.col1 IS new.col1 AND ... AND old.colN IS new.colN)
+ */
+ if( pChanges ){
+ pEq = sqlite3PExpr(pParse, TK_IS,
+ sqlite3PExpr(pParse, TK_DOT,
+ sqlite3ExprAlloc(db, TK_ID, &tOld, 0),
+ sqlite3ExprAlloc(db, TK_ID, &tToCol, 0)),
+ sqlite3PExpr(pParse, TK_DOT,
+ sqlite3ExprAlloc(db, TK_ID, &tNew, 0),
+ sqlite3ExprAlloc(db, TK_ID, &tToCol, 0))
+ );
+ pWhen = sqlite3ExprAnd(pParse, pWhen, pEq);
+ }
+
+ if( action!=OE_Restrict && (action!=OE_Cascade || pChanges) ){
+ Expr *pNew;
+ if( action==OE_Cascade ){
+ pNew = sqlite3PExpr(pParse, TK_DOT,
+ sqlite3ExprAlloc(db, TK_ID, &tNew, 0),
+ sqlite3ExprAlloc(db, TK_ID, &tToCol, 0));
+ }else if( action==OE_SetDflt ){
+ Column *pCol = pFKey->pFrom->aCol + iFromCol;
+ Expr *pDflt;
+ if( pCol->colFlags & COLFLAG_GENERATED ){
+ testcase( pCol->colFlags & COLFLAG_VIRTUAL );
+ testcase( pCol->colFlags & COLFLAG_STORED );
+ pDflt = 0;
+ }else{
+ pDflt = pCol->pDflt;
+ }
+ if( pDflt ){
+ pNew = sqlite3ExprDup(db, pDflt, 0);
+ }else{
+ pNew = sqlite3ExprAlloc(db, TK_NULL, 0, 0);
+ }
+ }else{
+ pNew = sqlite3ExprAlloc(db, TK_NULL, 0, 0);
+ }
+ pList = sqlite3ExprListAppend(pParse, pList, pNew);
+ sqlite3ExprListSetName(pParse, pList, &tFromCol, 0);
+ }
+ }
+ sqlite3DbFree(db, aiCol);
+
+ zFrom = pFKey->pFrom->zName;
+ nFrom = sqlite3Strlen30(zFrom);
+
+ if( action==OE_Restrict ){
+ Token tFrom;
+ Expr *pRaise;
+
+ tFrom.z = zFrom;
+ tFrom.n = nFrom;
+ pRaise = sqlite3Expr(db, TK_RAISE, "FOREIGN KEY constraint failed");
+ if( pRaise ){
+ pRaise->affExpr = OE_Abort;
+ }
+ pSelect = sqlite3SelectNew(pParse,
+ sqlite3ExprListAppend(pParse, 0, pRaise),
+ sqlite3SrcListAppend(pParse, 0, &tFrom, 0),
+ pWhere,
+ 0, 0, 0, 0, 0
+ );
+ pWhere = 0;
+ }
+
+ /* Disable lookaside memory allocation */
+ DisableLookaside;
+
+ pTrigger = (Trigger *)sqlite3DbMallocZero(db,
+ sizeof(Trigger) + /* struct Trigger */
+ sizeof(TriggerStep) + /* Single step in trigger program */
+ nFrom + 1 /* Space for pStep->zTarget */
+ );
+ if( pTrigger ){
+ pStep = pTrigger->step_list = (TriggerStep *)&pTrigger[1];
+ pStep->zTarget = (char *)&pStep[1];
+ memcpy((char *)pStep->zTarget, zFrom, nFrom);
+
+ pStep->pWhere = sqlite3ExprDup(db, pWhere, EXPRDUP_REDUCE);
+ pStep->pExprList = sqlite3ExprListDup(db, pList, EXPRDUP_REDUCE);
+ pStep->pSelect = sqlite3SelectDup(db, pSelect, EXPRDUP_REDUCE);
+ if( pWhen ){
+ pWhen = sqlite3PExpr(pParse, TK_NOT, pWhen, 0);
+ pTrigger->pWhen = sqlite3ExprDup(db, pWhen, EXPRDUP_REDUCE);
+ }
+ }
+
+ /* Re-enable the lookaside buffer, if it was disabled earlier. */
+ EnableLookaside;
+
+ sqlite3ExprDelete(db, pWhere);
+ sqlite3ExprDelete(db, pWhen);
+ sqlite3ExprListDelete(db, pList);
+ sqlite3SelectDelete(db, pSelect);
+ if( db->mallocFailed==1 ){
+ fkTriggerDelete(db, pTrigger);
+ return 0;
+ }
+ assert( pStep!=0 );
+ assert( pTrigger!=0 );
+
+ switch( action ){
+ case OE_Restrict:
+ pStep->op = TK_SELECT;
+ break;
+ case OE_Cascade:
+ if( !pChanges ){
+ pStep->op = TK_DELETE;
+ break;
+ }
+ /* no break */ deliberate_fall_through
+ default:
+ pStep->op = TK_UPDATE;
+ }
+ pStep->pTrig = pTrigger;
+ pTrigger->pSchema = pTab->pSchema;
+ pTrigger->pTabSchema = pTab->pSchema;
+ pFKey->apTrigger[iAction] = pTrigger;
+ pTrigger->op = (pChanges ? TK_UPDATE : TK_DELETE);
+ }
+
+ return pTrigger;
+}
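+
+/* Extending the example in the header comment above: if the constraint
+** on table c were declared "ON UPDATE CASCADE" instead, the synthesized
+** trigger would be roughly equivalent to:
+**
+**   CREATE TRIGGER ... UPDATE ON p
+**   WHEN NOT(old.pk IS new.pk) BEGIN
+**     UPDATE c SET ck = new.pk WHERE ck = old.pk;
+**   END;
+*/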
+
+/*
+** This function is called when deleting or updating a row to implement
+** any required CASCADE, SET NULL or SET DEFAULT actions.
+*/
+SQLITE_PRIVATE void sqlite3FkActions(
+ Parse *pParse, /* Parse context */
+ Table *pTab, /* Table being updated or deleted from */
+ ExprList *pChanges, /* Change-list for UPDATE, NULL for DELETE */
+ int regOld, /* Address of array containing old row */
+ int *aChange, /* Array indicating UPDATEd columns (or 0) */
+ int bChngRowid /* True if rowid is UPDATEd */
+){
+ /* If foreign-key support is enabled, iterate through all FKs that
+ ** refer to table pTab. If there is an action associated with the FK
+ ** for this operation (either update or delete), invoke the associated
+ ** trigger sub-program. */
+ if( pParse->db->flags&SQLITE_ForeignKeys ){
+ FKey *pFKey; /* Iterator variable */
+ for(pFKey = sqlite3FkReferences(pTab); pFKey; pFKey=pFKey->pNextTo){
+ if( aChange==0 || fkParentIsModified(pTab, pFKey, aChange, bChngRowid) ){
+ Trigger *pAct = fkActionTrigger(pParse, pTab, pFKey, pChanges);
+ if( pAct ){
+ sqlite3CodeRowTriggerDirect(pParse, pAct, pTab, regOld, OE_Abort, 0);
+ }
+ }
+ }
+ }
+}
+
+#endif /* ifndef SQLITE_OMIT_TRIGGER */
+
+/*
+** Free all memory associated with foreign key definitions attached to
+** table pTab. Remove the deleted foreign keys from the Schema.fkeyHash
+** hash table.
+*/
+SQLITE_PRIVATE void sqlite3FkDelete(sqlite3 *db, Table *pTab){
+ FKey *pFKey; /* Iterator variable */
+ FKey *pNext; /* Copy of pFKey->pNextFrom */
+
+ assert( db==0 || IsVirtual(pTab)
+ || sqlite3SchemaMutexHeld(db, 0, pTab->pSchema) );
+ for(pFKey=pTab->pFKey; pFKey; pFKey=pNext){
+
+ /* Remove the FK from the fkeyHash hash table. */
+ if( !db || db->pnBytesFreed==0 ){
+ if( pFKey->pPrevTo ){
+ pFKey->pPrevTo->pNextTo = pFKey->pNextTo;
+ }else{
+ void *p = (void *)pFKey->pNextTo;
+ const char *z = (p ? pFKey->pNextTo->zTo : pFKey->zTo);
+ sqlite3HashInsert(&pTab->pSchema->fkeyHash, z, p);
+ }
+ if( pFKey->pNextTo ){
+ pFKey->pNextTo->pPrevTo = pFKey->pPrevTo;
+ }
+ }
+
+ /* EV: R-30323-21917 Each foreign key constraint in SQLite is
+ ** classified as either immediate or deferred.
+ */
+ assert( pFKey->isDeferred==0 || pFKey->isDeferred==1 );
+
+ /* Delete any triggers created to implement actions for this FK. */
+#ifndef SQLITE_OMIT_TRIGGER
+ fkTriggerDelete(db, pFKey->apTrigger[0]);
+ fkTriggerDelete(db, pFKey->apTrigger[1]);
+#endif
+
+ pNext = pFKey->pNextFrom;
+ sqlite3DbFree(db, pFKey);
+ }
+}
+#endif /* ifndef SQLITE_OMIT_FOREIGN_KEY */
+
+/************** End of fkey.c ************************************************/
+/************** Begin file insert.c ******************************************/
+/*
+** 2001 September 15
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+** This file contains C code routines that are called by the parser
+** to handle INSERT statements in SQLite.
+*/
+/* #include "sqliteInt.h" */
+
+/*
+** Generate code that will
+**
+** (1) acquire a lock for table pTab then
+** (2) open pTab as cursor iCur.
+**
+** If pTab is a WITHOUT ROWID table, then it is the PRIMARY KEY index
+** for that table that is actually opened.
+*/
+SQLITE_PRIVATE void sqlite3OpenTable(
+ Parse *pParse, /* Generate code into this VDBE */
+ int iCur, /* The cursor number of the table */
+ int iDb, /* The database index in sqlite3.aDb[] */
+ Table *pTab, /* The table to be opened */
+ int opcode /* OP_OpenRead or OP_OpenWrite */
+){
+ Vdbe *v;
+ assert( !IsVirtual(pTab) );
+ v = sqlite3GetVdbe(pParse);
+ assert( opcode==OP_OpenWrite || opcode==OP_OpenRead );
+ sqlite3TableLock(pParse, iDb, pTab->tnum,
+ (opcode==OP_OpenWrite)?1:0, pTab->zName);
+ if( HasRowid(pTab) ){
+ sqlite3VdbeAddOp4Int(v, opcode, iCur, pTab->tnum, iDb, pTab->nNVCol);
+ VdbeComment((v, "%s", pTab->zName));
+ }else{
+ Index *pPk = sqlite3PrimaryKeyIndex(pTab);
+ assert( pPk!=0 );
+ assert( pPk->tnum==pTab->tnum );
+ sqlite3VdbeAddOp3(v, opcode, iCur, pPk->tnum, iDb);
+ sqlite3VdbeSetP4KeyInfo(pParse, pPk);
+ VdbeComment((v, "%s", pTab->zName));
+ }
+}
+
+/*
+** Return a pointer to the column affinity string associated with index
+** pIdx. A column affinity string has one character for each column in
+** the table, according to the affinity of the column:
+**
+** Character     Column affinity
+** ------------------------------
+** 'A'           BLOB
+** 'B'           TEXT
+** 'C'           NUMERIC
+** 'D'           INTEGER
+** 'E'           REAL
+**
+** An extra 'D' is appended to the end of the string to cover the
+** rowid that appears as the last column in every index.
+**
+** Memory for the buffer containing the column index affinity string
+** is managed along with the rest of the Index structure. It will be
+** released when sqlite3DeleteIndex() is called.
+*/
+SQLITE_PRIVATE const char *sqlite3IndexAffinityStr(sqlite3 *db, Index *pIdx){
+ if( !pIdx->zColAff ){
+ /* The first time a column affinity string for a particular index is
+ ** required, it is allocated and populated here. It is then stored as
+ ** a member of the Index structure for subsequent use.
+ **
+ ** The column affinity string will eventually be deleted by
+ ** sqliteDeleteIndex() when the Index structure itself is cleaned
+ ** up.
+ */
+ int n;
+ Table *pTab = pIdx->pTable;
+ pIdx->zColAff = (char *)sqlite3DbMallocRaw(0, pIdx->nColumn+1);
+ if( !pIdx->zColAff ){
+ sqlite3OomFault(db);
+ return 0;
+ }
+ for(n=0; n<pIdx->nColumn; n++){
+ i16 x = pIdx->aiColumn[n];
+ char aff;
+ if( x>=0 ){
+ aff = pTab->aCol[x].affinity;
+ }else if( x==XN_ROWID ){
+ aff = SQLITE_AFF_INTEGER;
+ }else{
+ assert( x==XN_EXPR );
+ assert( pIdx->aColExpr!=0 );
+ aff = sqlite3ExprAffinity(pIdx->aColExpr->a[n].pExpr);
+ }
+ if( aff<SQLITE_AFF_BLOB ) aff = SQLITE_AFF_BLOB;
+ if( aff>SQLITE_AFF_NUMERIC) aff = SQLITE_AFF_NUMERIC;
+ pIdx->zColAff[n] = aff;
+ }
+ pIdx->zColAff[n] = 0;
+ }
+
+ return pIdx->zColAff;
+}
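+
+/* For example (hypothetical schema): given
+**
+**   CREATE TABLE t(a INTEGER, b TEXT);
+**   CREATE INDEX i ON t(b, a);
+**
+** the affinity string for index i is "BDD": 'B' for the TEXT column b,
+** 'D' for the INTEGER column a, and the trailing 'D' for the rowid
+** stored as the last column of the index.
+*/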
+
+/*
+** Compute the affinity string for table pTab, if it has not already been
+** computed. As an optimization, omit trailing SQLITE_AFF_BLOB affinities.
+**
+** If the affinity string exists (i.e. it is not entirely SQLITE_AFF_BLOB
+** values) and iReg>0, then code an OP_Affinity opcode that will set the
+** affinities for register iReg and following. Or, if the affinity string
+** exists and iReg==0, then just set the P4 operand of the previous opcode
+** (which should be an OP_MakeRecord) to the affinity string.
+**
+** A column affinity string has one character per column:
+**
+** Character     Column affinity
+** ------------------------------
+** 'A'           BLOB
+** 'B'           TEXT
+** 'C'           NUMERIC
+** 'D'           INTEGER
+** 'E'           REAL
+*/
+SQLITE_PRIVATE void sqlite3TableAffinity(Vdbe *v, Table *pTab, int iReg){
+ int i, j;
+ char *zColAff = pTab->zColAff;
+ if( zColAff==0 ){
+ sqlite3 *db = sqlite3VdbeDb(v);
+ zColAff = (char *)sqlite3DbMallocRaw(0, pTab->nCol+1);
+ if( !zColAff ){
+ sqlite3OomFault(db);
+ return;
+ }
+
+ for(i=j=0; i<pTab->nCol; i++){
+ assert( pTab->aCol[i].affinity!=0 );
+ if( (pTab->aCol[i].colFlags & COLFLAG_VIRTUAL)==0 ){
+ zColAff[j++] = pTab->aCol[i].affinity;
+ }
+ }
+ do{
+ zColAff[j--] = 0;
+ }while( j>=0 && zColAff[j]<=SQLITE_AFF_BLOB );
+ pTab->zColAff = zColAff;
+ }
+ assert( zColAff!=0 );
+ i = sqlite3Strlen30NN(zColAff);
+ if( i ){
+ if( iReg ){
+ sqlite3VdbeAddOp4(v, OP_Affinity, iReg, i, 0, zColAff, i);
+ }else{
+ sqlite3VdbeChangeP4(v, -1, zColAff, i);
+ }
+ }
+}
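+
+/* For example (hypothetical schema): for
+**
+**   CREATE TABLE t(a INTEGER, b TEXT, c BLOB);
+**
+** the full affinity string would be "DBA", but the trailing BLOB
+** affinity is dropped as an optimization, leaving "DB". Registers past
+** the end of the string are unaffected, which for BLOB affinity is a
+** no-op anyway.
+*/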
+
+/*
+** Return non-zero if the table pTab in database iDb or any of its indices
+** have been opened at any point in the VDBE program. This is used to see if
+** a statement of the form "INSERT INTO <iDb, pTab> SELECT ..." can
+** run without using a temporary table for the results of the SELECT.
+*/
+static int readsTable(Parse *p, int iDb, Table *pTab){
+ Vdbe *v = sqlite3GetVdbe(p);
+ int i;
+ int iEnd = sqlite3VdbeCurrentAddr(v);
+#ifndef SQLITE_OMIT_VIRTUALTABLE
+ VTable *pVTab = IsVirtual(pTab) ? sqlite3GetVTable(p->db, pTab) : 0;
+#endif
+
+ for(i=1; i<iEnd; i++){
+ VdbeOp *pOp = sqlite3VdbeGetOp(v, i);
+ assert( pOp!=0 );
+ if( pOp->opcode==OP_OpenRead && pOp->p3==iDb ){
+ Index *pIndex;
+ Pgno tnum = pOp->p2;
+ if( tnum==pTab->tnum ){
+ return 1;
+ }
+ for(pIndex=pTab->pIndex; pIndex; pIndex=pIndex->pNext){
+ if( tnum==pIndex->tnum ){
+ return 1;
+ }
+ }
+ }
+#ifndef SQLITE_OMIT_VIRTUALTABLE
+ if( pOp->opcode==OP_VOpen && pOp->p4.pVtab==pVTab ){
+ assert( pOp->p4.pVtab!=0 );
+ assert( pOp->p4type==P4_VTAB );
+ return 1;
+ }
+#endif
+ }
+ return 0;
+}
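+
+/* For example (illustrative): in "INSERT INTO t1 SELECT * FROM t1" the
+** SELECT opens t1 with OP_OpenRead, so readsTable() returns non-zero
+** and the caller buffers the SELECT results in a transient table
+** rather than writing into t1 while it is still being scanned.
+*/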
+
+/* This walker callback will compute the union of colFlags flags for all
+** referenced columns in a CHECK constraint or generated column expression.
+*/
+static int exprColumnFlagUnion(Walker *pWalker, Expr *pExpr){
+ if( pExpr->op==TK_COLUMN && pExpr->iColumn>=0 ){
+ assert( pExpr->iColumn < pWalker->u.pTab->nCol );
+ pWalker->eCode |= pWalker->u.pTab->aCol[pExpr->iColumn].colFlags;
+ }
+ return WRC_Continue;
+}
+
+#ifndef SQLITE_OMIT_GENERATED_COLUMNS
+/*
+** All regular columns for table pTab have been put into registers
+** starting with iRegStore. The registers that correspond to STORED
+** or VIRTUAL columns have not yet been initialized. This routine goes
+** back and computes the values for those columns based on the previously
+** computed normal columns.
+*/
+SQLITE_PRIVATE void sqlite3ComputeGeneratedColumns(
+ Parse *pParse, /* Parsing context */
+ int iRegStore, /* Register holding the first column */
+ Table *pTab /* The table */
+){
+ int i;
+ Walker w;
+ Column *pRedo;
+ int eProgress;
+ VdbeOp *pOp;
+
+ assert( pTab->tabFlags & TF_HasGenerated );
+ testcase( pTab->tabFlags & TF_HasVirtual );
+ testcase( pTab->tabFlags & TF_HasStored );
+
+ /* Before computing generated columns, first go through and make sure
+ ** that appropriate affinity has been applied to the regular columns
+ */
+ sqlite3TableAffinity(pParse->pVdbe, pTab, iRegStore);
+ if( (pTab->tabFlags & TF_HasStored)!=0
+ && (pOp = sqlite3VdbeGetOp(pParse->pVdbe,-1))->opcode==OP_Affinity
+ ){
+ /* Change the OP_Affinity argument to '@' (NONE) for all stored
+ ** columns. '@' is the no-op affinity and those columns have not
+ ** yet been computed. */
+ int ii, jj;
+ char *zP4 = pOp->p4.z;
+ assert( zP4!=0 );
+ assert( pOp->p4type==P4_DYNAMIC );
+ for(ii=jj=0; zP4[jj]; ii++){
+ if( pTab->aCol[ii].colFlags & COLFLAG_VIRTUAL ){
+ continue;
+ }
+ if( pTab->aCol[ii].colFlags & COLFLAG_STORED ){
+ zP4[jj] = SQLITE_AFF_NONE;
+ }
+ jj++;
+ }
+ }
+
+ /* Because there can be multiple generated columns that refer to one another,
+ ** this is a two-pass algorithm. On the first pass, mark all generated
+ ** columns as "not available".
+ */
+ for(i=0; i<pTab->nCol; i++){
+ if( pTab->aCol[i].colFlags & COLFLAG_GENERATED ){
+ testcase( pTab->aCol[i].colFlags & COLFLAG_VIRTUAL );
+ testcase( pTab->aCol[i].colFlags & COLFLAG_STORED );
+ pTab->aCol[i].colFlags |= COLFLAG_NOTAVAIL;
+ }
+ }
+
+ w.u.pTab = pTab;
+ w.xExprCallback = exprColumnFlagUnion;
+ w.xSelectCallback = 0;
+ w.xSelectCallback2 = 0;
+
+ /* On the second pass, compute the value of each NOT-AVAILABLE column.
+ ** Companion code in the TK_COLUMN case of sqlite3ExprCodeTarget() will
+ ** compute dependencies and remove the COLFLAG_NOTAVAIL mark from
+ ** columns as their values become available.
+ */
+ pParse->iSelfTab = -iRegStore;
+ do{
+ eProgress = 0;
+ pRedo = 0;
+ for(i=0; i<pTab->nCol; i++){
+ Column *pCol = pTab->aCol + i;
+ if( (pCol->colFlags & COLFLAG_NOTAVAIL)!=0 ){
+ int x;
+ pCol->colFlags |= COLFLAG_BUSY;
+ w.eCode = 0;
+ sqlite3WalkExpr(&w, pCol->pDflt);
+ pCol->colFlags &= ~COLFLAG_BUSY;
+ if( w.eCode & COLFLAG_NOTAVAIL ){
+ pRedo = pCol;
+ continue;
+ }
+ eProgress = 1;
+ assert( pCol->colFlags & COLFLAG_GENERATED );
+ x = sqlite3TableColumnToStorage(pTab, i) + iRegStore;
+ sqlite3ExprCodeGeneratedColumn(pParse, pCol, x);
+ pCol->colFlags &= ~COLFLAG_NOTAVAIL;
+ }
+ }
+ }while( pRedo && eProgress );
+ if( pRedo ){
+ sqlite3ErrorMsg(pParse, "generated column loop on \"%s\"", pRedo->zName);
+ }
+ pParse->iSelfTab = 0;
+}
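+
+/* For example (hypothetical schema): in
+**
+**   CREATE TABLE t1(a, b AS (c + 1), c AS (a * 2));
+**
+** column b depends on generated column c, so the first iteration of
+** the loop above computes c and leaves b marked COLFLAG_NOTAVAIL for a
+** redo; a second iteration then computes b. A circular definition such
+** as "b AS (c), c AS (b)" makes no progress and raises the
+** "generated column loop" error.
+*/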
+#endif /* SQLITE_OMIT_GENERATED_COLUMNS */
+
+
+#ifndef SQLITE_OMIT_AUTOINCREMENT
+/*
+** Locate or create an AutoincInfo structure associated with table pTab
+** which is in database iDb. Return the register number for the register
+** that holds the maximum rowid. Return zero if pTab is not an AUTOINCREMENT
+** table. (Also return zero when doing a VACUUM since we do not want to
+** update the AUTOINCREMENT counters during a VACUUM.)
+**
+** There is at most one AutoincInfo structure per table even if the
+** same table is autoincremented multiple times due to inserts within
+** triggers. A new AutoincInfo structure is created if this is the
+** first use of table pTab. On 2nd and subsequent uses, the original
+** AutoincInfo structure is used.
+**
+** Four consecutive registers are allocated:
+**
+** (1) The name of the pTab table.
+** (2) The maximum ROWID of pTab.
+** (3) The rowid in sqlite_sequence of pTab
+** (4) The original value of the max ROWID in pTab, or NULL if none
+**
+** The 2nd register is the one that is returned. That is all the
+** insert routine needs to know about.
+*/
+static int autoIncBegin(
+ Parse *pParse, /* Parsing context */
+ int iDb, /* Index of the database holding pTab */
+ Table *pTab /* The table we are writing to */
+){
+ int memId = 0; /* Register holding maximum rowid */
+ assert( pParse->db->aDb[iDb].pSchema!=0 );
+ if( (pTab->tabFlags & TF_Autoincrement)!=0
+ && (pParse->db->mDbFlags & DBFLAG_Vacuum)==0
+ ){
+ Parse *pToplevel = sqlite3ParseToplevel(pParse);
+ AutoincInfo *pInfo;
+ Table *pSeqTab = pParse->db->aDb[iDb].pSchema->pSeqTab;
+
+ /* Verify that the sqlite_sequence table exists and is an ordinary
+ ** rowid table with exactly two columns.
+ ** Ticket d8dc2b3a58cd5dc2918a1d4acb 2018-05-23 */
+ if( pSeqTab==0
+ || !HasRowid(pSeqTab)
+ || IsVirtual(pSeqTab)
+ || pSeqTab->nCol!=2
+ ){
+ pParse->nErr++;
+ pParse->rc = SQLITE_CORRUPT_SEQUENCE;
+ return 0;
+ }
+
+ pInfo = pToplevel->pAinc;
+ while( pInfo && pInfo->pTab!=pTab ){ pInfo = pInfo->pNext; }
+ if( pInfo==0 ){
+ pInfo = sqlite3DbMallocRawNN(pParse->db, sizeof(*pInfo));
+ if( pInfo==0 ) return 0;
+ pInfo->pNext = pToplevel->pAinc;
+ pToplevel->pAinc = pInfo;
+ pInfo->pTab = pTab;
+ pInfo->iDb = iDb;
+ pToplevel->nMem++; /* Register to hold name of table */
+ pInfo->regCtr = ++pToplevel->nMem; /* Max rowid register */
+ pToplevel->nMem +=2; /* Rowid in sqlite_sequence + orig max val */
+ }
+ memId = pInfo->regCtr;
+ }
+ return memId;
+}
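+
+/* Register layout sketch for the returned memId (matching the list in
+** the header comment above): r[memId-1] holds the table name, r[memId]
+** the maximum rowid seen so far, r[memId+1] the rowid of the
+** sqlite_sequence entry, and r[memId+2] the original maximum, which is
+** compared against r[memId] later to decide whether a write-back to
+** sqlite_sequence is needed.
+*/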
+
+/*
+** This routine generates code that will initialize all of the
+** registers used by the autoincrement tracker.
+*/
+SQLITE_PRIVATE void sqlite3AutoincrementBegin(Parse *pParse){
+ AutoincInfo *p; /* Information about an AUTOINCREMENT */
+ sqlite3 *db = pParse->db; /* The database connection */
+ Db *pDb; /* Database holding the autoinc table */
+ int memId; /* Register holding max rowid */
+ Vdbe *v = pParse->pVdbe; /* VDBE under construction */
+
+ /* This routine is never called during trigger-generation. It is
+ ** only called from the top-level */
+ assert( pParse->pTriggerTab==0 );
+ assert( sqlite3IsToplevel(pParse) );
+
+ assert( v ); /* We failed long ago if this is not so */
+ for(p = pParse->pAinc; p; p = p->pNext){
+ static const int iLn = VDBE_OFFSET_LINENO(2);
+ static const VdbeOpList autoInc[] = {
+ /* 0 */ {OP_Null, 0, 0, 0},
+ /* 1 */ {OP_Rewind, 0, 10, 0},
+ /* 2 */ {OP_Column, 0, 0, 0},
+ /* 3 */ {OP_Ne, 0, 9, 0},
+ /* 4 */ {OP_Rowid, 0, 0, 0},
+ /* 5 */ {OP_Column, 0, 1, 0},
+ /* 6 */ {OP_AddImm, 0, 0, 0},
+ /* 7 */ {OP_Copy, 0, 0, 0},
+ /* 8 */ {OP_Goto, 0, 11, 0},
+ /* 9 */ {OP_Next, 0, 2, 0},
+ /* 10 */ {OP_Integer, 0, 0, 0},
+ /* 11 */ {OP_Close, 0, 0, 0}
+ };
+ VdbeOp *aOp;
+ pDb = &db->aDb[p->iDb];
+ memId = p->regCtr;
+ assert( sqlite3SchemaMutexHeld(db, 0, pDb->pSchema) );
+ sqlite3OpenTable(pParse, 0, p->iDb, pDb->pSchema->pSeqTab, OP_OpenRead);
+ sqlite3VdbeLoadString(v, memId-1, p->pTab->zName);
+ aOp = sqlite3VdbeAddOpList(v, ArraySize(autoInc), autoInc, iLn);
+ if( aOp==0 ) break;
+ aOp[0].p2 = memId;
+ aOp[0].p3 = memId+2;
+ aOp[2].p3 = memId;
+ aOp[3].p1 = memId-1;
+ aOp[3].p3 = memId;
+ aOp[3].p5 = SQLITE_JUMPIFNULL;
+ aOp[4].p2 = memId+1;
+ aOp[5].p3 = memId;
+ aOp[6].p1 = memId;
+ aOp[7].p2 = memId+2;
+ aOp[7].p1 = memId;
+ aOp[10].p2 = memId;
+ if( pParse->nTab==0 ) pParse->nTab = 1;
+ }
+}
+
+/*
+** Update the maximum rowid for an autoincrement calculation.
+**
+** This routine should be called when the regRowid register holds a
+** new rowid that is about to be inserted. If that new rowid is
+** larger than the maximum rowid in the memId memory cell, then the
+** memory cell is updated.
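+**
+** For example, if the row being inserted has rowid 12, the OP_MemMax
+** coded below leaves the memId cell holding the larger of its
+** previous value and 12.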
+*/
+static void autoIncStep(Parse *pParse, int memId, int regRowid){
+ if( memId>0 ){
+ sqlite3VdbeAddOp2(pParse->pVdbe, OP_MemMax, memId, regRowid);
+ }
+}
+
+/*
+** This routine generates the code needed to write autoincrement
+** maximum rowid values back into the sqlite_sequence table.
+** Every statement that might do an INSERT into an autoincrement
+** table (either directly or through triggers) needs to call this
+** routine just before the "exit" code.
+*/
+static SQLITE_NOINLINE void autoIncrementEnd(Parse *pParse){
+ AutoincInfo *p;
+ Vdbe *v = pParse->pVdbe;
+ sqlite3 *db = pParse->db;
+
+ assert( v );
+ for(p = pParse->pAinc; p; p = p->pNext){
+ static const int iLn = VDBE_OFFSET_LINENO(2);
+ static const VdbeOpList autoIncEnd[] = {
+ /* 0 */ {OP_NotNull, 0, 2, 0},
+ /* 1 */ {OP_NewRowid, 0, 0, 0},
+ /* 2 */ {OP_MakeRecord, 0, 2, 0},
+ /* 3 */ {OP_Insert, 0, 0, 0},
+ /* 4 */ {OP_Close, 0, 0, 0}
+ };
+ VdbeOp *aOp;
+ Db *pDb = &db->aDb[p->iDb];
+ int iRec;
+ int memId = p->regCtr;
+
+ iRec = sqlite3GetTempReg(pParse);
+ assert( sqlite3SchemaMutexHeld(db, 0, pDb->pSchema) );
+ sqlite3VdbeAddOp3(v, OP_Le, memId+2, sqlite3VdbeCurrentAddr(v)+7, memId);
+ VdbeCoverage(v);
+ sqlite3OpenTable(pParse, 0, p->iDb, pDb->pSchema->pSeqTab, OP_OpenWrite);
+ aOp = sqlite3VdbeAddOpList(v, ArraySize(autoIncEnd), autoIncEnd, iLn);
+ if( aOp==0 ) break;
+ aOp[0].p1 = memId+1;
+ aOp[1].p2 = memId+1;
+ aOp[2].p1 = memId-1;
+ aOp[2].p3 = iRec;
+ aOp[3].p2 = iRec;
+ aOp[3].p3 = memId+1;
+ aOp[3].p5 = OPFLAG_APPEND;
+ sqlite3ReleaseTempReg(pParse, iRec);
+ }
+}
+SQLITE_PRIVATE void sqlite3AutoincrementEnd(Parse *pParse){
+ if( pParse->pAinc ) autoIncrementEnd(pParse);
+}
+#else
+/*
+** If SQLITE_OMIT_AUTOINCREMENT is defined, then the three routines
+** above are all no-ops
+*/
+# define autoIncBegin(A,B,C) (0)
+# define autoIncStep(A,B,C)
+#endif /* SQLITE_OMIT_AUTOINCREMENT */
+
+
+/* Forward declaration */
+static int xferOptimization(
+ Parse *pParse, /* Parser context */
+ Table *pDest, /* The table we are inserting into */
+ Select *pSelect, /* A SELECT statement to use as the data source */
+ int onError, /* How to handle constraint errors */
+ int iDbDest /* The database of pDest */
+);
+
+/*
+** This routine is called to handle SQL of the following forms:
+**
+** insert into TABLE (IDLIST) values(EXPRLIST),(EXPRLIST),...
+** insert into TABLE (IDLIST) select
+** insert into TABLE (IDLIST) default values
+**
+** The IDLIST following the table name is always optional. If omitted,
+** then a list of all (non-hidden) columns for the table is substituted.
+** The IDLIST appears in the pColumn parameter. pColumn is NULL if IDLIST
+** is omitted.
+**
+** The pSelect parameter holds the values to be inserted for the
+** first two forms shown above. A VALUES clause is really just short-hand
+** for a SELECT statement that omits the FROM clause and everything else
+** that follows. If the pSelect parameter is NULL, that means that the
+** DEFAULT VALUES form of the INSERT statement is intended.
+**
+** The code generated follows one of four templates. For a simple
+** insert with data coming from a single-row VALUES clause, the code executes
+** once straight down through. Pseudo-code follows (we call this
+** the "1st template"):
+**
+** open write cursor to <table> and its indices
+** put VALUES clause expressions into registers
+** write the resulting record into <table>
+** cleanup
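+**
+** For example, "INSERT INTO t1 VALUES(1,2)" (t1 being any ordinary
+** table; the name is illustrative) is coded with this 1st template.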
+**
+** The three remaining templates assume the statement is of the form
+**
+** INSERT INTO <table> SELECT ...
+**
+** If the SELECT clause is of the restricted form "SELECT * FROM <table2>" -
+** in other words if the SELECT pulls all columns from a single table
+** and there are no WHERE, LIMIT, GROUP BY, or ORDER BY clauses, and
+** if <table2> and <table> are distinct tables but have identical
+** schemas, including all the same indices, then a special optimization
+** is invoked that copies raw records from <table2> over to <table>.
+** See the xferOptimization() function for the implementation of this
+** template. This is the 2nd template.
+**
+** open a write cursor to <table>
+** open read cursor on <table2>
+** transfer all records in <table2> over to <table>
+** close cursors
+** foreach index on <table>
+** open a write cursor on the <table> index
+** open a read cursor on the corresponding <table2> index
+** transfer all records from the read to the write cursors
+** close cursors
+** end foreach
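+**
+** For example, if t1 and t2 (illustrative names) were created with
+** identical CREATE TABLE and CREATE INDEX statements, then
+** "INSERT INTO t1 SELECT * FROM t2" qualifies for this raw-record
+** transfer.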
+**
+** The 3rd template is for when the second template does not apply
+** and the SELECT clause does not read from <table> at any time.
+** The generated code follows this template:
+**
+** X <- A
+** goto B
+** A: setup for the SELECT
+** loop over the rows in the SELECT
+** load values into registers R..R+n
+** yield X
+** end loop
+** cleanup after the SELECT
+** end-coroutine X
+** B: open write cursor to <table> and its indices
+** C: yield X, at EOF goto D
+** insert the select result into <table> from R..R+n
+** goto C
+** D: cleanup
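+**
+** For example, "INSERT INTO t1 SELECT a+1, b FROM t2" (illustrative
+** names; t2 distinct from t1, no triggers on t1) is coded with this
+** 3rd template.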
+**
+** The 4th template is used if the insert statement takes its
+** values from a SELECT but the data is being inserted into a table
+** that is also read as part of the SELECT. In this case,
+** we have to use an intermediate table to store the results of
+** the select. The template is like this:
+**
+** X <- A
+** goto B
+** A: setup for the SELECT
+** loop over the rows in the SELECT
+** load values into registers R..R+n
+** yield X
+** end loop
+** cleanup after the SELECT
+** end-coroutine X
+** B: open temp table
+** L: yield X, at EOF goto M
+** insert row from R..R+n into temp table
+** goto L
+** M: open write cursor to <table> and its indices
+** rewind temp table
+** C: loop over rows of intermediate table
+** transfer values from intermediate table into <table>
+** end loop
+** D: cleanup
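+**
+** For example, "INSERT INTO t1 SELECT * FROM t1" reads the very table
+** it writes, so the SELECT output must be buffered in the temp table.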
+*/
+SQLITE_PRIVATE void sqlite3Insert(
+ Parse *pParse, /* Parser context */
+ SrcList *pTabList, /* Name of table into which we are inserting */
+ Select *pSelect, /* A SELECT statement to use as the data source */
+ IdList *pColumn, /* Column names corresponding to IDLIST, or NULL. */
+ int onError, /* How to handle constraint errors */
+ Upsert *pUpsert /* ON CONFLICT clauses for upsert, or NULL */
+){
+ sqlite3 *db; /* The main database structure */
+ Table *pTab; /* The table to insert into. aka TABLE */
+ int i, j; /* Loop counters */
+ Vdbe *v; /* Generate code into this virtual machine */
+ Index *pIdx; /* For looping over indices of the table */
+ int nColumn; /* Number of columns in the data */
+ int nHidden = 0; /* Number of hidden columns if TABLE is virtual */
+ int iDataCur = 0; /* VDBE cursor that is the main data repository */
+ int iIdxCur = 0; /* First index cursor */
+ int ipkColumn = -1; /* Column that is the INTEGER PRIMARY KEY */
+ int endOfLoop; /* Label for the end of the insertion loop */
+ int srcTab = 0; /* Data comes from this temporary cursor if >=0 */
+ int addrInsTop = 0; /* Jump to label "D" */
+ int addrCont = 0; /* Top of insert loop. Label "C" in templates 3 and 4 */
+ SelectDest dest; /* Destination for SELECT on rhs of INSERT */
+ int iDb; /* Index of database holding TABLE */
+ u8 useTempTable = 0; /* Store SELECT results in intermediate table */
+ u8 appendFlag = 0; /* True if the insert is likely to be an append */
+ u8 withoutRowid; /* 0 for normal table. 1 for WITHOUT ROWID table */
+ u8 bIdListInOrder; /* True if IDLIST is in table order */
+ ExprList *pList = 0; /* List of VALUES() to be inserted */
+ int iRegStore; /* Register in which to store next column */
+
+ /* Register allocations */
+ int regFromSelect = 0;/* Base register for data coming from SELECT */
+ int regAutoinc = 0; /* Register holding the AUTOINCREMENT counter */
+ int regRowCount = 0; /* Memory cell used for the row counter */
+ int regIns; /* Block of regs holding rowid+data being inserted */
+ int regRowid; /* Register holding the insert rowid */
+ int regData; /* Register holding the first column to insert */
+ int *aRegIdx = 0; /* One register allocated to each index */
+
+#ifndef SQLITE_OMIT_TRIGGER
+ int isView; /* True if attempting to insert into a view */
+ Trigger *pTrigger; /* List of triggers on pTab, if required */
+ int tmask; /* Mask of trigger times */
+#endif
+
+ db = pParse->db;
+ if( pParse->nErr || db->mallocFailed ){
+ goto insert_cleanup;
+ }
+ dest.iSDParm = 0; /* Suppress a harmless compiler warning */
+
+ /* If the Select object is really just a simple VALUES() list with a
+ ** single row (the common case) then keep that one row of values
+ ** and discard the other (unused) parts of the pSelect object
+ */
+ if( pSelect && (pSelect->selFlags & SF_Values)!=0 && pSelect->pPrior==0 ){
+ pList = pSelect->pEList;
+ pSelect->pEList = 0;
+ sqlite3SelectDelete(db, pSelect);
+ pSelect = 0;
+ }
+
+ /* Locate the table into which we will be inserting new information.
+ */
+ assert( pTabList->nSrc==1 );
+ pTab = sqlite3SrcListLookup(pParse, pTabList);
+ if( pTab==0 ){
+ goto insert_cleanup;
+ }
+ iDb = sqlite3SchemaToIndex(db, pTab->pSchema);
+ assert( iDb<db->nDb );
+ if( sqlite3AuthCheck(pParse, SQLITE_INSERT, pTab->zName, 0,
+ db->aDb[iDb].zDbSName) ){
+ goto insert_cleanup;
+ }
+ withoutRowid = !HasRowid(pTab);
+
+ /* Figure out if we have any triggers and if the table being
+ ** inserted into is a view
+ */
+#ifndef SQLITE_OMIT_TRIGGER
+ pTrigger = sqlite3TriggersExist(pParse, pTab, TK_INSERT, 0, &tmask);
+ isView = pTab->pSelect!=0;
+#else
+# define pTrigger 0
+# define tmask 0
+# define isView 0
+#endif
+#ifdef SQLITE_OMIT_VIEW
+# undef isView
+# define isView 0
+#endif
+ assert( (pTrigger && tmask) || (pTrigger==0 && tmask==0) );
+
+ /* If pTab is really a view, make sure it has been initialized.
+ ** ViewGetColumnNames() is a no-op if pTab is not a view.
+ */
+ if( sqlite3ViewGetColumnNames(pParse, pTab) ){
+ goto insert_cleanup;
+ }
+
+ /* Cannot insert into a read-only table.
+ */
+ if( sqlite3IsReadOnly(pParse, pTab, tmask) ){
+ goto insert_cleanup;
+ }
+
+ /* Allocate a VDBE
+ */
+ v = sqlite3GetVdbe(pParse);
+ if( v==0 ) goto insert_cleanup;
+ if( pParse->nested==0 ) sqlite3VdbeCountChanges(v);
+ sqlite3BeginWriteOperation(pParse, pSelect || pTrigger, iDb);
+
+#ifndef SQLITE_OMIT_XFER_OPT
+ /* If the statement is of the form
+ **
+ ** INSERT INTO <table1> SELECT * FROM <table2>;
+ **
+ ** Then special optimizations can be applied that make the transfer
+ ** very fast and which reduce fragmentation of indices.
+ **
+ ** This is the 2nd template.
+ */
+ if( pColumn==0 && xferOptimization(pParse, pTab, pSelect, onError, iDb) ){
+ assert( !pTrigger );
+ assert( pList==0 );
+ goto insert_end;
+ }
+#endif /* SQLITE_OMIT_XFER_OPT */
+
+ /* If this is an AUTOINCREMENT table, look up the sequence number in the
+ ** sqlite_sequence table and store it in memory cell regAutoinc.
+ */
+ regAutoinc = autoIncBegin(pParse, iDb, pTab);
+
+ /* Allocate a block of registers to hold the rowid and the values
+ ** for all columns of the new row.
+ */
+ regRowid = regIns = pParse->nMem+1;
+ pParse->nMem += pTab->nCol + 1;
+ if( IsVirtual(pTab) ){
+ regRowid++;
+ pParse->nMem++;
+ }
+ regData = regRowid+1;
+
+ /* If the INSERT statement included an IDLIST term, then make sure
+ ** all elements of the IDLIST really are columns of the table and
+ ** remember the column indices.
+ **
+ ** If the table has an INTEGER PRIMARY KEY column and that column
+ ** is named in the IDLIST, then record in the ipkColumn variable
+ ** the index into IDLIST of the primary key column. ipkColumn is
+ ** the index of the primary key as it appears in IDLIST, not as
+ ** it appears in the original table. (The index of the INTEGER
+ ** PRIMARY KEY in the original table is pTab->iPKey.) After this
+ ** loop, if ipkColumn==(-1), that means that the integer primary key
+ ** is unspecified, and hence the table is either WITHOUT ROWID or
+ ** an integer primary key will be generated automatically.
+ **
+ ** bIdListInOrder is true if the columns in IDLIST are in storage
+ ** order. This enables an optimization that avoids shuffling the
+ ** columns into storage order. False negatives are harmless,
+ ** but false positives will cause database corruption.
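+ **
+ ** For example, given "CREATE TABLE t(a,b,c)" (an illustrative
+ ** schema), "INSERT INTO t(a,b,c) ..." leaves bIdListInOrder set
+ ** while "INSERT INTO t(c,a) ..." clears it.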
+ */
+ bIdListInOrder = (pTab->tabFlags & (TF_OOOHidden|TF_HasStored))==0;
+ if( pColumn ){
+ for(i=0; i<pColumn->nId; i++){
+ pColumn->a[i].idx = -1;
+ }
+ for(i=0; i<pColumn->nId; i++){
+ for(j=0; j<pTab->nCol; j++){
+ if( sqlite3StrICmp(pColumn->a[i].zName, pTab->aCol[j].zName)==0 ){
+ pColumn->a[i].idx = j;
+ if( i!=j ) bIdListInOrder = 0;
+ if( j==pTab->iPKey ){
+ ipkColumn = i; assert( !withoutRowid );
+ }
+#ifndef SQLITE_OMIT_GENERATED_COLUMNS
+ if( pTab->aCol[j].colFlags & (COLFLAG_STORED|COLFLAG_VIRTUAL) ){
+ sqlite3ErrorMsg(pParse,
+ "cannot INSERT into generated column \"%s\"",
+ pTab->aCol[j].zName);
+ goto insert_cleanup;
+ }
+#endif
+ break;
+ }
+ }
+ if( j>=pTab->nCol ){
+ if( sqlite3IsRowid(pColumn->a[i].zName) && !withoutRowid ){
+ ipkColumn = i;
+ bIdListInOrder = 0;
+ }else{
+ sqlite3ErrorMsg(pParse, "table %S has no column named %s",
+ pTabList, 0, pColumn->a[i].zName);
+ pParse->checkSchema = 1;
+ goto insert_cleanup;
+ }
+ }
+ }
+ }
+
+ /* Figure out how many columns of data are supplied. If the data
+ ** is coming from a SELECT statement, then generate a co-routine that
+ ** produces a single row of the SELECT on each invocation. The
+ ** co-routine is the common header to the 3rd and 4th templates.
+ */
+ if( pSelect ){
+ /* Data is coming from a SELECT or from a multi-row VALUES clause.
+ ** Generate a co-routine to run the SELECT. */
+ int regYield; /* Register holding co-routine entry-point */
+ int addrTop; /* Top of the co-routine */
+ int rc; /* Result code */
+
+ regYield = ++pParse->nMem;
+ addrTop = sqlite3VdbeCurrentAddr(v) + 1;
+ sqlite3VdbeAddOp3(v, OP_InitCoroutine, regYield, 0, addrTop);
+ sqlite3SelectDestInit(&dest, SRT_Coroutine, regYield);
+ dest.iSdst = bIdListInOrder ? regData : 0;
+ dest.nSdst = pTab->nCol;
+ rc = sqlite3Select(pParse, pSelect, &dest);
+ regFromSelect = dest.iSdst;
+ if( rc || db->mallocFailed || pParse->nErr ) goto insert_cleanup;
+ sqlite3VdbeEndCoroutine(v, regYield);
+ sqlite3VdbeJumpHere(v, addrTop - 1); /* label B: */
+ assert( pSelect->pEList );
+ nColumn = pSelect->pEList->nExpr;
+
+ /* Set useTempTable to TRUE if the result of the SELECT statement
+ ** should be written into a temporary table (template 4). Set to
+ ** FALSE if each output row of the SELECT can be written directly into
+ ** the destination table (template 3).
+ **
+ ** A temp table must be used if the table being updated is also one
+ ** of the tables being read by the SELECT statement. Also use a
+ ** temp table in the case of row triggers.
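+ **
+ ** For example, "INSERT INTO t1 SELECT b, a FROM t1" (an illustrative
+ ** statement) both reads and writes t1, so its SELECT output must be
+ ** buffered to keep the scan from visiting newly inserted rows.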
+ */
+ if( pTrigger || readsTable(pParse, iDb, pTab) ){
+ useTempTable = 1;
+ }
+
+ if( useTempTable ){
+ /* Invoke the coroutine to extract information from the SELECT
+ ** and add it to a transient table srcTab. The code generated
+ ** here is from the 4th template:
+ **
+ ** B: open temp table
+ ** L: yield X, at EOF goto M
+ ** insert row from R..R+n into temp table
+ ** goto L
+ ** M: ...
+ */
+ int regRec; /* Register to hold packed record */
+ int regTempRowid; /* Register to hold temp table ROWID */
+ int addrL; /* Label "L" */
+
+ srcTab = pParse->nTab++;
+ regRec = sqlite3GetTempReg(pParse);
+ regTempRowid = sqlite3GetTempReg(pParse);
+ sqlite3VdbeAddOp2(v, OP_OpenEphemeral, srcTab, nColumn);
+ addrL = sqlite3VdbeAddOp1(v, OP_Yield, dest.iSDParm); VdbeCoverage(v);
+ sqlite3VdbeAddOp3(v, OP_MakeRecord, regFromSelect, nColumn, regRec);
+ sqlite3VdbeAddOp2(v, OP_NewRowid, srcTab, regTempRowid);
+ sqlite3VdbeAddOp3(v, OP_Insert, srcTab, regRec, regTempRowid);
+ sqlite3VdbeGoto(v, addrL);
+ sqlite3VdbeJumpHere(v, addrL);
+ sqlite3ReleaseTempReg(pParse, regRec);
+ sqlite3ReleaseTempReg(pParse, regTempRowid);
+ }
+ }else{
+ /* This is the case if the data for the INSERT is coming from a
+ ** single-row VALUES clause
+ */
+ NameContext sNC;
+ memset(&sNC, 0, sizeof(sNC));
+ sNC.pParse = pParse;
+ srcTab = -1;
+ assert( useTempTable==0 );
+ if( pList ){
+ nColumn = pList->nExpr;
+ if( sqlite3ResolveExprListNames(&sNC, pList) ){
+ goto insert_cleanup;
+ }
+ }else{
+ nColumn = 0;
+ }
+ }
+
+ /* If there is no IDLIST term but the table has an integer primary
+ ** key, then set the ipkColumn variable to the integer primary key
+ ** column index in the original table definition.
+ */
+ if( pColumn==0 && nColumn>0 ){
+ ipkColumn = pTab->iPKey;
+#ifndef SQLITE_OMIT_GENERATED_COLUMNS
+ if( ipkColumn>=0 && (pTab->tabFlags & TF_HasGenerated)!=0 ){
+ testcase( pTab->tabFlags & TF_HasVirtual );
+ testcase( pTab->tabFlags & TF_HasStored );
+ for(i=ipkColumn-1; i>=0; i--){
+ if( pTab->aCol[i].colFlags & COLFLAG_GENERATED ){
+ testcase( pTab->aCol[i].colFlags & COLFLAG_VIRTUAL );
+ testcase( pTab->aCol[i].colFlags & COLFLAG_STORED );
+ ipkColumn--;
+ }
+ }
+ }
+#endif
+ }
+
+ /* Make sure the number of columns in the source data matches the number
+ ** of columns to be inserted into the table.
+ */
+ for(i=0; i<pTab->nCol; i++){
+ if( pTab->aCol[i].colFlags & COLFLAG_NOINSERT ) nHidden++;
+ }
+ if( pColumn==0 && nColumn && nColumn!=(pTab->nCol-nHidden) ){
+ sqlite3ErrorMsg(pParse,
+ "table %S has %d columns but %d values were supplied",
+ pTabList, 0, pTab->nCol-nHidden, nColumn);
+ goto insert_cleanup;
+ }
+ if( pColumn!=0 && nColumn!=pColumn->nId ){
+ sqlite3ErrorMsg(pParse, "%d values for %d columns", nColumn, pColumn->nId);
+ goto insert_cleanup;
+ }
+
+ /* Initialize the count of rows to be inserted
+ */
+ if( (db->flags & SQLITE_CountRows)!=0
+ && !pParse->nested
+ && !pParse->pTriggerTab
+ ){
+ regRowCount = ++pParse->nMem;
+ sqlite3VdbeAddOp2(v, OP_Integer, 0, regRowCount);
+ }
+
+ /* If this is not a view, open the table and all indices */
+ if( !isView ){
+ int nIdx;
+ nIdx = sqlite3OpenTableAndIndices(pParse, pTab, OP_OpenWrite, 0, -1, 0,
+ &iDataCur, &iIdxCur);
+ aRegIdx = sqlite3DbMallocRawNN(db, sizeof(int)*(nIdx+2));
+ if( aRegIdx==0 ){
+ goto insert_cleanup;
+ }
+ for(i=0, pIdx=pTab->pIndex; i<nIdx; pIdx=pIdx->pNext, i++){
+ assert( pIdx );
+ aRegIdx[i] = ++pParse->nMem;
+ pParse->nMem += pIdx->nColumn;
+ }
+ aRegIdx[i] = ++pParse->nMem; /* Register to store the table record */
+ }
+#ifndef SQLITE_OMIT_UPSERT
+ if( pUpsert ){
+ if( IsVirtual(pTab) ){
+ sqlite3ErrorMsg(pParse, "UPSERT not implemented for virtual table \"%s\"",
+ pTab->zName);
+ goto insert_cleanup;
+ }
+ if( pTab->pSelect ){
+ sqlite3ErrorMsg(pParse, "cannot UPSERT a view");
+ goto insert_cleanup;
+ }
+ if( sqlite3HasExplicitNulls(pParse, pUpsert->pUpsertTarget) ){
+ goto insert_cleanup;
+ }
+ pTabList->a[0].iCursor = iDataCur;
+ pUpsert->pUpsertSrc = pTabList;
+ pUpsert->regData = regData;
+ pUpsert->iDataCur = iDataCur;
+ pUpsert->iIdxCur = iIdxCur;
+ if( pUpsert->pUpsertTarget ){
+ sqlite3UpsertAnalyzeTarget(pParse, pTabList, pUpsert);
+ }
+ }
+#endif
+
+
+ /* This is the top of the main insertion loop */
+ if( useTempTable ){
+ /* This block codes the top of loop only. The complete loop is the
+ ** following pseudocode (template 4):
+ **
+ ** rewind temp table, if empty goto D
+ ** C: loop over rows of intermediate table
+ ** transfer values from intermediate table into <table>
+ ** end loop
+ ** D: ...
+ */
+ addrInsTop = sqlite3VdbeAddOp1(v, OP_Rewind, srcTab); VdbeCoverage(v);
+ addrCont = sqlite3VdbeCurrentAddr(v);
+ }else if( pSelect ){
+ /* This block codes the top of loop only. The complete loop is the
+ ** following pseudocode (template 3):
+ **
+ ** C: yield X, at EOF goto D
+ ** insert the select result into <table> from R..R+n
+ ** goto C
+ ** D: ...
+ */
+ sqlite3VdbeReleaseRegisters(pParse, regData, pTab->nCol, 0, 0);
+ addrInsTop = addrCont = sqlite3VdbeAddOp1(v, OP_Yield, dest.iSDParm);
+ VdbeCoverage(v);
+ if( ipkColumn>=0 ){
+ /* tag-20191021-001: If the INTEGER PRIMARY KEY is being generated by the
+ ** SELECT, go ahead and copy the value into the rowid slot now, so that
+ ** the value does not get overwritten by a NULL at tag-20191021-002. */
+ sqlite3VdbeAddOp2(v, OP_Copy, regFromSelect+ipkColumn, regRowid);
+ }
+ }
+
+ /* Compute data for ordinary columns of the new entry. Values
+ ** are written in storage order into registers starting with regData.
+ ** Only ordinary columns are computed in this loop. The rowid
+ ** (if there is one) is computed later and generated columns are
+ ** computed after the rowid since they might depend on the value
+ ** of the rowid.
+ */
+ nHidden = 0;
+ iRegStore = regData; assert( regData==regRowid+1 );
+ for(i=0; i<pTab->nCol; i++, iRegStore++){
+ int k;
+ u32 colFlags;
+ assert( i>=nHidden );
+ if( i==pTab->iPKey ){
+ /* tag-20191021-002: References to the INTEGER PRIMARY KEY are filled
+ ** using the rowid. So put a NULL in the IPK slot of the record to avoid
+ ** using excess space. The file format definition requires this extra
+ ** NULL - we cannot optimize further by skipping the column completely */
+ sqlite3VdbeAddOp1(v, OP_SoftNull, iRegStore);
+ continue;
+ }
+ if( ((colFlags = pTab->aCol[i].colFlags) & COLFLAG_NOINSERT)!=0 ){
+ nHidden++;
+ if( (colFlags & COLFLAG_VIRTUAL)!=0 ){
+ /* Virtual columns do not participate in OP_MakeRecord. So back up
+ ** iRegStore by one slot to compensate for the iRegStore++ in the
+ ** outer for() loop */
+ iRegStore--;
+ continue;
+ }else if( (colFlags & COLFLAG_STORED)!=0 ){
+ /* Stored columns are computed later. But if there are BEFORE
+ ** triggers, the slots used for stored columns will be OP_Copy-ed
+ ** to a second block of registers, so the register needs to be
+ ** initialized to NULL to avoid an uninitialized register read */
+ if( tmask & TRIGGER_BEFORE ){
+ sqlite3VdbeAddOp1(v, OP_SoftNull, iRegStore);
+ }
+ continue;
+ }else if( pColumn==0 ){
+ /* Hidden columns that are not explicitly named in the INSERT
+ ** get their default value */
+ sqlite3ExprCodeFactorable(pParse, pTab->aCol[i].pDflt, iRegStore);
+ continue;
+ }
+ }
+ if( pColumn ){
+ for(j=0; j<pColumn->nId && pColumn->a[j].idx!=i; j++){}
+ if( j>=pColumn->nId ){
+ /* A column not named in the insert column list gets its
+ ** default value */
+ sqlite3ExprCodeFactorable(pParse, pTab->aCol[i].pDflt, iRegStore);
+ continue;
+ }
+ k = j;
+ }else if( nColumn==0 ){
+ /* This is INSERT INTO ... DEFAULT VALUES. Load the default value. */
+ sqlite3ExprCodeFactorable(pParse, pTab->aCol[i].pDflt, iRegStore);
+ continue;
+ }else{
+ k = i - nHidden;
+ }
+
+ if( useTempTable ){
+ sqlite3VdbeAddOp3(v, OP_Column, srcTab, k, iRegStore);
+ }else if( pSelect ){
+ if( regFromSelect!=regData ){
+ sqlite3VdbeAddOp2(v, OP_SCopy, regFromSelect+k, iRegStore);
+ }
+ }else{
+ sqlite3ExprCode(pParse, pList->a[k].pExpr, iRegStore);
+ }
+ }
+
+
+ /* Run the BEFORE and INSTEAD OF triggers, if there are any
+ */
+ endOfLoop = sqlite3VdbeMakeLabel(pParse);
+ if( tmask & TRIGGER_BEFORE ){
+ int regCols = sqlite3GetTempRange(pParse, pTab->nCol+1);
+
+ /* build the NEW.* reference row. Note that if there is an INTEGER
+ ** PRIMARY KEY into which a NULL is being inserted, that NULL will be
+ ** translated into a unique ID for the row. But on a BEFORE trigger,
+ ** we do not know what the unique ID will be (because the insert has
+ ** not happened yet) so we substitute a rowid of -1
+ */
+ if( ipkColumn<0 ){
+ sqlite3VdbeAddOp2(v, OP_Integer, -1, regCols);
+ }else{
+ int addr1;
+ assert( !withoutRowid );
+ if( useTempTable ){
+ sqlite3VdbeAddOp3(v, OP_Column, srcTab, ipkColumn, regCols);
+ }else{
+ assert( pSelect==0 ); /* Otherwise useTempTable is true */
+ sqlite3ExprCode(pParse, pList->a[ipkColumn].pExpr, regCols);
+ }
+ addr1 = sqlite3VdbeAddOp1(v, OP_NotNull, regCols); VdbeCoverage(v);
+ sqlite3VdbeAddOp2(v, OP_Integer, -1, regCols);
+ sqlite3VdbeJumpHere(v, addr1);
+ sqlite3VdbeAddOp1(v, OP_MustBeInt, regCols); VdbeCoverage(v);
+ }
+
+ /* Cannot have triggers on a virtual table. If it were possible,
+ ** this block would have to account for hidden columns.
+ */
+ assert( !IsVirtual(pTab) );
+
+ /* Copy the new data already generated. */
+ assert( pTab->nNVCol>0 );
+ sqlite3VdbeAddOp3(v, OP_Copy, regRowid+1, regCols+1, pTab->nNVCol-1);
+
+#ifndef SQLITE_OMIT_GENERATED_COLUMNS
+ /* Compute the new value for generated columns after all other
+ ** columns have already been computed. This must be done after
+ ** computing the ROWID in case one of the generated columns
+ ** refers to the ROWID. */
+ if( pTab->tabFlags & TF_HasGenerated ){
+ testcase( pTab->tabFlags & TF_HasVirtual );
+ testcase( pTab->tabFlags & TF_HasStored );
+ sqlite3ComputeGeneratedColumns(pParse, regCols+1, pTab);
+ }
+#endif
+
+ /* If this is an INSERT on a view with an INSTEAD OF INSERT trigger,
+ ** do not attempt any conversions before assembling the record.
+ ** If this is a real table, attempt conversions as required by the
+ ** table column affinities.
+ */
+ if( !isView ){
+ sqlite3TableAffinity(v, pTab, regCols+1);
+ }
+
+ /* Fire BEFORE or INSTEAD OF triggers */
+ sqlite3CodeRowTrigger(pParse, pTrigger, TK_INSERT, 0, TRIGGER_BEFORE,
+ pTab, regCols-pTab->nCol-1, onError, endOfLoop);
+
+ sqlite3ReleaseTempRange(pParse, regCols, pTab->nCol+1);
+ }
+
+ if( !isView ){
+ if( IsVirtual(pTab) ){
+ /* The row that the VUpdate opcode will delete: none */
+ sqlite3VdbeAddOp2(v, OP_Null, 0, regIns);
+ }
+ if( ipkColumn>=0 ){
+ /* Compute the new rowid */
+ if( useTempTable ){
+ sqlite3VdbeAddOp3(v, OP_Column, srcTab, ipkColumn, regRowid);
+ }else if( pSelect ){
+ /* Rowid already initialized at tag-20191021-001 */
+ }else{
+ Expr *pIpk = pList->a[ipkColumn].pExpr;
+ if( pIpk->op==TK_NULL && !IsVirtual(pTab) ){
+ sqlite3VdbeAddOp3(v, OP_NewRowid, iDataCur, regRowid, regAutoinc);
+ appendFlag = 1;
+ }else{
+ sqlite3ExprCode(pParse, pList->a[ipkColumn].pExpr, regRowid);
+ }
+ }
+ /* If the PRIMARY KEY expression is NULL, then use OP_NewRowid
+ ** to generate a unique primary key value.
+ */
+ if( !appendFlag ){
+ int addr1;
+ if( !IsVirtual(pTab) ){
+ addr1 = sqlite3VdbeAddOp1(v, OP_NotNull, regRowid); VdbeCoverage(v);
+ sqlite3VdbeAddOp3(v, OP_NewRowid, iDataCur, regRowid, regAutoinc);
+ sqlite3VdbeJumpHere(v, addr1);
+ }else{
+ addr1 = sqlite3VdbeCurrentAddr(v);
+ sqlite3VdbeAddOp2(v, OP_IsNull, regRowid, addr1+2); VdbeCoverage(v);
+ }
+ sqlite3VdbeAddOp1(v, OP_MustBeInt, regRowid); VdbeCoverage(v);
+ }
+ }else if( IsVirtual(pTab) || withoutRowid ){
+ sqlite3VdbeAddOp2(v, OP_Null, 0, regRowid);
+ }else{
+ sqlite3VdbeAddOp3(v, OP_NewRowid, iDataCur, regRowid, regAutoinc);
+ appendFlag = 1;
+ }
+ autoIncStep(pParse, regAutoinc, regRowid);
+
+#ifndef SQLITE_OMIT_GENERATED_COLUMNS
+ /* Compute the new value for generated columns after all other
+ ** columns have already been computed. This must be done after
+ ** computing the ROWID in case one of the generated columns
+ ** is derived from the INTEGER PRIMARY KEY. */
+ if( pTab->tabFlags & TF_HasGenerated ){
+ sqlite3ComputeGeneratedColumns(pParse, regRowid+1, pTab);
+ }
+#endif
+
+ /* Generate code to check constraints and generate index keys and
+ ** do the insertion.
+ */
+#ifndef SQLITE_OMIT_VIRTUALTABLE
+ if( IsVirtual(pTab) ){
+ const char *pVTab = (const char *)sqlite3GetVTable(db, pTab);
+ sqlite3VtabMakeWritable(pParse, pTab);
+ sqlite3VdbeAddOp4(v, OP_VUpdate, 1, pTab->nCol+2, regIns, pVTab, P4_VTAB);
+ sqlite3VdbeChangeP5(v, onError==OE_Default ? OE_Abort : onError);
+ sqlite3MayAbort(pParse);
+ }else
+#endif
+ {
+ int isReplace; /* Set to true if constraints may cause a replace */
+ int bUseSeek; /* True to use OPFLAG_SEEKRESULT */
+ sqlite3GenerateConstraintChecks(pParse, pTab, aRegIdx, iDataCur, iIdxCur,
+ regIns, 0, ipkColumn>=0, onError, endOfLoop, &isReplace, 0, pUpsert
+ );
+ sqlite3FkCheck(pParse, pTab, 0, regIns, 0, 0);
+
+ /* Set the OPFLAG_USESEEKRESULT flag if either (a) there are no REPLACE
+ ** constraints or (b) there are no triggers and this table is not a
+ ** parent table in a foreign key constraint. It is safe to set the
+ ** flag in the second case because, if any REPLACE constraint is hit,
+ ** an OP_Delete or OP_IdxDelete instruction will be executed on each
+ ** cursor that is disturbed. And these instructions both clear the
+ ** VdbeCursor.seekResult variable, disabling the OPFLAG_USESEEKRESULT
+ ** functionality. */
+ bUseSeek = (isReplace==0 || !sqlite3VdbeHasSubProgram(v));
+ sqlite3CompleteInsertion(pParse, pTab, iDataCur, iIdxCur,
+ regIns, aRegIdx, 0, appendFlag, bUseSeek
+ );
+ }
+ }
+
+ /* Update the count of rows that are inserted
+ */
+ if( regRowCount ){
+ sqlite3VdbeAddOp2(v, OP_AddImm, regRowCount, 1);
+ }
+
+ if( pTrigger ){
+ /* Code AFTER triggers */
+ sqlite3CodeRowTrigger(pParse, pTrigger, TK_INSERT, 0, TRIGGER_AFTER,
+ pTab, regData-2-pTab->nCol, onError, endOfLoop);
+ }
+
+ /* The bottom of the main insertion loop, if the data source
+ ** is a SELECT statement.
+ */
+ sqlite3VdbeResolveLabel(v, endOfLoop);
+ if( useTempTable ){
+ sqlite3VdbeAddOp2(v, OP_Next, srcTab, addrCont); VdbeCoverage(v);
+ sqlite3VdbeJumpHere(v, addrInsTop);
+ sqlite3VdbeAddOp1(v, OP_Close, srcTab);
+ }else if( pSelect ){
+ sqlite3VdbeGoto(v, addrCont);
+#ifdef SQLITE_DEBUG
+ /* If we are jumping back to an OP_Yield that is preceded by an
+ ** OP_ReleaseReg, set the p5 flag on the OP_Goto so that the
+ ** OP_ReleaseReg will be included in the loop. */
+ if( sqlite3VdbeGetOp(v, addrCont-1)->opcode==OP_ReleaseReg ){
+ assert( sqlite3VdbeGetOp(v, addrCont)->opcode==OP_Yield );
+ sqlite3VdbeChangeP5(v, 1);
+ }
+#endif
+ sqlite3VdbeJumpHere(v, addrInsTop);
+ }
+
+insert_end:
+ /* Update the sqlite_sequence table by storing the maximum rowid
+ ** counter values recorded while inserting into autoincrement
+ ** tables.
+ */
+ if( pParse->nested==0 && pParse->pTriggerTab==0 ){
+ sqlite3AutoincrementEnd(pParse);
+ }
+
+ /*
+ ** Return the number of rows inserted. If this routine is
+ ** generating code because of a call to sqlite3NestedParse(), do not
+ ** invoke the callback function.
+ */
+ if( regRowCount ){
+ sqlite3VdbeAddOp2(v, OP_ResultRow, regRowCount, 1);
+ sqlite3VdbeSetNumCols(v, 1);
+ sqlite3VdbeSetColName(v, 0, COLNAME_NAME, "rows inserted", SQLITE_STATIC);
+ }
+
+insert_cleanup:
+ sqlite3SrcListDelete(db, pTabList);
+ sqlite3ExprListDelete(db, pList);
+ sqlite3UpsertDelete(db, pUpsert);
+ sqlite3SelectDelete(db, pSelect);
+ sqlite3IdListDelete(db, pColumn);
+ sqlite3DbFree(db, aRegIdx);
+}
+
+/* Make sure "isView" and other macros defined above are undefined. Otherwise
+** they may interfere with compilation of other functions in this file
+** (or in another file, if this file becomes part of the amalgamation). */
+#ifdef isView
+ #undef isView
+#endif
+#ifdef pTrigger
+ #undef pTrigger
+#endif
+#ifdef tmask
+ #undef tmask
+#endif
+
+/*
+** Meanings of bits in pWalker->eCode for
+** sqlite3ExprReferencesUpdatedColumn()
+*/
+#define CKCNSTRNT_COLUMN 0x01 /* CHECK constraint uses a changing column */
+#define CKCNSTRNT_ROWID 0x02 /* CHECK constraint references the ROWID */
+
+/* This is the Walker callback from sqlite3ExprReferencesUpdatedColumn().
+** Set bit 0x01 of pWalker->eCode if this expression node references
+** any of the columns that are being modified by an UPDATE statement.
+*/
+static int checkConstraintExprNode(Walker *pWalker, Expr *pExpr){
+ if( pExpr->op==TK_COLUMN ){
+ assert( pExpr->iColumn>=0 || pExpr->iColumn==-1 );
+ if( pExpr->iColumn>=0 ){
+ if( pWalker->u.aiCol[pExpr->iColumn]>=0 ){
+ pWalker->eCode |= CKCNSTRNT_COLUMN;
+ }
+ }else{
+ pWalker->eCode |= CKCNSTRNT_ROWID;
+ }
+ }
+ return WRC_Continue;
+}
+
+/*
+** pExpr is a CHECK constraint on a row that is being UPDATE-ed. The
+** only columns that are modified by the UPDATE are those for which
+** aiChng[i]>=0, and also the ROWID is modified if chngRowid is true.
+**
+** Return true if CHECK constraint pExpr uses any of the
+** changing columns (or the rowid if it is changing). In other words,
+** return true if this CHECK constraint must be validated for
+** the new row in the UPDATE statement.
+**
+** 2018-09-15: pExpr might also be an expression for an index-on-expressions.
+** The operation of this routine is the same - return true if and only if
+** the expression uses one or more of the columns identified by the second and
+** third arguments.
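+**
+** For example, with "CREATE TABLE t(a,b, CHECK(a>0))" (illustrative),
+** an UPDATE that changes only column b does not need to re-verify the
+** CHECK constraint, and this routine returns false for it.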
+*/
+SQLITE_PRIVATE int sqlite3ExprReferencesUpdatedColumn(
+ Expr *pExpr, /* The expression to be checked */
+ int *aiChng, /* aiChng[x]>=0 if column x changed by the UPDATE */
+ int chngRowid /* True if UPDATE changes the rowid */
+){
+ Walker w;
+ memset(&w, 0, sizeof(w));
+ w.eCode = 0;
+ w.xExprCallback = checkConstraintExprNode;
+ w.u.aiCol = aiChng;
+ sqlite3WalkExpr(&w, pExpr);
+ if( !chngRowid ){
+ testcase( (w.eCode & CKCNSTRNT_ROWID)!=0 );
+ w.eCode &= ~CKCNSTRNT_ROWID;
+ }
+ testcase( w.eCode==0 );
+ testcase( w.eCode==CKCNSTRNT_COLUMN );
+ testcase( w.eCode==CKCNSTRNT_ROWID );
+ testcase( w.eCode==(CKCNSTRNT_ROWID|CKCNSTRNT_COLUMN) );
+ return w.eCode!=0;
+}
+
+/*
+** Generate code to do constraint checks prior to an INSERT or an UPDATE
+** on table pTab.
+**
+** The regNewData parameter is the first register in a range that contains
+** the data to be inserted or the data after the update. There will be
+** pTab->nCol+1 registers in this range. The first register (the one
+** that regNewData points to) will contain the new rowid, or NULL in the
+** case of a WITHOUT ROWID table. The second register in the range will
+** contain the content of the first table column. The third register will
+** contain the content of the second table column. And so forth.
+**
+** The regOldData parameter is similar to regNewData except that it contains
+** the data prior to an UPDATE rather than afterwards. regOldData is zero
+** for an INSERT. This routine can distinguish between UPDATE and INSERT by
+** checking regOldData for zero.
+**
+** For an UPDATE, the pkChng boolean is true if the true primary key (the
+** rowid for a normal table or the PRIMARY KEY for a WITHOUT ROWID table)
+** might be modified by the UPDATE. If pkChng is false, then the key of
+** the iDataCur content table is guaranteed to be unchanged by the UPDATE.
+**
+** For an INSERT, the pkChng boolean indicates whether or not the rowid
+** was explicitly specified as part of the INSERT statement. If pkChng
+** is zero, it means that either the rowid is computed automatically or
+** that the table is a WITHOUT ROWID table and has no rowid. On an INSERT,
+** pkChng will only be true if the INSERT statement provides an integer
+** value for either the rowid column or its INTEGER PRIMARY KEY alias.
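+** For example, "INSERT INTO t1(rowid,a) VALUES(17,...)" is coded with
+** pkChng!=0, while "INSERT INTO t1(a) VALUES(...)" on a rowid table
+** is coded with pkChng==0 (t1 is an illustrative name).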
+**
+** The code generated by this routine will store new index entries into
+** registers identified by aRegIdx[]. No index entry is created for
+** indices where aRegIdx[i]==0. The order of indices in aRegIdx[] is
+** the same as the order of indices on the linked list of indices
+** at pTab->pIndex.
+**
+** (2019-05-07) The generated code also creates a new record for the
+** main table, if pTab is a rowid table, and stores that record in the
+** register identified by aRegIdx[nIdx] - in other words in the first
+** entry of aRegIdx[] past the last index. It is important that the
+** record be generated during constraint checks to avoid affinity changes
+** to the register content that occur after constraint checks but before
+** the new record is inserted.
+**
+** The caller must have already opened writeable cursors on the main
+** table and all applicable indices (that is to say, all indices for which
+** aRegIdx[] is not zero). iDataCur is the cursor for the main table when
+** inserting or updating a rowid table, or the cursor for the PRIMARY KEY
+** index when operating on a WITHOUT ROWID table. iIdxCur is the cursor
+** for the first index in the pTab->pIndex list. Cursors for other indices
+** are at iIdxCur+N for the N-th element of the pTab->pIndex list.
+**
+** This routine also generates code to check constraints. NOT NULL,
+** CHECK, and UNIQUE constraints are all checked. If a constraint fails,
+** then the appropriate action is performed. There are five possible
+** actions: ROLLBACK, ABORT, FAIL, REPLACE, and IGNORE.
+**
+** Constraint type Action What Happens
+** --------------- ---------- ----------------------------------------
+** any ROLLBACK The current transaction is rolled back and
+** sqlite3_step() returns immediately with a
+** return code of SQLITE_CONSTRAINT.
+**
+** any ABORT Back out changes from the current command
+** only (do not do a complete rollback) then
+** cause sqlite3_step() to return immediately
+** with SQLITE_CONSTRAINT.
+**
+** any FAIL sqlite3_step() returns immediately with a
+** return code of SQLITE_CONSTRAINT. The
+** transaction is not rolled back and any
+** changes to prior rows are retained.
+**
+** any IGNORE The attempt to insert or update the current
+** row is skipped, without throwing an error.
+** Processing continues with the next row.
+** (There is an immediate jump to ignoreDest.)
+**
+** NOT NULL REPLACE The NULL value is replaced by the default
+** value for that column. If the default value
+** is NULL, the action is the same as ABORT.
+**
+** UNIQUE REPLACE The other row that conflicts with the row
+** being inserted is removed.
+**
+** CHECK REPLACE Illegal. Results in an exception.
+**
+** Which action to take is determined by the overrideError parameter.
+** Or if overrideError==OE_Default, then the pParse->onError parameter
+** is used. Or if pParse->onError==OE_Default then the onError value
+** for the constraint is used.
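+**
+** For example, "INSERT OR IGNORE INTO t1 ..." reaches this routine
+** with overrideError==OE_Ignore, which then takes precedence over any
+** ON CONFLICT clause attached to an individual constraint.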
+*/
+SQLITE_PRIVATE void sqlite3GenerateConstraintChecks(
+ Parse *pParse, /* The parser context */
+ Table *pTab, /* The table being inserted or updated */
+ int *aRegIdx, /* Use register aRegIdx[i] for index i. 0 for unused */
+ int iDataCur, /* Canonical data cursor (main table or PK index) */
+ int iIdxCur, /* First index cursor */
+ int regNewData, /* First register in a range holding values to insert */
+ int regOldData, /* Previous content. 0 for INSERTs */
+ u8 pkChng, /* Non-zero if the rowid or PRIMARY KEY changed */
+ u8 overrideError, /* Override onError to this if not OE_Default */
+ int ignoreDest, /* Jump to this label on an OE_Ignore resolution */
+ int *pbMayReplace, /* OUT: Set to true if constraint may cause a replace */
+ int *aiChng, /* column i is unchanged if aiChng[i]<0 */
+ Upsert *pUpsert /* ON CONFLICT clauses, if any. NULL otherwise */
+){
+ Vdbe *v; /* VDBE under construction */
+ Index *pIdx; /* Pointer to one of the indices */
+ Index *pPk = 0; /* The PRIMARY KEY index */
+ sqlite3 *db; /* Database connection */
+ int i; /* loop counter */
+ int ix; /* Index loop counter */
+ int nCol; /* Number of columns */
+ int onError; /* Conflict resolution strategy */
+ int seenReplace = 0; /* True if REPLACE is used to resolve INT PK conflict */
+ int nPkField; /* Number of fields in PRIMARY KEY. 1 for ROWID tables */
+ Index *pUpIdx = 0; /* Index to which to apply the upsert */
+ u8 isUpdate; /* True if this is an UPDATE operation */
+ u8 bAffinityDone = 0; /* True if the OP_Affinity operation has been run */
+ int upsertBypass = 0; /* Address of Goto to bypass upsert subroutine */
+ int upsertJump = 0; /* Address of Goto that jumps into upsert subroutine */
+ int ipkTop = 0; /* Top of the IPK uniqueness check */
+ int ipkBottom = 0; /* OP_Goto at the end of the IPK uniqueness check */
+ /* Variables associated with retesting uniqueness constraints after
+ ** replace triggers have run */
+ int regTrigCnt; /* Register used to count replace trigger invocations */
+ int addrRecheck = 0; /* Jump here to recheck all uniqueness constraints */
+ int lblRecheckOk = 0; /* Each recheck jumps to this label if it passes */
+ Trigger *pTrigger; /* List of DELETE triggers on the table pTab */
+ int nReplaceTrig = 0; /* Number of replace triggers coded */
+
+ isUpdate = regOldData!=0;
+ db = pParse->db;
+ v = sqlite3GetVdbe(pParse);
+ assert( v!=0 );
+ assert( pTab->pSelect==0 ); /* This table is not a VIEW */
+ nCol = pTab->nCol;
+
+ /* pPk is the PRIMARY KEY index for WITHOUT ROWID tables and NULL for
+ ** normal rowid tables. nPkField is the number of key fields in the
+ ** pPk index or 1 for a rowid table. In other words, nPkField is the
+ ** number of fields in the true primary key of the table. */
+ if( HasRowid(pTab) ){
+ pPk = 0;
+ nPkField = 1;
+ }else{
+ pPk = sqlite3PrimaryKeyIndex(pTab);
+ nPkField = pPk->nKeyCol;
+ }
+
+ /* Record that this module has started */
+ VdbeModuleComment((v, "BEGIN: GenCnstCks(%d,%d,%d,%d,%d)",
+ iDataCur, iIdxCur, regNewData, regOldData, pkChng));
+
+ /* Test all NOT NULL constraints.
+ */
+ if( pTab->tabFlags & TF_HasNotNull ){
+ int b2ndPass = 0; /* True if currently running 2nd pass */
+ int nSeenReplace = 0; /* Number of ON CONFLICT REPLACE operations */
+ int nGenerated = 0; /* Number of generated columns with NOT NULL */
+ while(1){ /* Make 2 passes over columns. Exit loop via "break" */
+ for(i=0; i<nCol; i++){
+ int iReg; /* Register holding column value */
+ Column *pCol = &pTab->aCol[i]; /* The column to check for NOT NULL */
+ int isGenerated; /* non-zero if column is generated */
+ onError = pCol->notNull;
+ if( onError==OE_None ) continue; /* No NOT NULL on this column */
+ if( i==pTab->iPKey ){
+ continue; /* ROWID is never NULL */
+ }
+ isGenerated = pCol->colFlags & COLFLAG_GENERATED;
+ if( isGenerated && !b2ndPass ){
+ nGenerated++;
+ continue; /* Generated columns processed on 2nd pass */
+ }
+ if( aiChng && aiChng[i]<0 && !isGenerated ){
+ /* Do not check NOT NULL on columns that do not change */
+ continue;
+ }
+ if( overrideError!=OE_Default ){
+ onError = overrideError;
+ }else if( onError==OE_Default ){
+ onError = OE_Abort;
+ }
+ if( onError==OE_Replace ){
+ if( b2ndPass /* REPLACE becomes ABORT on the 2nd pass */
+ || pCol->pDflt==0 /* REPLACE is ABORT if no DEFAULT value */
+ ){
+ testcase( pCol->colFlags & COLFLAG_VIRTUAL );
+ testcase( pCol->colFlags & COLFLAG_STORED );
+ testcase( pCol->colFlags & COLFLAG_GENERATED );
+ onError = OE_Abort;
+ }else{
+ assert( !isGenerated );
+ }
+ }else if( b2ndPass && !isGenerated ){
+ continue;
+ }
+ assert( onError==OE_Rollback || onError==OE_Abort || onError==OE_Fail
+ || onError==OE_Ignore || onError==OE_Replace );
+ testcase( i!=sqlite3TableColumnToStorage(pTab, i) );
+ iReg = sqlite3TableColumnToStorage(pTab, i) + regNewData + 1;
+ switch( onError ){
+ case OE_Replace: {
+ int addr1 = sqlite3VdbeAddOp1(v, OP_NotNull, iReg);
+ VdbeCoverage(v);
+ assert( (pCol->colFlags & COLFLAG_GENERATED)==0 );
+ nSeenReplace++;
+ sqlite3ExprCodeCopy(pParse, pCol->pDflt, iReg);
+ sqlite3VdbeJumpHere(v, addr1);
+ break;
+ }
+ case OE_Abort:
+ sqlite3MayAbort(pParse);
+ /* no break */ deliberate_fall_through
+ case OE_Rollback:
+ case OE_Fail: {
+ char *zMsg = sqlite3MPrintf(db, "%s.%s", pTab->zName,
+ pCol->zName);
+ sqlite3VdbeAddOp3(v, OP_HaltIfNull, SQLITE_CONSTRAINT_NOTNULL,
+ onError, iReg);
+ sqlite3VdbeAppendP4(v, zMsg, P4_DYNAMIC);
+ sqlite3VdbeChangeP5(v, P5_ConstraintNotNull);
+ VdbeCoverage(v);
+ break;
+ }
+ default: {
+ assert( onError==OE_Ignore );
+ sqlite3VdbeAddOp2(v, OP_IsNull, iReg, ignoreDest);
+ VdbeCoverage(v);
+ break;
+ }
+ } /* end switch(onError) */
+ } /* end loop i over columns */
+ if( nGenerated==0 && nSeenReplace==0 ){
+ /* If there are no generated columns with NOT NULL constraints
+ ** and no NOT NULL ON CONFLICT REPLACE constraints, then a single
+ ** pass is sufficient */
+ break;
+ }
+ if( b2ndPass ) break; /* Never need more than 2 passes */
+ b2ndPass = 1;
+#ifndef SQLITE_OMIT_GENERATED_COLUMNS
+ if( nSeenReplace>0 && (pTab->tabFlags & TF_HasGenerated)!=0 ){
+ /* If any NOT NULL ON CONFLICT REPLACE constraints fired on the
+ ** first pass, recompute values for all generated columns, as
+ ** those values might depend on columns affected by the REPLACE.
+ */
+ sqlite3ComputeGeneratedColumns(pParse, regNewData+1, pTab);
+ }
+#endif
+ } /* end of 2-pass loop */
+ } /* end if( has-not-null-constraints ) */
+
+ /* Test all CHECK constraints
+ */
+#ifndef SQLITE_OMIT_CHECK
+ if( pTab->pCheck && (db->flags & SQLITE_IgnoreChecks)==0 ){
+ ExprList *pCheck = pTab->pCheck;
+ pParse->iSelfTab = -(regNewData+1);
+ onError = overrideError!=OE_Default ? overrideError : OE_Abort;
+ for(i=0; i<pCheck->nExpr; i++){
+ int allOk;
+ Expr *pCopy;
+ Expr *pExpr = pCheck->a[i].pExpr;
+ if( aiChng
+ && !sqlite3ExprReferencesUpdatedColumn(pExpr, aiChng, pkChng)
+ ){
+ /* The check constraints do not reference any of the columns being
+ ** updated so there is no point in verifying the check constraint */
+ continue;
+ }
+ if( bAffinityDone==0 ){
+ sqlite3TableAffinity(v, pTab, regNewData+1);
+ bAffinityDone = 1;
+ }
+ allOk = sqlite3VdbeMakeLabel(pParse);
+ sqlite3VdbeVerifyAbortable(v, onError);
+ pCopy = sqlite3ExprDup(db, pExpr, 0);
+ if( !db->mallocFailed ){
+ sqlite3ExprIfTrue(pParse, pCopy, allOk, SQLITE_JUMPIFNULL);
+ }
+ sqlite3ExprDelete(db, pCopy);
+ if( onError==OE_Ignore ){
+ sqlite3VdbeGoto(v, ignoreDest);
+ }else{
+ char *zName = pCheck->a[i].zEName;
+ if( zName==0 ) zName = pTab->zName;
+ if( onError==OE_Replace ) onError = OE_Abort; /* IMP: R-26383-51744 */
+ sqlite3HaltConstraint(pParse, SQLITE_CONSTRAINT_CHECK,
+ onError, zName, P4_TRANSIENT,
+ P5_ConstraintCheck);
+ }
+ sqlite3VdbeResolveLabel(v, allOk);
+ }
+ pParse->iSelfTab = 0;
+ }
+#endif /* !defined(SQLITE_OMIT_CHECK) */
+
+ /* UNIQUE and PRIMARY KEY constraints should be handled in the following
+ ** order:
+ **
+ ** (1) OE_Update
+ ** (2) OE_Abort, OE_Fail, OE_Rollback, OE_Ignore
+ ** (3) OE_Replace
+ **
+ ** OE_Fail and OE_Ignore must happen before any changes are made.
+ ** OE_Update guarantees that only a single row will change, so it
+ ** must happen before OE_Replace. Technically, OE_Abort and OE_Rollback
+ ** could happen in any order, but they are grouped up front for
+ ** convenience.
+ **
+ ** 2018-08-14: Ticket https://www.sqlite.org/src/info/908f001483982c43
+ ** The order of constraints used to have OE_Update as (2) and OE_Abort
+ ** and so forth as (1). But apparently PostgreSQL checks the OE_Update
+ ** constraint before any others, so it had to be moved.
+ **
+ ** Constraint checking code is generated in this order:
+ ** (A) The rowid constraint
+ ** (B) Unique index constraints that do not have OE_Replace as their
+ ** default conflict resolution strategy
+** (C) Unique indexes that do use OE_Replace by default.
+ **
+ ** The ordering of (2) and (3) is accomplished by making sure the linked
+ ** list of indexes attached to a table puts all OE_Replace indexes last
+ ** in the list. See sqlite3CreateIndex() for where that happens.
+ */
+
+ if( pUpsert ){
+ if( pUpsert->pUpsertTarget==0 ){
+ /* An ON CONFLICT DO NOTHING clause, without a constraint-target.
+ ** Make all unique constraint resolution be OE_Ignore */
+ assert( pUpsert->pUpsertSet==0 );
+ overrideError = OE_Ignore;
+ pUpsert = 0;
+ }else if( (pUpIdx = pUpsert->pUpsertIdx)!=0 ){
+ /* The constraint-target uniqueness check must be run first.
+ ** Jump to that uniqueness check now */
+ upsertJump = sqlite3VdbeAddOp0(v, OP_Goto);
+ VdbeComment((v, "UPSERT constraint goes first"));
+ }
+ }
+
+ /* Determine if it is possible that triggers (either explicitly coded
+ ** triggers or FK resolution actions) might run as a result of deletes
+ ** that happen when OE_Replace conflict resolution occurs. (Call these
+ ** "replace triggers".) If any replace triggers run, we will need to
+ ** recheck all of the uniqueness constraints after they have all run.
+ ** But on the recheck, the resolution is OE_Abort instead of OE_Replace.
+ **
+ ** If replace triggers are a possibility, then
+ **
+ ** (1) Allocate register regTrigCnt and initialize it to zero.
+ ** That register will count the number of replace triggers that
+ ** fire. Constraint recheck only occurs if the number is positive.
+ ** (2) Initialize pTrigger to the list of all DELETE triggers on pTab.
+ ** (3) Initialize addrRecheck and lblRecheckOk
+ **
+ ** The uniqueness rechecking code will create a series of tests to run
+ ** in a second pass. The addrRecheck and lblRecheckOk variables are
+ ** used to link together these tests which are separated from each other
+ ** in the generated bytecode.
+ */
+ if( (db->flags & (SQLITE_RecTriggers|SQLITE_ForeignKeys))==0 ){
+ /* There are no DELETE triggers and no FK constraints. No constraint
+ ** rechecks are needed. */
+ pTrigger = 0;
+ regTrigCnt = 0;
+ }else{
+ if( db->flags&SQLITE_RecTriggers ){
+ pTrigger = sqlite3TriggersExist(pParse, pTab, TK_DELETE, 0, 0);
+ regTrigCnt = pTrigger!=0 || sqlite3FkRequired(pParse, pTab, 0, 0);
+ }else{
+ pTrigger = 0;
+ regTrigCnt = sqlite3FkRequired(pParse, pTab, 0, 0);
+ }
+ if( regTrigCnt ){
+ /* Replace triggers might exist. Allocate the counter and
+ ** initialize it to zero. */
+ regTrigCnt = ++pParse->nMem;
+ sqlite3VdbeAddOp2(v, OP_Integer, 0, regTrigCnt);
+ VdbeComment((v, "trigger count"));
+ lblRecheckOk = sqlite3VdbeMakeLabel(pParse);
+ addrRecheck = lblRecheckOk;
+ }
+ }
+
+ /* If rowid is changing, make sure the new rowid does not previously
+ ** exist in the table.
+ */
+ if( pkChng && pPk==0 ){
+ int addrRowidOk = sqlite3VdbeMakeLabel(pParse);
+
+ /* Figure out what action to take in case of a rowid collision */
+ onError = pTab->keyConf;
+ if( overrideError!=OE_Default ){
+ onError = overrideError;
+ }else if( onError==OE_Default ){
+ onError = OE_Abort;
+ }
+
+ /* figure out whether or not upsert applies in this case */
+ if( pUpsert && pUpsert->pUpsertIdx==0 ){
+ if( pUpsert->pUpsertSet==0 ){
+ onError = OE_Ignore; /* DO NOTHING is the same as INSERT OR IGNORE */
+ }else{
+ onError = OE_Update; /* DO UPDATE */
+ }
+ }
+
+ /* If the response to a rowid conflict is REPLACE but the response
+ ** to some other UNIQUE constraint is FAIL or IGNORE, then we need
+ ** to defer the running of the rowid conflict checking until after
+ ** the UNIQUE constraints have run.
+ */
+ if( onError==OE_Replace /* IPK rule is REPLACE */
+ && onError!=overrideError /* Rules for other constraints are different */
+ && pTab->pIndex /* There exist other constraints */
+ ){
+ ipkTop = sqlite3VdbeAddOp0(v, OP_Goto)+1;
+ VdbeComment((v, "defer IPK REPLACE until last"));
+ }
+
+ if( isUpdate ){
+ /* pkChng!=0 does not mean that the rowid has changed, only that
+ ** it might have changed. Skip the conflict logic below if the rowid
+ ** is unchanged. */
+ sqlite3VdbeAddOp3(v, OP_Eq, regNewData, addrRowidOk, regOldData);
+ sqlite3VdbeChangeP5(v, SQLITE_NOTNULL);
+ VdbeCoverage(v);
+ }
+
+ /* Check to see if the new rowid already exists in the table. Skip
+ ** the following conflict logic if it does not. */
+ VdbeNoopComment((v, "uniqueness check for ROWID"));
+ sqlite3VdbeVerifyAbortable(v, onError);
+ sqlite3VdbeAddOp3(v, OP_NotExists, iDataCur, addrRowidOk, regNewData);
+ VdbeCoverage(v);
+
+ switch( onError ){
+ default: {
+ onError = OE_Abort;
+ /* no break */ deliberate_fall_through
+ }
+ case OE_Rollback:
+ case OE_Abort:
+ case OE_Fail: {
+ testcase( onError==OE_Rollback );
+ testcase( onError==OE_Abort );
+ testcase( onError==OE_Fail );
+ sqlite3RowidConstraint(pParse, onError, pTab);
+ break;
+ }
+ case OE_Replace: {
+ /* If there are DELETE triggers on this table and the
+ ** recursive-triggers flag is set, call GenerateRowDelete() to
+ ** remove the conflicting row from the table. This will fire
+ ** the triggers and remove both the table and index b-tree entries.
+ **
+ ** Otherwise, if there are no triggers or the recursive-triggers
+ ** flag is not set, but the table has one or more indexes, call
+ ** GenerateRowIndexDelete(). This removes the index b-tree entries
+ ** only. The table b-tree entry will be replaced by the new entry
+ ** when it is inserted.
+ **
+ ** If either GenerateRowDelete() or GenerateRowIndexDelete() is called,
+ ** also invoke MultiWrite() to indicate that this VDBE may require
+ ** statement rollback (if the statement is aborted after the delete
+ ** takes place). Earlier versions called sqlite3MultiWrite() regardless,
+ ** but being more selective here allows statements like:
+ **
+ ** REPLACE INTO t(rowid) VALUES($newrowid)
+ **
+ ** to run without a statement journal if there are no indexes on the
+ ** table.
+ */
+ if( regTrigCnt ){
+ sqlite3MultiWrite(pParse);
+ sqlite3GenerateRowDelete(pParse, pTab, pTrigger, iDataCur, iIdxCur,
+ regNewData, 1, 0, OE_Replace, 1, -1);
+ sqlite3VdbeAddOp2(v, OP_AddImm, regTrigCnt, 1); /* incr trigger cnt */
+ nReplaceTrig++;
+ }else{
+#ifdef SQLITE_ENABLE_PREUPDATE_HOOK
+ assert( HasRowid(pTab) );
+ /* This OP_Delete opcode fires the pre-update-hook only. It does
+ ** not modify the b-tree. It is more efficient to let the coming
+ ** OP_Insert replace the existing entry than it is to delete the
+ ** existing entry and then insert a new one. */
+ sqlite3VdbeAddOp2(v, OP_Delete, iDataCur, OPFLAG_ISNOOP);
+ sqlite3VdbeAppendP4(v, pTab, P4_TABLE);
+#endif /* SQLITE_ENABLE_PREUPDATE_HOOK */
+ if( pTab->pIndex ){
+ sqlite3MultiWrite(pParse);
+ sqlite3GenerateRowIndexDelete(pParse, pTab, iDataCur, iIdxCur,0,-1);
+ }
+ }
+ seenReplace = 1;
+ break;
+ }
+#ifndef SQLITE_OMIT_UPSERT
+ case OE_Update: {
+ sqlite3UpsertDoUpdate(pParse, pUpsert, pTab, 0, iDataCur);
+ /* no break */ deliberate_fall_through
+ }
+#endif
+ case OE_Ignore: {
+ testcase( onError==OE_Ignore );
+ sqlite3VdbeGoto(v, ignoreDest);
+ break;
+ }
+ }
+ sqlite3VdbeResolveLabel(v, addrRowidOk);
+ if( ipkTop ){
+ ipkBottom = sqlite3VdbeAddOp0(v, OP_Goto);
+ sqlite3VdbeJumpHere(v, ipkTop-1);
+ }
+ }
+
+ /* Test all UNIQUE constraints by creating entries for each UNIQUE
+ ** index and making sure that duplicate entries do not already exist.
+ ** Compute the revised record entries for indices as we go.
+ **
+ ** This loop also handles the case of the PRIMARY KEY index for a
+ ** WITHOUT ROWID table.
+ */
+ for(ix=0, pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext, ix++){
+ int regIdx; /* Range of registers holding content for pIdx */
+ int regR; /* Range of registers holding conflicting PK */
+ int iThisCur; /* Cursor for this UNIQUE index */
+ int addrUniqueOk; /* Jump here if the UNIQUE constraint is satisfied */
+ int addrConflictCk; /* First opcode in the conflict check logic */
+
+ if( aRegIdx[ix]==0 ) continue; /* Skip indices that do not change */
+ if( pUpIdx==pIdx ){
+ addrUniqueOk = upsertJump+1;
+ upsertBypass = sqlite3VdbeGoto(v, 0);
+ VdbeComment((v, "Skip upsert subroutine"));
+ sqlite3VdbeJumpHere(v, upsertJump);
+ }else{
+ addrUniqueOk = sqlite3VdbeMakeLabel(pParse);
+ }
+ if( bAffinityDone==0 && (pUpIdx==0 || pUpIdx==pIdx) ){
+ sqlite3TableAffinity(v, pTab, regNewData+1);
+ bAffinityDone = 1;
+ }
+ VdbeNoopComment((v, "prep index %s", pIdx->zName));
+ iThisCur = iIdxCur+ix;
+
+
+ /* Skip partial indices for which the WHERE clause is not true */
+ if( pIdx->pPartIdxWhere ){
+ sqlite3VdbeAddOp2(v, OP_Null, 0, aRegIdx[ix]);
+ pParse->iSelfTab = -(regNewData+1);
+ sqlite3ExprIfFalseDup(pParse, pIdx->pPartIdxWhere, addrUniqueOk,
+ SQLITE_JUMPIFNULL);
+ pParse->iSelfTab = 0;
+ }
+
+ /* Create a record for this index entry as it should appear after
+ ** the insert or update. Store that record in the aRegIdx[ix] register
+ */
+ regIdx = aRegIdx[ix]+1;
+ for(i=0; i<pIdx->nColumn; i++){
+ int iField = pIdx->aiColumn[i];
+ int x;
+ if( iField==XN_EXPR ){
+ pParse->iSelfTab = -(regNewData+1);
+ sqlite3ExprCodeCopy(pParse, pIdx->aColExpr->a[i].pExpr, regIdx+i);
+ pParse->iSelfTab = 0;
+ VdbeComment((v, "%s column %d", pIdx->zName, i));
+ }else if( iField==XN_ROWID || iField==pTab->iPKey ){
+ x = regNewData;
+ sqlite3VdbeAddOp2(v, OP_IntCopy, x, regIdx+i);
+ VdbeComment((v, "rowid"));
+ }else{
+ testcase( sqlite3TableColumnToStorage(pTab, iField)!=iField );
+ x = sqlite3TableColumnToStorage(pTab, iField) + regNewData + 1;
+ sqlite3VdbeAddOp2(v, OP_SCopy, x, regIdx+i);
+ VdbeComment((v, "%s", pTab->aCol[iField].zName));
+ }
+ }
+ sqlite3VdbeAddOp3(v, OP_MakeRecord, regIdx, pIdx->nColumn, aRegIdx[ix]);
+ VdbeComment((v, "for %s", pIdx->zName));
+#ifdef SQLITE_ENABLE_NULL_TRIM
+ if( pIdx->idxType==SQLITE_IDXTYPE_PRIMARYKEY ){
+ sqlite3SetMakeRecordP5(v, pIdx->pTable);
+ }
+#endif
+ sqlite3VdbeReleaseRegisters(pParse, regIdx, pIdx->nColumn, 0, 0);
+
+ /* In an UPDATE operation, if this index is the PRIMARY KEY index
+ ** of a WITHOUT ROWID table and there has been no change to the
+ ** primary key, then no collision is possible. The collision detection
+ ** logic below can all be skipped. */
+ if( isUpdate && pPk==pIdx && pkChng==0 ){
+ sqlite3VdbeResolveLabel(v, addrUniqueOk);
+ continue;
+ }
+
+ /* Find out what action to take in case there is a uniqueness conflict */
+ onError = pIdx->onError;
+ if( onError==OE_None ){
+ sqlite3VdbeResolveLabel(v, addrUniqueOk);
+ continue; /* pIdx is not a UNIQUE index */
+ }
+ if( overrideError!=OE_Default ){
+ onError = overrideError;
+ }else if( onError==OE_Default ){
+ onError = OE_Abort;
+ }
+
+ /* Figure out if the upsert clause applies to this index */
+ if( pUpIdx==pIdx ){
+ if( pUpsert->pUpsertSet==0 ){
+ onError = OE_Ignore; /* DO NOTHING is the same as INSERT OR IGNORE */
+ }else{
+ onError = OE_Update; /* DO UPDATE */
+ }
+ }
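+ /* Illustrative SQL for the mapping above (an editorial sketch, not
+ ** part of the build):
+ **
+ **   INSERT INTO t(a) VALUES(1) ON CONFLICT(a) DO NOTHING;
+ **     -- resolved as OE_Ignore, like INSERT OR IGNORE
+ **   INSERT INTO t(a) VALUES(1) ON CONFLICT(a) DO UPDATE SET b=b+1;
+ **     -- resolved as OE_Update, handled by the OE_Update case below
+ */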
+
+ /* Collision detection may be omitted if all of the following are true:
+ ** (1) The conflict resolution algorithm is REPLACE
+ ** (2) The table is a WITHOUT ROWID table
+ ** (3) There are no secondary indexes on the table
+ ** (4) No delete triggers need to be fired if there is a conflict
+ ** (5) No FK constraint counters need to be updated if a conflict occurs.
+ **
+ ** This is not possible for ENABLE_PREUPDATE_HOOK builds, as the row
+ ** must be explicitly deleted in order to ensure any pre-update hook
+ ** is invoked. */
+#ifndef SQLITE_ENABLE_PREUPDATE_HOOK
+ if( (ix==0 && pIdx->pNext==0) /* Condition 3 */
+ && pPk==pIdx /* Condition 2 */
+ && onError==OE_Replace /* Condition 1 */
+ && ( 0==(db->flags&SQLITE_RecTriggers) || /* Condition 4 */
+ 0==sqlite3TriggersExist(pParse, pTab, TK_DELETE, 0, 0))
+ && ( 0==(db->flags&SQLITE_ForeignKeys) || /* Condition 5 */
+ (0==pTab->pFKey && 0==sqlite3FkReferences(pTab)))
+ ){
+ sqlite3VdbeResolveLabel(v, addrUniqueOk);
+ continue;
+ }
+#endif /* ifndef SQLITE_ENABLE_PREUPDATE_HOOK */
+
+ /* Check to see if the new index entry will be unique */
+ sqlite3VdbeVerifyAbortable(v, onError);
+ addrConflictCk =
+ sqlite3VdbeAddOp4Int(v, OP_NoConflict, iThisCur, addrUniqueOk,
+ regIdx, pIdx->nKeyCol); VdbeCoverage(v);
+
+ /* Generate code to handle collisions */
+ regR = (pIdx==pPk) ? regIdx : sqlite3GetTempRange(pParse, nPkField);
+ if( isUpdate || onError==OE_Replace ){
+ if( HasRowid(pTab) ){
+ sqlite3VdbeAddOp2(v, OP_IdxRowid, iThisCur, regR);
+ /* Conflict only if the rowid of the existing index entry
+ ** is different from old-rowid */
+ if( isUpdate ){
+ sqlite3VdbeAddOp3(v, OP_Eq, regR, addrUniqueOk, regOldData);
+ sqlite3VdbeChangeP5(v, SQLITE_NOTNULL);
+ VdbeCoverage(v);
+ }
+ }else{
+ int x;
+ /* Extract the PRIMARY KEY from the end of the index entry and
+ ** store it in registers regR..regR+nPk-1 */
+ if( pIdx!=pPk ){
+ for(i=0; i<pPk->nKeyCol; i++){
+ assert( pPk->aiColumn[i]>=0 );
+ x = sqlite3TableColumnToIndex(pIdx, pPk->aiColumn[i]);
+ sqlite3VdbeAddOp3(v, OP_Column, iThisCur, x, regR+i);
+ VdbeComment((v, "%s.%s", pTab->zName,
+ pTab->aCol[pPk->aiColumn[i]].zName));
+ }
+ }
+ if( isUpdate ){
+ /* If currently processing the PRIMARY KEY of a WITHOUT ROWID
+ ** table, only conflict if the new PRIMARY KEY values are actually
+ ** different from the old.
+ **
+ ** For a UNIQUE index, only conflict if the PRIMARY KEY values
+ ** of the matched index row are different from the original PRIMARY
+ ** KEY values of this row before the update. */
+ int addrJump = sqlite3VdbeCurrentAddr(v)+pPk->nKeyCol;
+ int op = OP_Ne;
+ int regCmp = (IsPrimaryKeyIndex(pIdx) ? regIdx : regR);
+
+ for(i=0; i<pPk->nKeyCol; i++){
+ char *p4 = (char*)sqlite3LocateCollSeq(pParse, pPk->azColl[i]);
+ x = pPk->aiColumn[i];
+ assert( x>=0 );
+ if( i==(pPk->nKeyCol-1) ){
+ addrJump = addrUniqueOk;
+ op = OP_Eq;
+ }
+ x = sqlite3TableColumnToStorage(pTab, x);
+ sqlite3VdbeAddOp4(v, op,
+ regOldData+1+x, addrJump, regCmp+i, p4, P4_COLLSEQ
+ );
+ sqlite3VdbeChangeP5(v, SQLITE_NOTNULL);
+ VdbeCoverageIf(v, op==OP_Eq);
+ VdbeCoverageIf(v, op==OP_Ne);
+ }
+ }
+ }
+ }
+
+ /* Generate code that executes if the new index entry is not unique */
+ assert( onError==OE_Rollback || onError==OE_Abort || onError==OE_Fail
+ || onError==OE_Ignore || onError==OE_Replace || onError==OE_Update );
+ switch( onError ){
+ case OE_Rollback:
+ case OE_Abort:
+ case OE_Fail: {
+ testcase( onError==OE_Rollback );
+ testcase( onError==OE_Abort );
+ testcase( onError==OE_Fail );
+ sqlite3UniqueConstraint(pParse, onError, pIdx);
+ break;
+ }
+#ifndef SQLITE_OMIT_UPSERT
+ case OE_Update: {
+ sqlite3UpsertDoUpdate(pParse, pUpsert, pTab, pIdx, iIdxCur+ix);
+ /* no break */ deliberate_fall_through
+ }
+#endif
+ case OE_Ignore: {
+ testcase( onError==OE_Ignore );
+ sqlite3VdbeGoto(v, ignoreDest);
+ break;
+ }
+ default: {
+ int nConflictCk; /* Number of opcodes in conflict check logic */
+
+ assert( onError==OE_Replace );
+ nConflictCk = sqlite3VdbeCurrentAddr(v) - addrConflictCk;
+ assert( nConflictCk>0 );
+ testcase( nConflictCk>1 );
+ if( regTrigCnt ){
+ sqlite3MultiWrite(pParse);
+ nReplaceTrig++;
+ }
+ if( pTrigger && isUpdate ){
+ sqlite3VdbeAddOp1(v, OP_CursorLock, iDataCur);
+ }
+ sqlite3GenerateRowDelete(pParse, pTab, pTrigger, iDataCur, iIdxCur,
+ regR, nPkField, 0, OE_Replace,
+ (pIdx==pPk ? ONEPASS_SINGLE : ONEPASS_OFF), iThisCur);
+ if( pTrigger && isUpdate ){
+ sqlite3VdbeAddOp1(v, OP_CursorUnlock, iDataCur);
+ }
+ if( regTrigCnt ){
+ int addrBypass; /* Jump destination to bypass recheck logic */
+
+ sqlite3VdbeAddOp2(v, OP_AddImm, regTrigCnt, 1); /* incr trigger cnt */
+ addrBypass = sqlite3VdbeAddOp0(v, OP_Goto); /* Bypass recheck */
+ VdbeComment((v, "bypass recheck"));
+
+ /* Here we insert code that will be invoked after all constraint
+ ** checks have run, if and only if one or more replace triggers
+ ** fired. */
+ sqlite3VdbeResolveLabel(v, lblRecheckOk);
+ lblRecheckOk = sqlite3VdbeMakeLabel(pParse);
+ if( pIdx->pPartIdxWhere ){
+ /* Bypass the recheck if this partial index is not defined
+ ** for the current row */
+ sqlite3VdbeAddOp2(v, OP_IsNull, regIdx-1, lblRecheckOk);
+ VdbeCoverage(v);
+ }
+ /* Copy the constraint check code from above, except change
+ ** the constraint-ok jump destination to be the address of
+ ** the next retest block */
+ while( nConflictCk>0 ){
+ VdbeOp x; /* Conflict check opcode to copy */
+ /* The sqlite3VdbeAddOp4() call might reallocate the opcode array.
+ ** Hence, make a complete copy of the opcode, rather than using
+ ** a pointer to the opcode. */
+ x = *sqlite3VdbeGetOp(v, addrConflictCk);
+ if( x.opcode!=OP_IdxRowid ){
+ int p2; /* New P2 value for copied conflict check opcode */
+ const char *zP4;
+ if( sqlite3OpcodeProperty[x.opcode]&OPFLG_JUMP ){
+ p2 = lblRecheckOk;
+ }else{
+ p2 = x.p2;
+ }
+ zP4 = x.p4type==P4_INT32 ? SQLITE_INT_TO_PTR(x.p4.i) : x.p4.z;
+ sqlite3VdbeAddOp4(v, x.opcode, x.p1, p2, x.p3, zP4, x.p4type);
+ sqlite3VdbeChangeP5(v, x.p5);
+ VdbeCoverageIf(v, p2!=x.p2);
+ }
+ nConflictCk--;
+ addrConflictCk++;
+ }
+ /* If the retest fails, issue an abort */
+ sqlite3UniqueConstraint(pParse, OE_Abort, pIdx);
+
+ sqlite3VdbeJumpHere(v, addrBypass); /* Terminate the recheck bypass */
+ }
+ seenReplace = 1;
+ break;
+ }
+ }
+ if( pUpIdx==pIdx ){
+ sqlite3VdbeGoto(v, upsertJump+1);
+ sqlite3VdbeJumpHere(v, upsertBypass);
+ }else{
+ sqlite3VdbeResolveLabel(v, addrUniqueOk);
+ }
+ if( regR!=regIdx ) sqlite3ReleaseTempRange(pParse, regR, nPkField);
+ }
+
+ /* If the IPK constraint is a REPLACE, run it last */
+ if( ipkTop ){
+ sqlite3VdbeGoto(v, ipkTop);
+ VdbeComment((v, "Do IPK REPLACE"));
+ sqlite3VdbeJumpHere(v, ipkBottom);
+ }
+
+ /* Recheck all uniqueness constraints after replace triggers have run */
+ testcase( regTrigCnt!=0 && nReplaceTrig==0 );
+ assert( regTrigCnt!=0 || nReplaceTrig==0 );
+ if( nReplaceTrig ){
+ sqlite3VdbeAddOp2(v, OP_IfNot, regTrigCnt, lblRecheckOk);VdbeCoverage(v);
+ if( !pPk ){
+ if( isUpdate ){
+ sqlite3VdbeAddOp3(v, OP_Eq, regNewData, addrRecheck, regOldData);
+ sqlite3VdbeChangeP5(v, SQLITE_NOTNULL);
+ VdbeCoverage(v);
+ }
+ sqlite3VdbeAddOp3(v, OP_NotExists, iDataCur, addrRecheck, regNewData);
+ VdbeCoverage(v);
+ sqlite3RowidConstraint(pParse, OE_Abort, pTab);
+ }else{
+ sqlite3VdbeGoto(v, addrRecheck);
+ }
+ sqlite3VdbeResolveLabel(v, lblRecheckOk);
+ }
+
+ /* Generate the table record */
+ if( HasRowid(pTab) ){
+ int regRec = aRegIdx[ix];
+ sqlite3VdbeAddOp3(v, OP_MakeRecord, regNewData+1, pTab->nNVCol, regRec);
+ sqlite3SetMakeRecordP5(v, pTab);
+ if( !bAffinityDone ){
+ sqlite3TableAffinity(v, pTab, 0);
+ }
+ }
+
+ *pbMayReplace = seenReplace;
+ VdbeModuleComment((v, "END: GenCnstCks(%d)", seenReplace));
+}
+
+#ifdef SQLITE_ENABLE_NULL_TRIM
+/*
+** Change the P5 operand on the last opcode (which should be an OP_MakeRecord)
+** to be the number of columns in table pTab that must not be NULL-trimmed.
+**
+** Or if no columns of pTab may be NULL-trimmed, leave P5 at zero.
+*/
+SQLITE_PRIVATE void sqlite3SetMakeRecordP5(Vdbe *v, Table *pTab){
+ u16 i;
+
+ /* Records with omitted columns are only allowed for schema format
+ ** version 2 and later (SQLite version 3.1.4, 2005-02-20). */
+ if( pTab->pSchema->file_format<2 ) return;
+
+ for(i=pTab->nCol-1; i>0; i--){
+ if( pTab->aCol[i].pDflt!=0 ) break;
+ if( pTab->aCol[i].colFlags & COLFLAG_PRIMKEY ) break;
+ }
+ sqlite3VdbeChangeP5(v, i+1);
+}
+#endif
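+
+/* A worked example of the computation above (editorial sketch): given
+**
+**   CREATE TABLE t1(a INTEGER PRIMARY KEY, b, c DEFAULT 0, d, e);
+**
+** the loop stops at column c because it has a DEFAULT, so P5 is set
+** to 3 and only the trailing columns d and e remain candidates for
+** NULL-trimming.
+*/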
+
+/*
+** This routine generates code to finish the INSERT or UPDATE operation
+** that was started by a prior call to sqlite3GenerateConstraintChecks.
+** A consecutive range of registers starting at regNewData contains the
+** rowid and the content to be inserted.
+**
+** The arguments to this routine should be the same as the first six
+** arguments to sqlite3GenerateConstraintChecks.
+*/
+SQLITE_PRIVATE void sqlite3CompleteInsertion(
+ Parse *pParse, /* The parser context */
+ Table *pTab, /* the table into which we are inserting */
+ int iDataCur, /* Cursor of the canonical data source */
+ int iIdxCur, /* First index cursor */
+ int regNewData, /* Range of content */
+ int *aRegIdx, /* Register used by each index. 0 for unused indices */
+ int update_flags, /* True for UPDATE, False for INSERT */
+ int appendBias, /* True if this is likely to be an append */
+ int useSeekResult /* True to set the USESEEKRESULT flag on OP_[Idx]Insert */
+){
+ Vdbe *v; /* Prepared statements under construction */
+ Index *pIdx; /* An index being inserted or updated */
+ u8 pik_flags; /* flag values passed to the btree insert */
+ int i; /* Loop counter */
+
+ assert( update_flags==0
+ || update_flags==OPFLAG_ISUPDATE
+ || update_flags==(OPFLAG_ISUPDATE|OPFLAG_SAVEPOSITION)
+ );
+
+ v = sqlite3GetVdbe(pParse);
+ assert( v!=0 );
+ assert( pTab->pSelect==0 ); /* This table is not a VIEW */
+ for(i=0, pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext, i++){
+ /* All REPLACE indexes are at the end of the list */
+ assert( pIdx->onError!=OE_Replace
+ || pIdx->pNext==0
+ || pIdx->pNext->onError==OE_Replace );
+ if( aRegIdx[i]==0 ) continue;
+ if( pIdx->pPartIdxWhere ){
+ sqlite3VdbeAddOp2(v, OP_IsNull, aRegIdx[i], sqlite3VdbeCurrentAddr(v)+2);
+ VdbeCoverage(v);
+ }
+ pik_flags = (useSeekResult ? OPFLAG_USESEEKRESULT : 0);
+ if( IsPrimaryKeyIndex(pIdx) && !HasRowid(pTab) ){
+ assert( pParse->nested==0 );
+ pik_flags |= OPFLAG_NCHANGE;
+ pik_flags |= (update_flags & OPFLAG_SAVEPOSITION);
+#ifdef SQLITE_ENABLE_PREUPDATE_HOOK
+ if( update_flags==0 ){
+ int r = sqlite3GetTempReg(pParse);
+ sqlite3VdbeAddOp2(v, OP_Integer, 0, r);
+ sqlite3VdbeAddOp4(v, OP_Insert,
+ iIdxCur+i, aRegIdx[i], r, (char*)pTab, P4_TABLE
+ );
+ sqlite3VdbeChangeP5(v, OPFLAG_ISNOOP);
+ sqlite3ReleaseTempReg(pParse, r);
+ }
+#endif
+ }
+ sqlite3VdbeAddOp4Int(v, OP_IdxInsert, iIdxCur+i, aRegIdx[i],
+ aRegIdx[i]+1,
+ pIdx->uniqNotNull ? pIdx->nKeyCol: pIdx->nColumn);
+ sqlite3VdbeChangeP5(v, pik_flags);
+ }
+ if( !HasRowid(pTab) ) return;
+ if( pParse->nested ){
+ pik_flags = 0;
+ }else{
+ pik_flags = OPFLAG_NCHANGE;
+ pik_flags |= (update_flags?update_flags:OPFLAG_LASTROWID);
+ }
+ if( appendBias ){
+ pik_flags |= OPFLAG_APPEND;
+ }
+ if( useSeekResult ){
+ pik_flags |= OPFLAG_USESEEKRESULT;
+ }
+ sqlite3VdbeAddOp3(v, OP_Insert, iDataCur, aRegIdx[i], regNewData);
+ if( !pParse->nested ){
+ sqlite3VdbeAppendP4(v, pTab, P4_TABLE);
+ }
+ sqlite3VdbeChangeP5(v, pik_flags);
+}
+
+/*
+** Allocate cursors for the pTab table and all its indices and generate
+** code to open and initialize those cursors.
+**
+** The cursor for the object that contains the complete data (normally
+** the table itself, but the PRIMARY KEY index in the case of a WITHOUT
+** ROWID table) is returned in *piDataCur. The first index cursor is
+** returned in *piIdxCur. The number of indices is returned.
+**
+** Use iBase as the first cursor (either the *piDataCur for rowid tables
+** or the first index for WITHOUT ROWID tables) if it is non-negative.
+** If iBase is negative, then allocate the next available cursor.
+**
+** For a rowid table, *piDataCur will be exactly one less than *piIdxCur.
+** For a WITHOUT ROWID table, *piDataCur will be somewhere in the range
+** of the index cursors, depending on where the PRIMARY KEY index
+** appears in the pTab->pIndex list.
+**
+** If pTab is a virtual table, then this routine is a no-op and the
+** *piDataCur and *piIdxCur values are left uninitialized.
+*/
+SQLITE_PRIVATE int sqlite3OpenTableAndIndices(
+ Parse *pParse, /* Parsing context */
+ Table *pTab, /* Table to be opened */
+ int op, /* OP_OpenRead or OP_OpenWrite */
+ u8 p5, /* P5 value for OP_Open* opcodes (except on WITHOUT ROWID) */
+ int iBase, /* Use this for the table cursor, if there is one */
+ u8 *aToOpen, /* If not NULL: boolean for each table and index */
+ int *piDataCur, /* Write the database source cursor number here */
+ int *piIdxCur /* Write the first index cursor number here */
+){
+ int i;
+ int iDb;
+ int iDataCur;
+ Index *pIdx;
+ Vdbe *v;
+
+ assert( op==OP_OpenRead || op==OP_OpenWrite );
+ assert( op==OP_OpenWrite || p5==0 );
+ if( IsVirtual(pTab) ){
+ /* This routine is a no-op for virtual tables. Leave the output
+ ** variables *piDataCur and *piIdxCur uninitialized so that valgrind
+ ** can detect if they are used by mistake in the caller. */
+ return 0;
+ }
+ iDb = sqlite3SchemaToIndex(pParse->db, pTab->pSchema);
+ v = sqlite3GetVdbe(pParse);
+ assert( v!=0 );
+ if( iBase<0 ) iBase = pParse->nTab;
+ iDataCur = iBase++;
+ if( piDataCur ) *piDataCur = iDataCur;
+ if( HasRowid(pTab) && (aToOpen==0 || aToOpen[0]) ){
+ sqlite3OpenTable(pParse, iDataCur, iDb, pTab, op);
+ }else{
+ sqlite3TableLock(pParse, iDb, pTab->tnum, op==OP_OpenWrite, pTab->zName);
+ }
+ if( piIdxCur ) *piIdxCur = iBase;
+ for(i=0, pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext, i++){
+ int iIdxCur = iBase++;
+ assert( pIdx->pSchema==pTab->pSchema );
+ if( IsPrimaryKeyIndex(pIdx) && !HasRowid(pTab) ){
+ if( piDataCur ) *piDataCur = iIdxCur;
+ p5 = 0;
+ }
+ if( aToOpen==0 || aToOpen[i+1] ){
+ sqlite3VdbeAddOp3(v, op, iIdxCur, pIdx->tnum, iDb);
+ sqlite3VdbeSetP4KeyInfo(pParse, pIdx);
+ sqlite3VdbeChangeP5(v, p5);
+ VdbeComment((v, "%s", pIdx->zName));
+ }
+ }
+ if( iBase>pParse->nTab ) pParse->nTab = iBase;
+ return i;
+}
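+
+/* A worked example of the numbering contract above (editorial sketch):
+** for a rowid table with two indices and iBase==5, this routine opens
+** the table at cursor 5 and the indices at cursors 6 and 7, sets
+** *piDataCur to 5 and *piIdxCur to 6, and returns 2.
+*/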
+
+
+#ifdef SQLITE_TEST
+/*
+** The following global variable is incremented whenever the
+** transfer optimization is used. This is used for testing
+** purposes only - to make sure the transfer optimization really
+** is happening when it is supposed to.
+*/
+SQLITE_API int sqlite3_xferopt_count;
+#endif /* SQLITE_TEST */
+
+
+#ifndef SQLITE_OMIT_XFER_OPT
+/*
+** Check to see if index pSrc is compatible as a source of data
+** for index pDest in an insert transfer optimization. The rules
+** for a compatible index:
+**
+** * The index is over the same set of columns
+** * The same DESC and ASC markings occur on all columns
+** * The same onError processing (OE_Abort, OE_Ignore, etc)
+** * The same collating sequence on each column
+** * The index has the exact same WHERE clause
+*/
+static int xferCompatibleIndex(Index *pDest, Index *pSrc){
+ int i;
+ assert( pDest && pSrc );
+ assert( pDest->pTable!=pSrc->pTable );
+ if( pDest->nKeyCol!=pSrc->nKeyCol || pDest->nColumn!=pSrc->nColumn ){
+ return 0; /* Different number of columns */
+ }
+ if( pDest->onError!=pSrc->onError ){
+ return 0; /* Different conflict resolution strategies */
+ }
+ for(i=0; i<pSrc->nKeyCol; i++){
+ if( pSrc->aiColumn[i]!=pDest->aiColumn[i] ){
+ return 0; /* Different columns indexed */
+ }
+ if( pSrc->aiColumn[i]==XN_EXPR ){
+ assert( pSrc->aColExpr!=0 && pDest->aColExpr!=0 );
+ if( sqlite3ExprCompare(0, pSrc->aColExpr->a[i].pExpr,
+ pDest->aColExpr->a[i].pExpr, -1)!=0 ){
+ return 0; /* Different expressions in the index */
+ }
+ }
+ if( pSrc->aSortOrder[i]!=pDest->aSortOrder[i] ){
+ return 0; /* Different sort orders */
+ }
+ if( sqlite3_stricmp(pSrc->azColl[i],pDest->azColl[i])!=0 ){
+ return 0; /* Different collating sequences */
+ }
+ }
+ if( sqlite3ExprCompare(0, pSrc->pPartIdxWhere, pDest->pPartIdxWhere, -1) ){
+ return 0; /* Different WHERE clauses */
+ }
+
+ /* If no test above fails then the indices must be compatible */
+ return 1;
+}
+
+/*
+** Attempt the transfer optimization on INSERTs of the form
+**
+** INSERT INTO tab1 SELECT * FROM tab2;
+**
+** The xfer optimization transfers raw records from tab2 over to tab1.
+** Columns are not decoded and reassembled, which greatly improves
+** performance. Raw index records are transferred in the same way.
+**
+** The xfer optimization is only attempted if tab1 and tab2 are compatible.
+** There are lots of rules for determining compatibility - see comments
+** embedded in the code for details.
+**
+** This routine returns TRUE if the optimization is guaranteed to be used.
+** Sometimes the xfer optimization will only work if the destination table
+** is empty - a factor that can only be determined at run-time. In that
+** case, this routine generates code for the xfer optimization but also
+** does a test to see if the destination table is empty and jumps over the
+** xfer optimization code if the test fails. In that case, this routine
+** returns FALSE so that the caller will know to go ahead and generate
+** an unoptimized transfer. This routine also returns FALSE if there
+** is no chance that the xfer optimization can be applied.
+**
+** This optimization is particularly useful at making VACUUM run faster.
+*/
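+/* Some illustrative statements (editorial sketch), assuming t1 and t2
+** have identical schemas:
+**
+**   INSERT INTO t2 SELECT * FROM t1;             -- xfer may apply
+**   INSERT INTO t2 SELECT a,b FROM t1;           -- not "*": no xfer
+**   INSERT INTO t2 SELECT * FROM t1 WHERE a>0;   -- WHERE clause: no xfer
+*/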
+static int xferOptimization(
+ Parse *pParse, /* Parser context */
+ Table *pDest, /* The table we are inserting into */
+ Select *pSelect, /* A SELECT statement to use as the data source */
+ int onError, /* How to handle constraint errors */
+ int iDbDest /* The database of pDest */
+){
+ sqlite3 *db = pParse->db;
+ ExprList *pEList; /* The result set of the SELECT */
+ Table *pSrc; /* The table in the FROM clause of SELECT */
+ Index *pSrcIdx, *pDestIdx; /* Source and destination indices */
+ struct SrcList_item *pItem; /* An element of pSelect->pSrc */
+ int i; /* Loop counter */
+ int iDbSrc; /* The database of pSrc */
+ int iSrc, iDest; /* Cursors from source and destination */
+ int addr1, addr2; /* Loop addresses */
+ int emptyDestTest = 0; /* Address of test for empty pDest */
+ int emptySrcTest = 0; /* Address of test for empty pSrc */
+ Vdbe *v; /* The VDBE we are building */
+ int regAutoinc; /* Memory register used by AUTOINC */
+ int destHasUniqueIdx = 0; /* True if pDest has a UNIQUE index */
+ int regData, regRowid; /* Registers holding data and rowid */
+
+ if( pSelect==0 ){
+ return 0; /* Must be of the form INSERT INTO ... SELECT ... */
+ }
+ if( pParse->pWith || pSelect->pWith ){
+ /* Do not attempt to process this query if there are any WITH clauses
+ ** attached to it. Proceeding may generate a false "no such table: xxx"
+ ** error if pSelect reads from a CTE named "xxx". */
+ return 0;
+ }
+ if( sqlite3TriggerList(pParse, pDest) ){
+ return 0; /* tab1 must not have triggers */
+ }
+#ifndef SQLITE_OMIT_VIRTUALTABLE
+ if( IsVirtual(pDest) ){
+ return 0; /* tab1 must not be a virtual table */
+ }
+#endif
+ if( onError==OE_Default ){
+ if( pDest->iPKey>=0 ) onError = pDest->keyConf;
+ if( onError==OE_Default ) onError = OE_Abort;
+ }
+ assert(pSelect->pSrc); /* allocated even if there is no FROM clause */
+ if( pSelect->pSrc->nSrc!=1 ){
+ return 0; /* FROM clause must have exactly one term */
+ }
+ if( pSelect->pSrc->a[0].pSelect ){
+ return 0; /* FROM clause cannot contain a subquery */
+ }
+ if( pSelect->pWhere ){
+ return 0; /* SELECT may not have a WHERE clause */
+ }
+ if( pSelect->pOrderBy ){
+ return 0; /* SELECT may not have an ORDER BY clause */
+ }
+ /* Do not need to test for a HAVING clause. If HAVING is present but
+ ** there is no GROUP BY, we will get an error. */
+ if( pSelect->pGroupBy ){
+ return 0; /* SELECT may not have a GROUP BY clause */
+ }
+ if( pSelect->pLimit ){
+ return 0; /* SELECT may not have a LIMIT clause */
+ }
+ if( pSelect->pPrior ){
+ return 0; /* SELECT may not be a compound query */
+ }
+ if( pSelect->selFlags & SF_Distinct ){
+ return 0; /* SELECT may not be DISTINCT */
+ }
+ pEList = pSelect->pEList;
+ assert( pEList!=0 );
+ if( pEList->nExpr!=1 ){
+ return 0; /* The result set must have exactly one column */
+ }
+ assert( pEList->a[0].pExpr );
+ if( pEList->a[0].pExpr->op!=TK_ASTERISK ){
+ return 0; /* The result set must be the special operator "*" */
+ }
+
+ /* At this point we have established that the statement is of the
+ ** correct syntactic form to participate in this optimization. Now
+ ** we have to check the semantics.
+ */
+ pItem = pSelect->pSrc->a;
+ pSrc = sqlite3LocateTableItem(pParse, 0, pItem);
+ if( pSrc==0 ){
+ return 0; /* FROM clause does not contain a real table */
+ }
+ if( pSrc->tnum==pDest->tnum && pSrc->pSchema==pDest->pSchema ){
+ testcase( pSrc!=pDest ); /* Possible due to bad sqlite_schema.rootpage */
+ return 0; /* tab1 and tab2 may not be the same table */
+ }
+ if( HasRowid(pDest)!=HasRowid(pSrc) ){
+ return 0; /* source and destination must both be WITHOUT ROWID or not */
+ }
+#ifndef SQLITE_OMIT_VIRTUALTABLE
+ if( IsVirtual(pSrc) ){
+ return 0; /* tab2 must not be a virtual table */
+ }
+#endif
+ if( pSrc->pSelect ){
+ return 0; /* tab2 may not be a view */
+ }
+ if( pDest->nCol!=pSrc->nCol ){
+ return 0; /* Number of columns must be the same in tab1 and tab2 */
+ }
+ if( pDest->iPKey!=pSrc->iPKey ){
+ return 0; /* Both tables must have the same INTEGER PRIMARY KEY */
+ }
+ for(i=0; i<pDest->nCol; i++){
+ Column *pDestCol = &pDest->aCol[i];
+ Column *pSrcCol = &pSrc->aCol[i];
+#ifdef SQLITE_ENABLE_HIDDEN_COLUMNS
+ if( (db->mDbFlags & DBFLAG_Vacuum)==0
+ && (pDestCol->colFlags | pSrcCol->colFlags) & COLFLAG_HIDDEN
+ ){
+ return 0; /* Neither table may have __hidden__ columns */
+ }
+#endif
+#ifndef SQLITE_OMIT_GENERATED_COLUMNS
+ /* Even if tables t1 and t2 have identical schemas, if they contain
+ ** generated columns, then this statement is semantically incorrect:
+ **
+ ** INSERT INTO t2 SELECT * FROM t1;
+ **
+ ** The reason is that generated column values are returned by the
+ ** SELECT statement on the right but the INSERT statement on the
+ ** left wants them to be omitted.
+ **
+ ** Nevertheless, this is a useful notational shorthand to tell SQLite
+ ** to do a bulk transfer of all of the content from t1 over to t2.
+ **
+ ** We could, in theory, disable this (except for internal use by the
+ ** VACUUM command where it is actually needed). But why do that? It
+ ** seems harmless enough, and provides a useful service.
+ */
+ if( (pDestCol->colFlags & COLFLAG_GENERATED) !=
+ (pSrcCol->colFlags & COLFLAG_GENERATED) ){
+ return 0; /* Both columns must have the same generated-column type */
+ }
+ /* But the transfer is only allowed if both the source and destination
+ ** tables have the exact same expressions for generated columns.
+ ** This requirement could be relaxed for VIRTUAL columns, I suppose.
+ */
+ if( (pDestCol->colFlags & COLFLAG_GENERATED)!=0 ){
+ if( sqlite3ExprCompare(0, pSrcCol->pDflt, pDestCol->pDflt, -1)!=0 ){
+ testcase( pDestCol->colFlags & COLFLAG_VIRTUAL );
+ testcase( pDestCol->colFlags & COLFLAG_STORED );
+ return 0; /* Different generator expressions */
+ }
+ }
+#endif
+ if( pDestCol->affinity!=pSrcCol->affinity ){
+ return 0; /* Affinity must be the same on all columns */
+ }
+ if( sqlite3_stricmp(pDestCol->zColl, pSrcCol->zColl)!=0 ){
+ return 0; /* Collating sequence must be the same on all columns */
+ }
+ if( pDestCol->notNull && !pSrcCol->notNull ){
+ return 0; /* tab2 must be NOT NULL if tab1 is */
+ }
+ /* Default values for second and subsequent columns need to match. */
+ if( (pDestCol->colFlags & COLFLAG_GENERATED)==0 && i>0 ){
+ assert( pDestCol->pDflt==0 || pDestCol->pDflt->op==TK_SPAN );
+ assert( pSrcCol->pDflt==0 || pSrcCol->pDflt->op==TK_SPAN );
+ if( (pDestCol->pDflt==0)!=(pSrcCol->pDflt==0)
+ || (pDestCol->pDflt && strcmp(pDestCol->pDflt->u.zToken,
+ pSrcCol->pDflt->u.zToken)!=0)
+ ){
+ return 0; /* Default values must be the same for all columns */
+ }
+ }
+ }
+ for(pDestIdx=pDest->pIndex; pDestIdx; pDestIdx=pDestIdx->pNext){
+ if( IsUniqueIndex(pDestIdx) ){
+ destHasUniqueIdx = 1;
+ }
+ for(pSrcIdx=pSrc->pIndex; pSrcIdx; pSrcIdx=pSrcIdx->pNext){
+ if( xferCompatibleIndex(pDestIdx, pSrcIdx) ) break;
+ }
+ if( pSrcIdx==0 ){
+ return 0; /* pDestIdx has no corresponding index in pSrc */
+ }
+ if( pSrcIdx->tnum==pDestIdx->tnum && pSrc->pSchema==pDest->pSchema
+ && sqlite3FaultSim(411)==SQLITE_OK ){
+ /* The sqlite3FaultSim() call allows this corruption test to be
+ ** bypassed during testing, in order to exercise other corruption tests
+ ** further downstream. */
+ return 0; /* Corrupt schema - two indexes on the same btree */
+ }
+ }
+#ifndef SQLITE_OMIT_CHECK
+ if( pDest->pCheck && sqlite3ExprListCompare(pSrc->pCheck,pDest->pCheck,-1) ){
+ return 0; /* Tables have different CHECK constraints. Ticket #2252 */
+ }
+#endif
+#ifndef SQLITE_OMIT_FOREIGN_KEY
+ /* Disallow the transfer optimization if the destination table contains
+ ** any foreign key constraints. This is more restrictive than necessary.
+ ** But the main beneficiary of the transfer optimization is the VACUUM
+ ** command, and the VACUUM command disables foreign key constraints. So
+ ** the extra complication to make this rule less restrictive is probably
+ ** not worth the effort. Ticket [6284df89debdfa61db8073e062908af0c9b6118e]
+ */
+ if( (db->flags & SQLITE_ForeignKeys)!=0 && pDest->pFKey!=0 ){
+ return 0;
+ }
+#endif
+ if( (db->flags & SQLITE_CountRows)!=0 ){
+ return 0; /* xfer opt does not play well with PRAGMA count_changes */
+ }
+
+ /* If we get this far, it means that the xfer optimization is at
+ ** least a possibility, though it might only work if the destination
+ ** table (tab1) is initially empty.
+ */
+#ifdef SQLITE_TEST
+ sqlite3_xferopt_count++;
+#endif
+ iDbSrc = sqlite3SchemaToIndex(db, pSrc->pSchema);
+ v = sqlite3GetVdbe(pParse);
+ sqlite3CodeVerifySchema(pParse, iDbSrc);
+ iSrc = pParse->nTab++;
+ iDest = pParse->nTab++;
+ regAutoinc = autoIncBegin(pParse, iDbDest, pDest);
+ regData = sqlite3GetTempReg(pParse);
+ regRowid = sqlite3GetTempReg(pParse);
+ sqlite3OpenTable(pParse, iDest, iDbDest, pDest, OP_OpenWrite);
+ assert( HasRowid(pDest) || destHasUniqueIdx );
+ if( (db->mDbFlags & DBFLAG_Vacuum)==0 && (
+ (pDest->iPKey<0 && pDest->pIndex!=0) /* (1) */
+ || destHasUniqueIdx /* (2) */
+ || (onError!=OE_Abort && onError!=OE_Rollback) /* (3) */
+ )){
+ /* In some circumstances, we are able to run the xfer optimization
+ ** only if the destination table is initially empty. Unless the
+ ** DBFLAG_Vacuum flag is set, this block generates code to make
+ ** that determination. If DBFLAG_Vacuum is set, then the destination
+ ** table is always empty.
+ **
+ ** Conditions under which the destination must be empty:
+ **
+ ** (1) There is no INTEGER PRIMARY KEY but there are indices.
+ ** (If the destination is not initially empty, the rowid fields
+ ** of index entries might need to change.)
+ **
+ ** (2) The destination has a unique index. (The xfer optimization
+ ** is unable to test uniqueness.)
+ **
+ ** (3) onError is something other than OE_Abort and OE_Rollback.
+ */
+ addr1 = sqlite3VdbeAddOp2(v, OP_Rewind, iDest, 0); VdbeCoverage(v);
+ emptyDestTest = sqlite3VdbeAddOp0(v, OP_Goto);
+ sqlite3VdbeJumpHere(v, addr1);
+ }
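+ /* For instance (editorial sketch):
+ **
+ **   INSERT OR IGNORE INTO t2 SELECT * FROM t1;
+ **
+ ** resolves onError to OE_Ignore, triggering condition (3) above, so
+ ** the generated code first verifies that t2 is empty. */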
+ if( HasRowid(pSrc) ){
+ u8 insFlags;
+ sqlite3OpenTable(pParse, iSrc, iDbSrc, pSrc, OP_OpenRead);
+ emptySrcTest = sqlite3VdbeAddOp2(v, OP_Rewind, iSrc, 0); VdbeCoverage(v);
+ if( pDest->iPKey>=0 ){
+ addr1 = sqlite3VdbeAddOp2(v, OP_Rowid, iSrc, regRowid);
+ sqlite3VdbeVerifyAbortable(v, onError);
+ addr2 = sqlite3VdbeAddOp3(v, OP_NotExists, iDest, 0, regRowid);
+ VdbeCoverage(v);
+ sqlite3RowidConstraint(pParse, onError, pDest);
+ sqlite3VdbeJumpHere(v, addr2);
+ autoIncStep(pParse, regAutoinc, regRowid);
+ }else if( pDest->pIndex==0 && !(db->mDbFlags & DBFLAG_VacuumInto) ){
+ addr1 = sqlite3VdbeAddOp2(v, OP_NewRowid, iDest, regRowid);
+ }else{
+ addr1 = sqlite3VdbeAddOp2(v, OP_Rowid, iSrc, regRowid);
+ assert( (pDest->tabFlags & TF_Autoincrement)==0 );
+ }
+ if( db->mDbFlags & DBFLAG_Vacuum ){
+ sqlite3VdbeAddOp1(v, OP_SeekEnd, iDest);
+ insFlags = OPFLAG_APPEND|OPFLAG_USESEEKRESULT;
+ }else{
+ insFlags = OPFLAG_NCHANGE|OPFLAG_LASTROWID|OPFLAG_APPEND;
+ }
+ sqlite3VdbeAddOp3(v, OP_RowData, iSrc, regData, 1);
+ sqlite3VdbeAddOp4(v, OP_Insert, iDest, regData, regRowid,
+ (char*)pDest, P4_TABLE);
+ sqlite3VdbeChangeP5(v, insFlags);
+ sqlite3VdbeAddOp2(v, OP_Next, iSrc, addr1); VdbeCoverage(v);
+ sqlite3VdbeAddOp2(v, OP_Close, iSrc, 0);
+ sqlite3VdbeAddOp2(v, OP_Close, iDest, 0);
+ }else{
+ sqlite3TableLock(pParse, iDbDest, pDest->tnum, 1, pDest->zName);
+ sqlite3TableLock(pParse, iDbSrc, pSrc->tnum, 0, pSrc->zName);
+ }
+ for(pDestIdx=pDest->pIndex; pDestIdx; pDestIdx=pDestIdx->pNext){
+ u8 idxInsFlags = 0;
+ for(pSrcIdx=pSrc->pIndex; ALWAYS(pSrcIdx); pSrcIdx=pSrcIdx->pNext){
+ if( xferCompatibleIndex(pDestIdx, pSrcIdx) ) break;
+ }
+ assert( pSrcIdx );
+ sqlite3VdbeAddOp3(v, OP_OpenRead, iSrc, pSrcIdx->tnum, iDbSrc);
+ sqlite3VdbeSetP4KeyInfo(pParse, pSrcIdx);
+ VdbeComment((v, "%s", pSrcIdx->zName));
+ sqlite3VdbeAddOp3(v, OP_OpenWrite, iDest, pDestIdx->tnum, iDbDest);
+ sqlite3VdbeSetP4KeyInfo(pParse, pDestIdx);
+ sqlite3VdbeChangeP5(v, OPFLAG_BULKCSR);
+ VdbeComment((v, "%s", pDestIdx->zName));
+ addr1 = sqlite3VdbeAddOp2(v, OP_Rewind, iSrc, 0); VdbeCoverage(v);
+ if( db->mDbFlags & DBFLAG_Vacuum ){
+ /* This INSERT command is part of a VACUUM operation, which guarantees
+ ** that the destination table is empty. If all indexed columns use
+ ** collation sequence BINARY, then it can also be assumed that the
+ ** index will be populated by inserting keys in strictly sorted
+ ** order. In this case, instead of seeking within the b-tree as part
+ ** of every OP_IdxInsert opcode, an OP_SeekEnd is added before the
+ ** OP_IdxInsert to seek to the point within the b-tree where each key
+ ** should be inserted. This is faster.
+ **
+ ** If any of the indexed columns use a collation sequence other than
+ ** BINARY, this optimization is disabled. This is because the user
+ ** might change the definition of a collation sequence and then run
+ ** a VACUUM command. In that case keys may not be written in strictly
+ ** sorted order. */
+ for(i=0; i<pSrcIdx->nColumn; i++){
+ const char *zColl = pSrcIdx->azColl[i];
+ if( sqlite3_stricmp(sqlite3StrBINARY, zColl) ) break;
+ }
+ if( i==pSrcIdx->nColumn ){
+ idxInsFlags = OPFLAG_USESEEKRESULT;
+ sqlite3VdbeAddOp1(v, OP_SeekEnd, iDest);
+ }
+ }else if( !HasRowid(pSrc) && pDestIdx->idxType==SQLITE_IDXTYPE_PRIMARYKEY ){
+ idxInsFlags |= OPFLAG_NCHANGE;
+ }
+ sqlite3VdbeAddOp3(v, OP_RowData, iSrc, regData, 1);
+ sqlite3VdbeAddOp2(v, OP_IdxInsert, iDest, regData);
+ sqlite3VdbeChangeP5(v, idxInsFlags|OPFLAG_APPEND);
+ sqlite3VdbeAddOp2(v, OP_Next, iSrc, addr1+1); VdbeCoverage(v);
+ sqlite3VdbeJumpHere(v, addr1);
+ sqlite3VdbeAddOp2(v, OP_Close, iSrc, 0);
+ sqlite3VdbeAddOp2(v, OP_Close, iDest, 0);
+ }
+ if( emptySrcTest ) sqlite3VdbeJumpHere(v, emptySrcTest);
+ sqlite3ReleaseTempReg(pParse, regRowid);
+ sqlite3ReleaseTempReg(pParse, regData);
+ if( emptyDestTest ){
+ sqlite3AutoincrementEnd(pParse);
+ sqlite3VdbeAddOp2(v, OP_Halt, SQLITE_OK, 0);
+ sqlite3VdbeJumpHere(v, emptyDestTest);
+ sqlite3VdbeAddOp2(v, OP_Close, iDest, 0);
+ return 0;
+ }else{
+ return 1;
+ }
+}
+#endif /* SQLITE_OMIT_XFER_OPT */
+
+/************** End of insert.c **********************************************/
+/************** Begin file legacy.c ******************************************/
+/*
+** 2001 September 15
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+** Main file for the SQLite library. The routines in this file
+** implement the programmer interface to the library. Routines in
+** other files are for internal use by SQLite and should not be
+** accessed by users of the library.
+*/
+
+/* #include "sqliteInt.h" */
+
+/*
+** Execute SQL code. Return one of the SQLITE_ success/failure
+** codes. Also write an error message into memory obtained from
+** malloc() and make *pzErrMsg point to that message.
+**
+** If the SQL is a query, then for each row in the query result
+** the xCallback() function is called. pArg becomes the first
+** argument to xCallback(). If xCallback=NULL then no callback
+** is invoked, even for queries.
+*/
+SQLITE_API int sqlite3_exec(
+ sqlite3 *db, /* The database on which the SQL executes */
+ const char *zSql, /* The SQL to be executed */
+ sqlite3_callback xCallback, /* Invoke this callback routine */
+ void *pArg, /* First argument to xCallback() */
+ char **pzErrMsg /* Write error messages here */
+){
+ int rc = SQLITE_OK; /* Return code */
+ const char *zLeftover; /* Tail of unprocessed SQL */
+ sqlite3_stmt *pStmt = 0; /* The current SQL statement */
+ char **azCols = 0; /* Names of result columns */
+ int callbackIsInit; /* True if callback data is initialized */
+
+ if( !sqlite3SafetyCheckOk(db) ) return SQLITE_MISUSE_BKPT;
+ if( zSql==0 ) zSql = "";
+
+ sqlite3_mutex_enter(db->mutex);
+ sqlite3Error(db, SQLITE_OK);
+ while( rc==SQLITE_OK && zSql[0] ){
+ int nCol = 0;
+ char **azVals = 0;
+
+ pStmt = 0;
+ rc = sqlite3_prepare_v2(db, zSql, -1, &pStmt, &zLeftover);
+ assert( rc==SQLITE_OK || pStmt==0 );
+ if( rc!=SQLITE_OK ){
+ continue;
+ }
+ if( !pStmt ){
+ /* this happens for a comment or white-space */
+ zSql = zLeftover;
+ continue;
+ }
+ callbackIsInit = 0;
+
+ while( 1 ){
+ int i;
+ rc = sqlite3_step(pStmt);
+
+ /* Invoke the callback function if required */
+ if( xCallback && (SQLITE_ROW==rc ||
+ (SQLITE_DONE==rc && !callbackIsInit
+ && db->flags&SQLITE_NullCallback)) ){
+ if( !callbackIsInit ){
+ nCol = sqlite3_column_count(pStmt);
+ azCols = sqlite3DbMallocRaw(db, (2*nCol+1)*sizeof(const char*));
+ if( azCols==0 ){
+ goto exec_out;
+ }
+ for(i=0; i<nCol; i++){
+ azCols[i] = (char *)sqlite3_column_name(pStmt, i);
+ /* sqlite3VdbeSetColName() installs column names as UTF8
+ ** strings so there is no way for sqlite3_column_name() to fail. */
+ assert( azCols[i]!=0 );
+ }
+ callbackIsInit = 1;
+ }
+ if( rc==SQLITE_ROW ){
+ azVals = &azCols[nCol];
+ for(i=0; i<nCol; i++){
+ azVals[i] = (char *)sqlite3_column_text(pStmt, i);
+ if( !azVals[i] && sqlite3_column_type(pStmt, i)!=SQLITE_NULL ){
+ sqlite3OomFault(db);
+ goto exec_out;
+ }
+ }
+ azVals[i] = 0;
+ }
+ if( xCallback(pArg, nCol, azVals, azCols) ){
+ /* EVIDENCE-OF: R-38229-40159 If the callback function to
+ ** sqlite3_exec() returns non-zero, then sqlite3_exec() will
+ ** return SQLITE_ABORT. */
+ rc = SQLITE_ABORT;
+ sqlite3VdbeFinalize((Vdbe *)pStmt);
+ pStmt = 0;
+ sqlite3Error(db, SQLITE_ABORT);
+ goto exec_out;
+ }
+ }
+
+ if( rc!=SQLITE_ROW ){
+ rc = sqlite3VdbeFinalize((Vdbe *)pStmt);
+ pStmt = 0;
+ zSql = zLeftover;
+ while( sqlite3Isspace(zSql[0]) ) zSql++;
+ break;
+ }
+ }
+
+ sqlite3DbFree(db, azCols);
+ azCols = 0;
+ }
+
+exec_out:
+ if( pStmt ) sqlite3VdbeFinalize((Vdbe *)pStmt);
+ sqlite3DbFree(db, azCols);
+
+ rc = sqlite3ApiExit(db, rc);
+ if( rc!=SQLITE_OK && pzErrMsg ){
+ *pzErrMsg = sqlite3DbStrDup(0, sqlite3_errmsg(db));
+ if( *pzErrMsg==0 ){
+ rc = SQLITE_NOMEM_BKPT;
+ sqlite3Error(db, SQLITE_NOMEM);
+ }
+ }else if( pzErrMsg ){
+ *pzErrMsg = 0;
+ }
+
+ assert( (rc&db->errMask)==rc );
+ sqlite3_mutex_leave(db->mutex);
+ return rc;
+}
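+
+/* A minimal caller-side sketch (editorial example; assumes an open
+** database handle "db" and <stdio.h>, and uses only the documented
+** sqlite3_exec() interface):
+**
+**   static int cb(void *pArg, int nCol, char **azVal, char **azCol){
+**     int i;
+**     for(i=0; i<nCol; i++){
+**       printf("%s = %s\n", azCol[i], azVal[i] ? azVal[i] : "NULL");
+**     }
+**     return 0;   (returning non-zero makes sqlite3_exec() stop
+**                  and return SQLITE_ABORT)
+**   }
+**
+**   char *zErr = 0;
+**   if( sqlite3_exec(db, "SELECT * FROM t1;", cb, 0, &zErr)!=SQLITE_OK ){
+**     fprintf(stderr, "error: %s\n", zErr);
+**     sqlite3_free(zErr);
+**   }
+*/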
+
+/************** End of legacy.c **********************************************/
+/************** Begin file loadext.c *****************************************/
+/*
+** 2006 June 7
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+** This file contains code used to dynamically load extensions into
+** the SQLite library.
+*/
+
+#ifndef SQLITE_CORE
+ #define SQLITE_CORE 1 /* Disable the API redefinition in sqlite3ext.h */
+#endif
+/************** Include sqlite3ext.h in the middle of loadext.c **************/
+/************** Begin file sqlite3ext.h **************************************/
+/*
+** 2006 June 7
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+** This header file defines the SQLite interface for use by
+** shared libraries that want to be imported as extensions into
+** an SQLite instance. Shared libraries that intend to be loaded
+** as extensions by SQLite should #include this file instead of
+** sqlite3.h.
+*/
+#ifndef SQLITE3EXT_H
+#define SQLITE3EXT_H
+/* #include "sqlite3.h" */
+
+/*
+** The following structure holds pointers to all of the SQLite API
+** routines.
+**
+** WARNING: In order to maintain backwards compatibility, add new
+** interfaces to the end of this structure only. If you insert new
+** interfaces in the middle of this structure, then different
+** versions of SQLite will not be able to load each other's shared
+** libraries!
+*/
+struct sqlite3_api_routines {
+ void * (*aggregate_context)(sqlite3_context*,int nBytes);
+ int (*aggregate_count)(sqlite3_context*);
+ int (*bind_blob)(sqlite3_stmt*,int,const void*,int n,void(*)(void*));
+ int (*bind_double)(sqlite3_stmt*,int,double);
+ int (*bind_int)(sqlite3_stmt*,int,int);
+ int (*bind_int64)(sqlite3_stmt*,int,sqlite_int64);
+ int (*bind_null)(sqlite3_stmt*,int);
+ int (*bind_parameter_count)(sqlite3_stmt*);
+ int (*bind_parameter_index)(sqlite3_stmt*,const char*zName);
+ const char * (*bind_parameter_name)(sqlite3_stmt*,int);
+ int (*bind_text)(sqlite3_stmt*,int,const char*,int n,void(*)(void*));
+ int (*bind_text16)(sqlite3_stmt*,int,const void*,int,void(*)(void*));
+ int (*bind_value)(sqlite3_stmt*,int,const sqlite3_value*);
+ int (*busy_handler)(sqlite3*,int(*)(void*,int),void*);
+ int (*busy_timeout)(sqlite3*,int ms);
+ int (*changes)(sqlite3*);
+ int (*close)(sqlite3*);
+ int (*collation_needed)(sqlite3*,void*,void(*)(void*,sqlite3*,
+ int eTextRep,const char*));
+ int (*collation_needed16)(sqlite3*,void*,void(*)(void*,sqlite3*,
+ int eTextRep,const void*));
+ const void * (*column_blob)(sqlite3_stmt*,int iCol);
+ int (*column_bytes)(sqlite3_stmt*,int iCol);
+ int (*column_bytes16)(sqlite3_stmt*,int iCol);
+ int (*column_count)(sqlite3_stmt*pStmt);
+ const char * (*column_database_name)(sqlite3_stmt*,int);
+ const void * (*column_database_name16)(sqlite3_stmt*,int);
+ const char * (*column_decltype)(sqlite3_stmt*,int i);
+ const void * (*column_decltype16)(sqlite3_stmt*,int);
+ double (*column_double)(sqlite3_stmt*,int iCol);
+ int (*column_int)(sqlite3_stmt*,int iCol);
+ sqlite_int64 (*column_int64)(sqlite3_stmt*,int iCol);
+ const char * (*column_name)(sqlite3_stmt*,int);
+ const void * (*column_name16)(sqlite3_stmt*,int);
+ const char * (*column_origin_name)(sqlite3_stmt*,int);
+ const void * (*column_origin_name16)(sqlite3_stmt*,int);
+ const char * (*column_table_name)(sqlite3_stmt*,int);
+ const void * (*column_table_name16)(sqlite3_stmt*,int);
+ const unsigned char * (*column_text)(sqlite3_stmt*,int iCol);
+ const void * (*column_text16)(sqlite3_stmt*,int iCol);
+ int (*column_type)(sqlite3_stmt*,int iCol);
+ sqlite3_value* (*column_value)(sqlite3_stmt*,int iCol);
+ void * (*commit_hook)(sqlite3*,int(*)(void*),void*);
+ int (*complete)(const char*sql);
+ int (*complete16)(const void*sql);
+ int (*create_collation)(sqlite3*,const char*,int,void*,
+ int(*)(void*,int,const void*,int,const void*));
+ int (*create_collation16)(sqlite3*,const void*,int,void*,
+ int(*)(void*,int,const void*,int,const void*));
+ int (*create_function)(sqlite3*,const char*,int,int,void*,
+ void (*xFunc)(sqlite3_context*,int,sqlite3_value**),
+ void (*xStep)(sqlite3_context*,int,sqlite3_value**),
+ void (*xFinal)(sqlite3_context*));
+ int (*create_function16)(sqlite3*,const void*,int,int,void*,
+ void (*xFunc)(sqlite3_context*,int,sqlite3_value**),
+ void (*xStep)(sqlite3_context*,int,sqlite3_value**),
+ void (*xFinal)(sqlite3_context*));
+ int (*create_module)(sqlite3*,const char*,const sqlite3_module*,void*);
+ int (*data_count)(sqlite3_stmt*pStmt);
+ sqlite3 * (*db_handle)(sqlite3_stmt*);
+ int (*declare_vtab)(sqlite3*,const char*);
+ int (*enable_shared_cache)(int);
+ int (*errcode)(sqlite3*db);
+ const char * (*errmsg)(sqlite3*);
+ const void * (*errmsg16)(sqlite3*);
+ int (*exec)(sqlite3*,const char*,sqlite3_callback,void*,char**);
+ int (*expired)(sqlite3_stmt*);
+ int (*finalize)(sqlite3_stmt*pStmt);
+ void (*free)(void*);
+ void (*free_table)(char**result);
+ int (*get_autocommit)(sqlite3*);
+ void * (*get_auxdata)(sqlite3_context*,int);
+ int (*get_table)(sqlite3*,const char*,char***,int*,int*,char**);
+ int (*global_recover)(void);
+ void (*interruptx)(sqlite3*);
+ sqlite_int64 (*last_insert_rowid)(sqlite3*);
+ const char * (*libversion)(void);
+ int (*libversion_number)(void);
+ void *(*malloc)(int);
+ char * (*mprintf)(const char*,...);
+ int (*open)(const char*,sqlite3**);
+ int (*open16)(const void*,sqlite3**);
+ int (*prepare)(sqlite3*,const char*,int,sqlite3_stmt**,const char**);
+ int (*prepare16)(sqlite3*,const void*,int,sqlite3_stmt**,const void**);
+ void * (*profile)(sqlite3*,void(*)(void*,const char*,sqlite_uint64),void*);
+ void (*progress_handler)(sqlite3*,int,int(*)(void*),void*);
+ void *(*realloc)(void*,int);
+ int (*reset)(sqlite3_stmt*pStmt);
+ void (*result_blob)(sqlite3_context*,const void*,int,void(*)(void*));
+ void (*result_double)(sqlite3_context*,double);
+ void (*result_error)(sqlite3_context*,const char*,int);
+ void (*result_error16)(sqlite3_context*,const void*,int);
+ void (*result_int)(sqlite3_context*,int);
+ void (*result_int64)(sqlite3_context*,sqlite_int64);
+ void (*result_null)(sqlite3_context*);
+ void (*result_text)(sqlite3_context*,const char*,int,void(*)(void*));
+ void (*result_text16)(sqlite3_context*,const void*,int,void(*)(void*));
+ void (*result_text16be)(sqlite3_context*,const void*,int,void(*)(void*));
+ void (*result_text16le)(sqlite3_context*,const void*,int,void(*)(void*));
+ void (*result_value)(sqlite3_context*,sqlite3_value*);
+ void * (*rollback_hook)(sqlite3*,void(*)(void*),void*);
+ int (*set_authorizer)(sqlite3*,int(*)(void*,int,const char*,const char*,
+ const char*,const char*),void*);
+ void (*set_auxdata)(sqlite3_context*,int,void*,void (*)(void*));
+ char * (*xsnprintf)(int,char*,const char*,...);
+ int (*step)(sqlite3_stmt*);
+ int (*table_column_metadata)(sqlite3*,const char*,const char*,const char*,
+ char const**,char const**,int*,int*,int*);
+ void (*thread_cleanup)(void);
+ int (*total_changes)(sqlite3*);
+ void * (*trace)(sqlite3*,void(*xTrace)(void*,const char*),void*);
+ int (*transfer_bindings)(sqlite3_stmt*,sqlite3_stmt*);
+ void * (*update_hook)(sqlite3*,void(*)(void*,int ,char const*,char const*,
+ sqlite_int64),void*);
+ void * (*user_data)(sqlite3_context*);
+ const void * (*value_blob)(sqlite3_value*);
+ int (*value_bytes)(sqlite3_value*);
+ int (*value_bytes16)(sqlite3_value*);
+ double (*value_double)(sqlite3_value*);
+ int (*value_int)(sqlite3_value*);
+ sqlite_int64 (*value_int64)(sqlite3_value*);
+ int (*value_numeric_type)(sqlite3_value*);
+ const unsigned char * (*value_text)(sqlite3_value*);
+ const void * (*value_text16)(sqlite3_value*);
+ const void * (*value_text16be)(sqlite3_value*);
+ const void * (*value_text16le)(sqlite3_value*);
+ int (*value_type)(sqlite3_value*);
+ char *(*vmprintf)(const char*,va_list);
+ /* Added ??? */
+ int (*overload_function)(sqlite3*, const char *zFuncName, int nArg);
+ /* Added by 3.3.13 */
+ int (*prepare_v2)(sqlite3*,const char*,int,sqlite3_stmt**,const char**);
+ int (*prepare16_v2)(sqlite3*,const void*,int,sqlite3_stmt**,const void**);
+ int (*clear_bindings)(sqlite3_stmt*);
+ /* Added by 3.4.1 */
+ int (*create_module_v2)(sqlite3*,const char*,const sqlite3_module*,void*,
+ void (*xDestroy)(void *));
+ /* Added by 3.5.0 */
+ int (*bind_zeroblob)(sqlite3_stmt*,int,int);
+ int (*blob_bytes)(sqlite3_blob*);
+ int (*blob_close)(sqlite3_blob*);
+ int (*blob_open)(sqlite3*,const char*,const char*,const char*,sqlite3_int64,
+ int,sqlite3_blob**);
+ int (*blob_read)(sqlite3_blob*,void*,int,int);
+ int (*blob_write)(sqlite3_blob*,const void*,int,int);
+ int (*create_collation_v2)(sqlite3*,const char*,int,void*,
+ int(*)(void*,int,const void*,int,const void*),
+ void(*)(void*));
+ int (*file_control)(sqlite3*,const char*,int,void*);
+ sqlite3_int64 (*memory_highwater)(int);
+ sqlite3_int64 (*memory_used)(void);
+ sqlite3_mutex *(*mutex_alloc)(int);
+ void (*mutex_enter)(sqlite3_mutex*);
+ void (*mutex_free)(sqlite3_mutex*);
+ void (*mutex_leave)(sqlite3_mutex*);
+ int (*mutex_try)(sqlite3_mutex*);
+ int (*open_v2)(const char*,sqlite3**,int,const char*);
+ int (*release_memory)(int);
+ void (*result_error_nomem)(sqlite3_context*);
+ void (*result_error_toobig)(sqlite3_context*);
+ int (*sleep)(int);
+ void (*soft_heap_limit)(int);
+ sqlite3_vfs *(*vfs_find)(const char*);
+ int (*vfs_register)(sqlite3_vfs*,int);
+ int (*vfs_unregister)(sqlite3_vfs*);
+ int (*xthreadsafe)(void);
+ void (*result_zeroblob)(sqlite3_context*,int);
+ void (*result_error_code)(sqlite3_context*,int);
+ int (*test_control)(int, ...);
+ void (*randomness)(int,void*);
+ sqlite3 *(*context_db_handle)(sqlite3_context*);
+ int (*extended_result_codes)(sqlite3*,int);
+ int (*limit)(sqlite3*,int,int);
+ sqlite3_stmt *(*next_stmt)(sqlite3*,sqlite3_stmt*);
+ const char *(*sql)(sqlite3_stmt*);
+ int (*status)(int,int*,int*,int);
+ int (*backup_finish)(sqlite3_backup*);
+ sqlite3_backup *(*backup_init)(sqlite3*,const char*,sqlite3*,const char*);
+ int (*backup_pagecount)(sqlite3_backup*);
+ int (*backup_remaining)(sqlite3_backup*);
+ int (*backup_step)(sqlite3_backup*,int);
+ const char *(*compileoption_get)(int);
+ int (*compileoption_used)(const char*);
+ int (*create_function_v2)(sqlite3*,const char*,int,int,void*,
+ void (*xFunc)(sqlite3_context*,int,sqlite3_value**),
+ void (*xStep)(sqlite3_context*,int,sqlite3_value**),
+ void (*xFinal)(sqlite3_context*),
+ void(*xDestroy)(void*));
+ int (*db_config)(sqlite3*,int,...);
+ sqlite3_mutex *(*db_mutex)(sqlite3*);
+ int (*db_status)(sqlite3*,int,int*,int*,int);
+ int (*extended_errcode)(sqlite3*);
+ void (*log)(int,const char*,...);
+ sqlite3_int64 (*soft_heap_limit64)(sqlite3_int64);
+ const char *(*sourceid)(void);
+ int (*stmt_status)(sqlite3_stmt*,int,int);
+ int (*strnicmp)(const char*,const char*,int);
+ int (*unlock_notify)(sqlite3*,void(*)(void**,int),void*);
+ int (*wal_autocheckpoint)(sqlite3*,int);
+ int (*wal_checkpoint)(sqlite3*,const char*);
+ void *(*wal_hook)(sqlite3*,int(*)(void*,sqlite3*,const char*,int),void*);
+ int (*blob_reopen)(sqlite3_blob*,sqlite3_int64);
+ int (*vtab_config)(sqlite3*,int op,...);
+ int (*vtab_on_conflict)(sqlite3*);
+ /* Version 3.7.16 and later */
+ int (*close_v2)(sqlite3*);
+ const char *(*db_filename)(sqlite3*,const char*);
+ int (*db_readonly)(sqlite3*,const char*);
+ int (*db_release_memory)(sqlite3*);
+ const char *(*errstr)(int);
+ int (*stmt_busy)(sqlite3_stmt*);
+ int (*stmt_readonly)(sqlite3_stmt*);
+ int (*stricmp)(const char*,const char*);
+ int (*uri_boolean)(const char*,const char*,int);
+ sqlite3_int64 (*uri_int64)(const char*,const char*,sqlite3_int64);
+ const char *(*uri_parameter)(const char*,const char*);
+ char *(*xvsnprintf)(int,char*,const char*,va_list);
+ int (*wal_checkpoint_v2)(sqlite3*,const char*,int,int*,int*);
+ /* Version 3.8.7 and later */
+ int (*auto_extension)(void(*)(void));
+ int (*bind_blob64)(sqlite3_stmt*,int,const void*,sqlite3_uint64,
+ void(*)(void*));
+ int (*bind_text64)(sqlite3_stmt*,int,const char*,sqlite3_uint64,
+ void(*)(void*),unsigned char);
+ int (*cancel_auto_extension)(void(*)(void));
+ int (*load_extension)(sqlite3*,const char*,const char*,char**);
+ void *(*malloc64)(sqlite3_uint64);
+ sqlite3_uint64 (*msize)(void*);
+ void *(*realloc64)(void*,sqlite3_uint64);
+ void (*reset_auto_extension)(void);
+ void (*result_blob64)(sqlite3_context*,const void*,sqlite3_uint64,
+ void(*)(void*));
+ void (*result_text64)(sqlite3_context*,const char*,sqlite3_uint64,
+ void(*)(void*), unsigned char);
+ int (*strglob)(const char*,const char*);
+ /* Version 3.8.11 and later */
+ sqlite3_value *(*value_dup)(const sqlite3_value*);
+ void (*value_free)(sqlite3_value*);
+ int (*result_zeroblob64)(sqlite3_context*,sqlite3_uint64);
+ int (*bind_zeroblob64)(sqlite3_stmt*, int, sqlite3_uint64);
+ /* Version 3.9.0 and later */
+ unsigned int (*value_subtype)(sqlite3_value*);
+ void (*result_subtype)(sqlite3_context*,unsigned int);
+ /* Version 3.10.0 and later */
+ int (*status64)(int,sqlite3_int64*,sqlite3_int64*,int);
+ int (*strlike)(const char*,const char*,unsigned int);
+ int (*db_cacheflush)(sqlite3*);
+ /* Version 3.12.0 and later */
+ int (*system_errno)(sqlite3*);
+ /* Version 3.14.0 and later */
+ int (*trace_v2)(sqlite3*,unsigned,int(*)(unsigned,void*,void*,void*),void*);
+ char *(*expanded_sql)(sqlite3_stmt*);
+ /* Version 3.18.0 and later */
+ void (*set_last_insert_rowid)(sqlite3*,sqlite3_int64);
+ /* Version 3.20.0 and later */
+ int (*prepare_v3)(sqlite3*,const char*,int,unsigned int,
+ sqlite3_stmt**,const char**);
+ int (*prepare16_v3)(sqlite3*,const void*,int,unsigned int,
+ sqlite3_stmt**,const void**);
+ int (*bind_pointer)(sqlite3_stmt*,int,void*,const char*,void(*)(void*));
+ void (*result_pointer)(sqlite3_context*,void*,const char*,void(*)(void*));
+ void *(*value_pointer)(sqlite3_value*,const char*);
+ int (*vtab_nochange)(sqlite3_context*);
+ int (*value_nochange)(sqlite3_value*);
+ const char *(*vtab_collation)(sqlite3_index_info*,int);
+ /* Version 3.24.0 and later */
+ int (*keyword_count)(void);
+ int (*keyword_name)(int,const char**,int*);
+ int (*keyword_check)(const char*,int);
+ sqlite3_str *(*str_new)(sqlite3*);
+ char *(*str_finish)(sqlite3_str*);
+ void (*str_appendf)(sqlite3_str*, const char *zFormat, ...);
+ void (*str_vappendf)(sqlite3_str*, const char *zFormat, va_list);
+ void (*str_append)(sqlite3_str*, const char *zIn, int N);
+ void (*str_appendall)(sqlite3_str*, const char *zIn);
+ void (*str_appendchar)(sqlite3_str*, int N, char C);
+ void (*str_reset)(sqlite3_str*);
+ int (*str_errcode)(sqlite3_str*);
+ int (*str_length)(sqlite3_str*);
+ char *(*str_value)(sqlite3_str*);
+ /* Version 3.25.0 and later */
+ int (*create_window_function)(sqlite3*,const char*,int,int,void*,
+ void (*xStep)(sqlite3_context*,int,sqlite3_value**),
+ void (*xFinal)(sqlite3_context*),
+ void (*xValue)(sqlite3_context*),
+ void (*xInv)(sqlite3_context*,int,sqlite3_value**),
+ void(*xDestroy)(void*));
+ /* Version 3.26.0 and later */
+ const char *(*normalized_sql)(sqlite3_stmt*);
+ /* Version 3.28.0 and later */
+ int (*stmt_isexplain)(sqlite3_stmt*);
+ int (*value_frombind)(sqlite3_value*);
+ /* Version 3.30.0 and later */
+ int (*drop_modules)(sqlite3*,const char**);
+ /* Version 3.31.0 and later */
+ sqlite3_int64 (*hard_heap_limit64)(sqlite3_int64);
+ const char *(*uri_key)(const char*,int);
+ const char *(*filename_database)(const char*);
+ const char *(*filename_journal)(const char*);
+ const char *(*filename_wal)(const char*);
+ /* Version 3.32.0 and later */
+ char *(*create_filename)(const char*,const char*,const char*,
+ int,const char**);
+ void (*free_filename)(char*);
+ sqlite3_file *(*database_file_object)(const char*);
+};
+
+/*
+** This is the function signature used for all extension entry points. It
+** is also defined in the file "loadext.c".
+*/
+typedef int (*sqlite3_loadext_entry)(
+ sqlite3 *db, /* Handle to the database. */
+ char **pzErrMsg, /* Used to set error string on failure. */
+ const sqlite3_api_routines *pThunk /* Extension API function pointers. */
+);
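+
+/* A hedged sketch of an entry point with this signature, as it might
+** appear in a loadable extension (assumes the SQLITE_EXTENSION_INIT1
+** and SQLITE_EXTENSION_INIT2 macros defined later in this header):
+**
+**   SQLITE_EXTENSION_INIT1
+**   int sqlite3_extension_init(
+**     sqlite3 *db,
+**     char **pzErrMsg,
+**     const sqlite3_api_routines *pApi
+**   ){
+**     SQLITE_EXTENSION_INIT2(pApi);
+**     ... register user functions, virtual tables, etc. here ...
+**     return SQLITE_OK;
+**   }
+*/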
+
+/*
+** The following macros redefine the API routines so that they are
+** redirected through the global sqlite3_api structure.
+**
+** This header file is also used by the loadext.c source file
+** (part of the main SQLite library - not an extension) so that
+** it can get access to the sqlite3_api_routines structure
+** definition. But the main library does not want to redefine
+** the API. So the redefinition macros are only valid if the
+** SQLITE_CORE macro is undefined.
+*/
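+
+/* Concretely (editorial note): inside an extension build, a call such as
+**
+**   sqlite3_bind_int(pStmt, 1, 42);
+**
+** expands to
+**
+**   sqlite3_api->bind_int(pStmt, 1, 42);
+**
+** routing the call through the function-pointer table supplied by the
+** host at load time.
+*/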
+#if !defined(SQLITE_CORE) && !defined(SQLITE_OMIT_LOAD_EXTENSION)
+#define sqlite3_aggregate_context sqlite3_api->aggregate_context
+#ifndef SQLITE_OMIT_DEPRECATED
+#define sqlite3_aggregate_count sqlite3_api->aggregate_count
+#endif
+#define sqlite3_bind_blob sqlite3_api->bind_blob
+#define sqlite3_bind_double sqlite3_api->bind_double
+#define sqlite3_bind_int sqlite3_api->bind_int
+#define sqlite3_bind_int64 sqlite3_api->bind_int64
+#define sqlite3_bind_null sqlite3_api->bind_null
+#define sqlite3_bind_parameter_count sqlite3_api->bind_parameter_count
+#define sqlite3_bind_parameter_index sqlite3_api->bind_parameter_index
+#define sqlite3_bind_parameter_name sqlite3_api->bind_parameter_name
+#define sqlite3_bind_text sqlite3_api->bind_text
+#define sqlite3_bind_text16 sqlite3_api->bind_text16
+#define sqlite3_bind_value sqlite3_api->bind_value
+#define sqlite3_busy_handler sqlite3_api->busy_handler
+#define sqlite3_busy_timeout sqlite3_api->busy_timeout
+#define sqlite3_changes sqlite3_api->changes
+#define sqlite3_close sqlite3_api->close
+#define sqlite3_collation_needed sqlite3_api->collation_needed
+#define sqlite3_collation_needed16 sqlite3_api->collation_needed16
+#define sqlite3_column_blob sqlite3_api->column_blob
+#define sqlite3_column_bytes sqlite3_api->column_bytes
+#define sqlite3_column_bytes16 sqlite3_api->column_bytes16
+#define sqlite3_column_count sqlite3_api->column_count
+#define sqlite3_column_database_name sqlite3_api->column_database_name
+#define sqlite3_column_database_name16 sqlite3_api->column_database_name16
+#define sqlite3_column_decltype sqlite3_api->column_decltype
+#define sqlite3_column_decltype16 sqlite3_api->column_decltype16
+#define sqlite3_column_double sqlite3_api->column_double
+#define sqlite3_column_int sqlite3_api->column_int
+#define sqlite3_column_int64 sqlite3_api->column_int64
+#define sqlite3_column_name sqlite3_api->column_name
+#define sqlite3_column_name16 sqlite3_api->column_name16
+#define sqlite3_column_origin_name sqlite3_api->column_origin_name
+#define sqlite3_column_origin_name16 sqlite3_api->column_origin_name16
+#define sqlite3_column_table_name sqlite3_api->column_table_name
+#define sqlite3_column_table_name16 sqlite3_api->column_table_name16
+#define sqlite3_column_text sqlite3_api->column_text
+#define sqlite3_column_text16 sqlite3_api->column_text16
+#define sqlite3_column_type sqlite3_api->column_type
+#define sqlite3_column_value sqlite3_api->column_value
+#define sqlite3_commit_hook sqlite3_api->commit_hook
+#define sqlite3_complete sqlite3_api->complete
+#define sqlite3_complete16 sqlite3_api->complete16
+#define sqlite3_create_collation sqlite3_api->create_collation
+#define sqlite3_create_collation16 sqlite3_api->create_collation16
+#define sqlite3_create_function sqlite3_api->create_function
+#define sqlite3_create_function16 sqlite3_api->create_function16
+#define sqlite3_create_module sqlite3_api->create_module
+#define sqlite3_create_module_v2 sqlite3_api->create_module_v2
+#define sqlite3_data_count sqlite3_api->data_count
+#define sqlite3_db_handle sqlite3_api->db_handle
+#define sqlite3_declare_vtab sqlite3_api->declare_vtab
+#define sqlite3_enable_shared_cache sqlite3_api->enable_shared_cache
+#define sqlite3_errcode sqlite3_api->errcode
+#define sqlite3_errmsg sqlite3_api->errmsg
+#define sqlite3_errmsg16 sqlite3_api->errmsg16
+#define sqlite3_exec sqlite3_api->exec
+#ifndef SQLITE_OMIT_DEPRECATED
+#define sqlite3_expired sqlite3_api->expired
+#endif
+#define sqlite3_finalize sqlite3_api->finalize
+#define sqlite3_free sqlite3_api->free
+#define sqlite3_free_table sqlite3_api->free_table
+#define sqlite3_get_autocommit sqlite3_api->get_autocommit
+#define sqlite3_get_auxdata sqlite3_api->get_auxdata
+#define sqlite3_get_table sqlite3_api->get_table
+#ifndef SQLITE_OMIT_DEPRECATED
+#define sqlite3_global_recover sqlite3_api->global_recover
+#endif
+#define sqlite3_interrupt sqlite3_api->interruptx
+#define sqlite3_last_insert_rowid sqlite3_api->last_insert_rowid
+#define sqlite3_libversion sqlite3_api->libversion
+#define sqlite3_libversion_number sqlite3_api->libversion_number
+#define sqlite3_malloc sqlite3_api->malloc
+#define sqlite3_mprintf sqlite3_api->mprintf
+#define sqlite3_open sqlite3_api->open
+#define sqlite3_open16 sqlite3_api->open16
+#define sqlite3_prepare sqlite3_api->prepare
+#define sqlite3_prepare16 sqlite3_api->prepare16
+#define sqlite3_prepare_v2 sqlite3_api->prepare_v2
+#define sqlite3_prepare16_v2 sqlite3_api->prepare16_v2
+#define sqlite3_profile sqlite3_api->profile
+#define sqlite3_progress_handler sqlite3_api->progress_handler
+#define sqlite3_realloc sqlite3_api->realloc
+#define sqlite3_reset sqlite3_api->reset
+#define sqlite3_result_blob sqlite3_api->result_blob
+#define sqlite3_result_double sqlite3_api->result_double
+#define sqlite3_result_error sqlite3_api->result_error
+#define sqlite3_result_error16 sqlite3_api->result_error16
+#define sqlite3_result_int sqlite3_api->result_int
+#define sqlite3_result_int64 sqlite3_api->result_int64
+#define sqlite3_result_null sqlite3_api->result_null
+#define sqlite3_result_text sqlite3_api->result_text
+#define sqlite3_result_text16 sqlite3_api->result_text16
+#define sqlite3_result_text16be sqlite3_api->result_text16be
+#define sqlite3_result_text16le sqlite3_api->result_text16le
+#define sqlite3_result_value sqlite3_api->result_value
+#define sqlite3_rollback_hook sqlite3_api->rollback_hook
+#define sqlite3_set_authorizer sqlite3_api->set_authorizer
+#define sqlite3_set_auxdata sqlite3_api->set_auxdata
+#define sqlite3_snprintf sqlite3_api->xsnprintf
+#define sqlite3_step sqlite3_api->step
+#define sqlite3_table_column_metadata sqlite3_api->table_column_metadata
+#define sqlite3_thread_cleanup sqlite3_api->thread_cleanup
+#define sqlite3_total_changes sqlite3_api->total_changes
+#define sqlite3_trace sqlite3_api->trace
+#ifndef SQLITE_OMIT_DEPRECATED
+#define sqlite3_transfer_bindings sqlite3_api->transfer_bindings
+#endif
+#define sqlite3_update_hook sqlite3_api->update_hook
+#define sqlite3_user_data sqlite3_api->user_data
+#define sqlite3_value_blob sqlite3_api->value_blob
+#define sqlite3_value_bytes sqlite3_api->value_bytes
+#define sqlite3_value_bytes16 sqlite3_api->value_bytes16
+#define sqlite3_value_double sqlite3_api->value_double
+#define sqlite3_value_int sqlite3_api->value_int
+#define sqlite3_value_int64 sqlite3_api->value_int64
+#define sqlite3_value_numeric_type sqlite3_api->value_numeric_type
+#define sqlite3_value_text sqlite3_api->value_text
+#define sqlite3_value_text16 sqlite3_api->value_text16
+#define sqlite3_value_text16be sqlite3_api->value_text16be
+#define sqlite3_value_text16le sqlite3_api->value_text16le
+#define sqlite3_value_type sqlite3_api->value_type
+#define sqlite3_vmprintf sqlite3_api->vmprintf
+#define sqlite3_vsnprintf sqlite3_api->xvsnprintf
+#define sqlite3_overload_function sqlite3_api->overload_function
+#define sqlite3_prepare_v2 sqlite3_api->prepare_v2
+#define sqlite3_prepare16_v2 sqlite3_api->prepare16_v2
+#define sqlite3_clear_bindings sqlite3_api->clear_bindings
+#define sqlite3_bind_zeroblob sqlite3_api->bind_zeroblob
+#define sqlite3_blob_bytes sqlite3_api->blob_bytes
+#define sqlite3_blob_close sqlite3_api->blob_close
+#define sqlite3_blob_open sqlite3_api->blob_open
+#define sqlite3_blob_read sqlite3_api->blob_read
+#define sqlite3_blob_write sqlite3_api->blob_write
+#define sqlite3_create_collation_v2 sqlite3_api->create_collation_v2
+#define sqlite3_file_control sqlite3_api->file_control
+#define sqlite3_memory_highwater sqlite3_api->memory_highwater
+#define sqlite3_memory_used sqlite3_api->memory_used
+#define sqlite3_mutex_alloc sqlite3_api->mutex_alloc
+#define sqlite3_mutex_enter sqlite3_api->mutex_enter
+#define sqlite3_mutex_free sqlite3_api->mutex_free
+#define sqlite3_mutex_leave sqlite3_api->mutex_leave
+#define sqlite3_mutex_try sqlite3_api->mutex_try
+#define sqlite3_open_v2 sqlite3_api->open_v2
+#define sqlite3_release_memory sqlite3_api->release_memory
+#define sqlite3_result_error_nomem sqlite3_api->result_error_nomem
+#define sqlite3_result_error_toobig sqlite3_api->result_error_toobig
+#define sqlite3_sleep sqlite3_api->sleep
+#define sqlite3_soft_heap_limit sqlite3_api->soft_heap_limit
+#define sqlite3_vfs_find sqlite3_api->vfs_find
+#define sqlite3_vfs_register sqlite3_api->vfs_register
+#define sqlite3_vfs_unregister sqlite3_api->vfs_unregister
+#define sqlite3_threadsafe sqlite3_api->xthreadsafe
+#define sqlite3_result_zeroblob sqlite3_api->result_zeroblob
+#define sqlite3_result_error_code sqlite3_api->result_error_code
+#define sqlite3_test_control sqlite3_api->test_control
+#define sqlite3_randomness sqlite3_api->randomness
+#define sqlite3_context_db_handle sqlite3_api->context_db_handle
+#define sqlite3_extended_result_codes sqlite3_api->extended_result_codes
+#define sqlite3_limit sqlite3_api->limit
+#define sqlite3_next_stmt sqlite3_api->next_stmt
+#define sqlite3_sql sqlite3_api->sql
+#define sqlite3_status sqlite3_api->status
+#define sqlite3_backup_finish sqlite3_api->backup_finish
+#define sqlite3_backup_init sqlite3_api->backup_init
+#define sqlite3_backup_pagecount sqlite3_api->backup_pagecount
+#define sqlite3_backup_remaining sqlite3_api->backup_remaining
+#define sqlite3_backup_step sqlite3_api->backup_step
+#define sqlite3_compileoption_get sqlite3_api->compileoption_get
+#define sqlite3_compileoption_used sqlite3_api->compileoption_used
+#define sqlite3_create_function_v2 sqlite3_api->create_function_v2
+#define sqlite3_db_config sqlite3_api->db_config
+#define sqlite3_db_mutex sqlite3_api->db_mutex
+#define sqlite3_db_status sqlite3_api->db_status
+#define sqlite3_extended_errcode sqlite3_api->extended_errcode
+#define sqlite3_log sqlite3_api->log
+#define sqlite3_soft_heap_limit64 sqlite3_api->soft_heap_limit64
+#define sqlite3_sourceid sqlite3_api->sourceid
+#define sqlite3_stmt_status sqlite3_api->stmt_status
+#define sqlite3_strnicmp sqlite3_api->strnicmp
+#define sqlite3_unlock_notify sqlite3_api->unlock_notify
+#define sqlite3_wal_autocheckpoint sqlite3_api->wal_autocheckpoint
+#define sqlite3_wal_checkpoint sqlite3_api->wal_checkpoint
+#define sqlite3_wal_hook sqlite3_api->wal_hook
+#define sqlite3_blob_reopen sqlite3_api->blob_reopen
+#define sqlite3_vtab_config sqlite3_api->vtab_config
+#define sqlite3_vtab_on_conflict sqlite3_api->vtab_on_conflict
+/* Version 3.7.16 and later */
+#define sqlite3_close_v2 sqlite3_api->close_v2
+#define sqlite3_db_filename sqlite3_api->db_filename
+#define sqlite3_db_readonly sqlite3_api->db_readonly
+#define sqlite3_db_release_memory sqlite3_api->db_release_memory
+#define sqlite3_errstr sqlite3_api->errstr
+#define sqlite3_stmt_busy sqlite3_api->stmt_busy
+#define sqlite3_stmt_readonly sqlite3_api->stmt_readonly
+#define sqlite3_stricmp sqlite3_api->stricmp
+#define sqlite3_uri_boolean sqlite3_api->uri_boolean
+#define sqlite3_uri_int64 sqlite3_api->uri_int64
+#define sqlite3_uri_parameter sqlite3_api->uri_parameter
+#define sqlite3_uri_vsnprintf sqlite3_api->xvsnprintf
+#define sqlite3_wal_checkpoint_v2 sqlite3_api->wal_checkpoint_v2
+/* Version 3.8.7 and later */
+#define sqlite3_auto_extension sqlite3_api->auto_extension
+#define sqlite3_bind_blob64 sqlite3_api->bind_blob64
+#define sqlite3_bind_text64 sqlite3_api->bind_text64
+#define sqlite3_cancel_auto_extension sqlite3_api->cancel_auto_extension
+#define sqlite3_load_extension sqlite3_api->load_extension
+#define sqlite3_malloc64 sqlite3_api->malloc64
+#define sqlite3_msize sqlite3_api->msize
+#define sqlite3_realloc64 sqlite3_api->realloc64
+#define sqlite3_reset_auto_extension sqlite3_api->reset_auto_extension
+#define sqlite3_result_blob64 sqlite3_api->result_blob64
+#define sqlite3_result_text64 sqlite3_api->result_text64
+#define sqlite3_strglob sqlite3_api->strglob
+/* Version 3.8.11 and later */
+#define sqlite3_value_dup sqlite3_api->value_dup
+#define sqlite3_value_free sqlite3_api->value_free
+#define sqlite3_result_zeroblob64 sqlite3_api->result_zeroblob64
+#define sqlite3_bind_zeroblob64 sqlite3_api->bind_zeroblob64
+/* Version 3.9.0 and later */
+#define sqlite3_value_subtype sqlite3_api->value_subtype
+#define sqlite3_result_subtype sqlite3_api->result_subtype
+/* Version 3.10.0 and later */
+#define sqlite3_status64 sqlite3_api->status64
+#define sqlite3_strlike sqlite3_api->strlike
+#define sqlite3_db_cacheflush sqlite3_api->db_cacheflush
+/* Version 3.12.0 and later */
+#define sqlite3_system_errno sqlite3_api->system_errno
+/* Version 3.14.0 and later */
+#define sqlite3_trace_v2 sqlite3_api->trace_v2
+#define sqlite3_expanded_sql sqlite3_api->expanded_sql
+/* Version 3.18.0 and later */
+#define sqlite3_set_last_insert_rowid sqlite3_api->set_last_insert_rowid
+/* Version 3.20.0 and later */
+#define sqlite3_prepare_v3 sqlite3_api->prepare_v3
+#define sqlite3_prepare16_v3 sqlite3_api->prepare16_v3
+#define sqlite3_bind_pointer sqlite3_api->bind_pointer
+#define sqlite3_result_pointer sqlite3_api->result_pointer
+#define sqlite3_value_pointer sqlite3_api->value_pointer
+/* Version 3.22.0 and later */
+#define sqlite3_vtab_nochange sqlite3_api->vtab_nochange
+#define sqlite3_value_nochange sqlite3_api->value_nochange
+#define sqlite3_vtab_collation sqlite3_api->vtab_collation
+/* Version 3.24.0 and later */
+#define sqlite3_keyword_count sqlite3_api->keyword_count
+#define sqlite3_keyword_name sqlite3_api->keyword_name
+#define sqlite3_keyword_check sqlite3_api->keyword_check
+#define sqlite3_str_new sqlite3_api->str_new
+#define sqlite3_str_finish sqlite3_api->str_finish
+#define sqlite3_str_appendf sqlite3_api->str_appendf
+#define sqlite3_str_vappendf sqlite3_api->str_vappendf
+#define sqlite3_str_append sqlite3_api->str_append
+#define sqlite3_str_appendall sqlite3_api->str_appendall
+#define sqlite3_str_appendchar sqlite3_api->str_appendchar
+#define sqlite3_str_reset sqlite3_api->str_reset
+#define sqlite3_str_errcode sqlite3_api->str_errcode
+#define sqlite3_str_length sqlite3_api->str_length
+#define sqlite3_str_value sqlite3_api->str_value
+/* Version 3.25.0 and later */
+#define sqlite3_create_window_function sqlite3_api->create_window_function
+/* Version 3.26.0 and later */
+#define sqlite3_normalized_sql sqlite3_api->normalized_sql
+/* Version 3.28.0 and later */
+#define sqlite3_stmt_isexplain sqlite3_api->stmt_isexplain
+#define sqlite3_value_frombind sqlite3_api->value_frombind
+/* Version 3.30.0 and later */
+#define sqlite3_drop_modules sqlite3_api->drop_modules
+/* Version 3.31.0 and later */
+#define sqlite3_hard_heap_limit64 sqlite3_api->hard_heap_limit64
+#define sqlite3_uri_key sqlite3_api->uri_key
+#define sqlite3_filename_database sqlite3_api->filename_database
+#define sqlite3_filename_journal sqlite3_api->filename_journal
+#define sqlite3_filename_wal sqlite3_api->filename_wal
+/* Version 3.32.0 and later */
+#define sqlite3_create_filename sqlite3_api->create_filename
+#define sqlite3_free_filename sqlite3_api->free_filename
+#define sqlite3_database_file_object sqlite3_api->database_file_object
+#endif /* !defined(SQLITE_CORE) && !defined(SQLITE_OMIT_LOAD_EXTENSION) */
+
+#if !defined(SQLITE_CORE) && !defined(SQLITE_OMIT_LOAD_EXTENSION)
+  /* This is the case when the file really is being compiled as a loadable
+ ** extension */
+# define SQLITE_EXTENSION_INIT1 const sqlite3_api_routines *sqlite3_api=0;
+# define SQLITE_EXTENSION_INIT2(v) sqlite3_api=v;
+# define SQLITE_EXTENSION_INIT3 \
+ extern const sqlite3_api_routines *sqlite3_api;
+#else
+  /* This is the case when the file is being statically linked into the
+ ** application */
+# define SQLITE_EXTENSION_INIT1 /*no-op*/
+# define SQLITE_EXTENSION_INIT2(v) (void)v; /* unused parameter */
+# define SQLITE_EXTENSION_INIT3 /*no-op*/
+#endif
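+
+/* An illustrative sketch (fenced out with "#if 0", so it is never
+** compiled) of how a loadable extension uses the macros above.  The
+** scalar function "half" and the entry point sqlite3_half_init are
+** hypothetical examples, not part of this header.
+*/
+#if 0
+#include "sqlite3ext.h"
+SQLITE_EXTENSION_INIT1             /* defines the sqlite3_api pointer */
+
+static void halfFunc(sqlite3_context *ctx, int argc, sqlite3_value **argv){
+  (void)argc;
+  sqlite3_result_double(ctx, 0.5*sqlite3_value_double(argv[0]));
+}
+
+/* Default-derived entry point for a shared library named "half.so" */
+int sqlite3_half_init(
+  sqlite3 *db,
+  char **pzErrMsg,
+  const sqlite3_api_routines *pApi
+){
+  (void)pzErrMsg;
+  SQLITE_EXTENSION_INIT2(pApi);    /* route API calls through pApi */
+  return sqlite3_create_function(db, "half", 1, SQLITE_UTF8, 0,
+                                 halfFunc, 0, 0);
+}
+#endif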
+
+#endif /* SQLITE3EXT_H */
+
+/************** End of sqlite3ext.h ******************************************/
+/************** Continuing where we left off in loadext.c ********************/
+/* #include "sqliteInt.h" */
+
+#ifndef SQLITE_OMIT_LOAD_EXTENSION
+/*
+** Some API routines are omitted when various features are
+** excluded from a build of SQLite. Substitute a NULL pointer
+** for any missing APIs.
+*/
+#ifndef SQLITE_ENABLE_COLUMN_METADATA
+# define sqlite3_column_database_name 0
+# define sqlite3_column_database_name16 0
+# define sqlite3_column_table_name 0
+# define sqlite3_column_table_name16 0
+# define sqlite3_column_origin_name 0
+# define sqlite3_column_origin_name16 0
+#endif
+
+#ifdef SQLITE_OMIT_AUTHORIZATION
+# define sqlite3_set_authorizer 0
+#endif
+
+#ifdef SQLITE_OMIT_UTF16
+# define sqlite3_bind_text16 0
+# define sqlite3_collation_needed16 0
+# define sqlite3_column_decltype16 0
+# define sqlite3_column_name16 0
+# define sqlite3_column_text16 0
+# define sqlite3_complete16 0
+# define sqlite3_create_collation16 0
+# define sqlite3_create_function16 0
+# define sqlite3_errmsg16 0
+# define sqlite3_open16 0
+# define sqlite3_prepare16 0
+# define sqlite3_prepare16_v2 0
+# define sqlite3_prepare16_v3 0
+# define sqlite3_result_error16 0
+# define sqlite3_result_text16 0
+# define sqlite3_result_text16be 0
+# define sqlite3_result_text16le 0
+# define sqlite3_value_text16 0
+# define sqlite3_value_text16be 0
+# define sqlite3_value_text16le 0
+# define sqlite3_column_database_name16 0
+# define sqlite3_column_table_name16 0
+# define sqlite3_column_origin_name16 0
+#endif
+
+#ifdef SQLITE_OMIT_COMPLETE
+# define sqlite3_complete 0
+# define sqlite3_complete16 0
+#endif
+
+#ifdef SQLITE_OMIT_DECLTYPE
+# define sqlite3_column_decltype16 0
+# define sqlite3_column_decltype 0
+#endif
+
+#ifdef SQLITE_OMIT_PROGRESS_CALLBACK
+# define sqlite3_progress_handler 0
+#endif
+
+#ifdef SQLITE_OMIT_VIRTUALTABLE
+# define sqlite3_create_module 0
+# define sqlite3_create_module_v2 0
+# define sqlite3_declare_vtab 0
+# define sqlite3_vtab_config 0
+# define sqlite3_vtab_on_conflict 0
+# define sqlite3_vtab_collation 0
+#endif
+
+#ifdef SQLITE_OMIT_SHARED_CACHE
+# define sqlite3_enable_shared_cache 0
+#endif
+
+#if defined(SQLITE_OMIT_TRACE) || defined(SQLITE_OMIT_DEPRECATED)
+# define sqlite3_profile 0
+# define sqlite3_trace 0
+#endif
+
+#ifdef SQLITE_OMIT_GET_TABLE
+# define sqlite3_free_table 0
+# define sqlite3_get_table 0
+#endif
+
+#ifdef SQLITE_OMIT_INCRBLOB
+#define sqlite3_bind_zeroblob 0
+#define sqlite3_blob_bytes 0
+#define sqlite3_blob_close 0
+#define sqlite3_blob_open 0
+#define sqlite3_blob_read 0
+#define sqlite3_blob_write 0
+#define sqlite3_blob_reopen 0
+#endif
+
+#if defined(SQLITE_OMIT_TRACE)
+# define sqlite3_trace_v2 0
+#endif
+
+/*
+** The following structure contains pointers to all SQLite API routines.
+** A pointer to this structure is passed into extensions when they are
+** loaded so that the extension can make calls back into the SQLite
+** library.
+**
+** When adding new APIs, add them to the bottom of this structure
+** in order to preserve backwards compatibility.
+**
+** Extensions that use newer APIs should first call
+** sqlite3_libversion_number() to make sure that the API they
+** intend to use is supported by the library. Extensions should
+** also check to make sure that the pointer to the function is
+** not NULL before calling it.
+*/
+static const sqlite3_api_routines sqlite3Apis = {
+ sqlite3_aggregate_context,
+#ifndef SQLITE_OMIT_DEPRECATED
+ sqlite3_aggregate_count,
+#else
+ 0,
+#endif
+ sqlite3_bind_blob,
+ sqlite3_bind_double,
+ sqlite3_bind_int,
+ sqlite3_bind_int64,
+ sqlite3_bind_null,
+ sqlite3_bind_parameter_count,
+ sqlite3_bind_parameter_index,
+ sqlite3_bind_parameter_name,
+ sqlite3_bind_text,
+ sqlite3_bind_text16,
+ sqlite3_bind_value,
+ sqlite3_busy_handler,
+ sqlite3_busy_timeout,
+ sqlite3_changes,
+ sqlite3_close,
+ sqlite3_collation_needed,
+ sqlite3_collation_needed16,
+ sqlite3_column_blob,
+ sqlite3_column_bytes,
+ sqlite3_column_bytes16,
+ sqlite3_column_count,
+ sqlite3_column_database_name,
+ sqlite3_column_database_name16,
+ sqlite3_column_decltype,
+ sqlite3_column_decltype16,
+ sqlite3_column_double,
+ sqlite3_column_int,
+ sqlite3_column_int64,
+ sqlite3_column_name,
+ sqlite3_column_name16,
+ sqlite3_column_origin_name,
+ sqlite3_column_origin_name16,
+ sqlite3_column_table_name,
+ sqlite3_column_table_name16,
+ sqlite3_column_text,
+ sqlite3_column_text16,
+ sqlite3_column_type,
+ sqlite3_column_value,
+ sqlite3_commit_hook,
+ sqlite3_complete,
+ sqlite3_complete16,
+ sqlite3_create_collation,
+ sqlite3_create_collation16,
+ sqlite3_create_function,
+ sqlite3_create_function16,
+ sqlite3_create_module,
+ sqlite3_data_count,
+ sqlite3_db_handle,
+ sqlite3_declare_vtab,
+ sqlite3_enable_shared_cache,
+ sqlite3_errcode,
+ sqlite3_errmsg,
+ sqlite3_errmsg16,
+ sqlite3_exec,
+#ifndef SQLITE_OMIT_DEPRECATED
+ sqlite3_expired,
+#else
+ 0,
+#endif
+ sqlite3_finalize,
+ sqlite3_free,
+ sqlite3_free_table,
+ sqlite3_get_autocommit,
+ sqlite3_get_auxdata,
+ sqlite3_get_table,
+ 0, /* Was sqlite3_global_recover(), but that function is deprecated */
+ sqlite3_interrupt,
+ sqlite3_last_insert_rowid,
+ sqlite3_libversion,
+ sqlite3_libversion_number,
+ sqlite3_malloc,
+ sqlite3_mprintf,
+ sqlite3_open,
+ sqlite3_open16,
+ sqlite3_prepare,
+ sqlite3_prepare16,
+ sqlite3_profile,
+ sqlite3_progress_handler,
+ sqlite3_realloc,
+ sqlite3_reset,
+ sqlite3_result_blob,
+ sqlite3_result_double,
+ sqlite3_result_error,
+ sqlite3_result_error16,
+ sqlite3_result_int,
+ sqlite3_result_int64,
+ sqlite3_result_null,
+ sqlite3_result_text,
+ sqlite3_result_text16,
+ sqlite3_result_text16be,
+ sqlite3_result_text16le,
+ sqlite3_result_value,
+ sqlite3_rollback_hook,
+ sqlite3_set_authorizer,
+ sqlite3_set_auxdata,
+ sqlite3_snprintf,
+ sqlite3_step,
+ sqlite3_table_column_metadata,
+#ifndef SQLITE_OMIT_DEPRECATED
+ sqlite3_thread_cleanup,
+#else
+ 0,
+#endif
+ sqlite3_total_changes,
+ sqlite3_trace,
+#ifndef SQLITE_OMIT_DEPRECATED
+ sqlite3_transfer_bindings,
+#else
+ 0,
+#endif
+ sqlite3_update_hook,
+ sqlite3_user_data,
+ sqlite3_value_blob,
+ sqlite3_value_bytes,
+ sqlite3_value_bytes16,
+ sqlite3_value_double,
+ sqlite3_value_int,
+ sqlite3_value_int64,
+ sqlite3_value_numeric_type,
+ sqlite3_value_text,
+ sqlite3_value_text16,
+ sqlite3_value_text16be,
+ sqlite3_value_text16le,
+ sqlite3_value_type,
+ sqlite3_vmprintf,
+ /*
+ ** The original API set ends here. All extensions can call any
+ ** of the APIs above provided that the pointer is not NULL. But
+  ** before calling APIs that follow, extensions should check
+ ** sqlite3_libversion_number() to make sure they are dealing with
+ ** a library that is new enough to support that API.
+ *************************************************************************
+ */
+ sqlite3_overload_function,
+
+ /*
+ ** Added after 3.3.13
+ */
+ sqlite3_prepare_v2,
+ sqlite3_prepare16_v2,
+ sqlite3_clear_bindings,
+
+ /*
+ ** Added for 3.4.1
+ */
+ sqlite3_create_module_v2,
+
+ /*
+ ** Added for 3.5.0
+ */
+ sqlite3_bind_zeroblob,
+ sqlite3_blob_bytes,
+ sqlite3_blob_close,
+ sqlite3_blob_open,
+ sqlite3_blob_read,
+ sqlite3_blob_write,
+ sqlite3_create_collation_v2,
+ sqlite3_file_control,
+ sqlite3_memory_highwater,
+ sqlite3_memory_used,
+#ifdef SQLITE_MUTEX_OMIT
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+#else
+ sqlite3_mutex_alloc,
+ sqlite3_mutex_enter,
+ sqlite3_mutex_free,
+ sqlite3_mutex_leave,
+ sqlite3_mutex_try,
+#endif
+ sqlite3_open_v2,
+ sqlite3_release_memory,
+ sqlite3_result_error_nomem,
+ sqlite3_result_error_toobig,
+ sqlite3_sleep,
+ sqlite3_soft_heap_limit,
+ sqlite3_vfs_find,
+ sqlite3_vfs_register,
+ sqlite3_vfs_unregister,
+
+ /*
+ ** Added for 3.5.8
+ */
+ sqlite3_threadsafe,
+ sqlite3_result_zeroblob,
+ sqlite3_result_error_code,
+ sqlite3_test_control,
+ sqlite3_randomness,
+ sqlite3_context_db_handle,
+
+ /*
+ ** Added for 3.6.0
+ */
+ sqlite3_extended_result_codes,
+ sqlite3_limit,
+ sqlite3_next_stmt,
+ sqlite3_sql,
+ sqlite3_status,
+
+ /*
+ ** Added for 3.7.4
+ */
+ sqlite3_backup_finish,
+ sqlite3_backup_init,
+ sqlite3_backup_pagecount,
+ sqlite3_backup_remaining,
+ sqlite3_backup_step,
+#ifndef SQLITE_OMIT_COMPILEOPTION_DIAGS
+ sqlite3_compileoption_get,
+ sqlite3_compileoption_used,
+#else
+ 0,
+ 0,
+#endif
+ sqlite3_create_function_v2,
+ sqlite3_db_config,
+ sqlite3_db_mutex,
+ sqlite3_db_status,
+ sqlite3_extended_errcode,
+ sqlite3_log,
+ sqlite3_soft_heap_limit64,
+ sqlite3_sourceid,
+ sqlite3_stmt_status,
+ sqlite3_strnicmp,
+#ifdef SQLITE_ENABLE_UNLOCK_NOTIFY
+ sqlite3_unlock_notify,
+#else
+ 0,
+#endif
+#ifndef SQLITE_OMIT_WAL
+ sqlite3_wal_autocheckpoint,
+ sqlite3_wal_checkpoint,
+ sqlite3_wal_hook,
+#else
+ 0,
+ 0,
+ 0,
+#endif
+ sqlite3_blob_reopen,
+ sqlite3_vtab_config,
+ sqlite3_vtab_on_conflict,
+ sqlite3_close_v2,
+ sqlite3_db_filename,
+ sqlite3_db_readonly,
+ sqlite3_db_release_memory,
+ sqlite3_errstr,
+ sqlite3_stmt_busy,
+ sqlite3_stmt_readonly,
+ sqlite3_stricmp,
+ sqlite3_uri_boolean,
+ sqlite3_uri_int64,
+ sqlite3_uri_parameter,
+ sqlite3_vsnprintf,
+ sqlite3_wal_checkpoint_v2,
+ /* Version 3.8.7 and later */
+ sqlite3_auto_extension,
+ sqlite3_bind_blob64,
+ sqlite3_bind_text64,
+ sqlite3_cancel_auto_extension,
+ sqlite3_load_extension,
+ sqlite3_malloc64,
+ sqlite3_msize,
+ sqlite3_realloc64,
+ sqlite3_reset_auto_extension,
+ sqlite3_result_blob64,
+ sqlite3_result_text64,
+ sqlite3_strglob,
+ /* Version 3.8.11 and later */
+ (sqlite3_value*(*)(const sqlite3_value*))sqlite3_value_dup,
+ sqlite3_value_free,
+ sqlite3_result_zeroblob64,
+ sqlite3_bind_zeroblob64,
+ /* Version 3.9.0 and later */
+ sqlite3_value_subtype,
+ sqlite3_result_subtype,
+ /* Version 3.10.0 and later */
+ sqlite3_status64,
+ sqlite3_strlike,
+ sqlite3_db_cacheflush,
+ /* Version 3.12.0 and later */
+ sqlite3_system_errno,
+ /* Version 3.14.0 and later */
+ sqlite3_trace_v2,
+ sqlite3_expanded_sql,
+ /* Version 3.18.0 and later */
+ sqlite3_set_last_insert_rowid,
+ /* Version 3.20.0 and later */
+ sqlite3_prepare_v3,
+ sqlite3_prepare16_v3,
+ sqlite3_bind_pointer,
+ sqlite3_result_pointer,
+ sqlite3_value_pointer,
+ /* Version 3.22.0 and later */
+ sqlite3_vtab_nochange,
+ sqlite3_value_nochange,
+ sqlite3_vtab_collation,
+ /* Version 3.24.0 and later */
+ sqlite3_keyword_count,
+ sqlite3_keyword_name,
+ sqlite3_keyword_check,
+ sqlite3_str_new,
+ sqlite3_str_finish,
+ sqlite3_str_appendf,
+ sqlite3_str_vappendf,
+ sqlite3_str_append,
+ sqlite3_str_appendall,
+ sqlite3_str_appendchar,
+ sqlite3_str_reset,
+ sqlite3_str_errcode,
+ sqlite3_str_length,
+ sqlite3_str_value,
+ /* Version 3.25.0 and later */
+ sqlite3_create_window_function,
+ /* Version 3.26.0 and later */
+#ifdef SQLITE_ENABLE_NORMALIZE
+ sqlite3_normalized_sql,
+#else
+ 0,
+#endif
+ /* Version 3.28.0 and later */
+ sqlite3_stmt_isexplain,
+ sqlite3_value_frombind,
+ /* Version 3.30.0 and later */
+#ifndef SQLITE_OMIT_VIRTUALTABLE
+ sqlite3_drop_modules,
+#else
+ 0,
+#endif
+ /* Version 3.31.0 and later */
+ sqlite3_hard_heap_limit64,
+ sqlite3_uri_key,
+ sqlite3_filename_database,
+ sqlite3_filename_journal,
+ sqlite3_filename_wal,
+ /* Version 3.32.0 and later */
+ sqlite3_create_filename,
+ sqlite3_free_filename,
+ sqlite3_database_file_object,
+};
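+
+/* An illustrative sketch (fenced out with "#if 0", never compiled) of the
+** version-check discipline described above, as an extension might apply
+** it before using sqlite3_bind_pointer(), which was added in 3.20.0.
+** The helper name is a hypothetical example.
+*/
+#if 0
+static int canUseBindPointer(void){
+  return sqlite3_libversion_number()>=3020000
+      && sqlite3_api->bind_pointer!=0;
+}
+#endif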
+
+/* True if X is the directory separator character
+*/
+#if SQLITE_OS_WIN
+# define DirSep(X) ((X)=='/'||(X)=='\\')
+#else
+# define DirSep(X) ((X)=='/')
+#endif
+
+/*
+** Attempt to load an SQLite extension library contained in the file
+** zFile. The entry point is zProc. zProc may be 0 in which case a
+** default entry point name (sqlite3_extension_init) is used. Use
+** of the default name is recommended.
+**
+** Return SQLITE_OK on success and SQLITE_ERROR if something goes wrong.
+**
+** If an error occurs and pzErrMsg is not 0, then fill *pzErrMsg with
+** error message text. The calling function should free this memory
+** by calling sqlite3DbFree(db, ).
+*/
+static int sqlite3LoadExtension(
+ sqlite3 *db, /* Load the extension into this database connection */
+ const char *zFile, /* Name of the shared library containing extension */
+ const char *zProc, /* Entry point. Use "sqlite3_extension_init" if 0 */
+ char **pzErrMsg /* Put error message here if not 0 */
+){
+ sqlite3_vfs *pVfs = db->pVfs;
+ void *handle;
+ sqlite3_loadext_entry xInit;
+ char *zErrmsg = 0;
+ const char *zEntry;
+ char *zAltEntry = 0;
+ void **aHandle;
+ u64 nMsg = 300 + sqlite3Strlen30(zFile);
+ int ii;
+ int rc;
+
+ /* Shared library endings to try if zFile cannot be loaded as written */
+ static const char *azEndings[] = {
+#if SQLITE_OS_WIN
+ "dll"
+#elif defined(__APPLE__)
+ "dylib"
+#else
+ "so"
+#endif
+ };
+
+
+ if( pzErrMsg ) *pzErrMsg = 0;
+
+  /* Ticket #1863.  To avoid creating security problems for older
+ ** applications that relink against newer versions of SQLite, the
+ ** ability to run load_extension is turned off by default. One
+ ** must call either sqlite3_enable_load_extension(db) or
+ ** sqlite3_db_config(db, SQLITE_DBCONFIG_ENABLE_LOAD_EXTENSION, 1, 0)
+ ** to turn on extension loading.
+ */
+ if( (db->flags & SQLITE_LoadExtension)==0 ){
+ if( pzErrMsg ){
+ *pzErrMsg = sqlite3_mprintf("not authorized");
+ }
+ return SQLITE_ERROR;
+ }
+
+ zEntry = zProc ? zProc : "sqlite3_extension_init";
+
+ handle = sqlite3OsDlOpen(pVfs, zFile);
+#if SQLITE_OS_UNIX || SQLITE_OS_WIN
+ for(ii=0; ii<ArraySize(azEndings) && handle==0; ii++){
+ char *zAltFile = sqlite3_mprintf("%s.%s", zFile, azEndings[ii]);
+ if( zAltFile==0 ) return SQLITE_NOMEM_BKPT;
+ handle = sqlite3OsDlOpen(pVfs, zAltFile);
+ sqlite3_free(zAltFile);
+ }
+#endif
+ if( handle==0 ){
+ if( pzErrMsg ){
+ *pzErrMsg = zErrmsg = sqlite3_malloc64(nMsg);
+ if( zErrmsg ){
+ sqlite3_snprintf(nMsg, zErrmsg,
+ "unable to open shared library [%s]", zFile);
+ sqlite3OsDlError(pVfs, nMsg-1, zErrmsg);
+ }
+ }
+ return SQLITE_ERROR;
+ }
+ xInit = (sqlite3_loadext_entry)sqlite3OsDlSym(pVfs, handle, zEntry);
+
+ /* If no entry point was specified and the default legacy
+ ** entry point name "sqlite3_extension_init" was not found, then
+ ** construct an entry point name "sqlite3_X_init" where the X is
+ ** replaced by the lowercase value of every ASCII alphabetic
+  ** character in the filename after the last "/" up to the first ".",
+ ** and eliding the first three characters if they are "lib".
+ ** Examples:
+ **
+ ** /usr/local/lib/libExample5.4.3.so ==> sqlite3_example_init
+ ** C:/lib/mathfuncs.dll ==> sqlite3_mathfuncs_init
+ */
+ if( xInit==0 && zProc==0 ){
+ int iFile, iEntry, c;
+ int ncFile = sqlite3Strlen30(zFile);
+ zAltEntry = sqlite3_malloc64(ncFile+30);
+ if( zAltEntry==0 ){
+ sqlite3OsDlClose(pVfs, handle);
+ return SQLITE_NOMEM_BKPT;
+ }
+ memcpy(zAltEntry, "sqlite3_", 8);
+ for(iFile=ncFile-1; iFile>=0 && !DirSep(zFile[iFile]); iFile--){}
+ iFile++;
+ if( sqlite3_strnicmp(zFile+iFile, "lib", 3)==0 ) iFile += 3;
+ for(iEntry=8; (c = zFile[iFile])!=0 && c!='.'; iFile++){
+ if( sqlite3Isalpha(c) ){
+ zAltEntry[iEntry++] = (char)sqlite3UpperToLower[(unsigned)c];
+ }
+ }
+ memcpy(zAltEntry+iEntry, "_init", 6);
+ zEntry = zAltEntry;
+ xInit = (sqlite3_loadext_entry)sqlite3OsDlSym(pVfs, handle, zEntry);
+ }
+ if( xInit==0 ){
+ if( pzErrMsg ){
+ nMsg += sqlite3Strlen30(zEntry);
+ *pzErrMsg = zErrmsg = sqlite3_malloc64(nMsg);
+ if( zErrmsg ){
+ sqlite3_snprintf(nMsg, zErrmsg,
+ "no entry point [%s] in shared library [%s]", zEntry, zFile);
+ sqlite3OsDlError(pVfs, nMsg-1, zErrmsg);
+ }
+ }
+ sqlite3OsDlClose(pVfs, handle);
+ sqlite3_free(zAltEntry);
+ return SQLITE_ERROR;
+ }
+ sqlite3_free(zAltEntry);
+ rc = xInit(db, &zErrmsg, &sqlite3Apis);
+ if( rc ){
+ if( rc==SQLITE_OK_LOAD_PERMANENTLY ) return SQLITE_OK;
+ if( pzErrMsg ){
+ *pzErrMsg = sqlite3_mprintf("error during initialization: %s", zErrmsg);
+ }
+ sqlite3_free(zErrmsg);
+ sqlite3OsDlClose(pVfs, handle);
+ return SQLITE_ERROR;
+ }
+
+ /* Append the new shared library handle to the db->aExtension array. */
+ aHandle = sqlite3DbMallocZero(db, sizeof(handle)*(db->nExtension+1));
+ if( aHandle==0 ){
+ return SQLITE_NOMEM_BKPT;
+ }
+ if( db->nExtension>0 ){
+ memcpy(aHandle, db->aExtension, sizeof(handle)*db->nExtension);
+ }
+ sqlite3DbFree(db, db->aExtension);
+ db->aExtension = aHandle;
+
+ db->aExtension[db->nExtension++] = handle;
+ return SQLITE_OK;
+}
+SQLITE_API int sqlite3_load_extension(
+ sqlite3 *db, /* Load the extension into this database connection */
+ const char *zFile, /* Name of the shared library containing extension */
+ const char *zProc, /* Entry point. Use "sqlite3_extension_init" if 0 */
+ char **pzErrMsg /* Put error message here if not 0 */
+){
+ int rc;
+ sqlite3_mutex_enter(db->mutex);
+ rc = sqlite3LoadExtension(db, zFile, zProc, pzErrMsg);
+ rc = sqlite3ApiExit(db, rc);
+ sqlite3_mutex_leave(db->mutex);
+ return rc;
+}
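+
+/* An illustrative sketch (fenced out with "#if 0", never compiled) of
+** application-side use: C-API extension loading must be enabled first,
+** as explained above.  The file name "./myext" and the helper name are
+** hypothetical examples.
+*/
+#if 0
+static int loadMyExtension(sqlite3 *db){
+  char *zErr = 0;
+  int rc;
+  sqlite3_db_config(db, SQLITE_DBCONFIG_ENABLE_LOAD_EXTENSION, 1, (int*)0);
+  rc = sqlite3_load_extension(db, "./myext", 0, &zErr);
+  if( rc!=SQLITE_OK ){
+    /* zErr, if set, must be released with sqlite3_free() */
+    sqlite3_free(zErr);
+  }
+  return rc;
+}
+#endif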
+
+/*
+** Call this routine when the database connection is closing in order
+** to clean up loaded extensions.
+*/
+SQLITE_PRIVATE void sqlite3CloseExtensions(sqlite3 *db){
+ int i;
+ assert( sqlite3_mutex_held(db->mutex) );
+ for(i=0; i<db->nExtension; i++){
+ sqlite3OsDlClose(db->pVfs, db->aExtension[i]);
+ }
+ sqlite3DbFree(db, db->aExtension);
+}
+
+/*
+** Enable or disable extension loading. Extension loading is disabled by
+** default so as not to open security holes in older applications.
+*/
+SQLITE_API int sqlite3_enable_load_extension(sqlite3 *db, int onoff){
+ sqlite3_mutex_enter(db->mutex);
+ if( onoff ){
+ db->flags |= SQLITE_LoadExtension|SQLITE_LoadExtFunc;
+ }else{
+ db->flags &= ~(u64)(SQLITE_LoadExtension|SQLITE_LoadExtFunc);
+ }
+ sqlite3_mutex_leave(db->mutex);
+ return SQLITE_OK;
+}
+
+#endif /* !defined(SQLITE_OMIT_LOAD_EXTENSION) */
+
+/*
+** The following object holds the list of automatically loaded
+** extensions.
+**
+** This list is shared across threads. The SQLITE_MUTEX_STATIC_MAIN
+** mutex must be held while accessing this list.
+*/
+typedef struct sqlite3AutoExtList sqlite3AutoExtList;
+static SQLITE_WSD struct sqlite3AutoExtList {
+ u32 nExt; /* Number of entries in aExt[] */
+ void (**aExt)(void); /* Pointers to the extension init functions */
+} sqlite3Autoext = { 0, 0 };
+
+/* The "wsdAutoext" macro will resolve to the autoextension
+** state vector. If writable static data is unsupported on the target,
+** we have to locate the state vector at run-time. In the more common
+** case where writable static data is supported, wsdAutoext can refer directly
+** to the "sqlite3Autoext" state vector declared above.
+*/
+#ifdef SQLITE_OMIT_WSD
+# define wsdAutoextInit \
+ sqlite3AutoExtList *x = &GLOBAL(sqlite3AutoExtList,sqlite3Autoext)
+# define wsdAutoext x[0]
+#else
+# define wsdAutoextInit
+# define wsdAutoext sqlite3Autoext
+#endif
+
+
+/*
+** Register a statically linked extension that is automatically
+** loaded by every new database connection.
+*/
+SQLITE_API int sqlite3_auto_extension(
+ void (*xInit)(void)
+){
+ int rc = SQLITE_OK;
+#ifndef SQLITE_OMIT_AUTOINIT
+ rc = sqlite3_initialize();
+ if( rc ){
+ return rc;
+ }else
+#endif
+ {
+ u32 i;
+#if SQLITE_THREADSAFE
+ sqlite3_mutex *mutex = sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MAIN);
+#endif
+ wsdAutoextInit;
+ sqlite3_mutex_enter(mutex);
+ for(i=0; i<wsdAutoext.nExt; i++){
+ if( wsdAutoext.aExt[i]==xInit ) break;
+ }
+ if( i==wsdAutoext.nExt ){
+ u64 nByte = (wsdAutoext.nExt+1)*sizeof(wsdAutoext.aExt[0]);
+ void (**aNew)(void);
+ aNew = sqlite3_realloc64(wsdAutoext.aExt, nByte);
+ if( aNew==0 ){
+ rc = SQLITE_NOMEM_BKPT;
+ }else{
+ wsdAutoext.aExt = aNew;
+ wsdAutoext.aExt[wsdAutoext.nExt] = xInit;
+ wsdAutoext.nExt++;
+ }
+ }
+ sqlite3_mutex_leave(mutex);
+ assert( (rc&0xff)==rc );
+ return rc;
+ }
+}
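+
+/* An illustrative sketch (fenced out with "#if 0", never compiled) of
+** registering a statically linked extension for every new connection.
+** sqlite3_my_init is a hypothetical entry point with the standard
+** sqlite3_loadext_entry signature.
+*/
+#if 0
+extern int sqlite3_my_init(sqlite3*, char**, const sqlite3_api_routines*);
+static int registerMyExtension(void){
+  /* The cast matches the void(*)(void) type stored in the registry */
+  return sqlite3_auto_extension((void(*)(void))sqlite3_my_init);
+}
+#endif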
+
+/*
+** Cancel a prior call to sqlite3_auto_extension. Remove xInit from the
+** set of routines that are invoked for each new database connection, if it
+** is currently on the list. If xInit is not on the list, then this
+** routine is a no-op.
+**
+** Return 1 if xInit was found on the list and removed. Return 0 if xInit
+** was not on the list.
+*/
+SQLITE_API int sqlite3_cancel_auto_extension(
+ void (*xInit)(void)
+){
+#if SQLITE_THREADSAFE
+ sqlite3_mutex *mutex = sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MAIN);
+#endif
+ int i;
+ int n = 0;
+ wsdAutoextInit;
+ sqlite3_mutex_enter(mutex);
+ for(i=(int)wsdAutoext.nExt-1; i>=0; i--){
+ if( wsdAutoext.aExt[i]==xInit ){
+ wsdAutoext.nExt--;
+ wsdAutoext.aExt[i] = wsdAutoext.aExt[wsdAutoext.nExt];
+ n++;
+ break;
+ }
+ }
+ sqlite3_mutex_leave(mutex);
+ return n;
+}
+
+/*
+** Reset the automatic extension loading mechanism.
+*/
+SQLITE_API void sqlite3_reset_auto_extension(void){
+#ifndef SQLITE_OMIT_AUTOINIT
+ if( sqlite3_initialize()==SQLITE_OK )
+#endif
+ {
+#if SQLITE_THREADSAFE
+ sqlite3_mutex *mutex = sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MAIN);
+#endif
+ wsdAutoextInit;
+ sqlite3_mutex_enter(mutex);
+ sqlite3_free(wsdAutoext.aExt);
+ wsdAutoext.aExt = 0;
+ wsdAutoext.nExt = 0;
+ sqlite3_mutex_leave(mutex);
+ }
+}
+
+/*
+** Load all automatic extensions.
+**
+** If anything goes wrong, set an error in the database connection.
+*/
+SQLITE_PRIVATE void sqlite3AutoLoadExtensions(sqlite3 *db){
+ u32 i;
+ int go = 1;
+ int rc;
+ sqlite3_loadext_entry xInit;
+
+ wsdAutoextInit;
+ if( wsdAutoext.nExt==0 ){
+    /* Common case: early out without ever having to acquire a mutex */
+ return;
+ }
+ for(i=0; go; i++){
+ char *zErrmsg;
+#if SQLITE_THREADSAFE
+ sqlite3_mutex *mutex = sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MAIN);
+#endif
+#ifdef SQLITE_OMIT_LOAD_EXTENSION
+ const sqlite3_api_routines *pThunk = 0;
+#else
+ const sqlite3_api_routines *pThunk = &sqlite3Apis;
+#endif
+ sqlite3_mutex_enter(mutex);
+ if( i>=wsdAutoext.nExt ){
+ xInit = 0;
+ go = 0;
+ }else{
+ xInit = (sqlite3_loadext_entry)wsdAutoext.aExt[i];
+ }
+ sqlite3_mutex_leave(mutex);
+ zErrmsg = 0;
+ if( xInit && (rc = xInit(db, &zErrmsg, pThunk))!=0 ){
+ sqlite3ErrorWithMsg(db, rc,
+ "automatic extension loading failed: %s", zErrmsg);
+ go = 0;
+ }
+ sqlite3_free(zErrmsg);
+ }
+}
+
+/************** End of loadext.c *********************************************/
+/************** Begin file pragma.c ******************************************/
+/*
+** 2003 April 6
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+** This file contains code used to implement the PRAGMA command.
+*/
+/* #include "sqliteInt.h" */
+
+#if !defined(SQLITE_ENABLE_LOCKING_STYLE)
+# if defined(__APPLE__)
+# define SQLITE_ENABLE_LOCKING_STYLE 1
+# else
+# define SQLITE_ENABLE_LOCKING_STYLE 0
+# endif
+#endif
+
+/***************************************************************************
+** The "pragma.h" include file is an automatically generated file that
+** that includes the PragType_XXXX macro definitions and the aPragmaName[]
+** object. This ensures that the aPragmaName[] table is arranged in
+** lexicographical order to facility a binary search of the pragma name.
+** Do not edit pragma.h directly. Edit and rerun the script in at
+** ../tool/mkpragmatab.tcl. */
+/************** Include pragma.h in the middle of pragma.c *******************/
+/************** Begin file pragma.h ******************************************/
+/* DO NOT EDIT!
+** This file is automatically generated by the script at
+** ../tool/mkpragmatab.tcl. To update the set of pragmas, edit
+** that script and rerun it.
+*/
+
+/* The various pragma types */
+#define PragTyp_ACTIVATE_EXTENSIONS 0
+#define PragTyp_ANALYSIS_LIMIT 1
+#define PragTyp_HEADER_VALUE 2
+#define PragTyp_AUTO_VACUUM 3
+#define PragTyp_FLAG 4
+#define PragTyp_BUSY_TIMEOUT 5
+#define PragTyp_CACHE_SIZE 6
+#define PragTyp_CACHE_SPILL 7
+#define PragTyp_CASE_SENSITIVE_LIKE 8
+#define PragTyp_COLLATION_LIST 9
+#define PragTyp_COMPILE_OPTIONS 10
+#define PragTyp_DATA_STORE_DIRECTORY 11
+#define PragTyp_DATABASE_LIST 12
+#define PragTyp_DEFAULT_CACHE_SIZE 13
+#define PragTyp_ENCODING 14
+#define PragTyp_FOREIGN_KEY_CHECK 15
+#define PragTyp_FOREIGN_KEY_LIST 16
+#define PragTyp_FUNCTION_LIST 17
+#define PragTyp_HARD_HEAP_LIMIT 18
+#define PragTyp_INCREMENTAL_VACUUM 19
+#define PragTyp_INDEX_INFO 20
+#define PragTyp_INDEX_LIST 21
+#define PragTyp_INTEGRITY_CHECK 22
+#define PragTyp_JOURNAL_MODE 23
+#define PragTyp_JOURNAL_SIZE_LIMIT 24
+#define PragTyp_LOCK_PROXY_FILE 25
+#define PragTyp_LOCKING_MODE 26
+#define PragTyp_PAGE_COUNT 27
+#define PragTyp_MMAP_SIZE 28
+#define PragTyp_MODULE_LIST 29
+#define PragTyp_OPTIMIZE 30
+#define PragTyp_PAGE_SIZE 31
+#define PragTyp_PRAGMA_LIST 32
+#define PragTyp_SECURE_DELETE 33
+#define PragTyp_SHRINK_MEMORY 34
+#define PragTyp_SOFT_HEAP_LIMIT 35
+#define PragTyp_SYNCHRONOUS 36
+#define PragTyp_TABLE_INFO 37
+#define PragTyp_TEMP_STORE 38
+#define PragTyp_TEMP_STORE_DIRECTORY 39
+#define PragTyp_THREADS 40
+#define PragTyp_WAL_AUTOCHECKPOINT 41
+#define PragTyp_WAL_CHECKPOINT 42
+#define PragTyp_LOCK_STATUS 43
+#define PragTyp_STATS 44
+
+/* Property flags associated with various pragma. */
+#define PragFlg_NeedSchema 0x01 /* Force schema load before running */
+#define PragFlg_NoColumns 0x02 /* OP_ResultRow called with zero columns */
+#define PragFlg_NoColumns1 0x04 /* zero columns if RHS argument is present */
+#define PragFlg_ReadOnly 0x08 /* Read-only HEADER_VALUE */
+#define PragFlg_Result0 0x10 /* Acts as query when no argument */
+#define PragFlg_Result1 0x20 /* Acts as query when has one argument */
+#define PragFlg_SchemaOpt 0x40 /* Schema restricts name search if present */
+#define PragFlg_SchemaReq 0x80 /* Schema required - "main" is default */
+
+/* Names of columns for pragmas that return multi-column results
+** or that return single-column results where the name of the
+** result column is different from the name of the pragma
+*/
+static const char *const pragCName[] = {
+ /* 0 */ "id", /* Used by: foreign_key_list */
+ /* 1 */ "seq",
+ /* 2 */ "table",
+ /* 3 */ "from",
+ /* 4 */ "to",
+ /* 5 */ "on_update",
+ /* 6 */ "on_delete",
+ /* 7 */ "match",
+ /* 8 */ "cid", /* Used by: table_xinfo */
+ /* 9 */ "name",
+ /* 10 */ "type",
+ /* 11 */ "notnull",
+ /* 12 */ "dflt_value",
+ /* 13 */ "pk",
+ /* 14 */ "hidden",
+ /* table_info reuses 8 */
+ /* 15 */ "seqno", /* Used by: index_xinfo */
+ /* 16 */ "cid",
+ /* 17 */ "name",
+ /* 18 */ "desc",
+ /* 19 */ "coll",
+ /* 20 */ "key",
+ /* 21 */ "name", /* Used by: function_list */
+ /* 22 */ "builtin",
+ /* 23 */ "type",
+ /* 24 */ "enc",
+ /* 25 */ "narg",
+ /* 26 */ "flags",
+ /* 27 */ "tbl", /* Used by: stats */
+ /* 28 */ "idx",
+ /* 29 */ "wdth",
+ /* 30 */ "hght",
+ /* 31 */ "flgs",
+ /* 32 */ "seq", /* Used by: index_list */
+ /* 33 */ "name",
+ /* 34 */ "unique",
+ /* 35 */ "origin",
+ /* 36 */ "partial",
+ /* 37 */ "table", /* Used by: foreign_key_check */
+ /* 38 */ "rowid",
+ /* 39 */ "parent",
+ /* 40 */ "fkid",
+ /* index_info reuses 15 */
+ /* 41 */ "seq", /* Used by: database_list */
+ /* 42 */ "name",
+ /* 43 */ "file",
+ /* 44 */ "busy", /* Used by: wal_checkpoint */
+ /* 45 */ "log",
+ /* 46 */ "checkpointed",
+ /* collation_list reuses 32 */
+ /* 47 */ "database", /* Used by: lock_status */
+ /* 48 */ "status",
+ /* 49 */ "cache_size", /* Used by: default_cache_size */
+ /* module_list pragma_list reuses 9 */
+ /* 50 */ "timeout", /* Used by: busy_timeout */
+};
+
+/* Definitions of all built-in pragmas */
+typedef struct PragmaName {
+ const char *const zName; /* Name of pragma */
+ u8 ePragTyp; /* PragTyp_XXX value */
+ u8 mPragFlg; /* Zero or more PragFlg_XXX values */
+ u8 iPragCName; /* Start of column names in pragCName[] */
+ u8 nPragCName; /* Num of col names. 0 means use pragma name */
+ u64 iArg; /* Extra argument */
+} PragmaName;
+static const PragmaName aPragmaName[] = {
+#if defined(SQLITE_ENABLE_CEROD)
+ {/* zName: */ "activate_extensions",
+ /* ePragTyp: */ PragTyp_ACTIVATE_EXTENSIONS,
+ /* ePragFlg: */ 0,
+ /* ColNames: */ 0, 0,
+ /* iArg: */ 0 },
+#endif
+ {/* zName: */ "analysis_limit",
+ /* ePragTyp: */ PragTyp_ANALYSIS_LIMIT,
+ /* ePragFlg: */ PragFlg_Result0,
+ /* ColNames: */ 0, 0,
+ /* iArg: */ 0 },
+#if !defined(SQLITE_OMIT_SCHEMA_VERSION_PRAGMAS)
+ {/* zName: */ "application_id",
+ /* ePragTyp: */ PragTyp_HEADER_VALUE,
+ /* ePragFlg: */ PragFlg_NoColumns1|PragFlg_Result0,
+ /* ColNames: */ 0, 0,
+ /* iArg: */ BTREE_APPLICATION_ID },
+#endif
+#if !defined(SQLITE_OMIT_AUTOVACUUM)
+ {/* zName: */ "auto_vacuum",
+ /* ePragTyp: */ PragTyp_AUTO_VACUUM,
+ /* ePragFlg: */ PragFlg_NeedSchema|PragFlg_Result0|PragFlg_SchemaReq|PragFlg_NoColumns1,
+ /* ColNames: */ 0, 0,
+ /* iArg: */ 0 },
+#endif
+#if !defined(SQLITE_OMIT_FLAG_PRAGMAS)
+#if !defined(SQLITE_OMIT_AUTOMATIC_INDEX)
+ {/* zName: */ "automatic_index",
+ /* ePragTyp: */ PragTyp_FLAG,
+ /* ePragFlg: */ PragFlg_Result0|PragFlg_NoColumns1,
+ /* ColNames: */ 0, 0,
+ /* iArg: */ SQLITE_AutoIndex },
+#endif
+#endif
+ {/* zName: */ "busy_timeout",
+ /* ePragTyp: */ PragTyp_BUSY_TIMEOUT,
+ /* ePragFlg: */ PragFlg_Result0,
+ /* ColNames: */ 50, 1,
+ /* iArg: */ 0 },
+#if !defined(SQLITE_OMIT_PAGER_PRAGMAS)
+ {/* zName: */ "cache_size",
+ /* ePragTyp: */ PragTyp_CACHE_SIZE,
+ /* ePragFlg: */ PragFlg_NeedSchema|PragFlg_Result0|PragFlg_SchemaReq|PragFlg_NoColumns1,
+ /* ColNames: */ 0, 0,
+ /* iArg: */ 0 },
+#endif
+#if !defined(SQLITE_OMIT_FLAG_PRAGMAS)
+ {/* zName: */ "cache_spill",
+ /* ePragTyp: */ PragTyp_CACHE_SPILL,
+ /* ePragFlg: */ PragFlg_Result0|PragFlg_SchemaReq|PragFlg_NoColumns1,
+ /* ColNames: */ 0, 0,
+ /* iArg: */ 0 },
+#endif
+#if !defined(SQLITE_OMIT_CASE_SENSITIVE_LIKE_PRAGMA)
+ {/* zName: */ "case_sensitive_like",
+ /* ePragTyp: */ PragTyp_CASE_SENSITIVE_LIKE,
+ /* ePragFlg: */ PragFlg_NoColumns,
+ /* ColNames: */ 0, 0,
+ /* iArg: */ 0 },
+#endif
+ {/* zName: */ "cell_size_check",
+ /* ePragTyp: */ PragTyp_FLAG,
+ /* ePragFlg: */ PragFlg_Result0|PragFlg_NoColumns1,
+ /* ColNames: */ 0, 0,
+ /* iArg: */ SQLITE_CellSizeCk },
+#if !defined(SQLITE_OMIT_FLAG_PRAGMAS)
+ {/* zName: */ "checkpoint_fullfsync",
+ /* ePragTyp: */ PragTyp_FLAG,
+ /* ePragFlg: */ PragFlg_Result0|PragFlg_NoColumns1,
+ /* ColNames: */ 0, 0,
+ /* iArg: */ SQLITE_CkptFullFSync },
+#endif
+#if !defined(SQLITE_OMIT_SCHEMA_PRAGMAS)
+ {/* zName: */ "collation_list",
+ /* ePragTyp: */ PragTyp_COLLATION_LIST,
+ /* ePragFlg: */ PragFlg_Result0,
+ /* ColNames: */ 32, 2,
+ /* iArg: */ 0 },
+#endif
+#if !defined(SQLITE_OMIT_COMPILEOPTION_DIAGS)
+ {/* zName: */ "compile_options",
+ /* ePragTyp: */ PragTyp_COMPILE_OPTIONS,
+ /* ePragFlg: */ PragFlg_Result0,
+ /* ColNames: */ 0, 0,
+ /* iArg: */ 0 },
+#endif
+#if !defined(SQLITE_OMIT_FLAG_PRAGMAS)
+ {/* zName: */ "count_changes",
+ /* ePragTyp: */ PragTyp_FLAG,
+ /* ePragFlg: */ PragFlg_Result0|PragFlg_NoColumns1,
+ /* ColNames: */ 0, 0,
+ /* iArg: */ SQLITE_CountRows },
+#endif
+#if !defined(SQLITE_OMIT_PAGER_PRAGMAS) && SQLITE_OS_WIN
+ {/* zName: */ "data_store_directory",
+ /* ePragTyp: */ PragTyp_DATA_STORE_DIRECTORY,
+ /* ePragFlg: */ PragFlg_NoColumns1,
+ /* ColNames: */ 0, 0,
+ /* iArg: */ 0 },
+#endif
+#if !defined(SQLITE_OMIT_SCHEMA_VERSION_PRAGMAS)
+ {/* zName: */ "data_version",
+ /* ePragTyp: */ PragTyp_HEADER_VALUE,
+ /* ePragFlg: */ PragFlg_ReadOnly|PragFlg_Result0,
+ /* ColNames: */ 0, 0,
+ /* iArg: */ BTREE_DATA_VERSION },
+#endif
+#if !defined(SQLITE_OMIT_SCHEMA_PRAGMAS)
+ {/* zName: */ "database_list",
+ /* ePragTyp: */ PragTyp_DATABASE_LIST,
+ /* ePragFlg: */ PragFlg_NeedSchema|PragFlg_Result0,
+ /* ColNames: */ 41, 3,
+ /* iArg: */ 0 },
+#endif
+#if !defined(SQLITE_OMIT_PAGER_PRAGMAS) && !defined(SQLITE_OMIT_DEPRECATED)
+ {/* zName: */ "default_cache_size",
+ /* ePragTyp: */ PragTyp_DEFAULT_CACHE_SIZE,
+ /* ePragFlg: */ PragFlg_NeedSchema|PragFlg_Result0|PragFlg_SchemaReq|PragFlg_NoColumns1,
+ /* ColNames: */ 49, 1,
+ /* iArg: */ 0 },
+#endif
+#if !defined(SQLITE_OMIT_FLAG_PRAGMAS)
+#if !defined(SQLITE_OMIT_FOREIGN_KEY) && !defined(SQLITE_OMIT_TRIGGER)
+ {/* zName: */ "defer_foreign_keys",
+ /* ePragTyp: */ PragTyp_FLAG,
+ /* ePragFlg: */ PragFlg_Result0|PragFlg_NoColumns1,
+ /* ColNames: */ 0, 0,
+ /* iArg: */ SQLITE_DeferFKs },
+#endif
+#endif
+#if !defined(SQLITE_OMIT_FLAG_PRAGMAS)
+ {/* zName: */ "empty_result_callbacks",
+ /* ePragTyp: */ PragTyp_FLAG,
+ /* ePragFlg: */ PragFlg_Result0|PragFlg_NoColumns1,
+ /* ColNames: */ 0, 0,
+ /* iArg: */ SQLITE_NullCallback },
+#endif
+#if !defined(SQLITE_OMIT_UTF16)
+ {/* zName: */ "encoding",
+ /* ePragTyp: */ PragTyp_ENCODING,
+ /* ePragFlg: */ PragFlg_Result0|PragFlg_NoColumns1,
+ /* ColNames: */ 0, 0,
+ /* iArg: */ 0 },
+#endif
+#if !defined(SQLITE_OMIT_FOREIGN_KEY) && !defined(SQLITE_OMIT_TRIGGER)
+ {/* zName: */ "foreign_key_check",
+ /* ePragTyp: */ PragTyp_FOREIGN_KEY_CHECK,
+ /* ePragFlg: */ PragFlg_NeedSchema|PragFlg_Result0|PragFlg_Result1|PragFlg_SchemaOpt,
+ /* ColNames: */ 37, 4,
+ /* iArg: */ 0 },
+#endif
+#if !defined(SQLITE_OMIT_FOREIGN_KEY)
+ {/* zName: */ "foreign_key_list",
+ /* ePragTyp: */ PragTyp_FOREIGN_KEY_LIST,
+ /* ePragFlg: */ PragFlg_NeedSchema|PragFlg_Result1|PragFlg_SchemaOpt,
+ /* ColNames: */ 0, 8,
+ /* iArg: */ 0 },
+#endif
+#if !defined(SQLITE_OMIT_FLAG_PRAGMAS)
+#if !defined(SQLITE_OMIT_FOREIGN_KEY) && !defined(SQLITE_OMIT_TRIGGER)
+ {/* zName: */ "foreign_keys",
+ /* ePragTyp: */ PragTyp_FLAG,
+ /* ePragFlg: */ PragFlg_Result0|PragFlg_NoColumns1,
+ /* ColNames: */ 0, 0,
+ /* iArg: */ SQLITE_ForeignKeys },
+#endif
+#endif
+#if !defined(SQLITE_OMIT_SCHEMA_VERSION_PRAGMAS)
+ {/* zName: */ "freelist_count",
+ /* ePragTyp: */ PragTyp_HEADER_VALUE,
+ /* ePragFlg: */ PragFlg_ReadOnly|PragFlg_Result0,
+ /* ColNames: */ 0, 0,
+ /* iArg: */ BTREE_FREE_PAGE_COUNT },
+#endif
+#if !defined(SQLITE_OMIT_FLAG_PRAGMAS)
+ {/* zName: */ "full_column_names",
+ /* ePragTyp: */ PragTyp_FLAG,
+ /* ePragFlg: */ PragFlg_Result0|PragFlg_NoColumns1,
+ /* ColNames: */ 0, 0,
+ /* iArg: */ SQLITE_FullColNames },
+ {/* zName: */ "fullfsync",
+ /* ePragTyp: */ PragTyp_FLAG,
+ /* ePragFlg: */ PragFlg_Result0|PragFlg_NoColumns1,
+ /* ColNames: */ 0, 0,
+ /* iArg: */ SQLITE_FullFSync },
+#endif
+#if !defined(SQLITE_OMIT_SCHEMA_PRAGMAS)
+#if !defined(SQLITE_OMIT_INTROSPECTION_PRAGMAS)
+ {/* zName: */ "function_list",
+ /* ePragTyp: */ PragTyp_FUNCTION_LIST,
+ /* ePragFlg: */ PragFlg_Result0,
+ /* ColNames: */ 21, 6,
+ /* iArg: */ 0 },
+#endif
+#endif
+ {/* zName: */ "hard_heap_limit",
+ /* ePragTyp: */ PragTyp_HARD_HEAP_LIMIT,
+ /* ePragFlg: */ PragFlg_Result0,
+ /* ColNames: */ 0, 0,
+ /* iArg: */ 0 },
+#if !defined(SQLITE_OMIT_FLAG_PRAGMAS)
+#if !defined(SQLITE_OMIT_CHECK)
+ {/* zName: */ "ignore_check_constraints",
+ /* ePragTyp: */ PragTyp_FLAG,
+ /* ePragFlg: */ PragFlg_Result0|PragFlg_NoColumns1,
+ /* ColNames: */ 0, 0,
+ /* iArg: */ SQLITE_IgnoreChecks },
+#endif
+#endif
+#if !defined(SQLITE_OMIT_AUTOVACUUM)
+ {/* zName: */ "incremental_vacuum",
+ /* ePragTyp: */ PragTyp_INCREMENTAL_VACUUM,
+ /* ePragFlg: */ PragFlg_NeedSchema|PragFlg_NoColumns,
+ /* ColNames: */ 0, 0,
+ /* iArg: */ 0 },
+#endif
+#if !defined(SQLITE_OMIT_SCHEMA_PRAGMAS)
+ {/* zName: */ "index_info",
+ /* ePragTyp: */ PragTyp_INDEX_INFO,
+ /* ePragFlg: */ PragFlg_NeedSchema|PragFlg_Result1|PragFlg_SchemaOpt,
+ /* ColNames: */ 15, 3,
+ /* iArg: */ 0 },
+ {/* zName: */ "index_list",
+ /* ePragTyp: */ PragTyp_INDEX_LIST,
+ /* ePragFlg: */ PragFlg_NeedSchema|PragFlg_Result1|PragFlg_SchemaOpt,
+ /* ColNames: */ 32, 5,
+ /* iArg: */ 0 },
+ {/* zName: */ "index_xinfo",
+ /* ePragTyp: */ PragTyp_INDEX_INFO,
+ /* ePragFlg: */ PragFlg_NeedSchema|PragFlg_Result1|PragFlg_SchemaOpt,
+ /* ColNames: */ 15, 6,
+ /* iArg: */ 1 },
+#endif
+#if !defined(SQLITE_OMIT_INTEGRITY_CHECK)
+ {/* zName: */ "integrity_check",
+ /* ePragTyp: */ PragTyp_INTEGRITY_CHECK,
+ /* ePragFlg: */ PragFlg_NeedSchema|PragFlg_Result0|PragFlg_Result1,
+ /* ColNames: */ 0, 0,
+ /* iArg: */ 0 },
+#endif
+#if !defined(SQLITE_OMIT_PAGER_PRAGMAS)
+ {/* zName: */ "journal_mode",
+ /* ePragTyp: */ PragTyp_JOURNAL_MODE,
+ /* ePragFlg: */ PragFlg_NeedSchema|PragFlg_Result0|PragFlg_SchemaReq,
+ /* ColNames: */ 0, 0,
+ /* iArg: */ 0 },
+ {/* zName: */ "journal_size_limit",
+ /* ePragTyp: */ PragTyp_JOURNAL_SIZE_LIMIT,
+ /* ePragFlg: */ PragFlg_Result0|PragFlg_SchemaReq,
+ /* ColNames: */ 0, 0,
+ /* iArg: */ 0 },
+#endif
+#if !defined(SQLITE_OMIT_FLAG_PRAGMAS)
+ {/* zName: */ "legacy_alter_table",
+ /* ePragTyp: */ PragTyp_FLAG,
+ /* ePragFlg: */ PragFlg_Result0|PragFlg_NoColumns1,
+ /* ColNames: */ 0, 0,
+ /* iArg: */ SQLITE_LegacyAlter },
+#endif
+#if !defined(SQLITE_OMIT_PAGER_PRAGMAS) && SQLITE_ENABLE_LOCKING_STYLE
+ {/* zName: */ "lock_proxy_file",
+ /* ePragTyp: */ PragTyp_LOCK_PROXY_FILE,
+ /* ePragFlg: */ PragFlg_NoColumns1,
+ /* ColNames: */ 0, 0,
+ /* iArg: */ 0 },
+#endif
+#if defined(SQLITE_DEBUG) || defined(SQLITE_TEST)
+ {/* zName: */ "lock_status",
+ /* ePragTyp: */ PragTyp_LOCK_STATUS,
+ /* ePragFlg: */ PragFlg_Result0,
+ /* ColNames: */ 47, 2,
+ /* iArg: */ 0 },
+#endif
+#if !defined(SQLITE_OMIT_PAGER_PRAGMAS)
+ {/* zName: */ "locking_mode",
+ /* ePragTyp: */ PragTyp_LOCKING_MODE,
+ /* ePragFlg: */ PragFlg_Result0|PragFlg_SchemaReq,
+ /* ColNames: */ 0, 0,
+ /* iArg: */ 0 },
+ {/* zName: */ "max_page_count",
+ /* ePragTyp: */ PragTyp_PAGE_COUNT,
+ /* ePragFlg: */ PragFlg_NeedSchema|PragFlg_Result0|PragFlg_SchemaReq,
+ /* ColNames: */ 0, 0,
+ /* iArg: */ 0 },
+ {/* zName: */ "mmap_size",
+ /* ePragTyp: */ PragTyp_MMAP_SIZE,
+ /* ePragFlg: */ 0,
+ /* ColNames: */ 0, 0,
+ /* iArg: */ 0 },
+#endif
+#if !defined(SQLITE_OMIT_SCHEMA_PRAGMAS)
+#if !defined(SQLITE_OMIT_VIRTUALTABLE)
+#if !defined(SQLITE_OMIT_INTROSPECTION_PRAGMAS)
+ {/* zName: */ "module_list",
+ /* ePragTyp: */ PragTyp_MODULE_LIST,
+ /* ePragFlg: */ PragFlg_Result0,
+ /* ColNames: */ 9, 1,
+ /* iArg: */ 0 },
+#endif
+#endif
+#endif
+ {/* zName: */ "optimize",
+ /* ePragTyp: */ PragTyp_OPTIMIZE,
+ /* ePragFlg: */ PragFlg_Result1|PragFlg_NeedSchema,
+ /* ColNames: */ 0, 0,
+ /* iArg: */ 0 },
+#if !defined(SQLITE_OMIT_PAGER_PRAGMAS)
+ {/* zName: */ "page_count",
+ /* ePragTyp: */ PragTyp_PAGE_COUNT,
+ /* ePragFlg: */ PragFlg_NeedSchema|PragFlg_Result0|PragFlg_SchemaReq,
+ /* ColNames: */ 0, 0,
+ /* iArg: */ 0 },
+ {/* zName: */ "page_size",
+ /* ePragTyp: */ PragTyp_PAGE_SIZE,
+ /* ePragFlg: */ PragFlg_Result0|PragFlg_SchemaReq|PragFlg_NoColumns1,
+ /* ColNames: */ 0, 0,
+ /* iArg: */ 0 },
+#endif
+#if !defined(SQLITE_OMIT_FLAG_PRAGMAS)
+#if defined(SQLITE_DEBUG)
+ {/* zName: */ "parser_trace",
+ /* ePragTyp: */ PragTyp_FLAG,
+ /* ePragFlg: */ PragFlg_Result0|PragFlg_NoColumns1,
+ /* ColNames: */ 0, 0,
+ /* iArg: */ SQLITE_ParserTrace },
+#endif
+#endif
+#if !defined(SQLITE_OMIT_INTROSPECTION_PRAGMAS)
+ {/* zName: */ "pragma_list",
+ /* ePragTyp: */ PragTyp_PRAGMA_LIST,
+ /* ePragFlg: */ PragFlg_Result0,
+ /* ColNames: */ 9, 1,
+ /* iArg: */ 0 },
+#endif
+#if !defined(SQLITE_OMIT_FLAG_PRAGMAS)
+ {/* zName: */ "query_only",
+ /* ePragTyp: */ PragTyp_FLAG,
+ /* ePragFlg: */ PragFlg_Result0|PragFlg_NoColumns1,
+ /* ColNames: */ 0, 0,
+ /* iArg: */ SQLITE_QueryOnly },
+#endif
+#if !defined(SQLITE_OMIT_INTEGRITY_CHECK)
+ {/* zName: */ "quick_check",
+ /* ePragTyp: */ PragTyp_INTEGRITY_CHECK,
+ /* ePragFlg: */ PragFlg_NeedSchema|PragFlg_Result0|PragFlg_Result1,
+ /* ColNames: */ 0, 0,
+ /* iArg: */ 0 },
+#endif
+#if !defined(SQLITE_OMIT_FLAG_PRAGMAS)
+ {/* zName: */ "read_uncommitted",
+ /* ePragTyp: */ PragTyp_FLAG,
+ /* ePragFlg: */ PragFlg_Result0|PragFlg_NoColumns1,
+ /* ColNames: */ 0, 0,
+ /* iArg: */ SQLITE_ReadUncommit },
+ {/* zName: */ "recursive_triggers",
+ /* ePragTyp: */ PragTyp_FLAG,
+ /* ePragFlg: */ PragFlg_Result0|PragFlg_NoColumns1,
+ /* ColNames: */ 0, 0,
+ /* iArg: */ SQLITE_RecTriggers },
+ {/* zName: */ "reverse_unordered_selects",
+ /* ePragTyp: */ PragTyp_FLAG,
+ /* ePragFlg: */ PragFlg_Result0|PragFlg_NoColumns1,
+ /* ColNames: */ 0, 0,
+ /* iArg: */ SQLITE_ReverseOrder },
+#endif
+#if !defined(SQLITE_OMIT_SCHEMA_VERSION_PRAGMAS)
+ {/* zName: */ "schema_version",
+ /* ePragTyp: */ PragTyp_HEADER_VALUE,
+ /* ePragFlg: */ PragFlg_NoColumns1|PragFlg_Result0,
+ /* ColNames: */ 0, 0,
+ /* iArg: */ BTREE_SCHEMA_VERSION },
+#endif
+#if !defined(SQLITE_OMIT_PAGER_PRAGMAS)
+ {/* zName: */ "secure_delete",
+ /* ePragTyp: */ PragTyp_SECURE_DELETE,
+ /* ePragFlg: */ PragFlg_Result0,
+ /* ColNames: */ 0, 0,
+ /* iArg: */ 0 },
+#endif
+#if !defined(SQLITE_OMIT_FLAG_PRAGMAS)
+ {/* zName: */ "short_column_names",
+ /* ePragTyp: */ PragTyp_FLAG,
+ /* ePragFlg: */ PragFlg_Result0|PragFlg_NoColumns1,
+ /* ColNames: */ 0, 0,
+ /* iArg: */ SQLITE_ShortColNames },
+#endif
+ {/* zName: */ "shrink_memory",
+ /* ePragTyp: */ PragTyp_SHRINK_MEMORY,
+ /* ePragFlg: */ PragFlg_NoColumns,
+ /* ColNames: */ 0, 0,
+ /* iArg: */ 0 },
+ {/* zName: */ "soft_heap_limit",
+ /* ePragTyp: */ PragTyp_SOFT_HEAP_LIMIT,
+ /* ePragFlg: */ PragFlg_Result0,
+ /* ColNames: */ 0, 0,
+ /* iArg: */ 0 },
+#if !defined(SQLITE_OMIT_FLAG_PRAGMAS)
+#if defined(SQLITE_DEBUG)
+ {/* zName: */ "sql_trace",
+ /* ePragTyp: */ PragTyp_FLAG,
+ /* ePragFlg: */ PragFlg_Result0|PragFlg_NoColumns1,
+ /* ColNames: */ 0, 0,
+ /* iArg: */ SQLITE_SqlTrace },
+#endif
+#endif
+#if !defined(SQLITE_OMIT_SCHEMA_PRAGMAS) && defined(SQLITE_DEBUG)
+ {/* zName: */ "stats",
+ /* ePragTyp: */ PragTyp_STATS,
+ /* ePragFlg: */ PragFlg_NeedSchema|PragFlg_Result0|PragFlg_SchemaReq,
+ /* ColNames: */ 27, 5,
+ /* iArg: */ 0 },
+#endif
+#if !defined(SQLITE_OMIT_PAGER_PRAGMAS)
+ {/* zName: */ "synchronous",
+ /* ePragTyp: */ PragTyp_SYNCHRONOUS,
+ /* ePragFlg: */ PragFlg_NeedSchema|PragFlg_Result0|PragFlg_SchemaReq|PragFlg_NoColumns1,
+ /* ColNames: */ 0, 0,
+ /* iArg: */ 0 },
+#endif
+#if !defined(SQLITE_OMIT_SCHEMA_PRAGMAS)
+ {/* zName: */ "table_info",
+ /* ePragTyp: */ PragTyp_TABLE_INFO,
+ /* ePragFlg: */ PragFlg_NeedSchema|PragFlg_Result1|PragFlg_SchemaOpt,
+ /* ColNames: */ 8, 6,
+ /* iArg: */ 0 },
+ {/* zName: */ "table_xinfo",
+ /* ePragTyp: */ PragTyp_TABLE_INFO,
+ /* ePragFlg: */ PragFlg_NeedSchema|PragFlg_Result1|PragFlg_SchemaOpt,
+ /* ColNames: */ 8, 7,
+ /* iArg: */ 1 },
+#endif
+#if !defined(SQLITE_OMIT_PAGER_PRAGMAS)
+ {/* zName: */ "temp_store",
+ /* ePragTyp: */ PragTyp_TEMP_STORE,
+ /* ePragFlg: */ PragFlg_Result0|PragFlg_NoColumns1,
+ /* ColNames: */ 0, 0,
+ /* iArg: */ 0 },
+ {/* zName: */ "temp_store_directory",
+ /* ePragTyp: */ PragTyp_TEMP_STORE_DIRECTORY,
+ /* ePragFlg: */ PragFlg_NoColumns1,
+ /* ColNames: */ 0, 0,
+ /* iArg: */ 0 },
+#endif
+ {/* zName: */ "threads",
+ /* ePragTyp: */ PragTyp_THREADS,
+ /* ePragFlg: */ PragFlg_Result0,
+ /* ColNames: */ 0, 0,
+ /* iArg: */ 0 },
+#if !defined(SQLITE_OMIT_FLAG_PRAGMAS)
+ {/* zName: */ "trusted_schema",
+ /* ePragTyp: */ PragTyp_FLAG,
+ /* ePragFlg: */ PragFlg_Result0|PragFlg_NoColumns1,
+ /* ColNames: */ 0, 0,
+ /* iArg: */ SQLITE_TrustedSchema },
+#endif
+#if !defined(SQLITE_OMIT_SCHEMA_VERSION_PRAGMAS)
+ {/* zName: */ "user_version",
+ /* ePragTyp: */ PragTyp_HEADER_VALUE,
+ /* ePragFlg: */ PragFlg_NoColumns1|PragFlg_Result0,
+ /* ColNames: */ 0, 0,
+ /* iArg: */ BTREE_USER_VERSION },
+#endif
+#if !defined(SQLITE_OMIT_FLAG_PRAGMAS)
+#if defined(SQLITE_DEBUG)
+ {/* zName: */ "vdbe_addoptrace",
+ /* ePragTyp: */ PragTyp_FLAG,
+ /* ePragFlg: */ PragFlg_Result0|PragFlg_NoColumns1,
+ /* ColNames: */ 0, 0,
+ /* iArg: */ SQLITE_VdbeAddopTrace },
+ {/* zName: */ "vdbe_debug",
+ /* ePragTyp: */ PragTyp_FLAG,
+ /* ePragFlg: */ PragFlg_Result0|PragFlg_NoColumns1,
+ /* ColNames: */ 0, 0,
+ /* iArg: */ SQLITE_SqlTrace|SQLITE_VdbeListing|SQLITE_VdbeTrace },
+ {/* zName: */ "vdbe_eqp",
+ /* ePragTyp: */ PragTyp_FLAG,
+ /* ePragFlg: */ PragFlg_Result0|PragFlg_NoColumns1,
+ /* ColNames: */ 0, 0,
+ /* iArg: */ SQLITE_VdbeEQP },
+ {/* zName: */ "vdbe_listing",
+ /* ePragTyp: */ PragTyp_FLAG,
+ /* ePragFlg: */ PragFlg_Result0|PragFlg_NoColumns1,
+ /* ColNames: */ 0, 0,
+ /* iArg: */ SQLITE_VdbeListing },
+ {/* zName: */ "vdbe_trace",
+ /* ePragTyp: */ PragTyp_FLAG,
+ /* ePragFlg: */ PragFlg_Result0|PragFlg_NoColumns1,
+ /* ColNames: */ 0, 0,
+ /* iArg: */ SQLITE_VdbeTrace },
+#endif
+#endif
+#if !defined(SQLITE_OMIT_WAL)
+ {/* zName: */ "wal_autocheckpoint",
+ /* ePragTyp: */ PragTyp_WAL_AUTOCHECKPOINT,
+ /* ePragFlg: */ 0,
+ /* ColNames: */ 0, 0,
+ /* iArg: */ 0 },
+ {/* zName: */ "wal_checkpoint",
+ /* ePragTyp: */ PragTyp_WAL_CHECKPOINT,
+ /* ePragFlg: */ PragFlg_NeedSchema,
+ /* ColNames: */ 44, 3,
+ /* iArg: */ 0 },
+#endif
+#if !defined(SQLITE_OMIT_FLAG_PRAGMAS)
+ {/* zName: */ "writable_schema",
+ /* ePragTyp: */ PragTyp_FLAG,
+ /* ePragFlg: */ PragFlg_Result0|PragFlg_NoColumns1,
+ /* ColNames: */ 0, 0,
+ /* iArg: */ SQLITE_WriteSchema|SQLITE_NoSchemaError },
+#endif
+};
+/* Number of pragmas: 67 on by default, 77 total. */
+
+/************** End of pragma.h **********************************************/
+/************** Continuing where we left off in pragma.c *********************/
+
+/*
+** Interpret the given string as a safety level. Return 0 for OFF,
+** 1 for ON or NORMAL, 2 for FULL, and 3 for EXTRA. Return dflt for
+** an empty or unrecognized string argument. The FULL and EXTRA
+** options are disallowed if the omitFull parameter is 1.
+**
+** Note that the values returned are one less than the values that
+** should be passed into sqlite3BtreeSetSafetyLevel(). This is done
+** to support legacy SQL code. The safety level used to be boolean
+** and older scripts may have used numbers 0 for OFF and 1 for ON.
+*/
+static u8 getSafetyLevel(const char *z, int omitFull, u8 dflt){
+ /* 123456789 123456789 123 */
+ static const char zText[] = "onoffalseyestruextrafull";
+ static const u8 iOffset[] = {0, 1, 2, 4, 9, 12, 15, 20};
+ static const u8 iLength[] = {2, 2, 3, 5, 3, 4, 5, 4};
+ static const u8 iValue[] = {1, 0, 0, 0, 1, 1, 3, 2};
+ /* on no off false yes true extra full */
+ int i, n;
+ if( sqlite3Isdigit(*z) ){
+ return (u8)sqlite3Atoi(z);
+ }
+ n = sqlite3Strlen30(z);
+ for(i=0; i<ArraySize(iLength); i++){
+ if( iLength[i]==n && sqlite3StrNICmp(&zText[iOffset[i]],z,n)==0
+ && (!omitFull || iValue[i]<=1)
+ ){
+ return iValue[i];
+ }
+ }
+ return dflt;
+}
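+
+/* For illustration, the lookup table above yields the following mappings
+** (with omitFull==0 and dflt==1):
+**
+**   getSafetyLevel("off",   0, 1) -> 0
+**   getSafetyLevel("on",    0, 1) -> 1
+**   getSafetyLevel("true",  0, 1) -> 1
+**   getSafetyLevel("full",  0, 1) -> 2
+**   getSafetyLevel("extra", 0, 1) -> 3
+**   getSafetyLevel("2",     0, 1) -> 2   (leading digit: sqlite3Atoi used)
+**   getSafetyLevel("wal",   0, 1) -> 1   (unrecognized: dflt is returned)
+*/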
+
+/*
+** Interpret the given string as a boolean value.
+*/
+SQLITE_PRIVATE u8 sqlite3GetBoolean(const char *z, u8 dflt){
+ return getSafetyLevel(z,1,dflt)!=0;
+}
+
+/* The sqlite3GetBoolean() function is used by other modules but the
+** remainder of this file is specific to PRAGMA processing. So omit
+** the rest of the file if PRAGMAs are omitted from the build.
+*/
+#if !defined(SQLITE_OMIT_PRAGMA)
+
+/*
+** Interpret the given string as a locking mode value.
+*/
+static int getLockingMode(const char *z){
+ if( z ){
+ if( 0==sqlite3StrICmp(z, "exclusive") ) return PAGER_LOCKINGMODE_EXCLUSIVE;
+ if( 0==sqlite3StrICmp(z, "normal") ) return PAGER_LOCKINGMODE_NORMAL;
+ }
+ return PAGER_LOCKINGMODE_QUERY;
+}
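+
+/* For illustration: getLockingMode("exclusive") returns
+** PAGER_LOCKINGMODE_EXCLUSIVE and getLockingMode("normal") returns
+** PAGER_LOCKINGMODE_NORMAL; a NULL pointer or any other string yields
+** PAGER_LOCKINGMODE_QUERY, which the locking_mode pragma below treats
+** as a request to report the current mode.
+*/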
+
+#ifndef SQLITE_OMIT_AUTOVACUUM
+/*
+** Interpret the given string as an auto-vacuum mode value.
+**
+** The following strings, "none", "full" and "incremental" are
+** acceptable, as are their numeric equivalents: 0, 1 and 2 respectively.
+*/
+static int getAutoVacuum(const char *z){
+ int i;
+ if( 0==sqlite3StrICmp(z, "none") ) return BTREE_AUTOVACUUM_NONE;
+ if( 0==sqlite3StrICmp(z, "full") ) return BTREE_AUTOVACUUM_FULL;
+ if( 0==sqlite3StrICmp(z, "incremental") ) return BTREE_AUTOVACUUM_INCR;
+ i = sqlite3Atoi(z);
+ return (u8)((i>=0&&i<=2)?i:0);
+}
+#endif /* ifndef SQLITE_OMIT_AUTOVACUUM */
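+
+/* For illustration, assuming the BTREE_AUTOVACUUM_* constants keep their
+** documented values 0, 1 and 2:
+**
+**   getAutoVacuum("none")        -> 0
+**   getAutoVacuum("full")        -> 1
+**   getAutoVacuum("incremental") -> 2
+**   getAutoVacuum("2")           -> 2
+**   getAutoVacuum("9")           -> 0   (out-of-range values fall back to 0)
+*/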
+
+#ifndef SQLITE_OMIT_PAGER_PRAGMAS
+/*
+** Interpret the given string as a temp db location. Return 1 for file
+** backed temporary databases, 2 for an in-memory temporary database,
+** and 0 to use the compile-time default.
+*/
+static int getTempStore(const char *z){
+ if( z[0]>='0' && z[0]<='2' ){
+ return z[0] - '0';
+ }else if( sqlite3StrICmp(z, "file")==0 ){
+ return 1;
+ }else if( sqlite3StrICmp(z, "memory")==0 ){
+ return 2;
+ }else{
+ return 0;
+ }
+}
+#endif /* SQLITE_OMIT_PAGER_PRAGMAS */
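+
+/* For illustration:
+**
+**   getTempStore("file")   -> 1      getTempStore("1") -> 1
+**   getTempStore("memory") -> 2      getTempStore("2") -> 2
+**   getTempStore("xyzzy")  -> 0      (use the compile-time default)
+*/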
+
+#ifndef SQLITE_OMIT_PAGER_PRAGMAS
+/*
+** Invalidate temp storage, either when the temp storage setting is
+** changed from its default, or when it is 'file' and the
+** temp_store_directory has changed.
+*/
+static int invalidateTempStorage(Parse *pParse){
+ sqlite3 *db = pParse->db;
+ if( db->aDb[1].pBt!=0 ){
+ if( !db->autoCommit || sqlite3BtreeIsInReadTrans(db->aDb[1].pBt) ){
+ sqlite3ErrorMsg(pParse, "temporary storage cannot be changed "
+ "from within a transaction");
+ return SQLITE_ERROR;
+ }
+ sqlite3BtreeClose(db->aDb[1].pBt);
+ db->aDb[1].pBt = 0;
+ sqlite3ResetAllSchemasOfConnection(db);
+ }
+ return SQLITE_OK;
+}
+#endif /* SQLITE_OMIT_PAGER_PRAGMAS */
+
+#ifndef SQLITE_OMIT_PAGER_PRAGMAS
+/*
+** If the TEMP database is open, close it and mark the database schema
+** as needing reloading. This must be done when using the SQLITE_TEMP_STORE
+** or DEFAULT_TEMP_STORE pragmas.
+*/
+static int changeTempStorage(Parse *pParse, const char *zStorageType){
+ int ts = getTempStore(zStorageType);
+ sqlite3 *db = pParse->db;
+ if( db->temp_store==ts ) return SQLITE_OK;
+ if( invalidateTempStorage( pParse ) != SQLITE_OK ){
+ return SQLITE_ERROR;
+ }
+ db->temp_store = (u8)ts;
+ return SQLITE_OK;
+}
+#endif /* SQLITE_OMIT_PAGER_PRAGMAS */
+
+/*
+** Set result column names for a pragma.
+*/
+static void setPragmaResultColumnNames(
+ Vdbe *v, /* The query under construction */
+ const PragmaName *pPragma /* The pragma */
+){
+ u8 n = pPragma->nPragCName;
+ sqlite3VdbeSetNumCols(v, n==0 ? 1 : n);
+ if( n==0 ){
+ sqlite3VdbeSetColName(v, 0, COLNAME_NAME, pPragma->zName, SQLITE_STATIC);
+ }else{
+ int i, j;
+ for(i=0, j=pPragma->iPragCName; i<n; i++, j++){
+ sqlite3VdbeSetColName(v, i, COLNAME_NAME, pragCName[j], SQLITE_STATIC);
+ }
+ }
+}
+
+/*
+** Generate code to return a single integer value.
+*/
+static void returnSingleInt(Vdbe *v, i64 value){
+ sqlite3VdbeAddOp4Dup8(v, OP_Int64, 0, 1, 0, (const u8*)&value, P4_INT64);
+ sqlite3VdbeAddOp2(v, OP_ResultRow, 1, 1);
+}
+
+/*
+** Generate code to return a single text value.
+*/
+static void returnSingleText(
+ Vdbe *v, /* Prepared statement under construction */
+ const char *zValue /* Value to be returned */
+){
+ if( zValue ){
+ sqlite3VdbeLoadString(v, 1, (const char*)zValue);
+ sqlite3VdbeAddOp2(v, OP_ResultRow, 1, 1);
+ }
+}
+
+
+/*
+** Set the safety_level and pager flags for every pager belonging to
+** the database connection. This is a no-op unless the connection is
+** in auto-commit mode.
+*/
+#ifndef SQLITE_OMIT_PAGER_PRAGMAS
+static void setAllPagerFlags(sqlite3 *db){
+ if( db->autoCommit ){
+ Db *pDb = db->aDb;
+ int n = db->nDb;
+ assert( SQLITE_FullFSync==PAGER_FULLFSYNC );
+ assert( SQLITE_CkptFullFSync==PAGER_CKPT_FULLFSYNC );
+ assert( SQLITE_CacheSpill==PAGER_CACHESPILL );
+ assert( (PAGER_FULLFSYNC | PAGER_CKPT_FULLFSYNC | PAGER_CACHESPILL)
+ == PAGER_FLAGS_MASK );
+ assert( (pDb->safety_level & PAGER_SYNCHRONOUS_MASK)==pDb->safety_level );
+ while( (n--) > 0 ){
+ if( pDb->pBt ){
+ sqlite3BtreeSetPagerFlags(pDb->pBt,
+ pDb->safety_level | (db->flags & PAGER_FLAGS_MASK) );
+ }
+ pDb++;
+ }
+ }
+}
+#else
+# define setAllPagerFlags(X) /* no-op */
+#endif
+
+
+/*
+** Return a human-readable name for a constraint resolution action.
+*/
+#ifndef SQLITE_OMIT_FOREIGN_KEY
+static const char *actionName(u8 action){
+ const char *zName;
+ switch( action ){
+ case OE_SetNull: zName = "SET NULL"; break;
+ case OE_SetDflt: zName = "SET DEFAULT"; break;
+ case OE_Cascade: zName = "CASCADE"; break;
+ case OE_Restrict: zName = "RESTRICT"; break;
+ default: zName = "NO ACTION";
+ assert( action==OE_None ); break;
+ }
+ return zName;
+}
+#endif
+
+
+/*
+** Parameter eMode must be one of the PAGER_JOURNALMODE_XXX constants
+** defined in pager.h. This function returns the associated lowercase
+** journal-mode name.
+*/
+SQLITE_PRIVATE const char *sqlite3JournalModename(int eMode){
+ static char * const azModeName[] = {
+ "delete", "persist", "off", "truncate", "memory"
+#ifndef SQLITE_OMIT_WAL
+ , "wal"
+#endif
+ };
+ assert( PAGER_JOURNALMODE_DELETE==0 );
+ assert( PAGER_JOURNALMODE_PERSIST==1 );
+ assert( PAGER_JOURNALMODE_OFF==2 );
+ assert( PAGER_JOURNALMODE_TRUNCATE==3 );
+ assert( PAGER_JOURNALMODE_MEMORY==4 );
+ assert( PAGER_JOURNALMODE_WAL==5 );
+ assert( eMode>=0 && eMode<=ArraySize(azModeName) );
+
+ if( eMode==ArraySize(azModeName) ) return 0;
+ return azModeName[eMode];
+}
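+
+/* For illustration, sqlite3JournalModename() maps
+** PAGER_JOURNALMODE_DELETE (0) to "delete" and, when WAL support is
+** compiled in, PAGER_JOURNALMODE_WAL (5) to "wal". It returns NULL for
+** the first out-of-range value, which the journal_mode pragma below
+** uses as the loop terminator when scanning for a matching mode name.
+*/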
+
+/*
+** Locate a pragma in the aPragmaName[] array.
+*/
+static const PragmaName *pragmaLocate(const char *zName){
+ int upr, lwr, mid = 0, rc;
+ lwr = 0;
+ upr = ArraySize(aPragmaName)-1;
+ while( lwr<=upr ){
+ mid = (lwr+upr)/2;
+ rc = sqlite3_stricmp(zName, aPragmaName[mid].zName);
+ if( rc==0 ) break;
+ if( rc<0 ){
+ upr = mid - 1;
+ }else{
+ lwr = mid + 1;
+ }
+ }
+ return lwr>upr ? 0 : &aPragmaName[mid];
+}
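+
+/* pragmaLocate() is a binary search, so it depends on the generated
+** aPragmaName[] table in pragma.h above staying sorted by zName. For
+** illustration, pragmaLocate("synchronous") finds the entry dispatched
+** to the PragTyp_SYNCHRONOUS case below, while an unknown name such as
+** pragmaLocate("no_such_pragma") returns NULL and the PRAGMA statement
+** becomes a no-op.
+*/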
+
+/*
+** Create zero or more entries in the output for the SQL functions
+** defined by FuncDef p.
+*/
+static void pragmaFunclistLine(
+ Vdbe *v, /* The prepared statement being created */
+ FuncDef *p, /* A particular function definition */
+ int isBuiltin, /* True if this is a built-in function */
+ int showInternFuncs /* True if showing internal functions */
+){
+ for(; p; p=p->pNext){
+ const char *zType;
+ static const u32 mask =
+ SQLITE_DETERMINISTIC |
+ SQLITE_DIRECTONLY |
+ SQLITE_SUBTYPE |
+ SQLITE_INNOCUOUS |
+ SQLITE_FUNC_INTERNAL
+ ;
+ static const char *azEnc[] = { 0, "utf8", "utf16le", "utf16be" };
+
+ assert( SQLITE_FUNC_ENCMASK==0x3 );
+ assert( strcmp(azEnc[SQLITE_UTF8],"utf8")==0 );
+ assert( strcmp(azEnc[SQLITE_UTF16LE],"utf16le")==0 );
+ assert( strcmp(azEnc[SQLITE_UTF16BE],"utf16be")==0 );
+
+ if( p->xSFunc==0 ) continue;
+ if( (p->funcFlags & SQLITE_FUNC_INTERNAL)!=0
+ && showInternFuncs==0
+ ){
+ continue;
+ }
+ if( p->xValue!=0 ){
+ zType = "w";
+ }else if( p->xFinalize!=0 ){
+ zType = "a";
+ }else{
+ zType = "s";
+ }
+ sqlite3VdbeMultiLoad(v, 1, "sissii",
+ p->zName, isBuiltin,
+ zType, azEnc[p->funcFlags&SQLITE_FUNC_ENCMASK],
+ p->nArg,
+ (p->funcFlags & mask) ^ SQLITE_INNOCUOUS
+ );
+ }
+}
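+
+/* For illustration, a function_list row emitted by pragmaFunclistLine()
+** for the built-in scalar abs() carries zType "s"; per the tests above,
+** "a" marks aggregates (xFinalize set) and "w" marks window functions
+** (xValue set).
+*/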
+
+
+/*
+** Helper subroutine for PRAGMA integrity_check:
+**
+** Generate code to output a single-column result row with a value of the
+** string held in register 3. Decrement the result count in register 1
+** and halt once the maximum number of result rows has been issued.
+*/
+static int integrityCheckResultRow(Vdbe *v){
+ int addr;
+ sqlite3VdbeAddOp2(v, OP_ResultRow, 3, 1);
+ addr = sqlite3VdbeAddOp3(v, OP_IfPos, 1, sqlite3VdbeCurrentAddr(v)+2, 1);
+ VdbeCoverage(v);
+ sqlite3VdbeAddOp0(v, OP_Halt);
+ return addr;
+}
+
+/*
+** Process a pragma statement.
+**
+** Pragmas are of this form:
+**
+** PRAGMA [schema.]id [= value]
+**
+** The identifier might also be a string. The value is a string, an
+** identifier, or a number. If minusFlag is true, then the value is
+** a number that was preceded by a minus sign.
+**
+** If the left side is "database.id" then pId1 is the database name
+** and pId2 is the id. If the left side is just "id" then pId1 is the
+** id and pId2 is an empty string.
+*/
+SQLITE_PRIVATE void sqlite3Pragma(
+ Parse *pParse,
+ Token *pId1, /* First part of [schema.]id field */
+ Token *pId2, /* Second part of [schema.]id field, or NULL */
+ Token *pValue, /* Token for <value>, or NULL */
+ int minusFlag /* True if a '-' sign preceded <value> */
+){
+ char *zLeft = 0; /* Nul-terminated UTF-8 string <id> */
+ char *zRight = 0; /* Nul-terminated UTF-8 string <value>, or NULL */
+ const char *zDb = 0; /* The database name */
+ Token *pId; /* Pointer to <id> token */
+ char *aFcntl[4]; /* Argument to SQLITE_FCNTL_PRAGMA */
+ int iDb; /* Database index for <database> */
+  int rc;                              /* return value from SQLITE_FCNTL_PRAGMA */
+ sqlite3 *db = pParse->db; /* The database connection */
+ Db *pDb; /* The specific database being pragmaed */
+ Vdbe *v = sqlite3GetVdbe(pParse); /* Prepared statement */
+ const PragmaName *pPragma; /* The pragma */
+
+ if( v==0 ) return;
+ sqlite3VdbeRunOnlyOnce(v);
+ pParse->nMem = 2;
+
+ /* Interpret the [schema.] part of the pragma statement. iDb is the
+ ** index of the database this pragma is being applied to in db.aDb[]. */
+ iDb = sqlite3TwoPartName(pParse, pId1, pId2, &pId);
+ if( iDb<0 ) return;
+ pDb = &db->aDb[iDb];
+
+ /* If the temp database has been explicitly named as part of the
+ ** pragma, make sure it is open.
+ */
+ if( iDb==1 && sqlite3OpenTempDatabase(pParse) ){
+ return;
+ }
+
+ zLeft = sqlite3NameFromToken(db, pId);
+ if( !zLeft ) return;
+ if( minusFlag ){
+ zRight = sqlite3MPrintf(db, "-%T", pValue);
+ }else{
+ zRight = sqlite3NameFromToken(db, pValue);
+ }
+
+ assert( pId2 );
+ zDb = pId2->n>0 ? pDb->zDbSName : 0;
+ if( sqlite3AuthCheck(pParse, SQLITE_PRAGMA, zLeft, zRight, zDb) ){
+ goto pragma_out;
+ }
+
+ /* Send an SQLITE_FCNTL_PRAGMA file-control to the underlying VFS
+ ** connection. If it returns SQLITE_OK, then assume that the VFS
+ ** handled the pragma and generate a no-op prepared statement.
+ **
+ ** IMPLEMENTATION-OF: R-12238-55120 Whenever a PRAGMA statement is parsed,
+ ** an SQLITE_FCNTL_PRAGMA file control is sent to the open sqlite3_file
+ ** object corresponding to the database file to which the pragma
+ ** statement refers.
+ **
+ ** IMPLEMENTATION-OF: R-29875-31678 The argument to the SQLITE_FCNTL_PRAGMA
+ ** file control is an array of pointers to strings (char**) in which the
+ ** second element of the array is the name of the pragma and the third
+ ** element is the argument to the pragma or NULL if the pragma has no
+ ** argument.
+ */
+ aFcntl[0] = 0;
+ aFcntl[1] = zLeft;
+ aFcntl[2] = zRight;
+ aFcntl[3] = 0;
+ db->busyHandler.nBusy = 0;
+ rc = sqlite3_file_control(db, zDb, SQLITE_FCNTL_PRAGMA, (void*)aFcntl);
+ if( rc==SQLITE_OK ){
+ sqlite3VdbeSetNumCols(v, 1);
+ sqlite3VdbeSetColName(v, 0, COLNAME_NAME, aFcntl[0], SQLITE_TRANSIENT);
+ returnSingleText(v, aFcntl[0]);
+ sqlite3_free(aFcntl[0]);
+ goto pragma_out;
+ }
+ if( rc!=SQLITE_NOTFOUND ){
+ if( aFcntl[0] ){
+ sqlite3ErrorMsg(pParse, "%s", aFcntl[0]);
+ sqlite3_free(aFcntl[0]);
+ }
+ pParse->nErr++;
+ pParse->rc = rc;
+ goto pragma_out;
+ }
+
+ /* Locate the pragma in the lookup table */
+ pPragma = pragmaLocate(zLeft);
+ if( pPragma==0 ) goto pragma_out;
+
+ /* Make sure the database schema is loaded if the pragma requires that */
+ if( (pPragma->mPragFlg & PragFlg_NeedSchema)!=0 ){
+ if( sqlite3ReadSchema(pParse) ) goto pragma_out;
+ }
+
+ /* Register the result column names for pragmas that return results */
+ if( (pPragma->mPragFlg & PragFlg_NoColumns)==0
+ && ((pPragma->mPragFlg & PragFlg_NoColumns1)==0 || zRight==0)
+ ){
+ setPragmaResultColumnNames(v, pPragma);
+ }
+
+ /* Jump to the appropriate pragma handler */
+ switch( pPragma->ePragTyp ){
+
+#if !defined(SQLITE_OMIT_PAGER_PRAGMAS) && !defined(SQLITE_OMIT_DEPRECATED)
+ /*
+ ** PRAGMA [schema.]default_cache_size
+ ** PRAGMA [schema.]default_cache_size=N
+ **
+ ** The first form reports the current persistent setting for the
+ ** page cache size. The value returned is the maximum number of
+ ** pages in the page cache. The second form sets both the current
+ ** page cache size value and the persistent page cache size value
+ ** stored in the database file.
+ **
+ ** Older versions of SQLite would set the default cache size to a
+ ** negative number to indicate synchronous=OFF. These days, synchronous
+ ** is always on by default regardless of the sign of the default cache
+ ** size. But continue to take the absolute value of the default cache
+  ** size for historical compatibility.
+ */
+ case PragTyp_DEFAULT_CACHE_SIZE: {
+ static const int iLn = VDBE_OFFSET_LINENO(2);
+ static const VdbeOpList getCacheSize[] = {
+ { OP_Transaction, 0, 0, 0}, /* 0 */
+ { OP_ReadCookie, 0, 1, BTREE_DEFAULT_CACHE_SIZE}, /* 1 */
+ { OP_IfPos, 1, 8, 0},
+ { OP_Integer, 0, 2, 0},
+ { OP_Subtract, 1, 2, 1},
+ { OP_IfPos, 1, 8, 0},
+ { OP_Integer, 0, 1, 0}, /* 6 */
+ { OP_Noop, 0, 0, 0},
+ { OP_ResultRow, 1, 1, 0},
+ };
+ VdbeOp *aOp;
+ sqlite3VdbeUsesBtree(v, iDb);
+ if( !zRight ){
+ pParse->nMem += 2;
+ sqlite3VdbeVerifyNoMallocRequired(v, ArraySize(getCacheSize));
+ aOp = sqlite3VdbeAddOpList(v, ArraySize(getCacheSize), getCacheSize, iLn);
+ if( ONLY_IF_REALLOC_STRESS(aOp==0) ) break;
+ aOp[0].p1 = iDb;
+ aOp[1].p1 = iDb;
+ aOp[6].p1 = SQLITE_DEFAULT_CACHE_SIZE;
+ }else{
+ int size = sqlite3AbsInt32(sqlite3Atoi(zRight));
+ sqlite3BeginWriteOperation(pParse, 0, iDb);
+ sqlite3VdbeAddOp3(v, OP_SetCookie, iDb, BTREE_DEFAULT_CACHE_SIZE, size);
+ assert( sqlite3SchemaMutexHeld(db, iDb, 0) );
+ pDb->pSchema->cache_size = size;
+ sqlite3BtreeSetCacheSize(pDb->pBt, pDb->pSchema->cache_size);
+ }
+ break;
+ }
+#endif /* !SQLITE_OMIT_PAGER_PRAGMAS && !SQLITE_OMIT_DEPRECATED */
+
+#if !defined(SQLITE_OMIT_PAGER_PRAGMAS)
+ /*
+ ** PRAGMA [schema.]page_size
+ ** PRAGMA [schema.]page_size=N
+ **
+ ** The first form reports the current setting for the
+ ** database page size in bytes. The second form sets the
+ ** database page size value. The value can only be set if
+ ** the database has not yet been created.
+ */
+ case PragTyp_PAGE_SIZE: {
+ Btree *pBt = pDb->pBt;
+ assert( pBt!=0 );
+ if( !zRight ){
+ int size = ALWAYS(pBt) ? sqlite3BtreeGetPageSize(pBt) : 0;
+ returnSingleInt(v, size);
+ }else{
+ /* Malloc may fail when setting the page-size, as there is an internal
+ ** buffer that the pager module resizes using sqlite3_realloc().
+ */
+ db->nextPagesize = sqlite3Atoi(zRight);
+ if( SQLITE_NOMEM==sqlite3BtreeSetPageSize(pBt, db->nextPagesize,0,0) ){
+ sqlite3OomFault(db);
+ }
+ }
+ break;
+ }
+
+ /*
+ ** PRAGMA [schema.]secure_delete
+ ** PRAGMA [schema.]secure_delete=ON/OFF/FAST
+ **
+ ** The first form reports the current setting for the
+ ** secure_delete flag. The second form changes the secure_delete
+ ** flag setting and reports the new value.
+ */
+ case PragTyp_SECURE_DELETE: {
+ Btree *pBt = pDb->pBt;
+ int b = -1;
+ assert( pBt!=0 );
+ if( zRight ){
+ if( sqlite3_stricmp(zRight, "fast")==0 ){
+ b = 2;
+ }else{
+ b = sqlite3GetBoolean(zRight, 0);
+ }
+ }
+ if( pId2->n==0 && b>=0 ){
+ int ii;
+ for(ii=0; ii<db->nDb; ii++){
+ sqlite3BtreeSecureDelete(db->aDb[ii].pBt, b);
+ }
+ }
+ b = sqlite3BtreeSecureDelete(pBt, b);
+ returnSingleInt(v, b);
+ break;
+ }
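+
+  /* For illustration: "PRAGMA secure_delete=FAST" reaches the case above
+  ** with b==2. Because pId2->n==0 (no schema prefix) the setting is first
+  ** propagated to every attached database; the value for the target btree
+  ** is then read back and returned as the single result row.
+  */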
+
+ /*
+ ** PRAGMA [schema.]max_page_count
+ ** PRAGMA [schema.]max_page_count=N
+ **
+ ** The first form reports the current setting for the
+ ** maximum number of pages in the database file. The
+ ** second form attempts to change this setting. Both
+ ** forms return the current setting.
+ **
+ ** The absolute value of N is used. This is undocumented and might
+ ** change. The only purpose is to provide an easy way to test
+ ** the sqlite3AbsInt32() function.
+ **
+ ** PRAGMA [schema.]page_count
+ **
+ ** Return the number of pages in the specified database.
+ */
+ case PragTyp_PAGE_COUNT: {
+ int iReg;
+ i64 x = 0;
+ sqlite3CodeVerifySchema(pParse, iDb);
+ iReg = ++pParse->nMem;
+ if( sqlite3Tolower(zLeft[0])=='p' ){
+ sqlite3VdbeAddOp2(v, OP_Pagecount, iDb, iReg);
+ }else{
+ if( zRight && sqlite3DecOrHexToI64(zRight,&x)==0 ){
+ if( x<0 ) x = 0;
+ else if( x>0xfffffffe ) x = 0xfffffffe;
+ }else{
+ x = 0;
+ }
+ sqlite3VdbeAddOp3(v, OP_MaxPgcnt, iDb, iReg, (int)x);
+ }
+ sqlite3VdbeAddOp2(v, OP_ResultRow, iReg, 1);
+ break;
+ }
+
+ /*
+ ** PRAGMA [schema.]locking_mode
+ ** PRAGMA [schema.]locking_mode = (normal|exclusive)
+ */
+ case PragTyp_LOCKING_MODE: {
+ const char *zRet = "normal";
+ int eMode = getLockingMode(zRight);
+
+ if( pId2->n==0 && eMode==PAGER_LOCKINGMODE_QUERY ){
+ /* Simple "PRAGMA locking_mode;" statement. This is a query for
+ ** the current default locking mode (which may be different to
+ ** the locking-mode of the main database).
+ */
+ eMode = db->dfltLockMode;
+ }else{
+ Pager *pPager;
+ if( pId2->n==0 ){
+ /* This indicates that no database name was specified as part
+ ** of the PRAGMA command. In this case the locking-mode must be
+ ** set on all attached databases, as well as the main db file.
+ **
+ ** Also, the sqlite3.dfltLockMode variable is set so that
+ ** any subsequently attached databases also use the specified
+ ** locking mode.
+ */
+ int ii;
+ assert(pDb==&db->aDb[0]);
+ for(ii=2; ii<db->nDb; ii++){
+ pPager = sqlite3BtreePager(db->aDb[ii].pBt);
+ sqlite3PagerLockingMode(pPager, eMode);
+ }
+ db->dfltLockMode = (u8)eMode;
+ }
+ pPager = sqlite3BtreePager(pDb->pBt);
+ eMode = sqlite3PagerLockingMode(pPager, eMode);
+ }
+
+ assert( eMode==PAGER_LOCKINGMODE_NORMAL
+ || eMode==PAGER_LOCKINGMODE_EXCLUSIVE );
+ if( eMode==PAGER_LOCKINGMODE_EXCLUSIVE ){
+ zRet = "exclusive";
+ }
+ returnSingleText(v, zRet);
+ break;
+ }
+
+ /*
+ ** PRAGMA [schema.]journal_mode
+ ** PRAGMA [schema.]journal_mode =
+  **                (delete|persist|off|truncate|memory|wal)
+ */
+ case PragTyp_JOURNAL_MODE: {
+ int eMode; /* One of the PAGER_JOURNALMODE_XXX symbols */
+ int ii; /* Loop counter */
+
+ if( zRight==0 ){
+ /* If there is no "=MODE" part of the pragma, do a query for the
+ ** current mode */
+ eMode = PAGER_JOURNALMODE_QUERY;
+ }else{
+ const char *zMode;
+ int n = sqlite3Strlen30(zRight);
+ for(eMode=0; (zMode = sqlite3JournalModename(eMode))!=0; eMode++){
+ if( sqlite3StrNICmp(zRight, zMode, n)==0 ) break;
+ }
+ if( !zMode ){
+ /* If the "=MODE" part does not match any known journal mode,
+ ** then do a query */
+ eMode = PAGER_JOURNALMODE_QUERY;
+ }
+ if( eMode==PAGER_JOURNALMODE_OFF && (db->flags & SQLITE_Defensive)!=0 ){
+        /* Do not allow journal-mode "OFF" in defensive mode since the
+        ** database can become corrupted using ordinary SQL when the
+        ** journal is off. */
+ eMode = PAGER_JOURNALMODE_QUERY;
+ }
+ }
+ if( eMode==PAGER_JOURNALMODE_QUERY && pId2->n==0 ){
+ /* Convert "PRAGMA journal_mode" into "PRAGMA main.journal_mode" */
+ iDb = 0;
+ pId2->n = 1;
+ }
+ for(ii=db->nDb-1; ii>=0; ii--){
+ if( db->aDb[ii].pBt && (ii==iDb || pId2->n==0) ){
+ sqlite3VdbeUsesBtree(v, ii);
+ sqlite3VdbeAddOp3(v, OP_JournalMode, ii, 1, eMode);
+ }
+ }
+ sqlite3VdbeAddOp2(v, OP_ResultRow, 1, 1);
+ break;
+ }
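+
+  /* For illustration: "PRAGMA journal_mode=wal" with no schema prefix
+  ** codes an OP_JournalMode for every open database, while a bare
+  ** "PRAGMA journal_mode" is rewritten above into a query
+  ** (eMode==PAGER_JOURNALMODE_QUERY) against "main" only.
+  */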
+
+ /*
+ ** PRAGMA [schema.]journal_size_limit
+ ** PRAGMA [schema.]journal_size_limit=N
+ **
+ ** Get or set the size limit on rollback journal files.
+ */
+ case PragTyp_JOURNAL_SIZE_LIMIT: {
+ Pager *pPager = sqlite3BtreePager(pDb->pBt);
+ i64 iLimit = -2;
+ if( zRight ){
+ sqlite3DecOrHexToI64(zRight, &iLimit);
+ if( iLimit<-1 ) iLimit = -1;
+ }
+ iLimit = sqlite3PagerJournalSizeLimit(pPager, iLimit);
+ returnSingleInt(v, iLimit);
+ break;
+ }
+
+#endif /* SQLITE_OMIT_PAGER_PRAGMAS */
+
+ /*
+ ** PRAGMA [schema.]auto_vacuum
+ ** PRAGMA [schema.]auto_vacuum=N
+ **
+ ** Get or set the value of the database 'auto-vacuum' parameter.
+ ** The value is one of: 0 NONE 1 FULL 2 INCREMENTAL
+ */
+#ifndef SQLITE_OMIT_AUTOVACUUM
+ case PragTyp_AUTO_VACUUM: {
+ Btree *pBt = pDb->pBt;
+ assert( pBt!=0 );
+ if( !zRight ){
+ returnSingleInt(v, sqlite3BtreeGetAutoVacuum(pBt));
+ }else{
+ int eAuto = getAutoVacuum(zRight);
+ assert( eAuto>=0 && eAuto<=2 );
+ db->nextAutovac = (u8)eAuto;
+      /* Call SetAutoVacuum() to initialize the internal auto and
+ ** incr-vacuum flags. This is required in case this connection
+ ** creates the database file. It is important that it is created
+ ** as an auto-vacuum capable db.
+ */
+ rc = sqlite3BtreeSetAutoVacuum(pBt, eAuto);
+ if( rc==SQLITE_OK && (eAuto==1 || eAuto==2) ){
+ /* When setting the auto_vacuum mode to either "full" or
+ ** "incremental", write the value of meta[6] in the database
+ ** file. Before writing to meta[6], check that meta[3] indicates
+ ** that this really is an auto-vacuum capable database.
+ */
+ static const int iLn = VDBE_OFFSET_LINENO(2);
+ static const VdbeOpList setMeta6[] = {
+ { OP_Transaction, 0, 1, 0}, /* 0 */
+ { OP_ReadCookie, 0, 1, BTREE_LARGEST_ROOT_PAGE},
+ { OP_If, 1, 0, 0}, /* 2 */
+ { OP_Halt, SQLITE_OK, OE_Abort, 0}, /* 3 */
+ { OP_SetCookie, 0, BTREE_INCR_VACUUM, 0}, /* 4 */
+ };
+ VdbeOp *aOp;
+ int iAddr = sqlite3VdbeCurrentAddr(v);
+ sqlite3VdbeVerifyNoMallocRequired(v, ArraySize(setMeta6));
+ aOp = sqlite3VdbeAddOpList(v, ArraySize(setMeta6), setMeta6, iLn);
+ if( ONLY_IF_REALLOC_STRESS(aOp==0) ) break;
+ aOp[0].p1 = iDb;
+ aOp[1].p1 = iDb;
+ aOp[2].p2 = iAddr+4;
+ aOp[4].p1 = iDb;
+ aOp[4].p3 = eAuto - 1;
+ sqlite3VdbeUsesBtree(v, iDb);
+ }
+ }
+ break;
+ }
+#endif
+
+ /*
+ ** PRAGMA [schema.]incremental_vacuum(N)
+ **
+ ** Do N steps of incremental vacuuming on a database.
+ */
+#ifndef SQLITE_OMIT_AUTOVACUUM
+ case PragTyp_INCREMENTAL_VACUUM: {
+ int iLimit, addr;
+ if( zRight==0 || !sqlite3GetInt32(zRight, &iLimit) || iLimit<=0 ){
+ iLimit = 0x7fffffff;
+ }
+ sqlite3BeginWriteOperation(pParse, 0, iDb);
+ sqlite3VdbeAddOp2(v, OP_Integer, iLimit, 1);
+ addr = sqlite3VdbeAddOp1(v, OP_IncrVacuum, iDb); VdbeCoverage(v);
+ sqlite3VdbeAddOp1(v, OP_ResultRow, 1);
+ sqlite3VdbeAddOp2(v, OP_AddImm, 1, -1);
+ sqlite3VdbeAddOp2(v, OP_IfPos, 1, addr); VdbeCoverage(v);
+ sqlite3VdbeJumpHere(v, addr);
+ break;
+ }
+#endif
+
+#ifndef SQLITE_OMIT_PAGER_PRAGMAS
+ /*
+ ** PRAGMA [schema.]cache_size
+ ** PRAGMA [schema.]cache_size=N
+ **
+ ** The first form reports the current local setting for the
+ ** page cache size. The second form sets the local
+ ** page cache size value. If N is positive then that is the
+ ** number of pages in the cache. If N is negative, then the
+ ** number of pages is adjusted so that the cache uses -N kibibytes
+ ** of memory.
+ */
+ case PragTyp_CACHE_SIZE: {
+ assert( sqlite3SchemaMutexHeld(db, iDb, 0) );
+ if( !zRight ){
+ returnSingleInt(v, pDb->pSchema->cache_size);
+ }else{
+ int size = sqlite3Atoi(zRight);
+ pDb->pSchema->cache_size = size;
+ sqlite3BtreeSetCacheSize(pDb->pBt, pDb->pSchema->cache_size);
+ }
+ break;
+ }
+
+ /*
+ ** PRAGMA [schema.]cache_spill
+ ** PRAGMA cache_spill=BOOLEAN
+ ** PRAGMA [schema.]cache_spill=N
+ **
+ ** The first form reports the current local setting for the
+ ** page cache spill size. The second form turns cache spill on
+  ** or off. When turning cache spill on, the size is set to the
+  ** current cache_size. The third form sets a spill size that
+  ** may be different from the cache size.
+ ** If N is positive then that is the
+ ** number of pages in the cache. If N is negative, then the
+ ** number of pages is adjusted so that the cache uses -N kibibytes
+ ** of memory.
+ **
+  ** If the number of cache_spill pages is less than the number of
+ ** cache_size pages, no spilling occurs until the page count exceeds
+ ** the number of cache_size pages.
+ **
+ ** The cache_spill=BOOLEAN setting applies to all attached schemas,
+ ** not just the schema specified.
+ */
+ case PragTyp_CACHE_SPILL: {
+ assert( sqlite3SchemaMutexHeld(db, iDb, 0) );
+ if( !zRight ){
+ returnSingleInt(v,
+ (db->flags & SQLITE_CacheSpill)==0 ? 0 :
+ sqlite3BtreeSetSpillSize(pDb->pBt,0));
+ }else{
+ int size = 1;
+ if( sqlite3GetInt32(zRight, &size) ){
+ sqlite3BtreeSetSpillSize(pDb->pBt, size);
+ }
+ if( sqlite3GetBoolean(zRight, size!=0) ){
+ db->flags |= SQLITE_CacheSpill;
+ }else{
+ db->flags &= ~(u64)SQLITE_CacheSpill;
+ }
+ setAllPagerFlags(db);
+ }
+ break;
+ }
+
+ /*
+ ** PRAGMA [schema.]mmap_size(N)
+ **
+  ** Used to set the mapping size limit. The mapping size limit is
+ ** used to limit the aggregate size of all memory mapped regions of the
+ ** database file. If this parameter is set to zero, then memory mapping
+ ** is not used at all. If N is negative, then the default memory map
+ ** limit determined by sqlite3_config(SQLITE_CONFIG_MMAP_SIZE) is set.
+ ** The parameter N is measured in bytes.
+ **
+ ** This value is advisory. The underlying VFS is free to memory map
+ ** as little or as much as it wants. Except, if N is set to 0 then the
+ ** upper layers will never invoke the xFetch interfaces to the VFS.
+ */
+ case PragTyp_MMAP_SIZE: {
+ sqlite3_int64 sz;
+#if SQLITE_MAX_MMAP_SIZE>0
+ assert( sqlite3SchemaMutexHeld(db, iDb, 0) );
+ if( zRight ){
+ int ii;
+ sqlite3DecOrHexToI64(zRight, &sz);
+ if( sz<0 ) sz = sqlite3GlobalConfig.szMmap;
+ if( pId2->n==0 ) db->szMmap = sz;
+ for(ii=db->nDb-1; ii>=0; ii--){
+ if( db->aDb[ii].pBt && (ii==iDb || pId2->n==0) ){
+ sqlite3BtreeSetMmapLimit(db->aDb[ii].pBt, sz);
+ }
+ }
+ }
+ sz = -1;
+ rc = sqlite3_file_control(db, zDb, SQLITE_FCNTL_MMAP_SIZE, &sz);
+#else
+ sz = 0;
+ rc = SQLITE_OK;
+#endif
+ if( rc==SQLITE_OK ){
+ returnSingleInt(v, sz);
+ }else if( rc!=SQLITE_NOTFOUND ){
+ pParse->nErr++;
+ pParse->rc = rc;
+ }
+ break;
+ }
+
+ /*
+ ** PRAGMA temp_store
+ ** PRAGMA temp_store = "default"|"memory"|"file"
+ **
+ ** Return or set the local value of the temp_store flag. Changing
+ ** the local value does not make changes to the disk file and the default
+ ** value will be restored the next time the database is opened.
+ **
+ ** Note that it is possible for the library compile-time options to
+  ** override this setting.
+ */
+ case PragTyp_TEMP_STORE: {
+ if( !zRight ){
+ returnSingleInt(v, db->temp_store);
+ }else{
+ changeTempStorage(pParse, zRight);
+ }
+ break;
+ }
+
+ /*
+ ** PRAGMA temp_store_directory
+ ** PRAGMA temp_store_directory = ""|"directory_name"
+ **
+ ** Return or set the local value of the temp_store_directory flag. Changing
+ ** the value sets a specific directory to be used for temporary files.
+ ** Setting to a null string reverts to the default temporary directory search.
+  ** If the temporary directory is changed and the TEMP database is
+  ** file-backed, invalidateTempStorage() is called to discard it.
+ **
+ */
+ case PragTyp_TEMP_STORE_DIRECTORY: {
+ if( !zRight ){
+ returnSingleText(v, sqlite3_temp_directory);
+ }else{
+#ifndef SQLITE_OMIT_WSD
+ if( zRight[0] ){
+ int res;
+ rc = sqlite3OsAccess(db->pVfs, zRight, SQLITE_ACCESS_READWRITE, &res);
+ if( rc!=SQLITE_OK || res==0 ){
+ sqlite3ErrorMsg(pParse, "not a writable directory");
+ goto pragma_out;
+ }
+ }
+ if( SQLITE_TEMP_STORE==0
+ || (SQLITE_TEMP_STORE==1 && db->temp_store<=1)
+ || (SQLITE_TEMP_STORE==2 && db->temp_store==1)
+ ){
+ invalidateTempStorage(pParse);
+ }
+ sqlite3_free(sqlite3_temp_directory);
+ if( zRight[0] ){
+ sqlite3_temp_directory = sqlite3_mprintf("%s", zRight);
+ }else{
+ sqlite3_temp_directory = 0;
+ }
+#endif /* SQLITE_OMIT_WSD */
+ }
+ break;
+ }
+
+#if SQLITE_OS_WIN
+ /*
+ ** PRAGMA data_store_directory
+ ** PRAGMA data_store_directory = ""|"directory_name"
+ **
+ ** Return or set the local value of the data_store_directory flag. Changing
+ ** the value sets a specific directory to be used for database files that
+ ** were specified with a relative pathname. Setting to a null string reverts
+ ** to the default database directory, which for database files specified with
+ ** a relative path will probably be based on the current directory for the
+  ** process. Database files specified with an absolute path are not
+  ** affected by this setting, regardless of its value.
+ **
+ */
+ case PragTyp_DATA_STORE_DIRECTORY: {
+ if( !zRight ){
+ returnSingleText(v, sqlite3_data_directory);
+ }else{
+#ifndef SQLITE_OMIT_WSD
+ if( zRight[0] ){
+ int res;
+ rc = sqlite3OsAccess(db->pVfs, zRight, SQLITE_ACCESS_READWRITE, &res);
+ if( rc!=SQLITE_OK || res==0 ){
+ sqlite3ErrorMsg(pParse, "not a writable directory");
+ goto pragma_out;
+ }
+ }
+ sqlite3_free(sqlite3_data_directory);
+ if( zRight[0] ){
+ sqlite3_data_directory = sqlite3_mprintf("%s", zRight);
+ }else{
+ sqlite3_data_directory = 0;
+ }
+#endif /* SQLITE_OMIT_WSD */
+ }
+ break;
+ }
+#endif
+
+#if SQLITE_ENABLE_LOCKING_STYLE
+ /*
+ ** PRAGMA [schema.]lock_proxy_file
+ ** PRAGMA [schema.]lock_proxy_file = ":auto:"|"lock_file_path"
+ **
+ ** Return or set the value of the lock_proxy_file flag. Changing
+ ** the value sets a specific file to be used for database access locks.
+ **
+ */
+ case PragTyp_LOCK_PROXY_FILE: {
+ if( !zRight ){
+ Pager *pPager = sqlite3BtreePager(pDb->pBt);
+ char *proxy_file_path = NULL;
+ sqlite3_file *pFile = sqlite3PagerFile(pPager);
+ sqlite3OsFileControlHint(pFile, SQLITE_GET_LOCKPROXYFILE,
+ &proxy_file_path);
+ returnSingleText(v, proxy_file_path);
+ }else{
+ Pager *pPager = sqlite3BtreePager(pDb->pBt);
+ sqlite3_file *pFile = sqlite3PagerFile(pPager);
+ int res;
+ if( zRight[0] ){
+ res=sqlite3OsFileControl(pFile, SQLITE_SET_LOCKPROXYFILE,
+ zRight);
+ } else {
+ res=sqlite3OsFileControl(pFile, SQLITE_SET_LOCKPROXYFILE,
+ NULL);
+ }
+ if( res!=SQLITE_OK ){
+ sqlite3ErrorMsg(pParse, "failed to set lock proxy file");
+ goto pragma_out;
+ }
+ }
+ break;
+ }
+#endif /* SQLITE_ENABLE_LOCKING_STYLE */
+
+ /*
+ ** PRAGMA [schema.]synchronous
+ ** PRAGMA [schema.]synchronous=OFF|ON|NORMAL|FULL|EXTRA
+ **
+ ** Return or set the local value of the synchronous flag. Changing
+ ** the local value does not make changes to the disk file and the
+ ** default value will be restored the next time the database is
+ ** opened.
+ */
+ case PragTyp_SYNCHRONOUS: {
+ if( !zRight ){
+ returnSingleInt(v, pDb->safety_level-1);
+ }else{
+ if( !db->autoCommit ){
+ sqlite3ErrorMsg(pParse,
+ "Safety level may not be changed inside a transaction");
+ }else if( iDb!=1 ){
+ int iLevel = (getSafetyLevel(zRight,0,1)+1) & PAGER_SYNCHRONOUS_MASK;
+ if( iLevel==0 ) iLevel = 1;
+ pDb->safety_level = iLevel;
+ pDb->bSyncSet = 1;
+ setAllPagerFlags(db);
+ }
+ }
+ break;
+ }
+#endif /* SQLITE_OMIT_PAGER_PRAGMAS */
+
+#ifndef SQLITE_OMIT_FLAG_PRAGMAS
+ case PragTyp_FLAG: {
+ if( zRight==0 ){
+ setPragmaResultColumnNames(v, pPragma);
+ returnSingleInt(v, (db->flags & pPragma->iArg)!=0 );
+ }else{
+ u64 mask = pPragma->iArg; /* Mask of bits to set or clear. */
+ if( db->autoCommit==0 ){
+ /* Foreign key support may not be enabled or disabled while not
+ ** in auto-commit mode. */
+ mask &= ~(SQLITE_ForeignKeys);
+ }
+#if SQLITE_USER_AUTHENTICATION
+ if( db->auth.authLevel==UAUTH_User ){
+ /* Do not allow non-admin users to modify the schema arbitrarily */
+ mask &= ~(SQLITE_WriteSchema);
+ }
+#endif
+
+ if( sqlite3GetBoolean(zRight, 0) ){
+ db->flags |= mask;
+ }else{
+ db->flags &= ~mask;
+ if( mask==SQLITE_DeferFKs ) db->nDeferredImmCons = 0;
+ }
+
+ /* Many of the flag-pragmas modify the code generated by the SQL
+ ** compiler (eg. count_changes). So add an opcode to expire all
+ ** compiled SQL statements after modifying a pragma value.
+ */
+ sqlite3VdbeAddOp0(v, OP_Expire);
+ setAllPagerFlags(db);
+ }
+ break;
+ }
+#endif /* SQLITE_OMIT_FLAG_PRAGMAS */
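+
+  /* For illustration: "PRAGMA foreign_keys=ON" is dispatched to the
+  ** PragTyp_FLAG case above with pPragma->iArg==SQLITE_ForeignKeys. The
+  ** bit is set in db->flags and an OP_Expire is coded so that prepared
+  ** statements are recompiled with the new setting; inside an explicit
+  ** transaction the bit is masked out, making the pragma a no-op there.
+  */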
+
+#ifndef SQLITE_OMIT_SCHEMA_PRAGMAS
+ /*
+ ** PRAGMA table_info(<table>)
+ **
+ ** Return a single row for each column of the named table. The columns of
+ ** the returned data set are:
+ **
+ ** cid: Column id (numbered from left to right, starting at 0)
+ ** name: Column name
+ ** type: Column declaration type.
+ ** notnull: True if 'NOT NULL' is part of column declaration
+ ** dflt_value: The default value for the column, if any.
+ ** pk: Non-zero for PK fields.
+ */
+ case PragTyp_TABLE_INFO: if( zRight ){
+ Table *pTab;
+ sqlite3CodeVerifyNamedSchema(pParse, zDb);
+ pTab = sqlite3LocateTable(pParse, LOCATE_NOERR, zRight, zDb);
+ if( pTab ){
+ int i, k;
+ int nHidden = 0;
+ Column *pCol;
+ Index *pPk = sqlite3PrimaryKeyIndex(pTab);
+ pParse->nMem = 7;
+ sqlite3ViewGetColumnNames(pParse, pTab);
+ for(i=0, pCol=pTab->aCol; i<pTab->nCol; i++, pCol++){
+ int isHidden = 0;
+ if( pCol->colFlags & COLFLAG_NOINSERT ){
+ if( pPragma->iArg==0 ){
+ nHidden++;
+ continue;
+ }
+ if( pCol->colFlags & COLFLAG_VIRTUAL ){
+ isHidden = 2; /* GENERATED ALWAYS AS ... VIRTUAL */
+ }else if( pCol->colFlags & COLFLAG_STORED ){
+ isHidden = 3; /* GENERATED ALWAYS AS ... STORED */
+ }else{ assert( pCol->colFlags & COLFLAG_HIDDEN );
+ isHidden = 1; /* HIDDEN */
+ }
+ }
+ if( (pCol->colFlags & COLFLAG_PRIMKEY)==0 ){
+ k = 0;
+ }else if( pPk==0 ){
+ k = 1;
+ }else{
+ for(k=1; k<=pTab->nCol && pPk->aiColumn[k-1]!=i; k++){}
+ }
+ assert( pCol->pDflt==0 || pCol->pDflt->op==TK_SPAN || isHidden>=2 );
+ sqlite3VdbeMultiLoad(v, 1, pPragma->iArg ? "issisii" : "issisi",
+ i-nHidden,
+ pCol->zName,
+ sqlite3ColumnType(pCol,""),
+ pCol->notNull ? 1 : 0,
+ pCol->pDflt && isHidden<2 ? pCol->pDflt->u.zToken : 0,
+ k,
+ isHidden);
+ }
+ }
+ }
+ break;
+
+#ifdef SQLITE_DEBUG
+ case PragTyp_STATS: {
+ Index *pIdx;
+ HashElem *i;
+ pParse->nMem = 5;
+ sqlite3CodeVerifySchema(pParse, iDb);
+ for(i=sqliteHashFirst(&pDb->pSchema->tblHash); i; i=sqliteHashNext(i)){
+ Table *pTab = sqliteHashData(i);
+ sqlite3VdbeMultiLoad(v, 1, "ssiii",
+ pTab->zName,
+ 0,
+ pTab->szTabRow,
+ pTab->nRowLogEst,
+ pTab->tabFlags);
+ for(pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext){
+ sqlite3VdbeMultiLoad(v, 2, "siiiX",
+ pIdx->zName,
+ pIdx->szIdxRow,
+ pIdx->aiRowLogEst[0],
+ pIdx->hasStat1);
+ sqlite3VdbeAddOp2(v, OP_ResultRow, 1, 5);
+ }
+ }
+ }
+ break;
+#endif
+
+ case PragTyp_INDEX_INFO: if( zRight ){
+ Index *pIdx;
+ Table *pTab;
+ pIdx = sqlite3FindIndex(db, zRight, zDb);
+ if( pIdx==0 ){
+ /* If there is no index named zRight, check to see if there is a
+ ** WITHOUT ROWID table named zRight, and if there is, show the
+ ** structure of the PRIMARY KEY index for that table. */
+ pTab = sqlite3LocateTable(pParse, LOCATE_NOERR, zRight, zDb);
+ if( pTab && !HasRowid(pTab) ){
+ pIdx = sqlite3PrimaryKeyIndex(pTab);
+ }
+ }
+ if( pIdx ){
+ int iIdxDb = sqlite3SchemaToIndex(db, pIdx->pSchema);
+ int i;
+ int mx;
+ if( pPragma->iArg ){
+ /* PRAGMA index_xinfo (newer version with more rows and columns) */
+ mx = pIdx->nColumn;
+ pParse->nMem = 6;
+ }else{
+ /* PRAGMA index_info (legacy version) */
+ mx = pIdx->nKeyCol;
+ pParse->nMem = 3;
+ }
+ pTab = pIdx->pTable;
+ sqlite3CodeVerifySchema(pParse, iIdxDb);
+ assert( pParse->nMem<=pPragma->nPragCName );
+ for(i=0; i<mx; i++){
+ i16 cnum = pIdx->aiColumn[i];
+ sqlite3VdbeMultiLoad(v, 1, "iisX", i, cnum,
+ cnum<0 ? 0 : pTab->aCol[cnum].zName);
+ if( pPragma->iArg ){
+ sqlite3VdbeMultiLoad(v, 4, "isiX",
+ pIdx->aSortOrder[i],
+ pIdx->azColl[i],
+ i<pIdx->nKeyCol);
+ }
+ sqlite3VdbeAddOp2(v, OP_ResultRow, 1, pParse->nMem);
+ }
+ }
+ }
+ break;
+
+ case PragTyp_INDEX_LIST: if( zRight ){
+ Index *pIdx;
+ Table *pTab;
+ int i;
+ pTab = sqlite3FindTable(db, zRight, zDb);
+ if( pTab ){
+ int iTabDb = sqlite3SchemaToIndex(db, pTab->pSchema);
+ pParse->nMem = 5;
+ sqlite3CodeVerifySchema(pParse, iTabDb);
+ for(pIdx=pTab->pIndex, i=0; pIdx; pIdx=pIdx->pNext, i++){
+ const char *azOrigin[] = { "c", "u", "pk" };
+ sqlite3VdbeMultiLoad(v, 1, "isisi",
+ i,
+ pIdx->zName,
+ IsUniqueIndex(pIdx),
+ azOrigin[pIdx->idxType],
+ pIdx->pPartIdxWhere!=0);
+ }
+ }
+ }
+ break;
+
+ case PragTyp_DATABASE_LIST: {
+ int i;
+ pParse->nMem = 3;
+ for(i=0; i<db->nDb; i++){
+ if( db->aDb[i].pBt==0 ) continue;
+ assert( db->aDb[i].zDbSName!=0 );
+ sqlite3VdbeMultiLoad(v, 1, "iss",
+ i,
+ db->aDb[i].zDbSName,
+ sqlite3BtreeGetFilename(db->aDb[i].pBt));
+ }
+ }
+ break;
+
+ case PragTyp_COLLATION_LIST: {
+ int i = 0;
+ HashElem *p;
+ pParse->nMem = 2;
+ for(p=sqliteHashFirst(&db->aCollSeq); p; p=sqliteHashNext(p)){
+ CollSeq *pColl = (CollSeq *)sqliteHashData(p);
+ sqlite3VdbeMultiLoad(v, 1, "is", i++, pColl->zName);
+ }
+ }
+ break;
+
+#ifndef SQLITE_OMIT_INTROSPECTION_PRAGMAS
+ case PragTyp_FUNCTION_LIST: {
+ int i;
+ HashElem *j;
+ FuncDef *p;
+ int showInternFunc = (db->mDbFlags & DBFLAG_InternalFunc)!=0;
+ pParse->nMem = 6;
+ for(i=0; i<SQLITE_FUNC_HASH_SZ; i++){
+ for(p=sqlite3BuiltinFunctions.a[i]; p; p=p->u.pHash ){
+ pragmaFunclistLine(v, p, 1, showInternFunc);
+ }
+ }
+ for(j=sqliteHashFirst(&db->aFunc); j; j=sqliteHashNext(j)){
+ p = (FuncDef*)sqliteHashData(j);
+ pragmaFunclistLine(v, p, 0, showInternFunc);
+ }
+ }
+ break;
+
+#ifndef SQLITE_OMIT_VIRTUALTABLE
+ case PragTyp_MODULE_LIST: {
+ HashElem *j;
+ pParse->nMem = 1;
+ for(j=sqliteHashFirst(&db->aModule); j; j=sqliteHashNext(j)){
+ Module *pMod = (Module*)sqliteHashData(j);
+ sqlite3VdbeMultiLoad(v, 1, "s", pMod->zName);
+ }
+ }
+ break;
+#endif /* SQLITE_OMIT_VIRTUALTABLE */
+
+ case PragTyp_PRAGMA_LIST: {
+ int i;
+ for(i=0; i<ArraySize(aPragmaName); i++){
+ sqlite3VdbeMultiLoad(v, 1, "s", aPragmaName[i].zName);
+ }
+ }
+ break;
+#endif /* SQLITE_INTROSPECTION_PRAGMAS */
+
+#endif /* SQLITE_OMIT_SCHEMA_PRAGMAS */
+
+#ifndef SQLITE_OMIT_FOREIGN_KEY
+ case PragTyp_FOREIGN_KEY_LIST: if( zRight ){
+ FKey *pFK;
+ Table *pTab;
+ pTab = sqlite3FindTable(db, zRight, zDb);
+ if( pTab ){
+ pFK = pTab->pFKey;
+ if( pFK ){
+ int iTabDb = sqlite3SchemaToIndex(db, pTab->pSchema);
+ int i = 0;
+ pParse->nMem = 8;
+ sqlite3CodeVerifySchema(pParse, iTabDb);
+ while(pFK){
+ int j;
+ for(j=0; j<pFK->nCol; j++){
+ sqlite3VdbeMultiLoad(v, 1, "iissssss",
+ i,
+ j,
+ pFK->zTo,
+ pTab->aCol[pFK->aCol[j].iFrom].zName,
+ pFK->aCol[j].zCol,
+ actionName(pFK->aAction[1]), /* ON UPDATE */
+ actionName(pFK->aAction[0]), /* ON DELETE */
+ "NONE");
+ }
+ ++i;
+ pFK = pFK->pNextFrom;
+ }
+ }
+ }
+ }
+ break;
+#endif /* !defined(SQLITE_OMIT_FOREIGN_KEY) */
+
+#ifndef SQLITE_OMIT_FOREIGN_KEY
+#ifndef SQLITE_OMIT_TRIGGER
+ case PragTyp_FOREIGN_KEY_CHECK: {
+ FKey *pFK; /* A foreign key constraint */
+    Table *pTab;           /* Child table containing "REFERENCES" keyword */
+ Table *pParent; /* Parent table that child points to */
+ Index *pIdx; /* Index in the parent table */
+ int i; /* Loop counter: Foreign key number for pTab */
+ int j; /* Loop counter: Field of the foreign key */
+ HashElem *k; /* Loop counter: Next table in schema */
+ int x; /* result variable */
+ int regResult; /* 3 registers to hold a result row */
+ int regKey; /* Register to hold key for checking the FK */
+ int regRow; /* Registers to hold a row from pTab */
+ int addrTop; /* Top of a loop checking foreign keys */
+ int addrOk; /* Jump here if the key is OK */
+ int *aiCols; /* child to parent column mapping */
+
+ regResult = pParse->nMem+1;
+ pParse->nMem += 4;
+ regKey = ++pParse->nMem;
+ regRow = ++pParse->nMem;
+ k = sqliteHashFirst(&db->aDb[iDb].pSchema->tblHash);
+ while( k ){
+ if( zRight ){
+ pTab = sqlite3LocateTable(pParse, 0, zRight, zDb);
+ k = 0;
+ }else{
+ pTab = (Table*)sqliteHashData(k);
+ k = sqliteHashNext(k);
+ }
+ if( pTab==0 || pTab->pFKey==0 ) continue;
+ iDb = sqlite3SchemaToIndex(db, pTab->pSchema);
+ zDb = db->aDb[iDb].zDbSName;
+ sqlite3CodeVerifySchema(pParse, iDb);
+ sqlite3TableLock(pParse, iDb, pTab->tnum, 0, pTab->zName);
+ if( pTab->nCol+regRow>pParse->nMem ) pParse->nMem = pTab->nCol + regRow;
+ sqlite3OpenTable(pParse, 0, iDb, pTab, OP_OpenRead);
+ sqlite3VdbeLoadString(v, regResult, pTab->zName);
+ for(i=1, pFK=pTab->pFKey; pFK; i++, pFK=pFK->pNextFrom){
+ pParent = sqlite3FindTable(db, pFK->zTo, zDb);
+ if( pParent==0 ) continue;
+ pIdx = 0;
+ sqlite3TableLock(pParse, iDb, pParent->tnum, 0, pParent->zName);
+ x = sqlite3FkLocateIndex(pParse, pParent, pFK, &pIdx, 0);
+ if( x==0 ){
+ if( pIdx==0 ){
+ sqlite3OpenTable(pParse, i, iDb, pParent, OP_OpenRead);
+ }else{
+ sqlite3VdbeAddOp3(v, OP_OpenRead, i, pIdx->tnum, iDb);
+ sqlite3VdbeSetP4KeyInfo(pParse, pIdx);
+ }
+ }else{
+ k = 0;
+ break;
+ }
+ }
+ assert( pParse->nErr>0 || pFK==0 );
+ if( pFK ) break;
+ if( pParse->nTab<i ) pParse->nTab = i;
+ addrTop = sqlite3VdbeAddOp1(v, OP_Rewind, 0); VdbeCoverage(v);
+ for(i=1, pFK=pTab->pFKey; pFK; i++, pFK=pFK->pNextFrom){
+ pParent = sqlite3FindTable(db, pFK->zTo, zDb);
+ pIdx = 0;
+ aiCols = 0;
+ if( pParent ){
+ x = sqlite3FkLocateIndex(pParse, pParent, pFK, &pIdx, &aiCols);
+ assert( x==0 );
+ }
+ addrOk = sqlite3VdbeMakeLabel(pParse);
+
+ /* Generate code to read the child key values into registers
+ ** regRow..regRow+n. If any of the child key values are NULL, this
+ ** row cannot cause an FK violation. Jump directly to addrOk in
+ ** this case. */
+ for(j=0; j<pFK->nCol; j++){
+ int iCol = aiCols ? aiCols[j] : pFK->aCol[j].iFrom;
+ sqlite3ExprCodeGetColumnOfTable(v, pTab, 0, iCol, regRow+j);
+ sqlite3VdbeAddOp2(v, OP_IsNull, regRow+j, addrOk); VdbeCoverage(v);
+ }
+
+ /* Generate code to query the parent index for a matching parent
+ ** key. If a match is found, jump to addrOk. */
+ if( pIdx ){
+ sqlite3VdbeAddOp4(v, OP_MakeRecord, regRow, pFK->nCol, regKey,
+ sqlite3IndexAffinityStr(db,pIdx), pFK->nCol);
+ sqlite3VdbeAddOp4Int(v, OP_Found, i, addrOk, regKey, 0);
+ VdbeCoverage(v);
+ }else if( pParent ){
+ int jmp = sqlite3VdbeCurrentAddr(v)+2;
+ sqlite3VdbeAddOp3(v, OP_SeekRowid, i, jmp, regRow); VdbeCoverage(v);
+ sqlite3VdbeGoto(v, addrOk);
+ assert( pFK->nCol==1 );
+ }
+
+ /* Generate code to report an FK violation to the caller. */
+ if( HasRowid(pTab) ){
+ sqlite3VdbeAddOp2(v, OP_Rowid, 0, regResult+1);
+ }else{
+ sqlite3VdbeAddOp2(v, OP_Null, 0, regResult+1);
+ }
+ sqlite3VdbeMultiLoad(v, regResult+2, "siX", pFK->zTo, i-1);
+ sqlite3VdbeAddOp2(v, OP_ResultRow, regResult, 4);
+ sqlite3VdbeResolveLabel(v, addrOk);
+ sqlite3DbFree(db, aiCols);
+ }
+ sqlite3VdbeAddOp2(v, OP_Next, 0, addrTop+1); VdbeCoverage(v);
+ sqlite3VdbeJumpHere(v, addrTop);
+ }
+ }
+ break;
+#endif /* !defined(SQLITE_OMIT_TRIGGER) */
+#endif /* !defined(SQLITE_OMIT_FOREIGN_KEY) */
+
+#ifndef SQLITE_OMIT_CASE_SENSITIVE_LIKE_PRAGMA
+ /* Reinstall the LIKE and GLOB functions. The variant of LIKE
+ ** used will be case sensitive or not depending on the RHS.
+ */
+ case PragTyp_CASE_SENSITIVE_LIKE: {
+ if( zRight ){
+ sqlite3RegisterLikeFunctions(db, sqlite3GetBoolean(zRight, 0));
+ }
+ }
+ break;
+#endif /* SQLITE_OMIT_CASE_SENSITIVE_LIKE_PRAGMA */
+
+#ifndef SQLITE_INTEGRITY_CHECK_ERROR_MAX
+# define SQLITE_INTEGRITY_CHECK_ERROR_MAX 100
+#endif
+
+#ifndef SQLITE_OMIT_INTEGRITY_CHECK
+ /* PRAGMA integrity_check
+ ** PRAGMA integrity_check(N)
+ ** PRAGMA quick_check
+ ** PRAGMA quick_check(N)
+ **
+ ** Verify the integrity of the database.
+ **
+ ** The "quick_check" is reduced version of
+ ** integrity_check designed to detect most database corruption
+ ** without the overhead of cross-checking indexes. Quick_check
+ ** is linear time wherease integrity_check is O(NlogN).
+ **
+ ** The maximum nubmer of errors is 100 by default. A different default
+ ** can be specified using a numeric parameter N.
+ **
+ ** Or, the parameter N can be the name of a table. In that case, only
+ ** the one table named is verified. The freelist is only verified if
+ ** the named table is "sqlite_schema" (or one of its aliases).
+ **
+ ** All schemas are checked by default. To check just a single
+ ** schema, use the form:
+ **
+ ** PRAGMA schema.integrity_check;
+ */
+ case PragTyp_INTEGRITY_CHECK: {
+ int i, j, addr, mxErr;
+ Table *pObjTab = 0; /* Check only this one table, if not NULL */
+
+ int isQuick = (sqlite3Tolower(zLeft[0])=='q');
+
+ /* If the PRAGMA command was of the form "PRAGMA <db>.integrity_check",
+ ** then iDb is set to the index of the database identified by <db>.
+ ** In this case, the integrity of database iDb only is verified by
+ ** the VDBE created below.
+ **
+ ** Otherwise, if the command was simply "PRAGMA integrity_check" (or
+ ** "PRAGMA quick_check"), then iDb is set to 0. In this case, set iDb
+ ** to -1 here, to indicate that the VDBE should verify the integrity
+ ** of all attached databases. */
+ assert( iDb>=0 );
+ assert( iDb==0 || pId2->z );
+ if( pId2->z==0 ) iDb = -1;
+
+ /* Initialize the VDBE program */
+ pParse->nMem = 6;
+
+ /* Set the maximum error count */
+ mxErr = SQLITE_INTEGRITY_CHECK_ERROR_MAX;
+ if( zRight ){
+ if( sqlite3GetInt32(zRight, &mxErr) ){
+ if( mxErr<=0 ){
+ mxErr = SQLITE_INTEGRITY_CHECK_ERROR_MAX;
+ }
+ }else{
+ pObjTab = sqlite3LocateTable(pParse, 0, zRight,
+ iDb>=0 ? db->aDb[iDb].zDbSName : 0);
+ }
+ }
+ sqlite3VdbeAddOp2(v, OP_Integer, mxErr-1, 1); /* reg[1] holds errors left */
+
+ /* Do an integrity check on each database file */
+ for(i=0; i<db->nDb; i++){
+ HashElem *x; /* For looping over tables in the schema */
+ Hash *pTbls; /* Set of all tables in the schema */
+ int *aRoot; /* Array of root page numbers of all btrees */
+ int cnt = 0; /* Number of entries in aRoot[] */
+ int mxIdx = 0; /* Maximum number of indexes for any table */
+
+ if( OMIT_TEMPDB && i==1 ) continue;
+ if( iDb>=0 && i!=iDb ) continue;
+
+ sqlite3CodeVerifySchema(pParse, i);
+
+ /* Do an integrity check of the B-Tree
+ **
+      ** Begin by finding the root page numbers
+ ** for all tables and indices in the database.
+ */
+ assert( sqlite3SchemaMutexHeld(db, i, 0) );
+ pTbls = &db->aDb[i].pSchema->tblHash;
+ for(cnt=0, x=sqliteHashFirst(pTbls); x; x=sqliteHashNext(x)){
+ Table *pTab = sqliteHashData(x); /* Current table */
+ Index *pIdx; /* An index on pTab */
+ int nIdx; /* Number of indexes on pTab */
+ if( pObjTab && pObjTab!=pTab ) continue;
+ if( HasRowid(pTab) ) cnt++;
+ for(nIdx=0, pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext, nIdx++){ cnt++; }
+ if( nIdx>mxIdx ) mxIdx = nIdx;
+ }
+ if( cnt==0 ) continue;
+ if( pObjTab ) cnt++;
+ aRoot = sqlite3DbMallocRawNN(db, sizeof(int)*(cnt+1));
+ if( aRoot==0 ) break;
+ cnt = 0;
+ if( pObjTab ) aRoot[++cnt] = 0;
+ for(x=sqliteHashFirst(pTbls); x; x=sqliteHashNext(x)){
+ Table *pTab = sqliteHashData(x);
+ Index *pIdx;
+ if( pObjTab && pObjTab!=pTab ) continue;
+ if( HasRowid(pTab) ) aRoot[++cnt] = pTab->tnum;
+ for(pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext){
+ aRoot[++cnt] = pIdx->tnum;
+ }
+ }
+ aRoot[0] = cnt;
+
+      /* Make sure a sufficient number of registers has been allocated */
+ pParse->nMem = MAX( pParse->nMem, 8+mxIdx );
+ sqlite3ClearTempRegCache(pParse);
+
+ /* Do the b-tree integrity checks */
+ sqlite3VdbeAddOp4(v, OP_IntegrityCk, 2, cnt, 1, (char*)aRoot,P4_INTARRAY);
+ sqlite3VdbeChangeP5(v, (u8)i);
+ addr = sqlite3VdbeAddOp1(v, OP_IsNull, 2); VdbeCoverage(v);
+ sqlite3VdbeAddOp4(v, OP_String8, 0, 3, 0,
+ sqlite3MPrintf(db, "*** in database %s ***\n", db->aDb[i].zDbSName),
+ P4_DYNAMIC);
+ sqlite3VdbeAddOp3(v, OP_Concat, 2, 3, 3);
+ integrityCheckResultRow(v);
+ sqlite3VdbeJumpHere(v, addr);
+
+ /* Make sure all the indices are constructed correctly.
+ */
+ for(x=sqliteHashFirst(pTbls); x; x=sqliteHashNext(x)){
+ Table *pTab = sqliteHashData(x);
+ Index *pIdx, *pPk;
+ Index *pPrior = 0;
+ int loopTop;
+ int iDataCur, iIdxCur;
+ int r1 = -1;
+
+ if( pTab->tnum<1 ) continue; /* Skip VIEWs or VIRTUAL TABLEs */
+ if( pObjTab && pObjTab!=pTab ) continue;
+ pPk = HasRowid(pTab) ? 0 : sqlite3PrimaryKeyIndex(pTab);
+ sqlite3OpenTableAndIndices(pParse, pTab, OP_OpenRead, 0,
+ 1, 0, &iDataCur, &iIdxCur);
+ /* reg[7] counts the number of entries in the table.
+ ** reg[8+i] counts the number of entries in the i-th index
+ */
+ sqlite3VdbeAddOp2(v, OP_Integer, 0, 7);
+ for(j=0, pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext, j++){
+ sqlite3VdbeAddOp2(v, OP_Integer, 0, 8+j); /* index entries counter */
+ }
+ assert( pParse->nMem>=8+j );
+ assert( sqlite3NoTempsInRange(pParse,1,7+j) );
+ sqlite3VdbeAddOp2(v, OP_Rewind, iDataCur, 0); VdbeCoverage(v);
+ loopTop = sqlite3VdbeAddOp2(v, OP_AddImm, 7, 1);
+ if( !isQuick ){
+ /* Sanity check on record header decoding */
+ sqlite3VdbeAddOp3(v, OP_Column, iDataCur, pTab->nNVCol-1,3);
+ sqlite3VdbeChangeP5(v, OPFLAG_TYPEOFARG);
+ }
+ /* Verify that all NOT NULL columns really are NOT NULL */
+ for(j=0; j<pTab->nCol; j++){
+ char *zErr;
+ int jmp2;
+ if( j==pTab->iPKey ) continue;
+ if( pTab->aCol[j].notNull==0 ) continue;
+ sqlite3ExprCodeGetColumnOfTable(v, pTab, iDataCur, j, 3);
+ if( sqlite3VdbeGetOp(v,-1)->opcode==OP_Column ){
+ sqlite3VdbeChangeP5(v, OPFLAG_TYPEOFARG);
+ }
+ jmp2 = sqlite3VdbeAddOp1(v, OP_NotNull, 3); VdbeCoverage(v);
+ zErr = sqlite3MPrintf(db, "NULL value in %s.%s", pTab->zName,
+ pTab->aCol[j].zName);
+ sqlite3VdbeAddOp4(v, OP_String8, 0, 3, 0, zErr, P4_DYNAMIC);
+ integrityCheckResultRow(v);
+ sqlite3VdbeJumpHere(v, jmp2);
+ }
+ /* Verify CHECK constraints */
+ if( pTab->pCheck && (db->flags & SQLITE_IgnoreChecks)==0 ){
+ ExprList *pCheck = sqlite3ExprListDup(db, pTab->pCheck, 0);
+ if( db->mallocFailed==0 ){
+ int addrCkFault = sqlite3VdbeMakeLabel(pParse);
+ int addrCkOk = sqlite3VdbeMakeLabel(pParse);
+ char *zErr;
+ int k;
+ pParse->iSelfTab = iDataCur + 1;
+ for(k=pCheck->nExpr-1; k>0; k--){
+ sqlite3ExprIfFalse(pParse, pCheck->a[k].pExpr, addrCkFault, 0);
+ }
+ sqlite3ExprIfTrue(pParse, pCheck->a[0].pExpr, addrCkOk,
+ SQLITE_JUMPIFNULL);
+ sqlite3VdbeResolveLabel(v, addrCkFault);
+ pParse->iSelfTab = 0;
+ zErr = sqlite3MPrintf(db, "CHECK constraint failed in %s",
+ pTab->zName);
+ sqlite3VdbeAddOp4(v, OP_String8, 0, 3, 0, zErr, P4_DYNAMIC);
+ integrityCheckResultRow(v);
+ sqlite3VdbeResolveLabel(v, addrCkOk);
+ }
+ sqlite3ExprListDelete(db, pCheck);
+ }
+ if( !isQuick ){ /* Omit the remaining tests for quick_check */
+ /* Validate index entries for the current row */
+ for(j=0, pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext, j++){
+ int jmp2, jmp3, jmp4, jmp5;
+ int ckUniq = sqlite3VdbeMakeLabel(pParse);
+ if( pPk==pIdx ) continue;
+ r1 = sqlite3GenerateIndexKey(pParse, pIdx, iDataCur, 0, 0, &jmp3,
+ pPrior, r1);
+ pPrior = pIdx;
+ sqlite3VdbeAddOp2(v, OP_AddImm, 8+j, 1);/* increment entry count */
+ /* Verify that an index entry exists for the current table row */
+ jmp2 = sqlite3VdbeAddOp4Int(v, OP_Found, iIdxCur+j, ckUniq, r1,
+ pIdx->nColumn); VdbeCoverage(v);
+ sqlite3VdbeLoadString(v, 3, "row ");
+ sqlite3VdbeAddOp3(v, OP_Concat, 7, 3, 3);
+ sqlite3VdbeLoadString(v, 4, " missing from index ");
+ sqlite3VdbeAddOp3(v, OP_Concat, 4, 3, 3);
+ jmp5 = sqlite3VdbeLoadString(v, 4, pIdx->zName);
+ sqlite3VdbeAddOp3(v, OP_Concat, 4, 3, 3);
+ jmp4 = integrityCheckResultRow(v);
+ sqlite3VdbeJumpHere(v, jmp2);
+ /* For UNIQUE indexes, verify that only one entry exists with the
+ ** current key. The entry is unique if (1) any column is NULL
+ ** or (2) the next entry has a different key */
+ if( IsUniqueIndex(pIdx) ){
+ int uniqOk = sqlite3VdbeMakeLabel(pParse);
+ int jmp6;
+ int kk;
+ for(kk=0; kk<pIdx->nKeyCol; kk++){
+ int iCol = pIdx->aiColumn[kk];
+ assert( iCol!=XN_ROWID && iCol<pTab->nCol );
+ if( iCol>=0 && pTab->aCol[iCol].notNull ) continue;
+ sqlite3VdbeAddOp2(v, OP_IsNull, r1+kk, uniqOk);
+ VdbeCoverage(v);
+ }
+ jmp6 = sqlite3VdbeAddOp1(v, OP_Next, iIdxCur+j); VdbeCoverage(v);
+ sqlite3VdbeGoto(v, uniqOk);
+ sqlite3VdbeJumpHere(v, jmp6);
+ sqlite3VdbeAddOp4Int(v, OP_IdxGT, iIdxCur+j, uniqOk, r1,
+ pIdx->nKeyCol); VdbeCoverage(v);
+ sqlite3VdbeLoadString(v, 3, "non-unique entry in index ");
+ sqlite3VdbeGoto(v, jmp5);
+ sqlite3VdbeResolveLabel(v, uniqOk);
+ }
+ sqlite3VdbeJumpHere(v, jmp4);
+ sqlite3ResolvePartIdxLabel(pParse, jmp3);
+ }
+ }
+ sqlite3VdbeAddOp2(v, OP_Next, iDataCur, loopTop); VdbeCoverage(v);
+ sqlite3VdbeJumpHere(v, loopTop-1);
+ if( !isQuick ){
+ sqlite3VdbeLoadString(v, 2, "wrong # of entries in index ");
+ for(j=0, pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext, j++){
+ if( pPk==pIdx ) continue;
+ sqlite3VdbeAddOp2(v, OP_Count, iIdxCur+j, 3);
+ addr = sqlite3VdbeAddOp3(v, OP_Eq, 8+j, 0, 3); VdbeCoverage(v);
+ sqlite3VdbeChangeP5(v, SQLITE_NOTNULL);
+ sqlite3VdbeLoadString(v, 4, pIdx->zName);
+ sqlite3VdbeAddOp3(v, OP_Concat, 4, 2, 3);
+ integrityCheckResultRow(v);
+ sqlite3VdbeJumpHere(v, addr);
+ }
+ }
+ }
+ }
+ {
+ static const int iLn = VDBE_OFFSET_LINENO(2);
+ static const VdbeOpList endCode[] = {
+ { OP_AddImm, 1, 0, 0}, /* 0 */
+ { OP_IfNotZero, 1, 4, 0}, /* 1 */
+ { OP_String8, 0, 3, 0}, /* 2 */
+ { OP_ResultRow, 3, 1, 0}, /* 3 */
+ { OP_Halt, 0, 0, 0}, /* 4 */
+ { OP_String8, 0, 3, 0}, /* 5 */
+ { OP_Goto, 0, 3, 0}, /* 6 */
+ };
+ VdbeOp *aOp;
+
+ aOp = sqlite3VdbeAddOpList(v, ArraySize(endCode), endCode, iLn);
+ if( aOp ){
+ aOp[0].p2 = 1-mxErr;
+ aOp[2].p4type = P4_STATIC;
+ aOp[2].p4.z = "ok";
+ aOp[5].p4type = P4_STATIC;
+ aOp[5].p4.z = (char*)sqlite3ErrStr(SQLITE_CORRUPT);
+ }
+ sqlite3VdbeChangeP3(v, 0, sqlite3VdbeCurrentAddr(v)-2);
+ }
+ }
+ break;
+#endif /* SQLITE_OMIT_INTEGRITY_CHECK */
+
+#ifndef SQLITE_OMIT_UTF16
+ /*
+ ** PRAGMA encoding
+ ** PRAGMA encoding = "utf-8"|"utf-16"|"utf-16le"|"utf-16be"
+ **
+ ** In its first form, this pragma returns the encoding of the main
+ ** database. If the database is not initialized, it is initialized now.
+ **
+ ** The second form of this pragma is a no-op if the main database file
+ ** has not already been initialized. In this case it sets the default
+ ** encoding that will be used for the main database file if a new file
+ ** is created. If an existing main database file is opened, then the
+ ** default text encoding for the existing database is used.
+ **
+ ** In all cases new databases created using the ATTACH command are
+ ** created to use the same default text encoding as the main database. If
+ ** the main database has not been initialized and/or created when ATTACH
+ ** is executed, this is done before the ATTACH operation.
+ **
+ ** In the second form this pragma sets the text encoding to be used in
+  ** new database files created using this database handle. It is only
+  ** useful if invoked immediately after the main database is opened,
+  ** before the database file has been initialized.
+ */
+ case PragTyp_ENCODING: {
+ static const struct EncName {
+ char *zName;
+ u8 enc;
+ } encnames[] = {
+ { "UTF8", SQLITE_UTF8 },
+ { "UTF-8", SQLITE_UTF8 }, /* Must be element [1] */
+ { "UTF-16le", SQLITE_UTF16LE }, /* Must be element [2] */
+ { "UTF-16be", SQLITE_UTF16BE }, /* Must be element [3] */
+ { "UTF16le", SQLITE_UTF16LE },
+ { "UTF16be", SQLITE_UTF16BE },
+ { "UTF-16", 0 }, /* SQLITE_UTF16NATIVE */
+ { "UTF16", 0 }, /* SQLITE_UTF16NATIVE */
+ { 0, 0 }
+ };
+ const struct EncName *pEnc;
+ if( !zRight ){ /* "PRAGMA encoding" */
+ if( sqlite3ReadSchema(pParse) ) goto pragma_out;
+ assert( encnames[SQLITE_UTF8].enc==SQLITE_UTF8 );
+ assert( encnames[SQLITE_UTF16LE].enc==SQLITE_UTF16LE );
+ assert( encnames[SQLITE_UTF16BE].enc==SQLITE_UTF16BE );
+ returnSingleText(v, encnames[ENC(pParse->db)].zName);
+ }else{ /* "PRAGMA encoding = XXX" */
+ /* Only change the value of sqlite.enc if the database handle is not
+ ** initialized. If the main database exists, the new sqlite.enc value
+ ** will be overwritten when the schema is next loaded. If it does not
+      ** already exist, it will be created to use the new encoding value.
+ */
+ if( (db->mDbFlags & DBFLAG_EncodingFixed)==0 ){
+ for(pEnc=&encnames[0]; pEnc->zName; pEnc++){
+ if( 0==sqlite3StrICmp(zRight, pEnc->zName) ){
+ u8 enc = pEnc->enc ? pEnc->enc : SQLITE_UTF16NATIVE;
+ SCHEMA_ENC(db) = enc;
+ sqlite3SetTextEncoding(db, enc);
+ break;
+ }
+ }
+ if( !pEnc->zName ){
+ sqlite3ErrorMsg(pParse, "unsupported encoding: %s", zRight);
+ }
+ }
+ }
+ }
+ break;
+#endif /* SQLITE_OMIT_UTF16 */
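+
+  /* A minimal usage sketch, assuming a freshly opened connection whose
+  ** main database file does not yet exist (names are hypothetical):
+  **
+  **    PRAGMA encoding = "utf-16le";  -- set default for files created later
+  **    CREATE TABLE t1(x);            -- new file is created as UTF-16le
+  **    PRAGMA encoding;               -- reports: UTF-16le
+  */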
+
+#ifndef SQLITE_OMIT_SCHEMA_VERSION_PRAGMAS
+ /*
+ ** PRAGMA [schema.]schema_version
+ ** PRAGMA [schema.]schema_version = <integer>
+ **
+ ** PRAGMA [schema.]user_version
+ ** PRAGMA [schema.]user_version = <integer>
+ **
+ ** PRAGMA [schema.]freelist_count
+ **
+ ** PRAGMA [schema.]data_version
+ **
+ ** PRAGMA [schema.]application_id
+ ** PRAGMA [schema.]application_id = <integer>
+ **
+  ** The schema_version and user_version pragmas are used to set or get
+ ** the value of the schema-version and user-version, respectively. Both
+ ** the schema-version and the user-version are 32-bit signed integers
+ ** stored in the database header.
+ **
+ ** The schema-cookie is usually only manipulated internally by SQLite. It
+ ** is incremented by SQLite whenever the database schema is modified (by
+ ** creating or dropping a table or index). The schema version is used by
+ ** SQLite each time a query is executed to ensure that the internal cache
+ ** of the schema used when compiling the SQL query matches the schema of
+ ** the database against which the compiled query is actually executed.
+ ** Subverting this mechanism by using "PRAGMA schema_version" to modify
+ ** the schema-version is potentially dangerous and may lead to program
+ ** crashes or database corruption. Use with caution!
+ **
+ ** The user-version is not used internally by SQLite. It may be used by
+ ** applications for any purpose.
+ */
+ case PragTyp_HEADER_VALUE: {
+ int iCookie = pPragma->iArg; /* Which cookie to read or write */
+ sqlite3VdbeUsesBtree(v, iDb);
+ if( zRight && (pPragma->mPragFlg & PragFlg_ReadOnly)==0 ){
+ /* Write the specified cookie value */
+ static const VdbeOpList setCookie[] = {
+ { OP_Transaction, 0, 1, 0}, /* 0 */
+ { OP_SetCookie, 0, 0, 0}, /* 1 */
+ };
+ VdbeOp *aOp;
+ sqlite3VdbeVerifyNoMallocRequired(v, ArraySize(setCookie));
+ aOp = sqlite3VdbeAddOpList(v, ArraySize(setCookie), setCookie, 0);
+ if( ONLY_IF_REALLOC_STRESS(aOp==0) ) break;
+ aOp[0].p1 = iDb;
+ aOp[1].p1 = iDb;
+ aOp[1].p2 = iCookie;
+ aOp[1].p3 = sqlite3Atoi(zRight);
+ aOp[1].p5 = 1;
+ }else{
+ /* Read the specified cookie value */
+ static const VdbeOpList readCookie[] = {
+ { OP_Transaction, 0, 0, 0}, /* 0 */
+ { OP_ReadCookie, 0, 1, 0}, /* 1 */
+ { OP_ResultRow, 1, 1, 0}
+ };
+ VdbeOp *aOp;
+ sqlite3VdbeVerifyNoMallocRequired(v, ArraySize(readCookie));
+ aOp = sqlite3VdbeAddOpList(v, ArraySize(readCookie),readCookie,0);
+ if( ONLY_IF_REALLOC_STRESS(aOp==0) ) break;
+ aOp[0].p1 = iDb;
+ aOp[1].p1 = iDb;
+ aOp[1].p3 = iCookie;
+ sqlite3VdbeReusable(v);
+ }
+ }
+ break;
+#endif /* SQLITE_OMIT_SCHEMA_VERSION_PRAGMAS */
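+
+  /* A short illustration of the user_version cookie, which SQLite itself
+  ** never interprets (the values shown are hypothetical):
+  **
+  **    PRAGMA user_version;       -- 0 in a freshly created database
+  **    PRAGMA user_version = 7;   -- store an application schema revision
+  **    PRAGMA user_version;       -- now returns 7
+  */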
+
+#ifndef SQLITE_OMIT_COMPILEOPTION_DIAGS
+ /*
+ ** PRAGMA compile_options
+ **
+ ** Return the names of all compile-time options used in this build,
+ ** one option per row.
+ */
+ case PragTyp_COMPILE_OPTIONS: {
+ int i = 0;
+ const char *zOpt;
+ pParse->nMem = 1;
+ while( (zOpt = sqlite3_compileoption_get(i++))!=0 ){
+ sqlite3VdbeLoadString(v, 1, zOpt);
+ sqlite3VdbeAddOp2(v, OP_ResultRow, 1, 1);
+ }
+ sqlite3VdbeReusable(v);
+ }
+ break;
+#endif /* SQLITE_OMIT_COMPILEOPTION_DIAGS */
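+
+  /* Example invocation. The rows returned depend entirely on how the
+  ** library was compiled; the option names below are typical, not
+  ** guaranteed:
+  **
+  **    PRAGMA compile_options;
+  **    -- e.g.  COMPILER=gcc-9.3.0
+  **    --       ENABLE_FTS4
+  **    --       THREADSAFE=1
+  */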
+
+#ifndef SQLITE_OMIT_WAL
+ /*
+ ** PRAGMA [schema.]wal_checkpoint = passive|full|restart|truncate
+ **
+ ** Checkpoint the database.
+ */
+ case PragTyp_WAL_CHECKPOINT: {
+ int iBt = (pId2->z?iDb:SQLITE_MAX_ATTACHED);
+ int eMode = SQLITE_CHECKPOINT_PASSIVE;
+ if( zRight ){
+ if( sqlite3StrICmp(zRight, "full")==0 ){
+ eMode = SQLITE_CHECKPOINT_FULL;
+ }else if( sqlite3StrICmp(zRight, "restart")==0 ){
+ eMode = SQLITE_CHECKPOINT_RESTART;
+ }else if( sqlite3StrICmp(zRight, "truncate")==0 ){
+ eMode = SQLITE_CHECKPOINT_TRUNCATE;
+ }
+ }
+ pParse->nMem = 3;
+ sqlite3VdbeAddOp3(v, OP_Checkpoint, iBt, eMode, 1);
+ sqlite3VdbeAddOp2(v, OP_ResultRow, 1, 3);
+ }
+ break;
+
+ /*
+ ** PRAGMA wal_autocheckpoint
+ ** PRAGMA wal_autocheckpoint = N
+ **
+ ** Configure a database connection to automatically checkpoint a database
+ ** after accumulating N frames in the log. Or query for the current value
+ ** of N.
+ */
+ case PragTyp_WAL_AUTOCHECKPOINT: {
+ if( zRight ){
+ sqlite3_wal_autocheckpoint(db, sqlite3Atoi(zRight));
+ }
+ returnSingleInt(v,
+ db->xWalCallback==sqlite3WalDefaultHook ?
+ SQLITE_PTR_TO_INT(db->pWalArg) : 0);
+ }
+ break;
+#endif
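+
+  /* A typical WAL maintenance sequence (illustrative only; the frame
+  ** count is an arbitrary example):
+  **
+  **    PRAGMA journal_mode = WAL;        -- enable write-ahead logging
+  **    PRAGMA wal_autocheckpoint = 1000; -- checkpoint after ~1000 frames
+  **    PRAGMA wal_checkpoint(TRUNCATE);  -- checkpoint now and reset the log
+  */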
+
+ /*
+ ** PRAGMA shrink_memory
+ **
+ ** IMPLEMENTATION-OF: R-23445-46109 This pragma causes the database
+ ** connection on which it is invoked to free up as much memory as it
+ ** can, by calling sqlite3_db_release_memory().
+ */
+ case PragTyp_SHRINK_MEMORY: {
+ sqlite3_db_release_memory(db);
+ break;
+ }
+
+ /*
+ ** PRAGMA optimize
+ ** PRAGMA optimize(MASK)
+ ** PRAGMA schema.optimize
+ ** PRAGMA schema.optimize(MASK)
+ **
+ ** Attempt to optimize the database. All schemas are optimized in the first
+ ** two forms, and only the specified schema is optimized in the latter two.
+ **
+ ** The details of optimizations performed by this pragma are expected
+ ** to change and improve over time. Applications should anticipate that
+ ** this pragma will perform new optimizations in future releases.
+ **
+ ** The optional argument is a bitmask of optimizations to perform:
+ **
+ ** 0x0001 Debugging mode. Do not actually perform any optimizations
+ ** but instead return one line of text for each optimization
+ ** that would have been done. Off by default.
+ **
+ ** 0x0002 Run ANALYZE on tables that might benefit. On by default.
+ ** See below for additional information.
+ **
+ ** 0x0004 (Not yet implemented) Record usage and performance
+ ** information from the current session in the
+ ** database file so that it will be available to "optimize"
+ ** pragmas run by future database connections.
+ **
+ ** 0x0008 (Not yet implemented) Create indexes that might have
+ ** been helpful to recent queries
+ **
+ ** The default MASK is and always shall be 0xfffe. 0xfffe means perform all
+ ** of the optimizations listed above except Debug Mode, including new
+ ** optimizations that have not yet been invented. If new optimizations are
+ ** ever added that should be off by default, those off-by-default
+ ** optimizations will have bitmasks of 0x10000 or larger.
+ **
+ ** DETERMINATION OF WHEN TO RUN ANALYZE
+ **
+  ** In the current implementation, a table is analyzed if and only if all
+ ** the following are true:
+ **
+ ** (1) MASK bit 0x02 is set.
+ **
+ ** (2) The query planner used sqlite_stat1-style statistics for one or
+ ** more indexes of the table at some point during the lifetime of
+ ** the current connection.
+ **
+ ** (3) One or more indexes of the table are currently unanalyzed OR
+ ** the number of rows in the table has increased by 25 times or more
+ ** since the last time ANALYZE was run.
+ **
+ ** The rules for when tables are analyzed are likely to change in
+ ** future releases.
+ */
+ case PragTyp_OPTIMIZE: {
+ int iDbLast; /* Loop termination point for the schema loop */
+ int iTabCur; /* Cursor for a table whose size needs checking */
+ HashElem *k; /* Loop over tables of a schema */
+ Schema *pSchema; /* The current schema */
+ Table *pTab; /* A table in the schema */
+ Index *pIdx; /* An index of the table */
+    LogEst szThreshold;     /* Size threshold above which reanalysis is needed */
+ char *zSubSql; /* SQL statement for the OP_SqlExec opcode */
+ u32 opMask; /* Mask of operations to perform */
+
+ if( zRight ){
+ opMask = (u32)sqlite3Atoi(zRight);
+ if( (opMask & 0x02)==0 ) break;
+ }else{
+ opMask = 0xfffe;
+ }
+ iTabCur = pParse->nTab++;
+ for(iDbLast = zDb?iDb:db->nDb-1; iDb<=iDbLast; iDb++){
+ if( iDb==1 ) continue;
+ sqlite3CodeVerifySchema(pParse, iDb);
+ pSchema = db->aDb[iDb].pSchema;
+ for(k=sqliteHashFirst(&pSchema->tblHash); k; k=sqliteHashNext(k)){
+ pTab = (Table*)sqliteHashData(k);
+
+ /* If table pTab has not been used in a way that would benefit from
+ ** having analysis statistics during the current session, then skip it.
+ ** This also has the effect of skipping virtual tables and views */
+ if( (pTab->tabFlags & TF_StatsUsed)==0 ) continue;
+
+ /* Reanalyze if the table is 25 times larger than the last analysis */
+ szThreshold = pTab->nRowLogEst + 46; assert( sqlite3LogEst(25)==46 );
+ for(pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext){
+ if( !pIdx->hasStat1 ){
+ szThreshold = 0; /* Always analyze if any index lacks statistics */
+ break;
+ }
+ }
+ if( szThreshold ){
+ sqlite3OpenTable(pParse, iTabCur, iDb, pTab, OP_OpenRead);
+ sqlite3VdbeAddOp3(v, OP_IfSmaller, iTabCur,
+ sqlite3VdbeCurrentAddr(v)+2+(opMask&1), szThreshold);
+ VdbeCoverage(v);
+ }
+ zSubSql = sqlite3MPrintf(db, "ANALYZE \"%w\".\"%w\"",
+ db->aDb[iDb].zDbSName, pTab->zName);
+ if( opMask & 0x01 ){
+ int r1 = sqlite3GetTempReg(pParse);
+ sqlite3VdbeAddOp4(v, OP_String8, 0, r1, 0, zSubSql, P4_DYNAMIC);
+ sqlite3VdbeAddOp2(v, OP_ResultRow, r1, 1);
+ }else{
+ sqlite3VdbeAddOp4(v, OP_SqlExec, 0, 0, 0, zSubSql, P4_DYNAMIC);
+ }
+ }
+ }
+ sqlite3VdbeAddOp0(v, OP_Expire);
+ break;
+ }
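+
+  /* A common usage pattern suggested by the mask semantics above: run the
+  ** debugging form first to see what would be done, then the real thing.
+  ** The output row shown is hypothetical:
+  **
+  **    PRAGMA optimize(0x03);   -- 0x01|0x02: report only, do not run
+  **    -- e.g.  ANALYZE "main"."t1"
+  **    PRAGMA optimize;         -- actually run the recommended ANALYZEs
+  */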
+
+ /*
+ ** PRAGMA busy_timeout
+ ** PRAGMA busy_timeout = N
+ **
+ ** Call sqlite3_busy_timeout(db, N). Return the current timeout value
+ ** if one is set. If no busy handler or a different busy handler is set
+ ** then 0 is returned. Setting the busy_timeout to 0 or negative
+ ** disables the timeout.
+ */
+ /*case PragTyp_BUSY_TIMEOUT*/ default: {
+ assert( pPragma->ePragTyp==PragTyp_BUSY_TIMEOUT );
+ if( zRight ){
+ sqlite3_busy_timeout(db, sqlite3Atoi(zRight));
+ }
+ returnSingleInt(v, db->busyTimeout);
+ break;
+ }
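+
+  /* For example, the following two statements are equivalent ways of asking
+  ** the connection to retry a busy lock for up to two seconds:
+  **
+  **    PRAGMA busy_timeout = 2000;      -- SQL form (milliseconds)
+  **    sqlite3_busy_timeout(db, 2000);  -- equivalent C API call
+  */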
+
+ /*
+ ** PRAGMA soft_heap_limit
+ ** PRAGMA soft_heap_limit = N
+ **
+ ** IMPLEMENTATION-OF: R-26343-45930 This pragma invokes the
+ ** sqlite3_soft_heap_limit64() interface with the argument N, if N is
+ ** specified and is a non-negative integer.
+ ** IMPLEMENTATION-OF: R-64451-07163 The soft_heap_limit pragma always
+ ** returns the same integer that would be returned by the
+ ** sqlite3_soft_heap_limit64(-1) C-language function.
+ */
+ case PragTyp_SOFT_HEAP_LIMIT: {
+ sqlite3_int64 N;
+ if( zRight && sqlite3DecOrHexToI64(zRight, &N)==SQLITE_OK ){
+ sqlite3_soft_heap_limit64(N);
+ }
+ returnSingleInt(v, sqlite3_soft_heap_limit64(-1));
+ break;
+ }
+
+ /*
+ ** PRAGMA hard_heap_limit
+ ** PRAGMA hard_heap_limit = N
+ **
+ ** Invoke sqlite3_hard_heap_limit64() to query or set the hard heap
+ ** limit. The hard heap limit can be activated or lowered by this
+ ** pragma, but not raised or deactivated. Only the
+ ** sqlite3_hard_heap_limit64() C-language API can raise or deactivate
+ ** the hard heap limit. This allows an application to set a heap limit
+ ** constraint that cannot be relaxed by an untrusted SQL script.
+ */
+ case PragTyp_HARD_HEAP_LIMIT: {
+ sqlite3_int64 N;
+ if( zRight && sqlite3DecOrHexToI64(zRight, &N)==SQLITE_OK ){
+ sqlite3_int64 iPrior = sqlite3_hard_heap_limit64(-1);
+ if( N>0 && (iPrior==0 || iPrior>N) ) sqlite3_hard_heap_limit64(N);
+ }
+ returnSingleInt(v, sqlite3_hard_heap_limit64(-1));
+ break;
+ }
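+
+  /* A brief sketch of the difference between the two heap-limit pragmas
+  ** (the byte counts are arbitrary examples):
+  **
+  **    PRAGMA soft_heap_limit = 8000000;   -- advisory; may be exceeded
+  **    PRAGMA hard_heap_limit = 16000000;  -- firm; SQL may lower it later
+  **                                        -- but can never raise it again
+  */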
+
+ /*
+ ** PRAGMA threads
+ ** PRAGMA threads = N
+ **
+ ** Configure the maximum number of worker threads. Return the new
+ ** maximum, which might be less than requested.
+ */
+ case PragTyp_THREADS: {
+ sqlite3_int64 N;
+ if( zRight
+ && sqlite3DecOrHexToI64(zRight, &N)==SQLITE_OK
+ && N>=0
+ ){
+ sqlite3_limit(db, SQLITE_LIMIT_WORKER_THREADS, (int)(N&0x7fffffff));
+ }
+ returnSingleInt(v, sqlite3_limit(db, SQLITE_LIMIT_WORKER_THREADS, -1));
+ break;
+ }
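+
+  /* For example (the result reflects the value actually in effect, which
+  ** may be clamped below the request by compile-time limits):
+  **
+  **    PRAGMA threads = 4;   -- request up to 4 auxiliary worker threads
+  */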
+
+ /*
+ ** PRAGMA analysis_limit
+ ** PRAGMA analysis_limit = N
+ **
+ ** Configure the maximum number of rows that ANALYZE will examine
+ ** in each index that it looks at. Return the new limit.
+ */
+ case PragTyp_ANALYSIS_LIMIT: {
+ sqlite3_int64 N;
+ if( zRight
+ && sqlite3DecOrHexToI64(zRight, &N)==SQLITE_OK
+ && N>=0
+ ){
+ db->nAnalysisLimit = (int)(N&0x7fffffff);
+ }
+ returnSingleInt(v, db->nAnalysisLimit);
+ break;
+ }
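+
+  /* A sketch of the approximate-ANALYZE idiom this pragma enables
+  ** (the row limit is an arbitrary example):
+  **
+  **    PRAGMA analysis_limit = 1000;  -- scan at most ~1000 rows per index
+  **    PRAGMA optimize;               -- any ANALYZE it runs is now fast
+  */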
+
+#if defined(SQLITE_DEBUG) || defined(SQLITE_TEST)
+ /*
+ ** Report the current state of file logs for all databases
+ */
+ case PragTyp_LOCK_STATUS: {
+ static const char *const azLockName[] = {
+ "unlocked", "shared", "reserved", "pending", "exclusive"
+ };
+ int i;
+ pParse->nMem = 2;
+ for(i=0; i<db->nDb; i++){
+ Btree *pBt;
+ const char *zState = "unknown";
+ int j;
+ if( db->aDb[i].zDbSName==0 ) continue;
+ pBt = db->aDb[i].pBt;
+ if( pBt==0 || sqlite3BtreePager(pBt)==0 ){
+ zState = "closed";
+ }else if( sqlite3_file_control(db, i ? db->aDb[i].zDbSName : 0,
+ SQLITE_FCNTL_LOCKSTATE, &j)==SQLITE_OK ){
+ zState = azLockName[j];
+ }
+ sqlite3VdbeMultiLoad(v, 1, "ss", db->aDb[i].zDbSName, zState);
+ }
+ break;
+ }
+#endif
+
+#if defined(SQLITE_ENABLE_CEROD)
+ case PragTyp_ACTIVATE_EXTENSIONS: if( zRight ){
+ if( sqlite3StrNICmp(zRight, "cerod-", 6)==0 ){
+ sqlite3_activate_cerod(&zRight[6]);
+ }
+ }
+ break;
+#endif
+
+ } /* End of the PRAGMA switch */
+
+ /* The following block is a no-op unless SQLITE_DEBUG is defined. Its only
+ ** purpose is to execute assert() statements to verify that if the
+ ** PragFlg_NoColumns1 flag is set and the caller specified an argument
+ ** to the PRAGMA, the implementation has not added any OP_ResultRow
+ ** instructions to the VM. */
+ if( (pPragma->mPragFlg & PragFlg_NoColumns1) && zRight ){
+ sqlite3VdbeVerifyNoResultRow(v);
+ }
+
+pragma_out:
+ sqlite3DbFree(db, zLeft);
+ sqlite3DbFree(db, zRight);
+}
+#ifndef SQLITE_OMIT_VIRTUALTABLE
+/*****************************************************************************
+** Implementation of an eponymous virtual table that runs a pragma.
+*/
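+/* For example, assuming this virtual-table support is compiled in, any
+** pragma that returns results can be queried like an ordinary table:
+**
+**    SELECT name, type FROM pragma_table_info('t1') WHERE "notnull";
+*/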
+typedef struct PragmaVtab PragmaVtab;
+typedef struct PragmaVtabCursor PragmaVtabCursor;
+struct PragmaVtab {
+ sqlite3_vtab base; /* Base class. Must be first */
+ sqlite3 *db; /* The database connection to which it belongs */
+ const PragmaName *pName; /* Name of the pragma */
+ u8 nHidden; /* Number of hidden columns */
+ u8 iHidden; /* Index of the first hidden column */
+};
+struct PragmaVtabCursor {
+ sqlite3_vtab_cursor base; /* Base class. Must be first */
+ sqlite3_stmt *pPragma; /* The pragma statement to run */
+ sqlite_int64 iRowid; /* Current rowid */
+ char *azArg[2]; /* Value of the argument and schema */
+};
+
+/*
+** Pragma virtual table module xConnect method.
+*/
+static int pragmaVtabConnect(
+ sqlite3 *db,
+ void *pAux,
+ int argc, const char *const*argv,
+ sqlite3_vtab **ppVtab,
+ char **pzErr
+){
+ const PragmaName *pPragma = (const PragmaName*)pAux;
+ PragmaVtab *pTab = 0;
+ int rc;
+ int i, j;
+ char cSep = '(';
+ StrAccum acc;
+ char zBuf[200];
+
+ UNUSED_PARAMETER(argc);
+ UNUSED_PARAMETER(argv);
+ sqlite3StrAccumInit(&acc, 0, zBuf, sizeof(zBuf), 0);
+ sqlite3_str_appendall(&acc, "CREATE TABLE x");
+ for(i=0, j=pPragma->iPragCName; i<pPragma->nPragCName; i++, j++){
+ sqlite3_str_appendf(&acc, "%c\"%s\"", cSep, pragCName[j]);
+ cSep = ',';
+ }
+ if( i==0 ){
+ sqlite3_str_appendf(&acc, "(\"%s\"", pPragma->zName);
+ i++;
+ }
+ j = 0;
+ if( pPragma->mPragFlg & PragFlg_Result1 ){
+ sqlite3_str_appendall(&acc, ",arg HIDDEN");
+ j++;
+ }
+ if( pPragma->mPragFlg & (PragFlg_SchemaOpt|PragFlg_SchemaReq) ){
+ sqlite3_str_appendall(&acc, ",schema HIDDEN");
+ j++;
+ }
+ sqlite3_str_append(&acc, ")", 1);
+ sqlite3StrAccumFinish(&acc);
+ assert( strlen(zBuf) < sizeof(zBuf)-1 );
+ rc = sqlite3_declare_vtab(db, zBuf);
+ if( rc==SQLITE_OK ){
+ pTab = (PragmaVtab*)sqlite3_malloc(sizeof(PragmaVtab));
+ if( pTab==0 ){
+ rc = SQLITE_NOMEM;
+ }else{
+ memset(pTab, 0, sizeof(PragmaVtab));
+ pTab->pName = pPragma;
+ pTab->db = db;
+ pTab->iHidden = i;
+ pTab->nHidden = j;
+ }
+ }else{
+ *pzErr = sqlite3_mprintf("%s", sqlite3_errmsg(db));
+ }
+
+ *ppVtab = (sqlite3_vtab*)pTab;
+ return rc;
+}
+
+/*
+** Pragma virtual table module xDisconnect method.
+*/
+static int pragmaVtabDisconnect(sqlite3_vtab *pVtab){
+ PragmaVtab *pTab = (PragmaVtab*)pVtab;
+ sqlite3_free(pTab);
+ return SQLITE_OK;
+}
+
+/* Figure out the best index to use to search a pragma virtual table.
+**
+** There are not really any index choices. But we want to encourage the
+** query planner to give == constraints on as many hidden parameters as
+** possible, and especially on the first hidden parameter. So return a
+** high cost if hidden parameters are unconstrained.
+*/
+static int pragmaVtabBestIndex(sqlite3_vtab *tab, sqlite3_index_info *pIdxInfo){
+ PragmaVtab *pTab = (PragmaVtab*)tab;
+ const struct sqlite3_index_constraint *pConstraint;
+ int i, j;
+ int seen[2];
+
+ pIdxInfo->estimatedCost = (double)1;
+ if( pTab->nHidden==0 ){ return SQLITE_OK; }
+ pConstraint = pIdxInfo->aConstraint;
+ seen[0] = 0;
+ seen[1] = 0;
+ for(i=0; i<pIdxInfo->nConstraint; i++, pConstraint++){
+ if( pConstraint->usable==0 ) continue;
+ if( pConstraint->op!=SQLITE_INDEX_CONSTRAINT_EQ ) continue;
+ if( pConstraint->iColumn < pTab->iHidden ) continue;
+ j = pConstraint->iColumn - pTab->iHidden;
+ assert( j < 2 );
+ seen[j] = i+1;
+ }
+ if( seen[0]==0 ){
+ pIdxInfo->estimatedCost = (double)2147483647;
+ pIdxInfo->estimatedRows = 2147483647;
+ return SQLITE_OK;
+ }
+ j = seen[0]-1;
+ pIdxInfo->aConstraintUsage[j].argvIndex = 1;
+ pIdxInfo->aConstraintUsage[j].omit = 1;
+ if( seen[1]==0 ) return SQLITE_OK;
+ pIdxInfo->estimatedCost = (double)20;
+ pIdxInfo->estimatedRows = 20;
+ j = seen[1]-1;
+ pIdxInfo->aConstraintUsage[j].argvIndex = 2;
+ pIdxInfo->aConstraintUsage[j].omit = 1;
+ return SQLITE_OK;
+}
+
+/* Create a new cursor for the pragma virtual table */
+static int pragmaVtabOpen(sqlite3_vtab *pVtab, sqlite3_vtab_cursor **ppCursor){
+ PragmaVtabCursor *pCsr;
+ pCsr = (PragmaVtabCursor*)sqlite3_malloc(sizeof(*pCsr));
+ if( pCsr==0 ) return SQLITE_NOMEM;
+ memset(pCsr, 0, sizeof(PragmaVtabCursor));
+ pCsr->base.pVtab = pVtab;
+ *ppCursor = &pCsr->base;
+ return SQLITE_OK;
+}
+
+/* Clear all content from pragma virtual table cursor. */
+static void pragmaVtabCursorClear(PragmaVtabCursor *pCsr){
+ int i;
+ sqlite3_finalize(pCsr->pPragma);
+ pCsr->pPragma = 0;
+ for(i=0; i<ArraySize(pCsr->azArg); i++){
+ sqlite3_free(pCsr->azArg[i]);
+ pCsr->azArg[i] = 0;
+ }
+}
+
+/* Close a pragma virtual table cursor */
+static int pragmaVtabClose(sqlite3_vtab_cursor *cur){
+ PragmaVtabCursor *pCsr = (PragmaVtabCursor*)cur;
+ pragmaVtabCursorClear(pCsr);
+ sqlite3_free(pCsr);
+ return SQLITE_OK;
+}
+
+/* Advance the pragma virtual table cursor to the next row */
+static int pragmaVtabNext(sqlite3_vtab_cursor *pVtabCursor){
+ PragmaVtabCursor *pCsr = (PragmaVtabCursor*)pVtabCursor;
+ int rc = SQLITE_OK;
+
+  /* Advance the rowid value returned by the xRowid method */
+ pCsr->iRowid++;
+ assert( pCsr->pPragma );
+ if( SQLITE_ROW!=sqlite3_step(pCsr->pPragma) ){
+ rc = sqlite3_finalize(pCsr->pPragma);
+ pCsr->pPragma = 0;
+ pragmaVtabCursorClear(pCsr);
+ }
+ return rc;
+}
+
+/*
+** Pragma virtual table module xFilter method.
+*/
+static int pragmaVtabFilter(
+ sqlite3_vtab_cursor *pVtabCursor,
+ int idxNum, const char *idxStr,
+ int argc, sqlite3_value **argv
+){
+ PragmaVtabCursor *pCsr = (PragmaVtabCursor*)pVtabCursor;
+ PragmaVtab *pTab = (PragmaVtab*)(pVtabCursor->pVtab);
+ int rc;
+ int i, j;
+ StrAccum acc;
+ char *zSql;
+
+ UNUSED_PARAMETER(idxNum);
+ UNUSED_PARAMETER(idxStr);
+ pragmaVtabCursorClear(pCsr);
+ j = (pTab->pName->mPragFlg & PragFlg_Result1)!=0 ? 0 : 1;
+ for(i=0; i<argc; i++, j++){
+ const char *zText = (const char*)sqlite3_value_text(argv[i]);
+ assert( j<ArraySize(pCsr->azArg) );
+ assert( pCsr->azArg[j]==0 );
+ if( zText ){
+ pCsr->azArg[j] = sqlite3_mprintf("%s", zText);
+ if( pCsr->azArg[j]==0 ){
+ return SQLITE_NOMEM;
+ }
+ }
+ }
+ sqlite3StrAccumInit(&acc, 0, 0, 0, pTab->db->aLimit[SQLITE_LIMIT_SQL_LENGTH]);
+ sqlite3_str_appendall(&acc, "PRAGMA ");
+ if( pCsr->azArg[1] ){
+ sqlite3_str_appendf(&acc, "%Q.", pCsr->azArg[1]);
+ }
+ sqlite3_str_appendall(&acc, pTab->pName->zName);
+ if( pCsr->azArg[0] ){
+ sqlite3_str_appendf(&acc, "=%Q", pCsr->azArg[0]);
+ }
+ zSql = sqlite3StrAccumFinish(&acc);
+ if( zSql==0 ) return SQLITE_NOMEM;
+ rc = sqlite3_prepare_v2(pTab->db, zSql, -1, &pCsr->pPragma, 0);
+ sqlite3_free(zSql);
+ if( rc!=SQLITE_OK ){
+ pTab->base.zErrMsg = sqlite3_mprintf("%s", sqlite3_errmsg(pTab->db));
+ return rc;
+ }
+ return pragmaVtabNext(pVtabCursor);
+}
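+
+/* As a concrete illustration of the statement constructed above: for the
+** query SELECT * FROM pragma_table_info('t1'), this routine prepares and
+** steps the statement:
+**
+**    PRAGMA table_info='t1'
+*/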
+
+/*
+** Pragma virtual table module xEof method.
+*/
+static int pragmaVtabEof(sqlite3_vtab_cursor *pVtabCursor){
+ PragmaVtabCursor *pCsr = (PragmaVtabCursor*)pVtabCursor;
+ return (pCsr->pPragma==0);
+}
+
+/* The xColumn method simply returns the corresponding column from
+** the PRAGMA.
+*/
+static int pragmaVtabColumn(
+ sqlite3_vtab_cursor *pVtabCursor,
+ sqlite3_context *ctx,
+ int i
+){
+ PragmaVtabCursor *pCsr = (PragmaVtabCursor*)pVtabCursor;
+ PragmaVtab *pTab = (PragmaVtab*)(pVtabCursor->pVtab);
+ if( i<pTab->iHidden ){
+ sqlite3_result_value(ctx, sqlite3_column_value(pCsr->pPragma, i));
+ }else{
+ sqlite3_result_text(ctx, pCsr->azArg[i-pTab->iHidden],-1,SQLITE_TRANSIENT);
+ }
+ return SQLITE_OK;
+}
+
+/*
+** Pragma virtual table module xRowid method.
+*/
+static int pragmaVtabRowid(sqlite3_vtab_cursor *pVtabCursor, sqlite_int64 *p){
+ PragmaVtabCursor *pCsr = (PragmaVtabCursor*)pVtabCursor;
+ *p = pCsr->iRowid;
+ return SQLITE_OK;
+}
+
+/* The pragma virtual table object */
+static const sqlite3_module pragmaVtabModule = {
+ 0, /* iVersion */
+ 0, /* xCreate - create a table */
+ pragmaVtabConnect, /* xConnect - connect to an existing table */
+ pragmaVtabBestIndex, /* xBestIndex - Determine search strategy */
+ pragmaVtabDisconnect, /* xDisconnect - Disconnect from a table */
+ 0, /* xDestroy - Drop a table */
+ pragmaVtabOpen, /* xOpen - open a cursor */
+ pragmaVtabClose, /* xClose - close a cursor */
+ pragmaVtabFilter, /* xFilter - configure scan constraints */
+ pragmaVtabNext, /* xNext - advance a cursor */
+ pragmaVtabEof, /* xEof */
+ pragmaVtabColumn, /* xColumn - read data */
+ pragmaVtabRowid, /* xRowid - read data */
+ 0, /* xUpdate - write data */
+ 0, /* xBegin - begin transaction */
+ 0, /* xSync - sync transaction */
+ 0, /* xCommit - commit transaction */
+ 0, /* xRollback - rollback transaction */
+ 0, /* xFindFunction - function overloading */
+ 0, /* xRename - rename the table */
+ 0, /* xSavepoint */
+ 0, /* xRelease */
+ 0, /* xRollbackTo */
+ 0 /* xShadowName */
+};
+
+/*
+** Check to see if zTabName is really the name of a pragma. If it is,
+** then register an eponymous virtual table for that pragma and return
+** a pointer to the Module object for the new virtual table.
+*/
+SQLITE_PRIVATE Module *sqlite3PragmaVtabRegister(sqlite3 *db, const char *zName){
+ const PragmaName *pName;
+ assert( sqlite3_strnicmp(zName, "pragma_", 7)==0 );
+ pName = pragmaLocate(zName+7);
+ if( pName==0 ) return 0;
+ if( (pName->mPragFlg & (PragFlg_Result0|PragFlg_Result1))==0 ) return 0;
+ assert( sqlite3HashFind(&db->aModule, zName)==0 );
+ return sqlite3VtabCreateModule(db, zName, &pragmaVtabModule, (void*)pName, 0);
+}
+
+#endif /* SQLITE_OMIT_VIRTUALTABLE */
+
+#endif /* SQLITE_OMIT_PRAGMA */
+
+/************** End of pragma.c **********************************************/
+/************** Begin file prepare.c *****************************************/
+/*
+** 2005 May 25
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+** This file contains the implementation of the sqlite3_prepare()
+** interface, and routines that contribute to loading the database schema
+** from disk.
+*/
+/* #include "sqliteInt.h" */
+
+/*
+** Fill the InitData structure with an error message that indicates
+** that the database is corrupt.
+*/
+static void corruptSchema(
+ InitData *pData, /* Initialization context */
+ const char *zObj, /* Object being parsed at the point of error */
+ const char *zExtra /* Error information */
+){
+ sqlite3 *db = pData->db;
+ if( db->mallocFailed ){
+ pData->rc = SQLITE_NOMEM_BKPT;
+ }else if( pData->pzErrMsg[0]!=0 ){
+    /* An error message has already been generated. Do not overwrite it */
+ }else if( pData->mInitFlags & INITFLAG_AlterTable ){
+ *pData->pzErrMsg = sqlite3DbStrDup(db, zExtra);
+ pData->rc = SQLITE_ERROR;
+ }else if( db->flags & SQLITE_WriteSchema ){
+ pData->rc = SQLITE_CORRUPT_BKPT;
+ }else{
+ char *z;
+ if( zObj==0 ) zObj = "?";
+ z = sqlite3MPrintf(db, "malformed database schema (%s)", zObj);
+ if( zExtra && zExtra[0] ) z = sqlite3MPrintf(db, "%z - %s", z, zExtra);
+ *pData->pzErrMsg = z;
+ pData->rc = SQLITE_CORRUPT_BKPT;
+ }
+}
+
+/*
+** Check to see if any sibling index (another index on the same table)
+** of pIndex has the same root page number, and if it does, return true.
+** This would indicate a corrupt schema.
+*/
+SQLITE_PRIVATE int sqlite3IndexHasDuplicateRootPage(Index *pIndex){
+ Index *p;
+ for(p=pIndex->pTable->pIndex; p; p=p->pNext){
+ if( p->tnum==pIndex->tnum && p!=pIndex ) return 1;
+ }
+ return 0;
+}
+
+/* forward declaration */
+static int sqlite3Prepare(
+ sqlite3 *db, /* Database handle. */
+ const char *zSql, /* UTF-8 encoded SQL statement. */
+ int nBytes, /* Length of zSql in bytes. */
+ u32 prepFlags, /* Zero or more SQLITE_PREPARE_* flags */
+ Vdbe *pReprepare, /* VM being reprepared */
+ sqlite3_stmt **ppStmt, /* OUT: A pointer to the prepared statement */
+ const char **pzTail /* OUT: End of parsed string */
+);
+
+
+/*
+** This is the callback routine for the code that initializes the
+** database. See sqlite3Init() below for additional information.
+** This routine is also called from the OP_ParseSchema opcode of the VDBE.
+**
+** Each callback contains the following information:
+**
+** argv[0] = type of object: "table", "index", "trigger", or "view".
+** argv[1] = name of thing being created
+** argv[2] = associated table if an index or trigger
+** argv[3] = root page number for table or index. 0 for trigger or view.
+** argv[4] = SQL text for the CREATE statement.
+**
+*/
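+/* For instance, a row describing an ordinary table might arrive with the
+** (hypothetical) values:
+**
+**    argv[0] = "table"
+**    argv[1] = "t1"
+**    argv[2] = "t1"
+**    argv[3] = "2"
+**    argv[4] = "CREATE TABLE t1(x INTEGER PRIMARY KEY)"
+*/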
+SQLITE_PRIVATE int sqlite3InitCallback(void *pInit, int argc, char **argv, char **NotUsed){
+ InitData *pData = (InitData*)pInit;
+ sqlite3 *db = pData->db;
+ int iDb = pData->iDb;
+
+ assert( argc==5 );
+ UNUSED_PARAMETER2(NotUsed, argc);
+ assert( sqlite3_mutex_held(db->mutex) );
+ db->mDbFlags |= DBFLAG_EncodingFixed;
+ pData->nInitRow++;
+ if( db->mallocFailed ){
+ corruptSchema(pData, argv[1], 0);
+ return 1;
+ }
+
+ assert( iDb>=0 && iDb<db->nDb );
+ if( argv==0 ) return 0; /* Might happen if EMPTY_RESULT_CALLBACKS are on */
+ if( argv[3]==0 ){
+ corruptSchema(pData, argv[1], 0);
+ }else if( sqlite3_strnicmp(argv[4],"create ",7)==0 ){
+ /* Call the parser to process a CREATE TABLE, INDEX or VIEW.
+ ** But because db->init.busy is set to 1, no VDBE code is generated
+ ** or executed. All the parser does is build the internal data
+ ** structures that describe the table, index, or view.
+ */
+ int rc;
+ u8 saved_iDb = db->init.iDb;
+ sqlite3_stmt *pStmt;
+ TESTONLY(int rcp); /* Return code from sqlite3_prepare() */
+
+ assert( db->init.busy );
+ db->init.iDb = iDb;
+ if( sqlite3GetUInt32(argv[3], &db->init.newTnum)==0
+ || (db->init.newTnum>pData->mxPage && pData->mxPage>0)
+ ){
+ if( sqlite3Config.bExtraSchemaChecks ){
+ corruptSchema(pData, argv[1], "invalid rootpage");
+ }
+ }
+ db->init.orphanTrigger = 0;
+ db->init.azInit = argv;
+ pStmt = 0;
+ TESTONLY(rcp = ) sqlite3Prepare(db, argv[4], -1, 0, 0, &pStmt, 0);
+ rc = db->errCode;
+ assert( (rc&0xFF)==(rcp&0xFF) );
+ db->init.iDb = saved_iDb;
+ /* assert( saved_iDb==0 || (db->mDbFlags & DBFLAG_Vacuum)!=0 ); */
+ if( SQLITE_OK!=rc ){
+ if( db->init.orphanTrigger ){
+ assert( iDb==1 );
+ }else{
+ if( rc > pData->rc ) pData->rc = rc;
+ if( rc==SQLITE_NOMEM ){
+ sqlite3OomFault(db);
+ }else if( rc!=SQLITE_INTERRUPT && (rc&0xFF)!=SQLITE_LOCKED ){
+ corruptSchema(pData, argv[1], sqlite3_errmsg(db));
+ }
+ }
+ }
+ sqlite3_finalize(pStmt);
+ }else if( argv[1]==0 || (argv[4]!=0 && argv[4][0]!=0) ){
+ corruptSchema(pData, argv[1], 0);
+ }else{
+ /* If the SQL column is blank it means this is an index that
+ ** was created to be the PRIMARY KEY or to fulfill a UNIQUE
+ ** constraint for a CREATE TABLE. The index should have already
+ ** been created when we processed the CREATE TABLE. All we have
+ ** to do here is record the root page number for that index.
+ */
+ Index *pIndex;
+ pIndex = sqlite3FindIndex(db, argv[1], db->aDb[iDb].zDbSName);
+ if( pIndex==0 ){
+ corruptSchema(pData, argv[1], "orphan index");
+ }else
+ if( sqlite3GetUInt32(argv[3],&pIndex->tnum)==0
+ || pIndex->tnum<2
+ || pIndex->tnum>pData->mxPage
+ || sqlite3IndexHasDuplicateRootPage(pIndex)
+ ){
+ if( sqlite3Config.bExtraSchemaChecks ){
+ corruptSchema(pData, argv[1], "invalid rootpage");
+ }
+ }
+ }
+ return 0;
+}
+
+/*
+** Attempt to read the database schema and initialize internal
+** data structures for a single database file. The index of the
+** database file is given by iDb. iDb==0 is used for the main
+** database. iDb==1 should never be used. iDb>=2 is used for
+** auxiliary databases. Return one of the SQLITE_ error codes to
+** indicate success or failure.
+*/
+SQLITE_PRIVATE int sqlite3InitOne(sqlite3 *db, int iDb, char **pzErrMsg, u32 mFlags){
+ int rc;
+ int i;
+#ifndef SQLITE_OMIT_DEPRECATED
+ int size;
+#endif
+ Db *pDb;
+ char const *azArg[6];
+ int meta[5];
+ InitData initData;
+ const char *zSchemaTabName;
+ int openedTransaction = 0;
+ int mask = ((db->mDbFlags & DBFLAG_EncodingFixed) | ~DBFLAG_EncodingFixed);
+
+ assert( (db->mDbFlags & DBFLAG_SchemaKnownOk)==0 );
+ assert( iDb>=0 && iDb<db->nDb );
+ assert( db->aDb[iDb].pSchema );
+ assert( sqlite3_mutex_held(db->mutex) );
+ assert( iDb==1 || sqlite3BtreeHoldsMutex(db->aDb[iDb].pBt) );
+
+ db->init.busy = 1;
+
+  /* Construct the in-memory representation of the schema tables
+  ** (sqlite_schema or sqlite_temp_schema) by invoking the parser directly.
+  ** The appropriate
+ ** table name will be inserted automatically by the parser so we can just
+ ** use the abbreviation "x" here. The parser will also automatically tag
+ ** the schema table as read-only. */
+ azArg[0] = "table";
+ azArg[1] = zSchemaTabName = SCHEMA_TABLE(iDb);
+ azArg[2] = azArg[1];
+ azArg[3] = "1";
+ azArg[4] = "CREATE TABLE x(type text,name text,tbl_name text,"
+ "rootpage int,sql text)";
+ azArg[5] = 0;
+ initData.db = db;
+ initData.iDb = iDb;
+ initData.rc = SQLITE_OK;
+ initData.pzErrMsg = pzErrMsg;
+ initData.mInitFlags = mFlags;
+ initData.nInitRow = 0;
+ initData.mxPage = 0;
+ sqlite3InitCallback(&initData, 5, (char **)azArg, 0);
+ db->mDbFlags &= mask;
+ if( initData.rc ){
+ rc = initData.rc;
+ goto error_out;
+ }
+
+ /* Create a cursor to hold the database open
+ */
+ pDb = &db->aDb[iDb];
+ if( pDb->pBt==0 ){
+ assert( iDb==1 );
+ DbSetProperty(db, 1, DB_SchemaLoaded);
+ rc = SQLITE_OK;
+ goto error_out;
+ }
+
+ /* If there is not already a read-only (or read-write) transaction opened
+ ** on the b-tree database, open one now. If a transaction is opened, it
+ ** will be closed before this function returns. */
+ sqlite3BtreeEnter(pDb->pBt);
+ if( !sqlite3BtreeIsInReadTrans(pDb->pBt) ){
+ rc = sqlite3BtreeBeginTrans(pDb->pBt, 0, 0);
+ if( rc!=SQLITE_OK ){
+ sqlite3SetString(pzErrMsg, db, sqlite3ErrStr(rc));
+ goto initone_error_out;
+ }
+ openedTransaction = 1;
+ }
+
+ /* Get the database meta information.
+ **
+ ** Meta values are as follows:
+ ** meta[0] Schema cookie. Changes with each schema change.
+ ** meta[1] File format of schema layer.
+ ** meta[2] Size of the page cache.
+ ** meta[3] Largest rootpage (auto/incr_vacuum mode)
+ ** meta[4] Db text encoding. 1:UTF-8 2:UTF-16LE 3:UTF-16BE
+ ** meta[5] User version
+ ** meta[6] Incremental vacuum mode
+ ** meta[7] unused
+ ** meta[8] unused
+ ** meta[9] unused
+ **
+ ** Note: The #defined SQLITE_UTF* symbols in sqliteInt.h correspond to
+ ** the possible values of meta[4].
+ */
+ for(i=0; i<ArraySize(meta); i++){
+ sqlite3BtreeGetMeta(pDb->pBt, i+1, (u32 *)&meta[i]);
+ }
+ if( (db->flags & SQLITE_ResetDatabase)!=0 ){
+ memset(meta, 0, sizeof(meta));
+ }
+ pDb->pSchema->schema_cookie = meta[BTREE_SCHEMA_VERSION-1];
+
+ /* If opening a non-empty database, check the text encoding. For the
+ ** main database, set sqlite3.enc to the encoding of the main database.
+ ** For an attached db, it is an error if the encoding is not the same
+ ** as sqlite3.enc.
+ */
+ if( meta[BTREE_TEXT_ENCODING-1] ){ /* text encoding */
+ if( iDb==0 && (db->mDbFlags & DBFLAG_EncodingFixed)==0 ){
+ u8 encoding;
+#ifndef SQLITE_OMIT_UTF16
+ /* If opening the main database, set ENC(db). */
+ encoding = (u8)meta[BTREE_TEXT_ENCODING-1] & 3;
+ if( encoding==0 ) encoding = SQLITE_UTF8;
+#else
+ encoding = SQLITE_UTF8;
+#endif
+ sqlite3SetTextEncoding(db, encoding);
+ }else{
+      /* If opening an attached database, the encoding must match ENC(db) */
+ if( (meta[BTREE_TEXT_ENCODING-1] & 3)!=ENC(db) ){
+ sqlite3SetString(pzErrMsg, db, "attached databases must use the same"
+ " text encoding as main database");
+ rc = SQLITE_ERROR;
+ goto initone_error_out;
+ }
+ }
+ }
+ pDb->pSchema->enc = ENC(db);
+
+ if( pDb->pSchema->cache_size==0 ){
+#ifndef SQLITE_OMIT_DEPRECATED
+ size = sqlite3AbsInt32(meta[BTREE_DEFAULT_CACHE_SIZE-1]);
+ if( size==0 ){ size = SQLITE_DEFAULT_CACHE_SIZE; }
+ pDb->pSchema->cache_size = size;
+#else
+ pDb->pSchema->cache_size = SQLITE_DEFAULT_CACHE_SIZE;
+#endif
+ sqlite3BtreeSetCacheSize(pDb->pBt, pDb->pSchema->cache_size);
+ }
+
+ /*
+ ** file_format==1 Version 3.0.0.
+ ** file_format==2 Version 3.1.3. // ALTER TABLE ADD COLUMN
+ ** file_format==3 Version 3.1.4. // ditto but with non-NULL defaults
+ ** file_format==4 Version 3.3.0. // DESC indices. Boolean constants
+ */
+ pDb->pSchema->file_format = (u8)meta[BTREE_FILE_FORMAT-1];
+ if( pDb->pSchema->file_format==0 ){
+ pDb->pSchema->file_format = 1;
+ }
+ if( pDb->pSchema->file_format>SQLITE_MAX_FILE_FORMAT ){
+ sqlite3SetString(pzErrMsg, db, "unsupported file format");
+ rc = SQLITE_ERROR;
+ goto initone_error_out;
+ }
+
+ /* Ticket #2804: When we open a database in the newer file format,
+ ** clear the legacy_file_format pragma flag so that a VACUUM will
+ ** not downgrade the database and thus invalidate any descending
+ ** indices that the user might have created.
+ */
+ if( iDb==0 && meta[BTREE_FILE_FORMAT-1]>=4 ){
+ db->flags &= ~(u64)SQLITE_LegacyFileFmt;
+ }
+
+ /* Read the schema information out of the schema tables
+ */
+ assert( db->init.busy );
+ initData.mxPage = sqlite3BtreeLastPage(pDb->pBt);
+ {
+ char *zSql;
+ zSql = sqlite3MPrintf(db,
+ "SELECT*FROM\"%w\".%s ORDER BY rowid",
+ db->aDb[iDb].zDbSName, zSchemaTabName);
+#ifndef SQLITE_OMIT_AUTHORIZATION
+ {
+ sqlite3_xauth xAuth;
+ xAuth = db->xAuth;
+ db->xAuth = 0;
+#endif
+ rc = sqlite3_exec(db, zSql, sqlite3InitCallback, &initData, 0);
+#ifndef SQLITE_OMIT_AUTHORIZATION
+ db->xAuth = xAuth;
+ }
+#endif
+ if( rc==SQLITE_OK ) rc = initData.rc;
+ sqlite3DbFree(db, zSql);
+#ifndef SQLITE_OMIT_ANALYZE
+ if( rc==SQLITE_OK ){
+ sqlite3AnalysisLoad(db, iDb);
+ }
+#endif
+ }
+ if( db->mallocFailed ){
+ rc = SQLITE_NOMEM_BKPT;
+ sqlite3ResetAllSchemasOfConnection(db);
+ }
+ if( rc==SQLITE_OK || (db->flags&SQLITE_NoSchemaError)){
+ /* Black magic: If the SQLITE_NoSchemaError flag is set, then consider
+ ** the schema loaded, even if errors occurred. In this situation the
+ ** current sqlite3_prepare() operation will fail, but the following one
+ ** will attempt to compile the supplied statement against whatever subset
+ ** of the schema was loaded before the error occurred. The primary
+ ** purpose of this is to allow access to the sqlite_schema table
+ ** even when its contents have been corrupted.
+ */
+ DbSetProperty(db, iDb, DB_SchemaLoaded);
+ rc = SQLITE_OK;
+ }
+
+ /* Jump here for an error that occurs after successfully allocating
+ ** curMain and calling sqlite3BtreeEnter(). For an error that occurs
+ ** before that point, jump to error_out.
+ */
+initone_error_out:
+ if( openedTransaction ){
+ sqlite3BtreeCommit(pDb->pBt);
+ }
+ sqlite3BtreeLeave(pDb->pBt);
+
+error_out:
+ if( rc ){
+ if( rc==SQLITE_NOMEM || rc==SQLITE_IOERR_NOMEM ){
+ sqlite3OomFault(db);
+ }
+ sqlite3ResetOneSchema(db, iDb);
+ }
+ db->init.busy = 0;
+ return rc;
+}
+
+/*
+** Initialize all database files - the main database file, the file
+** used to store temporary tables, and any additional database files
+** created using ATTACH statements. Return a success code. If an
+** error occurs, write an error message into *pzErrMsg.
+**
+** After a database is initialized, the DB_SchemaLoaded bit is set
+** in the flags field of the Db structure.
+*/
+SQLITE_PRIVATE int sqlite3Init(sqlite3 *db, char **pzErrMsg){
+ int i, rc;
+ int commit_internal = !(db->mDbFlags&DBFLAG_SchemaChange);
+
+ assert( sqlite3_mutex_held(db->mutex) );
+ assert( sqlite3BtreeHoldsMutex(db->aDb[0].pBt) );
+ assert( db->init.busy==0 );
+ ENC(db) = SCHEMA_ENC(db);
+ assert( db->nDb>0 );
+ /* Do the main schema first */
+ if( !DbHasProperty(db, 0, DB_SchemaLoaded) ){
+ rc = sqlite3InitOne(db, 0, pzErrMsg, 0);
+ if( rc ) return rc;
+ }
+ /* All other schemas after the main schema. The "temp" schema must be last */
+ for(i=db->nDb-1; i>0; i--){
+ assert( i==1 || sqlite3BtreeHoldsMutex(db->aDb[i].pBt) );
+ if( !DbHasProperty(db, i, DB_SchemaLoaded) ){
+ rc = sqlite3InitOne(db, i, pzErrMsg, 0);
+ if( rc ) return rc;
+ }
+ }
+ if( commit_internal ){
+ sqlite3CommitInternalChanges(db);
+ }
+ return SQLITE_OK;
+}
+
+/*
+** This routine is a no-op if the database schema is already initialized.
+** Otherwise, the schema is loaded. An error code is returned.
+*/
+SQLITE_PRIVATE int sqlite3ReadSchema(Parse *pParse){
+ int rc = SQLITE_OK;
+ sqlite3 *db = pParse->db;
+ assert( sqlite3_mutex_held(db->mutex) );
+ if( !db->init.busy ){
+ rc = sqlite3Init(db, &pParse->zErrMsg);
+ if( rc!=SQLITE_OK ){
+ pParse->rc = rc;
+ pParse->nErr++;
+ }else if( db->noSharedCache ){
+ db->mDbFlags |= DBFLAG_SchemaKnownOk;
+ }
+ }
+ return rc;
+}
+
+
+/*
+** Check schema cookies in all databases. If any cookie is out
+** of date, set pParse->rc to SQLITE_SCHEMA. If all schema cookies
+** are current, make no changes to pParse->rc.
+*/
+static void schemaIsValid(Parse *pParse){
+ sqlite3 *db = pParse->db;
+ int iDb;
+ int rc;
+ int cookie;
+
+ assert( pParse->checkSchema );
+ assert( sqlite3_mutex_held(db->mutex) );
+ for(iDb=0; iDb<db->nDb; iDb++){
+ int openedTransaction = 0; /* True if a transaction is opened */
+ Btree *pBt = db->aDb[iDb].pBt; /* Btree database to read cookie from */
+ if( pBt==0 ) continue;
+
+ /* If there is not already a read-only (or read-write) transaction opened
+ ** on the b-tree database, open one now. If a transaction is opened, it
+ ** will be closed immediately after reading the meta-value. */
+ if( !sqlite3BtreeIsInReadTrans(pBt) ){
+ rc = sqlite3BtreeBeginTrans(pBt, 0, 0);
+ if( rc==SQLITE_NOMEM || rc==SQLITE_IOERR_NOMEM ){
+ sqlite3OomFault(db);
+ }
+ if( rc!=SQLITE_OK ) return;
+ openedTransaction = 1;
+ }
+
+ /* Read the schema cookie from the database. If it does not match the
+ ** value stored as part of the in-memory schema representation,
+ ** set Parse.rc to SQLITE_SCHEMA. */
+ sqlite3BtreeGetMeta(pBt, BTREE_SCHEMA_VERSION, (u32 *)&cookie);
+ assert( sqlite3SchemaMutexHeld(db, iDb, 0) );
+ if( cookie!=db->aDb[iDb].pSchema->schema_cookie ){
+ sqlite3ResetOneSchema(db, iDb);
+ pParse->rc = SQLITE_SCHEMA;
+ }
+
+ /* Close the transaction, if one was opened. */
+ if( openedTransaction ){
+ sqlite3BtreeCommit(pBt);
+ }
+ }
+}
+
+/*
+** Convert a schema pointer into the iDb index that indicates
+** which database file in db->aDb[] the schema refers to.
+**
+** If the same database is attached more than once, the first
+** attached database is returned.
+*/
+SQLITE_PRIVATE int sqlite3SchemaToIndex(sqlite3 *db, Schema *pSchema){
+ int i = -32768;
+
+ /* If pSchema is NULL, then return -32768. This happens when code in
+ ** expr.c is trying to resolve a reference to a transient table (i.e. one
+ ** created by a sub-select). In this case the return value of this
+ ** function should never be used.
+ **
+ ** We return -32768 instead of the more usual -1 simply because using
+ ** -32768 as the incorrect index into db->aDb[] is much
+ ** more likely to cause a segfault than -1 (of course there are assert()
+ ** statements too, but it never hurts to play the odds) and
+ ** -32768 will still fit into a 16-bit signed integer.
+ */
+ assert( sqlite3_mutex_held(db->mutex) );
+ if( pSchema ){
+ for(i=0; 1; i++){
+ assert( i<db->nDb );
+ if( db->aDb[i].pSchema==pSchema ){
+ break;
+ }
+ }
+ assert( i>=0 && i<db->nDb );
+ }
+ return i;
+}
+
+/*
+** Deallocate a single AggInfo object
+*/
+static void agginfoFree(sqlite3 *db, AggInfo *p){
+ sqlite3DbFree(db, p->aCol);
+ sqlite3DbFree(db, p->aFunc);
+ sqlite3DbFree(db, p);
+}
+
+/*
+** Free all memory allocations in the pParse object
+*/
+SQLITE_PRIVATE void sqlite3ParserReset(Parse *pParse){
+ sqlite3 *db = pParse->db;
+ AggInfo *pThis = pParse->pAggList;
+ while( pThis ){
+ AggInfo *pNext = pThis->pNext;
+ agginfoFree(db, pThis);
+ pThis = pNext;
+ }
+ sqlite3DbFree(db, pParse->aLabel);
+ sqlite3ExprListDelete(db, pParse->pConstExpr);
+ if( db ){
+ assert( db->lookaside.bDisable >= pParse->disableLookaside );
+ db->lookaside.bDisable -= pParse->disableLookaside;
+ db->lookaside.sz = db->lookaside.bDisable ? 0 : db->lookaside.szTrue;
+ }
+ pParse->disableLookaside = 0;
+}
+
+/*
+** Compile the UTF-8 encoded SQL statement zSql into a statement handle.
+*/
+static int sqlite3Prepare(
+ sqlite3 *db, /* Database handle. */
+ const char *zSql, /* UTF-8 encoded SQL statement. */
+ int nBytes, /* Length of zSql in bytes. */
+ u32 prepFlags, /* Zero or more SQLITE_PREPARE_* flags */
+ Vdbe *pReprepare, /* VM being reprepared */
+ sqlite3_stmt **ppStmt, /* OUT: A pointer to the prepared statement */
+ const char **pzTail /* OUT: End of parsed string */
+){
+ char *zErrMsg = 0; /* Error message */
+ int rc = SQLITE_OK; /* Result code */
+ int i; /* Loop counter */
+ Parse sParse; /* Parsing context */
+
+ memset(&sParse, 0, PARSE_HDR_SZ);
+ memset(PARSE_TAIL(&sParse), 0, PARSE_TAIL_SZ);
+ sParse.pReprepare = pReprepare;
+ assert( ppStmt && *ppStmt==0 );
+ /* assert( !db->mallocFailed ); // not true with SQLITE_USE_ALLOCA */
+ assert( sqlite3_mutex_held(db->mutex) );
+
+  /* For a prepared statement that is intended for long-term use,
+  ** avoid the use of lookaside memory.
+ */
+ if( prepFlags & SQLITE_PREPARE_PERSISTENT ){
+ sParse.disableLookaside++;
+ DisableLookaside;
+ }
+ sParse.disableVtab = (prepFlags & SQLITE_PREPARE_NO_VTAB)!=0;
+
+ /* Check to verify that it is possible to get a read lock on all
+ ** database schemas. The inability to get a read lock indicates that
+ ** some other database connection is holding a write-lock, which in
+ ** turn means that the other connection has made uncommitted changes
+ ** to the schema.
+ **
+ ** Were we to proceed and prepare the statement against the uncommitted
+ ** schema changes and if those schema changes are subsequently rolled
+ ** back and different changes are made in their place, then when this
+ ** prepared statement goes to run the schema cookie would fail to detect
+ ** the schema change. Disaster would follow.
+ **
+ ** This thread is currently holding mutexes on all Btrees (because
+ ** of the sqlite3BtreeEnterAll() in sqlite3LockAndPrepare()) so it
+ ** is not possible for another thread to start a new schema change
+ ** while this routine is running. Hence, we do not need to hold
+ ** locks on the schema, we just need to make sure nobody else is
+ ** holding them.
+ **
+ ** Note that setting READ_UNCOMMITTED overrides most lock detection,
+ ** but it does *not* override schema lock detection, so this all still
+ ** works even if READ_UNCOMMITTED is set.
+ */
+ if( !db->noSharedCache ){
+ for(i=0; i<db->nDb; i++) {
+ Btree *pBt = db->aDb[i].pBt;
+ if( pBt ){
+ assert( sqlite3BtreeHoldsMutex(pBt) );
+ rc = sqlite3BtreeSchemaLocked(pBt);
+ if( rc ){
+ const char *zDb = db->aDb[i].zDbSName;
+ sqlite3ErrorWithMsg(db, rc, "database schema is locked: %s", zDb);
+ testcase( db->flags & SQLITE_ReadUncommit );
+ goto end_prepare;
+ }
+ }
+ }
+ }
+
+ sqlite3VtabUnlockList(db);
+
+ sParse.db = db;
+ if( nBytes>=0 && (nBytes==0 || zSql[nBytes-1]!=0) ){
+ char *zSqlCopy;
+ int mxLen = db->aLimit[SQLITE_LIMIT_SQL_LENGTH];
+ testcase( nBytes==mxLen );
+ testcase( nBytes==mxLen+1 );
+ if( nBytes>mxLen ){
+ sqlite3ErrorWithMsg(db, SQLITE_TOOBIG, "statement too long");
+ rc = sqlite3ApiExit(db, SQLITE_TOOBIG);
+ goto end_prepare;
+ }
+ zSqlCopy = sqlite3DbStrNDup(db, zSql, nBytes);
+ if( zSqlCopy ){
+ sqlite3RunParser(&sParse, zSqlCopy, &zErrMsg);
+ sParse.zTail = &zSql[sParse.zTail-zSqlCopy];
+ sqlite3DbFree(db, zSqlCopy);
+ }else{
+ sParse.zTail = &zSql[nBytes];
+ }
+ }else{
+ sqlite3RunParser(&sParse, zSql, &zErrMsg);
+ }
+ assert( 0==sParse.nQueryLoop );
+
+ if( sParse.rc==SQLITE_DONE ){
+ sParse.rc = SQLITE_OK;
+ }
+ if( sParse.checkSchema ){
+ schemaIsValid(&sParse);
+ }
+ if( pzTail ){
+ *pzTail = sParse.zTail;
+ }
+
+ if( db->init.busy==0 ){
+ sqlite3VdbeSetSql(sParse.pVdbe, zSql, (int)(sParse.zTail-zSql), prepFlags);
+ }
+ if( db->mallocFailed ){
+ sParse.rc = SQLITE_NOMEM_BKPT;
+ }
+ rc = sParse.rc;
+ if( rc!=SQLITE_OK ){
+ if( sParse.pVdbe ) sqlite3VdbeFinalize(sParse.pVdbe);
+ assert(!(*ppStmt));
+ }else{
+ *ppStmt = (sqlite3_stmt*)sParse.pVdbe;
+ }
+
+ if( zErrMsg ){
+ sqlite3ErrorWithMsg(db, rc, "%s", zErrMsg);
+ sqlite3DbFree(db, zErrMsg);
+ }else{
+ sqlite3Error(db, rc);
+ }
+
+ /* Delete any TriggerPrg structures allocated while parsing this statement. */
+ while( sParse.pTriggerPrg ){
+ TriggerPrg *pT = sParse.pTriggerPrg;
+ sParse.pTriggerPrg = pT->pNext;
+ sqlite3DbFree(db, pT);
+ }
+
+end_prepare:
+
+ sqlite3ParserReset(&sParse);
+ return rc;
+}
+static int sqlite3LockAndPrepare(
+ sqlite3 *db, /* Database handle. */
+ const char *zSql, /* UTF-8 encoded SQL statement. */
+ int nBytes, /* Length of zSql in bytes. */
+ u32 prepFlags, /* Zero or more SQLITE_PREPARE_* flags */
+ Vdbe *pOld, /* VM being reprepared */
+ sqlite3_stmt **ppStmt, /* OUT: A pointer to the prepared statement */
+ const char **pzTail /* OUT: End of parsed string */
+){
+ int rc;
+ int cnt = 0;
+
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( ppStmt==0 ) return SQLITE_MISUSE_BKPT;
+#endif
+ *ppStmt = 0;
+ if( !sqlite3SafetyCheckOk(db)||zSql==0 ){
+ return SQLITE_MISUSE_BKPT;
+ }
+ sqlite3_mutex_enter(db->mutex);
+ sqlite3BtreeEnterAll(db);
+ do{
+ /* Make multiple attempts to compile the SQL, until it either succeeds
+ ** or encounters a permanent error. A schema problem after one schema
+ ** reset is considered a permanent error. */
+ rc = sqlite3Prepare(db, zSql, nBytes, prepFlags, pOld, ppStmt, pzTail);
+ assert( rc==SQLITE_OK || *ppStmt==0 );
+ }while( rc==SQLITE_ERROR_RETRY
+ || (rc==SQLITE_SCHEMA && (sqlite3ResetOneSchema(db,-1), cnt++)==0) );
+ sqlite3BtreeLeaveAll(db);
+ rc = sqlite3ApiExit(db, rc);
+ assert( (rc&db->errMask)==rc );
+ sqlite3_mutex_leave(db->mutex);
+ return rc;
+}
+
+
+/*
+** Rerun the compilation of a statement after a schema change.
+**
+** If the statement is successfully recompiled, return SQLITE_OK. Otherwise,
+** if the statement cannot be recompiled because another connection has
+** locked the sqlite3_schema table, return SQLITE_LOCKED. If any other error
+** occurs, return SQLITE_SCHEMA.
+*/
+SQLITE_PRIVATE int sqlite3Reprepare(Vdbe *p){
+ int rc;
+ sqlite3_stmt *pNew;
+ const char *zSql;
+ sqlite3 *db;
+ u8 prepFlags;
+
+ assert( sqlite3_mutex_held(sqlite3VdbeDb(p)->mutex) );
+ zSql = sqlite3_sql((sqlite3_stmt *)p);
+ assert( zSql!=0 ); /* Reprepare only called for prepare_v2() statements */
+ db = sqlite3VdbeDb(p);
+ assert( sqlite3_mutex_held(db->mutex) );
+ prepFlags = sqlite3VdbePrepareFlags(p);
+ rc = sqlite3LockAndPrepare(db, zSql, -1, prepFlags, p, &pNew, 0);
+ if( rc ){
+ if( rc==SQLITE_NOMEM ){
+ sqlite3OomFault(db);
+ }
+ assert( pNew==0 );
+ return rc;
+ }else{
+ assert( pNew!=0 );
+ }
+ sqlite3VdbeSwap((Vdbe*)pNew, p);
+ sqlite3TransferBindings(pNew, (sqlite3_stmt*)p);
+ sqlite3VdbeResetStepResult((Vdbe*)pNew);
+ sqlite3VdbeFinalize((Vdbe*)pNew);
+ return SQLITE_OK;
+}
+
+
+/*
+** Two versions of the official API: legacy and new. In the legacy
+** version, the original SQL text is not saved in the prepared statement
+** and so if a schema change occurs, SQLITE_SCHEMA is returned by
+** sqlite3_step(). In the new version, the original SQL text is retained
+** and the statement is automatically recompiled if a schema change
+** occurs.
+*/
+SQLITE_API int sqlite3_prepare(
+ sqlite3 *db, /* Database handle. */
+ const char *zSql, /* UTF-8 encoded SQL statement. */
+ int nBytes, /* Length of zSql in bytes. */
+ sqlite3_stmt **ppStmt, /* OUT: A pointer to the prepared statement */
+ const char **pzTail /* OUT: End of parsed string */
+){
+ int rc;
+ rc = sqlite3LockAndPrepare(db,zSql,nBytes,0,0,ppStmt,pzTail);
+ assert( rc==SQLITE_OK || ppStmt==0 || *ppStmt==0 ); /* VERIFY: F13021 */
+ return rc;
+}
+SQLITE_API int sqlite3_prepare_v2(
+ sqlite3 *db, /* Database handle. */
+ const char *zSql, /* UTF-8 encoded SQL statement. */
+ int nBytes, /* Length of zSql in bytes. */
+ sqlite3_stmt **ppStmt, /* OUT: A pointer to the prepared statement */
+ const char **pzTail /* OUT: End of parsed string */
+){
+ int rc;
+ /* EVIDENCE-OF: R-37923-12173 The sqlite3_prepare_v2() interface works
+ ** exactly the same as sqlite3_prepare_v3() with a zero prepFlags
+ ** parameter.
+ **
+  ** Proof: the 5th parameter to sqlite3LockAndPrepare is 0. */
+ rc = sqlite3LockAndPrepare(db,zSql,nBytes,SQLITE_PREPARE_SAVESQL,0,
+ ppStmt,pzTail);
+ assert( rc==SQLITE_OK || ppStmt==0 || *ppStmt==0 );
+ return rc;
+}
+SQLITE_API int sqlite3_prepare_v3(
+ sqlite3 *db, /* Database handle. */
+ const char *zSql, /* UTF-8 encoded SQL statement. */
+ int nBytes, /* Length of zSql in bytes. */
+ unsigned int prepFlags, /* Zero or more SQLITE_PREPARE_* flags */
+ sqlite3_stmt **ppStmt, /* OUT: A pointer to the prepared statement */
+ const char **pzTail /* OUT: End of parsed string */
+){
+ int rc;
+ /* EVIDENCE-OF: R-56861-42673 sqlite3_prepare_v3() differs from
+ ** sqlite3_prepare_v2() only in having the extra prepFlags parameter,
+ ** which is a bit array consisting of zero or more of the
+ ** SQLITE_PREPARE_* flags.
+ **
+ ** Proof by comparison to the implementation of sqlite3_prepare_v2()
+ ** directly above. */
+ rc = sqlite3LockAndPrepare(db,zSql,nBytes,
+ SQLITE_PREPARE_SAVESQL|(prepFlags&SQLITE_PREPARE_MASK),
+ 0,ppStmt,pzTail);
+ assert( rc==SQLITE_OK || ppStmt==0 || *ppStmt==0 );
+ return rc;
+}
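+
+/* A minimal sketch of the v3 interface with a persistent statement. Error
+** handling is abbreviated and the table and SQL are hypothetical:
+**
+**    sqlite3_stmt *pStmt = 0;
+**    int rc = sqlite3_prepare_v3(db, "SELECT x FROM t1 WHERE y=?1", -1,
+**                                SQLITE_PREPARE_PERSISTENT, &pStmt, 0);
+**    if( rc==SQLITE_OK ){
+**      sqlite3_bind_int(pStmt, 1, 42);
+**      while( sqlite3_step(pStmt)==SQLITE_ROW ){
+**        int x = sqlite3_column_int(pStmt, 0);
+**      }
+**      sqlite3_finalize(pStmt);
+**    }
+*/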
+
+
+#ifndef SQLITE_OMIT_UTF16
+/*
+** Compile the UTF-16 encoded SQL statement zSql into a statement handle.
+*/
+static int sqlite3Prepare16(
+ sqlite3 *db, /* Database handle. */
+ const void *zSql, /* UTF-16 encoded SQL statement. */
+ int nBytes, /* Length of zSql in bytes. */
+ u32 prepFlags, /* Zero or more SQLITE_PREPARE_* flags */
+ sqlite3_stmt **ppStmt, /* OUT: A pointer to the prepared statement */
+ const void **pzTail /* OUT: End of parsed string */
+){
+ /* This function currently works by first transforming the UTF-16
+ ** encoded string to UTF-8, then invoking sqlite3_prepare(). The
+ ** tricky bit is figuring out the pointer to return in *pzTail.
+ */
+ char *zSql8;
+ const char *zTail8 = 0;
+ int rc = SQLITE_OK;
+
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( ppStmt==0 ) return SQLITE_MISUSE_BKPT;
+#endif
+ *ppStmt = 0;
+ if( !sqlite3SafetyCheckOk(db)||zSql==0 ){
+ return SQLITE_MISUSE_BKPT;
+ }
+ if( nBytes>=0 ){
+ int sz;
+ const char *z = (const char*)zSql;
+ for(sz=0; sz<nBytes && (z[sz]!=0 || z[sz+1]!=0); sz += 2){}
+ nBytes = sz;
+ }
+ sqlite3_mutex_enter(db->mutex);
+ zSql8 = sqlite3Utf16to8(db, zSql, nBytes, SQLITE_UTF16NATIVE);
+ if( zSql8 ){
+ rc = sqlite3LockAndPrepare(db, zSql8, -1, prepFlags, 0, ppStmt, &zTail8);
+ }
+
+ if( zTail8 && pzTail ){
+ /* If sqlite3_prepare returns a tail pointer, we calculate the
+ ** equivalent pointer into the UTF-16 string by counting the unicode
+ ** characters between zSql8 and zTail8, and then returning a pointer
+ ** the same number of characters into the UTF-16 string.
+ */
+ int chars_parsed = sqlite3Utf8CharLen(zSql8, (int)(zTail8-zSql8));
+ *pzTail = (u8 *)zSql + sqlite3Utf16ByteLen(zSql, chars_parsed);
+ }
+ sqlite3DbFree(db, zSql8);
+ rc = sqlite3ApiExit(db, rc);
+ sqlite3_mutex_leave(db->mutex);
+ return rc;
+}
+
+/*
+** Two versions of the official API: legacy and new. In the legacy
+** version, the original SQL text is not saved in the prepared statement
+** and so if a schema change occurs, SQLITE_SCHEMA is returned by
+** sqlite3_step(). In the new version, the original SQL text is retained
+** and the statement is automatically recompiled if a schema change
+** occurs.
+*/
+SQLITE_API int sqlite3_prepare16(
+ sqlite3 *db, /* Database handle. */
+ const void *zSql, /* UTF-16 encoded SQL statement. */
+ int nBytes, /* Length of zSql in bytes. */
+ sqlite3_stmt **ppStmt, /* OUT: A pointer to the prepared statement */
+ const void **pzTail /* OUT: End of parsed string */
+){
+ int rc;
+ rc = sqlite3Prepare16(db,zSql,nBytes,0,ppStmt,pzTail);
+ assert( rc==SQLITE_OK || ppStmt==0 || *ppStmt==0 ); /* VERIFY: F13021 */
+ return rc;
+}
+SQLITE_API int sqlite3_prepare16_v2(
+ sqlite3 *db, /* Database handle. */
+ const void *zSql, /* UTF-16 encoded SQL statement. */
+ int nBytes, /* Length of zSql in bytes. */
+ sqlite3_stmt **ppStmt, /* OUT: A pointer to the prepared statement */
+ const void **pzTail /* OUT: End of parsed string */
+){
+ int rc;
+ rc = sqlite3Prepare16(db,zSql,nBytes,SQLITE_PREPARE_SAVESQL,ppStmt,pzTail);
+ assert( rc==SQLITE_OK || ppStmt==0 || *ppStmt==0 ); /* VERIFY: F13021 */
+ return rc;
+}
+SQLITE_API int sqlite3_prepare16_v3(
+ sqlite3 *db, /* Database handle. */
+ const void *zSql, /* UTF-16 encoded SQL statement. */
+ int nBytes, /* Length of zSql in bytes. */
+ unsigned int prepFlags, /* Zero or more SQLITE_PREPARE_* flags */
+ sqlite3_stmt **ppStmt, /* OUT: A pointer to the prepared statement */
+ const void **pzTail /* OUT: End of parsed string */
+){
+ int rc;
+ rc = sqlite3Prepare16(db,zSql,nBytes,
+ SQLITE_PREPARE_SAVESQL|(prepFlags&SQLITE_PREPARE_MASK),
+ ppStmt,pzTail);
+ assert( rc==SQLITE_OK || ppStmt==0 || *ppStmt==0 ); /* VERIFY: F13021 */
+ return rc;
+}
+
+#endif /* SQLITE_OMIT_UTF16 */
+
+/************** End of prepare.c *********************************************/
+/************** Begin file select.c ******************************************/
+/*
+** 2001 September 15
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+** This file contains C code routines that are called by the parser
+** to handle SELECT statements in SQLite.
+*/
+/* #include "sqliteInt.h" */
+
+/*
+** An instance of the following object is used to record information about
+** how to process the DISTINCT keyword, to simplify passing that information
+** into the selectInnerLoop() routine.
+*/
+typedef struct DistinctCtx DistinctCtx;
+struct DistinctCtx {
+ u8 isTnct; /* True if the DISTINCT keyword is present */
+ u8 eTnctType; /* One of the WHERE_DISTINCT_* operators */
+ int tabTnct; /* Ephemeral table used for DISTINCT processing */
+ int addrTnct; /* Address of OP_OpenEphemeral opcode for tabTnct */
+};
+
+/*
+** An instance of the following object is used to record information about
+** the ORDER BY (or GROUP BY) clause of the query being coded.
+**
+** The aDefer[] array is used by the sorter-references optimization. For
+** example, assuming there is no index that can be used for the ORDER BY,
+** for the query:
+**
+** SELECT a, bigblob FROM t1 ORDER BY a LIMIT 10;
+**
+** it may be more efficient to add just the "a" values to the sorter, and
+** retrieve the associated "bigblob" values directly from table t1 as the
+** 10 smallest "a" values are extracted from the sorter.
+**
+** When the sorter-reference optimization is used, there is one entry in the
+** aDefer[] array for each database table that may be read as values are
+** extracted from the sorter.
+*/
+typedef struct SortCtx SortCtx;
+struct SortCtx {
+ ExprList *pOrderBy; /* The ORDER BY (or GROUP BY) clause */
+ int nOBSat; /* Number of ORDER BY terms satisfied by indices */
+ int iECursor; /* Cursor number for the sorter */
+ int regReturn; /* Register holding block-output return address */
+ int labelBkOut; /* Start label for the block-output subroutine */
+ int addrSortIndex; /* Address of the OP_SorterOpen or OP_OpenEphemeral */
+ int labelDone; /* Jump here when done, ex: LIMIT reached */
+ int labelOBLopt; /* Jump here when sorter is full */
+ u8 sortFlags; /* Zero or more SORTFLAG_* bits */
+#ifdef SQLITE_ENABLE_SORTER_REFERENCES
+ u8 nDefer; /* Number of valid entries in aDefer[] */
+ struct DeferredCsr {
+ Table *pTab; /* Table definition */
+ int iCsr; /* Cursor number for table */
+ int nKey; /* Number of PK columns for table pTab (>=1) */
+ } aDefer[4];
+#endif
+ struct RowLoadInfo *pDeferredRowLoad; /* Deferred row loading info or NULL */
+};
+#define SORTFLAG_UseSorter 0x01 /* Use SorterOpen instead of OpenEphemeral */
+
+/*
+** Delete all the content of a Select structure. Deallocate the structure
+** itself depending on the value of bFree.
+**
+** If bFree==1, call sqlite3DbFreeNN() on the p object.
+** If bFree==0, leave the first Select object unfreed.
+*/
+static void clearSelect(sqlite3 *db, Select *p, int bFree){
+ while( p ){
+ Select *pPrior = p->pPrior;
+ sqlite3ExprListDelete(db, p->pEList);
+ sqlite3SrcListDelete(db, p->pSrc);
+ sqlite3ExprDelete(db, p->pWhere);
+ sqlite3ExprListDelete(db, p->pGroupBy);
+ sqlite3ExprDelete(db, p->pHaving);
+ sqlite3ExprListDelete(db, p->pOrderBy);
+ sqlite3ExprDelete(db, p->pLimit);
+#ifndef SQLITE_OMIT_WINDOWFUNC
+ if( OK_IF_ALWAYS_TRUE(p->pWinDefn) ){
+ sqlite3WindowListDelete(db, p->pWinDefn);
+ }
+#endif
+ if( OK_IF_ALWAYS_TRUE(p->pWith) ) sqlite3WithDelete(db, p->pWith);
+ if( bFree ) sqlite3DbFreeNN(db, p);
+ p = pPrior;
+ bFree = 1;
+ }
+}
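+
+/*
+** For example, for the compound "SELECT 1 UNION SELECT 2" the parser hands
+** the code generator the right-most Select, whose pPrior field links to the
+** SELECT on its left. Calling clearSelect(db, p, 1) on that object walks
+** the pPrior chain and frees every Select in the compound; with bFree==0
+** the first object itself is left allocated but all of its substructure,
+** and every prior Select, is still released.
+*/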
+
+/*
+** Initialize a SelectDest structure.
+*/
+SQLITE_PRIVATE void sqlite3SelectDestInit(SelectDest *pDest, int eDest, int iParm){
+ pDest->eDest = (u8)eDest;
+ pDest->iSDParm = iParm;
+ pDest->iSDParm2 = 0;
+ pDest->zAffSdst = 0;
+ pDest->iSdst = 0;
+ pDest->nSdst = 0;
+}
+
+
+/*
+** Allocate a new Select structure and return a pointer to that
+** structure.
+*/
+SQLITE_PRIVATE Select *sqlite3SelectNew(
+ Parse *pParse, /* Parsing context */
+ ExprList *pEList, /* which columns to include in the result */
+ SrcList *pSrc, /* the FROM clause -- which tables to scan */
+ Expr *pWhere, /* the WHERE clause */
+ ExprList *pGroupBy, /* the GROUP BY clause */
+ Expr *pHaving, /* the HAVING clause */
+ ExprList *pOrderBy, /* the ORDER BY clause */
+ u32 selFlags, /* Flag parameters, such as SF_Distinct */
+ Expr *pLimit /* LIMIT value. NULL means not used */
+){
+ Select *pNew, *pAllocated;
+ Select standin;
+ pAllocated = pNew = sqlite3DbMallocRawNN(pParse->db, sizeof(*pNew) );
+ if( pNew==0 ){
+ assert( pParse->db->mallocFailed );
+ pNew = &standin;
+ }
+ if( pEList==0 ){
+ pEList = sqlite3ExprListAppend(pParse, 0,
+ sqlite3Expr(pParse->db,TK_ASTERISK,0));
+ }
+ pNew->pEList = pEList;
+ pNew->op = TK_SELECT;
+ pNew->selFlags = selFlags;
+ pNew->iLimit = 0;
+ pNew->iOffset = 0;
+ pNew->selId = ++pParse->nSelect;
+ pNew->addrOpenEphm[0] = -1;
+ pNew->addrOpenEphm[1] = -1;
+ pNew->nSelectRow = 0;
+ if( pSrc==0 ) pSrc = sqlite3DbMallocZero(pParse->db, sizeof(*pSrc));
+ pNew->pSrc = pSrc;
+ pNew->pWhere = pWhere;
+ pNew->pGroupBy = pGroupBy;
+ pNew->pHaving = pHaving;
+ pNew->pOrderBy = pOrderBy;
+ pNew->pPrior = 0;
+ pNew->pNext = 0;
+ pNew->pLimit = pLimit;
+ pNew->pWith = 0;
+#ifndef SQLITE_OMIT_WINDOWFUNC
+ pNew->pWin = 0;
+ pNew->pWinDefn = 0;
+#endif
+ if( pParse->db->mallocFailed ) {
+ clearSelect(pParse->db, pNew, pNew!=&standin);
+ pAllocated = 0;
+ }else{
+ assert( pNew->pSrc!=0 || pParse->nErr>0 );
+ }
+ return pAllocated;
+}
+
+
+/*
+** Delete the given Select structure and all of its substructures.
+*/
+SQLITE_PRIVATE void sqlite3SelectDelete(sqlite3 *db, Select *p){
+ if( OK_IF_ALWAYS_TRUE(p) ) clearSelect(db, p, 1);
+}
+
+/*
+** Return a pointer to the right-most SELECT statement in a compound SELECT.
+*/
+static Select *findRightmost(Select *p){
+ while( p->pNext ) p = p->pNext;
+ return p;
+}
+
+/*
+** Given 1 to 3 identifiers preceding the JOIN keyword, determine the
+** type of join. Return an integer constant that expresses that type
+** in terms of the following bit values:
+**
+** JT_INNER
+** JT_CROSS
+** JT_OUTER
+** JT_NATURAL
+** JT_LEFT
+** JT_RIGHT
+**
+** A full outer join is the combination of JT_LEFT and JT_RIGHT.
+**
+** If an illegal or unsupported join type is seen, then still return
+** a join type, but put an error in the pParse structure.
+*/
+SQLITE_PRIVATE int sqlite3JoinType(Parse *pParse, Token *pA, Token *pB, Token *pC){
+ int jointype = 0;
+ Token *apAll[3];
+ Token *p;
+ /* 0123456789 123456789 123456789 123 */
+ static const char zKeyText[] = "naturaleftouterightfullinnercross";
+ static const struct {
+ u8 i; /* Beginning of keyword text in zKeyText[] */
+ u8 nChar; /* Length of the keyword in characters */
+ u8 code; /* Join type mask */
+ } aKeyword[] = {
+ /* natural */ { 0, 7, JT_NATURAL },
+ /* left */ { 6, 4, JT_LEFT|JT_OUTER },
+ /* outer */ { 10, 5, JT_OUTER },
+ /* right */ { 14, 5, JT_RIGHT|JT_OUTER },
+ /* full */ { 19, 4, JT_LEFT|JT_RIGHT|JT_OUTER },
+ /* inner */ { 23, 5, JT_INNER },
+ /* cross */ { 28, 5, JT_INNER|JT_CROSS },
+ };
+ int i, j;
+ apAll[0] = pA;
+ apAll[1] = pB;
+ apAll[2] = pC;
+ for(i=0; i<3 && apAll[i]; i++){
+ p = apAll[i];
+ for(j=0; j<ArraySize(aKeyword); j++){
+ if( p->n==aKeyword[j].nChar
+ && sqlite3StrNICmp((char*)p->z, &zKeyText[aKeyword[j].i], p->n)==0 ){
+ jointype |= aKeyword[j].code;
+ break;
+ }
+ }
+ testcase( j==0 || j==1 || j==2 || j==3 || j==4 || j==5 || j==6 );
+ if( j>=ArraySize(aKeyword) ){
+ jointype |= JT_ERROR;
+ break;
+ }
+ }
+ if(
+ (jointype & (JT_INNER|JT_OUTER))==(JT_INNER|JT_OUTER) ||
+ (jointype & JT_ERROR)!=0
+ ){
+ const char *zSp = " ";
+ assert( pB!=0 );
+ if( pC==0 ){ zSp++; }
+ sqlite3ErrorMsg(pParse, "unknown or unsupported join type: "
+ "%T %T%s%T", pA, pB, zSp, pC);
+ jointype = JT_INNER;
+ }else if( (jointype & JT_OUTER)!=0
+ && (jointype & (JT_LEFT|JT_RIGHT))!=JT_LEFT ){
+ sqlite3ErrorMsg(pParse,
+ "RIGHT and FULL OUTER JOINs are not currently supported");
+ jointype = JT_INNER;
+ }
+ return jointype;
+}
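+
+/*
+** A worked example of the keyword matching above: the tokens of
+** "NATURAL LEFT OUTER JOIN" match the aKeyword[] entries for "natural",
+** "left" and "outer", ORing together JT_NATURAL, JT_LEFT|JT_OUTER and
+** JT_OUTER. "CROSS JOIN" yields JT_INNER|JT_CROSS. A contradictory mix
+** like "INNER OUTER JOIN" sets both JT_INNER and JT_OUTER and is reported
+** as an unknown join type, while "FULL OUTER JOIN" parses to
+** JT_LEFT|JT_RIGHT|JT_OUTER but is then rejected because only LEFT OUTER
+** is currently supported.
+*/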
+
+/*
+** Return the index of a column in a table. Return -1 if the column
+** is not contained in the table.
+*/
+static int columnIndex(Table *pTab, const char *zCol){
+ int i;
+ u8 h = sqlite3StrIHash(zCol);
+ Column *pCol;
+ for(pCol=pTab->aCol, i=0; i<pTab->nCol; pCol++, i++){
+ if( pCol->hName==h && sqlite3StrICmp(pCol->zName, zCol)==0 ) return i;
+ }
+ return -1;
+}
+
+/*
+** Search the first N tables in pSrc, from left to right, looking for a
+** table that has a column named zCol.
+**
+** When found, set *piTab and *piCol to the table index and column index
+** of the matching column and return TRUE.
+**
+** If not found, return FALSE.
+*/
+static int tableAndColumnIndex(
+ SrcList *pSrc, /* Array of tables to search */
+ int N, /* Number of tables in pSrc->a[] to search */
+ const char *zCol, /* Name of the column we are looking for */
+ int *piTab, /* Write index of pSrc->a[] here */
+ int *piCol, /* Write index of pSrc->a[*piTab].pTab->aCol[] here */
+ int bIgnoreHidden /* True to ignore hidden columns */
+){
+ int i; /* For looping over tables in pSrc */
+ int iCol; /* Index of column matching zCol */
+
+ assert( (piTab==0)==(piCol==0) ); /* Both or neither are NULL */
+ for(i=0; i<N; i++){
+ iCol = columnIndex(pSrc->a[i].pTab, zCol);
+ if( iCol>=0
+ && (bIgnoreHidden==0 || IsHiddenColumn(&pSrc->a[i].pTab->aCol[iCol])==0)
+ ){
+ if( piTab ){
+ *piTab = i;
+ *piCol = iCol;
+ }
+ return 1;
+ }
+ }
+ return 0;
+}
+
+/*
+** This function is used to add terms implied by JOIN syntax to the
+** WHERE clause expression of a SELECT statement. The new term, which
+** is ANDed with the existing WHERE clause, is of the form:
+**
+** (tab1.col1 = tab2.col2)
+**
+** where tab1 is the iSrc'th table in SrcList pSrc and tab2 is the
+** (iSrc+1)'th. Column col1 is column iColLeft of tab1, and col2 is
+** column iColRight of tab2.
+*/
+static void addWhereTerm(
+ Parse *pParse, /* Parsing context */
+ SrcList *pSrc, /* List of tables in FROM clause */
+ int iLeft, /* Index of first table to join in pSrc */
+ int iColLeft, /* Index of column in first table */
+ int iRight, /* Index of second table in pSrc */
+ int iColRight, /* Index of column in second table */
+ int isOuterJoin, /* True if this is an OUTER join */
+ Expr **ppWhere /* IN/OUT: The WHERE clause to add to */
+){
+ sqlite3 *db = pParse->db;
+ Expr *pE1;
+ Expr *pE2;
+ Expr *pEq;
+
+ assert( iLeft<iRight );
+ assert( pSrc->nSrc>iRight );
+ assert( pSrc->a[iLeft].pTab );
+ assert( pSrc->a[iRight].pTab );
+
+ pE1 = sqlite3CreateColumnExpr(db, pSrc, iLeft, iColLeft);
+ pE2 = sqlite3CreateColumnExpr(db, pSrc, iRight, iColRight);
+
+ pEq = sqlite3PExpr(pParse, TK_EQ, pE1, pE2);
+ if( pEq && isOuterJoin ){
+ ExprSetProperty(pEq, EP_FromJoin);
+ assert( !ExprHasProperty(pEq, EP_TokenOnly|EP_Reduced) );
+ ExprSetVVAProperty(pEq, EP_NoReduce);
+ pEq->iRightJoinTable = (i16)pE2->iTable;
+ }
+ *ppWhere = sqlite3ExprAnd(pParse, *ppWhere, pEq);
+}
+
+/*
+** Set the EP_FromJoin property on all terms of the given expression.
+** And set the Expr.iRightJoinTable to iTable for every term in the
+** expression.
+**
+** The EP_FromJoin property is used on terms of an expression to tell
+** the LEFT OUTER JOIN processing logic that this term is part of the
+** join restriction specified in the ON or USING clause and not a part
+** of the more general WHERE clause. These terms are moved over to the
+** WHERE clause during join processing but we need to remember that they
+** originated in the ON or USING clause.
+**
+** The Expr.iRightJoinTable tells the WHERE clause processing that the
+** expression depends on table iRightJoinTable even if that table is not
+** explicitly mentioned in the expression. That information is needed
+** for cases like this:
+**
+** SELECT * FROM t1 LEFT JOIN t2 ON t1.a=t2.b AND t1.x=5
+**
+** The where clause needs to defer the handling of the t1.x=5
+** term until after the t2 loop of the join. In that way, a
+** NULL t2 row will be inserted whenever t1.x!=5. If we do not
+** defer the handling of t1.x=5, it will be processed immediately
+** after the t1 loop and rows with t1.x!=5 will never appear in
+** the output, which is incorrect.
+*/
+SQLITE_PRIVATE void sqlite3SetJoinExpr(Expr *p, int iTable){
+ while( p ){
+ ExprSetProperty(p, EP_FromJoin);
+ assert( !ExprHasProperty(p, EP_TokenOnly|EP_Reduced) );
+ ExprSetVVAProperty(p, EP_NoReduce);
+ p->iRightJoinTable = (i16)iTable;
+ if( p->op==TK_FUNCTION && p->x.pList ){
+ int i;
+ for(i=0; i<p->x.pList->nExpr; i++){
+ sqlite3SetJoinExpr(p->x.pList->a[i].pExpr, iTable);
+ }
+ }
+ sqlite3SetJoinExpr(p->pLeft, iTable);
+ p = p->pRight;
+ }
+}
+
+/* Undo the work of sqlite3SetJoinExpr(). In the expression p, convert every
+** term that is marked with EP_FromJoin and iRightJoinTable==iTable into
+** an ordinary term that omits the EP_FromJoin mark.
+**
+** This happens when a LEFT JOIN is simplified into an ordinary JOIN.
+*/
+static void unsetJoinExpr(Expr *p, int iTable){
+ while( p ){
+ if( ExprHasProperty(p, EP_FromJoin)
+ && (iTable<0 || p->iRightJoinTable==iTable) ){
+ ExprClearProperty(p, EP_FromJoin);
+ }
+ if( p->op==TK_FUNCTION && p->x.pList ){
+ int i;
+ for(i=0; i<p->x.pList->nExpr; i++){
+ unsetJoinExpr(p->x.pList->a[i].pExpr, iTable);
+ }
+ }
+ unsetJoinExpr(p->pLeft, iTable);
+ p = p->pRight;
+ }
+}
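+
+/*
+** For instance, in
+**
+**     SELECT * FROM t1 LEFT JOIN t2 ON t1.a=t2.b WHERE t2.b IS NOT NULL
+**
+** the WHERE clause rejects exactly the NULL-padded rows that the LEFT JOIN
+** could add, so the statement is equivalent to an ordinary JOIN. When the
+** query planner makes that simplification, unsetJoinExpr() strips the
+** EP_FromJoin marks from the former ON terms so they are treated as plain
+** WHERE terms from then on.
+*/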
+
+/*
+** This routine processes the join information for a SELECT statement.
+** ON and USING clauses are converted into extra terms of the WHERE clause.
+** NATURAL joins also create extra WHERE clause terms.
+**
+** The terms of a FROM clause are contained in the Select.pSrc structure.
+** The left-most table is the first entry in Select.pSrc. The right-most
+** table is the last entry. The join operator is held in the entry to
+** the left. Thus entry 0 contains the join operator for the join between
+** entries 0 and 1. Any ON or USING clauses associated with the join are
+** also attached to the left entry.
+**
+** This routine returns the number of errors encountered.
+*/
+static int sqliteProcessJoin(Parse *pParse, Select *p){
+ SrcList *pSrc; /* All tables in the FROM clause */
+ int i, j; /* Loop counters */
+ struct SrcList_item *pLeft; /* Left table being joined */
+ struct SrcList_item *pRight; /* Right table being joined */
+
+ pSrc = p->pSrc;
+ pLeft = &pSrc->a[0];
+ pRight = &pLeft[1];
+ for(i=0; i<pSrc->nSrc-1; i++, pRight++, pLeft++){
+ Table *pRightTab = pRight->pTab;
+ int isOuter;
+
+ if( NEVER(pLeft->pTab==0 || pRightTab==0) ) continue;
+ isOuter = (pRight->fg.jointype & JT_OUTER)!=0;
+
+ /* When the NATURAL keyword is present, add WHERE clause terms for
+ ** every column that the two tables have in common.
+ */
+ if( pRight->fg.jointype & JT_NATURAL ){
+ if( pRight->pOn || pRight->pUsing ){
+ sqlite3ErrorMsg(pParse, "a NATURAL join may not have "
+ "an ON or USING clause", 0);
+ return 1;
+ }
+ for(j=0; j<pRightTab->nCol; j++){
+ char *zName; /* Name of column in the right table */
+ int iLeft; /* Matching left table */
+ int iLeftCol; /* Matching column in the left table */
+
+ if( IsHiddenColumn(&pRightTab->aCol[j]) ) continue;
+ zName = pRightTab->aCol[j].zName;
+ if( tableAndColumnIndex(pSrc, i+1, zName, &iLeft, &iLeftCol, 1) ){
+ addWhereTerm(pParse, pSrc, iLeft, iLeftCol, i+1, j,
+ isOuter, &p->pWhere);
+ }
+ }
+ }
+
+ /* Disallow both ON and USING clauses in the same join
+ */
+ if( pRight->pOn && pRight->pUsing ){
+ sqlite3ErrorMsg(pParse, "cannot have both ON and USING "
+ "clauses in the same join");
+ return 1;
+ }
+
+ /* Add the ON clause to the end of the WHERE clause, connected by
+ ** an AND operator.
+ */
+ if( pRight->pOn ){
+ if( isOuter ) sqlite3SetJoinExpr(pRight->pOn, pRight->iCursor);
+ p->pWhere = sqlite3ExprAnd(pParse, p->pWhere, pRight->pOn);
+ pRight->pOn = 0;
+ }
+
+ /* Create extra terms on the WHERE clause for each column named
+ ** in the USING clause. Example: If the two tables to be joined are
+ ** A and B and the USING clause names X, Y, and Z, then add this
+ ** to the WHERE clause: A.X=B.X AND A.Y=B.Y AND A.Z=B.Z
+ ** Report an error if any column mentioned in the USING clause is
+ ** not contained in both tables to be joined.
+ */
+ if( pRight->pUsing ){
+ IdList *pList = pRight->pUsing;
+ for(j=0; j<pList->nId; j++){
+ char *zName; /* Name of the term in the USING clause */
+ int iLeft; /* Table on the left with matching column name */
+ int iLeftCol; /* Column number of matching column on the left */
+ int iRightCol; /* Column number of matching column on the right */
+
+ zName = pList->a[j].zName;
+ iRightCol = columnIndex(pRightTab, zName);
+ if( iRightCol<0
+ || !tableAndColumnIndex(pSrc, i+1, zName, &iLeft, &iLeftCol, 0)
+ ){
+ sqlite3ErrorMsg(pParse, "cannot join using column %s - column "
+ "not present in both tables", zName);
+ return 1;
+ }
+ addWhereTerm(pParse, pSrc, iLeft, iLeftCol, i+1, iRightCol,
+ isOuter, &p->pWhere);
+ }
+ }
+ }
+ return 0;
+}
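+
+/*
+** To illustrate the rewrite performed above: given
+**
+**     SELECT * FROM a JOIN b USING(x) NATURAL JOIN c
+**
+** the USING clause contributes the term a.x=b.x and the NATURAL join
+** contributes one equality term for each column name that c shares with
+** the tables to its left; every such term is ANDed into p->pWhere, and any
+** ON expressions are moved there as well.
+*/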
+
+/*
+** An instance of this object holds information (beyond pParse and pSelect)
+** needed to load the next result row that is to be added to the sorter.
+*/
+typedef struct RowLoadInfo RowLoadInfo;
+struct RowLoadInfo {
+ int regResult; /* Store results in array of registers here */
+ u8 ecelFlags; /* Flag argument to ExprCodeExprList() */
+#ifdef SQLITE_ENABLE_SORTER_REFERENCES
+ ExprList *pExtra; /* Extra columns needed by sorter refs */
+ int regExtraResult; /* Where to load the extra columns */
+#endif
+};
+
+/*
+** This routine does the work of loading query data into an array of
+** registers so that it can be added to the sorter.
+*/
+static void innerLoopLoadRow(
+ Parse *pParse, /* Statement under construction */
+ Select *pSelect, /* The query being coded */
+ RowLoadInfo *pInfo /* Info needed to complete the row load */
+){
+ sqlite3ExprCodeExprList(pParse, pSelect->pEList, pInfo->regResult,
+ 0, pInfo->ecelFlags);
+#ifdef SQLITE_ENABLE_SORTER_REFERENCES
+ if( pInfo->pExtra ){
+ sqlite3ExprCodeExprList(pParse, pInfo->pExtra, pInfo->regExtraResult, 0, 0);
+ sqlite3ExprListDelete(pParse->db, pInfo->pExtra);
+ }
+#endif
+}
+
+/*
+** Code the OP_MakeRecord instruction that generates the entry to be
+** added into the sorter.
+**
+** Return the register in which the result is stored.
+*/
+static int makeSorterRecord(
+ Parse *pParse,
+ SortCtx *pSort,
+ Select *pSelect,
+ int regBase,
+ int nBase
+){
+ int nOBSat = pSort->nOBSat;
+ Vdbe *v = pParse->pVdbe;
+ int regOut = ++pParse->nMem;
+ if( pSort->pDeferredRowLoad ){
+ innerLoopLoadRow(pParse, pSelect, pSort->pDeferredRowLoad);
+ }
+ sqlite3VdbeAddOp3(v, OP_MakeRecord, regBase+nOBSat, nBase-nOBSat, regOut);
+ return regOut;
+}
+
+/*
+** Generate code that will push the record in registers regData
+** through regData+nData-1 onto the sorter.
+*/
+static void pushOntoSorter(
+ Parse *pParse, /* Parser context */
+ SortCtx *pSort, /* Information about the ORDER BY clause */
+ Select *pSelect, /* The whole SELECT statement */
+ int regData, /* First register holding data to be sorted */
+ int regOrigData, /* First register holding data before packing */
+ int nData, /* Number of elements in the regData data array */
+ int nPrefixReg /* No. of reg prior to regData available for use */
+){
+ Vdbe *v = pParse->pVdbe; /* Stmt under construction */
+ int bSeq = ((pSort->sortFlags & SORTFLAG_UseSorter)==0);
+ int nExpr = pSort->pOrderBy->nExpr; /* No. of ORDER BY terms */
+ int nBase = nExpr + bSeq + nData; /* Fields in sorter record */
+ int regBase; /* Regs for sorter record */
+ int regRecord = 0; /* Assembled sorter record */
+ int nOBSat = pSort->nOBSat; /* ORDER BY terms to skip */
+ int op; /* Opcode to add sorter record to sorter */
+ int iLimit; /* LIMIT counter */
+ int iSkip = 0; /* End of the sorter insert loop */
+
+ assert( bSeq==0 || bSeq==1 );
+
+ /* Three cases:
+ ** (1) The data to be sorted has already been packed into a Record
+ ** by a prior OP_MakeRecord. In this case nData==1 and regData
+ ** will be completely unrelated to regOrigData.
+ ** (2) All output columns are included in the sort record. In that
+ ** case regData==regOrigData.
+ ** (3) Some output columns are omitted from the sort record due to
+ ** the SQLITE_ENABLE_SORTER_REFERENCE optimization, or due to the
+ ** SQLITE_ECEL_OMITREF optimization, or due to the
+ ** SortCtx.pDeferredRowLoad optimization. In any of these cases
+ ** regOrigData is 0 to prevent this routine from trying to copy
+ ** values that might not yet exist.
+ */
+ assert( nData==1 || regData==regOrigData || regOrigData==0 );
+
+ if( nPrefixReg ){
+ assert( nPrefixReg==nExpr+bSeq );
+ regBase = regData - nPrefixReg;
+ }else{
+ regBase = pParse->nMem + 1;
+ pParse->nMem += nBase;
+ }
+ assert( pSelect->iOffset==0 || pSelect->iLimit!=0 );
+ iLimit = pSelect->iOffset ? pSelect->iOffset+1 : pSelect->iLimit;
+ pSort->labelDone = sqlite3VdbeMakeLabel(pParse);
+ sqlite3ExprCodeExprList(pParse, pSort->pOrderBy, regBase, regOrigData,
+ SQLITE_ECEL_DUP | (regOrigData? SQLITE_ECEL_REF : 0));
+ if( bSeq ){
+ sqlite3VdbeAddOp2(v, OP_Sequence, pSort->iECursor, regBase+nExpr);
+ }
+ if( nPrefixReg==0 && nData>0 ){
+ sqlite3ExprCodeMove(pParse, regData, regBase+nExpr+bSeq, nData);
+ }
+ if( nOBSat>0 ){
+ int regPrevKey; /* The first nOBSat columns of the previous row */
+ int addrFirst; /* Address of the OP_IfNot opcode */
+ int addrJmp; /* Address of the OP_Jump opcode */
+ VdbeOp *pOp; /* Opcode that opens the sorter */
+ int nKey; /* Number of sorting key columns, including OP_Sequence */
+ KeyInfo *pKI; /* Original KeyInfo on the sorter table */
+
+ regRecord = makeSorterRecord(pParse, pSort, pSelect, regBase, nBase);
+ regPrevKey = pParse->nMem+1;
+ pParse->nMem += pSort->nOBSat;
+ nKey = nExpr - pSort->nOBSat + bSeq;
+ if( bSeq ){
+ addrFirst = sqlite3VdbeAddOp1(v, OP_IfNot, regBase+nExpr);
+ }else{
+ addrFirst = sqlite3VdbeAddOp1(v, OP_SequenceTest, pSort->iECursor);
+ }
+ VdbeCoverage(v);
+ sqlite3VdbeAddOp3(v, OP_Compare, regPrevKey, regBase, pSort->nOBSat);
+ pOp = sqlite3VdbeGetOp(v, pSort->addrSortIndex);
+ if( pParse->db->mallocFailed ) return;
+ pOp->p2 = nKey + nData;
+ pKI = pOp->p4.pKeyInfo;
+ memset(pKI->aSortFlags, 0, pKI->nKeyField); /* Makes OP_Jump testable */
+ sqlite3VdbeChangeP4(v, -1, (char*)pKI, P4_KEYINFO);
+ testcase( pKI->nAllField > pKI->nKeyField+2 );
+ pOp->p4.pKeyInfo = sqlite3KeyInfoFromExprList(pParse,pSort->pOrderBy,nOBSat,
+ pKI->nAllField-pKI->nKeyField-1);
+ pOp = 0; /* Ensure pOp not used after sqlite3VdbeAddOp3() */
+ addrJmp = sqlite3VdbeCurrentAddr(v);
+ sqlite3VdbeAddOp3(v, OP_Jump, addrJmp+1, 0, addrJmp+1); VdbeCoverage(v);
+ pSort->labelBkOut = sqlite3VdbeMakeLabel(pParse);
+ pSort->regReturn = ++pParse->nMem;
+ sqlite3VdbeAddOp2(v, OP_Gosub, pSort->regReturn, pSort->labelBkOut);
+ sqlite3VdbeAddOp1(v, OP_ResetSorter, pSort->iECursor);
+ if( iLimit ){
+ sqlite3VdbeAddOp2(v, OP_IfNot, iLimit, pSort->labelDone);
+ VdbeCoverage(v);
+ }
+ sqlite3VdbeJumpHere(v, addrFirst);
+ sqlite3ExprCodeMove(pParse, regBase, regPrevKey, pSort->nOBSat);
+ sqlite3VdbeJumpHere(v, addrJmp);
+ }
+ if( iLimit ){
+ /* At this point the values for the new sorter entry are stored
+ ** in an array of registers. They need to be composed into a record
+ ** and inserted into the sorter if either (a) there are currently
+ ** less than LIMIT+OFFSET items or (b) the new record is smaller than
+ ** the largest record currently in the sorter. If (b) is true and there
+ ** are already LIMIT+OFFSET items in the sorter, delete the largest
+ ** entry before inserting the new one. This way there are never more
+ ** than LIMIT+OFFSET items in the sorter.
+ **
+ ** If the new record does not need to be inserted into the sorter,
+ ** jump to the next iteration of the loop. If the pSort->labelOBLopt
+ ** value is not zero, then it is a label of where to jump. Otherwise,
+ ** just bypass the row insert logic. See the header comment on the
+ ** sqlite3WhereOrderByLimitOptLabel() function for additional info.
+ */
+ int iCsr = pSort->iECursor;
+ sqlite3VdbeAddOp2(v, OP_IfNotZero, iLimit, sqlite3VdbeCurrentAddr(v)+4);
+ VdbeCoverage(v);
+ sqlite3VdbeAddOp2(v, OP_Last, iCsr, 0);
+ iSkip = sqlite3VdbeAddOp4Int(v, OP_IdxLE,
+ iCsr, 0, regBase+nOBSat, nExpr-nOBSat);
+ VdbeCoverage(v);
+ sqlite3VdbeAddOp1(v, OP_Delete, iCsr);
+ }
+ if( regRecord==0 ){
+ regRecord = makeSorterRecord(pParse, pSort, pSelect, regBase, nBase);
+ }
+ if( pSort->sortFlags & SORTFLAG_UseSorter ){
+ op = OP_SorterInsert;
+ }else{
+ op = OP_IdxInsert;
+ }
+ sqlite3VdbeAddOp4Int(v, op, pSort->iECursor, regRecord,
+ regBase+nOBSat, nBase-nOBSat);
+ if( iSkip ){
+ sqlite3VdbeChangeP2(v, iSkip,
+ pSort->labelOBLopt ? pSort->labelOBLopt : sqlite3VdbeCurrentAddr(v));
+ }
+}
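+
+/*
+** The effect of the iLimit logic above can be seen with
+**
+**     SELECT x FROM t1 ORDER BY y LIMIT 5 OFFSET 2;
+**
+** The combined counter starts at LIMIT+OFFSET = 7, so the sorter never
+** holds more than 7 entries: once it is full, a new row is inserted only
+** if it sorts below the current largest entry, which is then deleted to
+** make room. For large inputs this keeps the sorter bounded instead of
+** buffering every row.
+*/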
+
+/*
+** Add code to implement the OFFSET
+*/
+static void codeOffset(
+ Vdbe *v, /* Generate code into this VM */
+ int iOffset, /* Register holding the offset counter */
+ int iContinue /* Jump here to skip the current record */
+){
+ if( iOffset>0 ){
+ sqlite3VdbeAddOp3(v, OP_IfPos, iOffset, iContinue, 1); VdbeCoverage(v);
+ VdbeComment((v, "OFFSET"));
+ }
+}
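+
+/*
+** For "LIMIT 10 OFFSET 5" the register iOffset is initialized to 5, so the
+** OP_IfPos coded above decrements it and jumps to iContinue for each of
+** the first five rows, discarding them before any output is produced.
+*/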
+
+/*
+** Add code that will check to make sure the N registers starting at iMem
+** form a distinct entry. iTab is a sorting index that holds previously
+** seen combinations of the N values. A new entry is made in iTab
+** if the current N values are new.
+**
+** A jump to addrRepeat is made and the N+1 values are popped from the
+** stack if the top N elements are not distinct.
+*/
+static void codeDistinct(
+ Parse *pParse, /* Parsing and code generating context */
+ int iTab, /* A sorting index used to test for distinctness */
+ int addrRepeat, /* Jump to here if not distinct */
+ int N, /* Number of elements */
+ int iMem /* First element */
+){
+ Vdbe *v;
+ int r1;
+
+ v = pParse->pVdbe;
+ r1 = sqlite3GetTempReg(pParse);
+ sqlite3VdbeAddOp4Int(v, OP_Found, iTab, addrRepeat, iMem, N); VdbeCoverage(v);
+ sqlite3VdbeAddOp3(v, OP_MakeRecord, iMem, N, r1);
+ sqlite3VdbeAddOp4Int(v, OP_IdxInsert, iTab, r1, iMem, N);
+ sqlite3VdbeChangeP5(v, OPFLAG_USESEEKRESULT);
+ sqlite3ReleaseTempReg(pParse, r1);
+}
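+
+/*
+** For "SELECT DISTINCT a,b FROM t1" handled by the WHERE_DISTINCT_UNORDERED
+** strategy, this routine runs once per candidate row: OP_Found probes the
+** ephemeral index for the (a,b) pair and jumps to addrRepeat on a hit,
+** skipping the duplicate; otherwise the pair is recorded with OP_IdxInsert
+** so that any later occurrence is caught.
+*/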
+
+#ifdef SQLITE_ENABLE_SORTER_REFERENCES
+/*
+** This function is called as part of inner-loop generation for a SELECT
+** statement with an ORDER BY that is not optimized by an index. It
+** determines the expressions, if any, that the sorter-reference
+** optimization should be used for. The sorter-reference optimization
+** is used for SELECT queries like:
+**
+** SELECT a, bigblob FROM t1 ORDER BY a LIMIT 10
+**
+** If the optimization is used for expression "bigblob", then instead of
+** storing values read from that column in the sorter records, the PK of
+** the row from table t1 is stored instead. Then, as records are extracted from
+** the sorter to return to the user, the required value of bigblob is
+** retrieved directly from table t1. If the values are very large, this
+** can be more efficient than storing them directly in the sorter records.
+**
+** The ExprList_item.bSorterRef flag is set for each expression in pEList
+** for which the sorter-reference optimization should be enabled.
+** Additionally, the pSort->aDefer[] array is populated with entries
+** for all cursors required to evaluate all selected expressions. Finally,
+** the output variable (*ppExtra) is set to an expression list containing
+** expressions for all extra PK values that should be stored in the
+** sorter records.
+*/
+static void selectExprDefer(
+ Parse *pParse, /* Leave any error here */
+ SortCtx *pSort, /* Sorter context */
+ ExprList *pEList, /* Expressions destined for sorter */
+ ExprList **ppExtra /* Expressions to append to sorter record */
+){
+ int i;
+ int nDefer = 0;
+ ExprList *pExtra = 0;
+ for(i=0; i<pEList->nExpr; i++){
+ struct ExprList_item *pItem = &pEList->a[i];
+ if( pItem->u.x.iOrderByCol==0 ){
+ Expr *pExpr = pItem->pExpr;
+ Table *pTab = pExpr->y.pTab;
+ if( pExpr->op==TK_COLUMN && pExpr->iColumn>=0 && pTab && !IsVirtual(pTab)
+ && (pTab->aCol[pExpr->iColumn].colFlags & COLFLAG_SORTERREF)
+ ){
+ int j;
+ for(j=0; j<nDefer; j++){
+ if( pSort->aDefer[j].iCsr==pExpr->iTable ) break;
+ }
+ if( j==nDefer ){
+ if( nDefer==ArraySize(pSort->aDefer) ){
+ continue;
+ }else{
+ int nKey = 1;
+ int k;
+ Index *pPk = 0;
+ if( !HasRowid(pTab) ){
+ pPk = sqlite3PrimaryKeyIndex(pTab);
+ nKey = pPk->nKeyCol;
+ }
+ for(k=0; k<nKey; k++){
+ Expr *pNew = sqlite3PExpr(pParse, TK_COLUMN, 0, 0);
+ if( pNew ){
+ pNew->iTable = pExpr->iTable;
+ pNew->y.pTab = pExpr->y.pTab;
+ pNew->iColumn = pPk ? pPk->aiColumn[k] : -1;
+ pExtra = sqlite3ExprListAppend(pParse, pExtra, pNew);
+ }
+ }
+ pSort->aDefer[nDefer].pTab = pExpr->y.pTab;
+ pSort->aDefer[nDefer].iCsr = pExpr->iTable;
+ pSort->aDefer[nDefer].nKey = nKey;
+ nDefer++;
+ }
+ }
+ pItem->bSorterRef = 1;
+ }
+ }
+ }
+ pSort->nDefer = (u8)nDefer;
+ *ppExtra = pExtra;
+}
+#endif
+
+/*
+** This routine generates the code for the inside of the inner loop
+** of a SELECT.
+**
+** If srcTab is negative, then the p->pEList expressions
+** are evaluated in order to get the data for this row. If srcTab is
+** zero or more, then data is pulled from srcTab and p->pEList is used only
+** to get the number of columns and the collation sequence for each column.
+*/
+static void selectInnerLoop(
+ Parse *pParse, /* The parser context */
+ Select *p, /* The complete select statement being coded */
+ int srcTab, /* Pull data from this table if non-negative */
+ SortCtx *pSort, /* If not NULL, info on how to process ORDER BY */
+ DistinctCtx *pDistinct, /* If not NULL, info on how to process DISTINCT */
+ SelectDest *pDest, /* How to dispose of the results */
+ int iContinue, /* Jump here to continue with next row */
+ int iBreak /* Jump here to break out of the inner loop */
+){
+ Vdbe *v = pParse->pVdbe;
+ int i;
+ int hasDistinct; /* True if the DISTINCT keyword is present */
+ int eDest = pDest->eDest; /* How to dispose of results */
+ int iParm = pDest->iSDParm; /* First argument to disposal method */
+ int nResultCol; /* Number of result columns */
+ int nPrefixReg = 0; /* Number of extra registers before regResult */
+ RowLoadInfo sRowLoadInfo; /* Info for deferred row loading */
+
+ /* Usually, regResult is the first cell in an array of memory cells
+ ** containing the current result row. In this case regOrig is set to the
+ ** same value. However, if the results are being sent to the sorter, the
+ ** values for any expressions that are also part of the sort-key are omitted
+ ** from this array. In this case regOrig is set to zero. */
+ int regResult; /* Start of memory holding current results */
+ int regOrig; /* Start of memory holding full result (or 0) */
+
+ assert( v );
+ assert( p->pEList!=0 );
+ hasDistinct = pDistinct ? pDistinct->eTnctType : WHERE_DISTINCT_NOOP;
+ if( pSort && pSort->pOrderBy==0 ) pSort = 0;
+ if( pSort==0 && !hasDistinct ){
+ assert( iContinue!=0 );
+ codeOffset(v, p->iOffset, iContinue);
+ }
+
+ /* Pull the requested columns.
+ */
+ nResultCol = p->pEList->nExpr;
+
+ if( pDest->iSdst==0 ){
+ if( pSort ){
+ nPrefixReg = pSort->pOrderBy->nExpr;
+ if( !(pSort->sortFlags & SORTFLAG_UseSorter) ) nPrefixReg++;
+ pParse->nMem += nPrefixReg;
+ }
+ pDest->iSdst = pParse->nMem+1;
+ pParse->nMem += nResultCol;
+ }else if( pDest->iSdst+nResultCol > pParse->nMem ){
+ /* This is an error condition that can result, for example, when a SELECT
+ ** on the right-hand side of an INSERT contains more result columns than
+ ** there are columns in the table on the left. The error will be caught
+ ** and reported later. But we need to make sure enough memory is allocated
+ ** to avoid other spurious errors in the meantime. */
+ pParse->nMem += nResultCol;
+ }
+ pDest->nSdst = nResultCol;
+ regOrig = regResult = pDest->iSdst;
+ if( srcTab>=0 ){
+ for(i=0; i<nResultCol; i++){
+ sqlite3VdbeAddOp3(v, OP_Column, srcTab, i, regResult+i);
+ VdbeComment((v, "%s", p->pEList->a[i].zEName));
+ }
+ }else if( eDest!=SRT_Exists ){
+#ifdef SQLITE_ENABLE_SORTER_REFERENCES
+ ExprList *pExtra = 0;
+#endif
+ /* If the destination is an EXISTS(...) expression, the actual
+ ** values returned by the SELECT are not required.
+ */
+ u8 ecelFlags; /* "ecel" is an abbreviation of "ExprCodeExprList" */
+ ExprList *pEList;
+ if( eDest==SRT_Mem || eDest==SRT_Output || eDest==SRT_Coroutine ){
+ ecelFlags = SQLITE_ECEL_DUP;
+ }else{
+ ecelFlags = 0;
+ }
+ if( pSort && hasDistinct==0 && eDest!=SRT_EphemTab && eDest!=SRT_Table ){
+ /* For each expression in p->pEList that is a copy of an expression in
+ ** the ORDER BY clause (pSort->pOrderBy), set the associated
+ ** iOrderByCol value to one more than the index of the ORDER BY
+ ** expression within the sort-key that pushOntoSorter() will generate.
+ ** This allows the p->pEList field to be omitted from the sorted record,
+ ** saving space and CPU cycles. */
+ ecelFlags |= (SQLITE_ECEL_OMITREF|SQLITE_ECEL_REF);
+
+ for(i=pSort->nOBSat; i<pSort->pOrderBy->nExpr; i++){
+ int j;
+ if( (j = pSort->pOrderBy->a[i].u.x.iOrderByCol)>0 ){
+ p->pEList->a[j-1].u.x.iOrderByCol = i+1-pSort->nOBSat;
+ }
+ }
+#ifdef SQLITE_ENABLE_SORTER_REFERENCES
+ selectExprDefer(pParse, pSort, p->pEList, &pExtra);
+ if( pExtra && pParse->db->mallocFailed==0 ){
+ /* If there are any extra PK columns to add to the sorter records,
+ ** allocate extra memory cells and adjust the OpenEphemeral
+ ** instruction to account for the larger records. This is only
+ ** required if there are one or more WITHOUT ROWID tables with
+ ** composite primary keys in the SortCtx.aDefer[] array. */
+ VdbeOp *pOp = sqlite3VdbeGetOp(v, pSort->addrSortIndex);
+ pOp->p2 += (pExtra->nExpr - pSort->nDefer);
+ pOp->p4.pKeyInfo->nAllField += (pExtra->nExpr - pSort->nDefer);
+ pParse->nMem += pExtra->nExpr;
+ }
+#endif
+
+ /* Adjust nResultCol to account for columns that are omitted
+ ** from the sorter by the optimizations in this branch */
+ pEList = p->pEList;
+ for(i=0; i<pEList->nExpr; i++){
+ if( pEList->a[i].u.x.iOrderByCol>0
+#ifdef SQLITE_ENABLE_SORTER_REFERENCES
+ || pEList->a[i].bSorterRef
+#endif
+ ){
+ nResultCol--;
+ regOrig = 0;
+ }
+ }
+
+ testcase( regOrig );
+ testcase( eDest==SRT_Set );
+ testcase( eDest==SRT_Mem );
+ testcase( eDest==SRT_Coroutine );
+ testcase( eDest==SRT_Output );
+ assert( eDest==SRT_Set || eDest==SRT_Mem
+ || eDest==SRT_Coroutine || eDest==SRT_Output
+ || eDest==SRT_Upfrom );
+ }
+ sRowLoadInfo.regResult = regResult;
+ sRowLoadInfo.ecelFlags = ecelFlags;
+#ifdef SQLITE_ENABLE_SORTER_REFERENCES
+ sRowLoadInfo.pExtra = pExtra;
+ sRowLoadInfo.regExtraResult = regResult + nResultCol;
+ if( pExtra ) nResultCol += pExtra->nExpr;
+#endif
+ if( p->iLimit
+ && (ecelFlags & SQLITE_ECEL_OMITREF)!=0
+ && nPrefixReg>0
+ ){
+ assert( pSort!=0 );
+ assert( hasDistinct==0 );
+ pSort->pDeferredRowLoad = &sRowLoadInfo;
+ regOrig = 0;
+ }else{
+ innerLoopLoadRow(pParse, p, &sRowLoadInfo);
+ }
+ }
+
+ /* If the DISTINCT keyword was present on the SELECT statement
+ ** and this row has been seen before, then do not make this row
+ ** part of the result.
+ */
+ if( hasDistinct ){
+ switch( pDistinct->eTnctType ){
+ case WHERE_DISTINCT_ORDERED: {
+ VdbeOp *pOp; /* No longer required OpenEphemeral instr. */
+ int iJump; /* Jump destination */
+ int regPrev; /* Previous row content */
+
+ /* Allocate space for the previous row */
+ regPrev = pParse->nMem+1;
+ pParse->nMem += nResultCol;
+
+ /* Change the OP_OpenEphemeral coded earlier to an OP_Null that
+ ** sets the MEM_Cleared bit on the first register of the
+ ** previous value. This will cause the OP_Ne below to always
+ ** fail on the first iteration of the loop even if the first
+ ** row is all NULLs.
+ */
+ sqlite3VdbeChangeToNoop(v, pDistinct->addrTnct);
+ pOp = sqlite3VdbeGetOp(v, pDistinct->addrTnct);
+ pOp->opcode = OP_Null;
+ pOp->p1 = 1;
+ pOp->p2 = regPrev;
+ pOp = 0; /* Ensure pOp is not used after sqlite3VdbeAddOp() */
+
+ iJump = sqlite3VdbeCurrentAddr(v) + nResultCol;
+ for(i=0; i<nResultCol; i++){
+ CollSeq *pColl = sqlite3ExprCollSeq(pParse, p->pEList->a[i].pExpr);
+ if( i<nResultCol-1 ){
+ sqlite3VdbeAddOp3(v, OP_Ne, regResult+i, iJump, regPrev+i);
+ VdbeCoverage(v);
+ }else{
+ sqlite3VdbeAddOp3(v, OP_Eq, regResult+i, iContinue, regPrev+i);
+ VdbeCoverage(v);
+ }
+ sqlite3VdbeChangeP4(v, -1, (const char *)pColl, P4_COLLSEQ);
+ sqlite3VdbeChangeP5(v, SQLITE_NULLEQ);
+ }
+ assert( sqlite3VdbeCurrentAddr(v)==iJump || pParse->db->mallocFailed );
+ sqlite3VdbeAddOp3(v, OP_Copy, regResult, regPrev, nResultCol-1);
+ break;
+ }
+
+ case WHERE_DISTINCT_UNIQUE: {
+ sqlite3VdbeChangeToNoop(v, pDistinct->addrTnct);
+ break;
+ }
+
+ default: {
+ assert( pDistinct->eTnctType==WHERE_DISTINCT_UNORDERED );
+ codeDistinct(pParse, pDistinct->tabTnct, iContinue, nResultCol,
+ regResult);
+ break;
+ }
+ }
+ if( pSort==0 ){
+ codeOffset(v, p->iOffset, iContinue);
+ }
+ }
+
+ switch( eDest ){
+ /* In this mode, write each query result to the key of the temporary
+ ** table iParm.
+ */
+#ifndef SQLITE_OMIT_COMPOUND_SELECT
+ case SRT_Union: {
+ int r1;
+ r1 = sqlite3GetTempReg(pParse);
+ sqlite3VdbeAddOp3(v, OP_MakeRecord, regResult, nResultCol, r1);
+ sqlite3VdbeAddOp4Int(v, OP_IdxInsert, iParm, r1, regResult, nResultCol);
+ sqlite3ReleaseTempReg(pParse, r1);
+ break;
+ }
+
+ /* Construct a record from the query result, but instead of
+ ** saving that record, use it as a key to delete elements from
+ ** the temporary table iParm.
+ */
+ case SRT_Except: {
+ sqlite3VdbeAddOp3(v, OP_IdxDelete, iParm, regResult, nResultCol);
+ break;
+ }
+#endif /* SQLITE_OMIT_COMPOUND_SELECT */
+
+ /* Store the result as data using a unique key.
+ */
+ case SRT_Fifo:
+ case SRT_DistFifo:
+ case SRT_Table:
+ case SRT_EphemTab: {
+ int r1 = sqlite3GetTempRange(pParse, nPrefixReg+1);
+ testcase( eDest==SRT_Table );
+ testcase( eDest==SRT_EphemTab );
+ testcase( eDest==SRT_Fifo );
+ testcase( eDest==SRT_DistFifo );
+ sqlite3VdbeAddOp3(v, OP_MakeRecord, regResult, nResultCol, r1+nPrefixReg);
+#ifndef SQLITE_OMIT_CTE
+ if( eDest==SRT_DistFifo ){
+ /* If the destination is DistFifo, then cursor (iParm+1) is open
+ ** on an ephemeral index. If the current row is already present
+ ** in the index, do not write it to the output. If not, add the
+ ** current row to the index and proceed with writing it to the
+ ** output table as well. */
+ int addr = sqlite3VdbeCurrentAddr(v) + 4;
+ sqlite3VdbeAddOp4Int(v, OP_Found, iParm+1, addr, r1, 0);
+ VdbeCoverage(v);
+ sqlite3VdbeAddOp4Int(v, OP_IdxInsert, iParm+1, r1,regResult,nResultCol);
+ assert( pSort==0 );
+ }
+#endif
+ if( pSort ){
+ assert( regResult==regOrig );
+ pushOntoSorter(pParse, pSort, p, r1+nPrefixReg, regOrig, 1, nPrefixReg);
+ }else{
+ int r2 = sqlite3GetTempReg(pParse);
+ sqlite3VdbeAddOp2(v, OP_NewRowid, iParm, r2);
+ sqlite3VdbeAddOp3(v, OP_Insert, iParm, r1, r2);
+ sqlite3VdbeChangeP5(v, OPFLAG_APPEND);
+ sqlite3ReleaseTempReg(pParse, r2);
+ }
+ sqlite3ReleaseTempRange(pParse, r1, nPrefixReg+1);
+ break;
+ }
+
+ case SRT_Upfrom: {
+ if( pSort ){
+ pushOntoSorter(
+ pParse, pSort, p, regResult, regOrig, nResultCol, nPrefixReg);
+ }else{
+ int i2 = pDest->iSDParm2;
+ int r1 = sqlite3GetTempReg(pParse);
+
+ /* If the UPDATE FROM join is an aggregate that matches no rows, it
+ ** might still be trying to return one row, because that is what
+ ** aggregates do. Don't record that empty row in the output table. */
+ sqlite3VdbeAddOp2(v, OP_IsNull, regResult, iBreak); VdbeCoverage(v);
+
+ sqlite3VdbeAddOp3(v, OP_MakeRecord,
+ regResult+(i2<0), nResultCol-(i2<0), r1);
+ if( i2<0 ){
+ sqlite3VdbeAddOp3(v, OP_Insert, iParm, r1, regResult);
+ }else{
+ sqlite3VdbeAddOp4Int(v, OP_IdxInsert, iParm, r1, regResult, i2);
+ }
+ }
+ break;
+ }
+
+#ifndef SQLITE_OMIT_SUBQUERY
+ /* If we are creating a set for an "expr IN (SELECT ...)" construct,
+ ** then there should be a single item on the stack. Write this
+ ** item into the set table with bogus data.
+ */
+ case SRT_Set: {
+ if( pSort ){
+ /* At first glance you would think we could optimize out the
+ ** ORDER BY in this case since the order of entries in the set
+ ** does not matter. But there might be a LIMIT clause, in which
+ ** case the order does matter */
+ pushOntoSorter(
+ pParse, pSort, p, regResult, regOrig, nResultCol, nPrefixReg);
+ }else{
+ int r1 = sqlite3GetTempReg(pParse);
+ assert( sqlite3Strlen30(pDest->zAffSdst)==nResultCol );
+ sqlite3VdbeAddOp4(v, OP_MakeRecord, regResult, nResultCol,
+ r1, pDest->zAffSdst, nResultCol);
+ sqlite3VdbeAddOp4Int(v, OP_IdxInsert, iParm, r1, regResult, nResultCol);
+ sqlite3ReleaseTempReg(pParse, r1);
+ }
+ break;
+ }
+
+
+ /* If any row exists in the result set, record that fact and abort.
+ */
+ case SRT_Exists: {
+ sqlite3VdbeAddOp2(v, OP_Integer, 1, iParm);
+ /* The LIMIT clause will terminate the loop for us */
+ break;
+ }
+
+ /* If this is a scalar select that is part of an expression, then
+ ** store the results in the appropriate memory cell or array of
+ ** memory cells and break out of the scan loop.
+ */
+ case SRT_Mem: {
+ if( pSort ){
+ assert( nResultCol<=pDest->nSdst );
+ pushOntoSorter(
+ pParse, pSort, p, regResult, regOrig, nResultCol, nPrefixReg);
+ }else{
+ assert( nResultCol==pDest->nSdst );
+ assert( regResult==iParm );
+ /* The LIMIT clause will jump out of the loop for us */
+ }
+ break;
+ }
+#endif /* #ifndef SQLITE_OMIT_SUBQUERY */
+
+ case SRT_Coroutine: /* Send data to a co-routine */
+ case SRT_Output: { /* Return the results */
+ testcase( eDest==SRT_Coroutine );
+ testcase( eDest==SRT_Output );
+ if( pSort ){
+ pushOntoSorter(pParse, pSort, p, regResult, regOrig, nResultCol,
+ nPrefixReg);
+ }else if( eDest==SRT_Coroutine ){
+ sqlite3VdbeAddOp1(v, OP_Yield, pDest->iSDParm);
+ }else{
+ sqlite3VdbeAddOp2(v, OP_ResultRow, regResult, nResultCol);
+ }
+ break;
+ }
+
+#ifndef SQLITE_OMIT_CTE
+ /* Write the results into a priority queue that is ordered according to
+ ** pDest->pOrderBy (in pSO). pDest->iSDParm (in iParm) is the cursor for an
+ ** index with pSO->nExpr+2 columns. Build a key using pSO for the first
+ ** pSO->nExpr columns, then make sure all keys are unique by adding a
+ ** final OP_Sequence column. The last column is the record as a blob.
+ */
+ case SRT_DistQueue:
+ case SRT_Queue: {
+ int nKey;
+ int r1, r2, r3;
+ int addrTest = 0;
+ ExprList *pSO;
+ pSO = pDest->pOrderBy;
+ assert( pSO );
+ nKey = pSO->nExpr;
+ r1 = sqlite3GetTempReg(pParse);
+ r2 = sqlite3GetTempRange(pParse, nKey+2);
+ r3 = r2+nKey+1;
+ if( eDest==SRT_DistQueue ){
+ /* If the destination is DistQueue, then cursor (iParm+1) is open
+ ** on a second ephemeral index that holds all values ever previously
+ ** added to the queue. */
+ addrTest = sqlite3VdbeAddOp4Int(v, OP_Found, iParm+1, 0,
+ regResult, nResultCol);
+ VdbeCoverage(v);
+ }
+ sqlite3VdbeAddOp3(v, OP_MakeRecord, regResult, nResultCol, r3);
+ if( eDest==SRT_DistQueue ){
+ sqlite3VdbeAddOp2(v, OP_IdxInsert, iParm+1, r3);
+ sqlite3VdbeChangeP5(v, OPFLAG_USESEEKRESULT);
+ }
+ for(i=0; i<nKey; i++){
+ sqlite3VdbeAddOp2(v, OP_SCopy,
+ regResult + pSO->a[i].u.x.iOrderByCol - 1,
+ r2+i);
+ }
+ sqlite3VdbeAddOp2(v, OP_Sequence, iParm, r2+nKey);
+ sqlite3VdbeAddOp3(v, OP_MakeRecord, r2, nKey+2, r1);
+ sqlite3VdbeAddOp4Int(v, OP_IdxInsert, iParm, r1, r2, nKey+2);
+ if( addrTest ) sqlite3VdbeJumpHere(v, addrTest);
+ sqlite3ReleaseTempReg(pParse, r1);
+ sqlite3ReleaseTempRange(pParse, r2, nKey+2);
+ break;
+ }
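+
+ /* To make the record layout concrete: for a recursive CTE whose
+ ** ORDER BY has two terms, each entry added to the queue above is a
+ ** four-column record holding the two key values, an OP_Sequence value
+ ** that keeps otherwise-equal keys unique, and the complete result row
+ ** packed as a blob. */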
+#endif /* SQLITE_OMIT_CTE */
+
+
+
+#if !defined(SQLITE_OMIT_TRIGGER)
+ /* Discard the results. This is used for SELECT statements inside
+ ** the body of a TRIGGER. The purpose of such selects is to call
+ ** user-defined functions that have side effects. We do not care
+ ** about the actual results of the select.
+ */
+ default: {
+ assert( eDest==SRT_Discard );
+ break;
+ }
+#endif
+ }
+
+ /* Jump to the end of the loop if the LIMIT is reached. Except, if
+ ** there is a sorter, in which case the sorter has already limited
+ ** the output for us.
+ */
+ if( pSort==0 && p->iLimit ){
+ sqlite3VdbeAddOp2(v, OP_DecrJumpZero, p->iLimit, iBreak); VdbeCoverage(v);
+ }
+}
+
+/*
+** Allocate a KeyInfo object sufficient for an index of N key columns and
+** X extra columns.
+*/
+SQLITE_PRIVATE KeyInfo *sqlite3KeyInfoAlloc(sqlite3 *db, int N, int X){
+ int nExtra = (N+X)*(sizeof(CollSeq*)+1) - sizeof(CollSeq*);
+ KeyInfo *p = sqlite3DbMallocRawNN(db, sizeof(KeyInfo) + nExtra);
+ if( p ){
+ p->aSortFlags = (u8*)&p->aColl[N+X];
+ p->nKeyField = (u16)N;
+ p->nAllField = (u16)(N+X);
+ p->enc = ENC(db);
+ p->db = db;
+ p->nRef = 1;
+ memset(&p[1], 0, nExtra);
+ }else{
+ sqlite3OomFault(db);
+ }
+ return p;
+}
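+
+/*
+** The nExtra arithmetic above sizes a single allocation: the KeyInfo
+** header (whose trailing aColl[1] member already provides one CollSeq
+** pointer, hence the subtraction) is followed by the remaining N+X-1
+** collation pointers and then by N+X one-byte sort flags addressed
+** through aSortFlags. For example, N==2 and X==1 on a build with 8-byte
+** pointers asks for sizeof(KeyInfo) + 2*8 + 3 bytes.
+*/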
+
+/*
+** Deallocate a KeyInfo object
+*/
+SQLITE_PRIVATE void sqlite3KeyInfoUnref(KeyInfo *p){
+ if( p ){
+ assert( p->nRef>0 );
+ p->nRef--;
+ if( p->nRef==0 ) sqlite3DbFreeNN(p->db, p);
+ }
+}
+
+/*
+** Make a new pointer to a KeyInfo object
+*/
+SQLITE_PRIVATE KeyInfo *sqlite3KeyInfoRef(KeyInfo *p){
+ if( p ){
+ assert( p->nRef>0 );
+ p->nRef++;
+ }
+ return p;
+}
+
+#ifdef SQLITE_DEBUG
+/*
+** Return TRUE if a KeyInfo object can be changed. The KeyInfo object
+** can only be changed if there is just a single reference to the object.
+**
+** This routine is used only inside of assert() statements.
+*/
+SQLITE_PRIVATE int sqlite3KeyInfoIsWriteable(KeyInfo *p){ return p->nRef==1; }
+#endif /* SQLITE_DEBUG */
+
+/*
+** Given an expression list, generate a KeyInfo structure that records
+** the collating sequence for each expression in that expression list.
+**
+** If the ExprList is an ORDER BY or GROUP BY clause then the resulting
+** KeyInfo structure is appropriate for initializing a virtual index to
+** implement that clause. If the ExprList is the result set of a SELECT
+** then the KeyInfo structure is appropriate for initializing a virtual
+** index to implement a DISTINCT test.
+**
+** Space to hold the KeyInfo structure is obtained from malloc. The calling
+** function is responsible for seeing that this structure is eventually
+** freed.
+*/
+SQLITE_PRIVATE KeyInfo *sqlite3KeyInfoFromExprList(
+ Parse *pParse, /* Parsing context */
+ ExprList *pList, /* Form the KeyInfo object from this ExprList */
+ int iStart, /* Begin with this column of pList */
+ int nExtra /* Add this many extra columns to the end */
+){
+ int nExpr;
+ KeyInfo *pInfo;
+ struct ExprList_item *pItem;
+ sqlite3 *db = pParse->db;
+ int i;
+
+ nExpr = pList->nExpr;
+ pInfo = sqlite3KeyInfoAlloc(db, nExpr-iStart, nExtra+1);
+ if( pInfo ){
+ assert( sqlite3KeyInfoIsWriteable(pInfo) );
+ for(i=iStart, pItem=pList->a+iStart; i<nExpr; i++, pItem++){
+ pInfo->aColl[i-iStart] = sqlite3ExprNNCollSeq(pParse, pItem->pExpr);
+ pInfo->aSortFlags[i-iStart] = pItem->sortFlags;
+ }
+ }
+ return pInfo;
+}
+
+/*
+** Name of the connection operator, used for error messages.
+*/
+static const char *selectOpName(int id){
+ char *z;
+ switch( id ){
+ case TK_ALL: z = "UNION ALL"; break;
+ case TK_INTERSECT: z = "INTERSECT"; break;
+ case TK_EXCEPT: z = "EXCEPT"; break;
+ default: z = "UNION"; break;
+ }
+ return z;
+}
+
+#ifndef SQLITE_OMIT_EXPLAIN
+/*
+** Unless an "EXPLAIN QUERY PLAN" command is being processed, this function
+** is a no-op. Otherwise, it adds a single row of output to the EQP result,
+** where the caption is of the form:
+**
+** "USE TEMP B-TREE FOR xxx"
+**
+** where xxx is one of "DISTINCT", "ORDER BY" or "GROUP BY". Exactly which
+** is determined by the zUsage argument.
+*/
+static void explainTempTable(Parse *pParse, const char *zUsage){
+ ExplainQueryPlan((pParse, 0, "USE TEMP B-TREE FOR %s", zUsage));
+}
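+
+/*
+** For example, running EXPLAIN QUERY PLAN on
+**
+**     SELECT a FROM t1 ORDER BY b;
+**
+** when there is no index on t1(b) emits the row
+**
+**     USE TEMP B-TREE FOR ORDER BY
+**
+** because the sort is implemented with a transient b-tree.
+*/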
+
+/*
+** Assign expression b to lvalue a. A second, no-op, version of this macro
+** is provided when SQLITE_OMIT_EXPLAIN is defined. This allows the code
+** in sqlite3Select() to assign values to structure member variables that
+** only exist if SQLITE_OMIT_EXPLAIN is not defined without polluting the
+** code with #ifndef directives.
+*/
+# define explainSetInteger(a, b) a = b
+
+#else
+/* No-op versions of the explainXXX() functions and macros. */
+# define explainTempTable(y,z)
+# define explainSetInteger(y,z)
+#endif
+
+
+/*
+** If the inner loop was generated using a non-null pOrderBy argument,
+** then the results were placed in a sorter. After the loop is terminated
+** we need to run the sorter and output the results. The following
+** routine generates the code needed to do that.
+*/
+static void generateSortTail(
+ Parse *pParse, /* Parsing context */
+ Select *p, /* The SELECT statement */
+ SortCtx *pSort, /* Information on the ORDER BY clause */
+ int nColumn, /* Number of columns of data */
+ SelectDest *pDest /* Write the sorted results here */
+){
+ Vdbe *v = pParse->pVdbe; /* The prepared statement */
+ int addrBreak = pSort->labelDone; /* Jump here to exit loop */
+ int addrContinue = sqlite3VdbeMakeLabel(pParse);/* Jump here for next cycle */
+ int addr; /* Top of output loop. Jump for Next. */
+ int addrOnce = 0;
+ int iTab;
+ ExprList *pOrderBy = pSort->pOrderBy;
+ int eDest = pDest->eDest;
+ int iParm = pDest->iSDParm;
+ int regRow;
+ int regRowid;
+ int iCol;
+ int nKey; /* Number of key columns in sorter record */
+ int iSortTab; /* Sorter cursor to read from */
+ int i;
+ int bSeq; /* True if sorter record includes seq. no. */
+ int nRefKey = 0;
+ struct ExprList_item *aOutEx = p->pEList->a;
+
+ assert( addrBreak<0 );
+ if( pSort->labelBkOut ){
+ sqlite3VdbeAddOp2(v, OP_Gosub, pSort->regReturn, pSort->labelBkOut);
+ sqlite3VdbeGoto(v, addrBreak);
+ sqlite3VdbeResolveLabel(v, pSort->labelBkOut);
+ }
+
+#ifdef SQLITE_ENABLE_SORTER_REFERENCES
+ /* Open any cursors needed for sorter-reference expressions */
+ for(i=0; i<pSort->nDefer; i++){
+ Table *pTab = pSort->aDefer[i].pTab;
+ int iDb = sqlite3SchemaToIndex(pParse->db, pTab->pSchema);
+ sqlite3OpenTable(pParse, pSort->aDefer[i].iCsr, iDb, pTab, OP_OpenRead);
+ nRefKey = MAX(nRefKey, pSort->aDefer[i].nKey);
+ }
+#endif
+
+ iTab = pSort->iECursor;
+ if( eDest==SRT_Output || eDest==SRT_Coroutine || eDest==SRT_Mem ){
+ regRowid = 0;
+ regRow = pDest->iSdst;
+ }else{
+ regRowid = sqlite3GetTempReg(pParse);
+ if( eDest==SRT_EphemTab || eDest==SRT_Table ){
+ regRow = sqlite3GetTempReg(pParse);
+ nColumn = 0;
+ }else{
+ regRow = sqlite3GetTempRange(pParse, nColumn);
+ }
+ }
+ nKey = pOrderBy->nExpr - pSort->nOBSat;
+ if( pSort->sortFlags & SORTFLAG_UseSorter ){
+ int regSortOut = ++pParse->nMem;
+ iSortTab = pParse->nTab++;
+ if( pSort->labelBkOut ){
+ addrOnce = sqlite3VdbeAddOp0(v, OP_Once); VdbeCoverage(v);
+ }
+ sqlite3VdbeAddOp3(v, OP_OpenPseudo, iSortTab, regSortOut,
+ nKey+1+nColumn+nRefKey);
+ if( addrOnce ) sqlite3VdbeJumpHere(v, addrOnce);
+ addr = 1 + sqlite3VdbeAddOp2(v, OP_SorterSort, iTab, addrBreak);
+ VdbeCoverage(v);
+ codeOffset(v, p->iOffset, addrContinue);
+ sqlite3VdbeAddOp3(v, OP_SorterData, iTab, regSortOut, iSortTab);
+ bSeq = 0;
+ }else{
+ addr = 1 + sqlite3VdbeAddOp2(v, OP_Sort, iTab, addrBreak); VdbeCoverage(v);
+ codeOffset(v, p->iOffset, addrContinue);
+ iSortTab = iTab;
+ bSeq = 1;
+ }
+ for(i=0, iCol=nKey+bSeq-1; i<nColumn; i++){
+#ifdef SQLITE_ENABLE_SORTER_REFERENCES
+ if( aOutEx[i].bSorterRef ) continue;
+#endif
+ if( aOutEx[i].u.x.iOrderByCol==0 ) iCol++;
+ }
+#ifdef SQLITE_ENABLE_SORTER_REFERENCES
+ if( pSort->nDefer ){
+ int iKey = iCol+1;
+ int regKey = sqlite3GetTempRange(pParse, nRefKey);
+
+ for(i=0; i<pSort->nDefer; i++){
+ int iCsr = pSort->aDefer[i].iCsr;
+ Table *pTab = pSort->aDefer[i].pTab;
+ int nKey = pSort->aDefer[i].nKey;
+
+ sqlite3VdbeAddOp1(v, OP_NullRow, iCsr);
+ if( HasRowid(pTab) ){
+ sqlite3VdbeAddOp3(v, OP_Column, iSortTab, iKey++, regKey);
+ sqlite3VdbeAddOp3(v, OP_SeekRowid, iCsr,
+ sqlite3VdbeCurrentAddr(v)+1, regKey);
+ }else{
+ int k;
+ int iJmp;
+ assert( sqlite3PrimaryKeyIndex(pTab)->nKeyCol==nKey );
+ for(k=0; k<nKey; k++){
+ sqlite3VdbeAddOp3(v, OP_Column, iSortTab, iKey++, regKey+k);
+ }
+ iJmp = sqlite3VdbeCurrentAddr(v);
+ sqlite3VdbeAddOp4Int(v, OP_SeekGE, iCsr, iJmp+2, regKey, nKey);
+ sqlite3VdbeAddOp4Int(v, OP_IdxLE, iCsr, iJmp+3, regKey, nKey);
+ sqlite3VdbeAddOp1(v, OP_NullRow, iCsr);
+ }
+ }
+ sqlite3ReleaseTempRange(pParse, regKey, nRefKey);
+ }
+#endif
+ for(i=nColumn-1; i>=0; i--){
+#ifdef SQLITE_ENABLE_SORTER_REFERENCES
+ if( aOutEx[i].bSorterRef ){
+ sqlite3ExprCode(pParse, aOutEx[i].pExpr, regRow+i);
+ }else
+#endif
+ {
+ int iRead;
+ if( aOutEx[i].u.x.iOrderByCol ){
+ iRead = aOutEx[i].u.x.iOrderByCol-1;
+ }else{
+ iRead = iCol--;
+ }
+ sqlite3VdbeAddOp3(v, OP_Column, iSortTab, iRead, regRow+i);
+ VdbeComment((v, "%s", aOutEx[i].zEName));
+ }
+ }
+ switch( eDest ){
+ case SRT_Table:
+ case SRT_EphemTab: {
+ sqlite3VdbeAddOp3(v, OP_Column, iSortTab, nKey+bSeq, regRow);
+ sqlite3VdbeAddOp2(v, OP_NewRowid, iParm, regRowid);
+ sqlite3VdbeAddOp3(v, OP_Insert, iParm, regRow, regRowid);
+ sqlite3VdbeChangeP5(v, OPFLAG_APPEND);
+ break;
+ }
+#ifndef SQLITE_OMIT_SUBQUERY
+ case SRT_Set: {
+ assert( nColumn==sqlite3Strlen30(pDest->zAffSdst) );
+ sqlite3VdbeAddOp4(v, OP_MakeRecord, regRow, nColumn, regRowid,
+ pDest->zAffSdst, nColumn);
+ sqlite3VdbeAddOp4Int(v, OP_IdxInsert, iParm, regRowid, regRow, nColumn);
+ break;
+ }
+ case SRT_Mem: {
+ /* The LIMIT clause will terminate the loop for us */
+ break;
+ }
+#endif
+ case SRT_Upfrom: {
+ int i2 = pDest->iSDParm2;
+ int r1 = sqlite3GetTempReg(pParse);
+ sqlite3VdbeAddOp3(v, OP_MakeRecord,regRow+(i2<0),nColumn-(i2<0),r1);
+ if( i2<0 ){
+ sqlite3VdbeAddOp3(v, OP_Insert, iParm, r1, regRow);
+ }else{
+ sqlite3VdbeAddOp4Int(v, OP_IdxInsert, iParm, r1, regRow, i2);
+ }
+ break;
+ }
+ default: {
+ assert( eDest==SRT_Output || eDest==SRT_Coroutine );
+ testcase( eDest==SRT_Output );
+ testcase( eDest==SRT_Coroutine );
+ if( eDest==SRT_Output ){
+ sqlite3VdbeAddOp2(v, OP_ResultRow, pDest->iSdst, nColumn);
+ }else{
+ sqlite3VdbeAddOp1(v, OP_Yield, pDest->iSDParm);
+ }
+ break;
+ }
+ }
+ if( regRowid ){
+ if( eDest==SRT_Set ){
+ sqlite3ReleaseTempRange(pParse, regRow, nColumn);
+ }else{
+ sqlite3ReleaseTempReg(pParse, regRow);
+ }
+ sqlite3ReleaseTempReg(pParse, regRowid);
+ }
+ /* The bottom of the loop
+ */
+ sqlite3VdbeResolveLabel(v, addrContinue);
+ if( pSort->sortFlags & SORTFLAG_UseSorter ){
+ sqlite3VdbeAddOp2(v, OP_SorterNext, iTab, addr); VdbeCoverage(v);
+ }else{
+ sqlite3VdbeAddOp2(v, OP_Next, iTab, addr); VdbeCoverage(v);
+ }
+ if( pSort->regReturn ) sqlite3VdbeAddOp1(v, OP_Return, pSort->regReturn);
+ sqlite3VdbeResolveLabel(v, addrBreak);
+}
+
+/*
+** Return a pointer to a string containing the 'declaration type' of the
+** expression pExpr. The string may be treated as static by the caller.
+**
+** The declaration type is the exact datatype definition extracted from the
+** original CREATE TABLE statement if the expression is a column. The
+** declaration type for a ROWID field is INTEGER. Exactly when an expression
+** is considered a column can be complex in the presence of subqueries. The
+** result-set expression in all of the following SELECT statements is
+** considered a column by this function.
+**
+** SELECT col FROM tbl;
+** SELECT (SELECT col FROM tbl);
+** SELECT abc FROM (SELECT col AS abc FROM tbl);
+**
+** The declaration type for any expression other than a column is NULL.
+**
+** This routine has either 3 or 6 parameters depending on whether or not
+** the SQLITE_ENABLE_COLUMN_METADATA compile-time option is used.
+*/
+#ifdef SQLITE_ENABLE_COLUMN_METADATA
+# define columnType(A,B,C,D,E) columnTypeImpl(A,B,C,D,E)
+#else /* if !defined(SQLITE_ENABLE_COLUMN_METADATA) */
+# define columnType(A,B,C,D,E) columnTypeImpl(A,B)
+#endif
+static const char *columnTypeImpl(
+ NameContext *pNC,
+#ifndef SQLITE_ENABLE_COLUMN_METADATA
+ Expr *pExpr
+#else
+ Expr *pExpr,
+ const char **pzOrigDb,
+ const char **pzOrigTab,
+ const char **pzOrigCol
+#endif
+){
+ char const *zType = 0;
+ int j;
+#ifdef SQLITE_ENABLE_COLUMN_METADATA
+ char const *zOrigDb = 0;
+ char const *zOrigTab = 0;
+ char const *zOrigCol = 0;
+#endif
+
+ assert( pExpr!=0 );
+ assert( pNC->pSrcList!=0 );
+ switch( pExpr->op ){
+ case TK_COLUMN: {
+ /* The expression is a column. Locate the table the column is being
+ ** extracted from in NameContext.pSrcList. This table may be a real
+ ** database table or a subquery.
+ */
+ Table *pTab = 0; /* Table structure column is extracted from */
+ Select *pS = 0; /* Select the column is extracted from */
+ int iCol = pExpr->iColumn; /* Index of column in pTab */
+ while( pNC && !pTab ){
+ SrcList *pTabList = pNC->pSrcList;
+ for(j=0;j<pTabList->nSrc && pTabList->a[j].iCursor!=pExpr->iTable;j++);
+ if( j<pTabList->nSrc ){
+ pTab = pTabList->a[j].pTab;
+ pS = pTabList->a[j].pSelect;
+ }else{
+ pNC = pNC->pNext;
+ }
+ }
+
+ if( pTab==0 ){
+ /* At one time, code such as "SELECT new.x" within a trigger would
+ ** cause this condition to run. Since then, we have restructured how
+ ** trigger code is generated and so this condition is no longer
+ ** possible. However, it can still be true for statements like
+ ** the following:
+ **
+ ** CREATE TABLE t1(col INTEGER);
+ ** SELECT (SELECT t1.col) FROM t1;
+ **
+ ** when columnType() is called on the expression "t1.col" in the
+ ** sub-select. In this case, set the column type to NULL, even
+ ** though it should really be "INTEGER".
+ **
+ ** This is not a problem, as the column type of "t1.col" is never
+ ** used. When columnType() is called on the expression
+ ** "(SELECT t1.col)", the correct type is returned (see the TK_SELECT
+ ** branch below). */
+ break;
+ }
+
+ assert( pTab && pExpr->y.pTab==pTab );
+ if( pS ){
+ /* The "table" is actually a sub-select or a view in the FROM clause
+ ** of the SELECT statement. Return the declaration type and origin
+ ** data for the result-set column of the sub-select.
+ */
+ if( iCol>=0 && iCol<pS->pEList->nExpr ){
+ /* If iCol is less than zero, then the expression requests the
+ ** rowid of the sub-select or view. This expression is legal (see
+ ** test case misc2.2.2) - it always evaluates to NULL.
+ */
+ NameContext sNC;
+ Expr *p = pS->pEList->a[iCol].pExpr;
+ sNC.pSrcList = pS->pSrc;
+ sNC.pNext = pNC;
+ sNC.pParse = pNC->pParse;
+ zType = columnType(&sNC, p,&zOrigDb,&zOrigTab,&zOrigCol);
+ }
+ }else{
+ /* A real table or a CTE table */
+ assert( !pS );
+#ifdef SQLITE_ENABLE_COLUMN_METADATA
+ if( iCol<0 ) iCol = pTab->iPKey;
+ assert( iCol==XN_ROWID || (iCol>=0 && iCol<pTab->nCol) );
+ if( iCol<0 ){
+ zType = "INTEGER";
+ zOrigCol = "rowid";
+ }else{
+ zOrigCol = pTab->aCol[iCol].zName;
+ zType = sqlite3ColumnType(&pTab->aCol[iCol],0);
+ }
+ zOrigTab = pTab->zName;
+ if( pNC->pParse && pTab->pSchema ){
+ int iDb = sqlite3SchemaToIndex(pNC->pParse->db, pTab->pSchema);
+ zOrigDb = pNC->pParse->db->aDb[iDb].zDbSName;
+ }
+#else
+ assert( iCol==XN_ROWID || (iCol>=0 && iCol<pTab->nCol) );
+ if( iCol<0 ){
+ zType = "INTEGER";
+ }else{
+ zType = sqlite3ColumnType(&pTab->aCol[iCol],0);
+ }
+#endif
+ }
+ break;
+ }
+#ifndef SQLITE_OMIT_SUBQUERY
+ case TK_SELECT: {
+ /* The expression is a sub-select. Return the declaration type and
+ ** origin info for the single column in the result set of the SELECT
+ ** statement.
+ */
+ NameContext sNC;
+ Select *pS = pExpr->x.pSelect;
+ Expr *p = pS->pEList->a[0].pExpr;
+ assert( ExprHasProperty(pExpr, EP_xIsSelect) );
+ sNC.pSrcList = pS->pSrc;
+ sNC.pNext = pNC;
+ sNC.pParse = pNC->pParse;
+ zType = columnType(&sNC, p, &zOrigDb, &zOrigTab, &zOrigCol);
+ break;
+ }
+#endif
+ }
+
+#ifdef SQLITE_ENABLE_COLUMN_METADATA
+ if( pzOrigDb ){
+ assert( pzOrigTab && pzOrigCol );
+ *pzOrigDb = zOrigDb;
+ *pzOrigTab = zOrigTab;
+ *pzOrigCol = zOrigCol;
+ }
+#endif
+ return zType;
+}
+
+/*
+** Generate code that will tell the VDBE the declaration types of columns
+** in the result set.
+*/
+static void generateColumnTypes(
+ Parse *pParse, /* Parser context */
+ SrcList *pTabList, /* List of tables */
+ ExprList *pEList /* Expressions defining the result set */
+){
+#ifndef SQLITE_OMIT_DECLTYPE
+ Vdbe *v = pParse->pVdbe;
+ int i;
+ NameContext sNC;
+ sNC.pSrcList = pTabList;
+ sNC.pParse = pParse;
+ sNC.pNext = 0;
+ for(i=0; i<pEList->nExpr; i++){
+ Expr *p = pEList->a[i].pExpr;
+ const char *zType;
+#ifdef SQLITE_ENABLE_COLUMN_METADATA
+ const char *zOrigDb = 0;
+ const char *zOrigTab = 0;
+ const char *zOrigCol = 0;
+ zType = columnType(&sNC, p, &zOrigDb, &zOrigTab, &zOrigCol);
+
+ /* The vdbe must make its own copy of the column-type and other
+ ** column specific strings, in case the schema is reset before this
+ ** virtual machine is deleted.
+ */
+ sqlite3VdbeSetColName(v, i, COLNAME_DATABASE, zOrigDb, SQLITE_TRANSIENT);
+ sqlite3VdbeSetColName(v, i, COLNAME_TABLE, zOrigTab, SQLITE_TRANSIENT);
+ sqlite3VdbeSetColName(v, i, COLNAME_COLUMN, zOrigCol, SQLITE_TRANSIENT);
+#else
+ zType = columnType(&sNC, p, 0, 0, 0);
+#endif
+ sqlite3VdbeSetColName(v, i, COLNAME_DECLTYPE, zType, SQLITE_TRANSIENT);
+ }
+#endif /* !defined(SQLITE_OMIT_DECLTYPE) */
+}
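+
+/* Illustration (editorial addition): when SQLITE_ENABLE_COLUMN_METADATA is
+** defined, the origin strings stored above surface through the public
+** accessors sqlite3_column_database_name(), sqlite3_column_table_name(),
+** and sqlite3_column_origin_name().  Each returns NULL when the result
+** column is a computed expression rather than a direct table-column
+** reference.
+*/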
+
+
+/*
+** Compute the column names for a SELECT statement.
+**
+** The only guarantee that SQLite makes about column names is that if the
+** column has an AS clause assigning it a name, that will be the name used.
+** That is the only documented guarantee. However, countless applications
+** developed over the years have made baseless assumptions about column names
+** and will break if those assumptions change. Hence, use extreme caution
+** when modifying this routine to avoid breaking legacy applications.
+**
+** See Also: sqlite3ColumnsFromExprList()
+**
+** The PRAGMA short_column_names and PRAGMA full_column_names settings are
+** deprecated. The default setting is short=ON, full=OFF. 99.9% of all
+** applications should operate this way. Nevertheless, we need to support the
+** other modes for legacy:
+**
+**    short=OFF, full=OFF:      Column name is the text of the expression as it
+** originally appears in the SELECT statement. In
+** other words, the zSpan of the result expression.
+**
+** short=ON, full=OFF: (This is the default setting). If the result
+** refers directly to a table column, then the
+** result column name is just the table column
+** name: COLUMN. Otherwise use zSpan.
+**
+**    full=ON, short=ANY:       If the result refers directly to a table column,
+**                              then the result column name is the table column
+**                              name prefixed by the table name,
+**                              ex: TABLE.COLUMN. Otherwise use zSpan.
+*/
+static void generateColumnNames(
+ Parse *pParse, /* Parser context */
+ Select *pSelect /* Generate column names for this SELECT statement */
+){
+ Vdbe *v = pParse->pVdbe;
+ int i;
+ Table *pTab;
+ SrcList *pTabList;
+ ExprList *pEList;
+ sqlite3 *db = pParse->db;
+ int fullName; /* TABLE.COLUMN if no AS clause and is a direct table ref */
+ int srcName; /* COLUMN or TABLE.COLUMN if no AS clause and is direct */
+
+#ifndef SQLITE_OMIT_EXPLAIN
+ /* If this is an EXPLAIN, skip this step */
+ if( pParse->explain ){
+ return;
+ }
+#endif
+
+ if( pParse->colNamesSet ) return;
+ /* Column names are determined by the left-most term of a compound select */
+ while( pSelect->pPrior ) pSelect = pSelect->pPrior;
+ SELECTTRACE(1,pParse,pSelect,("generating column names\n"));
+ pTabList = pSelect->pSrc;
+ pEList = pSelect->pEList;
+ assert( v!=0 );
+ assert( pTabList!=0 );
+ pParse->colNamesSet = 1;
+ fullName = (db->flags & SQLITE_FullColNames)!=0;
+ srcName = (db->flags & SQLITE_ShortColNames)!=0 || fullName;
+ sqlite3VdbeSetNumCols(v, pEList->nExpr);
+ for(i=0; i<pEList->nExpr; i++){
+ Expr *p = pEList->a[i].pExpr;
+
+ assert( p!=0 );
+ assert( p->op!=TK_AGG_COLUMN ); /* Agg processing has not run yet */
+ assert( p->op!=TK_COLUMN || p->y.pTab!=0 ); /* Covering idx not yet coded */
+ if( pEList->a[i].zEName && pEList->a[i].eEName==ENAME_NAME ){
+ /* An AS clause always takes first priority */
+ char *zName = pEList->a[i].zEName;
+ sqlite3VdbeSetColName(v, i, COLNAME_NAME, zName, SQLITE_TRANSIENT);
+ }else if( srcName && p->op==TK_COLUMN ){
+ char *zCol;
+ int iCol = p->iColumn;
+ pTab = p->y.pTab;
+ assert( pTab!=0 );
+ if( iCol<0 ) iCol = pTab->iPKey;
+ assert( iCol==-1 || (iCol>=0 && iCol<pTab->nCol) );
+ if( iCol<0 ){
+ zCol = "rowid";
+ }else{
+ zCol = pTab->aCol[iCol].zName;
+ }
+ if( fullName ){
+ char *zName = 0;
+ zName = sqlite3MPrintf(db, "%s.%s", pTab->zName, zCol);
+ sqlite3VdbeSetColName(v, i, COLNAME_NAME, zName, SQLITE_DYNAMIC);
+ }else{
+ sqlite3VdbeSetColName(v, i, COLNAME_NAME, zCol, SQLITE_TRANSIENT);
+ }
+ }else{
+ const char *z = pEList->a[i].zEName;
+ z = z==0 ? sqlite3MPrintf(db, "column%d", i+1) : sqlite3DbStrDup(db, z);
+ sqlite3VdbeSetColName(v, i, COLNAME_NAME, z, SQLITE_DYNAMIC);
+ }
+ }
+ generateColumnTypes(pParse, pTabList, pEList);
+}
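+
+/* Illustration (editorial addition): with the default short=ON, full=OFF
+** setting and a hypothetical table t1(a), the rules above produce:
+**
+**   SELECT a      FROM t1;   -- column name "a"    (direct column ref)
+**   SELECT a AS b FROM t1;   -- column name "b"    (AS clause wins)
+**   SELECT a+1    FROM t1;   -- column name "a+1"  (the zSpan text)
+**
+** With PRAGMA full_column_names=ON the first case is named "t1.a".
+*/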
+
+/*
+** Given an expression list (which is really the list of expressions
+** that form the result set of a SELECT statement) compute appropriate
+** column names for a table that would hold the expression list.
+**
+** All column names will be unique.
+**
+** Only the column names are computed. Column.zType, Column.zColl,
+** and other fields of Column are zeroed.
+**
+** Return SQLITE_OK on success. If a memory allocation error occurs,
+** store NULL in *paCol and 0 in *pnCol and return SQLITE_NOMEM.
+**
+** The only guarantee that SQLite makes about column names is that if the
+** column has an AS clause assigning it a name, that will be the name used.
+** That is the only documented guarantee. However, countless applications
+** developed over the years have made baseless assumptions about column names
+** and will break if those assumptions changes. Hence, use extreme caution
+** when modifying this routine to avoid breaking legacy.
+**
+** See Also: generateColumnNames()
+*/
+SQLITE_PRIVATE int sqlite3ColumnsFromExprList(
+ Parse *pParse, /* Parsing context */
+ ExprList *pEList, /* Expr list from which to derive column names */
+ i16 *pnCol, /* Write the number of columns here */
+ Column **paCol /* Write the new column list here */
+){
+ sqlite3 *db = pParse->db; /* Database connection */
+ int i, j; /* Loop counters */
+ u32 cnt; /* Index added to make the name unique */
+ Column *aCol, *pCol; /* For looping over result columns */
+ int nCol; /* Number of columns in the result set */
+ char *zName; /* Column name */
+ int nName; /* Size of name in zName[] */
+ Hash ht; /* Hash table of column names */
+
+ sqlite3HashInit(&ht);
+ if( pEList ){
+ nCol = pEList->nExpr;
+ aCol = sqlite3DbMallocZero(db, sizeof(aCol[0])*nCol);
+ testcase( aCol==0 );
+ if( nCol>32767 ) nCol = 32767;
+ }else{
+ nCol = 0;
+ aCol = 0;
+ }
+ assert( nCol==(i16)nCol );
+ *pnCol = nCol;
+ *paCol = aCol;
+
+ for(i=0, pCol=aCol; i<nCol && !db->mallocFailed; i++, pCol++){
+ /* Get an appropriate name for the column
+ */
+ if( (zName = pEList->a[i].zEName)!=0 && pEList->a[i].eEName==ENAME_NAME ){
+ /* If the column contains an "AS <name>" phrase, use <name> as the name */
+ }else{
+ Expr *pColExpr = sqlite3ExprSkipCollateAndLikely(pEList->a[i].pExpr);
+ while( pColExpr->op==TK_DOT ){
+ pColExpr = pColExpr->pRight;
+ assert( pColExpr!=0 );
+ }
+ if( pColExpr->op==TK_COLUMN ){
+        /* For columns use the column name */
+ int iCol = pColExpr->iColumn;
+ Table *pTab = pColExpr->y.pTab;
+ assert( pTab!=0 );
+ if( iCol<0 ) iCol = pTab->iPKey;
+ zName = iCol>=0 ? pTab->aCol[iCol].zName : "rowid";
+ }else if( pColExpr->op==TK_ID ){
+ assert( !ExprHasProperty(pColExpr, EP_IntValue) );
+ zName = pColExpr->u.zToken;
+ }else{
+ /* Use the original text of the column expression as its name */
+ zName = pEList->a[i].zEName;
+ }
+ }
+ if( zName && !sqlite3IsTrueOrFalse(zName) ){
+ zName = sqlite3DbStrDup(db, zName);
+ }else{
+ zName = sqlite3MPrintf(db,"column%d",i+1);
+ }
+
+ /* Make sure the column name is unique. If the name is not unique,
+ ** append an integer to the name so that it becomes unique.
+ */
+ cnt = 0;
+ while( zName && sqlite3HashFind(&ht, zName)!=0 ){
+ nName = sqlite3Strlen30(zName);
+ if( nName>0 ){
+ for(j=nName-1; j>0 && sqlite3Isdigit(zName[j]); j--){}
+ if( zName[j]==':' ) nName = j;
+ }
+ zName = sqlite3MPrintf(db, "%.*z:%u", nName, zName, ++cnt);
+ if( cnt>3 ) sqlite3_randomness(sizeof(cnt), &cnt);
+ }
+ pCol->zName = zName;
+ pCol->hName = sqlite3StrIHash(zName);
+ sqlite3ColumnPropertiesFromName(0, pCol);
+ if( zName && sqlite3HashInsert(&ht, zName, pCol)==pCol ){
+ sqlite3OomFault(db);
+ }
+ }
+ sqlite3HashClear(&ht);
+ if( db->mallocFailed ){
+ for(j=0; j<i; j++){
+ sqlite3DbFree(db, aCol[j].zName);
+ }
+ sqlite3DbFree(db, aCol);
+ *paCol = 0;
+ *pnCol = 0;
+ return SQLITE_NOMEM_BKPT;
+ }
+ return SQLITE_OK;
+}
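+
+/* Illustration (editorial addition): when the same name occurs twice, the
+** loop above appends ":<n>" to force uniqueness.  For a hypothetical
+**
+**   CREATE TABLE t2 AS SELECT a, a FROM t1;
+**
+** the resulting columns are expected to be named "a" and "a:1".
+*/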
+
+/*
+** Add type and collation information to a column list based on
+** a SELECT statement.
+**
+** The column list presumably came from sqlite3ColumnsFromExprList().
+** The column list has only names, not types or collations. This
+** routine goes through and adds the types and collations.
+**
+** This routine requires that all identifiers in the SELECT
+** statement be resolved.
+*/
+SQLITE_PRIVATE void sqlite3SelectAddColumnTypeAndCollation(
+  Parse *pParse,        /* Parsing context */
+ Table *pTab, /* Add column type information to this table */
+ Select *pSelect, /* SELECT used to determine types and collations */
+ char aff /* Default affinity for columns */
+){
+ sqlite3 *db = pParse->db;
+ NameContext sNC;
+ Column *pCol;
+ CollSeq *pColl;
+ int i;
+ Expr *p;
+ struct ExprList_item *a;
+
+ assert( pSelect!=0 );
+ assert( (pSelect->selFlags & SF_Resolved)!=0 );
+ assert( pTab->nCol==pSelect->pEList->nExpr || db->mallocFailed );
+ if( db->mallocFailed ) return;
+ memset(&sNC, 0, sizeof(sNC));
+ sNC.pSrcList = pSelect->pSrc;
+ a = pSelect->pEList->a;
+ for(i=0, pCol=pTab->aCol; i<pTab->nCol; i++, pCol++){
+ const char *zType;
+ int n, m;
+ p = a[i].pExpr;
+ zType = columnType(&sNC, p, 0, 0, 0);
+ /* pCol->szEst = ... // Column size est for SELECT tables never used */
+ pCol->affinity = sqlite3ExprAffinity(p);
+ if( zType ){
+ m = sqlite3Strlen30(zType);
+ n = sqlite3Strlen30(pCol->zName);
+ pCol->zName = sqlite3DbReallocOrFree(db, pCol->zName, n+m+2);
+ if( pCol->zName ){
+ memcpy(&pCol->zName[n+1], zType, m+1);
+ pCol->colFlags |= COLFLAG_HASTYPE;
+ }
+ }
+ if( pCol->affinity<=SQLITE_AFF_NONE ) pCol->affinity = aff;
+ pColl = sqlite3ExprCollSeq(pParse, p);
+ if( pColl && pCol->zColl==0 ){
+ pCol->zColl = sqlite3DbStrDup(db, pColl->zName);
+ }
+ }
+ pTab->szTabRow = 1; /* Any non-zero value works */
+}
+
+/*
+** Given a SELECT statement, generate a Table structure that describes
+** the result set of that SELECT.
+*/
+SQLITE_PRIVATE Table *sqlite3ResultSetOfSelect(Parse *pParse, Select *pSelect, char aff){
+ Table *pTab;
+ sqlite3 *db = pParse->db;
+ u64 savedFlags;
+
+ savedFlags = db->flags;
+ db->flags &= ~(u64)SQLITE_FullColNames;
+ db->flags |= SQLITE_ShortColNames;
+ sqlite3SelectPrep(pParse, pSelect, 0);
+ db->flags = savedFlags;
+ if( pParse->nErr ) return 0;
+ while( pSelect->pPrior ) pSelect = pSelect->pPrior;
+ pTab = sqlite3DbMallocZero(db, sizeof(Table) );
+ if( pTab==0 ){
+ return 0;
+ }
+ pTab->nTabRef = 1;
+ pTab->zName = 0;
+ pTab->nRowLogEst = 200; assert( 200==sqlite3LogEst(1048576) );
+ sqlite3ColumnsFromExprList(pParse, pSelect->pEList, &pTab->nCol, &pTab->aCol);
+ sqlite3SelectAddColumnTypeAndCollation(pParse, pTab, pSelect, aff);
+ pTab->iPKey = -1;
+ if( db->mallocFailed ){
+ sqlite3DeleteTable(db, pTab);
+ return 0;
+ }
+ return pTab;
+}
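+
+/* Illustration (editorial addition): this is the mechanism by which, for
+** example, a view such as
+**
+**   CREATE VIEW v1 AS SELECT a+1 AS b FROM t1;
+**
+** acquires a Table structure describing one column named "b", with
+** affinity and collation inferred from the expression "a+1".
+*/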
+
+/*
+** Get a VDBE for the given parser context. Create a new one if necessary.
+** If an error occurs, return NULL and leave a message in pParse.
+*/
+SQLITE_PRIVATE Vdbe *sqlite3GetVdbe(Parse *pParse){
+ if( pParse->pVdbe ){
+ return pParse->pVdbe;
+ }
+ if( pParse->pToplevel==0
+ && OptimizationEnabled(pParse->db,SQLITE_FactorOutConst)
+ ){
+ pParse->okConstFactor = 1;
+ }
+ return sqlite3VdbeCreate(pParse);
+}
+
+
+/*
+** Compute the iLimit and iOffset fields of the SELECT based on the
+** pLimit expressions. pLimit->pLeft and pLimit->pRight hold the expressions
+** that appear in the original SQL statement after the LIMIT and OFFSET
+** keywords, or NULL if those keywords are omitted. iLimit and iOffset
+** are the integer memory register numbers for counters used to compute
+** the limit and offset. If there is no limit and/or offset, then
+** iLimit and iOffset are negative.
+**
+** This routine changes the values of iLimit and iOffset only if
+** a limit or offset is defined by pLimit->pLeft and pLimit->pRight. iLimit
+** and iOffset should have been preset to appropriate default values (zero)
+** prior to calling this routine.
+**
+** The iOffset register (if it exists) is initialized to the value
+** of the OFFSET. The iLimit register is initialized to LIMIT. Register
+** iOffset+1 is initialized to LIMIT+OFFSET.
+**
+** Only if pLimit->pLeft!=0 do the limit registers get
+** redefined. The UNION ALL operator uses this property to force
+** the reuse of the same limit and offset registers across multiple
+** SELECT statements.
+*/
+static void computeLimitRegisters(Parse *pParse, Select *p, int iBreak){
+ Vdbe *v = 0;
+ int iLimit = 0;
+ int iOffset;
+ int n;
+ Expr *pLimit = p->pLimit;
+
+ if( p->iLimit ) return;
+
+ /*
+ ** "LIMIT -1" always shows all rows. There is some
+ ** controversy about what the correct behavior should be.
+ ** The current implementation interprets "LIMIT 0" to mean
+ ** no rows.
+ */
+ if( pLimit ){
+ assert( pLimit->op==TK_LIMIT );
+ assert( pLimit->pLeft!=0 );
+ p->iLimit = iLimit = ++pParse->nMem;
+ v = sqlite3GetVdbe(pParse);
+ assert( v!=0 );
+ if( sqlite3ExprIsInteger(pLimit->pLeft, &n) ){
+ sqlite3VdbeAddOp2(v, OP_Integer, n, iLimit);
+ VdbeComment((v, "LIMIT counter"));
+ if( n==0 ){
+ sqlite3VdbeGoto(v, iBreak);
+ }else if( n>=0 && p->nSelectRow>sqlite3LogEst((u64)n) ){
+ p->nSelectRow = sqlite3LogEst((u64)n);
+ p->selFlags |= SF_FixedLimit;
+ }
+ }else{
+ sqlite3ExprCode(pParse, pLimit->pLeft, iLimit);
+ sqlite3VdbeAddOp1(v, OP_MustBeInt, iLimit); VdbeCoverage(v);
+ VdbeComment((v, "LIMIT counter"));
+ sqlite3VdbeAddOp2(v, OP_IfNot, iLimit, iBreak); VdbeCoverage(v);
+ }
+ if( pLimit->pRight ){
+ p->iOffset = iOffset = ++pParse->nMem;
+ pParse->nMem++; /* Allocate an extra register for limit+offset */
+ sqlite3ExprCode(pParse, pLimit->pRight, iOffset);
+ sqlite3VdbeAddOp1(v, OP_MustBeInt, iOffset); VdbeCoverage(v);
+ VdbeComment((v, "OFFSET counter"));
+ sqlite3VdbeAddOp3(v, OP_OffsetLimit, iLimit, iOffset+1, iOffset);
+ VdbeComment((v, "LIMIT+OFFSET"));
+ }
+ }
+}
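+
+/* Illustration (editorial addition) of the register layout: for a query
+** such as
+**
+**   SELECT x FROM t1 LIMIT 10 OFFSET 5;
+**
+** register iLimit is loaded with 10 and iOffset with 5, and the
+** OP_OffsetLimit opcode above stores LIMIT+OFFSET (here 15) in register
+** iOffset+1, so a scan can stop after visiting at most 15 rows.
+*/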
+
+#ifndef SQLITE_OMIT_COMPOUND_SELECT
+/*
+** Return the appropriate collating sequence for the iCol-th column of
+** the result set for the compound-select statement "p". Return NULL if
+** the column has no default collating sequence.
+**
+** The collating sequence for the compound select is taken from the
+** left-most term of the select that has a collating sequence.
+*/
+static CollSeq *multiSelectCollSeq(Parse *pParse, Select *p, int iCol){
+ CollSeq *pRet;
+ if( p->pPrior ){
+ pRet = multiSelectCollSeq(pParse, p->pPrior, iCol);
+ }else{
+ pRet = 0;
+ }
+ assert( iCol>=0 );
+ /* iCol must be less than p->pEList->nExpr. Otherwise an error would
+ ** have been thrown during name resolution and we would not have gotten
+ ** this far */
+ if( pRet==0 && ALWAYS(iCol<p->pEList->nExpr) ){
+ pRet = sqlite3ExprCollSeq(pParse, p->pEList->a[iCol].pExpr);
+ }
+ return pRet;
+}
+
+/*
+** The select statement passed as the second parameter is a compound SELECT
+** with an ORDER BY clause. This function allocates and returns a KeyInfo
+** structure suitable for implementing the ORDER BY.
+**
+** Space to hold the KeyInfo structure is obtained from malloc. The calling
+** function is responsible for ensuring that this structure is eventually
+** freed.
+*/
+static KeyInfo *multiSelectOrderByKeyInfo(Parse *pParse, Select *p, int nExtra){
+ ExprList *pOrderBy = p->pOrderBy;
+ int nOrderBy = p->pOrderBy->nExpr;
+ sqlite3 *db = pParse->db;
+ KeyInfo *pRet = sqlite3KeyInfoAlloc(db, nOrderBy+nExtra, 1);
+ if( pRet ){
+ int i;
+ for(i=0; i<nOrderBy; i++){
+ struct ExprList_item *pItem = &pOrderBy->a[i];
+ Expr *pTerm = pItem->pExpr;
+ CollSeq *pColl;
+
+ if( pTerm->flags & EP_Collate ){
+ pColl = sqlite3ExprCollSeq(pParse, pTerm);
+ }else{
+ pColl = multiSelectCollSeq(pParse, p, pItem->u.x.iOrderByCol-1);
+ if( pColl==0 ) pColl = db->pDfltColl;
+ pOrderBy->a[i].pExpr =
+ sqlite3ExprAddCollateString(pParse, pTerm, pColl->zName);
+ }
+ assert( sqlite3KeyInfoIsWriteable(pRet) );
+ pRet->aColl[i] = pColl;
+ pRet->aSortFlags[i] = pOrderBy->a[i].sortFlags;
+ }
+ }
+
+ return pRet;
+}
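+
+/* Illustration (editorial addition): in a compound query such as
+**
+**   SELECT a COLLATE nocase FROM t1 UNION SELECT b FROM t2 ORDER BY 1;
+**
+** the ORDER BY term carries no explicit COLLATE clause, so the code above
+** falls back to multiSelectCollSeq(), which walks to the left-most term
+** and picks up "nocase" from the expression for column "a".
+*/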
+
+#ifndef SQLITE_OMIT_CTE
+/*
+** This routine generates VDBE code to compute the content of a WITH RECURSIVE
+** query of the form:
+**
+**   <recursive-table> AS (<setup-query> UNION [ALL] <recursive-query>)
+**                         \___________/             \_______________/
+**                           p->pPrior                      p
+**
+**
+** There is exactly one reference to the recursive-table in the FROM clause
+** of recursive-query, marked with the SrcList->a[].fg.isRecursive flag.
+**
+** The setup-query runs once to generate an initial set of rows that go
+** into a Queue table. Rows are extracted from the Queue table one by
+** one. Each row extracted from Queue is output to pDest. Then the single
+** extracted row (now in the iCurrent table) becomes the content of the
+** recursive-table for a recursive-query run. The output of the recursive-query
+** is added back into the Queue table. Then another row is extracted from Queue
+** and the iteration continues until the Queue table is empty.
+**
+** If the compound query operator is UNION then no duplicate rows are ever
+** inserted into the Queue table. The iDistinct table keeps a copy of all rows
+** that have ever been inserted into Queue and causes duplicates to be
+** discarded. If the operator is UNION ALL, then duplicates are allowed.
+**
+** If the query has an ORDER BY, then entries in the Queue table are kept in
+** ORDER BY order and the first entry is extracted for each cycle. Without
+** an ORDER BY, the Queue table is just a FIFO.
+**
+** If a LIMIT clause is provided, then the iteration stops after LIMIT rows
+** have been output to pDest. A LIMIT of zero means to output no rows and a
+** negative LIMIT means to output all rows. If there is also an OFFSET clause
+** with a positive value, then the first OFFSET outputs are discarded rather
+** than being sent to pDest. The LIMIT count does not begin until after OFFSET
+** rows have been skipped.
+*/
+static void generateWithRecursiveQuery(
+ Parse *pParse, /* Parsing context */
+ Select *p, /* The recursive SELECT to be coded */
+ SelectDest *pDest /* What to do with query results */
+){
+ SrcList *pSrc = p->pSrc; /* The FROM clause of the recursive query */
+ int nCol = p->pEList->nExpr; /* Number of columns in the recursive table */
+ Vdbe *v = pParse->pVdbe; /* The prepared statement under construction */
+ Select *pSetup = p->pPrior; /* The setup query */
+ int addrTop; /* Top of the loop */
+ int addrCont, addrBreak; /* CONTINUE and BREAK addresses */
+ int iCurrent = 0; /* The Current table */
+ int regCurrent; /* Register holding Current table */
+ int iQueue; /* The Queue table */
+ int iDistinct = 0; /* To ensure unique results if UNION */
+ int eDest = SRT_Fifo; /* How to write to Queue */
+  SelectDest destQueue;     /* SelectDest targeting the Queue table */
+ int i; /* Loop counter */
+ int rc; /* Result code */
+ ExprList *pOrderBy; /* The ORDER BY clause */
+ Expr *pLimit; /* Saved LIMIT and OFFSET */
+ int regLimit, regOffset; /* Registers used by LIMIT and OFFSET */
+
+#ifndef SQLITE_OMIT_WINDOWFUNC
+ if( p->pWin ){
+ sqlite3ErrorMsg(pParse, "cannot use window functions in recursive queries");
+ return;
+ }
+#endif
+
+ /* Obtain authorization to do a recursive query */
+ if( sqlite3AuthCheck(pParse, SQLITE_RECURSIVE, 0, 0, 0) ) return;
+
+ /* Process the LIMIT and OFFSET clauses, if they exist */
+ addrBreak = sqlite3VdbeMakeLabel(pParse);
+ p->nSelectRow = 320; /* 4 billion rows */
+ computeLimitRegisters(pParse, p, addrBreak);
+ pLimit = p->pLimit;
+ regLimit = p->iLimit;
+ regOffset = p->iOffset;
+ p->pLimit = 0;
+ p->iLimit = p->iOffset = 0;
+ pOrderBy = p->pOrderBy;
+
+ /* Locate the cursor number of the Current table */
+ for(i=0; ALWAYS(i<pSrc->nSrc); i++){
+ if( pSrc->a[i].fg.isRecursive ){
+ iCurrent = pSrc->a[i].iCursor;
+ break;
+ }
+ }
+
+  /* Allocate cursor numbers for Queue and Distinct. The cursor number for
+ ** the Distinct table must be exactly one greater than Queue in order
+ ** for the SRT_DistFifo and SRT_DistQueue destinations to work. */
+ iQueue = pParse->nTab++;
+ if( p->op==TK_UNION ){
+ eDest = pOrderBy ? SRT_DistQueue : SRT_DistFifo;
+ iDistinct = pParse->nTab++;
+ }else{
+ eDest = pOrderBy ? SRT_Queue : SRT_Fifo;
+ }
+ sqlite3SelectDestInit(&destQueue, eDest, iQueue);
+
+ /* Allocate cursors for Current, Queue, and Distinct. */
+ regCurrent = ++pParse->nMem;
+ sqlite3VdbeAddOp3(v, OP_OpenPseudo, iCurrent, regCurrent, nCol);
+ if( pOrderBy ){
+ KeyInfo *pKeyInfo = multiSelectOrderByKeyInfo(pParse, p, 1);
+ sqlite3VdbeAddOp4(v, OP_OpenEphemeral, iQueue, pOrderBy->nExpr+2, 0,
+ (char*)pKeyInfo, P4_KEYINFO);
+ destQueue.pOrderBy = pOrderBy;
+ }else{
+ sqlite3VdbeAddOp2(v, OP_OpenEphemeral, iQueue, nCol);
+ }
+ VdbeComment((v, "Queue table"));
+ if( iDistinct ){
+ p->addrOpenEphm[0] = sqlite3VdbeAddOp2(v, OP_OpenEphemeral, iDistinct, 0);
+ p->selFlags |= SF_UsesEphemeral;
+ }
+
+ /* Detach the ORDER BY clause from the compound SELECT */
+ p->pOrderBy = 0;
+
+ /* Store the results of the setup-query in Queue. */
+ pSetup->pNext = 0;
+ ExplainQueryPlan((pParse, 1, "SETUP"));
+ rc = sqlite3Select(pParse, pSetup, &destQueue);
+ pSetup->pNext = p;
+ if( rc ) goto end_of_recursive_query;
+
+ /* Find the next row in the Queue and output that row */
+ addrTop = sqlite3VdbeAddOp2(v, OP_Rewind, iQueue, addrBreak); VdbeCoverage(v);
+
+ /* Transfer the next row in Queue over to Current */
+ sqlite3VdbeAddOp1(v, OP_NullRow, iCurrent); /* To reset column cache */
+ if( pOrderBy ){
+ sqlite3VdbeAddOp3(v, OP_Column, iQueue, pOrderBy->nExpr+1, regCurrent);
+ }else{
+ sqlite3VdbeAddOp2(v, OP_RowData, iQueue, regCurrent);
+ }
+ sqlite3VdbeAddOp1(v, OP_Delete, iQueue);
+
+ /* Output the single row in Current */
+ addrCont = sqlite3VdbeMakeLabel(pParse);
+ codeOffset(v, regOffset, addrCont);
+ selectInnerLoop(pParse, p, iCurrent,
+ 0, 0, pDest, addrCont, addrBreak);
+ if( regLimit ){
+ sqlite3VdbeAddOp2(v, OP_DecrJumpZero, regLimit, addrBreak);
+ VdbeCoverage(v);
+ }
+ sqlite3VdbeResolveLabel(v, addrCont);
+
+ /* Execute the recursive SELECT taking the single row in Current as
+ ** the value for the recursive-table. Store the results in the Queue.
+ */
+ if( p->selFlags & SF_Aggregate ){
+ sqlite3ErrorMsg(pParse, "recursive aggregate queries not supported");
+ }else{
+ p->pPrior = 0;
+ ExplainQueryPlan((pParse, 1, "RECURSIVE STEP"));
+ sqlite3Select(pParse, p, &destQueue);
+ assert( p->pPrior==0 );
+ p->pPrior = pSetup;
+ }
+
+ /* Keep running the loop until the Queue is empty */
+ sqlite3VdbeGoto(v, addrTop);
+ sqlite3VdbeResolveLabel(v, addrBreak);
+
+end_of_recursive_query:
+ sqlite3ExprListDelete(pParse->db, p->pOrderBy);
+ p->pOrderBy = pOrderBy;
+ p->pLimit = pLimit;
+ return;
+}
+#endif /* SQLITE_OMIT_CTE */
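+
+/* Illustration (editorial addition) of the Queue mechanics described
+** above, using a classic counter:
+**
+**   WITH RECURSIVE cnt(x) AS (
+**     SELECT 1 UNION ALL SELECT x+1 FROM cnt WHERE x<5
+**   )
+**   SELECT x FROM cnt;
+**
+** The setup-query seeds the Queue with the row (1). Each cycle extracts
+** one row, outputs it, and runs the recursive-query with that row as the
+** content of "cnt", pushing (x+1) back into the Queue until the WHERE
+** clause stops producing rows and the Queue drains.
+*/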
+
+/* Forward references */
+static int multiSelectOrderBy(
+ Parse *pParse, /* Parsing context */
+ Select *p, /* The right-most of SELECTs to be coded */
+ SelectDest *pDest /* What to do with query results */
+);
+
+/*
+** Handle the special case of a compound-select that originates from a
+** VALUES clause. By handling this as a special case, we avoid deep
+** recursion, and thus do not need to enforce the SQLITE_LIMIT_COMPOUND_SELECT
+** on a VALUES clause.
+**
+** Because the Select object originates from a VALUES clause:
+** (1) There is no LIMIT or OFFSET or else there is a LIMIT of exactly 1
+** (2) All terms are UNION ALL
+** (3) There is no ORDER BY clause
+**
+** The "LIMIT of exactly 1" case of condition (1) comes about when a VALUES
+** clause occurs within a scalar expression (ex: "SELECT (VALUES(1),(2),(3))").
+** The sqlite3CodeSubselect() routine will have added the LIMIT 1 clause in
+** that case. Since the limit is exactly 1, we only need to evaluate the
+** left-most VALUES.
+*/
+static int multiSelectValues(
+ Parse *pParse, /* Parsing context */
+ Select *p, /* The right-most of SELECTs to be coded */
+ SelectDest *pDest /* What to do with query results */
+){
+ int nRow = 1;
+ int rc = 0;
+ int bShowAll = p->pLimit==0;
+ assert( p->selFlags & SF_MultiValue );
+ do{
+ assert( p->selFlags & SF_Values );
+ assert( p->op==TK_ALL || (p->op==TK_SELECT && p->pPrior==0) );
+ assert( p->pNext==0 || p->pEList->nExpr==p->pNext->pEList->nExpr );
+#ifndef SQLITE_OMIT_WINDOWFUNC
+ if( p->pWin ) return -1;
+#endif
+ if( p->pPrior==0 ) break;
+ assert( p->pPrior->pNext==p );
+ p = p->pPrior;
+ nRow += bShowAll;
+ }while(1);
+ ExplainQueryPlan((pParse, 0, "SCAN %d CONSTANT ROW%s", nRow,
+ nRow==1 ? "" : "S"));
+ while( p ){
+ selectInnerLoop(pParse, p, -1, 0, 0, pDest, 1, 1);
+ if( !bShowAll ) break;
+ p->nSelectRow = nRow;
+ p = p->pNext;
+ }
+ return rc;
+}
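+
+/* Illustration (editorial addition): "VALUES(1),(2),(3)" is coded by the
+** routine above as three constant rows with no recursion.  In the scalar
+** form "SELECT (VALUES(1),(2),(3))" the implied LIMIT 1 makes bShowAll
+** false, so only the left-most row (1) is ever evaluated.
+*/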
+
+/*
+** This routine is called to process a compound query formed from
+** two or more separate queries using UNION, UNION ALL, EXCEPT, or
+** INTERSECT.
+**
+** "p" points to the right-most of the two queries. The query on the
+** left is p->pPrior. The left query could also be a compound query
+** in which case this routine will be called recursively.
+**
+** The results of the total query are to be written into a destination
+** of type eDest with parameter iParm.
+**
+** Example 1: Consider a three-way compound SQL statement.
+**
+** SELECT a FROM t1 UNION SELECT b FROM t2 UNION SELECT c FROM t3
+**
+** This statement is parsed as follows:
+**
+** SELECT c FROM t3
+** |
+** `-----> SELECT b FROM t2
+** |
+** `------> SELECT a FROM t1
+**
+** The arrows in the diagram above represent the Select.pPrior pointer.
+** So if this routine is called with p equal to the t3 query, then
+** pPrior will be the t2 query. p->op will be TK_UNION in this case.
+**
+** Notice that because of the way SQLite parses compound SELECTs, the
+** individual selects always group from left to right.
+*/
+static int multiSelect(
+ Parse *pParse, /* Parsing context */
+ Select *p, /* The right-most of SELECTs to be coded */
+ SelectDest *pDest /* What to do with query results */
+){
+ int rc = SQLITE_OK; /* Success code from a subroutine */
+ Select *pPrior; /* Another SELECT immediately to our left */
+ Vdbe *v; /* Generate code to this VDBE */
+ SelectDest dest; /* Alternative data destination */
+ Select *pDelete = 0; /* Chain of simple selects to delete */
+ sqlite3 *db; /* Database connection */
+
+ /* Make sure there is no ORDER BY or LIMIT clause on prior SELECTs. Only
+ ** the last (right-most) SELECT in the series may have an ORDER BY or LIMIT.
+ */
+ assert( p && p->pPrior ); /* Calling function guarantees this much */
+ assert( (p->selFlags & SF_Recursive)==0 || p->op==TK_ALL || p->op==TK_UNION );
+ assert( p->selFlags & SF_Compound );
+ db = pParse->db;
+ pPrior = p->pPrior;
+ dest = *pDest;
+ if( pPrior->pOrderBy || pPrior->pLimit ){
+ sqlite3ErrorMsg(pParse,"%s clause should come after %s not before",
+ pPrior->pOrderBy!=0 ? "ORDER BY" : "LIMIT", selectOpName(p->op));
+ rc = 1;
+ goto multi_select_end;
+ }
+
+ v = sqlite3GetVdbe(pParse);
+ assert( v!=0 ); /* The VDBE already created by calling function */
+
+ /* Create the destination temporary table if necessary
+ */
+ if( dest.eDest==SRT_EphemTab ){
+ assert( p->pEList );
+ sqlite3VdbeAddOp2(v, OP_OpenEphemeral, dest.iSDParm, p->pEList->nExpr);
+ dest.eDest = SRT_Table;
+ }
+
+ /* Special handling for a compound-select that originates as a VALUES clause.
+ */
+ if( p->selFlags & SF_MultiValue ){
+ rc = multiSelectValues(pParse, p, &dest);
+ if( rc>=0 ) goto multi_select_end;
+ rc = SQLITE_OK;
+ }
+
+ /* Make sure all SELECTs in the statement have the same number of elements
+ ** in their result sets.
+ */
+ assert( p->pEList && pPrior->pEList );
+ assert( p->pEList->nExpr==pPrior->pEList->nExpr );
+
+#ifndef SQLITE_OMIT_CTE
+ if( p->selFlags & SF_Recursive ){
+ generateWithRecursiveQuery(pParse, p, &dest);
+ }else
+#endif
+
+ /* Compound SELECTs that have an ORDER BY clause are handled separately.
+ */
+ if( p->pOrderBy ){
+ return multiSelectOrderBy(pParse, p, pDest);
+ }else{
+
+#ifndef SQLITE_OMIT_EXPLAIN
+ if( pPrior->pPrior==0 ){
+ ExplainQueryPlan((pParse, 1, "COMPOUND QUERY"));
+ ExplainQueryPlan((pParse, 1, "LEFT-MOST SUBQUERY"));
+ }
+#endif
+
+ /* Generate code for the left and right SELECT statements.
+ */
+ switch( p->op ){
+ case TK_ALL: {
+ int addr = 0;
+ int nLimit;
+ assert( !pPrior->pLimit );
+ pPrior->iLimit = p->iLimit;
+ pPrior->iOffset = p->iOffset;
+ pPrior->pLimit = p->pLimit;
+ rc = sqlite3Select(pParse, pPrior, &dest);
+ p->pLimit = 0;
+ if( rc ){
+ goto multi_select_end;
+ }
+ p->pPrior = 0;
+ p->iLimit = pPrior->iLimit;
+ p->iOffset = pPrior->iOffset;
+ if( p->iLimit ){
+ addr = sqlite3VdbeAddOp1(v, OP_IfNot, p->iLimit); VdbeCoverage(v);
+ VdbeComment((v, "Jump ahead if LIMIT reached"));
+ if( p->iOffset ){
+ sqlite3VdbeAddOp3(v, OP_OffsetLimit,
+ p->iLimit, p->iOffset+1, p->iOffset);
+ }
+ }
+ ExplainQueryPlan((pParse, 1, "UNION ALL"));
+ rc = sqlite3Select(pParse, p, &dest);
+ testcase( rc!=SQLITE_OK );
+ pDelete = p->pPrior;
+ p->pPrior = pPrior;
+ p->nSelectRow = sqlite3LogEstAdd(p->nSelectRow, pPrior->nSelectRow);
+ if( pPrior->pLimit
+ && sqlite3ExprIsInteger(pPrior->pLimit->pLeft, &nLimit)
+ && nLimit>0 && p->nSelectRow > sqlite3LogEst((u64)nLimit)
+ ){
+ p->nSelectRow = sqlite3LogEst((u64)nLimit);
+ }
+ if( addr ){
+ sqlite3VdbeJumpHere(v, addr);
+ }
+ break;
+ }
+ case TK_EXCEPT:
+ case TK_UNION: {
+ int unionTab; /* Cursor number of the temp table holding result */
+ u8 op = 0; /* One of the SRT_ operations to apply to self */
+ int priorOp; /* The SRT_ operation to apply to prior selects */
+      Expr *pLimit;     /* Saved value of p->pLimit */
+ int addr;
+ SelectDest uniondest;
+
+ testcase( p->op==TK_EXCEPT );
+ testcase( p->op==TK_UNION );
+ priorOp = SRT_Union;
+ if( dest.eDest==priorOp ){
+ /* We can reuse a temporary table generated by a SELECT to our
+ ** right.
+ */
+ assert( p->pLimit==0 ); /* Not allowed on leftward elements */
+ unionTab = dest.iSDParm;
+ }else{
+ /* We will need to create our own temporary table to hold the
+ ** intermediate results.
+ */
+ unionTab = pParse->nTab++;
+ assert( p->pOrderBy==0 );
+ addr = sqlite3VdbeAddOp2(v, OP_OpenEphemeral, unionTab, 0);
+ assert( p->addrOpenEphm[0] == -1 );
+ p->addrOpenEphm[0] = addr;
+ findRightmost(p)->selFlags |= SF_UsesEphemeral;
+ assert( p->pEList );
+ }
+
+ /* Code the SELECT statements to our left
+ */
+ assert( !pPrior->pOrderBy );
+ sqlite3SelectDestInit(&uniondest, priorOp, unionTab);
+ rc = sqlite3Select(pParse, pPrior, &uniondest);
+ if( rc ){
+ goto multi_select_end;
+ }
+
+ /* Code the current SELECT statement
+ */
+ if( p->op==TK_EXCEPT ){
+ op = SRT_Except;
+ }else{
+ assert( p->op==TK_UNION );
+ op = SRT_Union;
+ }
+ p->pPrior = 0;
+ pLimit = p->pLimit;
+ p->pLimit = 0;
+ uniondest.eDest = op;
+ ExplainQueryPlan((pParse, 1, "%s USING TEMP B-TREE",
+ selectOpName(p->op)));
+ rc = sqlite3Select(pParse, p, &uniondest);
+ testcase( rc!=SQLITE_OK );
+ assert( p->pOrderBy==0 );
+ pDelete = p->pPrior;
+ p->pPrior = pPrior;
+ p->pOrderBy = 0;
+ if( p->op==TK_UNION ){
+ p->nSelectRow = sqlite3LogEstAdd(p->nSelectRow, pPrior->nSelectRow);
+ }
+ sqlite3ExprDelete(db, p->pLimit);
+ p->pLimit = pLimit;
+ p->iLimit = 0;
+ p->iOffset = 0;
+
+ /* Convert the data in the temporary table into whatever form
+      ** we currently need.
+ */
+ assert( unionTab==dest.iSDParm || dest.eDest!=priorOp );
+ assert( p->pEList || db->mallocFailed );
+ if( dest.eDest!=priorOp && db->mallocFailed==0 ){
+ int iCont, iBreak, iStart;
+ iBreak = sqlite3VdbeMakeLabel(pParse);
+ iCont = sqlite3VdbeMakeLabel(pParse);
+ computeLimitRegisters(pParse, p, iBreak);
+ sqlite3VdbeAddOp2(v, OP_Rewind, unionTab, iBreak); VdbeCoverage(v);
+ iStart = sqlite3VdbeCurrentAddr(v);
+ selectInnerLoop(pParse, p, unionTab,
+ 0, 0, &dest, iCont, iBreak);
+ sqlite3VdbeResolveLabel(v, iCont);
+ sqlite3VdbeAddOp2(v, OP_Next, unionTab, iStart); VdbeCoverage(v);
+ sqlite3VdbeResolveLabel(v, iBreak);
+ sqlite3VdbeAddOp2(v, OP_Close, unionTab, 0);
+ }
+ break;
+ }
+ default: assert( p->op==TK_INTERSECT ); {
+ int tab1, tab2;
+ int iCont, iBreak, iStart;
+ Expr *pLimit;
+ int addr;
+ SelectDest intersectdest;
+ int r1;
+
+ /* INTERSECT is different from the others since it requires
+ ** two temporary tables. Hence it has its own case. Begin
+ ** by allocating the tables we will need.
+ */
+ tab1 = pParse->nTab++;
+ tab2 = pParse->nTab++;
+ assert( p->pOrderBy==0 );
+
+ addr = sqlite3VdbeAddOp2(v, OP_OpenEphemeral, tab1, 0);
+ assert( p->addrOpenEphm[0] == -1 );
+ p->addrOpenEphm[0] = addr;
+ findRightmost(p)->selFlags |= SF_UsesEphemeral;
+ assert( p->pEList );
+
+ /* Code the SELECTs to our left into temporary table "tab1".
+ */
+ sqlite3SelectDestInit(&intersectdest, SRT_Union, tab1);
+ rc = sqlite3Select(pParse, pPrior, &intersectdest);
+ if( rc ){
+ goto multi_select_end;
+ }
+
+ /* Code the current SELECT into temporary table "tab2"
+ */
+ addr = sqlite3VdbeAddOp2(v, OP_OpenEphemeral, tab2, 0);
+ assert( p->addrOpenEphm[1] == -1 );
+ p->addrOpenEphm[1] = addr;
+ p->pPrior = 0;
+ pLimit = p->pLimit;
+ p->pLimit = 0;
+ intersectdest.iSDParm = tab2;
+ ExplainQueryPlan((pParse, 1, "%s USING TEMP B-TREE",
+ selectOpName(p->op)));
+ rc = sqlite3Select(pParse, p, &intersectdest);
+ testcase( rc!=SQLITE_OK );
+ pDelete = p->pPrior;
+ p->pPrior = pPrior;
+ if( p->nSelectRow>pPrior->nSelectRow ){
+ p->nSelectRow = pPrior->nSelectRow;
+ }
+ sqlite3ExprDelete(db, p->pLimit);
+ p->pLimit = pLimit;
+
+ /* Generate code to take the intersection of the two temporary
+ ** tables.
+ */
+ if( rc ) break;
+ assert( p->pEList );
+ iBreak = sqlite3VdbeMakeLabel(pParse);
+ iCont = sqlite3VdbeMakeLabel(pParse);
+ computeLimitRegisters(pParse, p, iBreak);
+ sqlite3VdbeAddOp2(v, OP_Rewind, tab1, iBreak); VdbeCoverage(v);
+ r1 = sqlite3GetTempReg(pParse);
+ iStart = sqlite3VdbeAddOp2(v, OP_RowData, tab1, r1);
+ sqlite3VdbeAddOp4Int(v, OP_NotFound, tab2, iCont, r1, 0);
+ VdbeCoverage(v);
+ sqlite3ReleaseTempReg(pParse, r1);
+ selectInnerLoop(pParse, p, tab1,
+ 0, 0, &dest, iCont, iBreak);
+ sqlite3VdbeResolveLabel(v, iCont);
+ sqlite3VdbeAddOp2(v, OP_Next, tab1, iStart); VdbeCoverage(v);
+ sqlite3VdbeResolveLabel(v, iBreak);
+ sqlite3VdbeAddOp2(v, OP_Close, tab2, 0);
+ sqlite3VdbeAddOp2(v, OP_Close, tab1, 0);
+ break;
+ }
+ }
+
+ #ifndef SQLITE_OMIT_EXPLAIN
+ if( p->pNext==0 ){
+ ExplainQueryPlanPop(pParse);
+ }
+ #endif
+ }
+ if( pParse->nErr ) goto multi_select_end;
+
+ /* Compute collating sequences used by
+ ** temporary tables needed to implement the compound select.
+ ** Attach the KeyInfo structure to all temporary tables.
+ **
+ ** This section is run by the right-most SELECT statement only.
+ ** SELECT statements to the left always skip this part. The right-most
+ ** SELECT might also skip this part if it has no ORDER BY clause and
+ ** no temp tables are required.
+ */
+ if( p->selFlags & SF_UsesEphemeral ){
+ int i; /* Loop counter */
+ KeyInfo *pKeyInfo; /* Collating sequence for the result set */
+ Select *pLoop; /* For looping through SELECT statements */
+ CollSeq **apColl; /* For looping through pKeyInfo->aColl[] */
+ int nCol; /* Number of columns in result set */
+
+ assert( p->pNext==0 );
+ nCol = p->pEList->nExpr;
+ pKeyInfo = sqlite3KeyInfoAlloc(db, nCol, 1);
+ if( !pKeyInfo ){
+ rc = SQLITE_NOMEM_BKPT;
+ goto multi_select_end;
+ }
+ for(i=0, apColl=pKeyInfo->aColl; i<nCol; i++, apColl++){
+ *apColl = multiSelectCollSeq(pParse, p, i);
+ if( 0==*apColl ){
+ *apColl = db->pDfltColl;
+ }
+ }
+
+ for(pLoop=p; pLoop; pLoop=pLoop->pPrior){
+ for(i=0; i<2; i++){
+ int addr = pLoop->addrOpenEphm[i];
+ if( addr<0 ){
+ /* If [0] is unused then [1] is also unused. So we can
+ ** always safely abort as soon as the first unused slot is found */
+ assert( pLoop->addrOpenEphm[1]<0 );
+ break;
+ }
+ sqlite3VdbeChangeP2(v, addr, nCol);
+ sqlite3VdbeChangeP4(v, addr, (char*)sqlite3KeyInfoRef(pKeyInfo),
+ P4_KEYINFO);
+ pLoop->addrOpenEphm[i] = -1;
+ }
+ }
+ sqlite3KeyInfoUnref(pKeyInfo);
+ }
+
+multi_select_end:
+ pDest->iSdst = dest.iSdst;
+ pDest->nSdst = dest.nSdst;
+ sqlite3SelectDelete(db, pDelete);
+ return rc;
+}
+#endif /* SQLITE_OMIT_COMPOUND_SELECT */
+
+/*
+** Error message for when two or more terms of a compound select have
+** result sets with different numbers of columns.
+*/
+SQLITE_PRIVATE void sqlite3SelectWrongNumTermsError(Parse *pParse, Select *p){
+ if( p->selFlags & SF_Values ){
+ sqlite3ErrorMsg(pParse, "all VALUES must have the same number of terms");
+ }else{
+ sqlite3ErrorMsg(pParse, "SELECTs to the left and right of %s"
+ " do not have the same number of result columns", selectOpName(p->op));
+ }
+}
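+
+/* Illustration (editorial addition) of the two messages:
+**
+**   VALUES(1),(2,3);            -- "all VALUES must have the same number
+**                               --  of terms"
+**   SELECT 1 UNION SELECT 1,2;  -- "SELECTs to the left and right of
+**                               --  UNION do not have the same number of
+**                               --  result columns"
+*/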
+
+/*
+** Code an output subroutine for a coroutine implementation of a
+** SELECT statement.
+**
+** The data to be output is contained in pIn->iSdst. There are
+** pIn->nSdst columns to be output. pDest is where the output should
+** be sent.
+**
+** regReturn is the number of the register holding the subroutine
+** return address.
+**
+** If regPrev>0 then it is the first register in a vector that
+** records the previous output. mem[regPrev] is a flag that is false
+** if there has been no previous output. If regPrev>0 then code is
+** generated to suppress duplicates. pKeyInfo is used for comparing
+** keys.
+**
+** If the LIMIT found in p->iLimit is reached, jump immediately to
+** iBreak.
+*/
+static int generateOutputSubroutine(
+ Parse *pParse, /* Parsing context */
+ Select *p, /* The SELECT statement */
+ SelectDest *pIn, /* Coroutine supplying data */
+ SelectDest *pDest, /* Where to send the data */
+ int regReturn, /* The return address register */
+ int regPrev, /* Previous result register. No uniqueness if 0 */
+ KeyInfo *pKeyInfo, /* For comparing with previous entry */
+ int iBreak /* Jump here if we hit the LIMIT */
+){
+ Vdbe *v = pParse->pVdbe;
+ int iContinue;
+ int addr;
+
+ addr = sqlite3VdbeCurrentAddr(v);
+ iContinue = sqlite3VdbeMakeLabel(pParse);
+
+ /* Suppress duplicates for UNION, EXCEPT, and INTERSECT
+ */
+ if( regPrev ){
+ int addr1, addr2;
+ addr1 = sqlite3VdbeAddOp1(v, OP_IfNot, regPrev); VdbeCoverage(v);
+ addr2 = sqlite3VdbeAddOp4(v, OP_Compare, pIn->iSdst, regPrev+1, pIn->nSdst,
+ (char*)sqlite3KeyInfoRef(pKeyInfo), P4_KEYINFO);
+ sqlite3VdbeAddOp3(v, OP_Jump, addr2+2, iContinue, addr2+2); VdbeCoverage(v);
+ sqlite3VdbeJumpHere(v, addr1);
+ sqlite3VdbeAddOp3(v, OP_Copy, pIn->iSdst, regPrev+1, pIn->nSdst-1);
+ sqlite3VdbeAddOp2(v, OP_Integer, 1, regPrev);
+ }
+ if( pParse->db->mallocFailed ) return 0;
+
+ /* Suppress the first OFFSET entries if there is an OFFSET clause
+ */
+ codeOffset(v, p->iOffset, iContinue);
+
+ assert( pDest->eDest!=SRT_Exists );
+ assert( pDest->eDest!=SRT_Table );
+ switch( pDest->eDest ){
+ /* Store the result as data using a unique key.
+ */
+ case SRT_EphemTab: {
+ int r1 = sqlite3GetTempReg(pParse);
+ int r2 = sqlite3GetTempReg(pParse);
+ sqlite3VdbeAddOp3(v, OP_MakeRecord, pIn->iSdst, pIn->nSdst, r1);
+ sqlite3VdbeAddOp2(v, OP_NewRowid, pDest->iSDParm, r2);
+ sqlite3VdbeAddOp3(v, OP_Insert, pDest->iSDParm, r1, r2);
+ sqlite3VdbeChangeP5(v, OPFLAG_APPEND);
+ sqlite3ReleaseTempReg(pParse, r2);
+ sqlite3ReleaseTempReg(pParse, r1);
+ break;
+ }
+
+#ifndef SQLITE_OMIT_SUBQUERY
+ /* If we are creating a set for an "expr IN (SELECT ...)".
+ */
+ case SRT_Set: {
+ int r1;
+ testcase( pIn->nSdst>1 );
+ r1 = sqlite3GetTempReg(pParse);
+ sqlite3VdbeAddOp4(v, OP_MakeRecord, pIn->iSdst, pIn->nSdst,
+ r1, pDest->zAffSdst, pIn->nSdst);
+ sqlite3VdbeAddOp4Int(v, OP_IdxInsert, pDest->iSDParm, r1,
+ pIn->iSdst, pIn->nSdst);
+ sqlite3ReleaseTempReg(pParse, r1);
+ break;
+ }
+
+ /* If this is a scalar select that is part of an expression, then
+ ** store the results in the appropriate memory cell and break out
+ ** of the scan loop. Note that the select might return multiple columns
+ ** if it is the RHS of a row-value IN operator.
+ */
+ case SRT_Mem: {
+ if( pParse->nErr==0 ){
+ testcase( pIn->nSdst>1 );
+ sqlite3ExprCodeMove(pParse, pIn->iSdst, pDest->iSDParm, pIn->nSdst);
+ }
+ /* The LIMIT clause will jump out of the loop for us */
+ break;
+ }
+#endif /* #ifndef SQLITE_OMIT_SUBQUERY */
+
+ /* The results are stored in a sequence of registers
+ ** starting at pDest->iSdst. Then the co-routine yields.
+ */
+ case SRT_Coroutine: {
+ if( pDest->iSdst==0 ){
+ pDest->iSdst = sqlite3GetTempRange(pParse, pIn->nSdst);
+ pDest->nSdst = pIn->nSdst;
+ }
+ sqlite3ExprCodeMove(pParse, pIn->iSdst, pDest->iSdst, pIn->nSdst);
+ sqlite3VdbeAddOp1(v, OP_Yield, pDest->iSDParm);
+ break;
+ }
+
+ /* If none of the above, then the result destination must be
+    ** SRT_Output. This routine is never called with any destination
+    ** other than the ones handled above or SRT_Output.
+ **
+ ** For SRT_Output, results are stored in a sequence of registers.
+ ** Then the OP_ResultRow opcode is used to cause sqlite3_step() to
+ ** return the next row of result.
+ */
+ default: {
+ assert( pDest->eDest==SRT_Output );
+ sqlite3VdbeAddOp2(v, OP_ResultRow, pIn->iSdst, pIn->nSdst);
+ break;
+ }
+ }
+
+ /* Jump to the end of the loop if the LIMIT is reached.
+ */
+ if( p->iLimit ){
+ sqlite3VdbeAddOp2(v, OP_DecrJumpZero, p->iLimit, iBreak); VdbeCoverage(v);
+ }
+
+ /* Generate the subroutine return
+ */
+ sqlite3VdbeResolveLabel(v, iContinue);
+ sqlite3VdbeAddOp1(v, OP_Return, regReturn);
+
+ return addr;
+}
+
+/*
+** Alternative compound select code generator for cases when there
+** is an ORDER BY clause.
+**
+** We assume a query of the following form:
+**
+** <selectA> <operator> <selectB> ORDER BY <orderbylist>
+**
+** <operator> is one of UNION ALL, UNION, EXCEPT, or INTERSECT. The idea
+** is to code both <selectA> and <selectB> with the ORDER BY clause as
+** co-routines. Then run the co-routines in parallel and merge the results
+** into the output. In addition to the two coroutines (called selectA and
+** selectB) there are 7 subroutines:
+**
+** outA: Move the output of the selectA coroutine into the output
+** of the compound query.
+**
+** outB: Move the output of the selectB coroutine into the output
+** of the compound query. (Only generated for UNION and
+** UNION ALL. EXCEPT and INSERTSECT never output a row that
+**           UNION ALL. EXCEPT and INTERSECT never output a row that
+**
+** AltB: Called when there is data from both coroutines and A<B.
+**
+** AeqB: Called when there is data from both coroutines and A==B.
+**
+** AgtB: Called when there is data from both coroutines and A>B.
+**
+** EofA: Called when data is exhausted from selectA.
+**
+** EofB: Called when data is exhausted from selectB.
+**
+** The implementation of the latter five subroutines depends on which
+** <operator> is used:
+**
+**
+** UNION ALL UNION EXCEPT INTERSECT
+** ------------- ----------------- -------------- -----------------
+** AltB: outA, nextA outA, nextA outA, nextA nextA
+**
+** AeqB: outA, nextA nextA nextA outA, nextA
+**
+** AgtB: outB, nextB outB, nextB nextB nextB
+**
+** EofA: outB, nextB outB, nextB halt halt
+**
+** EofB: outA, nextA outA, nextA outA, nextA halt
+**
+** In the AltB, AeqB, and AgtB subroutines, an EOF on A following nextA
+** causes an immediate jump to EofA and an EOF on B following nextB causes
+** an immediate jump to EofB. Within EofA and EofB, an EOF on entry or
+** following nextX causes a jump to the end of the select processing.
+**
+** Duplicate removal in the UNION, EXCEPT, and INTERSECT cases is handled
+** within the output subroutine. The regPrev register set holds the previously
+** output value. A comparison is made against this value and the output
+** is skipped if the next results would be the same as the previous.
+**
+** The implementation plan is to implement the two coroutines and seven
+** subroutines first, then put the control logic at the bottom. Like this:
+**
+** goto Init
+** coA: coroutine for left query (A)
+** coB: coroutine for right query (B)
+** outA: output one row of A
+** outB: output one row of B (UNION and UNION ALL only)
+** EofA: ...
+** EofB: ...
+** AltB: ...
+** AeqB: ...
+** AgtB: ...
+** Init: initialize coroutine registers
+** yield coA
+** if eof(A) goto EofA
+** yield coB
+** if eof(B) goto EofB
+** Cmpr: Compare A, B
+** Jump AltB, AeqB, AgtB
+** End: ...
+**
+** We call AltB, AeqB, AgtB, EofA, and EofB "subroutines" but they are not
+** actually called using Gosub and they do not Return. EofA and EofB loop
+** until all data is exhausted and then jump to the "end" label. AltB, AeqB,
+** and AgtB jump to either Cmpr or to one of EofA or EofB.
+*/
+#ifndef SQLITE_OMIT_COMPOUND_SELECT
+static int multiSelectOrderBy(
+ Parse *pParse, /* Parsing context */
+ Select *p, /* The right-most of SELECTs to be coded */
+ SelectDest *pDest /* What to do with query results */
+){
+ int i, j; /* Loop counters */
+ Select *pPrior; /* Another SELECT immediately to our left */
+ Vdbe *v; /* Generate code to this VDBE */
+ SelectDest destA; /* Destination for coroutine A */
+ SelectDest destB; /* Destination for coroutine B */
+ int regAddrA; /* Address register for select-A coroutine */
+ int regAddrB; /* Address register for select-B coroutine */
+ int addrSelectA; /* Address of the select-A coroutine */
+ int addrSelectB; /* Address of the select-B coroutine */
+ int regOutA; /* Address register for the output-A subroutine */
+ int regOutB; /* Address register for the output-B subroutine */
+ int addrOutA; /* Address of the output-A subroutine */
+ int addrOutB = 0; /* Address of the output-B subroutine */
+ int addrEofA; /* Address of the select-A-exhausted subroutine */
+ int addrEofA_noB; /* Alternate addrEofA if B is uninitialized */
+ int addrEofB; /* Address of the select-B-exhausted subroutine */
+ int addrAltB; /* Address of the A<B subroutine */
+ int addrAeqB; /* Address of the A==B subroutine */
+ int addrAgtB; /* Address of the A>B subroutine */
+ int regLimitA; /* Limit register for select-A */
+  int regLimitB;            /* Limit register for select-B */
+ int regPrev; /* A range of registers to hold previous output */
+ int savedLimit; /* Saved value of p->iLimit */
+ int savedOffset; /* Saved value of p->iOffset */
+ int labelCmpr; /* Label for the start of the merge algorithm */
+ int labelEnd; /* Label for the end of the overall SELECT stmt */
+  int addr1;                /* Jump instructions that get retargeted */
+ int op; /* One of TK_ALL, TK_UNION, TK_EXCEPT, TK_INTERSECT */
+ KeyInfo *pKeyDup = 0; /* Comparison information for duplicate removal */
+ KeyInfo *pKeyMerge; /* Comparison information for merging rows */
+ sqlite3 *db; /* Database connection */
+ ExprList *pOrderBy; /* The ORDER BY clause */
+ int nOrderBy; /* Number of terms in the ORDER BY clause */
+ u32 *aPermute; /* Mapping from ORDER BY terms to result set columns */
+
+ assert( p->pOrderBy!=0 );
+ assert( pKeyDup==0 ); /* "Managed" code needs this. Ticket #3382. */
+ db = pParse->db;
+ v = pParse->pVdbe;
+ assert( v!=0 ); /* Already thrown the error if VDBE alloc failed */
+ labelEnd = sqlite3VdbeMakeLabel(pParse);
+ labelCmpr = sqlite3VdbeMakeLabel(pParse);
+
+
+ /* Patch up the ORDER BY clause
+ */
+ op = p->op;
+ pPrior = p->pPrior;
+ assert( pPrior->pOrderBy==0 );
+ pOrderBy = p->pOrderBy;
+ assert( pOrderBy );
+ nOrderBy = pOrderBy->nExpr;
+
+ /* For operators other than UNION ALL we have to make sure that
+ ** the ORDER BY clause covers every term of the result set. Add
+ ** terms to the ORDER BY clause as necessary.
+ */
+ if( op!=TK_ALL ){
+ for(i=1; db->mallocFailed==0 && i<=p->pEList->nExpr; i++){
+ struct ExprList_item *pItem;
+ for(j=0, pItem=pOrderBy->a; j<nOrderBy; j++, pItem++){
+ assert( pItem->u.x.iOrderByCol>0 );
+ if( pItem->u.x.iOrderByCol==i ) break;
+ }
+ if( j==nOrderBy ){
+ Expr *pNew = sqlite3Expr(db, TK_INTEGER, 0);
+ if( pNew==0 ) return SQLITE_NOMEM_BKPT;
+ pNew->flags |= EP_IntValue;
+ pNew->u.iValue = i;
+ p->pOrderBy = pOrderBy = sqlite3ExprListAppend(pParse, pOrderBy, pNew);
+ if( pOrderBy ) pOrderBy->a[nOrderBy++].u.x.iOrderByCol = (u16)i;
+ }
+ }
+ }
+
+  /* Compute the comparison permutation and the KeyInfo that are used to
+  ** determine whether the next row of results comes from selectA or
+  ** selectB. Also add explicit
+ ** collations to the ORDER BY clause terms so that when the subqueries
+ ** to the right and the left are evaluated, they use the correct
+ ** collation.
+ */
+ aPermute = sqlite3DbMallocRawNN(db, sizeof(u32)*(nOrderBy + 1));
+ if( aPermute ){
+ struct ExprList_item *pItem;
+ aPermute[0] = nOrderBy;
+ for(i=1, pItem=pOrderBy->a; i<=nOrderBy; i++, pItem++){
+ assert( pItem->u.x.iOrderByCol>0 );
+ assert( pItem->u.x.iOrderByCol<=p->pEList->nExpr );
+ aPermute[i] = pItem->u.x.iOrderByCol - 1;
+ }
+ pKeyMerge = multiSelectOrderByKeyInfo(pParse, p, 1);
+ }else{
+ pKeyMerge = 0;
+ }
+
+ /* Reattach the ORDER BY clause to the query.
+ */
+ p->pOrderBy = pOrderBy;
+ pPrior->pOrderBy = sqlite3ExprListDup(pParse->db, pOrderBy, 0);
+
+ /* Allocate a range of temporary registers and the KeyInfo needed
+ ** for the logic that removes duplicate result rows when the
+ ** operator is UNION, EXCEPT, or INTERSECT (but not UNION ALL).
+ */
+ if( op==TK_ALL ){
+ regPrev = 0;
+ }else{
+ int nExpr = p->pEList->nExpr;
+ assert( nOrderBy>=nExpr || db->mallocFailed );
+ regPrev = pParse->nMem+1;
+ pParse->nMem += nExpr+1;
+ sqlite3VdbeAddOp2(v, OP_Integer, 0, regPrev);
+ pKeyDup = sqlite3KeyInfoAlloc(db, nExpr, 1);
+ if( pKeyDup ){
+ assert( sqlite3KeyInfoIsWriteable(pKeyDup) );
+ for(i=0; i<nExpr; i++){
+ pKeyDup->aColl[i] = multiSelectCollSeq(pParse, p, i);
+ pKeyDup->aSortFlags[i] = 0;
+ }
+ }
+ }
+
+ /* Separate the left and the right query from one another
+ */
+ p->pPrior = 0;
+ pPrior->pNext = 0;
+ sqlite3ResolveOrderGroupBy(pParse, p, p->pOrderBy, "ORDER");
+ if( pPrior->pPrior==0 ){
+ sqlite3ResolveOrderGroupBy(pParse, pPrior, pPrior->pOrderBy, "ORDER");
+ }
+
+ /* Compute the limit registers */
+ computeLimitRegisters(pParse, p, labelEnd);
+ if( p->iLimit && op==TK_ALL ){
+ regLimitA = ++pParse->nMem;
+ regLimitB = ++pParse->nMem;
+ sqlite3VdbeAddOp2(v, OP_Copy, p->iOffset ? p->iOffset+1 : p->iLimit,
+ regLimitA);
+ sqlite3VdbeAddOp2(v, OP_Copy, regLimitA, regLimitB);
+ }else{
+ regLimitA = regLimitB = 0;
+ }
+ sqlite3ExprDelete(db, p->pLimit);
+ p->pLimit = 0;
+
+ regAddrA = ++pParse->nMem;
+ regAddrB = ++pParse->nMem;
+ regOutA = ++pParse->nMem;
+ regOutB = ++pParse->nMem;
+ sqlite3SelectDestInit(&destA, SRT_Coroutine, regAddrA);
+ sqlite3SelectDestInit(&destB, SRT_Coroutine, regAddrB);
+
+ ExplainQueryPlan((pParse, 1, "MERGE (%s)", selectOpName(p->op)));
+
+ /* Generate a coroutine to evaluate the SELECT statement to the
+ ** left of the compound operator - the "A" select.
+ */
+ addrSelectA = sqlite3VdbeCurrentAddr(v) + 1;
+ addr1 = sqlite3VdbeAddOp3(v, OP_InitCoroutine, regAddrA, 0, addrSelectA);
+ VdbeComment((v, "left SELECT"));
+ pPrior->iLimit = regLimitA;
+ ExplainQueryPlan((pParse, 1, "LEFT"));
+ sqlite3Select(pParse, pPrior, &destA);
+ sqlite3VdbeEndCoroutine(v, regAddrA);
+ sqlite3VdbeJumpHere(v, addr1);
+
+ /* Generate a coroutine to evaluate the SELECT statement on
+ ** the right - the "B" select
+ */
+ addrSelectB = sqlite3VdbeCurrentAddr(v) + 1;
+ addr1 = sqlite3VdbeAddOp3(v, OP_InitCoroutine, regAddrB, 0, addrSelectB);
+ VdbeComment((v, "right SELECT"));
+ savedLimit = p->iLimit;
+ savedOffset = p->iOffset;
+ p->iLimit = regLimitB;
+ p->iOffset = 0;
+ ExplainQueryPlan((pParse, 1, "RIGHT"));
+ sqlite3Select(pParse, p, &destB);
+ p->iLimit = savedLimit;
+ p->iOffset = savedOffset;
+ sqlite3VdbeEndCoroutine(v, regAddrB);
+
+ /* Generate a subroutine that outputs the current row of the A
+ ** select as the next output row of the compound select.
+ */
+ VdbeNoopComment((v, "Output routine for A"));
+ addrOutA = generateOutputSubroutine(pParse,
+ p, &destA, pDest, regOutA,
+ regPrev, pKeyDup, labelEnd);
+
+ /* Generate a subroutine that outputs the current row of the B
+ ** select as the next output row of the compound select.
+ */
+ if( op==TK_ALL || op==TK_UNION ){
+ VdbeNoopComment((v, "Output routine for B"));
+ addrOutB = generateOutputSubroutine(pParse,
+ p, &destB, pDest, regOutB,
+ regPrev, pKeyDup, labelEnd);
+ }
+ sqlite3KeyInfoUnref(pKeyDup);
+
+ /* Generate a subroutine to run when the results from select A
+ ** are exhausted and only data in select B remains.
+ */
+ if( op==TK_EXCEPT || op==TK_INTERSECT ){
+ addrEofA_noB = addrEofA = labelEnd;
+ }else{
+ VdbeNoopComment((v, "eof-A subroutine"));
+ addrEofA = sqlite3VdbeAddOp2(v, OP_Gosub, regOutB, addrOutB);
+ addrEofA_noB = sqlite3VdbeAddOp2(v, OP_Yield, regAddrB, labelEnd);
+ VdbeCoverage(v);
+ sqlite3VdbeGoto(v, addrEofA);
+ p->nSelectRow = sqlite3LogEstAdd(p->nSelectRow, pPrior->nSelectRow);
+ }
+
+ /* Generate a subroutine to run when the results from select B
+ ** are exhausted and only data in select A remains.
+ */
+ if( op==TK_INTERSECT ){
+ addrEofB = addrEofA;
+ if( p->nSelectRow > pPrior->nSelectRow ) p->nSelectRow = pPrior->nSelectRow;
+ }else{
+ VdbeNoopComment((v, "eof-B subroutine"));
+ addrEofB = sqlite3VdbeAddOp2(v, OP_Gosub, regOutA, addrOutA);
+ sqlite3VdbeAddOp2(v, OP_Yield, regAddrA, labelEnd); VdbeCoverage(v);
+ sqlite3VdbeGoto(v, addrEofB);
+ }
+
+ /* Generate code to handle the case of A<B
+ */
+ VdbeNoopComment((v, "A-lt-B subroutine"));
+ addrAltB = sqlite3VdbeAddOp2(v, OP_Gosub, regOutA, addrOutA);
+ sqlite3VdbeAddOp2(v, OP_Yield, regAddrA, addrEofA); VdbeCoverage(v);
+ sqlite3VdbeGoto(v, labelCmpr);
+
+ /* Generate code to handle the case of A==B
+ */
+ if( op==TK_ALL ){
+ addrAeqB = addrAltB;
+ }else if( op==TK_INTERSECT ){
+ addrAeqB = addrAltB;
+ addrAltB++;
+ }else{
+ VdbeNoopComment((v, "A-eq-B subroutine"));
+ addrAeqB =
+ sqlite3VdbeAddOp2(v, OP_Yield, regAddrA, addrEofA); VdbeCoverage(v);
+ sqlite3VdbeGoto(v, labelCmpr);
+ }
+
+ /* Generate code to handle the case of A>B
+ */
+ VdbeNoopComment((v, "A-gt-B subroutine"));
+ addrAgtB = sqlite3VdbeCurrentAddr(v);
+ if( op==TK_ALL || op==TK_UNION ){
+ sqlite3VdbeAddOp2(v, OP_Gosub, regOutB, addrOutB);
+ }
+ sqlite3VdbeAddOp2(v, OP_Yield, regAddrB, addrEofB); VdbeCoverage(v);
+ sqlite3VdbeGoto(v, labelCmpr);
+
+ /* This code runs once to initialize everything.
+ */
+ sqlite3VdbeJumpHere(v, addr1);
+ sqlite3VdbeAddOp2(v, OP_Yield, regAddrA, addrEofA_noB); VdbeCoverage(v);
+ sqlite3VdbeAddOp2(v, OP_Yield, regAddrB, addrEofB); VdbeCoverage(v);
+
+ /* Implement the main merge loop
+ */
+ sqlite3VdbeResolveLabel(v, labelCmpr);
+ sqlite3VdbeAddOp4(v, OP_Permutation, 0, 0, 0, (char*)aPermute, P4_INTARRAY);
+ sqlite3VdbeAddOp4(v, OP_Compare, destA.iSdst, destB.iSdst, nOrderBy,
+ (char*)pKeyMerge, P4_KEYINFO);
+ sqlite3VdbeChangeP5(v, OPFLAG_PERMUTE);
+ sqlite3VdbeAddOp3(v, OP_Jump, addrAltB, addrAeqB, addrAgtB); VdbeCoverage(v);
+
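+  /* Editorial sketch (added commentary, not part of the original source):
+  ** the generated program behaves roughly like this pseudocode:
+  **
+  **     yield A; yield B            -- prime both coroutines (init code)
+  **     cmpr:  compare current A row against current B row
+  **            A<B:  invoke A-lt-B  (output A, advance A)
+  **            A==B: invoke A-eq-B  (operator-specific handling)
+  **            A>B:  invoke A-gt-B  (output B for ALL/UNION, advance B)
+  **            goto cmpr
+  **
+  ** When either coroutine is exhausted, control falls into the eof-A or
+  ** eof-B subroutine generated above, which drains the other side as the
+  ** operator requires and finally jumps to labelEnd.
+  */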
+  /* Jump to this point in order to terminate the query.
+ */
+ sqlite3VdbeResolveLabel(v, labelEnd);
+
+  /* Reassemble the compound query so that it will be freed correctly
+ ** by the calling function */
+ if( p->pPrior ){
+ sqlite3SelectDelete(db, p->pPrior);
+ }
+ p->pPrior = pPrior;
+ pPrior->pNext = p;
+
+ /*** TBD: Insert subroutine calls to close cursors on incomplete
+ **** subqueries ****/
+ ExplainQueryPlanPop(pParse);
+ return pParse->nErr!=0;
+}
+#endif
+
+#if !defined(SQLITE_OMIT_SUBQUERY) || !defined(SQLITE_OMIT_VIEW)
+
+/* An instance of the SubstContext object describes a substitution edit
+** to be performed on a parse tree.
+**
+** All references to columns in table iTable are to be replaced by corresponding
+** expressions in pEList.
+*/
+typedef struct SubstContext {
+ Parse *pParse; /* The parsing context */
+ int iTable; /* Replace references to this table */
+ int iNewTable; /* New table number */
+ int isLeftJoin; /* Add TK_IF_NULL_ROW opcodes on each replacement */
+ ExprList *pEList; /* Replacement expressions */
+} SubstContext;
+
+/* Forward Declarations */
+static void substExprList(SubstContext*, ExprList*);
+static void substSelect(SubstContext*, Select*, int);
+
+/*
+** Scan through the expression pExpr. Replace every reference to
+** a column in table number iTable with a copy of the iColumn-th
+** entry in pEList. (References to the ROWID column are replaced
+** with NULL, since the flattened subquery result set has no ROWID.)
+**
+** This routine is part of the flattening procedure. A subquery
+** whose result set is defined by pEList appears as entry in the
+** FROM clause of a SELECT such that the VDBE cursor assigned to that
+** FROM clause entry is iTable. This routine makes the necessary
+** changes to pExpr so that it refers directly to the source table
+** of the subquery rather than the result set of the subquery.
+*/
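+/* Editorial sketch (added commentary, not authoritative): if the FROM
+** clause term "(SELECT x+y AS a FROM t1)" is assigned cursor iTable,
+** then pEList holds the single expression "x+y", and substituting into
+** the outer WHERE term "a>5" (a TK_COLUMN with iColumn==0 on iTable)
+** rewrites it to "x+y>5".
+*/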
+static Expr *substExpr(
+ SubstContext *pSubst, /* Description of the substitution */
+ Expr *pExpr /* Expr in which substitution occurs */
+){
+ if( pExpr==0 ) return 0;
+ if( ExprHasProperty(pExpr, EP_FromJoin)
+ && pExpr->iRightJoinTable==pSubst->iTable
+ ){
+ pExpr->iRightJoinTable = pSubst->iNewTable;
+ }
+ if( pExpr->op==TK_COLUMN
+ && pExpr->iTable==pSubst->iTable
+ && !ExprHasProperty(pExpr, EP_FixedCol)
+ ){
+ if( pExpr->iColumn<0 ){
+ pExpr->op = TK_NULL;
+ }else{
+ Expr *pNew;
+ Expr *pCopy = pSubst->pEList->a[pExpr->iColumn].pExpr;
+ Expr ifNullRow;
+ assert( pSubst->pEList!=0 && pExpr->iColumn<pSubst->pEList->nExpr );
+ assert( pExpr->pRight==0 );
+ if( sqlite3ExprIsVector(pCopy) ){
+ sqlite3VectorErrorMsg(pSubst->pParse, pCopy);
+ }else{
+ sqlite3 *db = pSubst->pParse->db;
+ if( pSubst->isLeftJoin && pCopy->op!=TK_COLUMN ){
+ memset(&ifNullRow, 0, sizeof(ifNullRow));
+ ifNullRow.op = TK_IF_NULL_ROW;
+ ifNullRow.pLeft = pCopy;
+ ifNullRow.iTable = pSubst->iNewTable;
+ ifNullRow.flags = EP_Skip;
+ pCopy = &ifNullRow;
+ }
+ testcase( ExprHasProperty(pCopy, EP_Subquery) );
+ pNew = sqlite3ExprDup(db, pCopy, 0);
+ if( pNew && pSubst->isLeftJoin ){
+ ExprSetProperty(pNew, EP_CanBeNull);
+ }
+ if( pNew && ExprHasProperty(pExpr,EP_FromJoin) ){
+ pNew->iRightJoinTable = pExpr->iRightJoinTable;
+ ExprSetProperty(pNew, EP_FromJoin);
+ }
+ sqlite3ExprDelete(db, pExpr);
+ pExpr = pNew;
+
+ /* Ensure that the expression now has an implicit collation sequence,
+ ** just as it did when it was a column of a view or sub-query. */
+ if( pExpr ){
+ if( pExpr->op!=TK_COLUMN && pExpr->op!=TK_COLLATE ){
+ CollSeq *pColl = sqlite3ExprCollSeq(pSubst->pParse, pExpr);
+ pExpr = sqlite3ExprAddCollateString(pSubst->pParse, pExpr,
+ (pColl ? pColl->zName : "BINARY")
+ );
+ }
+ ExprClearProperty(pExpr, EP_Collate);
+ }
+ }
+ }
+ }else{
+ if( pExpr->op==TK_IF_NULL_ROW && pExpr->iTable==pSubst->iTable ){
+ pExpr->iTable = pSubst->iNewTable;
+ }
+ pExpr->pLeft = substExpr(pSubst, pExpr->pLeft);
+ pExpr->pRight = substExpr(pSubst, pExpr->pRight);
+ if( ExprHasProperty(pExpr, EP_xIsSelect) ){
+ substSelect(pSubst, pExpr->x.pSelect, 1);
+ }else{
+ substExprList(pSubst, pExpr->x.pList);
+ }
+#ifndef SQLITE_OMIT_WINDOWFUNC
+ if( ExprHasProperty(pExpr, EP_WinFunc) ){
+ Window *pWin = pExpr->y.pWin;
+ pWin->pFilter = substExpr(pSubst, pWin->pFilter);
+ substExprList(pSubst, pWin->pPartition);
+ substExprList(pSubst, pWin->pOrderBy);
+ }
+#endif
+ }
+ return pExpr;
+}
+static void substExprList(
+ SubstContext *pSubst, /* Description of the substitution */
+ ExprList *pList /* List to scan and in which to make substitutes */
+){
+ int i;
+ if( pList==0 ) return;
+ for(i=0; i<pList->nExpr; i++){
+ pList->a[i].pExpr = substExpr(pSubst, pList->a[i].pExpr);
+ }
+}
+static void substSelect(
+ SubstContext *pSubst, /* Description of the substitution */
+ Select *p, /* SELECT statement in which to make substitutions */
+ int doPrior /* Do substitutes on p->pPrior too */
+){
+ SrcList *pSrc;
+ struct SrcList_item *pItem;
+ int i;
+ if( !p ) return;
+ do{
+ substExprList(pSubst, p->pEList);
+ substExprList(pSubst, p->pGroupBy);
+ substExprList(pSubst, p->pOrderBy);
+ p->pHaving = substExpr(pSubst, p->pHaving);
+ p->pWhere = substExpr(pSubst, p->pWhere);
+ pSrc = p->pSrc;
+ assert( pSrc!=0 );
+ for(i=pSrc->nSrc, pItem=pSrc->a; i>0; i--, pItem++){
+ substSelect(pSubst, pItem->pSelect, 1);
+ if( pItem->fg.isTabFunc ){
+ substExprList(pSubst, pItem->u1.pFuncArg);
+ }
+ }
+ }while( doPrior && (p = p->pPrior)!=0 );
+}
+#endif /* !defined(SQLITE_OMIT_SUBQUERY) || !defined(SQLITE_OMIT_VIEW) */
+
+#if !defined(SQLITE_OMIT_SUBQUERY) || !defined(SQLITE_OMIT_VIEW)
+/*
+** pSelect is a SELECT statement and pSrcItem is one item in the FROM
+** clause of that SELECT.
+**
+** This routine scans the entire SELECT statement and recomputes the
+** pSrcItem->colUsed mask.
+*/
+static int recomputeColumnsUsedExpr(Walker *pWalker, Expr *pExpr){
+ struct SrcList_item *pItem;
+ if( pExpr->op!=TK_COLUMN ) return WRC_Continue;
+ pItem = pWalker->u.pSrcItem;
+ if( pItem->iCursor!=pExpr->iTable ) return WRC_Continue;
+ if( pExpr->iColumn<0 ) return WRC_Continue;
+ pItem->colUsed |= sqlite3ExprColUsed(pExpr);
+ return WRC_Continue;
+}
+static void recomputeColumnsUsed(
+ Select *pSelect, /* The complete SELECT statement */
+ struct SrcList_item *pSrcItem /* Which FROM clause item to recompute */
+){
+ Walker w;
+ if( NEVER(pSrcItem->pTab==0) ) return;
+ memset(&w, 0, sizeof(w));
+ w.xExprCallback = recomputeColumnsUsedExpr;
+ w.xSelectCallback = sqlite3SelectWalkNoop;
+ w.u.pSrcItem = pSrcItem;
+ pSrcItem->colUsed = 0;
+ sqlite3WalkSelect(&w, pSelect);
+}
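+/* Editorial note (a sketch, assuming the usual Bitmask conventions):
+** colUsed has bit N set when column N of the FROM clause item is
+** referenced, with columns beyond the bitmask width folded into the
+** most-significant bit. E.g. recomputing over "SELECT a, c FROM t1"
+** (a = column 0, c = column 2) would leave colUsed == 0x5.
+*/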
+#endif /* !defined(SQLITE_OMIT_SUBQUERY) || !defined(SQLITE_OMIT_VIEW) */
+
+#if !defined(SQLITE_OMIT_SUBQUERY) || !defined(SQLITE_OMIT_VIEW)
+/*
+** This routine attempts to flatten subqueries as a performance optimization.
+** This routine returns 1 if it makes changes and 0 if no flattening occurs.
+**
+** To understand the concept of flattening, consider the following
+** query:
+**
+** SELECT a FROM (SELECT x+y AS a FROM t1 WHERE z<100) WHERE a>5
+**
+** The default way of implementing this query is to execute the
+** subquery first and store the results in a temporary table, then
+** run the outer query on that temporary table. This requires two
+** passes over the data. Furthermore, because the temporary table
+** has no indices, the WHERE clause on the outer query cannot be
+** optimized.
+**
+** This routine attempts to rewrite queries such as the above into
+** a single flat select, like this:
+**
+** SELECT x+y AS a FROM t1 WHERE z<100 AND a>5
+**
+** The code generated for this simplification gives the same result
+** but only has to scan the data once. And because indices might
+** exist on the table t1, a complete scan of the data might be
+** avoided.
+**
+** Flattening is subject to the following constraints:
+**
+** (**) We no longer attempt to flatten aggregate subqueries. Was:
+** The subquery and the outer query cannot both be aggregates.
+**
+** (**) We no longer attempt to flatten aggregate subqueries. Was:
+** (2) If the subquery is an aggregate then
+** (2a) the outer query must not be a join and
+** (2b) the outer query must not use subqueries
+** other than the one FROM-clause subquery that is a candidate
+** for flattening. (This is due to ticket [2f7170d73bf9abf80]
+** from 2015-02-09.)
+**
+** (3) If the subquery is the right operand of a LEFT JOIN then
+** (3a) the subquery may not be a join and
+** (3b) the FROM clause of the subquery may not contain a virtual
+** table and
+** (3c) the outer query may not be an aggregate.
+** (3d) the outer query may not be DISTINCT.
+**
+** (4) The subquery can not be DISTINCT.
+**
+** (**) At one point restrictions (4) and (5) defined a subset of DISTINCT
+** sub-queries that were excluded from this optimization. Restriction
+** (4) has since been expanded to exclude all DISTINCT subqueries.
+**
+** (**) We no longer attempt to flatten aggregate subqueries. Was:
+** If the subquery is aggregate, the outer query may not be DISTINCT.
+**
+** (7) The subquery must have a FROM clause. TODO: For subqueries without
+** a FROM clause, consider adding a FROM clause with the special
+** table sqlite_once that consists of a single row containing a
+** single NULL.
+**
+** (8) If the subquery uses LIMIT then the outer query may not be a join.
+**
+** (9) If the subquery uses LIMIT then the outer query may not be aggregate.
+**
+** (**) Restriction (10) was removed from the code on 2005-02-05 but we
+** accidentally carried the comment forward until 2014-09-15. Original
+** constraint: "If the subquery is aggregate then the outer query
+** may not use LIMIT."
+**
+** (11) The subquery and the outer query may not both have ORDER BY clauses.
+**
+** (**) Not implemented. Subsumed into restriction (3). Was previously
+** a separate restriction deriving from ticket #350.
+**
+** (13) The subquery and outer query may not both use LIMIT.
+**
+** (14) The subquery may not use OFFSET.
+**
+** (15) If the outer query is part of a compound select, then the
+** subquery may not use LIMIT.
+** (See ticket #2339 and ticket [02a8e81d44]).
+**
+** (16) If the outer query is aggregate, then the subquery may not
+** use ORDER BY. (Ticket #2942) This did not matter
+** until we introduced the group_concat() function.
+**
+** (17) If the subquery is a compound select, then
+** (17a) all compound operators must be UNION ALL, and
+** (17b) no terms within the subquery compound may be aggregate
+** or DISTINCT, and
+** (17c) every term within the subquery compound must have a FROM clause
+** (17d) the outer query may not be
+** (17d1) aggregate, or
+** (17d2) DISTINCT, or
+** (17d3) a join.
+** (17e) the subquery may not contain window functions
+**
+** The parent and sub-query may contain WHERE clauses. Subject to
+** rules (11), (13) and (14), they may also contain ORDER BY,
+** LIMIT and OFFSET clauses. The subquery cannot use any compound
+** operator other than UNION ALL because all the other compound
+** operators have an implied DISTINCT which is disallowed by
+** restriction (4).
+**
+** Also, each component of the sub-query must return the same number
+** of result columns. This is actually a requirement for any compound
+** SELECT statement, but all the code here does is make sure that no
+** such (illegal) sub-query is flattened. The caller will detect the
+** syntax error and return a detailed message.
+**
+** (18) If the sub-query is a compound select, then all terms of the
+** ORDER BY clause of the parent must be simple references to
+** columns of the sub-query.
+**
+** (19) If the subquery uses LIMIT then the outer query may not
+** have a WHERE clause.
+**
+** (20) If the sub-query is a compound select, then it must not use
+** an ORDER BY clause. Ticket #3773. We could relax this constraint
+** somewhat by saying that the terms of the ORDER BY clause must
+** appear as unmodified result columns in the outer query. But we
+** have other optimizations in mind to deal with that case.
+**
+** (21) If the subquery uses LIMIT then the outer query may not be
+** DISTINCT. (See ticket [752e1646fc]).
+**
+** (22) The subquery may not be a recursive CTE.
+**
+** (**) Subsumed into restriction (17d3). Was: If the outer query is
+** a recursive CTE, then the sub-query may not be a compound query.
+** This restriction is because transforming the
+** parent to a compound query confuses the code that handles
+** recursive queries in multiSelect().
+**
+** (**) We no longer attempt to flatten aggregate subqueries. Was:
+** The subquery may not be an aggregate that uses the built-in min()
+** or max() functions. (Without this restriction, a query like:
+** "SELECT x FROM (SELECT max(y), x FROM t1)" would not necessarily
+** return the value X for which Y was maximal.)
+**
+** (25) If either the subquery or the parent query contains a window
+** function in the select list or ORDER BY clause, flattening
+** is not attempted.
+**
+**
+** In this routine, the "p" parameter is a pointer to the outer query.
+** The subquery is p->pSrc->a[iFrom]. isAgg is true if the outer query
+** uses aggregates.
+**
+** If flattening is not attempted, this routine is a no-op and returns 0.
+** If flattening is attempted, this routine returns 1.
+**
+** All of the expression analysis must occur on both the outer query and
+** the subquery before this routine runs.
+*/
+static int flattenSubquery(
+ Parse *pParse, /* Parsing context */
+ Select *p, /* The parent or outer SELECT statement */
+ int iFrom, /* Index in p->pSrc->a[] of the inner subquery */
+ int isAgg /* True if outer SELECT uses aggregate functions */
+){
+ const char *zSavedAuthContext = pParse->zAuthContext;
+ Select *pParent; /* Current UNION ALL term of the other query */
+ Select *pSub; /* The inner query or "subquery" */
+ Select *pSub1; /* Pointer to the rightmost select in sub-query */
+ SrcList *pSrc; /* The FROM clause of the outer query */
+ SrcList *pSubSrc; /* The FROM clause of the subquery */
+ int iParent; /* VDBE cursor number of the pSub result set temp table */
+ int iNewParent = -1;/* Replacement table for iParent */
+ int isLeftJoin = 0; /* True if pSub is the right side of a LEFT JOIN */
+ int i; /* Loop counter */
+ Expr *pWhere; /* The WHERE clause */
+ struct SrcList_item *pSubitem; /* The subquery */
+ sqlite3 *db = pParse->db;
+ Walker w; /* Walker to persist agginfo data */
+
+ /* Check to see if flattening is permitted. Return 0 if not.
+ */
+ assert( p!=0 );
+ assert( p->pPrior==0 );
+ if( OptimizationDisabled(db, SQLITE_QueryFlattener) ) return 0;
+ pSrc = p->pSrc;
+ assert( pSrc && iFrom>=0 && iFrom<pSrc->nSrc );
+ pSubitem = &pSrc->a[iFrom];
+ iParent = pSubitem->iCursor;
+ pSub = pSubitem->pSelect;
+ assert( pSub!=0 );
+
+#ifndef SQLITE_OMIT_WINDOWFUNC
+ if( p->pWin || pSub->pWin ) return 0; /* Restriction (25) */
+#endif
+
+ pSubSrc = pSub->pSrc;
+ assert( pSubSrc );
+ /* Prior to version 3.1.2, when LIMIT and OFFSET had to be simple constants,
+ ** not arbitrary expressions, we allowed some combining of LIMIT and OFFSET
+ ** because they could be computed at compile-time. But when LIMIT and OFFSET
+ ** became arbitrary expressions, we were forced to add restrictions (13)
+ ** and (14). */
+ if( pSub->pLimit && p->pLimit ) return 0; /* Restriction (13) */
+ if( pSub->pLimit && pSub->pLimit->pRight ) return 0; /* Restriction (14) */
+ if( (p->selFlags & SF_Compound)!=0 && pSub->pLimit ){
+ return 0; /* Restriction (15) */
+ }
+ if( pSubSrc->nSrc==0 ) return 0; /* Restriction (7) */
+ if( pSub->selFlags & SF_Distinct ) return 0; /* Restriction (4) */
+ if( pSub->pLimit && (pSrc->nSrc>1 || isAgg) ){
+ return 0; /* Restrictions (8)(9) */
+ }
+ if( p->pOrderBy && pSub->pOrderBy ){
+ return 0; /* Restriction (11) */
+ }
+ if( isAgg && pSub->pOrderBy ) return 0; /* Restriction (16) */
+ if( pSub->pLimit && p->pWhere ) return 0; /* Restriction (19) */
+ if( pSub->pLimit && (p->selFlags & SF_Distinct)!=0 ){
+ return 0; /* Restriction (21) */
+ }
+ if( pSub->selFlags & (SF_Recursive) ){
+ return 0; /* Restrictions (22) */
+ }
+
+ /*
+ ** If the subquery is the right operand of a LEFT JOIN, then the
+ ** subquery may not be a join itself (3a). Example of why this is not
+ ** allowed:
+ **
+ ** t1 LEFT OUTER JOIN (t2 JOIN t3)
+ **
+ ** If we flatten the above, we would get
+ **
+ ** (t1 LEFT OUTER JOIN t2) JOIN t3
+ **
+ ** which is not at all the same thing.
+ **
+ ** If the subquery is the right operand of a LEFT JOIN, then the outer
+ ** query cannot be an aggregate. (3c) This is an artifact of the way
+ ** aggregates are processed - there is no mechanism to determine if
+ ** the LEFT JOIN table should be all-NULL.
+ **
+ ** See also tickets #306, #350, and #3300.
+ */
+ if( (pSubitem->fg.jointype & JT_OUTER)!=0 ){
+ isLeftJoin = 1;
+ if( pSubSrc->nSrc>1 /* (3a) */
+     || isAgg                                  /* (3c) */
+     || IsVirtual(pSubSrc->a[0].pTab)          /* (3b) */
+ || (p->selFlags & SF_Distinct)!=0 /* (3d) */
+ ){
+ return 0;
+ }
+ }
+#ifdef SQLITE_EXTRA_IFNULLROW
+ else if( iFrom>0 && !isAgg ){
+ /* Setting isLeftJoin to -1 causes OP_IfNullRow opcodes to be generated for
+  ** every reference to any result column from a subquery in a join, even
+ ** though they are not necessary. This will stress-test the OP_IfNullRow
+ ** opcode. */
+ isLeftJoin = -1;
+ }
+#endif
+
+ /* Restriction (17): If the sub-query is a compound SELECT, then it must
+ ** use only the UNION ALL operator. And none of the simple select queries
+ ** that make up the compound SELECT are allowed to be aggregate or distinct
+ ** queries.
+ */
+ if( pSub->pPrior ){
+ if( pSub->pOrderBy ){
+ return 0; /* Restriction (20) */
+ }
+ if( isAgg || (p->selFlags & SF_Distinct)!=0 || pSrc->nSrc!=1 ){
+ return 0; /* (17d1), (17d2), or (17d3) */
+ }
+ for(pSub1=pSub; pSub1; pSub1=pSub1->pPrior){
+ testcase( (pSub1->selFlags & (SF_Distinct|SF_Aggregate))==SF_Distinct );
+ testcase( (pSub1->selFlags & (SF_Distinct|SF_Aggregate))==SF_Aggregate );
+ assert( pSub->pSrc!=0 );
+ assert( pSub->pEList->nExpr==pSub1->pEList->nExpr );
+ if( (pSub1->selFlags & (SF_Distinct|SF_Aggregate))!=0 /* (17b) */
+ || (pSub1->pPrior && pSub1->op!=TK_ALL) /* (17a) */
+ || pSub1->pSrc->nSrc<1 /* (17c) */
+#ifndef SQLITE_OMIT_WINDOWFUNC
+ || pSub1->pWin /* (17e) */
+#endif
+ ){
+ return 0;
+ }
+ testcase( pSub1->pSrc->nSrc>1 );
+ }
+
+ /* Restriction (18). */
+ if( p->pOrderBy ){
+ int ii;
+ for(ii=0; ii<p->pOrderBy->nExpr; ii++){
+ if( p->pOrderBy->a[ii].u.x.iOrderByCol==0 ) return 0;
+ }
+ }
+ }
+
+ /* Ex-restriction (23):
+ ** The only way that the recursive part of a CTE can contain a compound
+ ** subquery is for the subquery to be one term of a join. But if the
+ ** subquery is a join, then the flattening has already been stopped by
+ ** restriction (17d3)
+ */
+ assert( (p->selFlags & SF_Recursive)==0 || pSub->pPrior==0 );
+
+ /***** If we reach this point, flattening is permitted. *****/
+ SELECTTRACE(1,pParse,p,("flatten %u.%p from term %d\n",
+ pSub->selId, pSub, iFrom));
+
+ /* Authorize the subquery */
+ pParse->zAuthContext = pSubitem->zName;
+ TESTONLY(i =) sqlite3AuthCheck(pParse, SQLITE_SELECT, 0, 0, 0);
+ testcase( i==SQLITE_DENY );
+ pParse->zAuthContext = zSavedAuthContext;
+
+ /* If the sub-query is a compound SELECT statement, then (by restrictions
+ ** 17 and 18 above) it must be a UNION ALL and the parent query must
+ ** be of the form:
+ **
+ ** SELECT <expr-list> FROM (<sub-query>) <where-clause>
+ **
+ ** followed by any ORDER BY, LIMIT and/or OFFSET clauses. This block
+ ** creates N-1 copies of the parent query without any ORDER BY, LIMIT or
+ ** OFFSET clauses and joins them to the left-hand-side of the original
+ ** using UNION ALL operators. In this case N is the number of simple
+ ** select statements in the compound sub-query.
+ **
+ ** Example:
+ **
+ ** SELECT a+1 FROM (
+ ** SELECT x FROM tab
+ ** UNION ALL
+ ** SELECT y FROM tab
+ ** UNION ALL
+ ** SELECT abs(z*2) FROM tab2
+ ** ) WHERE a!=5 ORDER BY 1
+ **
+ ** Transformed into:
+ **
+ ** SELECT x+1 FROM tab WHERE x+1!=5
+ ** UNION ALL
+ ** SELECT y+1 FROM tab WHERE y+1!=5
+ ** UNION ALL
+ ** SELECT abs(z*2)+1 FROM tab2 WHERE abs(z*2)+1!=5
+ ** ORDER BY 1
+ **
+ ** We call this the "compound-subquery flattening".
+ */
+ for(pSub=pSub->pPrior; pSub; pSub=pSub->pPrior){
+ Select *pNew;
+ ExprList *pOrderBy = p->pOrderBy;
+ Expr *pLimit = p->pLimit;
+ Select *pPrior = p->pPrior;
+ p->pOrderBy = 0;
+ p->pSrc = 0;
+ p->pPrior = 0;
+ p->pLimit = 0;
+ pNew = sqlite3SelectDup(db, p, 0);
+ p->pLimit = pLimit;
+ p->pOrderBy = pOrderBy;
+ p->pSrc = pSrc;
+ p->op = TK_ALL;
+ if( pNew==0 ){
+ p->pPrior = pPrior;
+ }else{
+ pNew->pPrior = pPrior;
+ if( pPrior ) pPrior->pNext = pNew;
+ pNew->pNext = p;
+ p->pPrior = pNew;
+ SELECTTRACE(2,pParse,p,("compound-subquery flattener"
+ " creates %u as peer\n",pNew->selId));
+ }
+ if( db->mallocFailed ) return 1;
+ }
+
+ /* Begin flattening the iFrom-th entry of the FROM clause
+ ** in the outer query.
+ */
+ pSub = pSub1 = pSubitem->pSelect;
+
+ /* Delete the transient table structure associated with the
+ ** subquery
+ */
+ sqlite3DbFree(db, pSubitem->zDatabase);
+ sqlite3DbFree(db, pSubitem->zName);
+ sqlite3DbFree(db, pSubitem->zAlias);
+ pSubitem->zDatabase = 0;
+ pSubitem->zName = 0;
+ pSubitem->zAlias = 0;
+ pSubitem->pSelect = 0;
+
+ /* Defer deleting the Table object associated with the
+ ** subquery until code generation is
+ ** complete, since there may still exist Expr.pTab entries that
+ ** refer to the subquery even after flattening. Ticket #3346.
+ **
+ ** pSubitem->pTab is always non-NULL by test restrictions and tests above.
+  ** pSubitem->pTab is always non-NULL because of the restriction checks above.
+ if( ALWAYS(pSubitem->pTab!=0) ){
+ Table *pTabToDel = pSubitem->pTab;
+ if( pTabToDel->nTabRef==1 ){
+ Parse *pToplevel = sqlite3ParseToplevel(pParse);
+ pTabToDel->pNextZombie = pToplevel->pZombieTab;
+ pToplevel->pZombieTab = pTabToDel;
+ }else{
+ pTabToDel->nTabRef--;
+ }
+ pSubitem->pTab = 0;
+ }
+
+ /* The following loop runs once for each term in a compound-subquery
+ ** flattening (as described above). If we are doing a different kind
+ ** of flattening - a flattening other than a compound-subquery flattening -
+ ** then this loop only runs once.
+ **
+  ** This loop moves all of the FROM elements of the subquery into
+ ** the FROM clause of the outer query. Before doing this, remember
+ ** the cursor number for the original outer query FROM element in
+ ** iParent. The iParent cursor will never be used. Subsequent code
+ ** will scan expressions looking for iParent references and replace
+ ** those references with expressions that resolve to the subquery FROM
+ ** elements we are now copying in.
+ */
+ for(pParent=p; pParent; pParent=pParent->pPrior, pSub=pSub->pPrior){
+ int nSubSrc;
+ u8 jointype = 0;
+ assert( pSub!=0 );
+ pSubSrc = pSub->pSrc; /* FROM clause of subquery */
+ nSubSrc = pSubSrc->nSrc; /* Number of terms in subquery FROM clause */
+ pSrc = pParent->pSrc; /* FROM clause of the outer query */
+
+ if( pSrc ){
+ assert( pParent==p ); /* First time through the loop */
+ jointype = pSubitem->fg.jointype;
+ }else{
+ assert( pParent!=p ); /* 2nd and subsequent times through the loop */
+ pSrc = sqlite3SrcListAppend(pParse, 0, 0, 0);
+ if( pSrc==0 ) break;
+ pParent->pSrc = pSrc;
+ }
+
+ /* The subquery uses a single slot of the FROM clause of the outer
+ ** query. If the subquery has more than one element in its FROM clause,
+ ** then expand the outer query to make space for it to hold all elements
+ ** of the subquery.
+ **
+ ** Example:
+ **
+ ** SELECT * FROM tabA, (SELECT * FROM sub1, sub2), tabB;
+ **
+ ** The outer query has 3 slots in its FROM clause. One slot of the
+ ** outer query (the middle slot) is used by the subquery. The next
+ ** block of code will expand the outer query FROM clause to 4 slots.
+ ** The middle slot is expanded to two slots in order to make space
+ ** for the two elements in the FROM clause of the subquery.
+ */
+ if( nSubSrc>1 ){
+ pSrc = sqlite3SrcListEnlarge(pParse, pSrc, nSubSrc-1,iFrom+1);
+ if( pSrc==0 ) break;
+ pParent->pSrc = pSrc;
+ }
+
+ /* Transfer the FROM clause terms from the subquery into the
+ ** outer query.
+ */
+ for(i=0; i<nSubSrc; i++){
+ sqlite3IdListDelete(db, pSrc->a[i+iFrom].pUsing);
+ assert( pSrc->a[i+iFrom].fg.isTabFunc==0 );
+ pSrc->a[i+iFrom] = pSubSrc->a[i];
+ iNewParent = pSubSrc->a[i].iCursor;
+ memset(&pSubSrc->a[i], 0, sizeof(pSubSrc->a[i]));
+ }
+ pSrc->a[iFrom].fg.jointype = jointype;
+
+ /* Now begin substituting subquery result set expressions for
+ ** references to the iParent in the outer query.
+ **
+ ** Example:
+ **
+ ** SELECT a+5, b*10 FROM (SELECT x*3 AS a, y+10 AS b FROM t1) WHERE a>b;
+ ** \ \_____________ subquery __________/ /
+ ** \_____________________ outer query ______________________________/
+ **
+ ** We look at every expression in the outer query and every place we see
+ ** "a" we substitute "x*3" and every place we see "b" we substitute "y+10".
+ */
+ if( pSub->pOrderBy && (pParent->selFlags & SF_NoopOrderBy)==0 ){
+ /* At this point, any non-zero iOrderByCol values indicate that the
+ ** ORDER BY column expression is identical to the iOrderByCol'th
+ ** expression returned by SELECT statement pSub. Since these values
+ ** do not necessarily correspond to columns in SELECT statement pParent,
+    ** zero them before transferring the ORDER BY clause.
+ **
+ ** Not doing this may cause an error if a subsequent call to this
+ ** function attempts to flatten a compound sub-query into pParent
+ ** (the only way this can happen is if the compound sub-query is
+ ** currently part of pSub->pSrc). See ticket [d11a6e908f]. */
+ ExprList *pOrderBy = pSub->pOrderBy;
+ for(i=0; i<pOrderBy->nExpr; i++){
+ pOrderBy->a[i].u.x.iOrderByCol = 0;
+ }
+ assert( pParent->pOrderBy==0 );
+ pParent->pOrderBy = pOrderBy;
+ pSub->pOrderBy = 0;
+ }
+ pWhere = pSub->pWhere;
+ pSub->pWhere = 0;
+ if( isLeftJoin>0 ){
+ sqlite3SetJoinExpr(pWhere, iNewParent);
+ }
+ if( pWhere ){
+ if( pParent->pWhere ){
+ pParent->pWhere = sqlite3PExpr(pParse, TK_AND, pWhere, pParent->pWhere);
+ }else{
+ pParent->pWhere = pWhere;
+ }
+ }
+ if( db->mallocFailed==0 ){
+ SubstContext x;
+ x.pParse = pParse;
+ x.iTable = iParent;
+ x.iNewTable = iNewParent;
+ x.isLeftJoin = isLeftJoin;
+ x.pEList = pSub->pEList;
+ substSelect(&x, pParent, 0);
+ }
+
+ /* The flattened query is a compound if either the inner or the
+ ** outer query is a compound. */
+ pParent->selFlags |= pSub->selFlags & SF_Compound;
+ assert( (pSub->selFlags & SF_Distinct)==0 ); /* restriction (17b) */
+
+ /*
+ ** SELECT ... FROM (SELECT ... LIMIT a OFFSET b) LIMIT x OFFSET y;
+ **
+ ** One is tempted to try to add a and b to combine the limits. But this
+ ** does not work if either limit is negative.
+ */
+ if( pSub->pLimit ){
+ pParent->pLimit = pSub->pLimit;
+ pSub->pLimit = 0;
+ }
+
+ /* Recompute the SrcList_item.colUsed masks for the flattened
+ ** tables. */
+ for(i=0; i<nSubSrc; i++){
+ recomputeColumnsUsed(pParent, &pSrc->a[i+iFrom]);
+ }
+ }
+
+  /* Finally, delete what is left of the subquery and return
+ ** success.
+ */
+ sqlite3AggInfoPersistWalkerInit(&w, pParse);
+ sqlite3WalkSelect(&w,pSub1);
+ sqlite3SelectDelete(db, pSub1);
+
+#if SELECTTRACE_ENABLED
+ if( sqlite3_unsupported_selecttrace & 0x100 ){
+ SELECTTRACE(0x100,pParse,p,("After flattening:\n"));
+ sqlite3TreeViewSelect(0, p, 0);
+ }
+#endif
+
+ return 1;
+}
+#endif /* !defined(SQLITE_OMIT_SUBQUERY) || !defined(SQLITE_OMIT_VIEW) */
+
+/*
+** A structure to keep track of all of the column values that are fixed to
+** a known value due to WHERE clause constraints of the form COLUMN=VALUE.
+*/
+typedef struct WhereConst WhereConst;
+struct WhereConst {
+ Parse *pParse; /* Parsing context */
+  int nConst;       /* Number of COLUMN=CONSTANT terms */
+ int nChng; /* Number of times a constant is propagated */
+ Expr **apExpr; /* [i*2] is COLUMN and [i*2+1] is VALUE */
+};
+
+/*
+** Add a new entry to the pConst object. Except, do not add duplicate
+** pColumn entries. Also, do not add if doing so would not be appropriate.
+**
+** The caller guarantees the pColumn is a column and pValue is a constant.
+** This routine has to do some additional checks before completing the
+** insert.
+*/
+static void constInsert(
+ WhereConst *pConst, /* The WhereConst into which we are inserting */
+ Expr *pColumn, /* The COLUMN part of the constraint */
+ Expr *pValue, /* The VALUE part of the constraint */
+ Expr *pExpr /* Overall expression: COLUMN=VALUE or VALUE=COLUMN */
+){
+ int i;
+ assert( pColumn->op==TK_COLUMN );
+ assert( sqlite3ExprIsConstant(pValue) );
+
+ if( ExprHasProperty(pColumn, EP_FixedCol) ) return;
+ if( sqlite3ExprAffinity(pValue)!=0 ) return;
+ if( !sqlite3IsBinary(sqlite3ExprCompareCollSeq(pConst->pParse,pExpr)) ){
+ return;
+ }
+
+ /* 2018-10-25 ticket [cf5ed20f]
+ ** Make sure the same pColumn is not inserted more than once */
+ for(i=0; i<pConst->nConst; i++){
+ const Expr *pE2 = pConst->apExpr[i*2];
+ assert( pE2->op==TK_COLUMN );
+ if( pE2->iTable==pColumn->iTable
+ && pE2->iColumn==pColumn->iColumn
+ ){
+ return; /* Already present. Return without doing anything. */
+ }
+ }
+
+ pConst->nConst++;
+ pConst->apExpr = sqlite3DbReallocOrFree(pConst->pParse->db, pConst->apExpr,
+ pConst->nConst*2*sizeof(Expr*));
+ if( pConst->apExpr==0 ){
+ pConst->nConst = 0;
+ }else{
+ pConst->apExpr[pConst->nConst*2-2] = pColumn;
+ pConst->apExpr[pConst->nConst*2-1] = pValue;
+ }
+}
+
+/*
+** Find all terms of COLUMN=VALUE or VALUE=COLUMN in pExpr where VALUE
+** is a constant expression and where the term must be true because it
+** is part of the AND-connected terms of the expression. For each term
+** found, add it to the pConst structure.
+*/
+static void findConstInWhere(WhereConst *pConst, Expr *pExpr){
+ Expr *pRight, *pLeft;
+ if( pExpr==0 ) return;
+ if( ExprHasProperty(pExpr, EP_FromJoin) ) return;
+ if( pExpr->op==TK_AND ){
+ findConstInWhere(pConst, pExpr->pRight);
+ findConstInWhere(pConst, pExpr->pLeft);
+ return;
+ }
+ if( pExpr->op!=TK_EQ ) return;
+ pRight = pExpr->pRight;
+ pLeft = pExpr->pLeft;
+ assert( pRight!=0 );
+ assert( pLeft!=0 );
+ if( pRight->op==TK_COLUMN && sqlite3ExprIsConstant(pLeft) ){
+ constInsert(pConst,pRight,pLeft,pExpr);
+ }
+ if( pLeft->op==TK_COLUMN && sqlite3ExprIsConstant(pRight) ){
+ constInsert(pConst,pLeft,pRight,pExpr);
+ }
+}
+
+/*
+** This is a Walker expression callback. pExpr is a candidate expression
+** to be replaced by a value. If pExpr is equivalent to one of the
+** columns named in pWalker->u.pConst, then overwrite it with its
+** corresponding value.
+*/
+static int propagateConstantExprRewrite(Walker *pWalker, Expr *pExpr){
+ int i;
+ WhereConst *pConst;
+ if( pExpr->op!=TK_COLUMN ) return WRC_Continue;
+ if( ExprHasProperty(pExpr, EP_FixedCol|EP_FromJoin) ){
+ testcase( ExprHasProperty(pExpr, EP_FixedCol) );
+ testcase( ExprHasProperty(pExpr, EP_FromJoin) );
+ return WRC_Continue;
+ }
+ pConst = pWalker->u.pConst;
+ for(i=0; i<pConst->nConst; i++){
+ Expr *pColumn = pConst->apExpr[i*2];
+ if( pColumn==pExpr ) continue;
+ if( pColumn->iTable!=pExpr->iTable ) continue;
+ if( pColumn->iColumn!=pExpr->iColumn ) continue;
+ /* A match is found. Add the EP_FixedCol property */
+ pConst->nChng++;
+ ExprClearProperty(pExpr, EP_Leaf);
+ ExprSetProperty(pExpr, EP_FixedCol);
+ assert( pExpr->pLeft==0 );
+ pExpr->pLeft = sqlite3ExprDup(pConst->pParse->db, pConst->apExpr[i*2+1], 0);
+ break;
+ }
+ return WRC_Prune;
+}
+
+/*
+** The WHERE-clause constant propagation optimization.
+**
+** If the WHERE clause contains terms of the form COLUMN=CONSTANT or
+** CONSTANT=COLUMN that are top-level AND-connected terms that are not
+** part of an ON clause from a LEFT JOIN, then throughout the query
+** replace all other occurrences of COLUMN with CONSTANT.
+**
+** For example, the query:
+**
+** SELECT * FROM t1, t2, t3 WHERE t1.a=39 AND t2.b=t1.a AND t3.c=t2.b
+**
+** Is transformed into
+**
+** SELECT * FROM t1, t2, t3 WHERE t1.a=39 AND t2.b=39 AND t3.c=39
+**
+** Return true if any transformations were made and false if not.
+**
+** Implementation note: Constant propagation is tricky due to affinity
+** and collating sequence interactions. Consider this example:
+**
+** CREATE TABLE t1(a INT,b TEXT);
+** INSERT INTO t1 VALUES(123,'0123');
+** SELECT * FROM t1 WHERE a=123 AND b=a;
+** SELECT * FROM t1 WHERE a=123 AND b=123;
+**
+** The two SELECT statements above should return different answers. b=a
+** is always true because the comparison uses numeric affinity, but b=123
+** is false because it uses text affinity and '0123' is not the same as '123'.
+** To work around this, the expression tree is not actually changed from
+** "b=a" to "b=123" but rather the "a" in "b=a" is tagged with EP_FixedCol
+** and the "123" value is hung off of the pLeft pointer. Code generator
+** routines know to generate the constant "123" instead of looking up the
+** column value. Also, to avoid collation problems, this optimization is
+** only attempted if the "a=123" term uses the default BINARY collation.
+*/
+static int propagateConstants(
+ Parse *pParse, /* The parsing context */
+ Select *p /* The query in which to propagate constants */
+){
+ WhereConst x;
+ Walker w;
+ int nChng = 0;
+ x.pParse = pParse;
+ do{
+ x.nConst = 0;
+ x.nChng = 0;
+ x.apExpr = 0;
+ findConstInWhere(&x, p->pWhere);
+ if( x.nConst ){
+ memset(&w, 0, sizeof(w));
+ w.pParse = pParse;
+ w.xExprCallback = propagateConstantExprRewrite;
+ w.xSelectCallback = sqlite3SelectWalkNoop;
+ w.xSelectCallback2 = 0;
+ w.walkerDepth = 0;
+ w.u.pConst = &x;
+ sqlite3WalkExpr(&w, p->pWhere);
+ sqlite3DbFree(x.pParse->db, x.apExpr);
+ nChng += x.nChng;
+ }
+ }while( x.nChng );
+ return nChng;
+}
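+/* Editorial note (a sketch): the do/while loop above repeats until a
+** pass makes no change, because one pass can expose new constants for
+** the next. In the example from the header comment,
+**
+**     WHERE t1.a=39 AND t2.b=t1.a AND t3.c=t2.b
+**
+** the first pass fixes t1.a at 39, which makes t2.b=t1.a a usable
+** COLUMN=CONSTANT term on the second pass, and likewise for t3.c on
+** the pass after that.
+*/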
+
+#if !defined(SQLITE_OMIT_SUBQUERY) || !defined(SQLITE_OMIT_VIEW)
+/*
+** Make copies of relevant WHERE clause terms of the outer query into
+** the WHERE clause of subquery. Example:
+**
+** SELECT * FROM (SELECT a AS x, c-d AS y FROM t1) WHERE x=5 AND y=10;
+**
+** Transformed into:
+**
+** SELECT * FROM (SELECT a AS x, c-d AS y FROM t1 WHERE a=5 AND c-d=10)
+** WHERE x=5 AND y=10;
+**
+** The hope is that the terms added to the inner query will make it more
+** efficient.
+**
+** Do not attempt this optimization if:
+**
+** (1) (** This restriction was removed on 2017-09-29. We used to
+** disallow this optimization for aggregate subqueries, but now
+** it is allowed by putting the extra terms on the HAVING clause.
+** The added HAVING clause is pointless if the subquery lacks
+** a GROUP BY clause. But such a HAVING clause is also harmless
+** so there does not appear to be any reason to add extra logic
+** to suppress it. **)
+**
+** (2) The inner query is the recursive part of a common table expression.
+**
+** (3) The inner query has a LIMIT clause (since the changes to the WHERE
+** clause would change the meaning of the LIMIT).
+**
+** (4) The inner query is the right operand of a LEFT JOIN and the
+** expression to be pushed down does not come from the ON clause
+** on that LEFT JOIN.
+**
+** (5) The WHERE clause expression originates in the ON or USING clause
+** of a LEFT JOIN where iCursor is not the right-hand table of that
+** left join. An example:
+**
+** SELECT *
+** FROM (SELECT 1 AS a1 UNION ALL SELECT 2) AS aa
+** JOIN (SELECT 1 AS b2 UNION ALL SELECT 2) AS bb ON (a1=b2)
+** LEFT JOIN (SELECT 8 AS c3 UNION ALL SELECT 9) AS cc ON (b2=2);
+**
+** The correct answer is three rows: (1,1,NULL),(2,2,8),(2,2,9).
+** But if the (b2=2) term were to be pushed down into the bb subquery,
+** then the (1,1,NULL) row would be suppressed.
+**
+** (6) The inner query features one or more window-functions (since
+** changes to the WHERE clause of the inner query could change the
+** window over which window functions are calculated).
+**
+** Return 0 if no changes are made and non-zero if one or more WHERE clause
+** terms are duplicated into the subquery.
+*/
+static int pushDownWhereTerms(
+ Parse *pParse, /* Parse context (for malloc() and error reporting) */
+ Select *pSubq, /* The subquery whose WHERE clause is to be augmented */
+ Expr *pWhere, /* The WHERE clause of the outer query */
+ int iCursor, /* Cursor number of the subquery */
+ int isLeftJoin /* True if pSubq is the right term of a LEFT JOIN */
+){
+ Expr *pNew;
+ int nChng = 0;
+ Select *pSel;
+ if( pWhere==0 ) return 0;
+ if( pSubq->selFlags & SF_Recursive ) return 0; /* restriction (2) */
+
+#ifndef SQLITE_OMIT_WINDOWFUNC
+ for(pSel=pSubq; pSel; pSel=pSel->pPrior){
+ if( pSel->pWin ) return 0; /* restriction (6) */
+ }
+#endif
+
+#ifdef SQLITE_DEBUG
+ /* Only the first term of a compound can have a WITH clause. But make
+ ** sure no other terms are marked SF_Recursive in case something changes
+ ** in the future.
+ */
+ {
+ Select *pX;
+ for(pX=pSubq; pX; pX=pX->pPrior){
+ assert( (pX->selFlags & (SF_Recursive))==0 );
+ }
+ }
+#endif
+
+ if( pSubq->pLimit!=0 ){
+ return 0; /* restriction (3) */
+ }
+ while( pWhere->op==TK_AND ){
+ nChng += pushDownWhereTerms(pParse, pSubq, pWhere->pRight,
+ iCursor, isLeftJoin);
+ pWhere = pWhere->pLeft;
+ }
+ if( isLeftJoin
+ && (ExprHasProperty(pWhere,EP_FromJoin)==0
+ || pWhere->iRightJoinTable!=iCursor)
+ ){
+ return 0; /* restriction (4) */
+ }
+ if( ExprHasProperty(pWhere,EP_FromJoin) && pWhere->iRightJoinTable!=iCursor ){
+ return 0; /* restriction (5) */
+ }
+ if( sqlite3ExprIsTableConstant(pWhere, iCursor) ){
+ nChng++;
+ while( pSubq ){
+ SubstContext x;
+ pNew = sqlite3ExprDup(pParse->db, pWhere, 0);
+ unsetJoinExpr(pNew, -1);
+ x.pParse = pParse;
+ x.iTable = iCursor;
+ x.iNewTable = iCursor;
+ x.isLeftJoin = 0;
+ x.pEList = pSubq->pEList;
+ pNew = substExpr(&x, pNew);
+ if( pSubq->selFlags & SF_Aggregate ){
+ pSubq->pHaving = sqlite3ExprAnd(pParse, pSubq->pHaving, pNew);
+ }else{
+ pSubq->pWhere = sqlite3ExprAnd(pParse, pSubq->pWhere, pNew);
+ }
+ pSubq = pSubq->pPrior;
+ }
+ }
+ return nChng;
+}
+#endif /* !defined(SQLITE_OMIT_SUBQUERY) || !defined(SQLITE_OMIT_VIEW) */
+
+/*
+** The pFunc is the only aggregate function in the query. Check to see
+** if the query is a candidate for the min/max optimization.
+**
+** If the query is a candidate for the min/max optimization, then set
+** *ppMinMax to be an ORDER BY clause to be used for the optimization
+** and return either WHERE_ORDERBY_MIN or WHERE_ORDERBY_MAX depending on
+** whether pFunc is a min() or max() function.
+**
+** If the query is not a candidate for the min/max optimization, return
+** WHERE_ORDERBY_NORMAL (which must be zero).
+**
+** This routine must be called after aggregate functions have been
+** located but before their arguments have been subjected to aggregate
+** analysis.
+*/
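+/* Editorial sketch (illustrative only): for
+**
+**     SELECT min(x) FROM t1;
+**
+** this routine sets *ppMinMax to an ORDER BY list equivalent to "x"
+** (tagged KEYINFO_ORDER_BIGNULL when x may be NULL) and returns
+** WHERE_ORDERBY_MIN, so the planner can satisfy the aggregate by
+** reading a single row from a suitably ordered scan.
+*/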
+static u8 minMaxQuery(sqlite3 *db, Expr *pFunc, ExprList **ppMinMax){
+ int eRet = WHERE_ORDERBY_NORMAL; /* Return value */
+ ExprList *pEList = pFunc->x.pList; /* Arguments to agg function */
+ const char *zFunc; /* Name of aggregate function pFunc */
+ ExprList *pOrderBy;
+ u8 sortFlags = 0;
+
+ assert( *ppMinMax==0 );
+ assert( pFunc->op==TK_AGG_FUNCTION );
+ assert( !IsWindowFunc(pFunc) );
+ if( pEList==0 || pEList->nExpr!=1 || ExprHasProperty(pFunc, EP_WinFunc) ){
+ return eRet;
+ }
+ zFunc = pFunc->u.zToken;
+ if( sqlite3StrICmp(zFunc, "min")==0 ){
+ eRet = WHERE_ORDERBY_MIN;
+ if( sqlite3ExprCanBeNull(pEList->a[0].pExpr) ){
+ sortFlags = KEYINFO_ORDER_BIGNULL;
+ }
+ }else if( sqlite3StrICmp(zFunc, "max")==0 ){
+ eRet = WHERE_ORDERBY_MAX;
+ sortFlags = KEYINFO_ORDER_DESC;
+ }else{
+ return eRet;
+ }
+ *ppMinMax = pOrderBy = sqlite3ExprListDup(db, pEList, 0);
+ assert( pOrderBy!=0 || db->mallocFailed );
+ if( pOrderBy ) pOrderBy->a[0].sortFlags = sortFlags;
+ return eRet;
+}
+
+/*
+** The select statement passed as the first argument is an aggregate query.
+** The second argument is the associated aggregate-info object. This
+** function tests if the SELECT is of the form:
+**
+** SELECT count(*) FROM <tbl>
+**
+** where <tbl> is a database table, not a sub-select or view. If the query
+** does match this pattern, then a pointer to the Table object representing
+** <tbl> is returned. Otherwise, 0 is returned.
+*/
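+/* Editorial sketch: "SELECT count(*) FROM t1" matches and returns the
+** Table object for t1. By contrast, "SELECT count(*) FROM t1 WHERE a>0"
+** (a WHERE clause), "SELECT count(a) FROM t1" (not the zero-argument
+** count), and "SELECT count(*) FROM v1" (a view) all return 0.
+*/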
+static Table *isSimpleCount(Select *p, AggInfo *pAggInfo){
+ Table *pTab;
+ Expr *pExpr;
+
+ assert( !p->pGroupBy );
+
+ if( p->pWhere || p->pEList->nExpr!=1
+ || p->pSrc->nSrc!=1 || p->pSrc->a[0].pSelect
+ ){
+ return 0;
+ }
+ pTab = p->pSrc->a[0].pTab;
+ pExpr = p->pEList->a[0].pExpr;
+ assert( pTab && !pTab->pSelect && pExpr );
+
+ if( IsVirtual(pTab) ) return 0;
+ if( pExpr->op!=TK_AGG_FUNCTION ) return 0;
+ if( NEVER(pAggInfo->nFunc==0) ) return 0;
+ if( (pAggInfo->aFunc[0].pFunc->funcFlags&SQLITE_FUNC_COUNT)==0 ) return 0;
+ if( ExprHasProperty(pExpr, EP_Distinct|EP_WinFunc) ) return 0;
+
+ return pTab;
+}
+
+/*
+** If the source-list item passed as an argument was augmented with an
+** INDEXED BY clause, then try to locate the specified index. If there
+** was such a clause and the named index cannot be found, return
+** SQLITE_ERROR and leave an error in pParse. Otherwise, populate
+** pFrom->pIndex and return SQLITE_OK.
+*/
+SQLITE_PRIVATE int sqlite3IndexedByLookup(Parse *pParse, struct SrcList_item *pFrom){
+ if( pFrom->pTab && pFrom->fg.isIndexedBy ){
+ Table *pTab = pFrom->pTab;
+ char *zIndexedBy = pFrom->u1.zIndexedBy;
+ Index *pIdx;
+ for(pIdx=pTab->pIndex;
+ pIdx && sqlite3StrICmp(pIdx->zName, zIndexedBy);
+ pIdx=pIdx->pNext
+ );
+ if( !pIdx ){
+ sqlite3ErrorMsg(pParse, "no such index: %s", zIndexedBy, 0);
+ pParse->checkSchema = 1;
+ return SQLITE_ERROR;
+ }
+ pFrom->pIBIndex = pIdx;
+ }
+ return SQLITE_OK;
+}
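+/* Editorial usage sketch: for "SELECT * FROM t1 INDEXED BY i1 WHERE a=1",
+** this routine walks t1's index list looking for one named "i1"; on a
+** match it sets pFrom->pIBIndex, and otherwise it reports
+** "no such index: i1" and returns SQLITE_ERROR.
+*/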
+/*
+** Detect compound SELECT statements that use an ORDER BY clause with
+** an alternative collating sequence.
+**
+** SELECT ... FROM t1 EXCEPT SELECT ... FROM t2 ORDER BY .. COLLATE ...
+**
+** These are rewritten as a subquery:
+**
+** SELECT * FROM (SELECT ... FROM t1 EXCEPT SELECT ... FROM t2)
+** ORDER BY ... COLLATE ...
+**
+** This transformation is necessary because the multiSelectOrderBy() routine
+** above that generates the code for a compound SELECT with an ORDER BY clause
+** uses a merge algorithm that requires the same collating sequence on the
+** result columns as on the ORDER BY clause. See ticket
+** http://www.sqlite.org/src/info/6709574d2a
+**
+** This transformation is only needed for EXCEPT, INTERSECT, and UNION.
+** The UNION ALL operator works fine with multiSelectOrderBy() even when
+** there are COLLATE terms in the ORDER BY.
+*/
+static int convertCompoundSelectToSubquery(Walker *pWalker, Select *p){
+ int i;
+ Select *pNew;
+ Select *pX;
+ sqlite3 *db;
+ struct ExprList_item *a;
+ SrcList *pNewSrc;
+ Parse *pParse;
+ Token dummy;
+
+ if( p->pPrior==0 ) return WRC_Continue;
+ if( p->pOrderBy==0 ) return WRC_Continue;
+ for(pX=p; pX && (pX->op==TK_ALL || pX->op==TK_SELECT); pX=pX->pPrior){}
+ if( pX==0 ) return WRC_Continue;
+ a = p->pOrderBy->a;
+#ifndef SQLITE_OMIT_WINDOWFUNC
+ /* If iOrderByCol is already non-zero, then it has already been matched
+ ** to a result column of the SELECT statement. This occurs when the
+ ** SELECT is rewritten for window-functions processing and then passed
+** to sqlite3SelectPrep() and similar routines a second time. The rewriting done
+ ** by this function is not required in this case. */
+ if( a[0].u.x.iOrderByCol ) return WRC_Continue;
+#endif
+ for(i=p->pOrderBy->nExpr-1; i>=0; i--){
+ if( a[i].pExpr->flags & EP_Collate ) break;
+ }
+ if( i<0 ) return WRC_Continue;
+
+ /* If we reach this point, that means the transformation is required. */
+
+ pParse = pWalker->pParse;
+ db = pParse->db;
+ pNew = sqlite3DbMallocZero(db, sizeof(*pNew) );
+ if( pNew==0 ) return WRC_Abort;
+ memset(&dummy, 0, sizeof(dummy));
+ pNewSrc = sqlite3SrcListAppendFromTerm(pParse,0,0,0,&dummy,pNew,0,0);
+ if( pNewSrc==0 ) return WRC_Abort;
+ *pNew = *p;
+ p->pSrc = pNewSrc;
+ p->pEList = sqlite3ExprListAppend(pParse, 0, sqlite3Expr(db, TK_ASTERISK, 0));
+ p->op = TK_SELECT;
+ p->pWhere = 0;
+ pNew->pGroupBy = 0;
+ pNew->pHaving = 0;
+ pNew->pOrderBy = 0;
+ p->pPrior = 0;
+ p->pNext = 0;
+ p->pWith = 0;
+#ifndef SQLITE_OMIT_WINDOWFUNC
+ p->pWinDefn = 0;
+#endif
+ p->selFlags &= ~SF_Compound;
+ assert( (p->selFlags & SF_Converted)==0 );
+ p->selFlags |= SF_Converted;
+ assert( pNew->pPrior!=0 );
+ pNew->pPrior->pNext = pNew;
+ pNew->pLimit = 0;
+ return WRC_Continue;
+}
+
+/*
+** Check to see if the FROM clause term pFrom has table-valued function
+** arguments. If it does, leave an error message in pParse and return
+** non-zero, since pFrom is not allowed to be a table-valued function.
+*/
+static int cannotBeFunction(Parse *pParse, struct SrcList_item *pFrom){
+ if( pFrom->fg.isTabFunc ){
+ sqlite3ErrorMsg(pParse, "'%s' is not a function", pFrom->zName);
+ return 1;
+ }
+ return 0;
+}
+
+#ifndef SQLITE_OMIT_CTE
+/*
+** Argument pWith (which may be NULL) points to a linked list of nested
+** WITH contexts, from inner to outermost. If the table identified by
+** FROM clause element pItem is really a common-table-expression (CTE)
+** then return a pointer to the CTE definition for that table. Otherwise
+** return NULL.
+**
+** If a non-NULL value is returned, set *ppContext to point to the With
+** object that the returned CTE belongs to.
+*/
+static struct Cte *searchWith(
+ With *pWith, /* Current innermost WITH clause */
+ struct SrcList_item *pItem, /* FROM clause element to resolve */
+ With **ppContext /* OUT: WITH clause return value belongs to */
+){
+ const char *zName;
+ if( pItem->zDatabase==0 && (zName = pItem->zName)!=0 ){
+ With *p;
+ for(p=pWith; p; p=p->pOuter){
+ int i;
+ for(i=0; i<p->nCte; i++){
+ if( sqlite3StrICmp(zName, p->a[i].zName)==0 ){
+ *ppContext = p;
+ return &p->a[i];
+ }
+ }
+ }
+ }
+ return 0;
+}
+
+/* The code generator maintains a stack of active WITH clauses
+** with the inner-most WITH clause being at the top of the stack.
+**
+** This routine pushes the WITH clause passed as the second argument
+** onto the top of the stack. If argument bFree is true, then this
+** WITH clause will never be popped from the stack. In this case it
+** should be freed along with the Parse object. In other cases, when
+** bFree==0, the With object will be freed along with the SELECT
+** statement with which it is associated.
+*/
+SQLITE_PRIVATE void sqlite3WithPush(Parse *pParse, With *pWith, u8 bFree){
+ assert( bFree==0 || (pParse->pWith==0 && pParse->pWithToFree==0) );
+ if( pWith ){
+ assert( pParse->pWith!=pWith );
+ pWith->pOuter = pParse->pWith;
+ pParse->pWith = pWith;
+ if( bFree ) pParse->pWithToFree = pWith;
+ }
+}
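+/* Editorial sketch: for "WITH a AS (...), b AS (...) SELECT * FROM a",
+** the parser pushes one With object holding both {a, b}. A nested WITH
+** inside a subquery pushes a second object whose pOuter points at the
+** first, which is why searchWith() above walks the pOuter chain from
+** the innermost scope outward.
+*/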
+
+/*
+** This function checks if argument pFrom refers to a CTE declared by
+** a WITH clause on the stack currently maintained by the parser. It
+** also checks, when a CTE expression is currently being processed,
+** whether pFrom is a recursive reference to that CTE.
+**
+** If pFrom falls into either of the two categories above, pFrom->pTab
+** and other fields are populated accordingly. The caller should check
+** (pFrom->pTab!=0) to determine whether or not a successful match
+** was found.
+**
+** Whether or not a match is found, SQLITE_OK is returned if no error
+** occurs. If an error does occur, an error message is stored in the
+** parser and some error code other than SQLITE_OK is returned.
+*/
+static int withExpand(
+ Walker *pWalker,
+ struct SrcList_item *pFrom
+){
+ Parse *pParse = pWalker->pParse;
+ sqlite3 *db = pParse->db;
+ struct Cte *pCte; /* Matched CTE (or NULL if no match) */
+ With *pWith; /* WITH clause that pCte belongs to */
+
+ assert( pFrom->pTab==0 );
+ if( pParse->nErr ){
+ return SQLITE_ERROR;
+ }
+
+ pCte = searchWith(pParse->pWith, pFrom, &pWith);
+ if( pCte ){
+ Table *pTab;
+ ExprList *pEList;
+ Select *pSel;
+ Select *pLeft; /* Left-most SELECT statement */
+ int bMayRecursive; /* True if compound joined by UNION [ALL] */
+ With *pSavedWith; /* Initial value of pParse->pWith */
+
+ /* If pCte->zCteErr is non-NULL at this point, then this is an illegal
+ ** recursive reference to CTE pCte. Leave an error in pParse and return
+ ** early. If pCte->zCteErr is NULL, then this is not a recursive reference.
+ ** In this case, proceed. */
+ if( pCte->zCteErr ){
+ sqlite3ErrorMsg(pParse, pCte->zCteErr, pCte->zName);
+ return SQLITE_ERROR;
+ }
+ if( cannotBeFunction(pParse, pFrom) ) return SQLITE_ERROR;
+
+ assert( pFrom->pTab==0 );
+ pFrom->pTab = pTab = sqlite3DbMallocZero(db, sizeof(Table));
+ if( pTab==0 ) return WRC_Abort;
+ pTab->nTabRef = 1;
+ pTab->zName = sqlite3DbStrDup(db, pCte->zName);
+ pTab->iPKey = -1;
+ pTab->nRowLogEst = 200; assert( 200==sqlite3LogEst(1048576) );
+ pTab->tabFlags |= TF_Ephemeral | TF_NoVisibleRowid;
+ pFrom->pSelect = sqlite3SelectDup(db, pCte->pSelect, 0);
+ if( db->mallocFailed ) return SQLITE_NOMEM_BKPT;
+ assert( pFrom->pSelect );
+
+ /* Check if this is a recursive CTE. */
+ pSel = pFrom->pSelect;
+ bMayRecursive = ( pSel->op==TK_ALL || pSel->op==TK_UNION );
+ if( bMayRecursive ){
+ int i;
+ SrcList *pSrc = pFrom->pSelect->pSrc;
+ for(i=0; i<pSrc->nSrc; i++){
+ struct SrcList_item *pItem = &pSrc->a[i];
+ if( pItem->zDatabase==0
+ && pItem->zName!=0
+ && 0==sqlite3StrICmp(pItem->zName, pCte->zName)
+ ){
+ pItem->pTab = pTab;
+ pItem->fg.isRecursive = 1;
+ pTab->nTabRef++;
+ pSel->selFlags |= SF_Recursive;
+ }
+ }
+ }
+
+ /* Only one recursive reference is permitted. */
+ if( pTab->nTabRef>2 ){
+ sqlite3ErrorMsg(
+ pParse, "multiple references to recursive table: %s", pCte->zName
+ );
+ return SQLITE_ERROR;
+ }
+ assert( pTab->nTabRef==1 ||
+ ((pSel->selFlags&SF_Recursive) && pTab->nTabRef==2 ));
+
+ pCte->zCteErr = "circular reference: %s";
+ pSavedWith = pParse->pWith;
+ pParse->pWith = pWith;
+ if( bMayRecursive ){
+ Select *pPrior = pSel->pPrior;
+ assert( pPrior->pWith==0 );
+ pPrior->pWith = pSel->pWith;
+ sqlite3WalkSelect(pWalker, pPrior);
+ pPrior->pWith = 0;
+ }else{
+ sqlite3WalkSelect(pWalker, pSel);
+ }
+ pParse->pWith = pWith;
+
+ for(pLeft=pSel; pLeft->pPrior; pLeft=pLeft->pPrior);
+ pEList = pLeft->pEList;
+ if( pCte->pCols ){
+ if( pEList && pEList->nExpr!=pCte->pCols->nExpr ){
+ sqlite3ErrorMsg(pParse, "table %s has %d values for %d columns",
+ pCte->zName, pEList->nExpr, pCte->pCols->nExpr
+ );
+ pParse->pWith = pSavedWith;
+ return SQLITE_ERROR;
+ }
+ pEList = pCte->pCols;
+ }
+
+ sqlite3ColumnsFromExprList(pParse, pEList, &pTab->nCol, &pTab->aCol);
+ if( bMayRecursive ){
+ if( pSel->selFlags & SF_Recursive ){
+ pCte->zCteErr = "multiple recursive references: %s";
+ }else{
+ pCte->zCteErr = "recursive reference in a subquery: %s";
+ }
+ sqlite3WalkSelect(pWalker, pSel);
+ }
+ pCte->zCteErr = 0;
+ pParse->pWith = pSavedWith;
+ }
+
+ return SQLITE_OK;
+}
+#endif
+
+#ifndef SQLITE_OMIT_CTE
+/*
+** If the SELECT passed as the second argument has an associated WITH
+** clause, pop it from the stack stored as part of the Parse object.
+**
+** This function is used as the xSelectCallback2() callback by
+** sqlite3SelectExpand() when walking a SELECT tree to resolve table
+** names and other FROM clause elements.
+*/
+static void selectPopWith(Walker *pWalker, Select *p){
+ Parse *pParse = pWalker->pParse;
+ if( OK_IF_ALWAYS_TRUE(pParse->pWith) && p->pPrior==0 ){
+ With *pWith = findRightmost(p)->pWith;
+ if( pWith!=0 ){
+ assert( pParse->pWith==pWith || pParse->nErr );
+ pParse->pWith = pWith->pOuter;
+ }
+ }
+}
+#else
+#define selectPopWith 0
+#endif
+
+/*
+** The SrcList_item structure passed as the second argument represents a
+** sub-query in the FROM clause of a SELECT statement. This function
+** allocates and populates the SrcList_item.pTab object. If successful,
+** SQLITE_OK is returned. Otherwise, if an OOM error is encountered,
+** SQLITE_NOMEM is returned.
+*/
+SQLITE_PRIVATE int sqlite3ExpandSubquery(Parse *pParse, struct SrcList_item *pFrom){
+ Select *pSel = pFrom->pSelect;
+ Table *pTab;
+
+ assert( pSel );
+ pFrom->pTab = pTab = sqlite3DbMallocZero(pParse->db, sizeof(Table));
+ if( pTab==0 ) return SQLITE_NOMEM;
+ pTab->nTabRef = 1;
+ if( pFrom->zAlias ){
+ pTab->zName = sqlite3DbStrDup(pParse->db, pFrom->zAlias);
+ }else{
+ pTab->zName = sqlite3MPrintf(pParse->db, "subquery_%u", pSel->selId);
+ }
+ while( pSel->pPrior ){ pSel = pSel->pPrior; }
+ sqlite3ColumnsFromExprList(pParse, pSel->pEList,&pTab->nCol,&pTab->aCol);
+ pTab->iPKey = -1;
+ pTab->nRowLogEst = 200; assert( 200==sqlite3LogEst(1048576) );
+ pTab->tabFlags |= TF_Ephemeral;
+
+ return pParse->nErr ? SQLITE_ERROR : SQLITE_OK;
+}
+
+/*
+** This routine is a Walker callback for "expanding" a SELECT statement.
+** "Expanding" means to do the following:
+**
+** (1) Make sure VDBE cursor numbers have been assigned to every
+** element of the FROM clause.
+**
+** (2) Fill in the pTabList->a[].pTab fields in the SrcList that
+** defines the FROM clause. When views appear in the FROM clause,
+** fill pTabList->a[].pSelect with a copy of the SELECT statement
+** that implements the view. A copy is made of the view's SELECT
+** statement so that we can freely modify or delete that statement
+** without worrying about messing up the persistent representation
+** of the view.
+**
+** (3) Add terms to the WHERE clause to accommodate the NATURAL keyword
+** on joins and the ON and USING clause of joins.
+**
+** (4) Scan the list of columns in the result set (pEList) looking
+** for instances of the "*" operator or the TABLE.* operator.
+** If found, expand each "*" to be every column in every table
+** and TABLE.* to be every column in TABLE.
+**
+*/
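+/* Editorial sketch for step (4): if t1 has columns (a,b) and t2 has the
+** single column (c), then "SELECT * FROM t1, t2" is expanded to
+** "SELECT t1.a, t1.b, t2.c FROM t1, t2", while "SELECT t2.* FROM t1, t2"
+** expands to just "SELECT t2.c FROM t1, t2".
+*/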
+static int selectExpander(Walker *pWalker, Select *p){
+ Parse *pParse = pWalker->pParse;
+ int i, j, k;
+ SrcList *pTabList;
+ ExprList *pEList;
+ struct SrcList_item *pFrom;
+ sqlite3 *db = pParse->db;
+ Expr *pE, *pRight, *pExpr;
+ u16 selFlags = p->selFlags;
+ u32 elistFlags = 0;
+
+ p->selFlags |= SF_Expanded;
+ if( db->mallocFailed ){
+ return WRC_Abort;
+ }
+ assert( p->pSrc!=0 );
+ if( (selFlags & SF_Expanded)!=0 ){
+ return WRC_Prune;
+ }
+ if( pWalker->eCode ){
+ /* Renumber selId because it has been copied from a view */
+ p->selId = ++pParse->nSelect;
+ }
+ pTabList = p->pSrc;
+ pEList = p->pEList;
+ sqlite3WithPush(pParse, p->pWith, 0);
+
+ /* Make sure cursor numbers have been assigned to all entries in
+ ** the FROM clause of the SELECT statement.
+ */
+ sqlite3SrcListAssignCursors(pParse, pTabList);
+
+ /* Look up every table named in the FROM clause of the select. If
+ ** an entry of the FROM clause is a subquery instead of a table or view,
+ ** then create a transient table structure to describe the subquery.
+ */
+ for(i=0, pFrom=pTabList->a; i<pTabList->nSrc; i++, pFrom++){
+ Table *pTab;
+ assert( pFrom->fg.isRecursive==0 || pFrom->pTab!=0 );
+ if( pFrom->pTab ) continue;
+ assert( pFrom->fg.isRecursive==0 );
+#ifndef SQLITE_OMIT_CTE
+ if( withExpand(pWalker, pFrom) ) return WRC_Abort;
+ if( pFrom->pTab ) {} else
+#endif
+ if( pFrom->zName==0 ){
+#ifndef SQLITE_OMIT_SUBQUERY
+ Select *pSel = pFrom->pSelect;
+ /* A sub-query in the FROM clause of a SELECT */
+ assert( pSel!=0 );
+ assert( pFrom->pTab==0 );
+ if( sqlite3WalkSelect(pWalker, pSel) ) return WRC_Abort;
+ if( sqlite3ExpandSubquery(pParse, pFrom) ) return WRC_Abort;
+#endif
+ }else{
+ /* An ordinary table or view name in the FROM clause */
+ assert( pFrom->pTab==0 );
+ pFrom->pTab = pTab = sqlite3LocateTableItem(pParse, 0, pFrom);
+ if( pTab==0 ) return WRC_Abort;
+ if( pTab->nTabRef>=0xffff ){
+ sqlite3ErrorMsg(pParse, "too many references to \"%s\": max 65535",
+ pTab->zName);
+ pFrom->pTab = 0;
+ return WRC_Abort;
+ }
+ pTab->nTabRef++;
+ if( !IsVirtual(pTab) && cannotBeFunction(pParse, pFrom) ){
+ return WRC_Abort;
+ }
+#if !defined(SQLITE_OMIT_VIEW) || !defined(SQLITE_OMIT_VIRTUALTABLE)
+ if( IsVirtual(pTab) || pTab->pSelect ){
+ i16 nCol;
+ u8 eCodeOrig = pWalker->eCode;
+ if( sqlite3ViewGetColumnNames(pParse, pTab) ) return WRC_Abort;
+ assert( pFrom->pSelect==0 );
+ if( pTab->pSelect && (db->flags & SQLITE_EnableView)==0 ){
+ sqlite3ErrorMsg(pParse, "access to view \"%s\" prohibited",
+ pTab->zName);
+ }
+#ifndef SQLITE_OMIT_VIRTUALTABLE
+ if( IsVirtual(pTab)
+ && pFrom->fg.fromDDL
+ && ALWAYS(pTab->pVTable!=0)
+ && pTab->pVTable->eVtabRisk > ((db->flags & SQLITE_TrustedSchema)!=0)
+ ){
+ sqlite3ErrorMsg(pParse, "unsafe use of virtual table \"%s\"",
+ pTab->zName);
+ }
+#endif
+ pFrom->pSelect = sqlite3SelectDup(db, pTab->pSelect, 0);
+ nCol = pTab->nCol;
+ pTab->nCol = -1;
+ pWalker->eCode = 1; /* Turn on Select.selId renumbering */
+ sqlite3WalkSelect(pWalker, pFrom->pSelect);
+ pWalker->eCode = eCodeOrig;
+ pTab->nCol = nCol;
+ }
+#endif
+ }
+
+ /* Locate the index named by the INDEXED BY clause, if any. */
+ if( sqlite3IndexedByLookup(pParse, pFrom) ){
+ return WRC_Abort;
+ }
+ }
+
+ /* Process NATURAL keywords, and ON and USING clauses of joins.
+ */
+ if( pParse->nErr || db->mallocFailed || sqliteProcessJoin(pParse, p) ){
+ return WRC_Abort;
+ }
+
+ /* For every "*" that occurs in the column list, insert the names of
+ ** all columns in all tables. And for every TABLE.* insert the names
+ ** of all columns in TABLE. The parser inserted a special expression
+ ** with the TK_ASTERISK operator for each "*" that it found in the column
+ ** list. The following code just has to locate the TK_ASTERISK
+ ** expressions and expand each one to the list of all columns in
+ ** all tables.
+ **
+ ** The first loop just checks to see if there are any "*" operators
+ ** that need expanding.
+ */
+ for(k=0; k<pEList->nExpr; k++){
+ pE = pEList->a[k].pExpr;
+ if( pE->op==TK_ASTERISK ) break;
+ assert( pE->op!=TK_DOT || pE->pRight!=0 );
+ assert( pE->op!=TK_DOT || (pE->pLeft!=0 && pE->pLeft->op==TK_ID) );
+ if( pE->op==TK_DOT && pE->pRight->op==TK_ASTERISK ) break;
+ elistFlags |= pE->flags;
+ }
+ if( k<pEList->nExpr ){
+ /*
+ ** If we get here it means the result set contains one or more "*"
+ ** operators that need to be expanded. Loop through each expression
+ ** in the result set and expand them one by one.
+ */
+ struct ExprList_item *a = pEList->a;
+ ExprList *pNew = 0;
+ int flags = pParse->db->flags;
+ int longNames = (flags & SQLITE_FullColNames)!=0
+ && (flags & SQLITE_ShortColNames)==0;
+
+ for(k=0; k<pEList->nExpr; k++){
+ pE = a[k].pExpr;
+ elistFlags |= pE->flags;
+ pRight = pE->pRight;
+ assert( pE->op!=TK_DOT || pRight!=0 );
+ if( pE->op!=TK_ASTERISK
+ && (pE->op!=TK_DOT || pRight->op!=TK_ASTERISK)
+ ){
+ /* This particular expression does not need to be expanded.
+ */
+ pNew = sqlite3ExprListAppend(pParse, pNew, a[k].pExpr);
+ if( pNew ){
+ pNew->a[pNew->nExpr-1].zEName = a[k].zEName;
+ pNew->a[pNew->nExpr-1].eEName = a[k].eEName;
+ a[k].zEName = 0;
+ }
+ a[k].pExpr = 0;
+ }else{
+ /* This expression is a "*" or a "TABLE.*" and needs to be
+ ** expanded. */
+ int tableSeen = 0; /* Set to 1 when TABLE matches */
+ char *zTName = 0; /* text of name of TABLE */
+ if( pE->op==TK_DOT ){
+ assert( pE->pLeft!=0 );
+ assert( !ExprHasProperty(pE->pLeft, EP_IntValue) );
+ zTName = pE->pLeft->u.zToken;
+ }
+ for(i=0, pFrom=pTabList->a; i<pTabList->nSrc; i++, pFrom++){
+ Table *pTab = pFrom->pTab;
+ Select *pSub = pFrom->pSelect;
+ char *zTabName = pFrom->zAlias;
+ const char *zSchemaName = 0;
+ int iDb;
+ if( zTabName==0 ){
+ zTabName = pTab->zName;
+ }
+ if( db->mallocFailed ) break;
+ if( pSub==0 || (pSub->selFlags & SF_NestedFrom)==0 ){
+ pSub = 0;
+ if( zTName && sqlite3StrICmp(zTName, zTabName)!=0 ){
+ continue;
+ }
+ iDb = sqlite3SchemaToIndex(db, pTab->pSchema);
+ zSchemaName = iDb>=0 ? db->aDb[iDb].zDbSName : "*";
+ }
+ for(j=0; j<pTab->nCol; j++){
+ char *zName = pTab->aCol[j].zName;
+ char *zColname; /* The computed column name */
+ char *zToFree; /* Malloced string that needs to be freed */
+ Token sColname; /* Computed column name as a token */
+
+ assert( zName );
+ if( zTName && pSub
+ && sqlite3MatchEName(&pSub->pEList->a[j], 0, zTName, 0)==0
+ ){
+ continue;
+ }
+
+ /* If a column is marked as 'hidden', omit it from the expanded
+ ** result-set list unless the SELECT has the SF_IncludeHidden
+ ** bit set.
+ */
+ if( (p->selFlags & SF_IncludeHidden)==0
+ && IsHiddenColumn(&pTab->aCol[j])
+ ){
+ continue;
+ }
+ tableSeen = 1;
+
+ if( i>0 && zTName==0 ){
+ if( (pFrom->fg.jointype & JT_NATURAL)!=0
+ && tableAndColumnIndex(pTabList, i, zName, 0, 0, 1)
+ ){
+ /* In a NATURAL join, omit the join columns from the
+ ** table to the right of the join */
+ continue;
+ }
+ if( sqlite3IdListIndex(pFrom->pUsing, zName)>=0 ){
+ /* In a join with a USING clause, omit columns in the
+ ** using clause from the table on the right. */
+ continue;
+ }
+ }
+ pRight = sqlite3Expr(db, TK_ID, zName);
+ zColname = zName;
+ zToFree = 0;
+ if( longNames || pTabList->nSrc>1 ){
+ Expr *pLeft;
+ pLeft = sqlite3Expr(db, TK_ID, zTabName);
+ pExpr = sqlite3PExpr(pParse, TK_DOT, pLeft, pRight);
+ if( zSchemaName ){
+ pLeft = sqlite3Expr(db, TK_ID, zSchemaName);
+ pExpr = sqlite3PExpr(pParse, TK_DOT, pLeft, pExpr);
+ }
+ if( longNames ){
+ zColname = sqlite3MPrintf(db, "%s.%s", zTabName, zName);
+ zToFree = zColname;
+ }
+ }else{
+ pExpr = pRight;
+ }
+ pNew = sqlite3ExprListAppend(pParse, pNew, pExpr);
+ sqlite3TokenInit(&sColname, zColname);
+ sqlite3ExprListSetName(pParse, pNew, &sColname, 0);
+ if( pNew && (p->selFlags & SF_NestedFrom)!=0 && !IN_RENAME_OBJECT ){
+ struct ExprList_item *pX = &pNew->a[pNew->nExpr-1];
+ sqlite3DbFree(db, pX->zEName);
+ if( pSub ){
+ pX->zEName = sqlite3DbStrDup(db, pSub->pEList->a[j].zEName);
+ testcase( pX->zEName==0 );
+ }else{
+ pX->zEName = sqlite3MPrintf(db, "%s.%s.%s",
+ zSchemaName, zTabName, zColname);
+ testcase( pX->zEName==0 );
+ }
+ pX->eEName = ENAME_TAB;
+ }
+ sqlite3DbFree(db, zToFree);
+ }
+ }
+ if( !tableSeen ){
+ if( zTName ){
+ sqlite3ErrorMsg(pParse, "no such table: %s", zTName);
+ }else{
+ sqlite3ErrorMsg(pParse, "no tables specified");
+ }
+ }
+ }
+ }
+ sqlite3ExprListDelete(db, pEList);
+ p->pEList = pNew;
+ }
+ if( p->pEList ){
+ if( p->pEList->nExpr>db->aLimit[SQLITE_LIMIT_COLUMN] ){
+ sqlite3ErrorMsg(pParse, "too many columns in result set");
+ return WRC_Abort;
+ }
+ if( (elistFlags & (EP_HasFunc|EP_Subquery))!=0 ){
+ p->selFlags |= SF_ComplexResult;
+ }
+ }
+ return WRC_Continue;
+}
+
+#if SQLITE_DEBUG
+/*
+** Always assert. This xSelectCallback2 implementation is used to prove
+** that xSelectCallback2 is never invoked.
+*/
+SQLITE_PRIVATE void sqlite3SelectWalkAssert2(Walker *NotUsed, Select *NotUsed2){
+ UNUSED_PARAMETER2(NotUsed, NotUsed2);
+ assert( 0 );
+}
+#endif
+/*
+** This routine "expands" a SELECT statement and all of its subqueries.
+** For additional information on what it means to "expand" a SELECT
+** statement, see the comment on the selectExpander worker callback above.
+**
+** Expanding a SELECT statement is the first step in processing a
+** SELECT statement. The SELECT statement must be expanded before
+** name resolution is performed.
+**
+** If anything goes wrong, an error message is written into pParse.
+** The calling function can detect the problem by looking at pParse->nErr
+** and/or pParse->db->mallocFailed.
+*/
+static void sqlite3SelectExpand(Parse *pParse, Select *pSelect){
+ Walker w;
+ w.xExprCallback = sqlite3ExprWalkNoop;
+ w.pParse = pParse;
+ if( OK_IF_ALWAYS_TRUE(pParse->hasCompound) ){
+ w.xSelectCallback = convertCompoundSelectToSubquery;
+ w.xSelectCallback2 = 0;
+ sqlite3WalkSelect(&w, pSelect);
+ }
+ w.xSelectCallback = selectExpander;
+ w.xSelectCallback2 = selectPopWith;
+ w.eCode = 0;
+ sqlite3WalkSelect(&w, pSelect);
+}
+
+
+#ifndef SQLITE_OMIT_SUBQUERY
+/*
+** This is a Walker.xSelectCallback callback for the sqlite3SelectTypeInfo()
+** interface.
+**
+** For each FROM-clause subquery, add Column.zType and Column.zColl
+** information to the Table structure that represents the result set
+** of that subquery.
+**
+** The Table structure that represents the result set was constructed
+** by selectExpander() but the type and collation information was omitted
+** at that point because identifiers had not yet been resolved. This
+** routine is called after identifier resolution.
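+**
+** For example (illustrative only): for
+**
+**   SELECT x FROM (SELECT CAST(a AS TEXT) AS x FROM t1);
+**
+** this routine records TEXT as the column type of "x" in the transient
+** Table structure that describes the subquery result set.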
+*/
+static void selectAddSubqueryTypeInfo(Walker *pWalker, Select *p){
+ Parse *pParse;
+ int i;
+ SrcList *pTabList;
+ struct SrcList_item *pFrom;
+
+ assert( p->selFlags & SF_Resolved );
+ if( p->selFlags & SF_HasTypeInfo ) return;
+ p->selFlags |= SF_HasTypeInfo;
+ pParse = pWalker->pParse;
+ pTabList = p->pSrc;
+ for(i=0, pFrom=pTabList->a; i<pTabList->nSrc; i++, pFrom++){
+ Table *pTab = pFrom->pTab;
+ assert( pTab!=0 );
+ if( (pTab->tabFlags & TF_Ephemeral)!=0 ){
+ /* A sub-query in the FROM clause of a SELECT */
+ Select *pSel = pFrom->pSelect;
+ if( pSel ){
+ while( pSel->pPrior ) pSel = pSel->pPrior;
+ sqlite3SelectAddColumnTypeAndCollation(pParse, pTab, pSel,
+ SQLITE_AFF_NONE);
+ }
+ }
+ }
+}
+#endif
+
+
+/*
+** This routine adds datatype and collating sequence information to
+** the Table structures of all FROM-clause subqueries in a
+** SELECT statement.
+**
+** Use this routine after name resolution.
+*/
+static void sqlite3SelectAddTypeInfo(Parse *pParse, Select *pSelect){
+#ifndef SQLITE_OMIT_SUBQUERY
+ Walker w;
+ w.xSelectCallback = sqlite3SelectWalkNoop;
+ w.xSelectCallback2 = selectAddSubqueryTypeInfo;
+ w.xExprCallback = sqlite3ExprWalkNoop;
+ w.pParse = pParse;
+ sqlite3WalkSelect(&w, pSelect);
+#endif
+}
+
+
+/*
+** This routine sets up a SELECT statement for processing. The
+** following is accomplished:
+**
+** * VDBE Cursor numbers are assigned to all FROM-clause terms.
+** * Ephemeral Table objects are created for all FROM-clause subqueries.
+** * ON and USING clauses are shifted into WHERE statements
+** * Wildcards "*" and "TABLE.*" in result sets are expanded.
+** * Identifiers in expression are matched to tables.
+**
+** This routine acts recursively on all subqueries within the SELECT.
+*/
+SQLITE_PRIVATE void sqlite3SelectPrep(
+ Parse *pParse, /* The parser context */
+ Select *p, /* The SELECT statement being coded. */
+ NameContext *pOuterNC /* Name context for container */
+){
+ assert( p!=0 || pParse->db->mallocFailed );
+ if( pParse->db->mallocFailed ) return;
+ if( p->selFlags & SF_HasTypeInfo ) return;
+ sqlite3SelectExpand(pParse, p);
+ if( pParse->nErr || pParse->db->mallocFailed ) return;
+ sqlite3ResolveSelectNames(pParse, p, pOuterNC);
+ if( pParse->nErr || pParse->db->mallocFailed ) return;
+ sqlite3SelectAddTypeInfo(pParse, p);
+}
+
+/*
+** Reset the aggregate accumulator.
+**
+** The aggregate accumulator is a set of memory cells that hold
+** intermediate results while calculating an aggregate. This
+** routine generates code that stores NULLs in all of those memory
+** cells.
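+**
+** As an illustration (not a specification): for
+**
+**   SELECT count(DISTINCT x), sum(y) FROM t1;
+**
+** this routine emits a single OP_Null covering both accumulator
+** registers and opens an ephemeral table that is used to detect
+** duplicate values of x for the DISTINCT aggregate.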
+*/
+static void resetAccumulator(Parse *pParse, AggInfo *pAggInfo){
+ Vdbe *v = pParse->pVdbe;
+ int i;
+ struct AggInfo_func *pFunc;
+ int nReg = pAggInfo->nFunc + pAggInfo->nColumn;
+ if( nReg==0 ) return;
+ if( pParse->nErr || pParse->db->mallocFailed ) return;
+#ifdef SQLITE_DEBUG
+ /* Verify that all AggInfo registers are within the range specified by
+ ** AggInfo.mnReg..AggInfo.mxReg */
+ assert( nReg==pAggInfo->mxReg-pAggInfo->mnReg+1 );
+ for(i=0; i<pAggInfo->nColumn; i++){
+ assert( pAggInfo->aCol[i].iMem>=pAggInfo->mnReg
+ && pAggInfo->aCol[i].iMem<=pAggInfo->mxReg );
+ }
+ for(i=0; i<pAggInfo->nFunc; i++){
+ assert( pAggInfo->aFunc[i].iMem>=pAggInfo->mnReg
+ && pAggInfo->aFunc[i].iMem<=pAggInfo->mxReg );
+ }
+#endif
+ sqlite3VdbeAddOp3(v, OP_Null, 0, pAggInfo->mnReg, pAggInfo->mxReg);
+ for(pFunc=pAggInfo->aFunc, i=0; i<pAggInfo->nFunc; i++, pFunc++){
+ if( pFunc->iDistinct>=0 ){
+ Expr *pE = pFunc->pFExpr;
+ assert( !ExprHasProperty(pE, EP_xIsSelect) );
+ if( pE->x.pList==0 || pE->x.pList->nExpr!=1 ){
+ sqlite3ErrorMsg(pParse, "DISTINCT aggregates must have exactly one "
+ "argument");
+ pFunc->iDistinct = -1;
+ }else{
+ KeyInfo *pKeyInfo = sqlite3KeyInfoFromExprList(pParse, pE->x.pList,0,0);
+ sqlite3VdbeAddOp4(v, OP_OpenEphemeral, pFunc->iDistinct, 0, 0,
+ (char*)pKeyInfo, P4_KEYINFO);
+ }
+ }
+ }
+}
+
+/*
+** Invoke the OP_AggFinalize opcode for every aggregate function
+** in the AggInfo structure.
+*/
+static void finalizeAggFunctions(Parse *pParse, AggInfo *pAggInfo){
+ Vdbe *v = pParse->pVdbe;
+ int i;
+ struct AggInfo_func *pF;
+ for(i=0, pF=pAggInfo->aFunc; i<pAggInfo->nFunc; i++, pF++){
+ ExprList *pList = pF->pFExpr->x.pList;
+ assert( !ExprHasProperty(pF->pFExpr, EP_xIsSelect) );
+ sqlite3VdbeAddOp2(v, OP_AggFinal, pF->iMem, pList ? pList->nExpr : 0);
+ sqlite3VdbeAppendP4(v, pF->pFunc, P4_FUNCDEF);
+ }
+}
+
+
+/*
+** Update the accumulator memory cells for an aggregate based on
+** the current cursor position.
+**
+** If regAcc is non-zero and there are no min() or max() aggregates
+** in pAggInfo, then only populate the pAggInfo->nAccumulator accumulator
+** registers if register regAcc contains 0. The caller will take care
+** of setting and clearing regAcc.
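+**
+** Illustrative example (not normative): in
+**
+**   SELECT count(*), y FROM t1;
+**
+** the value of y only needs to be captured once, so the caller arranges
+** for regAcc to contain 0 on the first row and 1 thereafter, and the
+** code generated here skips re-populating the y accumulator on
+** subsequent rows.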
+*/
+static void updateAccumulator(Parse *pParse, int regAcc, AggInfo *pAggInfo){
+ Vdbe *v = pParse->pVdbe;
+ int i;
+ int regHit = 0;
+ int addrHitTest = 0;
+ struct AggInfo_func *pF;
+ struct AggInfo_col *pC;
+
+ pAggInfo->directMode = 1;
+ for(i=0, pF=pAggInfo->aFunc; i<pAggInfo->nFunc; i++, pF++){
+ int nArg;
+ int addrNext = 0;
+ int regAgg;
+ ExprList *pList = pF->pFExpr->x.pList;
+ assert( !ExprHasProperty(pF->pFExpr, EP_xIsSelect) );
+ assert( !IsWindowFunc(pF->pFExpr) );
+ if( ExprHasProperty(pF->pFExpr, EP_WinFunc) ){
+ Expr *pFilter = pF->pFExpr->y.pWin->pFilter;
+ if( pAggInfo->nAccumulator
+ && (pF->pFunc->funcFlags & SQLITE_FUNC_NEEDCOLL)
+ && regAcc
+ ){
+ /* If regAcc==0, then there exists some min() or max() function
+ ** without a FILTER clause that will ensure the magnet registers
+ ** are populated. */
+ if( regHit==0 ) regHit = ++pParse->nMem;
+ /* If this is the first row of the group (regAcc contains 0), clear the
+ ** "magnet" register regHit so that the accumulator registers
+ ** are populated if the FILTER clause jumps over the
+ ** invocation of min() or max() altogether. Or, if this is not
+ ** the first row (regAcc contains 1), set the magnet register so that
+ ** the accumulators are not populated unless the min()/max() is invoked
+ ** and indicates that they should be. */
+ sqlite3VdbeAddOp2(v, OP_Copy, regAcc, regHit);
+ }
+ addrNext = sqlite3VdbeMakeLabel(pParse);
+ sqlite3ExprIfFalse(pParse, pFilter, addrNext, SQLITE_JUMPIFNULL);
+ }
+ if( pList ){
+ nArg = pList->nExpr;
+ regAgg = sqlite3GetTempRange(pParse, nArg);
+ sqlite3ExprCodeExprList(pParse, pList, regAgg, 0, SQLITE_ECEL_DUP);
+ }else{
+ nArg = 0;
+ regAgg = 0;
+ }
+ if( pF->iDistinct>=0 ){
+ if( addrNext==0 ){
+ addrNext = sqlite3VdbeMakeLabel(pParse);
+ }
+ testcase( nArg==0 ); /* Error condition */
+ testcase( nArg>1 ); /* Also an error */
+ codeDistinct(pParse, pF->iDistinct, addrNext, 1, regAgg);
+ }
+ if( pF->pFunc->funcFlags & SQLITE_FUNC_NEEDCOLL ){
+ CollSeq *pColl = 0;
+ struct ExprList_item *pItem;
+ int j;
+ assert( pList!=0 ); /* pList!=0 if pF->pFunc has NEEDCOLL */
+ for(j=0, pItem=pList->a; !pColl && j<nArg; j++, pItem++){
+ pColl = sqlite3ExprCollSeq(pParse, pItem->pExpr);
+ }
+ if( !pColl ){
+ pColl = pParse->db->pDfltColl;
+ }
+ if( regHit==0 && pAggInfo->nAccumulator ) regHit = ++pParse->nMem;
+ sqlite3VdbeAddOp4(v, OP_CollSeq, regHit, 0, 0, (char *)pColl, P4_COLLSEQ);
+ }
+ sqlite3VdbeAddOp3(v, OP_AggStep, 0, regAgg, pF->iMem);
+ sqlite3VdbeAppendP4(v, pF->pFunc, P4_FUNCDEF);
+ sqlite3VdbeChangeP5(v, (u8)nArg);
+ sqlite3ReleaseTempRange(pParse, regAgg, nArg);
+ if( addrNext ){
+ sqlite3VdbeResolveLabel(v, addrNext);
+ }
+ }
+ if( regHit==0 && pAggInfo->nAccumulator ){
+ regHit = regAcc;
+ }
+ if( regHit ){
+ addrHitTest = sqlite3VdbeAddOp1(v, OP_If, regHit); VdbeCoverage(v);
+ }
+ for(i=0, pC=pAggInfo->aCol; i<pAggInfo->nAccumulator; i++, pC++){
+ sqlite3ExprCode(pParse, pC->pCExpr, pC->iMem);
+ }
+
+ pAggInfo->directMode = 0;
+ if( addrHitTest ){
+ sqlite3VdbeJumpHereOrPopInst(v, addrHitTest);
+ }
+}
+
+/*
+** Add a single OP_Explain instruction to the VDBE to explain a simple
+** count(*) query ("SELECT count(*) FROM pTab").
+*/
+#ifndef SQLITE_OMIT_EXPLAIN
+static void explainSimpleCount(
+ Parse *pParse, /* Parse context */
+ Table *pTab, /* Table being queried */
+ Index *pIdx /* Index used to optimize scan, or NULL */
+){
+ if( pParse->explain==2 ){
+ int bCover = (pIdx!=0 && (HasRowid(pTab) || !IsPrimaryKeyIndex(pIdx)));
+ sqlite3VdbeExplain(pParse, 0, "SCAN TABLE %s%s%s",
+ pTab->zName,
+ bCover ? " USING COVERING INDEX " : "",
+ bCover ? pIdx->zName : ""
+ );
+ }
+}
+#else
+# define explainSimpleCount(a,b,c)
+#endif
+
+/*
+** sqlite3WalkExpr() callback used by havingToWhere().
+**
+** If the node passed to the callback is a TK_AND node, return
+** WRC_Continue to tell sqlite3WalkExpr() to iterate through child nodes.
+**
+** Otherwise, return WRC_Prune. In this case, also check if the
+** sub-expression matches the criteria for being moved to the WHERE
+** clause. If so, add it to the WHERE clause and replace the sub-expression
+** within the HAVING expression with a constant "1".
+*/
+static int havingToWhereExprCb(Walker *pWalker, Expr *pExpr){
+ if( pExpr->op!=TK_AND ){
+ Select *pS = pWalker->u.pSelect;
+ if( sqlite3ExprIsConstantOrGroupBy(pWalker->pParse, pExpr, pS->pGroupBy) ){
+ sqlite3 *db = pWalker->pParse->db;
+ Expr *pNew = sqlite3Expr(db, TK_INTEGER, "1");
+ if( pNew ){
+ Expr *pWhere = pS->pWhere;
+ SWAP(Expr, *pNew, *pExpr);
+ pNew = sqlite3ExprAnd(pWalker->pParse, pWhere, pNew);
+ pS->pWhere = pNew;
+ pWalker->eCode = 1;
+ }
+ }
+ return WRC_Prune;
+ }
+ return WRC_Continue;
+}
+
+/*
+** Transfer eligible terms from the HAVING clause of a query, which is
+** processed after grouping, to the WHERE clause, which is processed before
+** grouping. For example, the query:
+**
+** SELECT * FROM <tables> WHERE a=? GROUP BY b HAVING b=? AND c=?
+**
+** can be rewritten as:
+**
+** SELECT * FROM <tables> WHERE a=? AND b=? GROUP BY b HAVING c=?
+**
+** A term of the HAVING expression is eligible for transfer if it consists
+** entirely of constants and expressions that are also GROUP BY terms that
+** use the "BINARY" collation sequence.
+*/
+static void havingToWhere(Parse *pParse, Select *p){
+ Walker sWalker;
+ memset(&sWalker, 0, sizeof(sWalker));
+ sWalker.pParse = pParse;
+ sWalker.xExprCallback = havingToWhereExprCb;
+ sWalker.u.pSelect = p;
+ sqlite3WalkExpr(&sWalker, p->pHaving);
+#if SELECTTRACE_ENABLED
+ if( sWalker.eCode && (sqlite3_unsupported_selecttrace & 0x100)!=0 ){
+ SELECTTRACE(0x100,pParse,p,("Move HAVING terms into WHERE:\n"));
+ sqlite3TreeViewSelect(0, p, 0);
+ }
+#endif
+}
+
+/*
+** Check to see if the pThis entry of pTabList is a self-join of a prior view.
+** If it is, then return the SrcList_item for the prior view. If it is not,
+** then return 0.
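+**
+** For example (illustrative only): in
+**
+**   SELECT * FROM v1 AS a, v1 AS b;
+**
+** the ephemeral table materialized for the first occurrence of view v1
+** can be reused for the second occurrence (see the OP_OpenDup coding
+** in sqlite3Select() below) rather than being computed twice.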
+*/
+static struct SrcList_item *isSelfJoinView(
+ SrcList *pTabList, /* Search for self-joins in this FROM clause */
+ struct SrcList_item *pThis /* Search for prior reference to this subquery */
+){
+ struct SrcList_item *pItem;
+ for(pItem = pTabList->a; pItem<pThis; pItem++){
+ Select *pS1;
+ if( pItem->pSelect==0 ) continue;
+ if( pItem->fg.viaCoroutine ) continue;
+ if( pItem->zName==0 ) continue;
+ assert( pItem->pTab!=0 );
+ assert( pThis->pTab!=0 );
+ if( pItem->pTab->pSchema!=pThis->pTab->pSchema ) continue;
+ if( sqlite3_stricmp(pItem->zName, pThis->zName)!=0 ) continue;
+ pS1 = pItem->pSelect;
+ if( pItem->pTab->pSchema==0 && pThis->pSelect->selId!=pS1->selId ){
+ /* The query flattener left two different CTE tables with identical
+ ** names in the same FROM clause. */
+ continue;
+ }
+ if( sqlite3ExprCompare(0, pThis->pSelect->pWhere, pS1->pWhere, -1)
+ || sqlite3ExprCompare(0, pThis->pSelect->pHaving, pS1->pHaving, -1)
+ ){
+ /* The view was modified by some other optimization such as
+ ** pushDownWhereTerms() */
+ continue;
+ }
+ return pItem;
+ }
+ return 0;
+}
+
+#ifdef SQLITE_COUNTOFVIEW_OPTIMIZATION
+/*
+** Attempt to transform a query of the form
+**
+** SELECT count(*) FROM (SELECT x FROM t1 UNION ALL SELECT y FROM t2)
+**
+** Into this:
+**
+** SELECT (SELECT count(*) FROM t1)+(SELECT count(*) FROM t2)
+**
+** The transformation only works if all of the following are true:
+**
+** * The subquery is a UNION ALL of two or more terms
+** * The subquery does not have a LIMIT clause
+** * There are no WHERE, GROUP BY, or HAVING clauses on the subqueries
+** * The outer query is a simple count(*) with no WHERE clause or other
+** extraneous syntax.
+**
+** Return TRUE if the optimization is undertaken.
+*/
+static int countOfViewOptimization(Parse *pParse, Select *p){
+ Select *pSub, *pPrior;
+ Expr *pExpr;
+ Expr *pCount;
+ sqlite3 *db;
+ if( (p->selFlags & SF_Aggregate)==0 ) return 0; /* This is an aggregate */
+ if( p->pEList->nExpr!=1 ) return 0; /* Single result column */
+ if( p->pWhere ) return 0;
+ if( p->pGroupBy ) return 0;
+ pExpr = p->pEList->a[0].pExpr;
+ if( pExpr->op!=TK_AGG_FUNCTION ) return 0; /* Result is an aggregate */
+ if( sqlite3_stricmp(pExpr->u.zToken,"count") ) return 0; /* Is count() */
+ if( pExpr->x.pList!=0 ) return 0; /* Must be count(*) */
+ if( p->pSrc->nSrc!=1 ) return 0; /* One table in FROM */
+ pSub = p->pSrc->a[0].pSelect;
+ if( pSub==0 ) return 0; /* The FROM is a subquery */
+ if( pSub->pPrior==0 ) return 0; /* Must be a compound query */
+ do{
+ if( pSub->op!=TK_ALL && pSub->pPrior ) return 0; /* Must be UNION ALL */
+ if( pSub->pWhere ) return 0; /* No WHERE clause */
+ if( pSub->pLimit ) return 0; /* No LIMIT clause */
+ if( pSub->selFlags & SF_Aggregate ) return 0; /* Not an aggregate */
+ pSub = pSub->pPrior; /* Repeat over compound */
+ }while( pSub );
+
+ /* If we reach this point then it is OK to perform the transformation */
+
+ db = pParse->db;
+ pCount = pExpr;
+ pExpr = 0;
+ pSub = p->pSrc->a[0].pSelect;
+ p->pSrc->a[0].pSelect = 0;
+ sqlite3SrcListDelete(db, p->pSrc);
+ p->pSrc = sqlite3DbMallocZero(pParse->db, sizeof(*p->pSrc));
+ while( pSub ){
+ Expr *pTerm;
+ pPrior = pSub->pPrior;
+ pSub->pPrior = 0;
+ pSub->pNext = 0;
+ pSub->selFlags |= SF_Aggregate;
+ pSub->selFlags &= ~SF_Compound;
+ pSub->nSelectRow = 0;
+ sqlite3ExprListDelete(db, pSub->pEList);
+ pTerm = pPrior ? sqlite3ExprDup(db, pCount, 0) : pCount;
+ pSub->pEList = sqlite3ExprListAppend(pParse, 0, pTerm);
+ pTerm = sqlite3PExpr(pParse, TK_SELECT, 0, 0);
+ sqlite3PExprAddSelect(pParse, pTerm, pSub);
+ if( pExpr==0 ){
+ pExpr = pTerm;
+ }else{
+ pExpr = sqlite3PExpr(pParse, TK_PLUS, pTerm, pExpr);
+ }
+ pSub = pPrior;
+ }
+ p->pEList->a[0].pExpr = pExpr;
+ p->selFlags &= ~SF_Aggregate;
+
+#if SELECTTRACE_ENABLED
+ if( sqlite3_unsupported_selecttrace & 0x400 ){
+ SELECTTRACE(0x400,pParse,p,("After count-of-view optimization:\n"));
+ sqlite3TreeViewSelect(0, p, 0);
+ }
+#endif
+ return 1;
+}
+#endif /* SQLITE_COUNTOFVIEW_OPTIMIZATION */
+
+/*
+** Generate code for the SELECT statement given in the p argument.
+**
+** The results are returned according to the SelectDest structure.
+** See comments in sqliteInt.h for further information.
+**
+** This routine returns the number of errors. If any errors are
+** encountered, then an appropriate error message is left in
+** pParse->zErrMsg.
+**
+** This routine does NOT free the Select structure passed in. The
+** calling function needs to do that.
+*/
+SQLITE_PRIVATE int sqlite3Select(
+ Parse *pParse, /* The parser context */
+ Select *p, /* The SELECT statement being coded. */
+ SelectDest *pDest /* What to do with the query results */
+){
+ int i, j; /* Loop counters */
+ WhereInfo *pWInfo; /* Return from sqlite3WhereBegin() */
+ Vdbe *v; /* The virtual machine under construction */
+ int isAgg; /* True for select lists like "count(*)" */
+ ExprList *pEList = 0; /* List of columns to extract. */
+ SrcList *pTabList; /* List of tables to select from */
+ Expr *pWhere; /* The WHERE clause. May be NULL */
+ ExprList *pGroupBy; /* The GROUP BY clause. May be NULL */
+ Expr *pHaving; /* The HAVING clause. May be NULL */
+ AggInfo *pAggInfo = 0; /* Aggregate information */
+ int rc = 1; /* Value to return from this function */
+ DistinctCtx sDistinct; /* Info on how to code the DISTINCT keyword */
+ SortCtx sSort; /* Info on how to code the ORDER BY clause */
+ int iEnd; /* Address of the end of the query */
+ sqlite3 *db; /* The database connection */
+ ExprList *pMinMaxOrderBy = 0; /* Added ORDER BY for min/max queries */
+ u8 minMaxFlag; /* Flag for min/max queries */
+
+ db = pParse->db;
+ v = sqlite3GetVdbe(pParse);
+ if( p==0 || db->mallocFailed || pParse->nErr ){
+ return 1;
+ }
+ if( sqlite3AuthCheck(pParse, SQLITE_SELECT, 0, 0, 0) ) return 1;
+#if SELECTTRACE_ENABLED
+ SELECTTRACE(1,pParse,p, ("begin processing:\n", pParse->addrExplain));
+ if( sqlite3_unsupported_selecttrace & 0x100 ){
+ sqlite3TreeViewSelect(0, p, 0);
+ }
+#endif
+
+ assert( p->pOrderBy==0 || pDest->eDest!=SRT_DistFifo );
+ assert( p->pOrderBy==0 || pDest->eDest!=SRT_Fifo );
+ assert( p->pOrderBy==0 || pDest->eDest!=SRT_DistQueue );
+ assert( p->pOrderBy==0 || pDest->eDest!=SRT_Queue );
+ if( IgnorableOrderby(pDest) ){
+ assert(pDest->eDest==SRT_Exists || pDest->eDest==SRT_Union ||
+ pDest->eDest==SRT_Except || pDest->eDest==SRT_Discard ||
+ pDest->eDest==SRT_Queue || pDest->eDest==SRT_DistFifo ||
+ pDest->eDest==SRT_DistQueue || pDest->eDest==SRT_Fifo);
+ /* If ORDER BY makes no difference in the output then neither does
+ ** DISTINCT so it can be removed too. */
+ sqlite3ExprListDelete(db, p->pOrderBy);
+ p->pOrderBy = 0;
+ p->selFlags &= ~SF_Distinct;
+ p->selFlags |= SF_NoopOrderBy;
+ }
+ sqlite3SelectPrep(pParse, p, 0);
+ if( pParse->nErr || db->mallocFailed ){
+ goto select_end;
+ }
+ assert( p->pEList!=0 );
+#if SELECTTRACE_ENABLED
+ if( sqlite3_unsupported_selecttrace & 0x104 ){
+ SELECTTRACE(0x104,pParse,p, ("after name resolution:\n"));
+ sqlite3TreeViewSelect(0, p, 0);
+ }
+#endif
+
+ /* If the SF_UpdateFrom flag is set, then this function is being called
+ ** as part of populating the temp table for an UPDATE...FROM statement.
+ ** In this case, it is an error if the target object (pSrc->a[0]) name
+ ** or alias is duplicated within FROM clause (pSrc->a[1..n]). */
+ if( p->selFlags & SF_UpdateFrom ){
+ struct SrcList_item *p0 = &p->pSrc->a[0];
+ for(i=1; i<p->pSrc->nSrc; i++){
+ struct SrcList_item *p1 = &p->pSrc->a[i];
+ if( p0->pTab==p1->pTab && 0==sqlite3_stricmp(p0->zAlias, p1->zAlias) ){
+ sqlite3ErrorMsg(pParse,
+ "target object/alias may not appear in FROM clause: %s",
+ p0->zAlias ? p0->zAlias : p0->pTab->zName
+ );
+ goto select_end;
+ }
+ }
+ }
+
+ if( pDest->eDest==SRT_Output ){
+ generateColumnNames(pParse, p);
+ }
+
+#ifndef SQLITE_OMIT_WINDOWFUNC
+ rc = sqlite3WindowRewrite(pParse, p);
+ if( rc ){
+ assert( db->mallocFailed || pParse->nErr>0 );
+ goto select_end;
+ }
+#if SELECTTRACE_ENABLED
+ if( p->pWin && (sqlite3_unsupported_selecttrace & 0x108)!=0 ){
+ SELECTTRACE(0x104,pParse,p, ("after window rewrite:\n"));
+ sqlite3TreeViewSelect(0, p, 0);
+ }
+#endif
+#endif /* SQLITE_OMIT_WINDOWFUNC */
+ pTabList = p->pSrc;
+ isAgg = (p->selFlags & SF_Aggregate)!=0;
+ memset(&sSort, 0, sizeof(sSort));
+ sSort.pOrderBy = p->pOrderBy;
+
+ /* Try to do various optimizations (flattening subqueries, and strength
+ ** reduction of join operators) in the FROM clause up into the main query
+ */
+#if !defined(SQLITE_OMIT_SUBQUERY) || !defined(SQLITE_OMIT_VIEW)
+ for(i=0; !p->pPrior && i<pTabList->nSrc; i++){
+ struct SrcList_item *pItem = &pTabList->a[i];
+ Select *pSub = pItem->pSelect;
+ Table *pTab = pItem->pTab;
+
+ /* The expander should have already created transient Table objects
+ ** even for FROM clause elements such as subqueries that do not correspond
+ ** to a real table */
+ assert( pTab!=0 );
+
+ /* Convert LEFT JOIN into JOIN if there are terms of the right table
+ ** of the LEFT JOIN used in the WHERE clause.
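+ **
+ ** For example (illustrative only): in
+ **
+ **   SELECT * FROM t1 LEFT JOIN t2 ON t1.a=t2.a WHERE t2.b>5;
+ **
+ ** the WHERE clause can never be true for the all-NULL row that the
+ ** LEFT JOIN would generate for t2, so the LEFT JOIN can be treated
+ ** as an ordinary JOIN.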
+ */
+ if( (pItem->fg.jointype & JT_LEFT)!=0
+ && sqlite3ExprImpliesNonNullRow(p->pWhere, pItem->iCursor)
+ && OptimizationEnabled(db, SQLITE_SimplifyJoin)
+ ){
+ SELECTTRACE(0x100,pParse,p,
+ ("LEFT-JOIN simplifies to JOIN on term %d\n",i));
+ pItem->fg.jointype &= ~(JT_LEFT|JT_OUTER);
+ unsetJoinExpr(p->pWhere, pItem->iCursor);
+ }
+
+ /* No further action if this term of the FROM clause is not a subquery */
+ if( pSub==0 ) continue;
+
+ /* Catch mismatch in the declared columns of a view and the number of
+ ** columns in the SELECT on the RHS */
+ if( pTab->nCol!=pSub->pEList->nExpr ){
+ sqlite3ErrorMsg(pParse, "expected %d columns for '%s' but got %d",
+ pTab->nCol, pTab->zName, pSub->pEList->nExpr);
+ goto select_end;
+ }
+
+ /* Do not try to flatten an aggregate subquery.
+ **
+ ** Flattening an aggregate subquery is only possible if the outer query
+ ** is not a join. But if the outer query is not a join, then the subquery
+ ** will be implemented as a co-routine and there is no advantage to
+ ** flattening in that case.
+ */
+ if( (pSub->selFlags & SF_Aggregate)!=0 ) continue;
+ assert( pSub->pGroupBy==0 );
+
+ /* If the outer query contains a "complex" result set (that is,
+ ** if the result set of the outer query uses functions or subqueries)
+ ** and if the subquery contains an ORDER BY clause and if
+ ** it will be implemented as a co-routine, then do not flatten. This
+ ** restriction allows SQL constructs like this:
+ **
+ ** SELECT expensive_function(x)
+ ** FROM (SELECT x FROM tab ORDER BY y LIMIT 10);
+ **
+ ** The expensive_function() is only computed on the 10 rows that
+ ** are output, rather than every row of the table.
+ **
+ ** The requirement that the outer query have a complex result set
+ ** means that flattening does occur on simpler SQL constructs without
+ ** the expensive_function() like:
+ **
+ ** SELECT x FROM (SELECT x FROM tab ORDER BY y LIMIT 10);
+ */
+ if( pSub->pOrderBy!=0
+ && i==0
+ && (p->selFlags & SF_ComplexResult)!=0
+ && (pTabList->nSrc==1
+ || (pTabList->a[1].fg.jointype&(JT_LEFT|JT_CROSS))!=0)
+ ){
+ continue;
+ }
+
+ if( flattenSubquery(pParse, p, i, isAgg) ){
+ if( pParse->nErr ) goto select_end;
+ /* This subquery can be absorbed into its parent. */
+ i = -1;
+ }
+ pTabList = p->pSrc;
+ if( db->mallocFailed ) goto select_end;
+ if( !IgnorableOrderby(pDest) ){
+ sSort.pOrderBy = p->pOrderBy;
+ }
+ }
+#endif
+
+#ifndef SQLITE_OMIT_COMPOUND_SELECT
+ /* Handle compound SELECT statements using the separate multiSelect()
+ ** procedure.
+ */
+ if( p->pPrior ){
+ rc = multiSelect(pParse, p, pDest);
+#if SELECTTRACE_ENABLED
+ SELECTTRACE(0x1,pParse,p,("end compound-select processing\n"));
+ if( (sqlite3_unsupported_selecttrace & 0x2000)!=0 && ExplainQueryPlanParent(pParse)==0 ){
+ sqlite3TreeViewSelect(0, p, 0);
+ }
+#endif
+ if( p->pNext==0 ) ExplainQueryPlanPop(pParse);
+ return rc;
+ }
+#endif
+
+ /* Do the WHERE-clause constant propagation optimization if this is
+ ** a join. There is no need to spend time on this operation for
+ ** non-join queries as the equivalent optimization will be handled by
+ ** the query planner in sqlite3WhereBegin().
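+ **
+ ** As an illustration (not normative): in
+ **
+ **   SELECT * FROM t1, t2 WHERE t1.a=39 AND t2.b=t1.a;
+ **
+ ** constant propagation may rewrite the second term as t2.b=39 so
+ ** that an index on t2.b becomes usable.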
+ */
+ if( pTabList->nSrc>1
+ && OptimizationEnabled(db, SQLITE_PropagateConst)
+ && propagateConstants(pParse, p)
+ ){
+#if SELECTTRACE_ENABLED
+ if( sqlite3_unsupported_selecttrace & 0x100 ){
+ SELECTTRACE(0x100,pParse,p,("After constant propagation:\n"));
+ sqlite3TreeViewSelect(0, p, 0);
+ }
+#endif
+ }else{
+ SELECTTRACE(0x100,pParse,p,("Constant propagation not helpful\n"));
+ }
+
+#ifdef SQLITE_COUNTOFVIEW_OPTIMIZATION
+ if( OptimizationEnabled(db, SQLITE_QueryFlattener|SQLITE_CountOfView)
+ && countOfViewOptimization(pParse, p)
+ ){
+ if( db->mallocFailed ) goto select_end;
+ pEList = p->pEList;
+ pTabList = p->pSrc;
+ }
+#endif
+
+ /* For each term in the FROM clause, do two things:
+ ** (1) Authorize unreferenced tables
+ ** (2) Generate code for all sub-queries
+ */
+ for(i=0; i<pTabList->nSrc; i++){
+ struct SrcList_item *pItem = &pTabList->a[i];
+ SelectDest dest;
+ Select *pSub;
+#if !defined(SQLITE_OMIT_SUBQUERY) || !defined(SQLITE_OMIT_VIEW)
+ const char *zSavedAuthContext;
+#endif
+
+ /* Issue SQLITE_READ authorizations with a fake column name for any
+ ** tables that are referenced but from which no values are extracted.
+ ** Examples of where these kinds of null SQLITE_READ authorizations
+ ** would occur:
+ **
+ ** SELECT count(*) FROM t1; -- SQLITE_READ t1.""
+ ** SELECT t1.* FROM t1, t2; -- SQLITE_READ t2.""
+ **
+ ** The fake column name is an empty string. It is possible for a table to
+ ** have a column named by the empty string, in which case there is no way to
+ ** distinguish between an unreferenced table and an actual reference to the
+ ** "" column. The original design was for the fake column name to be a NULL,
+ ** which would be unambiguous. But legacy authorization callbacks might
+ ** assume the column name is non-NULL and segfault. The use of an empty
+ ** string for the fake column name seems safer.
+ */
+ if( pItem->colUsed==0 && pItem->zName!=0 ){
+ sqlite3AuthCheck(pParse, SQLITE_READ, pItem->zName, "", pItem->zDatabase);
+ }
+
+#if !defined(SQLITE_OMIT_SUBQUERY) || !defined(SQLITE_OMIT_VIEW)
+ /* Generate code for all sub-queries in the FROM clause
+ */
+ pSub = pItem->pSelect;
+ if( pSub==0 ) continue;
+
+ /* The code for a subquery should only be generated once, though it is
+ ** technically harmless for it to be generated multiple times. The
+ ** following assert() will detect if something changes to cause
+ ** the same subquery to be coded multiple times, as a signal to the
+ ** developers to try to optimize the situation.
+ **
+ ** Update 2019-07-24:
+ ** See ticket https://sqlite.org/src/tktview/c52b09c7f38903b1311cec40.
+ ** The dbsqlfuzz fuzzer found a case where the same subquery gets
+ ** coded twice. So this assert() now becomes a testcase(). It should
+ ** be very rare, though.
+ */
+ testcase( pItem->addrFillSub!=0 );
+
+ /* Increment Parse.nHeight by the height of the largest expression
+ ** tree referred to by this, the parent select. The child select
+ ** may contain expression trees of at most
+ ** (SQLITE_MAX_EXPR_DEPTH-Parse.nHeight) height. This is a bit
+ ** more conservative than necessary, but much easier than enforcing
+ ** an exact limit.
+ */
+ pParse->nHeight += sqlite3SelectExprHeight(p);
+
+ /* Make copies of constant WHERE-clause terms in the outer query down
+ ** inside the subquery. This can help the subquery to run more efficiently.
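+ **
+ ** For example (illustrative only): in
+ **
+ **   SELECT * FROM (SELECT a, b+1 AS c FROM t1) WHERE a=5;
+ **
+ ** the a=5 term can be duplicated inside the subquery so that only
+ ** rows of t1 with a=5 are computed by the subquery.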
+ */
+ if( OptimizationEnabled(db, SQLITE_PushDown)
+ && pushDownWhereTerms(pParse, pSub, p->pWhere, pItem->iCursor,
+ (pItem->fg.jointype & JT_OUTER)!=0)
+ ){
+#if SELECTTRACE_ENABLED
+ if( sqlite3_unsupported_selecttrace & 0x100 ){
+ SELECTTRACE(0x100,pParse,p,
+ ("After WHERE-clause push-down into subquery %d:\n", pSub->selId));
+ sqlite3TreeViewSelect(0, p, 0);
+ }
+#endif
+ }else{
+ SELECTTRACE(0x100,pParse,p,("Push-down not possible\n"));
+ }
+
+ zSavedAuthContext = pParse->zAuthContext;
+ pParse->zAuthContext = pItem->zName;
+
+ /* Generate code to implement the subquery
+ **
+ ** The subquery is implemented as a co-routine if the subquery is
+ ** guaranteed to be the outer loop (so that it does not need to be
+ ** computed more than once)
+ **
+ ** TODO: Are there other reasons besides (1) to use a co-routine
+ ** implementation?
+ */
+ if( i==0
+ && (pTabList->nSrc==1
+ || (pTabList->a[1].fg.jointype&(JT_LEFT|JT_CROSS))!=0) /* (1) */
+ ){
+ /* Implement a co-routine that will return a single row of the result
+ ** set on each invocation.
+ */
+ int addrTop = sqlite3VdbeCurrentAddr(v)+1;
+
+ pItem->regReturn = ++pParse->nMem;
+ sqlite3VdbeAddOp3(v, OP_InitCoroutine, pItem->regReturn, 0, addrTop);
+ VdbeComment((v, "%s", pItem->pTab->zName));
+ pItem->addrFillSub = addrTop;
+ sqlite3SelectDestInit(&dest, SRT_Coroutine, pItem->regReturn);
+ ExplainQueryPlan((pParse, 1, "CO-ROUTINE %u", pSub->selId));
+ sqlite3Select(pParse, pSub, &dest);
+ pItem->pTab->nRowLogEst = pSub->nSelectRow;
+ pItem->fg.viaCoroutine = 1;
+ pItem->regResult = dest.iSdst;
+ sqlite3VdbeEndCoroutine(v, pItem->regReturn);
+ sqlite3VdbeJumpHere(v, addrTop-1);
+ sqlite3ClearTempRegCache(pParse);
+ }else{
+ /* Generate a subroutine that will fill an ephemeral table with
+ ** the content of this subquery. pItem->addrFillSub will point
+ ** to the address of the generated subroutine. pItem->regReturn
+ ** is a register allocated to hold the subroutine return address
+ */
+ int topAddr;
+ int onceAddr = 0;
+ int retAddr;
+ struct SrcList_item *pPrior;
+
+ testcase( pItem->addrFillSub==0 ); /* Ticket c52b09c7f38903b1311 */
+ pItem->regReturn = ++pParse->nMem;
+ topAddr = sqlite3VdbeAddOp2(v, OP_Integer, 0, pItem->regReturn);
+ pItem->addrFillSub = topAddr+1;
+ if( pItem->fg.isCorrelated==0 ){
+ /* If the subquery is not correlated and if we are not inside of
+ ** a trigger, then we only need to compute the value of the subquery
+ ** once. */
+ onceAddr = sqlite3VdbeAddOp0(v, OP_Once); VdbeCoverage(v);
+ VdbeComment((v, "materialize \"%s\"", pItem->pTab->zName));
+ }else{
+ VdbeNoopComment((v, "materialize \"%s\"", pItem->pTab->zName));
+ }
+ pPrior = isSelfJoinView(pTabList, pItem);
+ if( pPrior ){
+ sqlite3VdbeAddOp2(v, OP_OpenDup, pItem->iCursor, pPrior->iCursor);
+ assert( pPrior->pSelect!=0 );
+ pSub->nSelectRow = pPrior->pSelect->nSelectRow;
+ }else{
+ sqlite3SelectDestInit(&dest, SRT_EphemTab, pItem->iCursor);
+ ExplainQueryPlan((pParse, 1, "MATERIALIZE %u", pSub->selId));
+ sqlite3Select(pParse, pSub, &dest);
+ }
+ pItem->pTab->nRowLogEst = pSub->nSelectRow;
+ if( onceAddr ) sqlite3VdbeJumpHere(v, onceAddr);
+ retAddr = sqlite3VdbeAddOp1(v, OP_Return, pItem->regReturn);
+ VdbeComment((v, "end %s", pItem->pTab->zName));
+ sqlite3VdbeChangeP1(v, topAddr, retAddr);
+ sqlite3ClearTempRegCache(pParse);
+ }
+ if( db->mallocFailed ) goto select_end;
+ pParse->nHeight -= sqlite3SelectExprHeight(p);
+ pParse->zAuthContext = zSavedAuthContext;
+#endif
+ }
+
+ /* Various elements of the SELECT copied into local variables for
+ ** convenience */
+ pEList = p->pEList;
+ pWhere = p->pWhere;
+ pGroupBy = p->pGroupBy;
+ pHaving = p->pHaving;
+ sDistinct.isTnct = (p->selFlags & SF_Distinct)!=0;
+
+#if SELECTTRACE_ENABLED
+ if( sqlite3_unsupported_selecttrace & 0x400 ){
+ SELECTTRACE(0x400,pParse,p,("After all FROM-clause analysis:\n"));
+ sqlite3TreeViewSelect(0, p, 0);
+ }
+#endif
+
+ /* If the query is DISTINCT with an ORDER BY but is not an aggregate, and
+ ** if the select-list is the same as the ORDER BY list, then this query
+ ** can be rewritten as a GROUP BY. In other words, this:
+ **
+ ** SELECT DISTINCT xyz FROM ... ORDER BY xyz
+ **
+ ** is transformed to:
+ **
+ ** SELECT xyz FROM ... GROUP BY xyz ORDER BY xyz
+ **
+ ** The second form is preferred as a single index (or temp-table) may be
+ ** used for both the ORDER BY and DISTINCT processing. As originally
+ ** written the query must use a temp-table for at least one of the ORDER
+ ** BY and DISTINCT, and an index or separate temp-table for the other.
+ */
+ if( (p->selFlags & (SF_Distinct|SF_Aggregate))==SF_Distinct
+ && sqlite3ExprListCompare(sSort.pOrderBy, pEList, -1)==0
+#ifndef SQLITE_OMIT_WINDOWFUNC
+ && p->pWin==0
+#endif
+ ){
+ p->selFlags &= ~SF_Distinct;
+ pGroupBy = p->pGroupBy = sqlite3ExprListDup(db, pEList, 0);
+ p->selFlags |= SF_Aggregate;
+ /* Notice that even though SF_Distinct has been cleared from p->selFlags,
+ ** the sDistinct.isTnct is still set. Hence, isTnct represents the
+ ** original setting of the SF_Distinct flag, not the current setting */
+ assert( sDistinct.isTnct );
+
+#if SELECTTRACE_ENABLED
+ if( sqlite3_unsupported_selecttrace & 0x400 ){
+ SELECTTRACE(0x400,pParse,p,("Transform DISTINCT into GROUP BY:\n"));
+ sqlite3TreeViewSelect(0, p, 0);
+ }
+#endif
+ }
+
+ /* If there is an ORDER BY clause, then create an ephemeral index to
+ ** do the sorting. But this sorting ephemeral index might end up
+ ** being unused if the data can be extracted in pre-sorted order.
+ ** If that is the case, then the OP_OpenEphemeral instruction will be
+ ** changed to an OP_Noop once we figure out that the sorting index is
+ ** not needed. The sSort.addrSortIndex variable is used to facilitate
+ ** that change.
+ */
+ if( sSort.pOrderBy ){
+ KeyInfo *pKeyInfo;
+ pKeyInfo = sqlite3KeyInfoFromExprList(
+ pParse, sSort.pOrderBy, 0, pEList->nExpr);
+ sSort.iECursor = pParse->nTab++;
+ sSort.addrSortIndex =
+ sqlite3VdbeAddOp4(v, OP_OpenEphemeral,
+ sSort.iECursor, sSort.pOrderBy->nExpr+1+pEList->nExpr, 0,
+ (char*)pKeyInfo, P4_KEYINFO
+ );
+ }else{
+ sSort.addrSortIndex = -1;
+ }
+
+ /* If the output is destined for a temporary table, open that table.
+ */
+ if( pDest->eDest==SRT_EphemTab ){
+ sqlite3VdbeAddOp2(v, OP_OpenEphemeral, pDest->iSDParm, pEList->nExpr);
+ }
+
+ /* Set the limiter.
+ */
+ iEnd = sqlite3VdbeMakeLabel(pParse);
+ if( (p->selFlags & SF_FixedLimit)==0 ){
+ p->nSelectRow = 320; /* 4 billion rows */
+ }
+ computeLimitRegisters(pParse, p, iEnd);
+ if( p->iLimit==0 && sSort.addrSortIndex>=0 ){
+ sqlite3VdbeChangeOpcode(v, sSort.addrSortIndex, OP_SorterOpen);
+ sSort.sortFlags |= SORTFLAG_UseSorter;
+ }
+
+ /* Open an ephemeral index to use for the distinct set.
+ */
+ if( p->selFlags & SF_Distinct ){
+ sDistinct.tabTnct = pParse->nTab++;
+ sDistinct.addrTnct = sqlite3VdbeAddOp4(v, OP_OpenEphemeral,
+ sDistinct.tabTnct, 0, 0,
+ (char*)sqlite3KeyInfoFromExprList(pParse, p->pEList,0,0),
+ P4_KEYINFO);
+ sqlite3VdbeChangeP5(v, BTREE_UNORDERED);
+ sDistinct.eTnctType = WHERE_DISTINCT_UNORDERED;
+ }else{
+ sDistinct.eTnctType = WHERE_DISTINCT_NOOP;
+ }
+
+ if( !isAgg && pGroupBy==0 ){
+ /* No aggregate functions and no GROUP BY clause */
+ u16 wctrlFlags = (sDistinct.isTnct ? WHERE_WANT_DISTINCT : 0)
+ | (p->selFlags & SF_FixedLimit);
+#ifndef SQLITE_OMIT_WINDOWFUNC
+ Window *pWin = p->pWin; /* Main window object (or NULL) */
+ if( pWin ){
+ sqlite3WindowCodeInit(pParse, p);
+ }
+#endif
+ assert( WHERE_USE_LIMIT==SF_FixedLimit );
+
+
+ /* Begin the database scan. */
+ SELECTTRACE(1,pParse,p,("WhereBegin\n"));
+ pWInfo = sqlite3WhereBegin(pParse, pTabList, pWhere, sSort.pOrderBy,
+ p->pEList, wctrlFlags, p->nSelectRow);
+ if( pWInfo==0 ) goto select_end;
+ if( sqlite3WhereOutputRowCount(pWInfo) < p->nSelectRow ){
+ p->nSelectRow = sqlite3WhereOutputRowCount(pWInfo);
+ }
+ if( sDistinct.isTnct && sqlite3WhereIsDistinct(pWInfo) ){
+ sDistinct.eTnctType = sqlite3WhereIsDistinct(pWInfo);
+ }
+ if( sSort.pOrderBy ){
+ sSort.nOBSat = sqlite3WhereIsOrdered(pWInfo);
+ sSort.labelOBLopt = sqlite3WhereOrderByLimitOptLabel(pWInfo);
+ if( sSort.nOBSat==sSort.pOrderBy->nExpr ){
+ sSort.pOrderBy = 0;
+ }
+ }
+
+ /* If the sorting index that was created by a prior OP_OpenEphemeral
+ ** instruction ended up not being needed, then change the OP_OpenEphemeral
+ ** into an OP_Noop.
+ */
+ if( sSort.addrSortIndex>=0 && sSort.pOrderBy==0 ){
+ sqlite3VdbeChangeToNoop(v, sSort.addrSortIndex);
+ }
+
+ assert( p->pEList==pEList );
+#ifndef SQLITE_OMIT_WINDOWFUNC
+ if( pWin ){
+ int addrGosub = sqlite3VdbeMakeLabel(pParse);
+ int iCont = sqlite3VdbeMakeLabel(pParse);
+ int iBreak = sqlite3VdbeMakeLabel(pParse);
+ int regGosub = ++pParse->nMem;
+
+ sqlite3WindowCodeStep(pParse, p, pWInfo, regGosub, addrGosub);
+
+ sqlite3VdbeAddOp2(v, OP_Goto, 0, iBreak);
+ sqlite3VdbeResolveLabel(v, addrGosub);
+ VdbeNoopComment((v, "inner-loop subroutine"));
+ sSort.labelOBLopt = 0;
+ selectInnerLoop(pParse, p, -1, &sSort, &sDistinct, pDest, iCont, iBreak);
+ sqlite3VdbeResolveLabel(v, iCont);
+ sqlite3VdbeAddOp1(v, OP_Return, regGosub);
+ VdbeComment((v, "end inner-loop subroutine"));
+ sqlite3VdbeResolveLabel(v, iBreak);
+ }else
+#endif /* SQLITE_OMIT_WINDOWFUNC */
+ {
+ /* Use the standard inner loop. */
+ selectInnerLoop(pParse, p, -1, &sSort, &sDistinct, pDest,
+ sqlite3WhereContinueLabel(pWInfo),
+ sqlite3WhereBreakLabel(pWInfo));
+
+ /* End the database scan loop.
+ */
+ sqlite3WhereEnd(pWInfo);
+ }
+ }else{
+ /* This is the case when there exist aggregate functions or a GROUP BY
+ ** clause, or both */
+ NameContext sNC; /* Name context for processing aggregate information */
+ int iAMem; /* First Mem address for storing current GROUP BY */
+ int iBMem; /* First Mem address for previous GROUP BY */
+ int iUseFlag; /* Mem address holding flag indicating that at least
+ ** one row of the input to the aggregator has been
+ ** processed */
+ int iAbortFlag; /* Mem address which causes query abort if positive */
+ int groupBySort; /* Rows come from source in GROUP BY order */
+ int addrEnd; /* End of processing for this SELECT */
+ int sortPTab = 0; /* Pseudotable used to decode sorting results */
+ int sortOut = 0; /* Output register from the sorter */
+ int orderByGrp = 0; /* True if the GROUP BY and ORDER BY are the same */
+
+ /* Remove any and all aliases between the result set and the
+ ** GROUP BY clause.
+ */
+ if( pGroupBy ){
+ int k; /* Loop counter */
+ struct ExprList_item *pItem; /* For looping over expression in a list */
+
+ for(k=p->pEList->nExpr, pItem=p->pEList->a; k>0; k--, pItem++){
+ pItem->u.x.iAlias = 0;
+ }
+ for(k=pGroupBy->nExpr, pItem=pGroupBy->a; k>0; k--, pItem++){
+ pItem->u.x.iAlias = 0;
+ }
+ assert( 66==sqlite3LogEst(100) );
+ if( p->nSelectRow>66 ) p->nSelectRow = 66;
+
+ /* If there is both a GROUP BY and an ORDER BY clause and they are
+ ** identical, then it may be possible to disable the ORDER BY clause
+ ** on the grounds that the GROUP BY will cause elements to come out
+ ** in the correct order. It also may not - the GROUP BY might use a
+ ** database index that causes rows to be grouped together as required
+ ** but not actually sorted. Either way, record the fact that the
+ ** ORDER BY and GROUP BY clauses are the same by setting the orderByGrp
+ ** variable. */
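+ /* Illustrative example (not normative):
+ **
+ **   SELECT a, sum(b) FROM t1 GROUP BY a ORDER BY a;
+ **
+ ** If the GROUP BY is implemented by an index scan that delivers rows
+ ** in order of column a, the ORDER BY is redundant and may be dropped.
+ */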
+ if( sSort.pOrderBy && pGroupBy->nExpr==sSort.pOrderBy->nExpr ){
+ int ii;
+ /* The GROUP BY processing doesn't care whether rows are delivered in
+ ** ASC or DESC order - only that each group is returned contiguously.
+ ** So set the ASC/DESC flags in the GROUP BY to match those in the
+ ** ORDER BY to maximize the chances of rows being delivered in an
+ ** order that makes the ORDER BY redundant. */
+ for(ii=0; ii<pGroupBy->nExpr; ii++){
+ u8 sortFlags = sSort.pOrderBy->a[ii].sortFlags & KEYINFO_ORDER_DESC;
+ pGroupBy->a[ii].sortFlags = sortFlags;
+ }
+ if( sqlite3ExprListCompare(pGroupBy, sSort.pOrderBy, -1)==0 ){
+ orderByGrp = 1;
+ }
+ }
+ }else{
+ assert( 0==sqlite3LogEst(1) );
+ p->nSelectRow = 0;
+ }
+
+ /* Create a label to jump to when we want to abort the query */
+ addrEnd = sqlite3VdbeMakeLabel(pParse);
+
+ /* Convert TK_COLUMN nodes into TK_AGG_COLUMN and make entries in
+ ** sAggInfo for all TK_AGG_FUNCTION nodes in expressions of the
+ ** SELECT statement.
+ */
+ pAggInfo = sqlite3DbMallocZero(db, sizeof(*pAggInfo) );
+ if( pAggInfo==0 ){
+ goto select_end;
+ }
+ pAggInfo->pNext = pParse->pAggList;
+ pParse->pAggList = pAggInfo;
+ pAggInfo->selId = p->selId;
+ memset(&sNC, 0, sizeof(sNC));
+ sNC.pParse = pParse;
+ sNC.pSrcList = pTabList;
+ sNC.uNC.pAggInfo = pAggInfo;
+ VVA_ONLY( sNC.ncFlags = NC_UAggInfo; )
+ pAggInfo->mnReg = pParse->nMem+1;
+ pAggInfo->nSortingColumn = pGroupBy ? pGroupBy->nExpr : 0;
+ pAggInfo->pGroupBy = pGroupBy;
+ sqlite3ExprAnalyzeAggList(&sNC, pEList);
+ sqlite3ExprAnalyzeAggList(&sNC, sSort.pOrderBy);
+ if( pHaving ){
+ if( pGroupBy ){
+ assert( pWhere==p->pWhere );
+ assert( pHaving==p->pHaving );
+ assert( pGroupBy==p->pGroupBy );
+ havingToWhere(pParse, p);
+ pWhere = p->pWhere;
+ }
+ sqlite3ExprAnalyzeAggregates(&sNC, pHaving);
+ }
+ pAggInfo->nAccumulator = pAggInfo->nColumn;
+ if( p->pGroupBy==0 && p->pHaving==0 && pAggInfo->nFunc==1 ){
+ minMaxFlag = minMaxQuery(db, pAggInfo->aFunc[0].pFExpr, &pMinMaxOrderBy);
+ }else{
+ minMaxFlag = WHERE_ORDERBY_NORMAL;
+ }
+ for(i=0; i<pAggInfo->nFunc; i++){
+ Expr *pExpr = pAggInfo->aFunc[i].pFExpr;
+ assert( !ExprHasProperty(pExpr, EP_xIsSelect) );
+ sNC.ncFlags |= NC_InAggFunc;
+ sqlite3ExprAnalyzeAggList(&sNC, pExpr->x.pList);
+#ifndef SQLITE_OMIT_WINDOWFUNC
+ assert( !IsWindowFunc(pExpr) );
+ if( ExprHasProperty(pExpr, EP_WinFunc) ){
+ sqlite3ExprAnalyzeAggregates(&sNC, pExpr->y.pWin->pFilter);
+ }
+#endif
+ sNC.ncFlags &= ~NC_InAggFunc;
+ }
+ pAggInfo->mxReg = pParse->nMem;
+ if( db->mallocFailed ) goto select_end;
+#if SELECTTRACE_ENABLED
+ if( sqlite3_unsupported_selecttrace & 0x400 ){
+ int ii;
+ SELECTTRACE(0x400,pParse,p,("After aggregate analysis %p:\n", pAggInfo));
+ sqlite3TreeViewSelect(0, p, 0);
+ for(ii=0; ii<pAggInfo->nColumn; ii++){
+ sqlite3DebugPrintf("agg-column[%d] iMem=%d\n",
+ ii, pAggInfo->aCol[ii].iMem);
+ sqlite3TreeViewExpr(0, pAggInfo->aCol[ii].pCExpr, 0);
+ }
+ for(ii=0; ii<pAggInfo->nFunc; ii++){
+ sqlite3DebugPrintf("agg-func[%d]: iMem=%d\n",
+ ii, pAggInfo->aFunc[ii].iMem);
+ sqlite3TreeViewExpr(0, pAggInfo->aFunc[ii].pFExpr, 0);
+ }
+ }
+#endif
+
+
+ /* Processing for aggregates with GROUP BY is very different and
+ ** much more complex than aggregates without a GROUP BY.
+ */
+ if( pGroupBy ){
+ KeyInfo *pKeyInfo; /* Keying information for the group by clause */
+ int addr1; /* A-vs-B comparison jump */
+ int addrOutputRow; /* Start of subroutine that outputs a result row */
+ int regOutputRow; /* Return address register for output subroutine */
+ int addrSetAbort; /* Set the abort flag and return */
+ int addrTopOfLoop; /* Top of the input loop */
+ int addrSortingIdx; /* The OP_OpenEphemeral for the sorting index */
+ int addrReset; /* Subroutine for resetting the accumulator */
+ int regReset; /* Return address register for reset subroutine */
+
+ /* If there is a GROUP BY clause we might need a sorting index to
+ ** implement it. Allocate that sorting index now. If it turns out
+ ** that we do not need it after all, the OP_SorterOpen instruction
+ ** will be converted into a Noop.
+ */
+ pAggInfo->sortingIdx = pParse->nTab++;
+ pKeyInfo = sqlite3KeyInfoFromExprList(pParse, pGroupBy,
+ 0, pAggInfo->nColumn);
+ addrSortingIdx = sqlite3VdbeAddOp4(v, OP_SorterOpen,
+ pAggInfo->sortingIdx, pAggInfo->nSortingColumn,
+ 0, (char*)pKeyInfo, P4_KEYINFO);
+
+ /* Initialize memory locations used by GROUP BY aggregate processing
+ */
+ iUseFlag = ++pParse->nMem;
+ iAbortFlag = ++pParse->nMem;
+ regOutputRow = ++pParse->nMem;
+ addrOutputRow = sqlite3VdbeMakeLabel(pParse);
+ regReset = ++pParse->nMem;
+ addrReset = sqlite3VdbeMakeLabel(pParse);
+ iAMem = pParse->nMem + 1;
+ pParse->nMem += pGroupBy->nExpr;
+ iBMem = pParse->nMem + 1;
+ pParse->nMem += pGroupBy->nExpr;
+ sqlite3VdbeAddOp2(v, OP_Integer, 0, iAbortFlag);
+ VdbeComment((v, "clear abort flag"));
+ sqlite3VdbeAddOp3(v, OP_Null, 0, iAMem, iAMem+pGroupBy->nExpr-1);
+
+ /* Begin a loop that will extract all source rows in GROUP BY order.
+ ** This might involve two separate loops with an OP_Sort in between, or
+ ** it might be a single loop that uses an index to extract information
+ ** in the right order to begin with.
+ */
+ sqlite3VdbeAddOp2(v, OP_Gosub, regReset, addrReset);
+ SELECTTRACE(1,pParse,p,("WhereBegin\n"));
+ pWInfo = sqlite3WhereBegin(pParse, pTabList, pWhere, pGroupBy, 0,
+ WHERE_GROUPBY | (orderByGrp ? WHERE_SORTBYGROUP : 0), 0
+ );
+ if( pWInfo==0 ) goto select_end;
+ if( sqlite3WhereIsOrdered(pWInfo)==pGroupBy->nExpr ){
+ /* The optimizer is able to deliver rows in group by order so
+ ** we do not have to sort. The OP_OpenEphemeral table will be
+ ** cancelled later because we still need to use the pKeyInfo
+ */
+ groupBySort = 0;
+ }else{
+ /* Rows are coming out in undetermined order. We have to push
+ ** each row into a sorting index, terminate the first loop,
+ ** then loop over the sorting index in order to get the output
+ ** in sorted order
+ */
+ int regBase;
+ int regRecord;
+ int nCol;
+ int nGroupBy;
+
+ explainTempTable(pParse,
+ (sDistinct.isTnct && (p->selFlags&SF_Distinct)==0) ?
+ "DISTINCT" : "GROUP BY");
+
+ groupBySort = 1;
+ nGroupBy = pGroupBy->nExpr;
+ nCol = nGroupBy;
+ j = nGroupBy;
+ for(i=0; i<pAggInfo->nColumn; i++){
+ if( pAggInfo->aCol[i].iSorterColumn>=j ){
+ nCol++;
+ j++;
+ }
+ }
+ regBase = sqlite3GetTempRange(pParse, nCol);
+ sqlite3ExprCodeExprList(pParse, pGroupBy, regBase, 0, 0);
+ j = nGroupBy;
+ for(i=0; i<pAggInfo->nColumn; i++){
+ struct AggInfo_col *pCol = &pAggInfo->aCol[i];
+ if( pCol->iSorterColumn>=j ){
+ int r1 = j + regBase;
+ sqlite3ExprCodeGetColumnOfTable(v,
+ pCol->pTab, pCol->iTable, pCol->iColumn, r1);
+ j++;
+ }
+ }
+ regRecord = sqlite3GetTempReg(pParse);
+ sqlite3VdbeAddOp3(v, OP_MakeRecord, regBase, nCol, regRecord);
+ sqlite3VdbeAddOp2(v, OP_SorterInsert, pAggInfo->sortingIdx, regRecord);
+ sqlite3ReleaseTempReg(pParse, regRecord);
+ sqlite3ReleaseTempRange(pParse, regBase, nCol);
+ sqlite3WhereEnd(pWInfo);
+ pAggInfo->sortingIdxPTab = sortPTab = pParse->nTab++;
+ sortOut = sqlite3GetTempReg(pParse);
+ sqlite3VdbeAddOp3(v, OP_OpenPseudo, sortPTab, sortOut, nCol);
+ sqlite3VdbeAddOp2(v, OP_SorterSort, pAggInfo->sortingIdx, addrEnd);
+ VdbeComment((v, "GROUP BY sort")); VdbeCoverage(v);
+ pAggInfo->useSortingIdx = 1;
+ }
+
+ /* If the index or temporary table used by the GROUP BY sort
+ ** will naturally deliver rows in the order required by the ORDER BY
+ ** clause, cancel the ephemeral table open coded earlier.
+ **
+ ** This is an optimization - the correct answer should result regardless.
+ ** Use the SQLITE_GroupByOrder flag with SQLITE_TESTCTRL_OPTIMIZER to
+ ** disable this optimization for testing purposes. */
+ if( orderByGrp && OptimizationEnabled(db, SQLITE_GroupByOrder)
+ && (groupBySort || sqlite3WhereIsSorted(pWInfo))
+ ){
+ sSort.pOrderBy = 0;
+ sqlite3VdbeChangeToNoop(v, sSort.addrSortIndex);
+ }
+
+ /* Evaluate the current GROUP BY terms and store in b0, b1, b2...
+ ** (b0 is memory location iBMem+0, b1 is iBMem+1, and so forth)
+ ** Then compare the current GROUP BY terms against the GROUP BY terms
+ ** from the previous row currently stored in a0, a1, a2...
+ */
+ addrTopOfLoop = sqlite3VdbeCurrentAddr(v);
+ if( groupBySort ){
+ sqlite3VdbeAddOp3(v, OP_SorterData, pAggInfo->sortingIdx,
+ sortOut, sortPTab);
+ }
+ for(j=0; j<pGroupBy->nExpr; j++){
+ if( groupBySort ){
+ sqlite3VdbeAddOp3(v, OP_Column, sortPTab, j, iBMem+j);
+ }else{
+ pAggInfo->directMode = 1;
+ sqlite3ExprCode(pParse, pGroupBy->a[j].pExpr, iBMem+j);
+ }
+ }
+ sqlite3VdbeAddOp4(v, OP_Compare, iAMem, iBMem, pGroupBy->nExpr,
+ (char*)sqlite3KeyInfoRef(pKeyInfo), P4_KEYINFO);
+ addr1 = sqlite3VdbeCurrentAddr(v);
+ sqlite3VdbeAddOp3(v, OP_Jump, addr1+1, 0, addr1+1); VdbeCoverage(v);
+
+ /* Generate code that runs whenever the GROUP BY changes.
+ ** Changes in the GROUP BY are detected by the previous code
+ ** block. If there were no changes, this block is skipped.
+ **
+ ** This code copies current group by terms in b0,b1,b2,...
+ ** over to a0,a1,a2. It then calls the output subroutine
+ ** and resets the aggregate accumulator registers in preparation
+ ** for the next GROUP BY batch.
+ */
+ sqlite3ExprCodeMove(pParse, iBMem, iAMem, pGroupBy->nExpr);
+ sqlite3VdbeAddOp2(v, OP_Gosub, regOutputRow, addrOutputRow);
+ VdbeComment((v, "output one row"));
+ sqlite3VdbeAddOp2(v, OP_IfPos, iAbortFlag, addrEnd); VdbeCoverage(v);
+ VdbeComment((v, "check abort flag"));
+ sqlite3VdbeAddOp2(v, OP_Gosub, regReset, addrReset);
+ VdbeComment((v, "reset accumulator"));
+
+ /* Update the aggregate accumulators based on the content of
+ ** the current row
+ */
+ sqlite3VdbeJumpHere(v, addr1);
+ updateAccumulator(pParse, iUseFlag, pAggInfo);
+ sqlite3VdbeAddOp2(v, OP_Integer, 1, iUseFlag);
+ VdbeComment((v, "indicate data in accumulator"));
+
+ /* End of the loop
+ */
+ if( groupBySort ){
+ sqlite3VdbeAddOp2(v, OP_SorterNext, pAggInfo->sortingIdx, addrTopOfLoop);
+ VdbeCoverage(v);
+ }else{
+ sqlite3WhereEnd(pWInfo);
+ sqlite3VdbeChangeToNoop(v, addrSortingIdx);
+ }
+
+ /* Output the final row of result
+ */
+ sqlite3VdbeAddOp2(v, OP_Gosub, regOutputRow, addrOutputRow);
+ VdbeComment((v, "output final row"));
+
+ /* Jump over the subroutines
+ */
+ sqlite3VdbeGoto(v, addrEnd);
+
+ /* Generate a subroutine that outputs a single row of the result
+ ** set. This subroutine first looks at the iUseFlag. If iUseFlag
+ ** is less than or equal to zero, the subroutine is a no-op. If
+ ** the processing calls for the query to abort, this subroutine
+ ** increments the iAbortFlag memory location before returning in
+ ** order to signal the caller to abort.
+ */
+ addrSetAbort = sqlite3VdbeCurrentAddr(v);
+ sqlite3VdbeAddOp2(v, OP_Integer, 1, iAbortFlag);
+ VdbeComment((v, "set abort flag"));
+ sqlite3VdbeAddOp1(v, OP_Return, regOutputRow);
+ sqlite3VdbeResolveLabel(v, addrOutputRow);
+ addrOutputRow = sqlite3VdbeCurrentAddr(v);
+ sqlite3VdbeAddOp2(v, OP_IfPos, iUseFlag, addrOutputRow+2);
+ VdbeCoverage(v);
+ VdbeComment((v, "Groupby result generator entry point"));
+ sqlite3VdbeAddOp1(v, OP_Return, regOutputRow);
+ finalizeAggFunctions(pParse, pAggInfo);
+ sqlite3ExprIfFalse(pParse, pHaving, addrOutputRow+1, SQLITE_JUMPIFNULL);
+ selectInnerLoop(pParse, p, -1, &sSort,
+ &sDistinct, pDest,
+ addrOutputRow+1, addrSetAbort);
+ sqlite3VdbeAddOp1(v, OP_Return, regOutputRow);
+ VdbeComment((v, "end groupby result generator"));
+
+ /* Generate a subroutine that will reset the group-by accumulator
+ */
+ sqlite3VdbeResolveLabel(v, addrReset);
+ resetAccumulator(pParse, pAggInfo);
+ sqlite3VdbeAddOp2(v, OP_Integer, 0, iUseFlag);
+ VdbeComment((v, "indicate accumulator empty"));
+ sqlite3VdbeAddOp1(v, OP_Return, regReset);
+
+ } /* endif pGroupBy. Begin aggregate queries without GROUP BY: */
+ else {
+ Table *pTab;
+ if( (pTab = isSimpleCount(p, pAggInfo))!=0 ){
+ /* If isSimpleCount() returns a pointer to a Table structure, then
+ ** the SQL statement is of the form:
+ **
+ ** SELECT count(*) FROM <tbl>
+ **
+ ** where the Table structure returned represents table <tbl>.
+ **
+ ** This statement is so common that it is optimized specially. The
+ ** OP_Count instruction is executed either on the intkey table that
+ ** contains the data for table <tbl> or on one of its indexes. It
+ ** is better to execute the op on an index, as indexes are almost
+ ** always spread across fewer pages than their corresponding tables.
+ */
+ const int iDb = sqlite3SchemaToIndex(pParse->db, pTab->pSchema);
+ const int iCsr = pParse->nTab++; /* Cursor to scan b-tree */
+ Index *pIdx; /* Iterator variable */
+ KeyInfo *pKeyInfo = 0; /* Keyinfo for scanned index */
+ Index *pBest = 0; /* Best index found so far */
+ Pgno iRoot = pTab->tnum; /* Root page of scanned b-tree */
+
+ sqlite3CodeVerifySchema(pParse, iDb);
+ sqlite3TableLock(pParse, iDb, pTab->tnum, 0, pTab->zName);
+
+ /* Search for the index that has the lowest scan cost.
+ **
+ ** (2011-04-15) Do not do a full scan of an unordered index.
+ **
+ ** (2013-10-03) Do not count the entries in a partial index.
+ **
+ ** In practice the KeyInfo structure will not be used. It is only
+ ** passed to keep OP_OpenRead happy.
+ */
+ if( !HasRowid(pTab) ) pBest = sqlite3PrimaryKeyIndex(pTab);
+ if( !p->pSrc->a[0].fg.notIndexed ){
+ for(pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext){
+ if( pIdx->bUnordered==0
+ && pIdx->szIdxRow<pTab->szTabRow
+ && pIdx->pPartIdxWhere==0
+ && (!pBest || pIdx->szIdxRow<pBest->szIdxRow)
+ ){
+ pBest = pIdx;
+ }
+ }
+ }
+ if( pBest ){
+ iRoot = pBest->tnum;
+ pKeyInfo = sqlite3KeyInfoOfIndex(pParse, pBest);
+ }
+
+ /* Open a read-only cursor, execute the OP_Count, close the cursor. */
+ sqlite3VdbeAddOp4Int(v, OP_OpenRead, iCsr, (int)iRoot, iDb, 1);
+ if( pKeyInfo ){
+ sqlite3VdbeChangeP4(v, -1, (char *)pKeyInfo, P4_KEYINFO);
+ }
+ sqlite3VdbeAddOp2(v, OP_Count, iCsr, pAggInfo->aFunc[0].iMem);
+ sqlite3VdbeAddOp1(v, OP_Close, iCsr);
+ explainSimpleCount(pParse, pTab, pBest);
+ }else{
+ int regAcc = 0; /* "populate accumulators" flag */
+ int addrSkip;
+
+ /* If there are accumulator registers but no min() or max() functions
+ ** without FILTER clauses, allocate register regAcc. Register regAcc
+ ** will contain 0 the first time the inner loop runs, and 1 thereafter.
+ ** The code generated by updateAccumulator() uses this to ensure
+ ** that the accumulator registers are (a) updated only once if
+ ** there are no min() or max() functions or (b) always updated for the
+ ** first row visited by the aggregate, so that they are updated at
+ ** least once even if the FILTER clause means the min() or max()
+ ** function visits zero rows. */
+ if( pAggInfo->nAccumulator ){
+ for(i=0; i<pAggInfo->nFunc; i++){
+ if( ExprHasProperty(pAggInfo->aFunc[i].pFExpr, EP_WinFunc) ){
+ continue;
+ }
+ if( pAggInfo->aFunc[i].pFunc->funcFlags&SQLITE_FUNC_NEEDCOLL ){
+ break;
+ }
+ }
+ if( i==pAggInfo->nFunc ){
+ regAcc = ++pParse->nMem;
+ sqlite3VdbeAddOp2(v, OP_Integer, 0, regAcc);
+ }
+ }
+
+ /* This case runs if the aggregate has no GROUP BY clause. The
+ ** processing is much simpler since there is only a single row
+ ** of output.
+ */
+ assert( p->pGroupBy==0 );
+ resetAccumulator(pParse, pAggInfo);
+
+ /* If this query is a candidate for the min/max optimization, then
+ ** minMaxFlag will have been previously set to either
+ ** WHERE_ORDERBY_MIN or WHERE_ORDERBY_MAX and pMinMaxOrderBy will
+ ** be an appropriate ORDER BY expression for the optimization.
+ */
+ assert( minMaxFlag==WHERE_ORDERBY_NORMAL || pMinMaxOrderBy!=0 );
+ assert( pMinMaxOrderBy==0 || pMinMaxOrderBy->nExpr==1 );
+
+ SELECTTRACE(1,pParse,p,("WhereBegin\n"));
+ pWInfo = sqlite3WhereBegin(pParse, pTabList, pWhere, pMinMaxOrderBy,
+ 0, minMaxFlag, 0);
+ if( pWInfo==0 ){
+ goto select_end;
+ }
+ updateAccumulator(pParse, regAcc, pAggInfo);
+ if( regAcc ) sqlite3VdbeAddOp2(v, OP_Integer, 1, regAcc);
+ addrSkip = sqlite3WhereOrderByLimitOptLabel(pWInfo);
+ if( addrSkip!=sqlite3WhereContinueLabel(pWInfo) ){
+ sqlite3VdbeGoto(v, addrSkip);
+ }
+ sqlite3WhereEnd(pWInfo);
+ finalizeAggFunctions(pParse, pAggInfo);
+ }
+
+ sSort.pOrderBy = 0;
+ sqlite3ExprIfFalse(pParse, pHaving, addrEnd, SQLITE_JUMPIFNULL);
+ selectInnerLoop(pParse, p, -1, 0, 0,
+ pDest, addrEnd, addrEnd);
+ }
+ sqlite3VdbeResolveLabel(v, addrEnd);
+
+ } /* endif aggregate query */
+
+ if( sDistinct.eTnctType==WHERE_DISTINCT_UNORDERED ){
+ explainTempTable(pParse, "DISTINCT");
+ }
+
+ /* If there is an ORDER BY clause, then we need to sort the results
+ ** and send them to the callback one by one.
+ */
+ if( sSort.pOrderBy ){
+ explainTempTable(pParse,
+ sSort.nOBSat>0 ? "RIGHT PART OF ORDER BY":"ORDER BY");
+ assert( p->pEList==pEList );
+ generateSortTail(pParse, p, &sSort, pEList->nExpr, pDest);
+ }
+
+ /* Jump here to skip this query
+ */
+ sqlite3VdbeResolveLabel(v, iEnd);
+
+ /* The SELECT has been coded. If there is an error in the Parse structure,
+ ** set the return code to 1. Otherwise 0. */
+ rc = (pParse->nErr>0);
+
+ /* Control jumps to here if an error is encountered above, or upon
+ ** successful coding of the SELECT.
+ */
+select_end:
+ sqlite3ExprListDelete(db, pMinMaxOrderBy);
+#ifdef SQLITE_DEBUG
+ if( pAggInfo && !db->mallocFailed ){
+ for(i=0; i<pAggInfo->nColumn; i++){
+ Expr *pExpr = pAggInfo->aCol[i].pCExpr;
+ assert( pExpr!=0 || db->mallocFailed );
+ if( pExpr==0 ) continue;
+ assert( pExpr->pAggInfo==pAggInfo );
+ assert( pExpr->iAgg==i );
+ }
+ for(i=0; i<pAggInfo->nFunc; i++){
+ Expr *pExpr = pAggInfo->aFunc[i].pFExpr;
+ assert( pExpr!=0 || db->mallocFailed );
+ if( pExpr==0 ) continue;
+ assert( pExpr->pAggInfo==pAggInfo );
+ assert( pExpr->iAgg==i );
+ }
+ }
+#endif
+
+#if SELECTTRACE_ENABLED
+ SELECTTRACE(0x1,pParse,p,("end processing\n"));
+ if( (sqlite3_unsupported_selecttrace & 0x2000)!=0 && ExplainQueryPlanParent(pParse)==0 ){
+ sqlite3TreeViewSelect(0, p, 0);
+ }
+#endif
+ ExplainQueryPlanPop(pParse);
+ return rc;
+}
+
+/************** End of select.c **********************************************/
+/************** Begin file table.c *******************************************/
+/*
+** 2001 September 15
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+** This file contains the sqlite3_get_table() and sqlite3_free_table()
+** interface routines. These are just wrappers around the main
+** interface routine of sqlite3_exec().
+**
+** These routines are in a separate file so that they will not be linked
+** if they are not used.
+*/
+/* #include "sqliteInt.h" */
+
+#ifndef SQLITE_OMIT_GET_TABLE
+
+/*
+** This structure is used to pass data from sqlite3_get_table() through
+** to the callback function it uses to build the result.
+*/
+typedef struct TabResult {
+ char **azResult; /* Accumulated output */
+ char *zErrMsg; /* Error message text, if an error occurs */
+ u32 nAlloc; /* Slots allocated for azResult[] */
+ u32 nRow; /* Number of rows in the result */
+ u32 nColumn; /* Number of columns in the result */
+ u32 nData; /* Slots used in azResult[]. (nRow+1)*nColumn */
+ int rc; /* Return code from sqlite3_exec() */
+} TabResult;
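+
+/* For illustration, assuming a 2-column, 1-row query ("SELECT a,b FROM t1"
+** returning the row (1,'hello')): once sqlite3_get_table() below has run
+** to completion, the array handed back to the caller is laid out as
+**
+**     azResult[0] = "a"    azResult[1] = "b"        <- column names
+**     azResult[2] = "1"    azResult[3] = "hello"    <- data for row 0
+**
+** so the value of row i, column j is found at azResult[(i+1)*nColumn + j],
+** and NULL column values are stored as NULL pointers.
+*/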
+
+/*
+** This routine is called once for each row in the result table. Its job
+** is to fill in the TabResult structure appropriately, allocating new
+** memory as necessary.
+*/
+static int sqlite3_get_table_cb(void *pArg, int nCol, char **argv, char **colv){
+ TabResult *p = (TabResult*)pArg; /* Result accumulator */
+ int need; /* Slots needed in p->azResult[] */
+ int i; /* Loop counter */
+ char *z; /* A single column of result */
+
+ /* Make sure there is enough space in p->azResult to hold everything
+ ** we need to remember from this invocation of the callback.
+ */
+ if( p->nRow==0 && argv!=0 ){
+ need = nCol*2;
+ }else{
+ need = nCol;
+ }
+ if( p->nData + need > p->nAlloc ){
+ char **azNew;
+ p->nAlloc = p->nAlloc*2 + need;
+ azNew = sqlite3Realloc( p->azResult, sizeof(char*)*p->nAlloc );
+ if( azNew==0 ) goto malloc_failed;
+ p->azResult = azNew;
+ }
+
+ /* If this is the first row, then generate an extra row containing
+ ** the names of all columns.
+ */
+ if( p->nRow==0 ){
+ p->nColumn = nCol;
+ for(i=0; i<nCol; i++){
+ z = sqlite3_mprintf("%s", colv[i]);
+ if( z==0 ) goto malloc_failed;
+ p->azResult[p->nData++] = z;
+ }
+ }else if( (int)p->nColumn!=nCol ){
+ sqlite3_free(p->zErrMsg);
+ p->zErrMsg = sqlite3_mprintf(
+ "sqlite3_get_table() called with two or more incompatible queries"
+ );
+ p->rc = SQLITE_ERROR;
+ return 1;
+ }
+
+ /* Copy over the row data
+ */
+ if( argv!=0 ){
+ for(i=0; i<nCol; i++){
+ if( argv[i]==0 ){
+ z = 0;
+ }else{
+ int n = sqlite3Strlen30(argv[i])+1;
+ z = sqlite3_malloc64( n );
+ if( z==0 ) goto malloc_failed;
+ memcpy(z, argv[i], n);
+ }
+ p->azResult[p->nData++] = z;
+ }
+ p->nRow++;
+ }
+ return 0;
+
+malloc_failed:
+ p->rc = SQLITE_NOMEM_BKPT;
+ return 1;
+}
+
+/*
+** Query the database. But instead of invoking a callback for each row,
+** malloc() for space to hold the result and return the entire results
+** at the conclusion of the call.
+**
+** The result that is written to ***pazResult is held in memory obtained
+** from malloc(). But the caller cannot free this memory directly.
+** Instead, the entire table should be passed to sqlite3_free_table() when
+** the calling procedure is finished using it.
+*/
+SQLITE_API int sqlite3_get_table(
+ sqlite3 *db, /* The database on which the SQL executes */
+ const char *zSql, /* The SQL to be executed */
+ char ***pazResult, /* Write the result table here */
+ int *pnRow, /* Write the number of rows in the result here */
+ int *pnColumn, /* Write the number of columns of result here */
+ char **pzErrMsg /* Write error messages here */
+){
+ int rc;
+ TabResult res;
+
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( !sqlite3SafetyCheckOk(db) || pazResult==0 ) return SQLITE_MISUSE_BKPT;
+#endif
+ *pazResult = 0;
+ if( pnColumn ) *pnColumn = 0;
+ if( pnRow ) *pnRow = 0;
+ if( pzErrMsg ) *pzErrMsg = 0;
+ res.zErrMsg = 0;
+ res.nRow = 0;
+ res.nColumn = 0;
+ res.nData = 1;
+ res.nAlloc = 20;
+ res.rc = SQLITE_OK;
+ res.azResult = sqlite3_malloc64(sizeof(char*)*res.nAlloc );
+ if( res.azResult==0 ){
+ db->errCode = SQLITE_NOMEM;
+ return SQLITE_NOMEM_BKPT;
+ }
+ res.azResult[0] = 0;
+ rc = sqlite3_exec(db, zSql, sqlite3_get_table_cb, &res, pzErrMsg);
+ assert( sizeof(res.azResult[0])>= sizeof(res.nData) );
+ res.azResult[0] = SQLITE_INT_TO_PTR(res.nData);
+ if( (rc&0xff)==SQLITE_ABORT ){
+ sqlite3_free_table(&res.azResult[1]);
+ if( res.zErrMsg ){
+ if( pzErrMsg ){
+ sqlite3_free(*pzErrMsg);
+ *pzErrMsg = sqlite3_mprintf("%s",res.zErrMsg);
+ }
+ sqlite3_free(res.zErrMsg);
+ }
+ db->errCode = res.rc; /* Assume 32-bit assignment is atomic */
+ return res.rc;
+ }
+ sqlite3_free(res.zErrMsg);
+ if( rc!=SQLITE_OK ){
+ sqlite3_free_table(&res.azResult[1]);
+ return rc;
+ }
+ if( res.nAlloc>res.nData ){
+ char **azNew;
+ azNew = sqlite3Realloc( res.azResult, sizeof(char*)*res.nData );
+ if( azNew==0 ){
+ sqlite3_free_table(&res.azResult[1]);
+ db->errCode = SQLITE_NOMEM;
+ return SQLITE_NOMEM_BKPT;
+ }
+ res.azResult = azNew;
+ }
+ *pazResult = &res.azResult[1];
+ if( pnColumn ) *pnColumn = res.nColumn;
+ if( pnRow ) *pnRow = res.nRow;
+ return rc;
+}
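+
+/* A minimal usage sketch (error handling abbreviated; the open handle "db"
+** and the table "t1" are assumed to exist):
+**
+**     char **azResult; char *zErr = 0; int nRow, nCol, i, j;
+**     if( sqlite3_get_table(db, "SELECT * FROM t1", &azResult,
+**                           &nRow, &nCol, &zErr)==SQLITE_OK ){
+**       for(i=1; i<=nRow; i++){        /* row 0 holds the column names */
+**         for(j=0; j<nCol; j++){
+**           printf("%s ", azResult[i*nCol+j] ? azResult[i*nCol+j] : "NULL");
+**         }
+**         printf("\n");
+**       }
+**       sqlite3_free_table(azResult);  /* do not free() the array directly */
+**     }else{
+**       sqlite3_free(zErr);            /* zErr holds the error message */
+**     }
+*/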
+
+/*
+** This routine frees the space that sqlite3_get_table() malloced.
+*/
+SQLITE_API void sqlite3_free_table(
+ char **azResult /* Result returned from sqlite3_get_table() */
+){
+ if( azResult ){
+ int i, n;
+ azResult--;
+ assert( azResult!=0 );
+ n = SQLITE_PTR_TO_INT(azResult[0]);
+ for(i=1; i<n; i++){ if( azResult[i] ) sqlite3_free(azResult[i]); }
+ sqlite3_free(azResult);
+ }
+}
+
+#endif /* SQLITE_OMIT_GET_TABLE */
+
+/************** End of table.c ***********************************************/
+/************** Begin file trigger.c *****************************************/
+/*
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+** This file contains the implementation for TRIGGERs
+*/
+/* #include "sqliteInt.h" */
+
+#ifndef SQLITE_OMIT_TRIGGER
+/*
+** Delete a linked list of TriggerStep structures.
+*/
+SQLITE_PRIVATE void sqlite3DeleteTriggerStep(sqlite3 *db, TriggerStep *pTriggerStep){
+ while( pTriggerStep ){
+ TriggerStep * pTmp = pTriggerStep;
+ pTriggerStep = pTriggerStep->pNext;
+
+ sqlite3ExprDelete(db, pTmp->pWhere);
+ sqlite3ExprListDelete(db, pTmp->pExprList);
+ sqlite3SelectDelete(db, pTmp->pSelect);
+ sqlite3IdListDelete(db, pTmp->pIdList);
+ sqlite3UpsertDelete(db, pTmp->pUpsert);
+ sqlite3SrcListDelete(db, pTmp->pFrom);
+ sqlite3DbFree(db, pTmp->zSpan);
+
+ sqlite3DbFree(db, pTmp);
+ }
+}
+
+/*
+** Given table pTab, return a list of all the triggers attached to
+** the table. The list is connected by Trigger.pNext pointers.
+**
+** All of the triggers on pTab that are in the same database as pTab
+** are already attached to pTab->pTrigger. But there might be additional
+** triggers on pTab in the TEMP schema. This routine prepends all
+** TEMP triggers on pTab to the beginning of the pTab->pTrigger list
+** and returns the combined list.
+**
+** To state it another way: This routine returns a list of all triggers
+** that fire off of pTab. The list will include any TEMP triggers on
+** pTab as well as the triggers listed in pTab->pTrigger.
+*/
+SQLITE_PRIVATE Trigger *sqlite3TriggerList(Parse *pParse, Table *pTab){
+ Schema * const pTmpSchema = pParse->db->aDb[1].pSchema;
+ Trigger *pList = 0; /* List of triggers to return */
+
+ if( pParse->disableTriggers ){
+ return 0;
+ }
+
+ if( pTmpSchema!=pTab->pSchema ){
+ HashElem *p;
+ assert( sqlite3SchemaMutexHeld(pParse->db, 0, pTmpSchema) );
+ for(p=sqliteHashFirst(&pTmpSchema->trigHash); p; p=sqliteHashNext(p)){
+ Trigger *pTrig = (Trigger *)sqliteHashData(p);
+ if( pTrig->pTabSchema==pTab->pSchema
+ && 0==sqlite3StrICmp(pTrig->table, pTab->zName)
+ ){
+ pTrig->pNext = (pList ? pList : pTab->pTrigger);
+ pList = pTrig;
+ }
+ }
+ }
+
+ return (pList ? pList : pTab->pTrigger);
+}
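+
+/* For example (a sketch of the pointer manipulation above): if
+** pTab->pTrigger holds T1->T2 and one TEMP trigger T3 exists on the same
+** table, the loop sets T3->pNext to T1 and returns T3, so the caller sees
+** T3->T1->T2 while pTab->pTrigger itself still points at T1.
+*/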
+
+/*
+** This is called by the parser when it sees a CREATE TRIGGER statement
+** up to the point of the BEGIN before the trigger actions. A Trigger
+** structure is generated based on the information available and stored
+** in pParse->pNewTrigger. After the trigger actions have been parsed, the
+** sqlite3FinishTrigger() function is called to complete the trigger
+** construction process.
+*/
+SQLITE_PRIVATE void sqlite3BeginTrigger(
+ Parse *pParse, /* The parse context of the CREATE TRIGGER statement */
+ Token *pName1, /* First part of a two-part trigger name, or whole name */
+ Token *pName2, /* Second part of a two-part trigger name, or empty */
+ int tr_tm, /* One of TK_BEFORE, TK_AFTER, TK_INSTEAD */
+ int op, /* One of TK_INSERT, TK_UPDATE, TK_DELETE */
+ IdList *pColumns, /* column list if this is an UPDATE OF trigger */
+ SrcList *pTableName,/* The name of the table/view the trigger applies to */
+ Expr *pWhen, /* WHEN clause */
+ int isTemp, /* True if the TEMPORARY keyword is present */
+ int noErr /* Suppress errors if the trigger already exists */
+){
+ Trigger *pTrigger = 0; /* The new trigger */
+ Table *pTab; /* Table that the trigger fires off of */
+ char *zName = 0; /* Name of the trigger */
+ sqlite3 *db = pParse->db; /* The database connection */
+ int iDb; /* The database to store the trigger in */
+ Token *pName; /* The unqualified trigger name */
+ DbFixer sFix; /* State vector for the DB fixer */
+
+ assert( pName1!=0 ); /* pName1->z might be NULL, but not pName1 itself */
+ assert( pName2!=0 );
+ assert( op==TK_INSERT || op==TK_UPDATE || op==TK_DELETE );
+ assert( op>0 && op<0xff );
+ if( isTemp ){
+ /* If TEMP was specified, then the trigger name may not be qualified. */
+ if( pName2->n>0 ){
+ sqlite3ErrorMsg(pParse, "temporary trigger may not have qualified name");
+ goto trigger_cleanup;
+ }
+ iDb = 1;
+ pName = pName1;
+ }else{
+ /* Figure out the db that the trigger will be created in */
+ iDb = sqlite3TwoPartName(pParse, pName1, pName2, &pName);
+ if( iDb<0 ){
+ goto trigger_cleanup;
+ }
+ }
+ if( !pTableName || db->mallocFailed ){
+ goto trigger_cleanup;
+ }
+
+ /* A long-standing parser bug is that this syntax was allowed:
+ **
+ ** CREATE TRIGGER attached.demo AFTER INSERT ON attached.tab ....
+ ** ^^^^^^^^
+ **
+ ** To maintain backwards compatibility, ignore the database
+ ** name on pTableName if we are reparsing out of the schema table
+ */
+ if( db->init.busy && iDb!=1 ){
+ sqlite3DbFree(db, pTableName->a[0].zDatabase);
+ pTableName->a[0].zDatabase = 0;
+ }
+
+ /* If the trigger name was unqualified, and the table is a temp table,
+ ** then set iDb to 1 to create the trigger in the temporary database.
+ ** If sqlite3SrcListLookup() returns 0, indicating the table does not
+ ** exist, the error is caught by the block below.
+ */
+ pTab = sqlite3SrcListLookup(pParse, pTableName);
+ if( db->init.busy==0 && pName2->n==0 && pTab
+ && pTab->pSchema==db->aDb[1].pSchema ){
+ iDb = 1;
+ }
+
+ /* Ensure the table name matches database name and that the table exists */
+ if( db->mallocFailed ) goto trigger_cleanup;
+ assert( pTableName->nSrc==1 );
+ sqlite3FixInit(&sFix, pParse, iDb, "trigger", pName);
+ if( sqlite3FixSrcList(&sFix, pTableName) ){
+ goto trigger_cleanup;
+ }
+ pTab = sqlite3SrcListLookup(pParse, pTableName);
+ if( !pTab ){
+ /* The table does not exist. */
+ if( db->init.iDb==1 ){
+ /* Ticket #3810.
+ ** Normally, whenever a table is dropped, all associated triggers are
+ ** dropped too. But if a TEMP trigger is created on a non-TEMP table
+ ** and the table is dropped by a different database connection, the
+ ** trigger is not visible to the database connection that does the
+ ** drop so the trigger cannot be dropped. This results in an
+ ** "orphaned trigger" - a trigger whose associated table is missing.
+ */
+ db->init.orphanTrigger = 1;
+ }
+ goto trigger_cleanup;
+ }
+ if( IsVirtual(pTab) ){
+ sqlite3ErrorMsg(pParse, "cannot create triggers on virtual tables");
+ goto trigger_cleanup;
+ }
+
+ /* Check that the trigger name is not reserved and that no trigger of the
+ ** specified name exists */
+ zName = sqlite3NameFromToken(db, pName);
+ if( zName==0 ){
+ assert( db->mallocFailed );
+ goto trigger_cleanup;
+ }
+ if( sqlite3CheckObjectName(pParse, zName, "trigger", pTab->zName) ){
+ goto trigger_cleanup;
+ }
+ assert( sqlite3SchemaMutexHeld(db, iDb, 0) );
+ if( !IN_RENAME_OBJECT ){
+ if( sqlite3HashFind(&(db->aDb[iDb].pSchema->trigHash),zName) ){
+ if( !noErr ){
+ sqlite3ErrorMsg(pParse, "trigger %T already exists", pName);
+ }else{
+ assert( !db->init.busy );
+ sqlite3CodeVerifySchema(pParse, iDb);
+ }
+ goto trigger_cleanup;
+ }
+ }
+
+ /* Do not create a trigger on a system table */
+ if( sqlite3StrNICmp(pTab->zName, "sqlite_", 7)==0 ){
+ sqlite3ErrorMsg(pParse, "cannot create trigger on system table");
+ goto trigger_cleanup;
+ }
+
+ /* INSTEAD of triggers are only for views and views only support INSTEAD
+ ** of triggers.
+ */
+ if( pTab->pSelect && tr_tm!=TK_INSTEAD ){
+ sqlite3ErrorMsg(pParse, "cannot create %s trigger on view: %S",
+ (tr_tm == TK_BEFORE)?"BEFORE":"AFTER", pTableName, 0);
+ goto trigger_cleanup;
+ }
+ if( !pTab->pSelect && tr_tm==TK_INSTEAD ){
+ sqlite3ErrorMsg(pParse, "cannot create INSTEAD OF"
+ " trigger on table: %S", pTableName, 0);
+ goto trigger_cleanup;
+ }
+
+#ifndef SQLITE_OMIT_AUTHORIZATION
+ if( !IN_RENAME_OBJECT ){
+ int iTabDb = sqlite3SchemaToIndex(db, pTab->pSchema);
+ int code = SQLITE_CREATE_TRIGGER;
+ const char *zDb = db->aDb[iTabDb].zDbSName;
+ const char *zDbTrig = isTemp ? db->aDb[1].zDbSName : zDb;
+ if( iTabDb==1 || isTemp ) code = SQLITE_CREATE_TEMP_TRIGGER;
+ if( sqlite3AuthCheck(pParse, code, zName, pTab->zName, zDbTrig) ){
+ goto trigger_cleanup;
+ }
+ if( sqlite3AuthCheck(pParse, SQLITE_INSERT, SCHEMA_TABLE(iTabDb),0,zDb)){
+ goto trigger_cleanup;
+ }
+ }
+#endif
+
+ /* INSTEAD OF triggers can only appear on views and BEFORE triggers
+ ** cannot appear on views. So we might as well translate every
+ ** INSTEAD OF trigger into a BEFORE trigger. It simplifies code
+ ** elsewhere.
+ */
+ if( tr_tm==TK_INSTEAD ){
+ tr_tm = TK_BEFORE;
+ }
+
+ /* Build the Trigger object */
+ pTrigger = (Trigger*)sqlite3DbMallocZero(db, sizeof(Trigger));
+ if( pTrigger==0 ) goto trigger_cleanup;
+ pTrigger->zName = zName;
+ zName = 0;
+ pTrigger->table = sqlite3DbStrDup(db, pTableName->a[0].zName);
+ pTrigger->pSchema = db->aDb[iDb].pSchema;
+ pTrigger->pTabSchema = pTab->pSchema;
+ pTrigger->op = (u8)op;
+ pTrigger->tr_tm = tr_tm==TK_BEFORE ? TRIGGER_BEFORE : TRIGGER_AFTER;
+ if( IN_RENAME_OBJECT ){
+ sqlite3RenameTokenRemap(pParse, pTrigger->table, pTableName->a[0].zName);
+ pTrigger->pWhen = pWhen;
+ pWhen = 0;
+ }else{
+ pTrigger->pWhen = sqlite3ExprDup(db, pWhen, EXPRDUP_REDUCE);
+ }
+ pTrigger->pColumns = pColumns;
+ pColumns = 0;
+ assert( pParse->pNewTrigger==0 );
+ pParse->pNewTrigger = pTrigger;
+
+trigger_cleanup:
+ sqlite3DbFree(db, zName);
+ sqlite3SrcListDelete(db, pTableName);
+ sqlite3IdListDelete(db, pColumns);
+ sqlite3ExprDelete(db, pWhen);
+ if( !pParse->pNewTrigger ){
+ sqlite3DeleteTrigger(db, pTrigger);
+ }else{
+ assert( pParse->pNewTrigger==pTrigger );
+ }
+}
+
+/*
+** This routine is called after all of the trigger actions have been parsed
+** in order to complete the process of building the trigger.
+*/
+SQLITE_PRIVATE void sqlite3FinishTrigger(
+ Parse *pParse, /* Parser context */
+ TriggerStep *pStepList, /* The triggered program */
+ Token *pAll /* Token that describes the complete CREATE TRIGGER */
+){
+ Trigger *pTrig = pParse->pNewTrigger; /* Trigger being finished */
+ char *zName; /* Name of trigger */
+ sqlite3 *db = pParse->db; /* The database */
+ DbFixer sFix; /* Fixer object */
+ int iDb; /* Database containing the trigger */
+ Token nameToken; /* Trigger name for error reporting */
+
+ pParse->pNewTrigger = 0;
+ if( NEVER(pParse->nErr) || !pTrig ) goto triggerfinish_cleanup;
+ zName = pTrig->zName;
+ iDb = sqlite3SchemaToIndex(pParse->db, pTrig->pSchema);
+ pTrig->step_list = pStepList;
+ while( pStepList ){
+ pStepList->pTrig = pTrig;
+ pStepList = pStepList->pNext;
+ }
+ sqlite3TokenInit(&nameToken, pTrig->zName);
+ sqlite3FixInit(&sFix, pParse, iDb, "trigger", &nameToken);
+ if( sqlite3FixTriggerStep(&sFix, pTrig->step_list)
+ || sqlite3FixExpr(&sFix, pTrig->pWhen)
+ ){
+ goto triggerfinish_cleanup;
+ }
+
+#ifndef SQLITE_OMIT_ALTERTABLE
+ if( IN_RENAME_OBJECT ){
+ assert( !db->init.busy );
+ pParse->pNewTrigger = pTrig;
+ pTrig = 0;
+ }else
+#endif
+
+ /* if we are not initializing,
+ ** build the sqlite_schema entry
+ */
+ if( !db->init.busy ){
+ Vdbe *v;
+ char *z;
+
+ /* Make an entry in the sqlite_schema table */
+ v = sqlite3GetVdbe(pParse);
+ if( v==0 ) goto triggerfinish_cleanup;
+ sqlite3BeginWriteOperation(pParse, 0, iDb);
+ z = sqlite3DbStrNDup(db, (char*)pAll->z, pAll->n);
+ testcase( z==0 );
+ sqlite3NestedParse(pParse,
+ "INSERT INTO %Q." DFLT_SCHEMA_TABLE
+ " VALUES('trigger',%Q,%Q,0,'CREATE TRIGGER %q')",
+ db->aDb[iDb].zDbSName, zName,
+ pTrig->table, z);
+ sqlite3DbFree(db, z);
+ sqlite3ChangeCookie(pParse, iDb);
+ sqlite3VdbeAddParseSchemaOp(v, iDb,
+ sqlite3MPrintf(db, "type='trigger' AND name='%q'", zName));
+ }
+
+ if( db->init.busy ){
+ Trigger *pLink = pTrig;
+ Hash *pHash = &db->aDb[iDb].pSchema->trigHash;
+ assert( sqlite3SchemaMutexHeld(db, iDb, 0) );
+ assert( pLink!=0 );
+ pTrig = sqlite3HashInsert(pHash, zName, pTrig);
+ if( pTrig ){
+ sqlite3OomFault(db);
+ }else if( pLink->pSchema==pLink->pTabSchema ){
+ Table *pTab;
+ pTab = sqlite3HashFind(&pLink->pTabSchema->tblHash, pLink->table);
+ assert( pTab!=0 );
+ pLink->pNext = pTab->pTrigger;
+ pTab->pTrigger = pLink;
+ }
+ }
+
+triggerfinish_cleanup:
+ sqlite3DeleteTrigger(db, pTrig);
+ assert( IN_RENAME_OBJECT || !pParse->pNewTrigger );
+ sqlite3DeleteTriggerStep(db, pStepList);
+}
+
+/*
+** Duplicate a range of text from an SQL statement, then convert all
+** whitespace characters into ordinary space characters.
+*/
+static char *triggerSpanDup(sqlite3 *db, const char *zStart, const char *zEnd){
+ char *z = sqlite3DbSpanDup(db, zStart, zEnd);
+ int i;
+ if( z ) for(i=0; z[i]; i++) if( sqlite3Isspace(z[i]) ) z[i] = ' ';
+ return z;
+}
+
+/*
+** Turn a SELECT statement (that the pSelect parameter points to) into
+** a trigger step. Return a pointer to a TriggerStep structure.
+**
+** The parser calls this routine when it finds a SELECT statement in
+** the body of a TRIGGER.
+*/
+SQLITE_PRIVATE TriggerStep *sqlite3TriggerSelectStep(
+ sqlite3 *db, /* Database connection */
+ Select *pSelect, /* The SELECT statement */
+ const char *zStart, /* Start of SQL text */
+ const char *zEnd /* End of SQL text */
+){
+ TriggerStep *pTriggerStep = sqlite3DbMallocZero(db, sizeof(TriggerStep));
+ if( pTriggerStep==0 ) {
+ sqlite3SelectDelete(db, pSelect);
+ return 0;
+ }
+ pTriggerStep->op = TK_SELECT;
+ pTriggerStep->pSelect = pSelect;
+ pTriggerStep->orconf = OE_Default;
+ pTriggerStep->zSpan = triggerSpanDup(db, zStart, zEnd);
+ return pTriggerStep;
+}
+
+/*
+** Allocate space to hold a new trigger step. The allocated space
+** holds both the TriggerStep object and the TriggerStep.target.z string.
+**
+** If an OOM error occurs, NULL is returned and db->mallocFailed is set.
+*/
+static TriggerStep *triggerStepAllocate(
+ Parse *pParse, /* Parser context */
+ u8 op, /* Trigger opcode */
+ Token *pName, /* The target name */
+ const char *zStart, /* Start of SQL text */
+ const char *zEnd /* End of SQL text */
+){
+ sqlite3 *db = pParse->db;
+ TriggerStep *pTriggerStep;
+
+ pTriggerStep = sqlite3DbMallocZero(db, sizeof(TriggerStep) + pName->n + 1);
+ if( pTriggerStep ){
+ char *z = (char*)&pTriggerStep[1];
+ memcpy(z, pName->z, pName->n);
+ sqlite3Dequote(z);
+ pTriggerStep->zTarget = z;
+ pTriggerStep->op = op;
+ pTriggerStep->zSpan = triggerSpanDup(db, zStart, zEnd);
+ if( IN_RENAME_OBJECT ){
+ sqlite3RenameTokenMap(pParse, pTriggerStep->zTarget, pName);
+ }
+ }
+ return pTriggerStep;
+}
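+
+/* For illustration: the single allocation made above is laid out as
+**
+**     [ TriggerStep object | dequoted copy of pName, NUL-terminated ]
+**
+** with pTriggerStep->zTarget pointing just past the TriggerStep object,
+** so both parts are released by the one sqlite3DbFree() of the step.
+*/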
+
+/*
+** Build a trigger step out of an INSERT statement. Return a pointer
+** to the new trigger step.
+**
+** The parser calls this routine when it sees an INSERT inside the
+** body of a trigger.
+*/
+SQLITE_PRIVATE TriggerStep *sqlite3TriggerInsertStep(
+ Parse *pParse, /* Parser */
+ Token *pTableName, /* Name of the table into which we insert */
+ IdList *pColumn, /* List of columns in pTableName to insert into */
+ Select *pSelect, /* A SELECT statement that supplies values */
+ u8 orconf, /* The conflict algorithm (OE_Abort, OE_Replace, etc.) */
+ Upsert *pUpsert, /* ON CONFLICT clauses for upsert */
+ const char *zStart, /* Start of SQL text */
+ const char *zEnd /* End of SQL text */
+){
+ sqlite3 *db = pParse->db;
+ TriggerStep *pTriggerStep;
+
+ assert(pSelect != 0 || db->mallocFailed);
+
+ pTriggerStep = triggerStepAllocate(pParse, TK_INSERT, pTableName,zStart,zEnd);
+ if( pTriggerStep ){
+ if( IN_RENAME_OBJECT ){
+ pTriggerStep->pSelect = pSelect;
+ pSelect = 0;
+ }else{
+ pTriggerStep->pSelect = sqlite3SelectDup(db, pSelect, EXPRDUP_REDUCE);
+ }
+ pTriggerStep->pIdList = pColumn;
+ pTriggerStep->pUpsert = pUpsert;
+ pTriggerStep->orconf = orconf;
+ if( pUpsert ){
+ sqlite3HasExplicitNulls(pParse, pUpsert->pUpsertTarget);
+ }
+ }else{
+ testcase( pColumn );
+ sqlite3IdListDelete(db, pColumn);
+ testcase( pUpsert );
+ sqlite3UpsertDelete(db, pUpsert);
+ }
+ sqlite3SelectDelete(db, pSelect);
+
+ return pTriggerStep;
+}
+
+/*
+** Construct a trigger step that implements an UPDATE statement and return
+** a pointer to that trigger step. The parser calls this routine when it
+** sees an UPDATE statement inside the body of a CREATE TRIGGER.
+*/
+SQLITE_PRIVATE TriggerStep *sqlite3TriggerUpdateStep(
+ Parse *pParse, /* Parser */
+ Token *pTableName, /* Name of the table to be updated */
+ SrcList *pFrom, /* FROM clause of an UPDATE-FROM statement, or NULL */
+ ExprList *pEList, /* The SET clause: list of column and new values */
+ Expr *pWhere, /* The WHERE clause */
+ u8 orconf, /* The conflict algorithm. (OE_Abort, OE_Ignore, etc) */
+ const char *zStart, /* Start of SQL text */
+ const char *zEnd /* End of SQL text */
+){
+ sqlite3 *db = pParse->db;
+ TriggerStep *pTriggerStep;
+
+ pTriggerStep = triggerStepAllocate(pParse, TK_UPDATE, pTableName,zStart,zEnd);
+ if( pTriggerStep ){
+ if( IN_RENAME_OBJECT ){
+ pTriggerStep->pExprList = pEList;
+ pTriggerStep->pWhere = pWhere;
+ pTriggerStep->pFrom = pFrom;
+ pEList = 0;
+ pWhere = 0;
+ pFrom = 0;
+ }else{
+ pTriggerStep->pExprList = sqlite3ExprListDup(db, pEList, EXPRDUP_REDUCE);
+ pTriggerStep->pWhere = sqlite3ExprDup(db, pWhere, EXPRDUP_REDUCE);
+ pTriggerStep->pFrom = sqlite3SrcListDup(db, pFrom, EXPRDUP_REDUCE);
+ }
+ pTriggerStep->orconf = orconf;
+ }
+ sqlite3ExprListDelete(db, pEList);
+ sqlite3ExprDelete(db, pWhere);
+ sqlite3SrcListDelete(db, pFrom);
+ return pTriggerStep;
+}
+
+/*
+** Construct a trigger step that implements a DELETE statement and return
+** a pointer to that trigger step. The parser calls this routine when it
+** sees a DELETE statement inside the body of a CREATE TRIGGER.
+*/
+SQLITE_PRIVATE TriggerStep *sqlite3TriggerDeleteStep(
+ Parse *pParse, /* Parser */
+ Token *pTableName, /* The table from which rows are deleted */
+ Expr *pWhere, /* The WHERE clause */
+ const char *zStart, /* Start of SQL text */
+ const char *zEnd /* End of SQL text */
+){
+ sqlite3 *db = pParse->db;
+ TriggerStep *pTriggerStep;
+
+ pTriggerStep = triggerStepAllocate(pParse, TK_DELETE, pTableName,zStart,zEnd);
+ if( pTriggerStep ){
+ if( IN_RENAME_OBJECT ){
+ pTriggerStep->pWhere = pWhere;
+ pWhere = 0;
+ }else{
+ pTriggerStep->pWhere = sqlite3ExprDup(db, pWhere, EXPRDUP_REDUCE);
+ }
+ pTriggerStep->orconf = OE_Default;
+ }
+ sqlite3ExprDelete(db, pWhere);
+ return pTriggerStep;
+}
+
+/*
+** Recursively delete a Trigger structure
+*/
+SQLITE_PRIVATE void sqlite3DeleteTrigger(sqlite3 *db, Trigger *pTrigger){
+ if( pTrigger==0 ) return;
+ sqlite3DeleteTriggerStep(db, pTrigger->step_list);
+ sqlite3DbFree(db, pTrigger->zName);
+ sqlite3DbFree(db, pTrigger->table);
+ sqlite3ExprDelete(db, pTrigger->pWhen);
+ sqlite3IdListDelete(db, pTrigger->pColumns);
+ sqlite3DbFree(db, pTrigger);
+}
+
+/*
+** This function is called to drop a trigger from the database schema.
+**
+** This may be called directly from the parser and therefore identifies
+** the trigger by name. The sqlite3DropTriggerPtr() routine does the
+** same job as this routine except it takes a pointer to the trigger
+** instead of the trigger name.
+**/
+SQLITE_PRIVATE void sqlite3DropTrigger(Parse *pParse, SrcList *pName, int noErr){
+ Trigger *pTrigger = 0;
+ int i;
+ const char *zDb;
+ const char *zName;
+ sqlite3 *db = pParse->db;
+
+ if( db->mallocFailed ) goto drop_trigger_cleanup;
+ if( SQLITE_OK!=sqlite3ReadSchema(pParse) ){
+ goto drop_trigger_cleanup;
+ }
+
+ assert( pName->nSrc==1 );
+ zDb = pName->a[0].zDatabase;
+ zName = pName->a[0].zName;
+ assert( zDb!=0 || sqlite3BtreeHoldsAllMutexes(db) );
+ for(i=OMIT_TEMPDB; i<db->nDb; i++){
+ int j = (i<2) ? i^1 : i; /* Search TEMP before MAIN */
+ if( zDb && sqlite3DbIsNamed(db, j, zDb)==0 ) continue;
+ assert( sqlite3SchemaMutexHeld(db, j, 0) );
+ pTrigger = sqlite3HashFind(&(db->aDb[j].pSchema->trigHash), zName);
+ if( pTrigger ) break;
+ }
+ if( !pTrigger ){
+ if( !noErr ){
+ sqlite3ErrorMsg(pParse, "no such trigger: %S", pName, 0);
+ }else{
+ sqlite3CodeVerifyNamedSchema(pParse, zDb);
+ }
+ pParse->checkSchema = 1;
+ goto drop_trigger_cleanup;
+ }
+ sqlite3DropTriggerPtr(pParse, pTrigger);
+
+drop_trigger_cleanup:
+ sqlite3SrcListDelete(db, pName);
+}
+
+/*
+** Return a pointer to the Table structure for the table that a trigger
+** is set on.
+*/
+static Table *tableOfTrigger(Trigger *pTrigger){
+ return sqlite3HashFind(&pTrigger->pTabSchema->tblHash, pTrigger->table);
+}
+
+
+/*
+** Drop a trigger given a pointer to that trigger.
+*/
+SQLITE_PRIVATE void sqlite3DropTriggerPtr(Parse *pParse, Trigger *pTrigger){
+ Table *pTable;
+ Vdbe *v;
+ sqlite3 *db = pParse->db;
+ int iDb;
+
+ iDb = sqlite3SchemaToIndex(pParse->db, pTrigger->pSchema);
+ assert( iDb>=0 && iDb<db->nDb );
+ pTable = tableOfTrigger(pTrigger);
+ assert( (pTable && pTable->pSchema==pTrigger->pSchema) || iDb==1 );
+#ifndef SQLITE_OMIT_AUTHORIZATION
+ if( pTable ){
+ int code = SQLITE_DROP_TRIGGER;
+ const char *zDb = db->aDb[iDb].zDbSName;
+ const char *zTab = SCHEMA_TABLE(iDb);
+ if( iDb==1 ) code = SQLITE_DROP_TEMP_TRIGGER;
+ if( sqlite3AuthCheck(pParse, code, pTrigger->zName, pTable->zName, zDb) ||
+ sqlite3AuthCheck(pParse, SQLITE_DELETE, zTab, 0, zDb) ){
+ return;
+ }
+ }
+#endif
+
+ /* Generate code to destroy the database record of the trigger.
+ */
+ if( (v = sqlite3GetVdbe(pParse))!=0 ){
+ sqlite3NestedParse(pParse,
+ "DELETE FROM %Q." DFLT_SCHEMA_TABLE " WHERE name=%Q AND type='trigger'",
+ db->aDb[iDb].zDbSName, pTrigger->zName
+ );
+ sqlite3ChangeCookie(pParse, iDb);
+ sqlite3VdbeAddOp4(v, OP_DropTrigger, iDb, 0, 0, pTrigger->zName, 0);
+ }
+}
+
+/*
+** Remove a trigger from the hash tables of the sqlite* pointer.
+*/
+SQLITE_PRIVATE void sqlite3UnlinkAndDeleteTrigger(sqlite3 *db, int iDb, const char *zName){
+ Trigger *pTrigger;
+ Hash *pHash;
+
+ assert( sqlite3SchemaMutexHeld(db, iDb, 0) );
+ pHash = &(db->aDb[iDb].pSchema->trigHash);
+ pTrigger = sqlite3HashInsert(pHash, zName, 0);
+ if( ALWAYS(pTrigger) ){
+ if( pTrigger->pSchema==pTrigger->pTabSchema ){
+ Table *pTab = tableOfTrigger(pTrigger);
+ if( pTab ){
+ Trigger **pp;
+ for(pp=&pTab->pTrigger; *pp; pp=&((*pp)->pNext)){
+ if( *pp==pTrigger ){
+ *pp = (*pp)->pNext;
+ break;
+ }
+ }
+ }
+ }
+ sqlite3DeleteTrigger(db, pTrigger);
+ db->mDbFlags |= DBFLAG_SchemaChange;
+ }
+}
+
+/*
+** pEList is the SET clause of an UPDATE statement. Each entry
+** in pEList is of the format <id>=<expr>. If any of the entries
+** in pEList have an <id> which matches an identifier in pIdList,
+** then return TRUE. If pIdList==NULL, then it is considered a
+** wildcard that matches anything. Likewise if pEList==NULL then
+** it matches anything so always return true. Return false only
+** if there is no match.
+*/
+static int checkColumnOverlap(IdList *pIdList, ExprList *pEList){
+ int e;
+ if( pIdList==0 || NEVER(pEList==0) ) return 1;
+ for(e=0; e<pEList->nExpr; e++){
+ if( sqlite3IdListIndex(pIdList, pEList->a[e].zEName)>=0 ) return 1;
+ }
+ return 0;
+}
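+
+/* For example (illustrative SQL, not taken from a test case): given
+**
+**     CREATE TRIGGER tr AFTER UPDATE OF a, b ON t1 BEGIN ...; END;
+**
+** checkColumnOverlap() returns true for "UPDATE t1 SET a=1" and for
+** "UPDATE t1 SET b=2, c=3", but false for "UPDATE t1 SET c=3", so no
+** trigger program is coded for that last statement.
+*/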
+
+/*
+** Return a list of all triggers on table pTab if there exists at least
+** one trigger that must be fired when an operation of type 'op' is
+** performed on the table, and, if that operation is an UPDATE, if at
+** least one of the columns in pChanges is being modified.
+*/
+SQLITE_PRIVATE Trigger *sqlite3TriggersExist(
+ Parse *pParse, /* Parse context */
+ Table *pTab, /* The table that contains the triggers */
+ int op, /* one of TK_DELETE, TK_INSERT, TK_UPDATE */
+ ExprList *pChanges, /* Columns that change in an UPDATE statement */
+ int *pMask /* OUT: Mask of TRIGGER_BEFORE|TRIGGER_AFTER */
+){
+ int mask = 0;
+ Trigger *pList = 0;
+ Trigger *p;
+
+ if( (pParse->db->flags & SQLITE_EnableTrigger)!=0 ){
+ pList = sqlite3TriggerList(pParse, pTab);
+ }
+ assert( pList==0 || IsVirtual(pTab)==0 );
+ for(p=pList; p; p=p->pNext){
+ if( p->op==op && checkColumnOverlap(p->pColumns, pChanges) ){
+ mask |= p->tr_tm;
+ }
+ }
+ if( pMask ){
+ *pMask = mask;
+ }
+ return (mask ? pList : 0);
+}
+
+/*
+** Convert the pStep->zTarget string into a SrcList and return a pointer
+** to that SrcList.
+**
+** This routine adds a specific database name, if needed, to the target when
+** forming the SrcList. This prevents a trigger in one database from
+** referring to a target in another database. An exception is when the
+** trigger is in TEMP in which case it can refer to any other database it
+** wants.
+*/
+SQLITE_PRIVATE SrcList *sqlite3TriggerStepSrc(
+ Parse *pParse, /* The parsing context */
+ TriggerStep *pStep /* The trigger containing the target token */
+){
+ sqlite3 *db = pParse->db;
+ SrcList *pSrc; /* SrcList to be returned */
+ char *zName = sqlite3DbStrDup(db, pStep->zTarget);
+ pSrc = sqlite3SrcListAppend(pParse, 0, 0, 0);
+ assert( pSrc==0 || pSrc->nSrc==1 );
+ assert( zName || pSrc==0 );
+ if( pSrc ){
+ Schema *pSchema = pStep->pTrig->pSchema;
+ pSrc->a[0].zName = zName;
+ if( pSchema!=db->aDb[1].pSchema ){
+ pSrc->a[0].pSchema = pSchema;
+ }
+ if( pStep->pFrom ){
+ SrcList *pDup = sqlite3SrcListDup(db, pStep->pFrom, 0);
+ pSrc = sqlite3SrcListAppendList(pParse, pSrc, pDup);
+ }
+ }else{
+ sqlite3DbFree(db, zName);
+ }
+ return pSrc;
+}
+
+/*
+** Generate VDBE code for the statements inside the body of a single
+** trigger.
+*/
+static int codeTriggerProgram(
+ Parse *pParse, /* The parser context */
+ TriggerStep *pStepList, /* List of statements inside the trigger body */
+ int orconf /* Conflict algorithm. (OE_Abort, etc) */
+){
+ TriggerStep *pStep;
+ Vdbe *v = pParse->pVdbe;
+ sqlite3 *db = pParse->db;
+
+ assert( pParse->pTriggerTab && pParse->pToplevel );
+ assert( pStepList );
+ assert( v!=0 );
+ for(pStep=pStepList; pStep; pStep=pStep->pNext){
+ /* Figure out the ON CONFLICT policy that will be used for this step
+ ** of the trigger program. If the statement that caused this trigger
+ ** to fire had an explicit ON CONFLICT, then use it. Otherwise, use
+ ** the ON CONFLICT policy that was specified as part of the trigger
+ ** step statement. Example:
+ **
+ ** CREATE TRIGGER AFTER INSERT ON t1 BEGIN;
+ ** INSERT OR REPLACE INTO t2 VALUES(new.a, new.b);
+ ** END;
+ **
+ ** INSERT INTO t1 ... ; -- insert into t2 uses REPLACE policy
+ ** INSERT OR IGNORE INTO t1 ... ; -- insert into t2 uses IGNORE policy
+ */
+ pParse->eOrconf = (orconf==OE_Default)?pStep->orconf:(u8)orconf;
+ assert( pParse->okConstFactor==0 );
+
+#ifndef SQLITE_OMIT_TRACE
+ if( pStep->zSpan ){
+ sqlite3VdbeAddOp4(v, OP_Trace, 0x7fffffff, 1, 0,
+ sqlite3MPrintf(db, "-- %s", pStep->zSpan),
+ P4_DYNAMIC);
+ }
+#endif
+
+ switch( pStep->op ){
+ case TK_UPDATE: {
+ sqlite3Update(pParse,
+ sqlite3TriggerStepSrc(pParse, pStep),
+ sqlite3ExprListDup(db, pStep->pExprList, 0),
+ sqlite3ExprDup(db, pStep->pWhere, 0),
+ pParse->eOrconf, 0, 0, 0
+ );
+ break;
+ }
+ case TK_INSERT: {
+ sqlite3Insert(pParse,
+ sqlite3TriggerStepSrc(pParse, pStep),
+ sqlite3SelectDup(db, pStep->pSelect, 0),
+ sqlite3IdListDup(db, pStep->pIdList),
+ pParse->eOrconf,
+ sqlite3UpsertDup(db, pStep->pUpsert)
+ );
+ break;
+ }
+ case TK_DELETE: {
+ sqlite3DeleteFrom(pParse,
+ sqlite3TriggerStepSrc(pParse, pStep),
+ sqlite3ExprDup(db, pStep->pWhere, 0), 0, 0
+ );
+ break;
+ }
+ default: assert( pStep->op==TK_SELECT ); {
+ SelectDest sDest;
+ Select *pSelect = sqlite3SelectDup(db, pStep->pSelect, 0);
+ sqlite3SelectDestInit(&sDest, SRT_Discard, 0);
+ sqlite3Select(pParse, pSelect, &sDest);
+ sqlite3SelectDelete(db, pSelect);
+ break;
+ }
+ }
+ if( pStep->op!=TK_SELECT ){
+ sqlite3VdbeAddOp0(v, OP_ResetCount);
+ }
+ }
+
+ return 0;
+}
+
+#ifdef SQLITE_ENABLE_EXPLAIN_COMMENTS
+/*
+** This function is used to add VdbeComment() annotations to a VDBE
+** program. It is not used in production code, only for debugging.
+*/
+static const char *onErrorText(int onError){
+ switch( onError ){
+ case OE_Abort: return "abort";
+ case OE_Rollback: return "rollback";
+ case OE_Fail: return "fail";
+ case OE_Replace: return "replace";
+ case OE_Ignore: return "ignore";
+ case OE_Default: return "default";
+ }
+ return "n/a";
+}
+#endif
+
+/*
+** Parse context structure pFrom has just been used to create a sub-vdbe
+** (trigger program). If an error has occurred, transfer error information
+** from pFrom to pTo.
+*/
+static void transferParseError(Parse *pTo, Parse *pFrom){
+ assert( pFrom->zErrMsg==0 || pFrom->nErr );
+ assert( pTo->zErrMsg==0 || pTo->nErr );
+ if( pTo->nErr==0 ){
+ pTo->zErrMsg = pFrom->zErrMsg;
+ pTo->nErr = pFrom->nErr;
+ pTo->rc = pFrom->rc;
+ }else{
+ sqlite3DbFree(pFrom->db, pFrom->zErrMsg);
+ }
+}
+
+/*
+** Create and populate a new TriggerPrg object with a sub-program
+** implementing trigger pTrigger with ON CONFLICT policy orconf.
+*/
+static TriggerPrg *codeRowTrigger(
+ Parse *pParse, /* Current parse context */
+ Trigger *pTrigger, /* Trigger to code */
+ Table *pTab, /* The table pTrigger is attached to */
+ int orconf /* ON CONFLICT policy to code trigger program with */
+){
+ Parse *pTop = sqlite3ParseToplevel(pParse);
+ sqlite3 *db = pParse->db; /* Database handle */
+ TriggerPrg *pPrg; /* Value to return */
+ Expr *pWhen = 0; /* Duplicate of trigger WHEN expression */
+ Vdbe *v; /* Temporary VM */
+ NameContext sNC; /* Name context for sub-vdbe */
+ SubProgram *pProgram = 0; /* Sub-vdbe for trigger program */
+ Parse *pSubParse; /* Parse context for sub-vdbe */
+ int iEndTrigger = 0; /* Label to jump to if WHEN is false */
+
+ assert( pTrigger->zName==0 || pTab==tableOfTrigger(pTrigger) );
+ assert( pTop->pVdbe );
+
+ /* Allocate the TriggerPrg and SubProgram objects. To ensure that they
+ ** are freed if an error occurs, link them into the Parse.pTriggerPrg
+ ** list of the top-level Parse object sooner rather than later. */
+ pPrg = sqlite3DbMallocZero(db, sizeof(TriggerPrg));
+ if( !pPrg ) return 0;
+ pPrg->pNext = pTop->pTriggerPrg;
+ pTop->pTriggerPrg = pPrg;
+ pPrg->pProgram = pProgram = sqlite3DbMallocZero(db, sizeof(SubProgram));
+ if( !pProgram ) return 0;
+ sqlite3VdbeLinkSubProgram(pTop->pVdbe, pProgram);
+ pPrg->pTrigger = pTrigger;
+ pPrg->orconf = orconf;
+ pPrg->aColmask[0] = 0xffffffff;
+ pPrg->aColmask[1] = 0xffffffff;
+
+ /* Allocate and populate a new Parse context to use for coding the
+ ** trigger sub-program. */
+ pSubParse = sqlite3StackAllocZero(db, sizeof(Parse));
+ if( !pSubParse ) return 0;
+ memset(&sNC, 0, sizeof(sNC));
+ sNC.pParse = pSubParse;
+ pSubParse->db = db;
+ pSubParse->pTriggerTab = pTab;
+ pSubParse->pToplevel = pTop;
+ pSubParse->zAuthContext = pTrigger->zName;
+ pSubParse->eTriggerOp = pTrigger->op;
+ pSubParse->nQueryLoop = pParse->nQueryLoop;
+ pSubParse->disableVtab = pParse->disableVtab;
+
+ v = sqlite3GetVdbe(pSubParse);
+ if( v ){
+ VdbeComment((v, "Start: %s.%s (%s %s%s%s ON %s)",
+ pTrigger->zName, onErrorText(orconf),
+ (pTrigger->tr_tm==TRIGGER_BEFORE ? "BEFORE" : "AFTER"),
+ (pTrigger->op==TK_UPDATE ? "UPDATE" : ""),
+ (pTrigger->op==TK_INSERT ? "INSERT" : ""),
+ (pTrigger->op==TK_DELETE ? "DELETE" : ""),
+ pTab->zName
+ ));
+#ifndef SQLITE_OMIT_TRACE
+ if( pTrigger->zName ){
+ sqlite3VdbeChangeP4(v, -1,
+ sqlite3MPrintf(db, "-- TRIGGER %s", pTrigger->zName), P4_DYNAMIC
+ );
+ }
+#endif
+
+ /* If one was specified, code the WHEN clause. If it evaluates to false
+ ** (or NULL) the sub-vdbe is immediately halted by jumping to the
+ ** OP_Halt inserted at the end of the program. */
+ if( pTrigger->pWhen ){
+ pWhen = sqlite3ExprDup(db, pTrigger->pWhen, 0);
+ if( SQLITE_OK==sqlite3ResolveExprNames(&sNC, pWhen)
+ && db->mallocFailed==0
+ ){
+ iEndTrigger = sqlite3VdbeMakeLabel(pSubParse);
+ sqlite3ExprIfFalse(pSubParse, pWhen, iEndTrigger, SQLITE_JUMPIFNULL);
+ }
+ sqlite3ExprDelete(db, pWhen);
+ }
+
+ /* Code the trigger program into the sub-vdbe. */
+ codeTriggerProgram(pSubParse, pTrigger->step_list, orconf);
+
+ /* Insert an OP_Halt at the end of the sub-program. */
+ if( iEndTrigger ){
+ sqlite3VdbeResolveLabel(v, iEndTrigger);
+ }
+ sqlite3VdbeAddOp0(v, OP_Halt);
+ VdbeComment((v, "End: %s.%s", pTrigger->zName, onErrorText(orconf)));
+
+ transferParseError(pParse, pSubParse);
+ if( db->mallocFailed==0 && pParse->nErr==0 ){
+ pProgram->aOp = sqlite3VdbeTakeOpArray(v, &pProgram->nOp, &pTop->nMaxArg);
+ }
+ pProgram->nMem = pSubParse->nMem;
+ pProgram->nCsr = pSubParse->nTab;
+ pProgram->token = (void *)pTrigger;
+ pPrg->aColmask[0] = pSubParse->oldmask;
+ pPrg->aColmask[1] = pSubParse->newmask;
+ sqlite3VdbeDelete(v);
+ }
+
+ assert( !pSubParse->pAinc && !pSubParse->pZombieTab );
+ assert( !pSubParse->pTriggerPrg && !pSubParse->nMaxArg );
+ sqlite3ParserReset(pSubParse);
+ sqlite3StackFree(db, pSubParse);
+
+ return pPrg;
+}
+
+/*
+** Return a pointer to a TriggerPrg object containing the sub-program for
+** trigger pTrigger with default ON CONFLICT algorithm orconf. If no such
+** TriggerPrg object exists, a new object is allocated and populated before
+** being returned.
+*/
+static TriggerPrg *getRowTrigger(
+ Parse *pParse, /* Current parse context */
+ Trigger *pTrigger, /* Trigger to code */
+ Table *pTab, /* The table trigger pTrigger is attached to */
+ int orconf /* ON CONFLICT algorithm. */
+){
+ Parse *pRoot = sqlite3ParseToplevel(pParse);
+ TriggerPrg *pPrg;
+
+ assert( pTrigger->zName==0 || pTab==tableOfTrigger(pTrigger) );
+
+ /* It may be that this trigger has already been coded (or is in the
+ ** process of being coded). If this is the case, then an entry with
+ ** a matching TriggerPrg.pTrigger field will be present somewhere
+ ** in the Parse.pTriggerPrg list. Search for such an entry. */
+ for(pPrg=pRoot->pTriggerPrg;
+ pPrg && (pPrg->pTrigger!=pTrigger || pPrg->orconf!=orconf);
+ pPrg=pPrg->pNext
+ );
+
+ /* If an existing TriggerPrg could not be located, create a new one. */
+ if( !pPrg ){
+ pPrg = codeRowTrigger(pParse, pTrigger, pTab, orconf);
+ }
+
+ return pPrg;
+}
+
+/*
+** Generate code for the trigger program associated with trigger p on
+** table pTab. The reg, orconf and ignoreJump parameters passed to this
+** function are the same as those described in the header function for
+** sqlite3CodeRowTrigger()
+*/
+SQLITE_PRIVATE void sqlite3CodeRowTriggerDirect(
+ Parse *pParse, /* Parse context */
+ Trigger *p, /* Trigger to code */
+ Table *pTab, /* The table to code triggers from */
+ int reg, /* Reg array containing OLD.* and NEW.* values */
+ int orconf, /* ON CONFLICT policy */
+ int ignoreJump /* Instruction to jump to for RAISE(IGNORE) */
+){
+ Vdbe *v = sqlite3GetVdbe(pParse); /* Main VM */
+ TriggerPrg *pPrg;
+ pPrg = getRowTrigger(pParse, p, pTab, orconf);
+ assert( pPrg || pParse->nErr || pParse->db->mallocFailed );
+
+ /* Code the OP_Program opcode in the parent VDBE. P4 of the OP_Program
+ ** is a pointer to the sub-vdbe containing the trigger program. */
+ if( pPrg ){
+ int bRecursive = (p->zName && 0==(pParse->db->flags&SQLITE_RecTriggers));
+
+ sqlite3VdbeAddOp4(v, OP_Program, reg, ignoreJump, ++pParse->nMem,
+ (const char *)pPrg->pProgram, P4_SUBPROGRAM);
+ VdbeComment(
+ (v, "Call: %s.%s", (p->zName?p->zName:"fkey"), onErrorText(orconf)));
+
+ /* Set the P5 operand of the OP_Program instruction to non-zero if
+ ** recursive invocation of this trigger program is disallowed. Recursive
+ ** invocation is disallowed if (a) the sub-program is really a trigger,
+ ** not a foreign key action, and (b) the flag to enable recursive triggers
+ ** is clear. */
+ sqlite3VdbeChangeP5(v, (u8)bRecursive);
+ }
+}
+
+/*
+** This is called to code the required FOR EACH ROW triggers for an operation
+** on table pTab. The operation to code triggers for (INSERT, UPDATE or DELETE)
+** is given by the op parameter. The tr_tm parameter determines whether the
+** BEFORE or AFTER triggers are coded. If the operation is an UPDATE, then
+** parameter pChanges is passed the list of columns being modified.
+**
+** If there are no triggers that fire at the specified time for the specified
+** operation on pTab, this function is a no-op.
+**
+** The reg argument is the address of the first in an array of registers
+** that contain the values substituted for the new.* and old.* references
+** in the trigger program. If N is the number of columns in table pTab
+** (a copy of pTab->nCol), then registers are populated as follows:
+**
+** Register Contains
+** ------------------------------------------------------
+** reg+0 OLD.rowid
+** reg+1 OLD.* value of left-most column of pTab
+** ... ...
+** reg+N OLD.* value of right-most column of pTab
+** reg+N+1 NEW.rowid
+** reg+N+2 NEW.* value of left-most column of pTab
+** ... ...
+** reg+N+N+1 NEW.* value of right-most column of pTab
+**
+** For ON DELETE triggers, the registers containing the NEW.* values will
+** never be accessed by the trigger program, so they are not allocated or
+** populated by the caller (there is no data to populate them with anyway).
+** Similarly, for ON INSERT triggers the values stored in the OLD.* registers
+** are never accessed, and so are not allocated by the caller. So, for an
+** ON INSERT trigger, the value passed to this function as parameter reg
+** is not a readable register, although registers (reg+N) through
+** (reg+N+N+1) are.
+**
+** Parameter orconf is the default conflict resolution algorithm for the
+** trigger program to use (REPLACE, IGNORE etc.). Parameter ignoreJump
+** is the instruction that control should jump to if a trigger program
+** raises an IGNORE exception.
+*/
+SQLITE_PRIVATE void sqlite3CodeRowTrigger(
+ Parse *pParse, /* Parse context */
+ Trigger *pTrigger, /* List of triggers on table pTab */
+ int op, /* One of TK_UPDATE, TK_INSERT, TK_DELETE */
+ ExprList *pChanges, /* Changes list for any UPDATE OF triggers */
+ int tr_tm, /* One of TRIGGER_BEFORE, TRIGGER_AFTER */
+ Table *pTab, /* The table to code triggers from */
+ int reg, /* The first in an array of registers (see above) */
+ int orconf, /* ON CONFLICT policy */
+ int ignoreJump /* Instruction to jump to for RAISE(IGNORE) */
+){
+ Trigger *p; /* Used to iterate through pTrigger list */
+
+ assert( op==TK_UPDATE || op==TK_INSERT || op==TK_DELETE );
+ assert( tr_tm==TRIGGER_BEFORE || tr_tm==TRIGGER_AFTER );
+ assert( (op==TK_UPDATE)==(pChanges!=0) );
+
+ for(p=pTrigger; p; p=p->pNext){
+
+ /* Sanity checking: The schema for the trigger and for the table are
+ ** always defined. The trigger must be in the same schema as the table
+ ** or else it must be a TEMP trigger. */
+ assert( p->pSchema!=0 );
+ assert( p->pTabSchema!=0 );
+ assert( p->pSchema==p->pTabSchema
+ || p->pSchema==pParse->db->aDb[1].pSchema );
+
+ /* Determine whether we should code this trigger */
+ if( p->op==op
+ && p->tr_tm==tr_tm
+ && checkColumnOverlap(p->pColumns, pChanges)
+ ){
+ sqlite3CodeRowTriggerDirect(pParse, p, pTab, reg, orconf, ignoreJump);
+ }
+ }
+}
+
+/*
+** Triggers may access values stored in the old.* or new.* pseudo-table.
+** This function returns a 32-bit bitmask indicating which columns of the
+** old.* or new.* tables actually are used by triggers. This information
+** may be used by the caller, for example, to avoid having to load the entire
+** old.* record into memory when executing an UPDATE or DELETE command.
+**
+** Bit 0 of the returned mask is set if the left-most column of the
+** table may be accessed using an [old|new].<col> reference. Bit 1 is set if
+** the second leftmost column value is required, and so on. If there
+** are more than 32 columns in the table, and at least one of the columns
+** with an index greater than 32 may be accessed, 0xffffffff is returned.
+**
+** It is not possible to determine if the old.rowid or new.rowid column is
+** accessed by triggers. The caller must always assume that it is.
+**
+** Parameter isNew must be either 1 or 0. If it is 0, then the mask returned
+** applies to the old.* table. If 1, the new.* table.
+**
+** Parameter tr_tm must be a mask with one or both of the TRIGGER_BEFORE
+** and TRIGGER_AFTER bits set. Values accessed by BEFORE triggers are only
+** included in the returned mask if the TRIGGER_BEFORE bit is set in the
+** tr_tm parameter. Similarly, values accessed by AFTER triggers are only
+** included in the returned mask if the TRIGGER_AFTER bit is set in tr_tm.
+*/
+SQLITE_PRIVATE u32 sqlite3TriggerColmask(
+ Parse *pParse, /* Parse context */
+ Trigger *pTrigger, /* List of triggers on table pTab */
+ ExprList *pChanges, /* Changes list for any UPDATE OF triggers */
+ int isNew, /* 1 for new.* ref mask, 0 for old.* ref mask */
+ int tr_tm, /* Mask of TRIGGER_BEFORE|TRIGGER_AFTER */
+ Table *pTab, /* The table to code triggers from */
+ int orconf /* Default ON CONFLICT policy for trigger steps */
+){
+ const int op = pChanges ? TK_UPDATE : TK_DELETE;
+ u32 mask = 0;
+ Trigger *p;
+
+ assert( isNew==1 || isNew==0 );
+ for(p=pTrigger; p; p=p->pNext){
+ if( p->op==op && (tr_tm&p->tr_tm)
+ && checkColumnOverlap(p->pColumns,pChanges)
+ ){
+ TriggerPrg *pPrg;
+ pPrg = getRowTrigger(pParse, p, pTab, orconf);
+ if( pPrg ){
+ mask |= pPrg->aColmask[isNew];
+ }
+ }
+ }
+
+ return mask;
+}
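+
+/* For example (a hypothetical schema): for t1(a,b,c) with one trigger
+** whose program reads old.a and old.c, the old.* mask returned is 0x05
+** (bits 0 and 2 set). A caller coding "UPDATE t1 SET b=..." may then
+** avoid loading column b of the old record altogether.
+*/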
+
+#endif /* !defined(SQLITE_OMIT_TRIGGER) */
+
+/************** End of trigger.c *********************************************/
+/************** Begin file update.c ******************************************/
+/*
+** 2001 September 15
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+** This file contains C code routines that are called by the parser
+** to handle UPDATE statements.
+*/
+/* #include "sqliteInt.h" */
+
+#ifndef SQLITE_OMIT_VIRTUALTABLE
+/* Forward declaration */
+static void updateVirtualTable(
+ Parse *pParse, /* The parsing context */
+ SrcList *pSrc, /* The virtual table to be modified */
+ Table *pTab, /* The virtual table */
+ ExprList *pChanges, /* The columns to change in the UPDATE statement */
+ Expr *pRowidExpr, /* Expression used to recompute the rowid */
+ int *aXRef, /* Mapping from columns of pTab to entries in pChanges */
+ Expr *pWhere, /* WHERE clause of the UPDATE statement */
+ int onError /* ON CONFLICT strategy */
+);
+#endif /* SQLITE_OMIT_VIRTUALTABLE */
+
+/*
+** The most recently coded instruction was an OP_Column to retrieve the
+** i-th column of table pTab. This routine sets the P4 parameter of the
+** OP_Column to the default value, if any.
+**
+** The default value of a column is specified by a DEFAULT clause in the
+** column definition. This was either supplied by the user when the table
+** was created, or added later to the table definition by an ALTER TABLE
+** command. If the latter, then the row-records in the table btree on disk
+** may not contain a value for the column and the default value, taken
+** from the P4 parameter of the OP_Column instruction, is returned instead.
+** If the former, then all row-records are guaranteed to include a value
+** for the column and the P4 value is not required.
+**
+** Column definitions created by an ALTER TABLE command may only have
+** literal default values specified: a number, null or a string. (If a more
+** complicated default expression value was provided, it is evaluated
+** when the ALTER TABLE is executed and one of the literal values written
+** into the sqlite_schema table.)
+**
+** Therefore, the P4 parameter is only required if the default value for
+** the column is a literal number, string or null. The sqlite3ValueFromExpr()
+** function is capable of transforming these types of expressions into
+** sqlite3_value objects.
+**
+** If the column has REAL affinity and the table is an ordinary b-tree table
+** (not a virtual table) then the value might have been stored as an
+** integer. In that case, add an OP_RealAffinity opcode to make sure
+** it has been converted into REAL.
+*/
+SQLITE_PRIVATE void sqlite3ColumnDefault(Vdbe *v, Table *pTab, int i, int iReg){
+ assert( pTab!=0 );
+ if( !pTab->pSelect ){
+ sqlite3_value *pValue = 0;
+ u8 enc = ENC(sqlite3VdbeDb(v));
+ Column *pCol = &pTab->aCol[i];
+ VdbeComment((v, "%s.%s", pTab->zName, pCol->zName));
+ assert( i<pTab->nCol );
+ sqlite3ValueFromExpr(sqlite3VdbeDb(v), pCol->pDflt, enc,
+ pCol->affinity, &pValue);
+ if( pValue ){
+ sqlite3VdbeAppendP4(v, pValue, P4_MEM);
+ }
+ }
+#ifndef SQLITE_OMIT_FLOATING_POINT
+ if( pTab->aCol[i].affinity==SQLITE_AFF_REAL && !IsVirtual(pTab) ){
+ sqlite3VdbeAddOp1(v, OP_RealAffinity, iReg);
+ }
+#endif
+}
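+
+/* For example (a hypothetical schema):
+**
+**     CREATE TABLE t1(a);
+**     INSERT INTO t1 VALUES(1);
+**     ALTER TABLE t1 ADD COLUMN b DEFAULT 5;
+**
+** The row inserted before the ALTER TABLE has no value for "b" stored in
+** its record on disk, so an OP_Column that reads t1.b falls back on the
+** P4 value (the literal 5) attached by this routine.
+*/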
+
+/*
+** Check to see if column iCol of index pIdx references any of the
+** columns defined by aXRef and chngRowid. Return true if it does
+** and false if not. This is an optimization. False-positives are a
+** performance degradation, but false-negatives can result in a corrupt
+** index and incorrect answers.
+**
+** aXRef[j] will be non-negative if column j of the original table is
+** being updated. chngRowid will be true if the rowid of the table is
+** being updated.
+*/
+static int indexColumnIsBeingUpdated(
+ Index *pIdx, /* The index to check */
+ int iCol, /* Which column of the index to check */
+ int *aXRef, /* aXRef[j]>=0 if column j is being updated */
+ int chngRowid /* true if the rowid is being updated */
+){
+ i16 iIdxCol = pIdx->aiColumn[iCol];
+ assert( iIdxCol!=XN_ROWID ); /* Cannot index rowid */
+ if( iIdxCol>=0 ){
+ return aXRef[iIdxCol]>=0;
+ }
+ assert( iIdxCol==XN_EXPR );
+ assert( pIdx->aColExpr!=0 );
+ assert( pIdx->aColExpr->a[iCol].pExpr!=0 );
+ return sqlite3ExprReferencesUpdatedColumn(pIdx->aColExpr->a[iCol].pExpr,
+ aXRef,chngRowid);
+}
+
+/*
+** Check to see if index pIdx is a partial index whose conditional
+** expression might change values due to an UPDATE. Return true if
+** the index is subject to change and false if the index is guaranteed
+** to be unchanged. This is an optimization. False-positives are a
+** performance degradation, but false-negatives can result in a corrupt
+** index and incorrect answers.
+**
+** aXRef[j] will be non-negative if column j of the original table is
+** being updated. chngRowid will be true if the rowid of the table is
+** being updated.
+*/
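+/* For example, with a hypothetical partial index
+**
+**     CREATE INDEX i2 ON t1(a) WHERE b>0;
+**
+** an "UPDATE t1 SET b=..." can move rows into or out of the index even
+** though indexed column a is unchanged, so the index must be maintained.
+*/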
+static int indexWhereClauseMightChange(
+ Index *pIdx, /* The index to check */
+ int *aXRef, /* aXRef[j]>=0 if column j is being updated */
+ int chngRowid /* true if the rowid is being updated */
+){
+ if( pIdx->pPartIdxWhere==0 ) return 0;
+ return sqlite3ExprReferencesUpdatedColumn(pIdx->pPartIdxWhere,
+ aXRef, chngRowid);
+}
+
+/*
+** Allocate and return a pointer to an expression of type TK_ROW with
+** Expr.iColumn set to value (iCol+1). The resolver will modify the
+** expression to be a TK_COLUMN reading column iCol of the first
+** table in the source-list (pSrc->a[0]).
+*/
+static Expr *exprRowColumn(Parse *pParse, int iCol){
+ Expr *pRet = sqlite3PExpr(pParse, TK_ROW, 0, 0);
+ if( pRet ) pRet->iColumn = iCol+1;
+ return pRet;
+}
+
+/*
+** Assuming both the pLimit and pOrderBy parameters are NULL, this function
+** generates VM code to run the query:
+**
+** SELECT <other-columns>, pChanges FROM pTabList WHERE pWhere
+**
+** and write the results to the ephemeral table already opened as cursor
+** iEph. None of pChanges, pTabList or pWhere are modified or consumed by
+** this function; they must be deleted by the caller.
+**
+** Or, if pLimit and pOrderBy are not NULL, and pTab is not a view:
+**
+** SELECT <other-columns>, pChanges FROM pTabList
+** WHERE pWhere
+** GROUP BY <other-columns>
+** ORDER BY pOrderBy LIMIT pLimit
+**
+** If pTab is a view, the GROUP BY clause is omitted.
+**
+** Exactly how results are written to table iEph, and exactly what
+** the <other-columns> in the query above are is determined by the type
+** of table pTabList->a[0].pTab.
+**
+** If the table is a WITHOUT ROWID table, then argument pPk must be its
+** PRIMARY KEY. In this case <other-columns> are the primary key columns
+** of the table, in order. The results of the query are written to ephemeral
+** table iEph as index keys, using OP_IdxInsert.
+**
+** If the table is actually a view, then <other-columns> are all columns of
+** the view. The results are written to the ephemeral table iEph as records
+** with automatically assigned integer keys.
+**
+** If the table is a virtual or ordinary intkey table, then <other-columns>
+** is its rowid. For a virtual table, the results are written to iEph as
+** records with automatically assigned integer keys. For intkey tables, the
+** rowid value in <other-columns> is used as the integer key, and the
+** remaining fields make up the table record.
+*/
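+/* A sketch of the kind of statement handled here (names are illustrative):
+**
+**     UPDATE t1 SET x = t2.x FROM t2 WHERE t1.id = t2.id;
+**
+** If t1 is a WITHOUT ROWID table, the generated query selects the PRIMARY
+** KEY columns of t1 followed by the new t2.x value for each matching row,
+** and each result row is stored in ephemeral table iEph as an index key.
+*/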
+static void updateFromSelect(
+ Parse *pParse, /* Parse context */
+ int iEph, /* Cursor for open eph. table */
+ Index *pPk, /* PK if table 0 is WITHOUT ROWID */
+ ExprList *pChanges, /* List of expressions to return */
+ SrcList *pTabList, /* List of tables to select from */
+ Expr *pWhere, /* WHERE clause for query */
+ ExprList *pOrderBy, /* ORDER BY clause */
+ Expr *pLimit /* LIMIT clause */
+){
+ int i;
+ SelectDest dest;
+ Select *pSelect = 0;
+ ExprList *pList = 0;
+ ExprList *pGrp = 0;
+ Expr *pLimit2 = 0;
+ ExprList *pOrderBy2 = 0;
+ sqlite3 *db = pParse->db;
+ Table *pTab = pTabList->a[0].pTab;
+ SrcList *pSrc;
+ Expr *pWhere2;
+ int eDest;
+
+#ifdef SQLITE_ENABLE_UPDATE_DELETE_LIMIT
+ if( pOrderBy && pLimit==0 ) {
+ sqlite3ErrorMsg(pParse, "ORDER BY without LIMIT on UPDATE");
+ return;
+ }
+ pOrderBy2 = sqlite3ExprListDup(db, pOrderBy, 0);
+ pLimit2 = sqlite3ExprDup(db, pLimit, 0);
+#else
+ UNUSED_PARAMETER(pOrderBy);
+ UNUSED_PARAMETER(pLimit);
+#endif
+
+ pSrc = sqlite3SrcListDup(db, pTabList, 0);
+ pWhere2 = sqlite3ExprDup(db, pWhere, 0);
+
+ assert( pTabList->nSrc>1 );
+ if( pSrc ){
+ pSrc->a[0].iCursor = -1;
+ pSrc->a[0].pTab->nTabRef--;
+ pSrc->a[0].pTab = 0;
+ }
+ if( pPk ){
+ for(i=0; i<pPk->nKeyCol; i++){
+ Expr *pNew = exprRowColumn(pParse, pPk->aiColumn[i]);
+#ifdef SQLITE_ENABLE_UPDATE_DELETE_LIMIT
+ if( pLimit ){
+ pGrp = sqlite3ExprListAppend(pParse, pGrp, sqlite3ExprDup(db, pNew, 0));
+ }
+#endif
+ pList = sqlite3ExprListAppend(pParse, pList, pNew);
+ }
+ eDest = SRT_Upfrom;
+ }else if( pTab->pSelect ){
+ for(i=0; i<pTab->nCol; i++){
+ pList = sqlite3ExprListAppend(pParse, pList, exprRowColumn(pParse, i));
+ }
+ eDest = SRT_Table;
+ }else{
+ eDest = IsVirtual(pTab) ? SRT_Table : SRT_Upfrom;
+ pList = sqlite3ExprListAppend(pParse, 0, sqlite3PExpr(pParse,TK_ROW,0,0));
+#ifdef SQLITE_ENABLE_UPDATE_DELETE_LIMIT
+ if( pLimit ){
+ pGrp = sqlite3ExprListAppend(pParse, 0, sqlite3PExpr(pParse,TK_ROW,0,0));
+ }
+#endif
+ }
+ if( ALWAYS(pChanges) ){
+ for(i=0; i<pChanges->nExpr; i++){
+ pList = sqlite3ExprListAppend(pParse, pList,
+ sqlite3ExprDup(db, pChanges->a[i].pExpr, 0)
+ );
+ }
+ }
+ pSelect = sqlite3SelectNew(pParse, pList,
+ pSrc, pWhere2, pGrp, 0, pOrderBy2, SF_UpdateFrom|SF_IncludeHidden, pLimit2
+ );
+ sqlite3SelectDestInit(&dest, eDest, iEph);
+ dest.iSDParm2 = (pPk ? pPk->nKeyCol : -1);
+ sqlite3Select(pParse, pSelect, &dest);
+ sqlite3SelectDelete(db, pSelect);
+}
+
+/*
+** Process an UPDATE statement.
+**
+**   UPDATE OR IGNORE tbl SET a=b, c=d FROM tbl2... WHERE e<5 AND f NOT NULL;
+**          \_______/ \_/     \______/      \_____/       \________________/
+**           onError   |      pChanges         |               pWhere
+**                    \___________________________/
+**                              pTabList
+*/
+SQLITE_PRIVATE void sqlite3Update(
+ Parse *pParse, /* The parser context */
+ SrcList *pTabList, /* The table in which we should change things */
+ ExprList *pChanges, /* Things to be changed */
+ Expr *pWhere, /* The WHERE clause. May be null */
+ int onError, /* How to handle constraint errors */
+ ExprList *pOrderBy, /* ORDER BY clause. May be null */
+ Expr *pLimit, /* LIMIT clause. May be null */
+ Upsert *pUpsert /* ON CONFLICT clause, or null */
+){
+ int i, j, k; /* Loop counters */
+ Table *pTab; /* The table to be updated */
+ int addrTop = 0; /* VDBE instruction address of the start of the loop */
+ WhereInfo *pWInfo = 0; /* Information about the WHERE clause */
+ Vdbe *v; /* The virtual database engine */
+ Index *pIdx; /* For looping over indices */
+ Index *pPk; /* The PRIMARY KEY index for WITHOUT ROWID tables */
+ int nIdx; /* Number of indices that need updating */
+ int nAllIdx; /* Total number of indexes */
+ int iBaseCur; /* Base cursor number */
+ int iDataCur; /* Cursor for the canonical data btree */
+ int iIdxCur; /* Cursor for the first index */
+ sqlite3 *db; /* The database structure */
+ int *aRegIdx = 0; /* Registers for each index and the main table */
+ int *aXRef = 0; /* aXRef[i] is the index in pChanges->a[] of the
+ ** expression for the i-th column of the table.
+ ** aXRef[i]==-1 if the i-th column is not changed. */
+ u8 *aToOpen; /* 1 for tables and indices to be opened */
+ u8 chngPk; /* PRIMARY KEY changed in a WITHOUT ROWID table */
+ u8 chngRowid; /* Rowid changed in a normal table */
+ u8 chngKey; /* Either chngPk or chngRowid */
+ Expr *pRowidExpr = 0; /* Expression defining the new record number */
+ int iRowidExpr = -1; /* Index of "rowid=" (or IPK) assignment in pChanges */
+ AuthContext sContext; /* The authorization context */
+ NameContext sNC; /* The name-context to resolve expressions in */
+ int iDb; /* Database containing the table being updated */
+ int eOnePass; /* ONEPASS_XXX value from where.c */
+ int hasFK; /* True if foreign key processing is required */
+ int labelBreak; /* Jump here to break out of UPDATE loop */
+ int labelContinue; /* Jump here to continue next step of UPDATE loop */
+ int flags; /* Flags for sqlite3WhereBegin() */
+
+#ifndef SQLITE_OMIT_TRIGGER
+ int isView; /* True when updating a view (INSTEAD OF trigger) */
+ Trigger *pTrigger; /* List of triggers on pTab, if required */
+ int tmask; /* Mask of TRIGGER_BEFORE|TRIGGER_AFTER */
+#endif
+ int newmask; /* Mask of NEW.* columns accessed by BEFORE triggers */
+ int iEph = 0; /* Ephemeral table holding all primary key values */
+ int nKey = 0; /* Number of elements in regKey for WITHOUT ROWID */
+ int aiCurOnePass[2]; /* The write cursors opened by WHERE_ONEPASS */
+ int addrOpen = 0; /* Address of OP_OpenEphemeral */
+ int iPk = 0; /* First of nPk cells holding PRIMARY KEY value */
+ i16 nPk = 0; /* Number of components of the PRIMARY KEY */
+ int bReplace = 0; /* True if REPLACE conflict resolution might happen */
+ int bFinishSeek = 1; /* The OP_FinishSeek opcode is needed */
+ int nChangeFrom = 0; /* If there is a FROM, pChanges->nExpr, else 0 */
+
+ /* Register Allocations */
+ int regRowCount = 0; /* A count of rows changed */
+ int regOldRowid = 0; /* The old rowid */
+ int regNewRowid = 0; /* The new rowid */
+ int regNew = 0; /* Content of the NEW.* table in triggers */
+ int regOld = 0; /* Content of OLD.* table in triggers */
+ int regRowSet = 0; /* Rowset of rows to be updated */
+ int regKey = 0; /* composite PRIMARY KEY value */
+
+ memset(&sContext, 0, sizeof(sContext));
+ db = pParse->db;
+ if( pParse->nErr || db->mallocFailed ){
+ goto update_cleanup;
+ }
+
+ /* Locate the table which we want to update.
+ */
+ pTab = sqlite3SrcListLookup(pParse, pTabList);
+ if( pTab==0 ) goto update_cleanup;
+ iDb = sqlite3SchemaToIndex(pParse->db, pTab->pSchema);
+
+ /* Figure out if we have any triggers and if the table being
+ ** updated is a view.
+ */
+#ifndef SQLITE_OMIT_TRIGGER
+ pTrigger = sqlite3TriggersExist(pParse, pTab, TK_UPDATE, pChanges, &tmask);
+ isView = pTab->pSelect!=0;
+ assert( pTrigger || tmask==0 );
+#else
+# define pTrigger 0
+# define isView 0
+# define tmask 0
+#endif
+#ifdef SQLITE_OMIT_VIEW
+# undef isView
+# define isView 0
+#endif
+
+ /* If there was a FROM clause, set nChangeFrom to the number of expressions
+ ** in the change-list. Otherwise, set it to 0. There cannot be a FROM
+ ** clause if this function is being called to generate code for part of
+ ** an UPSERT statement. */
+ nChangeFrom = (pTabList->nSrc>1) ? pChanges->nExpr : 0;
+ assert( nChangeFrom==0 || pUpsert==0 );
+
+#ifdef SQLITE_ENABLE_UPDATE_DELETE_LIMIT
+ if( !isView && nChangeFrom==0 ){
+ pWhere = sqlite3LimitWhere(
+ pParse, pTabList, pWhere, pOrderBy, pLimit, "UPDATE"
+ );
+ pOrderBy = 0;
+ pLimit = 0;
+ }
+#endif
+
+ if( sqlite3ViewGetColumnNames(pParse, pTab) ){
+ goto update_cleanup;
+ }
+ if( sqlite3IsReadOnly(pParse, pTab, tmask) ){
+ goto update_cleanup;
+ }
+
+ /* Allocate cursors for the main database table and for all indices.
+ ** The index cursors might not be used, but if they are used they
+ ** need to occur right after the database cursor. So go ahead and
+ ** allocate enough space, just in case.
+ */
+ iBaseCur = iDataCur = pParse->nTab++;
+ iIdxCur = iDataCur+1;
+ pPk = HasRowid(pTab) ? 0 : sqlite3PrimaryKeyIndex(pTab);
+ testcase( pPk!=0 && pPk!=pTab->pIndex );
+ for(nIdx=0, pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext, nIdx++){
+ if( pPk==pIdx ){
+ iDataCur = pParse->nTab;
+ }
+ pParse->nTab++;
+ }
+ if( pUpsert ){
+ /* On an UPSERT, reuse the same cursors already opened by INSERT */
+ iDataCur = pUpsert->iDataCur;
+ iIdxCur = pUpsert->iIdxCur;
+ pParse->nTab = iBaseCur;
+ }
+ pTabList->a[0].iCursor = iDataCur;
+
+ /* Allocate space for aXRef[], aRegIdx[], and aToOpen[].
+ ** Initialize aXRef[] and aToOpen[] to their default values.
+ */
+ aXRef = sqlite3DbMallocRawNN(db, sizeof(int) * (pTab->nCol+nIdx+1) + nIdx+2 );
+ if( aXRef==0 ) goto update_cleanup;
+ aRegIdx = aXRef+pTab->nCol;
+ aToOpen = (u8*)(aRegIdx+nIdx+1);
+ memset(aToOpen, 1, nIdx+1);
+ aToOpen[nIdx+1] = 0;
+ for(i=0; i<pTab->nCol; i++) aXRef[i] = -1;
+
+ /* Initialize the name-context */
+ memset(&sNC, 0, sizeof(sNC));
+ sNC.pParse = pParse;
+ sNC.pSrcList = pTabList;
+ sNC.uNC.pUpsert = pUpsert;
+ sNC.ncFlags = NC_UUpsert;
+
+ /* Begin generating code. */
+ v = sqlite3GetVdbe(pParse);
+ if( v==0 ) goto update_cleanup;
+
+ /* Resolve the column names in all the expressions of the
+ ** UPDATE statement. Also find the column index
+ ** for each column to be updated in the pChanges array. For each
+ ** column to be updated, make sure we have authorization to change
+ ** that column.
+ */
+ chngRowid = chngPk = 0;
+ for(i=0; i<pChanges->nExpr; i++){
+ /* If this is an UPDATE with a FROM clause, do not resolve expressions
+ ** here. The call to sqlite3Select() below will do that. */
+ if( nChangeFrom==0 && sqlite3ResolveExprNames(&sNC, pChanges->a[i].pExpr) ){
+ goto update_cleanup;
+ }
+ for(j=0; j<pTab->nCol; j++){
+ if( sqlite3StrICmp(pTab->aCol[j].zName, pChanges->a[i].zEName)==0 ){
+ if( j==pTab->iPKey ){
+ chngRowid = 1;
+ pRowidExpr = pChanges->a[i].pExpr;
+ iRowidExpr = i;
+ }else if( pPk && (pTab->aCol[j].colFlags & COLFLAG_PRIMKEY)!=0 ){
+ chngPk = 1;
+ }
+#ifndef SQLITE_OMIT_GENERATED_COLUMNS
+ else if( pTab->aCol[j].colFlags & COLFLAG_GENERATED ){
+ testcase( pTab->aCol[j].colFlags & COLFLAG_VIRTUAL );
+ testcase( pTab->aCol[j].colFlags & COLFLAG_STORED );
+ sqlite3ErrorMsg(pParse,
+ "cannot UPDATE generated column \"%s\"",
+ pTab->aCol[j].zName);
+ goto update_cleanup;
+ }
+#endif
+ aXRef[j] = i;
+ break;
+ }
+ }
+ if( j>=pTab->nCol ){
+ if( pPk==0 && sqlite3IsRowid(pChanges->a[i].zEName) ){
+ j = -1;
+ chngRowid = 1;
+ pRowidExpr = pChanges->a[i].pExpr;
+ iRowidExpr = i;
+ }else{
+ sqlite3ErrorMsg(pParse, "no such column: %s", pChanges->a[i].zEName);
+ pParse->checkSchema = 1;
+ goto update_cleanup;
+ }
+ }
+#ifndef SQLITE_OMIT_AUTHORIZATION
+ {
+ int rc;
+ rc = sqlite3AuthCheck(pParse, SQLITE_UPDATE, pTab->zName,
+ j<0 ? "ROWID" : pTab->aCol[j].zName,
+ db->aDb[iDb].zDbSName);
+ if( rc==SQLITE_DENY ){
+ goto update_cleanup;
+ }else if( rc==SQLITE_IGNORE ){
+ aXRef[j] = -1;
+ }
+ }
+#endif
+ }
+ assert( (chngRowid & chngPk)==0 );
+ assert( chngRowid==0 || chngRowid==1 );
+ assert( chngPk==0 || chngPk==1 );
+ chngKey = chngRowid + chngPk;
+
+#ifndef SQLITE_OMIT_GENERATED_COLUMNS
+ /* Mark generated columns as changing if their generator expressions
+ ** reference any changing column. The actual aXRef[] value for
+ ** generated expressions is not used, other than to check to see that it
+ ** is non-negative, so the value of aXRef[] for generated columns can be
+ ** set to any non-negative number. We use 99999 so that the value is
+ ** obvious when looking at aXRef[] in a symbolic debugger.
+ */
+ if( pTab->tabFlags & TF_HasGenerated ){
+ int bProgress;
+ testcase( pTab->tabFlags & TF_HasVirtual );
+ testcase( pTab->tabFlags & TF_HasStored );
+ do{
+ bProgress = 0;
+ for(i=0; i<pTab->nCol; i++){
+ if( aXRef[i]>=0 ) continue;
+ if( (pTab->aCol[i].colFlags & COLFLAG_GENERATED)==0 ) continue;
+ if( sqlite3ExprReferencesUpdatedColumn(pTab->aCol[i].pDflt,
+ aXRef, chngRowid) ){
+ aXRef[i] = 99999;
+ bProgress = 1;
+ }
+ }
+ }while( bProgress );
+ }
+#endif
+
+ /* The SET expressions are not actually used inside the WHERE loop.
+ ** So reset the colUsed mask. Unless this is a virtual table. In that
+ ** case, set all bits of the colUsed mask (to ensure that the virtual
+ ** table implementation makes all columns available).
+ */
+ pTabList->a[0].colUsed = IsVirtual(pTab) ? ALLBITS : 0;
+
+ hasFK = sqlite3FkRequired(pParse, pTab, aXRef, chngKey);
+
+ /* There is one entry in the aRegIdx[] array for each index on the table
+ ** being updated. Fill in aRegIdx[] with a register number that will hold
+ ** the key for accessing each index.
+ */
+ if( onError==OE_Replace ) bReplace = 1;
+ for(nAllIdx=0, pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext, nAllIdx++){
+ int reg;
+ if( chngKey || hasFK>1 || pIdx==pPk
+ || indexWhereClauseMightChange(pIdx,aXRef,chngRowid)
+ ){
+ reg = ++pParse->nMem;
+ pParse->nMem += pIdx->nColumn;
+ }else{
+ reg = 0;
+ for(i=0; i<pIdx->nKeyCol; i++){
+ if( indexColumnIsBeingUpdated(pIdx, i, aXRef, chngRowid) ){
+ reg = ++pParse->nMem;
+ pParse->nMem += pIdx->nColumn;
+ if( onError==OE_Default && pIdx->onError==OE_Replace ){
+ bReplace = 1;
+ }
+ break;
+ }
+ }
+ }
+ if( reg==0 ) aToOpen[nAllIdx+1] = 0;
+ aRegIdx[nAllIdx] = reg;
+ }
+ aRegIdx[nAllIdx] = ++pParse->nMem; /* Register storing the table record */
+ if( bReplace ){
+ /* If REPLACE conflict resolution might be invoked, open cursors on all
+ ** indexes in case they are needed to delete records. */
+ memset(aToOpen, 1, nIdx+1);
+ }
+
+ if( pParse->nested==0 ) sqlite3VdbeCountChanges(v);
+ sqlite3BeginWriteOperation(pParse, pTrigger || hasFK, iDb);
+
+ /* Allocate required registers. */
+ if( !IsVirtual(pTab) ){
+ /* For now, regRowSet and aRegIdx[nAllIdx] share the same register.
+ ** If regRowSet turns out to be needed, then aRegIdx[nAllIdx] will be
+ ** reallocated. aRegIdx[nAllIdx] is the register in which the main
+ ** table record is written. regRowSet holds the RowSet for the
+ ** two-pass update algorithm. */
+ assert( aRegIdx[nAllIdx]==pParse->nMem );
+ regRowSet = aRegIdx[nAllIdx];
+ regOldRowid = regNewRowid = ++pParse->nMem;
+ if( chngPk || pTrigger || hasFK ){
+ regOld = pParse->nMem + 1;
+ pParse->nMem += pTab->nCol;
+ }
+ if( chngKey || pTrigger || hasFK ){
+ regNewRowid = ++pParse->nMem;
+ }
+ regNew = pParse->nMem + 1;
+ pParse->nMem += pTab->nCol;
+ }
+
+ /* Start the view context. */
+ if( isView ){
+ sqlite3AuthContextPush(pParse, &sContext, pTab->zName);
+ }
+
+ /* If we are trying to update a view, realize that view into
+ ** an ephemeral table.
+ */
+#if !defined(SQLITE_OMIT_VIEW) && !defined(SQLITE_OMIT_TRIGGER)
+ if( nChangeFrom==0 && isView ){
+ sqlite3MaterializeView(pParse, pTab,
+ pWhere, pOrderBy, pLimit, iDataCur
+ );
+ pOrderBy = 0;
+ pLimit = 0;
+ }
+#endif
+
+ /* Resolve the column names in all the expressions in the
+ ** WHERE clause.
+ */
+ if( nChangeFrom==0 && sqlite3ResolveExprNames(&sNC, pWhere) ){
+ goto update_cleanup;
+ }
+
+#ifndef SQLITE_OMIT_VIRTUALTABLE
+ /* Virtual tables must be handled separately */
+ if( IsVirtual(pTab) ){
+ updateVirtualTable(pParse, pTabList, pTab, pChanges, pRowidExpr, aXRef,
+ pWhere, onError);
+ goto update_cleanup;
+ }
+#endif
+
+ /* Jump to labelBreak to abandon further processing of this UPDATE */
+ labelContinue = labelBreak = sqlite3VdbeMakeLabel(pParse);
+
+ /* Not an UPSERT. Normal processing. Begin by
+ ** initializing the count of updated rows */
+ if( (db->flags&SQLITE_CountRows)!=0
+ && !pParse->pTriggerTab
+ && !pParse->nested
+ && pUpsert==0
+ ){
+ regRowCount = ++pParse->nMem;
+ sqlite3VdbeAddOp2(v, OP_Integer, 0, regRowCount);
+ }
+
+ if( nChangeFrom==0 && HasRowid(pTab) ){
+ sqlite3VdbeAddOp3(v, OP_Null, 0, regRowSet, regOldRowid);
+ }else{
+ assert( pPk!=0 || HasRowid(pTab) );
+ nPk = pPk ? pPk->nKeyCol : 0;
+ iPk = pParse->nMem+1;
+ pParse->nMem += nPk;
+ pParse->nMem += nChangeFrom;
+ regKey = ++pParse->nMem;
+ if( pUpsert==0 ){
+ int nEphCol = nPk + nChangeFrom + (isView ? pTab->nCol : 0);
+ iEph = pParse->nTab++;
+ if( pPk ) sqlite3VdbeAddOp3(v, OP_Null, 0, iPk, iPk+nPk-1);
+ addrOpen = sqlite3VdbeAddOp2(v, OP_OpenEphemeral, iEph, nEphCol);
+ if( pPk ){
+ KeyInfo *pKeyInfo = sqlite3KeyInfoOfIndex(pParse, pPk);
+ if( pKeyInfo ){
+ pKeyInfo->nAllField = nEphCol;
+ sqlite3VdbeAppendP4(v, pKeyInfo, P4_KEYINFO);
+ }
+ }
+ if( nChangeFrom ){
+ updateFromSelect(
+ pParse, iEph, pPk, pChanges, pTabList, pWhere, pOrderBy, pLimit
+ );
+#ifndef SQLITE_OMIT_SUBQUERY
+ if( isView ) iDataCur = iEph;
+#endif
+ }
+ }
+ }
+
+ if( nChangeFrom ){
+ sqlite3MultiWrite(pParse);
+ eOnePass = ONEPASS_OFF;
+ nKey = nPk;
+ regKey = iPk;
+ }else{
+ if( pUpsert ){
+ /* If this is an UPSERT, then all cursors have already been opened by
+ ** the outer INSERT and the data cursor should be pointing at the row
+ ** that is to be updated. So bypass the code that searches for the
+ ** row(s) to be updated.
+ */
+ pWInfo = 0;
+ eOnePass = ONEPASS_SINGLE;
+ sqlite3ExprIfFalse(pParse, pWhere, labelBreak, SQLITE_JUMPIFNULL);
+ bFinishSeek = 0;
+ }else{
+ /* Begin the database scan.
+ **
+ ** Do not consider a single-pass strategy for a multi-row update if
+ ** there are any triggers or foreign keys to process, or rows may
+ ** be deleted as a result of REPLACE conflict handling. Any of these
+ ** things might disturb a cursor being used to scan through the table
+ ** or index, causing a single-pass approach to malfunction. */
+ flags = WHERE_ONEPASS_DESIRED|WHERE_SEEK_UNIQ_TABLE;
+ if( !pParse->nested && !pTrigger && !hasFK && !chngKey && !bReplace ){
+ flags |= WHERE_ONEPASS_MULTIROW;
+ }
+ pWInfo = sqlite3WhereBegin(pParse, pTabList, pWhere, 0, 0, flags,iIdxCur);
+ if( pWInfo==0 ) goto update_cleanup;
+
+ /* A one-pass strategy that might update more than one row may not
+ ** be used if any column of the index used for the scan is being
+ ** updated. Otherwise, if there is an index on "b", statements like
+ ** the following could create an infinite loop:
+ **
+ ** UPDATE t1 SET b=b+1 WHERE b>?
+ **
+ ** Fall back to ONEPASS_OFF if where.c has selected a ONEPASS_MULTI
+ ** strategy that uses an index for which one or more columns are being
+ ** updated. */
+ eOnePass = sqlite3WhereOkOnePass(pWInfo, aiCurOnePass);
+ bFinishSeek = sqlite3WhereUsesDeferredSeek(pWInfo);
+ if( eOnePass!=ONEPASS_SINGLE ){
+ sqlite3MultiWrite(pParse);
+ if( eOnePass==ONEPASS_MULTI ){
+ int iCur = aiCurOnePass[1];
+ if( iCur>=0 && iCur!=iDataCur && aToOpen[iCur-iBaseCur] ){
+ eOnePass = ONEPASS_OFF;
+ }
+ assert( iCur!=iDataCur || !HasRowid(pTab) );
+ }
+ }
+ }
+
+ if( HasRowid(pTab) ){
+ /* Read the rowid of the current row of the WHERE scan. In ONEPASS_OFF
+ ** mode, write the rowid into the FIFO. In either of the one-pass modes,
+ ** leave it in register regOldRowid. */
+ sqlite3VdbeAddOp2(v, OP_Rowid, iDataCur, regOldRowid);
+ if( eOnePass==ONEPASS_OFF ){
+ /* We need to use regRowSet, so reallocate aRegIdx[nAllIdx] */
+ aRegIdx[nAllIdx] = ++pParse->nMem;
+ sqlite3VdbeAddOp2(v, OP_RowSetAdd, regRowSet, regOldRowid);
+ }
+ }else{
+ /* Read the PK of the current row into an array of registers. In
+ ** ONEPASS_OFF mode, serialize the array into a record and store it in
+ ** the ephemeral table. Or, in ONEPASS_SINGLE or MULTI mode, change
+ ** the OP_OpenEphemeral instruction to a Noop (the ephemeral table
+ ** is not required) and leave the PK fields in the array of registers. */
+ for(i=0; i<nPk; i++){
+ assert( pPk->aiColumn[i]>=0 );
+ sqlite3ExprCodeGetColumnOfTable(v, pTab, iDataCur,
+ pPk->aiColumn[i], iPk+i);
+ }
+ if( eOnePass ){
+ if( addrOpen ) sqlite3VdbeChangeToNoop(v, addrOpen);
+ nKey = nPk;
+ regKey = iPk;
+ }else{
+ sqlite3VdbeAddOp4(v, OP_MakeRecord, iPk, nPk, regKey,
+ sqlite3IndexAffinityStr(db, pPk), nPk);
+ sqlite3VdbeAddOp4Int(v, OP_IdxInsert, iEph, regKey, iPk, nPk);
+ }
+ }
+ }
+
+ if( pUpsert==0 ){
+ if( nChangeFrom==0 && eOnePass!=ONEPASS_MULTI ){
+ sqlite3WhereEnd(pWInfo);
+ }
+
+ if( !isView ){
+ int addrOnce = 0;
+
+ /* Open every index that needs updating. */
+ if( eOnePass!=ONEPASS_OFF ){
+ if( aiCurOnePass[0]>=0 ) aToOpen[aiCurOnePass[0]-iBaseCur] = 0;
+ if( aiCurOnePass[1]>=0 ) aToOpen[aiCurOnePass[1]-iBaseCur] = 0;
+ }
+
+ if( eOnePass==ONEPASS_MULTI && (nIdx-(aiCurOnePass[1]>=0))>0 ){
+ addrOnce = sqlite3VdbeAddOp0(v, OP_Once); VdbeCoverage(v);
+ }
+ sqlite3OpenTableAndIndices(pParse, pTab, OP_OpenWrite, 0, iBaseCur,
+ aToOpen, 0, 0);
+ if( addrOnce ){
+ sqlite3VdbeJumpHereOrPopInst(v, addrOnce);
+ }
+ }
+
+ /* Top of the update loop */
+ if( eOnePass!=ONEPASS_OFF ){
+ if( !isView && aiCurOnePass[0]!=iDataCur && aiCurOnePass[1]!=iDataCur ){
+ assert( pPk );
+ sqlite3VdbeAddOp4Int(v, OP_NotFound, iDataCur, labelBreak, regKey,nKey);
+ VdbeCoverage(v);
+ }
+ if( eOnePass!=ONEPASS_SINGLE ){
+ labelContinue = sqlite3VdbeMakeLabel(pParse);
+ }
+ sqlite3VdbeAddOp2(v, OP_IsNull, pPk ? regKey : regOldRowid, labelBreak);
+ VdbeCoverageIf(v, pPk==0);
+ VdbeCoverageIf(v, pPk!=0);
+ }else if( pPk || nChangeFrom ){
+ labelContinue = sqlite3VdbeMakeLabel(pParse);
+ sqlite3VdbeAddOp2(v, OP_Rewind, iEph, labelBreak); VdbeCoverage(v);
+ addrTop = sqlite3VdbeCurrentAddr(v);
+ if( nChangeFrom ){
+ if( !isView ){
+ if( pPk ){
+ for(i=0; i<nPk; i++){
+ sqlite3VdbeAddOp3(v, OP_Column, iEph, i, iPk+i);
+ }
+ sqlite3VdbeAddOp4Int(
+ v, OP_NotFound, iDataCur, labelContinue, iPk, nPk
+ ); VdbeCoverage(v);
+ }else{
+ sqlite3VdbeAddOp2(v, OP_Rowid, iEph, regOldRowid);
+ sqlite3VdbeAddOp3(
+ v, OP_NotExists, iDataCur, labelContinue, regOldRowid
+ ); VdbeCoverage(v);
+ }
+ }
+ }else{
+ sqlite3VdbeAddOp2(v, OP_RowData, iEph, regKey);
+ sqlite3VdbeAddOp4Int(v, OP_NotFound, iDataCur, labelContinue, regKey,0);
+ VdbeCoverage(v);
+ }
+ }else{
+ labelContinue = sqlite3VdbeAddOp3(v, OP_RowSetRead, regRowSet,labelBreak,
+ regOldRowid);
+ VdbeCoverage(v);
+ sqlite3VdbeAddOp3(v, OP_NotExists, iDataCur, labelContinue, regOldRowid);
+ VdbeCoverage(v);
+ }
+ }
+
+ /* If the rowid value will change, set register regNewRowid to
+ ** contain the new value. If the rowid is not being modified,
+ ** then regNewRowid is the same register as regOldRowid, which is
+ ** already populated. */
+ assert( chngKey || pTrigger || hasFK || regOldRowid==regNewRowid );
+ if( chngRowid ){
+ assert( iRowidExpr>=0 );
+ if( nChangeFrom==0 ){
+ sqlite3ExprCode(pParse, pRowidExpr, regNewRowid);
+ }else{
+ sqlite3VdbeAddOp3(v, OP_Column, iEph, iRowidExpr, regNewRowid);
+ }
+ sqlite3VdbeAddOp1(v, OP_MustBeInt, regNewRowid); VdbeCoverage(v);
+ }
+
+ /* Compute the old pre-UPDATE content of the row being changed, if that
+ ** information is needed */
+ if( chngPk || hasFK || pTrigger ){
+ u32 oldmask = (hasFK ? sqlite3FkOldmask(pParse, pTab) : 0);
+ oldmask |= sqlite3TriggerColmask(pParse,
+ pTrigger, pChanges, 0, TRIGGER_BEFORE|TRIGGER_AFTER, pTab, onError
+ );
+ for(i=0; i<pTab->nCol; i++){
+ u32 colFlags = pTab->aCol[i].colFlags;
+ k = sqlite3TableColumnToStorage(pTab, i) + regOld;
+ if( oldmask==0xffffffff
+ || (i<32 && (oldmask & MASKBIT32(i))!=0)
+ || (colFlags & COLFLAG_PRIMKEY)!=0
+ ){
+ testcase( oldmask!=0xffffffff && i==31 );
+ sqlite3ExprCodeGetColumnOfTable(v, pTab, iDataCur, i, k);
+ }else{
+ sqlite3VdbeAddOp2(v, OP_Null, 0, k);
+ }
+ }
+ if( chngRowid==0 && pPk==0 ){
+ sqlite3VdbeAddOp2(v, OP_Copy, regOldRowid, regNewRowid);
+ }
+ }
+
+ /* Populate the array of registers beginning at regNew with the new
+ ** row data. This array is used to check constraints, create the new
+ ** table and index records, and as the values for any new.* references
+ ** made by triggers.
+ **
+ ** If there are one or more BEFORE triggers, then do not populate the
+ ** registers associated with columns that are (a) not modified by
+ ** this UPDATE statement and (b) not accessed by new.* references. The
+ ** values for registers not modified by the UPDATE must be reloaded from
+ ** the database after the BEFORE triggers are fired anyway (as the trigger
+ ** may have modified them). So not loading those that are not going to
+ ** be used eliminates some redundant opcodes.
+ */
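+ /* For instance, with a hypothetical trigger "CREATE TRIGGER tr BEFORE
+ ** UPDATE ON t1 ... SELECT new.b ...", only the register for column b
+ ** (plus registers for columns named in the SET list) needs populating
+ ** before the trigger runs; the remaining columns would be reloaded
+ ** after the trigger fires anyway. */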
+ newmask = sqlite3TriggerColmask(
+ pParse, pTrigger, pChanges, 1, TRIGGER_BEFORE, pTab, onError
+ );
+ for(i=0, k=regNew; i<pTab->nCol; i++, k++){
+ if( i==pTab->iPKey ){
+ sqlite3VdbeAddOp2(v, OP_Null, 0, k);
+ }else if( (pTab->aCol[i].colFlags & COLFLAG_GENERATED)!=0 ){
+ if( pTab->aCol[i].colFlags & COLFLAG_VIRTUAL ) k--;
+ }else{
+ j = aXRef[i];
+ if( j>=0 ){
+ if( nChangeFrom ){
+ int nOff = (isView ? pTab->nCol : nPk);
+ assert( eOnePass==ONEPASS_OFF );
+ sqlite3VdbeAddOp3(v, OP_Column, iEph, nOff+j, k);
+ }else{
+ sqlite3ExprCode(pParse, pChanges->a[j].pExpr, k);
+ }
+ }else if( 0==(tmask&TRIGGER_BEFORE) || i>31 || (newmask & MASKBIT32(i)) ){
+ /* This branch loads the value of a column that will not be changed
+ ** into a register. This is done if there are no BEFORE triggers, or
+ ** if there are one or more BEFORE triggers that use this value via
+ ** a new.* reference in a trigger program.
+ */
+ testcase( i==31 );
+ testcase( i==32 );
+ sqlite3ExprCodeGetColumnOfTable(v, pTab, iDataCur, i, k);
+ bFinishSeek = 0;
+ }else{
+ sqlite3VdbeAddOp2(v, OP_Null, 0, k);
+ }
+ }
+ }
+#ifndef SQLITE_OMIT_GENERATED_COLUMNS
+ if( pTab->tabFlags & TF_HasGenerated ){
+ testcase( pTab->tabFlags & TF_HasVirtual );
+ testcase( pTab->tabFlags & TF_HasStored );
+ sqlite3ComputeGeneratedColumns(pParse, regNew, pTab);
+ }
+#endif
+
+ /* Fire any BEFORE UPDATE triggers. This happens before constraints are
+ ** verified. One could argue that this is wrong.
+ */
+ if( tmask&TRIGGER_BEFORE ){
+ sqlite3TableAffinity(v, pTab, regNew);
+ sqlite3CodeRowTrigger(pParse, pTrigger, TK_UPDATE, pChanges,
+ TRIGGER_BEFORE, pTab, regOldRowid, onError, labelContinue);
+
+ if( !isView ){
+ /* The row-trigger may have deleted the row being updated. In this
+ ** case, jump to the next row. No updates or AFTER triggers are
+ ** required. This behavior - what happens when the row being updated
+ ** is deleted or renamed by a BEFORE trigger - is left undefined in the
+ ** documentation.
+ */
+ if( pPk ){
+ sqlite3VdbeAddOp4Int(v, OP_NotFound,iDataCur,labelContinue,regKey,nKey);
+ VdbeCoverage(v);
+ }else{
+ sqlite3VdbeAddOp3(v, OP_NotExists, iDataCur, labelContinue,regOldRowid);
+ VdbeCoverage(v);
+ }
+
+ /* If the BEFORE trigger did not delete the row, it may still have modified
+ ** some of the columns of the row being updated. Load the values for
+ ** all columns not modified by the update statement into their registers
+ ** in case this has happened. Only unmodified columns are reloaded.
+ ** The values computed for modified columns use the values before the
+ ** BEFORE trigger runs. See test case trigger1-18.0 (added 2018-04-26)
+ ** for an example.
+ */
+ for(i=0, k=regNew; i<pTab->nCol; i++, k++){
+ if( pTab->aCol[i].colFlags & COLFLAG_GENERATED ){
+ if( pTab->aCol[i].colFlags & COLFLAG_VIRTUAL ) k--;
+ }else if( aXRef[i]<0 && i!=pTab->iPKey ){
+ sqlite3ExprCodeGetColumnOfTable(v, pTab, iDataCur, i, k);
+ }
+ }
+#ifndef SQLITE_OMIT_GENERATED_COLUMNS
+ if( pTab->tabFlags & TF_HasGenerated ){
+ testcase( pTab->tabFlags & TF_HasVirtual );
+ testcase( pTab->tabFlags & TF_HasStored );
+ sqlite3ComputeGeneratedColumns(pParse, regNew, pTab);
+ }
+#endif
+ }
+ }
+
+ if( !isView ){
+ /* Do constraint checks. */
+ assert( regOldRowid>0 );
+ sqlite3GenerateConstraintChecks(pParse, pTab, aRegIdx, iDataCur, iIdxCur,
+ regNewRowid, regOldRowid, chngKey, onError, labelContinue, &bReplace,
+ aXRef, 0);
+
+ /* If REPLACE conflict handling may have been used, or if the PK of the
+ ** row is changing, then the GenerateConstraintChecks() above may have
+ ** moved cursor iDataCur. Reseek it. */
+ if( bReplace || chngKey ){
+ if( pPk ){
+ sqlite3VdbeAddOp4Int(v, OP_NotFound,iDataCur,labelContinue,regKey,nKey);
+ }else{
+ sqlite3VdbeAddOp3(v, OP_NotExists, iDataCur, labelContinue,regOldRowid);
+ }
+ VdbeCoverageNeverTaken(v);
+ }
+
+ /* Do FK constraint checks. */
+ if( hasFK ){
+ sqlite3FkCheck(pParse, pTab, regOldRowid, 0, aXRef, chngKey);
+ }
+
+ /* Delete the index entries associated with the current record. */
+ sqlite3GenerateRowIndexDelete(pParse, pTab, iDataCur, iIdxCur, aRegIdx, -1);
+
+ /* We must run the OP_FinishSeek opcode to resolve a prior
+ ** OP_DeferredSeek if there is any possibility that there have been
+ ** no OP_Column opcodes since the OP_DeferredSeek was issued. But
+ ** we want to avoid the OP_FinishSeek if possible, as running it
+ ** costs CPU cycles. */
+ if( bFinishSeek ){
+ sqlite3VdbeAddOp1(v, OP_FinishSeek, iDataCur);
+ }
+
+ /* If changing the rowid value, or if there are foreign key constraints
+ ** to process, delete the old record. Otherwise, add a noop OP_Delete
+ ** to invoke the pre-update hook.
+ **
+ ** The fact that (regNew==regNewRowid+1) is also important for the
+ ** pre-update hook. If the caller invokes preupdate_new(), the returned
+ ** value is copied from memory cell (regNewRowid+1+iCol), where iCol
+ ** is the column index supplied by the user.
+ */
+ assert( regNew==regNewRowid+1 );
+#ifdef SQLITE_ENABLE_PREUPDATE_HOOK
+ sqlite3VdbeAddOp3(v, OP_Delete, iDataCur,
+ OPFLAG_ISUPDATE | ((hasFK>1 || chngKey) ? 0 : OPFLAG_ISNOOP),
+ regNewRowid
+ );
+ if( eOnePass==ONEPASS_MULTI ){
+ assert( hasFK==0 && chngKey==0 );
+ sqlite3VdbeChangeP5(v, OPFLAG_SAVEPOSITION);
+ }
+ if( !pParse->nested ){
+ sqlite3VdbeAppendP4(v, pTab, P4_TABLE);
+ }
+#else
+ if( hasFK>1 || chngKey ){
+ sqlite3VdbeAddOp2(v, OP_Delete, iDataCur, 0);
+ }
+#endif
+
+ if( hasFK ){
+ sqlite3FkCheck(pParse, pTab, 0, regNewRowid, aXRef, chngKey);
+ }
+
+ /* Insert the new index entries and the new record. */
+ sqlite3CompleteInsertion(
+ pParse, pTab, iDataCur, iIdxCur, regNewRowid, aRegIdx,
+ OPFLAG_ISUPDATE | (eOnePass==ONEPASS_MULTI ? OPFLAG_SAVEPOSITION : 0),
+ 0, 0
+ );
+
+ /* Do any ON CASCADE, SET NULL or SET DEFAULT operations required to
+ ** handle rows (possibly in other tables) that refer via a foreign key
+ ** to the row just updated. */
+ if( hasFK ){
+ sqlite3FkActions(pParse, pTab, pChanges, regOldRowid, aXRef, chngKey);
+ }
+ }
+
+ /* Increment the row counter
+ */
+ if( regRowCount ){
+ sqlite3VdbeAddOp2(v, OP_AddImm, regRowCount, 1);
+ }
+
+ sqlite3CodeRowTrigger(pParse, pTrigger, TK_UPDATE, pChanges,
+ TRIGGER_AFTER, pTab, regOldRowid, onError, labelContinue);
+
+ /* Repeat the above with the next record to be updated, until
+ ** all records selected by the WHERE clause have been updated.
+ */
+ if( eOnePass==ONEPASS_SINGLE ){
+ /* Nothing to do at end-of-loop for a single-pass */
+ }else if( eOnePass==ONEPASS_MULTI ){
+ sqlite3VdbeResolveLabel(v, labelContinue);
+ sqlite3WhereEnd(pWInfo);
+ }else if( pPk || nChangeFrom ){
+ sqlite3VdbeResolveLabel(v, labelContinue);
+ sqlite3VdbeAddOp2(v, OP_Next, iEph, addrTop); VdbeCoverage(v);
+ }else{
+ sqlite3VdbeGoto(v, labelContinue);
+ }
+ sqlite3VdbeResolveLabel(v, labelBreak);
+
+ /* Update the sqlite_sequence table by storing the content of the
+ ** maximum rowid counter values recorded while inserting into
+ ** autoincrement tables.
+ */
+ if( pParse->nested==0 && pParse->pTriggerTab==0 && pUpsert==0 ){
+ sqlite3AutoincrementEnd(pParse);
+ }
+
+ /*
+ ** Return the number of rows that were changed, if we are tracking
+ ** that information.
+ */
+ if( regRowCount ){
+ sqlite3VdbeAddOp2(v, OP_ResultRow, regRowCount, 1);
+ sqlite3VdbeSetNumCols(v, 1);
+ sqlite3VdbeSetColName(v, 0, COLNAME_NAME, "rows updated", SQLITE_STATIC);
+ }
+
+update_cleanup:
+ sqlite3AuthContextPop(&sContext);
+ sqlite3DbFree(db, aXRef); /* Also frees aRegIdx[] and aToOpen[] */
+ sqlite3SrcListDelete(db, pTabList);
+ sqlite3ExprListDelete(db, pChanges);
+ sqlite3ExprDelete(db, pWhere);
+#if defined(SQLITE_ENABLE_UPDATE_DELETE_LIMIT)
+ sqlite3ExprListDelete(db, pOrderBy);
+ sqlite3ExprDelete(db, pLimit);
+#endif
+ return;
+}
+/* Make sure "isView" and other macros defined above are undefined. Otherwise
+** they may interfere with compilation of other functions in this file
+** (or in another file, if this file becomes part of the amalgamation). */
+#ifdef isView
+ #undef isView
+#endif
+#ifdef pTrigger
+ #undef pTrigger
+#endif
+
+#ifndef SQLITE_OMIT_VIRTUALTABLE
+/*
+** Generate code for an UPDATE of a virtual table.
+**
+** There are two possible strategies - the default and the special
+** "onepass" strategy. Onepass is only used if the virtual table
+** implementation indicates that pWhere may match at most one row.
+**
+** The default strategy is to create an ephemeral table that contains
+** for each row to be changed:
+**
+** (A) The original rowid of that row.
+** (B) The revised rowid for the row.
+** (C) The content of every column in the row.
+**
+** Then loop through the contents of this ephemeral table executing a
+** VUpdate for each row. When finished, drop the ephemeral table.
+**
+** The "onepass" strategy does not use an ephemeral table. Instead, it
+** stores the same values (A, B and C above) in a register array and
+** makes a single invocation of VUpdate.
+*/
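+/* Sketch of the VUpdate argument layout shared by both strategies, for a
+** hypothetical two-column rowid virtual table:
+**
+**     regArg+0: old rowid
+**     regArg+1: new rowid (same value as the old rowid if unchanged)
+**     regArg+2: new value for column 0
+**     regArg+3: new value for column 1
+*/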
+static void updateVirtualTable(
+ Parse *pParse, /* The parsing context */
+ SrcList *pSrc, /* The virtual table to be modified */
+ Table *pTab, /* The virtual table */
+ ExprList *pChanges, /* The columns to change in the UPDATE statement */
+ Expr *pRowid, /* Expression used to recompute the rowid */
+ int *aXRef, /* Mapping from columns of pTab to entries in pChanges */
+ Expr *pWhere, /* WHERE clause of the UPDATE statement */
+ int onError /* ON CONFLICT strategy */
+){
+ Vdbe *v = pParse->pVdbe; /* Virtual machine under construction */
+ int ephemTab; /* Table holding the result of the SELECT */
+ int i; /* Loop counter */
+ sqlite3 *db = pParse->db; /* Database connection */
+ const char *pVTab = (const char*)sqlite3GetVTable(db, pTab);
+ WhereInfo *pWInfo = 0;
+ int nArg = 2 + pTab->nCol; /* Number of arguments to VUpdate */
+ int regArg; /* First register in VUpdate arg array */
+ int regRec; /* Register in which to assemble record */
+ int regRowid; /* Register for ephem table rowid */
+ int iCsr = pSrc->a[0].iCursor; /* Cursor used for virtual table scan */
+ int aDummy[2]; /* Unused arg for sqlite3WhereOkOnePass() */
+ int eOnePass; /* True to use onepass strategy */
+ int addr; /* Address of OP_OpenEphemeral */
+
+ /* Allocate nArg registers in which to gather the arguments for VUpdate. Then
+ ** create and open the ephemeral table in which the records created from
+ ** these arguments will be temporarily stored. */
+ assert( v );
+ ephemTab = pParse->nTab++;
+ addr = sqlite3VdbeAddOp2(v, OP_OpenEphemeral, ephemTab, nArg);
+ regArg = pParse->nMem + 1;
+ pParse->nMem += nArg;
+ if( pSrc->nSrc>1 ){
+ Expr *pRow;
+ ExprList *pList;
+ if( pRowid ){
+ pRow = sqlite3ExprDup(db, pRowid, 0);
+ }else{
+ pRow = sqlite3PExpr(pParse, TK_ROW, 0, 0);
+ }
+ pList = sqlite3ExprListAppend(pParse, 0, pRow);
+
+ for(i=0; i<pTab->nCol; i++){
+ if( aXRef[i]>=0 ){
+ pList = sqlite3ExprListAppend(pParse, pList,
+ sqlite3ExprDup(db, pChanges->a[aXRef[i]].pExpr, 0)
+ );
+ }else{
+ pList = sqlite3ExprListAppend(pParse, pList, exprRowColumn(pParse, i));
+ }
+ }
+
+ updateFromSelect(pParse, ephemTab, 0, pList, pSrc, pWhere, 0, 0);
+ sqlite3ExprListDelete(db, pList);
+ eOnePass = ONEPASS_OFF;
+ }else{
+ regRec = ++pParse->nMem;
+ regRowid = ++pParse->nMem;
+
+ /* Start scanning the virtual table */
+ pWInfo = sqlite3WhereBegin(pParse, pSrc,pWhere,0,0,WHERE_ONEPASS_DESIRED,0);
+ if( pWInfo==0 ) return;
+
+ /* Populate the argument registers. */
+ for(i=0; i<pTab->nCol; i++){
+ assert( (pTab->aCol[i].colFlags & COLFLAG_GENERATED)==0 );
+ if( aXRef[i]>=0 ){
+ sqlite3ExprCode(pParse, pChanges->a[aXRef[i]].pExpr, regArg+2+i);
+ }else{
+ sqlite3VdbeAddOp3(v, OP_VColumn, iCsr, i, regArg+2+i);
+ sqlite3VdbeChangeP5(v, OPFLAG_NOCHNG);/* For sqlite3_vtab_nochange() */
+ }
+ }
+ if( HasRowid(pTab) ){
+ sqlite3VdbeAddOp2(v, OP_Rowid, iCsr, regArg);
+ if( pRowid ){
+ sqlite3ExprCode(pParse, pRowid, regArg+1);
+ }else{
+ sqlite3VdbeAddOp2(v, OP_Rowid, iCsr, regArg+1);
+ }
+ }else{
+ Index *pPk; /* PRIMARY KEY index */
+ i16 iPk; /* PRIMARY KEY column */
+ pPk = sqlite3PrimaryKeyIndex(pTab);
+ assert( pPk!=0 );
+ assert( pPk->nKeyCol==1 );
+ iPk = pPk->aiColumn[0];
+ sqlite3VdbeAddOp3(v, OP_VColumn, iCsr, iPk, regArg);
+ sqlite3VdbeAddOp2(v, OP_SCopy, regArg+2+iPk, regArg+1);
+ }
+
+ eOnePass = sqlite3WhereOkOnePass(pWInfo, aDummy);
+
+ /* There is no ONEPASS_MULTI on virtual tables */
+ assert( eOnePass==ONEPASS_OFF || eOnePass==ONEPASS_SINGLE );
+
+ if( eOnePass ){
+ /* If using the onepass strategy, no-op out the OP_OpenEphemeral coded
+ ** above. */
+ sqlite3VdbeChangeToNoop(v, addr);
+ sqlite3VdbeAddOp1(v, OP_Close, iCsr);
+ }else{
+ /* Create a record from the argument register contents and insert it into
+ ** the ephemeral table. */
+ sqlite3MultiWrite(pParse);
+ sqlite3VdbeAddOp3(v, OP_MakeRecord, regArg, nArg, regRec);
+#if defined(SQLITE_DEBUG) && !defined(SQLITE_ENABLE_NULL_TRIM)
+ /* Signal an assert() within OP_MakeRecord that it is allowed to
+ ** accept no-change records with serial_type 10 */
+ sqlite3VdbeChangeP5(v, OPFLAG_NOCHNG_MAGIC);
+#endif
+ sqlite3VdbeAddOp2(v, OP_NewRowid, ephemTab, regRowid);
+ sqlite3VdbeAddOp3(v, OP_Insert, ephemTab, regRec, regRowid);
+ }
+ }
+
+
+ if( eOnePass==ONEPASS_OFF ){
+ /* End the virtual table scan */
+ if( pSrc->nSrc==1 ){
+ sqlite3WhereEnd(pWInfo);
+ }
+
+ /* Begin scanning through the ephemeral table. */
+ addr = sqlite3VdbeAddOp1(v, OP_Rewind, ephemTab); VdbeCoverage(v);
+
+ /* Extract arguments from the current row of the ephemeral table and
+ ** invoke the VUpdate method. */
+ for(i=0; i<nArg; i++){
+ sqlite3VdbeAddOp3(v, OP_Column, ephemTab, i, regArg+i);
+ }
+ }
+ sqlite3VtabMakeWritable(pParse, pTab);
+ sqlite3VdbeAddOp4(v, OP_VUpdate, 0, nArg, regArg, pVTab, P4_VTAB);
+ sqlite3VdbeChangeP5(v, onError==OE_Default ? OE_Abort : onError);
+ sqlite3MayAbort(pParse);
+
+ /* End of the ephemeral table scan. Or, if using the onepass strategy,
+ ** jump to here if the scan visited zero rows. */
+ if( eOnePass==ONEPASS_OFF ){
+ sqlite3VdbeAddOp2(v, OP_Next, ephemTab, addr+1); VdbeCoverage(v);
+ sqlite3VdbeJumpHere(v, addr);
+ sqlite3VdbeAddOp2(v, OP_Close, ephemTab, 0);
+ }else{
+ sqlite3WhereEnd(pWInfo);
+ }
+}
+#endif /* SQLITE_OMIT_VIRTUALTABLE */
+
+/************** End of update.c **********************************************/
+/************** Begin file upsert.c ******************************************/
+/*
+** 2018-04-12
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+** This file contains code to implement various aspects of UPSERT
+** processing and handling of the Upsert object.
+*/
+/* #include "sqliteInt.h" */
+
+#ifndef SQLITE_OMIT_UPSERT
+/*
+** Free an Upsert object
+*/
+SQLITE_PRIVATE void sqlite3UpsertDelete(sqlite3 *db, Upsert *p){
+ if( p ){
+ sqlite3ExprListDelete(db, p->pUpsertTarget);
+ sqlite3ExprDelete(db, p->pUpsertTargetWhere);
+ sqlite3ExprListDelete(db, p->pUpsertSet);
+ sqlite3ExprDelete(db, p->pUpsertWhere);
+ sqlite3DbFree(db, p);
+ }
+}
+
+/*
+** Duplicate an Upsert object.
+*/
+SQLITE_PRIVATE Upsert *sqlite3UpsertDup(sqlite3 *db, Upsert *p){
+ if( p==0 ) return 0;
+ return sqlite3UpsertNew(db,
+ sqlite3ExprListDup(db, p->pUpsertTarget, 0),
+ sqlite3ExprDup(db, p->pUpsertTargetWhere, 0),
+ sqlite3ExprListDup(db, p->pUpsertSet, 0),
+ sqlite3ExprDup(db, p->pUpsertWhere, 0)
+ );
+}
+
+/*
+** Create a new Upsert object.
+*/
+SQLITE_PRIVATE Upsert *sqlite3UpsertNew(
+ sqlite3 *db, /* Determines which memory allocator to use */
+ ExprList *pTarget, /* Target argument to ON CONFLICT, or NULL */
+ Expr *pTargetWhere, /* Optional WHERE clause on the target */
+ ExprList *pSet, /* UPDATE columns, or NULL for a DO NOTHING */
+ Expr *pWhere /* WHERE clause for the ON CONFLICT UPDATE */
+){
+ Upsert *pNew;
+ pNew = sqlite3DbMallocRaw(db, sizeof(Upsert));
+ if( pNew==0 ){
+ sqlite3ExprListDelete(db, pTarget);
+ sqlite3ExprDelete(db, pTargetWhere);
+ sqlite3ExprListDelete(db, pSet);
+ sqlite3ExprDelete(db, pWhere);
+ return 0;
+ }else{
+ pNew->pUpsertTarget = pTarget;
+ pNew->pUpsertTargetWhere = pTargetWhere;
+ pNew->pUpsertSet = pSet;
+ pNew->pUpsertWhere = pWhere;
+ pNew->pUpsertIdx = 0;
+ }
+ return pNew;
+}
+
+/*
+** Analyze the ON CONFLICT clause described by pUpsert. Resolve all
+** symbols in the conflict-target.
+**
+** Return SQLITE_OK if everything works, or an error code if something
+** is wrong.
+*/
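+/* For example (illustrative schema), given
+**
+**     CREATE TABLE t1(a, b, UNIQUE(a,b));
+**     INSERT INTO t1 VALUES(1,2) ON CONFLICT(a,b) DO NOTHING;
+**
+** this routine matches the conflict-target "(a,b)" against the UNIQUE
+** index on (a,b) and records that index in pUpsert->pUpsertIdx.
+*/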
+SQLITE_PRIVATE int sqlite3UpsertAnalyzeTarget(
+ Parse *pParse, /* The parsing context */
+ SrcList *pTabList, /* Table into which we are inserting */
+ Upsert *pUpsert /* The ON CONFLICT clauses */
+){
+ Table *pTab; /* The table into which we are inserting */
+ int rc; /* Result code */
+ int iCursor; /* Cursor used by pTab */
+ Index *pIdx; /* One of the indexes of pTab */
+ ExprList *pTarget; /* The conflict-target clause */
+ Expr *pTerm; /* One term of the conflict-target clause */
+ NameContext sNC; /* Context for resolving symbolic names */
+ Expr sCol[2]; /* Index column converted into an Expr */
+
+ assert( pTabList->nSrc==1 );
+ assert( pTabList->a[0].pTab!=0 );
+ assert( pUpsert!=0 );
+ assert( pUpsert->pUpsertTarget!=0 );
+
+ /* Resolve all symbolic names in the conflict-target clause, which
+ ** includes both the list of columns and the optional partial-index
+ ** WHERE clause.
+ */
+ memset(&sNC, 0, sizeof(sNC));
+ sNC.pParse = pParse;
+ sNC.pSrcList = pTabList;
+ rc = sqlite3ResolveExprListNames(&sNC, pUpsert->pUpsertTarget);
+ if( rc ) return rc;
+ rc = sqlite3ResolveExprNames(&sNC, pUpsert->pUpsertTargetWhere);
+ if( rc ) return rc;
+
+ /* Check to see if the conflict target matches the rowid. */
+ pTab = pTabList->a[0].pTab;
+ pTarget = pUpsert->pUpsertTarget;
+ iCursor = pTabList->a[0].iCursor;
+ if( HasRowid(pTab)
+ && pTarget->nExpr==1
+ && (pTerm = pTarget->a[0].pExpr)->op==TK_COLUMN
+ && pTerm->iColumn==XN_ROWID
+ ){
+ /* The conflict-target is the rowid of the primary table */
+ assert( pUpsert->pUpsertIdx==0 );
+ return SQLITE_OK;
+ }
+
+ /* Initialize sCol[0..1] to be an expression parse tree for a
+ ** single column of an index. The sCol[0] node will be the TK_COLLATE
+ ** operator and sCol[1] will be the TK_COLUMN operator. Code below
+ ** will populate the specific collation and column number values
+ ** prior to comparing against the conflict-target expression.
+ */
+ memset(sCol, 0, sizeof(sCol));
+ sCol[0].op = TK_COLLATE;
+ sCol[0].pLeft = &sCol[1];
+ sCol[1].op = TK_COLUMN;
+ sCol[1].iTable = pTabList->a[0].iCursor;
+
+ /* Check for matches against other indexes */
+ for(pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext){
+ int ii, jj, nn;
+ if( !IsUniqueIndex(pIdx) ) continue;
+ if( pTarget->nExpr!=pIdx->nKeyCol ) continue;
+ if( pIdx->pPartIdxWhere ){
+ if( pUpsert->pUpsertTargetWhere==0 ) continue;
+ if( sqlite3ExprCompare(pParse, pUpsert->pUpsertTargetWhere,
+ pIdx->pPartIdxWhere, iCursor)!=0 ){
+ continue;
+ }
+ }
+ nn = pIdx->nKeyCol;
+ for(ii=0; ii<nn; ii++){
+ Expr *pExpr;
+ sCol[0].u.zToken = (char*)pIdx->azColl[ii];
+ if( pIdx->aiColumn[ii]==XN_EXPR ){
+ assert( pIdx->aColExpr!=0 );
+ assert( pIdx->aColExpr->nExpr>ii );
+ pExpr = pIdx->aColExpr->a[ii].pExpr;
+ if( pExpr->op!=TK_COLLATE ){
+ sCol[0].pLeft = pExpr;
+ pExpr = &sCol[0];
+ }
+ }else{
+ sCol[0].pLeft = &sCol[1];
+ sCol[1].iColumn = pIdx->aiColumn[ii];
+ pExpr = &sCol[0];
+ }
+ for(jj=0; jj<nn; jj++){
+ if( sqlite3ExprCompare(pParse, pTarget->a[jj].pExpr, pExpr,iCursor)<2 ){
+ break; /* Column ii of the index matches column jj of target */
+ }
+ }
+ if( jj>=nn ){
+ /* The target contains no match for column ii of the index */
+ break;
+ }
+ }
+ if( ii<nn ){
+ /* Column ii of the index did not match any term of the conflict target.
+ ** Continue the search with the next index. */
+ continue;
+ }
+ pUpsert->pUpsertIdx = pIdx;
+ return SQLITE_OK;
+ }
+ sqlite3ErrorMsg(pParse, "ON CONFLICT clause does not match any "
+ "PRIMARY KEY or UNIQUE constraint");
+ return SQLITE_ERROR;
+}
+
+/*
+** Generate bytecode that does an UPDATE as part of an upsert.
+**
+** If pIdx is NULL, then the UNIQUE constraint that failed was the IPK.
+** In this case parameter iCur is a cursor open on the table b-tree that
+** currently points to the conflicting table row. Otherwise, if pIdx
+** is not NULL, then pIdx is the constraint that failed and iCur is a
+** cursor that points to the conflicting row.
+*/
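+/* Example of the statement fragment this routine generates code for
+** (names are illustrative):
+**
+**     INSERT INTO t1(a,b) VALUES(1,2)
+**       ON CONFLICT(a) DO UPDATE SET b=excluded.b;
+**
+** When the INSERT hits a uniqueness conflict on column a, control reaches
+** this routine with the conflicting row already located, and the DO UPDATE
+** clause is compiled as an ordinary UPDATE restricted to that one row.
+*/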
+SQLITE_PRIVATE void sqlite3UpsertDoUpdate(
+ Parse *pParse, /* The parsing and code-generating context */
+ Upsert *pUpsert, /* The ON CONFLICT clause for the upsert */
+ Table *pTab, /* The table being updated */
+ Index *pIdx, /* The UNIQUE constraint that failed */
+ int iCur /* Cursor for pIdx (or pTab if pIdx==NULL) */
+){
+ Vdbe *v = pParse->pVdbe;
+ sqlite3 *db = pParse->db;
+ SrcList *pSrc; /* FROM clause for the UPDATE */
+ int iDataCur;
+ int i;
+
+ assert( v!=0 );
+ assert( pUpsert!=0 );
+ VdbeNoopComment((v, "Begin DO UPDATE of UPSERT"));
+ iDataCur = pUpsert->iDataCur;
+ if( pIdx && iCur!=iDataCur ){
+ if( HasRowid(pTab) ){
+ int regRowid = sqlite3GetTempReg(pParse);
+ sqlite3VdbeAddOp2(v, OP_IdxRowid, iCur, regRowid);
+ sqlite3VdbeAddOp3(v, OP_SeekRowid, iDataCur, 0, regRowid);
+ VdbeCoverage(v);
+ sqlite3ReleaseTempReg(pParse, regRowid);
+ }else{
+ Index *pPk = sqlite3PrimaryKeyIndex(pTab);
+ int nPk = pPk->nKeyCol;
+ int iPk = pParse->nMem+1;
+ pParse->nMem += nPk;
+ for(i=0; i<nPk; i++){
+ int k;
+ assert( pPk->aiColumn[i]>=0 );
+ k = sqlite3TableColumnToIndex(pIdx, pPk->aiColumn[i]);
+ sqlite3VdbeAddOp3(v, OP_Column, iCur, k, iPk+i);
+ VdbeComment((v, "%s.%s", pIdx->zName,
+ pTab->aCol[pPk->aiColumn[i]].zName));
+ }
+ sqlite3VdbeVerifyAbortable(v, OE_Abort);
+ i = sqlite3VdbeAddOp4Int(v, OP_Found, iDataCur, 0, iPk, nPk);
+ VdbeCoverage(v);
+ sqlite3VdbeAddOp4(v, OP_Halt, SQLITE_CORRUPT, OE_Abort, 0,
+ "corrupt database", P4_STATIC);
+ sqlite3MayAbort(pParse);
+ sqlite3VdbeJumpHere(v, i);
+ }
+ }
+ /* pUpsert does not own pUpsertSrc - the outer INSERT statement does. So
+ ** we have to make a copy before passing it down into sqlite3Update() */
+ pSrc = sqlite3SrcListDup(db, pUpsert->pUpsertSrc, 0);
+ /* excluded.* columns of type REAL need to be converted to a hard real */
+ for(i=0; i<pTab->nCol; i++){
+ if( pTab->aCol[i].affinity==SQLITE_AFF_REAL ){
+ sqlite3VdbeAddOp1(v, OP_RealAffinity, pUpsert->regData+i);
+ }
+ }
+ sqlite3Update(pParse, pSrc, pUpsert->pUpsertSet,
+ pUpsert->pUpsertWhere, OE_Abort, 0, 0, pUpsert);
+ pUpsert->pUpsertSet = 0; /* Will have been deleted by sqlite3Update() */
+ pUpsert->pUpsertWhere = 0; /* Will have been deleted by sqlite3Update() */
+ VdbeNoopComment((v, "End DO UPDATE of UPSERT"));
+}
+
+#endif /* SQLITE_OMIT_UPSERT */
+
+/************** End of upsert.c **********************************************/
+/************** Begin file vacuum.c ******************************************/
+/*
+** 2003 April 6
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+** This file contains code used to implement the VACUUM command.
+**
+** Most of the code in this file may be omitted by defining the
+** SQLITE_OMIT_VACUUM macro.
+*/
+/* #include "sqliteInt.h" */
+/* #include "vdbeInt.h" */
+
+#if !defined(SQLITE_OMIT_VACUUM) && !defined(SQLITE_OMIT_ATTACH)
+
+/*
+** Execute zSql on database db.
+**
+** If zSql returns rows, then each row will have exactly one
+** column. (This will only happen if zSql begins with "SELECT".)
+** Take each row of the result and call execSql() again recursively.
+**
+** The execSqlF() routine does the same thing, except it accepts
+** a format string as its third argument.
+*/
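+/* For instance, a (hypothetical) top-level call such as
+**
+**     execSql(db, pzErrMsg, "SELECT sql FROM sqlite_schema WHERE sql!=''");
+**
+** steps through the schema rows and recursively executes each CREATE
+** statement, which is how VACUUM rebuilds objects in the new database.
+*/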
+static int execSql(sqlite3 *db, char **pzErrMsg, const char *zSql){
+ sqlite3_stmt *pStmt;
+ int rc;
+
+ /* printf("SQL: [%s]\n", zSql); fflush(stdout); */
+ rc = sqlite3_prepare_v2(db, zSql, -1, &pStmt, 0);
+ if( rc!=SQLITE_OK ) return rc;
+ while( SQLITE_ROW==(rc = sqlite3_step(pStmt)) ){
+ const char *zSubSql = (const char*)sqlite3_column_text(pStmt,0);
+ assert( sqlite3_strnicmp(zSql,"SELECT",6)==0 );
+ /* The secondary SQL must be one of CREATE TABLE, CREATE INDEX,
+ ** or INSERT. Historically there have been attacks that first
+ ** corrupt the sqlite_schema.sql field with other kinds of statements
+ ** then run VACUUM to get those statements to execute at inappropriate
+ ** times. */
+ if( zSubSql
+ && (strncmp(zSubSql,"CRE",3)==0 || strncmp(zSubSql,"INS",3)==0)
+ ){
+ rc = execSql(db, pzErrMsg, zSubSql);
+ if( rc!=SQLITE_OK ) break;
+ }
+ }
+ assert( rc!=SQLITE_ROW );
+ if( rc==SQLITE_DONE ) rc = SQLITE_OK;
+ if( rc ){
+ sqlite3SetString(pzErrMsg, db, sqlite3_errmsg(db));
+ }
+ (void)sqlite3_finalize(pStmt);
+ return rc;
+}
+static int execSqlF(sqlite3 *db, char **pzErrMsg, const char *zSql, ...){
+ char *z;
+ va_list ap;
+ int rc;
+ va_start(ap, zSql);
+ z = sqlite3VMPrintf(db, zSql, ap);
+ va_end(ap);
+ if( z==0 ) return SQLITE_NOMEM;
+ rc = execSql(db, pzErrMsg, z);
+ sqlite3DbFree(db, z);
+ return rc;
+}
+
+/*
+** The VACUUM command is used to clean up the database,
+** collapse free space, etc. It is modelled after the VACUUM command
+** in PostgreSQL. The VACUUM command works as follows:
+**
+** (1) Create a new transient database file
+** (2) Copy all content from the database being vacuumed into
+** the new transient database file
+** (3) Copy content from the transient database back into the
+** original database.
+**
+** The transient database requires temporary disk space approximately
+** equal to the size of the original database. The copy operation of
+** step (3) requires additional temporary disk space approximately equal
+** to the size of the original database for the rollback journal.
+** Hence, temporary disk space that is approximately 2x the size of the
+** original database is required. Every page of the database is written
+** approximately 3 times: Once for step (2) and twice for step (3).
+** Two writes per page are required in step (3) because the original
+** database content must be written into the rollback journal prior to
+** overwriting the database with the vacuumed content.
+**
+** Only 1x temporary space and only 1x writes would be required if
+** the copy of step (3) were replaced by deleting the original database
+** and renaming the transient database as the original. But that will
+** not work if other processes are attached to the original database.
+** And a power loss in between deleting the original and renaming the
+** transient would cause the database file to appear to be deleted
+** following reboot.
+*/
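+/* Worked example of the space and I/O bounds above: vacuuming a 1 GB
+** database needs roughly 1 GB for the transient database plus roughly
+** 1 GB for the rollback journal (about 2 GB of temporary space in all),
+** and writes each page about three times, or about 3 GB of total writes.
+*/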
+SQLITE_PRIVATE void sqlite3Vacuum(Parse *pParse, Token *pNm, Expr *pInto){
+ Vdbe *v = sqlite3GetVdbe(pParse);
+ int iDb = 0;
+ if( v==0 ) goto build_vacuum_end;
+ if( pParse->nErr ) goto build_vacuum_end;
+ if( pNm ){
+#ifndef SQLITE_BUG_COMPATIBLE_20160819
+ /* Default behavior: Report an error if the argument to VACUUM is
+ ** not recognized */
+ iDb = sqlite3TwoPartName(pParse, pNm, pNm, &pNm);
+ if( iDb<0 ) goto build_vacuum_end;
+#else
+ /* When SQLITE_BUG_COMPATIBLE_20160819 is defined, unrecognized arguments
+ ** to VACUUM are silently ignored. This is a back-out of a bug fix that
+ ** occurred on 2016-08-19 (https://www.sqlite.org/src/info/083f9e6270).
+ ** The buggy behavior is required for binary compatibility with some
+ ** legacy applications. */
+ iDb = sqlite3FindDb(pParse->db, pNm);
+ if( iDb<0 ) iDb = 0;
+#endif
+ }
+ if( iDb!=1 ){
+ int iIntoReg = 0;
+ if( pInto && sqlite3ResolveSelfReference(pParse,0,0,pInto,0)==0 ){
+ iIntoReg = ++pParse->nMem;
+ sqlite3ExprCode(pParse, pInto, iIntoReg);
+ }
+ sqlite3VdbeAddOp2(v, OP_Vacuum, iDb, iIntoReg);
+ sqlite3VdbeUsesBtree(v, iDb);
+ }
+build_vacuum_end:
+ sqlite3ExprDelete(pParse->db, pInto);
+ return;
+}
+
+/*
+** This routine implements the OP_Vacuum opcode of the VDBE.
+*/
+SQLITE_PRIVATE SQLITE_NOINLINE int sqlite3RunVacuum(
+ char **pzErrMsg, /* Write error message here */
+ sqlite3 *db, /* Database connection */
+ int iDb, /* Which attached DB to vacuum */
+ sqlite3_value *pOut /* Write results here, if not NULL. VACUUM INTO */
+){
+ int rc = SQLITE_OK; /* Return code from service routines */
+ Btree *pMain; /* The database being vacuumed */
+ Btree *pTemp; /* The temporary database we vacuum into */
+ u32 saved_mDbFlags; /* Saved value of db->mDbFlags */
+ u64 saved_flags; /* Saved value of db->flags */
+ int saved_nChange; /* Saved value of db->nChange */
+ int saved_nTotalChange; /* Saved value of db->nTotalChange */
+ u32 saved_openFlags; /* Saved value of db->openFlags */
+ u8 saved_mTrace; /* Saved trace settings */
+ Db *pDb = 0; /* Database to detach at end of vacuum */
+ int isMemDb; /* True if vacuuming a :memory: database */
+ int nRes; /* Bytes of reserved space at the end of each page */
+ int nDb; /* Number of attached databases */
+ const char *zDbMain; /* Schema name of database to vacuum */
+ const char *zOut; /* Name of output file */
+
+ if( !db->autoCommit ){
+ sqlite3SetString(pzErrMsg, db, "cannot VACUUM from within a transaction");
+ return SQLITE_ERROR; /* IMP: R-12218-18073 */
+ }
+ if( db->nVdbeActive>1 ){
+ sqlite3SetString(pzErrMsg, db,"cannot VACUUM - SQL statements in progress");
+ return SQLITE_ERROR; /* IMP: R-15610-35227 */
+ }
+ saved_openFlags = db->openFlags;
+ if( pOut ){
+ if( sqlite3_value_type(pOut)!=SQLITE_TEXT ){
+ sqlite3SetString(pzErrMsg, db, "non-text filename");
+ return SQLITE_ERROR;
+ }
+ zOut = (const char*)sqlite3_value_text(pOut);
+ db->openFlags &= ~SQLITE_OPEN_READONLY;
+ db->openFlags |= SQLITE_OPEN_CREATE|SQLITE_OPEN_READWRITE;
+ }else{
+ zOut = "";
+ }
+
+ /* Save the current value of the database flags so that it can be
+ ** restored before returning. Then set the writable-schema flag, and
+ ** disable CHECK and foreign key constraints. */
+ saved_flags = db->flags;
+ saved_mDbFlags = db->mDbFlags;
+ saved_nChange = db->nChange;
+ saved_nTotalChange = db->nTotalChange;
+ saved_mTrace = db->mTrace;
+ db->flags |= SQLITE_WriteSchema | SQLITE_IgnoreChecks;
+ db->mDbFlags |= DBFLAG_PreferBuiltin | DBFLAG_Vacuum;
+ db->flags &= ~(u64)(SQLITE_ForeignKeys | SQLITE_ReverseOrder
+ | SQLITE_Defensive | SQLITE_CountRows);
+ db->mTrace = 0;
+
+ zDbMain = db->aDb[iDb].zDbSName;
+ pMain = db->aDb[iDb].pBt;
+ isMemDb = sqlite3PagerIsMemdb(sqlite3BtreePager(pMain));
+
+ /* Attach the temporary database as 'vacuum_db'. The synchronous pragma
+ ** can be set to 'off' for this file, as it is not recovered if a crash
+ ** occurs anyway. The integrity of the database is maintained by a
+ ** (possibly synchronous) transaction opened on the main database before
+ ** sqlite3BtreeCopyFile() is called.
+ **
+ ** An optimisation would be to use a non-journaled pager.
+ ** (Later:) I tried setting "PRAGMA vacuum_db.journal_mode=OFF" but
+ ** that actually made the VACUUM run slower. Very little journalling
+ ** actually occurs when doing a vacuum since the vacuum_db is initially
+ ** empty. Only the journal header is written. Apparently it takes more
+ ** time to parse and run the PRAGMA to turn journalling off than it does
+ ** to write the journal header file.
+ */
+ nDb = db->nDb;
+ rc = execSqlF(db, pzErrMsg, "ATTACH %Q AS vacuum_db", zOut);
+ db->openFlags = saved_openFlags;
+ if( rc!=SQLITE_OK ) goto end_of_vacuum;
+ assert( (db->nDb-1)==nDb );
+ pDb = &db->aDb[nDb];
+ assert( strcmp(pDb->zDbSName,"vacuum_db")==0 );
+ pTemp = pDb->pBt;
+ if( pOut ){
+ sqlite3_file *id = sqlite3PagerFile(sqlite3BtreePager(pTemp));
+ i64 sz = 0;
+ if( id->pMethods!=0 && (sqlite3OsFileSize(id, &sz)!=SQLITE_OK || sz>0) ){
+ rc = SQLITE_ERROR;
+ sqlite3SetString(pzErrMsg, db, "output file already exists");
+ goto end_of_vacuum;
+ }
+ db->mDbFlags |= DBFLAG_VacuumInto;
+ }
+ nRes = sqlite3BtreeGetRequestedReserve(pMain);
+
+ sqlite3BtreeSetCacheSize(pTemp, db->aDb[iDb].pSchema->cache_size);
+ sqlite3BtreeSetSpillSize(pTemp, sqlite3BtreeSetSpillSize(pMain,0));
+ sqlite3BtreeSetPagerFlags(pTemp, PAGER_SYNCHRONOUS_OFF|PAGER_CACHESPILL);
+
+ /* Begin a transaction and take an exclusive lock on the main database
+ ** file. This is done before the sqlite3BtreeGetPageSize(pMain) call below,
+ ** to ensure that we do not try to change the page-size on a WAL database.
+ */
+ rc = execSql(db, pzErrMsg, "BEGIN");
+ if( rc!=SQLITE_OK ) goto end_of_vacuum;
+ rc = sqlite3BtreeBeginTrans(pMain, pOut==0 ? 2 : 0, 0);
+ if( rc!=SQLITE_OK ) goto end_of_vacuum;
+
+ /* Do not attempt to change the page size for a WAL database */
+ if( sqlite3PagerGetJournalMode(sqlite3BtreePager(pMain))
+ ==PAGER_JOURNALMODE_WAL ){
+ db->nextPagesize = 0;
+ }
+
+ if( sqlite3BtreeSetPageSize(pTemp, sqlite3BtreeGetPageSize(pMain), nRes, 0)
+ || (!isMemDb && sqlite3BtreeSetPageSize(pTemp, db->nextPagesize, nRes, 0))
+ || NEVER(db->mallocFailed)
+ ){
+ rc = SQLITE_NOMEM_BKPT;
+ goto end_of_vacuum;
+ }
+
+#ifndef SQLITE_OMIT_AUTOVACUUM
+ sqlite3BtreeSetAutoVacuum(pTemp, db->nextAutovac>=0 ? db->nextAutovac :
+ sqlite3BtreeGetAutoVacuum(pMain));
+#endif
+
+ /* Query the schema of the main database. Create a mirror schema
+ ** in the temporary database.
+ */
+ db->init.iDb = nDb; /* force new CREATE statements into vacuum_db */
+ rc = execSqlF(db, pzErrMsg,
+ "SELECT sql FROM \"%w\".sqlite_schema"
+ " WHERE type='table'AND name<>'sqlite_sequence'"
+ " AND coalesce(rootpage,1)>0",
+ zDbMain
+ );
+ if( rc!=SQLITE_OK ) goto end_of_vacuum;
+ rc = execSqlF(db, pzErrMsg,
+ "SELECT sql FROM \"%w\".sqlite_schema"
+ " WHERE type='index'",
+ zDbMain
+ );
+ if( rc!=SQLITE_OK ) goto end_of_vacuum;
+ db->init.iDb = 0;
+
+ /* Loop through the tables in the main database. For each, do
+ ** an "INSERT INTO vacuum_db.xxx SELECT * FROM main.xxx;" to copy
+ ** the contents to the temporary database.
+ */
+ rc = execSqlF(db, pzErrMsg,
+ "SELECT'INSERT INTO vacuum_db.'||quote(name)"
+ "||' SELECT*FROM\"%w\".'||quote(name)"
+ "FROM vacuum_db.sqlite_schema "
+ "WHERE type='table'AND coalesce(rootpage,1)>0",
+ zDbMain
+ );
+ assert( (db->mDbFlags & DBFLAG_Vacuum)!=0 );
+ db->mDbFlags &= ~DBFLAG_Vacuum;
+ if( rc!=SQLITE_OK ) goto end_of_vacuum;
+
+ /* Copy the triggers, views, and virtual tables from the main database
+ ** over to the temporary database. None of these objects has any
+ ** associated storage, so all we have to do is copy their entries
+ ** from the schema table.
+ */
+ rc = execSqlF(db, pzErrMsg,
+ "INSERT INTO vacuum_db.sqlite_schema"
+ " SELECT*FROM \"%w\".sqlite_schema"
+ " WHERE type IN('view','trigger')"
+ " OR(type='table'AND rootpage=0)",
+ zDbMain
+ );
+ if( rc ) goto end_of_vacuum;
+
+ /* At this point, there is a write transaction open on both the
+ ** vacuum database and the main database. Assuming no error occurs,
+ ** both transactions are closed by this block - the main database
+ ** transaction by sqlite3BtreeCopyFile() and the other by an explicit
+ ** call to sqlite3BtreeCommit().
+ */
+ {
+ u32 meta;
+ int i;
+
+    /* This array determines which meta values are preserved in the
+ ** vacuum. Even entries are the meta value number and odd entries
+ ** are an increment to apply to the meta value after the vacuum.
+ ** The increment is used to increase the schema cookie so that other
+ ** connections to the same database will know to reread the schema.
+ */
+ static const unsigned char aCopy[] = {
+ BTREE_SCHEMA_VERSION, 1, /* Add one to the old schema cookie */
+ BTREE_DEFAULT_CACHE_SIZE, 0, /* Preserve the default page cache size */
+ BTREE_TEXT_ENCODING, 0, /* Preserve the text encoding */
+ BTREE_USER_VERSION, 0, /* Preserve the user version */
+ BTREE_APPLICATION_ID, 0, /* Preserve the application id */
+ };
+
+ assert( 1==sqlite3BtreeIsInTrans(pTemp) );
+ assert( pOut!=0 || 1==sqlite3BtreeIsInTrans(pMain) );
+
+ /* Copy Btree meta values */
+ for(i=0; i<ArraySize(aCopy); i+=2){
+ /* GetMeta() and UpdateMeta() cannot fail in this context because
+ ** we already have page 1 loaded into cache and marked dirty. */
+ sqlite3BtreeGetMeta(pMain, aCopy[i], &meta);
+ rc = sqlite3BtreeUpdateMeta(pTemp, aCopy[i], meta+aCopy[i+1]);
+ if( NEVER(rc!=SQLITE_OK) ) goto end_of_vacuum;
+ }
+
+ if( pOut==0 ){
+ rc = sqlite3BtreeCopyFile(pMain, pTemp);
+ }
+ if( rc!=SQLITE_OK ) goto end_of_vacuum;
+ rc = sqlite3BtreeCommit(pTemp);
+ if( rc!=SQLITE_OK ) goto end_of_vacuum;
+#ifndef SQLITE_OMIT_AUTOVACUUM
+ if( pOut==0 ){
+ sqlite3BtreeSetAutoVacuum(pMain, sqlite3BtreeGetAutoVacuum(pTemp));
+ }
+#endif
+ }
+
+ assert( rc==SQLITE_OK );
+ if( pOut==0 ){
+ rc = sqlite3BtreeSetPageSize(pMain, sqlite3BtreeGetPageSize(pTemp), nRes,1);
+ }
+
+end_of_vacuum:
+ /* Restore the original value of db->flags */
+ db->init.iDb = 0;
+ db->mDbFlags = saved_mDbFlags;
+ db->flags = saved_flags;
+ db->nChange = saved_nChange;
+ db->nTotalChange = saved_nTotalChange;
+ db->mTrace = saved_mTrace;
+ sqlite3BtreeSetPageSize(pMain, -1, 0, 1);
+
+ /* Currently there is an SQL level transaction open on the vacuum
+ ** database. No locks are held on any other files (since the main file
+  ** was committed at the btree level). So it is safe to end the transaction
+ ** by manually setting the autoCommit flag to true and detaching the
+ ** vacuum database. The vacuum_db journal file is deleted when the pager
+ ** is closed by the DETACH.
+ */
+ db->autoCommit = 1;
+
+ if( pDb ){
+ sqlite3BtreeClose(pDb->pBt);
+ pDb->pBt = 0;
+ pDb->pSchema = 0;
+ }
+
+ /* This both clears the schemas and reduces the size of the db->aDb[]
+ ** array. */
+ sqlite3ResetAllSchemasOfConnection(db);
+
+ return rc;
+}
+
+#endif /* SQLITE_OMIT_VACUUM && SQLITE_OMIT_ATTACH */
+
+/************** End of vacuum.c **********************************************/
+/************** Begin file vtab.c ********************************************/
+/*
+** 2006 June 10
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+** This file contains code used to help implement virtual tables.
+*/
+#ifndef SQLITE_OMIT_VIRTUALTABLE
+/* #include "sqliteInt.h" */
+
+/*
+** Before a virtual table xCreate() or xConnect() method is invoked, the
+** sqlite3.pVtabCtx member variable is set to point to an instance of
+** this struct allocated on the stack. It is used by the implementation of
+** the sqlite3_declare_vtab() and sqlite3_vtab_config() APIs, both of which
+** are invoked only from within xCreate and xConnect methods.
+*/
+struct VtabCtx {
+ VTable *pVTable; /* The virtual table being constructed */
+ Table *pTab; /* The Table object to which the virtual table belongs */
+ VtabCtx *pPrior; /* Parent context (if any) */
+ int bDeclared; /* True after sqlite3_declare_vtab() is called */
+};
+
+/*
+** Construct and install a Module object for a virtual table. When this
+** routine is called, it is guaranteed that all appropriate locks are held
+** and the module is not already part of the connection.
+**
+** If there already exists a module with zName, replace it with the new one.
+** If pModule==0, then delete the module zName if it exists.
+*/
+SQLITE_PRIVATE Module *sqlite3VtabCreateModule(
+ sqlite3 *db, /* Database in which module is registered */
+ const char *zName, /* Name assigned to this module */
+ const sqlite3_module *pModule, /* The definition of the module */
+ void *pAux, /* Context pointer for xCreate/xConnect */
+ void (*xDestroy)(void *) /* Module destructor function */
+){
+ Module *pMod;
+ Module *pDel;
+ char *zCopy;
+ if( pModule==0 ){
+ zCopy = (char*)zName;
+ pMod = 0;
+ }else{
+ int nName = sqlite3Strlen30(zName);
+ pMod = (Module *)sqlite3Malloc(sizeof(Module) + nName + 1);
+ if( pMod==0 ){
+ sqlite3OomFault(db);
+ return 0;
+ }
+ zCopy = (char *)(&pMod[1]);
+ memcpy(zCopy, zName, nName+1);
+ pMod->zName = zCopy;
+ pMod->pModule = pModule;
+ pMod->pAux = pAux;
+ pMod->xDestroy = xDestroy;
+ pMod->pEpoTab = 0;
+ pMod->nRefModule = 1;
+ }
+ pDel = (Module *)sqlite3HashInsert(&db->aModule,zCopy,(void*)pMod);
+ if( pDel ){
+ if( pDel==pMod ){
+ sqlite3OomFault(db);
+ sqlite3DbFree(db, pDel);
+ pMod = 0;
+ }else{
+ sqlite3VtabEponymousTableClear(db, pDel);
+ sqlite3VtabModuleUnref(db, pDel);
+ }
+ }
+ return pMod;
+}
+
+/*
+** The actual function that does the work of creating a new module.
+** This function implements the sqlite3_create_module() and
+** sqlite3_create_module_v2() interfaces.
+*/
+static int createModule(
+ sqlite3 *db, /* Database in which module is registered */
+ const char *zName, /* Name assigned to this module */
+ const sqlite3_module *pModule, /* The definition of the module */
+ void *pAux, /* Context pointer for xCreate/xConnect */
+ void (*xDestroy)(void *) /* Module destructor function */
+){
+ int rc = SQLITE_OK;
+
+ sqlite3_mutex_enter(db->mutex);
+ (void)sqlite3VtabCreateModule(db, zName, pModule, pAux, xDestroy);
+ rc = sqlite3ApiExit(db, rc);
+ if( rc!=SQLITE_OK && xDestroy ) xDestroy(pAux);
+ sqlite3_mutex_leave(db->mutex);
+ return rc;
+}
+
+
+/*
+** External API function used to create a new virtual-table module.
+*/
+SQLITE_API int sqlite3_create_module(
+ sqlite3 *db, /* Database in which module is registered */
+ const char *zName, /* Name assigned to this module */
+ const sqlite3_module *pModule, /* The definition of the module */
+ void *pAux /* Context pointer for xCreate/xConnect */
+){
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( !sqlite3SafetyCheckOk(db) || zName==0 ) return SQLITE_MISUSE_BKPT;
+#endif
+ return createModule(db, zName, pModule, pAux, 0);
+}
+
+/*
+** External API function used to create a new virtual-table module.
+*/
+SQLITE_API int sqlite3_create_module_v2(
+ sqlite3 *db, /* Database in which module is registered */
+ const char *zName, /* Name assigned to this module */
+ const sqlite3_module *pModule, /* The definition of the module */
+ void *pAux, /* Context pointer for xCreate/xConnect */
+ void (*xDestroy)(void *) /* Module destructor function */
+){
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( !sqlite3SafetyCheckOk(db) || zName==0 ) return SQLITE_MISUSE_BKPT;
+#endif
+ return createModule(db, zName, pModule, pAux, xDestroy);
+}
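+
+/*
+** Illustrative sketch (compiled out; not part of SQLite): registering a
+** hypothetical module named "my_vtab" using sqlite3_create_module_v2().
+** The sqlite3_module object myVtabModule is assumed to be defined
+** elsewhere with its xCreate/xConnect and related methods filled in.
+*/
+#if 0
+extern const sqlite3_module myVtabModule;  /* hypothetical; defined elsewhere */
+static int registerMyVtab(sqlite3 *db){
+  /* The client data is heap-allocated here, so sqlite3_free is passed as
+  ** the destructor. Per createModule() above, the destructor also runs
+  ** if registration fails, so pClientData is never leaked. */
+  void *pClientData = sqlite3_malloc(64);
+  if( pClientData==0 ) return SQLITE_NOMEM;
+  return sqlite3_create_module_v2(db, "my_vtab", &myVtabModule,
+                                  pClientData, sqlite3_free);
+}
+#endif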
+
+/*
+** External API to drop all virtual-table modules, except those named
+** on the azNames list.
+*/
+SQLITE_API int sqlite3_drop_modules(sqlite3 *db, const char** azNames){
+ HashElem *pThis, *pNext;
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( !sqlite3SafetyCheckOk(db) ) return SQLITE_MISUSE_BKPT;
+#endif
+ for(pThis=sqliteHashFirst(&db->aModule); pThis; pThis=pNext){
+ Module *pMod = (Module*)sqliteHashData(pThis);
+ pNext = sqliteHashNext(pThis);
+ if( azNames ){
+ int ii;
+ for(ii=0; azNames[ii]!=0 && strcmp(azNames[ii],pMod->zName)!=0; ii++){}
+ if( azNames[ii]!=0 ) continue;
+ }
+ createModule(db, pMod->zName, 0, 0, 0);
+ }
+ return SQLITE_OK;
+}
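+
+/*
+** Illustrative sketch (compiled out; not part of SQLite): dropping every
+** registered virtual-table module except those on a keep-list. The module
+** name shown is hypothetical.
+*/
+#if 0
+static int dropAllButMyVtab(sqlite3 *db){
+  /* azKeep is a NULL-terminated array of module names to preserve */
+  static const char *azKeep[] = { "my_vtab", 0 };
+  return sqlite3_drop_modules(db, azKeep);
+}
+#endif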
+
+/*
+** Decrement the reference count on a Module object. Destroy the
+** module when the reference count reaches zero.
+*/
+SQLITE_PRIVATE void sqlite3VtabModuleUnref(sqlite3 *db, Module *pMod){
+ assert( pMod->nRefModule>0 );
+ pMod->nRefModule--;
+ if( pMod->nRefModule==0 ){
+ if( pMod->xDestroy ){
+ pMod->xDestroy(pMod->pAux);
+ }
+ assert( pMod->pEpoTab==0 );
+ sqlite3DbFree(db, pMod);
+ }
+}
+
+/*
+** Lock the virtual table so that it cannot be disconnected.
+** Locks nest. Every lock should have a corresponding unlock.
+** If an unlock is omitted, resource leaks will occur.
+**
+** If a disconnect is attempted while a virtual table is locked,
+** the disconnect is deferred until all locks have been removed.
+*/
+SQLITE_PRIVATE void sqlite3VtabLock(VTable *pVTab){
+ pVTab->nRef++;
+}
+
+
+/*
+** pTab is a pointer to a Table structure representing a virtual-table.
+** Return a pointer to the VTable object used by connection db to access
+** this virtual-table, if one has been created, or NULL otherwise.
+*/
+SQLITE_PRIVATE VTable *sqlite3GetVTable(sqlite3 *db, Table *pTab){
+ VTable *pVtab;
+ assert( IsVirtual(pTab) );
+ for(pVtab=pTab->pVTable; pVtab && pVtab->db!=db; pVtab=pVtab->pNext);
+ return pVtab;
+}
+
+/*
+** Decrement the ref-count on a virtual table object. When the ref-count
+** reaches zero, call the xDisconnect() method to delete the object.
+*/
+SQLITE_PRIVATE void sqlite3VtabUnlock(VTable *pVTab){
+ sqlite3 *db = pVTab->db;
+
+ assert( db );
+ assert( pVTab->nRef>0 );
+ assert( db->magic==SQLITE_MAGIC_OPEN || db->magic==SQLITE_MAGIC_ZOMBIE );
+
+ pVTab->nRef--;
+ if( pVTab->nRef==0 ){
+ sqlite3_vtab *p = pVTab->pVtab;
+ sqlite3VtabModuleUnref(pVTab->db, pVTab->pMod);
+ if( p ){
+ p->pModule->xDisconnect(p);
+ }
+ sqlite3DbFree(db, pVTab);
+ }
+}
+
+/*
+** Table p is a virtual table. This function moves all elements in the
+** p->pVTable list to the sqlite3.pDisconnect lists of their associated
+** database connections to be disconnected at the next opportunity.
+** Except, if argument db is not NULL, then the entry associated with
+** connection db is left in the p->pVTable list.
+*/
+static VTable *vtabDisconnectAll(sqlite3 *db, Table *p){
+ VTable *pRet = 0;
+ VTable *pVTable = p->pVTable;
+ p->pVTable = 0;
+
+ /* Assert that the mutex (if any) associated with the BtShared database
+ ** that contains table p is held by the caller. See header comments
+ ** above function sqlite3VtabUnlockList() for an explanation of why
+ ** this makes it safe to access the sqlite3.pDisconnect list of any
+ ** database connection that may have an entry in the p->pVTable list.
+ */
+ assert( db==0 || sqlite3SchemaMutexHeld(db, 0, p->pSchema) );
+
+ while( pVTable ){
+ sqlite3 *db2 = pVTable->db;
+ VTable *pNext = pVTable->pNext;
+ assert( db2 );
+ if( db2==db ){
+ pRet = pVTable;
+ p->pVTable = pRet;
+ pRet->pNext = 0;
+ }else{
+ pVTable->pNext = db2->pDisconnect;
+ db2->pDisconnect = pVTable;
+ }
+ pVTable = pNext;
+ }
+
+ assert( !db || pRet );
+ return pRet;
+}
+
+/*
+** Table *p is a virtual table. This function removes the VTable object
+** for table *p associated with database connection db from the linked
+** list in p->pVTable. It also decrements the VTable ref count. This is
+** used when closing database connection db to free all of its VTable
+** objects without disturbing the rest of the Schema object (which may
+** be being used by other shared-cache connections).
+*/
+SQLITE_PRIVATE void sqlite3VtabDisconnect(sqlite3 *db, Table *p){
+ VTable **ppVTab;
+
+ assert( IsVirtual(p) );
+ assert( sqlite3BtreeHoldsAllMutexes(db) );
+ assert( sqlite3_mutex_held(db->mutex) );
+
+ for(ppVTab=&p->pVTable; *ppVTab; ppVTab=&(*ppVTab)->pNext){
+ if( (*ppVTab)->db==db ){
+ VTable *pVTab = *ppVTab;
+ *ppVTab = pVTab->pNext;
+ sqlite3VtabUnlock(pVTab);
+ break;
+ }
+ }
+}
+
+
+/*
+** Disconnect all the virtual table objects in the sqlite3.pDisconnect list.
+**
+** This function may only be called when the mutexes associated with all
+** shared b-tree databases opened using connection db are held by the
+** caller. This is done to protect the sqlite3.pDisconnect list. The
+** sqlite3.pDisconnect list is accessed only as follows:
+**
+** 1) By this function. In this case, all BtShared mutexes and the mutex
+** associated with the database handle itself must be held.
+**
+** 2) By function vtabDisconnectAll(), when it adds a VTable entry to
+** the sqlite3.pDisconnect list. In this case either the BtShared mutex
+** associated with the database the virtual table is stored in is held
+** or, if the virtual table is stored in a non-sharable database, then
+** the database handle mutex is held.
+**
+** As a result, the sqlite3.pDisconnect list cannot be accessed
+** simultaneously by multiple threads. It is thread-safe.
+*/
+SQLITE_PRIVATE void sqlite3VtabUnlockList(sqlite3 *db){
+ VTable *p = db->pDisconnect;
+
+ assert( sqlite3BtreeHoldsAllMutexes(db) );
+ assert( sqlite3_mutex_held(db->mutex) );
+
+ if( p ){
+ db->pDisconnect = 0;
+ sqlite3ExpirePreparedStatements(db, 0);
+ do {
+ VTable *pNext = p->pNext;
+ sqlite3VtabUnlock(p);
+ p = pNext;
+ }while( p );
+ }
+}
+
+/*
+** Clear any and all virtual-table information from the Table record.
+** This routine is called, for example, just before deleting the Table
+** record.
+**
+** Since it is a virtual-table, the Table structure contains a pointer
+** to the head of a linked list of VTable structures. Each VTable
+** structure is associated with a single sqlite3* user of the schema.
+** The reference count of the VTable structure associated with database
+** connection db is decremented immediately (which may lead to the
+** structure being xDisconnected and freed). Any other VTable structures
+** in the list are moved to the sqlite3.pDisconnect list of the associated
+** database connection.
+*/
+SQLITE_PRIVATE void sqlite3VtabClear(sqlite3 *db, Table *p){
+ if( !db || db->pnBytesFreed==0 ) vtabDisconnectAll(0, p);
+ if( p->azModuleArg ){
+ int i;
+ for(i=0; i<p->nModuleArg; i++){
+ if( i!=1 ) sqlite3DbFree(db, p->azModuleArg[i]);
+ }
+ sqlite3DbFree(db, p->azModuleArg);
+ }
+}
+
+/*
+** Add a new module argument to pTable->azModuleArg[].
+** The string is not copied - the pointer is stored. The
+** string will be freed automatically when the table is
+** deleted.
+*/
+static void addModuleArgument(Parse *pParse, Table *pTable, char *zArg){
+ sqlite3_int64 nBytes = sizeof(char *)*(2+pTable->nModuleArg);
+ char **azModuleArg;
+ sqlite3 *db = pParse->db;
+ if( pTable->nModuleArg+3>=db->aLimit[SQLITE_LIMIT_COLUMN] ){
+ sqlite3ErrorMsg(pParse, "too many columns on %s", pTable->zName);
+ }
+ azModuleArg = sqlite3DbRealloc(db, pTable->azModuleArg, nBytes);
+ if( azModuleArg==0 ){
+ sqlite3DbFree(db, zArg);
+ }else{
+ int i = pTable->nModuleArg++;
+ azModuleArg[i] = zArg;
+ azModuleArg[i+1] = 0;
+ pTable->azModuleArg = azModuleArg;
+ }
+}
+
+/*
+** The parser calls this routine when it first sees a CREATE VIRTUAL TABLE
+** statement. The module name has been parsed, but the optional list
+** of parameters that follow the module name are still pending.
+*/
+SQLITE_PRIVATE void sqlite3VtabBeginParse(
+ Parse *pParse, /* Parsing context */
+ Token *pName1, /* Name of new table, or database name */
+ Token *pName2, /* Name of new table or NULL */
+ Token *pModuleName, /* Name of the module for the virtual table */
+ int ifNotExists /* No error if the table already exists */
+){
+ Table *pTable; /* The new virtual table */
+ sqlite3 *db; /* Database connection */
+
+ sqlite3StartTable(pParse, pName1, pName2, 0, 0, 1, ifNotExists);
+ pTable = pParse->pNewTable;
+ if( pTable==0 ) return;
+ assert( 0==pTable->pIndex );
+
+ db = pParse->db;
+
+ assert( pTable->nModuleArg==0 );
+ addModuleArgument(pParse, pTable, sqlite3NameFromToken(db, pModuleName));
+ addModuleArgument(pParse, pTable, 0);
+ addModuleArgument(pParse, pTable, sqlite3DbStrDup(db, pTable->zName));
+ assert( (pParse->sNameToken.z==pName2->z && pName2->z!=0)
+ || (pParse->sNameToken.z==pName1->z && pName2->z==0)
+ );
+ pParse->sNameToken.n = (int)(
+ &pModuleName->z[pModuleName->n] - pParse->sNameToken.z
+ );
+
+#ifndef SQLITE_OMIT_AUTHORIZATION
+ /* Creating a virtual table invokes the authorization callback twice.
+ ** The first invocation, to obtain permission to INSERT a row into the
+ ** sqlite_schema table, has already been made by sqlite3StartTable().
+ ** The second call, to obtain permission to create the table, is made now.
+ */
+ if( pTable->azModuleArg ){
+ int iDb = sqlite3SchemaToIndex(db, pTable->pSchema);
+ assert( iDb>=0 ); /* The database the table is being created in */
+ sqlite3AuthCheck(pParse, SQLITE_CREATE_VTABLE, pTable->zName,
+ pTable->azModuleArg[0], pParse->db->aDb[iDb].zDbSName);
+ }
+#endif
+}
+
+/*
+** This routine takes the module argument that has been accumulating
+** in pParse->zArg[] and appends it to the list of arguments on the
+** virtual table currently under construction in pParse->pTable.
+*/
+static void addArgumentToVtab(Parse *pParse){
+ if( pParse->sArg.z && pParse->pNewTable ){
+ const char *z = (const char*)pParse->sArg.z;
+ int n = pParse->sArg.n;
+ sqlite3 *db = pParse->db;
+ addModuleArgument(pParse, pParse->pNewTable, sqlite3DbStrNDup(db, z, n));
+ }
+}
+
+/*
+** The parser calls this routine after the CREATE VIRTUAL TABLE statement
+** has been completely parsed.
+*/
+SQLITE_PRIVATE void sqlite3VtabFinishParse(Parse *pParse, Token *pEnd){
+ Table *pTab = pParse->pNewTable; /* The table being constructed */
+ sqlite3 *db = pParse->db; /* The database connection */
+
+ if( pTab==0 ) return;
+ addArgumentToVtab(pParse);
+ pParse->sArg.z = 0;
+ if( pTab->nModuleArg<1 ) return;
+
+ /* If the CREATE VIRTUAL TABLE statement is being entered for the
+ ** first time (in other words if the virtual table is actually being
+ ** created now instead of just being read out of sqlite_schema) then
+ ** do additional initialization work and store the statement text
+ ** in the sqlite_schema table.
+ */
+ if( !db->init.busy ){
+ char *zStmt;
+ char *zWhere;
+ int iDb;
+ int iReg;
+ Vdbe *v;
+
+ sqlite3MayAbort(pParse);
+
+ /* Compute the complete text of the CREATE VIRTUAL TABLE statement */
+ if( pEnd ){
+ pParse->sNameToken.n = (int)(pEnd->z - pParse->sNameToken.z) + pEnd->n;
+ }
+ zStmt = sqlite3MPrintf(db, "CREATE VIRTUAL TABLE %T", &pParse->sNameToken);
+
+ /* A slot for the record has already been allocated in the
+ ** schema table. We just need to update that slot with all
+ ** the information we've collected.
+ **
+ ** The VM register number pParse->regRowid holds the rowid of an
+    ** entry in the sqlite_schema table that was created for this vtab
+ ** by sqlite3StartTable().
+ */
+ iDb = sqlite3SchemaToIndex(db, pTab->pSchema);
+ sqlite3NestedParse(pParse,
+ "UPDATE %Q." DFLT_SCHEMA_TABLE " "
+ "SET type='table', name=%Q, tbl_name=%Q, rootpage=0, sql=%Q "
+ "WHERE rowid=#%d",
+ db->aDb[iDb].zDbSName,
+ pTab->zName,
+ pTab->zName,
+ zStmt,
+ pParse->regRowid
+ );
+ v = sqlite3GetVdbe(pParse);
+ sqlite3ChangeCookie(pParse, iDb);
+
+ sqlite3VdbeAddOp0(v, OP_Expire);
+ zWhere = sqlite3MPrintf(db, "name=%Q AND sql=%Q", pTab->zName, zStmt);
+ sqlite3VdbeAddParseSchemaOp(v, iDb, zWhere);
+ sqlite3DbFree(db, zStmt);
+
+ iReg = ++pParse->nMem;
+ sqlite3VdbeLoadString(v, iReg, pTab->zName);
+ sqlite3VdbeAddOp2(v, OP_VCreate, iDb, iReg);
+ }
+
+ /* If we are rereading the sqlite_schema table create the in-memory
+ ** record of the table. The xConnect() method is not called until
+ ** the first time the virtual table is used in an SQL statement. This
+ ** allows a schema that contains virtual tables to be loaded before
+ ** the required virtual table implementations are registered. */
+ else {
+ Table *pOld;
+ Schema *pSchema = pTab->pSchema;
+ const char *zName = pTab->zName;
+ assert( sqlite3SchemaMutexHeld(db, 0, pSchema) );
+ pOld = sqlite3HashInsert(&pSchema->tblHash, zName, pTab);
+ if( pOld ){
+ sqlite3OomFault(db);
+ assert( pTab==pOld ); /* Malloc must have failed inside HashInsert() */
+ return;
+ }
+ pParse->pNewTable = 0;
+ }
+}
+
+/*
+** The parser calls this routine when it sees the first token
+** of an argument to the module name in a CREATE VIRTUAL TABLE statement.
+*/
+SQLITE_PRIVATE void sqlite3VtabArgInit(Parse *pParse){
+ addArgumentToVtab(pParse);
+ pParse->sArg.z = 0;
+ pParse->sArg.n = 0;
+}
+
+/*
+** The parser calls this routine for each token after the first token
+** in an argument to the module name in a CREATE VIRTUAL TABLE statement.
+*/
+SQLITE_PRIVATE void sqlite3VtabArgExtend(Parse *pParse, Token *p){
+ Token *pArg = &pParse->sArg;
+ if( pArg->z==0 ){
+ pArg->z = p->z;
+ pArg->n = p->n;
+ }else{
+ assert(pArg->z <= p->z);
+ pArg->n = (int)(&p->z[p->n] - pArg->z);
+ }
+}
+
+/*
+** Invoke a virtual table constructor (either xCreate or xConnect). The
+** pointer to the function to invoke is passed as the fourth parameter
+** to this procedure.
+*/
+static int vtabCallConstructor(
+ sqlite3 *db,
+ Table *pTab,
+ Module *pMod,
+ int (*xConstruct)(sqlite3*,void*,int,const char*const*,sqlite3_vtab**,char**),
+ char **pzErr
+){
+ VtabCtx sCtx;
+ VTable *pVTable;
+ int rc;
+ const char *const*azArg = (const char *const*)pTab->azModuleArg;
+ int nArg = pTab->nModuleArg;
+ char *zErr = 0;
+ char *zModuleName;
+ int iDb;
+ VtabCtx *pCtx;
+
+ /* Check that the virtual-table is not already being initialized */
+ for(pCtx=db->pVtabCtx; pCtx; pCtx=pCtx->pPrior){
+ if( pCtx->pTab==pTab ){
+ *pzErr = sqlite3MPrintf(db,
+ "vtable constructor called recursively: %s", pTab->zName
+ );
+ return SQLITE_LOCKED;
+ }
+ }
+
+ zModuleName = sqlite3DbStrDup(db, pTab->zName);
+ if( !zModuleName ){
+ return SQLITE_NOMEM_BKPT;
+ }
+
+ pVTable = sqlite3MallocZero(sizeof(VTable));
+ if( !pVTable ){
+ sqlite3OomFault(db);
+ sqlite3DbFree(db, zModuleName);
+ return SQLITE_NOMEM_BKPT;
+ }
+ pVTable->db = db;
+ pVTable->pMod = pMod;
+ pVTable->eVtabRisk = SQLITE_VTABRISK_Normal;
+
+ iDb = sqlite3SchemaToIndex(db, pTab->pSchema);
+ pTab->azModuleArg[1] = db->aDb[iDb].zDbSName;
+
+ /* Invoke the virtual table constructor */
+ assert( &db->pVtabCtx );
+ assert( xConstruct );
+ sCtx.pTab = pTab;
+ sCtx.pVTable = pVTable;
+ sCtx.pPrior = db->pVtabCtx;
+ sCtx.bDeclared = 0;
+ db->pVtabCtx = &sCtx;
+ rc = xConstruct(db, pMod->pAux, nArg, azArg, &pVTable->pVtab, &zErr);
+ db->pVtabCtx = sCtx.pPrior;
+ if( rc==SQLITE_NOMEM ) sqlite3OomFault(db);
+ assert( sCtx.pTab==pTab );
+
+ if( SQLITE_OK!=rc ){
+ if( zErr==0 ){
+ *pzErr = sqlite3MPrintf(db, "vtable constructor failed: %s", zModuleName);
+ }else {
+ *pzErr = sqlite3MPrintf(db, "%s", zErr);
+ sqlite3_free(zErr);
+ }
+ sqlite3DbFree(db, pVTable);
+ }else if( ALWAYS(pVTable->pVtab) ){
+ /* Justification of ALWAYS(): A correct vtab constructor must allocate
+ ** the sqlite3_vtab object if successful. */
+ memset(pVTable->pVtab, 0, sizeof(pVTable->pVtab[0]));
+ pVTable->pVtab->pModule = pMod->pModule;
+ pMod->nRefModule++;
+ pVTable->nRef = 1;
+ if( sCtx.bDeclared==0 ){
+ const char *zFormat = "vtable constructor did not declare schema: %s";
+ *pzErr = sqlite3MPrintf(db, zFormat, pTab->zName);
+ sqlite3VtabUnlock(pVTable);
+ rc = SQLITE_ERROR;
+ }else{
+ int iCol;
+ u16 oooHidden = 0;
+ /* If everything went according to plan, link the new VTable structure
+ ** into the linked list headed by pTab->pVTable. Then loop through the
+ ** columns of the table to see if any of them contain the token "hidden".
+ ** If so, set the Column COLFLAG_HIDDEN flag and remove the token from
+ ** the type string. */
+ pVTable->pNext = pTab->pVTable;
+ pTab->pVTable = pVTable;
+
+ for(iCol=0; iCol<pTab->nCol; iCol++){
+ char *zType = sqlite3ColumnType(&pTab->aCol[iCol], "");
+ int nType;
+ int i = 0;
+ nType = sqlite3Strlen30(zType);
+ for(i=0; i<nType; i++){
+ if( 0==sqlite3StrNICmp("hidden", &zType[i], 6)
+ && (i==0 || zType[i-1]==' ')
+ && (zType[i+6]=='\0' || zType[i+6]==' ')
+ ){
+ break;
+ }
+ }
+ if( i<nType ){
+ int j;
+ int nDel = 6 + (zType[i+6] ? 1 : 0);
+ for(j=i; (j+nDel)<=nType; j++){
+ zType[j] = zType[j+nDel];
+ }
+ if( zType[i]=='\0' && i>0 ){
+ assert(zType[i-1]==' ');
+ zType[i-1] = '\0';
+ }
+ pTab->aCol[iCol].colFlags |= COLFLAG_HIDDEN;
+ oooHidden = TF_OOOHidden;
+ }else{
+ pTab->tabFlags |= oooHidden;
+ }
+ }
+ }
+ }
+
+ sqlite3DbFree(db, zModuleName);
+ return rc;
+}
+
+/*
+** This function is invoked by the parser to call the xConnect() method
+** of the virtual table pTab. If an error occurs, an error code is returned
+** and an error left in pParse.
+**
+** This call is a no-op if table pTab is not a virtual table.
+*/
+SQLITE_PRIVATE int sqlite3VtabCallConnect(Parse *pParse, Table *pTab){
+ sqlite3 *db = pParse->db;
+ const char *zMod;
+ Module *pMod;
+ int rc;
+
+ assert( pTab );
+ if( !IsVirtual(pTab) || sqlite3GetVTable(db, pTab) ){
+ return SQLITE_OK;
+ }
+
+ /* Locate the required virtual table module */
+ zMod = pTab->azModuleArg[0];
+ pMod = (Module*)sqlite3HashFind(&db->aModule, zMod);
+
+ if( !pMod ){
+ const char *zModule = pTab->azModuleArg[0];
+ sqlite3ErrorMsg(pParse, "no such module: %s", zModule);
+ rc = SQLITE_ERROR;
+ }else{
+ char *zErr = 0;
+ rc = vtabCallConstructor(db, pTab, pMod, pMod->pModule->xConnect, &zErr);
+ if( rc!=SQLITE_OK ){
+ sqlite3ErrorMsg(pParse, "%s", zErr);
+ pParse->rc = rc;
+ }
+ sqlite3DbFree(db, zErr);
+ }
+
+ return rc;
+}
+
+/*
+** Grow the db->aVTrans[] array so that there is room for at least one
+** more v-table. Return SQLITE_NOMEM if a malloc fails, or SQLITE_OK otherwise.
+*/
+static int growVTrans(sqlite3 *db){
+ const int ARRAY_INCR = 5;
+
+ /* Grow the sqlite3.aVTrans array if required */
+ if( (db->nVTrans%ARRAY_INCR)==0 ){
+ VTable **aVTrans;
+ sqlite3_int64 nBytes = sizeof(sqlite3_vtab*)*
+ ((sqlite3_int64)db->nVTrans + ARRAY_INCR);
+ aVTrans = sqlite3DbRealloc(db, (void *)db->aVTrans, nBytes);
+ if( !aVTrans ){
+ return SQLITE_NOMEM_BKPT;
+ }
+ memset(&aVTrans[db->nVTrans], 0, sizeof(sqlite3_vtab *)*ARRAY_INCR);
+ db->aVTrans = aVTrans;
+ }
+
+ return SQLITE_OK;
+}
+
+/*
+** Add the virtual table pVTab to the array sqlite3.aVTrans[]. Space should
+** have already been reserved using growVTrans().
+*/
+static void addToVTrans(sqlite3 *db, VTable *pVTab){
+ /* Add pVtab to the end of sqlite3.aVTrans */
+ db->aVTrans[db->nVTrans++] = pVTab;
+ sqlite3VtabLock(pVTab);
+}
+
+/*
+** This function is invoked by the vdbe to call the xCreate method
+** of the virtual table named zTab in database iDb.
+**
+** If an error occurs, *pzErr is set to point to an English language
+** description of the error and an SQLITE_XXX error code is returned.
+** In this case the caller must call sqlite3DbFree(db, ) on *pzErr.
+*/
+SQLITE_PRIVATE int sqlite3VtabCallCreate(sqlite3 *db, int iDb, const char *zTab, char **pzErr){
+ int rc = SQLITE_OK;
+ Table *pTab;
+ Module *pMod;
+ const char *zMod;
+
+ pTab = sqlite3FindTable(db, zTab, db->aDb[iDb].zDbSName);
+ assert( pTab && IsVirtual(pTab) && !pTab->pVTable );
+
+ /* Locate the required virtual table module */
+ zMod = pTab->azModuleArg[0];
+ pMod = (Module*)sqlite3HashFind(&db->aModule, zMod);
+
+  /* If the module has been registered and includes both xCreate and
+  ** xDestroy methods, invoke xCreate now. Otherwise, return an
+  ** error.
+ */
+ if( pMod==0 || pMod->pModule->xCreate==0 || pMod->pModule->xDestroy==0 ){
+ *pzErr = sqlite3MPrintf(db, "no such module: %s", zMod);
+ rc = SQLITE_ERROR;
+ }else{
+ rc = vtabCallConstructor(db, pTab, pMod, pMod->pModule->xCreate, pzErr);
+ }
+
+ /* Justification of ALWAYS(): The xConstructor method is required to
+ ** create a valid sqlite3_vtab if it returns SQLITE_OK. */
+ if( rc==SQLITE_OK && ALWAYS(sqlite3GetVTable(db, pTab)) ){
+ rc = growVTrans(db);
+ if( rc==SQLITE_OK ){
+ addToVTrans(db, sqlite3GetVTable(db, pTab));
+ }
+ }
+
+ return rc;
+}
+
+/*
+** This function is used to set the schema of a virtual table. It is only
+** valid to call this function from within the xCreate() or xConnect() of a
+** virtual table module.
+*/
+SQLITE_API int sqlite3_declare_vtab(sqlite3 *db, const char *zCreateTable){
+ VtabCtx *pCtx;
+ int rc = SQLITE_OK;
+ Table *pTab;
+ char *zErr = 0;
+ Parse sParse;
+
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( !sqlite3SafetyCheckOk(db) || zCreateTable==0 ){
+ return SQLITE_MISUSE_BKPT;
+ }
+#endif
+ sqlite3_mutex_enter(db->mutex);
+ pCtx = db->pVtabCtx;
+ if( !pCtx || pCtx->bDeclared ){
+ sqlite3Error(db, SQLITE_MISUSE);
+ sqlite3_mutex_leave(db->mutex);
+ return SQLITE_MISUSE_BKPT;
+ }
+ pTab = pCtx->pTab;
+ assert( IsVirtual(pTab) );
+
+ memset(&sParse, 0, sizeof(sParse));
+ sParse.eParseMode = PARSE_MODE_DECLARE_VTAB;
+ sParse.db = db;
+ sParse.nQueryLoop = 1;
+ if( SQLITE_OK==sqlite3RunParser(&sParse, zCreateTable, &zErr)
+ && sParse.pNewTable
+ && !db->mallocFailed
+ && !sParse.pNewTable->pSelect
+ && !IsVirtual(sParse.pNewTable)
+ ){
+ if( !pTab->aCol ){
+ Table *pNew = sParse.pNewTable;
+ Index *pIdx;
+ pTab->aCol = pNew->aCol;
+ pTab->nCol = pNew->nCol;
+ pTab->tabFlags |= pNew->tabFlags & (TF_WithoutRowid|TF_NoVisibleRowid);
+ pNew->nCol = 0;
+ pNew->aCol = 0;
+ assert( pTab->pIndex==0 );
+ assert( HasRowid(pNew) || sqlite3PrimaryKeyIndex(pNew)!=0 );
+ if( !HasRowid(pNew)
+ && pCtx->pVTable->pMod->pModule->xUpdate!=0
+ && sqlite3PrimaryKeyIndex(pNew)->nKeyCol!=1
+ ){
+ /* WITHOUT ROWID virtual tables must either be read-only (xUpdate==0)
+ ** or else must have a single-column PRIMARY KEY */
+ rc = SQLITE_ERROR;
+ }
+ pIdx = pNew->pIndex;
+ if( pIdx ){
+ assert( pIdx->pNext==0 );
+ pTab->pIndex = pIdx;
+ pNew->pIndex = 0;
+ pIdx->pTable = pTab;
+ }
+ }
+ pCtx->bDeclared = 1;
+ }else{
+ sqlite3ErrorWithMsg(db, SQLITE_ERROR, (zErr ? "%s" : 0), zErr);
+ sqlite3DbFree(db, zErr);
+ rc = SQLITE_ERROR;
+ }
+ sParse.eParseMode = PARSE_MODE_NORMAL;
+
+ if( sParse.pVdbe ){
+ sqlite3VdbeFinalize(sParse.pVdbe);
+ }
+ sqlite3DeleteTable(db, sParse.pNewTable);
+ sqlite3ParserReset(&sParse);
+
+ assert( (rc&0xff)==rc );
+ rc = sqlite3ApiExit(db, rc);
+ sqlite3_mutex_leave(db->mutex);
+ return rc;
+}
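+
+/*
+** Illustrative sketch (compiled out; not part of SQLite): the skeleton of
+** a hypothetical xConnect() method. sqlite3_declare_vtab() must be called
+** exactly once before the constructor returns SQLITE_OK. Note that the
+** "HIDDEN" token in the third column type below is stripped out by
+** vtabCallConstructor() above, which sets COLFLAG_HIDDEN on that column.
+*/
+#if 0
+static int myVtabConnect(
+  sqlite3 *db,
+  void *pAux,
+  int argc, const char *const*argv,
+  sqlite3_vtab **ppVtab,
+  char **pzErr
+){
+  int rc = sqlite3_declare_vtab(db,
+      "CREATE TABLE x(a TEXT, b INTEGER, c HIDDEN INTEGER)");
+  if( rc!=SQLITE_OK ) return rc;
+  *ppVtab = sqlite3_malloc(sizeof(**ppVtab));
+  if( *ppVtab==0 ) return SQLITE_NOMEM;
+  memset(*ppVtab, 0, sizeof(**ppVtab));
+  (void)pAux; (void)argc; (void)argv; (void)pzErr;
+  return SQLITE_OK;
+}
+#endif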
+
+/*
+** This function is invoked by the vdbe to call the xDestroy method
+** of the virtual table named zTab in database iDb. This occurs
+** when a virtual table is destroyed by a DROP TABLE statement.
+**
+** This call is a no-op if zTab is not a virtual table.
+*/
+SQLITE_PRIVATE int sqlite3VtabCallDestroy(sqlite3 *db, int iDb, const char *zTab){
+ int rc = SQLITE_OK;
+ Table *pTab;
+
+ pTab = sqlite3FindTable(db, zTab, db->aDb[iDb].zDbSName);
+ if( pTab!=0 && ALWAYS(pTab->pVTable!=0) ){
+ VTable *p;
+ int (*xDestroy)(sqlite3_vtab *);
+ for(p=pTab->pVTable; p; p=p->pNext){
+ assert( p->pVtab );
+ if( p->pVtab->nRef>0 ){
+ return SQLITE_LOCKED;
+ }
+ }
+ p = vtabDisconnectAll(db, pTab);
+ xDestroy = p->pMod->pModule->xDestroy;
+ if( xDestroy==0 ) xDestroy = p->pMod->pModule->xDisconnect;
+ assert( xDestroy!=0 );
+ pTab->nTabRef++;
+ rc = xDestroy(p->pVtab);
+ /* Remove the sqlite3_vtab* from the aVTrans[] array, if applicable */
+ if( rc==SQLITE_OK ){
+ assert( pTab->pVTable==p && p->pNext==0 );
+ p->pVtab = 0;
+ pTab->pVTable = 0;
+ sqlite3VtabUnlock(p);
+ }
+ sqlite3DeleteTable(db, pTab);
+ }
+
+ return rc;
+}
+
+/*
+** This function invokes either the xRollback or xCommit method
+** of each of the virtual tables in the sqlite3.aVTrans array. The method
+** called is identified by the second argument, "offset", which is
+** the offset of the method to call in the sqlite3_module structure.
+**
+** The array is cleared after invoking the callbacks.
+*/
+static void callFinaliser(sqlite3 *db, int offset){
+ int i;
+ if( db->aVTrans ){
+ VTable **aVTrans = db->aVTrans;
+ db->aVTrans = 0;
+ for(i=0; i<db->nVTrans; i++){
+ VTable *pVTab = aVTrans[i];
+ sqlite3_vtab *p = pVTab->pVtab;
+ if( p ){
+ int (*x)(sqlite3_vtab *);
+ x = *(int (**)(sqlite3_vtab *))((char *)p->pModule + offset);
+ if( x ) x(p);
+ }
+ pVTab->iSavepoint = 0;
+ sqlite3VtabUnlock(pVTab);
+ }
+ sqlite3DbFree(db, aVTrans);
+ db->nVTrans = 0;
+ }
+}
+
+/*
+** Invoke the xSync method of all virtual tables in the sqlite3.aVTrans
+** array. Return the error code for the first error that occurs, or
+** SQLITE_OK if all xSync operations are successful.
+**
+** If an error message is available, leave it in p->zErrMsg.
+*/
+SQLITE_PRIVATE int sqlite3VtabSync(sqlite3 *db, Vdbe *p){
+ int i;
+ int rc = SQLITE_OK;
+ VTable **aVTrans = db->aVTrans;
+
+ db->aVTrans = 0;
+ for(i=0; rc==SQLITE_OK && i<db->nVTrans; i++){
+ int (*x)(sqlite3_vtab *);
+ sqlite3_vtab *pVtab = aVTrans[i]->pVtab;
+ if( pVtab && (x = pVtab->pModule->xSync)!=0 ){
+ rc = x(pVtab);
+ sqlite3VtabImportErrmsg(p, pVtab);
+ }
+ }
+ db->aVTrans = aVTrans;
+ return rc;
+}
+
+/*
+** Invoke the xRollback method of all virtual tables in the
+** sqlite3.aVTrans array. Then clear the array itself.
+*/
+SQLITE_PRIVATE int sqlite3VtabRollback(sqlite3 *db){
+ callFinaliser(db, offsetof(sqlite3_module,xRollback));
+ return SQLITE_OK;
+}
+
+/*
+** Invoke the xCommit method of all virtual tables in the
+** sqlite3.aVTrans array. Then clear the array itself.
+*/
+SQLITE_PRIVATE int sqlite3VtabCommit(sqlite3 *db){
+ callFinaliser(db, offsetof(sqlite3_module,xCommit));
+ return SQLITE_OK;
+}
+
+/*
+** If the virtual table pVtab supports the transaction interface
+** (xBegin/xRollback/xCommit and optionally xSync) and a transaction is
+** not currently open, invoke the xBegin method now.
+**
+** If the xBegin call is successful, place the sqlite3_vtab pointer
+** in the sqlite3.aVTrans array.
+*/
+SQLITE_PRIVATE int sqlite3VtabBegin(sqlite3 *db, VTable *pVTab){
+ int rc = SQLITE_OK;
+ const sqlite3_module *pModule;
+
+ /* Special case: If db->aVTrans is NULL and db->nVTrans is greater
+ ** than zero, then this function is being called from within a
+ ** virtual module xSync() callback. It is illegal to write to
+ ** virtual module tables in this case, so return SQLITE_LOCKED.
+ */
+ if( sqlite3VtabInSync(db) ){
+ return SQLITE_LOCKED;
+ }
+ if( !pVTab ){
+ return SQLITE_OK;
+ }
+ pModule = pVTab->pVtab->pModule;
+
+ if( pModule->xBegin ){
+ int i;
+
+ /* If pVtab is already in the aVTrans array, return early */
+ for(i=0; i<db->nVTrans; i++){
+ if( db->aVTrans[i]==pVTab ){
+ return SQLITE_OK;
+ }
+ }
+
+ /* Invoke the xBegin method. If successful, add the vtab to the
+ ** sqlite3.aVTrans[] array. */
+ rc = growVTrans(db);
+ if( rc==SQLITE_OK ){
+ rc = pModule->xBegin(pVTab->pVtab);
+ if( rc==SQLITE_OK ){
+ int iSvpt = db->nStatement + db->nSavepoint;
+ addToVTrans(db, pVTab);
+ if( iSvpt && pModule->xSavepoint ){
+ pVTab->iSavepoint = iSvpt;
+ rc = pModule->xSavepoint(pVTab->pVtab, iSvpt-1);
+ }
+ }
+ }
+ }
+ return rc;
+}
+
+/*
+** Invoke either the xSavepoint, xRollbackTo or xRelease method of all
+** virtual tables that currently have an open transaction. Pass iSavepoint
+** as the second argument to the virtual table method invoked.
+**
+** If op is SAVEPOINT_BEGIN, the xSavepoint method is invoked. If it is
+** SAVEPOINT_ROLLBACK, the xRollbackTo method. Otherwise, if op is
+** SAVEPOINT_RELEASE, then the xRelease method of each virtual table with
+** an open transaction is invoked.
+**
+** If any virtual table method returns an error code other than SQLITE_OK,
+** processing is abandoned and the error returned to the caller of this
+** function immediately. If all calls to virtual table methods are successful,
+** SQLITE_OK is returned.
+*/
+SQLITE_PRIVATE int sqlite3VtabSavepoint(sqlite3 *db, int op, int iSavepoint){
+ int rc = SQLITE_OK;
+
+ assert( op==SAVEPOINT_RELEASE||op==SAVEPOINT_ROLLBACK||op==SAVEPOINT_BEGIN );
+ assert( iSavepoint>=-1 );
+ if( db->aVTrans ){
+ int i;
+ for(i=0; rc==SQLITE_OK && i<db->nVTrans; i++){
+ VTable *pVTab = db->aVTrans[i];
+ const sqlite3_module *pMod = pVTab->pMod->pModule;
+ if( pVTab->pVtab && pMod->iVersion>=2 ){
+ int (*xMethod)(sqlite3_vtab *, int);
+ sqlite3VtabLock(pVTab);
+ switch( op ){
+ case SAVEPOINT_BEGIN:
+ xMethod = pMod->xSavepoint;
+ pVTab->iSavepoint = iSavepoint+1;
+ break;
+ case SAVEPOINT_ROLLBACK:
+ xMethod = pMod->xRollbackTo;
+ break;
+ default:
+ xMethod = pMod->xRelease;
+ break;
+ }
+ if( xMethod && pVTab->iSavepoint>iSavepoint ){
+ rc = xMethod(pVTab->pVtab, iSavepoint);
+ }
+ sqlite3VtabUnlock(pVTab);
+ }
+ }
+ }
+ return rc;
+}
+
+/*
+** The first parameter (pDef) is a function implementation. The
+** second parameter (pExpr) is the first argument to this function.
+** If pExpr is a column in a virtual table, then let the virtual
+** table implementation have an opportunity to overload the function.
+**
+** This routine is used to allow virtual table implementations to
+** overload MATCH, LIKE, GLOB, and REGEXP operators.
+**
+** Return either the pDef argument (indicating no change) or a
+** new FuncDef structure that is marked as ephemeral using the
+** SQLITE_FUNC_EPHEM flag.
+*/
+SQLITE_PRIVATE FuncDef *sqlite3VtabOverloadFunction(
+ sqlite3 *db, /* Database connection for reporting malloc problems */
+ FuncDef *pDef, /* Function to possibly overload */
+ int nArg, /* Number of arguments to the function */
+ Expr *pExpr /* First argument to the function */
+){
+ Table *pTab;
+ sqlite3_vtab *pVtab;
+ sqlite3_module *pMod;
+ void (*xSFunc)(sqlite3_context*,int,sqlite3_value**) = 0;
+ void *pArg = 0;
+ FuncDef *pNew;
+ int rc = 0;
+
+  /* Check to see if the left operand is a column in a virtual table */
+ if( NEVER(pExpr==0) ) return pDef;
+ if( pExpr->op!=TK_COLUMN ) return pDef;
+ pTab = pExpr->y.pTab;
+ if( pTab==0 ) return pDef;
+ if( !IsVirtual(pTab) ) return pDef;
+ pVtab = sqlite3GetVTable(db, pTab)->pVtab;
+ assert( pVtab!=0 );
+ assert( pVtab->pModule!=0 );
+ pMod = (sqlite3_module *)pVtab->pModule;
+ if( pMod->xFindFunction==0 ) return pDef;
+
+ /* Call the xFindFunction method on the virtual table implementation
+ ** to see if the implementation wants to overload this function.
+ **
+ ** Though undocumented, we have historically always invoked xFindFunction
+ ** with an all lower-case function name. Continue in this tradition to
+ ** avoid any chance of an incompatibility.
+ */
+#ifdef SQLITE_DEBUG
+ {
+ int i;
+ for(i=0; pDef->zName[i]; i++){
+ unsigned char x = (unsigned char)pDef->zName[i];
+ assert( x==sqlite3UpperToLower[x] );
+ }
+ }
+#endif
+ rc = pMod->xFindFunction(pVtab, nArg, pDef->zName, &xSFunc, &pArg);
+ if( rc==0 ){
+ return pDef;
+ }
+
+ /* Create a new ephemeral function definition for the overloaded
+ ** function */
+ pNew = sqlite3DbMallocZero(db, sizeof(*pNew)
+ + sqlite3Strlen30(pDef->zName) + 1);
+ if( pNew==0 ){
+ return pDef;
+ }
+ *pNew = *pDef;
+ pNew->zName = (const char*)&pNew[1];
+ memcpy((char*)&pNew[1], pDef->zName, sqlite3Strlen30(pDef->zName)+1);
+ pNew->xSFunc = xSFunc;
+ pNew->pUserData = pArg;
+ pNew->funcFlags |= SQLITE_FUNC_EPHEM;
+ return pNew;
+}
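+
+/*
+** Illustrative sketch (compiled out; not part of SQLite): a hypothetical
+** xFindFunction() method that overloads the MATCH operator, which is the
+** mechanism sqlite3VtabOverloadFunction() above hooks into. As noted
+** above, the function name is always passed in lower-case. myMatchFunc
+** is an assumed SQL function implementation defined elsewhere.
+*/
+#if 0
+extern void myMatchFunc(sqlite3_context*, int, sqlite3_value**);
+static int myVtabFindFunction(
+  sqlite3_vtab *pVtab,                /* The virtual table */
+  int nArg,                           /* Number of arguments */
+  const char *zName,                  /* Function name (lower-case) */
+  void (**pxFunc)(sqlite3_context*,int,sqlite3_value**),
+  void **ppArg                        /* OUT: user data for *pxFunc */
+){
+  if( nArg==2 && sqlite3_stricmp(zName, "match")==0 ){
+    *pxFunc = myMatchFunc;
+    *ppArg = pVtab;                   /* passed through as user data */
+    return 1;                         /* non-zero: overload accepted */
+  }
+  return 0;                          /* zero: no overload */
+}
+#endif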
+
+/*
+** Make sure virtual table pTab is contained in the pParse->apVirtualLock[]
+** array so that an OP_VBegin will get generated for it. Add pTab to the
+** array if it is missing. If pTab is already in the array, this routine
+** is a no-op.
+*/
+SQLITE_PRIVATE void sqlite3VtabMakeWritable(Parse *pParse, Table *pTab){
+ Parse *pToplevel = sqlite3ParseToplevel(pParse);
+ int i, n;
+ Table **apVtabLock;
+
+ assert( IsVirtual(pTab) );
+ for(i=0; i<pToplevel->nVtabLock; i++){
+ if( pTab==pToplevel->apVtabLock[i] ) return;
+ }
+ n = (pToplevel->nVtabLock+1)*sizeof(pToplevel->apVtabLock[0]);
+ apVtabLock = sqlite3Realloc(pToplevel->apVtabLock, n);
+ if( apVtabLock ){
+ pToplevel->apVtabLock = apVtabLock;
+ pToplevel->apVtabLock[pToplevel->nVtabLock++] = pTab;
+ }else{
+ sqlite3OomFault(pToplevel->db);
+ }
+}
+
+/*
+** Check to see if virtual table module pMod can have an eponymous
+** virtual table instance. If it can, create one if one does not already
+** exist. Return non-zero if the eponymous virtual table instance exists
+** when this routine returns, and return zero if it does not exist.
+**
+** An eponymous virtual table instance is one that is named after its
+** module, and more importantly, does not require a CREATE VIRTUAL TABLE
+** statement in order to come into existence. Eponymous virtual table
+** instances always exist. They cannot be DROP-ed.
+**
+** Any virtual table module for which xConnect and xCreate are the same
+** method can have an eponymous virtual table instance.
+*/
+SQLITE_PRIVATE int sqlite3VtabEponymousTableInit(Parse *pParse, Module *pMod){
+ const sqlite3_module *pModule = pMod->pModule;
+ Table *pTab;
+ char *zErr = 0;
+ int rc;
+ sqlite3 *db = pParse->db;
+ if( pMod->pEpoTab ) return 1;
+ if( pModule->xCreate!=0 && pModule->xCreate!=pModule->xConnect ) return 0;
+ pTab = sqlite3DbMallocZero(db, sizeof(Table));
+ if( pTab==0 ) return 0;
+ pTab->zName = sqlite3DbStrDup(db, pMod->zName);
+ if( pTab->zName==0 ){
+ sqlite3DbFree(db, pTab);
+ return 0;
+ }
+ pMod->pEpoTab = pTab;
+ pTab->nTabRef = 1;
+ pTab->pSchema = db->aDb[0].pSchema;
+ assert( pTab->nModuleArg==0 );
+ pTab->iPKey = -1;
+ addModuleArgument(pParse, pTab, sqlite3DbStrDup(db, pTab->zName));
+ addModuleArgument(pParse, pTab, 0);
+ addModuleArgument(pParse, pTab, sqlite3DbStrDup(db, pTab->zName));
+ rc = vtabCallConstructor(db, pTab, pMod, pModule->xConnect, &zErr);
+ if( rc ){
+ sqlite3ErrorMsg(pParse, "%s", zErr);
+ sqlite3DbFree(db, zErr);
+ sqlite3VtabEponymousTableClear(db, pMod);
+ return 0;
+ }
+ return 1;
+}
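+
+/*
+** Illustrative sketch (compiled out; not part of SQLite): once a module
+** whose xCreate is NULL or equal to its xConnect has been registered, it
+** can be queried by name with no CREATE VIRTUAL TABLE statement.
+** "generate_series" is one such module, provided by the optional series
+** extension (built into the sqlite3 shell).
+*/
+#if 0
+static int queryEponymousVtab(sqlite3 *db){
+  /* Assumes the series extension has been loaded into db */
+  return sqlite3_exec(db,
+      "SELECT value FROM generate_series(1,10);", 0, 0, 0);
+}
+#endif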
+
+/*
+** Erase the eponymous virtual table instance associated with
+** virtual table module pMod, if it exists.
+*/
+SQLITE_PRIVATE void sqlite3VtabEponymousTableClear(sqlite3 *db, Module *pMod){
+ Table *pTab = pMod->pEpoTab;
+ if( pTab!=0 ){
+ /* Mark the table as Ephemeral prior to deleting it, so that the
+ ** sqlite3DeleteTable() routine will know that it is not stored in
+ ** the schema. */
+ pTab->tabFlags |= TF_Ephemeral;
+ sqlite3DeleteTable(db, pTab);
+ pMod->pEpoTab = 0;
+ }
+}
+
+/*
+** Return the ON CONFLICT resolution mode in effect for the virtual
+** table update operation currently in progress.
+**
+** The results of this routine are undefined unless it is called from
+** within an xUpdate method.
+*/
+SQLITE_API int sqlite3_vtab_on_conflict(sqlite3 *db){
+ static const unsigned char aMap[] = {
+ SQLITE_ROLLBACK, SQLITE_ABORT, SQLITE_FAIL, SQLITE_IGNORE, SQLITE_REPLACE
+ };
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( !sqlite3SafetyCheckOk(db) ) return SQLITE_MISUSE_BKPT;
+#endif
+ assert( OE_Rollback==1 && OE_Abort==2 && OE_Fail==3 );
+ assert( OE_Ignore==4 && OE_Replace==5 );
+ assert( db->vtabOnConflict>=1 && db->vtabOnConflict<=5 );
+ return (int)aMap[db->vtabOnConflict-1];
+}
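+
+/*
+** Illustrative sketch (compiled out; not part of SQLite): how an xUpdate()
+** implementation might consult sqlite3_vtab_on_conflict(). The MyVtab
+** subclass and the surrounding behavior are hypothetical.
+*/
+#if 0
+typedef struct MyVtab MyVtab;
+struct MyVtab {
+  sqlite3_vtab base;     /* Base class - must come first */
+  sqlite3 *db;           /* Database connection, saved by xConnect */
+};
+static int myVtabUpdate(
+  sqlite3_vtab *pVtab,
+  int argc,
+  sqlite3_value **argv,
+  sqlite3_int64 *pRowid
+){
+  sqlite3 *db = ((MyVtab*)pVtab)->db;
+  if( sqlite3_vtab_on_conflict(db)==SQLITE_REPLACE ){
+    /* An OR REPLACE update is in progress, so a conflicting row should
+    ** be silently overwritten here rather than raising an error. */
+  }
+  (void)argc; (void)argv; (void)pRowid;
+  return SQLITE_OK;
+}
+#endif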
+
+/*
+** Call from within the xCreate() or xConnect() methods to provide
+** the SQLite core with additional information about the behavior
+** of the virtual table being implemented.
+*/
+SQLITE_API int sqlite3_vtab_config(sqlite3 *db, int op, ...){
+ va_list ap;
+ int rc = SQLITE_OK;
+ VtabCtx *p;
+
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( !sqlite3SafetyCheckOk(db) ) return SQLITE_MISUSE_BKPT;
+#endif
+ sqlite3_mutex_enter(db->mutex);
+ p = db->pVtabCtx;
+ if( !p ){
+ rc = SQLITE_MISUSE_BKPT;
+ }else{
+ assert( p->pTab==0 || IsVirtual(p->pTab) );
+ va_start(ap, op);
+ switch( op ){
+ case SQLITE_VTAB_CONSTRAINT_SUPPORT: {
+ p->pVTable->bConstraint = (u8)va_arg(ap, int);
+ break;
+ }
+ case SQLITE_VTAB_INNOCUOUS: {
+ p->pVTable->eVtabRisk = SQLITE_VTABRISK_Low;
+ break;
+ }
+ case SQLITE_VTAB_DIRECTONLY: {
+ p->pVTable->eVtabRisk = SQLITE_VTABRISK_High;
+ break;
+ }
+ default: {
+ rc = SQLITE_MISUSE_BKPT;
+ break;
+ }
+ }
+ va_end(ap);
+ }
+
+ if( rc!=SQLITE_OK ) sqlite3Error(db, rc);
+ sqlite3_mutex_leave(db->mutex);
+ return rc;
+}
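+
+/*
+** Illustrative sketch (compiled out; not part of SQLite): calls that a
+** hypothetical xConnect() might make to sqlite3_vtab_config(). Only the
+** three opcodes handled in the switch above are valid.
+*/
+#if 0
+static void configureMyVtab(sqlite3 *db){
+  /* Honor ON CONFLICT modes in xUpdate (the extra argument is an int) */
+  sqlite3_vtab_config(db, SQLITE_VTAB_CONSTRAINT_SUPPORT, 1);
+  /* Restrict use of the vtab to top-level SQL, not triggers or views */
+  sqlite3_vtab_config(db, SQLITE_VTAB_DIRECTONLY);
+}
+#endif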
+
+#endif /* SQLITE_OMIT_VIRTUALTABLE */
+
+/************** End of vtab.c ************************************************/
+/************** Begin file wherecode.c ***************************************/
+/*
+** 2015-06-06
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+** This module contains C code that generates VDBE code used to process
+** the WHERE clause of SQL statements.
+**
+** This file was split off from where.c on 2015-06-06 in order to reduce the
+** size of where.c and make it easier to edit. This file contains the routines
+** that actually generate the bulk of the WHERE loop code. The original where.c
+** file retains the code that does query planning and analysis.
+*/
+/* #include "sqliteInt.h" */
+/************** Include whereInt.h in the middle of wherecode.c **************/
+/************** Begin file whereInt.h ****************************************/
+/*
+** 2013-11-12
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+**
+** This file contains structure and macro definitions for the query
+** planner logic in "where.c". These definitions are broken out into
+** a separate source file for easier editing.
+*/
+#ifndef SQLITE_WHEREINT_H
+#define SQLITE_WHEREINT_H
+
+/*
+** Trace output macros
+*/
+#if defined(SQLITE_TEST) || defined(SQLITE_DEBUG)
+/***/ extern int sqlite3WhereTrace;
+#endif
+#if defined(SQLITE_DEBUG) \
+ && (defined(SQLITE_TEST) || defined(SQLITE_ENABLE_WHERETRACE))
+# define WHERETRACE(K,X) if(sqlite3WhereTrace&(K)) sqlite3DebugPrintf X
+# define WHERETRACE_ENABLED 1
+#else
+# define WHERETRACE(K,X)
+#endif
+
+/* Forward references
+*/
+typedef struct WhereClause WhereClause;
+typedef struct WhereMaskSet WhereMaskSet;
+typedef struct WhereOrInfo WhereOrInfo;
+typedef struct WhereAndInfo WhereAndInfo;
+typedef struct WhereLevel WhereLevel;
+typedef struct WhereLoop WhereLoop;
+typedef struct WherePath WherePath;
+typedef struct WhereTerm WhereTerm;
+typedef struct WhereLoopBuilder WhereLoopBuilder;
+typedef struct WhereScan WhereScan;
+typedef struct WhereOrCost WhereOrCost;
+typedef struct WhereOrSet WhereOrSet;
+
+/*
+** This object contains information needed to implement a single nested
+** loop in the WHERE clause.
+**
+** Contrast this object with WhereLoop. This object describes the
+** implementation of the loop. WhereLoop describes the algorithm.
+** This object contains a pointer to the WhereLoop algorithm as one of
+** its elements.
+**
+** The WhereInfo object contains a single instance of this object for
+** each term in the FROM clause (which is to say, for each of the
+** nested loops as implemented). The order of WhereLevel objects determines
+** the loop nesting order, with WhereInfo.a[0] being the outermost loop and
+** WhereInfo.a[WhereInfo.nLevel-1] being the innermost loop.
+*/
+struct WhereLevel {
+ int iLeftJoin; /* Memory cell used to implement LEFT OUTER JOIN */
+ int iTabCur; /* The VDBE cursor used to access the table */
+ int iIdxCur; /* The VDBE cursor used to access pIdx */
+ int addrBrk; /* Jump here to break out of the loop */
+ int addrNxt; /* Jump here to start the next IN combination */
+ int addrSkip; /* Jump here for next iteration of skip-scan */
+ int addrCont; /* Jump here to continue with the next loop cycle */
+ int addrFirst; /* First instruction of interior of the loop */
+ int addrBody; /* Beginning of the body of this loop */
+ int regBignull; /* big-null flag reg. True if a NULL-scan is needed */
+ int addrBignull; /* Jump here for next part of big-null scan */
+#ifndef SQLITE_LIKE_DOESNT_MATCH_BLOBS
+ u32 iLikeRepCntr; /* LIKE range processing counter register (times 2) */
+ int addrLikeRep; /* LIKE range processing address */
+#endif
+ u8 iFrom; /* Which entry in the FROM clause */
+ u8 op, p3, p5; /* Opcode, P3 & P5 of the opcode that ends the loop */
+ int p1, p2; /* Operands of the opcode used to end the loop */
+ union { /* Information that depends on pWLoop->wsFlags */
+ struct {
+ int nIn; /* Number of entries in aInLoop[] */
+ struct InLoop {
+ int iCur; /* The VDBE cursor used by this IN operator */
+ int addrInTop; /* Top of the IN loop */
+ int iBase; /* Base register of multi-key index record */
+        int nPrefix;           /* Number of prior entries in the key */
+ u8 eEndLoopOp; /* IN Loop terminator. OP_Next or OP_Prev */
+ } *aInLoop; /* Information about each nested IN operator */
+ } in; /* Used when pWLoop->wsFlags&WHERE_IN_ABLE */
+ Index *pCovidx; /* Possible covering index for WHERE_MULTI_OR */
+ } u;
+ struct WhereLoop *pWLoop; /* The selected WhereLoop object */
+ Bitmask notReady; /* FROM entries not usable at this level */
+#ifdef SQLITE_ENABLE_STMT_SCANSTATUS
+ int addrVisit; /* Address at which row is visited */
+#endif
+};
+
+/*
+** Each instance of this object represents an algorithm for evaluating one
+** term of a join. Every term of the FROM clause will have at least
+** one corresponding WhereLoop object (unless INDEXED BY constraints
+** prevent a query solution - which is an error) and many terms of the
+** FROM clause will have multiple WhereLoop objects, each describing a
+** potential way of implementing that FROM-clause term, together with
+** dependencies and cost estimates for using the chosen algorithm.
+**
+** Query planning consists of building up a collection of these WhereLoop
+** objects, then computing a particular sequence of WhereLoop objects, with
+** one WhereLoop object per FROM clause term, that satisfy all dependencies
+** and that minimize the overall cost.
+*/
+struct WhereLoop {
+ Bitmask prereq; /* Bitmask of other loops that must run first */
+ Bitmask maskSelf; /* Bitmask identifying table iTab */
+#ifdef SQLITE_DEBUG
+ char cId; /* Symbolic ID of this loop for debugging use */
+#endif
+ u8 iTab; /* Position in FROM clause of table for this loop */
+ u8 iSortIdx; /* Sorting index number. 0==None */
+ LogEst rSetup; /* One-time setup cost (ex: create transient index) */
+ LogEst rRun; /* Cost of running each loop */
+ LogEst nOut; /* Estimated number of output rows */
+ union {
+ struct { /* Information for internal btree tables */
+ u16 nEq; /* Number of equality constraints */
+ u16 nBtm; /* Size of BTM vector */
+ u16 nTop; /* Size of TOP vector */
+ u16 nDistinctCol; /* Index columns used to sort for DISTINCT */
+ Index *pIndex; /* Index used, or NULL */
+ } btree;
+ struct { /* Information for virtual tables */
+ int idxNum; /* Index number */
+ u8 needFree; /* True if sqlite3_free(idxStr) is needed */
+ i8 isOrdered; /* True if satisfies ORDER BY */
+ u16 omitMask; /* Terms that may be omitted */
+ char *idxStr; /* Index identifier string */
+ } vtab;
+ } u;
+ u32 wsFlags; /* WHERE_* flags describing the plan */
+ u16 nLTerm; /* Number of entries in aLTerm[] */
+ u16 nSkip; /* Number of NULL aLTerm[] entries */
+ /**** whereLoopXfer() copies fields above ***********************/
+# define WHERE_LOOP_XFER_SZ offsetof(WhereLoop,nLSlot)
+ u16 nLSlot; /* Number of slots allocated for aLTerm[] */
+ WhereTerm **aLTerm; /* WhereTerms used */
+ WhereLoop *pNextLoop; /* Next WhereLoop object in the WhereClause */
+ WhereTerm *aLTermSpace[3]; /* Initial aLTerm[] space */
+};
+
+/* This object holds the prerequisites and the cost of running a
+** subquery on one operand of an OR operator in the WHERE clause.
+** See WhereOrSet for additional information
+*/
+struct WhereOrCost {
+ Bitmask prereq; /* Prerequisites */
+ LogEst rRun; /* Cost of running this subquery */
+ LogEst nOut; /* Number of outputs for this subquery */
+};
+
+/* The WhereOrSet object holds a set of possible WhereOrCosts that
+** correspond to the subqueries of OR-clause processing. Only the
+** best N_OR_COST elements are retained.
+*/
+#define N_OR_COST 3
+struct WhereOrSet {
+ u16 n; /* Number of valid a[] entries */
+ WhereOrCost a[N_OR_COST]; /* Set of best costs */
+};
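+
+/* Illustrative sketch (hypothetical; not the routine where.c actually
+** uses): because only the N_OR_COST cheapest costs are retained, adding
+** a new WhereOrCost to a full set means displacing the costliest entry,
+** and only when the newcomer is cheaper:
+*/
+#if 0
+static void whereOrSetInsertSketch(WhereOrSet *pSet, const WhereOrCost *pNew){
+  u16 i, iWorst = 0;
+  if( pSet->n<N_OR_COST ){
+    pSet->a[pSet->n++] = *pNew;          /* Room remains: just append */
+    return;
+  }
+  for(i=1; i<pSet->n; i++){              /* Find the costliest entry */
+    if( pSet->a[i].rRun>pSet->a[iWorst].rRun ) iWorst = i;
+  }
+  if( pNew->rRun<pSet->a[iWorst].rRun ){
+    pSet->a[iWorst] = *pNew;             /* Displace it with the newcomer */
+  }
+}
+#endif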
+
+/*
+** Each instance of this object holds a sequence of WhereLoop objects
+** that implement some or all of a query plan.
+**
+** Think of each WhereLoop object as a node in a graph with arcs
+** showing dependencies and costs for travelling between nodes. (That is
+** not a completely accurate description because WhereLoop costs are a
+** vector, not a scalar, and because dependencies are many-to-one, not
+** one-to-one as are graph nodes. But it is a useful visualization aid.)
+** Then a WherePath object is a path through the graph that visits some
+** or all of the WhereLoop objects once.
+**
+** The "solver" works by creating the N best WherePath objects of length
+** 1. Then using those as a basis to compute the N best WherePath objects
+** of length 2. And so forth until the length of WherePaths equals the
+** number of nodes in the FROM clause. The best (lowest cost) WherePath
+** at the end is the chosen query plan.
+*/
+struct WherePath {
+ Bitmask maskLoop; /* Bitmask of all WhereLoop objects in this path */
+ Bitmask revLoop; /* aLoop[]s that should be reversed for ORDER BY */
+ LogEst nRow; /* Estimated number of rows generated by this path */
+ LogEst rCost; /* Total cost of this path */
+ LogEst rUnsorted; /* Total cost of this path ignoring sorting costs */
+ i8 isOrdered; /* No. of ORDER BY terms satisfied. -1 for unknown */
+ WhereLoop **aLoop; /* Array of WhereLoop objects implementing this path */
+};
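+
+/* Worked example (illustrative only): in a two-table join where loops
+** {A1,A2} are proposed for t1 and {B1,B2} for t2, the solver first keeps
+** the N cheapest length-1 paths drawn from those four loops, then extends
+** each survivor by one compatible loop to form length-2 candidates such
+** as (A1,B2) or (B1,A2), again keeping only the N cheapest. The single
+** lowest-cost length-2 path that remains becomes the query plan.
+*/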
+
+/*
+** The query generator uses an array of instances of this structure to
+** help it analyze the subexpressions of the WHERE clause. Each WHERE
+** clause subexpression is usually separated from the others by AND
+** operators, though sometimes subexpressions are separated by OR.
+**
+** All WhereTerms are collected into a single WhereClause structure.
+** The following identity holds:
+**
+** WhereTerm.pWC->a[WhereTerm.idx] == WhereTerm
+**
+** When a term is of the form:
+**
+** X <op> <expr>
+**
+** where X is a column name and <op> is one of certain operators,
+** then WhereTerm.leftCursor and WhereTerm.u.leftColumn record the
+** cursor number and column number for X. WhereTerm.eOperator records
+** the <op> using a bitmask encoding defined by WO_xxx below. The
+** use of a bitmask encoding for the operator allows us to search
+** quickly for terms that match any of several different operators.
+**
+** A WhereTerm might also be two or more subterms connected by OR:
+**
+** (t1.X <op> <expr>) OR (t1.Y <op> <expr>) OR ....
+**
+** In this second case, wtFlag has the TERM_ORINFO bit set and eOperator==WO_OR
+** and the WhereTerm.u.pOrInfo field points to auxiliary information that
+** is collected about the OR clause.
+**
+** If a term in the WHERE clause does not match either of the two previous
+** categories, then eOperator==0. The WhereTerm.pExpr field is still set
+** to the original subexpression content and wtFlags is set up appropriately
+** but no other fields in the WhereTerm object are meaningful.
+**
+** When eOperator!=0, prereqRight and prereqAll record sets of cursor numbers,
+** but they do so indirectly. A single WhereMaskSet structure translates
+** cursor numbers into bits and the translated bits are stored in the prereq
+** fields. The translation is used in order to maximize the number of
+** bits that will fit in a Bitmask. The VDBE cursor numbers might be
+** spread out over the non-negative integers. For example, the cursor
+** numbers might be 3, 8, 9, 10, 20, 23, 41, and 45. The WhereMaskSet
+** translates these sparse cursor numbers into consecutive integers
+** beginning with 0 in order to make the best possible use of the available
+** bits in the Bitmask. So, in the example above, the cursor numbers
+** would be mapped into integers 0 through 7.
+**
+** The number of terms in a join is limited by the number of bits
+** in prereqRight and prereqAll. The default is 64 bits, hence SQLite
+** is only able to process joins with 64 or fewer tables.
+*/
+struct WhereTerm {
+ Expr *pExpr; /* Pointer to the subexpression that is this term */
+ WhereClause *pWC; /* The clause this term is part of */
+ LogEst truthProb; /* Probability of truth for this expression */
+ u16 wtFlags; /* TERM_xxx bit flags. See below */
+ u16 eOperator; /* A WO_xx value describing <op> */
+ u8 nChild; /* Number of children that must disable us */
+ u8 eMatchOp; /* Op for vtab MATCH/LIKE/GLOB/REGEXP terms */
+ int iParent; /* Disable pWC->a[iParent] when this term disabled */
+ int leftCursor; /* Cursor number of X in "X <op> <expr>" */
+ int iField; /* Field in (?,?,?) IN (SELECT...) vector */
+ union {
+ int leftColumn; /* Column number of X in "X <op> <expr>" */
+ WhereOrInfo *pOrInfo; /* Extra information if (eOperator & WO_OR)!=0 */
+ WhereAndInfo *pAndInfo; /* Extra information if (eOperator& WO_AND)!=0 */
+ } u;
+ Bitmask prereqRight; /* Bitmask of tables used by pExpr->pRight */
+ Bitmask prereqAll; /* Bitmask of tables referenced by pExpr */
+};
+
+/*
+** Allowed values of WhereTerm.wtFlags
+*/
+#define TERM_DYNAMIC 0x0001 /* Need to call sqlite3ExprDelete(db, pExpr) */
+#define TERM_VIRTUAL 0x0002 /* Added by the optimizer. Do not code */
+#define TERM_CODED 0x0004 /* This term is already coded */
+#define TERM_COPIED 0x0008 /* Has a child */
+#define TERM_ORINFO 0x0010 /* Need to free the WhereTerm.u.pOrInfo object */
+#define TERM_ANDINFO 0x0020 /* Need to free the WhereTerm.u.pAndInfo obj */
+#define TERM_OR_OK 0x0040 /* Used during OR-clause processing */
+#ifdef SQLITE_ENABLE_STAT4
+# define TERM_VNULL 0x0080 /* Manufactured x>NULL or x<=NULL term */
+#else
+# define TERM_VNULL 0x0000 /* Disabled if not using stat4 */
+#endif
+#define TERM_LIKEOPT 0x0100 /* Virtual terms from the LIKE optimization */
+#define TERM_LIKECOND   0x0200 /* Conditionally coded LIKE operator term */
+#define TERM_LIKE 0x0400 /* The original LIKE operator */
+#define TERM_IS 0x0800 /* Term.pExpr is an IS operator */
+#define TERM_VARSELECT 0x1000 /* Term.pExpr contains a correlated sub-query */
+#define TERM_HEURTRUTH 0x2000 /* Heuristic truthProb used */
+#ifdef SQLITE_ENABLE_STAT4
+# define TERM_HIGHTRUTH 0x4000 /* Term excludes few rows */
+#else
+# define TERM_HIGHTRUTH 0 /* Only used with STAT4 */
+#endif
+
+/*
+** An instance of the WhereScan object is used as an iterator for locating
+** terms in the WHERE clause that are useful to the query planner.
+*/
+struct WhereScan {
+ WhereClause *pOrigWC; /* Original, innermost WhereClause */
+ WhereClause *pWC; /* WhereClause currently being scanned */
+ const char *zCollName; /* Required collating sequence, if not NULL */
+ Expr *pIdxExpr; /* Search for this index expression */
+ char idxaff; /* Must match this affinity, if zCollName!=NULL */
+ unsigned char nEquiv; /* Number of entries in aEquiv[] */
+ unsigned char iEquiv; /* Next unused slot in aEquiv[] */
+ u32 opMask; /* Acceptable operators */
+ int k; /* Resume scanning at this->pWC->a[this->k] */
+ int aiCur[11]; /* Cursors in the equivalence class */
+ i16 aiColumn[11]; /* Corresponding column number in the eq-class */
+};
+
+/*
+** An instance of the following structure holds all information about a
+** WHERE clause. Mostly this is a container for one or more WhereTerms.
+**
+** Explanation of pOuter: For a WHERE clause of the form
+**
+** a AND ((b AND c) OR (d AND e)) AND f
+**
+** There are separate WhereClause objects for the whole clause and for
+** the subclauses "(b AND c)" and "(d AND e)". The pOuter field of the
+** subclauses points to the WhereClause object for the whole clause.
+*/
+struct WhereClause {
+ WhereInfo *pWInfo; /* WHERE clause processing context */
+ WhereClause *pOuter; /* Outer conjunction */
+ u8 op; /* Split operator. TK_AND or TK_OR */
+ u8 hasOr; /* True if any a[].eOperator is WO_OR */
+ int nTerm; /* Number of terms */
+ int nSlot; /* Number of entries in a[] */
+  WhereTerm *a;            /* Each a[] describes a term of the WHERE clause */
+#if defined(SQLITE_SMALL_STACK)
+ WhereTerm aStatic[1]; /* Initial static space for a[] */
+#else
+ WhereTerm aStatic[8]; /* Initial static space for a[] */
+#endif
+};
+
+/*
+** A WhereTerm with eOperator==WO_OR has its u.pOrInfo pointer set to
+** a dynamically allocated instance of the following structure.
+*/
+struct WhereOrInfo {
+ WhereClause wc; /* Decomposition into subterms */
+ Bitmask indexable; /* Bitmask of all indexable tables in the clause */
+};
+
+/*
+** A WhereTerm with eOperator==WO_AND has its u.pAndInfo pointer set to
+** a dynamically allocated instance of the following structure.
+*/
+struct WhereAndInfo {
+ WhereClause wc; /* The subexpression broken out */
+};
+
+/*
+** An instance of the following structure keeps track of a mapping
+** between VDBE cursor numbers and bits of the bitmasks in WhereTerm.
+**
+** The VDBE cursor numbers are small integers contained in
+** SrcList_item.iCursor and Expr.iTable fields. For any given WHERE
+** clause, the cursor numbers might not begin with 0 and they might
+** contain gaps in the numbering sequence. But we want to make maximum
+** use of the bits in our bitmasks. This structure provides a mapping
+** from the sparse cursor numbers into consecutive integers beginning
+** with 0.
+**
+** If WhereMaskSet.ix[A]==B it means that the A-th bit of a Bitmask
+** corresponds to VDBE cursor number B. The A-th bit of a bitmask is 1<<A.
+**
+** For example, if the WHERE clause expression used these VDBE
+** cursors: 4, 5, 8, 29, 57, 73. Then the WhereMaskSet structure
+** would map those cursor numbers into bits 0 through 5.
+**
+** Note that the mapping is not necessarily ordered. In the example
+** above, the mapping might go like this: 4->3, 5->1, 8->2, 29->0,
+** 57->5, 73->4. Or one of 719 other combinations might be used. It
+** does not really matter. What is important is that sparse cursor
+** numbers all get mapped into bit numbers that begin with 0 and contain
+** no gaps.
+*/
+struct WhereMaskSet {
+ int bVarSelect; /* Used by sqlite3WhereExprUsage() */
+ int n; /* Number of assigned cursor values */
+ int ix[BMS]; /* Cursor assigned to each bit */
+};
+
+/*
+** Initialize a WhereMaskSet object
+*/
+#define initMaskSet(P) (P)->n=0
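+
+/* Illustrative sketch (hypothetical helpers mirroring what the real
+** sqlite3WhereGetMask() and mask-assignment logic do): lookup is a
+** linear search of ix[], and assignment appends to ix[]:
+*/
+#if 0
+static Bitmask maskSetGetMaskSketch(WhereMaskSet *pSet, int iCursor){
+  int i;
+  for(i=0; i<pSet->n; i++){
+    if( pSet->ix[i]==iCursor ) return MASKBIT(i);  /* MASKBIT(i) is 1<<i */
+  }
+  return 0;                     /* Cursor never assigned: no bits set */
+}
+static void maskSetAddCursorSketch(WhereMaskSet *pSet, int iCursor){
+  assert( pSet->n < BMS );      /* At most BMS cursors can be mapped */
+  pSet->ix[pSet->n++] = iCursor;
+}
+#endif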
+
+/*
+** This object is a convenience wrapper holding all information needed
+** to construct WhereLoop objects for a particular query.
+*/
+struct WhereLoopBuilder {
+ WhereInfo *pWInfo; /* Information about this WHERE */
+ WhereClause *pWC; /* WHERE clause terms */
+ ExprList *pOrderBy; /* ORDER BY clause */
+ WhereLoop *pNew; /* Template WhereLoop */
+ WhereOrSet *pOrSet; /* Record best loops here, if not NULL */
+#ifdef SQLITE_ENABLE_STAT4
+ UnpackedRecord *pRec; /* Probe for stat4 (if required) */
+ int nRecValid; /* Number of valid fields currently in pRec */
+#endif
+ unsigned char bldFlags1; /* First set of SQLITE_BLDF_* flags */
+ unsigned char bldFlags2; /* Second set of SQLITE_BLDF_* flags */
+ unsigned int iPlanLimit; /* Search limiter */
+};
+
+/* Allowed values for WhereLoopBuilder.bldFlags */
+#define SQLITE_BLDF1_INDEXED 0x0001 /* An index is used */
+#define SQLITE_BLDF1_UNIQUE 0x0002 /* All keys of a UNIQUE index used */
+
+#define SQLITE_BLDF2_2NDPASS 0x0004 /* Second builder pass needed */
+
+/* The WhereLoopBuilder.iPlanLimit is used to limit the number of
+** index+constraint combinations the query planner will consider for a
+** particular query. If this parameter is unlimited, then certain
+** pathological queries can spend excess time in the sqlite3WhereBegin()
+** routine. The limit is high enough that it should not impact real-world
+** queries.
+**
+** SQLITE_QUERY_PLANNER_LIMIT is the baseline limit. The limit is
+** increased by SQLITE_QUERY_PLANNER_LIMIT_INCR before each term of the FROM
+** clause is processed, so that every table in a join is guaranteed to be
+** able to propose some index+constraint combinations even if the initial
+** baseline limit was exhausted by prior tables of the join.
+*/
+#ifndef SQLITE_QUERY_PLANNER_LIMIT
+# define SQLITE_QUERY_PLANNER_LIMIT 20000
+#endif
+#ifndef SQLITE_QUERY_PLANNER_LIMIT_INCR
+# define SQLITE_QUERY_PLANNER_LIMIT_INCR 1000
+#endif
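+
+/* Illustrative sketch of the budget arithmetic described above
+** (hypothetical function; the real accounting is woven into the
+** WhereLoop enumeration code). pBuilder is the WhereLoopBuilder and
+** nTabList the number of FROM-clause terms:
+*/
+#if 0
+static void planLimitSketch(WhereLoopBuilder *pBuilder, int nTabList){
+  int ii;
+  pBuilder->iPlanLimit = SQLITE_QUERY_PLANNER_LIMIT;  /* Baseline budget */
+  for(ii=0; ii<nTabList; ii++){
+    pBuilder->iPlanLimit += SQLITE_QUERY_PLANNER_LIMIT_INCR;
+    /* Each index+constraint combination proposed for term ii decrements
+    ** iPlanLimit; once it reaches zero, no further WhereLoops are
+    ** proposed for this term. */
+  }
+}
+#endif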
+
+/*
+** Each instance of this object records a change to a single node
+** in an expression tree to cause that node to point to a column
+** of an index rather than an expression or a virtual column. All
+** such transformations need to be undone at the end of WHERE clause
+** processing.
+*/
+typedef struct WhereExprMod WhereExprMod;
+struct WhereExprMod {
+ WhereExprMod *pNext; /* Next translation on a list of them all */
+ Expr *pExpr; /* The Expr node that was transformed */
+ Expr orig; /* Original value of the Expr node */
+};
+
+/*
+** The WHERE clause processing routine has two halves. The
+** first part does the start of the WHERE loop and the second
+** half does the tail of the WHERE loop. An instance of
+** this structure is returned by the first half and passed
+** into the second half to give some continuity.
+**
+** An instance of this object holds the complete state of the query
+** planner.
+*/
+struct WhereInfo {
+ Parse *pParse; /* Parsing and code generating context */
+ SrcList *pTabList; /* List of tables in the join */
+ ExprList *pOrderBy; /* The ORDER BY clause or NULL */
+ ExprList *pResultSet; /* Result set of the query */
+ Expr *pWhere; /* The complete WHERE clause */
+ int aiCurOnePass[2]; /* OP_OpenWrite cursors for the ONEPASS opt */
+ int iContinue; /* Jump here to continue with next record */
+ int iBreak; /* Jump here to break out of the loop */
+ int savedNQueryLoop; /* pParse->nQueryLoop outside the WHERE loop */
+ u16 wctrlFlags; /* Flags originally passed to sqlite3WhereBegin() */
+ LogEst iLimit; /* LIMIT if wctrlFlags has WHERE_USE_LIMIT */
+  u8 nLevel;                /* Number of nested loops */
+ i8 nOBSat; /* Number of ORDER BY terms satisfied by indices */
+ u8 eOnePass; /* ONEPASS_OFF, or _SINGLE, or _MULTI */
+ u8 eDistinct; /* One of the WHERE_DISTINCT_* values */
+ unsigned bDeferredSeek :1; /* Uses OP_DeferredSeek */
+ unsigned untestedTerms :1; /* Not all WHERE terms resolved by outer loop */
+ unsigned bOrderedInnerLoop:1;/* True if only the inner-most loop is ordered */
+ unsigned sorted :1; /* True if really sorted (not just grouped) */
+ LogEst nRowOut; /* Estimated number of output rows */
+ int iTop; /* The very beginning of the WHERE loop */
+ WhereLoop *pLoops; /* List of all WhereLoop objects */
+ WhereExprMod *pExprMods; /* Expression modifications */
+ Bitmask revMask; /* Mask of ORDER BY terms that need reversing */
+ WhereClause sWC; /* Decomposition of the WHERE clause */
+ WhereMaskSet sMaskSet; /* Map cursor numbers to bitmasks */
+  WhereLevel a[1];          /* Information about each nested loop in WHERE */
+};
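+
+/* Usage sketch of the two halves described above (hypothetical wrapper;
+** optional clauses and flags are passed as zero and error handling is
+** elided):
+*/
+#if 0
+static void whereBeginEndUsageSketch(
+  Parse *pParse, SrcList *pTabList, Expr *pWhere
+){
+  WhereInfo *pWInfo = sqlite3WhereBegin(pParse, pTabList, pWhere, 0, 0, 0, 0);
+  if( pWInfo ){
+    /* ... emit code that runs once for each row visited by the scan ... */
+    sqlite3WhereEnd(pWInfo);    /* Second half: close out the loop(s) */
+  }
+}
+#endif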
+
+/*
+** Private interfaces - callable only by other where.c routines.
+**
+** where.c:
+*/
+SQLITE_PRIVATE Bitmask sqlite3WhereGetMask(WhereMaskSet*,int);
+#ifdef WHERETRACE_ENABLED
+SQLITE_PRIVATE void sqlite3WhereClausePrint(WhereClause *pWC);
+SQLITE_PRIVATE void sqlite3WhereTermPrint(WhereTerm *pTerm, int iTerm);
+SQLITE_PRIVATE void sqlite3WhereLoopPrint(WhereLoop *p, WhereClause *pWC);
+#endif
+SQLITE_PRIVATE WhereTerm *sqlite3WhereFindTerm(
+ WhereClause *pWC, /* The WHERE clause to be searched */
+ int iCur, /* Cursor number of LHS */
+ int iColumn, /* Column number of LHS */
+ Bitmask notReady, /* RHS must not overlap with this mask */
+ u32 op, /* Mask of WO_xx values describing operator */
+ Index *pIdx /* Must be compatible with this index, if not NULL */
+);
+
+/* wherecode.c: */
+#ifndef SQLITE_OMIT_EXPLAIN
+SQLITE_PRIVATE int sqlite3WhereExplainOneScan(
+ Parse *pParse, /* Parse context */
+ SrcList *pTabList, /* Table list this loop refers to */
+ WhereLevel *pLevel, /* Scan to write OP_Explain opcode for */
+ u16 wctrlFlags /* Flags passed to sqlite3WhereBegin() */
+);
+#else
+# define sqlite3WhereExplainOneScan(u,v,w,x) 0
+#endif /* SQLITE_OMIT_EXPLAIN */
+#ifdef SQLITE_ENABLE_STMT_SCANSTATUS
+SQLITE_PRIVATE void sqlite3WhereAddScanStatus(
+ Vdbe *v, /* Vdbe to add scanstatus entry to */
+ SrcList *pSrclist, /* FROM clause pLvl reads data from */
+ WhereLevel *pLvl, /* Level to add scanstatus() entry for */
+ int addrExplain /* Address of OP_Explain (or 0) */
+);
+#else
+# define sqlite3WhereAddScanStatus(a, b, c, d) ((void)d)
+#endif
+SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart(
+ Parse *pParse, /* Parsing context */
+ Vdbe *v, /* Prepared statement under construction */
+ WhereInfo *pWInfo, /* Complete information about the WHERE clause */
+ int iLevel, /* Which level of pWInfo->a[] should be coded */
+ WhereLevel *pLevel, /* The current level pointer */
+ Bitmask notReady /* Which tables are currently available */
+);
+
+/* whereexpr.c: */
+SQLITE_PRIVATE void sqlite3WhereClauseInit(WhereClause*,WhereInfo*);
+SQLITE_PRIVATE void sqlite3WhereClauseClear(WhereClause*);
+SQLITE_PRIVATE void sqlite3WhereSplit(WhereClause*,Expr*,u8);
+SQLITE_PRIVATE Bitmask sqlite3WhereExprUsage(WhereMaskSet*, Expr*);
+SQLITE_PRIVATE Bitmask sqlite3WhereExprUsageNN(WhereMaskSet*, Expr*);
+SQLITE_PRIVATE Bitmask sqlite3WhereExprListUsage(WhereMaskSet*, ExprList*);
+SQLITE_PRIVATE void sqlite3WhereExprAnalyze(SrcList*, WhereClause*);
+SQLITE_PRIVATE void sqlite3WhereTabFuncArgs(Parse*, struct SrcList_item*, WhereClause*);
+
+
+
+
+
+/*
+** Bitmasks for the operators on WhereTerm objects. These are all
+** operators that are of interest to the query planner. An
+** OR-ed combination of these values can be used when searching for
+** particular WhereTerms within a WhereClause.
+**
+** Value constraints:
+** WO_EQ == SQLITE_INDEX_CONSTRAINT_EQ
+** WO_LT == SQLITE_INDEX_CONSTRAINT_LT
+** WO_LE == SQLITE_INDEX_CONSTRAINT_LE
+** WO_GT == SQLITE_INDEX_CONSTRAINT_GT
+** WO_GE == SQLITE_INDEX_CONSTRAINT_GE
+*/
+#define WO_IN 0x0001
+#define WO_EQ 0x0002
+#define WO_LT (WO_EQ<<(TK_LT-TK_EQ))
+#define WO_LE (WO_EQ<<(TK_LE-TK_EQ))
+#define WO_GT (WO_EQ<<(TK_GT-TK_EQ))
+#define WO_GE (WO_EQ<<(TK_GE-TK_EQ))
+#define WO_AUX 0x0040 /* Op useful to virtual tables only */
+#define WO_IS 0x0080
+#define WO_ISNULL 0x0100
+#define WO_OR 0x0200 /* Two or more OR-connected terms */
+#define WO_AND 0x0400 /* Two or more AND-connected terms */
+#define WO_EQUIV 0x0800 /* Of the form A==B, both columns */
+#define WO_NOOP 0x1000 /* This term does not restrict search space */
+
+#define WO_ALL 0x1fff /* Mask of all possible WO_* values */
+#define WO_SINGLE 0x01ff /* Mask of all non-compound WO_* values */
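+
+/* Illustrative fragment (hypothetical): because the operators are encoded
+** as bits, one AND can test a term against several operators at once,
+** e.g. every operator that can drive an index scan:
+*/
+#if 0
+static int termCanDriveIndexSketch(const WhereTerm *pTerm){
+  const u32 opMask = WO_EQ|WO_IN|WO_LT|WO_LE|WO_GT|WO_GE;
+  return (pTerm->eOperator & opMask)!=0;
+}
+#endif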
+
+/*
+** These are definitions of bits in the WhereLoop.wsFlags field.
+** The particular combination of bits in each WhereLoop helps to
+** determine the algorithm that WhereLoop represents.
+*/
+#define WHERE_COLUMN_EQ 0x00000001 /* x=EXPR */
+#define WHERE_COLUMN_RANGE 0x00000002 /* x<EXPR and/or x>EXPR */
+#define WHERE_COLUMN_IN 0x00000004 /* x IN (...) */
+#define WHERE_COLUMN_NULL 0x00000008 /* x IS NULL */
+#define WHERE_CONSTRAINT 0x0000000f /* Any of the WHERE_COLUMN_xxx values */
+#define WHERE_TOP_LIMIT 0x00000010 /* x<EXPR or x<=EXPR constraint */
+#define WHERE_BTM_LIMIT 0x00000020 /* x>EXPR or x>=EXPR constraint */
+#define WHERE_BOTH_LIMIT 0x00000030 /* Both x>EXPR and x<EXPR */
+#define WHERE_IDX_ONLY 0x00000040 /* Use index only - omit table */
+#define WHERE_IPK 0x00000100 /* x is the INTEGER PRIMARY KEY */
+#define WHERE_INDEXED 0x00000200 /* WhereLoop.u.btree.pIndex is valid */
+#define WHERE_VIRTUALTABLE 0x00000400 /* WhereLoop.u.vtab is valid */
+#define WHERE_IN_ABLE 0x00000800 /* Able to support an IN operator */
+#define WHERE_ONEROW 0x00001000 /* Selects no more than one row */
+#define WHERE_MULTI_OR 0x00002000 /* OR using multiple indices */
+#define WHERE_AUTO_INDEX 0x00004000 /* Uses an ephemeral index */
+#define WHERE_SKIPSCAN 0x00008000 /* Uses the skip-scan algorithm */
+#define WHERE_UNQ_WANTED 0x00010000 /* WHERE_ONEROW would have been helpful*/
+#define WHERE_PARTIALIDX 0x00020000 /* The automatic index is partial */
+#define WHERE_IN_EARLYOUT 0x00040000 /* Perhaps quit IN loops early */
+#define WHERE_BIGNULL_SORT 0x00080000 /* Column nEq of index is BIGNULL */
+
+#endif /* !defined(SQLITE_WHEREINT_H) */
+
+/************** End of whereInt.h ********************************************/
+/************** Continuing where we left off in wherecode.c ******************/
+
+#ifndef SQLITE_OMIT_EXPLAIN
+
+/*
+** Return the name of the i-th column of the pIdx index.
+*/
+static const char *explainIndexColumnName(Index *pIdx, int i){
+ i = pIdx->aiColumn[i];
+ if( i==XN_EXPR ) return "<expr>";
+ if( i==XN_ROWID ) return "rowid";
+ return pIdx->pTable->aCol[i].zName;
+}
+
+/*
+** This routine is a helper for explainIndexRange() below
+**
+** pStr holds the text of an expression that we are building up one term
+** at a time. This routine adds a new term to the end of the expression.
+** Terms are separated by AND so add the "AND" text for second and subsequent
+** terms only.
+*/
+static void explainAppendTerm(
+ StrAccum *pStr, /* The text expression being built */
+ Index *pIdx, /* Index to read column names from */
+ int nTerm, /* Number of terms */
+ int iTerm, /* Zero-based index of first term. */
+ int bAnd, /* Non-zero to append " AND " */
+ const char *zOp /* Name of the operator */
+){
+ int i;
+
+ assert( nTerm>=1 );
+ if( bAnd ) sqlite3_str_append(pStr, " AND ", 5);
+
+ if( nTerm>1 ) sqlite3_str_append(pStr, "(", 1);
+ for(i=0; i<nTerm; i++){
+ if( i ) sqlite3_str_append(pStr, ",", 1);
+ sqlite3_str_appendall(pStr, explainIndexColumnName(pIdx, iTerm+i));
+ }
+ if( nTerm>1 ) sqlite3_str_append(pStr, ")", 1);
+
+ sqlite3_str_append(pStr, zOp, 1);
+
+ if( nTerm>1 ) sqlite3_str_append(pStr, "(", 1);
+ for(i=0; i<nTerm; i++){
+ if( i ) sqlite3_str_append(pStr, ",", 1);
+ sqlite3_str_append(pStr, "?", 1);
+ }
+ if( nTerm>1 ) sqlite3_str_append(pStr, ")", 1);
+}
+
+/*
+** Argument pLevel describes a strategy for scanning table pTab. This
+** function appends text to pStr that describes the subset of table
+** rows scanned by the strategy in the form of an SQL expression.
+**
+** For example, if the query:
+**
+** SELECT * FROM t1 WHERE a=1 AND b>2;
+**
+** is run and there is an index on (a, b), then this function returns a
+** string similar to:
+**
+** "a=? AND b>?"
+*/
+static void explainIndexRange(StrAccum *pStr, WhereLoop *pLoop){
+ Index *pIndex = pLoop->u.btree.pIndex;
+ u16 nEq = pLoop->u.btree.nEq;
+ u16 nSkip = pLoop->nSkip;
+ int i, j;
+
+ if( nEq==0 && (pLoop->wsFlags&(WHERE_BTM_LIMIT|WHERE_TOP_LIMIT))==0 ) return;
+ sqlite3_str_append(pStr, " (", 2);
+ for(i=0; i<nEq; i++){
+ const char *z = explainIndexColumnName(pIndex, i);
+ if( i ) sqlite3_str_append(pStr, " AND ", 5);
+ sqlite3_str_appendf(pStr, i>=nSkip ? "%s=?" : "ANY(%s)", z);
+ }
+
+ j = i;
+ if( pLoop->wsFlags&WHERE_BTM_LIMIT ){
+ explainAppendTerm(pStr, pIndex, pLoop->u.btree.nBtm, j, i, ">");
+ i = 1;
+ }
+ if( pLoop->wsFlags&WHERE_TOP_LIMIT ){
+ explainAppendTerm(pStr, pIndex, pLoop->u.btree.nTop, j, i, "<");
+ }
+ sqlite3_str_append(pStr, ")", 1);
+}
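+
+/* Example (illustrative): for a skip-scan on an index i1(a,b) where only
+** "b=5" is usable, nSkip==1 and the text generated above would be
+** " (ANY(a) AND b=?)".
+*/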
+
+/*
+** This function is a no-op unless currently processing an EXPLAIN QUERY PLAN
+** command, or if either SQLITE_DEBUG or SQLITE_ENABLE_STMT_SCANSTATUS was
+** defined at compile-time. If it is not a no-op, a single OP_Explain opcode
+** is added to the output to describe the table scan strategy in pLevel.
+**
+** If an OP_Explain opcode is added to the VM, its address is returned.
+** Otherwise, if no OP_Explain is coded, zero is returned.
+*/
+SQLITE_PRIVATE int sqlite3WhereExplainOneScan(
+ Parse *pParse, /* Parse context */
+ SrcList *pTabList, /* Table list this loop refers to */
+ WhereLevel *pLevel, /* Scan to write OP_Explain opcode for */
+ u16 wctrlFlags /* Flags passed to sqlite3WhereBegin() */
+){
+ int ret = 0;
+#if !defined(SQLITE_DEBUG) && !defined(SQLITE_ENABLE_STMT_SCANSTATUS)
+ if( sqlite3ParseToplevel(pParse)->explain==2 )
+#endif
+ {
+ struct SrcList_item *pItem = &pTabList->a[pLevel->iFrom];
+ Vdbe *v = pParse->pVdbe; /* VM being constructed */
+ sqlite3 *db = pParse->db; /* Database handle */
+ int isSearch; /* True for a SEARCH. False for SCAN. */
+ WhereLoop *pLoop; /* The controlling WhereLoop object */
+ u32 flags; /* Flags that describe this loop */
+ char *zMsg; /* Text to add to EQP output */
+ StrAccum str; /* EQP output string */
+ char zBuf[100]; /* Initial space for EQP output string */
+
+ pLoop = pLevel->pWLoop;
+ flags = pLoop->wsFlags;
+ if( (flags&WHERE_MULTI_OR) || (wctrlFlags&WHERE_OR_SUBCLAUSE) ) return 0;
+
+ isSearch = (flags&(WHERE_BTM_LIMIT|WHERE_TOP_LIMIT))!=0
+ || ((flags&WHERE_VIRTUALTABLE)==0 && (pLoop->u.btree.nEq>0))
+ || (wctrlFlags&(WHERE_ORDERBY_MIN|WHERE_ORDERBY_MAX));
+
+ sqlite3StrAccumInit(&str, db, zBuf, sizeof(zBuf), SQLITE_MAX_LENGTH);
+ sqlite3_str_appendall(&str, isSearch ? "SEARCH" : "SCAN");
+ if( pItem->pSelect ){
+ sqlite3_str_appendf(&str, " SUBQUERY %u", pItem->pSelect->selId);
+ }else{
+ sqlite3_str_appendf(&str, " TABLE %s", pItem->zName);
+ }
+
+ if( pItem->zAlias ){
+ sqlite3_str_appendf(&str, " AS %s", pItem->zAlias);
+ }
+ if( (flags & (WHERE_IPK|WHERE_VIRTUALTABLE))==0 ){
+ const char *zFmt = 0;
+ Index *pIdx;
+
+ assert( pLoop->u.btree.pIndex!=0 );
+ pIdx = pLoop->u.btree.pIndex;
+ assert( !(flags&WHERE_AUTO_INDEX) || (flags&WHERE_IDX_ONLY) );
+ if( !HasRowid(pItem->pTab) && IsPrimaryKeyIndex(pIdx) ){
+ if( isSearch ){
+ zFmt = "PRIMARY KEY";
+ }
+ }else if( flags & WHERE_PARTIALIDX ){
+ zFmt = "AUTOMATIC PARTIAL COVERING INDEX";
+ }else if( flags & WHERE_AUTO_INDEX ){
+ zFmt = "AUTOMATIC COVERING INDEX";
+ }else if( flags & WHERE_IDX_ONLY ){
+ zFmt = "COVERING INDEX %s";
+ }else{
+ zFmt = "INDEX %s";
+ }
+ if( zFmt ){
+ sqlite3_str_append(&str, " USING ", 7);
+ sqlite3_str_appendf(&str, zFmt, pIdx->zName);
+ explainIndexRange(&str, pLoop);
+ }
+ }else if( (flags & WHERE_IPK)!=0 && (flags & WHERE_CONSTRAINT)!=0 ){
+ const char *zRangeOp;
+ if( flags&(WHERE_COLUMN_EQ|WHERE_COLUMN_IN) ){
+ zRangeOp = "=";
+ }else if( (flags&WHERE_BOTH_LIMIT)==WHERE_BOTH_LIMIT ){
+ zRangeOp = ">? AND rowid<";
+ }else if( flags&WHERE_BTM_LIMIT ){
+ zRangeOp = ">";
+ }else{
+ assert( flags&WHERE_TOP_LIMIT);
+ zRangeOp = "<";
+ }
+ sqlite3_str_appendf(&str,
+ " USING INTEGER PRIMARY KEY (rowid%s?)",zRangeOp);
+ }
+#ifndef SQLITE_OMIT_VIRTUALTABLE
+ else if( (flags & WHERE_VIRTUALTABLE)!=0 ){
+ sqlite3_str_appendf(&str, " VIRTUAL TABLE INDEX %d:%s",
+ pLoop->u.vtab.idxNum, pLoop->u.vtab.idxStr);
+ }
+#endif
+#ifdef SQLITE_EXPLAIN_ESTIMATED_ROWS
+ if( pLoop->nOut>=10 ){
+ sqlite3_str_appendf(&str, " (~%llu rows)",
+ sqlite3LogEstToInt(pLoop->nOut));
+ }else{
+ sqlite3_str_append(&str, " (~1 row)", 9);
+ }
+#endif
+ zMsg = sqlite3StrAccumFinish(&str);
+ sqlite3ExplainBreakpoint("",zMsg);
+ ret = sqlite3VdbeAddOp4(v, OP_Explain, sqlite3VdbeCurrentAddr(v),
+ pParse->addrExplain, 0, zMsg,P4_DYNAMIC);
+ }
+ return ret;
+}
+#endif /* SQLITE_OMIT_EXPLAIN */
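+
+/* Example output (illustrative): for
+**
+**     SELECT * FROM t1 WHERE a=? AND b>?;
+**
+** with an index i1 on t1(a,b), the OP_Explain added above might carry
+** the text:
+**
+**     SEARCH TABLE t1 USING INDEX i1 (a=? AND b>?)
+*/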
+
+#ifdef SQLITE_ENABLE_STMT_SCANSTATUS
+/*
+** Configure the VM passed as the first argument with an
+** sqlite3_stmt_scanstatus() entry corresponding to the scan used to
+** implement level pLvl. Argument pSrclist is a pointer to the FROM
+** clause that the scan reads data from.
+**
+** If argument addrExplain is not 0, it must be the address of an
+** OP_Explain instruction that describes the same loop.
+*/
+SQLITE_PRIVATE void sqlite3WhereAddScanStatus(
+ Vdbe *v, /* Vdbe to add scanstatus entry to */
+ SrcList *pSrclist, /* FROM clause pLvl reads data from */
+ WhereLevel *pLvl, /* Level to add scanstatus() entry for */
+ int addrExplain /* Address of OP_Explain (or 0) */
+){
+ const char *zObj = 0;
+ WhereLoop *pLoop = pLvl->pWLoop;
+ if( (pLoop->wsFlags & WHERE_VIRTUALTABLE)==0 && pLoop->u.btree.pIndex!=0 ){
+ zObj = pLoop->u.btree.pIndex->zName;
+ }else{
+ zObj = pSrclist->a[pLvl->iFrom].zName;
+ }
+ sqlite3VdbeScanStatus(
+ v, addrExplain, pLvl->addrBody, pLvl->addrVisit, pLoop->nOut, zObj
+ );
+}
+#endif
+
+
+/*
+** Disable a term in the WHERE clause. Except, do not disable the term
+** if it controls a LEFT OUTER JOIN and it did not originate in the ON
+** or USING clause of that join.
+**
+** Consider the term t2.z='ok' in the following queries:
+**
+** (1) SELECT * FROM t1 LEFT JOIN t2 ON t1.a=t2.x WHERE t2.z='ok'
+** (2) SELECT * FROM t1 LEFT JOIN t2 ON t1.a=t2.x AND t2.z='ok'
+** (3) SELECT * FROM t1, t2 WHERE t1.a=t2.x AND t2.z='ok'
+**
+** The t2.z='ok' term is disabled in (2) because it originates
+** in the ON clause. The term is disabled in (3) because it is not part
+** of a LEFT OUTER JOIN. In (1), the term is not disabled.
+**
+** Disabling a term causes that term to not be tested in the inner loop
+** of the join. Disabling is an optimization. When terms are satisfied
+** by indices, we disable them to prevent redundant tests in the inner
+** loop. We would get the correct results if nothing were ever disabled,
+** but joins might run a little slower. The trick is to disable as much
+** as we can without disabling too much. If we disabled in (1), we'd get
+** the wrong answer. See ticket #813.
+**
+** If all the children of a term are disabled, then that term is also
+** automatically disabled. In this way, terms get disabled if derived
+** virtual terms are tested first. For example:
+**
+** x GLOB 'abc*' AND x>='abc' AND x<'acd'
+** \___________/ \______/ \_____/
+** parent child1 child2
+**
+** Only the parent term was in the original WHERE clause. The child1
+** and child2 terms were added by the LIKE optimization. If both of
+** the virtual child terms are valid, then testing of the parent can be
+** skipped.
+**
+** Usually the parent term is marked as TERM_CODED. But if the parent
+** term was originally TERM_LIKE, then the parent gets TERM_LIKECOND instead.
+** The TERM_LIKECOND marking indicates that the term should be coded inside
+** a conditional such that it is only evaluated on the second pass of a
+** LIKE-optimization loop, when scanning BLOBs instead of strings.
+*/
+static void disableTerm(WhereLevel *pLevel, WhereTerm *pTerm){
+ int nLoop = 0;
+ assert( pTerm!=0 );
+ while( (pTerm->wtFlags & TERM_CODED)==0
+ && (pLevel->iLeftJoin==0 || ExprHasProperty(pTerm->pExpr, EP_FromJoin))
+ && (pLevel->notReady & pTerm->prereqAll)==0
+ ){
+ if( nLoop && (pTerm->wtFlags & TERM_LIKE)!=0 ){
+ pTerm->wtFlags |= TERM_LIKECOND;
+ }else{
+ pTerm->wtFlags |= TERM_CODED;
+ }
+ if( pTerm->iParent<0 ) break;
+ pTerm = &pTerm->pWC->a[pTerm->iParent];
+ assert( pTerm!=0 );
+ pTerm->nChild--;
+ if( pTerm->nChild!=0 ) break;
+ nLoop++;
+ }
+}
+
+/*
+** Code an OP_Affinity opcode to apply the column affinity string zAff
+** to the n registers starting at base.
+**
+** As an optimization, SQLITE_AFF_BLOB and SQLITE_AFF_NONE entries (which
+** are no-ops) at the beginning and end of zAff are ignored. If all entries
+** in zAff are SQLITE_AFF_BLOB or SQLITE_AFF_NONE, then no code gets generated.
+**
+** This routine makes its own copy of zAff so that the caller is free
+** to modify zAff after this routine returns.
+*/
+static void codeApplyAffinity(Parse *pParse, int base, int n, char *zAff){
+ Vdbe *v = pParse->pVdbe;
+ if( zAff==0 ){
+ assert( pParse->db->mallocFailed );
+ return;
+ }
+ assert( v!=0 );
+
+ /* Adjust base and n to skip over SQLITE_AFF_BLOB and SQLITE_AFF_NONE
+ ** entries at the beginning and end of the affinity string.
+ */
+ assert( SQLITE_AFF_NONE<SQLITE_AFF_BLOB );
+ while( n>0 && zAff[0]<=SQLITE_AFF_BLOB ){
+ n--;
+ base++;
+ zAff++;
+ }
+ while( n>1 && zAff[n-1]<=SQLITE_AFF_BLOB ){
+ n--;
+ }
+
+ /* Code the OP_Affinity opcode if there is anything left to do. */
+ if( n>0 ){
+ sqlite3VdbeAddOp4(v, OP_Affinity, base, n, 0, zAff, n);
+ }
+}
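+
+/* Worked example (illustrative): with base=1, n=3, and zAff="ADA"
+** ('A' is SQLITE_AFF_BLOB, 'D' is SQLITE_AFF_INTEGER), the leading 'A'
+** is skipped (base becomes 2, n becomes 2) and the trailing 'A' is
+** trimmed (n becomes 1), so a single OP_Affinity is coded for register 2
+** with the one-character affinity string "D".
+*/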
+
+/*
+** Expression pRight, which is the RHS of a comparison operation, is
+** either a vector of n elements or, if n==1, a scalar expression.
+** Before the comparison operation, affinity zAff is to be applied
+** to the pRight values. This function modifies characters within the
+** affinity string to SQLITE_AFF_BLOB if either:
+**
+** * the comparison will be performed with no affinity, or
+** * the affinity change in zAff is guaranteed not to change the value.
+*/
+static void updateRangeAffinityStr(
+ Expr *pRight, /* RHS of comparison */
+ int n, /* Number of vector elements in comparison */
+ char *zAff /* Affinity string to modify */
+){
+ int i;
+ for(i=0; i<n; i++){
+ Expr *p = sqlite3VectorFieldSubexpr(pRight, i);
+ if( sqlite3CompareAffinity(p, zAff[i])==SQLITE_AFF_BLOB
+ || sqlite3ExprNeedsNoAffinityChange(p, zAff[i])
+ ){
+ zAff[i] = SQLITE_AFF_BLOB;
+ }
+ }
+}
+
+
+/*
+** pX is an expression of the form: (vector) IN (SELECT ...)
+** In other words, it is an IN operator with a vector on the LHS and a
+** SELECT on the RHS. But not all terms in the vector are indexable and
+** the terms might
+** not be in the correct order for indexing.
+**
+** This routine makes a copy of the input pX expression and then adjusts
+** the vector on the LHS with corresponding changes to the SELECT so that
+** the vector contains only index terms and those terms are in the correct
+** order. The modified IN expression is returned. The caller is responsible
+** for deleting the returned expression.
+**
+** Example:
+**
+** CREATE TABLE t1(a,b,c,d,e,f);
+** CREATE INDEX t1x1 ON t1(e,c);
+** SELECT * FROM t1 WHERE (a,b,c,d,e) IN (SELECT v,w,x,y,z FROM t2)
+** \_______________________________________/
+** The pX expression
+**
+** Since only columns e and c can be used with the index, in that order,
+** the modified IN expression that is returned will be:
+**
+** (e,c) IN (SELECT z,x FROM t2)
+**
+** The reduced pX is different from the original (obviously) and thus is
+** only used for indexing, to improve performance. The original unaltered
+** IN expression must also be run on each output row for correctness.
+*/
+static Expr *removeUnindexableInClauseTerms(
+ Parse *pParse, /* The parsing context */
+ int iEq, /* Look at loop terms starting here */
+ WhereLoop *pLoop, /* The current loop */
+ Expr *pX /* The IN expression to be reduced */
+){
+ sqlite3 *db = pParse->db;
+ Expr *pNew;
+ pNew = sqlite3ExprDup(db, pX, 0);
+ if( db->mallocFailed==0 ){
+ ExprList *pOrigRhs = pNew->x.pSelect->pEList; /* Original unmodified RHS */
+ ExprList *pOrigLhs = pNew->pLeft->x.pList; /* Original unmodified LHS */
+ ExprList *pRhs = 0; /* New RHS after modifications */
+ ExprList *pLhs = 0; /* New LHS after mods */
+ int i; /* Loop counter */
+ Select *pSelect; /* Pointer to the SELECT on the RHS */
+
+ for(i=iEq; i<pLoop->nLTerm; i++){
+ if( pLoop->aLTerm[i]->pExpr==pX ){
+ int iField = pLoop->aLTerm[i]->iField - 1;
+ if( pOrigRhs->a[iField].pExpr==0 ) continue; /* Duplicate PK column */
+ pRhs = sqlite3ExprListAppend(pParse, pRhs, pOrigRhs->a[iField].pExpr);
+ pOrigRhs->a[iField].pExpr = 0;
+ assert( pOrigLhs->a[iField].pExpr!=0 );
+ pLhs = sqlite3ExprListAppend(pParse, pLhs, pOrigLhs->a[iField].pExpr);
+ pOrigLhs->a[iField].pExpr = 0;
+ }
+ }
+ sqlite3ExprListDelete(db, pOrigRhs);
+ sqlite3ExprListDelete(db, pOrigLhs);
+ pNew->pLeft->x.pList = pLhs;
+ pNew->x.pSelect->pEList = pRhs;
+ if( pLhs && pLhs->nExpr==1 ){
+ /* Take care here not to generate a TK_VECTOR containing only a
+ ** single value. Since the parser never creates such a vector, some
+ ** of the subroutines do not handle this case. */
+ Expr *p = pLhs->a[0].pExpr;
+ pLhs->a[0].pExpr = 0;
+ sqlite3ExprDelete(db, pNew->pLeft);
+ pNew->pLeft = p;
+ }
+ pSelect = pNew->x.pSelect;
+ if( pSelect->pOrderBy ){
+ /* If the SELECT statement has an ORDER BY clause, zero the
+ ** iOrderByCol variables. These are set to non-zero when an
+ ** ORDER BY term exactly matches one of the terms of the
+ ** result-set. Since the result-set of the SELECT statement may
+ ** have been modified or reordered, these variables are no longer
+ ** set correctly. Since setting them is just an optimization,
+ ** it's easiest just to zero them here. */
+ ExprList *pOrderBy = pSelect->pOrderBy;
+ for(i=0; i<pOrderBy->nExpr; i++){
+ pOrderBy->a[i].u.x.iOrderByCol = 0;
+ }
+ }
+
+#if 0
+ printf("For indexing, change the IN expr:\n");
+ sqlite3TreeViewExpr(0, pX, 0);
+ printf("Into:\n");
+ sqlite3TreeViewExpr(0, pNew, 0);
+#endif
+ }
+ return pNew;
+}
+
+
+/*
+** Generate code for a single equality term of the WHERE clause. An equality
+** term can be either X=expr or X IN (...). pTerm is the term to be
+** coded.
+**
+** The current value for the constraint is left in a register, the index
+** of which is returned. An attempt is made to store the result in iTarget,
+** but this is only guaranteed for TK_ISNULL and TK_IN constraints. If the
+** constraint is a TK_EQ or TK_IS, then the current value might be left in
+** some other register and it is the caller's responsibility to compensate.
+**
+** For a constraint of the form X=expr, the expression is evaluated in
+** straight-line code. For constraints of the form X IN (...)
+** this routine sets up a loop that will iterate over all values of X.
+*/
+static int codeEqualityTerm(
+ Parse *pParse, /* The parsing context */
+ WhereTerm *pTerm, /* The term of the WHERE clause to be coded */
+ WhereLevel *pLevel, /* The level of the FROM clause we are working on */
+ int iEq, /* Index of the equality term within this level */
+ int bRev, /* True for reverse-order IN operations */
+ int iTarget /* Attempt to leave results in this register */
+){
+ Expr *pX = pTerm->pExpr;
+ Vdbe *v = pParse->pVdbe;
+ int iReg; /* Register holding results */
+
+ assert( pLevel->pWLoop->aLTerm[iEq]==pTerm );
+ assert( iTarget>0 );
+ if( pX->op==TK_EQ || pX->op==TK_IS ){
+ iReg = sqlite3ExprCodeTarget(pParse, pX->pRight, iTarget);
+ }else if( pX->op==TK_ISNULL ){
+ iReg = iTarget;
+ sqlite3VdbeAddOp2(v, OP_Null, 0, iReg);
+#ifndef SQLITE_OMIT_SUBQUERY
+ }else{
+ int eType = IN_INDEX_NOOP;
+ int iTab;
+ struct InLoop *pIn;
+ WhereLoop *pLoop = pLevel->pWLoop;
+ int i;
+ int nEq = 0;
+ int *aiMap = 0;
+
+ if( (pLoop->wsFlags & WHERE_VIRTUALTABLE)==0
+ && pLoop->u.btree.pIndex!=0
+ && pLoop->u.btree.pIndex->aSortOrder[iEq]
+ ){
+ testcase( iEq==0 );
+ testcase( bRev );
+ bRev = !bRev;
+ }
+ assert( pX->op==TK_IN );
+ iReg = iTarget;
+
+ for(i=0; i<iEq; i++){
+ if( pLoop->aLTerm[i] && pLoop->aLTerm[i]->pExpr==pX ){
+ disableTerm(pLevel, pTerm);
+ return iTarget;
+ }
+ }
+ for(i=iEq;i<pLoop->nLTerm; i++){
+ assert( pLoop->aLTerm[i]!=0 );
+ if( pLoop->aLTerm[i]->pExpr==pX ) nEq++;
+ }
+
+ iTab = 0;
+ if( (pX->flags & EP_xIsSelect)==0 || pX->x.pSelect->pEList->nExpr==1 ){
+ eType = sqlite3FindInIndex(pParse, pX, IN_INDEX_LOOP, 0, 0, &iTab);
+ }else{
+ sqlite3 *db = pParse->db;
+ pX = removeUnindexableInClauseTerms(pParse, iEq, pLoop, pX);
+
+ if( !db->mallocFailed ){
+ aiMap = (int*)sqlite3DbMallocZero(pParse->db, sizeof(int)*nEq);
+ eType = sqlite3FindInIndex(pParse, pX, IN_INDEX_LOOP, 0, aiMap, &iTab);
+ pTerm->pExpr->iTable = iTab;
+ }
+ sqlite3ExprDelete(db, pX);
+ pX = pTerm->pExpr;
+ }
+
+ if( eType==IN_INDEX_INDEX_DESC ){
+ testcase( bRev );
+ bRev = !bRev;
+ }
+ sqlite3VdbeAddOp2(v, bRev ? OP_Last : OP_Rewind, iTab, 0);
+ VdbeCoverageIf(v, bRev);
+ VdbeCoverageIf(v, !bRev);
+ assert( (pLoop->wsFlags & WHERE_MULTI_OR)==0 );
+
+ pLoop->wsFlags |= WHERE_IN_ABLE;
+ if( pLevel->u.in.nIn==0 ){
+ pLevel->addrNxt = sqlite3VdbeMakeLabel(pParse);
+ }
+
+ i = pLevel->u.in.nIn;
+ pLevel->u.in.nIn += nEq;
+ pLevel->u.in.aInLoop =
+ sqlite3DbReallocOrFree(pParse->db, pLevel->u.in.aInLoop,
+ sizeof(pLevel->u.in.aInLoop[0])*pLevel->u.in.nIn);
+ pIn = pLevel->u.in.aInLoop;
+ if( pIn ){
+ int iMap = 0; /* Index in aiMap[] */
+ pIn += i;
+ for(i=iEq;i<pLoop->nLTerm; i++){
+ if( pLoop->aLTerm[i]->pExpr==pX ){
+ int iOut = iReg + i - iEq;
+ if( eType==IN_INDEX_ROWID ){
+ pIn->addrInTop = sqlite3VdbeAddOp2(v, OP_Rowid, iTab, iOut);
+ }else{
+ int iCol = aiMap ? aiMap[iMap++] : 0;
+ pIn->addrInTop = sqlite3VdbeAddOp3(v,OP_Column,iTab, iCol, iOut);
+ }
+ sqlite3VdbeAddOp1(v, OP_IsNull, iOut); VdbeCoverage(v);
+ if( i==iEq ){
+ pIn->iCur = iTab;
+ pIn->eEndLoopOp = bRev ? OP_Prev : OP_Next;
+ if( iEq>0 ){
+ pIn->iBase = iReg - i;
+ pIn->nPrefix = i;
+ pLoop->wsFlags |= WHERE_IN_EARLYOUT;
+ }else{
+ pIn->nPrefix = 0;
+ }
+ }else{
+ pIn->eEndLoopOp = OP_Noop;
+ }
+ pIn++;
+ }
+ }
+ }else{
+ pLevel->u.in.nIn = 0;
+ }
+ sqlite3DbFree(pParse->db, aiMap);
+#endif
+ }
+ disableTerm(pLevel, pTerm);
+ return iReg;
+}
+
+/*
+** Generate code that will evaluate all == and IN constraints for an
+** index scan.
+**
+** For example, consider table t1(a,b,c,d,e,f) with index i1(a,b,c).
+** Suppose the WHERE clause is this: a==5 AND b IN (1,2,3) AND c>5 AND c<10
+** The index has as many as three equality constraints, but in this
+** example, the third "c" value is an inequality. So only two
+** constraints are coded. This routine will generate code to evaluate
+** a==5 and b IN (1,2,3). The current values for a and b will be stored
+** in consecutive registers and the index of the first register is returned.
+**
+** In the example above nEq==2. But this subroutine works for any value
+** of nEq including 0. If nEq==0, this routine is nearly a no-op.
+** The only thing it does is allocate the pLevel->iMem memory cell and
+** compute the affinity string.
+**
+** The nExtraReg parameter is 0 or 1. It is 0 if all WHERE clause constraints
+** are == or IN and are covered by the nEq. nExtraReg is 1 if there is
+** an inequality constraint (such as the "c>5 AND c<10" in the example) that
+** occurs after the nEq equality constraints.
+**
+** This routine allocates a range of nEq+nExtraReg memory cells and returns
+** the index of the first memory cell in that range. The code that
+** calls this routine will use that memory range to store keys for
+** start and termination conditions of the loop. If one or more IN
+** operators appear, then this routine allocates nEq additional memory
+** cells for internal use.
+**
+** Before returning, *pzAff is set to point to a buffer containing a
+** copy of the column affinity string of the index allocated using
+** sqlite3DbMalloc(). Except, entries in the copy of the string associated
+** with equality constraints that use BLOB or NONE affinity are set to
+** SQLITE_AFF_BLOB. This is to deal with SQL such as the following:
+**
+** CREATE TABLE t1(a TEXT PRIMARY KEY, b);
+** SELECT ... FROM t1 AS t2, t1 WHERE t1.a = t2.b;
+**
+** In the example above, the index on t1(a) has TEXT affinity. But since
+** the right hand side of the equality constraint (t2.b) has BLOB/NONE affinity,
+** no conversion should be attempted before using a t2.b value as part of
+** a key to search the index. Hence the first byte in the returned affinity
+** string in this example would be set to SQLITE_AFF_BLOB.
+*/
+static int codeAllEqualityTerms(
+ Parse *pParse, /* Parsing context */
+ WhereLevel *pLevel, /* Which nested loop of the FROM we are coding */
+ int bRev, /* Reverse the order of IN operators */
+ int nExtraReg, /* Number of extra registers to allocate */
+ char **pzAff /* OUT: Set to point to affinity string */
+){
+ u16 nEq; /* The number of == or IN constraints to code */
+ u16 nSkip; /* Number of left-most columns to skip */
+ Vdbe *v = pParse->pVdbe; /* The vm under construction */
+ Index *pIdx; /* The index being used for this loop */
+ WhereTerm *pTerm; /* A single constraint term */
+ WhereLoop *pLoop; /* The WhereLoop object */
+ int j; /* Loop counter */
+ int regBase; /* Base register */
+ int nReg; /* Number of registers to allocate */
+ char *zAff; /* Affinity string to return */
+
+ /* This module is only called on query plans that use an index. */
+ pLoop = pLevel->pWLoop;
+ assert( (pLoop->wsFlags & WHERE_VIRTUALTABLE)==0 );
+ nEq = pLoop->u.btree.nEq;
+ nSkip = pLoop->nSkip;
+ pIdx = pLoop->u.btree.pIndex;
+ assert( pIdx!=0 );
+
+ /* Figure out how many memory cells we will need then allocate them.
+ */
+ regBase = pParse->nMem + 1;
+ nReg = pLoop->u.btree.nEq + nExtraReg;
+ pParse->nMem += nReg;
+
+ zAff = sqlite3DbStrDup(pParse->db,sqlite3IndexAffinityStr(pParse->db,pIdx));
+ assert( zAff!=0 || pParse->db->mallocFailed );
+
+ if( nSkip ){
+ int iIdxCur = pLevel->iIdxCur;
+ sqlite3VdbeAddOp1(v, (bRev?OP_Last:OP_Rewind), iIdxCur);
+ VdbeCoverageIf(v, bRev==0);
+ VdbeCoverageIf(v, bRev!=0);
+ VdbeComment((v, "begin skip-scan on %s", pIdx->zName));
+ j = sqlite3VdbeAddOp0(v, OP_Goto);
+ pLevel->addrSkip = sqlite3VdbeAddOp4Int(v, (bRev?OP_SeekLT:OP_SeekGT),
+ iIdxCur, 0, regBase, nSkip);
+ VdbeCoverageIf(v, bRev==0);
+ VdbeCoverageIf(v, bRev!=0);
+ sqlite3VdbeJumpHere(v, j);
+ for(j=0; j<nSkip; j++){
+ sqlite3VdbeAddOp3(v, OP_Column, iIdxCur, j, regBase+j);
+ testcase( pIdx->aiColumn[j]==XN_EXPR );
+ VdbeComment((v, "%s", explainIndexColumnName(pIdx, j)));
+ }
+ }
+
+ /* Evaluate the equality constraints
+ */
+ assert( zAff==0 || (int)strlen(zAff)>=nEq );
+ for(j=nSkip; j<nEq; j++){
+ int r1;
+ pTerm = pLoop->aLTerm[j];
+ assert( pTerm!=0 );
+ /* The following testcase is true for indices with redundant columns.
+ ** Ex: CREATE INDEX i1 ON t1(a,b,a); SELECT * FROM t1 WHERE a=0 AND b=0; */
+ testcase( (pTerm->wtFlags & TERM_CODED)!=0 );
+ testcase( pTerm->wtFlags & TERM_VIRTUAL );
+ r1 = codeEqualityTerm(pParse, pTerm, pLevel, j, bRev, regBase+j);
+ if( r1!=regBase+j ){
+ if( nReg==1 ){
+ sqlite3ReleaseTempReg(pParse, regBase);
+ regBase = r1;
+ }else{
+ sqlite3VdbeAddOp2(v, OP_SCopy, r1, regBase+j);
+ }
+ }
+ if( pTerm->eOperator & WO_IN ){
+ if( pTerm->pExpr->flags & EP_xIsSelect ){
+ /* No affinity ever needs to be (or should be) applied to a value
+ ** from the RHS of an "? IN (SELECT ...)" expression. The
+ ** sqlite3FindInIndex() routine has already ensured that the
+ ** affinity of the comparison has been applied to the value. */
+ if( zAff ) zAff[j] = SQLITE_AFF_BLOB;
+ }
+ }else if( (pTerm->eOperator & WO_ISNULL)==0 ){
+ Expr *pRight = pTerm->pExpr->pRight;
+ if( (pTerm->wtFlags & TERM_IS)==0 && sqlite3ExprCanBeNull(pRight) ){
+ sqlite3VdbeAddOp2(v, OP_IsNull, regBase+j, pLevel->addrBrk);
+ VdbeCoverage(v);
+ }
+ if( zAff ){
+ if( sqlite3CompareAffinity(pRight, zAff[j])==SQLITE_AFF_BLOB ){
+ zAff[j] = SQLITE_AFF_BLOB;
+ }
+ if( sqlite3ExprNeedsNoAffinityChange(pRight, zAff[j]) ){
+ zAff[j] = SQLITE_AFF_BLOB;
+ }
+ }
+ }
+ }
+ *pzAff = zAff;
+ return regBase;
+}
+
+#ifndef SQLITE_LIKE_DOESNT_MATCH_BLOBS
+/*
+** If the most recently coded instruction is a constant range constraint
+** (a string literal) that originated from the LIKE optimization, then
+** set P3 and P5 on the OP_String opcode so that the string will be cast
+** to a BLOB at appropriate times.
+**
+** The LIKE optimization tries to evaluate "x LIKE 'abc%'" as a range
+** expression: "x>='ABC' AND x<'abd'". But this requires that the range
+** scan loop run twice, once for strings and a second time for BLOBs.
+** The OP_String opcodes on the second pass convert the upper and lower
+** bound string constants to blobs. This routine makes the necessary changes
+** to the OP_String opcodes for that to happen.
+**
+** Except, of course, if SQLITE_LIKE_DOESNT_MATCH_BLOBS is defined, then
+** only the one pass through the string space is required, so this routine
+** becomes a no-op.
+*/
+static void whereLikeOptimizationStringFixup(
+ Vdbe *v, /* prepared statement under construction */
+ WhereLevel *pLevel, /* The loop that contains the LIKE operator */
+ WhereTerm *pTerm /* The upper or lower bound just coded */
+){
+ if( pTerm->wtFlags & TERM_LIKEOPT ){
+ VdbeOp *pOp;
+ assert( pLevel->iLikeRepCntr>0 );
+ pOp = sqlite3VdbeGetOp(v, -1);
+ assert( pOp!=0 );
+ assert( pOp->opcode==OP_String8
+ || pTerm->pWC->pWInfo->pParse->db->mallocFailed );
+ pOp->p3 = (int)(pLevel->iLikeRepCntr>>1); /* Register holding counter */
+ pOp->p5 = (u8)(pLevel->iLikeRepCntr&1); /* ASC or DESC */
+ }
+}
+#else
+# define whereLikeOptimizationStringFixup(A,B,C)
+#endif
+
+#ifdef SQLITE_ENABLE_CURSOR_HINTS
+/*
+** Information is passed from codeCursorHint() down to individual nodes of
+** the expression tree (by sqlite3WalkExpr()) using an instance of this
+** structure.
+*/
+struct CCurHint {
+ int iTabCur; /* Cursor for the main table */
+ int iIdxCur; /* Cursor for the index, if pIdx!=0. Unused otherwise */
+ Index *pIdx; /* The index used to access the table */
+};
+
+/*
+** This function is called for every node of an expression that is a candidate
+** for a cursor hint on an index cursor. For TK_COLUMN nodes that reference
+** the table CCurHint.iTabCur, verify that the same column can be
+** accessed through the index. If it cannot, then set pWalker->eCode to 1.
+*/
+static int codeCursorHintCheckExpr(Walker *pWalker, Expr *pExpr){
+ struct CCurHint *pHint = pWalker->u.pCCurHint;
+ assert( pHint->pIdx!=0 );
+ if( pExpr->op==TK_COLUMN
+ && pExpr->iTable==pHint->iTabCur
+ && sqlite3TableColumnToIndex(pHint->pIdx, pExpr->iColumn)<0
+ ){
+ pWalker->eCode = 1;
+ }
+ return WRC_Continue;
+}
+
+/*
+** Test whether or not expression pExpr, which was part of a WHERE clause,
+** should be included in the cursor-hint for a table that is on the rhs
+** of a LEFT JOIN. Set Walker.eCode to non-zero before returning if the
+** expression is not suitable.
+**
+** An expression is unsuitable if it might evaluate to non-NULL even if
+** a TK_COLUMN node that does affect the value of the expression is set
+** to NULL. For example:
+**
+** col IS NULL
+** col IS NOT NULL
+** coalesce(col, 1)
+** CASE WHEN col THEN 0 ELSE 1 END
+*/
+static int codeCursorHintIsOrFunction(Walker *pWalker, Expr *pExpr){
+ if( pExpr->op==TK_IS
+ || pExpr->op==TK_ISNULL || pExpr->op==TK_ISNOT
+ || pExpr->op==TK_NOTNULL || pExpr->op==TK_CASE
+ ){
+ pWalker->eCode = 1;
+ }else if( pExpr->op==TK_FUNCTION ){
+ int d1;
+ char d2[4];
+ if( 0==sqlite3IsLikeFunction(pWalker->pParse->db, pExpr, &d1, d2) ){
+ pWalker->eCode = 1;
+ }
+ }
+
+ return WRC_Continue;
+}
+
+
+/*
+** This function is called on every node of an expression tree used as an
+** argument to the OP_CursorHint instruction. If the node is a TK_COLUMN
+** that accesses any table other than the one identified by
+** CCurHint.iTabCur, then do the following:
+**
+** 1) allocate a register and code an OP_Column instruction to read
+** the specified column into the new register, and
+**
+** 2) transform the expression node to a TK_REGISTER node that reads
+** from the newly populated register.
+**
+** Also, if the node is a TK_COLUMN that does access the table identified
+** by pCCurHint.iTabCur, and an index is being used (which we will
+** know because CCurHint.pIdx!=0) then transform the TK_COLUMN into
+** an access of the index rather than the original table.
+*/
+static int codeCursorHintFixExpr(Walker *pWalker, Expr *pExpr){
+ int rc = WRC_Continue;
+ struct CCurHint *pHint = pWalker->u.pCCurHint;
+ if( pExpr->op==TK_COLUMN ){
+ if( pExpr->iTable!=pHint->iTabCur ){
+ int reg = ++pWalker->pParse->nMem; /* Register for column value */
+ sqlite3ExprCode(pWalker->pParse, pExpr, reg);
+ pExpr->op = TK_REGISTER;
+ pExpr->iTable = reg;
+ }else if( pHint->pIdx!=0 ){
+ pExpr->iTable = pHint->iIdxCur;
+ pExpr->iColumn = sqlite3TableColumnToIndex(pHint->pIdx, pExpr->iColumn);
+ assert( pExpr->iColumn>=0 );
+ }
+ }else if( pExpr->op==TK_AGG_FUNCTION ){
+ /* An aggregate function in the WHERE clause of a query means this must
+ ** be a correlated sub-query, and expression pExpr is an aggregate from
+ ** the parent context. Do not walk the function arguments in this case.
+ **
+ ** todo: It should be possible to replace this node with a TK_REGISTER
+ ** expression, as the result of the expression must be stored in a
+ ** register at this point. The same holds for TK_AGG_COLUMN nodes. */
+ rc = WRC_Prune;
+ }
+ return rc;
+}
+
+/*
+** Insert an OP_CursorHint instruction if it is appropriate to do so.
+*/
+static void codeCursorHint(
+ struct SrcList_item *pTabItem, /* FROM clause item */
+ WhereInfo *pWInfo, /* The where clause */
+ WhereLevel *pLevel, /* Which loop to provide hints for */
+ WhereTerm *pEndRange /* Hint this end-of-scan boundary term if not NULL */
+){
+ Parse *pParse = pWInfo->pParse;
+ sqlite3 *db = pParse->db;
+ Vdbe *v = pParse->pVdbe;
+ Expr *pExpr = 0;
+ WhereLoop *pLoop = pLevel->pWLoop;
+ int iCur;
+ WhereClause *pWC;
+ WhereTerm *pTerm;
+ int i, j;
+ struct CCurHint sHint;
+ Walker sWalker;
+
+ if( OptimizationDisabled(db, SQLITE_CursorHints) ) return;
+ iCur = pLevel->iTabCur;
+ assert( iCur==pWInfo->pTabList->a[pLevel->iFrom].iCursor );
+ sHint.iTabCur = iCur;
+ sHint.iIdxCur = pLevel->iIdxCur;
+ sHint.pIdx = pLoop->u.btree.pIndex;
+ memset(&sWalker, 0, sizeof(sWalker));
+ sWalker.pParse = pParse;
+ sWalker.u.pCCurHint = &sHint;
+ pWC = &pWInfo->sWC;
+ for(i=0; i<pWC->nTerm; i++){
+ pTerm = &pWC->a[i];
+ if( pTerm->wtFlags & (TERM_VIRTUAL|TERM_CODED) ) continue;
+ if( pTerm->prereqAll & pLevel->notReady ) continue;
+
+ /* Any terms specified as part of the ON(...) clause for any LEFT
+ ** JOIN for which the current table is not the rhs are omitted
+ ** from the cursor-hint.
+ **
+ ** If this table is the rhs of a LEFT JOIN, "IS" or "IS NULL" terms
+ ** that were specified as part of the WHERE clause must be excluded.
+ ** This is to address the following:
+ **
+ ** SELECT ... t1 LEFT JOIN t2 ON (t1.a=t2.b) WHERE t2.c IS NULL;
+ **
+ ** Say there is a single row in t2 that matches (t1.a=t2.b), but its
+    ** t2.c value is not NULL. If the (t2.c IS NULL) constraint is
+    ** pushed down to the cursor, this row is filtered out, causing
+    ** SQLite to synthesize a row of NULL values. That row does match the
+    ** WHERE clause, so the query returns a row, which is incorrect.
+ **
+ ** For the same reason, WHERE terms such as:
+ **
+ ** WHERE 1 = (t2.c IS NULL)
+ **
+ ** are also excluded. See codeCursorHintIsOrFunction() for details.
+ */
+ if( pTabItem->fg.jointype & JT_LEFT ){
+ Expr *pExpr = pTerm->pExpr;
+ if( !ExprHasProperty(pExpr, EP_FromJoin)
+ || pExpr->iRightJoinTable!=pTabItem->iCursor
+ ){
+ sWalker.eCode = 0;
+ sWalker.xExprCallback = codeCursorHintIsOrFunction;
+ sqlite3WalkExpr(&sWalker, pTerm->pExpr);
+ if( sWalker.eCode ) continue;
+ }
+ }else{
+ if( ExprHasProperty(pTerm->pExpr, EP_FromJoin) ) continue;
+ }
+
+ /* All terms in pWLoop->aLTerm[] except pEndRange are used to initialize
+ ** the cursor. These terms are not needed as hints for a pure range
+ ** scan (that has no == terms) so omit them. */
+ if( pLoop->u.btree.nEq==0 && pTerm!=pEndRange ){
+ for(j=0; j<pLoop->nLTerm && pLoop->aLTerm[j]!=pTerm; j++){}
+ if( j<pLoop->nLTerm ) continue;
+ }
+
+ /* No subqueries or non-deterministic functions allowed */
+ if( sqlite3ExprContainsSubquery(pTerm->pExpr) ) continue;
+
+ /* For an index scan, make sure referenced columns are actually in
+ ** the index. */
+ if( sHint.pIdx!=0 ){
+ sWalker.eCode = 0;
+ sWalker.xExprCallback = codeCursorHintCheckExpr;
+ sqlite3WalkExpr(&sWalker, pTerm->pExpr);
+ if( sWalker.eCode ) continue;
+ }
+
+ /* If we survive all prior tests, that means this term is worth hinting */
+ pExpr = sqlite3ExprAnd(pParse, pExpr, sqlite3ExprDup(db, pTerm->pExpr, 0));
+ }
+ if( pExpr!=0 ){
+ sWalker.xExprCallback = codeCursorHintFixExpr;
+ sqlite3WalkExpr(&sWalker, pExpr);
+ sqlite3VdbeAddOp4(v, OP_CursorHint,
+ (sHint.pIdx ? sHint.iIdxCur : sHint.iTabCur), 0, 0,
+ (const char*)pExpr, P4_EXPR);
+ }
+}
+#else
+# define codeCursorHint(A,B,C,D) /* No-op */
+#endif /* SQLITE_ENABLE_CURSOR_HINTS */
+
+/*
+** Cursor iCur is open on an intkey b-tree (a table). Cursor iIdxCur is
+** open on index pIdx, and the current row of iIdxCur identifies the table
+** row that is wanted next. This function generates code to do a deferred
+** seek of cursor iCur to that row.
+**
+** Normally, this is just:
+**
+**   OP_DeferredSeek $iIdxCur 0 $iCur
+**
+** However, if the scan currently being coded is a branch of an OR-loop and
+** the statement currently being coded is a SELECT, then P4 of the
+** OP_DeferredSeek is also set to point to an array of integers containing
+** one entry for each column of the table cursor iCur is open on. For each
+** table column, if the column is the i'th column of the index, then the
+** corresponding array entry is set to (i+1). If the column does not
+** appear in the index at all, the array entry is set to 0.
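+**
+** As a worked illustration (hypothetical, not taken from any query): for
+** a table t(a,b,c,d) and an index on (c,a), and assuming no virtual
+** columns so that storage slots equal column numbers, the P4 array would
+** be {4, 2, 0, 1, 0} - entry 0 holds the column count, column "a" is the
+** 2nd column of the index, column "c" is the 1st, and columns "b" and
+** "d" do not appear in the index at all.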
+*/
+static void codeDeferredSeek(
+ WhereInfo *pWInfo, /* Where clause context */
+ Index *pIdx, /* Index scan is using */
+ int iCur, /* Cursor for IPK b-tree */
+ int iIdxCur /* Index cursor */
+){
+ Parse *pParse = pWInfo->pParse; /* Parse context */
+ Vdbe *v = pParse->pVdbe; /* Vdbe to generate code within */
+
+ assert( iIdxCur>0 );
+ assert( pIdx->aiColumn[pIdx->nColumn-1]==-1 );
+
+ pWInfo->bDeferredSeek = 1;
+ sqlite3VdbeAddOp3(v, OP_DeferredSeek, iIdxCur, 0, iCur);
+ if( (pWInfo->wctrlFlags & WHERE_OR_SUBCLAUSE)
+ && DbMaskAllZero(sqlite3ParseToplevel(pParse)->writeMask)
+ ){
+ int i;
+ Table *pTab = pIdx->pTable;
+ u32 *ai = (u32*)sqlite3DbMallocZero(pParse->db, sizeof(u32)*(pTab->nCol+1));
+ if( ai ){
+ ai[0] = pTab->nCol;
+ for(i=0; i<pIdx->nColumn-1; i++){
+ int x1, x2;
+ assert( pIdx->aiColumn[i]<pTab->nCol );
+ x1 = pIdx->aiColumn[i];
+ x2 = sqlite3TableColumnToStorage(pTab, x1);
+ testcase( x1!=x2 );
+ if( x1>=0 ) ai[x2+1] = i+1;
+ }
+ sqlite3VdbeChangeP4(v, -1, (char*)ai, P4_INTARRAY);
+ }
+ }
+}
+
+/*
+** If the expression passed as the second argument is a vector, generate
+** code to write the first nReg elements of the vector into an array
+** of registers starting with iReg.
+**
+** If the expression is not a vector, then nReg must be passed 1. In
+** this case, generate code to evaluate the expression and leave the
+** result in register iReg.
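+**
+** For example (illustrative only): when coding "(a,b) < (?1,?2)", the
+** vector RHS (?1,?2) is written into nReg==2 consecutive registers
+** starting at iReg, whereas a scalar RHS such as ?1 is evaluated into
+** the single register iReg with nReg==1.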
+*/
+static void codeExprOrVector(Parse *pParse, Expr *p, int iReg, int nReg){
+ assert( nReg>0 );
+ if( p && sqlite3ExprIsVector(p) ){
+#ifndef SQLITE_OMIT_SUBQUERY
+ if( (p->flags & EP_xIsSelect) ){
+ Vdbe *v = pParse->pVdbe;
+ int iSelect;
+ assert( p->op==TK_SELECT );
+ iSelect = sqlite3CodeSubselect(pParse, p);
+ sqlite3VdbeAddOp3(v, OP_Copy, iSelect, iReg, nReg-1);
+ }else
+#endif
+ {
+ int i;
+ ExprList *pList = p->x.pList;
+ assert( nReg<=pList->nExpr );
+ for(i=0; i<nReg; i++){
+ sqlite3ExprCode(pParse, pList->a[i].pExpr, iReg+i);
+ }
+ }
+ }else{
+ assert( nReg==1 );
+ sqlite3ExprCode(pParse, p, iReg);
+ }
+}
+
+/* An instance of the IdxExprTrans object carries, down through the
+** Walker, information about a mapping from an expression on table
+** columns to a column of an index.
+*/
+typedef struct IdxExprTrans {
+ Expr *pIdxExpr; /* The index expression */
+ int iTabCur; /* The cursor of the corresponding table */
+ int iIdxCur; /* The cursor for the index */
+ int iIdxCol; /* The column for the index */
+ int iTabCol; /* The column for the table */
+ WhereInfo *pWInfo; /* Complete WHERE clause information */
+ sqlite3 *db; /* Database connection (for malloc()) */
+} IdxExprTrans;
+
+/*
+** Preserve the original pExpr on the pExprMods list of the WhereInfo.
+*/
+static void preserveExpr(IdxExprTrans *pTrans, Expr *pExpr){
+ WhereExprMod *pNew;
+ pNew = sqlite3DbMallocRaw(pTrans->db, sizeof(*pNew));
+ if( pNew==0 ) return;
+ pNew->pNext = pTrans->pWInfo->pExprMods;
+ pTrans->pWInfo->pExprMods = pNew;
+ pNew->pExpr = pExpr;
+ memcpy(&pNew->orig, pExpr, sizeof(*pExpr));
+}
+
+/* The walker node callback used to transform matching expressions into
+** a reference to an index column for an index on an expression.
+**
+** If pExpr matches, then transform it into a reference to the index column
+** that contains the value of pExpr.
+*/
+static int whereIndexExprTransNode(Walker *p, Expr *pExpr){
+ IdxExprTrans *pX = p->u.pIdxTrans;
+ if( sqlite3ExprCompare(0, pExpr, pX->pIdxExpr, pX->iTabCur)==0 ){
+ preserveExpr(pX, pExpr);
+ pExpr->affExpr = sqlite3ExprAffinity(pExpr);
+ pExpr->op = TK_COLUMN;
+ pExpr->iTable = pX->iIdxCur;
+ pExpr->iColumn = pX->iIdxCol;
+ pExpr->y.pTab = 0;
+ testcase( ExprHasProperty(pExpr, EP_Skip) );
+ testcase( ExprHasProperty(pExpr, EP_Unlikely) );
+ ExprClearProperty(pExpr, EP_Skip|EP_Unlikely);
+ return WRC_Prune;
+ }else{
+ return WRC_Continue;
+ }
+}
+
+#ifndef SQLITE_OMIT_GENERATED_COLUMNS
+/* A walker node callback that translates a column reference to a table
+** into a corresponding column reference of an index.
+*/
+static int whereIndexExprTransColumn(Walker *p, Expr *pExpr){
+ if( pExpr->op==TK_COLUMN ){
+ IdxExprTrans *pX = p->u.pIdxTrans;
+ if( pExpr->iTable==pX->iTabCur && pExpr->iColumn==pX->iTabCol ){
+ assert( pExpr->y.pTab!=0 );
+ preserveExpr(pX, pExpr);
+ pExpr->affExpr = sqlite3TableColumnAffinity(pExpr->y.pTab,pExpr->iColumn);
+ pExpr->iTable = pX->iIdxCur;
+ pExpr->iColumn = pX->iIdxCol;
+ pExpr->y.pTab = 0;
+ }
+ }
+ return WRC_Continue;
+}
+#endif /* SQLITE_OMIT_GENERATED_COLUMNS */
+
+/*
+** For an index on expression X, locate every instance of expression X
+** in pExpr and change that subexpression into a reference to the appropriate
+** column of the index.
+**
+** 2019-10-24: Updated to also translate references to a VIRTUAL column in
+** the table into references to the corresponding (stored) column of the
+** index.
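+**
+** Illustrative example (hypothetical schema):
+**
+**    CREATE INDEX i1 ON t1(a+b);
+**    SELECT a+b FROM t1 WHERE a+b>10;
+**
+** Each occurrence of the expression "a+b" is rewritten into a TK_COLUMN
+** reference to the corresponding column of the index cursor, so the sum
+** is read from the index rather than recomputed from the table.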
+*/
+static void whereIndexExprTrans(
+ Index *pIdx, /* The Index */
+ int iTabCur, /* Cursor of the table that is being indexed */
+ int iIdxCur, /* Cursor of the index itself */
+ WhereInfo *pWInfo /* Transform expressions in this WHERE clause */
+){
+ int iIdxCol; /* Column number of the index */
+ ExprList *aColExpr; /* Expressions that are indexed */
+ Table *pTab;
+ Walker w;
+ IdxExprTrans x;
+ aColExpr = pIdx->aColExpr;
+ if( aColExpr==0 && !pIdx->bHasVCol ){
+ /* The index does not reference any expressions or virtual columns
+ ** so no translations are needed. */
+ return;
+ }
+ pTab = pIdx->pTable;
+ memset(&w, 0, sizeof(w));
+ w.u.pIdxTrans = &x;
+ x.iTabCur = iTabCur;
+ x.iIdxCur = iIdxCur;
+ x.pWInfo = pWInfo;
+ x.db = pWInfo->pParse->db;
+ for(iIdxCol=0; iIdxCol<pIdx->nColumn; iIdxCol++){
+ i16 iRef = pIdx->aiColumn[iIdxCol];
+ if( iRef==XN_EXPR ){
+ assert( aColExpr->a[iIdxCol].pExpr!=0 );
+ x.pIdxExpr = aColExpr->a[iIdxCol].pExpr;
+ if( sqlite3ExprIsConstant(x.pIdxExpr) ) continue;
+ w.xExprCallback = whereIndexExprTransNode;
+#ifndef SQLITE_OMIT_GENERATED_COLUMNS
+ }else if( iRef>=0
+ && (pTab->aCol[iRef].colFlags & COLFLAG_VIRTUAL)!=0
+ && (pTab->aCol[iRef].zColl==0
+ || sqlite3StrICmp(pTab->aCol[iRef].zColl, sqlite3StrBINARY)==0)
+ ){
+ /* Check to see if there are direct references to generated columns
+ ** that are contained in the index. Pulling the generated column
+ ** out of the index is an optimization only - the main table is always
+ ** available if the index cannot be used. To avoid unnecessary
+ ** complication, omit this optimization if the collating sequence for
+ ** the column is non-standard */
+ x.iTabCol = iRef;
+ w.xExprCallback = whereIndexExprTransColumn;
+#endif /* SQLITE_OMIT_GENERATED_COLUMNS */
+ }else{
+ continue;
+ }
+ x.iIdxCol = iIdxCol;
+ sqlite3WalkExpr(&w, pWInfo->pWhere);
+ sqlite3WalkExprList(&w, pWInfo->pOrderBy);
+ sqlite3WalkExprList(&w, pWInfo->pResultSet);
+ }
+}
+
+/*
+** The pTruth expression is always true because it is the WHERE clause
+** of a partial index that is driving a query loop. Look through all of the
+** WHERE clause terms on the query, and if any of those terms must be
+** true because pTruth is true, then mark those WHERE clause terms as
+** coded.
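+**
+** For example (hypothetical schema): if CREATE INDEX i1 ON t1(a) WHERE b>0
+** is driving the loop, then a "b>0" term in the query's WHERE clause is
+** necessarily true for every row the loop visits, so it can be marked as
+** coded and no test for it is generated in the loop body.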
+*/
+static void whereApplyPartialIndexConstraints(
+ Expr *pTruth,
+ int iTabCur,
+ WhereClause *pWC
+){
+ int i;
+ WhereTerm *pTerm;
+ while( pTruth->op==TK_AND ){
+ whereApplyPartialIndexConstraints(pTruth->pLeft, iTabCur, pWC);
+ pTruth = pTruth->pRight;
+ }
+ for(i=0, pTerm=pWC->a; i<pWC->nTerm; i++, pTerm++){
+ Expr *pExpr;
+ if( pTerm->wtFlags & TERM_CODED ) continue;
+ pExpr = pTerm->pExpr;
+ if( sqlite3ExprCompare(0, pExpr, pTruth, iTabCur)==0 ){
+ pTerm->wtFlags |= TERM_CODED;
+ }
+ }
+}
+
+/*
+** Generate code for the start of the iLevel-th loop in the WHERE clause
+** implementation described by pWInfo.
+*/
+SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart(
+ Parse *pParse, /* Parsing context */
+ Vdbe *v, /* Prepared statement under construction */
+ WhereInfo *pWInfo, /* Complete information about the WHERE clause */
+ int iLevel, /* Which level of pWInfo->a[] should be coded */
+ WhereLevel *pLevel, /* The current level pointer */
+ Bitmask notReady /* Which tables are currently available */
+){
+ int j, k; /* Loop counters */
+ int iCur; /* The VDBE cursor for the table */
+ int addrNxt; /* Where to jump to continue with the next IN case */
+ int bRev; /* True if we need to scan in reverse order */
+ WhereLoop *pLoop; /* The WhereLoop object being coded */
+ WhereClause *pWC; /* Decomposition of the entire WHERE clause */
+ WhereTerm *pTerm; /* A WHERE clause term */
+ sqlite3 *db; /* Database connection */
+ struct SrcList_item *pTabItem; /* FROM clause term being coded */
+ int addrBrk; /* Jump here to break out of the loop */
+ int addrHalt; /* addrBrk for the outermost loop */
+ int addrCont; /* Jump here to continue with next cycle */
+ int iRowidReg = 0; /* Rowid is stored in this register, if not zero */
+ int iReleaseReg = 0; /* Temp register to free before returning */
+ Index *pIdx = 0; /* Index used by loop (if any) */
+ int iLoop; /* Iteration of constraint generator loop */
+
+ pWC = &pWInfo->sWC;
+ db = pParse->db;
+ pLoop = pLevel->pWLoop;
+ pTabItem = &pWInfo->pTabList->a[pLevel->iFrom];
+ iCur = pTabItem->iCursor;
+ pLevel->notReady = notReady & ~sqlite3WhereGetMask(&pWInfo->sMaskSet, iCur);
+ bRev = (pWInfo->revMask>>iLevel)&1;
+ VdbeModuleComment((v, "Begin WHERE-loop%d: %s",iLevel,pTabItem->pTab->zName));
+#if WHERETRACE_ENABLED /* 0x20800 */
+ if( sqlite3WhereTrace & 0x800 ){
+ sqlite3DebugPrintf("Coding level %d of %d: notReady=%llx iFrom=%d\n",
+ iLevel, pWInfo->nLevel, (u64)notReady, pLevel->iFrom);
+ sqlite3WhereLoopPrint(pLoop, pWC);
+ }
+ if( sqlite3WhereTrace & 0x20000 ){
+ if( iLevel==0 ){
+ sqlite3DebugPrintf("WHERE clause being coded:\n");
+ sqlite3TreeViewExpr(0, pWInfo->pWhere, 0);
+ }
+ sqlite3DebugPrintf("All WHERE-clause terms before coding:\n");
+ sqlite3WhereClausePrint(pWC);
+ }
+#endif
+
+ /* Create labels for the "break" and "continue" instructions
+ ** for the current loop. Jump to addrBrk to break out of a loop.
+  ** Jump to addrCont to go immediately to the next iteration of the
+ ** loop.
+ **
+  ** When there is an IN operator, we also have an "addrNxt" label that
+ ** means to continue with the next IN value combination. When
+ ** there are no IN operators in the constraints, the "addrNxt" label
+ ** is the same as "addrBrk".
+ */
+ addrBrk = pLevel->addrBrk = pLevel->addrNxt = sqlite3VdbeMakeLabel(pParse);
+ addrCont = pLevel->addrCont = sqlite3VdbeMakeLabel(pParse);
+
+ /* If this is the right table of a LEFT OUTER JOIN, allocate and
+ ** initialize a memory cell that records if this table matches any
+ ** row of the left table of the join.
+ */
+ assert( (pWInfo->wctrlFlags & WHERE_OR_SUBCLAUSE)
+ || pLevel->iFrom>0 || (pTabItem[0].fg.jointype & JT_LEFT)==0
+ );
+ if( pLevel->iFrom>0 && (pTabItem[0].fg.jointype & JT_LEFT)!=0 ){
+ pLevel->iLeftJoin = ++pParse->nMem;
+ sqlite3VdbeAddOp2(v, OP_Integer, 0, pLevel->iLeftJoin);
+ VdbeComment((v, "init LEFT JOIN no-match flag"));
+ }
+
+ /* Compute a safe address to jump to if we discover that the table for
+ ** this loop is empty and can never contribute content. */
+ for(j=iLevel; j>0 && pWInfo->a[j].iLeftJoin==0; j--){}
+ addrHalt = pWInfo->a[j].addrBrk;
+
+ /* Special case of a FROM clause subquery implemented as a co-routine */
+ if( pTabItem->fg.viaCoroutine ){
+ int regYield = pTabItem->regReturn;
+ sqlite3VdbeAddOp3(v, OP_InitCoroutine, regYield, 0, pTabItem->addrFillSub);
+ pLevel->p2 = sqlite3VdbeAddOp2(v, OP_Yield, regYield, addrBrk);
+ VdbeCoverage(v);
+ VdbeComment((v, "next row of %s", pTabItem->pTab->zName));
+ pLevel->op = OP_Goto;
+ }else
+
+#ifndef SQLITE_OMIT_VIRTUALTABLE
+ if( (pLoop->wsFlags & WHERE_VIRTUALTABLE)!=0 ){
+    /* Case 1: The table is a virtual-table. Use the VFilter and VNext
+    ** opcodes to access the data.
+ */
+ int iReg; /* P3 Value for OP_VFilter */
+ int addrNotFound;
+ int nConstraint = pLoop->nLTerm;
+ int iIn; /* Counter for IN constraints */
+
+ iReg = sqlite3GetTempRange(pParse, nConstraint+2);
+ addrNotFound = pLevel->addrBrk;
+ for(j=0; j<nConstraint; j++){
+ int iTarget = iReg+j+2;
+ pTerm = pLoop->aLTerm[j];
+ if( NEVER(pTerm==0) ) continue;
+ if( pTerm->eOperator & WO_IN ){
+ codeEqualityTerm(pParse, pTerm, pLevel, j, bRev, iTarget);
+ addrNotFound = pLevel->addrNxt;
+ }else{
+ Expr *pRight = pTerm->pExpr->pRight;
+ codeExprOrVector(pParse, pRight, iTarget, 1);
+ }
+ }
+ sqlite3VdbeAddOp2(v, OP_Integer, pLoop->u.vtab.idxNum, iReg);
+ sqlite3VdbeAddOp2(v, OP_Integer, nConstraint, iReg+1);
+ sqlite3VdbeAddOp4(v, OP_VFilter, iCur, addrNotFound, iReg,
+ pLoop->u.vtab.idxStr,
+ pLoop->u.vtab.needFree ? P4_DYNAMIC : P4_STATIC);
+ VdbeCoverage(v);
+ pLoop->u.vtab.needFree = 0;
+ pLevel->p1 = iCur;
+ pLevel->op = pWInfo->eOnePass ? OP_Noop : OP_VNext;
+ pLevel->p2 = sqlite3VdbeCurrentAddr(v);
+ iIn = pLevel->u.in.nIn;
+ for(j=nConstraint-1; j>=0; j--){
+ pTerm = pLoop->aLTerm[j];
+ if( (pTerm->eOperator & WO_IN)!=0 ) iIn--;
+ if( j<16 && (pLoop->u.vtab.omitMask>>j)&1 ){
+ disableTerm(pLevel, pTerm);
+ }else if( (pTerm->eOperator & WO_IN)!=0
+ && sqlite3ExprVectorSize(pTerm->pExpr->pLeft)==1
+ ){
+ Expr *pCompare; /* The comparison operator */
+ Expr *pRight; /* RHS of the comparison */
+ VdbeOp *pOp; /* Opcode to access the value of the IN constraint */
+
+ /* Reload the constraint value into reg[iReg+j+2]. The same value
+ ** was loaded into the same register prior to the OP_VFilter, but
+ ** the xFilter implementation might have changed the datatype or
+ ** encoding of the value in the register, so it *must* be reloaded. */
+ assert( pLevel->u.in.aInLoop!=0 || db->mallocFailed );
+ if( !db->mallocFailed ){
+ assert( iIn>=0 && iIn<pLevel->u.in.nIn );
+ pOp = sqlite3VdbeGetOp(v, pLevel->u.in.aInLoop[iIn].addrInTop);
+ assert( pOp->opcode==OP_Column || pOp->opcode==OP_Rowid );
+ assert( pOp->opcode!=OP_Column || pOp->p3==iReg+j+2 );
+ assert( pOp->opcode!=OP_Rowid || pOp->p2==iReg+j+2 );
+ testcase( pOp->opcode==OP_Rowid );
+ sqlite3VdbeAddOp3(v, pOp->opcode, pOp->p1, pOp->p2, pOp->p3);
+ }
+
+ /* Generate code that will continue to the next row if
+ ** the IN constraint is not satisfied */
+ pCompare = sqlite3PExpr(pParse, TK_EQ, 0, 0);
+ assert( pCompare!=0 || db->mallocFailed );
+ if( pCompare ){
+ pCompare->pLeft = pTerm->pExpr->pLeft;
+ pCompare->pRight = pRight = sqlite3Expr(db, TK_REGISTER, 0);
+ if( pRight ){
+ pRight->iTable = iReg+j+2;
+ sqlite3ExprIfFalse(
+ pParse, pCompare, pLevel->addrCont, SQLITE_JUMPIFNULL
+ );
+ }
+ pCompare->pLeft = 0;
+ sqlite3ExprDelete(db, pCompare);
+ }
+ }
+ }
+ assert( iIn==0 || db->mallocFailed );
+ /* These registers need to be preserved in case there is an IN operator
+ ** loop. So we could deallocate the registers here (and potentially
+ ** reuse them later) if (pLoop->wsFlags & WHERE_IN_ABLE)==0. But it seems
+ ** simpler and safer to simply not reuse the registers.
+ **
+ ** sqlite3ReleaseTempRange(pParse, iReg, nConstraint+2);
+ */
+ }else
+#endif /* SQLITE_OMIT_VIRTUALTABLE */
+
+ if( (pLoop->wsFlags & WHERE_IPK)!=0
+ && (pLoop->wsFlags & (WHERE_COLUMN_IN|WHERE_COLUMN_EQ))!=0
+ ){
+ /* Case 2: We can directly reference a single row using an
+ ** equality comparison against the ROWID field. Or
+ ** we reference multiple rows using a "rowid IN (...)"
+ ** construct.
+ */
+ assert( pLoop->u.btree.nEq==1 );
+ pTerm = pLoop->aLTerm[0];
+ assert( pTerm!=0 );
+ assert( pTerm->pExpr!=0 );
+ testcase( pTerm->wtFlags & TERM_VIRTUAL );
+ iReleaseReg = ++pParse->nMem;
+ iRowidReg = codeEqualityTerm(pParse, pTerm, pLevel, 0, bRev, iReleaseReg);
+ if( iRowidReg!=iReleaseReg ) sqlite3ReleaseTempReg(pParse, iReleaseReg);
+ addrNxt = pLevel->addrNxt;
+ sqlite3VdbeAddOp3(v, OP_SeekRowid, iCur, addrNxt, iRowidReg);
+ VdbeCoverage(v);
+ pLevel->op = OP_Noop;
+ if( (pTerm->prereqAll & pLevel->notReady)==0 ){
+ pTerm->wtFlags |= TERM_CODED;
+ }
+ }else if( (pLoop->wsFlags & WHERE_IPK)!=0
+ && (pLoop->wsFlags & WHERE_COLUMN_RANGE)!=0
+ ){
+ /* Case 3: We have an inequality comparison against the ROWID field.
+ */
+ int testOp = OP_Noop;
+ int start;
+ int memEndValue = 0;
+ WhereTerm *pStart, *pEnd;
+
+ j = 0;
+ pStart = pEnd = 0;
+ if( pLoop->wsFlags & WHERE_BTM_LIMIT ) pStart = pLoop->aLTerm[j++];
+ if( pLoop->wsFlags & WHERE_TOP_LIMIT ) pEnd = pLoop->aLTerm[j++];
+ assert( pStart!=0 || pEnd!=0 );
+ if( bRev ){
+ pTerm = pStart;
+ pStart = pEnd;
+ pEnd = pTerm;
+ }
+ codeCursorHint(pTabItem, pWInfo, pLevel, pEnd);
+ if( pStart ){
+ Expr *pX; /* The expression that defines the start bound */
+ int r1, rTemp; /* Registers for holding the start boundary */
+ int op; /* Cursor seek operation */
+
+      /* The following constant array maps TK_xx codes into corresponding
+      ** seek opcodes. It depends on a particular ordering of the TK_xx codes.
+ */
+ const u8 aMoveOp[] = {
+ /* TK_GT */ OP_SeekGT,
+ /* TK_LE */ OP_SeekLE,
+ /* TK_LT */ OP_SeekLT,
+ /* TK_GE */ OP_SeekGE
+ };
+      assert( TK_LE==TK_GT+1 );      /* Make sure the ordering... */
+      assert( TK_LT==TK_GT+2 );      /*  ... of the TK_xx values... */
+      assert( TK_GE==TK_GT+3 );      /*  ... is correct. */
+
+ assert( (pStart->wtFlags & TERM_VNULL)==0 );
+ testcase( pStart->wtFlags & TERM_VIRTUAL );
+ pX = pStart->pExpr;
+ assert( pX!=0 );
+ testcase( pStart->leftCursor!=iCur ); /* transitive constraints */
+ if( sqlite3ExprIsVector(pX->pRight) ){
+ r1 = rTemp = sqlite3GetTempReg(pParse);
+ codeExprOrVector(pParse, pX->pRight, r1, 1);
+ testcase( pX->op==TK_GT );
+ testcase( pX->op==TK_GE );
+ testcase( pX->op==TK_LT );
+ testcase( pX->op==TK_LE );
+ op = aMoveOp[((pX->op - TK_GT - 1) & 0x3) | 0x1];
+ assert( pX->op!=TK_GT || op==OP_SeekGE );
+ assert( pX->op!=TK_GE || op==OP_SeekGE );
+ assert( pX->op!=TK_LT || op==OP_SeekLE );
+ assert( pX->op!=TK_LE || op==OP_SeekLE );
+ }else{
+ r1 = sqlite3ExprCodeTemp(pParse, pX->pRight, &rTemp);
+ disableTerm(pLevel, pStart);
+ op = aMoveOp[(pX->op - TK_GT)];
+ }
+ sqlite3VdbeAddOp3(v, op, iCur, addrBrk, r1);
+ VdbeComment((v, "pk"));
+ VdbeCoverageIf(v, pX->op==TK_GT);
+ VdbeCoverageIf(v, pX->op==TK_LE);
+ VdbeCoverageIf(v, pX->op==TK_LT);
+ VdbeCoverageIf(v, pX->op==TK_GE);
+ sqlite3ReleaseTempReg(pParse, rTemp);
+ }else{
+ sqlite3VdbeAddOp2(v, bRev ? OP_Last : OP_Rewind, iCur, addrHalt);
+ VdbeCoverageIf(v, bRev==0);
+ VdbeCoverageIf(v, bRev!=0);
+ }
+ if( pEnd ){
+ Expr *pX;
+ pX = pEnd->pExpr;
+ assert( pX!=0 );
+ assert( (pEnd->wtFlags & TERM_VNULL)==0 );
+ testcase( pEnd->leftCursor!=iCur ); /* Transitive constraints */
+ testcase( pEnd->wtFlags & TERM_VIRTUAL );
+ memEndValue = ++pParse->nMem;
+ codeExprOrVector(pParse, pX->pRight, memEndValue, 1);
+ if( 0==sqlite3ExprIsVector(pX->pRight)
+ && (pX->op==TK_LT || pX->op==TK_GT)
+ ){
+ testOp = bRev ? OP_Le : OP_Ge;
+ }else{
+ testOp = bRev ? OP_Lt : OP_Gt;
+ }
+ if( 0==sqlite3ExprIsVector(pX->pRight) ){
+ disableTerm(pLevel, pEnd);
+ }
+ }
+ start = sqlite3VdbeCurrentAddr(v);
+ pLevel->op = bRev ? OP_Prev : OP_Next;
+ pLevel->p1 = iCur;
+ pLevel->p2 = start;
+ assert( pLevel->p5==0 );
+ if( testOp!=OP_Noop ){
+ iRowidReg = ++pParse->nMem;
+ sqlite3VdbeAddOp2(v, OP_Rowid, iCur, iRowidReg);
+ sqlite3VdbeAddOp3(v, testOp, memEndValue, addrBrk, iRowidReg);
+ VdbeCoverageIf(v, testOp==OP_Le);
+ VdbeCoverageIf(v, testOp==OP_Lt);
+ VdbeCoverageIf(v, testOp==OP_Ge);
+ VdbeCoverageIf(v, testOp==OP_Gt);
+ sqlite3VdbeChangeP5(v, SQLITE_AFF_NUMERIC | SQLITE_JUMPIFNULL);
+ }
+ }else if( pLoop->wsFlags & WHERE_INDEXED ){
+ /* Case 4: A scan using an index.
+ **
+ ** The WHERE clause may contain zero or more equality
+ ** terms ("==" or "IN" operators) that refer to the N
+ ** left-most columns of the index. It may also contain
+ ** inequality constraints (>, <, >= or <=) on the indexed
+ ** column that immediately follows the N equalities. Only
+ ** the right-most column can be an inequality - the rest must
+ ** use the "==" and "IN" operators. For example, if the
+ ** index is on (x,y,z), then the following clauses are all
+ ** optimized:
+ **
+ ** x=5
+ ** x=5 AND y=10
+ ** x=5 AND y<10
+ ** x=5 AND y>5 AND y<10
+ ** x=5 AND y=5 AND z<=10
+ **
+ ** The z<10 term of the following cannot be used, only
+ ** the x=5 term:
+ **
+ ** x=5 AND z<10
+ **
+ ** N may be zero if there are inequality constraints.
+ ** If there are no inequality constraints, then N is at
+ ** least one.
+ **
+ ** This case is also used when there are no WHERE clause
+ ** constraints but an index is selected anyway, in order
+ ** to force the output order to conform to an ORDER BY.
+ */
+ static const u8 aStartOp[] = {
+ 0,
+ 0,
+ OP_Rewind, /* 2: (!start_constraints && startEq && !bRev) */
+ OP_Last, /* 3: (!start_constraints && startEq && bRev) */
+ OP_SeekGT, /* 4: (start_constraints && !startEq && !bRev) */
+ OP_SeekLT, /* 5: (start_constraints && !startEq && bRev) */
+ OP_SeekGE, /* 6: (start_constraints && startEq && !bRev) */
+ OP_SeekLE /* 7: (start_constraints && startEq && bRev) */
+ };
+ static const u8 aEndOp[] = {
+ OP_IdxGE, /* 0: (end_constraints && !bRev && !endEq) */
+ OP_IdxGT, /* 1: (end_constraints && !bRev && endEq) */
+ OP_IdxLE, /* 2: (end_constraints && bRev && !endEq) */
+ OP_IdxLT, /* 3: (end_constraints && bRev && endEq) */
+ };
+ u16 nEq = pLoop->u.btree.nEq; /* Number of == or IN terms */
+ u16 nBtm = pLoop->u.btree.nBtm; /* Length of BTM vector */
+ u16 nTop = pLoop->u.btree.nTop; /* Length of TOP vector */
+ int regBase; /* Base register holding constraint values */
+ WhereTerm *pRangeStart = 0; /* Inequality constraint at range start */
+ WhereTerm *pRangeEnd = 0; /* Inequality constraint at range end */
+ int startEq; /* True if range start uses ==, >= or <= */
+ int endEq; /* True if range end uses ==, >= or <= */
+ int start_constraints; /* Start of range is constrained */
+ int nConstraint; /* Number of constraint terms */
+ int iIdxCur; /* The VDBE cursor for the index */
+ int nExtraReg = 0; /* Number of extra registers needed */
+ int op; /* Instruction opcode */
+ char *zStartAff; /* Affinity for start of range constraint */
+ char *zEndAff = 0; /* Affinity for end of range constraint */
+ u8 bSeekPastNull = 0; /* True to seek past initial nulls */
+ u8 bStopAtNull = 0; /* Add condition to terminate at NULLs */
+ int omitTable; /* True if we use the index only */
+ int regBignull = 0; /* big-null flag register */
+
+ pIdx = pLoop->u.btree.pIndex;
+ iIdxCur = pLevel->iIdxCur;
+ assert( nEq>=pLoop->nSkip );
+
+ /* Find any inequality constraint terms for the start and end
+ ** of the range.
+ */
+ j = nEq;
+ if( pLoop->wsFlags & WHERE_BTM_LIMIT ){
+ pRangeStart = pLoop->aLTerm[j++];
+ nExtraReg = MAX(nExtraReg, pLoop->u.btree.nBtm);
+      /* LIKE-optimization range constraints always occur in pairs */
+ assert( (pRangeStart->wtFlags & TERM_LIKEOPT)==0 ||
+ (pLoop->wsFlags & WHERE_TOP_LIMIT)!=0 );
+ }
+ if( pLoop->wsFlags & WHERE_TOP_LIMIT ){
+ pRangeEnd = pLoop->aLTerm[j++];
+ nExtraReg = MAX(nExtraReg, pLoop->u.btree.nTop);
+#ifndef SQLITE_LIKE_DOESNT_MATCH_BLOBS
+ if( (pRangeEnd->wtFlags & TERM_LIKEOPT)!=0 ){
+ assert( pRangeStart!=0 ); /* LIKE opt constraints */
+ assert( pRangeStart->wtFlags & TERM_LIKEOPT ); /* occur in pairs */
+ pLevel->iLikeRepCntr = (u32)++pParse->nMem;
+ sqlite3VdbeAddOp2(v, OP_Integer, 1, (int)pLevel->iLikeRepCntr);
+ VdbeComment((v, "LIKE loop counter"));
+ pLevel->addrLikeRep = sqlite3VdbeCurrentAddr(v);
+ /* iLikeRepCntr actually stores 2x the counter register number. The
+ ** bottom bit indicates whether the search order is ASC or DESC. */
+ testcase( bRev );
+ testcase( pIdx->aSortOrder[nEq]==SQLITE_SO_DESC );
+ assert( (bRev & ~1)==0 );
+ pLevel->iLikeRepCntr <<=1;
+ pLevel->iLikeRepCntr |= bRev ^ (pIdx->aSortOrder[nEq]==SQLITE_SO_DESC);
+ }
+#endif
+ if( pRangeStart==0 ){
+ j = pIdx->aiColumn[nEq];
+ if( (j>=0 && pIdx->pTable->aCol[j].notNull==0) || j==XN_EXPR ){
+ bSeekPastNull = 1;
+ }
+ }
+ }
+ assert( pRangeEnd==0 || (pRangeEnd->wtFlags & TERM_VNULL)==0 );
+
+ /* If the WHERE_BIGNULL_SORT flag is set, then index column nEq uses
+ ** a non-default "big-null" sort (either ASC NULLS LAST or DESC NULLS
+ ** FIRST). In both cases separate ordered scans are made of those
+ ** index entries for which the column is null and for those for which
+ ** it is not. For an ASC sort, the non-NULL entries are scanned first.
+ ** For DESC, NULL entries are scanned first.
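+    **
+    ** For example (illustrative): with ORDER BY x ASC NULLS LAST, the
+    ** loop first scans the index entries having x NOT NULL in ascending
+    ** order and then makes a second pass over the entries where x IS NULL.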
+ */
+ if( (pLoop->wsFlags & (WHERE_TOP_LIMIT|WHERE_BTM_LIMIT))==0
+ && (pLoop->wsFlags & WHERE_BIGNULL_SORT)!=0
+ ){
+ assert( bSeekPastNull==0 && nExtraReg==0 && nBtm==0 && nTop==0 );
+ assert( pRangeEnd==0 && pRangeStart==0 );
+ testcase( pLoop->nSkip>0 );
+ nExtraReg = 1;
+ bSeekPastNull = 1;
+ pLevel->regBignull = regBignull = ++pParse->nMem;
+ if( pLevel->iLeftJoin ){
+ sqlite3VdbeAddOp2(v, OP_Integer, 0, regBignull);
+ }
+ pLevel->addrBignull = sqlite3VdbeMakeLabel(pParse);
+ }
+
+ /* If we are doing a reverse order scan on an ascending index, or
+ ** a forward order scan on a descending index, interchange the
+ ** start and end terms (pRangeStart and pRangeEnd).
+ */
+ if( (nEq<pIdx->nKeyCol && bRev==(pIdx->aSortOrder[nEq]==SQLITE_SO_ASC))
+ || (bRev && pIdx->nKeyCol==nEq)
+ ){
+ SWAP(WhereTerm *, pRangeEnd, pRangeStart);
+ SWAP(u8, bSeekPastNull, bStopAtNull);
+ SWAP(u8, nBtm, nTop);
+ }
+
+ /* Generate code to evaluate all constraint terms using == or IN
+ ** and store the values of those terms in an array of registers
+ ** starting at regBase.
+ */
+ codeCursorHint(pTabItem, pWInfo, pLevel, pRangeEnd);
+ regBase = codeAllEqualityTerms(pParse,pLevel,bRev,nExtraReg,&zStartAff);
+ assert( zStartAff==0 || sqlite3Strlen30(zStartAff)>=nEq );
+ if( zStartAff && nTop ){
+ zEndAff = sqlite3DbStrDup(db, &zStartAff[nEq]);
+ }
+ addrNxt = (regBignull ? pLevel->addrBignull : pLevel->addrNxt);
+
+ testcase( pRangeStart && (pRangeStart->eOperator & WO_LE)!=0 );
+ testcase( pRangeStart && (pRangeStart->eOperator & WO_GE)!=0 );
+ testcase( pRangeEnd && (pRangeEnd->eOperator & WO_LE)!=0 );
+ testcase( pRangeEnd && (pRangeEnd->eOperator & WO_GE)!=0 );
+ startEq = !pRangeStart || pRangeStart->eOperator & (WO_LE|WO_GE);
+ endEq = !pRangeEnd || pRangeEnd->eOperator & (WO_LE|WO_GE);
+ start_constraints = pRangeStart || nEq>0;
+
+ /* Seek the index cursor to the start of the range. */
+ nConstraint = nEq;
+ if( pRangeStart ){
+ Expr *pRight = pRangeStart->pExpr->pRight;
+ codeExprOrVector(pParse, pRight, regBase+nEq, nBtm);
+ whereLikeOptimizationStringFixup(v, pLevel, pRangeStart);
+ if( (pRangeStart->wtFlags & TERM_VNULL)==0
+ && sqlite3ExprCanBeNull(pRight)
+ ){
+ sqlite3VdbeAddOp2(v, OP_IsNull, regBase+nEq, addrNxt);
+ VdbeCoverage(v);
+ }
+ if( zStartAff ){
+ updateRangeAffinityStr(pRight, nBtm, &zStartAff[nEq]);
+ }
+ nConstraint += nBtm;
+ testcase( pRangeStart->wtFlags & TERM_VIRTUAL );
+ if( sqlite3ExprIsVector(pRight)==0 ){
+ disableTerm(pLevel, pRangeStart);
+ }else{
+ startEq = 1;
+ }
+ bSeekPastNull = 0;
+ }else if( bSeekPastNull ){
+ startEq = 0;
+ sqlite3VdbeAddOp2(v, OP_Null, 0, regBase+nEq);
+ start_constraints = 1;
+ nConstraint++;
+ }else if( regBignull ){
+ sqlite3VdbeAddOp2(v, OP_Null, 0, regBase+nEq);
+ start_constraints = 1;
+ nConstraint++;
+ }
+ codeApplyAffinity(pParse, regBase, nConstraint - bSeekPastNull, zStartAff);
+ if( pLoop->nSkip>0 && nConstraint==pLoop->nSkip ){
+      /* The skip-scan logic inside the call to codeAllEqualityTerms()
+ ** above has already left the cursor sitting on the correct row,
+ ** so no further seeking is needed */
+ }else{
+ if( pLoop->wsFlags & WHERE_IN_EARLYOUT ){
+ sqlite3VdbeAddOp1(v, OP_SeekHit, iIdxCur);
+ }
+ if( regBignull ){
+ sqlite3VdbeAddOp2(v, OP_Integer, 1, regBignull);
+ VdbeComment((v, "NULL-scan pass ctr"));
+ }
+
+ op = aStartOp[(start_constraints<<2) + (startEq<<1) + bRev];
+ assert( op!=0 );
+ sqlite3VdbeAddOp4Int(v, op, iIdxCur, addrNxt, regBase, nConstraint);
+ VdbeCoverage(v);
+ VdbeCoverageIf(v, op==OP_Rewind); testcase( op==OP_Rewind );
+ VdbeCoverageIf(v, op==OP_Last); testcase( op==OP_Last );
+ VdbeCoverageIf(v, op==OP_SeekGT); testcase( op==OP_SeekGT );
+ VdbeCoverageIf(v, op==OP_SeekGE); testcase( op==OP_SeekGE );
+ VdbeCoverageIf(v, op==OP_SeekLE); testcase( op==OP_SeekLE );
+ VdbeCoverageIf(v, op==OP_SeekLT); testcase( op==OP_SeekLT );
+
+ assert( bSeekPastNull==0 || bStopAtNull==0 );
+ if( regBignull ){
+ assert( bSeekPastNull==1 || bStopAtNull==1 );
+ assert( bSeekPastNull==!bStopAtNull );
+ assert( bStopAtNull==startEq );
+ sqlite3VdbeAddOp2(v, OP_Goto, 0, sqlite3VdbeCurrentAddr(v)+2);
+ op = aStartOp[(nConstraint>1)*4 + 2 + bRev];
+ sqlite3VdbeAddOp4Int(v, op, iIdxCur, addrNxt, regBase,
+ nConstraint-startEq);
+ VdbeCoverage(v);
+ VdbeCoverageIf(v, op==OP_Rewind); testcase( op==OP_Rewind );
+ VdbeCoverageIf(v, op==OP_Last); testcase( op==OP_Last );
+ VdbeCoverageIf(v, op==OP_SeekGE); testcase( op==OP_SeekGE );
+ VdbeCoverageIf(v, op==OP_SeekLE); testcase( op==OP_SeekLE );
+ assert( op==OP_Rewind || op==OP_Last || op==OP_SeekGE || op==OP_SeekLE);
+ }
+ }
+
+ /* Load the value for the inequality constraint at the end of the
+ ** range (if any).
+ */
+ nConstraint = nEq;
+ if( pRangeEnd ){
+ Expr *pRight = pRangeEnd->pExpr->pRight;
+ codeExprOrVector(pParse, pRight, regBase+nEq, nTop);
+ whereLikeOptimizationStringFixup(v, pLevel, pRangeEnd);
+ if( (pRangeEnd->wtFlags & TERM_VNULL)==0
+ && sqlite3ExprCanBeNull(pRight)
+ ){
+ sqlite3VdbeAddOp2(v, OP_IsNull, regBase+nEq, addrNxt);
+ VdbeCoverage(v);
+ }
+ if( zEndAff ){
+ updateRangeAffinityStr(pRight, nTop, zEndAff);
+ codeApplyAffinity(pParse, regBase+nEq, nTop, zEndAff);
+ }else{
+ assert( pParse->db->mallocFailed );
+ }
+ nConstraint += nTop;
+ testcase( pRangeEnd->wtFlags & TERM_VIRTUAL );
+
+ if( sqlite3ExprIsVector(pRight)==0 ){
+ disableTerm(pLevel, pRangeEnd);
+ }else{
+ endEq = 1;
+ }
+ }else if( bStopAtNull ){
+ if( regBignull==0 ){
+ sqlite3VdbeAddOp2(v, OP_Null, 0, regBase+nEq);
+ endEq = 0;
+ }
+ nConstraint++;
+ }
+ sqlite3DbFree(db, zStartAff);
+ sqlite3DbFree(db, zEndAff);
+
+ /* Top of the loop body */
+ pLevel->p2 = sqlite3VdbeCurrentAddr(v);
+
+ /* Check if the index cursor is past the end of the range. */
+ if( nConstraint ){
+ if( regBignull ){
+ /* Except, skip the end-of-range check while doing the NULL-scan */
+ sqlite3VdbeAddOp2(v, OP_IfNot, regBignull, sqlite3VdbeCurrentAddr(v)+3);
+ VdbeComment((v, "If NULL-scan 2nd pass"));
+ VdbeCoverage(v);
+ }
+ op = aEndOp[bRev*2 + endEq];
+ sqlite3VdbeAddOp4Int(v, op, iIdxCur, addrNxt, regBase, nConstraint);
+ testcase( op==OP_IdxGT ); VdbeCoverageIf(v, op==OP_IdxGT );
+ testcase( op==OP_IdxGE ); VdbeCoverageIf(v, op==OP_IdxGE );
+ testcase( op==OP_IdxLT ); VdbeCoverageIf(v, op==OP_IdxLT );
+ testcase( op==OP_IdxLE ); VdbeCoverageIf(v, op==OP_IdxLE );
+ }
+ if( regBignull ){
+ /* During a NULL-scan, check to see if we have reached the end of
+ ** the NULLs */
+ assert( bSeekPastNull==!bStopAtNull );
+ assert( bSeekPastNull+bStopAtNull==1 );
+ assert( nConstraint+bSeekPastNull>0 );
+ sqlite3VdbeAddOp2(v, OP_If, regBignull, sqlite3VdbeCurrentAddr(v)+2);
+ VdbeComment((v, "If NULL-scan 1st pass"));
+ VdbeCoverage(v);
+ op = aEndOp[bRev*2 + bSeekPastNull];
+ sqlite3VdbeAddOp4Int(v, op, iIdxCur, addrNxt, regBase,
+ nConstraint+bSeekPastNull);
+ testcase( op==OP_IdxGT ); VdbeCoverageIf(v, op==OP_IdxGT );
+ testcase( op==OP_IdxGE ); VdbeCoverageIf(v, op==OP_IdxGE );
+ testcase( op==OP_IdxLT ); VdbeCoverageIf(v, op==OP_IdxLT );
+ testcase( op==OP_IdxLE ); VdbeCoverageIf(v, op==OP_IdxLE );
+ }
+
+ if( pLoop->wsFlags & WHERE_IN_EARLYOUT ){
+ sqlite3VdbeAddOp2(v, OP_SeekHit, iIdxCur, 1);
+ }
+
+ /* Seek the table cursor, if required */
+ omitTable = (pLoop->wsFlags & WHERE_IDX_ONLY)!=0
+ && (pWInfo->wctrlFlags & WHERE_OR_SUBCLAUSE)==0;
+ if( omitTable ){
+ /* pIdx is a covering index. No need to access the main table. */
+ }else if( HasRowid(pIdx->pTable) ){
+ if( (pWInfo->wctrlFlags & WHERE_SEEK_TABLE)
+ || ( (pWInfo->wctrlFlags & WHERE_SEEK_UNIQ_TABLE)!=0
+ && (pWInfo->eOnePass==ONEPASS_SINGLE || pLoop->nLTerm==0) )
+ ){
+ iRowidReg = ++pParse->nMem;
+ sqlite3VdbeAddOp2(v, OP_IdxRowid, iIdxCur, iRowidReg);
+ sqlite3VdbeAddOp3(v, OP_NotExists, iCur, 0, iRowidReg);
+ VdbeCoverage(v);
+ }else{
+ codeDeferredSeek(pWInfo, pIdx, iCur, iIdxCur);
+ }
+ }else if( iCur!=iIdxCur ){
+ Index *pPk = sqlite3PrimaryKeyIndex(pIdx->pTable);
+ iRowidReg = sqlite3GetTempRange(pParse, pPk->nKeyCol);
+ for(j=0; j<pPk->nKeyCol; j++){
+ k = sqlite3TableColumnToIndex(pIdx, pPk->aiColumn[j]);
+ sqlite3VdbeAddOp3(v, OP_Column, iIdxCur, k, iRowidReg+j);
+ }
+ sqlite3VdbeAddOp4Int(v, OP_NotFound, iCur, addrCont,
+ iRowidReg, pPk->nKeyCol); VdbeCoverage(v);
+ }
+
+ if( pLevel->iLeftJoin==0 ){
+ /* If pIdx is an index on one or more expressions, then look through
+ ** all the expressions in pWInfo and try to transform matching expressions
+      ** into references to index columns. Also attempt to translate references
+ ** to virtual columns in the table into references to (stored) columns
+ ** of the index.
+ **
+ ** Do not do this for the RHS of a LEFT JOIN. This is because the
+ ** expression may be evaluated after OP_NullRow has been executed on
+ ** the cursor. In this case it is important to do the full evaluation,
+ ** as the result of the expression may not be NULL, even if all table
+ ** column values are. https://www.sqlite.org/src/info/7fa8049685b50b5a
+ **
+      ** Also, do not do this when processing one index of a multi-index
+ ** OR clause, since the transformation will become invalid once we
+ ** move forward to the next index.
+ ** https://sqlite.org/src/info/4e8e4857d32d401f
+ */
+ if( (pWInfo->wctrlFlags & WHERE_OR_SUBCLAUSE)==0 ){
+ whereIndexExprTrans(pIdx, iCur, iIdxCur, pWInfo);
+ }
+
+ /* If a partial index is driving the loop, try to eliminate WHERE clause
+ ** terms from the query that must be true due to the WHERE clause of
+ ** the partial index.
+ **
+ ** 2019-11-02 ticket 623eff57e76d45f6: This optimization does not work
+ ** for a LEFT JOIN.
+ */
+ if( pIdx->pPartIdxWhere ){
+ whereApplyPartialIndexConstraints(pIdx->pPartIdxWhere, iCur, pWC);
+ }
+ }else{
+ testcase( pIdx->pPartIdxWhere );
+ /* The following assert() is not a requirement, merely an observation:
+ ** The OR-optimization doesn't work for the right hand table of
+ ** a LEFT JOIN: */
+ assert( (pWInfo->wctrlFlags & WHERE_OR_SUBCLAUSE)==0 );
+ }
+
+ /* Record the instruction used to terminate the loop. */
+ if( pLoop->wsFlags & WHERE_ONEROW ){
+ pLevel->op = OP_Noop;
+ }else if( bRev ){
+ pLevel->op = OP_Prev;
+ }else{
+ pLevel->op = OP_Next;
+ }
+ pLevel->p1 = iIdxCur;
+ pLevel->p3 = (pLoop->wsFlags&WHERE_UNQ_WANTED)!=0 ? 1:0;
+ if( (pLoop->wsFlags & WHERE_CONSTRAINT)==0 ){
+ pLevel->p5 = SQLITE_STMTSTATUS_FULLSCAN_STEP;
+ }else{
+ assert( pLevel->p5==0 );
+ }
+ if( omitTable ) pIdx = 0;
+ }else
+
+#ifndef SQLITE_OMIT_OR_OPTIMIZATION
+ if( pLoop->wsFlags & WHERE_MULTI_OR ){
+ /* Case 5: Two or more separately indexed terms connected by OR
+ **
+ ** Example:
+ **
+ ** CREATE TABLE t1(a,b,c,d);
+ ** CREATE INDEX i1 ON t1(a);
+ ** CREATE INDEX i2 ON t1(b);
+ ** CREATE INDEX i3 ON t1(c);
+ **
+ ** SELECT * FROM t1 WHERE a=5 OR b=7 OR (c=11 AND d=13)
+ **
+ ** In the example, there are three indexed terms connected by OR.
+ ** The top of the loop looks like this:
+ **
+ ** Null 1 # Zero the rowset in reg 1
+ **
+ ** Then, for each indexed term, the following. The arguments to
+ ** RowSetTest are such that the rowid of the current row is inserted
+ ** into the RowSet. If it is already present, control skips the
+ ** Gosub opcode and jumps straight to the code generated by WhereEnd().
+ **
+ ** sqlite3WhereBegin(<term>)
+ ** RowSetTest # Insert rowid into rowset
+ ** Gosub 2 A
+ ** sqlite3WhereEnd()
+ **
+ ** Following the above, code to terminate the loop. Label A, the target
+ ** of the Gosub above, jumps to the instruction right after the Goto.
+ **
+ ** Null 1 # Zero the rowset in reg 1
+ ** Goto B # The loop is finished.
+ **
+ ** A: <loop body> # Return data, whatever.
+ **
+ ** Return 2 # Jump back to the Gosub
+ **
+ ** B: <after the loop>
+ **
+ ** Added 2014-05-26: If the table is a WITHOUT ROWID table, then
+ ** use an ephemeral index instead of a RowSet to record the primary
+ ** keys of the rows we have already seen.
+ **
+ */
+ WhereClause *pOrWc; /* The OR-clause broken out into subterms */
+    SrcList *pOrTab;                 /* Shortened table list for OR-clause generation */
+ Index *pCov = 0; /* Potential covering index (or NULL) */
+ int iCovCur = pParse->nTab++; /* Cursor used for index scans (if any) */
+
+ int regReturn = ++pParse->nMem; /* Register used with OP_Gosub */
+ int regRowset = 0; /* Register for RowSet object */
+ int regRowid = 0; /* Register holding rowid */
+ int iLoopBody = sqlite3VdbeMakeLabel(pParse);/* Start of loop body */
+ int iRetInit; /* Address of regReturn init */
+ int untestedTerms = 0; /* Some terms not completely tested */
+ int ii; /* Loop counter */
+ u16 wctrlFlags; /* Flags for sub-WHERE clause */
+ Expr *pAndExpr = 0; /* An ".. AND (...)" expression */
+ Table *pTab = pTabItem->pTab;
+
+ pTerm = pLoop->aLTerm[0];
+ assert( pTerm!=0 );
+ assert( pTerm->eOperator & WO_OR );
+ assert( (pTerm->wtFlags & TERM_ORINFO)!=0 );
+ pOrWc = &pTerm->u.pOrInfo->wc;
+ pLevel->op = OP_Return;
+ pLevel->p1 = regReturn;
+
+ /* Set up a new SrcList in pOrTab containing the table being scanned
+ ** by this loop in the a[0] slot and all notReady tables in a[1..] slots.
+ ** This becomes the SrcList in the recursive call to sqlite3WhereBegin().
+ */
+ if( pWInfo->nLevel>1 ){
+ int nNotReady; /* The number of notReady tables */
+ struct SrcList_item *origSrc; /* Original list of tables */
+ nNotReady = pWInfo->nLevel - iLevel - 1;
+ pOrTab = sqlite3StackAllocRaw(db,
+ sizeof(*pOrTab)+ nNotReady*sizeof(pOrTab->a[0]));
+ if( pOrTab==0 ) return notReady;
+ pOrTab->nAlloc = (u8)(nNotReady + 1);
+ pOrTab->nSrc = pOrTab->nAlloc;
+ memcpy(pOrTab->a, pTabItem, sizeof(*pTabItem));
+ origSrc = pWInfo->pTabList->a;
+ for(k=1; k<=nNotReady; k++){
+ memcpy(&pOrTab->a[k], &origSrc[pLevel[k].iFrom], sizeof(pOrTab->a[k]));
+ }
+ }else{
+ pOrTab = pWInfo->pTabList;
+ }
+
+ /* Initialize the rowset register to contain NULL. An SQL NULL is
+ ** equivalent to an empty rowset. Or, create an ephemeral index
+    ** capable of holding primary keys in the case of a WITHOUT ROWID table.
+ **
+ ** Also initialize regReturn to contain the address of the instruction
+ ** immediately following the OP_Return at the bottom of the loop. This
+ ** is required in a few obscure LEFT JOIN cases where control jumps
+ ** over the top of the loop into the body of it. In this case the
+ ** correct response for the end-of-loop code (the OP_Return) is to
+ ** fall through to the next instruction, just as an OP_Next does if
+ ** called on an uninitialized cursor.
+ */
+ if( (pWInfo->wctrlFlags & WHERE_DUPLICATES_OK)==0 ){
+ if( HasRowid(pTab) ){
+ regRowset = ++pParse->nMem;
+ sqlite3VdbeAddOp2(v, OP_Null, 0, regRowset);
+ }else{
+ Index *pPk = sqlite3PrimaryKeyIndex(pTab);
+ regRowset = pParse->nTab++;
+ sqlite3VdbeAddOp2(v, OP_OpenEphemeral, regRowset, pPk->nKeyCol);
+ sqlite3VdbeSetP4KeyInfo(pParse, pPk);
+ }
+ regRowid = ++pParse->nMem;
+ }
+ iRetInit = sqlite3VdbeAddOp2(v, OP_Integer, 0, regReturn);
+
+    /* If the original WHERE clause z is of the form: (x1 OR x2 OR ...) AND y
+    ** then for every term xN, evaluate the subexpression: xN AND z
+ ** That way, terms in y that are factored into the disjunction will
+ ** be picked up by the recursive calls to sqlite3WhereBegin() below.
+ **
+    ** Actually, each subexpression is converted to "xN AND w" where w is
+    ** the conjunction of the "interesting" terms of z - terms that did not
+    ** originate in the ON or USING clause of a LEFT JOIN, and terms that
+    ** are usable by indexes.
+ **
+ ** This optimization also only applies if the (x1 OR x2 OR ...) term
+ ** is not contained in the ON clause of a LEFT JOIN.
+ ** See ticket http://www.sqlite.org/src/info/f2369304e4
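+    **
+    ** For example (illustrative): for WHERE (a=1 OR b=2) AND c>0, the two
+    ** subexpressions evaluated below are "a=1 AND c>0" and "b=2 AND c>0",
+    ** so an index on c remains usable within each OR branch.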
+ */
+ if( pWC->nTerm>1 ){
+ int iTerm;
+ for(iTerm=0; iTerm<pWC->nTerm; iTerm++){
+ Expr *pExpr = pWC->a[iTerm].pExpr;
+ if( &pWC->a[iTerm] == pTerm ) continue;
+ testcase( pWC->a[iTerm].wtFlags & TERM_VIRTUAL );
+ testcase( pWC->a[iTerm].wtFlags & TERM_CODED );
+ if( (pWC->a[iTerm].wtFlags & (TERM_VIRTUAL|TERM_CODED))!=0 ) continue;
+ if( (pWC->a[iTerm].eOperator & WO_ALL)==0 ) continue;
+ testcase( pWC->a[iTerm].wtFlags & TERM_ORINFO );
+ pExpr = sqlite3ExprDup(db, pExpr, 0);
+ pAndExpr = sqlite3ExprAnd(pParse, pAndExpr, pExpr);
+ }
+ if( pAndExpr ){
+ /* The extra 0x10000 bit on the opcode is masked off and does not
+ ** become part of the new Expr.op. However, it does make the
+ ** op==TK_AND comparison inside of sqlite3PExpr() false, and this
+ ** prevents sqlite3PExpr() from implementing AND short-circuit
+ ** optimization, which we do not want here. */
+ pAndExpr = sqlite3PExpr(pParse, TK_AND|0x10000, 0, pAndExpr);
+ }
+ }
+
+ /* Run a separate WHERE clause for each term of the OR clause. After
+ ** eliminating duplicates from other WHERE clauses, the action for each
+    ** sub-WHERE clause is to invoke the main loop body as a subroutine.
+ */
+ wctrlFlags = WHERE_OR_SUBCLAUSE | (pWInfo->wctrlFlags & WHERE_SEEK_TABLE);
+ ExplainQueryPlan((pParse, 1, "MULTI-INDEX OR"));
+ for(ii=0; ii<pOrWc->nTerm; ii++){
+ WhereTerm *pOrTerm = &pOrWc->a[ii];
+ if( pOrTerm->leftCursor==iCur || (pOrTerm->eOperator & WO_AND)!=0 ){
+ WhereInfo *pSubWInfo; /* Info for single OR-term scan */
+ Expr *pOrExpr = pOrTerm->pExpr; /* Current OR clause term */
+ int jmp1 = 0; /* Address of jump operation */
+ testcase( (pTabItem[0].fg.jointype & JT_LEFT)!=0
+ && !ExprHasProperty(pOrExpr, EP_FromJoin)
+ ); /* See TH3 vtab25.400 and ticket 614b25314c766238 */
+ if( pAndExpr ){
+ pAndExpr->pLeft = pOrExpr;
+ pOrExpr = pAndExpr;
+ }
+ /* Loop through table entries that match term pOrTerm. */
+ ExplainQueryPlan((pParse, 1, "INDEX %d", ii+1));
+ WHERETRACE(0xffff, ("Subplan for OR-clause:\n"));
+ pSubWInfo = sqlite3WhereBegin(pParse, pOrTab, pOrExpr, 0, 0,
+ wctrlFlags, iCovCur);
+ assert( pSubWInfo || pParse->nErr || db->mallocFailed );
+ if( pSubWInfo ){
+ WhereLoop *pSubLoop;
+ int addrExplain = sqlite3WhereExplainOneScan(
+ pParse, pOrTab, &pSubWInfo->a[0], 0
+ );
+ sqlite3WhereAddScanStatus(v, pOrTab, &pSubWInfo->a[0], addrExplain);
+
+ /* This is the sub-WHERE clause body. First skip over
+ ** duplicate rows from prior sub-WHERE clauses, and record the
+ ** rowid (or PRIMARY KEY) for the current row so that the same
+ ** row will be skipped in subsequent sub-WHERE clauses.
+ */
+ if( (pWInfo->wctrlFlags & WHERE_DUPLICATES_OK)==0 ){
+ int iSet = ((ii==pOrWc->nTerm-1)?-1:ii);
+ if( HasRowid(pTab) ){
+ sqlite3ExprCodeGetColumnOfTable(v, pTab, iCur, -1, regRowid);
+ jmp1 = sqlite3VdbeAddOp4Int(v, OP_RowSetTest, regRowset, 0,
+ regRowid, iSet);
+ VdbeCoverage(v);
+ }else{
+ Index *pPk = sqlite3PrimaryKeyIndex(pTab);
+ int nPk = pPk->nKeyCol;
+ int iPk;
+ int r;
+
+ /* Read the PK into an array of temp registers. */
+ r = sqlite3GetTempRange(pParse, nPk);
+ for(iPk=0; iPk<nPk; iPk++){
+ int iCol = pPk->aiColumn[iPk];
+ sqlite3ExprCodeGetColumnOfTable(v, pTab, iCur, iCol,r+iPk);
+ }
+
+ /* Check if the temp table already contains this key. If so,
+ ** the row has already been included in the result set and
+ ** can be ignored (by jumping past the Gosub below). Otherwise,
+ ** insert the key into the temp table and proceed with processing
+ ** the row.
+ **
+ ** Use some of the same optimizations as OP_RowSetTest: If iSet
+ ** is zero, assume that the key cannot already be present in
+ ** the temp table. And if iSet is -1, assume that there is no
+ ** need to insert the key into the temp table, as it will never
+ ** be tested for. */
+ if( iSet ){
+ jmp1 = sqlite3VdbeAddOp4Int(v, OP_Found, regRowset, 0, r, nPk);
+ VdbeCoverage(v);
+ }
+ if( iSet>=0 ){
+ sqlite3VdbeAddOp3(v, OP_MakeRecord, r, nPk, regRowid);
+ sqlite3VdbeAddOp4Int(v, OP_IdxInsert, regRowset, regRowid,
+ r, nPk);
+ if( iSet ) sqlite3VdbeChangeP5(v, OPFLAG_USESEEKRESULT);
+ }
+
+ /* Release the array of temp registers */
+ sqlite3ReleaseTempRange(pParse, r, nPk);
+ }
+ }
+
+ /* Invoke the main loop body as a subroutine */
+ sqlite3VdbeAddOp2(v, OP_Gosub, regReturn, iLoopBody);
+
+ /* Jump here (skipping the main loop body subroutine) if the
+ ** current sub-WHERE row is a duplicate from prior sub-WHEREs. */
+ if( jmp1 ) sqlite3VdbeJumpHere(v, jmp1);
+
+ /* The pSubWInfo->untestedTerms flag means that this OR term
+          ** contained one or more AND terms from a notReady table. The
+ ** terms from the notReady table could not be tested and will
+ ** need to be tested later.
+ */
+ if( pSubWInfo->untestedTerms ) untestedTerms = 1;
+
+ /* If all of the OR-connected terms are optimized using the same
+ ** index, and the index is opened using the same cursor number
+ ** by each call to sqlite3WhereBegin() made by this loop, it may
+ ** be possible to use that index as a covering index.
+ **
+ ** If the call to sqlite3WhereBegin() above resulted in a scan that
+ ** uses an index, and this is either the first OR-connected term
+ ** processed or the index is the same as that used by all previous
+ ** terms, set pCov to the candidate covering index. Otherwise, set
+ ** pCov to NULL to indicate that no candidate covering index will
+ ** be available.
+ */
+ pSubLoop = pSubWInfo->a[0].pWLoop;
+ assert( (pSubLoop->wsFlags & WHERE_AUTO_INDEX)==0 );
+ if( (pSubLoop->wsFlags & WHERE_INDEXED)!=0
+ && (ii==0 || pSubLoop->u.btree.pIndex==pCov)
+ && (HasRowid(pTab) || !IsPrimaryKeyIndex(pSubLoop->u.btree.pIndex))
+ ){
+ assert( pSubWInfo->a[0].iIdxCur==iCovCur );
+ pCov = pSubLoop->u.btree.pIndex;
+ }else{
+ pCov = 0;
+ }
+
+ /* Finish the loop through table entries that match term pOrTerm. */
+ sqlite3WhereEnd(pSubWInfo);
+ ExplainQueryPlanPop(pParse);
+ }
+ }
+ }
+ ExplainQueryPlanPop(pParse);
+ pLevel->u.pCovidx = pCov;
+ if( pCov ) pLevel->iIdxCur = iCovCur;
+ if( pAndExpr ){
+ pAndExpr->pLeft = 0;
+ sqlite3ExprDelete(db, pAndExpr);
+ }
+ sqlite3VdbeChangeP1(v, iRetInit, sqlite3VdbeCurrentAddr(v));
+ sqlite3VdbeGoto(v, pLevel->addrBrk);
+ sqlite3VdbeResolveLabel(v, iLoopBody);
+
+ if( pWInfo->nLevel>1 ){ sqlite3StackFree(db, pOrTab); }
+ if( !untestedTerms ) disableTerm(pLevel, pTerm);
+ }else
+#endif /* SQLITE_OMIT_OR_OPTIMIZATION */
+
+ {
+ /* Case 6: There is no usable index. We must do a complete
+ ** scan of the entire table.
+ */
+ static const u8 aStep[] = { OP_Next, OP_Prev };
+ static const u8 aStart[] = { OP_Rewind, OP_Last };
+ assert( bRev==0 || bRev==1 );
+ if( pTabItem->fg.isRecursive ){
+ /* Tables marked isRecursive have only a single row that is stored in
+ ** a pseudo-cursor. No need to Rewind or Next such cursors. */
+ pLevel->op = OP_Noop;
+ }else{
+ codeCursorHint(pTabItem, pWInfo, pLevel, 0);
+ pLevel->op = aStep[bRev];
+ pLevel->p1 = iCur;
+ pLevel->p2 = 1 + sqlite3VdbeAddOp2(v, aStart[bRev], iCur, addrHalt);
+ VdbeCoverageIf(v, bRev==0);
+ VdbeCoverageIf(v, bRev!=0);
+ pLevel->p5 = SQLITE_STMTSTATUS_FULLSCAN_STEP;
+ }
+ }
+
+#ifdef SQLITE_ENABLE_STMT_SCANSTATUS
+ pLevel->addrVisit = sqlite3VdbeCurrentAddr(v);
+#endif
+
+ /* Insert code to test every subexpression that can be completely
+ ** computed using the current set of tables.
+ **
+ ** This loop may run between one and three times, depending on the
+ ** constraints to be generated. The value of stack variable iLoop
+ ** determines the constraints coded by each iteration, as follows:
+ **
+ ** iLoop==1: Code only expressions that are entirely covered by pIdx.
+ ** iLoop==2: Code remaining expressions that do not contain correlated
+ ** sub-queries.
+ ** iLoop==3: Code all remaining expressions.
+ **
+ ** An effort is made to skip unnecessary iterations of the loop.
+ */
+ iLoop = (pIdx ? 1 : 2);
+ do{
+ int iNext = 0; /* Next value for iLoop */
+ for(pTerm=pWC->a, j=pWC->nTerm; j>0; j--, pTerm++){
+ Expr *pE;
+ int skipLikeAddr = 0;
+ testcase( pTerm->wtFlags & TERM_VIRTUAL );
+ testcase( pTerm->wtFlags & TERM_CODED );
+ if( pTerm->wtFlags & (TERM_VIRTUAL|TERM_CODED) ) continue;
+ if( (pTerm->prereqAll & pLevel->notReady)!=0 ){
+ testcase( pWInfo->untestedTerms==0
+ && (pWInfo->wctrlFlags & WHERE_OR_SUBCLAUSE)!=0 );
+ pWInfo->untestedTerms = 1;
+ continue;
+ }
+ pE = pTerm->pExpr;
+ assert( pE!=0 );
+ if( (pTabItem->fg.jointype&JT_LEFT) && !ExprHasProperty(pE,EP_FromJoin) ){
+ continue;
+ }
+
+ if( iLoop==1 && !sqlite3ExprCoveredByIndex(pE, pLevel->iTabCur, pIdx) ){
+ iNext = 2;
+ continue;
+ }
+ if( iLoop<3 && (pTerm->wtFlags & TERM_VARSELECT) ){
+ if( iNext==0 ) iNext = 3;
+ continue;
+ }
+
+ if( (pTerm->wtFlags & TERM_LIKECOND)!=0 ){
+ /* If the TERM_LIKECOND flag is set, that means that the range search
+ ** is sufficient to guarantee that the LIKE operator is true, so we
+ ** can skip the call to the like(A,B) function. But this only works
+ ** for strings. So do not skip the call to the function on the pass
+ ** that compares BLOBs. */
+#ifdef SQLITE_LIKE_DOESNT_MATCH_BLOBS
+ continue;
+#else
+ u32 x = pLevel->iLikeRepCntr;
+ if( x>0 ){
+ skipLikeAddr = sqlite3VdbeAddOp1(v, (x&1)?OP_IfNot:OP_If,(int)(x>>1));
+ VdbeCoverageIf(v, (x&1)==1);
+ VdbeCoverageIf(v, (x&1)==0);
+ }
+#endif
+ }
+#ifdef WHERETRACE_ENABLED /* 0xffff */
+ if( sqlite3WhereTrace ){
+ VdbeNoopComment((v, "WhereTerm[%d] (%p) priority=%d",
+ pWC->nTerm-j, pTerm, iLoop));
+ }
+ if( sqlite3WhereTrace & 0x800 ){
+ sqlite3DebugPrintf("Coding auxiliary constraint:\n");
+ sqlite3WhereTermPrint(pTerm, pWC->nTerm-j);
+ }
+#endif
+ sqlite3ExprIfFalse(pParse, pE, addrCont, SQLITE_JUMPIFNULL);
+ if( skipLikeAddr ) sqlite3VdbeJumpHere(v, skipLikeAddr);
+ pTerm->wtFlags |= TERM_CODED;
+ }
+ iLoop = iNext;
+ }while( iLoop>0 );
+
+ /* Insert code to test for implied constraints based on transitivity
+ ** of the "==" operator.
+ **
+ ** Example: If the WHERE clause contains "t1.a=t2.b" and "t2.b=123"
+  ** and we are coding the t1 loop and the t2 loop has not yet been coded,
+ ** then we cannot use the "t1.a=t2.b" constraint, but we can code
+ ** the implied "t1.a=123" constraint.
+ */
+ for(pTerm=pWC->a, j=pWC->nTerm; j>0; j--, pTerm++){
+ Expr *pE, sEAlt;
+ WhereTerm *pAlt;
+ if( pTerm->wtFlags & (TERM_VIRTUAL|TERM_CODED) ) continue;
+ if( (pTerm->eOperator & (WO_EQ|WO_IS))==0 ) continue;
+ if( (pTerm->eOperator & WO_EQUIV)==0 ) continue;
+ if( pTerm->leftCursor!=iCur ) continue;
+ if( pTabItem->fg.jointype & JT_LEFT ) continue;
+ pE = pTerm->pExpr;
+#ifdef WHERETRACE_ENABLED /* 0x800 */
+ if( sqlite3WhereTrace & 0x800 ){
+ sqlite3DebugPrintf("Coding transitive constraint:\n");
+ sqlite3WhereTermPrint(pTerm, pWC->nTerm-j);
+ }
+#endif
+ assert( !ExprHasProperty(pE, EP_FromJoin) );
+ assert( (pTerm->prereqRight & pLevel->notReady)!=0 );
+ pAlt = sqlite3WhereFindTerm(pWC, iCur, pTerm->u.leftColumn, notReady,
+ WO_EQ|WO_IN|WO_IS, 0);
+ if( pAlt==0 ) continue;
+ if( pAlt->wtFlags & (TERM_CODED) ) continue;
+ if( (pAlt->eOperator & WO_IN)
+ && (pAlt->pExpr->flags & EP_xIsSelect)
+ && (pAlt->pExpr->x.pSelect->pEList->nExpr>1)
+ ){
+ continue;
+ }
+ testcase( pAlt->eOperator & WO_EQ );
+ testcase( pAlt->eOperator & WO_IS );
+ testcase( pAlt->eOperator & WO_IN );
+ VdbeModuleComment((v, "begin transitive constraint"));
+ sEAlt = *pAlt->pExpr;
+ sEAlt.pLeft = pE->pLeft;
+ sqlite3ExprIfFalse(pParse, &sEAlt, addrCont, SQLITE_JUMPIFNULL);
+ }
+
+ /* For a LEFT OUTER JOIN, generate code that will record the fact that
+ ** at least one row of the right table has matched the left table.
+ */
+ if( pLevel->iLeftJoin ){
+ pLevel->addrFirst = sqlite3VdbeCurrentAddr(v);
+ sqlite3VdbeAddOp2(v, OP_Integer, 1, pLevel->iLeftJoin);
+ VdbeComment((v, "record LEFT JOIN hit"));
+ for(pTerm=pWC->a, j=0; j<pWC->nTerm; j++, pTerm++){
+ testcase( pTerm->wtFlags & TERM_VIRTUAL );
+ testcase( pTerm->wtFlags & TERM_CODED );
+ if( pTerm->wtFlags & (TERM_VIRTUAL|TERM_CODED) ) continue;
+ if( (pTerm->prereqAll & pLevel->notReady)!=0 ){
+ assert( pWInfo->untestedTerms );
+ continue;
+ }
+ assert( pTerm->pExpr );
+ sqlite3ExprIfFalse(pParse, pTerm->pExpr, addrCont, SQLITE_JUMPIFNULL);
+ pTerm->wtFlags |= TERM_CODED;
+ }
+ }
+
+#if WHERETRACE_ENABLED /* 0x20800 */
+ if( sqlite3WhereTrace & 0x20000 ){
+ sqlite3DebugPrintf("All WHERE-clause terms after coding level %d:\n",
+ iLevel);
+ sqlite3WhereClausePrint(pWC);
+ }
+ if( sqlite3WhereTrace & 0x800 ){
+ sqlite3DebugPrintf("End Coding level %d: notReady=%llx\n",
+ iLevel, (u64)pLevel->notReady);
+ }
+#endif
+ return pLevel->notReady;
+}
+
+/************** End of wherecode.c *******************************************/
+/************** Begin file whereexpr.c ***************************************/
+/*
+** 2015-06-08
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+** This module contains C code that generates VDBE code used to process
+** the WHERE clause of SQL statements.
+**
+** This file was originally part of where.c but was split out to improve
+** readability and editability. This file contains utility routines for
+** analyzing Expr objects in the WHERE clause.
+*/
+/* #include "sqliteInt.h" */
+/* #include "whereInt.h" */
+
+/* Forward declarations */
+static void exprAnalyze(SrcList*, WhereClause*, int);
+
+/*
+** Deallocate all memory associated with a WhereOrInfo object.
+*/
+static void whereOrInfoDelete(sqlite3 *db, WhereOrInfo *p){
+ sqlite3WhereClauseClear(&p->wc);
+ sqlite3DbFree(db, p);
+}
+
+/*
+** Deallocate all memory associated with a WhereAndInfo object.
+*/
+static void whereAndInfoDelete(sqlite3 *db, WhereAndInfo *p){
+ sqlite3WhereClauseClear(&p->wc);
+ sqlite3DbFree(db, p);
+}
+
+/*
+** Add a single new WhereTerm entry to the WhereClause object pWC.
+** The new WhereTerm object is constructed from Expr p and the given wtFlags.
+** The index in pWC->a[] of the new WhereTerm is returned on success.
+** 0 is returned if the new WhereTerm could not be added due to a memory
+** allocation error. The memory allocation failure will be recorded in
+** the db->mallocFailed flag so that higher-level functions can detect it.
+**
+** This routine will increase the size of the pWC->a[] array as necessary.
+**
+** If the wtFlags argument includes TERM_DYNAMIC, then responsibility
+** for freeing the expression p is assumed by the WhereClause object pWC.
+** This is true even if this routine fails to allocate a new WhereTerm.
+**
+** WARNING: This routine might reallocate the space used to store
+** WhereTerms. All pointers to WhereTerms should be treated as invalid after
+** calling this routine. Such pointers may be reinitialized by referencing
+** the pWC->a[] array.
+*/
+static int whereClauseInsert(WhereClause *pWC, Expr *p, u16 wtFlags){
+ WhereTerm *pTerm;
+ int idx;
+ testcase( wtFlags & TERM_VIRTUAL );
+ if( pWC->nTerm>=pWC->nSlot ){
+ WhereTerm *pOld = pWC->a;
+ sqlite3 *db = pWC->pWInfo->pParse->db;
+ pWC->a = sqlite3DbMallocRawNN(db, sizeof(pWC->a[0])*pWC->nSlot*2 );
+ if( pWC->a==0 ){
+ if( wtFlags & TERM_DYNAMIC ){
+ sqlite3ExprDelete(db, p);
+ }
+ pWC->a = pOld;
+ return 0;
+ }
+ memcpy(pWC->a, pOld, sizeof(pWC->a[0])*pWC->nTerm);
+ if( pOld!=pWC->aStatic ){
+ sqlite3DbFree(db, pOld);
+ }
+ pWC->nSlot = sqlite3DbMallocSize(db, pWC->a)/sizeof(pWC->a[0]);
+ }
+ pTerm = &pWC->a[idx = pWC->nTerm++];
+ if( p && ExprHasProperty(p, EP_Unlikely) ){
+ pTerm->truthProb = sqlite3LogEst(p->iTable) - 270;
+ }else{
+ pTerm->truthProb = 1;
+ }
+ pTerm->pExpr = sqlite3ExprSkipCollateAndLikely(p);
+ pTerm->wtFlags = wtFlags;
+ pTerm->pWC = pWC;
+ pTerm->iParent = -1;
+ memset(&pTerm->eOperator, 0,
+ sizeof(WhereTerm) - offsetof(WhereTerm,eOperator));
+ return idx;
+}
+
+/*
+** Return TRUE if the given operator is one of the operators that is
+** allowed for an indexable WHERE clause term. The allowed operators are
+** "=", "<", ">", "<=", ">=", "IN", "IS", and "IS NULL"
+*/
+static int allowedOp(int op){
+ assert( TK_GT>TK_EQ && TK_GT<TK_GE );
+ assert( TK_LT>TK_EQ && TK_LT<TK_GE );
+ assert( TK_LE>TK_EQ && TK_LE<TK_GE );
+ assert( TK_GE==TK_EQ+4 );
+ return op==TK_IN || (op>=TK_EQ && op<=TK_GE) || op==TK_ISNULL || op==TK_IS;
+}
+
+/*
+** Commute a comparison operator. Expressions of the form "X op Y"
+** are converted into "Y op X".
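+**
+** For example, "5<x" becomes "x>5", placing the column reference on the
+** left-hand side where the index analysis expects to find it.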
+*/
+static u16 exprCommute(Parse *pParse, Expr *pExpr){
+ if( pExpr->pLeft->op==TK_VECTOR
+ || pExpr->pRight->op==TK_VECTOR
+ || sqlite3BinaryCompareCollSeq(pParse, pExpr->pLeft, pExpr->pRight) !=
+ sqlite3BinaryCompareCollSeq(pParse, pExpr->pRight, pExpr->pLeft)
+ ){
+ pExpr->flags ^= EP_Commuted;
+ }
+ SWAP(Expr*,pExpr->pRight,pExpr->pLeft);
+ if( pExpr->op>=TK_GT ){
+ assert( TK_LT==TK_GT+2 );
+ assert( TK_GE==TK_LE+2 );
+ assert( TK_GT>TK_EQ );
+ assert( TK_GT<TK_LE );
+ assert( pExpr->op>=TK_GT && pExpr->op<=TK_GE );
+ pExpr->op = ((pExpr->op-TK_GT)^2)+TK_GT;
+ }
+ return 0;
+}
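+
+/* For illustration (assuming a table with a column x): exprCommute()
+** rewrites "5<x" as "x>5" and "5=x" as "x=5", so later analysis only
+** needs to handle the "column <op> expression" orientation.
+*/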
+
+/*
+** Translate from TK_xx operator to WO_xx bitmask.
+*/
+static u16 operatorMask(int op){
+ u16 c;
+ assert( allowedOp(op) );
+ if( op==TK_IN ){
+ c = WO_IN;
+ }else if( op==TK_ISNULL ){
+ c = WO_ISNULL;
+ }else if( op==TK_IS ){
+ c = WO_IS;
+ }else{
+ assert( (WO_EQ<<(op-TK_EQ)) < 0x7fff );
+ c = (u16)(WO_EQ<<(op-TK_EQ));
+ }
+ assert( op!=TK_ISNULL || c==WO_ISNULL );
+ assert( op!=TK_IN || c==WO_IN );
+ assert( op!=TK_EQ || c==WO_EQ );
+ assert( op!=TK_LT || c==WO_LT );
+ assert( op!=TK_LE || c==WO_LE );
+ assert( op!=TK_GT || c==WO_GT );
+ assert( op!=TK_GE || c==WO_GE );
+ assert( op!=TK_IS || c==WO_IS );
+ return c;
+}
+
+
+#ifndef SQLITE_OMIT_LIKE_OPTIMIZATION
+/*
+** Check to see if the given expression is a LIKE or GLOB operator that
+** can be optimized using inequality constraints. Return TRUE if it is
+** so and false if not.
+**
+** In order for the operator to be optimizable, the RHS must be a string
+** literal that does not begin with a wildcard. The LHS must be a column
+** that may only be NULL, a string, or a BLOB, never a number. (This means
+** that virtual tables cannot participate in the LIKE optimization.) The
+** collating sequence for the column on the LHS must be appropriate for
+** the operator.
+*/
+static int isLikeOrGlob(
+ Parse *pParse, /* Parsing and code generating context */
+ Expr *pExpr, /* Test this expression */
+ Expr **ppPrefix, /* Pointer to TK_STRING expression with pattern prefix */
+ int *pisComplete, /* True if the only wildcard is % in the last character */
+ int *pnoCase /* True if uppercase is equivalent to lowercase */
+){
+ const u8 *z = 0; /* String on RHS of LIKE operator */
+  Expr *pRight, *pLeft;     /* Right and left sides of LIKE operator */
+ ExprList *pList; /* List of operands to the LIKE operator */
+ u8 c; /* One character in z[] */
+ int cnt; /* Number of non-wildcard prefix characters */
+ u8 wc[4]; /* Wildcard characters */
+ sqlite3 *db = pParse->db; /* Database connection */
+ sqlite3_value *pVal = 0;
+ int op; /* Opcode of pRight */
+ int rc; /* Result code to return */
+
+ if( !sqlite3IsLikeFunction(db, pExpr, pnoCase, (char*)wc) ){
+ return 0;
+ }
+#ifdef SQLITE_EBCDIC
+ if( *pnoCase ) return 0;
+#endif
+ pList = pExpr->x.pList;
+ pLeft = pList->a[1].pExpr;
+
+ pRight = sqlite3ExprSkipCollate(pList->a[0].pExpr);
+ op = pRight->op;
+ if( op==TK_VARIABLE && (db->flags & SQLITE_EnableQPSG)==0 ){
+ Vdbe *pReprepare = pParse->pReprepare;
+ int iCol = pRight->iColumn;
+ pVal = sqlite3VdbeGetBoundValue(pReprepare, iCol, SQLITE_AFF_BLOB);
+ if( pVal && sqlite3_value_type(pVal)==SQLITE_TEXT ){
+ z = sqlite3_value_text(pVal);
+ }
+ sqlite3VdbeSetVarmask(pParse->pVdbe, iCol);
+ assert( pRight->op==TK_VARIABLE || pRight->op==TK_REGISTER );
+ }else if( op==TK_STRING ){
+ z = (u8*)pRight->u.zToken;
+ }
+ if( z ){
+
+ /* Count the number of prefix characters prior to the first wildcard */
+ cnt = 0;
+ while( (c=z[cnt])!=0 && c!=wc[0] && c!=wc[1] && c!=wc[2] ){
+ cnt++;
+ if( c==wc[3] && z[cnt]!=0 ) cnt++;
+ }
+
+    /* The optimization is possible only if (1) the pattern does not begin
+    ** with a wildcard, (2) the non-wildcard prefix does not end with
+    ** an (illegal 0xff) character, and (3) the pattern does not consist of
+    ** a single escape character. The second condition is necessary so
+    ** that we can increment the prefix key to find an upper bound for the
+    ** range search. The third is because the caller assumes that the pattern
+    ** consists of at least one character after all escapes have been
+    ** removed. */
+ if( cnt!=0 && 255!=(u8)z[cnt-1] && (cnt>1 || z[0]!=wc[3]) ){
+ Expr *pPrefix;
+
+ /* A "complete" match if the pattern ends with "*" or "%" */
+ *pisComplete = c==wc[0] && z[cnt+1]==0;
+
+ /* Get the pattern prefix. Remove all escapes from the prefix. */
+ pPrefix = sqlite3Expr(db, TK_STRING, (char*)z);
+ if( pPrefix ){
+ int iFrom, iTo;
+ char *zNew = pPrefix->u.zToken;
+ zNew[cnt] = 0;
+ for(iFrom=iTo=0; iFrom<cnt; iFrom++){
+ if( zNew[iFrom]==wc[3] ) iFrom++;
+ zNew[iTo++] = zNew[iFrom];
+ }
+ zNew[iTo] = 0;
+ assert( iTo>0 );
+
+ /* If the LHS is not an ordinary column with TEXT affinity, then the
+ ** pattern prefix boundaries (both the start and end boundaries) must
+ ** not look like a number. Otherwise the pattern might be treated as
+ ** a number, which will invalidate the LIKE optimization.
+ **
+ ** Getting this right has been a persistent source of bugs in the
+ ** LIKE optimization. See, for example:
+ ** 2018-09-10 https://sqlite.org/src/info/c94369cae9b561b1
+ ** 2019-05-02 https://sqlite.org/src/info/b043a54c3de54b28
+ ** 2019-06-10 https://sqlite.org/src/info/fd76310a5e843e07
+ ** 2019-06-14 https://sqlite.org/src/info/ce8717f0885af975
+ ** 2019-09-03 https://sqlite.org/src/info/0f0428096f17252a
+ */
+ if( pLeft->op!=TK_COLUMN
+ || sqlite3ExprAffinity(pLeft)!=SQLITE_AFF_TEXT
+ || IsVirtual(pLeft->y.pTab) /* Value might be numeric */
+ ){
+ int isNum;
+ double rDummy;
+ isNum = sqlite3AtoF(zNew, &rDummy, iTo, SQLITE_UTF8);
+ if( isNum<=0 ){
+ if( iTo==1 && zNew[0]=='-' ){
+ isNum = +1;
+ }else{
+ zNew[iTo-1]++;
+ isNum = sqlite3AtoF(zNew, &rDummy, iTo, SQLITE_UTF8);
+ zNew[iTo-1]--;
+ }
+ }
+ if( isNum>0 ){
+ sqlite3ExprDelete(db, pPrefix);
+ sqlite3ValueFree(pVal);
+ return 0;
+ }
+ }
+ }
+ *ppPrefix = pPrefix;
+
+ /* If the RHS pattern is a bound parameter, make arrangements to
+ ** reprepare the statement when that parameter is rebound */
+ if( op==TK_VARIABLE ){
+ Vdbe *v = pParse->pVdbe;
+ sqlite3VdbeSetVarmask(v, pRight->iColumn);
+ if( *pisComplete && pRight->u.zToken[1] ){
+ /* If the rhs of the LIKE expression is a variable, and the current
+ ** value of the variable means there is no need to invoke the LIKE
+ ** function, then no OP_Variable will be added to the program.
+ ** This causes problems for the sqlite3_bind_parameter_name()
+ ** API. To work around them, add a dummy OP_Variable here.
+ */
+ int r1 = sqlite3GetTempReg(pParse);
+ sqlite3ExprCodeTarget(pParse, pRight, r1);
+ sqlite3VdbeChangeP3(v, sqlite3VdbeCurrentAddr(v)-1, 0);
+ sqlite3ReleaseTempReg(pParse, r1);
+ }
+ }
+ }else{
+ z = 0;
+ }
+ }
+
+ rc = (z!=0);
+ sqlite3ValueFree(pVal);
+ return rc;
+}
+#endif /* SQLITE_OMIT_LIKE_OPTIMIZATION */
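+
+/* Worked example (a sketch only): for the term "x LIKE 'abc%'",
+** isLikeOrGlob() returns true with *ppPrefix set to a TK_STRING
+** expression holding 'abc', *pisComplete set to 1 (the only wildcard
+** is the trailing '%'), and *pnoCase set to 1 for the default
+** case-insensitive LIKE. For "x LIKE '%abc'" it returns false, since
+** the pattern begins with a wildcard and no usable prefix exists.
+*/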
+
+
+#ifndef SQLITE_OMIT_VIRTUALTABLE
+/*
+** Check to see if the pExpr expression is a form that needs to be passed
+** to the xBestIndex method of virtual tables. Forms of interest include:
+**
+** Expression Virtual Table Operator
+** ----------------------- ---------------------------------
+** 1. column MATCH expr SQLITE_INDEX_CONSTRAINT_MATCH
+** 2. column GLOB expr SQLITE_INDEX_CONSTRAINT_GLOB
+** 3. column LIKE expr SQLITE_INDEX_CONSTRAINT_LIKE
+** 4. column REGEXP expr SQLITE_INDEX_CONSTRAINT_REGEXP
+** 5. column != expr SQLITE_INDEX_CONSTRAINT_NE
+** 6. expr != column SQLITE_INDEX_CONSTRAINT_NE
+** 7. column IS NOT expr SQLITE_INDEX_CONSTRAINT_ISNOT
+** 8. expr IS NOT column SQLITE_INDEX_CONSTRAINT_ISNOT
+** 9. column IS NOT NULL SQLITE_INDEX_CONSTRAINT_ISNOTNULL
+**
+** In every case, "column" must be a column of a virtual table. If there
+** is a match, set *ppLeft to the "column" expression, set *ppRight to the
+** "expr" expression (even though in forms (6) and (8) the column is on the
+** right and the expression is on the left). Also set *peOp2 to the
+** appropriate virtual table operator. The return value is 1 or 2 if there
+** is a match. The usual return is 1, but if the RHS is also a column
+** of a virtual table in forms (5) or (7) then return 2.
+**
+** If the expression matches none of the patterns above, return 0.
+*/
+static int isAuxiliaryVtabOperator(
+ sqlite3 *db, /* Parsing context */
+ Expr *pExpr, /* Test this expression */
+ unsigned char *peOp2, /* OUT: 0 for MATCH, or else an op2 value */
+ Expr **ppLeft, /* Column expression to left of MATCH/op2 */
+  Expr **ppRight                  /* Expression to right of MATCH/op2 */
+){
+ if( pExpr->op==TK_FUNCTION ){
+ static const struct Op2 {
+ const char *zOp;
+ unsigned char eOp2;
+ } aOp[] = {
+ { "match", SQLITE_INDEX_CONSTRAINT_MATCH },
+ { "glob", SQLITE_INDEX_CONSTRAINT_GLOB },
+ { "like", SQLITE_INDEX_CONSTRAINT_LIKE },
+ { "regexp", SQLITE_INDEX_CONSTRAINT_REGEXP }
+ };
+ ExprList *pList;
+ Expr *pCol; /* Column reference */
+ int i;
+
+ pList = pExpr->x.pList;
+ if( pList==0 || pList->nExpr!=2 ){
+ return 0;
+ }
+
+ /* Built-in operators MATCH, GLOB, LIKE, and REGEXP attach to a
+ ** virtual table on their second argument, which is the same as
+    ** the left-hand side operand in their infix form.
+ **
+ ** vtab_column MATCH expression
+ ** MATCH(expression,vtab_column)
+ */
+ pCol = pList->a[1].pExpr;
+ testcase( pCol->op==TK_COLUMN && pCol->y.pTab==0 );
+ if( ExprIsVtab(pCol) ){
+ for(i=0; i<ArraySize(aOp); i++){
+ if( sqlite3StrICmp(pExpr->u.zToken, aOp[i].zOp)==0 ){
+ *peOp2 = aOp[i].eOp2;
+ *ppRight = pList->a[0].pExpr;
+ *ppLeft = pCol;
+ return 1;
+ }
+ }
+ }
+
+ /* We can also match against the first column of overloaded
+ ** functions where xFindFunction returns a value of at least
+ ** SQLITE_INDEX_CONSTRAINT_FUNCTION.
+ **
+ ** OVERLOADED(vtab_column,expression)
+ **
+ ** Historically, xFindFunction expected to see lower-case function
+ ** names. But for this use case, xFindFunction is expected to deal
+ ** with function names in an arbitrary case.
+ */
+ pCol = pList->a[0].pExpr;
+ testcase( pCol->op==TK_COLUMN && pCol->y.pTab==0 );
+ if( ExprIsVtab(pCol) ){
+ sqlite3_vtab *pVtab;
+ sqlite3_module *pMod;
+ void (*xNotUsed)(sqlite3_context*,int,sqlite3_value**);
+ void *pNotUsed;
+ pVtab = sqlite3GetVTable(db, pCol->y.pTab)->pVtab;
+ assert( pVtab!=0 );
+ assert( pVtab->pModule!=0 );
+ pMod = (sqlite3_module *)pVtab->pModule;
+ if( pMod->xFindFunction!=0 ){
+ i = pMod->xFindFunction(pVtab,2, pExpr->u.zToken, &xNotUsed, &pNotUsed);
+ if( i>=SQLITE_INDEX_CONSTRAINT_FUNCTION ){
+ *peOp2 = i;
+ *ppRight = pList->a[1].pExpr;
+ *ppLeft = pCol;
+ return 1;
+ }
+ }
+ }
+ }else if( pExpr->op==TK_NE || pExpr->op==TK_ISNOT || pExpr->op==TK_NOTNULL ){
+ int res = 0;
+ Expr *pLeft = pExpr->pLeft;
+ Expr *pRight = pExpr->pRight;
+ testcase( pLeft->op==TK_COLUMN && pLeft->y.pTab==0 );
+ if( ExprIsVtab(pLeft) ){
+ res++;
+ }
+ testcase( pRight && pRight->op==TK_COLUMN && pRight->y.pTab==0 );
+ if( pRight && ExprIsVtab(pRight) ){
+ res++;
+ SWAP(Expr*, pLeft, pRight);
+ }
+ *ppLeft = pLeft;
+ *ppRight = pRight;
+ if( pExpr->op==TK_NE ) *peOp2 = SQLITE_INDEX_CONSTRAINT_NE;
+ if( pExpr->op==TK_ISNOT ) *peOp2 = SQLITE_INDEX_CONSTRAINT_ISNOT;
+ if( pExpr->op==TK_NOTNULL ) *peOp2 = SQLITE_INDEX_CONSTRAINT_ISNOTNULL;
+ return res;
+ }
+ return 0;
+}
+#endif /* SQLITE_OMIT_VIRTUALTABLE */
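+
+/* For illustration (assuming an FTS-style virtual table vt with a
+** column named body): both spellings of form (1),
+**
+**        vt.body MATCH 'sqlite'          -- infix form
+**        match('sqlite', vt.body)        -- equivalent function form
+**
+** cause this routine to return 1 with *peOp2 set to
+** SQLITE_INDEX_CONSTRAINT_MATCH, making the term visible to the
+** virtual table's xBestIndex method.
+*/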
+
+/*
+** If the pBase expression originated in the ON or USING clause of
+** a join, then transfer the appropriate markings over to derived.
+*/
+static void transferJoinMarkings(Expr *pDerived, Expr *pBase){
+ if( pDerived ){
+ pDerived->flags |= pBase->flags & EP_FromJoin;
+ pDerived->iRightJoinTable = pBase->iRightJoinTable;
+ }
+}
+
+/*
+** Mark term iChild as being a child of term iParent
+*/
+static void markTermAsChild(WhereClause *pWC, int iChild, int iParent){
+ pWC->a[iChild].iParent = iParent;
+ pWC->a[iChild].truthProb = pWC->a[iParent].truthProb;
+ pWC->a[iParent].nChild++;
+}
+
+/*
+** Return the N-th AND-connected subterm of pTerm. Or if pTerm is not
+** a conjunction, then return just pTerm when N==0. If N exceeds
+** the number of available subterms, return NULL.
+*/
+static WhereTerm *whereNthSubterm(WhereTerm *pTerm, int N){
+ if( pTerm->eOperator!=WO_AND ){
+ return N==0 ? pTerm : 0;
+ }
+ if( N<pTerm->u.pAndInfo->wc.nTerm ){
+ return &pTerm->u.pAndInfo->wc.a[N];
+ }
+ return 0;
+}
+
+/*
+** Subterms pOne and pTwo are contained within WHERE clause pWC. The
+** two subterms are in disjunction - they are OR-ed together.
+**
+** If these two terms are both of the form: "A op B" with the same
+** A and B values but different operators and if the operators are
+** compatible (if one is = and the other is <, for example) then
+** add a new virtual AND term to pWC that is the combination of the
+** two.
+**
+** Some examples:
+**
+** x<y OR x=y --> x<=y
+** x=y OR x=y --> x=y
+** x<=y OR x<y --> x<=y
+**
+** The following is NOT generated:
+**
+** x<y OR x>y --> x!=y
+*/
+static void whereCombineDisjuncts(
+ SrcList *pSrc, /* the FROM clause */
+ WhereClause *pWC, /* The complete WHERE clause */
+ WhereTerm *pOne, /* First disjunct */
+ WhereTerm *pTwo /* Second disjunct */
+){
+ u16 eOp = pOne->eOperator | pTwo->eOperator;
+ sqlite3 *db; /* Database connection (for malloc) */
+ Expr *pNew; /* New virtual expression */
+ int op; /* Operator for the combined expression */
+ int idxNew; /* Index in pWC of the next virtual term */
+
+ if( (pOne->eOperator & (WO_EQ|WO_LT|WO_LE|WO_GT|WO_GE))==0 ) return;
+ if( (pTwo->eOperator & (WO_EQ|WO_LT|WO_LE|WO_GT|WO_GE))==0 ) return;
+ if( (eOp & (WO_EQ|WO_LT|WO_LE))!=eOp
+ && (eOp & (WO_EQ|WO_GT|WO_GE))!=eOp ) return;
+ assert( pOne->pExpr->pLeft!=0 && pOne->pExpr->pRight!=0 );
+ assert( pTwo->pExpr->pLeft!=0 && pTwo->pExpr->pRight!=0 );
+ if( sqlite3ExprCompare(0,pOne->pExpr->pLeft, pTwo->pExpr->pLeft, -1) ) return;
+ if( sqlite3ExprCompare(0,pOne->pExpr->pRight, pTwo->pExpr->pRight,-1) )return;
+ /* If we reach this point, it means the two subterms can be combined */
+ if( (eOp & (eOp-1))!=0 ){
+ if( eOp & (WO_LT|WO_LE) ){
+ eOp = WO_LE;
+ }else{
+ assert( eOp & (WO_GT|WO_GE) );
+ eOp = WO_GE;
+ }
+ }
+ db = pWC->pWInfo->pParse->db;
+ pNew = sqlite3ExprDup(db, pOne->pExpr, 0);
+ if( pNew==0 ) return;
+ for(op=TK_EQ; eOp!=(WO_EQ<<(op-TK_EQ)); op++){ assert( op<TK_GE ); }
+ pNew->op = op;
+ idxNew = whereClauseInsert(pWC, pNew, TERM_VIRTUAL|TERM_DYNAMIC);
+ exprAnalyze(pSrc, pWC, idxNew);
+}
+
+#if !defined(SQLITE_OMIT_OR_OPTIMIZATION) && !defined(SQLITE_OMIT_SUBQUERY)
+/*
+** Analyze a term that consists of two or more OR-connected
+** subterms. So in:
+**
+** ... WHERE (a=5) AND (b=7 OR c=9 OR d=13) AND (d=13)
+** ^^^^^^^^^^^^^^^^^^^^
+**
+** This routine analyzes terms such as the middle term in the above example.
+** A WhereOrTerm object is computed and attached to the term under
+** analysis, regardless of the outcome of the analysis. Hence:
+**
+** WhereTerm.wtFlags |= TERM_ORINFO
+** WhereTerm.u.pOrInfo = a dynamically allocated WhereOrTerm object
+**
+** The term being analyzed must have two or more OR-connected subterms.
+** A single subterm might be a set of AND-connected sub-subterms.
+** Examples of terms under analysis:
+**
+** (A) t1.x=t2.y OR t1.x=t2.z OR t1.y=15 OR t1.z=t3.a+5
+** (B) x=expr1 OR expr2=x OR x=expr3
+** (C) t1.x=t2.y OR (t1.x=t2.z AND t1.y=15)
+** (D) x=expr1 OR (y>11 AND y<22 AND z LIKE '*hello*')
+** (E) (p.a=1 AND q.b=2 AND r.c=3) OR (p.x=4 AND q.y=5 AND r.z=6)
+** (F) x>A OR (x=A AND y>=B)
+**
+** CASE 1:
+**
+** If all subterms are of the form T.C=expr for some single column of C and
+** a single table T (as shown in example B above) then create a new virtual
+** term that is an equivalent IN expression. In other words, if the term
+** being analyzed is:
+**
+** x = expr1 OR expr2 = x OR x = expr3
+**
+** then create a new virtual term like this:
+**
+** x IN (expr1,expr2,expr3)
+**
+** CASE 2:
+**
+** If there are exactly two disjuncts and one side has x>A and the other side
+** has x=A (for the same x and A) then add a new virtual conjunct term to the
+** WHERE clause of the form "x>=A". Example:
+**
+** x>A OR (x=A AND y>B) adds: x>=A
+**
+** The added conjunct can sometimes be helpful in query planning.
+**
+** CASE 3:
+**
+** If all subterms are indexable by a single table T, then set
+**
+** WhereTerm.eOperator = WO_OR
+** WhereTerm.u.pOrInfo->indexable |= the cursor number for table T
+**
+** A subterm is "indexable" if it is of the form
+** "T.C <op> <expr>" where C is any column of table T and
+** <op> is one of "=", "<", "<=", ">", ">=", "IS NULL", or "IN".
+** A subterm is also indexable if it is an AND of two or more
+** sub-subterms, at least one of which is indexable. Indexable AND
+** subterms have their eOperator set to WO_AND and they have
+** u.pAndInfo set to a dynamically allocated WhereAndTerm object.
+**
+** From another point of view, "indexable" means that the subterm could
+** potentially be used with an index if an appropriate index exists.
+** This analysis does not consider whether or not the index exists; that
+** is decided elsewhere. This analysis only looks at whether subterms
+** appropriate for indexing exist.
+**
+** All examples A through E above satisfy case 3. But if a term
+** also satisfies case 1 (such as B) we know that the optimizer will
+** always prefer case 1, so in that case we pretend that case 3 is not
+** satisfied.
+**
+** It might be the case that multiple tables are indexable. For example,
+** (E) above is indexable on tables P, Q, and R.
+**
+** Terms that satisfy case 3 are candidates for lookup by using
+** separate indices to find rowids for each subterm and composing
+** the union of all rowids using a RowSet object. This is similar
+** to "bitmap indices" in other database engines.
+**
+** OTHERWISE:
+**
+** If none of cases 1, 2, or 3 apply, then leave the eOperator set to
+** zero. This term is not useful for search.
+*/
+static void exprAnalyzeOrTerm(
+ SrcList *pSrc, /* the FROM clause */
+ WhereClause *pWC, /* the complete WHERE clause */
+ int idxTerm /* Index of the OR-term to be analyzed */
+){
+ WhereInfo *pWInfo = pWC->pWInfo; /* WHERE clause processing context */
+ Parse *pParse = pWInfo->pParse; /* Parser context */
+ sqlite3 *db = pParse->db; /* Database connection */
+ WhereTerm *pTerm = &pWC->a[idxTerm]; /* The term to be analyzed */
+ Expr *pExpr = pTerm->pExpr; /* The expression of the term */
+ int i; /* Loop counters */
+ WhereClause *pOrWc; /* Breakup of pTerm into subterms */
+ WhereTerm *pOrTerm; /* A Sub-term within the pOrWc */
+ WhereOrInfo *pOrInfo; /* Additional information associated with pTerm */
+ Bitmask chngToIN; /* Tables that might satisfy case 1 */
+ Bitmask indexable; /* Tables that are indexable, satisfying case 2 */
+
+ /*
+ ** Break the OR clause into its separate subterms. The subterms are
+  ** stored in a WhereClause structure contained within the WhereOrInfo
+ ** object that is attached to the original OR clause term.
+ */
+ assert( (pTerm->wtFlags & (TERM_DYNAMIC|TERM_ORINFO|TERM_ANDINFO))==0 );
+ assert( pExpr->op==TK_OR );
+ pTerm->u.pOrInfo = pOrInfo = sqlite3DbMallocZero(db, sizeof(*pOrInfo));
+ if( pOrInfo==0 ) return;
+ pTerm->wtFlags |= TERM_ORINFO;
+ pOrWc = &pOrInfo->wc;
+ memset(pOrWc->aStatic, 0, sizeof(pOrWc->aStatic));
+ sqlite3WhereClauseInit(pOrWc, pWInfo);
+ sqlite3WhereSplit(pOrWc, pExpr, TK_OR);
+ sqlite3WhereExprAnalyze(pSrc, pOrWc);
+ if( db->mallocFailed ) return;
+ assert( pOrWc->nTerm>=2 );
+
+ /*
+ ** Compute the set of tables that might satisfy cases 1 or 3.
+ */
+ indexable = ~(Bitmask)0;
+ chngToIN = ~(Bitmask)0;
+ for(i=pOrWc->nTerm-1, pOrTerm=pOrWc->a; i>=0 && indexable; i--, pOrTerm++){
+ if( (pOrTerm->eOperator & WO_SINGLE)==0 ){
+ WhereAndInfo *pAndInfo;
+ assert( (pOrTerm->wtFlags & (TERM_ANDINFO|TERM_ORINFO))==0 );
+ chngToIN = 0;
+ pAndInfo = sqlite3DbMallocRawNN(db, sizeof(*pAndInfo));
+ if( pAndInfo ){
+ WhereClause *pAndWC;
+ WhereTerm *pAndTerm;
+ int j;
+ Bitmask b = 0;
+ pOrTerm->u.pAndInfo = pAndInfo;
+ pOrTerm->wtFlags |= TERM_ANDINFO;
+ pOrTerm->eOperator = WO_AND;
+ pAndWC = &pAndInfo->wc;
+ memset(pAndWC->aStatic, 0, sizeof(pAndWC->aStatic));
+ sqlite3WhereClauseInit(pAndWC, pWC->pWInfo);
+ sqlite3WhereSplit(pAndWC, pOrTerm->pExpr, TK_AND);
+ sqlite3WhereExprAnalyze(pSrc, pAndWC);
+ pAndWC->pOuter = pWC;
+ if( !db->mallocFailed ){
+ for(j=0, pAndTerm=pAndWC->a; j<pAndWC->nTerm; j++, pAndTerm++){
+ assert( pAndTerm->pExpr );
+ if( allowedOp(pAndTerm->pExpr->op)
+ || pAndTerm->eOperator==WO_AUX
+ ){
+ b |= sqlite3WhereGetMask(&pWInfo->sMaskSet, pAndTerm->leftCursor);
+ }
+ }
+ }
+ indexable &= b;
+ }
+ }else if( pOrTerm->wtFlags & TERM_COPIED ){
+ /* Skip this term for now. We revisit it when we process the
+ ** corresponding TERM_VIRTUAL term */
+ }else{
+ Bitmask b;
+ b = sqlite3WhereGetMask(&pWInfo->sMaskSet, pOrTerm->leftCursor);
+ if( pOrTerm->wtFlags & TERM_VIRTUAL ){
+ WhereTerm *pOther = &pOrWc->a[pOrTerm->iParent];
+ b |= sqlite3WhereGetMask(&pWInfo->sMaskSet, pOther->leftCursor);
+ }
+ indexable &= b;
+ if( (pOrTerm->eOperator & WO_EQ)==0 ){
+ chngToIN = 0;
+ }else{
+ chngToIN &= b;
+ }
+ }
+ }
+
+ /*
+ ** Record the set of tables that satisfy case 3. The set might be
+ ** empty.
+ */
+ pOrInfo->indexable = indexable;
+  pTerm->eOperator = WO_OR;
+  if( indexable ) pWC->hasOr = 1;
+
+  /* For a two-way OR, attempt to implement case 2.
+ */
+ if( indexable && pOrWc->nTerm==2 ){
+ int iOne = 0;
+ WhereTerm *pOne;
+ while( (pOne = whereNthSubterm(&pOrWc->a[0],iOne++))!=0 ){
+ int iTwo = 0;
+ WhereTerm *pTwo;
+ while( (pTwo = whereNthSubterm(&pOrWc->a[1],iTwo++))!=0 ){
+ whereCombineDisjuncts(pSrc, pWC, pOne, pTwo);
+ }
+ }
+ }
+
+ /*
+ ** chngToIN holds a set of tables that *might* satisfy case 1. But
+ ** we have to do some additional checking to see if case 1 really
+ ** is satisfied.
+ **
+ ** chngToIN will hold either 0, 1, or 2 bits. The 0-bit case means
+ ** that there is no possibility of transforming the OR clause into an
+ ** IN operator because one or more terms in the OR clause contain
+ ** something other than == on a column in the single table. The 1-bit
+ ** case means that every term of the OR clause is of the form
+ ** "table.column=expr" for some single table. The one bit that is set
+ ** will correspond to the common table. We still need to check to make
+ ** sure the same column is used on all terms. The 2-bit case is when
+  ** all terms are of the form "table1.column=table2.column". It
+ ** might be possible to form an IN operator with either table1.column
+ ** or table2.column as the LHS if either is common to every term of
+ ** the OR clause.
+ **
+ ** Note that terms of the form "table.column1=table.column2" (the
+  ** same table on both sides of the ==) cannot be optimized.
+ */
+ if( chngToIN ){
+ int okToChngToIN = 0; /* True if the conversion to IN is valid */
+ int iColumn = -1; /* Column index on lhs of IN operator */
+ int iCursor = -1; /* Table cursor common to all terms */
+ int j = 0; /* Loop counter */
+
+ /* Search for a table and column that appears on one side or the
+ ** other of the == operator in every subterm. That table and column
+ ** will be recorded in iCursor and iColumn. There might not be any
+ ** such table and column. Set okToChngToIN if an appropriate table
+ ** and column is found but leave okToChngToIN false if not found.
+ */
+ for(j=0; j<2 && !okToChngToIN; j++){
+ Expr *pLeft = 0;
+ pOrTerm = pOrWc->a;
+ for(i=pOrWc->nTerm-1; i>=0; i--, pOrTerm++){
+ assert( pOrTerm->eOperator & WO_EQ );
+ pOrTerm->wtFlags &= ~TERM_OR_OK;
+ if( pOrTerm->leftCursor==iCursor ){
+ /* This is the 2-bit case and we are on the second iteration and
+        ** the current term is from the first iteration. So skip this term. */
+ assert( j==1 );
+ continue;
+ }
+ if( (chngToIN & sqlite3WhereGetMask(&pWInfo->sMaskSet,
+ pOrTerm->leftCursor))==0 ){
+ /* This term must be of the form t1.a==t2.b where t2 is in the
+ ** chngToIN set but t1 is not. This term will be either preceded
+          ** or followed by an inverted copy (t2.b==t1.a). Skip this term
+ ** and use its inversion. */
+ testcase( pOrTerm->wtFlags & TERM_COPIED );
+ testcase( pOrTerm->wtFlags & TERM_VIRTUAL );
+ assert( pOrTerm->wtFlags & (TERM_COPIED|TERM_VIRTUAL) );
+ continue;
+ }
+ iColumn = pOrTerm->u.leftColumn;
+ iCursor = pOrTerm->leftCursor;
+ pLeft = pOrTerm->pExpr->pLeft;
+ break;
+ }
+ if( i<0 ){
+ /* No candidate table+column was found. This can only occur
+ ** on the second iteration */
+ assert( j==1 );
+ assert( IsPowerOfTwo(chngToIN) );
+ assert( chngToIN==sqlite3WhereGetMask(&pWInfo->sMaskSet, iCursor) );
+ break;
+ }
+ testcase( j==1 );
+
+ /* We have found a candidate table and column. Check to see if that
+ ** table and column is common to every term in the OR clause */
+ okToChngToIN = 1;
+ for(; i>=0 && okToChngToIN; i--, pOrTerm++){
+ assert( pOrTerm->eOperator & WO_EQ );
+ if( pOrTerm->leftCursor!=iCursor ){
+ pOrTerm->wtFlags &= ~TERM_OR_OK;
+ }else if( pOrTerm->u.leftColumn!=iColumn || (iColumn==XN_EXPR
+ && sqlite3ExprCompare(pParse, pOrTerm->pExpr->pLeft, pLeft, -1)
+ )){
+ okToChngToIN = 0;
+ }else{
+ int affLeft, affRight;
+ /* If the right-hand side is also a column, then the affinities
+ ** of both right and left sides must be such that no type
+ ** conversions are required on the right. (Ticket #2249)
+ */
+ affRight = sqlite3ExprAffinity(pOrTerm->pExpr->pRight);
+ affLeft = sqlite3ExprAffinity(pOrTerm->pExpr->pLeft);
+ if( affRight!=0 && affRight!=affLeft ){
+ okToChngToIN = 0;
+ }else{
+ pOrTerm->wtFlags |= TERM_OR_OK;
+ }
+ }
+ }
+ }
+
+ /* At this point, okToChngToIN is true if original pTerm satisfies
+ ** case 1. In that case, construct a new virtual term that is
+ ** pTerm converted into an IN operator.
+ */
+ if( okToChngToIN ){
+ Expr *pDup; /* A transient duplicate expression */
+ ExprList *pList = 0; /* The RHS of the IN operator */
+ Expr *pLeft = 0; /* The LHS of the IN operator */
+ Expr *pNew; /* The complete IN operator */
+
+ for(i=pOrWc->nTerm-1, pOrTerm=pOrWc->a; i>=0; i--, pOrTerm++){
+ if( (pOrTerm->wtFlags & TERM_OR_OK)==0 ) continue;
+ assert( pOrTerm->eOperator & WO_EQ );
+ assert( pOrTerm->leftCursor==iCursor );
+ assert( pOrTerm->u.leftColumn==iColumn );
+ pDup = sqlite3ExprDup(db, pOrTerm->pExpr->pRight, 0);
+ pList = sqlite3ExprListAppend(pWInfo->pParse, pList, pDup);
+ pLeft = pOrTerm->pExpr->pLeft;
+ }
+ assert( pLeft!=0 );
+ pDup = sqlite3ExprDup(db, pLeft, 0);
+ pNew = sqlite3PExpr(pParse, TK_IN, pDup, 0);
+ if( pNew ){
+ int idxNew;
+ transferJoinMarkings(pNew, pExpr);
+ assert( !ExprHasProperty(pNew, EP_xIsSelect) );
+ pNew->x.pList = pList;
+ idxNew = whereClauseInsert(pWC, pNew, TERM_VIRTUAL|TERM_DYNAMIC);
+ testcase( idxNew==0 );
+ exprAnalyze(pSrc, pWC, idxNew);
+        /* pTerm = &pWC->a[idxTerm]; // would be needed if pTerm were used again */
+ markTermAsChild(pWC, idxNew, idxTerm);
+ }else{
+ sqlite3ExprListDelete(db, pList);
+ }
+ }
+ }
+}
+#endif /* !SQLITE_OMIT_OR_OPTIMIZATION && !SQLITE_OMIT_SUBQUERY */
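+
+/* Worked example of case 1 (a sketch, using a hypothetical table t1):
+** for the clause
+**
+**        ... WHERE t1.x=1 OR t1.x=2 OR t1.x=3
+**
+** exprAnalyzeOrTerm() adds the virtual term "t1.x IN (1,2,3)" to the
+** WHERE clause. The virtual term is marked as a child of the original
+** OR term, so only one of the two is ever coded.
+*/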
+
+/*
+** We already know that pExpr is a binary operator where both operands are
+** column references. This routine checks to see if pExpr is an equivalence
+** relation:
+** 1. The SQLITE_Transitive optimization must be enabled
+** 2. Must be either an == or an IS operator
+** 3. Not originating in the ON clause of an OUTER JOIN
+** 4. The affinities of A and B must be compatible
+** 5a. Both operands use the same collating sequence OR
+** 5b. The overall collating sequence is BINARY
+** If this routine returns TRUE, that means that the RHS can be substituted
+** for the LHS anyplace else in the WHERE clause where the LHS column occurs.
+** This is an optimization. No harm comes from returning 0. But if 1 is
+** returned when it should not be, then incorrect answers might result.
+*/
+static int termIsEquivalence(Parse *pParse, Expr *pExpr){
+ char aff1, aff2;
+ CollSeq *pColl;
+ if( !OptimizationEnabled(pParse->db, SQLITE_Transitive) ) return 0;
+ if( pExpr->op!=TK_EQ && pExpr->op!=TK_IS ) return 0;
+ if( ExprHasProperty(pExpr, EP_FromJoin) ) return 0;
+ aff1 = sqlite3ExprAffinity(pExpr->pLeft);
+ aff2 = sqlite3ExprAffinity(pExpr->pRight);
+ if( aff1!=aff2
+ && (!sqlite3IsNumericAffinity(aff1) || !sqlite3IsNumericAffinity(aff2))
+ ){
+ return 0;
+ }
+ pColl = sqlite3ExprCompareCollSeq(pParse, pExpr);
+ if( sqlite3IsBinary(pColl) ) return 1;
+ return sqlite3ExprCollSeqMatch(pParse, pExpr->pLeft, pExpr->pRight);
+}
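+
+/* For illustration (hypothetical tables t1 and t2): in
+**
+**        ... WHERE t1.a=t2.b AND t2.b=5
+**
+** the term "t1.a=t2.b" qualifies as an equivalence under the rules
+** above, so the WO_EQUIV flag lets the planner consider "t2.b=5" as
+** if "t1.a=5" were also present when evaluating indexes on t1.
+*/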
+
+/*
+** Recursively walk the expressions of a SELECT statement and generate
+** a bitmask indicating which tables are used in that expression
+** tree.
+*/
+static Bitmask exprSelectUsage(WhereMaskSet *pMaskSet, Select *pS){
+ Bitmask mask = 0;
+ while( pS ){
+ SrcList *pSrc = pS->pSrc;
+ mask |= sqlite3WhereExprListUsage(pMaskSet, pS->pEList);
+ mask |= sqlite3WhereExprListUsage(pMaskSet, pS->pGroupBy);
+ mask |= sqlite3WhereExprListUsage(pMaskSet, pS->pOrderBy);
+ mask |= sqlite3WhereExprUsage(pMaskSet, pS->pWhere);
+ mask |= sqlite3WhereExprUsage(pMaskSet, pS->pHaving);
+ if( ALWAYS(pSrc!=0) ){
+ int i;
+ for(i=0; i<pSrc->nSrc; i++){
+ mask |= exprSelectUsage(pMaskSet, pSrc->a[i].pSelect);
+ mask |= sqlite3WhereExprUsage(pMaskSet, pSrc->a[i].pOn);
+ if( pSrc->a[i].fg.isTabFunc ){
+ mask |= sqlite3WhereExprListUsage(pMaskSet, pSrc->a[i].u1.pFuncArg);
+ }
+ }
+ }
+ pS = pS->pPrior;
+ }
+ return mask;
+}
+
+/*
+** Expression pExpr is one operand of a comparison operator that might
+** be useful for indexing. This routine checks to see if pExpr appears
+** in any index. Return TRUE (1) if pExpr is an indexed term and return
+** FALSE (0) if not. If TRUE is returned, also set aiCurCol[0] to the cursor
+** number of the table that is indexed and aiCurCol[1] to the column number
+** of the column that is indexed, or XN_EXPR (-2) if an expression is being
+** indexed.
+**
+** If pExpr is a TK_COLUMN column reference, then this routine always returns
+** true even if that particular column is not indexed, because the column
+** might be added to an automatic index later.
+*/
+static SQLITE_NOINLINE int exprMightBeIndexed2(
+ SrcList *pFrom, /* The FROM clause */
+ Bitmask mPrereq, /* Bitmask of FROM clause terms referenced by pExpr */
+ int *aiCurCol, /* Write the referenced table cursor and column here */
+ Expr *pExpr /* An operand of a comparison operator */
+){
+ Index *pIdx;
+ int i;
+ int iCur;
+ for(i=0; mPrereq>1; i++, mPrereq>>=1){}
+ iCur = pFrom->a[i].iCursor;
+ for(pIdx=pFrom->a[i].pTab->pIndex; pIdx; pIdx=pIdx->pNext){
+ if( pIdx->aColExpr==0 ) continue;
+ for(i=0; i<pIdx->nKeyCol; i++){
+ if( pIdx->aiColumn[i]!=XN_EXPR ) continue;
+ if( sqlite3ExprCompareSkip(pExpr, pIdx->aColExpr->a[i].pExpr, iCur)==0 ){
+ aiCurCol[0] = iCur;
+ aiCurCol[1] = XN_EXPR;
+ return 1;
+ }
+ }
+ }
+ return 0;
+}
+static int exprMightBeIndexed(
+ SrcList *pFrom, /* The FROM clause */
+ Bitmask mPrereq, /* Bitmask of FROM clause terms referenced by pExpr */
+ int *aiCurCol, /* Write the referenced table cursor & column here */
+ Expr *pExpr, /* An operand of a comparison operator */
+ int op /* The specific comparison operator */
+){
+  /* If this expression is a vector to the left or right of an
+ ** inequality constraint (>, <, >= or <=), perform the processing
+ ** on the first element of the vector. */
+ assert( TK_GT+1==TK_LE && TK_GT+2==TK_LT && TK_GT+3==TK_GE );
+ assert( TK_IS<TK_GE && TK_ISNULL<TK_GE && TK_IN<TK_GE );
+ assert( op<=TK_GE );
+ if( pExpr->op==TK_VECTOR && (op>=TK_GT && ALWAYS(op<=TK_GE)) ){
+ pExpr = pExpr->x.pList->a[0].pExpr;
+ }
+
+ if( pExpr->op==TK_COLUMN ){
+ aiCurCol[0] = pExpr->iTable;
+ aiCurCol[1] = pExpr->iColumn;
+ return 1;
+ }
+ if( mPrereq==0 ) return 0; /* No table references */
+ if( (mPrereq&(mPrereq-1))!=0 ) return 0; /* Refs more than one table */
+ return exprMightBeIndexed2(pFrom,mPrereq,aiCurCol,pExpr);
+}
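+
+/* For illustration (a sketch, assuming a hypothetical schema):
+**
+**        CREATE TABLE t1(a,b);
+**        CREATE INDEX i1 ON t1(a+b);
+**
+** for the WHERE term "a+b=10", exprMightBeIndexed() matches the
+** expression "a+b" against the indexed expression of i1 and reports
+** aiCurCol[1]==XN_EXPR, allowing the term to be used with index i1.
+*/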
+
+/*
+** The input to this routine is a WhereTerm structure with only the
+** "pExpr" field filled in. The job of this routine is to analyze the
+** subexpression and populate all the other fields of the WhereTerm
+** structure.
+**
+** If the expression is of the form "<expr> <op> X" it gets commuted
+** to the standard form of "X <op> <expr>".
+**
+** If the expression is of the form "X <op> Y" where both X and Y are
+** columns, then the original expression is unchanged and a new virtual
+** term of the form "Y <op> X" is added to the WHERE clause and
+** analyzed separately. The original term is marked with TERM_COPIED
+** and the new term is marked with TERM_DYNAMIC (because its pExpr
+** needs to be freed with the WhereClause) and TERM_VIRTUAL (because it
+** is a commuted copy of a prior term.) The original term has nChild=1
+** and the copy has idxParent set to the index of the original term.
+*/
+static void exprAnalyze(
+ SrcList *pSrc, /* the FROM clause */
+ WhereClause *pWC, /* the WHERE clause */
+ int idxTerm /* Index of the term to be analyzed */
+){
+ WhereInfo *pWInfo = pWC->pWInfo; /* WHERE clause processing context */
+ WhereTerm *pTerm; /* The term to be analyzed */
+ WhereMaskSet *pMaskSet; /* Set of table index masks */
+ Expr *pExpr; /* The expression to be analyzed */
+  Bitmask prereqLeft;            /* Prerequisites of pExpr->pLeft */
+  Bitmask prereqAll;             /* Prerequisites of pExpr */
+ Bitmask extraRight = 0; /* Extra dependencies on LEFT JOIN */
+ Expr *pStr1 = 0; /* RHS of LIKE/GLOB operator */
+ int isComplete = 0; /* RHS of LIKE/GLOB ends with wildcard */
+ int noCase = 0; /* uppercase equivalent to lowercase */
+ int op; /* Top-level operator. pExpr->op */
+ Parse *pParse = pWInfo->pParse; /* Parsing context */
+ sqlite3 *db = pParse->db; /* Database connection */
+ unsigned char eOp2 = 0; /* op2 value for LIKE/REGEXP/GLOB */
+ int nLeft; /* Number of elements on left side vector */
+
+ if( db->mallocFailed ){
+ return;
+ }
+ pTerm = &pWC->a[idxTerm];
+ pMaskSet = &pWInfo->sMaskSet;
+ pExpr = pTerm->pExpr;
+ assert( pExpr->op!=TK_AS && pExpr->op!=TK_COLLATE );
+ prereqLeft = sqlite3WhereExprUsage(pMaskSet, pExpr->pLeft);
+ op = pExpr->op;
+ if( op==TK_IN ){
+ assert( pExpr->pRight==0 );
+ if( sqlite3ExprCheckIN(pParse, pExpr) ) return;
+ if( ExprHasProperty(pExpr, EP_xIsSelect) ){
+ pTerm->prereqRight = exprSelectUsage(pMaskSet, pExpr->x.pSelect);
+ }else{
+ pTerm->prereqRight = sqlite3WhereExprListUsage(pMaskSet, pExpr->x.pList);
+ }
+ }else if( op==TK_ISNULL ){
+ pTerm->prereqRight = 0;
+ }else{
+ pTerm->prereqRight = sqlite3WhereExprUsage(pMaskSet, pExpr->pRight);
+ }
+ pMaskSet->bVarSelect = 0;
+ prereqAll = sqlite3WhereExprUsageNN(pMaskSet, pExpr);
+ if( pMaskSet->bVarSelect ) pTerm->wtFlags |= TERM_VARSELECT;
+ if( ExprHasProperty(pExpr, EP_FromJoin) ){
+ Bitmask x = sqlite3WhereGetMask(pMaskSet, pExpr->iRightJoinTable);
+ prereqAll |= x;
+ extraRight = x-1; /* ON clause terms may not be used with an index
+ ** on left table of a LEFT JOIN. Ticket #3015 */
+ if( (prereqAll>>1)>=x ){
+ sqlite3ErrorMsg(pParse, "ON clause references tables to its right");
+ return;
+ }
+ }
+ pTerm->prereqAll = prereqAll;
+ pTerm->leftCursor = -1;
+ pTerm->iParent = -1;
+ pTerm->eOperator = 0;
+ if( allowedOp(op) ){
+ int aiCurCol[2];
+ Expr *pLeft = sqlite3ExprSkipCollate(pExpr->pLeft);
+ Expr *pRight = sqlite3ExprSkipCollate(pExpr->pRight);
+ u16 opMask = (pTerm->prereqRight & prereqLeft)==0 ? WO_ALL : WO_EQUIV;
+
+ if( pTerm->iField>0 ){
+ assert( op==TK_IN );
+ assert( pLeft->op==TK_VECTOR );
+ pLeft = pLeft->x.pList->a[pTerm->iField-1].pExpr;
+ }
+
+ if( exprMightBeIndexed(pSrc, prereqLeft, aiCurCol, pLeft, op) ){
+ pTerm->leftCursor = aiCurCol[0];
+ pTerm->u.leftColumn = aiCurCol[1];
+ pTerm->eOperator = operatorMask(op) & opMask;
+ }
+ if( op==TK_IS ) pTerm->wtFlags |= TERM_IS;
+ if( pRight
+ && exprMightBeIndexed(pSrc, pTerm->prereqRight, aiCurCol, pRight, op)
+ ){
+ WhereTerm *pNew;
+ Expr *pDup;
+ u16 eExtraOp = 0; /* Extra bits for pNew->eOperator */
+ assert( pTerm->iField==0 );
+ if( pTerm->leftCursor>=0 ){
+ int idxNew;
+ pDup = sqlite3ExprDup(db, pExpr, 0);
+ if( db->mallocFailed ){
+ sqlite3ExprDelete(db, pDup);
+ return;
+ }
+ idxNew = whereClauseInsert(pWC, pDup, TERM_VIRTUAL|TERM_DYNAMIC);
+ if( idxNew==0 ) return;
+ pNew = &pWC->a[idxNew];
+ markTermAsChild(pWC, idxNew, idxTerm);
+ if( op==TK_IS ) pNew->wtFlags |= TERM_IS;
+ pTerm = &pWC->a[idxTerm];
+ pTerm->wtFlags |= TERM_COPIED;
+
+ if( termIsEquivalence(pParse, pDup) ){
+ pTerm->eOperator |= WO_EQUIV;
+ eExtraOp = WO_EQUIV;
+ }
+ }else{
+ pDup = pExpr;
+ pNew = pTerm;
+ }
+ pNew->wtFlags |= exprCommute(pParse, pDup);
+ pNew->leftCursor = aiCurCol[0];
+ pNew->u.leftColumn = aiCurCol[1];
+ testcase( (prereqLeft | extraRight) != prereqLeft );
+ pNew->prereqRight = prereqLeft | extraRight;
+ pNew->prereqAll = prereqAll;
+ pNew->eOperator = (operatorMask(pDup->op) + eExtraOp) & opMask;
+ }
+ }
+
+#ifndef SQLITE_OMIT_BETWEEN_OPTIMIZATION
+ /* If a term is the BETWEEN operator, create two new virtual terms
+ ** that define the range that the BETWEEN implements. For example:
+ **
+ ** a BETWEEN b AND c
+ **
+ ** is converted into:
+ **
+ ** (a BETWEEN b AND c) AND (a>=b) AND (a<=c)
+ **
+ ** The two new terms are added onto the end of the WhereClause object.
+ ** The new terms are "dynamic" and are children of the original BETWEEN
+ ** term. That means that if the BETWEEN term is coded, the children are
+ ** skipped. Or, if the children are satisfied by an index, the original
+ ** BETWEEN term is skipped.
+ */
+ else if( pExpr->op==TK_BETWEEN && pWC->op==TK_AND ){
+ ExprList *pList = pExpr->x.pList;
+ int i;
+ static const u8 ops[] = {TK_GE, TK_LE};
+ assert( pList!=0 );
+ assert( pList->nExpr==2 );
+ for(i=0; i<2; i++){
+ Expr *pNewExpr;
+ int idxNew;
+ pNewExpr = sqlite3PExpr(pParse, ops[i],
+ sqlite3ExprDup(db, pExpr->pLeft, 0),
+ sqlite3ExprDup(db, pList->a[i].pExpr, 0));
+ transferJoinMarkings(pNewExpr, pExpr);
+ idxNew = whereClauseInsert(pWC, pNewExpr, TERM_VIRTUAL|TERM_DYNAMIC);
+ testcase( idxNew==0 );
+ exprAnalyze(pSrc, pWC, idxNew);
+ pTerm = &pWC->a[idxTerm];
+ markTermAsChild(pWC, idxNew, idxTerm);
+ }
+ }
+#endif /* SQLITE_OMIT_BETWEEN_OPTIMIZATION */
+
+#if !defined(SQLITE_OMIT_OR_OPTIMIZATION) && !defined(SQLITE_OMIT_SUBQUERY)
+ /* Analyze a term that is composed of two or more subterms connected by
+ ** an OR operator.
+ */
+ else if( pExpr->op==TK_OR ){
+ assert( pWC->op==TK_AND );
+ exprAnalyzeOrTerm(pSrc, pWC, idxTerm);
+ pTerm = &pWC->a[idxTerm];
+ }
+#endif /* SQLITE_OMIT_OR_OPTIMIZATION */
+
+#ifndef SQLITE_OMIT_LIKE_OPTIMIZATION
+ /* Add constraints to reduce the search space on a LIKE or GLOB
+ ** operator.
+ **
+ ** A like pattern of the form "x LIKE 'aBc%'" is changed into constraints
+ **
+ ** x>='ABC' AND x<'abd' AND x LIKE 'aBc%'
+ **
+ ** The last character of the prefix "abc" is incremented to form the
+ ** termination condition "abd". If case is not significant (the default
+ ** for LIKE) then the lower-bound is made all uppercase and the upper-
+ ** bound is made all lowercase so that the bounds also work when comparing
+ ** BLOBs.
+ */
+ if( pWC->op==TK_AND
+ && isLikeOrGlob(pParse, pExpr, &pStr1, &isComplete, &noCase)
+ ){
+ Expr *pLeft; /* LHS of LIKE/GLOB operator */
+ Expr *pStr2; /* Copy of pStr1 - RHS of LIKE/GLOB operator */
+ Expr *pNewExpr1;
+ Expr *pNewExpr2;
+ int idxNew1;
+ int idxNew2;
+ const char *zCollSeqName; /* Name of collating sequence */
+ const u16 wtFlags = TERM_LIKEOPT | TERM_VIRTUAL | TERM_DYNAMIC;
+
+ pLeft = pExpr->x.pList->a[1].pExpr;
+ pStr2 = sqlite3ExprDup(db, pStr1, 0);
+
+ /* Convert the lower bound to upper-case and the upper bound to
+ ** lower-case (upper-case is less than lower-case in ASCII) so that
+ ** the range constraints also work for BLOBs
+ */
+ if( noCase && !pParse->db->mallocFailed ){
+ int i;
+ char c;
+ pTerm->wtFlags |= TERM_LIKE;
+ for(i=0; (c = pStr1->u.zToken[i])!=0; i++){
+ pStr1->u.zToken[i] = sqlite3Toupper(c);
+ pStr2->u.zToken[i] = sqlite3Tolower(c);
+ }
+ }
+
+ if( !db->mallocFailed ){
+ u8 c, *pC; /* Last character before the first wildcard */
+ pC = (u8*)&pStr2->u.zToken[sqlite3Strlen30(pStr2->u.zToken)-1];
+ c = *pC;
+ if( noCase ){
+ /* The point is to increment the last character before the first
+ ** wildcard. But if we increment '@', that will push it into the
+ ** alphabetic range where case conversions will mess up the
+ ** inequality. To avoid this, make sure to also run the full
+ ** LIKE on all candidate expressions by clearing the isComplete flag
+ */
+ if( c=='A'-1 ) isComplete = 0;
+ c = sqlite3UpperToLower[c];
+ }
+ *pC = c + 1;
+ }
+ zCollSeqName = noCase ? "NOCASE" : sqlite3StrBINARY;
+ pNewExpr1 = sqlite3ExprDup(db, pLeft, 0);
+ pNewExpr1 = sqlite3PExpr(pParse, TK_GE,
+ sqlite3ExprAddCollateString(pParse,pNewExpr1,zCollSeqName),
+ pStr1);
+ transferJoinMarkings(pNewExpr1, pExpr);
+ idxNew1 = whereClauseInsert(pWC, pNewExpr1, wtFlags);
+ testcase( idxNew1==0 );
+ exprAnalyze(pSrc, pWC, idxNew1);
+ pNewExpr2 = sqlite3ExprDup(db, pLeft, 0);
+ pNewExpr2 = sqlite3PExpr(pParse, TK_LT,
+ sqlite3ExprAddCollateString(pParse,pNewExpr2,zCollSeqName),
+ pStr2);
+ transferJoinMarkings(pNewExpr2, pExpr);
+ idxNew2 = whereClauseInsert(pWC, pNewExpr2, wtFlags);
+ testcase( idxNew2==0 );
+ exprAnalyze(pSrc, pWC, idxNew2);
+ pTerm = &pWC->a[idxTerm];
+ if( isComplete ){
+ markTermAsChild(pWC, idxNew1, idxTerm);
+ markTermAsChild(pWC, idxNew2, idxTerm);
+ }
+ }
+#endif /* SQLITE_OMIT_LIKE_OPTIMIZATION */
+
+#ifndef SQLITE_OMIT_VIRTUALTABLE
+ /* Add a WO_AUX auxiliary term to the constraint set if the
+ ** current expression is of the form "column OP expr" where OP
+ ** is an operator that gets passed into virtual tables but which is
+ ** not normally optimized for ordinary tables. In other words, OP
+  ** is one of MATCH, LIKE, GLOB, REGEXP, !=, IS NOT, or IS NOT NULL.
+ ** This information is used by the xBestIndex methods of
+ ** virtual tables. The native query optimizer does not attempt
+ ** to do anything with MATCH functions.
+ */
+ if( pWC->op==TK_AND ){
+ Expr *pRight = 0, *pLeft = 0;
+ int res = isAuxiliaryVtabOperator(db, pExpr, &eOp2, &pLeft, &pRight);
+ while( res-- > 0 ){
+ int idxNew;
+ WhereTerm *pNewTerm;
+ Bitmask prereqColumn, prereqExpr;
+
+ prereqExpr = sqlite3WhereExprUsage(pMaskSet, pRight);
+ prereqColumn = sqlite3WhereExprUsage(pMaskSet, pLeft);
+ if( (prereqExpr & prereqColumn)==0 ){
+ Expr *pNewExpr;
+ pNewExpr = sqlite3PExpr(pParse, TK_MATCH,
+ 0, sqlite3ExprDup(db, pRight, 0));
+ if( ExprHasProperty(pExpr, EP_FromJoin) && pNewExpr ){
+ ExprSetProperty(pNewExpr, EP_FromJoin);
+ pNewExpr->iRightJoinTable = pExpr->iRightJoinTable;
+ }
+ idxNew = whereClauseInsert(pWC, pNewExpr, TERM_VIRTUAL|TERM_DYNAMIC);
+ testcase( idxNew==0 );
+ pNewTerm = &pWC->a[idxNew];
+ pNewTerm->prereqRight = prereqExpr;
+ pNewTerm->leftCursor = pLeft->iTable;
+ pNewTerm->u.leftColumn = pLeft->iColumn;
+ pNewTerm->eOperator = WO_AUX;
+ pNewTerm->eMatchOp = eOp2;
+ markTermAsChild(pWC, idxNew, idxTerm);
+ pTerm = &pWC->a[idxTerm];
+ pTerm->wtFlags |= TERM_COPIED;
+ pNewTerm->prereqAll = pTerm->prereqAll;
+ }
+ SWAP(Expr*, pLeft, pRight);
+ }
+ }
+#endif /* SQLITE_OMIT_VIRTUALTABLE */
+
+ /* If there is a vector == or IS term - e.g. "(a, b) == (?, ?)" - create
+ ** new terms for each component comparison - "a = ?" and "b = ?". The
+ ** new terms completely replace the original vector comparison, which is
+ ** no longer used.
+ **
+ ** This is only required if at least one side of the comparison operation
+ ** is not a sub-select. */
+ if( pWC->op==TK_AND
+ && (pExpr->op==TK_EQ || pExpr->op==TK_IS)
+ && (nLeft = sqlite3ExprVectorSize(pExpr->pLeft))>1
+ && sqlite3ExprVectorSize(pExpr->pRight)==nLeft
+ && ( (pExpr->pLeft->flags & EP_xIsSelect)==0
+ || (pExpr->pRight->flags & EP_xIsSelect)==0)
+ ){
+ int i;
+ for(i=0; i<nLeft; i++){
+ int idxNew;
+ Expr *pNew;
+ Expr *pLeft = sqlite3ExprForVectorField(pParse, pExpr->pLeft, i);
+ Expr *pRight = sqlite3ExprForVectorField(pParse, pExpr->pRight, i);
+
+ pNew = sqlite3PExpr(pParse, pExpr->op, pLeft, pRight);
+ transferJoinMarkings(pNew, pExpr);
+ idxNew = whereClauseInsert(pWC, pNew, TERM_DYNAMIC);
+ exprAnalyze(pSrc, pWC, idxNew);
+ }
+ pTerm = &pWC->a[idxTerm];
+ pTerm->wtFlags |= TERM_CODED|TERM_VIRTUAL; /* Disable the original */
+ pTerm->eOperator = 0;
+ }
+
+ /* If there is a vector IN term - e.g. "(a, b) IN (SELECT ...)" - create
+ ** a virtual term for each vector component. The expression object
+ ** used by each such virtual term is pExpr (the full vector IN(...)
+ ** expression). The WhereTerm.iField variable identifies the index within
+ ** the vector on the LHS that the virtual term represents.
+ **
+ ** This only works if the RHS is a simple SELECT (not a compound) that does
+ ** not use window functions.
+ */
+ if( pWC->op==TK_AND && pExpr->op==TK_IN && pTerm->iField==0
+ && pExpr->pLeft->op==TK_VECTOR
+ && pExpr->x.pSelect->pPrior==0
+#ifndef SQLITE_OMIT_WINDOWFUNC
+ && pExpr->x.pSelect->pWin==0
+#endif
+ ){
+ int i;
+ for(i=0; i<sqlite3ExprVectorSize(pExpr->pLeft); i++){
+ int idxNew;
+ idxNew = whereClauseInsert(pWC, pExpr, TERM_VIRTUAL);
+ pWC->a[idxNew].iField = i+1;
+ exprAnalyze(pSrc, pWC, idxNew);
+ markTermAsChild(pWC, idxNew, idxTerm);
+ }
+ }
+
+#ifdef SQLITE_ENABLE_STAT4
+ /* When sqlite_stat4 histogram data is available an operator of the
+ ** form "x IS NOT NULL" can sometimes be evaluated more efficiently
+ ** as "x>NULL" if x is not an INTEGER PRIMARY KEY. So construct a
+ ** virtual term of that form.
+ **
+ ** Note that the virtual term must be tagged with TERM_VNULL.
+ */
+ if( pExpr->op==TK_NOTNULL
+ && pExpr->pLeft->op==TK_COLUMN
+ && pExpr->pLeft->iColumn>=0
+ && !ExprHasProperty(pExpr, EP_FromJoin)
+ && OptimizationEnabled(db, SQLITE_Stat4)
+ ){
+ Expr *pNewExpr;
+ Expr *pLeft = pExpr->pLeft;
+ int idxNew;
+ WhereTerm *pNewTerm;
+
+ pNewExpr = sqlite3PExpr(pParse, TK_GT,
+ sqlite3ExprDup(db, pLeft, 0),
+ sqlite3ExprAlloc(db, TK_NULL, 0, 0));
+
+ idxNew = whereClauseInsert(pWC, pNewExpr,
+ TERM_VIRTUAL|TERM_DYNAMIC|TERM_VNULL);
+ if( idxNew ){
+ pNewTerm = &pWC->a[idxNew];
+ pNewTerm->prereqRight = 0;
+ pNewTerm->leftCursor = pLeft->iTable;
+ pNewTerm->u.leftColumn = pLeft->iColumn;
+ pNewTerm->eOperator = WO_GT;
+ markTermAsChild(pWC, idxNew, idxTerm);
+ pTerm = &pWC->a[idxTerm];
+ pTerm->wtFlags |= TERM_COPIED;
+ pNewTerm->prereqAll = pTerm->prereqAll;
+ }
+ }
+#endif /* SQLITE_ENABLE_STAT4 */
+
+ /* Prevent ON clause terms of a LEFT JOIN from being used to drive
+ ** an index for tables to the left of the join.
+ */
+ testcase( pTerm!=&pWC->a[idxTerm] );
+ pTerm = &pWC->a[idxTerm];
+ pTerm->prereqRight |= extraRight;
+}
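+
+/* Worked example (a sketch only): analyzing the single term
+**
+**        (a,b)=(1,2)
+**
+** within an AND-connected WHERE clause causes exprAnalyze() to add
+** the dynamic terms "a=1" and "b=2" and to disable the original
+** vector comparison by marking it TERM_CODED|TERM_VIRTUAL, as
+** described in the vector-comparison block above.
+*/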
+
+/***************************************************************************
+** Routines with file scope above. Interface to the rest of the where.c
+** subsystem follows.
+***************************************************************************/
+
+/*
+** This routine identifies subexpressions in the WHERE clause where
+** each subexpression is separated by the AND operator or some other
+** operator specified in the op parameter. The WhereClause structure
+** is filled with pointers to subexpressions. For example:
+**
+** WHERE a=='hello' AND coalesce(b,11)<10 AND (c+12!=d OR c==22)
+** \________/ \_______________/ \________________/
+** slot[0] slot[1] slot[2]
+**
+** The original WHERE clause in pExpr is unaltered. All this routine
+** does is make slot[] entries point to substructure within pExpr.
+**
+** In the previous sentence and in the diagram, "slot[]" refers to
+** the WhereClause.a[] array. The slot[] array grows as needed to contain
+** all terms of the WHERE clause.
+*/
+SQLITE_PRIVATE void sqlite3WhereSplit(WhereClause *pWC, Expr *pExpr, u8 op){
+ Expr *pE2 = sqlite3ExprSkipCollateAndLikely(pExpr);
+ pWC->op = op;
+ if( pE2==0 ) return;
+ if( pE2->op!=op ){
+ whereClauseInsert(pWC, pExpr, 0);
+ }else{
+ sqlite3WhereSplit(pWC, pE2->pLeft, op);
+ sqlite3WhereSplit(pWC, pE2->pRight, op);
+ }
+}
+
+/*
+** Initialize a preallocated WhereClause structure.
+*/
+SQLITE_PRIVATE void sqlite3WhereClauseInit(
+ WhereClause *pWC, /* The WhereClause to be initialized */
+ WhereInfo *pWInfo /* The WHERE processing context */
+){
+ pWC->pWInfo = pWInfo;
+ pWC->hasOr = 0;
+ pWC->pOuter = 0;
+ pWC->nTerm = 0;
+ pWC->nSlot = ArraySize(pWC->aStatic);
+ pWC->a = pWC->aStatic;
+}
+
+/*
+** Deallocate a WhereClause structure. The WhereClause structure
+** itself is not freed. This routine is the inverse of
+** sqlite3WhereClauseInit().
+*/
+SQLITE_PRIVATE void sqlite3WhereClauseClear(WhereClause *pWC){
+ int i;
+ WhereTerm *a;
+ sqlite3 *db = pWC->pWInfo->pParse->db;
+ for(i=pWC->nTerm-1, a=pWC->a; i>=0; i--, a++){
+ if( a->wtFlags & TERM_DYNAMIC ){
+ sqlite3ExprDelete(db, a->pExpr);
+ }
+ if( a->wtFlags & TERM_ORINFO ){
+ whereOrInfoDelete(db, a->u.pOrInfo);
+ }else if( a->wtFlags & TERM_ANDINFO ){
+ whereAndInfoDelete(db, a->u.pAndInfo);
+ }
+ }
+ if( pWC->a!=pWC->aStatic ){
+ sqlite3DbFree(db, pWC->a);
+ }
+}
+
+
+/*
+** These routines walk (recursively) an expression tree and generate
+** a bitmask indicating which tables are used in that expression
+** tree.
+*/
+SQLITE_PRIVATE Bitmask sqlite3WhereExprUsageNN(WhereMaskSet *pMaskSet, Expr *p){
+ Bitmask mask;
+ if( p->op==TK_COLUMN && !ExprHasProperty(p, EP_FixedCol) ){
+ return sqlite3WhereGetMask(pMaskSet, p->iTable);
+ }else if( ExprHasProperty(p, EP_TokenOnly|EP_Leaf) ){
+ assert( p->op!=TK_IF_NULL_ROW );
+ return 0;
+ }
+ mask = (p->op==TK_IF_NULL_ROW) ? sqlite3WhereGetMask(pMaskSet, p->iTable) : 0;
+ if( p->pLeft ) mask |= sqlite3WhereExprUsageNN(pMaskSet, p->pLeft);
+ if( p->pRight ){
+ mask |= sqlite3WhereExprUsageNN(pMaskSet, p->pRight);
+ assert( p->x.pList==0 );
+ }else if( ExprHasProperty(p, EP_xIsSelect) ){
+ if( ExprHasProperty(p, EP_VarSelect) ) pMaskSet->bVarSelect = 1;
+ mask |= exprSelectUsage(pMaskSet, p->x.pSelect);
+ }else if( p->x.pList ){
+ mask |= sqlite3WhereExprListUsage(pMaskSet, p->x.pList);
+ }
+#ifndef SQLITE_OMIT_WINDOWFUNC
+ if( (p->op==TK_FUNCTION || p->op==TK_AGG_FUNCTION) && p->y.pWin ){
+ mask |= sqlite3WhereExprListUsage(pMaskSet, p->y.pWin->pPartition);
+ mask |= sqlite3WhereExprListUsage(pMaskSet, p->y.pWin->pOrderBy);
+ mask |= sqlite3WhereExprUsage(pMaskSet, p->y.pWin->pFilter);
+ }
+#endif
+ return mask;
+}
+SQLITE_PRIVATE Bitmask sqlite3WhereExprUsage(WhereMaskSet *pMaskSet, Expr *p){
+ return p ? sqlite3WhereExprUsageNN(pMaskSet,p) : 0;
+}
+SQLITE_PRIVATE Bitmask sqlite3WhereExprListUsage(WhereMaskSet *pMaskSet, ExprList *pList){
+ int i;
+ Bitmask mask = 0;
+ if( pList ){
+ for(i=0; i<pList->nExpr; i++){
+ mask |= sqlite3WhereExprUsage(pMaskSet, pList->a[i].pExpr);
+ }
+ }
+ return mask;
+}
+
+
+/*
+** Call exprAnalyze on all terms in a WHERE clause.
+**
+** Note that exprAnalyze() might add new virtual terms onto the
+** end of the WHERE clause. We do not want to analyze these new
+** virtual terms, so start analyzing at the end and work toward the
+** beginning so that the added virtual terms are never processed.
+*/
+SQLITE_PRIVATE void sqlite3WhereExprAnalyze(
+ SrcList *pTabList, /* the FROM clause */
+ WhereClause *pWC /* the WHERE clause to be analyzed */
+){
+ int i;
+ for(i=pWC->nTerm-1; i>=0; i--){
+ exprAnalyze(pTabList, pWC, i);
+ }
+}
+
+/*
+** For table-valued-functions, transform the function arguments into
+** new WHERE clause terms.
+**
+** Each function argument translates into an equality constraint against
+** a HIDDEN column in the table.
+*/
+SQLITE_PRIVATE void sqlite3WhereTabFuncArgs(
+ Parse *pParse, /* Parsing context */
+ struct SrcList_item *pItem, /* The FROM clause term to process */
+ WhereClause *pWC /* Xfer function arguments to here */
+){
+ Table *pTab;
+ int j, k;
+ ExprList *pArgs;
+ Expr *pColRef;
+ Expr *pTerm;
+ if( pItem->fg.isTabFunc==0 ) return;
+ pTab = pItem->pTab;
+ assert( pTab!=0 );
+ pArgs = pItem->u1.pFuncArg;
+ if( pArgs==0 ) return;
+ for(j=k=0; j<pArgs->nExpr; j++){
+ Expr *pRhs;
+ while( k<pTab->nCol && (pTab->aCol[k].colFlags & COLFLAG_HIDDEN)==0 ){k++;}
+ if( k>=pTab->nCol ){
+ sqlite3ErrorMsg(pParse, "too many arguments on %s() - max %d",
+ pTab->zName, j);
+ return;
+ }
+ pColRef = sqlite3ExprAlloc(pParse->db, TK_COLUMN, 0, 0);
+ if( pColRef==0 ) return;
+ pColRef->iTable = pItem->iCursor;
+ pColRef->iColumn = k++;
+ pColRef->y.pTab = pTab;
+ pRhs = sqlite3PExpr(pParse, TK_UPLUS,
+ sqlite3ExprDup(pParse->db, pArgs->a[j].pExpr, 0), 0);
+ pTerm = sqlite3PExpr(pParse, TK_EQ, pColRef, pRhs);
+ if( pItem->fg.jointype & JT_LEFT ){
+ sqlite3SetJoinExpr(pTerm, pItem->iCursor);
+ }
+ whereClauseInsert(pWC, pTerm, TERM_DYNAMIC);
+ }
+}
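+
+/* For illustration: with the optional generate_series extension
+** loaded, a query such as
+**
+**        SELECT value FROM generate_series(0,100,5);
+**
+** is rewritten here so that 0, 100, and 5 become equality constraints
+** on the function's HIDDEN start, stop, and step columns, much as if
+** "WHERE start=0 AND stop=100 AND step=5" had been written out.
+*/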
+
+/************** End of whereexpr.c *******************************************/
+/************** Begin file where.c *******************************************/
+/*
+** 2001 September 15
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+** This module contains C code that generates VDBE code used to process
+** the WHERE clause of SQL statements. This module is responsible for
+** generating the code that loops through a table looking for applicable
+** rows. Indices are selected and used to speed the search when doing
+** so is applicable. Because this module is responsible for selecting
+** indices, you might also think of this module as the "query optimizer".
+*/
+/* #include "sqliteInt.h" */
+/* #include "whereInt.h" */
+
+/*
+** Extra information appended to the end of sqlite3_index_info but not
+** visible to the xBestIndex function, at least not directly. The
+** sqlite3_vtab_collation() interface knows how to reach it, however.
+**
+** This object is not an API and can be changed from one release to the
+** next. As long as allocateIndexInfo() and sqlite3_vtab_collation()
+** agree on the structure, all will be well.
+*/
+typedef struct HiddenIndexInfo HiddenIndexInfo;
+struct HiddenIndexInfo {
+ WhereClause *pWC; /* The Where clause being analyzed */
+ Parse *pParse; /* The parsing context */
+};
+
+/* Forward declaration of methods */
+static int whereLoopResize(sqlite3*, WhereLoop*, int);
+
+/* Test variable that can be set to enable WHERE tracing */
+#if defined(SQLITE_TEST) || defined(SQLITE_DEBUG)
+/***/ int sqlite3WhereTrace = 0;
+#endif
+
+
+/*
+** Return the estimated number of output rows from a WHERE clause
+*/
+SQLITE_PRIVATE LogEst sqlite3WhereOutputRowCount(WhereInfo *pWInfo){
+ return pWInfo->nRowOut;
+}
+
+/*
+** Return one of the WHERE_DISTINCT_xxxxx values to indicate how this
+** WHERE clause returns outputs for DISTINCT processing.
+*/
+SQLITE_PRIVATE int sqlite3WhereIsDistinct(WhereInfo *pWInfo){
+ return pWInfo->eDistinct;
+}
+
+/*
+** Return the number of ORDER BY terms that are satisfied by the
+** WHERE clause. A return of 0 means that the output must be
+** completely sorted. A return equal to the number of ORDER BY
+** terms means that no sorting is needed at all. A return that
+** is positive but less than the number of ORDER BY terms means that
+** block sorting is required.
+*/
+SQLITE_PRIVATE int sqlite3WhereIsOrdered(WhereInfo *pWInfo){
+ return pWInfo->nOBSat;
+}
+
+/*
+** In the ORDER BY LIMIT optimization, if the inner-most loop is known
+** to emit rows in increasing order, and if the last row emitted by the
+** inner-most loop did not fit within the sorter, then we can skip all
+** subsequent rows for the current iteration of the inner loop (because they
+** will not fit in the sorter either) and continue with the second inner
+** loop - the loop immediately outside the inner-most.
+**
+** When a row does not fit in the sorter (because the sorter already
+** holds LIMIT+OFFSET rows that are smaller), then a jump is made to the
+** label returned by this function.
+**
+** If the ORDER BY LIMIT optimization applies, the jump destination should
+** be the continuation for the second-inner-most loop. If the ORDER BY
+** LIMIT optimization does not apply, then the jump destination should
+** be the continuation for the inner-most loop.
+**
+** It is always safe for this routine to return the continuation of the
+** inner-most loop, in the sense that a correct answer will result.
+** Returning the continuation of the second inner loop is an optimization
+** that might make the code run a little faster, but should not change
+** the final answer.
+*/
+SQLITE_PRIVATE int sqlite3WhereOrderByLimitOptLabel(WhereInfo *pWInfo){
+ WhereLevel *pInner;
+ if( !pWInfo->bOrderedInnerLoop ){
+ /* The ORDER BY LIMIT optimization does not apply. Jump to the
+ ** continuation of the inner-most loop. */
+ return pWInfo->iContinue;
+ }
+ pInner = &pWInfo->a[pWInfo->nLevel-1];
+ assert( pInner->addrNxt!=0 );
+ return pInner->addrNxt;
+}
+
+/*
+** Return the VDBE address or label to jump to in order to continue
+** immediately with the next row of a WHERE clause.
+*/
+SQLITE_PRIVATE int sqlite3WhereContinueLabel(WhereInfo *pWInfo){
+ assert( pWInfo->iContinue!=0 );
+ return pWInfo->iContinue;
+}
+
+/*
+** Return the VDBE address or label to jump to in order to break
+** out of a WHERE loop.
+*/
+SQLITE_PRIVATE int sqlite3WhereBreakLabel(WhereInfo *pWInfo){
+ return pWInfo->iBreak;
+}
+
+/*
+** Return ONEPASS_OFF (0) if an UPDATE or DELETE statement is unable to
+** operate directly on the rowids returned by a WHERE clause. Return
+** ONEPASS_SINGLE (1) if the statement can operate directly because only
+** a single row is to be changed. Return ONEPASS_MULTI (2) if the one-pass
+** optimization can be used on multiple rows at a time.
+**
+** If the ONEPASS optimization is used (if this routine returns true)
+** then also write the indices of open cursors used by ONEPASS
+** into aiCur[0] and aiCur[1]. aiCur[0] gets the cursor of the data
+** table and aiCur[1] gets the cursor used by an auxiliary index.
+** Either value may be -1, indicating that cursor is not used.
+** Any cursors returned will have been opened for writing.
+**
+** aiCur[0] and aiCur[1] both get -1 if the where-clause logic is
+** unable to use the ONEPASS optimization.
+*/
+SQLITE_PRIVATE int sqlite3WhereOkOnePass(WhereInfo *pWInfo, int *aiCur){
+ memcpy(aiCur, pWInfo->aiCurOnePass, sizeof(int)*2);
+#ifdef WHERETRACE_ENABLED
+ if( sqlite3WhereTrace && pWInfo->eOnePass!=ONEPASS_OFF ){
+ sqlite3DebugPrintf("%s cursors: %d %d\n",
+ pWInfo->eOnePass==ONEPASS_SINGLE ? "ONEPASS_SINGLE" : "ONEPASS_MULTI",
+ aiCur[0], aiCur[1]);
+ }
+#endif
+ return pWInfo->eOnePass;
+}
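+
+/*
+** For example, a statement such as
+**
+**     DELETE FROM t1 WHERE rowid=?;
+**
+** can plausibly be classified ONEPASS_SINGLE: the single target row is
+** deleted as soon as the WHERE logic finds it, with aiCur[0] set to the
+** write cursor on t1 and aiCur[1] set to -1 if no auxiliary index is
+** used. (Whether a given statement actually qualifies depends on the
+** plan chosen by the optimizer.)
+*/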
+
+/*
+** Return TRUE if the WHERE loop uses the OP_DeferredSeek opcode to move
+** the data cursor to the row selected by the index cursor.
+*/
+SQLITE_PRIVATE int sqlite3WhereUsesDeferredSeek(WhereInfo *pWInfo){
+ return pWInfo->bDeferredSeek;
+}
+
+/*
+** Move the content of pSrc into pDest
+*/
+static void whereOrMove(WhereOrSet *pDest, WhereOrSet *pSrc){
+ pDest->n = pSrc->n;
+ memcpy(pDest->a, pSrc->a, pDest->n*sizeof(pDest->a[0]));
+}
+
+/*
+** Try to insert a new prerequisite/cost entry into the WhereOrSet pSet.
+**
+** The new entry might overwrite an existing entry, or it might be
+** appended, or it might be discarded. Do whatever is the right thing
+** so that pSet keeps the N_OR_COST best entries seen so far.
+*/
+static int whereOrInsert(
+ WhereOrSet *pSet, /* The WhereOrSet to be updated */
+ Bitmask prereq, /* Prerequisites of the new entry */
+ LogEst rRun, /* Run-cost of the new entry */
+ LogEst nOut /* Number of outputs for the new entry */
+){
+ u16 i;
+ WhereOrCost *p;
+ for(i=pSet->n, p=pSet->a; i>0; i--, p++){
+ if( rRun<=p->rRun && (prereq & p->prereq)==prereq ){
+ goto whereOrInsert_done;
+ }
+ if( p->rRun<=rRun && (p->prereq & prereq)==p->prereq ){
+ return 0;
+ }
+ }
+ if( pSet->n<N_OR_COST ){
+ p = &pSet->a[pSet->n++];
+ p->nOut = nOut;
+ }else{
+ p = pSet->a;
+ for(i=1; i<pSet->n; i++){
+ if( p->rRun>pSet->a[i].rRun ) p = pSet->a + i;
+ }
+ if( p->rRun<=rRun ) return 0;
+ }
+whereOrInsert_done:
+ p->prereq = prereq;
+ p->rRun = rRun;
+ if( p->nOut>nOut ) p->nOut = nOut;
+ return 1;
+}
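+
+/*
+** An illustrative trace of whereOrInsert(), using made-up costs:
+**
+**     insert(prereq=0x3, rRun=50)  ->  kept:  { (0x3,50) }
+**     insert(prereq=0x1, rRun=45)  ->  supplants (0x3,50), because it
+**                                      costs less and needs a subset of
+**                                      the prerequisites: { (0x1,45) }
+**     insert(prereq=0x3, rRun=48)  ->  rejected: (0x1,45) is cheaper
+**                                      and needs fewer prerequisites
+*/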
+
+/*
+** Return the bitmask for the given cursor number. Return 0 if
+** iCursor is not in the set.
+*/
+SQLITE_PRIVATE Bitmask sqlite3WhereGetMask(WhereMaskSet *pMaskSet, int iCursor){
+ int i;
+ assert( pMaskSet->n<=(int)sizeof(Bitmask)*8 );
+ for(i=0; i<pMaskSet->n; i++){
+ if( pMaskSet->ix[i]==iCursor ){
+ return MASKBIT(i);
+ }
+ }
+ return 0;
+}
+
+/*
+** Create a new mask for cursor iCursor.
+**
+** There is one cursor per table in the FROM clause. The number of
+** tables in the FROM clause is limited by a test early in the
+** sqlite3WhereBegin() routine. So we know that the pMaskSet->ix[]
+** array will never overflow.
+*/
+static void createMask(WhereMaskSet *pMaskSet, int iCursor){
+ assert( pMaskSet->n < ArraySize(pMaskSet->ix) );
+ pMaskSet->ix[pMaskSet->n++] = iCursor;
+}
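+
+/*
+** For example, if createMask() registers cursors 3, 7 and 5 in that
+** order, they receive masks MASKBIT(0)==0x1, MASKBIT(1)==0x2 and
+** MASKBIT(2)==0x4 respectively. sqlite3WhereGetMask() then maps cursor
+** 5 back to 0x4, and returns 0 for any cursor that was never registered.
+*/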
+
+/*
+** Advance to the next WhereTerm that matches according to the criteria
+** established when the pScan object was initialized by whereScanInit().
+** Return NULL if there are no more matching WhereTerms.
+*/
+static WhereTerm *whereScanNext(WhereScan *pScan){
+ int iCur; /* The cursor on the LHS of the term */
+ i16 iColumn; /* The column on the LHS of the term. -1 for IPK */
+ Expr *pX; /* An expression being tested */
+ WhereClause *pWC; /* Shorthand for pScan->pWC */
+ WhereTerm *pTerm; /* The term being tested */
+ int k = pScan->k; /* Where to start scanning */
+
+ assert( pScan->iEquiv<=pScan->nEquiv );
+ pWC = pScan->pWC;
+ while(1){
+ iColumn = pScan->aiColumn[pScan->iEquiv-1];
+ iCur = pScan->aiCur[pScan->iEquiv-1];
+ assert( pWC!=0 );
+ do{
+ for(pTerm=pWC->a+k; k<pWC->nTerm; k++, pTerm++){
+ if( pTerm->leftCursor==iCur
+ && pTerm->u.leftColumn==iColumn
+ && (iColumn!=XN_EXPR
+ || sqlite3ExprCompareSkip(pTerm->pExpr->pLeft,
+ pScan->pIdxExpr,iCur)==0)
+ && (pScan->iEquiv<=1 || !ExprHasProperty(pTerm->pExpr, EP_FromJoin))
+ ){
+ if( (pTerm->eOperator & WO_EQUIV)!=0
+ && pScan->nEquiv<ArraySize(pScan->aiCur)
+ && (pX = sqlite3ExprSkipCollateAndLikely(pTerm->pExpr->pRight))->op
+ ==TK_COLUMN
+ ){
+ int j;
+ for(j=0; j<pScan->nEquiv; j++){
+ if( pScan->aiCur[j]==pX->iTable
+ && pScan->aiColumn[j]==pX->iColumn ){
+ break;
+ }
+ }
+ if( j==pScan->nEquiv ){
+ pScan->aiCur[j] = pX->iTable;
+ pScan->aiColumn[j] = pX->iColumn;
+ pScan->nEquiv++;
+ }
+ }
+ if( (pTerm->eOperator & pScan->opMask)!=0 ){
+ /* Verify the affinity and collating sequence match */
+ if( pScan->zCollName && (pTerm->eOperator & WO_ISNULL)==0 ){
+ CollSeq *pColl;
+ Parse *pParse = pWC->pWInfo->pParse;
+ pX = pTerm->pExpr;
+ if( !sqlite3IndexAffinityOk(pX, pScan->idxaff) ){
+ continue;
+ }
+ assert(pX->pLeft);
+ pColl = sqlite3ExprCompareCollSeq(pParse, pX);
+ if( pColl==0 ) pColl = pParse->db->pDfltColl;
+ if( sqlite3StrICmp(pColl->zName, pScan->zCollName) ){
+ continue;
+ }
+ }
+ if( (pTerm->eOperator & (WO_EQ|WO_IS))!=0
+ && (pX = pTerm->pExpr->pRight)->op==TK_COLUMN
+ && pX->iTable==pScan->aiCur[0]
+ && pX->iColumn==pScan->aiColumn[0]
+ ){
+ testcase( pTerm->eOperator & WO_IS );
+ continue;
+ }
+ pScan->pWC = pWC;
+ pScan->k = k+1;
+ return pTerm;
+ }
+ }
+ }
+ pWC = pWC->pOuter;
+ k = 0;
+ }while( pWC!=0 );
+ if( pScan->iEquiv>=pScan->nEquiv ) break;
+ pWC = pScan->pOrigWC;
+ k = 0;
+ pScan->iEquiv++;
+ }
+ return 0;
+}
+
+/*
+** This is whereScanInit() for the case of an index on an expression.
+** It is factored out into a separate tail-recursion subroutine so that
+** the normal whereScanInit() routine, which is a high-runner, does not
+** need to push registers onto the stack as part of its prologue.
+*/
+static SQLITE_NOINLINE WhereTerm *whereScanInitIndexExpr(WhereScan *pScan){
+ pScan->idxaff = sqlite3ExprAffinity(pScan->pIdxExpr);
+ return whereScanNext(pScan);
+}
+
+/*
+** Initialize a WHERE clause scanner object. Return a pointer to the
+** first match. Return NULL if there are no matches.
+**
+** The scanner will be searching the WHERE clause pWC. It will look
+** for terms of the form "X <op> <expr>" where X is column iColumn of table
+** iCur. Or if pIdx!=0 then X is column iColumn of index pIdx. pIdx
+** must be one of the indexes of table iCur.
+**
+** The <op> must be one of the operators described by opMask.
+**
+** If the search is for X and the WHERE clause contains terms of the
+** form X=Y then this routine might also return terms of the form
+** "Y <op> <expr>". The number of levels of transitivity is limited,
+** but is enough to handle most commonly occurring SQL statements.
+**
+** If X is not the INTEGER PRIMARY KEY then X must be compatible with
+** index pIdx.
+*/
+static WhereTerm *whereScanInit(
+ WhereScan *pScan, /* The WhereScan object being initialized */
+ WhereClause *pWC, /* The WHERE clause to be scanned */
+ int iCur, /* Cursor to scan for */
+ int iColumn, /* Column to scan for */
+ u32 opMask, /* Operator(s) to scan for */
+ Index *pIdx /* Must be compatible with this index */
+){
+ pScan->pOrigWC = pWC;
+ pScan->pWC = pWC;
+ pScan->pIdxExpr = 0;
+ pScan->idxaff = 0;
+ pScan->zCollName = 0;
+ pScan->opMask = opMask;
+ pScan->k = 0;
+ pScan->aiCur[0] = iCur;
+ pScan->nEquiv = 1;
+ pScan->iEquiv = 1;
+ if( pIdx ){
+ int j = iColumn;
+ iColumn = pIdx->aiColumn[j];
+ if( iColumn==XN_EXPR ){
+ pScan->pIdxExpr = pIdx->aColExpr->a[j].pExpr;
+ pScan->zCollName = pIdx->azColl[j];
+ pScan->aiColumn[0] = XN_EXPR;
+ return whereScanInitIndexExpr(pScan);
+ }else if( iColumn==pIdx->pTable->iPKey ){
+ iColumn = XN_ROWID;
+ }else if( iColumn>=0 ){
+ pScan->idxaff = pIdx->pTable->aCol[iColumn].affinity;
+ pScan->zCollName = pIdx->azColl[j];
+ }
+ }else if( iColumn==XN_EXPR ){
+ return 0;
+ }
+ pScan->aiColumn[0] = iColumn;
+ return whereScanNext(pScan);
+}
+
+/*
+** Search for a term in the WHERE clause that is of the form "X <op> <expr>"
+** where X is a reference to the iColumn of table iCur or of index pIdx
+** if pIdx!=0 and <op> is one of the WO_xx operator codes specified by
+** the op parameter. Return a pointer to the term. Return 0 if not found.
+**
+** If pIdx!=0 then it must be one of the indexes of table iCur.
+** Search for terms matching the iColumn-th column of pIdx
+** rather than the iColumn-th column of table iCur.
+**
+** The term returned might be Y=<expr> if there is another constraint in
+** the WHERE clause that specifies that X=Y. Any such constraints will be
+** identified by the WO_EQUIV bit in the pTerm->eOperator field. The
+** aiCur[]/aiColumn[] arrays hold X and all its equivalents. There are 11
+** slots in aiCur[]/aiColumn[] so that means we can look for X plus up to 10
+** other equivalent values. Hence a search for X will return <expr> if X=A1
+** and A1=A2 and A2=A3 and ... and A9=A10 and A10=<expr>.
+**
+** If there are multiple terms in the WHERE clause of the form "X <op> <expr>"
+** then try for the one with no dependencies on <expr> - in other words where
+** <expr> is a constant expression of some kind. Only return entries of
+** the form "X <op> Y" where Y is a column in another table if no terms of
+** the form "X <op> <const-expr>" exist. If no terms with a constant RHS
+** exist, try to return a term that does not use WO_EQUIV.
+*/
+SQLITE_PRIVATE WhereTerm *sqlite3WhereFindTerm(
+ WhereClause *pWC, /* The WHERE clause to be searched */
+ int iCur, /* Cursor number of LHS */
+ int iColumn, /* Column number of LHS */
+ Bitmask notReady, /* RHS must not overlap with this mask */
+ u32 op, /* Mask of WO_xx values describing operator */
+ Index *pIdx /* Must be compatible with this index, if not NULL */
+){
+ WhereTerm *pResult = 0;
+ WhereTerm *p;
+ WhereScan scan;
+
+ p = whereScanInit(&scan, pWC, iCur, iColumn, op, pIdx);
+ op &= WO_EQ|WO_IS;
+ while( p ){
+ if( (p->prereqRight & notReady)==0 ){
+ if( p->prereqRight==0 && (p->eOperator&op)!=0 ){
+ testcase( p->eOperator & WO_IS );
+ return p;
+ }
+ if( pResult==0 ) pResult = p;
+ }
+ p = whereScanNext(&scan);
+ }
+ return pResult;
+}
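+
+/*
+** For example (an illustrative query, not a special case in the code):
+**
+**     SELECT * FROM t1, t2 WHERE t1.x=t2.y AND t2.y=5;
+**
+** A search for an equality term on t1.x can, via the WO_EQUIV chain,
+** also consider "t2.y=5". Since that term has a constant right-hand
+** side (prereqRight==0), it is returned in preference to the join
+** term "t1.x=t2.y".
+*/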
+
+/*
+** This function searches pList for an entry that matches the iCol-th column
+** of index pIdx.
+**
+** If such an expression is found, its index in pList->a[] is returned. If
+** no expression is found, -1 is returned.
+*/
+static int findIndexCol(
+ Parse *pParse, /* Parse context */
+ ExprList *pList, /* Expression list to search */
+ int iBase, /* Cursor for table associated with pIdx */
+ Index *pIdx, /* Index to match column of */
+ int iCol /* Column of index to match */
+){
+ int i;
+ const char *zColl = pIdx->azColl[iCol];
+
+ for(i=0; i<pList->nExpr; i++){
+ Expr *p = sqlite3ExprSkipCollateAndLikely(pList->a[i].pExpr);
+ if( p->op==TK_COLUMN
+ && p->iColumn==pIdx->aiColumn[iCol]
+ && p->iTable==iBase
+ ){
+ CollSeq *pColl = sqlite3ExprNNCollSeq(pParse, pList->a[i].pExpr);
+ if( 0==sqlite3StrICmp(pColl->zName, zColl) ){
+ return i;
+ }
+ }
+ }
+
+ return -1;
+}
+
+/*
+** Return TRUE if the iCol-th column of index pIdx is NOT NULL
+*/
+static int indexColumnNotNull(Index *pIdx, int iCol){
+ int j;
+ assert( pIdx!=0 );
+ assert( iCol>=0 && iCol<pIdx->nColumn );
+ j = pIdx->aiColumn[iCol];
+ if( j>=0 ){
+ return pIdx->pTable->aCol[j].notNull;
+ }else if( j==(-1) ){
+ return 1;
+ }else{
+ assert( j==(-2) );
+ return 0; /* Assume an indexed expression can always yield a NULL */
+ }
+}
+
+/*
+** Return true if the DISTINCT expression-list passed as the third argument
+** is redundant.
+**
+** A DISTINCT list is redundant if any subset of the columns in the
+** DISTINCT list are collectively unique and individually non-null.
+*/
+static int isDistinctRedundant(
+ Parse *pParse, /* Parsing context */
+ SrcList *pTabList, /* The FROM clause */
+ WhereClause *pWC, /* The WHERE clause */
+ ExprList *pDistinct /* The result set that needs to be DISTINCT */
+){
+ Table *pTab;
+ Index *pIdx;
+ int i;
+ int iBase;
+
+ /* If there is more than one table or sub-select in the FROM clause of
+ ** this query, then it will not be possible to show that the DISTINCT
+ ** clause is redundant. */
+ if( pTabList->nSrc!=1 ) return 0;
+ iBase = pTabList->a[0].iCursor;
+ pTab = pTabList->a[0].pTab;
+
+ /* If any of the expressions is an IPK column on table iBase, then return
+ ** true. Note: The (p->iTable==iBase) part of this test may be false if the
+ ** current SELECT is a correlated sub-query.
+ */
+ for(i=0; i<pDistinct->nExpr; i++){
+ Expr *p = sqlite3ExprSkipCollateAndLikely(pDistinct->a[i].pExpr);
+ if( p->op==TK_COLUMN && p->iTable==iBase && p->iColumn<0 ) return 1;
+ }
+
+ /* Loop through all indices on the table, checking each to see if it makes
+ ** the DISTINCT qualifier redundant. It does so if:
+ **
+ ** 1. The index is itself UNIQUE, and
+ **
+ ** 2. All of the columns in the index are either part of the pDistinct
+ ** list, or else the WHERE clause contains a term of the form "col=X",
+ ** where X is a constant value. The collation sequences of the
+ ** comparison and select-list expressions must match those of the index.
+ **
+ ** 3. All of those index columns for which the WHERE clause does not
+ ** contain a "col=X" term are subject to a NOT NULL constraint.
+ */
+ for(pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext){
+ if( !IsUniqueIndex(pIdx) ) continue;
+ for(i=0; i<pIdx->nKeyCol; i++){
+ if( 0==sqlite3WhereFindTerm(pWC, iBase, i, ~(Bitmask)0, WO_EQ, pIdx) ){
+ if( findIndexCol(pParse, pDistinct, iBase, pIdx, i)<0 ) break;
+ if( indexColumnNotNull(pIdx, i)==0 ) break;
+ }
+ }
+ if( i==pIdx->nKeyCol ){
+ /* This index implies that the DISTINCT qualifier is redundant. */
+ return 1;
+ }
+ }
+
+ return 0;
+}
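+
+/*
+** For example, given the (hypothetical) schema
+**
+**     CREATE TABLE t1(a INTEGER PRIMARY KEY, b TEXT NOT NULL);
+**     CREATE UNIQUE INDEX i1 ON t1(b);
+**
+** the DISTINCT keyword is redundant in both of these queries:
+**
+**     SELECT DISTINCT a, b FROM t1;   -- a is the INTEGER PRIMARY KEY
+**     SELECT DISTINCT b FROM t1;      -- i1 is UNIQUE and b is NOT NULL
+*/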
+
+
+/*
+** Estimate the logarithm of the input value to base 2.
+*/
+static LogEst estLog(LogEst N){
+ return N<=10 ? 0 : sqlite3LogEst(N) - 33;
+}
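+
+/*
+** A worked example of the arithmetic above, using approximate LogEst
+** values: a table of about 1,000,000 rows has a LogEst row count of
+** roughly 199 (LogEst(N) is about 10*log2(N)). Then
+**
+**     estLog(199) = sqlite3LogEst(199) - 33 ~= 76 - 33 = 43
+**
+** and 43 LogEst units decode to about 2^4.3 ~= 20, which is close to
+** log2(1,000,000) ~= 19.9. The "- 33" works because 33==LogEst(10).
+*/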
+
+/*
+** Convert OP_Column opcodes to OP_Copy in previously generated code.
+**
+** This routine runs over generated VDBE code and translates OP_Column
+** opcodes into OP_Copy when the table is being accessed via co-routine
+** instead of via table lookup.
+**
+** If iAutoidxCur is not zero, then any OP_Rowid instructions on
+** cursor iTabCur are transformed into OP_Sequence opcodes for the
+** iAutoidxCur cursor, in order to generate unique rowids for the
+** automatic index being generated.
+*/
+static void translateColumnToCopy(
+ Parse *pParse, /* Parsing context */
+ int iStart, /* Translate from this opcode to the end */
+ int iTabCur, /* OP_Column/OP_Rowid references to this table */
+ int iRegister, /* The first column is in this register */
+ int iAutoidxCur /* If non-zero, cursor of autoindex being generated */
+){
+ Vdbe *v = pParse->pVdbe;
+ VdbeOp *pOp = sqlite3VdbeGetOp(v, iStart);
+ int iEnd = sqlite3VdbeCurrentAddr(v);
+ if( pParse->db->mallocFailed ) return;
+ for(; iStart<iEnd; iStart++, pOp++){
+ if( pOp->p1!=iTabCur ) continue;
+ if( pOp->opcode==OP_Column ){
+ pOp->opcode = OP_Copy;
+ pOp->p1 = pOp->p2 + iRegister;
+ pOp->p2 = pOp->p3;
+ pOp->p3 = 0;
+ }else if( pOp->opcode==OP_Rowid ){
+ if( iAutoidxCur ){
+ pOp->opcode = OP_Sequence;
+ pOp->p1 = iAutoidxCur;
+ }else{
+ pOp->opcode = OP_Null;
+ pOp->p1 = 0;
+ pOp->p3 = 0;
+ }
+ }
+ }
+}
+
+/*
+** Two routines for printing the content of an sqlite3_index_info
+** structure. Used for testing and debugging only. If neither
+** SQLITE_TEST nor SQLITE_DEBUG is defined, then these routines
+** are no-ops.
+*/
+#if !defined(SQLITE_OMIT_VIRTUALTABLE) && defined(WHERETRACE_ENABLED)
+static void whereTraceIndexInfoInputs(sqlite3_index_info *p){
+ int i;
+ if( !sqlite3WhereTrace ) return;
+ for(i=0; i<p->nConstraint; i++){
+ sqlite3DebugPrintf(" constraint[%d]: col=%d termid=%d op=%d usable=%d\n",
+ i,
+ p->aConstraint[i].iColumn,
+ p->aConstraint[i].iTermOffset,
+ p->aConstraint[i].op,
+ p->aConstraint[i].usable);
+ }
+ for(i=0; i<p->nOrderBy; i++){
+ sqlite3DebugPrintf(" orderby[%d]: col=%d desc=%d\n",
+ i,
+ p->aOrderBy[i].iColumn,
+ p->aOrderBy[i].desc);
+ }
+}
+static void whereTraceIndexInfoOutputs(sqlite3_index_info *p){
+ int i;
+ if( !sqlite3WhereTrace ) return;
+ for(i=0; i<p->nConstraint; i++){
+ sqlite3DebugPrintf(" usage[%d]: argvIdx=%d omit=%d\n",
+ i,
+ p->aConstraintUsage[i].argvIndex,
+ p->aConstraintUsage[i].omit);
+ }
+ sqlite3DebugPrintf(" idxNum=%d\n", p->idxNum);
+ sqlite3DebugPrintf(" idxStr=%s\n", p->idxStr);
+ sqlite3DebugPrintf(" orderByConsumed=%d\n", p->orderByConsumed);
+ sqlite3DebugPrintf(" estimatedCost=%g\n", p->estimatedCost);
+ sqlite3DebugPrintf(" estimatedRows=%lld\n", p->estimatedRows);
+}
+#else
+#define whereTraceIndexInfoInputs(A)
+#define whereTraceIndexInfoOutputs(A)
+#endif
+
+#ifndef SQLITE_OMIT_AUTOMATIC_INDEX
+/*
+** Return TRUE if the WHERE clause term pTerm is of a form where it
+** could be used with an index to access pSrc, assuming an appropriate
+** index existed.
+*/
+static int termCanDriveIndex(
+ WhereTerm *pTerm, /* WHERE clause term to check */
+ struct SrcList_item *pSrc, /* Table we are trying to access */
+ Bitmask notReady /* Tables in outer loops of the join */
+){
+ char aff;
+ if( pTerm->leftCursor!=pSrc->iCursor ) return 0;
+ if( (pTerm->eOperator & (WO_EQ|WO_IS))==0 ) return 0;
+ if( (pSrc->fg.jointype & JT_LEFT)
+ && !ExprHasProperty(pTerm->pExpr, EP_FromJoin)
+ && (pTerm->eOperator & WO_IS)
+ ){
+ /* Cannot use an IS term from the WHERE clause as an index driver for
+ ** the RHS of a LEFT JOIN. Such a term can only be used if it is from
+ ** the ON clause. */
+ return 0;
+ }
+ if( (pTerm->prereqRight & notReady)!=0 ) return 0;
+ if( pTerm->u.leftColumn<0 ) return 0;
+ aff = pSrc->pTab->aCol[pTerm->u.leftColumn].affinity;
+ if( !sqlite3IndexAffinityOk(pTerm->pExpr, aff) ) return 0;
+ testcase( pTerm->pExpr->op==TK_IS );
+ return 1;
+}
+#endif
+
+
+#ifndef SQLITE_OMIT_AUTOMATIC_INDEX
+/*
+** Generate code to construct the Index object for an automatic index
+** and to set up the WhereLevel object pLevel so that the code generator
+** makes use of the automatic index.
+*/
+static void constructAutomaticIndex(
+ Parse *pParse, /* The parsing context */
+ WhereClause *pWC, /* The WHERE clause */
+ struct SrcList_item *pSrc, /* The FROM clause term to get the next index */
+ Bitmask notReady, /* Mask of cursors that are not available */
+ WhereLevel *pLevel /* Write new index here */
+){
+ int nKeyCol; /* Number of columns in the constructed index */
+ WhereTerm *pTerm; /* A single term of the WHERE clause */
+ WhereTerm *pWCEnd; /* End of pWC->a[] */
+ Index *pIdx; /* Object describing the transient index */
+ Vdbe *v; /* Prepared statement under construction */
+ int addrInit; /* Address of the initialization bypass jump */
+ Table *pTable; /* The table being indexed */
+ int addrTop; /* Top of the index fill loop */
+ int regRecord; /* Register holding an index record */
+ int n; /* Column counter */
+ int i; /* Loop counter */
+ int mxBitCol; /* Maximum column in pSrc->colUsed */
+ CollSeq *pColl; /* Collating sequence to use on a column */
+ WhereLoop *pLoop; /* The Loop object */
+ char *zNotUsed; /* Extra space on the end of pIdx */
+ Bitmask idxCols; /* Bitmap of columns used for indexing */
+ Bitmask extraCols; /* Bitmap of additional columns */
+ u8 sentWarning = 0; /* True if a warning has been issued */
+ Expr *pPartial = 0; /* Partial Index Expression */
+ int iContinue = 0; /* Jump here to skip excluded rows */
+ struct SrcList_item *pTabItem; /* FROM clause term being indexed */
+ int addrCounter = 0; /* Address where integer counter is initialized */
+ int regBase; /* Array of registers where record is assembled */
+
+ /* Generate code to skip over the creation and initialization of the
+ ** transient index on 2nd and subsequent iterations of the loop. */
+ v = pParse->pVdbe;
+ assert( v!=0 );
+ addrInit = sqlite3VdbeAddOp0(v, OP_Once); VdbeCoverage(v);
+
+ /* Count the number of columns that will be added to the index
+ ** and used to match WHERE clause constraints */
+ nKeyCol = 0;
+ pTable = pSrc->pTab;
+ pWCEnd = &pWC->a[pWC->nTerm];
+ pLoop = pLevel->pWLoop;
+ idxCols = 0;
+ for(pTerm=pWC->a; pTerm<pWCEnd; pTerm++){
+ Expr *pExpr = pTerm->pExpr;
+ assert( !ExprHasProperty(pExpr, EP_FromJoin) /* prereq always non-zero */
+ || pExpr->iRightJoinTable!=pSrc->iCursor /* for the right-hand */
+ || pLoop->prereq!=0 ); /* table of a LEFT JOIN */
+ if( pLoop->prereq==0
+ && (pTerm->wtFlags & TERM_VIRTUAL)==0
+ && !ExprHasProperty(pExpr, EP_FromJoin)
+ && sqlite3ExprIsTableConstant(pExpr, pSrc->iCursor) ){
+ pPartial = sqlite3ExprAnd(pParse, pPartial,
+ sqlite3ExprDup(pParse->db, pExpr, 0));
+ }
+ if( termCanDriveIndex(pTerm, pSrc, notReady) ){
+ int iCol = pTerm->u.leftColumn;
+ Bitmask cMask = iCol>=BMS ? MASKBIT(BMS-1) : MASKBIT(iCol);
+ testcase( iCol==BMS );
+ testcase( iCol==BMS-1 );
+ if( !sentWarning ){
+ sqlite3_log(SQLITE_WARNING_AUTOINDEX,
+ "automatic index on %s(%s)", pTable->zName,
+ pTable->aCol[iCol].zName);
+ sentWarning = 1;
+ }
+ if( (idxCols & cMask)==0 ){
+ if( whereLoopResize(pParse->db, pLoop, nKeyCol+1) ){
+ goto end_auto_index_create;
+ }
+ pLoop->aLTerm[nKeyCol++] = pTerm;
+ idxCols |= cMask;
+ }
+ }
+ }
+ assert( nKeyCol>0 );
+ pLoop->u.btree.nEq = pLoop->nLTerm = nKeyCol;
+ pLoop->wsFlags = WHERE_COLUMN_EQ | WHERE_IDX_ONLY | WHERE_INDEXED
+ | WHERE_AUTO_INDEX;
+
+ /* Count the number of additional columns needed to create a
+ ** covering index. A "covering index" is an index that contains all
+ ** columns that are needed by the query. With a covering index, the
+ ** original table never needs to be accessed. Automatic indices must
+ ** be covering indices because the index will not be updated if the
+ ** original table changes, and the index and table cannot both be used
+ ** if they go out of sync.
+ */
+ extraCols = pSrc->colUsed & (~idxCols | MASKBIT(BMS-1));
+ mxBitCol = MIN(BMS-1,pTable->nCol);
+ testcase( pTable->nCol==BMS-1 );
+ testcase( pTable->nCol==BMS-2 );
+ for(i=0; i<mxBitCol; i++){
+ if( extraCols & MASKBIT(i) ) nKeyCol++;
+ }
+ if( pSrc->colUsed & MASKBIT(BMS-1) ){
+ nKeyCol += pTable->nCol - BMS + 1;
+ }
+
+ /* Construct the Index object to describe this index */
+ pIdx = sqlite3AllocateIndexObject(pParse->db, nKeyCol+1, 0, &zNotUsed);
+ if( pIdx==0 ) goto end_auto_index_create;
+ pLoop->u.btree.pIndex = pIdx;
+ pIdx->zName = "auto-index";
+ pIdx->pTable = pTable;
+ n = 0;
+ idxCols = 0;
+ for(pTerm=pWC->a; pTerm<pWCEnd; pTerm++){
+ if( termCanDriveIndex(pTerm, pSrc, notReady) ){
+ int iCol = pTerm->u.leftColumn;
+ Bitmask cMask = iCol>=BMS ? MASKBIT(BMS-1) : MASKBIT(iCol);
+ testcase( iCol==BMS-1 );
+ testcase( iCol==BMS );
+ if( (idxCols & cMask)==0 ){
+ Expr *pX = pTerm->pExpr;
+ idxCols |= cMask;
+ pIdx->aiColumn[n] = pTerm->u.leftColumn;
+ pColl = sqlite3ExprCompareCollSeq(pParse, pX);
+ assert( pColl!=0 || pParse->nErr>0 ); /* TH3 collate01.800 */
+ pIdx->azColl[n] = pColl ? pColl->zName : sqlite3StrBINARY;
+ n++;
+ }
+ }
+ }
+ assert( (u32)n==pLoop->u.btree.nEq );
+
+ /* Add additional columns needed to make the automatic index into
+ ** a covering index */
+ for(i=0; i<mxBitCol; i++){
+ if( extraCols & MASKBIT(i) ){
+ pIdx->aiColumn[n] = i;
+ pIdx->azColl[n] = sqlite3StrBINARY;
+ n++;
+ }
+ }
+ if( pSrc->colUsed & MASKBIT(BMS-1) ){
+ for(i=BMS-1; i<pTable->nCol; i++){
+ pIdx->aiColumn[n] = i;
+ pIdx->azColl[n] = sqlite3StrBINARY;
+ n++;
+ }
+ }
+ assert( n==nKeyCol );
+ pIdx->aiColumn[n] = XN_ROWID;
+ pIdx->azColl[n] = sqlite3StrBINARY;
+
+ /* Create the automatic index */
+ assert( pLevel->iIdxCur>=0 );
+ pLevel->iIdxCur = pParse->nTab++;
+ sqlite3VdbeAddOp2(v, OP_OpenAutoindex, pLevel->iIdxCur, nKeyCol+1);
+ sqlite3VdbeSetP4KeyInfo(pParse, pIdx);
+ VdbeComment((v, "for %s", pTable->zName));
+
+ /* Fill the automatic index with content */
+ pTabItem = &pWC->pWInfo->pTabList->a[pLevel->iFrom];
+ if( pTabItem->fg.viaCoroutine ){
+ int regYield = pTabItem->regReturn;
+ addrCounter = sqlite3VdbeAddOp2(v, OP_Integer, 0, 0);
+ sqlite3VdbeAddOp3(v, OP_InitCoroutine, regYield, 0, pTabItem->addrFillSub);
+ addrTop = sqlite3VdbeAddOp1(v, OP_Yield, regYield);
+ VdbeCoverage(v);
+ VdbeComment((v, "next row of %s", pTabItem->pTab->zName));
+ }else{
+ addrTop = sqlite3VdbeAddOp1(v, OP_Rewind, pLevel->iTabCur); VdbeCoverage(v);
+ }
+ if( pPartial ){
+ iContinue = sqlite3VdbeMakeLabel(pParse);
+ sqlite3ExprIfFalse(pParse, pPartial, iContinue, SQLITE_JUMPIFNULL);
+ pLoop->wsFlags |= WHERE_PARTIALIDX;
+ }
+ regRecord = sqlite3GetTempReg(pParse);
+ regBase = sqlite3GenerateIndexKey(
+ pParse, pIdx, pLevel->iTabCur, regRecord, 0, 0, 0, 0
+ );
+ sqlite3VdbeAddOp2(v, OP_IdxInsert, pLevel->iIdxCur, regRecord);
+ sqlite3VdbeChangeP5(v, OPFLAG_USESEEKRESULT);
+ if( pPartial ) sqlite3VdbeResolveLabel(v, iContinue);
+ if( pTabItem->fg.viaCoroutine ){
+ sqlite3VdbeChangeP2(v, addrCounter, regBase+n);
+ testcase( pParse->db->mallocFailed );
+ assert( pLevel->iIdxCur>0 );
+ translateColumnToCopy(pParse, addrTop, pLevel->iTabCur,
+ pTabItem->regResult, pLevel->iIdxCur);
+ sqlite3VdbeGoto(v, addrTop);
+ pTabItem->fg.viaCoroutine = 0;
+ }else{
+ sqlite3VdbeAddOp2(v, OP_Next, pLevel->iTabCur, addrTop+1); VdbeCoverage(v);
+ sqlite3VdbeChangeP5(v, SQLITE_STMTSTATUS_AUTOINDEX);
+ }
+ sqlite3VdbeJumpHere(v, addrTop);
+ sqlite3ReleaseTempReg(pParse, regRecord);
+
+ /* Jump here when skipping the initialization */
+ sqlite3VdbeJumpHere(v, addrInit);
+
+end_auto_index_create:
+ sqlite3ExprDelete(pParse->db, pPartial);
+}
+#endif /* SQLITE_OMIT_AUTOMATIC_INDEX */
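+
+/*
+** For example (an illustrative query, with hypothetical table names):
+**
+**     SELECT t1.*, t2.c FROM t1 JOIN t2 ON t1.a=t2.b;
+**
+** If t2 has no index on b and is placed in an inner loop, the planner
+** may build a transient index keyed on t2.b via the code above. The
+** extraCols logic then appends t2.c (and any other t2 columns the query
+** uses) so that the automatic index is covering and the t2 table itself
+** never needs to be read while the join runs.
+*/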
+
+#ifndef SQLITE_OMIT_VIRTUALTABLE
+/*
+** Allocate and populate an sqlite3_index_info structure. It is the
+** responsibility of the caller to eventually release the structure
+** by passing the pointer returned by this function to sqlite3_free().
+*/
+static sqlite3_index_info *allocateIndexInfo(
+ Parse *pParse, /* The parsing context */
+ WhereClause *pWC, /* The WHERE clause being analyzed */
+ Bitmask mUnusable, /* Ignore terms with these prereqs */
+ struct SrcList_item *pSrc, /* The FROM clause term that is the vtab */
+ ExprList *pOrderBy, /* The ORDER BY clause */
+ u16 *pmNoOmit /* Mask of terms not to omit */
+){
+ int i, j;
+ int nTerm;
+ struct sqlite3_index_constraint *pIdxCons;
+ struct sqlite3_index_orderby *pIdxOrderBy;
+ struct sqlite3_index_constraint_usage *pUsage;
+ struct HiddenIndexInfo *pHidden;
+ WhereTerm *pTerm;
+ int nOrderBy;
+ sqlite3_index_info *pIdxInfo;
+ u16 mNoOmit = 0;
+
+ /* Count the number of possible WHERE clause constraints referring
+ ** to this virtual table */
+ for(i=nTerm=0, pTerm=pWC->a; i<pWC->nTerm; i++, pTerm++){
+ if( pTerm->leftCursor != pSrc->iCursor ) continue;
+ if( pTerm->prereqRight & mUnusable ) continue;
+ assert( IsPowerOfTwo(pTerm->eOperator & ~WO_EQUIV) );
+ testcase( pTerm->eOperator & WO_IN );
+ testcase( pTerm->eOperator & WO_ISNULL );
+ testcase( pTerm->eOperator & WO_IS );
+ testcase( pTerm->eOperator & WO_ALL );
+ if( (pTerm->eOperator & ~(WO_EQUIV))==0 ) continue;
+ if( pTerm->wtFlags & TERM_VNULL ) continue;
+ assert( pTerm->u.leftColumn>=(-1) );
+ nTerm++;
+ }
+
+ /* If the ORDER BY clause contains only columns in the current
+ ** virtual table then allocate space for the aOrderBy part of
+ ** the sqlite3_index_info structure.
+ */
+ nOrderBy = 0;
+ if( pOrderBy ){
+ int n = pOrderBy->nExpr;
+ for(i=0; i<n; i++){
+ Expr *pExpr = pOrderBy->a[i].pExpr;
+ if( pExpr->op!=TK_COLUMN || pExpr->iTable!=pSrc->iCursor ) break;
+ if( pOrderBy->a[i].sortFlags & KEYINFO_ORDER_BIGNULL ) break;
+ }
+ if( i==n ){
+ nOrderBy = n;
+ }
+ }
+
+ /* Allocate the sqlite3_index_info structure
+ */
+ pIdxInfo = sqlite3DbMallocZero(pParse->db, sizeof(*pIdxInfo)
+ + (sizeof(*pIdxCons) + sizeof(*pUsage))*nTerm
+ + sizeof(*pIdxOrderBy)*nOrderBy + sizeof(*pHidden) );
+ if( pIdxInfo==0 ){
+ sqlite3ErrorMsg(pParse, "out of memory");
+ return 0;
+ }
+ pHidden = (struct HiddenIndexInfo*)&pIdxInfo[1];
+ pIdxCons = (struct sqlite3_index_constraint*)&pHidden[1];
+ pIdxOrderBy = (struct sqlite3_index_orderby*)&pIdxCons[nTerm];
+ pUsage = (struct sqlite3_index_constraint_usage*)&pIdxOrderBy[nOrderBy];
+ pIdxInfo->nOrderBy = nOrderBy;
+ pIdxInfo->aConstraint = pIdxCons;
+ pIdxInfo->aOrderBy = pIdxOrderBy;
+ pIdxInfo->aConstraintUsage = pUsage;
+ pHidden->pWC = pWC;
+ pHidden->pParse = pParse;
+ for(i=j=0, pTerm=pWC->a; i<pWC->nTerm; i++, pTerm++){
+ u16 op;
+ if( pTerm->leftCursor != pSrc->iCursor ) continue;
+ if( pTerm->prereqRight & mUnusable ) continue;
+ assert( IsPowerOfTwo(pTerm->eOperator & ~WO_EQUIV) );
+ testcase( pTerm->eOperator & WO_IN );
+ testcase( pTerm->eOperator & WO_IS );
+ testcase( pTerm->eOperator & WO_ISNULL );
+ testcase( pTerm->eOperator & WO_ALL );
+ if( (pTerm->eOperator & ~(WO_EQUIV))==0 ) continue;
+ if( pTerm->wtFlags & TERM_VNULL ) continue;
+
+ /* tag-20191211-002: WHERE-clause constraints are not useful to the
+ ** right-hand table of a LEFT JOIN. See tag-20191211-001 for the
+ ** equivalent restriction for ordinary tables. */
+ if( (pSrc->fg.jointype & JT_LEFT)!=0
+ && !ExprHasProperty(pTerm->pExpr, EP_FromJoin)
+ ){
+ continue;
+ }
+ assert( pTerm->u.leftColumn>=(-1) );
+ pIdxCons[j].iColumn = pTerm->u.leftColumn;
+ pIdxCons[j].iTermOffset = i;
+ op = pTerm->eOperator & WO_ALL;
+ if( op==WO_IN ) op = WO_EQ;
+ if( op==WO_AUX ){
+ pIdxCons[j].op = pTerm->eMatchOp;
+ }else if( op & (WO_ISNULL|WO_IS) ){
+ if( op==WO_ISNULL ){
+ pIdxCons[j].op = SQLITE_INDEX_CONSTRAINT_ISNULL;
+ }else{
+ pIdxCons[j].op = SQLITE_INDEX_CONSTRAINT_IS;
+ }
+ }else{
+ pIdxCons[j].op = (u8)op;
+ /* The direct assignment in the previous line is possible only because
+ ** the WO_ and SQLITE_INDEX_CONSTRAINT_ codes are identical. The
+ ** following asserts verify this fact. */
+ assert( WO_EQ==SQLITE_INDEX_CONSTRAINT_EQ );
+ assert( WO_LT==SQLITE_INDEX_CONSTRAINT_LT );
+ assert( WO_LE==SQLITE_INDEX_CONSTRAINT_LE );
+ assert( WO_GT==SQLITE_INDEX_CONSTRAINT_GT );
+ assert( WO_GE==SQLITE_INDEX_CONSTRAINT_GE );
+ assert( pTerm->eOperator&(WO_IN|WO_EQ|WO_LT|WO_LE|WO_GT|WO_GE|WO_AUX) );
+
+ if( op & (WO_LT|WO_LE|WO_GT|WO_GE)
+ && sqlite3ExprIsVector(pTerm->pExpr->pRight)
+ ){
+ testcase( j!=i );
+ if( j<16 ) mNoOmit |= (1 << j);
+ if( op==WO_LT ) pIdxCons[j].op = WO_LE;
+ if( op==WO_GT ) pIdxCons[j].op = WO_GE;
+ }
+ }
+
+ j++;
+ }
+ pIdxInfo->nConstraint = j;
+ for(i=0; i<nOrderBy; i++){
+ Expr *pExpr = pOrderBy->a[i].pExpr;
+ pIdxOrderBy[i].iColumn = pExpr->iColumn;
+ pIdxOrderBy[i].desc = pOrderBy->a[i].sortFlags & KEYINFO_ORDER_DESC;
+ }
+
+ *pmNoOmit = mNoOmit;
+ return pIdxInfo;
+}
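+
+/*
+** The pointer arithmetic above lays everything out in one allocation,
+** in this order:
+**
+**     [ sqlite3_index_info ]
+**     [ HiddenIndexInfo    ]          <- reachable via &pIdxInfo[1]
+**     [ aConstraint[nTerm] ]
+**     [ aOrderBy[nOrderBy] ]
+**     [ aConstraintUsage[nTerm] ]
+**
+** which is why a single sqlite3_free() of the returned pointer releases
+** the whole structure.
+*/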
+
+/*
+** The table object reference passed as the second argument to this function
+** must represent a virtual table. This function invokes the xBestIndex()
+** method of the virtual table with the sqlite3_index_info object that
+** comes in as the 3rd argument to this function.
+**
+** If an error occurs, pParse is populated with an error message and an
+** appropriate error code is returned. A return of SQLITE_CONSTRAINT from
+** xBestIndex is not considered an error. SQLITE_CONSTRAINT indicates that
+** the current configuration of "unusable" flags in sqlite3_index_info
+** cannot result in a valid plan.
+**
+** Whether or not an error is returned, it is the responsibility of the
+** caller to eventually free p->idxStr if p->needToFreeIdxStr indicates
+** that this is required.
+*/
+static int vtabBestIndex(Parse *pParse, Table *pTab, sqlite3_index_info *p){
+ sqlite3_vtab *pVtab = sqlite3GetVTable(pParse->db, pTab)->pVtab;
+ int rc;
+
+ whereTraceIndexInfoInputs(p);
+ rc = pVtab->pModule->xBestIndex(pVtab, p);
+ whereTraceIndexInfoOutputs(p);
+
+ if( rc!=SQLITE_OK && rc!=SQLITE_CONSTRAINT ){
+ if( rc==SQLITE_NOMEM ){
+ sqlite3OomFault(pParse->db);
+ }else if( !pVtab->zErrMsg ){
+ sqlite3ErrorMsg(pParse, "%s", sqlite3ErrStr(rc));
+ }else{
+ sqlite3ErrorMsg(pParse, "%s", pVtab->zErrMsg);
+ }
+ }
+ sqlite3_free(pVtab->zErrMsg);
+ pVtab->zErrMsg = 0;
+ return rc;
+}
+#endif /* !defined(SQLITE_OMIT_VIRTUALTABLE) */
+
+#ifdef SQLITE_ENABLE_STAT4
+/*
+** Estimate the location of a particular key among all keys in an
+** index. Store the results in aStat as follows:
+**
+** aStat[0] Est. number of rows less than pRec
+** aStat[1] Est. number of rows equal to pRec
+**
+** Return the index of the sample that is the smallest sample that
+** is greater than or equal to pRec. Note that this index is not an index
+** into the aSample[] array - it is an index into a virtual set of samples
+** based on the contents of aSample[] and the number of fields in record
+** pRec.
+*/
+static int whereKeyStats(
+ Parse *pParse, /* Parsing & code generating context */
+ Index *pIdx, /* Index to consider domain of */
+ UnpackedRecord *pRec, /* Vector of values to consider */
+ int roundUp, /* Round up if true. Round down if false */
+ tRowcnt *aStat /* OUT: stats written here */
+){
+ IndexSample *aSample = pIdx->aSample;
+ int iCol; /* Index of required stats in anEq[] etc. */
+ int i; /* Index of first sample >= pRec */
+ int iSample; /* Smallest sample larger than or equal to pRec */
+ int iMin = 0; /* Smallest sample not yet tested */
+ int iTest; /* Next sample to test */
+ int res; /* Result of comparison operation */
+ int nField; /* Number of fields in pRec */
+ tRowcnt iLower = 0; /* anLt[] + anEq[] of largest sample pRec is > */
+
+#ifndef SQLITE_DEBUG
+ UNUSED_PARAMETER( pParse );
+#endif
+ assert( pRec!=0 );
+ assert( pIdx->nSample>0 );
+ assert( pRec->nField>0 && pRec->nField<=pIdx->nSampleCol );
+
+ /* Do a binary search to find the first sample greater than or equal
+ ** to pRec. If pRec contains a single field, the set of samples to search
+ ** is simply the aSample[] array. If the samples in aSample[] contain more
+ ** than one field, all fields following the first are ignored.
+ **
+ ** If pRec contains N fields, where N is more than one, then as well as the
+ ** samples in aSample[] (truncated to N fields), the search also has to
+ ** consider prefixes of those samples. For example, if the set of samples
+ ** in aSample is:
+ **
+ ** aSample[0] = (a, 5)
+ ** aSample[1] = (a, 10)
+ ** aSample[2] = (b, 5)
+ ** aSample[3] = (c, 100)
+ ** aSample[4] = (c, 105)
+ **
+ ** Then the search space should ideally be the samples above and the
+ ** unique prefixes [a], [b] and [c]. But since that is hard to organize,
+ ** the code actually searches this set:
+ **
+ ** 0: (a)
+ ** 1: (a, 5)
+ ** 2: (a, 10)
+ ** 3: (a, 10)
+ ** 4: (b)
+ ** 5: (b, 5)
+ ** 6: (c)
+ ** 7: (c, 100)
+ ** 8: (c, 105)
+ ** 9: (c, 105)
+ **
+ ** For each sample in the aSample[] array, N samples are present in the
+ ** effective sample array. In the above, samples 0 and 1 are based on
+ ** sample aSample[0]. Samples 2 and 3 on aSample[1] etc.
+ **
+ ** Often, sample i of each block of N effective samples has (i+1) fields.
+ ** Except, each sample may be extended to ensure that it is greater than or
+ ** equal to the previous sample in the array. For example, in the above,
+ ** sample 2 is the first sample of a block of N samples, so at first it
+ ** appears that it should be 1 field in size. However, that would make it
+ ** smaller than sample 1, so the binary search would not work. As a result,
+ ** it is extended to two fields. The duplicates that this creates do not
+ ** cause any problems.
+ */
+ nField = pRec->nField;
+ iCol = 0;
+ iSample = pIdx->nSample * nField;
+ do{
+ int iSamp; /* Index in aSample[] of test sample */
+ int n; /* Number of fields in test sample */
+
+ iTest = (iMin+iSample)/2;
+ iSamp = iTest / nField;
+ if( iSamp>0 ){
+ /* The proposed effective sample is a prefix of sample aSample[iSamp].
+ ** Specifically, the shortest prefix of at least (1 + iTest%nField)
+ ** fields that is greater than the previous effective sample. */
+ for(n=(iTest % nField) + 1; n<nField; n++){
+ if( aSample[iSamp-1].anLt[n-1]!=aSample[iSamp].anLt[n-1] ) break;
+ }
+ }else{
+ n = iTest + 1;
+ }
+
+ pRec->nField = n;
+ res = sqlite3VdbeRecordCompare(aSample[iSamp].n, aSample[iSamp].p, pRec);
+ if( res<0 ){
+ iLower = aSample[iSamp].anLt[n-1] + aSample[iSamp].anEq[n-1];
+ iMin = iTest+1;
+ }else if( res==0 && n<nField ){
+ iLower = aSample[iSamp].anLt[n-1];
+ iMin = iTest+1;
+ res = -1;
+ }else{
+ iSample = iTest;
+ iCol = n-1;
+ }
+ }while( res && iMin<iSample );
+ i = iSample / nField;
+
+#ifdef SQLITE_DEBUG
+ /* The following assert statements check that the binary search code
+ ** above found the right answer. This block serves no purpose other
+ ** than to invoke the asserts. */
+ if( pParse->db->mallocFailed==0 ){
+ if( res==0 ){
+ /* If (res==0) is true, then pRec must be equal to sample i. */
+ assert( i<pIdx->nSample );
+ assert( iCol==nField-1 );
+ pRec->nField = nField;
+ assert( 0==sqlite3VdbeRecordCompare(aSample[i].n, aSample[i].p, pRec)
+ || pParse->db->mallocFailed
+ );
+ }else{
+ /* Unless i==pIdx->nSample, indicating that pRec is larger than
+ ** all samples in the aSample[] array, pRec must be smaller than the
+ ** (iCol+1) field prefix of sample i. */
+ assert( i<=pIdx->nSample && i>=0 );
+ pRec->nField = iCol+1;
+ assert( i==pIdx->nSample
+ || sqlite3VdbeRecordCompare(aSample[i].n, aSample[i].p, pRec)>0
+ || pParse->db->mallocFailed );
+
+ /* if i==0 and iCol==0, then record pRec is smaller than all samples
+ ** in the aSample[] array. Otherwise, if (iCol>0) then pRec must
+ ** be greater than or equal to the (iCol) field prefix of sample i.
+ ** If (i>0), then pRec must also be greater than sample (i-1). */
+ if( iCol>0 ){
+ pRec->nField = iCol;
+ assert( sqlite3VdbeRecordCompare(aSample[i].n, aSample[i].p, pRec)<=0
+ || pParse->db->mallocFailed );
+ }
+ if( i>0 ){
+ pRec->nField = nField;
+ assert( sqlite3VdbeRecordCompare(aSample[i-1].n, aSample[i-1].p, pRec)<0
+ || pParse->db->mallocFailed );
+ }
+ }
+ }
+#endif /* ifdef SQLITE_DEBUG */
+
+ if( res==0 ){
+ /* Record pRec is equal to sample i */
+ assert( iCol==nField-1 );
+ aStat[0] = aSample[i].anLt[iCol];
+ aStat[1] = aSample[i].anEq[iCol];
+ }else{
+ /* At this point, the (iCol+1) field prefix of aSample[i] is the first
+ ** sample that is greater than pRec. Or, if i==pIdx->nSample then pRec
+ ** is larger than all samples in the array. */
+ tRowcnt iUpper, iGap;
+ if( i>=pIdx->nSample ){
+ iUpper = sqlite3LogEstToInt(pIdx->aiRowLogEst[0]);
+ }else{
+ iUpper = aSample[i].anLt[iCol];
+ }
+
+ if( iLower>=iUpper ){
+ iGap = 0;
+ }else{
+ iGap = iUpper - iLower;
+ }
+ if( roundUp ){
+ iGap = (iGap*2)/3;
+ }else{
+ iGap = iGap/3;
+ }
+ aStat[0] = iLower + iGap;
+ aStat[1] = pIdx->aAvgEq[nField-1];
+ }
+
+ /* Restore the pRec->nField value before returning. */
+ pRec->nField = nField;
+ return i;
+}
+#endif /* SQLITE_ENABLE_STAT4 */
+
+/*
+** If it is not NULL, pTerm is a term that provides an upper or lower
+** bound on a range scan. Without considering pTerm, it is estimated
+** that the scan will visit nNew rows. This function returns the number
+** estimated to be visited after taking pTerm into account.
+**
+** If the user explicitly specified a likelihood() value for this term,
+** then the return value is the likelihood multiplied by the number of
+** input rows. Otherwise, this function assumes that an "IS NOT NULL" term
+** has a likelihood of 0.50, and any other term a likelihood of 0.25.
+*/
+static LogEst whereRangeAdjust(WhereTerm *pTerm, LogEst nNew){
+ LogEst nRet = nNew;
+ if( pTerm ){
+ if( pTerm->truthProb<=0 ){
+ nRet += pTerm->truthProb;
+ }else if( (pTerm->wtFlags & TERM_VNULL)==0 ){
+ nRet -= 20; assert( 20==sqlite3LogEst(4) );
+ }
+ }
+ return nRet;
+}
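+
+/*
+** Two worked examples with illustrative numbers: if nNew is 66 (about
+** 100 rows) and pTerm is an ordinary range bound, the estimate drops by
+** 20 LogEst units (20==LogEst(4)) to 46, about 25 rows. If instead the
+** term was written likelihood(x>?, 0.1), then truthProb is roughly -33
+** (LogEst(0.1)) and the estimate becomes 66-33=33, about 10 rows.
+*/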
+
+
+#ifdef SQLITE_ENABLE_STAT4
+/*
+** Return the affinity for a single column of an index.
+*/
+SQLITE_PRIVATE char sqlite3IndexColumnAffinity(sqlite3 *db, Index *pIdx, int iCol){
+ assert( iCol>=0 && iCol<pIdx->nColumn );
+ if( !pIdx->zColAff ){
+ if( sqlite3IndexAffinityStr(db, pIdx)==0 ) return SQLITE_AFF_BLOB;
+ }
+ assert( pIdx->zColAff[iCol]!=0 );
+ return pIdx->zColAff[iCol];
+}
+#endif
+
+
+#ifdef SQLITE_ENABLE_STAT4
+/*
+** This function is called to estimate the number of rows visited by a
+** range-scan on a skip-scan index. For example:
+**
+** CREATE INDEX i1 ON t1(a, b, c);
+** SELECT * FROM t1 WHERE a=? AND c BETWEEN ? AND ?;
+**
+** Value pLoop->nOut is currently set to the estimated number of rows
+** visited for scanning (a=? AND b=?). This function reduces that estimate
+** by some factor to account for the (c BETWEEN ? AND ?) expression based
+** on the stat4 data for the index. The fact that this scan will be
+** performed multiple times (once for each (a,b) combination that
+** matches a=?) is dealt with by the caller.
+**
+** It does this by scanning through all stat4 samples, comparing values
+** extracted from pLower and pUpper with the corresponding column in each
+** sample. If L and U are the number of samples found to be less than or
+** equal to the values extracted from pLower and pUpper respectively, and
+** N is the total number of samples, the pLoop->nOut value is adjusted
+** as follows:
+**
+** nOut = nOut * ( max(U - L, 1) / N )
+**
+** If pLower is NULL, or a value cannot be extracted from the term, L is
+** set to zero. If pUpper is NULL, or a value cannot be extracted from it,
+** U is set to N.
+**
+** Normally, this function sets *pbDone to 1 before returning. However,
+** if no value can be extracted from either pLower or pUpper (and so the
+** estimate of the number of rows delivered remains unchanged), *pbDone
+** is left as is.
+**
+** If an error occurs, an SQLite error code is returned. Otherwise,
+** SQLITE_OK.
+*/
+static int whereRangeSkipScanEst(
+ Parse *pParse, /* Parsing & code generating context */
+ WhereTerm *pLower, /* Lower bound on the range. ex: "x>123" Might be NULL */
+ WhereTerm *pUpper, /* Upper bound on the range. ex: "x<455" Might be NULL */
+ WhereLoop *pLoop, /* Update the .nOut value of this loop */
+ int *pbDone /* Set to true if at least one expr. value extracted */
+){
+ Index *p = pLoop->u.btree.pIndex;
+ int nEq = pLoop->u.btree.nEq;
+ sqlite3 *db = pParse->db;
+ int nLower = -1;
+ int nUpper = p->nSample+1;
+ int rc = SQLITE_OK;
+ u8 aff = sqlite3IndexColumnAffinity(db, p, nEq);
+ CollSeq *pColl;
+
+ sqlite3_value *p1 = 0; /* Value extracted from pLower */
+ sqlite3_value *p2 = 0; /* Value extracted from pUpper */
+ sqlite3_value *pVal = 0; /* Value extracted from record */
+
+ pColl = sqlite3LocateCollSeq(pParse, p->azColl[nEq]);
+ if( pLower ){
+ rc = sqlite3Stat4ValueFromExpr(pParse, pLower->pExpr->pRight, aff, &p1);
+ nLower = 0;
+ }
+ if( pUpper && rc==SQLITE_OK ){
+ rc = sqlite3Stat4ValueFromExpr(pParse, pUpper->pExpr->pRight, aff, &p2);
+ nUpper = p2 ? 0 : p->nSample;
+ }
+
+ if( p1 || p2 ){
+ int i;
+ int nDiff;
+ for(i=0; rc==SQLITE_OK && i<p->nSample; i++){
+ rc = sqlite3Stat4Column(db, p->aSample[i].p, p->aSample[i].n, nEq, &pVal);
+ if( rc==SQLITE_OK && p1 ){
+ int res = sqlite3MemCompare(p1, pVal, pColl);
+ if( res>=0 ) nLower++;
+ }
+ if( rc==SQLITE_OK && p2 ){
+ int res = sqlite3MemCompare(p2, pVal, pColl);
+ if( res>=0 ) nUpper++;
+ }
+ }
+ nDiff = (nUpper - nLower);
+ if( nDiff<=0 ) nDiff = 1;
+
+ /* If there is both an upper and lower bound specified, and the
+ ** comparisons indicate that they are close together, use the fallback
+ ** method (assume that the scan visits 1/64 of the rows) for estimating
+ ** the number of rows visited. Otherwise, estimate the number of rows
+ ** using the method described in the header comment for this function. */
+ if( nDiff!=1 || pUpper==0 || pLower==0 ){
+ int nAdjust = (sqlite3LogEst(p->nSample) - sqlite3LogEst(nDiff));
+ pLoop->nOut -= nAdjust;
+ *pbDone = 1;
+ WHERETRACE(0x10, ("range skip-scan regions: %u..%u adjust=%d est=%d\n",
+ nLower, nUpper, nAdjust*-1, pLoop->nOut));
+ }
+
+ }else{
+ assert( *pbDone==0 );
+ }
+
+ sqlite3ValueFree(p1);
+ sqlite3ValueFree(p2);
+ sqlite3ValueFree(pVal);
+
+ return rc;
+}
+#endif /* SQLITE_ENABLE_STAT4 */
+
+/*
+** This function is used to estimate the number of rows that will be visited
+** by scanning an index for a range of values. The range may have an upper
+** bound, a lower bound, or both. The WHERE clause terms that set the upper
+** and lower bounds are represented by pLower and pUpper respectively. For
+** example, assuming that index p is on t1(a):
+**
+** ... FROM t1 WHERE a > ? AND a < ? ...
+** |_____| |_____|
+** | |
+** pLower pUpper
+**
+** If either of the upper or lower bound is not present, then NULL is passed in
+** place of the corresponding WhereTerm.
+**
+** The value in (pBuilder->pNew->u.btree.nEq) is the number of the index
+** column subject to the range constraint. Or, equivalently, the number of
+** equality constraints optimized by the proposed index scan. For example,
+** assuming index p is on t1(a, b), and the SQL query is:
+**
+** ... FROM t1 WHERE a = ? AND b > ? AND b < ? ...
+**
+** then nEq is set to 1 (as the range restricted column, b, is the second
+** left-most column of the index). Or, if the query is:
+**
+** ... FROM t1 WHERE a > ? AND a < ? ...
+**
+** then nEq is set to 0.
+**
+** When this function is called, *pnOut is set to the sqlite3LogEst() of the
+** number of rows that the index scan is expected to visit without
+** considering the range constraints. If nEq is 0, then *pnOut is the number of
+** rows in the index. Assuming no error occurs, *pnOut is adjusted (reduced)
+** to account for the range constraints pLower and pUpper.
+**
+** In the absence of sqlite_stat4 ANALYZE data, or if such data cannot be
+** used, a single range inequality reduces the search space by a factor of 4,
+** and a pair of constraints (x>? AND x<?) reduces the expected number of
+** rows visited by a factor of 64.
+*/
+static int whereRangeScanEst(
+ Parse *pParse, /* Parsing & code generating context */
+ WhereLoopBuilder *pBuilder,
+ WhereTerm *pLower, /* Lower bound on the range. ex: "x>123" Might be NULL */
+ WhereTerm *pUpper, /* Upper bound on the range. ex: "x<455" Might be NULL */
+ WhereLoop *pLoop /* Modify the .nOut and maybe .rRun fields */
+){
+ int rc = SQLITE_OK;
+ int nOut = pLoop->nOut;
+ LogEst nNew;
+
+#ifdef SQLITE_ENABLE_STAT4
+ Index *p = pLoop->u.btree.pIndex;
+ int nEq = pLoop->u.btree.nEq;
+
+ if( p->nSample>0 && ALWAYS(nEq<p->nSampleCol)
+ && OptimizationEnabled(pParse->db, SQLITE_Stat4)
+ ){
+ if( nEq==pBuilder->nRecValid ){
+ UnpackedRecord *pRec = pBuilder->pRec;
+ tRowcnt a[2];
+ int nBtm = pLoop->u.btree.nBtm;
+ int nTop = pLoop->u.btree.nTop;
+
+ /* Variable iLower will be set to the estimate of the number of rows in
+ ** the index that are less than the lower bound of the range query. The
+ ** lower bound being the concatenation of $P and $L, where $P is the
+ ** key-prefix formed by the nEq values matched against the nEq left-most
+ ** columns of the index, and $L is the value in pLower.
+ **
+ ** Or, if pLower is NULL or $L cannot be extracted from it (because it
+ ** is not a simple variable or literal value), the lower bound of the
+ ** range is $P. Due to a quirk in the way whereKeyStats() works, even
+ ** if $L is available, whereKeyStats() is called for both ($P) and
+ ** ($P:$L) and the larger of the two returned values is used.
+ **
+ ** Similarly, iUpper is to be set to the estimate of the number of rows
+ ** less than the upper bound of the range query. Where the upper bound
+ ** is either ($P) or ($P:$U). Again, even if $U is available, both values
+ ** of iUpper are requested of whereKeyStats() and the smaller used.
+ **
+ ** The number of rows between the two bounds is then just iUpper-iLower.
+ */
+ tRowcnt iLower; /* Rows less than the lower bound */
+ tRowcnt iUpper; /* Rows less than the upper bound */
+ int iLwrIdx = -2; /* aSample[] for the lower bound */
+ int iUprIdx = -1; /* aSample[] for the upper bound */
+
+ if( pRec ){
+ testcase( pRec->nField!=pBuilder->nRecValid );
+ pRec->nField = pBuilder->nRecValid;
+ }
+ /* Determine iLower and iUpper using ($P) only. */
+ if( nEq==0 ){
+ iLower = 0;
+ iUpper = p->nRowEst0;
+ }else{
+ /* Note: this call could be optimized away - since the same values must
+ ** have been requested when testing key $P in whereEqualScanEst(). */
+ whereKeyStats(pParse, p, pRec, 0, a);
+ iLower = a[0];
+ iUpper = a[0] + a[1];
+ }
+
+ assert( pLower==0 || (pLower->eOperator & (WO_GT|WO_GE))!=0 );
+ assert( pUpper==0 || (pUpper->eOperator & (WO_LT|WO_LE))!=0 );
+ assert( p->aSortOrder!=0 );
+ if( p->aSortOrder[nEq] ){
+ /* The roles of pLower and pUpper are swapped for a DESC index */
+ SWAP(WhereTerm*, pLower, pUpper);
+ SWAP(int, nBtm, nTop);
+ }
+
+ /* If possible, improve on the iLower estimate using ($P:$L). */
+ if( pLower ){
+ int n; /* Values extracted from pExpr */
+ Expr *pExpr = pLower->pExpr->pRight;
+ rc = sqlite3Stat4ProbeSetValue(pParse, p, &pRec, pExpr, nBtm, nEq, &n);
+ if( rc==SQLITE_OK && n ){
+ tRowcnt iNew;
+ u16 mask = WO_GT|WO_LE;
+ if( sqlite3ExprVectorSize(pExpr)>n ) mask = (WO_LE|WO_LT);
+ iLwrIdx = whereKeyStats(pParse, p, pRec, 0, a);
+ iNew = a[0] + ((pLower->eOperator & mask) ? a[1] : 0);
+ if( iNew>iLower ) iLower = iNew;
+ nOut--;
+ pLower = 0;
+ }
+ }
+
+ /* If possible, improve on the iUpper estimate using ($P:$U). */
+ if( pUpper ){
+ int n; /* Values extracted from pExpr */
+ Expr *pExpr = pUpper->pExpr->pRight;
+ rc = sqlite3Stat4ProbeSetValue(pParse, p, &pRec, pExpr, nTop, nEq, &n);
+ if( rc==SQLITE_OK && n ){
+ tRowcnt iNew;
+ u16 mask = WO_GT|WO_LE;
+ if( sqlite3ExprVectorSize(pExpr)>n ) mask = (WO_LE|WO_LT);
+ iUprIdx = whereKeyStats(pParse, p, pRec, 1, a);
+ iNew = a[0] + ((pUpper->eOperator & mask) ? a[1] : 0);
+ if( iNew<iUpper ) iUpper = iNew;
+ nOut--;
+ pUpper = 0;
+ }
+ }
+
+ pBuilder->pRec = pRec;
+ if( rc==SQLITE_OK ){
+ if( iUpper>iLower ){
+ nNew = sqlite3LogEst(iUpper - iLower);
+ /* TUNING: If both iUpper and iLower are derived from the same
+ ** sample, then assume they are 4x more selective. This brings
+ ** the estimated selectivity more in line with what it would be
+ ** if estimated without the use of STAT4 tables. */
+ if( iLwrIdx==iUprIdx ) nNew -= 20; assert( 20==sqlite3LogEst(4) );
+ }else{
+ nNew = 10; assert( 10==sqlite3LogEst(2) );
+ }
+ if( nNew<nOut ){
+ nOut = nNew;
+ }
+ WHERETRACE(0x10, ("STAT4 range scan: %u..%u est=%d\n",
+ (u32)iLower, (u32)iUpper, nOut));
+ }
+ }else{
+ int bDone = 0;
+ rc = whereRangeSkipScanEst(pParse, pLower, pUpper, pLoop, &bDone);
+ if( bDone ) return rc;
+ }
+ }
+#else
+ UNUSED_PARAMETER(pParse);
+ UNUSED_PARAMETER(pBuilder);
+ assert( pLower || pUpper );
+#endif
+ assert( pUpper==0 || (pUpper->wtFlags & TERM_VNULL)==0 );
+ nNew = whereRangeAdjust(pLower, nOut);
+ nNew = whereRangeAdjust(pUpper, nNew);
+
+ /* TUNING: If there is both an upper and lower limit and neither limit
+ ** has an application-defined likelihood(), assume the range is
+ ** reduced by an additional 75%. This means that, by default, an open-ended
+ ** range query (e.g. col > ?) is assumed to match 1/4 of the rows in the
+ ** index, while a closed range (e.g. col BETWEEN ? AND ?) is estimated to
+ ** match 1/64 of the index. */
+ if( pLower && pLower->truthProb>0 && pUpper && pUpper->truthProb>0 ){
+ nNew -= 20;
+ }
+
+ nOut -= (pLower!=0) + (pUpper!=0);
+ if( nNew<10 ) nNew = 10;
+ if( nNew<nOut ) nOut = nNew;
+#if defined(WHERETRACE_ENABLED)
+ if( pLoop->nOut>nOut ){
+ WHERETRACE(0x10,("Range scan lowers nOut from %d to %d\n",
+ pLoop->nOut, nOut));
+ }
+#endif
+ pLoop->nOut = (LogEst)nOut;
+ return rc;
+}
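+
+/*
+** A worked example of the fallback path (no usable stat4 data), with
+** illustrative numbers: for an index of about 100 rows (nOut==66) and a
+** closed range with no likelihood() annotations, the two
+** whereRangeAdjust() calls subtract 20 each and the closed-range bonus
+** subtracts another 20, giving nNew==6. That is then floored at 10
+** (about 2 rows), which is roughly the advertised 1/64 reduction.
+*/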
+
+#ifdef SQLITE_ENABLE_STAT4
+/*
+** Estimate the number of rows that will be returned based on
+** an equality constraint x=VALUE and where that VALUE occurs in
+** the histogram data. This only works when x is the left-most
+** column of an index and sqlite_stat4 histogram data is available
+** for that index. When pExpr==NULL that means the constraint is
+** "x IS NULL" instead of "x=VALUE".
+**
+** Write the estimated row count into *pnRow and return SQLITE_OK.
+** If unable to make an estimate, leave *pnRow unchanged and return
+** non-zero.
+**
+** This routine can fail if it is unable to load a collating sequence
+** required for string comparison, or if unable to allocate memory
+** for a UTF conversion required for comparison. The error is stored
+** in the pParse structure.
+*/
+static int whereEqualScanEst(
+ Parse *pParse, /* Parsing & code generating context */
+ WhereLoopBuilder *pBuilder,
+ Expr *pExpr, /* Expression for VALUE in the x=VALUE constraint */
+ tRowcnt *pnRow /* Write the revised row estimate here */
+){
+ Index *p = pBuilder->pNew->u.btree.pIndex;
+ int nEq = pBuilder->pNew->u.btree.nEq;
+ UnpackedRecord *pRec = pBuilder->pRec;
+ int rc; /* Subfunction return code */
+ tRowcnt a[2]; /* Statistics */
+ int bOk;
+
+ assert( nEq>=1 );
+ assert( nEq<=p->nColumn );
+ assert( p->aSample!=0 );
+ assert( p->nSample>0 );
+ assert( pBuilder->nRecValid<nEq );
+
+ /* If values are not available for all fields of the index to the left
+ ** of this one, no estimate can be made. Return SQLITE_NOTFOUND. */
+ if( pBuilder->nRecValid<(nEq-1) ){
+ return SQLITE_NOTFOUND;
+ }
+
+ /* This is an optimization only. The call to sqlite3Stat4ProbeSetValue()
+ ** below would return the same value. */
+ if( nEq>=p->nColumn ){
+ *pnRow = 1;
+ return SQLITE_OK;
+ }
+
+ rc = sqlite3Stat4ProbeSetValue(pParse, p, &pRec, pExpr, 1, nEq-1, &bOk);
+ pBuilder->pRec = pRec;
+ if( rc!=SQLITE_OK ) return rc;
+ if( bOk==0 ) return SQLITE_NOTFOUND;
+ pBuilder->nRecValid = nEq;
+
+ whereKeyStats(pParse, p, pRec, 0, a);
+ WHERETRACE(0x10,("equality scan regions %s(%d): %d\n",
+ p->zName, nEq-1, (int)a[1]));
+ *pnRow = a[1];
+
+ return rc;
+}
+#endif /* SQLITE_ENABLE_STAT4 */
+
+#ifdef SQLITE_ENABLE_STAT4
+/*
+** Estimate the number of rows that will be returned based on
+** an IN constraint where the right-hand side of the IN operator
+** is a list of values. Example:
+**
+** WHERE x IN (1,2,3,4)
+**
+** Write the estimated row count into *pnRow and return SQLITE_OK.
+** If unable to make an estimate, leave *pnRow unchanged and return
+** non-zero.
+**
+** This routine can fail if it is unable to load a collating sequence
+** required for string comparison, or if unable to allocate memory
+** for a UTF conversion required for comparison. The error is stored
+** in the pParse structure.
+*/
+static int whereInScanEst(
+ Parse *pParse, /* Parsing & code generating context */
+ WhereLoopBuilder *pBuilder,
+ ExprList *pList, /* The value list on the RHS of "x IN (v1,v2,v3,...)" */
+ tRowcnt *pnRow /* Write the revised row estimate here */
+){
+ Index *p = pBuilder->pNew->u.btree.pIndex;
+ i64 nRow0 = sqlite3LogEstToInt(p->aiRowLogEst[0]);
+ int nRecValid = pBuilder->nRecValid;
+ int rc = SQLITE_OK; /* Subfunction return code */
+ tRowcnt nEst; /* Number of rows for a single term */
+ tRowcnt nRowEst = 0; /* New estimate of the number of rows */
+ int i; /* Loop counter */
+
+ assert( p->aSample!=0 );
+ for(i=0; rc==SQLITE_OK && i<pList->nExpr; i++){
+ nEst = nRow0;
+ rc = whereEqualScanEst(pParse, pBuilder, pList->a[i].pExpr, &nEst);
+ nRowEst += nEst;
+ pBuilder->nRecValid = nRecValid;
+ }
+
+ if( rc==SQLITE_OK ){
+ if( nRowEst > nRow0 ) nRowEst = nRow0;
+ *pnRow = nRowEst;
+ WHERETRACE(0x10,("IN row estimate: est=%d\n", nRowEst));
+ }
+ assert( pBuilder->nRecValid==nRecValid );
+ return rc;
+}
+#endif /* SQLITE_ENABLE_STAT4 */
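+
+/*
+** For example, with illustrative stat4 estimates: if "x IN (1,2,3)" and
+** the per-value equality estimates are 5, 10 and 5 rows, the combined
+** estimate is 20 rows. The sum is capped at the total number of rows in
+** the index, so an IN list can never be estimated to return more rows
+** than a full scan.
+*/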
+
+
+#ifdef WHERETRACE_ENABLED
+/*
+** Print the content of a WhereTerm object
+*/
+SQLITE_PRIVATE void sqlite3WhereTermPrint(WhereTerm *pTerm, int iTerm){
+ if( pTerm==0 ){
+ sqlite3DebugPrintf("TERM-%-3d NULL\n", iTerm);
+ }else{
+ char zType[8];
+ char zLeft[50];
+ memcpy(zType, "....", 5);
+ if( pTerm->wtFlags & TERM_VIRTUAL ) zType[0] = 'V';
+ if( pTerm->eOperator & WO_EQUIV ) zType[1] = 'E';
+ if( ExprHasProperty(pTerm->pExpr, EP_FromJoin) ) zType[2] = 'L';
+ if( pTerm->wtFlags & TERM_CODED ) zType[3] = 'C';
+ if( pTerm->eOperator & WO_SINGLE ){
+ sqlite3_snprintf(sizeof(zLeft),zLeft,"left={%d:%d}",
+ pTerm->leftCursor, pTerm->u.leftColumn);
+ }else if( (pTerm->eOperator & WO_OR)!=0 && pTerm->u.pOrInfo!=0 ){
+ sqlite3_snprintf(sizeof(zLeft),zLeft,"indexable=0x%lld",
+ pTerm->u.pOrInfo->indexable);
+ }else{
+ sqlite3_snprintf(sizeof(zLeft),zLeft,"left=%d", pTerm->leftCursor);
+ }
+ sqlite3DebugPrintf(
+ "TERM-%-3d %p %s %-12s op=%03x wtFlags=%04x",
+ iTerm, pTerm, zType, zLeft, pTerm->eOperator, pTerm->wtFlags);
+ /* The 0x10000 .wheretrace flag causes extra information to be
+ ** shown about each Term */
+ if( sqlite3WhereTrace & 0x10000 ){
+ sqlite3DebugPrintf(" prob=%-3d prereq=%llx,%llx",
+ pTerm->truthProb, (u64)pTerm->prereqAll, (u64)pTerm->prereqRight);
+ }
+ if( pTerm->iField ){
+ sqlite3DebugPrintf(" iField=%d", pTerm->iField);
+ }
+ if( pTerm->iParent>=0 ){
+ sqlite3DebugPrintf(" iParent=%d", pTerm->iParent);
+ }
+ sqlite3DebugPrintf("\n");
+ sqlite3TreeViewExpr(0, pTerm->pExpr, 0);
+ }
+}
+#endif
+
+#ifdef WHERETRACE_ENABLED
+/*
+** Show the complete content of a WhereClause
+*/
+SQLITE_PRIVATE void sqlite3WhereClausePrint(WhereClause *pWC){
+ int i;
+ for(i=0; i<pWC->nTerm; i++){
+ sqlite3WhereTermPrint(&pWC->a[i], i);
+ }
+}
+#endif
+
+#ifdef WHERETRACE_ENABLED
+/*
+** Print a WhereLoop object for debugging purposes
+*/
+SQLITE_PRIVATE void sqlite3WhereLoopPrint(WhereLoop *p, WhereClause *pWC){
+ WhereInfo *pWInfo = pWC->pWInfo;
+ int nb = 1+(pWInfo->pTabList->nSrc+3)/4;
+ struct SrcList_item *pItem = pWInfo->pTabList->a + p->iTab;
+ Table *pTab = pItem->pTab;
+ Bitmask mAll = (((Bitmask)1)<<(nb*4)) - 1;
+ sqlite3DebugPrintf("%c%2d.%0*llx.%0*llx", p->cId,
+ p->iTab, nb, p->maskSelf, nb, p->prereq & mAll);
+ sqlite3DebugPrintf(" %12s",
+ pItem->zAlias ? pItem->zAlias : pTab->zName);
+ if( (p->wsFlags & WHERE_VIRTUALTABLE)==0 ){
+ const char *zName;
+ if( p->u.btree.pIndex && (zName = p->u.btree.pIndex->zName)!=0 ){
+ if( strncmp(zName, "sqlite_autoindex_", 17)==0 ){
+ int i = sqlite3Strlen30(zName) - 1;
+ while( zName[i]!='_' ) i--;
+ zName += i;
+ }
+ sqlite3DebugPrintf(".%-16s %2d", zName, p->u.btree.nEq);
+ }else{
+ sqlite3DebugPrintf("%20s","");
+ }
+ }else{
+ char *z;
+ if( p->u.vtab.idxStr ){
+ z = sqlite3_mprintf("(%d,\"%s\",%#x)",
+ p->u.vtab.idxNum, p->u.vtab.idxStr, p->u.vtab.omitMask);
+ }else{
+ z = sqlite3_mprintf("(%d,%x)", p->u.vtab.idxNum, p->u.vtab.omitMask);
+ }
+ sqlite3DebugPrintf(" %-19s", z);
+ sqlite3_free(z);
+ }
+ if( p->wsFlags & WHERE_SKIPSCAN ){
+ sqlite3DebugPrintf(" f %05x %d-%d", p->wsFlags, p->nLTerm,p->nSkip);
+ }else{
+ sqlite3DebugPrintf(" f %05x N %d", p->wsFlags, p->nLTerm);
+ }
+ sqlite3DebugPrintf(" cost %d,%d,%d\n", p->rSetup, p->rRun, p->nOut);
+ if( p->nLTerm && (sqlite3WhereTrace & 0x100)!=0 ){
+ int i;
+ for(i=0; i<p->nLTerm; i++){
+ sqlite3WhereTermPrint(p->aLTerm[i], i);
+ }
+ }
+}
+#endif
+
+/*
+** Convert bulk memory into a valid WhereLoop that can be passed
+** to whereLoopClear harmlessly.
+*/
+static void whereLoopInit(WhereLoop *p){
+ p->aLTerm = p->aLTermSpace;
+ p->nLTerm = 0;
+ p->nLSlot = ArraySize(p->aLTermSpace);
+ p->wsFlags = 0;
+}
+
+/*
+** Clear the WhereLoop.u union. Leave WhereLoop.aLTerm intact.
+*/
+static void whereLoopClearUnion(sqlite3 *db, WhereLoop *p){
+ if( p->wsFlags & (WHERE_VIRTUALTABLE|WHERE_AUTO_INDEX) ){
+ if( (p->wsFlags & WHERE_VIRTUALTABLE)!=0 && p->u.vtab.needFree ){
+ sqlite3_free(p->u.vtab.idxStr);
+ p->u.vtab.needFree = 0;
+ p->u.vtab.idxStr = 0;
+ }else if( (p->wsFlags & WHERE_AUTO_INDEX)!=0 && p->u.btree.pIndex!=0 ){
+ sqlite3DbFree(db, p->u.btree.pIndex->zColAff);
+ sqlite3DbFreeNN(db, p->u.btree.pIndex);
+ p->u.btree.pIndex = 0;
+ }
+ }
+}
+
+/*
+** Deallocate internal memory used by a WhereLoop object
+*/
+static void whereLoopClear(sqlite3 *db, WhereLoop *p){
+ if( p->aLTerm!=p->aLTermSpace ) sqlite3DbFreeNN(db, p->aLTerm);
+ whereLoopClearUnion(db, p);
+ whereLoopInit(p);
+}
+
+/*
+** Increase the memory allocation for pLoop->aLTerm[] to be at least n.
+*/
+static int whereLoopResize(sqlite3 *db, WhereLoop *p, int n){
+ WhereTerm **paNew;
+ if( p->nLSlot>=n ) return SQLITE_OK;
+ n = (n+7)&~7;
+ paNew = sqlite3DbMallocRawNN(db, sizeof(p->aLTerm[0])*n);
+ if( paNew==0 ) return SQLITE_NOMEM_BKPT;
+ memcpy(paNew, p->aLTerm, sizeof(p->aLTerm[0])*p->nLSlot);
+ if( p->aLTerm!=p->aLTermSpace ) sqlite3DbFreeNN(db, p->aLTerm);
+ p->aLTerm = paNew;
+ p->nLSlot = n;
+ return SQLITE_OK;
+}
+
+/*
+** Transfer content from the second pLoop into the first.
+*/
+static int whereLoopXfer(sqlite3 *db, WhereLoop *pTo, WhereLoop *pFrom){
+ whereLoopClearUnion(db, pTo);
+ if( whereLoopResize(db, pTo, pFrom->nLTerm) ){
+ memset(&pTo->u, 0, sizeof(pTo->u));
+ return SQLITE_NOMEM_BKPT;
+ }
+ memcpy(pTo, pFrom, WHERE_LOOP_XFER_SZ);
+ memcpy(pTo->aLTerm, pFrom->aLTerm, pTo->nLTerm*sizeof(pTo->aLTerm[0]));
+ if( pFrom->wsFlags & WHERE_VIRTUALTABLE ){
+ pFrom->u.vtab.needFree = 0;
+ }else if( (pFrom->wsFlags & WHERE_AUTO_INDEX)!=0 ){
+ pFrom->u.btree.pIndex = 0;
+ }
+ return SQLITE_OK;
+}
+
+/*
+** Delete a WhereLoop object
+*/
+static void whereLoopDelete(sqlite3 *db, WhereLoop *p){
+ whereLoopClear(db, p);
+ sqlite3DbFreeNN(db, p);
+}
+
+/*
+** Free a WhereInfo structure
+*/
+static void whereInfoFree(sqlite3 *db, WhereInfo *pWInfo){
+ int i;
+ assert( pWInfo!=0 );
+ for(i=0; i<pWInfo->nLevel; i++){
+ WhereLevel *pLevel = &pWInfo->a[i];
+ if( pLevel->pWLoop && (pLevel->pWLoop->wsFlags & WHERE_IN_ABLE) ){
+ sqlite3DbFree(db, pLevel->u.in.aInLoop);
+ }
+ }
+ sqlite3WhereClauseClear(&pWInfo->sWC);
+ while( pWInfo->pLoops ){
+ WhereLoop *p = pWInfo->pLoops;
+ pWInfo->pLoops = p->pNextLoop;
+ whereLoopDelete(db, p);
+ }
+ assert( pWInfo->pExprMods==0 );
+ sqlite3DbFreeNN(db, pWInfo);
+}
+
+/*
+** Return TRUE if all of the following are true:
+**
+**   (1)  X has the same or lower cost than Y
+** (2) X uses fewer WHERE clause terms than Y
+** (3) Every WHERE clause term used by X is also used by Y
+** (4) X skips at least as many columns as Y
+**   (5)  If X is a covering index, then Y is too
+**
+** Conditions (2) and (3) mean that X is a "proper subset" of Y.
+** If X is a proper subset of Y then Y is a better choice and ought
+** to have a lower cost. This routine returns TRUE when that cost
+** relationship is inverted and needs to be adjusted. Constraint (4)
+** was added because if X uses skip-scan less than Y it still might
+** deserve a lower cost even if it is a proper subset of Y. Constraint (5)
+** was added because a covering index probably deserves to have a lower cost
+** than a non-covering index even if it is a proper subset.
+*/
+static int whereLoopCheaperProperSubset(
+ const WhereLoop *pX, /* First WhereLoop to compare */
+ const WhereLoop *pY /* Compare against this WhereLoop */
+){
+ int i, j;
+ if( pX->nLTerm-pX->nSkip >= pY->nLTerm-pY->nSkip ){
+ return 0; /* X is not a subset of Y */
+ }
+ if( pY->nSkip > pX->nSkip ) return 0;
+ if( pX->rRun >= pY->rRun ){
+ if( pX->rRun > pY->rRun ) return 0; /* X costs more than Y */
+ if( pX->nOut > pY->nOut ) return 0; /* X costs more than Y */
+ }
+ for(i=pX->nLTerm-1; i>=0; i--){
+ if( pX->aLTerm[i]==0 ) continue;
+ for(j=pY->nLTerm-1; j>=0; j--){
+ if( pY->aLTerm[j]==pX->aLTerm[i] ) break;
+ }
+ if( j<0 ) return 0; /* X not a subset of Y since term X[i] not used by Y */
+ }
+ if( (pX->wsFlags&WHERE_IDX_ONLY)!=0
+ && (pY->wsFlags&WHERE_IDX_ONLY)==0 ){
+ return 0; /* Constraint (5) */
+ }
+  return 1;  /* All conditions met */
+}
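+
+/* Illustrative note (not part of the original sources): a hypothetical
+** case in which whereLoopCheaperProperSubset(X,Y) returns true:
+**
+**     X uses terms {t1.a=?}          rRun=40  nOut=30  nSkip=0
+**     Y uses terms {t1.a=?, t1.b=?}  rRun=45  nOut=35  nSkip=0
+**
+** X uses fewer terms, every term of X is also used by Y, and condition
+** (5) is not violated, yet X is reported as cheaper.  Since Y filters at
+** least as well as X, the cost relationship is inverted;
+** whereLoopAdjustCost() below repairs it.
+*/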
+
+/*
+** Try to adjust the cost of WhereLoop pTemplate upwards or downwards so
+** that:
+**
+** (1) pTemplate costs less than any other WhereLoops that are a proper
+** subset of pTemplate
+**
+** (2) pTemplate costs more than any other WhereLoops for which pTemplate
+** is a proper subset.
+**
+** To say "WhereLoop X is a proper subset of Y" means that X uses fewer
+** WHERE clause terms than Y and that every WHERE clause term used by X is
+** also used by Y.
+*/
+static void whereLoopAdjustCost(const WhereLoop *p, WhereLoop *pTemplate){
+ if( (pTemplate->wsFlags & WHERE_INDEXED)==0 ) return;
+ for(; p; p=p->pNextLoop){
+ if( p->iTab!=pTemplate->iTab ) continue;
+ if( (p->wsFlags & WHERE_INDEXED)==0 ) continue;
+ if( whereLoopCheaperProperSubset(p, pTemplate) ){
+ /* Adjust pTemplate cost downward so that it is cheaper than its
+ ** subset p. */
+ WHERETRACE(0x80,("subset cost adjustment %d,%d to %d,%d\n",
+ pTemplate->rRun, pTemplate->nOut, p->rRun, p->nOut-1));
+ pTemplate->rRun = p->rRun;
+ pTemplate->nOut = p->nOut - 1;
+ }else if( whereLoopCheaperProperSubset(pTemplate, p) ){
+ /* Adjust pTemplate cost upward so that it is costlier than p since
+ ** pTemplate is a proper subset of p */
+ WHERETRACE(0x80,("subset cost adjustment %d,%d to %d,%d\n",
+ pTemplate->rRun, pTemplate->nOut, p->rRun, p->nOut+1));
+ pTemplate->rRun = p->rRun;
+ pTemplate->nOut = p->nOut + 1;
+ }
+ }
+}
+
+/*
+** Search the list of WhereLoops in *ppPrev looking for one that can be
+** replaced by pTemplate.
+**
+** Return NULL if pTemplate does not belong on the WhereLoop list.
+** In other words if pTemplate ought to be dropped from further consideration.
+**
+** If pX is a WhereLoop that pTemplate can replace, then return the
+** link that points to pX.
+**
+** If pTemplate cannot replace any existing element of the list but needs
+** to be added to the list as a new entry, then return a pointer to the
+** tail of the list.
+*/
+static WhereLoop **whereLoopFindLesser(
+ WhereLoop **ppPrev,
+ const WhereLoop *pTemplate
+){
+ WhereLoop *p;
+ for(p=(*ppPrev); p; ppPrev=&p->pNextLoop, p=*ppPrev){
+ if( p->iTab!=pTemplate->iTab || p->iSortIdx!=pTemplate->iSortIdx ){
+ /* If either the iTab or iSortIdx values for two WhereLoop are different
+ ** then those WhereLoops need to be considered separately. Neither is
+ ** a candidate to replace the other. */
+ continue;
+ }
+ /* In the current implementation, the rSetup value is either zero
+ ** or the cost of building an automatic index (NlogN) and the NlogN
+ ** is the same for compatible WhereLoops. */
+ assert( p->rSetup==0 || pTemplate->rSetup==0
+ || p->rSetup==pTemplate->rSetup );
+
+ /* whereLoopAddBtree() always generates and inserts the automatic index
+ ** case first. Hence compatible candidate WhereLoops never have a larger
+ ** rSetup. Call this SETUP-INVARIANT */
+ assert( p->rSetup>=pTemplate->rSetup );
+
+    /* Any loop using an application-defined index (or PRIMARY KEY or
+ ** UNIQUE constraint) with one or more == constraints is better
+ ** than an automatic index. Unless it is a skip-scan. */
+ if( (p->wsFlags & WHERE_AUTO_INDEX)!=0
+ && (pTemplate->nSkip)==0
+ && (pTemplate->wsFlags & WHERE_INDEXED)!=0
+ && (pTemplate->wsFlags & WHERE_COLUMN_EQ)!=0
+ && (p->prereq & pTemplate->prereq)==pTemplate->prereq
+ ){
+ break;
+ }
+
+ /* If existing WhereLoop p is better than pTemplate, pTemplate can be
+ ** discarded. WhereLoop p is better if:
+ ** (1) p has no more dependencies than pTemplate, and
+ ** (2) p has an equal or lower cost than pTemplate
+ */
+ if( (p->prereq & pTemplate->prereq)==p->prereq /* (1) */
+ && p->rSetup<=pTemplate->rSetup /* (2a) */
+ && p->rRun<=pTemplate->rRun /* (2b) */
+ && p->nOut<=pTemplate->nOut /* (2c) */
+ ){
+ return 0; /* Discard pTemplate */
+ }
+
+ /* If pTemplate is always better than p, then cause p to be overwritten
+ ** with pTemplate. pTemplate is better than p if:
+    **   (1)  pTemplate has no more dependencies than p, and
+ ** (2) pTemplate has an equal or lower cost than p.
+ */
+ if( (p->prereq & pTemplate->prereq)==pTemplate->prereq /* (1) */
+ && p->rRun>=pTemplate->rRun /* (2a) */
+ && p->nOut>=pTemplate->nOut /* (2b) */
+ ){
+ assert( p->rSetup>=pTemplate->rSetup ); /* SETUP-INVARIANT above */
+ break; /* Cause p to be overwritten by pTemplate */
+ }
+ }
+ return ppPrev;
+}
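+
+/* Illustrative note (not part of the original sources): a hypothetical
+** dominance test performed by whereLoopFindLesser().  Given an existing
+** loop p and a candidate pTemplate on the same (iTab, iSortIdx):
+**
+**     p:         prereq=0x01  rSetup=0  rRun=50  nOut=40
+**     pTemplate: prereq=0x03  rSetup=0  rRun=55  nOut=42
+**
+** p depends on a subset of pTemplate's prerequisite tables and is no
+** worse on any cost axis, so pTemplate is discarded (NULL is returned).
+** Swapping the two rows would instead cause p to be overwritten.
+*/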
+
+/*
+** Insert or replace a WhereLoop entry using the template supplied.
+**
+** An existing WhereLoop entry might be overwritten if the new template
+** is better and has fewer dependencies. Or the template will be ignored
+** and no insert will occur if an existing WhereLoop is faster and has
+** fewer dependencies than the template. Otherwise a new WhereLoop is
+** added based on the template.
+**
+** If pBuilder->pOrSet is not NULL then we care about only the
+** prerequisites and rRun and nOut costs of the N best loops. That
+** information is gathered in the pBuilder->pOrSet object. This special
+** processing mode is used only for OR clause processing.
+**
+** When accumulating multiple loops (when pBuilder->pOrSet is NULL) we
+** still might overwrite similar loops with the new template if the
+** new template is better. Loops may be overwritten if the following
+** conditions are met:
+**
+** (1) They have the same iTab.
+** (2) They have the same iSortIdx.
+**   (3)  The template has the same or fewer dependencies than the current loop
+** (4) The template has the same or lower cost than the current loop
+*/
+static int whereLoopInsert(WhereLoopBuilder *pBuilder, WhereLoop *pTemplate){
+ WhereLoop **ppPrev, *p;
+ WhereInfo *pWInfo = pBuilder->pWInfo;
+ sqlite3 *db = pWInfo->pParse->db;
+ int rc;
+
+ /* Stop the search once we hit the query planner search limit */
+ if( pBuilder->iPlanLimit==0 ){
+ WHERETRACE(0xffffffff,("=== query planner search limit reached ===\n"));
+ if( pBuilder->pOrSet ) pBuilder->pOrSet->n = 0;
+ return SQLITE_DONE;
+ }
+ pBuilder->iPlanLimit--;
+
+ whereLoopAdjustCost(pWInfo->pLoops, pTemplate);
+
+ /* If pBuilder->pOrSet is defined, then only keep track of the costs
+ ** and prereqs.
+ */
+ if( pBuilder->pOrSet!=0 ){
+ if( pTemplate->nLTerm ){
+#if WHERETRACE_ENABLED
+ u16 n = pBuilder->pOrSet->n;
+ int x =
+#endif
+ whereOrInsert(pBuilder->pOrSet, pTemplate->prereq, pTemplate->rRun,
+ pTemplate->nOut);
+#if WHERETRACE_ENABLED /* 0x8 */
+ if( sqlite3WhereTrace & 0x8 ){
+ sqlite3DebugPrintf(x?" or-%d: ":" or-X: ", n);
+ sqlite3WhereLoopPrint(pTemplate, pBuilder->pWC);
+ }
+#endif
+ }
+ return SQLITE_OK;
+ }
+
+ /* Look for an existing WhereLoop to replace with pTemplate
+ */
+ ppPrev = whereLoopFindLesser(&pWInfo->pLoops, pTemplate);
+
+ if( ppPrev==0 ){
+ /* There already exists a WhereLoop on the list that is better
+ ** than pTemplate, so just ignore pTemplate */
+#if WHERETRACE_ENABLED /* 0x8 */
+ if( sqlite3WhereTrace & 0x8 ){
+ sqlite3DebugPrintf(" skip: ");
+ sqlite3WhereLoopPrint(pTemplate, pBuilder->pWC);
+ }
+#endif
+ return SQLITE_OK;
+ }else{
+ p = *ppPrev;
+ }
+
+  /* If we reach this point, then either p[] (if it exists) should be
+  ** overwritten with pTemplate[], or, if p==NULL, a new WhereLoop should
+  ** be allocated and inserted.
+ */
+#if WHERETRACE_ENABLED /* 0x8 */
+ if( sqlite3WhereTrace & 0x8 ){
+ if( p!=0 ){
+ sqlite3DebugPrintf("replace: ");
+ sqlite3WhereLoopPrint(p, pBuilder->pWC);
+ sqlite3DebugPrintf(" with: ");
+ }else{
+ sqlite3DebugPrintf(" add: ");
+ }
+ sqlite3WhereLoopPrint(pTemplate, pBuilder->pWC);
+ }
+#endif
+ if( p==0 ){
+ /* Allocate a new WhereLoop to add to the end of the list */
+ *ppPrev = p = sqlite3DbMallocRawNN(db, sizeof(WhereLoop));
+ if( p==0 ) return SQLITE_NOMEM_BKPT;
+ whereLoopInit(p);
+ p->pNextLoop = 0;
+ }else{
+ /* We will be overwriting WhereLoop p[]. But before we do, first
+ ** go through the rest of the list and delete any other entries besides
+    ** p[] that are also supplanted by pTemplate */
+ WhereLoop **ppTail = &p->pNextLoop;
+ WhereLoop *pToDel;
+ while( *ppTail ){
+ ppTail = whereLoopFindLesser(ppTail, pTemplate);
+ if( ppTail==0 ) break;
+ pToDel = *ppTail;
+ if( pToDel==0 ) break;
+ *ppTail = pToDel->pNextLoop;
+#if WHERETRACE_ENABLED /* 0x8 */
+ if( sqlite3WhereTrace & 0x8 ){
+ sqlite3DebugPrintf(" delete: ");
+ sqlite3WhereLoopPrint(pToDel, pBuilder->pWC);
+ }
+#endif
+ whereLoopDelete(db, pToDel);
+ }
+ }
+ rc = whereLoopXfer(db, p, pTemplate);
+ if( (p->wsFlags & WHERE_VIRTUALTABLE)==0 ){
+ Index *pIndex = p->u.btree.pIndex;
+ if( pIndex && pIndex->idxType==SQLITE_IDXTYPE_IPK ){
+ p->u.btree.pIndex = 0;
+ }
+ }
+ return rc;
+}
+
+/*
+** Adjust the WhereLoop.nOut value downward to account for terms of the
+** WHERE clause that reference the loop but which are not used by an
+** index.
+**
+** For every WHERE clause term that is not used by the index
+** and which has a truth probability assigned by one of the likelihood(),
+** likely(), or unlikely() SQL functions, reduce the estimated number
+** of output rows by the probability specified.
+**
+** TUNING: For every WHERE clause term that is not used by the index
+** and which does not have an assigned truth probability, heuristics
+** described below are used to try to estimate the truth probability.
+** TODO --> Perhaps this is something that could be improved by better
+** table statistics.
+**
+** Heuristic 1: Estimate the truth probability as 93.75%. The 93.75%
+** value corresponds to -1 in LogEst notation, so this means decrement
+** the WhereLoop.nOut field for every such WHERE clause term.
+**
+** Heuristic 2:  If there exist one or more WHERE clause terms of the
+** form "x==EXPR" and EXPR is not a constant -1, 0, or 1, then make sure
+** the final output row estimate is no greater than 1/4 of the total
+** number of rows in the table.  In other words, assume that x==EXPR will
+** filter out at least 3 of every 4 rows.  If EXPR is -1, 0, or 1, then
+** "x" may be a boolean column, or -1, 0, or 1 may be a common default
+** value for "x"; in that case, cap the output row estimate at 1/2
+** instead of 1/4.
+*/
+static void whereLoopOutputAdjust(
+ WhereClause *pWC, /* The WHERE clause */
+ WhereLoop *pLoop, /* The loop to adjust downward */
+ LogEst nRow /* Number of rows in the entire table */
+){
+ WhereTerm *pTerm, *pX;
+ Bitmask notAllowed = ~(pLoop->prereq|pLoop->maskSelf);
+ int i, j;
+ LogEst iReduce = 0; /* pLoop->nOut should not exceed nRow-iReduce */
+
+ assert( (pLoop->wsFlags & WHERE_AUTO_INDEX)==0 );
+ for(i=pWC->nTerm, pTerm=pWC->a; i>0; i--, pTerm++){
+ assert( pTerm!=0 );
+ if( (pTerm->wtFlags & TERM_VIRTUAL)!=0 ) break;
+ if( (pTerm->prereqAll & pLoop->maskSelf)==0 ) continue;
+ if( (pTerm->prereqAll & notAllowed)!=0 ) continue;
+ for(j=pLoop->nLTerm-1; j>=0; j--){
+ pX = pLoop->aLTerm[j];
+ if( pX==0 ) continue;
+ if( pX==pTerm ) break;
+ if( pX->iParent>=0 && (&pWC->a[pX->iParent])==pTerm ) break;
+ }
+ if( j<0 ){
+ if( pTerm->truthProb<=0 ){
+ /* If a truth probability is specified using the likelihood() hints,
+ ** then use the probability provided by the application. */
+ pLoop->nOut += pTerm->truthProb;
+ }else{
+ /* In the absence of explicit truth probabilities, use heuristics to
+ ** guess a reasonable truth probability. */
+ pLoop->nOut--;
+ if( (pTerm->eOperator&(WO_EQ|WO_IS))!=0
+ && (pTerm->wtFlags & TERM_HIGHTRUTH)==0 /* tag-20200224-1 */
+ ){
+ Expr *pRight = pTerm->pExpr->pRight;
+ int k = 0;
+ testcase( pTerm->pExpr->op==TK_IS );
+ if( sqlite3ExprIsInteger(pRight, &k) && k>=(-1) && k<=1 ){
+ k = 10;
+ }else{
+ k = 20;
+ }
+ if( iReduce<k ){
+ pTerm->wtFlags |= TERM_HEURTRUTH;
+ iReduce = k;
+ }
+ }
+ }
+ }
+ }
+ if( pLoop->nOut > nRow-iReduce ) pLoop->nOut = nRow - iReduce;
+}
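+
+/* Illustrative note (not part of the original sources): nOut, truthProb
+** and iReduce above are LogEst values, roughly 10*log2(X), so subtraction
+** divides the underlying row count:
+**
+**     pLoop->nOut -= 1;    -- multiply rows by ~0.9375   (Heuristic 1)
+**     iReduce = 10;        -- cap rows at nRow/2  (x==EXPR, EXPR in -1..1)
+**     iReduce = 20;        -- cap rows at nRow/4  (x==EXPR otherwise)
+**
+** since 10 LogEst units are one halving (2^(10/10)==2) and 20 units are
+** two halvings.
+*/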
+
+/*
+** Term pTerm is a vector range comparison operation. The first comparison
+** in the vector can be optimized using column nEq of the index. This
+** function returns the total number of vector elements that can be used
+** as part of the range comparison.
+**
+** For example, if the query is:
+**
+** WHERE a = ? AND (b, c, d) > (?, ?, ?)
+**
+** and the index:
+**
+** CREATE INDEX ... ON (a, b, c, d, e)
+**
+** then this function would be invoked with nEq=1. The value returned in
+** this case is 3.
+*/
+static int whereRangeVectorLen(
+ Parse *pParse, /* Parsing context */
+ int iCur, /* Cursor open on pIdx */
+  Index *pIdx,                    /* The index to be used for an inequality constraint */
+ int nEq, /* Number of prior equality constraints on same index */
+ WhereTerm *pTerm /* The vector inequality constraint */
+){
+ int nCmp = sqlite3ExprVectorSize(pTerm->pExpr->pLeft);
+ int i;
+
+ nCmp = MIN(nCmp, (pIdx->nColumn - nEq));
+ for(i=1; i<nCmp; i++){
+ /* Test if comparison i of pTerm is compatible with column (i+nEq)
+ ** of the index. If not, exit the loop. */
+ char aff; /* Comparison affinity */
+ char idxaff = 0; /* Indexed columns affinity */
+ CollSeq *pColl; /* Comparison collation sequence */
+ Expr *pLhs = pTerm->pExpr->pLeft->x.pList->a[i].pExpr;
+ Expr *pRhs = pTerm->pExpr->pRight;
+ if( pRhs->flags & EP_xIsSelect ){
+ pRhs = pRhs->x.pSelect->pEList->a[i].pExpr;
+ }else{
+ pRhs = pRhs->x.pList->a[i].pExpr;
+ }
+
+    /* Check that the LHS of the comparison is a column reference to
+    ** the correct column of the correct source table, and that the sort
+    ** order of the index column is the same as the sort order of the
+    ** leftmost index column. */
+ if( pLhs->op!=TK_COLUMN
+ || pLhs->iTable!=iCur
+ || pLhs->iColumn!=pIdx->aiColumn[i+nEq]
+ || pIdx->aSortOrder[i+nEq]!=pIdx->aSortOrder[nEq]
+ ){
+ break;
+ }
+
+ testcase( pLhs->iColumn==XN_ROWID );
+ aff = sqlite3CompareAffinity(pRhs, sqlite3ExprAffinity(pLhs));
+ idxaff = sqlite3TableColumnAffinity(pIdx->pTable, pLhs->iColumn);
+ if( aff!=idxaff ) break;
+
+ pColl = sqlite3BinaryCompareCollSeq(pParse, pLhs, pRhs);
+ if( pColl==0 ) break;
+ if( sqlite3StrICmp(pColl->zName, pIdx->azColl[i+nEq]) ) break;
+ }
+ return i;
+}
+
+/*
+** Adjust the cost C by the costMult factor T.  This only occurs when
+** compiled with -DSQLITE_ENABLE_COSTMULT.
+*/
+#ifdef SQLITE_ENABLE_COSTMULT
+# define ApplyCostMultiplier(C,T) C += T
+#else
+# define ApplyCostMultiplier(C,T)
+#endif
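+
+/* Illustrative note (not part of the original sources): costMult is a
+** LogEst value, so the macro applies a multiplicative adjustment.  A
+** hypothetical table with costMult=10 has every loop cost doubled:
+**
+**     pNew->rRun = 100;                      -- about 1,000 rows of work
+**     ApplyCostMultiplier(pNew->rRun, 10);   -- rRun=110, about 2,000
+**
+** When SQLITE_ENABLE_COSTMULT is not defined, the macro is a no-op.
+*/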
+
+/*
+** We have so far matched pBuilder->pNew->u.btree.nEq terms of the
+** index pProbe. Try to match one more.
+**
+** When this function is called, pBuilder->pNew->nOut contains the
+** number of rows expected to be visited by filtering using the nEq
+** terms only. If it is modified, this value is restored before this
+** function returns.
+**
+** If pProbe->idxType==SQLITE_IDXTYPE_IPK, that means pProbe is
+** a fake index used for the INTEGER PRIMARY KEY.
+*/
+static int whereLoopAddBtreeIndex(
+ WhereLoopBuilder *pBuilder, /* The WhereLoop factory */
+ struct SrcList_item *pSrc, /* FROM clause term being analyzed */
+ Index *pProbe, /* An index on pSrc */
+ LogEst nInMul /* log(Number of iterations due to IN) */
+){
+  WhereInfo *pWInfo = pBuilder->pWInfo;  /* WHERE analysis context */
+ Parse *pParse = pWInfo->pParse; /* Parsing context */
+ sqlite3 *db = pParse->db; /* Database connection malloc context */
+ WhereLoop *pNew; /* Template WhereLoop under construction */
+ WhereTerm *pTerm; /* A WhereTerm under consideration */
+ int opMask; /* Valid operators for constraints */
+ WhereScan scan; /* Iterator for WHERE terms */
+ Bitmask saved_prereq; /* Original value of pNew->prereq */
+ u16 saved_nLTerm; /* Original value of pNew->nLTerm */
+ u16 saved_nEq; /* Original value of pNew->u.btree.nEq */
+ u16 saved_nBtm; /* Original value of pNew->u.btree.nBtm */
+ u16 saved_nTop; /* Original value of pNew->u.btree.nTop */
+ u16 saved_nSkip; /* Original value of pNew->nSkip */
+ u32 saved_wsFlags; /* Original value of pNew->wsFlags */
+ LogEst saved_nOut; /* Original value of pNew->nOut */
+ int rc = SQLITE_OK; /* Return code */
+ LogEst rSize; /* Number of rows in the table */
+ LogEst rLogSize; /* Logarithm of table size */
+ WhereTerm *pTop = 0, *pBtm = 0; /* Top and bottom range constraints */
+
+ pNew = pBuilder->pNew;
+ if( db->mallocFailed ) return SQLITE_NOMEM_BKPT;
+ WHERETRACE(0x800, ("BEGIN %s.addBtreeIdx(%s), nEq=%d, nSkip=%d\n",
+ pProbe->pTable->zName,pProbe->zName,
+ pNew->u.btree.nEq, pNew->nSkip));
+
+ assert( (pNew->wsFlags & WHERE_VIRTUALTABLE)==0 );
+ assert( (pNew->wsFlags & WHERE_TOP_LIMIT)==0 );
+ if( pNew->wsFlags & WHERE_BTM_LIMIT ){
+ opMask = WO_LT|WO_LE;
+ }else{
+ assert( pNew->u.btree.nBtm==0 );
+ opMask = WO_EQ|WO_IN|WO_GT|WO_GE|WO_LT|WO_LE|WO_ISNULL|WO_IS;
+ }
+ if( pProbe->bUnordered ) opMask &= ~(WO_GT|WO_GE|WO_LT|WO_LE);
+
+ assert( pNew->u.btree.nEq<pProbe->nColumn );
+
+ saved_nEq = pNew->u.btree.nEq;
+ saved_nBtm = pNew->u.btree.nBtm;
+ saved_nTop = pNew->u.btree.nTop;
+ saved_nSkip = pNew->nSkip;
+ saved_nLTerm = pNew->nLTerm;
+ saved_wsFlags = pNew->wsFlags;
+ saved_prereq = pNew->prereq;
+ saved_nOut = pNew->nOut;
+ pTerm = whereScanInit(&scan, pBuilder->pWC, pSrc->iCursor, saved_nEq,
+ opMask, pProbe);
+ pNew->rSetup = 0;
+ rSize = pProbe->aiRowLogEst[0];
+ rLogSize = estLog(rSize);
+ for(; rc==SQLITE_OK && pTerm!=0; pTerm = whereScanNext(&scan)){
+ u16 eOp = pTerm->eOperator; /* Shorthand for pTerm->eOperator */
+ LogEst rCostIdx;
+ LogEst nOutUnadjusted; /* nOut before IN() and WHERE adjustments */
+ int nIn = 0;
+#ifdef SQLITE_ENABLE_STAT4
+ int nRecValid = pBuilder->nRecValid;
+#endif
+ if( (eOp==WO_ISNULL || (pTerm->wtFlags&TERM_VNULL)!=0)
+ && indexColumnNotNull(pProbe, saved_nEq)
+ ){
+ continue; /* ignore IS [NOT] NULL constraints on NOT NULL columns */
+ }
+ if( pTerm->prereqRight & pNew->maskSelf ) continue;
+
+ /* Do not allow the upper bound of a LIKE optimization range constraint
+ ** to mix with a lower range bound from some other source */
+ if( pTerm->wtFlags & TERM_LIKEOPT && pTerm->eOperator==WO_LT ) continue;
+
+ /* tag-20191211-001: Do not allow constraints from the WHERE clause to
+ ** be used by the right table of a LEFT JOIN. Only constraints in the
+ ** ON clause are allowed. See tag-20191211-002 for the vtab equivalent. */
+ if( (pSrc->fg.jointype & JT_LEFT)!=0
+ && !ExprHasProperty(pTerm->pExpr, EP_FromJoin)
+ ){
+ continue;
+ }
+
+ if( IsUniqueIndex(pProbe) && saved_nEq==pProbe->nKeyCol-1 ){
+ pBuilder->bldFlags1 |= SQLITE_BLDF1_UNIQUE;
+ }else{
+ pBuilder->bldFlags1 |= SQLITE_BLDF1_INDEXED;
+ }
+ pNew->wsFlags = saved_wsFlags;
+ pNew->u.btree.nEq = saved_nEq;
+ pNew->u.btree.nBtm = saved_nBtm;
+ pNew->u.btree.nTop = saved_nTop;
+ pNew->nLTerm = saved_nLTerm;
+ if( whereLoopResize(db, pNew, pNew->nLTerm+1) ) break; /* OOM */
+ pNew->aLTerm[pNew->nLTerm++] = pTerm;
+ pNew->prereq = (saved_prereq | pTerm->prereqRight) & ~pNew->maskSelf;
+
+ assert( nInMul==0
+ || (pNew->wsFlags & WHERE_COLUMN_NULL)!=0
+ || (pNew->wsFlags & WHERE_COLUMN_IN)!=0
+ || (pNew->wsFlags & WHERE_SKIPSCAN)!=0
+ );
+
+ if( eOp & WO_IN ){
+ Expr *pExpr = pTerm->pExpr;
+ if( ExprHasProperty(pExpr, EP_xIsSelect) ){
+ /* "x IN (SELECT ...)": TUNING: the SELECT returns 25 rows */
+ int i;
+ nIn = 46; assert( 46==sqlite3LogEst(25) );
+
+ /* The expression may actually be of the form (x, y) IN (SELECT...).
+ ** In this case there is a separate term for each of (x) and (y).
+ ** However, the nIn multiplier should only be applied once, not once
+ ** for each such term. The following loop checks that pTerm is the
+ ** first such term in use, and sets nIn back to 0 if it is not. */
+ for(i=0; i<pNew->nLTerm-1; i++){
+ if( pNew->aLTerm[i] && pNew->aLTerm[i]->pExpr==pExpr ) nIn = 0;
+ }
+ }else if( ALWAYS(pExpr->x.pList && pExpr->x.pList->nExpr) ){
+ /* "x IN (value, value, ...)" */
+ nIn = sqlite3LogEst(pExpr->x.pList->nExpr);
+ }
+ if( pProbe->hasStat1 ){
+ LogEst M, logK, safetyMargin;
+ /* Let:
+ ** N = the total number of rows in the table
+ ** K = the number of entries on the RHS of the IN operator
+        **   M = the number of rows in the table that match the terms
+        **       to the left in the same index.  If the IN operator is on
+ ** the left-most index column, M==N.
+ **
+ ** Given the definitions above, it is better to omit the IN operator
+ ** from the index lookup and instead do a scan of the M elements,
+ ** testing each scanned row against the IN operator separately, if:
+ **
+ ** M*log(K) < K*log(N)
+ **
+ ** Our estimates for M, K, and N might be inaccurate, so we build in
+ ** a safety margin of 2 (LogEst: 10) that favors using the IN operator
+ ** with the index, as using an index has better worst-case behavior.
+ ** If we do not have real sqlite_stat1 data, always prefer to use
+ ** the index.
+ */
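+        /* Illustrative note (not part of the original sources): a sketch
+        ** of this test in LogEst units, for N=1,000,000 (rLogSize ~= 43,
+        ** representing log(N) ~= 20) and K=25 (nIn=46, logK ~= 22,
+        ** representing log(K) ~= 4.6):
+        **
+        **   M = 1000 rows (LogEst 100):  100+22+10=132 >= 46+43=89
+        **                                --> keep the IN operator
+        **   M = 10 rows   (LogEst 33):    33+22+10=65  <  46+43=89
+        **                                --> scan the M rows instead
+        **
+        ** The fewer rows match the equality terms to the left, the more
+        ** attractive the plain scan becomes. */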
+ M = pProbe->aiRowLogEst[saved_nEq];
+ logK = estLog(nIn);
+ safetyMargin = 10; /* TUNING: extra weight for indexed IN */
+ if( M + logK + safetyMargin < nIn + rLogSize ){
+ WHERETRACE(0x40,
+ ("Scan preferred over IN operator on column %d of \"%s\" (%d<%d)\n",
+ saved_nEq, pProbe->zName, M+logK+10, nIn+rLogSize));
+ continue;
+ }else{
+ WHERETRACE(0x40,
+ ("IN operator preferred on column %d of \"%s\" (%d>=%d)\n",
+ saved_nEq, pProbe->zName, M+logK+10, nIn+rLogSize));
+ }
+ }
+ pNew->wsFlags |= WHERE_COLUMN_IN;
+ }else if( eOp & (WO_EQ|WO_IS) ){
+ int iCol = pProbe->aiColumn[saved_nEq];
+ pNew->wsFlags |= WHERE_COLUMN_EQ;
+ assert( saved_nEq==pNew->u.btree.nEq );
+ if( iCol==XN_ROWID
+ || (iCol>=0 && nInMul==0 && saved_nEq==pProbe->nKeyCol-1)
+ ){
+ if( iCol==XN_ROWID || pProbe->uniqNotNull
+ || (pProbe->nKeyCol==1 && pProbe->onError && eOp==WO_EQ)
+ ){
+ pNew->wsFlags |= WHERE_ONEROW;
+ }else{
+ pNew->wsFlags |= WHERE_UNQ_WANTED;
+ }
+ }
+ }else if( eOp & WO_ISNULL ){
+ pNew->wsFlags |= WHERE_COLUMN_NULL;
+ }else if( eOp & (WO_GT|WO_GE) ){
+ testcase( eOp & WO_GT );
+ testcase( eOp & WO_GE );
+ pNew->wsFlags |= WHERE_COLUMN_RANGE|WHERE_BTM_LIMIT;
+ pNew->u.btree.nBtm = whereRangeVectorLen(
+ pParse, pSrc->iCursor, pProbe, saved_nEq, pTerm
+ );
+ pBtm = pTerm;
+ pTop = 0;
+ if( pTerm->wtFlags & TERM_LIKEOPT ){
+        /* Range constraints that come from the LIKE optimization are
+ ** always used in pairs. */
+ pTop = &pTerm[1];
+ assert( (pTop-(pTerm->pWC->a))<pTerm->pWC->nTerm );
+ assert( pTop->wtFlags & TERM_LIKEOPT );
+ assert( pTop->eOperator==WO_LT );
+ if( whereLoopResize(db, pNew, pNew->nLTerm+1) ) break; /* OOM */
+ pNew->aLTerm[pNew->nLTerm++] = pTop;
+ pNew->wsFlags |= WHERE_TOP_LIMIT;
+ pNew->u.btree.nTop = 1;
+ }
+ }else{
+ assert( eOp & (WO_LT|WO_LE) );
+ testcase( eOp & WO_LT );
+ testcase( eOp & WO_LE );
+ pNew->wsFlags |= WHERE_COLUMN_RANGE|WHERE_TOP_LIMIT;
+ pNew->u.btree.nTop = whereRangeVectorLen(
+ pParse, pSrc->iCursor, pProbe, saved_nEq, pTerm
+ );
+ pTop = pTerm;
+ pBtm = (pNew->wsFlags & WHERE_BTM_LIMIT)!=0 ?
+ pNew->aLTerm[pNew->nLTerm-2] : 0;
+ }
+
+ /* At this point pNew->nOut is set to the number of rows expected to
+ ** be visited by the index scan before considering term pTerm, or the
+ ** values of nIn and nInMul. In other words, assuming that all
+ ** "x IN(...)" terms are replaced with "x = ?". This block updates
+ ** the value of pNew->nOut to account for pTerm (but not nIn/nInMul). */
+ assert( pNew->nOut==saved_nOut );
+ if( pNew->wsFlags & WHERE_COLUMN_RANGE ){
+ /* Adjust nOut using stat4 data. Or, if there is no stat4
+ ** data, using some other estimate. */
+ whereRangeScanEst(pParse, pBuilder, pBtm, pTop, pNew);
+ }else{
+ int nEq = ++pNew->u.btree.nEq;
+ assert( eOp & (WO_ISNULL|WO_EQ|WO_IN|WO_IS) );
+
+ assert( pNew->nOut==saved_nOut );
+ if( pTerm->truthProb<=0 && pProbe->aiColumn[saved_nEq]>=0 ){
+ assert( (eOp & WO_IN) || nIn==0 );
+ testcase( eOp & WO_IN );
+ pNew->nOut += pTerm->truthProb;
+ pNew->nOut -= nIn;
+ }else{
+#ifdef SQLITE_ENABLE_STAT4
+ tRowcnt nOut = 0;
+ if( nInMul==0
+ && pProbe->nSample
+ && pNew->u.btree.nEq<=pProbe->nSampleCol
+ && ((eOp & WO_IN)==0 || !ExprHasProperty(pTerm->pExpr, EP_xIsSelect))
+ && OptimizationEnabled(db, SQLITE_Stat4)
+ ){
+ Expr *pExpr = pTerm->pExpr;
+ if( (eOp & (WO_EQ|WO_ISNULL|WO_IS))!=0 ){
+ testcase( eOp & WO_EQ );
+ testcase( eOp & WO_IS );
+ testcase( eOp & WO_ISNULL );
+ rc = whereEqualScanEst(pParse, pBuilder, pExpr->pRight, &nOut);
+ }else{
+ rc = whereInScanEst(pParse, pBuilder, pExpr->x.pList, &nOut);
+ }
+ if( rc==SQLITE_NOTFOUND ) rc = SQLITE_OK;
+ if( rc!=SQLITE_OK ) break; /* Jump out of the pTerm loop */
+ if( nOut ){
+ pNew->nOut = sqlite3LogEst(nOut);
+ if( nEq==1
+ /* TUNING: Mark terms as "low selectivity" if they seem likely
+ ** to be true for half or more of the rows in the table.
+            ** See tag-20200224-1 */
+ && pNew->nOut+10 > pProbe->aiRowLogEst[0]
+ ){
+#if WHERETRACE_ENABLED /* 0x01 */
+ if( sqlite3WhereTrace & 0x01 ){
+ sqlite3DebugPrintf(
+ "STAT4 determines term has low selectivity:\n");
+ sqlite3WhereTermPrint(pTerm, 999);
+ }
+#endif
+ pTerm->wtFlags |= TERM_HIGHTRUTH;
+ if( pTerm->wtFlags & TERM_HEURTRUTH ){
+ /* If the term has previously been used with an assumption of
+ ** higher selectivity, then set the flag to rerun the
+ ** loop computations. */
+ pBuilder->bldFlags2 |= SQLITE_BLDF2_2NDPASS;
+ }
+ }
+ if( pNew->nOut>saved_nOut ) pNew->nOut = saved_nOut;
+ pNew->nOut -= nIn;
+ }
+ }
+ if( nOut==0 )
+#endif
+ {
+ pNew->nOut += (pProbe->aiRowLogEst[nEq] - pProbe->aiRowLogEst[nEq-1]);
+ if( eOp & WO_ISNULL ){
+ /* TUNING: If there is no likelihood() value, assume that a
+ ** "col IS NULL" expression matches twice as many rows
+ ** as (col=?). */
+ pNew->nOut += 10;
+ }
+ }
+ }
+ }
+
+    /* Set rCostIdx to the cost of visiting the selected rows in the index. Add
+ ** it to pNew->rRun, which is currently set to the cost of the index
+ ** seek only. Then, if this is a non-covering index, add the cost of
+ ** visiting the rows in the main table. */
+ assert( pSrc->pTab->szTabRow>0 );
+ rCostIdx = pNew->nOut + 1 + (15*pProbe->szIdxRow)/pSrc->pTab->szTabRow;
+ pNew->rRun = sqlite3LogEstAdd(rLogSize, rCostIdx);
+ if( (pNew->wsFlags & (WHERE_IDX_ONLY|WHERE_IPK))==0 ){
+ pNew->rRun = sqlite3LogEstAdd(pNew->rRun, pNew->nOut + 16);
+ }
+ ApplyCostMultiplier(pNew->rRun, pProbe->pTable->costMult);
+
+ nOutUnadjusted = pNew->nOut;
+ pNew->rRun += nInMul + nIn;
+ pNew->nOut += nInMul + nIn;
+ whereLoopOutputAdjust(pBuilder->pWC, pNew, rSize);
+ rc = whereLoopInsert(pBuilder, pNew);
+
+ if( pNew->wsFlags & WHERE_COLUMN_RANGE ){
+ pNew->nOut = saved_nOut;
+ }else{
+ pNew->nOut = nOutUnadjusted;
+ }
+
+ if( (pNew->wsFlags & WHERE_TOP_LIMIT)==0
+ && pNew->u.btree.nEq<pProbe->nColumn
+ ){
+ whereLoopAddBtreeIndex(pBuilder, pSrc, pProbe, nInMul+nIn);
+ }
+ pNew->nOut = saved_nOut;
+#ifdef SQLITE_ENABLE_STAT4
+ pBuilder->nRecValid = nRecValid;
+#endif
+ }
+ pNew->prereq = saved_prereq;
+ pNew->u.btree.nEq = saved_nEq;
+ pNew->u.btree.nBtm = saved_nBtm;
+ pNew->u.btree.nTop = saved_nTop;
+ pNew->nSkip = saved_nSkip;
+ pNew->wsFlags = saved_wsFlags;
+ pNew->nOut = saved_nOut;
+ pNew->nLTerm = saved_nLTerm;
+
+ /* Consider using a skip-scan if there are no WHERE clause constraints
+ ** available for the left-most terms of the index, and if the average
+ ** number of repeats in the left-most terms is at least 18.
+ **
+  ** The magic number 18 is selected on the basis that scanning 17 rows
+  ** is almost always quicker than an index seek (even though, if the index
+  ** contains fewer than 2^17 rows, we assume otherwise in other parts of
+  ** the code). And, even if it is not, it should not be too much slower.
+ ** On the other hand, the extra seeks could end up being significantly
+ ** more expensive. */
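+  /* Illustrative note (not part of the original sources): the classic
+  ** skip-scan shape.  Given CREATE INDEX i1 ON t1(a,b) where column "a"
+  ** takes only a handful of distinct values, the query
+  **
+  **     SELECT * FROM t1 WHERE b=?;
+  **
+  ** can be run as one seek on index (a,b) for each distinct value of
+  ** "a"; the synthesized NULL entry appended to aLTerm[] below stands in
+  ** for the missing "a=?" constraint. */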
+ assert( 42==sqlite3LogEst(18) );
+ if( saved_nEq==saved_nSkip
+ && saved_nEq+1<pProbe->nKeyCol
+ && saved_nEq==pNew->nLTerm
+ && pProbe->noSkipScan==0
+ && pProbe->hasStat1!=0
+ && OptimizationEnabled(db, SQLITE_SkipScan)
+ && pProbe->aiRowLogEst[saved_nEq+1]>=42 /* TUNING: Minimum for skip-scan */
+ && (rc = whereLoopResize(db, pNew, pNew->nLTerm+1))==SQLITE_OK
+ ){
+ LogEst nIter;
+ pNew->u.btree.nEq++;
+ pNew->nSkip++;
+ pNew->aLTerm[pNew->nLTerm++] = 0;
+ pNew->wsFlags |= WHERE_SKIPSCAN;
+ nIter = pProbe->aiRowLogEst[saved_nEq] - pProbe->aiRowLogEst[saved_nEq+1];
+ pNew->nOut -= nIter;
+    /* TUNING: Because of uncertainties in the estimates for skip-scan
+    ** queries, add a 1.375 fudge factor to make skip-scan slightly less
+    ** likely. */
+ nIter += 5;
+ whereLoopAddBtreeIndex(pBuilder, pSrc, pProbe, nIter + nInMul);
+ pNew->nOut = saved_nOut;
+ pNew->u.btree.nEq = saved_nEq;
+ pNew->nSkip = saved_nSkip;
+ pNew->wsFlags = saved_wsFlags;
+ }
+
+ WHERETRACE(0x800, ("END %s.addBtreeIdx(%s), nEq=%d, rc=%d\n",
+ pProbe->pTable->zName, pProbe->zName, saved_nEq, rc));
+ return rc;
+}
+
+/*
+** Return True if it is possible that pIndex might be useful in
+** implementing the ORDER BY clause in pBuilder.
+**
+** Return False if pBuilder does not contain an ORDER BY clause or
+** if there is no way for pIndex to be useful in implementing that
+** ORDER BY clause.
+*/
+static int indexMightHelpWithOrderBy(
+ WhereLoopBuilder *pBuilder,
+ Index *pIndex,
+ int iCursor
+){
+ ExprList *pOB;
+ ExprList *aColExpr;
+ int ii, jj;
+
+ if( pIndex->bUnordered ) return 0;
+ if( (pOB = pBuilder->pWInfo->pOrderBy)==0 ) return 0;
+ for(ii=0; ii<pOB->nExpr; ii++){
+ Expr *pExpr = sqlite3ExprSkipCollateAndLikely(pOB->a[ii].pExpr);
+ if( pExpr->op==TK_COLUMN && pExpr->iTable==iCursor ){
+ if( pExpr->iColumn<0 ) return 1;
+ for(jj=0; jj<pIndex->nKeyCol; jj++){
+ if( pExpr->iColumn==pIndex->aiColumn[jj] ) return 1;
+ }
+ }else if( (aColExpr = pIndex->aColExpr)!=0 ){
+ for(jj=0; jj<pIndex->nKeyCol; jj++){
+ if( pIndex->aiColumn[jj]!=XN_EXPR ) continue;
+ if( sqlite3ExprCompareSkip(pExpr,aColExpr->a[jj].pExpr,iCursor)==0 ){
+ return 1;
+ }
+ }
+ }
+ }
+ return 0;
+}
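+
+/* Illustrative note (not part of the original sources): with an index
+** created by "CREATE INDEX i1 ON t1(x,y)", the routine above returns
+** true for "... ORDER BY y" (y is a key column of i1) and for any ORDER
+** BY that references the rowid, but false for "... ORDER BY z" when z
+** is neither indexed nor matched by an indexed expression.  A true
+** return only means the index deserves further consideration; it does
+** not by itself guarantee that the sort can be omitted.
+*/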
+
+/* Check to see if a partial index with pPartIndexWhere can be used
+** in the current query. Return true if it can be and false if not.
+*/
+static int whereUsablePartialIndex(
+ int iTab, /* The table for which we want an index */
+ int isLeft, /* True if iTab is the right table of a LEFT JOIN */
+ WhereClause *pWC, /* The WHERE clause of the query */
+ Expr *pWhere /* The WHERE clause from the partial index */
+){
+ int i;
+ WhereTerm *pTerm;
+ Parse *pParse = pWC->pWInfo->pParse;
+ while( pWhere->op==TK_AND ){
+ if( !whereUsablePartialIndex(iTab,isLeft,pWC,pWhere->pLeft) ) return 0;
+ pWhere = pWhere->pRight;
+ }
+ if( pParse->db->flags & SQLITE_EnableQPSG ) pParse = 0;
+ for(i=0, pTerm=pWC->a; i<pWC->nTerm; i++, pTerm++){
+ Expr *pExpr;
+ pExpr = pTerm->pExpr;
+ if( (!ExprHasProperty(pExpr, EP_FromJoin) || pExpr->iRightJoinTable==iTab)
+ && (isLeft==0 || ExprHasProperty(pExpr, EP_FromJoin))
+ && sqlite3ExprImpliesExpr(pParse, pExpr, pWhere, iTab)
+ ){
+ return 1;
+ }
+ }
+ return 0;
+}
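+
+/* Illustrative note (not part of the original sources): given a partial
+** index such as
+**
+**     CREATE INDEX i1 ON t1(a) WHERE b>10;
+**
+** the routine above returns true for a query like
+**
+**     SELECT a FROM t1 WHERE b>10 AND a=?;
+**
+** because the WHERE clause term "b>10" implies the index predicate
+** (tested via sqlite3ExprImpliesExpr), and false for "WHERE a=?" alone,
+** since rows with b<=10 would then be missing from the index.
+*/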
+
+/*
+** Add all WhereLoop objects for a single table of the join where the table
+** is identified by pBuilder->pNew->iTab. That table is guaranteed to be
+** a b-tree table, not a virtual table.
+**
+** The costs (WhereLoop.rRun) of the b-tree loops added by this function
+** are calculated as follows:
+**
+** For a full scan, assuming the table (or index) contains nRow rows:
+**
+** cost = nRow * 3.0 // full-table scan
+** cost = nRow * K // scan of covering index
+** cost = nRow * (K+3.0) // scan of non-covering index
+**
+** where K is a value between 1.1 and 3.0 set based on the relative
+** estimated average size of the index and table records.
+**
+** For an index scan, where nVisit is the number of index rows visited
+** by the scan, and nSeek is the number of seek operations required on
+** the index b-tree:
+**
+** cost = nSeek * (log(nRow) + K * nVisit) // covering index
+** cost = nSeek * (log(nRow) + (K+3.0) * nVisit) // non-covering index
+**
+** Normally, nSeek is 1. nSeek values greater than 1 come about if the
+** WHERE clause includes "x IN (....)" terms used in place of "x=?". Or when
+** implicit "x IN (SELECT x FROM tbl)" terms are added for skip-scans.
+**
+** The estimated values (nRow, nVisit, nSeek) often contain a large amount
+** of uncertainty. For this reason, scoring is designed to pick plans that
+** "do the least harm" if the estimates are inaccurate. For example, a
+** log(nRow) factor is omitted from a non-covering index scan in order to
+** bias the scoring in favor of using an index, since the worst-case
+** performance of using an index is far better than the worst-case performance
+** of a full table scan.
+*/
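+
+/* Illustrative note (not part of the original sources): the code below
+** expresses these formulas in LogEst units (roughly 10*log2(X)).  For a
+** table of nRow=1,000,000 rows (rSize ~= 199):
+**
+**     full-table scan:     rRun = rSize+16 ~= 215    -- nRow * 3.0
+**     covering idx scan:   rRun = rSize+1+(15*szIdxRow)/szTabRow
+**
+** since 16 LogEst units correspond to a factor of about 3 (2^1.6 ~= 3)
+** and 1 unit to about 1.07.
+*/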
+static int whereLoopAddBtree(
+ WhereLoopBuilder *pBuilder, /* WHERE clause information */
+  Bitmask mPrereq                 /* Extra prerequisites for using this table */
+){
+ WhereInfo *pWInfo; /* WHERE analysis context */
+ Index *pProbe; /* An index we are evaluating */
+ Index sPk; /* A fake index object for the primary key */
+ LogEst aiRowEstPk[2]; /* The aiRowLogEst[] value for the sPk index */
+ i16 aiColumnPk = -1; /* The aColumn[] value for the sPk index */
+ SrcList *pTabList; /* The FROM clause */
+ struct SrcList_item *pSrc; /* The FROM clause btree term to add */
+ WhereLoop *pNew; /* Template WhereLoop object */
+ int rc = SQLITE_OK; /* Return code */
+ int iSortIdx = 1; /* Index number */
+ int b; /* A boolean value */
+ LogEst rSize; /* number of rows in the table */
+ LogEst rLogSize; /* Logarithm of the number of rows in the table */
+ WhereClause *pWC; /* The parsed WHERE clause */
+ Table *pTab; /* Table being queried */
+
+ pNew = pBuilder->pNew;
+ pWInfo = pBuilder->pWInfo;
+ pTabList = pWInfo->pTabList;
+ pSrc = pTabList->a + pNew->iTab;
+ pTab = pSrc->pTab;
+ pWC = pBuilder->pWC;
+ assert( !IsVirtual(pSrc->pTab) );
+
+ if( pSrc->pIBIndex ){
+ /* An INDEXED BY clause specifies a particular index to use */
+ pProbe = pSrc->pIBIndex;
+ }else if( !HasRowid(pTab) ){
+ pProbe = pTab->pIndex;
+ }else{
+ /* There is no INDEXED BY clause. Create a fake Index object in local
+ ** variable sPk to represent the rowid primary key index. Make this
+ ** fake index the first in a chain of Index objects with all of the real
+ ** indices to follow */
+ Index *pFirst; /* First of real indices on the table */
+ memset(&sPk, 0, sizeof(Index));
+ sPk.nKeyCol = 1;
+ sPk.nColumn = 1;
+ sPk.aiColumn = &aiColumnPk;
+ sPk.aiRowLogEst = aiRowEstPk;
+ sPk.onError = OE_Replace;
+ sPk.pTable = pTab;
+ sPk.szIdxRow = pTab->szTabRow;
+ sPk.idxType = SQLITE_IDXTYPE_IPK;
+ aiRowEstPk[0] = pTab->nRowLogEst;
+ aiRowEstPk[1] = 0;
+ pFirst = pSrc->pTab->pIndex;
+ if( pSrc->fg.notIndexed==0 ){
+ /* The real indices of the table are only considered if the
+ ** NOT INDEXED qualifier is omitted from the FROM clause */
+ sPk.pNext = pFirst;
+ }
+ pProbe = &sPk;
+ }
+ rSize = pTab->nRowLogEst;
+ rLogSize = estLog(rSize);
+
+#ifndef SQLITE_OMIT_AUTOMATIC_INDEX
+ /* Automatic indexes */
+ if( !pBuilder->pOrSet /* Not part of an OR optimization */
+ && (pWInfo->wctrlFlags & WHERE_OR_SUBCLAUSE)==0
+ && (pWInfo->pParse->db->flags & SQLITE_AutoIndex)!=0
+ && pSrc->pIBIndex==0 /* Has no INDEXED BY clause */
+ && !pSrc->fg.notIndexed /* Has no NOT INDEXED clause */
+ && HasRowid(pTab) /* Not WITHOUT ROWID table. (FIXME: Why not?) */
+ && !pSrc->fg.isCorrelated /* Not a correlated subquery */
+ && !pSrc->fg.isRecursive /* Not a recursive common table expression. */
+ ){
+ /* Generate auto-index WhereLoops */
+ WhereTerm *pTerm;
+ WhereTerm *pWCEnd = pWC->a + pWC->nTerm;
+ for(pTerm=pWC->a; rc==SQLITE_OK && pTerm<pWCEnd; pTerm++){
+ if( pTerm->prereqRight & pNew->maskSelf ) continue;
+ if( termCanDriveIndex(pTerm, pSrc, 0) ){
+ pNew->u.btree.nEq = 1;
+ pNew->nSkip = 0;
+ pNew->u.btree.pIndex = 0;
+ pNew->nLTerm = 1;
+ pNew->aLTerm[0] = pTerm;
+ /* TUNING: One-time cost for computing the automatic index is
+ ** estimated to be X*N*log2(N) where N is the number of rows in
+ ** the table being indexed and where X is 7 (LogEst=28) for normal
+ ** tables or 0.5 (LogEst=-10) for views and subqueries. The value
+ ** of X is smaller for views and subqueries so that the query planner
+ ** will be more aggressive about generating automatic indexes for
+ ** those objects, since there is no opportunity to add schema
+ ** indexes on subqueries and views. */
+ pNew->rSetup = rLogSize + rSize;
+ if( pTab->pSelect==0 && (pTab->tabFlags & TF_Ephemeral)==0 ){
+ pNew->rSetup += 28;
+ }else{
+ pNew->rSetup -= 10;
+ }
+ ApplyCostMultiplier(pNew->rSetup, pTab->costMult);
+ if( pNew->rSetup<0 ) pNew->rSetup = 0;
+ /* TUNING: Each index lookup yields 20 rows in the table. This
+ ** is more than the usual guess of 10 rows, since we have no way
+ ** of knowing how selective the index will ultimately be. It would
+ ** not be unreasonable to make this value much larger. */
+ pNew->nOut = 43; assert( 43==sqlite3LogEst(20) );
+ pNew->rRun = sqlite3LogEstAdd(rLogSize,pNew->nOut);
+ pNew->wsFlags = WHERE_AUTO_INDEX;
+ pNew->prereq = mPrereq | pTerm->prereqRight;
+ rc = whereLoopInsert(pBuilder, pNew);
+ }
+ }
+ }
+#endif /* SQLITE_OMIT_AUTOMATIC_INDEX */
+
+ /* Loop over all indices. If there was an INDEXED BY clause, then only
+ ** consider index pProbe. */
+ for(; rc==SQLITE_OK && pProbe;
+ pProbe=(pSrc->pIBIndex ? 0 : pProbe->pNext), iSortIdx++
+ ){
+ int isLeft = (pSrc->fg.jointype & JT_OUTER)!=0;
+ if( pProbe->pPartIdxWhere!=0
+ && !whereUsablePartialIndex(pSrc->iCursor, isLeft, pWC,
+ pProbe->pPartIdxWhere)
+ ){
+ testcase( pNew->iTab!=pSrc->iCursor ); /* See ticket [98d973b8f5] */
+ continue; /* Partial index inappropriate for this query */
+ }
+ if( pProbe->bNoQuery ) continue;
+ rSize = pProbe->aiRowLogEst[0];
+ pNew->u.btree.nEq = 0;
+ pNew->u.btree.nBtm = 0;
+ pNew->u.btree.nTop = 0;
+ pNew->nSkip = 0;
+ pNew->nLTerm = 0;
+ pNew->iSortIdx = 0;
+ pNew->rSetup = 0;
+ pNew->prereq = mPrereq;
+ pNew->nOut = rSize;
+ pNew->u.btree.pIndex = pProbe;
+ b = indexMightHelpWithOrderBy(pBuilder, pProbe, pSrc->iCursor);
+
+    /* The ONEPASS_DESIRED flag never occurs together with ORDER BY */
+ assert( (pWInfo->wctrlFlags & WHERE_ONEPASS_DESIRED)==0 || b==0 );
+ if( pProbe->idxType==SQLITE_IDXTYPE_IPK ){
+ /* Integer primary key index */
+ pNew->wsFlags = WHERE_IPK;
+
+ /* Full table scan */
+ pNew->iSortIdx = b ? iSortIdx : 0;
+ /* TUNING: Cost of full table scan is (N*3.0). */
+ pNew->rRun = rSize + 16;
+ ApplyCostMultiplier(pNew->rRun, pTab->costMult);
+ whereLoopOutputAdjust(pWC, pNew, rSize);
+ rc = whereLoopInsert(pBuilder, pNew);
+ pNew->nOut = rSize;
+ if( rc ) break;
+ }else{
+ Bitmask m;
+ if( pProbe->isCovering ){
+ pNew->wsFlags = WHERE_IDX_ONLY | WHERE_INDEXED;
+ m = 0;
+ }else{
+ m = pSrc->colUsed & pProbe->colNotIdxed;
+ pNew->wsFlags = (m==0) ? (WHERE_IDX_ONLY|WHERE_INDEXED) : WHERE_INDEXED;
+ }
+
+ /* Full scan via index */
+ if( b
+ || !HasRowid(pTab)
+ || pProbe->pPartIdxWhere!=0
+ || pSrc->fg.isIndexedBy
+ || ( m==0
+ && pProbe->bUnordered==0
+ && (pProbe->szIdxRow<pTab->szTabRow)
+ && (pWInfo->wctrlFlags & WHERE_ONEPASS_DESIRED)==0
+ && sqlite3GlobalConfig.bUseCis
+ && OptimizationEnabled(pWInfo->pParse->db, SQLITE_CoverIdxScan)
+ )
+ ){
+ pNew->iSortIdx = b ? iSortIdx : 0;
+
+ /* The cost of visiting the index rows is N*K, where K is
+ ** between 1.1 and 3.0, depending on the relative sizes of the
+ ** index and table rows. */
+ pNew->rRun = rSize + 1 + (15*pProbe->szIdxRow)/pTab->szTabRow;
+ if( m!=0 ){
+ /* If this is a non-covering index scan, add in the cost of
+ ** doing table lookups. The cost will be 3x the number of
+ ** lookups. Take into account WHERE clause terms that can be
+ ** satisfied using just the index, and that do not require a
+ ** table lookup. */
+ LogEst nLookup = rSize + 16; /* Base cost: N*3 */
+ int ii;
+ int iCur = pSrc->iCursor;
+ WhereClause *pWC2 = &pWInfo->sWC;
+ for(ii=0; ii<pWC2->nTerm; ii++){
+ WhereTerm *pTerm = &pWC2->a[ii];
+ if( !sqlite3ExprCoveredByIndex(pTerm->pExpr, iCur, pProbe) ){
+ break;
+ }
+ /* pTerm can be evaluated using just the index. So reduce
+ ** the expected number of table lookups accordingly */
+ if( pTerm->truthProb<=0 ){
+ nLookup += pTerm->truthProb;
+ }else{
+ nLookup--;
+ if( pTerm->eOperator & (WO_EQ|WO_IS) ) nLookup -= 19;
+ }
+ }
+
+ pNew->rRun = sqlite3LogEstAdd(pNew->rRun, nLookup);
+ }
+ ApplyCostMultiplier(pNew->rRun, pTab->costMult);
+ whereLoopOutputAdjust(pWC, pNew, rSize);
+ rc = whereLoopInsert(pBuilder, pNew);
+ pNew->nOut = rSize;
+ if( rc ) break;
+ }
+ }
+
+ pBuilder->bldFlags1 = 0;
+ rc = whereLoopAddBtreeIndex(pBuilder, pSrc, pProbe, 0);
+ if( pBuilder->bldFlags1==SQLITE_BLDF1_INDEXED ){
+ /* If a non-unique index is used, or if a prefix of the key for
+      ** a unique index is used (making the index functionally non-unique)
+ ** then the sqlite_stat1 data becomes important for scoring the
+ ** plan */
+ pTab->tabFlags |= TF_StatsUsed;
+ }
+#ifdef SQLITE_ENABLE_STAT4
+ sqlite3Stat4ProbeFree(pBuilder->pRec);
+ pBuilder->nRecValid = 0;
+ pBuilder->pRec = 0;
+#endif
+ }
+ return rc;
+}
+
+#ifndef SQLITE_OMIT_VIRTUALTABLE
+
+/*
+** Argument pIdxInfo is already populated with all constraints that may
+** be used by the virtual table identified by pBuilder->pNew->iTab. This
+** function marks a subset of those constraints usable, invokes the
+** xBestIndex method and adds the returned plan to pBuilder.
+**
+** A constraint is marked usable if:
+**
+** * Argument mUsable indicates that its prerequisites are available, and
+**
+** * It is not one of the operators specified in the mExclude mask passed
+** as the fourth argument (which in practice is either WO_IN or 0).
+**
+** Argument mPrereq is a mask of tables that must be scanned before the
+** virtual table in question. These are added to the plan's prerequisites
+** before it is added to pBuilder.
+**
+** Output parameter *pbIn is set to true if the plan added to pBuilder
+** uses one or more WO_IN terms, or false otherwise.
+*/
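+
+/* Illustrative note (not part of the original sources): suppose the
+** virtual table has three constraints whose right-hand sides carry
+** prerequisite masks 0x00, 0x02 and 0x04.  A call with mUsable=0x02 and
+** mExclude=0 marks the first two constraints usable and the third not,
+** so xBestIndex() is asked for its best plan that may depend on the
+** table in mask 0x02 but not on the table in mask 0x04.
+*/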
+static int whereLoopAddVirtualOne(
+ WhereLoopBuilder *pBuilder,
+ Bitmask mPrereq, /* Mask of tables that must be used. */
+ Bitmask mUsable, /* Mask of usable tables */
+ u16 mExclude, /* Exclude terms using these operators */
+ sqlite3_index_info *pIdxInfo, /* Populated object for xBestIndex */
+ u16 mNoOmit, /* Do not omit these constraints */
+ int *pbIn /* OUT: True if plan uses an IN(...) op */
+){
+ WhereClause *pWC = pBuilder->pWC;
+ struct sqlite3_index_constraint *pIdxCons;
+ struct sqlite3_index_constraint_usage *pUsage = pIdxInfo->aConstraintUsage;
+ int i;
+ int mxTerm;
+ int rc = SQLITE_OK;
+ WhereLoop *pNew = pBuilder->pNew;
+ Parse *pParse = pBuilder->pWInfo->pParse;
+ struct SrcList_item *pSrc = &pBuilder->pWInfo->pTabList->a[pNew->iTab];
+ int nConstraint = pIdxInfo->nConstraint;
+
+ assert( (mUsable & mPrereq)==mPrereq );
+ *pbIn = 0;
+ pNew->prereq = mPrereq;
+
+ /* Set the usable flag on the subset of constraints identified by
+ ** arguments mUsable and mExclude. */
+ pIdxCons = *(struct sqlite3_index_constraint**)&pIdxInfo->aConstraint;
+ for(i=0; i<nConstraint; i++, pIdxCons++){
+ WhereTerm *pTerm = &pWC->a[pIdxCons->iTermOffset];
+ pIdxCons->usable = 0;
+ if( (pTerm->prereqRight & mUsable)==pTerm->prereqRight
+ && (pTerm->eOperator & mExclude)==0
+ ){
+ pIdxCons->usable = 1;
+ }
+ }
+
+ /* Initialize the output fields of the sqlite3_index_info structure */
+ memset(pUsage, 0, sizeof(pUsage[0])*nConstraint);
+ assert( pIdxInfo->needToFreeIdxStr==0 );
+ pIdxInfo->idxStr = 0;
+ pIdxInfo->idxNum = 0;
+ pIdxInfo->orderByConsumed = 0;
+ pIdxInfo->estimatedCost = SQLITE_BIG_DBL / (double)2;
+ pIdxInfo->estimatedRows = 25;
+ pIdxInfo->idxFlags = 0;
+ pIdxInfo->colUsed = (sqlite3_int64)pSrc->colUsed;
+
+ /* Invoke the virtual table xBestIndex() method */
+ rc = vtabBestIndex(pParse, pSrc->pTab, pIdxInfo);
+ if( rc ){
+ if( rc==SQLITE_CONSTRAINT ){
+ /* If the xBestIndex method returns SQLITE_CONSTRAINT, that means
+ ** that the particular combination of parameters provided is unusable.
+ ** Make no entries in the loop table.
+ */
+ WHERETRACE(0xffff, (" ^^^^--- non-viable plan rejected!\n"));
+ return SQLITE_OK;
+ }
+ return rc;
+ }
+
+ mxTerm = -1;
+ assert( pNew->nLSlot>=nConstraint );
+ for(i=0; i<nConstraint; i++) pNew->aLTerm[i] = 0;
+ pNew->u.vtab.omitMask = 0;
+ pIdxCons = *(struct sqlite3_index_constraint**)&pIdxInfo->aConstraint;
+ for(i=0; i<nConstraint; i++, pIdxCons++){
+ int iTerm;
+ if( (iTerm = pUsage[i].argvIndex - 1)>=0 ){
+ WhereTerm *pTerm;
+ int j = pIdxCons->iTermOffset;
+ if( iTerm>=nConstraint
+ || j<0
+ || j>=pWC->nTerm
+ || pNew->aLTerm[iTerm]!=0
+ || pIdxCons->usable==0
+ ){
+ sqlite3ErrorMsg(pParse,"%s.xBestIndex malfunction",pSrc->pTab->zName);
+ testcase( pIdxInfo->needToFreeIdxStr );
+ return SQLITE_ERROR;
+ }
+ testcase( iTerm==nConstraint-1 );
+ testcase( j==0 );
+ testcase( j==pWC->nTerm-1 );
+ pTerm = &pWC->a[j];
+ pNew->prereq |= pTerm->prereqRight;
+ assert( iTerm<pNew->nLSlot );
+ pNew->aLTerm[iTerm] = pTerm;
+ if( iTerm>mxTerm ) mxTerm = iTerm;
+ testcase( iTerm==15 );
+ testcase( iTerm==16 );
+ if( pUsage[i].omit ){
+ if( i<16 && ((1<<i)&mNoOmit)==0 ){
+ testcase( i!=iTerm );
+ pNew->u.vtab.omitMask |= 1<<iTerm;
+ }else{
+ testcase( i!=iTerm );
+ }
+ }
+ if( (pTerm->eOperator & WO_IN)!=0 ){
+ /* A virtual table that is constrained by an IN clause may not
+ ** consume the ORDER BY clause because (1) the order of IN terms
+ ** is not necessarily related to the order of output terms and
+ ** (2) Multiple outputs from a single IN value will not merge
+ ** together. */
+ pIdxInfo->orderByConsumed = 0;
+ pIdxInfo->idxFlags &= ~SQLITE_INDEX_SCAN_UNIQUE;
+ *pbIn = 1; assert( (mExclude & WO_IN)==0 );
+ }
+ }
+ }
+
+ pNew->nLTerm = mxTerm+1;
+ for(i=0; i<=mxTerm; i++){
+ if( pNew->aLTerm[i]==0 ){
+      /* The non-zero argvIndex values must be contiguous.  Raise an
+ ** error if they are not */
+ sqlite3ErrorMsg(pParse,"%s.xBestIndex malfunction",pSrc->pTab->zName);
+ testcase( pIdxInfo->needToFreeIdxStr );
+ return SQLITE_ERROR;
+ }
+ }
+ assert( pNew->nLTerm<=pNew->nLSlot );
+ pNew->u.vtab.idxNum = pIdxInfo->idxNum;
+ pNew->u.vtab.needFree = pIdxInfo->needToFreeIdxStr;
+ pIdxInfo->needToFreeIdxStr = 0;
+ pNew->u.vtab.idxStr = pIdxInfo->idxStr;
+ pNew->u.vtab.isOrdered = (i8)(pIdxInfo->orderByConsumed ?
+ pIdxInfo->nOrderBy : 0);
+ pNew->rSetup = 0;
+ pNew->rRun = sqlite3LogEstFromDouble(pIdxInfo->estimatedCost);
+ pNew->nOut = sqlite3LogEst(pIdxInfo->estimatedRows);
+
+ /* Set the WHERE_ONEROW flag if the xBestIndex() method indicated
+ ** that the scan will visit at most one row. Clear it otherwise. */
+ if( pIdxInfo->idxFlags & SQLITE_INDEX_SCAN_UNIQUE ){
+ pNew->wsFlags |= WHERE_ONEROW;
+ }else{
+ pNew->wsFlags &= ~WHERE_ONEROW;
+ }
+ rc = whereLoopInsert(pBuilder, pNew);
+ if( pNew->u.vtab.needFree ){
+ sqlite3_free(pNew->u.vtab.idxStr);
+ pNew->u.vtab.needFree = 0;
+ }
+ WHERETRACE(0xffff, (" bIn=%d prereqIn=%04llx prereqOut=%04llx\n",
+ *pbIn, (sqlite3_uint64)mPrereq,
+ (sqlite3_uint64)(pNew->prereq & ~mPrereq)));
+
+ return rc;
+}
+
+/*
+** If this function is invoked from within an xBestIndex() callback, it
+** returns a pointer to a buffer containing the name of the collation
+** sequence associated with element iCons of the sqlite3_index_info.aConstraint
+** array. Or, if iCons is out of range or there is no active xBestIndex
+** call, return NULL.
+*/
+SQLITE_API const char *sqlite3_vtab_collation(sqlite3_index_info *pIdxInfo, int iCons){
+ HiddenIndexInfo *pHidden = (HiddenIndexInfo*)&pIdxInfo[1];
+ const char *zRet = 0;
+ if( iCons>=0 && iCons<pIdxInfo->nConstraint ){
+ CollSeq *pC = 0;
+ int iTerm = pIdxInfo->aConstraint[iCons].iTermOffset;
+ Expr *pX = pHidden->pWC->a[iTerm].pExpr;
+ if( pX->pLeft ){
+ pC = sqlite3ExprCompareCollSeq(pHidden->pParse, pX);
+ }
+ zRet = (pC ? pC->zName : sqlite3StrBINARY);
+ }
+ return zRet;
+}
+
+/*
+** Add all WhereLoop objects for a table of the join identified by
+** pBuilder->pNew->iTab. That table is guaranteed to be a virtual table.
+**
+** If there are no LEFT or CROSS JOINs in the query, both mPrereq and
+** mUnusable are set to 0. Otherwise, mPrereq is a mask of all FROM clause
+** entries that occur before the virtual table in the FROM clause and are
+** separated from it by at least one LEFT or CROSS JOIN. Similarly, the
+** mUnusable mask contains all FROM clause entries that occur after the
+** virtual table and are separated from it by at least one LEFT or
+** CROSS JOIN.
+**
+** For example, if the query were:
+**
+** ... FROM t1, t2 LEFT JOIN t3, t4, vt CROSS JOIN t5, t6;
+**
+** then mPrereq corresponds to (t1, t2) and mUnusable to (t5, t6).
+**
+** All the tables in mPrereq must be scanned before the current virtual
+** table. So any terms for which all prerequisites are satisfied by
+** mPrereq may be specified as "usable" in all calls to xBestIndex.
+** Conversely, all tables in mUnusable must be scanned after the current
+** virtual table, so any terms for which the prerequisites overlap with
+** mUnusable should always be configured as "not-usable" for xBestIndex.
+*/
+static int whereLoopAddVirtual(
+ WhereLoopBuilder *pBuilder, /* WHERE clause information */
+ Bitmask mPrereq, /* Tables that must be scanned before this one */
+ Bitmask mUnusable /* Tables that must be scanned after this one */
+){
+ int rc = SQLITE_OK; /* Return code */
+ WhereInfo *pWInfo; /* WHERE analysis context */
+ Parse *pParse; /* The parsing context */
+ WhereClause *pWC; /* The WHERE clause */
+ struct SrcList_item *pSrc; /* The FROM clause term to search */
+ sqlite3_index_info *p; /* Object to pass to xBestIndex() */
+ int nConstraint; /* Number of constraints in p */
+ int bIn; /* True if plan uses IN(...) operator */
+ WhereLoop *pNew;
+ Bitmask mBest; /* Tables used by best possible plan */
+ u16 mNoOmit;
+
+ assert( (mPrereq & mUnusable)==0 );
+ pWInfo = pBuilder->pWInfo;
+ pParse = pWInfo->pParse;
+ pWC = pBuilder->pWC;
+ pNew = pBuilder->pNew;
+ pSrc = &pWInfo->pTabList->a[pNew->iTab];
+ assert( IsVirtual(pSrc->pTab) );
+ p = allocateIndexInfo(pParse, pWC, mUnusable, pSrc, pBuilder->pOrderBy,
+ &mNoOmit);
+ if( p==0 ) return SQLITE_NOMEM_BKPT;
+ pNew->rSetup = 0;
+ pNew->wsFlags = WHERE_VIRTUALTABLE;
+ pNew->nLTerm = 0;
+ pNew->u.vtab.needFree = 0;
+ nConstraint = p->nConstraint;
+ if( whereLoopResize(pParse->db, pNew, nConstraint) ){
+ sqlite3DbFree(pParse->db, p);
+ return SQLITE_NOMEM_BKPT;
+ }
+
+ /* First call xBestIndex() with all constraints usable. */
+ WHERETRACE(0x800, ("BEGIN %s.addVirtual()\n", pSrc->pTab->zName));
+ WHERETRACE(0x40, (" VirtualOne: all usable\n"));
+ rc = whereLoopAddVirtualOne(pBuilder, mPrereq, ALLBITS, 0, p, mNoOmit, &bIn);
+
+ /* If the call to xBestIndex() with all terms enabled produced a plan
+ ** that does not require any source tables (IOW: a plan with mBest==0)
+ ** and does not use an IN(...) operator, then there is no point in making
+ ** any further calls to xBestIndex() since they will all return the same
+ ** result (if the xBestIndex() implementation is sane). */
+ if( rc==SQLITE_OK && ((mBest = (pNew->prereq & ~mPrereq))!=0 || bIn) ){
+ int seenZero = 0; /* True if a plan with no prereqs seen */
+ int seenZeroNoIN = 0; /* Plan with no prereqs and no IN(...) seen */
+ Bitmask mPrev = 0;
+ Bitmask mBestNoIn = 0;
+
+ /* If the plan produced by the earlier call uses an IN(...) term, call
+ ** xBestIndex again, this time with IN(...) terms disabled. */
+ if( bIn ){
+ WHERETRACE(0x40, (" VirtualOne: all usable w/o IN\n"));
+ rc = whereLoopAddVirtualOne(
+ pBuilder, mPrereq, ALLBITS, WO_IN, p, mNoOmit, &bIn);
+ assert( bIn==0 );
+ mBestNoIn = pNew->prereq & ~mPrereq;
+ if( mBestNoIn==0 ){
+ seenZero = 1;
+ seenZeroNoIN = 1;
+ }
+ }
+
+ /* Call xBestIndex once for each distinct value of (prereqRight & ~mPrereq)
+ ** in the set of terms that apply to the current virtual table. */
+ while( rc==SQLITE_OK ){
+ int i;
+ Bitmask mNext = ALLBITS;
+ assert( mNext>0 );
+ for(i=0; i<nConstraint; i++){
+ Bitmask mThis = (
+ pWC->a[p->aConstraint[i].iTermOffset].prereqRight & ~mPrereq
+ );
+ if( mThis>mPrev && mThis<mNext ) mNext = mThis;
+ }
+ mPrev = mNext;
+ if( mNext==ALLBITS ) break;
+ if( mNext==mBest || mNext==mBestNoIn ) continue;
+ WHERETRACE(0x40, (" VirtualOne: mPrev=%04llx mNext=%04llx\n",
+ (sqlite3_uint64)mPrev, (sqlite3_uint64)mNext));
+ rc = whereLoopAddVirtualOne(
+ pBuilder, mPrereq, mNext|mPrereq, 0, p, mNoOmit, &bIn);
+ if( pNew->prereq==mPrereq ){
+ seenZero = 1;
+ if( bIn==0 ) seenZeroNoIN = 1;
+ }
+ }
+
+ /* If the calls to xBestIndex() in the above loop did not find a plan
+ ** that requires no source tables at all (i.e. one guaranteed to be
+ ** usable), make a call here with all source tables disabled */
+ if( rc==SQLITE_OK && seenZero==0 ){
+ WHERETRACE(0x40, (" VirtualOne: all disabled\n"));
+ rc = whereLoopAddVirtualOne(
+ pBuilder, mPrereq, mPrereq, 0, p, mNoOmit, &bIn);
+ if( bIn==0 ) seenZeroNoIN = 1;
+ }
+
+ /* If the calls to xBestIndex() have so far failed to find a plan
+ ** that requires no source tables at all and does not use an IN(...)
+ ** operator, make a final call to obtain one here. */
+ if( rc==SQLITE_OK && seenZeroNoIN==0 ){
+ WHERETRACE(0x40, (" VirtualOne: all disabled and w/o IN\n"));
+ rc = whereLoopAddVirtualOne(
+ pBuilder, mPrereq, mPrereq, WO_IN, p, mNoOmit, &bIn);
+ }
+ }
+
+ if( p->needToFreeIdxStr ) sqlite3_free(p->idxStr);
+ sqlite3DbFreeNN(pParse->db, p);
+ WHERETRACE(0x800, ("END %s.addVirtual(), rc=%d\n", pSrc->pTab->zName, rc));
+ return rc;
+}
+#endif /* SQLITE_OMIT_VIRTUALTABLE */
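+
+/* Illustrative trace (ours, not from the upstream comments): for a join
+** such as
+**
+**     SELECT * FROM t1 JOIN vt ON vt.a=t1.x WHERE vt.b IN (1,2,3);
+**
+** whereLoopAddVirtual() above typically invokes xBestIndex() first with
+** all constraints usable, then (because the first plan used IN) again
+** with IN terms disabled, and finally, if no prerequisite-free plan has
+** been seen, with all constraints that depend on other tables disabled,
+** so that at least one plan usable in any join order is recorded.
+*/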
+
+/*
+** Add WhereLoop entries to handle OR terms. This works for either
+** btrees or virtual tables.
+*/
+static int whereLoopAddOr(
+ WhereLoopBuilder *pBuilder,
+ Bitmask mPrereq,
+ Bitmask mUnusable
+){
+ WhereInfo *pWInfo = pBuilder->pWInfo;
+ WhereClause *pWC;
+ WhereLoop *pNew;
+ WhereTerm *pTerm, *pWCEnd;
+ int rc = SQLITE_OK;
+ int iCur;
+ WhereClause tempWC;
+ WhereLoopBuilder sSubBuild;
+ WhereOrSet sSum, sCur;
+ struct SrcList_item *pItem;
+
+ pWC = pBuilder->pWC;
+ pWCEnd = pWC->a + pWC->nTerm;
+ pNew = pBuilder->pNew;
+ memset(&sSum, 0, sizeof(sSum));
+ pItem = pWInfo->pTabList->a + pNew->iTab;
+ iCur = pItem->iCursor;
+
+ for(pTerm=pWC->a; pTerm<pWCEnd && rc==SQLITE_OK; pTerm++){
+ if( (pTerm->eOperator & WO_OR)!=0
+ && (pTerm->u.pOrInfo->indexable & pNew->maskSelf)!=0
+ ){
+ WhereClause * const pOrWC = &pTerm->u.pOrInfo->wc;
+ WhereTerm * const pOrWCEnd = &pOrWC->a[pOrWC->nTerm];
+ WhereTerm *pOrTerm;
+ int once = 1;
+ int i, j;
+
+ sSubBuild = *pBuilder;
+ sSubBuild.pOrderBy = 0;
+ sSubBuild.pOrSet = &sCur;
+
+ WHERETRACE(0x200, ("Begin processing OR-clause %p\n", pTerm));
+ for(pOrTerm=pOrWC->a; pOrTerm<pOrWCEnd; pOrTerm++){
+ if( (pOrTerm->eOperator & WO_AND)!=0 ){
+ sSubBuild.pWC = &pOrTerm->u.pAndInfo->wc;
+ }else if( pOrTerm->leftCursor==iCur ){
+ tempWC.pWInfo = pWC->pWInfo;
+ tempWC.pOuter = pWC;
+ tempWC.op = TK_AND;
+ tempWC.nTerm = 1;
+ tempWC.a = pOrTerm;
+ sSubBuild.pWC = &tempWC;
+ }else{
+ continue;
+ }
+ sCur.n = 0;
+#ifdef WHERETRACE_ENABLED
+ WHERETRACE(0x200, ("OR-term %d of %p has %d subterms:\n",
+ (int)(pOrTerm-pOrWC->a), pTerm, sSubBuild.pWC->nTerm));
+ if( sqlite3WhereTrace & 0x400 ){
+ sqlite3WhereClausePrint(sSubBuild.pWC);
+ }
+#endif
+#ifndef SQLITE_OMIT_VIRTUALTABLE
+ if( IsVirtual(pItem->pTab) ){
+ rc = whereLoopAddVirtual(&sSubBuild, mPrereq, mUnusable);
+ }else
+#endif
+ {
+ rc = whereLoopAddBtree(&sSubBuild, mPrereq);
+ }
+ if( rc==SQLITE_OK ){
+ rc = whereLoopAddOr(&sSubBuild, mPrereq, mUnusable);
+ }
+ assert( rc==SQLITE_OK || rc==SQLITE_DONE || sCur.n==0 );
+ testcase( rc==SQLITE_DONE );
+ if( sCur.n==0 ){
+ sSum.n = 0;
+ break;
+ }else if( once ){
+ whereOrMove(&sSum, &sCur);
+ once = 0;
+ }else{
+ WhereOrSet sPrev;
+ whereOrMove(&sPrev, &sSum);
+ sSum.n = 0;
+ for(i=0; i<sPrev.n; i++){
+ for(j=0; j<sCur.n; j++){
+ whereOrInsert(&sSum, sPrev.a[i].prereq | sCur.a[j].prereq,
+ sqlite3LogEstAdd(sPrev.a[i].rRun, sCur.a[j].rRun),
+ sqlite3LogEstAdd(sPrev.a[i].nOut, sCur.a[j].nOut));
+ }
+ }
+ }
+ }
+ pNew->nLTerm = 1;
+ pNew->aLTerm[0] = pTerm;
+ pNew->wsFlags = WHERE_MULTI_OR;
+ pNew->rSetup = 0;
+ pNew->iSortIdx = 0;
+ memset(&pNew->u, 0, sizeof(pNew->u));
+ for(i=0; rc==SQLITE_OK && i<sSum.n; i++){
+ /* TUNING: Currently sSum.a[i].rRun is set to the sum of the costs
+ ** of all sub-scans required by the OR-scan. However, due to rounding
+ ** errors, it may be that the cost of the OR-scan is equal to its
+ ** most expensive sub-scan. Add the smallest possible penalty
+ ** (equivalent to multiplying the cost by 1.07) to ensure that
+ ** this does not happen. Otherwise, for WHERE clauses such as the
+ ** following where there is an index on "y":
+ **
+ ** WHERE likelihood(x=?, 0.99) OR y=?
+ **
+ ** the planner may elect to "OR" together a full-table scan and an
+ ** index lookup. And other similarly odd results. */
+ pNew->rRun = sSum.a[i].rRun + 1;
+ pNew->nOut = sSum.a[i].nOut;
+ pNew->prereq = sSum.a[i].prereq;
+ rc = whereLoopInsert(pBuilder, pNew);
+ }
+ WHERETRACE(0x200, ("End processing OR-clause %p\n", pTerm));
+ }
+ }
+ return rc;
+}
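+
+/* For illustration (our example, not from the upstream comments): given
+** indexes on t1(a) and t1(b), a clause such as
+**
+**     WHERE a=? OR b=?
+**
+** causes whereLoopAddOr() to plan each disjunct separately and then
+** record a single WHERE_MULTI_OR loop whose cost and row estimates
+** combine those of the two index scans via the whereOrInsert() calls
+** above.
+*/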
+
+/*
+** Add all WhereLoop objects for all tables
+*/
+static int whereLoopAddAll(WhereLoopBuilder *pBuilder){
+ WhereInfo *pWInfo = pBuilder->pWInfo;
+ Bitmask mPrereq = 0;
+ Bitmask mPrior = 0;
+ int iTab;
+ SrcList *pTabList = pWInfo->pTabList;
+ struct SrcList_item *pItem;
+ struct SrcList_item *pEnd = &pTabList->a[pWInfo->nLevel];
+ sqlite3 *db = pWInfo->pParse->db;
+ int rc = SQLITE_OK;
+ WhereLoop *pNew;
+
+ /* Loop over the tables in the join, from left to right */
+ pNew = pBuilder->pNew;
+ whereLoopInit(pNew);
+ pBuilder->iPlanLimit = SQLITE_QUERY_PLANNER_LIMIT;
+ for(iTab=0, pItem=pTabList->a; pItem<pEnd; iTab++, pItem++){
+ Bitmask mUnusable = 0;
+ pNew->iTab = iTab;
+ pBuilder->iPlanLimit += SQLITE_QUERY_PLANNER_LIMIT_INCR;
+ pNew->maskSelf = sqlite3WhereGetMask(&pWInfo->sMaskSet, pItem->iCursor);
+ if( (pItem->fg.jointype & (JT_LEFT|JT_CROSS))!=0 ){
+ /* This condition is true when pItem is the FROM clause term on the
+ ** right-hand-side of a LEFT or CROSS JOIN. */
+ mPrereq = mPrior;
+ }else{
+ mPrereq = 0;
+ }
+#ifndef SQLITE_OMIT_VIRTUALTABLE
+ if( IsVirtual(pItem->pTab) ){
+ struct SrcList_item *p;
+ for(p=&pItem[1]; p<pEnd; p++){
+ if( mUnusable || (p->fg.jointype & (JT_LEFT|JT_CROSS)) ){
+ mUnusable |= sqlite3WhereGetMask(&pWInfo->sMaskSet, p->iCursor);
+ }
+ }
+ rc = whereLoopAddVirtual(pBuilder, mPrereq, mUnusable);
+ }else
+#endif /* SQLITE_OMIT_VIRTUALTABLE */
+ {
+ rc = whereLoopAddBtree(pBuilder, mPrereq);
+ }
+ if( rc==SQLITE_OK && pBuilder->pWC->hasOr ){
+ rc = whereLoopAddOr(pBuilder, mPrereq, mUnusable);
+ }
+ mPrior |= pNew->maskSelf;
+ if( rc || db->mallocFailed ){
+ if( rc==SQLITE_DONE ){
+ /* We hit the query planner search limit set by iPlanLimit */
+ sqlite3_log(SQLITE_WARNING, "abbreviated query algorithm search");
+ rc = SQLITE_OK;
+ }else{
+ break;
+ }
+ }
+ }
+
+ whereLoopClear(db, pNew);
+ return rc;
+}
+
+/*
+** Examine a WherePath (with the addition of the extra WhereLoop passed as
+** the 6th parameter) to see if it outputs rows in the requested ORDER BY
+** (or GROUP BY) without requiring a separate sort operation. Return N:
+**
+** N>0: N terms of the ORDER BY clause are satisfied
+** N==0: No terms of the ORDER BY clause are satisfied
+** N<0: Unknown yet how many terms of ORDER BY might be satisfied.
+**
+** Note that processing for WHERE_GROUPBY and WHERE_DISTINCTBY is not as
+** strict. With GROUP BY and DISTINCT the only requirement is that
+** equivalent rows appear immediately adjacent to one another. GROUP BY
+** and DISTINCT do not require rows to appear in any particular order as long
+** as equivalent rows are grouped together. Thus for GROUP BY and DISTINCT
+** the pOrderBy terms can be matched in any order. With ORDER BY, the
+** pOrderBy terms must be matched in strict left-to-right order.
+*/
+static i8 wherePathSatisfiesOrderBy(
+ WhereInfo *pWInfo, /* The WHERE clause */
+ ExprList *pOrderBy, /* ORDER BY or GROUP BY or DISTINCT clause to check */
+ WherePath *pPath, /* The WherePath to check */
+ u16 wctrlFlags, /* WHERE_GROUPBY or _DISTINCTBY or _ORDERBY_LIMIT */
+ u16 nLoop, /* Number of entries in pPath->aLoop[] */
+ WhereLoop *pLast, /* Add this WhereLoop to the end of pPath->aLoop[] */
+ Bitmask *pRevMask /* OUT: Mask of WhereLoops to run in reverse order */
+){
+ u8 revSet; /* True if rev is known */
+ u8 rev; /* Composite sort order */
+ u8 revIdx; /* Index sort order */
+ u8 isOrderDistinct; /* All prior WhereLoops are order-distinct */
+ u8 distinctColumns; /* True if the loop has UNIQUE NOT NULL columns */
+ u8 isMatch; /* iColumn matches a term of the ORDER BY clause */
+ u16 eqOpMask; /* Allowed equality operators */
+ u16 nKeyCol; /* Number of key columns in pIndex */
+ u16 nColumn; /* Total number of ordered columns in the index */
+  u16 nOrderBy; /* Number of terms in the ORDER BY clause */
+ int iLoop; /* Index of WhereLoop in pPath being processed */
+ int i, j; /* Loop counters */
+ int iCur; /* Cursor number for current WhereLoop */
+ int iColumn; /* A column number within table iCur */
+ WhereLoop *pLoop = 0; /* Current WhereLoop being processed. */
+ WhereTerm *pTerm; /* A single term of the WHERE clause */
+ Expr *pOBExpr; /* An expression from the ORDER BY clause */
+ CollSeq *pColl; /* COLLATE function from an ORDER BY clause term */
+ Index *pIndex; /* The index associated with pLoop */
+ sqlite3 *db = pWInfo->pParse->db; /* Database connection */
+ Bitmask obSat = 0; /* Mask of ORDER BY terms satisfied so far */
+ Bitmask obDone; /* Mask of all ORDER BY terms */
+ Bitmask orderDistinctMask; /* Mask of all well-ordered loops */
+ Bitmask ready; /* Mask of inner loops */
+
+ /*
+ ** We say the WhereLoop is "one-row" if it generates no more than one
+ ** row of output. A WhereLoop is one-row if all of the following are true:
+ ** (a) All index columns match with WHERE_COLUMN_EQ.
+  ** (b) The index is unique.
+  ** Any WhereLoop with a WHERE_COLUMN_EQ constraint on the rowid is one-row.
+ ** Every one-row WhereLoop will have the WHERE_ONEROW bit set in wsFlags.
+ **
+ ** We say the WhereLoop is "order-distinct" if the set of columns from
+ ** that WhereLoop that are in the ORDER BY clause are different for every
+ ** row of the WhereLoop. Every one-row WhereLoop is automatically
+ ** order-distinct. A WhereLoop that has no columns in the ORDER BY clause
+ ** is not order-distinct. To be order-distinct is not quite the same as being
+ ** UNIQUE since a UNIQUE column or index can have multiple rows that
+ ** are NULL and NULL values are equivalent for the purpose of order-distinct.
+ ** To be order-distinct, the columns must be UNIQUE and NOT NULL.
+ **
+ ** The rowid for a table is always UNIQUE and NOT NULL so whenever the
+ ** rowid appears in the ORDER BY clause, the corresponding WhereLoop is
+ ** automatically order-distinct.
+ */
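+
+  /* For example (an illustration of ours, not from the upstream comment):
+  ** given CREATE TABLE t(a INTEGER NOT NULL UNIQUE, b UNIQUE), a scan of
+  ** the index on "a" is order-distinct for an ORDER BY that uses "a",
+  ** but a scan of the index on "b" is not, since the index on "b" may
+  ** hold many NULL rows and NULLs are equivalent for this purpose.
+  */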
+
+ assert( pOrderBy!=0 );
+ if( nLoop && OptimizationDisabled(db, SQLITE_OrderByIdxJoin) ) return 0;
+
+ nOrderBy = pOrderBy->nExpr;
+ testcase( nOrderBy==BMS-1 );
+ if( nOrderBy>BMS-1 ) return 0; /* Cannot optimize overly large ORDER BYs */
+ isOrderDistinct = 1;
+ obDone = MASKBIT(nOrderBy)-1;
+ orderDistinctMask = 0;
+ ready = 0;
+ eqOpMask = WO_EQ | WO_IS | WO_ISNULL;
+ if( wctrlFlags & (WHERE_ORDERBY_LIMIT|WHERE_ORDERBY_MAX|WHERE_ORDERBY_MIN) ){
+ eqOpMask |= WO_IN;
+ }
+ for(iLoop=0; isOrderDistinct && obSat<obDone && iLoop<=nLoop; iLoop++){
+ if( iLoop>0 ) ready |= pLoop->maskSelf;
+ if( iLoop<nLoop ){
+ pLoop = pPath->aLoop[iLoop];
+ if( wctrlFlags & WHERE_ORDERBY_LIMIT ) continue;
+ }else{
+ pLoop = pLast;
+ }
+ if( pLoop->wsFlags & WHERE_VIRTUALTABLE ){
+ if( pLoop->u.vtab.isOrdered && (wctrlFlags & WHERE_DISTINCTBY)==0 ){
+ obSat = obDone;
+ }
+ break;
+ }else if( wctrlFlags & WHERE_DISTINCTBY ){
+ pLoop->u.btree.nDistinctCol = 0;
+ }
+ iCur = pWInfo->pTabList->a[pLoop->iTab].iCursor;
+
+ /* Mark off any ORDER BY term X that is a column in the table of
+    ** the current loop for which there is a term in the WHERE
+    ** clause of the form X IS NULL or X=? that references only outer
+ ** loops.
+ */
+ for(i=0; i<nOrderBy; i++){
+ if( MASKBIT(i) & obSat ) continue;
+ pOBExpr = sqlite3ExprSkipCollateAndLikely(pOrderBy->a[i].pExpr);
+ if( pOBExpr->op!=TK_COLUMN ) continue;
+ if( pOBExpr->iTable!=iCur ) continue;
+ pTerm = sqlite3WhereFindTerm(&pWInfo->sWC, iCur, pOBExpr->iColumn,
+ ~ready, eqOpMask, 0);
+ if( pTerm==0 ) continue;
+ if( pTerm->eOperator==WO_IN ){
+ /* IN terms are only valid for sorting in the ORDER BY LIMIT
+ ** optimization, and then only if they are actually used
+ ** by the query plan */
+ assert( wctrlFlags &
+ (WHERE_ORDERBY_LIMIT|WHERE_ORDERBY_MIN|WHERE_ORDERBY_MAX) );
+ for(j=0; j<pLoop->nLTerm && pTerm!=pLoop->aLTerm[j]; j++){}
+ if( j>=pLoop->nLTerm ) continue;
+ }
+ if( (pTerm->eOperator&(WO_EQ|WO_IS))!=0 && pOBExpr->iColumn>=0 ){
+ Parse *pParse = pWInfo->pParse;
+ CollSeq *pColl1 = sqlite3ExprNNCollSeq(pParse, pOrderBy->a[i].pExpr);
+ CollSeq *pColl2 = sqlite3ExprCompareCollSeq(pParse, pTerm->pExpr);
+ assert( pColl1 );
+ if( pColl2==0 || sqlite3StrICmp(pColl1->zName, pColl2->zName) ){
+ continue;
+ }
+ testcase( pTerm->pExpr->op==TK_IS );
+ }
+ obSat |= MASKBIT(i);
+ }
+
+ if( (pLoop->wsFlags & WHERE_ONEROW)==0 ){
+ if( pLoop->wsFlags & WHERE_IPK ){
+ pIndex = 0;
+ nKeyCol = 0;
+ nColumn = 1;
+ }else if( (pIndex = pLoop->u.btree.pIndex)==0 || pIndex->bUnordered ){
+ return 0;
+ }else{
+ nKeyCol = pIndex->nKeyCol;
+ nColumn = pIndex->nColumn;
+ assert( nColumn==nKeyCol+1 || !HasRowid(pIndex->pTable) );
+ assert( pIndex->aiColumn[nColumn-1]==XN_ROWID
+ || !HasRowid(pIndex->pTable));
+ isOrderDistinct = IsUniqueIndex(pIndex)
+ && (pLoop->wsFlags & WHERE_SKIPSCAN)==0;
+ }
+
+ /* Loop through all columns of the index and deal with the ones
+ ** that are not constrained by == or IN.
+ */
+ rev = revSet = 0;
+ distinctColumns = 0;
+ for(j=0; j<nColumn; j++){
+ u8 bOnce = 1; /* True to run the ORDER BY search loop */
+
+ assert( j>=pLoop->u.btree.nEq
+ || (pLoop->aLTerm[j]==0)==(j<pLoop->nSkip)
+ );
+ if( j<pLoop->u.btree.nEq && j>=pLoop->nSkip ){
+ u16 eOp = pLoop->aLTerm[j]->eOperator;
+
+ /* Skip over == and IS and ISNULL terms. (Also skip IN terms when
+ ** doing WHERE_ORDERBY_LIMIT processing). Except, IS and ISNULL
+          ** terms imply that the index is not UNIQUE NOT NULL, in which case
+          ** the loop needs to be marked as not order-distinct because it can
+ ** have repeated NULL rows.
+ **
+ ** If the current term is a column of an ((?,?) IN (SELECT...))
+ ** expression for which the SELECT returns more than one column,
+ ** check that it is the only column used by this loop. Otherwise,
+ ** if it is one of two or more, none of the columns can be
+ ** considered to match an ORDER BY term.
+ */
+ if( (eOp & eqOpMask)!=0 ){
+ if( eOp & (WO_ISNULL|WO_IS) ){
+ testcase( eOp & WO_ISNULL );
+ testcase( eOp & WO_IS );
+ testcase( isOrderDistinct );
+ isOrderDistinct = 0;
+ }
+ continue;
+ }else if( ALWAYS(eOp & WO_IN) ){
+ /* ALWAYS() justification: eOp is an equality operator due to the
+ ** j<pLoop->u.btree.nEq constraint above. Any equality other
+ ** than WO_IN is captured by the previous "if". So this one
+ ** always has to be WO_IN. */
+ Expr *pX = pLoop->aLTerm[j]->pExpr;
+ for(i=j+1; i<pLoop->u.btree.nEq; i++){
+ if( pLoop->aLTerm[i]->pExpr==pX ){
+ assert( (pLoop->aLTerm[i]->eOperator & WO_IN) );
+ bOnce = 0;
+ break;
+ }
+ }
+ }
+ }
+
+ /* Get the column number in the table (iColumn) and sort order
+ ** (revIdx) for the j-th column of the index.
+ */
+ if( pIndex ){
+ iColumn = pIndex->aiColumn[j];
+ revIdx = pIndex->aSortOrder[j] & KEYINFO_ORDER_DESC;
+ if( iColumn==pIndex->pTable->iPKey ) iColumn = XN_ROWID;
+ }else{
+ iColumn = XN_ROWID;
+ revIdx = 0;
+ }
+
+ /* An unconstrained column that might be NULL means that this
+ ** WhereLoop is not well-ordered
+ */
+ if( isOrderDistinct
+ && iColumn>=0
+ && j>=pLoop->u.btree.nEq
+ && pIndex->pTable->aCol[iColumn].notNull==0
+ ){
+ isOrderDistinct = 0;
+ }
+
+ /* Find the ORDER BY term that corresponds to the j-th column
+ ** of the index and mark that ORDER BY term off
+ */
+ isMatch = 0;
+ for(i=0; bOnce && i<nOrderBy; i++){
+ if( MASKBIT(i) & obSat ) continue;
+ pOBExpr = sqlite3ExprSkipCollateAndLikely(pOrderBy->a[i].pExpr);
+ testcase( wctrlFlags & WHERE_GROUPBY );
+ testcase( wctrlFlags & WHERE_DISTINCTBY );
+ if( (wctrlFlags & (WHERE_GROUPBY|WHERE_DISTINCTBY))==0 ) bOnce = 0;
+ if( iColumn>=XN_ROWID ){
+ if( pOBExpr->op!=TK_COLUMN ) continue;
+ if( pOBExpr->iTable!=iCur ) continue;
+ if( pOBExpr->iColumn!=iColumn ) continue;
+ }else{
+ Expr *pIdxExpr = pIndex->aColExpr->a[j].pExpr;
+ if( sqlite3ExprCompareSkip(pOBExpr, pIdxExpr, iCur) ){
+ continue;
+ }
+ }
+ if( iColumn!=XN_ROWID ){
+ pColl = sqlite3ExprNNCollSeq(pWInfo->pParse, pOrderBy->a[i].pExpr);
+ if( sqlite3StrICmp(pColl->zName, pIndex->azColl[j])!=0 ) continue;
+ }
+ if( wctrlFlags & WHERE_DISTINCTBY ){
+ pLoop->u.btree.nDistinctCol = j+1;
+ }
+ isMatch = 1;
+ break;
+ }
+ if( isMatch && (wctrlFlags & WHERE_GROUPBY)==0 ){
+ /* Make sure the sort order is compatible in an ORDER BY clause.
+ ** Sort order is irrelevant for a GROUP BY clause. */
+ if( revSet ){
+ if( (rev ^ revIdx)!=(pOrderBy->a[i].sortFlags&KEYINFO_ORDER_DESC) ){
+ isMatch = 0;
+ }
+ }else{
+ rev = revIdx ^ (pOrderBy->a[i].sortFlags & KEYINFO_ORDER_DESC);
+ if( rev ) *pRevMask |= MASKBIT(iLoop);
+ revSet = 1;
+ }
+ }
+ if( isMatch && (pOrderBy->a[i].sortFlags & KEYINFO_ORDER_BIGNULL) ){
+ if( j==pLoop->u.btree.nEq ){
+ pLoop->wsFlags |= WHERE_BIGNULL_SORT;
+ }else{
+ isMatch = 0;
+ }
+ }
+ if( isMatch ){
+ if( iColumn==XN_ROWID ){
+ testcase( distinctColumns==0 );
+ distinctColumns = 1;
+ }
+ obSat |= MASKBIT(i);
+ }else{
+ /* No match found */
+ if( j==0 || j<nKeyCol ){
+ testcase( isOrderDistinct!=0 );
+ isOrderDistinct = 0;
+ }
+ break;
+ }
+ } /* end Loop over all index columns */
+ if( distinctColumns ){
+ testcase( isOrderDistinct==0 );
+ isOrderDistinct = 1;
+ }
+ } /* end-if not one-row */
+
+ /* Mark off any other ORDER BY terms that reference pLoop */
+ if( isOrderDistinct ){
+ orderDistinctMask |= pLoop->maskSelf;
+ for(i=0; i<nOrderBy; i++){
+ Expr *p;
+ Bitmask mTerm;
+ if( MASKBIT(i) & obSat ) continue;
+ p = pOrderBy->a[i].pExpr;
+ mTerm = sqlite3WhereExprUsage(&pWInfo->sMaskSet,p);
+ if( mTerm==0 && !sqlite3ExprIsConstant(p) ) continue;
+ if( (mTerm&~orderDistinctMask)==0 ){
+ obSat |= MASKBIT(i);
+ }
+ }
+ }
+ } /* End the loop over all WhereLoops from outer-most down to inner-most */
+ if( obSat==obDone ) return (i8)nOrderBy;
+ if( !isOrderDistinct ){
+ for(i=nOrderBy-1; i>0; i--){
+ Bitmask m = MASKBIT(i) - 1;
+ if( (obSat&m)==m ) return i;
+ }
+ return 0;
+ }
+ return -1;
+}
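+
+/* As an illustration of the return convention above (our example, not
+** from the upstream sources): with CREATE INDEX i1 ON t1(a,b), a plan
+** that scans i1 satisfies ORDER BY a,b completely (N==2), satisfies only
+** the first term of ORDER BY a,c (N==1), and satisfies no terms of
+** ORDER BY c,d (N==0).
+*/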
+
+
+/*
+** If the WHERE_GROUPBY flag is set in the mask passed to sqlite3WhereBegin(),
+** the planner assumes that the specified pOrderBy list is actually a GROUP
+** BY clause - and so any order that groups rows as required satisfies the
+** request.
+**
+** Normally, in this case it is not possible for the caller to determine
+** whether or not the rows are really being delivered in sorted order, or
+** just in some other order that provides the required grouping. However,
+** if the WHERE_SORTBYGROUP flag is also passed to sqlite3WhereBegin(), then
+** this function may be called on the returned WhereInfo object. It returns
+** true if the rows really will be sorted in the specified order, or false
+** otherwise.
+**
+** For example, assuming:
+**
+** CREATE INDEX i1 ON t1(x, Y);
+**
+** then
+**
+** SELECT * FROM t1 GROUP BY x,y ORDER BY x,y; -- IsSorted()==1
+** SELECT * FROM t1 GROUP BY y,x ORDER BY y,x; -- IsSorted()==0
+*/
+SQLITE_PRIVATE int sqlite3WhereIsSorted(WhereInfo *pWInfo){
+ assert( pWInfo->wctrlFlags & WHERE_GROUPBY );
+ assert( pWInfo->wctrlFlags & WHERE_SORTBYGROUP );
+ return pWInfo->sorted;
+}
+
+#ifdef WHERETRACE_ENABLED
+/* For debugging use only: */
+static const char *wherePathName(WherePath *pPath, int nLoop, WhereLoop *pLast){
+ static char zName[65];
+ int i;
+ for(i=0; i<nLoop; i++){ zName[i] = pPath->aLoop[i]->cId; }
+ if( pLast ) zName[i++] = pLast->cId;
+ zName[i] = 0;
+ return zName;
+}
+#endif
+
+/*
+** Return the cost of sorting nRow rows, assuming that the keys have
+** nOrderby columns and that the first nSorted columns are already in
+** order.
+*/
+static LogEst whereSortingCost(
+ WhereInfo *pWInfo,
+ LogEst nRow,
+ int nOrderBy,
+ int nSorted
+){
+ /* TUNING: Estimated cost of a full external sort, where N is
+ ** the number of rows to sort is:
+ **
+ ** cost = (3.0 * N * log(N)).
+ **
+ ** Or, if the order-by clause has X terms but only the last Y
+ ** terms are out of order, then block-sorting will reduce the
+ ** sorting cost to:
+ **
+ ** cost = (3.0 * N * log(N)) * (Y/X)
+ **
+ ** The (Y/X) term is implemented using stack variable rScale
+ ** below. */
+ LogEst rScale, rSortCost;
+ assert( nOrderBy>0 && 66==sqlite3LogEst(100) );
+ rScale = sqlite3LogEst((nOrderBy-nSorted)*100/nOrderBy) - 66;
+ rSortCost = nRow + rScale + 16;
+
+  /* Multiply by log(M) where M is the number of output rows.
+ ** Use the LIMIT for M if it is smaller */
+ if( (pWInfo->wctrlFlags & WHERE_USE_LIMIT)!=0 && pWInfo->iLimit<nRow ){
+ nRow = pWInfo->iLimit;
+ }
+ rSortCost += estLog(nRow);
+ return rSortCost;
+}
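+
+/* A rough worked example (ours, with approximate LogEst values): for a
+** sort of about 1000 rows (nRow ~= 99 in the 10*log2 representation),
+** with nOrderBy==4 ORDER BY terms of which nSorted==2 are already in
+** order, rScale is roughly sqlite3LogEst(50)-66 ~= -10, giving
+** rSortCost ~= 99-10+16 = 105 before the final log(M) term is added.
+*/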
+
+/*
+** Given the list of WhereLoop objects at pWInfo->pLoops, this routine
+** attempts to find the lowest cost path that visits each WhereLoop
+** once. This path is then loaded into the pWInfo->a[].pWLoop fields.
+**
+** Assume that the total number of output rows that will need to be sorted
+** will be nRowEst (in the 10*log2 representation). Or, ignore sorting
+** costs if nRowEst==0.
+**
+** Return SQLITE_OK on success or SQLITE_NOMEM if a memory allocation
+** error occurs.
+*/
+static int wherePathSolver(WhereInfo *pWInfo, LogEst nRowEst){
+ int mxChoice; /* Maximum number of simultaneous paths tracked */
+ int nLoop; /* Number of terms in the join */
+ Parse *pParse; /* Parsing context */
+ sqlite3 *db; /* The database connection */
+ int iLoop; /* Loop counter over the terms of the join */
+ int ii, jj; /* Loop counters */
+ int mxI = 0; /* Index of next entry to replace */
+ int nOrderBy; /* Number of ORDER BY clause terms */
+ LogEst mxCost = 0; /* Maximum cost of a set of paths */
+  LogEst mxUnsorted = 0; /* Maximum unsorted cost of a set of paths */
+ int nTo, nFrom; /* Number of valid entries in aTo[] and aFrom[] */
+ WherePath *aFrom; /* All nFrom paths at the previous level */
+ WherePath *aTo; /* The nTo best paths at the current level */
+ WherePath *pFrom; /* An element of aFrom[] that we are working on */
+ WherePath *pTo; /* An element of aTo[] that we are working on */
+ WhereLoop *pWLoop; /* One of the WhereLoop objects */
+  WhereLoop **pX; /* Used to divvy up the pSpace memory */
+ LogEst *aSortCost = 0; /* Sorting and partial sorting costs */
+ char *pSpace; /* Temporary memory used by this routine */
+ int nSpace; /* Bytes of space allocated at pSpace */
+
+ pParse = pWInfo->pParse;
+ db = pParse->db;
+ nLoop = pWInfo->nLevel;
+ /* TUNING: For simple queries, only the best path is tracked.
+ ** For 2-way joins, the 5 best paths are followed.
+ ** For joins of 3 or more tables, track the 10 best paths */
+ mxChoice = (nLoop<=1) ? 1 : (nLoop==2 ? 5 : 10);
+ assert( nLoop<=pWInfo->pTabList->nSrc );
+ WHERETRACE(0x002, ("---- begin solver. (nRowEst=%d)\n", nRowEst));
+
+ /* If nRowEst is zero and there is an ORDER BY clause, ignore it. In this
+ ** case the purpose of this call is to estimate the number of rows returned
+ ** by the overall query. Once this estimate has been obtained, the caller
+ ** will invoke this function a second time, passing the estimate as the
+ ** nRowEst parameter. */
+ if( pWInfo->pOrderBy==0 || nRowEst==0 ){
+ nOrderBy = 0;
+ }else{
+ nOrderBy = pWInfo->pOrderBy->nExpr;
+ }
+
+ /* Allocate and initialize space for aTo, aFrom and aSortCost[] */
+ nSpace = (sizeof(WherePath)+sizeof(WhereLoop*)*nLoop)*mxChoice*2;
+ nSpace += sizeof(LogEst) * nOrderBy;
+ pSpace = sqlite3DbMallocRawNN(db, nSpace);
+ if( pSpace==0 ) return SQLITE_NOMEM_BKPT;
+ aTo = (WherePath*)pSpace;
+ aFrom = aTo+mxChoice;
+ memset(aFrom, 0, sizeof(aFrom[0]));
+ pX = (WhereLoop**)(aFrom+mxChoice);
+ for(ii=mxChoice*2, pFrom=aTo; ii>0; ii--, pFrom++, pX += nLoop){
+ pFrom->aLoop = pX;
+ }
+ if( nOrderBy ){
+ /* If there is an ORDER BY clause and it is not being ignored, set up
+ ** space for the aSortCost[] array. Each element of the aSortCost array
+ ** is either zero - meaning it has not yet been initialized - or the
+ ** cost of sorting nRowEst rows of data where the first X terms of
+ ** the ORDER BY clause are already in order, where X is the array
+ ** index. */
+ aSortCost = (LogEst*)pX;
+ memset(aSortCost, 0, sizeof(LogEst) * nOrderBy);
+ }
+ assert( aSortCost==0 || &pSpace[nSpace]==(char*)&aSortCost[nOrderBy] );
+ assert( aSortCost!=0 || &pSpace[nSpace]==(char*)pX );
+
+ /* Seed the search with a single WherePath containing zero WhereLoops.
+ **
+ ** TUNING: Do not let the number of iterations go above 28. If the cost
+ ** of computing an automatic index is not paid back within the first 28
+ ** rows, then do not use the automatic index. */
+ aFrom[0].nRow = MIN(pParse->nQueryLoop, 48); assert( 48==sqlite3LogEst(28) );
+ nFrom = 1;
+ assert( aFrom[0].isOrdered==0 );
+ if( nOrderBy ){
+ /* If nLoop is zero, then there are no FROM terms in the query. Since
+ ** in this case the query may return a maximum of one row, the results
+ ** are already in the requested order. Set isOrdered to nOrderBy to
+ ** indicate this. Or, if nLoop is greater than zero, set isOrdered to
+ ** -1, indicating that the result set may or may not be ordered,
+ ** depending on the loops added to the current plan. */
+ aFrom[0].isOrdered = nLoop>0 ? -1 : nOrderBy;
+ }
+
+ /* Compute successively longer WherePaths using the previous generation
+ ** of WherePaths as the basis for the next. Keep track of the mxChoice
+ ** best paths at each generation */
+ for(iLoop=0; iLoop<nLoop; iLoop++){
+ nTo = 0;
+ for(ii=0, pFrom=aFrom; ii<nFrom; ii++, pFrom++){
+ for(pWLoop=pWInfo->pLoops; pWLoop; pWLoop=pWLoop->pNextLoop){
+ LogEst nOut; /* Rows visited by (pFrom+pWLoop) */
+ LogEst rCost; /* Cost of path (pFrom+pWLoop) */
+ LogEst rUnsorted; /* Unsorted cost of (pFrom+pWLoop) */
+ i8 isOrdered = pFrom->isOrdered; /* isOrdered for (pFrom+pWLoop) */
+ Bitmask maskNew; /* Mask of src visited by (..) */
+ Bitmask revMask = 0; /* Mask of rev-order loops for (..) */
+
+ if( (pWLoop->prereq & ~pFrom->maskLoop)!=0 ) continue;
+ if( (pWLoop->maskSelf & pFrom->maskLoop)!=0 ) continue;
+ if( (pWLoop->wsFlags & WHERE_AUTO_INDEX)!=0 && pFrom->nRow<3 ){
+          /* Do not use an automatic index if this loop is expected
+ ** to run less than 1.25 times. It is tempting to also exclude
+ ** automatic index usage on an outer loop, but sometimes an automatic
+ ** index is useful in the outer loop of a correlated subquery. */
+ assert( 10==sqlite3LogEst(2) );
+ continue;
+ }
+
+ /* At this point, pWLoop is a candidate to be the next loop.
+ ** Compute its cost */
+ rUnsorted = sqlite3LogEstAdd(pWLoop->rSetup,pWLoop->rRun + pFrom->nRow);
+ rUnsorted = sqlite3LogEstAdd(rUnsorted, pFrom->rUnsorted);
+ nOut = pFrom->nRow + pWLoop->nOut;
+ maskNew = pFrom->maskLoop | pWLoop->maskSelf;
+ if( isOrdered<0 ){
+ isOrdered = wherePathSatisfiesOrderBy(pWInfo,
+ pWInfo->pOrderBy, pFrom, pWInfo->wctrlFlags,
+ iLoop, pWLoop, &revMask);
+ }else{
+ revMask = pFrom->revLoop;
+ }
+ if( isOrdered>=0 && isOrdered<nOrderBy ){
+ if( aSortCost[isOrdered]==0 ){
+ aSortCost[isOrdered] = whereSortingCost(
+ pWInfo, nRowEst, nOrderBy, isOrdered
+ );
+ }
+ /* TUNING: Add a small extra penalty (5) to sorting as an
+          ** extra encouragement to the query planner to select a plan
+ ** where the rows emerge in the correct order without any sorting
+ ** required. */
+ rCost = sqlite3LogEstAdd(rUnsorted, aSortCost[isOrdered]) + 5;
+
+ WHERETRACE(0x002,
+ ("---- sort cost=%-3d (%d/%d) increases cost %3d to %-3d\n",
+ aSortCost[isOrdered], (nOrderBy-isOrdered), nOrderBy,
+ rUnsorted, rCost));
+ }else{
+ rCost = rUnsorted;
+ rUnsorted -= 2; /* TUNING: Slight bias in favor of no-sort plans */
+ }
+
+ /* Check to see if pWLoop should be added to the set of
+ ** mxChoice best-so-far paths.
+ **
+ ** First look for an existing path among best-so-far paths
+ ** that covers the same set of loops and has the same isOrdered
+ ** setting as the current path candidate.
+ **
+ ** The term "((pTo->isOrdered^isOrdered)&0x80)==0" is equivalent
+          ** to "(pTo->isOrdered==(-1))==(isOrdered==(-1))" for the range
+ ** of legal values for isOrdered, -1..64.
+ */
+ for(jj=0, pTo=aTo; jj<nTo; jj++, pTo++){
+ if( pTo->maskLoop==maskNew
+ && ((pTo->isOrdered^isOrdered)&0x80)==0
+ ){
+ testcase( jj==nTo-1 );
+ break;
+ }
+ }
+ if( jj>=nTo ){
+ /* None of the existing best-so-far paths match the candidate. */
+ if( nTo>=mxChoice
+ && (rCost>mxCost || (rCost==mxCost && rUnsorted>=mxUnsorted))
+ ){
+ /* The current candidate is no better than any of the mxChoice
+ ** paths currently in the best-so-far buffer. So discard
+ ** this candidate as not viable. */
+#ifdef WHERETRACE_ENABLED /* 0x4 */
+ if( sqlite3WhereTrace&0x4 ){
+ sqlite3DebugPrintf("Skip %s cost=%-3d,%3d,%3d order=%c\n",
+ wherePathName(pFrom, iLoop, pWLoop), rCost, nOut, rUnsorted,
+ isOrdered>=0 ? isOrdered+'0' : '?');
+ }
+#endif
+ continue;
+ }
+            /* If we reach this point, it means that the new candidate path
+ ** needs to be added to the set of best-so-far paths. */
+ if( nTo<mxChoice ){
+ /* Increase the size of the aTo set by one */
+ jj = nTo++;
+ }else{
+ /* New path replaces the prior worst to keep count below mxChoice */
+ jj = mxI;
+ }
+ pTo = &aTo[jj];
+#ifdef WHERETRACE_ENABLED /* 0x4 */
+ if( sqlite3WhereTrace&0x4 ){
+ sqlite3DebugPrintf("New %s cost=%-3d,%3d,%3d order=%c\n",
+ wherePathName(pFrom, iLoop, pWLoop), rCost, nOut, rUnsorted,
+ isOrdered>=0 ? isOrdered+'0' : '?');
+ }
+#endif
+ }else{
+ /* Control reaches here if best-so-far path pTo=aTo[jj] covers the
+ ** same set of loops and has the same isOrdered setting as the
+ ** candidate path. Check to see if the candidate should replace
+ ** pTo or if the candidate should be skipped.
+ **
+ ** The conditional is an expanded vector comparison equivalent to:
+ ** (pTo->rCost,pTo->nRow,pTo->rUnsorted) <= (rCost,nOut,rUnsorted)
+ */
+ if( pTo->rCost<rCost
+ || (pTo->rCost==rCost
+ && (pTo->nRow<nOut
+ || (pTo->nRow==nOut && pTo->rUnsorted<=rUnsorted)
+ )
+ )
+ ){
+#ifdef WHERETRACE_ENABLED /* 0x4 */
+ if( sqlite3WhereTrace&0x4 ){
+ sqlite3DebugPrintf(
+ "Skip %s cost=%-3d,%3d,%3d order=%c",
+ wherePathName(pFrom, iLoop, pWLoop), rCost, nOut, rUnsorted,
+ isOrdered>=0 ? isOrdered+'0' : '?');
+ sqlite3DebugPrintf(" vs %s cost=%-3d,%3d,%3d order=%c\n",
+ wherePathName(pTo, iLoop+1, 0), pTo->rCost, pTo->nRow,
+ pTo->rUnsorted, pTo->isOrdered>=0 ? pTo->isOrdered+'0' : '?');
+ }
+#endif
+ /* Discard the candidate path from further consideration */
+ testcase( pTo->rCost==rCost );
+ continue;
+ }
+ testcase( pTo->rCost==rCost+1 );
+ /* Control reaches here if the candidate path is better than the
+ ** pTo path. Replace pTo with the candidate. */
+#ifdef WHERETRACE_ENABLED /* 0x4 */
+ if( sqlite3WhereTrace&0x4 ){
+ sqlite3DebugPrintf(
+ "Update %s cost=%-3d,%3d,%3d order=%c",
+ wherePathName(pFrom, iLoop, pWLoop), rCost, nOut, rUnsorted,
+ isOrdered>=0 ? isOrdered+'0' : '?');
+ sqlite3DebugPrintf(" was %s cost=%-3d,%3d,%3d order=%c\n",
+ wherePathName(pTo, iLoop+1, 0), pTo->rCost, pTo->nRow,
+ pTo->rUnsorted, pTo->isOrdered>=0 ? pTo->isOrdered+'0' : '?');
+ }
+#endif
+ }
+ /* pWLoop is a winner. Add it to the set of best so far */
+ pTo->maskLoop = pFrom->maskLoop | pWLoop->maskSelf;
+ pTo->revLoop = revMask;
+ pTo->nRow = nOut;
+ pTo->rCost = rCost;
+ pTo->rUnsorted = rUnsorted;
+ pTo->isOrdered = isOrdered;
+ memcpy(pTo->aLoop, pFrom->aLoop, sizeof(WhereLoop*)*iLoop);
+ pTo->aLoop[iLoop] = pWLoop;
+ if( nTo>=mxChoice ){
+ mxI = 0;
+ mxCost = aTo[0].rCost;
+ mxUnsorted = aTo[0].nRow;
+ for(jj=1, pTo=&aTo[1]; jj<mxChoice; jj++, pTo++){
+ if( pTo->rCost>mxCost
+ || (pTo->rCost==mxCost && pTo->rUnsorted>mxUnsorted)
+ ){
+ mxCost = pTo->rCost;
+ mxUnsorted = pTo->rUnsorted;
+ mxI = jj;
+ }
+ }
+ }
+ }
+ }
+
+#ifdef WHERETRACE_ENABLED /* >=2 */
+ if( sqlite3WhereTrace & 0x02 ){
+ sqlite3DebugPrintf("---- after round %d ----\n", iLoop);
+ for(ii=0, pTo=aTo; ii<nTo; ii++, pTo++){
+ sqlite3DebugPrintf(" %s cost=%-3d nrow=%-3d order=%c",
+ wherePathName(pTo, iLoop+1, 0), pTo->rCost, pTo->nRow,
+ pTo->isOrdered>=0 ? (pTo->isOrdered+'0') : '?');
+ if( pTo->isOrdered>0 ){
+ sqlite3DebugPrintf(" rev=0x%llx\n", pTo->revLoop);
+ }else{
+ sqlite3DebugPrintf("\n");
+ }
+ }
+ }
+#endif
+
+ /* Swap the roles of aFrom and aTo for the next generation */
+ pFrom = aTo;
+ aTo = aFrom;
+ aFrom = pFrom;
+ nFrom = nTo;
+ }
+
+ if( nFrom==0 ){
+ sqlite3ErrorMsg(pParse, "no query solution");
+ sqlite3DbFreeNN(db, pSpace);
+ return SQLITE_ERROR;
+ }
+
+ /* Find the lowest cost path. pFrom will be left pointing to that path */
+ pFrom = aFrom;
+ for(ii=1; ii<nFrom; ii++){
+ if( pFrom->rCost>aFrom[ii].rCost ) pFrom = &aFrom[ii];
+ }
+ assert( pWInfo->nLevel==nLoop );
+ /* Load the lowest cost path into pWInfo */
+ for(iLoop=0; iLoop<nLoop; iLoop++){
+ WhereLevel *pLevel = pWInfo->a + iLoop;
+ pLevel->pWLoop = pWLoop = pFrom->aLoop[iLoop];
+ pLevel->iFrom = pWLoop->iTab;
+ pLevel->iTabCur = pWInfo->pTabList->a[pLevel->iFrom].iCursor;
+ }
+ if( (pWInfo->wctrlFlags & WHERE_WANT_DISTINCT)!=0
+ && (pWInfo->wctrlFlags & WHERE_DISTINCTBY)==0
+ && pWInfo->eDistinct==WHERE_DISTINCT_NOOP
+ && nRowEst
+ ){
+ Bitmask notUsed;
+ int rc = wherePathSatisfiesOrderBy(pWInfo, pWInfo->pResultSet, pFrom,
+ WHERE_DISTINCTBY, nLoop-1, pFrom->aLoop[nLoop-1], &notUsed);
+ if( rc==pWInfo->pResultSet->nExpr ){
+ pWInfo->eDistinct = WHERE_DISTINCT_ORDERED;
+ }
+ }
+ pWInfo->bOrderedInnerLoop = 0;
+ if( pWInfo->pOrderBy ){
+ if( pWInfo->wctrlFlags & WHERE_DISTINCTBY ){
+ if( pFrom->isOrdered==pWInfo->pOrderBy->nExpr ){
+ pWInfo->eDistinct = WHERE_DISTINCT_ORDERED;
+ }
+ }else{
+ pWInfo->nOBSat = pFrom->isOrdered;
+ pWInfo->revMask = pFrom->revLoop;
+ if( pWInfo->nOBSat<=0 ){
+ pWInfo->nOBSat = 0;
+ if( nLoop>0 ){
+ u32 wsFlags = pFrom->aLoop[nLoop-1]->wsFlags;
+ if( (wsFlags & WHERE_ONEROW)==0
+ && (wsFlags&(WHERE_IPK|WHERE_COLUMN_IN))!=(WHERE_IPK|WHERE_COLUMN_IN)
+ ){
+ Bitmask m = 0;
+ int rc = wherePathSatisfiesOrderBy(pWInfo, pWInfo->pOrderBy, pFrom,
+ WHERE_ORDERBY_LIMIT, nLoop-1, pFrom->aLoop[nLoop-1], &m);
+ testcase( wsFlags & WHERE_IPK );
+ testcase( wsFlags & WHERE_COLUMN_IN );
+ if( rc==pWInfo->pOrderBy->nExpr ){
+ pWInfo->bOrderedInnerLoop = 1;
+ pWInfo->revMask = m;
+ }
+ }
+ }
+ }else if( nLoop
+ && pWInfo->nOBSat==1
+ && (pWInfo->wctrlFlags & (WHERE_ORDERBY_MIN|WHERE_ORDERBY_MAX))!=0
+ ){
+ pWInfo->bOrderedInnerLoop = 1;
+ }
+ }
+ if( (pWInfo->wctrlFlags & WHERE_SORTBYGROUP)
+ && pWInfo->nOBSat==pWInfo->pOrderBy->nExpr && nLoop>0
+ ){
+ Bitmask revMask = 0;
+ int nOrder = wherePathSatisfiesOrderBy(pWInfo, pWInfo->pOrderBy,
+ pFrom, 0, nLoop-1, pFrom->aLoop[nLoop-1], &revMask
+ );
+ assert( pWInfo->sorted==0 );
+ if( nOrder==pWInfo->pOrderBy->nExpr ){
+ pWInfo->sorted = 1;
+ pWInfo->revMask = revMask;
+ }
+ }
+ }
+
+
+ pWInfo->nRowOut = pFrom->nRow;
+
+ /* Free temporary memory and return success */
+ sqlite3DbFreeNN(db, pSpace);
+ return SQLITE_OK;
+}
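+
+/* In outline (an illustration of ours, not upstream documentation), the
+** solver above performs a beam search over join orders:
+**
+**   aFrom = { the empty path }
+**   for each join level iLoop:
+**     for each path in aFrom and each compatible WhereLoop:
+**       extend the path and estimate rCost, rUnsorted and isOrdered
+**       keep only the mxChoice cheapest extensions in aTo
+**     swap aFrom and aTo
+**   load the cheapest surviving path in aFrom into pWInfo
+*/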
+
+/*
+** Most queries use only a single table (they are not joins) and have
+** simple == constraints against indexed fields. This routine attempts
+** to plan those simple cases using much less ceremony than the
+** general-purpose query planner, and thereby yield faster sqlite3_prepare()
+** times for the common case.
+**
+** Return non-zero on success, if this query can be handled by this
+** no-frills query planner. Return zero if this query needs the
+** general-purpose query planner.
+*/
+static int whereShortCut(WhereLoopBuilder *pBuilder){
+ WhereInfo *pWInfo;
+ struct SrcList_item *pItem;
+ WhereClause *pWC;
+ WhereTerm *pTerm;
+ WhereLoop *pLoop;
+ int iCur;
+ int j;
+ Table *pTab;
+ Index *pIdx;
+
+ pWInfo = pBuilder->pWInfo;
+ if( pWInfo->wctrlFlags & WHERE_OR_SUBCLAUSE ) return 0;
+ assert( pWInfo->pTabList->nSrc>=1 );
+ pItem = pWInfo->pTabList->a;
+ pTab = pItem->pTab;
+ if( IsVirtual(pTab) ) return 0;
+ if( pItem->fg.isIndexedBy ) return 0;
+ iCur = pItem->iCursor;
+ pWC = &pWInfo->sWC;
+ pLoop = pBuilder->pNew;
+ pLoop->wsFlags = 0;
+ pLoop->nSkip = 0;
+ pTerm = sqlite3WhereFindTerm(pWC, iCur, -1, 0, WO_EQ|WO_IS, 0);
+ if( pTerm ){
+ testcase( pTerm->eOperator & WO_IS );
+ pLoop->wsFlags = WHERE_COLUMN_EQ|WHERE_IPK|WHERE_ONEROW;
+ pLoop->aLTerm[0] = pTerm;
+ pLoop->nLTerm = 1;
+ pLoop->u.btree.nEq = 1;
+ /* TUNING: Cost of a rowid lookup is 10 */
+ pLoop->rRun = 33; /* 33==sqlite3LogEst(10) */
+ }else{
+ for(pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext){
+ int opMask;
+ assert( pLoop->aLTermSpace==pLoop->aLTerm );
+ if( !IsUniqueIndex(pIdx)
+ || pIdx->pPartIdxWhere!=0
+ || pIdx->nKeyCol>ArraySize(pLoop->aLTermSpace)
+ ) continue;
+ opMask = pIdx->uniqNotNull ? (WO_EQ|WO_IS) : WO_EQ;
+ for(j=0; j<pIdx->nKeyCol; j++){
+ pTerm = sqlite3WhereFindTerm(pWC, iCur, j, 0, opMask, pIdx);
+ if( pTerm==0 ) break;
+ testcase( pTerm->eOperator & WO_IS );
+ pLoop->aLTerm[j] = pTerm;
+ }
+ if( j!=pIdx->nKeyCol ) continue;
+ pLoop->wsFlags = WHERE_COLUMN_EQ|WHERE_ONEROW|WHERE_INDEXED;
+ if( pIdx->isCovering || (pItem->colUsed & pIdx->colNotIdxed)==0 ){
+ pLoop->wsFlags |= WHERE_IDX_ONLY;
+ }
+ pLoop->nLTerm = j;
+ pLoop->u.btree.nEq = j;
+ pLoop->u.btree.pIndex = pIdx;
+ /* TUNING: Cost of a unique index lookup is 15 */
+ pLoop->rRun = 39; /* 39==sqlite3LogEst(15) */
+ break;
+ }
+ }
+ if( pLoop->wsFlags ){
+ pLoop->nOut = (LogEst)1;
+ pWInfo->a[0].pWLoop = pLoop;
+ assert( pWInfo->sMaskSet.n==1 && iCur==pWInfo->sMaskSet.ix[0] );
+ pLoop->maskSelf = 1; /* sqlite3WhereGetMask(&pWInfo->sMaskSet, iCur); */
+ pWInfo->a[0].iTabCur = iCur;
+ pWInfo->nRowOut = 1;
+ if( pWInfo->pOrderBy ) pWInfo->nOBSat = pWInfo->pOrderBy->nExpr;
+ if( pWInfo->wctrlFlags & WHERE_WANT_DISTINCT ){
+ pWInfo->eDistinct = WHERE_DISTINCT_UNIQUE;
+ }
+#ifdef SQLITE_DEBUG
+ pLoop->cId = '0';
+#endif
+ return 1;
+ }
+ return 0;
+}
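+
+/* Examples of statements the shortcut above typically covers (ours, for
+** illustration), assuming a table "product" with a UNIQUE index on "sku":
+**
+**     SELECT price FROM product WHERE rowid=?;  -- rowid lookup
+**     SELECT price FROM product WHERE sku=?;    -- unique index lookup
+**
+** Joins, virtual tables, INDEXED BY, and OR-optimization subclauses all
+** fall through to the general-purpose planner.
+*/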
+
+/*
+** Helper function for exprIsDeterministic().
+*/
+static int exprNodeIsDeterministic(Walker *pWalker, Expr *pExpr){
+ if( pExpr->op==TK_FUNCTION && ExprHasProperty(pExpr, EP_ConstFunc)==0 ){
+ pWalker->eCode = 0;
+ return WRC_Abort;
+ }
+ return WRC_Continue;
+}
+
+/*
+** Return true if the expression contains no non-deterministic SQL
+** functions. Do not consider non-deterministic SQL functions that are
+** part of sub-select statements.
+*/
+static int exprIsDeterministic(Expr *p){
+ Walker w;
+ memset(&w, 0, sizeof(w));
+ w.eCode = 1;
+ w.xExprCallback = exprNodeIsDeterministic;
+ w.xSelectCallback = sqlite3SelectWalkFail;
+ sqlite3WalkExpr(&w, p);
+ return w.eCode;
+}
+
+
+#ifdef WHERETRACE_ENABLED
+/*
+** Display all WhereLoops in pWInfo
+*/
+static void showAllWhereLoops(WhereInfo *pWInfo, WhereClause *pWC){
+ if( sqlite3WhereTrace ){ /* Display all of the WhereLoop objects */
+ WhereLoop *p;
+ int i;
+ static const char zLabel[] = "0123456789abcdefghijklmnopqrstuvwyxz"
+ "ABCDEFGHIJKLMNOPQRSTUVWYXZ";
+ for(p=pWInfo->pLoops, i=0; p; p=p->pNextLoop, i++){
+ p->cId = zLabel[i%(sizeof(zLabel)-1)];
+ sqlite3WhereLoopPrint(p, pWC);
+ }
+ }
+}
+# define WHERETRACE_ALL_LOOPS(W,C) showAllWhereLoops(W,C)
+#else
+# define WHERETRACE_ALL_LOOPS(W,C)
+#endif
+
+/*
+** Generate the beginning of the loop used for WHERE clause processing.
+** The return value is a pointer to an opaque structure that contains
+** information needed to terminate the loop. Later, the calling routine
+** should invoke sqlite3WhereEnd() with the return value of this function
+** in order to complete the WHERE clause processing.
+**
+** If an error occurs, this routine returns NULL.
+**
+** The basic idea is to do a nested loop, one loop for each table in
+** the FROM clause of a select. (INSERT and UPDATE statements are the
+** same as a SELECT with only a single table in the FROM clause.) For
+** example, if the SQL is this:
+**
+** SELECT * FROM t1, t2, t3 WHERE ...;
+**
+** Then the code generated is conceptually like the following:
+**
+** foreach row1 in t1 do \ Code generated
+** foreach row2 in t2 do |-- by sqlite3WhereBegin()
+** foreach row3 in t3 do /
+** ...
+** end \ Code generated
+** end |-- by sqlite3WhereEnd()
+** end /
+**
+** Note that the loops might not be nested in the order in which they
+** appear in the FROM clause if a different order is better able to make
+** use of indices. Note also that when the IN operator appears in
+** the WHERE clause, it might result in additional nested loops for
+** scanning through all values on the right-hand side of the IN.
+**
+** There are Btree cursors associated with each table. t1 uses cursor
+** number pTabList->a[0].iCursor. t2 uses the cursor pTabList->a[1].iCursor.
+** And so forth. This routine generates code to open those VDBE cursors
+** and sqlite3WhereEnd() generates the code to close them.
+**
+** The code that sqlite3WhereBegin() generates leaves the cursors named
+** in pTabList pointing at their appropriate entries. The [...] code
+** can use OP_Column and OP_Rowid opcodes on these cursors to extract
+** data from the various tables of the loop.
+**
+** If the WHERE clause is empty, the foreach loops must each scan their
+** entire tables. Thus a three-way join is an O(N^3) operation. But if
+** the tables have indices and there are terms in the WHERE clause that
+** refer to those indices, a complete table scan can be avoided and the
+** code will run much faster. Most of the work of this routine is checking
+** to see if there are indices that can be used to speed up the loop.
+**
+** Terms of the WHERE clause are also used to limit which rows actually
+** make it to the "..." in the middle of the loop. After each "foreach",
+** terms of the WHERE clause that use only terms in that loop and outer
+** loops are evaluated and if false a jump is made around all subsequent
+** inner loops (or around the "..." if the test occurs within the inner-
+** most loop).
+**
+** OUTER JOINS
+**
+** An outer join of tables t1 and t2 is conceptually coded as follows:
+**
+** foreach row1 in t1 do
+** flag = 0
+** foreach row2 in t2 do
+** start:
+** ...
+** flag = 1
+** end
+** if flag==0 then
+** move the row2 cursor to a null row
+** goto start
+** fi
+** end
+**
+** ORDER BY CLAUSE PROCESSING
+**
+** pOrderBy is a pointer to the ORDER BY clause (or the GROUP BY clause
+** if the WHERE_GROUPBY flag is set in wctrlFlags) of a SELECT statement
+** if there is one. If there is no ORDER BY clause or if this routine
+** is called from an UPDATE or DELETE statement, then pOrderBy is NULL.
+**
+** The iIdxCur parameter is the cursor number of an index. If
+** WHERE_OR_SUBCLAUSE is set, iIdxCur is the cursor number of an index
+** to use for OR clause processing. The WHERE clause should use this
+** specific cursor. If WHERE_ONEPASS_DESIRED is set, then iIdxCur is
+** the first cursor in an array of cursors for all indices. iIdxCur should
+** be used to compute the appropriate cursor depending on which index is
+** used.
+*/
+SQLITE_PRIVATE WhereInfo *sqlite3WhereBegin(
+ Parse *pParse, /* The parser context */
+ SrcList *pTabList, /* FROM clause: A list of all tables to be scanned */
+ Expr *pWhere, /* The WHERE clause */
+ ExprList *pOrderBy, /* An ORDER BY (or GROUP BY) clause, or NULL */
+ ExprList *pResultSet, /* Query result set. Req'd for DISTINCT */
+ u16 wctrlFlags, /* The WHERE_* flags defined in sqliteInt.h */
+ int iAuxArg /* If WHERE_OR_SUBCLAUSE is set, index cursor number
+ ** If WHERE_USE_LIMIT, then the limit amount */
+){
+ int nByteWInfo; /* Num. bytes allocated for WhereInfo struct */
+ int nTabList; /* Number of elements in pTabList */
+ WhereInfo *pWInfo; /* Will become the return value of this function */
+ Vdbe *v = pParse->pVdbe; /* The virtual database engine */
+ Bitmask notReady; /* Cursors that are not yet positioned */
+ WhereLoopBuilder sWLB; /* The WhereLoop builder */
+ WhereMaskSet *pMaskSet; /* The expression mask set */
+ WhereLevel *pLevel; /* A single level in pWInfo->a[] */
+ WhereLoop *pLoop; /* Pointer to a single WhereLoop object */
+ int ii; /* Loop counter */
+ sqlite3 *db; /* Database connection */
+ int rc; /* Return code */
+ u8 bFordelete = 0; /* OPFLAG_FORDELETE or zero, as appropriate */
+
+ assert( (wctrlFlags & WHERE_ONEPASS_MULTIROW)==0 || (
+ (wctrlFlags & WHERE_ONEPASS_DESIRED)!=0
+ && (wctrlFlags & WHERE_OR_SUBCLAUSE)==0
+ ));
+
+ /* Only one of WHERE_OR_SUBCLAUSE or WHERE_USE_LIMIT */
+ assert( (wctrlFlags & WHERE_OR_SUBCLAUSE)==0
+ || (wctrlFlags & WHERE_USE_LIMIT)==0 );
+
+ /* Variable initialization */
+ db = pParse->db;
+ memset(&sWLB, 0, sizeof(sWLB));
+
+ /* An ORDER/GROUP BY clause of more than 63 terms cannot be optimized */
+ testcase( pOrderBy && pOrderBy->nExpr==BMS-1 );
+ if( pOrderBy && pOrderBy->nExpr>=BMS ) pOrderBy = 0;
+ sWLB.pOrderBy = pOrderBy;
+
+ /* Disable the DISTINCT optimization if SQLITE_DistinctOpt is set via
+ ** sqlite3_test_ctrl(SQLITE_TESTCTRL_OPTIMIZATIONS,...) */
+ if( OptimizationDisabled(db, SQLITE_DistinctOpt) ){
+ wctrlFlags &= ~WHERE_WANT_DISTINCT;
+ }
+
+ /* The number of tables in the FROM clause is limited by the number of
+ ** bits in a Bitmask
+ */
+ testcase( pTabList->nSrc==BMS );
+ if( pTabList->nSrc>BMS ){
+ sqlite3ErrorMsg(pParse, "at most %d tables in a join", BMS);
+ return 0;
+ }
+
+ /* This function normally generates a nested loop for all tables in
+ ** pTabList. But if the WHERE_OR_SUBCLAUSE flag is set, then we should
+ ** only generate code for the first table in pTabList and assume that
+ ** any cursors associated with subsequent tables are uninitialized.
+ */
+ nTabList = (wctrlFlags & WHERE_OR_SUBCLAUSE) ? 1 : pTabList->nSrc;
+
+ /* Allocate and initialize the WhereInfo structure that will become the
+ ** return value. A single allocation is used to store the WhereInfo
+ ** struct, the contents of WhereInfo.a[], the WhereClause structure
+ ** and the WhereMaskSet structure. Since WhereClause contains an 8-byte
+ ** field (type Bitmask) it must be aligned on an 8-byte boundary on
+ ** some architectures. Hence the ROUND8() below.
+ */
+ nByteWInfo = ROUND8(sizeof(WhereInfo)+(nTabList-1)*sizeof(WhereLevel));
+ pWInfo = sqlite3DbMallocRawNN(db, nByteWInfo + sizeof(WhereLoop));
+ if( db->mallocFailed ){
+ sqlite3DbFree(db, pWInfo);
+ pWInfo = 0;
+ goto whereBeginError;
+ }
+ pWInfo->pParse = pParse;
+ pWInfo->pTabList = pTabList;
+ pWInfo->pOrderBy = pOrderBy;
+ pWInfo->pWhere = pWhere;
+ pWInfo->pResultSet = pResultSet;
+ pWInfo->aiCurOnePass[0] = pWInfo->aiCurOnePass[1] = -1;
+ pWInfo->nLevel = nTabList;
+ pWInfo->iBreak = pWInfo->iContinue = sqlite3VdbeMakeLabel(pParse);
+ pWInfo->wctrlFlags = wctrlFlags;
+ pWInfo->iLimit = iAuxArg;
+ pWInfo->savedNQueryLoop = pParse->nQueryLoop;
+ memset(&pWInfo->nOBSat, 0,
+ offsetof(WhereInfo,sWC) - offsetof(WhereInfo,nOBSat));
+ memset(&pWInfo->a[0], 0, sizeof(WhereLoop)+nTabList*sizeof(WhereLevel));
+ assert( pWInfo->eOnePass==ONEPASS_OFF ); /* ONEPASS defaults to OFF */
+ pMaskSet = &pWInfo->sMaskSet;
+ sWLB.pWInfo = pWInfo;
+ sWLB.pWC = &pWInfo->sWC;
+ sWLB.pNew = (WhereLoop*)(((char*)pWInfo)+nByteWInfo);
+ assert( EIGHT_BYTE_ALIGNMENT(sWLB.pNew) );
+ whereLoopInit(sWLB.pNew);
+#ifdef SQLITE_DEBUG
+ sWLB.pNew->cId = '*';
+#endif
+
+ /* Split the WHERE clause into separate subexpressions where each
+ ** subexpression is separated by an AND operator.
+ */
+ initMaskSet(pMaskSet);
+ sqlite3WhereClauseInit(&pWInfo->sWC, pWInfo);
+ sqlite3WhereSplit(&pWInfo->sWC, pWhere, TK_AND);
+
+ /* Special case: No FROM clause
+ */
+ if( nTabList==0 ){
+ if( pOrderBy ) pWInfo->nOBSat = pOrderBy->nExpr;
+ if( wctrlFlags & WHERE_WANT_DISTINCT ){
+ pWInfo->eDistinct = WHERE_DISTINCT_UNIQUE;
+ }
+ ExplainQueryPlan((pParse, 0, "SCAN CONSTANT ROW"));
+ }else{
+ /* Assign a bit from the bitmask to every term in the FROM clause.
+ **
+ ** The N-th term of the FROM clause is assigned a bitmask of 1<<N.
+ **
+    ** The rule of the previous sentence ensures that if X is the bitmask for
+ ** a table T, then X-1 is the bitmask for all other tables to the left of T.
+ ** Knowing the bitmask for all tables to the left of a left join is
+ ** important. Ticket #3015.
+ **
+ ** Note that bitmasks are created for all pTabList->nSrc tables in
+ ** pTabList, not just the first nTabList tables. nTabList is normally
+ ** equal to pTabList->nSrc but might be shortened to 1 if the
+ ** WHERE_OR_SUBCLAUSE flag is set.
+ */
+ ii = 0;
+ do{
+ createMask(pMaskSet, pTabList->a[ii].iCursor);
+ sqlite3WhereTabFuncArgs(pParse, &pTabList->a[ii], &pWInfo->sWC);
+ }while( (++ii)<pTabList->nSrc );
+ #ifdef SQLITE_DEBUG
+ {
+ Bitmask mx = 0;
+ for(ii=0; ii<pTabList->nSrc; ii++){
+ Bitmask m = sqlite3WhereGetMask(pMaskSet, pTabList->a[ii].iCursor);
+ assert( m>=mx );
+ mx = m;
+ }
+ }
+ #endif
+ }
+
+ /* Analyze all of the subexpressions. */
+ sqlite3WhereExprAnalyze(pTabList, &pWInfo->sWC);
+ if( db->mallocFailed ) goto whereBeginError;
+
+ /* Special case: WHERE terms that do not refer to any tables in the join
+ ** (constant expressions). Evaluate each such term, and jump over all the
+ ** generated code if the result is not true.
+ **
+ ** Do not do this if the expression contains non-deterministic functions
+ ** that are not within a sub-select. This is not strictly required, but
+ ** preserves SQLite's legacy behaviour in the following two cases:
+ **
+ ** FROM ... WHERE random()>0; -- eval random() once per row
+ ** FROM ... WHERE (SELECT random())>0; -- eval random() once overall
+ */
+ for(ii=0; ii<sWLB.pWC->nTerm; ii++){
+ WhereTerm *pT = &sWLB.pWC->a[ii];
+ if( pT->wtFlags & TERM_VIRTUAL ) continue;
+ if( pT->prereqAll==0 && (nTabList==0 || exprIsDeterministic(pT->pExpr)) ){
+ sqlite3ExprIfFalse(pParse, pT->pExpr, pWInfo->iBreak, SQLITE_JUMPIFNULL);
+ pT->wtFlags |= TERM_CODED;
+ }
+ }
+
+ if( wctrlFlags & WHERE_WANT_DISTINCT ){
+ if( isDistinctRedundant(pParse, pTabList, &pWInfo->sWC, pResultSet) ){
+ /* The DISTINCT marking is pointless. Ignore it. */
+ pWInfo->eDistinct = WHERE_DISTINCT_UNIQUE;
+ }else if( pOrderBy==0 ){
+ /* Try to ORDER BY the result set to make distinct processing easier */
+ pWInfo->wctrlFlags |= WHERE_DISTINCTBY;
+ pWInfo->pOrderBy = pResultSet;
+ }
+ }
+
+ /* Construct the WhereLoop objects */
+#if defined(WHERETRACE_ENABLED)
+ if( sqlite3WhereTrace & 0xffff ){
+ sqlite3DebugPrintf("*** Optimizer Start *** (wctrlFlags: 0x%x",wctrlFlags);
+ if( wctrlFlags & WHERE_USE_LIMIT ){
+ sqlite3DebugPrintf(", limit: %d", iAuxArg);
+ }
+ sqlite3DebugPrintf(")\n");
+ if( sqlite3WhereTrace & 0x100 ){
+ Select sSelect;
+ memset(&sSelect, 0, sizeof(sSelect));
+ sSelect.selFlags = SF_WhereBegin;
+ sSelect.pSrc = pTabList;
+ sSelect.pWhere = pWhere;
+ sSelect.pOrderBy = pOrderBy;
+ sSelect.pEList = pResultSet;
+ sqlite3TreeViewSelect(0, &sSelect, 0);
+ }
+ }
+ if( sqlite3WhereTrace & 0x100 ){ /* Display all terms of the WHERE clause */
+ sqlite3DebugPrintf("---- WHERE clause at start of analysis:\n");
+ sqlite3WhereClausePrint(sWLB.pWC);
+ }
+#endif
+
+ if( nTabList!=1 || whereShortCut(&sWLB)==0 ){
+ rc = whereLoopAddAll(&sWLB);
+ if( rc ) goto whereBeginError;
+
+#ifdef SQLITE_ENABLE_STAT4
+ /* If one or more WhereTerm.truthProb values were used in estimating
+ ** loop parameters, but then those truthProb values were subsequently
+ ** changed based on STAT4 information while computing subsequent loops,
+ ** then we need to rerun the whole loop building process so that all
+ ** loops will be built using the revised truthProb values. */
+ if( sWLB.bldFlags2 & SQLITE_BLDF2_2NDPASS ){
+ WHERETRACE_ALL_LOOPS(pWInfo, sWLB.pWC);
+ WHERETRACE(0xffff,
+ ("**** Redo all loop computations due to"
+ " TERM_HIGHTRUTH changes ****\n"));
+ while( pWInfo->pLoops ){
+ WhereLoop *p = pWInfo->pLoops;
+ pWInfo->pLoops = p->pNextLoop;
+ whereLoopDelete(db, p);
+ }
+ rc = whereLoopAddAll(&sWLB);
+ if( rc ) goto whereBeginError;
+ }
+#endif
+ WHERETRACE_ALL_LOOPS(pWInfo, sWLB.pWC);
+
+ wherePathSolver(pWInfo, 0);
+ if( db->mallocFailed ) goto whereBeginError;
+ if( pWInfo->pOrderBy ){
+ wherePathSolver(pWInfo, pWInfo->nRowOut+1);
+ if( db->mallocFailed ) goto whereBeginError;
+ }
+ }
+ if( pWInfo->pOrderBy==0 && (db->flags & SQLITE_ReverseOrder)!=0 ){
+ pWInfo->revMask = ALLBITS;
+ }
+ if( pParse->nErr || NEVER(db->mallocFailed) ){
+ goto whereBeginError;
+ }
+#ifdef WHERETRACE_ENABLED
+ if( sqlite3WhereTrace ){
+ sqlite3DebugPrintf("---- Solution nRow=%d", pWInfo->nRowOut);
+ if( pWInfo->nOBSat>0 ){
+ sqlite3DebugPrintf(" ORDERBY=%d,0x%llx", pWInfo->nOBSat, pWInfo->revMask);
+ }
+ switch( pWInfo->eDistinct ){
+ case WHERE_DISTINCT_UNIQUE: {
+ sqlite3DebugPrintf(" DISTINCT=unique");
+ break;
+ }
+ case WHERE_DISTINCT_ORDERED: {
+ sqlite3DebugPrintf(" DISTINCT=ordered");
+ break;
+ }
+ case WHERE_DISTINCT_UNORDERED: {
+ sqlite3DebugPrintf(" DISTINCT=unordered");
+ break;
+ }
+ }
+ sqlite3DebugPrintf("\n");
+ for(ii=0; ii<pWInfo->nLevel; ii++){
+ sqlite3WhereLoopPrint(pWInfo->a[ii].pWLoop, sWLB.pWC);
+ }
+ }
+#endif
+
+ /* Attempt to omit tables from the join that do not affect the result.
+ ** For a table to not affect the result, the following must be true:
+ **
+ ** 1) The query must not be an aggregate.
+ ** 2) The table must be the RHS of a LEFT JOIN.
+ ** 3) Either the query must be DISTINCT, or else the ON or USING clause
+ ** must contain a constraint that limits the scan of the table to
+ ** at most a single row.
+ ** 4) The table must not be referenced by any part of the query apart
+ ** from its own USING or ON clause.
+ **
+ ** For example, given:
+ **
+ ** CREATE TABLE t1(ipk INTEGER PRIMARY KEY, v1);
+ ** CREATE TABLE t2(ipk INTEGER PRIMARY KEY, v2);
+ ** CREATE TABLE t3(ipk INTEGER PRIMARY KEY, v3);
+ **
+ ** then table t2 can be omitted from the following:
+ **
+ ** SELECT v1, v3 FROM t1
+ ** LEFT JOIN t2 ON (t1.ipk=t2.ipk)
+ ** LEFT JOIN t3 ON (t1.ipk=t3.ipk)
+ **
+ ** or from:
+ **
+ ** SELECT DISTINCT v1, v3 FROM t1
+ ** LEFT JOIN t2
+ ** LEFT JOIN t3 ON (t1.ipk=t3.ipk)
+ */
+ notReady = ~(Bitmask)0;
+ if( pWInfo->nLevel>=2
+ && pResultSet!=0 /* guarantees condition (1) above */
+ && OptimizationEnabled(db, SQLITE_OmitNoopJoin)
+ ){
+ int i;
+ Bitmask tabUsed = sqlite3WhereExprListUsage(pMaskSet, pResultSet);
+ if( sWLB.pOrderBy ){
+ tabUsed |= sqlite3WhereExprListUsage(pMaskSet, sWLB.pOrderBy);
+ }
+ for(i=pWInfo->nLevel-1; i>=1; i--){
+ WhereTerm *pTerm, *pEnd;
+ struct SrcList_item *pItem;
+ pLoop = pWInfo->a[i].pWLoop;
+ pItem = &pWInfo->pTabList->a[pLoop->iTab];
+ if( (pItem->fg.jointype & JT_LEFT)==0 ) continue;
+ if( (wctrlFlags & WHERE_WANT_DISTINCT)==0
+ && (pLoop->wsFlags & WHERE_ONEROW)==0
+ ){
+ continue;
+ }
+ if( (tabUsed & pLoop->maskSelf)!=0 ) continue;
+ pEnd = sWLB.pWC->a + sWLB.pWC->nTerm;
+ for(pTerm=sWLB.pWC->a; pTerm<pEnd; pTerm++){
+ if( (pTerm->prereqAll & pLoop->maskSelf)!=0 ){
+ if( !ExprHasProperty(pTerm->pExpr, EP_FromJoin)
+ || pTerm->pExpr->iRightJoinTable!=pItem->iCursor
+ ){
+ break;
+ }
+ }
+ }
+ if( pTerm<pEnd ) continue;
+ WHERETRACE(0xffff, ("-> drop loop %c not used\n", pLoop->cId));
+ notReady &= ~pLoop->maskSelf;
+ for(pTerm=sWLB.pWC->a; pTerm<pEnd; pTerm++){
+ if( (pTerm->prereqAll & pLoop->maskSelf)!=0 ){
+ pTerm->wtFlags |= TERM_CODED;
+ }
+ }
+ if( i!=pWInfo->nLevel-1 ){
+ int nByte = (pWInfo->nLevel-1-i) * sizeof(WhereLevel);
+ memmove(&pWInfo->a[i], &pWInfo->a[i+1], nByte);
+ }
+ pWInfo->nLevel--;
+ nTabList--;
+ }
+ }
+#if defined(WHERETRACE_ENABLED)
+ if( sqlite3WhereTrace & 0x100 ){ /* Display all terms of the WHERE clause */
+ sqlite3DebugPrintf("---- WHERE clause at end of analysis:\n");
+ sqlite3WhereClausePrint(sWLB.pWC);
+ }
+ WHERETRACE(0xffff,("*** Optimizer Finished ***\n"));
+#endif
+ pWInfo->pParse->nQueryLoop += pWInfo->nRowOut;
+
+ /* If the caller is an UPDATE or DELETE statement that is requesting
+ ** to use a one-pass algorithm, determine if this is appropriate.
+ **
+ ** A one-pass approach can be used if the caller has requested one
+ ** and either (a) the scan visits at most one row or (b) each
+ ** of the following are true:
+ **
+ ** * the caller has indicated that a one-pass approach can be used
+ ** with multiple rows (by setting WHERE_ONEPASS_MULTIROW), and
+ ** * the table is not a virtual table, and
+ ** * either the scan does not use the OR optimization or the caller
+ ** is a DELETE operation (WHERE_DUPLICATES_OK is only specified
+ ** for DELETE).
+ **
+ ** The last qualification is because an UPDATE statement uses
+ ** WhereInfo.aiCurOnePass[1] to determine whether or not it really can
+ ** use a one-pass approach, and this is not set accurately for scans
+ ** that use the OR optimization.
+ */
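+  /* For example (illustrative): a simple "DELETE FROM t1 WHERE a>10",
+  ** with no triggers or foreign keys, qualifies under (b): DELETE
+  ** requests WHERE_ONEPASS_MULTIROW (and allows WHERE_DUPLICATES_OK),
+  ** t1 is not a virtual table, and no OR optimization is involved, so
+  ** eOnePass is set to ONEPASS_MULTI below. */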
+ assert( (wctrlFlags & WHERE_ONEPASS_DESIRED)==0 || pWInfo->nLevel==1 );
+ if( (wctrlFlags & WHERE_ONEPASS_DESIRED)!=0 ){
+ int wsFlags = pWInfo->a[0].pWLoop->wsFlags;
+ int bOnerow = (wsFlags & WHERE_ONEROW)!=0;
+ assert( !(wsFlags & WHERE_VIRTUALTABLE) || IsVirtual(pTabList->a[0].pTab) );
+ if( bOnerow || (
+ 0!=(wctrlFlags & WHERE_ONEPASS_MULTIROW)
+ && !IsVirtual(pTabList->a[0].pTab)
+ && (0==(wsFlags & WHERE_MULTI_OR) || (wctrlFlags & WHERE_DUPLICATES_OK))
+ )){
+ pWInfo->eOnePass = bOnerow ? ONEPASS_SINGLE : ONEPASS_MULTI;
+ if( HasRowid(pTabList->a[0].pTab) && (wsFlags & WHERE_IDX_ONLY) ){
+ if( wctrlFlags & WHERE_ONEPASS_MULTIROW ){
+ bFordelete = OPFLAG_FORDELETE;
+ }
+ pWInfo->a[0].pWLoop->wsFlags = (wsFlags & ~WHERE_IDX_ONLY);
+ }
+ }
+ }
+
+ /* Open all tables in the pTabList and any indices selected for
+ ** searching those tables.
+ */
+ for(ii=0, pLevel=pWInfo->a; ii<nTabList; ii++, pLevel++){
+ Table *pTab; /* Table to open */
+ int iDb; /* Index of database containing table/index */
+ struct SrcList_item *pTabItem;
+
+ pTabItem = &pTabList->a[pLevel->iFrom];
+ pTab = pTabItem->pTab;
+ iDb = sqlite3SchemaToIndex(db, pTab->pSchema);
+ pLoop = pLevel->pWLoop;
+ if( (pTab->tabFlags & TF_Ephemeral)!=0 || pTab->pSelect ){
+ /* Do nothing */
+ }else
+#ifndef SQLITE_OMIT_VIRTUALTABLE
+ if( (pLoop->wsFlags & WHERE_VIRTUALTABLE)!=0 ){
+ const char *pVTab = (const char *)sqlite3GetVTable(db, pTab);
+ int iCur = pTabItem->iCursor;
+ sqlite3VdbeAddOp4(v, OP_VOpen, iCur, 0, 0, pVTab, P4_VTAB);
+ }else if( IsVirtual(pTab) ){
+ /* noop */
+ }else
+#endif
+ if( (pLoop->wsFlags & WHERE_IDX_ONLY)==0
+ && (wctrlFlags & WHERE_OR_SUBCLAUSE)==0 ){
+ int op = OP_OpenRead;
+ if( pWInfo->eOnePass!=ONEPASS_OFF ){
+ op = OP_OpenWrite;
+ pWInfo->aiCurOnePass[0] = pTabItem->iCursor;
+      }
+ sqlite3OpenTable(pParse, pTabItem->iCursor, iDb, pTab, op);
+ assert( pTabItem->iCursor==pLevel->iTabCur );
+ testcase( pWInfo->eOnePass==ONEPASS_OFF && pTab->nCol==BMS-1 );
+ testcase( pWInfo->eOnePass==ONEPASS_OFF && pTab->nCol==BMS );
+ if( pWInfo->eOnePass==ONEPASS_OFF
+ && pTab->nCol<BMS
+ && (pTab->tabFlags & (TF_HasGenerated|TF_WithoutRowid))==0
+ ){
+ /* If we know that only a prefix of the record will be used,
+ ** it is advantageous to reduce the "column count" field in
+ ** the P4 operand of the OP_OpenRead/Write opcode. */
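+        /* For example (illustrative): if colUsed==0x13, meaning that
+        ** only columns 0, 1 and 4 are used, the loop below leaves n==5
+        ** and the cursor decodes just the first five columns of each
+        ** row. */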
+ Bitmask b = pTabItem->colUsed;
+ int n = 0;
+ for(; b; b=b>>1, n++){}
+ sqlite3VdbeChangeP4(v, -1, SQLITE_INT_TO_PTR(n), P4_INT32);
+ assert( n<=pTab->nCol );
+ }
+#ifdef SQLITE_ENABLE_CURSOR_HINTS
+ if( pLoop->u.btree.pIndex!=0 ){
+ sqlite3VdbeChangeP5(v, OPFLAG_SEEKEQ|bFordelete);
+ }else
+#endif
+ {
+ sqlite3VdbeChangeP5(v, bFordelete);
+ }
+#ifdef SQLITE_ENABLE_COLUMN_USED_MASK
+ sqlite3VdbeAddOp4Dup8(v, OP_ColumnsUsed, pTabItem->iCursor, 0, 0,
+ (const u8*)&pTabItem->colUsed, P4_INT64);
+#endif
+ }else{
+ sqlite3TableLock(pParse, iDb, pTab->tnum, 0, pTab->zName);
+ }
+ if( pLoop->wsFlags & WHERE_INDEXED ){
+ Index *pIx = pLoop->u.btree.pIndex;
+ int iIndexCur;
+ int op = OP_OpenRead;
+ /* iAuxArg is always set to a positive value if ONEPASS is possible */
+ assert( iAuxArg!=0 || (pWInfo->wctrlFlags & WHERE_ONEPASS_DESIRED)==0 );
+ if( !HasRowid(pTab) && IsPrimaryKeyIndex(pIx)
+ && (wctrlFlags & WHERE_OR_SUBCLAUSE)!=0
+ ){
+ /* This is one term of an OR-optimization using the PRIMARY KEY of a
+ ** WITHOUT ROWID table. No need for a separate index */
+ iIndexCur = pLevel->iTabCur;
+ op = 0;
+ }else if( pWInfo->eOnePass!=ONEPASS_OFF ){
+ Index *pJ = pTabItem->pTab->pIndex;
+ iIndexCur = iAuxArg;
+ assert( wctrlFlags & WHERE_ONEPASS_DESIRED );
+ while( ALWAYS(pJ) && pJ!=pIx ){
+ iIndexCur++;
+ pJ = pJ->pNext;
+ }
+ op = OP_OpenWrite;
+ pWInfo->aiCurOnePass[1] = iIndexCur;
+ }else if( iAuxArg && (wctrlFlags & WHERE_OR_SUBCLAUSE)!=0 ){
+ iIndexCur = iAuxArg;
+ op = OP_ReopenIdx;
+ }else{
+ iIndexCur = pParse->nTab++;
+ }
+ pLevel->iIdxCur = iIndexCur;
+ assert( pIx->pSchema==pTab->pSchema );
+ assert( iIndexCur>=0 );
+ if( op ){
+ sqlite3VdbeAddOp3(v, op, iIndexCur, pIx->tnum, iDb);
+ sqlite3VdbeSetP4KeyInfo(pParse, pIx);
+ if( (pLoop->wsFlags & WHERE_CONSTRAINT)!=0
+ && (pLoop->wsFlags & (WHERE_COLUMN_RANGE|WHERE_SKIPSCAN))==0
+ && (pLoop->wsFlags & WHERE_BIGNULL_SORT)==0
+ && (pWInfo->wctrlFlags&WHERE_ORDERBY_MIN)==0
+ && pWInfo->eDistinct!=WHERE_DISTINCT_ORDERED
+ ){
+ sqlite3VdbeChangeP5(v, OPFLAG_SEEKEQ);
+ }
+ VdbeComment((v, "%s", pIx->zName));
+#ifdef SQLITE_ENABLE_COLUMN_USED_MASK
+ {
+ u64 colUsed = 0;
+ int ii, jj;
+ for(ii=0; ii<pIx->nColumn; ii++){
+ jj = pIx->aiColumn[ii];
+ if( jj<0 ) continue;
+ if( jj>63 ) jj = 63;
+ if( (pTabItem->colUsed & MASKBIT(jj))==0 ) continue;
+ colUsed |= ((u64)1)<<(ii<63 ? ii : 63);
+ }
+ sqlite3VdbeAddOp4Dup8(v, OP_ColumnsUsed, iIndexCur, 0, 0,
+ (u8*)&colUsed, P4_INT64);
+ }
+#endif /* SQLITE_ENABLE_COLUMN_USED_MASK */
+ }
+ }
+ if( iDb>=0 ) sqlite3CodeVerifySchema(pParse, iDb);
+ }
+ pWInfo->iTop = sqlite3VdbeCurrentAddr(v);
+ if( db->mallocFailed ) goto whereBeginError;
+
+ /* Generate the code to do the search. Each iteration of the for
+ ** loop below generates code for a single nested loop of the VM
+ ** program.
+ */
+ for(ii=0; ii<nTabList; ii++){
+ int addrExplain;
+ int wsFlags;
+ pLevel = &pWInfo->a[ii];
+ wsFlags = pLevel->pWLoop->wsFlags;
+#ifndef SQLITE_OMIT_AUTOMATIC_INDEX
+ if( (pLevel->pWLoop->wsFlags & WHERE_AUTO_INDEX)!=0 ){
+ constructAutomaticIndex(pParse, &pWInfo->sWC,
+ &pTabList->a[pLevel->iFrom], notReady, pLevel);
+ if( db->mallocFailed ) goto whereBeginError;
+ }
+#endif
+ addrExplain = sqlite3WhereExplainOneScan(
+ pParse, pTabList, pLevel, wctrlFlags
+ );
+ pLevel->addrBody = sqlite3VdbeCurrentAddr(v);
+ notReady = sqlite3WhereCodeOneLoopStart(pParse,v,pWInfo,ii,pLevel,notReady);
+ pWInfo->iContinue = pLevel->addrCont;
+ if( (wsFlags&WHERE_MULTI_OR)==0 && (wctrlFlags&WHERE_OR_SUBCLAUSE)==0 ){
+ sqlite3WhereAddScanStatus(v, pTabList, pLevel, addrExplain);
+ }
+ }
+
+ /* Done. */
+ VdbeModuleComment((v, "Begin WHERE-core"));
+ return pWInfo;
+
+ /* Jump here if malloc fails */
+whereBeginError:
+ if( pWInfo ){
+ pParse->nQueryLoop = pWInfo->savedNQueryLoop;
+ whereInfoFree(db, pWInfo);
+ }
+ return 0;
+}
+
+/*
+** Part of sqlite3WhereEnd() will rewrite opcodes to reference the
+** index rather than the main table. In SQLITE_DEBUG mode, we want
+** to trace those changes if PRAGMA vdbe_addoptrace=on. This routine
+** does that.
+*/
+#ifndef SQLITE_DEBUG
+# define OpcodeRewriteTrace(D,K,P) /* no-op */
+#else
+# define OpcodeRewriteTrace(D,K,P) sqlite3WhereOpcodeRewriteTrace(D,K,P)
+ static void sqlite3WhereOpcodeRewriteTrace(
+ sqlite3 *db,
+ int pc,
+ VdbeOp *pOp
+ ){
+ if( (db->flags & SQLITE_VdbeAddopTrace)==0 ) return;
+ sqlite3VdbePrintOp(0, pc, pOp);
+ }
+#endif
+
+/*
+** Generate the end of the WHERE loop. See comments on
+** sqlite3WhereBegin() for additional information.
+*/
+SQLITE_PRIVATE void sqlite3WhereEnd(WhereInfo *pWInfo){
+ Parse *pParse = pWInfo->pParse;
+ Vdbe *v = pParse->pVdbe;
+ int i;
+ WhereLevel *pLevel;
+ WhereLoop *pLoop;
+ SrcList *pTabList = pWInfo->pTabList;
+ sqlite3 *db = pParse->db;
+
+ /* Generate loop termination code.
+ */
+ VdbeModuleComment((v, "End WHERE-core"));
+ for(i=pWInfo->nLevel-1; i>=0; i--){
+ int addr;
+ pLevel = &pWInfo->a[i];
+ pLoop = pLevel->pWLoop;
+ if( pLevel->op!=OP_Noop ){
+#ifndef SQLITE_DISABLE_SKIPAHEAD_DISTINCT
+ int addrSeek = 0;
+ Index *pIdx;
+ int n;
+ if( pWInfo->eDistinct==WHERE_DISTINCT_ORDERED
+ && i==pWInfo->nLevel-1 /* Ticket [ef9318757b152e3] 2017-10-21 */
+ && (pLoop->wsFlags & WHERE_INDEXED)!=0
+ && (pIdx = pLoop->u.btree.pIndex)->hasStat1
+ && (n = pLoop->u.btree.nDistinctCol)>0
+ && pIdx->aiRowLogEst[n]>=36
+ ){
+ int r1 = pParse->nMem+1;
+ int j, op;
+ for(j=0; j<n; j++){
+ sqlite3VdbeAddOp3(v, OP_Column, pLevel->iIdxCur, j, r1+j);
+ }
+ pParse->nMem += n+1;
+ op = pLevel->op==OP_Prev ? OP_SeekLT : OP_SeekGT;
+ addrSeek = sqlite3VdbeAddOp4Int(v, op, pLevel->iIdxCur, 0, r1, n);
+ VdbeCoverageIf(v, op==OP_SeekLT);
+ VdbeCoverageIf(v, op==OP_SeekGT);
+ sqlite3VdbeAddOp2(v, OP_Goto, 1, pLevel->p2);
+ }
+#endif /* SQLITE_DISABLE_SKIPAHEAD_DISTINCT */
+ /* The common case: Advance to the next row */
+ sqlite3VdbeResolveLabel(v, pLevel->addrCont);
+ sqlite3VdbeAddOp3(v, pLevel->op, pLevel->p1, pLevel->p2, pLevel->p3);
+ sqlite3VdbeChangeP5(v, pLevel->p5);
+ VdbeCoverage(v);
+ VdbeCoverageIf(v, pLevel->op==OP_Next);
+ VdbeCoverageIf(v, pLevel->op==OP_Prev);
+ VdbeCoverageIf(v, pLevel->op==OP_VNext);
+ if( pLevel->regBignull ){
+ sqlite3VdbeResolveLabel(v, pLevel->addrBignull);
+ sqlite3VdbeAddOp2(v, OP_DecrJumpZero, pLevel->regBignull, pLevel->p2-1);
+ VdbeCoverage(v);
+ }
+#ifndef SQLITE_DISABLE_SKIPAHEAD_DISTINCT
+ if( addrSeek ) sqlite3VdbeJumpHere(v, addrSeek);
+#endif
+ }else{
+ sqlite3VdbeResolveLabel(v, pLevel->addrCont);
+ }
+ if( pLoop->wsFlags & WHERE_IN_ABLE && pLevel->u.in.nIn>0 ){
+ struct InLoop *pIn;
+ int j;
+ sqlite3VdbeResolveLabel(v, pLevel->addrNxt);
+ for(j=pLevel->u.in.nIn, pIn=&pLevel->u.in.aInLoop[j-1]; j>0; j--, pIn--){
+ sqlite3VdbeJumpHere(v, pIn->addrInTop+1);
+ if( pIn->eEndLoopOp!=OP_Noop ){
+ if( pIn->nPrefix ){
+ assert( pLoop->wsFlags & WHERE_IN_EARLYOUT );
+ if( pLevel->iLeftJoin ){
+ /* For LEFT JOIN queries, cursor pIn->iCur may not have been
+ ** opened yet. This occurs for WHERE clauses such as
+ ** "a = ? AND b IN (...)", where the index is on (a, b). If
+ ** the RHS of the (a=?) is NULL, then the "b IN (...)" may
+            ** never have been coded, but the body of the loop is run to
+ ** return the null-row. So, if the cursor is not open yet,
+ ** jump over the OP_Next or OP_Prev instruction about to
+ ** be coded. */
+ sqlite3VdbeAddOp2(v, OP_IfNotOpen, pIn->iCur,
+ sqlite3VdbeCurrentAddr(v) + 2 +
+ ((pLoop->wsFlags & WHERE_VIRTUALTABLE)==0)
+ );
+ VdbeCoverage(v);
+ }
+ if( (pLoop->wsFlags & WHERE_VIRTUALTABLE)==0 ){
+ sqlite3VdbeAddOp4Int(v, OP_IfNoHope, pLevel->iIdxCur,
+ sqlite3VdbeCurrentAddr(v)+2,
+ pIn->iBase, pIn->nPrefix);
+ VdbeCoverage(v);
+ }
+ }
+ sqlite3VdbeAddOp2(v, pIn->eEndLoopOp, pIn->iCur, pIn->addrInTop);
+ VdbeCoverage(v);
+ VdbeCoverageIf(v, pIn->eEndLoopOp==OP_Prev);
+ VdbeCoverageIf(v, pIn->eEndLoopOp==OP_Next);
+ }
+ sqlite3VdbeJumpHere(v, pIn->addrInTop-1);
+ }
+ }
+ sqlite3VdbeResolveLabel(v, pLevel->addrBrk);
+ if( pLevel->addrSkip ){
+ sqlite3VdbeGoto(v, pLevel->addrSkip);
+ VdbeComment((v, "next skip-scan on %s", pLoop->u.btree.pIndex->zName));
+ sqlite3VdbeJumpHere(v, pLevel->addrSkip);
+ sqlite3VdbeJumpHere(v, pLevel->addrSkip-2);
+ }
+#ifndef SQLITE_LIKE_DOESNT_MATCH_BLOBS
+ if( pLevel->addrLikeRep ){
+ sqlite3VdbeAddOp2(v, OP_DecrJumpZero, (int)(pLevel->iLikeRepCntr>>1),
+ pLevel->addrLikeRep);
+ VdbeCoverage(v);
+ }
+#endif
+ if( pLevel->iLeftJoin ){
+ int ws = pLoop->wsFlags;
+ addr = sqlite3VdbeAddOp1(v, OP_IfPos, pLevel->iLeftJoin); VdbeCoverage(v);
+ assert( (ws & WHERE_IDX_ONLY)==0 || (ws & WHERE_INDEXED)!=0 );
+ if( (ws & WHERE_IDX_ONLY)==0 ){
+ assert( pLevel->iTabCur==pTabList->a[pLevel->iFrom].iCursor );
+ sqlite3VdbeAddOp1(v, OP_NullRow, pLevel->iTabCur);
+ }
+ if( (ws & WHERE_INDEXED)
+ || ((ws & WHERE_MULTI_OR) && pLevel->u.pCovidx)
+ ){
+ sqlite3VdbeAddOp1(v, OP_NullRow, pLevel->iIdxCur);
+ }
+ if( pLevel->op==OP_Return ){
+ sqlite3VdbeAddOp2(v, OP_Gosub, pLevel->p1, pLevel->addrFirst);
+ }else{
+ sqlite3VdbeGoto(v, pLevel->addrFirst);
+ }
+ sqlite3VdbeJumpHere(v, addr);
+ }
+ VdbeModuleComment((v, "End WHERE-loop%d: %s", i,
+ pWInfo->pTabList->a[pLevel->iFrom].pTab->zName));
+ }
+
+ /* The "break" point is here, just past the end of the outer loop.
+ ** Set it.
+ */
+ sqlite3VdbeResolveLabel(v, pWInfo->iBreak);
+
+ assert( pWInfo->nLevel<=pTabList->nSrc );
+ for(i=0, pLevel=pWInfo->a; i<pWInfo->nLevel; i++, pLevel++){
+ int k, last;
+ VdbeOp *pOp;
+ Index *pIdx = 0;
+ struct SrcList_item *pTabItem = &pTabList->a[pLevel->iFrom];
+ Table *pTab = pTabItem->pTab;
+ assert( pTab!=0 );
+ pLoop = pLevel->pWLoop;
+
+ /* For a co-routine, change all OP_Column references to the table of
+    ** the co-routine into OP_Copy opcodes that read the result from a
+    ** register. OP_Rowid opcodes become OP_Null.
+ */
+ if( pTabItem->fg.viaCoroutine ){
+ testcase( pParse->db->mallocFailed );
+ translateColumnToCopy(pParse, pLevel->addrBody, pLevel->iTabCur,
+ pTabItem->regResult, 0);
+ continue;
+ }
+
+#ifdef SQLITE_ENABLE_EARLY_CURSOR_CLOSE
+ /* Close all of the cursors that were opened by sqlite3WhereBegin.
+ ** Except, do not close cursors that will be reused by the OR optimization
+ ** (WHERE_OR_SUBCLAUSE). And do not close the OP_OpenWrite cursors
+ ** created for the ONEPASS optimization.
+ */
+ if( (pTab->tabFlags & TF_Ephemeral)==0
+ && pTab->pSelect==0
+ && (pWInfo->wctrlFlags & WHERE_OR_SUBCLAUSE)==0
+ ){
+ int ws = pLoop->wsFlags;
+ if( pWInfo->eOnePass==ONEPASS_OFF && (ws & WHERE_IDX_ONLY)==0 ){
+ sqlite3VdbeAddOp1(v, OP_Close, pTabItem->iCursor);
+ }
+ if( (ws & WHERE_INDEXED)!=0
+ && (ws & (WHERE_IPK|WHERE_AUTO_INDEX))==0
+ && pLevel->iIdxCur!=pWInfo->aiCurOnePass[1]
+ ){
+ sqlite3VdbeAddOp1(v, OP_Close, pLevel->iIdxCur);
+ }
+ }
+#endif
+
+ /* If this scan uses an index, make VDBE code substitutions to read data
+ ** from the index instead of from the table where possible. In some cases
+ ** this optimization prevents the table from ever being read, which can
+ ** yield a significant performance boost.
+ **
+ ** Calls to the code generator in between sqlite3WhereBegin and
+ ** sqlite3WhereEnd will have created code that references the table
+ ** directly. This loop scans all that code looking for opcodes
+ ** that reference the table and converts them into opcodes that
+ ** reference the index.
+ */
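+    /* For example (illustrative): "OP_Column iTabCur X" is rewritten as
+    ** "OP_Column iIdxCur Y", where Y is the position of table column X
+    ** within the index, and "OP_Rowid iTabCur" becomes
+    ** "OP_IdxRowid iIdxCur". */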
+ if( pLoop->wsFlags & (WHERE_INDEXED|WHERE_IDX_ONLY) ){
+ pIdx = pLoop->u.btree.pIndex;
+ }else if( pLoop->wsFlags & WHERE_MULTI_OR ){
+ pIdx = pLevel->u.pCovidx;
+ }
+ if( pIdx
+ && (pWInfo->eOnePass==ONEPASS_OFF || !HasRowid(pIdx->pTable))
+ && !db->mallocFailed
+ ){
+ last = sqlite3VdbeCurrentAddr(v);
+ k = pLevel->addrBody;
+#ifdef SQLITE_DEBUG
+ if( db->flags & SQLITE_VdbeAddopTrace ){
+ printf("TRANSLATE opcodes in range %d..%d\n", k, last-1);
+ }
+#endif
+ pOp = sqlite3VdbeGetOp(v, k);
+ for(; k<last; k++, pOp++){
+ if( pOp->p1!=pLevel->iTabCur ) continue;
+ if( pOp->opcode==OP_Column
+#ifdef SQLITE_ENABLE_OFFSET_SQL_FUNC
+ || pOp->opcode==OP_Offset
+#endif
+ ){
+ int x = pOp->p2;
+ assert( pIdx->pTable==pTab );
+ if( !HasRowid(pTab) ){
+ Index *pPk = sqlite3PrimaryKeyIndex(pTab);
+ x = pPk->aiColumn[x];
+ assert( x>=0 );
+ }else{
+ testcase( x!=sqlite3StorageColumnToTable(pTab,x) );
+ x = sqlite3StorageColumnToTable(pTab,x);
+ }
+ x = sqlite3TableColumnToIndex(pIdx, x);
+ if( x>=0 ){
+ pOp->p2 = x;
+ pOp->p1 = pLevel->iIdxCur;
+ OpcodeRewriteTrace(db, k, pOp);
+ }
+ assert( (pLoop->wsFlags & WHERE_IDX_ONLY)==0 || x>=0
+ || pWInfo->eOnePass );
+ }else if( pOp->opcode==OP_Rowid ){
+ pOp->p1 = pLevel->iIdxCur;
+ pOp->opcode = OP_IdxRowid;
+ OpcodeRewriteTrace(db, k, pOp);
+ }else if( pOp->opcode==OP_IfNullRow ){
+ pOp->p1 = pLevel->iIdxCur;
+ OpcodeRewriteTrace(db, k, pOp);
+ }
+ }
+#ifdef SQLITE_DEBUG
+ if( db->flags & SQLITE_VdbeAddopTrace ) printf("TRANSLATE complete\n");
+#endif
+ }
+ }
+
+ /* Undo all Expr node modifications */
+ while( pWInfo->pExprMods ){
+ WhereExprMod *p = pWInfo->pExprMods;
+ pWInfo->pExprMods = p->pNext;
+ memcpy(p->pExpr, &p->orig, sizeof(p->orig));
+ sqlite3DbFree(db, p);
+ }
+
+ /* Final cleanup
+ */
+ pParse->nQueryLoop = pWInfo->savedNQueryLoop;
+ whereInfoFree(db, pWInfo);
+ return;
+}
+
+/************** End of where.c ***********************************************/
+/************** Begin file window.c ******************************************/
+/*
+** 2018 May 08
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+*/
+/* #include "sqliteInt.h" */
+
+#ifndef SQLITE_OMIT_WINDOWFUNC
+
+/*
+** SELECT REWRITING
+**
+** Any SELECT statement that contains one or more window functions in
+** either the select list or ORDER BY clause (the only two places window
+** functions may be used) is transformed by function sqlite3WindowRewrite()
+** in order to support window function processing. For example, with the
+** schema:
+**
+** CREATE TABLE t1(a, b, c, d, e, f, g);
+**
+** the statement:
+**
+** SELECT a+1, max(b) OVER (PARTITION BY c ORDER BY d) FROM t1 ORDER BY e;
+**
+** is transformed to:
+**
+** SELECT a+1, max(b) OVER (PARTITION BY c ORDER BY d) FROM (
+** SELECT a, e, c, d, b FROM t1 ORDER BY c, d
+** ) ORDER BY e;
+**
+** The flattening optimization is disabled when processing this transformed
+** SELECT statement. This allows the implementation of the window function
+** (in this case max()) to process rows sorted in order of (c, d), which
+** makes things easier for obvious reasons. More generally:
+**
+** * FROM, WHERE, GROUP BY and HAVING clauses are all moved to
+** the sub-query.
+**
+** * ORDER BY, LIMIT and OFFSET remain part of the parent query.
+**
+** * Terminals from each of the expression trees that make up the
+** select-list and ORDER BY expressions in the parent query are
+** selected by the sub-query. For the purposes of the transformation,
+** terminals are column references and aggregate functions.
+**
+** If there is more than one window function in the SELECT that uses
+** the same window declaration (the OVER bit), then a single scan may
+** be used to process more than one window function. For example:
+**
+** SELECT max(b) OVER (PARTITION BY c ORDER BY d),
+** min(e) OVER (PARTITION BY c ORDER BY d)
+** FROM t1;
+**
+** is transformed in the same way as the example above. However:
+**
+** SELECT max(b) OVER (PARTITION BY c ORDER BY d),
+** min(e) OVER (PARTITION BY a ORDER BY b)
+** FROM t1;
+**
+** must be transformed to:
+**
+**     SELECT max(b) OVER (PARTITION BY c ORDER BY d) FROM (
+**       SELECT e, min(e) OVER (PARTITION BY a ORDER BY b), c, d, b FROM (
+**         SELECT a, e, c, d, b FROM t1 ORDER BY a, b
+**       ) ORDER BY c, d
+**     ) ORDER BY e;
+**
+** so that both min() and max() may process rows in the order defined by
+** their respective window declarations.
+**
+** INTERFACE WITH SELECT.C
+**
+** When processing the rewritten SELECT statement, code in select.c calls
+** sqlite3WhereBegin() to begin iterating through the results of the
+** sub-query, which is always implemented as a co-routine. It then calls
+** sqlite3WindowCodeStep() to process rows and finish the scan by calling
+** sqlite3WhereEnd().
+**
+** sqlite3WindowCodeStep() generates VM code so that, for each row returned
+** by the sub-query a sub-routine (OP_Gosub) coded by select.c is invoked.
+** When the sub-routine is invoked:
+**
+** * The results of all window-functions for the row are stored
+** in the associated Window.regResult registers.
+**
+** * The required terminal values are stored in the current row of
+** temp table Window.iEphCsr.
+**
+** In some cases, depending on the window frame and the specific window
+** functions invoked, sqlite3WindowCodeStep() caches each entire partition
+** in a temp table before returning any rows. In other cases it does not.
+** This detail is encapsulated within this file; the code generated by
+** select.c is the same in either case.
+**
+** BUILT-IN WINDOW FUNCTIONS
+**
+** This implementation features the following built-in window functions:
+**
+** row_number()
+** rank()
+** dense_rank()
+** percent_rank()
+** cume_dist()
+** ntile(N)
+** lead(expr [, offset [, default]])
+** lag(expr [, offset [, default]])
+** first_value(expr)
+** last_value(expr)
+** nth_value(expr, N)
+**
+** These are the same built-in window functions supported by Postgres.
+** Although the behaviour of aggregate window functions (functions that
+** can be used as either aggregates or window functions) allows them to
+** be implemented using an API, built-in window functions are much more
+** esoteric. Additionally, some window functions (e.g. nth_value())
+** may only be implemented by caching the entire partition in memory.
+** As such, some built-in window functions use the same API as aggregate
+** window functions and some are implemented directly using VDBE
+** instructions. Additionally, for those functions that use the API, the
+** window frame is sometimes modified before the SELECT statement is
+** rewritten. For example, regardless of the specified window frame, the
+** row_number() function always uses:
+**
+** ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW
+**
+** See sqlite3WindowUpdate() for details.
+**
+** As well as some of the built-in window functions, aggregate window
+** functions min() and max() are implemented using VDBE instructions if
+** the start of the window frame is declared as anything other than
+** UNBOUNDED PRECEDING.
+*/
+
+/*
+** Implementation of built-in window function row_number(). Assumes that the
+** window frame has been coerced to:
+**
+** ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW
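+**
+** With that frame every row adds exactly one row to the window, so
+** a query such as "SELECT row_number() OVER (ORDER BY x) FROM t1"
+** simply numbers the rows: the step function increments a 64-bit
+** counter and the value function reports its current value.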
+*/
+static void row_numberStepFunc(
+ sqlite3_context *pCtx,
+ int nArg,
+ sqlite3_value **apArg
+){
+ i64 *p = (i64*)sqlite3_aggregate_context(pCtx, sizeof(*p));
+ if( p ) (*p)++;
+ UNUSED_PARAMETER(nArg);
+ UNUSED_PARAMETER(apArg);
+}
+static void row_numberValueFunc(sqlite3_context *pCtx){
+ i64 *p = (i64*)sqlite3_aggregate_context(pCtx, sizeof(*p));
+ sqlite3_result_int64(pCtx, (p ? *p : 0));
+}
+
+/*
+** Context object type used by rank(), dense_rank(), percent_rank() and
+** cume_dist().
+*/
+struct CallCount {
+  i64 nValue;                     /* Current value to be reported */
+  i64 nStep;                      /* Step/row counter; meaning varies by function */
+  i64 nTotal;                     /* Total number of rows in the partition */
+};
+
+/*
+** Implementation of built-in window function dense_rank(). Assumes that
+** the window frame has been set to:
+**
+** RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW
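+**
+** For example, with ORDER BY values 10, 10, 20, 30 dense_rank() yields
+** 1, 1, 2, 3: nValue is advanced only when a new peer group has added
+** at least one row to the frame since the last result was produced, so
+** peers share a value and no ranks are skipped.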
+*/
+static void dense_rankStepFunc(
+ sqlite3_context *pCtx,
+ int nArg,
+ sqlite3_value **apArg
+){
+ struct CallCount *p;
+ p = (struct CallCount*)sqlite3_aggregate_context(pCtx, sizeof(*p));
+ if( p ) p->nStep = 1;
+ UNUSED_PARAMETER(nArg);
+ UNUSED_PARAMETER(apArg);
+}
+static void dense_rankValueFunc(sqlite3_context *pCtx){
+ struct CallCount *p;
+ p = (struct CallCount*)sqlite3_aggregate_context(pCtx, sizeof(*p));
+ if( p ){
+ if( p->nStep ){
+ p->nValue++;
+ p->nStep = 0;
+ }
+ sqlite3_result_int64(pCtx, p->nValue);
+ }
+}
+
+/*
+** Implementation of built-in window function nth_value(). This
+** implementation is used in "slow mode" only - when the EXCLUDE clause
+** is not set to the default value "NO OTHERS".
+*/
+struct NthValueCtx {
+ i64 nStep;
+ sqlite3_value *pValue;
+};
+static void nth_valueStepFunc(
+ sqlite3_context *pCtx,
+ int nArg,
+ sqlite3_value **apArg
+){
+ struct NthValueCtx *p;
+ p = (struct NthValueCtx*)sqlite3_aggregate_context(pCtx, sizeof(*p));
+ if( p ){
+ i64 iVal;
+ switch( sqlite3_value_numeric_type(apArg[1]) ){
+ case SQLITE_INTEGER:
+ iVal = sqlite3_value_int64(apArg[1]);
+ break;
+ case SQLITE_FLOAT: {
+ double fVal = sqlite3_value_double(apArg[1]);
+ if( ((i64)fVal)!=fVal ) goto error_out;
+ iVal = (i64)fVal;
+ break;
+ }
+ default:
+ goto error_out;
+ }
+ if( iVal<=0 ) goto error_out;
+
+ p->nStep++;
+ if( iVal==p->nStep ){
+ p->pValue = sqlite3_value_dup(apArg[0]);
+ if( !p->pValue ){
+ sqlite3_result_error_nomem(pCtx);
+ }
+ }
+ }
+ UNUSED_PARAMETER(nArg);
+ UNUSED_PARAMETER(apArg);
+ return;
+
+ error_out:
+ sqlite3_result_error(
+ pCtx, "second argument to nth_value must be a positive integer", -1
+ );
+}
+static void nth_valueFinalizeFunc(sqlite3_context *pCtx){
+ struct NthValueCtx *p;
+ p = (struct NthValueCtx*)sqlite3_aggregate_context(pCtx, 0);
+ if( p && p->pValue ){
+ sqlite3_result_value(pCtx, p->pValue);
+ sqlite3_value_free(p->pValue);
+ p->pValue = 0;
+ }
+}
+#define nth_valueInvFunc noopStepFunc
+#define nth_valueValueFunc noopValueFunc
+
+static void first_valueStepFunc(
+ sqlite3_context *pCtx,
+ int nArg,
+ sqlite3_value **apArg
+){
+ struct NthValueCtx *p;
+ p = (struct NthValueCtx*)sqlite3_aggregate_context(pCtx, sizeof(*p));
+ if( p && p->pValue==0 ){
+ p->pValue = sqlite3_value_dup(apArg[0]);
+ if( !p->pValue ){
+ sqlite3_result_error_nomem(pCtx);
+ }
+ }
+ UNUSED_PARAMETER(nArg);
+ UNUSED_PARAMETER(apArg);
+}
+static void first_valueFinalizeFunc(sqlite3_context *pCtx){
+ struct NthValueCtx *p;
+ p = (struct NthValueCtx*)sqlite3_aggregate_context(pCtx, sizeof(*p));
+ if( p && p->pValue ){
+ sqlite3_result_value(pCtx, p->pValue);
+ sqlite3_value_free(p->pValue);
+ p->pValue = 0;
+ }
+}
+#define first_valueInvFunc noopStepFunc
+#define first_valueValueFunc noopValueFunc
+
+/*
+** Implementation of built-in window function rank(). Assumes that
+** the window frame has been set to:
+**
+** RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW
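+**
+** For example, with ORDER BY values 10, 10, 20 rank() yields 1, 1, 3:
+** nStep counts every row added to the frame while nValue latches that
+** count at the first row of each peer group.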
+*/
+static void rankStepFunc(
+ sqlite3_context *pCtx,
+ int nArg,
+ sqlite3_value **apArg
+){
+ struct CallCount *p;
+ p = (struct CallCount*)sqlite3_aggregate_context(pCtx, sizeof(*p));
+ if( p ){
+ p->nStep++;
+ if( p->nValue==0 ){
+ p->nValue = p->nStep;
+ }
+ }
+ UNUSED_PARAMETER(nArg);
+ UNUSED_PARAMETER(apArg);
+}
+static void rankValueFunc(sqlite3_context *pCtx){
+ struct CallCount *p;
+ p = (struct CallCount*)sqlite3_aggregate_context(pCtx, sizeof(*p));
+ if( p ){
+ sqlite3_result_int64(pCtx, p->nValue);
+ p->nValue = 0;
+ }
+}
+
+/*
+** Implementation of built-in window function percent_rank(). Assumes that
+** the window frame has been set to:
+**
+** GROUPS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING
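+**
+** For example, over a partition of four rows with distinct ORDER BY
+** values the results are 0.0, 1/3, 2/3 and 1.0: nTotal holds the
+** number of rows in the partition, nStep counts the rows that have
+** already dropped out of the frame (the rows ranked ahead of the
+** current one), and the reported value is nStep/(nTotal-1).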
+*/
+static void percent_rankStepFunc(
+ sqlite3_context *pCtx,
+ int nArg,
+ sqlite3_value **apArg
+){
+ struct CallCount *p;
+ UNUSED_PARAMETER(nArg); assert( nArg==0 );
+ UNUSED_PARAMETER(apArg);
+ p = (struct CallCount*)sqlite3_aggregate_context(pCtx, sizeof(*p));
+ if( p ){
+ p->nTotal++;
+ }
+}
+static void percent_rankInvFunc(
+ sqlite3_context *pCtx,
+ int nArg,
+ sqlite3_value **apArg
+){
+ struct CallCount *p;
+ UNUSED_PARAMETER(nArg); assert( nArg==0 );
+ UNUSED_PARAMETER(apArg);
+ p = (struct CallCount*)sqlite3_aggregate_context(pCtx, sizeof(*p));
+ p->nStep++;
+}
+static void percent_rankValueFunc(sqlite3_context *pCtx){
+ struct CallCount *p;
+ p = (struct CallCount*)sqlite3_aggregate_context(pCtx, sizeof(*p));
+ if( p ){
+ p->nValue = p->nStep;
+ if( p->nTotal>1 ){
+ double r = (double)p->nValue / (double)(p->nTotal-1);
+ sqlite3_result_double(pCtx, r);
+ }else{
+ sqlite3_result_double(pCtx, 0.0);
+ }
+ }
+}
+#define percent_rankFinalizeFunc percent_rankValueFunc
+
+/*
+** Implementation of built-in window function cume_dist(). Assumes that
+** the window frame has been set to:
+**
+** GROUPS BETWEEN 1 FOLLOWING AND UNBOUNDED FOLLOWING
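+**
+** For example, over a partition of four rows with distinct ORDER BY
+** values the results are 0.25, 0.5, 0.75 and 1.0: nTotal ends up
+** holding the partition size, nStep counts rows up to and including
+** the current peer group, and the reported value is nStep/nTotal.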
+*/
+static void cume_distStepFunc(
+ sqlite3_context *pCtx,
+ int nArg,
+ sqlite3_value **apArg
+){
+ struct CallCount *p;
+ UNUSED_PARAMETER(nArg); assert( nArg==0 );
+ UNUSED_PARAMETER(apArg);
+ p = (struct CallCount*)sqlite3_aggregate_context(pCtx, sizeof(*p));
+ if( p ){
+ p->nTotal++;
+ }
+}
+static void cume_distInvFunc(
+ sqlite3_context *pCtx,
+ int nArg,
+ sqlite3_value **apArg
+){
+ struct CallCount *p;
+ UNUSED_PARAMETER(nArg); assert( nArg==0 );
+ UNUSED_PARAMETER(apArg);
+ p = (struct CallCount*)sqlite3_aggregate_context(pCtx, sizeof(*p));
+ p->nStep++;
+}
+static void cume_distValueFunc(sqlite3_context *pCtx){
+ struct CallCount *p;
+ p = (struct CallCount*)sqlite3_aggregate_context(pCtx, 0);
+ if( p ){
+ double r = (double)(p->nStep) / (double)(p->nTotal);
+ sqlite3_result_double(pCtx, r);
+ }
+}
+#define cume_distFinalizeFunc cume_distValueFunc
+
+/*
+** Context object for ntile() window function.
+*/
+struct NtileCtx {
+ i64 nTotal; /* Total rows in partition */
+ i64 nParam; /* Parameter passed to ntile(N) */
+ i64 iRow; /* Current row */
+};
+
+/*
+** Implementation of ntile(). This assumes that the window frame has
+** been coerced to:
+**
+**   ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING
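+**
+** For example, ntile(3) over a 7-row partition produces groups of
+** sizes 3, 2 and 2: in the value function below nSize==7/3==2 and
+** nLarge==1, so exactly one group is given an extra row.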
+*/
+static void ntileStepFunc(
+ sqlite3_context *pCtx,
+ int nArg,
+ sqlite3_value **apArg
+){
+ struct NtileCtx *p;
+ assert( nArg==1 ); UNUSED_PARAMETER(nArg);
+ p = (struct NtileCtx*)sqlite3_aggregate_context(pCtx, sizeof(*p));
+ if( p ){
+ if( p->nTotal==0 ){
+ p->nParam = sqlite3_value_int64(apArg[0]);
+ if( p->nParam<=0 ){
+ sqlite3_result_error(
+ pCtx, "argument of ntile must be a positive integer", -1
+ );
+ }
+ }
+ p->nTotal++;
+ }
+}
+static void ntileInvFunc(
+ sqlite3_context *pCtx,
+ int nArg,
+ sqlite3_value **apArg
+){
+ struct NtileCtx *p;
+ assert( nArg==1 ); UNUSED_PARAMETER(nArg);
+ UNUSED_PARAMETER(apArg);
+ p = (struct NtileCtx*)sqlite3_aggregate_context(pCtx, sizeof(*p));
+ p->iRow++;
+}
+static void ntileValueFunc(sqlite3_context *pCtx){
+ struct NtileCtx *p;
+ p = (struct NtileCtx*)sqlite3_aggregate_context(pCtx, sizeof(*p));
+ if( p && p->nParam>0 ){
+ int nSize = (p->nTotal / p->nParam);
+ if( nSize==0 ){
+ sqlite3_result_int64(pCtx, p->iRow+1);
+ }else{
+ i64 nLarge = p->nTotal - p->nParam*nSize;
+ i64 iSmall = nLarge*(nSize+1);
+ i64 iRow = p->iRow;
+
+ assert( (nLarge*(nSize+1) + (p->nParam-nLarge)*nSize)==p->nTotal );
+
+ if( iRow<iSmall ){
+ sqlite3_result_int64(pCtx, 1 + iRow/(nSize+1));
+ }else{
+ sqlite3_result_int64(pCtx, 1 + nLarge + (iRow-iSmall)/nSize);
+ }
+ }
+ }
+}
+#define ntileFinalizeFunc ntileValueFunc
+
+/*
+** Context object for last_value() window function.
+*/
+struct LastValueCtx {
+ sqlite3_value *pVal;
+ int nVal;
+};
+
+/*
+** Implementation of last_value().
+*/
+static void last_valueStepFunc(
+ sqlite3_context *pCtx,
+ int nArg,
+ sqlite3_value **apArg
+){
+ struct LastValueCtx *p;
+ UNUSED_PARAMETER(nArg);
+ p = (struct LastValueCtx*)sqlite3_aggregate_context(pCtx, sizeof(*p));
+ if( p ){
+ sqlite3_value_free(p->pVal);
+ p->pVal = sqlite3_value_dup(apArg[0]);
+ if( p->pVal==0 ){
+ sqlite3_result_error_nomem(pCtx);
+ }else{
+ p->nVal++;
+ }
+ }
+}
+static void last_valueInvFunc(
+ sqlite3_context *pCtx,
+ int nArg,
+ sqlite3_value **apArg
+){
+ struct LastValueCtx *p;
+ UNUSED_PARAMETER(nArg);
+ UNUSED_PARAMETER(apArg);
+ p = (struct LastValueCtx*)sqlite3_aggregate_context(pCtx, sizeof(*p));
+ if( ALWAYS(p) ){
+ p->nVal--;
+ if( p->nVal==0 ){
+ sqlite3_value_free(p->pVal);
+ p->pVal = 0;
+ }
+ }
+}
+static void last_valueValueFunc(sqlite3_context *pCtx){
+ struct LastValueCtx *p;
+ p = (struct LastValueCtx*)sqlite3_aggregate_context(pCtx, 0);
+ if( p && p->pVal ){
+ sqlite3_result_value(pCtx, p->pVal);
+ }
+}
+static void last_valueFinalizeFunc(sqlite3_context *pCtx){
+ struct LastValueCtx *p;
+ p = (struct LastValueCtx*)sqlite3_aggregate_context(pCtx, sizeof(*p));
+ if( p && p->pVal ){
+ sqlite3_result_value(pCtx, p->pVal);
+ sqlite3_value_free(p->pVal);
+ p->pVal = 0;
+ }
+}
+
+/*
+** Static names for the built-in window functions. These static
+** names are used, rather than string literals, so that FuncDef objects
+** can be associated with a particular window function by direct
+** comparison of the zName pointer. Example:
+**
+**     if( pFuncDef->zName==row_numberName ){ ... }
+*/
+static const char row_numberName[] = "row_number";
+static const char dense_rankName[] = "dense_rank";
+static const char rankName[] = "rank";
+static const char percent_rankName[] = "percent_rank";
+static const char cume_distName[] = "cume_dist";
+static const char ntileName[] = "ntile";
+static const char last_valueName[] = "last_value";
+static const char nth_valueName[] = "nth_value";
+static const char first_valueName[] = "first_value";
+static const char leadName[] = "lead";
+static const char lagName[] = "lag";
+
+/*
+** No-op implementations of xStep() and xFinalize(). Used as place-holders
+** for built-in window functions that never call those interfaces.
+**
+** The noopValueFunc() is called but is expected to do nothing. The
+** noopStepFunc() is never called, and so it is marked with NO_TEST to
+** let the test coverage routine know not to expect this function to be
+** invoked.
+*/
+static void noopStepFunc( /*NO_TEST*/
+ sqlite3_context *p, /*NO_TEST*/
+ int n, /*NO_TEST*/
+ sqlite3_value **a /*NO_TEST*/
+){ /*NO_TEST*/
+ UNUSED_PARAMETER(p); /*NO_TEST*/
+ UNUSED_PARAMETER(n); /*NO_TEST*/
+ UNUSED_PARAMETER(a); /*NO_TEST*/
+ assert(0); /*NO_TEST*/
+} /*NO_TEST*/
+static void noopValueFunc(sqlite3_context *p){ UNUSED_PARAMETER(p); /*no-op*/ }
+
+/* Window functions that use all window interfaces: xStep, xFinal,
+** xValue, and xInverse */
+#define WINDOWFUNCALL(name,nArg,extra) { \
+ nArg, (SQLITE_UTF8|SQLITE_FUNC_WINDOW|extra), 0, 0, \
+ name ## StepFunc, name ## FinalizeFunc, name ## ValueFunc, \
+ name ## InvFunc, name ## Name, {0} \
+}
+
+/* Window functions that are implemented using bytecode and thus have
+** no-op routines for their methods */
+#define WINDOWFUNCNOOP(name,nArg,extra) { \
+ nArg, (SQLITE_UTF8|SQLITE_FUNC_WINDOW|extra), 0, 0, \
+ noopStepFunc, noopValueFunc, noopValueFunc, \
+ noopStepFunc, name ## Name, {0} \
+}
+
+/* Window functions that use xStep, use the same routine for both
+** xFinalize and xValue, and never call xInverse. */
+#define WINDOWFUNCX(name,nArg,extra) { \
+ nArg, (SQLITE_UTF8|SQLITE_FUNC_WINDOW|extra), 0, 0, \
+ name ## StepFunc, name ## ValueFunc, name ## ValueFunc, \
+ noopStepFunc, name ## Name, {0} \
+}
+
+
+/*
+** Register those built-in window functions that are not also aggregates.
+*/
+SQLITE_PRIVATE void sqlite3WindowFunctions(void){
+ static FuncDef aWindowFuncs[] = {
+ WINDOWFUNCX(row_number, 0, 0),
+ WINDOWFUNCX(dense_rank, 0, 0),
+ WINDOWFUNCX(rank, 0, 0),
+ WINDOWFUNCALL(percent_rank, 0, 0),
+ WINDOWFUNCALL(cume_dist, 0, 0),
+ WINDOWFUNCALL(ntile, 1, 0),
+ WINDOWFUNCALL(last_value, 1, 0),
+ WINDOWFUNCALL(nth_value, 2, 0),
+ WINDOWFUNCALL(first_value, 1, 0),
+ WINDOWFUNCNOOP(lead, 1, 0),
+ WINDOWFUNCNOOP(lead, 2, 0),
+ WINDOWFUNCNOOP(lead, 3, 0),
+ WINDOWFUNCNOOP(lag, 1, 0),
+ WINDOWFUNCNOOP(lag, 2, 0),
+ WINDOWFUNCNOOP(lag, 3, 0),
+ };
+ sqlite3InsertBuiltinFuncs(aWindowFuncs, ArraySize(aWindowFuncs));
+}
+
+static Window *windowFind(Parse *pParse, Window *pList, const char *zName){
+ Window *p;
+ for(p=pList; p; p=p->pNextWin){
+ if( sqlite3StrICmp(p->zName, zName)==0 ) break;
+ }
+ if( p==0 ){
+ sqlite3ErrorMsg(pParse, "no such window: %s", zName);
+ }
+ return p;
+}
+
+/*
+** This function is called immediately after resolving the function name
+** for a window function within a SELECT statement. Argument pList is a
+** linked list of WINDOW definitions for the current SELECT statement.
+** Argument pFunc is the function definition just resolved and pWin
+** is the Window object representing the associated OVER clause. This
+** function updates the contents of pWin as follows:
+**
+**   * If the OVER clause referred to a named window (as in "max(x) OVER win"),
+** search list pList for a matching WINDOW definition, and update pWin
+** accordingly. If no such WINDOW clause can be found, leave an error
+** in pParse.
+**
+** * If the function is a built-in window function that requires the
+** window to be coerced (see "BUILT-IN WINDOW FUNCTIONS" at the top
+** of this file), pWin is updated here.
+*/
+SQLITE_PRIVATE void sqlite3WindowUpdate(
+ Parse *pParse,
+ Window *pList, /* List of named windows for this SELECT */
+ Window *pWin, /* Window frame to update */
+ FuncDef *pFunc /* Window function definition */
+){
+ if( pWin->zName && pWin->eFrmType==0 ){
+ Window *p = windowFind(pParse, pList, pWin->zName);
+ if( p==0 ) return;
+ pWin->pPartition = sqlite3ExprListDup(pParse->db, p->pPartition, 0);
+ pWin->pOrderBy = sqlite3ExprListDup(pParse->db, p->pOrderBy, 0);
+ pWin->pStart = sqlite3ExprDup(pParse->db, p->pStart, 0);
+ pWin->pEnd = sqlite3ExprDup(pParse->db, p->pEnd, 0);
+ pWin->eStart = p->eStart;
+ pWin->eEnd = p->eEnd;
+ pWin->eFrmType = p->eFrmType;
+ pWin->eExclude = p->eExclude;
+ }else{
+ sqlite3WindowChain(pParse, pWin, pList);
+ }
+ if( (pWin->eFrmType==TK_RANGE)
+ && (pWin->pStart || pWin->pEnd)
+ && (pWin->pOrderBy==0 || pWin->pOrderBy->nExpr!=1)
+ ){
+ sqlite3ErrorMsg(pParse,
+ "RANGE with offset PRECEDING/FOLLOWING requires one ORDER BY expression"
+ );
+ }else
+ if( pFunc->funcFlags & SQLITE_FUNC_WINDOW ){
+ sqlite3 *db = pParse->db;
+ if( pWin->pFilter ){
+ sqlite3ErrorMsg(pParse,
+ "FILTER clause may only be used with aggregate window functions"
+ );
+ }else{
+ struct WindowUpdate {
+ const char *zFunc;
+ int eFrmType;
+ int eStart;
+ int eEnd;
+ } aUp[] = {
+ { row_numberName, TK_ROWS, TK_UNBOUNDED, TK_CURRENT },
+ { dense_rankName, TK_RANGE, TK_UNBOUNDED, TK_CURRENT },
+ { rankName, TK_RANGE, TK_UNBOUNDED, TK_CURRENT },
+ { percent_rankName, TK_GROUPS, TK_CURRENT, TK_UNBOUNDED },
+ { cume_distName, TK_GROUPS, TK_FOLLOWING, TK_UNBOUNDED },
+ { ntileName, TK_ROWS, TK_CURRENT, TK_UNBOUNDED },
+ { leadName, TK_ROWS, TK_UNBOUNDED, TK_UNBOUNDED },
+ { lagName, TK_ROWS, TK_UNBOUNDED, TK_CURRENT },
+ };
+ int i;
+ for(i=0; i<ArraySize(aUp); i++){
+ if( pFunc->zName==aUp[i].zFunc ){
+ sqlite3ExprDelete(db, pWin->pStart);
+ sqlite3ExprDelete(db, pWin->pEnd);
+ pWin->pEnd = pWin->pStart = 0;
+ pWin->eFrmType = aUp[i].eFrmType;
+ pWin->eStart = aUp[i].eStart;
+ pWin->eEnd = aUp[i].eEnd;
+ pWin->eExclude = 0;
+ if( pWin->eStart==TK_FOLLOWING ){
+ pWin->pStart = sqlite3Expr(db, TK_INTEGER, "1");
+ }
+ break;
+ }
+ }
+ }
+ }
+ pWin->pFunc = pFunc;
+}
+
+/*
+** Context object passed through sqlite3WalkExprList() to
+** selectWindowRewriteExprCb() by selectWindowRewriteEList().
+*/
+typedef struct WindowRewrite WindowRewrite;
+struct WindowRewrite {
+ Window *pWin;
+ SrcList *pSrc;
+ ExprList *pSub;
+ Table *pTab;
+ Select *pSubSelect; /* Current sub-select, if any */
+};
+
+/*
+** Callback function used by selectWindowRewriteEList(). If necessary,
+** this function appends to the output expression-list and updates
+** expression (*ppExpr) in place.
+*/
+static int selectWindowRewriteExprCb(Walker *pWalker, Expr *pExpr){
+ struct WindowRewrite *p = pWalker->u.pRewrite;
+ Parse *pParse = pWalker->pParse;
+ assert( p!=0 );
+ assert( p->pWin!=0 );
+
+ /* If this function is being called from within a scalar sub-select
+  ** that is used by the SELECT statement being processed, only process
+ ** TK_COLUMN expressions that refer to it (the outer SELECT). Do
+ ** not process aggregates or window functions at all, as they belong
+ ** to the scalar sub-select. */
+ if( p->pSubSelect ){
+ if( pExpr->op!=TK_COLUMN ){
+ return WRC_Continue;
+ }else{
+ int nSrc = p->pSrc->nSrc;
+ int i;
+ for(i=0; i<nSrc; i++){
+ if( pExpr->iTable==p->pSrc->a[i].iCursor ) break;
+ }
+ if( i==nSrc ) return WRC_Continue;
+ }
+ }
+
+ switch( pExpr->op ){
+
+ case TK_FUNCTION:
+ if( !ExprHasProperty(pExpr, EP_WinFunc) ){
+ break;
+ }else{
+ Window *pWin;
+ for(pWin=p->pWin; pWin; pWin=pWin->pNextWin){
+ if( pExpr->y.pWin==pWin ){
+ assert( pWin->pOwner==pExpr );
+ return WRC_Prune;
+ }
+ }
+ }
+ /* no break */ deliberate_fall_through
+
+ case TK_AGG_FUNCTION:
+ case TK_COLUMN: {
+ int iCol = -1;
+ if( p->pSub ){
+ int i;
+ for(i=0; i<p->pSub->nExpr; i++){
+ if( 0==sqlite3ExprCompare(0, p->pSub->a[i].pExpr, pExpr, -1) ){
+ iCol = i;
+ break;
+ }
+ }
+ }
+ if( iCol<0 ){
+ Expr *pDup = sqlite3ExprDup(pParse->db, pExpr, 0);
+ if( pDup && pDup->op==TK_AGG_FUNCTION ) pDup->op = TK_FUNCTION;
+ p->pSub = sqlite3ExprListAppend(pParse, p->pSub, pDup);
+ }
+ if( p->pSub ){
+ int f = pExpr->flags & EP_Collate;
+ assert( ExprHasProperty(pExpr, EP_Static)==0 );
+ ExprSetProperty(pExpr, EP_Static);
+ sqlite3ExprDelete(pParse->db, pExpr);
+ ExprClearProperty(pExpr, EP_Static);
+ memset(pExpr, 0, sizeof(Expr));
+
+ pExpr->op = TK_COLUMN;
+ pExpr->iColumn = (iCol<0 ? p->pSub->nExpr-1: iCol);
+ pExpr->iTable = p->pWin->iEphCsr;
+ pExpr->y.pTab = p->pTab;
+ pExpr->flags = f;
+ }
+ if( pParse->db->mallocFailed ) return WRC_Abort;
+ break;
+ }
+
+ default: /* no-op */
+ break;
+ }
+
+ return WRC_Continue;
+}
+static int selectWindowRewriteSelectCb(Walker *pWalker, Select *pSelect){
+ struct WindowRewrite *p = pWalker->u.pRewrite;
+ Select *pSave = p->pSubSelect;
+ if( pSave==pSelect ){
+ return WRC_Continue;
+ }else{
+ p->pSubSelect = pSelect;
+ sqlite3WalkSelect(pWalker, pSelect);
+ p->pSubSelect = pSave;
+ }
+ return WRC_Prune;
+}
+
+
+/*
+** Iterate through each expression in expression-list pEList. For each:
+**
+** * TK_COLUMN,
+** * aggregate function, or
+** * window function with a Window object that is not a member of the
+** Window list passed as the second argument (pWin).
+**
+** Append the node to the output expression-list (*ppSub), and replace
+** it with a TK_COLUMN that reads the (N-1)th element of table
+** pWin->iEphCsr, where N is the number of elements in (*ppSub) after
+** appending the new one.
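+**
+** For example (illustrative), when rewriting
+**
+**   SELECT x+1, max(y) OVER (ORDER BY z) FROM t1;
+**
+** the terminal "x" is appended to (*ppSub) and the "x" node inside
+** "x+1" is converted in place into a TK_COLUMN that reads the value
+** back from the ephemeral table.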
+*/
+static void selectWindowRewriteEList(
+ Parse *pParse,
+ Window *pWin,
+ SrcList *pSrc,
+ ExprList *pEList, /* Rewrite expressions in this list */
+ Table *pTab,
+ ExprList **ppSub /* IN/OUT: Sub-select expression-list */
+){
+ Walker sWalker;
+ WindowRewrite sRewrite;
+
+ assert( pWin!=0 );
+ memset(&sWalker, 0, sizeof(Walker));
+ memset(&sRewrite, 0, sizeof(WindowRewrite));
+
+ sRewrite.pSub = *ppSub;
+ sRewrite.pWin = pWin;
+ sRewrite.pSrc = pSrc;
+ sRewrite.pTab = pTab;
+
+ sWalker.pParse = pParse;
+ sWalker.xExprCallback = selectWindowRewriteExprCb;
+ sWalker.xSelectCallback = selectWindowRewriteSelectCb;
+ sWalker.u.pRewrite = &sRewrite;
+
+ (void)sqlite3WalkExprList(&sWalker, pEList);
+
+ *ppSub = sRewrite.pSub;
+}
+
+/*
+** Append a copy of each expression in expression-list pAppend to
+** expression list pList. Return a pointer to the result list.
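+**
+** If bIntToNull is true, any constant integer term in the copied
+** expressions is changed to NULL, presumably so that when the result
+** is used as an ORDER BY the integer cannot be misinterpreted as a
+** reference to a result-set column.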
+*/
+static ExprList *exprListAppendList(
+ Parse *pParse, /* Parsing context */
+ ExprList *pList, /* List to which to append. Might be NULL */
+ ExprList *pAppend, /* List of values to append. Might be NULL */
+ int bIntToNull
+){
+ if( pAppend ){
+ int i;
+ int nInit = pList ? pList->nExpr : 0;
+ for(i=0; i<pAppend->nExpr; i++){
+ Expr *pDup = sqlite3ExprDup(pParse->db, pAppend->a[i].pExpr, 0);
+ assert( pDup==0 || !ExprHasProperty(pDup, EP_MemToken) );
+ if( bIntToNull && pDup ){
+ int iDummy;
+ Expr *pSub;
+ for(pSub=pDup; ExprHasProperty(pSub, EP_Skip); pSub=pSub->pLeft){
+ assert( pSub );
+ }
+ if( sqlite3ExprIsInteger(pSub, &iDummy) ){
+ pSub->op = TK_NULL;
+ pSub->flags &= ~(EP_IntValue|EP_IsTrue|EP_IsFalse);
+ pSub->u.zToken = 0;
+ }
+ }
+ pList = sqlite3ExprListAppend(pParse, pList, pDup);
+ if( pList ) pList->a[nInit+i].sortFlags = pAppend->a[i].sortFlags;
+ }
+ }
+ return pList;
+}
+
+/*
+** When rewriting a query, if the new subquery in the FROM clause
+** contains TK_AGG_FUNCTION nodes that refer to an outer query,
+** then we have to increase the Expr->op2 values of those nodes
+** due to the extra subquery layer that was added.
+**
+** See also the incrAggDepth() routine in resolve.c
+*/
+static int sqlite3WindowExtraAggFuncDepth(Walker *pWalker, Expr *pExpr){
+ if( pExpr->op==TK_AGG_FUNCTION
+ && pExpr->op2>=pWalker->walkerDepth
+ ){
+ pExpr->op2++;
+ }
+ return WRC_Continue;
+}
+
+/*
+** If the SELECT statement passed as the second argument does not invoke
+** any SQL window functions, this function is a no-op. Otherwise, it
+** rewrites the SELECT statement so that window function xStep functions
+** are invoked in the correct order as described under "SELECT REWRITING"
+** at the top of this file.
+*/
+SQLITE_PRIVATE int sqlite3WindowRewrite(Parse *pParse, Select *p){
+ int rc = SQLITE_OK;
+ if( p->pWin && p->pPrior==0 && (p->selFlags & SF_WinRewrite)==0 ){
+ Vdbe *v = sqlite3GetVdbe(pParse);
+ sqlite3 *db = pParse->db;
+ Select *pSub = 0; /* The subquery */
+ SrcList *pSrc = p->pSrc;
+ Expr *pWhere = p->pWhere;
+ ExprList *pGroupBy = p->pGroupBy;
+ Expr *pHaving = p->pHaving;
+ ExprList *pSort = 0;
+
+ ExprList *pSublist = 0; /* Expression list for sub-query */
+ Window *pMWin = p->pWin; /* Main window object */
+ Window *pWin; /* Window object iterator */
+ Table *pTab;
+ Walker w;
+
+ u32 selFlags = p->selFlags;
+
+ pTab = sqlite3DbMallocZero(db, sizeof(Table));
+ if( pTab==0 ){
+ return sqlite3ErrorToParser(db, SQLITE_NOMEM);
+ }
+ sqlite3AggInfoPersistWalkerInit(&w, pParse);
+ sqlite3WalkSelect(&w, p);
+
+ p->pSrc = 0;
+ p->pWhere = 0;
+ p->pGroupBy = 0;
+ p->pHaving = 0;
+ p->selFlags &= ~SF_Aggregate;
+ p->selFlags |= SF_WinRewrite;
+
+ /* Create the ORDER BY clause for the sub-select. This is the concatenation
+ ** of the window PARTITION and ORDER BY clauses. Then, if this makes it
+ ** redundant, remove the ORDER BY from the parent SELECT. */
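+    /* For example (illustrative): for a window "OVER (PARTITION BY a
+    ** ORDER BY b)" the sub-select is sorted by (a, b); if the parent
+    ** SELECT also ends with "ORDER BY a", that ORDER BY is a prefix of
+    ** (a, b) and is removed below. */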
+ pSort = exprListAppendList(pParse, 0, pMWin->pPartition, 1);
+ pSort = exprListAppendList(pParse, pSort, pMWin->pOrderBy, 1);
+ if( pSort && p->pOrderBy && p->pOrderBy->nExpr<=pSort->nExpr ){
+ int nSave = pSort->nExpr;
+ pSort->nExpr = p->pOrderBy->nExpr;
+ if( sqlite3ExprListCompare(pSort, p->pOrderBy, -1)==0 ){
+ sqlite3ExprListDelete(db, p->pOrderBy);
+ p->pOrderBy = 0;
+ }
+ pSort->nExpr = nSave;
+ }
+
+ /* Assign a cursor number for the ephemeral table used to buffer rows.
+ ** The OpenEphemeral instruction is coded later, after it is known how
+ ** many columns the table will have. */
+ pMWin->iEphCsr = pParse->nTab++;
+ pParse->nTab += 3;
+
+ selectWindowRewriteEList(pParse, pMWin, pSrc, p->pEList, pTab, &pSublist);
+ selectWindowRewriteEList(pParse, pMWin, pSrc, p->pOrderBy, pTab, &pSublist);
+ pMWin->nBufferCol = (pSublist ? pSublist->nExpr : 0);
+
+    /* Append the PARTITION BY and ORDER BY expressions to the
+ ** sub-select expression list. They are required to figure out where
+ ** boundaries for partitions and sets of peer rows lie. */
+ pSublist = exprListAppendList(pParse, pSublist, pMWin->pPartition, 0);
+ pSublist = exprListAppendList(pParse, pSublist, pMWin->pOrderBy, 0);
+
+ /* Append the arguments passed to each window function to the
+ ** sub-select expression list. Also allocate two registers for each
+ ** window function - one for the accumulator, another for interim
+ ** results. */
+ for(pWin=pMWin; pWin; pWin=pWin->pNextWin){
+ ExprList *pArgs = pWin->pOwner->x.pList;
+ if( pWin->pFunc->funcFlags & SQLITE_FUNC_SUBTYPE ){
+ selectWindowRewriteEList(pParse, pMWin, pSrc, pArgs, pTab, &pSublist);
+ pWin->iArgCol = (pSublist ? pSublist->nExpr : 0);
+ pWin->bExprArgs = 1;
+ }else{
+ pWin->iArgCol = (pSublist ? pSublist->nExpr : 0);
+ pSublist = exprListAppendList(pParse, pSublist, pArgs, 0);
+ }
+ if( pWin->pFilter ){
+ Expr *pFilter = sqlite3ExprDup(db, pWin->pFilter, 0);
+ pSublist = sqlite3ExprListAppend(pParse, pSublist, pFilter);
+ }
+ pWin->regAccum = ++pParse->nMem;
+ pWin->regResult = ++pParse->nMem;
+ sqlite3VdbeAddOp2(v, OP_Null, 0, pWin->regAccum);
+ }
+
+ /* If there is no ORDER BY or PARTITION BY clause, and the window
+ ** function accepts zero arguments, and there are no other columns
+ ** selected (e.g. "SELECT row_number() OVER () FROM t1"), it is possible
+ ** that pSublist is still NULL here. Add a constant expression here to
+ ** keep everything legal in this case.
+ */
+ if( pSublist==0 ){
+ pSublist = sqlite3ExprListAppend(pParse, 0,
+ sqlite3Expr(db, TK_INTEGER, "0")
+ );
+ }
+
+ pSub = sqlite3SelectNew(
+ pParse, pSublist, pSrc, pWhere, pGroupBy, pHaving, pSort, 0, 0
+ );
+ SELECTTRACE(1,pParse,pSub,
+ ("New window-function subquery in FROM clause of (%u/%p)\n",
+ p->selId, p));
+ p->pSrc = sqlite3SrcListAppend(pParse, 0, 0, 0);
+ if( p->pSrc ){
+ Table *pTab2;
+ p->pSrc->a[0].pSelect = pSub;
+ sqlite3SrcListAssignCursors(pParse, p->pSrc);
+ pSub->selFlags |= SF_Expanded;
+ pTab2 = sqlite3ResultSetOfSelect(pParse, pSub, SQLITE_AFF_NONE);
+ pSub->selFlags |= (selFlags & SF_Aggregate);
+ if( pTab2==0 ){
+      /* This might actually be some other kind of error, but in that
+      ** case pParse->nErr will already be set, so even though rc is
+      ** set to SQLITE_NOMEM here, the correct error message will be
+      ** reported. */
+ rc = SQLITE_NOMEM;
+ }else{
+ memcpy(pTab, pTab2, sizeof(Table));
+ pTab->tabFlags |= TF_Ephemeral;
+ p->pSrc->a[0].pTab = pTab;
+ pTab = pTab2;
+ memset(&w, 0, sizeof(w));
+ w.xExprCallback = sqlite3WindowExtraAggFuncDepth;
+ w.xSelectCallback = sqlite3WalkerDepthIncrease;
+ w.xSelectCallback2 = sqlite3WalkerDepthDecrease;
+ sqlite3WalkSelect(&w, pSub);
+ }
+ }else{
+ sqlite3SelectDelete(db, pSub);
+ }
+ if( db->mallocFailed ) rc = SQLITE_NOMEM;
+ sqlite3DbFree(db, pTab);
+ }
+
+ if( rc ){
+ if( pParse->nErr==0 ){
+ assert( pParse->db->mallocFailed );
+ sqlite3ErrorToParser(pParse->db, SQLITE_NOMEM);
+ }
+ }
+ return rc;
+}
+
+/*
+** Unlink the Window object from the Select to which it is attached,
+** if it is attached.
+*/
+SQLITE_PRIVATE void sqlite3WindowUnlinkFromSelect(Window *p){
+ if( p->ppThis ){
+ *p->ppThis = p->pNextWin;
+ if( p->pNextWin ) p->pNextWin->ppThis = p->ppThis;
+ p->ppThis = 0;
+ }
+}
+
+/*
+** Free the Window object passed as the second argument.
+*/
+SQLITE_PRIVATE void sqlite3WindowDelete(sqlite3 *db, Window *p){
+ if( p ){
+ sqlite3WindowUnlinkFromSelect(p);
+ sqlite3ExprDelete(db, p->pFilter);
+ sqlite3ExprListDelete(db, p->pPartition);
+ sqlite3ExprListDelete(db, p->pOrderBy);
+ sqlite3ExprDelete(db, p->pEnd);
+ sqlite3ExprDelete(db, p->pStart);
+ sqlite3DbFree(db, p->zName);
+ sqlite3DbFree(db, p->zBase);
+ sqlite3DbFree(db, p);
+ }
+}
+
+/*
+** Free the linked list of Window objects starting at the second argument.
+*/
+SQLITE_PRIVATE void sqlite3WindowListDelete(sqlite3 *db, Window *p){
+ while( p ){
+ Window *pNext = p->pNextWin;
+ sqlite3WindowDelete(db, p);
+ p = pNext;
+ }
+}
+
+/*
+** The argument expression is a PRECEDING or FOLLOWING offset. The
+** value should be a non-negative integer. If the value is not a
+** constant, change it to NULL; the resulting NULL will fail the
+** non-negative integer check later. But it is important not to leave
+** variable values in the expression tree.
+*/
+static Expr *sqlite3WindowOffsetExpr(Parse *pParse, Expr *pExpr){
+ if( 0==sqlite3ExprIsConstant(pExpr) ){
+ if( IN_RENAME_OBJECT ) sqlite3RenameExprUnmap(pParse, pExpr);
+ sqlite3ExprDelete(pParse->db, pExpr);
+ pExpr = sqlite3ExprAlloc(pParse->db, TK_NULL, 0, 0);
+ }
+ return pExpr;
+}
+
+/*
+** Allocate and return a new Window object describing a Window Definition.
+*/
+SQLITE_PRIVATE Window *sqlite3WindowAlloc(
+ Parse *pParse, /* Parsing context */
+ int eType, /* Frame type. TK_RANGE, TK_ROWS, TK_GROUPS, or 0 */
+ int eStart, /* Start type: CURRENT, PRECEDING, FOLLOWING, UNBOUNDED */
+ Expr *pStart, /* Start window size if TK_PRECEDING or FOLLOWING */
+ int eEnd, /* End type: CURRENT, FOLLOWING, TK_UNBOUNDED, PRECEDING */
+ Expr *pEnd, /* End window size if TK_FOLLOWING or PRECEDING */
+ u8 eExclude /* EXCLUDE clause */
+){
+ Window *pWin = 0;
+ int bImplicitFrame = 0;
+
+ /* Parser assures the following: */
+ assert( eType==0 || eType==TK_RANGE || eType==TK_ROWS || eType==TK_GROUPS );
+ assert( eStart==TK_CURRENT || eStart==TK_PRECEDING
+ || eStart==TK_UNBOUNDED || eStart==TK_FOLLOWING );
+ assert( eEnd==TK_CURRENT || eEnd==TK_FOLLOWING
+ || eEnd==TK_UNBOUNDED || eEnd==TK_PRECEDING );
+ assert( (eStart==TK_PRECEDING || eStart==TK_FOLLOWING)==(pStart!=0) );
+ assert( (eEnd==TK_FOLLOWING || eEnd==TK_PRECEDING)==(pEnd!=0) );
+
+ if( eType==0 ){
+ bImplicitFrame = 1;
+ eType = TK_RANGE;
+ }
+
+ /* Additionally, the
+ ** starting boundary type may not occur earlier in the following list than
+ ** the ending boundary type:
+ **
+ ** UNBOUNDED PRECEDING
+ ** <expr> PRECEDING
+ ** CURRENT ROW
+ ** <expr> FOLLOWING
+ ** UNBOUNDED FOLLOWING
+ **
+ ** The parser ensures that "UNBOUNDED PRECEDING" cannot be used as an ending
+** boundary, and that "UNBOUNDED FOLLOWING" cannot be used as a starting
+ ** frame boundary.
+ */
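+  /* For example, "ROWS BETWEEN CURRENT ROW AND 1 PRECEDING" and
+  ** "ROWS BETWEEN 1 FOLLOWING AND CURRENT ROW" are both rejected by
+  ** the test below. */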
+ if( (eStart==TK_CURRENT && eEnd==TK_PRECEDING)
+ || (eStart==TK_FOLLOWING && (eEnd==TK_PRECEDING || eEnd==TK_CURRENT))
+ ){
+ sqlite3ErrorMsg(pParse, "unsupported frame specification");
+ goto windowAllocErr;
+ }
+
+ pWin = (Window*)sqlite3DbMallocZero(pParse->db, sizeof(Window));
+ if( pWin==0 ) goto windowAllocErr;
+ pWin->eFrmType = eType;
+ pWin->eStart = eStart;
+ pWin->eEnd = eEnd;
+ if( eExclude==0 && OptimizationDisabled(pParse->db, SQLITE_WindowFunc) ){
+ eExclude = TK_NO;
+ }
+ pWin->eExclude = eExclude;
+ pWin->bImplicitFrame = bImplicitFrame;
+ pWin->pEnd = sqlite3WindowOffsetExpr(pParse, pEnd);
+ pWin->pStart = sqlite3WindowOffsetExpr(pParse, pStart);
+ return pWin;
+
+windowAllocErr:
+ sqlite3ExprDelete(pParse->db, pEnd);
+ sqlite3ExprDelete(pParse->db, pStart);
+ return 0;
+}
+
+/*
+** Attach PARTITION and ORDER BY clauses pPartition and pOrderBy to window
+** pWin. Also, if parameter pBase is not NULL, set pWin->zBase to the
+** equivalent nul-terminated string.
+*/
+SQLITE_PRIVATE Window *sqlite3WindowAssemble(
+ Parse *pParse,
+ Window *pWin,
+ ExprList *pPartition,
+ ExprList *pOrderBy,
+ Token *pBase
+){
+ if( pWin ){
+ pWin->pPartition = pPartition;
+ pWin->pOrderBy = pOrderBy;
+ if( pBase ){
+ pWin->zBase = sqlite3DbStrNDup(pParse->db, pBase->z, pBase->n);
+ }
+ }else{
+ sqlite3ExprListDelete(pParse->db, pPartition);
+ sqlite3ExprListDelete(pParse->db, pOrderBy);
+ }
+ return pWin;
+}
+
+/*
+** Window *pWin has just been created from a WINDOW clause. Token pBase
+** is the base window. Earlier windows from the same WINDOW clause are
+** stored in the linked list starting at pWin->pNextWin. This function
+** either updates *pWin according to the base specification, or else
+** leaves an error in pParse.
+*/
+SQLITE_PRIVATE void sqlite3WindowChain(Parse *pParse, Window *pWin, Window *pList){
+ if( pWin->zBase ){
+ sqlite3 *db = pParse->db;
+ Window *pExist = windowFind(pParse, pList, pWin->zBase);
+ if( pExist ){
+ const char *zErr = 0;
+ /* Check for errors */
+ if( pWin->pPartition ){
+ zErr = "PARTITION clause";
+ }else if( pExist->pOrderBy && pWin->pOrderBy ){
+ zErr = "ORDER BY clause";
+ }else if( pExist->bImplicitFrame==0 ){
+ zErr = "frame specification";
+ }
+ if( zErr ){
+ sqlite3ErrorMsg(pParse,
+ "cannot override %s of window: %s", zErr, pWin->zBase
+ );
+ }else{
+ pWin->pPartition = sqlite3ExprListDup(db, pExist->pPartition, 0);
+ if( pExist->pOrderBy ){
+ assert( pWin->pOrderBy==0 );
+ pWin->pOrderBy = sqlite3ExprListDup(db, pExist->pOrderBy, 0);
+ }
+ sqlite3DbFree(db, pWin->zBase);
+ pWin->zBase = 0;
+ }
+ }
+ }
+}
+
+/*
+** Attach window object pWin to expression p.
+*/
+SQLITE_PRIVATE void sqlite3WindowAttach(Parse *pParse, Expr *p, Window *pWin){
+ if( p ){
+ assert( p->op==TK_FUNCTION );
+ assert( pWin );
+ p->y.pWin = pWin;
+ ExprSetProperty(p, EP_WinFunc);
+ pWin->pOwner = p;
+ if( (p->flags & EP_Distinct) && pWin->eFrmType!=TK_FILTER ){
+ sqlite3ErrorMsg(pParse,
+ "DISTINCT is not supported for window functions"
+ );
+ }
+ }else{
+ sqlite3WindowDelete(pParse->db, pWin);
+ }
+}
+
+/*
+** Possibly link window pWin into the list at pSel->pWin (window functions
+** to be processed as part of SELECT statement pSel). The window is linked
+** in if either (a) there are no other windows already linked to this
+** SELECT, or (b) the windows already linked use a compatible window frame.
+*/
+SQLITE_PRIVATE void sqlite3WindowLink(Select *pSel, Window *pWin){
+ if( pSel!=0
+ && (0==pSel->pWin || 0==sqlite3WindowCompare(0, pSel->pWin, pWin, 0))
+ ){
+ pWin->pNextWin = pSel->pWin;
+ if( pSel->pWin ){
+ pSel->pWin->ppThis = &pWin->pNextWin;
+ }
+ pSel->pWin = pWin;
+ pWin->ppThis = &pSel->pWin;
+ }
+}
+
+/*
+** Return 0 if the two window objects are identical, 1 if they are
+** different, or 2 if it cannot be determined if the objects are identical
+** or not. Identical window objects can be processed in a single scan.
+*/
+SQLITE_PRIVATE int sqlite3WindowCompare(Parse *pParse, Window *p1, Window *p2, int bFilter){
+ int res;
+ if( NEVER(p1==0) || NEVER(p2==0) ) return 1;
+ if( p1->eFrmType!=p2->eFrmType ) return 1;
+ if( p1->eStart!=p2->eStart ) return 1;
+ if( p1->eEnd!=p2->eEnd ) return 1;
+ if( p1->eExclude!=p2->eExclude ) return 1;
+ if( sqlite3ExprCompare(pParse, p1->pStart, p2->pStart, -1) ) return 1;
+ if( sqlite3ExprCompare(pParse, p1->pEnd, p2->pEnd, -1) ) return 1;
+ if( (res = sqlite3ExprListCompare(p1->pPartition, p2->pPartition, -1)) ){
+ return res;
+ }
+ if( (res = sqlite3ExprListCompare(p1->pOrderBy, p2->pOrderBy, -1)) ){
+ return res;
+ }
+ if( bFilter ){
+ if( (res = sqlite3ExprCompare(pParse, p1->pFilter, p2->pFilter, -1)) ){
+ return res;
+ }
+ }
+ return 0;
+}
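+
+/*
+** For instance, in a (hypothetical) query such as:
+**
+**   SELECT sum(x) OVER (ORDER BY y), avg(x) OVER (ORDER BY y) FROM t1;
+**
+** the two OVER clauses produce identical Window objects, so this function
+** returns 0 and both window functions may be evaluated in a single scan
+** of the sorted input.
+*/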
+
+
+/*
+** This is called by code in select.c before it calls sqlite3WhereBegin()
+** to begin iterating through the sub-query results. It is used to allocate
+** and initialize registers and cursors used by sqlite3WindowCodeStep().
+*/
+SQLITE_PRIVATE void sqlite3WindowCodeInit(Parse *pParse, Select *pSelect){
+ int nEphExpr = pSelect->pSrc->a[0].pSelect->pEList->nExpr;
+ Window *pMWin = pSelect->pWin;
+ Window *pWin;
+ Vdbe *v = sqlite3GetVdbe(pParse);
+
+ sqlite3VdbeAddOp2(v, OP_OpenEphemeral, pMWin->iEphCsr, nEphExpr);
+ sqlite3VdbeAddOp2(v, OP_OpenDup, pMWin->iEphCsr+1, pMWin->iEphCsr);
+ sqlite3VdbeAddOp2(v, OP_OpenDup, pMWin->iEphCsr+2, pMWin->iEphCsr);
+ sqlite3VdbeAddOp2(v, OP_OpenDup, pMWin->iEphCsr+3, pMWin->iEphCsr);
+
+ /* Allocate registers to use for PARTITION BY values, if any. Initialize
+ ** said registers to NULL. */
+ if( pMWin->pPartition ){
+ int nExpr = pMWin->pPartition->nExpr;
+ pMWin->regPart = pParse->nMem+1;
+ pParse->nMem += nExpr;
+ sqlite3VdbeAddOp3(v, OP_Null, 0, pMWin->regPart, pMWin->regPart+nExpr-1);
+ }
+
+ pMWin->regOne = ++pParse->nMem;
+ sqlite3VdbeAddOp2(v, OP_Integer, 1, pMWin->regOne);
+
+ if( pMWin->eExclude ){
+ pMWin->regStartRowid = ++pParse->nMem;
+ pMWin->regEndRowid = ++pParse->nMem;
+ pMWin->csrApp = pParse->nTab++;
+ sqlite3VdbeAddOp2(v, OP_Integer, 1, pMWin->regStartRowid);
+ sqlite3VdbeAddOp2(v, OP_Integer, 0, pMWin->regEndRowid);
+ sqlite3VdbeAddOp2(v, OP_OpenDup, pMWin->csrApp, pMWin->iEphCsr);
+ return;
+ }
+
+ for(pWin=pMWin; pWin; pWin=pWin->pNextWin){
+ FuncDef *p = pWin->pFunc;
+ if( (p->funcFlags & SQLITE_FUNC_MINMAX) && pWin->eStart!=TK_UNBOUNDED ){
+ /* The inline versions of min() and max() require a single ephemeral
+ ** table and 3 registers. The registers are used as follows:
+ **
+ ** regApp+0: slot to copy min()/max() argument to for MakeRecord
+ ** regApp+1: integer value used to ensure keys are unique
+ ** regApp+2: output of MakeRecord
+ */
+ ExprList *pList = pWin->pOwner->x.pList;
+ KeyInfo *pKeyInfo = sqlite3KeyInfoFromExprList(pParse, pList, 0, 0);
+ pWin->csrApp = pParse->nTab++;
+ pWin->regApp = pParse->nMem+1;
+ pParse->nMem += 3;
+ if( pKeyInfo && pWin->pFunc->zName[1]=='i' ){
+ assert( pKeyInfo->aSortFlags[0]==0 );
+ pKeyInfo->aSortFlags[0] = KEYINFO_ORDER_DESC;
+ }
+ sqlite3VdbeAddOp2(v, OP_OpenEphemeral, pWin->csrApp, 2);
+ sqlite3VdbeAppendP4(v, pKeyInfo, P4_KEYINFO);
+ sqlite3VdbeAddOp2(v, OP_Integer, 0, pWin->regApp+1);
+ }
+ else if( p->zName==nth_valueName || p->zName==first_valueName ){
+ /* Allocate two registers at pWin->regApp. These will be used to
+ ** store the start and end index of the current frame. */
+ pWin->regApp = pParse->nMem+1;
+ pWin->csrApp = pParse->nTab++;
+ pParse->nMem += 2;
+ sqlite3VdbeAddOp2(v, OP_OpenDup, pWin->csrApp, pMWin->iEphCsr);
+ }
+ else if( p->zName==leadName || p->zName==lagName ){
+ pWin->csrApp = pParse->nTab++;
+ sqlite3VdbeAddOp2(v, OP_OpenDup, pWin->csrApp, pMWin->iEphCsr);
+ }
+ }
+}
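+
+/*
+** As an illustration of the min()/max() optimization above, a (hypothetical)
+** query such as:
+**
+**   SELECT max(x) OVER (ORDER BY y ROWS 2 PRECEDING) FROM t1;
+**
+** opens an extra ephemeral index on the argument values so that, even after
+** rows leave the frame via xInverse processing, the current maximum can be
+** recovered by reading the last entry of that index (see the OP_Last coded
+** by windowAggFinal()).
+*/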
+
+#define WINDOW_STARTING_INT 0
+#define WINDOW_ENDING_INT 1
+#define WINDOW_NTH_VALUE_INT 2
+#define WINDOW_STARTING_NUM 3
+#define WINDOW_ENDING_NUM 4
+
+/*
+** A "PRECEDING <expr>" (eCond==0) or "FOLLOWING <expr>" (eCond==1) or the
+** value of the second argument to nth_value() (eCond==2) has just been
+** evaluated and the result left in register reg. This function generates VM
+** code to check that the value is a non-negative integer and throws an
+** exception if it is not.
+*/
+static void windowCheckValue(Parse *pParse, int reg, int eCond){
+ static const char *azErr[] = {
+ "frame starting offset must be a non-negative integer",
+ "frame ending offset must be a non-negative integer",
+ "second argument to nth_value must be a positive integer",
+ "frame starting offset must be a non-negative number",
+ "frame ending offset must be a non-negative number",
+ };
+ static int aOp[] = { OP_Ge, OP_Ge, OP_Gt, OP_Ge, OP_Ge };
+ Vdbe *v = sqlite3GetVdbe(pParse);
+ int regZero = sqlite3GetTempReg(pParse);
+ assert( eCond>=0 && eCond<ArraySize(azErr) );
+ sqlite3VdbeAddOp2(v, OP_Integer, 0, regZero);
+ if( eCond>=WINDOW_STARTING_NUM ){
+ int regString = sqlite3GetTempReg(pParse);
+ sqlite3VdbeAddOp4(v, OP_String8, 0, regString, 0, "", P4_STATIC);
+ sqlite3VdbeAddOp3(v, OP_Ge, regString, sqlite3VdbeCurrentAddr(v)+2, reg);
+ sqlite3VdbeChangeP5(v, SQLITE_AFF_NUMERIC|SQLITE_JUMPIFNULL);
+ VdbeCoverage(v);
+ assert( eCond==3 || eCond==4 );
+ VdbeCoverageIf(v, eCond==3);
+ VdbeCoverageIf(v, eCond==4);
+ }else{
+ sqlite3VdbeAddOp2(v, OP_MustBeInt, reg, sqlite3VdbeCurrentAddr(v)+2);
+ VdbeCoverage(v);
+ assert( eCond==0 || eCond==1 || eCond==2 );
+ VdbeCoverageIf(v, eCond==0);
+ VdbeCoverageIf(v, eCond==1);
+ VdbeCoverageIf(v, eCond==2);
+ }
+ sqlite3VdbeAddOp3(v, aOp[eCond], regZero, sqlite3VdbeCurrentAddr(v)+2, reg);
+ VdbeCoverageNeverNullIf(v, eCond==0); /* NULL case captured by */
+ VdbeCoverageNeverNullIf(v, eCond==1); /* the OP_MustBeInt */
+ VdbeCoverageNeverNullIf(v, eCond==2);
+ VdbeCoverageNeverNullIf(v, eCond==3); /* NULL case caught by */
+ VdbeCoverageNeverNullIf(v, eCond==4); /* the OP_Ge */
+ sqlite3MayAbort(pParse);
+ sqlite3VdbeAddOp2(v, OP_Halt, SQLITE_ERROR, OE_Abort);
+ sqlite3VdbeAppendP4(v, (void*)azErr[eCond], P4_STATIC);
+ sqlite3ReleaseTempReg(pParse, regZero);
+}
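+
+/*
+** For example, the frame offset in the (hypothetical) statement:
+**
+**   SELECT sum(x) OVER (ORDER BY y ROWS 'abc' PRECEDING) FROM t1;
+**
+** fails the OP_MustBeInt test coded above at runtime, halting the statement
+** with "frame starting offset must be a non-negative integer".
+*/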
+
+/*
+** Return the number of arguments passed to the window-function associated
+** with the object passed as the only argument to this function.
+*/
+static int windowArgCount(Window *pWin){
+ ExprList *pList = pWin->pOwner->x.pList;
+ return (pList ? pList->nExpr : 0);
+}
+
+typedef struct WindowCodeArg WindowCodeArg;
+typedef struct WindowCsrAndReg WindowCsrAndReg;
+
+/*
+** See comments above struct WindowCodeArg.
+*/
+struct WindowCsrAndReg {
+ int csr; /* Cursor number */
+ int reg; /* First in array of peer values */
+};
+
+/*
+** A single instance of this structure is allocated on the stack by
+** sqlite3WindowCodeStep() and a pointer to it passed to the various helper
+** routines. This is to reduce the number of arguments required by each
+** helper function.
+**
+** regArg:
+** Each window function requires an accumulator register (just as an
+** ordinary aggregate function does). This variable is set to the first
+** in an array of accumulator registers - one for each window function
+** in the WindowCodeArg.pMWin list.
+**
+** eDelete:
+** The window functions implementation sometimes caches the input rows
+** that it processes in a temporary table. If it is not zero, this
+** variable indicates when rows may be removed from the temp table (in
+** order to reduce memory requirements - it would always be safe just
+** to leave them there). Possible values for eDelete are:
+**
+** WINDOW_RETURN_ROW:
+** An input row can be discarded after it is returned to the caller.
+**
+** WINDOW_AGGINVERSE:
+**       An input row can be discarded after the window functions' xInverse()
+**       callbacks have been invoked on it.
+**
+** WINDOW_AGGSTEP:
+**       An input row can be discarded after the window functions' xStep()
+**       callbacks have been invoked on it.
+**
+** start,current,end
+** Consider a window-frame similar to the following:
+**
+** (ORDER BY a, b GROUPS BETWEEN 2 PRECEDING AND 2 FOLLOWING)
+**
+**   The window functions implementation caches the input rows in a temp
+** table, sorted by "a, b" (it actually populates the cache lazily, and
+** aggressively removes rows once they are no longer required, but that's
+** a mere detail). It keeps three cursors open on the temp table. One
+** (current) that points to the next row to return to the query engine
+** once its window function values have been calculated. Another (end)
+** points to the next row to call the xStep() method of each window function
+** on (so that it is 2 groups ahead of current). And a third (start) that
+** points to the next row to call the xInverse() method of each window
+** function on.
+**
+** Each cursor (start, current and end) consists of a VDBE cursor
+** (WindowCsrAndReg.csr) and an array of registers (starting at
+** WindowCodeArg.reg) that always contains a copy of the peer values
+** read from the corresponding cursor.
+**
+**   Depending on the window-frame in question, not all three cursors are
+**   necessarily required. For any unused cursor, both the WindowCsrAndReg.csr
+**   and WindowCsrAndReg.reg values are set to 0.
+*/
+struct WindowCodeArg {
+ Parse *pParse; /* Parse context */
+ Window *pMWin; /* First in list of functions being processed */
+ Vdbe *pVdbe; /* VDBE object */
+ int addrGosub; /* OP_Gosub to this address to return one row */
+ int regGosub; /* Register used with OP_Gosub(addrGosub) */
+ int regArg; /* First in array of accumulator registers */
+ int eDelete; /* See above */
+
+ WindowCsrAndReg start;
+ WindowCsrAndReg current;
+ WindowCsrAndReg end;
+};
+
+/*
+** Generate VM code to read the window frame's peer values from cursor csr into
+** an array of registers starting at reg.
+*/
+static void windowReadPeerValues(
+ WindowCodeArg *p,
+ int csr,
+ int reg
+){
+ Window *pMWin = p->pMWin;
+ ExprList *pOrderBy = pMWin->pOrderBy;
+ if( pOrderBy ){
+ Vdbe *v = sqlite3GetVdbe(p->pParse);
+ ExprList *pPart = pMWin->pPartition;
+ int iColOff = pMWin->nBufferCol + (pPart ? pPart->nExpr : 0);
+ int i;
+ for(i=0; i<pOrderBy->nExpr; i++){
+ sqlite3VdbeAddOp3(v, OP_Column, csr, iColOff+i, reg+i);
+ }
+ }
+}
+
+/*
+** Generate VM code to invoke either xStep() (if bInverse is 0) or
+** xInverse() (if bInverse is non-zero) for each window function in the
+** linked list starting at pMWin. Or, for built-in window functions
+** that do not use the standard function API, generate the required
+** inline VM code.
+**
+** If argument csr is greater than or equal to 0, then argument reg is
+** the first register in an array of registers guaranteed to be large
+** enough to hold the array of arguments for each function. In this case
+** the arguments are extracted from the current row of csr into the
+** array of registers before invoking OP_AggStep or OP_AggInverse.
+**
+** Or, if csr is less than zero, then the array of registers at reg is
+** already populated with all columns from the current row of the sub-query.
+**
+** If argument regPartSize is non-zero, then it is a register containing the
+** number of rows in the current partition.
+*/
+static void windowAggStep(
+ WindowCodeArg *p,
+ Window *pMWin, /* Linked list of window functions */
+ int csr, /* Read arguments from this cursor */
+ int bInverse, /* True to invoke xInverse instead of xStep */
+ int reg /* Array of registers */
+){
+ Parse *pParse = p->pParse;
+ Vdbe *v = sqlite3GetVdbe(pParse);
+ Window *pWin;
+ for(pWin=pMWin; pWin; pWin=pWin->pNextWin){
+ FuncDef *pFunc = pWin->pFunc;
+ int regArg;
+ int nArg = pWin->bExprArgs ? 0 : windowArgCount(pWin);
+ int i;
+
+ assert( bInverse==0 || pWin->eStart!=TK_UNBOUNDED );
+
+ /* All OVER clauses in the same window function aggregate step must
+ ** be the same. */
+ assert( pWin==pMWin || sqlite3WindowCompare(pParse,pWin,pMWin,0)!=1 );
+
+ for(i=0; i<nArg; i++){
+ if( i!=1 || pFunc->zName!=nth_valueName ){
+ sqlite3VdbeAddOp3(v, OP_Column, csr, pWin->iArgCol+i, reg+i);
+ }else{
+ sqlite3VdbeAddOp3(v, OP_Column, pMWin->iEphCsr, pWin->iArgCol+i, reg+i);
+ }
+ }
+ regArg = reg;
+
+ if( pMWin->regStartRowid==0
+ && (pFunc->funcFlags & SQLITE_FUNC_MINMAX)
+ && (pWin->eStart!=TK_UNBOUNDED)
+ ){
+ int addrIsNull = sqlite3VdbeAddOp1(v, OP_IsNull, regArg);
+ VdbeCoverage(v);
+ if( bInverse==0 ){
+ sqlite3VdbeAddOp2(v, OP_AddImm, pWin->regApp+1, 1);
+ sqlite3VdbeAddOp2(v, OP_SCopy, regArg, pWin->regApp);
+ sqlite3VdbeAddOp3(v, OP_MakeRecord, pWin->regApp, 2, pWin->regApp+2);
+ sqlite3VdbeAddOp2(v, OP_IdxInsert, pWin->csrApp, pWin->regApp+2);
+ }else{
+ sqlite3VdbeAddOp4Int(v, OP_SeekGE, pWin->csrApp, 0, regArg, 1);
+ VdbeCoverageNeverTaken(v);
+ sqlite3VdbeAddOp1(v, OP_Delete, pWin->csrApp);
+ sqlite3VdbeJumpHere(v, sqlite3VdbeCurrentAddr(v)-2);
+ }
+ sqlite3VdbeJumpHere(v, addrIsNull);
+ }else if( pWin->regApp ){
+ assert( pFunc->zName==nth_valueName
+ || pFunc->zName==first_valueName
+ );
+ assert( bInverse==0 || bInverse==1 );
+ sqlite3VdbeAddOp2(v, OP_AddImm, pWin->regApp+1-bInverse, 1);
+ }else if( pFunc->xSFunc!=noopStepFunc ){
+ int addrIf = 0;
+ if( pWin->pFilter ){
+ int regTmp;
+      assert( pWin->bExprArgs || !nArg || nArg==pWin->pOwner->x.pList->nExpr );
+      assert( pWin->bExprArgs || nArg || pWin->pOwner->x.pList==0 );
+ regTmp = sqlite3GetTempReg(pParse);
+ sqlite3VdbeAddOp3(v, OP_Column, csr, pWin->iArgCol+nArg,regTmp);
+ addrIf = sqlite3VdbeAddOp3(v, OP_IfNot, regTmp, 0, 1);
+ VdbeCoverage(v);
+ sqlite3ReleaseTempReg(pParse, regTmp);
+ }
+
+ if( pWin->bExprArgs ){
+ int iStart = sqlite3VdbeCurrentAddr(v);
+ VdbeOp *pOp, *pEnd;
+
+ nArg = pWin->pOwner->x.pList->nExpr;
+ regArg = sqlite3GetTempRange(pParse, nArg);
+ sqlite3ExprCodeExprList(pParse, pWin->pOwner->x.pList, regArg, 0, 0);
+
+ pEnd = sqlite3VdbeGetOp(v, -1);
+ for(pOp=sqlite3VdbeGetOp(v, iStart); pOp<=pEnd; pOp++){
+ if( pOp->opcode==OP_Column && pOp->p1==pWin->iEphCsr ){
+ pOp->p1 = csr;
+ }
+ }
+ }
+ if( pFunc->funcFlags & SQLITE_FUNC_NEEDCOLL ){
+ CollSeq *pColl;
+ assert( nArg>0 );
+ pColl = sqlite3ExprNNCollSeq(pParse, pWin->pOwner->x.pList->a[0].pExpr);
+ sqlite3VdbeAddOp4(v, OP_CollSeq, 0,0,0, (const char*)pColl, P4_COLLSEQ);
+ }
+ sqlite3VdbeAddOp3(v, bInverse? OP_AggInverse : OP_AggStep,
+ bInverse, regArg, pWin->regAccum);
+ sqlite3VdbeAppendP4(v, pFunc, P4_FUNCDEF);
+ sqlite3VdbeChangeP5(v, (u8)nArg);
+ if( pWin->bExprArgs ){
+ sqlite3ReleaseTempRange(pParse, regArg, nArg);
+ }
+ if( addrIf ) sqlite3VdbeJumpHere(v, addrIf);
+ }
+ }
+}
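+
+/*
+** For example, given a (hypothetical) FILTER clause:
+**
+**   SELECT sum(x) FILTER (WHERE x>0) OVER (ORDER BY y) FROM t1;
+**
+** the block above reads the pre-evaluated result of "x>0" from an extra
+** column of the cached row and skips the OP_AggStep/OP_AggInverse call
+** when it is false.
+*/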
+
+/*
+** Values that may be passed as the second argument to windowCodeOp().
+*/
+#define WINDOW_RETURN_ROW 1
+#define WINDOW_AGGINVERSE 2
+#define WINDOW_AGGSTEP 3
+
+/*
+** Generate VM code to invoke either xValue() (bFin==0) or xFinalize()
+** (bFin==1) for each window function in the linked list starting at
+** pMWin. Or, for built-in window-functions that do not use the standard
+** API, generate the equivalent VM code.
+*/
+static void windowAggFinal(WindowCodeArg *p, int bFin){
+ Parse *pParse = p->pParse;
+ Window *pMWin = p->pMWin;
+ Vdbe *v = sqlite3GetVdbe(pParse);
+ Window *pWin;
+
+ for(pWin=pMWin; pWin; pWin=pWin->pNextWin){
+ if( pMWin->regStartRowid==0
+ && (pWin->pFunc->funcFlags & SQLITE_FUNC_MINMAX)
+ && (pWin->eStart!=TK_UNBOUNDED)
+ ){
+ sqlite3VdbeAddOp2(v, OP_Null, 0, pWin->regResult);
+ sqlite3VdbeAddOp1(v, OP_Last, pWin->csrApp);
+ VdbeCoverage(v);
+ sqlite3VdbeAddOp3(v, OP_Column, pWin->csrApp, 0, pWin->regResult);
+ sqlite3VdbeJumpHere(v, sqlite3VdbeCurrentAddr(v)-2);
+ }else if( pWin->regApp ){
+ assert( pMWin->regStartRowid==0 );
+ }else{
+ int nArg = windowArgCount(pWin);
+ if( bFin ){
+ sqlite3VdbeAddOp2(v, OP_AggFinal, pWin->regAccum, nArg);
+ sqlite3VdbeAppendP4(v, pWin->pFunc, P4_FUNCDEF);
+ sqlite3VdbeAddOp2(v, OP_Copy, pWin->regAccum, pWin->regResult);
+ sqlite3VdbeAddOp2(v, OP_Null, 0, pWin->regAccum);
+ }else{
+ sqlite3VdbeAddOp3(v, OP_AggValue,pWin->regAccum,nArg,pWin->regResult);
+ sqlite3VdbeAppendP4(v, pWin->pFunc, P4_FUNCDEF);
+ }
+ }
+ }
+}
+
+/*
+** Generate code to calculate the current values of all window functions in the
+** p->pMWin list by doing a full scan of the current window frame. Store the
+** results in the Window.regResult registers, ready to return to the upper
+** layer.
+*/
+static void windowFullScan(WindowCodeArg *p){
+ Window *pWin;
+ Parse *pParse = p->pParse;
+ Window *pMWin = p->pMWin;
+ Vdbe *v = p->pVdbe;
+
+ int regCRowid = 0; /* Current rowid value */
+ int regCPeer = 0; /* Current peer values */
+ int regRowid = 0; /* AggStep rowid value */
+ int regPeer = 0; /* AggStep peer values */
+
+ int nPeer;
+ int lblNext;
+ int lblBrk;
+ int addrNext;
+ int csr;
+
+ VdbeModuleComment((v, "windowFullScan begin"));
+
+ assert( pMWin!=0 );
+ csr = pMWin->csrApp;
+ nPeer = (pMWin->pOrderBy ? pMWin->pOrderBy->nExpr : 0);
+
+ lblNext = sqlite3VdbeMakeLabel(pParse);
+ lblBrk = sqlite3VdbeMakeLabel(pParse);
+
+ regCRowid = sqlite3GetTempReg(pParse);
+ regRowid = sqlite3GetTempReg(pParse);
+ if( nPeer ){
+ regCPeer = sqlite3GetTempRange(pParse, nPeer);
+ regPeer = sqlite3GetTempRange(pParse, nPeer);
+ }
+
+ sqlite3VdbeAddOp2(v, OP_Rowid, pMWin->iEphCsr, regCRowid);
+ windowReadPeerValues(p, pMWin->iEphCsr, regCPeer);
+
+ for(pWin=pMWin; pWin; pWin=pWin->pNextWin){
+ sqlite3VdbeAddOp2(v, OP_Null, 0, pWin->regAccum);
+ }
+
+ sqlite3VdbeAddOp3(v, OP_SeekGE, csr, lblBrk, pMWin->regStartRowid);
+ VdbeCoverage(v);
+ addrNext = sqlite3VdbeCurrentAddr(v);
+ sqlite3VdbeAddOp2(v, OP_Rowid, csr, regRowid);
+ sqlite3VdbeAddOp3(v, OP_Gt, pMWin->regEndRowid, lblBrk, regRowid);
+ VdbeCoverageNeverNull(v);
+
+ if( pMWin->eExclude==TK_CURRENT ){
+ sqlite3VdbeAddOp3(v, OP_Eq, regCRowid, lblNext, regRowid);
+ VdbeCoverageNeverNull(v);
+ }else if( pMWin->eExclude!=TK_NO ){
+ int addr;
+ int addrEq = 0;
+ KeyInfo *pKeyInfo = 0;
+
+ if( pMWin->pOrderBy ){
+ pKeyInfo = sqlite3KeyInfoFromExprList(pParse, pMWin->pOrderBy, 0, 0);
+ }
+ if( pMWin->eExclude==TK_TIES ){
+ addrEq = sqlite3VdbeAddOp3(v, OP_Eq, regCRowid, 0, regRowid);
+ VdbeCoverageNeverNull(v);
+ }
+ if( pKeyInfo ){
+ windowReadPeerValues(p, csr, regPeer);
+ sqlite3VdbeAddOp3(v, OP_Compare, regPeer, regCPeer, nPeer);
+ sqlite3VdbeAppendP4(v, (void*)pKeyInfo, P4_KEYINFO);
+ addr = sqlite3VdbeCurrentAddr(v)+1;
+ sqlite3VdbeAddOp3(v, OP_Jump, addr, lblNext, addr);
+ VdbeCoverageEqNe(v);
+ }else{
+ sqlite3VdbeAddOp2(v, OP_Goto, 0, lblNext);
+ }
+ if( addrEq ) sqlite3VdbeJumpHere(v, addrEq);
+ }
+
+ windowAggStep(p, pMWin, csr, 0, p->regArg);
+
+ sqlite3VdbeResolveLabel(v, lblNext);
+ sqlite3VdbeAddOp2(v, OP_Next, csr, addrNext);
+ VdbeCoverage(v);
+ sqlite3VdbeJumpHere(v, addrNext-1);
+ sqlite3VdbeJumpHere(v, addrNext+1);
+ sqlite3ReleaseTempReg(pParse, regRowid);
+ sqlite3ReleaseTempReg(pParse, regCRowid);
+ if( nPeer ){
+ sqlite3ReleaseTempRange(pParse, regPeer, nPeer);
+ sqlite3ReleaseTempRange(pParse, regCPeer, nPeer);
+ }
+
+ windowAggFinal(p, 1);
+ VdbeModuleComment((v, "windowFullScan end"));
+}
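+
+/*
+** A full scan of this kind is required by, for example, a (hypothetical)
+** query such as:
+**
+**   SELECT sum(x) OVER (ORDER BY y
+**     ROWS BETWEEN 2 PRECEDING AND 2 FOLLOWING EXCLUDE CURRENT ROW)
+**   FROM t1;
+**
+** Because the set of excluded rows changes for every row returned,
+** xInverse() cannot be used and the aggregate is recomputed from scratch
+** for each output row.
+*/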
+
+/*
+** Invoke the sub-routine at regGosub (generated by code in select.c) to
+** return the current row of Window.iEphCsr. If all window functions are
+** aggregate window functions that use the standard API, a single
+** OP_Gosub instruction is all that this routine generates. Extra VM code
+** for per-row processing is only generated for the following built-in window
+** functions:
+**
+** nth_value()
+** first_value()
+** lag()
+** lead()
+*/
+static void windowReturnOneRow(WindowCodeArg *p){
+ Window *pMWin = p->pMWin;
+ Vdbe *v = p->pVdbe;
+
+ if( pMWin->regStartRowid ){
+ windowFullScan(p);
+ }else{
+ Parse *pParse = p->pParse;
+ Window *pWin;
+
+ for(pWin=pMWin; pWin; pWin=pWin->pNextWin){
+ FuncDef *pFunc = pWin->pFunc;
+ if( pFunc->zName==nth_valueName
+ || pFunc->zName==first_valueName
+ ){
+ int csr = pWin->csrApp;
+ int lbl = sqlite3VdbeMakeLabel(pParse);
+ int tmpReg = sqlite3GetTempReg(pParse);
+ sqlite3VdbeAddOp2(v, OP_Null, 0, pWin->regResult);
+
+ if( pFunc->zName==nth_valueName ){
+ sqlite3VdbeAddOp3(v, OP_Column,pMWin->iEphCsr,pWin->iArgCol+1,tmpReg);
+ windowCheckValue(pParse, tmpReg, 2);
+ }else{
+ sqlite3VdbeAddOp2(v, OP_Integer, 1, tmpReg);
+ }
+ sqlite3VdbeAddOp3(v, OP_Add, tmpReg, pWin->regApp, tmpReg);
+ sqlite3VdbeAddOp3(v, OP_Gt, pWin->regApp+1, lbl, tmpReg);
+ VdbeCoverageNeverNull(v);
+ sqlite3VdbeAddOp3(v, OP_SeekRowid, csr, 0, tmpReg);
+ VdbeCoverageNeverTaken(v);
+ sqlite3VdbeAddOp3(v, OP_Column, csr, pWin->iArgCol, pWin->regResult);
+ sqlite3VdbeResolveLabel(v, lbl);
+ sqlite3ReleaseTempReg(pParse, tmpReg);
+ }
+ else if( pFunc->zName==leadName || pFunc->zName==lagName ){
+ int nArg = pWin->pOwner->x.pList->nExpr;
+ int csr = pWin->csrApp;
+ int lbl = sqlite3VdbeMakeLabel(pParse);
+ int tmpReg = sqlite3GetTempReg(pParse);
+ int iEph = pMWin->iEphCsr;
+
+ if( nArg<3 ){
+ sqlite3VdbeAddOp2(v, OP_Null, 0, pWin->regResult);
+ }else{
+ sqlite3VdbeAddOp3(v, OP_Column, iEph,pWin->iArgCol+2,pWin->regResult);
+ }
+ sqlite3VdbeAddOp2(v, OP_Rowid, iEph, tmpReg);
+ if( nArg<2 ){
+ int val = (pFunc->zName==leadName ? 1 : -1);
+ sqlite3VdbeAddOp2(v, OP_AddImm, tmpReg, val);
+ }else{
+ int op = (pFunc->zName==leadName ? OP_Add : OP_Subtract);
+ int tmpReg2 = sqlite3GetTempReg(pParse);
+ sqlite3VdbeAddOp3(v, OP_Column, iEph, pWin->iArgCol+1, tmpReg2);
+ sqlite3VdbeAddOp3(v, op, tmpReg2, tmpReg, tmpReg);
+ sqlite3ReleaseTempReg(pParse, tmpReg2);
+ }
+
+ sqlite3VdbeAddOp3(v, OP_SeekRowid, csr, lbl, tmpReg);
+ VdbeCoverage(v);
+ sqlite3VdbeAddOp3(v, OP_Column, csr, pWin->iArgCol, pWin->regResult);
+ sqlite3VdbeResolveLabel(v, lbl);
+ sqlite3ReleaseTempReg(pParse, tmpReg);
+ }
+ }
+ }
+ sqlite3VdbeAddOp2(v, OP_Gosub, p->regGosub, p->addrGosub);
+}
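+
+/*
+** For instance, for a (hypothetical) call lead(x, 5), the code generated
+** above adds 5 to the rowid of the current row of the main cursor and
+** attempts an OP_SeekRowid on csrApp. If the seek fails, the result is the
+** third argument if one was supplied, or NULL otherwise.
+*/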
+
+/*
+** Generate code to set the accumulator register for each window function
+** in the linked list passed as the second argument to NULL. And perform
+** any equivalent initialization required by any built-in window functions
+** in the list.
+*/
+static int windowInitAccum(Parse *pParse, Window *pMWin){
+ Vdbe *v = sqlite3GetVdbe(pParse);
+ int regArg;
+ int nArg = 0;
+ Window *pWin;
+ for(pWin=pMWin; pWin; pWin=pWin->pNextWin){
+ FuncDef *pFunc = pWin->pFunc;
+ assert( pWin->regAccum );
+ sqlite3VdbeAddOp2(v, OP_Null, 0, pWin->regAccum);
+ nArg = MAX(nArg, windowArgCount(pWin));
+ if( pMWin->regStartRowid==0 ){
+ if( pFunc->zName==nth_valueName || pFunc->zName==first_valueName ){
+ sqlite3VdbeAddOp2(v, OP_Integer, 0, pWin->regApp);
+ sqlite3VdbeAddOp2(v, OP_Integer, 0, pWin->regApp+1);
+ }
+
+ if( (pFunc->funcFlags & SQLITE_FUNC_MINMAX) && pWin->csrApp ){
+ assert( pWin->eStart!=TK_UNBOUNDED );
+ sqlite3VdbeAddOp1(v, OP_ResetSorter, pWin->csrApp);
+ sqlite3VdbeAddOp2(v, OP_Integer, 0, pWin->regApp+1);
+ }
+ }
+ }
+ regArg = pParse->nMem+1;
+ pParse->nMem += nArg;
+ return regArg;
+}
+
+/*
+** Return true if the current frame should be cached in the ephemeral table,
+** even if there are no xInverse() calls required.
+*/
+static int windowCacheFrame(Window *pMWin){
+ Window *pWin;
+ if( pMWin->regStartRowid ) return 1;
+ for(pWin=pMWin; pWin; pWin=pWin->pNextWin){
+ FuncDef *pFunc = pWin->pFunc;
+ if( (pFunc->zName==nth_valueName)
+ || (pFunc->zName==first_valueName)
+ || (pFunc->zName==leadName)
+ || (pFunc->zName==lagName)
+ ){
+ return 1;
+ }
+ }
+ return 0;
+}
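+
+/*
+** For example, a (hypothetical) query using lag():
+**
+**   SELECT lag(x) OVER (ORDER BY y) FROM t1;
+**
+** must keep already-returned rows in the ephemeral table, since lag()
+** reads a row preceding the current one, even though no xInverse() calls
+** are ever made.
+*/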
+
+/*
+** regOld and regNew are each the first register in an array of size
+** pOrderBy->nExpr. This function generates code to compare the two
+** arrays of registers using the collation sequences and other comparison
+** parameters specified by pOrderBy.
+**
+** If the two arrays are not equal, the contents of regNew are copied to
+** regOld and control falls through. Otherwise, if the contents of the arrays
+** are equal, an OP_Goto to address addr is coded.
+*/
+static void windowIfNewPeer(
+ Parse *pParse,
+ ExprList *pOrderBy,
+ int regNew, /* First in array of new values */
+ int regOld, /* First in array of old values */
+ int addr /* Jump here */
+){
+ Vdbe *v = sqlite3GetVdbe(pParse);
+ if( pOrderBy ){
+ int nVal = pOrderBy->nExpr;
+ KeyInfo *pKeyInfo = sqlite3KeyInfoFromExprList(pParse, pOrderBy, 0, 0);
+ sqlite3VdbeAddOp3(v, OP_Compare, regOld, regNew, nVal);
+ sqlite3VdbeAppendP4(v, (void*)pKeyInfo, P4_KEYINFO);
+ sqlite3VdbeAddOp3(v, OP_Jump,
+ sqlite3VdbeCurrentAddr(v)+1, addr, sqlite3VdbeCurrentAddr(v)+1
+ );
+ VdbeCoverageEqNe(v);
+ sqlite3VdbeAddOp3(v, OP_Copy, regNew, regOld, nVal-1);
+ }else{
+ sqlite3VdbeAddOp2(v, OP_Goto, 0, addr);
+ }
+}
+
+/*
+** This function is called as part of generating VM programs for RANGE
+** offset PRECEDING/FOLLOWING frame boundaries. Assuming "ASC" order for
+** the ORDER BY term in the window, and that argument op is OP_Ge, it generates
+** code equivalent to:
+**
+** if( csr1.peerVal + regVal >= csr2.peerVal ) goto lbl;
+**
+** The value of parameter op may also be OP_Gt or OP_Le. In these cases the
+** operator in the above pseudo-code is replaced with ">" or "<=", respectively.
+**
+** If the sort-order for the ORDER BY term in the window is DESC, then the
+** comparison is reversed. Instead of adding regVal to csr1.peerVal, it is
+** subtracted. And the comparison operator is inverted: ">=" becomes "<=",
+** ">" becomes "<", and so on. So, with DESC sort order, if the argument op
+** is OP_Ge, the generated code is equivalent to:
+**
+** if( csr1.peerVal - regVal <= csr2.peerVal ) goto lbl;
+**
+** A special type of arithmetic is used such that if csr1.peerVal is not
+** a numeric type (real or integer), then the result of the addition
+** or subtraction is a copy of csr1.peerVal.
+*/
+static void windowCodeRangeTest(
+ WindowCodeArg *p,
+ int op, /* OP_Ge, OP_Gt, or OP_Le */
+ int csr1, /* Cursor number for cursor 1 */
+ int regVal, /* Register containing non-negative number */
+ int csr2, /* Cursor number for cursor 2 */
+ int lbl /* Jump destination if condition is true */
+){
+ Parse *pParse = p->pParse;
+ Vdbe *v = sqlite3GetVdbe(pParse);
+ ExprList *pOrderBy = p->pMWin->pOrderBy; /* ORDER BY clause for window */
+ int reg1 = sqlite3GetTempReg(pParse); /* Reg. for csr1.peerVal+regVal */
+ int reg2 = sqlite3GetTempReg(pParse); /* Reg. for csr2.peerVal */
+ int regString = ++pParse->nMem; /* Reg. for constant value '' */
+ int arith = OP_Add; /* OP_Add or OP_Subtract */
+ int addrGe; /* Jump destination */
+
+ assert( op==OP_Ge || op==OP_Gt || op==OP_Le );
+ assert( pOrderBy && pOrderBy->nExpr==1 );
+ if( pOrderBy->a[0].sortFlags & KEYINFO_ORDER_DESC ){
+ switch( op ){
+ case OP_Ge: op = OP_Le; break;
+ case OP_Gt: op = OP_Lt; break;
+ default: assert( op==OP_Le ); op = OP_Ge; break;
+ }
+ arith = OP_Subtract;
+ }
+
+ /* Read the peer-value from each cursor into a register */
+ windowReadPeerValues(p, csr1, reg1);
+ windowReadPeerValues(p, csr2, reg2);
+
+ VdbeModuleComment((v, "CodeRangeTest: if( R%d %s R%d %s R%d ) goto lbl",
+ reg1, (arith==OP_Add ? "+" : "-"), regVal,
+ ((op==OP_Ge) ? ">=" : (op==OP_Le) ? "<=" : (op==OP_Gt) ? ">" : "<"), reg2
+ ));
+
+ /* Register reg1 currently contains csr1.peerVal (the peer-value from csr1).
+ ** This block adds (or subtracts for DESC) the numeric value in regVal
+ ** from it. Or, if reg1 is not numeric (it is a NULL, a text value or a blob),
+ ** then leave reg1 as it is. In pseudo-code, this is implemented as:
+ **
+ ** if( reg1>='' ) goto addrGe;
+ ** reg1 = reg1 +/- regVal
+ ** addrGe:
+ **
+ ** Since all strings and blobs are greater-than-or-equal-to an empty string,
+ ** the add/subtract is skipped for these, as required. If reg1 is a NULL,
+ ** then the arithmetic is performed, but since adding or subtracting from
+ ** NULL is always NULL anyway, this case is handled as required too. */
+ sqlite3VdbeAddOp4(v, OP_String8, 0, regString, 0, "", P4_STATIC);
+ addrGe = sqlite3VdbeAddOp3(v, OP_Ge, regString, 0, reg1);
+ VdbeCoverage(v);
+ sqlite3VdbeAddOp3(v, arith, regVal, reg1, reg1);
+ sqlite3VdbeJumpHere(v, addrGe);
+
+ /* If the BIGNULL flag is set for the ORDER BY, then it is required to
+ ** consider NULL values to be larger than all other values, instead of
+ ** the usual smaller. The VDBE opcodes OP_Ge and so on do not handle this
+ ** (and adding that capability causes a performance regression), so
+ ** instead if the BIGNULL flag is set then cases where either reg1 or
+ ** reg2 are NULL are handled separately in the following block. The code
+ ** generated is equivalent to:
+ **
+ ** if( reg1 IS NULL ){
+ ** if( op==OP_Ge ) goto lbl;
+ ** if( op==OP_Gt && reg2 IS NOT NULL ) goto lbl;
+ ** if( op==OP_Le && reg2 IS NULL ) goto lbl;
+ ** }else if( reg2 IS NULL ){
+ ** if( op==OP_Le ) goto lbl;
+ ** }
+ **
+ ** Additionally, if either reg1 or reg2 are NULL but the jump to lbl is
+ ** not taken, control jumps over the comparison operator coded below this
+ ** block. */
+ if( pOrderBy->a[0].sortFlags & KEYINFO_ORDER_BIGNULL ){
+ /* This block runs if reg1 contains a NULL. */
+ int addr = sqlite3VdbeAddOp1(v, OP_NotNull, reg1); VdbeCoverage(v);
+ switch( op ){
+ case OP_Ge:
+ sqlite3VdbeAddOp2(v, OP_Goto, 0, lbl);
+ break;
+ case OP_Gt:
+ sqlite3VdbeAddOp2(v, OP_NotNull, reg2, lbl);
+ VdbeCoverage(v);
+ break;
+ case OP_Le:
+ sqlite3VdbeAddOp2(v, OP_IsNull, reg2, lbl);
+ VdbeCoverage(v);
+ break;
+ default: assert( op==OP_Lt ); /* no-op */ break;
+ }
+ sqlite3VdbeAddOp2(v, OP_Goto, 0, sqlite3VdbeCurrentAddr(v)+3);
+
+ /* This block runs if reg1 is not NULL, but reg2 is. */
+ sqlite3VdbeJumpHere(v, addr);
+ sqlite3VdbeAddOp2(v, OP_IsNull, reg2, lbl); VdbeCoverage(v);
+ if( op==OP_Gt || op==OP_Ge ){
+ sqlite3VdbeChangeP2(v, -1, sqlite3VdbeCurrentAddr(v)+1);
+ }
+ }
+
+ /* Compare registers reg2 and reg1, taking the jump if required. Note that
+ ** control skips over this test if the BIGNULL flag is set and either
+ ** reg1 or reg2 contain a NULL value. */
+ sqlite3VdbeAddOp3(v, op, reg2, lbl, reg1); VdbeCoverage(v);
+ sqlite3VdbeChangeP5(v, SQLITE_NULLEQ);
+
+ assert( op==OP_Ge || op==OP_Gt || op==OP_Lt || op==OP_Le );
+ testcase(op==OP_Ge); VdbeCoverageIf(v, op==OP_Ge);
+ testcase(op==OP_Lt); VdbeCoverageIf(v, op==OP_Lt);
+ testcase(op==OP_Le); VdbeCoverageIf(v, op==OP_Le);
+ testcase(op==OP_Gt); VdbeCoverageIf(v, op==OP_Gt);
+ sqlite3ReleaseTempReg(pParse, reg1);
+ sqlite3ReleaseTempReg(pParse, reg2);
+
+ VdbeModuleComment((v, "CodeRangeTest: end"));
+}
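+
+/*
+** As a concrete illustration (values invented): with "ORDER BY y DESC
+** RANGE BETWEEN 5 PRECEDING AND CURRENT ROW", an OP_Ge is inverted to
+** OP_Le and the addition becomes a subtraction, so the test generated
+** above is equivalent to:
+**
+**   if( csr1.peerVal - 5 <= csr2.peerVal ) goto lbl;
+*/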
+
+/*
+** Helper function for sqlite3WindowCodeStep(). Each call to this function
+** generates VM code for a single RETURN_ROW, AGGSTEP or AGGINVERSE
+** operation. Refer to the header comment for sqlite3WindowCodeStep() for
+** details.
+*/
+static int windowCodeOp(
+ WindowCodeArg *p, /* Context object */
+ int op, /* WINDOW_RETURN_ROW, AGGSTEP or AGGINVERSE */
+ int regCountdown, /* Register for OP_IfPos countdown */
+ int jumpOnEof /* Jump here if stepped cursor reaches EOF */
+){
+ int csr, reg;
+ Parse *pParse = p->pParse;
+ Window *pMWin = p->pMWin;
+ int ret = 0;
+ Vdbe *v = p->pVdbe;
+ int addrContinue = 0;
+ int bPeer = (pMWin->eFrmType!=TK_ROWS);
+
+ int lblDone = sqlite3VdbeMakeLabel(pParse);
+ int addrNextRange = 0;
+
+ /* Special case - WINDOW_AGGINVERSE is always a no-op if the frame
+ ** starts with UNBOUNDED PRECEDING. */
+ if( op==WINDOW_AGGINVERSE && pMWin->eStart==TK_UNBOUNDED ){
+ assert( regCountdown==0 && jumpOnEof==0 );
+ return 0;
+ }
+
+ if( regCountdown>0 ){
+ if( pMWin->eFrmType==TK_RANGE ){
+ addrNextRange = sqlite3VdbeCurrentAddr(v);
+ assert( op==WINDOW_AGGINVERSE || op==WINDOW_AGGSTEP );
+ if( op==WINDOW_AGGINVERSE ){
+ if( pMWin->eStart==TK_FOLLOWING ){
+ windowCodeRangeTest(
+ p, OP_Le, p->current.csr, regCountdown, p->start.csr, lblDone
+ );
+ }else{
+ windowCodeRangeTest(
+ p, OP_Ge, p->start.csr, regCountdown, p->current.csr, lblDone
+ );
+ }
+ }else{
+ windowCodeRangeTest(
+ p, OP_Gt, p->end.csr, regCountdown, p->current.csr, lblDone
+ );
+ }
+ }else{
+ sqlite3VdbeAddOp3(v, OP_IfPos, regCountdown, lblDone, 1);
+ VdbeCoverage(v);
+ }
+ }
+
+ if( op==WINDOW_RETURN_ROW && pMWin->regStartRowid==0 ){
+ windowAggFinal(p, 0);
+ }
+ addrContinue = sqlite3VdbeCurrentAddr(v);
+
+ /* If this is a (RANGE BETWEEN a FOLLOWING AND b FOLLOWING) or
+ ** (RANGE BETWEEN b PRECEDING AND a PRECEDING) frame, ensure the
+ ** start cursor does not advance past the end cursor within the
+ ** temporary table. It otherwise might, if (a>b). */
+ if( pMWin->eStart==pMWin->eEnd && regCountdown
+ && pMWin->eFrmType==TK_RANGE && op==WINDOW_AGGINVERSE
+ ){
+ int regRowid1 = sqlite3GetTempReg(pParse);
+ int regRowid2 = sqlite3GetTempReg(pParse);
+ sqlite3VdbeAddOp2(v, OP_Rowid, p->start.csr, regRowid1);
+ sqlite3VdbeAddOp2(v, OP_Rowid, p->end.csr, regRowid2);
+ sqlite3VdbeAddOp3(v, OP_Ge, regRowid2, lblDone, regRowid1);
+ VdbeCoverage(v);
+ sqlite3ReleaseTempReg(pParse, regRowid1);
+ sqlite3ReleaseTempReg(pParse, regRowid2);
+ assert( pMWin->eStart==TK_PRECEDING || pMWin->eStart==TK_FOLLOWING );
+ }
+
+ switch( op ){
+ case WINDOW_RETURN_ROW:
+ csr = p->current.csr;
+ reg = p->current.reg;
+ windowReturnOneRow(p);
+ break;
+
+ case WINDOW_AGGINVERSE:
+ csr = p->start.csr;
+ reg = p->start.reg;
+ if( pMWin->regStartRowid ){
+ assert( pMWin->regEndRowid );
+ sqlite3VdbeAddOp2(v, OP_AddImm, pMWin->regStartRowid, 1);
+ }else{
+ windowAggStep(p, pMWin, csr, 1, p->regArg);
+ }
+ break;
+
+ default:
+ assert( op==WINDOW_AGGSTEP );
+ csr = p->end.csr;
+ reg = p->end.reg;
+ if( pMWin->regStartRowid ){
+ assert( pMWin->regEndRowid );
+ sqlite3VdbeAddOp2(v, OP_AddImm, pMWin->regEndRowid, 1);
+ }else{
+ windowAggStep(p, pMWin, csr, 0, p->regArg);
+ }
+ break;
+ }
+
+ if( op==p->eDelete ){
+ sqlite3VdbeAddOp1(v, OP_Delete, csr);
+ sqlite3VdbeChangeP5(v, OPFLAG_SAVEPOSITION);
+ }
+
+ if( jumpOnEof ){
+ sqlite3VdbeAddOp2(v, OP_Next, csr, sqlite3VdbeCurrentAddr(v)+2);
+ VdbeCoverage(v);
+ ret = sqlite3VdbeAddOp0(v, OP_Goto);
+ }else{
+ sqlite3VdbeAddOp2(v, OP_Next, csr, sqlite3VdbeCurrentAddr(v)+1+bPeer);
+ VdbeCoverage(v);
+ if( bPeer ){
+ sqlite3VdbeAddOp2(v, OP_Goto, 0, lblDone);
+ }
+ }
+
+ if( bPeer ){
+ int nReg = (pMWin->pOrderBy ? pMWin->pOrderBy->nExpr : 0);
+ int regTmp = (nReg ? sqlite3GetTempRange(pParse, nReg) : 0);
+ windowReadPeerValues(p, csr, regTmp);
+ windowIfNewPeer(pParse, pMWin->pOrderBy, regTmp, reg, addrContinue);
+ sqlite3ReleaseTempRange(pParse, regTmp, nReg);
+ }
+
+ if( addrNextRange ){
+ sqlite3VdbeAddOp2(v, OP_Goto, 0, addrNextRange);
+ }
+ sqlite3VdbeResolveLabel(v, lblDone);
+ return ret;
+}
+
+
+/*
+** Allocate and return a duplicate of the Window object indicated by the
+** third argument. Set the Window.pOwner field of the new object to
+** pOwner.
+*/
+SQLITE_PRIVATE Window *sqlite3WindowDup(sqlite3 *db, Expr *pOwner, Window *p){
+ Window *pNew = 0;
+ if( ALWAYS(p) ){
+ pNew = sqlite3DbMallocZero(db, sizeof(Window));
+ if( pNew ){
+ pNew->zName = sqlite3DbStrDup(db, p->zName);
+ pNew->zBase = sqlite3DbStrDup(db, p->zBase);
+ pNew->pFilter = sqlite3ExprDup(db, p->pFilter, 0);
+ pNew->pFunc = p->pFunc;
+ pNew->pPartition = sqlite3ExprListDup(db, p->pPartition, 0);
+ pNew->pOrderBy = sqlite3ExprListDup(db, p->pOrderBy, 0);
+ pNew->eFrmType = p->eFrmType;
+ pNew->eEnd = p->eEnd;
+ pNew->eStart = p->eStart;
+ pNew->eExclude = p->eExclude;
+ pNew->regResult = p->regResult;
+ pNew->regAccum = p->regAccum;
+ pNew->iArgCol = p->iArgCol;
+ pNew->iEphCsr = p->iEphCsr;
+ pNew->bExprArgs = p->bExprArgs;
+ pNew->pStart = sqlite3ExprDup(db, p->pStart, 0);
+ pNew->pEnd = sqlite3ExprDup(db, p->pEnd, 0);
+ pNew->pOwner = pOwner;
+ pNew->bImplicitFrame = p->bImplicitFrame;
+ }
+ }
+ return pNew;
+}
+
+/*
+** Return a copy of the linked list of Window objects passed as the
+** second argument.
+*/
+SQLITE_PRIVATE Window *sqlite3WindowListDup(sqlite3 *db, Window *p){
+ Window *pWin;
+ Window *pRet = 0;
+ Window **pp = &pRet;
+
+ for(pWin=p; pWin; pWin=pWin->pNextWin){
+ *pp = sqlite3WindowDup(db, 0, pWin);
+ if( *pp==0 ) break;
+ pp = &((*pp)->pNextWin);
+ }
+
+ return pRet;
+}
+
+/*
+** Return true if it can be determined at compile time that expression
+** pExpr evaluates to a value that, when cast to an integer, is greater
+** than zero. False otherwise.
+**
+** If an OOM error occurs, this function sets the Parse.db.mallocFailed
+** flag and returns zero.
+*/
+static int windowExprGtZero(Parse *pParse, Expr *pExpr){
+ int ret = 0;
+ sqlite3 *db = pParse->db;
+ sqlite3_value *pVal = 0;
+ sqlite3ValueFromExpr(db, pExpr, db->enc, SQLITE_AFF_NUMERIC, &pVal);
+ if( pVal && sqlite3_value_int(pVal)>0 ){
+ ret = 1;
+ }
+ sqlite3ValueFree(pVal);
+ return ret;
+}
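+
+/*
+** For example, given the (hypothetical) frame "ROWS BETWEEN 1 FOLLOWING
+** AND UNBOUNDED FOLLOWING", this function returns true for the literal
+** "1", allowing sqlite3WindowCodeStep() below to delete each cached row
+** as soon as it has been returned (eDelete==WINDOW_RETURN_ROW).
+*/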
+
+/*
+** sqlite3WhereBegin() has already been called for the SELECT statement
+** passed as the second argument when this function is invoked. It generates
+** code to populate the Window.regResult register for each window function
+** and invoke the sub-routine at instruction addrGosub once for each row.
+** sqlite3WhereEnd() is always called before returning.
+**
+** This function handles several different types of window frames, which
+** require slightly different processing. The following pseudo code is
+** used to implement window frames of the form:
+**
+** ROWS BETWEEN <expr1> PRECEDING AND <expr2> FOLLOWING
+**
+** Other window frame types use variants of the following:
+**
+** ... loop started by sqlite3WhereBegin() ...
+** if( new partition ){
+** Gosub flush
+** }
+** Insert new row into eph table.
+**
+** if( first row of partition ){
+** // Rewind three cursors, all open on the eph table.
+** Rewind(csrEnd);
+** Rewind(csrStart);
+** Rewind(csrCurrent);
+**
+** regEnd = <expr2> // FOLLOWING expression
+** regStart = <expr1> // PRECEDING expression
+** }else{
+** // First time this branch is taken, the eph table contains two
+** // rows. The first row in the partition, which all three cursors
+** // currently point to, and the following row.
+** AGGSTEP
+** if( (regEnd--)<=0 ){
+** RETURN_ROW
+** if( (regStart--)<=0 ){
+** AGGINVERSE
+** }
+** }
+** }
+** }
+** flush:
+** AGGSTEP
+** while( 1 ){
+**     RETURN_ROW
+** if( csrCurrent is EOF ) break;
+** if( (regStart--)<=0 ){
+** AggInverse(csrStart)
+** Next(csrStart)
+** }
+** }
+**
+** The pseudo-code above uses the following shorthand:
+**
+** AGGSTEP: invoke the aggregate xStep() function for each window function
+** with arguments read from the current row of cursor csrEnd, then
+** step cursor csrEnd forward one row (i.e. sqlite3BtreeNext()).
+**
+** RETURN_ROW: return a row to the caller based on the contents of the
+** current row of csrCurrent and the current state of all
+** aggregates. Then step cursor csrCurrent forward one row.
+**
+** AGGINVERSE: invoke the aggregate xInverse() function for each window
+** functions with arguments read from the current row of cursor
+** csrStart. Then step csrStart forward one row.
+**
+** There are two other ROWS window frames that are handled significantly
+** differently from the above - "BETWEEN <expr> PRECEDING AND <expr> PRECEDING"
+** and "BETWEEN <expr> FOLLOWING AND <expr> FOLLOWING". These are special
+** cases because they change the order in which the three cursors (csrStart,
+** csrCurrent and csrEnd) iterate through the ephemeral table. Cases that
+** use UNBOUNDED or CURRENT ROW are much simpler variations on one of these
+** three.
+**
+** ROWS BETWEEN <expr1> PRECEDING AND <expr2> PRECEDING
+**
+** ... loop started by sqlite3WhereBegin() ...
+** if( new partition ){
+** Gosub flush
+** }
+** Insert new row into eph table.
+** if( first row of partition ){
+** Rewind(csrEnd) ; Rewind(csrStart) ; Rewind(csrCurrent)
+** regEnd = <expr2>
+** regStart = <expr1>
+** }else{
+** if( (regEnd--)<=0 ){
+** AGGSTEP
+** }
+** RETURN_ROW
+** if( (regStart--)<=0 ){
+** AGGINVERSE
+** }
+** }
+** }
+** flush:
+** if( (regEnd--)<=0 ){
+** AGGSTEP
+** }
+** RETURN_ROW
+**
+**
+** ROWS BETWEEN <expr1> FOLLOWING AND <expr2> FOLLOWING
+**
+** ... loop started by sqlite3WhereBegin() ...
+** if( new partition ){
+** Gosub flush
+** }
+** Insert new row into eph table.
+** if( first row of partition ){
+** Rewind(csrEnd) ; Rewind(csrStart) ; Rewind(csrCurrent)
+** regEnd = <expr2>
+** regStart = regEnd - <expr1>
+** }else{
+** AGGSTEP
+** if( (regEnd--)<=0 ){
+** RETURN_ROW
+** }
+** if( (regStart--)<=0 ){
+** AGGINVERSE
+** }
+** }
+** }
+** flush:
+** AGGSTEP
+** while( 1 ){
+** if( (regEnd--)<=0 ){
+** RETURN_ROW
+** if( eof ) break;
+** }
+** if( (regStart--)<=0 ){
+** AGGINVERSE
+** if( eof ) break
+** }
+** }
+** while( !eof csrCurrent ){
+** RETURN_ROW
+** }
+**
+** For the most part, the patterns above are adapted to support UNBOUNDED by
+** assuming that it is equivalent to "infinity PRECEDING/FOLLOWING" and
+** CURRENT ROW by assuming that it is equivalent to "0 PRECEDING/FOLLOWING".
+** This is optimized of course - branches that will never be taken and
+** conditions that are always true are omitted from the VM code. The only
+** exceptional case is:
+**
+** ROWS BETWEEN <expr1> FOLLOWING AND UNBOUNDED FOLLOWING
+**
+** ... loop started by sqlite3WhereBegin() ...
+** if( new partition ){
+** Gosub flush
+** }
+** Insert new row into eph table.
+** if( first row of partition ){
+** Rewind(csrEnd) ; Rewind(csrStart) ; Rewind(csrCurrent)
+** regStart = <expr1>
+** }else{
+** AGGSTEP
+** }
+** }
+** flush:
+** AGGSTEP
+** while( 1 ){
+** if( (regStart--)<=0 ){
+** AGGINVERSE
+** if( eof ) break
+** }
+** RETURN_ROW
+** }
+** while( !eof csrCurrent ){
+** RETURN_ROW
+** }
+**
+** Also requiring special handling are the cases:
+**
+** ROWS BETWEEN <expr1> PRECEDING AND <expr2> PRECEDING
+** ROWS BETWEEN <expr1> FOLLOWING AND <expr2> FOLLOWING
+**
+** when the frame would be empty - (expr1 < expr2) for the PRECEDING case,
+** or (expr1 > expr2) for the FOLLOWING case. This is detected at runtime,
+** not by this function.
+** To handle this case, the pseudo-code programs depicted above are modified
+** slightly to be:
+**
+** ... loop started by sqlite3WhereBegin() ...
+** if( new partition ){
+** Gosub flush
+** }
+** Insert new row into eph table.
+** if( first row of partition ){
+** Rewind(csrEnd) ; Rewind(csrStart) ; Rewind(csrCurrent)
+** regEnd = <expr2>
+** regStart = <expr1>
+** if( regEnd < regStart ){
+** RETURN_ROW
+** delete eph table contents
+** continue
+** }
+** ...
+**
+** The new "continue" statement in the above jumps to the next iteration
+** of the outer loop - the one started by sqlite3WhereBegin().
+**
+** The various GROUPS cases are implemented using the same patterns as
+** ROWS. The VM code is modified slightly so that:
+**
+** 1. The else branch in the main loop is only taken if the row just
+** added to the ephemeral table is the start of a new group. In
+** other words, it becomes:
+**
+** ... loop started by sqlite3WhereBegin() ...
+** if( new partition ){
+** Gosub flush
+** }
+** Insert new row into eph table.
+** if( first row of partition ){
+** Rewind(csrEnd) ; Rewind(csrStart) ; Rewind(csrCurrent)
+** regEnd = <expr2>
+** regStart = <expr1>
+** }else if( new group ){
+** ...
+** }
+** }
+**
+** 2. Instead of processing a single row, each RETURN_ROW, AGGSTEP or
+** AGGINVERSE step processes the current row of the relevant cursor and
+** all subsequent rows belonging to the same group.
+**
+** RANGE window frames are a little different again. As for GROUPS, the
+** main loop runs once per group only. And RETURN_ROW, AGGSTEP and AGGINVERSE
+** deal in groups instead of rows. As for ROWS and GROUPS, there are three
+** basic cases:
+**
+** RANGE BETWEEN <expr1> PRECEDING AND <expr2> FOLLOWING
+**
+** ... loop started by sqlite3WhereBegin() ...
+** if( new partition ){
+** Gosub flush
+** }
+** Insert new row into eph table.
+** if( first row of partition ){
+** Rewind(csrEnd) ; Rewind(csrStart) ; Rewind(csrCurrent)
+** regEnd = <expr2>
+** regStart = <expr1>
+** }else{
+** AGGSTEP
+** while( (csrCurrent.key + regEnd) < csrEnd.key ){
+** RETURN_ROW
+**           while( (csrStart.key + regStart) < csrCurrent.key ){
+** AGGINVERSE
+** }
+** }
+** }
+** }
+** flush:
+** AGGSTEP
+** while( 1 ){
+**     RETURN_ROW
+** if( csrCurrent is EOF ) break;
+**     while( (csrStart.key + regStart) < csrCurrent.key ){
+** AGGINVERSE
+** }
+** }
+**
+** In the above notation, "csr.key" means the current value of the ORDER BY
+** expression (there is only ever 1 for a RANGE that uses an <expr> FOLLOWING
+** or <expr PRECEDING) read from cursor csr.
+**
+** RANGE BETWEEN <expr1> PRECEDING AND <expr2> PRECEDING
+**
+** ... loop started by sqlite3WhereBegin() ...
+** if( new partition ){
+** Gosub flush
+** }
+** Insert new row into eph table.
+** if( first row of partition ){
+** Rewind(csrEnd) ; Rewind(csrStart) ; Rewind(csrCurrent)
+** regEnd = <expr2>
+** regStart = <expr1>
+** }else{
+** while( (csrEnd.key + regEnd) <= csrCurrent.key ){
+** AGGSTEP
+** }
+** while( (csrStart.key + regStart) < csrCurrent.key ){
+** AGGINVERSE
+** }
+** RETURN_ROW
+** }
+** }
+** flush:
+** while( (csrEnd.key + regEnd) <= csrCurrent.key ){
+** AGGSTEP
+** }
+** while( (csrStart.key + regStart) < csrCurrent.key ){
+** AGGINVERSE
+** }
+** RETURN_ROW
+**
+** RANGE BETWEEN <expr1> FOLLOWING AND <expr2> FOLLOWING
+**
+** ... loop started by sqlite3WhereBegin() ...
+** if( new partition ){
+** Gosub flush
+** }
+** Insert new row into eph table.
+** if( first row of partition ){
+** Rewind(csrEnd) ; Rewind(csrStart) ; Rewind(csrCurrent)
+** regEnd = <expr2>
+** regStart = <expr1>
+** }else{
+** AGGSTEP
+** while( (csrCurrent.key + regEnd) < csrEnd.key ){
+** while( (csrCurrent.key + regStart) > csrStart.key ){
+** AGGINVERSE
+** }
+** RETURN_ROW
+** }
+** }
+** }
+** flush:
+** AGGSTEP
+** while( 1 ){
+** while( (csrCurrent.key + regStart) > csrStart.key ){
+** AGGINVERSE
+** if( eof ) break "while( 1 )" loop.
+** }
+** RETURN_ROW
+** }
+** while( !eof csrCurrent ){
+** RETURN_ROW
+** }
+**
+** The text above leaves out many details. Refer to the code and comments
+** below for a more complete picture.
+*/
+SQLITE_PRIVATE void sqlite3WindowCodeStep(
+ Parse *pParse, /* Parse context */
+ Select *p, /* Rewritten SELECT statement */
+ WhereInfo *pWInfo, /* Context returned by sqlite3WhereBegin() */
+ int regGosub, /* Register for OP_Gosub */
+ int addrGosub /* OP_Gosub here to return each row */
+){
+ Window *pMWin = p->pWin;
+ ExprList *pOrderBy = pMWin->pOrderBy;
+ Vdbe *v = sqlite3GetVdbe(pParse);
+ int csrWrite; /* Cursor used to write to eph. table */
+ int csrInput = p->pSrc->a[0].iCursor; /* Cursor of sub-select */
+ int nInput = p->pSrc->a[0].pTab->nCol; /* Number of cols returned by sub */
+ int iInput; /* To iterate through sub cols */
+ int addrNe; /* Address of OP_Ne */
+ int addrGosubFlush = 0; /* Address of OP_Gosub to flush: */
+ int addrInteger = 0; /* Address of OP_Integer */
+ int addrEmpty; /* Address of OP_Rewind in flush: */
+ int regNew; /* Array of registers holding new input row */
+ int regRecord; /* regNew array in record form */
+ int regRowid; /* Rowid for regRecord in eph table */
+ int regNewPeer = 0; /* Peer values for new row (part of regNew) */
+ int regPeer = 0; /* Peer values for current row */
+ int regFlushPart = 0; /* Register for "Gosub flush_partition" */
+ WindowCodeArg s; /* Context object for sub-routines */
+ int lblWhereEnd; /* Label just before sqlite3WhereEnd() code */
+ int regStart = 0; /* Value of <expr> PRECEDING */
+ int regEnd = 0; /* Value of <expr> FOLLOWING */
+
+ assert( pMWin->eStart==TK_PRECEDING || pMWin->eStart==TK_CURRENT
+ || pMWin->eStart==TK_FOLLOWING || pMWin->eStart==TK_UNBOUNDED
+ );
+ assert( pMWin->eEnd==TK_FOLLOWING || pMWin->eEnd==TK_CURRENT
+ || pMWin->eEnd==TK_UNBOUNDED || pMWin->eEnd==TK_PRECEDING
+ );
+ assert( pMWin->eExclude==0 || pMWin->eExclude==TK_CURRENT
+ || pMWin->eExclude==TK_GROUP || pMWin->eExclude==TK_TIES
+ || pMWin->eExclude==TK_NO
+ );
+
+ lblWhereEnd = sqlite3VdbeMakeLabel(pParse);
+
+ /* Fill in the context object */
+ memset(&s, 0, sizeof(WindowCodeArg));
+ s.pParse = pParse;
+ s.pMWin = pMWin;
+ s.pVdbe = v;
+ s.regGosub = regGosub;
+ s.addrGosub = addrGosub;
+ s.current.csr = pMWin->iEphCsr;
+ csrWrite = s.current.csr+1;
+ s.start.csr = s.current.csr+2;
+ s.end.csr = s.current.csr+3;
+
+ /* Figure out when rows may be deleted from the ephemeral table. There
+ ** are four options - they may never be deleted (eDelete==0), they may
+ ** be deleted as soon as they are no longer part of the window frame
+  ** (eDelete==WINDOW_AGGINVERSE), they may be deleted after the row
+ ** has been returned to the caller (WINDOW_RETURN_ROW), or they may
+ ** be deleted after they enter the frame (WINDOW_AGGSTEP). */
+ switch( pMWin->eStart ){
+ case TK_FOLLOWING:
+ if( pMWin->eFrmType!=TK_RANGE
+ && windowExprGtZero(pParse, pMWin->pStart)
+ ){
+ s.eDelete = WINDOW_RETURN_ROW;
+ }
+ break;
+ case TK_UNBOUNDED:
+ if( windowCacheFrame(pMWin)==0 ){
+ if( pMWin->eEnd==TK_PRECEDING ){
+ if( pMWin->eFrmType!=TK_RANGE
+ && windowExprGtZero(pParse, pMWin->pEnd)
+ ){
+ s.eDelete = WINDOW_AGGSTEP;
+ }
+ }else{
+ s.eDelete = WINDOW_RETURN_ROW;
+ }
+ }
+ break;
+ default:
+ s.eDelete = WINDOW_AGGINVERSE;
+ break;
+ }
+
+ /* Allocate registers for the array of values from the sub-query, the
+  ** same values in record form, and the rowid used to insert said record
+ ** into the ephemeral table. */
+ regNew = pParse->nMem+1;
+ pParse->nMem += nInput;
+ regRecord = ++pParse->nMem;
+ regRowid = ++pParse->nMem;
+
+ /* If the window frame contains an "<expr> PRECEDING" or "<expr> FOLLOWING"
+ ** clause, allocate registers to store the results of evaluating each
+ ** <expr>. */
+ if( pMWin->eStart==TK_PRECEDING || pMWin->eStart==TK_FOLLOWING ){
+ regStart = ++pParse->nMem;
+ }
+ if( pMWin->eEnd==TK_PRECEDING || pMWin->eEnd==TK_FOLLOWING ){
+ regEnd = ++pParse->nMem;
+ }
+
+ /* If this is not a "ROWS BETWEEN ..." frame, then allocate arrays of
+ ** registers to store copies of the ORDER BY expressions (peer values)
+ ** for the main loop, and for each cursor (start, current and end). */
+ if( pMWin->eFrmType!=TK_ROWS ){
+ int nPeer = (pOrderBy ? pOrderBy->nExpr : 0);
+ regNewPeer = regNew + pMWin->nBufferCol;
+ if( pMWin->pPartition ) regNewPeer += pMWin->pPartition->nExpr;
+ regPeer = pParse->nMem+1; pParse->nMem += nPeer;
+ s.start.reg = pParse->nMem+1; pParse->nMem += nPeer;
+ s.current.reg = pParse->nMem+1; pParse->nMem += nPeer;
+ s.end.reg = pParse->nMem+1; pParse->nMem += nPeer;
+ }
+
+ /* Load the column values for the row returned by the sub-select
+ ** into an array of registers starting at regNew. Assemble them into
+ ** a record in register regRecord. */
+ for(iInput=0; iInput<nInput; iInput++){
+ sqlite3VdbeAddOp3(v, OP_Column, csrInput, iInput, regNew+iInput);
+ }
+ sqlite3VdbeAddOp3(v, OP_MakeRecord, regNew, nInput, regRecord);
+
+ /* An input row has just been read into an array of registers starting
+ ** at regNew. If the window has a PARTITION clause, this block generates
+ ** VM code to check if the input row is the start of a new partition.
+ ** If so, it does an OP_Gosub to an address to be filled in later. The
+ ** address of the OP_Gosub is stored in local variable addrGosubFlush. */
+ if( pMWin->pPartition ){
+ int addr;
+ ExprList *pPart = pMWin->pPartition;
+ int nPart = pPart->nExpr;
+ int regNewPart = regNew + pMWin->nBufferCol;
+ KeyInfo *pKeyInfo = sqlite3KeyInfoFromExprList(pParse, pPart, 0, 0);
+
+ regFlushPart = ++pParse->nMem;
+ addr = sqlite3VdbeAddOp3(v, OP_Compare, regNewPart, pMWin->regPart, nPart);
+ sqlite3VdbeAppendP4(v, (void*)pKeyInfo, P4_KEYINFO);
+ sqlite3VdbeAddOp3(v, OP_Jump, addr+2, addr+4, addr+2);
+ VdbeCoverageEqNe(v);
+ addrGosubFlush = sqlite3VdbeAddOp1(v, OP_Gosub, regFlushPart);
+ VdbeComment((v, "call flush_partition"));
+ sqlite3VdbeAddOp3(v, OP_Copy, regNewPart, pMWin->regPart, nPart-1);
+ }
+
+ /* Insert the new row into the ephemeral table */
+ sqlite3VdbeAddOp2(v, OP_NewRowid, csrWrite, regRowid);
+ sqlite3VdbeAddOp3(v, OP_Insert, csrWrite, regRecord, regRowid);
+ addrNe = sqlite3VdbeAddOp3(v, OP_Ne, pMWin->regOne, 0, regRowid);
+ VdbeCoverageNeverNull(v);
+
+ /* This block is run for the first row of each partition */
+ s.regArg = windowInitAccum(pParse, pMWin);
+
+ if( regStart ){
+ sqlite3ExprCode(pParse, pMWin->pStart, regStart);
+ windowCheckValue(pParse, regStart, 0 + (pMWin->eFrmType==TK_RANGE?3:0));
+ }
+ if( regEnd ){
+ sqlite3ExprCode(pParse, pMWin->pEnd, regEnd);
+ windowCheckValue(pParse, regEnd, 1 + (pMWin->eFrmType==TK_RANGE?3:0));
+ }
+
+ if( pMWin->eFrmType!=TK_RANGE && pMWin->eStart==pMWin->eEnd && regStart ){
+ int op = ((pMWin->eStart==TK_FOLLOWING) ? OP_Ge : OP_Le);
+ int addrGe = sqlite3VdbeAddOp3(v, op, regStart, 0, regEnd);
+ VdbeCoverageNeverNullIf(v, op==OP_Ge); /* NeverNull because bound <expr> */
+ VdbeCoverageNeverNullIf(v, op==OP_Le); /* values previously checked */
+ windowAggFinal(&s, 0);
+ sqlite3VdbeAddOp2(v, OP_Rewind, s.current.csr, 1);
+ VdbeCoverageNeverTaken(v);
+ windowReturnOneRow(&s);
+ sqlite3VdbeAddOp1(v, OP_ResetSorter, s.current.csr);
+ sqlite3VdbeAddOp2(v, OP_Goto, 0, lblWhereEnd);
+ sqlite3VdbeJumpHere(v, addrGe);
+ }
+ if( pMWin->eStart==TK_FOLLOWING && pMWin->eFrmType!=TK_RANGE && regEnd ){
+ assert( pMWin->eEnd==TK_FOLLOWING );
+ sqlite3VdbeAddOp3(v, OP_Subtract, regStart, regEnd, regStart);
+ }
+
+ if( pMWin->eStart!=TK_UNBOUNDED ){
+ sqlite3VdbeAddOp2(v, OP_Rewind, s.start.csr, 1);
+ VdbeCoverageNeverTaken(v);
+ }
+ sqlite3VdbeAddOp2(v, OP_Rewind, s.current.csr, 1);
+ VdbeCoverageNeverTaken(v);
+ sqlite3VdbeAddOp2(v, OP_Rewind, s.end.csr, 1);
+ VdbeCoverageNeverTaken(v);
+ if( regPeer && pOrderBy ){
+ sqlite3VdbeAddOp3(v, OP_Copy, regNewPeer, regPeer, pOrderBy->nExpr-1);
+ sqlite3VdbeAddOp3(v, OP_Copy, regPeer, s.start.reg, pOrderBy->nExpr-1);
+ sqlite3VdbeAddOp3(v, OP_Copy, regPeer, s.current.reg, pOrderBy->nExpr-1);
+ sqlite3VdbeAddOp3(v, OP_Copy, regPeer, s.end.reg, pOrderBy->nExpr-1);
+ }
+
+ sqlite3VdbeAddOp2(v, OP_Goto, 0, lblWhereEnd);
+
+ sqlite3VdbeJumpHere(v, addrNe);
+
+ /* Beginning of the block executed for the second and subsequent rows. */
+ if( regPeer ){
+ windowIfNewPeer(pParse, pOrderBy, regNewPeer, regPeer, lblWhereEnd);
+ }
+ if( pMWin->eStart==TK_FOLLOWING ){
+ windowCodeOp(&s, WINDOW_AGGSTEP, 0, 0);
+ if( pMWin->eEnd!=TK_UNBOUNDED ){
+ if( pMWin->eFrmType==TK_RANGE ){
+ int lbl = sqlite3VdbeMakeLabel(pParse);
+ int addrNext = sqlite3VdbeCurrentAddr(v);
+ windowCodeRangeTest(&s, OP_Ge, s.current.csr, regEnd, s.end.csr, lbl);
+ windowCodeOp(&s, WINDOW_AGGINVERSE, regStart, 0);
+ windowCodeOp(&s, WINDOW_RETURN_ROW, 0, 0);
+ sqlite3VdbeAddOp2(v, OP_Goto, 0, addrNext);
+ sqlite3VdbeResolveLabel(v, lbl);
+ }else{
+ windowCodeOp(&s, WINDOW_RETURN_ROW, regEnd, 0);
+ windowCodeOp(&s, WINDOW_AGGINVERSE, regStart, 0);
+ }
+ }
+ }else
+ if( pMWin->eEnd==TK_PRECEDING ){
+ int bRPS = (pMWin->eStart==TK_PRECEDING && pMWin->eFrmType==TK_RANGE);
+ windowCodeOp(&s, WINDOW_AGGSTEP, regEnd, 0);
+ if( bRPS ) windowCodeOp(&s, WINDOW_AGGINVERSE, regStart, 0);
+ windowCodeOp(&s, WINDOW_RETURN_ROW, 0, 0);
+ if( !bRPS ) windowCodeOp(&s, WINDOW_AGGINVERSE, regStart, 0);
+ }else{
+ int addr = 0;
+ windowCodeOp(&s, WINDOW_AGGSTEP, 0, 0);
+ if( pMWin->eEnd!=TK_UNBOUNDED ){
+ if( pMWin->eFrmType==TK_RANGE ){
+ int lbl = 0;
+ addr = sqlite3VdbeCurrentAddr(v);
+ if( regEnd ){
+ lbl = sqlite3VdbeMakeLabel(pParse);
+ windowCodeRangeTest(&s, OP_Ge, s.current.csr, regEnd, s.end.csr, lbl);
+ }
+ windowCodeOp(&s, WINDOW_RETURN_ROW, 0, 0);
+ windowCodeOp(&s, WINDOW_AGGINVERSE, regStart, 0);
+ if( regEnd ){
+ sqlite3VdbeAddOp2(v, OP_Goto, 0, addr);
+ sqlite3VdbeResolveLabel(v, lbl);
+ }
+ }else{
+ if( regEnd ){
+ addr = sqlite3VdbeAddOp3(v, OP_IfPos, regEnd, 0, 1);
+ VdbeCoverage(v);
+ }
+ windowCodeOp(&s, WINDOW_RETURN_ROW, 0, 0);
+ windowCodeOp(&s, WINDOW_AGGINVERSE, regStart, 0);
+ if( regEnd ) sqlite3VdbeJumpHere(v, addr);
+ }
+ }
+ }
+
+ /* End of the main input loop */
+ sqlite3VdbeResolveLabel(v, lblWhereEnd);
+ sqlite3WhereEnd(pWInfo);
+
+ /* Fall through */
+ if( pMWin->pPartition ){
+ addrInteger = sqlite3VdbeAddOp2(v, OP_Integer, 0, regFlushPart);
+ sqlite3VdbeJumpHere(v, addrGosubFlush);
+ }
+
+ addrEmpty = sqlite3VdbeAddOp1(v, OP_Rewind, csrWrite);
+ VdbeCoverage(v);
+ if( pMWin->eEnd==TK_PRECEDING ){
+ int bRPS = (pMWin->eStart==TK_PRECEDING && pMWin->eFrmType==TK_RANGE);
+ windowCodeOp(&s, WINDOW_AGGSTEP, regEnd, 0);
+ if( bRPS ) windowCodeOp(&s, WINDOW_AGGINVERSE, regStart, 0);
+ windowCodeOp(&s, WINDOW_RETURN_ROW, 0, 0);
+ }else if( pMWin->eStart==TK_FOLLOWING ){
+ int addrStart;
+ int addrBreak1;
+ int addrBreak2;
+ int addrBreak3;
+ windowCodeOp(&s, WINDOW_AGGSTEP, 0, 0);
+ if( pMWin->eFrmType==TK_RANGE ){
+ addrStart = sqlite3VdbeCurrentAddr(v);
+ addrBreak2 = windowCodeOp(&s, WINDOW_AGGINVERSE, regStart, 1);
+ addrBreak1 = windowCodeOp(&s, WINDOW_RETURN_ROW, 0, 1);
+ }else
+ if( pMWin->eEnd==TK_UNBOUNDED ){
+ addrStart = sqlite3VdbeCurrentAddr(v);
+ addrBreak1 = windowCodeOp(&s, WINDOW_RETURN_ROW, regStart, 1);
+ addrBreak2 = windowCodeOp(&s, WINDOW_AGGINVERSE, 0, 1);
+ }else{
+ assert( pMWin->eEnd==TK_FOLLOWING );
+ addrStart = sqlite3VdbeCurrentAddr(v);
+ addrBreak1 = windowCodeOp(&s, WINDOW_RETURN_ROW, regEnd, 1);
+ addrBreak2 = windowCodeOp(&s, WINDOW_AGGINVERSE, regStart, 1);
+ }
+ sqlite3VdbeAddOp2(v, OP_Goto, 0, addrStart);
+ sqlite3VdbeJumpHere(v, addrBreak2);
+ addrStart = sqlite3VdbeCurrentAddr(v);
+ addrBreak3 = windowCodeOp(&s, WINDOW_RETURN_ROW, 0, 1);
+ sqlite3VdbeAddOp2(v, OP_Goto, 0, addrStart);
+ sqlite3VdbeJumpHere(v, addrBreak1);
+ sqlite3VdbeJumpHere(v, addrBreak3);
+ }else{
+ int addrBreak;
+ int addrStart;
+ windowCodeOp(&s, WINDOW_AGGSTEP, 0, 0);
+ addrStart = sqlite3VdbeCurrentAddr(v);
+ addrBreak = windowCodeOp(&s, WINDOW_RETURN_ROW, 0, 1);
+ windowCodeOp(&s, WINDOW_AGGINVERSE, regStart, 0);
+ sqlite3VdbeAddOp2(v, OP_Goto, 0, addrStart);
+ sqlite3VdbeJumpHere(v, addrBreak);
+ }
+ sqlite3VdbeJumpHere(v, addrEmpty);
+
+ sqlite3VdbeAddOp1(v, OP_ResetSorter, s.current.csr);
+ if( pMWin->pPartition ){
+ if( pMWin->regStartRowid ){
+ sqlite3VdbeAddOp2(v, OP_Integer, 1, pMWin->regStartRowid);
+ sqlite3VdbeAddOp2(v, OP_Integer, 0, pMWin->regEndRowid);
+ }
+ sqlite3VdbeChangeP1(v, addrInteger, sqlite3VdbeCurrentAddr(v));
+ sqlite3VdbeAddOp1(v, OP_Return, regFlushPart);
+ }
+}
+
+#endif /* SQLITE_OMIT_WINDOWFUNC */
+
+/************** End of window.c **********************************************/
+/************** Begin file parse.c *******************************************/
+/*
+** 2000-05-29
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+** Driver template for the LEMON parser generator.
+**
+** The "lemon" program processes an LALR(1) input grammar file, then uses
+** this template to construct a parser. The "lemon" program inserts text
+** at each "%%" line. Also, any "P-a-r-s-e" identifer prefix (without the
+** interstitial "-" characters) contained in this template is changed into
+** the value of the %name directive from the grammar. Otherwise, the content
+** of this template is copied straight through into the generated parser
+** source file.
+**
+** The following is the concatenation of all %include directives from the
+** input grammar file:
+*/
+/* #include <stdio.h> */
+/* #include <assert.h> */
+/************ Begin %include sections from the grammar ************************/
+
+/* #include "sqliteInt.h" */
+
+/*
+** Disable all error recovery processing in the parser push-down
+** automaton.
+*/
+#define YYNOERRORRECOVERY 1
+
+/*
+** Make yytestcase() the same as testcase()
+*/
+#define yytestcase(X) testcase(X)
+
+/*
+** Indicate that sqlite3ParserFree() will never be called with a null
+** pointer.
+*/
+#define YYPARSEFREENEVERNULL 1
+
+/*
+** In the amalgamation, the parse.c file generated by lemon and the
+** tokenize.c file are concatenated. In that case, sqlite3RunParser()
+** has access to the size of the yyParser object and so the parser
+** engine can be allocated on the stack. In that case, only the
+** sqlite3ParserInit() and sqlite3ParserFinalize() routines are invoked
+** and the sqlite3ParserAlloc() and sqlite3ParserFree() routines can be
+** omitted.
+*/
+#ifdef SQLITE_AMALGAMATION
+# define sqlite3Parser_ENGINEALWAYSONSTACK 1
+#endif
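+
+/* As a rough sketch (not part of the generated parser), the two usage
+** patterns selected by the flag above look like this; sqlite3RunParser()
+** in tokenize.c is the real caller:
+**
+**   yyParser sEngine;                       // stack allocation
+**   sqlite3ParserInit(&sEngine, pParse);    // ENGINEALWAYSONSTACK
+**   ...
+**   sqlite3ParserFinalize(&sEngine);
+**
+** versus
+**
+**   void *pEngine = sqlite3ParserAlloc(sqlite3Malloc, pParse);  // heap
+**   ...
+**   sqlite3ParserFree(pEngine, sqlite3_free);
+*/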
+
+/*
+** Alternative datatype for the argument to the malloc() routine passed
+** into sqlite3ParserAlloc(). The default is size_t.
+*/
+#define YYMALLOCARGTYPE u64
+
+/*
+** An instance of the following structure describes the event of a
+** TRIGGER. "a" is the event type, one of TK_UPDATE, TK_INSERT,
+** TK_DELETE, or TK_INSTEAD. If the event is of the form
+**
+** UPDATE ON (a,b,c)
+**
+** Then the "b" IdList records the list "a,b,c".
+*/
+struct TrigEvent { int a; IdList * b; };
+
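+/*
+** An instance of this structure describes one boundary of a window frame:
+** eType is (presumably) one of TK_UNBOUNDED, TK_PRECEDING, TK_CURRENT,
+** or TK_FOLLOWING, and pExpr is the <expr> in "<expr> PRECEDING" or
+** "<expr> FOLLOWING", or NULL when the boundary has no expression.
+*/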
+struct FrameBound { int eType; Expr *pExpr; };
+
+/*
+** Disable lookaside memory allocation for objects that might be
+** shared across database connections.
+*/
+static void disableLookaside(Parse *pParse){
+ sqlite3 *db = pParse->db;
+ pParse->disableLookaside++;
+ DisableLookaside;
+}
+
+#if !defined(SQLITE_ENABLE_UPDATE_DELETE_LIMIT) \
+ && defined(SQLITE_UDL_CAPABLE_PARSER)
+/*
+** Issue an error message if an ORDER BY or LIMIT clause occurs on an
+** UPDATE or DELETE statement.
+*/
+static void updateDeleteLimitError(
+ Parse *pParse,
+ ExprList *pOrderBy,
+ Expr *pLimit
+){
+ if( pOrderBy ){
+ sqlite3ErrorMsg(pParse, "syntax error near \"ORDER BY\"");
+ }else{
+ sqlite3ErrorMsg(pParse, "syntax error near \"LIMIT\"");
+ }
+ sqlite3ExprListDelete(pParse->db, pOrderBy);
+ sqlite3ExprDelete(pParse->db, pLimit);
+}
+#endif /* !SQLITE_ENABLE_UPDATE_DELETE_LIMIT && SQLITE_UDL_CAPABLE_PARSER */
+
+
+ /*
+ ** For a compound SELECT statement, make sure p->pPrior->pNext==p for
+ ** all elements in the list. And make sure list length does not exceed
+ ** SQLITE_LIMIT_COMPOUND_SELECT.
+ */
+ static void parserDoubleLinkSelect(Parse *pParse, Select *p){
+ assert( p!=0 );
+ if( p->pPrior ){
+ Select *pNext = 0, *pLoop;
+ int mxSelect, cnt = 0;
+ for(pLoop=p; pLoop; pNext=pLoop, pLoop=pLoop->pPrior, cnt++){
+ pLoop->pNext = pNext;
+ pLoop->selFlags |= SF_Compound;
+ }
+ if( (p->selFlags & SF_MultiValue)==0 &&
+ (mxSelect = pParse->db->aLimit[SQLITE_LIMIT_COMPOUND_SELECT])>0 &&
+ cnt>mxSelect
+ ){
+ sqlite3ErrorMsg(pParse, "too many terms in compound SELECT");
+ }
+ }
+ }
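+
+  /* For example, after parsing "SELECT 1 UNION SELECT 2 UNION SELECT 3",
+  ** p is the right-most SELECT and the p->pPrior pointers link leftward;
+  ** the routine above fills in the matching pNext links so the compound
+  ** can be walked in either direction. */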
+
+
+  /* Construct and return a new Expr object from a single identifier.
+  ** Set the span of the new Expr to be the identifier that created it.
+  */
+ static Expr *tokenExpr(Parse *pParse, int op, Token t){
+ Expr *p = sqlite3DbMallocRawNN(pParse->db, sizeof(Expr)+t.n+1);
+ if( p ){
+ /* memset(p, 0, sizeof(Expr)); */
+ p->op = (u8)op;
+ p->affExpr = 0;
+ p->flags = EP_Leaf;
+ ExprClearVVAProperties(p);
+ p->iAgg = -1;
+ p->pLeft = p->pRight = 0;
+ p->x.pList = 0;
+ p->pAggInfo = 0;
+ p->y.pTab = 0;
+ p->op2 = 0;
+ p->iTable = 0;
+ p->iColumn = 0;
+ p->u.zToken = (char*)&p[1];
+ memcpy(p->u.zToken, t.z, t.n);
+ p->u.zToken[t.n] = 0;
+ if( sqlite3Isquote(p->u.zToken[0]) ){
+ sqlite3DequoteExpr(p);
+ }
+#if SQLITE_MAX_EXPR_DEPTH>0
+ p->nHeight = 1;
+#endif
+ if( IN_RENAME_OBJECT ){
+ return (Expr*)sqlite3RenameTokenMap(pParse, (void*)p, &t);
+ }
+ }
+ return p;
+ }
+
+
+ /* A routine to convert a binary TK_IS or TK_ISNOT expression into a
+ ** unary TK_ISNULL or TK_NOTNULL expression. */
+ static void binaryToUnaryIfNull(Parse *pParse, Expr *pY, Expr *pA, int op){
+ sqlite3 *db = pParse->db;
+ if( pA && pY && pY->op==TK_NULL && !IN_RENAME_OBJECT ){
+ pA->op = (u8)op;
+ sqlite3ExprDelete(db, pA->pRight);
+ pA->pRight = 0;
+ }
+ }
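+
+  /* For example, "x IS NULL" is first parsed as the binary TK_IS operator
+  ** with a TK_NULL right-hand operand; the routine above rewrites it in
+  ** place as the unary TK_ISNULL operator on x and frees the now-unused
+  ** right operand. */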
+
+ /* Add a single new term to an ExprList that is used to store a
+ ** list of identifiers. Report an error if the ID list contains
+ ** a COLLATE clause or an ASC or DESC keyword, except ignore the
+ ** error while parsing a legacy schema.
+ */
+ static ExprList *parserAddExprIdListTerm(
+ Parse *pParse,
+ ExprList *pPrior,
+ Token *pIdToken,
+ int hasCollate,
+ int sortOrder
+ ){
+ ExprList *p = sqlite3ExprListAppend(pParse, pPrior, 0);
+ if( (hasCollate || sortOrder!=SQLITE_SO_UNDEFINED)
+ && pParse->db->init.busy==0
+ ){
+ sqlite3ErrorMsg(pParse, "syntax error after column name \"%.*s\"",
+ pIdToken->n, pIdToken->z);
+ }
+ sqlite3ExprListSetName(pParse, p, pIdToken, 1);
+ return p;
+ }
+
+#if TK_SPAN>255
+# error too many tokens in the grammar
+#endif
+/**************** End of %include directives **********************************/
+/* These constants specify the various numeric values for terminal symbols
+** in a format understandable to "makeheaders". This section is blank unless
+** "lemon" is run with the "-m" command-line option.
+***************** Begin makeheaders token definitions *************************/
+/**************** End makeheaders token definitions ***************************/
+
+/* The next section is a series of control #defines that determine
+** various aspects of the generated parser.
+** YYCODETYPE is the data type used to store the integer codes
+** that represent terminal and non-terminal symbols.
+** "unsigned char" is used if there are fewer than
+** 256 symbols. Larger types otherwise.
+** YYNOCODE is a number of type YYCODETYPE that is not used for
+** any terminal or nonterminal symbol.
+** YYFALLBACK If defined, this indicates that one or more tokens
+** (also known as: "terminal symbols") have fall-back
+** values which should be used if the original symbol
+** would not parse. This permits keywords to sometimes
+** be used as identifiers, for example.
+** YYACTIONTYPE is the data type used for "action codes" - numbers
+** that indicate what to do in response to the next
+** token.
+** sqlite3ParserTOKENTYPE is the data type used for the minor type of terminal
+** symbols. Background: A "minor type" is a semantic
+** value associated with a terminal or non-terminal
+** symbol. For example, for an "ID" terminal symbol,
+** the minor type might be the name of the identifier.
+** Each non-terminal can have a different minor type.
+** Terminal symbols all have the same minor type, though.
+** This macro defines the minor type for terminal
+** symbols.
+** YYMINORTYPE is the data type used for all minor types.
+** This is typically a union of many types, one of
+** which is sqlite3ParserTOKENTYPE. The entry in the union
+** for terminal symbols is called "yy0".
+** YYSTACKDEPTH is the maximum depth of the parser's stack. If
+** zero, the stack is dynamically sized using realloc().
+** sqlite3ParserARG_SDECL A static variable declaration for the %extra_argument
+** sqlite3ParserARG_PDECL A parameter declaration for the %extra_argument
+** sqlite3ParserARG_PARAM Code to pass %extra_argument as a subroutine parameter
+** sqlite3ParserARG_STORE Code to store %extra_argument into yypParser
+** sqlite3ParserARG_FETCH Code to extract %extra_argument from yypParser
+** sqlite3ParserCTX_* As sqlite3ParserARG_ except for %extra_context
+** YYERRORSYMBOL is the code number of the error symbol. If not
+** defined, then do no error processing.
+** YYNSTATE the combined number of states.
+** YYNRULE the number of rules in the grammar
+** YYNTOKEN Number of terminal symbols
+** YY_MAX_SHIFT Maximum value for shift actions
+** YY_MIN_SHIFTREDUCE Minimum value for shift-reduce actions
+** YY_MAX_SHIFTREDUCE Maximum value for shift-reduce actions
+** YY_ERROR_ACTION The yy_action[] code for syntax error
+** YY_ACCEPT_ACTION The yy_action[] code for accept
+** YY_NO_ACTION The yy_action[] code for no-op
+** YY_MIN_REDUCE Minimum value for reduce actions
+** YY_MAX_REDUCE Maximum value for reduce actions
+*/
+#ifndef INTERFACE
+# define INTERFACE 1
+#endif
+/************* Begin control #defines *****************************************/
+#define YYCODETYPE unsigned short int
+#define YYNOCODE 310
+#define YYACTIONTYPE unsigned short int
+#define YYWILDCARD 100
+#define sqlite3ParserTOKENTYPE Token
+typedef union {
+ int yyinit;
+ sqlite3ParserTOKENTYPE yy0;
+ SrcList* yy47;
+ u8 yy58;
+ struct FrameBound yy77;
+ With* yy131;
+ int yy192;
+ Expr* yy202;
+ struct {int value; int mask;} yy207;
+ struct TrigEvent yy230;
+ ExprList* yy242;
+ Window* yy303;
+ Upsert* yy318;
+ const char* yy436;
+ TriggerStep* yy447;
+ Select* yy539;
+ IdList* yy600;
+} YYMINORTYPE;
+#ifndef YYSTACKDEPTH
+#define YYSTACKDEPTH 100
+#endif
+#define sqlite3ParserARG_SDECL
+#define sqlite3ParserARG_PDECL
+#define sqlite3ParserARG_PARAM
+#define sqlite3ParserARG_FETCH
+#define sqlite3ParserARG_STORE
+#define sqlite3ParserCTX_SDECL Parse *pParse;
+#define sqlite3ParserCTX_PDECL ,Parse *pParse
+#define sqlite3ParserCTX_PARAM ,pParse
+#define sqlite3ParserCTX_FETCH Parse *pParse=yypParser->pParse;
+#define sqlite3ParserCTX_STORE yypParser->pParse=pParse;
+#define YYFALLBACK 1
+#define YYNSTATE 557
+#define YYNRULE 385
+#define YYNRULE_WITH_ACTION 325
+#define YYNTOKEN 181
+#define YY_MAX_SHIFT 556
+#define YY_MIN_SHIFTREDUCE 807
+#define YY_MAX_SHIFTREDUCE 1191
+#define YY_ERROR_ACTION 1192
+#define YY_ACCEPT_ACTION 1193
+#define YY_NO_ACTION 1194
+#define YY_MIN_REDUCE 1195
+#define YY_MAX_REDUCE 1579
+/************* End control #defines *******************************************/
+#define YY_NLOOKAHEAD ((int)(sizeof(yy_lookahead)/sizeof(yy_lookahead[0])))
+
+/* Define the yytestcase() macro to be a no-op if it has not already
+** been defined elsewhere.
+**
+** Applications can choose to define yytestcase() in the %include section
+** to a macro that can assist in verifying code coverage. For production
+** code the yytestcase() macro should be turned off. But it is useful
+** for testing.
+*/
+#ifndef yytestcase
+# define yytestcase(X)
+#endif
+
+
+/* Next are the tables used to determine what action to take based on the
+** current state and lookahead token. These tables are used to implement
+** functions that take a state number and lookahead value and return an
+** action integer.
+**
+** Suppose the action integer is N. Then the action is determined as
+** follows
+**
+** 0 <= N <= YY_MAX_SHIFT Shift N. That is, push the lookahead
+** token onto the stack and goto state N.
+**
+** N between YY_MIN_SHIFTREDUCE Shift to an arbitrary state then
+** and YY_MAX_SHIFTREDUCE reduce by rule N-YY_MIN_SHIFTREDUCE.
+**
+** N == YY_ERROR_ACTION A syntax error has occurred.
+**
+** N == YY_ACCEPT_ACTION The parser accepts its input.
+**
+** N == YY_NO_ACTION No such action. Denotes unused
+** slots in the yy_action[] table.
+**
+** N between YY_MIN_REDUCE Reduce by rule N-YY_MIN_REDUCE
+** and YY_MAX_REDUCE
+**
+** The action table is constructed as a single large table named yy_action[].
+** Given state S and lookahead X, the action is computed as either:
+**
+** (A) N = yy_action[ yy_shift_ofst[S] + X ]
+** (B) N = yy_default[S]
+**
+** The (A) formula is preferred. The (B) formula is used instead if
+** yy_lookahead[yy_shift_ofst[S]+X] is not equal to X.
+**
+** The formulas above are for computing the action when the lookahead is
+** a terminal symbol. If the lookahead is a non-terminal (as occurs after
+** a reduce action) then the yy_reduce_ofst[] array is used in place of
+** the yy_shift_ofst[] array.
+**
+** The following are the tables generated in this section:
+**
+** yy_action[] A single table containing all actions.
+** yy_lookahead[] A table containing the lookahead for each entry in
+** yy_action. Used to detect hash collisions.
+** yy_shift_ofst[] For each state, the offset into yy_action for
+** shifting terminals.
+** yy_reduce_ofst[] For each state, the offset into yy_action for
+** shifting non-terminals after a reduce.
+** yy_default[] Default action for each state.
+**
+*********** Begin parsing tables **********************************************/
+#define YY_ACTTAB_COUNT (1974)
+static const YYACTIONTYPE yy_action[] = {
+ /* 0 */ 550, 1226, 550, 455, 1264, 550, 1243, 550, 114, 111,
+ /* 10 */ 211, 550, 1541, 550, 1264, 527, 114, 111, 211, 396,
+ /* 20 */ 1236, 348, 42, 42, 42, 42, 1229, 42, 42, 71,
+ /* 30 */ 71, 941, 1228, 71, 71, 71, 71, 1466, 1497, 942,
+ /* 40 */ 824, 457, 6, 121, 122, 112, 1169, 1169, 1010, 1013,
+ /* 50 */ 1003, 1003, 119, 119, 120, 120, 120, 120, 1547, 396,
+ /* 60 */ 1362, 1521, 556, 2, 1197, 194, 532, 440, 143, 291,
+ /* 70 */ 532, 136, 532, 375, 261, 508, 272, 389, 1277, 531,
+ /* 80 */ 507, 497, 164, 121, 122, 112, 1169, 1169, 1010, 1013,
+ /* 90 */ 1003, 1003, 119, 119, 120, 120, 120, 120, 1362, 446,
+ /* 100 */ 1518, 118, 118, 118, 118, 117, 117, 116, 116, 116,
+ /* 110 */ 115, 428, 266, 266, 266, 266, 1502, 362, 1504, 439,
+ /* 120 */ 361, 1502, 521, 528, 1489, 547, 1118, 547, 1118, 396,
+ /* 130 */ 409, 241, 208, 114, 111, 211, 98, 290, 541, 221,
+ /* 140 */ 1033, 118, 118, 118, 118, 117, 117, 116, 116, 116,
+ /* 150 */ 115, 428, 1146, 121, 122, 112, 1169, 1169, 1010, 1013,
+ /* 160 */ 1003, 1003, 119, 119, 120, 120, 120, 120, 410, 432,
+ /* 170 */ 117, 117, 116, 116, 116, 115, 428, 1422, 472, 123,
+ /* 180 */ 118, 118, 118, 118, 117, 117, 116, 116, 116, 115,
+ /* 190 */ 428, 116, 116, 116, 115, 428, 544, 544, 544, 396,
+ /* 200 */ 509, 120, 120, 120, 120, 113, 1055, 1146, 1147, 1148,
+ /* 210 */ 1055, 118, 118, 118, 118, 117, 117, 116, 116, 116,
+ /* 220 */ 115, 428, 1465, 121, 122, 112, 1169, 1169, 1010, 1013,
+ /* 230 */ 1003, 1003, 119, 119, 120, 120, 120, 120, 396, 448,
+ /* 240 */ 320, 83, 467, 81, 363, 386, 1146, 80, 118, 118,
+ /* 250 */ 118, 118, 117, 117, 116, 116, 116, 115, 428, 179,
+ /* 260 */ 438, 428, 121, 122, 112, 1169, 1169, 1010, 1013, 1003,
+ /* 270 */ 1003, 119, 119, 120, 120, 120, 120, 438, 437, 266,
+ /* 280 */ 266, 118, 118, 118, 118, 117, 117, 116, 116, 116,
+ /* 290 */ 115, 428, 547, 1113, 907, 510, 1146, 114, 111, 211,
+ /* 300 */ 1435, 1146, 1147, 1148, 206, 495, 1113, 396, 453, 1113,
+ /* 310 */ 549, 334, 120, 120, 120, 120, 298, 1435, 1437, 17,
+ /* 320 */ 118, 118, 118, 118, 117, 117, 116, 116, 116, 115,
+ /* 330 */ 428, 121, 122, 112, 1169, 1169, 1010, 1013, 1003, 1003,
+ /* 340 */ 119, 119, 120, 120, 120, 120, 396, 1362, 438, 1146,
+ /* 350 */ 486, 1146, 1147, 1148, 1000, 1000, 1011, 1014, 449, 118,
+ /* 360 */ 118, 118, 118, 117, 117, 116, 116, 116, 115, 428,
+ /* 370 */ 121, 122, 112, 1169, 1169, 1010, 1013, 1003, 1003, 119,
+ /* 380 */ 119, 120, 120, 120, 120, 1058, 1058, 469, 1435, 118,
+ /* 390 */ 118, 118, 118, 117, 117, 116, 116, 116, 115, 428,
+ /* 400 */ 1146, 455, 550, 1430, 1146, 1147, 1148, 233, 970, 1146,
+ /* 410 */ 485, 482, 481, 171, 364, 396, 164, 411, 418, 846,
+ /* 420 */ 480, 164, 185, 338, 71, 71, 1247, 1004, 118, 118,
+ /* 430 */ 118, 118, 117, 117, 116, 116, 116, 115, 428, 121,
+ /* 440 */ 122, 112, 1169, 1169, 1010, 1013, 1003, 1003, 119, 119,
+ /* 450 */ 120, 120, 120, 120, 396, 1146, 1147, 1148, 839, 12,
+ /* 460 */ 318, 513, 163, 360, 1146, 1147, 1148, 114, 111, 211,
+ /* 470 */ 512, 290, 541, 550, 276, 180, 290, 541, 121, 122,
+ /* 480 */ 112, 1169, 1169, 1010, 1013, 1003, 1003, 119, 119, 120,
+ /* 490 */ 120, 120, 120, 349, 488, 71, 71, 118, 118, 118,
+ /* 500 */ 118, 117, 117, 116, 116, 116, 115, 428, 1146, 209,
+ /* 510 */ 415, 527, 1146, 1113, 1575, 382, 252, 269, 346, 491,
+ /* 520 */ 341, 490, 238, 396, 517, 368, 1113, 1131, 337, 1113,
+ /* 530 */ 191, 413, 286, 32, 461, 447, 118, 118, 118, 118,
+ /* 540 */ 117, 117, 116, 116, 116, 115, 428, 121, 122, 112,
+ /* 550 */ 1169, 1169, 1010, 1013, 1003, 1003, 119, 119, 120, 120,
+ /* 560 */ 120, 120, 396, 1146, 1147, 1148, 991, 1146, 1147, 1148,
+ /* 570 */ 1146, 233, 496, 1496, 485, 482, 481, 6, 163, 550,
+ /* 580 */ 516, 550, 115, 428, 480, 5, 121, 122, 112, 1169,
+ /* 590 */ 1169, 1010, 1013, 1003, 1003, 119, 119, 120, 120, 120,
+ /* 600 */ 120, 13, 13, 13, 13, 118, 118, 118, 118, 117,
+ /* 610 */ 117, 116, 116, 116, 115, 428, 407, 506, 412, 550,
+ /* 620 */ 1490, 548, 1146, 896, 896, 1146, 1147, 1148, 1477, 1146,
+ /* 630 */ 275, 396, 812, 813, 814, 975, 426, 426, 426, 16,
+ /* 640 */ 16, 55, 55, 1246, 118, 118, 118, 118, 117, 117,
+ /* 650 */ 116, 116, 116, 115, 428, 121, 122, 112, 1169, 1169,
+ /* 660 */ 1010, 1013, 1003, 1003, 119, 119, 120, 120, 120, 120,
+ /* 670 */ 396, 1193, 1, 1, 556, 2, 1197, 1146, 1147, 1148,
+ /* 680 */ 194, 291, 902, 136, 1146, 1147, 1148, 901, 525, 1496,
+ /* 690 */ 1277, 3, 384, 6, 121, 122, 112, 1169, 1169, 1010,
+ /* 700 */ 1013, 1003, 1003, 119, 119, 120, 120, 120, 120, 862,
+ /* 710 */ 550, 928, 550, 118, 118, 118, 118, 117, 117, 116,
+ /* 720 */ 116, 116, 115, 428, 266, 266, 1096, 1573, 1146, 555,
+ /* 730 */ 1573, 1197, 13, 13, 13, 13, 291, 547, 136, 396,
+ /* 740 */ 489, 425, 424, 970, 348, 1277, 472, 414, 863, 279,
+ /* 750 */ 140, 221, 118, 118, 118, 118, 117, 117, 116, 116,
+ /* 760 */ 116, 115, 428, 121, 122, 112, 1169, 1169, 1010, 1013,
+ /* 770 */ 1003, 1003, 119, 119, 120, 120, 120, 120, 550, 266,
+ /* 780 */ 266, 432, 396, 1146, 1147, 1148, 1176, 834, 1176, 472,
+ /* 790 */ 435, 145, 547, 1150, 405, 318, 443, 304, 842, 1494,
+ /* 800 */ 71, 71, 416, 6, 1094, 477, 221, 100, 112, 1169,
+ /* 810 */ 1169, 1010, 1013, 1003, 1003, 119, 119, 120, 120, 120,
+ /* 820 */ 120, 118, 118, 118, 118, 117, 117, 116, 116, 116,
+ /* 830 */ 115, 428, 237, 1429, 550, 455, 432, 287, 990, 550,
+ /* 840 */ 236, 235, 234, 834, 97, 533, 433, 1269, 1269, 1150,
+ /* 850 */ 498, 311, 434, 842, 981, 550, 71, 71, 980, 1245,
+ /* 860 */ 550, 51, 51, 300, 118, 118, 118, 118, 117, 117,
+ /* 870 */ 116, 116, 116, 115, 428, 194, 103, 70, 70, 266,
+ /* 880 */ 266, 550, 71, 71, 266, 266, 30, 395, 348, 980,
+ /* 890 */ 980, 982, 547, 532, 1113, 332, 396, 547, 499, 401,
+ /* 900 */ 1474, 195, 534, 13, 13, 1362, 240, 1113, 277, 280,
+ /* 910 */ 1113, 280, 308, 461, 310, 337, 396, 31, 188, 423,
+ /* 920 */ 121, 122, 112, 1169, 1169, 1010, 1013, 1003, 1003, 119,
+ /* 930 */ 119, 120, 120, 120, 120, 142, 396, 369, 461, 990,
+ /* 940 */ 121, 122, 112, 1169, 1169, 1010, 1013, 1003, 1003, 119,
+ /* 950 */ 119, 120, 120, 120, 120, 981, 327, 1146, 330, 980,
+ /* 960 */ 121, 110, 112, 1169, 1169, 1010, 1013, 1003, 1003, 119,
+ /* 970 */ 119, 120, 120, 120, 120, 468, 381, 1189, 118, 118,
+ /* 980 */ 118, 118, 117, 117, 116, 116, 116, 115, 428, 1146,
+ /* 990 */ 980, 980, 982, 309, 9, 370, 244, 366, 118, 118,
+ /* 1000 */ 118, 118, 117, 117, 116, 116, 116, 115, 428, 317,
+ /* 1010 */ 550, 348, 1146, 1147, 1148, 299, 290, 541, 118, 118,
+ /* 1020 */ 118, 118, 117, 117, 116, 116, 116, 115, 428, 1267,
+ /* 1030 */ 1267, 1167, 13, 13, 278, 425, 424, 472, 396, 927,
+ /* 1040 */ 260, 260, 289, 1173, 1146, 1147, 1148, 189, 1175, 266,
+ /* 1050 */ 266, 472, 394, 547, 1190, 550, 1174, 263, 144, 493,
+ /* 1060 */ 926, 550, 547, 122, 112, 1169, 1169, 1010, 1013, 1003,
+ /* 1070 */ 1003, 119, 119, 120, 120, 120, 120, 71, 71, 1146,
+ /* 1080 */ 1176, 1276, 1176, 13, 13, 902, 1074, 1167, 550, 472,
+ /* 1090 */ 901, 107, 542, 1495, 4, 1272, 1113, 6, 529, 1053,
+ /* 1100 */ 12, 1075, 1096, 1574, 316, 459, 1574, 524, 545, 1113,
+ /* 1110 */ 56, 56, 1113, 1493, 427, 1362, 1076, 6, 349, 285,
+ /* 1120 */ 118, 118, 118, 118, 117, 117, 116, 116, 116, 115,
+ /* 1130 */ 428, 429, 1275, 325, 1146, 1147, 1148, 882, 266, 266,
+ /* 1140 */ 1281, 107, 542, 539, 4, 1492, 293, 883, 1215, 6,
+ /* 1150 */ 210, 547, 547, 164, 294, 500, 420, 204, 545, 267,
+ /* 1160 */ 267, 1218, 402, 515, 503, 204, 266, 266, 400, 535,
+ /* 1170 */ 8, 990, 547, 523, 550, 926, 462, 105, 105, 547,
+ /* 1180 */ 1094, 429, 266, 266, 106, 421, 429, 552, 551, 266,
+ /* 1190 */ 266, 980, 522, 539, 1377, 547, 15, 15, 266, 266,
+ /* 1200 */ 460, 1124, 547, 266, 266, 1074, 1376, 519, 290, 541,
+ /* 1210 */ 550, 547, 518, 97, 448, 320, 547, 550, 926, 125,
+ /* 1220 */ 1075, 990, 980, 980, 982, 983, 27, 105, 105, 405,
+ /* 1230 */ 347, 1515, 44, 44, 106, 1076, 429, 552, 551, 57,
+ /* 1240 */ 57, 980, 347, 1515, 107, 542, 550, 4, 466, 405,
+ /* 1250 */ 214, 1124, 463, 297, 381, 1095, 538, 1313, 550, 543,
+ /* 1260 */ 402, 545, 290, 541, 104, 244, 102, 530, 58, 58,
+ /* 1270 */ 550, 199, 980, 980, 982, 983, 27, 1520, 1135, 431,
+ /* 1280 */ 59, 59, 270, 237, 429, 138, 95, 379, 379, 378,
+ /* 1290 */ 255, 376, 60, 60, 821, 1184, 539, 550, 273, 550,
+ /* 1300 */ 1167, 1312, 393, 392, 550, 442, 550, 215, 210, 296,
+ /* 1310 */ 519, 853, 550, 265, 208, 520, 1480, 295, 274, 61,
+ /* 1320 */ 61, 62, 62, 312, 990, 109, 45, 45, 46, 46,
+ /* 1330 */ 105, 105, 1190, 926, 47, 47, 345, 106, 550, 429,
+ /* 1340 */ 552, 551, 1546, 550, 980, 871, 344, 217, 550, 941,
+ /* 1350 */ 401, 107, 542, 218, 4, 156, 1167, 942, 158, 550,
+ /* 1360 */ 49, 49, 1166, 550, 268, 50, 50, 550, 545, 1454,
+ /* 1370 */ 63, 63, 550, 1453, 216, 980, 980, 982, 983, 27,
+ /* 1380 */ 450, 64, 64, 550, 464, 65, 65, 550, 322, 14,
+ /* 1390 */ 14, 429, 1309, 550, 66, 66, 1091, 550, 141, 383,
+ /* 1400 */ 38, 550, 967, 539, 326, 127, 127, 550, 397, 67,
+ /* 1410 */ 67, 550, 329, 290, 541, 52, 52, 519, 550, 68,
+ /* 1420 */ 68, 849, 518, 69, 69, 403, 165, 861, 860, 53,
+ /* 1430 */ 53, 990, 315, 151, 151, 97, 436, 105, 105, 331,
+ /* 1440 */ 152, 152, 530, 1052, 106, 1052, 429, 552, 551, 1135,
+ /* 1450 */ 431, 980, 1036, 270, 972, 239, 333, 243, 379, 379,
+ /* 1460 */ 378, 255, 376, 944, 945, 821, 1300, 550, 220, 550,
+ /* 1470 */ 107, 542, 550, 4, 550, 1260, 199, 849, 215, 1040,
+ /* 1480 */ 296, 1534, 980, 980, 982, 983, 27, 545, 295, 76,
+ /* 1490 */ 76, 54, 54, 984, 72, 72, 128, 128, 868, 869,
+ /* 1500 */ 107, 542, 550, 4, 1051, 550, 1051, 537, 473, 550,
+ /* 1510 */ 429, 550, 454, 1244, 550, 243, 550, 545, 217, 550,
+ /* 1520 */ 456, 197, 539, 243, 73, 73, 156, 129, 129, 158,
+ /* 1530 */ 340, 130, 130, 126, 126, 1040, 150, 150, 149, 149,
+ /* 1540 */ 429, 134, 134, 321, 478, 216, 97, 239, 335, 984,
+ /* 1550 */ 990, 97, 539, 350, 351, 550, 105, 105, 906, 935,
+ /* 1560 */ 550, 899, 243, 106, 109, 429, 552, 551, 550, 1509,
+ /* 1570 */ 980, 832, 99, 542, 139, 4, 550, 133, 133, 397,
+ /* 1580 */ 990, 1321, 131, 131, 290, 541, 105, 105, 1361, 545,
+ /* 1590 */ 132, 132, 1296, 106, 1307, 429, 552, 551, 75, 75,
+ /* 1600 */ 980, 980, 980, 982, 983, 27, 550, 436, 900, 1293,
+ /* 1610 */ 536, 109, 429, 1367, 550, 1225, 1217, 1206, 258, 550,
+ /* 1620 */ 353, 550, 1205, 11, 539, 1207, 1528, 355, 77, 77,
+ /* 1630 */ 380, 980, 980, 982, 983, 27, 74, 74, 357, 213,
+ /* 1640 */ 303, 43, 43, 48, 48, 441, 314, 201, 307, 1354,
+ /* 1650 */ 319, 359, 990, 458, 483, 1243, 343, 192, 105, 105,
+ /* 1660 */ 1426, 1425, 193, 540, 205, 106, 1531, 429, 552, 551,
+ /* 1670 */ 1184, 167, 980, 270, 247, 1473, 1471, 1181, 379, 379,
+ /* 1680 */ 378, 255, 376, 200, 373, 821, 404, 83, 79, 82,
+ /* 1690 */ 1431, 452, 177, 124, 530, 1346, 95, 301, 215, 302,
+ /* 1700 */ 296, 161, 169, 980, 980, 982, 983, 27, 295, 1343,
+ /* 1710 */ 305, 306, 444, 445, 1351, 172, 35, 173, 174, 175,
+ /* 1720 */ 476, 223, 387, 385, 36, 451, 465, 1357, 181, 388,
+ /* 1730 */ 88, 471, 227, 1420, 186, 474, 259, 1442, 217, 229,
+ /* 1740 */ 230, 324, 328, 390, 492, 231, 156, 1263, 1208, 158,
+ /* 1750 */ 417, 1254, 90, 853, 1262, 1261, 206, 419, 1514, 511,
+ /* 1760 */ 1304, 94, 1545, 352, 354, 216, 1305, 1303, 283, 1233,
+ /* 1770 */ 284, 391, 1232, 1544, 342, 1231, 1543, 356, 245, 1302,
+ /* 1780 */ 1253, 502, 505, 358, 246, 1500, 1499, 422, 10, 367,
+ /* 1790 */ 101, 1328, 1327, 514, 1406, 96, 253, 1214, 34, 397,
+ /* 1800 */ 553, 1141, 254, 365, 290, 541, 256, 257, 554, 1286,
+ /* 1810 */ 372, 196, 1285, 371, 1203, 1198, 153, 1458, 137, 1459,
+ /* 1820 */ 154, 1457, 281, 1456, 155, 808, 430, 436, 202, 398,
+ /* 1830 */ 203, 78, 288, 198, 292, 212, 271, 1050, 135, 1048,
+ /* 1840 */ 964, 157, 219, 168, 170, 885, 313, 1064, 222, 176,
+ /* 1850 */ 968, 159, 406, 84, 408, 178, 85, 86, 87, 160,
+ /* 1860 */ 1067, 224, 1063, 225, 146, 166, 399, 18, 226, 323,
+ /* 1870 */ 1056, 1178, 470, 243, 182, 228, 183, 37, 823, 475,
+ /* 1880 */ 344, 232, 479, 487, 184, 89, 19, 851, 336, 20,
+ /* 1890 */ 339, 484, 91, 282, 162, 147, 864, 92, 494, 93,
+ /* 1900 */ 1129, 148, 1016, 1099, 39, 501, 1100, 40, 504, 207,
+ /* 1910 */ 262, 264, 934, 187, 929, 109, 1119, 1115, 1117, 7,
+ /* 1920 */ 242, 1103, 33, 1123, 21, 526, 22, 23, 24, 1122,
+ /* 1930 */ 25, 190, 97, 26, 1031, 1017, 1015, 1019, 1073, 1020,
+ /* 1940 */ 1072, 249, 248, 28, 41, 895, 985, 833, 108, 29,
+ /* 1950 */ 377, 546, 250, 374, 1137, 1136, 1194, 1194, 251, 1194,
+ /* 1960 */ 1194, 1194, 1194, 1194, 1194, 1194, 1194, 1194, 1194, 1194,
+ /* 1970 */ 1194, 1194, 1536, 1535,
+};
+static const YYCODETYPE yy_lookahead[] = {
+ /* 0 */ 189, 211, 189, 189, 218, 189, 220, 189, 267, 268,
+ /* 10 */ 269, 189, 210, 189, 228, 189, 267, 268, 269, 19,
+ /* 20 */ 218, 189, 211, 212, 211, 212, 211, 211, 212, 211,
+ /* 30 */ 212, 31, 211, 211, 212, 211, 212, 288, 300, 39,
+ /* 40 */ 21, 189, 304, 43, 44, 45, 46, 47, 48, 49,
+ /* 50 */ 50, 51, 52, 53, 54, 55, 56, 57, 225, 19,
+ /* 60 */ 189, 183, 184, 185, 186, 189, 248, 263, 236, 191,
+ /* 70 */ 248, 193, 248, 197, 208, 257, 262, 201, 200, 257,
+ /* 80 */ 200, 257, 81, 43, 44, 45, 46, 47, 48, 49,
+ /* 90 */ 50, 51, 52, 53, 54, 55, 56, 57, 189, 80,
+ /* 100 */ 189, 101, 102, 103, 104, 105, 106, 107, 108, 109,
+ /* 110 */ 110, 111, 234, 235, 234, 235, 305, 306, 305, 118,
+ /* 120 */ 307, 305, 306, 297, 298, 247, 86, 247, 88, 19,
+ /* 130 */ 259, 251, 252, 267, 268, 269, 26, 136, 137, 261,
+ /* 140 */ 121, 101, 102, 103, 104, 105, 106, 107, 108, 109,
+ /* 150 */ 110, 111, 59, 43, 44, 45, 46, 47, 48, 49,
+ /* 160 */ 50, 51, 52, 53, 54, 55, 56, 57, 259, 291,
+ /* 170 */ 105, 106, 107, 108, 109, 110, 111, 158, 189, 69,
+ /* 180 */ 101, 102, 103, 104, 105, 106, 107, 108, 109, 110,
+ /* 190 */ 111, 107, 108, 109, 110, 111, 205, 206, 207, 19,
+ /* 200 */ 19, 54, 55, 56, 57, 58, 29, 114, 115, 116,
+ /* 210 */ 33, 101, 102, 103, 104, 105, 106, 107, 108, 109,
+ /* 220 */ 110, 111, 233, 43, 44, 45, 46, 47, 48, 49,
+ /* 230 */ 50, 51, 52, 53, 54, 55, 56, 57, 19, 126,
+ /* 240 */ 127, 148, 65, 24, 214, 200, 59, 67, 101, 102,
+ /* 250 */ 103, 104, 105, 106, 107, 108, 109, 110, 111, 22,
+ /* 260 */ 189, 111, 43, 44, 45, 46, 47, 48, 49, 50,
+ /* 270 */ 51, 52, 53, 54, 55, 56, 57, 206, 207, 234,
+ /* 280 */ 235, 101, 102, 103, 104, 105, 106, 107, 108, 109,
+ /* 290 */ 110, 111, 247, 76, 107, 114, 59, 267, 268, 269,
+ /* 300 */ 189, 114, 115, 116, 162, 163, 89, 19, 263, 92,
+ /* 310 */ 189, 23, 54, 55, 56, 57, 189, 206, 207, 22,
+ /* 320 */ 101, 102, 103, 104, 105, 106, 107, 108, 109, 110,
+ /* 330 */ 111, 43, 44, 45, 46, 47, 48, 49, 50, 51,
+ /* 340 */ 52, 53, 54, 55, 56, 57, 19, 189, 277, 59,
+ /* 350 */ 23, 114, 115, 116, 46, 47, 48, 49, 61, 101,
+ /* 360 */ 102, 103, 104, 105, 106, 107, 108, 109, 110, 111,
+ /* 370 */ 43, 44, 45, 46, 47, 48, 49, 50, 51, 52,
+ /* 380 */ 53, 54, 55, 56, 57, 125, 126, 127, 277, 101,
+ /* 390 */ 102, 103, 104, 105, 106, 107, 108, 109, 110, 111,
+ /* 400 */ 59, 189, 189, 276, 114, 115, 116, 117, 73, 59,
+ /* 410 */ 120, 121, 122, 72, 214, 19, 81, 259, 19, 23,
+ /* 420 */ 130, 81, 72, 24, 211, 212, 221, 119, 101, 102,
+ /* 430 */ 103, 104, 105, 106, 107, 108, 109, 110, 111, 43,
+ /* 440 */ 44, 45, 46, 47, 48, 49, 50, 51, 52, 53,
+ /* 450 */ 54, 55, 56, 57, 19, 114, 115, 116, 23, 208,
+ /* 460 */ 125, 248, 189, 189, 114, 115, 116, 267, 268, 269,
+ /* 470 */ 189, 136, 137, 189, 262, 22, 136, 137, 43, 44,
+ /* 480 */ 45, 46, 47, 48, 49, 50, 51, 52, 53, 54,
+ /* 490 */ 55, 56, 57, 189, 95, 211, 212, 101, 102, 103,
+ /* 500 */ 104, 105, 106, 107, 108, 109, 110, 111, 59, 189,
+ /* 510 */ 111, 189, 59, 76, 294, 295, 117, 118, 119, 120,
+ /* 520 */ 121, 122, 123, 19, 87, 189, 89, 23, 129, 92,
+ /* 530 */ 279, 227, 248, 22, 189, 284, 101, 102, 103, 104,
+ /* 540 */ 105, 106, 107, 108, 109, 110, 111, 43, 44, 45,
+ /* 550 */ 46, 47, 48, 49, 50, 51, 52, 53, 54, 55,
+ /* 560 */ 56, 57, 19, 114, 115, 116, 23, 114, 115, 116,
+ /* 570 */ 59, 117, 299, 300, 120, 121, 122, 304, 189, 189,
+ /* 580 */ 143, 189, 110, 111, 130, 22, 43, 44, 45, 46,
+ /* 590 */ 47, 48, 49, 50, 51, 52, 53, 54, 55, 56,
+ /* 600 */ 57, 211, 212, 211, 212, 101, 102, 103, 104, 105,
+ /* 610 */ 106, 107, 108, 109, 110, 111, 226, 189, 226, 189,
+ /* 620 */ 298, 132, 59, 134, 135, 114, 115, 116, 189, 59,
+ /* 630 */ 285, 19, 7, 8, 9, 23, 205, 206, 207, 211,
+ /* 640 */ 212, 211, 212, 221, 101, 102, 103, 104, 105, 106,
+ /* 650 */ 107, 108, 109, 110, 111, 43, 44, 45, 46, 47,
+ /* 660 */ 48, 49, 50, 51, 52, 53, 54, 55, 56, 57,
+ /* 670 */ 19, 181, 182, 183, 184, 185, 186, 114, 115, 116,
+ /* 680 */ 189, 191, 133, 193, 114, 115, 116, 138, 299, 300,
+ /* 690 */ 200, 22, 201, 304, 43, 44, 45, 46, 47, 48,
+ /* 700 */ 49, 50, 51, 52, 53, 54, 55, 56, 57, 35,
+ /* 710 */ 189, 141, 189, 101, 102, 103, 104, 105, 106, 107,
+ /* 720 */ 108, 109, 110, 111, 234, 235, 22, 23, 59, 184,
+ /* 730 */ 26, 186, 211, 212, 211, 212, 191, 247, 193, 19,
+ /* 740 */ 66, 105, 106, 73, 189, 200, 189, 226, 74, 226,
+ /* 750 */ 22, 261, 101, 102, 103, 104, 105, 106, 107, 108,
+ /* 760 */ 109, 110, 111, 43, 44, 45, 46, 47, 48, 49,
+ /* 770 */ 50, 51, 52, 53, 54, 55, 56, 57, 189, 234,
+ /* 780 */ 235, 291, 19, 114, 115, 116, 150, 59, 152, 189,
+ /* 790 */ 233, 236, 247, 59, 189, 125, 126, 127, 59, 300,
+ /* 800 */ 211, 212, 128, 304, 100, 19, 261, 156, 45, 46,
+ /* 810 */ 47, 48, 49, 50, 51, 52, 53, 54, 55, 56,
+ /* 820 */ 57, 101, 102, 103, 104, 105, 106, 107, 108, 109,
+ /* 830 */ 110, 111, 46, 233, 189, 189, 291, 248, 99, 189,
+ /* 840 */ 125, 126, 127, 115, 26, 200, 289, 230, 231, 115,
+ /* 850 */ 200, 16, 189, 114, 115, 189, 211, 212, 119, 221,
+ /* 860 */ 189, 211, 212, 258, 101, 102, 103, 104, 105, 106,
+ /* 870 */ 107, 108, 109, 110, 111, 189, 156, 211, 212, 234,
+ /* 880 */ 235, 189, 211, 212, 234, 235, 22, 201, 189, 150,
+ /* 890 */ 151, 152, 247, 248, 76, 16, 19, 247, 248, 113,
+ /* 900 */ 189, 24, 257, 211, 212, 189, 26, 89, 262, 223,
+ /* 910 */ 92, 225, 77, 189, 79, 129, 19, 53, 226, 248,
+ /* 920 */ 43, 44, 45, 46, 47, 48, 49, 50, 51, 52,
+ /* 930 */ 53, 54, 55, 56, 57, 236, 19, 271, 189, 99,
+ /* 940 */ 43, 44, 45, 46, 47, 48, 49, 50, 51, 52,
+ /* 950 */ 53, 54, 55, 56, 57, 115, 77, 59, 79, 119,
+ /* 960 */ 43, 44, 45, 46, 47, 48, 49, 50, 51, 52,
+ /* 970 */ 53, 54, 55, 56, 57, 259, 22, 23, 101, 102,
+ /* 980 */ 103, 104, 105, 106, 107, 108, 109, 110, 111, 59,
+ /* 990 */ 150, 151, 152, 158, 22, 244, 24, 246, 101, 102,
+ /* 1000 */ 103, 104, 105, 106, 107, 108, 109, 110, 111, 285,
+ /* 1010 */ 189, 189, 114, 115, 116, 200, 136, 137, 101, 102,
+ /* 1020 */ 103, 104, 105, 106, 107, 108, 109, 110, 111, 230,
+ /* 1030 */ 231, 59, 211, 212, 285, 105, 106, 189, 19, 141,
+ /* 1040 */ 234, 235, 239, 113, 114, 115, 116, 226, 118, 234,
+ /* 1050 */ 235, 189, 249, 247, 100, 189, 126, 23, 236, 107,
+ /* 1060 */ 26, 189, 247, 44, 45, 46, 47, 48, 49, 50,
+ /* 1070 */ 51, 52, 53, 54, 55, 56, 57, 211, 212, 59,
+ /* 1080 */ 150, 233, 152, 211, 212, 133, 12, 115, 189, 189,
+ /* 1090 */ 138, 19, 20, 300, 22, 233, 76, 304, 226, 11,
+ /* 1100 */ 208, 27, 22, 23, 200, 19, 26, 87, 36, 89,
+ /* 1110 */ 211, 212, 92, 300, 248, 189, 42, 304, 189, 250,
+ /* 1120 */ 101, 102, 103, 104, 105, 106, 107, 108, 109, 110,
+ /* 1130 */ 111, 59, 200, 233, 114, 115, 116, 63, 234, 235,
+ /* 1140 */ 235, 19, 20, 71, 22, 300, 189, 73, 200, 304,
+ /* 1150 */ 116, 247, 247, 81, 189, 200, 227, 26, 36, 234,
+ /* 1160 */ 235, 203, 204, 143, 200, 26, 234, 235, 194, 200,
+ /* 1170 */ 48, 99, 247, 66, 189, 141, 284, 105, 106, 247,
+ /* 1180 */ 100, 59, 234, 235, 112, 259, 114, 115, 116, 234,
+ /* 1190 */ 235, 119, 85, 71, 266, 247, 211, 212, 234, 235,
+ /* 1200 */ 114, 94, 247, 234, 235, 12, 266, 85, 136, 137,
+ /* 1210 */ 189, 247, 90, 26, 126, 127, 247, 189, 26, 22,
+ /* 1220 */ 27, 99, 150, 151, 152, 153, 154, 105, 106, 189,
+ /* 1230 */ 302, 303, 211, 212, 112, 42, 114, 115, 116, 211,
+ /* 1240 */ 212, 119, 302, 303, 19, 20, 189, 22, 274, 189,
+ /* 1250 */ 15, 144, 278, 189, 22, 23, 63, 189, 189, 203,
+ /* 1260 */ 204, 36, 136, 137, 155, 24, 157, 143, 211, 212,
+ /* 1270 */ 189, 140, 150, 151, 152, 153, 154, 0, 1, 2,
+ /* 1280 */ 211, 212, 5, 46, 59, 161, 147, 10, 11, 12,
+ /* 1290 */ 13, 14, 211, 212, 17, 60, 71, 189, 258, 189,
+ /* 1300 */ 59, 189, 105, 106, 189, 189, 189, 30, 116, 32,
+ /* 1310 */ 85, 124, 189, 251, 252, 90, 189, 40, 258, 211,
+ /* 1320 */ 212, 211, 212, 189, 99, 26, 211, 212, 211, 212,
+ /* 1330 */ 105, 106, 100, 141, 211, 212, 119, 112, 189, 114,
+ /* 1340 */ 115, 116, 23, 189, 119, 26, 129, 70, 189, 31,
+ /* 1350 */ 113, 19, 20, 24, 22, 78, 115, 39, 81, 189,
+ /* 1360 */ 211, 212, 26, 189, 22, 211, 212, 189, 36, 189,
+ /* 1370 */ 211, 212, 189, 189, 97, 150, 151, 152, 153, 154,
+ /* 1380 */ 127, 211, 212, 189, 189, 211, 212, 189, 189, 211,
+ /* 1390 */ 212, 59, 189, 189, 211, 212, 23, 189, 22, 26,
+ /* 1400 */ 24, 189, 149, 71, 189, 211, 212, 189, 131, 211,
+ /* 1410 */ 212, 189, 189, 136, 137, 211, 212, 85, 189, 211,
+ /* 1420 */ 212, 59, 90, 211, 212, 292, 293, 118, 119, 211,
+ /* 1430 */ 212, 99, 23, 211, 212, 26, 159, 105, 106, 189,
+ /* 1440 */ 211, 212, 143, 150, 112, 152, 114, 115, 116, 1,
+ /* 1450 */ 2, 119, 23, 5, 23, 26, 189, 26, 10, 11,
+ /* 1460 */ 12, 13, 14, 83, 84, 17, 253, 189, 139, 189,
+ /* 1470 */ 19, 20, 189, 22, 189, 189, 140, 115, 30, 59,
+ /* 1480 */ 32, 139, 150, 151, 152, 153, 154, 36, 40, 211,
+ /* 1490 */ 212, 211, 212, 59, 211, 212, 211, 212, 7, 8,
+ /* 1500 */ 19, 20, 189, 22, 150, 189, 152, 231, 281, 189,
+ /* 1510 */ 59, 189, 23, 189, 189, 26, 189, 36, 70, 189,
+ /* 1520 */ 23, 237, 71, 26, 211, 212, 78, 211, 212, 81,
+ /* 1530 */ 189, 211, 212, 211, 212, 115, 211, 212, 211, 212,
+ /* 1540 */ 59, 211, 212, 23, 23, 97, 26, 26, 23, 115,
+ /* 1550 */ 99, 26, 71, 189, 189, 189, 105, 106, 107, 23,
+ /* 1560 */ 189, 23, 26, 112, 26, 114, 115, 116, 189, 309,
+ /* 1570 */ 119, 23, 19, 20, 26, 22, 189, 211, 212, 131,
+ /* 1580 */ 99, 189, 211, 212, 136, 137, 105, 106, 189, 36,
+ /* 1590 */ 211, 212, 189, 112, 189, 114, 115, 116, 211, 212,
+ /* 1600 */ 119, 150, 151, 152, 153, 154, 189, 159, 23, 250,
+ /* 1610 */ 189, 26, 59, 189, 189, 189, 189, 189, 280, 189,
+ /* 1620 */ 250, 189, 189, 238, 71, 189, 189, 250, 211, 212,
+ /* 1630 */ 187, 150, 151, 152, 153, 154, 211, 212, 250, 290,
+ /* 1640 */ 240, 211, 212, 211, 212, 254, 286, 209, 254, 241,
+ /* 1650 */ 240, 254, 99, 286, 215, 220, 214, 244, 105, 106,
+ /* 1660 */ 214, 214, 244, 273, 224, 112, 192, 114, 115, 116,
+ /* 1670 */ 60, 290, 119, 5, 139, 196, 196, 38, 10, 11,
+ /* 1680 */ 12, 13, 14, 238, 240, 17, 196, 148, 287, 287,
+ /* 1690 */ 276, 113, 22, 146, 143, 245, 147, 244, 30, 241,
+ /* 1700 */ 32, 43, 229, 150, 151, 152, 153, 154, 40, 245,
+ /* 1710 */ 244, 241, 18, 196, 265, 232, 264, 232, 232, 232,
+ /* 1720 */ 18, 195, 265, 241, 264, 241, 196, 229, 229, 241,
+ /* 1730 */ 155, 62, 195, 241, 22, 216, 196, 283, 70, 195,
+ /* 1740 */ 195, 282, 196, 216, 113, 195, 78, 213, 196, 81,
+ /* 1750 */ 64, 222, 22, 124, 213, 213, 162, 111, 303, 142,
+ /* 1760 */ 256, 113, 219, 255, 255, 97, 256, 256, 275, 213,
+ /* 1770 */ 275, 216, 215, 219, 213, 213, 213, 255, 196, 256,
+ /* 1780 */ 222, 216, 216, 255, 91, 308, 308, 82, 22, 196,
+ /* 1790 */ 155, 260, 260, 144, 270, 145, 25, 199, 26, 131,
+ /* 1800 */ 198, 13, 190, 244, 136, 137, 190, 6, 188, 245,
+ /* 1810 */ 241, 243, 245, 242, 188, 188, 202, 208, 217, 208,
+ /* 1820 */ 202, 208, 217, 208, 202, 4, 3, 159, 209, 296,
+ /* 1830 */ 209, 208, 272, 22, 160, 15, 98, 23, 16, 23,
+ /* 1840 */ 137, 128, 24, 148, 140, 20, 16, 1, 142, 140,
+ /* 1850 */ 149, 128, 61, 53, 37, 148, 53, 53, 53, 128,
+ /* 1860 */ 114, 34, 1, 139, 5, 293, 296, 22, 113, 158,
+ /* 1870 */ 68, 75, 41, 26, 68, 139, 113, 24, 20, 19,
+ /* 1880 */ 129, 123, 67, 96, 22, 22, 22, 59, 23, 22,
+ /* 1890 */ 24, 67, 22, 67, 37, 23, 28, 147, 22, 26,
+ /* 1900 */ 23, 23, 23, 23, 22, 24, 23, 22, 24, 139,
+ /* 1910 */ 23, 23, 114, 22, 141, 26, 75, 88, 86, 44,
+ /* 1920 */ 34, 23, 22, 75, 34, 24, 34, 34, 34, 93,
+ /* 1930 */ 34, 26, 26, 34, 23, 23, 23, 23, 23, 11,
+ /* 1940 */ 23, 22, 26, 22, 22, 133, 23, 23, 22, 22,
+ /* 1950 */ 15, 26, 139, 23, 1, 1, 310, 310, 139, 310,
+ /* 1960 */ 310, 310, 310, 310, 310, 310, 310, 310, 310, 310,
+ /* 1970 */ 310, 310, 139, 139, 310, 310, 310, 310, 310, 310,
+ /* 1980 */ 310, 310, 310, 310, 310, 310, 310, 310, 310, 310,
+ /* 1990 */ 310, 310, 310, 310, 310, 310, 310, 310, 310, 310,
+ /* 2000 */ 310, 310, 310, 310, 310, 310, 310, 310, 310, 310,
+ /* 2010 */ 310, 310, 310, 310, 310, 310, 310, 310, 310, 310,
+ /* 2020 */ 310, 310, 310, 310, 310, 310, 310, 310, 310, 310,
+ /* 2030 */ 310, 310, 310, 310, 310, 310, 310, 310, 310, 310,
+ /* 2040 */ 310, 310, 310, 310, 310, 310, 310, 310, 310, 310,
+ /* 2050 */ 310, 310, 310, 310, 310, 310, 310, 310, 310, 310,
+ /* 2060 */ 310, 310, 310, 310, 310, 310, 310, 310, 310, 310,
+ /* 2070 */ 310, 310, 310, 310, 310, 310, 310, 310, 310, 310,
+ /* 2080 */ 310, 310, 310, 310, 310, 310, 310, 310, 310, 310,
+ /* 2090 */ 310, 310, 310, 310, 310, 310, 310, 310, 310, 310,
+ /* 2100 */ 310, 310, 310, 310, 310, 310, 310, 310, 310, 310,
+ /* 2110 */ 310, 310, 310, 310, 310, 310, 310, 310, 310, 310,
+ /* 2120 */ 310, 310, 310, 310, 310, 310, 310, 310, 310, 310,
+ /* 2130 */ 310, 310, 310, 310, 310, 310, 310, 310, 310, 310,
+ /* 2140 */ 310, 310, 310, 310, 310, 310, 310, 310, 310, 310,
+ /* 2150 */ 310, 310, 310, 310, 310,
+};
+#define YY_SHIFT_COUNT (556)
+#define YY_SHIFT_MIN (0)
+#define YY_SHIFT_MAX (1954)
+static const unsigned short int yy_shift_ofst[] = {
+ /* 0 */ 1448, 1277, 1668, 1072, 1072, 340, 1122, 1225, 1332, 1481,
+ /* 10 */ 1481, 1481, 335, 0, 0, 180, 897, 1481, 1481, 1481,
+ /* 20 */ 1481, 1481, 1481, 1481, 1481, 1481, 1481, 1481, 1481, 1481,
+ /* 30 */ 930, 930, 1020, 1020, 290, 1, 340, 340, 340, 340,
+ /* 40 */ 340, 340, 40, 110, 219, 288, 327, 396, 435, 504,
+ /* 50 */ 543, 612, 651, 720, 877, 897, 897, 897, 897, 897,
+ /* 60 */ 897, 897, 897, 897, 897, 897, 897, 897, 897, 897,
+ /* 70 */ 897, 897, 897, 917, 897, 1019, 763, 763, 1451, 1481,
+ /* 80 */ 1481, 1481, 1481, 1481, 1481, 1481, 1481, 1481, 1481, 1481,
+ /* 90 */ 1481, 1481, 1481, 1481, 1481, 1481, 1481, 1481, 1481, 1481,
+ /* 100 */ 1481, 1481, 1481, 1481, 1481, 1481, 1481, 1481, 1481, 1481,
+ /* 110 */ 1481, 1481, 1553, 1481, 1481, 1481, 1481, 1481, 1481, 1481,
+ /* 120 */ 1481, 1481, 1481, 1481, 1481, 1481, 147, 258, 258, 258,
+ /* 130 */ 258, 258, 79, 65, 84, 449, 19, 786, 449, 636,
+ /* 140 */ 636, 449, 880, 880, 880, 880, 113, 142, 142, 472,
+ /* 150 */ 150, 1974, 1974, 399, 399, 399, 93, 237, 341, 237,
+ /* 160 */ 237, 1074, 1074, 437, 350, 704, 1080, 449, 449, 449,
+ /* 170 */ 449, 449, 449, 449, 449, 449, 449, 449, 449, 449,
+ /* 180 */ 449, 449, 449, 449, 449, 449, 449, 449, 818, 818,
+ /* 190 */ 449, 1088, 217, 217, 734, 734, 1124, 1126, 1974, 1974,
+ /* 200 */ 1974, 739, 840, 840, 453, 454, 511, 187, 563, 570,
+ /* 210 */ 898, 669, 449, 449, 449, 449, 449, 449, 449, 449,
+ /* 220 */ 449, 670, 449, 449, 449, 449, 449, 449, 449, 449,
+ /* 230 */ 449, 449, 449, 449, 674, 674, 674, 449, 449, 449,
+ /* 240 */ 449, 1034, 449, 449, 449, 972, 1107, 449, 449, 1193,
+ /* 250 */ 449, 449, 449, 449, 449, 449, 449, 449, 260, 177,
+ /* 260 */ 489, 1241, 1241, 1241, 1241, 1192, 489, 489, 952, 1197,
+ /* 270 */ 625, 1235, 1131, 181, 181, 1086, 1139, 1131, 1086, 1187,
+ /* 280 */ 1319, 1237, 1318, 1318, 1318, 181, 1299, 1299, 1109, 1336,
+ /* 290 */ 549, 1376, 1610, 1535, 1535, 1639, 1639, 1535, 1539, 1578,
+ /* 300 */ 1670, 1547, 1551, 1549, 1658, 1547, 1551, 1549, 1694, 1694,
+ /* 310 */ 1694, 1694, 1535, 1702, 1549, 1549, 1578, 1670, 1658, 1549,
+ /* 320 */ 1658, 1549, 1535, 1702, 1575, 1669, 1535, 1702, 1712, 1535,
+ /* 330 */ 1702, 1535, 1702, 1712, 1631, 1631, 1631, 1686, 1730, 1730,
+ /* 340 */ 1712, 1631, 1629, 1631, 1686, 1631, 1631, 1594, 1712, 1646,
+ /* 350 */ 1646, 1712, 1617, 1648, 1617, 1648, 1617, 1648, 1617, 1648,
+ /* 360 */ 1535, 1693, 1693, 1705, 1705, 1547, 1551, 1766, 1535, 1635,
+ /* 370 */ 1547, 1650, 1649, 1549, 1771, 1772, 1788, 1788, 1801, 1801,
+ /* 380 */ 1801, 1974, 1974, 1974, 1974, 1974, 1974, 1974, 1974, 1974,
+ /* 390 */ 1974, 1974, 1974, 1974, 1974, 1974, 308, 835, 954, 1232,
+ /* 400 */ 879, 715, 728, 1373, 864, 1329, 1253, 1409, 297, 1431,
+ /* 410 */ 1489, 1497, 1520, 1521, 1525, 1362, 1309, 1491, 1217, 1420,
+ /* 420 */ 1429, 1536, 1380, 1538, 1293, 1354, 1548, 1585, 1434, 1342,
+ /* 430 */ 1821, 1823, 1811, 1674, 1820, 1738, 1822, 1814, 1816, 1703,
+ /* 440 */ 1695, 1713, 1818, 1704, 1825, 1706, 1830, 1846, 1709, 1701,
+ /* 450 */ 1723, 1791, 1817, 1707, 1800, 1803, 1804, 1805, 1731, 1746,
+ /* 460 */ 1827, 1724, 1861, 1859, 1845, 1755, 1711, 1802, 1847, 1806,
+ /* 470 */ 1796, 1831, 1736, 1763, 1853, 1858, 1860, 1751, 1758, 1862,
+ /* 480 */ 1815, 1863, 1864, 1865, 1867, 1824, 1828, 1866, 1787, 1868,
+ /* 490 */ 1870, 1826, 1857, 1872, 1750, 1876, 1877, 1878, 1879, 1873,
+ /* 500 */ 1880, 1882, 1881, 1883, 1885, 1884, 1770, 1887, 1888, 1798,
+ /* 510 */ 1886, 1891, 1773, 1889, 1890, 1892, 1893, 1894, 1829, 1841,
+ /* 520 */ 1832, 1875, 1848, 1836, 1896, 1898, 1900, 1901, 1905, 1906,
+ /* 530 */ 1899, 1911, 1889, 1912, 1913, 1914, 1915, 1916, 1917, 1919,
+ /* 540 */ 1928, 1921, 1922, 1923, 1924, 1926, 1927, 1925, 1812, 1813,
+ /* 550 */ 1819, 1833, 1834, 1930, 1935, 1953, 1954,
+};
+#define YY_REDUCE_COUNT (395)
+#define YY_REDUCE_MIN (-262)
+#define YY_REDUCE_MAX (1627)
+static const short yy_reduce_ofst[] = {
+ /* 0 */ 490, -122, 545, 645, 650, -120, -189, -187, -184, -182,
+ /* 10 */ -178, -176, 45, 30, 200, -251, -134, 390, 392, 521,
+ /* 20 */ 523, 213, 692, 821, 284, 589, 872, 666, 671, 866,
+ /* 30 */ 71, 111, 273, 389, 686, 815, 904, 932, 948, 955,
+ /* 40 */ 964, 969, -259, -259, -259, -259, -259, -259, -259, -259,
+ /* 50 */ -259, -259, -259, -259, -259, -259, -259, -259, -259, -259,
+ /* 60 */ -259, -259, -259, -259, -259, -259, -259, -259, -259, -259,
+ /* 70 */ -259, -259, -259, -259, -259, -259, -259, -259, 428, 430,
+ /* 80 */ 899, 985, 1021, 1028, 1057, 1069, 1081, 1108, 1110, 1115,
+ /* 90 */ 1117, 1123, 1149, 1154, 1159, 1170, 1174, 1178, 1183, 1194,
+ /* 100 */ 1198, 1204, 1208, 1212, 1218, 1222, 1229, 1278, 1280, 1283,
+ /* 110 */ 1285, 1313, 1316, 1320, 1322, 1325, 1327, 1330, 1366, 1371,
+ /* 120 */ 1379, 1387, 1417, 1425, 1430, 1432, -259, -259, -259, -259,
+ /* 130 */ -259, -259, -259, -259, -259, 557, 974, -214, -174, -9,
+ /* 140 */ 431, -124, 806, 925, 806, 925, 251, 928, 940, -259,
+ /* 150 */ -259, -259, -259, -198, -198, -198, 127, -186, -168, 212,
+ /* 160 */ 646, 617, 799, -262, 555, 220, 220, 491, 605, 1040,
+ /* 170 */ 1060, 699, -11, 600, 848, 862, 345, -129, 724, -91,
+ /* 180 */ 158, 749, 716, 900, 304, 822, 929, 926, 499, 793,
+ /* 190 */ 322, 892, 813, 845, 958, 1056, 751, 905, 1133, 1062,
+ /* 200 */ 803, -210, -185, -179, -148, -167, -89, 121, 274, 281,
+ /* 210 */ 320, 336, 439, 663, 711, 957, 965, 1064, 1068, 1112,
+ /* 220 */ 1116, -196, 1127, 1134, 1180, 1184, 1195, 1199, 1203, 1215,
+ /* 230 */ 1223, 1250, 1267, 1286, 205, 422, 638, 1324, 1341, 1364,
+ /* 240 */ 1365, 1213, 1392, 1399, 1403, 869, 1260, 1405, 1421, 1276,
+ /* 250 */ 1424, 121, 1426, 1427, 1428, 1433, 1436, 1437, 1227, 1338,
+ /* 260 */ 1284, 1359, 1370, 1377, 1388, 1213, 1284, 1284, 1385, 1438,
+ /* 270 */ 1443, 1349, 1400, 1391, 1394, 1360, 1408, 1410, 1367, 1439,
+ /* 280 */ 1440, 1435, 1442, 1446, 1447, 1397, 1413, 1418, 1390, 1444,
+ /* 290 */ 1445, 1474, 1381, 1479, 1480, 1401, 1402, 1490, 1414, 1449,
+ /* 300 */ 1452, 1450, 1453, 1458, 1473, 1464, 1466, 1470, 1483, 1485,
+ /* 310 */ 1486, 1487, 1517, 1526, 1482, 1484, 1457, 1460, 1498, 1488,
+ /* 320 */ 1499, 1492, 1530, 1537, 1454, 1459, 1540, 1544, 1519, 1546,
+ /* 330 */ 1545, 1552, 1550, 1527, 1534, 1541, 1542, 1529, 1543, 1554,
+ /* 340 */ 1555, 1556, 1557, 1561, 1558, 1562, 1563, 1455, 1565, 1493,
+ /* 350 */ 1495, 1566, 1504, 1508, 1510, 1509, 1511, 1522, 1523, 1528,
+ /* 360 */ 1582, 1477, 1478, 1531, 1532, 1564, 1559, 1524, 1593, 1560,
+ /* 370 */ 1567, 1568, 1571, 1569, 1598, 1602, 1612, 1616, 1620, 1626,
+ /* 380 */ 1627, 1533, 1570, 1572, 1614, 1609, 1611, 1613, 1615, 1618,
+ /* 390 */ 1601, 1605, 1619, 1621, 1623, 1622,
+};
+static const YYACTIONTYPE yy_default[] = {
+ /* 0 */ 1579, 1579, 1579, 1415, 1192, 1301, 1192, 1192, 1192, 1415,
+ /* 10 */ 1415, 1415, 1192, 1331, 1331, 1468, 1223, 1192, 1192, 1192,
+ /* 20 */ 1192, 1192, 1192, 1192, 1192, 1192, 1192, 1414, 1192, 1192,
+ /* 30 */ 1192, 1192, 1498, 1498, 1192, 1192, 1192, 1192, 1192, 1192,
+ /* 40 */ 1192, 1192, 1192, 1340, 1192, 1192, 1192, 1192, 1192, 1192,
+ /* 50 */ 1416, 1417, 1192, 1192, 1192, 1467, 1469, 1432, 1350, 1349,
+ /* 60 */ 1348, 1347, 1450, 1318, 1345, 1338, 1342, 1410, 1411, 1409,
+ /* 70 */ 1413, 1417, 1416, 1192, 1341, 1381, 1395, 1380, 1192, 1192,
+ /* 80 */ 1192, 1192, 1192, 1192, 1192, 1192, 1192, 1192, 1192, 1192,
+ /* 90 */ 1192, 1192, 1192, 1192, 1192, 1192, 1192, 1192, 1192, 1192,
+ /* 100 */ 1192, 1192, 1192, 1192, 1192, 1192, 1192, 1192, 1192, 1192,
+ /* 110 */ 1192, 1192, 1192, 1192, 1192, 1192, 1192, 1192, 1192, 1192,
+ /* 120 */ 1192, 1192, 1192, 1192, 1192, 1192, 1389, 1394, 1400, 1393,
+ /* 130 */ 1390, 1383, 1382, 1384, 1385, 1192, 1213, 1265, 1192, 1192,
+ /* 140 */ 1192, 1192, 1486, 1485, 1192, 1192, 1223, 1375, 1374, 1386,
+ /* 150 */ 1387, 1397, 1396, 1475, 1533, 1532, 1433, 1192, 1192, 1192,
+ /* 160 */ 1192, 1192, 1192, 1498, 1192, 1192, 1192, 1192, 1192, 1192,
+ /* 170 */ 1192, 1192, 1192, 1192, 1192, 1192, 1192, 1192, 1192, 1192,
+ /* 180 */ 1192, 1192, 1192, 1192, 1192, 1192, 1192, 1192, 1498, 1498,
+ /* 190 */ 1192, 1223, 1498, 1498, 1219, 1219, 1325, 1192, 1481, 1301,
+ /* 200 */ 1292, 1192, 1192, 1192, 1192, 1192, 1192, 1192, 1192, 1192,
+ /* 210 */ 1192, 1192, 1192, 1192, 1192, 1472, 1470, 1192, 1192, 1192,
+ /* 220 */ 1192, 1192, 1192, 1192, 1192, 1192, 1192, 1192, 1192, 1192,
+ /* 230 */ 1192, 1192, 1192, 1192, 1192, 1192, 1192, 1192, 1192, 1192,
+ /* 240 */ 1192, 1192, 1192, 1192, 1192, 1297, 1192, 1192, 1192, 1192,
+ /* 250 */ 1192, 1192, 1192, 1192, 1192, 1192, 1192, 1527, 1192, 1445,
+ /* 260 */ 1279, 1297, 1297, 1297, 1297, 1299, 1280, 1278, 1291, 1224,
+ /* 270 */ 1199, 1571, 1298, 1320, 1320, 1568, 1344, 1298, 1568, 1240,
+ /* 280 */ 1549, 1235, 1331, 1331, 1331, 1320, 1325, 1325, 1412, 1298,
+ /* 290 */ 1291, 1192, 1571, 1306, 1306, 1570, 1570, 1306, 1433, 1353,
+ /* 300 */ 1359, 1339, 1325, 1344, 1268, 1339, 1325, 1344, 1274, 1274,
+ /* 310 */ 1274, 1274, 1306, 1210, 1344, 1344, 1353, 1359, 1268, 1344,
+ /* 320 */ 1268, 1344, 1306, 1210, 1449, 1565, 1306, 1210, 1423, 1306,
+ /* 330 */ 1210, 1306, 1210, 1423, 1266, 1266, 1266, 1255, 1192, 1192,
+ /* 340 */ 1423, 1266, 1240, 1266, 1255, 1266, 1266, 1516, 1423, 1427,
+ /* 350 */ 1427, 1423, 1324, 1319, 1324, 1319, 1324, 1319, 1324, 1319,
+ /* 360 */ 1306, 1508, 1508, 1334, 1334, 1339, 1325, 1418, 1306, 1192,
+ /* 370 */ 1339, 1337, 1335, 1344, 1216, 1258, 1530, 1530, 1526, 1526,
+ /* 380 */ 1526, 1576, 1576, 1481, 1542, 1223, 1223, 1223, 1223, 1542,
+ /* 390 */ 1242, 1242, 1224, 1224, 1223, 1542, 1192, 1192, 1192, 1192,
+ /* 400 */ 1192, 1192, 1537, 1192, 1434, 1310, 1192, 1192, 1192, 1192,
+ /* 410 */ 1192, 1192, 1192, 1192, 1192, 1192, 1192, 1192, 1192, 1192,
+ /* 420 */ 1192, 1192, 1192, 1192, 1192, 1192, 1192, 1192, 1192, 1364,
+ /* 430 */ 1192, 1195, 1478, 1192, 1192, 1476, 1192, 1192, 1192, 1192,
+ /* 440 */ 1192, 1192, 1311, 1192, 1192, 1192, 1192, 1192, 1192, 1192,
+ /* 450 */ 1192, 1192, 1192, 1192, 1192, 1192, 1192, 1192, 1192, 1192,
+ /* 460 */ 1192, 1567, 1192, 1192, 1192, 1192, 1192, 1192, 1448, 1447,
+ /* 470 */ 1192, 1192, 1308, 1192, 1192, 1192, 1192, 1192, 1192, 1192,
+ /* 480 */ 1192, 1192, 1192, 1192, 1192, 1192, 1238, 1192, 1192, 1192,
+ /* 490 */ 1192, 1192, 1192, 1192, 1192, 1192, 1192, 1192, 1192, 1192,
+ /* 500 */ 1192, 1192, 1192, 1192, 1192, 1192, 1192, 1192, 1192, 1192,
+ /* 510 */ 1192, 1192, 1192, 1336, 1192, 1192, 1192, 1192, 1192, 1192,
+ /* 520 */ 1192, 1192, 1192, 1192, 1192, 1192, 1192, 1192, 1513, 1326,
+ /* 530 */ 1192, 1192, 1558, 1192, 1192, 1192, 1192, 1192, 1192, 1192,
+ /* 540 */ 1192, 1192, 1192, 1192, 1192, 1192, 1192, 1553, 1282, 1366,
+ /* 550 */ 1192, 1365, 1369, 1192, 1204, 1192, 1192,
+};
+/********** End of lemon-generated parsing tables *****************************/
+
+/* The next table maps tokens (terminal symbols) into fallback tokens.
+** If a construct like the following:
+**
+** %fallback ID X Y Z.
+**
+** appears in the grammar, then ID becomes a fallback token for X, Y,
+** and Z. Whenever one of the tokens X, Y, or Z is input to the parser
+** but it does not parse, the type of the token is changed to ID and
+** the parse is retried before an error is thrown.
+**
+** This feature can be used, for example, to cause some keywords in a language
+** to revert to identifiers if the keyword does not apply in the context where
+** it appears.
+*/
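+/*
+** For example, because BEGIN falls back to ID in the table below, a
+** statement such as "CREATE TABLE t(begin INTEGER)" can use "begin" as
+** a column name even though BEGIN is a keyword.
+*/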
+#ifdef YYFALLBACK
+static const YYCODETYPE yyFallback[] = {
+ 0, /* $ => nothing */
+ 0, /* SEMI => nothing */
+ 59, /* EXPLAIN => ID */
+ 59, /* QUERY => ID */
+ 59, /* PLAN => ID */
+ 59, /* BEGIN => ID */
+ 0, /* TRANSACTION => nothing */
+ 59, /* DEFERRED => ID */
+ 59, /* IMMEDIATE => ID */
+ 59, /* EXCLUSIVE => ID */
+ 0, /* COMMIT => nothing */
+ 59, /* END => ID */
+ 59, /* ROLLBACK => ID */
+ 59, /* SAVEPOINT => ID */
+ 59, /* RELEASE => ID */
+ 0, /* TO => nothing */
+ 0, /* TABLE => nothing */
+ 0, /* CREATE => nothing */
+ 59, /* IF => ID */
+ 0, /* NOT => nothing */
+ 0, /* EXISTS => nothing */
+ 59, /* TEMP => ID */
+ 0, /* LP => nothing */
+ 0, /* RP => nothing */
+ 0, /* AS => nothing */
+ 59, /* WITHOUT => ID */
+ 0, /* COMMA => nothing */
+ 59, /* ABORT => ID */
+ 59, /* ACTION => ID */
+ 59, /* AFTER => ID */
+ 59, /* ANALYZE => ID */
+ 59, /* ASC => ID */
+ 59, /* ATTACH => ID */
+ 59, /* BEFORE => ID */
+ 59, /* BY => ID */
+ 59, /* CASCADE => ID */
+ 59, /* CAST => ID */
+ 59, /* CONFLICT => ID */
+ 59, /* DATABASE => ID */
+ 59, /* DESC => ID */
+ 59, /* DETACH => ID */
+ 59, /* EACH => ID */
+ 59, /* FAIL => ID */
+ 0, /* OR => nothing */
+ 0, /* AND => nothing */
+ 0, /* IS => nothing */
+ 59, /* MATCH => ID */
+ 59, /* LIKE_KW => ID */
+ 0, /* BETWEEN => nothing */
+ 0, /* IN => nothing */
+ 0, /* ISNULL => nothing */
+ 0, /* NOTNULL => nothing */
+ 0, /* NE => nothing */
+ 0, /* EQ => nothing */
+ 0, /* GT => nothing */
+ 0, /* LE => nothing */
+ 0, /* LT => nothing */
+ 0, /* GE => nothing */
+ 0, /* ESCAPE => nothing */
+ 0, /* ID => nothing */
+ 59, /* COLUMNKW => ID */
+ 59, /* DO => ID */
+ 59, /* FOR => ID */
+ 59, /* IGNORE => ID */
+ 59, /* INITIALLY => ID */
+ 59, /* INSTEAD => ID */
+ 59, /* NO => ID */
+ 59, /* KEY => ID */
+ 59, /* OF => ID */
+ 59, /* OFFSET => ID */
+ 59, /* PRAGMA => ID */
+ 59, /* RAISE => ID */
+ 59, /* RECURSIVE => ID */
+ 59, /* REPLACE => ID */
+ 59, /* RESTRICT => ID */
+ 59, /* ROW => ID */
+ 59, /* ROWS => ID */
+ 59, /* TRIGGER => ID */
+ 59, /* VACUUM => ID */
+ 59, /* VIEW => ID */
+ 59, /* VIRTUAL => ID */
+ 59, /* WITH => ID */
+ 59, /* NULLS => ID */
+ 59, /* FIRST => ID */
+ 59, /* LAST => ID */
+ 59, /* CURRENT => ID */
+ 59, /* FOLLOWING => ID */
+ 59, /* PARTITION => ID */
+ 59, /* PRECEDING => ID */
+ 59, /* RANGE => ID */
+ 59, /* UNBOUNDED => ID */
+ 59, /* EXCLUDE => ID */
+ 59, /* GROUPS => ID */
+ 59, /* OTHERS => ID */
+ 59, /* TIES => ID */
+ 59, /* GENERATED => ID */
+ 59, /* ALWAYS => ID */
+ 59, /* REINDEX => ID */
+ 59, /* RENAME => ID */
+ 59, /* CTIME_KW => ID */
+ 0, /* ANY => nothing */
+ 0, /* BITAND => nothing */
+ 0, /* BITOR => nothing */
+ 0, /* LSHIFT => nothing */
+ 0, /* RSHIFT => nothing */
+ 0, /* PLUS => nothing */
+ 0, /* MINUS => nothing */
+ 0, /* STAR => nothing */
+ 0, /* SLASH => nothing */
+ 0, /* REM => nothing */
+ 0, /* CONCAT => nothing */
+ 0, /* COLLATE => nothing */
+ 0, /* BITNOT => nothing */
+ 0, /* ON => nothing */
+ 0, /* INDEXED => nothing */
+ 0, /* STRING => nothing */
+ 0, /* JOIN_KW => nothing */
+ 0, /* CONSTRAINT => nothing */
+ 0, /* DEFAULT => nothing */
+ 0, /* NULL => nothing */
+ 0, /* PRIMARY => nothing */
+ 0, /* UNIQUE => nothing */
+ 0, /* CHECK => nothing */
+ 0, /* REFERENCES => nothing */
+ 0, /* AUTOINCR => nothing */
+ 0, /* INSERT => nothing */
+ 0, /* DELETE => nothing */
+ 0, /* UPDATE => nothing */
+ 0, /* SET => nothing */
+ 0, /* DEFERRABLE => nothing */
+ 0, /* FOREIGN => nothing */
+ 0, /* DROP => nothing */
+ 0, /* UNION => nothing */
+ 0, /* ALL => nothing */
+ 0, /* EXCEPT => nothing */
+ 0, /* INTERSECT => nothing */
+ 0, /* SELECT => nothing */
+ 0, /* VALUES => nothing */
+ 0, /* DISTINCT => nothing */
+ 0, /* DOT => nothing */
+ 0, /* FROM => nothing */
+ 0, /* JOIN => nothing */
+ 0, /* USING => nothing */
+ 0, /* ORDER => nothing */
+ 0, /* GROUP => nothing */
+ 0, /* HAVING => nothing */
+ 0, /* LIMIT => nothing */
+ 0, /* WHERE => nothing */
+ 0, /* INTO => nothing */
+ 0, /* NOTHING => nothing */
+ 0, /* FLOAT => nothing */
+ 0, /* BLOB => nothing */
+ 0, /* INTEGER => nothing */
+ 0, /* VARIABLE => nothing */
+ 0, /* CASE => nothing */
+ 0, /* WHEN => nothing */
+ 0, /* THEN => nothing */
+ 0, /* ELSE => nothing */
+ 0, /* INDEX => nothing */
+ 0, /* ALTER => nothing */
+ 0, /* ADD => nothing */
+ 0, /* WINDOW => nothing */
+ 0, /* OVER => nothing */
+ 0, /* FILTER => nothing */
+ 0, /* COLUMN => nothing */
+ 0, /* AGG_FUNCTION => nothing */
+ 0, /* AGG_COLUMN => nothing */
+ 0, /* TRUEFALSE => nothing */
+ 0, /* ISNOT => nothing */
+ 0, /* FUNCTION => nothing */
+ 0, /* UMINUS => nothing */
+ 0, /* UPLUS => nothing */
+ 0, /* TRUTH => nothing */
+ 0, /* REGISTER => nothing */
+ 0, /* VECTOR => nothing */
+ 0, /* SELECT_COLUMN => nothing */
+ 0, /* IF_NULL_ROW => nothing */
+ 0, /* ASTERISK => nothing */
+ 0, /* SPAN => nothing */
+ 0, /* SPACE => nothing */
+ 0, /* ILLEGAL => nothing */
+};
+#endif /* YYFALLBACK */
+
+/* The following structure represents a single element of the
+** parser's stack. Information stored includes:
+**
+** + The state number for the parser at this level of the stack.
+**
+** + The value of the token stored at this level of the stack.
+** (In other words, the "major" token.)
+**
+** + The semantic value stored at this level of the stack. This is
+** the information used by the action routines in the grammar.
+** It is sometimes called the "minor" token.
+**
+** After the "shift" half of a SHIFTREDUCE action, the stateno field
+** actually contains the reduce action for the second half of the
+** SHIFTREDUCE.
+*/
+struct yyStackEntry {
+ YYACTIONTYPE stateno; /* The state-number, or reduce action in SHIFTREDUCE */
+ YYCODETYPE major; /* The major token value. This is the code
+ ** number for the token at this stack level */
+ YYMINORTYPE minor; /* The user-supplied minor token value. This
+ ** is the value of the token */
+};
+typedef struct yyStackEntry yyStackEntry;
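+
+/* Illustrative sketch (editorial, not generated code): after shifting an
+** ID token, the top-of-stack entry conceptually looks like
+**
+**   yyStackEntry e;
+**   e.stateno   = <state after the shift>;
+**   e.major     = 59;   -- token code for ID (see yyTokenName below)
+**   e.minor.yy0 = <the Token supplied by the tokenizer>;
+**
+** The 59 comes from the token tables in this file; everything in angle
+** brackets is a placeholder, not a real value.
+*/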
+
+/* The state of the parser is completely contained in an instance of
+** the following structure */
+struct yyParser {
+ yyStackEntry *yytos; /* Pointer to top element of the stack */
+#ifdef YYTRACKMAXSTACKDEPTH
+ int yyhwm; /* High-water mark of the stack */
+#endif
+#ifndef YYNOERRORRECOVERY
+ int yyerrcnt; /* Shifts left before exiting error-recovery */
+#endif
+ sqlite3ParserARG_SDECL /* A place to hold %extra_argument */
+ sqlite3ParserCTX_SDECL /* A place to hold %extra_context */
+#if YYSTACKDEPTH<=0
+ int yystksz; /* Current size of the stack */
+ yyStackEntry *yystack; /* The parser's stack */
+ yyStackEntry yystk0; /* First stack entry */
+#else
+ yyStackEntry yystack[YYSTACKDEPTH]; /* The parser's stack */
+ yyStackEntry *yystackEnd; /* Last entry in the stack */
+#endif
+};
+typedef struct yyParser yyParser;
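+
+/* Layout sketch (illustrative only): after sqlite3ParserInit() below,
+** yystack[0] holds a sentinel entry (stateno==0 and major==0, the "$"
+** end-of-input symbol) and yytos points at it; each shift advances yytos
+** by one entry and each reduce pops entries back toward yystack[0].
+*/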
+
+#ifndef NDEBUG
+/* #include <stdio.h> */
+static FILE *yyTraceFILE = 0;
+static char *yyTracePrompt = 0;
+#endif /* NDEBUG */
+
+#ifndef NDEBUG
+/*
+** Turn parser tracing on by giving a stream to which to write the trace
+** and a prompt to preface each trace message. Tracing is turned off
+** by making either argument NULL.
+**
+** Inputs:
+** <ul>
+** <li> A FILE* to which trace output should be written.
+** If NULL, then tracing is turned off.
+** <li> A prefix string written at the beginning of every
+** line of trace output. If NULL, then tracing is
+** turned off.
+** </ul>
+**
+** Outputs:
+** None.
+*/
+SQLITE_PRIVATE void sqlite3ParserTrace(FILE *TraceFILE, char *zTracePrompt){
+ yyTraceFILE = TraceFILE;
+ yyTracePrompt = zTracePrompt;
+ if( yyTraceFILE==0 ) yyTracePrompt = 0;
+ else if( yyTracePrompt==0 ) yyTraceFILE = 0;
+}
+#endif /* NDEBUG */
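+
+/* Usage sketch for sqlite3ParserTrace() (illustrative only, assuming a
+** debug build where NDEBUG is not defined and <stdio.h> is available):
+**
+**   sqlite3ParserTrace(stderr, "parser: ");   -- turn tracing on
+**   ...run some SQL through the parser...
+**   sqlite3ParserTrace(0, 0);                 -- turn tracing off
+*/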
+
+#if defined(YYCOVERAGE) || !defined(NDEBUG)
+/* For tracing shifts, the names of all terminals and nonterminals
+** are required. The following table supplies these names */
+static const char *const yyTokenName[] = {
+ /* 0 */ "$",
+ /* 1 */ "SEMI",
+ /* 2 */ "EXPLAIN",
+ /* 3 */ "QUERY",
+ /* 4 */ "PLAN",
+ /* 5 */ "BEGIN",
+ /* 6 */ "TRANSACTION",
+ /* 7 */ "DEFERRED",
+ /* 8 */ "IMMEDIATE",
+ /* 9 */ "EXCLUSIVE",
+ /* 10 */ "COMMIT",
+ /* 11 */ "END",
+ /* 12 */ "ROLLBACK",
+ /* 13 */ "SAVEPOINT",
+ /* 14 */ "RELEASE",
+ /* 15 */ "TO",
+ /* 16 */ "TABLE",
+ /* 17 */ "CREATE",
+ /* 18 */ "IF",
+ /* 19 */ "NOT",
+ /* 20 */ "EXISTS",
+ /* 21 */ "TEMP",
+ /* 22 */ "LP",
+ /* 23 */ "RP",
+ /* 24 */ "AS",
+ /* 25 */ "WITHOUT",
+ /* 26 */ "COMMA",
+ /* 27 */ "ABORT",
+ /* 28 */ "ACTION",
+ /* 29 */ "AFTER",
+ /* 30 */ "ANALYZE",
+ /* 31 */ "ASC",
+ /* 32 */ "ATTACH",
+ /* 33 */ "BEFORE",
+ /* 34 */ "BY",
+ /* 35 */ "CASCADE",
+ /* 36 */ "CAST",
+ /* 37 */ "CONFLICT",
+ /* 38 */ "DATABASE",
+ /* 39 */ "DESC",
+ /* 40 */ "DETACH",
+ /* 41 */ "EACH",
+ /* 42 */ "FAIL",
+ /* 43 */ "OR",
+ /* 44 */ "AND",
+ /* 45 */ "IS",
+ /* 46 */ "MATCH",
+ /* 47 */ "LIKE_KW",
+ /* 48 */ "BETWEEN",
+ /* 49 */ "IN",
+ /* 50 */ "ISNULL",
+ /* 51 */ "NOTNULL",
+ /* 52 */ "NE",
+ /* 53 */ "EQ",
+ /* 54 */ "GT",
+ /* 55 */ "LE",
+ /* 56 */ "LT",
+ /* 57 */ "GE",
+ /* 58 */ "ESCAPE",
+ /* 59 */ "ID",
+ /* 60 */ "COLUMNKW",
+ /* 61 */ "DO",
+ /* 62 */ "FOR",
+ /* 63 */ "IGNORE",
+ /* 64 */ "INITIALLY",
+ /* 65 */ "INSTEAD",
+ /* 66 */ "NO",
+ /* 67 */ "KEY",
+ /* 68 */ "OF",
+ /* 69 */ "OFFSET",
+ /* 70 */ "PRAGMA",
+ /* 71 */ "RAISE",
+ /* 72 */ "RECURSIVE",
+ /* 73 */ "REPLACE",
+ /* 74 */ "RESTRICT",
+ /* 75 */ "ROW",
+ /* 76 */ "ROWS",
+ /* 77 */ "TRIGGER",
+ /* 78 */ "VACUUM",
+ /* 79 */ "VIEW",
+ /* 80 */ "VIRTUAL",
+ /* 81 */ "WITH",
+ /* 82 */ "NULLS",
+ /* 83 */ "FIRST",
+ /* 84 */ "LAST",
+ /* 85 */ "CURRENT",
+ /* 86 */ "FOLLOWING",
+ /* 87 */ "PARTITION",
+ /* 88 */ "PRECEDING",
+ /* 89 */ "RANGE",
+ /* 90 */ "UNBOUNDED",
+ /* 91 */ "EXCLUDE",
+ /* 92 */ "GROUPS",
+ /* 93 */ "OTHERS",
+ /* 94 */ "TIES",
+ /* 95 */ "GENERATED",
+ /* 96 */ "ALWAYS",
+ /* 97 */ "REINDEX",
+ /* 98 */ "RENAME",
+ /* 99 */ "CTIME_KW",
+ /* 100 */ "ANY",
+ /* 101 */ "BITAND",
+ /* 102 */ "BITOR",
+ /* 103 */ "LSHIFT",
+ /* 104 */ "RSHIFT",
+ /* 105 */ "PLUS",
+ /* 106 */ "MINUS",
+ /* 107 */ "STAR",
+ /* 108 */ "SLASH",
+ /* 109 */ "REM",
+ /* 110 */ "CONCAT",
+ /* 111 */ "COLLATE",
+ /* 112 */ "BITNOT",
+ /* 113 */ "ON",
+ /* 114 */ "INDEXED",
+ /* 115 */ "STRING",
+ /* 116 */ "JOIN_KW",
+ /* 117 */ "CONSTRAINT",
+ /* 118 */ "DEFAULT",
+ /* 119 */ "NULL",
+ /* 120 */ "PRIMARY",
+ /* 121 */ "UNIQUE",
+ /* 122 */ "CHECK",
+ /* 123 */ "REFERENCES",
+ /* 124 */ "AUTOINCR",
+ /* 125 */ "INSERT",
+ /* 126 */ "DELETE",
+ /* 127 */ "UPDATE",
+ /* 128 */ "SET",
+ /* 129 */ "DEFERRABLE",
+ /* 130 */ "FOREIGN",
+ /* 131 */ "DROP",
+ /* 132 */ "UNION",
+ /* 133 */ "ALL",
+ /* 134 */ "EXCEPT",
+ /* 135 */ "INTERSECT",
+ /* 136 */ "SELECT",
+ /* 137 */ "VALUES",
+ /* 138 */ "DISTINCT",
+ /* 139 */ "DOT",
+ /* 140 */ "FROM",
+ /* 141 */ "JOIN",
+ /* 142 */ "USING",
+ /* 143 */ "ORDER",
+ /* 144 */ "GROUP",
+ /* 145 */ "HAVING",
+ /* 146 */ "LIMIT",
+ /* 147 */ "WHERE",
+ /* 148 */ "INTO",
+ /* 149 */ "NOTHING",
+ /* 150 */ "FLOAT",
+ /* 151 */ "BLOB",
+ /* 152 */ "INTEGER",
+ /* 153 */ "VARIABLE",
+ /* 154 */ "CASE",
+ /* 155 */ "WHEN",
+ /* 156 */ "THEN",
+ /* 157 */ "ELSE",
+ /* 158 */ "INDEX",
+ /* 159 */ "ALTER",
+ /* 160 */ "ADD",
+ /* 161 */ "WINDOW",
+ /* 162 */ "OVER",
+ /* 163 */ "FILTER",
+ /* 164 */ "COLUMN",
+ /* 165 */ "AGG_FUNCTION",
+ /* 166 */ "AGG_COLUMN",
+ /* 167 */ "TRUEFALSE",
+ /* 168 */ "ISNOT",
+ /* 169 */ "FUNCTION",
+ /* 170 */ "UMINUS",
+ /* 171 */ "UPLUS",
+ /* 172 */ "TRUTH",
+ /* 173 */ "REGISTER",
+ /* 174 */ "VECTOR",
+ /* 175 */ "SELECT_COLUMN",
+ /* 176 */ "IF_NULL_ROW",
+ /* 177 */ "ASTERISK",
+ /* 178 */ "SPAN",
+ /* 179 */ "SPACE",
+ /* 180 */ "ILLEGAL",
+ /* 181 */ "input",
+ /* 182 */ "cmdlist",
+ /* 183 */ "ecmd",
+ /* 184 */ "cmdx",
+ /* 185 */ "explain",
+ /* 186 */ "cmd",
+ /* 187 */ "transtype",
+ /* 188 */ "trans_opt",
+ /* 189 */ "nm",
+ /* 190 */ "savepoint_opt",
+ /* 191 */ "create_table",
+ /* 192 */ "create_table_args",
+ /* 193 */ "createkw",
+ /* 194 */ "temp",
+ /* 195 */ "ifnotexists",
+ /* 196 */ "dbnm",
+ /* 197 */ "columnlist",
+ /* 198 */ "conslist_opt",
+ /* 199 */ "table_options",
+ /* 200 */ "select",
+ /* 201 */ "columnname",
+ /* 202 */ "carglist",
+ /* 203 */ "typetoken",
+ /* 204 */ "typename",
+ /* 205 */ "signed",
+ /* 206 */ "plus_num",
+ /* 207 */ "minus_num",
+ /* 208 */ "scanpt",
+ /* 209 */ "scantok",
+ /* 210 */ "ccons",
+ /* 211 */ "term",
+ /* 212 */ "expr",
+ /* 213 */ "onconf",
+ /* 214 */ "sortorder",
+ /* 215 */ "autoinc",
+ /* 216 */ "eidlist_opt",
+ /* 217 */ "refargs",
+ /* 218 */ "defer_subclause",
+ /* 219 */ "generated",
+ /* 220 */ "refarg",
+ /* 221 */ "refact",
+ /* 222 */ "init_deferred_pred_opt",
+ /* 223 */ "conslist",
+ /* 224 */ "tconscomma",
+ /* 225 */ "tcons",
+ /* 226 */ "sortlist",
+ /* 227 */ "eidlist",
+ /* 228 */ "defer_subclause_opt",
+ /* 229 */ "orconf",
+ /* 230 */ "resolvetype",
+ /* 231 */ "raisetype",
+ /* 232 */ "ifexists",
+ /* 233 */ "fullname",
+ /* 234 */ "selectnowith",
+ /* 235 */ "oneselect",
+ /* 236 */ "wqlist",
+ /* 237 */ "multiselect_op",
+ /* 238 */ "distinct",
+ /* 239 */ "selcollist",
+ /* 240 */ "from",
+ /* 241 */ "where_opt",
+ /* 242 */ "groupby_opt",
+ /* 243 */ "having_opt",
+ /* 244 */ "orderby_opt",
+ /* 245 */ "limit_opt",
+ /* 246 */ "window_clause",
+ /* 247 */ "values",
+ /* 248 */ "nexprlist",
+ /* 249 */ "sclp",
+ /* 250 */ "as",
+ /* 251 */ "seltablist",
+ /* 252 */ "stl_prefix",
+ /* 253 */ "joinop",
+ /* 254 */ "indexed_opt",
+ /* 255 */ "on_opt",
+ /* 256 */ "using_opt",
+ /* 257 */ "exprlist",
+ /* 258 */ "xfullname",
+ /* 259 */ "idlist",
+ /* 260 */ "nulls",
+ /* 261 */ "with",
+ /* 262 */ "setlist",
+ /* 263 */ "insert_cmd",
+ /* 264 */ "idlist_opt",
+ /* 265 */ "upsert",
+ /* 266 */ "filter_over",
+ /* 267 */ "likeop",
+ /* 268 */ "between_op",
+ /* 269 */ "in_op",
+ /* 270 */ "paren_exprlist",
+ /* 271 */ "case_operand",
+ /* 272 */ "case_exprlist",
+ /* 273 */ "case_else",
+ /* 274 */ "uniqueflag",
+ /* 275 */ "collate",
+ /* 276 */ "vinto",
+ /* 277 */ "nmnum",
+ /* 278 */ "trigger_decl",
+ /* 279 */ "trigger_cmd_list",
+ /* 280 */ "trigger_time",
+ /* 281 */ "trigger_event",
+ /* 282 */ "foreach_clause",
+ /* 283 */ "when_clause",
+ /* 284 */ "trigger_cmd",
+ /* 285 */ "trnm",
+ /* 286 */ "tridxby",
+ /* 287 */ "database_kw_opt",
+ /* 288 */ "key_opt",
+ /* 289 */ "add_column_fullname",
+ /* 290 */ "kwcolumn_opt",
+ /* 291 */ "create_vtab",
+ /* 292 */ "vtabarglist",
+ /* 293 */ "vtabarg",
+ /* 294 */ "vtabargtoken",
+ /* 295 */ "lp",
+ /* 296 */ "anylist",
+ /* 297 */ "windowdefn_list",
+ /* 298 */ "windowdefn",
+ /* 299 */ "window",
+ /* 300 */ "frame_opt",
+ /* 301 */ "part_opt",
+ /* 302 */ "filter_clause",
+ /* 303 */ "over_clause",
+ /* 304 */ "range_or_rows",
+ /* 305 */ "frame_bound",
+ /* 306 */ "frame_bound_s",
+ /* 307 */ "frame_bound_e",
+ /* 308 */ "frame_exclude_opt",
+ /* 309 */ "frame_exclude",
+};
+#endif /* defined(YYCOVERAGE) || !defined(NDEBUG) */
+
+#ifndef NDEBUG
+/* For tracing reduce actions, the names of all rules are required.
+*/
+static const char *const yyRuleName[] = {
+ /* 0 */ "explain ::= EXPLAIN",
+ /* 1 */ "explain ::= EXPLAIN QUERY PLAN",
+ /* 2 */ "cmdx ::= cmd",
+ /* 3 */ "cmd ::= BEGIN transtype trans_opt",
+ /* 4 */ "transtype ::=",
+ /* 5 */ "transtype ::= DEFERRED",
+ /* 6 */ "transtype ::= IMMEDIATE",
+ /* 7 */ "transtype ::= EXCLUSIVE",
+ /* 8 */ "cmd ::= COMMIT|END trans_opt",
+ /* 9 */ "cmd ::= ROLLBACK trans_opt",
+ /* 10 */ "cmd ::= SAVEPOINT nm",
+ /* 11 */ "cmd ::= RELEASE savepoint_opt nm",
+ /* 12 */ "cmd ::= ROLLBACK trans_opt TO savepoint_opt nm",
+ /* 13 */ "create_table ::= createkw temp TABLE ifnotexists nm dbnm",
+ /* 14 */ "createkw ::= CREATE",
+ /* 15 */ "ifnotexists ::=",
+ /* 16 */ "ifnotexists ::= IF NOT EXISTS",
+ /* 17 */ "temp ::= TEMP",
+ /* 18 */ "temp ::=",
+ /* 19 */ "create_table_args ::= LP columnlist conslist_opt RP table_options",
+ /* 20 */ "create_table_args ::= AS select",
+ /* 21 */ "table_options ::=",
+ /* 22 */ "table_options ::= WITHOUT nm",
+ /* 23 */ "columnname ::= nm typetoken",
+ /* 24 */ "typetoken ::=",
+ /* 25 */ "typetoken ::= typename LP signed RP",
+ /* 26 */ "typetoken ::= typename LP signed COMMA signed RP",
+ /* 27 */ "typename ::= typename ID|STRING",
+ /* 28 */ "scanpt ::=",
+ /* 29 */ "scantok ::=",
+ /* 30 */ "ccons ::= CONSTRAINT nm",
+ /* 31 */ "ccons ::= DEFAULT scantok term",
+ /* 32 */ "ccons ::= DEFAULT LP expr RP",
+ /* 33 */ "ccons ::= DEFAULT PLUS scantok term",
+ /* 34 */ "ccons ::= DEFAULT MINUS scantok term",
+ /* 35 */ "ccons ::= DEFAULT scantok ID|INDEXED",
+ /* 36 */ "ccons ::= NOT NULL onconf",
+ /* 37 */ "ccons ::= PRIMARY KEY sortorder onconf autoinc",
+ /* 38 */ "ccons ::= UNIQUE onconf",
+ /* 39 */ "ccons ::= CHECK LP expr RP",
+ /* 40 */ "ccons ::= REFERENCES nm eidlist_opt refargs",
+ /* 41 */ "ccons ::= defer_subclause",
+ /* 42 */ "ccons ::= COLLATE ID|STRING",
+ /* 43 */ "generated ::= LP expr RP",
+ /* 44 */ "generated ::= LP expr RP ID",
+ /* 45 */ "autoinc ::=",
+ /* 46 */ "autoinc ::= AUTOINCR",
+ /* 47 */ "refargs ::=",
+ /* 48 */ "refargs ::= refargs refarg",
+ /* 49 */ "refarg ::= MATCH nm",
+ /* 50 */ "refarg ::= ON INSERT refact",
+ /* 51 */ "refarg ::= ON DELETE refact",
+ /* 52 */ "refarg ::= ON UPDATE refact",
+ /* 53 */ "refact ::= SET NULL",
+ /* 54 */ "refact ::= SET DEFAULT",
+ /* 55 */ "refact ::= CASCADE",
+ /* 56 */ "refact ::= RESTRICT",
+ /* 57 */ "refact ::= NO ACTION",
+ /* 58 */ "defer_subclause ::= NOT DEFERRABLE init_deferred_pred_opt",
+ /* 59 */ "defer_subclause ::= DEFERRABLE init_deferred_pred_opt",
+ /* 60 */ "init_deferred_pred_opt ::=",
+ /* 61 */ "init_deferred_pred_opt ::= INITIALLY DEFERRED",
+ /* 62 */ "init_deferred_pred_opt ::= INITIALLY IMMEDIATE",
+ /* 63 */ "conslist_opt ::=",
+ /* 64 */ "tconscomma ::= COMMA",
+ /* 65 */ "tcons ::= CONSTRAINT nm",
+ /* 66 */ "tcons ::= PRIMARY KEY LP sortlist autoinc RP onconf",
+ /* 67 */ "tcons ::= UNIQUE LP sortlist RP onconf",
+ /* 68 */ "tcons ::= CHECK LP expr RP onconf",
+ /* 69 */ "tcons ::= FOREIGN KEY LP eidlist RP REFERENCES nm eidlist_opt refargs defer_subclause_opt",
+ /* 70 */ "defer_subclause_opt ::=",
+ /* 71 */ "onconf ::=",
+ /* 72 */ "onconf ::= ON CONFLICT resolvetype",
+ /* 73 */ "orconf ::=",
+ /* 74 */ "orconf ::= OR resolvetype",
+ /* 75 */ "resolvetype ::= IGNORE",
+ /* 76 */ "resolvetype ::= REPLACE",
+ /* 77 */ "cmd ::= DROP TABLE ifexists fullname",
+ /* 78 */ "ifexists ::= IF EXISTS",
+ /* 79 */ "ifexists ::=",
+ /* 80 */ "cmd ::= createkw temp VIEW ifnotexists nm dbnm eidlist_opt AS select",
+ /* 81 */ "cmd ::= DROP VIEW ifexists fullname",
+ /* 82 */ "cmd ::= select",
+ /* 83 */ "select ::= WITH wqlist selectnowith",
+ /* 84 */ "select ::= WITH RECURSIVE wqlist selectnowith",
+ /* 85 */ "select ::= selectnowith",
+ /* 86 */ "selectnowith ::= selectnowith multiselect_op oneselect",
+ /* 87 */ "multiselect_op ::= UNION",
+ /* 88 */ "multiselect_op ::= UNION ALL",
+ /* 89 */ "multiselect_op ::= EXCEPT|INTERSECT",
+ /* 90 */ "oneselect ::= SELECT distinct selcollist from where_opt groupby_opt having_opt orderby_opt limit_opt",
+ /* 91 */ "oneselect ::= SELECT distinct selcollist from where_opt groupby_opt having_opt window_clause orderby_opt limit_opt",
+ /* 92 */ "values ::= VALUES LP nexprlist RP",
+ /* 93 */ "values ::= values COMMA LP nexprlist RP",
+ /* 94 */ "distinct ::= DISTINCT",
+ /* 95 */ "distinct ::= ALL",
+ /* 96 */ "distinct ::=",
+ /* 97 */ "sclp ::=",
+ /* 98 */ "selcollist ::= sclp scanpt expr scanpt as",
+ /* 99 */ "selcollist ::= sclp scanpt STAR",
+ /* 100 */ "selcollist ::= sclp scanpt nm DOT STAR",
+ /* 101 */ "as ::= AS nm",
+ /* 102 */ "as ::=",
+ /* 103 */ "from ::=",
+ /* 104 */ "from ::= FROM seltablist",
+ /* 105 */ "stl_prefix ::= seltablist joinop",
+ /* 106 */ "stl_prefix ::=",
+ /* 107 */ "seltablist ::= stl_prefix nm dbnm as indexed_opt on_opt using_opt",
+ /* 108 */ "seltablist ::= stl_prefix nm dbnm LP exprlist RP as on_opt using_opt",
+ /* 109 */ "seltablist ::= stl_prefix LP select RP as on_opt using_opt",
+ /* 110 */ "seltablist ::= stl_prefix LP seltablist RP as on_opt using_opt",
+ /* 111 */ "dbnm ::=",
+ /* 112 */ "dbnm ::= DOT nm",
+ /* 113 */ "fullname ::= nm",
+ /* 114 */ "fullname ::= nm DOT nm",
+ /* 115 */ "xfullname ::= nm",
+ /* 116 */ "xfullname ::= nm DOT nm",
+ /* 117 */ "xfullname ::= nm DOT nm AS nm",
+ /* 118 */ "xfullname ::= nm AS nm",
+ /* 119 */ "joinop ::= COMMA|JOIN",
+ /* 120 */ "joinop ::= JOIN_KW JOIN",
+ /* 121 */ "joinop ::= JOIN_KW nm JOIN",
+ /* 122 */ "joinop ::= JOIN_KW nm nm JOIN",
+ /* 123 */ "on_opt ::= ON expr",
+ /* 124 */ "on_opt ::=",
+ /* 125 */ "indexed_opt ::=",
+ /* 126 */ "indexed_opt ::= INDEXED BY nm",
+ /* 127 */ "indexed_opt ::= NOT INDEXED",
+ /* 128 */ "using_opt ::= USING LP idlist RP",
+ /* 129 */ "using_opt ::=",
+ /* 130 */ "orderby_opt ::=",
+ /* 131 */ "orderby_opt ::= ORDER BY sortlist",
+ /* 132 */ "sortlist ::= sortlist COMMA expr sortorder nulls",
+ /* 133 */ "sortlist ::= expr sortorder nulls",
+ /* 134 */ "sortorder ::= ASC",
+ /* 135 */ "sortorder ::= DESC",
+ /* 136 */ "sortorder ::=",
+ /* 137 */ "nulls ::= NULLS FIRST",
+ /* 138 */ "nulls ::= NULLS LAST",
+ /* 139 */ "nulls ::=",
+ /* 140 */ "groupby_opt ::=",
+ /* 141 */ "groupby_opt ::= GROUP BY nexprlist",
+ /* 142 */ "having_opt ::=",
+ /* 143 */ "having_opt ::= HAVING expr",
+ /* 144 */ "limit_opt ::=",
+ /* 145 */ "limit_opt ::= LIMIT expr",
+ /* 146 */ "limit_opt ::= LIMIT expr OFFSET expr",
+ /* 147 */ "limit_opt ::= LIMIT expr COMMA expr",
+ /* 148 */ "cmd ::= with DELETE FROM xfullname indexed_opt where_opt orderby_opt limit_opt",
+ /* 149 */ "where_opt ::=",
+ /* 150 */ "where_opt ::= WHERE expr",
+ /* 151 */ "cmd ::= with UPDATE orconf xfullname indexed_opt SET setlist from where_opt orderby_opt limit_opt",
+ /* 152 */ "setlist ::= setlist COMMA nm EQ expr",
+ /* 153 */ "setlist ::= setlist COMMA LP idlist RP EQ expr",
+ /* 154 */ "setlist ::= nm EQ expr",
+ /* 155 */ "setlist ::= LP idlist RP EQ expr",
+ /* 156 */ "cmd ::= with insert_cmd INTO xfullname idlist_opt select upsert",
+ /* 157 */ "cmd ::= with insert_cmd INTO xfullname idlist_opt DEFAULT VALUES",
+ /* 158 */ "upsert ::=",
+ /* 159 */ "upsert ::= ON CONFLICT LP sortlist RP where_opt DO UPDATE SET setlist where_opt",
+ /* 160 */ "upsert ::= ON CONFLICT LP sortlist RP where_opt DO NOTHING",
+ /* 161 */ "upsert ::= ON CONFLICT DO NOTHING",
+ /* 162 */ "insert_cmd ::= INSERT orconf",
+ /* 163 */ "insert_cmd ::= REPLACE",
+ /* 164 */ "idlist_opt ::=",
+ /* 165 */ "idlist_opt ::= LP idlist RP",
+ /* 166 */ "idlist ::= idlist COMMA nm",
+ /* 167 */ "idlist ::= nm",
+ /* 168 */ "expr ::= LP expr RP",
+ /* 169 */ "expr ::= ID|INDEXED",
+ /* 170 */ "expr ::= JOIN_KW",
+ /* 171 */ "expr ::= nm DOT nm",
+ /* 172 */ "expr ::= nm DOT nm DOT nm",
+ /* 173 */ "term ::= NULL|FLOAT|BLOB",
+ /* 174 */ "term ::= STRING",
+ /* 175 */ "term ::= INTEGER",
+ /* 176 */ "expr ::= VARIABLE",
+ /* 177 */ "expr ::= expr COLLATE ID|STRING",
+ /* 178 */ "expr ::= CAST LP expr AS typetoken RP",
+ /* 179 */ "expr ::= ID|INDEXED LP distinct exprlist RP",
+ /* 180 */ "expr ::= ID|INDEXED LP STAR RP",
+ /* 181 */ "expr ::= ID|INDEXED LP distinct exprlist RP filter_over",
+ /* 182 */ "expr ::= ID|INDEXED LP STAR RP filter_over",
+ /* 183 */ "term ::= CTIME_KW",
+ /* 184 */ "expr ::= LP nexprlist COMMA expr RP",
+ /* 185 */ "expr ::= expr AND expr",
+ /* 186 */ "expr ::= expr OR expr",
+ /* 187 */ "expr ::= expr LT|GT|GE|LE expr",
+ /* 188 */ "expr ::= expr EQ|NE expr",
+ /* 189 */ "expr ::= expr BITAND|BITOR|LSHIFT|RSHIFT expr",
+ /* 190 */ "expr ::= expr PLUS|MINUS expr",
+ /* 191 */ "expr ::= expr STAR|SLASH|REM expr",
+ /* 192 */ "expr ::= expr CONCAT expr",
+ /* 193 */ "likeop ::= NOT LIKE_KW|MATCH",
+ /* 194 */ "expr ::= expr likeop expr",
+ /* 195 */ "expr ::= expr likeop expr ESCAPE expr",
+ /* 196 */ "expr ::= expr ISNULL|NOTNULL",
+ /* 197 */ "expr ::= expr NOT NULL",
+ /* 198 */ "expr ::= expr IS expr",
+ /* 199 */ "expr ::= expr IS NOT expr",
+ /* 200 */ "expr ::= NOT expr",
+ /* 201 */ "expr ::= BITNOT expr",
+ /* 202 */ "expr ::= PLUS|MINUS expr",
+ /* 203 */ "between_op ::= BETWEEN",
+ /* 204 */ "between_op ::= NOT BETWEEN",
+ /* 205 */ "expr ::= expr between_op expr AND expr",
+ /* 206 */ "in_op ::= IN",
+ /* 207 */ "in_op ::= NOT IN",
+ /* 208 */ "expr ::= expr in_op LP exprlist RP",
+ /* 209 */ "expr ::= LP select RP",
+ /* 210 */ "expr ::= expr in_op LP select RP",
+ /* 211 */ "expr ::= expr in_op nm dbnm paren_exprlist",
+ /* 212 */ "expr ::= EXISTS LP select RP",
+ /* 213 */ "expr ::= CASE case_operand case_exprlist case_else END",
+ /* 214 */ "case_exprlist ::= case_exprlist WHEN expr THEN expr",
+ /* 215 */ "case_exprlist ::= WHEN expr THEN expr",
+ /* 216 */ "case_else ::= ELSE expr",
+ /* 217 */ "case_else ::=",
+ /* 218 */ "case_operand ::= expr",
+ /* 219 */ "case_operand ::=",
+ /* 220 */ "exprlist ::=",
+ /* 221 */ "nexprlist ::= nexprlist COMMA expr",
+ /* 222 */ "nexprlist ::= expr",
+ /* 223 */ "paren_exprlist ::=",
+ /* 224 */ "paren_exprlist ::= LP exprlist RP",
+ /* 225 */ "cmd ::= createkw uniqueflag INDEX ifnotexists nm dbnm ON nm LP sortlist RP where_opt",
+ /* 226 */ "uniqueflag ::= UNIQUE",
+ /* 227 */ "uniqueflag ::=",
+ /* 228 */ "eidlist_opt ::=",
+ /* 229 */ "eidlist_opt ::= LP eidlist RP",
+ /* 230 */ "eidlist ::= eidlist COMMA nm collate sortorder",
+ /* 231 */ "eidlist ::= nm collate sortorder",
+ /* 232 */ "collate ::=",
+ /* 233 */ "collate ::= COLLATE ID|STRING",
+ /* 234 */ "cmd ::= DROP INDEX ifexists fullname",
+ /* 235 */ "cmd ::= VACUUM vinto",
+ /* 236 */ "cmd ::= VACUUM nm vinto",
+ /* 237 */ "vinto ::= INTO expr",
+ /* 238 */ "vinto ::=",
+ /* 239 */ "cmd ::= PRAGMA nm dbnm",
+ /* 240 */ "cmd ::= PRAGMA nm dbnm EQ nmnum",
+ /* 241 */ "cmd ::= PRAGMA nm dbnm LP nmnum RP",
+ /* 242 */ "cmd ::= PRAGMA nm dbnm EQ minus_num",
+ /* 243 */ "cmd ::= PRAGMA nm dbnm LP minus_num RP",
+ /* 244 */ "plus_num ::= PLUS INTEGER|FLOAT",
+ /* 245 */ "minus_num ::= MINUS INTEGER|FLOAT",
+ /* 246 */ "cmd ::= createkw trigger_decl BEGIN trigger_cmd_list END",
+ /* 247 */ "trigger_decl ::= temp TRIGGER ifnotexists nm dbnm trigger_time trigger_event ON fullname foreach_clause when_clause",
+ /* 248 */ "trigger_time ::= BEFORE|AFTER",
+ /* 249 */ "trigger_time ::= INSTEAD OF",
+ /* 250 */ "trigger_time ::=",
+ /* 251 */ "trigger_event ::= DELETE|INSERT",
+ /* 252 */ "trigger_event ::= UPDATE",
+ /* 253 */ "trigger_event ::= UPDATE OF idlist",
+ /* 254 */ "when_clause ::=",
+ /* 255 */ "when_clause ::= WHEN expr",
+ /* 256 */ "trigger_cmd_list ::= trigger_cmd_list trigger_cmd SEMI",
+ /* 257 */ "trigger_cmd_list ::= trigger_cmd SEMI",
+ /* 258 */ "trnm ::= nm DOT nm",
+ /* 259 */ "tridxby ::= INDEXED BY nm",
+ /* 260 */ "tridxby ::= NOT INDEXED",
+ /* 261 */ "trigger_cmd ::= UPDATE orconf trnm tridxby SET setlist from where_opt scanpt",
+ /* 262 */ "trigger_cmd ::= scanpt insert_cmd INTO trnm idlist_opt select upsert scanpt",
+ /* 263 */ "trigger_cmd ::= DELETE FROM trnm tridxby where_opt scanpt",
+ /* 264 */ "trigger_cmd ::= scanpt select scanpt",
+ /* 265 */ "expr ::= RAISE LP IGNORE RP",
+ /* 266 */ "expr ::= RAISE LP raisetype COMMA nm RP",
+ /* 267 */ "raisetype ::= ROLLBACK",
+ /* 268 */ "raisetype ::= ABORT",
+ /* 269 */ "raisetype ::= FAIL",
+ /* 270 */ "cmd ::= DROP TRIGGER ifexists fullname",
+ /* 271 */ "cmd ::= ATTACH database_kw_opt expr AS expr key_opt",
+ /* 272 */ "cmd ::= DETACH database_kw_opt expr",
+ /* 273 */ "key_opt ::=",
+ /* 274 */ "key_opt ::= KEY expr",
+ /* 275 */ "cmd ::= REINDEX",
+ /* 276 */ "cmd ::= REINDEX nm dbnm",
+ /* 277 */ "cmd ::= ANALYZE",
+ /* 278 */ "cmd ::= ANALYZE nm dbnm",
+ /* 279 */ "cmd ::= ALTER TABLE fullname RENAME TO nm",
+ /* 280 */ "cmd ::= ALTER TABLE add_column_fullname ADD kwcolumn_opt columnname carglist",
+ /* 281 */ "add_column_fullname ::= fullname",
+ /* 282 */ "cmd ::= ALTER TABLE fullname RENAME kwcolumn_opt nm TO nm",
+ /* 283 */ "cmd ::= create_vtab",
+ /* 284 */ "cmd ::= create_vtab LP vtabarglist RP",
+ /* 285 */ "create_vtab ::= createkw VIRTUAL TABLE ifnotexists nm dbnm USING nm",
+ /* 286 */ "vtabarg ::=",
+ /* 287 */ "vtabargtoken ::= ANY",
+ /* 288 */ "vtabargtoken ::= lp anylist RP",
+ /* 289 */ "lp ::= LP",
+ /* 290 */ "with ::= WITH wqlist",
+ /* 291 */ "with ::= WITH RECURSIVE wqlist",
+ /* 292 */ "wqlist ::= nm eidlist_opt AS LP select RP",
+ /* 293 */ "wqlist ::= wqlist COMMA nm eidlist_opt AS LP select RP",
+ /* 294 */ "windowdefn_list ::= windowdefn",
+ /* 295 */ "windowdefn_list ::= windowdefn_list COMMA windowdefn",
+ /* 296 */ "windowdefn ::= nm AS LP window RP",
+ /* 297 */ "window ::= PARTITION BY nexprlist orderby_opt frame_opt",
+ /* 298 */ "window ::= nm PARTITION BY nexprlist orderby_opt frame_opt",
+ /* 299 */ "window ::= ORDER BY sortlist frame_opt",
+ /* 300 */ "window ::= nm ORDER BY sortlist frame_opt",
+ /* 301 */ "window ::= frame_opt",
+ /* 302 */ "window ::= nm frame_opt",
+ /* 303 */ "frame_opt ::=",
+ /* 304 */ "frame_opt ::= range_or_rows frame_bound_s frame_exclude_opt",
+ /* 305 */ "frame_opt ::= range_or_rows BETWEEN frame_bound_s AND frame_bound_e frame_exclude_opt",
+ /* 306 */ "range_or_rows ::= RANGE|ROWS|GROUPS",
+ /* 307 */ "frame_bound_s ::= frame_bound",
+ /* 308 */ "frame_bound_s ::= UNBOUNDED PRECEDING",
+ /* 309 */ "frame_bound_e ::= frame_bound",
+ /* 310 */ "frame_bound_e ::= UNBOUNDED FOLLOWING",
+ /* 311 */ "frame_bound ::= expr PRECEDING|FOLLOWING",
+ /* 312 */ "frame_bound ::= CURRENT ROW",
+ /* 313 */ "frame_exclude_opt ::=",
+ /* 314 */ "frame_exclude_opt ::= EXCLUDE frame_exclude",
+ /* 315 */ "frame_exclude ::= NO OTHERS",
+ /* 316 */ "frame_exclude ::= CURRENT ROW",
+ /* 317 */ "frame_exclude ::= GROUP|TIES",
+ /* 318 */ "window_clause ::= WINDOW windowdefn_list",
+ /* 319 */ "filter_over ::= filter_clause over_clause",
+ /* 320 */ "filter_over ::= over_clause",
+ /* 321 */ "filter_over ::= filter_clause",
+ /* 322 */ "over_clause ::= OVER LP window RP",
+ /* 323 */ "over_clause ::= OVER nm",
+ /* 324 */ "filter_clause ::= FILTER LP WHERE expr RP",
+ /* 325 */ "input ::= cmdlist",
+ /* 326 */ "cmdlist ::= cmdlist ecmd",
+ /* 327 */ "cmdlist ::= ecmd",
+ /* 328 */ "ecmd ::= SEMI",
+ /* 329 */ "ecmd ::= cmdx SEMI",
+ /* 330 */ "ecmd ::= explain cmdx SEMI",
+ /* 331 */ "trans_opt ::=",
+ /* 332 */ "trans_opt ::= TRANSACTION",
+ /* 333 */ "trans_opt ::= TRANSACTION nm",
+ /* 334 */ "savepoint_opt ::= SAVEPOINT",
+ /* 335 */ "savepoint_opt ::=",
+ /* 336 */ "cmd ::= create_table create_table_args",
+ /* 337 */ "columnlist ::= columnlist COMMA columnname carglist",
+ /* 338 */ "columnlist ::= columnname carglist",
+ /* 339 */ "nm ::= ID|INDEXED",
+ /* 340 */ "nm ::= STRING",
+ /* 341 */ "nm ::= JOIN_KW",
+ /* 342 */ "typetoken ::= typename",
+ /* 343 */ "typename ::= ID|STRING",
+ /* 344 */ "signed ::= plus_num",
+ /* 345 */ "signed ::= minus_num",
+ /* 346 */ "carglist ::= carglist ccons",
+ /* 347 */ "carglist ::=",
+ /* 348 */ "ccons ::= NULL onconf",
+ /* 349 */ "ccons ::= GENERATED ALWAYS AS generated",
+ /* 350 */ "ccons ::= AS generated",
+ /* 351 */ "conslist_opt ::= COMMA conslist",
+ /* 352 */ "conslist ::= conslist tconscomma tcons",
+ /* 353 */ "conslist ::= tcons",
+ /* 354 */ "tconscomma ::=",
+ /* 355 */ "defer_subclause_opt ::= defer_subclause",
+ /* 356 */ "resolvetype ::= raisetype",
+ /* 357 */ "selectnowith ::= oneselect",
+ /* 358 */ "oneselect ::= values",
+ /* 359 */ "sclp ::= selcollist COMMA",
+ /* 360 */ "as ::= ID|STRING",
+ /* 361 */ "expr ::= term",
+ /* 362 */ "likeop ::= LIKE_KW|MATCH",
+ /* 363 */ "exprlist ::= nexprlist",
+ /* 364 */ "nmnum ::= plus_num",
+ /* 365 */ "nmnum ::= nm",
+ /* 366 */ "nmnum ::= ON",
+ /* 367 */ "nmnum ::= DELETE",
+ /* 368 */ "nmnum ::= DEFAULT",
+ /* 369 */ "plus_num ::= INTEGER|FLOAT",
+ /* 370 */ "foreach_clause ::=",
+ /* 371 */ "foreach_clause ::= FOR EACH ROW",
+ /* 372 */ "trnm ::= nm",
+ /* 373 */ "tridxby ::=",
+ /* 374 */ "database_kw_opt ::= DATABASE",
+ /* 375 */ "database_kw_opt ::=",
+ /* 376 */ "kwcolumn_opt ::=",
+ /* 377 */ "kwcolumn_opt ::= COLUMNKW",
+ /* 378 */ "vtabarglist ::= vtabarg",
+ /* 379 */ "vtabarglist ::= vtabarglist COMMA vtabarg",
+ /* 380 */ "vtabarg ::= vtabarg vtabargtoken",
+ /* 381 */ "anylist ::=",
+ /* 382 */ "anylist ::= anylist LP anylist RP",
+ /* 383 */ "anylist ::= anylist ANY",
+ /* 384 */ "with ::=",
+};
+#endif /* NDEBUG */
+
+
+#if YYSTACKDEPTH<=0
+/*
+** Try to increase the size of the parser stack. Return the number
+** of errors. Return 0 on success.
+*/
+static int yyGrowStack(yyParser *p){
+ int newSize;
+ int idx;
+ yyStackEntry *pNew;
+
+ newSize = p->yystksz*2 + 100;
+ idx = p->yytos ? (int)(p->yytos - p->yystack) : 0;
+ if( p->yystack==&p->yystk0 ){
+ pNew = malloc(newSize*sizeof(pNew[0]));
+ if( pNew ) pNew[0] = p->yystk0;
+ }else{
+ pNew = realloc(p->yystack, newSize*sizeof(pNew[0]));
+ }
+ if( pNew ){
+ p->yystack = pNew;
+ p->yytos = &p->yystack[idx];
+#ifndef NDEBUG
+ if( yyTraceFILE ){
+ fprintf(yyTraceFILE,"%sStack grows from %d to %d entries.\n",
+ yyTracePrompt, p->yystksz, newSize);
+ }
+#endif
+ p->yystksz = newSize;
+ }
+ return pNew==0;
+}
+#endif
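+
+/* Growth-policy note (illustrative arithmetic, editorial): with
+** newSize = yystksz*2 + 100, successive allocations run 100, 300, 700,
+** 1500, ... entries, so a deeply nested statement triggers only
+** O(log n) reallocations while small statements never leave the first
+** allocation.
+*/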
+
+/* Datatype of the argument to the memory allocator passed as the
+** second argument to sqlite3ParserAlloc() below. This can be changed by
+** putting an appropriate #define in the %include section of the input
+** grammar.
+*/
+#ifndef YYMALLOCARGTYPE
+# define YYMALLOCARGTYPE size_t
+#endif
+
+/* Initialize a new parser that has already been allocated.
+*/
+SQLITE_PRIVATE void sqlite3ParserInit(void *yypRawParser sqlite3ParserCTX_PDECL){
+ yyParser *yypParser = (yyParser*)yypRawParser;
+ sqlite3ParserCTX_STORE
+#ifdef YYTRACKMAXSTACKDEPTH
+ yypParser->yyhwm = 0;
+#endif
+#if YYSTACKDEPTH<=0
+ yypParser->yytos = NULL;
+ yypParser->yystack = NULL;
+ yypParser->yystksz = 0;
+ if( yyGrowStack(yypParser) ){
+ yypParser->yystack = &yypParser->yystk0;
+ yypParser->yystksz = 1;
+ }
+#endif
+#ifndef YYNOERRORRECOVERY
+ yypParser->yyerrcnt = -1;
+#endif
+ yypParser->yytos = yypParser->yystack;
+ yypParser->yystack[0].stateno = 0;
+ yypParser->yystack[0].major = 0;
+#if YYSTACKDEPTH>0
+ yypParser->yystackEnd = &yypParser->yystack[YYSTACKDEPTH-1];
+#endif
+}
+
+#ifndef sqlite3Parser_ENGINEALWAYSONSTACK
+/*
+** This function allocates a new parser.
+** The only argument is a pointer to a function which works like
+** malloc.
+**
+** Inputs:
+** A pointer to the function used to allocate memory.
+**
+** Outputs:
+** A pointer to a parser. This pointer is used in subsequent calls
+** to sqlite3Parser and sqlite3ParserFree.
+*/
+SQLITE_PRIVATE void *sqlite3ParserAlloc(void *(*mallocProc)(YYMALLOCARGTYPE) sqlite3ParserCTX_PDECL){
+ yyParser *yypParser;
+ yypParser = (yyParser*)(*mallocProc)( (YYMALLOCARGTYPE)sizeof(yyParser) );
+ if( yypParser ){
+ sqlite3ParserCTX_STORE
+ sqlite3ParserInit(yypParser sqlite3ParserCTX_PARAM);
+ }
+ return (void*)yypParser;
+}
+#endif /* sqlite3Parser_ENGINEALWAYSONSTACK */
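+
+/* Allocation sketch (illustrative only, ignoring the optional
+** %extra_context parameter declared via sqlite3ParserCTX_PDECL): a
+** caller pairs sqlite3ParserAlloc() with sqlite3ParserFree() further
+** below, e.g.
+**
+**   void *pParser = sqlite3ParserAlloc(malloc);
+**   if( pParser ){
+**     ...feed tokens to the parser...
+**     sqlite3ParserFree(pParser, free);
+**   }
+**
+** Inside SQLite proper the allocator is the internal memory subsystem;
+** plain malloc/free here is just for illustration.
+*/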
+
+
+/* The following function deletes the "minor type" or semantic value
+** associated with a symbol. The symbol can be either a terminal
+** or nonterminal. "yymajor" is the symbol code, and "yypminor" is
+** a pointer to the value to be deleted. The code used to do the
+** deletions is derived from the %destructor and/or %token_destructor
+** directives of the input grammar.
+*/
+static void yy_destructor(
+ yyParser *yypParser, /* The parser */
+ YYCODETYPE yymajor, /* Type code for object to destroy */
+ YYMINORTYPE *yypminor /* The object to be destroyed */
+){
+ sqlite3ParserARG_FETCH
+ sqlite3ParserCTX_FETCH
+ switch( yymajor ){
+ /* Here are inserted the actions that take place when a
+ ** terminal or non-terminal is destroyed. This can happen
+ ** when the symbol is popped from the stack during a
+ ** reduce or during error processing or when a parser is
+ ** being destroyed before it is finished parsing.
+ **
+ ** Note: during a reduce, the only symbols destroyed are those
+ ** which appear on the RHS of the rule, but which are *not* used
+ ** inside the C code.
+ */
+/********* Begin destructor definitions ***************************************/
+ case 200: /* select */
+ case 234: /* selectnowith */
+ case 235: /* oneselect */
+ case 247: /* values */
+{
+sqlite3SelectDelete(pParse->db, (yypminor->yy539));
+}
+ break;
+ case 211: /* term */
+ case 212: /* expr */
+ case 241: /* where_opt */
+ case 243: /* having_opt */
+ case 255: /* on_opt */
+ case 271: /* case_operand */
+ case 273: /* case_else */
+ case 276: /* vinto */
+ case 283: /* when_clause */
+ case 288: /* key_opt */
+ case 302: /* filter_clause */
+{
+sqlite3ExprDelete(pParse->db, (yypminor->yy202));
+}
+ break;
+ case 216: /* eidlist_opt */
+ case 226: /* sortlist */
+ case 227: /* eidlist */
+ case 239: /* selcollist */
+ case 242: /* groupby_opt */
+ case 244: /* orderby_opt */
+ case 248: /* nexprlist */
+ case 249: /* sclp */
+ case 257: /* exprlist */
+ case 262: /* setlist */
+ case 270: /* paren_exprlist */
+ case 272: /* case_exprlist */
+ case 301: /* part_opt */
+{
+sqlite3ExprListDelete(pParse->db, (yypminor->yy242));
+}
+ break;
+ case 233: /* fullname */
+ case 240: /* from */
+ case 251: /* seltablist */
+ case 252: /* stl_prefix */
+ case 258: /* xfullname */
+{
+sqlite3SrcListDelete(pParse->db, (yypminor->yy47));
+}
+ break;
+ case 236: /* wqlist */
+{
+sqlite3WithDelete(pParse->db, (yypminor->yy131));
+}
+ break;
+ case 246: /* window_clause */
+ case 297: /* windowdefn_list */
+{
+sqlite3WindowListDelete(pParse->db, (yypminor->yy303));
+}
+ break;
+ case 256: /* using_opt */
+ case 259: /* idlist */
+ case 264: /* idlist_opt */
+{
+sqlite3IdListDelete(pParse->db, (yypminor->yy600));
+}
+ break;
+ case 266: /* filter_over */
+ case 298: /* windowdefn */
+ case 299: /* window */
+ case 300: /* frame_opt */
+ case 303: /* over_clause */
+{
+sqlite3WindowDelete(pParse->db, (yypminor->yy303));
+}
+ break;
+ case 279: /* trigger_cmd_list */
+ case 284: /* trigger_cmd */
+{
+sqlite3DeleteTriggerStep(pParse->db, (yypminor->yy447));
+}
+ break;
+ case 281: /* trigger_event */
+{
+sqlite3IdListDelete(pParse->db, (yypminor->yy230).b);
+}
+ break;
+ case 305: /* frame_bound */
+ case 306: /* frame_bound_s */
+ case 307: /* frame_bound_e */
+{
+sqlite3ExprDelete(pParse->db, (yypminor->yy77).pExpr);
+}
+ break;
+/********* End destructor definitions *****************************************/
+ default: break; /* If no destructor action specified: do nothing */
+ }
+}
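+
+/* Example (illustrative, using the cases above): if a "select"
+** nonterminal (symbol 200) is popped without being consumed by a rule
+** action, the case for 200 frees its Select tree via
+** sqlite3SelectDelete(); symbols with no case fall through to the
+** default and need no cleanup.
+*/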
+
+/*
+** Pop the parser's stack once.
+**
+** If there is a destructor routine associated with the token which
+** is popped from the stack, then call it.
+*/
+static void yy_pop_parser_stack(yyParser *pParser){
+ yyStackEntry *yytos;
+ assert( pParser->yytos!=0 );
+ assert( pParser->yytos > pParser->yystack );
+ yytos = pParser->yytos--;
+#ifndef NDEBUG
+ if( yyTraceFILE ){
+ fprintf(yyTraceFILE,"%sPopping %s\n",
+ yyTracePrompt,
+ yyTokenName[yytos->major]);
+ }
+#endif
+ yy_destructor(pParser, yytos->major, &yytos->minor);
+}
+
+/*
+** Clear all secondary memory allocations from the parser
+*/
+SQLITE_PRIVATE void sqlite3ParserFinalize(void *p){
+ yyParser *pParser = (yyParser*)p;
+ while( pParser->yytos>pParser->yystack ) yy_pop_parser_stack(pParser);
+#if YYSTACKDEPTH<=0
+ if( pParser->yystack!=&pParser->yystk0 ) free(pParser->yystack);
+#endif
+}
+
+#ifndef sqlite3Parser_ENGINEALWAYSONSTACK
+/*
+** Deallocate and destroy a parser. Destructors are called for
+** all stack elements before shutting the parser down.
+**
+** If the YYPARSEFREENEVERNULL macro exists (for example because it
+** is defined in a %include section of the input grammar) then it is
+** assumed that the input pointer is never NULL.
+*/
+SQLITE_PRIVATE void sqlite3ParserFree(
+ void *p, /* The parser to be deleted */
+ void (*freeProc)(void*) /* Function used to reclaim memory */
+){
+#ifndef YYPARSEFREENEVERNULL
+ if( p==0 ) return;
+#endif
+ sqlite3ParserFinalize(p);
+ (*freeProc)(p);
+}
+#endif /* sqlite3Parser_ENGINEALWAYSONSTACK */
+
+/*
+** Return the peak depth of the stack for a parser.
+*/
+#ifdef YYTRACKMAXSTACKDEPTH
+SQLITE_PRIVATE int sqlite3ParserStackPeak(void *p){
+ yyParser *pParser = (yyParser*)p;
+ return pParser->yyhwm;
+}
+#endif
+
+/* This array of booleans keeps track of the parser statement
+** coverage. The element yycoverage[X][Y] is set when the parser
+** is in state X and has a lookahead token Y. In a well-tested
+** system, every element of this matrix should end up being set.
+*/
+#if defined(YYCOVERAGE)
+static unsigned char yycoverage[YYNSTATE][YYNTOKEN];
+#endif
+
+/*
+** Write into out a description of every state/lookahead combination that
+**
+** (1) has not been used by the parser, and
+** (2) is not a syntax error.
+**
+** Return the number of missed state/lookahead combinations.
+*/
+#if defined(YYCOVERAGE)
+SQLITE_PRIVATE int sqlite3ParserCoverage(FILE *out){
+ int stateno, iLookAhead, i;
+ int nMissed = 0;
+ for(stateno=0; stateno<YYNSTATE; stateno++){
+ i = yy_shift_ofst[stateno];
+ for(iLookAhead=0; iLookAhead<YYNTOKEN; iLookAhead++){
+ if( yy_lookahead[i+iLookAhead]!=iLookAhead ) continue;
+ if( yycoverage[stateno][iLookAhead]==0 ) nMissed++;
+ if( out ){
+ fprintf(out,"State %d lookahead %s %s\n", stateno,
+ yyTokenName[iLookAhead],
+ yycoverage[stateno][iLookAhead] ? "ok" : "missed");
+ }
+ }
+ }
+ return nMissed;
+}
+#endif
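+
+/* Usage sketch (illustrative only): in a build compiled with
+** -DYYCOVERAGE, a test harness could report untested state/lookahead
+** pairs with
+**
+**   int nMissed = sqlite3ParserCoverage(stdout);
+**
+** Passing a NULL FILE* suppresses the report and just returns the count.
+*/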
+
+/*
+** Find the appropriate action for a parser given the terminal
+** look-ahead token iLookAhead.
+*/
+static YYACTIONTYPE yy_find_shift_action(
+ YYCODETYPE iLookAhead, /* The look-ahead token */
+ YYACTIONTYPE stateno /* Current state number */
+){
+ int i;
+
+ if( stateno>YY_MAX_SHIFT ) return stateno;
+ assert( stateno <= YY_SHIFT_COUNT );
+#if defined(YYCOVERAGE)
+ yycoverage[stateno][iLookAhead] = 1;
+#endif
+ do{
+ i = yy_shift_ofst[stateno];
+ assert( i>=0 );
+ assert( i<=YY_ACTTAB_COUNT );
+ assert( i+YYNTOKEN<=(int)YY_NLOOKAHEAD );
+ assert( iLookAhead!=YYNOCODE );
+ assert( iLookAhead < YYNTOKEN );
+ i += iLookAhead;
+ assert( i<(int)YY_NLOOKAHEAD );
+ if( yy_lookahead[i]!=iLookAhead ){
+#ifdef YYFALLBACK
+ YYCODETYPE iFallback; /* Fallback token */
+ assert( iLookAhead<sizeof(yyFallback)/sizeof(yyFallback[0]) );
+ iFallback = yyFallback[iLookAhead];
+ if( iFallback!=0 ){
+#ifndef NDEBUG
+ if( yyTraceFILE ){
+ fprintf(yyTraceFILE, "%sFALLBACK %s => %s\n",
+ yyTracePrompt, yyTokenName[iLookAhead], yyTokenName[iFallback]);
+ }
+#endif
+ assert( yyFallback[iFallback]==0 ); /* Fallback loop must terminate */
+ iLookAhead = iFallback;
+ continue;
+ }
+#endif
+#ifdef YYWILDCARD
+ {
+ int j = i - iLookAhead + YYWILDCARD;
+ assert( j<(int)(sizeof(yy_lookahead)/sizeof(yy_lookahead[0])) );
+ if( yy_lookahead[j]==YYWILDCARD && iLookAhead>0 ){
+#ifndef NDEBUG
+ if( yyTraceFILE ){
+ fprintf(yyTraceFILE, "%sWILDCARD %s => %s\n",
+ yyTracePrompt, yyTokenName[iLookAhead],
+ yyTokenName[YYWILDCARD]);
+ }
+#endif /* NDEBUG */
+ return yy_action[j];
+ }
+ }
+#endif /* YYWILDCARD */
+ return yy_default[stateno];
+ }else{
+ assert( i>=0 && i<sizeof(yy_action)/sizeof(yy_action[0]) );
+ return yy_action[i];
+ }
+ }while(1);
+}
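+
+/* Fallback example (illustrative, using the yyFallback table above): if
+** the lookahead is COLUMNKW (60) in a state where COLUMNKW cannot be
+** shifted, yyFallback[60]==59 substitutes ID and the loop retries. This
+** is how SQLite lets non-reserved keywords double as identifiers.
+*/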
+
+/*
+** Find the appropriate action for a parser given the non-terminal
+** look-ahead token iLookAhead.
+*/
+static YYACTIONTYPE yy_find_reduce_action(
+ YYACTIONTYPE stateno, /* Current state number */
+ YYCODETYPE iLookAhead /* The look-ahead token */
+){
+ int i;
+#ifdef YYERRORSYMBOL
+ if( stateno>YY_REDUCE_COUNT ){
+ return yy_default[stateno];
+ }
+#else
+ assert( stateno<=YY_REDUCE_COUNT );
+#endif
+ i = yy_reduce_ofst[stateno];
+ assert( iLookAhead!=YYNOCODE );
+ i += iLookAhead;
+#ifdef YYERRORSYMBOL
+ if( i<0 || i>=YY_ACTTAB_COUNT || yy_lookahead[i]!=iLookAhead ){
+ return yy_default[stateno];
+ }
+#else
+ assert( i>=0 && i<YY_ACTTAB_COUNT );
+ assert( yy_lookahead[i]==iLookAhead );
+#endif
+ return yy_action[i];
+}
+
+/*
+** The following routine is called if the stack overflows.
+*/
+static void yyStackOverflow(yyParser *yypParser){
+ sqlite3ParserARG_FETCH
+ sqlite3ParserCTX_FETCH
+#ifndef NDEBUG
+ if( yyTraceFILE ){
+ fprintf(yyTraceFILE,"%sStack Overflow!\n",yyTracePrompt);
+ }
+#endif
+ while( yypParser->yytos>yypParser->yystack ) yy_pop_parser_stack(yypParser);
+ /* Here code is inserted which will execute if the parser
+ ** stack ever overflows */
+/******** Begin %stack_overflow code ******************************************/
+
+ sqlite3ErrorMsg(pParse, "parser stack overflow");
+/******** End %stack_overflow code ********************************************/
+ sqlite3ParserARG_STORE /* Suppress warning about unused %extra_argument var */
+ sqlite3ParserCTX_STORE
+}
+
+/*
+** Print tracing information for a SHIFT action
+*/
+#ifndef NDEBUG
+static void yyTraceShift(yyParser *yypParser, int yyNewState, const char *zTag){
+ if( yyTraceFILE ){
+ if( yyNewState<YYNSTATE ){
+ fprintf(yyTraceFILE,"%s%s '%s', go to state %d\n",
+ yyTracePrompt, zTag, yyTokenName[yypParser->yytos->major],
+ yyNewState);
+ }else{
+ fprintf(yyTraceFILE,"%s%s '%s', pending reduce %d\n",
+ yyTracePrompt, zTag, yyTokenName[yypParser->yytos->major],
+ yyNewState - YY_MIN_REDUCE);
+ }
+ }
+}
+#else
+# define yyTraceShift(X,Y,Z)
+#endif
+
+/*
+** Perform a shift action.
+*/
+static void yy_shift(
+ yyParser *yypParser, /* The parser to be shifted */
+ YYACTIONTYPE yyNewState, /* The new state to shift in */
+ YYCODETYPE yyMajor, /* The major token to shift in */
+ sqlite3ParserTOKENTYPE yyMinor /* The minor token to shift in */
+){
+ yyStackEntry *yytos;
+ yypParser->yytos++;
+#ifdef YYTRACKMAXSTACKDEPTH
+ if( (int)(yypParser->yytos - yypParser->yystack)>yypParser->yyhwm ){
+ yypParser->yyhwm++;
+ assert( yypParser->yyhwm == (int)(yypParser->yytos - yypParser->yystack) );
+ }
+#endif
+#if YYSTACKDEPTH>0
+ if( yypParser->yytos>yypParser->yystackEnd ){
+ yypParser->yytos--;
+ yyStackOverflow(yypParser);
+ return;
+ }
+#else
+ if( yypParser->yytos>=&yypParser->yystack[yypParser->yystksz] ){
+ if( yyGrowStack(yypParser) ){
+ yypParser->yytos--;
+ yyStackOverflow(yypParser);
+ return;
+ }
+ }
+#endif
+ if( yyNewState > YY_MAX_SHIFT ){
+ yyNewState += YY_MIN_REDUCE - YY_MIN_SHIFTREDUCE;
+ }
+ yytos = yypParser->yytos;
+ yytos->stateno = yyNewState;
+ yytos->major = yyMajor;
+ yytos->minor.yy0 = yyMinor;
+ yyTraceShift(yypParser, yyNewState, "Shift");
+}
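+
+/* Note (editorial): the adjustment yyNewState += YY_MIN_REDUCE -
+** YY_MIN_SHIFTREDUCE above re-encodes a SHIFTREDUCE action so that the
+** stored stateno already names the follow-up reduce, matching the
+** yyStackEntry comment earlier in this file.
+*/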
+
+/* For rule J, yyRuleInfoLhs[J] contains the symbol on the left-hand side
+** of that rule */
+static const YYCODETYPE yyRuleInfoLhs[] = {
+ 185, /* (0) explain ::= EXPLAIN */
+ 185, /* (1) explain ::= EXPLAIN QUERY PLAN */
+ 184, /* (2) cmdx ::= cmd */
+ 186, /* (3) cmd ::= BEGIN transtype trans_opt */
+ 187, /* (4) transtype ::= */
+ 187, /* (5) transtype ::= DEFERRED */
+ 187, /* (6) transtype ::= IMMEDIATE */
+ 187, /* (7) transtype ::= EXCLUSIVE */
+ 186, /* (8) cmd ::= COMMIT|END trans_opt */
+ 186, /* (9) cmd ::= ROLLBACK trans_opt */
+ 186, /* (10) cmd ::= SAVEPOINT nm */
+ 186, /* (11) cmd ::= RELEASE savepoint_opt nm */
+ 186, /* (12) cmd ::= ROLLBACK trans_opt TO savepoint_opt nm */
+ 191, /* (13) create_table ::= createkw temp TABLE ifnotexists nm dbnm */
+ 193, /* (14) createkw ::= CREATE */
+ 195, /* (15) ifnotexists ::= */
+ 195, /* (16) ifnotexists ::= IF NOT EXISTS */
+ 194, /* (17) temp ::= TEMP */
+ 194, /* (18) temp ::= */
+ 192, /* (19) create_table_args ::= LP columnlist conslist_opt RP table_options */
+ 192, /* (20) create_table_args ::= AS select */
+ 199, /* (21) table_options ::= */
+ 199, /* (22) table_options ::= WITHOUT nm */
+ 201, /* (23) columnname ::= nm typetoken */
+ 203, /* (24) typetoken ::= */
+ 203, /* (25) typetoken ::= typename LP signed RP */
+ 203, /* (26) typetoken ::= typename LP signed COMMA signed RP */
+ 204, /* (27) typename ::= typename ID|STRING */
+ 208, /* (28) scanpt ::= */
+ 209, /* (29) scantok ::= */
+ 210, /* (30) ccons ::= CONSTRAINT nm */
+ 210, /* (31) ccons ::= DEFAULT scantok term */
+ 210, /* (32) ccons ::= DEFAULT LP expr RP */
+ 210, /* (33) ccons ::= DEFAULT PLUS scantok term */
+ 210, /* (34) ccons ::= DEFAULT MINUS scantok term */
+ 210, /* (35) ccons ::= DEFAULT scantok ID|INDEXED */
+ 210, /* (36) ccons ::= NOT NULL onconf */
+ 210, /* (37) ccons ::= PRIMARY KEY sortorder onconf autoinc */
+ 210, /* (38) ccons ::= UNIQUE onconf */
+ 210, /* (39) ccons ::= CHECK LP expr RP */
+ 210, /* (40) ccons ::= REFERENCES nm eidlist_opt refargs */
+ 210, /* (41) ccons ::= defer_subclause */
+ 210, /* (42) ccons ::= COLLATE ID|STRING */
+ 219, /* (43) generated ::= LP expr RP */
+ 219, /* (44) generated ::= LP expr RP ID */
+ 215, /* (45) autoinc ::= */
+ 215, /* (46) autoinc ::= AUTOINCR */
+ 217, /* (47) refargs ::= */
+ 217, /* (48) refargs ::= refargs refarg */
+ 220, /* (49) refarg ::= MATCH nm */
+ 220, /* (50) refarg ::= ON INSERT refact */
+ 220, /* (51) refarg ::= ON DELETE refact */
+ 220, /* (52) refarg ::= ON UPDATE refact */
+ 221, /* (53) refact ::= SET NULL */
+ 221, /* (54) refact ::= SET DEFAULT */
+ 221, /* (55) refact ::= CASCADE */
+ 221, /* (56) refact ::= RESTRICT */
+ 221, /* (57) refact ::= NO ACTION */
+ 218, /* (58) defer_subclause ::= NOT DEFERRABLE init_deferred_pred_opt */
+ 218, /* (59) defer_subclause ::= DEFERRABLE init_deferred_pred_opt */
+ 222, /* (60) init_deferred_pred_opt ::= */
+ 222, /* (61) init_deferred_pred_opt ::= INITIALLY DEFERRED */
+ 222, /* (62) init_deferred_pred_opt ::= INITIALLY IMMEDIATE */
+ 198, /* (63) conslist_opt ::= */
+ 224, /* (64) tconscomma ::= COMMA */
+ 225, /* (65) tcons ::= CONSTRAINT nm */
+ 225, /* (66) tcons ::= PRIMARY KEY LP sortlist autoinc RP onconf */
+ 225, /* (67) tcons ::= UNIQUE LP sortlist RP onconf */
+ 225, /* (68) tcons ::= CHECK LP expr RP onconf */
+ 225, /* (69) tcons ::= FOREIGN KEY LP eidlist RP REFERENCES nm eidlist_opt refargs defer_subclause_opt */
+ 228, /* (70) defer_subclause_opt ::= */
+ 213, /* (71) onconf ::= */
+ 213, /* (72) onconf ::= ON CONFLICT resolvetype */
+ 229, /* (73) orconf ::= */
+ 229, /* (74) orconf ::= OR resolvetype */
+ 230, /* (75) resolvetype ::= IGNORE */
+ 230, /* (76) resolvetype ::= REPLACE */
+ 186, /* (77) cmd ::= DROP TABLE ifexists fullname */
+ 232, /* (78) ifexists ::= IF EXISTS */
+ 232, /* (79) ifexists ::= */
+ 186, /* (80) cmd ::= createkw temp VIEW ifnotexists nm dbnm eidlist_opt AS select */
+ 186, /* (81) cmd ::= DROP VIEW ifexists fullname */
+ 186, /* (82) cmd ::= select */
+ 200, /* (83) select ::= WITH wqlist selectnowith */
+ 200, /* (84) select ::= WITH RECURSIVE wqlist selectnowith */
+ 200, /* (85) select ::= selectnowith */
+ 234, /* (86) selectnowith ::= selectnowith multiselect_op oneselect */
+ 237, /* (87) multiselect_op ::= UNION */
+ 237, /* (88) multiselect_op ::= UNION ALL */
+ 237, /* (89) multiselect_op ::= EXCEPT|INTERSECT */
+ 235, /* (90) oneselect ::= SELECT distinct selcollist from where_opt groupby_opt having_opt orderby_opt limit_opt */
+ 235, /* (91) oneselect ::= SELECT distinct selcollist from where_opt groupby_opt having_opt window_clause orderby_opt limit_opt */
+ 247, /* (92) values ::= VALUES LP nexprlist RP */
+ 247, /* (93) values ::= values COMMA LP nexprlist RP */
+ 238, /* (94) distinct ::= DISTINCT */
+ 238, /* (95) distinct ::= ALL */
+ 238, /* (96) distinct ::= */
+ 249, /* (97) sclp ::= */
+ 239, /* (98) selcollist ::= sclp scanpt expr scanpt as */
+ 239, /* (99) selcollist ::= sclp scanpt STAR */
+ 239, /* (100) selcollist ::= sclp scanpt nm DOT STAR */
+ 250, /* (101) as ::= AS nm */
+ 250, /* (102) as ::= */
+ 240, /* (103) from ::= */
+ 240, /* (104) from ::= FROM seltablist */
+ 252, /* (105) stl_prefix ::= seltablist joinop */
+ 252, /* (106) stl_prefix ::= */
+ 251, /* (107) seltablist ::= stl_prefix nm dbnm as indexed_opt on_opt using_opt */
+ 251, /* (108) seltablist ::= stl_prefix nm dbnm LP exprlist RP as on_opt using_opt */
+ 251, /* (109) seltablist ::= stl_prefix LP select RP as on_opt using_opt */
+ 251, /* (110) seltablist ::= stl_prefix LP seltablist RP as on_opt using_opt */
+ 196, /* (111) dbnm ::= */
+ 196, /* (112) dbnm ::= DOT nm */
+ 233, /* (113) fullname ::= nm */
+ 233, /* (114) fullname ::= nm DOT nm */
+ 258, /* (115) xfullname ::= nm */
+ 258, /* (116) xfullname ::= nm DOT nm */
+ 258, /* (117) xfullname ::= nm DOT nm AS nm */
+ 258, /* (118) xfullname ::= nm AS nm */
+ 253, /* (119) joinop ::= COMMA|JOIN */
+ 253, /* (120) joinop ::= JOIN_KW JOIN */
+ 253, /* (121) joinop ::= JOIN_KW nm JOIN */
+ 253, /* (122) joinop ::= JOIN_KW nm nm JOIN */
+ 255, /* (123) on_opt ::= ON expr */
+ 255, /* (124) on_opt ::= */
+ 254, /* (125) indexed_opt ::= */
+ 254, /* (126) indexed_opt ::= INDEXED BY nm */
+ 254, /* (127) indexed_opt ::= NOT INDEXED */
+ 256, /* (128) using_opt ::= USING LP idlist RP */
+ 256, /* (129) using_opt ::= */
+ 244, /* (130) orderby_opt ::= */
+ 244, /* (131) orderby_opt ::= ORDER BY sortlist */
+ 226, /* (132) sortlist ::= sortlist COMMA expr sortorder nulls */
+ 226, /* (133) sortlist ::= expr sortorder nulls */
+ 214, /* (134) sortorder ::= ASC */
+ 214, /* (135) sortorder ::= DESC */
+ 214, /* (136) sortorder ::= */
+ 260, /* (137) nulls ::= NULLS FIRST */
+ 260, /* (138) nulls ::= NULLS LAST */
+ 260, /* (139) nulls ::= */
+ 242, /* (140) groupby_opt ::= */
+ 242, /* (141) groupby_opt ::= GROUP BY nexprlist */
+ 243, /* (142) having_opt ::= */
+ 243, /* (143) having_opt ::= HAVING expr */
+ 245, /* (144) limit_opt ::= */
+ 245, /* (145) limit_opt ::= LIMIT expr */
+ 245, /* (146) limit_opt ::= LIMIT expr OFFSET expr */
+ 245, /* (147) limit_opt ::= LIMIT expr COMMA expr */
+ 186, /* (148) cmd ::= with DELETE FROM xfullname indexed_opt where_opt orderby_opt limit_opt */
+ 241, /* (149) where_opt ::= */
+ 241, /* (150) where_opt ::= WHERE expr */
+ 186, /* (151) cmd ::= with UPDATE orconf xfullname indexed_opt SET setlist from where_opt orderby_opt limit_opt */
+ 262, /* (152) setlist ::= setlist COMMA nm EQ expr */
+ 262, /* (153) setlist ::= setlist COMMA LP idlist RP EQ expr */
+ 262, /* (154) setlist ::= nm EQ expr */
+ 262, /* (155) setlist ::= LP idlist RP EQ expr */
+ 186, /* (156) cmd ::= with insert_cmd INTO xfullname idlist_opt select upsert */
+ 186, /* (157) cmd ::= with insert_cmd INTO xfullname idlist_opt DEFAULT VALUES */
+ 265, /* (158) upsert ::= */
+ 265, /* (159) upsert ::= ON CONFLICT LP sortlist RP where_opt DO UPDATE SET setlist where_opt */
+ 265, /* (160) upsert ::= ON CONFLICT LP sortlist RP where_opt DO NOTHING */
+ 265, /* (161) upsert ::= ON CONFLICT DO NOTHING */
+ 263, /* (162) insert_cmd ::= INSERT orconf */
+ 263, /* (163) insert_cmd ::= REPLACE */
+ 264, /* (164) idlist_opt ::= */
+ 264, /* (165) idlist_opt ::= LP idlist RP */
+ 259, /* (166) idlist ::= idlist COMMA nm */
+ 259, /* (167) idlist ::= nm */
+ 212, /* (168) expr ::= LP expr RP */
+ 212, /* (169) expr ::= ID|INDEXED */
+ 212, /* (170) expr ::= JOIN_KW */
+ 212, /* (171) expr ::= nm DOT nm */
+ 212, /* (172) expr ::= nm DOT nm DOT nm */
+ 211, /* (173) term ::= NULL|FLOAT|BLOB */
+ 211, /* (174) term ::= STRING */
+ 211, /* (175) term ::= INTEGER */
+ 212, /* (176) expr ::= VARIABLE */
+ 212, /* (177) expr ::= expr COLLATE ID|STRING */
+ 212, /* (178) expr ::= CAST LP expr AS typetoken RP */
+ 212, /* (179) expr ::= ID|INDEXED LP distinct exprlist RP */
+ 212, /* (180) expr ::= ID|INDEXED LP STAR RP */
+ 212, /* (181) expr ::= ID|INDEXED LP distinct exprlist RP filter_over */
+ 212, /* (182) expr ::= ID|INDEXED LP STAR RP filter_over */
+ 211, /* (183) term ::= CTIME_KW */
+ 212, /* (184) expr ::= LP nexprlist COMMA expr RP */
+ 212, /* (185) expr ::= expr AND expr */
+ 212, /* (186) expr ::= expr OR expr */
+ 212, /* (187) expr ::= expr LT|GT|GE|LE expr */
+ 212, /* (188) expr ::= expr EQ|NE expr */
+ 212, /* (189) expr ::= expr BITAND|BITOR|LSHIFT|RSHIFT expr */
+ 212, /* (190) expr ::= expr PLUS|MINUS expr */
+ 212, /* (191) expr ::= expr STAR|SLASH|REM expr */
+ 212, /* (192) expr ::= expr CONCAT expr */
+ 267, /* (193) likeop ::= NOT LIKE_KW|MATCH */
+ 212, /* (194) expr ::= expr likeop expr */
+ 212, /* (195) expr ::= expr likeop expr ESCAPE expr */
+ 212, /* (196) expr ::= expr ISNULL|NOTNULL */
+ 212, /* (197) expr ::= expr NOT NULL */
+ 212, /* (198) expr ::= expr IS expr */
+ 212, /* (199) expr ::= expr IS NOT expr */
+ 212, /* (200) expr ::= NOT expr */
+ 212, /* (201) expr ::= BITNOT expr */
+ 212, /* (202) expr ::= PLUS|MINUS expr */
+ 268, /* (203) between_op ::= BETWEEN */
+ 268, /* (204) between_op ::= NOT BETWEEN */
+ 212, /* (205) expr ::= expr between_op expr AND expr */
+ 269, /* (206) in_op ::= IN */
+ 269, /* (207) in_op ::= NOT IN */
+ 212, /* (208) expr ::= expr in_op LP exprlist RP */
+ 212, /* (209) expr ::= LP select RP */
+ 212, /* (210) expr ::= expr in_op LP select RP */
+ 212, /* (211) expr ::= expr in_op nm dbnm paren_exprlist */
+ 212, /* (212) expr ::= EXISTS LP select RP */
+ 212, /* (213) expr ::= CASE case_operand case_exprlist case_else END */
+ 272, /* (214) case_exprlist ::= case_exprlist WHEN expr THEN expr */
+ 272, /* (215) case_exprlist ::= WHEN expr THEN expr */
+ 273, /* (216) case_else ::= ELSE expr */
+ 273, /* (217) case_else ::= */
+ 271, /* (218) case_operand ::= expr */
+ 271, /* (219) case_operand ::= */
+ 257, /* (220) exprlist ::= */
+ 248, /* (221) nexprlist ::= nexprlist COMMA expr */
+ 248, /* (222) nexprlist ::= expr */
+ 270, /* (223) paren_exprlist ::= */
+ 270, /* (224) paren_exprlist ::= LP exprlist RP */
+ 186, /* (225) cmd ::= createkw uniqueflag INDEX ifnotexists nm dbnm ON nm LP sortlist RP where_opt */
+ 274, /* (226) uniqueflag ::= UNIQUE */
+ 274, /* (227) uniqueflag ::= */
+ 216, /* (228) eidlist_opt ::= */
+ 216, /* (229) eidlist_opt ::= LP eidlist RP */
+ 227, /* (230) eidlist ::= eidlist COMMA nm collate sortorder */
+ 227, /* (231) eidlist ::= nm collate sortorder */
+ 275, /* (232) collate ::= */
+ 275, /* (233) collate ::= COLLATE ID|STRING */
+ 186, /* (234) cmd ::= DROP INDEX ifexists fullname */
+ 186, /* (235) cmd ::= VACUUM vinto */
+ 186, /* (236) cmd ::= VACUUM nm vinto */
+ 276, /* (237) vinto ::= INTO expr */
+ 276, /* (238) vinto ::= */
+ 186, /* (239) cmd ::= PRAGMA nm dbnm */
+ 186, /* (240) cmd ::= PRAGMA nm dbnm EQ nmnum */
+ 186, /* (241) cmd ::= PRAGMA nm dbnm LP nmnum RP */
+ 186, /* (242) cmd ::= PRAGMA nm dbnm EQ minus_num */
+ 186, /* (243) cmd ::= PRAGMA nm dbnm LP minus_num RP */
+ 206, /* (244) plus_num ::= PLUS INTEGER|FLOAT */
+ 207, /* (245) minus_num ::= MINUS INTEGER|FLOAT */
+ 186, /* (246) cmd ::= createkw trigger_decl BEGIN trigger_cmd_list END */
+ 278, /* (247) trigger_decl ::= temp TRIGGER ifnotexists nm dbnm trigger_time trigger_event ON fullname foreach_clause when_clause */
+ 280, /* (248) trigger_time ::= BEFORE|AFTER */
+ 280, /* (249) trigger_time ::= INSTEAD OF */
+ 280, /* (250) trigger_time ::= */
+ 281, /* (251) trigger_event ::= DELETE|INSERT */
+ 281, /* (252) trigger_event ::= UPDATE */
+ 281, /* (253) trigger_event ::= UPDATE OF idlist */
+ 283, /* (254) when_clause ::= */
+ 283, /* (255) when_clause ::= WHEN expr */
+ 279, /* (256) trigger_cmd_list ::= trigger_cmd_list trigger_cmd SEMI */
+ 279, /* (257) trigger_cmd_list ::= trigger_cmd SEMI */
+ 285, /* (258) trnm ::= nm DOT nm */
+ 286, /* (259) tridxby ::= INDEXED BY nm */
+ 286, /* (260) tridxby ::= NOT INDEXED */
+ 284, /* (261) trigger_cmd ::= UPDATE orconf trnm tridxby SET setlist from where_opt scanpt */
+ 284, /* (262) trigger_cmd ::= scanpt insert_cmd INTO trnm idlist_opt select upsert scanpt */
+ 284, /* (263) trigger_cmd ::= DELETE FROM trnm tridxby where_opt scanpt */
+ 284, /* (264) trigger_cmd ::= scanpt select scanpt */
+ 212, /* (265) expr ::= RAISE LP IGNORE RP */
+ 212, /* (266) expr ::= RAISE LP raisetype COMMA nm RP */
+ 231, /* (267) raisetype ::= ROLLBACK */
+ 231, /* (268) raisetype ::= ABORT */
+ 231, /* (269) raisetype ::= FAIL */
+ 186, /* (270) cmd ::= DROP TRIGGER ifexists fullname */
+ 186, /* (271) cmd ::= ATTACH database_kw_opt expr AS expr key_opt */
+ 186, /* (272) cmd ::= DETACH database_kw_opt expr */
+ 288, /* (273) key_opt ::= */
+ 288, /* (274) key_opt ::= KEY expr */
+ 186, /* (275) cmd ::= REINDEX */
+ 186, /* (276) cmd ::= REINDEX nm dbnm */
+ 186, /* (277) cmd ::= ANALYZE */
+ 186, /* (278) cmd ::= ANALYZE nm dbnm */
+ 186, /* (279) cmd ::= ALTER TABLE fullname RENAME TO nm */
+ 186, /* (280) cmd ::= ALTER TABLE add_column_fullname ADD kwcolumn_opt columnname carglist */
+ 289, /* (281) add_column_fullname ::= fullname */
+ 186, /* (282) cmd ::= ALTER TABLE fullname RENAME kwcolumn_opt nm TO nm */
+ 186, /* (283) cmd ::= create_vtab */
+ 186, /* (284) cmd ::= create_vtab LP vtabarglist RP */
+ 291, /* (285) create_vtab ::= createkw VIRTUAL TABLE ifnotexists nm dbnm USING nm */
+ 293, /* (286) vtabarg ::= */
+ 294, /* (287) vtabargtoken ::= ANY */
+ 294, /* (288) vtabargtoken ::= lp anylist RP */
+ 295, /* (289) lp ::= LP */
+ 261, /* (290) with ::= WITH wqlist */
+ 261, /* (291) with ::= WITH RECURSIVE wqlist */
+ 236, /* (292) wqlist ::= nm eidlist_opt AS LP select RP */
+ 236, /* (293) wqlist ::= wqlist COMMA nm eidlist_opt AS LP select RP */
+ 297, /* (294) windowdefn_list ::= windowdefn */
+ 297, /* (295) windowdefn_list ::= windowdefn_list COMMA windowdefn */
+ 298, /* (296) windowdefn ::= nm AS LP window RP */
+ 299, /* (297) window ::= PARTITION BY nexprlist orderby_opt frame_opt */
+ 299, /* (298) window ::= nm PARTITION BY nexprlist orderby_opt frame_opt */
+ 299, /* (299) window ::= ORDER BY sortlist frame_opt */
+ 299, /* (300) window ::= nm ORDER BY sortlist frame_opt */
+ 299, /* (301) window ::= frame_opt */
+ 299, /* (302) window ::= nm frame_opt */
+ 300, /* (303) frame_opt ::= */
+ 300, /* (304) frame_opt ::= range_or_rows frame_bound_s frame_exclude_opt */
+ 300, /* (305) frame_opt ::= range_or_rows BETWEEN frame_bound_s AND frame_bound_e frame_exclude_opt */
+ 304, /* (306) range_or_rows ::= RANGE|ROWS|GROUPS */
+ 306, /* (307) frame_bound_s ::= frame_bound */
+ 306, /* (308) frame_bound_s ::= UNBOUNDED PRECEDING */
+ 307, /* (309) frame_bound_e ::= frame_bound */
+ 307, /* (310) frame_bound_e ::= UNBOUNDED FOLLOWING */
+ 305, /* (311) frame_bound ::= expr PRECEDING|FOLLOWING */
+ 305, /* (312) frame_bound ::= CURRENT ROW */
+ 308, /* (313) frame_exclude_opt ::= */
+ 308, /* (314) frame_exclude_opt ::= EXCLUDE frame_exclude */
+ 309, /* (315) frame_exclude ::= NO OTHERS */
+ 309, /* (316) frame_exclude ::= CURRENT ROW */
+ 309, /* (317) frame_exclude ::= GROUP|TIES */
+ 246, /* (318) window_clause ::= WINDOW windowdefn_list */
+ 266, /* (319) filter_over ::= filter_clause over_clause */
+ 266, /* (320) filter_over ::= over_clause */
+ 266, /* (321) filter_over ::= filter_clause */
+ 303, /* (322) over_clause ::= OVER LP window RP */
+ 303, /* (323) over_clause ::= OVER nm */
+ 302, /* (324) filter_clause ::= FILTER LP WHERE expr RP */
+ 181, /* (325) input ::= cmdlist */
+ 182, /* (326) cmdlist ::= cmdlist ecmd */
+ 182, /* (327) cmdlist ::= ecmd */
+ 183, /* (328) ecmd ::= SEMI */
+ 183, /* (329) ecmd ::= cmdx SEMI */
+ 183, /* (330) ecmd ::= explain cmdx SEMI */
+ 188, /* (331) trans_opt ::= */
+ 188, /* (332) trans_opt ::= TRANSACTION */
+ 188, /* (333) trans_opt ::= TRANSACTION nm */
+ 190, /* (334) savepoint_opt ::= SAVEPOINT */
+ 190, /* (335) savepoint_opt ::= */
+ 186, /* (336) cmd ::= create_table create_table_args */
+ 197, /* (337) columnlist ::= columnlist COMMA columnname carglist */
+ 197, /* (338) columnlist ::= columnname carglist */
+ 189, /* (339) nm ::= ID|INDEXED */
+ 189, /* (340) nm ::= STRING */
+ 189, /* (341) nm ::= JOIN_KW */
+ 203, /* (342) typetoken ::= typename */
+ 204, /* (343) typename ::= ID|STRING */
+ 205, /* (344) signed ::= plus_num */
+ 205, /* (345) signed ::= minus_num */
+ 202, /* (346) carglist ::= carglist ccons */
+ 202, /* (347) carglist ::= */
+ 210, /* (348) ccons ::= NULL onconf */
+ 210, /* (349) ccons ::= GENERATED ALWAYS AS generated */
+ 210, /* (350) ccons ::= AS generated */
+ 198, /* (351) conslist_opt ::= COMMA conslist */
+ 223, /* (352) conslist ::= conslist tconscomma tcons */
+ 223, /* (353) conslist ::= tcons */
+ 224, /* (354) tconscomma ::= */
+ 228, /* (355) defer_subclause_opt ::= defer_subclause */
+ 230, /* (356) resolvetype ::= raisetype */
+ 234, /* (357) selectnowith ::= oneselect */
+ 235, /* (358) oneselect ::= values */
+ 249, /* (359) sclp ::= selcollist COMMA */
+ 250, /* (360) as ::= ID|STRING */
+ 212, /* (361) expr ::= term */
+ 267, /* (362) likeop ::= LIKE_KW|MATCH */
+ 257, /* (363) exprlist ::= nexprlist */
+ 277, /* (364) nmnum ::= plus_num */
+ 277, /* (365) nmnum ::= nm */
+ 277, /* (366) nmnum ::= ON */
+ 277, /* (367) nmnum ::= DELETE */
+ 277, /* (368) nmnum ::= DEFAULT */
+ 206, /* (369) plus_num ::= INTEGER|FLOAT */
+ 282, /* (370) foreach_clause ::= */
+ 282, /* (371) foreach_clause ::= FOR EACH ROW */
+ 285, /* (372) trnm ::= nm */
+ 286, /* (373) tridxby ::= */
+ 287, /* (374) database_kw_opt ::= DATABASE */
+ 287, /* (375) database_kw_opt ::= */
+ 290, /* (376) kwcolumn_opt ::= */
+ 290, /* (377) kwcolumn_opt ::= COLUMNKW */
+ 292, /* (378) vtabarglist ::= vtabarg */
+ 292, /* (379) vtabarglist ::= vtabarglist COMMA vtabarg */
+ 293, /* (380) vtabarg ::= vtabarg vtabargtoken */
+ 296, /* (381) anylist ::= */
+ 296, /* (382) anylist ::= anylist LP anylist RP */
+ 296, /* (383) anylist ::= anylist ANY */
+ 261, /* (384) with ::= */
+};
+
+/* For rule J, yyRuleInfoNRhs[J] contains the negative of the number
+** of symbols on the right-hand side of that rule. */
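+/* For example, rule (0) "explain ::= EXPLAIN" has one symbol on its
+** right-hand side, so yyRuleInfoNRhs[0] is -1, while a rule with an
+** empty right-hand side stores 0.  Storing the count negated lets the
+** parser index directly beneath the popped symbols: with
+** yysize = yyRuleInfoNRhs[yyruleno], yymsp[yysize] is the stack entry
+** under the rule's right-hand side and yymsp[yysize+1] receives the
+** left-hand-side value. */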
+static const signed char yyRuleInfoNRhs[] = {
+ -1, /* (0) explain ::= EXPLAIN */
+ -3, /* (1) explain ::= EXPLAIN QUERY PLAN */
+ -1, /* (2) cmdx ::= cmd */
+ -3, /* (3) cmd ::= BEGIN transtype trans_opt */
+ 0, /* (4) transtype ::= */
+ -1, /* (5) transtype ::= DEFERRED */
+ -1, /* (6) transtype ::= IMMEDIATE */
+ -1, /* (7) transtype ::= EXCLUSIVE */
+ -2, /* (8) cmd ::= COMMIT|END trans_opt */
+ -2, /* (9) cmd ::= ROLLBACK trans_opt */
+ -2, /* (10) cmd ::= SAVEPOINT nm */
+ -3, /* (11) cmd ::= RELEASE savepoint_opt nm */
+ -5, /* (12) cmd ::= ROLLBACK trans_opt TO savepoint_opt nm */
+ -6, /* (13) create_table ::= createkw temp TABLE ifnotexists nm dbnm */
+ -1, /* (14) createkw ::= CREATE */
+ 0, /* (15) ifnotexists ::= */
+ -3, /* (16) ifnotexists ::= IF NOT EXISTS */
+ -1, /* (17) temp ::= TEMP */
+ 0, /* (18) temp ::= */
+ -5, /* (19) create_table_args ::= LP columnlist conslist_opt RP table_options */
+ -2, /* (20) create_table_args ::= AS select */
+ 0, /* (21) table_options ::= */
+ -2, /* (22) table_options ::= WITHOUT nm */
+ -2, /* (23) columnname ::= nm typetoken */
+ 0, /* (24) typetoken ::= */
+ -4, /* (25) typetoken ::= typename LP signed RP */
+ -6, /* (26) typetoken ::= typename LP signed COMMA signed RP */
+ -2, /* (27) typename ::= typename ID|STRING */
+ 0, /* (28) scanpt ::= */
+ 0, /* (29) scantok ::= */
+ -2, /* (30) ccons ::= CONSTRAINT nm */
+ -3, /* (31) ccons ::= DEFAULT scantok term */
+ -4, /* (32) ccons ::= DEFAULT LP expr RP */
+ -4, /* (33) ccons ::= DEFAULT PLUS scantok term */
+ -4, /* (34) ccons ::= DEFAULT MINUS scantok term */
+ -3, /* (35) ccons ::= DEFAULT scantok ID|INDEXED */
+ -3, /* (36) ccons ::= NOT NULL onconf */
+ -5, /* (37) ccons ::= PRIMARY KEY sortorder onconf autoinc */
+ -2, /* (38) ccons ::= UNIQUE onconf */
+ -4, /* (39) ccons ::= CHECK LP expr RP */
+ -4, /* (40) ccons ::= REFERENCES nm eidlist_opt refargs */
+ -1, /* (41) ccons ::= defer_subclause */
+ -2, /* (42) ccons ::= COLLATE ID|STRING */
+ -3, /* (43) generated ::= LP expr RP */
+ -4, /* (44) generated ::= LP expr RP ID */
+ 0, /* (45) autoinc ::= */
+ -1, /* (46) autoinc ::= AUTOINCR */
+ 0, /* (47) refargs ::= */
+ -2, /* (48) refargs ::= refargs refarg */
+ -2, /* (49) refarg ::= MATCH nm */
+ -3, /* (50) refarg ::= ON INSERT refact */
+ -3, /* (51) refarg ::= ON DELETE refact */
+ -3, /* (52) refarg ::= ON UPDATE refact */
+ -2, /* (53) refact ::= SET NULL */
+ -2, /* (54) refact ::= SET DEFAULT */
+ -1, /* (55) refact ::= CASCADE */
+ -1, /* (56) refact ::= RESTRICT */
+ -2, /* (57) refact ::= NO ACTION */
+ -3, /* (58) defer_subclause ::= NOT DEFERRABLE init_deferred_pred_opt */
+ -2, /* (59) defer_subclause ::= DEFERRABLE init_deferred_pred_opt */
+ 0, /* (60) init_deferred_pred_opt ::= */
+ -2, /* (61) init_deferred_pred_opt ::= INITIALLY DEFERRED */
+ -2, /* (62) init_deferred_pred_opt ::= INITIALLY IMMEDIATE */
+ 0, /* (63) conslist_opt ::= */
+ -1, /* (64) tconscomma ::= COMMA */
+ -2, /* (65) tcons ::= CONSTRAINT nm */
+ -7, /* (66) tcons ::= PRIMARY KEY LP sortlist autoinc RP onconf */
+ -5, /* (67) tcons ::= UNIQUE LP sortlist RP onconf */
+ -5, /* (68) tcons ::= CHECK LP expr RP onconf */
+ -10, /* (69) tcons ::= FOREIGN KEY LP eidlist RP REFERENCES nm eidlist_opt refargs defer_subclause_opt */
+ 0, /* (70) defer_subclause_opt ::= */
+ 0, /* (71) onconf ::= */
+ -3, /* (72) onconf ::= ON CONFLICT resolvetype */
+ 0, /* (73) orconf ::= */
+ -2, /* (74) orconf ::= OR resolvetype */
+ -1, /* (75) resolvetype ::= IGNORE */
+ -1, /* (76) resolvetype ::= REPLACE */
+ -4, /* (77) cmd ::= DROP TABLE ifexists fullname */
+ -2, /* (78) ifexists ::= IF EXISTS */
+ 0, /* (79) ifexists ::= */
+ -9, /* (80) cmd ::= createkw temp VIEW ifnotexists nm dbnm eidlist_opt AS select */
+ -4, /* (81) cmd ::= DROP VIEW ifexists fullname */
+ -1, /* (82) cmd ::= select */
+ -3, /* (83) select ::= WITH wqlist selectnowith */
+ -4, /* (84) select ::= WITH RECURSIVE wqlist selectnowith */
+ -1, /* (85) select ::= selectnowith */
+ -3, /* (86) selectnowith ::= selectnowith multiselect_op oneselect */
+ -1, /* (87) multiselect_op ::= UNION */
+ -2, /* (88) multiselect_op ::= UNION ALL */
+ -1, /* (89) multiselect_op ::= EXCEPT|INTERSECT */
+ -9, /* (90) oneselect ::= SELECT distinct selcollist from where_opt groupby_opt having_opt orderby_opt limit_opt */
+ -10, /* (91) oneselect ::= SELECT distinct selcollist from where_opt groupby_opt having_opt window_clause orderby_opt limit_opt */
+ -4, /* (92) values ::= VALUES LP nexprlist RP */
+ -5, /* (93) values ::= values COMMA LP nexprlist RP */
+ -1, /* (94) distinct ::= DISTINCT */
+ -1, /* (95) distinct ::= ALL */
+ 0, /* (96) distinct ::= */
+ 0, /* (97) sclp ::= */
+ -5, /* (98) selcollist ::= sclp scanpt expr scanpt as */
+ -3, /* (99) selcollist ::= sclp scanpt STAR */
+ -5, /* (100) selcollist ::= sclp scanpt nm DOT STAR */
+ -2, /* (101) as ::= AS nm */
+ 0, /* (102) as ::= */
+ 0, /* (103) from ::= */
+ -2, /* (104) from ::= FROM seltablist */
+ -2, /* (105) stl_prefix ::= seltablist joinop */
+ 0, /* (106) stl_prefix ::= */
+ -7, /* (107) seltablist ::= stl_prefix nm dbnm as indexed_opt on_opt using_opt */
+ -9, /* (108) seltablist ::= stl_prefix nm dbnm LP exprlist RP as on_opt using_opt */
+ -7, /* (109) seltablist ::= stl_prefix LP select RP as on_opt using_opt */
+ -7, /* (110) seltablist ::= stl_prefix LP seltablist RP as on_opt using_opt */
+ 0, /* (111) dbnm ::= */
+ -2, /* (112) dbnm ::= DOT nm */
+ -1, /* (113) fullname ::= nm */
+ -3, /* (114) fullname ::= nm DOT nm */
+ -1, /* (115) xfullname ::= nm */
+ -3, /* (116) xfullname ::= nm DOT nm */
+ -5, /* (117) xfullname ::= nm DOT nm AS nm */
+ -3, /* (118) xfullname ::= nm AS nm */
+ -1, /* (119) joinop ::= COMMA|JOIN */
+ -2, /* (120) joinop ::= JOIN_KW JOIN */
+ -3, /* (121) joinop ::= JOIN_KW nm JOIN */
+ -4, /* (122) joinop ::= JOIN_KW nm nm JOIN */
+ -2, /* (123) on_opt ::= ON expr */
+ 0, /* (124) on_opt ::= */
+ 0, /* (125) indexed_opt ::= */
+ -3, /* (126) indexed_opt ::= INDEXED BY nm */
+ -2, /* (127) indexed_opt ::= NOT INDEXED */
+ -4, /* (128) using_opt ::= USING LP idlist RP */
+ 0, /* (129) using_opt ::= */
+ 0, /* (130) orderby_opt ::= */
+ -3, /* (131) orderby_opt ::= ORDER BY sortlist */
+ -5, /* (132) sortlist ::= sortlist COMMA expr sortorder nulls */
+ -3, /* (133) sortlist ::= expr sortorder nulls */
+ -1, /* (134) sortorder ::= ASC */
+ -1, /* (135) sortorder ::= DESC */
+ 0, /* (136) sortorder ::= */
+ -2, /* (137) nulls ::= NULLS FIRST */
+ -2, /* (138) nulls ::= NULLS LAST */
+ 0, /* (139) nulls ::= */
+ 0, /* (140) groupby_opt ::= */
+ -3, /* (141) groupby_opt ::= GROUP BY nexprlist */
+ 0, /* (142) having_opt ::= */
+ -2, /* (143) having_opt ::= HAVING expr */
+ 0, /* (144) limit_opt ::= */
+ -2, /* (145) limit_opt ::= LIMIT expr */
+ -4, /* (146) limit_opt ::= LIMIT expr OFFSET expr */
+ -4, /* (147) limit_opt ::= LIMIT expr COMMA expr */
+ -8, /* (148) cmd ::= with DELETE FROM xfullname indexed_opt where_opt orderby_opt limit_opt */
+ 0, /* (149) where_opt ::= */
+ -2, /* (150) where_opt ::= WHERE expr */
+ -11, /* (151) cmd ::= with UPDATE orconf xfullname indexed_opt SET setlist from where_opt orderby_opt limit_opt */
+ -5, /* (152) setlist ::= setlist COMMA nm EQ expr */
+ -7, /* (153) setlist ::= setlist COMMA LP idlist RP EQ expr */
+ -3, /* (154) setlist ::= nm EQ expr */
+ -5, /* (155) setlist ::= LP idlist RP EQ expr */
+ -7, /* (156) cmd ::= with insert_cmd INTO xfullname idlist_opt select upsert */
+ -7, /* (157) cmd ::= with insert_cmd INTO xfullname idlist_opt DEFAULT VALUES */
+ 0, /* (158) upsert ::= */
+ -11, /* (159) upsert ::= ON CONFLICT LP sortlist RP where_opt DO UPDATE SET setlist where_opt */
+ -8, /* (160) upsert ::= ON CONFLICT LP sortlist RP where_opt DO NOTHING */
+ -4, /* (161) upsert ::= ON CONFLICT DO NOTHING */
+ -2, /* (162) insert_cmd ::= INSERT orconf */
+ -1, /* (163) insert_cmd ::= REPLACE */
+ 0, /* (164) idlist_opt ::= */
+ -3, /* (165) idlist_opt ::= LP idlist RP */
+ -3, /* (166) idlist ::= idlist COMMA nm */
+ -1, /* (167) idlist ::= nm */
+ -3, /* (168) expr ::= LP expr RP */
+ -1, /* (169) expr ::= ID|INDEXED */
+ -1, /* (170) expr ::= JOIN_KW */
+ -3, /* (171) expr ::= nm DOT nm */
+ -5, /* (172) expr ::= nm DOT nm DOT nm */
+ -1, /* (173) term ::= NULL|FLOAT|BLOB */
+ -1, /* (174) term ::= STRING */
+ -1, /* (175) term ::= INTEGER */
+ -1, /* (176) expr ::= VARIABLE */
+ -3, /* (177) expr ::= expr COLLATE ID|STRING */
+ -6, /* (178) expr ::= CAST LP expr AS typetoken RP */
+ -5, /* (179) expr ::= ID|INDEXED LP distinct exprlist RP */
+ -4, /* (180) expr ::= ID|INDEXED LP STAR RP */
+ -6, /* (181) expr ::= ID|INDEXED LP distinct exprlist RP filter_over */
+ -5, /* (182) expr ::= ID|INDEXED LP STAR RP filter_over */
+ -1, /* (183) term ::= CTIME_KW */
+ -5, /* (184) expr ::= LP nexprlist COMMA expr RP */
+ -3, /* (185) expr ::= expr AND expr */
+ -3, /* (186) expr ::= expr OR expr */
+ -3, /* (187) expr ::= expr LT|GT|GE|LE expr */
+ -3, /* (188) expr ::= expr EQ|NE expr */
+ -3, /* (189) expr ::= expr BITAND|BITOR|LSHIFT|RSHIFT expr */
+ -3, /* (190) expr ::= expr PLUS|MINUS expr */
+ -3, /* (191) expr ::= expr STAR|SLASH|REM expr */
+ -3, /* (192) expr ::= expr CONCAT expr */
+ -2, /* (193) likeop ::= NOT LIKE_KW|MATCH */
+ -3, /* (194) expr ::= expr likeop expr */
+ -5, /* (195) expr ::= expr likeop expr ESCAPE expr */
+ -2, /* (196) expr ::= expr ISNULL|NOTNULL */
+ -3, /* (197) expr ::= expr NOT NULL */
+ -3, /* (198) expr ::= expr IS expr */
+ -4, /* (199) expr ::= expr IS NOT expr */
+ -2, /* (200) expr ::= NOT expr */
+ -2, /* (201) expr ::= BITNOT expr */
+ -2, /* (202) expr ::= PLUS|MINUS expr */
+ -1, /* (203) between_op ::= BETWEEN */
+ -2, /* (204) between_op ::= NOT BETWEEN */
+ -5, /* (205) expr ::= expr between_op expr AND expr */
+ -1, /* (206) in_op ::= IN */
+ -2, /* (207) in_op ::= NOT IN */
+ -5, /* (208) expr ::= expr in_op LP exprlist RP */
+ -3, /* (209) expr ::= LP select RP */
+ -5, /* (210) expr ::= expr in_op LP select RP */
+ -5, /* (211) expr ::= expr in_op nm dbnm paren_exprlist */
+ -4, /* (212) expr ::= EXISTS LP select RP */
+ -5, /* (213) expr ::= CASE case_operand case_exprlist case_else END */
+ -5, /* (214) case_exprlist ::= case_exprlist WHEN expr THEN expr */
+ -4, /* (215) case_exprlist ::= WHEN expr THEN expr */
+ -2, /* (216) case_else ::= ELSE expr */
+ 0, /* (217) case_else ::= */
+ -1, /* (218) case_operand ::= expr */
+ 0, /* (219) case_operand ::= */
+ 0, /* (220) exprlist ::= */
+ -3, /* (221) nexprlist ::= nexprlist COMMA expr */
+ -1, /* (222) nexprlist ::= expr */
+ 0, /* (223) paren_exprlist ::= */
+ -3, /* (224) paren_exprlist ::= LP exprlist RP */
+ -12, /* (225) cmd ::= createkw uniqueflag INDEX ifnotexists nm dbnm ON nm LP sortlist RP where_opt */
+ -1, /* (226) uniqueflag ::= UNIQUE */
+ 0, /* (227) uniqueflag ::= */
+ 0, /* (228) eidlist_opt ::= */
+ -3, /* (229) eidlist_opt ::= LP eidlist RP */
+ -5, /* (230) eidlist ::= eidlist COMMA nm collate sortorder */
+ -3, /* (231) eidlist ::= nm collate sortorder */
+ 0, /* (232) collate ::= */
+ -2, /* (233) collate ::= COLLATE ID|STRING */
+ -4, /* (234) cmd ::= DROP INDEX ifexists fullname */
+ -2, /* (235) cmd ::= VACUUM vinto */
+ -3, /* (236) cmd ::= VACUUM nm vinto */
+ -2, /* (237) vinto ::= INTO expr */
+ 0, /* (238) vinto ::= */
+ -3, /* (239) cmd ::= PRAGMA nm dbnm */
+ -5, /* (240) cmd ::= PRAGMA nm dbnm EQ nmnum */
+ -6, /* (241) cmd ::= PRAGMA nm dbnm LP nmnum RP */
+ -5, /* (242) cmd ::= PRAGMA nm dbnm EQ minus_num */
+ -6, /* (243) cmd ::= PRAGMA nm dbnm LP minus_num RP */
+ -2, /* (244) plus_num ::= PLUS INTEGER|FLOAT */
+ -2, /* (245) minus_num ::= MINUS INTEGER|FLOAT */
+ -5, /* (246) cmd ::= createkw trigger_decl BEGIN trigger_cmd_list END */
+ -11, /* (247) trigger_decl ::= temp TRIGGER ifnotexists nm dbnm trigger_time trigger_event ON fullname foreach_clause when_clause */
+ -1, /* (248) trigger_time ::= BEFORE|AFTER */
+ -2, /* (249) trigger_time ::= INSTEAD OF */
+ 0, /* (250) trigger_time ::= */
+ -1, /* (251) trigger_event ::= DELETE|INSERT */
+ -1, /* (252) trigger_event ::= UPDATE */
+ -3, /* (253) trigger_event ::= UPDATE OF idlist */
+ 0, /* (254) when_clause ::= */
+ -2, /* (255) when_clause ::= WHEN expr */
+ -3, /* (256) trigger_cmd_list ::= trigger_cmd_list trigger_cmd SEMI */
+ -2, /* (257) trigger_cmd_list ::= trigger_cmd SEMI */
+ -3, /* (258) trnm ::= nm DOT nm */
+ -3, /* (259) tridxby ::= INDEXED BY nm */
+ -2, /* (260) tridxby ::= NOT INDEXED */
+ -9, /* (261) trigger_cmd ::= UPDATE orconf trnm tridxby SET setlist from where_opt scanpt */
+ -8, /* (262) trigger_cmd ::= scanpt insert_cmd INTO trnm idlist_opt select upsert scanpt */
+ -6, /* (263) trigger_cmd ::= DELETE FROM trnm tridxby where_opt scanpt */
+ -3, /* (264) trigger_cmd ::= scanpt select scanpt */
+ -4, /* (265) expr ::= RAISE LP IGNORE RP */
+ -6, /* (266) expr ::= RAISE LP raisetype COMMA nm RP */
+ -1, /* (267) raisetype ::= ROLLBACK */
+ -1, /* (268) raisetype ::= ABORT */
+ -1, /* (269) raisetype ::= FAIL */
+ -4, /* (270) cmd ::= DROP TRIGGER ifexists fullname */
+ -6, /* (271) cmd ::= ATTACH database_kw_opt expr AS expr key_opt */
+ -3, /* (272) cmd ::= DETACH database_kw_opt expr */
+ 0, /* (273) key_opt ::= */
+ -2, /* (274) key_opt ::= KEY expr */
+ -1, /* (275) cmd ::= REINDEX */
+ -3, /* (276) cmd ::= REINDEX nm dbnm */
+ -1, /* (277) cmd ::= ANALYZE */
+ -3, /* (278) cmd ::= ANALYZE nm dbnm */
+ -6, /* (279) cmd ::= ALTER TABLE fullname RENAME TO nm */
+ -7, /* (280) cmd ::= ALTER TABLE add_column_fullname ADD kwcolumn_opt columnname carglist */
+ -1, /* (281) add_column_fullname ::= fullname */
+ -8, /* (282) cmd ::= ALTER TABLE fullname RENAME kwcolumn_opt nm TO nm */
+ -1, /* (283) cmd ::= create_vtab */
+ -4, /* (284) cmd ::= create_vtab LP vtabarglist RP */
+ -8, /* (285) create_vtab ::= createkw VIRTUAL TABLE ifnotexists nm dbnm USING nm */
+ 0, /* (286) vtabarg ::= */
+ -1, /* (287) vtabargtoken ::= ANY */
+ -3, /* (288) vtabargtoken ::= lp anylist RP */
+ -1, /* (289) lp ::= LP */
+ -2, /* (290) with ::= WITH wqlist */
+ -3, /* (291) with ::= WITH RECURSIVE wqlist */
+ -6, /* (292) wqlist ::= nm eidlist_opt AS LP select RP */
+ -8, /* (293) wqlist ::= wqlist COMMA nm eidlist_opt AS LP select RP */
+ -1, /* (294) windowdefn_list ::= windowdefn */
+ -3, /* (295) windowdefn_list ::= windowdefn_list COMMA windowdefn */
+ -5, /* (296) windowdefn ::= nm AS LP window RP */
+ -5, /* (297) window ::= PARTITION BY nexprlist orderby_opt frame_opt */
+ -6, /* (298) window ::= nm PARTITION BY nexprlist orderby_opt frame_opt */
+ -4, /* (299) window ::= ORDER BY sortlist frame_opt */
+ -5, /* (300) window ::= nm ORDER BY sortlist frame_opt */
+ -1, /* (301) window ::= frame_opt */
+ -2, /* (302) window ::= nm frame_opt */
+ 0, /* (303) frame_opt ::= */
+ -3, /* (304) frame_opt ::= range_or_rows frame_bound_s frame_exclude_opt */
+ -6, /* (305) frame_opt ::= range_or_rows BETWEEN frame_bound_s AND frame_bound_e frame_exclude_opt */
+ -1, /* (306) range_or_rows ::= RANGE|ROWS|GROUPS */
+ -1, /* (307) frame_bound_s ::= frame_bound */
+ -2, /* (308) frame_bound_s ::= UNBOUNDED PRECEDING */
+ -1, /* (309) frame_bound_e ::= frame_bound */
+ -2, /* (310) frame_bound_e ::= UNBOUNDED FOLLOWING */
+ -2, /* (311) frame_bound ::= expr PRECEDING|FOLLOWING */
+ -2, /* (312) frame_bound ::= CURRENT ROW */
+ 0, /* (313) frame_exclude_opt ::= */
+ -2, /* (314) frame_exclude_opt ::= EXCLUDE frame_exclude */
+ -2, /* (315) frame_exclude ::= NO OTHERS */
+ -2, /* (316) frame_exclude ::= CURRENT ROW */
+ -1, /* (317) frame_exclude ::= GROUP|TIES */
+ -2, /* (318) window_clause ::= WINDOW windowdefn_list */
+ -2, /* (319) filter_over ::= filter_clause over_clause */
+ -1, /* (320) filter_over ::= over_clause */
+ -1, /* (321) filter_over ::= filter_clause */
+ -4, /* (322) over_clause ::= OVER LP window RP */
+ -2, /* (323) over_clause ::= OVER nm */
+ -5, /* (324) filter_clause ::= FILTER LP WHERE expr RP */
+ -1, /* (325) input ::= cmdlist */
+ -2, /* (326) cmdlist ::= cmdlist ecmd */
+ -1, /* (327) cmdlist ::= ecmd */
+ -1, /* (328) ecmd ::= SEMI */
+ -2, /* (329) ecmd ::= cmdx SEMI */
+ -3, /* (330) ecmd ::= explain cmdx SEMI */
+ 0, /* (331) trans_opt ::= */
+ -1, /* (332) trans_opt ::= TRANSACTION */
+ -2, /* (333) trans_opt ::= TRANSACTION nm */
+ -1, /* (334) savepoint_opt ::= SAVEPOINT */
+ 0, /* (335) savepoint_opt ::= */
+ -2, /* (336) cmd ::= create_table create_table_args */
+ -4, /* (337) columnlist ::= columnlist COMMA columnname carglist */
+ -2, /* (338) columnlist ::= columnname carglist */
+ -1, /* (339) nm ::= ID|INDEXED */
+ -1, /* (340) nm ::= STRING */
+ -1, /* (341) nm ::= JOIN_KW */
+ -1, /* (342) typetoken ::= typename */
+ -1, /* (343) typename ::= ID|STRING */
+ -1, /* (344) signed ::= plus_num */
+ -1, /* (345) signed ::= minus_num */
+ -2, /* (346) carglist ::= carglist ccons */
+ 0, /* (347) carglist ::= */
+ -2, /* (348) ccons ::= NULL onconf */
+ -4, /* (349) ccons ::= GENERATED ALWAYS AS generated */
+ -2, /* (350) ccons ::= AS generated */
+ -2, /* (351) conslist_opt ::= COMMA conslist */
+ -3, /* (352) conslist ::= conslist tconscomma tcons */
+ -1, /* (353) conslist ::= tcons */
+ 0, /* (354) tconscomma ::= */
+ -1, /* (355) defer_subclause_opt ::= defer_subclause */
+ -1, /* (356) resolvetype ::= raisetype */
+ -1, /* (357) selectnowith ::= oneselect */
+ -1, /* (358) oneselect ::= values */
+ -2, /* (359) sclp ::= selcollist COMMA */
+ -1, /* (360) as ::= ID|STRING */
+ -1, /* (361) expr ::= term */
+ -1, /* (362) likeop ::= LIKE_KW|MATCH */
+ -1, /* (363) exprlist ::= nexprlist */
+ -1, /* (364) nmnum ::= plus_num */
+ -1, /* (365) nmnum ::= nm */
+ -1, /* (366) nmnum ::= ON */
+ -1, /* (367) nmnum ::= DELETE */
+ -1, /* (368) nmnum ::= DEFAULT */
+ -1, /* (369) plus_num ::= INTEGER|FLOAT */
+ 0, /* (370) foreach_clause ::= */
+ -3, /* (371) foreach_clause ::= FOR EACH ROW */
+ -1, /* (372) trnm ::= nm */
+ 0, /* (373) tridxby ::= */
+ -1, /* (374) database_kw_opt ::= DATABASE */
+ 0, /* (375) database_kw_opt ::= */
+ 0, /* (376) kwcolumn_opt ::= */
+ -1, /* (377) kwcolumn_opt ::= COLUMNKW */
+ -1, /* (378) vtabarglist ::= vtabarg */
+ -3, /* (379) vtabarglist ::= vtabarglist COMMA vtabarg */
+ -2, /* (380) vtabarg ::= vtabarg vtabargtoken */
+ 0, /* (381) anylist ::= */
+ -4, /* (382) anylist ::= anylist LP anylist RP */
+ -2, /* (383) anylist ::= anylist ANY */
+ 0, /* (384) with ::= */
+};
+
+static void yy_accept(yyParser*); /* Forward Declaration */
+
+/*
+** Perform a reduce action and the shift that must immediately
+** follow the reduce.
+**
+** The yyLookahead and yyLookaheadToken parameters give reduce actions
+** access to the lookahead token (if any); yyLookahead is YYNOCODE when
+** the lookahead token has already been consumed.  Because this procedure
+** is called from only one place, optimizing compilers will inline it,
+** so the extra parameters have no performance impact.
+*/
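+/*
+** The "scanpt" and "scantok" reductions (cases 28 and 29 below) are the
+** actions that consume the lookahead parameters, using them to record
+** where the token following the reduced phrase begins; the void casts
+** at the top of the function silence unused-parameter warnings for the
+** remaining actions.
+*/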
+static YYACTIONTYPE yy_reduce(
+ yyParser *yypParser, /* The parser */
+ unsigned int yyruleno, /* Number of the rule by which to reduce */
+ int yyLookahead, /* Lookahead token, or YYNOCODE if none */
+ sqlite3ParserTOKENTYPE yyLookaheadToken /* Value of the lookahead token */
+ sqlite3ParserCTX_PDECL /* %extra_context */
+){
+ int yygoto; /* The next state */
+ YYACTIONTYPE yyact; /* The next action */
+ yyStackEntry *yymsp; /* The top of the parser's stack */
+ int yysize; /* Amount to pop the stack */
+ sqlite3ParserARG_FETCH
+ (void)yyLookahead;
+ (void)yyLookaheadToken;
+ yymsp = yypParser->yytos;
+#ifndef NDEBUG
+ if( yyTraceFILE && yyruleno<(int)(sizeof(yyRuleName)/sizeof(yyRuleName[0])) ){
+ yysize = yyRuleInfoNRhs[yyruleno];
+ if( yysize ){
+ fprintf(yyTraceFILE, "%sReduce %d [%s]%s, pop back to state %d.\n",
+ yyTracePrompt,
+ yyruleno, yyRuleName[yyruleno],
+ yyruleno<YYNRULE_WITH_ACTION ? "" : " without external action",
+ yymsp[yysize].stateno);
+ }else{
+ fprintf(yyTraceFILE, "%sReduce %d [%s]%s.\n",
+ yyTracePrompt, yyruleno, yyRuleName[yyruleno],
+ yyruleno<YYNRULE_WITH_ACTION ? "" : " without external action");
+ }
+ }
+#endif /* NDEBUG */
+
+  /* Check that the stack is large enough to grow by a single entry
+  ** if the RHS of the rule is empty. This ensures that there is
+  ** enough room on the stack to push the LHS value. */
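+  /* (A rule with a non-empty RHS pops at least one entry before the
+  ** LHS value is pushed, so the push reuses space that is already
+  ** allocated; only an empty rule makes the stack grow.) */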
+ if( yyRuleInfoNRhs[yyruleno]==0 ){
+#ifdef YYTRACKMAXSTACKDEPTH
+ if( (int)(yypParser->yytos - yypParser->yystack)>yypParser->yyhwm ){
+ yypParser->yyhwm++;
+ assert( yypParser->yyhwm == (int)(yypParser->yytos - yypParser->yystack));
+ }
+#endif
+#if YYSTACKDEPTH>0
+ if( yypParser->yytos>=yypParser->yystackEnd ){
+ yyStackOverflow(yypParser);
+ /* The call to yyStackOverflow() above pops the stack until it is
+ ** empty, causing the main parser loop to exit. So the return value
+ ** is never used and does not matter. */
+ return 0;
+ }
+#else
+ if( yypParser->yytos>=&yypParser->yystack[yypParser->yystksz-1] ){
+ if( yyGrowStack(yypParser) ){
+ yyStackOverflow(yypParser);
+ /* The call to yyStackOverflow() above pops the stack until it is
+ ** empty, causing the main parser loop to exit. So the return value
+ ** is never used and does not matter. */
+ return 0;
+ }
+ yymsp = yypParser->yytos;
+ }
+#endif
+ }
+
+ switch( yyruleno ){
+ /* Beginning here are the reduction cases. A typical example
+ ** follows:
+ ** case 0:
+ ** #line <lineno> <grammarfile>
+ ** { ... } // User supplied code
+ ** #line <lineno> <thisfile>
+ ** break;
+ */
+/********** Begin reduce actions **********************************************/
+ YYMINORTYPE yylhsminor;
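+  /* yylhsminor temporarily holds the left-hand side's semantic value
+  ** when an action still needs to read the stack slot that the value
+  ** will overwrite; the action copies it into yymsp[] afterwards (see
+  ** case 113 below).  The "A-overwrites-X" comments mark actions where
+  ** the value can safely be written in place instead. */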
+ case 0: /* explain ::= EXPLAIN */
+{ pParse->explain = 1; }
+ break;
+ case 1: /* explain ::= EXPLAIN QUERY PLAN */
+{ pParse->explain = 2; }
+ break;
+ case 2: /* cmdx ::= cmd */
+{ sqlite3FinishCoding(pParse); }
+ break;
+ case 3: /* cmd ::= BEGIN transtype trans_opt */
+{sqlite3BeginTransaction(pParse, yymsp[-1].minor.yy192);}
+ break;
+ case 4: /* transtype ::= */
+{yymsp[1].minor.yy192 = TK_DEFERRED;}
+ break;
+ case 5: /* transtype ::= DEFERRED */
+ case 6: /* transtype ::= IMMEDIATE */ yytestcase(yyruleno==6);
+ case 7: /* transtype ::= EXCLUSIVE */ yytestcase(yyruleno==7);
+ case 306: /* range_or_rows ::= RANGE|ROWS|GROUPS */ yytestcase(yyruleno==306);
+{yymsp[0].minor.yy192 = yymsp[0].major; /*A-overwrites-X*/}
+ break;
+ case 8: /* cmd ::= COMMIT|END trans_opt */
+ case 9: /* cmd ::= ROLLBACK trans_opt */ yytestcase(yyruleno==9);
+{sqlite3EndTransaction(pParse,yymsp[-1].major);}
+ break;
+ case 10: /* cmd ::= SAVEPOINT nm */
+{
+ sqlite3Savepoint(pParse, SAVEPOINT_BEGIN, &yymsp[0].minor.yy0);
+}
+ break;
+ case 11: /* cmd ::= RELEASE savepoint_opt nm */
+{
+ sqlite3Savepoint(pParse, SAVEPOINT_RELEASE, &yymsp[0].minor.yy0);
+}
+ break;
+ case 12: /* cmd ::= ROLLBACK trans_opt TO savepoint_opt nm */
+{
+ sqlite3Savepoint(pParse, SAVEPOINT_ROLLBACK, &yymsp[0].minor.yy0);
+}
+ break;
+ case 13: /* create_table ::= createkw temp TABLE ifnotexists nm dbnm */
+{
+ sqlite3StartTable(pParse,&yymsp[-1].minor.yy0,&yymsp[0].minor.yy0,yymsp[-4].minor.yy192,0,0,yymsp[-2].minor.yy192);
+}
+ break;
+ case 14: /* createkw ::= CREATE */
+{disableLookaside(pParse);}
+ break;
+ case 15: /* ifnotexists ::= */
+ case 18: /* temp ::= */ yytestcase(yyruleno==18);
+ case 21: /* table_options ::= */ yytestcase(yyruleno==21);
+ case 45: /* autoinc ::= */ yytestcase(yyruleno==45);
+ case 60: /* init_deferred_pred_opt ::= */ yytestcase(yyruleno==60);
+ case 70: /* defer_subclause_opt ::= */ yytestcase(yyruleno==70);
+ case 79: /* ifexists ::= */ yytestcase(yyruleno==79);
+ case 96: /* distinct ::= */ yytestcase(yyruleno==96);
+ case 232: /* collate ::= */ yytestcase(yyruleno==232);
+{yymsp[1].minor.yy192 = 0;}
+ break;
+ case 16: /* ifnotexists ::= IF NOT EXISTS */
+{yymsp[-2].minor.yy192 = 1;}
+ break;
+ case 17: /* temp ::= TEMP */
+ case 46: /* autoinc ::= AUTOINCR */ yytestcase(yyruleno==46);
+{yymsp[0].minor.yy192 = 1;}
+ break;
+ case 19: /* create_table_args ::= LP columnlist conslist_opt RP table_options */
+{
+ sqlite3EndTable(pParse,&yymsp[-2].minor.yy0,&yymsp[-1].minor.yy0,yymsp[0].minor.yy192,0);
+}
+ break;
+ case 20: /* create_table_args ::= AS select */
+{
+ sqlite3EndTable(pParse,0,0,0,yymsp[0].minor.yy539);
+ sqlite3SelectDelete(pParse->db, yymsp[0].minor.yy539);
+}
+ break;
+ case 22: /* table_options ::= WITHOUT nm */
+{
+ if( yymsp[0].minor.yy0.n==5 && sqlite3_strnicmp(yymsp[0].minor.yy0.z,"rowid",5)==0 ){
+ yymsp[-1].minor.yy192 = TF_WithoutRowid | TF_NoVisibleRowid;
+ }else{
+ yymsp[-1].minor.yy192 = 0;
+ sqlite3ErrorMsg(pParse, "unknown table option: %.*s", yymsp[0].minor.yy0.n, yymsp[0].minor.yy0.z);
+ }
+}
+ break;
+ case 23: /* columnname ::= nm typetoken */
+{sqlite3AddColumn(pParse,&yymsp[-1].minor.yy0,&yymsp[0].minor.yy0);}
+ break;
+ case 24: /* typetoken ::= */
+ case 63: /* conslist_opt ::= */ yytestcase(yyruleno==63);
+ case 102: /* as ::= */ yytestcase(yyruleno==102);
+{yymsp[1].minor.yy0.n = 0; yymsp[1].minor.yy0.z = 0;}
+ break;
+ case 25: /* typetoken ::= typename LP signed RP */
+{
+ yymsp[-3].minor.yy0.n = (int)(&yymsp[0].minor.yy0.z[yymsp[0].minor.yy0.n] - yymsp[-3].minor.yy0.z);
+}
+ break;
+ case 26: /* typetoken ::= typename LP signed COMMA signed RP */
+{
+ yymsp[-5].minor.yy0.n = (int)(&yymsp[0].minor.yy0.z[yymsp[0].minor.yy0.n] - yymsp[-5].minor.yy0.z);
+}
+ break;
+ case 27: /* typename ::= typename ID|STRING */
+{yymsp[-1].minor.yy0.n=yymsp[0].minor.yy0.n+(int)(yymsp[0].minor.yy0.z-yymsp[-1].minor.yy0.z);}
+ break;
+ case 28: /* scanpt ::= */
+{
+ assert( yyLookahead!=YYNOCODE );
+ yymsp[1].minor.yy436 = yyLookaheadToken.z;
+}
+ break;
+ case 29: /* scantok ::= */
+{
+ assert( yyLookahead!=YYNOCODE );
+ yymsp[1].minor.yy0 = yyLookaheadToken;
+}
+ break;
+ case 30: /* ccons ::= CONSTRAINT nm */
+ case 65: /* tcons ::= CONSTRAINT nm */ yytestcase(yyruleno==65);
+{pParse->constraintName = yymsp[0].minor.yy0;}
+ break;
+ case 31: /* ccons ::= DEFAULT scantok term */
+{sqlite3AddDefaultValue(pParse,yymsp[0].minor.yy202,yymsp[-1].minor.yy0.z,&yymsp[-1].minor.yy0.z[yymsp[-1].minor.yy0.n]);}
+ break;
+ case 32: /* ccons ::= DEFAULT LP expr RP */
+{sqlite3AddDefaultValue(pParse,yymsp[-1].minor.yy202,yymsp[-2].minor.yy0.z+1,yymsp[0].minor.yy0.z);}
+ break;
+ case 33: /* ccons ::= DEFAULT PLUS scantok term */
+{sqlite3AddDefaultValue(pParse,yymsp[0].minor.yy202,yymsp[-2].minor.yy0.z,&yymsp[-1].minor.yy0.z[yymsp[-1].minor.yy0.n]);}
+ break;
+ case 34: /* ccons ::= DEFAULT MINUS scantok term */
+{
+ Expr *p = sqlite3PExpr(pParse, TK_UMINUS, yymsp[0].minor.yy202, 0);
+ sqlite3AddDefaultValue(pParse,p,yymsp[-2].minor.yy0.z,&yymsp[-1].minor.yy0.z[yymsp[-1].minor.yy0.n]);
+}
+ break;
+ case 35: /* ccons ::= DEFAULT scantok ID|INDEXED */
+{
+ Expr *p = tokenExpr(pParse, TK_STRING, yymsp[0].minor.yy0);
+ if( p ){
+ sqlite3ExprIdToTrueFalse(p);
+ testcase( p->op==TK_TRUEFALSE && sqlite3ExprTruthValue(p) );
+ }
+ sqlite3AddDefaultValue(pParse,p,yymsp[0].minor.yy0.z,yymsp[0].minor.yy0.z+yymsp[0].minor.yy0.n);
+}
+ break;
+ case 36: /* ccons ::= NOT NULL onconf */
+{sqlite3AddNotNull(pParse, yymsp[0].minor.yy192);}
+ break;
+ case 37: /* ccons ::= PRIMARY KEY sortorder onconf autoinc */
+{sqlite3AddPrimaryKey(pParse,0,yymsp[-1].minor.yy192,yymsp[0].minor.yy192,yymsp[-2].minor.yy192);}
+ break;
+ case 38: /* ccons ::= UNIQUE onconf */
+{sqlite3CreateIndex(pParse,0,0,0,0,yymsp[0].minor.yy192,0,0,0,0,
+ SQLITE_IDXTYPE_UNIQUE);}
+ break;
+ case 39: /* ccons ::= CHECK LP expr RP */
+{sqlite3AddCheckConstraint(pParse,yymsp[-1].minor.yy202);}
+ break;
+ case 40: /* ccons ::= REFERENCES nm eidlist_opt refargs */
+{sqlite3CreateForeignKey(pParse,0,&yymsp[-2].minor.yy0,yymsp[-1].minor.yy242,yymsp[0].minor.yy192);}
+ break;
+ case 41: /* ccons ::= defer_subclause */
+{sqlite3DeferForeignKey(pParse,yymsp[0].minor.yy192);}
+ break;
+ case 42: /* ccons ::= COLLATE ID|STRING */
+{sqlite3AddCollateType(pParse, &yymsp[0].minor.yy0);}
+ break;
+ case 43: /* generated ::= LP expr RP */
+{sqlite3AddGenerated(pParse,yymsp[-1].minor.yy202,0);}
+ break;
+ case 44: /* generated ::= LP expr RP ID */
+{sqlite3AddGenerated(pParse,yymsp[-2].minor.yy202,&yymsp[0].minor.yy0);}
+ break;
+ case 47: /* refargs ::= */
+{ yymsp[1].minor.yy192 = OE_None*0x0101; /* EV: R-19803-45884 */}
+ break;
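+      /* refargs packs the two foreign-key actions into one integer:
+      ** the ON DELETE action in the low byte (mask 0x0000ff) and the
+      ** ON UPDATE action in the byte above it (mask 0x00ff00), as
+      ** cases 51 and 52 show.  OE_None*0x0101 sets both bytes to
+      ** OE_None at once. */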
+ case 48: /* refargs ::= refargs refarg */
+{ yymsp[-1].minor.yy192 = (yymsp[-1].minor.yy192 & ~yymsp[0].minor.yy207.mask) | yymsp[0].minor.yy207.value; }
+ break;
+ case 49: /* refarg ::= MATCH nm */
+{ yymsp[-1].minor.yy207.value = 0; yymsp[-1].minor.yy207.mask = 0x000000; }
+ break;
+ case 50: /* refarg ::= ON INSERT refact */
+{ yymsp[-2].minor.yy207.value = 0; yymsp[-2].minor.yy207.mask = 0x000000; }
+ break;
+ case 51: /* refarg ::= ON DELETE refact */
+{ yymsp[-2].minor.yy207.value = yymsp[0].minor.yy192; yymsp[-2].minor.yy207.mask = 0x0000ff; }
+ break;
+ case 52: /* refarg ::= ON UPDATE refact */
+{ yymsp[-2].minor.yy207.value = yymsp[0].minor.yy192<<8; yymsp[-2].minor.yy207.mask = 0x00ff00; }
+ break;
+ case 53: /* refact ::= SET NULL */
+{ yymsp[-1].minor.yy192 = OE_SetNull; /* EV: R-33326-45252 */}
+ break;
+ case 54: /* refact ::= SET DEFAULT */
+{ yymsp[-1].minor.yy192 = OE_SetDflt; /* EV: R-33326-45252 */}
+ break;
+ case 55: /* refact ::= CASCADE */
+{ yymsp[0].minor.yy192 = OE_Cascade; /* EV: R-33326-45252 */}
+ break;
+ case 56: /* refact ::= RESTRICT */
+{ yymsp[0].minor.yy192 = OE_Restrict; /* EV: R-33326-45252 */}
+ break;
+ case 57: /* refact ::= NO ACTION */
+{ yymsp[-1].minor.yy192 = OE_None; /* EV: R-33326-45252 */}
+ break;
+ case 58: /* defer_subclause ::= NOT DEFERRABLE init_deferred_pred_opt */
+{yymsp[-2].minor.yy192 = 0;}
+ break;
+ case 59: /* defer_subclause ::= DEFERRABLE init_deferred_pred_opt */
+ case 74: /* orconf ::= OR resolvetype */ yytestcase(yyruleno==74);
+ case 162: /* insert_cmd ::= INSERT orconf */ yytestcase(yyruleno==162);
+{yymsp[-1].minor.yy192 = yymsp[0].minor.yy192;}
+ break;
+ case 61: /* init_deferred_pred_opt ::= INITIALLY DEFERRED */
+ case 78: /* ifexists ::= IF EXISTS */ yytestcase(yyruleno==78);
+ case 204: /* between_op ::= NOT BETWEEN */ yytestcase(yyruleno==204);
+ case 207: /* in_op ::= NOT IN */ yytestcase(yyruleno==207);
+ case 233: /* collate ::= COLLATE ID|STRING */ yytestcase(yyruleno==233);
+{yymsp[-1].minor.yy192 = 1;}
+ break;
+ case 62: /* init_deferred_pred_opt ::= INITIALLY IMMEDIATE */
+{yymsp[-1].minor.yy192 = 0;}
+ break;
+ case 64: /* tconscomma ::= COMMA */
+{pParse->constraintName.n = 0;}
+ break;
+ case 66: /* tcons ::= PRIMARY KEY LP sortlist autoinc RP onconf */
+{sqlite3AddPrimaryKey(pParse,yymsp[-3].minor.yy242,yymsp[0].minor.yy192,yymsp[-2].minor.yy192,0);}
+ break;
+ case 67: /* tcons ::= UNIQUE LP sortlist RP onconf */
+{sqlite3CreateIndex(pParse,0,0,0,yymsp[-2].minor.yy242,yymsp[0].minor.yy192,0,0,0,0,
+ SQLITE_IDXTYPE_UNIQUE);}
+ break;
+ case 68: /* tcons ::= CHECK LP expr RP onconf */
+{sqlite3AddCheckConstraint(pParse,yymsp[-2].minor.yy202);}
+ break;
+ case 69: /* tcons ::= FOREIGN KEY LP eidlist RP REFERENCES nm eidlist_opt refargs defer_subclause_opt */
+{
+ sqlite3CreateForeignKey(pParse, yymsp[-6].minor.yy242, &yymsp[-3].minor.yy0, yymsp[-2].minor.yy242, yymsp[-1].minor.yy192);
+ sqlite3DeferForeignKey(pParse, yymsp[0].minor.yy192);
+}
+ break;
+ case 71: /* onconf ::= */
+ case 73: /* orconf ::= */ yytestcase(yyruleno==73);
+{yymsp[1].minor.yy192 = OE_Default;}
+ break;
+ case 72: /* onconf ::= ON CONFLICT resolvetype */
+{yymsp[-2].minor.yy192 = yymsp[0].minor.yy192;}
+ break;
+ case 75: /* resolvetype ::= IGNORE */
+{yymsp[0].minor.yy192 = OE_Ignore;}
+ break;
+ case 76: /* resolvetype ::= REPLACE */
+ case 163: /* insert_cmd ::= REPLACE */ yytestcase(yyruleno==163);
+{yymsp[0].minor.yy192 = OE_Replace;}
+ break;
+ case 77: /* cmd ::= DROP TABLE ifexists fullname */
+{
+ sqlite3DropTable(pParse, yymsp[0].minor.yy47, 0, yymsp[-1].minor.yy192);
+}
+ break;
+ case 80: /* cmd ::= createkw temp VIEW ifnotexists nm dbnm eidlist_opt AS select */
+{
+ sqlite3CreateView(pParse, &yymsp[-8].minor.yy0, &yymsp[-4].minor.yy0, &yymsp[-3].minor.yy0, yymsp[-2].minor.yy242, yymsp[0].minor.yy539, yymsp[-7].minor.yy192, yymsp[-5].minor.yy192);
+}
+ break;
+ case 81: /* cmd ::= DROP VIEW ifexists fullname */
+{
+ sqlite3DropTable(pParse, yymsp[0].minor.yy47, 1, yymsp[-1].minor.yy192);
+}
+ break;
+ case 82: /* cmd ::= select */
+{
+ SelectDest dest = {SRT_Output, 0, 0, 0, 0, 0, 0};
+ sqlite3Select(pParse, yymsp[0].minor.yy539, &dest);
+ sqlite3SelectDelete(pParse->db, yymsp[0].minor.yy539);
+}
+ break;
+ case 83: /* select ::= WITH wqlist selectnowith */
+{
+ Select *p = yymsp[0].minor.yy539;
+ if( p ){
+ p->pWith = yymsp[-1].minor.yy131;
+ parserDoubleLinkSelect(pParse, p);
+ }else{
+ sqlite3WithDelete(pParse->db, yymsp[-1].minor.yy131);
+ }
+ yymsp[-2].minor.yy539 = p;
+}
+ break;
+ case 84: /* select ::= WITH RECURSIVE wqlist selectnowith */
+{
+ Select *p = yymsp[0].minor.yy539;
+ if( p ){
+ p->pWith = yymsp[-1].minor.yy131;
+ parserDoubleLinkSelect(pParse, p);
+ }else{
+ sqlite3WithDelete(pParse->db, yymsp[-1].minor.yy131);
+ }
+ yymsp[-3].minor.yy539 = p;
+}
+ break;
+ case 85: /* select ::= selectnowith */
+{
+ Select *p = yymsp[0].minor.yy539;
+ if( p ){
+ parserDoubleLinkSelect(pParse, p);
+ }
+ yymsp[0].minor.yy539 = p; /*A-overwrites-X*/
+}
+ break;
+ case 86: /* selectnowith ::= selectnowith multiselect_op oneselect */
+{
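+  /* A right-hand select that already carries a pPrior chain (e.g. a
+  ** multi-row VALUES list, see case 93) is first wrapped in a FROM
+  ** subquery so the chain is not spliced into the compound built here. */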
+ Select *pRhs = yymsp[0].minor.yy539;
+ Select *pLhs = yymsp[-2].minor.yy539;
+ if( pRhs && pRhs->pPrior ){
+ SrcList *pFrom;
+ Token x;
+ x.n = 0;
+ parserDoubleLinkSelect(pParse, pRhs);
+ pFrom = sqlite3SrcListAppendFromTerm(pParse,0,0,0,&x,pRhs,0,0);
+ pRhs = sqlite3SelectNew(pParse,0,pFrom,0,0,0,0,0,0);
+ }
+ if( pRhs ){
+ pRhs->op = (u8)yymsp[-1].minor.yy192;
+ pRhs->pPrior = pLhs;
+ if( ALWAYS(pLhs) ) pLhs->selFlags &= ~SF_MultiValue;
+ pRhs->selFlags &= ~SF_MultiValue;
+ if( yymsp[-1].minor.yy192!=TK_ALL ) pParse->hasCompound = 1;
+ }else{
+ sqlite3SelectDelete(pParse->db, pLhs);
+ }
+ yymsp[-2].minor.yy539 = pRhs;
+}
+ break;
+ case 87: /* multiselect_op ::= UNION */
+ case 89: /* multiselect_op ::= EXCEPT|INTERSECT */ yytestcase(yyruleno==89);
+{yymsp[0].minor.yy192 = yymsp[0].major; /*A-overwrites-OP*/}
+ break;
+ case 88: /* multiselect_op ::= UNION ALL */
+{yymsp[-1].minor.yy192 = TK_ALL;}
+ break;
+ case 90: /* oneselect ::= SELECT distinct selcollist from where_opt groupby_opt having_opt orderby_opt limit_opt */
+{
+ yymsp[-8].minor.yy539 = sqlite3SelectNew(pParse,yymsp[-6].minor.yy242,yymsp[-5].minor.yy47,yymsp[-4].minor.yy202,yymsp[-3].minor.yy242,yymsp[-2].minor.yy202,yymsp[-1].minor.yy242,yymsp[-7].minor.yy192,yymsp[0].minor.yy202);
+}
+ break;
+ case 91: /* oneselect ::= SELECT distinct selcollist from where_opt groupby_opt having_opt window_clause orderby_opt limit_opt */
+{
+ yymsp[-9].minor.yy539 = sqlite3SelectNew(pParse,yymsp[-7].minor.yy242,yymsp[-6].minor.yy47,yymsp[-5].minor.yy202,yymsp[-4].minor.yy242,yymsp[-3].minor.yy202,yymsp[-1].minor.yy242,yymsp[-8].minor.yy192,yymsp[0].minor.yy202);
+ if( yymsp[-9].minor.yy539 ){
+ yymsp[-9].minor.yy539->pWinDefn = yymsp[-2].minor.yy303;
+ }else{
+ sqlite3WindowListDelete(pParse->db, yymsp[-2].minor.yy303);
+ }
+}
+ break;
+ case 92: /* values ::= VALUES LP nexprlist RP */
+{
+ yymsp[-3].minor.yy539 = sqlite3SelectNew(pParse,yymsp[-1].minor.yy242,0,0,0,0,0,SF_Values,0);
+}
+ break;
+ case 93: /* values ::= values COMMA LP nexprlist RP */
+{
+ Select *pRight, *pLeft = yymsp[-4].minor.yy539;
+ pRight = sqlite3SelectNew(pParse,yymsp[-1].minor.yy242,0,0,0,0,0,SF_Values|SF_MultiValue,0);
+ if( ALWAYS(pLeft) ) pLeft->selFlags &= ~SF_MultiValue;
+ if( pRight ){
+ pRight->op = TK_ALL;
+ pRight->pPrior = pLeft;
+ yymsp[-4].minor.yy539 = pRight;
+ }else{
+ yymsp[-4].minor.yy539 = pLeft;
+ }
+}
+ break;
+ case 94: /* distinct ::= DISTINCT */
+{yymsp[0].minor.yy192 = SF_Distinct;}
+ break;
+ case 95: /* distinct ::= ALL */
+{yymsp[0].minor.yy192 = SF_All;}
+ break;
+ case 97: /* sclp ::= */
+ case 130: /* orderby_opt ::= */ yytestcase(yyruleno==130);
+ case 140: /* groupby_opt ::= */ yytestcase(yyruleno==140);
+ case 220: /* exprlist ::= */ yytestcase(yyruleno==220);
+ case 223: /* paren_exprlist ::= */ yytestcase(yyruleno==223);
+ case 228: /* eidlist_opt ::= */ yytestcase(yyruleno==228);
+{yymsp[1].minor.yy242 = 0;}
+ break;
+ case 98: /* selcollist ::= sclp scanpt expr scanpt as */
+{
+ yymsp[-4].minor.yy242 = sqlite3ExprListAppend(pParse, yymsp[-4].minor.yy242, yymsp[-2].minor.yy202);
+ if( yymsp[0].minor.yy0.n>0 ) sqlite3ExprListSetName(pParse, yymsp[-4].minor.yy242, &yymsp[0].minor.yy0, 1);
+ sqlite3ExprListSetSpan(pParse,yymsp[-4].minor.yy242,yymsp[-3].minor.yy436,yymsp[-1].minor.yy436);
+}
+ break;
+ case 99: /* selcollist ::= sclp scanpt STAR */
+{
+ Expr *p = sqlite3Expr(pParse->db, TK_ASTERISK, 0);
+ yymsp[-2].minor.yy242 = sqlite3ExprListAppend(pParse, yymsp[-2].minor.yy242, p);
+}
+ break;
+ case 100: /* selcollist ::= sclp scanpt nm DOT STAR */
+{
+ Expr *pRight = sqlite3PExpr(pParse, TK_ASTERISK, 0, 0);
+ Expr *pLeft = sqlite3ExprAlloc(pParse->db, TK_ID, &yymsp[-2].minor.yy0, 1);
+ Expr *pDot = sqlite3PExpr(pParse, TK_DOT, pLeft, pRight);
+ yymsp[-4].minor.yy242 = sqlite3ExprListAppend(pParse,yymsp[-4].minor.yy242, pDot);
+}
+ break;
+ case 101: /* as ::= AS nm */
+ case 112: /* dbnm ::= DOT nm */ yytestcase(yyruleno==112);
+ case 244: /* plus_num ::= PLUS INTEGER|FLOAT */ yytestcase(yyruleno==244);
+ case 245: /* minus_num ::= MINUS INTEGER|FLOAT */ yytestcase(yyruleno==245);
+{yymsp[-1].minor.yy0 = yymsp[0].minor.yy0;}
+ break;
+ case 103: /* from ::= */
+ case 106: /* stl_prefix ::= */ yytestcase(yyruleno==106);
+{yymsp[1].minor.yy47 = 0;}
+ break;
+ case 104: /* from ::= FROM seltablist */
+{
+ yymsp[-1].minor.yy47 = yymsp[0].minor.yy47;
+ sqlite3SrcListShiftJoinType(yymsp[-1].minor.yy47);
+}
+ break;
+ case 105: /* stl_prefix ::= seltablist joinop */
+{
+ if( ALWAYS(yymsp[-1].minor.yy47 && yymsp[-1].minor.yy47->nSrc>0) ) yymsp[-1].minor.yy47->a[yymsp[-1].minor.yy47->nSrc-1].fg.jointype = (u8)yymsp[0].minor.yy192;
+}
+ break;
+ case 107: /* seltablist ::= stl_prefix nm dbnm as indexed_opt on_opt using_opt */
+{
+ yymsp[-6].minor.yy47 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-6].minor.yy47,&yymsp[-5].minor.yy0,&yymsp[-4].minor.yy0,&yymsp[-3].minor.yy0,0,yymsp[-1].minor.yy202,yymsp[0].minor.yy600);
+ sqlite3SrcListIndexedBy(pParse, yymsp[-6].minor.yy47, &yymsp[-2].minor.yy0);
+}
+ break;
+ case 108: /* seltablist ::= stl_prefix nm dbnm LP exprlist RP as on_opt using_opt */
+{
+ yymsp[-8].minor.yy47 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-8].minor.yy47,&yymsp[-7].minor.yy0,&yymsp[-6].minor.yy0,&yymsp[-2].minor.yy0,0,yymsp[-1].minor.yy202,yymsp[0].minor.yy600);
+ sqlite3SrcListFuncArgs(pParse, yymsp[-8].minor.yy47, yymsp[-4].minor.yy242);
+}
+ break;
+ case 109: /* seltablist ::= stl_prefix LP select RP as on_opt using_opt */
+{
+ yymsp[-6].minor.yy47 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-6].minor.yy47,0,0,&yymsp[-2].minor.yy0,yymsp[-4].minor.yy539,yymsp[-1].minor.yy202,yymsp[0].minor.yy600);
+ }
+ break;
+ case 110: /* seltablist ::= stl_prefix LP seltablist RP as on_opt using_opt */
+{
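+   /* Three outcomes: with no preceding join term, alias, ON, or USING
+   ** clause the parentheses are a no-op and the inner list is used
+   ** as-is; a single-entry inner list is merged into a fresh term;
+   ** anything else becomes a nested sub-select flagged SF_NestedFrom. */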
+ if( yymsp[-6].minor.yy47==0 && yymsp[-2].minor.yy0.n==0 && yymsp[-1].minor.yy202==0 && yymsp[0].minor.yy600==0 ){
+ yymsp[-6].minor.yy47 = yymsp[-4].minor.yy47;
+ }else if( yymsp[-4].minor.yy47->nSrc==1 ){
+ yymsp[-6].minor.yy47 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-6].minor.yy47,0,0,&yymsp[-2].minor.yy0,0,yymsp[-1].minor.yy202,yymsp[0].minor.yy600);
+ if( yymsp[-6].minor.yy47 ){
+ struct SrcList_item *pNew = &yymsp[-6].minor.yy47->a[yymsp[-6].minor.yy47->nSrc-1];
+ struct SrcList_item *pOld = yymsp[-4].minor.yy47->a;
+ pNew->zName = pOld->zName;
+ pNew->zDatabase = pOld->zDatabase;
+ pNew->pSelect = pOld->pSelect;
+ if( pOld->fg.isTabFunc ){
+ pNew->u1.pFuncArg = pOld->u1.pFuncArg;
+ pOld->u1.pFuncArg = 0;
+ pOld->fg.isTabFunc = 0;
+ pNew->fg.isTabFunc = 1;
+ }
+ pOld->zName = pOld->zDatabase = 0;
+ pOld->pSelect = 0;
+ }
+ sqlite3SrcListDelete(pParse->db, yymsp[-4].minor.yy47);
+ }else{
+ Select *pSubquery;
+ sqlite3SrcListShiftJoinType(yymsp[-4].minor.yy47);
+ pSubquery = sqlite3SelectNew(pParse,0,yymsp[-4].minor.yy47,0,0,0,0,SF_NestedFrom,0);
+ yymsp[-6].minor.yy47 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-6].minor.yy47,0,0,&yymsp[-2].minor.yy0,pSubquery,yymsp[-1].minor.yy202,yymsp[0].minor.yy600);
+ }
+ }
+ break;
+ case 111: /* dbnm ::= */
+ case 125: /* indexed_opt ::= */ yytestcase(yyruleno==125);
+{yymsp[1].minor.yy0.z=0; yymsp[1].minor.yy0.n=0;}
+ break;
+ case 113: /* fullname ::= nm */
+{
+ yylhsminor.yy47 = sqlite3SrcListAppend(pParse,0,&yymsp[0].minor.yy0,0);
+ if( IN_RENAME_OBJECT && yylhsminor.yy47 ) sqlite3RenameTokenMap(pParse, yylhsminor.yy47->a[0].zName, &yymsp[0].minor.yy0);
+}
+ yymsp[0].minor.yy47 = yylhsminor.yy47;
+ break;
+ case 114: /* fullname ::= nm DOT nm */
+{
+ yylhsminor.yy47 = sqlite3SrcListAppend(pParse,0,&yymsp[-2].minor.yy0,&yymsp[0].minor.yy0);
+ if( IN_RENAME_OBJECT && yylhsminor.yy47 ) sqlite3RenameTokenMap(pParse, yylhsminor.yy47->a[0].zName, &yymsp[0].minor.yy0);
+}
+ yymsp[-2].minor.yy47 = yylhsminor.yy47;
+ break;
+ case 115: /* xfullname ::= nm */
+{yymsp[0].minor.yy47 = sqlite3SrcListAppend(pParse,0,&yymsp[0].minor.yy0,0); /*A-overwrites-X*/}
+ break;
+ case 116: /* xfullname ::= nm DOT nm */
+{yymsp[-2].minor.yy47 = sqlite3SrcListAppend(pParse,0,&yymsp[-2].minor.yy0,&yymsp[0].minor.yy0); /*A-overwrites-X*/}
+ break;
+ case 117: /* xfullname ::= nm DOT nm AS nm */
+{
+ yymsp[-4].minor.yy47 = sqlite3SrcListAppend(pParse,0,&yymsp[-4].minor.yy0,&yymsp[-2].minor.yy0); /*A-overwrites-X*/
+ if( yymsp[-4].minor.yy47 ) yymsp[-4].minor.yy47->a[0].zAlias = sqlite3NameFromToken(pParse->db, &yymsp[0].minor.yy0);
+}
+ break;
+ case 118: /* xfullname ::= nm AS nm */
+{
+ yymsp[-2].minor.yy47 = sqlite3SrcListAppend(pParse,0,&yymsp[-2].minor.yy0,0); /*A-overwrites-X*/
+ if( yymsp[-2].minor.yy47 ) yymsp[-2].minor.yy47->a[0].zAlias = sqlite3NameFromToken(pParse->db, &yymsp[0].minor.yy0);
+}
+ break;
+ case 119: /* joinop ::= COMMA|JOIN */
+{ yymsp[0].minor.yy192 = JT_INNER; }
+ break;
+ case 120: /* joinop ::= JOIN_KW JOIN */
+{yymsp[-1].minor.yy192 = sqlite3JoinType(pParse,&yymsp[-1].minor.yy0,0,0); /*X-overwrites-A*/}
+ break;
+ case 121: /* joinop ::= JOIN_KW nm JOIN */
+{yymsp[-2].minor.yy192 = sqlite3JoinType(pParse,&yymsp[-2].minor.yy0,&yymsp[-1].minor.yy0,0); /*X-overwrites-A*/}
+ break;
+ case 122: /* joinop ::= JOIN_KW nm nm JOIN */
+{yymsp[-3].minor.yy192 = sqlite3JoinType(pParse,&yymsp[-3].minor.yy0,&yymsp[-2].minor.yy0,&yymsp[-1].minor.yy0);/*X-overwrites-A*/}
+ break;
+ case 123: /* on_opt ::= ON expr */
+ case 143: /* having_opt ::= HAVING expr */ yytestcase(yyruleno==143);
+ case 150: /* where_opt ::= WHERE expr */ yytestcase(yyruleno==150);
+ case 216: /* case_else ::= ELSE expr */ yytestcase(yyruleno==216);
+ case 237: /* vinto ::= INTO expr */ yytestcase(yyruleno==237);
+{yymsp[-1].minor.yy202 = yymsp[0].minor.yy202;}
+ break;
+ case 124: /* on_opt ::= */
+ case 142: /* having_opt ::= */ yytestcase(yyruleno==142);
+ case 144: /* limit_opt ::= */ yytestcase(yyruleno==144);
+ case 149: /* where_opt ::= */ yytestcase(yyruleno==149);
+ case 217: /* case_else ::= */ yytestcase(yyruleno==217);
+ case 219: /* case_operand ::= */ yytestcase(yyruleno==219);
+ case 238: /* vinto ::= */ yytestcase(yyruleno==238);
+{yymsp[1].minor.yy202 = 0;}
+ break;
+ case 126: /* indexed_opt ::= INDEXED BY nm */
+{yymsp[-2].minor.yy0 = yymsp[0].minor.yy0;}
+ break;
+ case 127: /* indexed_opt ::= NOT INDEXED */
+{yymsp[-1].minor.yy0.z=0; yymsp[-1].minor.yy0.n=1;}
+ break;
+ case 128: /* using_opt ::= USING LP idlist RP */
+{yymsp[-3].minor.yy600 = yymsp[-1].minor.yy600;}
+ break;
+ case 129: /* using_opt ::= */
+ case 164: /* idlist_opt ::= */ yytestcase(yyruleno==164);
+{yymsp[1].minor.yy600 = 0;}
+ break;
+ case 131: /* orderby_opt ::= ORDER BY sortlist */
+ case 141: /* groupby_opt ::= GROUP BY nexprlist */ yytestcase(yyruleno==141);
+{yymsp[-2].minor.yy242 = yymsp[0].minor.yy242;}
+ break;
+ case 132: /* sortlist ::= sortlist COMMA expr sortorder nulls */
+{
+ yymsp[-4].minor.yy242 = sqlite3ExprListAppend(pParse,yymsp[-4].minor.yy242,yymsp[-2].minor.yy202);
+ sqlite3ExprListSetSortOrder(yymsp[-4].minor.yy242,yymsp[-1].minor.yy192,yymsp[0].minor.yy192);
+}
+ break;
+ case 133: /* sortlist ::= expr sortorder nulls */
+{
+ yymsp[-2].minor.yy242 = sqlite3ExprListAppend(pParse,0,yymsp[-2].minor.yy202); /*A-overwrites-Y*/
+ sqlite3ExprListSetSortOrder(yymsp[-2].minor.yy242,yymsp[-1].minor.yy192,yymsp[0].minor.yy192);
+}
+ break;
+ case 134: /* sortorder ::= ASC */
+{yymsp[0].minor.yy192 = SQLITE_SO_ASC;}
+ break;
+ case 135: /* sortorder ::= DESC */
+{yymsp[0].minor.yy192 = SQLITE_SO_DESC;}
+ break;
+ case 136: /* sortorder ::= */
+ case 139: /* nulls ::= */ yytestcase(yyruleno==139);
+{yymsp[1].minor.yy192 = SQLITE_SO_UNDEFINED;}
+ break;
+ case 137: /* nulls ::= NULLS FIRST */
+{yymsp[-1].minor.yy192 = SQLITE_SO_ASC;}
+ break;
+ case 138: /* nulls ::= NULLS LAST */
+{yymsp[-1].minor.yy192 = SQLITE_SO_DESC;}
+ break;
+ case 145: /* limit_opt ::= LIMIT expr */
+{yymsp[-1].minor.yy202 = sqlite3PExpr(pParse,TK_LIMIT,yymsp[0].minor.yy202,0);}
+ break;
+ case 146: /* limit_opt ::= LIMIT expr OFFSET expr */
+{yymsp[-3].minor.yy202 = sqlite3PExpr(pParse,TK_LIMIT,yymsp[-2].minor.yy202,yymsp[0].minor.yy202);}
+ break;
+ case 147: /* limit_opt ::= LIMIT expr COMMA expr */
+{yymsp[-3].minor.yy202 = sqlite3PExpr(pParse,TK_LIMIT,yymsp[0].minor.yy202,yymsp[-2].minor.yy202);}
+ break;
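+      /* In the "LIMIT skip, count" comma form the operands arrive in
+      ** the opposite order from "LIMIT count OFFSET skip", so the two
+      ** expressions are swapped relative to case 146. */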
+ case 148: /* cmd ::= with DELETE FROM xfullname indexed_opt where_opt orderby_opt limit_opt */
+{
+ sqlite3SrcListIndexedBy(pParse, yymsp[-4].minor.yy47, &yymsp[-3].minor.yy0);
+#ifndef SQLITE_ENABLE_UPDATE_DELETE_LIMIT
+ if( yymsp[-1].minor.yy242 || yymsp[0].minor.yy202 ){
+ updateDeleteLimitError(pParse,yymsp[-1].minor.yy242,yymsp[0].minor.yy202);
+ yymsp[-1].minor.yy242 = 0;
+ yymsp[0].minor.yy202 = 0;
+ }
+#endif
+ sqlite3DeleteFrom(pParse,yymsp[-4].minor.yy47,yymsp[-2].minor.yy202,yymsp[-1].minor.yy242,yymsp[0].minor.yy202);
+}
+ break;
+ case 151: /* cmd ::= with UPDATE orconf xfullname indexed_opt SET setlist from where_opt orderby_opt limit_opt */
+{
+ sqlite3SrcListIndexedBy(pParse, yymsp[-7].minor.yy47, &yymsp[-6].minor.yy0);
+ yymsp[-7].minor.yy47 = sqlite3SrcListAppendList(pParse, yymsp[-7].minor.yy47, yymsp[-3].minor.yy47);
+ sqlite3ExprListCheckLength(pParse,yymsp[-4].minor.yy242,"set list");
+#ifndef SQLITE_ENABLE_UPDATE_DELETE_LIMIT
+ if( yymsp[-1].minor.yy242 || yymsp[0].minor.yy202 ){
+ updateDeleteLimitError(pParse,yymsp[-1].minor.yy242,yymsp[0].minor.yy202);
+ yymsp[-1].minor.yy242 = 0;
+ yymsp[0].minor.yy202 = 0;
+ }
+#endif
+ sqlite3Update(pParse,yymsp[-7].minor.yy47,yymsp[-4].minor.yy242,yymsp[-2].minor.yy202,yymsp[-8].minor.yy192,yymsp[-1].minor.yy242,yymsp[0].minor.yy202,0);
+}
+ break;
+ case 152: /* setlist ::= setlist COMMA nm EQ expr */
+{
+ yymsp[-4].minor.yy242 = sqlite3ExprListAppend(pParse, yymsp[-4].minor.yy242, yymsp[0].minor.yy202);
+ sqlite3ExprListSetName(pParse, yymsp[-4].minor.yy242, &yymsp[-2].minor.yy0, 1);
+}
+ break;
+ case 153: /* setlist ::= setlist COMMA LP idlist RP EQ expr */
+{
+ yymsp[-6].minor.yy242 = sqlite3ExprListAppendVector(pParse, yymsp[-6].minor.yy242, yymsp[-3].minor.yy600, yymsp[0].minor.yy202);
+}
+ break;
+ case 154: /* setlist ::= nm EQ expr */
+{
+ yylhsminor.yy242 = sqlite3ExprListAppend(pParse, 0, yymsp[0].minor.yy202);
+ sqlite3ExprListSetName(pParse, yylhsminor.yy242, &yymsp[-2].minor.yy0, 1);
+}
+ yymsp[-2].minor.yy242 = yylhsminor.yy242;
+ break;
+ case 155: /* setlist ::= LP idlist RP EQ expr */
+{
+ yymsp[-4].minor.yy242 = sqlite3ExprListAppendVector(pParse, 0, yymsp[-3].minor.yy600, yymsp[0].minor.yy202);
+}
+ break;
+ case 156: /* cmd ::= with insert_cmd INTO xfullname idlist_opt select upsert */
+{
+ sqlite3Insert(pParse, yymsp[-3].minor.yy47, yymsp[-1].minor.yy539, yymsp[-2].minor.yy600, yymsp[-5].minor.yy192, yymsp[0].minor.yy318);
+}
+ break;
+ case 157: /* cmd ::= with insert_cmd INTO xfullname idlist_opt DEFAULT VALUES */
+{
+ sqlite3Insert(pParse, yymsp[-3].minor.yy47, 0, yymsp[-2].minor.yy600, yymsp[-5].minor.yy192, 0);
+}
+ break;
+ case 158: /* upsert ::= */
+{ yymsp[1].minor.yy318 = 0; }
+ break;
+ case 159: /* upsert ::= ON CONFLICT LP sortlist RP where_opt DO UPDATE SET setlist where_opt */
+{ yymsp[-10].minor.yy318 = sqlite3UpsertNew(pParse->db,yymsp[-7].minor.yy242,yymsp[-5].minor.yy202,yymsp[-1].minor.yy242,yymsp[0].minor.yy202);}
+ break;
+ case 160: /* upsert ::= ON CONFLICT LP sortlist RP where_opt DO NOTHING */
+{ yymsp[-7].minor.yy318 = sqlite3UpsertNew(pParse->db,yymsp[-4].minor.yy242,yymsp[-2].minor.yy202,0,0); }
+ break;
+ case 161: /* upsert ::= ON CONFLICT DO NOTHING */
+{ yymsp[-3].minor.yy318 = sqlite3UpsertNew(pParse->db,0,0,0,0); }
+ break;
+ case 165: /* idlist_opt ::= LP idlist RP */
+{yymsp[-2].minor.yy600 = yymsp[-1].minor.yy600;}
+ break;
+ case 166: /* idlist ::= idlist COMMA nm */
+{yymsp[-2].minor.yy600 = sqlite3IdListAppend(pParse,yymsp[-2].minor.yy600,&yymsp[0].minor.yy0);}
+ break;
+ case 167: /* idlist ::= nm */
+{yymsp[0].minor.yy600 = sqlite3IdListAppend(pParse,0,&yymsp[0].minor.yy0); /*A-overwrites-Y*/}
+ break;
+ case 168: /* expr ::= LP expr RP */
+{yymsp[-2].minor.yy202 = yymsp[-1].minor.yy202;}
+ break;
+ case 169: /* expr ::= ID|INDEXED */
+ case 170: /* expr ::= JOIN_KW */ yytestcase(yyruleno==170);
+{yymsp[0].minor.yy202=tokenExpr(pParse,TK_ID,yymsp[0].minor.yy0); /*A-overwrites-X*/}
+ break;
+ case 171: /* expr ::= nm DOT nm */
+{
+ Expr *temp1 = sqlite3ExprAlloc(pParse->db, TK_ID, &yymsp[-2].minor.yy0, 1);
+ Expr *temp2 = sqlite3ExprAlloc(pParse->db, TK_ID, &yymsp[0].minor.yy0, 1);
+ if( IN_RENAME_OBJECT ){
+ sqlite3RenameTokenMap(pParse, (void*)temp2, &yymsp[0].minor.yy0);
+ sqlite3RenameTokenMap(pParse, (void*)temp1, &yymsp[-2].minor.yy0);
+ }
+ yylhsminor.yy202 = sqlite3PExpr(pParse, TK_DOT, temp1, temp2);
+}
+ yymsp[-2].minor.yy202 = yylhsminor.yy202;
+ break;
+ case 172: /* expr ::= nm DOT nm DOT nm */
+{
+ Expr *temp1 = sqlite3ExprAlloc(pParse->db, TK_ID, &yymsp[-4].minor.yy0, 1);
+ Expr *temp2 = sqlite3ExprAlloc(pParse->db, TK_ID, &yymsp[-2].minor.yy0, 1);
+ Expr *temp3 = sqlite3ExprAlloc(pParse->db, TK_ID, &yymsp[0].minor.yy0, 1);
+ Expr *temp4 = sqlite3PExpr(pParse, TK_DOT, temp2, temp3);
+ if( IN_RENAME_OBJECT ){
+ sqlite3RenameTokenMap(pParse, (void*)temp3, &yymsp[0].minor.yy0);
+ sqlite3RenameTokenMap(pParse, (void*)temp2, &yymsp[-2].minor.yy0);
+ }
+ yylhsminor.yy202 = sqlite3PExpr(pParse, TK_DOT, temp1, temp4);
+}
+ yymsp[-4].minor.yy202 = yylhsminor.yy202;
+ break;
+ case 173: /* term ::= NULL|FLOAT|BLOB */
+ case 174: /* term ::= STRING */ yytestcase(yyruleno==174);
+{yymsp[0].minor.yy202=tokenExpr(pParse,yymsp[0].major,yymsp[0].minor.yy0); /*A-overwrites-X*/}
+ break;
+ case 175: /* term ::= INTEGER */
+{
+ yylhsminor.yy202 = sqlite3ExprAlloc(pParse->db, TK_INTEGER, &yymsp[0].minor.yy0, 1);
+}
+ yymsp[0].minor.yy202 = yylhsminor.yy202;
+ break;
+ case 176: /* expr ::= VARIABLE */
+{
+ if( !(yymsp[0].minor.yy0.z[0]=='#' && sqlite3Isdigit(yymsp[0].minor.yy0.z[1])) ){
+ u32 n = yymsp[0].minor.yy0.n;
+ yymsp[0].minor.yy202 = tokenExpr(pParse, TK_VARIABLE, yymsp[0].minor.yy0);
+ sqlite3ExprAssignVarNumber(pParse, yymsp[0].minor.yy202, n);
+ }else{
+ /* When doing a nested parse, one can include terms in an expression
+ ** that look like this: #1 #2 ... These terms refer to registers
+ ** in the virtual machine. #N is the N-th register. */
+ Token t = yymsp[0].minor.yy0; /*A-overwrites-X*/
+ assert( t.n>=2 );
+ if( pParse->nested==0 ){
+ sqlite3ErrorMsg(pParse, "near \"%T\": syntax error", &t);
+ yymsp[0].minor.yy202 = 0;
+ }else{
+ yymsp[0].minor.yy202 = sqlite3PExpr(pParse, TK_REGISTER, 0, 0);
+ if( yymsp[0].minor.yy202 ) sqlite3GetInt32(&t.z[1], &yymsp[0].minor.yy202->iTable);
+ }
+ }
+}
+ break;
+ case 177: /* expr ::= expr COLLATE ID|STRING */
+{
+ yymsp[-2].minor.yy202 = sqlite3ExprAddCollateToken(pParse, yymsp[-2].minor.yy202, &yymsp[0].minor.yy0, 1);
+}
+ break;
+ case 178: /* expr ::= CAST LP expr AS typetoken RP */
+{
+ yymsp[-5].minor.yy202 = sqlite3ExprAlloc(pParse->db, TK_CAST, &yymsp[-1].minor.yy0, 1);
+ sqlite3ExprAttachSubtrees(pParse->db, yymsp[-5].minor.yy202, yymsp[-3].minor.yy202, 0);
+}
+ break;
+ case 179: /* expr ::= ID|INDEXED LP distinct exprlist RP */
+{
+ yylhsminor.yy202 = sqlite3ExprFunction(pParse, yymsp[-1].minor.yy242, &yymsp[-4].minor.yy0, yymsp[-2].minor.yy192);
+}
+ yymsp[-4].minor.yy202 = yylhsminor.yy202;
+ break;
+ case 180: /* expr ::= ID|INDEXED LP STAR RP */
+{
+ yylhsminor.yy202 = sqlite3ExprFunction(pParse, 0, &yymsp[-3].minor.yy0, 0);
+}
+ yymsp[-3].minor.yy202 = yylhsminor.yy202;
+ break;
+ case 181: /* expr ::= ID|INDEXED LP distinct exprlist RP filter_over */
+{
+ yylhsminor.yy202 = sqlite3ExprFunction(pParse, yymsp[-2].minor.yy242, &yymsp[-5].minor.yy0, yymsp[-3].minor.yy192);
+ sqlite3WindowAttach(pParse, yylhsminor.yy202, yymsp[0].minor.yy303);
+}
+ yymsp[-5].minor.yy202 = yylhsminor.yy202;
+ break;
+ case 182: /* expr ::= ID|INDEXED LP STAR RP filter_over */
+{
+ yylhsminor.yy202 = sqlite3ExprFunction(pParse, 0, &yymsp[-4].minor.yy0, 0);
+ sqlite3WindowAttach(pParse, yylhsminor.yy202, yymsp[0].minor.yy303);
+}
+ yymsp[-4].minor.yy202 = yylhsminor.yy202;
+ break;
+ case 183: /* term ::= CTIME_KW */
+{
+ yylhsminor.yy202 = sqlite3ExprFunction(pParse, 0, &yymsp[0].minor.yy0, 0);
+}
+ yymsp[0].minor.yy202 = yylhsminor.yy202;
+ break;
+ case 184: /* expr ::= LP nexprlist COMMA expr RP */
+{
+ ExprList *pList = sqlite3ExprListAppend(pParse, yymsp[-3].minor.yy242, yymsp[-1].minor.yy202);
+ yymsp[-4].minor.yy202 = sqlite3PExpr(pParse, TK_VECTOR, 0, 0);
+ if( yymsp[-4].minor.yy202 ){
+ yymsp[-4].minor.yy202->x.pList = pList;
+ if( ALWAYS(pList->nExpr) ){
+ yymsp[-4].minor.yy202->flags |= pList->a[0].pExpr->flags & EP_Propagate;
+ }
+ }else{
+ sqlite3ExprListDelete(pParse->db, pList);
+ }
+}
+ break;
+ case 185: /* expr ::= expr AND expr */
+{yymsp[-2].minor.yy202=sqlite3ExprAnd(pParse,yymsp[-2].minor.yy202,yymsp[0].minor.yy202);}
+ break;
+ case 186: /* expr ::= expr OR expr */
+ case 187: /* expr ::= expr LT|GT|GE|LE expr */ yytestcase(yyruleno==187);
+ case 188: /* expr ::= expr EQ|NE expr */ yytestcase(yyruleno==188);
+ case 189: /* expr ::= expr BITAND|BITOR|LSHIFT|RSHIFT expr */ yytestcase(yyruleno==189);
+ case 190: /* expr ::= expr PLUS|MINUS expr */ yytestcase(yyruleno==190);
+ case 191: /* expr ::= expr STAR|SLASH|REM expr */ yytestcase(yyruleno==191);
+ case 192: /* expr ::= expr CONCAT expr */ yytestcase(yyruleno==192);
+{yymsp[-2].minor.yy202=sqlite3PExpr(pParse,yymsp[-1].major,yymsp[-2].minor.yy202,yymsp[0].minor.yy202);}
+ break;
+ case 193: /* likeop ::= NOT LIKE_KW|MATCH */
+{yymsp[-1].minor.yy0=yymsp[0].minor.yy0; yymsp[-1].minor.yy0.n|=0x80000000; /*yymsp[-1].minor.yy0-overwrite-yymsp[0].minor.yy0*/}
+ break;
+ case 194: /* expr ::= expr likeop expr */
+{
+ ExprList *pList;
+ int bNot = yymsp[-1].minor.yy0.n & 0x80000000;
+ yymsp[-1].minor.yy0.n &= 0x7fffffff;
+ pList = sqlite3ExprListAppend(pParse,0, yymsp[0].minor.yy202);
+ pList = sqlite3ExprListAppend(pParse,pList, yymsp[-2].minor.yy202);
+ yymsp[-2].minor.yy202 = sqlite3ExprFunction(pParse, pList, &yymsp[-1].minor.yy0, 0);
+ if( bNot ) yymsp[-2].minor.yy202 = sqlite3PExpr(pParse, TK_NOT, yymsp[-2].minor.yy202, 0);
+ if( yymsp[-2].minor.yy202 ) yymsp[-2].minor.yy202->flags |= EP_InfixFunc;
+}
+ break;
+ case 195: /* expr ::= expr likeop expr ESCAPE expr */
+{
+ ExprList *pList;
+ int bNot = yymsp[-3].minor.yy0.n & 0x80000000;
+ yymsp[-3].minor.yy0.n &= 0x7fffffff;
+ pList = sqlite3ExprListAppend(pParse,0, yymsp[-2].minor.yy202);
+ pList = sqlite3ExprListAppend(pParse,pList, yymsp[-4].minor.yy202);
+ pList = sqlite3ExprListAppend(pParse,pList, yymsp[0].minor.yy202);
+ yymsp[-4].minor.yy202 = sqlite3ExprFunction(pParse, pList, &yymsp[-3].minor.yy0, 0);
+ if( bNot ) yymsp[-4].minor.yy202 = sqlite3PExpr(pParse, TK_NOT, yymsp[-4].minor.yy202, 0);
+ if( yymsp[-4].minor.yy202 ) yymsp[-4].minor.yy202->flags |= EP_InfixFunc;
+}
+ break;
+ case 196: /* expr ::= expr ISNULL|NOTNULL */
+{yymsp[-1].minor.yy202 = sqlite3PExpr(pParse,yymsp[0].major,yymsp[-1].minor.yy202,0);}
+ break;
+ case 197: /* expr ::= expr NOT NULL */
+{yymsp[-2].minor.yy202 = sqlite3PExpr(pParse,TK_NOTNULL,yymsp[-2].minor.yy202,0);}
+ break;
+ case 198: /* expr ::= expr IS expr */
+{
+ yymsp[-2].minor.yy202 = sqlite3PExpr(pParse,TK_IS,yymsp[-2].minor.yy202,yymsp[0].minor.yy202);
+ binaryToUnaryIfNull(pParse, yymsp[0].minor.yy202, yymsp[-2].minor.yy202, TK_ISNULL);
+}
+ break;
+ case 199: /* expr ::= expr IS NOT expr */
+{
+ yymsp[-3].minor.yy202 = sqlite3PExpr(pParse,TK_ISNOT,yymsp[-3].minor.yy202,yymsp[0].minor.yy202);
+ binaryToUnaryIfNull(pParse, yymsp[0].minor.yy202, yymsp[-3].minor.yy202, TK_NOTNULL);
+}
+ break;
+ case 200: /* expr ::= NOT expr */
+ case 201: /* expr ::= BITNOT expr */ yytestcase(yyruleno==201);
+{yymsp[-1].minor.yy202 = sqlite3PExpr(pParse, yymsp[-1].major, yymsp[0].minor.yy202, 0);/*A-overwrites-B*/}
+ break;
+ case 202: /* expr ::= PLUS|MINUS expr */
+{
+ yymsp[-1].minor.yy202 = sqlite3PExpr(pParse, yymsp[-1].major==TK_PLUS ? TK_UPLUS : TK_UMINUS, yymsp[0].minor.yy202, 0);
+ /*A-overwrites-B*/
+}
+ break;
+ case 203: /* between_op ::= BETWEEN */
+ case 206: /* in_op ::= IN */ yytestcase(yyruleno==206);
+{yymsp[0].minor.yy192 = 0;}
+ break;
+ case 205: /* expr ::= expr between_op expr AND expr */
+{
+ ExprList *pList = sqlite3ExprListAppend(pParse,0, yymsp[-2].minor.yy202);
+ pList = sqlite3ExprListAppend(pParse,pList, yymsp[0].minor.yy202);
+ yymsp[-4].minor.yy202 = sqlite3PExpr(pParse, TK_BETWEEN, yymsp[-4].minor.yy202, 0);
+ if( yymsp[-4].minor.yy202 ){
+ yymsp[-4].minor.yy202->x.pList = pList;
+ }else{
+ sqlite3ExprListDelete(pParse->db, pList);
+ }
+ if( yymsp[-3].minor.yy192 ) yymsp[-4].minor.yy202 = sqlite3PExpr(pParse, TK_NOT, yymsp[-4].minor.yy202, 0);
+}
+ break;
+ case 208: /* expr ::= expr in_op LP exprlist RP */
+{
+ if( yymsp[-1].minor.yy242==0 ){
+ /* Expressions of the form
+ **
+ ** expr1 IN ()
+ ** expr1 NOT IN ()
+ **
+ ** simplify to constants 0 (false) and 1 (true), respectively,
+ ** regardless of the value of expr1.
+ */
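+    /* E.g. "5 IN ()" reduces to the integer literal 0 and "5 NOT IN ()"
+    ** to the literal 1, so no membership test is ever code-generated. */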
+ sqlite3ExprUnmapAndDelete(pParse, yymsp[-4].minor.yy202);
+ yymsp[-4].minor.yy202 = sqlite3Expr(pParse->db, TK_INTEGER, yymsp[-3].minor.yy192 ? "1" : "0");
+ }else if( yymsp[-1].minor.yy242->nExpr==1 && sqlite3ExprIsConstant(yymsp[-1].minor.yy242->a[0].pExpr) ){
+ Expr *pRHS = yymsp[-1].minor.yy242->a[0].pExpr;
+ yymsp[-1].minor.yy242->a[0].pExpr = 0;
+ sqlite3ExprListDelete(pParse->db, yymsp[-1].minor.yy242);
+ pRHS = sqlite3PExpr(pParse, TK_UPLUS, pRHS, 0);
+ yymsp[-4].minor.yy202 = sqlite3PExpr(pParse, TK_EQ, yymsp[-4].minor.yy202, pRHS);
+ if( yymsp[-3].minor.yy192 ) yymsp[-4].minor.yy202 = sqlite3PExpr(pParse, TK_NOT, yymsp[-4].minor.yy202, 0);
+ }else{
+ yymsp[-4].minor.yy202 = sqlite3PExpr(pParse, TK_IN, yymsp[-4].minor.yy202, 0);
+ if( yymsp[-4].minor.yy202 ){
+ yymsp[-4].minor.yy202->x.pList = yymsp[-1].minor.yy242;
+ sqlite3ExprSetHeightAndFlags(pParse, yymsp[-4].minor.yy202);
+ }else{
+ sqlite3ExprListDelete(pParse->db, yymsp[-1].minor.yy242);
+ }
+ if( yymsp[-3].minor.yy192 ) yymsp[-4].minor.yy202 = sqlite3PExpr(pParse, TK_NOT, yymsp[-4].minor.yy202, 0);
+ }
+ }
+ break;
+ case 209: /* expr ::= LP select RP */
+{
+ yymsp[-2].minor.yy202 = sqlite3PExpr(pParse, TK_SELECT, 0, 0);
+ sqlite3PExprAddSelect(pParse, yymsp[-2].minor.yy202, yymsp[-1].minor.yy539);
+ }
+ break;
+ case 210: /* expr ::= expr in_op LP select RP */
+{
+ yymsp[-4].minor.yy202 = sqlite3PExpr(pParse, TK_IN, yymsp[-4].minor.yy202, 0);
+ sqlite3PExprAddSelect(pParse, yymsp[-4].minor.yy202, yymsp[-1].minor.yy539);
+ if( yymsp[-3].minor.yy192 ) yymsp[-4].minor.yy202 = sqlite3PExpr(pParse, TK_NOT, yymsp[-4].minor.yy202, 0);
+ }
+ break;
+ case 211: /* expr ::= expr in_op nm dbnm paren_exprlist */
+{
+ SrcList *pSrc = sqlite3SrcListAppend(pParse, 0,&yymsp[-2].minor.yy0,&yymsp[-1].minor.yy0);
+ Select *pSelect = sqlite3SelectNew(pParse, 0,pSrc,0,0,0,0,0,0);
+ if( yymsp[0].minor.yy242 ) sqlite3SrcListFuncArgs(pParse, pSelect ? pSrc : 0, yymsp[0].minor.yy242);
+ yymsp[-4].minor.yy202 = sqlite3PExpr(pParse, TK_IN, yymsp[-4].minor.yy202, 0);
+ sqlite3PExprAddSelect(pParse, yymsp[-4].minor.yy202, pSelect);
+ if( yymsp[-3].minor.yy192 ) yymsp[-4].minor.yy202 = sqlite3PExpr(pParse, TK_NOT, yymsp[-4].minor.yy202, 0);
+ }
+ break;
+ case 212: /* expr ::= EXISTS LP select RP */
+{
+ Expr *p;
+ p = yymsp[-3].minor.yy202 = sqlite3PExpr(pParse, TK_EXISTS, 0, 0);
+ sqlite3PExprAddSelect(pParse, p, yymsp[-1].minor.yy539);
+ }
+ break;
+ case 213: /* expr ::= CASE case_operand case_exprlist case_else END */
+{
+ yymsp[-4].minor.yy202 = sqlite3PExpr(pParse, TK_CASE, yymsp[-3].minor.yy202, 0);
+ if( yymsp[-4].minor.yy202 ){
+ yymsp[-4].minor.yy202->x.pList = yymsp[-1].minor.yy202 ? sqlite3ExprListAppend(pParse,yymsp[-2].minor.yy242,yymsp[-1].minor.yy202) : yymsp[-2].minor.yy242;
+ sqlite3ExprSetHeightAndFlags(pParse, yymsp[-4].minor.yy202);
+ }else{
+ sqlite3ExprListDelete(pParse->db, yymsp[-2].minor.yy242);
+ sqlite3ExprDelete(pParse->db, yymsp[-1].minor.yy202);
+ }
+}
+ break;
+ case 214: /* case_exprlist ::= case_exprlist WHEN expr THEN expr */
+{
+ yymsp[-4].minor.yy242 = sqlite3ExprListAppend(pParse,yymsp[-4].minor.yy242, yymsp[-2].minor.yy202);
+ yymsp[-4].minor.yy242 = sqlite3ExprListAppend(pParse,yymsp[-4].minor.yy242, yymsp[0].minor.yy202);
+}
+ break;
+ case 215: /* case_exprlist ::= WHEN expr THEN expr */
+{
+ yymsp[-3].minor.yy242 = sqlite3ExprListAppend(pParse,0, yymsp[-2].minor.yy202);
+ yymsp[-3].minor.yy242 = sqlite3ExprListAppend(pParse,yymsp[-3].minor.yy242, yymsp[0].minor.yy202);
+}
+ break;
+ case 218: /* case_operand ::= expr */
+{yymsp[0].minor.yy202 = yymsp[0].minor.yy202; /*A-overwrites-X*/}
+ break;
+ case 221: /* nexprlist ::= nexprlist COMMA expr */
+{yymsp[-2].minor.yy242 = sqlite3ExprListAppend(pParse,yymsp[-2].minor.yy242,yymsp[0].minor.yy202);}
+ break;
+ case 222: /* nexprlist ::= expr */
+{yymsp[0].minor.yy242 = sqlite3ExprListAppend(pParse,0,yymsp[0].minor.yy202); /*A-overwrites-Y*/}
+ break;
+ case 224: /* paren_exprlist ::= LP exprlist RP */
+ case 229: /* eidlist_opt ::= LP eidlist RP */ yytestcase(yyruleno==229);
+{yymsp[-2].minor.yy242 = yymsp[-1].minor.yy242;}
+ break;
+ case 225: /* cmd ::= createkw uniqueflag INDEX ifnotexists nm dbnm ON nm LP sortlist RP where_opt */
+{
+ sqlite3CreateIndex(pParse, &yymsp[-7].minor.yy0, &yymsp[-6].minor.yy0,
+ sqlite3SrcListAppend(pParse,0,&yymsp[-4].minor.yy0,0), yymsp[-2].minor.yy242, yymsp[-10].minor.yy192,
+ &yymsp[-11].minor.yy0, yymsp[0].minor.yy202, SQLITE_SO_ASC, yymsp[-8].minor.yy192, SQLITE_IDXTYPE_APPDEF);
+ if( IN_RENAME_OBJECT && pParse->pNewIndex ){
+ sqlite3RenameTokenMap(pParse, pParse->pNewIndex->zName, &yymsp[-4].minor.yy0);
+ }
+}
+ break;
+ case 226: /* uniqueflag ::= UNIQUE */
+ case 268: /* raisetype ::= ABORT */ yytestcase(yyruleno==268);
+{yymsp[0].minor.yy192 = OE_Abort;}
+ break;
+ case 227: /* uniqueflag ::= */
+{yymsp[1].minor.yy192 = OE_None;}
+ break;
+ case 230: /* eidlist ::= eidlist COMMA nm collate sortorder */
+{
+ yymsp[-4].minor.yy242 = parserAddExprIdListTerm(pParse, yymsp[-4].minor.yy242, &yymsp[-2].minor.yy0, yymsp[-1].minor.yy192, yymsp[0].minor.yy192);
+}
+ break;
+ case 231: /* eidlist ::= nm collate sortorder */
+{
+ yymsp[-2].minor.yy242 = parserAddExprIdListTerm(pParse, 0, &yymsp[-2].minor.yy0, yymsp[-1].minor.yy192, yymsp[0].minor.yy192); /*A-overwrites-Y*/
+}
+ break;
+ case 234: /* cmd ::= DROP INDEX ifexists fullname */
+{sqlite3DropIndex(pParse, yymsp[0].minor.yy47, yymsp[-1].minor.yy192);}
+ break;
+ case 235: /* cmd ::= VACUUM vinto */
+{sqlite3Vacuum(pParse,0,yymsp[0].minor.yy202);}
+ break;
+ case 236: /* cmd ::= VACUUM nm vinto */
+{sqlite3Vacuum(pParse,&yymsp[-1].minor.yy0,yymsp[0].minor.yy202);}
+ break;
+ case 239: /* cmd ::= PRAGMA nm dbnm */
+{sqlite3Pragma(pParse,&yymsp[-1].minor.yy0,&yymsp[0].minor.yy0,0,0);}
+ break;
+ case 240: /* cmd ::= PRAGMA nm dbnm EQ nmnum */
+{sqlite3Pragma(pParse,&yymsp[-3].minor.yy0,&yymsp[-2].minor.yy0,&yymsp[0].minor.yy0,0);}
+ break;
+ case 241: /* cmd ::= PRAGMA nm dbnm LP nmnum RP */
+{sqlite3Pragma(pParse,&yymsp[-4].minor.yy0,&yymsp[-3].minor.yy0,&yymsp[-1].minor.yy0,0);}
+ break;
+ case 242: /* cmd ::= PRAGMA nm dbnm EQ minus_num */
+{sqlite3Pragma(pParse,&yymsp[-3].minor.yy0,&yymsp[-2].minor.yy0,&yymsp[0].minor.yy0,1);}
+ break;
+ case 243: /* cmd ::= PRAGMA nm dbnm LP minus_num RP */
+{sqlite3Pragma(pParse,&yymsp[-4].minor.yy0,&yymsp[-3].minor.yy0,&yymsp[-1].minor.yy0,1);}
+ break;
+ case 246: /* cmd ::= createkw trigger_decl BEGIN trigger_cmd_list END */
+{
+ Token all;
+ all.z = yymsp[-3].minor.yy0.z;
+ all.n = (int)(yymsp[0].minor.yy0.z - yymsp[-3].minor.yy0.z) + yymsp[0].minor.yy0.n;
+ sqlite3FinishTrigger(pParse, yymsp[-1].minor.yy447, &all);
+}
+ break;
+ case 247: /* trigger_decl ::= temp TRIGGER ifnotexists nm dbnm trigger_time trigger_event ON fullname foreach_clause when_clause */
+{
+ sqlite3BeginTrigger(pParse, &yymsp[-7].minor.yy0, &yymsp[-6].minor.yy0, yymsp[-5].minor.yy192, yymsp[-4].minor.yy230.a, yymsp[-4].minor.yy230.b, yymsp[-2].minor.yy47, yymsp[0].minor.yy202, yymsp[-10].minor.yy192, yymsp[-8].minor.yy192);
+ yymsp[-10].minor.yy0 = (yymsp[-6].minor.yy0.n==0?yymsp[-7].minor.yy0:yymsp[-6].minor.yy0); /*A-overwrites-T*/
+}
+ break;
+ case 248: /* trigger_time ::= BEFORE|AFTER */
+{ yymsp[0].minor.yy192 = yymsp[0].major; /*A-overwrites-X*/ }
+ break;
+ case 249: /* trigger_time ::= INSTEAD OF */
+{ yymsp[-1].minor.yy192 = TK_INSTEAD;}
+ break;
+ case 250: /* trigger_time ::= */
+{ yymsp[1].minor.yy192 = TK_BEFORE; }
+ break;
+ case 251: /* trigger_event ::= DELETE|INSERT */
+ case 252: /* trigger_event ::= UPDATE */ yytestcase(yyruleno==252);
+{yymsp[0].minor.yy230.a = yymsp[0].major; /*A-overwrites-X*/ yymsp[0].minor.yy230.b = 0;}
+ break;
+ case 253: /* trigger_event ::= UPDATE OF idlist */
+{yymsp[-2].minor.yy230.a = TK_UPDATE; yymsp[-2].minor.yy230.b = yymsp[0].minor.yy600;}
+ break;
+ case 254: /* when_clause ::= */
+ case 273: /* key_opt ::= */ yytestcase(yyruleno==273);
+{ yymsp[1].minor.yy202 = 0; }
+ break;
+ case 255: /* when_clause ::= WHEN expr */
+ case 274: /* key_opt ::= KEY expr */ yytestcase(yyruleno==274);
+{ yymsp[-1].minor.yy202 = yymsp[0].minor.yy202; }
+ break;
+ case 256: /* trigger_cmd_list ::= trigger_cmd_list trigger_cmd SEMI */
+{
+ assert( yymsp[-2].minor.yy447!=0 );
+ yymsp[-2].minor.yy447->pLast->pNext = yymsp[-1].minor.yy447;
+ yymsp[-2].minor.yy447->pLast = yymsp[-1].minor.yy447;
+}
+ break;
+ case 257: /* trigger_cmd_list ::= trigger_cmd SEMI */
+{
+ assert( yymsp[-1].minor.yy447!=0 );
+ yymsp[-1].minor.yy447->pLast = yymsp[-1].minor.yy447;
+}
+ break;
+ case 258: /* trnm ::= nm DOT nm */
+{
+ yymsp[-2].minor.yy0 = yymsp[0].minor.yy0;
+ sqlite3ErrorMsg(pParse,
+ "qualified table names are not allowed on INSERT, UPDATE, and DELETE "
+ "statements within triggers");
+}
+ break;
+ case 259: /* tridxby ::= INDEXED BY nm */
+{
+ sqlite3ErrorMsg(pParse,
+ "the INDEXED BY clause is not allowed on UPDATE or DELETE statements "
+ "within triggers");
+}
+ break;
+ case 260: /* tridxby ::= NOT INDEXED */
+{
+ sqlite3ErrorMsg(pParse,
+ "the NOT INDEXED clause is not allowed on UPDATE or DELETE statements "
+ "within triggers");
+}
+ break;
+ case 261: /* trigger_cmd ::= UPDATE orconf trnm tridxby SET setlist from where_opt scanpt */
+{yylhsminor.yy447 = sqlite3TriggerUpdateStep(pParse, &yymsp[-6].minor.yy0, yymsp[-2].minor.yy47, yymsp[-3].minor.yy242, yymsp[-1].minor.yy202, yymsp[-7].minor.yy192, yymsp[-8].minor.yy0.z, yymsp[0].minor.yy436);}
+ yymsp[-8].minor.yy447 = yylhsminor.yy447;
+ break;
+ case 262: /* trigger_cmd ::= scanpt insert_cmd INTO trnm idlist_opt select upsert scanpt */
+{
+ yylhsminor.yy447 = sqlite3TriggerInsertStep(pParse,&yymsp[-4].minor.yy0,yymsp[-3].minor.yy600,yymsp[-2].minor.yy539,yymsp[-6].minor.yy192,yymsp[-1].minor.yy318,yymsp[-7].minor.yy436,yymsp[0].minor.yy436);/*yylhsminor.yy447-overwrites-yymsp[-6].minor.yy192*/
+}
+ yymsp[-7].minor.yy447 = yylhsminor.yy447;
+ break;
+ case 263: /* trigger_cmd ::= DELETE FROM trnm tridxby where_opt scanpt */
+{yylhsminor.yy447 = sqlite3TriggerDeleteStep(pParse, &yymsp[-3].minor.yy0, yymsp[-1].minor.yy202, yymsp[-5].minor.yy0.z, yymsp[0].minor.yy436);}
+ yymsp[-5].minor.yy447 = yylhsminor.yy447;
+ break;
+ case 264: /* trigger_cmd ::= scanpt select scanpt */
+{yylhsminor.yy447 = sqlite3TriggerSelectStep(pParse->db, yymsp[-1].minor.yy539, yymsp[-2].minor.yy436, yymsp[0].minor.yy436); /*yylhsminor.yy447-overwrites-yymsp[-1].minor.yy539*/}
+ yymsp[-2].minor.yy447 = yylhsminor.yy447;
+ break;
+ case 265: /* expr ::= RAISE LP IGNORE RP */
+{
+ yymsp[-3].minor.yy202 = sqlite3PExpr(pParse, TK_RAISE, 0, 0);
+ if( yymsp[-3].minor.yy202 ){
+ yymsp[-3].minor.yy202->affExpr = OE_Ignore;
+ }
+}
+ break;
+ case 266: /* expr ::= RAISE LP raisetype COMMA nm RP */
+{
+ yymsp[-5].minor.yy202 = sqlite3ExprAlloc(pParse->db, TK_RAISE, &yymsp[-1].minor.yy0, 1);
+ if( yymsp[-5].minor.yy202 ) {
+ yymsp[-5].minor.yy202->affExpr = (char)yymsp[-3].minor.yy192;
+ }
+}
+ break;
+ case 267: /* raisetype ::= ROLLBACK */
+{yymsp[0].minor.yy192 = OE_Rollback;}
+ break;
+ case 269: /* raisetype ::= FAIL */
+{yymsp[0].minor.yy192 = OE_Fail;}
+ break;
+ case 270: /* cmd ::= DROP TRIGGER ifexists fullname */
+{
+ sqlite3DropTrigger(pParse,yymsp[0].minor.yy47,yymsp[-1].minor.yy192);
+}
+ break;
+ case 271: /* cmd ::= ATTACH database_kw_opt expr AS expr key_opt */
+{
+ sqlite3Attach(pParse, yymsp[-3].minor.yy202, yymsp[-1].minor.yy202, yymsp[0].minor.yy202);
+}
+ break;
+ case 272: /* cmd ::= DETACH database_kw_opt expr */
+{
+ sqlite3Detach(pParse, yymsp[0].minor.yy202);
+}
+ break;
+ case 275: /* cmd ::= REINDEX */
+{sqlite3Reindex(pParse, 0, 0);}
+ break;
+ case 276: /* cmd ::= REINDEX nm dbnm */
+{sqlite3Reindex(pParse, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy0);}
+ break;
+ case 277: /* cmd ::= ANALYZE */
+{sqlite3Analyze(pParse, 0, 0);}
+ break;
+ case 278: /* cmd ::= ANALYZE nm dbnm */
+{sqlite3Analyze(pParse, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy0);}
+ break;
+ case 279: /* cmd ::= ALTER TABLE fullname RENAME TO nm */
+{
+ sqlite3AlterRenameTable(pParse,yymsp[-3].minor.yy47,&yymsp[0].minor.yy0);
+}
+ break;
+ case 280: /* cmd ::= ALTER TABLE add_column_fullname ADD kwcolumn_opt columnname carglist */
+{
+ yymsp[-1].minor.yy0.n = (int)(pParse->sLastToken.z-yymsp[-1].minor.yy0.z) + pParse->sLastToken.n;
+ sqlite3AlterFinishAddColumn(pParse, &yymsp[-1].minor.yy0);
+}
+ break;
+ case 281: /* add_column_fullname ::= fullname */
+{
+ disableLookaside(pParse);
+ sqlite3AlterBeginAddColumn(pParse, yymsp[0].minor.yy47);
+}
+ break;
+ case 282: /* cmd ::= ALTER TABLE fullname RENAME kwcolumn_opt nm TO nm */
+{
+ sqlite3AlterRenameColumn(pParse, yymsp[-5].minor.yy47, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0);
+}
+ break;
+ case 283: /* cmd ::= create_vtab */
+{sqlite3VtabFinishParse(pParse,0);}
+ break;
+ case 284: /* cmd ::= create_vtab LP vtabarglist RP */
+{sqlite3VtabFinishParse(pParse,&yymsp[0].minor.yy0);}
+ break;
+ case 285: /* create_vtab ::= createkw VIRTUAL TABLE ifnotexists nm dbnm USING nm */
+{
+ sqlite3VtabBeginParse(pParse, &yymsp[-3].minor.yy0, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0, yymsp[-4].minor.yy192);
+}
+ break;
+ case 286: /* vtabarg ::= */
+{sqlite3VtabArgInit(pParse);}
+ break;
+ case 287: /* vtabargtoken ::= ANY */
+ case 288: /* vtabargtoken ::= lp anylist RP */ yytestcase(yyruleno==288);
+ case 289: /* lp ::= LP */ yytestcase(yyruleno==289);
+{sqlite3VtabArgExtend(pParse,&yymsp[0].minor.yy0);}
+ break;
+ case 290: /* with ::= WITH wqlist */
+ case 291: /* with ::= WITH RECURSIVE wqlist */ yytestcase(yyruleno==291);
+{ sqlite3WithPush(pParse, yymsp[0].minor.yy131, 1); }
+ break;
+ case 292: /* wqlist ::= nm eidlist_opt AS LP select RP */
+{
+ yymsp[-5].minor.yy131 = sqlite3WithAdd(pParse, 0, &yymsp[-5].minor.yy0, yymsp[-4].minor.yy242, yymsp[-1].minor.yy539); /*A-overwrites-X*/
+}
+ break;
+ case 293: /* wqlist ::= wqlist COMMA nm eidlist_opt AS LP select RP */
+{
+ yymsp[-7].minor.yy131 = sqlite3WithAdd(pParse, yymsp[-7].minor.yy131, &yymsp[-5].minor.yy0, yymsp[-4].minor.yy242, yymsp[-1].minor.yy539);
+}
+ break;
+ case 294: /* windowdefn_list ::= windowdefn */
+{ yylhsminor.yy303 = yymsp[0].minor.yy303; }
+ yymsp[0].minor.yy303 = yylhsminor.yy303;
+ break;
+ case 295: /* windowdefn_list ::= windowdefn_list COMMA windowdefn */
+{
+ assert( yymsp[0].minor.yy303!=0 );
+ sqlite3WindowChain(pParse, yymsp[0].minor.yy303, yymsp[-2].minor.yy303);
+ yymsp[0].minor.yy303->pNextWin = yymsp[-2].minor.yy303;
+ yylhsminor.yy303 = yymsp[0].minor.yy303;
+}
+ yymsp[-2].minor.yy303 = yylhsminor.yy303;
+ break;
+ case 296: /* windowdefn ::= nm AS LP window RP */
+{
+ if( ALWAYS(yymsp[-1].minor.yy303) ){
+ yymsp[-1].minor.yy303->zName = sqlite3DbStrNDup(pParse->db, yymsp[-4].minor.yy0.z, yymsp[-4].minor.yy0.n);
+ }
+ yylhsminor.yy303 = yymsp[-1].minor.yy303;
+}
+ yymsp[-4].minor.yy303 = yylhsminor.yy303;
+ break;
+ case 297: /* window ::= PARTITION BY nexprlist orderby_opt frame_opt */
+{
+ yymsp[-4].minor.yy303 = sqlite3WindowAssemble(pParse, yymsp[0].minor.yy303, yymsp[-2].minor.yy242, yymsp[-1].minor.yy242, 0);
+}
+ break;
+ case 298: /* window ::= nm PARTITION BY nexprlist orderby_opt frame_opt */
+{
+ yylhsminor.yy303 = sqlite3WindowAssemble(pParse, yymsp[0].minor.yy303, yymsp[-2].minor.yy242, yymsp[-1].minor.yy242, &yymsp[-5].minor.yy0);
+}
+ yymsp[-5].minor.yy303 = yylhsminor.yy303;
+ break;
+ case 299: /* window ::= ORDER BY sortlist frame_opt */
+{
+ yymsp[-3].minor.yy303 = sqlite3WindowAssemble(pParse, yymsp[0].minor.yy303, 0, yymsp[-1].minor.yy242, 0);
+}
+ break;
+ case 300: /* window ::= nm ORDER BY sortlist frame_opt */
+{
+ yylhsminor.yy303 = sqlite3WindowAssemble(pParse, yymsp[0].minor.yy303, 0, yymsp[-1].minor.yy242, &yymsp[-4].minor.yy0);
+}
+ yymsp[-4].minor.yy303 = yylhsminor.yy303;
+ break;
+ case 301: /* window ::= frame_opt */
+ case 320: /* filter_over ::= over_clause */ yytestcase(yyruleno==320);
+{
+ yylhsminor.yy303 = yymsp[0].minor.yy303;
+}
+ yymsp[0].minor.yy303 = yylhsminor.yy303;
+ break;
+ case 302: /* window ::= nm frame_opt */
+{
+ yylhsminor.yy303 = sqlite3WindowAssemble(pParse, yymsp[0].minor.yy303, 0, 0, &yymsp[-1].minor.yy0);
+}
+ yymsp[-1].minor.yy303 = yylhsminor.yy303;
+ break;
+ case 303: /* frame_opt ::= */
+{
+ yymsp[1].minor.yy303 = sqlite3WindowAlloc(pParse, 0, TK_UNBOUNDED, 0, TK_CURRENT, 0, 0);
+}
+ break;
+ case 304: /* frame_opt ::= range_or_rows frame_bound_s frame_exclude_opt */
+{
+ yylhsminor.yy303 = sqlite3WindowAlloc(pParse, yymsp[-2].minor.yy192, yymsp[-1].minor.yy77.eType, yymsp[-1].minor.yy77.pExpr, TK_CURRENT, 0, yymsp[0].minor.yy58);
+}
+ yymsp[-2].minor.yy303 = yylhsminor.yy303;
+ break;
+ case 305: /* frame_opt ::= range_or_rows BETWEEN frame_bound_s AND frame_bound_e frame_exclude_opt */
+{
+ yylhsminor.yy303 = sqlite3WindowAlloc(pParse, yymsp[-5].minor.yy192, yymsp[-3].minor.yy77.eType, yymsp[-3].minor.yy77.pExpr, yymsp[-1].minor.yy77.eType, yymsp[-1].minor.yy77.pExpr, yymsp[0].minor.yy58);
+}
+ yymsp[-5].minor.yy303 = yylhsminor.yy303;
+ break;
+ case 307: /* frame_bound_s ::= frame_bound */
+ case 309: /* frame_bound_e ::= frame_bound */ yytestcase(yyruleno==309);
+{yylhsminor.yy77 = yymsp[0].minor.yy77;}
+ yymsp[0].minor.yy77 = yylhsminor.yy77;
+ break;
+ case 308: /* frame_bound_s ::= UNBOUNDED PRECEDING */
+ case 310: /* frame_bound_e ::= UNBOUNDED FOLLOWING */ yytestcase(yyruleno==310);
+ case 312: /* frame_bound ::= CURRENT ROW */ yytestcase(yyruleno==312);
+{yylhsminor.yy77.eType = yymsp[-1].major; yylhsminor.yy77.pExpr = 0;}
+ yymsp[-1].minor.yy77 = yylhsminor.yy77;
+ break;
+ case 311: /* frame_bound ::= expr PRECEDING|FOLLOWING */
+{yylhsminor.yy77.eType = yymsp[0].major; yylhsminor.yy77.pExpr = yymsp[-1].minor.yy202;}
+ yymsp[-1].minor.yy77 = yylhsminor.yy77;
+ break;
+ case 313: /* frame_exclude_opt ::= */
+{yymsp[1].minor.yy58 = 0;}
+ break;
+ case 314: /* frame_exclude_opt ::= EXCLUDE frame_exclude */
+{yymsp[-1].minor.yy58 = yymsp[0].minor.yy58;}
+ break;
+ case 315: /* frame_exclude ::= NO OTHERS */
+ case 316: /* frame_exclude ::= CURRENT ROW */ yytestcase(yyruleno==316);
+{yymsp[-1].minor.yy58 = yymsp[-1].major; /*A-overwrites-X*/}
+ break;
+ case 317: /* frame_exclude ::= GROUP|TIES */
+{yymsp[0].minor.yy58 = yymsp[0].major; /*A-overwrites-X*/}
+ break;
+ case 318: /* window_clause ::= WINDOW windowdefn_list */
+{ yymsp[-1].minor.yy303 = yymsp[0].minor.yy303; }
+ break;
+ case 319: /* filter_over ::= filter_clause over_clause */
+{
+ yymsp[0].minor.yy303->pFilter = yymsp[-1].minor.yy202;
+ yylhsminor.yy303 = yymsp[0].minor.yy303;
+}
+ yymsp[-1].minor.yy303 = yylhsminor.yy303;
+ break;
+ case 321: /* filter_over ::= filter_clause */
+{
+ yylhsminor.yy303 = (Window*)sqlite3DbMallocZero(pParse->db, sizeof(Window));
+ if( yylhsminor.yy303 ){
+ yylhsminor.yy303->eFrmType = TK_FILTER;
+ yylhsminor.yy303->pFilter = yymsp[0].minor.yy202;
+ }else{
+ sqlite3ExprDelete(pParse->db, yymsp[0].minor.yy202);
+ }
+}
+ yymsp[0].minor.yy303 = yylhsminor.yy303;
+ break;
+ case 322: /* over_clause ::= OVER LP window RP */
+{
+ yymsp[-3].minor.yy303 = yymsp[-1].minor.yy303;
+ assert( yymsp[-3].minor.yy303!=0 );
+}
+ break;
+ case 323: /* over_clause ::= OVER nm */
+{
+ yymsp[-1].minor.yy303 = (Window*)sqlite3DbMallocZero(pParse->db, sizeof(Window));
+ if( yymsp[-1].minor.yy303 ){
+ yymsp[-1].minor.yy303->zName = sqlite3DbStrNDup(pParse->db, yymsp[0].minor.yy0.z, yymsp[0].minor.yy0.n);
+ }
+}
+ break;
+ case 324: /* filter_clause ::= FILTER LP WHERE expr RP */
+{ yymsp[-4].minor.yy202 = yymsp[-1].minor.yy202; }
+ break;
+ default:
+ /* (325) input ::= cmdlist */ yytestcase(yyruleno==325);
+ /* (326) cmdlist ::= cmdlist ecmd */ yytestcase(yyruleno==326);
+ /* (327) cmdlist ::= ecmd (OPTIMIZED OUT) */ assert(yyruleno!=327);
+ /* (328) ecmd ::= SEMI */ yytestcase(yyruleno==328);
+ /* (329) ecmd ::= cmdx SEMI */ yytestcase(yyruleno==329);
+ /* (330) ecmd ::= explain cmdx SEMI (NEVER REDUCES) */ assert(yyruleno!=330);
+ /* (331) trans_opt ::= */ yytestcase(yyruleno==331);
+ /* (332) trans_opt ::= TRANSACTION */ yytestcase(yyruleno==332);
+ /* (333) trans_opt ::= TRANSACTION nm */ yytestcase(yyruleno==333);
+ /* (334) savepoint_opt ::= SAVEPOINT */ yytestcase(yyruleno==334);
+ /* (335) savepoint_opt ::= */ yytestcase(yyruleno==335);
+ /* (336) cmd ::= create_table create_table_args */ yytestcase(yyruleno==336);
+ /* (337) columnlist ::= columnlist COMMA columnname carglist */ yytestcase(yyruleno==337);
+ /* (338) columnlist ::= columnname carglist */ yytestcase(yyruleno==338);
+ /* (339) nm ::= ID|INDEXED */ yytestcase(yyruleno==339);
+ /* (340) nm ::= STRING */ yytestcase(yyruleno==340);
+ /* (341) nm ::= JOIN_KW */ yytestcase(yyruleno==341);
+ /* (342) typetoken ::= typename */ yytestcase(yyruleno==342);
+ /* (343) typename ::= ID|STRING */ yytestcase(yyruleno==343);
+ /* (344) signed ::= plus_num (OPTIMIZED OUT) */ assert(yyruleno!=344);
+ /* (345) signed ::= minus_num (OPTIMIZED OUT) */ assert(yyruleno!=345);
+ /* (346) carglist ::= carglist ccons */ yytestcase(yyruleno==346);
+ /* (347) carglist ::= */ yytestcase(yyruleno==347);
+ /* (348) ccons ::= NULL onconf */ yytestcase(yyruleno==348);
+ /* (349) ccons ::= GENERATED ALWAYS AS generated */ yytestcase(yyruleno==349);
+ /* (350) ccons ::= AS generated */ yytestcase(yyruleno==350);
+ /* (351) conslist_opt ::= COMMA conslist */ yytestcase(yyruleno==351);
+ /* (352) conslist ::= conslist tconscomma tcons */ yytestcase(yyruleno==352);
+ /* (353) conslist ::= tcons (OPTIMIZED OUT) */ assert(yyruleno!=353);
+ /* (354) tconscomma ::= */ yytestcase(yyruleno==354);
+ /* (355) defer_subclause_opt ::= defer_subclause (OPTIMIZED OUT) */ assert(yyruleno!=355);
+ /* (356) resolvetype ::= raisetype (OPTIMIZED OUT) */ assert(yyruleno!=356);
+ /* (357) selectnowith ::= oneselect (OPTIMIZED OUT) */ assert(yyruleno!=357);
+ /* (358) oneselect ::= values */ yytestcase(yyruleno==358);
+ /* (359) sclp ::= selcollist COMMA */ yytestcase(yyruleno==359);
+ /* (360) as ::= ID|STRING */ yytestcase(yyruleno==360);
+ /* (361) expr ::= term (OPTIMIZED OUT) */ assert(yyruleno!=361);
+ /* (362) likeop ::= LIKE_KW|MATCH */ yytestcase(yyruleno==362);
+ /* (363) exprlist ::= nexprlist */ yytestcase(yyruleno==363);
+ /* (364) nmnum ::= plus_num (OPTIMIZED OUT) */ assert(yyruleno!=364);
+ /* (365) nmnum ::= nm (OPTIMIZED OUT) */ assert(yyruleno!=365);
+ /* (366) nmnum ::= ON */ yytestcase(yyruleno==366);
+ /* (367) nmnum ::= DELETE */ yytestcase(yyruleno==367);
+ /* (368) nmnum ::= DEFAULT */ yytestcase(yyruleno==368);
+ /* (369) plus_num ::= INTEGER|FLOAT */ yytestcase(yyruleno==369);
+ /* (370) foreach_clause ::= */ yytestcase(yyruleno==370);
+ /* (371) foreach_clause ::= FOR EACH ROW */ yytestcase(yyruleno==371);
+ /* (372) trnm ::= nm */ yytestcase(yyruleno==372);
+ /* (373) tridxby ::= */ yytestcase(yyruleno==373);
+ /* (374) database_kw_opt ::= DATABASE */ yytestcase(yyruleno==374);
+ /* (375) database_kw_opt ::= */ yytestcase(yyruleno==375);
+ /* (376) kwcolumn_opt ::= */ yytestcase(yyruleno==376);
+ /* (377) kwcolumn_opt ::= COLUMNKW */ yytestcase(yyruleno==377);
+ /* (378) vtabarglist ::= vtabarg */ yytestcase(yyruleno==378);
+ /* (379) vtabarglist ::= vtabarglist COMMA vtabarg */ yytestcase(yyruleno==379);
+ /* (380) vtabarg ::= vtabarg vtabargtoken */ yytestcase(yyruleno==380);
+ /* (381) anylist ::= */ yytestcase(yyruleno==381);
+ /* (382) anylist ::= anylist LP anylist RP */ yytestcase(yyruleno==382);
+ /* (383) anylist ::= anylist ANY */ yytestcase(yyruleno==383);
+ /* (384) with ::= */ yytestcase(yyruleno==384);
+ break;
+/********** End reduce actions ************************************************/
+ };
+ assert( yyruleno<sizeof(yyRuleInfoLhs)/sizeof(yyRuleInfoLhs[0]) );
+ yygoto = yyRuleInfoLhs[yyruleno];
+ yysize = yyRuleInfoNRhs[yyruleno];
+ yyact = yy_find_reduce_action(yymsp[yysize].stateno,(YYCODETYPE)yygoto);
+
+ /* There are no SHIFTREDUCE actions on nonterminals because the table
+ ** generator has simplified them to pure REDUCE actions. */
+ assert( !(yyact>YY_MAX_SHIFT && yyact<=YY_MAX_SHIFTREDUCE) );
+
+ /* It is not possible for a REDUCE to be followed by an error */
+ assert( yyact!=YY_ERROR_ACTION );
+
+ yymsp += yysize+1;
+ yypParser->yytos = yymsp;
+ yymsp->stateno = (YYACTIONTYPE)yyact;
+ yymsp->major = (YYCODETYPE)yygoto;
+ yyTraceShift(yypParser, yyact, "... then shift");
+ return yyact;
+}
+
+/*
+** The following code executes when the parse fails
+*/
+#ifndef YYNOERRORRECOVERY
+static void yy_parse_failed(
+ yyParser *yypParser /* The parser */
+){
+ sqlite3ParserARG_FETCH
+ sqlite3ParserCTX_FETCH
+#ifndef NDEBUG
+ if( yyTraceFILE ){
+ fprintf(yyTraceFILE,"%sFail!\n",yyTracePrompt);
+ }
+#endif
+ while( yypParser->yytos>yypParser->yystack ) yy_pop_parser_stack(yypParser);
+  /* Code inserted here (via %parse_failure) is executed whenever the
+  ** parser fails */
+/************ Begin %parse_failure code ***************************************/
+/************ End %parse_failure code *****************************************/
+ sqlite3ParserARG_STORE /* Suppress warning about unused %extra_argument variable */
+ sqlite3ParserCTX_STORE
+}
+#endif /* YYNOERRORRECOVERY */
+
+/*
+** The following code executes when a syntax error first occurs.
+*/
+static void yy_syntax_error(
+ yyParser *yypParser, /* The parser */
+ int yymajor, /* The major type of the error token */
+ sqlite3ParserTOKENTYPE yyminor /* The minor type of the error token */
+){
+ sqlite3ParserARG_FETCH
+ sqlite3ParserCTX_FETCH
+#define TOKEN yyminor
+/************ Begin %syntax_error code ****************************************/
+
+ UNUSED_PARAMETER(yymajor); /* Silence some compiler warnings */
+ if( TOKEN.z[0] ){
+ sqlite3ErrorMsg(pParse, "near \"%T\": syntax error", &TOKEN);
+ }else{
+ sqlite3ErrorMsg(pParse, "incomplete input");
+ }
+/************ End %syntax_error code ******************************************/
+ sqlite3ParserARG_STORE /* Suppress warning about unused %extra_argument variable */
+ sqlite3ParserCTX_STORE
+}
+
+/*
+** The following is executed when the parser accepts
+*/
+static void yy_accept(
+ yyParser *yypParser /* The parser */
+){
+ sqlite3ParserARG_FETCH
+ sqlite3ParserCTX_FETCH
+#ifndef NDEBUG
+ if( yyTraceFILE ){
+ fprintf(yyTraceFILE,"%sAccept!\n",yyTracePrompt);
+ }
+#endif
+#ifndef YYNOERRORRECOVERY
+ yypParser->yyerrcnt = -1;
+#endif
+ assert( yypParser->yytos==yypParser->yystack );
+  /* Code inserted here (via %parse_accept) is executed whenever the
+  ** parser accepts */
+/*********** Begin %parse_accept code *****************************************/
+/*********** End %parse_accept code *******************************************/
+ sqlite3ParserARG_STORE /* Suppress warning about unused %extra_argument variable */
+ sqlite3ParserCTX_STORE
+}
+
+/* The main parser program.
+** The first argument is a pointer to a structure obtained from
+** "sqlite3ParserAlloc" which describes the current state of the parser.
+** The second argument is the major token number. The third is
+** the minor token. The fourth optional argument is whatever the
+** user wants (as specified in the grammar) and is available for
+** use by the action routines.
+**
+** Inputs:
+** <ul>
+** <li> A pointer to the parser (an opaque structure.)
+** <li> The major token number.
+** <li> The minor token number.
+** <li> An optional argument of a grammar-specified type.
+** </ul>
+**
+** Outputs:
+** None.
+*/
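+/*
+** A rough sketch of the calling convention (not part of the build; the
+** real driver is sqlite3RunParser() in tokenize.c below, which also
+** handles end-of-input and error cleanup that is elided here):
+**
+**   void *pEngine = sqlite3ParserAlloc(sqlite3Malloc, pParse);
+**   while( zSql[0] && pParse->rc==SQLITE_OK ){
+**     n = sqlite3GetToken((u8*)zSql, &tokenType);   // classify next token
+**     pParse->sLastToken.z = zSql;
+**     pParse->sLastToken.n = n;
+**     sqlite3Parser(pEngine, tokenType, pParse->sLastToken);
+**     zSql += n;
+**   }
+**   sqlite3Parser(pEngine, 0, pParse->sLastToken);  // major token 0 == EOF
+**   sqlite3ParserFree(pEngine, sqlite3_free);
+*/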
+SQLITE_PRIVATE void sqlite3Parser(
+ void *yyp, /* The parser */
+ int yymajor, /* The major token code number */
+ sqlite3ParserTOKENTYPE yyminor /* The value for the token */
+ sqlite3ParserARG_PDECL /* Optional %extra_argument parameter */
+){
+ YYMINORTYPE yyminorunion;
+ YYACTIONTYPE yyact; /* The parser action. */
+#if !defined(YYERRORSYMBOL) && !defined(YYNOERRORRECOVERY)
+ int yyendofinput; /* True if we are at the end of input */
+#endif
+#ifdef YYERRORSYMBOL
+ int yyerrorhit = 0; /* True if yymajor has invoked an error */
+#endif
+ yyParser *yypParser = (yyParser*)yyp; /* The parser */
+ sqlite3ParserCTX_FETCH
+ sqlite3ParserARG_STORE
+
+ assert( yypParser->yytos!=0 );
+#if !defined(YYERRORSYMBOL) && !defined(YYNOERRORRECOVERY)
+ yyendofinput = (yymajor==0);
+#endif
+
+ yyact = yypParser->yytos->stateno;
+#ifndef NDEBUG
+ if( yyTraceFILE ){
+ if( yyact < YY_MIN_REDUCE ){
+ fprintf(yyTraceFILE,"%sInput '%s' in state %d\n",
+ yyTracePrompt,yyTokenName[yymajor],yyact);
+ }else{
+ fprintf(yyTraceFILE,"%sInput '%s' with pending reduce %d\n",
+ yyTracePrompt,yyTokenName[yymajor],yyact-YY_MIN_REDUCE);
+ }
+ }
+#endif
+
+ do{
+ assert( yyact==yypParser->yytos->stateno );
+ yyact = yy_find_shift_action((YYCODETYPE)yymajor,yyact);
+ if( yyact >= YY_MIN_REDUCE ){
+ yyact = yy_reduce(yypParser,yyact-YY_MIN_REDUCE,yymajor,
+ yyminor sqlite3ParserCTX_PARAM);
+ }else if( yyact <= YY_MAX_SHIFTREDUCE ){
+ yy_shift(yypParser,yyact,(YYCODETYPE)yymajor,yyminor);
+#ifndef YYNOERRORRECOVERY
+ yypParser->yyerrcnt--;
+#endif
+ break;
+ }else if( yyact==YY_ACCEPT_ACTION ){
+ yypParser->yytos--;
+ yy_accept(yypParser);
+ return;
+ }else{
+ assert( yyact == YY_ERROR_ACTION );
+ yyminorunion.yy0 = yyminor;
+#ifdef YYERRORSYMBOL
+ int yymx;
+#endif
+#ifndef NDEBUG
+ if( yyTraceFILE ){
+ fprintf(yyTraceFILE,"%sSyntax Error!\n",yyTracePrompt);
+ }
+#endif
+#ifdef YYERRORSYMBOL
+ /* A syntax error has occurred.
+ ** The response to an error depends upon whether or not the
+ ** grammar defines an error token "ERROR".
+ **
+ ** This is what we do if the grammar does define ERROR:
+ **
+ ** * Call the %syntax_error function.
+ **
+ ** * Begin popping the stack until we enter a state where
+ ** it is legal to shift the error symbol, then shift
+ ** the error symbol.
+ **
+ ** * Set the error count to three.
+ **
+ ** * Begin accepting and shifting new tokens. No new error
+ ** processing will occur until three tokens have been
+ ** shifted successfully.
+ **
+ */
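+      /* Note: SQLite's grammar declares no "error" token, so YYERRORSYMBOL
+      ** is not defined in this build and the #else branch further below is
+      ** the code that actually compiles. */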
+ if( yypParser->yyerrcnt<0 ){
+ yy_syntax_error(yypParser,yymajor,yyminor);
+ }
+ yymx = yypParser->yytos->major;
+ if( yymx==YYERRORSYMBOL || yyerrorhit ){
+#ifndef NDEBUG
+ if( yyTraceFILE ){
+ fprintf(yyTraceFILE,"%sDiscard input token %s\n",
+ yyTracePrompt,yyTokenName[yymajor]);
+ }
+#endif
+ yy_destructor(yypParser, (YYCODETYPE)yymajor, &yyminorunion);
+ yymajor = YYNOCODE;
+ }else{
+ while( yypParser->yytos >= yypParser->yystack
+ && (yyact = yy_find_reduce_action(
+ yypParser->yytos->stateno,
+ YYERRORSYMBOL)) > YY_MAX_SHIFTREDUCE
+ ){
+ yy_pop_parser_stack(yypParser);
+ }
+ if( yypParser->yytos < yypParser->yystack || yymajor==0 ){
+ yy_destructor(yypParser,(YYCODETYPE)yymajor,&yyminorunion);
+ yy_parse_failed(yypParser);
+#ifndef YYNOERRORRECOVERY
+ yypParser->yyerrcnt = -1;
+#endif
+ yymajor = YYNOCODE;
+ }else if( yymx!=YYERRORSYMBOL ){
+ yy_shift(yypParser,yyact,YYERRORSYMBOL,yyminor);
+ }
+ }
+ yypParser->yyerrcnt = 3;
+ yyerrorhit = 1;
+ if( yymajor==YYNOCODE ) break;
+ yyact = yypParser->yytos->stateno;
+#elif defined(YYNOERRORRECOVERY)
+ /* If the YYNOERRORRECOVERY macro is defined, then do not attempt to
+ ** do any kind of error recovery. Instead, simply invoke the syntax
+ ** error routine and continue going as if nothing had happened.
+ **
+ ** Applications can set this macro (for example inside %include) if
+ ** they intend to abandon the parse upon the first syntax error seen.
+ */
+ yy_syntax_error(yypParser,yymajor, yyminor);
+ yy_destructor(yypParser,(YYCODETYPE)yymajor,&yyminorunion);
+ break;
+#else /* YYERRORSYMBOL is not defined */
+ /* This is what we do if the grammar does not define ERROR:
+ **
+ ** * Report an error message, and throw away the input token.
+ **
+ ** * If the input token is $, then fail the parse.
+ **
+ ** As before, subsequent error messages are suppressed until
+ ** three input tokens have been successfully shifted.
+ */
+ if( yypParser->yyerrcnt<=0 ){
+ yy_syntax_error(yypParser,yymajor, yyminor);
+ }
+ yypParser->yyerrcnt = 3;
+ yy_destructor(yypParser,(YYCODETYPE)yymajor,&yyminorunion);
+ if( yyendofinput ){
+ yy_parse_failed(yypParser);
+#ifndef YYNOERRORRECOVERY
+ yypParser->yyerrcnt = -1;
+#endif
+ }
+ break;
+#endif
+ }
+ }while( yypParser->yytos>yypParser->yystack );
+#ifndef NDEBUG
+ if( yyTraceFILE ){
+ yyStackEntry *i;
+ char cDiv = '[';
+ fprintf(yyTraceFILE,"%sReturn. Stack=",yyTracePrompt);
+ for(i=&yypParser->yystack[1]; i<=yypParser->yytos; i++){
+ fprintf(yyTraceFILE,"%c%s", cDiv, yyTokenName[i->major]);
+ cDiv = ' ';
+ }
+ fprintf(yyTraceFILE,"]\n");
+ }
+#endif
+ return;
+}
+
+/*
+** Return the fallback token corresponding to canonical token iToken, or
+** 0 if iToken has no fallback.
+*/
+SQLITE_PRIVATE int sqlite3ParserFallback(int iToken){
+#ifdef YYFALLBACK
+ assert( iToken<(int)(sizeof(yyFallback)/sizeof(yyFallback[0])) );
+ return yyFallback[iToken];
+#else
+ (void)iToken;
+ return 0;
+#endif
+}
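+/* Illustration (assuming YYFALLBACK, which this grammar defines): most
+** non-reserved keywords fall back to TK_ID so they can still be used as
+** identifiers, e.g. sqlite3ParserFallback(TK_MATCH) returns TK_ID, while
+** a reserved word like SELECT has no fallback, so
+** sqlite3ParserFallback(TK_SELECT) returns 0. */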
+
+/************** End of parse.c ***********************************************/
+/************** Begin file tokenize.c ****************************************/
+/*
+** 2001 September 15
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+** A tokenizer for SQL
+**
+** This file contains C code that splits an SQL input string up into
+** individual tokens and sends those tokens one-by-one over to the
+** parser for analysis.
+*/
+/* #include "sqliteInt.h" */
+/* #include <stdlib.h> */
+
+/* Character classes for tokenizing
+**
+** In the sqlite3GetToken() function, a switch() on aiClass[c] is implemented
+** using a lookup table, whereas a switch() directly on c uses a binary search.
+** The lookup table is much faster. To maximize speed, and to ensure that
+** a lookup table is used, all of the classes need to be small integers and
+** all of them need to be used within the switch.
+*/
+#define CC_X 0 /* The letter 'x', or start of BLOB literal */
+#define CC_KYWD 1 /* Alphabetics or '_'. Usable in a keyword */
+#define CC_ID 2 /* Unicode characters usable in IDs */
+#define CC_DIGIT 3 /* Digits */
+#define CC_DOLLAR 4 /* '$' */
+#define CC_VARALPHA 5 /* '@', '#', ':'. Alphabetic SQL variables */
+#define CC_VARNUM 6 /* '?'. Numeric SQL variables */
+#define CC_SPACE 7 /* Space characters */
+#define CC_QUOTE 8 /* '"', '\'', or '`'. String literals, quoted ids */
+#define CC_QUOTE2 9 /* '['. [...] style quoted ids */
+#define CC_PIPE 10 /* '|'. Bitwise OR or concatenate */
+#define CC_MINUS 11 /* '-'. Minus or SQL-style comment */
+#define CC_LT 12 /* '<'. Part of < or <= or <> */
+#define CC_GT 13 /* '>'. Part of > or >= */
+#define CC_EQ 14 /* '='. Part of = or == */
+#define CC_BANG 15 /* '!'. Part of != */
+#define CC_SLASH 16 /* '/'. / or c-style comment */
+#define CC_LP 17 /* '(' */
+#define CC_RP 18 /* ')' */
+#define CC_SEMI 19 /* ';' */
+#define CC_PLUS 20 /* '+' */
+#define CC_STAR 21 /* '*' */
+#define CC_PERCENT 22 /* '%' */
+#define CC_COMMA 23 /* ',' */
+#define CC_AND 24 /* '&' */
+#define CC_TILDA 25 /* '~' */
+#define CC_DOT 26 /* '.' */
+#define CC_ILLEGAL 27 /* Illegal character */
+#define CC_NUL 28 /* 0x00 */
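+/* Example: in the ASCII table below, aiClass['?'] is CC_VARNUM, so
+** sqlite3GetToken() can dispatch on the class with a single table load
+** instead of comparing ranges of character codes. */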
+
+static const unsigned char aiClass[] = {
+#ifdef SQLITE_ASCII
+/* x0 x1 x2 x3 x4 x5 x6 x7 x8 x9 xa xb xc xd xe xf */
+/* 0x */ 28, 27, 27, 27, 27, 27, 27, 27, 27, 7, 7, 27, 7, 7, 27, 27,
+/* 1x */ 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27,
+/* 2x */ 7, 15, 8, 5, 4, 22, 24, 8, 17, 18, 21, 20, 23, 11, 26, 16,
+/* 3x */ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 5, 19, 12, 14, 13, 6,
+/* 4x */ 5, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+/* 5x */ 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 9, 27, 27, 27, 1,
+/* 6x */ 8, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+/* 7x */ 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 27, 10, 27, 25, 27,
+/* 8x */ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+/* 9x */ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+/* Ax */ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+/* Bx */ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+/* Cx */ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+/* Dx */ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+/* Ex */ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+/* Fx */ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2
+#endif
+#ifdef SQLITE_EBCDIC
+/* x0 x1 x2 x3 x4 x5 x6 x7 x8 x9 xa xb xc xd xe xf */
+/* 0x */ 27, 27, 27, 27, 27, 7, 27, 27, 27, 27, 27, 27, 7, 7, 27, 27,
+/* 1x */ 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27,
+/* 2x */ 27, 27, 27, 27, 27, 7, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27,
+/* 3x */ 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27,
+/* 4x */ 7, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 26, 12, 17, 20, 10,
+/* 5x */ 24, 27, 27, 27, 27, 27, 27, 27, 27, 27, 15, 4, 21, 18, 19, 27,
+/* 6x */ 11, 16, 27, 27, 27, 27, 27, 27, 27, 27, 27, 23, 22, 1, 13, 6,
+/* 7x */ 27, 27, 27, 27, 27, 27, 27, 27, 27, 8, 5, 5, 5, 8, 14, 8,
+/* 8x */ 27, 1, 1, 1, 1, 1, 1, 1, 1, 1, 27, 27, 27, 27, 27, 27,
+/* 9x */ 27, 1, 1, 1, 1, 1, 1, 1, 1, 1, 27, 27, 27, 27, 27, 27,
+/* Ax */ 27, 25, 1, 1, 1, 1, 1, 0, 1, 1, 27, 27, 27, 27, 27, 27,
+/* Bx */ 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 9, 27, 27, 27, 27, 27,
+/* Cx */ 27, 1, 1, 1, 1, 1, 1, 1, 1, 1, 27, 27, 27, 27, 27, 27,
+/* Dx */ 27, 1, 1, 1, 1, 1, 1, 1, 1, 1, 27, 27, 27, 27, 27, 27,
+/* Ex */ 27, 27, 1, 1, 1, 1, 1, 0, 1, 1, 27, 27, 27, 27, 27, 27,
+/* Fx */ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 27, 27, 27, 27, 27, 27,
+#endif
+};
+
+/*
+** The charMap() macro maps alphabetic characters (only) into their
+** lower-case ASCII equivalent. On ASCII machines, this is just
+** an upper-to-lower case map. On EBCDIC machines we also need
+** to adjust the encoding. The mapping is only valid for alphabetics
+** which are the only characters for which this feature is used.
+**
+** Used by keywordhash.h
+*/
+#ifdef SQLITE_ASCII
+# define charMap(X) sqlite3UpperToLower[(unsigned char)X]
+#endif
+#ifdef SQLITE_EBCDIC
+# define charMap(X) ebcdicToAscii[(unsigned char)X]
+const unsigned char ebcdicToAscii[] = {
+/* 0 1 2 3 4 5 6 7 8 9 A B C D E F */
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 0x */
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 1x */
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 2x */
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 3x */
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 4x */
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 5x */
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 95, 0, 0, /* 6x */
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 7x */
+ 0, 97, 98, 99,100,101,102,103,104,105, 0, 0, 0, 0, 0, 0, /* 8x */
+ 0,106,107,108,109,110,111,112,113,114, 0, 0, 0, 0, 0, 0, /* 9x */
+ 0, 0,115,116,117,118,119,120,121,122, 0, 0, 0, 0, 0, 0, /* Ax */
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* Bx */
+ 0, 97, 98, 99,100,101,102,103,104,105, 0, 0, 0, 0, 0, 0, /* Cx */
+ 0,106,107,108,109,110,111,112,113,114, 0, 0, 0, 0, 0, 0, /* Dx */
+ 0, 0,115,116,117,118,119,120,121,122, 0, 0, 0, 0, 0, 0, /* Ex */
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* Fx */
+};
+#endif
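+/* Example: charMap('S')=='s' under both encodings, which is what lets
+** the keyword hash below treat "Select" and "SELECT" identically. */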
+
+/*
+** The sqlite3KeywordCode function looks up an identifier to determine if
+** it is a keyword. If it is a keyword, the token code of that keyword is
+** returned. If the input is not a keyword, TK_ID is returned.
+**
+** The implementation of this routine was generated by a program,
+** mkkeywordhash.c, located in the tool subdirectory of the distribution.
+** The output of the mkkeywordhash.c program is written into a file
+** named keywordhash.h and then included into this source file by
+** the #include below.
+*/
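+/* Typical use, as in sqlite3GetToken() further below (a sketch):
+**
+**   int tokenType = TK_ID;
+**   n = keywordCode((char*)z, n, &tokenType);
+**   // tokenType is now TK_SELECT, TK_WHERE, ... or still TK_ID
+*/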
+/************** Include keywordhash.h in the middle of tokenize.c ************/
+/************** Begin file keywordhash.h *************************************/
+/***** This file contains automatically generated code ******
+**
+** The code in this file has been automatically generated by
+**
+** sqlite/tool/mkkeywordhash.c
+**
+** The code in this file implements a function that determines whether
+** or not a given identifier is really an SQL keyword. The same thing
+** might be implemented more directly using a hand-written hash table.
+** But by using this automatically generated code, the size of the code
+** is substantially reduced. This is important for embedded applications
+** on platforms with limited memory.
+*/
+/* Hash score: 227 */
+/* zKWText[] encodes 984 bytes of keyword text in 648 bytes */
+/* REINDEXEDESCAPEACHECKEYBEFOREIGNOREGEXPLAINSTEADDATABASELECT */
+/* ABLEFTHENDEFERRABLELSEXCLUDELETEMPORARYISNULLSAVEPOINTERSECT */
+/* IESNOTNULLIKEXCEPTRANSACTIONATURALTERAISEXCLUSIVEXISTS */
+/* CONSTRAINTOFFSETRIGGERANGENERATEDETACHAVINGLOBEGINNEREFERENCES */
+/* UNIQUERYWITHOUTERELEASEATTACHBETWEENOTHINGROUPSCASCADEFAULT */
+/* CASECOLLATECREATECURRENT_DATEIMMEDIATEJOINSERTMATCHPLANALYZE */
+/* PRAGMABORTUPDATEVALUESVIRTUALWAYSWHENWHERECURSIVEAFTERENAMEAND */
+/* EFERREDISTINCTAUTOINCREMENTCASTCOLUMNCOMMITCONFLICTCROSS */
+/* CURRENT_TIMESTAMPARTITIONDROPRECEDINGFAILASTFILTEREPLACEFIRST */
+/* FOLLOWINGFROMFULLIMITIFORDERESTRICTOTHERSOVERIGHTROLLBACKROWS */
+/* UNBOUNDEDUNIONUSINGVACUUMVIEWINDOWBYINITIALLYPRIMARY */
+static const char zKWText[647] = {
+ 'R','E','I','N','D','E','X','E','D','E','S','C','A','P','E','A','C','H',
+ 'E','C','K','E','Y','B','E','F','O','R','E','I','G','N','O','R','E','G',
+ 'E','X','P','L','A','I','N','S','T','E','A','D','D','A','T','A','B','A',
+ 'S','E','L','E','C','T','A','B','L','E','F','T','H','E','N','D','E','F',
+ 'E','R','R','A','B','L','E','L','S','E','X','C','L','U','D','E','L','E',
+ 'T','E','M','P','O','R','A','R','Y','I','S','N','U','L','L','S','A','V',
+ 'E','P','O','I','N','T','E','R','S','E','C','T','I','E','S','N','O','T',
+ 'N','U','L','L','I','K','E','X','C','E','P','T','R','A','N','S','A','C',
+ 'T','I','O','N','A','T','U','R','A','L','T','E','R','A','I','S','E','X',
+ 'C','L','U','S','I','V','E','X','I','S','T','S','C','O','N','S','T','R',
+ 'A','I','N','T','O','F','F','S','E','T','R','I','G','G','E','R','A','N',
+ 'G','E','N','E','R','A','T','E','D','E','T','A','C','H','A','V','I','N',
+ 'G','L','O','B','E','G','I','N','N','E','R','E','F','E','R','E','N','C',
+ 'E','S','U','N','I','Q','U','E','R','Y','W','I','T','H','O','U','T','E',
+ 'R','E','L','E','A','S','E','A','T','T','A','C','H','B','E','T','W','E',
+ 'E','N','O','T','H','I','N','G','R','O','U','P','S','C','A','S','C','A',
+ 'D','E','F','A','U','L','T','C','A','S','E','C','O','L','L','A','T','E',
+ 'C','R','E','A','T','E','C','U','R','R','E','N','T','_','D','A','T','E',
+ 'I','M','M','E','D','I','A','T','E','J','O','I','N','S','E','R','T','M',
+ 'A','T','C','H','P','L','A','N','A','L','Y','Z','E','P','R','A','G','M',
+ 'A','B','O','R','T','U','P','D','A','T','E','V','A','L','U','E','S','V',
+ 'I','R','T','U','A','L','W','A','Y','S','W','H','E','N','W','H','E','R',
+ 'E','C','U','R','S','I','V','E','A','F','T','E','R','E','N','A','M','E',
+ 'A','N','D','E','F','E','R','R','E','D','I','S','T','I','N','C','T','A',
+ 'U','T','O','I','N','C','R','E','M','E','N','T','C','A','S','T','C','O',
+ 'L','U','M','N','C','O','M','M','I','T','C','O','N','F','L','I','C','T',
+ 'C','R','O','S','S','C','U','R','R','E','N','T','_','T','I','M','E','S',
+ 'T','A','M','P','A','R','T','I','T','I','O','N','D','R','O','P','R','E',
+ 'C','E','D','I','N','G','F','A','I','L','A','S','T','F','I','L','T','E',
+ 'R','E','P','L','A','C','E','F','I','R','S','T','F','O','L','L','O','W',
+ 'I','N','G','F','R','O','M','F','U','L','L','I','M','I','T','I','F','O',
+ 'R','D','E','R','E','S','T','R','I','C','T','O','T','H','E','R','S','O',
+ 'V','E','R','I','G','H','T','R','O','L','L','B','A','C','K','R','O','W',
+ 'S','U','N','B','O','U','N','D','E','D','U','N','I','O','N','U','S','I',
+ 'N','G','V','A','C','U','U','M','V','I','E','W','I','N','D','O','W','B',
+ 'Y','I','N','I','T','I','A','L','L','Y','P','R','I','M','A','R','Y',
+};
+/* aKWHash[i] is the hash value for the i-th keyword */
+static const unsigned char aKWHash[127] = {
+ 84, 102, 132, 82, 114, 29, 0, 0, 91, 0, 85, 72, 0,
+ 53, 35, 86, 15, 0, 42, 94, 54, 126, 133, 19, 0, 0,
+ 138, 0, 40, 128, 0, 22, 104, 0, 9, 0, 0, 122, 80,
+ 0, 78, 6, 0, 65, 99, 145, 0, 134, 112, 0, 0, 48,
+ 0, 100, 24, 0, 17, 0, 27, 70, 23, 26, 5, 60, 140,
+ 107, 121, 0, 73, 101, 71, 143, 61, 119, 74, 0, 49, 0,
+ 11, 41, 0, 110, 0, 0, 0, 106, 10, 108, 113, 124, 14,
+ 50, 123, 0, 89, 0, 18, 120, 142, 56, 129, 137, 88, 83,
+ 37, 30, 125, 0, 0, 105, 51, 130, 127, 0, 34, 0, 0,
+ 44, 0, 95, 38, 39, 0, 20, 45, 116, 90,
+};
+/* aKWNext[] forms the hash collision chain. If aKWNext[i]==0
+** then the i-th keyword has no more hash collisions. Otherwise,
+** the next keyword with the same hash is aKWNext[i]-1. */
+static const unsigned char aKWNext[145] = {
+ 0, 0, 0, 0, 4, 0, 43, 0, 0, 103, 111, 0, 0,
+ 0, 2, 0, 0, 141, 0, 0, 0, 13, 0, 0, 0, 0,
+ 139, 0, 0, 118, 52, 0, 0, 135, 12, 0, 0, 62, 0,
+ 136, 0, 131, 0, 0, 36, 0, 0, 28, 77, 0, 0, 0,
+ 0, 59, 0, 47, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 69, 0, 0, 0, 0, 0, 144, 3, 0, 58, 0, 1,
+ 75, 0, 0, 0, 31, 0, 0, 0, 0, 0, 0, 64, 66,
+ 63, 0, 0, 0, 0, 46, 0, 16, 0, 115, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 81, 97, 0, 8, 0, 109,
+ 21, 7, 67, 0, 79, 93, 117, 0, 0, 68, 0, 0, 96,
+ 0, 55, 0, 76, 0, 92, 32, 33, 57, 25, 0, 98, 0,
+ 0, 87,
+};
+/* aKWLen[i] is the length (in bytes) of the i-th keyword */
+static const unsigned char aKWLen[145] = {
+ 7, 7, 5, 4, 6, 4, 5, 3, 6, 7, 3, 6, 6,
+ 7, 7, 3, 8, 2, 6, 5, 4, 4, 3, 10, 4, 7,
+ 6, 9, 4, 2, 6, 5, 9, 9, 4, 7, 3, 2, 4,
+ 4, 6, 11, 6, 2, 7, 5, 5, 9, 6, 10, 4, 6,
+ 2, 3, 7, 5, 9, 6, 6, 4, 5, 5, 10, 6, 5,
+ 7, 4, 5, 7, 6, 7, 7, 6, 5, 7, 3, 7, 4,
+ 7, 6, 12, 9, 4, 6, 5, 4, 7, 6, 5, 6, 6,
+ 7, 6, 4, 5, 9, 5, 6, 3, 8, 8, 2, 13, 2,
+ 2, 4, 6, 6, 8, 5, 17, 12, 7, 9, 4, 9, 4,
+ 4, 6, 7, 5, 9, 4, 4, 5, 2, 5, 8, 6, 4,
+ 5, 8, 4, 3, 9, 5, 5, 6, 4, 6, 2, 2, 9,
+ 3, 7,
+};
+/* aKWOffset[i] is the index into zKWText[] of the start of
+** the text for the i-th keyword. */
+static const unsigned short int aKWOffset[145] = {
+ 0, 2, 2, 8, 9, 14, 16, 20, 23, 25, 25, 29, 33,
+ 36, 41, 46, 48, 53, 54, 59, 62, 65, 67, 69, 78, 81,
+ 86, 90, 90, 94, 99, 101, 105, 111, 119, 123, 123, 123, 126,
+ 129, 132, 137, 142, 146, 147, 152, 156, 160, 168, 174, 181, 184,
+ 184, 187, 189, 195, 198, 206, 211, 216, 219, 222, 226, 236, 239,
+ 244, 244, 248, 252, 259, 265, 271, 277, 277, 283, 284, 288, 295,
+ 299, 306, 312, 324, 333, 335, 341, 346, 348, 355, 360, 365, 371,
+ 377, 382, 388, 392, 395, 404, 408, 414, 416, 423, 424, 431, 433,
+ 435, 444, 448, 454, 460, 468, 473, 473, 473, 489, 498, 501, 510,
+ 513, 517, 522, 529, 534, 543, 547, 550, 555, 557, 561, 569, 575,
+ 578, 583, 591, 591, 595, 604, 609, 614, 620, 623, 626, 629, 631,
+ 636, 640,
+};
+/* aKWCode[i] is the parser symbol code for the i-th keyword */
+static const unsigned char aKWCode[145] = {
+ TK_REINDEX, TK_INDEXED, TK_INDEX, TK_DESC, TK_ESCAPE,
+ TK_EACH, TK_CHECK, TK_KEY, TK_BEFORE, TK_FOREIGN,
+ TK_FOR, TK_IGNORE, TK_LIKE_KW, TK_EXPLAIN, TK_INSTEAD,
+ TK_ADD, TK_DATABASE, TK_AS, TK_SELECT, TK_TABLE,
+ TK_JOIN_KW, TK_THEN, TK_END, TK_DEFERRABLE, TK_ELSE,
+ TK_EXCLUDE, TK_DELETE, TK_TEMP, TK_TEMP, TK_OR,
+ TK_ISNULL, TK_NULLS, TK_SAVEPOINT, TK_INTERSECT, TK_TIES,
+ TK_NOTNULL, TK_NOT, TK_NO, TK_NULL, TK_LIKE_KW,
+ TK_EXCEPT, TK_TRANSACTION,TK_ACTION, TK_ON, TK_JOIN_KW,
+ TK_ALTER, TK_RAISE, TK_EXCLUSIVE, TK_EXISTS, TK_CONSTRAINT,
+ TK_INTO, TK_OFFSET, TK_OF, TK_SET, TK_TRIGGER,
+ TK_RANGE, TK_GENERATED, TK_DETACH, TK_HAVING, TK_LIKE_KW,
+ TK_BEGIN, TK_JOIN_KW, TK_REFERENCES, TK_UNIQUE, TK_QUERY,
+ TK_WITHOUT, TK_WITH, TK_JOIN_KW, TK_RELEASE, TK_ATTACH,
+ TK_BETWEEN, TK_NOTHING, TK_GROUPS, TK_GROUP, TK_CASCADE,
+ TK_ASC, TK_DEFAULT, TK_CASE, TK_COLLATE, TK_CREATE,
+ TK_CTIME_KW, TK_IMMEDIATE, TK_JOIN, TK_INSERT, TK_MATCH,
+ TK_PLAN, TK_ANALYZE, TK_PRAGMA, TK_ABORT, TK_UPDATE,
+ TK_VALUES, TK_VIRTUAL, TK_ALWAYS, TK_WHEN, TK_WHERE,
+ TK_RECURSIVE, TK_AFTER, TK_RENAME, TK_AND, TK_DEFERRED,
+ TK_DISTINCT, TK_IS, TK_AUTOINCR, TK_TO, TK_IN,
+ TK_CAST, TK_COLUMNKW, TK_COMMIT, TK_CONFLICT, TK_JOIN_KW,
+ TK_CTIME_KW, TK_CTIME_KW, TK_CURRENT, TK_PARTITION, TK_DROP,
+ TK_PRECEDING, TK_FAIL, TK_LAST, TK_FILTER, TK_REPLACE,
+ TK_FIRST, TK_FOLLOWING, TK_FROM, TK_JOIN_KW, TK_LIMIT,
+ TK_IF, TK_ORDER, TK_RESTRICT, TK_OTHERS, TK_OVER,
+ TK_JOIN_KW, TK_ROLLBACK, TK_ROWS, TK_ROW, TK_UNBOUNDED,
+ TK_UNION, TK_USING, TK_VACUUM, TK_VIEW, TK_WINDOW,
+ TK_DO, TK_BY, TK_INITIALLY, TK_ALL, TK_PRIMARY,
+};
+/* Hash table decoded:
+** 0: INSERT
+** 1: IS
+** 2: ROLLBACK TRIGGER
+** 3: IMMEDIATE
+** 4: PARTITION
+** 5: TEMP
+** 6:
+** 7:
+** 8: VALUES WITHOUT
+** 9:
+** 10: MATCH
+** 11: NOTHING
+** 12:
+** 13: OF
+** 14: TIES IGNORE
+** 15: PLAN
+** 16: INSTEAD INDEXED
+** 17:
+** 18: TRANSACTION RIGHT
+** 19: WHEN
+** 20: SET HAVING
+** 21: IF
+** 22: ROWS
+** 23: SELECT
+** 24:
+** 25:
+** 26: VACUUM SAVEPOINT
+** 27:
+** 28: LIKE UNION VIRTUAL REFERENCES
+** 29: RESTRICT
+** 30:
+** 31: THEN REGEXP
+** 32: TO
+** 33:
+** 34: BEFORE
+** 35:
+** 36:
+** 37: FOLLOWING COLLATE CASCADE
+** 38: CREATE
+** 39:
+** 40: CASE REINDEX
+** 41: EACH
+** 42:
+** 43: QUERY
+** 44: AND ADD
+** 45: PRIMARY ANALYZE
+** 46:
+** 47: ROW ASC DETACH
+** 48: CURRENT_TIME CURRENT_DATE
+** 49:
+** 50:
+** 51: EXCLUSIVE TEMPORARY
+** 52:
+** 53: DEFERRED
+** 54: DEFERRABLE
+** 55:
+** 56: DATABASE
+** 57:
+** 58: DELETE VIEW GENERATED
+** 59: ATTACH
+** 60: END
+** 61: EXCLUDE
+** 62: ESCAPE DESC
+** 63: GLOB
+** 64: WINDOW ELSE
+** 65: COLUMN
+** 66: FIRST
+** 67:
+** 68: GROUPS ALL
+** 69: DISTINCT DROP KEY
+** 70: BETWEEN
+** 71: INITIALLY
+** 72: BEGIN
+** 73: FILTER CHECK ACTION
+** 74: GROUP INDEX
+** 75:
+** 76: EXISTS DEFAULT
+** 77:
+** 78: FOR CURRENT_TIMESTAMP
+** 79: EXCEPT
+** 80:
+** 81: CROSS
+** 82:
+** 83:
+** 84:
+** 85: CAST
+** 86: FOREIGN AUTOINCREMENT
+** 87: COMMIT
+** 88: CURRENT AFTER ALTER
+** 89: FULL FAIL CONFLICT
+** 90: EXPLAIN
+** 91: CONSTRAINT
+** 92: FROM ALWAYS
+** 93:
+** 94: ABORT
+** 95:
+** 96: AS DO
+** 97: REPLACE WITH RELEASE
+** 98: BY RENAME
+** 99: RANGE RAISE
+** 100: OTHERS
+** 101: USING NULLS
+** 102: PRAGMA
+** 103: JOIN ISNULL OFFSET
+** 104: NOT
+** 105: OR LAST LEFT
+** 106: LIMIT
+** 107:
+** 108:
+** 109: IN
+** 110: INTO
+** 111: OVER RECURSIVE
+** 112: ORDER OUTER
+** 113:
+** 114: INTERSECT UNBOUNDED
+** 115:
+** 116:
+** 117: ON
+** 118:
+** 119: WHERE
+** 120: NO INNER
+** 121: NULL
+** 122:
+** 123: TABLE
+** 124: NATURAL NOTNULL
+** 125: PRECEDING
+** 126: UPDATE UNIQUE
+*/
+/* Check to see if z[0..n-1] is a keyword. If it is, write the
+** parser symbol code for that keyword into *pType. Always
+** return the integer n (the length of the token). */
+static int keywordCode(const char *z, int n, int *pType){
+ int i, j;
+ const char *zKW;
+ if( n>=2 ){
+ i = ((charMap(z[0])*4) ^ (charMap(z[n-1])*3) ^ n) % 127;
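+    /* The hash mixes the first character, the last character, and the
+    ** length; aKWHash[] yields a 1-based index into the keyword tables,
+    ** with 0 marking an empty bucket (hence the -1 below). */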
+ for(i=((int)aKWHash[i])-1; i>=0; i=((int)aKWNext[i])-1){
+ if( aKWLen[i]!=n ) continue;
+ zKW = &zKWText[aKWOffset[i]];
+#ifdef SQLITE_ASCII
+ if( (z[0]&~0x20)!=zKW[0] ) continue;
+ if( (z[1]&~0x20)!=zKW[1] ) continue;
+ j = 2;
+ while( j<n && (z[j]&~0x20)==zKW[j] ){ j++; }
+#endif
+#ifdef SQLITE_EBCDIC
+ if( toupper(z[0])!=zKW[0] ) continue;
+ if( toupper(z[1])!=zKW[1] ) continue;
+ j = 2;
+ while( j<n && toupper(z[j])==zKW[j] ){ j++; }
+#endif
+ if( j<n ) continue;
+ testcase( i==0 ); /* REINDEX */
+ testcase( i==1 ); /* INDEXED */
+ testcase( i==2 ); /* INDEX */
+ testcase( i==3 ); /* DESC */
+ testcase( i==4 ); /* ESCAPE */
+ testcase( i==5 ); /* EACH */
+ testcase( i==6 ); /* CHECK */
+ testcase( i==7 ); /* KEY */
+ testcase( i==8 ); /* BEFORE */
+ testcase( i==9 ); /* FOREIGN */
+ testcase( i==10 ); /* FOR */
+ testcase( i==11 ); /* IGNORE */
+ testcase( i==12 ); /* REGEXP */
+ testcase( i==13 ); /* EXPLAIN */
+ testcase( i==14 ); /* INSTEAD */
+ testcase( i==15 ); /* ADD */
+ testcase( i==16 ); /* DATABASE */
+ testcase( i==17 ); /* AS */
+ testcase( i==18 ); /* SELECT */
+ testcase( i==19 ); /* TABLE */
+ testcase( i==20 ); /* LEFT */
+ testcase( i==21 ); /* THEN */
+ testcase( i==22 ); /* END */
+ testcase( i==23 ); /* DEFERRABLE */
+ testcase( i==24 ); /* ELSE */
+ testcase( i==25 ); /* EXCLUDE */
+ testcase( i==26 ); /* DELETE */
+ testcase( i==27 ); /* TEMPORARY */
+ testcase( i==28 ); /* TEMP */
+ testcase( i==29 ); /* OR */
+ testcase( i==30 ); /* ISNULL */
+ testcase( i==31 ); /* NULLS */
+ testcase( i==32 ); /* SAVEPOINT */
+ testcase( i==33 ); /* INTERSECT */
+ testcase( i==34 ); /* TIES */
+ testcase( i==35 ); /* NOTNULL */
+ testcase( i==36 ); /* NOT */
+ testcase( i==37 ); /* NO */
+ testcase( i==38 ); /* NULL */
+ testcase( i==39 ); /* LIKE */
+ testcase( i==40 ); /* EXCEPT */
+ testcase( i==41 ); /* TRANSACTION */
+ testcase( i==42 ); /* ACTION */
+ testcase( i==43 ); /* ON */
+ testcase( i==44 ); /* NATURAL */
+ testcase( i==45 ); /* ALTER */
+ testcase( i==46 ); /* RAISE */
+ testcase( i==47 ); /* EXCLUSIVE */
+ testcase( i==48 ); /* EXISTS */
+ testcase( i==49 ); /* CONSTRAINT */
+ testcase( i==50 ); /* INTO */
+ testcase( i==51 ); /* OFFSET */
+ testcase( i==52 ); /* OF */
+ testcase( i==53 ); /* SET */
+ testcase( i==54 ); /* TRIGGER */
+ testcase( i==55 ); /* RANGE */
+ testcase( i==56 ); /* GENERATED */
+ testcase( i==57 ); /* DETACH */
+ testcase( i==58 ); /* HAVING */
+ testcase( i==59 ); /* GLOB */
+ testcase( i==60 ); /* BEGIN */
+ testcase( i==61 ); /* INNER */
+ testcase( i==62 ); /* REFERENCES */
+ testcase( i==63 ); /* UNIQUE */
+ testcase( i==64 ); /* QUERY */
+ testcase( i==65 ); /* WITHOUT */
+ testcase( i==66 ); /* WITH */
+ testcase( i==67 ); /* OUTER */
+ testcase( i==68 ); /* RELEASE */
+ testcase( i==69 ); /* ATTACH */
+ testcase( i==70 ); /* BETWEEN */
+ testcase( i==71 ); /* NOTHING */
+ testcase( i==72 ); /* GROUPS */
+ testcase( i==73 ); /* GROUP */
+ testcase( i==74 ); /* CASCADE */
+ testcase( i==75 ); /* ASC */
+ testcase( i==76 ); /* DEFAULT */
+ testcase( i==77 ); /* CASE */
+ testcase( i==78 ); /* COLLATE */
+ testcase( i==79 ); /* CREATE */
+ testcase( i==80 ); /* CURRENT_DATE */
+ testcase( i==81 ); /* IMMEDIATE */
+ testcase( i==82 ); /* JOIN */
+ testcase( i==83 ); /* INSERT */
+ testcase( i==84 ); /* MATCH */
+ testcase( i==85 ); /* PLAN */
+ testcase( i==86 ); /* ANALYZE */
+ testcase( i==87 ); /* PRAGMA */
+ testcase( i==88 ); /* ABORT */
+ testcase( i==89 ); /* UPDATE */
+ testcase( i==90 ); /* VALUES */
+ testcase( i==91 ); /* VIRTUAL */
+ testcase( i==92 ); /* ALWAYS */
+ testcase( i==93 ); /* WHEN */
+ testcase( i==94 ); /* WHERE */
+ testcase( i==95 ); /* RECURSIVE */
+ testcase( i==96 ); /* AFTER */
+ testcase( i==97 ); /* RENAME */
+ testcase( i==98 ); /* AND */
+ testcase( i==99 ); /* DEFERRED */
+ testcase( i==100 ); /* DISTINCT */
+ testcase( i==101 ); /* IS */
+ testcase( i==102 ); /* AUTOINCREMENT */
+ testcase( i==103 ); /* TO */
+ testcase( i==104 ); /* IN */
+ testcase( i==105 ); /* CAST */
+ testcase( i==106 ); /* COLUMN */
+ testcase( i==107 ); /* COMMIT */
+ testcase( i==108 ); /* CONFLICT */
+ testcase( i==109 ); /* CROSS */
+ testcase( i==110 ); /* CURRENT_TIMESTAMP */
+ testcase( i==111 ); /* CURRENT_TIME */
+ testcase( i==112 ); /* CURRENT */
+ testcase( i==113 ); /* PARTITION */
+ testcase( i==114 ); /* DROP */
+ testcase( i==115 ); /* PRECEDING */
+ testcase( i==116 ); /* FAIL */
+ testcase( i==117 ); /* LAST */
+ testcase( i==118 ); /* FILTER */
+ testcase( i==119 ); /* REPLACE */
+ testcase( i==120 ); /* FIRST */
+ testcase( i==121 ); /* FOLLOWING */
+ testcase( i==122 ); /* FROM */
+ testcase( i==123 ); /* FULL */
+ testcase( i==124 ); /* LIMIT */
+ testcase( i==125 ); /* IF */
+ testcase( i==126 ); /* ORDER */
+ testcase( i==127 ); /* RESTRICT */
+ testcase( i==128 ); /* OTHERS */
+ testcase( i==129 ); /* OVER */
+ testcase( i==130 ); /* RIGHT */
+ testcase( i==131 ); /* ROLLBACK */
+ testcase( i==132 ); /* ROWS */
+ testcase( i==133 ); /* ROW */
+ testcase( i==134 ); /* UNBOUNDED */
+ testcase( i==135 ); /* UNION */
+ testcase( i==136 ); /* USING */
+ testcase( i==137 ); /* VACUUM */
+ testcase( i==138 ); /* VIEW */
+ testcase( i==139 ); /* WINDOW */
+ testcase( i==140 ); /* DO */
+ testcase( i==141 ); /* BY */
+ testcase( i==142 ); /* INITIALLY */
+ testcase( i==143 ); /* ALL */
+ testcase( i==144 ); /* PRIMARY */
+ *pType = aKWCode[i];
+ break;
+ }
+ }
+ return n;
+}
+SQLITE_PRIVATE int sqlite3KeywordCode(const unsigned char *z, int n){
+ int id = TK_ID;
+ keywordCode((char*)z, n, &id);
+ return id;
+}
+#define SQLITE_N_KEYWORD 145
+SQLITE_API int sqlite3_keyword_name(int i,const char **pzName,int *pnName){
+ if( i<0 || i>=SQLITE_N_KEYWORD ) return SQLITE_ERROR;
+ *pzName = zKWText + aKWOffset[i];
+ *pnName = aKWLen[i];
+ return SQLITE_OK;
+}
+SQLITE_API int sqlite3_keyword_count(void){ return SQLITE_N_KEYWORD; }
+SQLITE_API int sqlite3_keyword_check(const char *zName, int nName){
+ return TK_ID!=sqlite3KeywordCode((const u8*)zName, nName);
+}
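+
+/* Illustrative sketch (not part of the upstream source): enumerating the
+** keyword table through the three public APIs defined above.  Note that
+** the name returned by sqlite3_keyword_name() is NOT zero-terminated;
+** the length written into *pnName must be honored.  The helper name is
+** hypothetical, and printf() assumes <stdio.h> is available. */
+static void exampleListKeywords(void){
+  int i;
+  int n = sqlite3_keyword_count();     /* == SQLITE_N_KEYWORD */
+  for(i=0; i<n; i++){
+    const char *zName;
+    int nName;
+    if( sqlite3_keyword_name(i, &zName, &nName)==SQLITE_OK ){
+      printf("%.*s keyword=%d\n", nName, zName,
+             sqlite3_keyword_check(zName, nName));
+    }
+  }
+}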
+
+/************** End of keywordhash.h *****************************************/
+/************** Continuing where we left off in tokenize.c *******************/
+
+
+/*
+** If X is a character that can be used in an identifier then
+** IdChar(X) will be true. Otherwise it is false.
+**
+** For ASCII, any character with the high-order bit set is
+** allowed in an identifier. For 7-bit characters,
+** sqlite3IsIdChar[X] must be 1.
+**
+** For EBCDIC, the rules are more complex but have the same
+** end result.
+**
+** Ticket #1066. The SQL standard does not allow '$' in
+** middle of identifiers. But many SQL implementations do.
+** SQLite will allow '$' in identifiers for compatibility.
+** But the feature is undocumented.
+*/
+#ifdef SQLITE_ASCII
+#define IdChar(C) ((sqlite3CtypeMap[(unsigned char)C]&0x46)!=0)
+#endif
+#ifdef SQLITE_EBCDIC
+SQLITE_PRIVATE const char sqlite3IsEbcdicIdChar[] = {
+/* x0 x1 x2 x3 x4 x5 x6 x7 x8 x9 xA xB xC xD xE xF */
+ 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, /* 4x */
+ 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, /* 5x */
+ 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, /* 6x */
+ 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, /* 7x */
+ 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, /* 8x */
+ 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, /* 9x */
+ 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, /* Ax */
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* Bx */
+ 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, /* Cx */
+ 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, /* Dx */
+ 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, /* Ex */
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, /* Fx */
+};
+#define IdChar(C) (((c=C)>=0x42 && sqlite3IsEbcdicIdChar[c-0x40]))
+#endif
+
+/* Make the IdChar function accessible from ctime.c and alter.c */
+SQLITE_PRIVATE int sqlite3IsIdChar(u8 c){ return IdChar(c); }
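+
+/* Illustrative sketch (not part of the upstream source): a helper that
+** uses the IdChar() rules above to test whether every byte of a name
+** could appear in an unquoted identifier.  It deliberately does not
+** reject a leading digit, so it is a necessary but not sufficient
+** condition for a valid identifier.  The helper name is hypothetical. */
+static int exampleAllIdChars(const char *zName){
+  int i;
+  if( zName==0 || zName[0]==0 ) return 0;
+  for(i=0; zName[i]; i++){
+    if( !sqlite3IsIdChar((u8)zName[i]) ) return 0;
+  }
+  return 1;
+}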
+
+#ifndef SQLITE_OMIT_WINDOWFUNC
+/*
+** Return the id of the next token in string (*pz). Before returning, set
+** (*pz) to point to the byte following the parsed token.
+*/
+static int getToken(const unsigned char **pz){
+ const unsigned char *z = *pz;
+ int t; /* Token type to return */
+ do {
+ z += sqlite3GetToken(z, &t);
+ }while( t==TK_SPACE );
+ if( t==TK_ID
+ || t==TK_STRING
+ || t==TK_JOIN_KW
+ || t==TK_WINDOW
+ || t==TK_OVER
+ || sqlite3ParserFallback(t)==TK_ID
+ ){
+ t = TK_ID;
+ }
+ *pz = z;
+ return t;
+}
+
+/*
+** The following three functions are called immediately after the tokenizer
+** reads the keywords WINDOW, OVER and FILTER, respectively, to determine
+** whether the token should be treated as a keyword or an SQL identifier.
+** This cannot be handled by the usual lemon %fallback method, due to
+** the ambiguity in some constructions. e.g.
+**
+** SELECT sum(x) OVER ...
+**
+** In the above, "OVER" might be a keyword, or it might be an alias for the
+** sum(x) expression. If a "%fallback ID OVER" directive were added to
+** grammar, then SQLite would always treat "OVER" as an alias, making it
+** impossible to call a window-function without a FILTER clause.
+**
+** WINDOW is treated as a keyword if:
+**
+**   * the following token is an identifier, or a keyword that can fall back
+** to being an identifier, and
+**   * the token after that one is TK_AS.
+**
+** OVER is a keyword if:
+**
+** * the previous token was TK_RP, and
+** * the next token is either TK_LP or an identifier.
+**
+** FILTER is a keyword if:
+**
+** * the previous token was TK_RP, and
+** * the next token is TK_LP.
+*/
+static int analyzeWindowKeyword(const unsigned char *z){
+ int t;
+ t = getToken(&z);
+ if( t!=TK_ID ) return TK_ID;
+ t = getToken(&z);
+ if( t!=TK_AS ) return TK_ID;
+ return TK_WINDOW;
+}
+static int analyzeOverKeyword(const unsigned char *z, int lastToken){
+ if( lastToken==TK_RP ){
+ int t = getToken(&z);
+ if( t==TK_LP || t==TK_ID ) return TK_OVER;
+ }
+ return TK_ID;
+}
+static int analyzeFilterKeyword(const unsigned char *z, int lastToken){
+ if( lastToken==TK_RP && getToken(&z)==TK_LP ){
+ return TK_FILTER;
+ }
+ return TK_ID;
+}
+#endif /* SQLITE_OMIT_WINDOWFUNC */
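+
+/* Illustrative sketch (not part of the upstream source): how the rules
+** above play out on concrete SQL, assuming the tokenizer behavior
+** described in the preceding comment block and assuming FROM does not
+** fall back to an identifier in this build:
+**
+**   SELECT sum(x) OVER (ORDER BY y) FROM t;
+**       -- previous token is ")" and next is "(", so OVER is TK_OVER
+**
+**   SELECT sum(x) OVER FROM t;
+**       -- next token is FROM, which is neither "(" nor an identifier,
+**       -- so OVER is demoted to TK_ID and parses as a column alias
+*/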
+
+/*
+** Return the length (in bytes) of the token that begins at z[0].
+** Store the token type in *tokenType before returning.
+*/
+SQLITE_PRIVATE int sqlite3GetToken(const unsigned char *z, int *tokenType){
+ int i, c;
+ switch( aiClass[*z] ){ /* Switch on the character-class of the first byte
+ ** of the token. See the comment on the CC_ defines
+ ** above. */
+ case CC_SPACE: {
+ testcase( z[0]==' ' );
+ testcase( z[0]=='\t' );
+ testcase( z[0]=='\n' );
+ testcase( z[0]=='\f' );
+ testcase( z[0]=='\r' );
+ for(i=1; sqlite3Isspace(z[i]); i++){}
+ *tokenType = TK_SPACE;
+ return i;
+ }
+ case CC_MINUS: {
+ if( z[1]=='-' ){
+ for(i=2; (c=z[i])!=0 && c!='\n'; i++){}
+ *tokenType = TK_SPACE; /* IMP: R-22934-25134 */
+ return i;
+ }
+ *tokenType = TK_MINUS;
+ return 1;
+ }
+ case CC_LP: {
+ *tokenType = TK_LP;
+ return 1;
+ }
+ case CC_RP: {
+ *tokenType = TK_RP;
+ return 1;
+ }
+ case CC_SEMI: {
+ *tokenType = TK_SEMI;
+ return 1;
+ }
+ case CC_PLUS: {
+ *tokenType = TK_PLUS;
+ return 1;
+ }
+ case CC_STAR: {
+ *tokenType = TK_STAR;
+ return 1;
+ }
+ case CC_SLASH: {
+ if( z[1]!='*' || z[2]==0 ){
+ *tokenType = TK_SLASH;
+ return 1;
+ }
+ for(i=3, c=z[2]; (c!='*' || z[i]!='/') && (c=z[i])!=0; i++){}
+ if( c ) i++;
+ *tokenType = TK_SPACE; /* IMP: R-22934-25134 */
+ return i;
+ }
+ case CC_PERCENT: {
+ *tokenType = TK_REM;
+ return 1;
+ }
+ case CC_EQ: {
+ *tokenType = TK_EQ;
+ return 1 + (z[1]=='=');
+ }
+ case CC_LT: {
+ if( (c=z[1])=='=' ){
+ *tokenType = TK_LE;
+ return 2;
+ }else if( c=='>' ){
+ *tokenType = TK_NE;
+ return 2;
+ }else if( c=='<' ){
+ *tokenType = TK_LSHIFT;
+ return 2;
+ }else{
+ *tokenType = TK_LT;
+ return 1;
+ }
+ }
+ case CC_GT: {
+ if( (c=z[1])=='=' ){
+ *tokenType = TK_GE;
+ return 2;
+ }else if( c=='>' ){
+ *tokenType = TK_RSHIFT;
+ return 2;
+ }else{
+ *tokenType = TK_GT;
+ return 1;
+ }
+ }
+ case CC_BANG: {
+ if( z[1]!='=' ){
+ *tokenType = TK_ILLEGAL;
+ return 1;
+ }else{
+ *tokenType = TK_NE;
+ return 2;
+ }
+ }
+ case CC_PIPE: {
+ if( z[1]!='|' ){
+ *tokenType = TK_BITOR;
+ return 1;
+ }else{
+ *tokenType = TK_CONCAT;
+ return 2;
+ }
+ }
+ case CC_COMMA: {
+ *tokenType = TK_COMMA;
+ return 1;
+ }
+ case CC_AND: {
+ *tokenType = TK_BITAND;
+ return 1;
+ }
+ case CC_TILDA: {
+ *tokenType = TK_BITNOT;
+ return 1;
+ }
+ case CC_QUOTE: {
+ int delim = z[0];
+ testcase( delim=='`' );
+ testcase( delim=='\'' );
+ testcase( delim=='"' );
+ for(i=1; (c=z[i])!=0; i++){
+ if( c==delim ){
+ if( z[i+1]==delim ){
+ i++;
+ }else{
+ break;
+ }
+ }
+ }
+ if( c=='\'' ){
+ *tokenType = TK_STRING;
+ return i+1;
+ }else if( c!=0 ){
+ *tokenType = TK_ID;
+ return i+1;
+ }else{
+ *tokenType = TK_ILLEGAL;
+ return i;
+ }
+ }
+ case CC_DOT: {
+#ifndef SQLITE_OMIT_FLOATING_POINT
+ if( !sqlite3Isdigit(z[1]) )
+#endif
+ {
+ *tokenType = TK_DOT;
+ return 1;
+ }
+ /* If the next character is a digit, this is a floating point
+ ** number that begins with ".". Fall thru into the next case */
+ /* no break */ deliberate_fall_through
+ }
+ case CC_DIGIT: {
+ testcase( z[0]=='0' ); testcase( z[0]=='1' ); testcase( z[0]=='2' );
+ testcase( z[0]=='3' ); testcase( z[0]=='4' ); testcase( z[0]=='5' );
+ testcase( z[0]=='6' ); testcase( z[0]=='7' ); testcase( z[0]=='8' );
+ testcase( z[0]=='9' );
+ *tokenType = TK_INTEGER;
+#ifndef SQLITE_OMIT_HEX_INTEGER
+ if( z[0]=='0' && (z[1]=='x' || z[1]=='X') && sqlite3Isxdigit(z[2]) ){
+ for(i=3; sqlite3Isxdigit(z[i]); i++){}
+ return i;
+ }
+#endif
+ for(i=0; sqlite3Isdigit(z[i]); i++){}
+#ifndef SQLITE_OMIT_FLOATING_POINT
+ if( z[i]=='.' ){
+ i++;
+ while( sqlite3Isdigit(z[i]) ){ i++; }
+ *tokenType = TK_FLOAT;
+ }
+ if( (z[i]=='e' || z[i]=='E') &&
+ ( sqlite3Isdigit(z[i+1])
+ || ((z[i+1]=='+' || z[i+1]=='-') && sqlite3Isdigit(z[i+2]))
+ )
+ ){
+ i += 2;
+ while( sqlite3Isdigit(z[i]) ){ i++; }
+ *tokenType = TK_FLOAT;
+ }
+#endif
+ while( IdChar(z[i]) ){
+ *tokenType = TK_ILLEGAL;
+ i++;
+ }
+ return i;
+ }
+ case CC_QUOTE2: {
+ for(i=1, c=z[0]; c!=']' && (c=z[i])!=0; i++){}
+ *tokenType = c==']' ? TK_ID : TK_ILLEGAL;
+ return i;
+ }
+ case CC_VARNUM: {
+ *tokenType = TK_VARIABLE;
+ for(i=1; sqlite3Isdigit(z[i]); i++){}
+ return i;
+ }
+ case CC_DOLLAR:
+ case CC_VARALPHA: {
+ int n = 0;
+ testcase( z[0]=='$' ); testcase( z[0]=='@' );
+ testcase( z[0]==':' ); testcase( z[0]=='#' );
+ *tokenType = TK_VARIABLE;
+ for(i=1; (c=z[i])!=0; i++){
+ if( IdChar(c) ){
+ n++;
+#ifndef SQLITE_OMIT_TCL_VARIABLE
+ }else if( c=='(' && n>0 ){
+ do{
+ i++;
+ }while( (c=z[i])!=0 && !sqlite3Isspace(c) && c!=')' );
+ if( c==')' ){
+ i++;
+ }else{
+ *tokenType = TK_ILLEGAL;
+ }
+ break;
+ }else if( c==':' && z[i+1]==':' ){
+ i++;
+#endif
+ }else{
+ break;
+ }
+ }
+ if( n==0 ) *tokenType = TK_ILLEGAL;
+ return i;
+ }
+ case CC_KYWD: {
+ for(i=1; aiClass[z[i]]<=CC_KYWD; i++){}
+ if( IdChar(z[i]) ){
+ /* This token started out using characters that can appear in keywords,
+ ** but z[i] is a character not allowed within keywords, so this must
+ ** be an identifier instead */
+ i++;
+ break;
+ }
+ *tokenType = TK_ID;
+ return keywordCode((char*)z, i, tokenType);
+ }
+ case CC_X: {
+#ifndef SQLITE_OMIT_BLOB_LITERAL
+ testcase( z[0]=='x' ); testcase( z[0]=='X' );
+ if( z[1]=='\'' ){
+ *tokenType = TK_BLOB;
+ for(i=2; sqlite3Isxdigit(z[i]); i++){}
+ if( z[i]!='\'' || i%2 ){
+ *tokenType = TK_ILLEGAL;
+ while( z[i] && z[i]!='\'' ){ i++; }
+ }
+ if( z[i] ) i++;
+ return i;
+ }
+#endif
+ /* If it is not a BLOB literal, then it must be an ID, since no
+ ** SQL keywords start with the letter 'x'. Fall through */
+ /* no break */ deliberate_fall_through
+ }
+ case CC_ID: {
+ i = 1;
+ break;
+ }
+ case CC_NUL: {
+ *tokenType = TK_ILLEGAL;
+ return 0;
+ }
+ default: {
+ *tokenType = TK_ILLEGAL;
+ return 1;
+ }
+ }
+ while( IdChar(z[i]) ){ i++; }
+ *tokenType = TK_ID;
+ return i;
+}
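+
+/* Illustrative sketch (not part of the upstream source): a helper that
+** counts the non-whitespace tokens in a NUL-terminated SQL string by
+** repeatedly calling sqlite3GetToken().  The helper name is
+** hypothetical. */
+static int exampleCountTokens(const char *zSql){
+  const unsigned char *z = (const unsigned char*)zSql;
+  int nTok = 0;
+  int eType;
+  while( *z ){
+    z += sqlite3GetToken(z, &eType);  /* length of the token at z */
+    if( eType!=TK_SPACE ) nTok++;     /* comments also scan as TK_SPACE */
+  }
+  return nTok;
+}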
+
+/*
+** Run the parser on the given SQL string. The parser structure is
+** passed in. An SQLITE_ status code is returned. If an error occurs
+** then an attempt is made to write an error message into
+** memory obtained from sqlite3_malloc() and to make *pzErrMsg point to that
+** error message.
+*/
+SQLITE_PRIVATE int sqlite3RunParser(Parse *pParse, const char *zSql, char **pzErrMsg){
+ int nErr = 0; /* Number of errors encountered */
+ void *pEngine; /* The LEMON-generated LALR(1) parser */
+  int n = 0;                  /* Length of the next token */
+ int tokenType; /* type of the next token */
+ int lastTokenParsed = -1; /* type of the previous token */
+ sqlite3 *db = pParse->db; /* The database connection */
+ int mxSqlLen; /* Max length of an SQL string */
+#ifdef sqlite3Parser_ENGINEALWAYSONSTACK
+ yyParser sEngine; /* Space to hold the Lemon-generated Parser object */
+#endif
+ VVA_ONLY( u8 startedWithOom = db->mallocFailed );
+
+ assert( zSql!=0 );
+ mxSqlLen = db->aLimit[SQLITE_LIMIT_SQL_LENGTH];
+ if( db->nVdbeActive==0 ){
+ AtomicStore(&db->u1.isInterrupted, 0);
+ }
+ pParse->rc = SQLITE_OK;
+ pParse->zTail = zSql;
+ assert( pzErrMsg!=0 );
+#ifdef SQLITE_DEBUG
+ if( db->flags & SQLITE_ParserTrace ){
+ printf("parser: [[[%s]]]\n", zSql);
+ sqlite3ParserTrace(stdout, "parser: ");
+ }else{
+ sqlite3ParserTrace(0, 0);
+ }
+#endif
+#ifdef sqlite3Parser_ENGINEALWAYSONSTACK
+ pEngine = &sEngine;
+ sqlite3ParserInit(pEngine, pParse);
+#else
+ pEngine = sqlite3ParserAlloc(sqlite3Malloc, pParse);
+ if( pEngine==0 ){
+ sqlite3OomFault(db);
+ return SQLITE_NOMEM_BKPT;
+ }
+#endif
+ assert( pParse->pNewTable==0 );
+ assert( pParse->pNewTrigger==0 );
+ assert( pParse->nVar==0 );
+ assert( pParse->pVList==0 );
+ pParse->pParentParse = db->pParse;
+ db->pParse = pParse;
+ while( 1 ){
+ n = sqlite3GetToken((u8*)zSql, &tokenType);
+ mxSqlLen -= n;
+ if( mxSqlLen<0 ){
+ pParse->rc = SQLITE_TOOBIG;
+ break;
+ }
+#ifndef SQLITE_OMIT_WINDOWFUNC
+ if( tokenType>=TK_WINDOW ){
+ assert( tokenType==TK_SPACE || tokenType==TK_OVER || tokenType==TK_FILTER
+ || tokenType==TK_ILLEGAL || tokenType==TK_WINDOW
+ );
+#else
+ if( tokenType>=TK_SPACE ){
+ assert( tokenType==TK_SPACE || tokenType==TK_ILLEGAL );
+#endif /* SQLITE_OMIT_WINDOWFUNC */
+ if( AtomicLoad(&db->u1.isInterrupted) ){
+ pParse->rc = SQLITE_INTERRUPT;
+ break;
+ }
+ if( tokenType==TK_SPACE ){
+ zSql += n;
+ continue;
+ }
+ if( zSql[0]==0 ){
+ /* Upon reaching the end of input, call the parser two more times
+ ** with tokens TK_SEMI and 0, in that order. */
+ if( lastTokenParsed==TK_SEMI ){
+ tokenType = 0;
+ }else if( lastTokenParsed==0 ){
+ break;
+ }else{
+ tokenType = TK_SEMI;
+ }
+ n = 0;
+#ifndef SQLITE_OMIT_WINDOWFUNC
+ }else if( tokenType==TK_WINDOW ){
+ assert( n==6 );
+ tokenType = analyzeWindowKeyword((const u8*)&zSql[6]);
+ }else if( tokenType==TK_OVER ){
+ assert( n==4 );
+ tokenType = analyzeOverKeyword((const u8*)&zSql[4], lastTokenParsed);
+ }else if( tokenType==TK_FILTER ){
+ assert( n==6 );
+ tokenType = analyzeFilterKeyword((const u8*)&zSql[6], lastTokenParsed);
+#endif /* SQLITE_OMIT_WINDOWFUNC */
+ }else{
+ sqlite3ErrorMsg(pParse, "unrecognized token: \"%.*s\"", n, zSql);
+ break;
+ }
+ }
+ pParse->sLastToken.z = zSql;
+ pParse->sLastToken.n = n;
+ sqlite3Parser(pEngine, tokenType, pParse->sLastToken);
+ lastTokenParsed = tokenType;
+ zSql += n;
+ assert( db->mallocFailed==0 || pParse->rc!=SQLITE_OK || startedWithOom );
+ if( pParse->rc!=SQLITE_OK ) break;
+ }
+ assert( nErr==0 );
+#ifdef YYTRACKMAXSTACKDEPTH
+ sqlite3_mutex_enter(sqlite3MallocMutex());
+ sqlite3StatusHighwater(SQLITE_STATUS_PARSER_STACK,
+ sqlite3ParserStackPeak(pEngine)
+ );
+ sqlite3_mutex_leave(sqlite3MallocMutex());
+#endif /* YYTRACKMAXSTACKDEPTH */
+#ifdef sqlite3Parser_ENGINEALWAYSONSTACK
+ sqlite3ParserFinalize(pEngine);
+#else
+ sqlite3ParserFree(pEngine, sqlite3_free);
+#endif
+ if( db->mallocFailed ){
+ pParse->rc = SQLITE_NOMEM_BKPT;
+ }
+ if( pParse->rc!=SQLITE_OK && pParse->rc!=SQLITE_DONE && pParse->zErrMsg==0 ){
+ pParse->zErrMsg = sqlite3MPrintf(db, "%s", sqlite3ErrStr(pParse->rc));
+ }
+ assert( pzErrMsg!=0 );
+ if( pParse->zErrMsg ){
+ *pzErrMsg = pParse->zErrMsg;
+ sqlite3_log(pParse->rc, "%s in \"%s\"",
+ *pzErrMsg, pParse->zTail);
+ pParse->zErrMsg = 0;
+ nErr++;
+ }
+ pParse->zTail = zSql;
+ if( pParse->pVdbe && pParse->nErr>0 && pParse->nested==0 ){
+ sqlite3VdbeDelete(pParse->pVdbe);
+ pParse->pVdbe = 0;
+ }
+#ifndef SQLITE_OMIT_SHARED_CACHE
+ if( pParse->nested==0 ){
+ sqlite3DbFree(db, pParse->aTableLock);
+ pParse->aTableLock = 0;
+ pParse->nTableLock = 0;
+ }
+#endif
+#ifndef SQLITE_OMIT_VIRTUALTABLE
+ sqlite3_free(pParse->apVtabLock);
+#endif
+
+ if( !IN_SPECIAL_PARSE ){
+ /* If the pParse->declareVtab flag is set, do not delete any table
+ ** structure built up in pParse->pNewTable. The calling code (see vtab.c)
+ ** will take responsibility for freeing the Table structure.
+ */
+ sqlite3DeleteTable(db, pParse->pNewTable);
+ }
+ if( !IN_RENAME_OBJECT ){
+ sqlite3DeleteTrigger(db, pParse->pNewTrigger);
+ }
+
+ if( pParse->pWithToFree ) sqlite3WithDelete(db, pParse->pWithToFree);
+ sqlite3DbFree(db, pParse->pVList);
+ while( pParse->pAinc ){
+ AutoincInfo *p = pParse->pAinc;
+ pParse->pAinc = p->pNext;
+ sqlite3DbFreeNN(db, p);
+ }
+ while( pParse->pZombieTab ){
+ Table *p = pParse->pZombieTab;
+ pParse->pZombieTab = p->pNextZombie;
+ sqlite3DeleteTable(db, p);
+ }
+ db->pParse = pParse->pParentParse;
+ pParse->pParentParse = 0;
+ assert( nErr==0 || pParse->rc!=SQLITE_OK );
+ return nErr;
+}
+
+
+#ifdef SQLITE_ENABLE_NORMALIZE
+/*
+** Insert a single space character into pStr if the current string
+** ends with an identifier
+*/
+static void addSpaceSeparator(sqlite3_str *pStr){
+ if( pStr->nChar && sqlite3IsIdChar(pStr->zText[pStr->nChar-1]) ){
+ sqlite3_str_append(pStr, " ", 1);
+ }
+}
+
+/*
+** Compute a normalization of the SQL given by zSql.  Return
+** the normalization in space obtained from sqlite3DbMalloc(). Or return
+** NULL if anything goes wrong or if zSql is NULL.
+*/
+SQLITE_PRIVATE char *sqlite3Normalize(
+ Vdbe *pVdbe, /* VM being reprepared */
+ const char *zSql /* The original SQL string */
+){
+ sqlite3 *db; /* The database connection */
+ int i; /* Next unread byte of zSql[] */
+ int n; /* length of current token */
+ int tokenType; /* type of current token */
+ int prevType = 0; /* Previous non-whitespace token */
+ int nParen; /* Number of nested levels of parentheses */
+ int iStartIN; /* Start of RHS of IN operator in z[] */
+  int nParenAtIN;       /* Value of nParen at start of RHS of IN operator */
+ u32 j; /* Bytes of normalized SQL generated so far */
+ sqlite3_str *pStr; /* The normalized SQL string under construction */
+
+ db = sqlite3VdbeDb(pVdbe);
+ tokenType = -1;
+ nParen = iStartIN = nParenAtIN = 0;
+ pStr = sqlite3_str_new(db);
+ assert( pStr!=0 ); /* sqlite3_str_new() never returns NULL */
+ for(i=0; zSql[i] && pStr->accError==0; i+=n){
+ if( tokenType!=TK_SPACE ){
+ prevType = tokenType;
+ }
+ n = sqlite3GetToken((unsigned char*)zSql+i, &tokenType);
+ if( NEVER(n<=0) ) break;
+ switch( tokenType ){
+ case TK_SPACE: {
+ break;
+ }
+ case TK_NULL: {
+ if( prevType==TK_IS || prevType==TK_NOT ){
+ sqlite3_str_append(pStr, " NULL", 5);
+ break;
+ }
+ /* Fall through */
+ }
+ case TK_STRING:
+ case TK_INTEGER:
+ case TK_FLOAT:
+ case TK_VARIABLE:
+ case TK_BLOB: {
+ sqlite3_str_append(pStr, "?", 1);
+ break;
+ }
+ case TK_LP: {
+ nParen++;
+ if( prevType==TK_IN ){
+ iStartIN = pStr->nChar;
+ nParenAtIN = nParen;
+ }
+ sqlite3_str_append(pStr, "(", 1);
+ break;
+ }
+ case TK_RP: {
+ if( iStartIN>0 && nParen==nParenAtIN ){
+ assert( pStr->nChar>=(u32)iStartIN );
+ pStr->nChar = iStartIN+1;
+ sqlite3_str_append(pStr, "?,?,?", 5);
+ iStartIN = 0;
+ }
+ nParen--;
+ sqlite3_str_append(pStr, ")", 1);
+ break;
+ }
+ case TK_ID: {
+ iStartIN = 0;
+ j = pStr->nChar;
+ if( sqlite3Isquote(zSql[i]) ){
+ char *zId = sqlite3DbStrNDup(db, zSql+i, n);
+ int nId;
+ int eType = 0;
+ if( zId==0 ) break;
+ sqlite3Dequote(zId);
+ if( zSql[i]=='"' && sqlite3VdbeUsesDoubleQuotedString(pVdbe, zId) ){
+ sqlite3_str_append(pStr, "?", 1);
+ sqlite3DbFree(db, zId);
+ break;
+ }
+ nId = sqlite3Strlen30(zId);
+ if( sqlite3GetToken((u8*)zId, &eType)==nId && eType==TK_ID ){
+ addSpaceSeparator(pStr);
+ sqlite3_str_append(pStr, zId, nId);
+ }else{
+ sqlite3_str_appendf(pStr, "\"%w\"", zId);
+ }
+ sqlite3DbFree(db, zId);
+ }else{
+ addSpaceSeparator(pStr);
+ sqlite3_str_append(pStr, zSql+i, n);
+ }
+ while( j<pStr->nChar ){
+ pStr->zText[j] = sqlite3Tolower(pStr->zText[j]);
+ j++;
+ }
+ break;
+ }
+ case TK_SELECT: {
+ iStartIN = 0;
+ /* fall through */
+ }
+ default: {
+ if( sqlite3IsIdChar(zSql[i]) ) addSpaceSeparator(pStr);
+ j = pStr->nChar;
+ sqlite3_str_append(pStr, zSql+i, n);
+ while( j<pStr->nChar ){
+ pStr->zText[j] = sqlite3Toupper(pStr->zText[j]);
+ j++;
+ }
+ break;
+ }
+ }
+ }
+ if( tokenType!=TK_SEMI ) sqlite3_str_append(pStr, ";", 1);
+ return sqlite3_str_finish(pStr);
+}
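+
+/* Illustrative sketch (not part of the upstream source): the kind of
+** output the normalizer above produces.  Literals become "?", the RHS
+** of an IN operator collapses to "(?,?,?)", identifiers are folded to
+** lower case, everything else to upper case, and a trailing ";" is
+** appended when missing.  For example (spacing approximate):
+**
+**   Input:   Select X, 'abc' From T1 Where y IN (1,2,3)
+**   Output:  SELECT x,?FROM t1 WHERE y IN(?,?,?);
+*/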
+#endif /* SQLITE_ENABLE_NORMALIZE */
+
+/************** End of tokenize.c ********************************************/
+/************** Begin file complete.c ****************************************/
+/*
+** 2001 September 15
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+** A tokenizer for SQL
+**
+** This file contains C code that implements the sqlite3_complete() API.
+** This code used to be part of the tokenize.c source file. But by
+** separating it out, the code will be automatically omitted from
+** static links that do not use it.
+*/
+/* #include "sqliteInt.h" */
+#ifndef SQLITE_OMIT_COMPLETE
+
+/*
+** This is defined in tokenize.c. We just have to import the definition.
+*/
+#ifndef SQLITE_AMALGAMATION
+#ifdef SQLITE_ASCII
+#define IdChar(C) ((sqlite3CtypeMap[(unsigned char)C]&0x46)!=0)
+#endif
+#ifdef SQLITE_EBCDIC
+SQLITE_PRIVATE const char sqlite3IsEbcdicIdChar[];
+#define IdChar(C) (((c=C)>=0x42 && sqlite3IsEbcdicIdChar[c-0x40]))
+#endif
+#endif /* SQLITE_AMALGAMATION */
+
+
+/*
+** Token types used by the sqlite3_complete() routine. See the header
+** comments on that procedure for additional information.
+*/
+#define tkSEMI 0
+#define tkWS 1
+#define tkOTHER 2
+#ifndef SQLITE_OMIT_TRIGGER
+#define tkEXPLAIN 3
+#define tkCREATE 4
+#define tkTEMP 5
+#define tkTRIGGER 6
+#define tkEND 7
+#endif
+
+/*
+** Return TRUE if the given SQL string ends in a semicolon.
+**
+** Special handling is required for CREATE TRIGGER statements.
+** Whenever the CREATE TRIGGER keywords are seen, the statement
+** must end with ";END;".
+**
+** This implementation uses a state machine with 8 states:
+**
+** (0) INVALID We have not yet seen a non-whitespace character.
+**
+** (1) START At the beginning or end of an SQL statement. This routine
+** returns 1 if it ends in the START state and 0 if it ends
+** in any other state.
+**
+** (2) NORMAL We are in the middle of a statement which ends with a single
+** semicolon.
+**
+** (3) EXPLAIN The keyword EXPLAIN has been seen at the beginning of
+** a statement.
+**
+** (4) CREATE The keyword CREATE has been seen at the beginning of a
+** statement, possibly preceded by EXPLAIN and/or followed by
+** TEMP or TEMPORARY
+**
+** (5) TRIGGER We are in the middle of a trigger definition that must be
+** ended by a semicolon, the keyword END, and another semicolon.
+**
+** (6) SEMI We've seen the first semicolon in the ";END;" that occurs at
+** the end of a trigger definition.
+**
+** (7) END We've seen the ";END" of the ";END;" that occurs at the end
+** of a trigger definition.
+**
+** Transitions between states above are determined by tokens extracted
+** from the input. The following tokens are significant:
+**
+** (0) tkSEMI A semicolon.
+** (1) tkWS Whitespace.
+** (2) tkOTHER Any other SQL token.
+** (3) tkEXPLAIN The "explain" keyword.
+** (4) tkCREATE The "create" keyword.
+** (5) tkTEMP The "temp" or "temporary" keyword.
+** (6) tkTRIGGER The "trigger" keyword.
+** (7) tkEND The "end" keyword.
+**
+** Whitespace never causes a state transition and is always ignored.
+** This means that an SQL string consisting only of whitespace is invalid.
+**
+** If we compile with SQLITE_OMIT_TRIGGER, all of the computation needed
+** to recognize the end of a trigger can be omitted. All we have to do
+** is look for a semicolon that is not part of a string or comment.
+*/
+SQLITE_API int sqlite3_complete(const char *zSql){
+ u8 state = 0; /* Current state, using numbers defined in header comment */
+ u8 token; /* Value of the next token */
+
+#ifndef SQLITE_OMIT_TRIGGER
+  /* A complex state machine used to detect the end of a CREATE TRIGGER
+ ** statement. This is the normal case.
+ */
+ static const u8 trans[8][8] = {
+ /* Token: */
+ /* State: ** SEMI WS OTHER EXPLAIN CREATE TEMP TRIGGER END */
+ /* 0 INVALID: */ { 1, 0, 2, 3, 4, 2, 2, 2, },
+ /* 1 START: */ { 1, 1, 2, 3, 4, 2, 2, 2, },
+ /* 2 NORMAL: */ { 1, 2, 2, 2, 2, 2, 2, 2, },
+ /* 3 EXPLAIN: */ { 1, 3, 3, 2, 4, 2, 2, 2, },
+ /* 4 CREATE: */ { 1, 4, 2, 2, 2, 4, 5, 2, },
+ /* 5 TRIGGER: */ { 6, 5, 5, 5, 5, 5, 5, 5, },
+ /* 6 SEMI: */ { 6, 6, 5, 5, 5, 5, 5, 7, },
+ /* 7 END: */ { 1, 7, 5, 5, 5, 5, 5, 5, },
+ };
+#else
+  /* If triggers are not supported by this compile then the state machine
+ ** used to detect the end of a statement is much simpler
+ */
+ static const u8 trans[3][3] = {
+ /* Token: */
+ /* State: ** SEMI WS OTHER */
+ /* 0 INVALID: */ { 1, 0, 2, },
+ /* 1 START: */ { 1, 1, 2, },
+ /* 2 NORMAL: */ { 1, 2, 2, },
+ };
+#endif /* SQLITE_OMIT_TRIGGER */
+
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( zSql==0 ){
+ (void)SQLITE_MISUSE_BKPT;
+ return 0;
+ }
+#endif
+
+ while( *zSql ){
+ switch( *zSql ){
+ case ';': { /* A semicolon */
+ token = tkSEMI;
+ break;
+ }
+ case ' ':
+ case '\r':
+ case '\t':
+ case '\n':
+ case '\f': { /* White space is ignored */
+ token = tkWS;
+ break;
+ }
+ case '/': { /* C-style comments */
+ if( zSql[1]!='*' ){
+ token = tkOTHER;
+ break;
+ }
+ zSql += 2;
+ while( zSql[0] && (zSql[0]!='*' || zSql[1]!='/') ){ zSql++; }
+ if( zSql[0]==0 ) return 0;
+ zSql++;
+ token = tkWS;
+ break;
+ }
+ case '-': { /* SQL-style comments from "--" to end of line */
+ if( zSql[1]!='-' ){
+ token = tkOTHER;
+ break;
+ }
+ while( *zSql && *zSql!='\n' ){ zSql++; }
+ if( *zSql==0 ) return state==1;
+ token = tkWS;
+ break;
+ }
+ case '[': { /* Microsoft-style identifiers in [...] */
+ zSql++;
+ while( *zSql && *zSql!=']' ){ zSql++; }
+ if( *zSql==0 ) return 0;
+ token = tkOTHER;
+ break;
+ }
+ case '`': /* Grave-accent quoted symbols used by MySQL */
+ case '"': /* single- and double-quoted strings */
+ case '\'': {
+ int c = *zSql;
+ zSql++;
+ while( *zSql && *zSql!=c ){ zSql++; }
+ if( *zSql==0 ) return 0;
+ token = tkOTHER;
+ break;
+ }
+ default: {
+#ifdef SQLITE_EBCDIC
+ unsigned char c;
+#endif
+ if( IdChar((u8)*zSql) ){
+ /* Keywords and unquoted identifiers */
+ int nId;
+ for(nId=1; IdChar(zSql[nId]); nId++){}
+#ifdef SQLITE_OMIT_TRIGGER
+ token = tkOTHER;
+#else
+ switch( *zSql ){
+ case 'c': case 'C': {
+ if( nId==6 && sqlite3StrNICmp(zSql, "create", 6)==0 ){
+ token = tkCREATE;
+ }else{
+ token = tkOTHER;
+ }
+ break;
+ }
+ case 't': case 'T': {
+ if( nId==7 && sqlite3StrNICmp(zSql, "trigger", 7)==0 ){
+ token = tkTRIGGER;
+ }else if( nId==4 && sqlite3StrNICmp(zSql, "temp", 4)==0 ){
+ token = tkTEMP;
+ }else if( nId==9 && sqlite3StrNICmp(zSql, "temporary", 9)==0 ){
+ token = tkTEMP;
+ }else{
+ token = tkOTHER;
+ }
+ break;
+ }
+ case 'e': case 'E': {
+ if( nId==3 && sqlite3StrNICmp(zSql, "end", 3)==0 ){
+ token = tkEND;
+ }else
+#ifndef SQLITE_OMIT_EXPLAIN
+ if( nId==7 && sqlite3StrNICmp(zSql, "explain", 7)==0 ){
+ token = tkEXPLAIN;
+ }else
+#endif
+ {
+ token = tkOTHER;
+ }
+ break;
+ }
+ default: {
+ token = tkOTHER;
+ break;
+ }
+ }
+#endif /* SQLITE_OMIT_TRIGGER */
+ zSql += nId-1;
+ }else{
+ /* Operators and special symbols */
+ token = tkOTHER;
+ }
+ break;
+ }
+ }
+ state = trans[state][token];
+ zSql++;
+ }
+ return state==1;
+}
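+
+/* Illustrative sketch (not part of the upstream source): expected
+** results from sqlite3_complete(), following the state machine above:
+**
+**   sqlite3_complete("SELECT 1")                                 -> 0
+**   sqlite3_complete("SELECT 1;")                                -> 1
+**   sqlite3_complete("CREATE TRIGGER r1 AFTER INSERT ON t1 "
+**                    "BEGIN SELECT 1;")                          -> 0
+**   sqlite3_complete("CREATE TRIGGER r1 AFTER INSERT ON t1 "
+**                    "BEGIN SELECT 1; END;")                     -> 1
+*/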
+
+#ifndef SQLITE_OMIT_UTF16
+/*
+** This routine is the same as the sqlite3_complete() routine described
+** above, except that the parameter is required to be UTF-16 encoded, not
+** UTF-8.
+*/
+SQLITE_API int sqlite3_complete16(const void *zSql){
+ sqlite3_value *pVal;
+ char const *zSql8;
+ int rc;
+
+#ifndef SQLITE_OMIT_AUTOINIT
+ rc = sqlite3_initialize();
+ if( rc ) return rc;
+#endif
+ pVal = sqlite3ValueNew(0);
+ sqlite3ValueSetStr(pVal, -1, zSql, SQLITE_UTF16NATIVE, SQLITE_STATIC);
+ zSql8 = sqlite3ValueText(pVal, SQLITE_UTF8);
+ if( zSql8 ){
+ rc = sqlite3_complete(zSql8);
+ }else{
+ rc = SQLITE_NOMEM_BKPT;
+ }
+ sqlite3ValueFree(pVal);
+ return rc & 0xff;
+}
+#endif /* SQLITE_OMIT_UTF16 */
+#endif /* SQLITE_OMIT_COMPLETE */
+
+/************** End of complete.c ********************************************/
+/************** Begin file main.c ********************************************/
+/*
+** 2001 September 15
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+** Main file for the SQLite library. The routines in this file
+** implement the programmer interface to the library. Routines in
+** other files are for internal use by SQLite and should not be
+** accessed by users of the library.
+*/
+/* #include "sqliteInt.h" */
+
+#ifdef SQLITE_ENABLE_FTS3
+/************** Include fts3.h in the middle of main.c ***********************/
+/************** Begin file fts3.h ********************************************/
+/*
+** 2006 Oct 10
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+******************************************************************************
+**
+** This header file is used by programs that want to link against the
+** FTS3 library. All it does is declare the sqlite3Fts3Init() interface.
+*/
+/* #include "sqlite3.h" */
+
+#if 0
+extern "C" {
+#endif /* __cplusplus */
+
+SQLITE_PRIVATE int sqlite3Fts3Init(sqlite3 *db);
+
+#if 0
+} /* extern "C" */
+#endif /* __cplusplus */
+
+/************** End of fts3.h ************************************************/
+/************** Continuing where we left off in main.c ***********************/
+#endif
+#ifdef SQLITE_ENABLE_RTREE
+/************** Include rtree.h in the middle of main.c **********************/
+/************** Begin file rtree.h *******************************************/
+/*
+** 2008 May 26
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+******************************************************************************
+**
+** This header file is used by programs that want to link against the
+** RTREE library. All it does is declare the sqlite3RtreeInit() interface.
+*/
+/* #include "sqlite3.h" */
+
+#ifdef SQLITE_OMIT_VIRTUALTABLE
+# undef SQLITE_ENABLE_RTREE
+#endif
+
+#if 0
+extern "C" {
+#endif /* __cplusplus */
+
+SQLITE_PRIVATE int sqlite3RtreeInit(sqlite3 *db);
+
+#if 0
+} /* extern "C" */
+#endif /* __cplusplus */
+
+/************** End of rtree.h ***********************************************/
+/************** Continuing where we left off in main.c ***********************/
+#endif
+#if defined(SQLITE_ENABLE_ICU) || defined(SQLITE_ENABLE_ICU_COLLATIONS)
+/************** Include sqliteicu.h in the middle of main.c ******************/
+/************** Begin file sqliteicu.h ***************************************/
+/*
+** 2008 May 26
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+******************************************************************************
+**
+** This header file is used by programs that want to link against the
+** ICU extension. All it does is declare the sqlite3IcuInit() interface.
+*/
+/* #include "sqlite3.h" */
+
+#if 0
+extern "C" {
+#endif /* __cplusplus */
+
+SQLITE_PRIVATE int sqlite3IcuInit(sqlite3 *db);
+
+#if 0
+} /* extern "C" */
+#endif /* __cplusplus */
+
+/************** End of sqliteicu.h *******************************************/
+/************** Continuing where we left off in main.c ***********************/
+#endif
+
+/*
+** This is an extension initializer that is a no-op and always
+** succeeds, except that it fails if the fault-simulation is set
+** to 500.
+*/
+static int sqlite3TestExtInit(sqlite3 *db){
+ (void)db;
+ return sqlite3FaultSim(500);
+}
+
+
+/*
+** Forward declarations of external module initializer functions
+** for modules that need them.
+*/
+#ifdef SQLITE_ENABLE_FTS1
+SQLITE_PRIVATE int sqlite3Fts1Init(sqlite3*);
+#endif
+#ifdef SQLITE_ENABLE_FTS2
+SQLITE_PRIVATE int sqlite3Fts2Init(sqlite3*);
+#endif
+#ifdef SQLITE_ENABLE_FTS5
+SQLITE_PRIVATE int sqlite3Fts5Init(sqlite3*);
+#endif
+#ifdef SQLITE_ENABLE_JSON1
+SQLITE_PRIVATE int sqlite3Json1Init(sqlite3*);
+#endif
+#ifdef SQLITE_ENABLE_STMTVTAB
+SQLITE_PRIVATE int sqlite3StmtVtabInit(sqlite3*);
+#endif
+
+/*
+** An array of pointers to extension initializer functions for
+** built-in extensions.
+*/
+static int (*const sqlite3BuiltinExtensions[])(sqlite3*) = {
+#ifdef SQLITE_ENABLE_FTS1
+ sqlite3Fts1Init,
+#endif
+#ifdef SQLITE_ENABLE_FTS2
+ sqlite3Fts2Init,
+#endif
+#ifdef SQLITE_ENABLE_FTS3
+ sqlite3Fts3Init,
+#endif
+#ifdef SQLITE_ENABLE_FTS5
+ sqlite3Fts5Init,
+#endif
+#if defined(SQLITE_ENABLE_ICU) || defined(SQLITE_ENABLE_ICU_COLLATIONS)
+ sqlite3IcuInit,
+#endif
+#ifdef SQLITE_ENABLE_RTREE
+ sqlite3RtreeInit,
+#endif
+#ifdef SQLITE_ENABLE_DBPAGE_VTAB
+ sqlite3DbpageRegister,
+#endif
+#ifdef SQLITE_ENABLE_DBSTAT_VTAB
+ sqlite3DbstatRegister,
+#endif
+ sqlite3TestExtInit,
+#ifdef SQLITE_ENABLE_JSON1
+ sqlite3Json1Init,
+#endif
+#ifdef SQLITE_ENABLE_STMTVTAB
+ sqlite3StmtVtabInit,
+#endif
+#ifdef SQLITE_ENABLE_BYTECODE_VTAB
+ sqlite3VdbeBytecodeVtabInit,
+#endif
+};
+
+#ifndef SQLITE_AMALGAMATION
+/* IMPLEMENTATION-OF: R-46656-45156 The sqlite3_version[] string constant
+** contains the text of SQLITE_VERSION macro.
+*/
+SQLITE_API const char sqlite3_version[] = SQLITE_VERSION;
+#endif
+
+/* IMPLEMENTATION-OF: R-53536-42575 The sqlite3_libversion() function returns
+** a pointer to the sqlite3_version[] string constant.
+*/
+SQLITE_API const char *sqlite3_libversion(void){ return sqlite3_version; }
+
+/* IMPLEMENTATION-OF: R-25063-23286 The sqlite3_sourceid() function returns a
+** pointer to a string constant whose value is the same as the
+** SQLITE_SOURCE_ID C preprocessor macro. Except if SQLite is built using
+** an edited copy of the amalgamation, then the last four characters of
+** the hash might be different from SQLITE_SOURCE_ID.
+*/
+/* SQLITE_API const char *sqlite3_sourceid(void){ return SQLITE_SOURCE_ID; } */
+
+/* IMPLEMENTATION-OF: R-35210-63508 The sqlite3_libversion_number() function
+** returns an integer equal to SQLITE_VERSION_NUMBER.
+*/
+SQLITE_API int sqlite3_libversion_number(void){ return SQLITE_VERSION_NUMBER; }
+
+/* IMPLEMENTATION-OF: R-20790-14025 The sqlite3_threadsafe() function returns
+** zero if and only if SQLite was compiled with mutexing code omitted due to
+** the SQLITE_THREADSAFE compile-time option being set to 0.
+*/
+SQLITE_API int sqlite3_threadsafe(void){ return SQLITE_THREADSAFE; }
+
+/*
+** When compiling the test fixture or with debugging enabled (on Win32),
+** this variable being set to non-zero will cause OSTRACE macros to emit
+** extra diagnostic information.
+*/
+#ifdef SQLITE_HAVE_OS_TRACE
+# ifndef SQLITE_DEBUG_OS_TRACE
+# define SQLITE_DEBUG_OS_TRACE 0
+# endif
+ int sqlite3OSTrace = SQLITE_DEBUG_OS_TRACE;
+#endif
+
+#if !defined(SQLITE_OMIT_TRACE) && defined(SQLITE_ENABLE_IOTRACE)
+/*
+** If the following function pointer is not NULL and if
+** SQLITE_ENABLE_IOTRACE is enabled, then messages describing
+** I/O activity are written using this function. These messages
+** are intended for debugging activity only.
+*/
+SQLITE_API void (SQLITE_CDECL *sqlite3IoTrace)(const char*, ...) = 0;
+#endif
+
+/*
+** If the following global variable points to a string which is the
+** name of a directory, then that directory will be used to store
+** temporary files.
+**
+** See also the "PRAGMA temp_store_directory" SQL command.
+*/
+SQLITE_API char *sqlite3_temp_directory = 0;
+
+/*
+** If the following global variable points to a string which is the
+** name of a directory, then that directory will be used to store
+** all database files specified with a relative pathname.
+**
+** See also the "PRAGMA data_store_directory" SQL command.
+*/
+SQLITE_API char *sqlite3_data_directory = 0;
+
+/*
+** Initialize SQLite.
+**
+** This routine must be called to initialize the memory allocation,
+** VFS, and mutex subsystems prior to doing any serious work with
+** SQLite. But as long as you do not compile with SQLITE_OMIT_AUTOINIT
+** this routine will be called automatically by key routines such as
+** sqlite3_open().
+**
+** This routine is a no-op except on its very first call for the process,
+** or for the first call after a call to sqlite3_shutdown.
+**
+** The first thread to call this routine runs the initialization to
+** completion. If subsequent threads call this routine before the first
+** thread has finished the initialization process, then the subsequent
+** threads must block until the first thread finishes with the initialization.
+**
+** The first thread might call this routine recursively. Recursive
+** calls to this routine should not block, of course. Otherwise the
+** initialization process would never complete.
+**
+** Let X be the first thread to enter this routine. Let Y be some other
+** thread. Then while the initial invocation of this routine by X is
+** incomplete, it is required that:
+**
+** * Calls to this routine from Y must block until the outer-most
+** call by X completes.
+**
+** * Recursive calls to this routine from thread X return immediately
+** without blocking.
+*/
+SQLITE_API int sqlite3_initialize(void){
+ MUTEX_LOGIC( sqlite3_mutex *pMainMtx; ) /* The main static mutex */
+ int rc; /* Result code */
+#ifdef SQLITE_EXTRA_INIT
+ int bRunExtraInit = 0; /* Extra initialization needed */
+#endif
+
+#ifdef SQLITE_OMIT_WSD
+ rc = sqlite3_wsd_init(4096, 24);
+ if( rc!=SQLITE_OK ){
+ return rc;
+ }
+#endif
+
+ /* If the following assert() fails on some obscure processor/compiler
+ ** combination, the work-around is to set the correct pointer
+ ** size at compile-time using -DSQLITE_PTRSIZE=n compile-time option */
+ assert( SQLITE_PTRSIZE==sizeof(char*) );
+
+ /* If SQLite is already completely initialized, then this call
+ ** to sqlite3_initialize() should be a no-op. But the initialization
+ ** must be complete. So isInit must not be set until the very end
+ ** of this routine.
+ */
+ if( sqlite3GlobalConfig.isInit ){
+ sqlite3MemoryBarrier();
+ return SQLITE_OK;
+ }
+
+ /* Make sure the mutex subsystem is initialized. If unable to
+ ** initialize the mutex subsystem, return early with the error.
+ ** If the system is so sick that we are unable to allocate a mutex,
+ ** there is not much SQLite is going to be able to do.
+ **
+ ** The mutex subsystem must take care of serializing its own
+ ** initialization.
+ */
+ rc = sqlite3MutexInit();
+ if( rc ) return rc;
+
+ /* Initialize the malloc() system and the recursive pInitMutex mutex.
+ ** This operation is protected by the STATIC_MAIN mutex. Note that
+ ** MutexAlloc() is called for a static mutex prior to initializing the
+ ** malloc subsystem - this implies that the allocation of a static
+ ** mutex must not require support from the malloc subsystem.
+ */
+ MUTEX_LOGIC( pMainMtx = sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MAIN); )
+ sqlite3_mutex_enter(pMainMtx);
+ sqlite3GlobalConfig.isMutexInit = 1;
+ if( !sqlite3GlobalConfig.isMallocInit ){
+ rc = sqlite3MallocInit();
+ }
+ if( rc==SQLITE_OK ){
+ sqlite3GlobalConfig.isMallocInit = 1;
+ if( !sqlite3GlobalConfig.pInitMutex ){
+ sqlite3GlobalConfig.pInitMutex =
+ sqlite3MutexAlloc(SQLITE_MUTEX_RECURSIVE);
+ if( sqlite3GlobalConfig.bCoreMutex && !sqlite3GlobalConfig.pInitMutex ){
+ rc = SQLITE_NOMEM_BKPT;
+ }
+ }
+ }
+ if( rc==SQLITE_OK ){
+ sqlite3GlobalConfig.nRefInitMutex++;
+ }
+ sqlite3_mutex_leave(pMainMtx);
+
+ /* If rc is not SQLITE_OK at this point, then either the malloc
+ ** subsystem could not be initialized or the system failed to allocate
+ ** the pInitMutex mutex. Return an error in either case. */
+ if( rc!=SQLITE_OK ){
+ return rc;
+ }
+
+ /* Do the rest of the initialization under the recursive mutex so
+ ** that we will be able to handle recursive calls into
+ ** sqlite3_initialize(). The recursive calls normally come through
+ ** sqlite3_os_init() when it invokes sqlite3_vfs_register(), but other
+ ** recursive calls might also be possible.
+ **
+ ** IMPLEMENTATION-OF: R-00140-37445 SQLite automatically serializes calls
+ ** to the xInit method, so the xInit method need not be threadsafe.
+ **
+  ** The following mutex is what serializes access to the application-defined
+  ** pcache xInit methods.  The sqlite3_pcache_methods.xInit() call is
+  ** embedded in the call to sqlite3PcacheInitialize().
+ */
+ sqlite3_mutex_enter(sqlite3GlobalConfig.pInitMutex);
+ if( sqlite3GlobalConfig.isInit==0 && sqlite3GlobalConfig.inProgress==0 ){
+ sqlite3GlobalConfig.inProgress = 1;
+#ifdef SQLITE_ENABLE_SQLLOG
+ {
+ extern void sqlite3_init_sqllog(void);
+ sqlite3_init_sqllog();
+ }
+#endif
+ memset(&sqlite3BuiltinFunctions, 0, sizeof(sqlite3BuiltinFunctions));
+ sqlite3RegisterBuiltinFunctions();
+ if( sqlite3GlobalConfig.isPCacheInit==0 ){
+ rc = sqlite3PcacheInitialize();
+ }
+ if( rc==SQLITE_OK ){
+ sqlite3GlobalConfig.isPCacheInit = 1;
+ rc = sqlite3OsInit();
+ }
+#ifdef SQLITE_ENABLE_DESERIALIZE
+ if( rc==SQLITE_OK ){
+ rc = sqlite3MemdbInit();
+ }
+#endif
+ if( rc==SQLITE_OK ){
+ sqlite3PCacheBufferSetup( sqlite3GlobalConfig.pPage,
+ sqlite3GlobalConfig.szPage, sqlite3GlobalConfig.nPage);
+ sqlite3MemoryBarrier();
+ sqlite3GlobalConfig.isInit = 1;
+#ifdef SQLITE_EXTRA_INIT
+ bRunExtraInit = 1;
+#endif
+ }
+ sqlite3GlobalConfig.inProgress = 0;
+ }
+ sqlite3_mutex_leave(sqlite3GlobalConfig.pInitMutex);
+
+ /* Go back under the static mutex and clean up the recursive
+ ** mutex to prevent a resource leak.
+ */
+ sqlite3_mutex_enter(pMainMtx);
+ sqlite3GlobalConfig.nRefInitMutex--;
+ if( sqlite3GlobalConfig.nRefInitMutex<=0 ){
+ assert( sqlite3GlobalConfig.nRefInitMutex==0 );
+ sqlite3_mutex_free(sqlite3GlobalConfig.pInitMutex);
+ sqlite3GlobalConfig.pInitMutex = 0;
+ }
+ sqlite3_mutex_leave(pMainMtx);
+
+ /* The following is just a sanity check to make sure SQLite has
+ ** been compiled correctly. It is important to run this code, but
+ ** we don't want to run it too often and soak up CPU cycles for no
+ ** reason. So we run it once during initialization.
+ */
+#ifndef NDEBUG
+#ifndef SQLITE_OMIT_FLOATING_POINT
+ /* This section of code's only "output" is via assert() statements. */
+ if( rc==SQLITE_OK ){
+ u64 x = (((u64)1)<<63)-1;
+ double y;
+ assert(sizeof(x)==8);
+ assert(sizeof(x)==sizeof(y));
+ memcpy(&y, &x, 8);
+ assert( sqlite3IsNaN(y) );
+ }
+#endif
+#endif
+
+ /* Do extra initialization steps requested by the SQLITE_EXTRA_INIT
+ ** compile-time option.
+ */
+#ifdef SQLITE_EXTRA_INIT
+ if( bRunExtraInit ){
+ int SQLITE_EXTRA_INIT(const char*);
+ rc = SQLITE_EXTRA_INIT(0);
+ }
+#endif
+
+ return rc;
+}
+
+/*
+** Undo the effects of sqlite3_initialize(). Must not be called while
+** there are outstanding database connections or memory allocations or
+** while any part of SQLite is otherwise in use in any thread. This
+** routine is not threadsafe. But it is safe to invoke this routine
+** when SQLite is already shut down.  If SQLite is already shut down
+** when this routine is invoked, then this routine is a harmless no-op.
+*/
+SQLITE_API int sqlite3_shutdown(void){
+#ifdef SQLITE_OMIT_WSD
+ int rc = sqlite3_wsd_init(4096, 24);
+ if( rc!=SQLITE_OK ){
+ return rc;
+ }
+#endif
+
+ if( sqlite3GlobalConfig.isInit ){
+#ifdef SQLITE_EXTRA_SHUTDOWN
+ void SQLITE_EXTRA_SHUTDOWN(void);
+ SQLITE_EXTRA_SHUTDOWN();
+#endif
+ sqlite3_os_end();
+ sqlite3_reset_auto_extension();
+ sqlite3GlobalConfig.isInit = 0;
+ }
+ if( sqlite3GlobalConfig.isPCacheInit ){
+ sqlite3PcacheShutdown();
+ sqlite3GlobalConfig.isPCacheInit = 0;
+ }
+ if( sqlite3GlobalConfig.isMallocInit ){
+ sqlite3MallocEnd();
+ sqlite3GlobalConfig.isMallocInit = 0;
+
+#ifndef SQLITE_OMIT_SHUTDOWN_DIRECTORIES
+    /* The heap subsystem has now been shut down and these values are supposed
+    ** to be NULL or point to memory that was obtained from sqlite3_malloc(),
+    ** which would rely on that heap subsystem; therefore, make sure these
+    ** values cannot refer to heap memory that was just invalidated when the
+    ** heap subsystem was shut down. This is only done if the current call to
+    ** this function resulted in the heap subsystem actually being shut down.
+ */
+ sqlite3_data_directory = 0;
+ sqlite3_temp_directory = 0;
+#endif
+ }
+ if( sqlite3GlobalConfig.isMutexInit ){
+ sqlite3MutexEnd();
+ sqlite3GlobalConfig.isMutexInit = 0;
+ }
+
+ return SQLITE_OK;
+}
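+
+/* Illustrative sketch (not part of the upstream source): the explicit
+** initialize/shutdown lifecycle described above, as an application
+** would drive it when compiled with SQLITE_OMIT_AUTOINIT.  The helper
+** name is hypothetical. */
+static int exampleExplicitLifecycle(void){
+  int rc = sqlite3_initialize();  /* mutex, malloc and VFS come up here */
+  if( rc!=SQLITE_OK ) return rc;
+  /* ... open database connections and do work here ... */
+  return sqlite3_shutdown();      /* only after every connection closes */
+}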
+
+/*
+** This API allows applications to modify the global configuration of
+** the SQLite library at run-time.
+**
+** This routine should only be called when there are no outstanding
+** database connections or memory allocations. This routine is not
+** threadsafe. Failure to heed these warnings can lead to unpredictable
+** behavior.
+*/
+SQLITE_API int sqlite3_config(int op, ...){
+ va_list ap;
+ int rc = SQLITE_OK;
+
+ /* sqlite3_config() shall return SQLITE_MISUSE if it is invoked while
+ ** the SQLite library is in use. */
+ if( sqlite3GlobalConfig.isInit ) return SQLITE_MISUSE_BKPT;
+
+ va_start(ap, op);
+ switch( op ){
+
+ /* Mutex configuration options are only available in a threadsafe
+ ** compile.
+ */
+#if defined(SQLITE_THREADSAFE) && SQLITE_THREADSAFE>0 /* IMP: R-54466-46756 */
+ case SQLITE_CONFIG_SINGLETHREAD: {
+ /* EVIDENCE-OF: R-02748-19096 This option sets the threading mode to
+ ** Single-thread. */
+ sqlite3GlobalConfig.bCoreMutex = 0; /* Disable mutex on core */
+ sqlite3GlobalConfig.bFullMutex = 0; /* Disable mutex on connections */
+ break;
+ }
+#endif
+#if defined(SQLITE_THREADSAFE) && SQLITE_THREADSAFE>0 /* IMP: R-20520-54086 */
+ case SQLITE_CONFIG_MULTITHREAD: {
+ /* EVIDENCE-OF: R-14374-42468 This option sets the threading mode to
+ ** Multi-thread. */
+ sqlite3GlobalConfig.bCoreMutex = 1; /* Enable mutex on core */
+ sqlite3GlobalConfig.bFullMutex = 0; /* Disable mutex on connections */
+ break;
+ }
+#endif
+#if defined(SQLITE_THREADSAFE) && SQLITE_THREADSAFE>0 /* IMP: R-59593-21810 */
+ case SQLITE_CONFIG_SERIALIZED: {
+ /* EVIDENCE-OF: R-41220-51800 This option sets the threading mode to
+ ** Serialized. */
+ sqlite3GlobalConfig.bCoreMutex = 1; /* Enable mutex on core */
+ sqlite3GlobalConfig.bFullMutex = 1; /* Enable mutex on connections */
+ break;
+ }
+#endif
+#if defined(SQLITE_THREADSAFE) && SQLITE_THREADSAFE>0 /* IMP: R-63666-48755 */
+ case SQLITE_CONFIG_MUTEX: {
+ /* Specify an alternative mutex implementation */
+ sqlite3GlobalConfig.mutex = *va_arg(ap, sqlite3_mutex_methods*);
+ break;
+ }
+#endif
+#if defined(SQLITE_THREADSAFE) && SQLITE_THREADSAFE>0 /* IMP: R-14450-37597 */
+ case SQLITE_CONFIG_GETMUTEX: {
+ /* Retrieve the current mutex implementation */
+ *va_arg(ap, sqlite3_mutex_methods*) = sqlite3GlobalConfig.mutex;
+ break;
+ }
+#endif
+
+ case SQLITE_CONFIG_MALLOC: {
+ /* EVIDENCE-OF: R-55594-21030 The SQLITE_CONFIG_MALLOC option takes a
+ ** single argument which is a pointer to an instance of the
+ ** sqlite3_mem_methods structure. The argument specifies alternative
+ ** low-level memory allocation routines to be used in place of the memory
+ ** allocation routines built into SQLite. */
+ sqlite3GlobalConfig.m = *va_arg(ap, sqlite3_mem_methods*);
+ break;
+ }
+ case SQLITE_CONFIG_GETMALLOC: {
+ /* EVIDENCE-OF: R-51213-46414 The SQLITE_CONFIG_GETMALLOC option takes a
+ ** single argument which is a pointer to an instance of the
+ ** sqlite3_mem_methods structure. The sqlite3_mem_methods structure is
+ ** filled with the currently defined memory allocation routines. */
+ if( sqlite3GlobalConfig.m.xMalloc==0 ) sqlite3MemSetDefault();
+ *va_arg(ap, sqlite3_mem_methods*) = sqlite3GlobalConfig.m;
+ break;
+ }
+ case SQLITE_CONFIG_MEMSTATUS: {
+    /* EVIDENCE-OF: R-61275-35157 The SQLITE_CONFIG_MEMSTATUS option takes a
+    ** single argument of type int, interpreted as a boolean, which enables
+ ** or disables the collection of memory allocation statistics. */
+ sqlite3GlobalConfig.bMemstat = va_arg(ap, int);
+ break;
+ }
+ case SQLITE_CONFIG_SMALL_MALLOC: {
+ sqlite3GlobalConfig.bSmallMalloc = va_arg(ap, int);
+ break;
+ }
+ case SQLITE_CONFIG_PAGECACHE: {
+ /* EVIDENCE-OF: R-18761-36601 There are three arguments to
+ ** SQLITE_CONFIG_PAGECACHE: A pointer to 8-byte aligned memory (pMem),
+ ** the size of each page cache line (sz), and the number of cache lines
+ ** (N). */
+ sqlite3GlobalConfig.pPage = va_arg(ap, void*);
+ sqlite3GlobalConfig.szPage = va_arg(ap, int);
+ sqlite3GlobalConfig.nPage = va_arg(ap, int);
+ break;
+ }
+ case SQLITE_CONFIG_PCACHE_HDRSZ: {
+ /* EVIDENCE-OF: R-39100-27317 The SQLITE_CONFIG_PCACHE_HDRSZ option takes
+ ** a single parameter which is a pointer to an integer and writes into
+ ** that integer the number of extra bytes per page required for each page
+ ** in SQLITE_CONFIG_PAGECACHE. */
+ *va_arg(ap, int*) =
+ sqlite3HeaderSizeBtree() +
+ sqlite3HeaderSizePcache() +
+ sqlite3HeaderSizePcache1();
+ break;
+ }
+
+ case SQLITE_CONFIG_PCACHE: {
+ /* no-op */
+ break;
+ }
+ case SQLITE_CONFIG_GETPCACHE: {
+ /* now an error */
+ rc = SQLITE_ERROR;
+ break;
+ }
+
+ case SQLITE_CONFIG_PCACHE2: {
+ /* EVIDENCE-OF: R-63325-48378 The SQLITE_CONFIG_PCACHE2 option takes a
+ ** single argument which is a pointer to an sqlite3_pcache_methods2
+ ** object. This object specifies the interface to a custom page cache
+ ** implementation. */
+ sqlite3GlobalConfig.pcache2 = *va_arg(ap, sqlite3_pcache_methods2*);
+ break;
+ }
+ case SQLITE_CONFIG_GETPCACHE2: {
+ /* EVIDENCE-OF: R-22035-46182 The SQLITE_CONFIG_GETPCACHE2 option takes a
+ ** single argument which is a pointer to an sqlite3_pcache_methods2
+    ** object. SQLite copies the current page cache implementation into
+ ** that object. */
+ if( sqlite3GlobalConfig.pcache2.xInit==0 ){
+ sqlite3PCacheSetDefault();
+ }
+ *va_arg(ap, sqlite3_pcache_methods2*) = sqlite3GlobalConfig.pcache2;
+ break;
+ }
+
+/* EVIDENCE-OF: R-06626-12911 The SQLITE_CONFIG_HEAP option is only
+** available if SQLite is compiled with either SQLITE_ENABLE_MEMSYS3 or
+** SQLITE_ENABLE_MEMSYS5 and returns SQLITE_ERROR if invoked otherwise. */
+#if defined(SQLITE_ENABLE_MEMSYS3) || defined(SQLITE_ENABLE_MEMSYS5)
+ case SQLITE_CONFIG_HEAP: {
+ /* EVIDENCE-OF: R-19854-42126 There are three arguments to
+ ** SQLITE_CONFIG_HEAP: An 8-byte aligned pointer to the memory, the
+ ** number of bytes in the memory buffer, and the minimum allocation size.
+ */
+ sqlite3GlobalConfig.pHeap = va_arg(ap, void*);
+ sqlite3GlobalConfig.nHeap = va_arg(ap, int);
+ sqlite3GlobalConfig.mnReq = va_arg(ap, int);
+
+ if( sqlite3GlobalConfig.mnReq<1 ){
+ sqlite3GlobalConfig.mnReq = 1;
+ }else if( sqlite3GlobalConfig.mnReq>(1<<12) ){
+ /* cap min request size at 2^12 */
+ sqlite3GlobalConfig.mnReq = (1<<12);
+ }
+
+ if( sqlite3GlobalConfig.pHeap==0 ){
+ /* EVIDENCE-OF: R-49920-60189 If the first pointer (the memory pointer)
+ ** is NULL, then SQLite reverts to using its default memory allocator
+ ** (the system malloc() implementation), undoing any prior invocation of
+ ** SQLITE_CONFIG_MALLOC.
+ **
+ ** Setting sqlite3GlobalConfig.m to all zeros will cause malloc to
+ ** revert to its default implementation when sqlite3_initialize() is run
+ */
+ memset(&sqlite3GlobalConfig.m, 0, sizeof(sqlite3GlobalConfig.m));
+ }else{
+ /* EVIDENCE-OF: R-61006-08918 If the memory pointer is not NULL then the
+      ** alternative memory allocator is engaged to handle all of SQLite's
+ ** memory allocation needs. */
+#ifdef SQLITE_ENABLE_MEMSYS3
+ sqlite3GlobalConfig.m = *sqlite3MemGetMemsys3();
+#endif
+#ifdef SQLITE_ENABLE_MEMSYS5
+ sqlite3GlobalConfig.m = *sqlite3MemGetMemsys5();
+#endif
+ }
+ break;
+ }
+#endif
+
+ case SQLITE_CONFIG_LOOKASIDE: {
+ sqlite3GlobalConfig.szLookaside = va_arg(ap, int);
+ sqlite3GlobalConfig.nLookaside = va_arg(ap, int);
+ break;
+ }
+
+ /* Record a pointer to the logger function and its first argument.
+ ** The default is NULL. Logging is disabled if the function pointer is
+ ** NULL.
+ */
+ case SQLITE_CONFIG_LOG: {
+ /* MSVC is picky about pulling func ptrs from va lists.
+ ** http://support.microsoft.com/kb/47961
+ ** sqlite3GlobalConfig.xLog = va_arg(ap, void(*)(void*,int,const char*));
+ */
+ typedef void(*LOGFUNC_t)(void*,int,const char*);
+ sqlite3GlobalConfig.xLog = va_arg(ap, LOGFUNC_t);
+ sqlite3GlobalConfig.pLogArg = va_arg(ap, void*);
+ break;
+ }
+
+ /* EVIDENCE-OF: R-55548-33817 The compile-time setting for URI filenames
+ ** can be changed at start-time using the
+ ** sqlite3_config(SQLITE_CONFIG_URI,1) or
+ ** sqlite3_config(SQLITE_CONFIG_URI,0) configuration calls.
+ */
+ case SQLITE_CONFIG_URI: {
+ /* EVIDENCE-OF: R-25451-61125 The SQLITE_CONFIG_URI option takes a single
+ ** argument of type int. If non-zero, then URI handling is globally
+ ** enabled. If the parameter is zero, then URI handling is globally
+ ** disabled. */
+ sqlite3GlobalConfig.bOpenUri = va_arg(ap, int);
+ break;
+ }
+
+ case SQLITE_CONFIG_COVERING_INDEX_SCAN: {
+ /* EVIDENCE-OF: R-36592-02772 The SQLITE_CONFIG_COVERING_INDEX_SCAN
+ ** option takes a single integer argument which is interpreted as a
+ ** boolean in order to enable or disable the use of covering indices for
+ ** full table scans in the query optimizer. */
+ sqlite3GlobalConfig.bUseCis = va_arg(ap, int);
+ break;
+ }
+
+#ifdef SQLITE_ENABLE_SQLLOG
+ case SQLITE_CONFIG_SQLLOG: {
+ typedef void(*SQLLOGFUNC_t)(void*, sqlite3*, const char*, int);
+ sqlite3GlobalConfig.xSqllog = va_arg(ap, SQLLOGFUNC_t);
+ sqlite3GlobalConfig.pSqllogArg = va_arg(ap, void *);
+ break;
+ }
+#endif
+
+ case SQLITE_CONFIG_MMAP_SIZE: {
+ /* EVIDENCE-OF: R-58063-38258 SQLITE_CONFIG_MMAP_SIZE takes two 64-bit
+ ** integer (sqlite3_int64) values that are the default mmap size limit
+ ** (the default setting for PRAGMA mmap_size) and the maximum allowed
+ ** mmap size limit. */
+ sqlite3_int64 szMmap = va_arg(ap, sqlite3_int64);
+ sqlite3_int64 mxMmap = va_arg(ap, sqlite3_int64);
+ /* EVIDENCE-OF: R-53367-43190 If either argument to this option is
+ ** negative, then that argument is changed to its compile-time default.
+ **
+ ** EVIDENCE-OF: R-34993-45031 The maximum allowed mmap size will be
+ ** silently truncated if necessary so that it does not exceed the
+ ** compile-time maximum mmap size set by the SQLITE_MAX_MMAP_SIZE
+ ** compile-time option.
+ */
+ if( mxMmap<0 || mxMmap>SQLITE_MAX_MMAP_SIZE ){
+ mxMmap = SQLITE_MAX_MMAP_SIZE;
+ }
+ if( szMmap<0 ) szMmap = SQLITE_DEFAULT_MMAP_SIZE;
+ if( szMmap>mxMmap) szMmap = mxMmap;
+ sqlite3GlobalConfig.mxMmap = mxMmap;
+ sqlite3GlobalConfig.szMmap = szMmap;
+ break;
+ }
+
+#if SQLITE_OS_WIN && defined(SQLITE_WIN32_MALLOC) /* IMP: R-04780-55815 */
+ case SQLITE_CONFIG_WIN32_HEAPSIZE: {
+ /* EVIDENCE-OF: R-34926-03360 SQLITE_CONFIG_WIN32_HEAPSIZE takes a 32-bit
+ ** unsigned integer value that specifies the maximum size of the created
+ ** heap. */
+ sqlite3GlobalConfig.nHeap = va_arg(ap, int);
+ break;
+ }
+#endif
+
+ case SQLITE_CONFIG_PMASZ: {
+ sqlite3GlobalConfig.szPma = va_arg(ap, unsigned int);
+ break;
+ }
+
+ case SQLITE_CONFIG_STMTJRNL_SPILL: {
+ sqlite3GlobalConfig.nStmtSpill = va_arg(ap, int);
+ break;
+ }
+
+#ifdef SQLITE_ENABLE_SORTER_REFERENCES
+ case SQLITE_CONFIG_SORTERREF_SIZE: {
+ int iVal = va_arg(ap, int);
+ if( iVal<0 ){
+ iVal = SQLITE_DEFAULT_SORTERREF_SIZE;
+ }
+ sqlite3GlobalConfig.szSorterRef = (u32)iVal;
+ break;
+ }
+#endif /* SQLITE_ENABLE_SORTER_REFERENCES */
+
+#ifdef SQLITE_ENABLE_DESERIALIZE
+ case SQLITE_CONFIG_MEMDB_MAXSIZE: {
+ sqlite3GlobalConfig.mxMemdbSize = va_arg(ap, sqlite3_int64);
+ break;
+ }
+#endif /* SQLITE_ENABLE_DESERIALIZE */
+
+ default: {
+ rc = SQLITE_ERROR;
+ break;
+ }
+ }
+ va_end(ap);
+ return rc;
+}
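+
+/* Usage sketch (illustrative, not part of the upstream amalgamation): a
+** typical start-time configuration sequence for the options handled above.
+** All sqlite3_config() calls must happen before sqlite3_initialize() or
+** after sqlite3_shutdown(), otherwise SQLITE_MISUSE is returned. The names
+** appLogCallback/appConfigureSqlite are hypothetical. Guarded out so the
+** amalgamation is unchanged; remove the guard to compile. */
+#if 0
+#include <stdio.h>
+static void appLogCallback(void *pArg, int iErrCode, const char *zMsg){
+  (void)pArg;
+  fprintf(stderr, "sqlite3 log (%d): %s\n", iErrCode, zMsg);
+}
+static int appConfigureSqlite(void){
+  int rc;
+  /* Route internal log messages to stderr (SQLITE_CONFIG_LOG case above) */
+  rc = sqlite3_config(SQLITE_CONFIG_LOG, appLogCallback, (void*)0);
+  if( rc!=SQLITE_OK ) return rc;
+  /* Default mmap limit 64MiB, hard limit 256MiB; both are silently capped
+  ** at SQLITE_MAX_MMAP_SIZE per the SQLITE_CONFIG_MMAP_SIZE case above */
+  rc = sqlite3_config(SQLITE_CONFIG_MMAP_SIZE,
+                      (sqlite3_int64)64*1024*1024,
+                      (sqlite3_int64)256*1024*1024);
+  if( rc!=SQLITE_OK ) return rc;
+  return sqlite3_initialize();
+}
+#endif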
+
+/*
+** Set up the lookaside buffers for a database connection.
+** Return SQLITE_OK on success.
+** If lookaside is already active, return SQLITE_BUSY.
+**
+** The sz parameter is the number of bytes in each lookaside slot.
+** The cnt parameter is the number of slots. If pBuf is NULL the
+** space for the lookaside memory is obtained from sqlite3_malloc().
+** If pBuf is not NULL then it is sz*cnt bytes of memory to use for
+** the lookaside memory.
+*/
+static int setupLookaside(sqlite3 *db, void *pBuf, int sz, int cnt){
+#ifndef SQLITE_OMIT_LOOKASIDE
+ void *pStart;
+ sqlite3_int64 szAlloc = sz*(sqlite3_int64)cnt;
+ int nBig; /* Number of full-size slots */
+  int nSm;           /* Number of smaller LOOKASIDE_SMALL-byte slots */
+
+ if( sqlite3LookasideUsed(db,0)>0 ){
+ return SQLITE_BUSY;
+ }
+ /* Free any existing lookaside buffer for this handle before
+ ** allocating a new one so we don't have to have space for
+ ** both at the same time.
+ */
+ if( db->lookaside.bMalloced ){
+ sqlite3_free(db->lookaside.pStart);
+ }
+ /* The size of a lookaside slot after ROUNDDOWN8 needs to be larger
+ ** than a pointer to be useful.
+ */
+ sz = ROUNDDOWN8(sz); /* IMP: R-33038-09382 */
+ if( sz<=(int)sizeof(LookasideSlot*) ) sz = 0;
+ if( cnt<0 ) cnt = 0;
+ if( sz==0 || cnt==0 ){
+ sz = 0;
+ pStart = 0;
+ }else if( pBuf==0 ){
+ sqlite3BeginBenignMalloc();
+ pStart = sqlite3Malloc( szAlloc ); /* IMP: R-61949-35727 */
+ sqlite3EndBenignMalloc();
+ if( pStart ) szAlloc = sqlite3MallocSize(pStart);
+ }else{
+ pStart = pBuf;
+ }
+#ifndef SQLITE_OMIT_TWOSIZE_LOOKASIDE
+ if( sz>=LOOKASIDE_SMALL*3 ){
+ nBig = szAlloc/(3*LOOKASIDE_SMALL+sz);
+ nSm = (szAlloc - sz*nBig)/LOOKASIDE_SMALL;
+ }else if( sz>=LOOKASIDE_SMALL*2 ){
+ nBig = szAlloc/(LOOKASIDE_SMALL+sz);
+ nSm = (szAlloc - sz*nBig)/LOOKASIDE_SMALL;
+ }else
+#endif /* SQLITE_OMIT_TWOSIZE_LOOKASIDE */
+ if( sz>0 ){
+ nBig = szAlloc/sz;
+ nSm = 0;
+ }else{
+ nBig = nSm = 0;
+ }
+ db->lookaside.pStart = pStart;
+ db->lookaside.pInit = 0;
+ db->lookaside.pFree = 0;
+ db->lookaside.sz = (u16)sz;
+ db->lookaside.szTrue = (u16)sz;
+ if( pStart ){
+ int i;
+ LookasideSlot *p;
+ assert( sz > (int)sizeof(LookasideSlot*) );
+ p = (LookasideSlot*)pStart;
+ for(i=0; i<nBig; i++){
+ p->pNext = db->lookaside.pInit;
+ db->lookaside.pInit = p;
+ p = (LookasideSlot*)&((u8*)p)[sz];
+ }
+#ifndef SQLITE_OMIT_TWOSIZE_LOOKASIDE
+ db->lookaside.pSmallInit = 0;
+ db->lookaside.pSmallFree = 0;
+ db->lookaside.pMiddle = p;
+ for(i=0; i<nSm; i++){
+ p->pNext = db->lookaside.pSmallInit;
+ db->lookaside.pSmallInit = p;
+ p = (LookasideSlot*)&((u8*)p)[LOOKASIDE_SMALL];
+ }
+#endif /* SQLITE_OMIT_TWOSIZE_LOOKASIDE */
+ assert( ((uptr)p)<=szAlloc + (uptr)pStart );
+ db->lookaside.pEnd = p;
+ db->lookaside.bDisable = 0;
+ db->lookaside.bMalloced = pBuf==0 ?1:0;
+ db->lookaside.nSlot = nBig+nSm;
+ }else{
+ db->lookaside.pStart = db;
+#ifndef SQLITE_OMIT_TWOSIZE_LOOKASIDE
+ db->lookaside.pSmallInit = 0;
+ db->lookaside.pSmallFree = 0;
+ db->lookaside.pMiddle = db;
+#endif /* SQLITE_OMIT_TWOSIZE_LOOKASIDE */
+ db->lookaside.pEnd = db;
+ db->lookaside.bDisable = 1;
+ db->lookaside.sz = 0;
+ db->lookaside.bMalloced = 0;
+ db->lookaside.nSlot = 0;
+ }
+ assert( sqlite3LookasideUsed(db,0)==0 );
+#endif /* SQLITE_OMIT_LOOKASIDE */
+ return SQLITE_OK;
+}
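+
+/* Usage sketch (illustrative): supplying a caller-owned lookaside buffer
+** through sqlite3_db_config(), which routes to setupLookaside() above. The
+** slot size is rounded down to a multiple of 8 and the call fails with
+** SQLITE_BUSY if lookaside is already in use. Buffer sizing here is an
+** arbitrary choice. Guarded out; remove the guard to compile. */
+#if 0
+static char aLookaside[512*128];   /* 128 slots of 512 bytes each */
+static int appSetLookaside(sqlite3 *db){
+  /* Passing a NULL buffer instead would make SQLite allocate the space
+  ** itself via sqlite3_malloc() */
+  return sqlite3_db_config(db, SQLITE_DBCONFIG_LOOKASIDE,
+                           aLookaside, 512, 128);
+}
+#endif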
+
+/*
+** Return the mutex associated with a database connection.
+*/
+SQLITE_API sqlite3_mutex *sqlite3_db_mutex(sqlite3 *db){
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( !sqlite3SafetyCheckOk(db) ){
+ (void)SQLITE_MISUSE_BKPT;
+ return 0;
+ }
+#endif
+ return db->mutex;
+}
+
+/*
+** Free up as much memory as we can from the given database
+** connection.
+*/
+SQLITE_API int sqlite3_db_release_memory(sqlite3 *db){
+ int i;
+
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( !sqlite3SafetyCheckOk(db) ) return SQLITE_MISUSE_BKPT;
+#endif
+ sqlite3_mutex_enter(db->mutex);
+ sqlite3BtreeEnterAll(db);
+ for(i=0; i<db->nDb; i++){
+ Btree *pBt = db->aDb[i].pBt;
+ if( pBt ){
+ Pager *pPager = sqlite3BtreePager(pBt);
+ sqlite3PagerShrink(pPager);
+ }
+ }
+ sqlite3BtreeLeaveAll(db);
+ sqlite3_mutex_leave(db->mutex);
+ return SQLITE_OK;
+}
+
+/*
+** Flush any dirty pages in the pager-cache for any attached database
+** to disk.
+*/
+SQLITE_API int sqlite3_db_cacheflush(sqlite3 *db){
+ int i;
+ int rc = SQLITE_OK;
+ int bSeenBusy = 0;
+
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( !sqlite3SafetyCheckOk(db) ) return SQLITE_MISUSE_BKPT;
+#endif
+ sqlite3_mutex_enter(db->mutex);
+ sqlite3BtreeEnterAll(db);
+ for(i=0; rc==SQLITE_OK && i<db->nDb; i++){
+ Btree *pBt = db->aDb[i].pBt;
+ if( pBt && sqlite3BtreeIsInTrans(pBt) ){
+ Pager *pPager = sqlite3BtreePager(pBt);
+ rc = sqlite3PagerFlush(pPager);
+ if( rc==SQLITE_BUSY ){
+ bSeenBusy = 1;
+ rc = SQLITE_OK;
+ }
+ }
+ }
+ sqlite3BtreeLeaveAll(db);
+ sqlite3_mutex_leave(db->mutex);
+ return ((rc==SQLITE_OK && bSeenBusy) ? SQLITE_BUSY : rc);
+}
+
+/*
+** Configuration settings for an individual database connection
+*/
+SQLITE_API int sqlite3_db_config(sqlite3 *db, int op, ...){
+ va_list ap;
+ int rc;
+ va_start(ap, op);
+ switch( op ){
+ case SQLITE_DBCONFIG_MAINDBNAME: {
+ /* IMP: R-06824-28531 */
+ /* IMP: R-36257-52125 */
+ db->aDb[0].zDbSName = va_arg(ap,char*);
+ rc = SQLITE_OK;
+ break;
+ }
+ case SQLITE_DBCONFIG_LOOKASIDE: {
+ void *pBuf = va_arg(ap, void*); /* IMP: R-26835-10964 */
+ int sz = va_arg(ap, int); /* IMP: R-47871-25994 */
+ int cnt = va_arg(ap, int); /* IMP: R-04460-53386 */
+ rc = setupLookaside(db, pBuf, sz, cnt);
+ break;
+ }
+ default: {
+ static const struct {
+ int op; /* The opcode */
+ u32 mask; /* Mask of the bit in sqlite3.flags to set/clear */
+ } aFlagOp[] = {
+ { SQLITE_DBCONFIG_ENABLE_FKEY, SQLITE_ForeignKeys },
+ { SQLITE_DBCONFIG_ENABLE_TRIGGER, SQLITE_EnableTrigger },
+ { SQLITE_DBCONFIG_ENABLE_VIEW, SQLITE_EnableView },
+ { SQLITE_DBCONFIG_ENABLE_FTS3_TOKENIZER, SQLITE_Fts3Tokenizer },
+ { SQLITE_DBCONFIG_ENABLE_LOAD_EXTENSION, SQLITE_LoadExtension },
+ { SQLITE_DBCONFIG_NO_CKPT_ON_CLOSE, SQLITE_NoCkptOnClose },
+ { SQLITE_DBCONFIG_ENABLE_QPSG, SQLITE_EnableQPSG },
+ { SQLITE_DBCONFIG_TRIGGER_EQP, SQLITE_TriggerEQP },
+ { SQLITE_DBCONFIG_RESET_DATABASE, SQLITE_ResetDatabase },
+ { SQLITE_DBCONFIG_DEFENSIVE, SQLITE_Defensive },
+ { SQLITE_DBCONFIG_WRITABLE_SCHEMA, SQLITE_WriteSchema|
+ SQLITE_NoSchemaError },
+ { SQLITE_DBCONFIG_LEGACY_ALTER_TABLE, SQLITE_LegacyAlter },
+ { SQLITE_DBCONFIG_DQS_DDL, SQLITE_DqsDDL },
+ { SQLITE_DBCONFIG_DQS_DML, SQLITE_DqsDML },
+ { SQLITE_DBCONFIG_LEGACY_FILE_FORMAT, SQLITE_LegacyFileFmt },
+ { SQLITE_DBCONFIG_TRUSTED_SCHEMA, SQLITE_TrustedSchema },
+ };
+ unsigned int i;
+ rc = SQLITE_ERROR; /* IMP: R-42790-23372 */
+ for(i=0; i<ArraySize(aFlagOp); i++){
+ if( aFlagOp[i].op==op ){
+ int onoff = va_arg(ap, int);
+ int *pRes = va_arg(ap, int*);
+ u64 oldFlags = db->flags;
+ if( onoff>0 ){
+ db->flags |= aFlagOp[i].mask;
+ }else if( onoff==0 ){
+ db->flags &= ~(u64)aFlagOp[i].mask;
+ }
+ if( oldFlags!=db->flags ){
+ sqlite3ExpirePreparedStatements(db, 0);
+ }
+ if( pRes ){
+ *pRes = (db->flags & aFlagOp[i].mask)!=0;
+ }
+ rc = SQLITE_OK;
+ break;
+ }
+ }
+ break;
+ }
+ }
+ va_end(ap);
+ return rc;
+}
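+
+/* Usage sketch (illustrative): the boolean ops in aFlagOp[] above all share
+** the (int onoff, int *pRes) argument shape. A negative onoff leaves the
+** setting unchanged and merely reads the current state back through pRes.
+** Guarded out; remove the guard to compile. */
+#if 0
+static int appEnableForeignKeys(sqlite3 *db, int *pIsOn){
+  /* Turn foreign key enforcement on and report the resulting state */
+  return sqlite3_db_config(db, SQLITE_DBCONFIG_ENABLE_FKEY, 1, pIsOn);
+}
+static int appQueryForeignKeys(sqlite3 *db, int *pIsOn){
+  /* onoff<0: query the current state without modifying it */
+  return sqlite3_db_config(db, SQLITE_DBCONFIG_ENABLE_FKEY, -1, pIsOn);
+}
+#endif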
+
+/*
+** This is the default collating function named "BINARY" which is always
+** available.
+*/
+static int binCollFunc(
+ void *NotUsed,
+ int nKey1, const void *pKey1,
+ int nKey2, const void *pKey2
+){
+ int rc, n;
+ UNUSED_PARAMETER(NotUsed);
+ n = nKey1<nKey2 ? nKey1 : nKey2;
+ /* EVIDENCE-OF: R-65033-28449 The built-in BINARY collation compares
+ ** strings byte by byte using the memcmp() function from the standard C
+ ** library. */
+ assert( pKey1 && pKey2 );
+ rc = memcmp(pKey1, pKey2, n);
+ if( rc==0 ){
+ rc = nKey1 - nKey2;
+ }
+ return rc;
+}
+
+/*
+** This is the collating function named "RTRIM" which is always
+** available. Ignore trailing spaces.
+*/
+static int rtrimCollFunc(
+ void *pUser,
+ int nKey1, const void *pKey1,
+ int nKey2, const void *pKey2
+){
+ const u8 *pK1 = (const u8*)pKey1;
+ const u8 *pK2 = (const u8*)pKey2;
+ while( nKey1 && pK1[nKey1-1]==' ' ) nKey1--;
+ while( nKey2 && pK2[nKey2-1]==' ' ) nKey2--;
+ return binCollFunc(pUser, nKey1, pKey1, nKey2, pKey2);
+}
+
+/*
+** Return true if CollSeq is the default built-in BINARY.
+*/
+SQLITE_PRIVATE int sqlite3IsBinary(const CollSeq *p){
+ assert( p==0 || p->xCmp!=binCollFunc || strcmp(p->zName,"BINARY")==0 );
+ return p==0 || p->xCmp==binCollFunc;
+}
+
+/*
+** Another built-in collating sequence: NOCASE.
+**
+** This collating sequence is intended to be used for "case independent
+** comparison". SQLite's knowledge of upper and lower case equivalents
+** extends only to the 26 characters used in the English language.
+**
+** At the moment there is only a UTF-8 implementation.
+*/
+static int nocaseCollatingFunc(
+ void *NotUsed,
+ int nKey1, const void *pKey1,
+ int nKey2, const void *pKey2
+){
+ int r = sqlite3StrNICmp(
+ (const char *)pKey1, (const char *)pKey2, (nKey1<nKey2)?nKey1:nKey2);
+ UNUSED_PARAMETER(NotUsed);
+ if( 0==r ){
+ r = nKey1-nKey2;
+ }
+ return r;
+}
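+
+/* Usage sketch (illustrative): registering an application collation with
+** the same comparison signature as the built-ins above, via
+** sqlite3_create_collation() (defined later in this file). The
+** reverse-memcmp semantics are an arbitrary example. Guarded out; remove
+** the guard to compile. */
+#if 0
+#include <string.h>
+static int appRevCompare(
+  void *pNotUsed,
+  int nKey1, const void *pKey1,
+  int nKey2, const void *pKey2
+){
+  int n = nKey1<nKey2 ? nKey1 : nKey2;
+  int rc = memcmp(pKey2, pKey1, n);   /* note the swapped operands */
+  (void)pNotUsed;
+  return rc ? rc : (nKey2 - nKey1);
+}
+static int appRegisterReverseColl(sqlite3 *db){
+  return sqlite3_create_collation(db, "REVERSE", SQLITE_UTF8,
+                                  0, appRevCompare);
+}
+#endif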
+
+/*
+** Return the ROWID of the most recent insert
+*/
+SQLITE_API sqlite_int64 sqlite3_last_insert_rowid(sqlite3 *db){
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( !sqlite3SafetyCheckOk(db) ){
+ (void)SQLITE_MISUSE_BKPT;
+ return 0;
+ }
+#endif
+ return db->lastRowid;
+}
+
+/*
+** Set the value returned by the sqlite3_last_insert_rowid() API function.
+*/
+SQLITE_API void sqlite3_set_last_insert_rowid(sqlite3 *db, sqlite3_int64 iRowid){
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( !sqlite3SafetyCheckOk(db) ){
+ (void)SQLITE_MISUSE_BKPT;
+ return;
+ }
+#endif
+ sqlite3_mutex_enter(db->mutex);
+ db->lastRowid = iRowid;
+ sqlite3_mutex_leave(db->mutex);
+}
+
+/*
+** Return the number of changes in the most recent call to sqlite3_exec().
+*/
+SQLITE_API int sqlite3_changes(sqlite3 *db){
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( !sqlite3SafetyCheckOk(db) ){
+ (void)SQLITE_MISUSE_BKPT;
+ return 0;
+ }
+#endif
+ return db->nChange;
+}
+
+/*
+** Return the number of changes since the database handle was opened.
+*/
+SQLITE_API int sqlite3_total_changes(sqlite3 *db){
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( !sqlite3SafetyCheckOk(db) ){
+ (void)SQLITE_MISUSE_BKPT;
+ return 0;
+ }
+#endif
+ return db->nTotalChange;
+}
+
+/*
+** Close all open savepoints. This function only manipulates fields of the
+** database handle object; it does not close any savepoints that may be open
+** at the b-tree/pager level.
+*/
+SQLITE_PRIVATE void sqlite3CloseSavepoints(sqlite3 *db){
+ while( db->pSavepoint ){
+ Savepoint *pTmp = db->pSavepoint;
+ db->pSavepoint = pTmp->pNext;
+ sqlite3DbFree(db, pTmp);
+ }
+ db->nSavepoint = 0;
+ db->nStatement = 0;
+ db->isTransactionSavepoint = 0;
+}
+
+/*
+** Invoke the destructor function associated with FuncDef p, if any, but
+** only if this is the last copy of the function. Multiple
+** copies of a single function are created when create_function() is called
+** with SQLITE_ANY as the encoding.
+*/
+static void functionDestroy(sqlite3 *db, FuncDef *p){
+ FuncDestructor *pDestructor = p->u.pDestructor;
+ if( pDestructor ){
+ pDestructor->nRef--;
+ if( pDestructor->nRef==0 ){
+ pDestructor->xDestroy(pDestructor->pUserData);
+ sqlite3DbFree(db, pDestructor);
+ }
+ }
+}
+
+/*
+** Disconnect all sqlite3_vtab objects that belong to database connection
+** db. This is called when db is being closed.
+*/
+static void disconnectAllVtab(sqlite3 *db){
+#ifndef SQLITE_OMIT_VIRTUALTABLE
+ int i;
+ HashElem *p;
+ sqlite3BtreeEnterAll(db);
+ for(i=0; i<db->nDb; i++){
+ Schema *pSchema = db->aDb[i].pSchema;
+ if( pSchema ){
+ for(p=sqliteHashFirst(&pSchema->tblHash); p; p=sqliteHashNext(p)){
+ Table *pTab = (Table *)sqliteHashData(p);
+ if( IsVirtual(pTab) ) sqlite3VtabDisconnect(db, pTab);
+ }
+ }
+ }
+ for(p=sqliteHashFirst(&db->aModule); p; p=sqliteHashNext(p)){
+ Module *pMod = (Module *)sqliteHashData(p);
+ if( pMod->pEpoTab ){
+ sqlite3VtabDisconnect(db, pMod->pEpoTab);
+ }
+ }
+ sqlite3VtabUnlockList(db);
+ sqlite3BtreeLeaveAll(db);
+#else
+ UNUSED_PARAMETER(db);
+#endif
+}
+
+/*
+** Return TRUE if database connection db has unfinalized prepared
+** statements or unfinished sqlite3_backup objects.
+*/
+static int connectionIsBusy(sqlite3 *db){
+ int j;
+ assert( sqlite3_mutex_held(db->mutex) );
+ if( db->pVdbe ) return 1;
+ for(j=0; j<db->nDb; j++){
+ Btree *pBt = db->aDb[j].pBt;
+ if( pBt && sqlite3BtreeIsInBackup(pBt) ) return 1;
+ }
+ return 0;
+}
+
+/*
+** Close an existing SQLite database
+*/
+static int sqlite3Close(sqlite3 *db, int forceZombie){
+ if( !db ){
+ /* EVIDENCE-OF: R-63257-11740 Calling sqlite3_close() or
+ ** sqlite3_close_v2() with a NULL pointer argument is a harmless no-op. */
+ return SQLITE_OK;
+ }
+ if( !sqlite3SafetyCheckSickOrOk(db) ){
+ return SQLITE_MISUSE_BKPT;
+ }
+ sqlite3_mutex_enter(db->mutex);
+ if( db->mTrace & SQLITE_TRACE_CLOSE ){
+ db->trace.xV2(SQLITE_TRACE_CLOSE, db->pTraceArg, db, 0);
+ }
+
+ /* Force xDisconnect calls on all virtual tables */
+ disconnectAllVtab(db);
+
+ /* If a transaction is open, the disconnectAllVtab() call above
+ ** will not have called the xDisconnect() method on any virtual
+ ** tables in the db->aVTrans[] array. The following sqlite3VtabRollback()
+ ** call will do so. We need to do this before the check for active
+ ** SQL statements below, as the v-table implementation may be storing
+ ** some prepared statements internally.
+ */
+ sqlite3VtabRollback(db);
+
+ /* Legacy behavior (sqlite3_close() behavior) is to return
+  ** SQLITE_BUSY if the connection cannot be closed immediately.
+ */
+ if( !forceZombie && connectionIsBusy(db) ){
+ sqlite3ErrorWithMsg(db, SQLITE_BUSY, "unable to close due to unfinalized "
+ "statements or unfinished backups");
+ sqlite3_mutex_leave(db->mutex);
+ return SQLITE_BUSY;
+ }
+
+#ifdef SQLITE_ENABLE_SQLLOG
+ if( sqlite3GlobalConfig.xSqllog ){
+ /* Closing the handle. Fourth parameter is passed the value 2. */
+ sqlite3GlobalConfig.xSqllog(sqlite3GlobalConfig.pSqllogArg, db, 0, 2);
+ }
+#endif
+
+ /* Convert the connection into a zombie and then close it.
+ */
+ db->magic = SQLITE_MAGIC_ZOMBIE;
+ sqlite3LeaveMutexAndCloseZombie(db);
+ return SQLITE_OK;
+}
+
+/*
+** Two variations on the public interface for closing a database
+** connection. The sqlite3_close() version returns SQLITE_BUSY and
+** leaves the connection open if there are unfinalized prepared
+** statements or unfinished sqlite3_backups. The sqlite3_close_v2()
+** version forces the connection to become a zombie if there are
+** unclosed resources, and arranges for deallocation when the last
+** prepared statement or sqlite3_backup closes.
+*/
+SQLITE_API int sqlite3_close(sqlite3 *db){ return sqlite3Close(db,0); }
+SQLITE_API int sqlite3_close_v2(sqlite3 *db){ return sqlite3Close(db,1); }
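+
+/* Usage sketch (illustrative): the two shutdown strategies described above.
+** An application can finalize everything and call sqlite3_close(), or hand
+** the connection to sqlite3_close_v2() and let it be reclaimed as a zombie
+** once the last statement or backup finishes. Guarded out; remove the
+** guard to compile. */
+#if 0
+static int appCloseStrict(sqlite3 *db){
+  sqlite3_stmt *pStmt;
+  /* Finalize every outstanding prepared statement first */
+  while( (pStmt = sqlite3_next_stmt(db, 0))!=0 ){
+    sqlite3_finalize(pStmt);
+  }
+  return sqlite3_close(db);    /* should now return SQLITE_OK */
+}
+static void appCloseLazy(sqlite3 *db){
+  /* Always returns SQLITE_OK; deallocation may be deferred */
+  (void)sqlite3_close_v2(db);
+}
+#endif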
+
+
+/*
+** Close the mutex on database connection db.
+**
+** Furthermore, if database connection db is a zombie (meaning that there
+** has been a prior call to sqlite3_close(db) or sqlite3_close_v2(db)) and
+** every sqlite3_stmt has now been finalized and every sqlite3_backup has
+** finished, then free all resources.
+*/
+SQLITE_PRIVATE void sqlite3LeaveMutexAndCloseZombie(sqlite3 *db){
+ HashElem *i; /* Hash table iterator */
+ int j;
+
+ /* If there are outstanding sqlite3_stmt or sqlite3_backup objects
+ ** or if the connection has not yet been closed by sqlite3_close_v2(),
+ ** then just leave the mutex and return.
+ */
+ if( db->magic!=SQLITE_MAGIC_ZOMBIE || connectionIsBusy(db) ){
+ sqlite3_mutex_leave(db->mutex);
+ return;
+ }
+
+ /* If we reach this point, it means that the database connection has
+ ** closed all sqlite3_stmt and sqlite3_backup objects and has been
+ ** passed to sqlite3_close (meaning that it is a zombie). Therefore,
+ ** go ahead and free all resources.
+ */
+
+ /* If a transaction is open, roll it back. This also ensures that if
+ ** any database schemas have been modified by an uncommitted transaction
+ ** they are reset. And that the required b-tree mutex is held to make
+ ** the pager rollback and schema reset an atomic operation. */
+ sqlite3RollbackAll(db, SQLITE_OK);
+
+ /* Free any outstanding Savepoint structures. */
+ sqlite3CloseSavepoints(db);
+
+ /* Close all database connections */
+ for(j=0; j<db->nDb; j++){
+ struct Db *pDb = &db->aDb[j];
+ if( pDb->pBt ){
+ sqlite3BtreeClose(pDb->pBt);
+ pDb->pBt = 0;
+ if( j!=1 ){
+ pDb->pSchema = 0;
+ }
+ }
+ }
+ /* Clear the TEMP schema separately and last */
+ if( db->aDb[1].pSchema ){
+ sqlite3SchemaClear(db->aDb[1].pSchema);
+ }
+ sqlite3VtabUnlockList(db);
+
+ /* Free up the array of auxiliary databases */
+ sqlite3CollapseDatabaseArray(db);
+ assert( db->nDb<=2 );
+ assert( db->aDb==db->aDbStatic );
+
+ /* Tell the code in notify.c that the connection no longer holds any
+ ** locks and does not require any further unlock-notify callbacks.
+ */
+ sqlite3ConnectionClosed(db);
+
+ for(i=sqliteHashFirst(&db->aFunc); i; i=sqliteHashNext(i)){
+ FuncDef *pNext, *p;
+ p = sqliteHashData(i);
+ do{
+ functionDestroy(db, p);
+ pNext = p->pNext;
+ sqlite3DbFree(db, p);
+ p = pNext;
+ }while( p );
+ }
+ sqlite3HashClear(&db->aFunc);
+ for(i=sqliteHashFirst(&db->aCollSeq); i; i=sqliteHashNext(i)){
+ CollSeq *pColl = (CollSeq *)sqliteHashData(i);
+ /* Invoke any destructors registered for collation sequence user data. */
+ for(j=0; j<3; j++){
+ if( pColl[j].xDel ){
+ pColl[j].xDel(pColl[j].pUser);
+ }
+ }
+ sqlite3DbFree(db, pColl);
+ }
+ sqlite3HashClear(&db->aCollSeq);
+#ifndef SQLITE_OMIT_VIRTUALTABLE
+ for(i=sqliteHashFirst(&db->aModule); i; i=sqliteHashNext(i)){
+ Module *pMod = (Module *)sqliteHashData(i);
+ sqlite3VtabEponymousTableClear(db, pMod);
+ sqlite3VtabModuleUnref(db, pMod);
+ }
+ sqlite3HashClear(&db->aModule);
+#endif
+
+ sqlite3Error(db, SQLITE_OK); /* Deallocates any cached error strings. */
+ sqlite3ValueFree(db->pErr);
+ sqlite3CloseExtensions(db);
+#if SQLITE_USER_AUTHENTICATION
+ sqlite3_free(db->auth.zAuthUser);
+ sqlite3_free(db->auth.zAuthPW);
+#endif
+
+ db->magic = SQLITE_MAGIC_ERROR;
+
+ /* The temp-database schema is allocated differently from the other schema
+ ** objects (using sqliteMalloc() directly, instead of sqlite3BtreeSchema()).
+ ** So it needs to be freed here. Todo: Why not roll the temp schema into
+ ** the same sqliteMalloc() as the one that allocates the database
+ ** structure?
+ */
+ sqlite3DbFree(db, db->aDb[1].pSchema);
+ sqlite3_mutex_leave(db->mutex);
+ db->magic = SQLITE_MAGIC_CLOSED;
+ sqlite3_mutex_free(db->mutex);
+ assert( sqlite3LookasideUsed(db,0)==0 );
+ if( db->lookaside.bMalloced ){
+ sqlite3_free(db->lookaside.pStart);
+ }
+ sqlite3_free(db);
+}
+
+/*
+** Rollback all database files. If tripCode is not SQLITE_OK, then
+** any write cursors are invalidated ("tripped" - as in "tripping a circuit
+** breaker") and made to return tripCode if there are any further
+** attempts to use that cursor. Read cursors remain open and valid
+** but are "saved" in case the table pages are moved around.
+*/
+SQLITE_PRIVATE void sqlite3RollbackAll(sqlite3 *db, int tripCode){
+ int i;
+ int inTrans = 0;
+ int schemaChange;
+ assert( sqlite3_mutex_held(db->mutex) );
+ sqlite3BeginBenignMalloc();
+
+ /* Obtain all b-tree mutexes before making any calls to BtreeRollback().
+ ** This is important in case the transaction being rolled back has
+ ** modified the database schema. If the b-tree mutexes are not taken
+ ** here, then another shared-cache connection might sneak in between
+ ** the database rollback and schema reset, which can cause false
+ ** corruption reports in some cases. */
+ sqlite3BtreeEnterAll(db);
+ schemaChange = (db->mDbFlags & DBFLAG_SchemaChange)!=0 && db->init.busy==0;
+
+ for(i=0; i<db->nDb; i++){
+ Btree *p = db->aDb[i].pBt;
+ if( p ){
+ if( sqlite3BtreeIsInTrans(p) ){
+ inTrans = 1;
+ }
+ sqlite3BtreeRollback(p, tripCode, !schemaChange);
+ }
+ }
+ sqlite3VtabRollback(db);
+ sqlite3EndBenignMalloc();
+
+ if( schemaChange ){
+ sqlite3ExpirePreparedStatements(db, 0);
+ sqlite3ResetAllSchemasOfConnection(db);
+ }
+ sqlite3BtreeLeaveAll(db);
+
+ /* Any deferred constraint violations have now been resolved. */
+ db->nDeferredCons = 0;
+ db->nDeferredImmCons = 0;
+ db->flags &= ~(u64)SQLITE_DeferFKs;
+
+ /* If one has been configured, invoke the rollback-hook callback */
+ if( db->xRollbackCallback && (inTrans || !db->autoCommit) ){
+ db->xRollbackCallback(db->pRollbackArg);
+ }
+}
+
+/*
+** Return a static string containing the name corresponding to the error code
+** specified in the argument.
+*/
+#if defined(SQLITE_NEED_ERR_NAME)
+SQLITE_PRIVATE const char *sqlite3ErrName(int rc){
+ const char *zName = 0;
+ int i, origRc = rc;
+ for(i=0; i<2 && zName==0; i++, rc &= 0xff){
+ switch( rc ){
+ case SQLITE_OK: zName = "SQLITE_OK"; break;
+ case SQLITE_ERROR: zName = "SQLITE_ERROR"; break;
+ case SQLITE_ERROR_SNAPSHOT: zName = "SQLITE_ERROR_SNAPSHOT"; break;
+ case SQLITE_INTERNAL: zName = "SQLITE_INTERNAL"; break;
+ case SQLITE_PERM: zName = "SQLITE_PERM"; break;
+ case SQLITE_ABORT: zName = "SQLITE_ABORT"; break;
+ case SQLITE_ABORT_ROLLBACK: zName = "SQLITE_ABORT_ROLLBACK"; break;
+ case SQLITE_BUSY: zName = "SQLITE_BUSY"; break;
+ case SQLITE_BUSY_RECOVERY: zName = "SQLITE_BUSY_RECOVERY"; break;
+ case SQLITE_BUSY_SNAPSHOT: zName = "SQLITE_BUSY_SNAPSHOT"; break;
+ case SQLITE_LOCKED: zName = "SQLITE_LOCKED"; break;
+ case SQLITE_LOCKED_SHAREDCACHE: zName = "SQLITE_LOCKED_SHAREDCACHE";break;
+ case SQLITE_NOMEM: zName = "SQLITE_NOMEM"; break;
+ case SQLITE_READONLY: zName = "SQLITE_READONLY"; break;
+ case SQLITE_READONLY_RECOVERY: zName = "SQLITE_READONLY_RECOVERY"; break;
+ case SQLITE_READONLY_CANTINIT: zName = "SQLITE_READONLY_CANTINIT"; break;
+ case SQLITE_READONLY_ROLLBACK: zName = "SQLITE_READONLY_ROLLBACK"; break;
+ case SQLITE_READONLY_DBMOVED: zName = "SQLITE_READONLY_DBMOVED"; break;
+ case SQLITE_READONLY_DIRECTORY: zName = "SQLITE_READONLY_DIRECTORY";break;
+ case SQLITE_INTERRUPT: zName = "SQLITE_INTERRUPT"; break;
+ case SQLITE_IOERR: zName = "SQLITE_IOERR"; break;
+ case SQLITE_IOERR_READ: zName = "SQLITE_IOERR_READ"; break;
+ case SQLITE_IOERR_SHORT_READ: zName = "SQLITE_IOERR_SHORT_READ"; break;
+ case SQLITE_IOERR_WRITE: zName = "SQLITE_IOERR_WRITE"; break;
+ case SQLITE_IOERR_FSYNC: zName = "SQLITE_IOERR_FSYNC"; break;
+ case SQLITE_IOERR_DIR_FSYNC: zName = "SQLITE_IOERR_DIR_FSYNC"; break;
+ case SQLITE_IOERR_TRUNCATE: zName = "SQLITE_IOERR_TRUNCATE"; break;
+ case SQLITE_IOERR_FSTAT: zName = "SQLITE_IOERR_FSTAT"; break;
+ case SQLITE_IOERR_UNLOCK: zName = "SQLITE_IOERR_UNLOCK"; break;
+ case SQLITE_IOERR_RDLOCK: zName = "SQLITE_IOERR_RDLOCK"; break;
+ case SQLITE_IOERR_DELETE: zName = "SQLITE_IOERR_DELETE"; break;
+ case SQLITE_IOERR_NOMEM: zName = "SQLITE_IOERR_NOMEM"; break;
+ case SQLITE_IOERR_ACCESS: zName = "SQLITE_IOERR_ACCESS"; break;
+ case SQLITE_IOERR_CHECKRESERVEDLOCK:
+ zName = "SQLITE_IOERR_CHECKRESERVEDLOCK"; break;
+ case SQLITE_IOERR_LOCK: zName = "SQLITE_IOERR_LOCK"; break;
+ case SQLITE_IOERR_CLOSE: zName = "SQLITE_IOERR_CLOSE"; break;
+ case SQLITE_IOERR_DIR_CLOSE: zName = "SQLITE_IOERR_DIR_CLOSE"; break;
+ case SQLITE_IOERR_SHMOPEN: zName = "SQLITE_IOERR_SHMOPEN"; break;
+ case SQLITE_IOERR_SHMSIZE: zName = "SQLITE_IOERR_SHMSIZE"; break;
+ case SQLITE_IOERR_SHMLOCK: zName = "SQLITE_IOERR_SHMLOCK"; break;
+ case SQLITE_IOERR_SHMMAP: zName = "SQLITE_IOERR_SHMMAP"; break;
+ case SQLITE_IOERR_SEEK: zName = "SQLITE_IOERR_SEEK"; break;
+ case SQLITE_IOERR_DELETE_NOENT: zName = "SQLITE_IOERR_DELETE_NOENT";break;
+ case SQLITE_IOERR_MMAP: zName = "SQLITE_IOERR_MMAP"; break;
+ case SQLITE_IOERR_GETTEMPPATH: zName = "SQLITE_IOERR_GETTEMPPATH"; break;
+ case SQLITE_IOERR_CONVPATH: zName = "SQLITE_IOERR_CONVPATH"; break;
+ case SQLITE_CORRUPT: zName = "SQLITE_CORRUPT"; break;
+ case SQLITE_CORRUPT_VTAB: zName = "SQLITE_CORRUPT_VTAB"; break;
+ case SQLITE_NOTFOUND: zName = "SQLITE_NOTFOUND"; break;
+ case SQLITE_FULL: zName = "SQLITE_FULL"; break;
+ case SQLITE_CANTOPEN: zName = "SQLITE_CANTOPEN"; break;
+ case SQLITE_CANTOPEN_NOTEMPDIR: zName = "SQLITE_CANTOPEN_NOTEMPDIR";break;
+ case SQLITE_CANTOPEN_ISDIR: zName = "SQLITE_CANTOPEN_ISDIR"; break;
+ case SQLITE_CANTOPEN_FULLPATH: zName = "SQLITE_CANTOPEN_FULLPATH"; break;
+ case SQLITE_CANTOPEN_CONVPATH: zName = "SQLITE_CANTOPEN_CONVPATH"; break;
+ case SQLITE_CANTOPEN_SYMLINK: zName = "SQLITE_CANTOPEN_SYMLINK"; break;
+ case SQLITE_PROTOCOL: zName = "SQLITE_PROTOCOL"; break;
+ case SQLITE_EMPTY: zName = "SQLITE_EMPTY"; break;
+ case SQLITE_SCHEMA: zName = "SQLITE_SCHEMA"; break;
+ case SQLITE_TOOBIG: zName = "SQLITE_TOOBIG"; break;
+ case SQLITE_CONSTRAINT: zName = "SQLITE_CONSTRAINT"; break;
+ case SQLITE_CONSTRAINT_UNIQUE: zName = "SQLITE_CONSTRAINT_UNIQUE"; break;
+ case SQLITE_CONSTRAINT_TRIGGER: zName = "SQLITE_CONSTRAINT_TRIGGER";break;
+ case SQLITE_CONSTRAINT_FOREIGNKEY:
+ zName = "SQLITE_CONSTRAINT_FOREIGNKEY"; break;
+ case SQLITE_CONSTRAINT_CHECK: zName = "SQLITE_CONSTRAINT_CHECK"; break;
+ case SQLITE_CONSTRAINT_PRIMARYKEY:
+ zName = "SQLITE_CONSTRAINT_PRIMARYKEY"; break;
+ case SQLITE_CONSTRAINT_NOTNULL: zName = "SQLITE_CONSTRAINT_NOTNULL";break;
+ case SQLITE_CONSTRAINT_COMMITHOOK:
+ zName = "SQLITE_CONSTRAINT_COMMITHOOK"; break;
+ case SQLITE_CONSTRAINT_VTAB: zName = "SQLITE_CONSTRAINT_VTAB"; break;
+ case SQLITE_CONSTRAINT_FUNCTION:
+ zName = "SQLITE_CONSTRAINT_FUNCTION"; break;
+ case SQLITE_CONSTRAINT_ROWID: zName = "SQLITE_CONSTRAINT_ROWID"; break;
+ case SQLITE_MISMATCH: zName = "SQLITE_MISMATCH"; break;
+ case SQLITE_MISUSE: zName = "SQLITE_MISUSE"; break;
+ case SQLITE_NOLFS: zName = "SQLITE_NOLFS"; break;
+ case SQLITE_AUTH: zName = "SQLITE_AUTH"; break;
+ case SQLITE_FORMAT: zName = "SQLITE_FORMAT"; break;
+ case SQLITE_RANGE: zName = "SQLITE_RANGE"; break;
+ case SQLITE_NOTADB: zName = "SQLITE_NOTADB"; break;
+ case SQLITE_ROW: zName = "SQLITE_ROW"; break;
+ case SQLITE_NOTICE: zName = "SQLITE_NOTICE"; break;
+ case SQLITE_NOTICE_RECOVER_WAL: zName = "SQLITE_NOTICE_RECOVER_WAL";break;
+ case SQLITE_NOTICE_RECOVER_ROLLBACK:
+ zName = "SQLITE_NOTICE_RECOVER_ROLLBACK"; break;
+ case SQLITE_WARNING: zName = "SQLITE_WARNING"; break;
+ case SQLITE_WARNING_AUTOINDEX: zName = "SQLITE_WARNING_AUTOINDEX"; break;
+ case SQLITE_DONE: zName = "SQLITE_DONE"; break;
+ }
+ }
+ if( zName==0 ){
+ static char zBuf[50];
+ sqlite3_snprintf(sizeof(zBuf), zBuf, "SQLITE_UNKNOWN(%d)", origRc);
+ zName = zBuf;
+ }
+ return zName;
+}
+#endif
+
+/*
+** Return a static string that describes the kind of error specified in the
+** argument.
+*/
+SQLITE_PRIVATE const char *sqlite3ErrStr(int rc){
+ static const char* const aMsg[] = {
+ /* SQLITE_OK */ "not an error",
+ /* SQLITE_ERROR */ "SQL logic error",
+ /* SQLITE_INTERNAL */ 0,
+ /* SQLITE_PERM */ "access permission denied",
+ /* SQLITE_ABORT */ "query aborted",
+ /* SQLITE_BUSY */ "database is locked",
+ /* SQLITE_LOCKED */ "database table is locked",
+ /* SQLITE_NOMEM */ "out of memory",
+ /* SQLITE_READONLY */ "attempt to write a readonly database",
+ /* SQLITE_INTERRUPT */ "interrupted",
+ /* SQLITE_IOERR */ "disk I/O error",
+ /* SQLITE_CORRUPT */ "database disk image is malformed",
+ /* SQLITE_NOTFOUND */ "unknown operation",
+ /* SQLITE_FULL */ "database or disk is full",
+ /* SQLITE_CANTOPEN */ "unable to open database file",
+ /* SQLITE_PROTOCOL */ "locking protocol",
+ /* SQLITE_EMPTY */ 0,
+ /* SQLITE_SCHEMA */ "database schema has changed",
+ /* SQLITE_TOOBIG */ "string or blob too big",
+ /* SQLITE_CONSTRAINT */ "constraint failed",
+ /* SQLITE_MISMATCH */ "datatype mismatch",
+ /* SQLITE_MISUSE */ "bad parameter or other API misuse",
+#ifdef SQLITE_DISABLE_LFS
+ /* SQLITE_NOLFS */ "large file support is disabled",
+#else
+ /* SQLITE_NOLFS */ 0,
+#endif
+ /* SQLITE_AUTH */ "authorization denied",
+ /* SQLITE_FORMAT */ 0,
+ /* SQLITE_RANGE */ "column index out of range",
+ /* SQLITE_NOTADB */ "file is not a database",
+ /* SQLITE_NOTICE */ "notification message",
+ /* SQLITE_WARNING */ "warning message",
+ };
+ const char *zErr = "unknown error";
+ switch( rc ){
+ case SQLITE_ABORT_ROLLBACK: {
+ zErr = "abort due to ROLLBACK";
+ break;
+ }
+ case SQLITE_ROW: {
+ zErr = "another row available";
+ break;
+ }
+ case SQLITE_DONE: {
+ zErr = "no more rows available";
+ break;
+ }
+ default: {
+ rc &= 0xff;
+ if( ALWAYS(rc>=0) && rc<ArraySize(aMsg) && aMsg[rc]!=0 ){
+ zErr = aMsg[rc];
+ }
+ break;
+ }
+ }
+ return zErr;
+}
+
+/*
+** This routine implements a busy callback that sleeps and tries
+** again until a timeout value is reached. The timeout value is
+** an integer number of milliseconds passed in as the first
+** argument.
+**
+** Return non-zero to retry the lock. Return zero to stop trying
+** and cause SQLite to return SQLITE_BUSY.
+*/
+static int sqliteDefaultBusyCallback(
+ void *ptr, /* Database connection */
+ int count /* Number of times table has been busy */
+){
+#if SQLITE_OS_WIN || HAVE_USLEEP
+ /* This case is for systems that have support for sleeping for fractions of
+ ** a second. Examples: All windows systems, unix systems with usleep() */
+ static const u8 delays[] =
+ { 1, 2, 5, 10, 15, 20, 25, 25, 25, 50, 50, 100 };
+ static const u8 totals[] =
+ { 0, 1, 3, 8, 18, 33, 53, 78, 103, 128, 178, 228 };
+# define NDELAY ArraySize(delays)
+ sqlite3 *db = (sqlite3 *)ptr;
+ int tmout = db->busyTimeout;
+ int delay, prior;
+
+ assert( count>=0 );
+ if( count < NDELAY ){
+ delay = delays[count];
+ prior = totals[count];
+ }else{
+ delay = delays[NDELAY-1];
+ prior = totals[NDELAY-1] + delay*(count-(NDELAY-1));
+ }
+ if( prior + delay > tmout ){
+ delay = tmout - prior;
+ if( delay<=0 ) return 0;
+ }
+ sqlite3OsSleep(db->pVfs, delay*1000);
+ return 1;
+#else
+  /* This case is for unix systems that lack usleep() support. Sleeping
+ ** must be done in increments of whole seconds */
+ sqlite3 *db = (sqlite3 *)ptr;
+ int tmout = ((sqlite3 *)ptr)->busyTimeout;
+ if( (count+1)*1000 > tmout ){
+ return 0;
+ }
+ sqlite3OsSleep(db->pVfs, 1000000);
+ return 1;
+#endif
+}
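+
+/* Worked example of the schedule above, assuming a 300ms busy timeout:
+** invocations count=0..10 sleep 1,2,5,10,15,20,25,25,25,50,50 ms
+** (cumulative 228ms); at count=11 the 100ms delay would overshoot, so it
+** is clamped to 300-228 = 72ms; at count=12 the accumulated time already
+** exceeds the timeout and the callback returns 0, letting SQLITE_BUSY
+** surface to the caller. */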
+
+/*
+** Invoke the given busy handler.
+**
+** This routine is called when an operation failed to acquire a
+** lock on VFS file pFile.
+**
+** If this routine returns non-zero, the lock is retried. If it
+** returns 0, the operation aborts with an SQLITE_BUSY error.
+*/
+SQLITE_PRIVATE int sqlite3InvokeBusyHandler(BusyHandler *p){
+ int rc;
+ if( p->xBusyHandler==0 || p->nBusy<0 ) return 0;
+ rc = p->xBusyHandler(p->pBusyArg, p->nBusy);
+ if( rc==0 ){
+ p->nBusy = -1;
+ }else{
+ p->nBusy++;
+ }
+ return rc;
+}
+
+/*
+** This routine sets the busy callback for an SQLite database to the
+** given callback function with the given argument.
+*/
+SQLITE_API int sqlite3_busy_handler(
+ sqlite3 *db,
+ int (*xBusy)(void*,int),
+ void *pArg
+){
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( !sqlite3SafetyCheckOk(db) ) return SQLITE_MISUSE_BKPT;
+#endif
+ sqlite3_mutex_enter(db->mutex);
+ db->busyHandler.xBusyHandler = xBusy;
+ db->busyHandler.pBusyArg = pArg;
+ db->busyHandler.nBusy = 0;
+ db->busyTimeout = 0;
+ sqlite3_mutex_leave(db->mutex);
+ return SQLITE_OK;
+}
+
+#ifndef SQLITE_OMIT_PROGRESS_CALLBACK
+/*
+** This routine sets the progress callback for an SQLite database to the
+** given callback function with the given argument. The progress callback will
+** be invoked every nOps opcodes.
+*/
+SQLITE_API void sqlite3_progress_handler(
+ sqlite3 *db,
+ int nOps,
+ int (*xProgress)(void*),
+ void *pArg
+){
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( !sqlite3SafetyCheckOk(db) ){
+ (void)SQLITE_MISUSE_BKPT;
+ return;
+ }
+#endif
+ sqlite3_mutex_enter(db->mutex);
+ if( nOps>0 ){
+ db->xProgress = xProgress;
+ db->nProgressOps = (unsigned)nOps;
+ db->pProgressArg = pArg;
+ }else{
+ db->xProgress = 0;
+ db->nProgressOps = 0;
+ db->pProgressArg = 0;
+ }
+ sqlite3_mutex_leave(db->mutex);
+}
+#endif
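+
+/* Usage sketch (illustrative): a progress handler that lets a UI cancel a
+** long-running statement. A non-zero return from the callback interrupts
+** the current operation, which then fails with SQLITE_INTERRUPT. Guarded
+** out; remove the guard to compile. */
+#if 0
+static volatile int appCancelRequested = 0;
+static int appProgress(void *pArg){
+  (void)pArg;
+  return appCancelRequested;   /* non-zero aborts the running statement */
+}
+static void appInstallProgress(sqlite3 *db){
+  /* Invoke the callback roughly every 1000 VDBE opcodes */
+  sqlite3_progress_handler(db, 1000, appProgress, 0);
+}
+#endif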
+
+
+/*
+** This routine installs a default busy handler that waits for the
+** specified number of milliseconds before returning 0.
+*/
+SQLITE_API int sqlite3_busy_timeout(sqlite3 *db, int ms){
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( !sqlite3SafetyCheckOk(db) ) return SQLITE_MISUSE_BKPT;
+#endif
+ if( ms>0 ){
+ sqlite3_busy_handler(db, (int(*)(void*,int))sqliteDefaultBusyCallback,
+ (void*)db);
+ db->busyTimeout = ms;
+ }else{
+ sqlite3_busy_handler(db, 0, 0);
+ }
+ return SQLITE_OK;
+}
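+
+/* Usage sketch (illustrative): sqlite3_busy_timeout() and
+** sqlite3_busy_handler() are mutually exclusive; installing either one
+** clears the other, as the implementations above show. Guarded out; remove
+** the guard to compile. */
+#if 0
+static int appOpenWithTimeout(const char *zPath, sqlite3 **ppDb){
+  int rc = sqlite3_open(zPath, ppDb);
+  if( rc==SQLITE_OK ){
+    /* Retry busy locks for up to two seconds before giving up */
+    rc = sqlite3_busy_timeout(*ppDb, 2000);
+  }
+  return rc;
+}
+#endif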
+
+/*
+** Cause any pending operation to stop at its earliest opportunity.
+*/
+SQLITE_API void sqlite3_interrupt(sqlite3 *db){
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( !sqlite3SafetyCheckOk(db) && (db==0 || db->magic!=SQLITE_MAGIC_ZOMBIE) ){
+ (void)SQLITE_MISUSE_BKPT;
+ return;
+ }
+#endif
+ AtomicStore(&db->u1.isInterrupted, 1);
+}
+
+
+/*
+** This function is exactly the same as sqlite3_create_function(), except
+** that it is designed to be called by internal code. The difference is
+** that if a malloc() fails in sqlite3_create_function(), an error code
+** is returned and the mallocFailed flag cleared.
+*/
+SQLITE_PRIVATE int sqlite3CreateFunc(
+ sqlite3 *db,
+ const char *zFunctionName,
+ int nArg,
+ int enc,
+ void *pUserData,
+ void (*xSFunc)(sqlite3_context*,int,sqlite3_value **),
+ void (*xStep)(sqlite3_context*,int,sqlite3_value **),
+ void (*xFinal)(sqlite3_context*),
+ void (*xValue)(sqlite3_context*),
+ void (*xInverse)(sqlite3_context*,int,sqlite3_value **),
+ FuncDestructor *pDestructor
+){
+ FuncDef *p;
+ int nName;
+ int extraFlags;
+
+ assert( sqlite3_mutex_held(db->mutex) );
+ assert( xValue==0 || xSFunc==0 );
+ if( zFunctionName==0 /* Must have a valid name */
+ || (xSFunc!=0 && xFinal!=0) /* Not both xSFunc and xFinal */
+ || ((xFinal==0)!=(xStep==0)) /* Both or neither of xFinal and xStep */
+ || ((xValue==0)!=(xInverse==0)) /* Both or neither of xValue, xInverse */
+ || (nArg<-1 || nArg>SQLITE_MAX_FUNCTION_ARG)
+ || (255<(nName = sqlite3Strlen30( zFunctionName)))
+ ){
+ return SQLITE_MISUSE_BKPT;
+ }
+
+ assert( SQLITE_FUNC_CONSTANT==SQLITE_DETERMINISTIC );
+ assert( SQLITE_FUNC_DIRECT==SQLITE_DIRECTONLY );
+ extraFlags = enc & (SQLITE_DETERMINISTIC|SQLITE_DIRECTONLY|
+ SQLITE_SUBTYPE|SQLITE_INNOCUOUS);
+ enc &= (SQLITE_FUNC_ENCMASK|SQLITE_ANY);
+
+ /* The SQLITE_INNOCUOUS flag is the same bit as SQLITE_FUNC_UNSAFE. But
+ ** the meaning is inverted. So flip the bit. */
+ assert( SQLITE_FUNC_UNSAFE==SQLITE_INNOCUOUS );
+ extraFlags ^= SQLITE_FUNC_UNSAFE;
+
+
+#ifndef SQLITE_OMIT_UTF16
+ /* If SQLITE_UTF16 is specified as the encoding type, transform this
+ ** to one of SQLITE_UTF16LE or SQLITE_UTF16BE using the
+ ** SQLITE_UTF16NATIVE macro. SQLITE_UTF16 is not used internally.
+ **
+ ** If SQLITE_ANY is specified, add three versions of the function
+ ** to the hash table.
+ */
+ if( enc==SQLITE_UTF16 ){
+ enc = SQLITE_UTF16NATIVE;
+ }else if( enc==SQLITE_ANY ){
+ int rc;
+ rc = sqlite3CreateFunc(db, zFunctionName, nArg,
+ (SQLITE_UTF8|extraFlags)^SQLITE_FUNC_UNSAFE,
+ pUserData, xSFunc, xStep, xFinal, xValue, xInverse, pDestructor);
+ if( rc==SQLITE_OK ){
+ rc = sqlite3CreateFunc(db, zFunctionName, nArg,
+ (SQLITE_UTF16LE|extraFlags)^SQLITE_FUNC_UNSAFE,
+ pUserData, xSFunc, xStep, xFinal, xValue, xInverse, pDestructor);
+ }
+ if( rc!=SQLITE_OK ){
+ return rc;
+ }
+ enc = SQLITE_UTF16BE;
+ }
+#else
+ enc = SQLITE_UTF8;
+#endif
+
+ /* Check if an existing function is being overridden or deleted. If so,
+ ** and there are active VMs, then return SQLITE_BUSY. If a function
+ ** is being overridden/deleted but there are no active VMs, allow the
+ ** operation to continue but invalidate all precompiled statements.
+ */
+ p = sqlite3FindFunction(db, zFunctionName, nArg, (u8)enc, 0);
+ if( p && (p->funcFlags & SQLITE_FUNC_ENCMASK)==(u32)enc && p->nArg==nArg ){
+ if( db->nVdbeActive ){
+ sqlite3ErrorWithMsg(db, SQLITE_BUSY,
+ "unable to delete/modify user-function due to active statements");
+ assert( !db->mallocFailed );
+ return SQLITE_BUSY;
+ }else{
+ sqlite3ExpirePreparedStatements(db, 0);
+ }
+ }
+
+ p = sqlite3FindFunction(db, zFunctionName, nArg, (u8)enc, 1);
+ assert(p || db->mallocFailed);
+ if( !p ){
+ return SQLITE_NOMEM_BKPT;
+ }
+
+ /* If an older version of the function with a configured destructor is
+ ** being replaced invoke the destructor function here. */
+ functionDestroy(db, p);
+
+ if( pDestructor ){
+ pDestructor->nRef++;
+ }
+ p->u.pDestructor = pDestructor;
+ p->funcFlags = (p->funcFlags & SQLITE_FUNC_ENCMASK) | extraFlags;
+ testcase( p->funcFlags & SQLITE_DETERMINISTIC );
+ testcase( p->funcFlags & SQLITE_DIRECTONLY );
+ p->xSFunc = xSFunc ? xSFunc : xStep;
+ p->xFinalize = xFinal;
+ p->xValue = xValue;
+ p->xInverse = xInverse;
+ p->pUserData = pUserData;
+ p->nArg = (u16)nArg;
+ return SQLITE_OK;
+}
+
+/*
+** Worker function used by the UTF-8 APIs that create new functions:
+**
+** sqlite3_create_function()
+** sqlite3_create_function_v2()
+** sqlite3_create_window_function()
+*/
+static int createFunctionApi(
+ sqlite3 *db,
+ const char *zFunc,
+ int nArg,
+ int enc,
+ void *p,
+ void (*xSFunc)(sqlite3_context*,int,sqlite3_value**),
+ void (*xStep)(sqlite3_context*,int,sqlite3_value**),
+ void (*xFinal)(sqlite3_context*),
+ void (*xValue)(sqlite3_context*),
+ void (*xInverse)(sqlite3_context*,int,sqlite3_value**),
+ void(*xDestroy)(void*)
+){
+ int rc = SQLITE_ERROR;
+ FuncDestructor *pArg = 0;
+
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( !sqlite3SafetyCheckOk(db) ){
+ return SQLITE_MISUSE_BKPT;
+ }
+#endif
+ sqlite3_mutex_enter(db->mutex);
+ if( xDestroy ){
+ pArg = (FuncDestructor *)sqlite3Malloc(sizeof(FuncDestructor));
+ if( !pArg ){
+ sqlite3OomFault(db);
+ xDestroy(p);
+ goto out;
+ }
+ pArg->nRef = 0;
+ pArg->xDestroy = xDestroy;
+ pArg->pUserData = p;
+ }
+ rc = sqlite3CreateFunc(db, zFunc, nArg, enc, p,
+ xSFunc, xStep, xFinal, xValue, xInverse, pArg
+ );
+ if( pArg && pArg->nRef==0 ){
+ assert( rc!=SQLITE_OK );
+ xDestroy(p);
+ sqlite3_free(pArg);
+ }
+
+ out:
+ rc = sqlite3ApiExit(db, rc);
+ sqlite3_mutex_leave(db->mutex);
+ return rc;
+}
+
+/*
+** Create new user functions.
+*/
+SQLITE_API int sqlite3_create_function(
+ sqlite3 *db,
+ const char *zFunc,
+ int nArg,
+ int enc,
+ void *p,
+ void (*xSFunc)(sqlite3_context*,int,sqlite3_value **),
+ void (*xStep)(sqlite3_context*,int,sqlite3_value **),
+ void (*xFinal)(sqlite3_context*)
+){
+ return createFunctionApi(db, zFunc, nArg, enc, p, xSFunc, xStep,
+ xFinal, 0, 0, 0);
+}
+SQLITE_API int sqlite3_create_function_v2(
+ sqlite3 *db,
+ const char *zFunc,
+ int nArg,
+ int enc,
+ void *p,
+ void (*xSFunc)(sqlite3_context*,int,sqlite3_value **),
+ void (*xStep)(sqlite3_context*,int,sqlite3_value **),
+ void (*xFinal)(sqlite3_context*),
+ void (*xDestroy)(void *)
+){
+ return createFunctionApi(db, zFunc, nArg, enc, p, xSFunc, xStep,
+ xFinal, 0, 0, xDestroy);
+}
+SQLITE_API int sqlite3_create_window_function(
+ sqlite3 *db,
+ const char *zFunc,
+ int nArg,
+ int enc,
+ void *p,
+ void (*xStep)(sqlite3_context*,int,sqlite3_value **),
+ void (*xFinal)(sqlite3_context*),
+ void (*xValue)(sqlite3_context*),
+ void (*xInverse)(sqlite3_context*,int,sqlite3_value **),
+ void (*xDestroy)(void *)
+){
+ return createFunctionApi(db, zFunc, nArg, enc, p, 0, xStep,
+ xFinal, xValue, xInverse, xDestroy);
+}
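+
+/* Usage sketch (illustrative): a deterministic scalar function registered
+** through the v2 interface above. half(X) returning X/2.0 is an arbitrary
+** example. Guarded out; remove the guard to compile. */
+#if 0
+static void halfFunc(sqlite3_context *ctx, int argc, sqlite3_value **argv){
+  (void)argc;
+  sqlite3_result_double(ctx, 0.5*sqlite3_value_double(argv[0]));
+}
+static int appRegisterHalf(sqlite3 *db){
+  /* SELECT half(10); -- returns 5.0 */
+  return sqlite3_create_function_v2(db, "half", 1,
+             SQLITE_UTF8|SQLITE_DETERMINISTIC, 0,
+             halfFunc, 0, 0, 0);
+}
+#endif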
+
+#ifndef SQLITE_OMIT_UTF16
+SQLITE_API int sqlite3_create_function16(
+ sqlite3 *db,
+ const void *zFunctionName,
+ int nArg,
+ int eTextRep,
+ void *p,
+ void (*xSFunc)(sqlite3_context*,int,sqlite3_value**),
+ void (*xStep)(sqlite3_context*,int,sqlite3_value**),
+ void (*xFinal)(sqlite3_context*)
+){
+ int rc;
+ char *zFunc8;
+
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( !sqlite3SafetyCheckOk(db) || zFunctionName==0 ) return SQLITE_MISUSE_BKPT;
+#endif
+ sqlite3_mutex_enter(db->mutex);
+ assert( !db->mallocFailed );
+ zFunc8 = sqlite3Utf16to8(db, zFunctionName, -1, SQLITE_UTF16NATIVE);
+  rc = sqlite3CreateFunc(db, zFunc8, nArg, eTextRep, p,
+                         xSFunc, xStep, xFinal, 0, 0, 0);
+ sqlite3DbFree(db, zFunc8);
+ rc = sqlite3ApiExit(db, rc);
+ sqlite3_mutex_leave(db->mutex);
+ return rc;
+}
+#endif
+
+
+/*
+** The following is the implementation of an SQL function that always
+** fails with an error message stating that the function is used in the
+** wrong context. The sqlite3_overload_function() API might construct
+** SQL functions that use this routine so that the functions will exist
+** for name resolution but are actually overloaded by the xFindFunction
+** method of virtual tables.
+*/
+static void sqlite3InvalidFunction(
+ sqlite3_context *context, /* The function calling context */
+ int NotUsed, /* Number of arguments to the function */
+ sqlite3_value **NotUsed2 /* Value of each argument */
+){
+ const char *zName = (const char*)sqlite3_user_data(context);
+ char *zErr;
+ UNUSED_PARAMETER2(NotUsed, NotUsed2);
+ zErr = sqlite3_mprintf(
+ "unable to use function %s in the requested context", zName);
+ sqlite3_result_error(context, zErr, -1);
+ sqlite3_free(zErr);
+}
+
+/*
+** Declare that a function has been overloaded by a virtual table.
+**
+** If the function already exists as a regular global function, then
+** this routine is a no-op. If the function does not exist, then create
+** a new one that always throws a run-time error.
+**
+** When virtual tables intend to provide an overloaded function, they
+** should call this routine to make sure the global function exists.
+** A global function must exist in order for name resolution to work
+** properly.
+*/
+SQLITE_API int sqlite3_overload_function(
+ sqlite3 *db,
+ const char *zName,
+ int nArg
+){
+ int rc;
+ char *zCopy;
+
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( !sqlite3SafetyCheckOk(db) || zName==0 || nArg<-2 ){
+ return SQLITE_MISUSE_BKPT;
+ }
+#endif
+ sqlite3_mutex_enter(db->mutex);
+ rc = sqlite3FindFunction(db, zName, nArg, SQLITE_UTF8, 0)!=0;
+ sqlite3_mutex_leave(db->mutex);
+ if( rc ) return SQLITE_OK;
+ zCopy = sqlite3_mprintf(zName);
+ if( zCopy==0 ) return SQLITE_NOMEM;
+ return sqlite3_create_function_v2(db, zName, nArg, SQLITE_UTF8,
+ zCopy, sqlite3InvalidFunction, 0, 0, sqlite3_free);
+}
+
+#ifndef SQLITE_OMIT_TRACE
+/*
+** Register a trace function. The pArg from the previously registered trace
+** is returned.
+**
+** A NULL trace function means that no tracing is performed. A non-NULL
+** trace is a pointer to a function that is invoked at the start of each
+** SQL statement.
+*/
+#ifndef SQLITE_OMIT_DEPRECATED
+SQLITE_API void *sqlite3_trace(sqlite3 *db, void(*xTrace)(void*,const char*), void *pArg){
+ void *pOld;
+
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( !sqlite3SafetyCheckOk(db) ){
+ (void)SQLITE_MISUSE_BKPT;
+ return 0;
+ }
+#endif
+ sqlite3_mutex_enter(db->mutex);
+ pOld = db->pTraceArg;
+ db->mTrace = xTrace ? SQLITE_TRACE_LEGACY : 0;
+ db->trace.xLegacy = xTrace;
+ db->pTraceArg = pArg;
+ sqlite3_mutex_leave(db->mutex);
+ return pOld;
+}
+#endif /* SQLITE_OMIT_DEPRECATED */
+
+/* Register a trace callback using the version-2 interface.
+*/
+SQLITE_API int sqlite3_trace_v2(
+ sqlite3 *db, /* Trace this connection */
+ unsigned mTrace, /* Mask of events to be traced */
+ int(*xTrace)(unsigned,void*,void*,void*), /* Callback to invoke */
+ void *pArg /* Context */
+){
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( !sqlite3SafetyCheckOk(db) ){
+ return SQLITE_MISUSE_BKPT;
+ }
+#endif
+ sqlite3_mutex_enter(db->mutex);
+ if( mTrace==0 ) xTrace = 0;
+ if( xTrace==0 ) mTrace = 0;
+ db->mTrace = mTrace;
+ db->trace.xV2 = xTrace;
+ db->pTraceArg = pArg;
+ sqlite3_mutex_leave(db->mutex);
+ return SQLITE_OK;
+}
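+
+/* Usage sketch (illustrative): tracing statement execution through the
+** version-2 interface above. For SQLITE_TRACE_STMT events the P argument
+** is the prepared statement and X is the unexpanded SQL text. Guarded out;
+** remove the guard to compile. */
+#if 0
+#include <stdio.h>
+static int appTrace(unsigned mType, void *pCtx, void *pP, void *pX){
+  (void)pCtx;
+  if( mType==SQLITE_TRACE_STMT ){
+    char *zSql = sqlite3_expanded_sql((sqlite3_stmt*)pP);
+    fprintf(stderr, "SQL: %s\n", zSql ? zSql : (const char*)pX);
+    sqlite3_free(zSql);
+  }
+  return 0;
+}
+static int appInstallTrace(sqlite3 *db){
+  return sqlite3_trace_v2(db, SQLITE_TRACE_STMT, appTrace, 0);
+}
+#endif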
+
+#ifndef SQLITE_OMIT_DEPRECATED
+/*
+** Register a profile function. The pArg from the previously registered
+** profile function is returned.
+**
+** A NULL profile function means that no profiling is performed. A non-NULL
+** profile is a pointer to a function that is invoked at the conclusion of
+** each SQL statement that is run.
+*/
+SQLITE_API void *sqlite3_profile(
+ sqlite3 *db,
+ void (*xProfile)(void*,const char*,sqlite_uint64),
+ void *pArg
+){
+ void *pOld;
+
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( !sqlite3SafetyCheckOk(db) ){
+ (void)SQLITE_MISUSE_BKPT;
+ return 0;
+ }
+#endif
+ sqlite3_mutex_enter(db->mutex);
+ pOld = db->pProfileArg;
+ db->xProfile = xProfile;
+ db->pProfileArg = pArg;
+ db->mTrace &= SQLITE_TRACE_NONLEGACY_MASK;
+ if( db->xProfile ) db->mTrace |= SQLITE_TRACE_XPROFILE;
+ sqlite3_mutex_leave(db->mutex);
+ return pOld;
+}
+#endif /* SQLITE_OMIT_DEPRECATED */
+#endif /* SQLITE_OMIT_TRACE */
+
+/*
+** Register a function to be invoked when a transaction commits.
+** If the invoked function returns non-zero, then the commit becomes a
+** rollback.
+*/
+SQLITE_API void *sqlite3_commit_hook(
+ sqlite3 *db, /* Attach the hook to this database */
+ int (*xCallback)(void*), /* Function to invoke on each commit */
+ void *pArg /* Argument to the function */
+){
+ void *pOld;
+
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( !sqlite3SafetyCheckOk(db) ){
+ (void)SQLITE_MISUSE_BKPT;
+ return 0;
+ }
+#endif
+ sqlite3_mutex_enter(db->mutex);
+ pOld = db->pCommitArg;
+ db->xCommitCallback = xCallback;
+ db->pCommitArg = pArg;
+ sqlite3_mutex_leave(db->mutex);
+ return pOld;
+}
+
+/*
+** Register a callback to be invoked each time a row is updated,
+** inserted or deleted using this database connection.
+*/
+SQLITE_API void *sqlite3_update_hook(
+ sqlite3 *db, /* Attach the hook to this database */
+ void (*xCallback)(void*,int,char const *,char const *,sqlite_int64),
+ void *pArg /* Argument to the function */
+){
+ void *pRet;
+
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( !sqlite3SafetyCheckOk(db) ){
+ (void)SQLITE_MISUSE_BKPT;
+ return 0;
+ }
+#endif
+ sqlite3_mutex_enter(db->mutex);
+ pRet = db->pUpdateArg;
+ db->xUpdateCallback = xCallback;
+ db->pUpdateArg = pArg;
+ sqlite3_mutex_leave(db->mutex);
+ return pRet;
+}
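+
+/* Usage sketch (illustrative): an update hook reporting each row change.
+** The op argument is one of SQLITE_INSERT, SQLITE_UPDATE or SQLITE_DELETE
+** and iRow is the rowid of the affected row. Guarded out; remove the guard
+** to compile. */
+#if 0
+#include <stdio.h>
+static void appUpdateHook(void *pArg, int op, const char *zDb,
+                          const char *zTbl, sqlite3_int64 iRow){
+  (void)pArg;
+  fprintf(stderr, "change op=%d on %s.%s rowid=%lld\n",
+          op, zDb, zTbl, (long long)iRow);
+}
+static void appInstallUpdateHook(sqlite3 *db){
+  sqlite3_update_hook(db, appUpdateHook, 0);
+}
+#endif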
+
+/*
+** Register a callback to be invoked each time a transaction is rolled
+** back by this database connection.
+*/
+SQLITE_API void *sqlite3_rollback_hook(
+ sqlite3 *db, /* Attach the hook to this database */
+ void (*xCallback)(void*), /* Callback function */
+ void *pArg /* Argument to the function */
+){
+ void *pRet;
+
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( !sqlite3SafetyCheckOk(db) ){
+ (void)SQLITE_MISUSE_BKPT;
+ return 0;
+ }
+#endif
+ sqlite3_mutex_enter(db->mutex);
+ pRet = db->pRollbackArg;
+ db->xRollbackCallback = xCallback;
+ db->pRollbackArg = pArg;
+ sqlite3_mutex_leave(db->mutex);
+ return pRet;
+}
+
+#ifdef SQLITE_ENABLE_PREUPDATE_HOOK
+/*
+** Register a callback to be invoked each time a row is updated,
+** inserted or deleted using this database connection.
+*/
+SQLITE_API void *sqlite3_preupdate_hook(
+ sqlite3 *db, /* Attach the hook to this database */
+ void(*xCallback)( /* Callback function */
+ void*,sqlite3*,int,char const*,char const*,sqlite3_int64,sqlite3_int64),
+ void *pArg /* First callback argument */
+){
+ void *pRet;
+ sqlite3_mutex_enter(db->mutex);
+ pRet = db->pPreUpdateArg;
+ db->xPreUpdateCallback = xCallback;
+ db->pPreUpdateArg = pArg;
+ sqlite3_mutex_leave(db->mutex);
+ return pRet;
+}
+#endif /* SQLITE_ENABLE_PREUPDATE_HOOK */
+
+#ifndef SQLITE_OMIT_WAL
+/*
+** The sqlite3_wal_hook() callback registered by sqlite3_wal_autocheckpoint().
+** Invoke sqlite3_wal_checkpoint if the number of frames in the log file
+** is greater than sqlite3.pWalArg cast to an integer (the value configured by
+** sqlite3_wal_autocheckpoint()).
+*/
+SQLITE_PRIVATE int sqlite3WalDefaultHook(
+ void *pClientData, /* Argument */
+ sqlite3 *db, /* Connection */
+ const char *zDb, /* Database */
+ int nFrame /* Size of WAL */
+){
+ if( nFrame>=SQLITE_PTR_TO_INT(pClientData) ){
+ sqlite3BeginBenignMalloc();
+ sqlite3_wal_checkpoint(db, zDb);
+ sqlite3EndBenignMalloc();
+ }
+ return SQLITE_OK;
+}
+#endif /* SQLITE_OMIT_WAL */
+
+/*
+** Configure an sqlite3_wal_hook() callback to automatically checkpoint
+** a database after committing a transaction if there are nFrame or
+** more frames in the log file. Passing zero or a negative value as the
+** nFrame parameter disables automatic checkpoints entirely.
+**
+** The callback registered by this function replaces any existing callback
+** registered using sqlite3_wal_hook(). Likewise, registering a callback
+** using sqlite3_wal_hook() disables the automatic checkpoint mechanism
+** configured by this function.
+*/
+SQLITE_API int sqlite3_wal_autocheckpoint(sqlite3 *db, int nFrame){
+#ifdef SQLITE_OMIT_WAL
+ UNUSED_PARAMETER(db);
+ UNUSED_PARAMETER(nFrame);
+#else
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( !sqlite3SafetyCheckOk(db) ) return SQLITE_MISUSE_BKPT;
+#endif
+ if( nFrame>0 ){
+ sqlite3_wal_hook(db, sqlite3WalDefaultHook, SQLITE_INT_TO_PTR(nFrame));
+ }else{
+ sqlite3_wal_hook(db, 0, 0);
+ }
+#endif
+ return SQLITE_OK;
+}
+
+/*
+** Register a callback to be invoked each time a transaction is written
+** into the write-ahead-log by this database connection.
+*/
+SQLITE_API void *sqlite3_wal_hook(
+ sqlite3 *db, /* Attach the hook to this db handle */
+ int(*xCallback)(void *, sqlite3*, const char*, int),
+ void *pArg /* First argument passed to xCallback() */
+){
+#ifndef SQLITE_OMIT_WAL
+ void *pRet;
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( !sqlite3SafetyCheckOk(db) ){
+ (void)SQLITE_MISUSE_BKPT;
+ return 0;
+ }
+#endif
+ sqlite3_mutex_enter(db->mutex);
+ pRet = db->pWalArg;
+ db->xWalCallback = xCallback;
+ db->pWalArg = pArg;
+ sqlite3_mutex_leave(db->mutex);
+ return pRet;
+#else
+ return 0;
+#endif
+}
+
+/*
+** Checkpoint database zDb.
+*/
+SQLITE_API int sqlite3_wal_checkpoint_v2(
+ sqlite3 *db, /* Database handle */
+ const char *zDb, /* Name of attached database (or NULL) */
+ int eMode, /* SQLITE_CHECKPOINT_* value */
+ int *pnLog, /* OUT: Size of WAL log in frames */
+ int *pnCkpt /* OUT: Total number of frames checkpointed */
+){
+#ifdef SQLITE_OMIT_WAL
+ return SQLITE_OK;
+#else
+ int rc; /* Return code */
+ int iDb = SQLITE_MAX_ATTACHED; /* sqlite3.aDb[] index of db to checkpoint */
+
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( !sqlite3SafetyCheckOk(db) ) return SQLITE_MISUSE_BKPT;
+#endif
+
+ /* Initialize the output variables to -1 in case an error occurs. */
+ if( pnLog ) *pnLog = -1;
+ if( pnCkpt ) *pnCkpt = -1;
+
+ assert( SQLITE_CHECKPOINT_PASSIVE==0 );
+ assert( SQLITE_CHECKPOINT_FULL==1 );
+ assert( SQLITE_CHECKPOINT_RESTART==2 );
+ assert( SQLITE_CHECKPOINT_TRUNCATE==3 );
+ if( eMode<SQLITE_CHECKPOINT_PASSIVE || eMode>SQLITE_CHECKPOINT_TRUNCATE ){
+ /* EVIDENCE-OF: R-03996-12088 The M parameter must be a valid checkpoint
+ ** mode: */
+ return SQLITE_MISUSE;
+ }
+
+ sqlite3_mutex_enter(db->mutex);
+ if( zDb && zDb[0] ){
+ iDb = sqlite3FindDbName(db, zDb);
+ }
+ if( iDb<0 ){
+ rc = SQLITE_ERROR;
+ sqlite3ErrorWithMsg(db, SQLITE_ERROR, "unknown database: %s", zDb);
+ }else{
+ db->busyHandler.nBusy = 0;
+ rc = sqlite3Checkpoint(db, iDb, eMode, pnLog, pnCkpt);
+ sqlite3Error(db, rc);
+ }
+ rc = sqlite3ApiExit(db, rc);
+
+ /* If there are no active statements, clear the interrupt flag at this
+ ** point. */
+ if( db->nVdbeActive==0 ){
+ AtomicStore(&db->u1.isInterrupted, 0);
+ }
+
+ sqlite3_mutex_leave(db->mutex);
+ return rc;
+#endif
+}
+
+
+/*
+** Checkpoint database zDb. If zDb is NULL or points to a zero-length
+** string, all attached databases are
+** checkpointed.
+*/
+SQLITE_API int sqlite3_wal_checkpoint(sqlite3 *db, const char *zDb){
+ /* EVIDENCE-OF: R-41613-20553 The sqlite3_wal_checkpoint(D,X) is equivalent to
+ ** sqlite3_wal_checkpoint_v2(D,X,SQLITE_CHECKPOINT_PASSIVE,0,0). */
+ return sqlite3_wal_checkpoint_v2(db,zDb,SQLITE_CHECKPOINT_PASSIVE,0,0);
+}
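+
+/* Usage sketch (illustrative): an explicit TRUNCATE-mode checkpoint on the
+** main database using the v2 interface above, reporting the WAL size and
+** the number of frames checkpointed. Guarded out; remove the guard to
+** compile. */
+#if 0
+#include <stdio.h>
+static int appTruncateWal(sqlite3 *db){
+  int nLog = 0, nCkpt = 0;
+  int rc = sqlite3_wal_checkpoint_v2(db, "main",
+               SQLITE_CHECKPOINT_TRUNCATE, &nLog, &nCkpt);
+  fprintf(stderr, "checkpoint rc=%d log=%d ckpt=%d\n", rc, nLog, nCkpt);
+  return rc;
+}
+#endif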
+
+#ifndef SQLITE_OMIT_WAL
+/*
+** Run a checkpoint on database iDb. This is a no-op if database iDb is
+** not currently open in WAL mode.
+**
+** If a transaction is open on the database being checkpointed, this
+** function returns SQLITE_LOCKED and a checkpoint is not attempted. If
+** an error occurs while running the checkpoint, an SQLite error code is
+** returned (e.g. SQLITE_IOERR). Otherwise, SQLITE_OK is returned.
+**
+** The mutex on database handle db should be held by the caller. The mutex
+** associated with the specific b-tree being checkpointed is taken by
+** this function while the checkpoint is running.
+**
+** If iDb is passed SQLITE_MAX_ATTACHED, then all attached databases are
+** checkpointed. If an error is encountered it is returned immediately -
+** no attempt is made to checkpoint any remaining databases.
+**
+** Parameter eMode is one of SQLITE_CHECKPOINT_PASSIVE, FULL, RESTART
+** or TRUNCATE.
+*/
+SQLITE_PRIVATE int sqlite3Checkpoint(sqlite3 *db, int iDb, int eMode, int *pnLog, int *pnCkpt){
+ int rc = SQLITE_OK; /* Return code */
+ int i; /* Used to iterate through attached dbs */
+ int bBusy = 0; /* True if SQLITE_BUSY has been encountered */
+
+ assert( sqlite3_mutex_held(db->mutex) );
+ assert( !pnLog || *pnLog==-1 );
+ assert( !pnCkpt || *pnCkpt==-1 );
+
+ for(i=0; i<db->nDb && rc==SQLITE_OK; i++){
+ if( i==iDb || iDb==SQLITE_MAX_ATTACHED ){
+ rc = sqlite3BtreeCheckpoint(db->aDb[i].pBt, eMode, pnLog, pnCkpt);
+ pnLog = 0;
+ pnCkpt = 0;
+ if( rc==SQLITE_BUSY ){
+ bBusy = 1;
+ rc = SQLITE_OK;
+ }
+ }
+ }
+
+ return (rc==SQLITE_OK && bBusy) ? SQLITE_BUSY : rc;
+}
+#endif /* SQLITE_OMIT_WAL */
+
+/*
+** This function returns true if main-memory should be used instead of
+** a temporary file for transient pager files and statement journals.
+** The value returned depends on the value of db->temp_store (runtime
+** parameter) and the compile time value of SQLITE_TEMP_STORE. The
+** following table describes the relationship between these two values
+** and this function's return value.
+**
+** SQLITE_TEMP_STORE db->temp_store Location of temporary database
+** ----------------- -------------- ------------------------------
+** 0 any file (return 0)
+** 1 1 file (return 0)
+** 1 2 memory (return 1)
+** 1 0 file (return 0)
+** 2 1 file (return 0)
+** 2 2 memory (return 1)
+** 2 0 memory (return 1)
+** 3 any memory (return 1)
+*/
+SQLITE_PRIVATE int sqlite3TempInMemory(const sqlite3 *db){
+#if SQLITE_TEMP_STORE==1
+ return ( db->temp_store==2 );
+#endif
+#if SQLITE_TEMP_STORE==2
+ return ( db->temp_store!=1 );
+#endif
+#if SQLITE_TEMP_STORE==3
+ UNUSED_PARAMETER(db);
+ return 1;
+#endif
+#if SQLITE_TEMP_STORE<1 || SQLITE_TEMP_STORE>3
+ UNUSED_PARAMETER(db);
+ return 0;
+#endif
+}
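+
+/* Run-time sketch of the table above, assuming the common default of
+** -DSQLITE_TEMP_STORE=1 and an open handle "db":
+**
+**   sqlite3_exec(db, "PRAGMA temp_store=2;", 0, 0, 0);
+**       (db->temp_store is now 2, so this routine returns 1: memory)
+**   sqlite3_exec(db, "PRAGMA temp_store=0;", 0, 0, 0);
+**       (back to the default: temporary files go to disk again)
+*/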
+
+/*
+** Return UTF-8 encoded English language explanation of the most recent
+** error.
+*/
+SQLITE_API const char *sqlite3_errmsg(sqlite3 *db){
+ const char *z;
+ if( !db ){
+ return sqlite3ErrStr(SQLITE_NOMEM_BKPT);
+ }
+ if( !sqlite3SafetyCheckSickOrOk(db) ){
+ return sqlite3ErrStr(SQLITE_MISUSE_BKPT);
+ }
+ sqlite3_mutex_enter(db->mutex);
+ if( db->mallocFailed ){
+ z = sqlite3ErrStr(SQLITE_NOMEM_BKPT);
+ }else{
+ testcase( db->pErr==0 );
+ z = db->errCode ? (char*)sqlite3_value_text(db->pErr) : 0;
+ assert( !db->mallocFailed );
+ if( z==0 ){
+ z = sqlite3ErrStr(db->errCode);
+ }
+ }
+ sqlite3_mutex_leave(db->mutex);
+ return z;
+}
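+
+/* Error-reporting sketch (illustrative; assumes <stdio.h> and an open
+** handle "db"):
+**
+**   if( sqlite3_exec(db, "SELECT * FROM no_such_table", 0,0,0)!=SQLITE_OK ){
+**     fprintf(stderr, "error %d: %s\n",
+**             sqlite3_extended_errcode(db), sqlite3_errmsg(db));
+**   }
+*/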
+
+#ifndef SQLITE_OMIT_UTF16
+/*
+** Return UTF-16 encoded English language explanation of the most recent
+** error.
+*/
+SQLITE_API const void *sqlite3_errmsg16(sqlite3 *db){
+ static const u16 outOfMem[] = {
+ 'o', 'u', 't', ' ', 'o', 'f', ' ', 'm', 'e', 'm', 'o', 'r', 'y', 0
+ };
+ static const u16 misuse[] = {
+ 'b', 'a', 'd', ' ', 'p', 'a', 'r', 'a', 'm', 'e', 't', 'e', 'r', ' ',
+ 'o', 'r', ' ', 'o', 't', 'h', 'e', 'r', ' ', 'A', 'P', 'I', ' ',
+ 'm', 'i', 's', 'u', 's', 'e', 0
+ };
+
+ const void *z;
+ if( !db ){
+ return (void *)outOfMem;
+ }
+ if( !sqlite3SafetyCheckSickOrOk(db) ){
+ return (void *)misuse;
+ }
+ sqlite3_mutex_enter(db->mutex);
+ if( db->mallocFailed ){
+ z = (void *)outOfMem;
+ }else{
+ z = sqlite3_value_text16(db->pErr);
+ if( z==0 ){
+ sqlite3ErrorWithMsg(db, db->errCode, sqlite3ErrStr(db->errCode));
+ z = sqlite3_value_text16(db->pErr);
+ }
+ /* A malloc() may have failed within the call to sqlite3_value_text16()
+ ** above. If this is the case, then the db->mallocFailed flag needs to
+ ** be cleared before returning. Do this directly, instead of via
+ ** sqlite3ApiExit(), to avoid setting the database handle error message.
+ */
+ sqlite3OomClear(db);
+ }
+ sqlite3_mutex_leave(db->mutex);
+ return z;
+}
+#endif /* SQLITE_OMIT_UTF16 */
+
+/*
+** Return the most recent error code generated by an SQLite routine. If NULL is
+** passed to this function, we assume a malloc() failed during sqlite3_open().
+*/
+SQLITE_API int sqlite3_errcode(sqlite3 *db){
+ if( db && !sqlite3SafetyCheckSickOrOk(db) ){
+ return SQLITE_MISUSE_BKPT;
+ }
+ if( !db || db->mallocFailed ){
+ return SQLITE_NOMEM_BKPT;
+ }
+ return db->errCode & db->errMask;
+}
+SQLITE_API int sqlite3_extended_errcode(sqlite3 *db){
+ if( db && !sqlite3SafetyCheckSickOrOk(db) ){
+ return SQLITE_MISUSE_BKPT;
+ }
+ if( !db || db->mallocFailed ){
+ return SQLITE_NOMEM_BKPT;
+ }
+ return db->errCode;
+}
+SQLITE_API int sqlite3_system_errno(sqlite3 *db){
+ return db ? db->iSysErrno : 0;
+}
+
+/*
+** Return a string that describes the kind of error specified in the
+** argument. For now, this simply calls the internal sqlite3ErrStr()
+** function.
+*/
+SQLITE_API const char *sqlite3_errstr(int rc){
+ return sqlite3ErrStr(rc);
+}
+
+/*
+** Create a new collating function for database "db". The name is zName
+** and the encoding is enc.
+*/
+static int createCollation(
+ sqlite3* db,
+ const char *zName,
+ u8 enc,
+ void* pCtx,
+ int(*xCompare)(void*,int,const void*,int,const void*),
+ void(*xDel)(void*)
+){
+ CollSeq *pColl;
+ int enc2;
+
+ assert( sqlite3_mutex_held(db->mutex) );
+
+ /* If SQLITE_UTF16 is specified as the encoding type, transform this
+ ** to one of SQLITE_UTF16LE or SQLITE_UTF16BE using the
+ ** SQLITE_UTF16NATIVE macro. SQLITE_UTF16 is not used internally.
+ */
+ enc2 = enc;
+ testcase( enc2==SQLITE_UTF16 );
+ testcase( enc2==SQLITE_UTF16_ALIGNED );
+ if( enc2==SQLITE_UTF16 || enc2==SQLITE_UTF16_ALIGNED ){
+ enc2 = SQLITE_UTF16NATIVE;
+ }
+ if( enc2<SQLITE_UTF8 || enc2>SQLITE_UTF16BE ){
+ return SQLITE_MISUSE_BKPT;
+ }
+
+ /* Check if this call is removing or replacing an existing collation
+ ** sequence. If so, and there are active VMs, return busy. If there
+ ** are no active VMs, invalidate any pre-compiled statements.
+ */
+ pColl = sqlite3FindCollSeq(db, (u8)enc2, zName, 0);
+ if( pColl && pColl->xCmp ){
+ if( db->nVdbeActive ){
+ sqlite3ErrorWithMsg(db, SQLITE_BUSY,
+ "unable to delete/modify collation sequence due to active statements");
+ return SQLITE_BUSY;
+ }
+ sqlite3ExpirePreparedStatements(db, 0);
+
+ /* If collation sequence pColl was created directly by a call to
+ ** sqlite3_create_collation, and not generated by synthCollSeq(),
+ ** then any copies made by synthCollSeq() need to be invalidated.
+  ** Also, the collation destructor function - CollSeq.xDel() - may
+  ** need to be called.
+ */
+ if( (pColl->enc & ~SQLITE_UTF16_ALIGNED)==enc2 ){
+ CollSeq *aColl = sqlite3HashFind(&db->aCollSeq, zName);
+ int j;
+ for(j=0; j<3; j++){
+ CollSeq *p = &aColl[j];
+ if( p->enc==pColl->enc ){
+ if( p->xDel ){
+ p->xDel(p->pUser);
+ }
+ p->xCmp = 0;
+ }
+ }
+ }
+ }
+
+ pColl = sqlite3FindCollSeq(db, (u8)enc2, zName, 1);
+ if( pColl==0 ) return SQLITE_NOMEM_BKPT;
+ pColl->xCmp = xCompare;
+ pColl->pUser = pCtx;
+ pColl->xDel = xDel;
+ pColl->enc = (u8)(enc2 | (enc & SQLITE_UTF16_ALIGNED));
+ sqlite3Error(db, SQLITE_OK);
+ return SQLITE_OK;
+}
+
+
+/*
+** This array defines hard upper bounds on limit values. The
+** initializer must be kept in sync with the SQLITE_LIMIT_*
+** #defines in sqlite3.h.
+*/
+static const int aHardLimit[] = {
+ SQLITE_MAX_LENGTH,
+ SQLITE_MAX_SQL_LENGTH,
+ SQLITE_MAX_COLUMN,
+ SQLITE_MAX_EXPR_DEPTH,
+ SQLITE_MAX_COMPOUND_SELECT,
+ SQLITE_MAX_VDBE_OP,
+ SQLITE_MAX_FUNCTION_ARG,
+ SQLITE_MAX_ATTACHED,
+ SQLITE_MAX_LIKE_PATTERN_LENGTH,
+ SQLITE_MAX_VARIABLE_NUMBER, /* IMP: R-38091-32352 */
+ SQLITE_MAX_TRIGGER_DEPTH,
+ SQLITE_MAX_WORKER_THREADS,
+};
+
+/*
+** Make sure the hard limits are set to reasonable values
+*/
+#if SQLITE_MAX_LENGTH<100
+# error SQLITE_MAX_LENGTH must be at least 100
+#endif
+#if SQLITE_MAX_SQL_LENGTH<100
+# error SQLITE_MAX_SQL_LENGTH must be at least 100
+#endif
+#if SQLITE_MAX_SQL_LENGTH>SQLITE_MAX_LENGTH
+# error SQLITE_MAX_SQL_LENGTH must not be greater than SQLITE_MAX_LENGTH
+#endif
+#if SQLITE_MAX_COMPOUND_SELECT<2
+# error SQLITE_MAX_COMPOUND_SELECT must be at least 2
+#endif
+#if SQLITE_MAX_VDBE_OP<40
+# error SQLITE_MAX_VDBE_OP must be at least 40
+#endif
+#if SQLITE_MAX_FUNCTION_ARG<0 || SQLITE_MAX_FUNCTION_ARG>127
+# error SQLITE_MAX_FUNCTION_ARG must be between 0 and 127
+#endif
+#if SQLITE_MAX_ATTACHED<0 || SQLITE_MAX_ATTACHED>125
+# error SQLITE_MAX_ATTACHED must be between 0 and 125
+#endif
+#if SQLITE_MAX_LIKE_PATTERN_LENGTH<1
+# error SQLITE_MAX_LIKE_PATTERN_LENGTH must be at least 1
+#endif
+#if SQLITE_MAX_COLUMN>32767
+# error SQLITE_MAX_COLUMN must not exceed 32767
+#endif
+#if SQLITE_MAX_TRIGGER_DEPTH<1
+# error SQLITE_MAX_TRIGGER_DEPTH must be at least 1
+#endif
+#if SQLITE_MAX_WORKER_THREADS<0 || SQLITE_MAX_WORKER_THREADS>50
+# error SQLITE_MAX_WORKER_THREADS must be between 0 and 50
+#endif
+
+
+/*
+** Change the value of a limit. Report the old value.
+** If an invalid limit index is supplied, report -1.
+** Make no changes but still report the old value if the
+** new limit is negative.
+**
+** A new lower limit does not shrink existing constructs.
+** It merely prevents new constructs that exceed the limit
+** from forming.
+*/
+SQLITE_API int sqlite3_limit(sqlite3 *db, int limitId, int newLimit){
+ int oldLimit;
+
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( !sqlite3SafetyCheckOk(db) ){
+ (void)SQLITE_MISUSE_BKPT;
+ return -1;
+ }
+#endif
+
+ /* EVIDENCE-OF: R-30189-54097 For each limit category SQLITE_LIMIT_NAME
+ ** there is a hard upper bound set at compile-time by a C preprocessor
+ ** macro called SQLITE_MAX_NAME. (The "_LIMIT_" in the name is changed to
+ ** "_MAX_".)
+ */
+ assert( aHardLimit[SQLITE_LIMIT_LENGTH]==SQLITE_MAX_LENGTH );
+ assert( aHardLimit[SQLITE_LIMIT_SQL_LENGTH]==SQLITE_MAX_SQL_LENGTH );
+ assert( aHardLimit[SQLITE_LIMIT_COLUMN]==SQLITE_MAX_COLUMN );
+ assert( aHardLimit[SQLITE_LIMIT_EXPR_DEPTH]==SQLITE_MAX_EXPR_DEPTH );
+ assert( aHardLimit[SQLITE_LIMIT_COMPOUND_SELECT]==SQLITE_MAX_COMPOUND_SELECT);
+ assert( aHardLimit[SQLITE_LIMIT_VDBE_OP]==SQLITE_MAX_VDBE_OP );
+ assert( aHardLimit[SQLITE_LIMIT_FUNCTION_ARG]==SQLITE_MAX_FUNCTION_ARG );
+ assert( aHardLimit[SQLITE_LIMIT_ATTACHED]==SQLITE_MAX_ATTACHED );
+ assert( aHardLimit[SQLITE_LIMIT_LIKE_PATTERN_LENGTH]==
+ SQLITE_MAX_LIKE_PATTERN_LENGTH );
+ assert( aHardLimit[SQLITE_LIMIT_VARIABLE_NUMBER]==SQLITE_MAX_VARIABLE_NUMBER);
+ assert( aHardLimit[SQLITE_LIMIT_TRIGGER_DEPTH]==SQLITE_MAX_TRIGGER_DEPTH );
+ assert( aHardLimit[SQLITE_LIMIT_WORKER_THREADS]==SQLITE_MAX_WORKER_THREADS );
+ assert( SQLITE_LIMIT_WORKER_THREADS==(SQLITE_N_LIMIT-1) );
+
+
+ if( limitId<0 || limitId>=SQLITE_N_LIMIT ){
+ return -1;
+ }
+ oldLimit = db->aLimit[limitId];
+ if( newLimit>=0 ){ /* IMP: R-52476-28732 */
+ if( newLimit>aHardLimit[limitId] ){
+ newLimit = aHardLimit[limitId]; /* IMP: R-51463-25634 */
+ }
+ db->aLimit[limitId] = newLimit;
+ }
+ return oldLimit; /* IMP: R-53341-35419 */
+}
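+
+/* Usage sketch, assuming an open handle "db". Passing a negative
+** newLimit reads the current value without changing it:
+**
+**   int old = sqlite3_limit(db, SQLITE_LIMIT_SQL_LENGTH, 1000000);
+**   int cur = sqlite3_limit(db, SQLITE_LIMIT_SQL_LENGTH, -1);
+**
+** cur is now 1000000, unless the compile-time hard limit
+** SQLITE_MAX_SQL_LENGTH is lower, in which case it is capped there.
+*/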
+
+/*
+** This function is used to parse both URIs and non-URI filenames passed by the
+** user to API functions sqlite3_open() or sqlite3_open_v2(), and for database
+** URIs specified as part of ATTACH statements.
+**
+** The first argument to this function is the name of the VFS to use (or
+** a NULL to signify the default VFS) if the URI does not contain a "vfs=xxx"
+** query parameter. The second argument contains the URI (or non-URI filename)
+** itself. When this function is called the *pFlags variable should contain
+** the default flags to open the database handle with. The value stored in
+** *pFlags may be updated before returning if the URI filename contains
+** "cache=xxx" or "mode=xxx" query parameters.
+**
+** If successful, SQLITE_OK is returned. In this case *ppVfs is set to point to
+** the VFS that should be used to open the database file. *pzFile is set to
+** point to a buffer containing the name of the file to open. The value
+** stored in *pzFile is a database name acceptable to sqlite3_uri_parameter()
+** and is in the same format as names created using sqlite3_create_filename().
+** The caller must invoke sqlite3_free_filename() (not sqlite3_free()!) on
+** the value returned in *pzFile to avoid a memory leak.
+**
+** If an error occurs, then an SQLite error code is returned and *pzErrMsg
+** may be set to point to a buffer containing an English language error
+** message. It is the responsibility of the caller to eventually release
+** this buffer by calling sqlite3_free().
+*/
+SQLITE_PRIVATE int sqlite3ParseUri(
+ const char *zDefaultVfs, /* VFS to use if no "vfs=xxx" query option */
+ const char *zUri, /* Nul-terminated URI to parse */
+ unsigned int *pFlags, /* IN/OUT: SQLITE_OPEN_XXX flags */
+ sqlite3_vfs **ppVfs, /* OUT: VFS to use */
+ char **pzFile, /* OUT: Filename component of URI */
+ char **pzErrMsg /* OUT: Error message (if rc!=SQLITE_OK) */
+){
+ int rc = SQLITE_OK;
+ unsigned int flags = *pFlags;
+ const char *zVfs = zDefaultVfs;
+ char *zFile;
+ char c;
+ int nUri = sqlite3Strlen30(zUri);
+
+ assert( *pzErrMsg==0 );
+
+ if( ((flags & SQLITE_OPEN_URI) /* IMP: R-48725-32206 */
+ || sqlite3GlobalConfig.bOpenUri) /* IMP: R-51689-46548 */
+ && nUri>=5 && memcmp(zUri, "file:", 5)==0 /* IMP: R-57884-37496 */
+ ){
+ char *zOpt;
+ int eState; /* Parser state when parsing URI */
+ int iIn; /* Input character index */
+ int iOut = 0; /* Output character index */
+ u64 nByte = nUri+8; /* Bytes of space to allocate */
+
+ /* Make sure the SQLITE_OPEN_URI flag is set to indicate to the VFS xOpen
+ ** method that there may be extra parameters following the file-name. */
+ flags |= SQLITE_OPEN_URI;
+
+ for(iIn=0; iIn<nUri; iIn++) nByte += (zUri[iIn]=='&');
+ zFile = sqlite3_malloc64(nByte);
+ if( !zFile ) return SQLITE_NOMEM_BKPT;
+
+    memset(zFile, 0, 4);  /* 4 bytes of 0x00 mark the start of the DB name */
+ zFile += 4;
+
+ iIn = 5;
+#ifdef SQLITE_ALLOW_URI_AUTHORITY
+ if( strncmp(zUri+5, "///", 3)==0 ){
+ iIn = 7;
+ /* The following condition causes URIs with five leading / characters
+ ** like file://///host/path to be converted into UNCs like //host/path.
+ ** The correct URI for that UNC has only two or four leading / characters
+ ** file://host/path or file:////host/path. But 5 leading slashes is a
+ ** common error, we are told, so we handle it as a special case. */
+ if( strncmp(zUri+7, "///", 3)==0 ){ iIn++; }
+ }else if( strncmp(zUri+5, "//localhost/", 12)==0 ){
+ iIn = 16;
+ }
+#else
+ /* Discard the scheme and authority segments of the URI. */
+ if( zUri[5]=='/' && zUri[6]=='/' ){
+ iIn = 7;
+ while( zUri[iIn] && zUri[iIn]!='/' ) iIn++;
+ if( iIn!=7 && (iIn!=16 || memcmp("localhost", &zUri[7], 9)) ){
+ *pzErrMsg = sqlite3_mprintf("invalid uri authority: %.*s",
+ iIn-7, &zUri[7]);
+ rc = SQLITE_ERROR;
+ goto parse_uri_out;
+ }
+ }
+#endif
+
+ /* Copy the filename and any query parameters into the zFile buffer.
+ ** Decode %HH escape codes along the way.
+ **
+ ** Within this loop, variable eState may be set to 0, 1 or 2, depending
+ ** on the parsing context. As follows:
+ **
+ ** 0: Parsing file-name.
+ ** 1: Parsing name section of a name=value query parameter.
+ ** 2: Parsing value section of a name=value query parameter.
+ */
+ eState = 0;
+ while( (c = zUri[iIn])!=0 && c!='#' ){
+ iIn++;
+ if( c=='%'
+ && sqlite3Isxdigit(zUri[iIn])
+ && sqlite3Isxdigit(zUri[iIn+1])
+ ){
+ int octet = (sqlite3HexToInt(zUri[iIn++]) << 4);
+ octet += sqlite3HexToInt(zUri[iIn++]);
+
+ assert( octet>=0 && octet<256 );
+ if( octet==0 ){
+#ifndef SQLITE_ENABLE_URI_00_ERROR
+ /* This branch is taken when "%00" appears within the URI. In this
+ ** case we ignore all text in the remainder of the path, name or
+ ** value currently being parsed. So ignore the current character
+ ** and skip to the next "?", "=" or "&", as appropriate. */
+ while( (c = zUri[iIn])!=0 && c!='#'
+ && (eState!=0 || c!='?')
+ && (eState!=1 || (c!='=' && c!='&'))
+ && (eState!=2 || c!='&')
+ ){
+ iIn++;
+ }
+ continue;
+#else
+ /* If ENABLE_URI_00_ERROR is defined, "%00" in a URI is an error. */
+ *pzErrMsg = sqlite3_mprintf("unexpected %%00 in uri");
+ rc = SQLITE_ERROR;
+ goto parse_uri_out;
+#endif
+ }
+ c = octet;
+ }else if( eState==1 && (c=='&' || c=='=') ){
+ if( zFile[iOut-1]==0 ){
+ /* An empty option name. Ignore this option altogether. */
+ while( zUri[iIn] && zUri[iIn]!='#' && zUri[iIn-1]!='&' ) iIn++;
+ continue;
+ }
+ if( c=='&' ){
+ zFile[iOut++] = '\0';
+ }else{
+ eState = 2;
+ }
+ c = 0;
+ }else if( (eState==0 && c=='?') || (eState==2 && c=='&') ){
+ c = 0;
+ eState = 1;
+ }
+ zFile[iOut++] = c;
+ }
+ if( eState==1 ) zFile[iOut++] = '\0';
+ memset(zFile+iOut, 0, 4); /* end-of-options + empty journal filenames */
+
+ /* Check if there were any options specified that should be interpreted
+ ** here. Options that are interpreted here include "vfs" and those that
+ ** correspond to flags that may be passed to the sqlite3_open_v2()
+ ** method. */
+ zOpt = &zFile[sqlite3Strlen30(zFile)+1];
+ while( zOpt[0] ){
+ int nOpt = sqlite3Strlen30(zOpt);
+ char *zVal = &zOpt[nOpt+1];
+ int nVal = sqlite3Strlen30(zVal);
+
+ if( nOpt==3 && memcmp("vfs", zOpt, 3)==0 ){
+ zVfs = zVal;
+ }else{
+ struct OpenMode {
+ const char *z;
+ int mode;
+ } *aMode = 0;
+ char *zModeType = 0;
+ int mask = 0;
+ int limit = 0;
+
+ if( nOpt==5 && memcmp("cache", zOpt, 5)==0 ){
+ static struct OpenMode aCacheMode[] = {
+ { "shared", SQLITE_OPEN_SHAREDCACHE },
+ { "private", SQLITE_OPEN_PRIVATECACHE },
+ { 0, 0 }
+ };
+
+ mask = SQLITE_OPEN_SHAREDCACHE|SQLITE_OPEN_PRIVATECACHE;
+ aMode = aCacheMode;
+ limit = mask;
+ zModeType = "cache";
+ }
+ if( nOpt==4 && memcmp("mode", zOpt, 4)==0 ){
+ static struct OpenMode aOpenMode[] = {
+ { "ro", SQLITE_OPEN_READONLY },
+ { "rw", SQLITE_OPEN_READWRITE },
+ { "rwc", SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE },
+ { "memory", SQLITE_OPEN_MEMORY },
+ { 0, 0 }
+ };
+
+ mask = SQLITE_OPEN_READONLY | SQLITE_OPEN_READWRITE
+ | SQLITE_OPEN_CREATE | SQLITE_OPEN_MEMORY;
+ aMode = aOpenMode;
+ limit = mask & flags;
+ zModeType = "access";
+ }
+
+ if( aMode ){
+ int i;
+ int mode = 0;
+ for(i=0; aMode[i].z; i++){
+ const char *z = aMode[i].z;
+ if( nVal==sqlite3Strlen30(z) && 0==memcmp(zVal, z, nVal) ){
+ mode = aMode[i].mode;
+ break;
+ }
+ }
+ if( mode==0 ){
+ *pzErrMsg = sqlite3_mprintf("no such %s mode: %s", zModeType, zVal);
+ rc = SQLITE_ERROR;
+ goto parse_uri_out;
+ }
+ if( (mode & ~SQLITE_OPEN_MEMORY)>limit ){
+ *pzErrMsg = sqlite3_mprintf("%s mode not allowed: %s",
+ zModeType, zVal);
+ rc = SQLITE_PERM;
+ goto parse_uri_out;
+ }
+ flags = (flags & ~mask) | mode;
+ }
+ }
+
+ zOpt = &zVal[nVal+1];
+ }
+
+ }else{
+ zFile = sqlite3_malloc64(nUri+8);
+ if( !zFile ) return SQLITE_NOMEM_BKPT;
+ memset(zFile, 0, 4);
+ zFile += 4;
+ if( nUri ){
+ memcpy(zFile, zUri, nUri);
+ }
+ memset(zFile+nUri, 0, 4);
+ flags &= ~SQLITE_OPEN_URI;
+ }
+
+ *ppVfs = sqlite3_vfs_find(zVfs);
+ if( *ppVfs==0 ){
+ *pzErrMsg = sqlite3_mprintf("no such vfs: %s", zVfs);
+ rc = SQLITE_ERROR;
+ }
+ parse_uri_out:
+ if( rc!=SQLITE_OK ){
+ sqlite3_free_filename(zFile);
+ zFile = 0;
+ }
+ *pFlags = flags;
+ *pzFile = zFile;
+ return rc;
+}
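+
+/* Worked example (illustrative): assuming "mode=ro" is permitted by the
+** input flags, parsing the URI
+**
+**     file:data.db?mode=ro&cache=private
+**
+** clears the read-write/create bits in favor of SQLITE_OPEN_READONLY,
+** sets SQLITE_OPEN_PRIVATECACHE, and produces (roughly) this *pzFile
+** buffer, preceded by four 0x00 bytes:
+**
+**     "data.db\0" "mode\0" "ro\0" "cache\0" "private\0" "\0"
+*/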
+
+/*
+** This routine does the core work of extracting URI parameters from a
+** database filename for the sqlite3_uri_parameter() interface.
+*/
+static const char *uriParameter(const char *zFilename, const char *zParam){
+ zFilename += sqlite3Strlen30(zFilename) + 1;
+ while( zFilename[0] ){
+ int x = strcmp(zFilename, zParam);
+ zFilename += sqlite3Strlen30(zFilename) + 1;
+ if( x==0 ) return zFilename;
+ zFilename += sqlite3Strlen30(zFilename) + 1;
+ }
+ return 0;
+}
+
+
+
+/*
+** This routine does the work of opening a database on behalf of
+** sqlite3_open() and sqlite3_open16(). The database filename "zFilename"
+** is UTF-8 encoded.
+*/
+static int openDatabase(
+ const char *zFilename, /* Database filename UTF-8 encoded */
+ sqlite3 **ppDb, /* OUT: Returned database handle */
+ unsigned int flags, /* Operational flags */
+ const char *zVfs /* Name of the VFS to use */
+){
+ sqlite3 *db; /* Store allocated handle here */
+ int rc; /* Return code */
+ int isThreadsafe; /* True for threadsafe connections */
+ char *zOpen = 0; /* Filename argument to pass to BtreeOpen() */
+ char *zErrMsg = 0; /* Error message from sqlite3ParseUri() */
+ int i; /* Loop counter */
+
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( ppDb==0 ) return SQLITE_MISUSE_BKPT;
+#endif
+ *ppDb = 0;
+#ifndef SQLITE_OMIT_AUTOINIT
+ rc = sqlite3_initialize();
+ if( rc ) return rc;
+#endif
+
+ if( sqlite3GlobalConfig.bCoreMutex==0 ){
+ isThreadsafe = 0;
+ }else if( flags & SQLITE_OPEN_NOMUTEX ){
+ isThreadsafe = 0;
+ }else if( flags & SQLITE_OPEN_FULLMUTEX ){
+ isThreadsafe = 1;
+ }else{
+ isThreadsafe = sqlite3GlobalConfig.bFullMutex;
+ }
+
+ if( flags & SQLITE_OPEN_PRIVATECACHE ){
+ flags &= ~SQLITE_OPEN_SHAREDCACHE;
+ }else if( sqlite3GlobalConfig.sharedCacheEnabled ){
+ flags |= SQLITE_OPEN_SHAREDCACHE;
+ }
+
+ /* Remove harmful bits from the flags parameter
+ **
+ ** The SQLITE_OPEN_NOMUTEX and SQLITE_OPEN_FULLMUTEX flags were
+ ** dealt with in the previous code block. Besides these, the only
+ ** valid input flags for sqlite3_open_v2() are SQLITE_OPEN_READONLY,
+ ** SQLITE_OPEN_READWRITE, SQLITE_OPEN_CREATE, SQLITE_OPEN_SHAREDCACHE,
+ ** SQLITE_OPEN_PRIVATECACHE, and some reserved bits. Silently mask
+ ** off all other flags.
+ */
+ flags &= ~( SQLITE_OPEN_DELETEONCLOSE |
+ SQLITE_OPEN_EXCLUSIVE |
+ SQLITE_OPEN_MAIN_DB |
+ SQLITE_OPEN_TEMP_DB |
+ SQLITE_OPEN_TRANSIENT_DB |
+ SQLITE_OPEN_MAIN_JOURNAL |
+ SQLITE_OPEN_TEMP_JOURNAL |
+ SQLITE_OPEN_SUBJOURNAL |
+ SQLITE_OPEN_SUPER_JOURNAL |
+ SQLITE_OPEN_NOMUTEX |
+ SQLITE_OPEN_FULLMUTEX |
+ SQLITE_OPEN_WAL
+ );
+
+ /* Allocate the sqlite data structure */
+ db = sqlite3MallocZero( sizeof(sqlite3) );
+ if( db==0 ) goto opendb_out;
+ if( isThreadsafe
+#ifdef SQLITE_ENABLE_MULTITHREADED_CHECKS
+ || sqlite3GlobalConfig.bCoreMutex
+#endif
+ ){
+ db->mutex = sqlite3MutexAlloc(SQLITE_MUTEX_RECURSIVE);
+ if( db->mutex==0 ){
+ sqlite3_free(db);
+ db = 0;
+ goto opendb_out;
+ }
+ if( isThreadsafe==0 ){
+ sqlite3MutexWarnOnContention(db->mutex);
+ }
+ }
+ sqlite3_mutex_enter(db->mutex);
+ db->errMask = 0xff;
+ db->nDb = 2;
+ db->magic = SQLITE_MAGIC_BUSY;
+ db->aDb = db->aDbStatic;
+ db->lookaside.bDisable = 1;
+ db->lookaside.sz = 0;
+
+ assert( sizeof(db->aLimit)==sizeof(aHardLimit) );
+ memcpy(db->aLimit, aHardLimit, sizeof(db->aLimit));
+ db->aLimit[SQLITE_LIMIT_WORKER_THREADS] = SQLITE_DEFAULT_WORKER_THREADS;
+ db->autoCommit = 1;
+ db->nextAutovac = -1;
+ db->szMmap = sqlite3GlobalConfig.szMmap;
+ db->nextPagesize = 0;
+ db->nMaxSorterMmap = 0x7FFFFFFF;
+ db->flags |= SQLITE_ShortColNames
+ | SQLITE_EnableTrigger
+ | SQLITE_EnableView
+ | SQLITE_CacheSpill
+#if !defined(SQLITE_TRUSTED_SCHEMA) || SQLITE_TRUSTED_SCHEMA+0!=0
+ | SQLITE_TrustedSchema
+#endif
+/* The SQLITE_DQS compile-time option determines the default settings
+** for SQLITE_DBCONFIG_DQS_DDL and SQLITE_DBCONFIG_DQS_DML.
+**
+** SQLITE_DQS SQLITE_DBCONFIG_DQS_DDL SQLITE_DBCONFIG_DQS_DML
+** ---------- ----------------------- -----------------------
+** undefined on on
+** 3 on on
+** 2 on off
+** 1 off on
+** 0 off off
+**
+** Legacy behavior is 3 (double-quoted string literals are allowed anywhere)
+** and so that is the default. But developers are encouraged to use
+** -DSQLITE_DQS=0 (best) or -DSQLITE_DQS=1 (second choice) if possible.
+*/
+#if !defined(SQLITE_DQS)
+# define SQLITE_DQS 3
+#endif
+#if (SQLITE_DQS&1)==1
+ | SQLITE_DqsDML
+#endif
+#if (SQLITE_DQS&2)==2
+ | SQLITE_DqsDDL
+#endif
+
+#if !defined(SQLITE_DEFAULT_AUTOMATIC_INDEX) || SQLITE_DEFAULT_AUTOMATIC_INDEX
+ | SQLITE_AutoIndex
+#endif
+#if SQLITE_DEFAULT_CKPTFULLFSYNC
+ | SQLITE_CkptFullFSync
+#endif
+#if SQLITE_DEFAULT_FILE_FORMAT<4
+ | SQLITE_LegacyFileFmt
+#endif
+#ifdef SQLITE_ENABLE_LOAD_EXTENSION
+ | SQLITE_LoadExtension
+#endif
+#if SQLITE_DEFAULT_RECURSIVE_TRIGGERS
+ | SQLITE_RecTriggers
+#endif
+#if defined(SQLITE_DEFAULT_FOREIGN_KEYS) && SQLITE_DEFAULT_FOREIGN_KEYS
+ | SQLITE_ForeignKeys
+#endif
+#if defined(SQLITE_REVERSE_UNORDERED_SELECTS)
+ | SQLITE_ReverseOrder
+#endif
+#if defined(SQLITE_ENABLE_OVERSIZE_CELL_CHECK)
+ | SQLITE_CellSizeCk
+#endif
+#if defined(SQLITE_ENABLE_FTS3_TOKENIZER)
+ | SQLITE_Fts3Tokenizer
+#endif
+#if defined(SQLITE_ENABLE_QPSG)
+ | SQLITE_EnableQPSG
+#endif
+#if defined(SQLITE_DEFAULT_DEFENSIVE)
+ | SQLITE_Defensive
+#endif
+#if defined(SQLITE_DEFAULT_LEGACY_ALTER_TABLE)
+ | SQLITE_LegacyAlter
+#endif
+ ;
+ sqlite3HashInit(&db->aCollSeq);
+#ifndef SQLITE_OMIT_VIRTUALTABLE
+ sqlite3HashInit(&db->aModule);
+#endif
+
+ /* Add the default collation sequence BINARY. BINARY works for both UTF-8
+ ** and UTF-16, so add a version for each to avoid any unnecessary
+ ** conversions. The only error that can occur here is a malloc() failure.
+ **
+ ** EVIDENCE-OF: R-52786-44878 SQLite defines three built-in collating
+ ** functions:
+ */
+ createCollation(db, sqlite3StrBINARY, SQLITE_UTF8, 0, binCollFunc, 0);
+ createCollation(db, sqlite3StrBINARY, SQLITE_UTF16BE, 0, binCollFunc, 0);
+ createCollation(db, sqlite3StrBINARY, SQLITE_UTF16LE, 0, binCollFunc, 0);
+ createCollation(db, "NOCASE", SQLITE_UTF8, 0, nocaseCollatingFunc, 0);
+ createCollation(db, "RTRIM", SQLITE_UTF8, 0, rtrimCollFunc, 0);
+ if( db->mallocFailed ){
+ goto opendb_out;
+ }
+
+ /* Parse the filename/URI argument
+ **
+ ** Only allow sensible combinations of bits in the flags argument.
+  ** Throw an error if any nonsense combination is used. If we
+ ** do not block illegal combinations here, it could trigger
+ ** assert() statements in deeper layers. Sensible combinations
+ ** are:
+ **
+ ** 1: SQLITE_OPEN_READONLY
+ ** 2: SQLITE_OPEN_READWRITE
+ ** 6: SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE
+ */
+ db->openFlags = flags;
+ assert( SQLITE_OPEN_READONLY == 0x01 );
+ assert( SQLITE_OPEN_READWRITE == 0x02 );
+ assert( SQLITE_OPEN_CREATE == 0x04 );
+ testcase( (1<<(flags&7))==0x02 ); /* READONLY */
+ testcase( (1<<(flags&7))==0x04 ); /* READWRITE */
+ testcase( (1<<(flags&7))==0x40 ); /* READWRITE | CREATE */
+ if( ((1<<(flags&7)) & 0x46)==0 ){
+ rc = SQLITE_MISUSE_BKPT; /* IMP: R-18321-05872 */
+ }else{
+ rc = sqlite3ParseUri(zVfs, zFilename, &flags, &db->pVfs, &zOpen, &zErrMsg);
+ }
+ if( rc!=SQLITE_OK ){
+ if( rc==SQLITE_NOMEM ) sqlite3OomFault(db);
+ sqlite3ErrorWithMsg(db, rc, zErrMsg ? "%s" : 0, zErrMsg);
+ sqlite3_free(zErrMsg);
+ goto opendb_out;
+ }
+
+ /* Open the backend database driver */
+ rc = sqlite3BtreeOpen(db->pVfs, zOpen, db, &db->aDb[0].pBt, 0,
+ flags | SQLITE_OPEN_MAIN_DB);
+ if( rc!=SQLITE_OK ){
+ if( rc==SQLITE_IOERR_NOMEM ){
+ rc = SQLITE_NOMEM_BKPT;
+ }
+ sqlite3Error(db, rc);
+ goto opendb_out;
+ }
+ sqlite3BtreeEnter(db->aDb[0].pBt);
+ db->aDb[0].pSchema = sqlite3SchemaGet(db, db->aDb[0].pBt);
+ if( !db->mallocFailed ){
+ sqlite3SetTextEncoding(db, SCHEMA_ENC(db));
+ }
+ sqlite3BtreeLeave(db->aDb[0].pBt);
+ db->aDb[1].pSchema = sqlite3SchemaGet(db, 0);
+
+ /* The default safety_level for the main database is FULL; for the temp
+ ** database it is OFF. This matches the pager layer defaults.
+ */
+ db->aDb[0].zDbSName = "main";
+ db->aDb[0].safety_level = SQLITE_DEFAULT_SYNCHRONOUS+1;
+ db->aDb[1].zDbSName = "temp";
+ db->aDb[1].safety_level = PAGER_SYNCHRONOUS_OFF;
+
+ db->magic = SQLITE_MAGIC_OPEN;
+ if( db->mallocFailed ){
+ goto opendb_out;
+ }
+
+ /* Register all built-in functions, but do not attempt to read the
+ ** database schema yet. This is delayed until the first time the database
+ ** is accessed.
+ */
+ sqlite3Error(db, SQLITE_OK);
+ sqlite3RegisterPerConnectionBuiltinFunctions(db);
+ rc = sqlite3_errcode(db);
+
+
+ /* Load compiled-in extensions */
+ for(i=0; rc==SQLITE_OK && i<ArraySize(sqlite3BuiltinExtensions); i++){
+ rc = sqlite3BuiltinExtensions[i](db);
+ }
+
+ /* Load automatic extensions - extensions that have been registered
+  ** using the sqlite3_auto_extension() API.
+ */
+ if( rc==SQLITE_OK ){
+ sqlite3AutoLoadExtensions(db);
+ rc = sqlite3_errcode(db);
+ if( rc!=SQLITE_OK ){
+ goto opendb_out;
+ }
+ }
+
+#ifdef SQLITE_ENABLE_INTERNAL_FUNCTIONS
+ /* Testing use only!!! The -DSQLITE_ENABLE_INTERNAL_FUNCTIONS=1 compile-time
+ ** option gives access to internal functions by default.
+ ** Testing use only!!! */
+ db->mDbFlags |= DBFLAG_InternalFunc;
+#endif
+
+ /* -DSQLITE_DEFAULT_LOCKING_MODE=1 makes EXCLUSIVE the default locking
+  ** mode. -DSQLITE_DEFAULT_LOCKING_MODE=0 makes NORMAL the default locking
+ ** mode. Doing nothing at all also makes NORMAL the default.
+ */
+#ifdef SQLITE_DEFAULT_LOCKING_MODE
+ db->dfltLockMode = SQLITE_DEFAULT_LOCKING_MODE;
+ sqlite3PagerLockingMode(sqlite3BtreePager(db->aDb[0].pBt),
+ SQLITE_DEFAULT_LOCKING_MODE);
+#endif
+
+ if( rc ) sqlite3Error(db, rc);
+
+ /* Enable the lookaside-malloc subsystem */
+ setupLookaside(db, 0, sqlite3GlobalConfig.szLookaside,
+ sqlite3GlobalConfig.nLookaside);
+
+ sqlite3_wal_autocheckpoint(db, SQLITE_DEFAULT_WAL_AUTOCHECKPOINT);
+
+opendb_out:
+ if( db ){
+ assert( db->mutex!=0 || isThreadsafe==0
+ || sqlite3GlobalConfig.bFullMutex==0 );
+ sqlite3_mutex_leave(db->mutex);
+ }
+ rc = sqlite3_errcode(db);
+ assert( db!=0 || rc==SQLITE_NOMEM );
+ if( rc==SQLITE_NOMEM ){
+ sqlite3_close(db);
+ db = 0;
+ }else if( rc!=SQLITE_OK ){
+ db->magic = SQLITE_MAGIC_SICK;
+ }
+ *ppDb = db;
+#ifdef SQLITE_ENABLE_SQLLOG
+ if( sqlite3GlobalConfig.xSqllog ){
+ /* Opening a db handle. Fourth parameter is passed 0. */
+ void *pArg = sqlite3GlobalConfig.pSqllogArg;
+ sqlite3GlobalConfig.xSqllog(pArg, db, zFilename, 0);
+ }
+#endif
+ sqlite3_free_filename(zOpen);
+ return rc & 0xff;
+}
+
+
+/*
+** Open a new database handle.
+*/
+SQLITE_API int sqlite3_open(
+ const char *zFilename,
+ sqlite3 **ppDb
+){
+ return openDatabase(zFilename, ppDb,
+ SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE, 0);
+}
+SQLITE_API int sqlite3_open_v2(
+ const char *filename, /* Database filename (UTF-8) */
+ sqlite3 **ppDb, /* OUT: SQLite db handle */
+ int flags, /* Flags */
+ const char *zVfs /* Name of VFS module to use */
+){
+ return openDatabase(filename, ppDb, (unsigned int)flags, zVfs);
+}
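+
+/* Usage sketch (illustrative): open an existing database read-only via
+** a URI filename. sqlite3_close() must be called even when the open
+** fails, since a handle may still be returned:
+**
+**   sqlite3 *db = 0;
+**   int rc = sqlite3_open_v2("file:data.db?mode=ro", &db,
+**                            SQLITE_OPEN_READONLY|SQLITE_OPEN_URI, 0);
+**   if( rc!=SQLITE_OK ){
+**     fprintf(stderr, "open failed: %s\n", sqlite3_errmsg(db));
+**   }
+**   sqlite3_close(db);
+*/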
+
+#ifndef SQLITE_OMIT_UTF16
+/*
+** Open a new database handle.
+*/
+SQLITE_API int sqlite3_open16(
+ const void *zFilename,
+ sqlite3 **ppDb
+){
+ char const *zFilename8; /* zFilename encoded in UTF-8 instead of UTF-16 */
+ sqlite3_value *pVal;
+ int rc;
+
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( ppDb==0 ) return SQLITE_MISUSE_BKPT;
+#endif
+ *ppDb = 0;
+#ifndef SQLITE_OMIT_AUTOINIT
+ rc = sqlite3_initialize();
+ if( rc ) return rc;
+#endif
+ if( zFilename==0 ) zFilename = "\000\000";
+ pVal = sqlite3ValueNew(0);
+ sqlite3ValueSetStr(pVal, -1, zFilename, SQLITE_UTF16NATIVE, SQLITE_STATIC);
+ zFilename8 = sqlite3ValueText(pVal, SQLITE_UTF8);
+ if( zFilename8 ){
+ rc = openDatabase(zFilename8, ppDb,
+ SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE, 0);
+ assert( *ppDb || rc==SQLITE_NOMEM );
+ if( rc==SQLITE_OK && !DbHasProperty(*ppDb, 0, DB_SchemaLoaded) ){
+ SCHEMA_ENC(*ppDb) = ENC(*ppDb) = SQLITE_UTF16NATIVE;
+ }
+ }else{
+ rc = SQLITE_NOMEM_BKPT;
+ }
+ sqlite3ValueFree(pVal);
+
+ return rc & 0xff;
+}
+#endif /* SQLITE_OMIT_UTF16 */
+
+/*
+** Register a new collation sequence with the database handle db.
+*/
+SQLITE_API int sqlite3_create_collation(
+ sqlite3* db,
+ const char *zName,
+ int enc,
+ void* pCtx,
+ int(*xCompare)(void*,int,const void*,int,const void*)
+){
+ return sqlite3_create_collation_v2(db, zName, enc, pCtx, xCompare, 0);
+}
+
+/*
+** Register a new collation sequence with the database handle db.
+*/
+SQLITE_API int sqlite3_create_collation_v2(
+ sqlite3* db,
+ const char *zName,
+ int enc,
+ void* pCtx,
+ int(*xCompare)(void*,int,const void*,int,const void*),
+ void(*xDel)(void*)
+){
+ int rc;
+
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( !sqlite3SafetyCheckOk(db) || zName==0 ) return SQLITE_MISUSE_BKPT;
+#endif
+ sqlite3_mutex_enter(db->mutex);
+ assert( !db->mallocFailed );
+ rc = createCollation(db, zName, (u8)enc, pCtx, xCompare, xDel);
+ rc = sqlite3ApiExit(db, rc);
+ sqlite3_mutex_leave(db->mutex);
+ return rc;
+}
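+
+/* Registration sketch. "revCmp" is a hypothetical comparison callback;
+** any function with this signature will do:
+**
+**   static int revCmp(void *pCtx, int n1, const void *z1,
+**                     int n2, const void *z2){
+**     int n = n1<n2 ? n1 : n2;
+**     int c = memcmp(z1, z2, n);
+**     if( c==0 ) c = n1 - n2;
+**     return -c;              (the reverse of the built-in BINARY order)
+**   }
+**
+**   rc = sqlite3_create_collation_v2(db, "revbin", SQLITE_UTF8, 0,
+**                                    revCmp, 0);
+*/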
+
+#ifndef SQLITE_OMIT_UTF16
+/*
+** Register a new collation sequence with the database handle db.
+*/
+SQLITE_API int sqlite3_create_collation16(
+ sqlite3* db,
+ const void *zName,
+ int enc,
+ void* pCtx,
+ int(*xCompare)(void*,int,const void*,int,const void*)
+){
+ int rc = SQLITE_OK;
+ char *zName8;
+
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( !sqlite3SafetyCheckOk(db) || zName==0 ) return SQLITE_MISUSE_BKPT;
+#endif
+ sqlite3_mutex_enter(db->mutex);
+ assert( !db->mallocFailed );
+ zName8 = sqlite3Utf16to8(db, zName, -1, SQLITE_UTF16NATIVE);
+ if( zName8 ){
+ rc = createCollation(db, zName8, (u8)enc, pCtx, xCompare, 0);
+ sqlite3DbFree(db, zName8);
+ }
+ rc = sqlite3ApiExit(db, rc);
+ sqlite3_mutex_leave(db->mutex);
+ return rc;
+}
+#endif /* SQLITE_OMIT_UTF16 */
+
+/*
+** Register a collation sequence factory callback with the database handle
+** db. Replace any previously installed collation sequence factory.
+*/
+SQLITE_API int sqlite3_collation_needed(
+ sqlite3 *db,
+ void *pCollNeededArg,
+ void(*xCollNeeded)(void*,sqlite3*,int eTextRep,const char*)
+){
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( !sqlite3SafetyCheckOk(db) ) return SQLITE_MISUSE_BKPT;
+#endif
+ sqlite3_mutex_enter(db->mutex);
+ db->xCollNeeded = xCollNeeded;
+ db->xCollNeeded16 = 0;
+ db->pCollNeededArg = pCollNeededArg;
+ sqlite3_mutex_leave(db->mutex);
+ return SQLITE_OK;
+}
+
+#ifndef SQLITE_OMIT_UTF16
+/*
+** Register a collation sequence factory callback with the database handle
+** db. Replace any previously installed collation sequence factory.
+*/
+SQLITE_API int sqlite3_collation_needed16(
+ sqlite3 *db,
+ void *pCollNeededArg,
+ void(*xCollNeeded16)(void*,sqlite3*,int eTextRep,const void*)
+){
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( !sqlite3SafetyCheckOk(db) ) return SQLITE_MISUSE_BKPT;
+#endif
+ sqlite3_mutex_enter(db->mutex);
+ db->xCollNeeded = 0;
+ db->xCollNeeded16 = xCollNeeded16;
+ db->pCollNeededArg = pCollNeededArg;
+ sqlite3_mutex_leave(db->mutex);
+ return SQLITE_OK;
+}
+#endif /* SQLITE_OMIT_UTF16 */
+
+#ifndef SQLITE_OMIT_DEPRECATED
+/*
+** This function is now an anachronism. It used to be used to recover from a
+** malloc() failure, but SQLite now does this automatically.
+*/
+SQLITE_API int sqlite3_global_recover(void){
+ return SQLITE_OK;
+}
+#endif
+
+/*
+** Test to see whether or not the database connection is in autocommit
+** mode. Return TRUE if it is and FALSE if not. Autocommit mode is on
+** by default. Autocommit is disabled by a BEGIN statement and reenabled
+** by the next COMMIT or ROLLBACK.
+*/
+SQLITE_API int sqlite3_get_autocommit(sqlite3 *db){
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( !sqlite3SafetyCheckOk(db) ){
+ (void)SQLITE_MISUSE_BKPT;
+ return 0;
+ }
+#endif
+ return db->autoCommit;
+}
+
+/*
+** The following routines are substitutes for constants SQLITE_CORRUPT,
+** SQLITE_MISUSE, SQLITE_CANTOPEN, SQLITE_NOMEM and possibly other error
+** constants. They serve two purposes:
+**
+** 1. Serve as a convenient place to set a breakpoint in a debugger
+**       to detect when these error conditions occur.
+**
+** 2. Invoke sqlite3_log() to provide the source code location where
+** a low-level error is first detected.
+*/
+SQLITE_PRIVATE int sqlite3ReportError(int iErr, int lineno, const char *zType){
+ sqlite3_log(iErr, "%s at line %d of [%.10s]",
+ zType, lineno, 20+sqlite3_sourceid());
+ return iErr;
+}
+SQLITE_PRIVATE int sqlite3CorruptError(int lineno){
+ testcase( sqlite3GlobalConfig.xLog!=0 );
+ return sqlite3ReportError(SQLITE_CORRUPT, lineno, "database corruption");
+}
+SQLITE_PRIVATE int sqlite3MisuseError(int lineno){
+ testcase( sqlite3GlobalConfig.xLog!=0 );
+ return sqlite3ReportError(SQLITE_MISUSE, lineno, "misuse");
+}
+SQLITE_PRIVATE int sqlite3CantopenError(int lineno){
+ testcase( sqlite3GlobalConfig.xLog!=0 );
+ return sqlite3ReportError(SQLITE_CANTOPEN, lineno, "cannot open file");
+}
+#if defined(SQLITE_DEBUG) || defined(SQLITE_ENABLE_CORRUPT_PGNO)
+SQLITE_PRIVATE int sqlite3CorruptPgnoError(int lineno, Pgno pgno){
+ char zMsg[100];
+ sqlite3_snprintf(sizeof(zMsg), zMsg, "database corruption page %d", pgno);
+ testcase( sqlite3GlobalConfig.xLog!=0 );
+ return sqlite3ReportError(SQLITE_CORRUPT, lineno, zMsg);
+}
+#endif
+#ifdef SQLITE_DEBUG
+SQLITE_PRIVATE int sqlite3NomemError(int lineno){
+ testcase( sqlite3GlobalConfig.xLog!=0 );
+ return sqlite3ReportError(SQLITE_NOMEM, lineno, "OOM");
+}
+SQLITE_PRIVATE int sqlite3IoerrnomemError(int lineno){
+ testcase( sqlite3GlobalConfig.xLog!=0 );
+ return sqlite3ReportError(SQLITE_IOERR_NOMEM, lineno, "I/O OOM error");
+}
+#endif
+
+#ifndef SQLITE_OMIT_DEPRECATED
+/*
+** This is a convenience routine that makes sure that all thread-specific
+** data for this thread has been deallocated.
+**
+** SQLite no longer uses thread-specific data so this routine is now a
+** no-op. It is retained for historical compatibility.
+*/
+SQLITE_API void sqlite3_thread_cleanup(void){
+}
+#endif
+
+/*
+** Return meta information about a specific column of a database table.
+** See comment in sqlite3.h (sqlite.h.in) for details.
+*/
+SQLITE_API int sqlite3_table_column_metadata(
+ sqlite3 *db, /* Connection handle */
+ const char *zDbName, /* Database name or NULL */
+ const char *zTableName, /* Table name */
+ const char *zColumnName, /* Column name */
+ char const **pzDataType, /* OUTPUT: Declared data type */
+ char const **pzCollSeq, /* OUTPUT: Collation sequence name */
+ int *pNotNull, /* OUTPUT: True if NOT NULL constraint exists */
+ int *pPrimaryKey, /* OUTPUT: True if column part of PK */
+ int *pAutoinc /* OUTPUT: True if column is auto-increment */
+){
+ int rc;
+ char *zErrMsg = 0;
+ Table *pTab = 0;
+ Column *pCol = 0;
+ int iCol = 0;
+ char const *zDataType = 0;
+ char const *zCollSeq = 0;
+ int notnull = 0;
+ int primarykey = 0;
+ int autoinc = 0;
+
+
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( !sqlite3SafetyCheckOk(db) || zTableName==0 ){
+ return SQLITE_MISUSE_BKPT;
+ }
+#endif
+
+ /* Ensure the database schema has been loaded */
+ sqlite3_mutex_enter(db->mutex);
+ sqlite3BtreeEnterAll(db);
+ rc = sqlite3Init(db, &zErrMsg);
+ if( SQLITE_OK!=rc ){
+ goto error_out;
+ }
+
+ /* Locate the table in question */
+ pTab = sqlite3FindTable(db, zTableName, zDbName);
+ if( !pTab || pTab->pSelect ){
+ pTab = 0;
+ goto error_out;
+ }
+
+ /* Find the column for which info is requested */
+ if( zColumnName==0 ){
+    /* Query for existence of the table only */
+ }else{
+ for(iCol=0; iCol<pTab->nCol; iCol++){
+ pCol = &pTab->aCol[iCol];
+ if( 0==sqlite3StrICmp(pCol->zName, zColumnName) ){
+ break;
+ }
+ }
+ if( iCol==pTab->nCol ){
+ if( HasRowid(pTab) && sqlite3IsRowid(zColumnName) ){
+ iCol = pTab->iPKey;
+ pCol = iCol>=0 ? &pTab->aCol[iCol] : 0;
+ }else{
+ pTab = 0;
+ goto error_out;
+ }
+ }
+ }
+
+ /* The following block stores the meta information that will be returned
+ ** to the caller in local variables zDataType, zCollSeq, notnull, primarykey
+ ** and autoinc. At this point there are two possibilities:
+ **
+  **     1. The specified column name was "rowid", "oid" or "_rowid_"
+ ** and there is no explicitly declared IPK column.
+ **
+ ** 2. The table is not a view and the column name identified an
+ ** explicitly declared column. Copy meta information from *pCol.
+ */
+ if( pCol ){
+ zDataType = sqlite3ColumnType(pCol,0);
+ zCollSeq = pCol->zColl;
+ notnull = pCol->notNull!=0;
+ primarykey = (pCol->colFlags & COLFLAG_PRIMKEY)!=0;
+ autoinc = pTab->iPKey==iCol && (pTab->tabFlags & TF_Autoincrement)!=0;
+ }else{
+ zDataType = "INTEGER";
+ primarykey = 1;
+ }
+ if( !zCollSeq ){
+ zCollSeq = sqlite3StrBINARY;
+ }
+
+error_out:
+ sqlite3BtreeLeaveAll(db);
+
+ /* Whether the function call succeeded or failed, set the output parameters
+ ** to whatever their local counterparts contain. If an error did occur,
+ ** this has the effect of zeroing all output parameters.
+ */
+ if( pzDataType ) *pzDataType = zDataType;
+ if( pzCollSeq ) *pzCollSeq = zCollSeq;
+ if( pNotNull ) *pNotNull = notnull;
+ if( pPrimaryKey ) *pPrimaryKey = primarykey;
+ if( pAutoinc ) *pAutoinc = autoinc;
+
+ if( SQLITE_OK==rc && !pTab ){
+ sqlite3DbFree(db, zErrMsg);
+ zErrMsg = sqlite3MPrintf(db, "no such table column: %s.%s", zTableName,
+ zColumnName);
+ rc = SQLITE_ERROR;
+ }
+ sqlite3ErrorWithMsg(db, rc, (zErrMsg?"%s":0), zErrMsg);
+ sqlite3DbFree(db, zErrMsg);
+ rc = sqlite3ApiExit(db, rc);
+ sqlite3_mutex_leave(db->mutex);
+ return rc;
+}
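+
+/* Usage sketch, assuming a table "t1" with a column "a" exists in the
+** "main" database of the open handle "db":
+**
+**   const char *zType = 0, *zColl = 0;
+**   int notNull = 0, pk = 0, autoinc = 0;
+**   int rc = sqlite3_table_column_metadata(db, "main", "t1", "a",
+**               &zType, &zColl, &notNull, &pk, &autoinc);
+**
+** Any of the five output pointers may be passed as NULL if that piece
+** of information is not needed.
+*/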
+
+/*
+** Sleep for a little while. Return the amount of time slept.
+*/
+SQLITE_API int sqlite3_sleep(int ms){
+ sqlite3_vfs *pVfs;
+ int rc;
+ pVfs = sqlite3_vfs_find(0);
+ if( pVfs==0 ) return 0;
+
+ /* This function works in milliseconds, but the underlying OsSleep()
+ ** API uses microseconds. Hence the 1000's.
+ */
+ rc = (sqlite3OsSleep(pVfs, 1000*ms)/1000);
+ return rc;
+}
+
+/*
+** Enable or disable the extended result codes.
+*/
+SQLITE_API int sqlite3_extended_result_codes(sqlite3 *db, int onoff){
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( !sqlite3SafetyCheckOk(db) ) return SQLITE_MISUSE_BKPT;
+#endif
+ sqlite3_mutex_enter(db->mutex);
+ db->errMask = onoff ? 0xffffffff : 0xff;
+ sqlite3_mutex_leave(db->mutex);
+ return SQLITE_OK;
+}
+
+/*
+** Invoke the xFileControl method on a particular database.
+*/
+SQLITE_API int sqlite3_file_control(sqlite3 *db, const char *zDbName, int op, void *pArg){
+ int rc = SQLITE_ERROR;
+ Btree *pBtree;
+
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( !sqlite3SafetyCheckOk(db) ) return SQLITE_MISUSE_BKPT;
+#endif
+ sqlite3_mutex_enter(db->mutex);
+ pBtree = sqlite3DbNameToBtree(db, zDbName);
+ if( pBtree ){
+ Pager *pPager;
+ sqlite3_file *fd;
+ sqlite3BtreeEnter(pBtree);
+ pPager = sqlite3BtreePager(pBtree);
+ assert( pPager!=0 );
+ fd = sqlite3PagerFile(pPager);
+ assert( fd!=0 );
+ if( op==SQLITE_FCNTL_FILE_POINTER ){
+ *(sqlite3_file**)pArg = fd;
+ rc = SQLITE_OK;
+ }else if( op==SQLITE_FCNTL_VFS_POINTER ){
+ *(sqlite3_vfs**)pArg = sqlite3PagerVfs(pPager);
+ rc = SQLITE_OK;
+ }else if( op==SQLITE_FCNTL_JOURNAL_POINTER ){
+ *(sqlite3_file**)pArg = sqlite3PagerJrnlFile(pPager);
+ rc = SQLITE_OK;
+ }else if( op==SQLITE_FCNTL_DATA_VERSION ){
+ *(unsigned int*)pArg = sqlite3PagerDataVersion(pPager);
+ rc = SQLITE_OK;
+ }else if( op==SQLITE_FCNTL_RESERVE_BYTES ){
+ int iNew = *(int*)pArg;
+ *(int*)pArg = sqlite3BtreeGetRequestedReserve(pBtree);
+ if( iNew>=0 && iNew<=255 ){
+ sqlite3BtreeSetPageSize(pBtree, 0, iNew, 0);
+ }
+ rc = SQLITE_OK;
+ }else{
+ rc = sqlite3OsFileControl(fd, op, pArg);
+ }
+ sqlite3BtreeLeave(pBtree);
+ }
+ sqlite3_mutex_leave(db->mutex);
+ return rc;
+}
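+
+/* Usage sketch (illustrative): fetch the underlying sqlite3_file for
+** the "main" database of an open handle "db":
+**
+**   sqlite3_file *pFd = 0;
+**   int rc = sqlite3_file_control(db, "main",
+**                                 SQLITE_FCNTL_FILE_POINTER, &pFd);
+**
+** On SQLITE_OK, pFd points at the open file handle for "main".
+*/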
+
+/*
+** Interface to the testing logic.
+*/
+SQLITE_API int sqlite3_test_control(int op, ...){
+ int rc = 0;
+#ifdef SQLITE_UNTESTABLE
+ UNUSED_PARAMETER(op);
+#else
+ va_list ap;
+ va_start(ap, op);
+ switch( op ){
+
+ /*
+ ** Save the current state of the PRNG.
+ */
+ case SQLITE_TESTCTRL_PRNG_SAVE: {
+ sqlite3PrngSaveState();
+ break;
+ }
+
+ /*
+ ** Restore the state of the PRNG to the last state saved using
+ ** PRNG_SAVE. If PRNG_SAVE has never before been called, then
+ ** this verb acts like PRNG_RESET.
+ */
+ case SQLITE_TESTCTRL_PRNG_RESTORE: {
+ sqlite3PrngRestoreState();
+ break;
+ }
+
+ /* sqlite3_test_control(SQLITE_TESTCTRL_PRNG_SEED, int x, sqlite3 *db);
+ **
+ ** Control the seed for the pseudo-random number generator (PRNG) that
+ ** is built into SQLite. Cases:
+ **
+ ** x!=0 && db!=0 Seed the PRNG to the current value of the
+ ** schema cookie in the main database for db, or
+ ** x if the schema cookie is zero. This case
+ ** is convenient to use with database fuzzers
+ ** as it allows the fuzzer some control over the
+  **                     PRNG seed.
+ **
+ ** x!=0 && db==0 Seed the PRNG to the value of x.
+ **
+ ** x==0 && db==0 Revert to default behavior of using the
+ ** xRandomness method on the primary VFS.
+ **
+ ** This test-control also resets the PRNG so that the new seed will
+ ** be used for the next call to sqlite3_randomness().
+ */
+#ifndef SQLITE_OMIT_WSD
+ case SQLITE_TESTCTRL_PRNG_SEED: {
+ int x = va_arg(ap, int);
+ int y;
+ sqlite3 *db = va_arg(ap, sqlite3*);
+ assert( db==0 || db->aDb[0].pSchema!=0 );
+ if( db && (y = db->aDb[0].pSchema->schema_cookie)!=0 ){ x = y; }
+ sqlite3Config.iPrngSeed = x;
+ sqlite3_randomness(0,0);
+ break;
+ }
+#endif
+
+ /*
+ ** sqlite3_test_control(BITVEC_TEST, size, program)
+ **
+ ** Run a test against a Bitvec object of size. The program argument
+ ** is an array of integers that defines the test. Return -1 on a
+ ** memory allocation error, 0 on success, or non-zero for an error.
+ ** See the sqlite3BitvecBuiltinTest() for additional information.
+ */
+ case SQLITE_TESTCTRL_BITVEC_TEST: {
+ int sz = va_arg(ap, int);
+ int *aProg = va_arg(ap, int*);
+ rc = sqlite3BitvecBuiltinTest(sz, aProg);
+ break;
+ }
+
+ /*
+ ** sqlite3_test_control(FAULT_INSTALL, xCallback)
+ **
+ ** Arrange to invoke xCallback() whenever sqlite3FaultSim() is called,
+ ** if xCallback is not NULL.
+ **
+ ** As a test of the fault simulator mechanism itself, sqlite3FaultSim(0)
+ ** is called immediately after installing the new callback and the return
+ ** value from sqlite3FaultSim(0) becomes the return from
+ ** sqlite3_test_control().
+ */
+ case SQLITE_TESTCTRL_FAULT_INSTALL: {
+ /* MSVC is picky about pulling func ptrs from va lists.
+ ** http://support.microsoft.com/kb/47961
+ ** sqlite3GlobalConfig.xTestCallback = va_arg(ap, int(*)(int));
+ */
+ typedef int(*TESTCALLBACKFUNC_t)(int);
+ sqlite3GlobalConfig.xTestCallback = va_arg(ap, TESTCALLBACKFUNC_t);
+ rc = sqlite3FaultSim(0);
+ break;
+ }
+
+ /*
+ ** sqlite3_test_control(BENIGN_MALLOC_HOOKS, xBegin, xEnd)
+ **
+ ** Register hooks to call to indicate which malloc() failures
+ ** are benign.
+ */
+ case SQLITE_TESTCTRL_BENIGN_MALLOC_HOOKS: {
+ typedef void (*void_function)(void);
+ void_function xBenignBegin;
+ void_function xBenignEnd;
+ xBenignBegin = va_arg(ap, void_function);
+ xBenignEnd = va_arg(ap, void_function);
+ sqlite3BenignMallocHooks(xBenignBegin, xBenignEnd);
+ break;
+ }
+
+ /*
+ ** sqlite3_test_control(SQLITE_TESTCTRL_PENDING_BYTE, unsigned int X)
+ **
+ ** Set the PENDING byte to the value in the argument, if X>0.
+ ** Make no changes if X==0. Return the value of the pending byte
+  ** as it existed before this routine was called.
+ **
+ ** IMPORTANT: Changing the PENDING byte from 0x40000000 results in
+ ** an incompatible database file format. Changing the PENDING byte
+ ** while any database connection is open results in undefined and
+ ** deleterious behavior.
+ */
+ case SQLITE_TESTCTRL_PENDING_BYTE: {
+ rc = PENDING_BYTE;
+#ifndef SQLITE_OMIT_WSD
+ {
+ unsigned int newVal = va_arg(ap, unsigned int);
+ if( newVal ) sqlite3PendingByte = newVal;
+ }
+#endif
+ break;
+ }
+
+ /*
+ ** sqlite3_test_control(SQLITE_TESTCTRL_ASSERT, int X)
+ **
+ ** This action provides a run-time test to see whether or not
+ ** assert() was enabled at compile-time. If X is true and assert()
+ ** is enabled, then the return value is true. If X is true and
+ ** assert() is disabled, then the return value is zero. If X is
+ ** false and assert() is enabled, then the assertion fires and the
+ ** process aborts. If X is false and assert() is disabled, then the
+ ** return value is zero.
+ */
+ case SQLITE_TESTCTRL_ASSERT: {
+ volatile int x = 0;
+ assert( /*side-effects-ok*/ (x = va_arg(ap,int))!=0 );
+ rc = x;
+ break;
+ }
+
+
+ /*
+ ** sqlite3_test_control(SQLITE_TESTCTRL_ALWAYS, int X)
+ **
+ ** This action provides a run-time test to see how the ALWAYS and
+ ** NEVER macros were defined at compile-time.
+ **
+ ** The return value is ALWAYS(X) if X is true, or 0 if X is false.
+ **
+ ** The recommended test is X==2. If the return value is 2, that means
+ ** ALWAYS() and NEVER() are both no-op pass-through macros, which is the
+ ** default setting. If the return value is 1, then ALWAYS() is either
+ ** hard-coded to true or else it asserts if its argument is false.
+ ** The first behavior (hard-coded to true) is the case if
+ ** SQLITE_TESTCTRL_ASSERT shows that assert() is disabled and the second
+ ** behavior (assert if the argument to ALWAYS() is false) is the case if
+ ** SQLITE_TESTCTRL_ASSERT shows that assert() is enabled.
+ **
+ ** The run-time test procedure might look something like this:
+ **
+ ** if( sqlite3_test_control(SQLITE_TESTCTRL_ALWAYS, 2)==2 ){
+ ** // ALWAYS() and NEVER() are no-op pass-through macros
+ ** }else if( sqlite3_test_control(SQLITE_TESTCTRL_ASSERT, 1) ){
+ ** // ALWAYS(x) asserts that x is true. NEVER(x) asserts x is false.
+ ** }else{
+ ** // ALWAYS(x) is a constant 1. NEVER(x) is a constant 0.
+ ** }
+ */
+ case SQLITE_TESTCTRL_ALWAYS: {
+ int x = va_arg(ap,int);
+ rc = x ? ALWAYS(x) : 0;
+ break;
+ }
+
+ /*
+ ** sqlite3_test_control(SQLITE_TESTCTRL_BYTEORDER);
+ **
+ ** The integer returned reveals the byte-order of the computer on which
+ ** SQLite is running:
+ **
+ ** 1 big-endian, determined at run-time
+ ** 10 little-endian, determined at run-time
+ ** 432101 big-endian, determined at compile-time
+ ** 123410 little-endian, determined at compile-time
+ */
+ case SQLITE_TESTCTRL_BYTEORDER: {
+ rc = SQLITE_BYTEORDER*100 + SQLITE_LITTLEENDIAN*10 + SQLITE_BIGENDIAN;
+ break;
+ }
+
+ /* sqlite3_test_control(SQLITE_TESTCTRL_OPTIMIZATIONS, sqlite3 *db, int N)
+ **
+ ** Enable or disable various optimizations for testing purposes. The
+ ** argument N is a bitmask of optimizations to be disabled. For normal
+ ** operation N should be 0. The idea is that a test program (like the
+ ** SQL Logic Test or SLT test module) can run the same SQL multiple times
+ ** with various optimizations disabled to verify that the same answer
+ ** is obtained in every case.
+ */
+ case SQLITE_TESTCTRL_OPTIMIZATIONS: {
+ sqlite3 *db = va_arg(ap, sqlite3*);
+ db->dbOptFlags = (u16)(va_arg(ap, int) & 0xffff);
+ break;
+ }
+
+ /* sqlite3_test_control(SQLITE_TESTCTRL_LOCALTIME_FAULT, int onoff);
+ **
+ ** If parameter onoff is non-zero, subsequent calls to localtime()
+ ** and its variants fail. If onoff is zero, undo this setting.
+ */
+ case SQLITE_TESTCTRL_LOCALTIME_FAULT: {
+ sqlite3GlobalConfig.bLocaltimeFault = va_arg(ap, int);
+ break;
+ }
+
+ /* sqlite3_test_control(SQLITE_TESTCTRL_INTERNAL_FUNCTIONS, sqlite3*);
+ **
+ ** Toggle the ability to use internal functions on or off for
+ ** the database connection given in the argument.
+ */
+ case SQLITE_TESTCTRL_INTERNAL_FUNCTIONS: {
+ sqlite3 *db = va_arg(ap, sqlite3*);
+ db->mDbFlags ^= DBFLAG_InternalFunc;
+ break;
+ }
+
+ /* sqlite3_test_control(SQLITE_TESTCTRL_NEVER_CORRUPT, int);
+ **
+ ** Set or clear a flag that indicates that the database file is always well-
+ ** formed and never corrupt. This flag is clear by default, indicating that
+ ** database files might have arbitrary corruption. Setting the flag during
+ ** testing causes certain assert() statements in the code to be activated
+  ** that demonstrate invariants on well-formed database files.
+ */
+ case SQLITE_TESTCTRL_NEVER_CORRUPT: {
+ sqlite3GlobalConfig.neverCorrupt = va_arg(ap, int);
+ break;
+ }
+
+ /* sqlite3_test_control(SQLITE_TESTCTRL_EXTRA_SCHEMA_CHECKS, int);
+ **
+  ** Set or clear a flag that causes SQLite to verify the type, name,
+ ** and tbl_name fields of the sqlite_schema table. This is normally
+ ** on, but it is sometimes useful to turn it off for testing.
+ **
+ ** 2020-07-22: Disabling EXTRA_SCHEMA_CHECKS also disables the
+ ** verification of rootpage numbers when parsing the schema. This
+ ** is useful to make it easier to reach strange internal error states
+ ** during testing. The EXTRA_SCHEMA_CHECKS setting is always enabled
+ ** in production.
+ */
+ case SQLITE_TESTCTRL_EXTRA_SCHEMA_CHECKS: {
+ sqlite3GlobalConfig.bExtraSchemaChecks = va_arg(ap, int);
+ break;
+ }
+
+ /* Set the threshold at which OP_Once counters reset back to zero.
+ ** By default this is 0x7ffffffe (over 2 billion), but that value is
+ ** too big to test in a reasonable amount of time, so this control is
+ ** provided to set a small and easily reachable reset value.
+ */
+ case SQLITE_TESTCTRL_ONCE_RESET_THRESHOLD: {
+ sqlite3GlobalConfig.iOnceResetThreshold = va_arg(ap, int);
+ break;
+ }
+
+ /* sqlite3_test_control(SQLITE_TESTCTRL_VDBE_COVERAGE, xCallback, ptr);
+ **
+ ** Set the VDBE coverage callback function to xCallback with context
+ ** pointer ptr.
+ */
+ case SQLITE_TESTCTRL_VDBE_COVERAGE: {
+#ifdef SQLITE_VDBE_COVERAGE
+ typedef void (*branch_callback)(void*,unsigned int,
+ unsigned char,unsigned char);
+ sqlite3GlobalConfig.xVdbeBranch = va_arg(ap,branch_callback);
+ sqlite3GlobalConfig.pVdbeBranchArg = va_arg(ap,void*);
+#endif
+ break;
+ }
+
+ /* sqlite3_test_control(SQLITE_TESTCTRL_SORTER_MMAP, db, nMax); */
+ case SQLITE_TESTCTRL_SORTER_MMAP: {
+ sqlite3 *db = va_arg(ap, sqlite3*);
+ db->nMaxSorterMmap = va_arg(ap, int);
+ break;
+ }
+
+ /* sqlite3_test_control(SQLITE_TESTCTRL_ISINIT);
+ **
+ ** Return SQLITE_OK if SQLite has been initialized and SQLITE_ERROR if
+ ** not.
+ */
+ case SQLITE_TESTCTRL_ISINIT: {
+ if( sqlite3GlobalConfig.isInit==0 ) rc = SQLITE_ERROR;
+ break;
+ }
+
+ /* sqlite3_test_control(SQLITE_TESTCTRL_IMPOSTER, db, dbName, onOff, tnum);
+ **
+ ** This test control is used to create imposter tables. "db" is a pointer
+ ** to the database connection. dbName is the database name (ex: "main" or
+ ** "temp") which will receive the imposter. "onOff" turns imposter mode on
+ ** or off. "tnum" is the root page of the b-tree to which the imposter
+ ** table should connect.
+ **
+ ** Enable imposter mode only when the schema has already been parsed. Then
+ ** run a single CREATE TABLE statement to construct the imposter table in
+ ** the parsed schema. Then turn imposter mode back off again.
+ **
+ ** If onOff==0 and tnum>0 then reset the schema for all databases, causing
+ ** the schema to be reparsed the next time it is needed. This has the
+ ** effect of erasing all imposter tables.
+ */
+ case SQLITE_TESTCTRL_IMPOSTER: {
+ sqlite3 *db = va_arg(ap, sqlite3*);
+ sqlite3_mutex_enter(db->mutex);
+ db->init.iDb = sqlite3FindDbName(db, va_arg(ap,const char*));
+ db->init.busy = db->init.imposterTable = va_arg(ap,int);
+ db->init.newTnum = va_arg(ap,int);
+ if( db->init.busy==0 && db->init.newTnum>0 ){
+ sqlite3ResetAllSchemasOfConnection(db);
+ }
+ sqlite3_mutex_leave(db->mutex);
+ break;
+ }
+
+#if defined(YYCOVERAGE)
+ /* sqlite3_test_control(SQLITE_TESTCTRL_PARSER_COVERAGE, FILE *out)
+ **
+ ** This test control (only available when SQLite is compiled with
+ ** -DYYCOVERAGE) writes a report onto "out" that shows all
+ ** state/lookahead combinations in the parser state machine
+ ** which are never exercised. If any state is missed, make the
+ ** return code SQLITE_ERROR.
+ */
+ case SQLITE_TESTCTRL_PARSER_COVERAGE: {
+ FILE *out = va_arg(ap, FILE*);
+ if( sqlite3ParserCoverage(out) ) rc = SQLITE_ERROR;
+ break;
+ }
+#endif /* defined(YYCOVERAGE) */
+
+ /* sqlite3_test_control(SQLITE_TESTCTRL_RESULT_INTREAL, sqlite3_context*);
+ **
+ ** This test-control causes the most recent sqlite3_result_int64() value
+ ** to be interpreted as a MEM_IntReal instead of as an MEM_Int. Normally,
+ ** MEM_IntReal values only arise during an INSERT operation of integer
+ ** values into a REAL column, so they can be challenging to test. This
+ ** test-control enables us to write an intreal() SQL function that can
+ ** inject an intreal() value at arbitrary places in an SQL statement,
+ ** for testing purposes.
+ */
+ case SQLITE_TESTCTRL_RESULT_INTREAL: {
+ sqlite3_context *pCtx = va_arg(ap, sqlite3_context*);
+ sqlite3ResultIntReal(pCtx);
+ break;
+ }
+ }
+ va_end(ap);
+#endif /* SQLITE_UNTESTABLE */
+ return rc;
+}
+
+/*
+** The Pager stores the Database filename, Journal filename, and WAL filename
+** consecutively in memory, in that order. The database filename is prefixed
+** by four zero bytes. Locate the start of the database filename by searching
+** backwards for the first byte following four consecutive zero bytes.
+**
+** This only works if the filename passed in was obtained from the Pager.
+*/
+static const char *databaseName(const char *zName){
+ while( zName[-1]!=0 || zName[-2]!=0 || zName[-3]!=0 || zName[-4]!=0 ){
+ zName--;
+ }
+ return zName;
+}
+
+/*
+** Append text z[] to the end of p[]. Return a pointer to the first
+** character after the zero terminator on the new text in p[].
+*/
+static char *appendText(char *p, const char *z){
+ size_t n = strlen(z);
+ memcpy(p, z, n+1);
+ return p+n+1;
+}
+
+/*
+** Allocate memory to hold names for a database, journal file, WAL file,
+** and query parameters. The pointer returned is valid for use by
+** sqlite3_filename_database() and sqlite3_uri_parameter() and related
+** functions.
+**
+** Memory layout must be compatible with that generated by the pager
+** and expected by sqlite3_uri_parameter() and databaseName().
+*/
+SQLITE_API char *sqlite3_create_filename(
+ const char *zDatabase,
+ const char *zJournal,
+ const char *zWal,
+ int nParam,
+ const char **azParam
+){
+ sqlite3_int64 nByte;
+ int i;
+ char *pResult, *p;
+ nByte = strlen(zDatabase) + strlen(zJournal) + strlen(zWal) + 10;
+ for(i=0; i<nParam*2; i++){
+ nByte += strlen(azParam[i])+1;
+ }
+ pResult = p = sqlite3_malloc64( nByte );
+ if( p==0 ) return 0;
+ memset(p, 0, 4);
+ p += 4;
+ p = appendText(p, zDatabase);
+ for(i=0; i<nParam*2; i++){
+ p = appendText(p, azParam[i]);
+ }
+ *(p++) = 0;
+ p = appendText(p, zJournal);
+ p = appendText(p, zWal);
+ *(p++) = 0;
+ *(p++) = 0;
+ assert( (sqlite3_int64)(p - pResult)==nByte );
+ return pResult + 4;
+}
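+
+/* Usage sketch (illustrative names). azParam holds nParam key/value
+** pairs, so it has 2*nParam entries:
+**
+**   static const char *azParam[] = { "vfs", "unix" };
+**   char *zName = sqlite3_create_filename("/tmp/x.db",
+**                    "/tmp/x.db-journal", "/tmp/x.db-wal", 1, azParam);
+**   if( zName ){
+**     const char *zVfs = sqlite3_uri_parameter(zName, "vfs");
+**     sqlite3_free_filename(zName);
+**   }
+*/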
+
+/*
+** Free memory obtained from sqlite3_create_filename(). It is a severe
+** error to call this routine with any parameter other than a pointer
+** previously obtained from sqlite3_create_filename() or a NULL pointer.
+*/
+SQLITE_API void sqlite3_free_filename(char *p){
+ if( p==0 ) return;
+ p = (char*)databaseName(p);
+ sqlite3_free(p - 4);
+}
+
+
+/*
+** This is a utility routine, useful to VFS implementations, that checks
+** to see if a database file was a URI that contained a specific query
+** parameter, and if so obtains the value of the query parameter.
+**
+** The zFilename argument is the filename pointer passed into the xOpen()
+** method of a VFS implementation. The zParam argument is the name of the
+** query parameter we seek. This routine returns the value of the zParam
+** parameter if it exists. If the parameter does not exist, this routine
+** returns a NULL pointer.
+*/
+SQLITE_API const char *sqlite3_uri_parameter(const char *zFilename, const char *zParam){
+ if( zFilename==0 || zParam==0 ) return 0;
+ zFilename = databaseName(zFilename);
+ return uriParameter(zFilename, zParam);
+}
+
+/*
+** Return a pointer to the name of the Nth query parameter of the filename.
+*/
+SQLITE_API const char *sqlite3_uri_key(const char *zFilename, int N){
+ if( zFilename==0 || N<0 ) return 0;
+ zFilename = databaseName(zFilename);
+ zFilename += sqlite3Strlen30(zFilename) + 1;
+ while( zFilename[0] && (N--)>0 ){
+ zFilename += sqlite3Strlen30(zFilename) + 1;
+ zFilename += sqlite3Strlen30(zFilename) + 1;
+ }
+ return zFilename[0] ? zFilename : 0;
+}
+
+/*
+** Return a boolean value for a query parameter.
+*/
+SQLITE_API int sqlite3_uri_boolean(const char *zFilename, const char *zParam, int bDflt){
+ const char *z = sqlite3_uri_parameter(zFilename, zParam);
+ bDflt = bDflt!=0;
+ return z ? sqlite3GetBoolean(z, bDflt) : bDflt;
+}
+
+/*
+** Return a 64-bit integer value for a query parameter.
+*/
+SQLITE_API sqlite3_int64 sqlite3_uri_int64(
+ const char *zFilename, /* Filename as passed to xOpen */
+ const char *zParam, /* URI parameter sought */
+  sqlite3_int64 bDflt       /* Value to return if the parameter is missing */
+){
+ const char *z = sqlite3_uri_parameter(zFilename, zParam);
+ sqlite3_int64 v;
+ if( z && sqlite3DecOrHexToI64(z, &v)==0 ){
+ bDflt = v;
+ }
+ return bDflt;
+}
+
+/*
+** Translate a filename that was handed to a VFS routine into the corresponding
+** database, journal, or WAL file.
+**
+** It is an error to pass this routine a filename string that was not
+** passed into the VFS from the SQLite core. Doing so is similar to
+** passing free() a pointer that was not obtained from malloc() - it is
+** an error that we cannot easily detect but that will likely cause memory
+** corruption.
+*/
+SQLITE_API const char *sqlite3_filename_database(const char *zFilename){
+ return databaseName(zFilename);
+}
+SQLITE_API const char *sqlite3_filename_journal(const char *zFilename){
+ zFilename = databaseName(zFilename);
+ zFilename += sqlite3Strlen30(zFilename) + 1;
+ while( zFilename[0] ){
+ zFilename += sqlite3Strlen30(zFilename) + 1;
+ zFilename += sqlite3Strlen30(zFilename) + 1;
+ }
+ return zFilename + 1;
+}
+SQLITE_API const char *sqlite3_filename_wal(const char *zFilename){
+#ifdef SQLITE_OMIT_WAL
+ return 0;
+#else
+ zFilename = sqlite3_filename_journal(zFilename);
+ zFilename += sqlite3Strlen30(zFilename) + 1;
+ return zFilename;
+#endif
+}
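+
+/*
+** Illustrative sketch (not part of SQLite): given a database filename
+** zName as passed into a VFS xOpen() method, the three routines above
+** recover the related names:
+**
+**   const char *zDb   = sqlite3_filename_database(zName);
+**   const char *zJrnl = sqlite3_filename_journal(zName);
+**   const char *zWal  = sqlite3_filename_wal(zName);  /* 0 if WAL omitted */
+*/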
+
+/*
+** Return the Btree pointer identified by zDbName. Return NULL if not found.
+*/
+SQLITE_PRIVATE Btree *sqlite3DbNameToBtree(sqlite3 *db, const char *zDbName){
+ int iDb = zDbName ? sqlite3FindDbName(db, zDbName) : 0;
+ return iDb<0 ? 0 : db->aDb[iDb].pBt;
+}
+
+/*
+** Return the filename of the database associated with a database
+** connection.
+*/
+SQLITE_API const char *sqlite3_db_filename(sqlite3 *db, const char *zDbName){
+ Btree *pBt;
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( !sqlite3SafetyCheckOk(db) ){
+ (void)SQLITE_MISUSE_BKPT;
+ return 0;
+ }
+#endif
+ pBt = sqlite3DbNameToBtree(db, zDbName);
+ return pBt ? sqlite3BtreeGetFilename(pBt) : 0;
+}
+
+/*
+** Return 1 if the database is read-only or 0 if read/write. Return -1 if
+** no such database exists.
+*/
+SQLITE_API int sqlite3_db_readonly(sqlite3 *db, const char *zDbName){
+ Btree *pBt;
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( !sqlite3SafetyCheckOk(db) ){
+ (void)SQLITE_MISUSE_BKPT;
+ return -1;
+ }
+#endif
+ pBt = sqlite3DbNameToBtree(db, zDbName);
+ return pBt ? sqlite3BtreeIsReadonly(pBt) : -1;
+}
+
+#ifdef SQLITE_ENABLE_SNAPSHOT
+/*
+** Obtain a snapshot handle for the snapshot of database zDb currently
+** being read by handle db.
+*/
+SQLITE_API int sqlite3_snapshot_get(
+ sqlite3 *db,
+ const char *zDb,
+ sqlite3_snapshot **ppSnapshot
+){
+ int rc = SQLITE_ERROR;
+#ifndef SQLITE_OMIT_WAL
+
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( !sqlite3SafetyCheckOk(db) ){
+ return SQLITE_MISUSE_BKPT;
+ }
+#endif
+ sqlite3_mutex_enter(db->mutex);
+
+ if( db->autoCommit==0 ){
+ int iDb = sqlite3FindDbName(db, zDb);
+ if( iDb==0 || iDb>1 ){
+ Btree *pBt = db->aDb[iDb].pBt;
+ if( 0==sqlite3BtreeIsInTrans(pBt) ){
+ rc = sqlite3BtreeBeginTrans(pBt, 0, 0);
+ if( rc==SQLITE_OK ){
+ rc = sqlite3PagerSnapshotGet(sqlite3BtreePager(pBt), ppSnapshot);
+ }
+ }
+ }
+ }
+
+ sqlite3_mutex_leave(db->mutex);
+#endif /* SQLITE_OMIT_WAL */
+ return rc;
+}
+
+/*
+** Open a read-transaction on the snapshot identified by pSnapshot.
+*/
+SQLITE_API int sqlite3_snapshot_open(
+ sqlite3 *db,
+ const char *zDb,
+ sqlite3_snapshot *pSnapshot
+){
+ int rc = SQLITE_ERROR;
+#ifndef SQLITE_OMIT_WAL
+
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( !sqlite3SafetyCheckOk(db) ){
+ return SQLITE_MISUSE_BKPT;
+ }
+#endif
+ sqlite3_mutex_enter(db->mutex);
+ if( db->autoCommit==0 ){
+ int iDb;
+ iDb = sqlite3FindDbName(db, zDb);
+ if( iDb==0 || iDb>1 ){
+ Btree *pBt = db->aDb[iDb].pBt;
+ if( sqlite3BtreeIsInTrans(pBt)==0 ){
+ Pager *pPager = sqlite3BtreePager(pBt);
+ int bUnlock = 0;
+ if( sqlite3BtreeIsInReadTrans(pBt) ){
+ if( db->nVdbeActive==0 ){
+ rc = sqlite3PagerSnapshotCheck(pPager, pSnapshot);
+ if( rc==SQLITE_OK ){
+ bUnlock = 1;
+ rc = sqlite3BtreeCommit(pBt);
+ }
+ }
+ }else{
+ rc = SQLITE_OK;
+ }
+ if( rc==SQLITE_OK ){
+ rc = sqlite3PagerSnapshotOpen(pPager, pSnapshot);
+ }
+ if( rc==SQLITE_OK ){
+ rc = sqlite3BtreeBeginTrans(pBt, 0, 0);
+ sqlite3PagerSnapshotOpen(pPager, 0);
+ }
+ if( bUnlock ){
+ sqlite3PagerSnapshotUnlock(pPager);
+ }
+ }
+ }
+ }
+
+ sqlite3_mutex_leave(db->mutex);
+#endif /* SQLITE_OMIT_WAL */
+ return rc;
+}
+
+/*
+** Recover as many snapshots as possible from the wal file associated with
+** schema zDb of database db.
+*/
+SQLITE_API int sqlite3_snapshot_recover(sqlite3 *db, const char *zDb){
+ int rc = SQLITE_ERROR;
+ int iDb;
+#ifndef SQLITE_OMIT_WAL
+
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( !sqlite3SafetyCheckOk(db) ){
+ return SQLITE_MISUSE_BKPT;
+ }
+#endif
+
+ sqlite3_mutex_enter(db->mutex);
+ iDb = sqlite3FindDbName(db, zDb);
+ if( iDb==0 || iDb>1 ){
+ Btree *pBt = db->aDb[iDb].pBt;
+ if( 0==sqlite3BtreeIsInReadTrans(pBt) ){
+ rc = sqlite3BtreeBeginTrans(pBt, 0, 0);
+ if( rc==SQLITE_OK ){
+ rc = sqlite3PagerSnapshotRecover(sqlite3BtreePager(pBt));
+ sqlite3BtreeCommit(pBt);
+ }
+ }
+ }
+ sqlite3_mutex_leave(db->mutex);
+#endif /* SQLITE_OMIT_WAL */
+ return rc;
+}
+
+/*
+** Free a snapshot handle obtained from sqlite3_snapshot_get().
+*/
+SQLITE_API void sqlite3_snapshot_free(sqlite3_snapshot *pSnapshot){
+ sqlite3_free(pSnapshot);
+}
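+
+/*
+** Illustrative sketch (not part of SQLite): one plausible use of the
+** snapshot interfaces above. Error handling is abbreviated; the database
+** is assumed to be in WAL mode, and snapshots can only be taken and
+** opened inside an explicit transaction.
+**
+**   sqlite3_snapshot *pSnap = 0;
+**   sqlite3_exec(db, "BEGIN", 0, 0, 0);
+**   if( sqlite3_snapshot_get(db, "main", &pSnap)==SQLITE_OK ){
+**     /* ... reads here see the snapshot just captured ... */
+**     sqlite3_exec(db, "COMMIT", 0, 0, 0);
+**
+**     /* Later: rewind a new read transaction back to that snapshot. */
+**     sqlite3_exec(db, "BEGIN", 0, 0, 0);
+**     if( sqlite3_snapshot_open(db, "main", pSnap)==SQLITE_OK ){
+**       /* ... reads now see the historical snapshot ... */
+**     }
+**     sqlite3_exec(db, "COMMIT", 0, 0, 0);
+**     sqlite3_snapshot_free(pSnap);
+**   }
+*/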
+#endif /* SQLITE_ENABLE_SNAPSHOT */
+
+#ifndef SQLITE_OMIT_COMPILEOPTION_DIAGS
+/*
+** Given the name of a compile-time option, return true if that option
+** was used and false if not.
+**
+** The name can optionally begin with "SQLITE_" but the "SQLITE_" prefix
+** is not required for a match.
+*/
+SQLITE_API int sqlite3_compileoption_used(const char *zOptName){
+ int i, n;
+ int nOpt;
+ const char **azCompileOpt;
+
+#if SQLITE_ENABLE_API_ARMOR
+ if( zOptName==0 ){
+ (void)SQLITE_MISUSE_BKPT;
+ return 0;
+ }
+#endif
+
+ azCompileOpt = sqlite3CompileOptions(&nOpt);
+
+ if( sqlite3StrNICmp(zOptName, "SQLITE_", 7)==0 ) zOptName += 7;
+ n = sqlite3Strlen30(zOptName);
+
+ /* Since nOpt is normally in single digits, a linear search is
+ ** adequate. No need for a binary search. */
+ for(i=0; i<nOpt; i++){
+ if( sqlite3StrNICmp(zOptName, azCompileOpt[i], n)==0
+ && sqlite3IsIdChar((unsigned char)azCompileOpt[i][n])==0
+ ){
+ return 1;
+ }
+ }
+ return 0;
+}
+
+/*
+** Return the N-th compile-time option string. If N is out of range,
+** return a NULL pointer.
+*/
+SQLITE_API const char *sqlite3_compileoption_get(int N){
+ int nOpt;
+ const char **azCompileOpt;
+ azCompileOpt = sqlite3CompileOptions(&nOpt);
+ if( N>=0 && N<nOpt ){
+ return azCompileOpt[N];
+ }
+ return 0;
+}
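+
+/*
+** Illustrative sketch (not part of SQLite): enumerating every compile-time
+** option using the two interfaces above.
+**
+**   int i;
+**   const char *z;
+**   for(i=0; (z = sqlite3_compileoption_get(i))!=0; i++){
+**     printf("SQLITE_%s\n", z);  /* returned strings omit the prefix */
+**   }
+**   /* Both spellings match the same option: */
+**   assert( sqlite3_compileoption_used("SQLITE_THREADSAFE")
+**        == sqlite3_compileoption_used("THREADSAFE") );
+*/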
+#endif /* SQLITE_OMIT_COMPILEOPTION_DIAGS */
+
+/************** End of main.c ************************************************/
+/************** Begin file notify.c ******************************************/
+/*
+** 2009 March 3
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+**
+** This file contains the implementation of the sqlite3_unlock_notify()
+** API method and its associated functionality.
+*/
+/* #include "sqliteInt.h" */
+/* #include "btreeInt.h" */
+
+/* Omit this entire file if SQLITE_ENABLE_UNLOCK_NOTIFY is not defined. */
+#ifdef SQLITE_ENABLE_UNLOCK_NOTIFY
+
+/*
+** Public interfaces:
+**
+** sqlite3ConnectionBlocked()
+** sqlite3ConnectionUnlocked()
+** sqlite3ConnectionClosed()
+** sqlite3_unlock_notify()
+*/
+
+#define assertMutexHeld() \
+ assert( sqlite3_mutex_held(sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MAIN)) )
+
+/*
+** Head of a linked list of all sqlite3 objects created by this process
+** for which either sqlite3.pBlockingConnection or sqlite3.pUnlockConnection
+** is not NULL. This variable may only be accessed while the STATIC_MAIN
+** mutex is held.
+*/
+static sqlite3 *SQLITE_WSD sqlite3BlockedList = 0;
+
+#ifndef NDEBUG
+/*
+** This function is a complex assert() that verifies the following
+** properties of the blocked connections list:
+**
+** 1) Each entry in the list has a non-NULL value for either
+** pUnlockConnection or pBlockingConnection, or both.
+**
+** 2) All entries in the list that share a common value for
+** xUnlockNotify are grouped together.
+**
+** 3) If the argument db is not NULL, then none of the entries in the
+** blocked connections list have pUnlockConnection or pBlockingConnection
+** set to db. This is used when closing connection db.
+*/
+static void checkListProperties(sqlite3 *db){
+ sqlite3 *p;
+ for(p=sqlite3BlockedList; p; p=p->pNextBlocked){
+ int seen = 0;
+ sqlite3 *p2;
+
+ /* Verify property (1) */
+ assert( p->pUnlockConnection || p->pBlockingConnection );
+
+ /* Verify property (2) */
+ for(p2=sqlite3BlockedList; p2!=p; p2=p2->pNextBlocked){
+ if( p2->xUnlockNotify==p->xUnlockNotify ) seen = 1;
+ assert( p2->xUnlockNotify==p->xUnlockNotify || !seen );
+ assert( db==0 || p->pUnlockConnection!=db );
+ assert( db==0 || p->pBlockingConnection!=db );
+ }
+ }
+}
+#else
+# define checkListProperties(x)
+#endif
+
+/*
+** Remove connection db from the blocked connections list. If connection
+** db is not currently a part of the list, this function is a no-op.
+*/
+static void removeFromBlockedList(sqlite3 *db){
+ sqlite3 **pp;
+ assertMutexHeld();
+ for(pp=&sqlite3BlockedList; *pp; pp = &(*pp)->pNextBlocked){
+ if( *pp==db ){
+ *pp = (*pp)->pNextBlocked;
+ break;
+ }
+ }
+}
+
+/*
+** Add connection db to the blocked connections list. It is assumed
+** that it is not already a part of the list.
+*/
+static void addToBlockedList(sqlite3 *db){
+ sqlite3 **pp;
+ assertMutexHeld();
+ for(
+ pp=&sqlite3BlockedList;
+ *pp && (*pp)->xUnlockNotify!=db->xUnlockNotify;
+ pp=&(*pp)->pNextBlocked
+ );
+ db->pNextBlocked = *pp;
+ *pp = db;
+}
+
+/*
+** Obtain the STATIC_MAIN mutex.
+*/
+static void enterMutex(void){
+ sqlite3_mutex_enter(sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MAIN));
+ checkListProperties(0);
+}
+
+/*
+** Release the STATIC_MAIN mutex.
+*/
+static void leaveMutex(void){
+ assertMutexHeld();
+ checkListProperties(0);
+ sqlite3_mutex_leave(sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MAIN));
+}
+
+/*
+** Register an unlock-notify callback.
+**
+** This is called after connection "db" has attempted some operation
+** but has received an SQLITE_LOCKED error because another connection
+** (call it pOther) in the same process was busy using the same shared
+** cache. pOther is found by looking at db->pBlockingConnection.
+**
+** If there is no blocking connection, the callback is invoked immediately,
+** before this routine returns.
+**
+** If pOther is already blocked on db, then report SQLITE_LOCKED, to indicate
+** a deadlock.
+**
+** Otherwise, make arrangements to invoke xNotify when pOther drops
+** its locks.
+**
+** Each call to this routine overrides any prior callbacks registered
+** on the same "db". If xNotify==0 then any prior callbacks are immediately
+** cancelled.
+*/
+SQLITE_API int sqlite3_unlock_notify(
+ sqlite3 *db,
+ void (*xNotify)(void **, int),
+ void *pArg
+){
+ int rc = SQLITE_OK;
+
+ sqlite3_mutex_enter(db->mutex);
+ enterMutex();
+
+ if( xNotify==0 ){
+ removeFromBlockedList(db);
+ db->pBlockingConnection = 0;
+ db->pUnlockConnection = 0;
+ db->xUnlockNotify = 0;
+ db->pUnlockArg = 0;
+ }else if( 0==db->pBlockingConnection ){
+ /* The blocking transaction has been concluded. Or there never was a
+ ** blocking transaction. In either case, invoke the notify callback
+ ** immediately.
+ */
+ xNotify(&pArg, 1);
+ }else{
+ sqlite3 *p;
+
+ for(p=db->pBlockingConnection; p && p!=db; p=p->pUnlockConnection){}
+ if( p ){
+ rc = SQLITE_LOCKED; /* Deadlock detected. */
+ }else{
+ db->pUnlockConnection = db->pBlockingConnection;
+ db->xUnlockNotify = xNotify;
+ db->pUnlockArg = pArg;
+ removeFromBlockedList(db);
+ addToBlockedList(db);
+ }
+ }
+
+ leaveMutex();
+ assert( !db->mallocFailed );
+ sqlite3ErrorWithMsg(db, rc, (rc?"database is deadlocked":0));
+ sqlite3_mutex_leave(db->mutex);
+ return rc;
+}
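+
+/*
+** Illustrative sketch (not part of SQLite): the typical application-side
+** use of sqlite3_unlock_notify() - block the calling thread until the
+** blocking connection's transaction concludes. The UnlockedCond structure,
+** the callback name, and the pthreads usage are assumptions for
+** illustration.
+**
+**   typedef struct UnlockedCond {
+**     pthread_mutex_t mu;
+**     pthread_cond_t cond;
+**     int fired;
+**   } UnlockedCond;
+**
+**   static void unlockNotifyCb(void **apArg, int nArg){
+**     int i;
+**     for(i=0; i<nArg; i++){
+**       UnlockedCond *p = (UnlockedCond *)apArg[i];
+**       pthread_mutex_lock(&p->mu);
+**       p->fired = 1;
+**       pthread_cond_signal(&p->cond);
+**       pthread_mutex_unlock(&p->mu);
+**     }
+**   }
+**
+**   /* After a statement returns SQLITE_LOCKED: */
+**   UnlockedCond c = { PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 0 };
+**   int rc = sqlite3_unlock_notify(db, unlockNotifyCb, (void *)&c);
+**   if( rc==SQLITE_OK ){          /* rc==SQLITE_LOCKED means deadlock */
+**     pthread_mutex_lock(&c.mu);
+**     while( !c.fired ) pthread_cond_wait(&c.cond, &c.mu);
+**     pthread_mutex_unlock(&c.mu);
+**   }
+*/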
+
+/*
+** This function is called while stepping or preparing a statement
+** associated with connection db. The operation will return SQLITE_LOCKED
+** to the user because it requires a lock that will not be available
+** until connection pBlocker concludes its current transaction.
+*/
+SQLITE_PRIVATE void sqlite3ConnectionBlocked(sqlite3 *db, sqlite3 *pBlocker){
+ enterMutex();
+ if( db->pBlockingConnection==0 && db->pUnlockConnection==0 ){
+ addToBlockedList(db);
+ }
+ db->pBlockingConnection = pBlocker;
+ leaveMutex();
+}
+
+/*
+** This function is called when the transaction opened by database db
+** has just finished. Locks held by database connection db have been
+** released.
+**
+** This function loops through each entry in the blocked connections
+** list and does the following:
+**
+** 1) If the sqlite3.pBlockingConnection member of a list entry is
+** set to db, then set pBlockingConnection=0.
+**
+** 2) If the sqlite3.pUnlockConnection member of a list entry is
+** set to db, then invoke the configured unlock-notify callback and
+** set pUnlockConnection=0.
+**
+** 3) If the two steps above mean that pBlockingConnection==0 and
+** pUnlockConnection==0, remove the entry from the blocked connections
+** list.
+*/
+SQLITE_PRIVATE void sqlite3ConnectionUnlocked(sqlite3 *db){
+ void (*xUnlockNotify)(void **, int) = 0; /* Unlock-notify cb to invoke */
+ int nArg = 0; /* Number of entries in aArg[] */
+ sqlite3 **pp; /* Iterator variable */
+ void **aArg; /* Arguments to the unlock callback */
+ void **aDyn = 0; /* Dynamically allocated space for aArg[] */
+ void *aStatic[16]; /* Starter space for aArg[]. No malloc required */
+
+ aArg = aStatic;
+ enterMutex(); /* Enter STATIC_MAIN mutex */
+
+ /* This loop runs once for each entry in the blocked-connections list. */
+ for(pp=&sqlite3BlockedList; *pp; /* no-op */ ){
+ sqlite3 *p = *pp;
+
+ /* Step 1. */
+ if( p->pBlockingConnection==db ){
+ p->pBlockingConnection = 0;
+ }
+
+ /* Step 2. */
+ if( p->pUnlockConnection==db ){
+ assert( p->xUnlockNotify );
+ if( p->xUnlockNotify!=xUnlockNotify && nArg!=0 ){
+ xUnlockNotify(aArg, nArg);
+ nArg = 0;
+ }
+
+ sqlite3BeginBenignMalloc();
+ assert( aArg==aDyn || (aDyn==0 && aArg==aStatic) );
+ assert( nArg<=(int)ArraySize(aStatic) || aArg==aDyn );
+ if( (!aDyn && nArg==(int)ArraySize(aStatic))
+ || (aDyn && nArg==(int)(sqlite3MallocSize(aDyn)/sizeof(void*)))
+ ){
+ /* The aArg[] array needs to grow. */
+ void **pNew = (void **)sqlite3Malloc(nArg*sizeof(void *)*2);
+ if( pNew ){
+ memcpy(pNew, aArg, nArg*sizeof(void *));
+ sqlite3_free(aDyn);
+ aDyn = aArg = pNew;
+ }else{
+ /* This occurs when the array of context pointers that need to
+ ** be passed to the unlock-notify callback is larger than the
+ ** aStatic[] array allocated on the stack and the attempt to
+ ** allocate a larger array from the heap has failed.
+ **
+ ** This is a difficult situation to handle. Returning an error
+ ** code to the caller is insufficient, as even if an error code
+ ** is returned the transaction on connection db will still be
+ ** closed and the unlock-notify callbacks on blocked connections
+ ** will go unissued. This might cause the application to wait
+ ** indefinitely for an unlock-notify callback that will never
+ ** arrive.
+ **
+ ** Instead, invoke the unlock-notify callback with the context
+ ** array already accumulated. We can then clear the array and
+ ** begin accumulating any further context pointers without
+ ** requiring any dynamic allocation. This is sub-optimal because
+ ** it means that instead of one callback with a large array of
+ ** context pointers the application will receive two or more
+ ** callbacks with smaller arrays of context pointers, which will
+        ** reduce the application's ability to prioritize multiple
+ ** connections. But it is the best that can be done under the
+ ** circumstances.
+ */
+ xUnlockNotify(aArg, nArg);
+ nArg = 0;
+ }
+ }
+ sqlite3EndBenignMalloc();
+
+ aArg[nArg++] = p->pUnlockArg;
+ xUnlockNotify = p->xUnlockNotify;
+ p->pUnlockConnection = 0;
+ p->xUnlockNotify = 0;
+ p->pUnlockArg = 0;
+ }
+
+ /* Step 3. */
+ if( p->pBlockingConnection==0 && p->pUnlockConnection==0 ){
+ /* Remove connection p from the blocked connections list. */
+ *pp = p->pNextBlocked;
+ p->pNextBlocked = 0;
+ }else{
+ pp = &p->pNextBlocked;
+ }
+ }
+
+ if( nArg!=0 ){
+ xUnlockNotify(aArg, nArg);
+ }
+ sqlite3_free(aDyn);
+ leaveMutex(); /* Leave STATIC_MAIN mutex */
+}
+
+/*
+** This is called when the database connection passed as an argument is
+** being closed. The connection is removed from the blocked list.
+*/
+SQLITE_PRIVATE void sqlite3ConnectionClosed(sqlite3 *db){
+ sqlite3ConnectionUnlocked(db);
+ enterMutex();
+ removeFromBlockedList(db);
+ checkListProperties(db);
+ leaveMutex();
+}
+#endif
+
+/************** End of notify.c **********************************************/
+/************** Begin file fts3.c ********************************************/
+/*
+** 2006 Oct 10
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+******************************************************************************
+**
+** This is an SQLite module implementing full-text search.
+*/
+
+/*
+** The code in this file is only compiled if:
+**
+** * The FTS3 module is being built as an extension
+** (in which case SQLITE_CORE is not defined), or
+**
+** * The FTS3 module is being built into the core of
+** SQLite (in which case SQLITE_ENABLE_FTS3 is defined).
+*/
+
+/* The full-text index is stored in a series of b+tree (-like)
+** structures called segments which map terms to doclists. The
+** structures are like b+trees in layout, but are constructed from the
+** bottom up in optimal fashion and are not updatable. Since trees
+** are built from the bottom up, things will be described from the
+** bottom up.
+**
+**
+**** Varints ****
+** The basic unit of encoding is a variable-length integer called a
+** varint. We encode variable-length integers in little-endian order
+** using seven bits per byte as follows:
+**
+** KEY:
+** A = 0xxxxxxx 7 bits of data and one flag bit
+** B = 1xxxxxxx 7 bits of data and one flag bit
+**
+** 7 bits - A
+** 14 bits - BA
+** 21 bits - BBA
+** and so on.
+**
+** This is similar in concept to how SQLite encodes "varints" but
+** the encoding is not the same. SQLite varints are big-endian
+** and are limited to 9 bytes in length whereas FTS3 varints are
+** little-endian and can be up to 10 bytes in length (in theory).
+**
+** Example encodings (a minimal encoder sketch appears after this
+** comment block):
+**
+**     1:  0x01
+**   127:  0x7f
+**   128:  0x80 0x01
+**
+**
+**** Document lists ****
+** A doclist (document list) holds a docid-sorted list of hits for a
+** given term. Doclists hold docids and associated token positions.
+** A docid is the unique integer identifier for a single document.
+** A position is the index of a word within the document. The first
+** word of the document has a position of 0.
+**
+** FTS3 used to optionally store character offsets using a compile-time
+** option. But that functionality is no longer supported.
+**
+** A doclist is stored like this:
+**
+** array {
+** varint docid; (delta from previous doclist)
+** array { (position list for column 0)
+** varint position; (2 more than the delta from previous position)
+** }
+** array {
+** varint POS_COLUMN; (marks start of position list for new column)
+** varint column; (index of new column)
+** array {
+** varint position; (2 more than the delta from previous position)
+** }
+** }
+** varint POS_END; (marks end of positions for this document.)
+** }
+**
+** Here, array { X } means zero or more occurrences of X, adjacent in
+** memory. A "position" is an index of a token in the token stream
+** generated by the tokenizer. Note that POS_END and POS_COLUMN occur
+** in the same logical place as the position element, and act as sentinels
+** ending a position list array. POS_END is 0. POS_COLUMN is 1.
+** The position numbers are not stored literally but rather as two more
+** than the difference from the prior position, or just the position plus
+** 2 for the first position. Example:
+**
+** label: A B C D E F G H I J K
+** value: 123 5 9 1 1 14 35 0 234 72 0
+**
+** The 123 value is the first docid. For column zero in this document
+** there are two matches at positions 3 and 10 (5-2 and 9-2+3). The 1
+** at D signals the start of a new column; the 1 at E indicates that the
+** new column is column number 1. There are two positions at 12 and 45
+** (14-2 and 35-2+12). The 0 at H indicates the end-of-document. The
+** 234 at I is the delta to next docid (357). It has one position 70
+** (72-2) and then terminates with the 0 at K.
+**
+** A "position-list" is the list of positions for multiple columns for
+** a single docid. A "column-list" is the set of positions for a single
+** column. Hence, a position-list consists of one or more column-lists,
+** a document record consists of a docid followed by a position-list and
+** a doclist consists of one or more document records.
+**
+** A bare doclist omits the position information, becoming an
+** array of varint-encoded docids.
+**
+**** Segment leaf nodes ****
+** Segment leaf nodes store terms and doclists, ordered by term. Leaf
+** nodes are written using LeafWriter, and read using LeafReader (to
+** iterate through a single leaf node's data) and LeavesReader (to
+** iterate through a segment's entire leaf layer). Leaf nodes have
+** the format:
+**
+** varint iHeight; (height from leaf level, always 0)
+** varint nTerm; (length of first term)
+** char pTerm[nTerm]; (content of first term)
+** varint nDoclist; (length of term's associated doclist)
+** char pDoclist[nDoclist]; (content of doclist)
+** array {
+** (further terms are delta-encoded)
+** varint nPrefix; (length of prefix shared with previous term)
+** varint nSuffix; (length of unshared suffix)
+** char pTermSuffix[nSuffix];(unshared suffix of next term)
+** varint nDoclist; (length of term's associated doclist)
+** char pDoclist[nDoclist]; (content of doclist)
+** }
+**
+** Here, array { X } means zero or more occurrences of X, adjacent in
+** memory.
+**
+** Leaf nodes are broken into blocks which are stored contiguously in
+** the %_segments table in sorted order. This means that when the end
+** of a node is reached, the next term is in the node with the next
+** greater node id.
+**
+** New data is spilled to a new leaf node when the current node
+** exceeds LEAF_MAX bytes (default 2048). New data which itself is
+** larger than STANDALONE_MIN (default 1024) is placed in a standalone
+** node (a leaf node with a single term and doclist). The goal of
+** these settings is to pack together groups of small doclists while
+** making it efficient to directly access large doclists. The
+** assumption is that large doclists represent terms which are more
+** likely to be query targets.
+**
+** TODO(shess) It may be useful for blocking decisions to be more
+** dynamic. For instance, it may make more sense to have a 2.5k leaf
+** node rather than splitting into 2k and .5k nodes. My intuition is
+** that this might extend through 2x or 4x the pagesize.
+**
+**
+**** Segment interior nodes ****
+** Segment interior nodes store blockids for subtree nodes and terms
+** to describe what data is stored by each subtree. Interior
+** nodes are written using InteriorWriter, and read using
+** InteriorReader. InteriorWriters are created as needed when
+** SegmentWriter creates new leaf nodes, or when an interior node
+** itself grows too big and must be split. The format of interior
+** nodes:
+**
+** varint iHeight; (height from leaf level, always >0)
+** varint iBlockid; (block id of node's leftmost subtree)
+** optional {
+** varint nTerm; (length of first term)
+** char pTerm[nTerm]; (content of first term)
+** array {
+** (further terms are delta-encoded)
+** varint nPrefix; (length of shared prefix with previous term)
+** varint nSuffix; (length of unshared suffix)
+** char pTermSuffix[nSuffix]; (unshared suffix of next term)
+** }
+** }
+**
+** Here, optional { X } means an optional element, while array { X }
+** means zero or more occurrences of X, adjacent in memory.
+**
+** An interior node encodes n terms separating n+1 subtrees. The
+** subtree blocks are contiguous, so only the first subtree's blockid
+** is encoded. The subtree at iBlockid will contain all terms less
+** than the first term encoded (or all terms if no term is encoded).
+** Otherwise, for terms greater than or equal to pTerm[i] but less
+** than pTerm[i+1], the subtree for that term will be rooted at
+** iBlockid+i. Interior nodes only store enough term data to
+** distinguish adjacent children (if the rightmost term of the left
+** child is "something", and the leftmost term of the right child is
+** "wicked", only "w" is stored).
+**
+** New data is spilled to a new interior node at the same height when
+** the current node exceeds INTERIOR_MAX bytes (default 2048).
+** INTERIOR_MIN_TERMS (default 7) keeps large terms from monopolizing
+** interior nodes and making the tree too skinny. The interior nodes
+** at a given height are naturally tracked by interior nodes at
+** height+1, and so on.
+**
+**
+**** Segment directory ****
+** The segment directory in table %_segdir stores meta-information for
+** merging and deleting segments, and also the root node of the
+** segment's tree.
+**
+** The root node is the top node of the segment's tree after encoding
+** the entire segment, restricted to ROOT_MAX bytes (default 1024).
+** This could be either a leaf node or an interior node. If the top
+** node requires more than ROOT_MAX bytes, it is flushed to %_segments
+** and a new root interior node is generated (which should always fit
+** within ROOT_MAX because it only needs space for 2 varints, the
+** height and the blockid of the previous root).
+**
+** The meta-information in the segment directory is:
+** level - segment level (see below)
+** idx - index within level
+** - (level,idx uniquely identify a segment)
+** start_block - first leaf node
+** leaves_end_block - last leaf node
+** end_block - last block (including interior nodes)
+** root - contents of root node
+**
+** If the root node is a leaf node, then start_block,
+** leaves_end_block, and end_block are all 0.
+**
+**
+**** Segment merging ****
+** To amortize update costs, segments are grouped into levels and
+** merged in batches. Each increase in level represents exponentially
+** more documents.
+**
+** New documents (actually, document updates) are tokenized and
+** written individually (using LeafWriter) to a level 0 segment, with
+** incrementing idx. When idx reaches MERGE_COUNT (default 16), all
+** level 0 segments are merged into a single level 1 segment. Level 1
+** is populated like level 0, and eventually MERGE_COUNT level 1
+** segments are merged to a single level 2 segment (representing
+** MERGE_COUNT^2 updates), and so on.
+**
+** A segment merge traverses all segments at a given level in
+** parallel, performing a straightforward sorted merge. Since segment
+** leaf nodes are written into the %_segments table in order, this
+** merge traverses the underlying sqlite disk structures efficiently.
+** After the merge, all segment blocks from the merged level are
+** deleted.
+**
+** MERGE_COUNT controls how often we merge segments. 16 seems to be
+** somewhat of a sweet spot for insertion performance. 32 and 64 show
+** very similar performance numbers to 16 on insertion, though they're
+** a tiny bit slower (perhaps due to more overhead in merge-time
+** sorting). 8 is about 20% slower than 16, 4 about 50% slower than
+** 16, 2 about 66% slower than 16.
+**
+** At query time, high MERGE_COUNT increases the number of segments
+** which need to be scanned and merged. For instance, with 100k docs
+** inserted:
+**
+** MERGE_COUNT segments
+** 16 25
+** 8 12
+** 4 10
+** 2 6
+**
+** This appears to have only a moderate impact on queries for very
+** frequent terms (which are somewhat dominated by segment merge
+** costs), and infrequent and non-existent terms still seem to be fast
+** even with many segments.
+**
+** TODO(shess) That said, it would be nice to have a better query-side
+** argument for MERGE_COUNT of 16. Also, it is possible/likely that
+** optimizations to things like doclist merging will swing the sweet
+** spot around.
+**
+**
+**
+**** Handling of deletions and updates ****
+** Since we're using a segmented structure, with no docid-oriented
+** index into the term index, we clearly cannot simply update the term
+** index when a document is deleted or updated. For deletions, we
+** write an empty doclist (varint(docid) varint(POS_END)), for updates
+** we simply write the new doclist. Segment merges overwrite older
+** data for a particular docid with newer data, so deletes or updates
+** will eventually overtake the earlier data and knock it out. The
+** query logic likewise merges doclists so that newer data knocks out
+** older data.
+*/
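+
+/*
+** Illustrative sketch (not part of SQLite): the little-endian varint
+** encoding described above, written out as standalone code. FTS3's real
+** encoder is sqlite3Fts3PutVarint(), defined later in this file; this
+** version exists only to make the byte format concrete.
+**
+**   static int putVarintSketch(unsigned char *p, sqlite3_uint64 v){
+**     int n = 0;
+**     do{
+**       p[n++] = 0x80 | (unsigned char)(v & 0x7f); /* low 7 bits + flag */
+**       v >>= 7;
+**     }while( v );
+**     p[n-1] &= 0x7f;   /* final byte carries a clear continuation flag */
+**     return n;         /* bytes written; at most FTS3_VARINT_MAX */
+**   }
+**
+** For example, putVarintSketch(p, 128) stores 0x80 0x01 and returns 2.
+*/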
+
+/************** Include fts3Int.h in the middle of fts3.c ********************/
+/************** Begin file fts3Int.h *****************************************/
+/*
+** 2009 Nov 12
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+******************************************************************************
+**
+*/
+#ifndef _FTSINT_H
+#define _FTSINT_H
+
+#if !defined(NDEBUG) && !defined(SQLITE_DEBUG)
+# define NDEBUG 1
+#endif
+
+/* FTS3/FTS4 require virtual tables */
+#ifdef SQLITE_OMIT_VIRTUALTABLE
+# undef SQLITE_ENABLE_FTS3
+# undef SQLITE_ENABLE_FTS4
+#endif
+
+/*
+** FTS4 is really an extension for FTS3. It is enabled using the
+** SQLITE_ENABLE_FTS3 macro. But to avoid confusion we also allow
+** the SQLITE_ENABLE_FTS4 macro to serve as an alias for SQLITE_ENABLE_FTS3.
+*/
+#if defined(SQLITE_ENABLE_FTS4) && !defined(SQLITE_ENABLE_FTS3)
+# define SQLITE_ENABLE_FTS3
+#endif
+
+#if !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS3)
+
+/* If not building as part of the core, include sqlite3ext.h. */
+#ifndef SQLITE_CORE
+/* # include "sqlite3ext.h" */
+SQLITE_EXTENSION_INIT3
+#endif
+
+/* #include "sqlite3.h" */
+/************** Include fts3_tokenizer.h in the middle of fts3Int.h **********/
+/************** Begin file fts3_tokenizer.h **********************************/
+/*
+** 2006 July 10
+**
+** The author disclaims copyright to this source code.
+**
+*************************************************************************
+** Defines the interface to tokenizers used by fulltext-search. There
+** are three basic components:
+**
+** sqlite3_tokenizer_module is a singleton defining the tokenizer
+** interface functions. This is essentially the class structure for
+** tokenizers.
+**
+** sqlite3_tokenizer is used to define a particular tokenizer, perhaps
+** including customization information defined at creation time.
+**
+** sqlite3_tokenizer_cursor is generated by a tokenizer to generate
+** tokens from a particular input.
+*/
+#ifndef _FTS3_TOKENIZER_H_
+#define _FTS3_TOKENIZER_H_
+
+/* TODO(shess) Only used for SQLITE_OK and SQLITE_DONE at this time.
+** If tokenizers are to be allowed to call sqlite3_*() functions, then
+** we will need a way to register the API consistently.
+*/
+/* #include "sqlite3.h" */
+
+/*
+** Structures used by the tokenizer interface. When a new tokenizer
+** implementation is registered, the caller provides a pointer to
+** an sqlite3_tokenizer_module containing pointers to the callback
+** functions that make up an implementation.
+**
+** When an fts3 table is created, it passes any arguments passed to
+** the tokenizer clause of the CREATE VIRTUAL TABLE statement to the
+** sqlite3_tokenizer_module.xCreate() function of the requested tokenizer
+** implementation. The xCreate() function in turn returns an
+** sqlite3_tokenizer structure representing the specific tokenizer to
+** be used for the fts3 table (customized by the tokenizer clause arguments).
+**
+** To tokenize an input buffer, the sqlite3_tokenizer_module.xOpen()
+** method is called. It returns an sqlite3_tokenizer_cursor object
+** that may be used to tokenize a specific input buffer based on
+** the tokenization rules supplied by a specific sqlite3_tokenizer
+** object.
+*/
+typedef struct sqlite3_tokenizer_module sqlite3_tokenizer_module;
+typedef struct sqlite3_tokenizer sqlite3_tokenizer;
+typedef struct sqlite3_tokenizer_cursor sqlite3_tokenizer_cursor;
+
+struct sqlite3_tokenizer_module {
+
+ /*
+ ** Structure version. Should always be set to 0 or 1.
+ */
+ int iVersion;
+
+ /*
+ ** Create a new tokenizer. The values in the argv[] array are the
+ ** arguments passed to the "tokenizer" clause of the CREATE VIRTUAL
+ ** TABLE statement that created the fts3 table. For example, if
+ ** the following SQL is executed:
+ **
+ ** CREATE .. USING fts3( ... , tokenizer <tokenizer-name> arg1 arg2)
+ **
+ ** then argc is set to 2, and the argv[] array contains pointers
+ ** to the strings "arg1" and "arg2".
+ **
+ ** This method should return either SQLITE_OK (0), or an SQLite error
+ ** code. If SQLITE_OK is returned, then *ppTokenizer should be set
+ ** to point at the newly created tokenizer structure. The generic
+ ** sqlite3_tokenizer.pModule variable should not be initialized by
+ ** this callback. The caller will do so.
+ */
+ int (*xCreate)(
+ int argc, /* Size of argv array */
+ const char *const*argv, /* Tokenizer argument strings */
+ sqlite3_tokenizer **ppTokenizer /* OUT: Created tokenizer */
+ );
+
+ /*
+ ** Destroy an existing tokenizer. The fts3 module calls this method
+ ** exactly once for each successful call to xCreate().
+ */
+ int (*xDestroy)(sqlite3_tokenizer *pTokenizer);
+
+ /*
+ ** Create a tokenizer cursor to tokenize an input buffer. The caller
+ ** is responsible for ensuring that the input buffer remains valid
+ ** until the cursor is closed (using the xClose() method).
+ */
+ int (*xOpen)(
+ sqlite3_tokenizer *pTokenizer, /* Tokenizer object */
+ const char *pInput, int nBytes, /* Input buffer */
+ sqlite3_tokenizer_cursor **ppCursor /* OUT: Created tokenizer cursor */
+ );
+
+ /*
+ ** Destroy an existing tokenizer cursor. The fts3 module calls this
+ ** method exactly once for each successful call to xOpen().
+ */
+ int (*xClose)(sqlite3_tokenizer_cursor *pCursor);
+
+ /*
+ ** Retrieve the next token from the tokenizer cursor pCursor. This
+ ** method should either return SQLITE_OK and set the values of the
+ ** "OUT" variables identified below, or SQLITE_DONE to indicate that
+ ** the end of the buffer has been reached, or an SQLite error code.
+ **
+ ** *ppToken should be set to point at a buffer containing the
+ ** normalized version of the token (i.e. after any case-folding and/or
+ ** stemming has been performed). *pnBytes should be set to the length
+ ** of this buffer in bytes. The input text that generated the token is
+ ** identified by the byte offsets returned in *piStartOffset and
+ ** *piEndOffset. *piStartOffset should be set to the index of the first
+ ** byte of the token in the input buffer. *piEndOffset should be set
+ ** to the index of the first byte just past the end of the token in
+ ** the input buffer.
+ **
+  ** The buffer that *ppToken is set to point at is managed by the tokenizer
+ ** implementation. It is only required to be valid until the next call
+ ** to xNext() or xClose().
+ */
+ /* TODO(shess) current implementation requires pInput to be
+ ** nul-terminated. This should either be fixed, or pInput/nBytes
+ ** should be converted to zInput.
+ */
+ int (*xNext)(
+ sqlite3_tokenizer_cursor *pCursor, /* Tokenizer cursor */
+ const char **ppToken, int *pnBytes, /* OUT: Normalized text for token */
+ int *piStartOffset, /* OUT: Byte offset of token in input buffer */
+ int *piEndOffset, /* OUT: Byte offset of end of token in input buffer */
+ int *piPosition /* OUT: Number of tokens returned before this one */
+ );
+
+ /***********************************************************************
+ ** Methods below this point are only available if iVersion>=1.
+ */
+
+ /*
+ ** Configure the language id of a tokenizer cursor.
+ */
+ int (*xLanguageid)(sqlite3_tokenizer_cursor *pCsr, int iLangid);
+};
+
+struct sqlite3_tokenizer {
+ const sqlite3_tokenizer_module *pModule; /* The module for this tokenizer */
+ /* Tokenizer implementations will typically add additional fields */
+};
+
+struct sqlite3_tokenizer_cursor {
+ sqlite3_tokenizer *pTokenizer; /* Tokenizer for this cursor. */
+ /* Tokenizer implementations will typically add additional fields */
+};
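+
+/*
+** Illustrative sketch (not part of SQLite): driving a tokenizer through
+** the interface above. Error handling is abbreviated; the module pointer
+** is assumed to come from a tokenizer registered with FTS3.
+**
+**   static int tokenizeSketch(const sqlite3_tokenizer_module *pMod,
+**                             const char *zInput){
+**     sqlite3_tokenizer *pTok = 0;
+**     sqlite3_tokenizer_cursor *pCsr = 0;
+**     const char *zTok; int nTok, iStart, iEnd, iPos;
+**     int rc = pMod->xCreate(0, 0, &pTok);  /* no tokenizer arguments */
+**     if( rc!=SQLITE_OK ) return rc;
+**     pTok->pModule = pMod;                 /* the caller sets pModule */
+**     rc = pMod->xOpen(pTok, zInput, (int)strlen(zInput), &pCsr);
+**     if( rc==SQLITE_OK ){
+**       pCsr->pTokenizer = pTok;
+**       while( pMod->xNext(pCsr, &zTok, &nTok, &iStart, &iEnd, &iPos)
+**              ==SQLITE_OK ){
+**         printf("%d: %.*s\n", iPos, nTok, zTok);
+**       }
+**       pMod->xClose(pCsr);
+**     }
+**     pMod->xDestroy(pTok);
+**     return SQLITE_OK;
+**   }
+*/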
+
+int fts3_global_term_cnt(int iTerm, int iCol);
+int fts3_term_cnt(int iTerm, int iCol);
+
+
+#endif /* _FTS3_TOKENIZER_H_ */
+
+/************** End of fts3_tokenizer.h **************************************/
+/************** Continuing where we left off in fts3Int.h ********************/
+/************** Include fts3_hash.h in the middle of fts3Int.h ***************/
+/************** Begin file fts3_hash.h ***************************************/
+/*
+** 2001 September 22
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+** This is the header file for the generic hash-table implementation
+** used in SQLite. We've modified it slightly to serve as a standalone
+** hash table implementation for the full-text indexing module.
+**
+*/
+#ifndef _FTS3_HASH_H_
+#define _FTS3_HASH_H_
+
+/* Forward declarations of structures. */
+typedef struct Fts3Hash Fts3Hash;
+typedef struct Fts3HashElem Fts3HashElem;
+
+/* A complete hash table is an instance of the following structure.
+** The internals of this structure are intended to be opaque -- client
+** code should not attempt to access or modify the fields of this structure
+** directly. Change this structure only by using the routines below.
+** However, many of the "procedures" and "functions" for modifying and
+** accessing this structure are really macros, so we can't really make
+** this structure opaque.
+*/
+struct Fts3Hash {
+ char keyClass; /* HASH_INT, _POINTER, _STRING, _BINARY */
+ char copyKey; /* True if copy of key made on insert */
+ int count; /* Number of entries in this table */
+ Fts3HashElem *first; /* The first element of the array */
+ int htsize; /* Number of buckets in the hash table */
+ struct _fts3ht { /* the hash table */
+ int count; /* Number of entries with this hash */
+ Fts3HashElem *chain; /* Pointer to first entry with this hash */
+ } *ht;
+};
+
+/* Each element in the hash table is an instance of the following
+** structure. All elements are stored on a single doubly-linked list.
+**
+** Again, this structure is intended to be opaque, but it can't really
+** be opaque because it is used by macros.
+*/
+struct Fts3HashElem {
+ Fts3HashElem *next, *prev; /* Next and previous elements in the table */
+ void *data; /* Data associated with this element */
+ void *pKey; int nKey; /* Key associated with this element */
+};
+
+/*
+** There are 2 different modes of operation for a hash table:
+**
+** FTS3_HASH_STRING pKey points to a string that is nKey bytes long
+** (including the null-terminator, if any). Case
+** is respected in comparisons.
+**
+** FTS3_HASH_BINARY pKey points to binary data nKey bytes long.
+** memcmp() is used to compare keys.
+**
+** A copy of the key is made if the copyKey parameter to fts3HashInit is 1.
+*/
+#define FTS3_HASH_STRING 1
+#define FTS3_HASH_BINARY 2
+
+/*
+** Access routines. To delete, insert a NULL pointer.
+*/
+SQLITE_PRIVATE void sqlite3Fts3HashInit(Fts3Hash *pNew, char keyClass, char copyKey);
+SQLITE_PRIVATE void *sqlite3Fts3HashInsert(Fts3Hash*, const void *pKey, int nKey, void *pData);
+SQLITE_PRIVATE void *sqlite3Fts3HashFind(const Fts3Hash*, const void *pKey, int nKey);
+SQLITE_PRIVATE void sqlite3Fts3HashClear(Fts3Hash*);
+SQLITE_PRIVATE Fts3HashElem *sqlite3Fts3HashFindElem(const Fts3Hash *, const void *, int);
+
+/*
+** Shorthand for the functions above
+*/
+#define fts3HashInit sqlite3Fts3HashInit
+#define fts3HashInsert sqlite3Fts3HashInsert
+#define fts3HashFind sqlite3Fts3HashFind
+#define fts3HashClear sqlite3Fts3HashClear
+#define fts3HashFindElem sqlite3Fts3HashFindElem
+
+/*
+** Macros for looping over all elements of a hash table. The idiom is
+** like this:
+**
+** Fts3Hash h;
+** Fts3HashElem *p;
+** ...
+** for(p=fts3HashFirst(&h); p; p=fts3HashNext(p)){
+** SomeStructure *pData = fts3HashData(p);
+** // do something with pData
+** }
+*/
+#define fts3HashFirst(H) ((H)->first)
+#define fts3HashNext(E) ((E)->next)
+#define fts3HashData(E) ((E)->data)
+#define fts3HashKey(E) ((E)->pKey)
+#define fts3HashKeysize(E) ((E)->nKey)
+
+/*
+** Number of entries in a hash table
+*/
+#define fts3HashCount(H) ((H)->count)
+
+#endif /* _FTS3_HASH_H_ */
+
+/************** End of fts3_hash.h *******************************************/
+/************** Continuing where we left off in fts3Int.h ********************/
+
+/*
+** This constant determines the maximum depth of an FTS expression tree
+** that the library will create and use. FTS uses recursion to perform
+** various operations on the query tree, so the disadvantage of a large
+** limit is that it may allow very large queries to use large amounts
+** of stack space (perhaps causing a stack overflow).
+*/
+#ifndef SQLITE_FTS3_MAX_EXPR_DEPTH
+# define SQLITE_FTS3_MAX_EXPR_DEPTH 12
+#endif
+
+
+/*
+** This constant controls how often segments are merged. Once there are
+** FTS3_MERGE_COUNT segments of level N, they are merged into a single
+** segment of level N+1.
+*/
+#define FTS3_MERGE_COUNT 16
+
+/*
+** This is the maximum amount of data (in bytes) to store in the
+** Fts3Table.pendingTerms hash table. Normally, the hash table is
+** populated as documents are inserted/updated/deleted in a transaction
+** and used to create a new segment when the transaction is committed.
+** However if this limit is reached midway through a transaction, a new
+** segment is created and the hash table cleared immediately.
+*/
+#define FTS3_MAX_PENDING_DATA (1*1024*1024)
+
+/*
+** Macro to return the number of elements in an array. SQLite has a
+** similar macro called ArraySize(). Use a different name to avoid
+** a collision when building an amalgamation with built-in FTS3.
+*/
+#define SizeofArray(X) ((int)(sizeof(X)/sizeof(X[0])))
+
+
+#ifndef MIN
+# define MIN(x,y) ((x)<(y)?(x):(y))
+#endif
+#ifndef MAX
+# define MAX(x,y) ((x)>(y)?(x):(y))
+#endif
+
+/*
+** Maximum length of a varint encoded integer. The varint format is different
+** from that used by SQLite, so the maximum length is 10, not 9.
+*/
+#define FTS3_VARINT_MAX 10
+
+#define FTS3_BUFFER_PADDING 8
+
+/*
+** FTS4 virtual tables may maintain multiple indexes - one index of all terms
+** in the document set and zero or more prefix indexes. All indexes are stored
+** as one or more b+-trees in the %_segments and %_segdir tables.
+**
+** It is possible to determine which index a b+-tree belongs to based on the
+** value stored in the "%_segdir.level" column. Given this value L, the index
+** that the b+-tree belongs to is (L>>10). In other words, all b+-trees with
+** level values between 0 and 1023 (inclusive) belong to index 0, all levels
+** between 1024 and 2047 to index 1, and so on.
+**
+** It is considered impossible for an index to use more than 1024 levels. In
+** theory this could happen, but only after at least
+** (FTS3_MERGE_COUNT^1024) separate flushes of the pending-terms tables.
+*/
+#define FTS3_SEGDIR_MAXLEVEL 1024
+#define FTS3_SEGDIR_MAXLEVEL_STR "1024"
+
+/*
+** The testcase() macro is only used by the amalgamation. If undefined,
+** make it a no-op.
+*/
+#ifndef testcase
+# define testcase(X)
+#endif
+
+/*
+** Terminator values for position-lists and column-lists.
+*/
+#define POS_COLUMN (1) /* Column-list terminator */
+#define POS_END (0) /* Position-list terminator */
+
+/*
+** The assert_fts3_nc() macro is similar to the assert() macro, except that it
+** is used for assert() conditions that are true only if it can be
+** guaranteed that the database is not corrupt.
+*/
+#if defined(SQLITE_DEBUG) || defined(SQLITE_TEST)
+SQLITE_API extern int sqlite3_fts3_may_be_corrupt;
+# define assert_fts3_nc(x) assert(sqlite3_fts3_may_be_corrupt || (x))
+#else
+# define assert_fts3_nc(x) assert(x)
+#endif
+
+/*
+** This section provides definitions to allow the
+** FTS3 extension to be compiled outside of the
+** amalgamation.
+*/
+#ifndef SQLITE_AMALGAMATION
+/*
+** Macros indicating that conditional expressions are always true or
+** false.
+*/
+#ifdef SQLITE_COVERAGE_TEST
+# define ALWAYS(x) (1)
+# define NEVER(X) (0)
+#elif defined(SQLITE_DEBUG)
+# define ALWAYS(x) sqlite3Fts3Always((x)!=0)
+# define NEVER(x) sqlite3Fts3Never((x)!=0)
+SQLITE_PRIVATE int sqlite3Fts3Always(int b);
+SQLITE_PRIVATE int sqlite3Fts3Never(int b);
+#else
+# define ALWAYS(x) (x)
+# define NEVER(x) (x)
+#endif
+
+/*
+** Internal types used by SQLite.
+*/
+typedef unsigned char u8; /* 1-byte (or larger) unsigned integer */
+typedef short int i16; /* 2-byte (or larger) signed integer */
+typedef unsigned int u32; /* 4-byte unsigned integer */
+typedef sqlite3_uint64 u64; /* 8-byte unsigned integer */
+typedef sqlite3_int64 i64; /* 8-byte signed integer */
+
+/*
+** Macro used to suppress compiler warnings for unused parameters.
+*/
+#define UNUSED_PARAMETER(x) (void)(x)
+
+/*
+** Activate assert() only if SQLITE_TEST is enabled.
+*/
+#if !defined(NDEBUG) && !defined(SQLITE_DEBUG)
+# define NDEBUG 1
+#endif
+
+/*
+** The TESTONLY macro is used to enclose variable declarations or
+** other bits of code that are needed to support the arguments
+** within testcase() and assert() macros.
+*/
+#if defined(SQLITE_DEBUG) || defined(SQLITE_COVERAGE_TEST)
+# define TESTONLY(X) X
+#else
+# define TESTONLY(X)
+#endif
+
+#define LARGEST_INT64 (0xffffffff|(((i64)0x7fffffff)<<32))
+#define SMALLEST_INT64 (((i64)-1) - LARGEST_INT64)
+
+#define deliberate_fall_through
+
+#endif /* SQLITE_AMALGAMATION */
+
+#ifdef SQLITE_DEBUG
+SQLITE_PRIVATE int sqlite3Fts3Corrupt(void);
+# define FTS_CORRUPT_VTAB sqlite3Fts3Corrupt()
+#else
+# define FTS_CORRUPT_VTAB SQLITE_CORRUPT_VTAB
+#endif
+
+typedef struct Fts3Table Fts3Table;
+typedef struct Fts3Cursor Fts3Cursor;
+typedef struct Fts3Expr Fts3Expr;
+typedef struct Fts3Phrase Fts3Phrase;
+typedef struct Fts3PhraseToken Fts3PhraseToken;
+
+typedef struct Fts3Doclist Fts3Doclist;
+typedef struct Fts3SegFilter Fts3SegFilter;
+typedef struct Fts3DeferredToken Fts3DeferredToken;
+typedef struct Fts3SegReader Fts3SegReader;
+typedef struct Fts3MultiSegReader Fts3MultiSegReader;
+
+typedef struct MatchinfoBuffer MatchinfoBuffer;
+
+/*
+** A connection to a fulltext index is an instance of the following
+** structure. The xCreate and xConnect methods create an instance
+** of this structure and xDestroy and xDisconnect free that instance.
+** All other methods receive a pointer to the structure as one of their
+** arguments.
+*/
+struct Fts3Table {
+ sqlite3_vtab base; /* Base class used by SQLite core */
+ sqlite3 *db; /* The database connection */
+ const char *zDb; /* logical database name */
+ const char *zName; /* virtual table name */
+ int nColumn; /* number of named columns in virtual table */
+ char **azColumn; /* column names. malloced */
+ u8 *abNotindexed; /* True for 'notindexed' columns */
+ sqlite3_tokenizer *pTokenizer; /* tokenizer for inserts and queries */
+ char *zContentTbl; /* content=xxx option, or NULL */
+ char *zLanguageid; /* languageid=xxx option, or NULL */
+ int nAutoincrmerge; /* Value configured by 'automerge' */
+ u32 nLeafAdd; /* Number of leaf blocks added this trans */
+ int bLock; /* Used to prevent recursive content= tbls */
+
+ /* Precompiled statements used by the implementation. Each of these
+ ** statements is run and reset within a single virtual table API call.
+ */
+ sqlite3_stmt *aStmt[40];
+ sqlite3_stmt *pSeekStmt; /* Cache for fts3CursorSeekStmt() */
+
+ char *zReadExprlist;
+ char *zWriteExprlist;
+
+ int nNodeSize; /* Soft limit for node size */
+ u8 bFts4; /* True for FTS4, false for FTS3 */
+ u8 bHasStat; /* True if %_stat table exists (2==unknown) */
+ u8 bHasDocsize; /* True if %_docsize table exists */
+ u8 bDescIdx; /* True if doclists are in reverse order */
+ u8 bIgnoreSavepoint; /* True to ignore xSavepoint invocations */
+ int nPgsz; /* Page size for host database */
+ char *zSegmentsTbl; /* Name of %_segments table */
+ sqlite3_blob *pSegments; /* Blob handle open on %_segments table */
+
+ /*
+ ** The following array of hash tables is used to buffer pending index
+ ** updates during transactions. All pending updates buffered at any one
+ ** time must share a common language-id (see the FTS4 langid= feature).
+ ** The current language id is stored in variable iPrevLangid.
+ **
+ ** A single FTS4 table may have multiple full-text indexes. For each index
+ ** there is an entry in the aIndex[] array. Index 0 is an index of all the
+ ** terms that appear in the document set. Each subsequent index in aIndex[]
+ ** is an index of prefixes of a specific length.
+ **
+  ** Variable nPendingData contains an estimate of the memory consumed by the
+ ** pending data structures, including hash table overhead, but not including
+ ** malloc overhead. When nPendingData exceeds nMaxPendingData, all hash
+ ** tables are flushed to disk. Variable iPrevDocid is the docid of the most
+ ** recently inserted record.
+ */
+ int nIndex; /* Size of aIndex[] */
+ struct Fts3Index {
+ int nPrefix; /* Prefix length (0 for main terms index) */
+ Fts3Hash hPending; /* Pending terms table for this index */
+ } *aIndex;
+ int nMaxPendingData; /* Max pending data before flush to disk */
+ int nPendingData; /* Current bytes of pending data */
+ sqlite_int64 iPrevDocid; /* Docid of most recently inserted document */
+ int iPrevLangid; /* Langid of recently inserted document */
+ int bPrevDelete; /* True if last operation was a delete */
+
+#if defined(SQLITE_DEBUG) || defined(SQLITE_COVERAGE_TEST)
+ /* State variables used for validating that the transaction control
+ ** methods of the virtual table are called at appropriate times. These
+ ** values do not contribute to FTS functionality; they are used for
+ ** verifying the operation of the SQLite core.
+ */
+ int inTransaction; /* True after xBegin but before xCommit/xRollback */
+ int mxSavepoint; /* Largest valid xSavepoint integer */
+#endif
+
+#if defined(SQLITE_DEBUG) || defined(SQLITE_TEST)
+  /* True to disable the incremental doclist optimization. This is controlled
+ ** by special insert command 'test-no-incr-doclist'. */
+ int bNoIncrDoclist;
+
+ /* Number of segments in a level */
+ int nMergeCount;
+#endif
+};
+
+/* Macro to find the number of segments to merge */
+#if defined(SQLITE_DEBUG) || defined(SQLITE_TEST)
+# define MergeCount(P) ((P)->nMergeCount)
+#else
+# define MergeCount(P) FTS3_MERGE_COUNT
+#endif
+
+/*
+** When the core wants to read from the virtual table, it creates a
+** virtual table cursor (an instance of the following structure) using
+** the xOpen method. Cursors are destroyed using the xClose method.
+*/
+struct Fts3Cursor {
+ sqlite3_vtab_cursor base; /* Base class used by SQLite core */
+ i16 eSearch; /* Search strategy (see below) */
+ u8 isEof; /* True if at End Of Results */
+ u8 isRequireSeek; /* True if must seek pStmt to %_content row */
+ u8 bSeekStmt; /* True if pStmt is a seek */
+ sqlite3_stmt *pStmt; /* Prepared statement in use by the cursor */
+ Fts3Expr *pExpr; /* Parsed MATCH query string */
+ int iLangid; /* Language being queried for */
+ int nPhrase; /* Number of matchable phrases in query */
+ Fts3DeferredToken *pDeferred; /* Deferred search tokens, if any */
+ sqlite3_int64 iPrevId; /* Previous id read from aDoclist */
+ char *pNextId; /* Pointer into the body of aDoclist */
+ char *aDoclist; /* List of docids for full-text queries */
+ int nDoclist; /* Size of buffer at aDoclist */
+ u8 bDesc; /* True to sort in descending order */
+ int eEvalmode; /* An FTS3_EVAL_XX constant */
+ int nRowAvg; /* Average size of database rows, in pages */
+ sqlite3_int64 nDoc; /* Documents in table */
+ i64 iMinDocid; /* Minimum docid to return */
+ i64 iMaxDocid; /* Maximum docid to return */
+ int isMatchinfoNeeded; /* True when aMatchinfo[] needs filling in */
+ MatchinfoBuffer *pMIBuffer; /* Buffer for matchinfo data */
+};
+
+#define FTS3_EVAL_FILTER 0
+#define FTS3_EVAL_NEXT 1
+#define FTS3_EVAL_MATCHINFO 2
+
+/*
+** The Fts3Cursor.eSearch member is always set to one of the following.
+** Actually, Fts3Cursor.eSearch can be greater than or equal to
+** FTS3_FULLTEXT_SEARCH. If so, then Fts3Cursor.eSearch - 2 is the index
+** of the column to be searched. For example, in
+**
+** CREATE VIRTUAL TABLE ex1 USING fts3(a,b,c,d);
+** SELECT docid FROM ex1 WHERE b MATCH 'one two three';
+**
+** Because the LHS of the MATCH operator is the 2nd column "b",
+** Fts3Cursor.eSearch will be set to FTS3_FULLTEXT_SEARCH+1. (+0 for a,
+** +1 for b, +2 for c, +3 for d.) If the LHS of MATCH were "ex1"
+** indicating that all columns should be searched,
+** then eSearch would be set to FTS3_FULLTEXT_SEARCH+4.
+*/
+#define FTS3_FULLSCAN_SEARCH 0 /* Linear scan of %_content table */
+#define FTS3_DOCID_SEARCH 1 /* Lookup by rowid on %_content table */
+#define FTS3_FULLTEXT_SEARCH 2 /* Full-text index search */
+
+/*
+** The lower 16-bits of the sqlite3_index_info.idxNum value set by
+** the xBestIndex() method contains the Fts3Cursor.eSearch value described
+** above. The upper 16-bits contain a combination of the following
+** bits, used to describe extra constraints on full-text searches.
+*/
+#define FTS3_HAVE_LANGID 0x00010000 /* languageid=? */
+#define FTS3_HAVE_DOCID_GE 0x00020000 /* docid>=? */
+#define FTS3_HAVE_DOCID_LE 0x00040000 /* docid<=? */
+
+struct Fts3Doclist {
+ char *aAll; /* Array containing doclist (or NULL) */
+ int nAll; /* Size of a[] in bytes */
+ char *pNextDocid; /* Pointer to next docid */
+
+ sqlite3_int64 iDocid; /* Current docid (if pList!=0) */
+ int bFreeList; /* True if pList should be sqlite3_free()d */
+ char *pList; /* Pointer to position list following iDocid */
+ int nList; /* Length of position list */
+};
+
+/*
+** A "phrase" is a sequence of one or more tokens that must match in
+** sequence. A single token is the base case and the most common case.
+** For a sequence of tokens contained in double-quotes (i.e. "one two three")
+** nToken will be the number of tokens in the string.
+*/
+struct Fts3PhraseToken {
+ char *z; /* Text of the token */
+ int n; /* Number of bytes in buffer z */
+ int isPrefix; /* True if token ends with a "*" character */
+ int bFirst; /* True if token must appear at position 0 */
+
+ /* Variables above this point are populated when the expression is
+ ** parsed (by code in fts3_expr.c). Below this point the variables are
+ ** used when evaluating the expression. */
+ Fts3DeferredToken *pDeferred; /* Deferred token object for this token */
+ Fts3MultiSegReader *pSegcsr; /* Segment-reader for this token */
+};
+
+struct Fts3Phrase {
+ /* Cache of doclist for this phrase. */
+ Fts3Doclist doclist;
+ int bIncr; /* True if doclist is loaded incrementally */
+ int iDoclistToken;
+
+ /* Used by sqlite3Fts3EvalPhrasePoslist() if this is a descendant of an
+ ** OR condition. */
+ char *pOrPoslist;
+ i64 iOrDocid;
+
+ /* Variables below this point are populated by fts3_expr.c when parsing
+ ** a MATCH expression. Everything above is part of the evaluation phase.
+ */
+ int nToken; /* Number of tokens in the phrase */
+ int iColumn; /* Index of column this phrase must match */
+ Fts3PhraseToken aToken[1]; /* One entry for each token in the phrase */
+};
+
+/*
+** A tree of these objects forms the RHS of a MATCH operator.
+**
+** If Fts3Expr.eType is FTSQUERY_PHRASE and isLoaded is true, then aDoclist
+** points to a malloced buffer, size nDoclist bytes, containing the results
+** of this phrase query in FTS3 doclist format. As usual, the initial
+** "Length" field found in doclists stored on disk is omitted from this
+** buffer.
+**
+** Variable aMI is used only for FTSQUERY_NEAR nodes to store the global
+** matchinfo data. If it is not NULL, it points to an array of size nCol*3,
+** where nCol is the number of columns in the queried FTS table. The array
+** is populated as follows:
+**
+** aMI[iCol*3 + 0] = Undefined
+** aMI[iCol*3 + 1] = Number of occurrences
+** aMI[iCol*3 + 2] = Number of rows containing at least one instance
+**
+** The aMI array is allocated using sqlite3_malloc(). It should be freed
+** when the expression node is.
+*/
+struct Fts3Expr {
+ int eType; /* One of the FTSQUERY_XXX values defined below */
+ int nNear; /* Valid if eType==FTSQUERY_NEAR */
+ Fts3Expr *pParent; /* pParent->pLeft==this or pParent->pRight==this */
+ Fts3Expr *pLeft; /* Left operand */
+ Fts3Expr *pRight; /* Right operand */
+ Fts3Phrase *pPhrase; /* Valid if eType==FTSQUERY_PHRASE */
+
+ /* The following are used by the fts3_eval.c module. */
+ sqlite3_int64 iDocid; /* Current docid */
+ u8 bEof; /* True if this expression is at EOF already */
+ u8 bStart; /* True if iDocid is valid */
+ u8 bDeferred; /* True if this expression is entirely deferred */
+
+ /* The following are used by the fts3_snippet.c module. */
+ int iPhrase; /* Index of this phrase in matchinfo() results */
+ u32 *aMI; /* See above */
+};
+
+/*
+** Candidate values for Fts3Expr.eType. Note that the order of the first
+** four values is in order of precedence when parsing expressions. For
+** example, the following:
+**
+** "a OR b AND c NOT d NEAR e"
+**
+** is equivalent to:
+**
+** "a OR (b AND (c NOT (d NEAR e)))"
+*/
+#define FTSQUERY_NEAR 1
+#define FTSQUERY_NOT 2
+#define FTSQUERY_AND 3
+#define FTSQUERY_OR 4
+#define FTSQUERY_PHRASE 5
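+
+/*
+** For illustration (an editorial sketch, not from the original sources),
+** the parse of "a OR b AND c NOT d NEAR e" above corresponds to the
+** following Fts3Expr tree:
+**
+**               OR
+**              /  \
+**             a   AND
+**                /   \
+**               b    NOT
+**                   /   \
+**                  c    NEAR
+**                      /    \
+**                     d      e
+**
+** Leaf nodes are FTSQUERY_PHRASE expressions; each interior node stores
+** its operator in Fts3Expr.eType and its operands in pLeft and pRight.
+*/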
+
+
+/* fts3_write.c */
+SQLITE_PRIVATE int sqlite3Fts3UpdateMethod(sqlite3_vtab*,int,sqlite3_value**,sqlite3_int64*);
+SQLITE_PRIVATE int sqlite3Fts3PendingTermsFlush(Fts3Table *);
+SQLITE_PRIVATE void sqlite3Fts3PendingTermsClear(Fts3Table *);
+SQLITE_PRIVATE int sqlite3Fts3Optimize(Fts3Table *);
+SQLITE_PRIVATE int sqlite3Fts3SegReaderNew(int, int, sqlite3_int64,
+ sqlite3_int64, sqlite3_int64, const char *, int, Fts3SegReader**);
+SQLITE_PRIVATE int sqlite3Fts3SegReaderPending(
+ Fts3Table*,int,const char*,int,int,Fts3SegReader**);
+SQLITE_PRIVATE void sqlite3Fts3SegReaderFree(Fts3SegReader *);
+SQLITE_PRIVATE int sqlite3Fts3AllSegdirs(Fts3Table*, int, int, int, sqlite3_stmt **);
+SQLITE_PRIVATE int sqlite3Fts3ReadBlock(Fts3Table*, sqlite3_int64, char **, int*, int*);
+
+SQLITE_PRIVATE int sqlite3Fts3SelectDoctotal(Fts3Table *, sqlite3_stmt **);
+SQLITE_PRIVATE int sqlite3Fts3SelectDocsize(Fts3Table *, sqlite3_int64, sqlite3_stmt **);
+
+#ifndef SQLITE_DISABLE_FTS4_DEFERRED
+SQLITE_PRIVATE void sqlite3Fts3FreeDeferredTokens(Fts3Cursor *);
+SQLITE_PRIVATE int sqlite3Fts3DeferToken(Fts3Cursor *, Fts3PhraseToken *, int);
+SQLITE_PRIVATE int sqlite3Fts3CacheDeferredDoclists(Fts3Cursor *);
+SQLITE_PRIVATE void sqlite3Fts3FreeDeferredDoclists(Fts3Cursor *);
+SQLITE_PRIVATE int sqlite3Fts3DeferredTokenList(Fts3DeferredToken *, char **, int *);
+#else
+# define sqlite3Fts3FreeDeferredTokens(x)
+# define sqlite3Fts3DeferToken(x,y,z) SQLITE_OK
+# define sqlite3Fts3CacheDeferredDoclists(x) SQLITE_OK
+# define sqlite3Fts3FreeDeferredDoclists(x)
+# define sqlite3Fts3DeferredTokenList(x,y,z) SQLITE_OK
+#endif
+
+SQLITE_PRIVATE void sqlite3Fts3SegmentsClose(Fts3Table *);
+SQLITE_PRIVATE int sqlite3Fts3MaxLevel(Fts3Table *, int *);
+
+/* Special values interpreted by sqlite3SegReaderCursor() */
+#define FTS3_SEGCURSOR_PENDING -1
+#define FTS3_SEGCURSOR_ALL -2
+
+SQLITE_PRIVATE int sqlite3Fts3SegReaderStart(Fts3Table*, Fts3MultiSegReader*, Fts3SegFilter*);
+SQLITE_PRIVATE int sqlite3Fts3SegReaderStep(Fts3Table *, Fts3MultiSegReader *);
+SQLITE_PRIVATE void sqlite3Fts3SegReaderFinish(Fts3MultiSegReader *);
+
+SQLITE_PRIVATE int sqlite3Fts3SegReaderCursor(Fts3Table *,
+ int, int, int, const char *, int, int, int, Fts3MultiSegReader *);
+
+/* Flags allowed as part of the 4th argument to SegmentReaderIterate() */
+#define FTS3_SEGMENT_REQUIRE_POS 0x00000001
+#define FTS3_SEGMENT_IGNORE_EMPTY 0x00000002
+#define FTS3_SEGMENT_COLUMN_FILTER 0x00000004
+#define FTS3_SEGMENT_PREFIX 0x00000008
+#define FTS3_SEGMENT_SCAN 0x00000010
+#define FTS3_SEGMENT_FIRST 0x00000020
+
+/* Type passed as 4th argument to SegmentReaderIterate() */
+struct Fts3SegFilter {
+ const char *zTerm;
+ int nTerm;
+ int iCol;
+ int flags;
+};
+
+struct Fts3MultiSegReader {
+ /* Used internally by sqlite3Fts3SegReaderXXX() calls */
+ Fts3SegReader **apSegment; /* Array of Fts3SegReader objects */
+ int nSegment; /* Size of apSegment array */
+ int nAdvance; /* How many seg-readers to advance */
+ Fts3SegFilter *pFilter; /* Pointer to filter object */
+ char *aBuffer; /* Buffer to merge doclists in */
+ int nBuffer; /* Allocated size of aBuffer[] in bytes */
+
+ int iColFilter; /* If >=0, filter for this column */
+ int bRestart;
+
+ /* Used by fts3.c only. */
+ int nCost; /* Cost of running iterator */
+ int bLookup; /* True if a lookup of a single entry. */
+
+ /* Output values. Valid only after Fts3SegReaderStep() returns SQLITE_ROW. */
+ char *zTerm; /* Pointer to term buffer */
+ int nTerm; /* Size of zTerm in bytes */
+ char *aDoclist; /* Pointer to doclist buffer */
+ int nDoclist; /* Size of aDoclist[] in bytes */
+};
+
+SQLITE_PRIVATE int sqlite3Fts3Incrmerge(Fts3Table*,int,int);
+
+#define fts3GetVarint32(p, piVal) ( \
+ (*(u8*)(p)&0x80) ? sqlite3Fts3GetVarint32(p, piVal) : (*piVal=*(u8*)(p), 1) \
+)
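+
+/*
+** Usage sketch (editorial, not from the original sources): the macro
+** above inlines the common single-byte case. If the first byte has its
+** high bit (0x80) clear, the varint is that byte itself and exactly one
+** byte is consumed:
+**
+**   int n;
+**   const char a[1] = { 0x05 };
+**   fts3GetVarint32(a, &n);      -- sets n to 5 and evaluates to 1
+**
+** Only multi-byte varints fall through to sqlite3Fts3GetVarint32().
+*/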
+
+/* fts3.c */
+SQLITE_PRIVATE void sqlite3Fts3ErrMsg(char**,const char*,...);
+SQLITE_PRIVATE int sqlite3Fts3PutVarint(char *, sqlite3_int64);
+SQLITE_PRIVATE int sqlite3Fts3GetVarint(const char *, sqlite_int64 *);
+SQLITE_PRIVATE int sqlite3Fts3GetVarintU(const char *, sqlite_uint64 *);
+SQLITE_PRIVATE int sqlite3Fts3GetVarintBounded(const char*,const char*,sqlite3_int64*);
+SQLITE_PRIVATE int sqlite3Fts3GetVarint32(const char *, int *);
+SQLITE_PRIVATE int sqlite3Fts3VarintLen(sqlite3_uint64);
+SQLITE_PRIVATE void sqlite3Fts3Dequote(char *);
+SQLITE_PRIVATE void sqlite3Fts3DoclistPrev(int,char*,int,char**,sqlite3_int64*,int*,u8*);
+SQLITE_PRIVATE int sqlite3Fts3EvalPhraseStats(Fts3Cursor *, Fts3Expr *, u32 *);
+SQLITE_PRIVATE int sqlite3Fts3FirstFilter(sqlite3_int64, char *, int, char *);
+SQLITE_PRIVATE void sqlite3Fts3CreateStatTable(int*, Fts3Table*);
+SQLITE_PRIVATE int sqlite3Fts3EvalTestDeferred(Fts3Cursor *pCsr, int *pRc);
+SQLITE_PRIVATE int sqlite3Fts3ReadInt(const char *z, int *pnOut);
+
+/* fts3_tokenizer.c */
+SQLITE_PRIVATE const char *sqlite3Fts3NextToken(const char *, int *);
+SQLITE_PRIVATE int sqlite3Fts3InitHashTable(sqlite3 *, Fts3Hash *, const char *);
+SQLITE_PRIVATE int sqlite3Fts3InitTokenizer(Fts3Hash *pHash, const char *,
+ sqlite3_tokenizer **, char **
+);
+SQLITE_PRIVATE int sqlite3Fts3IsIdChar(char);
+
+/* fts3_snippet.c */
+SQLITE_PRIVATE void sqlite3Fts3Offsets(sqlite3_context*, Fts3Cursor*);
+SQLITE_PRIVATE void sqlite3Fts3Snippet(sqlite3_context *, Fts3Cursor *, const char *,
+ const char *, const char *, int, int
+);
+SQLITE_PRIVATE void sqlite3Fts3Matchinfo(sqlite3_context *, Fts3Cursor *, const char *);
+SQLITE_PRIVATE void sqlite3Fts3MIBufferFree(MatchinfoBuffer *p);
+
+/* fts3_expr.c */
+SQLITE_PRIVATE int sqlite3Fts3ExprParse(sqlite3_tokenizer *, int,
+ char **, int, int, int, const char *, int, Fts3Expr **, char **
+);
+SQLITE_PRIVATE void sqlite3Fts3ExprFree(Fts3Expr *);
+#ifdef SQLITE_TEST
+SQLITE_PRIVATE int sqlite3Fts3ExprInitTestInterface(sqlite3 *db, Fts3Hash*);
+SQLITE_PRIVATE int sqlite3Fts3InitTerm(sqlite3 *db);
+#endif
+
+SQLITE_PRIVATE int sqlite3Fts3OpenTokenizer(sqlite3_tokenizer *, int, const char *, int,
+ sqlite3_tokenizer_cursor **
+);
+
+/* fts3_aux.c */
+SQLITE_PRIVATE int sqlite3Fts3InitAux(sqlite3 *db);
+
+SQLITE_PRIVATE void sqlite3Fts3EvalPhraseCleanup(Fts3Phrase *);
+
+SQLITE_PRIVATE int sqlite3Fts3MsrIncrStart(
+ Fts3Table*, Fts3MultiSegReader*, int, const char*, int);
+SQLITE_PRIVATE int sqlite3Fts3MsrIncrNext(
+ Fts3Table *, Fts3MultiSegReader *, sqlite3_int64 *, char **, int *);
+SQLITE_PRIVATE int sqlite3Fts3EvalPhrasePoslist(Fts3Cursor *, Fts3Expr *, int iCol, char **);
+SQLITE_PRIVATE int sqlite3Fts3MsrOvfl(Fts3Cursor *, Fts3MultiSegReader *, int *);
+SQLITE_PRIVATE int sqlite3Fts3MsrIncrRestart(Fts3MultiSegReader *pCsr);
+
+/* fts3_tokenize_vtab.c */
+SQLITE_PRIVATE int sqlite3Fts3InitTok(sqlite3*, Fts3Hash *);
+
+/* fts3_unicode2.c (functions generated by parsing unicode text files) */
+#ifndef SQLITE_DISABLE_FTS3_UNICODE
+SQLITE_PRIVATE int sqlite3FtsUnicodeFold(int, int);
+SQLITE_PRIVATE int sqlite3FtsUnicodeIsalnum(int);
+SQLITE_PRIVATE int sqlite3FtsUnicodeIsdiacritic(int);
+#endif
+
+#endif /* !SQLITE_CORE || SQLITE_ENABLE_FTS3 */
+#endif /* _FTSINT_H */
+
+/************** End of fts3Int.h *********************************************/
+/************** Continuing where we left off in fts3.c ***********************/
+#if !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS3)
+
+#if defined(SQLITE_ENABLE_FTS3) && !defined(SQLITE_CORE)
+# define SQLITE_CORE 1
+#endif
+
+/* #include <assert.h> */
+/* #include <stdlib.h> */
+/* #include <stddef.h> */
+/* #include <stdio.h> */
+/* #include <string.h> */
+/* #include <stdarg.h> */
+
+/* #include "fts3.h" */
+#ifndef SQLITE_CORE
+/* # include "sqlite3ext.h" */
+ SQLITE_EXTENSION_INIT1
+#endif
+
+static int fts3EvalNext(Fts3Cursor *pCsr);
+static int fts3EvalStart(Fts3Cursor *pCsr);
+static int fts3TermSegReaderCursor(
+ Fts3Cursor *, const char *, int, int, Fts3MultiSegReader **);
+
+#ifndef SQLITE_AMALGAMATION
+# if defined(SQLITE_DEBUG)
+SQLITE_PRIVATE int sqlite3Fts3Always(int b) { assert( b ); return b; }
+SQLITE_PRIVATE int sqlite3Fts3Never(int b) { assert( !b ); return b; }
+# endif
+#endif
+
+/*
+** This variable is set to false when running tests in which the on-disk
+** structures are known not to be corrupt; otherwise it is true. When it is
+** false, extra assert() conditions in the fts3 code are activated -
+** conditions that are only true if it is guaranteed that the fts3 database
+** is not corrupt.
+*/
+SQLITE_API int sqlite3_fts3_may_be_corrupt = 1;
+
+/*
+** Write a 64-bit variable-length integer to memory starting at p[0].
+** The length of data written will be between 1 and FTS3_VARINT_MAX bytes.
+** The number of bytes written is returned.
+*/
+SQLITE_PRIVATE int sqlite3Fts3PutVarint(char *p, sqlite_int64 v){
+ unsigned char *q = (unsigned char *) p;
+ sqlite_uint64 vu = v;
+ do{
+ *q++ = (unsigned char) ((vu & 0x7f) | 0x80);
+ vu >>= 7;
+ }while( vu!=0 );
+ q[-1] &= 0x7f; /* turn off high bit in final byte */
+ assert( q - (unsigned char *)p <= FTS3_VARINT_MAX );
+ return (int) (q - (unsigned char *)p);
+}
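+
+/*
+** Worked example (editorial, not from the original sources): FTS3 varints
+** store 7 payload bits per byte, least-significant group first, with the
+** 0x80 bit set on every byte except the last. Encoding v==300
+** (binary 100101100):
+**
+**   low 7 bits   0101100  ->  0xAC  (0x2C with the continuation bit set)
+**   next 7 bits  0000010  ->  0x02  (final byte, high bit clear)
+**
+** So sqlite3Fts3PutVarint(p, 300) writes the bytes 0xAC 0x02 and
+** returns 2, matching sqlite3Fts3VarintLen(300).
+*/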
+
+#define GETVARINT_STEP(v, ptr, shift, mask1, mask2, var, ret) \
+ v = (v & mask1) | ( (*(const unsigned char*)(ptr++)) << shift ); \
+ if( (v & mask2)==0 ){ var = v; return ret; }
+#define GETVARINT_INIT(v, ptr, shift, mask1, mask2, var, ret) \
+ v = (*ptr++); \
+ if( (v & mask2)==0 ){ var = v; return ret; }
+
+SQLITE_PRIVATE int sqlite3Fts3GetVarintU(const char *pBuf, sqlite_uint64 *v){
+ const unsigned char *p = (const unsigned char*)pBuf;
+ const unsigned char *pStart = p;
+ u32 a;
+ u64 b;
+ int shift;
+
+ GETVARINT_INIT(a, p, 0, 0x00, 0x80, *v, 1);
+ GETVARINT_STEP(a, p, 7, 0x7F, 0x4000, *v, 2);
+ GETVARINT_STEP(a, p, 14, 0x3FFF, 0x200000, *v, 3);
+ GETVARINT_STEP(a, p, 21, 0x1FFFFF, 0x10000000, *v, 4);
+ b = (a & 0x0FFFFFFF );
+
+ for(shift=28; shift<=63; shift+=7){
+ u64 c = *p++;
+ b += (c&0x7F) << shift;
+ if( (c & 0x80)==0 ) break;
+ }
+ *v = b;
+ return (int)(p - pStart);
+}
+
+/*
+** Read a 64-bit variable-length integer from memory starting at p[0].
+** Return the number of bytes read, or 0 on error.
+** The value is stored in *v.
+*/
+SQLITE_PRIVATE int sqlite3Fts3GetVarint(const char *pBuf, sqlite_int64 *v){
+ return sqlite3Fts3GetVarintU(pBuf, (sqlite3_uint64*)v);
+}
+
+/*
+** Read a 64-bit variable-length integer from memory starting at p[0] and
+** not extending past pEnd[-1].
+** Return the number of bytes read, or 0 on error.
+** The value is stored in *v.
+*/
+SQLITE_PRIVATE int sqlite3Fts3GetVarintBounded(
+ const char *pBuf,
+ const char *pEnd,
+ sqlite_int64 *v
+){
+ const unsigned char *p = (const unsigned char*)pBuf;
+ const unsigned char *pStart = p;
+ const unsigned char *pX = (const unsigned char*)pEnd;
+ u64 b = 0;
+ int shift;
+ for(shift=0; shift<=63; shift+=7){
+ u64 c = p<pX ? *p : 0;
+ p++;
+ b += (c&0x7F) << shift;
+ if( (c & 0x80)==0 ) break;
+ }
+ *v = b;
+ return (int)(p - pStart);
+}
+
+/*
+** Similar to sqlite3Fts3GetVarint(), except that the output is truncated to
+** a non-negative 32-bit integer before it is returned.
+*/
+SQLITE_PRIVATE int sqlite3Fts3GetVarint32(const char *p, int *pi){
+ const unsigned char *ptr = (const unsigned char*)p;
+ u32 a;
+
+#ifndef fts3GetVarint32
+ GETVARINT_INIT(a, ptr, 0, 0x00, 0x80, *pi, 1);
+#else
+ a = (*ptr++);
+ assert( a & 0x80 );
+#endif
+
+ GETVARINT_STEP(a, ptr, 7, 0x7F, 0x4000, *pi, 2);
+ GETVARINT_STEP(a, ptr, 14, 0x3FFF, 0x200000, *pi, 3);
+ GETVARINT_STEP(a, ptr, 21, 0x1FFFFF, 0x10000000, *pi, 4);
+ a = (a & 0x0FFFFFFF );
+ *pi = (int)(a | ((u32)(*ptr & 0x07) << 28));
+ assert( 0==(a & 0x80000000) );
+ assert( *pi>=0 );
+ return 5;
+}
+
+/*
+** Return the number of bytes required to encode v as a varint
+*/
+SQLITE_PRIVATE int sqlite3Fts3VarintLen(sqlite3_uint64 v){
+ int i = 0;
+ do{
+ i++;
+ v >>= 7;
+ }while( v!=0 );
+ return i;
+}
+
+/*
+** Convert an SQL-style quoted string into a normal string by removing
+** the quote characters. The conversion is done in-place. If the
+** input does not begin with a quote character, then this routine
+** is a no-op.
+**
+** Examples:
+**
+** "abc" becomes abc
+** 'xyz' becomes xyz
+** [pqr] becomes pqr
+** `mno` becomes mno
+**
+*/
+SQLITE_PRIVATE void sqlite3Fts3Dequote(char *z){
+ char quote; /* Quote character (if any) */
+
+ quote = z[0];
+ if( quote=='[' || quote=='\'' || quote=='"' || quote=='`' ){
+ int iIn = 1; /* Index of next byte to read from input */
+ int iOut = 0; /* Index of next byte to write to output */
+
+ /* If the first byte was a '[', then the close-quote character is a ']' */
+ if( quote=='[' ) quote = ']';
+
+ while( z[iIn] ){
+ if( z[iIn]==quote ){
+ if( z[iIn+1]!=quote ) break;
+ z[iOut++] = quote;
+ iIn += 2;
+ }else{
+ z[iOut++] = z[iIn++];
+ }
+ }
+ z[iOut] = '\0';
+ }
+}
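+
+/*
+** A further worked example (editorial, not from the original sources):
+** embedded quote characters are escaped by doubling them, and the loop
+** above folds each doubled quote back into a single character:
+**
+**   'it''s'    becomes   it's
+**   "a""b"     becomes   a"b
+*/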
+
+/*
+** Read a single varint from the doclist at *pp and advance *pp to point
+** to the first byte past the end of the varint. Add the value of the varint
+** to *pVal.
+*/
+static void fts3GetDeltaVarint(char **pp, sqlite3_int64 *pVal){
+ sqlite3_int64 iVal;
+ *pp += sqlite3Fts3GetVarint(*pp, &iVal);
+ *pVal += iVal;
+}
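+
+/*
+** Illustration (editorial, not from the original sources): doclists store
+** docids as deltas from the previous docid, so the docids 3, 7 and 12 are
+** encoded as the varints 3, 4 and 5. Reading the list back:
+**
+**   sqlite3_int64 iDocid = 0;
+**   fts3GetDeltaVarint(&p, &iDocid);    -- iDocid==3
+**   fts3GetDeltaVarint(&p, &iDocid);    -- iDocid==7
+**   fts3GetDeltaVarint(&p, &iDocid);    -- iDocid==12
+*/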
+
+/*
+** When this function is called, *pp points to the first byte following a
+** varint that is part of a doclist (or position-list, or any other list
+** of varints). This function moves *pp to point to the start of that varint,
+** and sets *pVal to the value of that varint.
+**
+** Argument pStart points to the first byte of the doclist that the
+** varint is part of.
+*/
+static void fts3GetReverseVarint(
+ char **pp,
+ char *pStart,
+ sqlite3_int64 *pVal
+){
+ sqlite3_int64 iVal;
+ char *p;
+
+ /* On entry, *pp points at the first byte past the varint we are
+ ** interested in. So, unless the doclist is corrupt, the 0x80 bit is
+ ** clear on the byte at (*pp)[-1]. */
+ for(p = (*pp)-2; p>=pStart && *p&0x80; p--);
+ p++;
+ *pp = p;
+
+ sqlite3Fts3GetVarint(p, &iVal);
+ *pVal = iVal;
+}
+
+/*
+** The xDisconnect() virtual table method.
+*/
+static int fts3DisconnectMethod(sqlite3_vtab *pVtab){
+ Fts3Table *p = (Fts3Table *)pVtab;
+ int i;
+
+ assert( p->nPendingData==0 );
+ assert( p->pSegments==0 );
+
+ /* Free any prepared statements held */
+ sqlite3_finalize(p->pSeekStmt);
+ for(i=0; i<SizeofArray(p->aStmt); i++){
+ sqlite3_finalize(p->aStmt[i]);
+ }
+ sqlite3_free(p->zSegmentsTbl);
+ sqlite3_free(p->zReadExprlist);
+ sqlite3_free(p->zWriteExprlist);
+ sqlite3_free(p->zContentTbl);
+ sqlite3_free(p->zLanguageid);
+
+ /* Invoke the tokenizer destructor to free the tokenizer. */
+ p->pTokenizer->pModule->xDestroy(p->pTokenizer);
+
+ sqlite3_free(p);
+ return SQLITE_OK;
+}
+
+/*
+** Write an error message into *pzErr
+*/
+SQLITE_PRIVATE void sqlite3Fts3ErrMsg(char **pzErr, const char *zFormat, ...){
+ va_list ap;
+ sqlite3_free(*pzErr);
+ va_start(ap, zFormat);
+ *pzErr = sqlite3_vmprintf(zFormat, ap);
+ va_end(ap);
+}
+
+/*
+** Construct one or more SQL statements from the format string given
+** and then evaluate those statements. The success code is written
+** into *pRc.
+**
+** If *pRc is initially non-zero then this routine is a no-op.
+*/
+static void fts3DbExec(
+ int *pRc, /* Success code */
+ sqlite3 *db, /* Database in which to run SQL */
+ const char *zFormat, /* Format string for SQL */
+ ... /* Arguments to the format string */
+){
+ va_list ap;
+ char *zSql;
+ if( *pRc ) return;
+ va_start(ap, zFormat);
+ zSql = sqlite3_vmprintf(zFormat, ap);
+ va_end(ap);
+ if( zSql==0 ){
+ *pRc = SQLITE_NOMEM;
+ }else{
+ *pRc = sqlite3_exec(db, zSql, 0, 0, 0);
+ sqlite3_free(zSql);
+ }
+}
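+
+/*
+** Usage sketch (editorial, not from the original sources): because
+** fts3DbExec() is a no-op once *pRc is non-zero, callers can chain a
+** sequence of statements and test the error code a single time:
+**
+**   int rc = SQLITE_OK;
+**   fts3DbExec(&rc, db, "CREATE TABLE t1(x)");
+**   fts3DbExec(&rc, db, "CREATE TABLE t2(y)");   -- skipped if t1 failed
+**   if( rc!=SQLITE_OK ) return rc;
+*/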
+
+/*
+** The xDestroy() virtual table method.
+*/
+static int fts3DestroyMethod(sqlite3_vtab *pVtab){
+ Fts3Table *p = (Fts3Table *)pVtab;
+ int rc = SQLITE_OK; /* Return code */
+ const char *zDb = p->zDb; /* Name of database (e.g. "main", "temp") */
+ sqlite3 *db = p->db; /* Database handle */
+
+ /* Drop the shadow tables */
+ fts3DbExec(&rc, db,
+ "DROP TABLE IF EXISTS %Q.'%q_segments';"
+ "DROP TABLE IF EXISTS %Q.'%q_segdir';"
+ "DROP TABLE IF EXISTS %Q.'%q_docsize';"
+ "DROP TABLE IF EXISTS %Q.'%q_stat';"
+ "%s DROP TABLE IF EXISTS %Q.'%q_content';",
+ zDb, p->zName,
+ zDb, p->zName,
+ zDb, p->zName,
+ zDb, p->zName,
+ (p->zContentTbl ? "--" : ""), zDb,p->zName
+ );
+
+ /* If everything has worked, invoke fts3DisconnectMethod() to free the
+ ** memory associated with the Fts3Table structure and return SQLITE_OK.
+ ** Otherwise, return an SQLite error code.
+ */
+ return (rc==SQLITE_OK ? fts3DisconnectMethod(pVtab) : rc);
+}
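+
+/*
+** Note on the format string above (editorial, not from the original
+** sources): when p->zContentTbl is set (a content= table), the "%s"
+** expands to "--", which turns the final statement into an SQL comment
+** so that the user-supplied content table is never dropped:
+**
+**   -- DROP TABLE IF EXISTS main.'t1_content';
+*/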
+
+
+/*
+** Invoke sqlite3_declare_vtab() to declare the schema for the FTS3 table
+** passed as the first argument. This is done as part of the xConnect()
+** and xCreate() methods.
+**
+** If *pRc is non-zero when this function is called, it is a no-op.
+** Otherwise, if an error occurs, an SQLite error code is stored in *pRc
+** before returning.
+*/
+static void fts3DeclareVtab(int *pRc, Fts3Table *p){
+ if( *pRc==SQLITE_OK ){
+ int i; /* Iterator variable */
+ int rc; /* Return code */
+ char *zSql; /* SQL statement passed to declare_vtab() */
+ char *zCols; /* List of user defined columns */
+ const char *zLanguageid;
+
+ zLanguageid = (p->zLanguageid ? p->zLanguageid : "__langid");
+ sqlite3_vtab_config(p->db, SQLITE_VTAB_CONSTRAINT_SUPPORT, 1);
+
+ /* Create a list of user columns for the virtual table */
+ zCols = sqlite3_mprintf("%Q, ", p->azColumn[0]);
+ for(i=1; zCols && i<p->nColumn; i++){
+ zCols = sqlite3_mprintf("%z%Q, ", zCols, p->azColumn[i]);
+ }
+
+ /* Create the whole "CREATE TABLE" statement to pass to SQLite */
+ zSql = sqlite3_mprintf(
+ "CREATE TABLE x(%s %Q HIDDEN, docid HIDDEN, %Q HIDDEN)",
+ zCols, p->zName, zLanguageid
+ );
+ if( !zCols || !zSql ){
+ rc = SQLITE_NOMEM;
+ }else{
+ rc = sqlite3_declare_vtab(p->db, zSql);
+ }
+
+ sqlite3_free(zSql);
+ sqlite3_free(zCols);
+ *pRc = rc;
+ }
+}
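+
+/*
+** Example of the declared schema (an editorial sketch, assuming a table
+** created with CREATE VIRTUAL TABLE t1 USING fts4(a, b) and no
+** languageid= option). The string passed to sqlite3_declare_vtab() is:
+**
+**   CREATE TABLE x('a', 'b',  't1' HIDDEN, docid HIDDEN, '__langid' HIDDEN)
+**
+** The hidden column named after the table is the one used on the LHS of
+** the MATCH operator; docid is an alias for the rowid.
+*/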
+
+/*
+** Create the %_stat table if it does not already exist.
+*/
+SQLITE_PRIVATE void sqlite3Fts3CreateStatTable(int *pRc, Fts3Table *p){
+ fts3DbExec(pRc, p->db,
+ "CREATE TABLE IF NOT EXISTS %Q.'%q_stat'"
+ "(id INTEGER PRIMARY KEY, value BLOB);",
+ p->zDb, p->zName
+ );
+ if( (*pRc)==SQLITE_OK ) p->bHasStat = 1;
+}
+
+/*
+** Create the backing store tables (%_content, %_segments and %_segdir)
+** required by the FTS3 table passed as the only argument. This is done
+** as part of the vtab xCreate() method.
+**
+** If the p->bHasDocsize boolean is true (indicating that this is an
+** FTS4 table, not an FTS3 table) then also create the %_docsize and
+** %_stat tables required by FTS4.
+*/
+static int fts3CreateTables(Fts3Table *p){
+ int rc = SQLITE_OK; /* Return code */
+ int i; /* Iterator variable */
+ sqlite3 *db = p->db; /* The database connection */
+
+ if( p->zContentTbl==0 ){
+ const char *zLanguageid = p->zLanguageid;
+ char *zContentCols; /* Columns of %_content table */
+
+ /* Create a list of user columns for the content table */
+ zContentCols = sqlite3_mprintf("docid INTEGER PRIMARY KEY");
+ for(i=0; zContentCols && i<p->nColumn; i++){
+ char *z = p->azColumn[i];
+ zContentCols = sqlite3_mprintf("%z, 'c%d%q'", zContentCols, i, z);
+ }
+ if( zLanguageid && zContentCols ){
+ zContentCols = sqlite3_mprintf("%z, langid", zContentCols, zLanguageid);
+ }
+ if( zContentCols==0 ) rc = SQLITE_NOMEM;
+
+ /* Create the content table */
+ fts3DbExec(&rc, db,
+ "CREATE TABLE %Q.'%q_content'(%s)",
+ p->zDb, p->zName, zContentCols
+ );
+ sqlite3_free(zContentCols);
+ }
+
+ /* Create other tables */
+ fts3DbExec(&rc, db,
+ "CREATE TABLE %Q.'%q_segments'(blockid INTEGER PRIMARY KEY, block BLOB);",
+ p->zDb, p->zName
+ );
+ fts3DbExec(&rc, db,
+ "CREATE TABLE %Q.'%q_segdir'("
+ "level INTEGER,"
+ "idx INTEGER,"
+ "start_block INTEGER,"
+ "leaves_end_block INTEGER,"
+ "end_block INTEGER,"
+ "root BLOB,"
+ "PRIMARY KEY(level, idx)"
+ ");",
+ p->zDb, p->zName
+ );
+ if( p->bHasDocsize ){
+ fts3DbExec(&rc, db,
+ "CREATE TABLE %Q.'%q_docsize'(docid INTEGER PRIMARY KEY, size BLOB);",
+ p->zDb, p->zName
+ );
+ }
+ assert( p->bHasStat==p->bFts4 );
+ if( p->bHasStat ){
+ sqlite3Fts3CreateStatTable(&rc, p);
+ }
+ return rc;
+}
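+
+/*
+** For illustration (editorial, not from the original sources), the
+** statement
+**
+**   CREATE VIRTUAL TABLE t1 USING fts4(body);
+**
+** creates shadow tables t1_content, t1_segments, t1_segdir, t1_docsize
+** and t1_stat in the same database. A plain fts3 table creates only the
+** first three.
+*/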
+
+/*
+** Store the current database page-size in bytes in p->nPgsz.
+**
+** If *pRc is non-zero when this function is called, it is a no-op.
+** Otherwise, if an error occurs, an SQLite error code is stored in *pRc
+** before returning.
+*/
+static void fts3DatabasePageSize(int *pRc, Fts3Table *p){
+ if( *pRc==SQLITE_OK ){
+ int rc; /* Return code */
+ char *zSql; /* SQL text "PRAGMA %Q.page_size" */
+ sqlite3_stmt *pStmt; /* Compiled "PRAGMA %Q.page_size" statement */
+
+ zSql = sqlite3_mprintf("PRAGMA %Q.page_size", p->zDb);
+ if( !zSql ){
+ rc = SQLITE_NOMEM;
+ }else{
+ rc = sqlite3_prepare(p->db, zSql, -1, &pStmt, 0);
+ if( rc==SQLITE_OK ){
+ sqlite3_step(pStmt);
+ p->nPgsz = sqlite3_column_int(pStmt, 0);
+ rc = sqlite3_finalize(pStmt);
+ }else if( rc==SQLITE_AUTH ){
+ p->nPgsz = 1024;
+ rc = SQLITE_OK;
+ }
+ }
+ assert( p->nPgsz>0 || rc!=SQLITE_OK );
+ sqlite3_free(zSql);
+ *pRc = rc;
+ }
+}
+
+/*
+** "Special" FTS4 arguments are column specifications of the following form:
+**
+** <key> = <value>
+**
+** There must be no whitespace surrounding the "=" character. The <value>
+** term may be quoted, but the <key> may not.
+*/
+static int fts3IsSpecialColumn(
+ const char *z,
+ int *pnKey,
+ char **pzValue
+){
+ char *zValue;
+ const char *zCsr = z;
+
+ while( *zCsr!='=' ){
+ if( *zCsr=='\0' ) return 0;
+ zCsr++;
+ }
+
+ *pnKey = (int)(zCsr-z);
+ zValue = sqlite3_mprintf("%s", &zCsr[1]);
+ if( zValue ){
+ sqlite3Fts3Dequote(zValue);
+ }
+ *pzValue = zValue;
+ return 1;
+}
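+
+/*
+** Worked example (editorial, not from the original sources): given the
+** argument string prefix="2,4", the function above returns 1, sets *pnKey
+** to 6 (the length of "prefix") and sets *pzValue to a dequoted copy of
+** the value, i.e. 2,4. A plain column name such as body contains no '='
+** character and so returns 0.
+*/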
+
+/*
+** Append the output of a printf() style formatting to an existing string.
+*/
+static void fts3Appendf(
+ int *pRc, /* IN/OUT: Error code */
+ char **pz, /* IN/OUT: Pointer to string buffer */
+ const char *zFormat, /* Printf format string to append */
+ ... /* Arguments for printf format string */
+){
+ if( *pRc==SQLITE_OK ){
+ va_list ap;
+ char *z;
+ va_start(ap, zFormat);
+ z = sqlite3_vmprintf(zFormat, ap);
+ va_end(ap);
+ if( z && *pz ){
+ char *z2 = sqlite3_mprintf("%s%s", *pz, z);
+ sqlite3_free(z);
+ z = z2;
+ }
+ if( z==0 ) *pRc = SQLITE_NOMEM;
+ sqlite3_free(*pz);
+ *pz = z;
+ }
+}
+
+/*
+** Return a copy of input string zInput enclosed in double-quotes (") and
+** with all double quote characters escaped. For example:
+**
+** fts3QuoteId("un \"zip\"") -> "un \"\"zip\"\""
+**
+** The pointer returned points to memory obtained from sqlite3_malloc(). It
+** is the caller's responsibility to call sqlite3_free() to release this
+** memory.
+*/
+static char *fts3QuoteId(char const *zInput){
+ sqlite3_int64 nRet;
+ char *zRet;
+ nRet = 2 + (int)strlen(zInput)*2 + 1;
+ zRet = sqlite3_malloc64(nRet);
+ if( zRet ){
+ int i;
+ char *z = zRet;
+ *(z++) = '"';
+ for(i=0; zInput[i]; i++){
+ if( zInput[i]=='"' ) *(z++) = '"';
+ *(z++) = zInput[i];
+ }
+ *(z++) = '"';
+ *(z++) = '\0';
+ }
+ return zRet;
+}
+
+/*
+** Return a list of comma separated SQL expressions and a FROM clause that
+** could be used in a SELECT statement such as the following:
+**
+** SELECT <list of expressions> FROM %_content AS x ...
+**
+** to return the docid, followed by each column of text data in order
+** from left to right. If parameter zFunc is not NULL, then instead of
+** being returned directly each column of text data is passed to an SQL
+** function named zFunc first. For example, if zFunc is "unzip" and the
+** table has the three user-defined columns "a", "b", and "c", the following
+** string is returned:
+**
+** "docid, unzip(x.'a'), unzip(x.'b'), unzip(x.'c') FROM %_content AS x"
+**
+** The pointer returned points to a buffer allocated by sqlite3_malloc(). It
+** is the responsibility of the caller to eventually free it.
+**
+** If *pRc is not SQLITE_OK when this function is called, it is a no-op (and
+** a NULL pointer is returned). Otherwise, if an OOM error is encountered
+** by this function, NULL is returned and *pRc is set to SQLITE_NOMEM. If
+** no error occurs, *pRc is left unmodified.
+*/
+static char *fts3ReadExprList(Fts3Table *p, const char *zFunc, int *pRc){
+ char *zRet = 0;
+ char *zFree = 0;
+ char *zFunction;
+ int i;
+
+ if( p->zContentTbl==0 ){
+ if( !zFunc ){
+ zFunction = "";
+ }else{
+ zFree = zFunction = fts3QuoteId(zFunc);
+ }
+ fts3Appendf(pRc, &zRet, "docid");
+ for(i=0; i<p->nColumn; i++){
+ fts3Appendf(pRc, &zRet, ",%s(x.'c%d%q')", zFunction, i, p->azColumn[i]);
+ }
+ if( p->zLanguageid ){
+ fts3Appendf(pRc, &zRet, ", x.%Q", "langid");
+ }
+ sqlite3_free(zFree);
+ }else{
+ fts3Appendf(pRc, &zRet, "rowid");
+ for(i=0; i<p->nColumn; i++){
+ fts3Appendf(pRc, &zRet, ", x.'%q'", p->azColumn[i]);
+ }
+ if( p->zLanguageid ){
+ fts3Appendf(pRc, &zRet, ", x.%Q", p->zLanguageid);
+ }
+ }
+ fts3Appendf(pRc, &zRet, " FROM '%q'.'%q%s' AS x",
+ p->zDb,
+ (p->zContentTbl ? p->zContentTbl : p->zName),
+ (p->zContentTbl ? "" : "_content")
+ );
+ return zRet;
+}
+
+/*
+** Return a list of N comma separated question marks, where N is the number
+** of columns in the %_content table (one for the docid plus one for each
+** user-defined text column).
+**
+** If argument zFunc is not NULL, then all but the first question mark
+** are preceded by zFunc and an open bracket, and followed by a closing
+** bracket. For example, if zFunc is "zip" and the FTS3 table has three
+** user-defined text columns, the following string is returned:
+**
+** "?, zip(?), zip(?), zip(?)"
+**
+** The pointer returned points to a buffer allocated by sqlite3_malloc(). It
+** is the responsibility of the caller to eventually free it.
+**
+** If *pRc is not SQLITE_OK when this function is called, it is a no-op (and
+** a NULL pointer is returned). Otherwise, if an OOM error is encountered
+** by this function, NULL is returned and *pRc is set to SQLITE_NOMEM. If
+** no error occurs, *pRc is left unmodified.
+*/
+static char *fts3WriteExprList(Fts3Table *p, const char *zFunc, int *pRc){
+ char *zRet = 0;
+ char *zFree = 0;
+ char *zFunction;
+ int i;
+
+ if( !zFunc ){
+ zFunction = "";
+ }else{
+ zFree = zFunction = fts3QuoteId(zFunc);
+ }
+ fts3Appendf(pRc, &zRet, "?");
+ for(i=0; i<p->nColumn; i++){
+ fts3Appendf(pRc, &zRet, ",%s(?)", zFunction);
+ }
+ if( p->zLanguageid ){
+ fts3Appendf(pRc, &zRet, ", ?");
+ }
+ sqlite3_free(zFree);
+ return zRet;
+}
+
+/*
+** Buffer z contains a positive integer value encoded as utf-8 text.
+** Decode this value and store it in *pnOut, returning the number of bytes
+** consumed. If an overflow error occurs, return a negative value.
+*/
+SQLITE_PRIVATE int sqlite3Fts3ReadInt(const char *z, int *pnOut){
+ u64 iVal = 0;
+ int i;
+ for(i=0; z[i]>='0' && z[i]<='9'; i++){
+ iVal = iVal*10 + (z[i] - '0');
+ if( iVal>0x7FFFFFFF ) return -1;
+ }
+ *pnOut = (int)iVal;
+ return i;
+}
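+
+/*
+** Worked example (editorial, not from the original sources):
+**
+**   int n;
+**   sqlite3Fts3ReadInt("123,456", &n);    -- sets n to 123, returns 3
+**
+** Parsing stops at the first non-digit. A decoded value larger than
+** 0x7FFFFFFF causes -1 to be returned to signal overflow.
+*/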
+
+/*
+** This function interprets the string at (*pp) as a non-negative integer
+** value. It reads the integer and sets *pnOut to the value read, then
+** sets *pp to point to the byte immediately following the last byte of
+** the integer value.
+**
+** Only decimal digits ('0'..'9') may be part of an integer value.
+**
+** If *pp does not begin with a decimal digit, SQLITE_ERROR is returned and
+** the output value is undefined. Otherwise SQLITE_OK is returned.
+**
+** This function is used when parsing the "prefix=" FTS4 parameter.
+*/
+static int fts3GobbleInt(const char **pp, int *pnOut){
+ const int MAX_NPREFIX = 10000000;
+ int nInt = 0; /* Output value */
+ int nByte;
+ nByte = sqlite3Fts3ReadInt(*pp, &nInt);
+ if( nInt>MAX_NPREFIX ){
+ nInt = 0;
+ }
+ if( nByte==0 ){
+ return SQLITE_ERROR;
+ }
+ *pnOut = nInt;
+ *pp += nByte;
+ return SQLITE_OK;
+}
+
+/*
+** This function is called to allocate an array of Fts3Index structures
+** representing the indexes maintained by the current FTS table. FTS tables
+** always maintain the main "terms" index, but may also maintain one or
+** more "prefix" indexes, depending on the value of the "prefix=" parameter
+** (if any) specified as part of the CREATE VIRTUAL TABLE statement.
+**
+** Argument zParam is passed the value of the "prefix=" option if one was
+** specified, or NULL otherwise.
+**
+** If no error occurs, SQLITE_OK is returned and *apIndex set to point to
+** the allocated array. *pnIndex is set to the number of elements in the
+** array. If an error does occur, an SQLite error code is returned.
+**
+** Regardless of whether or not an error is returned, it is the responsibility
+** of the caller to call sqlite3_free() on the output array to free it.
+*/
+static int fts3PrefixParameter(
+ const char *zParam, /* ABC in prefix=ABC parameter to parse */
+ int *pnIndex, /* OUT: size of *apIndex[] array */
+ struct Fts3Index **apIndex /* OUT: Array of indexes for this table */
+){
+ struct Fts3Index *aIndex; /* Allocated array */
+ int nIndex = 1; /* Number of entries in array */
+
+ if( zParam && zParam[0] ){
+ const char *p;
+ nIndex++;
+ for(p=zParam; *p; p++){
+ if( *p==',' ) nIndex++;
+ }
+ }
+
+ aIndex = sqlite3_malloc64(sizeof(struct Fts3Index) * nIndex);
+ *apIndex = aIndex;
+ if( !aIndex ){
+ return SQLITE_NOMEM;
+ }
+
+ memset(aIndex, 0, sizeof(struct Fts3Index) * nIndex);
+ if( zParam ){
+ const char *p = zParam;
+ int i;
+ for(i=1; i<nIndex; i++){
+ int nPrefix = 0;
+ if( fts3GobbleInt(&p, &nPrefix) ) return SQLITE_ERROR;
+ assert( nPrefix>=0 );
+ if( nPrefix==0 ){
+ nIndex--;
+ i--;
+ }else{
+ aIndex[i].nPrefix = nPrefix;
+ }
+ p++;
+ }
+ }
+
+ *pnIndex = nIndex;
+ return SQLITE_OK;
+}
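+
+/*
+** For illustration (editorial, not from the original sources): the option
+** prefix="2,4" yields nIndex==3 and an aIndex[] array describing the main
+** terms index (nPrefix==0) plus two prefix indexes with nPrefix values 2
+** and 4. If no prefix= option is supplied, only the single main index is
+** allocated.
+*/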
+
+/*
+** This function is called when initializing an FTS4 table that uses the
+** content=xxx option. It determines the number of and names of the columns
+** of the new FTS4 table.
+**
+** The third argument passed to this function is the value passed to the
+** content=xxx option (i.e. "xxx"). This function queries the database for
+** a table of that name. If found, the output variables are populated
+** as follows:
+**
+** *pnCol: Set to the number of columns table xxx has,
+**
+** *pnStr: Set to the total amount of space required to store a copy
+** of each column's name, including the nul-terminator byte.
+**
+** *pazCol: Set to point to an array of *pnCol strings. Each string is
+** the name of the corresponding column in table xxx. The array
+** and its contents are allocated using a single allocation. It
+** is the responsibility of the caller to free this allocation
+** by eventually passing the *pazCol value to sqlite3_free().
+**
+** If the table cannot be found, an error code is returned and the output
+** variables are undefined. Or, if an OOM is encountered, SQLITE_NOMEM is
+** returned (and the output variables are undefined).
+*/
+static int fts3ContentColumns(
+ sqlite3 *db, /* Database handle */
+ const char *zDb, /* Name of db (i.e. "main", "temp" etc.) */
+ const char *zTbl, /* Name of content table */
+ const char ***pazCol, /* OUT: Malloc'd array of column names */
+ int *pnCol, /* OUT: Size of array *pazCol */
+ int *pnStr, /* OUT: Bytes of string content */
+ char **pzErr /* OUT: error message */
+){
+ int rc = SQLITE_OK; /* Return code */
+ char *zSql; /* "SELECT *" statement on zTbl */
+ sqlite3_stmt *pStmt = 0; /* Compiled version of zSql */
+
+ zSql = sqlite3_mprintf("SELECT * FROM %Q.%Q", zDb, zTbl);
+ if( !zSql ){
+ rc = SQLITE_NOMEM;
+ }else{
+ rc = sqlite3_prepare(db, zSql, -1, &pStmt, 0);
+ if( rc!=SQLITE_OK ){
+ sqlite3Fts3ErrMsg(pzErr, "%s", sqlite3_errmsg(db));
+ }
+ }
+ sqlite3_free(zSql);
+
+ if( rc==SQLITE_OK ){
+ const char **azCol; /* Output array */
+ sqlite3_int64 nStr = 0; /* Size of all column names (incl. 0x00) */
+ int nCol; /* Number of table columns */
+ int i; /* Used to iterate through columns */
+
+ /* Loop through the returned columns. Set nStr to the number of bytes of
+ ** space required to store a copy of each column name, including the
+ ** nul-terminator byte. */
+ nCol = sqlite3_column_count(pStmt);
+ for(i=0; i<nCol; i++){
+ const char *zCol = sqlite3_column_name(pStmt, i);
+ nStr += strlen(zCol) + 1;
+ }
+
+ /* Allocate and populate the array to return. */
+ azCol = (const char **)sqlite3_malloc64(sizeof(char *) * nCol + nStr);
+ if( azCol==0 ){
+ rc = SQLITE_NOMEM;
+ }else{
+ char *p = (char *)&azCol[nCol];
+ for(i=0; i<nCol; i++){
+ const char *zCol = sqlite3_column_name(pStmt, i);
+ int n = (int)strlen(zCol)+1;
+ memcpy(p, zCol, n);
+ azCol[i] = p;
+ p += n;
+ }
+ }
+ sqlite3_finalize(pStmt);
+
+ /* Set the output variables. */
+ *pnCol = nCol;
+ *pnStr = nStr;
+ *pazCol = azCol;
+ }
+
+ return rc;
+}
+
+/*
+** This function is the implementation of both the xConnect and xCreate
+** methods of the FTS3 virtual table.
+**
+** The argv[] array contains the following:
+**
+** argv[0] -> module name ("fts3" or "fts4")
+** argv[1] -> database name
+** argv[2] -> table name
+** argv[...] -> "column name" and other module argument fields.
+*/
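+/*
+** For example (an editorial sketch, not from the original sources):
+**
+**   CREATE VIRTUAL TABLE papers USING fts4(title, body, prefix="2,4");
+**
+** arrives here with argc==6 and argv[] set to fts4, main, papers, title,
+** body and prefix="2,4". The loop below classifies each entry from
+** argv[3] onward as a tokenizer specification, an FTS4 option or a
+** column name.
+*/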
+static int fts3InitVtab(
+ int isCreate, /* True for xCreate, false for xConnect */
+ sqlite3 *db, /* The SQLite database connection */
+ void *pAux, /* Hash table containing tokenizers */
+ int argc, /* Number of elements in argv array */
+ const char * const *argv, /* xCreate/xConnect argument array */
+ sqlite3_vtab **ppVTab, /* Write the resulting vtab structure here */
+ char **pzErr /* Write any error message here */
+){
+ Fts3Hash *pHash = (Fts3Hash *)pAux;
+ Fts3Table *p = 0; /* Pointer to allocated vtab */
+ int rc = SQLITE_OK; /* Return code */
+ int i; /* Iterator variable */
+ sqlite3_int64 nByte; /* Size of allocation used for *p */
+ int iCol; /* Column index */
+ int nString = 0; /* Bytes required to hold all column names */
+ int nCol = 0; /* Number of columns in the FTS table */
+ char *zCsr; /* Space for holding column names */
+ int nDb; /* Bytes required to hold database name */
+ int nName; /* Bytes required to hold table name */
+ int isFts4 = (argv[0][3]=='4'); /* True for FTS4, false for FTS3 */
+ const char **aCol; /* Array of column names */
+ sqlite3_tokenizer *pTokenizer = 0; /* Tokenizer for this table */
+
+ int nIndex = 0; /* Size of aIndex[] array */
+ struct Fts3Index *aIndex = 0; /* Array of indexes for this table */
+
+ /* The results of parsing supported FTS4 key=value options: */
+ int bNoDocsize = 0; /* True to omit %_docsize table */
+ int bDescIdx = 0; /* True to store descending indexes */
+ char *zPrefix = 0; /* Prefix parameter value (or NULL) */
+ char *zCompress = 0; /* compress=? parameter (or NULL) */
+ char *zUncompress = 0; /* uncompress=? parameter (or NULL) */
+ char *zContent = 0; /* content=? parameter (or NULL) */
+ char *zLanguageid = 0; /* languageid=? parameter (or NULL) */
+ char **azNotindexed = 0; /* The set of notindexed= columns */
+ int nNotindexed = 0; /* Size of azNotindexed[] array */
+
+ assert( strlen(argv[0])==4 );
+ assert( (sqlite3_strnicmp(argv[0], "fts4", 4)==0 && isFts4)
+ || (sqlite3_strnicmp(argv[0], "fts3", 4)==0 && !isFts4)
+ );
+
+ nDb = (int)strlen(argv[1]) + 1;
+ nName = (int)strlen(argv[2]) + 1;
+
+ nByte = sizeof(const char *) * (argc-2);
+ aCol = (const char **)sqlite3_malloc64(nByte);
+ if( aCol ){
+ memset((void*)aCol, 0, nByte);
+ azNotindexed = (char **)sqlite3_malloc64(nByte);
+ }
+ if( azNotindexed ){
+ memset(azNotindexed, 0, nByte);
+ }
+ if( !aCol || !azNotindexed ){
+ rc = SQLITE_NOMEM;
+ goto fts3_init_out;
+ }
+
+ /* Loop through all of the arguments passed by the user to the FTS3/4
+ ** module (i.e. all the column names and special arguments). This loop
+ ** does the following:
+ **
+ ** + Figures out the number of columns the FTSX table will have, and
+ ** the number of bytes of space that must be allocated to store copies
+ ** of the column names.
+ **
+ ** + If there is a tokenizer specification included in the arguments,
+ ** initializes the tokenizer pTokenizer.
+ */
+ for(i=3; rc==SQLITE_OK && i<argc; i++){
+ char const *z = argv[i];
+ int nKey;
+ char *zVal;
+
+ /* Check if this is a tokenizer specification */
+ if( !pTokenizer
+ && strlen(z)>8
+ && 0==sqlite3_strnicmp(z, "tokenize", 8)
+ && 0==sqlite3Fts3IsIdChar(z[8])
+ ){
+ rc = sqlite3Fts3InitTokenizer(pHash, &z[9], &pTokenizer, pzErr);
+ }
+
+ /* Check if it is an FTS4 special argument. */
+ else if( isFts4 && fts3IsSpecialColumn(z, &nKey, &zVal) ){
+ struct Fts4Option {
+ const char *zOpt;
+ int nOpt;
+ } aFts4Opt[] = {
+ { "matchinfo", 9 }, /* 0 -> MATCHINFO */
+ { "prefix", 6 }, /* 1 -> PREFIX */
+ { "compress", 8 }, /* 2 -> COMPRESS */
+ { "uncompress", 10 }, /* 3 -> UNCOMPRESS */
+ { "order", 5 }, /* 4 -> ORDER */
+ { "content", 7 }, /* 5 -> CONTENT */
+ { "languageid", 10 }, /* 6 -> LANGUAGEID */
+ { "notindexed", 10 } /* 7 -> NOTINDEXED */
+ };
+
+ int iOpt;
+ if( !zVal ){
+ rc = SQLITE_NOMEM;
+ }else{
+ for(iOpt=0; iOpt<SizeofArray(aFts4Opt); iOpt++){
+ struct Fts4Option *pOp = &aFts4Opt[iOpt];
+ if( nKey==pOp->nOpt && !sqlite3_strnicmp(z, pOp->zOpt, pOp->nOpt) ){
+ break;
+ }
+ }
+ switch( iOpt ){
+ case 0: /* MATCHINFO */
+ if( strlen(zVal)!=4 || sqlite3_strnicmp(zVal, "fts3", 4) ){
+ sqlite3Fts3ErrMsg(pzErr, "unrecognized matchinfo: %s", zVal);
+ rc = SQLITE_ERROR;
+ }
+ bNoDocsize = 1;
+ break;
+
+ case 1: /* PREFIX */
+ sqlite3_free(zPrefix);
+ zPrefix = zVal;
+ zVal = 0;
+ break;
+
+ case 2: /* COMPRESS */
+ sqlite3_free(zCompress);
+ zCompress = zVal;
+ zVal = 0;
+ break;
+
+ case 3: /* UNCOMPRESS */
+ sqlite3_free(zUncompress);
+ zUncompress = zVal;
+ zVal = 0;
+ break;
+
+ case 4: /* ORDER */
+ if( (strlen(zVal)!=3 || sqlite3_strnicmp(zVal, "asc", 3))
+ && (strlen(zVal)!=4 || sqlite3_strnicmp(zVal, "desc", 4))
+ ){
+ sqlite3Fts3ErrMsg(pzErr, "unrecognized order: %s", zVal);
+ rc = SQLITE_ERROR;
+ }
+ bDescIdx = (zVal[0]=='d' || zVal[0]=='D');
+ break;
+
+ case 5: /* CONTENT */
+ sqlite3_free(zContent);
+ zContent = zVal;
+ zVal = 0;
+ break;
+
+ case 6: /* LANGUAGEID */
+ assert( iOpt==6 );
+ sqlite3_free(zLanguageid);
+ zLanguageid = zVal;
+ zVal = 0;
+ break;
+
+ case 7: /* NOTINDEXED */
+ azNotindexed[nNotindexed++] = zVal;
+ zVal = 0;
+ break;
+
+ default:
+ assert( iOpt==SizeofArray(aFts4Opt) );
+ sqlite3Fts3ErrMsg(pzErr, "unrecognized parameter: %s", z);
+ rc = SQLITE_ERROR;
+ break;
+ }
+ sqlite3_free(zVal);
+ }
+ }
+
+ /* Otherwise, the argument is a column name. */
+ else {
+ nString += (int)(strlen(z) + 1);
+ aCol[nCol++] = z;
+ }
+ }
+
+ /* If a content=xxx option was specified, then do the following:
+ **
+ ** 1. Ignore any compress= and uncompress= options.
+ **
+ ** 2. If no column names were specified as part of the CREATE VIRTUAL
+ ** TABLE statement, use all columns from the content table.
+ */
+ if( rc==SQLITE_OK && zContent ){
+ sqlite3_free(zCompress);
+ sqlite3_free(zUncompress);
+ zCompress = 0;
+ zUncompress = 0;
+ if( nCol==0 ){
+ sqlite3_free((void*)aCol);
+ aCol = 0;
+ rc = fts3ContentColumns(db, argv[1], zContent,&aCol,&nCol,&nString,pzErr);
+
+ /* If a languageid= option was specified, remove the language id
+ ** column from the aCol[] array. */
+ if( rc==SQLITE_OK && zLanguageid ){
+ int j;
+ for(j=0; j<nCol; j++){
+ if( sqlite3_stricmp(zLanguageid, aCol[j])==0 ){
+ int k;
+ for(k=j; k<nCol; k++) aCol[k] = aCol[k+1];
+ nCol--;
+ break;
+ }
+ }
+ }
+ }
+ }
+ if( rc!=SQLITE_OK ) goto fts3_init_out;
+
+ if( nCol==0 ){
+ assert( nString==0 );
+ aCol[0] = "content";
+ nString = 8;
+ nCol = 1;
+ }
+
+ if( pTokenizer==0 ){
+ rc = sqlite3Fts3InitTokenizer(pHash, "simple", &pTokenizer, pzErr);
+ if( rc!=SQLITE_OK ) goto fts3_init_out;
+ }
+ assert( pTokenizer );
+
+ rc = fts3PrefixParameter(zPrefix, &nIndex, &aIndex);
+ if( rc==SQLITE_ERROR ){
+ assert( zPrefix );
+ sqlite3Fts3ErrMsg(pzErr, "error parsing prefix parameter: %s", zPrefix);
+ }
+ if( rc!=SQLITE_OK ) goto fts3_init_out;
+
+ /* Allocate and populate the Fts3Table structure. */
+ nByte = sizeof(Fts3Table) + /* Fts3Table */
+ nCol * sizeof(char *) + /* azColumn */
+ nIndex * sizeof(struct Fts3Index) + /* aIndex */
+ nCol * sizeof(u8) + /* abNotindexed */
+ nName + /* zName */
+ nDb + /* zDb */
+ nString; /* Space for azColumn strings */
+ p = (Fts3Table*)sqlite3_malloc64(nByte);
+ if( p==0 ){
+ rc = SQLITE_NOMEM;
+ goto fts3_init_out;
+ }
+ memset(p, 0, nByte);
+ p->db = db;
+ p->nColumn = nCol;
+ p->nPendingData = 0;
+ p->azColumn = (char **)&p[1];
+ p->pTokenizer = pTokenizer;
+ p->nMaxPendingData = FTS3_MAX_PENDING_DATA;
+ p->bHasDocsize = (isFts4 && bNoDocsize==0);
+ p->bHasStat = (u8)isFts4;
+ p->bFts4 = (u8)isFts4;
+ p->bDescIdx = (u8)bDescIdx;
+ p->nAutoincrmerge = 0xff; /* 0xff means setting unknown */
+ p->zContentTbl = zContent;
+ p->zLanguageid = zLanguageid;
+ zContent = 0;
+ zLanguageid = 0;
+ TESTONLY( p->inTransaction = -1 );
+ TESTONLY( p->mxSavepoint = -1 );
+
+ p->aIndex = (struct Fts3Index *)&p->azColumn[nCol];
+ memcpy(p->aIndex, aIndex, sizeof(struct Fts3Index) * nIndex);
+ p->nIndex = nIndex;
+ for(i=0; i<nIndex; i++){
+ fts3HashInit(&p->aIndex[i].hPending, FTS3_HASH_STRING, 1);
+ }
+ p->abNotindexed = (u8 *)&p->aIndex[nIndex];
+
+ /* Fill in the zName and zDb fields of the vtab structure. */
+ zCsr = (char *)&p->abNotindexed[nCol];
+ p->zName = zCsr;
+ memcpy(zCsr, argv[2], nName);
+ zCsr += nName;
+ p->zDb = zCsr;
+ memcpy(zCsr, argv[1], nDb);
+ zCsr += nDb;
+
+ /* Fill in the azColumn array */
+ for(iCol=0; iCol<nCol; iCol++){
+ char *z;
+ int n = 0;
+ z = (char *)sqlite3Fts3NextToken(aCol[iCol], &n);
+ if( n>0 ){
+ memcpy(zCsr, z, n);
+ }
+ zCsr[n] = '\0';
+ sqlite3Fts3Dequote(zCsr);
+ p->azColumn[iCol] = zCsr;
+ zCsr += n+1;
+ assert( zCsr <= &((char *)p)[nByte] );
+ }
+
+ /* Fill in the abNotindexed array */
+ for(iCol=0; iCol<nCol; iCol++){
+ int n = (int)strlen(p->azColumn[iCol]);
+ for(i=0; i<nNotindexed; i++){
+ char *zNot = azNotindexed[i];
+ if( zNot && n==(int)strlen(zNot)
+ && 0==sqlite3_strnicmp(p->azColumn[iCol], zNot, n)
+ ){
+ p->abNotindexed[iCol] = 1;
+ sqlite3_free(zNot);
+ azNotindexed[i] = 0;
+ }
+ }
+ }
+ for(i=0; i<nNotindexed; i++){
+ if( azNotindexed[i] ){
+ sqlite3Fts3ErrMsg(pzErr, "no such column: %s", azNotindexed[i]);
+ rc = SQLITE_ERROR;
+ }
+ }
+
+ if( rc==SQLITE_OK && (zCompress==0)!=(zUncompress==0) ){
+ char const *zMiss = (zCompress==0 ? "compress" : "uncompress");
+ rc = SQLITE_ERROR;
+ sqlite3Fts3ErrMsg(pzErr, "missing %s parameter in fts4 constructor", zMiss);
+ }
+ p->zReadExprlist = fts3ReadExprList(p, zUncompress, &rc);
+ p->zWriteExprlist = fts3WriteExprList(p, zCompress, &rc);
+ if( rc!=SQLITE_OK ) goto fts3_init_out;
+
+ /* If this is an xCreate call, create the underlying tables in the
+ ** database. TODO: For xConnect(), it could verify that said tables exist.
+ */
+ if( isCreate ){
+ rc = fts3CreateTables(p);
+ }
+
+ /* Check to see if a legacy fts3 table has been "upgraded" by the
+ ** addition of a %_stat table so that it can use incremental merge.
+ */
+ if( !isFts4 && !isCreate ){
+ p->bHasStat = 2;
+ }
+
+ /* Figure out the page-size for the database. This is required in order to
+ ** estimate the cost of loading large doclists from the database. */
+ fts3DatabasePageSize(&rc, p);
+ p->nNodeSize = p->nPgsz-35;
+
+#if defined(SQLITE_DEBUG)||defined(SQLITE_TEST)
+ p->nMergeCount = FTS3_MERGE_COUNT;
+#endif
+
+ /* Declare the table schema to SQLite. */
+ fts3DeclareVtab(&rc, p);
+
+fts3_init_out:
+ sqlite3_free(zPrefix);
+ sqlite3_free(aIndex);
+ sqlite3_free(zCompress);
+ sqlite3_free(zUncompress);
+ sqlite3_free(zContent);
+ sqlite3_free(zLanguageid);
+ for(i=0; i<nNotindexed; i++) sqlite3_free(azNotindexed[i]);
+ sqlite3_free((void *)aCol);
+ sqlite3_free((void *)azNotindexed);
+ if( rc!=SQLITE_OK ){
+ if( p ){
+ fts3DisconnectMethod((sqlite3_vtab *)p);
+ }else if( pTokenizer ){
+ pTokenizer->pModule->xDestroy(pTokenizer);
+ }
+ }else{
+ assert( p->pSegments==0 );
+ *ppVTab = &p->base;
+ }
+ return rc;
+}
+
+/*
+** The xConnect() and xCreate() methods for the virtual table. All the
+** work is done in function fts3InitVtab().
+*/
+static int fts3ConnectMethod(
+ sqlite3 *db, /* Database connection */
+ void *pAux, /* Pointer to tokenizer hash table */
+ int argc, /* Number of elements in argv array */
+ const char * const *argv, /* xCreate/xConnect argument array */
+ sqlite3_vtab **ppVtab, /* OUT: New sqlite3_vtab object */
+ char **pzErr /* OUT: sqlite3_malloc'd error message */
+){
+ return fts3InitVtab(0, db, pAux, argc, argv, ppVtab, pzErr);
+}
+static int fts3CreateMethod(
+ sqlite3 *db, /* Database connection */
+ void *pAux, /* Pointer to tokenizer hash table */
+ int argc, /* Number of elements in argv array */
+ const char * const *argv, /* xCreate/xConnect argument array */
+ sqlite3_vtab **ppVtab, /* OUT: New sqlite3_vtab object */
+ char **pzErr /* OUT: sqlite3_malloc'd error message */
+){
+ return fts3InitVtab(1, db, pAux, argc, argv, ppVtab, pzErr);
+}
+
+/*
+** Set the pIdxInfo->estimatedRows variable to nRow, unless this
+** extension is currently being used with a version of SQLite too old to
+** support estimatedRows, in which case this function is a no-op.
+*/
+static void fts3SetEstimatedRows(sqlite3_index_info *pIdxInfo, i64 nRow){
+#if SQLITE_VERSION_NUMBER>=3008002
+ if( sqlite3_libversion_number()>=3008002 ){
+ pIdxInfo->estimatedRows = nRow;
+ }
+#endif
+}
+
+/*
+** Set the SQLITE_INDEX_SCAN_UNIQUE flag in pIdxInfo->flags, unless this
+** extension is currently being used with a version of SQLite too old to
+** support index-info flags, in which case this function is a no-op.
+*/
+static void fts3SetUniqueFlag(sqlite3_index_info *pIdxInfo){
+#if SQLITE_VERSION_NUMBER>=3008012
+ if( sqlite3_libversion_number()>=3008012 ){
+ pIdxInfo->idxFlags |= SQLITE_INDEX_SCAN_UNIQUE;
+ }
+#endif
+}
+
+/*
+** Implementation of the xBestIndex method for FTS3 tables. There
+** are three possible strategies, in order of preference:
+**
+** 1. Direct lookup by rowid or docid.
+** 2. Full-text search using a MATCH operator on a non-docid column.
+** 3. Linear scan of %_content table.
+*/
+static int fts3BestIndexMethod(sqlite3_vtab *pVTab, sqlite3_index_info *pInfo){
+ Fts3Table *p = (Fts3Table *)pVTab;
+ int i; /* Iterator variable */
+ int iCons = -1; /* Index of constraint to use */
+
+ int iLangidCons = -1; /* Index of langid=x constraint, if present */
+ int iDocidGe = -1; /* Index of docid>=x constraint, if present */
+ int iDocidLe = -1; /* Index of docid<=x constraint, if present */
+ int iIdx;
+
+ if( p->bLock ){
+ return SQLITE_ERROR;
+ }
+
+ /* By default use a full table scan. This is an expensive option,
+ ** so search through the constraints to see if a more efficient
+ ** strategy is possible.
+ */
+ pInfo->idxNum = FTS3_FULLSCAN_SEARCH;
+ pInfo->estimatedCost = 5000000;
+ for(i=0; i<pInfo->nConstraint; i++){
+ int bDocid; /* True if this constraint is on docid */
+ struct sqlite3_index_constraint *pCons = &pInfo->aConstraint[i];
+ if( pCons->usable==0 ){
+ if( pCons->op==SQLITE_INDEX_CONSTRAINT_MATCH ){
+ /* There exists an unusable MATCH constraint. This means that if
+ ** the planner does elect to use the results of this call as part
+ ** of the overall query plan the user will see an "unable to use
+ ** function MATCH in the requested context" error. To discourage
+ ** this, return a very high cost here. */
+ pInfo->idxNum = FTS3_FULLSCAN_SEARCH;
+ pInfo->estimatedCost = 1e50;
+ fts3SetEstimatedRows(pInfo, ((sqlite3_int64)1) << 50);
+ return SQLITE_OK;
+ }
+ continue;
+ }
+
+ bDocid = (pCons->iColumn<0 || pCons->iColumn==p->nColumn+1);
+
+ /* A direct lookup on the rowid or docid column. Assign a cost of 1.0. */
+ if( iCons<0 && pCons->op==SQLITE_INDEX_CONSTRAINT_EQ && bDocid ){
+ pInfo->idxNum = FTS3_DOCID_SEARCH;
+ pInfo->estimatedCost = 1.0;
+ iCons = i;
+ }
+
+ /* A MATCH constraint. Use a full-text search.
+ **
+ ** If there is more than one MATCH constraint available, use the first
+ ** one encountered. If there is both a MATCH constraint and a direct
+ ** rowid/docid lookup, prefer the MATCH strategy. This is done even
+ ** though the rowid/docid lookup is faster than a MATCH query, selecting
+ ** it would lead to an "unable to use function MATCH in the requested
+ ** context" error.
+ */
+ if( pCons->op==SQLITE_INDEX_CONSTRAINT_MATCH
+ && pCons->iColumn>=0 && pCons->iColumn<=p->nColumn
+ ){
+ pInfo->idxNum = FTS3_FULLTEXT_SEARCH + pCons->iColumn;
+ pInfo->estimatedCost = 2.0;
+ iCons = i;
+ }
+
+ /* Equality constraint on the langid column */
+ if( pCons->op==SQLITE_INDEX_CONSTRAINT_EQ
+ && pCons->iColumn==p->nColumn + 2
+ ){
+ iLangidCons = i;
+ }
+
+ if( bDocid ){
+ switch( pCons->op ){
+ case SQLITE_INDEX_CONSTRAINT_GE:
+ case SQLITE_INDEX_CONSTRAINT_GT:
+ iDocidGe = i;
+ break;
+
+ case SQLITE_INDEX_CONSTRAINT_LE:
+ case SQLITE_INDEX_CONSTRAINT_LT:
+ iDocidLe = i;
+ break;
+ }
+ }
+ }
+
+ /* If using a docid=? or rowid=? strategy, set the UNIQUE flag. */
+ if( pInfo->idxNum==FTS3_DOCID_SEARCH ) fts3SetUniqueFlag(pInfo);
+
+ iIdx = 1;
+ if( iCons>=0 ){
+ pInfo->aConstraintUsage[iCons].argvIndex = iIdx++;
+ pInfo->aConstraintUsage[iCons].omit = 1;
+ }
+ if( iLangidCons>=0 ){
+ pInfo->idxNum |= FTS3_HAVE_LANGID;
+ pInfo->aConstraintUsage[iLangidCons].argvIndex = iIdx++;
+ }
+ if( iDocidGe>=0 ){
+ pInfo->idxNum |= FTS3_HAVE_DOCID_GE;
+ pInfo->aConstraintUsage[iDocidGe].argvIndex = iIdx++;
+ }
+ if( iDocidLe>=0 ){
+ pInfo->idxNum |= FTS3_HAVE_DOCID_LE;
+ pInfo->aConstraintUsage[iDocidLe].argvIndex = iIdx++;
+ }
+
+ /* Regardless of the strategy selected, FTS can deliver rows in rowid (or
+ ** docid) order. Both ascending and descending are possible.
+ */
+ if( pInfo->nOrderBy==1 ){
+ struct sqlite3_index_orderby *pOrder = &pInfo->aOrderBy[0];
+ if( pOrder->iColumn<0 || pOrder->iColumn==p->nColumn+1 ){
+ if( pOrder->desc ){
+ pInfo->idxStr = "DESC";
+ }else{
+ pInfo->idxStr = "ASC";
+ }
+ pInfo->orderByConsumed = 1;
+ }
+ }
+
+ assert( p->pSegments==0 );
+ return SQLITE_OK;
+}
+
+/*
+** Implementation of xOpen method.
+*/
+static int fts3OpenMethod(sqlite3_vtab *pVTab, sqlite3_vtab_cursor **ppCsr){
+ sqlite3_vtab_cursor *pCsr; /* Allocated cursor */
+
+ UNUSED_PARAMETER(pVTab);
+
+ /* Allocate a buffer large enough for an Fts3Cursor structure. If the
+ ** allocation succeeds, zero it and return SQLITE_OK. Otherwise,
+ ** return SQLITE_NOMEM.
+ */
+ *ppCsr = pCsr = (sqlite3_vtab_cursor *)sqlite3_malloc(sizeof(Fts3Cursor));
+ if( !pCsr ){
+ return SQLITE_NOMEM;
+ }
+ memset(pCsr, 0, sizeof(Fts3Cursor));
+ return SQLITE_OK;
+}
+
+/*
+** Finalize the statement handle at pCsr->pStmt.
+**
+** Or, if that statement handle is one created by fts3CursorSeekStmt(),
+** and the Fts3Table.pSeekStmt slot is currently NULL, save the statement
+** pointer there instead of finalizing it.
+*/
+static void fts3CursorFinalizeStmt(Fts3Cursor *pCsr){
+ if( pCsr->bSeekStmt ){
+ Fts3Table *p = (Fts3Table *)pCsr->base.pVtab;
+ if( p->pSeekStmt==0 ){
+ p->pSeekStmt = pCsr->pStmt;
+ sqlite3_reset(pCsr->pStmt);
+ pCsr->pStmt = 0;
+ }
+ pCsr->bSeekStmt = 0;
+ }
+ sqlite3_finalize(pCsr->pStmt);
+}
+
+/*
+** Free all resources currently held by the cursor passed as the only
+** argument.
+*/
+static void fts3ClearCursor(Fts3Cursor *pCsr){
+ fts3CursorFinalizeStmt(pCsr);
+ sqlite3Fts3FreeDeferredTokens(pCsr);
+ sqlite3_free(pCsr->aDoclist);
+ sqlite3Fts3MIBufferFree(pCsr->pMIBuffer);
+ sqlite3Fts3ExprFree(pCsr->pExpr);
+ memset(&(&pCsr->base)[1], 0, sizeof(Fts3Cursor)-sizeof(sqlite3_vtab_cursor));
+}
+
+/*
+** Close the cursor. For additional information see the documentation
+** on the xClose method of the virtual table interface.
+*/
+static int fts3CloseMethod(sqlite3_vtab_cursor *pCursor){
+ Fts3Cursor *pCsr = (Fts3Cursor *)pCursor;
+ assert( ((Fts3Table *)pCsr->base.pVtab)->pSegments==0 );
+ fts3ClearCursor(pCsr);
+ assert( ((Fts3Table *)pCsr->base.pVtab)->pSegments==0 );
+ sqlite3_free(pCsr);
+ return SQLITE_OK;
+}
+
+/*
+** If pCsr->pStmt has not been prepared (i.e. if pCsr->pStmt==0), then
+** compose and prepare an SQL statement of the form:
+**
+** "SELECT <columns> FROM %_content WHERE rowid = ?"
+**
+** (or the equivalent for a content=xxx table) and set pCsr->pStmt to
+** it. If an error occurs, return an SQLite error code.
+*/
+static int fts3CursorSeekStmt(Fts3Cursor *pCsr){
+ int rc = SQLITE_OK;
+ if( pCsr->pStmt==0 ){
+ Fts3Table *p = (Fts3Table *)pCsr->base.pVtab;
+ char *zSql;
+ if( p->pSeekStmt ){
+ pCsr->pStmt = p->pSeekStmt;
+ p->pSeekStmt = 0;
+ }else{
+ zSql = sqlite3_mprintf("SELECT %s WHERE rowid = ?", p->zReadExprlist);
+ if( !zSql ) return SQLITE_NOMEM;
+ p->bLock++;
+ rc = sqlite3_prepare_v3(
+ p->db, zSql,-1,SQLITE_PREPARE_PERSISTENT,&pCsr->pStmt,0
+ );
+ p->bLock--;
+ sqlite3_free(zSql);
+ }
+ if( rc==SQLITE_OK ) pCsr->bSeekStmt = 1;
+ }
+ return rc;
+}
+
+/*
+** Position the pCsr->pStmt statement so that it is on the row
+** of the %_content table that contains the last match. Return
+** SQLITE_OK on success.
+*/
+static int fts3CursorSeek(sqlite3_context *pContext, Fts3Cursor *pCsr){
+ int rc = SQLITE_OK;
+ if( pCsr->isRequireSeek ){
+ rc = fts3CursorSeekStmt(pCsr);
+ if( rc==SQLITE_OK ){
+ Fts3Table *pTab = (Fts3Table*)pCsr->base.pVtab;
+ pTab->bLock++;
+ sqlite3_bind_int64(pCsr->pStmt, 1, pCsr->iPrevId);
+ pCsr->isRequireSeek = 0;
+ if( SQLITE_ROW==sqlite3_step(pCsr->pStmt) ){
+ pTab->bLock--;
+ return SQLITE_OK;
+ }else{
+ pTab->bLock--;
+ rc = sqlite3_reset(pCsr->pStmt);
+ if( rc==SQLITE_OK && ((Fts3Table *)pCsr->base.pVtab)->zContentTbl==0 ){
+ /* If no row was found and no error has occurred, then the %_content
+ ** table is missing a row that is present in the full-text index.
+ ** The data structures are corrupt. */
+ rc = FTS_CORRUPT_VTAB;
+ pCsr->isEof = 1;
+ }
+ }
+ }
+ }
+
+ if( rc!=SQLITE_OK && pContext ){
+ sqlite3_result_error_code(pContext, rc);
+ }
+ return rc;
+}
+
+/*
+** This function is used to process a single interior node when searching
+** a b-tree for a term or term prefix. The node data is passed to this
+** function via the zNode/nNode parameters. The term to search for is
+** passed in zTerm/nTerm.
+**
+** If piFirst is not NULL, then this function sets *piFirst to the blockid
+** of the child node that heads the sub-tree that may contain the term.
+**
+** If piLast is not NULL, then *piLast is set to the right-most child node
+** that heads a sub-tree that may contain a term for which zTerm/nTerm is
+** a prefix.
+**
+** If an OOM error occurs, SQLITE_NOMEM is returned. Otherwise, SQLITE_OK.
+*/
+static int fts3ScanInteriorNode(
+ const char *zTerm, /* Term to select leaves for */
+ int nTerm, /* Size of term zTerm in bytes */
+ const char *zNode, /* Buffer containing segment interior node */
+ int nNode, /* Size of buffer at zNode */
+ sqlite3_int64 *piFirst, /* OUT: Selected child node */
+ sqlite3_int64 *piLast /* OUT: Selected child node */
+){
+ int rc = SQLITE_OK; /* Return code */
+ const char *zCsr = zNode; /* Cursor to iterate through node */
+ const char *zEnd = &zCsr[nNode];/* End of interior node buffer */
+ char *zBuffer = 0; /* Buffer to load terms into */
+ i64 nAlloc = 0; /* Size of allocated buffer */
+ int isFirstTerm = 1; /* True when processing first term on page */
+ sqlite3_int64 iChild; /* Block id of child node to descend to */
+ int nBuffer = 0; /* Total term size */
+
+ /* Skip over the 'height' varint that occurs at the start of every
+ ** interior node. Then load the blockid of the left-child of the b-tree
+ ** node into variable iChild.
+ **
+ ** Even if the data structure on disk is corrupted, this (reading two
+ ** varints from the buffer) does not risk an overread. If zNode is a
+ ** root node, then the buffer comes from a SELECT statement. SQLite does
+ ** not make this guarantee explicitly, but in practice there are always
+ ** either more than 20 bytes of allocated space following the nNode bytes of
+ ** contents, or two zero bytes. Or, if the node is read from the %_segments
+ ** table, then there are always 20 bytes of zeroed padding following the
+ ** nNode bytes of content (see sqlite3Fts3ReadBlock() for details).
+ */
+ zCsr += sqlite3Fts3GetVarint(zCsr, &iChild);
+ zCsr += sqlite3Fts3GetVarint(zCsr, &iChild);
+ if( zCsr>zEnd ){
+ return FTS_CORRUPT_VTAB;
+ }
+
+ while( zCsr<zEnd && (piFirst || piLast) ){
+ int cmp; /* memcmp() result */
+ int nSuffix; /* Size of term suffix */
+ int nPrefix = 0; /* Size of term prefix */
+
+ /* Load the next term on the node into zBuffer. Use realloc() to expand
+ ** the size of zBuffer if required. */
+ if( !isFirstTerm ){
+ zCsr += fts3GetVarint32(zCsr, &nPrefix);
+ if( nPrefix>nBuffer ){
+ rc = FTS_CORRUPT_VTAB;
+ goto finish_scan;
+ }
+ }
+ isFirstTerm = 0;
+ zCsr += fts3GetVarint32(zCsr, &nSuffix);
+
+ assert( nPrefix>=0 && nSuffix>=0 );
+ if( nPrefix>zCsr-zNode || nSuffix>zEnd-zCsr || nSuffix==0 ){
+ rc = FTS_CORRUPT_VTAB;
+ goto finish_scan;
+ }
+ if( (i64)nPrefix+nSuffix>nAlloc ){
+ char *zNew;
+ nAlloc = ((i64)nPrefix+nSuffix) * 2;
+ zNew = (char *)sqlite3_realloc64(zBuffer, nAlloc);
+ if( !zNew ){
+ rc = SQLITE_NOMEM;
+ goto finish_scan;
+ }
+ zBuffer = zNew;
+ }
+ assert( zBuffer );
+ memcpy(&zBuffer[nPrefix], zCsr, nSuffix);
+ nBuffer = nPrefix + nSuffix;
+ zCsr += nSuffix;
+
+ /* Compare the term we are searching for with the term just loaded from
+ ** the interior node. If the specified term is greater than or equal
+ ** to the term from the interior node, then all terms on the sub-tree
+ ** headed by node iChild are smaller than zTerm. No need to search
+ ** iChild.
+ **
+ ** If the interior node term is larger than the specified term, then
+ ** the tree headed by iChild may contain the specified term.
+ */
+ cmp = memcmp(zTerm, zBuffer, (nBuffer>nTerm ? nTerm : nBuffer));
+ if( piFirst && (cmp<0 || (cmp==0 && nBuffer>nTerm)) ){
+ *piFirst = iChild;
+ piFirst = 0;
+ }
+
+ if( piLast && cmp<0 ){
+ *piLast = iChild;
+ piLast = 0;
+ }
+
+ iChild++;
+ }
+
+ if( piFirst ) *piFirst = iChild;
+ if( piLast ) *piLast = iChild;
+
+ finish_scan:
+ sqlite3_free(zBuffer);
+ return rc;
+}
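+
+#if 0
+/* Illustrative sketch only - not part of the FTS3 sources. This shows the
+** prefix-compressed term decoding performed by fts3ScanInteriorNode()
+** above: each term after the first is stored as (nPrefix, nSuffix,
+** suffix bytes) and shares its first nPrefix bytes with the previous
+** term. For brevity this sketch assumes single-byte varints and terms
+** that fit in a fixed buffer.
+*/
+#include <stdio.h>
+#include <string.h>
+static void decodeTerms(const unsigned char *z, int n){
+  char zTerm[64];                 /* Current reconstructed term */
+  int isFirst = 1;
+  int i = 0;
+  while( i<n ){
+    int nPrefix = isFirst ? 0 : z[i++];  /* Bytes shared with previous term */
+    int nSuffix = z[i++];                /* New bytes for this term */
+    memcpy(&zTerm[nPrefix], &z[i], nSuffix);
+    i += nSuffix;
+    isFirst = 0;
+    printf("%.*s\n", nPrefix+nSuffix, zTerm);
+  }
+}
+/* decodeTerms((const unsigned char*)"\x03" "abc" "\x02\x02" "de", 8)
+** prints "abc" followed by "abde". */
+#endif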
+
+
+/*
+** The buffer pointed to by argument zNode (size nNode bytes) contains an
+** interior node of a b-tree segment. The zTerm buffer (size nTerm bytes)
+** contains a term. This function searches the sub-tree headed by the zNode
+** node for the range of leaf nodes that may contain the specified term
+** or terms for which the specified term is a prefix.
+**
+** If piLeaf is not NULL, then *piLeaf is set to the blockid of the
+** left-most leaf node in the tree that may contain the specified term.
+** If piLeaf2 is not NULL, then *piLeaf2 is set to the blockid of the
+** right-most leaf node that may contain a term for which the specified
+** term is a prefix.
+**
+** It is possible that the range of returned leaf nodes does not contain
+** the specified term or any terms for which it is a prefix. However, if the
+** segment does contain any such terms, they are stored within the identified
+** range. Because this function only inspects interior segment nodes (and
+** never loads leaf nodes into memory), it cannot determine whether the
+** identified leaves actually contain any such terms.
+**
+** If an error occurs, an error code other than SQLITE_OK is returned.
+*/
+static int fts3SelectLeaf(
+ Fts3Table *p, /* Virtual table handle */
+ const char *zTerm, /* Term to select leaves for */
+ int nTerm, /* Size of term zTerm in bytes */
+ const char *zNode, /* Buffer containing segment interior node */
+ int nNode, /* Size of buffer at zNode */
+ sqlite3_int64 *piLeaf, /* Selected leaf node */
+ sqlite3_int64 *piLeaf2 /* Selected leaf node */
+){
+ int rc = SQLITE_OK; /* Return code */
+ int iHeight; /* Height of this node in tree */
+
+ assert( piLeaf || piLeaf2 );
+
+ fts3GetVarint32(zNode, &iHeight);
+ rc = fts3ScanInteriorNode(zTerm, nTerm, zNode, nNode, piLeaf, piLeaf2);
+ assert_fts3_nc( !piLeaf2 || !piLeaf || rc!=SQLITE_OK || (*piLeaf<=*piLeaf2) );
+
+ if( rc==SQLITE_OK && iHeight>1 ){
+ char *zBlob = 0; /* Blob read from %_segments table */
+ int nBlob = 0; /* Size of zBlob in bytes */
+
+ if( piLeaf && piLeaf2 && (*piLeaf!=*piLeaf2) ){
+ rc = sqlite3Fts3ReadBlock(p, *piLeaf, &zBlob, &nBlob, 0);
+ if( rc==SQLITE_OK ){
+ rc = fts3SelectLeaf(p, zTerm, nTerm, zBlob, nBlob, piLeaf, 0);
+ }
+ sqlite3_free(zBlob);
+ piLeaf = 0;
+ zBlob = 0;
+ }
+
+ if( rc==SQLITE_OK ){
+ rc = sqlite3Fts3ReadBlock(p, piLeaf?*piLeaf:*piLeaf2, &zBlob, &nBlob, 0);
+ }
+ if( rc==SQLITE_OK ){
+ int iNewHeight = 0;
+ fts3GetVarint32(zBlob, &iNewHeight);
+ if( iNewHeight>=iHeight ){
+ rc = FTS_CORRUPT_VTAB;
+ }else{
+ rc = fts3SelectLeaf(p, zTerm, nTerm, zBlob, nBlob, piLeaf, piLeaf2);
+ }
+ }
+ sqlite3_free(zBlob);
+ }
+
+ return rc;
+}
+
+/*
+** This function is used to create delta-encoded serialized lists of FTS3
+** varints. Each call to this function appends a single varint to a list.
+*/
+static void fts3PutDeltaVarint(
+ char **pp, /* IN/OUT: Output pointer */
+ sqlite3_int64 *piPrev, /* IN/OUT: Previous value written to list */
+ sqlite3_int64 iVal /* Write this value to the list */
+){
+ assert_fts3_nc( iVal-*piPrev > 0 || (*piPrev==0 && iVal==0) );
+ *pp += sqlite3Fts3PutVarint(*pp, iVal-*piPrev);
+ *piPrev = iVal;
+}
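+
+#if 0
+/* Illustrative sketch only - not part of the FTS3 sources. This shows
+** delta-encoding an ascending list of values as FTS3 varints, mirroring
+** fts3PutDeltaVarint() above. putVarint() is a local stand-in for
+** sqlite3Fts3PutVarint(): low 7 bits first, with the 0x80 bit set on
+** all but the final byte.
+*/
+static int putVarint(unsigned char *p, unsigned long long v){
+  int n = 0;
+  do{
+    p[n++] = (unsigned char)((v & 0x7f) | 0x80);
+    v >>= 7;
+  }while( v );
+  p[n-1] &= 0x7f;                 /* Clear the high bit on the final byte */
+  return n;
+}
+static int putDeltaList(unsigned char *pOut, const long long *aVal, int nVal){
+  long long iPrev = 0;
+  int n = 0;
+  int i;
+  for(i=0; i<nVal; i++){
+    n += putVarint(&pOut[n], (unsigned long long)(aVal[i]-iPrev));
+    iPrev = aVal[i];
+  }
+  return n;                       /* Number of bytes written */
+}
+/* The values 5, 9 and 300 are stored as the varints 5, 4 and 291. */
+#endif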
+
+/*
+** When this function is called, *ppPoslist is assumed to point to the
+** start of a position-list. After it returns, *ppPoslist points to the
+** first byte after the position-list.
+**
+** A position list is a list of positions (delta encoded) and columns for
+** a single document record of a doclist. In other words, this
+** routine advances *ppPoslist so that it points to the next docid in
+** the doclist, or to the first byte past the end of the doclist.
+**
+** If pp is not NULL, then the contents of the position list are copied
+** to *pp. *pp is set to point to the first byte past the last byte copied
+** before this function returns.
+*/
+static void fts3PoslistCopy(char **pp, char **ppPoslist){
+ char *pEnd = *ppPoslist;
+ char c = 0;
+
+ /* The end of a position list is marked by a zero encoded as an FTS3
+ ** varint - a single POS_END (0) byte. However, if that 0 byte is
+ ** preceded by a byte with the 0x80 bit set, then it is not a varint 0,
+ ** but the tail of some other, multi-byte, value.
+ **
+ ** The following while-loop moves pEnd to the first 0x00 byte that is not
+ ** immediately preceded by a byte with the 0x80 bit set. The increment
+ ** that follows then leaves pEnd pointing to the byte immediately
+ ** following the last byte in the position-list.
+ */
+ while( *pEnd | c ){
+ c = *pEnd++ & 0x80;
+ testcase( c!=0 && (*pEnd)==0 );
+ }
+ pEnd++; /* Advance past the POS_END terminator byte */
+
+ if( pp ){
+ int n = (int)(pEnd - *ppPoslist);
+ char *p = *pp;
+ memcpy(p, *ppPoslist, n);
+ p += n;
+ *pp = p;
+ }
+ *ppPoslist = pEnd;
+}
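+
+#if 0
+/* Illustrative sketch only - not part of the FTS3 sources. This is the
+** terminator scan used by fts3PoslistCopy() above as a standalone
+** function. It assumes a well-formed buffer that really does contain a
+** POS_END (0x00) byte that is not the tail of a multi-byte varint.
+*/
+#include <stddef.h>
+static size_t poslistSize(const unsigned char *a){
+  const unsigned char *p = a;
+  unsigned char c = 0;
+  /* Stop at the first 0x00 byte whose predecessor did not have the
+  ** 0x80 (varint continuation) bit set. */
+  while( *p | c ){
+    c = *p++ & 0x80;
+  }
+  return (size_t)(p-a) + 1;       /* +1 to include the POS_END byte */
+}
+/* poslistSize((const unsigned char*)"\x03\x01\x02\x03\x03\x00") is 6. */
+#endif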
+
+/*
+** When this function is called, *ppPoslist is assumed to point to the
+** start of a column-list. After it returns, *ppPoslist points to the
+** terminator (POS_COLUMN or POS_END) byte of the column-list.
+**
+** A column-list is a list of delta-encoded positions for a single column
+** within a single document within a doclist.
+**
+** The column-list is terminated either by a POS_COLUMN varint (1) or
+** a POS_END varint (0). This routine leaves *ppPoslist pointing to
+** the POS_COLUMN or POS_END that terminates the column-list.
+**
+** If pp is not NULL, then the contents of the column-list are copied
+** to *pp. *pp is set to point to the first byte past the last byte copied
+** before this function returns. The POS_COLUMN or POS_END terminator
+** is not copied into *pp.
+*/
+static void fts3ColumnlistCopy(char **pp, char **ppPoslist){
+ char *pEnd = *ppPoslist;
+ char c = 0;
+
+ /* A column-list is terminated by either a 0x01 or 0x00 byte that is
+ ** not part of a multi-byte varint.
+ */
+ while( 0xFE & (*pEnd | c) ){
+ c = *pEnd++ & 0x80;
+ testcase( c!=0 && ((*pEnd)&0xfe)==0 );
+ }
+ if( pp ){
+ int n = (int)(pEnd - *ppPoslist);
+ char *p = *pp;
+ memcpy(p, *ppPoslist, n);
+ p += n;
+ *pp = p;
+ }
+ *ppPoslist = pEnd;
+}
+
+/*
+** Value used to signify the end of a position-list. This must be
+** as large as or larger than any value that might appear on the
+** position-list, even a position list that has been corrupted.
+*/
+#define POSITION_LIST_END LARGEST_INT64
+
+/*
+** This function is used to help parse position-lists. When this function is
+** called, *pp may point to the start of the next varint in the position-list
+** being parsed, or it may point to 1 byte past the end of the position-list
+** (in which case **pp will be a terminator byte: either POS_END (0) or
+** POS_COLUMN (1)).
+**
+** If *pp points past the end of the current position-list, set *pi to
+** POSITION_LIST_END and return. Otherwise, read the next varint from *pp,
+** increment the current value of *pi by the value read, and set *pp to
+** point to the next value before returning.
+**
+** Before calling this routine *pi must be initialized to the value of
+** the previous position, or zero if we are reading the first position
+** in the position-list. Because positions are delta-encoded, the value
+** of the previous position is needed in order to compute the value of
+** the next position.
+*/
+static void fts3ReadNextPos(
+ char **pp, /* IN/OUT: Pointer into position-list buffer */
+ sqlite3_int64 *pi /* IN/OUT: Value read from position-list */
+){
+ if( (**pp)&0xFE ){
+ int iVal;
+ *pp += fts3GetVarint32((*pp), &iVal);
+ *pi += iVal;
+ *pi -= 2;
+ }else{
+ *pi = POSITION_LIST_END;
+ }
+}
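+
+#if 0
+/* Illustrative sketch only - not part of the FTS3 sources. This decodes
+** a single column-list using the same "stored value = delta + 2"
+** convention that fts3ReadNextPos() above undoes, which keeps the values
+** 0 (POS_END) and 1 (POS_COLUMN) free to act as terminators. Single-byte
+** varints are assumed for brevity.
+*/
+#include <stdio.h>
+static const unsigned char *decodeColumnlist(const unsigned char *p){
+  long long iPos = 0;
+  while( (*p)&0xFE ){             /* Stop at POS_END (0) or POS_COLUMN (1) */
+    iPos += (long long)(*p++) - 2;  /* Undo the +2 applied when stored */
+    printf("position %lld\n", iPos);
+  }
+  return p;                       /* Points at the terminator byte */
+}
+/* For the bytes 0x03 0x03 0x00 this prints positions 1 and 2. */
+#endif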
+
+/*
+** If parameter iCol is not 0, write a POS_COLUMN (1) byte followed by
+** the value of iCol encoded as a varint to *pp. This will start a new
+** column list.
+**
+** Set *pp to point to the byte just after the last byte written before
+** returning (do not modify it if iCol==0). Return the total number of bytes
+** written (0 if iCol==0).
+*/
+static int fts3PutColNumber(char **pp, int iCol){
+ int n = 0; /* Number of bytes written */
+ if( iCol ){
+ char *p = *pp; /* Output pointer */
+ n = 1 + sqlite3Fts3PutVarint(&p[1], iCol);
+ *p = 0x01;
+ *pp = &p[n];
+ }
+ return n;
+}
+
+/*
+** Compute the union of two position lists. The output written
+** into *pp contains all positions of both *pp1 and *pp2 in sorted
+** order and with any duplicates removed. All pointers are
+** updated appropriately. The caller is responsible for ensuring
+** that there is enough space in *pp to hold the complete output.
+*/
+static int fts3PoslistMerge(
+ char **pp, /* Output buffer */
+ char **pp1, /* Left input list */
+ char **pp2 /* Right input list */
+){
+ char *p = *pp;
+ char *p1 = *pp1;
+ char *p2 = *pp2;
+
+ while( *p1 || *p2 ){
+ int iCol1; /* The current column index in pp1 */
+ int iCol2; /* The current column index in pp2 */
+
+ if( *p1==POS_COLUMN ){
+ fts3GetVarint32(&p1[1], &iCol1);
+ if( iCol1==0 ) return FTS_CORRUPT_VTAB;
+ }
+ else if( *p1==POS_END ) iCol1 = 0x7fffffff;
+ else iCol1 = 0;
+
+ if( *p2==POS_COLUMN ){
+ fts3GetVarint32(&p2[1], &iCol2);
+ if( iCol2==0 ) return FTS_CORRUPT_VTAB;
+ }
+ else if( *p2==POS_END ) iCol2 = 0x7fffffff;
+ else iCol2 = 0;
+
+ if( iCol1==iCol2 ){
+ sqlite3_int64 i1 = 0; /* Last position from pp1 */
+ sqlite3_int64 i2 = 0; /* Last position from pp2 */
+ sqlite3_int64 iPrev = 0;
+ int n = fts3PutColNumber(&p, iCol1);
+ p1 += n;
+ p2 += n;
+
+ /* At this point, both p1 and p2 point to the start of column-lists
+ ** for the same column (the column with index iCol1 and iCol2).
+ ** A column-list is a list of non-negative delta-encoded varints, each
+ ** incremented by 2 before being stored. Each list is terminated by a
+ ** POS_END (0) or POS_COLUMN (1). The following block merges the two lists
+ ** and writes the results to buffer p. p is left pointing to the byte
+ ** after the list written. No terminator (POS_END or POS_COLUMN) is
+ ** written to the output.
+ */
+ fts3GetDeltaVarint(&p1, &i1);
+ fts3GetDeltaVarint(&p2, &i2);
+ if( i1<2 || i2<2 ){
+ break;
+ }
+ do {
+ fts3PutDeltaVarint(&p, &iPrev, (i1<i2) ? i1 : i2);
+ iPrev -= 2;
+ if( i1==i2 ){
+ fts3ReadNextPos(&p1, &i1);
+ fts3ReadNextPos(&p2, &i2);
+ }else if( i1<i2 ){
+ fts3ReadNextPos(&p1, &i1);
+ }else{
+ fts3ReadNextPos(&p2, &i2);
+ }
+ }while( i1!=POSITION_LIST_END || i2!=POSITION_LIST_END );
+ }else if( iCol1<iCol2 ){
+ p1 += fts3PutColNumber(&p, iCol1);
+ fts3ColumnlistCopy(&p, &p1);
+ }else{
+ p2 += fts3PutColNumber(&p, iCol2);
+ fts3ColumnlistCopy(&p, &p2);
+ }
+ }
+
+ *p++ = POS_END;
+ *pp = p;
+ *pp1 = p1 + 1;
+ *pp2 = p2 + 1;
+ return SQLITE_OK;
+}
+
+/*
+** This function is used to merge two position lists into one. When it is
+** called, *pp1 and *pp2 must both point to position lists. A position-list is
+** the part of a doclist that follows each document id. For example, if a row
+** contains:
+**
+** 'a b c'|'x y z'|'a b b a'
+**
+** Then the position list for this row for token 'b' would consist of:
+**
+** 0x03 0x01 0x02 0x03 0x03 0x00
+**
+** (a position varint of 1+2==3 for column 0, then a POS_COLUMN (1) byte
+** and column number 2, then positions 1 and 2 stored as delta+2, and
+** finally the POS_END (0) terminator).
+**
+** When this function returns, both *pp1 and *pp2 are left pointing to the
+** byte following the 0x00 terminator of their respective position lists.
+**
+** If isSaveLeft is 0, an entry is added to the output position list for
+** each position in *pp2 for which there exists one or more positions in
+** *pp1 so that (pos(*pp2)>pos(*pp1) && pos(*pp2)-pos(*pp1)<=nToken). i.e.
+** when the *pp1 token appears before the *pp2 token, but not more than nToken
+** slots before it.
+**
+** e.g. nToken==1 searches for adjacent positions.
+*/
+static int fts3PoslistPhraseMerge(
+ char **pp, /* IN/OUT: Preallocated output buffer */
+ int nToken, /* Maximum difference in token positions */
+ int isSaveLeft, /* Save the left position */
+ int isExact, /* If *pp1 is exactly nTokens before *pp2 */
+ char **pp1, /* IN/OUT: Left input list */
+ char **pp2 /* IN/OUT: Right input list */
+){
+ char *p = *pp;
+ char *p1 = *pp1;
+ char *p2 = *pp2;
+ int iCol1 = 0;
+ int iCol2 = 0;
+
+ /* Never set both isSaveLeft and isExact for the same invocation. */
+ assert( isSaveLeft==0 || isExact==0 );
+
+ assert_fts3_nc( p!=0 && *p1!=0 && *p2!=0 );
+ if( *p1==POS_COLUMN ){
+ p1++;
+ p1 += fts3GetVarint32(p1, &iCol1);
+ }
+ if( *p2==POS_COLUMN ){
+ p2++;
+ p2 += fts3GetVarint32(p2, &iCol2);
+ }
+
+ while( 1 ){
+ if( iCol1==iCol2 ){
+ char *pSave = p;
+ sqlite3_int64 iPrev = 0;
+ sqlite3_int64 iPos1 = 0;
+ sqlite3_int64 iPos2 = 0;
+
+ if( iCol1 ){
+ *p++ = POS_COLUMN;
+ p += sqlite3Fts3PutVarint(p, iCol1);
+ }
+
+ fts3GetDeltaVarint(&p1, &iPos1); iPos1 -= 2;
+ fts3GetDeltaVarint(&p2, &iPos2); iPos2 -= 2;
+ if( iPos1<0 || iPos2<0 ) break;
+
+ while( 1 ){
+ if( iPos2==iPos1+nToken
+ || (isExact==0 && iPos2>iPos1 && iPos2<=iPos1+nToken)
+ ){
+ sqlite3_int64 iSave;
+ iSave = isSaveLeft ? iPos1 : iPos2;
+ fts3PutDeltaVarint(&p, &iPrev, iSave+2); iPrev -= 2;
+ pSave = 0;
+ assert( p );
+ }
+ if( (!isSaveLeft && iPos2<=(iPos1+nToken)) || iPos2<=iPos1 ){
+ if( (*p2&0xFE)==0 ) break;
+ fts3GetDeltaVarint(&p2, &iPos2); iPos2 -= 2;
+ }else{
+ if( (*p1&0xFE)==0 ) break;
+ fts3GetDeltaVarint(&p1, &iPos1); iPos1 -= 2;
+ }
+ }
+
+ if( pSave ){
+ assert( pp && p );
+ p = pSave;
+ }
+
+ fts3ColumnlistCopy(0, &p1);
+ fts3ColumnlistCopy(0, &p2);
+ assert( (*p1&0xFE)==0 && (*p2&0xFE)==0 );
+ if( 0==*p1 || 0==*p2 ) break;
+
+ p1++;
+ p1 += fts3GetVarint32(p1, &iCol1);
+ p2++;
+ p2 += fts3GetVarint32(p2, &iCol2);
+ }
+
+ /* Advance pointer p1 or p2 (whichever corresponds to the smaller of
+ ** iCol1 and iCol2) so that it points to either the 0x00 that marks the
+ ** end of the position list, or the 0x01 that precedes the next
+ ** column-number in the position list.
+ */
+ else if( iCol1<iCol2 ){
+ fts3ColumnlistCopy(0, &p1);
+ if( 0==*p1 ) break;
+ p1++;
+ p1 += fts3GetVarint32(p1, &iCol1);
+ }else{
+ fts3ColumnlistCopy(0, &p2);
+ if( 0==*p2 ) break;
+ p2++;
+ p2 += fts3GetVarint32(p2, &iCol2);
+ }
+ }
+
+ fts3PoslistCopy(0, &p2);
+ fts3PoslistCopy(0, &p1);
+ *pp1 = p1;
+ *pp2 = p2;
+ if( *pp==p ){
+ return 0;
+ }
+ *p++ = 0x00;
+ *pp = p;
+ return 1;
+}
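+
+#if 0
+/* Illustrative sketch only - not part of the FTS3 sources. This is the
+** core test applied by fts3PoslistPhraseMerge() above, reduced to plain
+** position arrays: a right-hand position survives if some left-hand
+** position is exactly nDist tokens before it (the isExact case used for
+** phrase queries).
+*/
+static int phraseMatchCount(
+  const int *aL, int nL,          /* Ascending positions of left token */
+  const int *aR, int nR,          /* Ascending positions of right token */
+  int nDist                       /* Required distance (1==adjacent) */
+){
+  int i = 0, j = 0, nMatch = 0;
+  while( i<nL && j<nR ){
+    if( aL[i]+nDist==aR[j] ){ nMatch++; i++; j++; }
+    else if( aL[i]+nDist<aR[j] ){ i++; }
+    else{ j++; }
+  }
+  return nMatch;
+}
+/* For column 'a b b a': aL(token 'a') = {0,3} and aR(token 'b') = {1,2}.
+** With nDist==1 this reports one match - the phrase "a b" at positions
+** 0 and 1. */
+#endif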
+
+/*
+** Merge two position-lists as required by the NEAR operator. The argument
+** position lists correspond to the left and right phrases of an expression
+** like:
+**
+** "phrase 1" NEAR "phrase number 2"
+**
+** Position list *pp1 corresponds to the left-hand side of the NEAR
+** expression and *pp2 to the right. As usual, the indexes in the position
+** lists are the offsets of the last token in each phrase (tokens "1" and "2"
+** in the example above).
+**
+** The output position list - written to *pp - is a copy of *pp2 with those
+** entries that are not sufficiently close to an entry in *pp1 removed.
+*/
+static int fts3PoslistNearMerge(
+ char **pp, /* Output buffer */
+ char *aTmp, /* Temporary buffer space */
+ int nRight, /* Max tokens the right phrase may follow the left by */
+ int nLeft, /* Max tokens the left phrase may follow the right by */
+ char **pp1, /* IN/OUT: Left input list */
+ char **pp2 /* IN/OUT: Right input list */
+){
+ char *p1 = *pp1;
+ char *p2 = *pp2;
+
+ char *pTmp1 = aTmp;
+ char *pTmp2;
+ char *aTmp2;
+ int res = 1;
+
+ fts3PoslistPhraseMerge(&pTmp1, nRight, 0, 0, pp1, pp2);
+ aTmp2 = pTmp2 = pTmp1;
+ *pp1 = p1;
+ *pp2 = p2;
+ fts3PoslistPhraseMerge(&pTmp2, nLeft, 1, 0, pp2, pp1);
+ if( pTmp1!=aTmp && pTmp2!=aTmp2 ){
+ fts3PoslistMerge(pp, &aTmp, &aTmp2);
+ }else if( pTmp1!=aTmp ){
+ fts3PoslistCopy(pp, &aTmp);
+ }else if( pTmp2!=aTmp2 ){
+ fts3PoslistCopy(pp, &aTmp2);
+ }else{
+ res = 0;
+ }
+
+ return res;
+}
+
+/*
+** An instance of this function is used to merge together the (potentially
+** large number of) doclists for each term that matches a prefix query.
+** See function fts3TermSelectMerge() for details.
+*/
+typedef struct TermSelect TermSelect;
+struct TermSelect {
+ char *aaOutput[16]; /* Malloc'd output buffers */
+ int anOutput[16]; /* Size each output buffer in bytes */
+};
+
+/*
+** This function is used to read a single varint from a buffer. Parameter
+** pEnd points 1 byte past the end of the buffer. When this function is
+** called, if *pp points to pEnd or greater, then the end of the buffer
+** has been reached. In this case *pp is set to 0 and the function returns.
+**
+** If *pp does not point to or past pEnd, then a single varint is read
+** from *pp. *pp is then set to point 1 byte past the end of the read varint.
+**
+** If bDescIdx is false, the value read is added to *pVal before returning.
+** If it is true, the value read is subtracted from *pVal before this
+** function returns.
+*/
+static void fts3GetDeltaVarint3(
+ char **pp, /* IN/OUT: Point to read varint from */
+ char *pEnd, /* End of buffer */
+ int bDescIdx, /* True if docids are descending */
+ sqlite3_int64 *pVal /* IN/OUT: Integer value */
+){
+ if( *pp>=pEnd ){
+ *pp = 0;
+ }else{
+ u64 iVal;
+ *pp += sqlite3Fts3GetVarintU(*pp, &iVal);
+ if( bDescIdx ){
+ *pVal = (i64)((u64)*pVal - iVal);
+ }else{
+ *pVal = (i64)((u64)*pVal + iVal);
+ }
+ }
+}
+
+/*
+** This function is used to write a single varint to a buffer. The varint
+** is written to *pp. Before returning, *pp is set to point 1 byte past the
+** end of the value written.
+**
+** If *pbFirst is zero when this function is called, the value written to
+** the buffer is that of parameter iVal.
+**
+** If *pbFirst is non-zero when this function is called, then the value
+** written is either (iVal-*piPrev) (if bDescIdx is zero) or (*piPrev-iVal)
+** (if bDescIdx is non-zero).
+**
+** Before returning, this function always sets *pbFirst to 1 and *piPrev
+** to the value of parameter iVal.
+*/
+static void fts3PutDeltaVarint3(
+ char **pp, /* IN/OUT: Output pointer */
+ int bDescIdx, /* True for descending docids */
+ sqlite3_int64 *piPrev, /* IN/OUT: Previous value written to list */
+ int *pbFirst, /* IN/OUT: True after first int written */
+ sqlite3_int64 iVal /* Write this value to the list */
+){
+ sqlite3_uint64 iWrite;
+ if( bDescIdx==0 || *pbFirst==0 ){
+ assert_fts3_nc( *pbFirst==0 || iVal>=*piPrev );
+ iWrite = (u64)iVal - (u64)*piPrev;
+ }else{
+ assert_fts3_nc( *piPrev>=iVal );
+ iWrite = (u64)*piPrev - (u64)iVal;
+ }
+ assert( *pbFirst || *piPrev==0 );
+ assert_fts3_nc( *pbFirst==0 || iWrite>0 );
+ *pp += sqlite3Fts3PutVarint(*pp, iWrite);
+ *piPrev = iVal;
+ *pbFirst = 1;
+}
+
+
+/*
+** This macro is used by various functions that merge doclists. The two
+** arguments are 64-bit docid values. If the value of the stack variable
+** bDescDoclist is 0 when this macro is invoked, then it evaluates to a
+** negative, zero or positive value according to the sign of (i1-i2).
+** Otherwise, it reflects the sign of (i2-i1).
+**
+** Using this makes it easier to write code that can merge doclists that are
+** sorted in either ascending or descending order.
+*/
+/* #define DOCID_CMP(i1, i2) ((bDescDoclist?-1:1) * (i64)((u64)i1-i2)) */
+#define DOCID_CMP(i1, i2) ((bDescDoclist?-1:1) * (i1>i2?1:((i1==i2)?0:-1)))
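+
+#if 0
+/* Illustrative sketch only - not part of the FTS3 sources. The
+** comparison form of DOCID_CMP above written out as a function. Unlike
+** the commented-out subtraction form, it cannot overflow when the two
+** docids are more than 2^63 apart.
+*/
+static int docidCmp(long long i1, long long i2, int bDescDoclist){
+  int c = (i1>i2) ? 1 : ((i1==i2) ? 0 : -1);
+  return bDescDoclist ? -c : c;
+}
+#endif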
+
+/*
+** This function does an "OR" merge of two doclists (output contains all
+** positions contained in either argument doclist). If the docids in the
+** input doclists are sorted in ascending order, parameter bDescDoclist
+** should be false. If they are sorted in descending order, it should be
+** passed a non-zero value.
+**
+** If no error occurs, *paOut is set to point at an sqlite3_malloc'd buffer
+** containing the output doclist and SQLITE_OK is returned. In this case
+** *pnOut is set to the number of bytes in the output doclist.
+**
+** If an error occurs, an SQLite error code is returned. The output values
+** are undefined in this case.
+*/
+static int fts3DoclistOrMerge(
+ int bDescDoclist, /* True if arguments are desc */
+ char *a1, int n1, /* First doclist */
+ char *a2, int n2, /* Second doclist */
+ char **paOut, int *pnOut /* OUT: Malloc'd doclist */
+){
+ int rc = SQLITE_OK;
+ sqlite3_int64 i1 = 0;
+ sqlite3_int64 i2 = 0;
+ sqlite3_int64 iPrev = 0;
+ char *pEnd1 = &a1[n1];
+ char *pEnd2 = &a2[n2];
+ char *p1 = a1;
+ char *p2 = a2;
+ char *p;
+ char *aOut;
+ int bFirstOut = 0;
+
+ *paOut = 0;
+ *pnOut = 0;
+
+ /* Allocate space for the output. Both the input and output doclists
+ ** are delta encoded. If they are in ascending order (bDescDoclist==0),
+ ** then the first docid in each list is simply encoded as a varint. For
+ ** each subsequent docid, the varint stored is the difference between the
+ ** current and previous docid (a positive number - since the list is in
+ ** ascending order).
+ **
+ ** The first docid written to the output is therefore encoded using the
+ ** same number of bytes as it is in whichever of the input lists it is
+ ** read from. And each subsequent docid read from the same input list
+ ** consumes the same number of bytes as it did in the input, or fewer (since
+ ** the difference between it and the previous value in the output must
+ ** be a positive value less than or equal to the delta value read from
+ ** the input list). The same argument applies to all but the first docid
+ ** read from the 'other' list. And to the contents of all position lists
+ ** that will be copied and merged from the input to the output.
+ **
+ ** However, if the first docid copied to the output is a negative number,
+ ** then the encoding of the first docid from the 'other' input list may
+ ** be larger in the output than it was in the input (since the delta value
+ ** may be a larger positive integer than the actual docid).
+ **
+ ** The space required to store the output is therefore the sum of the
+ ** sizes of the two inputs, plus enough space for exactly one of the input
+ ** docids to grow.
+ **
+ ** A symmetric argument may be made if the doclists are in descending
+ ** order.
+ */
+ aOut = sqlite3_malloc64((i64)n1+n2+FTS3_VARINT_MAX-1+FTS3_BUFFER_PADDING);
+ if( !aOut ) return SQLITE_NOMEM;
+
+ p = aOut;
+ fts3GetDeltaVarint3(&p1, pEnd1, 0, &i1);
+ fts3GetDeltaVarint3(&p2, pEnd2, 0, &i2);
+ while( p1 || p2 ){
+ sqlite3_int64 iDiff = DOCID_CMP(i1, i2);
+
+ if( p2 && p1 && iDiff==0 ){
+ fts3PutDeltaVarint3(&p, bDescDoclist, &iPrev, &bFirstOut, i1);
+ rc = fts3PoslistMerge(&p, &p1, &p2);
+ if( rc ) break;
+ fts3GetDeltaVarint3(&p1, pEnd1, bDescDoclist, &i1);
+ fts3GetDeltaVarint3(&p2, pEnd2, bDescDoclist, &i2);
+ }else if( !p2 || (p1 && iDiff<0) ){
+ fts3PutDeltaVarint3(&p, bDescDoclist, &iPrev, &bFirstOut, i1);
+ fts3PoslistCopy(&p, &p1);
+ fts3GetDeltaVarint3(&p1, pEnd1, bDescDoclist, &i1);
+ }else{
+ fts3PutDeltaVarint3(&p, bDescDoclist, &iPrev, &bFirstOut, i2);
+ fts3PoslistCopy(&p, &p2);
+ fts3GetDeltaVarint3(&p2, pEnd2, bDescDoclist, &i2);
+ }
+
+ assert( (p-aOut)<=((p1?(p1-a1):n1)+(p2?(p2-a2):n2)+FTS3_VARINT_MAX-1) );
+ }
+
+ if( rc!=SQLITE_OK ){
+ sqlite3_free(aOut);
+ p = aOut = 0;
+ }else{
+ assert( (p-aOut)<=n1+n2+FTS3_VARINT_MAX-1 );
+ memset(&aOut[(p-aOut)], 0, FTS3_BUFFER_PADDING);
+ }
+ *paOut = aOut;
+ *pnOut = (int)(p-aOut);
+ return rc;
+}
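+
+#if 0
+/* Illustrative sketch only - not part of the FTS3 sources. This shows
+** the docid-level structure of the OR merge performed by
+** fts3DoclistOrMerge() above, on plain ascending arrays (delta encoding
+** and position lists omitted). Docids present in both inputs are
+** emitted once.
+*/
+static int docidOrMerge(
+  const long long *a1, int n1,    /* First ascending docid array */
+  const long long *a2, int n2,    /* Second ascending docid array */
+  long long *aOut                 /* OUT: Space for at least n1+n2 */
+){
+  int i = 0, j = 0, n = 0;
+  while( i<n1 || j<n2 ){
+    if( j>=n2 || (i<n1 && a1[i]<a2[j]) ){
+      aOut[n++] = a1[i++];
+    }else if( i>=n1 || a2[j]<a1[i] ){
+      aOut[n++] = a2[j++];
+    }else{
+      aOut[n++] = a1[i]; i++; j++;   /* Docid in both inputs */
+    }
+  }
+  return n;
+}
+#endif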
+
+/*
+** This function does a "phrase" merge of two doclists. In a phrase merge,
+** the output contains a copy of each position from the right-hand input
+** doclist for which there is a position in the left-hand input doclist
+** exactly nDist tokens before it.
+**
+** If the docids in the input doclists are sorted in ascending order,
+** parameter bDescDoclist should be false. If they are sorted in descending
+** order, it should be passed a non-zero value.
+**
+** The right-hand input doclist is overwritten by this function.
+*/
+static int fts3DoclistPhraseMerge(
+ int bDescDoclist, /* True if arguments are desc */
+ int nDist, /* Distance from left to right (1=adjacent) */
+ char *aLeft, int nLeft, /* Left doclist */
+ char **paRight, int *pnRight /* IN/OUT: Right/output doclist */
+){
+ sqlite3_int64 i1 = 0;
+ sqlite3_int64 i2 = 0;
+ sqlite3_int64 iPrev = 0;
+ char *aRight = *paRight;
+ char *pEnd1 = &aLeft[nLeft];
+ char *pEnd2 = &aRight[*pnRight];
+ char *p1 = aLeft;
+ char *p2 = aRight;
+ char *p;
+ int bFirstOut = 0;
+ char *aOut;
+
+ assert( nDist>0 );
+ if( bDescDoclist ){
+ aOut = sqlite3_malloc64((sqlite3_int64)*pnRight + FTS3_VARINT_MAX);
+ if( aOut==0 ) return SQLITE_NOMEM;
+ }else{
+ aOut = aRight;
+ }
+ p = aOut;
+
+ fts3GetDeltaVarint3(&p1, pEnd1, 0, &i1);
+ fts3GetDeltaVarint3(&p2, pEnd2, 0, &i2);
+
+ while( p1 && p2 ){
+ sqlite3_int64 iDiff = DOCID_CMP(i1, i2);
+ if( iDiff==0 ){
+ char *pSave = p;
+ sqlite3_int64 iPrevSave = iPrev;
+ int bFirstOutSave = bFirstOut;
+
+ fts3PutDeltaVarint3(&p, bDescDoclist, &iPrev, &bFirstOut, i1);
+ if( 0==fts3PoslistPhraseMerge(&p, nDist, 0, 1, &p1, &p2) ){
+ p = pSave;
+ iPrev = iPrevSave;
+ bFirstOut = bFirstOutSave;
+ }
+ fts3GetDeltaVarint3(&p1, pEnd1, bDescDoclist, &i1);
+ fts3GetDeltaVarint3(&p2, pEnd2, bDescDoclist, &i2);
+ }else if( iDiff<0 ){
+ fts3PoslistCopy(0, &p1);
+ fts3GetDeltaVarint3(&p1, pEnd1, bDescDoclist, &i1);
+ }else{
+ fts3PoslistCopy(0, &p2);
+ fts3GetDeltaVarint3(&p2, pEnd2, bDescDoclist, &i2);
+ }
+ }
+
+ *pnRight = (int)(p - aOut);
+ if( bDescDoclist ){
+ sqlite3_free(aRight);
+ *paRight = aOut;
+ }
+
+ return SQLITE_OK;
+}
+
+/*
+** Argument pList points to a position list nList bytes in size. This
+** function checks to see if the position list contains any entries for
+** a token in position 0 (of any column). If so, it writes argument iDelta
+** to the output buffer pOut, followed by a position list consisting only
+** of the entries from pList at position 0, and terminated by a 0x00 byte.
+** The value returned is the number of bytes written to pOut (if any).
+*/
+SQLITE_PRIVATE int sqlite3Fts3FirstFilter(
+ sqlite3_int64 iDelta, /* Varint that may be written to pOut */
+ char *pList, /* Position list (no 0x00 term) */
+ int nList, /* Size of pList in bytes */
+ char *pOut /* Write output here */
+){
+ int nOut = 0;
+ int bWritten = 0; /* True once iDelta has been written */
+ char *p = pList;
+ char *pEnd = &pList[nList];
+
+ if( *p!=0x01 ){
+ if( *p==0x02 ){
+ nOut += sqlite3Fts3PutVarint(&pOut[nOut], iDelta);
+ pOut[nOut++] = 0x02;
+ bWritten = 1;
+ }
+ fts3ColumnlistCopy(0, &p);
+ }
+
+ while( p<pEnd ){
+ sqlite3_int64 iCol;
+ p++;
+ p += sqlite3Fts3GetVarint(p, &iCol);
+ if( *p==0x02 ){
+ if( bWritten==0 ){
+ nOut += sqlite3Fts3PutVarint(&pOut[nOut], iDelta);
+ bWritten = 1;
+ }
+ pOut[nOut++] = 0x01;
+ nOut += sqlite3Fts3PutVarint(&pOut[nOut], iCol);
+ pOut[nOut++] = 0x02;
+ }
+ fts3ColumnlistCopy(0, &p);
+ }
+ if( bWritten ){
+ pOut[nOut++] = 0x00;
+ }
+
+ return nOut;
+}
+
+
+/*
+** Merge all doclists in the TermSelect.aaOutput[] array into a single
+** doclist stored in TermSelect.aaOutput[0]. If successful, delete all
+** other doclists (except the aaOutput[0] one) and return SQLITE_OK.
+**
+** If an OOM error occurs, return SQLITE_NOMEM. In this case it is
+** the responsibility of the caller to free any doclists left in the
+** TermSelect.aaOutput[] array.
+*/
+static int fts3TermSelectFinishMerge(Fts3Table *p, TermSelect *pTS){
+ char *aOut = 0;
+ int nOut = 0;
+ int i;
+
+ /* Loop through the doclists in the aaOutput[] array. Merge them all
+ ** into a single doclist.
+ */
+ for(i=0; i<SizeofArray(pTS->aaOutput); i++){
+ if( pTS->aaOutput[i] ){
+ if( !aOut ){
+ aOut = pTS->aaOutput[i];
+ nOut = pTS->anOutput[i];
+ pTS->aaOutput[i] = 0;
+ }else{
+ int nNew;
+ char *aNew;
+
+ int rc = fts3DoclistOrMerge(p->bDescIdx,
+ pTS->aaOutput[i], pTS->anOutput[i], aOut, nOut, &aNew, &nNew
+ );
+ if( rc!=SQLITE_OK ){
+ sqlite3_free(aOut);
+ return rc;
+ }
+
+ sqlite3_free(pTS->aaOutput[i]);
+ sqlite3_free(aOut);
+ pTS->aaOutput[i] = 0;
+ aOut = aNew;
+ nOut = nNew;
+ }
+ }
+ }
+
+ pTS->aaOutput[0] = aOut;
+ pTS->anOutput[0] = nOut;
+ return SQLITE_OK;
+}
+
+/*
+** Merge the doclist aDoclist/nDoclist into the TermSelect object passed
+** as the first argument. The merge is an "OR" merge (see function
+** fts3DoclistOrMerge() for details).
+**
+** This function is called with the doclist for each term that matches
+** a queried prefix. It merges all these doclists into one, the doclist
+** for the specified prefix. Since there can be a very large number of
+** doclists to merge, the merging is done pair-wise using the TermSelect
+** object.
+**
+** This function returns SQLITE_OK if the merge is successful, or an
+** SQLite error code (SQLITE_NOMEM) if an error occurs.
+*/
+static int fts3TermSelectMerge(
+ Fts3Table *p, /* FTS table handle */
+ TermSelect *pTS, /* TermSelect object to merge into */
+ char *aDoclist, /* Pointer to doclist */
+ int nDoclist /* Size of aDoclist in bytes */
+){
+ if( pTS->aaOutput[0]==0 ){
+ /* If this is the first term selected, copy the doclist to the output
+ ** buffer using memcpy().
+ **
+ ** Add FTS3_VARINT_MAX bytes of unused space to the end of the
+ ** allocation. This is so as to ensure that the buffer is big enough
+ ** to hold the current doclist AND'd with any other doclist. If the
+ ** doclists are stored in order=ASC order, this padding would not be
+ ** required (since the size of [doclistA AND doclistB] is always less
+ ** than or equal to the size of [doclistA] in that case). But this is
+ ** not true for order=DESC. For example, a doclist containing (1, -1)
+ ** may be smaller than (-1), as in the first example the -1 may be stored
+ ** as a single-byte delta, whereas in the second it must be stored as a
+ ** FTS3_VARINT_MAX byte varint.
+ **
+ ** Similar padding is added in the fts3DoclistOrMerge() function.
+ */
+ pTS->aaOutput[0] = sqlite3_malloc(nDoclist + FTS3_VARINT_MAX + 1);
+ pTS->anOutput[0] = nDoclist;
+ if( pTS->aaOutput[0] ){
+ memcpy(pTS->aaOutput[0], aDoclist, nDoclist);
+ memset(&pTS->aaOutput[0][nDoclist], 0, FTS3_VARINT_MAX);
+ }else{
+ return SQLITE_NOMEM;
+ }
+ }else{
+ char *aMerge = aDoclist;
+ int nMerge = nDoclist;
+ int iOut;
+
+ for(iOut=0; iOut<SizeofArray(pTS->aaOutput); iOut++){
+ if( pTS->aaOutput[iOut]==0 ){
+ assert( iOut>0 );
+ pTS->aaOutput[iOut] = aMerge;
+ pTS->anOutput[iOut] = nMerge;
+ break;
+ }else{
+ char *aNew;
+ int nNew;
+
+ int rc = fts3DoclistOrMerge(p->bDescIdx, aMerge, nMerge,
+ pTS->aaOutput[iOut], pTS->anOutput[iOut], &aNew, &nNew
+ );
+ if( rc!=SQLITE_OK ){
+ if( aMerge!=aDoclist ) sqlite3_free(aMerge);
+ return rc;
+ }
+
+ if( aMerge!=aDoclist ) sqlite3_free(aMerge);
+ sqlite3_free(pTS->aaOutput[iOut]);
+ pTS->aaOutput[iOut] = 0;
+
+ aMerge = aNew;
+ nMerge = nNew;
+ if( (iOut+1)==SizeofArray(pTS->aaOutput) ){
+ pTS->aaOutput[iOut] = aMerge;
+ pTS->anOutput[iOut] = nMerge;
+ }
+ }
+ }
+ }
+ return SQLITE_OK;
+}
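+
+#if 0
+/* Illustrative sketch only - not part of the FTS3 sources. This is the
+** binary accumulator pattern used by fts3TermSelectMerge() above, shown
+** here for integer "inputs" with addition standing in for the doclist
+** OR merge: slot i holds the combination of roughly 2^i inputs, so N
+** inputs are combined using O(log N) live slots and each input takes
+** part in only O(log N) merge operations.
+*/
+#define ACCUM_NSLOT 16
+typedef struct Accum Accum;
+struct Accum {
+  long long aSlot[ACCUM_NSLOT];   /* Partial results (zero-initialize) */
+  int aUsed[ACCUM_NSLOT];         /* True if the matching slot is live */
+};
+static void accumAdd(Accum *p, long long v){
+  int i;
+  for(i=0; i<ACCUM_NSLOT; i++){
+    if( !p->aUsed[i] ){
+      p->aSlot[i] = v;            /* Park v in the first empty slot */
+      p->aUsed[i] = 1;
+      return;
+    }
+    v += p->aSlot[i];             /* "Merge" with the occupant and carry */
+    p->aUsed[i] = 0;
+  }
+  p->aSlot[ACCUM_NSLOT-1] = v;    /* All slots full: keep in last slot */
+  p->aUsed[ACCUM_NSLOT-1] = 1;
+}
+#endif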
+
+/*
+** Append SegReader object pNew to the end of the pCsr->apSegment[] array.
+*/
+static int fts3SegReaderCursorAppend(
+ Fts3MultiSegReader *pCsr,
+ Fts3SegReader *pNew
+){
+ if( (pCsr->nSegment%16)==0 ){
+ Fts3SegReader **apNew;
+ sqlite3_int64 nByte = (pCsr->nSegment + 16)*sizeof(Fts3SegReader*);
+ apNew = (Fts3SegReader **)sqlite3_realloc64(pCsr->apSegment, nByte);
+ if( !apNew ){
+ sqlite3Fts3SegReaderFree(pNew);
+ return SQLITE_NOMEM;
+ }
+ pCsr->apSegment = apNew;
+ }
+ pCsr->apSegment[pCsr->nSegment++] = pNew;
+ return SQLITE_OK;
+}
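+
+#if 0
+/* Illustrative sketch only - not part of the FTS3 sources. The
+** grow-in-chunks pattern used by fts3SegReaderCursorAppend() above, for
+** a plain int array: reallocating only when (n%16)==0 limits the number
+** of realloc() calls to one per sixteen appends.
+*/
+#include <stdlib.h>
+static int appendInt(int **pa, int *pn, int v){
+  if( ((*pn)%16)==0 ){
+    int *aNew = (int *)realloc(*pa, ((size_t)(*pn)+16)*sizeof(int));
+    if( aNew==0 ) return 1;       /* OOM: the caller keeps the old array */
+    *pa = aNew;
+  }
+  (*pa)[(*pn)++] = v;
+  return 0;
+}
+#endif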
+
+/*
+** Add seg-reader objects to the Fts3MultiSegReader object passed as the
+** 8th argument.
+**
+** This function returns SQLITE_OK if successful, or an SQLite error code
+** otherwise.
+*/
+static int fts3SegReaderCursor(
+ Fts3Table *p, /* FTS3 table handle */
+ int iLangid, /* Language id */
+ int iIndex, /* Index to search (from 0 to p->nIndex-1) */
+ int iLevel, /* Level of segments to scan */
+ const char *zTerm, /* Term to query for */
+ int nTerm, /* Size of zTerm in bytes */
+ int isPrefix, /* True for a prefix search */
+ int isScan, /* True to scan from zTerm to EOF */
+ Fts3MultiSegReader *pCsr /* Cursor object to populate */
+){
+ int rc = SQLITE_OK; /* Error code */
+ sqlite3_stmt *pStmt = 0; /* Statement to iterate through segments */
+ int rc2; /* Result of sqlite3_reset() */
+
+ /* If iLevel is less than 0 and this is not a scan, include a seg-reader
+ ** for the pending-terms. If this is a scan, then this call must have been
+ ** made by an fts4aux module, not an FTS table. In this case calling
+ ** Fts3SegReaderPending might segfault, as the data structures used by
+ ** fts4aux are not completely populated. So it's easiest to filter these
+ ** calls out here. */
+ if( iLevel<0 && p->aIndex && p->iPrevLangid==iLangid ){
+ Fts3SegReader *pSeg = 0;
+ rc = sqlite3Fts3SegReaderPending(p, iIndex, zTerm, nTerm, isPrefix||isScan, &pSeg);
+ if( rc==SQLITE_OK && pSeg ){
+ rc = fts3SegReaderCursorAppend(pCsr, pSeg);
+ }
+ }
+
+ if( iLevel!=FTS3_SEGCURSOR_PENDING ){
+ if( rc==SQLITE_OK ){
+ rc = sqlite3Fts3AllSegdirs(p, iLangid, iIndex, iLevel, &pStmt);
+ }
+
+ while( rc==SQLITE_OK && SQLITE_ROW==(rc = sqlite3_step(pStmt)) ){
+ Fts3SegReader *pSeg = 0;
+
+ /* Read the values returned by the SELECT into local variables. */
+ sqlite3_int64 iStartBlock = sqlite3_column_int64(pStmt, 1);
+ sqlite3_int64 iLeavesEndBlock = sqlite3_column_int64(pStmt, 2);
+ sqlite3_int64 iEndBlock = sqlite3_column_int64(pStmt, 3);
+ int nRoot = sqlite3_column_bytes(pStmt, 4);
+ char const *zRoot = sqlite3_column_blob(pStmt, 4);
+
+ /* If zTerm is not NULL, and this segment is not stored entirely on its
+ ** root node, the range of leaves scanned can be reduced. Do this. */
+ if( iStartBlock && zTerm && zRoot ){
+ sqlite3_int64 *pi = (isPrefix ? &iLeavesEndBlock : 0);
+ rc = fts3SelectLeaf(p, zTerm, nTerm, zRoot, nRoot, &iStartBlock, pi);
+ if( rc!=SQLITE_OK ) goto finished;
+ if( isPrefix==0 && isScan==0 ) iLeavesEndBlock = iStartBlock;
+ }
+
+ rc = sqlite3Fts3SegReaderNew(pCsr->nSegment+1,
+ (isPrefix==0 && isScan==0),
+ iStartBlock, iLeavesEndBlock,
+ iEndBlock, zRoot, nRoot, &pSeg
+ );
+ if( rc!=SQLITE_OK ) goto finished;
+ rc = fts3SegReaderCursorAppend(pCsr, pSeg);
+ }
+ }
+
+ finished:
+ rc2 = sqlite3_reset(pStmt);
+ if( rc==SQLITE_DONE ) rc = rc2;
+
+ return rc;
+}
+
+/*
+** Set up a cursor object for iterating through a full-text index or a
+** single level therein.
+*/
+SQLITE_PRIVATE int sqlite3Fts3SegReaderCursor(
+ Fts3Table *p, /* FTS3 table handle */
+ int iLangid, /* Language-id to search */
+ int iIndex, /* Index to search (from 0 to p->nIndex-1) */
+ int iLevel, /* Level of segments to scan */
+ const char *zTerm, /* Term to query for */
+ int nTerm, /* Size of zTerm in bytes */
+ int isPrefix, /* True for a prefix search */
+ int isScan, /* True to scan from zTerm to EOF */
+ Fts3MultiSegReader *pCsr /* Cursor object to populate */
+){
+ assert( iIndex>=0 && iIndex<p->nIndex );
+ assert( iLevel==FTS3_SEGCURSOR_ALL
+ || iLevel==FTS3_SEGCURSOR_PENDING
+ || iLevel>=0
+ );
+ assert( iLevel<FTS3_SEGDIR_MAXLEVEL );
+ assert( FTS3_SEGCURSOR_ALL<0 && FTS3_SEGCURSOR_PENDING<0 );
+ assert( isPrefix==0 || isScan==0 );
+
+ memset(pCsr, 0, sizeof(Fts3MultiSegReader));
+ return fts3SegReaderCursor(
+ p, iLangid, iIndex, iLevel, zTerm, nTerm, isPrefix, isScan, pCsr
+ );
+}
+
+/*
+** In addition to its current configuration, have the Fts3MultiSegReader
+** passed as the 4th argument also scan the doclist for term zTerm/nTerm.
+**
+** SQLITE_OK is returned if no error occurs, otherwise an SQLite error code.
+*/
+static int fts3SegReaderCursorAddZero(
+ Fts3Table *p, /* FTS virtual table handle */
+ int iLangid,
+ const char *zTerm, /* Term to scan doclist of */
+ int nTerm, /* Number of bytes in zTerm */
+ Fts3MultiSegReader *pCsr /* Fts3MultiSegReader to modify */
+){
+ return fts3SegReaderCursor(p,
+ iLangid, 0, FTS3_SEGCURSOR_ALL, zTerm, nTerm, 0, 0,pCsr
+ );
+}
+
+/*
+** Open an Fts3MultiSegReader to scan the doclist for term zTerm/nTerm. Or,
+** if isPrefix is true, to scan the doclist for all terms for which
+** zTerm/nTerm is a prefix. If successful, return SQLITE_OK and write
+** a pointer to the new Fts3MultiSegReader to *ppSegcsr. Otherwise, return
+** an SQLite error code.
+**
+** It is the responsibility of the caller to free this object by eventually
+** passing it to fts3SegReaderCursorFree().
+**
+** SQLITE_OK is returned if no error occurs, otherwise an SQLite error code.
+** Output parameter *ppSegcsr is set to 0 if an error occurs.
+*/
+static int fts3TermSegReaderCursor(
+ Fts3Cursor *pCsr, /* Virtual table cursor handle */
+ const char *zTerm, /* Term to query for */
+ int nTerm, /* Size of zTerm in bytes */
+ int isPrefix, /* True for a prefix search */
+ Fts3MultiSegReader **ppSegcsr /* OUT: Allocated seg-reader cursor */
+){
+ Fts3MultiSegReader *pSegcsr; /* Object to allocate and return */
+ int rc = SQLITE_NOMEM; /* Return code */
+
+ pSegcsr = sqlite3_malloc(sizeof(Fts3MultiSegReader));
+ if( pSegcsr ){
+ int i;
+ int bFound = 0; /* True once an index has been found */
+ Fts3Table *p = (Fts3Table *)pCsr->base.pVtab;
+
+ if( isPrefix ){
+ for(i=1; bFound==0 && i<p->nIndex; i++){
+ if( p->aIndex[i].nPrefix==nTerm ){
+ bFound = 1;
+ rc = sqlite3Fts3SegReaderCursor(p, pCsr->iLangid,
+ i, FTS3_SEGCURSOR_ALL, zTerm, nTerm, 0, 0, pSegcsr
+ );
+ pSegcsr->bLookup = 1;
+ }
+ }
+
+ for(i=1; bFound==0 && i<p->nIndex; i++){
+ if( p->aIndex[i].nPrefix==nTerm+1 ){
+ bFound = 1;
+ rc = sqlite3Fts3SegReaderCursor(p, pCsr->iLangid,
+ i, FTS3_SEGCURSOR_ALL, zTerm, nTerm, 1, 0, pSegcsr
+ );
+ if( rc==SQLITE_OK ){
+ rc = fts3SegReaderCursorAddZero(
+ p, pCsr->iLangid, zTerm, nTerm, pSegcsr
+ );
+ }
+ }
+ }
+ }
+
+ if( bFound==0 ){
+ rc = sqlite3Fts3SegReaderCursor(p, pCsr->iLangid,
+ 0, FTS3_SEGCURSOR_ALL, zTerm, nTerm, isPrefix, 0, pSegcsr
+ );
+ pSegcsr->bLookup = !isPrefix;
+ }
+ }
+
+ *ppSegcsr = pSegcsr;
+ return rc;
+}
+
+/*
+** Free an Fts3MultiSegReader allocated by fts3TermSegReaderCursor().
+*/
+static void fts3SegReaderCursorFree(Fts3MultiSegReader *pSegcsr){
+ sqlite3Fts3SegReaderFinish(pSegcsr);
+ sqlite3_free(pSegcsr);
+}
+
+/*
+** This function retrieves the doclist for the specified term (or term
+** prefix) from the database.
+*/
+static int fts3TermSelect(
+ Fts3Table *p, /* Virtual table handle */
+ Fts3PhraseToken *pTok, /* Token to query for */
+ int iColumn, /* Column to query (or -ve for all columns) */
+ int *pnOut, /* OUT: Size of buffer at *ppOut */
+ char **ppOut /* OUT: Malloced result buffer */
+){
+ int rc; /* Return code */
+ Fts3MultiSegReader *pSegcsr; /* Seg-reader cursor for this term */
+ TermSelect tsc; /* Object for pair-wise doclist merging */
+ Fts3SegFilter filter; /* Segment term filter configuration */
+
+ pSegcsr = pTok->pSegcsr;
+ memset(&tsc, 0, sizeof(TermSelect));
+
+ filter.flags = FTS3_SEGMENT_IGNORE_EMPTY | FTS3_SEGMENT_REQUIRE_POS
+ | (pTok->isPrefix ? FTS3_SEGMENT_PREFIX : 0)
+ | (pTok->bFirst ? FTS3_SEGMENT_FIRST : 0)
+ | (iColumn<p->nColumn ? FTS3_SEGMENT_COLUMN_FILTER : 0);
+ filter.iCol = iColumn;
+ filter.zTerm = pTok->z;
+ filter.nTerm = pTok->n;
+
+ rc = sqlite3Fts3SegReaderStart(p, pSegcsr, &filter);
+ while( SQLITE_OK==rc
+ && SQLITE_ROW==(rc = sqlite3Fts3SegReaderStep(p, pSegcsr))
+ ){
+ rc = fts3TermSelectMerge(p, &tsc, pSegcsr->aDoclist, pSegcsr->nDoclist);
+ }
+
+ if( rc==SQLITE_OK ){
+ rc = fts3TermSelectFinishMerge(p, &tsc);
+ }
+ if( rc==SQLITE_OK ){
+ *ppOut = tsc.aaOutput[0];
+ *pnOut = tsc.anOutput[0];
+ }else{
+ int i;
+ for(i=0; i<SizeofArray(tsc.aaOutput); i++){
+ sqlite3_free(tsc.aaOutput[i]);
+ }
+ }
+
+ fts3SegReaderCursorFree(pSegcsr);
+ pTok->pSegcsr = 0;
+ return rc;
+}
+
+/*
+** This function counts the total number of docids in the doclist stored
+** in buffer aList[], size nList bytes.
+**
+** The doclist is assumed to contain a position-list following each
+** docid; this function skips over each position-list as it counts.
+*/
+static int fts3DoclistCountDocids(char *aList, int nList){
+ int nDoc = 0; /* Return value */
+ if( aList ){
+ char *aEnd = &aList[nList]; /* Pointer to one byte after EOF */
+ char *p = aList; /* Cursor */
+ while( p<aEnd ){
+ nDoc++;
+ while( (*p++)&0x80 ); /* Skip docid varint */
+ fts3PoslistCopy(0, &p); /* Skip over position list */
+ }
+ }
+
+ return nDoc;
+}
+
+/*
+** Advance the cursor to the next row in the %_content table that
+** matches the search criteria. For a MATCH search, this will be
+** the next row that matches. For a full-table scan, this will be
+** simply the next row in the %_content table. For a docid lookup,
+** this routine simply sets the EOF flag.
+**
+** Return SQLITE_OK if nothing goes wrong. SQLITE_OK is returned
+** even if we reach end-of-file. The fts3EofMethod() will be called
+** subsequently to determine whether or not an EOF was hit.
+*/
+static int fts3NextMethod(sqlite3_vtab_cursor *pCursor){
+ int rc;
+ Fts3Cursor *pCsr = (Fts3Cursor *)pCursor;
+ if( pCsr->eSearch==FTS3_DOCID_SEARCH || pCsr->eSearch==FTS3_FULLSCAN_SEARCH ){
+ Fts3Table *pTab = (Fts3Table*)pCursor->pVtab;
+ pTab->bLock++;
+ if( SQLITE_ROW!=sqlite3_step(pCsr->pStmt) ){
+ pCsr->isEof = 1;
+ rc = sqlite3_reset(pCsr->pStmt);
+ }else{
+ pCsr->iPrevId = sqlite3_column_int64(pCsr->pStmt, 0);
+ rc = SQLITE_OK;
+ }
+ pTab->bLock--;
+ }else{
+ rc = fts3EvalNext((Fts3Cursor *)pCursor);
+ }
+ assert( ((Fts3Table *)pCsr->base.pVtab)->pSegments==0 );
+ return rc;
+}
+
+/*
+** If the numeric type of argument pVal is "integer", then return it
+** converted to a 64-bit signed integer. Otherwise, return a copy of
+** the second parameter, iDefault.
+*/
+static sqlite3_int64 fts3DocidRange(sqlite3_value *pVal, i64 iDefault){
+ if( pVal ){
+ int eType = sqlite3_value_numeric_type(pVal);
+ if( eType==SQLITE_INTEGER ){
+ return sqlite3_value_int64(pVal);
+ }
+ }
+ return iDefault;
+}
+
+/*
+** This is the xFilter interface for the virtual table. See
+** the virtual table xFilter method documentation for additional
+** information.
+**
+** If idxNum==FTS3_FULLSCAN_SEARCH then do a full table scan against
+** the %_content table.
+**
+** If idxNum==FTS3_DOCID_SEARCH then do a docid lookup for a single entry
+** in the %_content table.
+**
+** If idxNum>=FTS3_FULLTEXT_SEARCH then use the full text index. The
+** column on the left-hand side of the MATCH operator is column
+** number idxNum-FTS3_FULLTEXT_SEARCH, 0 indexed. argv[0] is the right-hand
+** side of the MATCH operator.
+*/
+static int fts3FilterMethod(
+ sqlite3_vtab_cursor *pCursor, /* The cursor used for this query */
+ int idxNum, /* Strategy index */
+ const char *idxStr, /* Unused */
+ int nVal, /* Number of elements in apVal */
+ sqlite3_value **apVal /* Arguments for the indexing scheme */
+){
+ int rc = SQLITE_OK;
+ char *zSql; /* SQL statement used to access %_content */
+ int eSearch;
+ Fts3Table *p = (Fts3Table *)pCursor->pVtab;
+ Fts3Cursor *pCsr = (Fts3Cursor *)pCursor;
+
+ sqlite3_value *pCons = 0; /* The MATCH or rowid constraint, if any */
+ sqlite3_value *pLangid = 0; /* The "langid = ?" constraint, if any */
+ sqlite3_value *pDocidGe = 0; /* The "docid >= ?" constraint, if any */
+ sqlite3_value *pDocidLe = 0; /* The "docid <= ?" constraint, if any */
+ int iIdx;
+
+ UNUSED_PARAMETER(idxStr);
+ UNUSED_PARAMETER(nVal);
+
+ if( p->bLock ){
+ return SQLITE_ERROR;
+ }
+
+ eSearch = (idxNum & 0x0000FFFF);
+ assert( eSearch>=0 && eSearch<=(FTS3_FULLTEXT_SEARCH+p->nColumn) );
+ assert( p->pSegments==0 );
+
+ /* Collect arguments into local variables */
+ iIdx = 0;
+ if( eSearch!=FTS3_FULLSCAN_SEARCH ) pCons = apVal[iIdx++];
+ if( idxNum & FTS3_HAVE_LANGID ) pLangid = apVal[iIdx++];
+ if( idxNum & FTS3_HAVE_DOCID_GE ) pDocidGe = apVal[iIdx++];
+ if( idxNum & FTS3_HAVE_DOCID_LE ) pDocidLe = apVal[iIdx++];
+ assert( iIdx==nVal );
+
+ /* In case the cursor has been used before, clear it now. */
+ fts3ClearCursor(pCsr);
+
+ /* Set the lower and upper bounds on docids to return */
+ pCsr->iMinDocid = fts3DocidRange(pDocidGe, SMALLEST_INT64);
+ pCsr->iMaxDocid = fts3DocidRange(pDocidLe, LARGEST_INT64);
+
+ if( idxStr ){
+ pCsr->bDesc = (idxStr[0]=='D');
+ }else{
+ pCsr->bDesc = p->bDescIdx;
+ }
+ pCsr->eSearch = (i16)eSearch;
+
+ if( eSearch!=FTS3_DOCID_SEARCH && eSearch!=FTS3_FULLSCAN_SEARCH ){
+ int iCol = eSearch-FTS3_FULLTEXT_SEARCH;
+ const char *zQuery = (const char *)sqlite3_value_text(pCons);
+
+ if( zQuery==0 && sqlite3_value_type(pCons)!=SQLITE_NULL ){
+ return SQLITE_NOMEM;
+ }
+
+ pCsr->iLangid = 0;
+ if( pLangid ) pCsr->iLangid = sqlite3_value_int(pLangid);
+
+ assert( p->base.zErrMsg==0 );
+ rc = sqlite3Fts3ExprParse(p->pTokenizer, pCsr->iLangid,
+ p->azColumn, p->bFts4, p->nColumn, iCol, zQuery, -1, &pCsr->pExpr,
+ &p->base.zErrMsg
+ );
+ if( rc!=SQLITE_OK ){
+ return rc;
+ }
+
+ rc = fts3EvalStart(pCsr);
+ sqlite3Fts3SegmentsClose(p);
+ if( rc!=SQLITE_OK ) return rc;
+ pCsr->pNextId = pCsr->aDoclist;
+ pCsr->iPrevId = 0;
+ }
+
+ /* Compile a SELECT statement for this cursor. For a full-table-scan, the
+ ** statement loops through all rows of the %_content table. For a
+ ** full-text query or docid lookup, the statement retrieves a single
+ ** row by docid.
+ */
+ if( eSearch==FTS3_FULLSCAN_SEARCH ){
+ if( pDocidGe || pDocidLe ){
+ zSql = sqlite3_mprintf(
+ "SELECT %s WHERE rowid BETWEEN %lld AND %lld ORDER BY rowid %s",
+ p->zReadExprlist, pCsr->iMinDocid, pCsr->iMaxDocid,
+ (pCsr->bDesc ? "DESC" : "ASC")
+ );
+ }else{
+ zSql = sqlite3_mprintf("SELECT %s ORDER BY rowid %s",
+ p->zReadExprlist, (pCsr->bDesc ? "DESC" : "ASC")
+ );
+ }
+ if( zSql ){
+ p->bLock++;
+ rc = sqlite3_prepare_v3(
+ p->db,zSql,-1,SQLITE_PREPARE_PERSISTENT,&pCsr->pStmt,0
+ );
+ p->bLock--;
+ sqlite3_free(zSql);
+ }else{
+ rc = SQLITE_NOMEM;
+ }
+ }else if( eSearch==FTS3_DOCID_SEARCH ){
+ rc = fts3CursorSeekStmt(pCsr);
+ if( rc==SQLITE_OK ){
+ rc = sqlite3_bind_value(pCsr->pStmt, 1, pCons);
+ }
+ }
+ if( rc!=SQLITE_OK ) return rc;
+
+ return fts3NextMethod(pCursor);
+}
+
+/*
+** This is the xEof method of the virtual table. SQLite calls this
+** routine to find out if it has reached the end of a result set.
+*/
+static int fts3EofMethod(sqlite3_vtab_cursor *pCursor){
+ Fts3Cursor *pCsr = (Fts3Cursor*)pCursor;
+ if( pCsr->isEof ){
+ fts3ClearCursor(pCsr);
+ pCsr->isEof = 1;
+ }
+ return pCsr->isEof;
+}
+
+/*
+** This is the xRowid method. The SQLite core calls this routine to
+** retrieve the rowid for the current row of the result set. fts3
+** exposes %_content.docid as the rowid for the virtual table. The
+** rowid should be written to *pRowid.
+*/
+static int fts3RowidMethod(sqlite3_vtab_cursor *pCursor, sqlite_int64 *pRowid){
+ Fts3Cursor *pCsr = (Fts3Cursor *) pCursor;
+ *pRowid = pCsr->iPrevId;
+ return SQLITE_OK;
+}
+
+/*
+** This is the xColumn method, called by SQLite to request a value from
+** the row that the supplied cursor currently points to.
+**
+** If:
+**
+** (iCol < p->nColumn) -> The value of the iCol'th user column.
+** (iCol == p->nColumn) -> Magic column with the same name as the table.
+** (iCol == p->nColumn+1) -> Docid column
+** (iCol == p->nColumn+2) -> Langid column
+*/
+static int fts3ColumnMethod(
+ sqlite3_vtab_cursor *pCursor, /* Cursor to retrieve value from */
+ sqlite3_context *pCtx, /* Context for sqlite3_result_xxx() calls */
+ int iCol /* Index of column to read value from */
+){
+ int rc = SQLITE_OK; /* Return Code */
+ Fts3Cursor *pCsr = (Fts3Cursor *) pCursor;
+ Fts3Table *p = (Fts3Table *)pCursor->pVtab;
+
+ /* The column value supplied by SQLite must be in range. */
+ assert( iCol>=0 && iCol<=p->nColumn+2 );
+
+ switch( iCol-p->nColumn ){
+ case 0:
+ /* The special 'table-name' column */
+ sqlite3_result_pointer(pCtx, pCsr, "fts3cursor", 0);
+ break;
+
+ case 1:
+ /* The docid column */
+ sqlite3_result_int64(pCtx, pCsr->iPrevId);
+ break;
+
+ case 2:
+ if( pCsr->pExpr ){
+ sqlite3_result_int64(pCtx, pCsr->iLangid);
+ break;
+ }else if( p->zLanguageid==0 ){
+ sqlite3_result_int(pCtx, 0);
+ break;
+ }else{
+ iCol = p->nColumn;
+ /* no break */ deliberate_fall_through
+ }
+
+ default:
+ /* A user column. Or, if this is a full-table scan, possibly the
+ ** language-id column. Seek the cursor. */
+ rc = fts3CursorSeek(0, pCsr);
+ if( rc==SQLITE_OK && sqlite3_data_count(pCsr->pStmt)-1>iCol ){
+ sqlite3_result_value(pCtx, sqlite3_column_value(pCsr->pStmt, iCol+1));
+ }
+ break;
+ }
+
+ assert( ((Fts3Table *)pCsr->base.pVtab)->pSegments==0 );
+ return rc;
+}
+
+/*
+** This function is the implementation of the xUpdate callback used by
+** FTS3 virtual tables. It is invoked by SQLite each time a row is to be
+** inserted, updated or deleted.
+*/
+static int fts3UpdateMethod(
+ sqlite3_vtab *pVtab, /* Virtual table handle */
+ int nArg, /* Size of argument array */
+ sqlite3_value **apVal, /* Array of arguments */
+ sqlite_int64 *pRowid /* OUT: The affected (or effected) rowid */
+){
+ return sqlite3Fts3UpdateMethod(pVtab, nArg, apVal, pRowid);
+}
+
+/*
+** Implementation of xSync() method. Flush the contents of the pending-terms
+** hash-table to the database.
+*/
+static int fts3SyncMethod(sqlite3_vtab *pVtab){
+
+ /* Following an incremental-merge operation, assuming that the input
+ ** segments are not completely consumed (the usual case), they are updated
+ ** in place to remove the entries that have already been merged. This
+ ** involves updating the leaf block that contains the smallest unmerged
+ ** entry and each block (if any) between the leaf and the root node. So
+ ** if the height of the input segment b-trees is N, and input segments
+ ** are merged eight at a time, updating the input segments at the end
+ ** of an incremental-merge requires writing (8*(1+N)) blocks. N is usually
+ ** small - often between 0 and 2. So the overhead of the incremental
+ ** merge is somewhere between 8 and 24 blocks. To avoid this overhead
+ ** dwarfing the actual productive work accomplished, the incremental merge
+ ** is only attempted if it will write at least 64 leaf blocks. Hence
+ ** nMinMerge.
+ **
+ ** Of course, updating the input segments also involves deleting a bunch
+ ** of blocks from the segments table. But this is not considered overhead
+ ** as it would also be required by a crisis-merge that used the same input
+ ** segments.
+ */
+ const u32 nMinMerge = 64; /* Minimum amount of incr-merge work to do */
+
+ Fts3Table *p = (Fts3Table*)pVtab;
+ int rc;
+ i64 iLastRowid = sqlite3_last_insert_rowid(p->db);
+
+ rc = sqlite3Fts3PendingTermsFlush(p);
+ if( rc==SQLITE_OK
+ && p->nLeafAdd>(nMinMerge/16)
+ && p->nAutoincrmerge && p->nAutoincrmerge!=0xff
+ ){
+ int mxLevel = 0; /* Maximum relative level value in db */
+ int A; /* Incr-merge parameter A */
+
+ rc = sqlite3Fts3MaxLevel(p, &mxLevel);
+ assert( rc==SQLITE_OK || mxLevel==0 );
+ A = p->nLeafAdd * mxLevel;
+ A += (A/2);
+ if( A>(int)nMinMerge ) rc = sqlite3Fts3Incrmerge(p, A, p->nAutoincrmerge);
+ }
+ sqlite3Fts3SegmentsClose(p);
+ sqlite3_set_last_insert_rowid(p->db, iLastRowid);
+ return rc;
+}
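+
+#if 0
+/* Illustrative worked example - not part of the FTS3 sources. With
+** nLeafAdd==16 and mxLevel==2, the code above computes A = 16*2 = 32,
+** then A += 32/2, giving 48. Since 48 is not greater than nMinMerge
+** (64), no incremental merge is attempted. With nLeafAdd==32 the result
+** is 96, so up to 96 leaf blocks of incr-merge work would be scheduled.
+*/
+#endif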
+
+/*
+** If it is currently unknown whether or not the FTS table has an %_stat
+** table (if p->bHasStat==2), attempt to determine this (set p->bHasStat
+** to 0 or 1). Return SQLITE_OK if successful, or an SQLite error code
+** if an error occurs.
+*/
+static int fts3SetHasStat(Fts3Table *p){
+ int rc = SQLITE_OK;
+ if( p->bHasStat==2 ){
+ char *zTbl = sqlite3_mprintf("%s_stat", p->zName);
+ if( zTbl ){
+ int res = sqlite3_table_column_metadata(p->db, p->zDb, zTbl, 0,0,0,0,0,0);
+ sqlite3_free(zTbl);
+ p->bHasStat = (res==SQLITE_OK);
+ }else{
+ rc = SQLITE_NOMEM;
+ }
+ }
+ return rc;
+}
+
+/*
+** Implementation of xBegin() method.
+*/
+static int fts3BeginMethod(sqlite3_vtab *pVtab){
+ Fts3Table *p = (Fts3Table*)pVtab;
+ UNUSED_PARAMETER(pVtab);
+ assert( p->pSegments==0 );
+ assert( p->nPendingData==0 );
+ assert( p->inTransaction!=1 );
+ TESTONLY( p->inTransaction = 1 );
+ TESTONLY( p->mxSavepoint = -1; );
+ p->nLeafAdd = 0;
+ return fts3SetHasStat(p);
+}
+
+/*
+** Implementation of xCommit() method. This is a no-op. The contents of
+** the pending-terms hash-table have already been flushed into the database
+** by fts3SyncMethod().
+*/
+static int fts3CommitMethod(sqlite3_vtab *pVtab){
+ TESTONLY( Fts3Table *p = (Fts3Table*)pVtab );
+ UNUSED_PARAMETER(pVtab);
+ assert( p->nPendingData==0 );
+ assert( p->inTransaction!=0 );
+ assert( p->pSegments==0 );
+ TESTONLY( p->inTransaction = 0 );
+ TESTONLY( p->mxSavepoint = -1; );
+ return SQLITE_OK;
+}
+
+/*
+** Implementation of xRollback(). Discard the contents of the pending-terms
+** hash-table. Any changes made to the database are reverted by SQLite.
+*/
+static int fts3RollbackMethod(sqlite3_vtab *pVtab){
+ Fts3Table *p = (Fts3Table*)pVtab;
+ sqlite3Fts3PendingTermsClear(p);
+ assert( p->inTransaction!=0 );
+ TESTONLY( p->inTransaction = 0 );
+ TESTONLY( p->mxSavepoint = -1; );
+ return SQLITE_OK;
+}
+
+/*
+** When called, *ppPoslist must point to the byte immediately following the
+** end of a position-list. i.e. ( (*ppPoslist)[-1]==POS_END ). This function
+** moves *ppPoslist so that it instead points to the first byte of the
+** same position list.
+*/
+static void fts3ReversePoslist(char *pStart, char **ppPoslist){
+ char *p = &(*ppPoslist)[-2];
+ char c = 0;
+
+ /* Skip backwards past any trailing 0x00 bytes added by NearTrim() */
+ while( p>pStart && (c=*p--)==0 );
+
+ /* Search backwards for a varint with value zero (the end of the previous
+ ** poslist). This is an 0x00 byte preceded by some byte that does not
+ ** have the 0x80 bit set. */
+ while( p>pStart && (*p & 0x80) | c ){
+ c = *p--;
+ }
+ assert( p==pStart || c==0 );
+
+ /* At this point p points to that preceding byte without the 0x80 bit
+ ** set. So to find the start of the poslist, skip forward 2 bytes then
+ ** over a varint.
+ **
+  ** That is the normal case. The other case is that p==pStart and the
+  ** poslist to return is the first in the doclist. In this case do not
+  ** skip forward 2 bytes. The second part of the if condition
+  ** (c==0 && *ppPoslist>&p[2]) is required for cases where the poslist
+  ** belonging to the first docid in the doclist is empty. For example,
+  ** if the first docid is 10, the doclist begins with:
+ **
+ ** 0x0A 0x00 <next docid delta varint>
+ */
+ if( p>pStart || (c==0 && *ppPoslist>&p[2]) ){ p = &p[2]; }
+ while( *p++&0x80 );
+ *ppPoslist = p;
+}
+
+/*
+** Helper function used by the implementation of the overloaded snippet(),
+** offsets() and optimize() SQL functions.
+**
+** If the value passed as the third argument is a blob of size
+** sizeof(Fts3Cursor*), then the blob contents are copied to the
+** output variable *ppCsr and SQLITE_OK is returned. Otherwise, an error
+** message is written to context pContext and SQLITE_ERROR returned. The
+** string passed via zFunc is used as part of the error message.
+*/
+static int fts3FunctionArg(
+ sqlite3_context *pContext, /* SQL function call context */
+ const char *zFunc, /* Function name */
+ sqlite3_value *pVal, /* argv[0] passed to function */
+ Fts3Cursor **ppCsr /* OUT: Store cursor handle here */
+){
+ int rc;
+ *ppCsr = (Fts3Cursor*)sqlite3_value_pointer(pVal, "fts3cursor");
+ if( (*ppCsr)!=0 ){
+ rc = SQLITE_OK;
+ }else{
+ char *zErr = sqlite3_mprintf("illegal first argument to %s", zFunc);
+ sqlite3_result_error(pContext, zErr, -1);
+ sqlite3_free(zErr);
+ rc = SQLITE_ERROR;
+ }
+ return rc;
+}
+
+/*
+** Implementation of the snippet() function for FTS3
+*/
+static void fts3SnippetFunc(
+ sqlite3_context *pContext, /* SQLite function call context */
+ int nVal, /* Size of apVal[] array */
+ sqlite3_value **apVal /* Array of arguments */
+){
+ Fts3Cursor *pCsr; /* Cursor handle passed through apVal[0] */
+ const char *zStart = "<b>";
+ const char *zEnd = "</b>";
+ const char *zEllipsis = "<b>...</b>";
+ int iCol = -1;
+ int nToken = 15; /* Default number of tokens in snippet */
+
+ /* There must be at least one argument passed to this function (otherwise
+ ** the non-overloaded version would have been called instead of this one).
+ */
+ assert( nVal>=1 );
+
+ if( nVal>6 ){
+ sqlite3_result_error(pContext,
+ "wrong number of arguments to function snippet()", -1);
+ return;
+ }
+ if( fts3FunctionArg(pContext, "snippet", apVal[0], &pCsr) ) return;
+
+ switch( nVal ){
+ case 6: nToken = sqlite3_value_int(apVal[5]);
+ /* no break */ deliberate_fall_through
+ case 5: iCol = sqlite3_value_int(apVal[4]);
+ /* no break */ deliberate_fall_through
+ case 4: zEllipsis = (const char*)sqlite3_value_text(apVal[3]);
+ /* no break */ deliberate_fall_through
+ case 3: zEnd = (const char*)sqlite3_value_text(apVal[2]);
+ /* no break */ deliberate_fall_through
+ case 2: zStart = (const char*)sqlite3_value_text(apVal[1]);
+ }
+ if( !zEllipsis || !zEnd || !zStart ){
+ sqlite3_result_error_nomem(pContext);
+ }else if( nToken==0 ){
+ sqlite3_result_text(pContext, "", -1, SQLITE_STATIC);
+ }else if( SQLITE_OK==fts3CursorSeek(pContext, pCsr) ){
+ sqlite3Fts3Snippet(pContext, pCsr, zStart, zEnd, zEllipsis, iCol, nToken);
+ }
+}
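+
+/*
+** Hedged usage sketch for the snippet() function implemented above (the
+** table name "mail" is assumed for illustration). The optional arguments
+** correspond, in order, to zStart, zEnd, zEllipsis, iCol and nToken:
+**
+**   SELECT snippet(mail, '[', ']', '...', -1, 10)
+**   FROM mail WHERE mail MATCH 'sqlite';
+*/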
+
+/*
+** Implementation of the offsets() function for FTS3
+*/
+static void fts3OffsetsFunc(
+ sqlite3_context *pContext, /* SQLite function call context */
+ int nVal, /* Size of argument array */
+ sqlite3_value **apVal /* Array of arguments */
+){
+ Fts3Cursor *pCsr; /* Cursor handle passed through apVal[0] */
+
+ UNUSED_PARAMETER(nVal);
+
+ assert( nVal==1 );
+ if( fts3FunctionArg(pContext, "offsets", apVal[0], &pCsr) ) return;
+ assert( pCsr );
+ if( SQLITE_OK==fts3CursorSeek(pContext, pCsr) ){
+ sqlite3Fts3Offsets(pContext, pCsr);
+ }
+}
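+
+/*
+** Hedged usage sketch for offsets(), assuming an FTS table named "mail":
+**
+**   SELECT offsets(mail) FROM mail WHERE mail MATCH 'sqlite';
+**
+** The single argument is the hidden column named after the table itself,
+** which carries the cursor handle to the overloaded function (see
+** fts3FunctionArg() above).
+*/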
+
+/*
+** Implementation of the special optimize() function for FTS3. This
+** function merges all segments in the database to a single segment.
+** Example usage is:
+**
+** SELECT optimize(t) FROM t LIMIT 1;
+**
+** where 't' is the name of an FTS3 table.
+*/
+static void fts3OptimizeFunc(
+ sqlite3_context *pContext, /* SQLite function call context */
+ int nVal, /* Size of argument array */
+ sqlite3_value **apVal /* Array of arguments */
+){
+ int rc; /* Return code */
+ Fts3Table *p; /* Virtual table handle */
+ Fts3Cursor *pCursor; /* Cursor handle passed through apVal[0] */
+
+ UNUSED_PARAMETER(nVal);
+
+ assert( nVal==1 );
+ if( fts3FunctionArg(pContext, "optimize", apVal[0], &pCursor) ) return;
+ p = (Fts3Table *)pCursor->base.pVtab;
+ assert( p );
+
+ rc = sqlite3Fts3Optimize(p);
+
+ switch( rc ){
+ case SQLITE_OK:
+ sqlite3_result_text(pContext, "Index optimized", -1, SQLITE_STATIC);
+ break;
+ case SQLITE_DONE:
+ sqlite3_result_text(pContext, "Index already optimal", -1, SQLITE_STATIC);
+ break;
+ default:
+ sqlite3_result_error_code(pContext, rc);
+ break;
+ }
+}
+
+/*
+** Implementation of the matchinfo() function for FTS3
+*/
+static void fts3MatchinfoFunc(
+ sqlite3_context *pContext, /* SQLite function call context */
+ int nVal, /* Size of argument array */
+ sqlite3_value **apVal /* Array of arguments */
+){
+ Fts3Cursor *pCsr; /* Cursor handle passed through apVal[0] */
+ assert( nVal==1 || nVal==2 );
+ if( SQLITE_OK==fts3FunctionArg(pContext, "matchinfo", apVal[0], &pCsr) ){
+ const char *zArg = 0;
+ if( nVal>1 ){
+ zArg = (const char *)sqlite3_value_text(apVal[1]);
+ }
+ sqlite3Fts3Matchinfo(pContext, pCsr, zArg);
+ }
+}
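+
+/*
+** Hedged usage sketch for matchinfo(), assuming an FTS4 table named
+** "mail". The optional second argument is a format string; "pcx" is the
+** documented default:
+**
+**   SELECT matchinfo(mail, 'pcx') FROM mail WHERE mail MATCH 'sqlite';
+*/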
+
+/*
+** This routine implements the xFindFunction method for the FTS3
+** virtual table.
+*/
+static int fts3FindFunctionMethod(
+ sqlite3_vtab *pVtab, /* Virtual table handle */
+ int nArg, /* Number of SQL function arguments */
+ const char *zName, /* Name of SQL function */
+ void (**pxFunc)(sqlite3_context*,int,sqlite3_value**), /* OUT: Result */
+ void **ppArg /* Unused */
+){
+ struct Overloaded {
+ const char *zName;
+ void (*xFunc)(sqlite3_context*,int,sqlite3_value**);
+ } aOverload[] = {
+ { "snippet", fts3SnippetFunc },
+ { "offsets", fts3OffsetsFunc },
+ { "optimize", fts3OptimizeFunc },
+ { "matchinfo", fts3MatchinfoFunc },
+ };
+ int i; /* Iterator variable */
+
+ UNUSED_PARAMETER(pVtab);
+ UNUSED_PARAMETER(nArg);
+ UNUSED_PARAMETER(ppArg);
+
+ for(i=0; i<SizeofArray(aOverload); i++){
+ if( strcmp(zName, aOverload[i].zName)==0 ){
+ *pxFunc = aOverload[i].xFunc;
+ return 1;
+ }
+ }
+
+ /* No function of the specified name was found. Return 0. */
+ return 0;
+}
+
+/*
+** Implementation of FTS3 xRename method. Rename an fts3 table.
+*/
+static int fts3RenameMethod(
+ sqlite3_vtab *pVtab, /* Virtual table handle */
+ const char *zName /* New name of table */
+){
+ Fts3Table *p = (Fts3Table *)pVtab;
+ sqlite3 *db = p->db; /* Database connection */
+ int rc; /* Return Code */
+
+  /* At this point it must be known whether or not the %_stat table
+  ** exists, so bHasStat must not be left set to 2 (unknown). The call
+  ** to fts3SetHasStat() resolves this. */
+ rc = fts3SetHasStat(p);
+
+ /* As it happens, the pending terms table is always empty here. This is
+ ** because an "ALTER TABLE RENAME TABLE" statement inside a transaction
+ ** always opens a savepoint transaction. And the xSavepoint() method
+  ** flushes the pending terms table. But leave the (no-op) call to
+  ** PendingTermsFlush() in place in case that changes.
+ */
+ assert( p->nPendingData==0 );
+ if( rc==SQLITE_OK ){
+ rc = sqlite3Fts3PendingTermsFlush(p);
+ }
+
+ if( p->zContentTbl==0 ){
+ fts3DbExec(&rc, db,
+ "ALTER TABLE %Q.'%q_content' RENAME TO '%q_content';",
+ p->zDb, p->zName, zName
+ );
+ }
+
+ if( p->bHasDocsize ){
+ fts3DbExec(&rc, db,
+ "ALTER TABLE %Q.'%q_docsize' RENAME TO '%q_docsize';",
+ p->zDb, p->zName, zName
+ );
+ }
+ if( p->bHasStat ){
+ fts3DbExec(&rc, db,
+ "ALTER TABLE %Q.'%q_stat' RENAME TO '%q_stat';",
+ p->zDb, p->zName, zName
+ );
+ }
+ fts3DbExec(&rc, db,
+ "ALTER TABLE %Q.'%q_segments' RENAME TO '%q_segments';",
+ p->zDb, p->zName, zName
+ );
+ fts3DbExec(&rc, db,
+ "ALTER TABLE %Q.'%q_segdir' RENAME TO '%q_segdir';",
+ p->zDb, p->zName, zName
+ );
+ return rc;
+}
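+
+/*
+** For illustration (table names assumed): given "CREATE VIRTUAL TABLE t1
+** USING fts4(body)", the statement:
+**
+**   ALTER TABLE t1 RENAME TO t2;
+**
+** causes the method above to rename the shadow tables t1_content,
+** t1_segments and t1_segdir (plus t1_docsize and t1_stat, if present)
+** to their t2_* equivalents.
+*/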
+
+/*
+** The xSavepoint() method.
+**
+** Flush the contents of the pending-terms table to disk.
+*/
+static int fts3SavepointMethod(sqlite3_vtab *pVtab, int iSavepoint){
+ int rc = SQLITE_OK;
+ UNUSED_PARAMETER(iSavepoint);
+ assert( ((Fts3Table *)pVtab)->inTransaction );
+ assert( ((Fts3Table *)pVtab)->mxSavepoint <= iSavepoint );
+ TESTONLY( ((Fts3Table *)pVtab)->mxSavepoint = iSavepoint );
+ if( ((Fts3Table *)pVtab)->bIgnoreSavepoint==0 ){
+ rc = fts3SyncMethod(pVtab);
+ }
+ return rc;
+}
+
+/*
+** The xRelease() method.
+**
+** This is a no-op.
+*/
+static int fts3ReleaseMethod(sqlite3_vtab *pVtab, int iSavepoint){
+ TESTONLY( Fts3Table *p = (Fts3Table*)pVtab );
+ UNUSED_PARAMETER(iSavepoint);
+ UNUSED_PARAMETER(pVtab);
+ assert( p->inTransaction );
+ assert( p->mxSavepoint >= iSavepoint );
+ TESTONLY( p->mxSavepoint = iSavepoint-1 );
+ return SQLITE_OK;
+}
+
+/*
+** The xRollbackTo() method.
+**
+** Discard the contents of the pending terms table.
+*/
+static int fts3RollbackToMethod(sqlite3_vtab *pVtab, int iSavepoint){
+ Fts3Table *p = (Fts3Table*)pVtab;
+ UNUSED_PARAMETER(iSavepoint);
+ assert( p->inTransaction );
+ TESTONLY( p->mxSavepoint = iSavepoint );
+ sqlite3Fts3PendingTermsClear(p);
+ return SQLITE_OK;
+}
+
+/*
+** Return true if zName is the extension on one of the shadow tables used
+** by this module.
+*/
+static int fts3ShadowName(const char *zName){
+ static const char *azName[] = {
+ "content", "docsize", "segdir", "segments", "stat",
+ };
+ unsigned int i;
+ for(i=0; i<sizeof(azName)/sizeof(azName[0]); i++){
+ if( sqlite3_stricmp(zName, azName[i])==0 ) return 1;
+ }
+ return 0;
+}
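+
+/*
+** For example (names assumed): for an FTS table "t1", the shadow tables
+** are "t1_content", "t1_docsize", "t1_segdir", "t1_segments" and
+** "t1_stat", so fts3ShadowName("segdir") returns 1 while
+** fts3ShadowName("idx") returns 0.
+*/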
+
+static const sqlite3_module fts3Module = {
+ /* iVersion */ 3,
+ /* xCreate */ fts3CreateMethod,
+ /* xConnect */ fts3ConnectMethod,
+ /* xBestIndex */ fts3BestIndexMethod,
+ /* xDisconnect */ fts3DisconnectMethod,
+ /* xDestroy */ fts3DestroyMethod,
+ /* xOpen */ fts3OpenMethod,
+ /* xClose */ fts3CloseMethod,
+ /* xFilter */ fts3FilterMethod,
+ /* xNext */ fts3NextMethod,
+ /* xEof */ fts3EofMethod,
+ /* xColumn */ fts3ColumnMethod,
+ /* xRowid */ fts3RowidMethod,
+ /* xUpdate */ fts3UpdateMethod,
+ /* xBegin */ fts3BeginMethod,
+ /* xSync */ fts3SyncMethod,
+ /* xCommit */ fts3CommitMethod,
+ /* xRollback */ fts3RollbackMethod,
+ /* xFindFunction */ fts3FindFunctionMethod,
+ /* xRename */ fts3RenameMethod,
+ /* xSavepoint */ fts3SavepointMethod,
+ /* xRelease */ fts3ReleaseMethod,
+ /* xRollbackTo */ fts3RollbackToMethod,
+ /* xShadowName */ fts3ShadowName,
+};
+
+/*
+** This function is registered as the module destructor (called when an
+** FTS3 enabled database connection is closed). It frees the memory
+** allocated for the tokenizer hash table.
+*/
+static void hashDestroy(void *p){
+ Fts3Hash *pHash = (Fts3Hash *)p;
+ sqlite3Fts3HashClear(pHash);
+ sqlite3_free(pHash);
+}
+
+/*
+** The fts3 built-in tokenizers - "simple", "porter", "unicode61" and
+** "icu" - are implemented in files fts3_tokenizer1.c, fts3_porter.c,
+** fts3_unicode.c and fts3_icu.c respectively. The following forward
+** declarations are for functions declared in these files, used to
+** retrieve the respective implementations.
+**
+** Calling sqlite3Fts3SimpleTokenizerModule() sets the value pointed
+** to by the argument to point to the "simple" tokenizer implementation.
+** And so on.
+*/
+SQLITE_PRIVATE void sqlite3Fts3SimpleTokenizerModule(sqlite3_tokenizer_module const**ppModule);
+SQLITE_PRIVATE void sqlite3Fts3PorterTokenizerModule(sqlite3_tokenizer_module const**ppModule);
+#ifndef SQLITE_DISABLE_FTS3_UNICODE
+SQLITE_PRIVATE void sqlite3Fts3UnicodeTokenizer(sqlite3_tokenizer_module const**ppModule);
+#endif
+#ifdef SQLITE_ENABLE_ICU
+SQLITE_PRIVATE void sqlite3Fts3IcuTokenizerModule(sqlite3_tokenizer_module const**ppModule);
+#endif
+
+/*
+** Initialize the fts3 extension. If this extension is built as part
+** of the sqlite library, then this function is called directly by
+** SQLite. If fts3 is built as a dynamically loadable extension, this
+** function is called by the sqlite3_extension_init() entry point.
+*/
+SQLITE_PRIVATE int sqlite3Fts3Init(sqlite3 *db){
+ int rc = SQLITE_OK;
+ Fts3Hash *pHash = 0;
+ const sqlite3_tokenizer_module *pSimple = 0;
+ const sqlite3_tokenizer_module *pPorter = 0;
+#ifndef SQLITE_DISABLE_FTS3_UNICODE
+ const sqlite3_tokenizer_module *pUnicode = 0;
+#endif
+
+#ifdef SQLITE_ENABLE_ICU
+ const sqlite3_tokenizer_module *pIcu = 0;
+ sqlite3Fts3IcuTokenizerModule(&pIcu);
+#endif
+
+#ifndef SQLITE_DISABLE_FTS3_UNICODE
+ sqlite3Fts3UnicodeTokenizer(&pUnicode);
+#endif
+
+#ifdef SQLITE_TEST
+ rc = sqlite3Fts3InitTerm(db);
+ if( rc!=SQLITE_OK ) return rc;
+#endif
+
+ rc = sqlite3Fts3InitAux(db);
+ if( rc!=SQLITE_OK ) return rc;
+
+ sqlite3Fts3SimpleTokenizerModule(&pSimple);
+ sqlite3Fts3PorterTokenizerModule(&pPorter);
+
+ /* Allocate and initialize the hash-table used to store tokenizers. */
+ pHash = sqlite3_malloc(sizeof(Fts3Hash));
+ if( !pHash ){
+ rc = SQLITE_NOMEM;
+ }else{
+ sqlite3Fts3HashInit(pHash, FTS3_HASH_STRING, 1);
+ }
+
+ /* Load the built-in tokenizers into the hash table */
+ if( rc==SQLITE_OK ){
+ if( sqlite3Fts3HashInsert(pHash, "simple", 7, (void *)pSimple)
+ || sqlite3Fts3HashInsert(pHash, "porter", 7, (void *)pPorter)
+
+#ifndef SQLITE_DISABLE_FTS3_UNICODE
+ || sqlite3Fts3HashInsert(pHash, "unicode61", 10, (void *)pUnicode)
+#endif
+#ifdef SQLITE_ENABLE_ICU
+ || (pIcu && sqlite3Fts3HashInsert(pHash, "icu", 4, (void *)pIcu))
+#endif
+ ){
+ rc = SQLITE_NOMEM;
+ }
+ }
+
+#ifdef SQLITE_TEST
+ if( rc==SQLITE_OK ){
+ rc = sqlite3Fts3ExprInitTestInterface(db, pHash);
+ }
+#endif
+
+ /* Create the virtual table wrapper around the hash-table and overload
+ ** the four scalar functions. If this is successful, register the
+ ** module with sqlite.
+ */
+ if( SQLITE_OK==rc
+ && SQLITE_OK==(rc = sqlite3Fts3InitHashTable(db, pHash, "fts3_tokenizer"))
+ && SQLITE_OK==(rc = sqlite3_overload_function(db, "snippet", -1))
+ && SQLITE_OK==(rc = sqlite3_overload_function(db, "offsets", 1))
+ && SQLITE_OK==(rc = sqlite3_overload_function(db, "matchinfo", 1))
+ && SQLITE_OK==(rc = sqlite3_overload_function(db, "matchinfo", 2))
+ && SQLITE_OK==(rc = sqlite3_overload_function(db, "optimize", 1))
+ ){
+ rc = sqlite3_create_module_v2(
+ db, "fts3", &fts3Module, (void *)pHash, hashDestroy
+ );
+ if( rc==SQLITE_OK ){
+ rc = sqlite3_create_module_v2(
+ db, "fts4", &fts3Module, (void *)pHash, 0
+ );
+ }
+ if( rc==SQLITE_OK ){
+ rc = sqlite3Fts3InitTok(db, (void *)pHash);
+ }
+ return rc;
+ }
+
+ /* An error has occurred. Delete the hash table and return the error code. */
+ assert( rc!=SQLITE_OK );
+ if( pHash ){
+ sqlite3Fts3HashClear(pHash);
+ sqlite3_free(pHash);
+ }
+ return rc;
+}
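+
+/*
+** Once sqlite3Fts3Init() has run against a connection, FTS3/FTS4 tables
+** may be created and queried. A hedged sketch (schema assumed):
+**
+**   CREATE VIRTUAL TABLE mail USING fts4(subject, body);
+**   INSERT INTO mail(subject, body) VALUES('hello', 'world wide web');
+**   SELECT rowid FROM mail WHERE mail MATCH 'web';
+*/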
+
+/*
+** Allocate an Fts3MultiSegReader for each token in the expression headed
+** by pExpr.
+**
+** An Fts3SegReader object is a cursor that can seek or scan a range of
+** entries within a single segment b-tree. An Fts3MultiSegReader uses multiple
+** Fts3SegReader objects internally to provide an interface to seek or scan
+** within the union of all segments of a b-tree. Hence the name.
+**
+** If the allocated Fts3MultiSegReader just seeks to a single entry in a
+** segment b-tree (if the term is not a prefix, or it is a prefix for which
+** a prefix b-tree of the right length exists) then it may be traversed
+** and merged incrementally. Otherwise, it has to be merged into an in-memory
+** doclist and then traversed.
+*/
+static void fts3EvalAllocateReaders(
+ Fts3Cursor *pCsr, /* FTS cursor handle */
+ Fts3Expr *pExpr, /* Allocate readers for this expression */
+ int *pnToken, /* OUT: Total number of tokens in phrase. */
+ int *pnOr, /* OUT: Total number of OR nodes in expr. */
+ int *pRc /* IN/OUT: Error code */
+){
+ if( pExpr && SQLITE_OK==*pRc ){
+ if( pExpr->eType==FTSQUERY_PHRASE ){
+ int i;
+ int nToken = pExpr->pPhrase->nToken;
+ *pnToken += nToken;
+ for(i=0; i<nToken; i++){
+ Fts3PhraseToken *pToken = &pExpr->pPhrase->aToken[i];
+ int rc = fts3TermSegReaderCursor(pCsr,
+ pToken->z, pToken->n, pToken->isPrefix, &pToken->pSegcsr
+ );
+ if( rc!=SQLITE_OK ){
+ *pRc = rc;
+ return;
+ }
+ }
+ assert( pExpr->pPhrase->iDoclistToken==0 );
+ pExpr->pPhrase->iDoclistToken = -1;
+ }else{
+ *pnOr += (pExpr->eType==FTSQUERY_OR);
+ fts3EvalAllocateReaders(pCsr, pExpr->pLeft, pnToken, pnOr, pRc);
+ fts3EvalAllocateReaders(pCsr, pExpr->pRight, pnToken, pnOr, pRc);
+ }
+ }
+}
+
+/*
+** Arguments pList/nList contain the doclist for token iToken of phrase p.
+** It is merged into the main doclist stored in p->doclist.aAll/nAll.
+**
+** This function assumes that pList points to a buffer allocated using
+** sqlite3_malloc(). This function takes responsibility for eventually
+** freeing the buffer.
+**
+** SQLITE_OK is returned if successful, or SQLITE_NOMEM if an error occurs.
+*/
+static int fts3EvalPhraseMergeToken(
+ Fts3Table *pTab, /* FTS Table pointer */
+ Fts3Phrase *p, /* Phrase to merge pList/nList into */
+ int iToken, /* Token pList/nList corresponds to */
+ char *pList, /* Pointer to doclist */
+ int nList /* Number of bytes in pList */
+){
+ int rc = SQLITE_OK;
+ assert( iToken!=p->iDoclistToken );
+
+ if( pList==0 ){
+ sqlite3_free(p->doclist.aAll);
+ p->doclist.aAll = 0;
+ p->doclist.nAll = 0;
+ }
+
+ else if( p->iDoclistToken<0 ){
+ p->doclist.aAll = pList;
+ p->doclist.nAll = nList;
+ }
+
+ else if( p->doclist.aAll==0 ){
+ sqlite3_free(pList);
+ }
+
+ else {
+ char *pLeft;
+ char *pRight;
+ int nLeft;
+ int nRight;
+ int nDiff;
+
+ if( p->iDoclistToken<iToken ){
+ pLeft = p->doclist.aAll;
+ nLeft = p->doclist.nAll;
+ pRight = pList;
+ nRight = nList;
+ nDiff = iToken - p->iDoclistToken;
+ }else{
+ pRight = p->doclist.aAll;
+ nRight = p->doclist.nAll;
+ pLeft = pList;
+ nLeft = nList;
+ nDiff = p->iDoclistToken - iToken;
+ }
+
+ rc = fts3DoclistPhraseMerge(
+ pTab->bDescIdx, nDiff, pLeft, nLeft, &pRight, &nRight
+ );
+ sqlite3_free(pLeft);
+ p->doclist.aAll = pRight;
+ p->doclist.nAll = nRight;
+ }
+
+ if( iToken>p->iDoclistToken ) p->iDoclistToken = iToken;
+ return rc;
+}
+
+/*
+** Load the doclist for phrase p into p->doclist.aAll/nAll. The loaded doclist
+** does not take deferred tokens into account.
+**
+** SQLITE_OK is returned if no error occurs, otherwise an SQLite error code.
+*/
+static int fts3EvalPhraseLoad(
+ Fts3Cursor *pCsr, /* FTS Cursor handle */
+ Fts3Phrase *p /* Phrase object */
+){
+ Fts3Table *pTab = (Fts3Table *)pCsr->base.pVtab;
+ int iToken;
+ int rc = SQLITE_OK;
+
+ for(iToken=0; rc==SQLITE_OK && iToken<p->nToken; iToken++){
+ Fts3PhraseToken *pToken = &p->aToken[iToken];
+ assert( pToken->pDeferred==0 || pToken->pSegcsr==0 );
+
+ if( pToken->pSegcsr ){
+ int nThis = 0;
+ char *pThis = 0;
+ rc = fts3TermSelect(pTab, pToken, p->iColumn, &nThis, &pThis);
+ if( rc==SQLITE_OK ){
+ rc = fts3EvalPhraseMergeToken(pTab, p, iToken, pThis, nThis);
+ }
+ }
+ assert( pToken->pSegcsr==0 );
+ }
+
+ return rc;
+}
+
+#ifndef SQLITE_DISABLE_FTS4_DEFERRED
+/*
+** This function is called on each phrase after the position lists for
+** any deferred tokens have been loaded into memory. It updates the phrase's
+** current position list to include only those positions that are really
+** instances of the phrase (after considering deferred tokens). If this
+** means that the phrase does not appear in the current row, doclist.pList
+** and doclist.nList are both zeroed.
+**
+** SQLITE_OK is returned if no error occurs, otherwise an SQLite error code.
+*/
+static int fts3EvalDeferredPhrase(Fts3Cursor *pCsr, Fts3Phrase *pPhrase){
+ int iToken; /* Used to iterate through phrase tokens */
+ char *aPoslist = 0; /* Position list for deferred tokens */
+ int nPoslist = 0; /* Number of bytes in aPoslist */
+ int iPrev = -1; /* Token number of previous deferred token */
+
+ assert( pPhrase->doclist.bFreeList==0 );
+
+ for(iToken=0; iToken<pPhrase->nToken; iToken++){
+ Fts3PhraseToken *pToken = &pPhrase->aToken[iToken];
+ Fts3DeferredToken *pDeferred = pToken->pDeferred;
+
+ if( pDeferred ){
+ char *pList;
+ int nList;
+ int rc = sqlite3Fts3DeferredTokenList(pDeferred, &pList, &nList);
+ if( rc!=SQLITE_OK ) return rc;
+
+ if( pList==0 ){
+ sqlite3_free(aPoslist);
+ pPhrase->doclist.pList = 0;
+ pPhrase->doclist.nList = 0;
+ return SQLITE_OK;
+
+ }else if( aPoslist==0 ){
+ aPoslist = pList;
+ nPoslist = nList;
+
+ }else{
+ char *aOut = pList;
+ char *p1 = aPoslist;
+ char *p2 = aOut;
+
+ assert( iPrev>=0 );
+ fts3PoslistPhraseMerge(&aOut, iToken-iPrev, 0, 1, &p1, &p2);
+ sqlite3_free(aPoslist);
+ aPoslist = pList;
+ nPoslist = (int)(aOut - aPoslist);
+ if( nPoslist==0 ){
+ sqlite3_free(aPoslist);
+ pPhrase->doclist.pList = 0;
+ pPhrase->doclist.nList = 0;
+ return SQLITE_OK;
+ }
+ }
+ iPrev = iToken;
+ }
+ }
+
+ if( iPrev>=0 ){
+ int nMaxUndeferred = pPhrase->iDoclistToken;
+ if( nMaxUndeferred<0 ){
+ pPhrase->doclist.pList = aPoslist;
+ pPhrase->doclist.nList = nPoslist;
+ pPhrase->doclist.iDocid = pCsr->iPrevId;
+ pPhrase->doclist.bFreeList = 1;
+ }else{
+ int nDistance;
+ char *p1;
+ char *p2;
+ char *aOut;
+
+ if( nMaxUndeferred>iPrev ){
+ p1 = aPoslist;
+ p2 = pPhrase->doclist.pList;
+ nDistance = nMaxUndeferred - iPrev;
+ }else{
+ p1 = pPhrase->doclist.pList;
+ p2 = aPoslist;
+ nDistance = iPrev - nMaxUndeferred;
+ }
+
+ aOut = (char *)sqlite3_malloc(nPoslist+8);
+ if( !aOut ){
+ sqlite3_free(aPoslist);
+ return SQLITE_NOMEM;
+ }
+
+ pPhrase->doclist.pList = aOut;
+ if( fts3PoslistPhraseMerge(&aOut, nDistance, 0, 1, &p1, &p2) ){
+ pPhrase->doclist.bFreeList = 1;
+ pPhrase->doclist.nList = (int)(aOut - pPhrase->doclist.pList);
+ }else{
+ sqlite3_free(aOut);
+ pPhrase->doclist.pList = 0;
+ pPhrase->doclist.nList = 0;
+ }
+ sqlite3_free(aPoslist);
+ }
+ }
+
+ return SQLITE_OK;
+}
+#endif /* SQLITE_DISABLE_FTS4_DEFERRED */
+
+/*
+** The maximum number of tokens a phrase may contain if it is to be
+** considered for the incremental-doclist strategy.
+*/
+#define MAX_INCR_PHRASE_TOKENS 4
+
+/*
+** This function is called for each Fts3Phrase in a full-text query
+** expression to initialize the mechanism for returning rows. Once this
+** function has been called successfully on an Fts3Phrase, it may be
+** used with fts3EvalPhraseNext() to iterate through the matching docids.
+**
+** If parameter bOptOk is true, then the phrase may (or may not) use the
+** incremental loading strategy. Otherwise, the entire doclist is loaded into
+** memory within this call.
+**
+** SQLITE_OK is returned if no error occurs, otherwise an SQLite error code.
+*/
+static int fts3EvalPhraseStart(Fts3Cursor *pCsr, int bOptOk, Fts3Phrase *p){
+ Fts3Table *pTab = (Fts3Table *)pCsr->base.pVtab;
+ int rc = SQLITE_OK; /* Error code */
+ int i;
+
+ /* Determine if doclists may be loaded from disk incrementally. This is
+ ** possible if the bOptOk argument is true, the FTS doclists will be
+ ** scanned in forward order, and the phrase consists of
+  ** MAX_INCR_PHRASE_TOKENS or fewer tokens, none of which are "^first"
+ ** tokens or prefix tokens that cannot use a prefix-index. */
+ int bHaveIncr = 0;
+ int bIncrOk = (bOptOk
+ && pCsr->bDesc==pTab->bDescIdx
+ && p->nToken<=MAX_INCR_PHRASE_TOKENS && p->nToken>0
+#if defined(SQLITE_DEBUG) || defined(SQLITE_TEST)
+ && pTab->bNoIncrDoclist==0
+#endif
+ );
+ for(i=0; bIncrOk==1 && i<p->nToken; i++){
+ Fts3PhraseToken *pToken = &p->aToken[i];
+ if( pToken->bFirst || (pToken->pSegcsr!=0 && !pToken->pSegcsr->bLookup) ){
+ bIncrOk = 0;
+ }
+ if( pToken->pSegcsr ) bHaveIncr = 1;
+ }
+
+ if( bIncrOk && bHaveIncr ){
+ /* Use the incremental approach. */
+ int iCol = (p->iColumn >= pTab->nColumn ? -1 : p->iColumn);
+ for(i=0; rc==SQLITE_OK && i<p->nToken; i++){
+ Fts3PhraseToken *pToken = &p->aToken[i];
+ Fts3MultiSegReader *pSegcsr = pToken->pSegcsr;
+ if( pSegcsr ){
+ rc = sqlite3Fts3MsrIncrStart(pTab, pSegcsr, iCol, pToken->z, pToken->n);
+ }
+ }
+ p->bIncr = 1;
+ }else{
+ /* Load the full doclist for the phrase into memory. */
+ rc = fts3EvalPhraseLoad(pCsr, p);
+ p->bIncr = 0;
+ }
+
+ assert( rc!=SQLITE_OK || p->nToken<1 || p->aToken[0].pSegcsr==0 || p->bIncr );
+ return rc;
+}
+
+/*
+** This function is used to iterate backwards (from the end to start)
+** through doclists. It is used by this module to iterate through phrase
+** doclists in reverse and by the fts3_write.c module to iterate through
+** pending-terms lists when writing to databases with "order=desc".
+**
+** The doclist may be sorted in ascending (parameter bDescIdx==0) or
+** descending (parameter bDescIdx==1) order of docid. Regardless, this
+** function iterates from the end of the doclist to the beginning.
+*/
+SQLITE_PRIVATE void sqlite3Fts3DoclistPrev(
+ int bDescIdx, /* True if the doclist is desc */
+ char *aDoclist, /* Pointer to entire doclist */
+ int nDoclist, /* Length of aDoclist in bytes */
+ char **ppIter, /* IN/OUT: Iterator pointer */
+ sqlite3_int64 *piDocid, /* IN/OUT: Docid pointer */
+ int *pnList, /* OUT: List length pointer */
+ u8 *pbEof /* OUT: End-of-file flag */
+){
+ char *p = *ppIter;
+
+ assert( nDoclist>0 );
+ assert( *pbEof==0 );
+ assert( p || *piDocid==0 );
+ assert( !p || (p>aDoclist && p<&aDoclist[nDoclist]) );
+
+ if( p==0 ){
+ sqlite3_int64 iDocid = 0;
+ char *pNext = 0;
+ char *pDocid = aDoclist;
+ char *pEnd = &aDoclist[nDoclist];
+ int iMul = 1;
+
+ while( pDocid<pEnd ){
+ sqlite3_int64 iDelta;
+ pDocid += sqlite3Fts3GetVarint(pDocid, &iDelta);
+ iDocid += (iMul * iDelta);
+ pNext = pDocid;
+ fts3PoslistCopy(0, &pDocid);
+ while( pDocid<pEnd && *pDocid==0 ) pDocid++;
+ iMul = (bDescIdx ? -1 : 1);
+ }
+
+ *pnList = (int)(pEnd - pNext);
+ *ppIter = pNext;
+ *piDocid = iDocid;
+ }else{
+ int iMul = (bDescIdx ? -1 : 1);
+ sqlite3_int64 iDelta;
+ fts3GetReverseVarint(&p, aDoclist, &iDelta);
+ *piDocid -= (iMul * iDelta);
+
+ if( p==aDoclist ){
+ *pbEof = 1;
+ }else{
+ char *pSave = p;
+ fts3ReversePoslist(aDoclist, &p);
+ *pnList = (int)(pSave - p);
+ }
+ *ppIter = p;
+ }
+}
+
+/*
+** Iterate forwards through a doclist.
+*/
+SQLITE_PRIVATE void sqlite3Fts3DoclistNext(
+ int bDescIdx, /* True if the doclist is desc */
+ char *aDoclist, /* Pointer to entire doclist */
+ int nDoclist, /* Length of aDoclist in bytes */
+ char **ppIter, /* IN/OUT: Iterator pointer */
+ sqlite3_int64 *piDocid, /* IN/OUT: Docid pointer */
+ u8 *pbEof /* OUT: End-of-file flag */
+){
+ char *p = *ppIter;
+
+ assert( nDoclist>0 );
+ assert( *pbEof==0 );
+ assert_fts3_nc( p || *piDocid==0 );
+ assert( !p || (p>=aDoclist && p<=&aDoclist[nDoclist]) );
+
+ if( p==0 ){
+ p = aDoclist;
+ p += sqlite3Fts3GetVarint(p, piDocid);
+ }else{
+ fts3PoslistCopy(0, &p);
+ while( p<&aDoclist[nDoclist] && *p==0 ) p++;
+ if( p>=&aDoclist[nDoclist] ){
+ *pbEof = 1;
+ }else{
+ sqlite3_int64 iVar;
+ p += sqlite3Fts3GetVarint(p, &iVar);
+ *piDocid += ((bDescIdx ? -1 : 1) * iVar);
+ }
+ }
+
+ *ppIter = p;
+}
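+
+/*
+** Hedged encoding illustration for the two iterators above (bytes
+** assumed): an ascending doclist holding docids 10 and 13, each with an
+** empty position list, is laid out as:
+**
+**   0x0A 0x00 0x03 0x00
+**
+** i.e. varint docid 10, poslist terminator, varint delta 3 (docid 13),
+** poslist terminator. sqlite3Fts3DoclistNext() walks this left to right,
+** sqlite3Fts3DoclistPrev() right to left.
+*/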
+
+/*
+** Advance the iterator pDL to the next entry in pDL->aAll/nAll. Set *pbEof
+** to true if EOF is reached.
+*/
+static void fts3EvalDlPhraseNext(
+ Fts3Table *pTab,
+ Fts3Doclist *pDL,
+ u8 *pbEof
+){
+ char *pIter; /* Used to iterate through aAll */
+ char *pEnd; /* 1 byte past end of aAll */
+
+ if( pDL->pNextDocid ){
+ pIter = pDL->pNextDocid;
+ assert( pDL->aAll!=0 || pIter==0 );
+ }else{
+ pIter = pDL->aAll;
+ }
+
+ if( pIter==0 || pIter>=(pEnd = pDL->aAll + pDL->nAll) ){
+ /* We have already reached the end of this doclist. EOF. */
+ *pbEof = 1;
+ }else{
+ sqlite3_int64 iDelta;
+ pIter += sqlite3Fts3GetVarint(pIter, &iDelta);
+ if( pTab->bDescIdx==0 || pDL->pNextDocid==0 ){
+ pDL->iDocid += iDelta;
+ }else{
+ pDL->iDocid -= iDelta;
+ }
+ pDL->pList = pIter;
+ fts3PoslistCopy(0, &pIter);
+ pDL->nList = (int)(pIter - pDL->pList);
+
+ /* pIter now points just past the 0x00 that terminates the position-
+ ** list for document pDL->iDocid. However, if this position-list was
+ ** edited in place by fts3EvalNearTrim(), then pIter may not actually
+ ** point to the start of the next docid value. The following line deals
+ ** with this case by advancing pIter past the zero-padding added by
+ ** fts3EvalNearTrim(). */
+ while( pIter<pEnd && *pIter==0 ) pIter++;
+
+ pDL->pNextDocid = pIter;
+ assert( pIter>=&pDL->aAll[pDL->nAll] || *pIter );
+ *pbEof = 0;
+ }
+}
+
+/*
+** Helper type used by fts3EvalIncrPhraseNext() and incrPhraseTokenNext().
+*/
+typedef struct TokenDoclist TokenDoclist;
+struct TokenDoclist {
+ int bIgnore;
+ sqlite3_int64 iDocid;
+ char *pList;
+ int nList;
+};
+
+/*
+** The token identified by iToken is an incrementally loaded token that
+** is part of a multi-token phrase. Advance it to the next matching
+** document in the database and populate output variable *p with the
+** details of the new entry. Or, if the iterator has reached EOF, set
+** *pbEof to true.
+**
+** If an error occurs, return an SQLite error code. Otherwise, return
+** SQLITE_OK.
+*/
+static int incrPhraseTokenNext(
+ Fts3Table *pTab, /* Virtual table handle */
+ Fts3Phrase *pPhrase, /* Phrase to advance token of */
+ int iToken, /* Specific token to advance */
+ TokenDoclist *p, /* OUT: Docid and doclist for new entry */
+ u8 *pbEof /* OUT: True if iterator is at EOF */
+){
+ int rc = SQLITE_OK;
+
+ if( pPhrase->iDoclistToken==iToken ){
+ assert( p->bIgnore==0 );
+ assert( pPhrase->aToken[iToken].pSegcsr==0 );
+ fts3EvalDlPhraseNext(pTab, &pPhrase->doclist, pbEof);
+ p->pList = pPhrase->doclist.pList;
+ p->nList = pPhrase->doclist.nList;
+ p->iDocid = pPhrase->doclist.iDocid;
+ }else{
+ Fts3PhraseToken *pToken = &pPhrase->aToken[iToken];
+ assert( pToken->pDeferred==0 );
+ assert( pToken->pSegcsr || pPhrase->iDoclistToken>=0 );
+ if( pToken->pSegcsr ){
+ assert( p->bIgnore==0 );
+ rc = sqlite3Fts3MsrIncrNext(
+ pTab, pToken->pSegcsr, &p->iDocid, &p->pList, &p->nList
+ );
+ if( p->pList==0 ) *pbEof = 1;
+ }else{
+ p->bIgnore = 1;
+ }
+ }
+
+ return rc;
+}
+
+
+/*
+** The phrase iterator passed as the second argument:
+**
+** * features at least one token that uses an incremental doclist, and
+**
+** * does not contain any deferred tokens.
+**
+** Advance it to the next matching document in the database and populate
+** the Fts3Doclist.pList and nList fields.
+**
+** If there is no "next" entry and no error occurs, then *pbEof is set to
+** 1 before returning. Otherwise, if no error occurs and the iterator is
+** successfully advanced, *pbEof is set to 0.
+**
+** If an error occurs, return an SQLite error code. Otherwise, return
+** SQLITE_OK.
+*/
+static int fts3EvalIncrPhraseNext(
+ Fts3Cursor *pCsr, /* FTS Cursor handle */
+ Fts3Phrase *p, /* Phrase object to advance to next docid */
+ u8 *pbEof /* OUT: Set to 1 if EOF */
+){
+ int rc = SQLITE_OK;
+ Fts3Doclist *pDL = &p->doclist;
+ Fts3Table *pTab = (Fts3Table *)pCsr->base.pVtab;
+ u8 bEof = 0;
+
+  /* This is only called if it is guaranteed that the phrase has at least
+  ** one incremental token, in which case the bIncr flag is set. */
+ assert( p->bIncr==1 );
+
+ if( p->nToken==1 ){
+ rc = sqlite3Fts3MsrIncrNext(pTab, p->aToken[0].pSegcsr,
+ &pDL->iDocid, &pDL->pList, &pDL->nList
+ );
+ if( pDL->pList==0 ) bEof = 1;
+ }else{
+ int bDescDoclist = pCsr->bDesc;
+ struct TokenDoclist a[MAX_INCR_PHRASE_TOKENS];
+
+ memset(a, 0, sizeof(a));
+ assert( p->nToken<=MAX_INCR_PHRASE_TOKENS );
+ assert( p->iDoclistToken<MAX_INCR_PHRASE_TOKENS );
+
+ while( bEof==0 ){
+ int bMaxSet = 0;
+ sqlite3_int64 iMax = 0; /* Largest docid for all iterators */
+ int i; /* Used to iterate through tokens */
+
+ /* Advance the iterator for each token in the phrase once. */
+ for(i=0; rc==SQLITE_OK && i<p->nToken && bEof==0; i++){
+ rc = incrPhraseTokenNext(pTab, p, i, &a[i], &bEof);
+ if( a[i].bIgnore==0 && (bMaxSet==0 || DOCID_CMP(iMax, a[i].iDocid)<0) ){
+ iMax = a[i].iDocid;
+ bMaxSet = 1;
+ }
+ }
+ assert( rc!=SQLITE_OK || (p->nToken>=1 && a[p->nToken-1].bIgnore==0) );
+ assert( rc!=SQLITE_OK || bMaxSet );
+
+ /* Keep advancing iterators until they all point to the same document */
+ for(i=0; i<p->nToken; i++){
+ while( rc==SQLITE_OK && bEof==0
+ && a[i].bIgnore==0 && DOCID_CMP(a[i].iDocid, iMax)<0
+ ){
+ rc = incrPhraseTokenNext(pTab, p, i, &a[i], &bEof);
+ if( DOCID_CMP(a[i].iDocid, iMax)>0 ){
+ iMax = a[i].iDocid;
+ i = 0;
+ }
+ }
+ }
+
+ /* Check if the current entries really are a phrase match */
+ if( bEof==0 ){
+ int nList = 0;
+ int nByte = a[p->nToken-1].nList;
+ char *aDoclist = sqlite3_malloc(nByte+FTS3_BUFFER_PADDING);
+ if( !aDoclist ) return SQLITE_NOMEM;
+ memcpy(aDoclist, a[p->nToken-1].pList, nByte+1);
+ memset(&aDoclist[nByte], 0, FTS3_BUFFER_PADDING);
+
+ for(i=0; i<(p->nToken-1); i++){
+ if( a[i].bIgnore==0 ){
+ char *pL = a[i].pList;
+ char *pR = aDoclist;
+ char *pOut = aDoclist;
+ int nDist = p->nToken-1-i;
+ int res = fts3PoslistPhraseMerge(&pOut, nDist, 0, 1, &pL, &pR);
+ if( res==0 ) break;
+ nList = (int)(pOut - aDoclist);
+ }
+ }
+ if( i==(p->nToken-1) ){
+ pDL->iDocid = iMax;
+ pDL->pList = aDoclist;
+ pDL->nList = nList;
+ pDL->bFreeList = 1;
+ break;
+ }
+ sqlite3_free(aDoclist);
+ }
+ }
+ }
+
+ *pbEof = bEof;
+ return rc;
+}
+
+/*
+** Attempt to move the phrase iterator to point to the next matching docid.
+** If an error occurs, return an SQLite error code. Otherwise, return
+** SQLITE_OK.
+**
+** If there is no "next" entry and no error occurs, then *pbEof is set to
+** 1 before returning. Otherwise, if no error occurs and the iterator is
+** successfully advanced, *pbEof is set to 0.
+*/
+static int fts3EvalPhraseNext(
+ Fts3Cursor *pCsr, /* FTS Cursor handle */
+ Fts3Phrase *p, /* Phrase object to advance to next docid */
+ u8 *pbEof /* OUT: Set to 1 if EOF */
+){
+ int rc = SQLITE_OK;
+ Fts3Doclist *pDL = &p->doclist;
+ Fts3Table *pTab = (Fts3Table *)pCsr->base.pVtab;
+
+ if( p->bIncr ){
+ rc = fts3EvalIncrPhraseNext(pCsr, p, pbEof);
+ }else if( pCsr->bDesc!=pTab->bDescIdx && pDL->nAll ){
+ sqlite3Fts3DoclistPrev(pTab->bDescIdx, pDL->aAll, pDL->nAll,
+ &pDL->pNextDocid, &pDL->iDocid, &pDL->nList, pbEof
+ );
+ pDL->pList = pDL->pNextDocid;
+ }else{
+ fts3EvalDlPhraseNext(pTab, pDL, pbEof);
+ }
+
+ return rc;
+}
+
+/*
+** If *pRc is not SQLITE_OK when this function is called, it is a no-op.
+** Otherwise, fts3EvalPhraseStart() is called on all phrases within the
+** expression. Also the Fts3Expr.bDeferred variable is set to true for any
+** expressions for which all descendent tokens are deferred.
+**
+** If parameter bOptOk is zero, then it is guaranteed that the
+** Fts3Phrase.doclist.aAll/nAll variables contain the entire doclist for
+** each phrase in the expression (subject to deferred token processing).
+** Or, if bOptOk is non-zero, then one or more tokens within the expression
+** may be loaded incrementally, meaning doclist.aAll/nAll is not available.
+**
+** If an error occurs within this function, *pRc is set to an SQLite error
+** code before returning.
+*/
+static void fts3EvalStartReaders(
+ Fts3Cursor *pCsr, /* FTS Cursor handle */
+ Fts3Expr *pExpr, /* Expression to initialize phrases in */
+ int *pRc /* IN/OUT: Error code */
+){
+ if( pExpr && SQLITE_OK==*pRc ){
+ if( pExpr->eType==FTSQUERY_PHRASE ){
+ int nToken = pExpr->pPhrase->nToken;
+ if( nToken ){
+ int i;
+ for(i=0; i<nToken; i++){
+ if( pExpr->pPhrase->aToken[i].pDeferred==0 ) break;
+ }
+ pExpr->bDeferred = (i==nToken);
+ }
+ *pRc = fts3EvalPhraseStart(pCsr, 1, pExpr->pPhrase);
+ }else{
+ fts3EvalStartReaders(pCsr, pExpr->pLeft, pRc);
+ fts3EvalStartReaders(pCsr, pExpr->pRight, pRc);
+ pExpr->bDeferred = (pExpr->pLeft->bDeferred && pExpr->pRight->bDeferred);
+ }
+ }
+}
+
+/*
+** An array of the following structures is assembled as part of the process
+** of selecting tokens to defer before the query starts executing (as part
+** of the xFilter() method). There is one element in the array for each
+** token in the FTS expression.
+**
+** Tokens are divided into AND/NEAR clusters. All tokens in a cluster belong
+** to phrases that are connected only by AND and NEAR operators (not OR or
+** NOT). When determining tokens to defer, each AND/NEAR cluster is considered
+** separately. The root of a tokens AND/NEAR cluster is stored in
+** Fts3TokenAndCost.pRoot.
+*/
+typedef struct Fts3TokenAndCost Fts3TokenAndCost;
+struct Fts3TokenAndCost {
+ Fts3Phrase *pPhrase; /* The phrase the token belongs to */
+ int iToken; /* Position of token in phrase */
+ Fts3PhraseToken *pToken; /* The token itself */
+ Fts3Expr *pRoot; /* Root of NEAR/AND cluster */
+ int nOvfl; /* Number of overflow pages to load doclist */
+ int iCol; /* The column the token must match */
+};
+
+/*
+** This function is used to populate an allocated Fts3TokenAndCost array.
+**
+** If *pRc is not SQLITE_OK when this function is called, it is a no-op.
+** Otherwise, if an error occurs during execution, *pRc is set to an
+** SQLite error code.
+*/
+static void fts3EvalTokenCosts(
+ Fts3Cursor *pCsr, /* FTS Cursor handle */
+ Fts3Expr *pRoot, /* Root of current AND/NEAR cluster */
+ Fts3Expr *pExpr, /* Expression to consider */
+ Fts3TokenAndCost **ppTC, /* Write new entries to *(*ppTC)++ */
+ Fts3Expr ***ppOr, /* Write new OR root to *(*ppOr)++ */
+ int *pRc /* IN/OUT: Error code */
+){
+ if( *pRc==SQLITE_OK ){
+ if( pExpr->eType==FTSQUERY_PHRASE ){
+ Fts3Phrase *pPhrase = pExpr->pPhrase;
+ int i;
+ for(i=0; *pRc==SQLITE_OK && i<pPhrase->nToken; i++){
+ Fts3TokenAndCost *pTC = (*ppTC)++;
+ pTC->pPhrase = pPhrase;
+ pTC->iToken = i;
+ pTC->pRoot = pRoot;
+ pTC->pToken = &pPhrase->aToken[i];
+ pTC->iCol = pPhrase->iColumn;
+ *pRc = sqlite3Fts3MsrOvfl(pCsr, pTC->pToken->pSegcsr, &pTC->nOvfl);
+ }
+ }else if( pExpr->eType!=FTSQUERY_NOT ){
+ assert( pExpr->eType==FTSQUERY_OR
+ || pExpr->eType==FTSQUERY_AND
+ || pExpr->eType==FTSQUERY_NEAR
+ );
+ assert( pExpr->pLeft && pExpr->pRight );
+ if( pExpr->eType==FTSQUERY_OR ){
+ pRoot = pExpr->pLeft;
+ **ppOr = pRoot;
+ (*ppOr)++;
+ }
+ fts3EvalTokenCosts(pCsr, pRoot, pExpr->pLeft, ppTC, ppOr, pRc);
+ if( pExpr->eType==FTSQUERY_OR ){
+ pRoot = pExpr->pRight;
+ **ppOr = pRoot;
+ (*ppOr)++;
+ }
+ fts3EvalTokenCosts(pCsr, pRoot, pExpr->pRight, ppTC, ppOr, pRc);
+ }
+ }
+}
+
+/*
+** Determine the average document (row) size in pages. If successful,
+** write this value to *pnPage and return SQLITE_OK. Otherwise, return
+** an SQLite error code.
+**
+** The average document size in pages is calculated by first determining
+** the average size in bytes, B. If B is less than the amount
+** of data that will fit on a single leaf page of an intkey table in
+** this database, then the average docsize is 1. Otherwise, it is 1 plus
+** the number of overflow pages consumed by a record B bytes in size.
+*/
+static int fts3EvalAverageDocsize(Fts3Cursor *pCsr, int *pnPage){
+ int rc = SQLITE_OK;
+ if( pCsr->nRowAvg==0 ){
+ /* The average document size, which is required to calculate the cost
+ ** of each doclist, has not yet been determined. Read the required
+ ** data from the %_stat table to calculate it.
+ **
+ ** Entry 0 of the %_stat table is a blob containing (nCol+1) FTS3
+ ** varints, where nCol is the number of columns in the FTS3 table.
+ ** The first varint is the number of documents currently stored in
+ ** the table. The following nCol varints contain the total amount of
+ ** data stored in all rows of each column of the table, from left
+ ** to right.
+ */
+ Fts3Table *p = (Fts3Table*)pCsr->base.pVtab;
+ sqlite3_stmt *pStmt;
+ sqlite3_int64 nDoc = 0;
+ sqlite3_int64 nByte = 0;
+ const char *pEnd;
+ const char *a;
+
+ rc = sqlite3Fts3SelectDoctotal(p, &pStmt);
+ if( rc!=SQLITE_OK ) return rc;
+ a = sqlite3_column_blob(pStmt, 0);
+ testcase( a==0 ); /* If %_stat.value set to X'' */
+ if( a ){
+ pEnd = &a[sqlite3_column_bytes(pStmt, 0)];
+ a += sqlite3Fts3GetVarintBounded(a, pEnd, &nDoc);
+ while( a<pEnd ){
+ a += sqlite3Fts3GetVarintBounded(a, pEnd, &nByte);
+ }
+ }
+ if( nDoc==0 || nByte==0 ){
+ sqlite3_reset(pStmt);
+ return FTS_CORRUPT_VTAB;
+ }
+
+ pCsr->nDoc = nDoc;
+ pCsr->nRowAvg = (int)(((nByte / nDoc) + p->nPgsz) / p->nPgsz);
+ assert( pCsr->nRowAvg>0 );
+ rc = sqlite3_reset(pStmt);
+ }
+
+ *pnPage = pCsr->nRowAvg;
+ return rc;
+}
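+
+/*
+** Worked example of the calculation above (figures assumed): with
+** nByte=1000000 bytes spread over nDoc=500 documents and a page size of
+** p->nPgsz=1024, the average row is 2000 bytes, so
+** nRowAvg = (2000+1024)/1024 = 2 pages (integer division).
+*/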
+
+/*
+** This function is called to select the tokens (if any) that will be
+** deferred. The array aTC[] has already been populated when this is
+** called.
+**
+** This function is called once for each AND/NEAR cluster in the
+** expression. Each invocation determines which tokens to defer within
+** the cluster with root node pRoot. See comments above the definition
+** of struct Fts3TokenAndCost for more details.
+**
+** If no error occurs, SQLITE_OK is returned and sqlite3Fts3DeferToken()
+** called on each token to defer. Otherwise, an SQLite error code is
+** returned.
+*/
+static int fts3EvalSelectDeferred(
+ Fts3Cursor *pCsr, /* FTS Cursor handle */
+ Fts3Expr *pRoot, /* Consider tokens with this root node */
+ Fts3TokenAndCost *aTC, /* Array of expression tokens and costs */
+ int nTC /* Number of entries in aTC[] */
+){
+ Fts3Table *pTab = (Fts3Table *)pCsr->base.pVtab;
+ int nDocSize = 0; /* Number of pages per doc loaded */
+ int rc = SQLITE_OK; /* Return code */
+ int ii; /* Iterator variable for various purposes */
+ int nOvfl = 0; /* Total overflow pages used by doclists */
+ int nToken = 0; /* Total number of tokens in cluster */
+
+ int nMinEst = 0; /* The minimum count for any phrase so far. */
+ int nLoad4 = 1; /* (Phrases that will be loaded)^4. */
+
+ /* Tokens are never deferred for FTS tables created using the content=xxx
+ ** option. The reason being that it is not guaranteed that the content
+ ** table actually contains the same data as the index. To prevent this from
+ ** causing any problems, the deferred token optimization is completely
+ ** disabled for content=xxx tables. */
+ if( pTab->zContentTbl ){
+ return SQLITE_OK;
+ }
+
+ /* Count the tokens in this AND/NEAR cluster. If none of the doclists
+ ** associated with the tokens spill onto overflow pages, or if there is
+ ** only 1 token, exit early. No tokens to defer in this case. */
+ for(ii=0; ii<nTC; ii++){
+ if( aTC[ii].pRoot==pRoot ){
+ nOvfl += aTC[ii].nOvfl;
+ nToken++;
+ }
+ }
+ if( nOvfl==0 || nToken<2 ) return SQLITE_OK;
+
+ /* Obtain the average docsize (in pages). */
+ rc = fts3EvalAverageDocsize(pCsr, &nDocSize);
+ assert( rc!=SQLITE_OK || nDocSize>0 );
+
+ /* Iterate through all tokens in this AND/NEAR cluster, in ascending order
+ ** of the number of overflow pages that will be loaded by the pager layer
+ ** to retrieve the entire doclist for the token from the full-text index.
+ ** Load the doclists for tokens that are either:
+ **
+ ** a. The cheapest token in the entire query (i.e. the one visited by the
+ ** first iteration of this loop), or
+ **
+ ** b. Part of a multi-token phrase.
+ **
+ ** After each token doclist is loaded, merge it with the others from the
+ ** same phrase and count the number of documents that the merged doclist
+ ** contains. Set variable "nMinEst" to the smallest number of documents in
+ ** any phrase doclist for which 1 or more token doclists have been loaded.
+ ** Let nOther be the number of other phrases for which it is certain that
+ ** one or more tokens will not be deferred.
+ **
+ ** Then, for each token, defer it if loading the doclist would result in
+ ** loading N or more overflow pages into memory, where N is computed as:
+ **
+ ** (nMinEst + 4^nOther - 1) / (4^nOther)
+ */
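+  /* A hedged numeric illustration of the threshold above (values
+  ** assumed): with nMinEst=1000 matching documents, nLoad4=16 (i.e.
+  ** two token doclists already loaded, so nLoad4/4 = 4) and an average
+  ** docsize of nDocSize=1 page, a token is deferred once its doclist
+  ** occupies at least (1000+4-1)/4 = 250 overflow pages.
+  */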
+ for(ii=0; ii<nToken && rc==SQLITE_OK; ii++){
+ int iTC; /* Used to iterate through aTC[] array. */
+ Fts3TokenAndCost *pTC = 0; /* Set to cheapest remaining token. */
+
+ /* Set pTC to point to the cheapest remaining token. */
+ for(iTC=0; iTC<nTC; iTC++){
+ if( aTC[iTC].pToken && aTC[iTC].pRoot==pRoot
+ && (!pTC || aTC[iTC].nOvfl<pTC->nOvfl)
+ ){
+ pTC = &aTC[iTC];
+ }
+ }
+ assert( pTC );
+
+ if( ii && pTC->nOvfl>=((nMinEst+(nLoad4/4)-1)/(nLoad4/4))*nDocSize ){
+ /* The number of overflow pages to load for this (and therefore all
+ ** subsequent) tokens is greater than the estimated number of pages
+ ** that will be loaded if all subsequent tokens are deferred.
+ */
+ Fts3PhraseToken *pToken = pTC->pToken;
+ rc = sqlite3Fts3DeferToken(pCsr, pToken, pTC->iCol);
+ fts3SegReaderCursorFree(pToken->pSegcsr);
+ pToken->pSegcsr = 0;
+ }else{
+ /* Set nLoad4 to the value of (4^nOther) for the next iteration of the
+ ** for-loop. Except, limit the value to 2^24 to prevent it from
+ ** overflowing the 32-bit integer it is stored in. */
+ if( ii<12 ) nLoad4 = nLoad4*4;
+
+ if( ii==0 || (pTC->pPhrase->nToken>1 && ii!=nToken-1) ){
+ /* Either this is the cheapest token in the entire query, or it is
+ ** part of a multi-token phrase. Either way, the entire doclist will
+ ** (eventually) be loaded into memory. It may as well be now. */
+ Fts3PhraseToken *pToken = pTC->pToken;
+ int nList = 0;
+ char *pList = 0;
+ rc = fts3TermSelect(pTab, pToken, pTC->iCol, &nList, &pList);
+ assert( rc==SQLITE_OK || pList==0 );
+ if( rc==SQLITE_OK ){
+ rc = fts3EvalPhraseMergeToken(
+ pTab, pTC->pPhrase, pTC->iToken,pList,nList
+ );
+ }
+ if( rc==SQLITE_OK ){
+ int nCount;
+ nCount = fts3DoclistCountDocids(
+ pTC->pPhrase->doclist.aAll, pTC->pPhrase->doclist.nAll
+ );
+ if( ii==0 || nCount<nMinEst ) nMinEst = nCount;
+ }
+ }
+ }
+ pTC->pToken = 0;
+ }
+
+ return rc;
+}
+
+/*
+** This function is called from within the xFilter method. It initializes
+** the full-text query currently stored in pCsr->pExpr. To iterate through
+** the results of a query, the caller does:
+**
+** fts3EvalStart(pCsr);
+** while( 1 ){
+** fts3EvalNext(pCsr);
+** if( pCsr->bEof ) break;
+** ... return row pCsr->iPrevId to the caller ...
+** }
+*/
+static int fts3EvalStart(Fts3Cursor *pCsr){
+ Fts3Table *pTab = (Fts3Table *)pCsr->base.pVtab;
+ int rc = SQLITE_OK;
+ int nToken = 0;
+ int nOr = 0;
+
+ /* Allocate a MultiSegReader for each token in the expression. */
+ fts3EvalAllocateReaders(pCsr, pCsr->pExpr, &nToken, &nOr, &rc);
+
+ /* Determine which, if any, tokens in the expression should be deferred. */
+#ifndef SQLITE_DISABLE_FTS4_DEFERRED
+ if( rc==SQLITE_OK && nToken>1 && pTab->bFts4 ){
+ Fts3TokenAndCost *aTC;
+ Fts3Expr **apOr;
+ aTC = (Fts3TokenAndCost *)sqlite3_malloc64(
+ sizeof(Fts3TokenAndCost) * nToken
+ + sizeof(Fts3Expr *) * nOr * 2
+ );
+
+    if( !aTC ){
+      rc = SQLITE_NOMEM;
+    }else{
+      int ii;
+      Fts3TokenAndCost *pTC = aTC;
+      Fts3Expr **ppOr;
+
+      /* Form apOr only now that the allocation is known to have
+      ** succeeded, so that no pointer arithmetic is performed on a
+      ** NULL aTC. */
+      apOr = (Fts3Expr **)&aTC[nToken];
+      ppOr = apOr;
+
+ fts3EvalTokenCosts(pCsr, 0, pCsr->pExpr, &pTC, &ppOr, &rc);
+ nToken = (int)(pTC-aTC);
+ nOr = (int)(ppOr-apOr);
+
+ if( rc==SQLITE_OK ){
+ rc = fts3EvalSelectDeferred(pCsr, 0, aTC, nToken);
+ for(ii=0; rc==SQLITE_OK && ii<nOr; ii++){
+ rc = fts3EvalSelectDeferred(pCsr, apOr[ii], aTC, nToken);
+ }
+ }
+
+ sqlite3_free(aTC);
+ }
+ }
+#endif
+
+ fts3EvalStartReaders(pCsr, pCsr->pExpr, &rc);
+ return rc;
+}
+
+/*
+** Invalidate the current position list for phrase pPhrase.
+*/
+static void fts3EvalInvalidatePoslist(Fts3Phrase *pPhrase){
+ if( pPhrase->doclist.bFreeList ){
+ sqlite3_free(pPhrase->doclist.pList);
+ }
+ pPhrase->doclist.pList = 0;
+ pPhrase->doclist.nList = 0;
+ pPhrase->doclist.bFreeList = 0;
+}
+
+/*
+** This function is called to edit the position list associated with
+** the phrase object passed as the fifth argument according to a NEAR
+** condition. For example:
+**
+** abc NEAR/5 "def ghi"
+**
+** Parameter nNear is passed the NEAR distance of the expression (5 in
+** the example above). When this function is called, *paPoslist points to
+** the position list, and *pnToken is the number of phrase tokens in the
+** phrase on the other side of the NEAR operator to pPhrase. For example,
+** if pPhrase refers to the "def ghi" phrase, then *paPoslist points to
+** the position list associated with phrase "abc".
+**
+** All positions in the pPhrase position list that are not sufficiently
+** close to a position in the *paPoslist position list are removed. If this
+** leaves 0 positions, zero is returned. Otherwise, non-zero.
+**
+** Before returning, *paPoslist is set to point to the position list
+** associated with pPhrase. And *pnToken is set to the number of tokens in
+** pPhrase.
+*/
+static int fts3EvalNearTrim(
+ int nNear, /* NEAR distance. As in "NEAR/nNear". */
+ char *aTmp, /* Temporary space to use */
+ char **paPoslist, /* IN/OUT: Position list */
+ int *pnToken, /* IN/OUT: Tokens in phrase of *paPoslist */
+ Fts3Phrase *pPhrase /* The phrase object to trim the doclist of */
+){
+ int nParam1 = nNear + pPhrase->nToken;
+ int nParam2 = nNear + *pnToken;
+ int nNew;
+ char *p2;
+ char *pOut;
+ int res;
+
+ assert( pPhrase->doclist.pList );
+
+ p2 = pOut = pPhrase->doclist.pList;
+ res = fts3PoslistNearMerge(
+ &pOut, aTmp, nParam1, nParam2, paPoslist, &p2
+ );
+ if( res ){
+ nNew = (int)(pOut - pPhrase->doclist.pList) - 1;
+ if( nNew>=0 ){
+ assert( pPhrase->doclist.pList[nNew]=='\0' );
+ assert( nNew<=pPhrase->doclist.nList && nNew>0 );
+ memset(&pPhrase->doclist.pList[nNew], 0, pPhrase->doclist.nList - nNew);
+ pPhrase->doclist.nList = nNew;
+ }
+ *paPoslist = pPhrase->doclist.pList;
+ *pnToken = pPhrase->nToken;
+ }
+
+ return res;
+}
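+
+/*
+** Hedged worked example for the parameters above: for the expression
+** abc NEAR/5 "def ghi", when pPhrase is "def ghi" (nToken==2) and
+** *paPoslist/*pnToken describe "abc" (*pnToken==1), then
+** nParam1 = 5+2 = 7 and nParam2 = 5+1 = 6 - the two distance
+** parameters passed to fts3PoslistNearMerge().
+*/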
+
+/*
+** This function is a no-op if *pRc is other than SQLITE_OK when it is called.
+** Otherwise, it advances the expression passed as the second argument to
+** point to the next matching row in the database. Expressions iterate through
+** matching rows in docid order. Ascending order if Fts3Cursor.bDesc is zero,
+** or descending if it is non-zero.
+**
+** If an error occurs, *pRc is set to an SQLite error code. Otherwise, if
+** successful, the following variables in pExpr are set:
+**
+** Fts3Expr.bEof (non-zero if EOF - there is no next row)
+** Fts3Expr.iDocid (valid if bEof==0. The docid of the next row)
+**
+** If the expression is of type FTSQUERY_PHRASE, and the expression is not
+** at EOF, then the following variables are populated with the position list
+** for the phrase for the visited row:
+**
+** FTs3Expr.pPhrase->doclist.nList (length of pList in bytes)
+** FTs3Expr.pPhrase->doclist.pList (pointer to position list)
+**
+** It says above that this function advances the expression to the next
+** matching row. This is usually true, but there are the following exceptions:
+**
+** 1. Deferred tokens are not taken into account. If a phrase consists
+** entirely of deferred tokens, it is assumed to match every row in
+** the db. In this case the position-list is not populated at all.
+**
+** Or, if a phrase contains one or more deferred tokens and one or
+** more non-deferred tokens, then the expression is advanced to the
+** next possible match, considering only non-deferred tokens. In other
+** words, if the phrase is "A B C", and "B" is deferred, the expression
+** is advanced to the next row that contains an instance of "A * C",
+** where "*" may match any single token. The position list in this case
+** is populated as for "A * C" before returning.
+**
+** 2. NEAR is treated as AND. If the expression is "x NEAR y", it is
+** advanced to point to the next row that matches "x AND y".
+**
+** See sqlite3Fts3EvalTestDeferred() for details on testing if a row is
+** really a match, taking into account deferred tokens and NEAR operators.
+*/
+static void fts3EvalNextRow(
+ Fts3Cursor *pCsr, /* FTS Cursor handle */
+ Fts3Expr *pExpr, /* Expr. to advance to next matching row */
+ int *pRc /* IN/OUT: Error code */
+){
+ if( *pRc==SQLITE_OK ){
+ int bDescDoclist = pCsr->bDesc; /* Used by DOCID_CMP() macro */
+ assert( pExpr->bEof==0 );
+ pExpr->bStart = 1;
+
+ switch( pExpr->eType ){
+ case FTSQUERY_NEAR:
+ case FTSQUERY_AND: {
+ Fts3Expr *pLeft = pExpr->pLeft;
+ Fts3Expr *pRight = pExpr->pRight;
+ assert( !pLeft->bDeferred || !pRight->bDeferred );
+
+ if( pLeft->bDeferred ){
+ /* LHS is entirely deferred. So we assume it matches every row.
+ ** Advance the RHS iterator to find the next row visited. */
+ fts3EvalNextRow(pCsr, pRight, pRc);
+ pExpr->iDocid = pRight->iDocid;
+ pExpr->bEof = pRight->bEof;
+ }else if( pRight->bDeferred ){
+ /* RHS is entirely deferred. So we assume it matches every row.
+ ** Advance the LHS iterator to find the next row visited. */
+ fts3EvalNextRow(pCsr, pLeft, pRc);
+ pExpr->iDocid = pLeft->iDocid;
+ pExpr->bEof = pLeft->bEof;
+ }else{
+ /* Neither the RHS or LHS are deferred. */
+ fts3EvalNextRow(pCsr, pLeft, pRc);
+ fts3EvalNextRow(pCsr, pRight, pRc);
+ while( !pLeft->bEof && !pRight->bEof && *pRc==SQLITE_OK ){
+ sqlite3_int64 iDiff = DOCID_CMP(pLeft->iDocid, pRight->iDocid);
+ if( iDiff==0 ) break;
+ if( iDiff<0 ){
+ fts3EvalNextRow(pCsr, pLeft, pRc);
+ }else{
+ fts3EvalNextRow(pCsr, pRight, pRc);
+ }
+ }
+ pExpr->iDocid = pLeft->iDocid;
+ pExpr->bEof = (pLeft->bEof || pRight->bEof);
+ if( pExpr->eType==FTSQUERY_NEAR && pExpr->bEof ){
+ assert( pRight->eType==FTSQUERY_PHRASE );
+ if( pRight->pPhrase->doclist.aAll ){
+ Fts3Doclist *pDl = &pRight->pPhrase->doclist;
+ while( *pRc==SQLITE_OK && pRight->bEof==0 ){
+ memset(pDl->pList, 0, pDl->nList);
+ fts3EvalNextRow(pCsr, pRight, pRc);
+ }
+ }
+ if( pLeft->pPhrase && pLeft->pPhrase->doclist.aAll ){
+ Fts3Doclist *pDl = &pLeft->pPhrase->doclist;
+ while( *pRc==SQLITE_OK && pLeft->bEof==0 ){
+ memset(pDl->pList, 0, pDl->nList);
+ fts3EvalNextRow(pCsr, pLeft, pRc);
+ }
+ }
+ pRight->bEof = pLeft->bEof = 1;
+ }
+ }
+ break;
+ }
+
+ case FTSQUERY_OR: {
+ Fts3Expr *pLeft = pExpr->pLeft;
+ Fts3Expr *pRight = pExpr->pRight;
+ sqlite3_int64 iCmp = DOCID_CMP(pLeft->iDocid, pRight->iDocid);
+
+ assert( pLeft->bStart || pLeft->iDocid==pRight->iDocid );
+ assert( pRight->bStart || pLeft->iDocid==pRight->iDocid );
+
+ if( pRight->bEof || (pLeft->bEof==0 && iCmp<0) ){
+ fts3EvalNextRow(pCsr, pLeft, pRc);
+ }else if( pLeft->bEof || iCmp>0 ){
+ fts3EvalNextRow(pCsr, pRight, pRc);
+ }else{
+ fts3EvalNextRow(pCsr, pLeft, pRc);
+ fts3EvalNextRow(pCsr, pRight, pRc);
+ }
+
+ pExpr->bEof = (pLeft->bEof && pRight->bEof);
+ iCmp = DOCID_CMP(pLeft->iDocid, pRight->iDocid);
+ if( pRight->bEof || (pLeft->bEof==0 && iCmp<0) ){
+ pExpr->iDocid = pLeft->iDocid;
+ }else{
+ pExpr->iDocid = pRight->iDocid;
+ }
+
+ break;
+ }
+
+ case FTSQUERY_NOT: {
+ Fts3Expr *pLeft = pExpr->pLeft;
+ Fts3Expr *pRight = pExpr->pRight;
+
+ if( pRight->bStart==0 ){
+ fts3EvalNextRow(pCsr, pRight, pRc);
+ assert( *pRc!=SQLITE_OK || pRight->bStart );
+ }
+
+ fts3EvalNextRow(pCsr, pLeft, pRc);
+ if( pLeft->bEof==0 ){
+ while( !*pRc
+ && !pRight->bEof
+ && DOCID_CMP(pLeft->iDocid, pRight->iDocid)>0
+ ){
+ fts3EvalNextRow(pCsr, pRight, pRc);
+ }
+ }
+ pExpr->iDocid = pLeft->iDocid;
+ pExpr->bEof = pLeft->bEof;
+ break;
+ }
+
+ default: {
+ Fts3Phrase *pPhrase = pExpr->pPhrase;
+ fts3EvalInvalidatePoslist(pPhrase);
+ *pRc = fts3EvalPhraseNext(pCsr, pPhrase, &pExpr->bEof);
+ pExpr->iDocid = pPhrase->doclist.iDocid;
+ break;
+ }
+ }
+ }
+}
+
+/*
+** If *pRc is not SQLITE_OK, or if pExpr is not the root node of a NEAR
+** cluster, then this function returns 1 immediately.
+**
+** Otherwise, it checks if the current row really does match the NEAR
+** expression, using the data currently stored in the position lists
+** (Fts3Expr->pPhrase.doclist.pList/nList) for each phrase in the expression.
+**
+** If the current row is a match, the position list associated with each
+** phrase in the NEAR expression is edited in place to contain only those
+** phrase instances sufficiently close to their peers to satisfy all NEAR
+** constraints. In this case it returns 1. If the NEAR expression does not
+** match the current row, 0 is returned. The position lists may or may not
+** be edited if 0 is returned.
+*/
+static int fts3EvalNearTest(Fts3Expr *pExpr, int *pRc){
+ int res = 1;
+
+ /* The following block runs if pExpr is the root of a NEAR query.
+ ** For example, the query:
+ **
+ ** "w" NEAR "x" NEAR "y" NEAR "z"
+ **
+ ** which is represented in tree form as:
+ **
+ ** |
+ ** +--NEAR--+ <-- root of NEAR query
+ ** | |
+ ** +--NEAR--+ "z"
+ ** | |
+ ** +--NEAR--+ "y"
+ ** | |
+ ** "w" "x"
+ **
+ ** The right-hand child of a NEAR node is always a phrase. The
+ ** left-hand child may be either a phrase or a NEAR node. There are
+ ** no exceptions to this - it's the way the parser in fts3_expr.c works.
+ */
+ if( *pRc==SQLITE_OK
+ && pExpr->eType==FTSQUERY_NEAR
+ && (pExpr->pParent==0 || pExpr->pParent->eType!=FTSQUERY_NEAR)
+ ){
+ Fts3Expr *p;
+ sqlite3_int64 nTmp = 0; /* Bytes of temp space */
+ char *aTmp; /* Temp space for PoslistNearMerge() */
+
+ /* Allocate temporary working space. */
+ for(p=pExpr; p->pLeft; p=p->pLeft){
+ assert( p->pRight->pPhrase->doclist.nList>0 );
+ nTmp += p->pRight->pPhrase->doclist.nList;
+ }
+ nTmp += p->pPhrase->doclist.nList;
+ aTmp = sqlite3_malloc64(nTmp*2);
+ if( !aTmp ){
+ *pRc = SQLITE_NOMEM;
+ res = 0;
+ }else{
+ char *aPoslist = p->pPhrase->doclist.pList;
+ int nToken = p->pPhrase->nToken;
+
+ for(p=p->pParent;res && p && p->eType==FTSQUERY_NEAR; p=p->pParent){
+ Fts3Phrase *pPhrase = p->pRight->pPhrase;
+ int nNear = p->nNear;
+ res = fts3EvalNearTrim(nNear, aTmp, &aPoslist, &nToken, pPhrase);
+ }
+
+ aPoslist = pExpr->pRight->pPhrase->doclist.pList;
+ nToken = pExpr->pRight->pPhrase->nToken;
+ for(p=pExpr->pLeft; p && res; p=p->pLeft){
+ int nNear;
+ Fts3Phrase *pPhrase;
+ assert( p->pParent && p->pParent->pLeft==p );
+ nNear = p->pParent->nNear;
+ pPhrase = (
+ p->eType==FTSQUERY_NEAR ? p->pRight->pPhrase : p->pPhrase
+ );
+ res = fts3EvalNearTrim(nNear, aTmp, &aPoslist, &nToken, pPhrase);
+ }
+ }
+
+ sqlite3_free(aTmp);
+ }
+
+ return res;
+}
+
+/*
+** This function is a helper function for sqlite3Fts3EvalTestDeferred().
+** Assuming no error occurs or has occurred, it returns non-zero if the
+** expression passed as the second argument matches the row that pCsr
+** currently points to, or zero if it does not.
+**
+** If *pRc is not SQLITE_OK when this function is called, it is a no-op.
+** If an error occurs during execution of this function, *pRc is set to
+** the appropriate SQLite error code. In this case the returned value is
+** undefined.
+*/
+static int fts3EvalTestExpr(
+ Fts3Cursor *pCsr, /* FTS cursor handle */
+ Fts3Expr *pExpr, /* Expr to test. May or may not be root. */
+ int *pRc /* IN/OUT: Error code */
+){
+ int bHit = 1; /* Return value */
+ if( *pRc==SQLITE_OK ){
+ switch( pExpr->eType ){
+ case FTSQUERY_NEAR:
+ case FTSQUERY_AND:
+ bHit = (
+ fts3EvalTestExpr(pCsr, pExpr->pLeft, pRc)
+ && fts3EvalTestExpr(pCsr, pExpr->pRight, pRc)
+ && fts3EvalNearTest(pExpr, pRc)
+ );
+
+ /* If the NEAR expression does not match any rows, zero the doclist for
+ ** all phrases involved in the NEAR. This is because the snippet(),
+ ** offsets() and matchinfo() functions are not supposed to recognize
+ ** any instances of phrases that are part of unmatched NEAR queries.
+ ** For example if this expression:
+ **
+ ** ... MATCH 'a OR (b NEAR c)'
+ **
+ ** is matched against a row containing:
+ **
+ ** 'a b d e'
+ **
+** then any snippet() should only highlight the "a" term, not the "b"
+ ** (as "b" is part of a non-matching NEAR clause).
+ */
+ if( bHit==0
+ && pExpr->eType==FTSQUERY_NEAR
+ && (pExpr->pParent==0 || pExpr->pParent->eType!=FTSQUERY_NEAR)
+ ){
+ Fts3Expr *p;
+ for(p=pExpr; p->pPhrase==0; p=p->pLeft){
+ if( p->pRight->iDocid==pCsr->iPrevId ){
+ fts3EvalInvalidatePoslist(p->pRight->pPhrase);
+ }
+ }
+ if( p->iDocid==pCsr->iPrevId ){
+ fts3EvalInvalidatePoslist(p->pPhrase);
+ }
+ }
+
+ break;
+
+ case FTSQUERY_OR: {
+ int bHit1 = fts3EvalTestExpr(pCsr, pExpr->pLeft, pRc);
+ int bHit2 = fts3EvalTestExpr(pCsr, pExpr->pRight, pRc);
+ bHit = bHit1 || bHit2;
+ break;
+ }
+
+ case FTSQUERY_NOT:
+ bHit = (
+ fts3EvalTestExpr(pCsr, pExpr->pLeft, pRc)
+ && !fts3EvalTestExpr(pCsr, pExpr->pRight, pRc)
+ );
+ break;
+
+ default: {
+#ifndef SQLITE_DISABLE_FTS4_DEFERRED
+ if( pCsr->pDeferred
+ && (pExpr->iDocid==pCsr->iPrevId || pExpr->bDeferred)
+ ){
+ Fts3Phrase *pPhrase = pExpr->pPhrase;
+ assert( pExpr->bDeferred || pPhrase->doclist.bFreeList==0 );
+ if( pExpr->bDeferred ){
+ fts3EvalInvalidatePoslist(pPhrase);
+ }
+ *pRc = fts3EvalDeferredPhrase(pCsr, pPhrase);
+ bHit = (pPhrase->doclist.pList!=0);
+ pExpr->iDocid = pCsr->iPrevId;
+ }else
+#endif
+ {
+ bHit = (
+ pExpr->bEof==0 && pExpr->iDocid==pCsr->iPrevId
+ && pExpr->pPhrase->doclist.nList>0
+ );
+ }
+ break;
+ }
+ }
+ }
+ return bHit;
+}
+
+/*
+** This function is called as the second part of each xNext operation when
+** iterating through the results of a full-text query. At this point the
+** cursor points to a row that matches the query expression, with the
+** following caveats:
+**
+** * Up until this point, "NEAR" operators in the expression have been
+** treated as "AND".
+**
+** * Deferred tokens have not yet been considered.
+**
+** If *pRc is not SQLITE_OK when this function is called, it immediately
+** returns 0. Otherwise, it tests whether or not after considering NEAR
+** operators and deferred tokens the current row is still a match for the
+** expression. It returns 1 if both of the following are true:
+**
+** 1. *pRc is SQLITE_OK when this function returns, and
+**
+** 2. After scanning the current FTS table row for the deferred tokens,
+** it is determined that the row does *not* match the query.
+**
+** Or, if no error occurs and it seems the current row does match the FTS
+** query, return 0.
+*/
+SQLITE_PRIVATE int sqlite3Fts3EvalTestDeferred(Fts3Cursor *pCsr, int *pRc){
+ int rc = *pRc;
+ int bMiss = 0;
+ if( rc==SQLITE_OK ){
+
+ /* If there are one or more deferred tokens, load the current row into
+ ** memory and scan it to determine the position list for each deferred
+ ** token. Then, see if this row is really a match, considering deferred
+ ** tokens and NEAR operators (neither of which were taken into account
+ ** earlier, by fts3EvalNextRow()).
+ */
+ if( pCsr->pDeferred ){
+ rc = fts3CursorSeek(0, pCsr);
+ if( rc==SQLITE_OK ){
+ rc = sqlite3Fts3CacheDeferredDoclists(pCsr);
+ }
+ }
+ bMiss = (0==fts3EvalTestExpr(pCsr, pCsr->pExpr, &rc));
+
+ /* Free the position-lists accumulated for each deferred token above. */
+ sqlite3Fts3FreeDeferredDoclists(pCsr);
+ *pRc = rc;
+ }
+ return (rc==SQLITE_OK && bMiss);
+}
+
+/*
+** Advance to the next document that matches the FTS expression in
+** Fts3Cursor.pExpr.
+*/
+static int fts3EvalNext(Fts3Cursor *pCsr){
+ int rc = SQLITE_OK; /* Return Code */
+ Fts3Expr *pExpr = pCsr->pExpr;
+ assert( pCsr->isEof==0 );
+ if( pExpr==0 ){
+ pCsr->isEof = 1;
+ }else{
+ do {
+ if( pCsr->isRequireSeek==0 ){
+ sqlite3_reset(pCsr->pStmt);
+ }
+ assert( sqlite3_data_count(pCsr->pStmt)==0 );
+ fts3EvalNextRow(pCsr, pExpr, &rc);
+ pCsr->isEof = pExpr->bEof;
+ pCsr->isRequireSeek = 1;
+ pCsr->isMatchinfoNeeded = 1;
+ pCsr->iPrevId = pExpr->iDocid;
+ }while( pCsr->isEof==0 && sqlite3Fts3EvalTestDeferred(pCsr, &rc) );
+ }
+
+ /* Check if the cursor is past the end of the docid range specified
+ ** by Fts3Cursor.iMinDocid/iMaxDocid. If so, set the EOF flag. */
+ if( rc==SQLITE_OK && (
+ (pCsr->bDesc==0 && pCsr->iPrevId>pCsr->iMaxDocid)
+ || (pCsr->bDesc!=0 && pCsr->iPrevId<pCsr->iMinDocid)
+ )){
+ pCsr->isEof = 1;
+ }
+
+ return rc;
+}
+
+/*
+** Restart iteration for expression pExpr so that the next call to
+** fts3EvalNext() visits the first row. Do not allow incremental
+** loading or merging of phrase doclists for this iteration.
+**
+** If *pRc is other than SQLITE_OK when this function is called, it is
+** a no-op. If an error occurs within this function, *pRc is set to an
+** SQLite error code before returning.
+*/
+static void fts3EvalRestart(
+ Fts3Cursor *pCsr,
+ Fts3Expr *pExpr,
+ int *pRc
+){
+ if( pExpr && *pRc==SQLITE_OK ){
+ Fts3Phrase *pPhrase = pExpr->pPhrase;
+
+ if( pPhrase ){
+ fts3EvalInvalidatePoslist(pPhrase);
+ if( pPhrase->bIncr ){
+ int i;
+ for(i=0; i<pPhrase->nToken; i++){
+ Fts3PhraseToken *pToken = &pPhrase->aToken[i];
+ assert( pToken->pDeferred==0 );
+ if( pToken->pSegcsr ){
+ sqlite3Fts3MsrIncrRestart(pToken->pSegcsr);
+ }
+ }
+ *pRc = fts3EvalPhraseStart(pCsr, 0, pPhrase);
+ }
+ pPhrase->doclist.pNextDocid = 0;
+ pPhrase->doclist.iDocid = 0;
+ pPhrase->pOrPoslist = 0;
+ }
+
+ pExpr->iDocid = 0;
+ pExpr->bEof = 0;
+ pExpr->bStart = 0;
+
+ fts3EvalRestart(pCsr, pExpr->pLeft, pRc);
+ fts3EvalRestart(pCsr, pExpr->pRight, pRc);
+ }
+}
+
+/*
+** After allocating the Fts3Expr.aMI[] array for each phrase in the
+** expression rooted at pExpr, the cursor iterates through all rows matched
+** by pExpr, calling this function for each row. This function increments
+** the values in Fts3Expr.aMI[] according to the position-list currently
+** found in Fts3Expr.pPhrase->doclist.pList for each of the phrase
+** expression nodes.
+*/
+static void fts3EvalUpdateCounts(Fts3Expr *pExpr, int nCol){
+ if( pExpr ){
+ Fts3Phrase *pPhrase = pExpr->pPhrase;
+ if( pPhrase && pPhrase->doclist.pList ){
+ int iCol = 0;
+ char *p = pPhrase->doclist.pList;
+
+ do{
+ u8 c = 0;
+ int iCnt = 0;
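+ /* Count the position varints for the current column. The test
+ ** (0xFE & (*p | c)) becomes false only when the previous byte had
+ ** no varint continuation bit (c==0) and the current byte is a 0x00
+ ** or 0x01 terminator. iCnt is bumped once per varint start. */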
+ while( 0xFE & (*p | c) ){
+ if( (c&0x80)==0 ) iCnt++;
+ c = *p++ & 0x80;
+ }
+
+ /* aMI[iCol*3 + 1] = Number of occurrences
+ ** aMI[iCol*3 + 2] = Number of rows containing at least one instance
+ */
+ pExpr->aMI[iCol*3 + 1] += iCnt;
+ pExpr->aMI[iCol*3 + 2] += (iCnt>0);
+ if( *p==0x00 ) break;
+ p++;
+ p += fts3GetVarint32(p, &iCol);
+ }while( iCol<nCol );
+ }
+
+ fts3EvalUpdateCounts(pExpr->pLeft, nCol);
+ fts3EvalUpdateCounts(pExpr->pRight, nCol);
+ }
+}
+
+/*
+** Expression pExpr must be of type FTSQUERY_PHRASE.
+**
+** If it is not already allocated and populated, this function allocates and
+** populates the Fts3Expr.aMI[] array for expression pExpr. If pExpr is part
+** of a NEAR expression, then it also allocates and populates the same array
+** for all other phrases that are part of the NEAR expression.
+**
+** SQLITE_OK is returned if the aMI[] array is successfully allocated and
+** populated. Otherwise, if an error occurs, an SQLite error code is returned.
+*/
+static int fts3EvalGatherStats(
+ Fts3Cursor *pCsr, /* Cursor object */
+ Fts3Expr *pExpr /* FTSQUERY_PHRASE expression */
+){
+ int rc = SQLITE_OK; /* Return code */
+
+ assert( pExpr->eType==FTSQUERY_PHRASE );
+ if( pExpr->aMI==0 ){
+ Fts3Table *pTab = (Fts3Table *)pCsr->base.pVtab;
+ Fts3Expr *pRoot; /* Root of NEAR expression */
+ Fts3Expr *p; /* Iterator used for several purposes */
+
+ sqlite3_int64 iPrevId = pCsr->iPrevId;
+ sqlite3_int64 iDocid;
+ u8 bEof;
+
+ /* Find the root of the NEAR expression */
+ pRoot = pExpr;
+ while( pRoot->pParent && pRoot->pParent->eType==FTSQUERY_NEAR ){
+ pRoot = pRoot->pParent;
+ }
+ iDocid = pRoot->iDocid;
+ bEof = pRoot->bEof;
+ assert( pRoot->bStart );
+
+ /* Allocate space for the aMI[] array of each FTSQUERY_PHRASE node */
+ for(p=pRoot; p; p=p->pLeft){
+ Fts3Expr *pE = (p->eType==FTSQUERY_PHRASE?p:p->pRight);
+ assert( pE->aMI==0 );
+ pE->aMI = (u32 *)sqlite3_malloc64(pTab->nColumn * 3 * sizeof(u32));
+ if( !pE->aMI ) return SQLITE_NOMEM;
+ memset(pE->aMI, 0, pTab->nColumn * 3 * sizeof(u32));
+ }
+
+ fts3EvalRestart(pCsr, pRoot, &rc);
+
+ while( pCsr->isEof==0 && rc==SQLITE_OK ){
+
+ do {
+ /* Ensure the %_content statement is reset. */
+ if( pCsr->isRequireSeek==0 ) sqlite3_reset(pCsr->pStmt);
+ assert( sqlite3_data_count(pCsr->pStmt)==0 );
+
+ /* Advance to the next document */
+ fts3EvalNextRow(pCsr, pRoot, &rc);
+ pCsr->isEof = pRoot->bEof;
+ pCsr->isRequireSeek = 1;
+ pCsr->isMatchinfoNeeded = 1;
+ pCsr->iPrevId = pRoot->iDocid;
+ }while( pCsr->isEof==0
+ && pRoot->eType==FTSQUERY_NEAR
+ && sqlite3Fts3EvalTestDeferred(pCsr, &rc)
+ );
+
+ if( rc==SQLITE_OK && pCsr->isEof==0 ){
+ fts3EvalUpdateCounts(pRoot, pTab->nColumn);
+ }
+ }
+
+ pCsr->isEof = 0;
+ pCsr->iPrevId = iPrevId;
+
+ if( bEof ){
+ pRoot->bEof = bEof;
+ }else{
+ /* Caution: pRoot may iterate through docids in ascending or descending
+ ** order. For this reason, even though it seems more defensive, the
+ ** do loop cannot be written:
+ **
+ ** do {...} while( pRoot->iDocid<iDocid && rc==SQLITE_OK );
+ */
+ fts3EvalRestart(pCsr, pRoot, &rc);
+ do {
+ fts3EvalNextRow(pCsr, pRoot, &rc);
+ assert_fts3_nc( pRoot->bEof==0 );
+ if( pRoot->bEof ) rc = FTS_CORRUPT_VTAB;
+ }while( pRoot->iDocid!=iDocid && rc==SQLITE_OK );
+ }
+ }
+ return rc;
+}
+
+/*
+** This function is used by the matchinfo() module to query a phrase
+** expression node for the following information:
+**
+** 1. The total number of occurrences of the phrase in each column of
+** the FTS table (considering all rows), and
+**
+** 2. For each column, the number of rows in the table for which the
+** column contains at least one instance of the phrase.
+**
+** If no error occurs, SQLITE_OK is returned and the values for each column
+** written into the array aiOut as follows:
+**
+** aiOut[iCol*3 + 1] = Number of occurrences
+** aiOut[iCol*3 + 2] = Number of rows containing at least one instance
+**
+** Caveats:
+**
+** * If a phrase consists entirely of deferred tokens, then all output
+** values are set to the number of documents in the table. In other
+** words we assume that very common tokens occur exactly once in each
+** column of each row of the table.
+**
+** * If a phrase contains some deferred tokens (and some non-deferred
+** tokens), the counts reflect potential occurrences identified by
+** considering only the non-deferred tokens, rather than actual
+** phrase occurrences.
+**
+** * If the phrase is part of a NEAR expression, then only phrase instances
+** that meet the NEAR constraint are included in the counts.
+*/
+SQLITE_PRIVATE int sqlite3Fts3EvalPhraseStats(
+ Fts3Cursor *pCsr, /* FTS cursor handle */
+ Fts3Expr *pExpr, /* Phrase expression */
+ u32 *aiOut /* Array to write results into (see above) */
+){
+ Fts3Table *pTab = (Fts3Table *)pCsr->base.pVtab;
+ int rc = SQLITE_OK;
+ int iCol;
+
+ if( pExpr->bDeferred && pExpr->pParent->eType!=FTSQUERY_NEAR ){
+ assert( pCsr->nDoc>0 );
+ for(iCol=0; iCol<pTab->nColumn; iCol++){
+ aiOut[iCol*3 + 1] = (u32)pCsr->nDoc;
+ aiOut[iCol*3 + 2] = (u32)pCsr->nDoc;
+ }
+ }else{
+ rc = fts3EvalGatherStats(pCsr, pExpr);
+ if( rc==SQLITE_OK ){
+ assert( pExpr->aMI );
+ for(iCol=0; iCol<pTab->nColumn; iCol++){
+ aiOut[iCol*3 + 1] = pExpr->aMI[iCol*3 + 1];
+ aiOut[iCol*3 + 2] = pExpr->aMI[iCol*3 + 2];
+ }
+ }
+ }
+
+ return rc;
+}
+
+/*
+** The expression pExpr passed as the second argument to this function
+** must be of type FTSQUERY_PHRASE.
+**
+** The returned value is either NULL or a pointer to a buffer containing
+** a position-list indicating the occurrences of the phrase in column iCol
+** of the current row.
+**
+** More specifically, the returned buffer contains 1 varint for each
+** occurrence of the phrase in the column, stored using the normal (delta+2)
+** compression and is terminated by either an 0x01 or 0x00 byte. For example,
+** if the requested column contains "a b X c d X X" and the position-list
+** for 'X' is requested, the buffer returned may contain:
+**
+** 0x04 0x05 0x03 0x01 or 0x04 0x05 0x03 0x00
+**
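+** Working through the example: the 'X' occurrences are at token
+** offsets 2, 5 and 6, so the stored varints are 2+2 = 0x04, then the
+** deltas (5-2)+2 = 0x05 and (6-5)+2 = 0x03. The trailing byte is 0x01
+** if a list for another column follows, or 0x00 at the end of the
+** position list.
+**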
+** This function works regardless of whether or not the phrase is deferred,
+** incremental, or neither.
+*/
+SQLITE_PRIVATE int sqlite3Fts3EvalPhrasePoslist(
+ Fts3Cursor *pCsr, /* FTS3 cursor object */
+ Fts3Expr *pExpr, /* Phrase to return doclist for */
+ int iCol, /* Column to return position list for */
+ char **ppOut /* OUT: Pointer to position list */
+){
+ Fts3Phrase *pPhrase = pExpr->pPhrase;
+ Fts3Table *pTab = (Fts3Table *)pCsr->base.pVtab;
+ char *pIter;
+ int iThis;
+ sqlite3_int64 iDocid;
+
+ /* If this phrase applies specifically to some column other than
+ ** column iCol, return a NULL pointer. */
+ *ppOut = 0;
+ assert( iCol>=0 && iCol<pTab->nColumn );
+ if( (pPhrase->iColumn<pTab->nColumn && pPhrase->iColumn!=iCol) ){
+ return SQLITE_OK;
+ }
+
+ iDocid = pExpr->iDocid;
+ pIter = pPhrase->doclist.pList;
+ if( iDocid!=pCsr->iPrevId || pExpr->bEof ){
+ int rc = SQLITE_OK;
+ int bDescDoclist = pTab->bDescIdx; /* For DOCID_CMP macro */
+ int bOr = 0;
+ u8 bTreeEof = 0;
+ Fts3Expr *p; /* Used to iterate from pExpr to root */
+ Fts3Expr *pNear; /* Most senior NEAR ancestor (or pExpr) */
+ int bMatch;
+
+ /* Check if this phrase descends from an OR expression node. If not,
+ ** return NULL. Otherwise, the entry that corresponds to docid
+ ** pCsr->iPrevId may lie earlier in the doclist buffer. Or, if the
+ ** tree that the node is part of has been marked as EOF, but the node
+ ** itself is not EOF, then it may point to an earlier entry. */
+ pNear = pExpr;
+ for(p=pExpr->pParent; p; p=p->pParent){
+ if( p->eType==FTSQUERY_OR ) bOr = 1;
+ if( p->eType==FTSQUERY_NEAR ) pNear = p;
+ if( p->bEof ) bTreeEof = 1;
+ }
+ if( bOr==0 ) return SQLITE_OK;
+
+ /* This is the descendant of an OR node, so an incremental phrase
+ ** cannot be used. Instead, load the entire doclist for the phrase
+ ** into memory. */
+ if( pPhrase->bIncr ){
+ int bEofSave = pNear->bEof;
+ fts3EvalRestart(pCsr, pNear, &rc);
+ while( rc==SQLITE_OK && !pNear->bEof ){
+ fts3EvalNextRow(pCsr, pNear, &rc);
+ if( bEofSave==0 && pNear->iDocid==iDocid ) break;
+ }
+ assert( rc!=SQLITE_OK || pPhrase->bIncr==0 );
+ }
+ if( bTreeEof ){
+ while( rc==SQLITE_OK && !pNear->bEof ){
+ fts3EvalNextRow(pCsr, pNear, &rc);
+ }
+ }
+ if( rc!=SQLITE_OK ) return rc;
+
+ bMatch = 1;
+ for(p=pNear; p; p=p->pLeft){
+ u8 bEof = 0;
+ Fts3Expr *pTest = p;
+ Fts3Phrase *pPh;
+ assert( pTest->eType==FTSQUERY_NEAR || pTest->eType==FTSQUERY_PHRASE );
+ if( pTest->eType==FTSQUERY_NEAR ) pTest = pTest->pRight;
+ assert( pTest->eType==FTSQUERY_PHRASE );
+ pPh = pTest->pPhrase;
+
+ pIter = pPh->pOrPoslist;
+ iDocid = pPh->iOrDocid;
+ if( pCsr->bDesc==bDescDoclist ){
+ bEof = !pPh->doclist.nAll ||
+ (pIter >= (pPh->doclist.aAll + pPh->doclist.nAll));
+ while( (pIter==0 || DOCID_CMP(iDocid, pCsr->iPrevId)<0 ) && bEof==0 ){
+ sqlite3Fts3DoclistNext(
+ bDescDoclist, pPh->doclist.aAll, pPh->doclist.nAll,
+ &pIter, &iDocid, &bEof
+ );
+ }
+ }else{
+ bEof = !pPh->doclist.nAll || (pIter && pIter<=pPh->doclist.aAll);
+ while( (pIter==0 || DOCID_CMP(iDocid, pCsr->iPrevId)>0 ) && bEof==0 ){
+ int dummy;
+ sqlite3Fts3DoclistPrev(
+ bDescDoclist, pPh->doclist.aAll, pPh->doclist.nAll,
+ &pIter, &iDocid, &dummy, &bEof
+ );
+ }
+ }
+ pPh->pOrPoslist = pIter;
+ pPh->iOrDocid = iDocid;
+ if( bEof || iDocid!=pCsr->iPrevId ) bMatch = 0;
+ }
+
+ if( bMatch ){
+ pIter = pPhrase->pOrPoslist;
+ }else{
+ pIter = 0;
+ }
+ }
+ if( pIter==0 ) return SQLITE_OK;
+
+ if( *pIter==0x01 ){
+ pIter++;
+ pIter += fts3GetVarint32(pIter, &iThis);
+ }else{
+ iThis = 0;
+ }
+ while( iThis<iCol ){
+ fts3ColumnlistCopy(0, &pIter);
+ if( *pIter==0x00 ) return SQLITE_OK;
+ pIter++;
+ pIter += fts3GetVarint32(pIter, &iThis);
+ }
+ if( *pIter==0x00 ){
+ pIter = 0;
+ }
+
+ *ppOut = ((iCol==iThis)?pIter:0);
+ return SQLITE_OK;
+}
+
+/*
+** Free all components of the Fts3Phrase structure that were allocated by
+** the eval module. Specifically, this means to free:
+**
+** * the contents of pPhrase->doclist, and
+** * any Fts3MultiSegReader objects held by phrase tokens.
+*/
+SQLITE_PRIVATE void sqlite3Fts3EvalPhraseCleanup(Fts3Phrase *pPhrase){
+ if( pPhrase ){
+ int i;
+ sqlite3_free(pPhrase->doclist.aAll);
+ fts3EvalInvalidatePoslist(pPhrase);
+ memset(&pPhrase->doclist, 0, sizeof(Fts3Doclist));
+ for(i=0; i<pPhrase->nToken; i++){
+ fts3SegReaderCursorFree(pPhrase->aToken[i].pSegcsr);
+ pPhrase->aToken[i].pSegcsr = 0;
+ }
+ }
+}
+
+
+/*
+** Return SQLITE_CORRUPT_VTAB.
+*/
+#ifdef SQLITE_DEBUG
+SQLITE_PRIVATE int sqlite3Fts3Corrupt(){
+ return SQLITE_CORRUPT_VTAB;
+}
+#endif
+
+#if !SQLITE_CORE
+/*
+** Initialize API pointer table, if required.
+*/
+#ifdef _WIN32
+__declspec(dllexport)
+#endif
+SQLITE_API int sqlite3_fts3_init(
+ sqlite3 *db,
+ char **pzErrMsg,
+ const sqlite3_api_routines *pApi
+){
+ SQLITE_EXTENSION_INIT2(pApi)
+ return sqlite3Fts3Init(db);
+}
+#endif
+
+#endif
+
+/************** End of fts3.c ************************************************/
+/************** Begin file fts3_aux.c ****************************************/
+/*
+** 2011 Jan 27
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+******************************************************************************
+**
+*/
+/* #include "fts3Int.h" */
+#if !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS3)
+
+/* #include <string.h> */
+/* #include <assert.h> */
+
+typedef struct Fts3auxTable Fts3auxTable;
+typedef struct Fts3auxCursor Fts3auxCursor;
+
+struct Fts3auxTable {
+ sqlite3_vtab base; /* Base class used by SQLite core */
+ Fts3Table *pFts3Tab;
+};
+
+struct Fts3auxCursor {
+ sqlite3_vtab_cursor base; /* Base class used by SQLite core */
+ Fts3MultiSegReader csr; /* Must be right after "base" */
+ Fts3SegFilter filter;
+ char *zStop;
+ int nStop; /* Byte-length of string zStop */
+ int iLangid; /* Language id to query */
+ int isEof; /* True if cursor is at EOF */
+ sqlite3_int64 iRowid; /* Current rowid */
+
+ int iCol; /* Current value of 'col' column */
+ int nStat; /* Size of aStat[] array */
+ struct Fts3auxColstats {
+ sqlite3_int64 nDoc; /* 'documents' values for current csr row */
+ sqlite3_int64 nOcc; /* 'occurrences' values for current csr row */
+ } *aStat;
+};
+
+/*
+** Schema of the terms table.
+*/
+#define FTS3_AUX_SCHEMA \
+ "CREATE TABLE x(term, col, documents, occurrences, languageid HIDDEN)"
+
+/*
+** This function does all the work for both the xConnect and xCreate methods.
+** These tables have no persistent representation of their own, so xConnect
+** and xCreate are identical operations.
+*/
+static int fts3auxConnectMethod(
+ sqlite3 *db, /* Database connection */
+ void *pUnused, /* Unused */
+ int argc, /* Number of elements in argv array */
+ const char * const *argv, /* xCreate/xConnect argument array */
+ sqlite3_vtab **ppVtab, /* OUT: New sqlite3_vtab object */
+ char **pzErr /* OUT: sqlite3_malloc'd error message */
+){
+ char const *zDb; /* Name of database (e.g. "main") */
+ char const *zFts3; /* Name of fts3 table */
+ int nDb; /* Result of strlen(zDb) */
+ int nFts3; /* Result of strlen(zFts3) */
+ sqlite3_int64 nByte; /* Bytes of space to allocate here */
+ int rc; /* value returned by declare_vtab() */
+ Fts3auxTable *p; /* Virtual table object to return */
+
+ UNUSED_PARAMETER(pUnused);
+
+ /* The user should invoke this in one of two forms:
+ **
+ ** CREATE VIRTUAL TABLE xxx USING fts4aux(fts4-table);
+ ** CREATE VIRTUAL TABLE xxx USING fts4aux(fts4-table-db, fts4-table);
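+ **
+ ** The resulting table may then be queried like any other. As an
+ ** illustrative sketch (where "t1" names a hypothetical fts4 table):
+ **
+ ** CREATE VIRTUAL TABLE t1_terms USING fts4aux(t1);
+ ** SELECT term, documents, occurrences FROM t1_terms WHERE col = '*';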
+ */
+ if( argc!=4 && argc!=5 ) goto bad_args;
+
+ zDb = argv[1];
+ nDb = (int)strlen(zDb);
+ if( argc==5 ){
+ if( nDb==4 && 0==sqlite3_strnicmp("temp", zDb, 4) ){
+ zDb = argv[3];
+ nDb = (int)strlen(zDb);
+ zFts3 = argv[4];
+ }else{
+ goto bad_args;
+ }
+ }else{
+ zFts3 = argv[3];
+ }
+ nFts3 = (int)strlen(zFts3);
+
+ rc = sqlite3_declare_vtab(db, FTS3_AUX_SCHEMA);
+ if( rc!=SQLITE_OK ) return rc;
+
+ nByte = sizeof(Fts3auxTable) + sizeof(Fts3Table) + nDb + nFts3 + 2;
+ p = (Fts3auxTable *)sqlite3_malloc64(nByte);
+ if( !p ) return SQLITE_NOMEM;
+ memset(p, 0, nByte);
+
+ p->pFts3Tab = (Fts3Table *)&p[1];
+ p->pFts3Tab->zDb = (char *)&p->pFts3Tab[1];
+ p->pFts3Tab->zName = &p->pFts3Tab->zDb[nDb+1];
+ p->pFts3Tab->db = db;
+ p->pFts3Tab->nIndex = 1;
+
+ memcpy((char *)p->pFts3Tab->zDb, zDb, nDb);
+ memcpy((char *)p->pFts3Tab->zName, zFts3, nFts3);
+ sqlite3Fts3Dequote((char *)p->pFts3Tab->zName);
+
+ *ppVtab = (sqlite3_vtab *)p;
+ return SQLITE_OK;
+
+ bad_args:
+ sqlite3Fts3ErrMsg(pzErr, "invalid arguments to fts4aux constructor");
+ return SQLITE_ERROR;
+}
+
+/*
+** This function does the work for both the xDisconnect and xDestroy methods.
+** These tables have no persistent representation of their own, so xDisconnect
+** and xDestroy are identical operations.
+*/
+static int fts3auxDisconnectMethod(sqlite3_vtab *pVtab){
+ Fts3auxTable *p = (Fts3auxTable *)pVtab;
+ Fts3Table *pFts3 = p->pFts3Tab;
+ int i;
+
+ /* Free any prepared statements held */
+ for(i=0; i<SizeofArray(pFts3->aStmt); i++){
+ sqlite3_finalize(pFts3->aStmt[i]);
+ }
+ sqlite3_free(pFts3->zSegmentsTbl);
+ sqlite3_free(p);
+ return SQLITE_OK;
+}
+
+#define FTS4AUX_EQ_CONSTRAINT 1
+#define FTS4AUX_GE_CONSTRAINT 2
+#define FTS4AUX_LE_CONSTRAINT 4
+
+/*
+** xBestIndex - Analyze a WHERE and ORDER BY clause.
+*/
+static int fts3auxBestIndexMethod(
+ sqlite3_vtab *pVTab,
+ sqlite3_index_info *pInfo
+){
+ int i;
+ int iEq = -1;
+ int iGe = -1;
+ int iLe = -1;
+ int iLangid = -1;
+ int iNext = 1; /* Next free argvIndex value */
+
+ UNUSED_PARAMETER(pVTab);
+
+ /* This vtab always delivers results in "ORDER BY term ASC" order. */
+ if( pInfo->nOrderBy==1
+ && pInfo->aOrderBy[0].iColumn==0
+ && pInfo->aOrderBy[0].desc==0
+ ){
+ pInfo->orderByConsumed = 1;
+ }
+
+ /* Search for equality and range constraints on the "term" column,
+ ** and equality constraints on the hidden "languageid" column. */
+ for(i=0; i<pInfo->nConstraint; i++){
+ if( pInfo->aConstraint[i].usable ){
+ int op = pInfo->aConstraint[i].op;
+ int iCol = pInfo->aConstraint[i].iColumn;
+
+ if( iCol==0 ){
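+ /* Note that LT and GT are mapped to the same (inclusive) bounds as
+ ** LE and GE. This over-approximation is safe: the constraints are
+ ** not flagged "omit", so the SQLite core re-checks them against
+ ** every row this module returns. */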
+ if( op==SQLITE_INDEX_CONSTRAINT_EQ ) iEq = i;
+ if( op==SQLITE_INDEX_CONSTRAINT_LT ) iLe = i;
+ if( op==SQLITE_INDEX_CONSTRAINT_LE ) iLe = i;
+ if( op==SQLITE_INDEX_CONSTRAINT_GT ) iGe = i;
+ if( op==SQLITE_INDEX_CONSTRAINT_GE ) iGe = i;
+ }
+ if( iCol==4 ){
+ if( op==SQLITE_INDEX_CONSTRAINT_EQ ) iLangid = i;
+ }
+ }
+ }
+
+ if( iEq>=0 ){
+ pInfo->idxNum = FTS4AUX_EQ_CONSTRAINT;
+ pInfo->aConstraintUsage[iEq].argvIndex = iNext++;
+ pInfo->estimatedCost = 5;
+ }else{
+ pInfo->idxNum = 0;
+ pInfo->estimatedCost = 20000;
+ if( iGe>=0 ){
+ pInfo->idxNum += FTS4AUX_GE_CONSTRAINT;
+ pInfo->aConstraintUsage[iGe].argvIndex = iNext++;
+ pInfo->estimatedCost /= 2;
+ }
+ if( iLe>=0 ){
+ pInfo->idxNum += FTS4AUX_LE_CONSTRAINT;
+ pInfo->aConstraintUsage[iLe].argvIndex = iNext++;
+ pInfo->estimatedCost /= 2;
+ }
+ }
+ if( iLangid>=0 ){
+ pInfo->aConstraintUsage[iLangid].argvIndex = iNext++;
+ pInfo->estimatedCost--;
+ }
+
+ return SQLITE_OK;
+}
+
+/*
+** xOpen - Open a cursor.
+*/
+static int fts3auxOpenMethod(sqlite3_vtab *pVTab, sqlite3_vtab_cursor **ppCsr){
+ Fts3auxCursor *pCsr; /* Pointer to cursor object to return */
+
+ UNUSED_PARAMETER(pVTab);
+
+ pCsr = (Fts3auxCursor *)sqlite3_malloc(sizeof(Fts3auxCursor));
+ if( !pCsr ) return SQLITE_NOMEM;
+ memset(pCsr, 0, sizeof(Fts3auxCursor));
+
+ *ppCsr = (sqlite3_vtab_cursor *)pCsr;
+ return SQLITE_OK;
+}
+
+/*
+** xClose - Close a cursor.
+*/
+static int fts3auxCloseMethod(sqlite3_vtab_cursor *pCursor){
+ Fts3Table *pFts3 = ((Fts3auxTable *)pCursor->pVtab)->pFts3Tab;
+ Fts3auxCursor *pCsr = (Fts3auxCursor *)pCursor;
+
+ sqlite3Fts3SegmentsClose(pFts3);
+ sqlite3Fts3SegReaderFinish(&pCsr->csr);
+ sqlite3_free((void *)pCsr->filter.zTerm);
+ sqlite3_free(pCsr->zStop);
+ sqlite3_free(pCsr->aStat);
+ sqlite3_free(pCsr);
+ return SQLITE_OK;
+}
+
+static int fts3auxGrowStatArray(Fts3auxCursor *pCsr, int nSize){
+ if( nSize>pCsr->nStat ){
+ struct Fts3auxColstats *aNew;
+ aNew = (struct Fts3auxColstats *)sqlite3_realloc64(pCsr->aStat,
+ sizeof(struct Fts3auxColstats) * nSize
+ );
+ if( aNew==0 ) return SQLITE_NOMEM;
+ memset(&aNew[pCsr->nStat], 0,
+ sizeof(struct Fts3auxColstats) * (nSize - pCsr->nStat)
+ );
+ pCsr->aStat = aNew;
+ pCsr->nStat = nSize;
+ }
+ return SQLITE_OK;
+}
+
+/*
+** xNext - Advance the cursor to the next row, if any.
+*/
+static int fts3auxNextMethod(sqlite3_vtab_cursor *pCursor){
+ Fts3auxCursor *pCsr = (Fts3auxCursor *)pCursor;
+ Fts3Table *pFts3 = ((Fts3auxTable *)pCursor->pVtab)->pFts3Tab;
+ int rc;
+
+ /* Increment our pretend rowid value. */
+ pCsr->iRowid++;
+
+ for(pCsr->iCol++; pCsr->iCol<pCsr->nStat; pCsr->iCol++){
+ if( pCsr->aStat[pCsr->iCol].nDoc>0 ) return SQLITE_OK;
+ }
+
+ rc = sqlite3Fts3SegReaderStep(pFts3, &pCsr->csr);
+ if( rc==SQLITE_ROW ){
+ int i = 0;
+ int nDoclist = pCsr->csr.nDoclist;
+ char *aDoclist = pCsr->csr.aDoclist;
+ int iCol;
+
+ int eState = 0;
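+ /* eState determines how the varint most recently read should be
+ ** interpreted: 0 = a docid, 1 or 2 = part of a position list (where
+ ** 0x00 ends the entry and 0x01 introduces a column number), and
+ ** 3 = a column number. See the case-by-case comments below. */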
+
+ if( pCsr->zStop ){
+ int n = (pCsr->nStop<pCsr->csr.nTerm) ? pCsr->nStop : pCsr->csr.nTerm;
+ int mc = memcmp(pCsr->zStop, pCsr->csr.zTerm, n);
+ if( mc<0 || (mc==0 && pCsr->csr.nTerm>pCsr->nStop) ){
+ pCsr->isEof = 1;
+ return SQLITE_OK;
+ }
+ }
+
+ if( fts3auxGrowStatArray(pCsr, 2) ) return SQLITE_NOMEM;
+ memset(pCsr->aStat, 0, sizeof(struct Fts3auxColstats) * pCsr->nStat);
+ iCol = 0;
+
+ while( i<nDoclist ){
+ sqlite3_int64 v = 0;
+
+ i += sqlite3Fts3GetVarint(&aDoclist[i], &v);
+ switch( eState ){
+ /* State 0. In this state the integer just read was a docid. */
+ case 0:
+ pCsr->aStat[0].nDoc++;
+ eState = 1;
+ iCol = 0;
+ break;
+
+ /* State 1. In this state we are expecting either a 1, indicating
+ ** that the following integer will be a column number, or the
+ ** start of a position list for column 0.
+ **
+ ** The only difference between state 1 and state 2 is that if the
+ ** integer encountered in state 1 is not 0 or 1, then we need to
+ ** increment the column 0 "nDoc" count for this term.
+ */
+ case 1:
+ assert( iCol==0 );
+ if( v>1 ){
+ pCsr->aStat[1].nDoc++;
+ }
+ eState = 2;
+ /* fall through */
+
+ case 2:
+ if( v==0 ){ /* 0x00. Next integer will be a docid. */
+ eState = 0;
+ }else if( v==1 ){ /* 0x01. Next integer will be a column number. */
+ eState = 3;
+ }else{ /* 2 or greater. A position. */
+ pCsr->aStat[iCol+1].nOcc++;
+ pCsr->aStat[0].nOcc++;
+ }
+ break;
+
+ /* State 3. The integer just read is a column number. */
+ default: assert( eState==3 );
+ iCol = (int)v;
+ if( fts3auxGrowStatArray(pCsr, iCol+2) ) return SQLITE_NOMEM;
+ pCsr->aStat[iCol+1].nDoc++;
+ eState = 2;
+ break;
+ }
+ }
+
+ pCsr->iCol = 0;
+ rc = SQLITE_OK;
+ }else{
+ pCsr->isEof = 1;
+ }
+ return rc;
+}
+
+/*
+** xFilter - Initialize a cursor to point at the start of its data.
+*/
+static int fts3auxFilterMethod(
+ sqlite3_vtab_cursor *pCursor, /* The cursor used for this query */
+ int idxNum, /* Strategy index */
+ const char *idxStr, /* Unused */
+ int nVal, /* Number of elements in apVal */
+ sqlite3_value **apVal /* Arguments for the indexing scheme */
+){
+ Fts3auxCursor *pCsr = (Fts3auxCursor *)pCursor;
+ Fts3Table *pFts3 = ((Fts3auxTable *)pCursor->pVtab)->pFts3Tab;
+ int rc;
+ int isScan = 0;
+ int iLangVal = 0; /* Language id to query */
+
+ int iEq = -1; /* Index of term=? value in apVal */
+ int iGe = -1; /* Index of term>=? value in apVal */
+ int iLe = -1; /* Index of term<=? value in apVal */
+ int iLangid = -1; /* Index of languageid=? value in apVal */
+ int iNext = 0;
+
+ UNUSED_PARAMETER(nVal);
+ UNUSED_PARAMETER(idxStr);
+
+ assert( idxStr==0 );
+ assert( idxNum==FTS4AUX_EQ_CONSTRAINT || idxNum==0
+ || idxNum==FTS4AUX_LE_CONSTRAINT || idxNum==FTS4AUX_GE_CONSTRAINT
+ || idxNum==(FTS4AUX_LE_CONSTRAINT|FTS4AUX_GE_CONSTRAINT)
+ );
+
+ if( idxNum==FTS4AUX_EQ_CONSTRAINT ){
+ iEq = iNext++;
+ }else{
+ isScan = 1;
+ if( idxNum & FTS4AUX_GE_CONSTRAINT ){
+ iGe = iNext++;
+ }
+ if( idxNum & FTS4AUX_LE_CONSTRAINT ){
+ iLe = iNext++;
+ }
+ }
+ if( iNext<nVal ){
+ iLangid = iNext++;
+ }
+
+ /* In case this cursor is being reused, close and zero it. */
+ testcase(pCsr->filter.zTerm);
+ sqlite3Fts3SegReaderFinish(&pCsr->csr);
+ sqlite3_free((void *)pCsr->filter.zTerm);
+ sqlite3_free(pCsr->aStat);
+ memset(&pCsr->csr, 0, ((u8*)&pCsr[1]) - (u8*)&pCsr->csr);
+
+ pCsr->filter.flags = FTS3_SEGMENT_REQUIRE_POS|FTS3_SEGMENT_IGNORE_EMPTY;
+ if( isScan ) pCsr->filter.flags |= FTS3_SEGMENT_SCAN;
+
+ if( iEq>=0 || iGe>=0 ){
+ const unsigned char *zStr = sqlite3_value_text(apVal[0]);
+ assert( (iEq==0 && iGe==-1) || (iEq==-1 && iGe==0) );
+ if( zStr ){
+ pCsr->filter.zTerm = sqlite3_mprintf("%s", zStr);
+ if( pCsr->filter.zTerm==0 ) return SQLITE_NOMEM;
+ pCsr->filter.nTerm = (int)strlen(pCsr->filter.zTerm);
+ }
+ }
+
+ if( iLe>=0 ){
+ pCsr->zStop = sqlite3_mprintf("%s", sqlite3_value_text(apVal[iLe]));
+ if( pCsr->zStop==0 ) return SQLITE_NOMEM;
+ pCsr->nStop = (int)strlen(pCsr->zStop);
+ }
+
+ if( iLangid>=0 ){
+ iLangVal = sqlite3_value_int(apVal[iLangid]);
+
+ /* If the user specified a negative value for the languageid, use zero
+ ** instead. This works, as the "languageid=?" constraint will also
+ ** be tested by the VDBE layer. The test will always be false (since
+ ** this module will not return a row with a negative languageid), and
+ ** so the overall query will return zero rows. */
+ if( iLangVal<0 ) iLangVal = 0;
+ }
+ pCsr->iLangid = iLangVal;
+
+ rc = sqlite3Fts3SegReaderCursor(pFts3, iLangVal, 0, FTS3_SEGCURSOR_ALL,
+ pCsr->filter.zTerm, pCsr->filter.nTerm, 0, isScan, &pCsr->csr
+ );
+ if( rc==SQLITE_OK ){
+ rc = sqlite3Fts3SegReaderStart(pFts3, &pCsr->csr, &pCsr->filter);
+ }
+
+ if( rc==SQLITE_OK ) rc = fts3auxNextMethod(pCursor);
+ return rc;
+}
+
+/*
+** xEof - Return true if the cursor is at EOF, or false otherwise.
+*/
+static int fts3auxEofMethod(sqlite3_vtab_cursor *pCursor){
+ Fts3auxCursor *pCsr = (Fts3auxCursor *)pCursor;
+ return pCsr->isEof;
+}
+
+/*
+** xColumn - Return a column value.
+*/
+static int fts3auxColumnMethod(
+ sqlite3_vtab_cursor *pCursor, /* Cursor to retrieve value from */
+ sqlite3_context *pCtx, /* Context for sqlite3_result_xxx() calls */
+ int iCol /* Index of column to read value from */
+){
+ Fts3auxCursor *p = (Fts3auxCursor *)pCursor;
+
+ assert( p->isEof==0 );
+ switch( iCol ){
+ case 0: /* term */
+ sqlite3_result_text(pCtx, p->csr.zTerm, p->csr.nTerm, SQLITE_TRANSIENT);
+ break;
+
+ case 1: /* col */
+ if( p->iCol ){
+ sqlite3_result_int(pCtx, p->iCol-1);
+ }else{
+ sqlite3_result_text(pCtx, "*", -1, SQLITE_STATIC);
+ }
+ break;
+
+ case 2: /* documents */
+ sqlite3_result_int64(pCtx, p->aStat[p->iCol].nDoc);
+ break;
+
+ case 3: /* occurrences */
+ sqlite3_result_int64(pCtx, p->aStat[p->iCol].nOcc);
+ break;
+
+ default: /* languageid */
+ assert( iCol==4 );
+ sqlite3_result_int(pCtx, p->iLangid);
+ break;
+ }
+
+ return SQLITE_OK;
+}
+
+/*
+** xRowid - Return the current rowid for the cursor.
+*/
+static int fts3auxRowidMethod(
+ sqlite3_vtab_cursor *pCursor, /* Cursor to retrieve value from */
+ sqlite_int64 *pRowid /* OUT: Rowid value */
+){
+ Fts3auxCursor *pCsr = (Fts3auxCursor *)pCursor;
+ *pRowid = pCsr->iRowid;
+ return SQLITE_OK;
+}
+
+/*
+** Register the fts3aux module with database connection db. Return SQLITE_OK
+** if successful or an error code if sqlite3_create_module() fails.
+*/
+SQLITE_PRIVATE int sqlite3Fts3InitAux(sqlite3 *db){
+ static const sqlite3_module fts3aux_module = {
+ 0, /* iVersion */
+ fts3auxConnectMethod, /* xCreate */
+ fts3auxConnectMethod, /* xConnect */
+ fts3auxBestIndexMethod, /* xBestIndex */
+ fts3auxDisconnectMethod, /* xDisconnect */
+ fts3auxDisconnectMethod, /* xDestroy */
+ fts3auxOpenMethod, /* xOpen */
+ fts3auxCloseMethod, /* xClose */
+ fts3auxFilterMethod, /* xFilter */
+ fts3auxNextMethod, /* xNext */
+ fts3auxEofMethod, /* xEof */
+ fts3auxColumnMethod, /* xColumn */
+ fts3auxRowidMethod, /* xRowid */
+ 0, /* xUpdate */
+ 0, /* xBegin */
+ 0, /* xSync */
+ 0, /* xCommit */
+ 0, /* xRollback */
+ 0, /* xFindFunction */
+ 0, /* xRename */
+ 0, /* xSavepoint */
+ 0, /* xRelease */
+ 0, /* xRollbackTo */
+ 0 /* xShadowName */
+ };
+ int rc; /* Return code */
+
+ rc = sqlite3_create_module(db, "fts4aux", &fts3aux_module, 0);
+ return rc;
+}
+
+#endif /* !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS3) */
+
+/************** End of fts3_aux.c ********************************************/
+/************** Begin file fts3_expr.c ***************************************/
+/*
+** 2008 Nov 28
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+******************************************************************************
+**
+** This module contains code that implements a parser for fts3 query strings
+** (the right-hand argument to the MATCH operator). Because the supported
+** syntax is relatively simple, the whole tokenizer/parser system is
+** hand-coded.
+*/
+/* #include "fts3Int.h" */
+#if !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS3)
+
+/*
+** By default, this module parses the legacy syntax that has been
+** traditionally used by fts3. Or, if SQLITE_ENABLE_FTS3_PARENTHESIS
+** is defined, then it uses the new syntax. The differences between
+** the new and the old syntaxes are:
+**
+** a) The new syntax supports parentheses. The old does not.
+**
+** b) The new syntax supports the AND and NOT operators. The old does not.
+**
+** c) The old syntax supports the "-" token qualifier. This is not
+** supported by the new syntax (it is replaced by the NOT operator).
+**
+** d) When using the old syntax, the OR operator has a greater precedence
+** than an implicit AND. When using the new syntax, both implicit and
+** explicit AND operators have a higher precedence than OR.
+**
+** If compiled with SQLITE_TEST defined, then this module exports the
+** symbol "int sqlite3_fts3_enable_parentheses". Setting this variable
+** to zero causes the module to use the old syntax. If it is set to
+** non-zero the new syntax is activated. This is so both syntaxes can
+** be tested using a single build of testfixture.
+**
+** The following describes the syntax supported by the fts3 MATCH
+** operator in a similar format to that used by the lemon parser
+** generator. This module does not actually use lemon; it uses a
+** custom parser.
+**
+** query ::= andexpr (OR andexpr)*.
+**
+** andexpr ::= notexpr (AND? notexpr)*.
+**
+** notexpr ::= nearexpr (NOT nearexpr|-TOKEN)*.
+** notexpr ::= LP query RP.
+**
+** nearexpr ::= phrase (NEAR distance_opt nearexpr)*.
+**
+** distance_opt ::= .
+** distance_opt ::= / INTEGER.
+**
+** phrase ::= TOKEN.
+** phrase ::= COLUMN:TOKEN.
+** phrase ::= "TOKEN TOKEN TOKEN...".
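+**
+** For example, given point (d) above: the legacy parser reads the
+** query [a OR b c] as [(a OR b) AND c], whereas the new parser (with
+** SQLITE_ENABLE_FTS3_PARENTHESIS defined) reads it as [a OR (b AND c)].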
+*/
+
+#ifdef SQLITE_TEST
+SQLITE_API int sqlite3_fts3_enable_parentheses = 0;
+#else
+# ifdef SQLITE_ENABLE_FTS3_PARENTHESIS
+# define sqlite3_fts3_enable_parentheses 1
+# else
+# define sqlite3_fts3_enable_parentheses 0
+# endif
+#endif
+
+/*
+** Default span for NEAR operators.
+*/
+#define SQLITE_FTS3_DEFAULT_NEAR_PARAM 10
+
+/* #include <string.h> */
+/* #include <assert.h> */
+
+/*
+** isNot:
+** This variable is used by function getNextNode(). When getNextNode() is
+** called, it sets ParseContext.isNot to true if the 'next node' is a
+** FTSQUERY_PHRASE with a unary "-" attached to it. i.e. "mysql" in the
+** FTS3 query "sqlite -mysql". Otherwise, ParseContext.isNot is set to
+** zero.
+*/
+typedef struct ParseContext ParseContext;
+struct ParseContext {
+ sqlite3_tokenizer *pTokenizer; /* Tokenizer module */
+ int iLangid; /* Language id used with tokenizer */
+ const char **azCol; /* Array of column names for fts3 table */
+ int bFts4; /* True to allow FTS4-only syntax */
+ int nCol; /* Number of entries in azCol[] */
+ int iDefaultCol; /* Default column to query */
+ int isNot; /* True if getNextNode() sees a unary - */
+ sqlite3_context *pCtx; /* Write error message here */
+ int nNest; /* Number of nested brackets */
+};
+
+/*
+** This function is equivalent to the standard isspace() function.
+**
+** The standard isspace() can be awkward to use safely, because although it
+** is defined to accept an argument of type int, its behavior when passed
+** an integer that falls outside of the range of the unsigned char type
+** is undefined (and sometimes, "undefined" means segfault). This wrapper
+** is defined to accept an argument of type char, and always returns 0 for
+** any values that fall outside of the range of the unsigned char type (i.e.
+** negative values).
+*/
+static int fts3isspace(char c){
+ return c==' ' || c=='\t' || c=='\n' || c=='\r' || c=='\v' || c=='\f';
+}
+
+/*
+** Allocate nByte bytes of memory using sqlite3_malloc(). If successful,
+** zero the memory before returning a pointer to it. If unsuccessful,
+** return NULL.
+*/
+static void *fts3MallocZero(sqlite3_int64 nByte){
+ void *pRet = sqlite3_malloc64(nByte);
+ if( pRet ) memset(pRet, 0, nByte);
+ return pRet;
+}
+
+SQLITE_PRIVATE int sqlite3Fts3OpenTokenizer(
+ sqlite3_tokenizer *pTokenizer,
+ int iLangid,
+ const char *z,
+ int n,
+ sqlite3_tokenizer_cursor **ppCsr
+){
+ sqlite3_tokenizer_module const *pModule = pTokenizer->pModule;
+ sqlite3_tokenizer_cursor *pCsr = 0;
+ int rc;
+
+ rc = pModule->xOpen(pTokenizer, z, n, &pCsr);
+ assert( rc==SQLITE_OK || pCsr==0 );
+ if( rc==SQLITE_OK ){
+ pCsr->pTokenizer = pTokenizer;
+ if( pModule->iVersion>=1 ){
+ rc = pModule->xLanguageid(pCsr, iLangid);
+ if( rc!=SQLITE_OK ){
+ pModule->xClose(pCsr);
+ pCsr = 0;
+ }
+ }
+ }
+ *ppCsr = pCsr;
+ return rc;
+}
+
+/*
+** Function getNextNode(), which is called by fts3ExprParse(), may itself
+** call fts3ExprParse(). So this forward declaration is required.
+*/
+static int fts3ExprParse(ParseContext *, const char *, int, Fts3Expr **, int *);
+
+/*
+** Extract the next token from buffer z (length n) using the tokenizer
+** and other information (column names etc.) in pParse. Create an Fts3Expr
+** structure of type FTSQUERY_PHRASE containing a phrase consisting of this
+** single token and set *ppExpr to point to it. If the end of the buffer is
+** reached before a token is found, set *ppExpr to zero. It is the
+** responsibility of the caller to eventually deallocate the allocated
+** Fts3Expr structure (if any) by passing it to sqlite3_free().
+**
+** Return SQLITE_OK if successful, or SQLITE_NOMEM if a memory allocation
+** fails.
+*/
+static int getNextToken(
+ ParseContext *pParse, /* fts3 query parse context */
+ int iCol, /* Value for Fts3Phrase.iColumn */
+ const char *z, int n, /* Input string */
+ Fts3Expr **ppExpr, /* OUT: expression */
+ int *pnConsumed /* OUT: Number of bytes consumed */
+){
+ sqlite3_tokenizer *pTokenizer = pParse->pTokenizer;
+ sqlite3_tokenizer_module const *pModule = pTokenizer->pModule;
+ int rc;
+ sqlite3_tokenizer_cursor *pCursor;
+ Fts3Expr *pRet = 0;
+ int i = 0;
+
+ /* Set variable i to the maximum number of bytes of input to tokenize. */
+ for(i=0; i<n; i++){
+ if( sqlite3_fts3_enable_parentheses && (z[i]=='(' || z[i]==')') ) break;
+ if( z[i]=='"' ) break;
+ }
+
+ *pnConsumed = i;
+ rc = sqlite3Fts3OpenTokenizer(pTokenizer, pParse->iLangid, z, i, &pCursor);
+ if( rc==SQLITE_OK ){
+ const char *zToken;
+ int nToken = 0, iStart = 0, iEnd = 0, iPosition = 0;
+ sqlite3_int64 nByte; /* total space to allocate */
+
+ rc = pModule->xNext(pCursor, &zToken, &nToken, &iStart, &iEnd, &iPosition);
+ if( rc==SQLITE_OK ){
+ nByte = sizeof(Fts3Expr) + sizeof(Fts3Phrase) + nToken;
+ pRet = (Fts3Expr *)fts3MallocZero(nByte);
+ if( !pRet ){
+ rc = SQLITE_NOMEM;
+ }else{
+ pRet->eType = FTSQUERY_PHRASE;
+ pRet->pPhrase = (Fts3Phrase *)&pRet[1];
+ pRet->pPhrase->nToken = 1;
+ pRet->pPhrase->iColumn = iCol;
+ pRet->pPhrase->aToken[0].n = nToken;
+ pRet->pPhrase->aToken[0].z = (char *)&pRet->pPhrase[1];
+ memcpy(pRet->pPhrase->aToken[0].z, zToken, nToken);
+
+ if( iEnd<n && z[iEnd]=='*' ){
+ pRet->pPhrase->aToken[0].isPrefix = 1;
+ iEnd++;
+ }
+
+ while( 1 ){
+ if( !sqlite3_fts3_enable_parentheses
+ && iStart>0 && z[iStart-1]=='-'
+ ){
+ pParse->isNot = 1;
+ iStart--;
+ }else if( pParse->bFts4 && iStart>0 && z[iStart-1]=='^' ){
+ pRet->pPhrase->aToken[0].bFirst = 1;
+ iStart--;
+ }else{
+ break;
+ }
+ }
+
+ }
+ *pnConsumed = iEnd;
+ }else if( i && rc==SQLITE_DONE ){
+ rc = SQLITE_OK;
+ }
+
+ pModule->xClose(pCursor);
+ }
+
+ *ppExpr = pRet;
+ return rc;
+}
+
+
+/*
+** Enlarge a memory allocation. If the enlarged allocation cannot be
+** obtained (out of memory), then free the old allocation.
+*/
+static void *fts3ReallocOrFree(void *pOrig, sqlite3_int64 nNew){
+ void *pRet = sqlite3_realloc64(pOrig, nNew);
+ if( !pRet ){
+ sqlite3_free(pOrig);
+ }
+ return pRet;
+}
+
+/*
+** Buffer zInput, length nInput, contains the contents of a quoted string
+** that appeared as part of an fts3 query expression. Neither quote character
+** is included in the buffer. This function attempts to tokenize the entire
+** input buffer and create an Fts3Expr structure of type FTSQUERY_PHRASE
+** containing the results.
+**
+** If successful, SQLITE_OK is returned and *ppExpr set to point at the
+** allocated Fts3Expr structure. Otherwise, either SQLITE_NOMEM (out of memory
+** error) or SQLITE_ERROR (tokenization error) is returned and *ppExpr set
+** to 0.
+*/
+static int getNextString(
+ ParseContext *pParse, /* fts3 query parse context */
+ const char *zInput, int nInput, /* Input string */
+ Fts3Expr **ppExpr /* OUT: expression */
+){
+ sqlite3_tokenizer *pTokenizer = pParse->pTokenizer;
+ sqlite3_tokenizer_module const *pModule = pTokenizer->pModule;
+ int rc;
+ Fts3Expr *p = 0;
+ sqlite3_tokenizer_cursor *pCursor = 0;
+ char *zTemp = 0;
+ int nTemp = 0;
+
+ const int nSpace = sizeof(Fts3Expr) + sizeof(Fts3Phrase);
+ int nToken = 0;
+
+ /* The final Fts3Expr data structure, including the Fts3Phrase and
+ ** Fts3PhraseToken structures and all token buffers, is stored as a
+ ** single allocation so that the expression can be freed with a single
+ ** call to sqlite3_free(). Setting this up requires a two-pass approach.
+ **
+ ** The first pass, in the block below, uses a tokenizer cursor to iterate
+ ** through the tokens in the expression. This pass uses fts3ReallocOrFree()
+ ** to assemble data in two dynamic buffers:
+ **
+ ** Buffer p: Points to the Fts3Expr structure, followed by the Fts3Phrase
+ ** structure, followed by the array of Fts3PhraseToken
+ ** structures. This pass only populates the Fts3PhraseToken array.
+ **
+ ** Buffer zTemp: Contains copies of all tokens.
+ **
+ ** The second pass, in the block that begins "if( rc==SQLITE_DONE )" below,
+ ** appends buffer zTemp to buffer p, and fills in the Fts3Expr and Fts3Phrase
+ ** structures.
+ */
+ rc = sqlite3Fts3OpenTokenizer(
+ pTokenizer, pParse->iLangid, zInput, nInput, &pCursor);
+ if( rc==SQLITE_OK ){
+ int ii;
+ for(ii=0; rc==SQLITE_OK; ii++){
+ const char *zByte;
+ int nByte = 0, iBegin = 0, iEnd = 0, iPos = 0;
+ rc = pModule->xNext(pCursor, &zByte, &nByte, &iBegin, &iEnd, &iPos);
+ if( rc==SQLITE_OK ){
+ Fts3PhraseToken *pToken;
+
+ p = fts3ReallocOrFree(p, nSpace + ii*sizeof(Fts3PhraseToken));
+ if( !p ) goto no_mem;
+
+ zTemp = fts3ReallocOrFree(zTemp, nTemp + nByte);
+ if( !zTemp ) goto no_mem;
+
+ assert( nToken==ii );
+ pToken = &((Fts3Phrase *)(&p[1]))->aToken[ii];
+ memset(pToken, 0, sizeof(Fts3PhraseToken));
+
+ memcpy(&zTemp[nTemp], zByte, nByte);
+ nTemp += nByte;
+
+ pToken->n = nByte;
+ pToken->isPrefix = (iEnd<nInput && zInput[iEnd]=='*');
+ pToken->bFirst = (iBegin>0 && zInput[iBegin-1]=='^');
+ nToken = ii+1;
+ }
+ }
+
+ pModule->xClose(pCursor);
+ pCursor = 0;
+ }
+
+ if( rc==SQLITE_DONE ){
+ int jj;
+ char *zBuf = 0;
+
+ p = fts3ReallocOrFree(p, nSpace + nToken*sizeof(Fts3PhraseToken) + nTemp);
+ if( !p ) goto no_mem;
+ memset(p, 0, (char *)&(((Fts3Phrase *)&p[1])->aToken[0])-(char *)p);
+ p->eType = FTSQUERY_PHRASE;
+ p->pPhrase = (Fts3Phrase *)&p[1];
+ p->pPhrase->iColumn = pParse->iDefaultCol;
+ p->pPhrase->nToken = nToken;
+
+ zBuf = (char *)&p->pPhrase->aToken[nToken];
+ if( zTemp ){
+ memcpy(zBuf, zTemp, nTemp);
+ sqlite3_free(zTemp);
+ }else{
+ assert( nTemp==0 );
+ }
+
+ for(jj=0; jj<p->pPhrase->nToken; jj++){
+ p->pPhrase->aToken[jj].z = zBuf;
+ zBuf += p->pPhrase->aToken[jj].n;
+ }
+ rc = SQLITE_OK;
+ }
+
+ *ppExpr = p;
+ return rc;
+no_mem:
+
+ if( pCursor ){
+ pModule->xClose(pCursor);
+ }
+ sqlite3_free(zTemp);
+ sqlite3_free(p);
+ *ppExpr = 0;
+ return SQLITE_NOMEM;
+}
+
+/*
+** The output variable *ppExpr is populated with an allocated Fts3Expr
+** structure, or set to 0 if the end of the input buffer is reached.
+**
+** Returns an SQLite error code. SQLITE_OK if everything works, SQLITE_NOMEM
+** if a malloc failure occurs, or SQLITE_ERROR if a parse error is encountered.
+** If SQLITE_ERROR is returned, the parse context is populated with an
+** error message.
+*/
+static int getNextNode(
+ ParseContext *pParse, /* fts3 query parse context */
+ const char *z, int n, /* Input string */
+ Fts3Expr **ppExpr, /* OUT: expression */
+ int *pnConsumed /* OUT: Number of bytes consumed */
+){
+ static const struct Fts3Keyword {
+ char *z; /* Keyword text */
+ unsigned char n; /* Length of the keyword */
+ unsigned char parenOnly; /* Only valid in paren mode */
+ unsigned char eType; /* Keyword code */
+ } aKeyword[] = {
+ { "OR" , 2, 0, FTSQUERY_OR },
+ { "AND", 3, 1, FTSQUERY_AND },
+ { "NOT", 3, 1, FTSQUERY_NOT },
+ { "NEAR", 4, 0, FTSQUERY_NEAR }
+ };
+ int ii;
+ int iCol;
+ int iColLen;
+ int rc;
+ Fts3Expr *pRet = 0;
+
+ const char *zInput = z;
+ int nInput = n;
+
+ pParse->isNot = 0;
+
+ /* Skip over any whitespace before checking for a keyword, an open or
+ ** close bracket, or a quoted string.
+ */
+ while( nInput>0 && fts3isspace(*zInput) ){
+ nInput--;
+ zInput++;
+ }
+ if( nInput==0 ){
+ return SQLITE_DONE;
+ }
+
+ /* See if we are dealing with a keyword. */
+ for(ii=0; ii<(int)(sizeof(aKeyword)/sizeof(struct Fts3Keyword)); ii++){
+ const struct Fts3Keyword *pKey = &aKeyword[ii];
+
+ if( (pKey->parenOnly & ~sqlite3_fts3_enable_parentheses)!=0 ){
+ continue;
+ }
+
+ if( nInput>=pKey->n && 0==memcmp(zInput, pKey->z, pKey->n) ){
+ int nNear = SQLITE_FTS3_DEFAULT_NEAR_PARAM;
+ int nKey = pKey->n;
+ char cNext;
+
+ /* If this is a "NEAR" keyword, check for an explicit nearness. */
+ if( pKey->eType==FTSQUERY_NEAR ){
+ assert( nKey==4 );
+ if( zInput[4]=='/' && zInput[5]>='0' && zInput[5]<='9' ){
+ nKey += 1+sqlite3Fts3ReadInt(&zInput[nKey+1], &nNear);
+ }
+ }
+
+ /* At this point this is probably a keyword. But for that to be true,
+ ** the next byte must contain either whitespace, an open or close
+ ** parenthesis, a quote character, or EOF.
+ */
+ cNext = zInput[nKey];
+ if( fts3isspace(cNext)
+ || cNext=='"' || cNext=='(' || cNext==')' || cNext==0
+ ){
+ pRet = (Fts3Expr *)fts3MallocZero(sizeof(Fts3Expr));
+ if( !pRet ){
+ return SQLITE_NOMEM;
+ }
+ pRet->eType = pKey->eType;
+ pRet->nNear = nNear;
+ *ppExpr = pRet;
+ *pnConsumed = (int)((zInput - z) + nKey);
+ return SQLITE_OK;
+ }
+
+ /* Turns out that wasn't a keyword after all. This happens if the
+ ** user has supplied a token such as "ORacle". Continue.
+ */
+ }
+ }
+
+ /* See if we are dealing with a quoted phrase. If this is the case, then
+ ** search for the closing quote and pass the whole string to getNextString()
+ ** for processing. This is easy to do, as fts3 has no syntax for escaping
+ ** a quote character embedded in a string.
+ */
+ if( *zInput=='"' ){
+ for(ii=1; ii<nInput && zInput[ii]!='"'; ii++);
+ *pnConsumed = (int)((zInput - z) + ii + 1);
+ if( ii==nInput ){
+ return SQLITE_ERROR;
+ }
+ return getNextString(pParse, &zInput[1], ii-1, ppExpr);
+ }
+
+ if( sqlite3_fts3_enable_parentheses ){
+ if( *zInput=='(' ){
+ int nConsumed = 0;
+ pParse->nNest++;
+ rc = fts3ExprParse(pParse, zInput+1, nInput-1, ppExpr, &nConsumed);
+ *pnConsumed = (int)(zInput - z) + 1 + nConsumed;
+ return rc;
+ }else if( *zInput==')' ){
+ pParse->nNest--;
+ *pnConsumed = (int)((zInput - z) + 1);
+ *ppExpr = 0;
+ return SQLITE_DONE;
+ }
+ }
+
+ /* If control flows to this point, this must be a regular token, or
+ ** the end of the input. Read a regular token using the sqlite3_tokenizer
+ ** interface. Before doing so, figure out if there is an explicit
+ ** column specifier for the token.
+ **
+ ** TODO: Strangely, it is not possible to associate a column specifier
+ ** with a quoted phrase, only with a single token. Not sure if this was
+ ** an implementation artifact or an intentional decision when fts3 was
+ ** first implemented. Whichever it was, this module duplicates the
+ ** limitation.
+ */
+ iCol = pParse->iDefaultCol;
+ iColLen = 0;
+ for(ii=0; ii<pParse->nCol; ii++){
+ const char *zStr = pParse->azCol[ii];
+ int nStr = (int)strlen(zStr);
+ if( nInput>nStr && zInput[nStr]==':'
+ && sqlite3_strnicmp(zStr, zInput, nStr)==0
+ ){
+ iCol = ii;
+ iColLen = (int)((zInput - z) + nStr + 1);
+ break;
+ }
+ }
+ rc = getNextToken(pParse, iCol, &z[iColLen], n-iColLen, ppExpr, pnConsumed);
+ *pnConsumed += iColLen;
+ return rc;
+}
+
+/*
+** The argument is an Fts3Expr structure for a binary operator (any type
+** except an FTSQUERY_PHRASE). Return an integer value representing the
+** precedence of the operator. Lower values have a higher precedence (i.e.
+** group more tightly). For example, in the C language, the == operator
+** groups more tightly than ||, and would therefore have a higher precedence.
+**
+** When using the new fts3 query syntax (when SQLITE_ENABLE_FTS3_PARENTHESIS
+** is defined), the order of the operators in precedence from highest to
+** lowest is:
+**
+** NEAR
+** NOT
+** AND (including implicit ANDs)
+** OR
+**
+** Note that when using the old query syntax, the OR operator has a higher
+** precedence than the AND operator.
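+**
+** Under either syntax, NEAR binds most tightly, so a query such as
+** [a NEAR b OR c] is always grouped as [(a NEAR b) OR c].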
+*/
+static int opPrecedence(Fts3Expr *p){
+ assert( p->eType!=FTSQUERY_PHRASE );
+ if( sqlite3_fts3_enable_parentheses ){
+ return p->eType;
+ }else if( p->eType==FTSQUERY_NEAR ){
+ return 1;
+ }else if( p->eType==FTSQUERY_OR ){
+ return 2;
+ }
+ assert( p->eType==FTSQUERY_AND );
+ return 3;
+}
+
+/*
+** Argument ppHead contains a pointer to the current head of a query
+** expression tree being parsed. pPrev is the expression node most recently
+** inserted into the tree. This function adds pNew, which is always a binary
+** operator node, into the expression tree based on the relative precedence
+** of pNew and the existing nodes of the tree. This may result in the head
+** of the tree changing, in which case *ppHead is set to the new root node.
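+**
+** For example, when parsing [a OR b AND c] under the new syntax, pNew
+** is the AND node and pPrev is the phrase "b". Since AND binds more
+** tightly than OR, the loop below stops before climbing past the OR
+** node, and the AND node is inserted beneath it, producing the tree
+** for [a OR (b AND c)].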
+*/
+static void insertBinaryOperator(
+ Fts3Expr **ppHead, /* Pointer to the root node of a tree */
+ Fts3Expr *pPrev, /* Node most recently inserted into the tree */
+ Fts3Expr *pNew /* New binary node to insert into expression tree */
+){
+ Fts3Expr *pSplit = pPrev;
+ while( pSplit->pParent && opPrecedence(pSplit->pParent)<=opPrecedence(pNew) ){
+ pSplit = pSplit->pParent;
+ }
+
+ if( pSplit->pParent ){
+ assert( pSplit->pParent->pRight==pSplit );
+ pSplit->pParent->pRight = pNew;
+ pNew->pParent = pSplit->pParent;
+ }else{
+ *ppHead = pNew;
+ }
+ pNew->pLeft = pSplit;
+ pSplit->pParent = pNew;
+}
+
+/*
+** Parse the fts3 query expression found in buffer z, length n. This function
+** returns either when the end of the buffer is reached or an unmatched
+** closing bracket - ')' - is encountered.
+**
+** If successful, SQLITE_OK is returned, *ppExpr is set to point to the
+** parsed form of the expression and *pnConsumed is set to the number of
+** bytes read from buffer z. Otherwise, *ppExpr is set to 0 and SQLITE_NOMEM
+** (out of memory error) or SQLITE_ERROR (parse error) is returned.
+*/
+static int fts3ExprParse(
+ ParseContext *pParse, /* fts3 query parse context */
+ const char *z, int n, /* Text of MATCH query */
+ Fts3Expr **ppExpr, /* OUT: Parsed query structure */
+ int *pnConsumed /* OUT: Number of bytes consumed */
+){
+ Fts3Expr *pRet = 0;
+ Fts3Expr *pPrev = 0;
+ Fts3Expr *pNotBranch = 0; /* Only used in legacy parse mode */
+ int nIn = n;
+ const char *zIn = z;
+ int rc = SQLITE_OK;
+ int isRequirePhrase = 1;
+
+ while( rc==SQLITE_OK ){
+ Fts3Expr *p = 0;
+ int nByte = 0;
+
+ rc = getNextNode(pParse, zIn, nIn, &p, &nByte);
+ assert( nByte>0 || (rc!=SQLITE_OK && p==0) );
+ if( rc==SQLITE_OK ){
+ if( p ){
+ int isPhrase;
+
+ if( !sqlite3_fts3_enable_parentheses
+ && p->eType==FTSQUERY_PHRASE && pParse->isNot
+ ){
+ /* Create an implicit NOT operator. */
+ Fts3Expr *pNot = fts3MallocZero(sizeof(Fts3Expr));
+ if( !pNot ){
+ sqlite3Fts3ExprFree(p);
+ rc = SQLITE_NOMEM;
+ goto exprparse_out;
+ }
+ pNot->eType = FTSQUERY_NOT;
+ pNot->pRight = p;
+ p->pParent = pNot;
+ if( pNotBranch ){
+ pNot->pLeft = pNotBranch;
+ pNotBranch->pParent = pNot;
+ }
+ pNotBranch = pNot;
+ p = pPrev;
+ }else{
+ int eType = p->eType;
+ isPhrase = (eType==FTSQUERY_PHRASE || p->pLeft);
+
+ /* The isRequirePhrase variable is set to true if a phrase or
+ ** an expression contained in parenthesis is required. If a
+ ** binary operator (AND, OR, NOT or NEAR) is encountered when
+ ** isRequirePhrase is set, this is a syntax error.
+ */
+ if( !isPhrase && isRequirePhrase ){
+ sqlite3Fts3ExprFree(p);
+ rc = SQLITE_ERROR;
+ goto exprparse_out;
+ }
+
+ if( isPhrase && !isRequirePhrase ){
+ /* Insert an implicit AND operator. */
+ Fts3Expr *pAnd;
+ assert( pRet && pPrev );
+ pAnd = fts3MallocZero(sizeof(Fts3Expr));
+ if( !pAnd ){
+ sqlite3Fts3ExprFree(p);
+ rc = SQLITE_NOMEM;
+ goto exprparse_out;
+ }
+ pAnd->eType = FTSQUERY_AND;
+ insertBinaryOperator(&pRet, pPrev, pAnd);
+ pPrev = pAnd;
+ }
+
+ /* This test catches attempts to make either operand of a NEAR
+ ** operator something other than a phrase. For example, either of
+ ** the following:
+ **
+ ** (bracketed expression) NEAR phrase
+ ** phrase NEAR (bracketed expression)
+ **
+ ** Return an error in either case.
+ */
+ if( pPrev && (
+ (eType==FTSQUERY_NEAR && !isPhrase && pPrev->eType!=FTSQUERY_PHRASE)
+ || (eType!=FTSQUERY_PHRASE && isPhrase && pPrev->eType==FTSQUERY_NEAR)
+ )){
+ sqlite3Fts3ExprFree(p);
+ rc = SQLITE_ERROR;
+ goto exprparse_out;
+ }
+
+ if( isPhrase ){
+ if( pRet ){
+ assert( pPrev && pPrev->pLeft && pPrev->pRight==0 );
+ pPrev->pRight = p;
+ p->pParent = pPrev;
+ }else{
+ pRet = p;
+ }
+ }else{
+ insertBinaryOperator(&pRet, pPrev, p);
+ }
+ isRequirePhrase = !isPhrase;
+ }
+ pPrev = p;
+ }
+ assert( nByte>0 );
+ }
+ assert( rc!=SQLITE_OK || (nByte>0 && nByte<=nIn) );
+ nIn -= nByte;
+ zIn += nByte;
+ }
+
+ if( rc==SQLITE_DONE && pRet && isRequirePhrase ){
+ rc = SQLITE_ERROR;
+ }
+
+ if( rc==SQLITE_DONE ){
+ rc = SQLITE_OK;
+ if( !sqlite3_fts3_enable_parentheses && pNotBranch ){
+ if( !pRet ){
+ rc = SQLITE_ERROR;
+ }else{
+ Fts3Expr *pIter = pNotBranch;
+ while( pIter->pLeft ){
+ pIter = pIter->pLeft;
+ }
+ pIter->pLeft = pRet;
+ pRet->pParent = pIter;
+ pRet = pNotBranch;
+ }
+ }
+ }
+ *pnConsumed = n - nIn;
+
+exprparse_out:
+ if( rc!=SQLITE_OK ){
+ sqlite3Fts3ExprFree(pRet);
+ sqlite3Fts3ExprFree(pNotBranch);
+ pRet = 0;
+ }
+ *ppExpr = pRet;
+ return rc;
+}
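+
+/*
+** Editorial note: a small worked example of the loop above. The
+** two-token query string:
+**
+**     abc def
+**
+** yields two FTSQUERY_PHRASE nodes, and because the second phrase is
+** encountered while isRequirePhrase==0 an implicit FTSQUERY_AND node is
+** inserted between them, giving the tree AND(PHRASE("abc"),
+** PHRASE("def")). After an explicit operator, as in "abc OR def",
+** isRequirePhrase is set and no implicit AND is added.
+*/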
+
+/*
+** Return SQLITE_TOOBIG if the maximum depth of the expression tree passed
+** as the only argument is more than nMaxDepth. Otherwise return SQLITE_OK.
+*/
+static int fts3ExprCheckDepth(Fts3Expr *p, int nMaxDepth){
+ int rc = SQLITE_OK;
+ if( p ){
+ if( nMaxDepth<0 ){
+ rc = SQLITE_TOOBIG;
+ }else{
+ rc = fts3ExprCheckDepth(p->pLeft, nMaxDepth-1);
+ if( rc==SQLITE_OK ){
+ rc = fts3ExprCheckDepth(p->pRight, nMaxDepth-1);
+ }
+ }
+ }
+ return rc;
+}
+
+/*
+** This function attempts to transform the expression tree at (*pp) to
+** an equivalent but more balanced form. The tree is modified in place.
+** If successful, SQLITE_OK is returned and (*pp) is set to point to the
+** new root expression node.
+**
+** nMaxDepth is the maximum allowable depth of the balanced sub-tree.
+**
+** Otherwise, if an error occurs, an SQLite error code is returned and
+** expression (*pp) freed.
+*/
+static int fts3ExprBalance(Fts3Expr **pp, int nMaxDepth){
+ int rc = SQLITE_OK; /* Return code */
+ Fts3Expr *pRoot = *pp; /* Initial root node */
+ Fts3Expr *pFree = 0; /* List of free nodes. Linked by pParent. */
+ int eType = pRoot->eType; /* Type of node in this tree */
+
+ if( nMaxDepth==0 ){
+ rc = SQLITE_ERROR;
+ }
+
+ if( rc==SQLITE_OK ){
+ if( (eType==FTSQUERY_AND || eType==FTSQUERY_OR) ){
+ Fts3Expr **apLeaf;
+ apLeaf = (Fts3Expr **)sqlite3_malloc64(sizeof(Fts3Expr *) * nMaxDepth);
+ if( 0==apLeaf ){
+ rc = SQLITE_NOMEM;
+ }else{
+ memset(apLeaf, 0, sizeof(Fts3Expr *) * nMaxDepth);
+ }
+
+ if( rc==SQLITE_OK ){
+ int i;
+ Fts3Expr *p;
+
+ /* Set $p to point to the left-most leaf in the tree of eType nodes. */
+ for(p=pRoot; p->eType==eType; p=p->pLeft){
+ assert( p->pParent==0 || p->pParent->pLeft==p );
+ assert( p->pLeft && p->pRight );
+ }
+
+ /* This loop runs once for each leaf in the tree of eType nodes. */
+ while( 1 ){
+ int iLvl;
+ Fts3Expr *pParent = p->pParent; /* Current parent of p */
+
+ assert( pParent==0 || pParent->pLeft==p );
+ p->pParent = 0;
+ if( pParent ){
+ pParent->pLeft = 0;
+ }else{
+ pRoot = 0;
+ }
+ rc = fts3ExprBalance(&p, nMaxDepth-1);
+ if( rc!=SQLITE_OK ) break;
+
+ for(iLvl=0; p && iLvl<nMaxDepth; iLvl++){
+ if( apLeaf[iLvl]==0 ){
+ apLeaf[iLvl] = p;
+ p = 0;
+ }else{
+ assert( pFree );
+ pFree->pLeft = apLeaf[iLvl];
+ pFree->pRight = p;
+ pFree->pLeft->pParent = pFree;
+ pFree->pRight->pParent = pFree;
+
+ p = pFree;
+ pFree = pFree->pParent;
+ p->pParent = 0;
+ apLeaf[iLvl] = 0;
+ }
+ }
+ if( p ){
+ sqlite3Fts3ExprFree(p);
+ rc = SQLITE_TOOBIG;
+ break;
+ }
+
+ /* If that was the last leaf node, break out of the loop */
+ if( pParent==0 ) break;
+
+ /* Set $p to point to the next leaf in the tree of eType nodes */
+ for(p=pParent->pRight; p->eType==eType; p=p->pLeft);
+
+ /* Remove pParent from the original tree. */
+ assert( pParent->pParent==0 || pParent->pParent->pLeft==pParent );
+ pParent->pRight->pParent = pParent->pParent;
+ if( pParent->pParent ){
+ pParent->pParent->pLeft = pParent->pRight;
+ }else{
+ assert( pParent==pRoot );
+ pRoot = pParent->pRight;
+ }
+
+ /* Link pParent into the free node list. It will be used as an
+ ** internal node of the new tree. */
+ pParent->pParent = pFree;
+ pFree = pParent;
+ }
+
+ if( rc==SQLITE_OK ){
+ p = 0;
+ for(i=0; i<nMaxDepth; i++){
+ if( apLeaf[i] ){
+ if( p==0 ){
+ p = apLeaf[i];
+ p->pParent = 0;
+ }else{
+ assert( pFree!=0 );
+ pFree->pRight = p;
+ pFree->pLeft = apLeaf[i];
+ pFree->pLeft->pParent = pFree;
+ pFree->pRight->pParent = pFree;
+
+ p = pFree;
+ pFree = pFree->pParent;
+ p->pParent = 0;
+ }
+ }
+ }
+ pRoot = p;
+ }else{
+ /* An error occurred. Delete the contents of the apLeaf[] array
+ ** and pFree list. Everything else is cleaned up by the call to
+ ** sqlite3Fts3ExprFree(pRoot) below. */
+ Fts3Expr *pDel;
+ for(i=0; i<nMaxDepth; i++){
+ sqlite3Fts3ExprFree(apLeaf[i]);
+ }
+ while( (pDel=pFree)!=0 ){
+ pFree = pDel->pParent;
+ sqlite3_free(pDel);
+ }
+ }
+
+ assert( pFree==0 );
+ sqlite3_free( apLeaf );
+ }
+ }else if( eType==FTSQUERY_NOT ){
+ Fts3Expr *pLeft = pRoot->pLeft;
+ Fts3Expr *pRight = pRoot->pRight;
+
+ pRoot->pLeft = 0;
+ pRoot->pRight = 0;
+ pLeft->pParent = 0;
+ pRight->pParent = 0;
+
+ rc = fts3ExprBalance(&pLeft, nMaxDepth-1);
+ if( rc==SQLITE_OK ){
+ rc = fts3ExprBalance(&pRight, nMaxDepth-1);
+ }
+
+ if( rc!=SQLITE_OK ){
+ sqlite3Fts3ExprFree(pRight);
+ sqlite3Fts3ExprFree(pLeft);
+ }else{
+ assert( pLeft && pRight );
+ pRoot->pLeft = pLeft;
+ pLeft->pParent = pRoot;
+ pRoot->pRight = pRight;
+ pRight->pParent = pRoot;
+ }
+ }
+ }
+
+ if( rc!=SQLITE_OK ){
+ sqlite3Fts3ExprFree(pRoot);
+ pRoot = 0;
+ }
+ *pp = pRoot;
+ return rc;
+}
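+
+/*
+** Editorial note: the apLeaf[] array above works like a binary counter.
+** apLeaf[0] holds a pending subtree of depth 1, apLeaf[1] a subtree of
+** depth 2, and so on. Each new leaf is "added" at level 0; if that slot
+** is already occupied, the two subtrees are joined under a node taken
+** from the pFree list and the result carries into the next level, just
+** like binary addition. Combining the non-empty slots at the end yields
+** a tree whose depth is at most nMaxDepth - the same scheme used by
+** bottom-up merge sort.
+*/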
+
+/*
+** This function is similar to sqlite3Fts3ExprParse(), with the following
+** differences:
+**
+** 1. It does not do expression rebalancing.
+** 2. It does not check that the expression does not exceed the
+** maximum allowable depth.
+** 3. Even if it fails, *ppExpr may still be set to point to an
+** expression tree. It should be deleted using sqlite3Fts3ExprFree()
+** in this case.
+*/
+static int fts3ExprParseUnbalanced(
+ sqlite3_tokenizer *pTokenizer, /* Tokenizer module */
+ int iLangid, /* Language id for tokenizer */
+ char **azCol, /* Array of column names for fts3 table */
+ int bFts4, /* True to allow FTS4-only syntax */
+ int nCol, /* Number of entries in azCol[] */
+ int iDefaultCol, /* Default column to query */
+ const char *z, int n, /* Text of MATCH query */
+ Fts3Expr **ppExpr /* OUT: Parsed query structure */
+){
+ int nParsed;
+ int rc;
+ ParseContext sParse;
+
+ memset(&sParse, 0, sizeof(ParseContext));
+ sParse.pTokenizer = pTokenizer;
+ sParse.iLangid = iLangid;
+ sParse.azCol = (const char **)azCol;
+ sParse.nCol = nCol;
+ sParse.iDefaultCol = iDefaultCol;
+ sParse.bFts4 = bFts4;
+ if( z==0 ){
+ *ppExpr = 0;
+ return SQLITE_OK;
+ }
+ if( n<0 ){
+ n = (int)strlen(z);
+ }
+ rc = fts3ExprParse(&sParse, z, n, ppExpr, &nParsed);
+ assert( rc==SQLITE_OK || *ppExpr==0 );
+
+ /* Check for mismatched parenthesis */
+ if( rc==SQLITE_OK && sParse.nNest ){
+ rc = SQLITE_ERROR;
+ }
+
+ return rc;
+}
+
+/*
+** Parameters z and n contain a pointer to and length of a buffer containing
+** an fts3 query expression, respectively. This function attempts to parse the
+** query expression and create a tree of Fts3Expr structures representing the
+** parsed expression. If successful, *ppExpr is set to point to the head
+** of the parsed expression tree and SQLITE_OK is returned. If an error
+** occurs, either SQLITE_NOMEM (out-of-memory error) or SQLITE_ERROR (parse
+** error) is returned and *ppExpr is set to 0.
+**
+** If parameter n is a negative number, then z is assumed to point to a
+** nul-terminated string and the length is determined using strlen().
+**
+** The first parameter, pTokenizer, is passed the fts3 tokenizer module to
+** use to normalize query tokens while parsing the expression. The azCol[]
+** array, which is assumed to contain nCol entries, should contain the names
+** of each column in the target fts3 table, in order from left to right.
+** Column names must be nul-terminated strings.
+**
+** The iDefaultCol parameter should be passed the index of the table column
+** that appears on the left-hand-side of the MATCH operator (the default
+** column to match against for tokens for which a column name is not explicitly
+** specified as part of the query string), or -1 if tokens may by default
+** match any table column.
+*/
+SQLITE_PRIVATE int sqlite3Fts3ExprParse(
+ sqlite3_tokenizer *pTokenizer, /* Tokenizer module */
+ int iLangid, /* Language id for tokenizer */
+ char **azCol, /* Array of column names for fts3 table */
+ int bFts4, /* True to allow FTS4-only syntax */
+ int nCol, /* Number of entries in azCol[] */
+ int iDefaultCol, /* Default column to query */
+ const char *z, int n, /* Text of MATCH query */
+ Fts3Expr **ppExpr, /* OUT: Parsed query structure */
+ char **pzErr /* OUT: Error message (sqlite3_malloc) */
+){
+ int rc = fts3ExprParseUnbalanced(
+ pTokenizer, iLangid, azCol, bFts4, nCol, iDefaultCol, z, n, ppExpr
+ );
+
+  /* Rebalance the expression and check that its depth does not exceed
+ ** SQLITE_FTS3_MAX_EXPR_DEPTH. */
+ if( rc==SQLITE_OK && *ppExpr ){
+ rc = fts3ExprBalance(ppExpr, SQLITE_FTS3_MAX_EXPR_DEPTH);
+ if( rc==SQLITE_OK ){
+ rc = fts3ExprCheckDepth(*ppExpr, SQLITE_FTS3_MAX_EXPR_DEPTH);
+ }
+ }
+
+ if( rc!=SQLITE_OK ){
+ sqlite3Fts3ExprFree(*ppExpr);
+ *ppExpr = 0;
+ if( rc==SQLITE_TOOBIG ){
+ sqlite3Fts3ErrMsg(pzErr,
+ "FTS expression tree is too large (maximum depth %d)",
+ SQLITE_FTS3_MAX_EXPR_DEPTH
+ );
+ rc = SQLITE_ERROR;
+ }else if( rc==SQLITE_ERROR ){
+ sqlite3Fts3ErrMsg(pzErr, "malformed MATCH expression: [%s]", z);
+ }
+ }
+
+ return rc;
+}
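+
+/*
+** Editorial note: a minimal, hypothetical calling sketch for the function
+** above. The names pTok, pExpr and zErr are illustrative only:
+**
+**     char *azCol[] = { "col1", "col2" };
+**     Fts3Expr *pExpr = 0;
+**     char *zErr = 0;
+**     int rc = sqlite3Fts3ExprParse(
+**         pTok, 0, azCol, 0, 2, 0, "col2:abc NEAR def", -1, &pExpr, &zErr
+**     );
+**     /* ... use pExpr ... */
+**     sqlite3Fts3ExprFree(pExpr);
+**     sqlite3_free(zErr);
+**
+** Passing n<0 makes the function compute strlen(z) itself, and since
+** iDefaultCol is 0 here, unprefixed tokens match "col1".
+*/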
+
+/*
+** Free a single node of an expression tree.
+*/
+static void fts3FreeExprNode(Fts3Expr *p){
+ assert( p->eType==FTSQUERY_PHRASE || p->pPhrase==0 );
+ sqlite3Fts3EvalPhraseCleanup(p->pPhrase);
+ sqlite3_free(p->aMI);
+ sqlite3_free(p);
+}
+
+/*
+** Free a parsed fts3 query expression allocated by sqlite3Fts3ExprParse().
+**
+** This function would be simpler if it recursively called itself. But
+** then freeing a sufficiently large expression returned by ExprParse()
+** could cause a stack overflow.
+*/
+SQLITE_PRIVATE void sqlite3Fts3ExprFree(Fts3Expr *pDel){
+ Fts3Expr *p;
+ assert( pDel==0 || pDel->pParent==0 );
+ for(p=pDel; p && (p->pLeft||p->pRight); p=(p->pLeft ? p->pLeft : p->pRight)){
+ assert( p->pParent==0 || p==p->pParent->pRight || p==p->pParent->pLeft );
+ }
+ while( p ){
+ Fts3Expr *pParent = p->pParent;
+ fts3FreeExprNode(p);
+ if( pParent && p==pParent->pLeft && pParent->pRight ){
+ p = pParent->pRight;
+ while( p && (p->pLeft || p->pRight) ){
+ assert( p==p->pParent->pRight || p==p->pParent->pLeft );
+ p = (p->pLeft ? p->pLeft : p->pRight);
+ }
+ }else{
+ p = pParent;
+ }
+ }
+}
+
+/****************************************************************************
+*****************************************************************************
+** Everything after this point is just test code.
+*/
+
+#ifdef SQLITE_TEST
+
+/* #include <stdio.h> */
+
+/*
+** Return a pointer to a buffer containing a text representation of the
+** expression passed as the first argument. The buffer is obtained from
+** sqlite3_malloc(). It is the responsibility of the caller to use
+** sqlite3_free() to release the memory. If an OOM condition is encountered,
+** NULL is returned.
+**
+** If the second argument is not NULL, then its contents are prepended to
+** the returned expression text and then freed using sqlite3_free().
+*/
+static char *exprToString(Fts3Expr *pExpr, char *zBuf){
+ if( pExpr==0 ){
+ return sqlite3_mprintf("");
+ }
+ switch( pExpr->eType ){
+ case FTSQUERY_PHRASE: {
+ Fts3Phrase *pPhrase = pExpr->pPhrase;
+ int i;
+ zBuf = sqlite3_mprintf(
+ "%zPHRASE %d 0", zBuf, pPhrase->iColumn);
+ for(i=0; zBuf && i<pPhrase->nToken; i++){
+ zBuf = sqlite3_mprintf("%z %.*s%s", zBuf,
+ pPhrase->aToken[i].n, pPhrase->aToken[i].z,
+ (pPhrase->aToken[i].isPrefix?"+":"")
+ );
+ }
+ return zBuf;
+ }
+
+ case FTSQUERY_NEAR:
+ zBuf = sqlite3_mprintf("%zNEAR/%d ", zBuf, pExpr->nNear);
+ break;
+ case FTSQUERY_NOT:
+ zBuf = sqlite3_mprintf("%zNOT ", zBuf);
+ break;
+ case FTSQUERY_AND:
+ zBuf = sqlite3_mprintf("%zAND ", zBuf);
+ break;
+ case FTSQUERY_OR:
+ zBuf = sqlite3_mprintf("%zOR ", zBuf);
+ break;
+ }
+
+ if( zBuf ) zBuf = sqlite3_mprintf("%z{", zBuf);
+ if( zBuf ) zBuf = exprToString(pExpr->pLeft, zBuf);
+ if( zBuf ) zBuf = sqlite3_mprintf("%z} {", zBuf);
+
+ if( zBuf ) zBuf = exprToString(pExpr->pRight, zBuf);
+ if( zBuf ) zBuf = sqlite3_mprintf("%z}", zBuf);
+
+ return zBuf;
+}
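+
+/*
+** Editorial note: an illustrative rendering. For a query parsed from
+** "abc OR def" with default column 0, the function above produces a
+** string of the form:
+**
+**     OR {PHRASE 0 0 abc} {PHRASE 0 0 def}
+**
+** where each PHRASE entry lists the column number, a literal 0, and the
+** phrase tokens (a trailing '+' marks a prefix token).
+*/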
+
+/*
+** This is the implementation of a scalar SQL function used to test the
+** expression parser. It should be called as follows:
+**
+** fts3_exprtest(<tokenizer>, <expr>, <column 1>, ...);
+**
+** The first argument, <tokenizer>, is the name of the fts3 tokenizer used
+** to parse the query expression (see README.tokenizers). The second argument
+** is the query expression to parse. Each subsequent argument is the name
+** of a column of the fts3 table that the query expression may refer to.
+** For example:
+**
+** SELECT fts3_exprtest('simple', 'Bill col2:Bloggs', 'col1', 'col2');
+*/
+static void fts3ExprTestCommon(
+ int bRebalance,
+ sqlite3_context *context,
+ int argc,
+ sqlite3_value **argv
+){
+ sqlite3_tokenizer *pTokenizer = 0;
+ int rc;
+ char **azCol = 0;
+ const char *zExpr;
+ int nExpr;
+ int nCol;
+ int ii;
+ Fts3Expr *pExpr;
+ char *zBuf = 0;
+ Fts3Hash *pHash = (Fts3Hash*)sqlite3_user_data(context);
+ const char *zTokenizer = 0;
+ char *zErr = 0;
+
+ if( argc<3 ){
+ sqlite3_result_error(context,
+ "Usage: fts3_exprtest(tokenizer, expr, col1, ...", -1
+ );
+ return;
+ }
+
+ zTokenizer = (const char*)sqlite3_value_text(argv[0]);
+ rc = sqlite3Fts3InitTokenizer(pHash, zTokenizer, &pTokenizer, &zErr);
+ if( rc!=SQLITE_OK ){
+ if( rc==SQLITE_NOMEM ){
+ sqlite3_result_error_nomem(context);
+ }else{
+ sqlite3_result_error(context, zErr, -1);
+ }
+ sqlite3_free(zErr);
+ return;
+ }
+
+ zExpr = (const char *)sqlite3_value_text(argv[1]);
+ nExpr = sqlite3_value_bytes(argv[1]);
+ nCol = argc-2;
+ azCol = (char **)sqlite3_malloc64(nCol*sizeof(char *));
+ if( !azCol ){
+ sqlite3_result_error_nomem(context);
+ goto exprtest_out;
+ }
+ for(ii=0; ii<nCol; ii++){
+ azCol[ii] = (char *)sqlite3_value_text(argv[ii+2]);
+ }
+
+ if( bRebalance ){
+ char *zDummy = 0;
+ rc = sqlite3Fts3ExprParse(
+ pTokenizer, 0, azCol, 0, nCol, nCol, zExpr, nExpr, &pExpr, &zDummy
+ );
+ assert( rc==SQLITE_OK || pExpr==0 );
+ sqlite3_free(zDummy);
+ }else{
+ rc = fts3ExprParseUnbalanced(
+ pTokenizer, 0, azCol, 0, nCol, nCol, zExpr, nExpr, &pExpr
+ );
+ }
+
+ if( rc!=SQLITE_OK && rc!=SQLITE_NOMEM ){
+ sqlite3Fts3ExprFree(pExpr);
+ sqlite3_result_error(context, "Error parsing expression", -1);
+ }else if( rc==SQLITE_NOMEM || !(zBuf = exprToString(pExpr, 0)) ){
+ sqlite3_result_error_nomem(context);
+ }else{
+ sqlite3_result_text(context, zBuf, -1, SQLITE_TRANSIENT);
+ sqlite3_free(zBuf);
+ }
+
+ sqlite3Fts3ExprFree(pExpr);
+
+exprtest_out:
+ if( pTokenizer ){
+ rc = pTokenizer->pModule->xDestroy(pTokenizer);
+ }
+ sqlite3_free(azCol);
+}
+
+static void fts3ExprTest(
+ sqlite3_context *context,
+ int argc,
+ sqlite3_value **argv
+){
+ fts3ExprTestCommon(0, context, argc, argv);
+}
+static void fts3ExprTestRebalance(
+ sqlite3_context *context,
+ int argc,
+ sqlite3_value **argv
+){
+ fts3ExprTestCommon(1, context, argc, argv);
+}
+
+/*
+** Register the query expression parser test function fts3_exprtest()
+** with database connection db.
+*/
+SQLITE_PRIVATE int sqlite3Fts3ExprInitTestInterface(sqlite3 *db, Fts3Hash *pHash){
+ int rc = sqlite3_create_function(
+ db, "fts3_exprtest", -1, SQLITE_UTF8, (void*)pHash, fts3ExprTest, 0, 0
+ );
+ if( rc==SQLITE_OK ){
+ rc = sqlite3_create_function(db, "fts3_exprtest_rebalance",
+ -1, SQLITE_UTF8, (void*)pHash, fts3ExprTestRebalance, 0, 0
+ );
+ }
+ return rc;
+}
+
+#endif
+#endif /* !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS3) */
+
+/************** End of fts3_expr.c *******************************************/
+/************** Begin file fts3_hash.c ***************************************/
+/*
+** 2001 September 22
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+** This is the implementation of generic hash-tables used in SQLite.
+** We've modified it slightly to serve as a standalone hash table
+** implementation for the full-text indexing module.
+*/
+
+/*
+** The code in this file is only compiled if:
+**
+** * The FTS3 module is being built as an extension
+** (in which case SQLITE_CORE is not defined), or
+**
+** * The FTS3 module is being built into the core of
+** SQLite (in which case SQLITE_ENABLE_FTS3 is defined).
+*/
+/* #include "fts3Int.h" */
+#if !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS3)
+
+/* #include <assert.h> */
+/* #include <stdlib.h> */
+/* #include <string.h> */
+
+/* #include "fts3_hash.h" */
+
+/*
+** Malloc and Free functions
+*/
+static void *fts3HashMalloc(sqlite3_int64 n){
+ void *p = sqlite3_malloc64(n);
+ if( p ){
+ memset(p, 0, n);
+ }
+ return p;
+}
+static void fts3HashFree(void *p){
+ sqlite3_free(p);
+}
+
+/* Turn bulk memory into a hash table object by initializing the
+** fields of the Hash structure.
+**
+** "pNew" is a pointer to the hash table that is to be initialized.
+** keyClass is one of the constants
+** FTS3_HASH_BINARY or FTS3_HASH_STRING. The value of keyClass
+** determines what kind of key the hash table will use. "copyKey" is
+** true if the hash table should make its own private copy of keys and
+** false if it should just use the supplied pointer.
+*/
+SQLITE_PRIVATE void sqlite3Fts3HashInit(Fts3Hash *pNew, char keyClass, char copyKey){
+ assert( pNew!=0 );
+ assert( keyClass>=FTS3_HASH_STRING && keyClass<=FTS3_HASH_BINARY );
+ pNew->keyClass = keyClass;
+ pNew->copyKey = copyKey;
+ pNew->first = 0;
+ pNew->count = 0;
+ pNew->htsize = 0;
+ pNew->ht = 0;
+}
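+
+/*
+** Editorial note: a minimal usage sketch for this hash table API,
+** assuming string keys with private copies (pData is illustrative):
+**
+**     Fts3Hash h;
+**     sqlite3Fts3HashInit(&h, FTS3_HASH_STRING, 1);
+**     sqlite3Fts3HashInsert(&h, "key", 4, pData);    /* store pData   */
+**     pData = sqlite3Fts3HashFind(&h, "key", 4);     /* look it up    */
+**     sqlite3Fts3HashInsert(&h, "key", 4, 0);        /* delete entry  */
+**     sqlite3Fts3HashClear(&h);                      /* free the rest */
+**
+** Note that the key length here includes the nul terminator (hence 4
+** for "key"), matching how the tokenizer hash is used elsewhere in FTS3.
+*/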
+
+/* Remove all entries from a hash table. Reclaim all memory.
+** Call this routine to delete a hash table or to reset a hash table
+** to the empty state.
+*/
+SQLITE_PRIVATE void sqlite3Fts3HashClear(Fts3Hash *pH){
+ Fts3HashElem *elem; /* For looping over all elements of the table */
+
+ assert( pH!=0 );
+ elem = pH->first;
+ pH->first = 0;
+ fts3HashFree(pH->ht);
+ pH->ht = 0;
+ pH->htsize = 0;
+ while( elem ){
+ Fts3HashElem *next_elem = elem->next;
+ if( pH->copyKey && elem->pKey ){
+ fts3HashFree(elem->pKey);
+ }
+ fts3HashFree(elem);
+ elem = next_elem;
+ }
+ pH->count = 0;
+}
+
+/*
+** Hash and comparison functions when the mode is FTS3_HASH_STRING
+*/
+static int fts3StrHash(const void *pKey, int nKey){
+ const char *z = (const char *)pKey;
+ unsigned h = 0;
+ if( nKey<=0 ) nKey = (int) strlen(z);
+ while( nKey > 0 ){
+ h = (h<<3) ^ h ^ *z++;
+ nKey--;
+ }
+ return (int)(h & 0x7fffffff);
+}
+static int fts3StrCompare(const void *pKey1, int n1, const void *pKey2, int n2){
+ if( n1!=n2 ) return 1;
+ return strncmp((const char*)pKey1,(const char*)pKey2,n1);
+}
+
+/*
+** Hash and comparison functions when the mode is FTS3_HASH_BINARY
+*/
+static int fts3BinHash(const void *pKey, int nKey){
+ int h = 0;
+ const char *z = (const char *)pKey;
+ while( nKey-- > 0 ){
+ h = (h<<3) ^ h ^ *(z++);
+ }
+ return h & 0x7fffffff;
+}
+static int fts3BinCompare(const void *pKey1, int n1, const void *pKey2, int n2){
+ if( n1!=n2 ) return 1;
+ return memcmp(pKey1,pKey2,n1);
+}
+
+/*
+** Return a pointer to the appropriate hash function given the key class.
+**
+** The C syntax in this function definition may be unfamiliar to some
+** programmers, so we provide the following additional explanation:
+**
+** The name of the function is "ftsHashFunction". The function takes a
+** single parameter "keyClass". The return value of ftsHashFunction()
+** is a pointer to another function. Specifically, the return value
+** of ftsHashFunction() is a pointer to a function that takes two parameters
+** with types "const void*" and "int" and returns an "int".
+*/
+static int (*ftsHashFunction(int keyClass))(const void*,int){
+ if( keyClass==FTS3_HASH_STRING ){
+ return &fts3StrHash;
+ }else{
+ assert( keyClass==FTS3_HASH_BINARY );
+ return &fts3BinHash;
+ }
+}
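+
+/*
+** Editorial note: the declaration above is equivalent to the following
+** typedef-based form (the name fts3_hash_fn is illustrative, not part
+** of the source):
+**
+**     typedef int (*fts3_hash_fn)(const void*, int);
+**     static fts3_hash_fn ftsHashFunction(int keyClass);
+*/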
+
+/*
+** Return a pointer to the appropriate comparison function given the
+** key class.
+**
+** For help in interpreting the obscure C code in the function definition,
+** see the header comment on the previous function.
+*/
+static int (*ftsCompareFunction(int keyClass))(const void*,int,const void*,int){
+ if( keyClass==FTS3_HASH_STRING ){
+ return &fts3StrCompare;
+ }else{
+ assert( keyClass==FTS3_HASH_BINARY );
+ return &fts3BinCompare;
+ }
+}
+
+/* Link an element into the hash table
+*/
+static void fts3HashInsertElement(
+ Fts3Hash *pH, /* The complete hash table */
+ struct _fts3ht *pEntry, /* The entry into which pNew is inserted */
+ Fts3HashElem *pNew /* The element to be inserted */
+){
+ Fts3HashElem *pHead; /* First element already in pEntry */
+ pHead = pEntry->chain;
+ if( pHead ){
+ pNew->next = pHead;
+ pNew->prev = pHead->prev;
+ if( pHead->prev ){ pHead->prev->next = pNew; }
+ else { pH->first = pNew; }
+ pHead->prev = pNew;
+ }else{
+ pNew->next = pH->first;
+ if( pH->first ){ pH->first->prev = pNew; }
+ pNew->prev = 0;
+ pH->first = pNew;
+ }
+ pEntry->count++;
+ pEntry->chain = pNew;
+}
+
+
+/* Resize the hash table so that it contains "new_size" buckets.
+** "new_size" must be a power of 2. The hash table might fail
+** to resize if a memory allocation fails.
+**
+** Return non-zero if a memory allocation error occurs.
+*/
+static int fts3Rehash(Fts3Hash *pH, int new_size){
+ struct _fts3ht *new_ht; /* The new hash table */
+ Fts3HashElem *elem, *next_elem; /* For looping over existing elements */
+ int (*xHash)(const void*,int); /* The hash function */
+
+ assert( (new_size & (new_size-1))==0 );
+ new_ht = (struct _fts3ht *)fts3HashMalloc( new_size*sizeof(struct _fts3ht) );
+ if( new_ht==0 ) return 1;
+ fts3HashFree(pH->ht);
+ pH->ht = new_ht;
+ pH->htsize = new_size;
+ xHash = ftsHashFunction(pH->keyClass);
+ for(elem=pH->first, pH->first=0; elem; elem = next_elem){
+ int h = (*xHash)(elem->pKey, elem->nKey) & (new_size-1);
+ next_elem = elem->next;
+ fts3HashInsertElement(pH, &new_ht[h], elem);
+ }
+ return 0;
+}
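+
+/*
+** Editorial note: because new_size is a power of 2, the expression
+** (h & (new_size-1)) above is equivalent to (h % new_size) but avoids
+** an integer division. E.g. for new_size==8 the mask is 0x7, so hash
+** value 13 maps to bucket 5.
+*/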
+
+/* This function (for internal use only) locates an element in a
+** hash table that matches the given key. The hash for this key has
+** already been computed and is passed as the 4th parameter.
+*/
+static Fts3HashElem *fts3FindElementByHash(
+ const Fts3Hash *pH, /* The pH to be searched */
+ const void *pKey, /* The key we are searching for */
+ int nKey,
+ int h /* The hash for this key. */
+){
+ Fts3HashElem *elem; /* Used to loop thru the element list */
+ int count; /* Number of elements left to test */
+ int (*xCompare)(const void*,int,const void*,int); /* comparison function */
+
+ if( pH->ht ){
+ struct _fts3ht *pEntry = &pH->ht[h];
+ elem = pEntry->chain;
+ count = pEntry->count;
+ xCompare = ftsCompareFunction(pH->keyClass);
+ while( count-- && elem ){
+ if( (*xCompare)(elem->pKey,elem->nKey,pKey,nKey)==0 ){
+ return elem;
+ }
+ elem = elem->next;
+ }
+ }
+ return 0;
+}
+
+/* Remove a single entry from the hash table given a pointer to that
+** element and a hash on the element's key.
+*/
+static void fts3RemoveElementByHash(
+ Fts3Hash *pH, /* The pH containing "elem" */
+ Fts3HashElem* elem, /* The element to be removed from the pH */
+ int h /* Hash value for the element */
+){
+ struct _fts3ht *pEntry;
+ if( elem->prev ){
+ elem->prev->next = elem->next;
+ }else{
+ pH->first = elem->next;
+ }
+ if( elem->next ){
+ elem->next->prev = elem->prev;
+ }
+ pEntry = &pH->ht[h];
+ if( pEntry->chain==elem ){
+ pEntry->chain = elem->next;
+ }
+ pEntry->count--;
+ if( pEntry->count<=0 ){
+ pEntry->chain = 0;
+ }
+ if( pH->copyKey && elem->pKey ){
+ fts3HashFree(elem->pKey);
+ }
+ fts3HashFree( elem );
+ pH->count--;
+ if( pH->count<=0 ){
+ assert( pH->first==0 );
+ assert( pH->count==0 );
+ fts3HashClear(pH);
+ }
+}
+
+SQLITE_PRIVATE Fts3HashElem *sqlite3Fts3HashFindElem(
+ const Fts3Hash *pH,
+ const void *pKey,
+ int nKey
+){
+ int h; /* A hash on key */
+ int (*xHash)(const void*,int); /* The hash function */
+
+ if( pH==0 || pH->ht==0 ) return 0;
+ xHash = ftsHashFunction(pH->keyClass);
+ assert( xHash!=0 );
+ h = (*xHash)(pKey,nKey);
+ assert( (pH->htsize & (pH->htsize-1))==0 );
+ return fts3FindElementByHash(pH,pKey,nKey, h & (pH->htsize-1));
+}
+
+/*
+** Attempt to locate an element of the hash table pH with a key
+** that matches pKey,nKey. Return the data for this element if it is
+** found, or NULL if there is no match.
+*/
+SQLITE_PRIVATE void *sqlite3Fts3HashFind(const Fts3Hash *pH, const void *pKey, int nKey){
+ Fts3HashElem *pElem; /* The element that matches key (if any) */
+
+ pElem = sqlite3Fts3HashFindElem(pH, pKey, nKey);
+ return pElem ? pElem->data : 0;
+}
+
+/* Insert an element into the hash table pH. The key is pKey,nKey
+** and the data is "data".
+**
+** If no element exists with a matching key, then a new
+** element is created. A copy of the key is made if the copyKey
+** flag is set. NULL is returned.
+**
+** If another element already exists with the same key, then the
+** new data replaces the old data and the old data is returned.
+** The key is not copied in this instance. If a malloc fails, then
+** the new data is returned and the hash table is unchanged.
+**
+** If the "data" parameter to this function is NULL, then the
+** element corresponding to "key" is removed from the hash table.
+*/
+SQLITE_PRIVATE void *sqlite3Fts3HashInsert(
+ Fts3Hash *pH, /* The hash table to insert into */
+ const void *pKey, /* The key */
+ int nKey, /* Number of bytes in the key */
+ void *data /* The data */
+){
+ int hraw; /* Raw hash value of the key */
+ int h; /* the hash of the key modulo hash table size */
+ Fts3HashElem *elem; /* Used to loop thru the element list */
+ Fts3HashElem *new_elem; /* New element added to the pH */
+ int (*xHash)(const void*,int); /* The hash function */
+
+ assert( pH!=0 );
+ xHash = ftsHashFunction(pH->keyClass);
+ assert( xHash!=0 );
+ hraw = (*xHash)(pKey, nKey);
+ assert( (pH->htsize & (pH->htsize-1))==0 );
+ h = hraw & (pH->htsize-1);
+ elem = fts3FindElementByHash(pH,pKey,nKey,h);
+ if( elem ){
+ void *old_data = elem->data;
+ if( data==0 ){
+ fts3RemoveElementByHash(pH,elem,h);
+ }else{
+ elem->data = data;
+ }
+ return old_data;
+ }
+ if( data==0 ) return 0;
+ if( (pH->htsize==0 && fts3Rehash(pH,8))
+ || (pH->count>=pH->htsize && fts3Rehash(pH, pH->htsize*2))
+ ){
+ pH->count = 0;
+ return data;
+ }
+ assert( pH->htsize>0 );
+ new_elem = (Fts3HashElem*)fts3HashMalloc( sizeof(Fts3HashElem) );
+ if( new_elem==0 ) return data;
+ if( pH->copyKey && pKey!=0 ){
+ new_elem->pKey = fts3HashMalloc( nKey );
+ if( new_elem->pKey==0 ){
+ fts3HashFree(new_elem);
+ return data;
+ }
+ memcpy((void*)new_elem->pKey, pKey, nKey);
+ }else{
+ new_elem->pKey = (void*)pKey;
+ }
+ new_elem->nKey = nKey;
+ pH->count++;
+ assert( pH->htsize>0 );
+ assert( (pH->htsize & (pH->htsize-1))==0 );
+ h = hraw & (pH->htsize-1);
+ fts3HashInsertElement(pH, &pH->ht[h], new_elem);
+ new_elem->data = data;
+ return 0;
+}
+
+#endif /* !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS3) */
+
+/************** End of fts3_hash.c *******************************************/
+/************** Begin file fts3_porter.c *************************************/
+/*
+** 2006 September 30
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+** Implementation of the full-text-search tokenizer that implements
+** a Porter stemmer.
+*/
+
+/*
+** The code in this file is only compiled if:
+**
+** * The FTS3 module is being built as an extension
+** (in which case SQLITE_CORE is not defined), or
+**
+** * The FTS3 module is being built into the core of
+** SQLite (in which case SQLITE_ENABLE_FTS3 is defined).
+*/
+/* #include "fts3Int.h" */
+#if !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS3)
+
+/* #include <assert.h> */
+/* #include <stdlib.h> */
+/* #include <stdio.h> */
+/* #include <string.h> */
+
+/* #include "fts3_tokenizer.h" */
+
+/*
+** Class derived from sqlite3_tokenizer
+*/
+typedef struct porter_tokenizer {
+ sqlite3_tokenizer base; /* Base class */
+} porter_tokenizer;
+
+/*
+** Class derived from sqlite3_tokenizer_cursor
+*/
+typedef struct porter_tokenizer_cursor {
+ sqlite3_tokenizer_cursor base;
+ const char *zInput; /* input we are tokenizing */
+ int nInput; /* size of the input */
+ int iOffset; /* current position in zInput */
+ int iToken; /* index of next token to be returned */
+ char *zToken; /* storage for current token */
+ int nAllocated; /* space allocated to zToken buffer */
+} porter_tokenizer_cursor;
+
+
+/*
+** Create a new tokenizer instance.
+*/
+static int porterCreate(
+ int argc, const char * const *argv,
+ sqlite3_tokenizer **ppTokenizer
+){
+ porter_tokenizer *t;
+
+ UNUSED_PARAMETER(argc);
+ UNUSED_PARAMETER(argv);
+
+ t = (porter_tokenizer *) sqlite3_malloc(sizeof(*t));
+ if( t==NULL ) return SQLITE_NOMEM;
+ memset(t, 0, sizeof(*t));
+ *ppTokenizer = &t->base;
+ return SQLITE_OK;
+}
+
+/*
+** Destroy a tokenizer
+*/
+static int porterDestroy(sqlite3_tokenizer *pTokenizer){
+ sqlite3_free(pTokenizer);
+ return SQLITE_OK;
+}
+
+/*
+** Prepare to begin tokenizing a particular string. The input
+** string to be tokenized is zInput[0..nInput-1]. A cursor
+** used to incrementally tokenize this string is returned in
+** *ppCursor.
+*/
+static int porterOpen(
+ sqlite3_tokenizer *pTokenizer, /* The tokenizer */
+ const char *zInput, int nInput, /* String to be tokenized */
+ sqlite3_tokenizer_cursor **ppCursor /* OUT: Tokenization cursor */
+){
+ porter_tokenizer_cursor *c;
+
+ UNUSED_PARAMETER(pTokenizer);
+
+ c = (porter_tokenizer_cursor *) sqlite3_malloc(sizeof(*c));
+ if( c==NULL ) return SQLITE_NOMEM;
+
+ c->zInput = zInput;
+ if( zInput==0 ){
+ c->nInput = 0;
+ }else if( nInput<0 ){
+ c->nInput = (int)strlen(zInput);
+ }else{
+ c->nInput = nInput;
+ }
+ c->iOffset = 0; /* start tokenizing at the beginning */
+ c->iToken = 0;
+ c->zToken = NULL; /* no space allocated, yet. */
+ c->nAllocated = 0;
+
+ *ppCursor = &c->base;
+ return SQLITE_OK;
+}
+
+/*
+** Close a tokenization cursor previously opened by a call to
+** porterOpen() above.
+*/
+static int porterClose(sqlite3_tokenizer_cursor *pCursor){
+ porter_tokenizer_cursor *c = (porter_tokenizer_cursor *) pCursor;
+ sqlite3_free(c->zToken);
+ sqlite3_free(c);
+ return SQLITE_OK;
+}
+/*
+** Vowel or consonant
+*/
+static const char cType[] = {
+ 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0,
+ 1, 1, 1, 2, 1
+};
+
+/*
+** isConsonant() and isVowel() determine whether the first character in
+** the string they point to is a consonant or a vowel, according
+** to Porter rules.
+**
+** A consonant is any letter other than 'a', 'e', 'i', 'o', or 'u'.
+** 'Y' is a consonant unless it follows another consonant,
+** in which case it is a vowel.
+**
+** In these routines, the letters are in reverse order. So the 'y' rule
+** is that 'y' is a consonant unless it is followed by another
+** consonant.
+*/
+static int isVowel(const char*);
+static int isConsonant(const char *z){
+ int j;
+ char x = *z;
+ if( x==0 ) return 0;
+ assert( x>='a' && x<='z' );
+ j = cType[x-'a'];
+ if( j<2 ) return j;
+ return z[1]==0 || isVowel(z + 1);
+}
+static int isVowel(const char *z){
+ int j;
+ char x = *z;
+ if( x==0 ) return 0;
+ assert( x>='a' && x<='z' );
+ j = cType[x-'a'];
+ if( j<2 ) return 1-j;
+ return isConsonant(z + 1);
+}
+
+/*
+** Let any sequence of one or more vowels be represented by V and let
+** C be a sequence of one or more consonants. Then every word can be
+** represented as:
+**
+**   [C] (VC){m} [V]
+**
+** In prose: A word is an optional consonant sequence followed by zero
+** or more vowel-consonant pairs followed by an optional vowel. "m" is
+** the number of vowel-consonant pairs. This routine computes the value
+** of m for a word.
+**
+** Return true if the m-value for z is 1 or more. In other words,
+** return true if z contains at least one vowel that is followed
+** by a consonant.
+**
+** In this routine z[] is in reverse order. So we are really looking
+** for an instance of a consonant followed by a vowel.
+*/
+static int m_gt_0(const char *z){
+ while( isVowel(z) ){ z++; }
+ if( *z==0 ) return 0;
+ while( isConsonant(z) ){ z++; }
+ return *z!=0;
+}
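+
+/*
+** Editorial note: some m-values from Porter's original paper, with the
+** words shown in forward order:
+**
+**     m=0:  TR, EE, TREE, Y, BY
+**     m=1:  TROUBLE, OATS, TREES, IVY
+**     m=2:  TROUBLES, PRIVATE, OATEN
+**
+** So m_gt_0() is true for the second and third rows only.
+*/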
+
+/* Like m_gt_0 above except we are looking for a value of m which is
+** exactly 1
+*/
+static int m_eq_1(const char *z){
+ while( isVowel(z) ){ z++; }
+ if( *z==0 ) return 0;
+ while( isConsonant(z) ){ z++; }
+ if( *z==0 ) return 0;
+ while( isVowel(z) ){ z++; }
+ if( *z==0 ) return 1;
+ while( isConsonant(z) ){ z++; }
+ return *z==0;
+}
+
+/* Like m_gt_0 above except we are looking for a value of m>1 instead
+** of m>0
+*/
+static int m_gt_1(const char *z){
+ while( isVowel(z) ){ z++; }
+ if( *z==0 ) return 0;
+ while( isConsonant(z) ){ z++; }
+ if( *z==0 ) return 0;
+ while( isVowel(z) ){ z++; }
+ if( *z==0 ) return 0;
+ while( isConsonant(z) ){ z++; }
+ return *z!=0;
+}
+
+/*
+** Return TRUE if there is a vowel anywhere within z[]
+*/
+static int hasVowel(const char *z){
+ while( isConsonant(z) ){ z++; }
+ return *z!=0;
+}
+
+/*
+** Return TRUE if the word ends in a double consonant.
+**
+** The text is reversed here. So we are really looking at
+** the first two characters of z[].
+*/
+static int doubleConsonant(const char *z){
+ return isConsonant(z) && z[0]==z[1];
+}
+
+/*
+** Return TRUE if the word ends with three letters which
+** are consonant-vowel-consonant and where the final consonant
+** is not 'w', 'x', or 'y'.
+**
+** The word is reversed here. So we are really checking the
+** first three letters and the first one cannot be in [wxy].
+*/
+static int star_oh(const char *z){
+ return
+ isConsonant(z) &&
+ z[0]!='w' && z[0]!='x' && z[0]!='y' &&
+ isVowel(z+1) &&
+ isConsonant(z+2);
+}
+
+/*
+** If the word ends with zFrom and xCond() is true for the stem
+** of the word that precedes the zFrom ending, then change the
+** ending to zTo.
+**
+** The input word *pz and zFrom are both in reverse order. zTo
+** is in normal order.
+**
+** Return TRUE if zFrom matches. Return FALSE if zFrom does not
+** match. Note that TRUE is returned even if xCond() fails and
+** no substitution occurs.
+*/
+static int stem(
+ char **pz, /* The word being stemmed (Reversed) */
+ const char *zFrom, /* If the ending matches this... (Reversed) */
+ const char *zTo, /* ... change the ending to this (not reversed) */
+ int (*xCond)(const char*) /* Condition that must be true */
+){
+ char *z = *pz;
+ while( *zFrom && *zFrom==*z ){ z++; zFrom++; }
+ if( *zFrom!=0 ) return 0;
+ if( xCond && !xCond(z) ) return 1;
+ while( *zTo ){
+ *(--z) = *(zTo++);
+ }
+ *pz = z;
+ return 1;
+}
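+
+/*
+** Editorial note: a worked example. If *pz points at "dehsaw" (the word
+** "washed" reversed), then stem(pz, "de", "", hasVowel) matches the
+** reversed ending "de" (i.e. "ed"), hasVowel() confirms the remaining
+** stem "hsaw" ("wash") contains a vowel, and *pz is advanced to "hsaw".
+** The net effect is to strip the "ed" suffix from "washed".
+*/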
+
+/*
+** This is the fallback stemmer used when the porter stemmer is
+** inappropriate. The input word is copied into the output with
+** US-ASCII case folding. If the input word is too long (more
+** than 20 bytes if it contains no digits or more than 6 bytes if
+** it contains digits) then the word is truncated to 20 or 6 bytes
+** by taking 10 or 3 bytes from the beginning and end.
+*/
+static void copy_stemmer(const char *zIn, int nIn, char *zOut, int *pnOut){
+ int i, mx, j;
+ int hasDigit = 0;
+ for(i=0; i<nIn; i++){
+ char c = zIn[i];
+ if( c>='A' && c<='Z' ){
+ zOut[i] = c - 'A' + 'a';
+ }else{
+ if( c>='0' && c<='9' ) hasDigit = 1;
+ zOut[i] = c;
+ }
+ }
+ mx = hasDigit ? 3 : 10;
+ if( nIn>mx*2 ){
+ for(j=mx, i=nIn-mx; i<nIn; i++, j++){
+ zOut[j] = zOut[i];
+ }
+ i = j;
+ }
+ zOut[i] = 0;
+ *pnOut = i;
+}
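+
+/*
+** Editorial note: for a 26-byte digit-free input such as
+** "abcdefghijklmnopqrstuvwxyz", the code above keeps the first 10 and
+** last 10 bytes, producing the 20-byte output "abcdefghijqrstuvwxyz".
+** With a digit anywhere in the word, the limits drop to 3+3 bytes.
+*/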
+
+
+/*
+** Stem the input word zIn[0..nIn-1]. Store the output in zOut.
+** zOut is at least big enough to hold nIn bytes. Write the actual
+** size of the output word (exclusive of the '\0' terminator) into *pnOut.
+**
+** Any upper-case characters in the US-ASCII character set ([A-Z])
+** are converted to lower case. Upper-case UTF characters are
+** unchanged.
+**
+** Words that are longer than about 20 bytes are stemmed by retaining
+** a few bytes from the beginning and the end of the word. If the
+** word contains digits, 3 bytes are taken from the beginning and
+** 3 bytes from the end. For long words without digits, 10 bytes
+** are taken from each end. US-ASCII case folding still applies.
+**
+** If the input word contains no digits but does contain characters not
+** in [a-zA-Z] then no stemming is attempted and this routine just
+** copies the input into the output with US-ASCII
+** case folding.
+**
+** Stemming never increases the length of the word. So there is
+** no chance of overflowing the zOut buffer.
+*/
+static void porter_stemmer(const char *zIn, int nIn, char *zOut, int *pnOut){
+ int i, j;
+ char zReverse[28];
+ char *z, *z2;
+ if( nIn<3 || nIn>=(int)sizeof(zReverse)-7 ){
+ /* The word is too big or too small for the porter stemmer.
+    ** Fall back to the copy stemmer */
+ copy_stemmer(zIn, nIn, zOut, pnOut);
+ return;
+ }
+ for(i=0, j=sizeof(zReverse)-6; i<nIn; i++, j--){
+ char c = zIn[i];
+ if( c>='A' && c<='Z' ){
+ zReverse[j] = c + 'a' - 'A';
+ }else if( c>='a' && c<='z' ){
+ zReverse[j] = c;
+ }else{
+      /* The use of a character not in [a-zA-Z] means that we fall back
+ ** to the copy stemmer */
+ copy_stemmer(zIn, nIn, zOut, pnOut);
+ return;
+ }
+ }
+ memset(&zReverse[sizeof(zReverse)-5], 0, 5);
+ z = &zReverse[j+1];
+
+
+ /* Step 1a */
+ if( z[0]=='s' ){
+ if(
+ !stem(&z, "sess", "ss", 0) &&
+ !stem(&z, "sei", "i", 0) &&
+ !stem(&z, "ss", "ss", 0)
+ ){
+ z++;
+ }
+ }
+
+ /* Step 1b */
+ z2 = z;
+ if( stem(&z, "dee", "ee", m_gt_0) ){
+ /* Do nothing. The work was all in the test */
+ }else if(
+ (stem(&z, "gni", "", hasVowel) || stem(&z, "de", "", hasVowel))
+ && z!=z2
+ ){
+ if( stem(&z, "ta", "ate", 0) ||
+ stem(&z, "lb", "ble", 0) ||
+ stem(&z, "zi", "ize", 0) ){
+ /* Do nothing. The work was all in the test */
+ }else if( doubleConsonant(z) && (*z!='l' && *z!='s' && *z!='z') ){
+ z++;
+ }else if( m_eq_1(z) && star_oh(z) ){
+ *(--z) = 'e';
+ }
+ }
+
+ /* Step 1c */
+ if( z[0]=='y' && hasVowel(z+1) ){
+ z[0] = 'i';
+ }
+
+ /* Step 2 */
+ switch( z[1] ){
+ case 'a':
+ if( !stem(&z, "lanoita", "ate", m_gt_0) ){
+ stem(&z, "lanoit", "tion", m_gt_0);
+ }
+ break;
+ case 'c':
+ if( !stem(&z, "icne", "ence", m_gt_0) ){
+ stem(&z, "icna", "ance", m_gt_0);
+ }
+ break;
+ case 'e':
+ stem(&z, "rezi", "ize", m_gt_0);
+ break;
+ case 'g':
+ stem(&z, "igol", "log", m_gt_0);
+ break;
+ case 'l':
+ if( !stem(&z, "ilb", "ble", m_gt_0)
+ && !stem(&z, "illa", "al", m_gt_0)
+ && !stem(&z, "iltne", "ent", m_gt_0)
+ && !stem(&z, "ile", "e", m_gt_0)
+ ){
+ stem(&z, "ilsuo", "ous", m_gt_0);
+ }
+ break;
+ case 'o':
+ if( !stem(&z, "noitazi", "ize", m_gt_0)
+ && !stem(&z, "noita", "ate", m_gt_0)
+ ){
+ stem(&z, "rota", "ate", m_gt_0);
+ }
+ break;
+ case 's':
+ if( !stem(&z, "msila", "al", m_gt_0)
+ && !stem(&z, "ssenevi", "ive", m_gt_0)
+ && !stem(&z, "ssenluf", "ful", m_gt_0)
+ ){
+ stem(&z, "ssensuo", "ous", m_gt_0);
+ }
+ break;
+ case 't':
+ if( !stem(&z, "itila", "al", m_gt_0)
+ && !stem(&z, "itivi", "ive", m_gt_0)
+ ){
+ stem(&z, "itilib", "ble", m_gt_0);
+ }
+ break;
+ }
+
+ /* Step 3 */
+ switch( z[0] ){
+ case 'e':
+ if( !stem(&z, "etaci", "ic", m_gt_0)
+ && !stem(&z, "evita", "", m_gt_0)
+ ){
+ stem(&z, "ezila", "al", m_gt_0);
+ }
+ break;
+ case 'i':
+ stem(&z, "itici", "ic", m_gt_0);
+ break;
+ case 'l':
+ if( !stem(&z, "laci", "ic", m_gt_0) ){
+ stem(&z, "luf", "", m_gt_0);
+ }
+ break;
+ case 's':
+ stem(&z, "ssen", "", m_gt_0);
+ break;
+ }
+
+ /* Step 4 */
+ switch( z[1] ){
+ case 'a':
+ if( z[0]=='l' && m_gt_1(z+2) ){
+ z += 2;
+ }
+ break;
+ case 'c':
+ if( z[0]=='e' && z[2]=='n' && (z[3]=='a' || z[3]=='e') && m_gt_1(z+4) ){
+ z += 4;
+ }
+ break;
+ case 'e':
+ if( z[0]=='r' && m_gt_1(z+2) ){
+ z += 2;
+ }
+ break;
+ case 'i':
+ if( z[0]=='c' && m_gt_1(z+2) ){
+ z += 2;
+ }
+ break;
+ case 'l':
+ if( z[0]=='e' && z[2]=='b' && (z[3]=='a' || z[3]=='i') && m_gt_1(z+4) ){
+ z += 4;
+ }
+ break;
+ case 'n':
+ if( z[0]=='t' ){
+ if( z[2]=='a' ){
+ if( m_gt_1(z+3) ){
+ z += 3;
+ }
+ }else if( z[2]=='e' ){
+ if( !stem(&z, "tneme", "", m_gt_1)
+ && !stem(&z, "tnem", "", m_gt_1)
+ ){
+ stem(&z, "tne", "", m_gt_1);
+ }
+ }
+ }
+ break;
+ case 'o':
+ if( z[0]=='u' ){
+ if( m_gt_1(z+2) ){
+ z += 2;
+ }
+ }else if( z[3]=='s' || z[3]=='t' ){
+ stem(&z, "noi", "", m_gt_1);
+ }
+ break;
+ case 's':
+ if( z[0]=='m' && z[2]=='i' && m_gt_1(z+3) ){
+ z += 3;
+ }
+ break;
+ case 't':
+ if( !stem(&z, "eta", "", m_gt_1) ){
+ stem(&z, "iti", "", m_gt_1);
+ }
+ break;
+ case 'u':
+ if( z[0]=='s' && z[2]=='o' && m_gt_1(z+3) ){
+ z += 3;
+ }
+ break;
+ case 'v':
+ case 'z':
+ if( z[0]=='e' && z[2]=='i' && m_gt_1(z+3) ){
+ z += 3;
+ }
+ break;
+ }
+
+ /* Step 5a */
+ if( z[0]=='e' ){
+ if( m_gt_1(z+1) ){
+ z++;
+ }else if( m_eq_1(z+1) && !star_oh(z+1) ){
+ z++;
+ }
+ }
+
+ /* Step 5b */
+ if( m_gt_1(z) && z[0]=='l' && z[1]=='l' ){
+ z++;
+ }
+
+ /* z[] is now the stemmed word in reverse order. Flip it back
+ ** around into forward order and return.
+ */
+ *pnOut = i = (int)strlen(z);
+ zOut[i] = 0;
+ while( *z ){
+ zOut[--i] = *(z++);
+ }
+}
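+
+/*
+** Editorial note: a few classic inputs and the outputs this stemmer
+** produces for them:
+**
+**     caresses  ->  caress   (step 1a: SSES -> SS)
+**     ponies    ->  poni     (step 1a: IES  -> I)
+**     running   ->  run      (step 1b: -ING dropped, double 'n' undoubled)
+*/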
+
+/*
+** Characters that can be part of a token. We assume any character
+** whose value is 0x80 or greater (any UTF character) can be
+** part of a token. In other words, delimiters all must have
+** values of 0x7f or lower.
+*/
+static const char porterIdChar[] = {
+/* x0 x1 x2 x3 x4 x5 x6 x7 x8 x9 xA xB xC xD xE xF */
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, /* 3x */
+ 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 4x */
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, /* 5x */
+ 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 6x */
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, /* 7x */
+};
+#define isDelim(C) (((ch=C)&0x80)==0 && (ch<0x30 || !porterIdChar[ch-0x30]))
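+
+/*
+** Editorial note: the macro above assigns its argument to a variable
+** named "ch" as a side effect, so any function using isDelim() must
+** declare a local int ch (as porterNext() below does). The table lookup
+** is offset by 0x30 because porterIdChar[] only covers bytes 0x30-0x7f;
+** everything below 0x30 is always a delimiter.
+*/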
+
+/*
+** Extract the next token from a tokenization cursor. The cursor must
+** have been opened by a prior call to porterOpen().
+*/
+static int porterNext(
+ sqlite3_tokenizer_cursor *pCursor, /* Cursor returned by porterOpen */
+ const char **pzToken, /* OUT: *pzToken is the token text */
+ int *pnBytes, /* OUT: Number of bytes in token */
+ int *piStartOffset, /* OUT: Starting offset of token */
+ int *piEndOffset, /* OUT: Ending offset of token */
+ int *piPosition /* OUT: Position integer of token */
+){
+ porter_tokenizer_cursor *c = (porter_tokenizer_cursor *) pCursor;
+ const char *z = c->zInput;
+
+ while( c->iOffset<c->nInput ){
+ int iStartOffset, ch;
+
+ /* Scan past delimiter characters */
+ while( c->iOffset<c->nInput && isDelim(z[c->iOffset]) ){
+ c->iOffset++;
+ }
+
+ /* Count non-delimiter characters. */
+ iStartOffset = c->iOffset;
+ while( c->iOffset<c->nInput && !isDelim(z[c->iOffset]) ){
+ c->iOffset++;
+ }
+
+ if( c->iOffset>iStartOffset ){
+ int n = c->iOffset-iStartOffset;
+ if( n>c->nAllocated ){
+ char *pNew;
+ c->nAllocated = n+20;
+ pNew = sqlite3_realloc(c->zToken, c->nAllocated);
+ if( !pNew ) return SQLITE_NOMEM;
+ c->zToken = pNew;
+ }
+ porter_stemmer(&z[iStartOffset], n, c->zToken, pnBytes);
+ *pzToken = c->zToken;
+ *piStartOffset = iStartOffset;
+ *piEndOffset = c->iOffset;
+ *piPosition = c->iToken++;
+ return SQLITE_OK;
+ }
+ }
+ return SQLITE_DONE;
+}
+
+/*
+** The set of routines that implement the porter-stemmer tokenizer
+*/
+static const sqlite3_tokenizer_module porterTokenizerModule = {
+ 0,
+ porterCreate,
+ porterDestroy,
+ porterOpen,
+ porterClose,
+ porterNext,
+ 0
+};
+
+/*
+** Return a pointer to the porter tokenizer module in *ppModule. The
+** module object is static, so no allocation occurs here.
+*/
+SQLITE_PRIVATE void sqlite3Fts3PorterTokenizerModule(
+ sqlite3_tokenizer_module const**ppModule
+){
+ *ppModule = &porterTokenizerModule;
+}
+
+#endif /* !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS3) */
+
+/************** End of fts3_porter.c *****************************************/
+/************** Begin file fts3_tokenizer.c **********************************/
+/*
+** 2007 June 22
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+******************************************************************************
+**
+** This is part of an SQLite module implementing full-text search.
+** This particular file implements the generic tokenizer interface.
+*/
+
+/*
+** The code in this file is only compiled if:
+**
+** * The FTS3 module is being built as an extension
+** (in which case SQLITE_CORE is not defined), or
+**
+** * The FTS3 module is being built into the core of
+** SQLite (in which case SQLITE_ENABLE_FTS3 is defined).
+*/
+/* #include "fts3Int.h" */
+#if !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS3)
+
+/* #include <assert.h> */
+/* #include <string.h> */
+
+/*
+** Return true if the two-argument version of fts3_tokenizer()
+** has been activated via a prior call to sqlite3_db_config(db,
+** SQLITE_DBCONFIG_ENABLE_FTS3_TOKENIZER, 1, 0);
+*/
+static int fts3TokenizerEnabled(sqlite3_context *context){
+ sqlite3 *db = sqlite3_context_db_handle(context);
+ int isEnabled = 0;
+ sqlite3_db_config(db,SQLITE_DBCONFIG_ENABLE_FTS3_TOKENIZER,-1,&isEnabled);
+ return isEnabled;
+}
+
+/*
+** Implementation of the SQL scalar function for accessing the underlying
+** hash table. This function may be called as follows:
+**
+** SELECT <function-name>(<key-name>);
+** SELECT <function-name>(<key-name>, <pointer>);
+**
+** where <function-name> is the name passed as the second argument
+** to the sqlite3Fts3InitHashTable() function (e.g. 'fts3_tokenizer').
+**
+** If the <pointer> argument is specified, it must be a blob value
+** containing a pointer to be stored as the hash data corresponding
+** to the string <key-name>. If <pointer> is not specified, then
+** the string <key-name> must already exist in the hash table. Otherwise,
+** an error is returned.
+**
+** Whether or not the <pointer> argument is specified, the value returned
+** is a blob containing the pointer stored as the hash data corresponding
+** to string <key-name> (after the hash-table is updated, if applicable).
+*/
+static void fts3TokenizerFunc(
+ sqlite3_context *context,
+ int argc,
+ sqlite3_value **argv
+){
+ Fts3Hash *pHash;
+ void *pPtr = 0;
+ const unsigned char *zName;
+ int nName;
+
+ assert( argc==1 || argc==2 );
+
+ pHash = (Fts3Hash *)sqlite3_user_data(context);
+
+ zName = sqlite3_value_text(argv[0]);
+ nName = sqlite3_value_bytes(argv[0])+1;
+
+ if( argc==2 ){
+ if( fts3TokenizerEnabled(context) || sqlite3_value_frombind(argv[1]) ){
+ void *pOld;
+ int n = sqlite3_value_bytes(argv[1]);
+ if( zName==0 || n!=sizeof(pPtr) ){
+ sqlite3_result_error(context, "argument type mismatch", -1);
+ return;
+ }
+ pPtr = *(void **)sqlite3_value_blob(argv[1]);
+ pOld = sqlite3Fts3HashInsert(pHash, (void *)zName, nName, pPtr);
+ if( pOld==pPtr ){
+ sqlite3_result_error(context, "out of memory", -1);
+ }
+ }else{
+ sqlite3_result_error(context, "fts3tokenize disabled", -1);
+ return;
+ }
+ }else{
+ if( zName ){
+ pPtr = sqlite3Fts3HashFind(pHash, zName, nName);
+ }
+ if( !pPtr ){
+ char *zErr = sqlite3_mprintf("unknown tokenizer: %s", zName);
+ sqlite3_result_error(context, zErr, -1);
+ sqlite3_free(zErr);
+ return;
+ }
+ }
+ if( fts3TokenizerEnabled(context) || sqlite3_value_frombind(argv[0]) ){
+ sqlite3_result_blob(context, (void *)&pPtr, sizeof(pPtr), SQLITE_TRANSIENT);
+ }
+}
+
+SQLITE_PRIVATE int sqlite3Fts3IsIdChar(char c){
+ static const char isFtsIdChar[] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 0x */
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 1x */
+ 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 2x */
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, /* 3x */
+ 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 4x */
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, /* 5x */
+ 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 6x */
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, /* 7x */
+ };
+ return (c&0x80 || isFtsIdChar[(int)(c)]);
+}
+
+SQLITE_PRIVATE const char *sqlite3Fts3NextToken(const char *zStr, int *pn){
+ const char *z1;
+ const char *z2 = 0;
+
+ /* Find the start of the next token. */
+ z1 = zStr;
+ while( z2==0 ){
+ char c = *z1;
+ switch( c ){
+ case '\0': return 0; /* No more tokens here */
+ case '\'':
+ case '"':
+ case '`': {
+ z2 = z1;
+ while( *++z2 && (*z2!=c || *++z2==c) );
+ break;
+ }
+ case '[':
+ z2 = &z1[1];
+ while( *z2 && z2[0]!=']' ) z2++;
+ if( *z2 ) z2++;
+ break;
+
+ default:
+ if( sqlite3Fts3IsIdChar(*z1) ){
+ z2 = &z1[1];
+ while( sqlite3Fts3IsIdChar(*z2) ) z2++;
+ }else{
+ z1++;
+ }
+ }
+ }
+
+ *pn = (int)(z2-z1);
+ return z1;
+}
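+
+/*
+** Editorial note: given zStr = "porter 'two words' next", the first
+** call returns a pointer to "porter" with *pn==6; calling again on the
+** remainder returns the quoted token "'two words'" including its
+** quotes (*pn==11), and then "next" (*pn==4). A call on an empty or
+** all-delimiter tail returns 0.
+*/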
+
+SQLITE_PRIVATE int sqlite3Fts3InitTokenizer(
+ Fts3Hash *pHash, /* Tokenizer hash table */
+ const char *zArg, /* Tokenizer name */
+ sqlite3_tokenizer **ppTok, /* OUT: Tokenizer (if applicable) */
+ char **pzErr /* OUT: Set to malloced error message */
+){
+ int rc;
+ char *z = (char *)zArg;
+ int n = 0;
+ char *zCopy;
+ char *zEnd; /* Pointer to nul-term of zCopy */
+ sqlite3_tokenizer_module *m;
+
+ zCopy = sqlite3_mprintf("%s", zArg);
+ if( !zCopy ) return SQLITE_NOMEM;
+ zEnd = &zCopy[strlen(zCopy)];
+
+ z = (char *)sqlite3Fts3NextToken(zCopy, &n);
+ if( z==0 ){
+ assert( n==0 );
+ z = zCopy;
+ }
+ z[n] = '\0';
+ sqlite3Fts3Dequote(z);
+
+ m = (sqlite3_tokenizer_module *)sqlite3Fts3HashFind(pHash,z,(int)strlen(z)+1);
+ if( !m ){
+ sqlite3Fts3ErrMsg(pzErr, "unknown tokenizer: %s", z);
+ rc = SQLITE_ERROR;
+ }else{
+ char const **aArg = 0;
+ int iArg = 0;
+ z = &z[n+1];
+ while( z<zEnd && (NULL!=(z = (char *)sqlite3Fts3NextToken(z, &n))) ){
+ sqlite3_int64 nNew = sizeof(char *)*(iArg+1);
+ char const **aNew = (const char **)sqlite3_realloc64((void *)aArg, nNew);
+ if( !aNew ){
+ sqlite3_free(zCopy);
+ sqlite3_free((void *)aArg);
+ return SQLITE_NOMEM;
+ }
+ aArg = aNew;
+ aArg[iArg++] = z;
+ z[n] = '\0';
+ sqlite3Fts3Dequote(z);
+ z = &z[n+1];
+ }
+ rc = m->xCreate(iArg, aArg, ppTok);
+ assert( rc!=SQLITE_OK || *ppTok );
+ if( rc!=SQLITE_OK ){
+ sqlite3Fts3ErrMsg(pzErr, "unknown tokenizer");
+ }else{
+ (*ppTok)->pModule = m;
+ }
+ sqlite3_free((void *)aArg);
+ }
+
+ sqlite3_free(zCopy);
+ return rc;
+}
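+
+/*
+** Editorial note: for example, passing zArg = "porter" looks up the
+** porter tokenizer in pHash and invokes its xCreate() with zero
+** arguments, while zArg = "simple ab" would pass the single argument
+** "ab" through to the simple tokenizer's xCreate().
+*/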
+
+
+#ifdef SQLITE_TEST
+
+#if defined(INCLUDE_SQLITE_TCL_H)
+# include "sqlite_tcl.h"
+#else
+# include "tcl.h"
+#endif
+/* #include <string.h> */
+
+/*
+** Implementation of a special SQL scalar function for testing tokenizers
+** designed to be used in concert with the Tcl testing framework. This
+** function must be called with two or more arguments:
+**
+** SELECT <function-name>(<key-name>, ..., <input-string>);
+**
+** where <function-name> is the name passed as the second argument
+** to the sqlite3Fts3InitHashTable() function (e.g. 'fts3_tokenizer')
+** concatenated with the string '_test' (e.g. 'fts3_tokenizer_test').
+**
+** The return value is a string that may be interpreted as a Tcl
+** list. For each token in the <input-string>, three elements are
+** added to the returned list. The first is the token position, the
+** second is the token text (folded, stemmed, etc.) and the third is the
+** substring of <input-string> associated with the token. For example,
+** using the built-in "simple" tokenizer:
+**
+**     SELECT fts3_tokenizer_test('simple', 'I don''t see how');
+**
+** will return the string:
+**
+** "{0 i I 1 dont don't 2 see see 3 how how}"
+**
+*/
+static void testFunc(
+ sqlite3_context *context,
+ int argc,
+ sqlite3_value **argv
+){
+ Fts3Hash *pHash;
+ sqlite3_tokenizer_module *p;
+ sqlite3_tokenizer *pTokenizer = 0;
+ sqlite3_tokenizer_cursor *pCsr = 0;
+
+ const char *zErr = 0;
+
+ const char *zName;
+ int nName;
+ const char *zInput;
+ int nInput;
+
+ const char *azArg[64];
+
+ const char *zToken;
+ int nToken = 0;
+ int iStart = 0;
+ int iEnd = 0;
+ int iPos = 0;
+ int i;
+
+ Tcl_Obj *pRet;
+
+ if( argc<2 ){
+ sqlite3_result_error(context, "insufficient arguments", -1);
+ return;
+ }
+
+ nName = sqlite3_value_bytes(argv[0]);
+ zName = (const char *)sqlite3_value_text(argv[0]);
+ nInput = sqlite3_value_bytes(argv[argc-1]);
+ zInput = (const char *)sqlite3_value_text(argv[argc-1]);
+
+ pHash = (Fts3Hash *)sqlite3_user_data(context);
+ p = (sqlite3_tokenizer_module *)sqlite3Fts3HashFind(pHash, zName, nName+1);
+
+ if( !p ){
+ char *zErr2 = sqlite3_mprintf("unknown tokenizer: %s", zName);
+ sqlite3_result_error(context, zErr2, -1);
+ sqlite3_free(zErr2);
+ return;
+ }
+
+ pRet = Tcl_NewObj();
+ Tcl_IncrRefCount(pRet);
+
+ for(i=1; i<argc-1; i++){
+ azArg[i-1] = (const char *)sqlite3_value_text(argv[i]);
+ }
+
+ if( SQLITE_OK!=p->xCreate(argc-2, azArg, &pTokenizer) ){
+ zErr = "error in xCreate()";
+ goto finish;
+ }
+ pTokenizer->pModule = p;
+ if( sqlite3Fts3OpenTokenizer(pTokenizer, 0, zInput, nInput, &pCsr) ){
+ zErr = "error in xOpen()";
+ goto finish;
+ }
+
+ while( SQLITE_OK==p->xNext(pCsr, &zToken, &nToken, &iStart, &iEnd, &iPos) ){
+ Tcl_ListObjAppendElement(0, pRet, Tcl_NewIntObj(iPos));
+ Tcl_ListObjAppendElement(0, pRet, Tcl_NewStringObj(zToken, nToken));
+ zToken = &zInput[iStart];
+ nToken = iEnd-iStart;
+ Tcl_ListObjAppendElement(0, pRet, Tcl_NewStringObj(zToken, nToken));
+ }
+
+ if( SQLITE_OK!=p->xClose(pCsr) ){
+ zErr = "error in xClose()";
+ goto finish;
+ }
+ if( SQLITE_OK!=p->xDestroy(pTokenizer) ){
+ zErr = "error in xDestroy()";
+ goto finish;
+ }
+
+finish:
+ if( zErr ){
+ sqlite3_result_error(context, zErr, -1);
+ }else{
+ sqlite3_result_text(context, Tcl_GetString(pRet), -1, SQLITE_TRANSIENT);
+ }
+ Tcl_DecrRefCount(pRet);
+}
+
+static
+int registerTokenizer(
+ sqlite3 *db,
+ char *zName,
+ const sqlite3_tokenizer_module *p
+){
+ int rc;
+ sqlite3_stmt *pStmt;
+ const char zSql[] = "SELECT fts3_tokenizer(?, ?)";
+
+ rc = sqlite3_prepare_v2(db, zSql, -1, &pStmt, 0);
+ if( rc!=SQLITE_OK ){
+ return rc;
+ }
+
+ sqlite3_bind_text(pStmt, 1, zName, -1, SQLITE_STATIC);
+ sqlite3_bind_blob(pStmt, 2, &p, sizeof(p), SQLITE_STATIC);
+ sqlite3_step(pStmt);
+
+ return sqlite3_finalize(pStmt);
+}
+
+
+static
+int queryTokenizer(
+ sqlite3 *db,
+ char *zName,
+ const sqlite3_tokenizer_module **pp
+){
+ int rc;
+ sqlite3_stmt *pStmt;
+ const char zSql[] = "SELECT fts3_tokenizer(?)";
+
+ *pp = 0;
+ rc = sqlite3_prepare_v2(db, zSql, -1, &pStmt, 0);
+ if( rc!=SQLITE_OK ){
+ return rc;
+ }
+
+ sqlite3_bind_text(pStmt, 1, zName, -1, SQLITE_STATIC);
+ if( SQLITE_ROW==sqlite3_step(pStmt) ){
+ if( sqlite3_column_type(pStmt, 0)==SQLITE_BLOB
+ && sqlite3_column_bytes(pStmt, 0)==sizeof(*pp)
+ ){
+ memcpy((void *)pp, sqlite3_column_blob(pStmt, 0), sizeof(*pp));
+ }
+ }
+
+ return sqlite3_finalize(pStmt);
+}
+
+SQLITE_PRIVATE void sqlite3Fts3SimpleTokenizerModule(sqlite3_tokenizer_module const**ppModule);
+
+/*
+** Implementation of the scalar function fts3_tokenizer_internal_test().
+** This function is used for testing only; it is not included in the
+** build unless SQLITE_TEST is defined.
+**
+** The purpose of this is to test that the fts3_tokenizer() function
+** can be used as designed by the C code in the queryTokenizer() and
+** registerTokenizer() functions above. These two functions are repeated
+** in the README.tokenizer file as an example, so it is important to
+** test them.
+**
+** To run the tests, evaluate the fts3_tokenizer_internal_test() scalar
+** function with no arguments. An assert() will fail if a problem is
+** detected. i.e.:
+**
+** SELECT fts3_tokenizer_internal_test();
+**
+*/
+static void intTestFunc(
+ sqlite3_context *context,
+ int argc,
+ sqlite3_value **argv
+){
+ int rc;
+ const sqlite3_tokenizer_module *p1;
+ const sqlite3_tokenizer_module *p2;
+ sqlite3 *db = (sqlite3 *)sqlite3_user_data(context);
+
+ UNUSED_PARAMETER(argc);
+ UNUSED_PARAMETER(argv);
+
+ /* Test the query function */
+ sqlite3Fts3SimpleTokenizerModule(&p1);
+ rc = queryTokenizer(db, "simple", &p2);
+ assert( rc==SQLITE_OK );
+ assert( p1==p2 );
+ rc = queryTokenizer(db, "nosuchtokenizer", &p2);
+ assert( rc==SQLITE_ERROR );
+ assert( p2==0 );
+ assert( 0==strcmp(sqlite3_errmsg(db), "unknown tokenizer: nosuchtokenizer") );
+
+ /* Test the storage function */
+ if( fts3TokenizerEnabled(context) ){
+ rc = registerTokenizer(db, "nosuchtokenizer", p1);
+ assert( rc==SQLITE_OK );
+ rc = queryTokenizer(db, "nosuchtokenizer", &p2);
+ assert( rc==SQLITE_OK );
+ assert( p2==p1 );
+ }
+
+ sqlite3_result_text(context, "ok", -1, SQLITE_STATIC);
+}
+
+#endif
+
+/*
+** Set up SQL objects in database db used to access the contents of
+** the hash table pointed to by argument pHash. The hash table must
+** have been initialized to use string keys, and to take a private copy
+** of the key when a value is inserted, i.e. by a call similar to:
+**
+** sqlite3Fts3HashInit(pHash, FTS3_HASH_STRING, 1);
+**
+** This function adds a scalar function (see header comment above
+** fts3TokenizerFunc() in this file for details) and, if ENABLE_TABLE is
+** defined at compilation time, a temporary virtual table (see header
+** comment above struct HashTableVtab) to the database schema. Both
+** provide read/write access to the contents of *pHash.
+**
+** The third argument to this function, zName, is used as the name
+** of both the scalar function and, if created, the virtual table.
+*/
+SQLITE_PRIVATE int sqlite3Fts3InitHashTable(
+ sqlite3 *db,
+ Fts3Hash *pHash,
+ const char *zName
+){
+ int rc = SQLITE_OK;
+ void *p = (void *)pHash;
+ const int any = SQLITE_UTF8|SQLITE_DIRECTONLY;
+
+#ifdef SQLITE_TEST
+ char *zTest = 0;
+ char *zTest2 = 0;
+ void *pdb = (void *)db;
+ zTest = sqlite3_mprintf("%s_test", zName);
+ zTest2 = sqlite3_mprintf("%s_internal_test", zName);
+ if( !zTest || !zTest2 ){
+ rc = SQLITE_NOMEM;
+ }
+#endif
+
+ if( SQLITE_OK==rc ){
+ rc = sqlite3_create_function(db, zName, 1, any, p, fts3TokenizerFunc, 0, 0);
+ }
+ if( SQLITE_OK==rc ){
+ rc = sqlite3_create_function(db, zName, 2, any, p, fts3TokenizerFunc, 0, 0);
+ }
+#ifdef SQLITE_TEST
+ if( SQLITE_OK==rc ){
+ rc = sqlite3_create_function(db, zTest, -1, any, p, testFunc, 0, 0);
+ }
+ if( SQLITE_OK==rc ){
+ rc = sqlite3_create_function(db, zTest2, 0, any, pdb, intTestFunc, 0, 0);
+ }
+#endif
+
+#ifdef SQLITE_TEST
+ sqlite3_free(zTest);
+ sqlite3_free(zTest2);
+#endif
+
+ return rc;
+}
+
+#endif /* !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS3) */
+
+/************** End of fts3_tokenizer.c **************************************/
+/************** Begin file fts3_tokenizer1.c *********************************/
+/*
+** 2006 Oct 10
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+******************************************************************************
+**
+** Implementation of the "simple" full-text-search tokenizer.
+*/
+
+/*
+** The code in this file is only compiled if:
+**
+** * The FTS3 module is being built as an extension
+** (in which case SQLITE_CORE is not defined), or
+**
+** * The FTS3 module is being built into the core of
+** SQLite (in which case SQLITE_ENABLE_FTS3 is defined).
+*/
+/* #include "fts3Int.h" */
+#if !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS3)
+
+/* #include <assert.h> */
+/* #include <stdlib.h> */
+/* #include <stdio.h> */
+/* #include <string.h> */
+
+/* #include "fts3_tokenizer.h" */
+
+typedef struct simple_tokenizer {
+ sqlite3_tokenizer base;
+ char delim[128]; /* flag ASCII delimiters */
+} simple_tokenizer;
+
+typedef struct simple_tokenizer_cursor {
+ sqlite3_tokenizer_cursor base;
+ const char *pInput; /* input we are tokenizing */
+ int nBytes; /* size of the input */
+ int iOffset; /* current position in pInput */
+ int iToken; /* index of next token to be returned */
+ char *pToken; /* storage for current token */
+  int nTokenAllocated;          /* space allocated to pToken buffer */
+} simple_tokenizer_cursor;
+
+
+static int simpleDelim(simple_tokenizer *t, unsigned char c){
+ return c<0x80 && t->delim[c];
+}
+static int fts3_isalnum(int x){
+ return (x>='0' && x<='9') || (x>='A' && x<='Z') || (x>='a' && x<='z');
+}
+
+/*
+** Create a new tokenizer instance.
+*/
+static int simpleCreate(
+ int argc, const char * const *argv,
+ sqlite3_tokenizer **ppTokenizer
+){
+ simple_tokenizer *t;
+
+ t = (simple_tokenizer *) sqlite3_malloc(sizeof(*t));
+ if( t==NULL ) return SQLITE_NOMEM;
+ memset(t, 0, sizeof(*t));
+
+ /* TODO(shess) Delimiters need to remain the same from run to run,
+ ** else we need to reindex. One solution would be a meta-table to
+ ** track such information in the database, then we'd only want this
+ ** information on the initial create.
+ */
+ if( argc>1 ){
+ int i, n = (int)strlen(argv[1]);
+ for(i=0; i<n; i++){
+ unsigned char ch = argv[1][i];
+ /* We explicitly don't support UTF-8 delimiters for now. */
+ if( ch>=0x80 ){
+ sqlite3_free(t);
+ return SQLITE_ERROR;
+ }
+ t->delim[ch] = 1;
+ }
+ } else {
+ /* Mark non-alphanumeric ASCII characters as delimiters */
+ int i;
+ for(i=1; i<0x80; i++){
+ t->delim[i] = !fts3_isalnum(i) ? -1 : 0;
+ }
+ }
+
+ *ppTokenizer = &t->base;
+ return SQLITE_OK;
+}
+
+/*
+** Destroy a tokenizer
+*/
+static int simpleDestroy(sqlite3_tokenizer *pTokenizer){
+ sqlite3_free(pTokenizer);
+ return SQLITE_OK;
+}
+
+/*
+** Prepare to begin tokenizing a particular string. The input
+** string to be tokenized is pInput[0..nBytes-1]. A cursor
+** used to incrementally tokenize this string is returned in
+** *ppCursor.
+*/
+static int simpleOpen(
+ sqlite3_tokenizer *pTokenizer, /* The tokenizer */
+ const char *pInput, int nBytes, /* String to be tokenized */
+ sqlite3_tokenizer_cursor **ppCursor /* OUT: Tokenization cursor */
+){
+ simple_tokenizer_cursor *c;
+
+ UNUSED_PARAMETER(pTokenizer);
+
+ c = (simple_tokenizer_cursor *) sqlite3_malloc(sizeof(*c));
+ if( c==NULL ) return SQLITE_NOMEM;
+
+ c->pInput = pInput;
+ if( pInput==0 ){
+ c->nBytes = 0;
+ }else if( nBytes<0 ){
+ c->nBytes = (int)strlen(pInput);
+ }else{
+ c->nBytes = nBytes;
+ }
+ c->iOffset = 0; /* start tokenizing at the beginning */
+ c->iToken = 0;
+ c->pToken = NULL; /* no space allocated, yet. */
+ c->nTokenAllocated = 0;
+
+ *ppCursor = &c->base;
+ return SQLITE_OK;
+}
+
+/*
+** Close a tokenization cursor previously opened by a call to
+** simpleOpen() above.
+*/
+static int simpleClose(sqlite3_tokenizer_cursor *pCursor){
+ simple_tokenizer_cursor *c = (simple_tokenizer_cursor *) pCursor;
+ sqlite3_free(c->pToken);
+ sqlite3_free(c);
+ return SQLITE_OK;
+}
+
+/*
+** Extract the next token from a tokenization cursor. The cursor must
+** have been opened by a prior call to simpleOpen().
+*/
+static int simpleNext(
+ sqlite3_tokenizer_cursor *pCursor, /* Cursor returned by simpleOpen */
+ const char **ppToken, /* OUT: *ppToken is the token text */
+ int *pnBytes, /* OUT: Number of bytes in token */
+ int *piStartOffset, /* OUT: Starting offset of token */
+ int *piEndOffset, /* OUT: Ending offset of token */
+ int *piPosition /* OUT: Position integer of token */
+){
+ simple_tokenizer_cursor *c = (simple_tokenizer_cursor *) pCursor;
+ simple_tokenizer *t = (simple_tokenizer *) pCursor->pTokenizer;
+ unsigned char *p = (unsigned char *)c->pInput;
+
+ while( c->iOffset<c->nBytes ){
+ int iStartOffset;
+
+ /* Scan past delimiter characters */
+ while( c->iOffset<c->nBytes && simpleDelim(t, p[c->iOffset]) ){
+ c->iOffset++;
+ }
+
+ /* Count non-delimiter characters. */
+ iStartOffset = c->iOffset;
+ while( c->iOffset<c->nBytes && !simpleDelim(t, p[c->iOffset]) ){
+ c->iOffset++;
+ }
+
+ if( c->iOffset>iStartOffset ){
+ int i, n = c->iOffset-iStartOffset;
+ if( n>c->nTokenAllocated ){
+ char *pNew;
+ c->nTokenAllocated = n+20;
+ pNew = sqlite3_realloc(c->pToken, c->nTokenAllocated);
+ if( !pNew ) return SQLITE_NOMEM;
+ c->pToken = pNew;
+ }
+ for(i=0; i<n; i++){
+ /* TODO(shess) This needs expansion to handle UTF-8
+ ** case-insensitivity.
+ */
+ unsigned char ch = p[iStartOffset+i];
+ c->pToken[i] = (char)((ch>='A' && ch<='Z') ? ch-'A'+'a' : ch);
+ }
+ *ppToken = c->pToken;
+ *pnBytes = n;
+ *piStartOffset = iStartOffset;
+ *piEndOffset = c->iOffset;
+ *piPosition = c->iToken++;
+
+ return SQLITE_OK;
+ }
+ }
+ return SQLITE_DONE;
+}
+
+/*
+** The set of routines that implement the simple tokenizer
+*/
+static const sqlite3_tokenizer_module simpleTokenizerModule = {
+ 0,
+ simpleCreate,
+ simpleDestroy,
+ simpleOpen,
+ simpleClose,
+ simpleNext,
+ 0,
+};
+
+/*
+** Return a pointer to the (statically allocated) simple tokenizer
+** module in *ppModule.
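+**
+** A caller might drive the module as follows (a sketch only; error
+** handling is omitted and the variable declarations are assumed):
+**
+**   const sqlite3_tokenizer_module *pMod;
+**   sqlite3_tokenizer *pTok;
+**   sqlite3_tokenizer_cursor *pCsr;
+**   const char *z; int n, iStart, iEnd, iPos;
+**
+**   sqlite3Fts3SimpleTokenizerModule(&pMod);
+**   pMod->xCreate(0, 0, &pTok);
+**   pMod->xOpen(pTok, "hello world", -1, &pCsr);
+**   pCsr->pTokenizer = pTok;
+**   while( pMod->xNext(pCsr, &z, &n, &iStart, &iEnd, &iPos)==SQLITE_OK ){
+**     ...
+**   }
+**   pMod->xClose(pCsr);
+**   pMod->xDestroy(pTok);
+**
+** Each pass through the loop leaves z[0..n-1] holding one token
+** ("hello", then "world").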
+*/
+SQLITE_PRIVATE void sqlite3Fts3SimpleTokenizerModule(
+ sqlite3_tokenizer_module const**ppModule
+){
+ *ppModule = &simpleTokenizerModule;
+}
+
+#endif /* !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS3) */
+
+/************** End of fts3_tokenizer1.c *************************************/
+/************** Begin file fts3_tokenize_vtab.c ******************************/
+/*
+** 2013 Apr 22
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+******************************************************************************
+**
+** This file contains code for the "fts3tokenize" virtual table module.
+** An fts3tokenize virtual table is created as follows:
+**
+** CREATE VIRTUAL TABLE <tbl> USING fts3tokenize(
+** <tokenizer-name>, <arg-1>, ...
+** );
+**
+** The table created has the following schema:
+**
+** CREATE TABLE <tbl>(input, token, start, end, position)
+**
+** When queried, the query must include a WHERE clause of type:
+**
+** input = <string>
+**
+** The virtual table module tokenizes this <string>, using the FTS3
+** tokenizer specified by the arguments to the CREATE VIRTUAL TABLE
+** statement and returns one row for each token in the result, with
+** fields set as follows:
+**
+** input: Always set to a copy of <string>
+** token: A token from the input.
+** start: Byte offset of the token within the input <string>.
+** end: Byte offset of the byte immediately following the end of the
+** token within the input string.
+**   position: Token offset of token within input.
+**
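+** For example (an illustrative sketch; the table name is arbitrary):
+**
+**   CREATE VIRTUAL TABLE tok1 USING fts3tokenize(simple);
+**   SELECT token, start, end, position FROM tok1 WHERE input = 'a dog';
+**
+** returns two rows: ('a', 0, 1, 0) and ('dog', 2, 5, 1).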
+*/
+/* #include "fts3Int.h" */
+#if !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS3)
+
+/* #include <string.h> */
+/* #include <assert.h> */
+
+typedef struct Fts3tokTable Fts3tokTable;
+typedef struct Fts3tokCursor Fts3tokCursor;
+
+/*
+** Virtual table structure.
+*/
+struct Fts3tokTable {
+ sqlite3_vtab base; /* Base class used by SQLite core */
+ const sqlite3_tokenizer_module *pMod;
+ sqlite3_tokenizer *pTok;
+};
+
+/*
+** Virtual table cursor structure.
+*/
+struct Fts3tokCursor {
+ sqlite3_vtab_cursor base; /* Base class used by SQLite core */
+ char *zInput; /* Input string */
+ sqlite3_tokenizer_cursor *pCsr; /* Cursor to iterate through zInput */
+ int iRowid; /* Current 'rowid' value */
+ const char *zToken; /* Current 'token' value */
+ int nToken; /* Size of zToken in bytes */
+ int iStart; /* Current 'start' value */
+ int iEnd; /* Current 'end' value */
+ int iPos; /* Current 'pos' value */
+};
+
+/*
+** Query FTS for the tokenizer implementation named zName.
+*/
+static int fts3tokQueryTokenizer(
+ Fts3Hash *pHash,
+ const char *zName,
+ const sqlite3_tokenizer_module **pp,
+ char **pzErr
+){
+ sqlite3_tokenizer_module *p;
+ int nName = (int)strlen(zName);
+
+ p = (sqlite3_tokenizer_module *)sqlite3Fts3HashFind(pHash, zName, nName+1);
+ if( !p ){
+ sqlite3Fts3ErrMsg(pzErr, "unknown tokenizer: %s", zName);
+ return SQLITE_ERROR;
+ }
+
+ *pp = p;
+ return SQLITE_OK;
+}
+
+/*
+** The second argument, argv[], is an array of pointers to nul-terminated
+** strings. This function makes a copy of the array and strings into a
+** single block of memory. It then dequotes any of the strings that appear
+** to be quoted.
+**
+** If successful, output parameter *pazDequote is set to point at the
+** array of dequoted strings and SQLITE_OK is returned. The caller is
+** responsible for eventually calling sqlite3_free() to free the array
+** in this case. Or, if an error occurs, an SQLite error code is returned.
+** The final value of *pazDequote is undefined in this case.
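+**
+** For example (illustrative only), the two-element input array
+** {"'simple'", "tokenchars"} is copied into a single allocation and
+** dequoted in place, yielding {"simple", "tokenchars"}.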
+*/
+static int fts3tokDequoteArray(
+ int argc, /* Number of elements in argv[] */
+ const char * const *argv, /* Input array */
+ char ***pazDequote /* Output array */
+){
+ int rc = SQLITE_OK; /* Return code */
+ if( argc==0 ){
+ *pazDequote = 0;
+ }else{
+ int i;
+ int nByte = 0;
+ char **azDequote;
+
+ for(i=0; i<argc; i++){
+ nByte += (int)(strlen(argv[i]) + 1);
+ }
+
+ *pazDequote = azDequote = sqlite3_malloc64(sizeof(char *)*argc + nByte);
+ if( azDequote==0 ){
+ rc = SQLITE_NOMEM;
+ }else{
+ char *pSpace = (char *)&azDequote[argc];
+ for(i=0; i<argc; i++){
+ int n = (int)strlen(argv[i]);
+ azDequote[i] = pSpace;
+ memcpy(pSpace, argv[i], n+1);
+ sqlite3Fts3Dequote(pSpace);
+ pSpace += (n+1);
+ }
+ }
+ }
+
+ return rc;
+}
+
+/*
+** Schema of the tokenizer table.
+*/
+#define FTS3_TOK_SCHEMA "CREATE TABLE x(input, token, start, end, position)"
+
+/*
+** This function does all the work for both the xConnect and xCreate methods.
+** These tables have no persistent representation of their own, so xConnect
+** and xCreate are identical operations.
+**
+** argv[0]: module name
+** argv[1]: database name
+** argv[2]: table name
+** argv[3]: first argument (tokenizer name)
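+**
+** For example (the database name is illustrative), the statement:
+**
+**   CREATE VIRTUAL TABLE t1 USING fts3tokenize(simple);
+**
+** invokes this function with argc==4 and argv[] set to
+** {"fts3tokenize", "main", "t1", "simple"}.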
+*/
+static int fts3tokConnectMethod(
+ sqlite3 *db, /* Database connection */
+ void *pHash, /* Hash table of tokenizers */
+ int argc, /* Number of elements in argv array */
+ const char * const *argv, /* xCreate/xConnect argument array */
+ sqlite3_vtab **ppVtab, /* OUT: New sqlite3_vtab object */
+ char **pzErr /* OUT: sqlite3_malloc'd error message */
+){
+ Fts3tokTable *pTab = 0;
+ const sqlite3_tokenizer_module *pMod = 0;
+ sqlite3_tokenizer *pTok = 0;
+ int rc;
+ char **azDequote = 0;
+ int nDequote;
+
+ rc = sqlite3_declare_vtab(db, FTS3_TOK_SCHEMA);
+ if( rc!=SQLITE_OK ) return rc;
+
+ nDequote = argc-3;
+ rc = fts3tokDequoteArray(nDequote, &argv[3], &azDequote);
+
+ if( rc==SQLITE_OK ){
+ const char *zModule;
+ if( nDequote<1 ){
+ zModule = "simple";
+ }else{
+ zModule = azDequote[0];
+ }
+ rc = fts3tokQueryTokenizer((Fts3Hash*)pHash, zModule, &pMod, pzErr);
+ }
+
+ assert( (rc==SQLITE_OK)==(pMod!=0) );
+ if( rc==SQLITE_OK ){
+ const char * const *azArg = 0;
+ if( nDequote>1 ) azArg = (const char * const *)&azDequote[1];
+ rc = pMod->xCreate((nDequote>1 ? nDequote-1 : 0), azArg, &pTok);
+ }
+
+ if( rc==SQLITE_OK ){
+ pTab = (Fts3tokTable *)sqlite3_malloc(sizeof(Fts3tokTable));
+ if( pTab==0 ){
+ rc = SQLITE_NOMEM;
+ }
+ }
+
+ if( rc==SQLITE_OK ){
+ memset(pTab, 0, sizeof(Fts3tokTable));
+ pTab->pMod = pMod;
+ pTab->pTok = pTok;
+ *ppVtab = &pTab->base;
+ }else{
+ if( pTok ){
+ pMod->xDestroy(pTok);
+ }
+ }
+
+ sqlite3_free(azDequote);
+ return rc;
+}
+
+/*
+** This function does the work for both the xDisconnect and xDestroy methods.
+** These tables have no persistent representation of their own, so xDisconnect
+** and xDestroy are identical operations.
+*/
+static int fts3tokDisconnectMethod(sqlite3_vtab *pVtab){
+ Fts3tokTable *pTab = (Fts3tokTable *)pVtab;
+
+ pTab->pMod->xDestroy(pTab->pTok);
+ sqlite3_free(pTab);
+ return SQLITE_OK;
+}
+
+/*
+** xBestIndex - Analyze a WHERE and ORDER BY clause.
+*/
+static int fts3tokBestIndexMethod(
+ sqlite3_vtab *pVTab,
+ sqlite3_index_info *pInfo
+){
+ int i;
+ UNUSED_PARAMETER(pVTab);
+
+ for(i=0; i<pInfo->nConstraint; i++){
+ if( pInfo->aConstraint[i].usable
+ && pInfo->aConstraint[i].iColumn==0
+ && pInfo->aConstraint[i].op==SQLITE_INDEX_CONSTRAINT_EQ
+ ){
+ pInfo->idxNum = 1;
+ pInfo->aConstraintUsage[i].argvIndex = 1;
+ pInfo->aConstraintUsage[i].omit = 1;
+ pInfo->estimatedCost = 1;
+ return SQLITE_OK;
+ }
+ }
+
+ pInfo->idxNum = 0;
+ assert( pInfo->estimatedCost>1000000.0 );
+
+ return SQLITE_OK;
+}
+
+/*
+** xOpen - Open a cursor.
+*/
+static int fts3tokOpenMethod(sqlite3_vtab *pVTab, sqlite3_vtab_cursor **ppCsr){
+ Fts3tokCursor *pCsr;
+ UNUSED_PARAMETER(pVTab);
+
+ pCsr = (Fts3tokCursor *)sqlite3_malloc(sizeof(Fts3tokCursor));
+ if( pCsr==0 ){
+ return SQLITE_NOMEM;
+ }
+ memset(pCsr, 0, sizeof(Fts3tokCursor));
+
+ *ppCsr = (sqlite3_vtab_cursor *)pCsr;
+ return SQLITE_OK;
+}
+
+/*
+** Reset the tokenizer cursor passed as the only argument, as if it had
+** just been returned by fts3tokOpenMethod().
+*/
+static void fts3tokResetCursor(Fts3tokCursor *pCsr){
+ if( pCsr->pCsr ){
+ Fts3tokTable *pTab = (Fts3tokTable *)(pCsr->base.pVtab);
+ pTab->pMod->xClose(pCsr->pCsr);
+ pCsr->pCsr = 0;
+ }
+ sqlite3_free(pCsr->zInput);
+ pCsr->zInput = 0;
+ pCsr->zToken = 0;
+ pCsr->nToken = 0;
+ pCsr->iStart = 0;
+ pCsr->iEnd = 0;
+ pCsr->iPos = 0;
+ pCsr->iRowid = 0;
+}
+
+/*
+** xClose - Close a cursor.
+*/
+static int fts3tokCloseMethod(sqlite3_vtab_cursor *pCursor){
+ Fts3tokCursor *pCsr = (Fts3tokCursor *)pCursor;
+
+ fts3tokResetCursor(pCsr);
+ sqlite3_free(pCsr);
+ return SQLITE_OK;
+}
+
+/*
+** xNext - Advance the cursor to the next row, if any.
+*/
+static int fts3tokNextMethod(sqlite3_vtab_cursor *pCursor){
+ Fts3tokCursor *pCsr = (Fts3tokCursor *)pCursor;
+ Fts3tokTable *pTab = (Fts3tokTable *)(pCursor->pVtab);
+ int rc; /* Return code */
+
+ pCsr->iRowid++;
+ rc = pTab->pMod->xNext(pCsr->pCsr,
+ &pCsr->zToken, &pCsr->nToken,
+ &pCsr->iStart, &pCsr->iEnd, &pCsr->iPos
+ );
+
+ if( rc!=SQLITE_OK ){
+ fts3tokResetCursor(pCsr);
+ if( rc==SQLITE_DONE ) rc = SQLITE_OK;
+ }
+
+ return rc;
+}
+
+/*
+** xFilter - Initialize a cursor to point at the start of its data.
+*/
+static int fts3tokFilterMethod(
+ sqlite3_vtab_cursor *pCursor, /* The cursor used for this query */
+ int idxNum, /* Strategy index */
+ const char *idxStr, /* Unused */
+ int nVal, /* Number of elements in apVal */
+ sqlite3_value **apVal /* Arguments for the indexing scheme */
+){
+ int rc = SQLITE_ERROR;
+ Fts3tokCursor *pCsr = (Fts3tokCursor *)pCursor;
+ Fts3tokTable *pTab = (Fts3tokTable *)(pCursor->pVtab);
+ UNUSED_PARAMETER(idxStr);
+ UNUSED_PARAMETER(nVal);
+
+ fts3tokResetCursor(pCsr);
+ if( idxNum==1 ){
+ const char *zByte = (const char *)sqlite3_value_text(apVal[0]);
+ int nByte = sqlite3_value_bytes(apVal[0]);
+ pCsr->zInput = sqlite3_malloc64(nByte+1);
+ if( pCsr->zInput==0 ){
+ rc = SQLITE_NOMEM;
+ }else{
+ if( nByte>0 ) memcpy(pCsr->zInput, zByte, nByte);
+ pCsr->zInput[nByte] = 0;
+ rc = pTab->pMod->xOpen(pTab->pTok, pCsr->zInput, nByte, &pCsr->pCsr);
+ if( rc==SQLITE_OK ){
+ pCsr->pCsr->pTokenizer = pTab->pTok;
+ }
+ }
+ }
+
+ if( rc!=SQLITE_OK ) return rc;
+ return fts3tokNextMethod(pCursor);
+}
+
+/*
+** xEof - Return true if the cursor is at EOF, or false otherwise.
+*/
+static int fts3tokEofMethod(sqlite3_vtab_cursor *pCursor){
+ Fts3tokCursor *pCsr = (Fts3tokCursor *)pCursor;
+ return (pCsr->zToken==0);
+}
+
+/*
+** xColumn - Return a column value.
+*/
+static int fts3tokColumnMethod(
+ sqlite3_vtab_cursor *pCursor, /* Cursor to retrieve value from */
+ sqlite3_context *pCtx, /* Context for sqlite3_result_xxx() calls */
+ int iCol /* Index of column to read value from */
+){
+ Fts3tokCursor *pCsr = (Fts3tokCursor *)pCursor;
+
+ /* CREATE TABLE x(input, token, start, end, position) */
+ switch( iCol ){
+ case 0:
+ sqlite3_result_text(pCtx, pCsr->zInput, -1, SQLITE_TRANSIENT);
+ break;
+ case 1:
+ sqlite3_result_text(pCtx, pCsr->zToken, pCsr->nToken, SQLITE_TRANSIENT);
+ break;
+ case 2:
+ sqlite3_result_int(pCtx, pCsr->iStart);
+ break;
+ case 3:
+ sqlite3_result_int(pCtx, pCsr->iEnd);
+ break;
+ default:
+ assert( iCol==4 );
+ sqlite3_result_int(pCtx, pCsr->iPos);
+ break;
+ }
+ return SQLITE_OK;
+}
+
+/*
+** xRowid - Return the current rowid for the cursor.
+*/
+static int fts3tokRowidMethod(
+ sqlite3_vtab_cursor *pCursor, /* Cursor to retrieve value from */
+ sqlite_int64 *pRowid /* OUT: Rowid value */
+){
+ Fts3tokCursor *pCsr = (Fts3tokCursor *)pCursor;
+ *pRowid = (sqlite3_int64)pCsr->iRowid;
+ return SQLITE_OK;
+}
+
+/*
+** Register the fts3tok module with database connection db. Return SQLITE_OK
+** if successful or an error code if sqlite3_create_module() fails.
+*/
+SQLITE_PRIVATE int sqlite3Fts3InitTok(sqlite3 *db, Fts3Hash *pHash){
+ static const sqlite3_module fts3tok_module = {
+ 0, /* iVersion */
+ fts3tokConnectMethod, /* xCreate */
+ fts3tokConnectMethod, /* xConnect */
+ fts3tokBestIndexMethod, /* xBestIndex */
+ fts3tokDisconnectMethod, /* xDisconnect */
+ fts3tokDisconnectMethod, /* xDestroy */
+ fts3tokOpenMethod, /* xOpen */
+ fts3tokCloseMethod, /* xClose */
+ fts3tokFilterMethod, /* xFilter */
+ fts3tokNextMethod, /* xNext */
+ fts3tokEofMethod, /* xEof */
+ fts3tokColumnMethod, /* xColumn */
+ fts3tokRowidMethod, /* xRowid */
+ 0, /* xUpdate */
+ 0, /* xBegin */
+ 0, /* xSync */
+ 0, /* xCommit */
+ 0, /* xRollback */
+ 0, /* xFindFunction */
+ 0, /* xRename */
+ 0, /* xSavepoint */
+ 0, /* xRelease */
+ 0, /* xRollbackTo */
+ 0 /* xShadowName */
+ };
+ int rc; /* Return code */
+
+ rc = sqlite3_create_module(db, "fts3tokenize", &fts3tok_module, (void*)pHash);
+ return rc;
+}
+
+#endif /* !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS3) */
+
+/************** End of fts3_tokenize_vtab.c **********************************/
+/************** Begin file fts3_write.c **************************************/
+/*
+** 2009 Oct 23
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+******************************************************************************
+**
+** This file is part of the SQLite FTS3 extension module. Specifically,
+** this file contains code to insert, update and delete rows from FTS3
+** tables. It also contains code to merge FTS3 b-tree segments. Some
+** of the sub-routines used to merge segments are also used by the query
+** code in fts3.c.
+*/
+
+/* #include "fts3Int.h" */
+#if !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS3)
+
+/* #include <string.h> */
+/* #include <assert.h> */
+/* #include <stdlib.h> */
+/* #include <stdio.h> */
+
+#define FTS_MAX_APPENDABLE_HEIGHT 16
+
+/*
+** When full-text index nodes are loaded from disk, the buffer that they
+** are loaded into has the following number of bytes of padding at the end
+** of it. i.e. if a full-text index node is 900 bytes in size, then a buffer
+** of 920 bytes is allocated for it.
+**
+** This means that if we have a pointer into a buffer containing node data,
+** it is always safe to read up to two varints from it without risking an
+** overread, even if the node data is corrupted.
+*/
+#define FTS3_NODE_PADDING (FTS3_VARINT_MAX*2)
+
+/*
+** Under certain circumstances, b-tree nodes (doclists) can be loaded into
+** memory incrementally instead of all at once. This can be a big performance
+** win (reduced IO and CPU) if SQLite stops calling the virtual table xNext()
+** method before retrieving all query results (as may happen, for example,
+** if a query has a LIMIT clause).
+**
+** Incremental loading is used for b-tree nodes of FTS3_NODE_CHUNK_THRESHOLD
+** bytes and larger. Nodes are loaded in chunks of FTS3_NODE_CHUNKSIZE bytes.
+** The code is written so that the hard lower-limit for each of these values
+** is 1. Clearly such small values would be inefficient, but can be useful
+** for testing purposes.
+**
+** If this module is built with SQLITE_TEST defined, these constants may
+** be overridden at runtime for testing purposes. File fts3_test.c contains
+** a Tcl interface to read and write the values.
+*/
+#ifdef SQLITE_TEST
+int test_fts3_node_chunksize = (4*1024);
+int test_fts3_node_chunk_threshold = (4*1024)*4;
+# define FTS3_NODE_CHUNKSIZE test_fts3_node_chunksize
+# define FTS3_NODE_CHUNK_THRESHOLD test_fts3_node_chunk_threshold
+#else
+# define FTS3_NODE_CHUNKSIZE (4*1024)
+# define FTS3_NODE_CHUNK_THRESHOLD (FTS3_NODE_CHUNKSIZE*4)
+#endif
+
+/*
+** The values that may be meaningfully bound to the :1 parameter in
+** statements SQL_REPLACE_STAT and SQL_SELECT_STAT.
+*/
+#define FTS_STAT_DOCTOTAL 0
+#define FTS_STAT_INCRMERGEHINT 1
+#define FTS_STAT_AUTOINCRMERGE 2
+
+/*
+** If FTS3_LOG_MERGES is defined, call sqlite3_log() to report each automatic
+** and incremental merge operation that takes place. This is used for
+** debugging FTS only; it should not usually be turned on in production
+** systems.
+*/
+#ifdef FTS3_LOG_MERGES
+static void fts3LogMerge(int nMerge, sqlite3_int64 iAbsLevel){
+ sqlite3_log(SQLITE_OK, "%d-way merge from level %d", nMerge, (int)iAbsLevel);
+}
+#else
+#define fts3LogMerge(x, y)
+#endif
+
+
+typedef struct PendingList PendingList;
+typedef struct SegmentNode SegmentNode;
+typedef struct SegmentWriter SegmentWriter;
+
+/*
+** An instance of the following data structure is used to build doclists
+** incrementally. See function fts3PendingListAppend() for details.
+*/
+struct PendingList {
+ int nData;
+ char *aData;
+ int nSpace;
+ sqlite3_int64 iLastDocid;
+ sqlite3_int64 iLastCol;
+ sqlite3_int64 iLastPos;
+};
+
+
+/*
+** Each cursor has a (possibly empty) linked list of the following objects.
+*/
+struct Fts3DeferredToken {
+ Fts3PhraseToken *pToken; /* Pointer to corresponding expr token */
+ int iCol; /* Column token must occur in */
+ Fts3DeferredToken *pNext; /* Next in list of deferred tokens */
+ PendingList *pList; /* Doclist is assembled here */
+};
+
+/*
+** An instance of this structure is used to iterate through the terms on
+** a contiguous set of segment b-tree leaf nodes. Although the details of
+** this structure are only manipulated by code in this file, opaque handles
+** of type Fts3SegReader* are also used by code in fts3.c to iterate through
+** terms when querying the full-text index. See functions:
+**
+** sqlite3Fts3SegReaderNew()
+** sqlite3Fts3SegReaderFree()
+** sqlite3Fts3SegReaderIterate()
+**
+** Methods used to manipulate Fts3SegReader structures:
+**
+** fts3SegReaderNext()
+** fts3SegReaderFirstDocid()
+** fts3SegReaderNextDocid()
+*/
+struct Fts3SegReader {
+ int iIdx; /* Index within level, or 0x7FFFFFFF for PT */
+ u8 bLookup; /* True for a lookup only */
+ u8 rootOnly; /* True for a root-only reader */
+
+ sqlite3_int64 iStartBlock; /* Rowid of first leaf block to traverse */
+ sqlite3_int64 iLeafEndBlock; /* Rowid of final leaf block to traverse */
+ sqlite3_int64 iEndBlock; /* Rowid of final block in segment (or 0) */
+ sqlite3_int64 iCurrentBlock; /* Current leaf block (or 0) */
+
+ char *aNode; /* Pointer to node data (or NULL) */
+ int nNode; /* Size of buffer at aNode (or 0) */
+ int nPopulate; /* If >0, bytes of buffer aNode[] loaded */
+ sqlite3_blob *pBlob; /* If not NULL, blob handle to read node */
+
+ Fts3HashElem **ppNextElem;
+
+ /* Variables set by fts3SegReaderNext(). These may be read directly
+  ** by the caller. They are valid from the time sqlite3Fts3SegReaderNew()
+  ** returns until fts3SegReaderNext() returns something other than SQLITE_OK
+ ** (i.e. SQLITE_DONE).
+ */
+ int nTerm; /* Number of bytes in current term */
+ char *zTerm; /* Pointer to current term */
+ int nTermAlloc; /* Allocated size of zTerm buffer */
+ char *aDoclist; /* Pointer to doclist of current entry */
+ int nDoclist; /* Size of doclist in current entry */
+
+ /* The following variables are used by fts3SegReaderNextDocid() to iterate
+ ** through the current doclist (aDoclist/nDoclist).
+ */
+ char *pOffsetList;
+ int nOffsetList; /* For descending pending seg-readers only */
+ sqlite3_int64 iDocid;
+};
+
+#define fts3SegReaderIsPending(p) ((p)->ppNextElem!=0)
+#define fts3SegReaderIsRootOnly(p) ((p)->rootOnly!=0)
+
+/*
+** An instance of this structure is used to create a segment b-tree in the
+** database. The internal details of this type are only accessed by the
+** following functions:
+**
+** fts3SegWriterAdd()
+** fts3SegWriterFlush()
+** fts3SegWriterFree()
+*/
+struct SegmentWriter {
+ SegmentNode *pTree; /* Pointer to interior tree structure */
+ sqlite3_int64 iFirst; /* First slot in %_segments written */
+ sqlite3_int64 iFree; /* Next free slot in %_segments */
+ char *zTerm; /* Pointer to previous term buffer */
+ int nTerm; /* Number of bytes in zTerm */
+ int nMalloc; /* Size of malloc'd buffer at zMalloc */
+ char *zMalloc; /* Malloc'd space (possibly) used for zTerm */
+ int nSize; /* Size of allocation at aData */
+ int nData; /* Bytes of data in aData */
+ char *aData; /* Pointer to block from malloc() */
+ i64 nLeafData; /* Number of bytes of leaf data written */
+};
+
+/*
+** Type SegmentNode is used by the following three functions to create
+** the interior part of the segment b+-tree structures (everything except
+** the leaf nodes). These functions and type are only ever used by code
+** within the fts3SegWriterXXX() family of functions described above.
+**
+** fts3NodeAddTerm()
+** fts3NodeWrite()
+** fts3NodeFree()
+**
+** When a b+tree is written to the database (either as a result of a merge
+** or the pending-terms table being flushed), leaves are written into the
+** database file as soon as they are completely populated. The interior of
+** the tree is assembled in memory and written out only once all leaves have
+** been populated and stored. This is Ok, as the b+-tree fanout is usually
+** very large, meaning that the interior of the tree consumes relatively
+** little memory.
+*/
+struct SegmentNode {
+ SegmentNode *pParent; /* Parent node (or NULL for root node) */
+ SegmentNode *pRight; /* Pointer to right-sibling */
+ SegmentNode *pLeftmost; /* Pointer to left-most node of this depth */
+ int nEntry; /* Number of terms written to node so far */
+ char *zTerm; /* Pointer to previous term buffer */
+ int nTerm; /* Number of bytes in zTerm */
+ int nMalloc; /* Size of malloc'd buffer at zMalloc */
+ char *zMalloc; /* Malloc'd space (possibly) used for zTerm */
+ int nData; /* Bytes of valid data so far */
+ char *aData; /* Node data */
+};
+
+/*
+** Valid values for the second argument to fts3SqlStmt().
+*/
+#define SQL_DELETE_CONTENT 0
+#define SQL_IS_EMPTY 1
+#define SQL_DELETE_ALL_CONTENT 2
+#define SQL_DELETE_ALL_SEGMENTS 3
+#define SQL_DELETE_ALL_SEGDIR 4
+#define SQL_DELETE_ALL_DOCSIZE 5
+#define SQL_DELETE_ALL_STAT 6
+#define SQL_SELECT_CONTENT_BY_ROWID 7
+#define SQL_NEXT_SEGMENT_INDEX 8
+#define SQL_INSERT_SEGMENTS 9
+#define SQL_NEXT_SEGMENTS_ID 10
+#define SQL_INSERT_SEGDIR 11
+#define SQL_SELECT_LEVEL 12
+#define SQL_SELECT_LEVEL_RANGE 13
+#define SQL_SELECT_LEVEL_COUNT 14
+#define SQL_SELECT_SEGDIR_MAX_LEVEL 15
+#define SQL_DELETE_SEGDIR_LEVEL 16
+#define SQL_DELETE_SEGMENTS_RANGE 17
+#define SQL_CONTENT_INSERT 18
+#define SQL_DELETE_DOCSIZE 19
+#define SQL_REPLACE_DOCSIZE 20
+#define SQL_SELECT_DOCSIZE 21
+#define SQL_SELECT_STAT 22
+#define SQL_REPLACE_STAT 23
+
+#define SQL_SELECT_ALL_PREFIX_LEVEL 24
+#define SQL_DELETE_ALL_TERMS_SEGDIR 25
+#define SQL_DELETE_SEGDIR_RANGE 26
+#define SQL_SELECT_ALL_LANGID 27
+#define SQL_FIND_MERGE_LEVEL 28
+#define SQL_MAX_LEAF_NODE_ESTIMATE 29
+#define SQL_DELETE_SEGDIR_ENTRY 30
+#define SQL_SHIFT_SEGDIR_ENTRY 31
+#define SQL_SELECT_SEGDIR 32
+#define SQL_CHOMP_SEGDIR 33
+#define SQL_SEGMENT_IS_APPENDABLE 34
+#define SQL_SELECT_INDEXES 35
+#define SQL_SELECT_MXLEVEL 36
+
+#define SQL_SELECT_LEVEL_RANGE2 37
+#define SQL_UPDATE_LEVEL_IDX 38
+#define SQL_UPDATE_LEVEL 39
+
+/*
+** This function is used to obtain an SQLite prepared statement handle
+** for the statement identified by the second argument. If successful,
+** *pp is set to the requested statement handle and SQLITE_OK returned.
+** Otherwise, an SQLite error code is returned and *pp is set to 0.
+**
+** If argument apVal is not NULL, then it must point to an array with
+** at least as many entries as the requested statement has bound
+** parameters. The values are bound to the statement's parameters before
+** returning.
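+**
+** A typical use looks like this (a sketch; fts3SqlExec() below wraps
+** exactly this pattern):
+**
+**   sqlite3_stmt *pStmt = 0;
+**   rc = fts3SqlStmt(p, SQL_DELETE_ALL_SEGMENTS, &pStmt, 0);
+**   if( rc==SQLITE_OK ){
+**     sqlite3_step(pStmt);
+**     rc = sqlite3_reset(pStmt);
+**   }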
+*/
+static int fts3SqlStmt(
+ Fts3Table *p, /* Virtual table handle */
+ int eStmt, /* One of the SQL_XXX constants above */
+ sqlite3_stmt **pp, /* OUT: Statement handle */
+ sqlite3_value **apVal /* Values to bind to statement */
+){
+ const char *azSql[] = {
+/* 0 */ "DELETE FROM %Q.'%q_content' WHERE rowid = ?",
+/* 1 */ "SELECT NOT EXISTS(SELECT docid FROM %Q.'%q_content' WHERE rowid!=?)",
+/* 2 */ "DELETE FROM %Q.'%q_content'",
+/* 3 */ "DELETE FROM %Q.'%q_segments'",
+/* 4 */ "DELETE FROM %Q.'%q_segdir'",
+/* 5 */ "DELETE FROM %Q.'%q_docsize'",
+/* 6 */ "DELETE FROM %Q.'%q_stat'",
+/* 7 */ "SELECT %s WHERE rowid=?",
+/* 8 */ "SELECT (SELECT max(idx) FROM %Q.'%q_segdir' WHERE level = ?) + 1",
+/* 9 */ "REPLACE INTO %Q.'%q_segments'(blockid, block) VALUES(?, ?)",
+/* 10 */ "SELECT coalesce((SELECT max(blockid) FROM %Q.'%q_segments') + 1, 1)",
+/* 11 */ "REPLACE INTO %Q.'%q_segdir' VALUES(?,?,?,?,?,?)",
+
+ /* Return segments in order from oldest to newest.*/
+/* 12 */ "SELECT idx, start_block, leaves_end_block, end_block, root "
+ "FROM %Q.'%q_segdir' WHERE level = ? ORDER BY idx ASC",
+/* 13 */ "SELECT idx, start_block, leaves_end_block, end_block, root "
+           "FROM %Q.'%q_segdir' WHERE level BETWEEN ? AND ? "
+ "ORDER BY level DESC, idx ASC",
+
+/* 14 */ "SELECT count(*) FROM %Q.'%q_segdir' WHERE level = ?",
+/* 15 */ "SELECT max(level) FROM %Q.'%q_segdir' WHERE level BETWEEN ? AND ?",
+
+/* 16 */ "DELETE FROM %Q.'%q_segdir' WHERE level = ?",
+/* 17 */ "DELETE FROM %Q.'%q_segments' WHERE blockid BETWEEN ? AND ?",
+/* 18 */ "INSERT INTO %Q.'%q_content' VALUES(%s)",
+/* 19 */ "DELETE FROM %Q.'%q_docsize' WHERE docid = ?",
+/* 20 */ "REPLACE INTO %Q.'%q_docsize' VALUES(?,?)",
+/* 21 */ "SELECT size FROM %Q.'%q_docsize' WHERE docid=?",
+/* 22 */ "SELECT value FROM %Q.'%q_stat' WHERE id=?",
+/* 23 */ "REPLACE INTO %Q.'%q_stat' VALUES(?,?)",
+/* 24 */ "",
+/* 25 */ "",
+
+/* 26 */ "DELETE FROM %Q.'%q_segdir' WHERE level BETWEEN ? AND ?",
+/* 27 */ "SELECT ? UNION SELECT level / (1024 * ?) FROM %Q.'%q_segdir'",
+
+/* This statement is used to determine which level to read the input from
+** when performing an incremental merge. It returns the absolute level number
+** of the oldest level in the db that contains at least ? segments. Or,
+** if no level in the FTS index contains at least ? segments, the statement
+** returns zero rows. */
+/* 28 */ "SELECT level, count(*) AS cnt FROM %Q.'%q_segdir' "
+ " GROUP BY level HAVING cnt>=?"
+ " ORDER BY (level %% 1024) ASC, 2 DESC LIMIT 1",
+
+/* Estimate the upper limit on the number of leaf nodes in a new segment
+** created by merging the oldest :2 segments from absolute level :1. See
+** function sqlite3Fts3Incrmerge() for details. */
+/* 29 */ "SELECT 2 * total(1 + leaves_end_block - start_block) "
+ " FROM (SELECT * FROM %Q.'%q_segdir' "
+ " WHERE level = ? ORDER BY idx ASC LIMIT ?"
+ " )",
+
+/* SQL_DELETE_SEGDIR_ENTRY
+** Delete the %_segdir entry on absolute level :1 with index :2. */
+/* 30 */ "DELETE FROM %Q.'%q_segdir' WHERE level = ? AND idx = ?",
+
+/* SQL_SHIFT_SEGDIR_ENTRY
+** Modify the idx value for the segment with idx=:3 on absolute level :2
+** to :1. */
+/* 31 */ "UPDATE %Q.'%q_segdir' SET idx = ? WHERE level=? AND idx=?",
+
+/* SQL_SELECT_SEGDIR
+** Read a single entry from the %_segdir table. The entry from absolute
+** level :1 with index value :2. */
+/* 32 */ "SELECT idx, start_block, leaves_end_block, end_block, root "
+ "FROM %Q.'%q_segdir' WHERE level = ? AND idx = ?",
+
+/* SQL_CHOMP_SEGDIR
+** Update the start_block (:1) and root (:2) fields of the %_segdir
+** entry located on absolute level :3 with index :4. */
+/* 33 */ "UPDATE %Q.'%q_segdir' SET start_block = ?, root = ? "
+ "WHERE level = ? AND idx = ?",
+
+/* SQL_SEGMENT_IS_APPENDABLE
+** Return a single row if the segment with end_block=? is appendable. Or
+** no rows otherwise. */
+/* 34 */ "SELECT 1 FROM %Q.'%q_segments' WHERE blockid=? AND block IS NULL",
+
+/* SQL_SELECT_INDEXES
+** Return the list of valid segment indexes for absolute level ? */
+/* 35 */ "SELECT idx FROM %Q.'%q_segdir' WHERE level=? ORDER BY 1 ASC",
+
+/* SQL_SELECT_MXLEVEL
+** Return the largest relative level in the FTS index or indexes. */
+/* 36 */ "SELECT max( level %% 1024 ) FROM %Q.'%q_segdir'",
+
+ /* Return segments in order from oldest to newest.*/
+/* 37 */ "SELECT level, idx, end_block "
+ "FROM %Q.'%q_segdir' WHERE level BETWEEN ? AND ? "
+ "ORDER BY level DESC, idx ASC",
+
+ /* Update statements used while promoting segments */
+/* 38 */ "UPDATE OR FAIL %Q.'%q_segdir' SET level=-1,idx=? "
+ "WHERE level=? AND idx=?",
+/* 39 */ "UPDATE OR FAIL %Q.'%q_segdir' SET level=? WHERE level=-1"
+
+ };
+ int rc = SQLITE_OK;
+ sqlite3_stmt *pStmt;
+
+ assert( SizeofArray(azSql)==SizeofArray(p->aStmt) );
+ assert( eStmt<SizeofArray(azSql) && eStmt>=0 );
+
+ pStmt = p->aStmt[eStmt];
+ if( !pStmt ){
+ int f = SQLITE_PREPARE_PERSISTENT|SQLITE_PREPARE_NO_VTAB;
+ char *zSql;
+ if( eStmt==SQL_CONTENT_INSERT ){
+ zSql = sqlite3_mprintf(azSql[eStmt], p->zDb, p->zName, p->zWriteExprlist);
+ }else if( eStmt==SQL_SELECT_CONTENT_BY_ROWID ){
+ f &= ~SQLITE_PREPARE_NO_VTAB;
+ zSql = sqlite3_mprintf(azSql[eStmt], p->zReadExprlist);
+ }else{
+ zSql = sqlite3_mprintf(azSql[eStmt], p->zDb, p->zName);
+ }
+ if( !zSql ){
+ rc = SQLITE_NOMEM;
+ }else{
+ rc = sqlite3_prepare_v3(p->db, zSql, -1, f, &pStmt, NULL);
+ sqlite3_free(zSql);
+ assert( rc==SQLITE_OK || pStmt==0 );
+ p->aStmt[eStmt] = pStmt;
+ }
+ }
+ if( apVal ){
+ int i;
+ int nParam = sqlite3_bind_parameter_count(pStmt);
+ for(i=0; rc==SQLITE_OK && i<nParam; i++){
+ rc = sqlite3_bind_value(pStmt, i+1, apVal[i]);
+ }
+ }
+ *pp = pStmt;
+ return rc;
+}
+
+
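+/*
+** Set *ppStmt to a statement handle that may be used to read the "size"
+** blob from the %_docsize row with docid iDocid (SQL_SELECT_DOCSIZE).
+** If no such row exists, or if its value is not a blob, *ppStmt is set
+** to 0 and an error code (usually FTS_CORRUPT_VTAB) is returned.
+*/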
+static int fts3SelectDocsize(
+ Fts3Table *pTab, /* FTS3 table handle */
+ sqlite3_int64 iDocid, /* Docid to bind for SQL_SELECT_DOCSIZE */
+ sqlite3_stmt **ppStmt /* OUT: Statement handle */
+){
+ sqlite3_stmt *pStmt = 0; /* Statement requested from fts3SqlStmt() */
+ int rc; /* Return code */
+
+ rc = fts3SqlStmt(pTab, SQL_SELECT_DOCSIZE, &pStmt, 0);
+ if( rc==SQLITE_OK ){
+ sqlite3_bind_int64(pStmt, 1, iDocid);
+ rc = sqlite3_step(pStmt);
+ if( rc!=SQLITE_ROW || sqlite3_column_type(pStmt, 0)!=SQLITE_BLOB ){
+ rc = sqlite3_reset(pStmt);
+ if( rc==SQLITE_OK ) rc = FTS_CORRUPT_VTAB;
+ pStmt = 0;
+ }else{
+ rc = SQLITE_OK;
+ }
+ }
+
+ *ppStmt = pStmt;
+ return rc;
+}
+
+SQLITE_PRIVATE int sqlite3Fts3SelectDoctotal(
+ Fts3Table *pTab, /* Fts3 table handle */
+ sqlite3_stmt **ppStmt /* OUT: Statement handle */
+){
+ sqlite3_stmt *pStmt = 0;
+ int rc;
+ rc = fts3SqlStmt(pTab, SQL_SELECT_STAT, &pStmt, 0);
+ if( rc==SQLITE_OK ){
+ sqlite3_bind_int(pStmt, 1, FTS_STAT_DOCTOTAL);
+ if( sqlite3_step(pStmt)!=SQLITE_ROW
+ || sqlite3_column_type(pStmt, 0)!=SQLITE_BLOB
+ ){
+ rc = sqlite3_reset(pStmt);
+ if( rc==SQLITE_OK ) rc = FTS_CORRUPT_VTAB;
+ pStmt = 0;
+ }
+ }
+ *ppStmt = pStmt;
+ return rc;
+}
+
+SQLITE_PRIVATE int sqlite3Fts3SelectDocsize(
+ Fts3Table *pTab, /* Fts3 table handle */
+ sqlite3_int64 iDocid, /* Docid to read size data for */
+ sqlite3_stmt **ppStmt /* OUT: Statement handle */
+){
+ return fts3SelectDocsize(pTab, iDocid, ppStmt);
+}
+
+/*
+** Similar to fts3SqlStmt(), except that after binding the parameters in
+** array apVal[] to the SQL statement identified by eStmt, the statement
+** is executed.
+**
+** If *pRC is not SQLITE_OK on entry, this function is a no-op. Otherwise,
+** *pRC is set to SQLITE_OK if the statement executes successfully, or to
+** an SQLite error code if it does not.
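+**
+** Usage sketch (later calls become no-ops once an error is recorded):
+**
+**   int rc = SQLITE_OK;
+**   fts3SqlExec(&rc, p, SQL_DELETE_ALL_SEGMENTS, 0);
+**   fts3SqlExec(&rc, p, SQL_DELETE_ALL_SEGDIR, 0);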
+*/
+static void fts3SqlExec(
+ int *pRC, /* Result code */
+ Fts3Table *p, /* The FTS3 table */
+ int eStmt, /* Index of statement to evaluate */
+ sqlite3_value **apVal /* Parameters to bind */
+){
+ sqlite3_stmt *pStmt;
+ int rc;
+ if( *pRC ) return;
+ rc = fts3SqlStmt(p, eStmt, &pStmt, apVal);
+ if( rc==SQLITE_OK ){
+ sqlite3_step(pStmt);
+ rc = sqlite3_reset(pStmt);
+ }
+ *pRC = rc;
+}
+
+
+/*
+** This function ensures that the caller has obtained an exclusive
+** shared-cache table-lock on the %_segdir table. This is required before
+** writing data to the fts3 table. If this lock is not acquired first, then
+** the caller may end up attempting to take this lock as part of committing
+** a transaction, causing SQLite to return SQLITE_LOCKED or
+** SQLITE_LOCKED_SHAREDCACHE to a COMMIT command.
+**
+** It is best to avoid this because if FTS3 returns any error when
+** committing a transaction, the whole transaction will be rolled back.
+** And this is not what users expect when they get SQLITE_LOCKED_SHAREDCACHE.
+** It can still happen if the user locks the underlying tables directly
+** instead of accessing them via FTS.
+*/
+static int fts3Writelock(Fts3Table *p){
+ int rc = SQLITE_OK;
+
+ if( p->nPendingData==0 ){
+ sqlite3_stmt *pStmt;
+ rc = fts3SqlStmt(p, SQL_DELETE_SEGDIR_LEVEL, &pStmt, 0);
+ if( rc==SQLITE_OK ){
+ sqlite3_bind_null(pStmt, 1);
+ sqlite3_step(pStmt);
+ rc = sqlite3_reset(pStmt);
+ }
+ }
+
+ return rc;
+}
+
+/*
+** FTS maintains a separate index for each language-id (a 32-bit integer).
+** Within each language-id, one index is maintained for the document
+** terms, plus one for each configured prefix size (configured via the
+** FTS "prefix=" option). Each index consists of multiple levels
+** ("relative levels").
+**
+** All three of these values (the language id, the specific index and the
+** level within the index) are encoded in 64-bit integer values stored
+** in the %_segdir table on disk. This function is used to convert three
+** separate component values into the single 64-bit integer value that
+** can be used to query the %_segdir table.
+**
+** Specifically, each language-id/index combination is allocated 1024
+** 64-bit integer level values ("absolute levels"). The main terms index
+** for language-id 0 is allocated values 0-1023. The first prefix index
+** (if any) for language-id 0 is allocated values 1024-2047. And so on.
+** Language 1 indexes are allocated immediately following language 0.
+**
+** So, for a system with nPrefix prefix indexes configured, the block of
+** absolute levels that corresponds to language-id iLangid and index
+** iIndex starts at absolute level ((iLangid * (nPrefix+1) + iIndex) * 1024).
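+**
+** For example, in a database with two prefix indexes configured
+** (nPrefix==2, so three indexes in total), the main terms index
+** (iIndex==0) for language-id 1 occupies absolute levels
+** ((1*3+0)*1024)==3072 through 4095.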
+*/
+static sqlite3_int64 getAbsoluteLevel(
+ Fts3Table *p, /* FTS3 table handle */
+ int iLangid, /* Language id */
+ int iIndex, /* Index in p->aIndex[] */
+ int iLevel /* Level of segments */
+){
+ sqlite3_int64 iBase; /* First absolute level for iLangid/iIndex */
+ assert_fts3_nc( iLangid>=0 );
+ assert( p->nIndex>0 );
+ assert( iIndex>=0 && iIndex<p->nIndex );
+
+ iBase = ((sqlite3_int64)iLangid * p->nIndex + iIndex) * FTS3_SEGDIR_MAXLEVEL;
+ return iBase + iLevel;
+}
+
+/*
+** Set *ppStmt to a statement handle that may be used to iterate through
+** all rows in the %_segdir table, from oldest to newest. If successful,
+** return SQLITE_OK. If an error occurs while preparing the statement,
+** return an SQLite error code.
+**
+** There is only ever one instance of this SQL statement compiled for
+** each FTS3 table.
+**
+** The statement returns the following columns from the %_segdir table:
+**
+** 0: idx
+** 1: start_block
+** 2: leaves_end_block
+** 3: end_block
+** 4: root
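+**
+** The returned handle is cached by fts3SqlStmt(). After stepping through
+** its rows the caller should sqlite3_reset() it rather than finalize it.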
+*/
+SQLITE_PRIVATE int sqlite3Fts3AllSegdirs(
+ Fts3Table *p, /* FTS3 table */
+ int iLangid, /* Language being queried */
+ int iIndex, /* Index for p->aIndex[] */
+ int iLevel, /* Level to select (relative level) */
+ sqlite3_stmt **ppStmt /* OUT: Compiled statement */
+){
+ int rc;
+ sqlite3_stmt *pStmt = 0;
+
+ assert( iLevel==FTS3_SEGCURSOR_ALL || iLevel>=0 );
+ assert( iLevel<FTS3_SEGDIR_MAXLEVEL );
+ assert( iIndex>=0 && iIndex<p->nIndex );
+
+ if( iLevel<0 ){
+ /* "SELECT * FROM %_segdir WHERE level BETWEEN ? AND ? ORDER BY ..." */
+ rc = fts3SqlStmt(p, SQL_SELECT_LEVEL_RANGE, &pStmt, 0);
+ if( rc==SQLITE_OK ){
+ sqlite3_bind_int64(pStmt, 1, getAbsoluteLevel(p, iLangid, iIndex, 0));
+ sqlite3_bind_int64(pStmt, 2,
+ getAbsoluteLevel(p, iLangid, iIndex, FTS3_SEGDIR_MAXLEVEL-1)
+ );
+ }
+ }else{
+ /* "SELECT * FROM %_segdir WHERE level = ? ORDER BY ..." */
+ rc = fts3SqlStmt(p, SQL_SELECT_LEVEL, &pStmt, 0);
+ if( rc==SQLITE_OK ){
+ sqlite3_bind_int64(pStmt, 1, getAbsoluteLevel(p, iLangid, iIndex,iLevel));
+ }
+ }
+ *ppStmt = pStmt;
+ return rc;
+}
+
+
+/*
+** Append a single varint to a PendingList buffer. SQLITE_OK is returned
+** if successful, or an SQLite error code otherwise.
+**
+** This function also serves to allocate the PendingList structure itself.
+** For example, to create a new PendingList structure containing two
+** varints:
+**
+** PendingList *p = 0;
+** fts3PendingListAppendVarint(&p, 1);
+** fts3PendingListAppendVarint(&p, 2);
+*/
+static int fts3PendingListAppendVarint(
+ PendingList **pp, /* IN/OUT: Pointer to PendingList struct */
+ sqlite3_int64 i /* Value to append to data */
+){
+ PendingList *p = *pp;
+
+ /* Allocate or grow the PendingList as required. */
+ if( !p ){
+ p = sqlite3_malloc(sizeof(*p) + 100);
+ if( !p ){
+ return SQLITE_NOMEM;
+ }
+ p->nSpace = 100;
+ p->aData = (char *)&p[1];
+ p->nData = 0;
+ }
+ else if( p->nData+FTS3_VARINT_MAX+1>p->nSpace ){
+ int nNew = p->nSpace * 2;
+ p = sqlite3_realloc(p, sizeof(*p) + nNew);
+ if( !p ){
+ sqlite3_free(*pp);
+ *pp = 0;
+ return SQLITE_NOMEM;
+ }
+ p->nSpace = nNew;
+ p->aData = (char *)&p[1];
+ }
+
+ /* Append the new serialized varint to the end of the list. */
+ p->nData += sqlite3Fts3PutVarint(&p->aData[p->nData], i);
+ p->aData[p->nData] = '\0';
+ *pp = p;
+ return SQLITE_OK;
+}
+
+/*
+** Add a docid/column/position entry to a PendingList structure. Non-zero
+** is returned if the structure is sqlite3_realloced as part of adding
+** the entry. Otherwise, zero.
+**
+** If an OOM error occurs, *pRc is set to SQLITE_NOMEM before returning,
+** and zero is always returned in this case. Otherwise, if no OOM error
+** occurs, *pRc is set to SQLITE_OK.
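+**
+** Usage sketch (see fts3PendingTermsAddOne() below): a non-zero return
+** means the structure may have moved in memory, so any stored pointers
+** to it must be refreshed:
+**
+**   if( fts3PendingListAppend(&pList, iDocid, iCol, iPos, &rc) ){
+**     ...
+**   }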
+*/
+static int fts3PendingListAppend(
+ PendingList **pp, /* IN/OUT: PendingList structure */
+ sqlite3_int64 iDocid, /* Docid for entry to add */
+ sqlite3_int64 iCol, /* Column for entry to add */
+ sqlite3_int64 iPos, /* Position of term for entry to add */
+ int *pRc /* OUT: Return code */
+){
+ PendingList *p = *pp;
+ int rc = SQLITE_OK;
+
+ assert( !p || p->iLastDocid<=iDocid );
+
+ if( !p || p->iLastDocid!=iDocid ){
+ u64 iDelta = (u64)iDocid - (u64)(p ? p->iLastDocid : 0);
+ if( p ){
+ assert( p->nData<p->nSpace );
+ assert( p->aData[p->nData]==0 );
+ p->nData++;
+ }
+ if( SQLITE_OK!=(rc = fts3PendingListAppendVarint(&p, iDelta)) ){
+ goto pendinglistappend_out;
+ }
+ p->iLastCol = -1;
+ p->iLastPos = 0;
+ p->iLastDocid = iDocid;
+ }
+ if( iCol>0 && p->iLastCol!=iCol ){
+ if( SQLITE_OK!=(rc = fts3PendingListAppendVarint(&p, 1))
+ || SQLITE_OK!=(rc = fts3PendingListAppendVarint(&p, iCol))
+ ){
+ goto pendinglistappend_out;
+ }
+ p->iLastCol = iCol;
+ p->iLastPos = 0;
+ }
+ if( iCol>=0 ){
+ assert( iPos>p->iLastPos || (iPos==0 && p->iLastPos==0) );
+ rc = fts3PendingListAppendVarint(&p, 2+iPos-p->iLastPos);
+ if( rc==SQLITE_OK ){
+ p->iLastPos = iPos;
+ }
+ }
+
+ pendinglistappend_out:
+ *pRc = rc;
+ if( p!=*pp ){
+ *pp = p;
+ return 1;
+ }
+ return 0;
+}
+
+/*
+** Free a PendingList object allocated by fts3PendingListAppend().
+*/
+static void fts3PendingListDelete(PendingList *pList){
+ sqlite3_free(pList);
+}
+
+/*
+** Add an entry to one of the pending-terms hash tables.
+*/
+static int fts3PendingTermsAddOne(
+ Fts3Table *p,
+ int iCol,
+ int iPos,
+ Fts3Hash *pHash, /* Pending terms hash table to add entry to */
+ const char *zToken,
+ int nToken
+){
+ PendingList *pList;
+ int rc = SQLITE_OK;
+
+ pList = (PendingList *)fts3HashFind(pHash, zToken, nToken);
+ if( pList ){
+ p->nPendingData -= (pList->nData + nToken + sizeof(Fts3HashElem));
+ }
+ if( fts3PendingListAppend(&pList, p->iPrevDocid, iCol, iPos, &rc) ){
+ if( pList==fts3HashInsert(pHash, zToken, nToken, pList) ){
+ /* Malloc failed while inserting the new entry. This can only
+ ** happen if there was no previous entry for this token.
+ */
+ assert( 0==fts3HashFind(pHash, zToken, nToken) );
+ sqlite3_free(pList);
+ rc = SQLITE_NOMEM;
+ }
+ }
+ if( rc==SQLITE_OK ){
+ p->nPendingData += (pList->nData + nToken + sizeof(Fts3HashElem));
+ }
+ return rc;
+}
+
+/*
+** Tokenize the nul-terminated string zText and add all tokens to the
+** pending-terms hash-table. The docid used is that currently stored in
+** p->iPrevDocid, and the column is specified by argument iCol.
+**
+** If successful, SQLITE_OK is returned. Otherwise, an SQLite error code.
+*/
+static int fts3PendingTermsAdd(
+ Fts3Table *p, /* Table into which text will be inserted */
+ int iLangid, /* Language id to use */
+ const char *zText, /* Text of document to be inserted */
+ int iCol, /* Column into which text is being inserted */
+  u32 *pnWord                     /* IN/OUT: Incremented by number of tokens inserted */
+){
+ int rc;
+ int iStart = 0;
+ int iEnd = 0;
+ int iPos = 0;
+ int nWord = 0;
+
+ char const *zToken;
+ int nToken = 0;
+
+ sqlite3_tokenizer *pTokenizer = p->pTokenizer;
+ sqlite3_tokenizer_module const *pModule = pTokenizer->pModule;
+ sqlite3_tokenizer_cursor *pCsr;
+ int (*xNext)(sqlite3_tokenizer_cursor *pCursor,
+ const char**,int*,int*,int*,int*);
+
+ assert( pTokenizer && pModule );
+
+ /* If the user has inserted a NULL value, this function may be called with
+ ** zText==0. In this case, add zero token entries to the hash table and
+ ** return early. */
+ if( zText==0 ){
+ *pnWord = 0;
+ return SQLITE_OK;
+ }
+
+ rc = sqlite3Fts3OpenTokenizer(pTokenizer, iLangid, zText, -1, &pCsr);
+ if( rc!=SQLITE_OK ){
+ return rc;
+ }
+
+ xNext = pModule->xNext;
+ while( SQLITE_OK==rc
+ && SQLITE_OK==(rc = xNext(pCsr, &zToken, &nToken, &iStart, &iEnd, &iPos))
+ ){
+ int i;
+ if( iPos>=nWord ) nWord = iPos+1;
+
+ /* Positions cannot be negative; we use -1 as a terminator internally.
+ ** Tokens must have a non-zero length.
+ */
+ if( iPos<0 || !zToken || nToken<=0 ){
+ rc = SQLITE_ERROR;
+ break;
+ }
+
+ /* Add the term to the terms index */
+ rc = fts3PendingTermsAddOne(
+ p, iCol, iPos, &p->aIndex[0].hPending, zToken, nToken
+ );
+
+ /* Add the term to each of the prefix indexes that it is not too
+ ** short for. */
+ for(i=1; rc==SQLITE_OK && i<p->nIndex; i++){
+ struct Fts3Index *pIndex = &p->aIndex[i];
+ if( nToken<pIndex->nPrefix ) continue;
+ rc = fts3PendingTermsAddOne(
+ p, iCol, iPos, &pIndex->hPending, zToken, pIndex->nPrefix
+ );
+ }
+ }
+
+ pModule->xClose(pCsr);
+ *pnWord += nWord;
+ return (rc==SQLITE_DONE ? SQLITE_OK : rc);
+}
+
+/*
+** Calling this function indicates that subsequent calls to
+** fts3PendingTermsAdd() are to add term/position-list pairs for the
+** contents of the document with docid iDocid.
+*/
+static int fts3PendingTermsDocid(
+ Fts3Table *p, /* Full-text table handle */
+ int bDelete, /* True if this op is a delete */
+ int iLangid, /* Language id of row being written */
+ sqlite_int64 iDocid /* Docid of row being written */
+){
+ assert( iLangid>=0 );
+ assert( bDelete==1 || bDelete==0 );
+
+ /* TODO(shess) Explore whether partially flushing the buffer on
+ ** forced-flush would provide better performance. I suspect that if
+ ** we ordered the doclists by size and flushed the largest until the
+ ** buffer was half empty, that would let the less frequent terms
+ ** generate longer doclists.
+ */
+ if( iDocid<p->iPrevDocid
+ || (iDocid==p->iPrevDocid && p->bPrevDelete==0)
+ || p->iPrevLangid!=iLangid
+ || p->nPendingData>p->nMaxPendingData
+ ){
+ int rc = sqlite3Fts3PendingTermsFlush(p);
+ if( rc!=SQLITE_OK ) return rc;
+ }
+ p->iPrevDocid = iDocid;
+ p->iPrevLangid = iLangid;
+ p->bPrevDelete = bDelete;
+ return SQLITE_OK;
+}
+
+/*
+** Discard the contents of the pending-terms hash tables.
+*/
+SQLITE_PRIVATE void sqlite3Fts3PendingTermsClear(Fts3Table *p){
+ int i;
+ for(i=0; i<p->nIndex; i++){
+ Fts3HashElem *pElem;
+ Fts3Hash *pHash = &p->aIndex[i].hPending;
+ for(pElem=fts3HashFirst(pHash); pElem; pElem=fts3HashNext(pElem)){
+ PendingList *pList = (PendingList *)fts3HashData(pElem);
+ fts3PendingListDelete(pList);
+ }
+ fts3HashClear(pHash);
+ }
+ p->nPendingData = 0;
+}
+
+/*
+** This function is called by the xUpdate() method as part of an INSERT
+** operation. It adds entries for each term in the new record to the
+** pendingTerms hash table.
+**
+** Argument apVal is the same as the similarly named argument passed to
+** fts3InsertData(). Parameter iDocid is the docid of the new row.
+*/
+static int fts3InsertTerms(
+ Fts3Table *p,
+ int iLangid,
+ sqlite3_value **apVal,
+ u32 *aSz
+){
+ int i; /* Iterator variable */
+ for(i=2; i<p->nColumn+2; i++){
+ int iCol = i-2;
+ if( p->abNotindexed[iCol]==0 ){
+ const char *zText = (const char *)sqlite3_value_text(apVal[i]);
+ int rc = fts3PendingTermsAdd(p, iLangid, zText, iCol, &aSz[iCol]);
+ if( rc!=SQLITE_OK ){
+ return rc;
+ }
+ aSz[p->nColumn] += sqlite3_value_bytes(apVal[i]);
+ }
+ }
+ return SQLITE_OK;
+}
+
+/*
+** This function is called by the xUpdate() method for an INSERT operation.
+** The apVal parameter is passed a copy of the apVal argument passed by
+** SQLite to the xUpdate() method, i.e.:
+**
+** apVal[0] Not used for INSERT.
+** apVal[1] rowid
+** apVal[2] Left-most user-defined column
+** ...
+** apVal[p->nColumn+1] Right-most user-defined column
+** apVal[p->nColumn+2] Hidden column with same name as table
+** apVal[p->nColumn+3] Hidden "docid" column (alias for rowid)
+** apVal[p->nColumn+4] Hidden languageid column
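+**
+** For example (illustrative), for a table created using
+** "CREATE VIRTUAL TABLE t USING fts4(a, b)" (so p->nColumn==2):
+**
+**   apVal[2]   column "a"
+**   apVal[3]   column "b"
+**   apVal[4]   hidden column "t"
+**   apVal[5]   hidden "docid" column
+**   apVal[6]   hidden "languageid" column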
+*/
+static int fts3InsertData(
+ Fts3Table *p, /* Full-text table */
+ sqlite3_value **apVal, /* Array of values to insert */
+ sqlite3_int64 *piDocid /* OUT: Docid for row just inserted */
+){
+ int rc; /* Return code */
+ sqlite3_stmt *pContentInsert; /* INSERT INTO %_content VALUES(...) */
+
+ if( p->zContentTbl ){
+ sqlite3_value *pRowid = apVal[p->nColumn+3];
+ if( sqlite3_value_type(pRowid)==SQLITE_NULL ){
+ pRowid = apVal[1];
+ }
+ if( sqlite3_value_type(pRowid)!=SQLITE_INTEGER ){
+ return SQLITE_CONSTRAINT;
+ }
+ *piDocid = sqlite3_value_int64(pRowid);
+ return SQLITE_OK;
+ }
+
+ /* Locate the statement handle used to insert data into the %_content
+ ** table. The SQL for this statement is:
+ **
+ ** INSERT INTO %_content VALUES(?, ?, ?, ...)
+ **
+ ** The statement features N '?' variables, where N is the number of user
+ ** defined columns in the FTS3 table, plus one for the docid field.
+ */
+ rc = fts3SqlStmt(p, SQL_CONTENT_INSERT, &pContentInsert, &apVal[1]);
+ if( rc==SQLITE_OK && p->zLanguageid ){
+ rc = sqlite3_bind_int(
+ pContentInsert, p->nColumn+2,
+ sqlite3_value_int(apVal[p->nColumn+4])
+ );
+ }
+ if( rc!=SQLITE_OK ) return rc;
+
+  /* There is a quirk here. The user's INSERT statement may have specified
+ ** a value for the "rowid" field, for the "docid" field, or for both.
+ ** Which is a problem, since "rowid" and "docid" are aliases for the
+ ** same value. For example:
+ **
+ ** INSERT INTO fts3tbl(rowid, docid) VALUES(1, 2);
+ **
+ ** In FTS3, this is an error. It is an error to specify non-NULL values
+ ** for both docid and some other rowid alias.
+ */
+ if( SQLITE_NULL!=sqlite3_value_type(apVal[3+p->nColumn]) ){
+ if( SQLITE_NULL==sqlite3_value_type(apVal[0])
+ && SQLITE_NULL!=sqlite3_value_type(apVal[1])
+ ){
+ /* A rowid/docid conflict. */
+ return SQLITE_ERROR;
+ }
+ rc = sqlite3_bind_value(pContentInsert, 1, apVal[3+p->nColumn]);
+ if( rc!=SQLITE_OK ) return rc;
+ }
+
+ /* Execute the statement to insert the record. Set *piDocid to the
+ ** new docid value.
+ */
+ sqlite3_step(pContentInsert);
+ rc = sqlite3_reset(pContentInsert);
+
+ *piDocid = sqlite3_last_insert_rowid(p->db);
+ return rc;
+}
+
+
+
+/*
+** Remove all data from the FTS3 table. Clear the hash table containing
+** pending terms.
+*/
+static int fts3DeleteAll(Fts3Table *p, int bContent){
+ int rc = SQLITE_OK; /* Return code */
+
+ /* Discard the contents of the pending-terms hash table. */
+ sqlite3Fts3PendingTermsClear(p);
+
+ /* Delete everything from the shadow tables. Except, leave %_content as
+ ** is if bContent is false. */
+ assert( p->zContentTbl==0 || bContent==0 );
+ if( bContent ) fts3SqlExec(&rc, p, SQL_DELETE_ALL_CONTENT, 0);
+ fts3SqlExec(&rc, p, SQL_DELETE_ALL_SEGMENTS, 0);
+ fts3SqlExec(&rc, p, SQL_DELETE_ALL_SEGDIR, 0);
+ if( p->bHasDocsize ){
+ fts3SqlExec(&rc, p, SQL_DELETE_ALL_DOCSIZE, 0);
+ }
+ if( p->bHasStat ){
+ fts3SqlExec(&rc, p, SQL_DELETE_ALL_STAT, 0);
+ }
+ return rc;
+}
+
+/*
+** Return the language-id of the row that compiled statement pSelect
+** currently points to. If the table has no languageid column, this
+** is always 0.
+*/
+static int langidFromSelect(Fts3Table *p, sqlite3_stmt *pSelect){
+ int iLangid = 0;
+ if( p->zLanguageid ) iLangid = sqlite3_column_int(pSelect, p->nColumn+1);
+ return iLangid;
+}
+
+/*
+** Argument pRowid is the docid (an integer) of a row about to be
+** deleted. Remove all terms belonging to that row from the
+** full-text index.
+*/
+static void fts3DeleteTerms(
+ int *pRC, /* Result code */
+ Fts3Table *p, /* The FTS table to delete from */
+ sqlite3_value *pRowid, /* The docid to be deleted */
+ u32 *aSz, /* Sizes of deleted document written here */
+ int *pbFound /* OUT: Set to true if row really does exist */
+){
+ int rc;
+ sqlite3_stmt *pSelect;
+
+ assert( *pbFound==0 );
+ if( *pRC ) return;
+ rc = fts3SqlStmt(p, SQL_SELECT_CONTENT_BY_ROWID, &pSelect, &pRowid);
+ if( rc==SQLITE_OK ){
+ if( SQLITE_ROW==sqlite3_step(pSelect) ){
+ int i;
+ int iLangid = langidFromSelect(p, pSelect);
+ i64 iDocid = sqlite3_column_int64(pSelect, 0);
+ rc = fts3PendingTermsDocid(p, 1, iLangid, iDocid);
+ for(i=1; rc==SQLITE_OK && i<=p->nColumn; i++){
+ int iCol = i-1;
+ if( p->abNotindexed[iCol]==0 ){
+ const char *zText = (const char *)sqlite3_column_text(pSelect, i);
+ rc = fts3PendingTermsAdd(p, iLangid, zText, -1, &aSz[iCol]);
+ aSz[p->nColumn] += sqlite3_column_bytes(pSelect, i);
+ }
+ }
+ if( rc!=SQLITE_OK ){
+ sqlite3_reset(pSelect);
+ *pRC = rc;
+ return;
+ }
+ *pbFound = 1;
+ }
+ rc = sqlite3_reset(pSelect);
+ }else{
+ sqlite3_reset(pSelect);
+ }
+ *pRC = rc;
+}
+
+/*
+** Forward declaration to account for the circular dependency between
+** functions fts3SegmentMerge() and fts3AllocateSegdirIdx().
+*/
+static int fts3SegmentMerge(Fts3Table *, int, int, int);
+
+/*
+** This function allocates a new level iLevel index in the segdir table.
+** Usually, indexes are allocated within a level sequentially starting
+** with 0, so the allocated index is one greater than the value returned
+** by:
+**
+** SELECT max(idx) FROM %_segdir WHERE level = :iLevel
+**
+** However, if there are already FTS3_MERGE_COUNT indexes at the requested
+** level, they are merged into a single level (iLevel+1) segment and the
+** allocated index is 0.
+**
+** If successful, *piIdx is set to the allocated index slot and SQLITE_OK
+** returned. Otherwise, an SQLite error code is returned.
+*/
+static int fts3AllocateSegdirIdx(
+ Fts3Table *p,
+ int iLangid, /* Language id */
+ int iIndex, /* Index for p->aIndex */
+ int iLevel,
+ int *piIdx
+){
+ int rc; /* Return Code */
+ sqlite3_stmt *pNextIdx; /* Query for next idx at level iLevel */
+ int iNext = 0; /* Result of query pNextIdx */
+
+ assert( iLangid>=0 );
+ assert( p->nIndex>=1 );
+
+ /* Set variable iNext to the next available segdir index at level iLevel. */
+ rc = fts3SqlStmt(p, SQL_NEXT_SEGMENT_INDEX, &pNextIdx, 0);
+ if( rc==SQLITE_OK ){
+ sqlite3_bind_int64(
+ pNextIdx, 1, getAbsoluteLevel(p, iLangid, iIndex, iLevel)
+ );
+ if( SQLITE_ROW==sqlite3_step(pNextIdx) ){
+ iNext = sqlite3_column_int(pNextIdx, 0);
+ }
+ rc = sqlite3_reset(pNextIdx);
+ }
+
+ if( rc==SQLITE_OK ){
+ /* If iNext is FTS3_MERGE_COUNT, indicating that level iLevel is already
+ ** full, merge all segments in level iLevel into a single iLevel+1
+ ** segment and allocate (newly freed) index 0 at level iLevel. Otherwise,
+ ** if iNext is less than FTS3_MERGE_COUNT, allocate index iNext.
+ */
+ if( iNext>=MergeCount(p) ){
+ fts3LogMerge(16, getAbsoluteLevel(p, iLangid, iIndex, iLevel));
+ rc = fts3SegmentMerge(p, iLangid, iIndex, iLevel);
+ *piIdx = 0;
+ }else{
+ *piIdx = iNext;
+ }
+ }
+
+ return rc;
+}
+
+/*
+** The %_segments table is declared as follows:
+**
+** CREATE TABLE %_segments(blockid INTEGER PRIMARY KEY, block BLOB)
+**
+** This function reads data from a single row of the %_segments table. The
+** specific row is identified by the iBlockid parameter. If paBlob is not
+** NULL, then a buffer is allocated using sqlite3_malloc() and populated
+** with the contents of the blob stored in the "block" column of the
+** identified table row. Whether or not paBlob is NULL, *pnBlob is set
+** to the size of the blob in bytes before returning.
+**
+** If an error occurs, or the table does not contain the specified row,
+** an SQLite error code is returned. Otherwise, SQLITE_OK is returned. If
+** paBlob is non-NULL, then it is the responsibility of the caller to
+** eventually free the returned buffer.
+**
+** This function may leave an open sqlite3_blob* handle in the
+** Fts3Table.pSegments variable. This handle is reused by subsequent calls
+** to this function. The handle may be closed by calling the
+** sqlite3Fts3SegmentsClose() function. Reusing a blob handle is a handy
+** performance improvement, but the blob handle should always be closed
+** before control is returned to the user (to prevent a lock being held
+** on the database file for longer than necessary). Thus, any virtual table
+** method (xFilter etc.) that may directly or indirectly call this function
+** must call sqlite3Fts3SegmentsClose() before returning.
+*/
+SQLITE_PRIVATE int sqlite3Fts3ReadBlock(
+ Fts3Table *p, /* FTS3 table handle */
+ sqlite3_int64 iBlockid, /* Access the row with blockid=$iBlockid */
+ char **paBlob, /* OUT: Blob data in malloc'd buffer */
+ int *pnBlob, /* OUT: Size of blob data */
+ int *pnLoad /* OUT: Bytes actually loaded */
+){
+ int rc; /* Return code */
+
+ /* pnBlob must be non-NULL. paBlob may be NULL or non-NULL. */
+ assert( pnBlob );
+
+ if( p->pSegments ){
+ rc = sqlite3_blob_reopen(p->pSegments, iBlockid);
+ }else{
+ if( 0==p->zSegmentsTbl ){
+ p->zSegmentsTbl = sqlite3_mprintf("%s_segments", p->zName);
+ if( 0==p->zSegmentsTbl ) return SQLITE_NOMEM;
+ }
+ rc = sqlite3_blob_open(
+ p->db, p->zDb, p->zSegmentsTbl, "block", iBlockid, 0, &p->pSegments
+ );
+ }
+
+ if( rc==SQLITE_OK ){
+ int nByte = sqlite3_blob_bytes(p->pSegments);
+ *pnBlob = nByte;
+ if( paBlob ){
+ char *aByte = sqlite3_malloc(nByte + FTS3_NODE_PADDING);
+ if( !aByte ){
+ rc = SQLITE_NOMEM;
+ }else{
+ if( pnLoad && nByte>(FTS3_NODE_CHUNK_THRESHOLD) ){
+ nByte = FTS3_NODE_CHUNKSIZE;
+ *pnLoad = nByte;
+ }
+ rc = sqlite3_blob_read(p->pSegments, aByte, nByte, 0);
+ memset(&aByte[nByte], 0, FTS3_NODE_PADDING);
+ if( rc!=SQLITE_OK ){
+ sqlite3_free(aByte);
+ aByte = 0;
+ }
+ }
+ *paBlob = aByte;
+ }
+ }else if( rc==SQLITE_ERROR ){
+ rc = FTS_CORRUPT_VTAB;
+ }
+
+ return rc;
+}
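+
+/* A hedged sketch (not from the original file, and compiled out) of the
+** blob-handle reuse pattern described above, expressed with the public
+** blob API. The database, table and rowid values are illustrative.
+*/
+#if 0
+static int exampleBlobReuse(sqlite3 *db){
+  sqlite3_blob *pBlob = 0;
+  int rc = sqlite3_blob_open(db, "main", "t1_segments", "block", 1, 0, &pBlob);
+  if( rc==SQLITE_OK ){
+    rc = sqlite3_blob_reopen(pBlob, 2);   /* Cheap seek to another row */
+  }
+  sqlite3_blob_close(pBlob);              /* Release the lock on the table */
+  return rc;
+}
+#endif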
+
+/*
+** Close the blob handle at p->pSegments, if it is open. See comments above
+** the sqlite3Fts3ReadBlock() function for details.
+*/
+SQLITE_PRIVATE void sqlite3Fts3SegmentsClose(Fts3Table *p){
+ sqlite3_blob_close(p->pSegments);
+ p->pSegments = 0;
+}
+
+static int fts3SegReaderIncrRead(Fts3SegReader *pReader){
+ int nRead; /* Number of bytes to read */
+ int rc; /* Return code */
+
+ nRead = MIN(pReader->nNode - pReader->nPopulate, FTS3_NODE_CHUNKSIZE);
+ rc = sqlite3_blob_read(
+ pReader->pBlob,
+ &pReader->aNode[pReader->nPopulate],
+ nRead,
+ pReader->nPopulate
+ );
+
+ if( rc==SQLITE_OK ){
+ pReader->nPopulate += nRead;
+ memset(&pReader->aNode[pReader->nPopulate], 0, FTS3_NODE_PADDING);
+ if( pReader->nPopulate==pReader->nNode ){
+ sqlite3_blob_close(pReader->pBlob);
+ pReader->pBlob = 0;
+ pReader->nPopulate = 0;
+ }
+ }
+ return rc;
+}
+
+static int fts3SegReaderRequire(Fts3SegReader *pReader, char *pFrom, int nByte){
+ int rc = SQLITE_OK;
+ assert( !pReader->pBlob
+ || (pFrom>=pReader->aNode && pFrom<&pReader->aNode[pReader->nNode])
+ );
+ while( pReader->pBlob && rc==SQLITE_OK
+ && (pFrom - pReader->aNode + nByte)>pReader->nPopulate
+ ){
+ rc = fts3SegReaderIncrRead(pReader);
+ }
+ return rc;
+}
+
+/*
+** Set an Fts3SegReader cursor to point at EOF.
+*/
+static void fts3SegReaderSetEof(Fts3SegReader *pSeg){
+ if( !fts3SegReaderIsRootOnly(pSeg) ){
+ sqlite3_free(pSeg->aNode);
+ sqlite3_blob_close(pSeg->pBlob);
+ pSeg->pBlob = 0;
+ }
+ pSeg->aNode = 0;
+}
+
+/*
+** Move the iterator passed as the first argument to the next term in the
+** segment. If successful, SQLITE_OK is returned. If there is no next term,
+** SQLITE_DONE is returned. Otherwise, an SQLite error code is returned.
+*/
+static int fts3SegReaderNext(
+ Fts3Table *p,
+ Fts3SegReader *pReader,
+ int bIncr
+){
+ int rc; /* Return code of various sub-routines */
+ char *pNext; /* Cursor variable */
+ int nPrefix; /* Number of bytes in term prefix */
+ int nSuffix; /* Number of bytes in term suffix */
+
+ if( !pReader->aDoclist ){
+ pNext = pReader->aNode;
+ }else{
+ pNext = &pReader->aDoclist[pReader->nDoclist];
+ }
+
+ if( !pNext || pNext>=&pReader->aNode[pReader->nNode] ){
+
+ if( fts3SegReaderIsPending(pReader) ){
+ Fts3HashElem *pElem = *(pReader->ppNextElem);
+ sqlite3_free(pReader->aNode);
+ pReader->aNode = 0;
+ if( pElem ){
+ char *aCopy;
+ PendingList *pList = (PendingList *)fts3HashData(pElem);
+ int nCopy = pList->nData+1;
+ pReader->zTerm = (char *)fts3HashKey(pElem);
+ pReader->nTerm = fts3HashKeysize(pElem);
+ aCopy = (char*)sqlite3_malloc(nCopy);
+ if( !aCopy ) return SQLITE_NOMEM;
+ memcpy(aCopy, pList->aData, nCopy);
+ pReader->nNode = pReader->nDoclist = nCopy;
+ pReader->aNode = pReader->aDoclist = aCopy;
+ pReader->ppNextElem++;
+ assert( pReader->aNode );
+ }
+ return SQLITE_OK;
+ }
+
+ fts3SegReaderSetEof(pReader);
+
+ /* If iCurrentBlock>=iLeafEndBlock, this is an EOF condition. All leaf
+ ** blocks have already been traversed. */
+#ifdef CORRUPT_DB
+ assert( pReader->iCurrentBlock<=pReader->iLeafEndBlock || CORRUPT_DB );
+#endif
+ if( pReader->iCurrentBlock>=pReader->iLeafEndBlock ){
+ return SQLITE_OK;
+ }
+
+ rc = sqlite3Fts3ReadBlock(
+ p, ++pReader->iCurrentBlock, &pReader->aNode, &pReader->nNode,
+ (bIncr ? &pReader->nPopulate : 0)
+ );
+ if( rc!=SQLITE_OK ) return rc;
+ assert( pReader->pBlob==0 );
+ if( bIncr && pReader->nPopulate<pReader->nNode ){
+ pReader->pBlob = p->pSegments;
+ p->pSegments = 0;
+ }
+ pNext = pReader->aNode;
+ }
+
+ assert( !fts3SegReaderIsPending(pReader) );
+
+ rc = fts3SegReaderRequire(pReader, pNext, FTS3_VARINT_MAX*2);
+ if( rc!=SQLITE_OK ) return rc;
+
+ /* Because of the FTS3_NODE_PADDING bytes of padding, the following is
+ ** safe (no risk of overread) even if the node data is corrupted. */
+ pNext += fts3GetVarint32(pNext, &nPrefix);
+ pNext += fts3GetVarint32(pNext, &nSuffix);
+ if( nSuffix<=0
+ || (&pReader->aNode[pReader->nNode] - pNext)<nSuffix
+ || nPrefix>pReader->nTerm
+ ){
+ return FTS_CORRUPT_VTAB;
+ }
+
+ /* Both nPrefix and nSuffix were read by fts3GetVarint32() and so are
+ ** between 0 and 0x7FFFFFFF. But the sum of the two may cause integer
+ ** overflow - hence the (i64) casts. */
+ if( (i64)nPrefix+nSuffix>(i64)pReader->nTermAlloc ){
+ i64 nNew = ((i64)nPrefix+nSuffix)*2;
+ char *zNew = sqlite3_realloc64(pReader->zTerm, nNew);
+ if( !zNew ){
+ return SQLITE_NOMEM;
+ }
+ pReader->zTerm = zNew;
+ pReader->nTermAlloc = nNew;
+ }
+
+ rc = fts3SegReaderRequire(pReader, pNext, nSuffix+FTS3_VARINT_MAX);
+ if( rc!=SQLITE_OK ) return rc;
+
+ memcpy(&pReader->zTerm[nPrefix], pNext, nSuffix);
+ pReader->nTerm = nPrefix+nSuffix;
+ pNext += nSuffix;
+ pNext += fts3GetVarint32(pNext, &pReader->nDoclist);
+ pReader->aDoclist = pNext;
+ pReader->pOffsetList = 0;
+
+ /* Check that the doclist does not appear to extend past the end of the
+ ** b-tree node. And that the final byte of the doclist is 0x00. If either
+ ** of these statements is untrue, then the data structure is corrupt.
+ */
+ if( pReader->nDoclist > pReader->nNode-(pReader->aDoclist-pReader->aNode)
+ || (pReader->nPopulate==0 && pReader->aDoclist[pReader->nDoclist-1])
+ || pReader->nDoclist==0
+ ){
+ return FTS_CORRUPT_VTAB;
+ }
+ return SQLITE_OK;
+}
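+
+/* A minimal sketch (not part of the original sources, and compiled out)
+** of the prefix-compressed term decoding performed above. With the
+** previous term "sqlite" and an entry encoding nPrefix=3, nSuffix=2,
+** suffix "as", the reconstructed term is "sqlas". The term buffer is
+** assumed to be large enough for the result.
+*/
+#if 0
+static void exampleDecodeTerm(
+  char *zTerm,               /* IN/OUT: buffer holding the previous term */
+  int *pnTerm,               /* IN/OUT: size of the term in bytes */
+  char *pEntry               /* varint(nPrefix), varint(nSuffix), suffix */
+){
+  int nPrefix, nSuffix;
+  pEntry += fts3GetVarint32(pEntry, &nPrefix);
+  pEntry += fts3GetVarint32(pEntry, &nSuffix);
+  memcpy(&zTerm[nPrefix], pEntry, nSuffix); /* Keep prefix, append suffix */
+  *pnTerm = nPrefix + nSuffix;
+}
+#endif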
+
+/*
+** Set the SegReader to point to the first docid in the doclist associated
+** with the current term.
+*/
+static int fts3SegReaderFirstDocid(Fts3Table *pTab, Fts3SegReader *pReader){
+ int rc = SQLITE_OK;
+ assert( pReader->aDoclist );
+ assert( !pReader->pOffsetList );
+ if( pTab->bDescIdx && fts3SegReaderIsPending(pReader) ){
+ u8 bEof = 0;
+ pReader->iDocid = 0;
+ pReader->nOffsetList = 0;
+ sqlite3Fts3DoclistPrev(0,
+ pReader->aDoclist, pReader->nDoclist, &pReader->pOffsetList,
+ &pReader->iDocid, &pReader->nOffsetList, &bEof
+ );
+ }else{
+ rc = fts3SegReaderRequire(pReader, pReader->aDoclist, FTS3_VARINT_MAX);
+ if( rc==SQLITE_OK ){
+ int n = sqlite3Fts3GetVarint(pReader->aDoclist, &pReader->iDocid);
+ pReader->pOffsetList = &pReader->aDoclist[n];
+ }
+ }
+ return rc;
+}
+
+/*
+** Advance the SegReader to point to the next docid in the doclist
+** associated with the current term.
+**
+** If arguments ppOffsetList and pnOffsetList are not NULL, then
+** *ppOffsetList is set to point to the first column-offset list
+** in the doclist entry (i.e. immediately past the docid varint).
+** *pnOffsetList is set to the length of the set of column-offset
+** lists, not including the nul-terminator byte.
+*/
+static int fts3SegReaderNextDocid(
+ Fts3Table *pTab,
+ Fts3SegReader *pReader, /* Reader to advance to next docid */
+ char **ppOffsetList, /* OUT: Pointer to current position-list */
+ int *pnOffsetList /* OUT: Length of *ppOffsetList in bytes */
+){
+ int rc = SQLITE_OK;
+ char *p = pReader->pOffsetList;
+ char c = 0;
+
+ assert( p );
+
+ if( pTab->bDescIdx && fts3SegReaderIsPending(pReader) ){
+ /* A pending-terms seg-reader for an FTS4 table that uses order=desc.
+ ** Pending-terms doclists are always built up in ascending order, so
+ ** we have to iterate through them backwards here. */
+ u8 bEof = 0;
+ if( ppOffsetList ){
+ *ppOffsetList = pReader->pOffsetList;
+ *pnOffsetList = pReader->nOffsetList - 1;
+ }
+ sqlite3Fts3DoclistPrev(0,
+ pReader->aDoclist, pReader->nDoclist, &p, &pReader->iDocid,
+ &pReader->nOffsetList, &bEof
+ );
+ if( bEof ){
+ pReader->pOffsetList = 0;
+ }else{
+ pReader->pOffsetList = p;
+ }
+ }else{
+ char *pEnd = &pReader->aDoclist[pReader->nDoclist];
+
+ /* Pointer p currently points at the first byte of an offset list. The
+ ** following block advances it to point one byte past the end of
+ ** the same offset list. */
+ while( 1 ){
+
+ /* The following line of code (and the "p++" below the while() loop) is
+ ** normally all that is required to move pointer p to the desired
+ ** position. The exception is if this node is being loaded from disk
+ ** incrementally and pointer "p" now points to the first byte past
+ ** the populated part of pReader->aNode[].
+ */
+ while( *p | c ) c = *p++ & 0x80;
+ assert( *p==0 );
+
+ if( pReader->pBlob==0 || p<&pReader->aNode[pReader->nPopulate] ) break;
+ rc = fts3SegReaderIncrRead(pReader);
+ if( rc!=SQLITE_OK ) return rc;
+ }
+ p++;
+
+ /* If required, populate the output variables with a pointer to and the
+ ** size of the previous offset-list.
+ */
+ if( ppOffsetList ){
+ *ppOffsetList = pReader->pOffsetList;
+ *pnOffsetList = (int)(p - pReader->pOffsetList - 1);
+ }
+
+ /* List may have been edited in place by fts3EvalNearTrim() */
+ while( p<pEnd && *p==0 ) p++;
+
+ /* If there are no more entries in the doclist, set pOffsetList to
+ ** NULL. Otherwise, set Fts3SegReader.iDocid to the next docid and
+ ** Fts3SegReader.pOffsetList to point to the next offset list before
+ ** returning.
+ */
+ if( p>=pEnd ){
+ pReader->pOffsetList = 0;
+ }else{
+ rc = fts3SegReaderRequire(pReader, p, FTS3_VARINT_MAX);
+ if( rc==SQLITE_OK ){
+ u64 iDelta;
+ pReader->pOffsetList = p + sqlite3Fts3GetVarintU(p, &iDelta);
+ if( pTab->bDescIdx ){
+ pReader->iDocid = (i64)((u64)pReader->iDocid - iDelta);
+ }else{
+ pReader->iDocid = (i64)((u64)pReader->iDocid + iDelta);
+ }
+ }
+ }
+ }
+
+ return rc;
+}
+
+
+SQLITE_PRIVATE int sqlite3Fts3MsrOvfl(
+ Fts3Cursor *pCsr,
+ Fts3MultiSegReader *pMsr,
+ int *pnOvfl
+){
+ Fts3Table *p = (Fts3Table*)pCsr->base.pVtab;
+ int nOvfl = 0;
+ int ii;
+ int rc = SQLITE_OK;
+ int pgsz = p->nPgsz;
+
+ assert( p->bFts4 );
+ assert( pgsz>0 );
+
+ for(ii=0; rc==SQLITE_OK && ii<pMsr->nSegment; ii++){
+ Fts3SegReader *pReader = pMsr->apSegment[ii];
+ if( !fts3SegReaderIsPending(pReader)
+ && !fts3SegReaderIsRootOnly(pReader)
+ ){
+ sqlite3_int64 jj;
+ for(jj=pReader->iStartBlock; jj<=pReader->iLeafEndBlock; jj++){
+ int nBlob;
+ rc = sqlite3Fts3ReadBlock(p, jj, 0, &nBlob, 0);
+ if( rc!=SQLITE_OK ) break;
+ if( (nBlob+35)>pgsz ){
+ nOvfl += (nBlob + 34)/pgsz;
+ }
+ }
+ }
+ }
+ *pnOvfl = nOvfl;
+ return rc;
+}
+
+/*
+** Free all allocations associated with the iterator passed as the
+** second argument.
+*/
+SQLITE_PRIVATE void sqlite3Fts3SegReaderFree(Fts3SegReader *pReader){
+ if( pReader ){
+ if( !fts3SegReaderIsPending(pReader) ){
+ sqlite3_free(pReader->zTerm);
+ }
+ if( !fts3SegReaderIsRootOnly(pReader) ){
+ sqlite3_free(pReader->aNode);
+ }
+ sqlite3_blob_close(pReader->pBlob);
+ }
+ sqlite3_free(pReader);
+}
+
+/*
+** Allocate a new SegReader object.
+*/
+SQLITE_PRIVATE int sqlite3Fts3SegReaderNew(
+ int iAge, /* Segment "age". */
+ int bLookup, /* True for a lookup only */
+ sqlite3_int64 iStartLeaf, /* First leaf to traverse */
+ sqlite3_int64 iEndLeaf, /* Final leaf to traverse */
+ sqlite3_int64 iEndBlock, /* Final block of segment */
+ const char *zRoot, /* Buffer containing root node */
+ int nRoot, /* Size of buffer containing root node */
+ Fts3SegReader **ppReader /* OUT: Allocated Fts3SegReader */
+){
+ Fts3SegReader *pReader; /* Newly allocated SegReader object */
+ int nExtra = 0; /* Bytes to allocate segment root node */
+
+ assert( zRoot!=0 || nRoot==0 );
+#ifdef CORRUPT_DB
+ assert( zRoot!=0 || CORRUPT_DB );
+#endif
+
+ if( iStartLeaf==0 ){
+ if( iEndLeaf!=0 ) return FTS_CORRUPT_VTAB;
+ nExtra = nRoot + FTS3_NODE_PADDING;
+ }
+
+ pReader = (Fts3SegReader *)sqlite3_malloc(sizeof(Fts3SegReader) + nExtra);
+ if( !pReader ){
+ return SQLITE_NOMEM;
+ }
+ memset(pReader, 0, sizeof(Fts3SegReader));
+ pReader->iIdx = iAge;
+ pReader->bLookup = bLookup!=0;
+ pReader->iStartBlock = iStartLeaf;
+ pReader->iLeafEndBlock = iEndLeaf;
+ pReader->iEndBlock = iEndBlock;
+
+ if( nExtra ){
+ /* The entire segment is stored in the root node. */
+ pReader->aNode = (char *)&pReader[1];
+ pReader->rootOnly = 1;
+ pReader->nNode = nRoot;
+ if( nRoot ) memcpy(pReader->aNode, zRoot, nRoot);
+ memset(&pReader->aNode[nRoot], 0, FTS3_NODE_PADDING);
+ }else{
+ pReader->iCurrentBlock = iStartLeaf-1;
+ }
+ *ppReader = pReader;
+ return SQLITE_OK;
+}
+
+/*
+** This is a comparison function used as a qsort() callback when sorting
+** an array of pending terms by term. This occurs as part of flushing
+** the contents of the pending-terms hash table to the database.
+*/
+static int SQLITE_CDECL fts3CompareElemByTerm(
+ const void *lhs,
+ const void *rhs
+){
+ char *z1 = fts3HashKey(*(Fts3HashElem **)lhs);
+ char *z2 = fts3HashKey(*(Fts3HashElem **)rhs);
+ int n1 = fts3HashKeysize(*(Fts3HashElem **)lhs);
+ int n2 = fts3HashKeysize(*(Fts3HashElem **)rhs);
+
+ int n = (n1<n2 ? n1 : n2);
+ int c = memcmp(z1, z2, n);
+ if( c==0 ){
+ c = n1 - n2;
+ }
+ return c;
+}
+
+/*
+** This function is used to allocate an Fts3SegReader that iterates through
+** a subset of the terms stored in the Fts3Table.pendingTerms array.
+**
+** If the bPrefix parameter is zero, then the returned SegReader iterates
+** through each term in the pending-terms table. Or, if bPrefix is
+** non-zero, it iterates through each term and its prefixes. For example, if
+** the pending terms hash table contains the terms "sqlite", "mysql" and
+** "firebird", then the iterator visits the following 'terms' (in the order
+** shown):
+**
+** f fi fir fire fireb firebi firebir firebird
+** m my mys mysq mysql
+** s sq sql sqli sqlit sqlite
+**
+** Whereas if bPrefix is zero, the terms visited are:
+**
+** firebird mysql sqlite
+*/
+SQLITE_PRIVATE int sqlite3Fts3SegReaderPending(
+ Fts3Table *p, /* Virtual table handle */
+ int iIndex, /* Index for p->aIndex */
+ const char *zTerm, /* Term to search for */
+ int nTerm, /* Size of buffer zTerm */
+ int bPrefix, /* True for a prefix iterator */
+ Fts3SegReader **ppReader /* OUT: SegReader for pending-terms */
+){
+ Fts3SegReader *pReader = 0; /* Fts3SegReader object to return */
+ Fts3HashElem *pE; /* Iterator variable */
+ Fts3HashElem **aElem = 0; /* Array of term hash entries to scan */
+ int nElem = 0; /* Size of array at aElem */
+ int rc = SQLITE_OK; /* Return Code */
+ Fts3Hash *pHash;
+
+ pHash = &p->aIndex[iIndex].hPending;
+ if( bPrefix ){
+ int nAlloc = 0; /* Size of allocated array at aElem */
+
+ for(pE=fts3HashFirst(pHash); pE; pE=fts3HashNext(pE)){
+ char *zKey = (char *)fts3HashKey(pE);
+ int nKey = fts3HashKeysize(pE);
+ if( nTerm==0 || (nKey>=nTerm && 0==memcmp(zKey, zTerm, nTerm)) ){
+ if( nElem==nAlloc ){
+ Fts3HashElem **aElem2;
+ nAlloc += 16;
+ aElem2 = (Fts3HashElem **)sqlite3_realloc(
+ aElem, nAlloc*sizeof(Fts3HashElem *)
+ );
+ if( !aElem2 ){
+ rc = SQLITE_NOMEM;
+ nElem = 0;
+ break;
+ }
+ aElem = aElem2;
+ }
+
+ aElem[nElem++] = pE;
+ }
+ }
+
+ /* If more than one term matches the prefix, sort the Fts3HashElem
+ ** objects in term order using qsort(). This uses the same comparison
+ ** callback as is used when flushing terms to disk.
+ */
+ if( nElem>1 ){
+ qsort(aElem, nElem, sizeof(Fts3HashElem *), fts3CompareElemByTerm);
+ }
+
+ }else{
+ /* The query is a simple term lookup that matches at most one term in
+ ** the index. All that is required is a straight hash-lookup.
+ **
+ ** Because the stack address of pE may be accessed via the aElem pointer
+ ** below, the "Fts3HashElem *pE" must be declared so that it is valid
+ ** within this entire function, not just this "else{...}" block.
+ */
+ pE = fts3HashFindElem(pHash, zTerm, nTerm);
+ if( pE ){
+ aElem = &pE;
+ nElem = 1;
+ }
+ }
+
+ if( nElem>0 ){
+ sqlite3_int64 nByte;
+ nByte = sizeof(Fts3SegReader) + (nElem+1)*sizeof(Fts3HashElem *);
+ pReader = (Fts3SegReader *)sqlite3_malloc64(nByte);
+ if( !pReader ){
+ rc = SQLITE_NOMEM;
+ }else{
+ memset(pReader, 0, nByte);
+ pReader->iIdx = 0x7FFFFFFF;
+ pReader->ppNextElem = (Fts3HashElem **)&pReader[1];
+ memcpy(pReader->ppNextElem, aElem, nElem*sizeof(Fts3HashElem *));
+ }
+ }
+
+ if( bPrefix ){
+ sqlite3_free(aElem);
+ }
+ *ppReader = pReader;
+ return rc;
+}
+
+/*
+** Compare the entries pointed to by two Fts3SegReader structures.
+** Comparison is as follows:
+**
+** 1) EOF is greater than not EOF.
+**
+** 2) The current terms (if any) are compared using memcmp(). If one
+** term is a prefix of another, the longer term is considered the
+** larger.
+**
+** 3) By segment age. An older segment is considered larger.
+*/
+static int fts3SegReaderCmp(Fts3SegReader *pLhs, Fts3SegReader *pRhs){
+ int rc;
+ if( pLhs->aNode && pRhs->aNode ){
+ int rc2 = pLhs->nTerm - pRhs->nTerm;
+ if( rc2<0 ){
+ rc = memcmp(pLhs->zTerm, pRhs->zTerm, pLhs->nTerm);
+ }else{
+ rc = memcmp(pLhs->zTerm, pRhs->zTerm, pRhs->nTerm);
+ }
+ if( rc==0 ){
+ rc = rc2;
+ }
+ }else{
+ rc = (pLhs->aNode==0) - (pRhs->aNode==0);
+ }
+ if( rc==0 ){
+ rc = pRhs->iIdx - pLhs->iIdx;
+ }
+ assert( rc!=0 );
+ return rc;
+}
+
+/*
+** A different comparison function for SegReader structures. In this
+** version, it is assumed that each SegReader points to an entry in
+** a doclist for identical terms. Comparison is made as follows:
+**
+** 1) EOF (end of doclist in this case) is greater than not EOF.
+**
+** 2) By current docid.
+**
+** 3) By segment age. An older segment is considered larger.
+*/
+static int fts3SegReaderDoclistCmp(Fts3SegReader *pLhs, Fts3SegReader *pRhs){
+ int rc = (pLhs->pOffsetList==0)-(pRhs->pOffsetList==0);
+ if( rc==0 ){
+ if( pLhs->iDocid==pRhs->iDocid ){
+ rc = pRhs->iIdx - pLhs->iIdx;
+ }else{
+ rc = (pLhs->iDocid > pRhs->iDocid) ? 1 : -1;
+ }
+ }
+ assert( pLhs->aNode && pRhs->aNode );
+ return rc;
+}
+static int fts3SegReaderDoclistCmpRev(Fts3SegReader *pLhs, Fts3SegReader *pRhs){
+ int rc = (pLhs->pOffsetList==0)-(pRhs->pOffsetList==0);
+ if( rc==0 ){
+ if( pLhs->iDocid==pRhs->iDocid ){
+ rc = pRhs->iIdx - pLhs->iIdx;
+ }else{
+ rc = (pLhs->iDocid < pRhs->iDocid) ? 1 : -1;
+ }
+ }
+ assert( pLhs->aNode && pRhs->aNode );
+ return rc;
+}
+
+/*
+** Compare the term that the Fts3SegReader object passed as the first argument
+** points to with the term specified by arguments zTerm and nTerm.
+**
+** If the pSeg iterator is already at EOF, return 0. Otherwise, return
+** -ve if the pSeg term is less than zTerm/nTerm, 0 if the two terms are
+** equal, or +ve if the pSeg term is greater than zTerm/nTerm.
+*/
+static int fts3SegReaderTermCmp(
+ Fts3SegReader *pSeg, /* Segment reader object */
+ const char *zTerm, /* Term to compare to */
+ int nTerm /* Size of term zTerm in bytes */
+){
+ int res = 0;
+ if( pSeg->aNode ){
+ if( pSeg->nTerm>nTerm ){
+ res = memcmp(pSeg->zTerm, zTerm, nTerm);
+ }else{
+ res = memcmp(pSeg->zTerm, zTerm, pSeg->nTerm);
+ }
+ if( res==0 ){
+ res = pSeg->nTerm-nTerm;
+ }
+ }
+ return res;
+}
+
+/*
+** Argument apSegment is an array of nSegment elements. It is known that
+** the final (nSegment-nSuspect) members are already in sorted order
+** (according to the comparison function provided). This function shuffles
+** the array around until all entries are in sorted order.
+*/
+static void fts3SegReaderSort(
+ Fts3SegReader **apSegment, /* Array to sort entries of */
+ int nSegment, /* Size of apSegment array */
+ int nSuspect, /* Unsorted entry count */
+ int (*xCmp)(Fts3SegReader *, Fts3SegReader *) /* Comparison function */
+){
+ int i; /* Iterator variable */
+
+ assert( nSuspect<=nSegment );
+
+ if( nSuspect==nSegment ) nSuspect--;
+ for(i=nSuspect-1; i>=0; i--){
+ int j;
+ for(j=i; j<(nSegment-1); j++){
+ Fts3SegReader *pTmp;
+ if( xCmp(apSegment[j], apSegment[j+1])<0 ) break;
+ pTmp = apSegment[j+1];
+ apSegment[j+1] = apSegment[j];
+ apSegment[j] = pTmp;
+ }
+ }
+
+#ifndef NDEBUG
+ /* Check that the list really is sorted now. */
+ for(i=0; i<(nSuspect-1); i++){
+ assert( xCmp(apSegment[i], apSegment[i+1])<0 );
+ }
+#endif
+}
+
+/*
+** Insert a record into the %_segments table.
+*/
+static int fts3WriteSegment(
+ Fts3Table *p, /* Virtual table handle */
+ sqlite3_int64 iBlock, /* Block id for new block */
+ char *z, /* Pointer to buffer containing block data */
+ int n /* Size of buffer z in bytes */
+){
+ sqlite3_stmt *pStmt;
+ int rc = fts3SqlStmt(p, SQL_INSERT_SEGMENTS, &pStmt, 0);
+ if( rc==SQLITE_OK ){
+ sqlite3_bind_int64(pStmt, 1, iBlock);
+ sqlite3_bind_blob(pStmt, 2, z, n, SQLITE_STATIC);
+ sqlite3_step(pStmt);
+ rc = sqlite3_reset(pStmt);
+ sqlite3_bind_null(pStmt, 2);
+ }
+ return rc;
+}
+
+/*
+** Find the largest relative level number in the table. If successful, set
+** *pnMax to this value and return SQLITE_OK. Otherwise, if an error occurs,
+** set *pnMax to zero and return an SQLite error code.
+*/
+SQLITE_PRIVATE int sqlite3Fts3MaxLevel(Fts3Table *p, int *pnMax){
+ int rc;
+ int mxLevel = 0;
+ sqlite3_stmt *pStmt = 0;
+
+ rc = fts3SqlStmt(p, SQL_SELECT_MXLEVEL, &pStmt, 0);
+ if( rc==SQLITE_OK ){
+ if( SQLITE_ROW==sqlite3_step(pStmt) ){
+ mxLevel = sqlite3_column_int(pStmt, 0);
+ }
+ rc = sqlite3_reset(pStmt);
+ }
+ *pnMax = mxLevel;
+ return rc;
+}
+
+/*
+** Insert a record into the %_segdir table.
+*/
+static int fts3WriteSegdir(
+ Fts3Table *p, /* Virtual table handle */
+ sqlite3_int64 iLevel, /* Value for "level" field (absolute level) */
+ int iIdx, /* Value for "idx" field */
+ sqlite3_int64 iStartBlock, /* Value for "start_block" field */
+ sqlite3_int64 iLeafEndBlock, /* Value for "leaves_end_block" field */
+ sqlite3_int64 iEndBlock, /* Value for "end_block" field */
+ sqlite3_int64 nLeafData, /* Bytes of leaf data in segment */
+ char *zRoot, /* Blob value for "root" field */
+ int nRoot /* Number of bytes in buffer zRoot */
+){
+ sqlite3_stmt *pStmt;
+ int rc = fts3SqlStmt(p, SQL_INSERT_SEGDIR, &pStmt, 0);
+ if( rc==SQLITE_OK ){
+ sqlite3_bind_int64(pStmt, 1, iLevel);
+ sqlite3_bind_int(pStmt, 2, iIdx);
+ sqlite3_bind_int64(pStmt, 3, iStartBlock);
+ sqlite3_bind_int64(pStmt, 4, iLeafEndBlock);
+ if( nLeafData==0 ){
+ sqlite3_bind_int64(pStmt, 5, iEndBlock);
+ }else{
+ char *zEnd = sqlite3_mprintf("%lld %lld", iEndBlock, nLeafData);
+ if( !zEnd ) return SQLITE_NOMEM;
+ sqlite3_bind_text(pStmt, 5, zEnd, -1, sqlite3_free);
+ }
+ sqlite3_bind_blob(pStmt, 6, zRoot, nRoot, SQLITE_STATIC);
+ sqlite3_step(pStmt);
+ rc = sqlite3_reset(pStmt);
+ sqlite3_bind_null(pStmt, 6);
+ }
+ return rc;
+}
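+
+/* A hedged sketch (not in the original file, and compiled out) of the
+** inverse of the "%lld %lld" encoding used above: split a text end_block
+** value back into the end-block number and the aggregate leaf-data size.
+** It assumes the single-space format produced by fts3WriteSegdir().
+*/
+#if 0
+static void exampleDecodeEndBlock(
+  const char *zEnd,             /* Text value from the "end_block" column */
+  sqlite3_int64 *piEndBlock,    /* OUT: end-block number */
+  sqlite3_int64 *pnLeafData     /* OUT: total bytes of leaf data */
+){
+  char *zTail = 0;
+  *piEndBlock = strtoll(zEnd, &zTail, 10);
+  *pnLeafData = strtoll(zTail, 0, 10);
+}
+#endif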
+
+/*
+** Return the size of the common prefix (if any) shared by zPrev and
+** zNext, in bytes. For example,
+**
+** fts3PrefixCompress("abc", 3, "abcdef", 6) // returns 3
+** fts3PrefixCompress("abX", 3, "abcdef", 6) // returns 2
+** fts3PrefixCompress("abX", 3, "Xbcdef", 6) // returns 0
+*/
+static int fts3PrefixCompress(
+ const char *zPrev, /* Buffer containing previous term */
+ int nPrev, /* Size of buffer zPrev in bytes */
+ const char *zNext, /* Buffer containing next term */
+ int nNext /* Size of buffer zNext in bytes */
+){
+ int n;
+ UNUSED_PARAMETER(nNext);
+ for(n=0; n<nPrev && zPrev[n]==zNext[n]; n++);
+ return n;
+}
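+
+/* A tiny check (not part of the original sources, and compiled out)
+** exercising fts3PrefixCompress() on the three cases listed in the
+** comment above.
+*/
+#if 0
+static void examplePrefixCompress(void){
+  assert( fts3PrefixCompress("abc", 3, "abcdef", 6)==3 );
+  assert( fts3PrefixCompress("abX", 3, "abcdef", 6)==2 );
+  assert( fts3PrefixCompress("abX", 3, "Xbcdef", 6)==0 );
+}
+#endif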
+
+/*
+** Add term zTerm to the SegmentNode. It is guaranteed that zTerm is larger
+** (according to memcmp) than the previous term.
+*/
+static int fts3NodeAddTerm(
+ Fts3Table *p, /* Virtual table handle */
+ SegmentNode **ppTree, /* IN/OUT: SegmentNode handle */
+ int isCopyTerm, /* True if zTerm/nTerm is transient */
+ const char *zTerm, /* Pointer to buffer containing term */
+ int nTerm /* Size of term in bytes */
+){
+ SegmentNode *pTree = *ppTree;
+ int rc;
+ SegmentNode *pNew;
+
+ /* First try to append the term to the current node. Return early if
+ ** this is possible.
+ */
+ if( pTree ){
+ int nData = pTree->nData; /* Current size of node in bytes */
+ int nReq = nData; /* Required space after adding zTerm */
+ int nPrefix; /* Number of bytes of prefix compression */
+ int nSuffix; /* Suffix length */
+
+ nPrefix = fts3PrefixCompress(pTree->zTerm, pTree->nTerm, zTerm, nTerm);
+ nSuffix = nTerm-nPrefix;
+
+    /* If nSuffix is zero or less, then zTerm/nTerm must be a prefix of
+    ** pTree->zTerm/pTree->nTerm, i.e. equal to or less than the current
+    ** term when compared with BINARY collation. This indicates corruption. */
+ if( nSuffix<=0 ) return FTS_CORRUPT_VTAB;
+
+ nReq += sqlite3Fts3VarintLen(nPrefix)+sqlite3Fts3VarintLen(nSuffix)+nSuffix;
+ if( nReq<=p->nNodeSize || !pTree->zTerm ){
+
+ if( nReq>p->nNodeSize ){
+      /* An unusual case: this is the first term to be added to the node
+      ** and the static node buffer (p->nNodeSize bytes) is not large
+      ** enough. Use a separately malloced buffer instead. This wastes
+      ** p->nNodeSize bytes, but since this scenario only comes about when
+      ** the database contains two terms that share a prefix of almost 2KB,
+      ** this is not expected to be a serious problem.
+      */
+ assert( pTree->aData==(char *)&pTree[1] );
+ pTree->aData = (char *)sqlite3_malloc(nReq);
+ if( !pTree->aData ){
+ return SQLITE_NOMEM;
+ }
+ }
+
+ if( pTree->zTerm ){
+ /* There is no prefix-length field for first term in a node */
+ nData += sqlite3Fts3PutVarint(&pTree->aData[nData], nPrefix);
+ }
+
+ nData += sqlite3Fts3PutVarint(&pTree->aData[nData], nSuffix);
+ memcpy(&pTree->aData[nData], &zTerm[nPrefix], nSuffix);
+ pTree->nData = nData + nSuffix;
+ pTree->nEntry++;
+
+ if( isCopyTerm ){
+ if( pTree->nMalloc<nTerm ){
+ char *zNew = sqlite3_realloc(pTree->zMalloc, nTerm*2);
+ if( !zNew ){
+ return SQLITE_NOMEM;
+ }
+ pTree->nMalloc = nTerm*2;
+ pTree->zMalloc = zNew;
+ }
+ pTree->zTerm = pTree->zMalloc;
+ memcpy(pTree->zTerm, zTerm, nTerm);
+ pTree->nTerm = nTerm;
+ }else{
+ pTree->zTerm = (char *)zTerm;
+ pTree->nTerm = nTerm;
+ }
+ return SQLITE_OK;
+ }
+ }
+
+ /* If control flows to here, it was not possible to append zTerm to the
+ ** current node. Create a new node (a right-sibling of the current node).
+ ** If this is the first node in the tree, the term is added to it.
+ **
+ ** Otherwise, the term is not added to the new node, it is left empty for
+ ** now. Instead, the term is inserted into the parent of pTree. If pTree
+ ** has no parent, one is created here.
+ */
+ pNew = (SegmentNode *)sqlite3_malloc(sizeof(SegmentNode) + p->nNodeSize);
+ if( !pNew ){
+ return SQLITE_NOMEM;
+ }
+ memset(pNew, 0, sizeof(SegmentNode));
+ pNew->nData = 1 + FTS3_VARINT_MAX;
+ pNew->aData = (char *)&pNew[1];
+
+ if( pTree ){
+ SegmentNode *pParent = pTree->pParent;
+ rc = fts3NodeAddTerm(p, &pParent, isCopyTerm, zTerm, nTerm);
+ if( pTree->pParent==0 ){
+ pTree->pParent = pParent;
+ }
+ pTree->pRight = pNew;
+ pNew->pLeftmost = pTree->pLeftmost;
+ pNew->pParent = pParent;
+ pNew->zMalloc = pTree->zMalloc;
+ pNew->nMalloc = pTree->nMalloc;
+ pTree->zMalloc = 0;
+ }else{
+ pNew->pLeftmost = pNew;
+ rc = fts3NodeAddTerm(p, &pNew, isCopyTerm, zTerm, nTerm);
+ }
+
+ *ppTree = pNew;
+ return rc;
+}
+
+/*
+** Helper function for fts3NodeWrite().
+*/
+static int fts3TreeFinishNode(
+ SegmentNode *pTree,
+ int iHeight,
+ sqlite3_int64 iLeftChild
+){
+ int nStart;
+ assert( iHeight>=1 && iHeight<128 );
+ nStart = FTS3_VARINT_MAX - sqlite3Fts3VarintLen(iLeftChild);
+ pTree->aData[nStart] = (char)iHeight;
+ sqlite3Fts3PutVarint(&pTree->aData[nStart+1], iLeftChild);
+ return nStart;
+}
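+
+/* A hedged sketch (not from the original file, and compiled out) reading
+** back the header that fts3TreeFinishNode() writes: one byte of tree
+** height followed by a varint holding the block id of the leftmost child.
+*/
+#if 0
+static void exampleReadNodeHeader(
+  const char *aNode,            /* Interior node image, header first */
+  int *piHeight,                /* OUT: height of node within the tree */
+  sqlite3_int64 *piLeftChild    /* OUT: block id of leftmost child node */
+){
+  *piHeight = (int)aNode[0];
+  sqlite3Fts3GetVarint(&aNode[1], piLeftChild);
+}
+#endif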
+
+/*
+** Write the buffer for the segment node pTree and all of its peers to the
+** database. Then call this function recursively to write the parent of
+** pTree and its peers to the database.
+**
+** Except, if pTree is a root node, do not write it to the database. Instead,
+** set output variables *paRoot and *pnRoot to contain the root node.
+**
+** If successful, SQLITE_OK is returned and output variable *piLast is
+** set to the largest blockid written to the database (or zero if no
+** blocks were written to the db). Otherwise, an SQLite error code is
+** returned.
+*/
+static int fts3NodeWrite(
+ Fts3Table *p, /* Virtual table handle */
+ SegmentNode *pTree, /* SegmentNode handle */
+ int iHeight, /* Height of this node in tree */
+ sqlite3_int64 iLeaf, /* Block id of first leaf node */
+ sqlite3_int64 iFree, /* Block id of next free slot in %_segments */
+ sqlite3_int64 *piLast, /* OUT: Block id of last entry written */
+ char **paRoot, /* OUT: Data for root node */
+ int *pnRoot /* OUT: Size of root node in bytes */
+){
+ int rc = SQLITE_OK;
+
+ if( !pTree->pParent ){
+ /* Root node of the tree. */
+ int nStart = fts3TreeFinishNode(pTree, iHeight, iLeaf);
+ *piLast = iFree-1;
+ *pnRoot = pTree->nData - nStart;
+ *paRoot = &pTree->aData[nStart];
+ }else{
+ SegmentNode *pIter;
+ sqlite3_int64 iNextFree = iFree;
+ sqlite3_int64 iNextLeaf = iLeaf;
+ for(pIter=pTree->pLeftmost; pIter && rc==SQLITE_OK; pIter=pIter->pRight){
+ int nStart = fts3TreeFinishNode(pIter, iHeight, iNextLeaf);
+ int nWrite = pIter->nData - nStart;
+
+ rc = fts3WriteSegment(p, iNextFree, &pIter->aData[nStart], nWrite);
+ iNextFree++;
+ iNextLeaf += (pIter->nEntry+1);
+ }
+ if( rc==SQLITE_OK ){
+ assert( iNextLeaf==iFree );
+ rc = fts3NodeWrite(
+ p, pTree->pParent, iHeight+1, iFree, iNextFree, piLast, paRoot, pnRoot
+ );
+ }
+ }
+
+ return rc;
+}
+
+/*
+** Free all memory allocations associated with the tree pTree.
+*/
+static void fts3NodeFree(SegmentNode *pTree){
+ if( pTree ){
+ SegmentNode *p = pTree->pLeftmost;
+ fts3NodeFree(p->pParent);
+ while( p ){
+ SegmentNode *pRight = p->pRight;
+ if( p->aData!=(char *)&p[1] ){
+ sqlite3_free(p->aData);
+ }
+ assert( pRight==0 || p->zMalloc==0 );
+ sqlite3_free(p->zMalloc);
+ sqlite3_free(p);
+ p = pRight;
+ }
+ }
+}
+
+/*
+** Add a term to the segment being constructed by the SegmentWriter object
+** *ppWriter. When adding the first term to a segment, *ppWriter should
+** be passed NULL. This function will allocate a new SegmentWriter object
+** and return it via the input/output variable *ppWriter in this case.
+**
+** If successful, SQLITE_OK is returned. Otherwise, an SQLite error code.
+*/
+static int fts3SegWriterAdd(
+ Fts3Table *p, /* Virtual table handle */
+ SegmentWriter **ppWriter, /* IN/OUT: SegmentWriter handle */
+ int isCopyTerm, /* True if buffer zTerm must be copied */
+ const char *zTerm, /* Pointer to buffer containing term */
+ int nTerm, /* Size of term in bytes */
+ const char *aDoclist, /* Pointer to buffer containing doclist */
+ int nDoclist /* Size of doclist in bytes */
+){
+ int nPrefix; /* Size of term prefix in bytes */
+ int nSuffix; /* Size of term suffix in bytes */
+ int nReq; /* Number of bytes required on leaf page */
+ int nData;
+ SegmentWriter *pWriter = *ppWriter;
+
+ if( !pWriter ){
+ int rc;
+ sqlite3_stmt *pStmt;
+
+ /* Allocate the SegmentWriter structure */
+ pWriter = (SegmentWriter *)sqlite3_malloc(sizeof(SegmentWriter));
+ if( !pWriter ) return SQLITE_NOMEM;
+ memset(pWriter, 0, sizeof(SegmentWriter));
+ *ppWriter = pWriter;
+
+ /* Allocate a buffer in which to accumulate data */
+ pWriter->aData = (char *)sqlite3_malloc(p->nNodeSize);
+ if( !pWriter->aData ) return SQLITE_NOMEM;
+ pWriter->nSize = p->nNodeSize;
+
+ /* Find the next free blockid in the %_segments table */
+ rc = fts3SqlStmt(p, SQL_NEXT_SEGMENTS_ID, &pStmt, 0);
+ if( rc!=SQLITE_OK ) return rc;
+ if( SQLITE_ROW==sqlite3_step(pStmt) ){
+ pWriter->iFree = sqlite3_column_int64(pStmt, 0);
+ pWriter->iFirst = pWriter->iFree;
+ }
+ rc = sqlite3_reset(pStmt);
+ if( rc!=SQLITE_OK ) return rc;
+ }
+ nData = pWriter->nData;
+
+ nPrefix = fts3PrefixCompress(pWriter->zTerm, pWriter->nTerm, zTerm, nTerm);
+ nSuffix = nTerm-nPrefix;
+
+  /* If nSuffix is zero or less, then zTerm/nTerm must be a prefix of
+  ** pWriter->zTerm/pWriter->nTerm, i.e. equal to or less than the previous
+  ** term when compared with BINARY collation. This indicates corruption. */
+ if( nSuffix<=0 ) return FTS_CORRUPT_VTAB;
+
+ /* Figure out how many bytes are required by this new entry */
+ nReq = sqlite3Fts3VarintLen(nPrefix) + /* varint containing prefix size */
+ sqlite3Fts3VarintLen(nSuffix) + /* varint containing suffix size */
+ nSuffix + /* Term suffix */
+ sqlite3Fts3VarintLen(nDoclist) + /* Size of doclist */
+ nDoclist; /* Doclist data */
+
+ if( nData>0 && nData+nReq>p->nNodeSize ){
+ int rc;
+
+ /* The current leaf node is full. Write it out to the database. */
+ if( pWriter->iFree==LARGEST_INT64 ) return FTS_CORRUPT_VTAB;
+ rc = fts3WriteSegment(p, pWriter->iFree++, pWriter->aData, nData);
+ if( rc!=SQLITE_OK ) return rc;
+ p->nLeafAdd++;
+
+ /* Add the current term to the interior node tree. The term added to
+ ** the interior tree must:
+ **
+ ** a) be greater than the largest term on the leaf node just written
+ ** to the database (still available in pWriter->zTerm), and
+ **
+ ** b) be less than or equal to the term about to be added to the new
+ ** leaf node (zTerm/nTerm).
+ **
+ ** In other words, it must be the prefix of zTerm 1 byte longer than
+ ** the common prefix (if any) of zTerm and pWriter->zTerm.
+ */
+ assert( nPrefix<nTerm );
+ rc = fts3NodeAddTerm(p, &pWriter->pTree, isCopyTerm, zTerm, nPrefix+1);
+ if( rc!=SQLITE_OK ) return rc;
+
+ nData = 0;
+ pWriter->nTerm = 0;
+
+ nPrefix = 0;
+ nSuffix = nTerm;
+ nReq = 1 + /* varint containing prefix size */
+ sqlite3Fts3VarintLen(nTerm) + /* varint containing suffix size */
+ nTerm + /* Term suffix */
+ sqlite3Fts3VarintLen(nDoclist) + /* Size of doclist */
+ nDoclist; /* Doclist data */
+ }
+
+ /* Increase the total number of bytes written to account for the new entry. */
+ pWriter->nLeafData += nReq;
+
+ /* If the buffer currently allocated is too small for this entry, realloc
+ ** the buffer to make it large enough.
+ */
+ if( nReq>pWriter->nSize ){
+ char *aNew = sqlite3_realloc(pWriter->aData, nReq);
+ if( !aNew ) return SQLITE_NOMEM;
+ pWriter->aData = aNew;
+ pWriter->nSize = nReq;
+ }
+ assert( nData+nReq<=pWriter->nSize );
+
+ /* Append the prefix-compressed term and doclist to the buffer. */
+ nData += sqlite3Fts3PutVarint(&pWriter->aData[nData], nPrefix);
+ nData += sqlite3Fts3PutVarint(&pWriter->aData[nData], nSuffix);
+ assert( nSuffix>0 );
+ memcpy(&pWriter->aData[nData], &zTerm[nPrefix], nSuffix);
+ nData += nSuffix;
+ nData += sqlite3Fts3PutVarint(&pWriter->aData[nData], nDoclist);
+ assert( nDoclist>0 );
+ memcpy(&pWriter->aData[nData], aDoclist, nDoclist);
+ pWriter->nData = nData + nDoclist;
+
+ /* Save the current term so that it can be used to prefix-compress the next.
+ ** If the isCopyTerm parameter is true, then the buffer pointed to by
+ ** zTerm is transient, so take a copy of the term data. Otherwise, just
+ ** store a copy of the pointer.
+ */
+ if( isCopyTerm ){
+ if( nTerm>pWriter->nMalloc ){
+ char *zNew = sqlite3_realloc(pWriter->zMalloc, nTerm*2);
+ if( !zNew ){
+ return SQLITE_NOMEM;
+ }
+ pWriter->nMalloc = nTerm*2;
+ pWriter->zMalloc = zNew;
+ pWriter->zTerm = zNew;
+ }
+ assert( pWriter->zTerm==pWriter->zMalloc );
+ assert( nTerm>0 );
+ memcpy(pWriter->zTerm, zTerm, nTerm);
+ }else{
+ pWriter->zTerm = (char *)zTerm;
+ }
+ pWriter->nTerm = nTerm;
+
+ return SQLITE_OK;
+}
+
+/*
+** Flush all data associated with the SegmentWriter object pWriter to the
+** database. This function must be called after all terms have been added
+** to the segment using fts3SegWriterAdd(). If successful, SQLITE_OK is
+** returned. Otherwise, an SQLite error code.
+*/
+static int fts3SegWriterFlush(
+ Fts3Table *p, /* Virtual table handle */
+ SegmentWriter *pWriter, /* SegmentWriter to flush to the db */
+ sqlite3_int64 iLevel, /* Value for 'level' column of %_segdir */
+ int iIdx /* Value for 'idx' column of %_segdir */
+){
+ int rc; /* Return code */
+ if( pWriter->pTree ){
+ sqlite3_int64 iLast = 0; /* Largest block id written to database */
+ sqlite3_int64 iLastLeaf; /* Largest leaf block id written to db */
+ char *zRoot = NULL; /* Pointer to buffer containing root node */
+ int nRoot = 0; /* Size of buffer zRoot */
+
+ iLastLeaf = pWriter->iFree;
+ rc = fts3WriteSegment(p, pWriter->iFree++, pWriter->aData, pWriter->nData);
+ if( rc==SQLITE_OK ){
+ rc = fts3NodeWrite(p, pWriter->pTree, 1,
+ pWriter->iFirst, pWriter->iFree, &iLast, &zRoot, &nRoot);
+ }
+ if( rc==SQLITE_OK ){
+ rc = fts3WriteSegdir(p, iLevel, iIdx,
+ pWriter->iFirst, iLastLeaf, iLast, pWriter->nLeafData, zRoot, nRoot);
+ }
+ }else{
+ /* The entire tree fits on the root node. Write it to the segdir table. */
+ rc = fts3WriteSegdir(p, iLevel, iIdx,
+ 0, 0, 0, pWriter->nLeafData, pWriter->aData, pWriter->nData);
+ }
+ p->nLeafAdd++;
+ return rc;
+}
+
+/*
+** Release all memory held by the SegmentWriter object passed as the
+** first argument.
+*/
+static void fts3SegWriterFree(SegmentWriter *pWriter){
+ if( pWriter ){
+ sqlite3_free(pWriter->aData);
+ sqlite3_free(pWriter->zMalloc);
+ fts3NodeFree(pWriter->pTree);
+ sqlite3_free(pWriter);
+ }
+}
+
+/*
+** Parameter pRowid is assumed to contain an integer docid. This function
+** tests whether there exist any documents with docid values different
+** from that integer, i.e. whether deleting the document with docid pRowid
+** would leave the FTS3 table empty.
+**
+** If successful, *pisEmpty is set to true if the table is empty except for
+** document pRowid, or false otherwise, and SQLITE_OK is returned. If an
+** error occurs, an SQLite error code is returned.
+*/
+static int fts3IsEmpty(Fts3Table *p, sqlite3_value *pRowid, int *pisEmpty){
+ sqlite3_stmt *pStmt;
+ int rc;
+ if( p->zContentTbl ){
+ /* If using the content=xxx option, assume the table is never empty */
+ *pisEmpty = 0;
+ rc = SQLITE_OK;
+ }else{
+ rc = fts3SqlStmt(p, SQL_IS_EMPTY, &pStmt, &pRowid);
+ if( rc==SQLITE_OK ){
+ if( SQLITE_ROW==sqlite3_step(pStmt) ){
+ *pisEmpty = sqlite3_column_int(pStmt, 0);
+ }
+ rc = sqlite3_reset(pStmt);
+ }
+ }
+ return rc;
+}
+
+/*
+** Set *pnMax to the largest segment level in the database for the index
+** iIndex.
+**
+** Segment levels are stored in the 'level' column of the %_segdir table.
+**
+** Return SQLITE_OK if successful, or an SQLite error code if not.
+*/
+static int fts3SegmentMaxLevel(
+ Fts3Table *p,
+ int iLangid,
+ int iIndex,
+ sqlite3_int64 *pnMax
+){
+ sqlite3_stmt *pStmt;
+ int rc;
+ assert( iIndex>=0 && iIndex<p->nIndex );
+
+  /* Set pStmt to the compiled version of:
+  **
+  **   SELECT max(level) FROM %Q.'%q_segdir' WHERE level BETWEEN ? AND ?
+  **
+  ** where the two bound values delimit the range of absolute levels that
+  ** make up index iIndex for language iLangid.
+  */
+ rc = fts3SqlStmt(p, SQL_SELECT_SEGDIR_MAX_LEVEL, &pStmt, 0);
+ if( rc!=SQLITE_OK ) return rc;
+ sqlite3_bind_int64(pStmt, 1, getAbsoluteLevel(p, iLangid, iIndex, 0));
+ sqlite3_bind_int64(pStmt, 2,
+ getAbsoluteLevel(p, iLangid, iIndex, FTS3_SEGDIR_MAXLEVEL-1)
+ );
+ if( SQLITE_ROW==sqlite3_step(pStmt) ){
+ *pnMax = sqlite3_column_int64(pStmt, 0);
+ }
+ return sqlite3_reset(pStmt);
+}
+
+/*
+** iAbsLevel is an absolute level that may be assumed to exist within
+** the database. This function checks if it is the largest level number
+** within its index. Assuming no error occurs, *pbMax is set to 1 if
+** iAbsLevel is indeed the largest level, or 0 otherwise, and SQLITE_OK
+** is returned. If an error occurs, an error code is returned and the
+** final value of *pbMax is undefined.
+*/
+static int fts3SegmentIsMaxLevel(Fts3Table *p, i64 iAbsLevel, int *pbMax){
+
+  /* Set pStmt to the compiled version of:
+  **
+  **   SELECT max(level) FROM %Q.'%q_segdir' WHERE level BETWEEN ? AND ?
+  **
+  ** where the bound values select the levels, greater than iAbsLevel, that
+  ** belong to the same index as iAbsLevel.
+  */
+ sqlite3_stmt *pStmt;
+ int rc = fts3SqlStmt(p, SQL_SELECT_SEGDIR_MAX_LEVEL, &pStmt, 0);
+ if( rc!=SQLITE_OK ) return rc;
+ sqlite3_bind_int64(pStmt, 1, iAbsLevel+1);
+ sqlite3_bind_int64(pStmt, 2,
+ (((u64)iAbsLevel/FTS3_SEGDIR_MAXLEVEL)+1) * FTS3_SEGDIR_MAXLEVEL
+ );
+
+ *pbMax = 0;
+ if( SQLITE_ROW==sqlite3_step(pStmt) ){
+ *pbMax = sqlite3_column_type(pStmt, 0)==SQLITE_NULL;
+ }
+ return sqlite3_reset(pStmt);
+}
+
+/*
+** Delete all entries in the %_segments table associated with the segment
+** opened with seg-reader pSeg. This function does not affect the contents
+** of the %_segdir table.
+*/
+static int fts3DeleteSegment(
+ Fts3Table *p, /* FTS table handle */
+ Fts3SegReader *pSeg /* Segment to delete */
+){
+ int rc = SQLITE_OK; /* Return code */
+ if( pSeg->iStartBlock ){
+ sqlite3_stmt *pDelete; /* SQL statement to delete rows */
+ rc = fts3SqlStmt(p, SQL_DELETE_SEGMENTS_RANGE, &pDelete, 0);
+ if( rc==SQLITE_OK ){
+ sqlite3_bind_int64(pDelete, 1, pSeg->iStartBlock);
+ sqlite3_bind_int64(pDelete, 2, pSeg->iEndBlock);
+ sqlite3_step(pDelete);
+ rc = sqlite3_reset(pDelete);
+ }
+ }
+ return rc;
+}
+
+/*
+** This function is used after merging multiple segments into a single large
+** segment to delete the old, now redundant, segment b-trees. Specifically,
+** it:
+**
+** 1) Deletes all %_segments entries for the segments associated with
+** each of the SegReader objects in the array passed as the third
+** argument, and
+**
+** 2) deletes all %_segdir entries with level iLevel, or all %_segdir
+** entries regardless of level if (iLevel<0).
+**
+** SQLITE_OK is returned if successful, otherwise an SQLite error code.
+*/
+static int fts3DeleteSegdir(
+ Fts3Table *p, /* Virtual table handle */
+ int iLangid, /* Language id */
+ int iIndex, /* Index for p->aIndex */
+ int iLevel, /* Level of %_segdir entries to delete */
+ Fts3SegReader **apSegment, /* Array of SegReader objects */
+ int nReader /* Size of array apSegment */
+){
+ int rc = SQLITE_OK; /* Return Code */
+ int i; /* Iterator variable */
+ sqlite3_stmt *pDelete = 0; /* SQL statement to delete rows */
+
+ for(i=0; rc==SQLITE_OK && i<nReader; i++){
+ rc = fts3DeleteSegment(p, apSegment[i]);
+ }
+ if( rc!=SQLITE_OK ){
+ return rc;
+ }
+
+ assert( iLevel>=0 || iLevel==FTS3_SEGCURSOR_ALL );
+ if( iLevel==FTS3_SEGCURSOR_ALL ){
+ rc = fts3SqlStmt(p, SQL_DELETE_SEGDIR_RANGE, &pDelete, 0);
+ if( rc==SQLITE_OK ){
+ sqlite3_bind_int64(pDelete, 1, getAbsoluteLevel(p, iLangid, iIndex, 0));
+ sqlite3_bind_int64(pDelete, 2,
+ getAbsoluteLevel(p, iLangid, iIndex, FTS3_SEGDIR_MAXLEVEL-1)
+ );
+ }
+ }else{
+ rc = fts3SqlStmt(p, SQL_DELETE_SEGDIR_LEVEL, &pDelete, 0);
+ if( rc==SQLITE_OK ){
+ sqlite3_bind_int64(
+ pDelete, 1, getAbsoluteLevel(p, iLangid, iIndex, iLevel)
+ );
+ }
+ }
+
+ if( rc==SQLITE_OK ){
+ sqlite3_step(pDelete);
+ rc = sqlite3_reset(pDelete);
+ }
+
+ return rc;
+}
+
+/*
+** When this function is called, buffer *ppList (size *pnList bytes) contains
+** a position list that may (or may not) feature multiple columns. This
+** function adjusts the pointer *ppList and the length *pnList so that they
+** identify the subset of the position list that corresponds to column iCol.
+**
+** If there are no entries in the input position list for column iCol, then
+** *pnList is set to zero before returning.
+**
+** If parameter bZero is non-zero, then any part of the input list following
+** the end of the output list is zeroed before returning.
+*/
+static void fts3ColumnFilter(
+ int iCol, /* Column to filter on */
+ int bZero, /* Zero out anything following *ppList */
+ char **ppList, /* IN/OUT: Pointer to position list */
+ int *pnList /* IN/OUT: Size of buffer *ppList in bytes */
+){
+ char *pList = *ppList;
+ int nList = *pnList;
+ char *pEnd = &pList[nList];
+ int iCurrent = 0;
+ char *p = pList;
+
+ assert( iCol>=0 );
+ while( 1 ){
+ char c = 0;
+ while( p<pEnd && (c | *p)&0xFE ) c = *p++ & 0x80;
+
+ if( iCol==iCurrent ){
+ nList = (int)(p - pList);
+ break;
+ }
+
+ nList -= (int)(p - pList);
+ pList = p;
+ if( nList<=0 ){
+ break;
+ }
+ p = &pList[1];
+ p += fts3GetVarint32(p, &iCurrent);
+ }
+
+ if( bZero && (pEnd - &pList[nList])>0){
+ memset(&pList[nList], 0, pEnd - &pList[nList]);
+ }
+ *ppList = pList;
+ *pnList = nList;
+}
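+
+/* A worked example (not part of the original sources, and compiled out)
+** of the position-list layout that fts3ColumnFilter() walks: positions
+** for column 0 come first, each later column is introduced by an 0x01
+** byte followed by a column-number varint, and a 0x00 byte ends the
+** entry. Filtering on column 1 below leaves the three bytes starting at
+** the 0x01 marker.
+*/
+#if 0
+static void exampleColumnFilter(void){
+  char aList[] = { 0x02, 0x03,         /* Two position varints, column 0 */
+                   0x01, 0x01, 0x02,   /* Column 1, one position varint */
+                   0x00 };             /* End of this doclist entry */
+  char *pList = aList;
+  int nList = sizeof(aList)-1;         /* Size excludes the terminator */
+  fts3ColumnFilter(1, 0, &pList, &nList);
+  assert( pList==&aList[2] && nList==3 );
+}
+#endif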
+
+/*
+** Cache data in the Fts3MultiSegReader.aBuffer[] buffer (overwriting any
+** existing data). Grow the buffer if required.
+**
+** If successful, return SQLITE_OK. Otherwise, if an OOM error is encountered
+** trying to resize the buffer, return SQLITE_NOMEM.
+*/
+static int fts3MsrBufferData(
+ Fts3MultiSegReader *pMsr, /* Multi-segment-reader handle */
+ char *pList,
+ int nList
+){
+ if( nList>pMsr->nBuffer ){
+ char *pNew;
+ pMsr->nBuffer = nList*2;
+ pNew = (char *)sqlite3_realloc(pMsr->aBuffer, pMsr->nBuffer);
+ if( !pNew ) return SQLITE_NOMEM;
+ pMsr->aBuffer = pNew;
+ }
+
+ assert( nList>0 );
+ memcpy(pMsr->aBuffer, pList, nList);
+ return SQLITE_OK;
+}
+
+SQLITE_PRIVATE int sqlite3Fts3MsrIncrNext(
+ Fts3Table *p, /* Virtual table handle */
+ Fts3MultiSegReader *pMsr, /* Multi-segment-reader handle */
+ sqlite3_int64 *piDocid, /* OUT: Docid value */
+ char **paPoslist, /* OUT: Pointer to position list */
+ int *pnPoslist /* OUT: Size of position list in bytes */
+){
+ int nMerge = pMsr->nAdvance;
+ Fts3SegReader **apSegment = pMsr->apSegment;
+ int (*xCmp)(Fts3SegReader *, Fts3SegReader *) = (
+ p->bDescIdx ? fts3SegReaderDoclistCmpRev : fts3SegReaderDoclistCmp
+ );
+
+ if( nMerge==0 ){
+ *paPoslist = 0;
+ return SQLITE_OK;
+ }
+
+ while( 1 ){
+ Fts3SegReader *pSeg;
+ pSeg = pMsr->apSegment[0];
+
+ if( pSeg->pOffsetList==0 ){
+ *paPoslist = 0;
+ break;
+ }else{
+ int rc;
+ char *pList;
+ int nList;
+ int j;
+ sqlite3_int64 iDocid = apSegment[0]->iDocid;
+
+ rc = fts3SegReaderNextDocid(p, apSegment[0], &pList, &nList);
+ j = 1;
+ while( rc==SQLITE_OK
+ && j<nMerge
+ && apSegment[j]->pOffsetList
+ && apSegment[j]->iDocid==iDocid
+ ){
+ rc = fts3SegReaderNextDocid(p, apSegment[j], 0, 0);
+ j++;
+ }
+ if( rc!=SQLITE_OK ) return rc;
+ fts3SegReaderSort(pMsr->apSegment, nMerge, j, xCmp);
+
+ if( nList>0 && fts3SegReaderIsPending(apSegment[0]) ){
+ rc = fts3MsrBufferData(pMsr, pList, nList+1);
+ if( rc!=SQLITE_OK ) return rc;
+ assert( (pMsr->aBuffer[nList] & 0xFE)==0x00 );
+ pList = pMsr->aBuffer;
+ }
+
+ if( pMsr->iColFilter>=0 ){
+ fts3ColumnFilter(pMsr->iColFilter, 1, &pList, &nList);
+ }
+
+ if( nList>0 ){
+ *paPoslist = pList;
+ *piDocid = iDocid;
+ *pnPoslist = nList;
+ break;
+ }
+ }
+ }
+
+ return SQLITE_OK;
+}
+
+static int fts3SegReaderStart(
+ Fts3Table *p, /* Virtual table handle */
+ Fts3MultiSegReader *pCsr, /* Cursor object */
+ const char *zTerm, /* Term searched for (or NULL) */
+ int nTerm /* Length of zTerm in bytes */
+){
+ int i;
+ int nSeg = pCsr->nSegment;
+
+ /* If the Fts3SegFilter defines a specific term (or term prefix) to search
+ ** for, then advance each segment iterator until it points to a term of
+ ** equal or greater value than the specified term. This prevents many
+ ** unnecessary merge/sort operations for the case where single segment
+ ** b-tree leaf nodes contain more than one term.
+ */
+ for(i=0; pCsr->bRestart==0 && i<pCsr->nSegment; i++){
+ int res = 0;
+ Fts3SegReader *pSeg = pCsr->apSegment[i];
+ do {
+ int rc = fts3SegReaderNext(p, pSeg, 0);
+ if( rc!=SQLITE_OK ) return rc;
+ }while( zTerm && (res = fts3SegReaderTermCmp(pSeg, zTerm, nTerm))<0 );
+
+ if( pSeg->bLookup && res!=0 ){
+ fts3SegReaderSetEof(pSeg);
+ }
+ }
+ fts3SegReaderSort(pCsr->apSegment, nSeg, nSeg, fts3SegReaderCmp);
+
+ return SQLITE_OK;
+}
+
+SQLITE_PRIVATE int sqlite3Fts3SegReaderStart(
+ Fts3Table *p, /* Virtual table handle */
+ Fts3MultiSegReader *pCsr, /* Cursor object */
+ Fts3SegFilter *pFilter /* Restrictions on range of iteration */
+){
+ pCsr->pFilter = pFilter;
+ return fts3SegReaderStart(p, pCsr, pFilter->zTerm, pFilter->nTerm);
+}
+
+SQLITE_PRIVATE int sqlite3Fts3MsrIncrStart(
+ Fts3Table *p, /* Virtual table handle */
+ Fts3MultiSegReader *pCsr, /* Cursor object */
+ int iCol, /* Column to match on. */
+ const char *zTerm, /* Term to iterate through a doclist for */
+ int nTerm /* Number of bytes in zTerm */
+){
+ int i;
+ int rc;
+ int nSegment = pCsr->nSegment;
+ int (*xCmp)(Fts3SegReader *, Fts3SegReader *) = (
+ p->bDescIdx ? fts3SegReaderDoclistCmpRev : fts3SegReaderDoclistCmp
+ );
+
+ assert( pCsr->pFilter==0 );
+ assert( zTerm && nTerm>0 );
+
+ /* Advance each segment iterator until it points to the term zTerm/nTerm. */
+ rc = fts3SegReaderStart(p, pCsr, zTerm, nTerm);
+ if( rc!=SQLITE_OK ) return rc;
+
+ /* Determine how many of the segments actually point to zTerm/nTerm. */
+ for(i=0; i<nSegment; i++){
+ Fts3SegReader *pSeg = pCsr->apSegment[i];
+ if( !pSeg->aNode || fts3SegReaderTermCmp(pSeg, zTerm, nTerm) ){
+ break;
+ }
+ }
+ pCsr->nAdvance = i;
+
+ /* Advance each of the segments to point to the first docid. */
+ for(i=0; i<pCsr->nAdvance; i++){
+ rc = fts3SegReaderFirstDocid(p, pCsr->apSegment[i]);
+ if( rc!=SQLITE_OK ) return rc;
+ }
+ fts3SegReaderSort(pCsr->apSegment, i, i, xCmp);
+
+ assert( iCol<0 || iCol<p->nColumn );
+ pCsr->iColFilter = iCol;
+
+ return SQLITE_OK;
+}
+
+/*
+** This function is called on a MultiSegReader that has been started using
+** sqlite3Fts3MsrIncrStart(). One or more calls to MsrIncrNext() may also
+** have been made. Calling this function puts the MultiSegReader in such
+** a state that if the next two calls are:
+**
+** sqlite3Fts3SegReaderStart()
+** sqlite3Fts3SegReaderStep()
+**
+** then the entire doclist for the term is available in
+** MultiSegReader.aDoclist/nDoclist.
+*/
+SQLITE_PRIVATE int sqlite3Fts3MsrIncrRestart(Fts3MultiSegReader *pCsr){
+ int i; /* Used to iterate through segment-readers */
+
+ assert( pCsr->zTerm==0 );
+ assert( pCsr->nTerm==0 );
+ assert( pCsr->aDoclist==0 );
+ assert( pCsr->nDoclist==0 );
+
+ pCsr->nAdvance = 0;
+ pCsr->bRestart = 1;
+ for(i=0; i<pCsr->nSegment; i++){
+ pCsr->apSegment[i]->pOffsetList = 0;
+ pCsr->apSegment[i]->nOffsetList = 0;
+ pCsr->apSegment[i]->iDocid = 0;
+ }
+
+ return SQLITE_OK;
+}
+
+static int fts3GrowSegReaderBuffer(Fts3MultiSegReader *pCsr, int nReq){
+ if( nReq>pCsr->nBuffer ){
+ char *aNew;
+ pCsr->nBuffer = nReq*2;
+ aNew = sqlite3_realloc(pCsr->aBuffer, pCsr->nBuffer);
+ if( !aNew ){
+ return SQLITE_NOMEM;
+ }
+ pCsr->aBuffer = aNew;
+ }
+ return SQLITE_OK;
+}
+
+
+SQLITE_PRIVATE int sqlite3Fts3SegReaderStep(
+ Fts3Table *p, /* Virtual table handle */
+ Fts3MultiSegReader *pCsr /* Cursor object */
+){
+ int rc = SQLITE_OK;
+
+ int isIgnoreEmpty = (pCsr->pFilter->flags & FTS3_SEGMENT_IGNORE_EMPTY);
+ int isRequirePos = (pCsr->pFilter->flags & FTS3_SEGMENT_REQUIRE_POS);
+ int isColFilter = (pCsr->pFilter->flags & FTS3_SEGMENT_COLUMN_FILTER);
+ int isPrefix = (pCsr->pFilter->flags & FTS3_SEGMENT_PREFIX);
+ int isScan = (pCsr->pFilter->flags & FTS3_SEGMENT_SCAN);
+ int isFirst = (pCsr->pFilter->flags & FTS3_SEGMENT_FIRST);
+
+ Fts3SegReader **apSegment = pCsr->apSegment;
+ int nSegment = pCsr->nSegment;
+ Fts3SegFilter *pFilter = pCsr->pFilter;
+ int (*xCmp)(Fts3SegReader *, Fts3SegReader *) = (
+ p->bDescIdx ? fts3SegReaderDoclistCmpRev : fts3SegReaderDoclistCmp
+ );
+
+ if( pCsr->nSegment==0 ) return SQLITE_OK;
+
+ do {
+ int nMerge;
+ int i;
+
+ /* Advance the first pCsr->nAdvance entries in the apSegment[] array
+ ** forward. Then sort the list in order of current term again.
+ */
+ for(i=0; i<pCsr->nAdvance; i++){
+ Fts3SegReader *pSeg = apSegment[i];
+ if( pSeg->bLookup ){
+ fts3SegReaderSetEof(pSeg);
+ }else{
+ rc = fts3SegReaderNext(p, pSeg, 0);
+ }
+ if( rc!=SQLITE_OK ) return rc;
+ }
+ fts3SegReaderSort(apSegment, nSegment, pCsr->nAdvance, fts3SegReaderCmp);
+ pCsr->nAdvance = 0;
+
+    /* If all the seg-readers are at EOF, we're finished. Return SQLITE_OK. */
+ assert( rc==SQLITE_OK );
+ if( apSegment[0]->aNode==0 ) break;
+
+ pCsr->nTerm = apSegment[0]->nTerm;
+ pCsr->zTerm = apSegment[0]->zTerm;
+
+    /* If this is a prefix-search, and if the term that apSegment[0] points
+    ** to does not have pFilter->zTerm/nTerm as a prefix, then all
+    ** required callbacks have been made. In this case exit early.
+ **
+ ** Similarly, if this is a search for an exact match, and the first term
+ ** of segment apSegment[0] is not a match, exit early.
+ */
+ if( pFilter->zTerm && !isScan ){
+ if( pCsr->nTerm<pFilter->nTerm
+ || (!isPrefix && pCsr->nTerm>pFilter->nTerm)
+ || memcmp(pCsr->zTerm, pFilter->zTerm, pFilter->nTerm)
+ ){
+ break;
+ }
+ }
+
+ nMerge = 1;
+ while( nMerge<nSegment
+ && apSegment[nMerge]->aNode
+ && apSegment[nMerge]->nTerm==pCsr->nTerm
+ && 0==memcmp(pCsr->zTerm, apSegment[nMerge]->zTerm, pCsr->nTerm)
+ ){
+ nMerge++;
+ }
+
+ assert( isIgnoreEmpty || (isRequirePos && !isColFilter) );
+ if( nMerge==1
+ && !isIgnoreEmpty
+ && !isFirst
+ && (p->bDescIdx==0 || fts3SegReaderIsPending(apSegment[0])==0)
+ ){
+ pCsr->nDoclist = apSegment[0]->nDoclist;
+ if( fts3SegReaderIsPending(apSegment[0]) ){
+ rc = fts3MsrBufferData(pCsr, apSegment[0]->aDoclist, pCsr->nDoclist);
+ pCsr->aDoclist = pCsr->aBuffer;
+ }else{
+ pCsr->aDoclist = apSegment[0]->aDoclist;
+ }
+ if( rc==SQLITE_OK ) rc = SQLITE_ROW;
+ }else{
+ int nDoclist = 0; /* Size of doclist */
+ sqlite3_int64 iPrev = 0; /* Previous docid stored in doclist */
+
+ /* The current term of the first nMerge entries in the array
+ ** of Fts3SegReader objects is the same. The doclists must be merged
+ ** and a single term returned with the merged doclist.
+ */
+ for(i=0; i<nMerge; i++){
+ fts3SegReaderFirstDocid(p, apSegment[i]);
+ }
+ fts3SegReaderSort(apSegment, nMerge, nMerge, xCmp);
+ while( apSegment[0]->pOffsetList ){
+ int j; /* Number of segments that share a docid */
+ char *pList = 0;
+ int nList = 0;
+ int nByte;
+ sqlite3_int64 iDocid = apSegment[0]->iDocid;
+ fts3SegReaderNextDocid(p, apSegment[0], &pList, &nList);
+ j = 1;
+ while( j<nMerge
+ && apSegment[j]->pOffsetList
+ && apSegment[j]->iDocid==iDocid
+ ){
+ fts3SegReaderNextDocid(p, apSegment[j], 0, 0);
+ j++;
+ }
+
+ if( isColFilter ){
+ fts3ColumnFilter(pFilter->iCol, 0, &pList, &nList);
+ }
+
+ if( !isIgnoreEmpty || nList>0 ){
+
+ /* Calculate the 'docid' delta value to write into the merged
+ ** doclist. */
+ sqlite3_int64 iDelta;
+ if( p->bDescIdx && nDoclist>0 ){
+ if( iPrev<=iDocid ) return FTS_CORRUPT_VTAB;
+ iDelta = (i64)((u64)iPrev - (u64)iDocid);
+ }else{
+ if( nDoclist>0 && iPrev>=iDocid ) return FTS_CORRUPT_VTAB;
+ iDelta = (i64)((u64)iDocid - (u64)iPrev);
+ }
+
+ nByte = sqlite3Fts3VarintLen(iDelta) + (isRequirePos?nList+1:0);
+
+ rc = fts3GrowSegReaderBuffer(pCsr, nByte+nDoclist);
+ if( rc ) return rc;
+
+ if( isFirst ){
+ char *a = &pCsr->aBuffer[nDoclist];
+ int nWrite;
+
+ nWrite = sqlite3Fts3FirstFilter(iDelta, pList, nList, a);
+ if( nWrite ){
+ iPrev = iDocid;
+ nDoclist += nWrite;
+ }
+ }else{
+ nDoclist += sqlite3Fts3PutVarint(&pCsr->aBuffer[nDoclist], iDelta);
+ iPrev = iDocid;
+ if( isRequirePos ){
+ memcpy(&pCsr->aBuffer[nDoclist], pList, nList);
+ nDoclist += nList;
+ pCsr->aBuffer[nDoclist++] = '\0';
+ }
+ }
+ }
+
+ fts3SegReaderSort(apSegment, nMerge, j, xCmp);
+ }
+ if( nDoclist>0 ){
+ rc = fts3GrowSegReaderBuffer(pCsr, nDoclist+FTS3_NODE_PADDING);
+ if( rc ) return rc;
+ memset(&pCsr->aBuffer[nDoclist], 0, FTS3_NODE_PADDING);
+ pCsr->aDoclist = pCsr->aBuffer;
+ pCsr->nDoclist = nDoclist;
+ rc = SQLITE_ROW;
+ }
+ }
+ pCsr->nAdvance = nMerge;
+ }while( rc==SQLITE_OK );
+
+ return rc;
+}
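+
+/* Illustrative sketch (not part of the build): the merge loop above
+** writes each docid as a varint delta against the previous docid, so
+** the ascending docids 10, 14, 21 become the varints 10, 4, 7 (the
+** descending-index case simply reverses the subtraction). A minimal
+** encoder using the varint helper from this file:
+*/
+#if 0
+static int exampleEncodeDocids(
+  const sqlite3_int64 *aDocid, /* Ascending docids to encode */
+  int nDocid, /* Number of entries in aDocid[] */
+  char *aOut /* OUT: assumed large enough (10 bytes per docid) */
+){
+  int n = 0; /* Bytes written so far */
+  sqlite3_int64 iPrev = 0; /* Deltas are taken against this */
+  int i;
+  for(i=0; i<nDocid; i++){
+    n += sqlite3Fts3PutVarint(&aOut[n], aDocid[i]-iPrev);
+    iPrev = aDocid[i];
+  }
+  return n; /* Size of the encoded list in bytes */
+}
+#endif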
+
+
+SQLITE_PRIVATE void sqlite3Fts3SegReaderFinish(
+ Fts3MultiSegReader *pCsr /* Cursor object */
+){
+ if( pCsr ){
+ int i;
+ for(i=0; i<pCsr->nSegment; i++){
+ sqlite3Fts3SegReaderFree(pCsr->apSegment[i]);
+ }
+ sqlite3_free(pCsr->apSegment);
+ sqlite3_free(pCsr->aBuffer);
+
+ pCsr->nSegment = 0;
+ pCsr->apSegment = 0;
+ pCsr->aBuffer = 0;
+ }
+}
+
+/*
+** Decode the "end_block" field, selected by column iCol of the SELECT
+** statement passed as the first argument.
+**
+** The "end_block" field may contain either an integer, or a text field
+** containing the text representation of two non-negative integers separated
+** by one or more space (0x20) characters. In the first case, set *piEndBlock
+** to the integer value and *pnByte to zero before returning. In the second,
+** set *piEndBlock to the first value and *pnByte to the second.
+*/
+static void fts3ReadEndBlockField(
+ sqlite3_stmt *pStmt,
+ int iCol,
+ i64 *piEndBlock,
+ i64 *pnByte
+){
+ const unsigned char *zText = sqlite3_column_text(pStmt, iCol);
+ if( zText ){
+ int i;
+ int iMul = 1;
+ u64 iVal = 0;
+ for(i=0; zText[i]>='0' && zText[i]<='9'; i++){
+ iVal = iVal*10 + (zText[i] - '0');
+ }
+ *piEndBlock = (i64)iVal;
+ while( zText[i]==' ' ) i++;
+ iVal = 0;
+ if( zText[i]=='-' ){
+ i++;
+ iMul = -1;
+ }
+ for(/* no-op */; zText[i]>='0' && zText[i]<='9'; i++){
+ iVal = iVal*10 + (zText[i] - '0');
+ }
+ *pnByte = ((i64)iVal * (i64)iMul);
+ }
+}
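+
+/* Worked example (values invented) of the two encodings accepted by
+** fts3ReadEndBlockField():
+**
+** '1000'        ->  *piEndBlock = 1000, *pnByte = 0
+** '1000 48213'  ->  *piEndBlock = 1000, *pnByte = 48213
+** '1000 -2500'  ->  *piEndBlock = 1000, *pnByte = -2500
+*/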
+
+
+/*
+** A segment of size nByte bytes has just been written to absolute level
+** iAbsLevel. Promote any segments that should be promoted as a result.
+*/
+static int fts3PromoteSegments(
+ Fts3Table *p, /* FTS table handle */
+ sqlite3_int64 iAbsLevel, /* Absolute level just updated */
+ sqlite3_int64 nByte /* Size of new segment at iAbsLevel */
+){
+ int rc = SQLITE_OK;
+ sqlite3_stmt *pRange;
+
+ rc = fts3SqlStmt(p, SQL_SELECT_LEVEL_RANGE2, &pRange, 0);
+
+ if( rc==SQLITE_OK ){
+ int bOk = 0;
+ i64 iLast = (iAbsLevel/FTS3_SEGDIR_MAXLEVEL + 1) * FTS3_SEGDIR_MAXLEVEL - 1;
+ i64 nLimit = (nByte*3)/2;
+
+ /* Loop through all entries in the %_segdir table corresponding to
+ ** segments in this index on levels greater than iAbsLevel. If there is
+ ** at least one such segment, and it is possible to determine that all
+ ** such segments are smaller than nLimit bytes in size, they will be
+ ** promoted to level iAbsLevel. */
+ sqlite3_bind_int64(pRange, 1, iAbsLevel+1);
+ sqlite3_bind_int64(pRange, 2, iLast);
+ while( SQLITE_ROW==sqlite3_step(pRange) ){
+ i64 nSize = 0, dummy;
+ fts3ReadEndBlockField(pRange, 2, &dummy, &nSize);
+ if( nSize<=0 || nSize>nLimit ){
+        /* If nSize==0, then the %_segdir.end_block field does not
+ ** contain a size value. This happens if it was written by an
+ ** old version of FTS. In this case it is not possible to determine
+ ** the size of the segment, and so segment promotion does not
+ ** take place. */
+ bOk = 0;
+ break;
+ }
+ bOk = 1;
+ }
+ rc = sqlite3_reset(pRange);
+
+ if( bOk ){
+ int iIdx = 0;
+ sqlite3_stmt *pUpdate1 = 0;
+ sqlite3_stmt *pUpdate2 = 0;
+
+ if( rc==SQLITE_OK ){
+ rc = fts3SqlStmt(p, SQL_UPDATE_LEVEL_IDX, &pUpdate1, 0);
+ }
+ if( rc==SQLITE_OK ){
+ rc = fts3SqlStmt(p, SQL_UPDATE_LEVEL, &pUpdate2, 0);
+ }
+
+ if( rc==SQLITE_OK ){
+
+ /* Loop through all %_segdir entries for segments in this index with
+ ** levels equal to or greater than iAbsLevel. As each entry is visited,
+        ** update it to set (level = -1) and (idx = N), where N is 0 for the
+ ** oldest segment in the range, 1 for the next oldest, and so on.
+ **
+ ** In other words, move all segments being promoted to level -1,
+ ** setting the "idx" fields as appropriate to keep them in the same
+ ** order. The contents of level -1 (which is never used, except
+        ** transiently here) will be moved back to level iAbsLevel below. */
+ sqlite3_bind_int64(pRange, 1, iAbsLevel);
+ while( SQLITE_ROW==sqlite3_step(pRange) ){
+ sqlite3_bind_int(pUpdate1, 1, iIdx++);
+ sqlite3_bind_int(pUpdate1, 2, sqlite3_column_int(pRange, 0));
+ sqlite3_bind_int(pUpdate1, 3, sqlite3_column_int(pRange, 1));
+ sqlite3_step(pUpdate1);
+ rc = sqlite3_reset(pUpdate1);
+ if( rc!=SQLITE_OK ){
+ sqlite3_reset(pRange);
+ break;
+ }
+ }
+ }
+ if( rc==SQLITE_OK ){
+ rc = sqlite3_reset(pRange);
+ }
+
+ /* Move level -1 to level iAbsLevel */
+ if( rc==SQLITE_OK ){
+ sqlite3_bind_int64(pUpdate2, 1, iAbsLevel);
+ sqlite3_step(pUpdate2);
+ rc = sqlite3_reset(pUpdate2);
+ }
+ }
+ }
+
+
+ return rc;
+}
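+
+/* Worked example (sizes invented): if a segment of nByte==10000 bytes
+** has just been written to level iAbsLevel, then nLimit==15000. If the
+** levels above iAbsLevel within the same index contain at least one
+** segment, and every such segment has a recorded size of 15000 bytes or
+** less, all of them are moved down to level iAbsLevel. A single larger
+** segment, or one with no recorded size, disables the promotion.
+*/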
+
+/*
+** Merge all level iLevel segments in the database into a single
+** iLevel+1 segment. Or, if iLevel<0, merge all segments into a
+** single segment with a level equal to the numerically largest level
+** currently present in the database.
+**
+** If this function is called with iLevel<0, but there is only one
+** segment in the database, SQLITE_DONE is returned immediately.
+** Otherwise, if successful, SQLITE_OK is returned. If an error occurs,
+** an SQLite error code is returned.
+*/
+static int fts3SegmentMerge(
+ Fts3Table *p,
+ int iLangid, /* Language id to merge */
+ int iIndex, /* Index in p->aIndex[] to merge */
+ int iLevel /* Level to merge */
+){
+ int rc; /* Return code */
+ int iIdx = 0; /* Index of new segment */
+ sqlite3_int64 iNewLevel = 0; /* Level/index to create new segment at */
+ SegmentWriter *pWriter = 0; /* Used to write the new, merged, segment */
+ Fts3SegFilter filter; /* Segment term filter condition */
+ Fts3MultiSegReader csr; /* Cursor to iterate through level(s) */
+ int bIgnoreEmpty = 0; /* True to ignore empty segments */
+ i64 iMaxLevel = 0; /* Max level number for this index/langid */
+
+ assert( iLevel==FTS3_SEGCURSOR_ALL
+ || iLevel==FTS3_SEGCURSOR_PENDING
+ || iLevel>=0
+ );
+ assert( iLevel<FTS3_SEGDIR_MAXLEVEL );
+ assert( iIndex>=0 && iIndex<p->nIndex );
+
+ rc = sqlite3Fts3SegReaderCursor(p, iLangid, iIndex, iLevel, 0, 0, 1, 0, &csr);
+ if( rc!=SQLITE_OK || csr.nSegment==0 ) goto finished;
+
+ if( iLevel!=FTS3_SEGCURSOR_PENDING ){
+ rc = fts3SegmentMaxLevel(p, iLangid, iIndex, &iMaxLevel);
+ if( rc!=SQLITE_OK ) goto finished;
+ }
+
+ if( iLevel==FTS3_SEGCURSOR_ALL ){
+ /* This call is to merge all segments in the database to a single
+ ** segment. The level of the new segment is equal to the numerically
+ ** greatest segment level currently present in the database for this
+ ** index. The idx of the new segment is always 0. */
+ if( csr.nSegment==1 && 0==fts3SegReaderIsPending(csr.apSegment[0]) ){
+ rc = SQLITE_DONE;
+ goto finished;
+ }
+ iNewLevel = iMaxLevel;
+ bIgnoreEmpty = 1;
+
+ }else{
+    /* This call is to merge all segments at level iLevel. Find the next
+ ** available segment index at level iLevel+1. The call to
+ ** fts3AllocateSegdirIdx() will merge the segments at level iLevel+1 to
+ ** a single iLevel+2 segment if necessary. */
+ assert( FTS3_SEGCURSOR_PENDING==-1 );
+ iNewLevel = getAbsoluteLevel(p, iLangid, iIndex, iLevel+1);
+ rc = fts3AllocateSegdirIdx(p, iLangid, iIndex, iLevel+1, &iIdx);
+ bIgnoreEmpty = (iLevel!=FTS3_SEGCURSOR_PENDING) && (iNewLevel>iMaxLevel);
+ }
+ if( rc!=SQLITE_OK ) goto finished;
+
+ assert( csr.nSegment>0 );
+ assert_fts3_nc( iNewLevel>=getAbsoluteLevel(p, iLangid, iIndex, 0) );
+ assert_fts3_nc(
+ iNewLevel<getAbsoluteLevel(p, iLangid, iIndex,FTS3_SEGDIR_MAXLEVEL)
+ );
+
+ memset(&filter, 0, sizeof(Fts3SegFilter));
+ filter.flags = FTS3_SEGMENT_REQUIRE_POS;
+ filter.flags |= (bIgnoreEmpty ? FTS3_SEGMENT_IGNORE_EMPTY : 0);
+
+ rc = sqlite3Fts3SegReaderStart(p, &csr, &filter);
+ while( SQLITE_OK==rc ){
+ rc = sqlite3Fts3SegReaderStep(p, &csr);
+ if( rc!=SQLITE_ROW ) break;
+ rc = fts3SegWriterAdd(p, &pWriter, 1,
+ csr.zTerm, csr.nTerm, csr.aDoclist, csr.nDoclist);
+ }
+ if( rc!=SQLITE_OK ) goto finished;
+ assert_fts3_nc( pWriter || bIgnoreEmpty );
+
+ if( iLevel!=FTS3_SEGCURSOR_PENDING ){
+ rc = fts3DeleteSegdir(
+ p, iLangid, iIndex, iLevel, csr.apSegment, csr.nSegment
+ );
+ if( rc!=SQLITE_OK ) goto finished;
+ }
+ if( pWriter ){
+ rc = fts3SegWriterFlush(p, pWriter, iNewLevel, iIdx);
+ if( rc==SQLITE_OK ){
+ if( iLevel==FTS3_SEGCURSOR_PENDING || iNewLevel<iMaxLevel ){
+ rc = fts3PromoteSegments(p, iNewLevel, pWriter->nLeafData);
+ }
+ }
+ }
+
+ finished:
+ fts3SegWriterFree(pWriter);
+ sqlite3Fts3SegReaderFinish(&csr);
+ return rc;
+}
+
+
+/*
+** Flush the contents of pendingTerms to level 0 segments.
+*/
+SQLITE_PRIVATE int sqlite3Fts3PendingTermsFlush(Fts3Table *p){
+ int rc = SQLITE_OK;
+ int i;
+
+ for(i=0; rc==SQLITE_OK && i<p->nIndex; i++){
+ rc = fts3SegmentMerge(p, p->iPrevLangid, i, FTS3_SEGCURSOR_PENDING);
+ if( rc==SQLITE_DONE ) rc = SQLITE_OK;
+ }
+ sqlite3Fts3PendingTermsClear(p);
+
+ /* Determine the auto-incr-merge setting if unknown. If enabled,
+  ** estimate the number of leaf blocks of content to be written.
+ */
+ if( rc==SQLITE_OK && p->bHasStat
+ && p->nAutoincrmerge==0xff && p->nLeafAdd>0
+ ){
+ sqlite3_stmt *pStmt = 0;
+ rc = fts3SqlStmt(p, SQL_SELECT_STAT, &pStmt, 0);
+ if( rc==SQLITE_OK ){
+ sqlite3_bind_int(pStmt, 1, FTS_STAT_AUTOINCRMERGE);
+ rc = sqlite3_step(pStmt);
+ if( rc==SQLITE_ROW ){
+ p->nAutoincrmerge = sqlite3_column_int(pStmt, 0);
+ if( p->nAutoincrmerge==1 ) p->nAutoincrmerge = 8;
+ }else if( rc==SQLITE_DONE ){
+ p->nAutoincrmerge = 0;
+ }
+ rc = sqlite3_reset(pStmt);
+ }
+ }
+ return rc;
+}
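+
+/* A note on the logic above: p->nAutoincrmerge==0xff means the setting
+** has not yet been read. After the query it holds 0 (auto-merge disabled)
+** when no FTS_STAT_AUTOINCRMERGE row exists, 8 when the stored value is
+** the legacy boolean 1, or the stored value itself otherwise.
+*/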
+
+/*
+** Encode N integers as varints into a blob.
+*/
+static void fts3EncodeIntArray(
+ int N, /* The number of integers to encode */
+ u32 *a, /* The integer values */
+ char *zBuf, /* Write the BLOB here */
+ int *pNBuf /* Write number of bytes if zBuf[] used here */
+){
+ int i, j;
+ for(i=j=0; i<N; i++){
+ j += sqlite3Fts3PutVarint(&zBuf[j], (sqlite3_int64)a[i]);
+ }
+ *pNBuf = j;
+}
+
+/*
+** Decode a blob of varints into N integers
+*/
+static void fts3DecodeIntArray(
+ int N, /* The number of integers to decode */
+ u32 *a, /* Write the integer values */
+ const char *zBuf, /* The BLOB containing the varints */
+ int nBuf /* size of the BLOB */
+){
+ int i = 0;
+ if( nBuf && (zBuf[nBuf-1]&0x80)==0 ){
+ int j;
+ for(i=j=0; i<N && j<nBuf; i++){
+ sqlite3_int64 x;
+ j += sqlite3Fts3GetVarint(&zBuf[j], &x);
+ a[i] = (u32)(x & 0xffffffff);
+ }
+ }
+ while( i<N ) a[i++] = 0;
+}
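+
+/* Illustrative sketch (not part of the build): round-tripping an array
+** of column sizes through the two helpers above. Each u32 occupies at
+** most 5 varint bytes, which is why fts3InsertDocsize() below can safely
+** allocate 10 bytes per column.
+*/
+#if 0
+static void exampleIntArrayRoundTrip(void){
+  u32 aIn[3] = {7, 0, 123456}; /* Example token counts */
+  u32 aOut[3];
+  char aBlob[3*10]; /* Generous worst-case space */
+  int nBlob = 0;
+  fts3EncodeIntArray(3, aIn, aBlob, &nBlob);
+  fts3DecodeIntArray(3, aOut, aBlob, nBlob);
+  assert( aOut[0]==7 && aOut[1]==0 && aOut[2]==123456 );
+}
+#endif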
+
+/*
+** Insert the sizes (in tokens) for each column of the document
+** with docid equal to p->iPrevDocid. The sizes are encoded as
+** a blob of varints.
+*/
+static void fts3InsertDocsize(
+ int *pRC, /* Result code */
+ Fts3Table *p, /* Table into which to insert */
+ u32 *aSz /* Sizes of each column, in tokens */
+){
+ char *pBlob; /* The BLOB encoding of the document size */
+ int nBlob; /* Number of bytes in the BLOB */
+ sqlite3_stmt *pStmt; /* Statement used to insert the encoding */
+ int rc; /* Result code from subfunctions */
+
+ if( *pRC ) return;
+ pBlob = sqlite3_malloc64( 10*(sqlite3_int64)p->nColumn );
+ if( pBlob==0 ){
+ *pRC = SQLITE_NOMEM;
+ return;
+ }
+ fts3EncodeIntArray(p->nColumn, aSz, pBlob, &nBlob);
+ rc = fts3SqlStmt(p, SQL_REPLACE_DOCSIZE, &pStmt, 0);
+ if( rc ){
+ sqlite3_free(pBlob);
+ *pRC = rc;
+ return;
+ }
+ sqlite3_bind_int64(pStmt, 1, p->iPrevDocid);
+ sqlite3_bind_blob(pStmt, 2, pBlob, nBlob, sqlite3_free);
+ sqlite3_step(pStmt);
+ *pRC = sqlite3_reset(pStmt);
+}
+
+/*
+** Record 0 of the %_stat table contains a blob consisting of N varints,
+** where N is the number of user-defined columns in the fts3 table plus
+** two. If nCol is the number of user-defined columns, then the values of
+** the varints are set as follows:
+**
+** Varint 0: Total number of rows in the table.
+**
+** Varint 1..nCol: For each column, the total number of tokens stored in
+** the column for all rows of the table.
+**
+** Varint 1+nCol: The total size, in bytes, of all text values in all
+** columns of all rows of the table.
+**
+*/
+static void fts3UpdateDocTotals(
+ int *pRC, /* The result code */
+ Fts3Table *p, /* Table being updated */
+ u32 *aSzIns, /* Size increases */
+ u32 *aSzDel, /* Size decreases */
+ int nChng /* Change in the number of documents */
+){
+ char *pBlob; /* Storage for BLOB written into %_stat */
+ int nBlob; /* Size of BLOB written into %_stat */
+ u32 *a; /* Array of integers that becomes the BLOB */
+ sqlite3_stmt *pStmt; /* Statement for reading and writing */
+ int i; /* Loop counter */
+ int rc; /* Result code from subfunctions */
+
+ const int nStat = p->nColumn+2;
+
+ if( *pRC ) return;
+ a = sqlite3_malloc64( (sizeof(u32)+10)*(sqlite3_int64)nStat );
+ if( a==0 ){
+ *pRC = SQLITE_NOMEM;
+ return;
+ }
+ pBlob = (char*)&a[nStat];
+ rc = fts3SqlStmt(p, SQL_SELECT_STAT, &pStmt, 0);
+ if( rc ){
+ sqlite3_free(a);
+ *pRC = rc;
+ return;
+ }
+ sqlite3_bind_int(pStmt, 1, FTS_STAT_DOCTOTAL);
+ if( sqlite3_step(pStmt)==SQLITE_ROW ){
+ fts3DecodeIntArray(nStat, a,
+ sqlite3_column_blob(pStmt, 0),
+ sqlite3_column_bytes(pStmt, 0));
+ }else{
+ memset(a, 0, sizeof(u32)*(nStat) );
+ }
+ rc = sqlite3_reset(pStmt);
+ if( rc!=SQLITE_OK ){
+ sqlite3_free(a);
+ *pRC = rc;
+ return;
+ }
+ if( nChng<0 && a[0]<(u32)(-nChng) ){
+ a[0] = 0;
+ }else{
+ a[0] += nChng;
+ }
+ for(i=0; i<p->nColumn+1; i++){
+ u32 x = a[i+1];
+ if( x+aSzIns[i] < aSzDel[i] ){
+ x = 0;
+ }else{
+ x = x + aSzIns[i] - aSzDel[i];
+ }
+ a[i+1] = x;
+ }
+ fts3EncodeIntArray(nStat, a, pBlob, &nBlob);
+ rc = fts3SqlStmt(p, SQL_REPLACE_STAT, &pStmt, 0);
+ if( rc ){
+ sqlite3_free(a);
+ *pRC = rc;
+ return;
+ }
+ sqlite3_bind_int(pStmt, 1, FTS_STAT_DOCTOTAL);
+ sqlite3_bind_blob(pStmt, 2, pBlob, nBlob, SQLITE_STATIC);
+ sqlite3_step(pStmt);
+ *pRC = sqlite3_reset(pStmt);
+ sqlite3_bind_null(pStmt, 2);
+ sqlite3_free(a);
+}
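+
+/* Worked example (values invented): for a two-column fts4 table holding
+** 3 rows, with 10 tokens in the first column, 4 in the second, and 250
+** bytes of text across all rows, record 0 of %_stat is the varint
+** encoding of the array {3, 10, 4, 250}.
+*/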
+
+/*
+** Merge the entire database so that there is one segment for each
+** iIndex/iLangid combination.
+*/
+static int fts3DoOptimize(Fts3Table *p, int bReturnDone){
+ int bSeenDone = 0;
+ int rc;
+ sqlite3_stmt *pAllLangid = 0;
+
+ rc = sqlite3Fts3PendingTermsFlush(p);
+ if( rc==SQLITE_OK ){
+ rc = fts3SqlStmt(p, SQL_SELECT_ALL_LANGID, &pAllLangid, 0);
+ }
+ if( rc==SQLITE_OK ){
+ int rc2;
+ sqlite3_bind_int(pAllLangid, 1, p->iPrevLangid);
+ sqlite3_bind_int(pAllLangid, 2, p->nIndex);
+ while( sqlite3_step(pAllLangid)==SQLITE_ROW ){
+ int i;
+ int iLangid = sqlite3_column_int(pAllLangid, 0);
+ for(i=0; rc==SQLITE_OK && i<p->nIndex; i++){
+ rc = fts3SegmentMerge(p, iLangid, i, FTS3_SEGCURSOR_ALL);
+ if( rc==SQLITE_DONE ){
+ bSeenDone = 1;
+ rc = SQLITE_OK;
+ }
+ }
+ }
+ rc2 = sqlite3_reset(pAllLangid);
+ if( rc==SQLITE_OK ) rc = rc2;
+ }
+
+ sqlite3Fts3SegmentsClose(p);
+
+ return (rc==SQLITE_OK && bReturnDone && bSeenDone) ? SQLITE_DONE : rc;
+}
+
+/*
+** This function is called when the user executes the following statement:
+**
+** INSERT INTO <tbl>(<tbl>) VALUES('rebuild');
+**
+** The entire FTS index is discarded and rebuilt. If the table is one
+** created using the content=xxx option, then the new index is based on
+** the current contents of the xxx table. Otherwise, it is rebuilt based
+** on the contents of the %_content table.
+*/
+static int fts3DoRebuild(Fts3Table *p){
+ int rc; /* Return Code */
+
+ rc = fts3DeleteAll(p, 0);
+ if( rc==SQLITE_OK ){
+ u32 *aSz = 0;
+ u32 *aSzIns = 0;
+ u32 *aSzDel = 0;
+ sqlite3_stmt *pStmt = 0;
+ int nEntry = 0;
+
+ /* Compose and prepare an SQL statement to loop through the content table */
+ char *zSql = sqlite3_mprintf("SELECT %s" , p->zReadExprlist);
+ if( !zSql ){
+ rc = SQLITE_NOMEM;
+ }else{
+ rc = sqlite3_prepare_v2(p->db, zSql, -1, &pStmt, 0);
+ sqlite3_free(zSql);
+ }
+
+ if( rc==SQLITE_OK ){
+ sqlite3_int64 nByte = sizeof(u32) * ((sqlite3_int64)p->nColumn+1)*3;
+ aSz = (u32 *)sqlite3_malloc64(nByte);
+ if( aSz==0 ){
+ rc = SQLITE_NOMEM;
+ }else{
+ memset(aSz, 0, nByte);
+ aSzIns = &aSz[p->nColumn+1];
+ aSzDel = &aSzIns[p->nColumn+1];
+ }
+ }
+
+ while( rc==SQLITE_OK && SQLITE_ROW==sqlite3_step(pStmt) ){
+ int iCol;
+ int iLangid = langidFromSelect(p, pStmt);
+ rc = fts3PendingTermsDocid(p, 0, iLangid, sqlite3_column_int64(pStmt, 0));
+ memset(aSz, 0, sizeof(aSz[0]) * (p->nColumn+1));
+ for(iCol=0; rc==SQLITE_OK && iCol<p->nColumn; iCol++){
+ if( p->abNotindexed[iCol]==0 ){
+ const char *z = (const char *) sqlite3_column_text(pStmt, iCol+1);
+ rc = fts3PendingTermsAdd(p, iLangid, z, iCol, &aSz[iCol]);
+ aSz[p->nColumn] += sqlite3_column_bytes(pStmt, iCol+1);
+ }
+ }
+ if( p->bHasDocsize ){
+ fts3InsertDocsize(&rc, p, aSz);
+ }
+ if( rc!=SQLITE_OK ){
+ sqlite3_finalize(pStmt);
+ pStmt = 0;
+ }else{
+ nEntry++;
+ for(iCol=0; iCol<=p->nColumn; iCol++){
+ aSzIns[iCol] += aSz[iCol];
+ }
+ }
+ }
+ if( p->bFts4 ){
+ fts3UpdateDocTotals(&rc, p, aSzIns, aSzDel, nEntry);
+ }
+ sqlite3_free(aSz);
+
+ if( pStmt ){
+ int rc2 = sqlite3_finalize(pStmt);
+ if( rc==SQLITE_OK ){
+ rc = rc2;
+ }
+ }
+ }
+
+ return rc;
+}
+
+
+/*
+** This function opens a cursor used to read the input data for an
+** incremental merge operation. Specifically, it opens a cursor to scan
+** the oldest nSeg segments (idx=0 through idx=(nSeg-1)) in absolute
+** level iAbsLevel.
+*/
+static int fts3IncrmergeCsr(
+ Fts3Table *p, /* FTS3 table handle */
+ sqlite3_int64 iAbsLevel, /* Absolute level to open */
+ int nSeg, /* Number of segments to merge */
+ Fts3MultiSegReader *pCsr /* Cursor object to populate */
+){
+ int rc; /* Return Code */
+ sqlite3_stmt *pStmt = 0; /* Statement used to read %_segdir entry */
+ sqlite3_int64 nByte; /* Bytes allocated at pCsr->apSegment[] */
+
+  /* Allocate space for the Fts3MultiSegReader.apSegment[] array */
+ memset(pCsr, 0, sizeof(*pCsr));
+ nByte = sizeof(Fts3SegReader *) * nSeg;
+ pCsr->apSegment = (Fts3SegReader **)sqlite3_malloc64(nByte);
+
+ if( pCsr->apSegment==0 ){
+ rc = SQLITE_NOMEM;
+ }else{
+ memset(pCsr->apSegment, 0, nByte);
+ rc = fts3SqlStmt(p, SQL_SELECT_LEVEL, &pStmt, 0);
+ }
+ if( rc==SQLITE_OK ){
+ int i;
+ int rc2;
+ sqlite3_bind_int64(pStmt, 1, iAbsLevel);
+ assert( pCsr->nSegment==0 );
+ for(i=0; rc==SQLITE_OK && sqlite3_step(pStmt)==SQLITE_ROW && i<nSeg; i++){
+ rc = sqlite3Fts3SegReaderNew(i, 0,
+ sqlite3_column_int64(pStmt, 1), /* segdir.start_block */
+ sqlite3_column_int64(pStmt, 2), /* segdir.leaves_end_block */
+ sqlite3_column_int64(pStmt, 3), /* segdir.end_block */
+ sqlite3_column_blob(pStmt, 4), /* segdir.root */
+ sqlite3_column_bytes(pStmt, 4), /* segdir.root */
+ &pCsr->apSegment[i]
+ );
+ pCsr->nSegment++;
+ }
+ rc2 = sqlite3_reset(pStmt);
+ if( rc==SQLITE_OK ) rc = rc2;
+ }
+
+ return rc;
+}
+
+typedef struct IncrmergeWriter IncrmergeWriter;
+typedef struct NodeWriter NodeWriter;
+typedef struct Blob Blob;
+typedef struct NodeReader NodeReader;
+
+/*
+** An instance of the following structure is used as a dynamic buffer
+** in which to build up nodes or other blobs of data.
+**
+** The function blobGrowBuffer() is used to extend the allocation.
+*/
+struct Blob {
+ char *a; /* Pointer to allocation */
+ int n; /* Number of valid bytes of data in a[] */
+ int nAlloc; /* Allocated size of a[] (nAlloc>=n) */
+};
+
+/*
+** This structure is used to build up buffers containing segment b-tree
+** nodes (blocks).
+*/
+struct NodeWriter {
+ sqlite3_int64 iBlock; /* Current block id */
+ Blob key; /* Last key written to the current block */
+ Blob block; /* Current block image */
+};
+
+/*
+** An object of this type contains the state required to create or append
+** to an appendable b-tree segment.
+*/
+struct IncrmergeWriter {
+ int nLeafEst; /* Space allocated for leaf blocks */
+ int nWork; /* Number of leaf pages flushed */
+ sqlite3_int64 iAbsLevel; /* Absolute level of input segments */
+ int iIdx; /* Index of *output* segment in iAbsLevel+1 */
+ sqlite3_int64 iStart; /* Block number of first allocated block */
+ sqlite3_int64 iEnd; /* Block number of last allocated block */
+ sqlite3_int64 nLeafData; /* Bytes of leaf page data so far */
+ u8 bNoLeafData; /* If true, store 0 for segment size */
+ NodeWriter aNodeWriter[FTS_MAX_APPENDABLE_HEIGHT];
+};
+
+/*
+** An object of the following type is used to read data from a single
+** FTS segment node. See the following functions:
+**
+** nodeReaderInit()
+** nodeReaderNext()
+** nodeReaderRelease()
+*/
+struct NodeReader {
+ const char *aNode;
+ int nNode;
+ int iOff; /* Current offset within aNode[] */
+
+  /* Output variables containing the current node entry. */
+ sqlite3_int64 iChild; /* Pointer to child node */
+ Blob term; /* Current term */
+ const char *aDoclist; /* Pointer to doclist */
+ int nDoclist; /* Size of doclist in bytes */
+};
+
+/*
+** If *pRc is not SQLITE_OK when this function is called, it is a no-op.
+** Otherwise, if the allocation at pBlob->a is not already at least nMin
+** bytes in size, extend (realloc) it to be so.
+**
+** If an OOM error occurs, set *pRc to SQLITE_NOMEM and leave pBlob->a
+** unmodified. Otherwise, if the allocation succeeds, update pBlob->nAlloc
+** to reflect the new size of the pBlob->a[] buffer.
+*/
+static void blobGrowBuffer(Blob *pBlob, int nMin, int *pRc){
+ if( *pRc==SQLITE_OK && nMin>pBlob->nAlloc ){
+ int nAlloc = nMin;
+ char *a = (char *)sqlite3_realloc(pBlob->a, nAlloc);
+ if( a ){
+ pBlob->nAlloc = nAlloc;
+ pBlob->a = a;
+ }else{
+ *pRc = SQLITE_NOMEM;
+ }
+ }
+}
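+
+/* Illustrative sketch (not part of the build): the "sticky error"
+** convention used with blobGrowBuffer() throughout this file. Because
+** the function is a no-op once *pRc is set, several grow/copy steps can
+** be chained and the error code checked once:
+*/
+#if 0
+static int exampleBlobAppend(Blob *pBlob, const char *z, int n){
+  int rc = SQLITE_OK;
+  blobGrowBuffer(pBlob, pBlob->n + n, &rc); /* No-op if rc!=SQLITE_OK */
+  if( rc==SQLITE_OK ){
+    memcpy(&pBlob->a[pBlob->n], z, n);
+    pBlob->n += n;
+  }
+  return rc;
+}
+#endif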
+
+/*
+** Attempt to advance the node-reader object passed as the first argument to
+** the next entry on the node.
+**
+** Return an error code if an error occurs (SQLITE_NOMEM is possible).
+** Otherwise return SQLITE_OK. If there is no next entry on the node
+** (e.g. because the current entry is the last) set NodeReader->aNode to
+** NULL to indicate EOF. Otherwise, populate the NodeReader structure output
+** variables for the new entry.
+*/
+static int nodeReaderNext(NodeReader *p){
+ int bFirst = (p->term.n==0); /* True for first term on the node */
+ int nPrefix = 0; /* Bytes to copy from previous term */
+ int nSuffix = 0; /* Bytes to append to the prefix */
+ int rc = SQLITE_OK; /* Return code */
+
+ assert( p->aNode );
+ if( p->iChild && bFirst==0 ) p->iChild++;
+ if( p->iOff>=p->nNode ){
+ /* EOF */
+ p->aNode = 0;
+ }else{
+ if( bFirst==0 ){
+ p->iOff += fts3GetVarint32(&p->aNode[p->iOff], &nPrefix);
+ }
+ p->iOff += fts3GetVarint32(&p->aNode[p->iOff], &nSuffix);
+
+ if( nPrefix>p->term.n || nSuffix>p->nNode-p->iOff || nSuffix==0 ){
+ return FTS_CORRUPT_VTAB;
+ }
+ blobGrowBuffer(&p->term, nPrefix+nSuffix, &rc);
+ if( rc==SQLITE_OK ){
+ memcpy(&p->term.a[nPrefix], &p->aNode[p->iOff], nSuffix);
+ p->term.n = nPrefix+nSuffix;
+ p->iOff += nSuffix;
+ if( p->iChild==0 ){
+ p->iOff += fts3GetVarint32(&p->aNode[p->iOff], &p->nDoclist);
+ if( (p->nNode-p->iOff)<p->nDoclist ){
+ return FTS_CORRUPT_VTAB;
+ }
+ p->aDoclist = &p->aNode[p->iOff];
+ p->iOff += p->nDoclist;
+ }
+ }
+ }
+
+ assert_fts3_nc( p->iOff<=p->nNode );
+ return rc;
+}
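+
+/* Worked example (terms invented): nodeReaderNext() decodes the prefix
+** compression used on segment nodes. The first term on a node is stored
+** as <nSuffix><suffix>; each later term as <nPrefix><nSuffix><suffix>,
+** where nPrefix is the byte count shared with the previous term. The
+** terms "apple", "apply", "banana" are therefore laid out as:
+**
+** 05 'a' 'p' 'p' 'l' 'e'          (first term, nSuffix=5)
+** 04 01 'y'                       (nPrefix=4, nSuffix=1 -> "apply")
+** 00 06 'b' 'a' 'n' 'a' 'n' 'a'   (nPrefix=0, nSuffix=6 -> "banana")
+**
+** On a leaf node each term is followed by its doclist-size varint and
+** doclist; those are omitted above.
+*/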
+
+/*
+** Release all dynamic resources held by node-reader object *p.
+*/
+static void nodeReaderRelease(NodeReader *p){
+ sqlite3_free(p->term.a);
+}
+
+/*
+** Initialize a node-reader object to read the node in buffer aNode/nNode.
+**
+** If successful, SQLITE_OK is returned and the NodeReader object is set to
+** point to the first entry on the node (if any). Otherwise, an SQLite
+** error code is returned.
+*/
+static int nodeReaderInit(NodeReader *p, const char *aNode, int nNode){
+ memset(p, 0, sizeof(NodeReader));
+ p->aNode = aNode;
+ p->nNode = nNode;
+
+ /* Figure out if this is a leaf or an internal node. */
+ if( aNode && aNode[0] ){
+ /* An internal node. */
+ p->iOff = 1 + sqlite3Fts3GetVarint(&p->aNode[1], &p->iChild);
+ }else{
+ p->iOff = 1;
+ }
+
+ return aNode ? nodeReaderNext(p) : SQLITE_OK;
+}
+
+/*
+** This function is called while writing an FTS segment each time a leaf
+** node is finished and written to disk. The key (zTerm/nTerm) is guaranteed
+** to be greater than the largest key on the node just written, but smaller
+** than or equal to the first key that will be written to the next leaf
+** node.
+**
+** The block id of the leaf node just written to disk may be found in
+** (pWriter->aNodeWriter[0].iBlock) when this function is called.
+*/
+static int fts3IncrmergePush(
+ Fts3Table *p, /* Fts3 table handle */
+ IncrmergeWriter *pWriter, /* Writer object */
+ const char *zTerm, /* Term to write to internal node */
+ int nTerm /* Bytes at zTerm */
+){
+ sqlite3_int64 iPtr = pWriter->aNodeWriter[0].iBlock;
+ int iLayer;
+
+ assert( nTerm>0 );
+ for(iLayer=1; ALWAYS(iLayer<FTS_MAX_APPENDABLE_HEIGHT); iLayer++){
+ sqlite3_int64 iNextPtr = 0;
+ NodeWriter *pNode = &pWriter->aNodeWriter[iLayer];
+ int rc = SQLITE_OK;
+ int nPrefix;
+ int nSuffix;
+ int nSpace;
+
+ /* Figure out how much space the key will consume if it is written to
+ ** the current node of layer iLayer. Due to the prefix compression,
+ ** the space required changes depending on which node the key is to
+ ** be added to. */
+ nPrefix = fts3PrefixCompress(pNode->key.a, pNode->key.n, zTerm, nTerm);
+ nSuffix = nTerm - nPrefix;
+    if( nSuffix<=0 ) return FTS_CORRUPT_VTAB;
+ nSpace = sqlite3Fts3VarintLen(nPrefix);
+ nSpace += sqlite3Fts3VarintLen(nSuffix) + nSuffix;
+
+ if( pNode->key.n==0 || (pNode->block.n + nSpace)<=p->nNodeSize ){
+ /* If the current node of layer iLayer contains zero keys, or if adding
+ ** the key to it will not cause it to grow to larger than nNodeSize
+ ** bytes in size, write the key here. */
+
+ Blob *pBlk = &pNode->block;
+ if( pBlk->n==0 ){
+ blobGrowBuffer(pBlk, p->nNodeSize, &rc);
+ if( rc==SQLITE_OK ){
+ pBlk->a[0] = (char)iLayer;
+ pBlk->n = 1 + sqlite3Fts3PutVarint(&pBlk->a[1], iPtr);
+ }
+ }
+ blobGrowBuffer(pBlk, pBlk->n + nSpace, &rc);
+ blobGrowBuffer(&pNode->key, nTerm, &rc);
+
+ if( rc==SQLITE_OK ){
+ if( pNode->key.n ){
+ pBlk->n += sqlite3Fts3PutVarint(&pBlk->a[pBlk->n], nPrefix);
+ }
+ pBlk->n += sqlite3Fts3PutVarint(&pBlk->a[pBlk->n], nSuffix);
+ memcpy(&pBlk->a[pBlk->n], &zTerm[nPrefix], nSuffix);
+ pBlk->n += nSuffix;
+
+ memcpy(pNode->key.a, zTerm, nTerm);
+ pNode->key.n = nTerm;
+ }
+ }else{
+ /* Otherwise, flush the current node of layer iLayer to disk.
+ ** Then allocate a new, empty sibling node. The key will be written
+ ** into the parent of this node. */
+ rc = fts3WriteSegment(p, pNode->iBlock, pNode->block.a, pNode->block.n);
+
+ assert( pNode->block.nAlloc>=p->nNodeSize );
+ pNode->block.a[0] = (char)iLayer;
+ pNode->block.n = 1 + sqlite3Fts3PutVarint(&pNode->block.a[1], iPtr+1);
+
+ iNextPtr = pNode->iBlock;
+ pNode->iBlock++;
+ pNode->key.n = 0;
+ }
+
+ if( rc!=SQLITE_OK || iNextPtr==0 ) return rc;
+ iPtr = iNextPtr;
+ }
+
+ assert( 0 );
+ return 0;
+}
+
+/*
+** Append a term and (optionally) doclist to the FTS segment node currently
+** stored in blob *pNode. The node need not contain any terms, but the
+** header must be written before this function is called.
+**
+** A node header is a single 0x00 byte for a leaf node, or a height varint
+** followed by the left-hand-child varint for an internal node.
+**
+** The term to be appended is passed via arguments zTerm/nTerm. For a
+** leaf node, the doclist is passed as aDoclist/nDoclist. For an internal
+** node, both aDoclist and nDoclist must be passed 0.
+**
+** If the size of the value in blob pPrev is zero, then this is the first
+** term written to the node. Otherwise, pPrev contains a copy of the
+** previous term. Before this function returns, it is updated to contain a
+** copy of zTerm/nTerm.
+**
+** It is assumed that the buffer associated with pNode is already large
+** enough to accommodate the new entry. The buffer associated with pPrev
+** is extended by this function if required.
+**
+** If an error (i.e. OOM condition) occurs, an SQLite error code is
+** returned. Otherwise, SQLITE_OK.
+*/
+static int fts3AppendToNode(
+ Blob *pNode, /* Current node image to append to */
+ Blob *pPrev, /* Buffer containing previous term written */
+ const char *zTerm, /* New term to write */
+ int nTerm, /* Size of zTerm in bytes */
+ const char *aDoclist, /* Doclist (or NULL) to write */
+ int nDoclist /* Size of aDoclist in bytes */
+){
+ int rc = SQLITE_OK; /* Return code */
+ int bFirst = (pPrev->n==0); /* True if this is the first term written */
+ int nPrefix; /* Size of term prefix in bytes */
+ int nSuffix; /* Size of term suffix in bytes */
+
+ /* Node must have already been started. There must be a doclist for a
+ ** leaf node, and there must not be a doclist for an internal node. */
+ assert( pNode->n>0 );
+ assert_fts3_nc( (pNode->a[0]=='\0')==(aDoclist!=0) );
+
+ blobGrowBuffer(pPrev, nTerm, &rc);
+ if( rc!=SQLITE_OK ) return rc;
+
+ nPrefix = fts3PrefixCompress(pPrev->a, pPrev->n, zTerm, nTerm);
+ nSuffix = nTerm - nPrefix;
+ if( nSuffix<=0 ) return FTS_CORRUPT_VTAB;
+ memcpy(pPrev->a, zTerm, nTerm);
+ pPrev->n = nTerm;
+
+ if( bFirst==0 ){
+ pNode->n += sqlite3Fts3PutVarint(&pNode->a[pNode->n], nPrefix);
+ }
+ pNode->n += sqlite3Fts3PutVarint(&pNode->a[pNode->n], nSuffix);
+ memcpy(&pNode->a[pNode->n], &zTerm[nPrefix], nSuffix);
+ pNode->n += nSuffix;
+
+ if( aDoclist ){
+ pNode->n += sqlite3Fts3PutVarint(&pNode->a[pNode->n], nDoclist);
+ memcpy(&pNode->a[pNode->n], aDoclist, nDoclist);
+ pNode->n += nDoclist;
+ }
+
+ assert( pNode->n<=pNode->nAlloc );
+
+ return SQLITE_OK;
+}
+
+/*
+** Append the current term and doclist pointed to by cursor pCsr to the
+** appendable b-tree segment opened for writing by pWriter.
+**
+** Return SQLITE_OK if successful, or an SQLite error code otherwise.
+*/
+static int fts3IncrmergeAppend(
+ Fts3Table *p, /* Fts3 table handle */
+ IncrmergeWriter *pWriter, /* Writer object */
+ Fts3MultiSegReader *pCsr /* Cursor containing term and doclist */
+){
+ const char *zTerm = pCsr->zTerm;
+ int nTerm = pCsr->nTerm;
+ const char *aDoclist = pCsr->aDoclist;
+ int nDoclist = pCsr->nDoclist;
+ int rc = SQLITE_OK; /* Return code */
+ int nSpace; /* Total space in bytes required on leaf */
+ int nPrefix; /* Size of prefix shared with previous term */
+ int nSuffix; /* Size of suffix (nTerm - nPrefix) */
+ NodeWriter *pLeaf; /* Object used to write leaf nodes */
+
+ pLeaf = &pWriter->aNodeWriter[0];
+ nPrefix = fts3PrefixCompress(pLeaf->key.a, pLeaf->key.n, zTerm, nTerm);
+ nSuffix = nTerm - nPrefix;
+
+ nSpace = sqlite3Fts3VarintLen(nPrefix);
+ nSpace += sqlite3Fts3VarintLen(nSuffix) + nSuffix;
+ nSpace += sqlite3Fts3VarintLen(nDoclist) + nDoclist;
+
+ /* If the current block is not empty, and if adding this term/doclist
+ ** to the current block would make it larger than Fts3Table.nNodeSize
+ ** bytes, write this block out to the database. */
+ if( pLeaf->block.n>0 && (pLeaf->block.n + nSpace)>p->nNodeSize ){
+ rc = fts3WriteSegment(p, pLeaf->iBlock, pLeaf->block.a, pLeaf->block.n);
+ pWriter->nWork++;
+
+ /* Add the current term to the parent node. The term added to the
+ ** parent must:
+ **
+ ** a) be greater than the largest term on the leaf node just written
+ ** to the database (still available in pLeaf->key), and
+ **
+ ** b) be less than or equal to the term about to be added to the new
+ ** leaf node (zTerm/nTerm).
+ **
+ ** In other words, it must be the prefix of zTerm 1 byte longer than
+    ** the common prefix (if any) of zTerm and the largest term on the
+    ** leaf node just written (still available in pLeaf->key).
+ */
+ if( rc==SQLITE_OK ){
+ rc = fts3IncrmergePush(p, pWriter, zTerm, nPrefix+1);
+ }
+
+ /* Advance to the next output block */
+ pLeaf->iBlock++;
+ pLeaf->key.n = 0;
+ pLeaf->block.n = 0;
+
+ nSuffix = nTerm;
+ nSpace = 1;
+ nSpace += sqlite3Fts3VarintLen(nSuffix) + nSuffix;
+ nSpace += sqlite3Fts3VarintLen(nDoclist) + nDoclist;
+ }
+
+ pWriter->nLeafData += nSpace;
+ blobGrowBuffer(&pLeaf->block, pLeaf->block.n + nSpace, &rc);
+ if( rc==SQLITE_OK ){
+ if( pLeaf->block.n==0 ){
+ pLeaf->block.n = 1;
+ pLeaf->block.a[0] = '\0';
+ }
+ rc = fts3AppendToNode(
+ &pLeaf->block, &pLeaf->key, zTerm, nTerm, aDoclist, nDoclist
+ );
+ }
+
+ return rc;
+}
+
+/*
+** This function is called to release all dynamic resources held by the
+** merge-writer object pWriter, and if no error has occurred, to flush
+** all outstanding node buffers held by pWriter to disk.
+**
+** If *pRc is not SQLITE_OK when this function is called, then no attempt
+** is made to write any data to disk. Instead, this function serves only
+** to release outstanding resources.
+**
+** Otherwise, if *pRc is initially SQLITE_OK and an error occurs while
+** flushing buffers to disk, *pRc is set to an SQLite error code before
+** returning.
+*/
+static void fts3IncrmergeRelease(
+ Fts3Table *p, /* FTS3 table handle */
+ IncrmergeWriter *pWriter, /* Merge-writer object */
+ int *pRc /* IN/OUT: Error code */
+){
+ int i; /* Used to iterate through non-root layers */
+ int iRoot; /* Index of root in pWriter->aNodeWriter */
+ NodeWriter *pRoot; /* NodeWriter for root node */
+ int rc = *pRc; /* Error code */
+
+ /* Set iRoot to the index in pWriter->aNodeWriter[] of the output segment
+ ** root node. If the segment fits entirely on a single leaf node, iRoot
+ ** will be set to 0. If the root node is the parent of the leaves, iRoot
+ ** will be 1. And so on. */
+ for(iRoot=FTS_MAX_APPENDABLE_HEIGHT-1; iRoot>=0; iRoot--){
+ NodeWriter *pNode = &pWriter->aNodeWriter[iRoot];
+ if( pNode->block.n>0 ) break;
+ assert( *pRc || pNode->block.nAlloc==0 );
+ assert( *pRc || pNode->key.nAlloc==0 );
+ sqlite3_free(pNode->block.a);
+ sqlite3_free(pNode->key.a);
+ }
+
+ /* Empty output segment. This is a no-op. */
+ if( iRoot<0 ) return;
+
+ /* The entire output segment fits on a single node. Normally, this means
+ ** the node would be stored as a blob in the "root" column of the %_segdir
+ ** table. However, this is not permitted in this case. The problem is that
+ ** space has already been reserved in the %_segments table, and so the
+ ** start_block and end_block fields of the %_segdir table must be populated.
+ ** And, by design or by accident, released versions of FTS cannot handle
+ ** segments that fit entirely on the root node with start_block!=0.
+ **
+ ** Instead, create a synthetic root node that contains nothing but a
+  ** pointer to the single content node, so that the segment consists of a
+ ** single leaf and a single interior (root) node.
+ **
+ ** Todo: Better might be to defer allocating space in the %_segments
+ ** table until we are sure it is needed.
+ */
+ if( iRoot==0 ){
+ Blob *pBlock = &pWriter->aNodeWriter[1].block;
+ blobGrowBuffer(pBlock, 1 + FTS3_VARINT_MAX, &rc);
+ if( rc==SQLITE_OK ){
+ pBlock->a[0] = 0x01;
+ pBlock->n = 1 + sqlite3Fts3PutVarint(
+ &pBlock->a[1], pWriter->aNodeWriter[0].iBlock
+ );
+ }
+ iRoot = 1;
+ }
+ pRoot = &pWriter->aNodeWriter[iRoot];
+
+ /* Flush all currently outstanding nodes to disk. */
+ for(i=0; i<iRoot; i++){
+ NodeWriter *pNode = &pWriter->aNodeWriter[i];
+ if( pNode->block.n>0 && rc==SQLITE_OK ){
+ rc = fts3WriteSegment(p, pNode->iBlock, pNode->block.a, pNode->block.n);
+ }
+ sqlite3_free(pNode->block.a);
+ sqlite3_free(pNode->key.a);
+ }
+
+ /* Write the %_segdir record. */
+ if( rc==SQLITE_OK ){
+ rc = fts3WriteSegdir(p,
+ pWriter->iAbsLevel+1, /* level */
+ pWriter->iIdx, /* idx */
+ pWriter->iStart, /* start_block */
+ pWriter->aNodeWriter[0].iBlock, /* leaves_end_block */
+ pWriter->iEnd, /* end_block */
+        (pWriter->bNoLeafData==0 ? pWriter->nLeafData : 0), /* leaf-data size (stored in end_block) */
+ pRoot->block.a, pRoot->block.n /* root */
+ );
+ }
+ sqlite3_free(pRoot->block.a);
+ sqlite3_free(pRoot->key.a);
+
+ *pRc = rc;
+}
+
+/*
+** Compare the term in buffer zLhs (size in bytes nLhs) with that in
+** zRhs (size in bytes nRhs) using memcmp. If one term is a prefix of
+** the other, the shorter term is considered the smaller.
+**
+** Return -ve if zLhs is smaller than zRhs, 0 if it is equal, or +ve
+** if it is greater.
+*/
+static int fts3TermCmp(
+ const char *zLhs, int nLhs, /* LHS of comparison */
+ const char *zRhs, int nRhs /* RHS of comparison */
+){
+ int nCmp = MIN(nLhs, nRhs);
+ int res;
+
+ res = (nCmp ? memcmp(zLhs, zRhs, nCmp) : 0);
+ if( res==0 ) res = nLhs - nRhs;
+
+ return res;
+}
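+
+/* Illustrative sketch (not part of the build): fts3TermCmp() orders
+** terms bytewise, with a term sorting before any longer term it is a
+** prefix of:
+*/
+#if 0
+static void exampleTermCmp(void){
+  assert( fts3TermCmp("abc", 3, "abd", 3)<0 ); /* 'c' < 'd' */
+  assert( fts3TermCmp("ab", 2, "abc", 3)<0 ); /* prefix sorts first */
+  assert( fts3TermCmp("abc", 3, "abc", 3)==0 ); /* equal terms */
+}
+#endif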
+
+
+/*
+** Query to see if the entry in the %_segments table with blockid iEnd is
+** NULL. If no error occurs and the entry is NULL, set *pbRes to 1 before
+** returning. Otherwise, set *pbRes to 0.
+**
+** Or, if an error occurs while querying the database, return an SQLite
+** error code. The final value of *pbRes is undefined in this case.
+**
+** This is used to test if a segment is an "appendable" segment. If it
+** is, then a NULL entry has been inserted into the %_segments table
+** with blockid %_segdir.end_block.
+*/
+static int fts3IsAppendable(Fts3Table *p, sqlite3_int64 iEnd, int *pbRes){
+ int bRes = 0; /* Result to set *pbRes to */
+ sqlite3_stmt *pCheck = 0; /* Statement to query database with */
+ int rc; /* Return code */
+
+ rc = fts3SqlStmt(p, SQL_SEGMENT_IS_APPENDABLE, &pCheck, 0);
+ if( rc==SQLITE_OK ){
+ sqlite3_bind_int64(pCheck, 1, iEnd);
+ if( SQLITE_ROW==sqlite3_step(pCheck) ) bRes = 1;
+ rc = sqlite3_reset(pCheck);
+ }
+
+ *pbRes = bRes;
+ return rc;
+}
+
+/*
+** This function is called when initializing an incremental-merge operation.
+** It checks if the existing segment with index value iIdx at absolute level
+** (iAbsLevel+1) can be appended to by the incremental merge. If it can, the
+** merge-writer object *pWriter is initialized to write to it.
+**
+** An existing segment can be appended to by an incremental merge if:
+**
+** * It was initially created as an appendable segment (with all required
+** space pre-allocated), and
+**
+** * The first key read from the input (arguments zKey and nKey) is
+** greater than the largest key currently stored in the potential
+** output segment.
+*/
+static int fts3IncrmergeLoad(
+ Fts3Table *p, /* Fts3 table handle */
+ sqlite3_int64 iAbsLevel, /* Absolute level of input segments */
+ int iIdx, /* Index of candidate output segment */
+ const char *zKey, /* First key to write */
+  int nKey, /* Number of bytes in zKey */
+ IncrmergeWriter *pWriter /* Populate this object */
+){
+ int rc; /* Return code */
+ sqlite3_stmt *pSelect = 0; /* SELECT to read %_segdir entry */
+
+ rc = fts3SqlStmt(p, SQL_SELECT_SEGDIR, &pSelect, 0);
+ if( rc==SQLITE_OK ){
+ sqlite3_int64 iStart = 0; /* Value of %_segdir.start_block */
+ sqlite3_int64 iLeafEnd = 0; /* Value of %_segdir.leaves_end_block */
+ sqlite3_int64 iEnd = 0; /* Value of %_segdir.end_block */
+ const char *aRoot = 0; /* Pointer to %_segdir.root buffer */
+ int nRoot = 0; /* Size of aRoot[] in bytes */
+ int rc2; /* Return code from sqlite3_reset() */
+ int bAppendable = 0; /* Set to true if segment is appendable */
+
+    /* Read the %_segdir entry for index iIdx at absolute level (iAbsLevel+1) */
+ sqlite3_bind_int64(pSelect, 1, iAbsLevel+1);
+ sqlite3_bind_int(pSelect, 2, iIdx);
+ if( sqlite3_step(pSelect)==SQLITE_ROW ){
+ iStart = sqlite3_column_int64(pSelect, 1);
+ iLeafEnd = sqlite3_column_int64(pSelect, 2);
+ fts3ReadEndBlockField(pSelect, 3, &iEnd, &pWriter->nLeafData);
+ if( pWriter->nLeafData<0 ){
+ pWriter->nLeafData = pWriter->nLeafData * -1;
+ }
+ pWriter->bNoLeafData = (pWriter->nLeafData==0);
+ nRoot = sqlite3_column_bytes(pSelect, 4);
+ aRoot = sqlite3_column_blob(pSelect, 4);
+ if( aRoot==0 ){
+ sqlite3_reset(pSelect);
+ return nRoot ? SQLITE_NOMEM : FTS_CORRUPT_VTAB;
+ }
+ }else{
+ return sqlite3_reset(pSelect);
+ }
+
+ /* Check for the zero-length marker in the %_segments table */
+ rc = fts3IsAppendable(p, iEnd, &bAppendable);
+
+    /* Check that zKey/nKey is larger than the largest key in the candidate */
+ if( rc==SQLITE_OK && bAppendable ){
+ char *aLeaf = 0;
+ int nLeaf = 0;
+
+ rc = sqlite3Fts3ReadBlock(p, iLeafEnd, &aLeaf, &nLeaf, 0);
+ if( rc==SQLITE_OK ){
+ NodeReader reader;
+ for(rc = nodeReaderInit(&reader, aLeaf, nLeaf);
+ rc==SQLITE_OK && reader.aNode;
+ rc = nodeReaderNext(&reader)
+ ){
+ assert( reader.aNode );
+ }
+ if( fts3TermCmp(zKey, nKey, reader.term.a, reader.term.n)<=0 ){
+ bAppendable = 0;
+ }
+ nodeReaderRelease(&reader);
+ }
+ sqlite3_free(aLeaf);
+ }
+
+ if( rc==SQLITE_OK && bAppendable ){
+ /* It is possible to append to this segment. Set up the IncrmergeWriter
+ ** object to do so. */
+ int i;
+ int nHeight = (int)aRoot[0];
+ NodeWriter *pNode;
+ if( nHeight<1 || nHeight>=FTS_MAX_APPENDABLE_HEIGHT ){
+ sqlite3_reset(pSelect);
+ return FTS_CORRUPT_VTAB;
+ }
+
+ pWriter->nLeafEst = (int)((iEnd - iStart) + 1)/FTS_MAX_APPENDABLE_HEIGHT;
+ pWriter->iStart = iStart;
+ pWriter->iEnd = iEnd;
+ pWriter->iAbsLevel = iAbsLevel;
+ pWriter->iIdx = iIdx;
+
+ for(i=nHeight+1; i<FTS_MAX_APPENDABLE_HEIGHT; i++){
+ pWriter->aNodeWriter[i].iBlock = pWriter->iStart + i*pWriter->nLeafEst;
+ }
+
+ pNode = &pWriter->aNodeWriter[nHeight];
+ pNode->iBlock = pWriter->iStart + pWriter->nLeafEst*nHeight;
+ blobGrowBuffer(&pNode->block,
+ MAX(nRoot, p->nNodeSize)+FTS3_NODE_PADDING, &rc
+ );
+ if( rc==SQLITE_OK ){
+ memcpy(pNode->block.a, aRoot, nRoot);
+ pNode->block.n = nRoot;
+ memset(&pNode->block.a[nRoot], 0, FTS3_NODE_PADDING);
+ }
+
+ for(i=nHeight; i>=0 && rc==SQLITE_OK; i--){
+ NodeReader reader;
+ pNode = &pWriter->aNodeWriter[i];
+
+ if( pNode->block.a){
+ rc = nodeReaderInit(&reader, pNode->block.a, pNode->block.n);
+ while( reader.aNode && rc==SQLITE_OK ) rc = nodeReaderNext(&reader);
+ blobGrowBuffer(&pNode->key, reader.term.n, &rc);
+ if( rc==SQLITE_OK ){
+ memcpy(pNode->key.a, reader.term.a, reader.term.n);
+ pNode->key.n = reader.term.n;
+ if( i>0 ){
+ char *aBlock = 0;
+ int nBlock = 0;
+ pNode = &pWriter->aNodeWriter[i-1];
+ pNode->iBlock = reader.iChild;
+ rc = sqlite3Fts3ReadBlock(p, reader.iChild, &aBlock, &nBlock, 0);
+ blobGrowBuffer(&pNode->block,
+ MAX(nBlock, p->nNodeSize)+FTS3_NODE_PADDING, &rc
+ );
+ if( rc==SQLITE_OK ){
+ memcpy(pNode->block.a, aBlock, nBlock);
+ pNode->block.n = nBlock;
+ memset(&pNode->block.a[nBlock], 0, FTS3_NODE_PADDING);
+ }
+ sqlite3_free(aBlock);
+ }
+ }
+ }
+ nodeReaderRelease(&reader);
+ }
+ }
+
+ rc2 = sqlite3_reset(pSelect);
+ if( rc==SQLITE_OK ) rc = rc2;
+ }
+
+ return rc;
+}
+
+/*
+** Determine the largest segment index value that exists within absolute
+** level iAbsLevel+1. If no error occurs, set *piIdx to this value plus
+** one before returning SQLITE_OK. Or, if there are no segments at all
+** within level iAbsLevel, set *piIdx to zero.
+**
+** If an error occurs, return an SQLite error code. The final value of
+** *piIdx is undefined in this case.
+*/
+static int fts3IncrmergeOutputIdx(
+ Fts3Table *p, /* FTS Table handle */
+ sqlite3_int64 iAbsLevel, /* Absolute index of input segments */
+ int *piIdx /* OUT: Next free index at iAbsLevel+1 */
+){
+ int rc;
+ sqlite3_stmt *pOutputIdx = 0; /* SQL used to find output index */
+
+ rc = fts3SqlStmt(p, SQL_NEXT_SEGMENT_INDEX, &pOutputIdx, 0);
+ if( rc==SQLITE_OK ){
+ sqlite3_bind_int64(pOutputIdx, 1, iAbsLevel+1);
+ sqlite3_step(pOutputIdx);
+ *piIdx = sqlite3_column_int(pOutputIdx, 0);
+ rc = sqlite3_reset(pOutputIdx);
+ }
+
+ return rc;
+}
+
+/*
+** Allocate an appendable output segment on absolute level iAbsLevel+1
+** with idx value iIdx.
+**
+** In the %_segdir table, a segment is defined by the values in three
+** columns:
+**
+** start_block
+** leaves_end_block
+** end_block
+**
+** When an appendable segment is allocated, it is estimated that the
+** maximum number of leaf blocks that may be required is the sum of the
+** number of leaf blocks consumed by the input segments, plus the number
+** of input segments, multiplied by two. This value is stored in stack
+** variable nLeafEst.
+**
+** A total of 16*nLeafEst blocks are allocated when an appendable segment
+** is created ((1 + end_block - start_block)==16*nLeafEst). The contiguous
+** array of leaf nodes starts at the first block allocated. The array
+** of interior nodes that are parents of the leaf nodes start at block
+** (start_block + (1 + end_block - start_block) / 16). And so on.
+**
+** In the actual code below, the value "16" is replaced with the
+** pre-processor macro FTS_MAX_APPENDABLE_HEIGHT.
+*/
+static int fts3IncrmergeWriter(
+ Fts3Table *p, /* Fts3 table handle */
+ sqlite3_int64 iAbsLevel, /* Absolute level of input segments */
+ int iIdx, /* Index of new output segment */
+ Fts3MultiSegReader *pCsr, /* Cursor that data will be read from */
+ IncrmergeWriter *pWriter /* Populate this object */
+){
+ int rc; /* Return Code */
+ int i; /* Iterator variable */
+ int nLeafEst = 0; /* Blocks allocated for leaf nodes */
+ sqlite3_stmt *pLeafEst = 0; /* SQL used to determine nLeafEst */
+ sqlite3_stmt *pFirstBlock = 0; /* SQL used to determine first block */
+
+ /* Calculate nLeafEst. */
+ rc = fts3SqlStmt(p, SQL_MAX_LEAF_NODE_ESTIMATE, &pLeafEst, 0);
+ if( rc==SQLITE_OK ){
+ sqlite3_bind_int64(pLeafEst, 1, iAbsLevel);
+ sqlite3_bind_int64(pLeafEst, 2, pCsr->nSegment);
+ if( SQLITE_ROW==sqlite3_step(pLeafEst) ){
+ nLeafEst = sqlite3_column_int(pLeafEst, 0);
+ }
+ rc = sqlite3_reset(pLeafEst);
+ }
+ if( rc!=SQLITE_OK ) return rc;
+
+ /* Calculate the first block to use in the output segment */
+ rc = fts3SqlStmt(p, SQL_NEXT_SEGMENTS_ID, &pFirstBlock, 0);
+ if( rc==SQLITE_OK ){
+ if( SQLITE_ROW==sqlite3_step(pFirstBlock) ){
+ pWriter->iStart = sqlite3_column_int64(pFirstBlock, 0);
+ pWriter->iEnd = pWriter->iStart - 1;
+ pWriter->iEnd += nLeafEst * FTS_MAX_APPENDABLE_HEIGHT;
+ }
+ rc = sqlite3_reset(pFirstBlock);
+ }
+ if( rc!=SQLITE_OK ) return rc;
+
+ /* Insert the marker in the %_segments table to make sure nobody tries
+ ** to steal the space just allocated. This is also used to identify
+ ** appendable segments. */
+ rc = fts3WriteSegment(p, pWriter->iEnd, 0, 0);
+ if( rc!=SQLITE_OK ) return rc;
+
+ pWriter->iAbsLevel = iAbsLevel;
+ pWriter->nLeafEst = nLeafEst;
+ pWriter->iIdx = iIdx;
+
+ /* Set up the array of NodeWriter objects */
+ for(i=0; i<FTS_MAX_APPENDABLE_HEIGHT; i++){
+ pWriter->aNodeWriter[i].iBlock = pWriter->iStart + i*pWriter->nLeafEst;
+ }
+ return SQLITE_OK;
+}
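+
+/* Worked example (values invented): if the statement above estimates
+** nLeafEst==40 and FTS_MAX_APPENDABLE_HEIGHT is 16, the new appendable
+** segment reserves 16*40 = 640 block ids. Leaf nodes are written from
+** pWriter->iStart, the first layer of interior nodes from iStart+40,
+** the second from iStart+80, and so on, which is exactly how the
+** aNodeWriter[i].iBlock values are initialized in the loop above.
+*/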
+
+/*
+** Remove an entry from the %_segdir table. This function runs the
+** following statement:
+**
+** DELETE FROM %_segdir WHERE level = :iAbsLevel AND idx = :iIdx
+**
+** The idx values of any segments remaining on the level are made
+** contiguous again by a separate call to fts3RepackSegdirLevel().
+*/
+static int fts3RemoveSegdirEntry(
+ Fts3Table *p, /* FTS3 table handle */
+ sqlite3_int64 iAbsLevel, /* Absolute level to delete from */
+ int iIdx /* Index of %_segdir entry to delete */
+){
+ int rc; /* Return code */
+ sqlite3_stmt *pDelete = 0; /* DELETE statement */
+
+ rc = fts3SqlStmt(p, SQL_DELETE_SEGDIR_ENTRY, &pDelete, 0);
+ if( rc==SQLITE_OK ){
+ sqlite3_bind_int64(pDelete, 1, iAbsLevel);
+ sqlite3_bind_int(pDelete, 2, iIdx);
+ sqlite3_step(pDelete);
+ rc = sqlite3_reset(pDelete);
+ }
+
+ return rc;
+}
+
+/*
+** One or more segments have just been removed from absolute level iAbsLevel.
+** Update the 'idx' values of the remaining segments in the level so that
+** the idx values are a contiguous sequence starting from 0.
+*/
+static int fts3RepackSegdirLevel(
+ Fts3Table *p, /* FTS3 table handle */
+ sqlite3_int64 iAbsLevel /* Absolute level to repack */
+){
+ int rc; /* Return code */
+ int *aIdx = 0; /* Array of remaining idx values */
+ int nIdx = 0; /* Valid entries in aIdx[] */
+ int nAlloc = 0; /* Allocated size of aIdx[] */
+ int i; /* Iterator variable */
+ sqlite3_stmt *pSelect = 0; /* Select statement to read idx values */
+ sqlite3_stmt *pUpdate = 0; /* Update statement to modify idx values */
+
+ rc = fts3SqlStmt(p, SQL_SELECT_INDEXES, &pSelect, 0);
+ if( rc==SQLITE_OK ){
+ int rc2;
+ sqlite3_bind_int64(pSelect, 1, iAbsLevel);
+ while( SQLITE_ROW==sqlite3_step(pSelect) ){
+ if( nIdx>=nAlloc ){
+ int *aNew;
+ nAlloc += 16;
+ aNew = sqlite3_realloc(aIdx, nAlloc*sizeof(int));
+ if( !aNew ){
+ rc = SQLITE_NOMEM;
+ break;
+ }
+ aIdx = aNew;
+ }
+ aIdx[nIdx++] = sqlite3_column_int(pSelect, 0);
+ }
+ rc2 = sqlite3_reset(pSelect);
+ if( rc==SQLITE_OK ) rc = rc2;
+ }
+
+ if( rc==SQLITE_OK ){
+ rc = fts3SqlStmt(p, SQL_SHIFT_SEGDIR_ENTRY, &pUpdate, 0);
+ }
+ if( rc==SQLITE_OK ){
+ sqlite3_bind_int64(pUpdate, 2, iAbsLevel);
+ }
+
+ assert( p->bIgnoreSavepoint==0 );
+ p->bIgnoreSavepoint = 1;
+ for(i=0; rc==SQLITE_OK && i<nIdx; i++){
+ if( aIdx[i]!=i ){
+ sqlite3_bind_int(pUpdate, 3, aIdx[i]);
+ sqlite3_bind_int(pUpdate, 1, i);
+ sqlite3_step(pUpdate);
+ rc = sqlite3_reset(pUpdate);
+ }
+ }
+ p->bIgnoreSavepoint = 0;
+
+ sqlite3_free(aIdx);
+ return rc;
+}
+
+static void fts3StartNode(Blob *pNode, int iHeight, sqlite3_int64 iChild){
+ pNode->a[0] = (char)iHeight;
+ if( iChild ){
+ assert( pNode->nAlloc>=1+sqlite3Fts3VarintLen(iChild) );
+ pNode->n = 1 + sqlite3Fts3PutVarint(&pNode->a[1], iChild);
+ }else{
+ assert( pNode->nAlloc>=1 );
+ pNode->n = 1;
+ }
+}
+
+/*
+** The first two arguments are a pointer to and the size of a segment b-tree
+** node. The node may be a leaf or an internal node.
+**
+** This function creates a new node image in blob object *pNew by copying
+** all terms that are greater than or equal to zTerm/nTerm (for leaf nodes)
+** or greater than zTerm/nTerm (for internal nodes) from aNode/nNode.
+*/
+static int fts3TruncateNode(
+ const char *aNode, /* Current node image */
+ int nNode, /* Size of aNode in bytes */
+ Blob *pNew, /* OUT: Write new node image here */
+ const char *zTerm, /* Omit all terms smaller than this */
+ int nTerm, /* Size of zTerm in bytes */
+ sqlite3_int64 *piBlock /* OUT: Block number in next layer down */
+){
+ NodeReader reader; /* Reader object */
+ Blob prev = {0, 0, 0}; /* Previous term written to new node */
+ int rc = SQLITE_OK; /* Return code */
+ int bLeaf; /* True for a leaf node */
+
+ if( nNode<1 ) return FTS_CORRUPT_VTAB;
+ bLeaf = aNode[0]=='\0';
+
+ /* Allocate required output space */
+ blobGrowBuffer(pNew, nNode, &rc);
+ if( rc!=SQLITE_OK ) return rc;
+ pNew->n = 0;
+
+ /* Populate new node buffer */
+ for(rc = nodeReaderInit(&reader, aNode, nNode);
+ rc==SQLITE_OK && reader.aNode;
+ rc = nodeReaderNext(&reader)
+ ){
+ if( pNew->n==0 ){
+ int res = fts3TermCmp(reader.term.a, reader.term.n, zTerm, nTerm);
+ if( res<0 || (bLeaf==0 && res==0) ) continue;
+ fts3StartNode(pNew, (int)aNode[0], reader.iChild);
+ *piBlock = reader.iChild;
+ }
+ rc = fts3AppendToNode(
+ pNew, &prev, reader.term.a, reader.term.n,
+ reader.aDoclist, reader.nDoclist
+ );
+ if( rc!=SQLITE_OK ) break;
+ }
+ if( pNew->n==0 ){
+ fts3StartNode(pNew, (int)aNode[0], reader.iChild);
+ *piBlock = reader.iChild;
+ }
+ assert( pNew->n<=pNew->nAlloc );
+
+ nodeReaderRelease(&reader);
+ sqlite3_free(prev.a);
+ return rc;
+}
+
+/*
+** Remove all terms smaller than zTerm/nTerm from segment iIdx in absolute
+** level iAbsLevel. This may involve deleting entries from the %_segments
+** table, and modifying existing entries in both the %_segments and %_segdir
+** tables.
+**
+** SQLITE_OK is returned if the segment is updated successfully. Or an
+** SQLite error code otherwise.
+*/
+static int fts3TruncateSegment(
+ Fts3Table *p, /* FTS3 table handle */
+ sqlite3_int64 iAbsLevel, /* Absolute level of segment to modify */
+ int iIdx, /* Index within level of segment to modify */
+ const char *zTerm, /* Remove terms smaller than this */
+ int nTerm /* Number of bytes in buffer zTerm */
+){
+ int rc = SQLITE_OK; /* Return code */
+ Blob root = {0,0,0}; /* New root page image */
+ Blob block = {0,0,0}; /* Buffer used for any other block */
+ sqlite3_int64 iBlock = 0; /* Block id */
+ sqlite3_int64 iNewStart = 0; /* New value for iStartBlock */
+ sqlite3_int64 iOldStart = 0; /* Old value for iStartBlock */
+ sqlite3_stmt *pFetch = 0; /* Statement used to fetch segdir */
+
+ rc = fts3SqlStmt(p, SQL_SELECT_SEGDIR, &pFetch, 0);
+ if( rc==SQLITE_OK ){
+ int rc2; /* sqlite3_reset() return code */
+ sqlite3_bind_int64(pFetch, 1, iAbsLevel);
+ sqlite3_bind_int(pFetch, 2, iIdx);
+ if( SQLITE_ROW==sqlite3_step(pFetch) ){
+ const char *aRoot = sqlite3_column_blob(pFetch, 4);
+ int nRoot = sqlite3_column_bytes(pFetch, 4);
+ iOldStart = sqlite3_column_int64(pFetch, 1);
+ rc = fts3TruncateNode(aRoot, nRoot, &root, zTerm, nTerm, &iBlock);
+ }
+ rc2 = sqlite3_reset(pFetch);
+ if( rc==SQLITE_OK ) rc = rc2;
+ }
+
+ while( rc==SQLITE_OK && iBlock ){
+ char *aBlock = 0;
+ int nBlock = 0;
+ iNewStart = iBlock;
+
+ rc = sqlite3Fts3ReadBlock(p, iBlock, &aBlock, &nBlock, 0);
+ if( rc==SQLITE_OK ){
+ rc = fts3TruncateNode(aBlock, nBlock, &block, zTerm, nTerm, &iBlock);
+ }
+ if( rc==SQLITE_OK ){
+ rc = fts3WriteSegment(p, iNewStart, block.a, block.n);
+ }
+ sqlite3_free(aBlock);
+ }
+
+  /* Variable iNewStart now contains the block id of the first valid leaf node. */
+ if( rc==SQLITE_OK && iNewStart ){
+ sqlite3_stmt *pDel = 0;
+ rc = fts3SqlStmt(p, SQL_DELETE_SEGMENTS_RANGE, &pDel, 0);
+ if( rc==SQLITE_OK ){
+ sqlite3_bind_int64(pDel, 1, iOldStart);
+ sqlite3_bind_int64(pDel, 2, iNewStart-1);
+ sqlite3_step(pDel);
+ rc = sqlite3_reset(pDel);
+ }
+ }
+
+ if( rc==SQLITE_OK ){
+ sqlite3_stmt *pChomp = 0;
+ rc = fts3SqlStmt(p, SQL_CHOMP_SEGDIR, &pChomp, 0);
+ if( rc==SQLITE_OK ){
+ sqlite3_bind_int64(pChomp, 1, iNewStart);
+ sqlite3_bind_blob(pChomp, 2, root.a, root.n, SQLITE_STATIC);
+ sqlite3_bind_int64(pChomp, 3, iAbsLevel);
+ sqlite3_bind_int(pChomp, 4, iIdx);
+ sqlite3_step(pChomp);
+ rc = sqlite3_reset(pChomp);
+ sqlite3_bind_null(pChomp, 2);
+ }
+ }
+
+ sqlite3_free(root.a);
+ sqlite3_free(block.a);
+ return rc;
+}
+
+/*
+** This function is called after an incremental-merge operation has run to
+** merge (or partially merge) two or more segments from absolute level
+** iAbsLevel.
+**
+** Each input segment is either removed from the db completely (if all of
+** its data was copied to the output segment by the incrmerge operation)
+** or modified in place so that it no longer contains those entries that
+** have been duplicated in the output segment.
+*/
+static int fts3IncrmergeChomp(
+ Fts3Table *p, /* FTS table handle */
+ sqlite3_int64 iAbsLevel, /* Absolute level containing segments */
+ Fts3MultiSegReader *pCsr, /* Chomp all segments opened by this cursor */
+ int *pnRem /* Number of segments not deleted */
+){
+ int i;
+ int nRem = 0;
+ int rc = SQLITE_OK;
+
+ for(i=pCsr->nSegment-1; i>=0 && rc==SQLITE_OK; i--){
+ Fts3SegReader *pSeg = 0;
+ int j;
+
+ /* Find the Fts3SegReader object with Fts3SegReader.iIdx==i. It is hiding
+ ** somewhere in the pCsr->apSegment[] array. */
+ for(j=0; ALWAYS(j<pCsr->nSegment); j++){
+ pSeg = pCsr->apSegment[j];
+ if( pSeg->iIdx==i ) break;
+ }
+ assert( j<pCsr->nSegment && pSeg->iIdx==i );
+
+ if( pSeg->aNode==0 ){
+ /* Seg-reader is at EOF. Remove the entire input segment. */
+ rc = fts3DeleteSegment(p, pSeg);
+ if( rc==SQLITE_OK ){
+ rc = fts3RemoveSegdirEntry(p, iAbsLevel, pSeg->iIdx);
+ }
+ *pnRem = 0;
+ }else{
+ /* The incremental merge did not copy all the data from this
+ ** segment to the upper level. The segment is modified in place
+ ** so that it contains no keys smaller than zTerm/nTerm. */
+ const char *zTerm = pSeg->zTerm;
+ int nTerm = pSeg->nTerm;
+ rc = fts3TruncateSegment(p, iAbsLevel, pSeg->iIdx, zTerm, nTerm);
+ nRem++;
+ }
+ }
+
+ if( rc==SQLITE_OK && nRem!=pCsr->nSegment ){
+ rc = fts3RepackSegdirLevel(p, iAbsLevel);
+ }
+
+ *pnRem = nRem;
+ return rc;
+}
+
+/*
+** Store an incr-merge hint in the database.
+*/
+static int fts3IncrmergeHintStore(Fts3Table *p, Blob *pHint){
+ sqlite3_stmt *pReplace = 0;
+ int rc; /* Return code */
+
+ rc = fts3SqlStmt(p, SQL_REPLACE_STAT, &pReplace, 0);
+ if( rc==SQLITE_OK ){
+ sqlite3_bind_int(pReplace, 1, FTS_STAT_INCRMERGEHINT);
+ sqlite3_bind_blob(pReplace, 2, pHint->a, pHint->n, SQLITE_STATIC);
+ sqlite3_step(pReplace);
+ rc = sqlite3_reset(pReplace);
+ sqlite3_bind_null(pReplace, 2);
+ }
+
+ return rc;
+}
+
+/*
+** Load an incr-merge hint from the database. The incr-merge hint, if one
+** exists, is stored in the rowid==1 row of the %_stat table.
+**
+** If successful, populate blob *pHint with the value read from the %_stat
+** table and return SQLITE_OK. Otherwise, if an error occurs, return an
+** SQLite error code.
+*/
+static int fts3IncrmergeHintLoad(Fts3Table *p, Blob *pHint){
+ sqlite3_stmt *pSelect = 0;
+ int rc;
+
+ pHint->n = 0;
+ rc = fts3SqlStmt(p, SQL_SELECT_STAT, &pSelect, 0);
+ if( rc==SQLITE_OK ){
+ int rc2;
+ sqlite3_bind_int(pSelect, 1, FTS_STAT_INCRMERGEHINT);
+ if( SQLITE_ROW==sqlite3_step(pSelect) ){
+ const char *aHint = sqlite3_column_blob(pSelect, 0);
+ int nHint = sqlite3_column_bytes(pSelect, 0);
+ if( aHint ){
+ blobGrowBuffer(pHint, nHint, &rc);
+ if( rc==SQLITE_OK ){
+ memcpy(pHint->a, aHint, nHint);
+ pHint->n = nHint;
+ }
+ }
+ }
+ rc2 = sqlite3_reset(pSelect);
+ if( rc==SQLITE_OK ) rc = rc2;
+ }
+
+ return rc;
+}
+
+/*
+** If *pRc is not SQLITE_OK when this function is called, it is a no-op.
+** Otherwise, append an entry to the hint stored in blob *pHint. Each entry
+** consists of two varints, the absolute level number of the input segments
+** and the number of input segments.
+**
+** If successful, leave *pRc set to SQLITE_OK and return. If an error occurs,
+** set *pRc to an SQLite error code before returning.
+*/
+static void fts3IncrmergeHintPush(
+ Blob *pHint, /* Hint blob to append to */
+ i64 iAbsLevel, /* First varint to store in hint */
+ int nInput, /* Second varint to store in hint */
+ int *pRc /* IN/OUT: Error code */
+){
+ blobGrowBuffer(pHint, pHint->n + 2*FTS3_VARINT_MAX, pRc);
+ if( *pRc==SQLITE_OK ){
+ pHint->n += sqlite3Fts3PutVarint(&pHint->a[pHint->n], iAbsLevel);
+ pHint->n += sqlite3Fts3PutVarint(&pHint->a[pHint->n], (i64)nInput);
+ }
+}
+
+/*
+** Read the last entry (most recently pushed) from the hint blob *pHint
+** and then remove the entry. Write the two values read to *piAbsLevel and
+** *pnInput before returning.
+**
+** If no error occurs, return SQLITE_OK. If the hint blob in *pHint does
+** not contain at least two valid varints, return SQLITE_CORRUPT_VTAB.
+*/
+static int fts3IncrmergeHintPop(Blob *pHint, i64 *piAbsLevel, int *pnInput){
+ const int nHint = pHint->n;
+ int i;
+
+ i = pHint->n-1;
+ if( (pHint->a[i] & 0x80) ) return FTS_CORRUPT_VTAB;
+ while( i>0 && (pHint->a[i-1] & 0x80) ) i--;
+ if( i==0 ) return FTS_CORRUPT_VTAB;
+ i--;
+ while( i>0 && (pHint->a[i-1] & 0x80) ) i--;
+
+ pHint->n = i;
+ i += sqlite3Fts3GetVarint(&pHint->a[i], piAbsLevel);
+ i += fts3GetVarint32(&pHint->a[i], pnInput);
+ assert( i<=nHint );
+ if( i!=nHint ) return FTS_CORRUPT_VTAB;
+
+ return SQLITE_OK;
+}
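+
+/* The hint blob therefore behaves as a stack of (iAbsLevel, nInput) pairs,
+** each stored as two varints. A minimal usage sketch of the two functions
+** above (the values are illustrative; error handling is elided):
+**
+**   Blob hint = {0, 0, 0};
+**   int rc = SQLITE_OK;
+**   i64 iLvl = 0; int nIn = 0;
+**   fts3IncrmergeHintPush(&hint, 3, 4, &rc);
+**   fts3IncrmergeHintPush(&hint, 16, 2, &rc);
+**   rc = fts3IncrmergeHintPop(&hint, &iLvl, &nIn);
+**   assert( rc!=SQLITE_OK || (iLvl==16 && nIn==2) );
+**   sqlite3_free(hint.a);
+*/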
+
+
+/*
+** Attempt an incremental merge that writes nMerge leaf blocks.
+**
+** Incremental merges happen nMin segments at a time. The segments
+** to be merged are the nMin oldest segments (the ones with the smallest
+** values for the _segdir.idx field) in the highest level that contains
+** at least nMin segments. Multiple merges might occur in an attempt to
+** write the quota of nMerge leaf blocks.
+*/
+SQLITE_PRIVATE int sqlite3Fts3Incrmerge(Fts3Table *p, int nMerge, int nMin){
+ int rc; /* Return code */
+ int nRem = nMerge; /* Number of leaf pages yet to be written */
+ Fts3MultiSegReader *pCsr; /* Cursor used to read input data */
+ Fts3SegFilter *pFilter; /* Filter used with cursor pCsr */
+ IncrmergeWriter *pWriter; /* Writer object */
+ int nSeg = 0; /* Number of input segments */
+ sqlite3_int64 iAbsLevel = 0; /* Absolute level number to work on */
+ Blob hint = {0, 0, 0}; /* Hint read from %_stat table */
+ int bDirtyHint = 0; /* True if blob 'hint' has been modified */
+
+ /* Allocate space for the cursor, filter and writer objects */
+ const int nAlloc = sizeof(*pCsr) + sizeof(*pFilter) + sizeof(*pWriter);
+ pWriter = (IncrmergeWriter *)sqlite3_malloc(nAlloc);
+ if( !pWriter ) return SQLITE_NOMEM;
+ pFilter = (Fts3SegFilter *)&pWriter[1];
+ pCsr = (Fts3MultiSegReader *)&pFilter[1];
+
+ rc = fts3IncrmergeHintLoad(p, &hint);
+ while( rc==SQLITE_OK && nRem>0 ){
+ const i64 nMod = FTS3_SEGDIR_MAXLEVEL * p->nIndex;
+ sqlite3_stmt *pFindLevel = 0; /* SQL used to determine iAbsLevel */
+ int bUseHint = 0; /* True if attempting to append */
+ int iIdx = 0; /* Largest idx in level (iAbsLevel+1) */
+
+ /* Search the %_segdir table for the absolute level with the smallest
+ ** relative level number that contains at least nMin segments, if any.
+ ** If one is found, set iAbsLevel to the absolute level number and
+ ** nSeg to nMin. If no level with at least nMin segments can be found,
+ ** set nSeg to -1.
+ */
+ rc = fts3SqlStmt(p, SQL_FIND_MERGE_LEVEL, &pFindLevel, 0);
+ sqlite3_bind_int(pFindLevel, 1, MAX(2, nMin));
+ if( sqlite3_step(pFindLevel)==SQLITE_ROW ){
+ iAbsLevel = sqlite3_column_int64(pFindLevel, 0);
+ nSeg = sqlite3_column_int(pFindLevel, 1);
+ assert( nSeg>=2 );
+ }else{
+ nSeg = -1;
+ }
+ rc = sqlite3_reset(pFindLevel);
+
+ /* If the hint read from the %_stat table is not empty, check if the
+ ** last entry in it specifies a relative level smaller than or equal
+ ** to the level identified by the block above (if any). If so, this
+ ** iteration of the loop will work on merging at the hinted level.
+ */
+ if( rc==SQLITE_OK && hint.n ){
+ int nHint = hint.n;
+ sqlite3_int64 iHintAbsLevel = 0; /* Hint level */
+ int nHintSeg = 0; /* Hint number of segments */
+
+ rc = fts3IncrmergeHintPop(&hint, &iHintAbsLevel, &nHintSeg);
+ if( nSeg<0 || (iAbsLevel % nMod) >= (iHintAbsLevel % nMod) ){
+ /* Based on the scan in the block above, it is known that there
+ ** are no levels with a relative level smaller than that of
+ ** iAbsLevel with more than nSeg segments, or if nSeg is -1,
+ ** no levels with more than nMin segments. Use this to limit the
+ ** value of nHintSeg to avoid a large memory allocation in case the
+      ** merge-hint is corrupt. */
+ iAbsLevel = iHintAbsLevel;
+ nSeg = MIN(MAX(nMin,nSeg), nHintSeg);
+ bUseHint = 1;
+ bDirtyHint = 1;
+ }else{
+ /* This undoes the effect of the HintPop() above - so that no entry
+ ** is removed from the hint blob. */
+ hint.n = nHint;
+ }
+ }
+
+    /* If nSeg is less than zero, then there is no level with at least
+ ** nMin segments and no hint in the %_stat table. No work to do.
+ ** Exit early in this case. */
+ if( nSeg<=0 ) break;
+
+ assert( nMod<=0x7FFFFFFF );
+ if( iAbsLevel<0 || iAbsLevel>(nMod<<32) ){
+ rc = FTS_CORRUPT_VTAB;
+ break;
+ }
+
+ /* Open a cursor to iterate through the contents of the oldest nSeg
+ ** indexes of absolute level iAbsLevel. If this cursor is opened using
+    ** the 'hint' parameters, it is possible that there are fewer than nSeg
+ ** segments available in level iAbsLevel. In this case, no work is
+ ** done on iAbsLevel - fall through to the next iteration of the loop
+ ** to start work on some other level. */
+ memset(pWriter, 0, nAlloc);
+ pFilter->flags = FTS3_SEGMENT_REQUIRE_POS;
+
+ if( rc==SQLITE_OK ){
+ rc = fts3IncrmergeOutputIdx(p, iAbsLevel, &iIdx);
+ assert( bUseHint==1 || bUseHint==0 );
+ if( iIdx==0 || (bUseHint && iIdx==1) ){
+ int bIgnore = 0;
+ rc = fts3SegmentIsMaxLevel(p, iAbsLevel+1, &bIgnore);
+ if( bIgnore ){
+ pFilter->flags |= FTS3_SEGMENT_IGNORE_EMPTY;
+ }
+ }
+ }
+
+ if( rc==SQLITE_OK ){
+ rc = fts3IncrmergeCsr(p, iAbsLevel, nSeg, pCsr);
+ }
+ if( SQLITE_OK==rc && pCsr->nSegment==nSeg
+ && SQLITE_OK==(rc = sqlite3Fts3SegReaderStart(p, pCsr, pFilter))
+ ){
+ int bEmpty = 0;
+ rc = sqlite3Fts3SegReaderStep(p, pCsr);
+ if( rc==SQLITE_OK ){
+ bEmpty = 1;
+ }else if( rc!=SQLITE_ROW ){
+ sqlite3Fts3SegReaderFinish(pCsr);
+ break;
+ }
+ if( bUseHint && iIdx>0 ){
+ const char *zKey = pCsr->zTerm;
+ int nKey = pCsr->nTerm;
+ rc = fts3IncrmergeLoad(p, iAbsLevel, iIdx-1, zKey, nKey, pWriter);
+ }else{
+ rc = fts3IncrmergeWriter(p, iAbsLevel, iIdx, pCsr, pWriter);
+ }
+
+ if( rc==SQLITE_OK && pWriter->nLeafEst ){
+ fts3LogMerge(nSeg, iAbsLevel);
+ if( bEmpty==0 ){
+ do {
+ rc = fts3IncrmergeAppend(p, pWriter, pCsr);
+ if( rc==SQLITE_OK ) rc = sqlite3Fts3SegReaderStep(p, pCsr);
+ if( pWriter->nWork>=nRem && rc==SQLITE_ROW ) rc = SQLITE_OK;
+ }while( rc==SQLITE_ROW );
+ }
+
+ /* Update or delete the input segments */
+ if( rc==SQLITE_OK ){
+ nRem -= (1 + pWriter->nWork);
+ rc = fts3IncrmergeChomp(p, iAbsLevel, pCsr, &nSeg);
+ if( nSeg!=0 ){
+ bDirtyHint = 1;
+ fts3IncrmergeHintPush(&hint, iAbsLevel, nSeg, &rc);
+ }
+ }
+ }
+
+ if( nSeg!=0 ){
+ pWriter->nLeafData = pWriter->nLeafData * -1;
+ }
+ fts3IncrmergeRelease(p, pWriter, &rc);
+ if( nSeg==0 && pWriter->bNoLeafData==0 ){
+ fts3PromoteSegments(p, iAbsLevel+1, pWriter->nLeafData);
+ }
+ }
+
+ sqlite3Fts3SegReaderFinish(pCsr);
+ }
+
+  /* Write the hint values into the %_stat table for the next incr-merge */
+ if( bDirtyHint && rc==SQLITE_OK ){
+ rc = fts3IncrmergeHintStore(p, &hint);
+ }
+
+ sqlite3_free(pWriter);
+ sqlite3_free(hint.a);
+ return rc;
+}
+
+/*
+** Convert the text beginning at *pz into an integer and return
+** its value. Advance *pz to point to the first character past
+** the integer.
+**
+** This function is used to parse the integer parameters passed to the
+** merge= and automerge= commands.
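+**
+** For example, given the input string "500,8", the first call returns
+** 500 and leaves *pz pointing at the ',' character.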
+*/
+static int fts3Getint(const char **pz){
+ const char *z = *pz;
+ int i = 0;
+ while( (*z)>='0' && (*z)<='9' && i<214748363 ) i = 10*i + *(z++) - '0';
+ *pz = z;
+ return i;
+}
+
+/*
+** Process statements of the form:
+**
+** INSERT INTO table(table) VALUES('merge=A,B');
+**
+** A and B are integers that decode to be the number of leaf pages
+** written for the merge, and the minimum number of segments on a level
+** before it will be selected for a merge, respectively.
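+**
+** For example, assuming an FTS table named "t" (the name is
+** illustrative):
+**
+**     INSERT INTO t(t) VALUES('merge=500,8');
+**
+** requests that up to 500 leaf pages be written while merging, and that
+** only levels containing at least 8 segments be considered for merging.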
+*/
+static int fts3DoIncrmerge(
+ Fts3Table *p, /* FTS3 table handle */
+ const char *zParam /* Nul-terminated string containing "A,B" */
+){
+ int rc;
+ int nMin = (MergeCount(p) / 2);
+ int nMerge = 0;
+ const char *z = zParam;
+
+ /* Read the first integer value */
+ nMerge = fts3Getint(&z);
+
+ /* If the first integer value is followed by a ',', read the second
+ ** integer value. */
+ if( z[0]==',' && z[1]!='\0' ){
+ z++;
+ nMin = fts3Getint(&z);
+ }
+
+ if( z[0]!='\0' || nMin<2 ){
+ rc = SQLITE_ERROR;
+ }else{
+ rc = SQLITE_OK;
+ if( !p->bHasStat ){
+ assert( p->bFts4==0 );
+ sqlite3Fts3CreateStatTable(&rc, p);
+ }
+ if( rc==SQLITE_OK ){
+ rc = sqlite3Fts3Incrmerge(p, nMerge, nMin);
+ }
+ sqlite3Fts3SegmentsClose(p);
+ }
+ return rc;
+}
+
+/*
+** Process statements of the form:
+**
+** INSERT INTO table(table) VALUES('automerge=X');
+**
+** where X is an integer. X==0 means to turn automerge off. X!=0 means
+** turn it on. The setting is persistent.
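+**
+** For example, assuming an FTS table named "t" (the name is
+** illustrative):
+**
+**     INSERT INTO t(t) VALUES('automerge=2');
+**
+** enables automatic incremental merges of two segments at a time. As the
+** code below shows, X==1 (or any X greater than the maximum merge count)
+** selects the default of 8.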
+*/
+static int fts3DoAutoincrmerge(
+ Fts3Table *p, /* FTS3 table handle */
+ const char *zParam /* Nul-terminated string containing boolean */
+){
+ int rc = SQLITE_OK;
+ sqlite3_stmt *pStmt = 0;
+ p->nAutoincrmerge = fts3Getint(&zParam);
+ if( p->nAutoincrmerge==1 || p->nAutoincrmerge>MergeCount(p) ){
+ p->nAutoincrmerge = 8;
+ }
+ if( !p->bHasStat ){
+ assert( p->bFts4==0 );
+ sqlite3Fts3CreateStatTable(&rc, p);
+ if( rc ) return rc;
+ }
+ rc = fts3SqlStmt(p, SQL_REPLACE_STAT, &pStmt, 0);
+ if( rc ) return rc;
+ sqlite3_bind_int(pStmt, 1, FTS_STAT_AUTOINCRMERGE);
+ sqlite3_bind_int(pStmt, 2, p->nAutoincrmerge);
+ sqlite3_step(pStmt);
+ rc = sqlite3_reset(pStmt);
+ return rc;
+}
+
+/*
+** Return a 64-bit checksum for the FTS index entry specified by the
+** arguments to this function.
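+**
+** Since "ret += (ret<<3) + x" is equivalent to "ret = ret*9 + x", the
+** value computed below is a simple multiplicative rolling hash: it is
+** seeded with the docid, then folds in the language id, index number,
+** column, position and finally each byte of the term.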
+*/
+static u64 fts3ChecksumEntry(
+ const char *zTerm, /* Pointer to buffer containing term */
+ int nTerm, /* Size of zTerm in bytes */
+ int iLangid, /* Language id for current row */
+ int iIndex, /* Index (0..Fts3Table.nIndex-1) */
+ i64 iDocid, /* Docid for current row. */
+ int iCol, /* Column number */
+ int iPos /* Position */
+){
+ int i;
+ u64 ret = (u64)iDocid;
+
+ ret += (ret<<3) + iLangid;
+ ret += (ret<<3) + iIndex;
+ ret += (ret<<3) + iCol;
+ ret += (ret<<3) + iPos;
+ for(i=0; i<nTerm; i++) ret += (ret<<3) + zTerm[i];
+
+ return ret;
+}
+
+/*
+** Return a checksum of all entries in the FTS index that correspond to
+** language id iLangid. The checksum is calculated by XORing the checksums
+** of each individual entry (see fts3ChecksumEntry()) together.
+**
+** If successful, the checksum value is returned and *pRc set to SQLITE_OK.
+** Otherwise, if an error occurs, *pRc is set to an SQLite error code. The
+** return value is undefined in this case.
+*/
+static u64 fts3ChecksumIndex(
+ Fts3Table *p, /* FTS3 table handle */
+ int iLangid, /* Language id to return cksum for */
+ int iIndex, /* Index to cksum (0..p->nIndex-1) */
+ int *pRc /* OUT: Return code */
+){
+ Fts3SegFilter filter;
+ Fts3MultiSegReader csr;
+ int rc;
+ u64 cksum = 0;
+
+ assert( *pRc==SQLITE_OK );
+
+ memset(&filter, 0, sizeof(filter));
+ memset(&csr, 0, sizeof(csr));
+ filter.flags = FTS3_SEGMENT_REQUIRE_POS|FTS3_SEGMENT_IGNORE_EMPTY;
+ filter.flags |= FTS3_SEGMENT_SCAN;
+
+ rc = sqlite3Fts3SegReaderCursor(
+      p, iLangid, iIndex, FTS3_SEGCURSOR_ALL, 0, 0, 0, 1, &csr
+ );
+ if( rc==SQLITE_OK ){
+ rc = sqlite3Fts3SegReaderStart(p, &csr, &filter);
+ }
+
+ if( rc==SQLITE_OK ){
+ while( SQLITE_ROW==(rc = sqlite3Fts3SegReaderStep(p, &csr)) ){
+ char *pCsr = csr.aDoclist;
+ char *pEnd = &pCsr[csr.nDoclist];
+
+ i64 iDocid = 0;
+ i64 iCol = 0;
+ u64 iPos = 0;
+
+ pCsr += sqlite3Fts3GetVarint(pCsr, &iDocid);
+ while( pCsr<pEnd ){
+ u64 iVal = 0;
+ pCsr += sqlite3Fts3GetVarintU(pCsr, &iVal);
+ if( pCsr<pEnd ){
+ if( iVal==0 || iVal==1 ){
+ iCol = 0;
+ iPos = 0;
+ if( iVal ){
+ pCsr += sqlite3Fts3GetVarint(pCsr, &iCol);
+ }else{
+ pCsr += sqlite3Fts3GetVarintU(pCsr, &iVal);
+ if( p->bDescIdx ){
+ iDocid = (i64)((u64)iDocid - iVal);
+ }else{
+ iDocid = (i64)((u64)iDocid + iVal);
+ }
+ }
+ }else{
+ iPos += (iVal - 2);
+ cksum = cksum ^ fts3ChecksumEntry(
+ csr.zTerm, csr.nTerm, iLangid, iIndex, iDocid,
+ (int)iCol, (int)iPos
+ );
+ }
+ }
+ }
+ }
+ }
+ sqlite3Fts3SegReaderFinish(&csr);
+
+ *pRc = rc;
+ return cksum;
+}
+
+/*
+** Check if the contents of the FTS index match the current contents of the
+** content table. If no error occurs and the contents do match, set *pbOk
+** to true and return SQLITE_OK. Or if the contents do not match, set *pbOk
+** to false before returning.
+**
+** If an error occurs (e.g. an OOM or IO error), return an SQLite error
+** code. The final value of *pbOk is undefined in this case.
+*/
+static int fts3IntegrityCheck(Fts3Table *p, int *pbOk){
+ int rc = SQLITE_OK; /* Return code */
+ u64 cksum1 = 0; /* Checksum based on FTS index contents */
+ u64 cksum2 = 0; /* Checksum based on %_content contents */
+ sqlite3_stmt *pAllLangid = 0; /* Statement to return all language-ids */
+
+ /* This block calculates the checksum according to the FTS index. */
+ rc = fts3SqlStmt(p, SQL_SELECT_ALL_LANGID, &pAllLangid, 0);
+ if( rc==SQLITE_OK ){
+ int rc2;
+ sqlite3_bind_int(pAllLangid, 1, p->iPrevLangid);
+ sqlite3_bind_int(pAllLangid, 2, p->nIndex);
+ while( rc==SQLITE_OK && sqlite3_step(pAllLangid)==SQLITE_ROW ){
+ int iLangid = sqlite3_column_int(pAllLangid, 0);
+ int i;
+ for(i=0; i<p->nIndex; i++){
+ cksum1 = cksum1 ^ fts3ChecksumIndex(p, iLangid, i, &rc);
+ }
+ }
+ rc2 = sqlite3_reset(pAllLangid);
+ if( rc==SQLITE_OK ) rc = rc2;
+ }
+
+ /* This block calculates the checksum according to the %_content table */
+ if( rc==SQLITE_OK ){
+ sqlite3_tokenizer_module const *pModule = p->pTokenizer->pModule;
+ sqlite3_stmt *pStmt = 0;
+ char *zSql;
+
+ zSql = sqlite3_mprintf("SELECT %s" , p->zReadExprlist);
+ if( !zSql ){
+ rc = SQLITE_NOMEM;
+ }else{
+ rc = sqlite3_prepare_v2(p->db, zSql, -1, &pStmt, 0);
+ sqlite3_free(zSql);
+ }
+
+ while( rc==SQLITE_OK && SQLITE_ROW==sqlite3_step(pStmt) ){
+ i64 iDocid = sqlite3_column_int64(pStmt, 0);
+ int iLang = langidFromSelect(p, pStmt);
+ int iCol;
+
+ for(iCol=0; rc==SQLITE_OK && iCol<p->nColumn; iCol++){
+ if( p->abNotindexed[iCol]==0 ){
+ const char *zText = (const char *)sqlite3_column_text(pStmt, iCol+1);
+ sqlite3_tokenizer_cursor *pT = 0;
+
+ rc = sqlite3Fts3OpenTokenizer(p->pTokenizer, iLang, zText, -1, &pT);
+ while( rc==SQLITE_OK ){
+ char const *zToken; /* Buffer containing token */
+ int nToken = 0; /* Number of bytes in token */
+ int iDum1 = 0, iDum2 = 0; /* Dummy variables */
+ int iPos = 0; /* Position of token in zText */
+
+ rc = pModule->xNext(pT, &zToken, &nToken, &iDum1, &iDum2, &iPos);
+ if( rc==SQLITE_OK ){
+ int i;
+ cksum2 = cksum2 ^ fts3ChecksumEntry(
+ zToken, nToken, iLang, 0, iDocid, iCol, iPos
+ );
+ for(i=1; i<p->nIndex; i++){
+ if( p->aIndex[i].nPrefix<=nToken ){
+ cksum2 = cksum2 ^ fts3ChecksumEntry(
+ zToken, p->aIndex[i].nPrefix, iLang, i, iDocid, iCol, iPos
+ );
+ }
+ }
+ }
+ }
+ if( pT ) pModule->xClose(pT);
+ if( rc==SQLITE_DONE ) rc = SQLITE_OK;
+ }
+ }
+ }
+
+ sqlite3_finalize(pStmt);
+ }
+
+ *pbOk = (cksum1==cksum2);
+ return rc;
+}
+
+/*
+** Run the integrity-check. If no error occurs and the current contents of
+** the FTS index are correct, return SQLITE_OK. Or, if the contents of the
+** FTS index are incorrect, return SQLITE_CORRUPT_VTAB.
+**
+** Or, if an error (e.g. an OOM or IO error) occurs, return an SQLite
+** error code.
+**
+** The integrity-check works as follows. For each token and indexed token
+** prefix in the document set, a 64-bit checksum is calculated (by code
+** in fts3ChecksumEntry()) based on the following:
+**
+** + The index number (0 for the main index, 1 for the first prefix
+** index etc.),
+** + The token (or token prefix) text itself,
+** + The language-id of the row it appears in,
+** + The docid of the row it appears in,
+** + The column it appears in, and
+**   + The token's position within that column.
+**
+** The checksums for all entries in the index are XORed together to create
+** a single checksum for the entire index.
+**
+** The integrity-check code calculates the same checksum in two ways:
+**
+** 1. By scanning the contents of the FTS index, and
+** 2. By scanning and tokenizing the content table.
+**
+** If the two checksums are identical, the integrity-check is deemed to have
+** passed.
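+**
+** The check is run in response to statements of the form (assuming an
+** FTS table named "t"; the name is illustrative):
+**
+**     INSERT INTO t(t) VALUES('integrity-check');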
+*/
+static int fts3DoIntegrityCheck(
+ Fts3Table *p /* FTS3 table handle */
+){
+ int rc;
+ int bOk = 0;
+ rc = fts3IntegrityCheck(p, &bOk);
+ if( rc==SQLITE_OK && bOk==0 ) rc = FTS_CORRUPT_VTAB;
+ return rc;
+}
+
+/*
+** Handle a 'special' INSERT of the form:
+**
+** "INSERT INTO tbl(tbl) VALUES(<expr>)"
+**
+** Argument pVal contains the result of <expr>. The meaningful values to
+** insert are the texts 'optimize', 'rebuild' and 'integrity-check', and
+** strings of the form 'merge=A,B' and 'automerge=N'. Debug and test
+** builds also accept several testing-only commands (see below).
+*/
+static int fts3SpecialInsert(Fts3Table *p, sqlite3_value *pVal){
+ int rc = SQLITE_ERROR; /* Return Code */
+ const char *zVal = (const char *)sqlite3_value_text(pVal);
+ int nVal = sqlite3_value_bytes(pVal);
+
+ if( !zVal ){
+ return SQLITE_NOMEM;
+ }else if( nVal==8 && 0==sqlite3_strnicmp(zVal, "optimize", 8) ){
+ rc = fts3DoOptimize(p, 0);
+ }else if( nVal==7 && 0==sqlite3_strnicmp(zVal, "rebuild", 7) ){
+ rc = fts3DoRebuild(p);
+ }else if( nVal==15 && 0==sqlite3_strnicmp(zVal, "integrity-check", 15) ){
+ rc = fts3DoIntegrityCheck(p);
+ }else if( nVal>6 && 0==sqlite3_strnicmp(zVal, "merge=", 6) ){
+ rc = fts3DoIncrmerge(p, &zVal[6]);
+ }else if( nVal>10 && 0==sqlite3_strnicmp(zVal, "automerge=", 10) ){
+ rc = fts3DoAutoincrmerge(p, &zVal[10]);
+#if defined(SQLITE_DEBUG) || defined(SQLITE_TEST)
+ }else{
+ int v;
+ if( nVal>9 && 0==sqlite3_strnicmp(zVal, "nodesize=", 9) ){
+ v = atoi(&zVal[9]);
+ if( v>=24 && v<=p->nPgsz-35 ) p->nNodeSize = v;
+ rc = SQLITE_OK;
+    }else if( nVal>11 && 0==sqlite3_strnicmp(zVal, "maxpending=", 11) ){
+ v = atoi(&zVal[11]);
+ if( v>=64 && v<=FTS3_MAX_PENDING_DATA ) p->nMaxPendingData = v;
+ rc = SQLITE_OK;
+ }else if( nVal>21 && 0==sqlite3_strnicmp(zVal,"test-no-incr-doclist=",21) ){
+ p->bNoIncrDoclist = atoi(&zVal[21]);
+ rc = SQLITE_OK;
+ }else if( nVal>11 && 0==sqlite3_strnicmp(zVal,"mergecount=",11) ){
+ v = atoi(&zVal[11]);
+ if( v>=4 && v<=FTS3_MERGE_COUNT && (v&1)==0 ) p->nMergeCount = v;
+ rc = SQLITE_OK;
+ }
+#endif
+ }
+ return rc;
+}
+
+#ifndef SQLITE_DISABLE_FTS4_DEFERRED
+/*
+** Delete all cached deferred doclists. Deferred doclists are cached
+** (allocated) by the sqlite3Fts3CacheDeferredDoclists() function.
+*/
+SQLITE_PRIVATE void sqlite3Fts3FreeDeferredDoclists(Fts3Cursor *pCsr){
+ Fts3DeferredToken *pDef;
+ for(pDef=pCsr->pDeferred; pDef; pDef=pDef->pNext){
+ fts3PendingListDelete(pDef->pList);
+ pDef->pList = 0;
+ }
+}
+
+/*
+** Free all entries in the pCsr->pDeferred list. Entries are added to
+** this list using sqlite3Fts3DeferToken().
+*/
+SQLITE_PRIVATE void sqlite3Fts3FreeDeferredTokens(Fts3Cursor *pCsr){
+ Fts3DeferredToken *pDef;
+ Fts3DeferredToken *pNext;
+ for(pDef=pCsr->pDeferred; pDef; pDef=pNext){
+ pNext = pDef->pNext;
+ fts3PendingListDelete(pDef->pList);
+ sqlite3_free(pDef);
+ }
+ pCsr->pDeferred = 0;
+}
+
+/*
+** Generate deferred-doclists for all tokens in the pCsr->pDeferred list
+** based on the row that pCsr currently points to.
+**
+** A deferred-doclist is like any other doclist with position information
+** included, except that it only contains entries for a single row of the
+** table, not for all rows.
+*/
+SQLITE_PRIVATE int sqlite3Fts3CacheDeferredDoclists(Fts3Cursor *pCsr){
+ int rc = SQLITE_OK; /* Return code */
+ if( pCsr->pDeferred ){
+ int i; /* Used to iterate through table columns */
+ sqlite3_int64 iDocid; /* Docid of the row pCsr points to */
+ Fts3DeferredToken *pDef; /* Used to iterate through deferred tokens */
+
+ Fts3Table *p = (Fts3Table *)pCsr->base.pVtab;
+ sqlite3_tokenizer *pT = p->pTokenizer;
+ sqlite3_tokenizer_module const *pModule = pT->pModule;
+
+ assert( pCsr->isRequireSeek==0 );
+ iDocid = sqlite3_column_int64(pCsr->pStmt, 0);
+
+ for(i=0; i<p->nColumn && rc==SQLITE_OK; i++){
+ if( p->abNotindexed[i]==0 ){
+ const char *zText = (const char *)sqlite3_column_text(pCsr->pStmt, i+1);
+ sqlite3_tokenizer_cursor *pTC = 0;
+
+ rc = sqlite3Fts3OpenTokenizer(pT, pCsr->iLangid, zText, -1, &pTC);
+ while( rc==SQLITE_OK ){
+ char const *zToken; /* Buffer containing token */
+ int nToken = 0; /* Number of bytes in token */
+ int iDum1 = 0, iDum2 = 0; /* Dummy variables */
+ int iPos = 0; /* Position of token in zText */
+
+ rc = pModule->xNext(pTC, &zToken, &nToken, &iDum1, &iDum2, &iPos);
+ for(pDef=pCsr->pDeferred; pDef && rc==SQLITE_OK; pDef=pDef->pNext){
+ Fts3PhraseToken *pPT = pDef->pToken;
+ if( (pDef->iCol>=p->nColumn || pDef->iCol==i)
+ && (pPT->bFirst==0 || iPos==0)
+ && (pPT->n==nToken || (pPT->isPrefix && pPT->n<nToken))
+ && (0==memcmp(zToken, pPT->z, pPT->n))
+ ){
+ fts3PendingListAppend(&pDef->pList, iDocid, i, iPos, &rc);
+ }
+ }
+ }
+ if( pTC ) pModule->xClose(pTC);
+ if( rc==SQLITE_DONE ) rc = SQLITE_OK;
+ }
+ }
+
+ for(pDef=pCsr->pDeferred; pDef && rc==SQLITE_OK; pDef=pDef->pNext){
+ if( pDef->pList ){
+ rc = fts3PendingListAppendVarint(&pDef->pList, 0);
+ }
+ }
+ }
+
+ return rc;
+}
+
+SQLITE_PRIVATE int sqlite3Fts3DeferredTokenList(
+ Fts3DeferredToken *p,
+ char **ppData,
+ int *pnData
+){
+ char *pRet;
+ int nSkip;
+ sqlite3_int64 dummy;
+
+ *ppData = 0;
+ *pnData = 0;
+
+ if( p->pList==0 ){
+ return SQLITE_OK;
+ }
+
+ pRet = (char *)sqlite3_malloc(p->pList->nData);
+ if( !pRet ) return SQLITE_NOMEM;
+
+ nSkip = sqlite3Fts3GetVarint(p->pList->aData, &dummy);
+ *pnData = p->pList->nData - nSkip;
+ *ppData = pRet;
+
+ memcpy(pRet, &p->pList->aData[nSkip], *pnData);
+ return SQLITE_OK;
+}
+
+/*
+** Add an entry for token pToken to the pCsr->pDeferred list.
+*/
+SQLITE_PRIVATE int sqlite3Fts3DeferToken(
+ Fts3Cursor *pCsr, /* Fts3 table cursor */
+ Fts3PhraseToken *pToken, /* Token to defer */
+ int iCol /* Column that token must appear in (or -1) */
+){
+ Fts3DeferredToken *pDeferred;
+ pDeferred = sqlite3_malloc(sizeof(*pDeferred));
+ if( !pDeferred ){
+ return SQLITE_NOMEM;
+ }
+ memset(pDeferred, 0, sizeof(*pDeferred));
+ pDeferred->pToken = pToken;
+ pDeferred->pNext = pCsr->pDeferred;
+ pDeferred->iCol = iCol;
+ pCsr->pDeferred = pDeferred;
+
+ assert( pToken->pDeferred==0 );
+ pToken->pDeferred = pDeferred;
+
+ return SQLITE_OK;
+}
+#endif
+
+/*
+** SQLite value pRowid contains the rowid of a row that may or may not be
+** present in the FTS3 table. If it is, delete it and adjust the contents
+** of subsidiary data structures accordingly.
+*/
+static int fts3DeleteByRowid(
+ Fts3Table *p,
+ sqlite3_value *pRowid,
+ int *pnChng, /* IN/OUT: Decrement if row is deleted */
+ u32 *aSzDel
+){
+ int rc = SQLITE_OK; /* Return code */
+ int bFound = 0; /* True if *pRowid really is in the table */
+
+ fts3DeleteTerms(&rc, p, pRowid, aSzDel, &bFound);
+ if( bFound && rc==SQLITE_OK ){
+ int isEmpty = 0; /* Deleting *pRowid leaves the table empty */
+ rc = fts3IsEmpty(p, pRowid, &isEmpty);
+ if( rc==SQLITE_OK ){
+ if( isEmpty ){
+ /* Deleting this row means the whole table is empty. In this case
+ ** delete the contents of all three tables and throw away any
+ ** data in the pendingTerms hash table. */
+ rc = fts3DeleteAll(p, 1);
+ *pnChng = 0;
+ memset(aSzDel, 0, sizeof(u32) * (p->nColumn+1) * 2);
+ }else{
+ *pnChng = *pnChng - 1;
+ if( p->zContentTbl==0 ){
+ fts3SqlExec(&rc, p, SQL_DELETE_CONTENT, &pRowid);
+ }
+ if( p->bHasDocsize ){
+ fts3SqlExec(&rc, p, SQL_DELETE_DOCSIZE, &pRowid);
+ }
+ }
+ }
+ }
+
+ return rc;
+}
+
+/*
+** This function does the work for the xUpdate method of FTS3 virtual
+** tables. The schema of the virtual table is:
+**
+** CREATE TABLE <table name>(
+** <user columns>,
+** <table name> HIDDEN,
+** docid HIDDEN,
+** <langid> HIDDEN
+** );
+*/
+SQLITE_PRIVATE int sqlite3Fts3UpdateMethod(
+ sqlite3_vtab *pVtab, /* FTS3 vtab object */
+ int nArg, /* Size of argument array */
+ sqlite3_value **apVal, /* Array of arguments */
+ sqlite_int64 *pRowid /* OUT: The affected (or effected) rowid */
+){
+ Fts3Table *p = (Fts3Table *)pVtab;
+ int rc = SQLITE_OK; /* Return Code */
+ u32 *aSzIns = 0; /* Sizes of inserted documents */
+ u32 *aSzDel = 0; /* Sizes of deleted documents */
+ int nChng = 0; /* Net change in number of documents */
+ int bInsertDone = 0;
+
+ /* At this point it must be known if the %_stat table exists or not.
+ ** So bHasStat may not be 2. */
+ assert( p->bHasStat==0 || p->bHasStat==1 );
+
+ assert( p->pSegments==0 );
+ assert(
+ nArg==1 /* DELETE operations */
+ || nArg==(2 + p->nColumn + 3) /* INSERT or UPDATE operations */
+ );
+
+ /* Check for a "special" INSERT operation. One of the form:
+ **
+ ** INSERT INTO xyz(xyz) VALUES('command');
+ */
+ if( nArg>1
+ && sqlite3_value_type(apVal[0])==SQLITE_NULL
+ && sqlite3_value_type(apVal[p->nColumn+2])!=SQLITE_NULL
+ ){
+ rc = fts3SpecialInsert(p, apVal[p->nColumn+2]);
+ goto update_out;
+ }
+
+ if( nArg>1 && sqlite3_value_int(apVal[2 + p->nColumn + 2])<0 ){
+ rc = SQLITE_CONSTRAINT;
+ goto update_out;
+ }
+
+ /* Allocate space to hold the change in document sizes */
+ aSzDel = sqlite3_malloc64(sizeof(aSzDel[0])*((sqlite3_int64)p->nColumn+1)*2);
+ if( aSzDel==0 ){
+ rc = SQLITE_NOMEM;
+ goto update_out;
+ }
+ aSzIns = &aSzDel[p->nColumn+1];
+ memset(aSzDel, 0, sizeof(aSzDel[0])*(p->nColumn+1)*2);
+
+ rc = fts3Writelock(p);
+ if( rc!=SQLITE_OK ) goto update_out;
+
+ /* If this is an INSERT operation, or an UPDATE that modifies the rowid
+ ** value, then this operation requires constraint handling.
+ **
+ ** If the on-conflict mode is REPLACE, this means that the existing row
+ ** should be deleted from the database before inserting the new row. Or,
+ ** if the on-conflict mode is other than REPLACE, then this method must
+ ** detect the conflict and return SQLITE_CONSTRAINT before beginning to
+ ** modify the database file.
+ */
+ if( nArg>1 && p->zContentTbl==0 ){
+ /* Find the value object that holds the new rowid value. */
+ sqlite3_value *pNewRowid = apVal[3+p->nColumn];
+ if( sqlite3_value_type(pNewRowid)==SQLITE_NULL ){
+ pNewRowid = apVal[1];
+ }
+
+ if( sqlite3_value_type(pNewRowid)!=SQLITE_NULL && (
+ sqlite3_value_type(apVal[0])==SQLITE_NULL
+ || sqlite3_value_int64(apVal[0])!=sqlite3_value_int64(pNewRowid)
+ )){
+ /* The new rowid is not NULL (in this case the rowid will be
+ ** automatically assigned and there is no chance of a conflict), and
+ ** the statement is either an INSERT or an UPDATE that modifies the
+ ** rowid column. So if the conflict mode is REPLACE, then delete any
+ ** existing row with rowid=pNewRowid.
+ **
+ ** Or, if the conflict mode is not REPLACE, insert the new record into
+ ** the %_content table. If we hit the duplicate rowid constraint (or any
+ ** other error) while doing so, return immediately.
+ **
+ ** This branch may also run if pNewRowid contains a value that cannot
+ ** be losslessly converted to an integer. In this case, the eventual
+ ** call to fts3InsertData() (either just below or further on in this
+ ** function) will return SQLITE_MISMATCH. If fts3DeleteByRowid is
+ ** invoked, it will delete zero rows (since no row will have
+ ** docid=$pNewRowid if $pNewRowid is not an integer value).
+ */
+ if( sqlite3_vtab_on_conflict(p->db)==SQLITE_REPLACE ){
+ rc = fts3DeleteByRowid(p, pNewRowid, &nChng, aSzDel);
+ }else{
+ rc = fts3InsertData(p, apVal, pRowid);
+ bInsertDone = 1;
+ }
+ }
+ }
+ if( rc!=SQLITE_OK ){
+ goto update_out;
+ }
+
+ /* If this is a DELETE or UPDATE operation, remove the old record. */
+ if( sqlite3_value_type(apVal[0])!=SQLITE_NULL ){
+ assert( sqlite3_value_type(apVal[0])==SQLITE_INTEGER );
+ rc = fts3DeleteByRowid(p, apVal[0], &nChng, aSzDel);
+ }
+
+ /* If this is an INSERT or UPDATE operation, insert the new record. */
+ if( nArg>1 && rc==SQLITE_OK ){
+ int iLangid = sqlite3_value_int(apVal[2 + p->nColumn + 2]);
+ if( bInsertDone==0 ){
+ rc = fts3InsertData(p, apVal, pRowid);
+ if( rc==SQLITE_CONSTRAINT && p->zContentTbl==0 ){
+ rc = FTS_CORRUPT_VTAB;
+ }
+ }
+ if( rc==SQLITE_OK ){
+ rc = fts3PendingTermsDocid(p, 0, iLangid, *pRowid);
+ }
+ if( rc==SQLITE_OK ){
+ assert( p->iPrevDocid==*pRowid );
+ rc = fts3InsertTerms(p, iLangid, apVal, aSzIns);
+ }
+ if( p->bHasDocsize ){
+ fts3InsertDocsize(&rc, p, aSzIns);
+ }
+ nChng++;
+ }
+
+ if( p->bFts4 ){
+ fts3UpdateDocTotals(&rc, p, aSzIns, aSzDel, nChng);
+ }
+
+ update_out:
+ sqlite3_free(aSzDel);
+ sqlite3Fts3SegmentsClose(p);
+ return rc;
+}
+
+/*
+** Flush any data in the pending-terms hash table to disk. If successful,
+** merge all segments in the database (including the new segment, if
+** there was any data to flush) into a single segment.
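+**
+** This is used to implement the legacy optimize() scalar function, as in
+** "SELECT optimize(t) FROM t LIMIT 1" (the table name is illustrative).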
+*/
+SQLITE_PRIVATE int sqlite3Fts3Optimize(Fts3Table *p){
+ int rc;
+ rc = sqlite3_exec(p->db, "SAVEPOINT fts3", 0, 0, 0);
+ if( rc==SQLITE_OK ){
+ rc = fts3DoOptimize(p, 1);
+ if( rc==SQLITE_OK || rc==SQLITE_DONE ){
+ int rc2 = sqlite3_exec(p->db, "RELEASE fts3", 0, 0, 0);
+ if( rc2!=SQLITE_OK ) rc = rc2;
+ }else{
+ sqlite3_exec(p->db, "ROLLBACK TO fts3", 0, 0, 0);
+ sqlite3_exec(p->db, "RELEASE fts3", 0, 0, 0);
+ }
+ }
+ sqlite3Fts3SegmentsClose(p);
+ return rc;
+}
+
+#endif
+
+/************** End of fts3_write.c ******************************************/
+/************** Begin file fts3_snippet.c ************************************/
+/*
+** 2009 Oct 23
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+******************************************************************************
+*/
+
+/* #include "fts3Int.h" */
+#if !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS3)
+
+/* #include <string.h> */
+/* #include <assert.h> */
+
+/*
+** Characters that may appear in the second argument to matchinfo().
+*/
+#define FTS3_MATCHINFO_NPHRASE 'p' /* 1 value */
+#define FTS3_MATCHINFO_NCOL 'c' /* 1 value */
+#define FTS3_MATCHINFO_NDOC 'n' /* 1 value */
+#define FTS3_MATCHINFO_AVGLENGTH 'a' /* nCol values */
+#define FTS3_MATCHINFO_LENGTH 'l' /* nCol values */
+#define FTS3_MATCHINFO_LCS 's' /* nCol values */
+#define FTS3_MATCHINFO_HITS 'x' /* 3*nCol*nPhrase values */
+#define FTS3_MATCHINFO_LHITS 'y' /* nCol*nPhrase values */
+#define FTS3_MATCHINFO_LHITS_BM 'b' /* nCol*nPhrase values */
+
+/*
+** The default value for the second argument to matchinfo().
+*/
+#define FTS3_MATCHINFO_DEFAULT "pcx"
+
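+/* For example, the following query (table and column names are
+** illustrative) returns the default 'p', 'c' and 'x' matchinfo values
+** for each row matched by the full-text query, and is equivalent to
+** "SELECT matchinfo(t, 'pcx') ...":
+**
+**     SELECT matchinfo(t) FROM t WHERE t MATCH 'sqlite';
+*/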
+
+/*
+** Used as an fts3ExprIterate() context when loading phrase doclists to
+** Fts3Expr.aDoclist[]/nDoclist.
+*/
+typedef struct LoadDoclistCtx LoadDoclistCtx;
+struct LoadDoclistCtx {
+ Fts3Cursor *pCsr; /* FTS3 Cursor */
+ int nPhrase; /* Number of phrases seen so far */
+ int nToken; /* Number of tokens seen so far */
+};
+
+/*
+** The following types are used as part of the implementation of the
+** fts3BestSnippet() routine.
+*/
+typedef struct SnippetIter SnippetIter;
+typedef struct SnippetPhrase SnippetPhrase;
+typedef struct SnippetFragment SnippetFragment;
+
+struct SnippetIter {
+ Fts3Cursor *pCsr; /* Cursor snippet is being generated from */
+ int iCol; /* Extract snippet from this column */
+ int nSnippet; /* Requested snippet length (in tokens) */
+ int nPhrase; /* Number of phrases in query */
+ SnippetPhrase *aPhrase; /* Array of size nPhrase */
+ int iCurrent; /* First token of current snippet */
+};
+
+struct SnippetPhrase {
+ int nToken; /* Number of tokens in phrase */
+ char *pList; /* Pointer to start of phrase position list */
+ int iHead; /* Next value in position list */
+ char *pHead; /* Position list data following iHead */
+ int iTail; /* Next value in trailing position list */
+ char *pTail; /* Position list data following iTail */
+};
+
+struct SnippetFragment {
+ int iCol; /* Column snippet is extracted from */
+ int iPos; /* Index of first token in snippet */
+ u64 covered; /* Mask of query phrases covered */
+ u64 hlmask; /* Mask of snippet terms to highlight */
+};
+
+/*
+** This type is used as an fts3ExprIterate() context object while
+** accumulating the data returned by the matchinfo() function.
+*/
+typedef struct MatchInfo MatchInfo;
+struct MatchInfo {
+ Fts3Cursor *pCursor; /* FTS3 Cursor */
+ int nCol; /* Number of columns in table */
+ int nPhrase; /* Number of matchable phrases in query */
+ sqlite3_int64 nDoc; /* Number of docs in database */
+ char flag;
+ u32 *aMatchinfo; /* Pre-allocated buffer */
+};
+
+/*
+** An instance of this structure is used to manage a pair of buffers, each
+** (nElem * sizeof(u32)) bytes in size. See the MatchinfoBuffer code below
+** for details.
+*/
+struct MatchinfoBuffer {
+ u8 aRef[3];
+ int nElem;
+ int bGlobal; /* Set if global data is loaded */
+ char *zMatchinfo;
+ u32 aMatchinfo[1];
+};
+
+
+/*
+** The snippet() and offsets() functions both return text values. An instance
+** of the following structure is used to accumulate those values while the
+** functions are running. See fts3StringAppend() for details.
+*/
+typedef struct StrBuffer StrBuffer;
+struct StrBuffer {
+ char *z; /* Pointer to buffer containing string */
+ int n; /* Length of z in bytes (excl. nul-term) */
+ int nAlloc; /* Allocated size of buffer z in bytes */
+};
+
+
+/*************************************************************************
+** Start of MatchinfoBuffer code.
+*/
+
+/*
+** Allocate a two-slot MatchinfoBuffer object.
+*/
+static MatchinfoBuffer *fts3MIBufferNew(size_t nElem, const char *zMatchinfo){
+ MatchinfoBuffer *pRet;
+ sqlite3_int64 nByte = sizeof(u32) * (2*(sqlite3_int64)nElem + 1)
+ + sizeof(MatchinfoBuffer);
+ sqlite3_int64 nStr = strlen(zMatchinfo);
+
+ pRet = sqlite3_malloc64(nByte + nStr+1);
+ if( pRet ){
+ memset(pRet, 0, nByte);
+ pRet->aMatchinfo[0] = (u8*)(&pRet->aMatchinfo[1]) - (u8*)pRet;
+ pRet->aMatchinfo[1+nElem] = pRet->aMatchinfo[0]
+ + sizeof(u32)*((int)nElem+1);
+ pRet->nElem = (int)nElem;
+ pRet->zMatchinfo = ((char*)pRet) + nByte;
+ memcpy(pRet->zMatchinfo, zMatchinfo, nStr+1);
+ pRet->aRef[0] = 1;
+ }
+
+ return pRet;
+}
+
+static void fts3MIBufferFree(void *p){
+ MatchinfoBuffer *pBuf = (MatchinfoBuffer*)((u8*)p - ((u32*)p)[-1]);
+
+ assert( (u32*)p==&pBuf->aMatchinfo[1]
+ || (u32*)p==&pBuf->aMatchinfo[pBuf->nElem+2]
+ );
+ if( (u32*)p==&pBuf->aMatchinfo[1] ){
+ pBuf->aRef[1] = 0;
+ }else{
+ pBuf->aRef[2] = 0;
+ }
+
+ if( pBuf->aRef[0]==0 && pBuf->aRef[1]==0 && pBuf->aRef[2]==0 ){
+ sqlite3_free(pBuf);
+ }
+}
+
+static void (*fts3MIBufferAlloc(MatchinfoBuffer *p, u32 **paOut))(void*){
+ void (*xRet)(void*) = 0;
+ u32 *aOut = 0;
+
+ if( p->aRef[1]==0 ){
+ p->aRef[1] = 1;
+ aOut = &p->aMatchinfo[1];
+ xRet = fts3MIBufferFree;
+ }
+ else if( p->aRef[2]==0 ){
+ p->aRef[2] = 1;
+ aOut = &p->aMatchinfo[p->nElem+2];
+ xRet = fts3MIBufferFree;
+ }else{
+ aOut = (u32*)sqlite3_malloc64(p->nElem * sizeof(u32));
+ if( aOut ){
+ xRet = sqlite3_free;
+ if( p->bGlobal ) memcpy(aOut, &p->aMatchinfo[1], p->nElem*sizeof(u32));
+ }
+ }
+
+ *paOut = aOut;
+ return xRet;
+}
+
+static void fts3MIBufferSetGlobal(MatchinfoBuffer *p){
+ p->bGlobal = 1;
+ memcpy(&p->aMatchinfo[2+p->nElem], &p->aMatchinfo[1], p->nElem*sizeof(u32));
+}
+
+/*
+** Free a MatchinfoBuffer object allocated using fts3MIBufferNew()
+*/
+SQLITE_PRIVATE void sqlite3Fts3MIBufferFree(MatchinfoBuffer *p){
+ if( p ){
+ assert( p->aRef[0]==1 );
+ p->aRef[0] = 0;
+ if( p->aRef[0]==0 && p->aRef[1]==0 && p->aRef[2]==0 ){
+ sqlite3_free(p);
+ }
+ }
+}
+
+/*
+** End of MatchinfoBuffer code.
+*************************************************************************/
+
+
+/*
+** This function is used to help iterate through a position-list. A position
+** list is a list of unique integers, sorted from smallest to largest. Each
+** element of the list is represented by an FTS3 varint that takes the value
+** of the difference between the current element and the previous one plus
+** two. For example, to store the position-list:
+**
+** 4 9 113
+**
+** the three varints:
+**
+** 6 7 106
+**
+** are encoded.
+**
+** When this function is called, *pp points to the start of an element of
+** the list. *piPos contains the value of the previous entry in the list.
+** After it returns, *piPos contains the value of the next element of the
+** list and *pp is advanced to the following varint.
+*/
+static void fts3GetDeltaPosition(char **pp, int *piPos){
+ int iVal;
+ *pp += fts3GetVarint32(*pp, &iVal);
+ *piPos += (iVal-2);
+}
+
+/*
+** Helper function for fts3ExprIterate() (see below).
+*/
+static int fts3ExprIterate2(
+ Fts3Expr *pExpr, /* Expression to iterate phrases of */
+ int *piPhrase, /* Pointer to phrase counter */
+ int (*x)(Fts3Expr*,int,void*), /* Callback function to invoke for phrases */
+ void *pCtx /* Second argument to pass to callback */
+){
+ int rc; /* Return code */
+ int eType = pExpr->eType; /* Type of expression node pExpr */
+
+ if( eType!=FTSQUERY_PHRASE ){
+ assert( pExpr->pLeft && pExpr->pRight );
+ rc = fts3ExprIterate2(pExpr->pLeft, piPhrase, x, pCtx);
+ if( rc==SQLITE_OK && eType!=FTSQUERY_NOT ){
+ rc = fts3ExprIterate2(pExpr->pRight, piPhrase, x, pCtx);
+ }
+ }else{
+ rc = x(pExpr, *piPhrase, pCtx);
+ (*piPhrase)++;
+ }
+ return rc;
+}
+
+/*
+** Iterate through all phrase nodes in an FTS3 query, except those that
+** are part of a sub-tree that is the right-hand-side of a NOT operator.
+** For each phrase node found, the supplied callback function is invoked.
+**
+** If the callback function returns anything other than SQLITE_OK,
+** the iteration is abandoned and the error code returned immediately.
+** Otherwise, SQLITE_OK is returned after a callback has been made for
+** all eligible phrase nodes.
+*/
+static int fts3ExprIterate(
+ Fts3Expr *pExpr, /* Expression to iterate phrases of */
+ int (*x)(Fts3Expr*,int,void*), /* Callback function to invoke for phrases */
+ void *pCtx /* Second argument to pass to callback */
+){
+ int iPhrase = 0; /* Variable used as the phrase counter */
+ return fts3ExprIterate2(pExpr, &iPhrase, x, pCtx);
+}
+
+
+/*
+** This is an fts3ExprIterate() callback used while loading the doclists
+** for each phrase into Fts3Expr.aDoclist[]/nDoclist. See also
+** fts3ExprLoadDoclists().
+*/
+static int fts3ExprLoadDoclistsCb(Fts3Expr *pExpr, int iPhrase, void *ctx){
+ int rc = SQLITE_OK;
+ Fts3Phrase *pPhrase = pExpr->pPhrase;
+ LoadDoclistCtx *p = (LoadDoclistCtx *)ctx;
+
+ UNUSED_PARAMETER(iPhrase);
+
+ p->nPhrase++;
+ p->nToken += pPhrase->nToken;
+
+ return rc;
+}
+
+/*
+** Load the doclists for each phrase in the query associated with FTS3 cursor
+** pCsr.
+**
+** If pnPhrase is not NULL, then *pnPhrase is set to the number of matchable
+** phrases in the expression (all phrases except those directly or
+** indirectly descended from the right-hand-side of a NOT operator). If
+** pnToken is not NULL, then it is set to the number of tokens in all
+** matchable phrases of the expression.
+*/
+static int fts3ExprLoadDoclists(
+ Fts3Cursor *pCsr, /* Fts3 cursor for current query */
+ int *pnPhrase, /* OUT: Number of phrases in query */
+ int *pnToken /* OUT: Number of tokens in query */
+){
+ int rc; /* Return Code */
+ LoadDoclistCtx sCtx = {0,0,0}; /* Context for fts3ExprIterate() */
+ sCtx.pCsr = pCsr;
+ rc = fts3ExprIterate(pCsr->pExpr, fts3ExprLoadDoclistsCb, (void *)&sCtx);
+ if( pnPhrase ) *pnPhrase = sCtx.nPhrase;
+ if( pnToken ) *pnToken = sCtx.nToken;
+ return rc;
+}
+
+static int fts3ExprPhraseCountCb(Fts3Expr *pExpr, int iPhrase, void *ctx){
+ (*(int *)ctx)++;
+ pExpr->iPhrase = iPhrase;
+ return SQLITE_OK;
+}
+static int fts3ExprPhraseCount(Fts3Expr *pExpr){
+ int nPhrase = 0;
+ (void)fts3ExprIterate(pExpr, fts3ExprPhraseCountCb, (void *)&nPhrase);
+ return nPhrase;
+}
+
+/*
+** Advance the position list iterator specified by the first two
+** arguments so that it points to the first element with a value greater
+** than or equal to parameter iNext.
+*/
+static void fts3SnippetAdvance(char **ppIter, int *piIter, int iNext){
+ char *pIter = *ppIter;
+ if( pIter ){
+ int iIter = *piIter;
+
+ while( iIter<iNext ){
+ if( 0==(*pIter & 0xFE) ){
+ iIter = -1;
+ pIter = 0;
+ break;
+ }
+ fts3GetDeltaPosition(&pIter, &iIter);
+ }
+
+ *piIter = iIter;
+ *ppIter = pIter;
+ }
+}
+
+/*
+** Advance the snippet iterator to the next candidate snippet.
+*/
+static int fts3SnippetNextCandidate(SnippetIter *pIter){
+ int i; /* Loop counter */
+
+ if( pIter->iCurrent<0 ){
+ /* The SnippetIter object has just been initialized. The first snippet
+ ** candidate always starts at offset 0 (even if this candidate has a
+ ** score of 0.0).
+ */
+ pIter->iCurrent = 0;
+
+ /* Advance the 'head' iterator of each phrase to the first offset that
+ ** is greater than or equal to (iNext+nSnippet).
+ */
+ for(i=0; i<pIter->nPhrase; i++){
+ SnippetPhrase *pPhrase = &pIter->aPhrase[i];
+ fts3SnippetAdvance(&pPhrase->pHead, &pPhrase->iHead, pIter->nSnippet);
+ }
+ }else{
+ int iStart;
+ int iEnd = 0x7FFFFFFF;
+
+ for(i=0; i<pIter->nPhrase; i++){
+ SnippetPhrase *pPhrase = &pIter->aPhrase[i];
+ if( pPhrase->pHead && pPhrase->iHead<iEnd ){
+ iEnd = pPhrase->iHead;
+ }
+ }
+ if( iEnd==0x7FFFFFFF ){
+ return 1;
+ }
+
+ pIter->iCurrent = iStart = iEnd - pIter->nSnippet + 1;
+ for(i=0; i<pIter->nPhrase; i++){
+ SnippetPhrase *pPhrase = &pIter->aPhrase[i];
+ fts3SnippetAdvance(&pPhrase->pHead, &pPhrase->iHead, iEnd+1);
+ fts3SnippetAdvance(&pPhrase->pTail, &pPhrase->iTail, iStart);
+ }
+ }
+
+ return 0;
+}
+
+/*
+** Retrieve information about the current candidate snippet of snippet
+** iterator pIter.
+*/
+static void fts3SnippetDetails(
+ SnippetIter *pIter, /* Snippet iterator */
+ u64 mCovered, /* Bitmask of phrases already covered */
+ int *piToken, /* OUT: First token of proposed snippet */
+ int *piScore, /* OUT: "Score" for this snippet */
+ u64 *pmCover, /* OUT: Bitmask of phrases covered */
+ u64 *pmHighlight /* OUT: Bitmask of terms to highlight */
+){
+ int iStart = pIter->iCurrent; /* First token of snippet */
+ int iScore = 0; /* Score of this snippet */
+ int i; /* Loop counter */
+ u64 mCover = 0; /* Mask of phrases covered by this snippet */
+ u64 mHighlight = 0; /* Mask of tokens to highlight in snippet */
+
+ for(i=0; i<pIter->nPhrase; i++){
+ SnippetPhrase *pPhrase = &pIter->aPhrase[i];
+ if( pPhrase->pTail ){
+ char *pCsr = pPhrase->pTail;
+ int iCsr = pPhrase->iTail;
+
+ while( iCsr<(iStart+pIter->nSnippet) && iCsr>=iStart ){
+ int j;
+ u64 mPhrase = (u64)1 << (i%64);
+ u64 mPos = (u64)1 << (iCsr - iStart);
+ assert( iCsr>=iStart && (iCsr - iStart)<=64 );
+ assert( i>=0 );
+ if( (mCover|mCovered)&mPhrase ){
+ iScore++;
+ }else{
+ iScore += 1000;
+ }
+ mCover |= mPhrase;
+
+ for(j=0; j<pPhrase->nToken; j++){
+ mHighlight |= (mPos>>j);
+ }
+
+ if( 0==(*pCsr & 0x0FE) ) break;
+ fts3GetDeltaPosition(&pCsr, &iCsr);
+ }
+ }
+ }
+
+ /* Set the output variables before returning. */
+ *piToken = iStart;
+ *piScore = iScore;
+ *pmCover = mCover;
+ *pmHighlight = mHighlight;
+}
+
+/*
+** This function is an fts3ExprIterate() callback used by fts3BestSnippet().
+** Each invocation populates an element of the SnippetIter.aPhrase[] array.
+*/
+static int fts3SnippetFindPositions(Fts3Expr *pExpr, int iPhrase, void *ctx){
+ SnippetIter *p = (SnippetIter *)ctx;
+ SnippetPhrase *pPhrase = &p->aPhrase[iPhrase];
+ char *pCsr;
+ int rc;
+
+ pPhrase->nToken = pExpr->pPhrase->nToken;
+ rc = sqlite3Fts3EvalPhrasePoslist(p->pCsr, pExpr, p->iCol, &pCsr);
+ assert( rc==SQLITE_OK || pCsr==0 );
+ if( pCsr ){
+ int iFirst = 0;
+ pPhrase->pList = pCsr;
+ fts3GetDeltaPosition(&pCsr, &iFirst);
+ if( iFirst<0 ){
+ rc = FTS_CORRUPT_VTAB;
+ }else{
+ pPhrase->pHead = pCsr;
+ pPhrase->pTail = pCsr;
+ pPhrase->iHead = iFirst;
+ pPhrase->iTail = iFirst;
+ }
+ }else{
+ assert( rc!=SQLITE_OK || (
+ pPhrase->pList==0 && pPhrase->pHead==0 && pPhrase->pTail==0
+ ));
+ }
+
+ return rc;
+}
+
+/*
+** Select the fragment of text consisting of nFragment contiguous tokens
+** from column iCol that represent the "best" snippet. The best snippet
+** is the snippet with the highest score, where scores are calculated
+** by adding:
+**
+**   (a) +1000 points for the first occurrence of each matchable phrase in
+**      the snippet for which the corresponding mCovered bit is not set,
+**      and
+**
+**   (b) +1 point for every other occurrence of a matchable phrase in the
+**      snippet.
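+**
+** For example, a candidate snippet containing two occurrences of a single
+** phrase whose mCovered bit is clear scores 1001 points: 1000 for the
+** first occurrence and 1 for the second.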
+**
+** The selected snippet parameters are stored in structure *pFragment before
+** returning. The score of the selected snippet is stored in *piScore
+** before returning.
+*/
+static int fts3BestSnippet(
+ int nSnippet, /* Desired snippet length */
+ Fts3Cursor *pCsr, /* Cursor to create snippet for */
+ int iCol, /* Index of column to create snippet from */
+ u64 mCovered, /* Mask of phrases already covered */
+ u64 *pmSeen, /* IN/OUT: Mask of phrases seen */
+ SnippetFragment *pFragment, /* OUT: Best snippet found */
+ int *piScore /* OUT: Score of snippet pFragment */
+){
+ int rc; /* Return Code */
+ int nList; /* Number of phrases in expression */
+ SnippetIter sIter; /* Iterates through snippet candidates */
+ sqlite3_int64 nByte; /* Number of bytes of space to allocate */
+ int iBestScore = -1; /* Best snippet score found so far */
+ int i; /* Loop counter */
+
+ memset(&sIter, 0, sizeof(sIter));
+
+  /* Iterate through the phrases in the expression to count them. The
+  ** callback also counts the tokens that make up each phrase.
+ */
+ rc = fts3ExprLoadDoclists(pCsr, &nList, 0);
+ if( rc!=SQLITE_OK ){
+ return rc;
+ }
+
+ /* Now that it is known how many phrases there are, allocate and zero
+ ** the required space using malloc().
+ */
+ nByte = sizeof(SnippetPhrase) * nList;
+ sIter.aPhrase = (SnippetPhrase *)sqlite3_malloc64(nByte);
+ if( !sIter.aPhrase ){
+ return SQLITE_NOMEM;
+ }
+ memset(sIter.aPhrase, 0, nByte);
+
+ /* Initialize the contents of the SnippetIter object. Then iterate through
+ ** the set of phrases in the expression to populate the aPhrase[] array.
+ */
+ sIter.pCsr = pCsr;
+ sIter.iCol = iCol;
+ sIter.nSnippet = nSnippet;
+ sIter.nPhrase = nList;
+ sIter.iCurrent = -1;
+ rc = fts3ExprIterate(pCsr->pExpr, fts3SnippetFindPositions, (void*)&sIter);
+ if( rc==SQLITE_OK ){
+
+ /* Set the *pmSeen output variable. */
+ for(i=0; i<nList; i++){
+ if( sIter.aPhrase[i].pHead ){
+ *pmSeen |= (u64)1 << (i%64);
+ }
+ }
+
+ /* Loop through all candidate snippets. Store the best snippet in
+ ** *pFragment. Store its associated 'score' in iBestScore.
+ */
+ pFragment->iCol = iCol;
+ while( !fts3SnippetNextCandidate(&sIter) ){
+ int iPos;
+ int iScore;
+ u64 mCover;
+ u64 mHighlite;
+ fts3SnippetDetails(&sIter, mCovered, &iPos, &iScore, &mCover,&mHighlite);
+ assert( iScore>=0 );
+ if( iScore>iBestScore ){
+ pFragment->iPos = iPos;
+ pFragment->hlmask = mHighlite;
+ pFragment->covered = mCover;
+ iBestScore = iScore;
+ }
+ }
+
+ *piScore = iBestScore;
+ }
+ sqlite3_free(sIter.aPhrase);
+ return rc;
+}
+
+
+/*
+** Append a string to the string-buffer passed as the first argument.
+**
+** If nAppend is negative, then the length of the string zAppend is
+** determined using strlen().
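+**
+** A minimal usage sketch (error checking elided):
+**
+**     StrBuffer s = {0, 0, 0};
+**     fts3StringAppend(&s, "hello ", -1);
+**     fts3StringAppend(&s, "world", 5);
+**     assert( 0==strcmp(s.z, "hello world") );
+**     sqlite3_free(s.z);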
+*/
+static int fts3StringAppend(
+ StrBuffer *pStr, /* Buffer to append to */
+ const char *zAppend, /* Pointer to data to append to buffer */
+ int nAppend /* Size of zAppend in bytes (or -1) */
+){
+ if( nAppend<0 ){
+ nAppend = (int)strlen(zAppend);
+ }
+
+ /* If there is insufficient space allocated at StrBuffer.z, use realloc()
+  ** to grow the buffer so that it is big enough to accommodate the
+ ** appended data.
+ */
+ if( pStr->n+nAppend+1>=pStr->nAlloc ){
+ sqlite3_int64 nAlloc = pStr->nAlloc+(sqlite3_int64)nAppend+100;
+ char *zNew = sqlite3_realloc64(pStr->z, nAlloc);
+ if( !zNew ){
+ return SQLITE_NOMEM;
+ }
+ pStr->z = zNew;
+ pStr->nAlloc = nAlloc;
+ }
+ assert( pStr->z!=0 && (pStr->nAlloc >= pStr->n+nAppend+1) );
+
+ /* Append the data to the string buffer. */
+ memcpy(&pStr->z[pStr->n], zAppend, nAppend);
+ pStr->n += nAppend;
+ pStr->z[pStr->n] = '\0';
+
+ return SQLITE_OK;
+}
+
+/*
+** The fts3BestSnippet() function usually selects snippets that end with a
+** query term. That is because each candidate snippet considered ends at
+** an occurrence of one of the query phrases, i.e. at a term that requires
+** highlighting. For example, if 'X' is a highlighted term
+** and '.' is a non-highlighted term, BestSnippet() may select:
+**
+** ........X.....X
+**
+** This function "shifts" the beginning of the snippet forward in the
+** document so that there are approximately the same number of
+** non-highlighted terms to the right of the final highlighted term as there
+** are to the left of the first highlighted term. For example, to this:
+**
+** ....X.....X....
+**
+** This is done as part of extracting the snippet text, not when selecting
+** the snippet. Snippet selection is done based on doclists only, so there
+** is no way for fts3BestSnippet() to know whether or not the document
+** actually contains terms that follow the final highlighted term.
+*/
+static int fts3SnippetShift(
+ Fts3Table *pTab, /* FTS3 table snippet comes from */
+ int iLangid, /* Language id to use in tokenizing */
+ int nSnippet, /* Number of tokens desired for snippet */
+ const char *zDoc, /* Document text to extract snippet from */
+ int nDoc, /* Size of buffer zDoc in bytes */
+ int *piPos, /* IN/OUT: First token of snippet */
+ u64 *pHlmask /* IN/OUT: Mask of tokens to highlight */
+){
+ u64 hlmask = *pHlmask; /* Local copy of initial highlight-mask */
+
+ if( hlmask ){
+ int nLeft; /* Tokens to the left of first highlight */
+ int nRight; /* Tokens to the right of last highlight */
+ int nDesired; /* Ideal number of tokens to shift forward */
+
+ for(nLeft=0; !(hlmask & ((u64)1 << nLeft)); nLeft++);
+ for(nRight=0; !(hlmask & ((u64)1 << (nSnippet-1-nRight))); nRight++);
+ assert( (nSnippet-1-nRight)<=63 && (nSnippet-1-nRight)>=0 );
+ nDesired = (nLeft-nRight)/2;
+
+ /* Ideally, the start of the snippet should be pushed forward in the
+ ** document nDesired tokens. This block checks if there are actually
+ ** nDesired tokens to the right of the snippet. If so, *piPos and
+ ** *pHlMask are updated to shift the snippet nDesired tokens to the
+ ** right. Otherwise, the snippet is shifted by the number of tokens
+ ** available.
+ */
+ if( nDesired>0 ){
+ int nShift; /* Number of tokens to shift snippet by */
+ int iCurrent = 0; /* Token counter */
+ int rc; /* Return Code */
+ sqlite3_tokenizer_module *pMod;
+ sqlite3_tokenizer_cursor *pC;
+ pMod = (sqlite3_tokenizer_module *)pTab->pTokenizer->pModule;
+
+ /* Open a cursor on zDoc/nDoc. Check if there are (nSnippet+nDesired)
+ ** or more tokens in zDoc/nDoc.
+ */
+ rc = sqlite3Fts3OpenTokenizer(pTab->pTokenizer, iLangid, zDoc, nDoc, &pC);
+ if( rc!=SQLITE_OK ){
+ return rc;
+ }
+ while( rc==SQLITE_OK && iCurrent<(nSnippet+nDesired) ){
+ const char *ZDUMMY; int DUMMY1 = 0, DUMMY2 = 0, DUMMY3 = 0;
+ rc = pMod->xNext(pC, &ZDUMMY, &DUMMY1, &DUMMY2, &DUMMY3, &iCurrent);
+ }
+ pMod->xClose(pC);
+ if( rc!=SQLITE_OK && rc!=SQLITE_DONE ){ return rc; }
+
+ nShift = (rc==SQLITE_DONE)+iCurrent-nSnippet;
+ assert( nShift<=nDesired );
+ if( nShift>0 ){
+ *piPos += nShift;
+ *pHlmask = hlmask >> nShift;
+ }
+ }
+ }
+ return SQLITE_OK;
+}
+
+/*
+** Extract the snippet text for fragment pFragment from cursor pCsr and
+** append it to string buffer pOut.
+*/
+static int fts3SnippetText(
+ Fts3Cursor *pCsr, /* FTS3 Cursor */
+ SnippetFragment *pFragment, /* Snippet to extract */
+ int iFragment, /* Fragment number */
+ int isLast, /* True for final fragment in snippet */
+ int nSnippet, /* Number of tokens in extracted snippet */
+ const char *zOpen, /* String inserted before highlighted term */
+ const char *zClose, /* String inserted after highlighted term */
+ const char *zEllipsis, /* String inserted between snippets */
+ StrBuffer *pOut /* Write output here */
+){
+ Fts3Table *pTab = (Fts3Table *)pCsr->base.pVtab;
+ int rc; /* Return code */
+ const char *zDoc; /* Document text to extract snippet from */
+ int nDoc; /* Size of zDoc in bytes */
+ int iCurrent = 0; /* Current token number of document */
+ int iEnd = 0; /* Byte offset of end of current token */
+ int isShiftDone = 0; /* True after snippet is shifted */
+ int iPos = pFragment->iPos; /* First token of snippet */
+ u64 hlmask = pFragment->hlmask; /* Highlight-mask for snippet */
+ int iCol = pFragment->iCol+1; /* Query column to extract text from */
+ sqlite3_tokenizer_module *pMod; /* Tokenizer module methods object */
+ sqlite3_tokenizer_cursor *pC; /* Tokenizer cursor open on zDoc/nDoc */
+
+ zDoc = (const char *)sqlite3_column_text(pCsr->pStmt, iCol);
+ if( zDoc==0 ){
+ if( sqlite3_column_type(pCsr->pStmt, iCol)!=SQLITE_NULL ){
+ return SQLITE_NOMEM;
+ }
+ return SQLITE_OK;
+ }
+ nDoc = sqlite3_column_bytes(pCsr->pStmt, iCol);
+
+ /* Open a token cursor on the document. */
+ pMod = (sqlite3_tokenizer_module *)pTab->pTokenizer->pModule;
+ rc = sqlite3Fts3OpenTokenizer(pTab->pTokenizer, pCsr->iLangid, zDoc,nDoc,&pC);
+ if( rc!=SQLITE_OK ){
+ return rc;
+ }
+
+ while( rc==SQLITE_OK ){
+ const char *ZDUMMY; /* Dummy argument used with tokenizer */
+ int DUMMY1 = -1; /* Dummy argument used with tokenizer */
+ int iBegin = 0; /* Offset in zDoc of start of token */
+ int iFin = 0; /* Offset in zDoc of end of token */
+ int isHighlight = 0; /* True for highlighted terms */
+
+ /* Variable DUMMY1 is initialized to a negative value above. Elsewhere
+ ** in the FTS code the variable that the third argument to xNext points to
+ ** is initialized to zero before the first (*but not necessarily
+ ** subsequent*) call to xNext(). This is done for a particular application
+ ** that needs to know whether or not the tokenizer is being used for
+ ** snippet generation or for some other purpose.
+ **
+ ** Extreme care is required when writing code to depend on this
+ ** initialization. It is not a documented part of the tokenizer interface.
+ ** If a tokenizer is used directly by any code outside of FTS, this
+ ** convention might not be respected. */
+ rc = pMod->xNext(pC, &ZDUMMY, &DUMMY1, &iBegin, &iFin, &iCurrent);
+ if( rc!=SQLITE_OK ){
+ if( rc==SQLITE_DONE ){
+ /* Special case - the last token of the snippet is also the last token
+ ** of the column. Append any punctuation that occurred between the end
+ ** of the previous token and the end of the document to the output.
+ ** Then break out of the loop. */
+ rc = fts3StringAppend(pOut, &zDoc[iEnd], -1);
+ }
+ break;
+ }
+ if( iCurrent<iPos ){ continue; }
+
+ if( !isShiftDone ){
+ int n = nDoc - iBegin;
+ rc = fts3SnippetShift(
+ pTab, pCsr->iLangid, nSnippet, &zDoc[iBegin], n, &iPos, &hlmask
+ );
+ isShiftDone = 1;
+
+ /* Now that the shift has been done, check if the initial "..." are
+ ** required. They are required if (a) this is not the first fragment,
+ ** or (b) this fragment does not begin at position 0 of its column.
+ */
+ if( rc==SQLITE_OK ){
+ if( iPos>0 || iFragment>0 ){
+ rc = fts3StringAppend(pOut, zEllipsis, -1);
+ }else if( iBegin ){
+ rc = fts3StringAppend(pOut, zDoc, iBegin);
+ }
+ }
+ if( rc!=SQLITE_OK || iCurrent<iPos ) continue;
+ }
+
+ if( iCurrent>=(iPos+nSnippet) ){
+ if( isLast ){
+ rc = fts3StringAppend(pOut, zEllipsis, -1);
+ }
+ break;
+ }
+
+ /* Set isHighlight to true if this term should be highlighted. */
+ isHighlight = (hlmask & ((u64)1 << (iCurrent-iPos)))!=0;
+
+ if( iCurrent>iPos ) rc = fts3StringAppend(pOut, &zDoc[iEnd], iBegin-iEnd);
+ if( rc==SQLITE_OK && isHighlight ) rc = fts3StringAppend(pOut, zOpen, -1);
+ if( rc==SQLITE_OK ) rc = fts3StringAppend(pOut, &zDoc[iBegin], iFin-iBegin);
+ if( rc==SQLITE_OK && isHighlight ) rc = fts3StringAppend(pOut, zClose, -1);
+
+ iEnd = iFin;
+ }
+
+ pMod->xClose(pC);
+ return rc;
+}
+
+
+/*
+** This function is used to count the entries in a column-list (a
+** delta-encoded list of term offsets within a single column of a single
+** row). When this function is called, *ppCollist should point to the
+** beginning of the first varint in the column-list (the varint that
+** contains the position of the first matching term in the column data).
+** Before returning, *ppCollist is set to point to the first byte after
+** the last varint in the column-list (either the 0x00 signifying the end
+** of the position-list, or the 0x01 that precedes the column number of
+** the next column in the position-list).
+**
+** The number of elements in the column-list is returned.
+*/
+static int fts3ColumnlistCount(char **ppCollist){
+ char *pEnd = *ppCollist;
+ char c = 0;
+ int nEntry = 0;
+
+ /* A column-list is terminated by either a 0x01 or 0x00. */
+ while( 0xFE & (*pEnd | c) ){
+ c = *pEnd++ & 0x80;
+ if( !c ) nEntry++;
+ }
+
+ *ppCollist = pEnd;
+ return nEntry;
+}
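+
+/*
+** Worked example (illustrative, following the delta-encoding described
+** above): a column with matches at token positions 0 and 2 stores the
+** varints (0+2)=0x02 and (2-0+2)=0x04, followed by a 0x00 terminator:
+**
+**   char aCl[] = { 0x02, 0x04, 0x00 };
+**   char *p = aCl;
+**   int n = fts3ColumnlistCount(&p);  /* n==2, p now points at the 0x00 */
+**
+** Each varint ends at a byte with the 0x80 bit clear, which is exactly
+** when nEntry is incremented in the loop above.
+*/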
+
+/*
+** This function gathers 'y' or 'b' data for a single phrase.
+*/
+static int fts3ExprLHits(
+ Fts3Expr *pExpr, /* Phrase expression node */
+ MatchInfo *p /* Matchinfo context */
+){
+ Fts3Table *pTab = (Fts3Table *)p->pCursor->base.pVtab;
+ int iStart;
+ Fts3Phrase *pPhrase = pExpr->pPhrase;
+ char *pIter = pPhrase->doclist.pList;
+ int iCol = 0;
+
+ assert( p->flag==FTS3_MATCHINFO_LHITS_BM || p->flag==FTS3_MATCHINFO_LHITS );
+ if( p->flag==FTS3_MATCHINFO_LHITS ){
+ iStart = pExpr->iPhrase * p->nCol;
+ }else{
+ iStart = pExpr->iPhrase * ((p->nCol + 31) / 32);
+ }
+
+ if( pIter ) while( 1 ){
+ int nHit = fts3ColumnlistCount(&pIter);
+ if( (pPhrase->iColumn>=pTab->nColumn || pPhrase->iColumn==iCol) ){
+ if( p->flag==FTS3_MATCHINFO_LHITS ){
+ p->aMatchinfo[iStart + iCol] = (u32)nHit;
+ }else if( nHit ){
+ p->aMatchinfo[iStart + (iCol+1)/32] |= (1 << (iCol&0x1F));
+ }
+ }
+ assert( *pIter==0x00 || *pIter==0x01 );
+ if( *pIter!=0x01 ) break;
+ pIter++;
+ pIter += fts3GetVarint32(pIter, &iCol);
+ if( iCol>=p->nCol ) return FTS_CORRUPT_VTAB;
+ }
+ return SQLITE_OK;
+}
+
+/*
+** Gather the results for matchinfo directives 'y' and 'b'.
+*/
+static int fts3ExprLHitGather(
+ Fts3Expr *pExpr,
+ MatchInfo *p
+){
+ int rc = SQLITE_OK;
+ assert( (pExpr->pLeft==0)==(pExpr->pRight==0) );
+ if( pExpr->bEof==0 && pExpr->iDocid==p->pCursor->iPrevId ){
+ if( pExpr->pLeft ){
+ rc = fts3ExprLHitGather(pExpr->pLeft, p);
+ if( rc==SQLITE_OK ) rc = fts3ExprLHitGather(pExpr->pRight, p);
+ }else{
+ rc = fts3ExprLHits(pExpr, p);
+ }
+ }
+ return rc;
+}
+
+/*
+** fts3ExprIterate() callback used to load the 'global' elements of a
+** FTS3_MATCHINFO_HITS matchinfo array. The global stats are those elements
+** of the matchinfo array that are constant for all rows returned by the
+** current query.
+**
+** Argument pCtx is actually a pointer to a struct of type MatchInfo. This
+** function populates Matchinfo.aMatchinfo[] as follows:
+**
+** for(iCol=0; iCol<nCol; iCol++){
+** aMatchinfo[3*iPhrase*nCol + 3*iCol + 1] = X;
+** aMatchinfo[3*iPhrase*nCol + 3*iCol + 2] = Y;
+** }
+**
+** where X is the number of matches for phrase iPhrase in column iCol of all
+** rows of the table. Y is the number of rows for which column iCol contains
+** at least one instance of phrase iPhrase.
+**
+** If the phrase pExpr consists entirely of deferred tokens, then all X and
+** Y values are set to nDoc, where nDoc is the number of documents in the
+** table. This is done because the full-text index doclist is required
+** to calculate these values properly, and the full-text index doclist is
+** not available for deferred tokens.
+*/
+static int fts3ExprGlobalHitsCb(
+ Fts3Expr *pExpr, /* Phrase expression node */
+ int iPhrase, /* Phrase number (numbered from zero) */
+ void *pCtx /* Pointer to MatchInfo structure */
+){
+ MatchInfo *p = (MatchInfo *)pCtx;
+ return sqlite3Fts3EvalPhraseStats(
+ p->pCursor, pExpr, &p->aMatchinfo[3*iPhrase*p->nCol]
+ );
+}
+
+/*
+** fts3ExprIterate() callback used to collect the "local" part of the
+** FTS3_MATCHINFO_HITS array. The local stats are those elements of the
+** array that are different for each row returned by the query.
+*/
+static int fts3ExprLocalHitsCb(
+ Fts3Expr *pExpr, /* Phrase expression node */
+ int iPhrase, /* Phrase number */
+ void *pCtx /* Pointer to MatchInfo structure */
+){
+ int rc = SQLITE_OK;
+ MatchInfo *p = (MatchInfo *)pCtx;
+ int iStart = iPhrase * p->nCol * 3;
+ int i;
+
+ for(i=0; i<p->nCol && rc==SQLITE_OK; i++){
+ char *pCsr;
+ rc = sqlite3Fts3EvalPhrasePoslist(p->pCursor, pExpr, i, &pCsr);
+ if( pCsr ){
+ p->aMatchinfo[iStart+i*3] = fts3ColumnlistCount(&pCsr);
+ }else{
+ p->aMatchinfo[iStart+i*3] = 0;
+ }
+ }
+
+ return rc;
+}
+
+static int fts3MatchinfoCheck(
+ Fts3Table *pTab,
+ char cArg,
+ char **pzErr
+){
+ if( (cArg==FTS3_MATCHINFO_NPHRASE)
+ || (cArg==FTS3_MATCHINFO_NCOL)
+ || (cArg==FTS3_MATCHINFO_NDOC && pTab->bFts4)
+ || (cArg==FTS3_MATCHINFO_AVGLENGTH && pTab->bFts4)
+ || (cArg==FTS3_MATCHINFO_LENGTH && pTab->bHasDocsize)
+ || (cArg==FTS3_MATCHINFO_LCS)
+ || (cArg==FTS3_MATCHINFO_HITS)
+ || (cArg==FTS3_MATCHINFO_LHITS)
+ || (cArg==FTS3_MATCHINFO_LHITS_BM)
+ ){
+ return SQLITE_OK;
+ }
+ sqlite3Fts3ErrMsg(pzErr, "unrecognized matchinfo request: %c", cArg);
+ return SQLITE_ERROR;
+}
+
+static size_t fts3MatchinfoSize(MatchInfo *pInfo, char cArg){
+ size_t nVal; /* Number of integers output by cArg */
+
+ switch( cArg ){
+ case FTS3_MATCHINFO_NDOC:
+ case FTS3_MATCHINFO_NPHRASE:
+ case FTS3_MATCHINFO_NCOL:
+ nVal = 1;
+ break;
+
+ case FTS3_MATCHINFO_AVGLENGTH:
+ case FTS3_MATCHINFO_LENGTH:
+ case FTS3_MATCHINFO_LCS:
+ nVal = pInfo->nCol;
+ break;
+
+ case FTS3_MATCHINFO_LHITS:
+ nVal = pInfo->nCol * pInfo->nPhrase;
+ break;
+
+ case FTS3_MATCHINFO_LHITS_BM:
+ nVal = pInfo->nPhrase * ((pInfo->nCol + 31) / 32);
+ break;
+
+ default:
+ assert( cArg==FTS3_MATCHINFO_HITS );
+ nVal = pInfo->nCol * pInfo->nPhrase * 3;
+ break;
+ }
+
+ return nVal;
+}
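+
+/*
+** Worked example (illustrative): for a query with nPhrase==2 against a
+** table with nCol==3, the default format string "pcx" occupies
+** 1 ('p') + 1 ('c') + 3*2*3 ('x') = 20 u32 slots in the output array.
+** A 'y' request would add nCol*nPhrase = 6 slots, and a 'b' request
+** nPhrase*((nCol+31)/32) = 2 slots, matching the cases above.
+*/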
+
+static int fts3MatchinfoSelectDoctotal(
+ Fts3Table *pTab,
+ sqlite3_stmt **ppStmt,
+ sqlite3_int64 *pnDoc,
+ const char **paLen,
+ const char **ppEnd
+){
+ sqlite3_stmt *pStmt;
+ const char *a;
+ const char *pEnd;
+ sqlite3_int64 nDoc;
+ int n;
+
+ if( !*ppStmt ){
+ int rc = sqlite3Fts3SelectDoctotal(pTab, ppStmt);
+ if( rc!=SQLITE_OK ) return rc;
+ }
+ pStmt = *ppStmt;
+ assert( sqlite3_data_count(pStmt)==1 );
+
+ n = sqlite3_column_bytes(pStmt, 0);
+ a = sqlite3_column_blob(pStmt, 0);
+ if( a==0 ){
+ return FTS_CORRUPT_VTAB;
+ }
+ pEnd = a + n;
+ a += sqlite3Fts3GetVarintBounded(a, pEnd, &nDoc);
+ if( nDoc<=0 || a>pEnd ){
+ return FTS_CORRUPT_VTAB;
+ }
+ *pnDoc = nDoc;
+
+ if( paLen ) *paLen = a;
+ if( ppEnd ) *ppEnd = pEnd;
+ return SQLITE_OK;
+}
+
+/*
+** An instance of the following structure is used to store state while
+** iterating through a multi-column position-list corresponding to the
+** hits for a single phrase on a single row in order to calculate the
+** values for a matchinfo() FTS3_MATCHINFO_LCS request.
+*/
+typedef struct LcsIterator LcsIterator;
+struct LcsIterator {
+ Fts3Expr *pExpr; /* Pointer to phrase expression */
+ int iPosOffset; /* Tokens count up to end of this phrase */
+ char *pRead; /* Cursor used to iterate through aDoclist */
+ int iPos; /* Current position */
+};
+
+/*
+** If LcsIterator.iCol is set to the following value, the iterator has
+** finished iterating through all offsets for all columns.
+*/
+#define LCS_ITERATOR_FINISHED 0x7FFFFFFF
+
+static int fts3MatchinfoLcsCb(
+ Fts3Expr *pExpr, /* Phrase expression node */
+ int iPhrase, /* Phrase number (numbered from zero) */
+ void *pCtx /* Pointer to MatchInfo structure */
+){
+ LcsIterator *aIter = (LcsIterator *)pCtx;
+ aIter[iPhrase].pExpr = pExpr;
+ return SQLITE_OK;
+}
+
+/*
+** Advance the iterator passed as an argument to the next position. Return
+** 1 if the iterator is at EOF or if it now points to the start of the
+** position list for the next column.
+*/
+static int fts3LcsIteratorAdvance(LcsIterator *pIter){
+ char *pRead = pIter->pRead;
+ sqlite3_int64 iRead;
+ int rc = 0;
+
+ pRead += sqlite3Fts3GetVarint(pRead, &iRead);
+ if( iRead==0 || iRead==1 ){
+ pRead = 0;
+ rc = 1;
+ }else{
+ pIter->iPos += (int)(iRead-2);
+ }
+
+ pIter->pRead = pRead;
+ return rc;
+}
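+
+/*
+** Illustrative sketch (simplified; in fts3MatchinfoLcs() below, iPos
+** actually starts at the negative iPosOffset): a position-list that
+** encodes token positions 3 and 5 as the varints 0x05 (3+2) and
+** 0x04 (delta 2, plus 2), followed by a 0x00 terminator, advances as:
+**
+**   fts3LcsIteratorAdvance(pIter);  /* iPos += 5-2, returns 0 */
+**   fts3LcsIteratorAdvance(pIter);  /* iPos += 4-2, returns 0 */
+**   fts3LcsIteratorAdvance(pIter);  /* reads 0x00, sets pRead=0, returns 1 */
+*/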
+
+/*
+** This function implements the FTS3_MATCHINFO_LCS matchinfo() flag.
+**
+** If the call is successful, the longest-common-substring lengths for each
+** column are written into the first nCol elements of the pInfo->aMatchinfo[]
+** array before returning. SQLITE_OK is returned in this case.
+**
+** Otherwise, if an error occurs, an SQLite error code is returned and the
+** data written to the first nCol elements of pInfo->aMatchinfo[] is
+** undefined.
+*/
+static int fts3MatchinfoLcs(Fts3Cursor *pCsr, MatchInfo *pInfo){
+ LcsIterator *aIter;
+ int i;
+ int iCol;
+ int nToken = 0;
+ int rc = SQLITE_OK;
+
+ /* Allocate and populate the array of LcsIterator objects. The array
+ ** contains one element for each matchable phrase in the query.
+ */
+ aIter = sqlite3_malloc64(sizeof(LcsIterator) * pCsr->nPhrase);
+ if( !aIter ) return SQLITE_NOMEM;
+ memset(aIter, 0, sizeof(LcsIterator) * pCsr->nPhrase);
+ (void)fts3ExprIterate(pCsr->pExpr, fts3MatchinfoLcsCb, (void*)aIter);
+
+ for(i=0; i<pInfo->nPhrase; i++){
+ LcsIterator *pIter = &aIter[i];
+ nToken -= pIter->pExpr->pPhrase->nToken;
+ pIter->iPosOffset = nToken;
+ }
+
+ for(iCol=0; iCol<pInfo->nCol; iCol++){
+ int nLcs = 0; /* LCS value for this column */
+ int nLive = 0; /* Number of iterators in aIter not at EOF */
+
+ for(i=0; i<pInfo->nPhrase; i++){
+ LcsIterator *pIt = &aIter[i];
+ rc = sqlite3Fts3EvalPhrasePoslist(pCsr, pIt->pExpr, iCol, &pIt->pRead);
+ if( rc!=SQLITE_OK ) goto matchinfo_lcs_out;
+ if( pIt->pRead ){
+ pIt->iPos = pIt->iPosOffset;
+ fts3LcsIteratorAdvance(pIt);
+ if( pIt->pRead==0 ){
+ rc = FTS_CORRUPT_VTAB;
+ goto matchinfo_lcs_out;
+ }
+ nLive++;
+ }
+ }
+
+ while( nLive>0 ){
+ LcsIterator *pAdv = 0; /* The iterator to advance by one position */
+ int nThisLcs = 0; /* LCS for the current iterator positions */
+
+ for(i=0; i<pInfo->nPhrase; i++){
+ LcsIterator *pIter = &aIter[i];
+ if( pIter->pRead==0 ){
+ /* This iterator is already at EOF for this column. */
+ nThisLcs = 0;
+ }else{
+ if( pAdv==0 || pIter->iPos<pAdv->iPos ){
+ pAdv = pIter;
+ }
+ if( nThisLcs==0 || pIter->iPos==pIter[-1].iPos ){
+ nThisLcs++;
+ }else{
+ nThisLcs = 1;
+ }
+ if( nThisLcs>nLcs ) nLcs = nThisLcs;
+ }
+ }
+ if( fts3LcsIteratorAdvance(pAdv) ) nLive--;
+ }
+
+ pInfo->aMatchinfo[iCol] = nLcs;
+ }
+
+ matchinfo_lcs_out:
+ sqlite3_free(aIter);
+ return rc;
+}
+
+/*
+** Populate the buffer pInfo->aMatchinfo[] with an array of integers to
+** be returned by the matchinfo() function. Argument zArg contains the
+** format string passed as the second argument to matchinfo (or the
+** default value "pcx" if no second argument was specified). The format
+** string has already been validated and the pInfo->aMatchinfo[] array
+** is guaranteed to be large enough for the output.
+**
+** If bGlobal is true, then populate all fields of the matchinfo() output.
+** If it is false, then assume that those fields that do not change between
+** rows (i.e. FTS3_MATCHINFO_NPHRASE, NCOL, NDOC, AVGLENGTH and part of HITS)
+** have already been populated.
+**
+** Return SQLITE_OK if successful, or an SQLite error code if an error
+** occurs. If a value other than SQLITE_OK is returned, the contents of
+** the pInfo->aMatchinfo[] buffer are undefined.
+*/
+static int fts3MatchinfoValues(
+ Fts3Cursor *pCsr, /* FTS3 cursor object */
+ int bGlobal, /* True to grab the global stats */
+ MatchInfo *pInfo, /* Matchinfo context object */
+ const char *zArg /* Matchinfo format string */
+){
+ int rc = SQLITE_OK;
+ int i;
+ Fts3Table *pTab = (Fts3Table *)pCsr->base.pVtab;
+ sqlite3_stmt *pSelect = 0;
+
+ for(i=0; rc==SQLITE_OK && zArg[i]; i++){
+ pInfo->flag = zArg[i];
+ switch( zArg[i] ){
+ case FTS3_MATCHINFO_NPHRASE:
+ if( bGlobal ) pInfo->aMatchinfo[0] = pInfo->nPhrase;
+ break;
+
+ case FTS3_MATCHINFO_NCOL:
+ if( bGlobal ) pInfo->aMatchinfo[0] = pInfo->nCol;
+ break;
+
+ case FTS3_MATCHINFO_NDOC:
+ if( bGlobal ){
+ sqlite3_int64 nDoc = 0;
+ rc = fts3MatchinfoSelectDoctotal(pTab, &pSelect, &nDoc, 0, 0);
+ pInfo->aMatchinfo[0] = (u32)nDoc;
+ }
+ break;
+
+ case FTS3_MATCHINFO_AVGLENGTH:
+ if( bGlobal ){
+ sqlite3_int64 nDoc; /* Number of rows in table */
+ const char *a; /* Aggregate column length array */
+ const char *pEnd; /* First byte past end of length array */
+
+ rc = fts3MatchinfoSelectDoctotal(pTab, &pSelect, &nDoc, &a, &pEnd);
+ if( rc==SQLITE_OK ){
+ int iCol;
+ for(iCol=0; iCol<pInfo->nCol; iCol++){
+ u32 iVal;
+ sqlite3_int64 nToken;
+ a += sqlite3Fts3GetVarint(a, &nToken);
+ if( a>pEnd ){
+ rc = SQLITE_CORRUPT_VTAB;
+ break;
+ }
+ iVal = (u32)(((u32)(nToken&0xffffffff)+nDoc/2)/nDoc);
+ pInfo->aMatchinfo[iCol] = iVal;
+ }
+ }
+ }
+ break;
+
+ case FTS3_MATCHINFO_LENGTH: {
+ sqlite3_stmt *pSelectDocsize = 0;
+ rc = sqlite3Fts3SelectDocsize(pTab, pCsr->iPrevId, &pSelectDocsize);
+ if( rc==SQLITE_OK ){
+ int iCol;
+ const char *a = sqlite3_column_blob(pSelectDocsize, 0);
+ const char *pEnd = a + sqlite3_column_bytes(pSelectDocsize, 0);
+ for(iCol=0; iCol<pInfo->nCol; iCol++){
+ sqlite3_int64 nToken;
+ a += sqlite3Fts3GetVarintBounded(a, pEnd, &nToken);
+ if( a>pEnd ){
+ rc = SQLITE_CORRUPT_VTAB;
+ break;
+ }
+ pInfo->aMatchinfo[iCol] = (u32)nToken;
+ }
+ }
+ sqlite3_reset(pSelectDocsize);
+ break;
+ }
+
+ case FTS3_MATCHINFO_LCS:
+ rc = fts3ExprLoadDoclists(pCsr, 0, 0);
+ if( rc==SQLITE_OK ){
+ rc = fts3MatchinfoLcs(pCsr, pInfo);
+ }
+ break;
+
+ case FTS3_MATCHINFO_LHITS_BM:
+ case FTS3_MATCHINFO_LHITS: {
+ size_t nZero = fts3MatchinfoSize(pInfo, zArg[i]) * sizeof(u32);
+ memset(pInfo->aMatchinfo, 0, nZero);
+ rc = fts3ExprLHitGather(pCsr->pExpr, pInfo);
+ break;
+ }
+
+ default: {
+ Fts3Expr *pExpr;
+ assert( zArg[i]==FTS3_MATCHINFO_HITS );
+ pExpr = pCsr->pExpr;
+ rc = fts3ExprLoadDoclists(pCsr, 0, 0);
+ if( rc!=SQLITE_OK ) break;
+ if( bGlobal ){
+ if( pCsr->pDeferred ){
+ rc = fts3MatchinfoSelectDoctotal(pTab, &pSelect, &pInfo->nDoc,0,0);
+ if( rc!=SQLITE_OK ) break;
+ }
+ rc = fts3ExprIterate(pExpr, fts3ExprGlobalHitsCb,(void*)pInfo);
+ sqlite3Fts3EvalTestDeferred(pCsr, &rc);
+ if( rc!=SQLITE_OK ) break;
+ }
+ (void)fts3ExprIterate(pExpr, fts3ExprLocalHitsCb,(void*)pInfo);
+ break;
+ }
+ }
+
+ pInfo->aMatchinfo += fts3MatchinfoSize(pInfo, zArg[i]);
+ }
+
+ sqlite3_reset(pSelect);
+ return rc;
+}
+
+
+/*
+** Populate pCsr->aMatchinfo[] with data for the current row. The
+** 'matchinfo' data is an array of 32-bit unsigned integers (C type u32).
+*/
+static void fts3GetMatchinfo(
+ sqlite3_context *pCtx, /* Return results here */
+ Fts3Cursor *pCsr, /* FTS3 Cursor object */
+ const char *zArg /* Second argument to matchinfo() function */
+){
+ MatchInfo sInfo;
+ Fts3Table *pTab = (Fts3Table *)pCsr->base.pVtab;
+ int rc = SQLITE_OK;
+ int bGlobal = 0; /* Collect 'global' stats as well as local */
+
+ u32 *aOut = 0;
+ void (*xDestroyOut)(void*) = 0;
+
+ memset(&sInfo, 0, sizeof(MatchInfo));
+ sInfo.pCursor = pCsr;
+ sInfo.nCol = pTab->nColumn;
+
+ /* If there is cached matchinfo() data, but the format string for the
+ ** cache does not match the format string for this request, discard
+ ** the cached data. */
+ if( pCsr->pMIBuffer && strcmp(pCsr->pMIBuffer->zMatchinfo, zArg) ){
+ sqlite3Fts3MIBufferFree(pCsr->pMIBuffer);
+ pCsr->pMIBuffer = 0;
+ }
+
+ /* If Fts3Cursor.pMIBuffer is NULL, then this is the first time the
+ ** matchinfo function has been called for this query. In this case
+ ** allocate the array used to accumulate the matchinfo data and
+ ** initialize those elements that are constant for every row.
+ */
+ if( pCsr->pMIBuffer==0 ){
+ size_t nMatchinfo = 0; /* Number of u32 elements in match-info */
+ int i; /* Used to iterate through zArg */
+
+ /* Determine the number of phrases in the query */
+ pCsr->nPhrase = fts3ExprPhraseCount(pCsr->pExpr);
+ sInfo.nPhrase = pCsr->nPhrase;
+
+ /* Determine the number of integers in the buffer returned by this call. */
+ for(i=0; zArg[i]; i++){
+ char *zErr = 0;
+ if( fts3MatchinfoCheck(pTab, zArg[i], &zErr) ){
+ sqlite3_result_error(pCtx, zErr, -1);
+ sqlite3_free(zErr);
+ return;
+ }
+ nMatchinfo += fts3MatchinfoSize(&sInfo, zArg[i]);
+ }
+
+ /* Allocate space for Fts3Cursor.aMatchinfo[] and Fts3Cursor.zMatchinfo. */
+ pCsr->pMIBuffer = fts3MIBufferNew(nMatchinfo, zArg);
+ if( !pCsr->pMIBuffer ) rc = SQLITE_NOMEM;
+
+ pCsr->isMatchinfoNeeded = 1;
+ bGlobal = 1;
+ }
+
+ if( rc==SQLITE_OK ){
+ xDestroyOut = fts3MIBufferAlloc(pCsr->pMIBuffer, &aOut);
+ if( xDestroyOut==0 ){
+ rc = SQLITE_NOMEM;
+ }
+ }
+
+ if( rc==SQLITE_OK ){
+ sInfo.aMatchinfo = aOut;
+ sInfo.nPhrase = pCsr->nPhrase;
+ rc = fts3MatchinfoValues(pCsr, bGlobal, &sInfo, zArg);
+ if( bGlobal ){
+ fts3MIBufferSetGlobal(pCsr->pMIBuffer);
+ }
+ }
+
+ if( rc!=SQLITE_OK ){
+ sqlite3_result_error_code(pCtx, rc);
+ if( xDestroyOut ) xDestroyOut(aOut);
+ }else{
+ int n = pCsr->pMIBuffer->nElem * sizeof(u32);
+ sqlite3_result_blob(pCtx, aOut, n, xDestroyOut);
+ }
+}
+
+/*
+** Implementation of snippet() function.
+*/
+SQLITE_PRIVATE void sqlite3Fts3Snippet(
+ sqlite3_context *pCtx, /* SQLite function call context */
+ Fts3Cursor *pCsr, /* Cursor object */
+ const char *zStart, /* Snippet start text - "<b>" */
+ const char *zEnd, /* Snippet end text - "</b>" */
+ const char *zEllipsis, /* Snippet ellipsis text - "<b>...</b>" */
+ int iCol, /* Extract snippet from this column */
+ int nToken /* Approximate number of tokens in snippet */
+){
+ Fts3Table *pTab = (Fts3Table *)pCsr->base.pVtab;
+ int rc = SQLITE_OK;
+ int i;
+ StrBuffer res = {0, 0, 0};
+
+ /* The returned text includes up to four fragments of text extracted from
+ ** the data in the current row. The first iteration of the for(...) loop
+ ** below attempts to locate a single fragment of text nToken tokens in
+ ** size that contains at least one instance of all phrases in the query
+ ** expression that appear in the current row. If such a fragment of text
+ ** cannot be found, the second iteration of the loop attempts to locate
+ ** a pair of fragments, and so on.
+ */
+ int nSnippet = 0; /* Number of fragments in this snippet */
+ SnippetFragment aSnippet[4]; /* Maximum of 4 fragments per snippet */
+ int nFToken = -1; /* Number of tokens in each fragment */
+
+ if( !pCsr->pExpr ){
+ sqlite3_result_text(pCtx, "", 0, SQLITE_STATIC);
+ return;
+ }
+
+ /* Limit the snippet length to 64 tokens. */
+ if( nToken<-64 ) nToken = -64;
+ if( nToken>+64 ) nToken = +64;
+
+ for(nSnippet=1; 1; nSnippet++){
+
+ int iSnip; /* Loop counter 0..nSnippet-1 */
+ u64 mCovered = 0; /* Bitmask of phrases covered by snippet */
+ u64 mSeen = 0; /* Bitmask of phrases seen by BestSnippet() */
+
+ if( nToken>=0 ){
+ nFToken = (nToken+nSnippet-1) / nSnippet;
+ }else{
+ nFToken = -1 * nToken;
+ }
+
+ for(iSnip=0; iSnip<nSnippet; iSnip++){
+ int iBestScore = -1; /* Best score of columns checked so far */
+ int iRead; /* Used to iterate through columns */
+ SnippetFragment *pFragment = &aSnippet[iSnip];
+
+ memset(pFragment, 0, sizeof(*pFragment));
+
+ /* Loop through all columns of the table being considered for snippets.
+ ** If the iCol argument to this function was negative, this means all
+ ** columns of the FTS3 table. Otherwise, only column iCol is considered.
+ */
+ for(iRead=0; iRead<pTab->nColumn; iRead++){
+ SnippetFragment sF = {0, 0, 0, 0};
+ int iS = 0;
+ if( iCol>=0 && iRead!=iCol ) continue;
+
+ /* Find the best snippet of nFToken tokens in column iRead. */
+ rc = fts3BestSnippet(nFToken, pCsr, iRead, mCovered, &mSeen, &sF, &iS);
+ if( rc!=SQLITE_OK ){
+ goto snippet_out;
+ }
+ if( iS>iBestScore ){
+ *pFragment = sF;
+ iBestScore = iS;
+ }
+ }
+
+ mCovered |= pFragment->covered;
+ }
+
+ /* If all query phrases seen by fts3BestSnippet() are present in at least
+ ** one of the nSnippet snippet fragments, break out of the loop.
+ */
+ assert( (mCovered&mSeen)==mCovered );
+ if( mSeen==mCovered || nSnippet==SizeofArray(aSnippet) ) break;
+ }
+
+ assert( nFToken>0 );
+
+ for(i=0; i<nSnippet && rc==SQLITE_OK; i++){
+ rc = fts3SnippetText(pCsr, &aSnippet[i],
+ i, (i==nSnippet-1), nFToken, zStart, zEnd, zEllipsis, &res
+ );
+ }
+
+ snippet_out:
+ sqlite3Fts3SegmentsClose(pTab);
+ if( rc!=SQLITE_OK ){
+ sqlite3_result_error_code(pCtx, rc);
+ sqlite3_free(res.z);
+ }else{
+ sqlite3_result_text(pCtx, res.z, -1, sqlite3_free);
+ }
+}
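+
+/*
+** Usage sketch (the table name "docs" is hypothetical):
+**
+**   SELECT snippet(docs, '<b>', '</b>', '<b>...</b>', -1, -15)
+**   FROM docs WHERE docs MATCH 'sqlite';
+**
+** A negative final argument (here -15) requests exactly 15 tokens per
+** fragment; a positive value is divided between the fragments chosen by
+** the loop above. A negative fifth argument searches all columns.
+*/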
+
+
+typedef struct TermOffset TermOffset;
+typedef struct TermOffsetCtx TermOffsetCtx;
+
+struct TermOffset {
+ char *pList; /* Position-list */
+ int iPos; /* Position just read from pList */
+ int iOff; /* Offset of this term from read positions */
+};
+
+struct TermOffsetCtx {
+ Fts3Cursor *pCsr;
+ int iCol; /* Column of table to populate aTerm for */
+ int iTerm;
+ sqlite3_int64 iDocid;
+ TermOffset *aTerm;
+};
+
+/*
+** This function is an fts3ExprIterate() callback used by sqlite3Fts3Offsets().
+*/
+static int fts3ExprTermOffsetInit(Fts3Expr *pExpr, int iPhrase, void *ctx){
+ TermOffsetCtx *p = (TermOffsetCtx *)ctx;
+ int nTerm; /* Number of tokens in phrase */
+ int iTerm; /* For looping through nTerm phrase terms */
+ char *pList; /* Pointer to position list for phrase */
+ int iPos = 0; /* First position in position-list */
+ int rc;
+
+ UNUSED_PARAMETER(iPhrase);
+ rc = sqlite3Fts3EvalPhrasePoslist(p->pCsr, pExpr, p->iCol, &pList);
+ nTerm = pExpr->pPhrase->nToken;
+ if( pList ){
+ fts3GetDeltaPosition(&pList, &iPos);
+ assert_fts3_nc( iPos>=0 );
+ }
+
+ for(iTerm=0; iTerm<nTerm; iTerm++){
+ TermOffset *pT = &p->aTerm[p->iTerm++];
+ pT->iOff = nTerm-iTerm-1;
+ pT->pList = pList;
+ pT->iPos = iPos;
+ }
+
+ return rc;
+}
+
+/*
+** Implementation of offsets() function.
+*/
+SQLITE_PRIVATE void sqlite3Fts3Offsets(
+ sqlite3_context *pCtx, /* SQLite function call context */
+ Fts3Cursor *pCsr /* Cursor object */
+){
+ Fts3Table *pTab = (Fts3Table *)pCsr->base.pVtab;
+ sqlite3_tokenizer_module const *pMod = pTab->pTokenizer->pModule;
+ int rc; /* Return Code */
+ int nToken; /* Number of tokens in query */
+ int iCol; /* Column currently being processed */
+ StrBuffer res = {0, 0, 0}; /* Result string */
+ TermOffsetCtx sCtx; /* Context for fts3ExprTermOffsetInit() */
+
+ if( !pCsr->pExpr ){
+ sqlite3_result_text(pCtx, "", 0, SQLITE_STATIC);
+ return;
+ }
+
+ memset(&sCtx, 0, sizeof(sCtx));
+ assert( pCsr->isRequireSeek==0 );
+
+ /* Count the number of terms in the query */
+ rc = fts3ExprLoadDoclists(pCsr, 0, &nToken);
+ if( rc!=SQLITE_OK ) goto offsets_out;
+
+ /* Allocate the array of TermOffset iterators. */
+ sCtx.aTerm = (TermOffset *)sqlite3_malloc64(sizeof(TermOffset)*nToken);
+ if( 0==sCtx.aTerm ){
+ rc = SQLITE_NOMEM;
+ goto offsets_out;
+ }
+ sCtx.iDocid = pCsr->iPrevId;
+ sCtx.pCsr = pCsr;
+
+ /* Loop through the table columns, appending offset information to
+ ** string-buffer res for each column.
+ */
+ for(iCol=0; iCol<pTab->nColumn; iCol++){
+ sqlite3_tokenizer_cursor *pC; /* Tokenizer cursor */
+ const char *ZDUMMY; /* Dummy argument used with xNext() */
+ int NDUMMY = 0; /* Dummy argument used with xNext() */
+ int iStart = 0;
+ int iEnd = 0;
+ int iCurrent = 0;
+ const char *zDoc;
+ int nDoc;
+
+ /* Initialize the contents of sCtx.aTerm[] for column iCol. There is
+ ** no way that this operation can fail, so the return code from
+ ** fts3ExprIterate() can be discarded.
+ */
+ sCtx.iCol = iCol;
+ sCtx.iTerm = 0;
+ (void)fts3ExprIterate(pCsr->pExpr, fts3ExprTermOffsetInit, (void*)&sCtx);
+
+ /* Retrieve the text stored in column iCol. If an SQL NULL is stored
+ ** in column iCol, jump immediately to the next iteration of the loop.
+ ** If an OOM occurs while retrieving the data (this can happen if SQLite
+ ** needs to transform the data from utf-16 to utf-8), return SQLITE_NOMEM
+ ** to the caller.
+ */
+ zDoc = (const char *)sqlite3_column_text(pCsr->pStmt, iCol+1);
+ nDoc = sqlite3_column_bytes(pCsr->pStmt, iCol+1);
+ if( zDoc==0 ){
+ if( sqlite3_column_type(pCsr->pStmt, iCol+1)==SQLITE_NULL ){
+ continue;
+ }
+ rc = SQLITE_NOMEM;
+ goto offsets_out;
+ }
+
+ /* Initialize a tokenizer iterator to iterate through column iCol. */
+ rc = sqlite3Fts3OpenTokenizer(pTab->pTokenizer, pCsr->iLangid,
+ zDoc, nDoc, &pC
+ );
+ if( rc!=SQLITE_OK ) goto offsets_out;
+
+ rc = pMod->xNext(pC, &ZDUMMY, &NDUMMY, &iStart, &iEnd, &iCurrent);
+ while( rc==SQLITE_OK ){
+ int i; /* Used to loop through terms */
+ int iMinPos = 0x7FFFFFFF; /* Position of next token */
+ TermOffset *pTerm = 0; /* TermOffset associated with next token */
+
+ for(i=0; i<nToken; i++){
+ TermOffset *pT = &sCtx.aTerm[i];
+ if( pT->pList && (pT->iPos-pT->iOff)<iMinPos ){
+ iMinPos = pT->iPos-pT->iOff;
+ pTerm = pT;
+ }
+ }
+
+ if( !pTerm ){
+ /* All offsets for this column have been gathered. */
+ rc = SQLITE_DONE;
+ }else{
+ assert_fts3_nc( iCurrent<=iMinPos );
+ if( 0==(0xFE&*pTerm->pList) ){
+ pTerm->pList = 0;
+ }else{
+ fts3GetDeltaPosition(&pTerm->pList, &pTerm->iPos);
+ }
+ while( rc==SQLITE_OK && iCurrent<iMinPos ){
+ rc = pMod->xNext(pC, &ZDUMMY, &NDUMMY, &iStart, &iEnd, &iCurrent);
+ }
+ if( rc==SQLITE_OK ){
+ char aBuffer[64];
+ sqlite3_snprintf(sizeof(aBuffer), aBuffer,
+ "%d %d %d %d ", iCol, pTerm-sCtx.aTerm, iStart, iEnd-iStart
+ );
+ rc = fts3StringAppend(&res, aBuffer, -1);
+ }else if( rc==SQLITE_DONE && pTab->zContentTbl==0 ){
+ rc = FTS_CORRUPT_VTAB;
+ }
+ }
+ }
+ if( rc==SQLITE_DONE ){
+ rc = SQLITE_OK;
+ }
+
+ pMod->xClose(pC);
+ if( rc!=SQLITE_OK ) goto offsets_out;
+ }
+
+ offsets_out:
+ sqlite3_free(sCtx.aTerm);
+ assert( rc!=SQLITE_DONE );
+ sqlite3Fts3SegmentsClose(pTab);
+ if( rc!=SQLITE_OK ){
+ sqlite3_result_error_code(pCtx, rc);
+ sqlite3_free(res.z);
+ }else{
+ sqlite3_result_text(pCtx, res.z, res.n-1, sqlite3_free);
+ }
+ return;
+}
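+
+/*
+** Usage sketch (the table name "docs" is hypothetical): offsets()
+** returns one space-separated group of four integers per reported
+** term, in the "%d %d %d %d " format used above:
+**
+**   SELECT offsets(docs) FROM docs WHERE docs MATCH 'sqlite';
+**   -- e.g. "0 0 6 6": column 0, query term 0, byte offset 6, 6 bytes
+*/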
+
+/*
+** Implementation of matchinfo() function.
+*/
+SQLITE_PRIVATE void sqlite3Fts3Matchinfo(
+ sqlite3_context *pContext, /* Function call context */
+ Fts3Cursor *pCsr, /* FTS3 table cursor */
+ const char *zArg /* Second arg to matchinfo() function */
+){
+ Fts3Table *pTab = (Fts3Table *)pCsr->base.pVtab;
+ const char *zFormat;
+
+ if( zArg ){
+ zFormat = zArg;
+ }else{
+ zFormat = FTS3_MATCHINFO_DEFAULT;
+ }
+
+ if( !pCsr->pExpr ){
+ sqlite3_result_blob(pContext, "", 0, SQLITE_STATIC);
+ return;
+ }else{
+ /* Retrieve matchinfo() data. */
+ fts3GetMatchinfo(pContext, pCsr, zFormat);
+ sqlite3Fts3SegmentsClose(pTab);
+ }
+}
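+
+/*
+** Usage sketch (the table name "docs" is hypothetical):
+**
+**   SELECT matchinfo(docs) FROM docs WHERE docs MATCH 'a b';
+**   SELECT matchinfo(docs, 'pcx') FROM docs WHERE docs MATCH 'a b';
+**
+** The two forms are equivalent, since FTS3_MATCHINFO_DEFAULT is "pcx".
+** The result is a blob of native byte-order 32-bit unsigned integers,
+** laid out as computed by fts3MatchinfoValues() above.
+*/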
+
+#endif
+
+/************** End of fts3_snippet.c ****************************************/
+/************** Begin file fts3_unicode.c ************************************/
+/*
+** 2012 May 24
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+******************************************************************************
+**
+** Implementation of the "unicode" full-text-search tokenizer.
+*/
+
+#ifndef SQLITE_DISABLE_FTS3_UNICODE
+
+/* #include "fts3Int.h" */
+#if !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS3)
+
+/* #include <assert.h> */
+/* #include <stdlib.h> */
+/* #include <stdio.h> */
+/* #include <string.h> */
+
+/* #include "fts3_tokenizer.h" */
+
+/*
+** The following two macros - READ_UTF8 and WRITE_UTF8 - have been copied
+** from the sqlite3 source file utf.c. If this file is compiled as part
+** of the amalgamation, they are not required.
+*/
+#ifndef SQLITE_AMALGAMATION
+
+static const unsigned char sqlite3Utf8Trans1[] = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x00, 0x01, 0x02, 0x03, 0x00, 0x01, 0x00, 0x00,
+};
+
+#define READ_UTF8(zIn, zTerm, c) \
+ c = *(zIn++); \
+ if( c>=0xc0 ){ \
+ c = sqlite3Utf8Trans1[c-0xc0]; \
+ while( zIn!=zTerm && (*zIn & 0xc0)==0x80 ){ \
+ c = (c<<6) + (0x3f & *(zIn++)); \
+ } \
+ if( c<0x80 \
+ || (c&0xFFFFF800)==0xD800 \
+ || (c&0xFFFFFFFE)==0xFFFE ){ c = 0xFFFD; } \
+ }
+
+#define WRITE_UTF8(zOut, c) { \
+ if( c<0x00080 ){ \
+ *zOut++ = (u8)(c&0xFF); \
+ } \
+ else if( c<0x00800 ){ \
+ *zOut++ = 0xC0 + (u8)((c>>6)&0x1F); \
+ *zOut++ = 0x80 + (u8)(c & 0x3F); \
+ } \
+ else if( c<0x10000 ){ \
+ *zOut++ = 0xE0 + (u8)((c>>12)&0x0F); \
+ *zOut++ = 0x80 + (u8)((c>>6) & 0x3F); \
+ *zOut++ = 0x80 + (u8)(c & 0x3F); \
+ }else{ \
+ *zOut++ = 0xF0 + (u8)((c>>18) & 0x07); \
+ *zOut++ = 0x80 + (u8)((c>>12) & 0x3F); \
+ *zOut++ = 0x80 + (u8)((c>>6) & 0x3F); \
+ *zOut++ = 0x80 + (u8)(c & 0x3F); \
+ } \
+}
+
+#endif /* ifndef SQLITE_AMALGAMATION */
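+
+/*
+** Worked example: the two-byte UTF-8 sequence 0xC3 0xA9 encodes U+00E9.
+** READ_UTF8 maps 0xC3 through sqlite3Utf8Trans1[0xC3-0xC0] to 0x03 and
+** then folds in the low six bits of the trailing byte:
+** (0x03<<6) + (0x3F & 0xA9) == 0xE9. WRITE_UTF8 reverses this: for
+** c==0xE9 (0x80<=c<0x800) it emits 0xC0+(c>>6)==0xC3 followed by
+** 0x80+(c&0x3F)==0xA9.
+*/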
+
+typedef struct unicode_tokenizer unicode_tokenizer;
+typedef struct unicode_cursor unicode_cursor;
+
+struct unicode_tokenizer {
+ sqlite3_tokenizer base;
+ int eRemoveDiacritic;
+ int nException;
+ int *aiException;
+};
+
+struct unicode_cursor {
+ sqlite3_tokenizer_cursor base;
+ const unsigned char *aInput; /* Input text being tokenized */
+ int nInput; /* Size of aInput[] in bytes */
+ int iOff; /* Current offset within aInput[] */
+ int iToken; /* Index of next token to be returned */
+ char *zToken; /* storage for current token */
+ int nAlloc; /* space allocated at zToken */
+};
+
+
+/*
+** Destroy a tokenizer allocated by unicodeCreate().
+*/
+static int unicodeDestroy(sqlite3_tokenizer *pTokenizer){
+ if( pTokenizer ){
+ unicode_tokenizer *p = (unicode_tokenizer *)pTokenizer;
+ sqlite3_free(p->aiException);
+ sqlite3_free(p);
+ }
+ return SQLITE_OK;
+}
+
+/*
+** As part of a tokenchars= or separators= option, the CREATE VIRTUAL TABLE
+** statement has specified that the tokenizer for this table shall consider
+** all characters in string zIn/nIn to be separators (if bAlnum==0) or
+** token characters (if bAlnum==1).
+**
+** For each codepoint in the zIn/nIn string, this function checks if the
+** sqlite3FtsUnicodeIsalnum() function already returns the desired result.
+** If so, no action is taken. Otherwise, the codepoint is added to the
+** unicode_tokenizer.aiException[] array. For the purposes of tokenization,
+** the return value of sqlite3FtsUnicodeIsalnum() is inverted for all
+** codepoints in the aiException[] array.
+**
+** If a standalone diacritic mark (one that sqlite3FtsUnicodeIsdiacritic()
+** identifies as a diacritic) occurs in the zIn/nIn string it is ignored.
+** It is not possible to change the behavior of the tokenizer with respect
+** to these codepoints.
+*/
+static int unicodeAddExceptions(
+ unicode_tokenizer *p, /* Tokenizer to add exceptions to */
+ int bAlnum, /* Replace Isalnum() return value with this */
+ const char *zIn, /* Array of characters to make exceptions */
+ int nIn /* Length of zIn in bytes */
+){
+ const unsigned char *z = (const unsigned char *)zIn;
+ const unsigned char *zTerm = &z[nIn];
+ unsigned int iCode;
+ int nEntry = 0;
+
+ assert( bAlnum==0 || bAlnum==1 );
+
+ while( z<zTerm ){
+ READ_UTF8(z, zTerm, iCode);
+ assert( (sqlite3FtsUnicodeIsalnum((int)iCode) & 0xFFFFFFFE)==0 );
+ if( sqlite3FtsUnicodeIsalnum((int)iCode)!=bAlnum
+ && sqlite3FtsUnicodeIsdiacritic((int)iCode)==0
+ ){
+ nEntry++;
+ }
+ }
+
+ if( nEntry ){
+ int *aNew; /* New aiException[] array */
+ int nNew; /* Number of valid entries in array aNew[] */
+
+ aNew = sqlite3_realloc64(p->aiException,(p->nException+nEntry)*sizeof(int));
+ if( aNew==0 ) return SQLITE_NOMEM;
+ nNew = p->nException;
+
+ z = (const unsigned char *)zIn;
+ while( z<zTerm ){
+ READ_UTF8(z, zTerm, iCode);
+ if( sqlite3FtsUnicodeIsalnum((int)iCode)!=bAlnum
+ && sqlite3FtsUnicodeIsdiacritic((int)iCode)==0
+ ){
+ int i, j;
+ for(i=0; i<nNew && aNew[i]<(int)iCode; i++);
+ for(j=nNew; j>i; j--) aNew[j] = aNew[j-1];
+ aNew[i] = (int)iCode;
+ nNew++;
+ }
+ }
+ p->aiException = aNew;
+ p->nException = nNew;
+ }
+
+ return SQLITE_OK;
+}
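+
+/*
+** Usage sketch: the exception arrays built above correspond to the
+** documented unicode61 tokenizer arguments, for example (hypothetical
+** table and column names):
+**
+**   CREATE VIRTUAL TABLE t USING fts4(
+**     x, tokenize=unicode61 "tokenchars=-_" "separators=."
+**   );
+**
+** This makes '-' and '_' token characters and '.' a separator,
+** inverting what sqlite3FtsUnicodeIsalnum() reports for those three
+** codepoints.
+*/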
+
+/*
+** Return true if the p->aiException[] array contains the value iCode.
+*/
+static int unicodeIsException(unicode_tokenizer *p, int iCode){
+ if( p->nException>0 ){
+ int *a = p->aiException;
+ int iLo = 0;
+ int iHi = p->nException-1;
+
+ while( iHi>=iLo ){
+ int iTest = (iHi + iLo) / 2;
+ if( iCode==a[iTest] ){
+ return 1;
+ }else if( iCode>a[iTest] ){
+ iLo = iTest+1;
+ }else{
+ iHi = iTest-1;
+ }
+ }
+ }
+
+ return 0;
+}
+
+/*
+** Return true if, for the purposes of tokenization, codepoint iCode is
+** considered a token character (not a separator).
+*/
+static int unicodeIsAlnum(unicode_tokenizer *p, int iCode){
+ assert( (sqlite3FtsUnicodeIsalnum(iCode) & 0xFFFFFFFE)==0 );
+ return sqlite3FtsUnicodeIsalnum(iCode) ^ unicodeIsException(p, iCode);
+}
+
+/*
+** Create a new tokenizer instance.
+*/
+static int unicodeCreate(
+ int nArg, /* Size of array azArg[] */
+ const char * const *azArg, /* Tokenizer creation arguments */
+ sqlite3_tokenizer **pp /* OUT: New tokenizer handle */
+){
+ unicode_tokenizer *pNew; /* New tokenizer object */
+ int i;
+ int rc = SQLITE_OK;
+
+ pNew = (unicode_tokenizer *) sqlite3_malloc(sizeof(unicode_tokenizer));
+ if( pNew==NULL ) return SQLITE_NOMEM;
+ memset(pNew, 0, sizeof(unicode_tokenizer));
+ pNew->eRemoveDiacritic = 1;
+
+ for(i=0; rc==SQLITE_OK && i<nArg; i++){
+ const char *z = azArg[i];
+ int n = (int)strlen(z);
+
+ if( n==19 && memcmp("remove_diacritics=1", z, 19)==0 ){
+ pNew->eRemoveDiacritic = 1;
+ }
+ else if( n==19 && memcmp("remove_diacritics=0", z, 19)==0 ){
+ pNew->eRemoveDiacritic = 0;
+ }
+ else if( n==19 && memcmp("remove_diacritics=2", z, 19)==0 ){
+ pNew->eRemoveDiacritic = 2;
+ }
+ else if( n>=11 && memcmp("tokenchars=", z, 11)==0 ){
+ rc = unicodeAddExceptions(pNew, 1, &z[11], n-11);
+ }
+ else if( n>=11 && memcmp("separators=", z, 11)==0 ){
+ rc = unicodeAddExceptions(pNew, 0, &z[11], n-11);
+ }
+ else{
+ /* Unrecognized argument */
+ rc = SQLITE_ERROR;
+ }
+ }
+
+ if( rc!=SQLITE_OK ){
+ unicodeDestroy((sqlite3_tokenizer *)pNew);
+ pNew = 0;
+ }
+ *pp = (sqlite3_tokenizer *)pNew;
+ return rc;
+}
+
+/*
+** Prepare to begin tokenizing a particular string. The input
+** string to be tokenized is pInput[0..nBytes-1]. A cursor
+** used to incrementally tokenize this string is returned in
+** *ppCursor.
+*/
+static int unicodeOpen(
+ sqlite3_tokenizer *p, /* The tokenizer */
+ const char *aInput, /* Input string */
+ int nInput, /* Size of string aInput in bytes */
+ sqlite3_tokenizer_cursor **pp /* OUT: New cursor object */
+){
+ unicode_cursor *pCsr;
+
+ pCsr = (unicode_cursor *)sqlite3_malloc(sizeof(unicode_cursor));
+ if( pCsr==0 ){
+ return SQLITE_NOMEM;
+ }
+ memset(pCsr, 0, sizeof(unicode_cursor));
+
+ pCsr->aInput = (const unsigned char *)aInput;
+ if( aInput==0 ){
+ pCsr->nInput = 0;
+ }else if( nInput<0 ){
+ pCsr->nInput = (int)strlen(aInput);
+ }else{
+ pCsr->nInput = nInput;
+ }
+
+ *pp = &pCsr->base;
+ UNUSED_PARAMETER(p);
+ return SQLITE_OK;
+}
+
+/*
+** Close a tokenization cursor previously opened by a call to
+** unicodeOpen() above.
+*/
+static int unicodeClose(sqlite3_tokenizer_cursor *pCursor){
+ unicode_cursor *pCsr = (unicode_cursor *) pCursor;
+ sqlite3_free(pCsr->zToken);
+ sqlite3_free(pCsr);
+ return SQLITE_OK;
+}
+
+/*
+** Extract the next token from a tokenization cursor. The cursor must
+** have been opened by a prior call to unicodeOpen().
+*/
+static int unicodeNext(
+ sqlite3_tokenizer_cursor *pC, /* Cursor returned by unicodeOpen */
+ const char **paToken, /* OUT: Token text */
+ int *pnToken, /* OUT: Number of bytes at *paToken */
+ int *piStart, /* OUT: Starting offset of token */
+ int *piEnd, /* OUT: Ending offset of token */
+ int *piPos /* OUT: Position integer of token */
+){
+ unicode_cursor *pCsr = (unicode_cursor *)pC;
+ unicode_tokenizer *p = ((unicode_tokenizer *)pCsr->base.pTokenizer);
+ unsigned int iCode = 0;
+ char *zOut;
+ const unsigned char *z = &pCsr->aInput[pCsr->iOff];
+ const unsigned char *zStart = z;
+ const unsigned char *zEnd;
+ const unsigned char *zTerm = &pCsr->aInput[pCsr->nInput];
+
+ /* Scan past any delimiter characters before the start of the next token.
+ ** Return SQLITE_DONE early if this takes us all the way to the end of
+ ** the input. */
+ while( z<zTerm ){
+ READ_UTF8(z, zTerm, iCode);
+ if( unicodeIsAlnum(p, (int)iCode) ) break;
+ zStart = z;
+ }
+ if( zStart>=zTerm ) return SQLITE_DONE;
+
+ zOut = pCsr->zToken;
+ do {
+ int iOut;
+
+ /* Grow the output buffer if required. */
+ if( (zOut-pCsr->zToken)>=(pCsr->nAlloc-4) ){
+ char *zNew = sqlite3_realloc64(pCsr->zToken, pCsr->nAlloc+64);
+ if( !zNew ) return SQLITE_NOMEM;
+ zOut = &zNew[zOut - pCsr->zToken];
+ pCsr->zToken = zNew;
+ pCsr->nAlloc += 64;
+ }
+
+ /* Write the folded case of the last character read to the output */
+ zEnd = z;
+ iOut = sqlite3FtsUnicodeFold((int)iCode, p->eRemoveDiacritic);
+ if( iOut ){
+ WRITE_UTF8(zOut, iOut);
+ }
+
+ /* If the cursor is not at EOF, read the next character */
+ if( z>=zTerm ) break;
+ READ_UTF8(z, zTerm, iCode);
+ }while( unicodeIsAlnum(p, (int)iCode)
+ || sqlite3FtsUnicodeIsdiacritic((int)iCode)
+ );
+
+ /* Set the output variables and return. */
+ pCsr->iOff = (int)(z - pCsr->aInput);
+ *paToken = pCsr->zToken;
+ *pnToken = (int)(zOut - pCsr->zToken);
+ *piStart = (int)(zStart - pCsr->aInput);
+ *piEnd = (int)(zEnd - pCsr->aInput);
+ *piPos = pCsr->iToken++;
+ return SQLITE_OK;
+}
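+
+/*
+** Illustrative walk-through: for the 8-byte input "Hola, Eh" with the
+** default options, successive unicodeNext() calls return the folded
+** token "hola" (iStart=0, iEnd=4, iPos=0), then "eh" (iStart=6,
+** iEnd=8, iPos=1), then SQLITE_DONE. The ',' and ' ' bytes are skipped
+** by the separator scan at the top of the function.
+*/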
+
+/*
+** Set *ppModule to a pointer to the sqlite3_tokenizer_module
+** structure for the unicode tokenizer.
+*/
+SQLITE_PRIVATE void sqlite3Fts3UnicodeTokenizer(sqlite3_tokenizer_module const **ppModule){
+ static const sqlite3_tokenizer_module module = {
+ 0,
+ unicodeCreate,
+ unicodeDestroy,
+ unicodeOpen,
+ unicodeClose,
+ unicodeNext,
+ 0,
+ };
+ *ppModule = &module;
+}
+
+#endif /* !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS3) */
+#endif /* ifndef SQLITE_DISABLE_FTS3_UNICODE */
+
+/************** End of fts3_unicode.c ****************************************/
+/************** Begin file fts3_unicode2.c ***********************************/
+/*
+** 2012-05-25
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+******************************************************************************
+*/
+
+/*
+** DO NOT EDIT THIS MACHINE GENERATED FILE.
+*/
+
+#ifndef SQLITE_DISABLE_FTS3_UNICODE
+#if defined(SQLITE_ENABLE_FTS3) || defined(SQLITE_ENABLE_FTS4)
+
+/* #include <assert.h> */
+
+/*
+** Return true if the argument corresponds to a unicode codepoint
+** classified as either a letter or a number. Otherwise false.
+**
+** The results are undefined if the value passed to this function
+** is less than zero.
+*/
+SQLITE_PRIVATE int sqlite3FtsUnicodeIsalnum(int c){
+ /* Each unsigned integer in the following array corresponds to a contiguous
+ ** range of unicode codepoints that are not either letters or numbers (i.e.
+ ** codepoints for which this function should return 0).
+ **
+ ** The most significant 22 bits in each 32-bit value contain the first
+ ** codepoint in the range. The least significant 10 bits are used to store
+ ** the size of the range (always at least 1). In other words, the value
+ ** ((C<<10) + N) represents a range of N codepoints starting with codepoint
+ ** C. It is not possible to represent a range larger than 1023 codepoints
+ ** using this format.
+ */
+ static const unsigned int aEntry[] = {
+ 0x00000030, 0x0000E807, 0x00016C06, 0x0001EC2F, 0x0002AC07,
+ 0x0002D001, 0x0002D803, 0x0002EC01, 0x0002FC01, 0x00035C01,
+ 0x0003DC01, 0x000B0804, 0x000B480E, 0x000B9407, 0x000BB401,
+ 0x000BBC81, 0x000DD401, 0x000DF801, 0x000E1002, 0x000E1C01,
+ 0x000FD801, 0x00120808, 0x00156806, 0x00162402, 0x00163C01,
+ 0x00164437, 0x0017CC02, 0x00180005, 0x00181816, 0x00187802,
+ 0x00192C15, 0x0019A804, 0x0019C001, 0x001B5001, 0x001B580F,
+ 0x001B9C07, 0x001BF402, 0x001C000E, 0x001C3C01, 0x001C4401,
+ 0x001CC01B, 0x001E980B, 0x001FAC09, 0x001FD804, 0x00205804,
+ 0x00206C09, 0x00209403, 0x0020A405, 0x0020C00F, 0x00216403,
+ 0x00217801, 0x0023901B, 0x00240004, 0x0024E803, 0x0024F812,
+ 0x00254407, 0x00258804, 0x0025C001, 0x00260403, 0x0026F001,
+ 0x0026F807, 0x00271C02, 0x00272C03, 0x00275C01, 0x00278802,
+ 0x0027C802, 0x0027E802, 0x00280403, 0x0028F001, 0x0028F805,
+ 0x00291C02, 0x00292C03, 0x00294401, 0x0029C002, 0x0029D401,
+ 0x002A0403, 0x002AF001, 0x002AF808, 0x002B1C03, 0x002B2C03,
+ 0x002B8802, 0x002BC002, 0x002C0403, 0x002CF001, 0x002CF807,
+ 0x002D1C02, 0x002D2C03, 0x002D5802, 0x002D8802, 0x002DC001,
+ 0x002E0801, 0x002EF805, 0x002F1803, 0x002F2804, 0x002F5C01,
+ 0x002FCC08, 0x00300403, 0x0030F807, 0x00311803, 0x00312804,
+ 0x00315402, 0x00318802, 0x0031FC01, 0x00320802, 0x0032F001,
+ 0x0032F807, 0x00331803, 0x00332804, 0x00335402, 0x00338802,
+ 0x00340802, 0x0034F807, 0x00351803, 0x00352804, 0x00355C01,
+ 0x00358802, 0x0035E401, 0x00360802, 0x00372801, 0x00373C06,
+ 0x00375801, 0x00376008, 0x0037C803, 0x0038C401, 0x0038D007,
+ 0x0038FC01, 0x00391C09, 0x00396802, 0x003AC401, 0x003AD006,
+ 0x003AEC02, 0x003B2006, 0x003C041F, 0x003CD00C, 0x003DC417,
+ 0x003E340B, 0x003E6424, 0x003EF80F, 0x003F380D, 0x0040AC14,
+ 0x00412806, 0x00415804, 0x00417803, 0x00418803, 0x00419C07,
+ 0x0041C404, 0x0042080C, 0x00423C01, 0x00426806, 0x0043EC01,
+ 0x004D740C, 0x004E400A, 0x00500001, 0x0059B402, 0x005A0001,
+ 0x005A6C02, 0x005BAC03, 0x005C4803, 0x005CC805, 0x005D4802,
+ 0x005DC802, 0x005ED023, 0x005F6004, 0x005F7401, 0x0060000F,
+ 0x0062A401, 0x0064800C, 0x0064C00C, 0x00650001, 0x00651002,
+ 0x0066C011, 0x00672002, 0x00677822, 0x00685C05, 0x00687802,
+ 0x0069540A, 0x0069801D, 0x0069FC01, 0x006A8007, 0x006AA006,
+ 0x006C0005, 0x006CD011, 0x006D6823, 0x006E0003, 0x006E840D,
+ 0x006F980E, 0x006FF004, 0x00709014, 0x0070EC05, 0x0071F802,
+ 0x00730008, 0x00734019, 0x0073B401, 0x0073C803, 0x00770027,
+ 0x0077F004, 0x007EF401, 0x007EFC03, 0x007F3403, 0x007F7403,
+ 0x007FB403, 0x007FF402, 0x00800065, 0x0081A806, 0x0081E805,
+ 0x00822805, 0x0082801A, 0x00834021, 0x00840002, 0x00840C04,
+ 0x00842002, 0x00845001, 0x00845803, 0x00847806, 0x00849401,
+ 0x00849C01, 0x0084A401, 0x0084B801, 0x0084E802, 0x00850005,
+ 0x00852804, 0x00853C01, 0x00864264, 0x00900027, 0x0091000B,
+ 0x0092704E, 0x00940200, 0x009C0475, 0x009E53B9, 0x00AD400A,
+ 0x00B39406, 0x00B3BC03, 0x00B3E404, 0x00B3F802, 0x00B5C001,
+ 0x00B5FC01, 0x00B7804F, 0x00B8C00C, 0x00BA001A, 0x00BA6C59,
+ 0x00BC00D6, 0x00BFC00C, 0x00C00005, 0x00C02019, 0x00C0A807,
+ 0x00C0D802, 0x00C0F403, 0x00C26404, 0x00C28001, 0x00C3EC01,
+ 0x00C64002, 0x00C6580A, 0x00C70024, 0x00C8001F, 0x00C8A81E,
+ 0x00C94001, 0x00C98020, 0x00CA2827, 0x00CB003F, 0x00CC0100,
+ 0x01370040, 0x02924037, 0x0293F802, 0x02983403, 0x0299BC10,
+ 0x029A7C01, 0x029BC008, 0x029C0017, 0x029C8002, 0x029E2402,
+ 0x02A00801, 0x02A01801, 0x02A02C01, 0x02A08C09, 0x02A0D804,
+ 0x02A1D004, 0x02A20002, 0x02A2D011, 0x02A33802, 0x02A38012,
+ 0x02A3E003, 0x02A4980A, 0x02A51C0D, 0x02A57C01, 0x02A60004,
+ 0x02A6CC1B, 0x02A77802, 0x02A8A40E, 0x02A90C01, 0x02A93002,
+ 0x02A97004, 0x02A9DC03, 0x02A9EC01, 0x02AAC001, 0x02AAC803,
+ 0x02AADC02, 0x02AAF802, 0x02AB0401, 0x02AB7802, 0x02ABAC07,
+ 0x02ABD402, 0x02AF8C0B, 0x03600001, 0x036DFC02, 0x036FFC02,
+ 0x037FFC01, 0x03EC7801, 0x03ECA401, 0x03EEC810, 0x03F4F802,
+ 0x03F7F002, 0x03F8001A, 0x03F88007, 0x03F8C023, 0x03F95013,
+ 0x03F9A004, 0x03FBFC01, 0x03FC040F, 0x03FC6807, 0x03FCEC06,
+ 0x03FD6C0B, 0x03FF8007, 0x03FFA007, 0x03FFE405, 0x04040003,
+ 0x0404DC09, 0x0405E411, 0x0406400C, 0x0407402E, 0x040E7C01,
+ 0x040F4001, 0x04215C01, 0x04247C01, 0x0424FC01, 0x04280403,
+ 0x04281402, 0x04283004, 0x0428E003, 0x0428FC01, 0x04294009,
+ 0x0429FC01, 0x042CE407, 0x04400003, 0x0440E016, 0x04420003,
+ 0x0442C012, 0x04440003, 0x04449C0E, 0x04450004, 0x04460003,
+ 0x0446CC0E, 0x04471404, 0x045AAC0D, 0x0491C004, 0x05BD442E,
+ 0x05BE3C04, 0x074000F6, 0x07440027, 0x0744A4B5, 0x07480046,
+ 0x074C0057, 0x075B0401, 0x075B6C01, 0x075BEC01, 0x075C5401,
+ 0x075CD401, 0x075D3C01, 0x075DBC01, 0x075E2401, 0x075EA401,
+ 0x075F0C01, 0x07BBC002, 0x07C0002C, 0x07C0C064, 0x07C2800F,
+ 0x07C2C40E, 0x07C3040F, 0x07C3440F, 0x07C4401F, 0x07C4C03C,
+ 0x07C5C02B, 0x07C7981D, 0x07C8402B, 0x07C90009, 0x07C94002,
+ 0x07CC0021, 0x07CCC006, 0x07CCDC46, 0x07CE0014, 0x07CE8025,
+ 0x07CF1805, 0x07CF8011, 0x07D0003F, 0x07D10001, 0x07D108B6,
+ 0x07D3E404, 0x07D4003E, 0x07D50004, 0x07D54018, 0x07D7EC46,
+ 0x07D9140B, 0x07DA0046, 0x07DC0074, 0x38000401, 0x38008060,
+ 0x380400F0,
+ };
+ static const unsigned int aAscii[4] = {
+ 0xFFFFFFFF, 0xFC00FFFF, 0xF8000001, 0xF8000001,
+ };
+
+ if( (unsigned int)c<128 ){
+ return ( (aAscii[c >> 5] & ((unsigned int)1 << (c & 0x001F)))==0 );
+ }else if( (unsigned int)c<(1<<22) ){
+ unsigned int key = (((unsigned int)c)<<10) | 0x000003FF;
+ int iRes = 0;
+ int iHi = sizeof(aEntry)/sizeof(aEntry[0]) - 1;
+ int iLo = 0;
+ while( iHi>=iLo ){
+ int iTest = (iHi + iLo) / 2;
+ if( key >= aEntry[iTest] ){
+ iRes = iTest;
+ iLo = iTest+1;
+ }else{
+ iHi = iTest-1;
+ }
+ }
+ assert( aEntry[0]<key );
+ assert( key>=aEntry[iRes] );
+ return (((unsigned int)c) >= ((aEntry[iRes]>>10) + (aEntry[iRes]&0x3FF)));
+ }
+ return 1;
+}
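+
+/*
+** Worked example (decoding the table above): the entry 0x00035C01 is
+** (215<<10)+1, a one-codepoint range covering U+00D7 (the
+** multiplication sign). For c==215 the binary search selects this
+** entry and 215 < 215+1, so 0 is returned (not alphanumeric); for
+** c==216 (U+00D8, a letter) the same entry is still the largest one
+** not exceeding the key, and 216 >= 215+1, so 1 is returned.
+*/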
+
+
+/*
+** If the argument is a codepoint corresponding to a lowercase letter
+** in the ASCII range with a diacritic added, return the codepoint
+** of the ASCII letter only. For example, if passed 235 - "LATIN
+** SMALL LETTER E WITH DIAERESIS" - return 101 ("LATIN SMALL LETTER
+** E"). The results of passing a codepoint that corresponds to an
+** uppercase letter are undefined.
+*/
+static int remove_diacritic(int c, int bComplex){
+ unsigned short aDia[] = {
+ 0, 1797, 1848, 1859, 1891, 1928, 1940, 1995,
+ 2024, 2040, 2060, 2110, 2168, 2206, 2264, 2286,
+ 2344, 2383, 2472, 2488, 2516, 2596, 2668, 2732,
+ 2782, 2842, 2894, 2954, 2984, 3000, 3028, 3336,
+ 3456, 3696, 3712, 3728, 3744, 3766, 3832, 3896,
+ 3912, 3928, 3944, 3968, 4008, 4040, 4056, 4106,
+ 4138, 4170, 4202, 4234, 4266, 4296, 4312, 4344,
+ 4408, 4424, 4442, 4472, 4488, 4504, 6148, 6198,
+ 6264, 6280, 6360, 6429, 6505, 6529, 61448, 61468,
+ 61512, 61534, 61592, 61610, 61642, 61672, 61688, 61704,
+ 61726, 61784, 61800, 61816, 61836, 61880, 61896, 61914,
+ 61948, 61998, 62062, 62122, 62154, 62184, 62200, 62218,
+ 62252, 62302, 62364, 62410, 62442, 62478, 62536, 62554,
+ 62584, 62604, 62640, 62648, 62656, 62664, 62730, 62766,
+ 62830, 62890, 62924, 62974, 63032, 63050, 63082, 63118,
+ 63182, 63242, 63274, 63310, 63368, 63390,
+ };
+#define HIBIT ((unsigned char)0x80)
+ unsigned char aChar[] = {
+ '\0', 'a', 'c', 'e', 'i', 'n',
+ 'o', 'u', 'y', 'y', 'a', 'c',
+ 'd', 'e', 'e', 'g', 'h', 'i',
+ 'j', 'k', 'l', 'n', 'o', 'r',
+ 's', 't', 'u', 'u', 'w', 'y',
+ 'z', 'o', 'u', 'a', 'i', 'o',
+ 'u', 'u'|HIBIT, 'a'|HIBIT, 'g', 'k', 'o',
+ 'o'|HIBIT, 'j', 'g', 'n', 'a'|HIBIT, 'a',
+ 'e', 'i', 'o', 'r', 'u', 's',
+ 't', 'h', 'a', 'e', 'o'|HIBIT, 'o',
+ 'o'|HIBIT, 'y', '\0', '\0', '\0', '\0',
+ '\0', '\0', '\0', '\0', 'a', 'b',
+ 'c'|HIBIT, 'd', 'd', 'e'|HIBIT, 'e', 'e'|HIBIT,
+ 'f', 'g', 'h', 'h', 'i', 'i'|HIBIT,
+ 'k', 'l', 'l'|HIBIT, 'l', 'm', 'n',
+ 'o'|HIBIT, 'p', 'r', 'r'|HIBIT, 'r', 's',
+ 's'|HIBIT, 't', 'u', 'u'|HIBIT, 'v', 'w',
+ 'w', 'x', 'y', 'z', 'h', 't',
+ 'w', 'y', 'a', 'a'|HIBIT, 'a'|HIBIT, 'a'|HIBIT,
+ 'e', 'e'|HIBIT, 'e'|HIBIT, 'i', 'o', 'o'|HIBIT,
+ 'o'|HIBIT, 'o'|HIBIT, 'u', 'u'|HIBIT, 'u'|HIBIT, 'y',
+ };
+
+ unsigned int key = (((unsigned int)c)<<3) | 0x00000007;
+ int iRes = 0;
+ int iHi = sizeof(aDia)/sizeof(aDia[0]) - 1;
+ int iLo = 0;
+ while( iHi>=iLo ){
+ int iTest = (iHi + iLo) / 2;
+ if( key >= aDia[iTest] ){
+ iRes = iTest;
+ iLo = iTest+1;
+ }else{
+ iHi = iTest-1;
+ }
+ }
+ assert( key>=aDia[iRes] );
+ if( bComplex==0 && (aChar[iRes] & 0x80) ) return c;
+ return (c > (aDia[iRes]>>3) + (aDia[iRes]&0x07)) ? c : ((int)aChar[iRes] & 0x7F);
+}
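+
+/*
+** Worked example: aDia[3]==1859 encodes (232<<3)+3, so codepoints
+** 232..235 (e-grave through e-diaeresis) fold to aChar[3]=='e'. For
+** c==235 the key is (235<<3)|7 == 1887; the binary search selects
+** aDia[3] (1859 <= 1887 < 1891), 235 falls within 232+3, and 'e'
+** (101) is returned.
+*/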
+
+
+/*
+** Return true if the argument interpreted as a unicode codepoint
+** is a diacritical modifier character.
+*/
+SQLITE_PRIVATE int sqlite3FtsUnicodeIsdiacritic(int c){
+ unsigned int mask0 = 0x08029FDF;
+ unsigned int mask1 = 0x000361F8;
+ if( c<768 || c>817 ) return 0;
+ return (c < 768+32) ?
+ (mask0 & ((unsigned int)1 << (c-768))) :
+ (mask1 & ((unsigned int)1 << (c-768-32)));
+}
+
+
+/*
+** Interpret the argument as a unicode codepoint. If the codepoint
+** is an upper case character that has a lower case equivalent,
+** return the codepoint corresponding to the lower case version.
+** Otherwise, return a copy of the argument.
+**
+** The results are undefined if the value passed to this function
+** is less than zero.
+*/
+SQLITE_PRIVATE int sqlite3FtsUnicodeFold(int c, int eRemoveDiacritic){
+ /* Each entry in the following array defines a rule for folding a range
+ ** of codepoints to lower case. The rule applies to a range of nRange
+ ** codepoints starting at codepoint iCode.
+ **
+ ** If the least significant bit in flags is clear, then the rule applies
+ ** to all nRange codepoints (i.e. all nRange codepoints are upper case and
+ ** need to be folded). Or, if it is set, then the rule only applies to
+ ** every second codepoint in the range, starting with codepoint C.
+ **
+ ** The 7 most significant bits in flags are an index into the aiOff[]
+ ** array. If a specific codepoint C does require folding, then its lower
+ ** case equivalent is ((C + aiOff[flags>>1]) & 0xFFFF).
+ **
+ ** The contents of this array are generated by parsing the CaseFolding.txt
+ ** file distributed as part of the "Unicode Character Database". See
+ ** http://www.unicode.org for details.
+ */
+ static const struct TableEntry {
+ unsigned short iCode;
+ unsigned char flags;
+ unsigned char nRange;
+ } aEntry[] = {
+ {65, 14, 26}, {181, 64, 1}, {192, 14, 23},
+ {216, 14, 7}, {256, 1, 48}, {306, 1, 6},
+ {313, 1, 16}, {330, 1, 46}, {376, 116, 1},
+ {377, 1, 6}, {383, 104, 1}, {385, 50, 1},
+ {386, 1, 4}, {390, 44, 1}, {391, 0, 1},
+ {393, 42, 2}, {395, 0, 1}, {398, 32, 1},
+ {399, 38, 1}, {400, 40, 1}, {401, 0, 1},
+ {403, 42, 1}, {404, 46, 1}, {406, 52, 1},
+ {407, 48, 1}, {408, 0, 1}, {412, 52, 1},
+ {413, 54, 1}, {415, 56, 1}, {416, 1, 6},
+ {422, 60, 1}, {423, 0, 1}, {425, 60, 1},
+ {428, 0, 1}, {430, 60, 1}, {431, 0, 1},
+ {433, 58, 2}, {435, 1, 4}, {439, 62, 1},
+ {440, 0, 1}, {444, 0, 1}, {452, 2, 1},
+ {453, 0, 1}, {455, 2, 1}, {456, 0, 1},
+ {458, 2, 1}, {459, 1, 18}, {478, 1, 18},
+ {497, 2, 1}, {498, 1, 4}, {502, 122, 1},
+ {503, 134, 1}, {504, 1, 40}, {544, 110, 1},
+ {546, 1, 18}, {570, 70, 1}, {571, 0, 1},
+ {573, 108, 1}, {574, 68, 1}, {577, 0, 1},
+ {579, 106, 1}, {580, 28, 1}, {581, 30, 1},
+ {582, 1, 10}, {837, 36, 1}, {880, 1, 4},
+ {886, 0, 1}, {902, 18, 1}, {904, 16, 3},
+ {908, 26, 1}, {910, 24, 2}, {913, 14, 17},
+ {931, 14, 9}, {962, 0, 1}, {975, 4, 1},
+ {976, 140, 1}, {977, 142, 1}, {981, 146, 1},
+ {982, 144, 1}, {984, 1, 24}, {1008, 136, 1},
+ {1009, 138, 1}, {1012, 130, 1}, {1013, 128, 1},
+ {1015, 0, 1}, {1017, 152, 1}, {1018, 0, 1},
+ {1021, 110, 3}, {1024, 34, 16}, {1040, 14, 32},
+ {1120, 1, 34}, {1162, 1, 54}, {1216, 6, 1},
+ {1217, 1, 14}, {1232, 1, 88}, {1329, 22, 38},
+ {4256, 66, 38}, {4295, 66, 1}, {4301, 66, 1},
+ {7680, 1, 150}, {7835, 132, 1}, {7838, 96, 1},
+ {7840, 1, 96}, {7944, 150, 8}, {7960, 150, 6},
+ {7976, 150, 8}, {7992, 150, 8}, {8008, 150, 6},
+ {8025, 151, 8}, {8040, 150, 8}, {8072, 150, 8},
+ {8088, 150, 8}, {8104, 150, 8}, {8120, 150, 2},
+ {8122, 126, 2}, {8124, 148, 1}, {8126, 100, 1},
+ {8136, 124, 4}, {8140, 148, 1}, {8152, 150, 2},
+ {8154, 120, 2}, {8168, 150, 2}, {8170, 118, 2},
+ {8172, 152, 1}, {8184, 112, 2}, {8186, 114, 2},
+ {8188, 148, 1}, {8486, 98, 1}, {8490, 92, 1},
+ {8491, 94, 1}, {8498, 12, 1}, {8544, 8, 16},
+ {8579, 0, 1}, {9398, 10, 26}, {11264, 22, 47},
+ {11360, 0, 1}, {11362, 88, 1}, {11363, 102, 1},
+ {11364, 90, 1}, {11367, 1, 6}, {11373, 84, 1},
+ {11374, 86, 1}, {11375, 80, 1}, {11376, 82, 1},
+ {11378, 0, 1}, {11381, 0, 1}, {11390, 78, 2},
+ {11392, 1, 100}, {11499, 1, 4}, {11506, 0, 1},
+ {42560, 1, 46}, {42624, 1, 24}, {42786, 1, 14},
+ {42802, 1, 62}, {42873, 1, 4}, {42877, 76, 1},
+ {42878, 1, 10}, {42891, 0, 1}, {42893, 74, 1},
+ {42896, 1, 4}, {42912, 1, 10}, {42922, 72, 1},
+ {65313, 14, 26},
+ };
+ static const unsigned short aiOff[] = {
+ 1, 2, 8, 15, 16, 26, 28, 32,
+ 37, 38, 40, 48, 63, 64, 69, 71,
+ 79, 80, 116, 202, 203, 205, 206, 207,
+ 209, 210, 211, 213, 214, 217, 218, 219,
+ 775, 7264, 10792, 10795, 23228, 23256, 30204, 54721,
+ 54753, 54754, 54756, 54787, 54793, 54809, 57153, 57274,
+ 57921, 58019, 58363, 61722, 65268, 65341, 65373, 65406,
+ 65408, 65410, 65415, 65424, 65436, 65439, 65450, 65462,
+ 65472, 65476, 65478, 65480, 65482, 65488, 65506, 65511,
+ 65514, 65521, 65527, 65528, 65529,
+ };
+
+ int ret = c;
+
+ assert( sizeof(unsigned short)==2 && sizeof(unsigned char)==1 );
+
+ if( c<128 ){
+ if( c>='A' && c<='Z' ) ret = c + ('a' - 'A');
+ }else if( c<65536 ){
+ const struct TableEntry *p;
+ int iHi = sizeof(aEntry)/sizeof(aEntry[0]) - 1;
+ int iLo = 0;
+ int iRes = -1;
+
+ assert( c>aEntry[0].iCode );
+ while( iHi>=iLo ){
+ int iTest = (iHi + iLo) / 2;
+ int cmp = (c - aEntry[iTest].iCode);
+ if( cmp>=0 ){
+ iRes = iTest;
+ iLo = iTest+1;
+ }else{
+ iHi = iTest-1;
+ }
+ }
+
+ assert( iRes>=0 && c>=aEntry[iRes].iCode );
+ p = &aEntry[iRes];
+ if( c<(p->iCode + p->nRange) && 0==(0x01 & p->flags & (p->iCode ^ c)) ){
+ ret = (c + (aiOff[p->flags>>1])) & 0x0000FFFF;
+ assert( ret>0 );
+ }
+
+ if( eRemoveDiacritic ){
+ ret = remove_diacritic(ret, eRemoveDiacritic==2);
+ }
+ }
+
+ else if( c>=66560 && c<66600 ){
+ ret = c + 40;
+ }
+
+ return ret;
+}
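+
+/*
+** Worked examples: the entry {192, 14, 23} has a clear low flag bit,
+** so all of U+00C0..U+00D6 fold by aiOff[14>>1]==32, and
+** sqlite3FtsUnicodeFold(192, 0) returns 224. The entry {256, 1, 48}
+** has the low bit set, so only codepoints with the same parity as 256
+** fold: 256 becomes 256+aiOff[0]==257, while 257 is already lower
+** case and is returned unchanged.
+*/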
+#endif /* defined(SQLITE_ENABLE_FTS3) || defined(SQLITE_ENABLE_FTS4) */
+#endif /* !defined(SQLITE_DISABLE_FTS3_UNICODE) */
+
+/************** End of fts3_unicode2.c ***************************************/
+/************** Begin file json1.c *******************************************/
+/*
+** 2015-08-12
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+******************************************************************************
+**
+** This SQLite extension implements JSON functions. The interface is
+** modeled after MySQL JSON functions:
+**
+** https://dev.mysql.com/doc/refman/5.7/en/json.html
+**
+** For the time being, all JSON is stored as pure text. (We might add
+** a JSONB type in the future which stores a binary encoding of JSON in
+** a BLOB, but there is no support for JSONB in the current implementation.
+** This implementation parses JSON text at 250 MB/s, so it is hard to see
+** how JSONB might improve on that.)
+*/
+#if !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_JSON1)
+#if !defined(SQLITEINT_H)
+/* #include "sqlite3ext.h" */
+#endif
+SQLITE_EXTENSION_INIT1
+/* #include <assert.h> */
+/* #include <string.h> */
+/* #include <stdlib.h> */
+/* #include <stdarg.h> */
+
+/* Mark a function parameter as unused, to suppress nuisance compiler
+** warnings. */
+#ifndef UNUSED_PARAM
+# define UNUSED_PARAM(X) (void)(X)
+#endif
+
+#ifndef LARGEST_INT64
+# define LARGEST_INT64 (0xffffffff|(((sqlite3_int64)0x7fffffff)<<32))
+# define SMALLEST_INT64 (((sqlite3_int64)-1) - LARGEST_INT64)
+#endif
+
+#ifndef deliberate_fall_through
+# define deliberate_fall_through
+#endif
+
+/*
+** Versions of isspace(), isalnum(), isdigit() and isxdigit() to which
+** it is safe to pass signed char values.
+*/
+#ifdef sqlite3Isdigit
+ /* Use the SQLite core versions if this routine is part of the
+ ** SQLite amalgamation */
+# define safe_isdigit(x) sqlite3Isdigit(x)
+# define safe_isalnum(x) sqlite3Isalnum(x)
+# define safe_isxdigit(x) sqlite3Isxdigit(x)
+#else
+ /* Use the standard library for separate compilation */
+#include <ctype.h> /* amalgamator: keep */
+# define safe_isdigit(x) isdigit((unsigned char)(x))
+# define safe_isalnum(x) isalnum((unsigned char)(x))
+# define safe_isxdigit(x) isxdigit((unsigned char)(x))
+#endif
+
+/*
+** Rolling our own isspace() routine this way is twice as fast as
+** the library isspace() function, resulting in a 7% overall performance
+** increase for the parser.  (Ubuntu 14.10, gcc 4.8.4, x64, with -Os).
+*/
+static const char jsonIsSpace[] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+};
+#define safe_isspace(x) (jsonIsSpace[(unsigned char)x])
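+
+/* For reference: the only bytes flagged in the table above are 0x09
+** (tab), 0x0a (line-feed), 0x0d (carriage-return) and 0x20 (space) -
+** the four whitespace characters that RFC 7159 permits between JSON
+** tokens. */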
+
+#ifndef SQLITE_AMALGAMATION
+  /* Unsigned integer types.  These are already defined in sqliteInt.h,
+ ** but the definitions need to be repeated for separate compilation. */
+ typedef sqlite3_uint64 u64;
+ typedef unsigned int u32;
+ typedef unsigned short int u16;
+ typedef unsigned char u8;
+#endif
+
+/* Objects */
+typedef struct JsonString JsonString;
+typedef struct JsonNode JsonNode;
+typedef struct JsonParse JsonParse;
+
+/* An instance of this object represents a JSON string
+** under construction. Really, this is a generic string accumulator
+** that is also used to create strings other than JSON.
+*/
+struct JsonString {
+ sqlite3_context *pCtx; /* Function context - put error messages here */
+ char *zBuf; /* Append JSON content here */
+ u64 nAlloc; /* Bytes of storage available in zBuf[] */
+ u64 nUsed; /* Bytes of zBuf[] currently used */
+ u8 bStatic; /* True if zBuf is static space */
+ u8 bErr; /* True if an error has been encountered */
+ char zSpace[100]; /* Initial static space */
+};
+
+/* JSON type values
+*/
+#define JSON_NULL 0
+#define JSON_TRUE 1
+#define JSON_FALSE 2
+#define JSON_INT 3
+#define JSON_REAL 4
+#define JSON_STRING 5
+#define JSON_ARRAY 6
+#define JSON_OBJECT 7
+
+/* The "subtype" set for JSON values */
+#define JSON_SUBTYPE 74 /* Ascii for "J" */
+
+/*
+** Names of the various JSON types:
+*/
+static const char * const jsonType[] = {
+ "null", "true", "false", "integer", "real", "text", "array", "object"
+};
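+
+/* The jsonType[] entries are indexed by the JSON_* type values above,
+** so that, for example, jsonType[JSON_ARRAY] is "array". */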
+
+/* Bit values for the JsonNode.jnFlag field
+*/
+#define JNODE_RAW 0x01 /* Content is raw, not JSON encoded */
+#define JNODE_ESCAPE 0x02 /* Content is text with \ escapes */
+#define JNODE_REMOVE 0x04 /* Do not output */
+#define JNODE_REPLACE 0x08 /* Replace with JsonNode.u.iReplace */
+#define JNODE_PATCH 0x10 /* Patch with JsonNode.u.pPatch */
+#define JNODE_APPEND 0x20 /* More ARRAY/OBJECT entries at u.iAppend */
+#define JNODE_LABEL 0x40 /* Is a label of an object */
+
+
+/* A single node of parsed JSON
+*/
+struct JsonNode {
+ u8 eType; /* One of the JSON_ type values */
+ u8 jnFlags; /* JNODE flags */
+ u32 n; /* Bytes of content, or number of sub-nodes */
+ union {
+ const char *zJContent; /* Content for INT, REAL, and STRING */
+ u32 iAppend; /* More terms for ARRAY and OBJECT */
+ u32 iKey; /* Key for ARRAY objects in json_tree() */
+ u32 iReplace; /* Replacement content for JNODE_REPLACE */
+ JsonNode *pPatch; /* Node chain of patch for JNODE_PATCH */
+ } u;
+};
+
+/* A completely parsed JSON string
+*/
+struct JsonParse {
+ u32 nNode; /* Number of slots of aNode[] used */
+ u32 nAlloc; /* Number of slots of aNode[] allocated */
+ JsonNode *aNode; /* Array of nodes containing the parse */
+ const char *zJson; /* Original JSON string */
+ u32 *aUp; /* Index of parent of each node */
+ u8 oom; /* Set to true if out of memory */
+ u8 nErr; /* Number of errors seen */
+ u16 iDepth; /* Nesting depth */
+ int nJson; /* Length of the zJson string in bytes */
+ u32 iHold; /* Replace cache line with the lowest iHold value */
+};
+
+/*
+** Maximum nesting depth of JSON for this implementation.
+**
+** This limit is needed to avoid a stack overflow in the recursive
+** descent parser. A depth of 2000 is far deeper than any sane JSON
+** should go.
+*/
+#define JSON_MAX_DEPTH 2000
+
+/**************************************************************************
+** Utility routines for dealing with JsonString objects
+**************************************************************************/
+
+/* Set the JsonString object to an empty string
+*/
+static void jsonZero(JsonString *p){
+ p->zBuf = p->zSpace;
+ p->nAlloc = sizeof(p->zSpace);
+ p->nUsed = 0;
+ p->bStatic = 1;
+}
+
+/* Initialize the JsonString object
+*/
+static void jsonInit(JsonString *p, sqlite3_context *pCtx){
+ p->pCtx = pCtx;
+ p->bErr = 0;
+ jsonZero(p);
+}
+
+
+/* Free all allocated memory and reset the JsonString object back to its
+** initial state.
+*/
+static void jsonReset(JsonString *p){
+ if( !p->bStatic ) sqlite3_free(p->zBuf);
+ jsonZero(p);
+}
+
+
+/* Report an out-of-memory (OOM) condition
+*/
+static void jsonOom(JsonString *p){
+ p->bErr = 1;
+ sqlite3_result_error_nomem(p->pCtx);
+ jsonReset(p);
+}
+
+/* Enlarge p->zBuf so that it can hold at least N more bytes.
+** Return zero on success.  Return non-zero on an OOM error.
+*/
+static int jsonGrow(JsonString *p, u32 N){
+ u64 nTotal = N<p->nAlloc ? p->nAlloc*2 : p->nAlloc+N+10;
+ char *zNew;
+ if( p->bStatic ){
+ if( p->bErr ) return 1;
+ zNew = sqlite3_malloc64(nTotal);
+ if( zNew==0 ){
+ jsonOom(p);
+ return SQLITE_NOMEM;
+ }
+ memcpy(zNew, p->zBuf, (size_t)p->nUsed);
+ p->zBuf = zNew;
+ p->bStatic = 0;
+ }else{
+ zNew = sqlite3_realloc64(p->zBuf, nTotal);
+ if( zNew==0 ){
+ jsonOom(p);
+ return SQLITE_NOMEM;
+ }
+ p->zBuf = zNew;
+ }
+ p->nAlloc = nTotal;
+ return SQLITE_OK;
+}
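+
+/* The growth policy above gives amortized O(1) appends: a request that
+** fits within double the current allocation doubles the buffer, while
+** a larger request grows it to nAlloc+N+10 in a single step. */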
+
+/* Append N bytes from zIn onto the end of the JsonString string.
+*/
+static void jsonAppendRaw(JsonString *p, const char *zIn, u32 N){
+ if( N==0 ) return;
+ if( (N+p->nUsed >= p->nAlloc) && jsonGrow(p,N)!=0 ) return;
+ memcpy(p->zBuf+p->nUsed, zIn, N);
+ p->nUsed += N;
+}
+
+/* Append formatted text (not to exceed N bytes) to the JsonString.
+*/
+static void jsonPrintf(int N, JsonString *p, const char *zFormat, ...){
+ va_list ap;
+ if( (p->nUsed + N >= p->nAlloc) && jsonGrow(p, N) ) return;
+ va_start(ap, zFormat);
+ sqlite3_vsnprintf(N, p->zBuf+p->nUsed, zFormat, ap);
+ va_end(ap);
+ p->nUsed += (int)strlen(p->zBuf+p->nUsed);
+}
+
+/* Append a single character
+*/
+static void jsonAppendChar(JsonString *p, char c){
+ if( p->nUsed>=p->nAlloc && jsonGrow(p,1)!=0 ) return;
+ p->zBuf[p->nUsed++] = c;
+}
+
+/* Append a comma separator to the output buffer, if the previous
+** character is not '[' or '{'.
+*/
+static void jsonAppendSeparator(JsonString *p){
+ char c;
+ if( p->nUsed==0 ) return;
+ c = p->zBuf[p->nUsed-1];
+ if( c!='[' && c!='{' ) jsonAppendChar(p, ',');
+}
+
+/* Append the N-byte string in zIn to the end of the JsonString string
+** under construction. Enclose the string in "..." and escape
+** any double-quotes or backslash characters contained within the
+** string.
+*/
+static void jsonAppendString(JsonString *p, const char *zIn, u32 N){
+ u32 i;
+ if( (N+p->nUsed+2 >= p->nAlloc) && jsonGrow(p,N+2)!=0 ) return;
+ p->zBuf[p->nUsed++] = '"';
+ for(i=0; i<N; i++){
+ unsigned char c = ((unsigned const char*)zIn)[i];
+ if( c=='"' || c=='\\' ){
+ json_simple_escape:
+ if( (p->nUsed+N+3-i > p->nAlloc) && jsonGrow(p,N+3-i)!=0 ) return;
+ p->zBuf[p->nUsed++] = '\\';
+ }else if( c<=0x1f ){
+ static const char aSpecial[] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 'b', 't', 'n', 0, 'f', 'r', 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ };
+ assert( sizeof(aSpecial)==32 );
+ assert( aSpecial['\b']=='b' );
+ assert( aSpecial['\f']=='f' );
+ assert( aSpecial['\n']=='n' );
+ assert( aSpecial['\r']=='r' );
+ assert( aSpecial['\t']=='t' );
+ if( aSpecial[c] ){
+ c = aSpecial[c];
+ goto json_simple_escape;
+ }
+ if( (p->nUsed+N+7+i > p->nAlloc) && jsonGrow(p,N+7-i)!=0 ) return;
+ p->zBuf[p->nUsed++] = '\\';
+ p->zBuf[p->nUsed++] = 'u';
+ p->zBuf[p->nUsed++] = '0';
+ p->zBuf[p->nUsed++] = '0';
+ p->zBuf[p->nUsed++] = '0' + (c>>4);
+ c = "0123456789abcdef"[c&0xf];
+ }
+ p->zBuf[p->nUsed++] = c;
+ }
+ p->zBuf[p->nUsed++] = '"';
+ assert( p->nUsed<p->nAlloc );
+}
+
+/*
+** Append a function parameter value to the JSON string under
+** construction.
+*/
+static void jsonAppendValue(
+ JsonString *p, /* Append to this JSON string */
+ sqlite3_value *pValue /* Value to append */
+){
+ switch( sqlite3_value_type(pValue) ){
+ case SQLITE_NULL: {
+ jsonAppendRaw(p, "null", 4);
+ break;
+ }
+ case SQLITE_INTEGER:
+ case SQLITE_FLOAT: {
+ const char *z = (const char*)sqlite3_value_text(pValue);
+ u32 n = (u32)sqlite3_value_bytes(pValue);
+ jsonAppendRaw(p, z, n);
+ break;
+ }
+ case SQLITE_TEXT: {
+ const char *z = (const char*)sqlite3_value_text(pValue);
+ u32 n = (u32)sqlite3_value_bytes(pValue);
+ if( sqlite3_value_subtype(pValue)==JSON_SUBTYPE ){
+ jsonAppendRaw(p, z, n);
+ }else{
+ jsonAppendString(p, z, n);
+ }
+ break;
+ }
+ default: {
+ if( p->bErr==0 ){
+ sqlite3_result_error(p->pCtx, "JSON cannot hold BLOB values", -1);
+ p->bErr = 2;
+ jsonReset(p);
+ }
+ break;
+ }
+ }
+}
+
+
+/* Make the JSON in p the result of the SQL function.
+*/
+static void jsonResult(JsonString *p){
+ if( p->bErr==0 ){
+ sqlite3_result_text64(p->pCtx, p->zBuf, p->nUsed,
+ p->bStatic ? SQLITE_TRANSIENT : sqlite3_free,
+ SQLITE_UTF8);
+ jsonZero(p);
+ }
+ assert( p->bStatic );
+}
+
+/**************************************************************************
+** Utility routines for dealing with JsonNode and JsonParse objects
+**************************************************************************/
+
+/*
+** Return the number of consecutive JsonNode slots needed to represent
+** the parsed JSON at pNode. The minimum answer is 1. For ARRAY and
+** OBJECT types, the number might be larger.
+**
+** Appended elements are not counted. The value returned is the number
+** by which the JsonNode counter should increment in order to go to the
+** next peer value.
+*/
+static u32 jsonNodeSize(JsonNode *pNode){
+ return pNode->eType>=JSON_ARRAY ? pNode->n+1 : 1;
+}
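+
+/* As a worked example, the text '["a",1]' parses into three slots:
+** aNode[0] is a JSON_ARRAY with n==2, followed by a JSON_STRING and a
+** JSON_INT.  jsonNodeSize() on the array returns 3 (n+1), stepping
+** over the entire array, while on either scalar it returns 1. */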
+
+/*
+** Reclaim all memory allocated by a JsonParse object. But do not
+** delete the JsonParse object itself.
+*/
+static void jsonParseReset(JsonParse *pParse){
+ sqlite3_free(pParse->aNode);
+ pParse->aNode = 0;
+ pParse->nNode = 0;
+ pParse->nAlloc = 0;
+ sqlite3_free(pParse->aUp);
+ pParse->aUp = 0;
+}
+
+/*
+** Free a JsonParse object that was obtained from sqlite3_malloc().
+*/
+static void jsonParseFree(JsonParse *pParse){
+ jsonParseReset(pParse);
+ sqlite3_free(pParse);
+}
+
+/*
+** Convert the JsonNode pNode into a pure JSON string and
+** append that string to pOut.  Substructure is also included.
+*/
+static void jsonRenderNode(
+ JsonNode *pNode, /* The node to render */
+ JsonString *pOut, /* Write JSON here */
+ sqlite3_value **aReplace /* Replacement values */
+){
+ if( pNode->jnFlags & (JNODE_REPLACE|JNODE_PATCH) ){
+ if( pNode->jnFlags & JNODE_REPLACE ){
+ jsonAppendValue(pOut, aReplace[pNode->u.iReplace]);
+ return;
+ }
+ pNode = pNode->u.pPatch;
+ }
+ switch( pNode->eType ){
+ default: {
+ assert( pNode->eType==JSON_NULL );
+ jsonAppendRaw(pOut, "null", 4);
+ break;
+ }
+ case JSON_TRUE: {
+ jsonAppendRaw(pOut, "true", 4);
+ break;
+ }
+ case JSON_FALSE: {
+ jsonAppendRaw(pOut, "false", 5);
+ break;
+ }
+ case JSON_STRING: {
+ if( pNode->jnFlags & JNODE_RAW ){
+ jsonAppendString(pOut, pNode->u.zJContent, pNode->n);
+ break;
+ }
+ /* no break */ deliberate_fall_through
+ }
+ case JSON_REAL:
+ case JSON_INT: {
+ jsonAppendRaw(pOut, pNode->u.zJContent, pNode->n);
+ break;
+ }
+ case JSON_ARRAY: {
+ u32 j = 1;
+ jsonAppendChar(pOut, '[');
+ for(;;){
+ while( j<=pNode->n ){
+ if( (pNode[j].jnFlags & JNODE_REMOVE)==0 ){
+ jsonAppendSeparator(pOut);
+ jsonRenderNode(&pNode[j], pOut, aReplace);
+ }
+ j += jsonNodeSize(&pNode[j]);
+ }
+ if( (pNode->jnFlags & JNODE_APPEND)==0 ) break;
+ pNode = &pNode[pNode->u.iAppend];
+ j = 1;
+ }
+ jsonAppendChar(pOut, ']');
+ break;
+ }
+ case JSON_OBJECT: {
+ u32 j = 1;
+ jsonAppendChar(pOut, '{');
+ for(;;){
+ while( j<=pNode->n ){
+ if( (pNode[j+1].jnFlags & JNODE_REMOVE)==0 ){
+ jsonAppendSeparator(pOut);
+ jsonRenderNode(&pNode[j], pOut, aReplace);
+ jsonAppendChar(pOut, ':');
+ jsonRenderNode(&pNode[j+1], pOut, aReplace);
+ }
+ j += 1 + jsonNodeSize(&pNode[j+1]);
+ }
+ if( (pNode->jnFlags & JNODE_APPEND)==0 ) break;
+ pNode = &pNode[pNode->u.iAppend];
+ j = 1;
+ }
+ jsonAppendChar(pOut, '}');
+ break;
+ }
+ }
+}
+
+/*
+** Return a JsonNode and all its descendants as a JSON string.
+*/
+static void jsonReturnJson(
+ JsonNode *pNode, /* Node to return */
+ sqlite3_context *pCtx, /* Return value for this function */
+ sqlite3_value **aReplace /* Array of replacement values */
+){
+ JsonString s;
+ jsonInit(&s, pCtx);
+ jsonRenderNode(pNode, &s, aReplace);
+ jsonResult(&s);
+ sqlite3_result_subtype(pCtx, JSON_SUBTYPE);
+}
+
+/*
+** Translate a single byte of hex into an integer.
+** This routine only works if h really is a valid hexadecimal
+** character: 0..9a..fA..F
+*/
+static u8 jsonHexToInt(int h){
+ assert( (h>='0' && h<='9') || (h>='a' && h<='f') || (h>='A' && h<='F') );
+#ifdef SQLITE_EBCDIC
+ h += 9*(1&~(h>>4));
+#else
+ h += 9*(1&(h>>6));
+#endif
+ return (u8)(h & 0xf);
+}
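+
+/* To see why the ASCII branch works: for 'a'..'f' and 'A'..'F' bit 6
+** (0x40) is set, so (1&(h>>6)) is 1 and h gains 9.  For example 'a'
+** (0x61) becomes 0x6a, whose low nibble is 0xa == 10.  For '0'..'9'
+** bit 6 is clear and the low nibble is already the digit value. */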
+
+/*
+** Convert a 4-byte hex string into an integer
+*/
+static u32 jsonHexToInt4(const char *z){
+ u32 v;
+ assert( safe_isxdigit(z[0]) );
+ assert( safe_isxdigit(z[1]) );
+ assert( safe_isxdigit(z[2]) );
+ assert( safe_isxdigit(z[3]) );
+ v = (jsonHexToInt(z[0])<<12)
+ + (jsonHexToInt(z[1])<<8)
+ + (jsonHexToInt(z[2])<<4)
+ + jsonHexToInt(z[3]);
+ return v;
+}
+
+/*
+** Make the JsonNode the return value of the function.
+*/
+static void jsonReturn(
+ JsonNode *pNode, /* Node to return */
+ sqlite3_context *pCtx, /* Return value for this function */
+ sqlite3_value **aReplace /* Array of replacement values */
+){
+ switch( pNode->eType ){
+ default: {
+ assert( pNode->eType==JSON_NULL );
+ sqlite3_result_null(pCtx);
+ break;
+ }
+ case JSON_TRUE: {
+ sqlite3_result_int(pCtx, 1);
+ break;
+ }
+ case JSON_FALSE: {
+ sqlite3_result_int(pCtx, 0);
+ break;
+ }
+ case JSON_INT: {
+ sqlite3_int64 i = 0;
+ const char *z = pNode->u.zJContent;
+ if( z[0]=='-' ){ z++; }
+ while( z[0]>='0' && z[0]<='9' ){
+ unsigned v = *(z++) - '0';
+ if( i>=LARGEST_INT64/10 ){
+ if( i>LARGEST_INT64/10 ) goto int_as_real;
+ if( z[0]>='0' && z[0]<='9' ) goto int_as_real;
+ if( v==9 ) goto int_as_real;
+ if( v==8 ){
+ if( pNode->u.zJContent[0]=='-' ){
+ sqlite3_result_int64(pCtx, SMALLEST_INT64);
+ goto int_done;
+ }else{
+ goto int_as_real;
+ }
+ }
+ }
+ i = i*10 + v;
+ }
+ if( pNode->u.zJContent[0]=='-' ){ i = -i; }
+ sqlite3_result_int64(pCtx, i);
+ int_done:
+ break;
+ int_as_real: i=0; /* no break */ deliberate_fall_through
+ }
+ case JSON_REAL: {
+ double r;
+#ifdef SQLITE_AMALGAMATION
+ const char *z = pNode->u.zJContent;
+ sqlite3AtoF(z, &r, sqlite3Strlen30(z), SQLITE_UTF8);
+#else
+ r = strtod(pNode->u.zJContent, 0);
+#endif
+ sqlite3_result_double(pCtx, r);
+ break;
+ }
+ case JSON_STRING: {
+#if 0 /* Never happens because JNODE_RAW is only set by json_set(),
+ ** json_insert() and json_replace() and those routines do not
+ ** call jsonReturn() */
+ if( pNode->jnFlags & JNODE_RAW ){
+ sqlite3_result_text(pCtx, pNode->u.zJContent, pNode->n,
+ SQLITE_TRANSIENT);
+ }else
+#endif
+ assert( (pNode->jnFlags & JNODE_RAW)==0 );
+ if( (pNode->jnFlags & JNODE_ESCAPE)==0 ){
+ /* JSON formatted without any backslash-escapes */
+ sqlite3_result_text(pCtx, pNode->u.zJContent+1, pNode->n-2,
+ SQLITE_TRANSIENT);
+ }else{
+ /* Translate JSON formatted string into raw text */
+ u32 i;
+ u32 n = pNode->n;
+ const char *z = pNode->u.zJContent;
+ char *zOut;
+ u32 j;
+ zOut = sqlite3_malloc( n+1 );
+ if( zOut==0 ){
+ sqlite3_result_error_nomem(pCtx);
+ break;
+ }
+ for(i=1, j=0; i<n-1; i++){
+ char c = z[i];
+ if( c!='\\' ){
+ zOut[j++] = c;
+ }else{
+ c = z[++i];
+ if( c=='u' ){
+ u32 v = jsonHexToInt4(z+i+1);
+ i += 4;
+ if( v==0 ) break;
+ if( v<=0x7f ){
+ zOut[j++] = (char)v;
+ }else if( v<=0x7ff ){
+ zOut[j++] = (char)(0xc0 | (v>>6));
+ zOut[j++] = 0x80 | (v&0x3f);
+ }else{
+ u32 vlo;
+ if( (v&0xfc00)==0xd800
+ && i<n-6
+ && z[i+1]=='\\'
+ && z[i+2]=='u'
+ && ((vlo = jsonHexToInt4(z+i+3))&0xfc00)==0xdc00
+ ){
+ /* We have a surrogate pair */
+ v = ((v&0x3ff)<<10) + (vlo&0x3ff) + 0x10000;
+ i += 6;
+ zOut[j++] = 0xf0 | (v>>18);
+ zOut[j++] = 0x80 | ((v>>12)&0x3f);
+ zOut[j++] = 0x80 | ((v>>6)&0x3f);
+ zOut[j++] = 0x80 | (v&0x3f);
+ }else{
+ zOut[j++] = 0xe0 | (v>>12);
+ zOut[j++] = 0x80 | ((v>>6)&0x3f);
+ zOut[j++] = 0x80 | (v&0x3f);
+ }
+ }
+ }else{
+ if( c=='b' ){
+ c = '\b';
+ }else if( c=='f' ){
+ c = '\f';
+ }else if( c=='n' ){
+ c = '\n';
+ }else if( c=='r' ){
+ c = '\r';
+ }else if( c=='t' ){
+ c = '\t';
+ }
+ zOut[j++] = c;
+ }
+ }
+ }
+ zOut[j] = 0;
+ sqlite3_result_text(pCtx, zOut, j, sqlite3_free);
+ }
+ break;
+ }
+ case JSON_ARRAY:
+ case JSON_OBJECT: {
+ jsonReturnJson(pNode, pCtx, aReplace);
+ break;
+ }
+ }
+}
+
+/* Forward reference */
+static int jsonParseAddNode(JsonParse*,u32,u32,const char*);
+
+/*
+** A macro to hint to the compiler that a function should not be
+** inlined.
+*/
+#if defined(__GNUC__)
+# define JSON_NOINLINE __attribute__((noinline))
+#elif defined(_MSC_VER) && _MSC_VER>=1310
+# define JSON_NOINLINE __declspec(noinline)
+#else
+# define JSON_NOINLINE
+#endif
+
+
+static JSON_NOINLINE int jsonParseAddNodeExpand(
+ JsonParse *pParse, /* Append the node to this object */
+ u32 eType, /* Node type */
+ u32 n, /* Content size or sub-node count */
+ const char *zContent /* Content */
+){
+ u32 nNew;
+ JsonNode *pNew;
+ assert( pParse->nNode>=pParse->nAlloc );
+ if( pParse->oom ) return -1;
+ nNew = pParse->nAlloc*2 + 10;
+ pNew = sqlite3_realloc64(pParse->aNode, sizeof(JsonNode)*nNew);
+ if( pNew==0 ){
+ pParse->oom = 1;
+ return -1;
+ }
+ pParse->nAlloc = nNew;
+ pParse->aNode = pNew;
+ assert( pParse->nNode<pParse->nAlloc );
+ return jsonParseAddNode(pParse, eType, n, zContent);
+}
+
+/*
+** Create a new JsonNode instance based on the arguments and append that
+** instance to the JsonParse. Return the index in pParse->aNode[] of the
+** new node, or -1 if a memory allocation fails.
+*/
+static int jsonParseAddNode(
+ JsonParse *pParse, /* Append the node to this object */
+ u32 eType, /* Node type */
+ u32 n, /* Content size or sub-node count */
+ const char *zContent /* Content */
+){
+ JsonNode *p;
+ if( pParse->nNode>=pParse->nAlloc ){
+ return jsonParseAddNodeExpand(pParse, eType, n, zContent);
+ }
+ p = &pParse->aNode[pParse->nNode];
+ p->eType = (u8)eType;
+ p->jnFlags = 0;
+ p->n = n;
+ p->u.zJContent = zContent;
+ return pParse->nNode++;
+}
+
+/*
+** Return true if z[] begins with 4 (or more) hexadecimal digits
+*/
+static int jsonIs4Hex(const char *z){
+ int i;
+ for(i=0; i<4; i++) if( !safe_isxdigit(z[i]) ) return 0;
+ return 1;
+}
+
+/*
+** Parse a single JSON value which begins at pParse->zJson[i]. Return the
+** index of the first character past the end of the value parsed.
+**
+** Return negative for a syntax error. Special cases: return -2 if the
+** first non-whitespace character is '}' and return -3 if the first
+** non-whitespace character is ']'.
+*/
+static int jsonParseValue(JsonParse *pParse, u32 i){
+ char c;
+ u32 j;
+ int iThis;
+ int x;
+ JsonNode *pNode;
+ const char *z = pParse->zJson;
+ while( safe_isspace(z[i]) ){ i++; }
+ if( (c = z[i])=='{' ){
+ /* Parse object */
+ iThis = jsonParseAddNode(pParse, JSON_OBJECT, 0, 0);
+ if( iThis<0 ) return -1;
+ for(j=i+1;;j++){
+ while( safe_isspace(z[j]) ){ j++; }
+ if( ++pParse->iDepth > JSON_MAX_DEPTH ) return -1;
+ x = jsonParseValue(pParse, j);
+ if( x<0 ){
+ pParse->iDepth--;
+ if( x==(-2) && pParse->nNode==(u32)iThis+1 ) return j+1;
+ return -1;
+ }
+ if( pParse->oom ) return -1;
+ pNode = &pParse->aNode[pParse->nNode-1];
+ if( pNode->eType!=JSON_STRING ) return -1;
+ pNode->jnFlags |= JNODE_LABEL;
+ j = x;
+ while( safe_isspace(z[j]) ){ j++; }
+ if( z[j]!=':' ) return -1;
+ j++;
+ x = jsonParseValue(pParse, j);
+ pParse->iDepth--;
+ if( x<0 ) return -1;
+ j = x;
+ while( safe_isspace(z[j]) ){ j++; }
+ c = z[j];
+ if( c==',' ) continue;
+ if( c!='}' ) return -1;
+ break;
+ }
+ pParse->aNode[iThis].n = pParse->nNode - (u32)iThis - 1;
+ return j+1;
+ }else if( c=='[' ){
+ /* Parse array */
+ iThis = jsonParseAddNode(pParse, JSON_ARRAY, 0, 0);
+ if( iThis<0 ) return -1;
+ for(j=i+1;;j++){
+ while( safe_isspace(z[j]) ){ j++; }
+ if( ++pParse->iDepth > JSON_MAX_DEPTH ) return -1;
+ x = jsonParseValue(pParse, j);
+ pParse->iDepth--;
+ if( x<0 ){
+ if( x==(-3) && pParse->nNode==(u32)iThis+1 ) return j+1;
+ return -1;
+ }
+ j = x;
+ while( safe_isspace(z[j]) ){ j++; }
+ c = z[j];
+ if( c==',' ) continue;
+ if( c!=']' ) return -1;
+ break;
+ }
+ pParse->aNode[iThis].n = pParse->nNode - (u32)iThis - 1;
+ return j+1;
+ }else if( c=='"' ){
+ /* Parse string */
+ u8 jnFlags = 0;
+ j = i+1;
+ for(;;){
+ c = z[j];
+ if( (c & ~0x1f)==0 ){
+ /* Control characters are not allowed in strings */
+ return -1;
+ }
+ if( c=='\\' ){
+ c = z[++j];
+ if( c=='"' || c=='\\' || c=='/' || c=='b' || c=='f'
+ || c=='n' || c=='r' || c=='t'
+ || (c=='u' && jsonIs4Hex(z+j+1)) ){
+ jnFlags = JNODE_ESCAPE;
+ }else{
+ return -1;
+ }
+ }else if( c=='"' ){
+ break;
+ }
+ j++;
+ }
+ jsonParseAddNode(pParse, JSON_STRING, j+1-i, &z[i]);
+ if( !pParse->oom ) pParse->aNode[pParse->nNode-1].jnFlags = jnFlags;
+ return j+1;
+ }else if( c=='n'
+ && strncmp(z+i,"null",4)==0
+ && !safe_isalnum(z[i+4]) ){
+ jsonParseAddNode(pParse, JSON_NULL, 0, 0);
+ return i+4;
+ }else if( c=='t'
+ && strncmp(z+i,"true",4)==0
+ && !safe_isalnum(z[i+4]) ){
+ jsonParseAddNode(pParse, JSON_TRUE, 0, 0);
+ return i+4;
+ }else if( c=='f'
+ && strncmp(z+i,"false",5)==0
+ && !safe_isalnum(z[i+5]) ){
+ jsonParseAddNode(pParse, JSON_FALSE, 0, 0);
+ return i+5;
+ }else if( c=='-' || (c>='0' && c<='9') ){
+ /* Parse number */
+ u8 seenDP = 0;
+ u8 seenE = 0;
+ assert( '-' < '0' );
+ if( c<='0' ){
+ j = c=='-' ? i+1 : i;
+ if( z[j]=='0' && z[j+1]>='0' && z[j+1]<='9' ) return -1;
+ }
+ j = i+1;
+ for(;; j++){
+ c = z[j];
+ if( c>='0' && c<='9' ) continue;
+ if( c=='.' ){
+ if( z[j-1]=='-' ) return -1;
+ if( seenDP ) return -1;
+ seenDP = 1;
+ continue;
+ }
+ if( c=='e' || c=='E' ){
+ if( z[j-1]<'0' ) return -1;
+ if( seenE ) return -1;
+ seenDP = seenE = 1;
+ c = z[j+1];
+ if( c=='+' || c=='-' ){
+ j++;
+ c = z[j+1];
+ }
+ if( c<'0' || c>'9' ) return -1;
+ continue;
+ }
+ break;
+ }
+ if( z[j-1]<'0' ) return -1;
+ jsonParseAddNode(pParse, seenDP ? JSON_REAL : JSON_INT,
+ j - i, &z[i]);
+ return j;
+ }else if( c=='}' ){
+ return -2; /* End of {...} */
+ }else if( c==']' ){
+ return -3; /* End of [...] */
+ }else if( c==0 ){
+ return 0; /* End of file */
+ }else{
+ return -1; /* Syntax error */
+ }
+}
+
+/*
+** Parse a complete JSON string. Return 0 on success or non-zero if there
+** are any errors. If an error occurs, free all memory associated with
+** pParse.
+**
+** pParse is uninitialized when this routine is called.
+*/
+static int jsonParse(
+ JsonParse *pParse, /* Initialize and fill this JsonParse object */
+ sqlite3_context *pCtx, /* Report errors here */
+ const char *zJson /* Input JSON text to be parsed */
+){
+ int i;
+ memset(pParse, 0, sizeof(*pParse));
+ if( zJson==0 ) return 1;
+ pParse->zJson = zJson;
+ i = jsonParseValue(pParse, 0);
+ if( pParse->oom ) i = -1;
+ if( i>0 ){
+ assert( pParse->iDepth==0 );
+ while( safe_isspace(zJson[i]) ) i++;
+ if( zJson[i] ) i = -1;
+ }
+ if( i<=0 ){
+ if( pCtx!=0 ){
+ if( pParse->oom ){
+ sqlite3_result_error_nomem(pCtx);
+ }else{
+ sqlite3_result_error(pCtx, "malformed JSON", -1);
+ }
+ }
+ jsonParseReset(pParse);
+ return 1;
+ }
+ return 0;
+}
+
+/* Mark node i of pParse as being a child of iParent. Call recursively
+** to fill in all the descendants of node i.
+*/
+static void jsonParseFillInParentage(JsonParse *pParse, u32 i, u32 iParent){
+ JsonNode *pNode = &pParse->aNode[i];
+ u32 j;
+ pParse->aUp[i] = iParent;
+ switch( pNode->eType ){
+ case JSON_ARRAY: {
+ for(j=1; j<=pNode->n; j += jsonNodeSize(pNode+j)){
+ jsonParseFillInParentage(pParse, i+j, i);
+ }
+ break;
+ }
+ case JSON_OBJECT: {
+ for(j=1; j<=pNode->n; j += jsonNodeSize(pNode+j+1)+1){
+ pParse->aUp[i+j] = i;
+ jsonParseFillInParentage(pParse, i+j+1, i);
+ }
+ break;
+ }
+ default: {
+ break;
+ }
+ }
+}
+
+/*
+** Compute the parentage of all nodes in a completed parse.
+*/
+static int jsonParseFindParents(JsonParse *pParse){
+ u32 *aUp;
+ assert( pParse->aUp==0 );
+ aUp = pParse->aUp = sqlite3_malloc64( sizeof(u32)*pParse->nNode );
+ if( aUp==0 ){
+ pParse->oom = 1;
+ return SQLITE_NOMEM;
+ }
+ jsonParseFillInParentage(pParse, 0, 0);
+ return SQLITE_OK;
+}
+
+/*
+** Magic number used for the JSON parse cache in sqlite3_get_auxdata()
+*/
+#define JSON_CACHE_ID (-429938) /* First cache entry */
+#define JSON_CACHE_SZ 4 /* Max number of cache entries */
+
+/*
+** Obtain a complete parse of the JSON found in the first argument
+** of the argv array. Use the sqlite3_get_auxdata() cache for this
+** parse if it is available. If the cache is not available or if it
+** is no longer valid, parse the JSON again and return the new parse,
+** and also register the new parse so that it will be available for
+** future sqlite3_get_auxdata() calls.
+*/
+static JsonParse *jsonParseCached(
+ sqlite3_context *pCtx,
+ sqlite3_value **argv,
+ sqlite3_context *pErrCtx
+){
+ const char *zJson = (const char*)sqlite3_value_text(argv[0]);
+ int nJson = sqlite3_value_bytes(argv[0]);
+ JsonParse *p;
+ JsonParse *pMatch = 0;
+ int iKey;
+ int iMinKey = 0;
+ u32 iMinHold = 0xffffffff;
+ u32 iMaxHold = 0;
+ if( zJson==0 ) return 0;
+ for(iKey=0; iKey<JSON_CACHE_SZ; iKey++){
+ p = (JsonParse*)sqlite3_get_auxdata(pCtx, JSON_CACHE_ID+iKey);
+ if( p==0 ){
+ iMinKey = iKey;
+ break;
+ }
+ if( pMatch==0
+ && p->nJson==nJson
+ && memcmp(p->zJson,zJson,nJson)==0
+ ){
+ p->nErr = 0;
+ pMatch = p;
+ }else if( p->iHold<iMinHold ){
+ iMinHold = p->iHold;
+ iMinKey = iKey;
+ }
+ if( p->iHold>iMaxHold ){
+ iMaxHold = p->iHold;
+ }
+ }
+ if( pMatch ){
+ pMatch->nErr = 0;
+ pMatch->iHold = iMaxHold+1;
+ return pMatch;
+ }
+ p = sqlite3_malloc64( sizeof(*p) + nJson + 1 );
+ if( p==0 ){
+ sqlite3_result_error_nomem(pCtx);
+ return 0;
+ }
+ memset(p, 0, sizeof(*p));
+ p->zJson = (char*)&p[1];
+ memcpy((char*)p->zJson, zJson, nJson+1);
+ if( jsonParse(p, pErrCtx, p->zJson) ){
+ sqlite3_free(p);
+ return 0;
+ }
+ p->nJson = nJson;
+ p->iHold = iMaxHold+1;
+ sqlite3_set_auxdata(pCtx, JSON_CACHE_ID+iMinKey, p,
+ (void(*)(void*))jsonParseFree);
+ return (JsonParse*)sqlite3_get_auxdata(pCtx, JSON_CACHE_ID+iMinKey);
+}
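+
+/* The iHold counters above implement a small LRU scheme: every cache
+** hit is re-stamped with one more than the current maximum iHold, and
+** when all JSON_CACHE_SZ slots are full, the slot with the smallest
+** iHold - the least recently used - is the one overwritten. */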
+
+/*
+** Compare the OBJECT label at pNode against zKey,nKey. Return true on
+** a match.
+*/
+static int jsonLabelCompare(JsonNode *pNode, const char *zKey, u32 nKey){
+ if( pNode->jnFlags & JNODE_RAW ){
+ if( pNode->n!=nKey ) return 0;
+ return strncmp(pNode->u.zJContent, zKey, nKey)==0;
+ }else{
+ if( pNode->n!=nKey+2 ) return 0;
+ return strncmp(pNode->u.zJContent+1, zKey, nKey)==0;
+ }
+}
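+
+/* For example, for the key abc a raw label stores zJContent=="abc"
+** with n==3, while a parsed label still carries its quotes, storing
+** "\"abc\"" with n==5 - hence the nKey+2 comparison above. */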
+
+/* forward declaration */
+static JsonNode *jsonLookupAppend(JsonParse*,const char*,int*,const char**);
+
+/*
+** Search along zPath to find the node specified. Return a pointer
+** to that node, or NULL if zPath is malformed or if there is no such
+** node.
+**
+** If pApnd!=0, then try to append new nodes to complete zPath if it is
+** possible to do so and if no existing node corresponds to zPath. If
+** new nodes are appended *pApnd is set to 1.
+*/
+static JsonNode *jsonLookupStep(
+ JsonParse *pParse, /* The JSON to search */
+ u32 iRoot, /* Begin the search at this node */
+ const char *zPath, /* The path to search */
+ int *pApnd, /* Append nodes to complete path if not NULL */
+ const char **pzErr /* Make *pzErr point to any syntax error in zPath */
+){
+ u32 i, j, nKey;
+ const char *zKey;
+ JsonNode *pRoot = &pParse->aNode[iRoot];
+ if( zPath[0]==0 ) return pRoot;
+ if( pRoot->jnFlags & JNODE_REPLACE ) return 0;
+ if( zPath[0]=='.' ){
+ if( pRoot->eType!=JSON_OBJECT ) return 0;
+ zPath++;
+ if( zPath[0]=='"' ){
+ zKey = zPath + 1;
+ for(i=1; zPath[i] && zPath[i]!='"'; i++){}
+ nKey = i-1;
+ if( zPath[i] ){
+ i++;
+ }else{
+ *pzErr = zPath;
+ return 0;
+ }
+ }else{
+ zKey = zPath;
+ for(i=0; zPath[i] && zPath[i]!='.' && zPath[i]!='['; i++){}
+ nKey = i;
+ }
+ if( nKey==0 ){
+ *pzErr = zPath;
+ return 0;
+ }
+ j = 1;
+ for(;;){
+ while( j<=pRoot->n ){
+ if( jsonLabelCompare(pRoot+j, zKey, nKey) ){
+ return jsonLookupStep(pParse, iRoot+j+1, &zPath[i], pApnd, pzErr);
+ }
+ j++;
+ j += jsonNodeSize(&pRoot[j]);
+ }
+ if( (pRoot->jnFlags & JNODE_APPEND)==0 ) break;
+ iRoot += pRoot->u.iAppend;
+ pRoot = &pParse->aNode[iRoot];
+ j = 1;
+ }
+ if( pApnd ){
+ u32 iStart, iLabel;
+ JsonNode *pNode;
+ iStart = jsonParseAddNode(pParse, JSON_OBJECT, 2, 0);
+ iLabel = jsonParseAddNode(pParse, JSON_STRING, nKey, zKey);
+ zPath += i;
+ pNode = jsonLookupAppend(pParse, zPath, pApnd, pzErr);
+ if( pParse->oom ) return 0;
+ if( pNode ){
+ pRoot = &pParse->aNode[iRoot];
+ pRoot->u.iAppend = iStart - iRoot;
+ pRoot->jnFlags |= JNODE_APPEND;
+ pParse->aNode[iLabel].jnFlags |= JNODE_RAW;
+ }
+ return pNode;
+ }
+ }else if( zPath[0]=='[' ){
+ i = 0;
+ j = 1;
+ while( safe_isdigit(zPath[j]) ){
+ i = i*10 + zPath[j] - '0';
+ j++;
+ }
+ if( j<2 || zPath[j]!=']' ){
+ if( zPath[1]=='#' ){
+ JsonNode *pBase = pRoot;
+ int iBase = iRoot;
+ if( pRoot->eType!=JSON_ARRAY ) return 0;
+ for(;;){
+ while( j<=pBase->n ){
+ if( (pBase[j].jnFlags & JNODE_REMOVE)==0 ) i++;
+ j += jsonNodeSize(&pBase[j]);
+ }
+ if( (pBase->jnFlags & JNODE_APPEND)==0 ) break;
+ iBase += pBase->u.iAppend;
+ pBase = &pParse->aNode[iBase];
+ j = 1;
+ }
+ j = 2;
+ if( zPath[2]=='-' && safe_isdigit(zPath[3]) ){
+ unsigned int x = 0;
+ j = 3;
+ do{
+ x = x*10 + zPath[j] - '0';
+ j++;
+ }while( safe_isdigit(zPath[j]) );
+ if( x>i ) return 0;
+ i -= x;
+ }
+ if( zPath[j]!=']' ){
+ *pzErr = zPath;
+ return 0;
+ }
+ }else{
+ *pzErr = zPath;
+ return 0;
+ }
+ }
+ if( pRoot->eType!=JSON_ARRAY ) return 0;
+ zPath += j + 1;
+ j = 1;
+ for(;;){
+ while( j<=pRoot->n && (i>0 || (pRoot[j].jnFlags & JNODE_REMOVE)!=0) ){
+ if( (pRoot[j].jnFlags & JNODE_REMOVE)==0 ) i--;
+ j += jsonNodeSize(&pRoot[j]);
+ }
+ if( (pRoot->jnFlags & JNODE_APPEND)==0 ) break;
+ iRoot += pRoot->u.iAppend;
+ pRoot = &pParse->aNode[iRoot];
+ j = 1;
+ }
+ if( j<=pRoot->n ){
+ return jsonLookupStep(pParse, iRoot+j, zPath, pApnd, pzErr);
+ }
+ if( i==0 && pApnd ){
+ u32 iStart;
+ JsonNode *pNode;
+ iStart = jsonParseAddNode(pParse, JSON_ARRAY, 1, 0);
+ pNode = jsonLookupAppend(pParse, zPath, pApnd, pzErr);
+ if( pParse->oom ) return 0;
+ if( pNode ){
+ pRoot = &pParse->aNode[iRoot];
+ pRoot->u.iAppend = iStart - iRoot;
+ pRoot->jnFlags |= JNODE_APPEND;
+ }
+ return pNode;
+ }
+ }else{
+ *pzErr = zPath;
+ }
+ return 0;
+}
+
+/*
+** Append content to pParse that will complete zPath. Return a pointer
+** to the inserted node, or return NULL if the append fails.
+*/
+static JsonNode *jsonLookupAppend(
+ JsonParse *pParse, /* Append content to the JSON parse */
+ const char *zPath, /* Description of content to append */
+ int *pApnd, /* Set this flag to 1 */
+ const char **pzErr /* Make this point to any syntax error */
+){
+ *pApnd = 1;
+ if( zPath[0]==0 ){
+ jsonParseAddNode(pParse, JSON_NULL, 0, 0);
+ return pParse->oom ? 0 : &pParse->aNode[pParse->nNode-1];
+ }
+ if( zPath[0]=='.' ){
+ jsonParseAddNode(pParse, JSON_OBJECT, 0, 0);
+ }else if( strncmp(zPath,"[0]",3)==0 ){
+ jsonParseAddNode(pParse, JSON_ARRAY, 0, 0);
+ }else{
+ return 0;
+ }
+ if( pParse->oom ) return 0;
+ return jsonLookupStep(pParse, pParse->nNode-1, zPath, pApnd, pzErr);
+}
+
+/*
+** Return the text of a syntax error message on a JSON path. Space is
+** obtained from sqlite3_malloc().
+*/
+static char *jsonPathSyntaxError(const char *zErr){
+ return sqlite3_mprintf("JSON path error near '%q'", zErr);
+}
+
+/*
+** Do a node lookup using zPath. Return a pointer to the node on success.
+** Return NULL if not found or if there is an error.
+**
+** On an error, write an error message into pCtx and increment the
+** pParse->nErr counter.
+**
+** If pApnd!=NULL then try to append missing nodes and set *pApnd = 1 if
+** nodes are appended.
+*/
+static JsonNode *jsonLookup(
+ JsonParse *pParse, /* The JSON to search */
+ const char *zPath, /* The path to search */
+ int *pApnd, /* Append nodes to complete path if not NULL */
+ sqlite3_context *pCtx /* Report errors here, if not NULL */
+){
+ const char *zErr = 0;
+ JsonNode *pNode = 0;
+ char *zMsg;
+
+ if( zPath==0 ) return 0;
+ if( zPath[0]!='$' ){
+ zErr = zPath;
+ goto lookup_err;
+ }
+ zPath++;
+ pNode = jsonLookupStep(pParse, 0, zPath, pApnd, &zErr);
+ if( zErr==0 ) return pNode;
+
+lookup_err:
+ pParse->nErr++;
+ assert( zErr!=0 && pCtx!=0 );
+ zMsg = jsonPathSyntaxError(zErr);
+ if( zMsg ){
+ sqlite3_result_error(pCtx, zMsg, -1);
+ sqlite3_free(zMsg);
+ }else{
+ sqlite3_result_error_nomem(pCtx);
+ }
+ return 0;
+}
+
+
+/*
+** Report the wrong number of arguments for json_insert(), json_replace()
+** or json_set().
+*/
+static void jsonWrongNumArgs(
+ sqlite3_context *pCtx,
+ const char *zFuncName
+){
+ char *zMsg = sqlite3_mprintf("json_%s() needs an odd number of arguments",
+ zFuncName);
+ sqlite3_result_error(pCtx, zMsg, -1);
+ sqlite3_free(zMsg);
+}
+
+/*
+** Mark all NULL entries in the Object passed in as JNODE_REMOVE.
+*/
+static void jsonRemoveAllNulls(JsonNode *pNode){
+ int i, n;
+ assert( pNode->eType==JSON_OBJECT );
+ n = pNode->n;
+ for(i=2; i<=n; i += jsonNodeSize(&pNode[i])+1){
+ switch( pNode[i].eType ){
+ case JSON_NULL:
+ pNode[i].jnFlags |= JNODE_REMOVE;
+ break;
+ case JSON_OBJECT:
+ jsonRemoveAllNulls(&pNode[i]);
+ break;
+ }
+ }
+}
+
+
+/****************************************************************************
+** SQL functions used for testing and debugging
+****************************************************************************/
+
+#ifdef SQLITE_DEBUG
+/*
+** The json_parse(JSON) function returns a string which describes
+** a parse of the JSON provided, or NULL if the JSON is not
+** well-formed.
+*/
+static void jsonParseFunc(
+ sqlite3_context *ctx,
+ int argc,
+ sqlite3_value **argv
+){
+ JsonString s; /* Output string - not real JSON */
+ JsonParse x; /* The parse */
+ u32 i;
+
+ assert( argc==1 );
+ if( jsonParse(&x, ctx, (const char*)sqlite3_value_text(argv[0])) ) return;
+ jsonParseFindParents(&x);
+ jsonInit(&s, ctx);
+ for(i=0; i<x.nNode; i++){
+ const char *zType;
+ if( x.aNode[i].jnFlags & JNODE_LABEL ){
+ assert( x.aNode[i].eType==JSON_STRING );
+ zType = "label";
+ }else{
+ zType = jsonType[x.aNode[i].eType];
+ }
+ jsonPrintf(100, &s,"node %3u: %7s n=%-4d up=%-4d",
+ i, zType, x.aNode[i].n, x.aUp[i]);
+ if( x.aNode[i].u.zJContent!=0 ){
+ jsonAppendRaw(&s, " ", 1);
+ jsonAppendRaw(&s, x.aNode[i].u.zJContent, x.aNode[i].n);
+ }
+ jsonAppendRaw(&s, "\n", 1);
+ }
+ jsonParseReset(&x);
+ jsonResult(&s);
+}
+
+/*
+** The json_test1(JSON) function returns true (1) if the input is JSON
+** text generated by another json function.  It returns false (0) if the
+** input is not known to be JSON.
+*/
+static void jsonTest1Func(
+ sqlite3_context *ctx,
+ int argc,
+ sqlite3_value **argv
+){
+ UNUSED_PARAM(argc);
+ sqlite3_result_int(ctx, sqlite3_value_subtype(argv[0])==JSON_SUBTYPE);
+}
+#endif /* SQLITE_DEBUG */
+
+/****************************************************************************
+** Scalar SQL function implementations
+****************************************************************************/
+
+/*
+** Implementation of the json_quote(VALUE) function.  Return a JSON value
+** corresponding to the SQL value input. Mostly this means putting
+** double-quotes around strings and returning the unquoted string "null"
+** when given a NULL input.
+*/
+static void jsonQuoteFunc(
+ sqlite3_context *ctx,
+ int argc,
+ sqlite3_value **argv
+){
+ JsonString jx;
+ UNUSED_PARAM(argc);
+
+ jsonInit(&jx, ctx);
+ jsonAppendValue(&jx, argv[0]);
+ jsonResult(&jx);
+ sqlite3_result_subtype(ctx, JSON_SUBTYPE);
+}
+
+/*
+** Implementation of the json_array(VALUE,...) function. Return a JSON
+** array that contains all values given in arguments. Or if any argument
+** is a BLOB, throw an error.
+*/
+static void jsonArrayFunc(
+ sqlite3_context *ctx,
+ int argc,
+ sqlite3_value **argv
+){
+ int i;
+ JsonString jx;
+
+ jsonInit(&jx, ctx);
+ jsonAppendChar(&jx, '[');
+ for(i=0; i<argc; i++){
+ jsonAppendSeparator(&jx);
+ jsonAppendValue(&jx, argv[i]);
+ }
+ jsonAppendChar(&jx, ']');
+ jsonResult(&jx);
+ sqlite3_result_subtype(ctx, JSON_SUBTYPE);
+}
+
+
+/*
+** json_array_length(JSON)
+** json_array_length(JSON, PATH)
+**
+** Return the number of elements in the top-level JSON array, or in the
+** array found at PATH when the two-argument form is used.
+** Return 0 if the targeted value is not a well-formed JSON array.
+*/
+static void jsonArrayLengthFunc(
+ sqlite3_context *ctx,
+ int argc,
+ sqlite3_value **argv
+){
+ JsonParse *p; /* The parse */
+ sqlite3_int64 n = 0;
+ u32 i;
+ JsonNode *pNode;
+
+ p = jsonParseCached(ctx, argv, ctx);
+ if( p==0 ) return;
+ assert( p->nNode );
+ if( argc==2 ){
+ const char *zPath = (const char*)sqlite3_value_text(argv[1]);
+ pNode = jsonLookup(p, zPath, 0, ctx);
+ }else{
+ pNode = p->aNode;
+ }
+ if( pNode==0 ){
+ return;
+ }
+ if( pNode->eType==JSON_ARRAY ){
+ assert( (pNode->jnFlags & JNODE_APPEND)==0 );
+ for(i=1; i<=pNode->n; n++){
+ i += jsonNodeSize(&pNode[i]);
+ }
+ }
+ sqlite3_result_int64(ctx, n);
+}
+
+/*
+** json_extract(JSON, PATH, ...)
+**
+** Return the element described by PATH. Return NULL if there is no
+** PATH element. If there are multiple PATHs, then return a JSON array
+** with the result from each path. Throw an error if the JSON or any PATH
+** is malformed.
+*/
+static void jsonExtractFunc(
+ sqlite3_context *ctx,
+ int argc,
+ sqlite3_value **argv
+){
+ JsonParse *p; /* The parse */
+ JsonNode *pNode;
+ const char *zPath;
+ JsonString jx;
+ int i;
+
+ if( argc<2 ) return;
+ p = jsonParseCached(ctx, argv, ctx);
+ if( p==0 ) return;
+ jsonInit(&jx, ctx);
+ jsonAppendChar(&jx, '[');
+ for(i=1; i<argc; i++){
+ zPath = (const char*)sqlite3_value_text(argv[i]);
+ pNode = jsonLookup(p, zPath, 0, ctx);
+ if( p->nErr ) break;
+ if( argc>2 ){
+ jsonAppendSeparator(&jx);
+ if( pNode ){
+ jsonRenderNode(pNode, &jx, 0);
+ }else{
+ jsonAppendRaw(&jx, "null", 4);
+ }
+ }else if( pNode ){
+ jsonReturn(pNode, ctx, 0);
+ }
+ }
+ if( argc>2 && i==argc ){
+ jsonAppendChar(&jx, ']');
+ jsonResult(&jx);
+ sqlite3_result_subtype(ctx, JSON_SUBTYPE);
+ }
+ jsonReset(&jx);
+}
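+
+/* Usage examples, per the documented behavior of json_extract():
+**
+**    json_extract('{"a":2,"c":[4,5]}', '$.c[0]')      -> 4
+**    json_extract('{"a":2,"c":[4,5]}', '$.c', '$.a')  -> '[[4,5],2]'
+*/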
+
+/* This is the RFC 7396 MergePatch algorithm.
+*/
+static JsonNode *jsonMergePatch(
+ JsonParse *pParse, /* The JSON parser that contains the TARGET */
+ u32 iTarget, /* Node of the TARGET in pParse */
+ JsonNode *pPatch /* The PATCH */
+){
+ u32 i, j;
+ u32 iRoot;
+ JsonNode *pTarget;
+ if( pPatch->eType!=JSON_OBJECT ){
+ return pPatch;
+ }
+ assert( iTarget>=0 && iTarget<pParse->nNode );
+ pTarget = &pParse->aNode[iTarget];
+ assert( (pPatch->jnFlags & JNODE_APPEND)==0 );
+ if( pTarget->eType!=JSON_OBJECT ){
+ jsonRemoveAllNulls(pPatch);
+ return pPatch;
+ }
+ iRoot = iTarget;
+ for(i=1; i<pPatch->n; i += jsonNodeSize(&pPatch[i+1])+1){
+ u32 nKey;
+ const char *zKey;
+ assert( pPatch[i].eType==JSON_STRING );
+ assert( pPatch[i].jnFlags & JNODE_LABEL );
+ nKey = pPatch[i].n;
+ zKey = pPatch[i].u.zJContent;
+ assert( (pPatch[i].jnFlags & JNODE_RAW)==0 );
+ for(j=1; j<pTarget->n; j += jsonNodeSize(&pTarget[j+1])+1 ){
+ assert( pTarget[j].eType==JSON_STRING );
+ assert( pTarget[j].jnFlags & JNODE_LABEL );
+ assert( (pPatch[i].jnFlags & JNODE_RAW)==0 );
+ if( pTarget[j].n==nKey && strncmp(pTarget[j].u.zJContent,zKey,nKey)==0 ){
+ if( pTarget[j+1].jnFlags & (JNODE_REMOVE|JNODE_PATCH) ) break;
+ if( pPatch[i+1].eType==JSON_NULL ){
+ pTarget[j+1].jnFlags |= JNODE_REMOVE;
+ }else{
+ JsonNode *pNew = jsonMergePatch(pParse, iTarget+j+1, &pPatch[i+1]);
+ if( pNew==0 ) return 0;
+ pTarget = &pParse->aNode[iTarget];
+ if( pNew!=&pTarget[j+1] ){
+ pTarget[j+1].u.pPatch = pNew;
+ pTarget[j+1].jnFlags |= JNODE_PATCH;
+ }
+ }
+ break;
+ }
+ }
+ if( j>=pTarget->n && pPatch[i+1].eType!=JSON_NULL ){
+ int iStart, iPatch;
+ iStart = jsonParseAddNode(pParse, JSON_OBJECT, 2, 0);
+ jsonParseAddNode(pParse, JSON_STRING, nKey, zKey);
+ iPatch = jsonParseAddNode(pParse, JSON_TRUE, 0, 0);
+ if( pParse->oom ) return 0;
+ jsonRemoveAllNulls(pPatch);
+ pTarget = &pParse->aNode[iTarget];
+ pParse->aNode[iRoot].jnFlags |= JNODE_APPEND;
+ pParse->aNode[iRoot].u.iAppend = iStart - iRoot;
+ iRoot = iStart;
+ pParse->aNode[iPatch].jnFlags |= JNODE_PATCH;
+ pParse->aNode[iPatch].u.pPatch = &pPatch[i+1];
+ }
+ }
+ return pTarget;
+}
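+
+/* An RFC 7396 example: patching the target {"a":"b","c":"d"} with the
+** patch {"a":"z","c":null} yields {"a":"z"}.  A null in the patch
+** deletes the corresponding key, which is why the code above sets
+** JNODE_REMOVE when a patch value is JSON_NULL. */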
+
+/*
+** Implementation of the json_patch(JSON1,JSON2) function.  Return a JSON
+** object that is the result of running the RFC 7396 MergePatch() algorithm
+** on the two arguments.
+*/
+static void jsonPatchFunc(
+ sqlite3_context *ctx,
+ int argc,
+ sqlite3_value **argv
+){
+ JsonParse x; /* The JSON that is being patched */
+ JsonParse y; /* The patch */
+ JsonNode *pResult; /* The result of the merge */
+
+ UNUSED_PARAM(argc);
+ if( jsonParse(&x, ctx, (const char*)sqlite3_value_text(argv[0])) ) return;
+ if( jsonParse(&y, ctx, (const char*)sqlite3_value_text(argv[1])) ){
+ jsonParseReset(&x);
+ return;
+ }
+ pResult = jsonMergePatch(&x, 0, y.aNode);
+ assert( pResult!=0 || x.oom );
+ if( pResult ){
+ jsonReturnJson(pResult, ctx, 0);
+ }else{
+ sqlite3_result_error_nomem(ctx);
+ }
+ jsonParseReset(&x);
+ jsonParseReset(&y);
+}
+
+
+/*
+** Implementation of the json_object(NAME,VALUE,...) function. Return a JSON
+** object that contains all of the name/value pairs given as arguments.  If
+** any name is not a string or any value is a BLOB, throw an error.
+*/
+static void jsonObjectFunc(
+ sqlite3_context *ctx,
+ int argc,
+ sqlite3_value **argv
+){
+ int i;
+ JsonString jx;
+ const char *z;
+ u32 n;
+
+ if( argc&1 ){
+ sqlite3_result_error(ctx, "json_object() requires an even number "
+ "of arguments", -1);
+ return;
+ }
+ jsonInit(&jx, ctx);
+ jsonAppendChar(&jx, '{');
+ for(i=0; i<argc; i+=2){
+ if( sqlite3_value_type(argv[i])!=SQLITE_TEXT ){
+ sqlite3_result_error(ctx, "json_object() labels must be TEXT", -1);
+ jsonReset(&jx);
+ return;
+ }
+ jsonAppendSeparator(&jx);
+ z = (const char*)sqlite3_value_text(argv[i]);
+ n = (u32)sqlite3_value_bytes(argv[i]);
+ jsonAppendString(&jx, z, n);
+ jsonAppendChar(&jx, ':');
+ jsonAppendValue(&jx, argv[i+1]);
+ }
+ jsonAppendChar(&jx, '}');
+ jsonResult(&jx);
+ sqlite3_result_subtype(ctx, JSON_SUBTYPE);
+}
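+
+/* Usage example:  json_object('a',2,'c',4)  ->  '{"a":2,"c":4}' */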
+
+
+/*
+** json_remove(JSON, PATH, ...)
+**
+** Remove the named elements from JSON and return the result.  Malformed
+** JSON or PATH arguments result in an error.
+*/
+static void jsonRemoveFunc(
+ sqlite3_context *ctx,
+ int argc,
+ sqlite3_value **argv
+){
+ JsonParse x; /* The parse */
+ JsonNode *pNode;
+ const char *zPath;
+ u32 i;
+
+ if( argc<1 ) return;
+ if( jsonParse(&x, ctx, (const char*)sqlite3_value_text(argv[0])) ) return;
+ assert( x.nNode );
+ for(i=1; i<(u32)argc; i++){
+ zPath = (const char*)sqlite3_value_text(argv[i]);
+ if( zPath==0 ) goto remove_done;
+ pNode = jsonLookup(&x, zPath, 0, ctx);
+ if( x.nErr ) goto remove_done;
+ if( pNode ) pNode->jnFlags |= JNODE_REMOVE;
+ }
+ if( (x.aNode[0].jnFlags & JNODE_REMOVE)==0 ){
+ jsonReturnJson(x.aNode, ctx, 0);
+ }
+remove_done:
+ jsonParseReset(&x);
+}
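+
+/* Usage example:  json_remove('[0,1,2,3,4]', '$[2]')  ->  '[0,1,3,4]' */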
+
+/*
+** json_replace(JSON, PATH, VALUE, ...)
+**
+** Replace the value at PATH with VALUE. If PATH does not already exist,
+** this routine is a no-op. If JSON or PATH is malformed, throw an error.
+*/
+static void jsonReplaceFunc(
+ sqlite3_context *ctx,
+ int argc,
+ sqlite3_value **argv
+){
+ JsonParse x; /* The parse */
+ JsonNode *pNode;
+ const char *zPath;
+ u32 i;
+
+ if( argc<1 ) return;
+ if( (argc&1)==0 ) {
+ jsonWrongNumArgs(ctx, "replace");
+ return;
+ }
+ if( jsonParse(&x, ctx, (const char*)sqlite3_value_text(argv[0])) ) return;
+ assert( x.nNode );
+ for(i=1; i<(u32)argc; i+=2){
+ zPath = (const char*)sqlite3_value_text(argv[i]);
+ pNode = jsonLookup(&x, zPath, 0, ctx);
+ if( x.nErr ) goto replace_err;
+ if( pNode ){
+ pNode->jnFlags |= (u8)JNODE_REPLACE;
+ pNode->u.iReplace = i + 1;
+ }
+ }
+ if( x.aNode[0].jnFlags & JNODE_REPLACE ){
+ sqlite3_result_value(ctx, argv[x.aNode[0].u.iReplace]);
+ }else{
+ jsonReturnJson(x.aNode, ctx, argv);
+ }
+replace_err:
+ jsonParseReset(&x);
+}
+
+/*
+** json_set(JSON, PATH, VALUE, ...)
+**
+** Set the value at PATH to VALUE. Create the PATH if it does not already
+** exist. Overwrite existing values that do exist.
+** If JSON or PATH is malformed, throw an error.
+**
+** json_insert(JSON, PATH, VALUE, ...)
+**
+** Create PATH and initialize it to VALUE. If PATH already exists, this
+** routine is a no-op. If JSON or PATH is malformed, throw an error.
+*/
+static void jsonSetFunc(
+ sqlite3_context *ctx,
+ int argc,
+ sqlite3_value **argv
+){
+ JsonParse x; /* The parse */
+ JsonNode *pNode;
+ const char *zPath;
+ u32 i;
+ int bApnd;
+ int bIsSet = *(int*)sqlite3_user_data(ctx);
+
+ if( argc<1 ) return;
+ if( (argc&1)==0 ) {
+ jsonWrongNumArgs(ctx, bIsSet ? "set" : "insert");
+ return;
+ }
+ if( jsonParse(&x, ctx, (const char*)sqlite3_value_text(argv[0])) ) return;
+ assert( x.nNode );
+ for(i=1; i<(u32)argc; i+=2){
+ zPath = (const char*)sqlite3_value_text(argv[i]);
+ bApnd = 0;
+ pNode = jsonLookup(&x, zPath, &bApnd, ctx);
+ if( x.oom ){
+ sqlite3_result_error_nomem(ctx);
+ goto jsonSetDone;
+ }else if( x.nErr ){
+ goto jsonSetDone;
+ }else if( pNode && (bApnd || bIsSet) ){
+ pNode->jnFlags |= (u8)JNODE_REPLACE;
+ pNode->u.iReplace = i + 1;
+ }
+ }
+ if( x.aNode[0].jnFlags & JNODE_REPLACE ){
+ sqlite3_result_value(ctx, argv[x.aNode[0].u.iReplace]);
+ }else{
+ jsonReturnJson(x.aNode, ctx, argv);
+ }
+jsonSetDone:
+ jsonParseReset(&x);
+}
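+
+/* The difference between the two entry points, per the documented
+** behavior:
+**
+**    json_insert('{"a":2}', '$.a', 99)  ->  '{"a":2}'    (no-op)
+**    json_set('{"a":2}', '$.a', 99)     ->  '{"a":99}'   (overwrite)
+**
+** Both will create a path that does not yet exist, since a freshly
+** appended node satisfies the (bApnd || bIsSet) test above. */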
+
+/*
+** json_type(JSON)
+** json_type(JSON, PATH)
+**
+** Return the top-level "type" of a JSON string. Throw an error if
+** either the JSON or PATH inputs are not well-formed.
+*/
+static void jsonTypeFunc(
+ sqlite3_context *ctx,
+ int argc,
+ sqlite3_value **argv
+){
+ JsonParse *p; /* The parse */
+ const char *zPath;
+ JsonNode *pNode;
+
+ p = jsonParseCached(ctx, argv, ctx);
+ if( p==0 ) return;
+ if( argc==2 ){
+ zPath = (const char*)sqlite3_value_text(argv[1]);
+ pNode = jsonLookup(p, zPath, 0, ctx);
+ }else{
+ pNode = p->aNode;
+ }
+ if( pNode ){
+ sqlite3_result_text(ctx, jsonType[pNode->eType], -1, SQLITE_STATIC);
+ }
+}
+
+/*
+** json_valid(JSON)
+**
+** Return 1 if JSON is a well-formed JSON string according to RFC-7159.
+** Return 0 otherwise.
+*/
+static void jsonValidFunc(
+ sqlite3_context *ctx,
+ int argc,
+ sqlite3_value **argv
+){
+ JsonParse *p; /* The parse */
+ UNUSED_PARAM(argc);
+ p = jsonParseCached(ctx, argv, 0);
+ sqlite3_result_int(ctx, p!=0);
+}
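+
+/* Usage examples:  json_valid('{"x":35}') -> 1,  json_valid('{"x":35') -> 0 */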
+
+
+/****************************************************************************
+** Aggregate SQL function implementations
+****************************************************************************/
+/*
+** json_group_array(VALUE)
+**
+** Return a JSON array composed of all values in the aggregate.
+*/
+static void jsonArrayStep(
+ sqlite3_context *ctx,
+ int argc,
+ sqlite3_value **argv
+){
+ JsonString *pStr;
+ UNUSED_PARAM(argc);
+ pStr = (JsonString*)sqlite3_aggregate_context(ctx, sizeof(*pStr));
+ if( pStr ){
+ if( pStr->zBuf==0 ){
+ jsonInit(pStr, ctx);
+ jsonAppendChar(pStr, '[');
+ }else if( pStr->nUsed>1 ){
+ jsonAppendChar(pStr, ',');
+ pStr->pCtx = ctx;
+ }
+ jsonAppendValue(pStr, argv[0]);
+ }
+}
+static void jsonArrayCompute(sqlite3_context *ctx, int isFinal){
+ JsonString *pStr;
+ pStr = (JsonString*)sqlite3_aggregate_context(ctx, 0);
+ if( pStr ){
+ pStr->pCtx = ctx;
+ jsonAppendChar(pStr, ']');
+ if( pStr->bErr ){
+ if( pStr->bErr==1 ) sqlite3_result_error_nomem(ctx);
+ assert( pStr->bStatic );
+ }else if( isFinal ){
+ sqlite3_result_text(ctx, pStr->zBuf, (int)pStr->nUsed,
+ pStr->bStatic ? SQLITE_TRANSIENT : sqlite3_free);
+ pStr->bStatic = 1;
+ }else{
+ sqlite3_result_text(ctx, pStr->zBuf, (int)pStr->nUsed, SQLITE_TRANSIENT);
+ pStr->nUsed--;
+ }
+ }else{
+ sqlite3_result_text(ctx, "[]", 2, SQLITE_STATIC);
+ }
+ sqlite3_result_subtype(ctx, JSON_SUBTYPE);
+}
+static void jsonArrayValue(sqlite3_context *ctx){
+ jsonArrayCompute(ctx, 0);
+}
+static void jsonArrayFinal(sqlite3_context *ctx){
+ jsonArrayCompute(ctx, 1);
+}
+
+#ifndef SQLITE_OMIT_WINDOWFUNC
+/*
+** This method works for both json_group_array() and json_group_object().
+** It works by removing the first element of the group by searching forward
+** to the first comma (",") that is not within a string and deleting all
+** text through that comma.
+*/
+static void jsonGroupInverse(
+ sqlite3_context *ctx,
+ int argc,
+ sqlite3_value **argv
+){
+ unsigned int i;
+ int inStr = 0;
+ int nNest = 0;
+ char *z;
+ char c;
+ JsonString *pStr;
+ UNUSED_PARAM(argc);
+ UNUSED_PARAM(argv);
+ pStr = (JsonString*)sqlite3_aggregate_context(ctx, 0);
+#ifdef NEVER
+ /* pStr is always non-NULL since jsonArrayStep() or jsonObjectStep() will
+  ** always have been called to initialize it */
+ if( NEVER(!pStr) ) return;
+#endif
+ z = pStr->zBuf;
+ for(i=1; (c = z[i])!=',' || inStr || nNest; i++){
+ if( i>=pStr->nUsed ){
+ pStr->nUsed = 1;
+ return;
+ }
+ if( c=='"' ){
+ inStr = !inStr;
+ }else if( c=='\\' ){
+ i++;
+ }else if( !inStr ){
+ if( c=='{' || c=='[' ) nNest++;
+ if( c=='}' || c==']' ) nNest--;
+ }
+ }
+ pStr->nUsed -= i;
+ memmove(&z[1], &z[i+1], (size_t)pStr->nUsed-1);
+}
+#else
+# define jsonGroupInverse 0
+#endif
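+
+/* A sketch of the inverse step: if the accumulator holds '[1,2,3' the
+** scan above finds the first top-level comma (after the '1') and
+** slides the remaining text left, leaving '[2,3'.  Strings and nested
+** '[]'/'{}' pairs are skipped so that commas inside them are never
+** mistaken for element separators. */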
+
+
+/*
+** json_group_object(NAME,VALUE)
+**
+** Return a JSON object composed of all names and values in the aggregate.
+*/
+static void jsonObjectStep(
+ sqlite3_context *ctx,
+ int argc,
+ sqlite3_value **argv
+){
+ JsonString *pStr;
+ const char *z;
+ u32 n;
+ UNUSED_PARAM(argc);
+ pStr = (JsonString*)sqlite3_aggregate_context(ctx, sizeof(*pStr));
+ if( pStr ){
+ if( pStr->zBuf==0 ){
+ jsonInit(pStr, ctx);
+ jsonAppendChar(pStr, '{');
+ }else if( pStr->nUsed>1 ){
+ jsonAppendChar(pStr, ',');
+ pStr->pCtx = ctx;
+ }
+ z = (const char*)sqlite3_value_text(argv[0]);
+ n = (u32)sqlite3_value_bytes(argv[0]);
+ jsonAppendString(pStr, z, n);
+ jsonAppendChar(pStr, ':');
+ jsonAppendValue(pStr, argv[1]);
+ }
+}
+static void jsonObjectCompute(sqlite3_context *ctx, int isFinal){
+ JsonString *pStr;
+ pStr = (JsonString*)sqlite3_aggregate_context(ctx, 0);
+ if( pStr ){
+ jsonAppendChar(pStr, '}');
+ if( pStr->bErr ){
+ if( pStr->bErr==1 ) sqlite3_result_error_nomem(ctx);
+ assert( pStr->bStatic );
+ }else if( isFinal ){
+ sqlite3_result_text(ctx, pStr->zBuf, (int)pStr->nUsed,
+ pStr->bStatic ? SQLITE_TRANSIENT : sqlite3_free);
+ pStr->bStatic = 1;
+ }else{
+ sqlite3_result_text(ctx, pStr->zBuf, (int)pStr->nUsed, SQLITE_TRANSIENT);
+ pStr->nUsed--;
+ }
+ }else{
+ sqlite3_result_text(ctx, "{}", 2, SQLITE_STATIC);
+ }
+ sqlite3_result_subtype(ctx, JSON_SUBTYPE);
+}
+static void jsonObjectValue(sqlite3_context *ctx){
+ jsonObjectCompute(ctx, 0);
+}
+static void jsonObjectFinal(sqlite3_context *ctx){
+ jsonObjectCompute(ctx, 1);
+}
+
+
+
+#ifndef SQLITE_OMIT_VIRTUALTABLE
+/****************************************************************************
+** The json_each virtual table
+****************************************************************************/
+typedef struct JsonEachCursor JsonEachCursor;
+struct JsonEachCursor {
+ sqlite3_vtab_cursor base; /* Base class - must be first */
+ u32 iRowid; /* The rowid */
+ u32 iBegin; /* The first node of the scan */
+ u32 i; /* Index in sParse.aNode[] of current row */
+ u32 iEnd; /* EOF when i equals or exceeds this value */
+ u8 eType; /* Type of top-level element */
+ u8 bRecursive; /* True for json_tree(). False for json_each() */
+ char *zJson; /* Input JSON */
+ char *zRoot; /* Path by which to filter zJson */
+ JsonParse sParse; /* Parse of the input JSON */
+};
+
+/* Constructor for the json_each virtual table */
+static int jsonEachConnect(
+ sqlite3 *db,
+ void *pAux,
+ int argc, const char *const*argv,
+ sqlite3_vtab **ppVtab,
+ char **pzErr
+){
+ sqlite3_vtab *pNew;
+ int rc;
+
+/* Column numbers */
+#define JEACH_KEY 0
+#define JEACH_VALUE 1
+#define JEACH_TYPE 2
+#define JEACH_ATOM 3
+#define JEACH_ID 4
+#define JEACH_PARENT 5
+#define JEACH_FULLKEY 6
+#define JEACH_PATH 7
+/* The xBestIndex method assumes that the JSON and ROOT columns are
+** the last two columns in the table.  Should this ever change, be
+** sure to update the xBestIndex method. */
+#define JEACH_JSON 8
+#define JEACH_ROOT 9
+
+ UNUSED_PARAM(pzErr);
+ UNUSED_PARAM(argv);
+ UNUSED_PARAM(argc);
+ UNUSED_PARAM(pAux);
+ rc = sqlite3_declare_vtab(db,
+ "CREATE TABLE x(key,value,type,atom,id,parent,fullkey,path,"
+ "json HIDDEN,root HIDDEN)");
+ if( rc==SQLITE_OK ){
+ pNew = *ppVtab = sqlite3_malloc( sizeof(*pNew) );
+ if( pNew==0 ) return SQLITE_NOMEM;
+ memset(pNew, 0, sizeof(*pNew));
+ sqlite3_vtab_config(db, SQLITE_VTAB_INNOCUOUS);
+ }
+ return rc;
+}
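+
+/* Usage example, per the documented behavior of json_each():
+**
+**    SELECT key, value FROM json_each('{"a":1,"b":2}');
+**    -- two rows:  key='a' value=1,  key='b' value=2
+*/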
+
+/* destructor for json_each virtual table */
+static int jsonEachDisconnect(sqlite3_vtab *pVtab){
+ sqlite3_free(pVtab);
+ return SQLITE_OK;
+}
+
+/* constructor for a JsonEachCursor object for json_each(). */
+static int jsonEachOpenEach(sqlite3_vtab *p, sqlite3_vtab_cursor **ppCursor){
+ JsonEachCursor *pCur;
+
+ UNUSED_PARAM(p);
+ pCur = sqlite3_malloc( sizeof(*pCur) );
+ if( pCur==0 ) return SQLITE_NOMEM;
+ memset(pCur, 0, sizeof(*pCur));
+ *ppCursor = &pCur->base;
+ return SQLITE_OK;
+}
+
+/* constructor for a JsonEachCursor object for json_tree(). */
+static int jsonEachOpenTree(sqlite3_vtab *p, sqlite3_vtab_cursor **ppCursor){
+ int rc = jsonEachOpenEach(p, ppCursor);
+ if( rc==SQLITE_OK ){
+ JsonEachCursor *pCur = (JsonEachCursor*)*ppCursor;
+ pCur->bRecursive = 1;
+ }
+ return rc;
+}
+
+/* Reset a JsonEachCursor back to its original state. Free any memory
+** held. */
+static void jsonEachCursorReset(JsonEachCursor *p){
+ sqlite3_free(p->zJson);
+ sqlite3_free(p->zRoot);
+ jsonParseReset(&p->sParse);
+ p->iRowid = 0;
+ p->i = 0;
+ p->iEnd = 0;
+ p->eType = 0;
+ p->zJson = 0;
+ p->zRoot = 0;
+}
+
+/* Destructor for a jsonEachCursor object */
+static int jsonEachClose(sqlite3_vtab_cursor *cur){
+ JsonEachCursor *p = (JsonEachCursor*)cur;
+ jsonEachCursorReset(p);
+ sqlite3_free(cur);
+ return SQLITE_OK;
+}
+
+/* Return TRUE if the jsonEachCursor object has been advanced off the end
+** of the JSON object */
+static int jsonEachEof(sqlite3_vtab_cursor *cur){
+ JsonEachCursor *p = (JsonEachCursor*)cur;
+ return p->i >= p->iEnd;
+}
+
+/* Advance the cursor to the next element for json_tree() */
+static int jsonEachNext(sqlite3_vtab_cursor *cur){
+ JsonEachCursor *p = (JsonEachCursor*)cur;
+ if( p->bRecursive ){
+ if( p->sParse.aNode[p->i].jnFlags & JNODE_LABEL ) p->i++;
+ p->i++;
+ p->iRowid++;
+ if( p->i<p->iEnd ){
+ u32 iUp = p->sParse.aUp[p->i];
+ JsonNode *pUp = &p->sParse.aNode[iUp];
+ p->eType = pUp->eType;
+ if( pUp->eType==JSON_ARRAY ){
+ if( iUp==p->i-1 ){
+ pUp->u.iKey = 0;
+ }else{
+ pUp->u.iKey++;
+ }
+ }
+ }
+ }else{
+ switch( p->eType ){
+ case JSON_ARRAY: {
+ p->i += jsonNodeSize(&p->sParse.aNode[p->i]);
+ p->iRowid++;
+ break;
+ }
+ case JSON_OBJECT: {
+ p->i += 1 + jsonNodeSize(&p->sParse.aNode[p->i+1]);
+ p->iRowid++;
+ break;
+ }
+ default: {
+ p->i = p->iEnd;
+ break;
+ }
+ }
+ }
+ return SQLITE_OK;
+}
+
+/* Append the name of the path for element i to pStr
+*/
+static void jsonEachComputePath(
+ JsonEachCursor *p, /* The cursor */
+ JsonString *pStr, /* Write the path here */
+ u32 i /* Path to this element */
+){
+ JsonNode *pNode, *pUp;
+ u32 iUp;
+ if( i==0 ){
+ jsonAppendChar(pStr, '$');
+ return;
+ }
+ iUp = p->sParse.aUp[i];
+ jsonEachComputePath(p, pStr, iUp);
+ pNode = &p->sParse.aNode[i];
+ pUp = &p->sParse.aNode[iUp];
+ if( pUp->eType==JSON_ARRAY ){
+ jsonPrintf(30, pStr, "[%d]", pUp->u.iKey);
+ }else{
+ assert( pUp->eType==JSON_OBJECT );
+ if( (pNode->jnFlags & JNODE_LABEL)==0 ) pNode--;
+ assert( pNode->eType==JSON_STRING );
+ assert( pNode->jnFlags & JNODE_LABEL );
+ jsonPrintf(pNode->n+1, pStr, ".%.*s", pNode->n-2, pNode->u.zJContent+1);
+ }
+}
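+
+/* For example, given the input '{"a":[5,{"b":6}]}', the path of the node
+** holding 6 is computed by recursing up through its parents: the routine
+** first emits '$' for the root, then '.a' for the object member, '[1]'
+** for the array index, and finally '.b', giving '$.a[1].b'. */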
+
+/* Return the value of a column */
+static int jsonEachColumn(
+ sqlite3_vtab_cursor *cur, /* The cursor */
+ sqlite3_context *ctx, /* First argument to sqlite3_result_...() */
+ int i /* Which column to return */
+){
+ JsonEachCursor *p = (JsonEachCursor*)cur;
+ JsonNode *pThis = &p->sParse.aNode[p->i];
+ switch( i ){
+ case JEACH_KEY: {
+ if( p->i==0 ) break;
+ if( p->eType==JSON_OBJECT ){
+ jsonReturn(pThis, ctx, 0);
+ }else if( p->eType==JSON_ARRAY ){
+ u32 iKey;
+ if( p->bRecursive ){
+ if( p->iRowid==0 ) break;
+ iKey = p->sParse.aNode[p->sParse.aUp[p->i]].u.iKey;
+ }else{
+ iKey = p->iRowid;
+ }
+ sqlite3_result_int64(ctx, (sqlite3_int64)iKey);
+ }
+ break;
+ }
+ case JEACH_VALUE: {
+ if( pThis->jnFlags & JNODE_LABEL ) pThis++;
+ jsonReturn(pThis, ctx, 0);
+ break;
+ }
+ case JEACH_TYPE: {
+ if( pThis->jnFlags & JNODE_LABEL ) pThis++;
+ sqlite3_result_text(ctx, jsonType[pThis->eType], -1, SQLITE_STATIC);
+ break;
+ }
+ case JEACH_ATOM: {
+ if( pThis->jnFlags & JNODE_LABEL ) pThis++;
+ if( pThis->eType>=JSON_ARRAY ) break;
+ jsonReturn(pThis, ctx, 0);
+ break;
+ }
+ case JEACH_ID: {
+ sqlite3_result_int64(ctx,
+ (sqlite3_int64)p->i + ((pThis->jnFlags & JNODE_LABEL)!=0));
+ break;
+ }
+ case JEACH_PARENT: {
+ if( p->i>p->iBegin && p->bRecursive ){
+ sqlite3_result_int64(ctx, (sqlite3_int64)p->sParse.aUp[p->i]);
+ }
+ break;
+ }
+ case JEACH_FULLKEY: {
+ JsonString x;
+ jsonInit(&x, ctx);
+ if( p->bRecursive ){
+ jsonEachComputePath(p, &x, p->i);
+ }else{
+ if( p->zRoot ){
+ jsonAppendRaw(&x, p->zRoot, (int)strlen(p->zRoot));
+ }else{
+ jsonAppendChar(&x, '$');
+ }
+ if( p->eType==JSON_ARRAY ){
+ jsonPrintf(30, &x, "[%d]", p->iRowid);
+ }else if( p->eType==JSON_OBJECT ){
+ jsonPrintf(pThis->n, &x, ".%.*s", pThis->n-2, pThis->u.zJContent+1);
+ }
+ }
+ jsonResult(&x);
+ break;
+ }
+ case JEACH_PATH: {
+ if( p->bRecursive ){
+ JsonString x;
+ jsonInit(&x, ctx);
+ jsonEachComputePath(p, &x, p->sParse.aUp[p->i]);
+ jsonResult(&x);
+ break;
+ }
+      /* For json_each() path and root are the same, so fall through
+ ** into the root case */
+ /* no break */ deliberate_fall_through
+ }
+ default: {
+ const char *zRoot = p->zRoot;
+ if( zRoot==0 ) zRoot = "$";
+ sqlite3_result_text(ctx, zRoot, -1, SQLITE_STATIC);
+ break;
+ }
+ case JEACH_JSON: {
+ assert( i==JEACH_JSON );
+ sqlite3_result_text(ctx, p->sParse.zJson, -1, SQLITE_STATIC);
+ break;
+ }
+ }
+ return SQLITE_OK;
+}
+
+/* Return the current rowid value */
+static int jsonEachRowid(sqlite3_vtab_cursor *cur, sqlite_int64 *pRowid){
+ JsonEachCursor *p = (JsonEachCursor*)cur;
+ *pRowid = p->iRowid;
+ return SQLITE_OK;
+}
+
+/* The query strategy is to look for an equality constraint on the json
+** column. Without such a constraint, the table cannot operate. idxNum is
+** 1 if the constraint is found, 3 if the constraint and zRoot are found,
+** and 0 otherwise.
+*/
+static int jsonEachBestIndex(
+ sqlite3_vtab *tab,
+ sqlite3_index_info *pIdxInfo
+){
+ int i; /* Loop counter or computed array index */
+ int aIdx[2]; /* Index of constraints for JSON and ROOT */
+ int unusableMask = 0; /* Mask of unusable JSON and ROOT constraints */
+ int idxMask = 0; /* Mask of usable == constraints JSON and ROOT */
+ const struct sqlite3_index_constraint *pConstraint;
+
+ /* This implementation assumes that JSON and ROOT are the last two
+ ** columns in the table */
+ assert( JEACH_ROOT == JEACH_JSON+1 );
+ UNUSED_PARAM(tab);
+ aIdx[0] = aIdx[1] = -1;
+ pConstraint = pIdxInfo->aConstraint;
+ for(i=0; i<pIdxInfo->nConstraint; i++, pConstraint++){
+ int iCol;
+ int iMask;
+ if( pConstraint->iColumn < JEACH_JSON ) continue;
+ iCol = pConstraint->iColumn - JEACH_JSON;
+ assert( iCol==0 || iCol==1 );
+ iMask = 1 << iCol;
+ if( pConstraint->usable==0 ){
+ unusableMask |= iMask;
+ }else if( pConstraint->op==SQLITE_INDEX_CONSTRAINT_EQ ){
+ aIdx[iCol] = i;
+ idxMask |= iMask;
+ }
+ }
+ if( (unusableMask & ~idxMask)!=0 ){
+ /* If there are any unusable constraints on JSON or ROOT, then reject
+ ** this entire plan */
+ return SQLITE_CONSTRAINT;
+ }
+ if( aIdx[0]<0 ){
+    /* No JSON input. Leave estimatedCost at the huge value that it was
+    ** initialized to in order to discourage the query planner from
+    ** selecting this plan. */
+ pIdxInfo->idxNum = 0;
+ }else{
+ pIdxInfo->estimatedCost = 1.0;
+ i = aIdx[0];
+ pIdxInfo->aConstraintUsage[i].argvIndex = 1;
+ pIdxInfo->aConstraintUsage[i].omit = 1;
+ if( aIdx[1]<0 ){
+ pIdxInfo->idxNum = 1; /* Only JSON supplied. Plan 1 */
+ }else{
+ i = aIdx[1];
+ pIdxInfo->aConstraintUsage[i].argvIndex = 2;
+ pIdxInfo->aConstraintUsage[i].omit = 1;
+ pIdxInfo->idxNum = 3; /* Both JSON and ROOT are supplied. Plan 3 */
+ }
+ }
+ return SQLITE_OK;
+}
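+
+/* To illustrate the three plans (function arguments become equality
+** constraints on the hidden JSON and ROOT columns):
+**
+**     SELECT * FROM json_each;                  -- idxNum==0: no JSON input
+**     SELECT * FROM json_each('[1,2,3]');       -- idxNum==1: JSON only
+**     SELECT * FROM json_each('{"a":1}','$');   -- idxNum==3: JSON and ROOT
+*/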
+
+/* Start a search on a new JSON string */
+static int jsonEachFilter(
+ sqlite3_vtab_cursor *cur,
+ int idxNum, const char *idxStr,
+ int argc, sqlite3_value **argv
+){
+ JsonEachCursor *p = (JsonEachCursor*)cur;
+ const char *z;
+ const char *zRoot = 0;
+ sqlite3_int64 n;
+
+ UNUSED_PARAM(idxStr);
+ UNUSED_PARAM(argc);
+ jsonEachCursorReset(p);
+ if( idxNum==0 ) return SQLITE_OK;
+ z = (const char*)sqlite3_value_text(argv[0]);
+ if( z==0 ) return SQLITE_OK;
+ n = sqlite3_value_bytes(argv[0]);
+ p->zJson = sqlite3_malloc64( n+1 );
+ if( p->zJson==0 ) return SQLITE_NOMEM;
+ memcpy(p->zJson, z, (size_t)n+1);
+ if( jsonParse(&p->sParse, 0, p->zJson) ){
+ int rc = SQLITE_NOMEM;
+ if( p->sParse.oom==0 ){
+ sqlite3_free(cur->pVtab->zErrMsg);
+ cur->pVtab->zErrMsg = sqlite3_mprintf("malformed JSON");
+ if( cur->pVtab->zErrMsg ) rc = SQLITE_ERROR;
+ }
+ jsonEachCursorReset(p);
+ return rc;
+ }else if( p->bRecursive && jsonParseFindParents(&p->sParse) ){
+ jsonEachCursorReset(p);
+ return SQLITE_NOMEM;
+ }else{
+ JsonNode *pNode = 0;
+ if( idxNum==3 ){
+ const char *zErr = 0;
+ zRoot = (const char*)sqlite3_value_text(argv[1]);
+ if( zRoot==0 ) return SQLITE_OK;
+ n = sqlite3_value_bytes(argv[1]);
+ p->zRoot = sqlite3_malloc64( n+1 );
+ if( p->zRoot==0 ) return SQLITE_NOMEM;
+ memcpy(p->zRoot, zRoot, (size_t)n+1);
+ if( zRoot[0]!='$' ){
+ zErr = zRoot;
+ }else{
+ pNode = jsonLookupStep(&p->sParse, 0, p->zRoot+1, 0, &zErr);
+ }
+ if( zErr ){
+ sqlite3_free(cur->pVtab->zErrMsg);
+ cur->pVtab->zErrMsg = jsonPathSyntaxError(zErr);
+ jsonEachCursorReset(p);
+ return cur->pVtab->zErrMsg ? SQLITE_ERROR : SQLITE_NOMEM;
+ }else if( pNode==0 ){
+ return SQLITE_OK;
+ }
+ }else{
+ pNode = p->sParse.aNode;
+ }
+ p->iBegin = p->i = (int)(pNode - p->sParse.aNode);
+ p->eType = pNode->eType;
+ if( p->eType>=JSON_ARRAY ){
+ pNode->u.iKey = 0;
+ p->iEnd = p->i + pNode->n + 1;
+ if( p->bRecursive ){
+ p->eType = p->sParse.aNode[p->sParse.aUp[p->i]].eType;
+ if( p->i>0 && (p->sParse.aNode[p->i-1].jnFlags & JNODE_LABEL)!=0 ){
+ p->i--;
+ }
+ }else{
+ p->i++;
+ }
+ }else{
+ p->iEnd = p->i+1;
+ }
+ }
+ return SQLITE_OK;
+}
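+
+/* For example, json_each('{"a":[1,2]}','$.a') positions the scan on the
+** '$.a' sub-tree so that only the array elements 1 and 2 are visited,
+** while a root argument that does not begin with '$' causes the path
+** syntax error reported through jsonPathSyntaxError() above. */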
+
+/* The methods of the json_each virtual table */
+static sqlite3_module jsonEachModule = {
+ 0, /* iVersion */
+ 0, /* xCreate */
+ jsonEachConnect, /* xConnect */
+ jsonEachBestIndex, /* xBestIndex */
+ jsonEachDisconnect, /* xDisconnect */
+ 0, /* xDestroy */
+ jsonEachOpenEach, /* xOpen - open a cursor */
+ jsonEachClose, /* xClose - close a cursor */
+ jsonEachFilter, /* xFilter - configure scan constraints */
+ jsonEachNext, /* xNext - advance a cursor */
+ jsonEachEof, /* xEof - check for end of scan */
+ jsonEachColumn, /* xColumn - read data */
+ jsonEachRowid, /* xRowid - read data */
+ 0, /* xUpdate */
+ 0, /* xBegin */
+ 0, /* xSync */
+ 0, /* xCommit */
+ 0, /* xRollback */
+ 0, /* xFindMethod */
+ 0, /* xRename */
+ 0, /* xSavepoint */
+ 0, /* xRelease */
+ 0, /* xRollbackTo */
+ 0 /* xShadowName */
+};
+
+/* The methods of the json_tree virtual table. */
+static sqlite3_module jsonTreeModule = {
+ 0, /* iVersion */
+ 0, /* xCreate */
+ jsonEachConnect, /* xConnect */
+ jsonEachBestIndex, /* xBestIndex */
+ jsonEachDisconnect, /* xDisconnect */
+ 0, /* xDestroy */
+ jsonEachOpenTree, /* xOpen - open a cursor */
+ jsonEachClose, /* xClose - close a cursor */
+ jsonEachFilter, /* xFilter - configure scan constraints */
+ jsonEachNext, /* xNext - advance a cursor */
+ jsonEachEof, /* xEof - check for end of scan */
+ jsonEachColumn, /* xColumn - read data */
+ jsonEachRowid, /* xRowid - read data */
+ 0, /* xUpdate */
+ 0, /* xBegin */
+ 0, /* xSync */
+ 0, /* xCommit */
+ 0, /* xRollback */
+ 0, /* xFindMethod */
+ 0, /* xRename */
+ 0, /* xSavepoint */
+ 0, /* xRelease */
+ 0, /* xRollbackTo */
+ 0 /* xShadowName */
+};
+#endif /* SQLITE_OMIT_VIRTUALTABLE */
+
+/****************************************************************************
+** The following routines are the only publicly visible identifiers in this
+** file. Call the following routines in order to register the various SQL
+** functions and the virtual table implemented by this file.
+****************************************************************************/
+
+SQLITE_PRIVATE int sqlite3Json1Init(sqlite3 *db){
+ int rc = SQLITE_OK;
+ unsigned int i;
+ static const struct {
+ const char *zName;
+ int nArg;
+ int flag;
+ void (*xFunc)(sqlite3_context*,int,sqlite3_value**);
+ } aFunc[] = {
+ { "json", 1, 0, jsonRemoveFunc },
+ { "json_array", -1, 0, jsonArrayFunc },
+ { "json_array_length", 1, 0, jsonArrayLengthFunc },
+ { "json_array_length", 2, 0, jsonArrayLengthFunc },
+ { "json_extract", -1, 0, jsonExtractFunc },
+ { "json_insert", -1, 0, jsonSetFunc },
+ { "json_object", -1, 0, jsonObjectFunc },
+ { "json_patch", 2, 0, jsonPatchFunc },
+ { "json_quote", 1, 0, jsonQuoteFunc },
+ { "json_remove", -1, 0, jsonRemoveFunc },
+ { "json_replace", -1, 0, jsonReplaceFunc },
+ { "json_set", -1, 1, jsonSetFunc },
+ { "json_type", 1, 0, jsonTypeFunc },
+ { "json_type", 2, 0, jsonTypeFunc },
+ { "json_valid", 1, 0, jsonValidFunc },
+
+#if SQLITE_DEBUG
+ /* DEBUG and TESTING functions */
+ { "json_parse", 1, 0, jsonParseFunc },
+ { "json_test1", 1, 0, jsonTest1Func },
+#endif
+ };
+ static const struct {
+ const char *zName;
+ int nArg;
+ void (*xStep)(sqlite3_context*,int,sqlite3_value**);
+ void (*xFinal)(sqlite3_context*);
+ void (*xValue)(sqlite3_context*);
+ } aAgg[] = {
+ { "json_group_array", 1,
+ jsonArrayStep, jsonArrayFinal, jsonArrayValue },
+ { "json_group_object", 2,
+ jsonObjectStep, jsonObjectFinal, jsonObjectValue },
+ };
+#ifndef SQLITE_OMIT_VIRTUALTABLE
+ static const struct {
+ const char *zName;
+ sqlite3_module *pModule;
+ } aMod[] = {
+ { "json_each", &jsonEachModule },
+ { "json_tree", &jsonTreeModule },
+ };
+#endif
+ static const int enc =
+ SQLITE_UTF8 |
+ SQLITE_DETERMINISTIC |
+ SQLITE_INNOCUOUS;
+ for(i=0; i<sizeof(aFunc)/sizeof(aFunc[0]) && rc==SQLITE_OK; i++){
+ rc = sqlite3_create_function(db, aFunc[i].zName, aFunc[i].nArg, enc,
+ (void*)&aFunc[i].flag,
+ aFunc[i].xFunc, 0, 0);
+ }
+#ifndef SQLITE_OMIT_WINDOWFUNC
+ for(i=0; i<sizeof(aAgg)/sizeof(aAgg[0]) && rc==SQLITE_OK; i++){
+ rc = sqlite3_create_window_function(db, aAgg[i].zName, aAgg[i].nArg,
+ SQLITE_SUBTYPE | enc, 0,
+ aAgg[i].xStep, aAgg[i].xFinal,
+ aAgg[i].xValue, jsonGroupInverse, 0);
+ }
+#endif
+#ifndef SQLITE_OMIT_VIRTUALTABLE
+ for(i=0; i<sizeof(aMod)/sizeof(aMod[0]) && rc==SQLITE_OK; i++){
+ rc = sqlite3_create_module(db, aMod[i].zName, aMod[i].pModule, 0);
+ }
+#endif
+ return rc;
+}
+
+
+#ifndef SQLITE_CORE
+#ifdef _WIN32
+__declspec(dllexport)
+#endif
+SQLITE_API int sqlite3_json_init(
+ sqlite3 *db,
+ char **pzErrMsg,
+ const sqlite3_api_routines *pApi
+){
+ SQLITE_EXTENSION_INIT2(pApi);
+ (void)pzErrMsg; /* Unused parameter */
+ return sqlite3Json1Init(db);
+}
+#endif
+#endif /* !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_JSON1) */
+
+/************** End of json1.c ***********************************************/
+/************** Begin file rtree.c *******************************************/
+/*
+** 2001 September 15
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+** This file contains code for implementations of the r-tree and r*-tree
+** algorithms packaged as an SQLite virtual table module.
+*/
+
+/*
+** Database Format of R-Tree Tables
+** --------------------------------
+**
+** The data structure for a single virtual r-tree table is stored in three
+** native SQLite tables declared as follows. In each case, the '%' character
+** in the table name is replaced with the user-supplied name of the r-tree
+** table.
+**
+** CREATE TABLE %_node(nodeno INTEGER PRIMARY KEY, data BLOB)
+** CREATE TABLE %_parent(nodeno INTEGER PRIMARY KEY, parentnode INTEGER)
+** CREATE TABLE %_rowid(rowid INTEGER PRIMARY KEY, nodeno INTEGER, ...)
+**
+** The data for each node of the r-tree structure is stored in the %_node
+** table. For each node that is not the root node of the r-tree, there is
+** an entry in the %_parent table associating the node with its parent.
+** And for each row of data in the table, there is an entry in the %_rowid
+** table that maps from the entry's rowid to the id of the node that it
+** is stored on. If the r-tree contains auxiliary columns, those are stored
+** at the end of the %_rowid table.
+**
+** The root node of an r-tree always exists, even if the r-tree table is
+** empty. The nodeno of the root node is always 1. All other nodes in the
+** table must be the same size as the root node. The content of each node
+** is formatted as follows:
+**
+** 1. If the node is the root node (node 1), then the first 2 bytes
+** of the node contain the tree depth as a big-endian integer.
+** For non-root nodes, the first 2 bytes are left unused.
+**
+** 2. The next 2 bytes contain the number of entries currently
+** stored in the node.
+**
+** 3. The remainder of the node contains the node entries. Each entry
+** consists of a single 8-byte integer followed by an even number
+** of 4-byte coordinates. For leaf nodes the integer is the rowid
+** of a record. For internal nodes it is the node number of a
+** child page.
+*/
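+
+/*
+** For example, in a 2-dimensional rtree each cell occupies
+** 8 + 2*2*4 == 24 bytes (the rowid followed by xmin,xmax,ymin,ymax).
+** Within the root node, bytes 0-1 hold the tree depth, bytes 2-3 hold
+** the entry count, and the first cell begins at byte offset 4.
+*/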
+
+#if !defined(SQLITE_CORE) \
+ || (defined(SQLITE_ENABLE_RTREE) && !defined(SQLITE_OMIT_VIRTUALTABLE))
+
+#ifndef SQLITE_CORE
+/* #include "sqlite3ext.h" */
+ SQLITE_EXTENSION_INIT1
+#else
+/* #include "sqlite3.h" */
+#endif
+SQLITE_PRIVATE int sqlite3GetToken(const unsigned char*,int*); /* In the SQLite core */
+
+#ifndef SQLITE_AMALGAMATION
+#include "sqlite3rtree.h"
+typedef sqlite3_int64 i64;
+typedef sqlite3_uint64 u64;
+typedef unsigned char u8;
+typedef unsigned short u16;
+typedef unsigned int u32;
+#if !defined(NDEBUG) && !defined(SQLITE_DEBUG)
+# define NDEBUG 1
+#endif
+#if defined(NDEBUG) && defined(SQLITE_DEBUG)
+# undef NDEBUG
+#endif
+#endif
+
+/* #include <string.h> */
+/* #include <stdio.h> */
+/* #include <assert.h> */
+/* #include <stdlib.h> */
+
+/* The following macro is used to suppress compiler warnings.
+*/
+#ifndef UNUSED_PARAMETER
+# define UNUSED_PARAMETER(x) (void)(x)
+#endif
+
+typedef struct Rtree Rtree;
+typedef struct RtreeCursor RtreeCursor;
+typedef struct RtreeNode RtreeNode;
+typedef struct RtreeCell RtreeCell;
+typedef struct RtreeConstraint RtreeConstraint;
+typedef struct RtreeMatchArg RtreeMatchArg;
+typedef struct RtreeGeomCallback RtreeGeomCallback;
+typedef union RtreeCoord RtreeCoord;
+typedef struct RtreeSearchPoint RtreeSearchPoint;
+
+/* The rtree may have between 1 and RTREE_MAX_DIMENSIONS dimensions. */
+#define RTREE_MAX_DIMENSIONS 5
+
+/* Maximum number of auxiliary columns */
+#define RTREE_MAX_AUX_COLUMN 100
+
+/* Size of hash table Rtree.aHash. This hash table is not expected to
+** ever contain very many entries, so a fixed number of buckets is
+** used.
+*/
+#define HASHSIZE 97
+
+/* The xBestIndex method of this virtual table requires an estimate of
+** the number of rows in the virtual table to calculate the costs of
+** various strategies. If possible, this estimate is loaded from the
+** sqlite_stat1 table (with RTREE_MIN_ROWEST as a hard-coded minimum).
+** Otherwise, if no sqlite_stat1 entry is available, use
+** RTREE_DEFAULT_ROWEST.
+*/
+#define RTREE_DEFAULT_ROWEST 1048576
+#define RTREE_MIN_ROWEST 100
+
+/*
+** An rtree virtual-table object.
+*/
+struct Rtree {
+ sqlite3_vtab base; /* Base class. Must be first */
+ sqlite3 *db; /* Host database connection */
+ int iNodeSize; /* Size in bytes of each node in the node table */
+ u8 nDim; /* Number of dimensions */
+ u8 nDim2; /* Twice the number of dimensions */
+ u8 eCoordType; /* RTREE_COORD_REAL32 or RTREE_COORD_INT32 */
+ u8 nBytesPerCell; /* Bytes consumed per cell */
+ u8 inWrTrans; /* True if inside write transaction */
+ u8 nAux; /* # of auxiliary columns in %_rowid */
+ u8 nAuxNotNull; /* Number of initial not-null aux columns */
+#ifdef SQLITE_DEBUG
+ u8 bCorrupt; /* Shadow table corruption detected */
+#endif
+ int iDepth; /* Current depth of the r-tree structure */
+ char *zDb; /* Name of database containing r-tree table */
+ char *zName; /* Name of r-tree table */
+ u32 nBusy; /* Current number of users of this structure */
+ i64 nRowEst; /* Estimated number of rows in this table */
+ u32 nCursor; /* Number of open cursors */
+ u32 nNodeRef; /* Number RtreeNodes with positive nRef */
+ char *zReadAuxSql; /* SQL for statement to read aux data */
+
+ /* List of nodes removed during a CondenseTree operation. List is
+ ** linked together via the pointer normally used for hash chains -
+ ** RtreeNode.pNext. RtreeNode.iNode stores the depth of the sub-tree
+ ** headed by the node (leaf nodes have RtreeNode.iNode==0).
+ */
+ RtreeNode *pDeleted;
+ int iReinsertHeight; /* Height of sub-trees Reinsert() has run on */
+
+ /* Blob I/O on xxx_node */
+ sqlite3_blob *pNodeBlob;
+
+ /* Statements to read/write/delete a record from xxx_node */
+ sqlite3_stmt *pWriteNode;
+ sqlite3_stmt *pDeleteNode;
+
+ /* Statements to read/write/delete a record from xxx_rowid */
+ sqlite3_stmt *pReadRowid;
+ sqlite3_stmt *pWriteRowid;
+ sqlite3_stmt *pDeleteRowid;
+
+ /* Statements to read/write/delete a record from xxx_parent */
+ sqlite3_stmt *pReadParent;
+ sqlite3_stmt *pWriteParent;
+ sqlite3_stmt *pDeleteParent;
+
+ /* Statement for writing to the "aux:" fields, if there are any */
+ sqlite3_stmt *pWriteAux;
+
+ RtreeNode *aHash[HASHSIZE]; /* Hash table of in-memory nodes. */
+};
+
+/* Possible values for Rtree.eCoordType: */
+#define RTREE_COORD_REAL32 0
+#define RTREE_COORD_INT32 1
+
+/*
+** If SQLITE_RTREE_INT_ONLY is defined, then this virtual table will
+** only deal with integer coordinates. No floating point operations
+** will be done.
+*/
+#ifdef SQLITE_RTREE_INT_ONLY
+ typedef sqlite3_int64 RtreeDValue; /* High accuracy coordinate */
+ typedef int RtreeValue; /* Low accuracy coordinate */
+# define RTREE_ZERO 0
+#else
+ typedef double RtreeDValue; /* High accuracy coordinate */
+ typedef float RtreeValue; /* Low accuracy coordinate */
+# define RTREE_ZERO 0.0
+#endif
+
+/*
+** Set the Rtree.bCorrupt flag
+*/
+#ifdef SQLITE_DEBUG
+# define RTREE_IS_CORRUPT(X) ((X)->bCorrupt = 1)
+#else
+# define RTREE_IS_CORRUPT(X)
+#endif
+
+/*
+** When doing a search of an r-tree, instances of the following structure
+** record intermediate results from the tree walk.
+**
+** The id is always a node-id. For iLevel>=1 the id is the node-id of
+** the node that the RtreeSearchPoint represents. When iLevel==0, however,
+** the id is of the parent node and the cell that RtreeSearchPoint
+** represents is the iCell-th entry in the parent node.
+*/
+struct RtreeSearchPoint {
+ RtreeDValue rScore; /* The score for this node. Smallest goes first. */
+ sqlite3_int64 id; /* Node ID */
+ u8 iLevel; /* 0=entries. 1=leaf node. 2+ for higher */
+ u8 eWithin; /* PARTLY_WITHIN or FULLY_WITHIN */
+ u8 iCell; /* Cell index within the node */
+};
+
+/*
+** The minimum number of cells allowed for a node is a third of the
+** maximum. In Guttman's notation:
+**
+** m = M/3
+**
+** If an R*-tree "Reinsert" operation is required, the same number of
+** cells are removed from the overfull node and reinserted into the tree.
+*/
+#define RTREE_MINCELLS(p) ((((p)->iNodeSize-4)/(p)->nBytesPerCell)/3)
+#define RTREE_REINSERT(p) RTREE_MINCELLS(p)
+#define RTREE_MAXCELLS 51
+
+/*
+** The smallest possible node-size is (512-64)==448 bytes. And the largest
+** supported cell size is 48 bytes (8 byte rowid + ten 4 byte coordinates).
+** Such a node holds at most (448-4)/48 == 9 cells, so by the m = M/3 rule
+** all non-root nodes must contain at least 3 entries. Since 3^41 is
+** greater than 2^64 (the number of distinct rowids), an r-tree structure
+** always has a depth of 40 or less.
+*/
+#define RTREE_MAX_DEPTH 40
+
+
+/*
+** Number of entries in the cursor RtreeNode cache. The first entry is
+** used to cache the RtreeNode for RtreeCursor.sPoint. The remaining
+** entries cache the RtreeNode for the first elements of the priority queue.
+*/
+#define RTREE_CACHE_SZ 5
+
+/*
+** An rtree cursor object.
+*/
+struct RtreeCursor {
+ sqlite3_vtab_cursor base; /* Base class. Must be first */
+ u8 atEOF; /* True if at end of search */
+ u8 bPoint; /* True if sPoint is valid */
+ u8 bAuxValid; /* True if pReadAux is valid */
+ int iStrategy; /* Copy of idxNum search parameter */
+ int nConstraint; /* Number of entries in aConstraint */
+ RtreeConstraint *aConstraint; /* Search constraints. */
+ int nPointAlloc; /* Number of slots allocated for aPoint[] */
+ int nPoint; /* Number of slots used in aPoint[] */
+ int mxLevel; /* iLevel value for root of the tree */
+ RtreeSearchPoint *aPoint; /* Priority queue for search points */
+ sqlite3_stmt *pReadAux; /* Statement to read aux-data */
+ RtreeSearchPoint sPoint; /* Cached next search point */
+ RtreeNode *aNode[RTREE_CACHE_SZ]; /* Rtree node cache */
+ u32 anQueue[RTREE_MAX_DEPTH+1]; /* Number of queued entries by iLevel */
+};
+
+/* Return the Rtree of a RtreeCursor */
+#define RTREE_OF_CURSOR(X) ((Rtree*)((X)->base.pVtab))
+
+/*
+** A coordinate can be either a floating point number or an integer. All
+** coordinates within a single R-Tree are always of the same type.
+*/
+union RtreeCoord {
+ RtreeValue f; /* Floating point value */
+ int i; /* Integer value */
+ u32 u; /* Unsigned for byte-order conversions */
+};
+
+/*
+** The argument is an RtreeCoord. Return the value stored within the RtreeCoord
+** formatted as a RtreeDValue (double or int64). This macro assumes that local
+** variable pRtree points to the Rtree structure associated with the
+** RtreeCoord.
+*/
+#ifdef SQLITE_RTREE_INT_ONLY
+# define DCOORD(coord) ((RtreeDValue)coord.i)
+#else
+# define DCOORD(coord) ( \
+ (pRtree->eCoordType==RTREE_COORD_REAL32) ? \
+ ((double)coord.f) : \
+ ((double)coord.i) \
+ )
+#endif
+
+/*
+** A search constraint.
+*/
+struct RtreeConstraint {
+ int iCoord; /* Index of constrained coordinate */
+ int op; /* Constraining operation */
+ union {
+ RtreeDValue rValue; /* Constraint value. */
+ int (*xGeom)(sqlite3_rtree_geometry*,int,RtreeDValue*,int*);
+ int (*xQueryFunc)(sqlite3_rtree_query_info*);
+ } u;
+ sqlite3_rtree_query_info *pInfo; /* xGeom and xQueryFunc argument */
+};
+
+/* Possible values for RtreeConstraint.op */
+#define RTREE_EQ 0x41 /* A */
+#define RTREE_LE 0x42 /* B */
+#define RTREE_LT 0x43 /* C */
+#define RTREE_GE 0x44 /* D */
+#define RTREE_GT 0x45 /* E */
+#define RTREE_MATCH 0x46 /* F: Old-style sqlite3_rtree_geometry_callback() */
+#define RTREE_QUERY 0x47 /* G: New-style sqlite3_rtree_query_callback() */
+
+/* Special operators available only on cursors. Needs to be consecutive
+** with the normal values above, but must be less than RTREE_MATCH. These
+** are used in the cursor for constraints such as x=NULL (RTREE_FALSE) or
+** x<'xyz' (RTREE_TRUE) */
+#define RTREE_TRUE 0x3f /* ? */
+#define RTREE_FALSE 0x40 /* @ */
+
+/*
+** An rtree structure node.
+*/
+struct RtreeNode {
+ RtreeNode *pParent; /* Parent node */
+ i64 iNode; /* The node number */
+ int nRef; /* Number of references to this node */
+ int isDirty; /* True if the node needs to be written to disk */
+ u8 *zData; /* Content of the node, as should be on disk */
+ RtreeNode *pNext; /* Next node in this hash collision chain */
+};
+
+/* Return the number of cells in a node */
+#define NCELL(pNode) readInt16(&(pNode)->zData[2])
+
+/*
+** A single cell from a node, deserialized
+*/
+struct RtreeCell {
+ i64 iRowid; /* Node or entry ID */
+ RtreeCoord aCoord[RTREE_MAX_DIMENSIONS*2]; /* Bounding box coordinates */
+};
+
+
+/*
+** This object becomes the sqlite3_user_data() for the SQL functions
+** that are created by sqlite3_rtree_geometry_callback() and
+** sqlite3_rtree_query_callback() and which appear on the right of MATCH
+** operators in order to constrain a search.
+**
+** xGeom and xQueryFunc are the callback functions. Exactly one of
+** xGeom and xQueryFunc fields is non-NULL, depending on whether the
+** SQL function was created using sqlite3_rtree_geometry_callback() or
+** sqlite3_rtree_query_callback().
+**
+** This object is deleted automatically by the destructor mechanism in
+** sqlite3_create_function_v2().
+*/
+struct RtreeGeomCallback {
+ int (*xGeom)(sqlite3_rtree_geometry*, int, RtreeDValue*, int*);
+ int (*xQueryFunc)(sqlite3_rtree_query_info*);
+ void (*xDestructor)(void*);
+ void *pContext;
+};
+
+/*
+** An instance of this structure (in the form of a BLOB) is returned by
+** the SQL functions that sqlite3_rtree_geometry_callback() and
+** sqlite3_rtree_query_callback() create, and is read as the right-hand
+** operand to the MATCH operator of an R-Tree.
+*/
+struct RtreeMatchArg {
+ u32 iSize; /* Size of this object */
+ RtreeGeomCallback cb; /* Info about the callback functions */
+ int nParam; /* Number of parameters to the SQL function */
+ sqlite3_value **apSqlParam; /* Original SQL parameter values */
+ RtreeDValue aParam[1]; /* Values for parameters to the SQL function */
+};
+
+#ifndef MAX
+# define MAX(x,y) ((x) < (y) ? (y) : (x))
+#endif
+#ifndef MIN
+# define MIN(x,y) ((x) > (y) ? (y) : (x))
+#endif
+
+/* What version of GCC is being used. 0 means GCC is not being used.
+** Note that the GCC_VERSION macro will also be set correctly when using
+** clang, since clang works hard to be gcc compatible. So the gcc
+** optimizations will also work when compiling with clang.
+*/
+#ifndef GCC_VERSION
+#if defined(__GNUC__) && !defined(SQLITE_DISABLE_INTRINSIC)
+# define GCC_VERSION (__GNUC__*1000000+__GNUC_MINOR__*1000+__GNUC_PATCHLEVEL__)
+#else
+# define GCC_VERSION 0
+#endif
+#endif
+
+/* The testcase() macro should already be defined in the amalgamation. If
+** it is not, make it a no-op.
+*/
+#ifndef SQLITE_AMALGAMATION
+# define testcase(X)
+#endif
+
+/*
+** Make sure that the compiler intrinsics we desire are enabled when
+** compiling with an appropriate version of MSVC unless prevented by
+** the SQLITE_DISABLE_INTRINSIC define.
+*/
+#if !defined(SQLITE_DISABLE_INTRINSIC)
+# if defined(_MSC_VER) && _MSC_VER>=1400
+# if !defined(_WIN32_WCE)
+/* # include <intrin.h> */
+# pragma intrinsic(_byteswap_ulong)
+# pragma intrinsic(_byteswap_uint64)
+# else
+/* # include <cmnintrin.h> */
+# endif
+# endif
+#endif
+
+/*
+** Macros to determine whether the machine is big or little endian,
+** and whether or not that determination is run-time or compile-time.
+**
+** For best performance, an attempt is made to guess at the byte-order
+** using C-preprocessor macros. If that is unsuccessful, or if
+** -DSQLITE_RUNTIME_BYTEORDER=1 is set, then byte-order is determined
+** at run-time.
+*/
+#ifndef SQLITE_BYTEORDER
+#if defined(i386) || defined(__i386__) || defined(_M_IX86) || \
+ defined(__x86_64) || defined(__x86_64__) || defined(_M_X64) || \
+ defined(_M_AMD64) || defined(_M_ARM) || defined(__x86) || \
+ defined(__arm__)
+# define SQLITE_BYTEORDER 1234
+#elif defined(sparc) || defined(__ppc__)
+# define SQLITE_BYTEORDER 4321
+#else
+# define SQLITE_BYTEORDER 0 /* 0 means "unknown at compile-time" */
+#endif
+#endif
+
+
+/* What version of MSVC is being used. 0 means MSVC is not being used */
+#ifndef MSVC_VERSION
+#if defined(_MSC_VER) && !defined(SQLITE_DISABLE_INTRINSIC)
+# define MSVC_VERSION _MSC_VER
+#else
+# define MSVC_VERSION 0
+#endif
+#endif
+
+/*
+** Functions to deserialize a 16 bit integer, 32 bit real number and
+** 64 bit integer. The deserialized value is returned.
+*/
+static int readInt16(u8 *p){
+ return (p[0]<<8) + p[1];
+}
+static void readCoord(u8 *p, RtreeCoord *pCoord){
+ assert( ((((char*)p) - (char*)0)&3)==0 ); /* p is always 4-byte aligned */
+#if SQLITE_BYTEORDER==1234 && MSVC_VERSION>=1300
+ pCoord->u = _byteswap_ulong(*(u32*)p);
+#elif SQLITE_BYTEORDER==1234 && GCC_VERSION>=4003000
+ pCoord->u = __builtin_bswap32(*(u32*)p);
+#elif SQLITE_BYTEORDER==4321
+ pCoord->u = *(u32*)p;
+#else
+ pCoord->u = (
+ (((u32)p[0]) << 24) +
+ (((u32)p[1]) << 16) +
+ (((u32)p[2]) << 8) +
+ (((u32)p[3]) << 0)
+ );
+#endif
+}
+static i64 readInt64(u8 *p){
+#if SQLITE_BYTEORDER==1234 && MSVC_VERSION>=1300
+ u64 x;
+ memcpy(&x, p, 8);
+ return (i64)_byteswap_uint64(x);
+#elif SQLITE_BYTEORDER==1234 && GCC_VERSION>=4003000
+ u64 x;
+ memcpy(&x, p, 8);
+ return (i64)__builtin_bswap64(x);
+#elif SQLITE_BYTEORDER==4321
+ i64 x;
+ memcpy(&x, p, 8);
+ return x;
+#else
+ return (i64)(
+ (((u64)p[0]) << 56) +
+ (((u64)p[1]) << 48) +
+ (((u64)p[2]) << 40) +
+ (((u64)p[3]) << 32) +
+ (((u64)p[4]) << 24) +
+ (((u64)p[5]) << 16) +
+ (((u64)p[6]) << 8) +
+ (((u64)p[7]) << 0)
+ );
+#endif
+}
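+
+/* For example, readInt16() applied to the bytes {0x01,0x02} returns
+** 0x0102 == 258, and readInt64() applied to eight bytes ending in
+** {0x01,0x02} (all leading bytes zero) also returns 258, regardless of
+** the byte order of the host. */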
+
+/*
+** Functions to serialize a 16 bit integer, 32 bit real number and
+** 64 bit integer. The value returned is the number of bytes written
+** to the argument buffer (always 2, 4 and 8 respectively).
+*/
+static void writeInt16(u8 *p, int i){
+ p[0] = (i>> 8)&0xFF;
+ p[1] = (i>> 0)&0xFF;
+}
+static int writeCoord(u8 *p, RtreeCoord *pCoord){
+ u32 i;
+ assert( ((((char*)p) - (char*)0)&3)==0 ); /* p is always 4-byte aligned */
+ assert( sizeof(RtreeCoord)==4 );
+ assert( sizeof(u32)==4 );
+#if SQLITE_BYTEORDER==1234 && GCC_VERSION>=4003000
+ i = __builtin_bswap32(pCoord->u);
+ memcpy(p, &i, 4);
+#elif SQLITE_BYTEORDER==1234 && MSVC_VERSION>=1300
+ i = _byteswap_ulong(pCoord->u);
+ memcpy(p, &i, 4);
+#elif SQLITE_BYTEORDER==4321
+ i = pCoord->u;
+ memcpy(p, &i, 4);
+#else
+ i = pCoord->u;
+ p[0] = (i>>24)&0xFF;
+ p[1] = (i>>16)&0xFF;
+ p[2] = (i>> 8)&0xFF;
+ p[3] = (i>> 0)&0xFF;
+#endif
+ return 4;
+}
+static int writeInt64(u8 *p, i64 i){
+#if SQLITE_BYTEORDER==1234 && GCC_VERSION>=4003000
+ i = (i64)__builtin_bswap64((u64)i);
+ memcpy(p, &i, 8);
+#elif SQLITE_BYTEORDER==1234 && MSVC_VERSION>=1300
+ i = (i64)_byteswap_uint64((u64)i);
+ memcpy(p, &i, 8);
+#elif SQLITE_BYTEORDER==4321
+ memcpy(p, &i, 8);
+#else
+ p[0] = (i>>56)&0xFF;
+ p[1] = (i>>48)&0xFF;
+ p[2] = (i>>40)&0xFF;
+ p[3] = (i>>32)&0xFF;
+ p[4] = (i>>24)&0xFF;
+ p[5] = (i>>16)&0xFF;
+ p[6] = (i>> 8)&0xFF;
+ p[7] = (i>> 0)&0xFF;
+#endif
+ return 8;
+}
+
+/*
+** Increment the reference count of node p.
+*/
+static void nodeReference(RtreeNode *p){
+ if( p ){
+ assert( p->nRef>0 );
+ p->nRef++;
+ }
+}
+
+/*
+** Clear the content of node p (set all bytes to 0x00).
+*/
+static void nodeZero(Rtree *pRtree, RtreeNode *p){
+ memset(&p->zData[2], 0, pRtree->iNodeSize-2);
+ p->isDirty = 1;
+}
+
+/*
+** Given a node number iNode, return the corresponding key to use
+** in the Rtree.aHash table.
+*/
+static unsigned int nodeHash(i64 iNode){
+ return ((unsigned)iNode) % HASHSIZE;
+}
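+
+/* For example, with HASHSIZE==97, node numbers 1 and 98 both hash to
+** bucket 1 and are chained together through RtreeNode.pNext. */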
+
+/*
+** Search the node hash table for node iNode. If found, return a pointer
+** to it. Otherwise, return 0.
+*/
+static RtreeNode *nodeHashLookup(Rtree *pRtree, i64 iNode){
+ RtreeNode *p;
+ for(p=pRtree->aHash[nodeHash(iNode)]; p && p->iNode!=iNode; p=p->pNext);
+ return p;
+}
+
+/*
+** Add node pNode to the node hash table.
+*/
+static void nodeHashInsert(Rtree *pRtree, RtreeNode *pNode){
+ int iHash;
+ assert( pNode->pNext==0 );
+ iHash = nodeHash(pNode->iNode);
+ pNode->pNext = pRtree->aHash[iHash];
+ pRtree->aHash[iHash] = pNode;
+}
+
+/*
+** Remove node pNode from the node hash table.
+*/
+static void nodeHashDelete(Rtree *pRtree, RtreeNode *pNode){
+ RtreeNode **pp;
+ if( pNode->iNode!=0 ){
+ pp = &pRtree->aHash[nodeHash(pNode->iNode)];
+ for( ; (*pp)!=pNode; pp = &(*pp)->pNext){ assert(*pp); }
+ *pp = pNode->pNext;
+ pNode->pNext = 0;
+ }
+}
+
+/*
+** Allocate and return a new r-tree node. Initially, (RtreeNode.iNode==0),
+** indicating that the node has not yet been assigned a node number. It is
+** assigned a node number when nodeWrite() is called to write the
+** node contents out to the database.
+*/
+static RtreeNode *nodeNew(Rtree *pRtree, RtreeNode *pParent){
+ RtreeNode *pNode;
+ pNode = (RtreeNode *)sqlite3_malloc64(sizeof(RtreeNode) + pRtree->iNodeSize);
+ if( pNode ){
+ memset(pNode, 0, sizeof(RtreeNode) + pRtree->iNodeSize);
+ pNode->zData = (u8 *)&pNode[1];
+ pNode->nRef = 1;
+ pRtree->nNodeRef++;
+ pNode->pParent = pParent;
+ pNode->isDirty = 1;
+ nodeReference(pParent);
+ }
+ return pNode;
+}
+
+/*
+** Clear the Rtree.pNodeBlob object
+*/
+static void nodeBlobReset(Rtree *pRtree){
+ if( pRtree->pNodeBlob && pRtree->inWrTrans==0 && pRtree->nCursor==0 ){
+ sqlite3_blob *pBlob = pRtree->pNodeBlob;
+ pRtree->pNodeBlob = 0;
+ sqlite3_blob_close(pBlob);
+ }
+}
+
+/*
+** Check to see if pNode is the same as pParent or any of the parents
+** of pParent.
+*/
+static int nodeInParentChain(const RtreeNode *pNode, const RtreeNode *pParent){
+ do{
+ if( pNode==pParent ) return 1;
+ pParent = pParent->pParent;
+ }while( pParent );
+ return 0;
+}
+
+/*
+** Obtain a reference to an r-tree node.
+*/
+static int nodeAcquire(
+ Rtree *pRtree, /* R-tree structure */
+ i64 iNode, /* Node number to load */
+ RtreeNode *pParent, /* Either the parent node or NULL */
+ RtreeNode **ppNode /* OUT: Acquired node */
+){
+ int rc = SQLITE_OK;
+ RtreeNode *pNode = 0;
+
+ /* Check if the requested node is already in the hash table. If so,
+ ** increase its reference count and return it.
+ */
+ if( (pNode = nodeHashLookup(pRtree, iNode))!=0 ){
+ if( pParent && !pNode->pParent ){
+ if( nodeInParentChain(pNode, pParent) ){
+ RTREE_IS_CORRUPT(pRtree);
+ return SQLITE_CORRUPT_VTAB;
+ }
+ pParent->nRef++;
+ pNode->pParent = pParent;
+ }else if( pParent && pNode->pParent && pParent!=pNode->pParent ){
+ RTREE_IS_CORRUPT(pRtree);
+ return SQLITE_CORRUPT_VTAB;
+ }
+ pNode->nRef++;
+ *ppNode = pNode;
+ return SQLITE_OK;
+ }
+
+ if( pRtree->pNodeBlob ){
+ sqlite3_blob *pBlob = pRtree->pNodeBlob;
+ pRtree->pNodeBlob = 0;
+ rc = sqlite3_blob_reopen(pBlob, iNode);
+ pRtree->pNodeBlob = pBlob;
+ if( rc ){
+ nodeBlobReset(pRtree);
+ if( rc==SQLITE_NOMEM ) return SQLITE_NOMEM;
+ }
+ }
+ if( pRtree->pNodeBlob==0 ){
+ char *zTab = sqlite3_mprintf("%s_node", pRtree->zName);
+ if( zTab==0 ) return SQLITE_NOMEM;
+ rc = sqlite3_blob_open(pRtree->db, pRtree->zDb, zTab, "data", iNode, 0,
+ &pRtree->pNodeBlob);
+ sqlite3_free(zTab);
+ }
+ if( rc ){
+ nodeBlobReset(pRtree);
+ *ppNode = 0;
+ /* If unable to open an sqlite3_blob on the desired row, that can only
+ ** be because the shadow tables hold erroneous data. */
+ if( rc==SQLITE_ERROR ){
+ rc = SQLITE_CORRUPT_VTAB;
+ RTREE_IS_CORRUPT(pRtree);
+ }
+ }else if( pRtree->iNodeSize==sqlite3_blob_bytes(pRtree->pNodeBlob) ){
+ pNode = (RtreeNode *)sqlite3_malloc64(sizeof(RtreeNode)+pRtree->iNodeSize);
+ if( !pNode ){
+ rc = SQLITE_NOMEM;
+ }else{
+ pNode->pParent = pParent;
+ pNode->zData = (u8 *)&pNode[1];
+ pNode->nRef = 1;
+ pRtree->nNodeRef++;
+ pNode->iNode = iNode;
+ pNode->isDirty = 0;
+ pNode->pNext = 0;
+ rc = sqlite3_blob_read(pRtree->pNodeBlob, pNode->zData,
+ pRtree->iNodeSize, 0);
+ }
+ }
+
+ /* If the root node was just loaded, set pRtree->iDepth to the height
+ ** of the r-tree structure. A height of zero means all data is stored on
+ ** the root node. A height of one means the children of the root node
+ ** are the leaves, and so on. If the depth as specified on the root node
+ ** is greater than RTREE_MAX_DEPTH, the r-tree structure must be corrupt.
+ */
+ if( pNode && iNode==1 ){
+ pRtree->iDepth = readInt16(pNode->zData);
+ if( pRtree->iDepth>RTREE_MAX_DEPTH ){
+ rc = SQLITE_CORRUPT_VTAB;
+ RTREE_IS_CORRUPT(pRtree);
+ }
+ }
+
+ /* If no error has occurred so far, check if the "number of entries"
+ ** field on the node is too large. If so, set the return code to
+ ** SQLITE_CORRUPT_VTAB.
+ */
+ if( pNode && rc==SQLITE_OK ){
+ if( NCELL(pNode)>((pRtree->iNodeSize-4)/pRtree->nBytesPerCell) ){
+ rc = SQLITE_CORRUPT_VTAB;
+ RTREE_IS_CORRUPT(pRtree);
+ }
+ }
+
+ if( rc==SQLITE_OK ){
+ if( pNode!=0 ){
+ nodeReference(pParent);
+ nodeHashInsert(pRtree, pNode);
+ }else{
+ rc = SQLITE_CORRUPT_VTAB;
+ RTREE_IS_CORRUPT(pRtree);
+ }
+ *ppNode = pNode;
+ }else{
+ if( pNode ){
+ pRtree->nNodeRef--;
+ sqlite3_free(pNode);
+ }
+ *ppNode = 0;
+ }
+
+ return rc;
+}
+
+/*
+** Overwrite cell iCell of node pNode with the contents of pCell.
+*/
+static void nodeOverwriteCell(
+ Rtree *pRtree, /* The overall R-Tree */
+ RtreeNode *pNode, /* The node into which the cell is to be written */
+ RtreeCell *pCell, /* The cell to write */
+ int iCell /* Index into pNode into which pCell is written */
+){
+ int ii;
+ u8 *p = &pNode->zData[4 + pRtree->nBytesPerCell*iCell];
+ p += writeInt64(p, pCell->iRowid);
+ for(ii=0; ii<pRtree->nDim2; ii++){
+ p += writeCoord(p, &pCell->aCoord[ii]);
+ }
+ pNode->isDirty = 1;
+}
+
+/*
+** Remove the cell with index iCell from node pNode.
+*/
+static void nodeDeleteCell(Rtree *pRtree, RtreeNode *pNode, int iCell){
+ u8 *pDst = &pNode->zData[4 + pRtree->nBytesPerCell*iCell];
+ u8 *pSrc = &pDst[pRtree->nBytesPerCell];
+ int nByte = (NCELL(pNode) - iCell - 1) * pRtree->nBytesPerCell;
+ memmove(pDst, pSrc, nByte);
+ writeInt16(&pNode->zData[2], NCELL(pNode)-1);
+ pNode->isDirty = 1;
+}
+
+/*
+** Insert the contents of cell pCell into node pNode. If the insert
+** is successful, return SQLITE_OK.
+**
+** If there is not enough free space in pNode, return non-zero, which
+** callers treat as a node-full condition.
+*/
+static int nodeInsertCell(
+ Rtree *pRtree, /* The overall R-Tree */
+ RtreeNode *pNode, /* Write new cell into this node */
+ RtreeCell *pCell /* The cell to be inserted */
+){
+ int nCell; /* Current number of cells in pNode */
+ int nMaxCell; /* Maximum number of cells for pNode */
+
+ nMaxCell = (pRtree->iNodeSize-4)/pRtree->nBytesPerCell;
+ nCell = NCELL(pNode);
+
+ assert( nCell<=nMaxCell );
+ if( nCell<nMaxCell ){
+ nodeOverwriteCell(pRtree, pNode, pCell, nCell);
+ writeInt16(&pNode->zData[2], nCell+1);
+ pNode->isDirty = 1;
+ }
+
+ return (nCell==nMaxCell);
+}
+
+/*
+** If the node is dirty, write it out to the database.
+*/
+static int nodeWrite(Rtree *pRtree, RtreeNode *pNode){
+ int rc = SQLITE_OK;
+ if( pNode->isDirty ){
+ sqlite3_stmt *p = pRtree->pWriteNode;
+ if( pNode->iNode ){
+ sqlite3_bind_int64(p, 1, pNode->iNode);
+ }else{
+ sqlite3_bind_null(p, 1);
+ }
+ sqlite3_bind_blob(p, 2, pNode->zData, pRtree->iNodeSize, SQLITE_STATIC);
+ sqlite3_step(p);
+ pNode->isDirty = 0;
+ rc = sqlite3_reset(p);
+ sqlite3_bind_null(p, 2);
+ if( pNode->iNode==0 && rc==SQLITE_OK ){
+ pNode->iNode = sqlite3_last_insert_rowid(pRtree->db);
+ nodeHashInsert(pRtree, pNode);
+ }
+ }
+ return rc;
+}
+
+/*
+** Release a reference to a node. If the node is dirty and the reference
+** count drops to zero, the node data is written to the database.
+*/
+static int nodeRelease(Rtree *pRtree, RtreeNode *pNode){
+ int rc = SQLITE_OK;
+ if( pNode ){
+ assert( pNode->nRef>0 );
+ assert( pRtree->nNodeRef>0 );
+ pNode->nRef--;
+ if( pNode->nRef==0 ){
+ pRtree->nNodeRef--;
+ if( pNode->iNode==1 ){
+ pRtree->iDepth = -1;
+ }
+ if( pNode->pParent ){
+ rc = nodeRelease(pRtree, pNode->pParent);
+ }
+ if( rc==SQLITE_OK ){
+ rc = nodeWrite(pRtree, pNode);
+ }
+ nodeHashDelete(pRtree, pNode);
+ sqlite3_free(pNode);
+ }
+ }
+ return rc;
+}
+
+/*
+** Return the 64-bit integer value associated with cell iCell of
+** node pNode. If pNode is a leaf node, this is a rowid. If it is
+** an internal node, then the 64-bit integer is a child page number.
+*/
+static i64 nodeGetRowid(
+ Rtree *pRtree, /* The overall R-Tree */
+ RtreeNode *pNode, /* The node from which to extract the ID */
+ int iCell /* The cell index from which to extract the ID */
+){
+ assert( iCell<NCELL(pNode) );
+ return readInt64(&pNode->zData[4 + pRtree->nBytesPerCell*iCell]);
+}
+
+/*
+** Return coordinate iCoord from cell iCell in node pNode.
+*/
+static void nodeGetCoord(
+ Rtree *pRtree, /* The overall R-Tree */
+ RtreeNode *pNode, /* The node from which to extract a coordinate */
+ int iCell, /* The index of the cell within the node */
+ int iCoord, /* Which coordinate to extract */
+ RtreeCoord *pCoord /* OUT: Space to write result to */
+){
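+  /* The offset 12 below is the 4-byte node header plus the 8-byte rowid
+  ** at the start of the addressed cell; each coordinate occupies a
+  ** further 4 bytes. */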
+ readCoord(&pNode->zData[12 + pRtree->nBytesPerCell*iCell + 4*iCoord], pCoord);
+}
+
+/*
+** Deserialize cell iCell of node pNode. Populate the structure pointed
+** to by pCell with the results.
+*/
+static void nodeGetCell(
+ Rtree *pRtree, /* The overall R-Tree */
+ RtreeNode *pNode, /* The node containing the cell to be read */
+ int iCell, /* Index of the cell within the node */
+ RtreeCell *pCell /* OUT: Write the cell contents here */
+){
+ u8 *pData;
+ RtreeCoord *pCoord;
+ int ii = 0;
+ pCell->iRowid = nodeGetRowid(pRtree, pNode, iCell);
+ pData = pNode->zData + (12 + pRtree->nBytesPerCell*iCell);
+ pCoord = pCell->aCoord;
+ do{
+ readCoord(pData, &pCoord[ii]);
+ readCoord(pData+4, &pCoord[ii+1]);
+ pData += 8;
+ ii += 2;
+ }while( ii<pRtree->nDim2 );
+}
+
+
+/* Forward declaration for the function that does the work of
+** the virtual table module xCreate() and xConnect() methods.
+*/
+static int rtreeInit(
+ sqlite3 *, void *, int, const char *const*, sqlite3_vtab **, char **, int
+);
+
+/*
+** Rtree virtual table module xCreate method.
+*/
+static int rtreeCreate(
+ sqlite3 *db,
+ void *pAux,
+ int argc, const char *const*argv,
+ sqlite3_vtab **ppVtab,
+ char **pzErr
+){
+ return rtreeInit(db, pAux, argc, argv, ppVtab, pzErr, 1);
+}
+
+/*
+** Rtree virtual table module xConnect method.
+*/
+static int rtreeConnect(
+ sqlite3 *db,
+ void *pAux,
+ int argc, const char *const*argv,
+ sqlite3_vtab **ppVtab,
+ char **pzErr
+){
+ return rtreeInit(db, pAux, argc, argv, ppVtab, pzErr, 0);
+}
+
+/*
+** Increment the r-tree reference count.
+*/
+static void rtreeReference(Rtree *pRtree){
+ pRtree->nBusy++;
+}
+
+/*
+** Decrement the r-tree reference count. When the reference count reaches
+** zero the structure is deleted.
+*/
+static void rtreeRelease(Rtree *pRtree){
+ pRtree->nBusy--;
+ if( pRtree->nBusy==0 ){
+ pRtree->inWrTrans = 0;
+ assert( pRtree->nCursor==0 );
+ nodeBlobReset(pRtree);
+ assert( pRtree->nNodeRef==0 || pRtree->bCorrupt );
+ sqlite3_finalize(pRtree->pWriteNode);
+ sqlite3_finalize(pRtree->pDeleteNode);
+ sqlite3_finalize(pRtree->pReadRowid);
+ sqlite3_finalize(pRtree->pWriteRowid);
+ sqlite3_finalize(pRtree->pDeleteRowid);
+ sqlite3_finalize(pRtree->pReadParent);
+ sqlite3_finalize(pRtree->pWriteParent);
+ sqlite3_finalize(pRtree->pDeleteParent);
+ sqlite3_finalize(pRtree->pWriteAux);
+ sqlite3_free(pRtree->zReadAuxSql);
+ sqlite3_free(pRtree);
+ }
+}
+
+/*
+** Rtree virtual table module xDisconnect method.
+*/
+static int rtreeDisconnect(sqlite3_vtab *pVtab){
+ rtreeRelease((Rtree *)pVtab);
+ return SQLITE_OK;
+}
+
+/*
+** Rtree virtual table module xDestroy method.
+*/
+static int rtreeDestroy(sqlite3_vtab *pVtab){
+ Rtree *pRtree = (Rtree *)pVtab;
+ int rc;
+ char *zCreate = sqlite3_mprintf(
+ "DROP TABLE '%q'.'%q_node';"
+ "DROP TABLE '%q'.'%q_rowid';"
+ "DROP TABLE '%q'.'%q_parent';",
+ pRtree->zDb, pRtree->zName,
+ pRtree->zDb, pRtree->zName,
+ pRtree->zDb, pRtree->zName
+ );
+ if( !zCreate ){
+ rc = SQLITE_NOMEM;
+ }else{
+ nodeBlobReset(pRtree);
+ rc = sqlite3_exec(pRtree->db, zCreate, 0, 0, 0);
+ sqlite3_free(zCreate);
+ }
+ if( rc==SQLITE_OK ){
+ rtreeRelease(pRtree);
+ }
+
+ return rc;
+}
+
+/*
+** Rtree virtual table module xOpen method.
+*/
+static int rtreeOpen(sqlite3_vtab *pVTab, sqlite3_vtab_cursor **ppCursor){
+ int rc = SQLITE_NOMEM;
+ Rtree *pRtree = (Rtree *)pVTab;
+ RtreeCursor *pCsr;
+
+ pCsr = (RtreeCursor *)sqlite3_malloc64(sizeof(RtreeCursor));
+ if( pCsr ){
+ memset(pCsr, 0, sizeof(RtreeCursor));
+ pCsr->base.pVtab = pVTab;
+ rc = SQLITE_OK;
+ pRtree->nCursor++;
+ }
+ *ppCursor = (sqlite3_vtab_cursor *)pCsr;
+
+ return rc;
+}
+
+
+/*
+** Reset a cursor back to its initial state.
+*/
+static void resetCursor(RtreeCursor *pCsr){
+ Rtree *pRtree = (Rtree *)(pCsr->base.pVtab);
+ int ii;
+ sqlite3_stmt *pStmt;
+ if( pCsr->aConstraint ){
+ int i; /* Used to iterate through constraint array */
+ for(i=0; i<pCsr->nConstraint; i++){
+ sqlite3_rtree_query_info *pInfo = pCsr->aConstraint[i].pInfo;
+ if( pInfo ){
+ if( pInfo->xDelUser ) pInfo->xDelUser(pInfo->pUser);
+ sqlite3_free(pInfo);
+ }
+ }
+ sqlite3_free(pCsr->aConstraint);
+ pCsr->aConstraint = 0;
+ }
+ for(ii=0; ii<RTREE_CACHE_SZ; ii++) nodeRelease(pRtree, pCsr->aNode[ii]);
+ sqlite3_free(pCsr->aPoint);
+ pStmt = pCsr->pReadAux;
+ memset(pCsr, 0, sizeof(RtreeCursor));
+ pCsr->base.pVtab = (sqlite3_vtab*)pRtree;
+ pCsr->pReadAux = pStmt;
+}
+
+/*
+** Rtree virtual table module xClose method.
+*/
+static int rtreeClose(sqlite3_vtab_cursor *cur){
+ Rtree *pRtree = (Rtree *)(cur->pVtab);
+ RtreeCursor *pCsr = (RtreeCursor *)cur;
+ assert( pRtree->nCursor>0 );
+ resetCursor(pCsr);
+ sqlite3_finalize(pCsr->pReadAux);
+ sqlite3_free(pCsr);
+ pRtree->nCursor--;
+ nodeBlobReset(pRtree);
+ return SQLITE_OK;
+}
+
+/*
+** Rtree virtual table module xEof method.
+**
+** Return non-zero if the cursor does not currently point to a valid
+** record (i.e. if the scan has finished), or zero otherwise.
+*/
+static int rtreeEof(sqlite3_vtab_cursor *cur){
+ RtreeCursor *pCsr = (RtreeCursor *)cur;
+ return pCsr->atEOF;
+}
+
+/*
+** Convert raw bits from the on-disk RTree record into a coordinate value.
+** The on-disk format is big-endian and needs to be converted for little-
+** endian platforms. The on-disk record stores integer coordinates if
+** eInt is true and it stores 32-bit floating point records if eInt is
+** false. a[] is the four bytes of the on-disk record to be decoded.
+** Store the results in "r".
+**
+** There are five versions of this macro. The last one is generic. The
+** other four are various architecture-specific optimizations.
+*/
+#if SQLITE_BYTEORDER==1234 && MSVC_VERSION>=1300
+#define RTREE_DECODE_COORD(eInt, a, r) { \
+ RtreeCoord c; /* Coordinate decoded */ \
+ c.u = _byteswap_ulong(*(u32*)a); \
+ r = eInt ? (sqlite3_rtree_dbl)c.i : (sqlite3_rtree_dbl)c.f; \
+}
+#elif SQLITE_BYTEORDER==1234 && GCC_VERSION>=4003000
+#define RTREE_DECODE_COORD(eInt, a, r) { \
+ RtreeCoord c; /* Coordinate decoded */ \
+ c.u = __builtin_bswap32(*(u32*)a); \
+ r = eInt ? (sqlite3_rtree_dbl)c.i : (sqlite3_rtree_dbl)c.f; \
+}
+#elif SQLITE_BYTEORDER==1234
+#define RTREE_DECODE_COORD(eInt, a, r) { \
+ RtreeCoord c; /* Coordinate decoded */ \
+ memcpy(&c.u,a,4); \
+ c.u = ((c.u>>24)&0xff)|((c.u>>8)&0xff00)| \
+ ((c.u&0xff)<<24)|((c.u&0xff00)<<8); \
+ r = eInt ? (sqlite3_rtree_dbl)c.i : (sqlite3_rtree_dbl)c.f; \
+}
+#elif SQLITE_BYTEORDER==4321
+#define RTREE_DECODE_COORD(eInt, a, r) { \
+ RtreeCoord c; /* Coordinate decoded */ \
+ memcpy(&c.u,a,4); \
+ r = eInt ? (sqlite3_rtree_dbl)c.i : (sqlite3_rtree_dbl)c.f; \
+}
+#else
+#define RTREE_DECODE_COORD(eInt, a, r) { \
+ RtreeCoord c; /* Coordinate decoded */ \
+ c.u = ((u32)a[0]<<24) + ((u32)a[1]<<16) \
+ +((u32)a[2]<<8) + a[3]; \
+ r = eInt ? (sqlite3_rtree_dbl)c.i : (sqlite3_rtree_dbl)c.f; \
+}
+#endif
+
+/*
+** Check the RTree node or entry given by pCellData and p against the MATCH
+** constraint pConstraint.
+*/
+static int rtreeCallbackConstraint(
+ RtreeConstraint *pConstraint, /* The constraint to test */
+ int eInt, /* True if RTree holding integer coordinates */
+ u8 *pCellData, /* Raw cell content */
+ RtreeSearchPoint *pSearch, /* Container of this cell */
+ sqlite3_rtree_dbl *prScore, /* OUT: score for the cell */
+ int *peWithin /* OUT: visibility of the cell */
+){
+ sqlite3_rtree_query_info *pInfo = pConstraint->pInfo; /* Callback info */
+ int nCoord = pInfo->nCoord; /* No. of coordinates */
+ int rc; /* Callback return code */
+ RtreeCoord c; /* Translator union */
+ sqlite3_rtree_dbl aCoord[RTREE_MAX_DIMENSIONS*2]; /* Decoded coordinates */
+
+ assert( pConstraint->op==RTREE_MATCH || pConstraint->op==RTREE_QUERY );
+ assert( nCoord==2 || nCoord==4 || nCoord==6 || nCoord==8 || nCoord==10 );
+
+ if( pConstraint->op==RTREE_QUERY && pSearch->iLevel==1 ){
+ pInfo->iRowid = readInt64(pCellData);
+ }
+ pCellData += 8;
+#ifndef SQLITE_RTREE_INT_ONLY
+ if( eInt==0 ){
+ switch( nCoord ){
+ case 10: readCoord(pCellData+36, &c); aCoord[9] = c.f;
+ readCoord(pCellData+32, &c); aCoord[8] = c.f;
+ case 8: readCoord(pCellData+28, &c); aCoord[7] = c.f;
+ readCoord(pCellData+24, &c); aCoord[6] = c.f;
+ case 6: readCoord(pCellData+20, &c); aCoord[5] = c.f;
+ readCoord(pCellData+16, &c); aCoord[4] = c.f;
+ case 4: readCoord(pCellData+12, &c); aCoord[3] = c.f;
+ readCoord(pCellData+8, &c); aCoord[2] = c.f;
+ default: readCoord(pCellData+4, &c); aCoord[1] = c.f;
+ readCoord(pCellData, &c); aCoord[0] = c.f;
+ }
+ }else
+#endif
+ {
+ switch( nCoord ){
+ case 10: readCoord(pCellData+36, &c); aCoord[9] = c.i;
+ readCoord(pCellData+32, &c); aCoord[8] = c.i;
+ case 8: readCoord(pCellData+28, &c); aCoord[7] = c.i;
+ readCoord(pCellData+24, &c); aCoord[6] = c.i;
+ case 6: readCoord(pCellData+20, &c); aCoord[5] = c.i;
+ readCoord(pCellData+16, &c); aCoord[4] = c.i;
+ case 4: readCoord(pCellData+12, &c); aCoord[3] = c.i;
+ readCoord(pCellData+8, &c); aCoord[2] = c.i;
+ default: readCoord(pCellData+4, &c); aCoord[1] = c.i;
+ readCoord(pCellData, &c); aCoord[0] = c.i;
+ }
+ }
+ if( pConstraint->op==RTREE_MATCH ){
+ int eWithin = 0;
+ rc = pConstraint->u.xGeom((sqlite3_rtree_geometry*)pInfo,
+ nCoord, aCoord, &eWithin);
+ if( eWithin==0 ) *peWithin = NOT_WITHIN;
+ *prScore = RTREE_ZERO;
+ }else{
+ pInfo->aCoord = aCoord;
+ pInfo->iLevel = pSearch->iLevel - 1;
+ pInfo->rScore = pInfo->rParentScore = pSearch->rScore;
+ pInfo->eWithin = pInfo->eParentWithin = pSearch->eWithin;
+ rc = pConstraint->u.xQueryFunc(pInfo);
+ if( pInfo->eWithin<*peWithin ) *peWithin = pInfo->eWithin;
+ if( pInfo->rScore<*prScore || *prScore<RTREE_ZERO ){
+ *prScore = pInfo->rScore;
+ }
+ }
+ return rc;
+}
+
+/*
+** Check the internal RTree node given by pCellData against constraint p.
+** If this constraint cannot be satisfied by any child within the node,
+** set *peWithin to NOT_WITHIN.
+*/
+static void rtreeNonleafConstraint(
+ RtreeConstraint *p, /* The constraint to test */
+ int eInt, /* True if RTree holds integer coordinates */
+ u8 *pCellData, /* Raw cell content as appears on disk */
+ int *peWithin /* Adjust downward, as appropriate */
+){
+  sqlite3_rtree_dbl val;     /* Coordinate value converted to a double */
+
+ /* p->iCoord might point to either a lower or upper bound coordinate
+ ** in a coordinate pair. But make pCellData point to the lower bound.
+ */
+ pCellData += 8 + 4*(p->iCoord&0xfe);
+
+ assert(p->op==RTREE_LE || p->op==RTREE_LT || p->op==RTREE_GE
+ || p->op==RTREE_GT || p->op==RTREE_EQ || p->op==RTREE_TRUE
+ || p->op==RTREE_FALSE );
+ assert( ((((char*)pCellData) - (char*)0)&3)==0 ); /* 4-byte aligned */
+ switch( p->op ){
+ case RTREE_TRUE: return; /* Always satisfied */
+ case RTREE_FALSE: break; /* Never satisfied */
+ case RTREE_LE:
+ case RTREE_LT:
+ case RTREE_EQ:
+ RTREE_DECODE_COORD(eInt, pCellData, val);
+ /* val now holds the lower bound of the coordinate pair */
+ if( p->u.rValue>=val ) return;
+ if( p->op!=RTREE_EQ ) break; /* RTREE_LE and RTREE_LT end here */
+ /* Fall through for the RTREE_EQ case */
+
+ default: /* RTREE_GT or RTREE_GE, or fallthrough of RTREE_EQ */
+ pCellData += 4;
+ RTREE_DECODE_COORD(eInt, pCellData, val);
+ /* val now holds the upper bound of the coordinate pair */
+ if( p->u.rValue<=val ) return;
+ }
+ *peWithin = NOT_WITHIN;
+}
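+
+/* For example, with the constraint "x<=5" (RTREE_LE) and an interior cell
+** whose x-range is [7,9], the decoded lower bound 7 exceeds 5, so no child
+** of the cell can satisfy the constraint and *peWithin is set to
+** NOT_WITHIN. */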
+
+/*
+** Check the leaf RTree cell given by pCellData against constraint p.
+** If this constraint is not satisfied, set *peWithin to NOT_WITHIN.
+** If the constraint is satisfied, leave *peWithin unchanged.
+**
+** The constraint is of the form: xN op $val
+**
+** The op is given by p->op. The xN is p->iCoord-th coordinate in
+** pCellData. $val is given by p->u.rValue.
+*/
+static void rtreeLeafConstraint(
+ RtreeConstraint *p, /* The constraint to test */
+ int eInt, /* True if RTree holds integer coordinates */
+ u8 *pCellData, /* Raw cell content as appears on disk */
+ int *peWithin /* Adjust downward, as appropriate */
+){
+ RtreeDValue xN; /* Coordinate value converted to a double */
+
+ assert(p->op==RTREE_LE || p->op==RTREE_LT || p->op==RTREE_GE
+ || p->op==RTREE_GT || p->op==RTREE_EQ || p->op==RTREE_TRUE
+ || p->op==RTREE_FALSE );
+ pCellData += 8 + p->iCoord*4;
+ assert( ((((char*)pCellData) - (char*)0)&3)==0 ); /* 4-byte aligned */
+ RTREE_DECODE_COORD(eInt, pCellData, xN);
+ switch( p->op ){
+ case RTREE_TRUE: return; /* Always satisfied */
+ case RTREE_FALSE: break; /* Never satisfied */
+ case RTREE_LE: if( xN <= p->u.rValue ) return; break;
+ case RTREE_LT: if( xN < p->u.rValue ) return; break;
+ case RTREE_GE: if( xN >= p->u.rValue ) return; break;
+ case RTREE_GT: if( xN > p->u.rValue ) return; break;
+ default: if( xN == p->u.rValue ) return; break;
+ }
+ *peWithin = NOT_WITHIN;
+}
+
+/*
+** One of the cells in node pNode is guaranteed to have a 64-bit
+** integer value equal to iRowid. Return the index of this cell.
+*/
+static int nodeRowidIndex(
+ Rtree *pRtree,
+ RtreeNode *pNode,
+ i64 iRowid,
+ int *piIndex
+){
+ int ii;
+ int nCell = NCELL(pNode);
+ assert( nCell<200 );
+ for(ii=0; ii<nCell; ii++){
+ if( nodeGetRowid(pRtree, pNode, ii)==iRowid ){
+ *piIndex = ii;
+ return SQLITE_OK;
+ }
+ }
+ RTREE_IS_CORRUPT(pRtree);
+ return SQLITE_CORRUPT_VTAB;
+}
+
+/*
+** Return the index of the cell containing a pointer to node pNode
+** in its parent. If pNode is the root node, return -1.
+*/
+static int nodeParentIndex(Rtree *pRtree, RtreeNode *pNode, int *piIndex){
+ RtreeNode *pParent = pNode->pParent;
+ if( pParent ){
+ return nodeRowidIndex(pRtree, pParent, pNode->iNode, piIndex);
+ }
+ *piIndex = -1;
+ return SQLITE_OK;
+}
+
+/*
+** Compare two search points. Return negative, zero, or positive if the first
+** is less than, equal to, or greater than the second.
+**
+** The rScore is the primary key. Smaller rScore values come first.
+** If the rScore is a tie, then use iLevel as the tie breaker with smaller
+** iLevel values coming first. In this way, if rScore is the same for all
+** SearchPoints, then iLevel becomes the deciding factor and the result
+** is a depth-first search, which is the desired default behavior.
+*/
+static int rtreeSearchPointCompare(
+ const RtreeSearchPoint *pA,
+ const RtreeSearchPoint *pB
+){
+ if( pA->rScore<pB->rScore ) return -1;
+ if( pA->rScore>pB->rScore ) return +1;
+ if( pA->iLevel<pB->iLevel ) return -1;
+ if( pA->iLevel>pB->iLevel ) return +1;
+ return 0;
+}
+
+/*
+** Interchange two search points in a cursor.
+*/
+static void rtreeSearchPointSwap(RtreeCursor *p, int i, int j){
+ RtreeSearchPoint t = p->aPoint[i];
+ assert( i<j );
+ p->aPoint[i] = p->aPoint[j];
+ p->aPoint[j] = t;
+ i++; j++;
+ if( i<RTREE_CACHE_SZ ){
+ if( j>=RTREE_CACHE_SZ ){
+ nodeRelease(RTREE_OF_CURSOR(p), p->aNode[i]);
+ p->aNode[i] = 0;
+ }else{
+ RtreeNode *pTemp = p->aNode[i];
+ p->aNode[i] = p->aNode[j];
+ p->aNode[j] = pTemp;
+ }
+ }
+}
+
+/*
+** Return the search point with the lowest current score.
+*/
+static RtreeSearchPoint *rtreeSearchPointFirst(RtreeCursor *pCur){
+ return pCur->bPoint ? &pCur->sPoint : pCur->nPoint ? pCur->aPoint : 0;
+}
+
+/*
+** Get the RtreeNode for the search point with the lowest score.
+*/
+static RtreeNode *rtreeNodeOfFirstSearchPoint(RtreeCursor *pCur, int *pRC){
+ sqlite3_int64 id;
+ int ii = 1 - pCur->bPoint;
+ assert( ii==0 || ii==1 );
+ assert( pCur->bPoint || pCur->nPoint );
+ if( pCur->aNode[ii]==0 ){
+ assert( pRC!=0 );
+ id = ii ? pCur->aPoint[0].id : pCur->sPoint.id;
+ *pRC = nodeAcquire(RTREE_OF_CURSOR(pCur), id, 0, &pCur->aNode[ii]);
+ }
+ return pCur->aNode[ii];
+}
+
+/*
+** Push a new element onto the priority queue
+*/
+static RtreeSearchPoint *rtreeEnqueue(
+ RtreeCursor *pCur, /* The cursor */
+ RtreeDValue rScore, /* Score for the new search point */
+ u8 iLevel /* Level for the new search point */
+){
+ int i, j;
+ RtreeSearchPoint *pNew;
+ if( pCur->nPoint>=pCur->nPointAlloc ){
+ int nNew = pCur->nPointAlloc*2 + 8;
+ pNew = sqlite3_realloc64(pCur->aPoint, nNew*sizeof(pCur->aPoint[0]));
+ if( pNew==0 ) return 0;
+ pCur->aPoint = pNew;
+ pCur->nPointAlloc = nNew;
+ }
+ i = pCur->nPoint++;
+ pNew = pCur->aPoint + i;
+ pNew->rScore = rScore;
+ pNew->iLevel = iLevel;
+ assert( iLevel<=RTREE_MAX_DEPTH );
+ while( i>0 ){
+ RtreeSearchPoint *pParent;
+ j = (i-1)/2;
+ pParent = pCur->aPoint + j;
+ if( rtreeSearchPointCompare(pNew, pParent)>=0 ) break;
+ rtreeSearchPointSwap(pCur, j, i);
+ i = j;
+ pNew = pParent;
+ }
+ return pNew;
+}
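+
+/*
+** The queue managed by rtreeEnqueue() is a standard binary min-heap
+** stored in the aPoint[] array: the parent of slot i is slot (i-1)/2
+** and its children are slots 2*i+1 and 2*i+2. For example, a point
+** appended at slot 5 is compared against slot 2 and then slot 0,
+** swapping upward until the ordering defined by
+** rtreeSearchPointCompare() is restored.
+*/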
+
+/*
+** Allocate a new RtreeSearchPoint and return a pointer to it. Return
+** NULL if malloc fails.
+*/
+static RtreeSearchPoint *rtreeSearchPointNew(
+ RtreeCursor *pCur, /* The cursor */
+ RtreeDValue rScore, /* Score for the new search point */
+ u8 iLevel /* Level for the new search point */
+){
+ RtreeSearchPoint *pNew, *pFirst;
+ pFirst = rtreeSearchPointFirst(pCur);
+ pCur->anQueue[iLevel]++;
+ if( pFirst==0
+ || pFirst->rScore>rScore
+ || (pFirst->rScore==rScore && pFirst->iLevel>iLevel)
+ ){
+ if( pCur->bPoint ){
+ int ii;
+ pNew = rtreeEnqueue(pCur, rScore, iLevel);
+ if( pNew==0 ) return 0;
+ ii = (int)(pNew - pCur->aPoint) + 1;
+ if( ii<RTREE_CACHE_SZ ){
+ assert( pCur->aNode[ii]==0 );
+ pCur->aNode[ii] = pCur->aNode[0];
+ }else{
+ nodeRelease(RTREE_OF_CURSOR(pCur), pCur->aNode[0]);
+ }
+ pCur->aNode[0] = 0;
+ *pNew = pCur->sPoint;
+ }
+ pCur->sPoint.rScore = rScore;
+ pCur->sPoint.iLevel = iLevel;
+ pCur->bPoint = 1;
+ return &pCur->sPoint;
+ }else{
+ return rtreeEnqueue(pCur, rScore, iLevel);
+ }
+}
+
+#if 0
+/* Tracing routines for the RtreeSearchPoint queue */
+static void tracePoint(RtreeSearchPoint *p, int idx, RtreeCursor *pCur){
+ if( idx<0 ){ printf(" s"); }else{ printf("%2d", idx); }
+ printf(" %d.%05lld.%02d %g %d",
+ p->iLevel, p->id, p->iCell, p->rScore, p->eWithin
+ );
+ idx++;
+ if( idx<RTREE_CACHE_SZ ){
+ printf(" %p\n", pCur->aNode[idx]);
+ }else{
+ printf("\n");
+ }
+}
+static void traceQueue(RtreeCursor *pCur, const char *zPrefix){
+ int ii;
+ printf("=== %9s ", zPrefix);
+ if( pCur->bPoint ){
+ tracePoint(&pCur->sPoint, -1, pCur);
+ }
+ for(ii=0; ii<pCur->nPoint; ii++){
+ if( ii>0 || pCur->bPoint ) printf(" ");
+ tracePoint(&pCur->aPoint[ii], ii, pCur);
+ }
+}
+# define RTREE_QUEUE_TRACE(A,B) traceQueue(A,B)
+#else
+# define RTREE_QUEUE_TRACE(A,B) /* no-op */
+#endif
+
+/* Remove the search point with the lowest current score.
+*/
+static void rtreeSearchPointPop(RtreeCursor *p){
+ int i, j, k, n;
+ i = 1 - p->bPoint;
+ assert( i==0 || i==1 );
+ if( p->aNode[i] ){
+ nodeRelease(RTREE_OF_CURSOR(p), p->aNode[i]);
+ p->aNode[i] = 0;
+ }
+ if( p->bPoint ){
+ p->anQueue[p->sPoint.iLevel]--;
+ p->bPoint = 0;
+ }else if( p->nPoint ){
+ p->anQueue[p->aPoint[0].iLevel]--;
+ n = --p->nPoint;
+ p->aPoint[0] = p->aPoint[n];
+ if( n<RTREE_CACHE_SZ-1 ){
+ p->aNode[1] = p->aNode[n+1];
+ p->aNode[n+1] = 0;
+ }
+ i = 0;
+ while( (j = i*2+1)<n ){
+ k = j+1;
+ if( k<n && rtreeSearchPointCompare(&p->aPoint[k], &p->aPoint[j])<0 ){
+ if( rtreeSearchPointCompare(&p->aPoint[k], &p->aPoint[i])<0 ){
+ rtreeSearchPointSwap(p, i, k);
+ i = k;
+ }else{
+ break;
+ }
+ }else{
+ if( rtreeSearchPointCompare(&p->aPoint[j], &p->aPoint[i])<0 ){
+ rtreeSearchPointSwap(p, i, j);
+ i = j;
+ }else{
+ break;
+ }
+ }
+ }
+ }
+}
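+
+/*
+** Note that rtreeSearchPointSwap() also exchanges the corresponding
+** slots of the RtreeCursor.aNode[] cache (aPoint[k] pairs with
+** aNode[k+1], since aNode[0] belongs to sPoint), so the first few
+** queue entries keep their acquired nodes as the heap is re-ordered.
+*/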
+
+
+/*
+** Continue the search on cursor pCur until the front of the queue
+** contains an entry suitable for returning as a result-set row,
+** or until the RtreeSearchPoint queue is empty, indicating that the
+** query has completed.
+*/
+static int rtreeStepToLeaf(RtreeCursor *pCur){
+ RtreeSearchPoint *p;
+ Rtree *pRtree = RTREE_OF_CURSOR(pCur);
+ RtreeNode *pNode;
+ int eWithin;
+ int rc = SQLITE_OK;
+ int nCell;
+ int nConstraint = pCur->nConstraint;
+ int ii;
+ int eInt;
+ RtreeSearchPoint x;
+
+ eInt = pRtree->eCoordType==RTREE_COORD_INT32;
+ while( (p = rtreeSearchPointFirst(pCur))!=0 && p->iLevel>0 ){
+ u8 *pCellData;
+ pNode = rtreeNodeOfFirstSearchPoint(pCur, &rc);
+ if( rc ) return rc;
+ nCell = NCELL(pNode);
+ assert( nCell<200 );
+ pCellData = pNode->zData + (4+pRtree->nBytesPerCell*p->iCell);
+ while( p->iCell<nCell ){
+ sqlite3_rtree_dbl rScore = (sqlite3_rtree_dbl)-1;
+ eWithin = FULLY_WITHIN;
+ for(ii=0; ii<nConstraint; ii++){
+ RtreeConstraint *pConstraint = pCur->aConstraint + ii;
+ if( pConstraint->op>=RTREE_MATCH ){
+ rc = rtreeCallbackConstraint(pConstraint, eInt, pCellData, p,
+ &rScore, &eWithin);
+ if( rc ) return rc;
+ }else if( p->iLevel==1 ){
+ rtreeLeafConstraint(pConstraint, eInt, pCellData, &eWithin);
+ }else{
+ rtreeNonleafConstraint(pConstraint, eInt, pCellData, &eWithin);
+ }
+ if( eWithin==NOT_WITHIN ){
+ p->iCell++;
+ pCellData += pRtree->nBytesPerCell;
+ break;
+ }
+ }
+ if( eWithin==NOT_WITHIN ) continue;
+ p->iCell++;
+ x.iLevel = p->iLevel - 1;
+ if( x.iLevel ){
+ x.id = readInt64(pCellData);
+ for(ii=0; ii<pCur->nPoint; ii++){
+ if( pCur->aPoint[ii].id==x.id ){
+ RTREE_IS_CORRUPT(pRtree);
+ return SQLITE_CORRUPT_VTAB;
+ }
+ }
+ x.iCell = 0;
+ }else{
+ x.id = p->id;
+ x.iCell = p->iCell - 1;
+ }
+ if( p->iCell>=nCell ){
+ RTREE_QUEUE_TRACE(pCur, "POP-S:");
+ rtreeSearchPointPop(pCur);
+ }
+ if( rScore<RTREE_ZERO ) rScore = RTREE_ZERO;
+ p = rtreeSearchPointNew(pCur, rScore, x.iLevel);
+ if( p==0 ) return SQLITE_NOMEM;
+ p->eWithin = (u8)eWithin;
+ p->id = x.id;
+ p->iCell = x.iCell;
+ RTREE_QUEUE_TRACE(pCur, "PUSH-S:");
+ break;
+ }
+ if( p->iCell>=nCell ){
+ RTREE_QUEUE_TRACE(pCur, "POP-Se:");
+ rtreeSearchPointPop(pCur);
+ }
+ }
+ pCur->atEOF = p==0;
+ return SQLITE_OK;
+}
+
+/*
+** Rtree virtual table module xNext method.
+*/
+static int rtreeNext(sqlite3_vtab_cursor *pVtabCursor){
+ RtreeCursor *pCsr = (RtreeCursor *)pVtabCursor;
+ int rc = SQLITE_OK;
+
+ /* Move to the next entry that matches the configured constraints. */
+ RTREE_QUEUE_TRACE(pCsr, "POP-Nx:");
+ if( pCsr->bAuxValid ){
+ pCsr->bAuxValid = 0;
+ sqlite3_reset(pCsr->pReadAux);
+ }
+ rtreeSearchPointPop(pCsr);
+ rc = rtreeStepToLeaf(pCsr);
+ return rc;
+}
+
+/*
+** Rtree virtual table module xRowid method.
+*/
+static int rtreeRowid(sqlite3_vtab_cursor *pVtabCursor, sqlite_int64 *pRowid){
+ RtreeCursor *pCsr = (RtreeCursor *)pVtabCursor;
+ RtreeSearchPoint *p = rtreeSearchPointFirst(pCsr);
+ int rc = SQLITE_OK;
+ RtreeNode *pNode = rtreeNodeOfFirstSearchPoint(pCsr, &rc);
+ if( rc==SQLITE_OK && p ){
+ *pRowid = nodeGetRowid(RTREE_OF_CURSOR(pCsr), pNode, p->iCell);
+ }
+ return rc;
+}
+
+/*
+** Rtree virtual table module xColumn method.
+*/
+static int rtreeColumn(sqlite3_vtab_cursor *cur, sqlite3_context *ctx, int i){
+ Rtree *pRtree = (Rtree *)cur->pVtab;
+ RtreeCursor *pCsr = (RtreeCursor *)cur;
+ RtreeSearchPoint *p = rtreeSearchPointFirst(pCsr);
+ RtreeCoord c;
+ int rc = SQLITE_OK;
+ RtreeNode *pNode = rtreeNodeOfFirstSearchPoint(pCsr, &rc);
+
+ if( rc ) return rc;
+ if( p==0 ) return SQLITE_OK;
+ if( i==0 ){
+ sqlite3_result_int64(ctx, nodeGetRowid(pRtree, pNode, p->iCell));
+ }else if( i<=pRtree->nDim2 ){
+ nodeGetCoord(pRtree, pNode, p->iCell, i-1, &c);
+#ifndef SQLITE_RTREE_INT_ONLY
+ if( pRtree->eCoordType==RTREE_COORD_REAL32 ){
+ sqlite3_result_double(ctx, c.f);
+ }else
+#endif
+ {
+ assert( pRtree->eCoordType==RTREE_COORD_INT32 );
+ sqlite3_result_int(ctx, c.i);
+ }
+ }else{
+ if( !pCsr->bAuxValid ){
+ if( pCsr->pReadAux==0 ){
+ rc = sqlite3_prepare_v3(pRtree->db, pRtree->zReadAuxSql, -1, 0,
+ &pCsr->pReadAux, 0);
+ if( rc ) return rc;
+ }
+ sqlite3_bind_int64(pCsr->pReadAux, 1,
+ nodeGetRowid(pRtree, pNode, p->iCell));
+ rc = sqlite3_step(pCsr->pReadAux);
+ if( rc==SQLITE_ROW ){
+ pCsr->bAuxValid = 1;
+ }else{
+ sqlite3_reset(pCsr->pReadAux);
+ if( rc==SQLITE_DONE ) rc = SQLITE_OK;
+ return rc;
+ }
+ }
+ sqlite3_result_value(ctx,
+ sqlite3_column_value(pCsr->pReadAux, i - pRtree->nDim2 + 1));
+ }
+ return SQLITE_OK;
+}
+
+/*
+** Use nodeAcquire() to obtain the leaf node containing the record with
+** rowid iRowid. If successful, set *ppLeaf to point to the node and
+** return SQLITE_OK. If there is no such record in the table, set
+** *ppLeaf to 0 and return SQLITE_OK. If an error occurs, set *ppLeaf
+** to zero and return an SQLite error code.
+*/
+static int findLeafNode(
+ Rtree *pRtree, /* RTree to search */
+ i64 iRowid, /* The rowid to search for */
+ RtreeNode **ppLeaf, /* Write the node here */
+ sqlite3_int64 *piNode /* Write the node-id here */
+){
+ int rc;
+ *ppLeaf = 0;
+ sqlite3_bind_int64(pRtree->pReadRowid, 1, iRowid);
+ if( sqlite3_step(pRtree->pReadRowid)==SQLITE_ROW ){
+ i64 iNode = sqlite3_column_int64(pRtree->pReadRowid, 0);
+ if( piNode ) *piNode = iNode;
+ rc = nodeAcquire(pRtree, iNode, 0, ppLeaf);
+ sqlite3_reset(pRtree->pReadRowid);
+ }else{
+ rc = sqlite3_reset(pRtree->pReadRowid);
+ }
+ return rc;
+}
+
+/*
+** This function is called to configure the RtreeConstraint object passed
+** as the second argument for a MATCH constraint. The value passed as the
+** first argument to this function is the right-hand operand to the MATCH
+** operator.
+*/
+static int deserializeGeometry(sqlite3_value *pValue, RtreeConstraint *pCons){
+ RtreeMatchArg *pBlob, *pSrc; /* BLOB returned by geometry function */
+ sqlite3_rtree_query_info *pInfo; /* Callback information */
+
+ pSrc = sqlite3_value_pointer(pValue, "RtreeMatchArg");
+ if( pSrc==0 ) return SQLITE_ERROR;
+ pInfo = (sqlite3_rtree_query_info*)
+ sqlite3_malloc64( sizeof(*pInfo)+pSrc->iSize );
+ if( !pInfo ) return SQLITE_NOMEM;
+ memset(pInfo, 0, sizeof(*pInfo));
+ pBlob = (RtreeMatchArg*)&pInfo[1];
+ memcpy(pBlob, pSrc, pSrc->iSize);
+ pInfo->pContext = pBlob->cb.pContext;
+ pInfo->nParam = pBlob->nParam;
+ pInfo->aParam = pBlob->aParam;
+ pInfo->apSqlParam = pBlob->apSqlParam;
+
+ if( pBlob->cb.xGeom ){
+ pCons->u.xGeom = pBlob->cb.xGeom;
+ }else{
+ pCons->op = RTREE_QUERY;
+ pCons->u.xQueryFunc = pBlob->cb.xQueryFunc;
+ }
+ pCons->pInfo = pInfo;
+ return SQLITE_OK;
+}
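+
+/*
+** A hedged usage sketch (the table "rt", the SQL function name "circle"
+** and the callback exampleGeomCb below are all hypothetical): after
+**
+**   sqlite3_rtree_geometry_callback(db, "circle", exampleGeomCb, 0);
+**
+** a query such as "SELECT id FROM rt WHERE id MATCH circle(45.3,22.9,5.0)"
+** delivers the right-hand operand to this function as the RtreeMatchArg
+** pointer unpacked above.
+*/
+#if 0
+/* Minimal sketch of a geometry callback: match any cell whose first
+** dimension overlaps the closed interval [aParam[0], aParam[1]].
+*/
+static int exampleGeomCb(
+  sqlite3_rtree_geometry *p,     /* Parameters passed to the SQL function */
+  int nCoord,                    /* Number of entries in aCoord[] */
+  sqlite3_rtree_dbl *aCoord,     /* The bounding box of the cell */
+  int *pRes                      /* OUT: true if the cell matches */
+){
+  UNUSED_PARAMETER(nCoord);
+  if( p->nParam!=2 ) return SQLITE_ERROR;
+  *pRes = aCoord[0]<=p->aParam[1] && aCoord[1]>=p->aParam[0];
+  return SQLITE_OK;
+}
+#endif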
+
+/*
+** Rtree virtual table module xFilter method.
+*/
+static int rtreeFilter(
+ sqlite3_vtab_cursor *pVtabCursor,
+ int idxNum, const char *idxStr,
+ int argc, sqlite3_value **argv
+){
+ Rtree *pRtree = (Rtree *)pVtabCursor->pVtab;
+ RtreeCursor *pCsr = (RtreeCursor *)pVtabCursor;
+ RtreeNode *pRoot = 0;
+ int ii;
+ int rc = SQLITE_OK;
+ int iCell = 0;
+
+ rtreeReference(pRtree);
+
+ /* Reset the cursor to the same state as rtreeOpen() leaves it in. */
+ resetCursor(pCsr);
+
+ pCsr->iStrategy = idxNum;
+ if( idxNum==1 ){
+ /* Special case - lookup by rowid. */
+ RtreeNode *pLeaf; /* Leaf on which the required cell resides */
+ RtreeSearchPoint *p; /* Search point for the leaf */
+ i64 iRowid = sqlite3_value_int64(argv[0]);
+ i64 iNode = 0;
+ int eType = sqlite3_value_numeric_type(argv[0]);
+ if( eType==SQLITE_INTEGER
+ || (eType==SQLITE_FLOAT && sqlite3_value_double(argv[0])==iRowid)
+ ){
+ rc = findLeafNode(pRtree, iRowid, &pLeaf, &iNode);
+ }else{
+ rc = SQLITE_OK;
+ pLeaf = 0;
+ }
+ if( rc==SQLITE_OK && pLeaf!=0 ){
+ p = rtreeSearchPointNew(pCsr, RTREE_ZERO, 0);
+ assert( p!=0 ); /* Always returns pCsr->sPoint */
+ pCsr->aNode[0] = pLeaf;
+ p->id = iNode;
+ p->eWithin = PARTLY_WITHIN;
+ rc = nodeRowidIndex(pRtree, pLeaf, iRowid, &iCell);
+ p->iCell = (u8)iCell;
+ RTREE_QUEUE_TRACE(pCsr, "PUSH-F1:");
+ }else{
+ pCsr->atEOF = 1;
+ }
+ }else{
+ /* Normal case - r-tree scan. Set up the RtreeCursor.aConstraint array
+ ** with the configured constraints.
+ */
+ rc = nodeAcquire(pRtree, 1, 0, &pRoot);
+ if( rc==SQLITE_OK && argc>0 ){
+ pCsr->aConstraint = sqlite3_malloc64(sizeof(RtreeConstraint)*argc);
+ pCsr->nConstraint = argc;
+ if( !pCsr->aConstraint ){
+ rc = SQLITE_NOMEM;
+ }else{
+ memset(pCsr->aConstraint, 0, sizeof(RtreeConstraint)*argc);
+ memset(pCsr->anQueue, 0, sizeof(u32)*(pRtree->iDepth + 1));
+ assert( (idxStr==0 && argc==0)
+ || (idxStr && (int)strlen(idxStr)==argc*2) );
+ for(ii=0; ii<argc; ii++){
+ RtreeConstraint *p = &pCsr->aConstraint[ii];
+ int eType = sqlite3_value_numeric_type(argv[ii]);
+ p->op = idxStr[ii*2];
+ p->iCoord = idxStr[ii*2+1]-'0';
+ if( p->op>=RTREE_MATCH ){
+ /* A MATCH operator. The right-hand-side must be a blob that
+ ** can be cast into an RtreeMatchArg object. One created using
+ ** an sqlite3_rtree_geometry_callback() SQL user function.
+ */
+ rc = deserializeGeometry(argv[ii], p);
+ if( rc!=SQLITE_OK ){
+ break;
+ }
+ p->pInfo->nCoord = pRtree->nDim2;
+ p->pInfo->anQueue = pCsr->anQueue;
+ p->pInfo->mxLevel = pRtree->iDepth + 1;
+ }else if( eType==SQLITE_INTEGER || eType==SQLITE_FLOAT ){
+#ifdef SQLITE_RTREE_INT_ONLY
+ p->u.rValue = sqlite3_value_int64(argv[ii]);
+#else
+ p->u.rValue = sqlite3_value_double(argv[ii]);
+#endif
+ }else{
+ p->u.rValue = RTREE_ZERO;
+ if( eType==SQLITE_NULL ){
+ p->op = RTREE_FALSE;
+ }else if( p->op==RTREE_LT || p->op==RTREE_LE ){
+ p->op = RTREE_TRUE;
+ }else{
+ p->op = RTREE_FALSE;
+ }
+ }
+ }
+ }
+ }
+ if( rc==SQLITE_OK ){
+ RtreeSearchPoint *pNew;
+ pNew = rtreeSearchPointNew(pCsr, RTREE_ZERO, (u8)(pRtree->iDepth+1));
+ if( pNew==0 ) return SQLITE_NOMEM;
+ pNew->id = 1;
+ pNew->iCell = 0;
+ pNew->eWithin = PARTLY_WITHIN;
+ assert( pCsr->bPoint==1 );
+ pCsr->aNode[0] = pRoot;
+ pRoot = 0;
+ RTREE_QUEUE_TRACE(pCsr, "PUSH-Fm:");
+ rc = rtreeStepToLeaf(pCsr);
+ }
+ }
+
+ nodeRelease(pRtree, pRoot);
+ rtreeRelease(pRtree);
+ return rc;
+}
+
+/*
+** Rtree virtual table module xBestIndex method. There are two
+** table scan strategies to choose from (in order from most to
+** least desirable):
+**
+** idxNum idxStr Strategy
+** ------------------------------------------------
+** 1 Unused Direct lookup by rowid.
+** 2 See below R-tree query or full-table scan.
+** ------------------------------------------------
+**
+** If strategy 1 is used, then idxStr is not meaningful. If strategy
+** 2 is used, idxStr is formatted to contain 2 bytes for each
+** constraint used. The first two bytes of idxStr correspond to
+** the constraint in sqlite3_index_info.aConstraintUsage[] with
+** (argvIndex==1) etc.
+**
+** The first of each pair of bytes in idxStr identifies the constraint
+** operator as follows:
+**
+** Operator Byte Value
+** ----------------------
+** = 0x41 ('A')
+** <= 0x42 ('B')
+** < 0x43 ('C')
+** >= 0x44 ('D')
+** > 0x45 ('E')
+** MATCH 0x46 ('F')
+** ----------------------
+**
+** The second of each pair of bytes identifies the coordinate column
+** to which the constraint applies, as an ASCII digit. The leftmost
+** coordinate column is '0', the second from the left '1', and so on.
+*/
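+/*
+** For example, given a hypothetical table created with
+** "CREATE VIRTUAL TABLE rt USING rtree(id,x1,x2)" and the WHERE clause
+** "x1<=10 AND x2>=5", both constraints are usable and idxNum is set to
+** 2. Assuming the constraints arrive in that order, idxStr becomes
+** "B0D1": 'B' (<=) on coordinate column 0 (x1), then 'D' (>=) on
+** coordinate column 1 (x2).
+*/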
+static int rtreeBestIndex(sqlite3_vtab *tab, sqlite3_index_info *pIdxInfo){
+ Rtree *pRtree = (Rtree*)tab;
+ int rc = SQLITE_OK;
+ int ii;
+ int bMatch = 0; /* True if there exists a MATCH constraint */
+ i64 nRow; /* Estimated rows returned by this scan */
+
+ int iIdx = 0;
+ char zIdxStr[RTREE_MAX_DIMENSIONS*8+1];
+ memset(zIdxStr, 0, sizeof(zIdxStr));
+
+ /* Check if there exists a MATCH constraint - even an unusable one. If there
+ ** is, do not consider the lookup-by-rowid plan as using such a plan would
+ ** require the VDBE to evaluate the MATCH constraint, which is not currently
+ ** possible. */
+ for(ii=0; ii<pIdxInfo->nConstraint; ii++){
+ if( pIdxInfo->aConstraint[ii].op==SQLITE_INDEX_CONSTRAINT_MATCH ){
+ bMatch = 1;
+ }
+ }
+
+ assert( pIdxInfo->idxStr==0 );
+ for(ii=0; ii<pIdxInfo->nConstraint && iIdx<(int)(sizeof(zIdxStr)-1); ii++){
+ struct sqlite3_index_constraint *p = &pIdxInfo->aConstraint[ii];
+
+ if( bMatch==0 && p->usable
+ && p->iColumn==0 && p->op==SQLITE_INDEX_CONSTRAINT_EQ
+ ){
+ /* We have an equality constraint on the rowid. Use strategy 1. */
+ int jj;
+ for(jj=0; jj<ii; jj++){
+ pIdxInfo->aConstraintUsage[jj].argvIndex = 0;
+ pIdxInfo->aConstraintUsage[jj].omit = 0;
+ }
+ pIdxInfo->idxNum = 1;
+ pIdxInfo->aConstraintUsage[ii].argvIndex = 1;
+ pIdxInfo->aConstraintUsage[jj].omit = 1;
+
+      /* This strategy involves two rowid lookups on B-Tree structures
+      ** and then a linear search of an R-Tree node. This should be
+      ** considered almost as quick as a direct rowid lookup (for which
+      ** SQLite uses an internal cost of 0.0). It is expected to return
+      ** a single row.
+ */
+ pIdxInfo->estimatedCost = 30.0;
+ pIdxInfo->estimatedRows = 1;
+ pIdxInfo->idxFlags = SQLITE_INDEX_SCAN_UNIQUE;
+ return SQLITE_OK;
+ }
+
+ if( p->usable
+ && ((p->iColumn>0 && p->iColumn<=pRtree->nDim2)
+ || p->op==SQLITE_INDEX_CONSTRAINT_MATCH)
+ ){
+ u8 op;
+ switch( p->op ){
+ case SQLITE_INDEX_CONSTRAINT_EQ: op = RTREE_EQ; break;
+ case SQLITE_INDEX_CONSTRAINT_GT: op = RTREE_GT; break;
+ case SQLITE_INDEX_CONSTRAINT_LE: op = RTREE_LE; break;
+ case SQLITE_INDEX_CONSTRAINT_LT: op = RTREE_LT; break;
+ case SQLITE_INDEX_CONSTRAINT_GE: op = RTREE_GE; break;
+ case SQLITE_INDEX_CONSTRAINT_MATCH: op = RTREE_MATCH; break;
+ default: op = 0; break;
+ }
+ if( op ){
+ zIdxStr[iIdx++] = op;
+ zIdxStr[iIdx++] = (char)(p->iColumn - 1 + '0');
+ pIdxInfo->aConstraintUsage[ii].argvIndex = (iIdx/2);
+ pIdxInfo->aConstraintUsage[ii].omit = 1;
+ }
+ }
+ }
+
+ pIdxInfo->idxNum = 2;
+ pIdxInfo->needToFreeIdxStr = 1;
+ if( iIdx>0 && 0==(pIdxInfo->idxStr = sqlite3_mprintf("%s", zIdxStr)) ){
+ return SQLITE_NOMEM;
+ }
+
+ nRow = pRtree->nRowEst >> (iIdx/2);
+ pIdxInfo->estimatedCost = (double)6.0 * (double)nRow;
+ pIdxInfo->estimatedRows = nRow;
+
+ return rc;
+}
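+
+/*
+** Worked example of the estimate above: with a row estimate of
+** 1048576 and two usable range constraints (iIdx==4), the scan is
+** costed at nRow = 1048576>>2 = 262144 rows, with
+** estimatedCost = 6.0*262144.
+*/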
+
+/*
+** Return the N-dimensional volume of the cell stored in *p.
+*/
+static RtreeDValue cellArea(Rtree *pRtree, RtreeCell *p){
+ RtreeDValue area = (RtreeDValue)1;
+ assert( pRtree->nDim>=1 && pRtree->nDim<=5 );
+#ifndef SQLITE_RTREE_INT_ONLY
+ if( pRtree->eCoordType==RTREE_COORD_REAL32 ){
+ switch( pRtree->nDim ){
+ case 5: area = p->aCoord[9].f - p->aCoord[8].f;
+ case 4: area *= p->aCoord[7].f - p->aCoord[6].f;
+ case 3: area *= p->aCoord[5].f - p->aCoord[4].f;
+ case 2: area *= p->aCoord[3].f - p->aCoord[2].f;
+ default: area *= p->aCoord[1].f - p->aCoord[0].f;
+ }
+ }else
+#endif
+ {
+ switch( pRtree->nDim ){
+ case 5: area = (i64)p->aCoord[9].i - (i64)p->aCoord[8].i;
+ case 4: area *= (i64)p->aCoord[7].i - (i64)p->aCoord[6].i;
+ case 3: area *= (i64)p->aCoord[5].i - (i64)p->aCoord[4].i;
+ case 2: area *= (i64)p->aCoord[3].i - (i64)p->aCoord[2].i;
+ default: area *= (i64)p->aCoord[1].i - (i64)p->aCoord[0].i;
+ }
+ }
+ return area;
+}
+
+/*
+** Return the margin length of cell p. The margin length is the sum
+** of the object's extents in each dimension.
+*/
+static RtreeDValue cellMargin(Rtree *pRtree, RtreeCell *p){
+ RtreeDValue margin = 0;
+ int ii = pRtree->nDim2 - 2;
+ do{
+ margin += (DCOORD(p->aCoord[ii+1]) - DCOORD(p->aCoord[ii]));
+ ii -= 2;
+ }while( ii>=0 );
+ return margin;
+}
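+
+/*
+** Worked example for cellArea() and cellMargin(): for a 2-D cell with
+** aCoord = {x1=1, x2=4, y1=2, y2=6}, cellArea() returns
+** (4-1)*(6-2)==12 (the switch cases above fall through deliberately,
+** multiplying in one extent per dimension) and cellMargin() returns
+** (4-1)+(6-2)==7.
+*/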
+
+/*
+** Store the union of cells p1 and p2 in p1.
+*/
+static void cellUnion(Rtree *pRtree, RtreeCell *p1, RtreeCell *p2){
+ int ii = 0;
+ if( pRtree->eCoordType==RTREE_COORD_REAL32 ){
+ do{
+ p1->aCoord[ii].f = MIN(p1->aCoord[ii].f, p2->aCoord[ii].f);
+ p1->aCoord[ii+1].f = MAX(p1->aCoord[ii+1].f, p2->aCoord[ii+1].f);
+ ii += 2;
+ }while( ii<pRtree->nDim2 );
+ }else{
+ do{
+ p1->aCoord[ii].i = MIN(p1->aCoord[ii].i, p2->aCoord[ii].i);
+ p1->aCoord[ii+1].i = MAX(p1->aCoord[ii+1].i, p2->aCoord[ii+1].i);
+ ii += 2;
+ }while( ii<pRtree->nDim2 );
+ }
+}
+
+/*
+** Return true if the area covered by p2 is a subset of the area covered
+** by p1. False otherwise.
+*/
+static int cellContains(Rtree *pRtree, RtreeCell *p1, RtreeCell *p2){
+ int ii;
+ int isInt = (pRtree->eCoordType==RTREE_COORD_INT32);
+ for(ii=0; ii<pRtree->nDim2; ii+=2){
+ RtreeCoord *a1 = &p1->aCoord[ii];
+ RtreeCoord *a2 = &p2->aCoord[ii];
+ if( (!isInt && (a2[0].f<a1[0].f || a2[1].f>a1[1].f))
+ || ( isInt && (a2[0].i<a1[0].i || a2[1].i>a1[1].i))
+ ){
+ return 0;
+ }
+ }
+ return 1;
+}
+
+/*
+** Return the amount cell p would grow by if it were unioned with pCell.
+*/
+static RtreeDValue cellGrowth(Rtree *pRtree, RtreeCell *p, RtreeCell *pCell){
+ RtreeDValue area;
+ RtreeCell cell;
+ memcpy(&cell, p, sizeof(RtreeCell));
+ area = cellArea(pRtree, &cell);
+ cellUnion(pRtree, &cell, pCell);
+ return (cellArea(pRtree, &cell)-area);
+}
+
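+/*
+** Return the total area shared between cell p and each of the nCell
+** cells in the aCell array. For each pair the overlap is the product,
+** over all dimensions, of the length of the intersection of the two
+** coordinate ranges, or zero if the ranges fail to intersect in any
+** dimension.
+*/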
+static RtreeDValue cellOverlap(
+ Rtree *pRtree,
+ RtreeCell *p,
+ RtreeCell *aCell,
+ int nCell
+){
+ int ii;
+ RtreeDValue overlap = RTREE_ZERO;
+ for(ii=0; ii<nCell; ii++){
+ int jj;
+ RtreeDValue o = (RtreeDValue)1;
+ for(jj=0; jj<pRtree->nDim2; jj+=2){
+ RtreeDValue x1, x2;
+ x1 = MAX(DCOORD(p->aCoord[jj]), DCOORD(aCell[ii].aCoord[jj]));
+ x2 = MIN(DCOORD(p->aCoord[jj+1]), DCOORD(aCell[ii].aCoord[jj+1]));
+ if( x2<x1 ){
+ o = (RtreeDValue)0;
+ break;
+ }else{
+ o = o * (x2-x1);
+ }
+ }
+ overlap += o;
+ }
+ return overlap;
+}
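+
+/*
+** For example, in two dimensions the cells [1,4]x[0,2] and [3,6]x[1,5]
+** overlap by (MIN(4,6)-MAX(1,3)) * (MIN(2,5)-MAX(0,1)) == 1*1 == 1.
+*/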
+
+
+/*
+** This function implements the ChooseLeaf algorithm from Guttman[84],
+** called ChooseSubtree in R*-tree terminology.
+*/
+static int ChooseLeaf(
+ Rtree *pRtree, /* Rtree table */
+ RtreeCell *pCell, /* Cell to insert into rtree */
+ int iHeight, /* Height of sub-tree rooted at pCell */
+ RtreeNode **ppLeaf /* OUT: Selected leaf page */
+){
+ int rc;
+ int ii;
+ RtreeNode *pNode = 0;
+ rc = nodeAcquire(pRtree, 1, 0, &pNode);
+
+ for(ii=0; rc==SQLITE_OK && ii<(pRtree->iDepth-iHeight); ii++){
+ int iCell;
+ sqlite3_int64 iBest = 0;
+
+ RtreeDValue fMinGrowth = RTREE_ZERO;
+ RtreeDValue fMinArea = RTREE_ZERO;
+
+ int nCell = NCELL(pNode);
+ RtreeCell cell;
+ RtreeNode *pChild;
+
+ RtreeCell *aCell = 0;
+
+ /* Select the child node which will be enlarged the least if pCell
+ ** is inserted into it. Resolve ties by choosing the entry with
+ ** the smallest area.
+ */
+ for(iCell=0; iCell<nCell; iCell++){
+ int bBest = 0;
+ RtreeDValue growth;
+ RtreeDValue area;
+ nodeGetCell(pRtree, pNode, iCell, &cell);
+ growth = cellGrowth(pRtree, &cell, pCell);
+ area = cellArea(pRtree, &cell);
+ if( iCell==0||growth<fMinGrowth||(growth==fMinGrowth && area<fMinArea) ){
+ bBest = 1;
+ }
+ if( bBest ){
+ fMinGrowth = growth;
+ fMinArea = area;
+ iBest = cell.iRowid;
+ }
+ }
+
+ sqlite3_free(aCell);
+ rc = nodeAcquire(pRtree, iBest, pNode, &pChild);
+ nodeRelease(pRtree, pNode);
+ pNode = pChild;
+ }
+
+ *ppLeaf = pNode;
+ return rc;
+}
+
+/*
+** A cell with the same content as pCell has just been inserted into
+** the node pNode. This function updates the bounding box cells in
+** all ancestor elements.
+*/
+static int AdjustTree(
+ Rtree *pRtree, /* Rtree table */
+ RtreeNode *pNode, /* Adjust ancestry of this node. */
+ RtreeCell *pCell /* This cell was just inserted */
+){
+ RtreeNode *p = pNode;
+ int cnt = 0;
+ while( p->pParent ){
+ RtreeNode *pParent = p->pParent;
+ RtreeCell cell;
+ int iCell;
+
+ if( (++cnt)>1000 || nodeParentIndex(pRtree, p, &iCell) ){
+ RTREE_IS_CORRUPT(pRtree);
+ return SQLITE_CORRUPT_VTAB;
+ }
+
+ nodeGetCell(pRtree, pParent, iCell, &cell);
+ if( !cellContains(pRtree, &cell, pCell) ){
+ cellUnion(pRtree, &cell, pCell);
+ nodeOverwriteCell(pRtree, pParent, &cell, iCell);
+ }
+
+ p = pParent;
+ }
+ return SQLITE_OK;
+}
+
+/*
+** Write mapping (iRowid->iNode) to the <rtree>_rowid table.
+*/
+static int rowidWrite(Rtree *pRtree, sqlite3_int64 iRowid, sqlite3_int64 iNode){
+ sqlite3_bind_int64(pRtree->pWriteRowid, 1, iRowid);
+ sqlite3_bind_int64(pRtree->pWriteRowid, 2, iNode);
+ sqlite3_step(pRtree->pWriteRowid);
+ return sqlite3_reset(pRtree->pWriteRowid);
+}
+
+/*
+** Write mapping (iNode->iPar) to the <rtree>_parent table.
+*/
+static int parentWrite(Rtree *pRtree, sqlite3_int64 iNode, sqlite3_int64 iPar){
+ sqlite3_bind_int64(pRtree->pWriteParent, 1, iNode);
+ sqlite3_bind_int64(pRtree->pWriteParent, 2, iPar);
+ sqlite3_step(pRtree->pWriteParent);
+ return sqlite3_reset(pRtree->pWriteParent);
+}
+
+static int rtreeInsertCell(Rtree *, RtreeNode *, RtreeCell *, int);
+
+
+/*
+** Arguments aIdx, aDistance and aSpare all point to arrays of size
+** nIdx. The aIdx array contains the set of integers from 0 to
+** (nIdx-1) in no particular order. This function sorts the values
+** in aIdx according to the indexed values in aDistance. For
+** example, assuming the inputs:
+**
+** aIdx = { 0, 1, 2, 3 }
+** aDistance = { 5.0, 2.0, 7.0, 6.0 }
+**
+** this function sets the aIdx array to contain:
+**
+** aIdx = { 1, 0, 3, 2 }
+**
+** The aSpare array is used as temporary working space by the
+** sorting algorithm.
+*/
+static void SortByDistance(
+ int *aIdx,
+ int nIdx,
+ RtreeDValue *aDistance,
+ int *aSpare
+){
+ if( nIdx>1 ){
+ int iLeft = 0;
+ int iRight = 0;
+
+ int nLeft = nIdx/2;
+ int nRight = nIdx-nLeft;
+ int *aLeft = aIdx;
+ int *aRight = &aIdx[nLeft];
+
+ SortByDistance(aLeft, nLeft, aDistance, aSpare);
+ SortByDistance(aRight, nRight, aDistance, aSpare);
+
+ memcpy(aSpare, aLeft, sizeof(int)*nLeft);
+ aLeft = aSpare;
+
+ while( iLeft<nLeft || iRight<nRight ){
+ if( iLeft==nLeft ){
+ aIdx[iLeft+iRight] = aRight[iRight];
+ iRight++;
+ }else if( iRight==nRight ){
+ aIdx[iLeft+iRight] = aLeft[iLeft];
+ iLeft++;
+ }else{
+ RtreeDValue fLeft = aDistance[aLeft[iLeft]];
+ RtreeDValue fRight = aDistance[aRight[iRight]];
+ if( fLeft<fRight ){
+ aIdx[iLeft+iRight] = aLeft[iLeft];
+ iLeft++;
+ }else{
+ aIdx[iLeft+iRight] = aRight[iRight];
+ iRight++;
+ }
+ }
+ }
+
+#if 0
+ /* Check that the sort worked */
+ {
+ int jj;
+ for(jj=1; jj<nIdx; jj++){
+ RtreeDValue left = aDistance[aIdx[jj-1]];
+ RtreeDValue right = aDistance[aIdx[jj]];
+ assert( left<=right );
+ }
+ }
+#endif
+ }
+}
+
+/*
+** Arguments aIdx, aCell and aSpare all point to arrays of size
+** nIdx. The aIdx array contains the set of integers from 0 to
+** (nIdx-1) in no particular order. This function sorts the values
+** in aIdx according to dimension iDim of the cells in aCell. The
+** minimum value of dimension iDim is considered first, the
+** maximum used to break ties.
+**
+** The aSpare array is used as temporary working space by the
+** sorting algorithm.
+*/
+static void SortByDimension(
+ Rtree *pRtree,
+ int *aIdx,
+ int nIdx,
+ int iDim,
+ RtreeCell *aCell,
+ int *aSpare
+){
+ if( nIdx>1 ){
+
+ int iLeft = 0;
+ int iRight = 0;
+
+ int nLeft = nIdx/2;
+ int nRight = nIdx-nLeft;
+ int *aLeft = aIdx;
+ int *aRight = &aIdx[nLeft];
+
+ SortByDimension(pRtree, aLeft, nLeft, iDim, aCell, aSpare);
+ SortByDimension(pRtree, aRight, nRight, iDim, aCell, aSpare);
+
+ memcpy(aSpare, aLeft, sizeof(int)*nLeft);
+ aLeft = aSpare;
+ while( iLeft<nLeft || iRight<nRight ){
+ RtreeDValue xleft1 = DCOORD(aCell[aLeft[iLeft]].aCoord[iDim*2]);
+ RtreeDValue xleft2 = DCOORD(aCell[aLeft[iLeft]].aCoord[iDim*2+1]);
+ RtreeDValue xright1 = DCOORD(aCell[aRight[iRight]].aCoord[iDim*2]);
+ RtreeDValue xright2 = DCOORD(aCell[aRight[iRight]].aCoord[iDim*2+1]);
+ if( (iLeft!=nLeft) && ((iRight==nRight)
+ || (xleft1<xright1)
+ || (xleft1==xright1 && xleft2<xright2)
+ )){
+ aIdx[iLeft+iRight] = aLeft[iLeft];
+ iLeft++;
+ }else{
+ aIdx[iLeft+iRight] = aRight[iRight];
+ iRight++;
+ }
+ }
+
+#if 0
+ /* Check that the sort worked */
+ {
+ int jj;
+ for(jj=1; jj<nIdx; jj++){
+ RtreeDValue xleft1 = aCell[aIdx[jj-1]].aCoord[iDim*2];
+ RtreeDValue xleft2 = aCell[aIdx[jj-1]].aCoord[iDim*2+1];
+ RtreeDValue xright1 = aCell[aIdx[jj]].aCoord[iDim*2];
+ RtreeDValue xright2 = aCell[aIdx[jj]].aCoord[iDim*2+1];
+ assert( xleft1<=xright1 && (xleft1<xright1 || xleft2<=xright2) );
+ }
+ }
+#endif
+ }
+}
+
+/*
+** Implementation of the R*-tree variant of SplitNode from Beckmann[1990].
+*/
+static int splitNodeStartree(
+ Rtree *pRtree,
+ RtreeCell *aCell,
+ int nCell,
+ RtreeNode *pLeft,
+ RtreeNode *pRight,
+ RtreeCell *pBboxLeft,
+ RtreeCell *pBboxRight
+){
+ int **aaSorted;
+ int *aSpare;
+ int ii;
+
+ int iBestDim = 0;
+ int iBestSplit = 0;
+ RtreeDValue fBestMargin = RTREE_ZERO;
+
+ sqlite3_int64 nByte = (pRtree->nDim+1)*(sizeof(int*)+nCell*sizeof(int));
+
+ aaSorted = (int **)sqlite3_malloc64(nByte);
+ if( !aaSorted ){
+ return SQLITE_NOMEM;
+ }
+
+ aSpare = &((int *)&aaSorted[pRtree->nDim])[pRtree->nDim*nCell];
+ memset(aaSorted, 0, nByte);
+ for(ii=0; ii<pRtree->nDim; ii++){
+ int jj;
+ aaSorted[ii] = &((int *)&aaSorted[pRtree->nDim])[ii*nCell];
+ for(jj=0; jj<nCell; jj++){
+ aaSorted[ii][jj] = jj;
+ }
+ SortByDimension(pRtree, aaSorted[ii], nCell, ii, aCell, aSpare);
+ }
+
+ for(ii=0; ii<pRtree->nDim; ii++){
+ RtreeDValue margin = RTREE_ZERO;
+ RtreeDValue fBestOverlap = RTREE_ZERO;
+ RtreeDValue fBestArea = RTREE_ZERO;
+ int iBestLeft = 0;
+ int nLeft;
+
+ for(
+ nLeft=RTREE_MINCELLS(pRtree);
+ nLeft<=(nCell-RTREE_MINCELLS(pRtree));
+ nLeft++
+ ){
+ RtreeCell left;
+ RtreeCell right;
+ int kk;
+ RtreeDValue overlap;
+ RtreeDValue area;
+
+ memcpy(&left, &aCell[aaSorted[ii][0]], sizeof(RtreeCell));
+ memcpy(&right, &aCell[aaSorted[ii][nCell-1]], sizeof(RtreeCell));
+ for(kk=1; kk<(nCell-1); kk++){
+ if( kk<nLeft ){
+ cellUnion(pRtree, &left, &aCell[aaSorted[ii][kk]]);
+ }else{
+ cellUnion(pRtree, &right, &aCell[aaSorted[ii][kk]]);
+ }
+ }
+ margin += cellMargin(pRtree, &left);
+ margin += cellMargin(pRtree, &right);
+ overlap = cellOverlap(pRtree, &left, &right, 1);
+ area = cellArea(pRtree, &left) + cellArea(pRtree, &right);
+ if( (nLeft==RTREE_MINCELLS(pRtree))
+ || (overlap<fBestOverlap)
+ || (overlap==fBestOverlap && area<fBestArea)
+ ){
+ iBestLeft = nLeft;
+ fBestOverlap = overlap;
+ fBestArea = area;
+ }
+ }
+
+ if( ii==0 || margin<fBestMargin ){
+ iBestDim = ii;
+ fBestMargin = margin;
+ iBestSplit = iBestLeft;
+ }
+ }
+
+ memcpy(pBboxLeft, &aCell[aaSorted[iBestDim][0]], sizeof(RtreeCell));
+ memcpy(pBboxRight, &aCell[aaSorted[iBestDim][iBestSplit]], sizeof(RtreeCell));
+ for(ii=0; ii<nCell; ii++){
+ RtreeNode *pTarget = (ii<iBestSplit)?pLeft:pRight;
+ RtreeCell *pBbox = (ii<iBestSplit)?pBboxLeft:pBboxRight;
+ RtreeCell *pCell = &aCell[aaSorted[iBestDim][ii]];
+ nodeInsertCell(pRtree, pTarget, pCell);
+ cellUnion(pRtree, pBbox, pCell);
+ }
+
+ sqlite3_free(aaSorted);
+ return SQLITE_OK;
+}
+
+
+static int updateMapping(
+ Rtree *pRtree,
+ i64 iRowid,
+ RtreeNode *pNode,
+ int iHeight
+){
+ int (*xSetMapping)(Rtree *, sqlite3_int64, sqlite3_int64);
+ xSetMapping = ((iHeight==0)?rowidWrite:parentWrite);
+ if( iHeight>0 ){
+ RtreeNode *pChild = nodeHashLookup(pRtree, iRowid);
+ if( pChild ){
+ nodeRelease(pRtree, pChild->pParent);
+ nodeReference(pNode);
+ pChild->pParent = pNode;
+ }
+ }
+ return xSetMapping(pRtree, iRowid, pNode->iNode);
+}
+
+static int SplitNode(
+ Rtree *pRtree,
+ RtreeNode *pNode,
+ RtreeCell *pCell,
+ int iHeight
+){
+ int i;
+ int newCellIsRight = 0;
+
+ int rc = SQLITE_OK;
+ int nCell = NCELL(pNode);
+ RtreeCell *aCell;
+ int *aiUsed;
+
+ RtreeNode *pLeft = 0;
+ RtreeNode *pRight = 0;
+
+ RtreeCell leftbbox;
+ RtreeCell rightbbox;
+
+ /* Allocate an array and populate it with a copy of pCell and
+ ** all cells from node pLeft. Then zero the original node.
+ */
+ aCell = sqlite3_malloc64((sizeof(RtreeCell)+sizeof(int))*(nCell+1));
+ if( !aCell ){
+ rc = SQLITE_NOMEM;
+ goto splitnode_out;
+ }
+ aiUsed = (int *)&aCell[nCell+1];
+ memset(aiUsed, 0, sizeof(int)*(nCell+1));
+ for(i=0; i<nCell; i++){
+ nodeGetCell(pRtree, pNode, i, &aCell[i]);
+ }
+ nodeZero(pRtree, pNode);
+ memcpy(&aCell[nCell], pCell, sizeof(RtreeCell));
+ nCell++;
+
+ if( pNode->iNode==1 ){
+ pRight = nodeNew(pRtree, pNode);
+ pLeft = nodeNew(pRtree, pNode);
+ pRtree->iDepth++;
+ pNode->isDirty = 1;
+ writeInt16(pNode->zData, pRtree->iDepth);
+ }else{
+ pLeft = pNode;
+ pRight = nodeNew(pRtree, pLeft->pParent);
+ pLeft->nRef++;
+ }
+
+ if( !pLeft || !pRight ){
+ rc = SQLITE_NOMEM;
+ goto splitnode_out;
+ }
+
+ memset(pLeft->zData, 0, pRtree->iNodeSize);
+ memset(pRight->zData, 0, pRtree->iNodeSize);
+
+ rc = splitNodeStartree(pRtree, aCell, nCell, pLeft, pRight,
+ &leftbbox, &rightbbox);
+ if( rc!=SQLITE_OK ){
+ goto splitnode_out;
+ }
+
+ /* Ensure both child nodes have node numbers assigned to them by calling
+ ** nodeWrite(). Node pRight always needs a node number, as it was created
+ ** by nodeNew() above. But node pLeft sometimes already has a node number.
+  ** In this case, avoid the call to nodeWrite().
+ */
+ if( SQLITE_OK!=(rc = nodeWrite(pRtree, pRight))
+ || (0==pLeft->iNode && SQLITE_OK!=(rc = nodeWrite(pRtree, pLeft)))
+ ){
+ goto splitnode_out;
+ }
+
+ rightbbox.iRowid = pRight->iNode;
+ leftbbox.iRowid = pLeft->iNode;
+
+ if( pNode->iNode==1 ){
+ rc = rtreeInsertCell(pRtree, pLeft->pParent, &leftbbox, iHeight+1);
+ if( rc!=SQLITE_OK ){
+ goto splitnode_out;
+ }
+ }else{
+ RtreeNode *pParent = pLeft->pParent;
+ int iCell;
+ rc = nodeParentIndex(pRtree, pLeft, &iCell);
+ if( rc==SQLITE_OK ){
+ nodeOverwriteCell(pRtree, pParent, &leftbbox, iCell);
+ rc = AdjustTree(pRtree, pParent, &leftbbox);
+ }
+ if( rc!=SQLITE_OK ){
+ goto splitnode_out;
+ }
+ }
+ if( (rc = rtreeInsertCell(pRtree, pRight->pParent, &rightbbox, iHeight+1)) ){
+ goto splitnode_out;
+ }
+
+ for(i=0; i<NCELL(pRight); i++){
+ i64 iRowid = nodeGetRowid(pRtree, pRight, i);
+ rc = updateMapping(pRtree, iRowid, pRight, iHeight);
+ if( iRowid==pCell->iRowid ){
+ newCellIsRight = 1;
+ }
+ if( rc!=SQLITE_OK ){
+ goto splitnode_out;
+ }
+ }
+ if( pNode->iNode==1 ){
+ for(i=0; i<NCELL(pLeft); i++){
+ i64 iRowid = nodeGetRowid(pRtree, pLeft, i);
+ rc = updateMapping(pRtree, iRowid, pLeft, iHeight);
+ if( rc!=SQLITE_OK ){
+ goto splitnode_out;
+ }
+ }
+ }else if( newCellIsRight==0 ){
+ rc = updateMapping(pRtree, pCell->iRowid, pLeft, iHeight);
+ }
+
+ if( rc==SQLITE_OK ){
+ rc = nodeRelease(pRtree, pRight);
+ pRight = 0;
+ }
+ if( rc==SQLITE_OK ){
+ rc = nodeRelease(pRtree, pLeft);
+ pLeft = 0;
+ }
+
+splitnode_out:
+ nodeRelease(pRtree, pRight);
+ nodeRelease(pRtree, pLeft);
+ sqlite3_free(aCell);
+ return rc;
+}
+
+/*
+** If node pLeaf is not the root of the r-tree and its pParent pointer is
+** still NULL, load all ancestor nodes of pLeaf into memory and populate
+** the pLeaf->pParent chain all the way up to the root node.
+**
+** This operation is required when a row is deleted (or updated - an update
+** is implemented as a delete followed by an insert). SQLite provides the
+** rowid of the row to delete, which can be used to find the leaf on which
+** the entry resides (argument pLeaf). Once the leaf is located, this
+** function is called to determine its ancestry.
+*/
+static int fixLeafParent(Rtree *pRtree, RtreeNode *pLeaf){
+ int rc = SQLITE_OK;
+ RtreeNode *pChild = pLeaf;
+ while( rc==SQLITE_OK && pChild->iNode!=1 && pChild->pParent==0 ){
+ int rc2 = SQLITE_OK; /* sqlite3_reset() return code */
+ sqlite3_bind_int64(pRtree->pReadParent, 1, pChild->iNode);
+ rc = sqlite3_step(pRtree->pReadParent);
+ if( rc==SQLITE_ROW ){
+ RtreeNode *pTest; /* Used to test for reference loops */
+ i64 iNode; /* Node number of parent node */
+
+ /* Before setting pChild->pParent, test that we are not creating a
+ ** loop of references (as we would if, say, pChild==pParent). We don't
+      ** want to do this, as it leads to a memory leak when trying to delete
+      ** the reference-counted node structures.
+ */
+ iNode = sqlite3_column_int64(pRtree->pReadParent, 0);
+ for(pTest=pLeaf; pTest && pTest->iNode!=iNode; pTest=pTest->pParent);
+ if( !pTest ){
+ rc2 = nodeAcquire(pRtree, iNode, 0, &pChild->pParent);
+ }
+ }
+ rc = sqlite3_reset(pRtree->pReadParent);
+ if( rc==SQLITE_OK ) rc = rc2;
+ if( rc==SQLITE_OK && !pChild->pParent ){
+ RTREE_IS_CORRUPT(pRtree);
+ rc = SQLITE_CORRUPT_VTAB;
+ }
+ pChild = pChild->pParent;
+ }
+ return rc;
+}
+
+static int deleteCell(Rtree *, RtreeNode *, int, int);
+
+static int removeNode(Rtree *pRtree, RtreeNode *pNode, int iHeight){
+ int rc;
+ int rc2;
+ RtreeNode *pParent = 0;
+ int iCell;
+
+ assert( pNode->nRef==1 );
+
+ /* Remove the entry in the parent cell. */
+ rc = nodeParentIndex(pRtree, pNode, &iCell);
+ if( rc==SQLITE_OK ){
+ pParent = pNode->pParent;
+ pNode->pParent = 0;
+ rc = deleteCell(pRtree, pParent, iCell, iHeight+1);
+ }
+ rc2 = nodeRelease(pRtree, pParent);
+ if( rc==SQLITE_OK ){
+ rc = rc2;
+ }
+ if( rc!=SQLITE_OK ){
+ return rc;
+ }
+
+ /* Remove the xxx_node entry. */
+ sqlite3_bind_int64(pRtree->pDeleteNode, 1, pNode->iNode);
+ sqlite3_step(pRtree->pDeleteNode);
+ if( SQLITE_OK!=(rc = sqlite3_reset(pRtree->pDeleteNode)) ){
+ return rc;
+ }
+
+ /* Remove the xxx_parent entry. */
+ sqlite3_bind_int64(pRtree->pDeleteParent, 1, pNode->iNode);
+ sqlite3_step(pRtree->pDeleteParent);
+ if( SQLITE_OK!=(rc = sqlite3_reset(pRtree->pDeleteParent)) ){
+ return rc;
+ }
+
+ /* Remove the node from the in-memory hash table and link it into
+ ** the Rtree.pDeleted list. Its contents will be re-inserted later on.
+ */
+ nodeHashDelete(pRtree, pNode);
+ pNode->iNode = iHeight;
+ pNode->pNext = pRtree->pDeleted;
+ pNode->nRef++;
+ pRtree->pDeleted = pNode;
+
+ return SQLITE_OK;
+}
+
+static int fixBoundingBox(Rtree *pRtree, RtreeNode *pNode){
+ RtreeNode *pParent = pNode->pParent;
+ int rc = SQLITE_OK;
+ if( pParent ){
+ int ii;
+ int nCell = NCELL(pNode);
+ RtreeCell box; /* Bounding box for pNode */
+ nodeGetCell(pRtree, pNode, 0, &box);
+ for(ii=1; ii<nCell; ii++){
+ RtreeCell cell;
+ nodeGetCell(pRtree, pNode, ii, &cell);
+ cellUnion(pRtree, &box, &cell);
+ }
+ box.iRowid = pNode->iNode;
+ rc = nodeParentIndex(pRtree, pNode, &ii);
+ if( rc==SQLITE_OK ){
+ nodeOverwriteCell(pRtree, pParent, &box, ii);
+ rc = fixBoundingBox(pRtree, pParent);
+ }
+ }
+ return rc;
+}
+
+/*
+** Delete the cell at index iCell of node pNode. After removing the
+** cell, adjust the r-tree data structure if required.
+*/
+static int deleteCell(Rtree *pRtree, RtreeNode *pNode, int iCell, int iHeight){
+ RtreeNode *pParent;
+ int rc;
+
+ if( SQLITE_OK!=(rc = fixLeafParent(pRtree, pNode)) ){
+ return rc;
+ }
+
+ /* Remove the cell from the node. This call just moves bytes around
+ ** the in-memory node image, so it cannot fail.
+ */
+ nodeDeleteCell(pRtree, pNode, iCell);
+
+ /* If the node is not the tree root and now has less than the minimum
+ ** number of cells, remove it from the tree. Otherwise, update the
+ ** cell in the parent node so that it tightly contains the updated
+ ** node.
+ */
+ pParent = pNode->pParent;
+ assert( pParent || pNode->iNode==1 );
+ if( pParent ){
+ if( NCELL(pNode)<RTREE_MINCELLS(pRtree) ){
+ rc = removeNode(pRtree, pNode, iHeight);
+ }else{
+ rc = fixBoundingBox(pRtree, pNode);
+ }
+ }
+
+ return rc;
+}
+
+static int Reinsert(
+ Rtree *pRtree,
+ RtreeNode *pNode,
+ RtreeCell *pCell,
+ int iHeight
+){
+ int *aOrder;
+ int *aSpare;
+ RtreeCell *aCell;
+ RtreeDValue *aDistance;
+ int nCell;
+ RtreeDValue aCenterCoord[RTREE_MAX_DIMENSIONS];
+ int iDim;
+ int ii;
+ int rc = SQLITE_OK;
+ int n;
+
+ memset(aCenterCoord, 0, sizeof(RtreeDValue)*RTREE_MAX_DIMENSIONS);
+
+ nCell = NCELL(pNode)+1;
+ n = (nCell+1)&(~1);
+
+ /* Allocate the buffers used by this operation. The allocation is
+ ** relinquished before this function returns.
+ */
+ aCell = (RtreeCell *)sqlite3_malloc64(n * (
+ sizeof(RtreeCell) + /* aCell array */
+ sizeof(int) + /* aOrder array */
+ sizeof(int) + /* aSpare array */
+ sizeof(RtreeDValue) /* aDistance array */
+ ));
+ if( !aCell ){
+ return SQLITE_NOMEM;
+ }
+ aOrder = (int *)&aCell[n];
+ aSpare = (int *)&aOrder[n];
+ aDistance = (RtreeDValue *)&aSpare[n];
+
+ for(ii=0; ii<nCell; ii++){
+ if( ii==(nCell-1) ){
+ memcpy(&aCell[ii], pCell, sizeof(RtreeCell));
+ }else{
+ nodeGetCell(pRtree, pNode, ii, &aCell[ii]);
+ }
+ aOrder[ii] = ii;
+ for(iDim=0; iDim<pRtree->nDim; iDim++){
+ aCenterCoord[iDim] += DCOORD(aCell[ii].aCoord[iDim*2]);
+ aCenterCoord[iDim] += DCOORD(aCell[ii].aCoord[iDim*2+1]);
+ }
+ }
+ for(iDim=0; iDim<pRtree->nDim; iDim++){
+ aCenterCoord[iDim] = (aCenterCoord[iDim]/(nCell*(RtreeDValue)2));
+ }
+
+ for(ii=0; ii<nCell; ii++){
+ aDistance[ii] = RTREE_ZERO;
+ for(iDim=0; iDim<pRtree->nDim; iDim++){
+ RtreeDValue coord = (DCOORD(aCell[ii].aCoord[iDim*2+1]) -
+ DCOORD(aCell[ii].aCoord[iDim*2]));
+ aDistance[ii] += (coord-aCenterCoord[iDim])*(coord-aCenterCoord[iDim]);
+ }
+ }
+
+ SortByDistance(aOrder, nCell, aDistance, aSpare);
+ nodeZero(pRtree, pNode);
+
+ for(ii=0; rc==SQLITE_OK && ii<(nCell-(RTREE_MINCELLS(pRtree)+1)); ii++){
+ RtreeCell *p = &aCell[aOrder[ii]];
+ nodeInsertCell(pRtree, pNode, p);
+ if( p->iRowid==pCell->iRowid ){
+ if( iHeight==0 ){
+ rc = rowidWrite(pRtree, p->iRowid, pNode->iNode);
+ }else{
+ rc = parentWrite(pRtree, p->iRowid, pNode->iNode);
+ }
+ }
+ }
+ if( rc==SQLITE_OK ){
+ rc = fixBoundingBox(pRtree, pNode);
+ }
+ for(; rc==SQLITE_OK && ii<nCell; ii++){
+    /* Find a node to store this cell in. The sub-tree headed by this
+    ** cell has height iHeight, as passed to ChooseLeaf() below.
+ */
+ RtreeNode *pInsert;
+ RtreeCell *p = &aCell[aOrder[ii]];
+ rc = ChooseLeaf(pRtree, p, iHeight, &pInsert);
+ if( rc==SQLITE_OK ){
+ int rc2;
+ rc = rtreeInsertCell(pRtree, pInsert, p, iHeight);
+ rc2 = nodeRelease(pRtree, pInsert);
+ if( rc==SQLITE_OK ){
+ rc = rc2;
+ }
+ }
+ }
+
+ sqlite3_free(aCell);
+ return rc;
+}
+
+/*
+** Insert cell pCell into node pNode. Node pNode is the head of a
+** subtree iHeight high (leaf nodes have iHeight==0).
+*/
+static int rtreeInsertCell(
+ Rtree *pRtree,
+ RtreeNode *pNode,
+ RtreeCell *pCell,
+ int iHeight
+){
+ int rc = SQLITE_OK;
+ if( iHeight>0 ){
+ RtreeNode *pChild = nodeHashLookup(pRtree, pCell->iRowid);
+ if( pChild ){
+ nodeRelease(pRtree, pChild->pParent);
+ nodeReference(pNode);
+ pChild->pParent = pNode;
+ }
+ }
+ if( nodeInsertCell(pRtree, pNode, pCell) ){
+ if( iHeight<=pRtree->iReinsertHeight || pNode->iNode==1){
+ rc = SplitNode(pRtree, pNode, pCell, iHeight);
+ }else{
+ pRtree->iReinsertHeight = iHeight;
+ rc = Reinsert(pRtree, pNode, pCell, iHeight);
+ }
+ }else{
+ rc = AdjustTree(pRtree, pNode, pCell);
+ if( rc==SQLITE_OK ){
+ if( iHeight==0 ){
+ rc = rowidWrite(pRtree, pCell->iRowid, pNode->iNode);
+ }else{
+ rc = parentWrite(pRtree, pCell->iRowid, pNode->iNode);
+ }
+ }
+ }
+ return rc;
+}
+
+static int reinsertNodeContent(Rtree *pRtree, RtreeNode *pNode){
+ int ii;
+ int rc = SQLITE_OK;
+ int nCell = NCELL(pNode);
+
+ for(ii=0; rc==SQLITE_OK && ii<nCell; ii++){
+ RtreeNode *pInsert;
+ RtreeCell cell;
+ nodeGetCell(pRtree, pNode, ii, &cell);
+
+ /* Find a node to store this cell in. pNode->iNode currently contains
+ ** the height of the sub-tree headed by the cell.
+ */
+ rc = ChooseLeaf(pRtree, &cell, (int)pNode->iNode, &pInsert);
+ if( rc==SQLITE_OK ){
+ int rc2;
+ rc = rtreeInsertCell(pRtree, pInsert, &cell, (int)pNode->iNode);
+ rc2 = nodeRelease(pRtree, pInsert);
+ if( rc==SQLITE_OK ){
+ rc = rc2;
+ }
+ }
+ }
+ return rc;
+}
+
+/*
+** Select a currently unused rowid for a new r-tree record.
+*/
+static int rtreeNewRowid(Rtree *pRtree, i64 *piRowid){
+ int rc;
+ sqlite3_bind_null(pRtree->pWriteRowid, 1);
+ sqlite3_bind_null(pRtree->pWriteRowid, 2);
+ sqlite3_step(pRtree->pWriteRowid);
+ rc = sqlite3_reset(pRtree->pWriteRowid);
+ *piRowid = sqlite3_last_insert_rowid(pRtree->db);
+ return rc;
+}
+
+/*
+** Remove the entry with rowid=iDelete from the r-tree structure.
+*/
+static int rtreeDeleteRowid(Rtree *pRtree, sqlite3_int64 iDelete){
+ int rc; /* Return code */
+ RtreeNode *pLeaf = 0; /* Leaf node containing record iDelete */
+ int iCell; /* Index of iDelete cell in pLeaf */
+ RtreeNode *pRoot = 0; /* Root node of rtree structure */
+
+
+ /* Obtain a reference to the root node to initialize Rtree.iDepth */
+ rc = nodeAcquire(pRtree, 1, 0, &pRoot);
+
+ /* Obtain a reference to the leaf node that contains the entry
+ ** about to be deleted.
+ */
+ if( rc==SQLITE_OK ){
+ rc = findLeafNode(pRtree, iDelete, &pLeaf, 0);
+ }
+
+#ifdef CORRUPT_DB
+ assert( pLeaf!=0 || rc!=SQLITE_OK || CORRUPT_DB );
+#endif
+
+ /* Delete the cell in question from the leaf node. */
+ if( rc==SQLITE_OK && pLeaf ){
+ int rc2;
+ rc = nodeRowidIndex(pRtree, pLeaf, iDelete, &iCell);
+ if( rc==SQLITE_OK ){
+ rc = deleteCell(pRtree, pLeaf, iCell, 0);
+ }
+ rc2 = nodeRelease(pRtree, pLeaf);
+ if( rc==SQLITE_OK ){
+ rc = rc2;
+ }
+ }
+
+ /* Delete the corresponding entry in the <rtree>_rowid table. */
+ if( rc==SQLITE_OK ){
+ sqlite3_bind_int64(pRtree->pDeleteRowid, 1, iDelete);
+ sqlite3_step(pRtree->pDeleteRowid);
+ rc = sqlite3_reset(pRtree->pDeleteRowid);
+ }
+
+ /* Check if the root node now has exactly one child. If so, remove
+ ** it, schedule the contents of the child for reinsertion and
+ ** reduce the tree height by one.
+ **
+ ** This is equivalent to copying the contents of the child into
+  ** the root node (the operation that Guttman's paper says to perform
+ ** in this scenario).
+ */
+ if( rc==SQLITE_OK && pRtree->iDepth>0 && NCELL(pRoot)==1 ){
+ int rc2;
+ RtreeNode *pChild = 0;
+ i64 iChild = nodeGetRowid(pRtree, pRoot, 0);
+ rc = nodeAcquire(pRtree, iChild, pRoot, &pChild);
+ if( rc==SQLITE_OK ){
+ rc = removeNode(pRtree, pChild, pRtree->iDepth-1);
+ }
+ rc2 = nodeRelease(pRtree, pChild);
+ if( rc==SQLITE_OK ) rc = rc2;
+ if( rc==SQLITE_OK ){
+ pRtree->iDepth--;
+ writeInt16(pRoot->zData, pRtree->iDepth);
+ pRoot->isDirty = 1;
+ }
+ }
+
+ /* Re-insert the contents of any underfull nodes removed from the tree. */
+ for(pLeaf=pRtree->pDeleted; pLeaf; pLeaf=pRtree->pDeleted){
+ if( rc==SQLITE_OK ){
+ rc = reinsertNodeContent(pRtree, pLeaf);
+ }
+ pRtree->pDeleted = pLeaf->pNext;
+ pRtree->nNodeRef--;
+ sqlite3_free(pLeaf);
+ }
+
+ /* Release the reference to the root node. */
+ if( rc==SQLITE_OK ){
+ rc = nodeRelease(pRtree, pRoot);
+ }else{
+ nodeRelease(pRtree, pRoot);
+ }
+
+ return rc;
+}
+
+/*
+** Rounding constants for float->double conversion.
+*/
+#define RNDTOWARDS (1.0 - 1.0/8388608.0) /* Round towards zero */
+#define RNDAWAY (1.0 + 1.0/8388608.0) /* Round away from zero */
+
+#if !defined(SQLITE_RTREE_INT_ONLY)
+/*
+** Convert an sqlite3_value into an RtreeValue (presumably a float)
+** while taking care to round toward negative or positive infinity,
+** respectively.
+*/
+static RtreeValue rtreeValueDown(sqlite3_value *v){
+ double d = sqlite3_value_double(v);
+ float f = (float)d;
+ if( f>d ){
+ f = (float)(d*(d<0 ? RNDAWAY : RNDTOWARDS));
+ }
+ return f;
+}
+static RtreeValue rtreeValueUp(sqlite3_value *v){
+ double d = sqlite3_value_double(v);
+ float f = (float)d;
+ if( f<d ){
+ f = (float)(d*(d<0 ? RNDTOWARDS : RNDAWAY));
+ }
+ return f;
+}
+#endif /* !defined(SQLITE_RTREE_INT_ONLY) */
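+
+/*
+** The factor 1.0/8388608.0 is 2^-23, one part in a float's 24-bit
+** significand, so RNDTOWARDS and RNDAWAY above perturb the double by
+** roughly one float ulp before truncation. The net effect is that the
+** stored interval [rtreeValueDown(v), rtreeValueUp(v)] always contains
+** the original double-precision value.
+*/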
+
+/*
+** A constraint has failed while inserting a row into an rtree table.
+** Assuming no OOM error occurs, this function sets the error message
+** (at pRtree->base.zErrMsg) to an appropriate value and returns
+** SQLITE_CONSTRAINT.
+**
+** Parameter iCol is the index of the leftmost column involved in the
+** constraint failure. If it is 0, then the constraint that failed is
+** the unique constraint on the id column. Otherwise, it is the rtree
+** (c1<=c2) constraint on columns iCol and iCol+1 that has failed.
+**
+** If an OOM occurs, SQLITE_NOMEM is returned instead of SQLITE_CONSTRAINT.
+*/
+static int rtreeConstraintError(Rtree *pRtree, int iCol){
+ sqlite3_stmt *pStmt = 0;
+ char *zSql;
+ int rc;
+
+ assert( iCol==0 || iCol%2 );
+ zSql = sqlite3_mprintf("SELECT * FROM %Q.%Q", pRtree->zDb, pRtree->zName);
+ if( zSql ){
+ rc = sqlite3_prepare_v2(pRtree->db, zSql, -1, &pStmt, 0);
+ }else{
+ rc = SQLITE_NOMEM;
+ }
+ sqlite3_free(zSql);
+
+ if( rc==SQLITE_OK ){
+ if( iCol==0 ){
+ const char *zCol = sqlite3_column_name(pStmt, 0);
+ pRtree->base.zErrMsg = sqlite3_mprintf(
+ "UNIQUE constraint failed: %s.%s", pRtree->zName, zCol
+ );
+ }else{
+ const char *zCol1 = sqlite3_column_name(pStmt, iCol);
+ const char *zCol2 = sqlite3_column_name(pStmt, iCol+1);
+ pRtree->base.zErrMsg = sqlite3_mprintf(
+ "rtree constraint failed: %s.(%s<=%s)", pRtree->zName, zCol1, zCol2
+ );
+ }
+ }
+
+ sqlite3_finalize(pStmt);
+ return (rc==SQLITE_OK ? SQLITE_CONSTRAINT : rc);
+}
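+
+/*
+** For a hypothetical table rt(id,x1,x2), the messages generated above
+** read "UNIQUE constraint failed: rt.id" when iCol==0 and
+** "rtree constraint failed: rt.(x1<=x2)" when iCol==1.
+*/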
+
+
+
+/*
+** The xUpdate method for rtree module virtual tables.
+*/
+static int rtreeUpdate(
+ sqlite3_vtab *pVtab,
+ int nData,
+ sqlite3_value **aData,
+ sqlite_int64 *pRowid
+){
+ Rtree *pRtree = (Rtree *)pVtab;
+ int rc = SQLITE_OK;
+ RtreeCell cell; /* New cell to insert if nData>1 */
+ int bHaveRowid = 0; /* Set to 1 after new rowid is determined */
+
+ if( pRtree->nNodeRef ){
+ /* Unable to write to the btree while another cursor is reading from it,
+ ** since the write might do a rebalance which would disrupt the read
+ ** cursor. */
+ return SQLITE_LOCKED_VTAB;
+ }
+ rtreeReference(pRtree);
+ assert(nData>=1);
+
+ cell.iRowid = 0; /* Used only to suppress a compiler warning */
+
+ /* Constraint handling. A write operation on an r-tree table may return
+ ** SQLITE_CONSTRAINT for two reasons:
+ **
+ ** 1. A duplicate rowid value, or
+ ** 2. The supplied data violates the "x2>=x1" constraint.
+ **
+ ** In the first case, if the conflict-handling mode is REPLACE, then
+ ** the conflicting row can be removed before proceeding. In the second
+ ** case, SQLITE_CONSTRAINT must be returned regardless of the
+ ** conflict-handling mode specified by the user.
+ */
+ if( nData>1 ){
+ int ii;
+ int nn = nData - 4;
+
+ if( nn > pRtree->nDim2 ) nn = pRtree->nDim2;
+ /* Populate the cell.aCoord[] array. The first coordinate is aData[3].
+ **
+ ** NB: nData can only be less than nDim*2+3 if the rtree is mis-declared
+    ** with "columns" that are actually interpreted as table constraints.
+ ** Example: CREATE VIRTUAL TABLE bad USING rtree(x,y,CHECK(y>5));
+ ** This problem was discovered after years of use, so we silently ignore
+    ** these kinds of misdeclared tables to avoid breaking legacy schemas.
+ */
+
+#ifndef SQLITE_RTREE_INT_ONLY
+ if( pRtree->eCoordType==RTREE_COORD_REAL32 ){
+ for(ii=0; ii<nn; ii+=2){
+ cell.aCoord[ii].f = rtreeValueDown(aData[ii+3]);
+ cell.aCoord[ii+1].f = rtreeValueUp(aData[ii+4]);
+ if( cell.aCoord[ii].f>cell.aCoord[ii+1].f ){
+ rc = rtreeConstraintError(pRtree, ii+1);
+ goto constraint;
+ }
+ }
+ }else
+#endif
+ {
+ for(ii=0; ii<nn; ii+=2){
+ cell.aCoord[ii].i = sqlite3_value_int(aData[ii+3]);
+ cell.aCoord[ii+1].i = sqlite3_value_int(aData[ii+4]);
+ if( cell.aCoord[ii].i>cell.aCoord[ii+1].i ){
+ rc = rtreeConstraintError(pRtree, ii+1);
+ goto constraint;
+ }
+ }
+ }
+
+ /* If a rowid value was supplied, check if it is already present in
+ ** the table. If so, the constraint has failed. */
+ if( sqlite3_value_type(aData[2])!=SQLITE_NULL ){
+ cell.iRowid = sqlite3_value_int64(aData[2]);
+ if( sqlite3_value_type(aData[0])==SQLITE_NULL
+ || sqlite3_value_int64(aData[0])!=cell.iRowid
+ ){
+ int steprc;
+ sqlite3_bind_int64(pRtree->pReadRowid, 1, cell.iRowid);
+ steprc = sqlite3_step(pRtree->pReadRowid);
+ rc = sqlite3_reset(pRtree->pReadRowid);
+ if( SQLITE_ROW==steprc ){
+ if( sqlite3_vtab_on_conflict(pRtree->db)==SQLITE_REPLACE ){
+ rc = rtreeDeleteRowid(pRtree, cell.iRowid);
+ }else{
+ rc = rtreeConstraintError(pRtree, 0);
+ goto constraint;
+ }
+ }
+ }
+ bHaveRowid = 1;
+ }
+ }
+
+ /* If aData[0] is not an SQL NULL value, it is the rowid of a
+ ** record to delete from the r-tree table. The following block does
+ ** just that.
+ */
+ if( sqlite3_value_type(aData[0])!=SQLITE_NULL ){
+ rc = rtreeDeleteRowid(pRtree, sqlite3_value_int64(aData[0]));
+ }
+
+ /* If the aData[] array contains more than one element, elements
+ ** (aData[2]..aData[argc-1]) contain a new record to insert into
+ ** the r-tree structure.
+ */
+ if( rc==SQLITE_OK && nData>1 ){
+ /* Insert the new record into the r-tree */
+ RtreeNode *pLeaf = 0;
+
+ /* Figure out the rowid of the new row. */
+ if( bHaveRowid==0 ){
+ rc = rtreeNewRowid(pRtree, &cell.iRowid);
+ }
+ *pRowid = cell.iRowid;
+
+ if( rc==SQLITE_OK ){
+ rc = ChooseLeaf(pRtree, &cell, 0, &pLeaf);
+ }
+ if( rc==SQLITE_OK ){
+ int rc2;
+ pRtree->iReinsertHeight = -1;
+ rc = rtreeInsertCell(pRtree, pLeaf, &cell, 0);
+ rc2 = nodeRelease(pRtree, pLeaf);
+ if( rc==SQLITE_OK ){
+ rc = rc2;
+ }
+ }
+ if( rc==SQLITE_OK && pRtree->nAux ){
+ sqlite3_stmt *pUp = pRtree->pWriteAux;
+ int jj;
+ sqlite3_bind_int64(pUp, 1, *pRowid);
+ for(jj=0; jj<pRtree->nAux; jj++){
+ sqlite3_bind_value(pUp, jj+2, aData[pRtree->nDim2+3+jj]);
+ }
+ sqlite3_step(pUp);
+ rc = sqlite3_reset(pUp);
+ }
+ }
+
+constraint:
+ rtreeRelease(pRtree);
+ return rc;
+}
+
+/*
+** Called when a transaction starts.
+*/
+static int rtreeBeginTransaction(sqlite3_vtab *pVtab){
+ Rtree *pRtree = (Rtree *)pVtab;
+ assert( pRtree->inWrTrans==0 );
+ pRtree->inWrTrans++;
+ return SQLITE_OK;
+}
+
+/*
+** Called when a transaction completes (either by COMMIT or ROLLBACK).
+** The sqlite3_blob object should be released at this point.
+*/
+static int rtreeEndTransaction(sqlite3_vtab *pVtab){
+ Rtree *pRtree = (Rtree *)pVtab;
+ pRtree->inWrTrans = 0;
+ nodeBlobReset(pRtree);
+ return SQLITE_OK;
+}
+
+/*
+** The xRename method for rtree module virtual tables.
+*/
+static int rtreeRename(sqlite3_vtab *pVtab, const char *zNewName){
+ Rtree *pRtree = (Rtree *)pVtab;
+ int rc = SQLITE_NOMEM;
+ char *zSql = sqlite3_mprintf(
+ "ALTER TABLE %Q.'%q_node' RENAME TO \"%w_node\";"
+ "ALTER TABLE %Q.'%q_parent' RENAME TO \"%w_parent\";"
+ "ALTER TABLE %Q.'%q_rowid' RENAME TO \"%w_rowid\";"
+ , pRtree->zDb, pRtree->zName, zNewName
+ , pRtree->zDb, pRtree->zName, zNewName
+ , pRtree->zDb, pRtree->zName, zNewName
+ );
+ if( zSql ){
+ nodeBlobReset(pRtree);
+ rc = sqlite3_exec(pRtree->db, zSql, 0, 0, 0);
+ sqlite3_free(zSql);
+ }
+ return rc;
+}
+
+/*
+** The xSavepoint method.
+**
+** This module does not need to do anything to support savepoints. However,
+** it uses this hook to close any open blob handle. This is done because a
+** DROP TABLE command - which fortunately always opens a savepoint - cannot
+** succeed if there are any open blob handles. i.e. if the blob handle were
+** not closed here, the following would fail:
+**
+** BEGIN;
+** INSERT INTO rtree...
+** DROP TABLE <tablename>; -- Would fail with SQLITE_LOCKED
+** COMMIT;
+*/
+static int rtreeSavepoint(sqlite3_vtab *pVtab, int iSavepoint){
+ Rtree *pRtree = (Rtree *)pVtab;
+ u8 iwt = pRtree->inWrTrans;
+ UNUSED_PARAMETER(iSavepoint);
+ pRtree->inWrTrans = 0;
+ nodeBlobReset(pRtree);
+ pRtree->inWrTrans = iwt;
+ return SQLITE_OK;
+}
+
+/*
+** This function populates the pRtree->nRowEst variable with an estimate
+** of the number of rows in the virtual table. If possible, this is based
+** on sqlite_stat1 data. Otherwise, use RTREE_DEFAULT_ROWEST.
+*/
+static int rtreeQueryStat1(sqlite3 *db, Rtree *pRtree){
+ const char *zFmt = "SELECT stat FROM %Q.sqlite_stat1 WHERE tbl = '%q_rowid'";
+ char *zSql;
+ sqlite3_stmt *p;
+ int rc;
+ i64 nRow = 0;
+
+ rc = sqlite3_table_column_metadata(
+ db, pRtree->zDb, "sqlite_stat1",0,0,0,0,0,0
+ );
+ if( rc!=SQLITE_OK ){
+ pRtree->nRowEst = RTREE_DEFAULT_ROWEST;
+ return rc==SQLITE_ERROR ? SQLITE_OK : rc;
+ }
+ zSql = sqlite3_mprintf(zFmt, pRtree->zDb, pRtree->zName);
+ if( zSql==0 ){
+ rc = SQLITE_NOMEM;
+ }else{
+ rc = sqlite3_prepare_v2(db, zSql, -1, &p, 0);
+ if( rc==SQLITE_OK ){
+ if( sqlite3_step(p)==SQLITE_ROW ) nRow = sqlite3_column_int64(p, 0);
+ rc = sqlite3_finalize(p);
+ }else if( rc!=SQLITE_NOMEM ){
+ rc = SQLITE_OK;
+ }
+
+ if( rc==SQLITE_OK ){
+ if( nRow==0 ){
+ pRtree->nRowEst = RTREE_DEFAULT_ROWEST;
+ }else{
+ pRtree->nRowEst = MAX(nRow, RTREE_MIN_ROWEST);
+ }
+ }
+ sqlite3_free(zSql);
+ }
+
+ return rc;
+}
+
+
+/*
+** Return true if zName is the suffix of one of the shadow table names used
+** by this module.
+*/
+static int rtreeShadowName(const char *zName){
+ static const char *azName[] = {
+ "node", "parent", "rowid"
+ };
+ unsigned int i;
+ for(i=0; i<sizeof(azName)/sizeof(azName[0]); i++){
+ if( sqlite3_stricmp(zName, azName[i])==0 ) return 1;
+ }
+ return 0;
+}
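+
+/*
+** For example, an r-tree virtual table named "demo" (a hypothetical
+** name) is backed by the shadow tables demo_node, demo_parent and
+** demo_rowid, so rtreeShadowName() returns 1 for "node", "parent"
+** and "rowid".
+*/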
+
+static sqlite3_module rtreeModule = {
+ 3, /* iVersion */
+ rtreeCreate, /* xCreate - create a table */
+ rtreeConnect, /* xConnect - connect to an existing table */
+ rtreeBestIndex, /* xBestIndex - Determine search strategy */
+ rtreeDisconnect, /* xDisconnect - Disconnect from a table */
+ rtreeDestroy, /* xDestroy - Drop a table */
+ rtreeOpen, /* xOpen - open a cursor */
+ rtreeClose, /* xClose - close a cursor */
+ rtreeFilter, /* xFilter - configure scan constraints */
+ rtreeNext, /* xNext - advance a cursor */
+ rtreeEof, /* xEof */
+ rtreeColumn, /* xColumn - read data */
+ rtreeRowid, /* xRowid - read data */
+ rtreeUpdate, /* xUpdate - write data */
+ rtreeBeginTransaction, /* xBegin - begin transaction */
+ rtreeEndTransaction, /* xSync - sync transaction */
+ rtreeEndTransaction, /* xCommit - commit transaction */
+ rtreeEndTransaction, /* xRollback - rollback transaction */
+ 0, /* xFindFunction - function overloading */
+ rtreeRename, /* xRename - rename the table */
+ rtreeSavepoint, /* xSavepoint */
+ 0, /* xRelease */
+ 0, /* xRollbackTo */
+ rtreeShadowName /* xShadowName */
+};
+
+static int rtreeSqlInit(
+ Rtree *pRtree,
+ sqlite3 *db,
+ const char *zDb,
+ const char *zPrefix,
+ int isCreate
+){
+ int rc = SQLITE_OK;
+
+ #define N_STATEMENT 8
+ static const char *azSql[N_STATEMENT] = {
+ /* Write the xxx_node table */
+ "INSERT OR REPLACE INTO '%q'.'%q_node' VALUES(?1, ?2)",
+ "DELETE FROM '%q'.'%q_node' WHERE nodeno = ?1",
+
+ /* Read and write the xxx_rowid table */
+ "SELECT nodeno FROM '%q'.'%q_rowid' WHERE rowid = ?1",
+ "INSERT OR REPLACE INTO '%q'.'%q_rowid' VALUES(?1, ?2)",
+ "DELETE FROM '%q'.'%q_rowid' WHERE rowid = ?1",
+
+ /* Read and write the xxx_parent table */
+ "SELECT parentnode FROM '%q'.'%q_parent' WHERE nodeno = ?1",
+ "INSERT OR REPLACE INTO '%q'.'%q_parent' VALUES(?1, ?2)",
+ "DELETE FROM '%q'.'%q_parent' WHERE nodeno = ?1"
+ };
+ sqlite3_stmt **appStmt[N_STATEMENT];
+ int i;
+ const int f = SQLITE_PREPARE_PERSISTENT|SQLITE_PREPARE_NO_VTAB;
+
+ pRtree->db = db;
+
+ if( isCreate ){
+ char *zCreate;
+ sqlite3_str *p = sqlite3_str_new(db);
+ int ii;
+ sqlite3_str_appendf(p,
+ "CREATE TABLE \"%w\".\"%w_rowid\"(rowid INTEGER PRIMARY KEY,nodeno",
+ zDb, zPrefix);
+ for(ii=0; ii<pRtree->nAux; ii++){
+ sqlite3_str_appendf(p,",a%d",ii);
+ }
+ sqlite3_str_appendf(p,
+ ");CREATE TABLE \"%w\".\"%w_node\"(nodeno INTEGER PRIMARY KEY,data);",
+ zDb, zPrefix);
+ sqlite3_str_appendf(p,
+ "CREATE TABLE \"%w\".\"%w_parent\"(nodeno INTEGER PRIMARY KEY,parentnode);",
+ zDb, zPrefix);
+ sqlite3_str_appendf(p,
+ "INSERT INTO \"%w\".\"%w_node\"VALUES(1,zeroblob(%d))",
+ zDb, zPrefix, pRtree->iNodeSize);
+ zCreate = sqlite3_str_finish(p);
+ if( !zCreate ){
+ return SQLITE_NOMEM;
+ }
+ rc = sqlite3_exec(db, zCreate, 0, 0, 0);
+ sqlite3_free(zCreate);
+ if( rc!=SQLITE_OK ){
+ return rc;
+ }
+ }
+
+ appStmt[0] = &pRtree->pWriteNode;
+ appStmt[1] = &pRtree->pDeleteNode;
+ appStmt[2] = &pRtree->pReadRowid;
+ appStmt[3] = &pRtree->pWriteRowid;
+ appStmt[4] = &pRtree->pDeleteRowid;
+ appStmt[5] = &pRtree->pReadParent;
+ appStmt[6] = &pRtree->pWriteParent;
+ appStmt[7] = &pRtree->pDeleteParent;
+
+ rc = rtreeQueryStat1(db, pRtree);
+ for(i=0; i<N_STATEMENT && rc==SQLITE_OK; i++){
+ char *zSql;
+ const char *zFormat;
+ if( i!=3 || pRtree->nAux==0 ){
+ zFormat = azSql[i];
+ }else {
+ /* An UPSERT is very slightly slower than REPLACE, but it is needed
+ ** if there are auxiliary columns */
+ zFormat = "INSERT INTO\"%w\".\"%w_rowid\"(rowid,nodeno)VALUES(?1,?2)"
+ "ON CONFLICT(rowid)DO UPDATE SET nodeno=excluded.nodeno";
+ }
+ zSql = sqlite3_mprintf(zFormat, zDb, zPrefix);
+ if( zSql ){
+ rc = sqlite3_prepare_v3(db, zSql, -1, f, appStmt[i], 0);
+ }else{
+ rc = SQLITE_NOMEM;
+ }
+ sqlite3_free(zSql);
+ }
+ if( pRtree->nAux ){
+ pRtree->zReadAuxSql = sqlite3_mprintf(
+ "SELECT * FROM \"%w\".\"%w_rowid\" WHERE rowid=?1",
+ zDb, zPrefix);
+ if( pRtree->zReadAuxSql==0 ){
+ rc = SQLITE_NOMEM;
+ }else{
+ sqlite3_str *p = sqlite3_str_new(db);
+ int ii;
+ char *zSql;
+ sqlite3_str_appendf(p, "UPDATE \"%w\".\"%w_rowid\"SET ", zDb, zPrefix);
+ for(ii=0; ii<pRtree->nAux; ii++){
+ if( ii ) sqlite3_str_append(p, ",", 1);
+ if( ii<pRtree->nAuxNotNull ){
+ sqlite3_str_appendf(p,"a%d=coalesce(?%d,a%d)",ii,ii+2,ii);
+ }else{
+ sqlite3_str_appendf(p,"a%d=?%d",ii,ii+2);
+ }
+ }
+ sqlite3_str_appendf(p, " WHERE rowid=?1");
+ zSql = sqlite3_str_finish(p);
+ if( zSql==0 ){
+ rc = SQLITE_NOMEM;
+ }else{
+ rc = sqlite3_prepare_v3(db, zSql, -1, f, &pRtree->pWriteAux, 0);
+ sqlite3_free(zSql);
+ }
+ }
+ }
+
+ return rc;
+}
+
+/*
+** The second argument to this function contains the text of an SQL statement
+** that returns a single integer value. The statement is compiled and executed
+** using database connection db. If successful, the integer value returned
+** is written to *piVal and SQLITE_OK returned. Otherwise, an SQLite error
+** code is returned and the value of *piVal after returning is not defined.
+*/
+static int getIntFromStmt(sqlite3 *db, const char *zSql, int *piVal){
+ int rc = SQLITE_NOMEM;
+ if( zSql ){
+ sqlite3_stmt *pStmt = 0;
+ rc = sqlite3_prepare_v2(db, zSql, -1, &pStmt, 0);
+ if( rc==SQLITE_OK ){
+ if( SQLITE_ROW==sqlite3_step(pStmt) ){
+ *piVal = sqlite3_column_int(pStmt, 0);
+ }
+ rc = sqlite3_finalize(pStmt);
+ }
+ }
+ return rc;
+}
+
+/*
+** This function is called from within the xConnect() or xCreate() method to
+** determine the node-size used by the rtree table being created or connected
+** to. If successful, pRtree->iNodeSize is populated and SQLITE_OK returned.
+** Otherwise, an SQLite error code is returned.
+**
+** If this function is being called as part of an xConnect(), then the rtree
+** table already exists. In this case the node-size is determined by inspecting
+** the root node of the tree.
+**
+** Otherwise, for an xCreate(), use 64 bytes less than the database page-size.
+** This ensures that each node is stored on a single database page. If the
+** database page-size is so large that more than RTREE_MAXCELLS entries
+** would fit in a single node, use a smaller node-size.
+*/
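+/* A worked example of the xCreate arithmetic below, assuming the common
+** 4096-byte page size: iNodeSize starts at 4096-64 = 4032 bytes. If the
+** 4-byte node header plus RTREE_MAXCELLS cells would occupy less than
+** that, the node size is reduced to exactly that smaller amount.
+*/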
+static int getNodeSize(
+ sqlite3 *db, /* Database handle */
+ Rtree *pRtree, /* Rtree handle */
+ int isCreate, /* True for xCreate, false for xConnect */
+ char **pzErr /* OUT: Error message, if any */
+){
+ int rc;
+ char *zSql;
+ if( isCreate ){
+ int iPageSize = 0;
+ zSql = sqlite3_mprintf("PRAGMA %Q.page_size", pRtree->zDb);
+ rc = getIntFromStmt(db, zSql, &iPageSize);
+ if( rc==SQLITE_OK ){
+ pRtree->iNodeSize = iPageSize-64;
+ if( (4+pRtree->nBytesPerCell*RTREE_MAXCELLS)<pRtree->iNodeSize ){
+ pRtree->iNodeSize = 4+pRtree->nBytesPerCell*RTREE_MAXCELLS;
+ }
+ }else{
+ *pzErr = sqlite3_mprintf("%s", sqlite3_errmsg(db));
+ }
+ }else{
+ zSql = sqlite3_mprintf(
+ "SELECT length(data) FROM '%q'.'%q_node' WHERE nodeno = 1",
+ pRtree->zDb, pRtree->zName
+ );
+ rc = getIntFromStmt(db, zSql, &pRtree->iNodeSize);
+ if( rc!=SQLITE_OK ){
+ *pzErr = sqlite3_mprintf("%s", sqlite3_errmsg(db));
+ }else if( pRtree->iNodeSize<(512-64) ){
+ rc = SQLITE_CORRUPT_VTAB;
+ RTREE_IS_CORRUPT(pRtree);
+ *pzErr = sqlite3_mprintf("undersize RTree blobs in \"%q_node\"",
+ pRtree->zName);
+ }
+ }
+
+ sqlite3_free(zSql);
+ return rc;
+}
+
+/*
+** Return the length of a token
+*/
+static int rtreeTokenLength(const char *z){
+ int dummy = 0;
+ return sqlite3GetToken((const unsigned char*)z,&dummy);
+}
+
+/*
+** This function is the implementation of both the xConnect and xCreate
+** methods of the r-tree virtual table.
+**
+** argv[0] -> module name
+** argv[1] -> database name
+** argv[2] -> table name
+** argv[...] -> column names...
+*/
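+/* For illustration, a hypothetical declaration such as:
+**
+**   CREATE VIRTUAL TABLE demo USING rtree(id, minX, maxX, minY, maxY);
+**
+** arrives here as argv[] = { "rtree", "main", "demo",
+** "id", "minX", "maxX", "minY", "maxY" }.
+*/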
+static int rtreeInit(
+ sqlite3 *db, /* Database connection */
+ void *pAux, /* One of the RTREE_COORD_* constants */
+ int argc, const char *const*argv, /* Parameters to CREATE TABLE statement */
+ sqlite3_vtab **ppVtab, /* OUT: New virtual table */
+ char **pzErr, /* OUT: Error message, if any */
+ int isCreate /* True for xCreate, false for xConnect */
+){
+ int rc = SQLITE_OK;
+ Rtree *pRtree;
+ int nDb; /* Length of string argv[1] */
+ int nName; /* Length of string argv[2] */
+ int eCoordType = (pAux ? RTREE_COORD_INT32 : RTREE_COORD_REAL32);
+ sqlite3_str *pSql;
+ char *zSql;
+ int ii = 4;
+ int iErr;
+
+ const char *aErrMsg[] = {
+ 0, /* 0 */
+ "Wrong number of columns for an rtree table", /* 1 */
+ "Too few columns for an rtree table", /* 2 */
+ "Too many columns for an rtree table", /* 3 */
+ "Auxiliary rtree columns must be last" /* 4 */
+ };
+
+ assert( RTREE_MAX_AUX_COLUMN<256 ); /* Aux columns counted by a u8 */
+ if( argc<6 || argc>RTREE_MAX_AUX_COLUMN+3 ){
+ *pzErr = sqlite3_mprintf("%s", aErrMsg[2 + (argc>=6)]);
+ return SQLITE_ERROR;
+ }
+
+ sqlite3_vtab_config(db, SQLITE_VTAB_CONSTRAINT_SUPPORT, 1);
+
+ /* Allocate the sqlite3_vtab structure */
+ nDb = (int)strlen(argv[1]);
+ nName = (int)strlen(argv[2]);
+ pRtree = (Rtree *)sqlite3_malloc64(sizeof(Rtree)+nDb+nName+2);
+ if( !pRtree ){
+ return SQLITE_NOMEM;
+ }
+ memset(pRtree, 0, sizeof(Rtree)+nDb+nName+2);
+ pRtree->nBusy = 1;
+ pRtree->base.pModule = &rtreeModule;
+ pRtree->zDb = (char *)&pRtree[1];
+ pRtree->zName = &pRtree->zDb[nDb+1];
+ pRtree->eCoordType = (u8)eCoordType;
+ memcpy(pRtree->zDb, argv[1], nDb);
+ memcpy(pRtree->zName, argv[2], nName);
+
+
+ /* Create/Connect to the underlying relational database schema. If
+ ** that is successful, call sqlite3_declare_vtab() to configure
+ ** the r-tree table schema.
+ */
+ pSql = sqlite3_str_new(db);
+ sqlite3_str_appendf(pSql, "CREATE TABLE x(%.*s INT",
+ rtreeTokenLength(argv[3]), argv[3]);
+ for(ii=4; ii<argc; ii++){
+ const char *zArg = argv[ii];
+ if( zArg[0]=='+' ){
+ pRtree->nAux++;
+ sqlite3_str_appendf(pSql, ",%.*s", rtreeTokenLength(zArg+1), zArg+1);
+ }else if( pRtree->nAux>0 ){
+ break;
+ }else{
+ static const char *azFormat[] = {",%.*s REAL", ",%.*s INT"};
+ pRtree->nDim2++;
+ sqlite3_str_appendf(pSql, azFormat[eCoordType],
+ rtreeTokenLength(zArg), zArg);
+ }
+ }
+ sqlite3_str_appendf(pSql, ");");
+ zSql = sqlite3_str_finish(pSql);
+ if( !zSql ){
+ rc = SQLITE_NOMEM;
+ }else if( ii<argc ){
+ *pzErr = sqlite3_mprintf("%s", aErrMsg[4]);
+ rc = SQLITE_ERROR;
+ }else if( SQLITE_OK!=(rc = sqlite3_declare_vtab(db, zSql)) ){
+ *pzErr = sqlite3_mprintf("%s", sqlite3_errmsg(db));
+ }
+ sqlite3_free(zSql);
+ if( rc ) goto rtreeInit_fail;
+ pRtree->nDim = pRtree->nDim2/2;
+ if( pRtree->nDim<1 ){
+ iErr = 2;
+ }else if( pRtree->nDim2>RTREE_MAX_DIMENSIONS*2 ){
+ iErr = 3;
+ }else if( pRtree->nDim2 % 2 ){
+ iErr = 1;
+ }else{
+ iErr = 0;
+ }
+ if( iErr ){
+ *pzErr = sqlite3_mprintf("%s", aErrMsg[iErr]);
+ goto rtreeInit_fail;
+ }
+ pRtree->nBytesPerCell = 8 + pRtree->nDim2*4;
+
+ /* Figure out the node size to use. */
+ rc = getNodeSize(db, pRtree, isCreate, pzErr);
+ if( rc ) goto rtreeInit_fail;
+ rc = rtreeSqlInit(pRtree, db, argv[1], argv[2], isCreate);
+ if( rc ){
+ *pzErr = sqlite3_mprintf("%s", sqlite3_errmsg(db));
+ goto rtreeInit_fail;
+ }
+
+ *ppVtab = (sqlite3_vtab *)pRtree;
+ return SQLITE_OK;
+
+rtreeInit_fail:
+ if( rc==SQLITE_OK ) rc = SQLITE_ERROR;
+ assert( *ppVtab==0 );
+ assert( pRtree->nBusy==1 );
+ rtreeRelease(pRtree);
+ return rc;
+}
+
+
+/*
+** Implementation of a scalar function that decodes r-tree nodes into
+** human-readable strings. This can be used for debugging and analysis.
+**
+** The scalar function takes two arguments: (1) the number of dimensions
+** to the rtree (between 1 and 5, inclusive) and (2) a blob of data containing
+** an r-tree node. For a two-dimensional r-tree structure called "rt", to
+** deserialize all nodes, use a statement like:
+**
+** SELECT rtreenode(2, data) FROM rt_node;
+**
+** The human-readable string takes the form of a Tcl list with one
+** entry for each cell in the r-tree node. Each entry is itself a
+** list, containing the 8-byte rowid/pageno followed by the
+** <num-dimension>*2 coordinates.
+*/
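+/* For illustration, a hypothetical two-cell, two-dimensional node might
+** render as:
+**
+**   {1 0 10 0 10} {2 5 15 5 15}
+**
+** i.e. each braced entry is a rowid/pageno followed by min/max pairs for
+** each dimension.
+*/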
+static void rtreenode(sqlite3_context *ctx, int nArg, sqlite3_value **apArg){
+ RtreeNode node;
+ Rtree tree;
+ int ii;
+ int nData;
+ int errCode;
+ sqlite3_str *pOut;
+
+ UNUSED_PARAMETER(nArg);
+ memset(&node, 0, sizeof(RtreeNode));
+ memset(&tree, 0, sizeof(Rtree));
+ tree.nDim = (u8)sqlite3_value_int(apArg[0]);
+ if( tree.nDim<1 || tree.nDim>5 ) return;
+ tree.nDim2 = tree.nDim*2;
+ tree.nBytesPerCell = 8 + 8 * tree.nDim;
+ node.zData = (u8 *)sqlite3_value_blob(apArg[1]);
+ nData = sqlite3_value_bytes(apArg[1]);
+ if( nData<4 ) return;
+ if( nData<NCELL(&node)*tree.nBytesPerCell ) return;
+
+ pOut = sqlite3_str_new(0);
+ for(ii=0; ii<NCELL(&node); ii++){
+ RtreeCell cell;
+ int jj;
+
+ nodeGetCell(&tree, &node, ii, &cell);
+ if( ii>0 ) sqlite3_str_append(pOut, " ", 1);
+ sqlite3_str_appendf(pOut, "{%lld", cell.iRowid);
+ for(jj=0; jj<tree.nDim2; jj++){
+#ifndef SQLITE_RTREE_INT_ONLY
+ sqlite3_str_appendf(pOut, " %g", (double)cell.aCoord[jj].f);
+#else
+ sqlite3_str_appendf(pOut, " %d", cell.aCoord[jj].i);
+#endif
+ }
+ sqlite3_str_append(pOut, "}", 1);
+ }
+ errCode = sqlite3_str_errcode(pOut);
+ sqlite3_result_text(ctx, sqlite3_str_finish(pOut), -1, sqlite3_free);
+ sqlite3_result_error_code(ctx, errCode);
+}
+
+/* This routine implements an SQL function that returns the "depth" parameter
+** from the front of a blob that is an r-tree node. For example:
+**
+** SELECT rtreedepth(data) FROM rt_node WHERE nodeno=1;
+**
+** The depth value is 0 for all nodes other than the root node, and the root
+** node always has nodeno=1, so the example above is the primary use for this
+** routine. This routine is intended for testing and analysis only.
+*/
+static void rtreedepth(sqlite3_context *ctx, int nArg, sqlite3_value **apArg){
+ UNUSED_PARAMETER(nArg);
+ if( sqlite3_value_type(apArg[0])!=SQLITE_BLOB
+ || sqlite3_value_bytes(apArg[0])<2
+ ){
+ sqlite3_result_error(ctx, "Invalid argument to rtreedepth()", -1);
+ }else{
+ u8 *zBlob = (u8 *)sqlite3_value_blob(apArg[0]);
+ sqlite3_result_int(ctx, readInt16(zBlob));
+ }
+}
+
+/*
+** Context object passed between the various routines that make up the
+** implementation of integrity-check function rtreecheck().
+*/
+typedef struct RtreeCheck RtreeCheck;
+struct RtreeCheck {
+ sqlite3 *db; /* Database handle */
+ const char *zDb; /* Database containing rtree table */
+ const char *zTab; /* Name of rtree table */
+ int bInt; /* True for rtree_i32 table */
+ int nDim; /* Number of dimensions for this rtree tbl */
+ sqlite3_stmt *pGetNode; /* Statement used to retrieve nodes */
+ sqlite3_stmt *aCheckMapping[2]; /* Statements to query %_parent/%_rowid */
+ int nLeaf; /* Number of leaf cells in table */
+ int nNonLeaf; /* Number of non-leaf cells in table */
+ int rc; /* Return code */
+ char *zReport; /* Message to report */
+ int nErr; /* Number of lines in zReport */
+};
+
+#define RTREE_CHECK_MAX_ERROR 100
+
+/*
+** Reset SQL statement pStmt. If the sqlite3_reset() call returns an error,
+** and RtreeCheck.rc==SQLITE_OK, set RtreeCheck.rc to the error code.
+*/
+static void rtreeCheckReset(RtreeCheck *pCheck, sqlite3_stmt *pStmt){
+ int rc = sqlite3_reset(pStmt);
+ if( pCheck->rc==SQLITE_OK ) pCheck->rc = rc;
+}
+
+/*
+** The second and subsequent arguments to this function are a format string
+** and printf style arguments. This function formats the string and attempts
+** to compile it as an SQL statement.
+**
+** If successful, a pointer to the new SQL statement is returned. Otherwise,
+** NULL is returned and an error code left in RtreeCheck.rc.
+*/
+static sqlite3_stmt *rtreeCheckPrepare(
+ RtreeCheck *pCheck, /* RtreeCheck object */
+ const char *zFmt, ... /* Format string and trailing args */
+){
+ va_list ap;
+ char *z;
+ sqlite3_stmt *pRet = 0;
+
+ va_start(ap, zFmt);
+ z = sqlite3_vmprintf(zFmt, ap);
+
+ if( pCheck->rc==SQLITE_OK ){
+ if( z==0 ){
+ pCheck->rc = SQLITE_NOMEM;
+ }else{
+ pCheck->rc = sqlite3_prepare_v2(pCheck->db, z, -1, &pRet, 0);
+ }
+ }
+
+ sqlite3_free(z);
+ va_end(ap);
+ return pRet;
+}
+
+/*
+** The second and subsequent arguments to this function are a printf()
+** style format string and arguments. This function formats the string and
+** appends it to the report being accumulated in pCheck.
+*/
+static void rtreeCheckAppendMsg(RtreeCheck *pCheck, const char *zFmt, ...){
+ va_list ap;
+ va_start(ap, zFmt);
+ if( pCheck->rc==SQLITE_OK && pCheck->nErr<RTREE_CHECK_MAX_ERROR ){
+ char *z = sqlite3_vmprintf(zFmt, ap);
+ if( z==0 ){
+ pCheck->rc = SQLITE_NOMEM;
+ }else{
+ pCheck->zReport = sqlite3_mprintf("%z%s%z",
+ pCheck->zReport, (pCheck->zReport ? "\n" : ""), z
+ );
+ if( pCheck->zReport==0 ){
+ pCheck->rc = SQLITE_NOMEM;
+ }
+ }
+ pCheck->nErr++;
+ }
+ va_end(ap);
+}
+
+/*
+** This function is a no-op if there is already an error code stored
+** in the RtreeCheck object indicated by the first argument. NULL is
+** returned in this case.
+**
+** Otherwise, the contents of rtree table node iNode are loaded from
+** the database and copied into a buffer obtained from sqlite3_malloc().
+** If no error occurs, a pointer to the buffer is returned and (*pnNode)
+** is set to the size of the buffer in bytes.
+**
+** Or, if an error does occur, NULL is returned and an error code left
+** in the RtreeCheck object. The final value of *pnNode is undefined in
+** this case.
+*/
+static u8 *rtreeCheckGetNode(RtreeCheck *pCheck, i64 iNode, int *pnNode){
+ u8 *pRet = 0; /* Return value */
+
+ if( pCheck->rc==SQLITE_OK && pCheck->pGetNode==0 ){
+ pCheck->pGetNode = rtreeCheckPrepare(pCheck,
+ "SELECT data FROM %Q.'%q_node' WHERE nodeno=?",
+ pCheck->zDb, pCheck->zTab
+ );
+ }
+
+ if( pCheck->rc==SQLITE_OK ){
+ sqlite3_bind_int64(pCheck->pGetNode, 1, iNode);
+ if( sqlite3_step(pCheck->pGetNode)==SQLITE_ROW ){
+ int nNode = sqlite3_column_bytes(pCheck->pGetNode, 0);
+ const u8 *pNode = (const u8*)sqlite3_column_blob(pCheck->pGetNode, 0);
+ pRet = sqlite3_malloc64(nNode);
+ if( pRet==0 ){
+ pCheck->rc = SQLITE_NOMEM;
+ }else{
+ memcpy(pRet, pNode, nNode);
+ *pnNode = nNode;
+ }
+ }
+ rtreeCheckReset(pCheck, pCheck->pGetNode);
+ if( pCheck->rc==SQLITE_OK && pRet==0 ){
+ rtreeCheckAppendMsg(pCheck, "Node %lld missing from database", iNode);
+ }
+ }
+
+ return pRet;
+}
+
+/*
+** This function is used to check that the %_parent (if bLeaf==0) or %_rowid
+** (if bLeaf==1) table contains a specified entry. The schemas of the
+** two tables are:
+**
+** CREATE TABLE %_parent(nodeno INTEGER PRIMARY KEY, parentnode INTEGER)
+** CREATE TABLE %_rowid(rowid INTEGER PRIMARY KEY, nodeno INTEGER, ...)
+**
+** In both cases, this function checks that there exists an entry with
+** IPK value iKey and the second column set to iVal.
+*/
+static void rtreeCheckMapping(
+ RtreeCheck *pCheck, /* RtreeCheck object */
+ int bLeaf, /* True for a leaf cell, false for interior */
+ i64 iKey, /* Key for mapping */
+ i64 iVal /* Expected value for mapping */
+){
+ int rc;
+ sqlite3_stmt *pStmt;
+ const char *azSql[2] = {
+ "SELECT parentnode FROM %Q.'%q_parent' WHERE nodeno=?1",
+ "SELECT nodeno FROM %Q.'%q_rowid' WHERE rowid=?1"
+ };
+
+ assert( bLeaf==0 || bLeaf==1 );
+ if( pCheck->aCheckMapping[bLeaf]==0 ){
+ pCheck->aCheckMapping[bLeaf] = rtreeCheckPrepare(pCheck,
+ azSql[bLeaf], pCheck->zDb, pCheck->zTab
+ );
+ }
+ if( pCheck->rc!=SQLITE_OK ) return;
+
+ pStmt = pCheck->aCheckMapping[bLeaf];
+ sqlite3_bind_int64(pStmt, 1, iKey);
+ rc = sqlite3_step(pStmt);
+ if( rc==SQLITE_DONE ){
+ rtreeCheckAppendMsg(pCheck, "Mapping (%lld -> %lld) missing from %s table",
+ iKey, iVal, (bLeaf ? "%_rowid" : "%_parent")
+ );
+ }else if( rc==SQLITE_ROW ){
+ i64 ii = sqlite3_column_int64(pStmt, 0);
+ if( ii!=iVal ){
+ rtreeCheckAppendMsg(pCheck,
+ "Found (%lld -> %lld) in %s table, expected (%lld -> %lld)",
+ iKey, ii, (bLeaf ? "%_rowid" : "%_parent"), iKey, iVal
+ );
+ }
+ }
+ rtreeCheckReset(pCheck, pStmt);
+}
+
+/*
+** Argument pCell points to an array of coordinates stored on an rtree page.
+** This function checks that the coordinates are internally consistent (no
+** x1>x2 conditions) and adds an error message to the RtreeCheck object
+** if they are not.
+**
+** Additionally, if pParent is not NULL, then it is assumed to point to
+** the array of coordinates on the parent page that bound the page
+** containing pCell. In this case it is also verified that the two
+** sets of coordinates are mutually consistent and an error message added
+** to the RtreeCheck object if they are not.
+*/
+static void rtreeCheckCellCoord(
+ RtreeCheck *pCheck,
+ i64 iNode, /* Node id to use in error messages */
+ int iCell, /* Cell number to use in error messages */
+ u8 *pCell, /* Pointer to cell coordinates */
+ u8 *pParent /* Pointer to parent coordinates */
+){
+ RtreeCoord c1, c2;
+ RtreeCoord p1, p2;
+ int i;
+
+ for(i=0; i<pCheck->nDim; i++){
+ readCoord(&pCell[4*2*i], &c1);
+ readCoord(&pCell[4*(2*i + 1)], &c2);
+
+ /* printf("%e, %e\n", c1.u.f, c2.u.f); */
+ if( pCheck->bInt ? c1.i>c2.i : c1.f>c2.f ){
+ rtreeCheckAppendMsg(pCheck,
+ "Dimension %d of cell %d on node %lld is corrupt", i, iCell, iNode
+ );
+ }
+
+ if( pParent ){
+ readCoord(&pParent[4*2*i], &p1);
+ readCoord(&pParent[4*(2*i + 1)], &p2);
+
+ if( (pCheck->bInt ? c1.i<p1.i : c1.f<p1.f)
+ || (pCheck->bInt ? c2.i>p2.i : c2.f>p2.f)
+ ){
+ rtreeCheckAppendMsg(pCheck,
+ "Dimension %d of cell %d on node %lld is corrupt relative to parent"
+ , i, iCell, iNode
+ );
+ }
+ }
+ }
+}
+
+/*
+** Run rtreecheck() checks on node iNode, which is at depth iDepth within
+** the r-tree structure. Argument aParent points to the array of coordinates
+** that bound node iNode on the parent node.
+**
+** If any problems are discovered, an error message is appended to the
+** report accumulated in the RtreeCheck object.
+*/
+static void rtreeCheckNode(
+ RtreeCheck *pCheck,
+ int iDepth, /* Depth of iNode (0==leaf) */
+ u8 *aParent, /* Buffer containing parent coords */
+ i64 iNode /* Node to check */
+){
+ u8 *aNode = 0;
+ int nNode = 0;
+
+ assert( iNode==1 || aParent!=0 );
+ assert( pCheck->nDim>0 );
+
+ aNode = rtreeCheckGetNode(pCheck, iNode, &nNode);
+ if( aNode ){
+ if( nNode<4 ){
+ rtreeCheckAppendMsg(pCheck,
+ "Node %lld is too small (%d bytes)", iNode, nNode
+ );
+ }else{
+ int nCell; /* Number of cells on page */
+ int i; /* Used to iterate through cells */
+ if( aParent==0 ){
+ iDepth = readInt16(aNode);
+ if( iDepth>RTREE_MAX_DEPTH ){
+ rtreeCheckAppendMsg(pCheck, "Rtree depth out of range (%d)", iDepth);
+ sqlite3_free(aNode);
+ return;
+ }
+ }
+ nCell = readInt16(&aNode[2]);
+ if( (4 + nCell*(8 + pCheck->nDim*2*4))>nNode ){
+ rtreeCheckAppendMsg(pCheck,
+ "Node %lld is too small for cell count of %d (%d bytes)",
+ iNode, nCell, nNode
+ );
+ }else{
+ for(i=0; i<nCell; i++){
+ u8 *pCell = &aNode[4 + i*(8 + pCheck->nDim*2*4)];
+ i64 iVal = readInt64(pCell);
+ rtreeCheckCellCoord(pCheck, iNode, i, &pCell[8], aParent);
+
+ if( iDepth>0 ){
+ rtreeCheckMapping(pCheck, 0, iVal, iNode);
+ rtreeCheckNode(pCheck, iDepth-1, &pCell[8], iVal);
+ pCheck->nNonLeaf++;
+ }else{
+ rtreeCheckMapping(pCheck, 1, iVal, iNode);
+ pCheck->nLeaf++;
+ }
+ }
+ }
+ }
+ sqlite3_free(aNode);
+ }
+}
+
+/*
+** The second argument to this function must be either "_rowid" or
+** "_parent". This function checks that the number of entries in the
+** %_rowid or %_parent table is exactly nExpect. If not, it adds
+** an error message to the report in the RtreeCheck object indicated
+** by the first argument.
+*/
+static void rtreeCheckCount(RtreeCheck *pCheck, const char *zTbl, i64 nExpect){
+ if( pCheck->rc==SQLITE_OK ){
+ sqlite3_stmt *pCount;
+ pCount = rtreeCheckPrepare(pCheck, "SELECT count(*) FROM %Q.'%q%s'",
+ pCheck->zDb, pCheck->zTab, zTbl
+ );
+ if( pCount ){
+ if( sqlite3_step(pCount)==SQLITE_ROW ){
+ i64 nActual = sqlite3_column_int64(pCount, 0);
+ if( nActual!=nExpect ){
+ rtreeCheckAppendMsg(pCheck, "Wrong number of entries in %%%s table"
+ " - expected %lld, actual %lld" , zTbl, nExpect, nActual
+ );
+ }
+ }
+ pCheck->rc = sqlite3_finalize(pCount);
+ }
+ }
+}
+
+/*
+** This function does the bulk of the work for the rtree integrity-check.
+** It is called by rtreecheck(), which is the SQL function implementation.
+*/
+static int rtreeCheckTable(
+ sqlite3 *db, /* Database handle to access db through */
+ const char *zDb, /* Name of db ("main", "temp" etc.) */
+ const char *zTab, /* Name of rtree table to check */
+ char **pzReport /* OUT: sqlite3_malloc'd report text */
+){
+ RtreeCheck check; /* Common context for various routines */
+ sqlite3_stmt *pStmt = 0; /* Used to find column count of rtree table */
+ int bEnd = 0; /* True if transaction should be closed */
+ int nAux = 0; /* Number of extra columns. */
+
+ /* Initialize the context object */
+ memset(&check, 0, sizeof(check));
+ check.db = db;
+ check.zDb = zDb;
+ check.zTab = zTab;
+
+ /* If there is not already an open transaction, open one now. This is
+ ** to ensure that the queries run as part of this integrity-check operate
+ ** on a consistent snapshot. */
+ if( sqlite3_get_autocommit(db) ){
+ check.rc = sqlite3_exec(db, "BEGIN", 0, 0, 0);
+ bEnd = 1;
+ }
+
+ /* Find the number of auxiliary columns */
+ if( check.rc==SQLITE_OK ){
+ pStmt = rtreeCheckPrepare(&check, "SELECT * FROM %Q.'%q_rowid'", zDb, zTab);
+ if( pStmt ){
+ nAux = sqlite3_column_count(pStmt) - 2;
+ sqlite3_finalize(pStmt);
+ }
+ check.rc = SQLITE_OK;
+ }
+
+ /* Find number of dimensions in the rtree table. */
+ pStmt = rtreeCheckPrepare(&check, "SELECT * FROM %Q.%Q", zDb, zTab);
+ if( pStmt ){
+ int rc;
+ check.nDim = (sqlite3_column_count(pStmt) - 1 - nAux) / 2;
+ if( check.nDim<1 ){
+ rtreeCheckAppendMsg(&check, "Schema corrupt or not an rtree");
+ }else if( SQLITE_ROW==sqlite3_step(pStmt) ){
+ check.bInt = (sqlite3_column_type(pStmt, 1)==SQLITE_INTEGER);
+ }
+ rc = sqlite3_finalize(pStmt);
+ if( rc!=SQLITE_CORRUPT ) check.rc = rc;
+ }
+
+ /* Do the actual integrity-check */
+ if( check.nDim>=1 ){
+ if( check.rc==SQLITE_OK ){
+ rtreeCheckNode(&check, 0, 0, 1);
+ }
+ rtreeCheckCount(&check, "_rowid", check.nLeaf);
+ rtreeCheckCount(&check, "_parent", check.nNonLeaf);
+ }
+
+ /* Finalize SQL statements used by the integrity-check */
+ sqlite3_finalize(check.pGetNode);
+ sqlite3_finalize(check.aCheckMapping[0]);
+ sqlite3_finalize(check.aCheckMapping[1]);
+
+ /* If one was opened, close the transaction */
+ if( bEnd ){
+ int rc = sqlite3_exec(db, "END", 0, 0, 0);
+ if( check.rc==SQLITE_OK ) check.rc = rc;
+ }
+ *pzReport = check.zReport;
+ return check.rc;
+}
+
+/*
+** Usage:
+**
+** rtreecheck(<rtree-table>);
+** rtreecheck(<database>, <rtree-table>);
+**
+** Invoking this SQL function runs an integrity-check on the named rtree
+** table. The integrity-check verifies the following:
+**
+** 1. For each cell in the r-tree structure (%_node table), that:
+**
+** a) for each dimension, (coord1 <= coord2).
+**
+** b) unless the cell is on the root node, that the cell is bounded
+** by the parent cell on the parent node.
+**
+** c) for leaf nodes, that there is an entry in the %_rowid
+** table corresponding to the cell's rowid value that
+** points to the correct node.
+**
+** d) for cells on non-leaf nodes, that there is an entry in the
+** %_parent table mapping from the cell's child node to the
+** node that it resides on.
+**
+** 2. That there are the same number of entries in the %_rowid table
+** as there are leaf cells in the r-tree structure, and that there
+** is a leaf cell that corresponds to each entry in the %_rowid table.
+**
+** 3. That there are the same number of entries in the %_parent table
+** as there are non-leaf cells in the r-tree structure, and that
+** there is a non-leaf cell that corresponds to each entry in the
+** %_parent table.
+*/
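+/* For illustration, a hypothetical invocation:
+**
+**   SELECT rtreecheck('demo');
+**
+** returns the text 'ok' if no problems are found, or a newline-separated
+** list of up to RTREE_CHECK_MAX_ERROR problem descriptions otherwise.
+*/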
+static void rtreecheck(
+ sqlite3_context *ctx,
+ int nArg,
+ sqlite3_value **apArg
+){
+ if( nArg!=1 && nArg!=2 ){
+ sqlite3_result_error(ctx,
+ "wrong number of arguments to function rtreecheck()", -1
+ );
+ }else{
+ int rc;
+ char *zReport = 0;
+ const char *zDb = (const char*)sqlite3_value_text(apArg[0]);
+ const char *zTab;
+ if( nArg==1 ){
+ zTab = zDb;
+ zDb = "main";
+ }else{
+ zTab = (const char*)sqlite3_value_text(apArg[1]);
+ }
+ rc = rtreeCheckTable(sqlite3_context_db_handle(ctx), zDb, zTab, &zReport);
+ if( rc==SQLITE_OK ){
+ sqlite3_result_text(ctx, zReport ? zReport : "ok", -1, SQLITE_TRANSIENT);
+ }else{
+ sqlite3_result_error_code(ctx, rc);
+ }
+ sqlite3_free(zReport);
+ }
+}
+
+/* Conditionally include the geopoly code */
+#ifdef SQLITE_ENABLE_GEOPOLY
+/************** Include geopoly.c in the middle of rtree.c *******************/
+/************** Begin file geopoly.c *****************************************/
+/*
+** 2018-05-25
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+******************************************************************************
+**
+** This file implements an alternative R-Tree virtual table that
+** uses polygons to express the boundaries of 2-dimensional objects.
+**
+** This file is #include-ed onto the end of "rtree.c" so that it has
+** access to all of the R-Tree internals.
+*/
+/* #include <stdlib.h> */
+
+/* Enable -DGEOPOLY_ENABLE_DEBUG for debugging facilities */
+#ifdef GEOPOLY_ENABLE_DEBUG
+ static int geo_debug = 0;
+# define GEODEBUG(X) if(geo_debug)printf X
+#else
+# define GEODEBUG(X)
+#endif
+
+#ifndef JSON_NULL /* The following stuff repeats things found in json1 */
+/*
+** Versions of isspace(), isalnum() and isdigit() to which it is safe
+** to pass signed char values.
+*/
+#ifdef sqlite3Isdigit
+ /* Use the SQLite core versions if this routine is part of the
+ ** SQLite amalgamation */
+# define safe_isdigit(x) sqlite3Isdigit(x)
+# define safe_isalnum(x) sqlite3Isalnum(x)
+# define safe_isxdigit(x) sqlite3Isxdigit(x)
+#else
+ /* Use the standard library for separate compilation */
+#include <ctype.h> /* amalgamator: keep */
+# define safe_isdigit(x) isdigit((unsigned char)(x))
+# define safe_isalnum(x) isalnum((unsigned char)(x))
+# define safe_isxdigit(x) isxdigit((unsigned char)(x))
+#endif
+
+/*
+** Growing our own isspace() routine this way is twice as fast as
+** the library isspace() function.
+*/
+static const char geopolyIsSpace[] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+};
+#define safe_isspace(x) (geopolyIsSpace[(unsigned char)x])
+#endif /* JSON NULL - back to original code */
+
+/* Compiler and version */
+#ifndef GCC_VERSION
+#if defined(__GNUC__) && !defined(SQLITE_DISABLE_INTRINSIC)
+# define GCC_VERSION (__GNUC__*1000000+__GNUC_MINOR__*1000+__GNUC_PATCHLEVEL__)
+#else
+# define GCC_VERSION 0
+#endif
+#endif
+#ifndef MSVC_VERSION
+#if defined(_MSC_VER) && !defined(SQLITE_DISABLE_INTRINSIC)
+# define MSVC_VERSION _MSC_VER
+#else
+# define MSVC_VERSION 0
+#endif
+#endif
+
+/* Datatype for coordinates
+*/
+typedef float GeoCoord;
+
+/*
+** Internal representation of a polygon.
+**
+** The polygon consists of a sequence of vertexes. There is a line
+** segment between each pair of vertexes, and one final segment from
+** the last vertex back to the first. (This differs from the GeoJSON
+** standard in which the final vertex is a repeat of the first.)
+**
+** The polygon follows the right-hand rule. The area to the right of
+** each segment is "outside" and the area to the left is "inside".
+**
+** The on-disk representation consists of a 4-byte header followed by
+** the values. The 4-byte header is:
+**
+** encoding (1 byte) 0=big-endian, 1=little-endian
+** nvertex (3 bytes) Number of vertexes as a big-endian integer
+**
+** Enough space is allocated for 4 coordinates, to work around over-zealous
+** warnings coming from some compilers (notably, clang). In reality, the size
+** of each GeoPoly memory allocation is adjusted as necessary so that the
+** GeoPoly.a[] array at the end is the appropriate size.
+*/
+typedef struct GeoPoly GeoPoly;
+struct GeoPoly {
+ int nVertex; /* Number of vertexes */
+ unsigned char hdr[4]; /* Header for on-disk representation */
+ GeoCoord a[8]; /* 2*nVertex values. X (longitude) first, then Y */
+};
+
+/* The size of a memory allocation needed for a GeoPoly object sufficient
+** to hold N coordinate pairs.
+*/
+#define GEOPOLY_SZ(N) (sizeof(GeoPoly) + sizeof(GeoCoord)*2*((N)-4))
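+/* Worked examples of the layout above: on a little-endian host, a 4-vertex
+** polygon carries hdr[] = {1, 0, 0, 4} followed by 8 GeoCoord values. And
+** since GeoPoly embeds space for 4 coordinate pairs, GEOPOLY_SZ(4) is just
+** sizeof(GeoPoly), each extra vertex adding 2*sizeof(GeoCoord) bytes.
+*/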
+
+/* Macros to access coordinates of a GeoPoly.
+** We have to use these macros, rather than just say p->a[i] in order
+** to silence (incorrect) UBSAN warnings if the array index is too large.
+*/
+#define GeoX(P,I) (((GeoCoord*)(P)->a)[(I)*2])
+#define GeoY(P,I) (((GeoCoord*)(P)->a)[(I)*2+1])
+
+
+/*
+** State of a parse of a GeoJSON input.
+*/
+typedef struct GeoParse GeoParse;
+struct GeoParse {
+ const unsigned char *z; /* Unparsed input */
+ int nVertex; /* Number of vertexes in a[] */
+ int nAlloc; /* Space allocated to a[] */
+ int nErr; /* Number of errors encountered */
+ GeoCoord *a; /* Array of vertexes. From sqlite3_malloc64() */
+};
+
+/* Do a 4-byte byte swap */
+static void geopolySwab32(unsigned char *a){
+ unsigned char t = a[0];
+ a[0] = a[3];
+ a[3] = t;
+ t = a[1];
+ a[1] = a[2];
+ a[2] = t;
+}
+
+/* Skip whitespace. Return the next non-whitespace character. */
+static char geopolySkipSpace(GeoParse *p){
+ while( safe_isspace(p->z[0]) ) p->z++;
+ return p->z[0];
+}
+
+/* Parse out a number. Write the value into *pVal if pVal!=0.
+** Return non-zero on success and zero if the next token is not a number.
+*/
+static int geopolyParseNumber(GeoParse *p, GeoCoord *pVal){
+ char c = geopolySkipSpace(p);
+ const unsigned char *z = p->z;
+ int j = 0;
+ int seenDP = 0;
+ int seenE = 0;
+ if( c=='-' ){
+ j = 1;
+ c = z[j];
+ }
+ if( c=='0' && z[j+1]>='0' && z[j+1]<='9' ) return 0;
+ for(;; j++){
+ c = z[j];
+ if( safe_isdigit(c) ) continue;
+ if( c=='.' ){
+ if( z[j-1]=='-' ) return 0;
+ if( seenDP ) return 0;
+ seenDP = 1;
+ continue;
+ }
+ if( c=='e' || c=='E' ){
+ if( z[j-1]<'0' ) return 0;
+ if( seenE ) return -1;
+ seenDP = seenE = 1;
+ c = z[j+1];
+ if( c=='+' || c=='-' ){
+ j++;
+ c = z[j+1];
+ }
+ if( c<'0' || c>'9' ) return 0;
+ continue;
+ }
+ break;
+ }
+ if( z[j-1]<'0' ) return 0;
+ if( pVal ){
+#ifdef SQLITE_AMALGAMATION
+ /* The sqlite3AtoF() routine is much much faster than atof(), if it
+ ** is available */
+ double r;
+ (void)sqlite3AtoF((const char*)p->z, &r, j, SQLITE_UTF8);
+ *pVal = r;
+#else
+ *pVal = (GeoCoord)atof((const char*)p->z);
+#endif
+ }
+ p->z += j;
+ return 1;
+}
+
+/*
+** If the input is a well-formed JSON array of coordinates with at least
+** four coordinates and where each coordinate is itself a two-value array,
+** then convert the JSON into a GeoPoly object and return a pointer to
+** that object.
+**
+** If any error occurs, return NULL.
+*/
+static GeoPoly *geopolyParseJson(const unsigned char *z, int *pRc){
+ GeoParse s;
+ int rc = SQLITE_OK;
+ memset(&s, 0, sizeof(s));
+ s.z = z;
+ if( geopolySkipSpace(&s)=='[' ){
+ s.z++;
+ while( geopolySkipSpace(&s)=='[' ){
+ int ii = 0;
+ char c;
+ s.z++;
+ if( s.nVertex>=s.nAlloc ){
+ GeoCoord *aNew;
+ s.nAlloc = s.nAlloc*2 + 16;
+ aNew = sqlite3_realloc64(s.a, s.nAlloc*sizeof(GeoCoord)*2 );
+ if( aNew==0 ){
+ rc = SQLITE_NOMEM;
+ s.nErr++;
+ break;
+ }
+ s.a = aNew;
+ }
+ while( geopolyParseNumber(&s, ii<=1 ? &s.a[s.nVertex*2+ii] : 0) ){
+ ii++;
+ if( ii==2 ) s.nVertex++;
+ c = geopolySkipSpace(&s);
+ s.z++;
+ if( c==',' ) continue;
+ if( c==']' && ii>=2 ) break;
+ s.nErr++;
+ rc = SQLITE_ERROR;
+ goto parse_json_err;
+ }
+ if( geopolySkipSpace(&s)==',' ){
+ s.z++;
+ continue;
+ }
+ break;
+ }
+ if( geopolySkipSpace(&s)==']'
+ && s.nVertex>=4
+ && s.a[0]==s.a[s.nVertex*2-2]
+ && s.a[1]==s.a[s.nVertex*2-1]
+ && (s.z++, geopolySkipSpace(&s)==0)
+ ){
+ GeoPoly *pOut;
+ int x = 1;
+ s.nVertex--; /* Remove the redundant vertex at the end */
+ pOut = sqlite3_malloc64( GEOPOLY_SZ((sqlite3_int64)s.nVertex) );
+ x = 1;
+ if( pOut==0 ) goto parse_json_err;
+ pOut->nVertex = s.nVertex;
+ memcpy(pOut->a, s.a, s.nVertex*2*sizeof(GeoCoord));
+ pOut->hdr[0] = *(unsigned char*)&x;
+ pOut->hdr[1] = (s.nVertex>>16)&0xff;
+ pOut->hdr[2] = (s.nVertex>>8)&0xff;
+ pOut->hdr[3] = s.nVertex&0xff;
+ sqlite3_free(s.a);
+ if( pRc ) *pRc = SQLITE_OK;
+ return pOut;
+ }else{
+ s.nErr++;
+ rc = SQLITE_ERROR;
+ }
+ }
+parse_json_err:
+ if( pRc ) *pRc = rc;
+ sqlite3_free(s.a);
+ return 0;
+}
+
+/*
+** Given a function parameter, try to interpret it as a polygon, either
+** in the binary format or JSON text. Compute a GeoPoly object and
+** return a pointer to that object. Or if the input is not a well-formed
+** polygon, put an error message in sqlite3_context and return NULL.
+*/
+static GeoPoly *geopolyFuncParam(
+ sqlite3_context *pCtx, /* Context for error messages */
+ sqlite3_value *pVal, /* The value to decode */
+ int *pRc /* Write error here */
+){
+ GeoPoly *p = 0;
+ int nByte;
+ if( sqlite3_value_type(pVal)==SQLITE_BLOB
+ && (nByte = sqlite3_value_bytes(pVal))>=(4+6*sizeof(GeoCoord))
+ ){
+ const unsigned char *a = sqlite3_value_blob(pVal);
+ int nVertex;
+ nVertex = (a[1]<<16) + (a[2]<<8) + a[3];
+ if( (a[0]==0 || a[0]==1)
+ && (nVertex*2*sizeof(GeoCoord) + 4)==(unsigned int)nByte
+ ){
+ p = sqlite3_malloc64( sizeof(*p) + (nVertex-1)*2*sizeof(GeoCoord) );
+ if( p==0 ){
+ if( pRc ) *pRc = SQLITE_NOMEM;
+ if( pCtx ) sqlite3_result_error_nomem(pCtx);
+ }else{
+ int x = 1;
+ p->nVertex = nVertex;
+ memcpy(p->hdr, a, nByte);
+ if( a[0] != *(unsigned char*)&x ){
+ int ii;
+ for(ii=0; ii<nVertex; ii++){
+ geopolySwab32((unsigned char*)&GeoX(p,ii));
+ geopolySwab32((unsigned char*)&GeoY(p,ii));
+ }
+ p->hdr[0] ^= 1;
+ }
+ }
+ }
+ if( pRc ) *pRc = SQLITE_OK;
+ return p;
+ }else if( sqlite3_value_type(pVal)==SQLITE_TEXT ){
+ const unsigned char *zJson = sqlite3_value_text(pVal);
+ if( zJson==0 ){
+ if( pRc ) *pRc = SQLITE_NOMEM;
+ return 0;
+ }
+ return geopolyParseJson(zJson, pRc);
+ }else{
+ if( pRc ) *pRc = SQLITE_ERROR;
+ return 0;
+ }
+}
+
+/*
+** Implementation of the geopoly_blob(X) function.
+**
+** If the input is a well-formed Geopoly BLOB or JSON string
+** then return the BLOB representation of the polygon. Otherwise
+** return NULL.
+*/
+static void geopolyBlobFunc(
+ sqlite3_context *context,
+ int argc,
+ sqlite3_value **argv
+){
+ GeoPoly *p = geopolyFuncParam(context, argv[0], 0);
+ if( p ){
+ sqlite3_result_blob(context, p->hdr,
+ 4+8*p->nVertex, SQLITE_TRANSIENT);
+ sqlite3_free(p);
+ }
+}
+
+/*
+** SQL function: geopoly_json(X)
+**
+** Interpret X as a polygon and render it as a JSON array
+** of coordinates. Or, if X is not a valid polygon, return NULL.
+*/
+static void geopolyJsonFunc(
+ sqlite3_context *context,
+ int argc,
+ sqlite3_value **argv
+){
+ GeoPoly *p = geopolyFuncParam(context, argv[0], 0);
+ if( p ){
+ sqlite3 *db = sqlite3_context_db_handle(context);
+ sqlite3_str *x = sqlite3_str_new(db);
+ int i;
+ sqlite3_str_append(x, "[", 1);
+ for(i=0; i<p->nVertex; i++){
+ sqlite3_str_appendf(x, "[%!g,%!g],", GeoX(p,i), GeoY(p,i));
+ }
+ sqlite3_str_appendf(x, "[%!g,%!g]]", GeoX(p,0), GeoY(p,0));
+ sqlite3_result_text(context, sqlite3_str_finish(x), -1, sqlite3_free);
+ sqlite3_free(p);
+ }
+}
+
+/*
+** SQL function: geopoly_svg(X, ....)
+**
+** Interpret X as a polygon and render it as an SVG <polyline>.
+** Additional arguments are added as attributes to the <polyline>.
+*/
+static void geopolySvgFunc(
+ sqlite3_context *context,
+ int argc,
+ sqlite3_value **argv
+){
+ GeoPoly *p;
+ if( argc<1 ) return;
+ p = geopolyFuncParam(context, argv[0], 0);
+ if( p ){
+ sqlite3 *db = sqlite3_context_db_handle(context);
+ sqlite3_str *x = sqlite3_str_new(db);
+ int i;
+ char cSep = '\'';
+ sqlite3_str_appendf(x, "<polyline points=");
+ for(i=0; i<p->nVertex; i++){
+ sqlite3_str_appendf(x, "%c%g,%g", cSep, GeoX(p,i), GeoY(p,i));
+ cSep = ' ';
+ }
+ sqlite3_str_appendf(x, " %g,%g'", GeoX(p,0), GeoY(p,0));
+ for(i=1; i<argc; i++){
+ const char *z = (const char*)sqlite3_value_text(argv[i]);
+ if( z && z[0] ){
+ sqlite3_str_appendf(x, " %s", z);
+ }
+ }
+ sqlite3_str_appendf(x, "></polyline>");
+ sqlite3_result_text(context, sqlite3_str_finish(x), -1, sqlite3_free);
+ sqlite3_free(p);
+ }
+}
+
+/*
+** SQL Function: geopoly_xform(poly, A, B, C, D, E, F)
+**
+** Transform and/or translate a polygon as follows:
+**
+** x1 = A*x0 + B*y0 + E
+** y1 = C*x0 + D*y0 + F
+**
+** For a translation:
+**
+** geopoly_xform(poly, 1, 0, 0, 1, x-offset, y-offset)
+**
+** Rotate by R around the point (0,0):
+**
+** geopoly_xform(poly, cos(R), sin(R), -sin(R), cos(R), 0, 0)
+*/
+static void geopolyXformFunc(
+ sqlite3_context *context,
+ int argc,
+ sqlite3_value **argv
+){
+ GeoPoly *p = geopolyFuncParam(context, argv[0], 0);
+ double A = sqlite3_value_double(argv[1]);
+ double B = sqlite3_value_double(argv[2]);
+ double C = sqlite3_value_double(argv[3]);
+ double D = sqlite3_value_double(argv[4]);
+ double E = sqlite3_value_double(argv[5]);
+ double F = sqlite3_value_double(argv[6]);
+ GeoCoord x1, y1, x0, y0;
+ int ii;
+ if( p ){
+ for(ii=0; ii<p->nVertex; ii++){
+ x0 = GeoX(p,ii);
+ y0 = GeoY(p,ii);
+ x1 = (GeoCoord)(A*x0 + B*y0 + E);
+ y1 = (GeoCoord)(C*x0 + D*y0 + F);
+ GeoX(p,ii) = x1;
+ GeoY(p,ii) = y1;
+ }
+ sqlite3_result_blob(context, p->hdr,
+ 4+8*p->nVertex, SQLITE_TRANSIENT);
+ sqlite3_free(p);
+ }
+}
+
+/*
+** Compute the area enclosed by the polygon.
+**
+** This routine can also be used to detect polygons that rotate in
+** the wrong direction. Polygons are supposed to be counter-clockwise (CCW).
+** This routine returns a negative value for clockwise (CW) polygons.
+*/
+static double geopolyArea(GeoPoly *p){
+ double rArea = 0.0;
+ int ii;
+ for(ii=0; ii<p->nVertex-1; ii++){
+ rArea += (GeoX(p,ii) - GeoX(p,ii+1)) /* (x0 - x1) */
+ * (GeoY(p,ii) + GeoY(p,ii+1)) /* (y0 + y1) */
+ * 0.5;
+ }
+ rArea += (GeoX(p,ii) - GeoX(p,0)) /* (xN - x0) */
+ * (GeoY(p,ii) + GeoY(p,0)) /* (yN + y0) */
+ * 0.5;
+ return rArea;
+}
+
+/*
+** Implementation of the geopoly_area(X) function.
+**
+** If the input is a well-formed Geopoly BLOB then return the area
+** enclosed by the polygon. If the polygon circulates clockwise instead
+** of counterclockwise (as it should) then return the negative of the
+** enclosed area. Otherwise return NULL.
+*/
+static void geopolyAreaFunc(
+ sqlite3_context *context,
+ int argc,
+ sqlite3_value **argv
+){
+ GeoPoly *p = geopolyFuncParam(context, argv[0], 0);
+ if( p ){
+ sqlite3_result_double(context, geopolyArea(p));
+ sqlite3_free(p);
+ }
+}
+
+/*
+** Implementation of the geopoly_ccw(X) function.
+**
+** If the rotation of polygon X is clockwise (incorrect) instead of
+** counter-clockwise (the correct winding order according to RFC7946)
+** then reverse the order of the vertexes in polygon X.
+**
+** In other words, this routine returns a CCW polygon regardless of the
+** winding order of its input.
+**
+** Use this routine to sanitize historical inputs that sometimes
+** contain polygons that wind in the wrong direction.
+*/
+static void geopolyCcwFunc(
+ sqlite3_context *context,
+ int argc,
+ sqlite3_value **argv
+){
+ GeoPoly *p = geopolyFuncParam(context, argv[0], 0);
+ if( p ){
+ if( geopolyArea(p)<0.0 ){
+ int ii, jj;
+ for(ii=1, jj=p->nVertex-1; ii<jj; ii++, jj--){
+ GeoCoord t = GeoX(p,ii);
+ GeoX(p,ii) = GeoX(p,jj);
+ GeoX(p,jj) = t;
+ t = GeoY(p,ii);
+ GeoY(p,ii) = GeoY(p,jj);
+ GeoY(p,jj) = t;
+ }
+ }
+ sqlite3_result_blob(context, p->hdr,
+ 4+8*p->nVertex, SQLITE_TRANSIENT);
+ sqlite3_free(p);
+ }
+}
+
+#define GEOPOLY_PI 3.1415926535897932385
+
+/* Fast approximation for sine(X) for X between -0.5*pi and 2*pi
+*/
+static double geopolySine(double r){
+ assert( r>=-0.5*GEOPOLY_PI && r<=2.0*GEOPOLY_PI );
+ if( r>=1.5*GEOPOLY_PI ){
+ r -= 2.0*GEOPOLY_PI;
+ }
+ if( r>=0.5*GEOPOLY_PI ){
+ return -geopolySine(r-GEOPOLY_PI);
+ }else{
+ double r2 = r*r;
+ double r3 = r2*r;
+ double r5 = r3*r2;
+ return 0.9996949*r - 0.1656700*r3 + 0.0075134*r5;
+ }
+}
+
+/*
+** Function: geopoly_regular(X,Y,R,N)
+**
+** Construct a simple, convex, regular polygon centered at X, Y
+** with circumradius R and with N sides.
+*/
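+/* For illustration, a hypothetical call that builds a 4-sided regular
+** polygon (a square) with circumradius 1 centered on the origin:
+**
+**   SELECT geopoly_json(geopoly_regular(0, 0, 1, 4));
+**
+** N is clamped to at most 1000 sides, and the function returns NULL
+** when N<3 or R<=0, as enforced below.
+*/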
+static void geopolyRegularFunc(
+ sqlite3_context *context,
+ int argc,
+ sqlite3_value **argv
+){
+ double x = sqlite3_value_double(argv[0]);
+ double y = sqlite3_value_double(argv[1]);
+ double r = sqlite3_value_double(argv[2]);
+ int n = sqlite3_value_int(argv[3]);
+ int i;
+ GeoPoly *p;
+
+ if( n<3 || r<=0.0 ) return;
+ if( n>1000 ) n = 1000;
+ p = sqlite3_malloc64( sizeof(*p) + (n-1)*2*sizeof(GeoCoord) );
+ if( p==0 ){
+ sqlite3_result_error_nomem(context);
+ return;
+ }
+ i = 1;
+ p->hdr[0] = *(unsigned char*)&i;
+ p->hdr[1] = 0;
+ p->hdr[2] = (n>>8)&0xff;
+ p->hdr[3] = n&0xff;
+ for(i=0; i<n; i++){
+ double rAngle = 2.0*GEOPOLY_PI*i/n;
+ GeoX(p,i) = x - r*geopolySine(rAngle-0.5*GEOPOLY_PI);
+ GeoY(p,i) = y + r*geopolySine(rAngle);
+ }
+ sqlite3_result_blob(context, p->hdr, 4+8*n, SQLITE_TRANSIENT);
+ sqlite3_free(p);
+}
+
+/*
+** If pPoly is a polygon, compute its bounding box. Then:
+**
+** (1) if aCoord!=0 store the bounding box in aCoord, returning NULL
+** (2) otherwise, compute a GeoPoly for the bounding box and return the
+** new GeoPoly
+**
+** If pPoly is NULL but aCoord is not NULL, then compute a new GeoPoly from
+** the bounding box in aCoord and return a pointer to that GeoPoly.
+*/
+static GeoPoly *geopolyBBox(
+ sqlite3_context *context, /* For recording the error */
+ sqlite3_value *pPoly, /* The polygon */
+ RtreeCoord *aCoord, /* Results here */
+ int *pRc /* Error code here */
+){
+ GeoPoly *pOut = 0;
+ GeoPoly *p;
+ float mnX, mxX, mnY, mxY;
+ if( pPoly==0 && aCoord!=0 ){
+ p = 0;
+ mnX = aCoord[0].f;
+ mxX = aCoord[1].f;
+ mnY = aCoord[2].f;
+ mxY = aCoord[3].f;
+ goto geopolyBboxFill;
+ }else{
+ p = geopolyFuncParam(context, pPoly, pRc);
+ }
+ if( p ){
+ int ii;
+ mnX = mxX = GeoX(p,0);
+ mnY = mxY = GeoY(p,0);
+ for(ii=1; ii<p->nVertex; ii++){
+ double r = GeoX(p,ii);
+ if( r<mnX ) mnX = (float)r;
+ else if( r>mxX ) mxX = (float)r;
+ r = GeoY(p,ii);
+ if( r<mnY ) mnY = (float)r;
+ else if( r>mxY ) mxY = (float)r;
+ }
+ if( pRc ) *pRc = SQLITE_OK;
+ if( aCoord==0 ){
+ geopolyBboxFill:
+ pOut = sqlite3_realloc64(p, GEOPOLY_SZ(4));
+ if( pOut==0 ){
+ sqlite3_free(p);
+ if( context ) sqlite3_result_error_nomem(context);
+ if( pRc ) *pRc = SQLITE_NOMEM;
+ return 0;
+ }
+ pOut->nVertex = 4;
+ ii = 1;
+ pOut->hdr[0] = *(unsigned char*)&ii;
+ pOut->hdr[1] = 0;
+ pOut->hdr[2] = 0;
+ pOut->hdr[3] = 4;
+ GeoX(pOut,0) = mnX;
+ GeoY(pOut,0) = mnY;
+ GeoX(pOut,1) = mxX;
+ GeoY(pOut,1) = mnY;
+ GeoX(pOut,2) = mxX;
+ GeoY(pOut,2) = mxY;
+ GeoX(pOut,3) = mnX;
+ GeoY(pOut,3) = mxY;
+ }else{
+ sqlite3_free(p);
+ aCoord[0].f = mnX;
+ aCoord[1].f = mxX;
+ aCoord[2].f = mnY;
+ aCoord[3].f = mxY;
+ }
+ }else{
+ memset(aCoord, 0, sizeof(RtreeCoord)*4);
+ }
+ return pOut;
+}
+
+/*
+** Implementation of the geopoly_bbox(X) SQL function.
+*/
+static void geopolyBBoxFunc(
+ sqlite3_context *context,
+ int argc,
+ sqlite3_value **argv
+){
+ GeoPoly *p = geopolyBBox(context, argv[0], 0, 0);
+ if( p ){
+ sqlite3_result_blob(context, p->hdr,
+ 4+8*p->nVertex, SQLITE_TRANSIENT);
+ sqlite3_free(p);
+ }
+}
+
+/*
+** State vector for the geopoly_group_bbox() aggregate function.
+*/
+typedef struct GeoBBox GeoBBox;
+struct GeoBBox {
+ int isInit;
+ RtreeCoord a[4];
+};
+
+
+/*
+** Implementation of the geopoly_group_bbox(X) aggregate SQL function.
+*/
+static void geopolyBBoxStep(
+ sqlite3_context *context,
+ int argc,
+ sqlite3_value **argv
+){
+ RtreeCoord a[4];
+ int rc = SQLITE_OK;
+ (void)geopolyBBox(context, argv[0], a, &rc);
+ if( rc==SQLITE_OK ){
+ GeoBBox *pBBox;
+ pBBox = (GeoBBox*)sqlite3_aggregate_context(context, sizeof(*pBBox));
+ if( pBBox==0 ) return;
+ if( pBBox->isInit==0 ){
+ pBBox->isInit = 1;
+ memcpy(pBBox->a, a, sizeof(RtreeCoord)*4);
+ }else{
+ if( a[0].f < pBBox->a[0].f ) pBBox->a[0] = a[0];
+ if( a[1].f > pBBox->a[1].f ) pBBox->a[1] = a[1];
+ if( a[2].f < pBBox->a[2].f ) pBBox->a[2] = a[2];
+ if( a[3].f > pBBox->a[3].f ) pBBox->a[3] = a[3];
+ }
+ }
+}
+static void geopolyBBoxFinal(
+ sqlite3_context *context
+){
+ GeoPoly *p;
+ GeoBBox *pBBox;
+ pBBox = (GeoBBox*)sqlite3_aggregate_context(context, 0);
+ if( pBBox==0 ) return;
+ p = geopolyBBox(context, 0, pBBox->a, 0);
+ if( p ){
+ sqlite3_result_blob(context, p->hdr,
+ 4+8*p->nVertex, SQLITE_TRANSIENT);
+ sqlite3_free(p);
+ }
+}
+
+
+/*
+** Determine if point (x0,y0) is beneath line segment (x1,y1)->(x2,y2).
+** Returns:
+**
+** +2 x0,y0 is on the line segment
+**
+** +1 x0,y0 is beneath line segment
+**
+** 0 x0,y0 is not on or beneath the line segment or the line segment
+** is vertical and x0,y0 is not on the line segment
+**
+** The left-most coordinate min(x1,x2) is not considered to be part of
+** the line segment for the purposes of this analysis.
+*/
+static int pointBeneathLine(
+ double x0, double y0,
+ double x1, double y1,
+ double x2, double y2
+){
+ double y;
+ if( x0==x1 && y0==y1 ) return 2;
+ if( x1<x2 ){
+ if( x0<=x1 || x0>x2 ) return 0;
+ }else if( x1>x2 ){
+ if( x0<=x2 || x0>x1 ) return 0;
+ }else{
+ /* Vertical line segment */
+ if( x0!=x1 ) return 0;
+ if( y0<y1 && y0<y2 ) return 0;
+ if( y0>y1 && y0>y2 ) return 0;
+ return 2;
+ }
+ y = y1 + (y2-y1)*(x0-x1)/(x2-x1);
+ if( y0==y ) return 2;
+ if( y0<y ) return 1;
+ return 0;
+}
+
+/*
+** SQL function: geopoly_contains_point(P,X,Y)
+**
+** Return +2 if point X,Y is within polygon P.
+** Return +1 if point X,Y is on the polygon boundary.
+** Return 0 if point X,Y is outside the polygon
+*/
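+/* In effect this is the even-odd ray-casting rule: the loop below counts,
+** using pointBeneathLine(), how many edges lie directly beneath the query
+** point. An odd count means the point is inside; an exact hit on any edge
+** is reported as "on the boundary".
+*/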
+static void geopolyContainsPointFunc(
+ sqlite3_context *context,
+ int argc,
+ sqlite3_value **argv
+){
+ GeoPoly *p1 = geopolyFuncParam(context, argv[0], 0);
+ double x0 = sqlite3_value_double(argv[1]);
+ double y0 = sqlite3_value_double(argv[2]);
+ int v = 0;
+ int cnt = 0;
+ int ii;
+ if( p1==0 ) return;
+ for(ii=0; ii<p1->nVertex-1; ii++){
+ v = pointBeneathLine(x0,y0,GeoX(p1,ii), GeoY(p1,ii),
+ GeoX(p1,ii+1),GeoY(p1,ii+1));
+ if( v==2 ) break;
+ cnt += v;
+ }
+ if( v!=2 ){
+ v = pointBeneathLine(x0,y0,GeoX(p1,ii), GeoY(p1,ii),
+ GeoX(p1,0), GeoY(p1,0));
+ }
+ if( v==2 ){
+ sqlite3_result_int(context, 1);
+ }else if( ((v+cnt)&1)==0 ){
+ sqlite3_result_int(context, 0);
+ }else{
+ sqlite3_result_int(context, 2);
+ }
+ sqlite3_free(p1);
+}
+
+/* Forward declaration */
+static int geopolyOverlap(GeoPoly *p1, GeoPoly *p2);
+
+/*
+** SQL function: geopoly_within(P1,P2)
+**
+** Return +2 if P1 and P2 are the same polygon
+** Return +1 if P2 is contained within P1
+** Return 0 if any part of P2 is on the outside of P1
+*/
+static void geopolyWithinFunc(
+ sqlite3_context *context,
+ int argc,
+ sqlite3_value **argv
+){
+ GeoPoly *p1 = geopolyFuncParam(context, argv[0], 0);
+ GeoPoly *p2 = geopolyFuncParam(context, argv[1], 0);
+ if( p1 && p2 ){
+ int x = geopolyOverlap(p1, p2);
+ if( x<0 ){
+ sqlite3_result_error_nomem(context);
+ }else{
+ sqlite3_result_int(context, x==2 ? 1 : x==4 ? 2 : 0);
+ }
+ }
+ sqlite3_free(p1);
+ sqlite3_free(p2);
+}
+
+/* Objects used by the overlap algorithm. */
+typedef struct GeoEvent GeoEvent;
+typedef struct GeoSegment GeoSegment;
+typedef struct GeoOverlap GeoOverlap;
+struct GeoEvent {
+ double x; /* X coordinate at which event occurs */
+ int eType; /* 0 for ADD, 1 for REMOVE */
+ GeoSegment *pSeg; /* The segment to be added or removed */
+ GeoEvent *pNext; /* Next event in the sorted list */
+};
+struct GeoSegment {
+ double C, B; /* y = C*x + B */
+ double y; /* Current y value */
+ float y0; /* Initial y value */
+ unsigned char side; /* 1 for p1, 2 for p2 */
+ unsigned int idx; /* Which segment within the side */
+ GeoSegment *pNext; /* Next segment in a list sorted by y */
+};
+struct GeoOverlap {
+ GeoEvent *aEvent; /* Array of all events */
+ GeoSegment *aSegment; /* Array of all segments */
+ int nEvent; /* Number of events */
+ int nSegment; /* Number of segments */
+};
+
+/*
+** Add a single segment and its associated events.
+*/
+static void geopolyAddOneSegment(
+ GeoOverlap *p,
+ GeoCoord x0,
+ GeoCoord y0,
+ GeoCoord x1,
+ GeoCoord y1,
+ unsigned char side,
+ unsigned int idx
+){
+ GeoSegment *pSeg;
+ GeoEvent *pEvent;
+ if( x0==x1 ) return; /* Ignore vertical segments */
+ if( x0>x1 ){
+ GeoCoord t = x0;
+ x0 = x1;
+ x1 = t;
+ t = y0;
+ y0 = y1;
+ y1 = t;
+ }
+ pSeg = p->aSegment + p->nSegment;
+ p->nSegment++;
+ pSeg->C = (y1-y0)/(x1-x0);
+ pSeg->B = y1 - x1*pSeg->C;
+ pSeg->y0 = y0;
+ pSeg->side = side;
+ pSeg->idx = idx;
+ pEvent = p->aEvent + p->nEvent;
+ p->nEvent++;
+ pEvent->x = x0;
+ pEvent->eType = 0;
+ pEvent->pSeg = pSeg;
+ pEvent = p->aEvent + p->nEvent;
+ p->nEvent++;
+ pEvent->x = x1;
+ pEvent->eType = 1;
+ pEvent->pSeg = pSeg;
+}
+
+
+
+/*
+** Insert all segments and events for polygon pPoly.
+*/
+static void geopolyAddSegments(
+ GeoOverlap *p, /* Add segments to this Overlap object */
+ GeoPoly *pPoly, /* Take all segments from this polygon */
+ unsigned char side /* The side of pPoly */
+){
+ unsigned int i;
+ GeoCoord *x;
+ for(i=0; i<(unsigned)pPoly->nVertex-1; i++){
+ x = &GeoX(pPoly,i);
+ geopolyAddOneSegment(p, x[0], x[1], x[2], x[3], side, i);
+ }
+ x = &GeoX(pPoly,i);
+ geopolyAddOneSegment(p, x[0], x[1], pPoly->a[0], pPoly->a[1], side, i);
+}
+
+/*
+** Merge two lists of sorted events by X coordinate
+*/
+static GeoEvent *geopolyEventMerge(GeoEvent *pLeft, GeoEvent *pRight){
+ GeoEvent head, *pLast;
+ head.pNext = 0;
+ pLast = &head;
+ while( pRight && pLeft ){
+ if( pRight->x <= pLeft->x ){
+ pLast->pNext = pRight;
+ pLast = pRight;
+ pRight = pRight->pNext;
+ }else{
+ pLast->pNext = pLeft;
+ pLast = pLeft;
+ pLeft = pLeft->pNext;
+ }
+ }
+ pLast->pNext = pRight ? pRight : pLeft;
+ return head.pNext;
+}
+
+/*
+** Sort an array of nEvent event objects into a list.
+*/
+static GeoEvent *geopolySortEventsByX(GeoEvent *aEvent, int nEvent){
+ int mx = 0;
+ int i, j;
+ GeoEvent *p;
+ GeoEvent *a[50];
+ for(i=0; i<nEvent; i++){
+ p = &aEvent[i];
+ p->pNext = 0;
+ for(j=0; j<mx && a[j]; j++){
+ p = geopolyEventMerge(a[j], p);
+ a[j] = 0;
+ }
+ a[j] = p;
+ if( j>=mx ) mx = j+1;
+ }
+ p = 0;
+ for(i=0; i<mx; i++){
+ p = geopolyEventMerge(a[i], p);
+ }
+ return p;
+}
+
+/*
+** Merge two lists of sorted segments by Y, and then by C.
+*/
+static GeoSegment *geopolySegmentMerge(GeoSegment *pLeft, GeoSegment *pRight){
+ GeoSegment head, *pLast;
+ head.pNext = 0;
+ pLast = &head;
+ while( pRight && pLeft ){
+ double r = pRight->y - pLeft->y;
+ if( r==0.0 ) r = pRight->C - pLeft->C;
+ if( r<0.0 ){
+ pLast->pNext = pRight;
+ pLast = pRight;
+ pRight = pRight->pNext;
+ }else{
+ pLast->pNext = pLeft;
+ pLast = pLeft;
+ pLeft = pLeft->pNext;
+ }
+ }
+ pLast->pNext = pRight ? pRight : pLeft;
+ return head.pNext;
+}
+
+/*
+** Sort a list of GeoSegments in order of increasing Y and in the event of
+** a tie, increasing C (slope).
+*/
+static GeoSegment *geopolySortSegmentsByYAndC(GeoSegment *pList){
+ int mx = 0;
+ int i;
+ GeoSegment *p;
+ GeoSegment *a[50];
+ while( pList ){
+ p = pList;
+ pList = pList->pNext;
+ p->pNext = 0;
+ for(i=0; i<mx && a[i]; i++){
+ p = geopolySegmentMerge(a[i], p);
+ a[i] = 0;
+ }
+ a[i] = p;
+ if( i>=mx ) mx = i+1;
+ }
+ p = 0;
+ for(i=0; i<mx; i++){
+ p = geopolySegmentMerge(a[i], p);
+ }
+ return p;
+}
+
+/*
+** Determine the overlap between two polygons
+*/
+static int geopolyOverlap(GeoPoly *p1, GeoPoly *p2){
+ sqlite3_int64 nVertex = p1->nVertex + p2->nVertex + 2;
+ GeoOverlap *p;
+ sqlite3_int64 nByte;
+ GeoEvent *pThisEvent;
+ double rX;
+ int rc = 0;
+ int needSort = 0;
+ GeoSegment *pActive = 0;
+ GeoSegment *pSeg;
+ unsigned char aOverlap[4];
+
+ nByte = sizeof(GeoEvent)*nVertex*2
+ + sizeof(GeoSegment)*nVertex
+ + sizeof(GeoOverlap);
+ p = sqlite3_malloc64( nByte );
+ if( p==0 ) return -1;
+ p->aEvent = (GeoEvent*)&p[1];
+ p->aSegment = (GeoSegment*)&p->aEvent[nVertex*2];
+ p->nEvent = p->nSegment = 0;
+ geopolyAddSegments(p, p1, 1);
+ geopolyAddSegments(p, p2, 2);
+ pThisEvent = geopolySortEventsByX(p->aEvent, p->nEvent);
+ rX = pThisEvent->x==0.0 ? -1.0 : 0.0;
+ memset(aOverlap, 0, sizeof(aOverlap));
+ while( pThisEvent ){
+ if( pThisEvent->x!=rX ){
+ GeoSegment *pPrev = 0;
+ int iMask = 0;
+ GEODEBUG(("Distinct X: %g\n", pThisEvent->x));
+ rX = pThisEvent->x;
+ if( needSort ){
+ GEODEBUG(("SORT\n"));
+ pActive = geopolySortSegmentsByYAndC(pActive);
+ needSort = 0;
+ }
+ for(pSeg=pActive; pSeg; pSeg=pSeg->pNext){
+ if( pPrev ){
+ if( pPrev->y!=pSeg->y ){
+ GEODEBUG(("MASK: %d\n", iMask));
+ aOverlap[iMask] = 1;
+ }
+ }
+ iMask ^= pSeg->side;
+ pPrev = pSeg;
+ }
+ pPrev = 0;
+ for(pSeg=pActive; pSeg; pSeg=pSeg->pNext){
+ double y = pSeg->C*rX + pSeg->B;
+ GEODEBUG(("Segment %d.%d %g->%g\n", pSeg->side, pSeg->idx, pSeg->y, y));
+ pSeg->y = y;
+ if( pPrev ){
+ if( pPrev->y>pSeg->y && pPrev->side!=pSeg->side ){
+ rc = 1;
+ GEODEBUG(("Crossing: %d.%d and %d.%d\n",
+ pPrev->side, pPrev->idx,
+ pSeg->side, pSeg->idx));
+ goto geopolyOverlapDone;
+ }else if( pPrev->y!=pSeg->y ){
+ GEODEBUG(("MASK: %d\n", iMask));
+ aOverlap[iMask] = 1;
+ }
+ }
+ iMask ^= pSeg->side;
+ pPrev = pSeg;
+ }
+ }
+ GEODEBUG(("%s %d.%d C=%g B=%g\n",
+ pThisEvent->eType ? "RM " : "ADD",
+ pThisEvent->pSeg->side, pThisEvent->pSeg->idx,
+ pThisEvent->pSeg->C,
+ pThisEvent->pSeg->B));
+ if( pThisEvent->eType==0 ){
+ /* Add a segment */
+ pSeg = pThisEvent->pSeg;
+ pSeg->y = pSeg->y0;
+ pSeg->pNext = pActive;
+ pActive = pSeg;
+ needSort = 1;
+ }else{
+ /* Remove a segment */
+ if( pActive==pThisEvent->pSeg ){
+ pActive = pActive->pNext;
+ }else{
+ for(pSeg=pActive; pSeg; pSeg=pSeg->pNext){
+ if( pSeg->pNext==pThisEvent->pSeg ){
+ pSeg->pNext = pSeg->pNext->pNext;
+ break;
+ }
+ }
+ }
+ }
+ pThisEvent = pThisEvent->pNext;
+ }
+ if( aOverlap[3]==0 ){
+ rc = 0;
+ }else if( aOverlap[1]!=0 && aOverlap[2]==0 ){
+ rc = 3;
+ }else if( aOverlap[1]==0 && aOverlap[2]!=0 ){
+ rc = 2;
+ }else if( aOverlap[1]==0 && aOverlap[2]==0 ){
+ rc = 4;
+ }else{
+ rc = 1;
+ }
+
+geopolyOverlapDone:
+ sqlite3_free(p);
+ return rc;
+}
+
+/*
+** SQL function: geopoly_overlap(P1,P2)
+**
+** Determine whether or not P1 and P2 overlap. Return value:
+**
+** 0 The two polygons are disjoint
+** 1 They overlap
+** 2 P1 is completely contained within P2
+** 3 P2 is completely contained within P1
+** 4 P1 and P2 are the same polygon
+** NULL Either P1 or P2 or both are not valid polygons
+*/
+static void geopolyOverlapFunc(
+ sqlite3_context *context,
+ int argc,
+ sqlite3_value **argv
+){
+ GeoPoly *p1 = geopolyFuncParam(context, argv[0], 0);
+ GeoPoly *p2 = geopolyFuncParam(context, argv[1], 0);
+ if( p1 && p2 ){
+ int x = geopolyOverlap(p1, p2);
+ if( x<0 ){
+ sqlite3_result_error_nomem(context);
+ }else{
+ sqlite3_result_int(context, x);
+ }
+ }
+ sqlite3_free(p1);
+ sqlite3_free(p2);
+}
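+
+/*
+** Usage sketch (illustrative, not part of the original sources): the
+** polygons below are hypothetical sample shapes built with
+** geopoly_regular(X,Y,R,N).
+**
+**    SELECT geopoly_overlap(geopoly_regular(0.0, 0.0, 1.0, 4),
+**                           geopoly_regular(0.5, 0.5, 1.0, 4));  -- 1
+**
+** The two unit-radius squares overlap without either containing the
+** other, so the result is 1.
+*/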
+
+/*
+** Enable or disable debugging output
+*/
+static void geopolyDebugFunc(
+ sqlite3_context *context,
+ int argc,
+ sqlite3_value **argv
+){
+#ifdef GEOPOLY_ENABLE_DEBUG
+ geo_debug = sqlite3_value_int(argv[0]);
+#endif
+}
+
+/*
+** This function is the implementation of both the xConnect and xCreate
+** methods of the geopoly virtual table.
+**
+** argv[0] -> module name
+** argv[1] -> database name
+** argv[2] -> table name
+** argv[...] -> column names...
+*/
+static int geopolyInit(
+ sqlite3 *db, /* Database connection */
+ void *pAux, /* One of the RTREE_COORD_* constants */
+ int argc, const char *const*argv, /* Parameters to CREATE TABLE statement */
+ sqlite3_vtab **ppVtab, /* OUT: New virtual table */
+ char **pzErr, /* OUT: Error message, if any */
+ int isCreate /* True for xCreate, false for xConnect */
+){
+ int rc = SQLITE_OK;
+ Rtree *pRtree;
+ sqlite3_int64 nDb; /* Length of string argv[1] */
+ sqlite3_int64 nName; /* Length of string argv[2] */
+ sqlite3_str *pSql;
+ char *zSql;
+ int ii;
+
+ sqlite3_vtab_config(db, SQLITE_VTAB_CONSTRAINT_SUPPORT, 1);
+
+ /* Allocate the sqlite3_vtab structure */
+ nDb = strlen(argv[1]);
+ nName = strlen(argv[2]);
+ pRtree = (Rtree *)sqlite3_malloc64(sizeof(Rtree)+nDb+nName+2);
+ if( !pRtree ){
+ return SQLITE_NOMEM;
+ }
+ memset(pRtree, 0, sizeof(Rtree)+nDb+nName+2);
+ pRtree->nBusy = 1;
+ pRtree->base.pModule = &rtreeModule;
+ pRtree->zDb = (char *)&pRtree[1];
+ pRtree->zName = &pRtree->zDb[nDb+1];
+ pRtree->eCoordType = RTREE_COORD_REAL32;
+ pRtree->nDim = 2;
+ pRtree->nDim2 = 4;
+ memcpy(pRtree->zDb, argv[1], nDb);
+ memcpy(pRtree->zName, argv[2], nName);
+
+ /* Create/Connect to the underlying relational database schema. If
+ ** that is successful, call sqlite3_declare_vtab() to configure
+ ** the r-tree table schema.
+ */
+ pSql = sqlite3_str_new(db);
+ sqlite3_str_appendf(pSql, "CREATE TABLE x(_shape");
+ pRtree->nAux = 1; /* Add one for _shape */
+ pRtree->nAuxNotNull = 1; /* The _shape column is always not-null */
+ for(ii=3; ii<argc; ii++){
+ pRtree->nAux++;
+ sqlite3_str_appendf(pSql, ",%s", argv[ii]);
+ }
+ sqlite3_str_appendf(pSql, ");");
+ zSql = sqlite3_str_finish(pSql);
+ if( !zSql ){
+ rc = SQLITE_NOMEM;
+ }else if( SQLITE_OK!=(rc = sqlite3_declare_vtab(db, zSql)) ){
+ *pzErr = sqlite3_mprintf("%s", sqlite3_errmsg(db));
+ }
+ sqlite3_free(zSql);
+ if( rc ) goto geopolyInit_fail;
+ pRtree->nBytesPerCell = 8 + pRtree->nDim2*4;
+
+ /* Figure out the node size to use. */
+ rc = getNodeSize(db, pRtree, isCreate, pzErr);
+ if( rc ) goto geopolyInit_fail;
+ rc = rtreeSqlInit(pRtree, db, argv[1], argv[2], isCreate);
+ if( rc ){
+ *pzErr = sqlite3_mprintf("%s", sqlite3_errmsg(db));
+ goto geopolyInit_fail;
+ }
+
+ *ppVtab = (sqlite3_vtab *)pRtree;
+ return SQLITE_OK;
+
+geopolyInit_fail:
+ if( rc==SQLITE_OK ) rc = SQLITE_ERROR;
+ assert( *ppVtab==0 );
+ assert( pRtree->nBusy==1 );
+ rtreeRelease(pRtree);
+ return rc;
+}
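+
+/*
+** Usage sketch (illustrative): a geopoly virtual table declares only its
+** auxiliary columns; the _shape column is added implicitly as column 0.
+**
+**    CREATE VIRTUAL TABLE parks USING geopoly(name, kind);
+**    -- resulting columns: _shape, name, kind
+*/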
+
+
+/*
+** GEOPOLY virtual table module xCreate method.
+*/
+static int geopolyCreate(
+ sqlite3 *db,
+ void *pAux,
+ int argc, const char *const*argv,
+ sqlite3_vtab **ppVtab,
+ char **pzErr
+){
+ return geopolyInit(db, pAux, argc, argv, ppVtab, pzErr, 1);
+}
+
+/*
+** GEOPOLY virtual table module xConnect method.
+*/
+static int geopolyConnect(
+ sqlite3 *db,
+ void *pAux,
+ int argc, const char *const*argv,
+ sqlite3_vtab **ppVtab,
+ char **pzErr
+){
+ return geopolyInit(db, pAux, argc, argv, ppVtab, pzErr, 0);
+}
+
+
+/*
+** GEOPOLY virtual table module xFilter method.
+**
+** Query plans:
+**
+**     1         rowid lookup
+**     2         search for objects overlapping the bounding box
+**               that contains polygon argv[0]
+**     3         search for objects contained within the bounding box
+**               of polygon argv[0]
+**     4         full table scan
+*/
+static int geopolyFilter(
+ sqlite3_vtab_cursor *pVtabCursor, /* The cursor to initialize */
+ int idxNum, /* Query plan */
+ const char *idxStr, /* Not Used */
+ int argc, sqlite3_value **argv /* Parameters to the query plan */
+){
+ Rtree *pRtree = (Rtree *)pVtabCursor->pVtab;
+ RtreeCursor *pCsr = (RtreeCursor *)pVtabCursor;
+ RtreeNode *pRoot = 0;
+ int rc = SQLITE_OK;
+ int iCell = 0;
+
+ rtreeReference(pRtree);
+
+ /* Reset the cursor to the same state as rtreeOpen() leaves it in. */
+ resetCursor(pCsr);
+
+ pCsr->iStrategy = idxNum;
+ if( idxNum==1 ){
+ /* Special case - lookup by rowid. */
+ RtreeNode *pLeaf; /* Leaf on which the required cell resides */
+ RtreeSearchPoint *p; /* Search point for the leaf */
+ i64 iRowid = sqlite3_value_int64(argv[0]);
+ i64 iNode = 0;
+ rc = findLeafNode(pRtree, iRowid, &pLeaf, &iNode);
+ if( rc==SQLITE_OK && pLeaf!=0 ){
+ p = rtreeSearchPointNew(pCsr, RTREE_ZERO, 0);
+ assert( p!=0 ); /* Always returns pCsr->sPoint */
+ pCsr->aNode[0] = pLeaf;
+ p->id = iNode;
+ p->eWithin = PARTLY_WITHIN;
+ rc = nodeRowidIndex(pRtree, pLeaf, iRowid, &iCell);
+ p->iCell = (u8)iCell;
+ RTREE_QUEUE_TRACE(pCsr, "PUSH-F1:");
+ }else{
+ pCsr->atEOF = 1;
+ }
+ }else{
+ /* Normal case - r-tree scan. Set up the RtreeCursor.aConstraint array
+ ** with the configured constraints.
+ */
+ rc = nodeAcquire(pRtree, 1, 0, &pRoot);
+ if( rc==SQLITE_OK && idxNum<=3 ){
+ RtreeCoord bbox[4];
+ RtreeConstraint *p;
+ assert( argc==1 );
+ geopolyBBox(0, argv[0], bbox, &rc);
+ if( rc ){
+ goto geopoly_filter_end;
+ }
+ pCsr->aConstraint = p = sqlite3_malloc(sizeof(RtreeConstraint)*4);
+ pCsr->nConstraint = 4;
+ if( p==0 ){
+ rc = SQLITE_NOMEM;
+ }else{
+ memset(pCsr->aConstraint, 0, sizeof(RtreeConstraint)*4);
+ memset(pCsr->anQueue, 0, sizeof(u32)*(pRtree->iDepth + 1));
+ if( idxNum==2 ){
+ /* Overlap query */
+ p->op = 'B';
+ p->iCoord = 0;
+ p->u.rValue = bbox[1].f;
+ p++;
+ p->op = 'D';
+ p->iCoord = 1;
+ p->u.rValue = bbox[0].f;
+ p++;
+ p->op = 'B';
+ p->iCoord = 2;
+ p->u.rValue = bbox[3].f;
+ p++;
+ p->op = 'D';
+ p->iCoord = 3;
+ p->u.rValue = bbox[2].f;
+ }else{
+ /* Within query */
+ p->op = 'D';
+ p->iCoord = 0;
+ p->u.rValue = bbox[0].f;
+ p++;
+ p->op = 'B';
+ p->iCoord = 1;
+ p->u.rValue = bbox[1].f;
+ p++;
+ p->op = 'D';
+ p->iCoord = 2;
+ p->u.rValue = bbox[2].f;
+ p++;
+ p->op = 'B';
+ p->iCoord = 3;
+ p->u.rValue = bbox[3].f;
+ }
+ }
+ }
+ if( rc==SQLITE_OK ){
+ RtreeSearchPoint *pNew;
+ pNew = rtreeSearchPointNew(pCsr, RTREE_ZERO, (u8)(pRtree->iDepth+1));
+ if( pNew==0 ){
+ rc = SQLITE_NOMEM;
+ goto geopoly_filter_end;
+ }
+ pNew->id = 1;
+ pNew->iCell = 0;
+ pNew->eWithin = PARTLY_WITHIN;
+ assert( pCsr->bPoint==1 );
+ pCsr->aNode[0] = pRoot;
+ pRoot = 0;
+ RTREE_QUEUE_TRACE(pCsr, "PUSH-Fm:");
+ rc = rtreeStepToLeaf(pCsr);
+ }
+ }
+
+geopoly_filter_end:
+ nodeRelease(pRtree, pRoot);
+ rtreeRelease(pRtree);
+ return rc;
+}
+
+/*
+** GEOPOLY virtual table module xBestIndex method. There are four
+** table scan strategies to choose from (in order from most to
+** least desirable):
+**
+** idxNum idxStr Strategy
+** ------------------------------------------------
+** 1 "rowid" Direct lookup by rowid.
+** 2 "rtree" R-tree overlap query using geopoly_overlap()
+** 3 "rtree" R-tree within query using geopoly_within()
+** 4 "fullscan" full-table scan.
+** ------------------------------------------------
+*/
+static int geopolyBestIndex(sqlite3_vtab *tab, sqlite3_index_info *pIdxInfo){
+ int ii;
+ int iRowidTerm = -1;
+ int iFuncTerm = -1;
+ int idxNum = 0;
+
+ for(ii=0; ii<pIdxInfo->nConstraint; ii++){
+ struct sqlite3_index_constraint *p = &pIdxInfo->aConstraint[ii];
+ if( !p->usable ) continue;
+ if( p->iColumn<0 && p->op==SQLITE_INDEX_CONSTRAINT_EQ ){
+ iRowidTerm = ii;
+ break;
+ }
+ if( p->iColumn==0 && p->op>=SQLITE_INDEX_CONSTRAINT_FUNCTION ){
+ /* p->op==SQLITE_INDEX_CONSTRAINT_FUNCTION for geopoly_overlap()
+    ** p->op==(SQLITE_INDEX_CONSTRAINT_FUNCTION+1) for geopoly_within().
+ ** See geopolyFindFunction() */
+ iFuncTerm = ii;
+ idxNum = p->op - SQLITE_INDEX_CONSTRAINT_FUNCTION + 2;
+ }
+ }
+
+ if( iRowidTerm>=0 ){
+ pIdxInfo->idxNum = 1;
+ pIdxInfo->idxStr = "rowid";
+ pIdxInfo->aConstraintUsage[iRowidTerm].argvIndex = 1;
+ pIdxInfo->aConstraintUsage[iRowidTerm].omit = 1;
+ pIdxInfo->estimatedCost = 30.0;
+ pIdxInfo->estimatedRows = 1;
+ pIdxInfo->idxFlags = SQLITE_INDEX_SCAN_UNIQUE;
+ return SQLITE_OK;
+ }
+ if( iFuncTerm>=0 ){
+ pIdxInfo->idxNum = idxNum;
+ pIdxInfo->idxStr = "rtree";
+ pIdxInfo->aConstraintUsage[iFuncTerm].argvIndex = 1;
+ pIdxInfo->aConstraintUsage[iFuncTerm].omit = 0;
+ pIdxInfo->estimatedCost = 300.0;
+ pIdxInfo->estimatedRows = 10;
+ return SQLITE_OK;
+ }
+ pIdxInfo->idxNum = 4;
+ pIdxInfo->idxStr = "fullscan";
+ pIdxInfo->estimatedCost = 3000000.0;
+ pIdxInfo->estimatedRows = 100000;
+ return SQLITE_OK;
+}
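+
+/*
+** Illustrative examples of queries that map onto each strategy (the
+** table name "parks" is hypothetical):
+**
+**    SELECT * FROM parks WHERE rowid=17;                      -- idxNum 1
+**    SELECT * FROM parks WHERE geopoly_overlap(_shape, :p);   -- idxNum 2
+**    SELECT * FROM parks WHERE geopoly_within(_shape, :p);    -- idxNum 3
+**    SELECT * FROM parks;                                     -- idxNum 4
+*/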
+
+
+/*
+** GEOPOLY virtual table module xColumn method.
+*/
+static int geopolyColumn(sqlite3_vtab_cursor *cur, sqlite3_context *ctx, int i){
+ Rtree *pRtree = (Rtree *)cur->pVtab;
+ RtreeCursor *pCsr = (RtreeCursor *)cur;
+ RtreeSearchPoint *p = rtreeSearchPointFirst(pCsr);
+ int rc = SQLITE_OK;
+ RtreeNode *pNode = rtreeNodeOfFirstSearchPoint(pCsr, &rc);
+
+ if( rc ) return rc;
+ if( p==0 ) return SQLITE_OK;
+ if( i==0 && sqlite3_vtab_nochange(ctx) ) return SQLITE_OK;
+ if( i<=pRtree->nAux ){
+ if( !pCsr->bAuxValid ){
+ if( pCsr->pReadAux==0 ){
+ rc = sqlite3_prepare_v3(pRtree->db, pRtree->zReadAuxSql, -1, 0,
+ &pCsr->pReadAux, 0);
+ if( rc ) return rc;
+ }
+ sqlite3_bind_int64(pCsr->pReadAux, 1,
+ nodeGetRowid(pRtree, pNode, p->iCell));
+ rc = sqlite3_step(pCsr->pReadAux);
+ if( rc==SQLITE_ROW ){
+ pCsr->bAuxValid = 1;
+ }else{
+ sqlite3_reset(pCsr->pReadAux);
+ if( rc==SQLITE_DONE ) rc = SQLITE_OK;
+ return rc;
+ }
+ }
+ sqlite3_result_value(ctx, sqlite3_column_value(pCsr->pReadAux, i+2));
+ }
+ return SQLITE_OK;
+}
+
+
+/*
+** The xUpdate method for GEOPOLY module virtual tables.
+**
+** For DELETE:
+**
+** argv[0] = the rowid to be deleted
+**
+** For INSERT:
+**
+** argv[0] = SQL NULL
+** argv[1] = rowid to insert, or an SQL NULL to select automatically
+** argv[2] = _shape column
+** argv[3] = first application-defined column....
+**
+** For UPDATE:
+**
+** argv[0] = rowid to modify. Never NULL
+** argv[1] = rowid after the change. Never NULL
+** argv[2] = new value for _shape
+** argv[3] = new value for first application-defined column....
+*/
+static int geopolyUpdate(
+ sqlite3_vtab *pVtab,
+ int nData,
+ sqlite3_value **aData,
+ sqlite_int64 *pRowid
+){
+ Rtree *pRtree = (Rtree *)pVtab;
+ int rc = SQLITE_OK;
+ RtreeCell cell; /* New cell to insert if nData>1 */
+ i64 oldRowid; /* The old rowid */
+ int oldRowidValid; /* True if oldRowid is valid */
+ i64 newRowid; /* The new rowid */
+ int newRowidValid; /* True if newRowid is valid */
+ int coordChange = 0; /* Change in coordinates */
+
+ if( pRtree->nNodeRef ){
+ /* Unable to write to the btree while another cursor is reading from it,
+ ** since the write might do a rebalance which would disrupt the read
+ ** cursor. */
+ return SQLITE_LOCKED_VTAB;
+ }
+ rtreeReference(pRtree);
+ assert(nData>=1);
+
+  oldRowidValid = sqlite3_value_type(aData[0])!=SQLITE_NULL;
+ oldRowid = oldRowidValid ? sqlite3_value_int64(aData[0]) : 0;
+ newRowidValid = nData>1 && sqlite3_value_type(aData[1])!=SQLITE_NULL;
+ newRowid = newRowidValid ? sqlite3_value_int64(aData[1]) : 0;
+ cell.iRowid = newRowid;
+
+ if( nData>1 /* not a DELETE */
+ && (!oldRowidValid /* INSERT */
+ || !sqlite3_value_nochange(aData[2]) /* UPDATE _shape */
+ || oldRowid!=newRowid) /* Rowid change */
+ ){
+ geopolyBBox(0, aData[2], cell.aCoord, &rc);
+ if( rc ){
+ if( rc==SQLITE_ERROR ){
+ pVtab->zErrMsg =
+ sqlite3_mprintf("_shape does not contain a valid polygon");
+ }
+ goto geopoly_update_end;
+ }
+ coordChange = 1;
+
+ /* If a rowid value was supplied, check if it is already present in
+ ** the table. If so, the constraint has failed. */
+ if( newRowidValid && (!oldRowidValid || oldRowid!=newRowid) ){
+ int steprc;
+ sqlite3_bind_int64(pRtree->pReadRowid, 1, cell.iRowid);
+ steprc = sqlite3_step(pRtree->pReadRowid);
+ rc = sqlite3_reset(pRtree->pReadRowid);
+ if( SQLITE_ROW==steprc ){
+ if( sqlite3_vtab_on_conflict(pRtree->db)==SQLITE_REPLACE ){
+ rc = rtreeDeleteRowid(pRtree, cell.iRowid);
+ }else{
+ rc = rtreeConstraintError(pRtree, 0);
+ }
+ }
+ }
+ }
+
+ /* If aData[0] is not an SQL NULL value, it is the rowid of a
+ ** record to delete from the r-tree table. The following block does
+ ** just that.
+ */
+ if( rc==SQLITE_OK && (nData==1 || (coordChange && oldRowidValid)) ){
+ rc = rtreeDeleteRowid(pRtree, oldRowid);
+ }
+
+ /* If the aData[] array contains more than one element, elements
+ ** (aData[2]..aData[argc-1]) contain a new record to insert into
+ ** the r-tree structure.
+ */
+ if( rc==SQLITE_OK && nData>1 && coordChange ){
+ /* Insert the new record into the r-tree */
+ RtreeNode *pLeaf = 0;
+ if( !newRowidValid ){
+ rc = rtreeNewRowid(pRtree, &cell.iRowid);
+ }
+ *pRowid = cell.iRowid;
+ if( rc==SQLITE_OK ){
+ rc = ChooseLeaf(pRtree, &cell, 0, &pLeaf);
+ }
+ if( rc==SQLITE_OK ){
+ int rc2;
+ pRtree->iReinsertHeight = -1;
+ rc = rtreeInsertCell(pRtree, pLeaf, &cell, 0);
+ rc2 = nodeRelease(pRtree, pLeaf);
+ if( rc==SQLITE_OK ){
+ rc = rc2;
+ }
+ }
+ }
+
+ /* Change the data */
+ if( rc==SQLITE_OK && nData>1 ){
+ sqlite3_stmt *pUp = pRtree->pWriteAux;
+ int jj;
+ int nChange = 0;
+ sqlite3_bind_int64(pUp, 1, cell.iRowid);
+ assert( pRtree->nAux>=1 );
+ if( sqlite3_value_nochange(aData[2]) ){
+ sqlite3_bind_null(pUp, 2);
+ }else{
+ GeoPoly *p = 0;
+ if( sqlite3_value_type(aData[2])==SQLITE_TEXT
+ && (p = geopolyFuncParam(0, aData[2], &rc))!=0
+ && rc==SQLITE_OK
+ ){
+ sqlite3_bind_blob(pUp, 2, p->hdr, 4+8*p->nVertex, SQLITE_TRANSIENT);
+ }else{
+ sqlite3_bind_value(pUp, 2, aData[2]);
+ }
+ sqlite3_free(p);
+ nChange = 1;
+ }
+ for(jj=1; jj<pRtree->nAux; jj++){
+ nChange++;
+ sqlite3_bind_value(pUp, jj+2, aData[jj+2]);
+ }
+ if( nChange ){
+ sqlite3_step(pUp);
+ rc = sqlite3_reset(pUp);
+ }
+ }
+
+geopoly_update_end:
+ rtreeRelease(pRtree);
+ return rc;
+}
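+
+/*
+** Illustrative DML against a hypothetical geopoly table "parks": each
+** statement below reaches geopolyUpdate() with the argv[] layout
+** described above.
+**
+**    INSERT INTO parks(_shape, name)
+**        VALUES(geopoly_regular(0,0,1,6), 'hex');    -- argv[0] is NULL
+**    UPDATE parks SET name='hexagon' WHERE rowid=1;  -- argv[0]=argv[1]=1
+**    DELETE FROM parks WHERE rowid=1;                -- nData==1
+*/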
+
+/*
+** Report that geopoly_overlap() and geopoly_within() are overloaded
+** functions suitable for use in xBestIndex.
+*/
+static int geopolyFindFunction(
+ sqlite3_vtab *pVtab,
+ int nArg,
+ const char *zName,
+ void (**pxFunc)(sqlite3_context*,int,sqlite3_value**),
+ void **ppArg
+){
+ if( sqlite3_stricmp(zName, "geopoly_overlap")==0 ){
+ *pxFunc = geopolyOverlapFunc;
+ *ppArg = 0;
+ return SQLITE_INDEX_CONSTRAINT_FUNCTION;
+ }
+ if( sqlite3_stricmp(zName, "geopoly_within")==0 ){
+ *pxFunc = geopolyWithinFunc;
+ *ppArg = 0;
+ return SQLITE_INDEX_CONSTRAINT_FUNCTION+1;
+ }
+ return 0;
+}
+
+
+static sqlite3_module geopolyModule = {
+ 3, /* iVersion */
+ geopolyCreate, /* xCreate - create a table */
+ geopolyConnect, /* xConnect - connect to an existing table */
+ geopolyBestIndex, /* xBestIndex - Determine search strategy */
+ rtreeDisconnect, /* xDisconnect - Disconnect from a table */
+ rtreeDestroy, /* xDestroy - Drop a table */
+ rtreeOpen, /* xOpen - open a cursor */
+ rtreeClose, /* xClose - close a cursor */
+ geopolyFilter, /* xFilter - configure scan constraints */
+ rtreeNext, /* xNext - advance a cursor */
+ rtreeEof, /* xEof */
+ geopolyColumn, /* xColumn - read data */
+ rtreeRowid, /* xRowid - read data */
+ geopolyUpdate, /* xUpdate - write data */
+ rtreeBeginTransaction, /* xBegin - begin transaction */
+ rtreeEndTransaction, /* xSync - sync transaction */
+ rtreeEndTransaction, /* xCommit - commit transaction */
+ rtreeEndTransaction, /* xRollback - rollback transaction */
+ geopolyFindFunction, /* xFindFunction - function overloading */
+ rtreeRename, /* xRename - rename the table */
+ rtreeSavepoint, /* xSavepoint */
+ 0, /* xRelease */
+ 0, /* xRollbackTo */
+ rtreeShadowName /* xShadowName */
+};
+
+static int sqlite3_geopoly_init(sqlite3 *db){
+ int rc = SQLITE_OK;
+ static const struct {
+ void (*xFunc)(sqlite3_context*,int,sqlite3_value**);
+ signed char nArg;
+ unsigned char bPure;
+ const char *zName;
+ } aFunc[] = {
+ { geopolyAreaFunc, 1, 1, "geopoly_area" },
+ { geopolyBlobFunc, 1, 1, "geopoly_blob" },
+ { geopolyJsonFunc, 1, 1, "geopoly_json" },
+ { geopolySvgFunc, -1, 1, "geopoly_svg" },
+ { geopolyWithinFunc, 2, 1, "geopoly_within" },
+ { geopolyContainsPointFunc, 3, 1, "geopoly_contains_point" },
+ { geopolyOverlapFunc, 2, 1, "geopoly_overlap" },
+ { geopolyDebugFunc, 1, 0, "geopoly_debug" },
+ { geopolyBBoxFunc, 1, 1, "geopoly_bbox" },
+ { geopolyXformFunc, 7, 1, "geopoly_xform" },
+ { geopolyRegularFunc, 4, 1, "geopoly_regular" },
+ { geopolyCcwFunc, 1, 1, "geopoly_ccw" },
+ };
+ static const struct {
+ void (*xStep)(sqlite3_context*,int,sqlite3_value**);
+ void (*xFinal)(sqlite3_context*);
+ const char *zName;
+ } aAgg[] = {
+ { geopolyBBoxStep, geopolyBBoxFinal, "geopoly_group_bbox" },
+ };
+ int i;
+ for(i=0; i<sizeof(aFunc)/sizeof(aFunc[0]) && rc==SQLITE_OK; i++){
+ int enc;
+ if( aFunc[i].bPure ){
+ enc = SQLITE_UTF8|SQLITE_DETERMINISTIC|SQLITE_INNOCUOUS;
+ }else{
+ enc = SQLITE_UTF8|SQLITE_DIRECTONLY;
+ }
+ rc = sqlite3_create_function(db, aFunc[i].zName, aFunc[i].nArg,
+ enc, 0,
+ aFunc[i].xFunc, 0, 0);
+ }
+ for(i=0; i<sizeof(aAgg)/sizeof(aAgg[0]) && rc==SQLITE_OK; i++){
+ rc = sqlite3_create_function(db, aAgg[i].zName, 1,
+ SQLITE_UTF8|SQLITE_DETERMINISTIC|SQLITE_INNOCUOUS, 0,
+ 0, aAgg[i].xStep, aAgg[i].xFinal);
+ }
+ if( rc==SQLITE_OK ){
+ rc = sqlite3_create_module_v2(db, "geopoly", &geopolyModule, 0, 0);
+ }
+ return rc;
+}
+
+/************** End of geopoly.c *********************************************/
+/************** Continuing where we left off in rtree.c **********************/
+#endif
+
+/*
+** Register the r-tree module with database handle db. This creates the
+** virtual table module "rtree" and the debugging/analysis scalar
+** function "rtreenode".
+*/
+SQLITE_PRIVATE int sqlite3RtreeInit(sqlite3 *db){
+ const int utf8 = SQLITE_UTF8;
+ int rc;
+
+ rc = sqlite3_create_function(db, "rtreenode", 2, utf8, 0, rtreenode, 0, 0);
+ if( rc==SQLITE_OK ){
+ rc = sqlite3_create_function(db, "rtreedepth", 1, utf8, 0,rtreedepth, 0, 0);
+ }
+ if( rc==SQLITE_OK ){
+ rc = sqlite3_create_function(db, "rtreecheck", -1, utf8, 0,rtreecheck, 0,0);
+ }
+ if( rc==SQLITE_OK ){
+#ifdef SQLITE_RTREE_INT_ONLY
+ void *c = (void *)RTREE_COORD_INT32;
+#else
+ void *c = (void *)RTREE_COORD_REAL32;
+#endif
+ rc = sqlite3_create_module_v2(db, "rtree", &rtreeModule, c, 0);
+ }
+ if( rc==SQLITE_OK ){
+ void *c = (void *)RTREE_COORD_INT32;
+ rc = sqlite3_create_module_v2(db, "rtree_i32", &rtreeModule, c, 0);
+ }
+#ifdef SQLITE_ENABLE_GEOPOLY
+ if( rc==SQLITE_OK ){
+ rc = sqlite3_geopoly_init(db);
+ }
+#endif
+
+ return rc;
+}
+
+/*
+** This routine deletes the RtreeGeomCallback object that was attached
+** to one of the SQL functions created by sqlite3_rtree_geometry_callback()
+** or sqlite3_rtree_query_callback(). In other words, this routine is the
+** destructor for an RtreeGeomCallback object. This routine is called when
+** the corresponding SQL function is deleted.
+*/
+static void rtreeFreeCallback(void *p){
+ RtreeGeomCallback *pInfo = (RtreeGeomCallback*)p;
+ if( pInfo->xDestructor ) pInfo->xDestructor(pInfo->pContext);
+ sqlite3_free(p);
+}
+
+/*
+** This routine frees the BLOB that is returned by geomCallback().
+*/
+static void rtreeMatchArgFree(void *pArg){
+ int i;
+ RtreeMatchArg *p = (RtreeMatchArg*)pArg;
+ for(i=0; i<p->nParam; i++){
+ sqlite3_value_free(p->apSqlParam[i]);
+ }
+ sqlite3_free(p);
+}
+
+/*
+** Each call to sqlite3_rtree_geometry_callback() or
+** sqlite3_rtree_query_callback() creates an ordinary SQLite
+** scalar function that is implemented by this routine.
+**
+** All this function does is construct an RtreeMatchArg object that
+** contains the geometry-checking callback routines and a list of
+** parameters to this function, then return that RtreeMatchArg object
+** as a BLOB.
+**
+** The R-Tree MATCH operator will read the returned BLOB, deserialize
+** the RtreeMatchArg object, and use the RtreeMatchArg object to figure
+** out which elements of the R-Tree should be returned by the query.
+*/
+static void geomCallback(sqlite3_context *ctx, int nArg, sqlite3_value **aArg){
+ RtreeGeomCallback *pGeomCtx = (RtreeGeomCallback *)sqlite3_user_data(ctx);
+ RtreeMatchArg *pBlob;
+ sqlite3_int64 nBlob;
+ int memErr = 0;
+
+ nBlob = sizeof(RtreeMatchArg) + (nArg-1)*sizeof(RtreeDValue)
+ + nArg*sizeof(sqlite3_value*);
+ pBlob = (RtreeMatchArg *)sqlite3_malloc64(nBlob);
+ if( !pBlob ){
+ sqlite3_result_error_nomem(ctx);
+ }else{
+ int i;
+ pBlob->iSize = nBlob;
+ pBlob->cb = pGeomCtx[0];
+ pBlob->apSqlParam = (sqlite3_value**)&pBlob->aParam[nArg];
+ pBlob->nParam = nArg;
+ for(i=0; i<nArg; i++){
+ pBlob->apSqlParam[i] = sqlite3_value_dup(aArg[i]);
+ if( pBlob->apSqlParam[i]==0 ) memErr = 1;
+#ifdef SQLITE_RTREE_INT_ONLY
+ pBlob->aParam[i] = sqlite3_value_int64(aArg[i]);
+#else
+ pBlob->aParam[i] = sqlite3_value_double(aArg[i]);
+#endif
+ }
+ if( memErr ){
+ sqlite3_result_error_nomem(ctx);
+ rtreeMatchArgFree(pBlob);
+ }else{
+ sqlite3_result_pointer(ctx, pBlob, "RtreeMatchArg", rtreeMatchArgFree);
+ }
+ }
+}
+
+/*
+** Register a new geometry function for use with the r-tree MATCH operator.
+*/
+SQLITE_API int sqlite3_rtree_geometry_callback(
+ sqlite3 *db, /* Register SQL function on this connection */
+ const char *zGeom, /* Name of the new SQL function */
+ int (*xGeom)(sqlite3_rtree_geometry*,int,RtreeDValue*,int*), /* Callback */
+ void *pContext /* Extra data associated with the callback */
+){
+ RtreeGeomCallback *pGeomCtx; /* Context object for new user-function */
+
+ /* Allocate and populate the context object. */
+ pGeomCtx = (RtreeGeomCallback *)sqlite3_malloc(sizeof(RtreeGeomCallback));
+ if( !pGeomCtx ) return SQLITE_NOMEM;
+ pGeomCtx->xGeom = xGeom;
+ pGeomCtx->xQueryFunc = 0;
+ pGeomCtx->xDestructor = 0;
+ pGeomCtx->pContext = pContext;
+ return sqlite3_create_function_v2(db, zGeom, -1, SQLITE_ANY,
+ (void *)pGeomCtx, geomCallback, 0, 0, rtreeFreeCallback
+ );
+}
+
+/*
+** Register a new 2nd-generation geometry function for use with the
+** r-tree MATCH operator.
+*/
+SQLITE_API int sqlite3_rtree_query_callback(
+ sqlite3 *db, /* Register SQL function on this connection */
+ const char *zQueryFunc, /* Name of new SQL function */
+ int (*xQueryFunc)(sqlite3_rtree_query_info*), /* Callback */
+ void *pContext, /* Extra data passed into the callback */
+ void (*xDestructor)(void*) /* Destructor for the extra data */
+){
+ RtreeGeomCallback *pGeomCtx; /* Context object for new user-function */
+
+ /* Allocate and populate the context object. */
+ pGeomCtx = (RtreeGeomCallback *)sqlite3_malloc(sizeof(RtreeGeomCallback));
+ if( !pGeomCtx ) return SQLITE_NOMEM;
+ pGeomCtx->xGeom = 0;
+ pGeomCtx->xQueryFunc = xQueryFunc;
+ pGeomCtx->xDestructor = xDestructor;
+ pGeomCtx->pContext = pContext;
+ return sqlite3_create_function_v2(db, zQueryFunc, -1, SQLITE_ANY,
+ (void *)pGeomCtx, geomCallback, 0, 0, rtreeFreeCallback
+ );
+}
+
+#if !SQLITE_CORE
+#ifdef _WIN32
+__declspec(dllexport)
+#endif
+SQLITE_API int sqlite3_rtree_init(
+ sqlite3 *db,
+ char **pzErrMsg,
+ const sqlite3_api_routines *pApi
+){
+ SQLITE_EXTENSION_INIT2(pApi)
+ return sqlite3RtreeInit(db);
+}
+#endif
+
+#endif
+
+/************** End of rtree.c ***********************************************/
+/************** Begin file icu.c *********************************************/
+/*
+** 2007 May 6
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+** $Id: icu.c,v 1.7 2007/12/13 21:54:11 drh Exp $
+**
+** This file implements an integration between the ICU library
+** ("International Components for Unicode", an open-source library
+** for handling unicode data) and SQLite. The integration uses
+** ICU to provide the following to SQLite:
+**
+** * An implementation of the SQL regexp() function (and hence REGEXP
+** operator) using the ICU uregex_XX() APIs.
+**
+** * Implementations of the SQL scalar upper() and lower() functions
+** for case mapping.
+**
+** * Integration of ICU and SQLite collation sequences.
+**
+** * An implementation of the LIKE operator that uses ICU to
+** provide case-independent matching.
+*/
+
+#if !defined(SQLITE_CORE) \
+ || defined(SQLITE_ENABLE_ICU) \
+ || defined(SQLITE_ENABLE_ICU_COLLATIONS)
+
+/* Include ICU headers */
+#include <unicode/utypes.h>
+#include <unicode/uregex.h>
+#include <unicode/ustring.h>
+#include <unicode/ucol.h>
+
+/* #include <assert.h> */
+
+#ifndef SQLITE_CORE
+/* #include "sqlite3ext.h" */
+ SQLITE_EXTENSION_INIT1
+#else
+/* #include "sqlite3.h" */
+#endif
+
+/*
+** This function is called when an ICU function called from within
+** the implementation of an SQL scalar function returns an error.
+**
+** The scalar function context passed as the first argument is
+** loaded with an error message based on the following two args.
+*/
+static void icuFunctionError(
+ sqlite3_context *pCtx, /* SQLite scalar function context */
+ const char *zName, /* Name of ICU function that failed */
+ UErrorCode e /* Error code returned by ICU function */
+){
+ char zBuf[128];
+ sqlite3_snprintf(128, zBuf, "ICU error: %s(): %s", zName, u_errorName(e));
+ zBuf[127] = '\0';
+ sqlite3_result_error(pCtx, zBuf, -1);
+}
+
+#if !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_ICU)
+
+/*
+** Maximum length (in bytes) of the pattern in a LIKE or GLOB
+** operator.
+*/
+#ifndef SQLITE_MAX_LIKE_PATTERN_LENGTH
+# define SQLITE_MAX_LIKE_PATTERN_LENGTH 50000
+#endif
+
+/*
+** Version of sqlite3_free() that is always a function, never a macro.
+*/
+static void xFree(void *p){
+ sqlite3_free(p);
+}
+
+/*
+** This lookup table is used to help decode the first byte of
+** a multi-byte UTF8 character. It is copied here from SQLite source
+** code file utf8.c.
+*/
+static const unsigned char icuUtf8Trans1[] = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x00, 0x01, 0x02, 0x03, 0x00, 0x01, 0x00, 0x00,
+};
+
+#define SQLITE_ICU_READ_UTF8(zIn, c) \
+ c = *(zIn++); \
+ if( c>=0xc0 ){ \
+ c = icuUtf8Trans1[c-0xc0]; \
+ while( (*zIn & 0xc0)==0x80 ){ \
+ c = (c<<6) + (0x3f & *(zIn++)); \
+ } \
+ }
+
+#define SQLITE_ICU_SKIP_UTF8(zIn) \
+ assert( *zIn ); \
+ if( *(zIn++)>=0xc0 ){ \
+ while( (*zIn & 0xc0)==0x80 ){zIn++;} \
+ }
+
+
+/*
+** Compare two UTF-8 strings for equality where the first string is
+** a "LIKE" expression. Return true (1) if they are the same and
+** false (0) if they are different.
+*/
+static int icuLikeCompare(
+ const uint8_t *zPattern, /* LIKE pattern */
+ const uint8_t *zString, /* The UTF-8 string to compare against */
+ const UChar32 uEsc /* The escape character */
+){
+ static const uint32_t MATCH_ONE = (uint32_t)'_';
+ static const uint32_t MATCH_ALL = (uint32_t)'%';
+
+ int prevEscape = 0; /* True if the previous character was uEsc */
+
+ while( 1 ){
+
+ /* Read (and consume) the next character from the input pattern. */
+ uint32_t uPattern;
+ SQLITE_ICU_READ_UTF8(zPattern, uPattern);
+ if( uPattern==0 ) break;
+
+ /* There are now 4 possibilities:
+ **
+ ** 1. uPattern is an unescaped match-all character "%",
+ ** 2. uPattern is an unescaped match-one character "_",
+ ** 3. uPattern is an unescaped escape character, or
+ ** 4. uPattern is to be handled as an ordinary character
+ */
+ if( uPattern==MATCH_ALL && !prevEscape && uPattern!=(uint32_t)uEsc ){
+ /* Case 1. */
+ uint8_t c;
+
+ /* Skip any MATCH_ALL or MATCH_ONE characters that follow a
+ ** MATCH_ALL. For each MATCH_ONE, skip one character in the
+ ** test string.
+ */
+ while( (c=*zPattern) == MATCH_ALL || c == MATCH_ONE ){
+ if( c==MATCH_ONE ){
+ if( *zString==0 ) return 0;
+ SQLITE_ICU_SKIP_UTF8(zString);
+ }
+ zPattern++;
+ }
+
+ if( *zPattern==0 ) return 1;
+
+ while( *zString ){
+ if( icuLikeCompare(zPattern, zString, uEsc) ){
+ return 1;
+ }
+ SQLITE_ICU_SKIP_UTF8(zString);
+ }
+ return 0;
+
+ }else if( uPattern==MATCH_ONE && !prevEscape && uPattern!=(uint32_t)uEsc ){
+ /* Case 2. */
+ if( *zString==0 ) return 0;
+ SQLITE_ICU_SKIP_UTF8(zString);
+
+ }else if( uPattern==(uint32_t)uEsc && !prevEscape ){
+ /* Case 3. */
+ prevEscape = 1;
+
+ }else{
+ /* Case 4. */
+ uint32_t uString;
+ SQLITE_ICU_READ_UTF8(zString, uString);
+ uString = (uint32_t)u_foldCase((UChar32)uString, U_FOLD_CASE_DEFAULT);
+ uPattern = (uint32_t)u_foldCase((UChar32)uPattern, U_FOLD_CASE_DEFAULT);
+ if( uString!=uPattern ){
+ return 0;
+ }
+ prevEscape = 0;
+ }
+ }
+
+ return *zString==0;
+}
+
+/*
+** Implementation of the like() SQL function.  This function implements
+** the built-in LIKE operator.  The first argument to the function is the
+** pattern and the second argument is the string.  So, the SQL expression:
+**
+** A LIKE B
+**
+** is implemented as like(B, A). If there is an escape character E,
+**
+** A LIKE B ESCAPE E
+**
+** is mapped to like(B, A, E).
+*/
+static void icuLikeFunc(
+ sqlite3_context *context,
+ int argc,
+ sqlite3_value **argv
+){
+ const unsigned char *zA = sqlite3_value_text(argv[0]);
+ const unsigned char *zB = sqlite3_value_text(argv[1]);
+ UChar32 uEsc = 0;
+
+ /* Limit the length of the LIKE or GLOB pattern to avoid problems
+  ** of deep recursion and N*N behavior in icuLikeCompare().
+ */
+ if( sqlite3_value_bytes(argv[0])>SQLITE_MAX_LIKE_PATTERN_LENGTH ){
+ sqlite3_result_error(context, "LIKE or GLOB pattern too complex", -1);
+ return;
+ }
+
+
+ if( argc==3 ){
+ /* The escape character string must consist of a single UTF-8 character.
+ ** Otherwise, return an error.
+ */
+ int nE= sqlite3_value_bytes(argv[2]);
+ const unsigned char *zE = sqlite3_value_text(argv[2]);
+ int i = 0;
+ if( zE==0 ) return;
+ U8_NEXT(zE, i, nE, uEsc);
+ if( i!=nE){
+ sqlite3_result_error(context,
+ "ESCAPE expression must be a single character", -1);
+ return;
+ }
+ }
+
+ if( zA && zB ){
+ sqlite3_result_int(context, icuLikeCompare(zA, zB, uEsc));
+ }
+}
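+
+/*
+** Illustrative behaviour (assuming an ICU-enabled build): case folding
+** is performed per code point with u_foldCase(), so:
+**
+**    SELECT like('abc%', 'ABCDEF');       -- 1
+**    SELECT like('ab_d', 'abcd');         -- 1
+**    SELECT like('ab\%d', 'ab%d', '\');   -- 1 (escaped "%")
+*/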
+
+/*
+** Function to delete compiled regexp objects. Registered as
+** a destructor function with sqlite3_set_auxdata().
+*/
+static void icuRegexpDelete(void *p){
+ URegularExpression *pExpr = (URegularExpression *)p;
+ uregex_close(pExpr);
+}
+
+/*
+** Implementation of SQLite REGEXP operator. This scalar function takes
+** two arguments.  The first is a regular expression pattern to compile;
+** the second is a string to match against that pattern.  If either
+** argument is an SQL NULL, then NULL is returned.  Otherwise, the result
+** is 1 if the string matches the pattern, or 0 otherwise.
+**
+** SQLite maps the regexp() function to the regexp() operator such
+** that the following two are equivalent:
+**
+** zString REGEXP zPattern
+** regexp(zPattern, zString)
+**
+** Uses the following ICU regexp APIs:
+**
+** uregex_open()
+** uregex_matches()
+** uregex_close()
+*/
+static void icuRegexpFunc(sqlite3_context *p, int nArg, sqlite3_value **apArg){
+ UErrorCode status = U_ZERO_ERROR;
+ URegularExpression *pExpr;
+ UBool res;
+ const UChar *zString = sqlite3_value_text16(apArg[1]);
+
+ (void)nArg; /* Unused parameter */
+
+ /* If the left hand side of the regexp operator is NULL,
+ ** then the result is also NULL.
+ */
+ if( !zString ){
+ return;
+ }
+
+ pExpr = sqlite3_get_auxdata(p, 0);
+ if( !pExpr ){
+ const UChar *zPattern = sqlite3_value_text16(apArg[0]);
+ if( !zPattern ){
+ return;
+ }
+ pExpr = uregex_open(zPattern, -1, 0, 0, &status);
+
+ if( U_SUCCESS(status) ){
+ sqlite3_set_auxdata(p, 0, pExpr, icuRegexpDelete);
+ }else{
+ assert(!pExpr);
+ icuFunctionError(p, "uregex_open", status);
+ return;
+ }
+ }
+
+ /* Configure the text that the regular expression operates on. */
+ uregex_setText(pExpr, zString, -1, &status);
+ if( !U_SUCCESS(status) ){
+ icuFunctionError(p, "uregex_setText", status);
+ return;
+ }
+
+ /* Attempt the match */
+ res = uregex_matches(pExpr, 0, &status);
+ if( !U_SUCCESS(status) ){
+ icuFunctionError(p, "uregex_matches", status);
+ return;
+ }
+
+ /* Set the text that the regular expression operates on to a NULL
+ ** pointer. This is not really necessary, but it is tidier than
+ ** leaving the regular expression object configured with an invalid
+ ** pointer after this function returns.
+ */
+ uregex_setText(pExpr, 0, 0, &status);
+
+ /* Return 1 or 0. */
+ sqlite3_result_int(p, res ? 1 : 0);
+}
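+
+/*
+** Illustrative behaviour (assuming ICU is enabled): uregex_matches()
+** requires the pattern to match the entire input string, so:
+**
+**    SELECT 'sqlite' REGEXP 's.*e';    -- 1
+**    SELECT regexp('s.*e', 'sqlite');  -- 1 (equivalent call)
+*/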
+
+/*
+** Implementations of scalar functions for case mapping - upper() and
+** lower(). Function upper() converts its input to upper-case (ABC).
+** Function lower() converts to lower-case (abc).
+**
+** ICU provides two types of case mapping, "general" case mapping and
+** "language specific". Refer to ICU documentation for the differences
+** between the two.
+**
+** To utilise "general" case mapping, the upper() or lower() scalar
+** functions are invoked with one argument:
+**
+**     upper('abc') -> 'ABC'
+**     lower('ABC') -> 'abc'
+**
+** To access ICU "language specific" case mapping, upper() or lower()
+** should be invoked with two arguments. The second argument is the name
+** of the locale to use. Passing an empty string ("") or SQL NULL value
+** as the second argument is the same as invoking the 1 argument version
+** of upper() or lower().
+**
+** lower('I', 'en_us') -> 'i'
+** lower('I', 'tr_tr') -> '\u131' (small dotless i)
+**
+** http://www.icu-project.org/userguide/posix.html#case_mappings
+*/
+static void icuCaseFunc16(sqlite3_context *p, int nArg, sqlite3_value **apArg){
+ const UChar *zInput; /* Pointer to input string */
+ UChar *zOutput = 0; /* Pointer to output buffer */
+ int nInput; /* Size of utf-16 input string in bytes */
+ int nOut; /* Size of output buffer in bytes */
+ int cnt;
+ int bToUpper; /* True for toupper(), false for tolower() */
+ UErrorCode status;
+ const char *zLocale = 0;
+
+ assert(nArg==1 || nArg==2);
+ bToUpper = (sqlite3_user_data(p)!=0);
+ if( nArg==2 ){
+ zLocale = (const char *)sqlite3_value_text(apArg[1]);
+ }
+
+ zInput = sqlite3_value_text16(apArg[0]);
+ if( !zInput ){
+ return;
+ }
+ nOut = nInput = sqlite3_value_bytes16(apArg[0]);
+ if( nOut==0 ){
+ sqlite3_result_text16(p, "", 0, SQLITE_STATIC);
+ return;
+ }
+
+ for(cnt=0; cnt<2; cnt++){
+ UChar *zNew = sqlite3_realloc(zOutput, nOut);
+ if( zNew==0 ){
+ sqlite3_free(zOutput);
+ sqlite3_result_error_nomem(p);
+ return;
+ }
+ zOutput = zNew;
+ status = U_ZERO_ERROR;
+ if( bToUpper ){
+ nOut = 2*u_strToUpper(zOutput,nOut/2,zInput,nInput/2,zLocale,&status);
+ }else{
+ nOut = 2*u_strToLower(zOutput,nOut/2,zInput,nInput/2,zLocale,&status);
+ }
+
+ if( U_SUCCESS(status) ){
+ sqlite3_result_text16(p, zOutput, nOut, xFree);
+ }else if( status==U_BUFFER_OVERFLOW_ERROR ){
+ assert( cnt==0 );
+ continue;
+ }else{
+ icuFunctionError(p, bToUpper ? "u_strToUpper" : "u_strToLower", status);
+ }
+ return;
+ }
+ assert( 0 ); /* Unreachable */
+}
+
+#endif /* !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_ICU) */
+
+/*
+** Collation sequence destructor function. The pCtx argument points to
+** a UCollator structure previously allocated using ucol_open().
+*/
+static void icuCollationDel(void *pCtx){
+ UCollator *p = (UCollator *)pCtx;
+ ucol_close(p);
+}
+
+/*
+** Collation sequence comparison function. The pCtx argument points to
+** a UCollator structure previously allocated using ucol_open().
+*/
+static int icuCollationColl(
+ void *pCtx,
+ int nLeft,
+ const void *zLeft,
+ int nRight,
+ const void *zRight
+){
+ UCollationResult res;
+ UCollator *p = (UCollator *)pCtx;
+ res = ucol_strcoll(p, (UChar *)zLeft, nLeft/2, (UChar *)zRight, nRight/2);
+ switch( res ){
+ case UCOL_LESS: return -1;
+ case UCOL_GREATER: return +1;
+ case UCOL_EQUAL: return 0;
+ }
+ assert(!"Unexpected return value from ucol_strcoll()");
+ return 0;
+}
+
+/*
+** Implementation of the scalar function icu_load_collation().
+**
+** This scalar function is used to register ICU-based collation
+** sequences with an SQLite database connection. It is intended to be called
+** as follows:
+**
+** SELECT icu_load_collation(<locale>, <collation-name>);
+**
+** Where <locale> is a string containing an ICU locale identifier (i.e.
+** "en_AU", "tr_TR" etc.) and <collation-name> is the name of the
+** collation sequence to create.
+*/
+static void icuLoadCollation(
+ sqlite3_context *p,
+ int nArg,
+ sqlite3_value **apArg
+){
+ sqlite3 *db = (sqlite3 *)sqlite3_user_data(p);
+ UErrorCode status = U_ZERO_ERROR;
+ const char *zLocale; /* Locale identifier - (eg. "jp_JP") */
+ const char *zName; /* SQL Collation sequence name (eg. "japanese") */
+ UCollator *pUCollator; /* ICU library collation object */
+  int rc;                    /* Return code from sqlite3_create_collation_v2() */
+
+ assert(nArg==2);
+ (void)nArg; /* Unused parameter */
+ zLocale = (const char *)sqlite3_value_text(apArg[0]);
+ zName = (const char *)sqlite3_value_text(apArg[1]);
+
+ if( !zLocale || !zName ){
+ return;
+ }
+
+ pUCollator = ucol_open(zLocale, &status);
+ if( !U_SUCCESS(status) ){
+ icuFunctionError(p, "ucol_open", status);
+ return;
+ }
+ assert(p);
+
+ rc = sqlite3_create_collation_v2(db, zName, SQLITE_UTF16, (void *)pUCollator,
+ icuCollationColl, icuCollationDel
+ );
+ if( rc!=SQLITE_OK ){
+ ucol_close(pUCollator);
+ sqlite3_result_error(p, "Error registering collation function", -1);
+ }
+}
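+
+/*
+** Usage sketch (illustrative; table and collation names are
+** hypothetical):
+**
+**    SELECT icu_load_collation('tr_TR', 'turkish');
+**    CREATE TABLE t(x TEXT COLLATE turkish);
+**    SELECT x FROM t ORDER BY x COLLATE turkish;
+*/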
+
+/*
+** Register the ICU extension functions with database db.
+*/
+SQLITE_PRIVATE int sqlite3IcuInit(sqlite3 *db){
+# define SQLITEICU_EXTRAFLAGS (SQLITE_DETERMINISTIC|SQLITE_INNOCUOUS)
+ static const struct IcuScalar {
+ const char *zName; /* Function name */
+ unsigned char nArg; /* Number of arguments */
+ unsigned int enc; /* Optimal text encoding */
+ unsigned char iContext; /* sqlite3_user_data() context */
+ void (*xFunc)(sqlite3_context*,int,sqlite3_value**);
+ } scalars[] = {
+ {"icu_load_collation",2,SQLITE_UTF8|SQLITE_DIRECTONLY,1, icuLoadCollation},
+#if !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_ICU)
+ {"regexp", 2, SQLITE_ANY|SQLITEICU_EXTRAFLAGS, 0, icuRegexpFunc},
+ {"lower", 1, SQLITE_UTF16|SQLITEICU_EXTRAFLAGS, 0, icuCaseFunc16},
+ {"lower", 2, SQLITE_UTF16|SQLITEICU_EXTRAFLAGS, 0, icuCaseFunc16},
+ {"upper", 1, SQLITE_UTF16|SQLITEICU_EXTRAFLAGS, 1, icuCaseFunc16},
+ {"upper", 2, SQLITE_UTF16|SQLITEICU_EXTRAFLAGS, 1, icuCaseFunc16},
+ {"lower", 1, SQLITE_UTF8|SQLITEICU_EXTRAFLAGS, 0, icuCaseFunc16},
+ {"lower", 2, SQLITE_UTF8|SQLITEICU_EXTRAFLAGS, 0, icuCaseFunc16},
+ {"upper", 1, SQLITE_UTF8|SQLITEICU_EXTRAFLAGS, 1, icuCaseFunc16},
+ {"upper", 2, SQLITE_UTF8|SQLITEICU_EXTRAFLAGS, 1, icuCaseFunc16},
+ {"like", 2, SQLITE_UTF8|SQLITEICU_EXTRAFLAGS, 0, icuLikeFunc},
+ {"like", 3, SQLITE_UTF8|SQLITEICU_EXTRAFLAGS, 0, icuLikeFunc},
+#endif /* !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_ICU) */
+ };
+ int rc = SQLITE_OK;
+ int i;
+
+ for(i=0; rc==SQLITE_OK && i<(int)(sizeof(scalars)/sizeof(scalars[0])); i++){
+ const struct IcuScalar *p = &scalars[i];
+ rc = sqlite3_create_function(
+ db, p->zName, p->nArg, p->enc,
+ p->iContext ? (void*)db : (void*)0,
+ p->xFunc, 0, 0
+ );
+ }
+
+ return rc;
+}
+
+#if !SQLITE_CORE
+#ifdef _WIN32
+__declspec(dllexport)
+#endif
+SQLITE_API int sqlite3_icu_init(
+ sqlite3 *db,
+ char **pzErrMsg,
+ const sqlite3_api_routines *pApi
+){
+ SQLITE_EXTENSION_INIT2(pApi)
+ return sqlite3IcuInit(db);
+}
+#endif
+
+#endif
+
+/************** End of icu.c *************************************************/
+/************** Begin file fts3_icu.c ****************************************/
+/*
+** 2007 June 22
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+** This file implements a tokenizer for fts3 based on the ICU library.
+*/
+/* #include "fts3Int.h" */
+#if !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS3)
+#ifdef SQLITE_ENABLE_ICU
+
+/* #include <assert.h> */
+/* #include <string.h> */
+/* #include "fts3_tokenizer.h" */
+
+#include <unicode/ubrk.h>
+/* #include <unicode/ucol.h> */
+/* #include <unicode/ustring.h> */
+#include <unicode/utf16.h>
+
+typedef struct IcuTokenizer IcuTokenizer;
+typedef struct IcuCursor IcuCursor;
+
+struct IcuTokenizer {
+ sqlite3_tokenizer base;
+ char *zLocale;
+};
+
+struct IcuCursor {
+ sqlite3_tokenizer_cursor base;
+
+ UBreakIterator *pIter; /* ICU break-iterator object */
+ int nChar; /* Number of UChar elements in pInput */
+ UChar *aChar; /* Copy of input using utf-16 encoding */
+ int *aOffset; /* Offsets of each character in utf-8 input */
+
+ int nBuffer;
+ char *zBuffer;
+
+ int iToken;
+};
+
+/*
+** Create a new tokenizer instance.
+*/
+static int icuCreate(
+ int argc, /* Number of entries in argv[] */
+ const char * const *argv, /* Tokenizer creation arguments */
+ sqlite3_tokenizer **ppTokenizer /* OUT: Created tokenizer */
+){
+ IcuTokenizer *p;
+ int n = 0;
+
+ if( argc>0 ){
+ n = strlen(argv[0])+1;
+ }
+ p = (IcuTokenizer *)sqlite3_malloc64(sizeof(IcuTokenizer)+n);
+ if( !p ){
+ return SQLITE_NOMEM;
+ }
+ memset(p, 0, sizeof(IcuTokenizer));
+
+ if( n ){
+ p->zLocale = (char *)&p[1];
+ memcpy(p->zLocale, argv[0], n);
+ }
+
+ *ppTokenizer = (sqlite3_tokenizer *)p;
+
+ return SQLITE_OK;
+}
+
+/*
+** Destroy a tokenizer
+*/
+static int icuDestroy(sqlite3_tokenizer *pTokenizer){
+ IcuTokenizer *p = (IcuTokenizer *)pTokenizer;
+ sqlite3_free(p);
+ return SQLITE_OK;
+}
+
+/*
+** Prepare to begin tokenizing a particular string.  The input
+** string to be tokenized is zInput[0..nInput-1].  A cursor
+** used to incrementally tokenize this string is returned in
+** *ppCursor.
+*/
+static int icuOpen(
+ sqlite3_tokenizer *pTokenizer, /* The tokenizer */
+ const char *zInput, /* Input string */
+ int nInput, /* Length of zInput in bytes */
+ sqlite3_tokenizer_cursor **ppCursor /* OUT: Tokenization cursor */
+){
+ IcuTokenizer *p = (IcuTokenizer *)pTokenizer;
+ IcuCursor *pCsr;
+
+ const int32_t opt = U_FOLD_CASE_DEFAULT;
+ UErrorCode status = U_ZERO_ERROR;
+ int nChar;
+
+ UChar32 c;
+ int iInput = 0;
+ int iOut = 0;
+
+ *ppCursor = 0;
+
+ if( zInput==0 ){
+ nInput = 0;
+ zInput = "";
+ }else if( nInput<0 ){
+ nInput = strlen(zInput);
+ }
+ nChar = nInput+1;
+ pCsr = (IcuCursor *)sqlite3_malloc64(
+ sizeof(IcuCursor) + /* IcuCursor */
+ ((nChar+3)&~3) * sizeof(UChar) + /* IcuCursor.aChar[] */
+ (nChar+1) * sizeof(int) /* IcuCursor.aOffset[] */
+ );
+ if( !pCsr ){
+ return SQLITE_NOMEM;
+ }
+ memset(pCsr, 0, sizeof(IcuCursor));
+ pCsr->aChar = (UChar *)&pCsr[1];
+ pCsr->aOffset = (int *)&pCsr->aChar[(nChar+3)&~3];
+
+ pCsr->aOffset[iOut] = iInput;
+ U8_NEXT(zInput, iInput, nInput, c);
+ while( c>0 ){
+ int isError = 0;
+ c = u_foldCase(c, opt);
+ U16_APPEND(pCsr->aChar, iOut, nChar, c, isError);
+ if( isError ){
+ sqlite3_free(pCsr);
+ return SQLITE_ERROR;
+ }
+ pCsr->aOffset[iOut] = iInput;
+
+ if( iInput<nInput ){
+ U8_NEXT(zInput, iInput, nInput, c);
+ }else{
+ c = 0;
+ }
+ }
+
+ pCsr->pIter = ubrk_open(UBRK_WORD, p->zLocale, pCsr->aChar, iOut, &status);
+ if( !U_SUCCESS(status) ){
+ sqlite3_free(pCsr);
+ return SQLITE_ERROR;
+ }
+ pCsr->nChar = iOut;
+
+ ubrk_first(pCsr->pIter);
+ *ppCursor = (sqlite3_tokenizer_cursor *)pCsr;
+ return SQLITE_OK;
+}
+
+/*
+** Close a tokenization cursor previously opened by a call to icuOpen().
+*/
+static int icuClose(sqlite3_tokenizer_cursor *pCursor){
+ IcuCursor *pCsr = (IcuCursor *)pCursor;
+ ubrk_close(pCsr->pIter);
+ sqlite3_free(pCsr->zBuffer);
+ sqlite3_free(pCsr);
+ return SQLITE_OK;
+}
+
+/*
+** Extract the next token from a tokenization cursor.
+*/
+static int icuNext(
+  sqlite3_tokenizer_cursor *pCursor,  /* Cursor returned by icuOpen() */
+ const char **ppToken, /* OUT: *ppToken is the token text */
+ int *pnBytes, /* OUT: Number of bytes in token */
+ int *piStartOffset, /* OUT: Starting offset of token */
+ int *piEndOffset, /* OUT: Ending offset of token */
+ int *piPosition /* OUT: Position integer of token */
+){
+ IcuCursor *pCsr = (IcuCursor *)pCursor;
+
+ int iStart = 0;
+ int iEnd = 0;
+ int nByte = 0;
+
+ while( iStart==iEnd ){
+ UChar32 c;
+
+ iStart = ubrk_current(pCsr->pIter);
+ iEnd = ubrk_next(pCsr->pIter);
+ if( iEnd==UBRK_DONE ){
+ return SQLITE_DONE;
+ }
+
+ while( iStart<iEnd ){
+ int iWhite = iStart;
+ U16_NEXT(pCsr->aChar, iWhite, pCsr->nChar, c);
+ if( u_isspace(c) ){
+ iStart = iWhite;
+ }else{
+ break;
+ }
+ }
+ assert(iStart<=iEnd);
+ }
+
+ do {
+ UErrorCode status = U_ZERO_ERROR;
+ if( nByte ){
+ char *zNew = sqlite3_realloc(pCsr->zBuffer, nByte);
+ if( !zNew ){
+ return SQLITE_NOMEM;
+ }
+ pCsr->zBuffer = zNew;
+ pCsr->nBuffer = nByte;
+ }
+
+ u_strToUTF8(
+ pCsr->zBuffer, pCsr->nBuffer, &nByte, /* Output vars */
+ &pCsr->aChar[iStart], iEnd-iStart, /* Input vars */
+ &status /* Output success/failure */
+ );
+ } while( nByte>pCsr->nBuffer );
+
+ *ppToken = pCsr->zBuffer;
+ *pnBytes = nByte;
+ *piStartOffset = pCsr->aOffset[iStart];
+ *piEndOffset = pCsr->aOffset[iEnd];
+ *piPosition = pCsr->iToken++;
+
+ return SQLITE_OK;
+}
+
+/*
+** The set of routines that implement the ICU tokenizer
+*/
+static const sqlite3_tokenizer_module icuTokenizerModule = {
+ 0, /* iVersion */
+ icuCreate, /* xCreate */
+  icuDestroy,                /* xDestroy */
+ icuOpen, /* xOpen */
+ icuClose, /* xClose */
+ icuNext, /* xNext */
+ 0, /* xLanguageid */
+};
+
+/*
+** Set *ppModule to point at the implementation of the ICU tokenizer.
+*/
+SQLITE_PRIVATE void sqlite3Fts3IcuTokenizerModule(
+ sqlite3_tokenizer_module const**ppModule
+){
+ *ppModule = &icuTokenizerModule;
+}
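+
+/*
+** Usage sketch (illustrative): with FTS3/FTS4 and ICU enabled, the
+** tokenizer is selected by name, optionally followed by a locale (the
+** locale shown here is an arbitrary example):
+**
+**    CREATE VIRTUAL TABLE docs USING fts4(body, tokenize=icu en_US);
+*/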
+
+#endif /* defined(SQLITE_ENABLE_ICU) */
+#endif /* !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS3) */
+
+/************** End of fts3_icu.c ********************************************/
+/************** Begin file sqlite3rbu.c **************************************/
+/*
+** 2014 August 30
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+**
+**
+** OVERVIEW
+**
+** The RBU extension requires that the RBU update be packaged as an
+** SQLite database. The tables it expects to find are described in
+** sqlite3rbu.h. Essentially, for each table xyz in the target database
+** that the user wishes to write to, a corresponding data_xyz table is
+** created in the RBU database and populated with one row for each row to
+** update, insert or delete from the target table.
+**
+** The update proceeds in three stages:
+**
+** 1) The database is updated. The modified database pages are written
+** to a *-oal file. A *-oal file is just like a *-wal file, except
+** that it is named "<database>-oal" instead of "<database>-wal".
+**        Because regular SQLite clients do not look for a file named
+** "<database>-oal", they go on using the original database in
+** rollback mode while the *-oal file is being generated.
+**
+** During this stage RBU does not update the database by writing
+** directly to the target tables. Instead it creates "imposter"
+** tables using the SQLITE_TESTCTRL_IMPOSTER interface that it uses
+** to update each b-tree individually. All updates required by each
+** b-tree are completed before moving on to the next, and all
+** updates are done in sorted key order.
+**
+** 2) The "<database>-oal" file is moved to the equivalent "<database>-wal"
+** location using a call to rename(2). Before doing this the RBU
+** module takes an EXCLUSIVE lock on the database file, ensuring
+** that there are no other active readers.
+**
+** Once the EXCLUSIVE lock is released, any other database readers
+** detect the new *-wal file and read the database in wal mode. At
+** this point they see the new version of the database - including
+** the updates made as part of the RBU update.
+**
+** 3) The new *-wal file is checkpointed. This proceeds in the same way
+** as a regular database checkpoint, except that a single frame is
+** checkpointed each time sqlite3rbu_step() is called. If the RBU
+** handle is closed before the entire *-wal file is checkpointed,
+** the checkpoint progress is saved in the RBU database and the
+** checkpoint can be resumed by another RBU client at some point in
+** the future.
+**
+** POTENTIAL PROBLEMS
+**
+** The rename() call might not be portable. And RBU is not currently
+** syncing the directory after renaming the file.
+**
+** When state is saved, any commit to the *-oal file and the commit to
+** the RBU update database are not atomic. So if the power fails at the
+** wrong moment they might get out of sync. As the main database will be
+** committed before the RBU update database this will likely either just
+** pass unnoticed, or result in SQLITE_CONSTRAINT errors (due to UNIQUE
+** constraint violations).
+**
+** If some client does modify the target database mid RBU update, or some
+** other error occurs, the RBU extension will keep throwing errors. It's
+** not really clear how to get out of this state. The system could simply
+** delete the RBU update database and *-oal file and have the device
+** download the update again and start over.
+**
+** At present, for an UPDATE, both the new.* and old.* records are
+** collected in the rbu_xyz table. And for both UPDATEs and DELETEs all
+** fields are collected. This means we're probably writing a lot more
+** data to disk when saving the state of an ongoing update to the RBU
+** update database than is strictly necessary.
+**
+*/
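+
+/*
+** Minimal client sketch (illustrative; error handling elided and file
+** names hypothetical).  An RBU update is applied by stepping the handle
+** until it reports something other than SQLITE_OK:
+**
+**    sqlite3rbu *pRbu = sqlite3rbu_open("target.db", "update.rbu", 0);
+**    while( sqlite3rbu_step(pRbu)==SQLITE_OK ){ }
+**    char *zErr = 0;
+**    int rc = sqlite3rbu_close(pRbu, &zErr);  /* SQLITE_DONE on success */
+**    sqlite3_free(zErr);
+*/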
+
+/* #include <assert.h> */
+/* #include <string.h> */
+/* #include <stdio.h> */
+
+/* #include "sqlite3.h" */
+
+#if !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_RBU)
+/************** Include sqlite3rbu.h in the middle of sqlite3rbu.c ***********/
+/************** Begin file sqlite3rbu.h **************************************/
+/*
+** 2014 August 30
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+**
+** This file contains the public interface for the RBU extension.
+*/
+
+/*
+** SUMMARY
+**
+** Writing a transaction containing a large number of operations on
+** b-tree indexes that are collectively larger than the available cache
+** memory can be very inefficient.
+**
+** The problem is that in order to update a b-tree, the leaf page (at least)
+** containing the entry being inserted or deleted must be modified. If the
+** working set of leaves is larger than the available cache memory, then a
+** single leaf that is modified more than once as part of the transaction
+** may be loaded from or written to the persistent media multiple times.
+** Additionally, because the index updates are likely to be applied in
+** random order, access to pages within the database is also likely to be in
+** random order, which is itself quite inefficient.
+**
+** One way to improve the situation is to sort the operations on each index
+** by index key before applying them to the b-tree. This leads to an IO
+** pattern that resembles a single linear scan through the index b-tree,
+** and all but guarantees each modified leaf page is loaded and stored
+** exactly once. SQLite uses this trick to improve the performance of
+** CREATE INDEX commands. This extension allows it to be used to improve
+** the performance of large transactions on existing databases.
+**
+** Additionally, this extension allows the work involved in writing the
+** large transaction to be broken down into sub-transactions performed
+** sequentially by separate processes. This is useful if the system cannot
+** guarantee that a single update process will run for long enough to apply
+** the entire update, for example because the update is being applied on a
+** mobile device that is frequently rebooted. Even after the writer process
+** has committed one or more sub-transactions, other database clients continue
+** to read from the original database snapshot. In other words, partially
+** applied transactions are not visible to other clients.
+**
+** "RBU" stands for "Resumable Bulk Update". As in a large database update
+** transmitted via a wireless network to a mobile device. A transaction
+** applied using this extension is hence referred to as an "RBU update".
+**
+**
+** LIMITATIONS
+**
+** An "RBU update" transaction is subject to the following limitations:
+**
+** * The transaction must consist of INSERT, UPDATE and DELETE operations
+** only.
+**
+** * INSERT statements may not use any default values.
+**
+** * UPDATE and DELETE statements must identify their target rows by
+** non-NULL PRIMARY KEY values. Rows with NULL values stored in PRIMARY
+** KEY fields may not be updated or deleted. If the table being written
+** has no PRIMARY KEY, affected rows must be identified by rowid.
+**
+** * UPDATE statements may not modify PRIMARY KEY columns.
+**
+** * No triggers will be fired.
+**
+** * No foreign key violations are detected or reported.
+**
+** * CHECK constraints are not enforced.
+**
+** * No constraint handling mode except for "OR ROLLBACK" is supported.
+**
+**
+** PREPARATION
+**
+** An "RBU update" is stored as a separate SQLite database. A database
+** containing an RBU update is an "RBU database". For each table in the
+** target database to be updated, the RBU database should contain a table
+** named "data_<target name>" containing the same set of columns as the
+** target table, and one more - "rbu_control". The data_% table should
+** have no PRIMARY KEY or UNIQUE constraints, but each column should have
+** the same type as the corresponding column in the target database.
+** The "rbu_control" column should have no type at all. For example, if
+** the target database contains:
+**
+** CREATE TABLE t1(a INTEGER PRIMARY KEY, b TEXT, c UNIQUE);
+**
+** Then the RBU database should contain:
+**
+** CREATE TABLE data_t1(a INTEGER, b TEXT, c, rbu_control);
+**
+** The order of the columns in the data_% table does not matter.
+**
+** Instead of a regular table, the RBU database may also contain virtual
+** tables or views named using the data_<target> naming scheme.
+**
+** Instead of the plain data_<target> naming scheme, RBU database tables
+** may also be named data<integer>_<target>, where <integer> is any sequence
+** of zero or more numeric characters (0-9). This can be significant because
+** tables within the RBU database are always processed in order sorted by
+** name. By judicious selection of the <integer> portion of the names
+** of the RBU tables the user can therefore control the order in which they
+** are processed. This can be useful, for example, to ensure that "external
+** content" FTS4 tables are updated before their underlying content tables.
+**
+** If the target database table is a virtual table or a table that has no
+** PRIMARY KEY declaration, the data_% table must also contain a column
+** named "rbu_rowid". This column is mapped to the tables implicit primary
+** key column - "rowid". Virtual tables for which the "rowid" column does
+** not function like a primary key value cannot be updated using RBU. For
+** example, if the target db contains either of the following:
+**
+** CREATE VIRTUAL TABLE x1 USING fts3(a, b);
+** CREATE TABLE x1(a, b)
+**
+** then the RBU database should contain:
+**
+** CREATE TABLE data_x1(a, b, rbu_rowid, rbu_control);
+**
+** All non-hidden columns (i.e. all columns matched by "SELECT *") of the
+** target table must be present in the input table. For virtual tables,
+** hidden columns are optional - they are updated by RBU if present in the
+** input table, and left unmodified otherwise. For example, to write to an
+** fts4 table with a hidden languageid column such as:
+**
+** CREATE VIRTUAL TABLE ft1 USING fts4(a, b, languageid='langid');
+**
+** Either of the following input table schemas may be used:
+**
+** CREATE TABLE data_ft1(a, b, langid, rbu_rowid, rbu_control);
+** CREATE TABLE data_ft1(a, b, rbu_rowid, rbu_control);
+**
+** For each row to INSERT into the target database as part of the RBU
+** update, the corresponding data_% table should contain a single record
+** with the "rbu_control" column set to contain integer value 0. The
+** other columns should be set to the values that make up the new record
+** to insert.
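+**
+** For example, to insert the new row (5, 'five', 'hello') into table t1
+** as declared above (the values are illustrative only):
+**
+**     INSERT INTO data_t1(a, b, c, rbu_control) VALUES(5, 'five', 'hello', 0);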
+**
+** If the target database table has an INTEGER PRIMARY KEY, it is not
+** possible to insert a NULL value into the IPK column. Attempting to
+** do so results in an SQLITE_MISMATCH error.
+**
+** For each row to DELETE from the target database as part of the RBU
+** update, the corresponding data_% table should contain a single record
+** with the "rbu_control" column set to contain integer value 1. The
+** real primary key values of the row to delete should be stored in the
+** corresponding columns of the data_% table. The values stored in the
+** other columns are not used.
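+**
+** For example, to delete the row of table t1 (as declared above) with
+** primary key value 4:
+**
+**     INSERT INTO data_t1(a, b, c, rbu_control) VALUES(4, NULL, NULL, 1);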
+**
+** For each row to UPDATE from the target database as part of the RBU
+** update, the corresponding data_% table should contain a single record
+** with the "rbu_control" column set to contain a value of type text.
+** The real primary key values identifying the row to update should be
+** stored in the corresponding columns of the data_% table row, as should
+** the new values of all columns being updated. The text value in the
+** "rbu_control" column must contain the same number of characters as
+** there are columns in the target database table, and must consist entirely
+** of 'x' and '.' characters (or in some special cases 'd' - see below). For
+** each column that is being updated, the corresponding character is set to
+** 'x'. For those that remain as they are, the corresponding character of the
+** rbu_control value should be set to '.'. For example, given the tables
+** above, the update statement:
+**
+** UPDATE t1 SET c = 'usa' WHERE a = 4;
+**
+** is represented by the data_t1 row created by:
+**
+** INSERT INTO data_t1(a, b, c, rbu_control) VALUES(4, NULL, 'usa', '..x');
+**
+** Instead of an 'x' character, characters of the rbu_control value specified
+** for UPDATEs may also be set to 'd'. In this case, instead of updating the
+** target table with the value stored in the corresponding data_% column, the
+** user-defined SQL function "rbu_delta()" is invoked and the result stored in
+** the target table column. rbu_delta() is invoked with two arguments - the
+** original value currently stored in the target table column and the
+** value specified in the data_xxx table.
+**
+** For example, this row:
+**
+** INSERT INTO data_t1(a, b, c, rbu_control) VALUES(4, NULL, 'usa', '..d');
+**
+** is similar to an UPDATE statement such as:
+**
+** UPDATE t1 SET c = rbu_delta(c, 'usa') WHERE a = 4;
+**
+** Finally, if an 'f' character appears in place of a 'd' or 'x' in an
+** rbu_control string, the content of the data_xxx table column is assumed
+** to be a "fossil delta" - a patch to be applied to a blob value in the
+** format used by the fossil source-code management system. In this case
+** the existing value within the target database table must be of type BLOB.
+** It is replaced by the result of applying the specified fossil delta to
+** itself.
+**
+** If the target database table is a virtual table or a table with no PRIMARY
+** KEY, the rbu_control value should not include a character corresponding
+** to the rbu_rowid value. For example, this:
+**
+** INSERT INTO data_ft1(a, b, rbu_rowid, rbu_control)
+** VALUES(NULL, 'usa', 12, '.x');
+**
+** causes a result similar to:
+**
+** UPDATE ft1 SET b = 'usa' WHERE rowid = 12;
+**
+** The data_xxx tables themselves should have no PRIMARY KEY declarations.
+** However, RBU is more efficient if reading the rows from each data_xxx
+** table in "rowid" order is roughly the same as reading them sorted by
+** the PRIMARY KEY of the corresponding target database table. In other
+** words, rows should be sorted using the destination table PRIMARY KEY
+** fields before they are inserted into the data_xxx tables.
+**
+** USAGE
+**
+** The API declared below allows an application to apply an RBU update
+** stored on disk to an existing target database. Essentially, the
+** application:
+**
+** 1) Opens an RBU handle using the sqlite3rbu_open() function.
+**
+** 2) Registers any required virtual table modules with the database
+** handle returned by sqlite3rbu_db(). Also, if required, register
+** the rbu_delta() implementation.
+**
+** 3) Calls the sqlite3rbu_step() function one or more times on
+** the new handle. Each call to sqlite3rbu_step() performs a single
+** b-tree operation, so thousands of calls may be required to apply
+** a complete update.
+**
+** 4) Calls sqlite3rbu_close() to close the RBU update handle. If
+** sqlite3rbu_step() has been called enough times to completely
+** apply the update to the target database, then the RBU database
+** is marked as fully applied. Otherwise, the state of the RBU
+** update application is saved in the RBU database for later
+** resumption.
+**
+** See comments below for more detail on APIs.
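+**
+** A minimal sketch of this loop (the file names are illustrative; the
+** step 2 registrations and most error handling are omitted):
+**
+**     int rc;
+**     char *zErrmsg = 0;
+**     sqlite3rbu *pRbu = sqlite3rbu_open("target.db", "update.rbu", 0);
+**     while( sqlite3rbu_step(pRbu)==SQLITE_OK ){ }
+**     rc = sqlite3rbu_close(pRbu, &zErrmsg);
+**     // rc==SQLITE_DONE: update fully applied. rc==SQLITE_OK: update
+**     // suspended for later resumption. Otherwise: error (see zErrmsg).
+**     sqlite3_free(zErrmsg);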
+**
+** If an update is only partially applied to the target database by the
+** time sqlite3rbu_close() is called, various state information is saved
+** within the RBU database. This allows subsequent processes to automatically
+** resume the RBU update from where it left off.
+**
+** To remove all RBU extension state information, returning an RBU database
+** to its original contents, it is sufficient to drop all tables that begin
+** with the prefix "rbu_".
+**
+** DATABASE LOCKING
+**
+** An RBU update may not be applied to a database in WAL mode. Attempting
+** to do so is an error (SQLITE_ERROR).
+**
+** While an RBU handle is open, a SHARED lock may be held on the target
+** database file. This means it is possible for other clients to read the
+** database, but not to write it.
+**
+** If an RBU update is started and then suspended before it is completed,
+** and an external client then writes to the database, attempting to resume
+** the suspended RBU update is also an error (SQLITE_BUSY).
+*/
+
+#ifndef _SQLITE3RBU_H
+#define _SQLITE3RBU_H
+
+/* #include "sqlite3.h" ** Required for error code definitions ** */
+
+#if 0
+extern "C" {
+#endif
+
+typedef struct sqlite3rbu sqlite3rbu;
+
+/*
+** Open an RBU handle.
+**
+** Argument zTarget is the path to the target database. Argument zRbu is
+** the path to the RBU database. Each call to this function must be matched
+** by a call to sqlite3rbu_close(). When opening the databases, RBU passes
+** the SQLITE_OPEN_URI flag to sqlite3_open_v2(). So if either zTarget
+** or zRbu begins with "file:", it will be interpreted as an SQLite
+** database URI, not a regular file name.
+**
+** If the zState argument is passed a NULL value, the RBU extension stores
+** the current state of the update (how many rows have been updated, which
+** indexes are yet to be updated etc.) within the RBU database itself. This
+** can be convenient, as it means that the RBU application does not need to
+** organize removing a separate state file after the update is concluded.
+** Or, if zState is non-NULL, it must be a path to a database file in which
+** the RBU extension can store the state of the update.
+**
+** When resuming an RBU update, the zState argument must be passed the same
+** value as when the RBU update was started.
+**
+** Once the RBU update is finished, the RBU extension does not
+** automatically remove any zState database file, even if it created it.
+**
+** By default, RBU uses the default VFS to access the files on disk. To
+** use a VFS other than the default, an SQLite "file:" URI containing a
+** "vfs=..." option may be passed as the zTarget option.
+**
+** IMPORTANT NOTE FOR ZIPVFS USERS: The RBU extension works with all of
+** SQLite's built-in VFSs, including the multiplexor VFS. However it does
+** not work out of the box with zipvfs. Refer to the comment describing
+** the zipvfs_create_vfs() API below for details on using RBU with zipvfs.
+*/
+SQLITE_API sqlite3rbu *sqlite3rbu_open(
+ const char *zTarget,
+ const char *zRbu,
+ const char *zState
+);
+
+/*
+** Open an RBU handle to perform an RBU vacuum on database file zTarget.
+** An RBU vacuum is similar to SQLite's built-in VACUUM command, except
+** that it can be suspended and resumed like an RBU update.
+**
+** The second argument to this function identifies a database in which
+** to store the state of the RBU vacuum operation if it is suspended. The
+** first time sqlite3rbu_vacuum() is called, to start an RBU vacuum
+** operation, the state database should either not exist or be empty
+** (contain no tables). If an RBU vacuum is suspended by calling
+** sqlite3rbu_close() on the RBU handle before sqlite3rbu_step() has
+** returned SQLITE_DONE, the vacuum state is stored in the state database.
+** The vacuum can be resumed by calling this function to open a new RBU
+** handle specifying the same target and state databases.
+**
+** If the second argument passed to this function is NULL, then the
+** name of the state database is "<database>-vacuum", where <database>
+** is the name of the target database file. In this case, on UNIX, if the
+** state database is not already present in the file-system, it is created
+** with the same permissions as the target db.
+**
+** With an RBU vacuum, it is an SQLITE_MISUSE error if the name of the
+** state database ends with "-vactmp". This name is reserved for internal
+** use.
+**
+** This function does not delete the state database after an RBU vacuum
+** is completed, even if it created it. However, if the call to
+** sqlite3rbu_close() returns any value other than SQLITE_OK, the contents
+** of the state tables within the state database are zeroed. This way,
+** the next call to sqlite3rbu_vacuum() opens a handle that starts a
+** new RBU vacuum operation.
+**
+** As with sqlite3rbu_open(), Zipvfs users should refer to the comment
+** describing the sqlite3rbu_create_vfs() API function below for
+** a description of the complications associated with using RBU with
+** zipvfs databases.
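+**
+** For example, to start (or resume) an RBU vacuum of a database named
+** "main.db", storing state in the default "main.db-vacuum" database:
+**
+**     sqlite3rbu *pRbu = sqlite3rbu_vacuum("main.db", 0);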
+*/
+SQLITE_API sqlite3rbu *sqlite3rbu_vacuum(
+ const char *zTarget,
+ const char *zState
+);
+
+/*
+** Configure a limit for the amount of temp space that may be used by
+** the RBU handle passed as the first argument. The new limit is specified
+** in bytes by the second parameter. If it is positive, the limit is updated.
+** If the second parameter to this function is passed zero, then the limit
+** is removed entirely. If the second parameter is negative, the limit is
+** not modified (this is useful for querying the current limit).
+**
+** In all cases the returned value is the current limit in bytes (zero
+** indicates unlimited).
+**
+** If the temp space limit is exceeded during operation, an SQLITE_FULL
+** error is returned.
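+**
+** For example, to limit the temp space used by an open RBU handle pRbu
+** (illustrative) to 8MiB:
+**
+**     sqlite3rbu_temp_size_limit(pRbu, 8*1024*1024);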
+*/
+SQLITE_API sqlite3_int64 sqlite3rbu_temp_size_limit(sqlite3rbu*, sqlite3_int64);
+
+/*
+** Return the amount of temp file space, in bytes, currently used by
+** the RBU handle passed as the only argument.
+*/
+SQLITE_API sqlite3_int64 sqlite3rbu_temp_size(sqlite3rbu*);
+
+/*
+** Internally, each RBU connection uses a separate SQLite database
+** connection to access the target and rbu update databases. This
+** API allows the application direct access to these database handles.
+**
+** The first argument passed to this function must be a valid, open, RBU
+** handle. The second argument should be passed zero to access the target
+** database handle, or non-zero to access the rbu update database handle.
+** Accessing the underlying database handles may be useful in the
+** following scenarios:
+**
+** * If any target tables are virtual tables, it may be necessary to
+** call sqlite3_create_module() on the target database handle to
+** register the required virtual table implementations.
+**
+** * If the data_xxx tables in the RBU source database are virtual
+** tables, the application may need to call sqlite3_create_module() on
+**     the rbu update db handle to register any required virtual table
+** implementations.
+**
+** * If the application uses the "rbu_delta()" feature described above,
+** it must use sqlite3_create_function() or similar to register the
+** rbu_delta() implementation with the target database handle.
+**
+** If an error has occurred, either while opening or stepping the RBU object,
+** this function may return NULL. The error code and message may be collected
+** when sqlite3rbu_close() is called.
+**
+** Database handles returned by this function remain valid until the next
+** call to any sqlite3rbu_xxx() function other than sqlite3rbu_db().
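+**
+** For example, to register a hypothetical two-argument rbu_delta()
+** implementation, xRbuDelta, with the target database handle of an open
+** RBU handle pRbu:
+**
+**     sqlite3 *dbTarget = sqlite3rbu_db(pRbu, 0);
+**     sqlite3_create_function(dbTarget, "rbu_delta", 2, SQLITE_UTF8, 0,
+**         xRbuDelta, 0, 0);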
+*/
+SQLITE_API sqlite3 *sqlite3rbu_db(sqlite3rbu*, int bRbu);
+
+/*
+** Do some work towards applying the RBU update to the target db.
+**
+** Return SQLITE_DONE if the update has been completely applied, or
+** SQLITE_OK if no error occurs but there remains work to do to apply
+** the RBU update. If an error does occur, some other error code is
+** returned.
+**
+** Once a call to sqlite3rbu_step() has returned a value other than
+** SQLITE_OK, all subsequent calls on the same RBU handle are no-ops
+** that immediately return the same value.
+*/
+SQLITE_API int sqlite3rbu_step(sqlite3rbu *pRbu);
+
+/*
+** Force RBU to save its state to disk.
+**
+** If a power failure or application crash occurs during an update, following
+** system recovery RBU may resume the update from the point at which the state
+** was last saved. In other words, from the most recent successful call to
+** sqlite3rbu_close() or this function.
+**
+** SQLITE_OK is returned if successful, or an SQLite error code otherwise.
+*/
+SQLITE_API int sqlite3rbu_savestate(sqlite3rbu *pRbu);
+
+/*
+** Close an RBU handle.
+**
+** If the RBU update has been completely applied, mark the RBU database
+** as fully applied. Otherwise, assuming no error has occurred, save the
+** current state of the RBU update application to the RBU database.
+**
+** If an error has already occurred as part of an sqlite3rbu_step()
+** or sqlite3rbu_open() call, or if one occurs within this function, an
+** SQLite error code is returned. Additionally, if pzErrmsg is not NULL,
+** *pzErrmsg may be set to point to a buffer containing a utf-8 formatted
+** English language error message. It is the responsibility of the caller to
+** eventually free any such buffer using sqlite3_free().
+**
+** Otherwise, if no error occurs, this function returns SQLITE_OK if the
+** update has been partially applied, or SQLITE_DONE if it has been
+** completely applied.
+*/
+SQLITE_API int sqlite3rbu_close(sqlite3rbu *pRbu, char **pzErrmsg);
+
+/*
+** Return the total number of key-value operations (inserts, deletes or
+** updates) that have been performed on the target database since the
+** current RBU update was started.
+*/
+SQLITE_API sqlite3_int64 sqlite3rbu_progress(sqlite3rbu *pRbu);
+
+/*
+** Obtain permyriadage (permyriadage is to 10000 as percentage is to 100)
+** progress indications for the two stages of an RBU update. This API may
+** be useful for driving GUI progress indicators and similar.
+**
+** An RBU update is divided into two stages:
+**
+** * Stage 1, in which changes are accumulated in an oal/wal file, and
+** * Stage 2, in which the contents of the wal file are copied into the
+** main database.
+**
+** The update is visible to non-RBU clients during stage 2. During stage 1
+** non-RBU reader clients may see the original database.
+**
+** If this API is called during stage 2 of the update, output variable
+** (*pnOne) is set to 10000 to indicate that stage 1 has finished and (*pnTwo)
+** to a value between 0 and 10000 to indicate the permyriadage progress of
+** stage 2. A value of 5000 indicates that stage 2 is half finished,
+** 9000 indicates that it is 90% finished, and so on.
+**
+** If this API is called during stage 1 of the update, output variable
+** (*pnTwo) is set to 0 to indicate that stage 2 has not yet started. The
+** value to which (*pnOne) is set depends on whether or not the RBU
+** database contains an "rbu_count" table. The rbu_count table, if it
+** exists, must contain the same columns as the following:
+**
+** CREATE TABLE rbu_count(tbl TEXT PRIMARY KEY, cnt INTEGER) WITHOUT ROWID;
+**
+** There must be one row in the table for each source (data_xxx) table within
+** the RBU database. The 'tbl' column should contain the name of the source
+** table. The 'cnt' column should contain the number of rows within the
+** source table.
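+**
+** For example, if the update consists of a single source table data_t1
+** containing 1000 rows:
+**
+**     INSERT INTO rbu_count(tbl, cnt) VALUES('data_t1', 1000);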
+**
+** If the rbu_count table is present and populated correctly and this
+** API is called during stage 1, the *pnOne output variable is set to the
+** permyriadage progress of the same stage. If the rbu_count table does
+** not exist, then (*pnOne) is set to -1 during stage 1. If the rbu_count
+** table exists but is not correctly populated, the value of the *pnOne
+** output variable during stage 1 is undefined.
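+**
+** For example, to display percentage progress (pRbu is an open RBU
+** handle; illustrative only):
+**
+**     int nOne, nTwo;
+**     sqlite3rbu_bp_progress(pRbu, &nOne, &nTwo);
+**     // nOne may be -1 if no rbu_count table is present
+**     printf("stage 1: %.2f%%, stage 2: %.2f%%\n", nOne/100.0, nTwo/100.0);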
+*/
+SQLITE_API void sqlite3rbu_bp_progress(sqlite3rbu *pRbu, int *pnOne, int*pnTwo);
+
+/*
+** Obtain an indication as to the current stage of an RBU update or vacuum.
+** This function always returns one of the SQLITE_RBU_STATE_XXX constants
+** defined in this file. Return values should be interpreted as follows:
+**
+** SQLITE_RBU_STATE_OAL:
+** RBU is currently building a *-oal file. The next call to sqlite3rbu_step()
+** may either add further data to the *-oal file, or compute data that will
+** be added by a subsequent call.
+**
+** SQLITE_RBU_STATE_MOVE:
+** RBU has finished building the *-oal file. The next call to sqlite3rbu_step()
+** will move the *-oal file to the equivalent *-wal path. If the current
+** operation is an RBU update, then the updated version of the database
+** file will become visible to ordinary SQLite clients following the next
+** call to sqlite3rbu_step().
+**
+** SQLITE_RBU_STATE_CHECKPOINT:
+** RBU is currently performing an incremental checkpoint. The next call to
+** sqlite3rbu_step() will copy a page of data from the *-wal file into
+** the target database file.
+**
+** SQLITE_RBU_STATE_DONE:
+** The RBU operation has finished. Any subsequent calls to sqlite3rbu_step()
+** will immediately return SQLITE_DONE.
+**
+** SQLITE_RBU_STATE_ERROR:
+** An error has occurred. Any subsequent calls to sqlite3rbu_step() will
+** immediately return the SQLite error code associated with the error.
+*/
+#define SQLITE_RBU_STATE_OAL 1
+#define SQLITE_RBU_STATE_MOVE 2
+#define SQLITE_RBU_STATE_CHECKPOINT 3
+#define SQLITE_RBU_STATE_DONE 4
+#define SQLITE_RBU_STATE_ERROR 5
+
+SQLITE_API int sqlite3rbu_state(sqlite3rbu *pRbu);
+
+/*
+** Create an RBU VFS named zName that accesses the underlying file-system
+** via existing VFS zParent. Or, if the zParent parameter is passed NULL,
+** then the new RBU VFS uses the default system VFS to access the file-system.
+** The new object is registered as a non-default VFS with SQLite before
+** returning.
+**
+** Part of the RBU implementation uses a custom VFS object. Usually, this
+** object is created and deleted automatically by RBU.
+**
+** The exception is for applications that also use zipvfs. In this case,
+** the custom VFS must be explicitly created by the user before the RBU
+** handle is opened. The RBU VFS should be installed so that the zipvfs
+** VFS uses the RBU VFS, which in turn uses any other VFS layers in use
+** (for example multiplexor) to access the file-system. For example,
+** to assemble an RBU enabled VFS stack that uses both zipvfs and
+** multiplexor (error checking omitted):
+**
+** // Create a VFS named "multiplex" (not the default).
+** sqlite3_multiplex_initialize(0, 0);
+**
+** // Create an rbu VFS named "rbu" that uses multiplexor. If the
+** // second argument were replaced with NULL, the "rbu" VFS would
+** // access the file-system via the system default VFS, bypassing the
+** // multiplexor.
+** sqlite3rbu_create_vfs("rbu", "multiplex");
+**
+** // Create a zipvfs VFS named "zipvfs" that uses rbu.
+** zipvfs_create_vfs_v3("zipvfs", "rbu", 0, xCompressorAlgorithmDetector);
+**
+** // Make zipvfs the default VFS.
+** sqlite3_vfs_register(sqlite3_vfs_find("zipvfs"), 1);
+**
+** Because the default VFS created above includes RBU functionality, it
+** may be used by RBU clients. Attempting to use RBU with a zipvfs VFS stack
+** that does not include the RBU layer results in an error.
+**
+** The overhead of adding the "rbu" VFS to the system is negligible for
+** non-RBU users. There is no harm in an application accessing the
+** file-system via "rbu" all the time, even if it only uses RBU functionality
+** occasionally.
+*/
+SQLITE_API int sqlite3rbu_create_vfs(const char *zName, const char *zParent);
+
+/*
+** Deregister and destroy an RBU vfs created by an earlier call to
+** sqlite3rbu_create_vfs().
+**
+** VFS objects are not reference counted. If a VFS object is destroyed
+** before all database handles that use it have been closed, the results
+** are undefined.
+*/
+SQLITE_API void sqlite3rbu_destroy_vfs(const char *zName);
+
+#if 0
+} /* end of the 'extern "C"' block */
+#endif
+
+#endif /* _SQLITE3RBU_H */
+
+/************** End of sqlite3rbu.h ******************************************/
+/************** Continuing where we left off in sqlite3rbu.c *****************/
+
+#if defined(_WIN32_WCE)
+/* #include "windows.h" */
+#endif
+
+/* Maximum number of prepared UPDATE statements held by this module */
+#define SQLITE_RBU_UPDATE_CACHESIZE 16
+
+/* Delta checksums disabled by default. Compile with -DRBU_ENABLE_DELTA_CKSUM
+** to enable checksum verification.
+*/
+#ifndef RBU_ENABLE_DELTA_CKSUM
+# define RBU_ENABLE_DELTA_CKSUM 0
+#endif
+
+/*
+** Swap two objects of type TYPE.
+*/
+#if !defined(SQLITE_AMALGAMATION)
+# define SWAP(TYPE,A,B) {TYPE t=A; A=B; B=t;}
+#endif
+
+/*
+** The rbu_state table is used to save the state of a partially applied
+** update so that it can be resumed later. The table consists of integer
+** keys mapped to values as follows:
+**
+** RBU_STATE_STAGE:
+**   May be set to integer values 1, 2, 4 or 5, as follows:
+**       1: the *-oal file is currently under construction.
+**       2: the *-oal file has been constructed, but not yet moved
+**          to the *-wal path.
+** 4: the checkpoint is underway.
+** 5: the rbu update has been checkpointed.
+**
+** RBU_STATE_TBL:
+** Only valid if STAGE==1. The target database name of the table
+** currently being written.
+**
+** RBU_STATE_IDX:
+** Only valid if STAGE==1. The target database name of the index
+** currently being written, or NULL if the main table is currently being
+** updated.
+**
+** RBU_STATE_ROW:
+** Only valid if STAGE==1. Number of rows already processed for the current
+** table/index.
+**
+** RBU_STATE_PROGRESS:
+**   Total number of sqlite3rbu_step() calls made so far as part of this
+** rbu update.
+**
+** RBU_STATE_CKPT:
+** Valid if STAGE==4. The 64-bit checksum associated with the wal-index
+** header created by recovering the *-wal file. This is used to detect
+** cases when another client appends frames to the *-wal file in the
+** middle of an incremental checkpoint (an incremental checkpoint cannot
+** be continued if this happens).
+**
+** RBU_STATE_COOKIE:
+** Valid if STAGE==1. The current change-counter cookie value in the
+** target db file.
+**
+** RBU_STATE_OALSZ:
+** Valid if STAGE==1. The size in bytes of the *-oal file.
+**
+** RBU_STATE_DATATBL:
+** Only valid if STAGE==1. The RBU database name of the table
+** currently being read.
+*/
+#define RBU_STATE_STAGE 1
+#define RBU_STATE_TBL 2
+#define RBU_STATE_IDX 3
+#define RBU_STATE_ROW 4
+#define RBU_STATE_PROGRESS 5
+#define RBU_STATE_CKPT 6
+#define RBU_STATE_COOKIE 7
+#define RBU_STATE_OALSZ 8
+#define RBU_STATE_PHASEONESTEP 9
+#define RBU_STATE_DATATBL 10
+
+#define RBU_STAGE_OAL 1
+#define RBU_STAGE_MOVE 2
+#define RBU_STAGE_CAPTURE 3
+#define RBU_STAGE_CKPT 4
+#define RBU_STAGE_DONE 5
+
+
+#define RBU_CREATE_STATE \
+ "CREATE TABLE IF NOT EXISTS %s.rbu_state(k INTEGER PRIMARY KEY, v)"
+
+typedef struct RbuFrame RbuFrame;
+typedef struct RbuObjIter RbuObjIter;
+typedef struct RbuState RbuState;
+typedef struct RbuSpan RbuSpan;
+typedef struct rbu_vfs rbu_vfs;
+typedef struct rbu_file rbu_file;
+typedef struct RbuUpdateStmt RbuUpdateStmt;
+
+#if !defined(SQLITE_AMALGAMATION)
+typedef unsigned int u32;
+typedef unsigned short u16;
+typedef unsigned char u8;
+typedef sqlite3_int64 i64;
+#endif
+
+/*
+** These values must match the values defined in wal.c for the equivalent
+** locks. These are not magic numbers as they are part of the SQLite file
+** format.
+*/
+#define WAL_LOCK_WRITE 0
+#define WAL_LOCK_CKPT 1
+#define WAL_LOCK_READ0 3
+
+#define SQLITE_FCNTL_RBUCNT 5149216
+
+/*
+** A structure to store values read from the rbu_state table in memory.
+*/
+struct RbuState {
+ int eStage;
+ char *zTbl;
+ char *zDataTbl;
+ char *zIdx;
+ i64 iWalCksum;
+ int nRow;
+ i64 nProgress;
+ u32 iCookie;
+ i64 iOalSz;
+ i64 nPhaseOneStep;
+};
+
+struct RbuUpdateStmt {
+ char *zMask; /* Copy of update mask used with pUpdate */
+ sqlite3_stmt *pUpdate; /* Last update statement (or NULL) */
+ RbuUpdateStmt *pNext;
+};
+
+struct RbuSpan {
+ const char *zSpan;
+ int nSpan;
+};
+
+/*
+** An iterator of this type is used to iterate through all objects in
+** the target database that require updating. For each such table, the
+** iterator visits, in order:
+**
+** * the table itself,
+** * each index of the table (zero or more points to visit), and
+** * a special "cleanup table" state.
+**
+** abIndexed:
+** If the table has no indexes on it, abIndexed is set to NULL. Otherwise,
+**   it points to an array of flags nTblCol elements in size. The flag is
+**   set for each column that is either part of the PK or part of an
+**   index, and clear otherwise.
+**
+**   If there are one or more partial indexes on the table, all fields of
+**   this array are set to 1. This is because in that case the module has
+**   no way to tell which fields are required to add and remove entries
+**   from the partial indexes.
+**
+*/
+struct RbuObjIter {
+ sqlite3_stmt *pTblIter; /* Iterate through tables */
+ sqlite3_stmt *pIdxIter; /* Index iterator */
+ int nTblCol; /* Size of azTblCol[] array */
+ char **azTblCol; /* Array of unquoted target column names */
+ char **azTblType; /* Array of target column types */
+ int *aiSrcOrder; /* src table col -> target table col */
+ u8 *abTblPk; /* Array of flags, set on target PK columns */
+ u8 *abNotNull; /* Array of flags, set on NOT NULL columns */
+ u8 *abIndexed; /* Array of flags, set on indexed & PK cols */
+ int eType; /* Table type - an RBU_PK_XXX value */
+
+ /* Output variables. zTbl==0 implies EOF. */
+ int bCleanup; /* True in "cleanup" state */
+ const char *zTbl; /* Name of target db table */
+ const char *zDataTbl; /* Name of rbu db table (or null) */
+ const char *zIdx; /* Name of target db index (or null) */
+ int iTnum; /* Root page of current object */
+ int iPkTnum; /* If eType==EXTERNAL, root of PK index */
+ int bUnique; /* Current index is unique */
+ int nIndex; /* Number of aux. indexes on table zTbl */
+
+ /* Statements created by rbuObjIterPrepareAll() */
+ int nCol; /* Number of columns in current object */
+ sqlite3_stmt *pSelect; /* Source data */
+ sqlite3_stmt *pInsert; /* Statement for INSERT operations */
+ sqlite3_stmt *pDelete; /* Statement for DELETE ops */
+ sqlite3_stmt *pTmpInsert; /* Insert into rbu_tmp_$zDataTbl */
+ int nIdxCol;
+ RbuSpan *aIdxCol;
+ char *zIdxSql;
+
+ /* Last UPDATE used (for PK b-tree updates only), or NULL. */
+ RbuUpdateStmt *pRbuUpdate;
+};
+
+/*
+** Values for RbuObjIter.eType
+**
+** 0: Table does not exist (error)
+** 1: Table has an implicit rowid.
+** 2: Table has an explicit IPK column.
+** 3: Table has an external PK index.
+** 4: Table is WITHOUT ROWID.
+** 5: Table is a virtual table.
+*/
+#define RBU_PK_NOTABLE 0
+#define RBU_PK_NONE 1
+#define RBU_PK_IPK 2
+#define RBU_PK_EXTERNAL 3
+#define RBU_PK_WITHOUT_ROWID 4
+#define RBU_PK_VTAB 5
+
+
+/*
+** Within the RBU_STAGE_OAL stage, each call to sqlite3rbu_step() performs
+** one of the following operations.
+*/
+#define RBU_INSERT 1 /* Insert on a main table b-tree */
+#define RBU_DELETE 2 /* Delete a row from a main table b-tree */
+#define RBU_REPLACE 3 /* Delete and then insert a row */
+#define RBU_IDX_DELETE 4 /* Delete a row from an aux. index b-tree */
+#define RBU_IDX_INSERT 5 /* Insert on an aux. index b-tree */
+
+#define RBU_UPDATE 6 /* Update a row in a main table b-tree */
+
+/*
+** A single step of an incremental checkpoint - frame iWalFrame of the wal
+** file should be copied to page iDbPage of the database file.
+*/
+struct RbuFrame {
+ u32 iDbPage;
+ u32 iWalFrame;
+};
+
+/*
+** RBU handle.
+**
+** nPhaseOneStep:
+** If the RBU database contains an rbu_count table, this value is set to
+** a running estimate of the number of b-tree operations required to
+**   finish populating the *-oal file. This allows the sqlite3rbu_bp_progress()
+** API to calculate the permyriadage progress of populating the *-oal file
+** using the formula:
+**
+** permyriadage = (10000 * nProgress) / nPhaseOneStep
+**
+** nPhaseOneStep is initialized to the sum of:
+**
+** nRow * (nIndex + 1)
+**
+** for all source tables in the RBU database, where nRow is the number
+** of rows in the source table and nIndex the number of indexes on the
+** corresponding target database table.
+**
+** This estimate is accurate if the RBU update consists entirely of
+** INSERT operations. However, it is inaccurate if:
+**
+** * the RBU update contains any UPDATE operations. If the PK specified
+** for an UPDATE operation does not exist in the target table, then
+** no b-tree operations are required on index b-trees. Or if the
+** specified PK does exist, then (nIndex*2) such operations are
+** required (one delete and one insert on each index b-tree).
+**
+** * the RBU update contains any DELETE operations for which the specified
+** PK does not exist. In this case no operations are required on index
+** b-trees.
+**
+** * the RBU update contains REPLACE operations. These are similar to
+** UPDATE operations.
+**
+** nPhaseOneStep is updated to account for the conditions above during the
+** first pass of each source table. The updated nPhaseOneStep value is
+** stored in the rbu_state table if the RBU update is suspended.
+*/
+struct sqlite3rbu {
+ int eStage; /* Value of RBU_STATE_STAGE field */
+ sqlite3 *dbMain; /* target database handle */
+ sqlite3 *dbRbu; /* rbu database handle */
+ char *zTarget; /* Path to target db */
+ char *zRbu; /* Path to rbu db */
+ char *zState; /* Path to state db (or NULL if zRbu) */
+ char zStateDb[5]; /* Db name for state ("stat" or "main") */
+ int rc; /* Value returned by last rbu_step() call */
+ char *zErrmsg; /* Error message if rc!=SQLITE_OK */
+ int nStep; /* Rows processed for current object */
+ int nProgress; /* Rows processed for all objects */
+ RbuObjIter objiter; /* Iterator for skipping through tbl/idx */
+ const char *zVfsName; /* Name of automatically created rbu vfs */
+ rbu_file *pTargetFd; /* File handle open on target db */
+ int nPagePerSector; /* Pages per sector for pTargetFd */
+ i64 iOalSz;
+ i64 nPhaseOneStep;
+
+ /* The following state variables are used as part of the incremental
+ ** checkpoint stage (eStage==RBU_STAGE_CKPT). See comments surrounding
+ ** function rbuSetupCheckpoint() for details. */
+ u32 iMaxFrame; /* Largest iWalFrame value in aFrame[] */
+ u32 mLock;
+ int nFrame; /* Entries in aFrame[] array */
+ int nFrameAlloc; /* Allocated size of aFrame[] array */
+ RbuFrame *aFrame;
+ int pgsz;
+ u8 *aBuf;
+ i64 iWalCksum;
+ i64 szTemp; /* Current size of all temp files in use */
+ i64 szTempLimit; /* Total size limit for temp files */
+
+ /* Used in RBU vacuum mode only */
+ int nRbu; /* Number of RBU VFS in the stack */
+ rbu_file *pRbuFd; /* Fd for main db of dbRbu */
+};
+
+/*
+** An rbu VFS is implemented using an instance of this structure.
+**
+** Variable pRbu is only non-NULL for automatically created RBU VFS objects.
+** It is NULL for RBU VFS objects created explicitly using
+** sqlite3rbu_create_vfs(). It is used to track the total amount of temp
+** space used by the RBU handle.
+*/
+struct rbu_vfs {
+ sqlite3_vfs base; /* rbu VFS shim methods */
+ sqlite3_vfs *pRealVfs; /* Underlying VFS */
+ sqlite3_mutex *mutex; /* Mutex to protect pMain */
+ sqlite3rbu *pRbu; /* Owner RBU object */
+ rbu_file *pMain; /* List of main db files */
+ rbu_file *pMainRbu; /* List of main db files with pRbu!=0 */
+};
+
+/*
+** Each file opened by an rbu VFS is represented by an instance of
+** the following structure.
+**
+** If this is a temporary file (pRbu!=0 && flags&DELETE_ON_CLOSE), variable
+** "sz" is set to the current size of the database file.
+*/
+struct rbu_file {
+ sqlite3_file base; /* sqlite3_file methods */
+ sqlite3_file *pReal; /* Underlying file handle */
+ rbu_vfs *pRbuVfs; /* Pointer to the rbu_vfs object */
+ sqlite3rbu *pRbu; /* Pointer to rbu object (rbu target only) */
+ i64 sz; /* Size of file in bytes (temp only) */
+
+ int openFlags; /* Flags this file was opened with */
+ u32 iCookie; /* Cookie value for main db files */
+ u8 iWriteVer; /* "write-version" value for main db files */
+ u8 bNolock; /* True to fail EXCLUSIVE locks */
+
+ int nShm; /* Number of entries in apShm[] array */
+ char **apShm; /* Array of mmap'd *-shm regions */
+ char *zDel; /* Delete this when closing file */
+
+ const char *zWal; /* Wal filename for this main db file */
+ rbu_file *pWalFd; /* Wal file descriptor for this main db */
+ rbu_file *pMainNext; /* Next MAIN_DB file */
+ rbu_file *pMainRbuNext; /* Next MAIN_DB file with pRbu!=0 */
+};
+
+/*
+** True for an RBU vacuum handle, or false otherwise.
+*/
+#define rbuIsVacuum(p) ((p)->zTarget==0)
+
+
+/*************************************************************************
+** The following three functions, found below:
+**
+** rbuDeltaGetInt()
+** rbuDeltaChecksum()
+** rbuDeltaApply()
+**
+** are lifted from the fossil source code (http://fossil-scm.org). They
+** are used to implement the scalar SQL function rbu_fossil_delta().
+*/
+
+/*
+** Read bytes from *pz and convert them into a positive integer. When
+** finished, leave *pz pointing to the first character past the end of
+** the integer. The *pLen parameter holds the length of the string
+** in *pz and is decremented once for each character in the integer.
+*/
+static unsigned int rbuDeltaGetInt(const char **pz, int *pLen){
+ static const signed char zValue[] = {
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, -1, -1, -1, -1, -1, -1,
+ -1, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24,
+ 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, -1, -1, -1, -1, 36,
+ -1, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51,
+ 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, -1, -1, -1, 63, -1,
+ };
+ unsigned int v = 0;
+ int c;
+ unsigned char *z = (unsigned char*)*pz;
+ unsigned char *zStart = z;
+ while( (c = zValue[0x7f&*(z++)])>=0 ){
+ v = (v<<6) + c;
+ }
+ z--;
+ *pLen -= z - zStart;
+ *pz = (char*)z;
+ return v;
+}
+
+#if RBU_ENABLE_DELTA_CKSUM
+/*
+** Compute a 32-bit checksum on the N-byte buffer. Return the result.
+*/
+static unsigned int rbuDeltaChecksum(const char *zIn, size_t N){
+ const unsigned char *z = (const unsigned char *)zIn;
+ unsigned sum0 = 0;
+ unsigned sum1 = 0;
+ unsigned sum2 = 0;
+ unsigned sum3 = 0;
+ while(N >= 16){
+ sum0 += ((unsigned)z[0] + z[4] + z[8] + z[12]);
+ sum1 += ((unsigned)z[1] + z[5] + z[9] + z[13]);
+ sum2 += ((unsigned)z[2] + z[6] + z[10]+ z[14]);
+ sum3 += ((unsigned)z[3] + z[7] + z[11]+ z[15]);
+ z += 16;
+ N -= 16;
+ }
+ while(N >= 4){
+ sum0 += z[0];
+ sum1 += z[1];
+ sum2 += z[2];
+ sum3 += z[3];
+ z += 4;
+ N -= 4;
+ }
+ sum3 += (sum2 << 8) + (sum1 << 16) + (sum0 << 24);
+ switch(N){
+ case 3: sum3 += (z[2] << 8);
+ case 2: sum3 += (z[1] << 16);
+ case 1: sum3 += (z[0] << 24);
+ default: ;
+ }
+ return sum3;
+}
+#endif
+
+/*
+** Apply a delta.
+**
+** The output buffer should be big enough to hold the whole output
+** file and a NUL terminator at the end. The rbuDeltaOutputSize()
+** routine will determine this size for you.
+**
+** The delta string should be null-terminated. But the delta string
+** may contain embedded NUL characters (if the input and output are
+** binary files) so we also have to pass in the length of the delta in
+** the lenDelta parameter.
+**
+** This function returns the size of the output file in bytes (excluding
+** the final NUL terminator character). Except, if the delta string is
+** malformed or intended for use with a source file other than zSrc,
+** then this routine returns -1.
+**
+** Refer to the delta_create() documentation in the fossil source code
+** for a description of the delta file format.
+*/
+static int rbuDeltaApply(
+ const char *zSrc, /* The source or pattern file */
+ int lenSrc, /* Length of the source file */
+ const char *zDelta, /* Delta to apply to the pattern */
+ int lenDelta, /* Length of the delta */
+ char *zOut /* Write the output into this preallocated buffer */
+){
+ unsigned int limit;
+ unsigned int total = 0;
+#if RBU_ENABLE_DELTA_CKSUM
+ char *zOrigOut = zOut;
+#endif
+
+ limit = rbuDeltaGetInt(&zDelta, &lenDelta);
+ if( *zDelta!='\n' ){
+ /* ERROR: size integer not terminated by "\n" */
+ return -1;
+ }
+ zDelta++; lenDelta--;
+ while( *zDelta && lenDelta>0 ){
+ unsigned int cnt, ofst;
+ cnt = rbuDeltaGetInt(&zDelta, &lenDelta);
+ switch( zDelta[0] ){
+ case '@': {
+ zDelta++; lenDelta--;
+ ofst = rbuDeltaGetInt(&zDelta, &lenDelta);
+ if( lenDelta>0 && zDelta[0]!=',' ){
+ /* ERROR: copy command not terminated by ',' */
+ return -1;
+ }
+ zDelta++; lenDelta--;
+ total += cnt;
+ if( total>limit ){
+ /* ERROR: copy exceeds output file size */
+ return -1;
+ }
+ if( (int)(ofst+cnt) > lenSrc ){
+ /* ERROR: copy extends past end of input */
+ return -1;
+ }
+ memcpy(zOut, &zSrc[ofst], cnt);
+ zOut += cnt;
+ break;
+ }
+ case ':': {
+ zDelta++; lenDelta--;
+ total += cnt;
+ if( total>limit ){
+ /* ERROR: insert command gives an output larger than predicted */
+ return -1;
+ }
+ if( (int)cnt>lenDelta ){
+ /* ERROR: insert count exceeds size of delta */
+ return -1;
+ }
+ memcpy(zOut, zDelta, cnt);
+ zOut += cnt;
+ zDelta += cnt;
+ lenDelta -= cnt;
+ break;
+ }
+ case ';': {
+ zDelta++; lenDelta--;
+ zOut[0] = 0;
+#if RBU_ENABLE_DELTA_CKSUM
+ if( cnt!=rbuDeltaChecksum(zOrigOut, total) ){
+ /* ERROR: bad checksum */
+ return -1;
+ }
+#endif
+ if( total!=limit ){
+ /* ERROR: generated size does not match predicted size */
+ return -1;
+ }
+ return total;
+ }
+ default: {
+ /* ERROR: unknown delta operator */
+ return -1;
+ }
+ }
+ }
+ /* ERROR: unterminated delta */
+ return -1;
+}
+
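+/*
+** Return the size of the output buffer required to apply the delta in
+** zDelta (length lenDelta bytes), as encoded in the delta's size header,
+** or -1 if the size integer is not terminated by a newline character.
+*/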
+static int rbuDeltaOutputSize(const char *zDelta, int lenDelta){
+ int size;
+ size = rbuDeltaGetInt(&zDelta, &lenDelta);
+ if( *zDelta!='\n' ){
+ /* ERROR: size integer not terminated by "\n" */
+ return -1;
+ }
+ return size;
+}
+
+/*
+** End of code taken from fossil.
+*************************************************************************/
+
+/*
+** Implementation of SQL scalar function rbu_fossil_delta().
+**
+** This function applies a fossil delta patch to a blob. Exactly two
+** arguments must be passed to this function. The first is the blob to
+** patch and the second the patch to apply. If no error occurs, this
+** function returns the patched blob.
+*/
+static void rbuFossilDeltaFunc(
+ sqlite3_context *context,
+ int argc,
+ sqlite3_value **argv
+){
+ const char *aDelta;
+ int nDelta;
+ const char *aOrig;
+ int nOrig;
+
+ int nOut;
+ int nOut2;
+ char *aOut;
+
+ assert( argc==2 );
+
+ nOrig = sqlite3_value_bytes(argv[0]);
+ aOrig = (const char*)sqlite3_value_blob(argv[0]);
+ nDelta = sqlite3_value_bytes(argv[1]);
+ aDelta = (const char*)sqlite3_value_blob(argv[1]);
+
+ /* Figure out the size of the output */
+ nOut = rbuDeltaOutputSize(aDelta, nDelta);
+ if( nOut<0 ){
+ sqlite3_result_error(context, "corrupt fossil delta", -1);
+ return;
+ }
+
+ aOut = sqlite3_malloc(nOut+1);
+ if( aOut==0 ){
+ sqlite3_result_error_nomem(context);
+ }else{
+ nOut2 = rbuDeltaApply(aOrig, nOrig, aDelta, nDelta, aOut);
+ if( nOut2!=nOut ){
+ sqlite3_free(aOut);
+ sqlite3_result_error(context, "corrupt fossil delta", -1);
+ }else{
+ sqlite3_result_blob(context, aOut, nOut, sqlite3_free);
+ }
+ }
+}
+
+
+/*
+** Prepare the SQL statement in buffer zSql against database handle db.
+** If successful, set *ppStmt to point to the new statement and return
+** SQLITE_OK.
+**
+** Otherwise, if an error does occur, set *ppStmt to NULL and return
+** an SQLite error code. Additionally, set output variable *pzErrmsg to
+** point to a buffer containing an error message. It is the responsibility
+** of the caller to (eventually) free this buffer using sqlite3_free().
+*/
+static int prepareAndCollectError(
+ sqlite3 *db,
+ sqlite3_stmt **ppStmt,
+ char **pzErrmsg,
+ const char *zSql
+){
+ int rc = sqlite3_prepare_v2(db, zSql, -1, ppStmt, 0);
+ if( rc!=SQLITE_OK ){
+ *pzErrmsg = sqlite3_mprintf("%s", sqlite3_errmsg(db));
+ *ppStmt = 0;
+ }
+ return rc;
+}
+
+/*
+** Reset the SQL statement passed as the first argument. Return a copy
+** of the value returned by sqlite3_reset().
+**
+** If an error has occurred, then set *pzErrmsg to point to a buffer
+** containing an error message. It is the responsibility of the caller
+** to eventually free this buffer using sqlite3_free().
+*/
+static int resetAndCollectError(sqlite3_stmt *pStmt, char **pzErrmsg){
+ int rc = sqlite3_reset(pStmt);
+ if( rc!=SQLITE_OK ){
+ *pzErrmsg = sqlite3_mprintf("%s", sqlite3_errmsg(sqlite3_db_handle(pStmt)));
+ }
+ return rc;
+}
+
+/*
+** Unless it is NULL, argument zSql points to a buffer allocated using
+** sqlite3_malloc containing an SQL statement. This function prepares the SQL
+** statement against database db and frees the buffer. If statement
+** compilation is successful, *ppStmt is set to point to the new statement
+** handle and SQLITE_OK is returned.
+**
+** Otherwise, if an error occurs, *ppStmt is set to NULL and an error code
+** returned. In this case, *pzErrmsg may also be set to point to an error
+** message. It is the responsibility of the caller to free this error message
+** buffer using sqlite3_free().
+**
+** If argument zSql is NULL, this function assumes that an OOM has occurred.
+** In this case SQLITE_NOMEM is returned and *ppStmt set to NULL.
+*/
+static int prepareFreeAndCollectError(
+ sqlite3 *db,
+ sqlite3_stmt **ppStmt,
+ char **pzErrmsg,
+ char *zSql
+){
+ int rc;
+ assert( *pzErrmsg==0 );
+ if( zSql==0 ){
+ rc = SQLITE_NOMEM;
+ *ppStmt = 0;
+ }else{
+ rc = prepareAndCollectError(db, ppStmt, pzErrmsg, zSql);
+ sqlite3_free(zSql);
+ }
+ return rc;
+}
+
+/*
+** Free the RbuObjIter.azTblCol[] and RbuObjIter.abTblPk[] arrays allocated
+** by an earlier call to rbuObjIterCacheTableInfo().
+*/
+static void rbuObjIterFreeCols(RbuObjIter *pIter){
+ int i;
+ for(i=0; i<pIter->nTblCol; i++){
+ sqlite3_free(pIter->azTblCol[i]);
+ sqlite3_free(pIter->azTblType[i]);
+ }
+ sqlite3_free(pIter->azTblCol);
+ pIter->azTblCol = 0;
+ pIter->azTblType = 0;
+ pIter->aiSrcOrder = 0;
+ pIter->abTblPk = 0;
+ pIter->abNotNull = 0;
+ pIter->nTblCol = 0;
+ pIter->eType = 0; /* Invalid value */
+}
+
+/*
+** Finalize all statements and free all allocations that are specific to
+** the current object (table/index pair).
+*/
+static void rbuObjIterClearStatements(RbuObjIter *pIter){
+ RbuUpdateStmt *pUp;
+
+ sqlite3_finalize(pIter->pSelect);
+ sqlite3_finalize(pIter->pInsert);
+ sqlite3_finalize(pIter->pDelete);
+ sqlite3_finalize(pIter->pTmpInsert);
+ pUp = pIter->pRbuUpdate;
+ while( pUp ){
+ RbuUpdateStmt *pTmp = pUp->pNext;
+ sqlite3_finalize(pUp->pUpdate);
+ sqlite3_free(pUp);
+ pUp = pTmp;
+ }
+ sqlite3_free(pIter->aIdxCol);
+ sqlite3_free(pIter->zIdxSql);
+
+ pIter->pSelect = 0;
+ pIter->pInsert = 0;
+ pIter->pDelete = 0;
+ pIter->pRbuUpdate = 0;
+ pIter->pTmpInsert = 0;
+ pIter->nCol = 0;
+ pIter->nIdxCol = 0;
+ pIter->aIdxCol = 0;
+ pIter->zIdxSql = 0;
+}
+
+/*
+** Clean up any resources allocated as part of the iterator object passed
+** as the only argument.
+*/
+static void rbuObjIterFinalize(RbuObjIter *pIter){
+ rbuObjIterClearStatements(pIter);
+ sqlite3_finalize(pIter->pTblIter);
+ sqlite3_finalize(pIter->pIdxIter);
+ rbuObjIterFreeCols(pIter);
+ memset(pIter, 0, sizeof(RbuObjIter));
+}
+
+/*
+** Advance the iterator to the next position.
+**
+** If no error occurs, SQLITE_OK is returned and the iterator is left
+** pointing to the next entry. Otherwise, an error code and message is
+** left in the RBU handle passed as the first argument. A copy of the
+** error code is returned.
+*/
+static int rbuObjIterNext(sqlite3rbu *p, RbuObjIter *pIter){
+ int rc = p->rc;
+ if( rc==SQLITE_OK ){
+
+ /* Free any SQLite statements used while processing the previous object */
+ rbuObjIterClearStatements(pIter);
+ if( pIter->zIdx==0 ){
+ rc = sqlite3_exec(p->dbMain,
+ "DROP TRIGGER IF EXISTS temp.rbu_insert_tr;"
+ "DROP TRIGGER IF EXISTS temp.rbu_update1_tr;"
+ "DROP TRIGGER IF EXISTS temp.rbu_update2_tr;"
+ "DROP TRIGGER IF EXISTS temp.rbu_delete_tr;"
+ , 0, 0, &p->zErrmsg
+ );
+ }
+
+ if( rc==SQLITE_OK ){
+ if( pIter->bCleanup ){
+ rbuObjIterFreeCols(pIter);
+ pIter->bCleanup = 0;
+ rc = sqlite3_step(pIter->pTblIter);
+ if( rc!=SQLITE_ROW ){
+ rc = resetAndCollectError(pIter->pTblIter, &p->zErrmsg);
+ pIter->zTbl = 0;
+ }else{
+ pIter->zTbl = (const char*)sqlite3_column_text(pIter->pTblIter, 0);
+ pIter->zDataTbl = (const char*)sqlite3_column_text(pIter->pTblIter,1);
+ rc = (pIter->zDataTbl && pIter->zTbl) ? SQLITE_OK : SQLITE_NOMEM;
+ }
+ }else{
+ if( pIter->zIdx==0 ){
+ sqlite3_stmt *pIdx = pIter->pIdxIter;
+ rc = sqlite3_bind_text(pIdx, 1, pIter->zTbl, -1, SQLITE_STATIC);
+ }
+ if( rc==SQLITE_OK ){
+ rc = sqlite3_step(pIter->pIdxIter);
+ if( rc!=SQLITE_ROW ){
+ rc = resetAndCollectError(pIter->pIdxIter, &p->zErrmsg);
+ pIter->bCleanup = 1;
+ pIter->zIdx = 0;
+ }else{
+ pIter->zIdx = (const char*)sqlite3_column_text(pIter->pIdxIter, 0);
+ pIter->iTnum = sqlite3_column_int(pIter->pIdxIter, 1);
+ pIter->bUnique = sqlite3_column_int(pIter->pIdxIter, 2);
+ rc = pIter->zIdx ? SQLITE_OK : SQLITE_NOMEM;
+ }
+ }
+ }
+ }
+ }
+
+ if( rc!=SQLITE_OK ){
+ rbuObjIterFinalize(pIter);
+ p->rc = rc;
+ }
+ return rc;
+}
+
+
+/*
+** The implementation of the rbu_target_name() SQL function. This function
+** accepts one or two arguments. The first argument is the name of a table in
+** the RBU database. The second, if it is present, is 1 for a view or 0 for
+** a table.
+**
+** For a non-vacuum RBU handle, if the table name matches the pattern:
+**
+**     data[0-9]*_<name>
+**
+** where <name> is any sequence of 1 or more characters, <name> is returned.
+** Otherwise, if the only argument does not match the above pattern, an SQL
+** NULL is returned.
+**
+** "data_t1" -> "t1"
+** "data0123_t2" -> "t2"
+** "dataAB_t3" -> NULL
+**
+** For an rbu vacuum handle, a copy of the first argument is returned if
+** the second argument is either missing or 0 (not a view).
+*/
+static void rbuTargetNameFunc(
+ sqlite3_context *pCtx,
+ int argc,
+ sqlite3_value **argv
+){
+ sqlite3rbu *p = sqlite3_user_data(pCtx);
+ const char *zIn;
+ assert( argc==1 || argc==2 );
+
+ zIn = (const char*)sqlite3_value_text(argv[0]);
+ if( zIn ){
+ if( rbuIsVacuum(p) ){
+ assert( argc==2 || argc==1 );
+ if( argc==1 || 0==sqlite3_value_int(argv[1]) ){
+ sqlite3_result_text(pCtx, zIn, -1, SQLITE_STATIC);
+ }
+ }else{
+ if( strlen(zIn)>4 && memcmp("data", zIn, 4)==0 ){
+ int i;
+ for(i=4; zIn[i]>='0' && zIn[i]<='9'; i++);
+ if( zIn[i]=='_' && zIn[i+1] ){
+ sqlite3_result_text(pCtx, &zIn[i+1], -1, SQLITE_STATIC);
+ }
+ }
+ }
+ }
+}
+
+/*
+** Initialize the iterator structure passed as the second argument.
+**
+** If no error occurs, SQLITE_OK is returned and the iterator is left
+** pointing to the first entry. Otherwise, an error code and message is
+** left in the RBU handle passed as the first argument. A copy of the
+** error code is returned.
+*/
+static int rbuObjIterFirst(sqlite3rbu *p, RbuObjIter *pIter){
+ int rc;
+ memset(pIter, 0, sizeof(RbuObjIter));
+
+ rc = prepareFreeAndCollectError(p->dbRbu, &pIter->pTblIter, &p->zErrmsg,
+ sqlite3_mprintf(
+ "SELECT rbu_target_name(name, type='view') AS target, name "
+ "FROM sqlite_schema "
+ "WHERE type IN ('table', 'view') AND target IS NOT NULL "
+ " %s "
+ "ORDER BY name"
+ , rbuIsVacuum(p) ? "AND rootpage!=0 AND rootpage IS NOT NULL" : ""));
+
+ if( rc==SQLITE_OK ){
+ rc = prepareAndCollectError(p->dbMain, &pIter->pIdxIter, &p->zErrmsg,
+      "SELECT name, rootpage, sql IS NULL OR substr(sql, 8, 6)=='UNIQUE' "
+ " FROM main.sqlite_schema "
+ " WHERE type='index' AND tbl_name = ?"
+ );
+ }
+
+ pIter->bCleanup = 1;
+ p->rc = rc;
+ return rbuObjIterNext(p, pIter);
+}
+
+/*
+** This is a wrapper around "sqlite3_mprintf(zFmt, ...)". If an OOM occurs,
+** an error code is stored in the RBU handle passed as the first argument.
+**
+** If an error has already occurred (p->rc is already set to something other
+** than SQLITE_OK), then this function returns NULL without modifying the
+** stored error code. In this case it still calls sqlite3_free() on any
+** printf() parameters associated with %z conversions.
+*/
+static char *rbuMPrintf(sqlite3rbu *p, const char *zFmt, ...){
+ char *zSql = 0;
+ va_list ap;
+ va_start(ap, zFmt);
+ zSql = sqlite3_vmprintf(zFmt, ap);
+ if( p->rc==SQLITE_OK ){
+ if( zSql==0 ) p->rc = SQLITE_NOMEM;
+ }else{
+ sqlite3_free(zSql);
+ zSql = 0;
+ }
+ va_end(ap);
+ return zSql;
+}
+
+/*
+** Argument zFmt is a sqlite3_mprintf() style format string. The trailing
+** arguments are the usual substitution values. This function performs
+** the printf() style substitutions and executes the result as an SQL
+** statement on the RBU handle's database.
+**
+** If an error occurs, an error code and error message is stored in the
+** RBU handle. If an error has already occurred when this function is
+** called, it is a no-op.
+*/
+static int rbuMPrintfExec(sqlite3rbu *p, sqlite3 *db, const char *zFmt, ...){
+ va_list ap;
+ char *zSql;
+ va_start(ap, zFmt);
+ zSql = sqlite3_vmprintf(zFmt, ap);
+ if( p->rc==SQLITE_OK ){
+ if( zSql==0 ){
+ p->rc = SQLITE_NOMEM;
+ }else{
+ p->rc = sqlite3_exec(db, zSql, 0, 0, &p->zErrmsg);
+ }
+ }
+ sqlite3_free(zSql);
+ va_end(ap);
+ return p->rc;
+}
+
+/*
+** Attempt to allocate and return a pointer to a zeroed block of nByte
+** bytes.
+**
+** If an error (i.e. an OOM condition) occurs, return NULL and leave an
+** error code in the rbu handle passed as the first argument. Or, if an
+** error has already occurred when this function is called, return NULL
+** immediately without attempting the allocation or modifying the stored
+** error code.
+*/
+static void *rbuMalloc(sqlite3rbu *p, sqlite3_int64 nByte){
+ void *pRet = 0;
+ if( p->rc==SQLITE_OK ){
+ assert( nByte>0 );
+ pRet = sqlite3_malloc64(nByte);
+ if( pRet==0 ){
+ p->rc = SQLITE_NOMEM;
+ }else{
+ memset(pRet, 0, nByte);
+ }
+ }
+ return pRet;
+}
+
+
+/*
+** Allocate and zero the pIter->azTblCol[] and abTblPk[] arrays so that
+** there is room for at least nCol elements. If an OOM occurs, store an
+** error code in the RBU handle passed as the first argument.
+*/
+static void rbuAllocateIterArrays(sqlite3rbu *p, RbuObjIter *pIter, int nCol){
+ sqlite3_int64 nByte = (2*sizeof(char*) + sizeof(int) + 3*sizeof(u8)) * nCol;
+ char **azNew;
+
+ azNew = (char**)rbuMalloc(p, nByte);
+ if( azNew ){
+ pIter->azTblCol = azNew;
+ pIter->azTblType = &azNew[nCol];
+ pIter->aiSrcOrder = (int*)&pIter->azTblType[nCol];
+ pIter->abTblPk = (u8*)&pIter->aiSrcOrder[nCol];
+ pIter->abNotNull = (u8*)&pIter->abTblPk[nCol];
+ pIter->abIndexed = (u8*)&pIter->abNotNull[nCol];
+ }
+}
+
+/*
+** The first argument must be a nul-terminated string. This function
+** returns a copy of the string in memory obtained from sqlite3_malloc().
+** It is the responsibility of the caller to eventually free this memory
+** using sqlite3_free().
+**
+** If an OOM condition is encountered when attempting to allocate memory,
+** output variable (*pRc) is set to SQLITE_NOMEM before returning. Otherwise,
+** if the allocation succeeds, (*pRc) is left unchanged.
+*/
+static char *rbuStrndup(const char *zStr, int *pRc){
+ char *zRet = 0;
+
+ if( *pRc==SQLITE_OK ){
+ if( zStr ){
+ size_t nCopy = strlen(zStr) + 1;
+ zRet = (char*)sqlite3_malloc64(nCopy);
+ if( zRet ){
+ memcpy(zRet, zStr, nCopy);
+ }else{
+ *pRc = SQLITE_NOMEM;
+ }
+ }
+ }
+
+ return zRet;
+}
+
+/*
+** Finalize the statement passed as the second argument.
+**
+** If the sqlite3_finalize() call indicates that an error occurred, and the
+** rbu handle error code is not already set, set the error code and error
+** message accordingly.
+*/
+static void rbuFinalize(sqlite3rbu *p, sqlite3_stmt *pStmt){
+ sqlite3 *db = sqlite3_db_handle(pStmt);
+ int rc = sqlite3_finalize(pStmt);
+ if( p->rc==SQLITE_OK && rc!=SQLITE_OK ){
+ p->rc = rc;
+ p->zErrmsg = sqlite3_mprintf("%s", sqlite3_errmsg(db));
+ }
+}
+
+/* Determine the type of a table.
+**
+** peType is of type (int*), a pointer to an output parameter of type
+** (int). This call sets the output parameter as follows, depending
+** on the type of the table specified by parameters dbName and zTbl.
+**
+** RBU_PK_NOTABLE: No such table.
+** RBU_PK_NONE: Table has an implicit rowid.
+** RBU_PK_IPK: Table has an explicit IPK column.
+** RBU_PK_EXTERNAL: Table has an external PK index.
+** RBU_PK_WITHOUT_ROWID: Table is WITHOUT ROWID.
+** RBU_PK_VTAB: Table is a virtual table.
+**
+** Argument *piPk is also of type (int*), and also points to an output
+** parameter. Unless the table has an external primary key index
+** (i.e. unless *peType is set to 3), then *piPk is set to zero. Or,
+** if the table does have an external primary key index, then *piPk
+** is set to the root page number of the primary key index before
+** returning.
+**
+** ALGORITHM:
+**
+** if( no entry exists in sqlite_schema ){
+** return RBU_PK_NOTABLE
+** }else if( sql for the entry starts with "CREATE VIRTUAL" ){
+** return RBU_PK_VTAB
+** }else if( "PRAGMA index_list()" for the table contains a "pk" index ){
+** if( the index that is the pk exists in sqlite_schema ){
+** *piPK = rootpage of that index.
+** return RBU_PK_EXTERNAL
+** }else{
+** return RBU_PK_WITHOUT_ROWID
+** }
+** }else if( "PRAGMA table_info()" lists one or more "pk" columns ){
+** return RBU_PK_IPK
+** }else{
+** return RBU_PK_NONE
+** }
+*/
+static void rbuTableType(
+ sqlite3rbu *p,
+ const char *zTab,
+ int *peType,
+ int *piTnum,
+ int *piPk
+){
+ /*
+ ** 0) SELECT count(*) FROM sqlite_schema where name=%Q AND IsVirtual(%Q)
+ ** 1) PRAGMA index_list = ?
+ ** 2) SELECT count(*) FROM sqlite_schema where name=%Q
+ ** 3) PRAGMA table_info = ?
+ */
+ sqlite3_stmt *aStmt[4] = {0, 0, 0, 0};
+
+ *peType = RBU_PK_NOTABLE;
+ *piPk = 0;
+
+ assert( p->rc==SQLITE_OK );
+ p->rc = prepareFreeAndCollectError(p->dbMain, &aStmt[0], &p->zErrmsg,
+ sqlite3_mprintf(
+ "SELECT (sql LIKE 'create virtual%%'), rootpage"
+ " FROM sqlite_schema"
+ " WHERE name=%Q", zTab
+ ));
+ if( p->rc!=SQLITE_OK || sqlite3_step(aStmt[0])!=SQLITE_ROW ){
+ /* Either an error, or no such table. */
+ goto rbuTableType_end;
+ }
+ if( sqlite3_column_int(aStmt[0], 0) ){
+ *peType = RBU_PK_VTAB; /* virtual table */
+ goto rbuTableType_end;
+ }
+ *piTnum = sqlite3_column_int(aStmt[0], 1);
+
+ p->rc = prepareFreeAndCollectError(p->dbMain, &aStmt[1], &p->zErrmsg,
+ sqlite3_mprintf("PRAGMA index_list=%Q",zTab)
+ );
+ if( p->rc ) goto rbuTableType_end;
+ while( sqlite3_step(aStmt[1])==SQLITE_ROW ){
+ const u8 *zOrig = sqlite3_column_text(aStmt[1], 3);
+ const u8 *zIdx = sqlite3_column_text(aStmt[1], 1);
+ if( zOrig && zIdx && zOrig[0]=='p' ){
+ p->rc = prepareFreeAndCollectError(p->dbMain, &aStmt[2], &p->zErrmsg,
+ sqlite3_mprintf(
+ "SELECT rootpage FROM sqlite_schema WHERE name = %Q", zIdx
+ ));
+ if( p->rc==SQLITE_OK ){
+ if( sqlite3_step(aStmt[2])==SQLITE_ROW ){
+ *piPk = sqlite3_column_int(aStmt[2], 0);
+ *peType = RBU_PK_EXTERNAL;
+ }else{
+ *peType = RBU_PK_WITHOUT_ROWID;
+ }
+ }
+ goto rbuTableType_end;
+ }
+ }
+
+ p->rc = prepareFreeAndCollectError(p->dbMain, &aStmt[3], &p->zErrmsg,
+ sqlite3_mprintf("PRAGMA table_info=%Q",zTab)
+ );
+ if( p->rc==SQLITE_OK ){
+ while( sqlite3_step(aStmt[3])==SQLITE_ROW ){
+ if( sqlite3_column_int(aStmt[3],5)>0 ){
+ *peType = RBU_PK_IPK; /* explicit IPK column */
+ goto rbuTableType_end;
+ }
+ }
+ *peType = RBU_PK_NONE;
+ }
+
+rbuTableType_end: {
+ unsigned int i;
+ for(i=0; i<sizeof(aStmt)/sizeof(aStmt[0]); i++){
+ rbuFinalize(p, aStmt[i]);
+ }
+ }
+}
+
+/*
+** This is a helper function for rbuObjIterCacheTableInfo(). It populates
+** the pIter->abIndexed[] array.
+*/
+static void rbuObjIterCacheIndexedCols(sqlite3rbu *p, RbuObjIter *pIter){
+ sqlite3_stmt *pList = 0;
+ int bIndex = 0;
+
+ if( p->rc==SQLITE_OK ){
+ memcpy(pIter->abIndexed, pIter->abTblPk, sizeof(u8)*pIter->nTblCol);
+ p->rc = prepareFreeAndCollectError(p->dbMain, &pList, &p->zErrmsg,
+ sqlite3_mprintf("PRAGMA main.index_list = %Q", pIter->zTbl)
+ );
+ }
+
+ pIter->nIndex = 0;
+ while( p->rc==SQLITE_OK && SQLITE_ROW==sqlite3_step(pList) ){
+ const char *zIdx = (const char*)sqlite3_column_text(pList, 1);
+ int bPartial = sqlite3_column_int(pList, 4);
+ sqlite3_stmt *pXInfo = 0;
+ if( zIdx==0 ) break;
+ if( bPartial ){
+ memset(pIter->abIndexed, 0x01, sizeof(u8)*pIter->nTblCol);
+ }
+ p->rc = prepareFreeAndCollectError(p->dbMain, &pXInfo, &p->zErrmsg,
+ sqlite3_mprintf("PRAGMA main.index_xinfo = %Q", zIdx)
+ );
+ while( p->rc==SQLITE_OK && SQLITE_ROW==sqlite3_step(pXInfo) ){
+ int iCid = sqlite3_column_int(pXInfo, 1);
+ if( iCid>=0 ) pIter->abIndexed[iCid] = 1;
+ if( iCid==-2 ){
+ memset(pIter->abIndexed, 0x01, sizeof(u8)*pIter->nTblCol);
+ }
+ }
+ rbuFinalize(p, pXInfo);
+ bIndex = 1;
+ pIter->nIndex++;
+ }
+
+ if( pIter->eType==RBU_PK_WITHOUT_ROWID ){
+ /* "PRAGMA index_list" includes the main PK b-tree */
+ pIter->nIndex--;
+ }
+
+ rbuFinalize(p, pList);
+ if( bIndex==0 ) pIter->abIndexed = 0;
+}
+
+
+/*
+** If they are not already populated, populate the pIter->azTblCol[],
+** pIter->abTblPk[], pIter->nTblCol and pIter->bRowid variables according to
+** the table (not index) that the iterator currently points to.
+**
+** Return SQLITE_OK if successful, or an SQLite error code otherwise. If
+** an error does occur, an error code and error message are also left in
+** the RBU handle.
+*/
+static int rbuObjIterCacheTableInfo(sqlite3rbu *p, RbuObjIter *pIter){
+ if( pIter->azTblCol==0 ){
+ sqlite3_stmt *pStmt = 0;
+ int nCol = 0;
+ int i; /* for() loop iterator variable */
+ int bRbuRowid = 0; /* If input table has column "rbu_rowid" */
+ int iOrder = 0;
+ int iTnum = 0;
+
+ /* Figure out the type of table this step will deal with. */
+ assert( pIter->eType==0 );
+ rbuTableType(p, pIter->zTbl, &pIter->eType, &iTnum, &pIter->iPkTnum);
+ if( p->rc==SQLITE_OK && pIter->eType==RBU_PK_NOTABLE ){
+ p->rc = SQLITE_ERROR;
+ p->zErrmsg = sqlite3_mprintf("no such table: %s", pIter->zTbl);
+ }
+ if( p->rc ) return p->rc;
+ if( pIter->zIdx==0 ) pIter->iTnum = iTnum;
+
+ assert( pIter->eType==RBU_PK_NONE || pIter->eType==RBU_PK_IPK
+ || pIter->eType==RBU_PK_EXTERNAL || pIter->eType==RBU_PK_WITHOUT_ROWID
+ || pIter->eType==RBU_PK_VTAB
+ );
+
+ /* Populate the azTblCol[] and nTblCol variables based on the columns
+ ** of the input table. Ignore any input table columns that begin with
+ ** "rbu_". */
+ p->rc = prepareFreeAndCollectError(p->dbRbu, &pStmt, &p->zErrmsg,
+ sqlite3_mprintf("SELECT * FROM '%q'", pIter->zDataTbl)
+ );
+ if( p->rc==SQLITE_OK ){
+ nCol = sqlite3_column_count(pStmt);
+ rbuAllocateIterArrays(p, pIter, nCol);
+ }
+ for(i=0; p->rc==SQLITE_OK && i<nCol; i++){
+ const char *zName = (const char*)sqlite3_column_name(pStmt, i);
+ if( sqlite3_strnicmp("rbu_", zName, 4) ){
+ char *zCopy = rbuStrndup(zName, &p->rc);
+ pIter->aiSrcOrder[pIter->nTblCol] = pIter->nTblCol;
+ pIter->azTblCol[pIter->nTblCol++] = zCopy;
+ }
+ else if( 0==sqlite3_stricmp("rbu_rowid", zName) ){
+ bRbuRowid = 1;
+ }
+ }
+ sqlite3_finalize(pStmt);
+ pStmt = 0;
+
+ if( p->rc==SQLITE_OK
+ && rbuIsVacuum(p)==0
+ && bRbuRowid!=(pIter->eType==RBU_PK_VTAB || pIter->eType==RBU_PK_NONE)
+ ){
+ p->rc = SQLITE_ERROR;
+ p->zErrmsg = sqlite3_mprintf(
+ "table %q %s rbu_rowid column", pIter->zDataTbl,
+ (bRbuRowid ? "may not have" : "requires")
+ );
+ }
+
+ /* Check that all non-HIDDEN columns in the destination table are also
+ ** present in the input table. Populate the abTblPk[], azTblType[] and
+    ** aiSrcOrder[] arrays at the same time. */
+ if( p->rc==SQLITE_OK ){
+ p->rc = prepareFreeAndCollectError(p->dbMain, &pStmt, &p->zErrmsg,
+ sqlite3_mprintf("PRAGMA table_info(%Q)", pIter->zTbl)
+ );
+ }
+ while( p->rc==SQLITE_OK && SQLITE_ROW==sqlite3_step(pStmt) ){
+ const char *zName = (const char*)sqlite3_column_text(pStmt, 1);
+      if( zName==0 ) break; /* An OOM - finalize() below returns SQLITE_NOMEM */
+ for(i=iOrder; i<pIter->nTblCol; i++){
+ if( 0==strcmp(zName, pIter->azTblCol[i]) ) break;
+ }
+ if( i==pIter->nTblCol ){
+ p->rc = SQLITE_ERROR;
+ p->zErrmsg = sqlite3_mprintf("column missing from %q: %s",
+ pIter->zDataTbl, zName
+ );
+ }else{
+ int iPk = sqlite3_column_int(pStmt, 5);
+ int bNotNull = sqlite3_column_int(pStmt, 3);
+ const char *zType = (const char*)sqlite3_column_text(pStmt, 2);
+
+ if( i!=iOrder ){
+ SWAP(int, pIter->aiSrcOrder[i], pIter->aiSrcOrder[iOrder]);
+ SWAP(char*, pIter->azTblCol[i], pIter->azTblCol[iOrder]);
+ }
+
+ pIter->azTblType[iOrder] = rbuStrndup(zType, &p->rc);
+ assert( iPk>=0 );
+ pIter->abTblPk[iOrder] = (u8)iPk;
+ pIter->abNotNull[iOrder] = (u8)bNotNull || (iPk!=0);
+ iOrder++;
+ }
+ }
+
+ rbuFinalize(p, pStmt);
+ rbuObjIterCacheIndexedCols(p, pIter);
+ assert( pIter->eType!=RBU_PK_VTAB || pIter->abIndexed==0 );
+ assert( pIter->eType!=RBU_PK_VTAB || pIter->nIndex==0 );
+ }
+
+ return p->rc;
+}
+
+/*
+** This function constructs and returns a pointer to a nul-terminated
+** string containing some SQL clause or list based on one or more of the
+** column names currently stored in the pIter->azTblCol[] array.
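+**
+** For example, if the current table columns are "a", "b" and "c", the
+** returned list is:
+**
+**     "a", "b", "c"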
+*/
+static char *rbuObjIterGetCollist(
+ sqlite3rbu *p, /* RBU object */
+ RbuObjIter *pIter /* Object iterator for column names */
+){
+ char *zList = 0;
+ const char *zSep = "";
+ int i;
+ for(i=0; i<pIter->nTblCol; i++){
+ const char *z = pIter->azTblCol[i];
+ zList = rbuMPrintf(p, "%z%s\"%w\"", zList, zSep, z);
+ zSep = ", ";
+ }
+ return zList;
+}
+
+/*
+** Return a comma separated list of the quoted PRIMARY KEY column names,
+** in order, for the current table. Before each column name, add the text
+** zPre. After each column name, add the zPost text. Use zSeparator as
+** the separator text (usually ", ").
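+**
+** For example, given a table declared as (an illustrative schema):
+**
+**     CREATE TABLE t1(a, b, c, PRIMARY KEY(c, b));
+**
+** a call with zPre="", zSeparator=", " and zPost=" DESC" returns:
+**
+**     "c" DESC, "b" DESC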
+*/
+static char *rbuObjIterGetPkList(
+ sqlite3rbu *p, /* RBU object */
+ RbuObjIter *pIter, /* Object iterator for column names */
+ const char *zPre, /* Before each quoted column name */
+ const char *zSeparator, /* Separator to use between columns */
+ const char *zPost /* After each quoted column name */
+){
+ int iPk = 1;
+ char *zRet = 0;
+ const char *zSep = "";
+ while( 1 ){
+ int i;
+ for(i=0; i<pIter->nTblCol; i++){
+ if( (int)pIter->abTblPk[i]==iPk ){
+ const char *zCol = pIter->azTblCol[i];
+ zRet = rbuMPrintf(p, "%z%s%s\"%w\"%s", zRet, zSep, zPre, zCol, zPost);
+ zSep = zSeparator;
+ break;
+ }
+ }
+ if( i==pIter->nTblCol ) break;
+ iPk++;
+ }
+ return zRet;
+}
+
+/*
+** This function is called as part of restarting an RBU vacuum within
+** stage 1 of the process (while the *-oal file is being built) while
+** updating a table (not an index). The table may be a rowid table or
+** a WITHOUT ROWID table. It queries the target database to find the
+** largest key that has already been written to the target table and
+** constructs a WHERE clause that can be used to extract the remaining
+** rows from the source table. For a rowid table, the WHERE clause
+** is of the form:
+**
+** "WHERE _rowid_ > ?"
+**
+** and for WITHOUT ROWID tables:
+**
+** "WHERE (key1, key2) > (?, ?)"
+**
+** Instead of "?" placeholders, the actual WHERE clauses created by
+** this function contain literal SQL values.
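+**
+** For example (values illustrative only), if the largest key already
+** written to a WITHOUT ROWID table with PRIMARY KEY(k1, k2) is
+** (5, 'five'), the returned clause is:
+**
+**   " WHERE ("k1", "k2") > (5,'five') "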
+*/
+static char *rbuVacuumTableStart(
+ sqlite3rbu *p, /* RBU handle */
+ RbuObjIter *pIter, /* RBU iterator object */
+ int bRowid, /* True for a rowid table */
+ const char *zWrite /* Target table name prefix */
+){
+ sqlite3_stmt *pMax = 0;
+ char *zRet = 0;
+ if( bRowid ){
+ p->rc = prepareFreeAndCollectError(p->dbMain, &pMax, &p->zErrmsg,
+ sqlite3_mprintf(
+ "SELECT max(_rowid_) FROM \"%s%w\"", zWrite, pIter->zTbl
+ )
+ );
+ if( p->rc==SQLITE_OK && SQLITE_ROW==sqlite3_step(pMax) ){
+ sqlite3_int64 iMax = sqlite3_column_int64(pMax, 0);
+ zRet = rbuMPrintf(p, " WHERE _rowid_ > %lld ", iMax);
+ }
+ rbuFinalize(p, pMax);
+ }else{
+ char *zOrder = rbuObjIterGetPkList(p, pIter, "", ", ", " DESC");
+ char *zSelect = rbuObjIterGetPkList(p, pIter, "quote(", "||','||", ")");
+ char *zList = rbuObjIterGetPkList(p, pIter, "", ", ", "");
+
+ if( p->rc==SQLITE_OK ){
+ p->rc = prepareFreeAndCollectError(p->dbMain, &pMax, &p->zErrmsg,
+ sqlite3_mprintf(
+ "SELECT %s FROM \"%s%w\" ORDER BY %s LIMIT 1",
+ zSelect, zWrite, pIter->zTbl, zOrder
+ )
+ );
+ if( p->rc==SQLITE_OK && SQLITE_ROW==sqlite3_step(pMax) ){
+ const char *zVal = (const char*)sqlite3_column_text(pMax, 0);
+ zRet = rbuMPrintf(p, " WHERE (%s) > (%s) ", zList, zVal);
+ }
+ rbuFinalize(p, pMax);
+ }
+
+ sqlite3_free(zOrder);
+ sqlite3_free(zSelect);
+ sqlite3_free(zList);
+ }
+ return zRet;
+}
+
+/*
+** This function is called as part of restarting an RBU vacuum when the
+** current operation is writing content to an index. If possible, it
+** queries the target index b-tree for the largest key already written to
+** it, then composes and returns an expression that can be used in a WHERE
+** clause to select the remaining required rows from the source table.
+** It is only possible to return such an expression if:
+**
+** * The index contains no DESC columns, and
+** * The last key written to the index before the operation was
+** suspended does not contain any NULL values.
+**
+** The expression is of the form:
+**
+** (index-field1, index-field2, ...) > (?, ?, ...)
+**
+** except that the "?" placeholders are replaced with literal values.
+**
+** If the expression cannot be created, NULL is returned. In this case,
+** the caller has to use an OFFSET clause to extract only the required
+** rows from the source table, just as it does for an RBU update operation.
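+**
+** For example (values illustrative only), for an index on columns (a, b)
+** whose largest already-written key is (10, 'ten'), the returned
+** expression is roughly:
+**
+**   ("a" COLLATE 'BINARY', "b" COLLATE 'BINARY') > (10, 'ten')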
+*/
+static char *rbuVacuumIndexStart(
+ sqlite3rbu *p, /* RBU handle */
+ RbuObjIter *pIter /* RBU iterator object */
+){
+ char *zOrder = 0;
+ char *zLhs = 0;
+ char *zSelect = 0;
+ char *zVector = 0;
+ char *zRet = 0;
+ int bFailed = 0;
+ const char *zSep = "";
+ int iCol = 0;
+ sqlite3_stmt *pXInfo = 0;
+
+ p->rc = prepareFreeAndCollectError(p->dbMain, &pXInfo, &p->zErrmsg,
+ sqlite3_mprintf("PRAGMA main.index_xinfo = %Q", pIter->zIdx)
+ );
+ while( p->rc==SQLITE_OK && SQLITE_ROW==sqlite3_step(pXInfo) ){
+ int iCid = sqlite3_column_int(pXInfo, 1);
+ const char *zCollate = (const char*)sqlite3_column_text(pXInfo, 4);
+ const char *zCol;
+ if( sqlite3_column_int(pXInfo, 3) ){
+ bFailed = 1;
+ break;
+ }
+
+ if( iCid<0 ){
+ if( pIter->eType==RBU_PK_IPK ){
+ int i;
+ for(i=0; pIter->abTblPk[i]==0; i++);
+ assert( i<pIter->nTblCol );
+ zCol = pIter->azTblCol[i];
+ }else{
+ zCol = "_rowid_";
+ }
+ }else{
+ zCol = pIter->azTblCol[iCid];
+ }
+
+ zLhs = rbuMPrintf(p, "%z%s \"%w\" COLLATE %Q",
+ zLhs, zSep, zCol, zCollate
+ );
+ zOrder = rbuMPrintf(p, "%z%s \"rbu_imp_%d%w\" COLLATE %Q DESC",
+ zOrder, zSep, iCol, zCol, zCollate
+ );
+ zSelect = rbuMPrintf(p, "%z%s quote(\"rbu_imp_%d%w\")",
+ zSelect, zSep, iCol, zCol
+ );
+ zSep = ", ";
+ iCol++;
+ }
+ rbuFinalize(p, pXInfo);
+ if( bFailed ) goto index_start_out;
+
+ if( p->rc==SQLITE_OK ){
+ sqlite3_stmt *pSel = 0;
+
+ p->rc = prepareFreeAndCollectError(p->dbMain, &pSel, &p->zErrmsg,
+ sqlite3_mprintf("SELECT %s FROM \"rbu_imp_%w\" ORDER BY %s LIMIT 1",
+ zSelect, pIter->zTbl, zOrder
+ )
+ );
+ if( p->rc==SQLITE_OK && SQLITE_ROW==sqlite3_step(pSel) ){
+ zSep = "";
+ for(iCol=0; iCol<pIter->nCol; iCol++){
+ const char *zQuoted = (const char*)sqlite3_column_text(pSel, iCol);
+ if( zQuoted[0]=='N' ){
+ bFailed = 1;
+ break;
+ }
+ zVector = rbuMPrintf(p, "%z%s%s", zVector, zSep, zQuoted);
+ zSep = ", ";
+ }
+
+ if( !bFailed ){
+ zRet = rbuMPrintf(p, "(%s) > (%s)", zLhs, zVector);
+ }
+ }
+ rbuFinalize(p, pSel);
+ }
+
+ index_start_out:
+ sqlite3_free(zOrder);
+ sqlite3_free(zSelect);
+ sqlite3_free(zVector);
+ sqlite3_free(zLhs);
+ return zRet;
+}
+
+/*
+** This function is used to create a SELECT list (the list of SQL
+** expressions that follows a SELECT keyword) for a SELECT statement
+** used to read from a data_xxx or rbu_tmp_xxx table while updating the
+** index object currently indicated by the iterator object passed as the
+** second argument. A "PRAGMA index_xinfo = <idxname>" statement is used
+** to obtain the required information.
+**
+** If the index is of the following form:
+**
+** CREATE INDEX i1 ON t1(c, b COLLATE nocase);
+**
+** and "t1" is a table with an explicit INTEGER PRIMARY KEY column
+** "ipk", the returned string is:
+**
+**   "c" COLLATE 'BINARY', "b" COLLATE 'NOCASE', "ipk" COLLATE 'BINARY'
+**
+** As well as the returned string, three other malloc'd strings are
+** returned via output parameters. As follows:
+**
+** pzImposterCols: ...
+** pzImposterPk: ...
+** pzWhere: ...
+*/
+static char *rbuObjIterGetIndexCols(
+ sqlite3rbu *p, /* RBU object */
+ RbuObjIter *pIter, /* Object iterator for column names */
+ char **pzImposterCols, /* OUT: Columns for imposter table */
+ char **pzImposterPk, /* OUT: Imposter PK clause */
+ char **pzWhere, /* OUT: WHERE clause */
+  int *pnBind /* OUT: Total number of columns */
+){
+ int rc = p->rc; /* Error code */
+ int rc2; /* sqlite3_finalize() return code */
+ char *zRet = 0; /* String to return */
+ char *zImpCols = 0; /* String to return via *pzImposterCols */
+ char *zImpPK = 0; /* String to return via *pzImposterPK */
+ char *zWhere = 0; /* String to return via *pzWhere */
+ int nBind = 0; /* Value to return via *pnBind */
+ const char *zCom = ""; /* Set to ", " later on */
+ const char *zAnd = ""; /* Set to " AND " later on */
+ sqlite3_stmt *pXInfo = 0; /* PRAGMA index_xinfo = ? */
+
+ if( rc==SQLITE_OK ){
+ assert( p->zErrmsg==0 );
+ rc = prepareFreeAndCollectError(p->dbMain, &pXInfo, &p->zErrmsg,
+ sqlite3_mprintf("PRAGMA main.index_xinfo = %Q", pIter->zIdx)
+ );
+ }
+
+ while( rc==SQLITE_OK && SQLITE_ROW==sqlite3_step(pXInfo) ){
+ int iCid = sqlite3_column_int(pXInfo, 1);
+ int bDesc = sqlite3_column_int(pXInfo, 3);
+ const char *zCollate = (const char*)sqlite3_column_text(pXInfo, 4);
+ const char *zCol = 0;
+ const char *zType;
+
+ if( iCid==-2 ){
+ int iSeq = sqlite3_column_int(pXInfo, 0);
+ zRet = sqlite3_mprintf("%z%s(%.*s) COLLATE %Q", zRet, zCom,
+ pIter->aIdxCol[iSeq].nSpan, pIter->aIdxCol[iSeq].zSpan, zCollate
+ );
+ zType = "";
+ }else {
+ if( iCid<0 ){
+ /* An integer primary key. If the table has an explicit IPK, use
+ ** its name. Otherwise, use "rbu_rowid". */
+ if( pIter->eType==RBU_PK_IPK ){
+ int i;
+ for(i=0; pIter->abTblPk[i]==0; i++);
+ assert( i<pIter->nTblCol );
+ zCol = pIter->azTblCol[i];
+ }else if( rbuIsVacuum(p) ){
+ zCol = "_rowid_";
+ }else{
+ zCol = "rbu_rowid";
+ }
+ zType = "INTEGER";
+ }else{
+ zCol = pIter->azTblCol[iCid];
+ zType = pIter->azTblType[iCid];
+ }
+ zRet = sqlite3_mprintf("%z%s\"%w\" COLLATE %Q", zRet, zCom,zCol,zCollate);
+ }
+
+ if( pIter->bUnique==0 || sqlite3_column_int(pXInfo, 5) ){
+ const char *zOrder = (bDesc ? " DESC" : "");
+ zImpPK = sqlite3_mprintf("%z%s\"rbu_imp_%d%w\"%s",
+ zImpPK, zCom, nBind, zCol, zOrder
+ );
+ }
+ zImpCols = sqlite3_mprintf("%z%s\"rbu_imp_%d%w\" %s COLLATE %Q",
+ zImpCols, zCom, nBind, zCol, zType, zCollate
+ );
+ zWhere = sqlite3_mprintf(
+ "%z%s\"rbu_imp_%d%w\" IS ?", zWhere, zAnd, nBind, zCol
+ );
+ if( zRet==0 || zImpPK==0 || zImpCols==0 || zWhere==0 ) rc = SQLITE_NOMEM;
+ zCom = ", ";
+ zAnd = " AND ";
+ nBind++;
+ }
+
+ rc2 = sqlite3_finalize(pXInfo);
+ if( rc==SQLITE_OK ) rc = rc2;
+
+ if( rc!=SQLITE_OK ){
+ sqlite3_free(zRet);
+ sqlite3_free(zImpCols);
+ sqlite3_free(zImpPK);
+ sqlite3_free(zWhere);
+ zRet = 0;
+ zImpCols = 0;
+ zImpPK = 0;
+ zWhere = 0;
+ p->rc = rc;
+ }
+
+ *pzImposterCols = zImpCols;
+ *pzImposterPk = zImpPK;
+ *pzWhere = zWhere;
+ *pnBind = nBind;
+ return zRet;
+}
+
+/*
+** Assuming the current table columns are "a", "b" and "c", and the zObj
+** parameter is passed "old", return a string of the form:
+**
+**     "old.a, old.b, old.c"
+**
+** with the column names escaped. Any column that is not required by an
+** index is replaced by the literal "NULL" in the returned list.
+**
+** For tables with implicit rowids - RBU_PK_EXTERNAL and RBU_PK_NONE - append
+** the text ", old._rowid_" to the returned value.
+*/
+static char *rbuObjIterGetOldlist(
+ sqlite3rbu *p,
+ RbuObjIter *pIter,
+ const char *zObj
+){
+ char *zList = 0;
+ if( p->rc==SQLITE_OK && pIter->abIndexed ){
+ const char *zS = "";
+ int i;
+ for(i=0; i<pIter->nTblCol; i++){
+ if( pIter->abIndexed[i] ){
+ const char *zCol = pIter->azTblCol[i];
+ zList = sqlite3_mprintf("%z%s%s.\"%w\"", zList, zS, zObj, zCol);
+ }else{
+ zList = sqlite3_mprintf("%z%sNULL", zList, zS);
+ }
+ zS = ", ";
+ if( zList==0 ){
+ p->rc = SQLITE_NOMEM;
+ break;
+ }
+ }
+
+ /* For a table with implicit rowids, append "old._rowid_" to the list. */
+ if( pIter->eType==RBU_PK_EXTERNAL || pIter->eType==RBU_PK_NONE ){
+ zList = rbuMPrintf(p, "%z, %s._rowid_", zList, zObj);
+ }
+ }
+ return zList;
+}
+
+/*
+** Return an expression that can be used in a WHERE clause to match the
+** primary key of the current table. For example, if the table is:
+**
+** CREATE TABLE t1(a, b, c, PRIMARY KEY(b, c));
+**
+** Return the string:
+**
+** "b = ?1 AND c = ?2"
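+**
+** Or, for a table with an external primary key index, such as
+** (an illustrative schema):
+**
+**   CREATE TABLE t2(a, b, c, PRIMARY KEY(b, c));
+**
+** return an expression that maps the key columns to a rowid via the
+** rbu_imposter2 table:
+**
+**   "_rowid_ = (SELECT id FROM rbu_imposter2 WHERE c1=?2 AND c2=?3)"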
+*/
+static char *rbuObjIterGetWhere(
+ sqlite3rbu *p,
+ RbuObjIter *pIter
+){
+ char *zList = 0;
+ if( pIter->eType==RBU_PK_VTAB || pIter->eType==RBU_PK_NONE ){
+ zList = rbuMPrintf(p, "_rowid_ = ?%d", pIter->nTblCol+1);
+ }else if( pIter->eType==RBU_PK_EXTERNAL ){
+ const char *zSep = "";
+ int i;
+ for(i=0; i<pIter->nTblCol; i++){
+ if( pIter->abTblPk[i] ){
+ zList = rbuMPrintf(p, "%z%sc%d=?%d", zList, zSep, i, i+1);
+ zSep = " AND ";
+ }
+ }
+ zList = rbuMPrintf(p,
+ "_rowid_ = (SELECT id FROM rbu_imposter2 WHERE %z)", zList
+ );
+
+ }else{
+ const char *zSep = "";
+ int i;
+ for(i=0; i<pIter->nTblCol; i++){
+ if( pIter->abTblPk[i] ){
+ const char *zCol = pIter->azTblCol[i];
+ zList = rbuMPrintf(p, "%z%s\"%w\"=?%d", zList, zSep, zCol, i+1);
+ zSep = " AND ";
+ }
+ }
+ }
+ return zList;
+}
+
+/*
+** The SELECT statement iterating through the keys for the current object
+** (p->objiter.pSelect) currently points to a valid row. However, there
+** is something wrong with the rbu_control value stored in the
+** (p->nCol+1)'th column. Set the error code and error message
+** of the RBU handle to something reflecting this.
+*/
+static void rbuBadControlError(sqlite3rbu *p){
+ p->rc = SQLITE_ERROR;
+ p->zErrmsg = sqlite3_mprintf("invalid rbu_control value");
+}
+
+
+/*
+** Return a nul-terminated string containing the comma separated list of
+** assignments that should be included following the "SET" keyword of
+** an UPDATE statement used to update the table object that the iterator
+** passed as the second argument currently points to if the rbu_control
+** column of the data_xxx table entry is set to zMask.
+**
+** The memory for the returned string is obtained from sqlite3_malloc().
+** It is the responsibility of the caller to eventually free it using
+** sqlite3_free().
+**
+** If an OOM error is encountered when allocating space for the new
+** string, an error code is left in the rbu handle passed as the first
+** argument and NULL is returned. Or, if an error has already occurred
+** when this function is called, NULL is returned immediately, without
+** attempting the allocation or modifying the stored error code.
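+**
+** For example (an illustrative case), for a table with columns "a", "b"
+** and "c", the mask "x.d" produces the list:
+**
+**   "a"=?1, "c"=rbu_delta("c", ?3)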
+*/
+static char *rbuObjIterGetSetlist(
+ sqlite3rbu *p,
+ RbuObjIter *pIter,
+ const char *zMask
+){
+ char *zList = 0;
+ if( p->rc==SQLITE_OK ){
+ int i;
+
+ if( (int)strlen(zMask)!=pIter->nTblCol ){
+ rbuBadControlError(p);
+ }else{
+ const char *zSep = "";
+ for(i=0; i<pIter->nTblCol; i++){
+ char c = zMask[pIter->aiSrcOrder[i]];
+ if( c=='x' ){
+ zList = rbuMPrintf(p, "%z%s\"%w\"=?%d",
+ zList, zSep, pIter->azTblCol[i], i+1
+ );
+ zSep = ", ";
+ }
+ else if( c=='d' ){
+ zList = rbuMPrintf(p, "%z%s\"%w\"=rbu_delta(\"%w\", ?%d)",
+ zList, zSep, pIter->azTblCol[i], pIter->azTblCol[i], i+1
+ );
+ zSep = ", ";
+ }
+ else if( c=='f' ){
+ zList = rbuMPrintf(p, "%z%s\"%w\"=rbu_fossil_delta(\"%w\", ?%d)",
+ zList, zSep, pIter->azTblCol[i], pIter->azTblCol[i], i+1
+ );
+ zSep = ", ";
+ }
+ }
+ }
+ }
+ return zList;
+}
+
+/*
+** Return a nul-terminated string consisting of nBind comma separated
+** "?" expressions. For example, if nBind is 3, return a pointer to
+** a buffer containing the string "?,?,?".
+**
+** The memory for the returned string is obtained from sqlite3_malloc().
+** It is the responsibility of the caller to eventually free it using
+** sqlite3_free().
+**
+** If an OOM error is encountered when allocating space for the new
+** string, an error code is left in the rbu handle passed as the first
+** argument and NULL is returned. Or, if an error has already occurred
+** when this function is called, NULL is returned immediately, without
+** attempting the allocation or modifying the stored error code.
+*/
+static char *rbuObjIterGetBindlist(sqlite3rbu *p, int nBind){
+ char *zRet = 0;
+ sqlite3_int64 nByte = 2*(sqlite3_int64)nBind + 1;
+
+ zRet = (char*)rbuMalloc(p, nByte);
+ if( zRet ){
+ int i;
+ for(i=0; i<nBind; i++){
+ zRet[i*2] = '?';
+ zRet[i*2+1] = (i+1==nBind) ? '\0' : ',';
+ }
+ }
+ return zRet;
+}
+
+/*
+** The iterator currently points to a table (not index) of type
+** RBU_PK_WITHOUT_ROWID. This function creates the PRIMARY KEY
+** declaration for the corresponding imposter table. For example,
+** if the iterator points to a table created as:
+**
+** CREATE TABLE t1(a, b, c, PRIMARY KEY(b, a DESC)) WITHOUT ROWID
+**
+** this function returns:
+**
+** PRIMARY KEY("b", "a" DESC)
+*/
+static char *rbuWithoutRowidPK(sqlite3rbu *p, RbuObjIter *pIter){
+ char *z = 0;
+ assert( pIter->zIdx==0 );
+ if( p->rc==SQLITE_OK ){
+ const char *zSep = "PRIMARY KEY(";
+ sqlite3_stmt *pXList = 0; /* PRAGMA index_list = (pIter->zTbl) */
+ sqlite3_stmt *pXInfo = 0; /* PRAGMA index_xinfo = <pk-index> */
+
+ p->rc = prepareFreeAndCollectError(p->dbMain, &pXList, &p->zErrmsg,
+ sqlite3_mprintf("PRAGMA main.index_list = %Q", pIter->zTbl)
+ );
+ while( p->rc==SQLITE_OK && SQLITE_ROW==sqlite3_step(pXList) ){
+ const char *zOrig = (const char*)sqlite3_column_text(pXList,3);
+ if( zOrig && strcmp(zOrig, "pk")==0 ){
+ const char *zIdx = (const char*)sqlite3_column_text(pXList,1);
+ if( zIdx ){
+ p->rc = prepareFreeAndCollectError(p->dbMain, &pXInfo, &p->zErrmsg,
+ sqlite3_mprintf("PRAGMA main.index_xinfo = %Q", zIdx)
+ );
+ }
+ break;
+ }
+ }
+ rbuFinalize(p, pXList);
+
+ while( p->rc==SQLITE_OK && SQLITE_ROW==sqlite3_step(pXInfo) ){
+ if( sqlite3_column_int(pXInfo, 5) ){
+ /* int iCid = sqlite3_column_int(pXInfo, 0); */
+ const char *zCol = (const char*)sqlite3_column_text(pXInfo, 2);
+ const char *zDesc = sqlite3_column_int(pXInfo, 3) ? " DESC" : "";
+ z = rbuMPrintf(p, "%z%s\"%w\"%s", z, zSep, zCol, zDesc);
+ zSep = ", ";
+ }
+ }
+ z = rbuMPrintf(p, "%z)", z);
+ rbuFinalize(p, pXInfo);
+ }
+ return z;
+}
+
+/*
+** This function creates the second imposter table used when writing to
+** a table b-tree where the table has an external primary key. If the
+** iterator passed as the second argument does not currently point to
+** a table (not index) with an external primary key, this function is a
+** no-op.
+**
+** Assuming the iterator does point to a table with an external PK, this
+** function creates a WITHOUT ROWID imposter table named "rbu_imposter2"
+** used to access that PK index. For example, if the target table is
+** declared as follows:
+**
+** CREATE TABLE t1(a, b TEXT, c REAL, PRIMARY KEY(b, c));
+**
+** then the imposter table schema is:
+**
+** CREATE TABLE rbu_imposter2(c1 TEXT, c2 REAL, id INTEGER) WITHOUT ROWID;
+**
+*/
+static void rbuCreateImposterTable2(sqlite3rbu *p, RbuObjIter *pIter){
+ if( p->rc==SQLITE_OK && pIter->eType==RBU_PK_EXTERNAL ){
+ int tnum = pIter->iPkTnum; /* Root page of PK index */
+ sqlite3_stmt *pQuery = 0; /* SELECT name ... WHERE rootpage = $tnum */
+ const char *zIdx = 0; /* Name of PK index */
+ sqlite3_stmt *pXInfo = 0; /* PRAGMA main.index_xinfo = $zIdx */
+ const char *zComma = "";
+ char *zCols = 0; /* Used to build up list of table cols */
+ char *zPk = 0; /* Used to build up table PK declaration */
+
+ /* Figure out the name of the primary key index for the current table.
+ ** This is needed for the argument to "PRAGMA index_xinfo". Set
+ ** zIdx to point to a nul-terminated string containing this name. */
+ p->rc = prepareAndCollectError(p->dbMain, &pQuery, &p->zErrmsg,
+ "SELECT name FROM sqlite_schema WHERE rootpage = ?"
+ );
+ if( p->rc==SQLITE_OK ){
+ sqlite3_bind_int(pQuery, 1, tnum);
+ if( SQLITE_ROW==sqlite3_step(pQuery) ){
+ zIdx = (const char*)sqlite3_column_text(pQuery, 0);
+ }
+ }
+ if( zIdx ){
+ p->rc = prepareFreeAndCollectError(p->dbMain, &pXInfo, &p->zErrmsg,
+ sqlite3_mprintf("PRAGMA main.index_xinfo = %Q", zIdx)
+ );
+ }
+ rbuFinalize(p, pQuery);
+
+ while( p->rc==SQLITE_OK && SQLITE_ROW==sqlite3_step(pXInfo) ){
+ int bKey = sqlite3_column_int(pXInfo, 5);
+ if( bKey ){
+ int iCid = sqlite3_column_int(pXInfo, 1);
+ int bDesc = sqlite3_column_int(pXInfo, 3);
+ const char *zCollate = (const char*)sqlite3_column_text(pXInfo, 4);
+ zCols = rbuMPrintf(p, "%z%sc%d %s COLLATE %Q", zCols, zComma,
+ iCid, pIter->azTblType[iCid], zCollate
+ );
+ zPk = rbuMPrintf(p, "%z%sc%d%s", zPk, zComma, iCid, bDesc?" DESC":"");
+ zComma = ", ";
+ }
+ }
+ zCols = rbuMPrintf(p, "%z, id INTEGER", zCols);
+ rbuFinalize(p, pXInfo);
+
+ sqlite3_test_control(SQLITE_TESTCTRL_IMPOSTER, p->dbMain, "main", 1, tnum);
+ rbuMPrintfExec(p, p->dbMain,
+ "CREATE TABLE rbu_imposter2(%z, PRIMARY KEY(%z)) WITHOUT ROWID",
+ zCols, zPk
+ );
+ sqlite3_test_control(SQLITE_TESTCTRL_IMPOSTER, p->dbMain, "main", 0, 0);
+ }
+}
+
+/*
+** If an error has already occurred when this function is called, it is
+** a no-op. Or, if an error occurs during its execution, it sets the
+** error code in the sqlite3rbu object indicated by the first argument
+** before returning.
+**
+** The iterator passed as the second argument is guaranteed to point to
+** a table (not an index) when this function is called. This function
+** creates any imposter table required to write to the main table b-tree
+** of that table before returning.
+**
+** An imposter table is required in all cases except RBU_PK_VTAB. Only
+** virtual tables are written to directly. The imposter table has the
+** same schema as the actual target table (less any UNIQUE constraints).
+** More precisely, the "same schema" means the same columns, types and
+** collation sequences. For tables that do not have an external PRIMARY
+** KEY, it also means the same PRIMARY KEY declaration.
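+**
+** For example (an illustrative schema), for a target table declared as:
+**
+**   CREATE TABLE t1(a INTEGER PRIMARY KEY, b TEXT NOT NULL);
+**
+** the imposter table created is roughly:
+**
+**   CREATE TABLE "rbu_imp_t1"(
+**     "a" INTEGER PRIMARY KEY COLLATE 'BINARY',
+**     "b" TEXT COLLATE 'BINARY' NOT NULL
+**   );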
+*/
+static void rbuCreateImposterTable(sqlite3rbu *p, RbuObjIter *pIter){
+ if( p->rc==SQLITE_OK && pIter->eType!=RBU_PK_VTAB ){
+ int tnum = pIter->iTnum;
+ const char *zComma = "";
+ char *zSql = 0;
+ int iCol;
+ sqlite3_test_control(SQLITE_TESTCTRL_IMPOSTER, p->dbMain, "main", 0, 1);
+
+ for(iCol=0; p->rc==SQLITE_OK && iCol<pIter->nTblCol; iCol++){
+ const char *zPk = "";
+ const char *zCol = pIter->azTblCol[iCol];
+ const char *zColl = 0;
+
+ p->rc = sqlite3_table_column_metadata(
+ p->dbMain, "main", pIter->zTbl, zCol, 0, &zColl, 0, 0, 0
+ );
+
+ if( pIter->eType==RBU_PK_IPK && pIter->abTblPk[iCol] ){
+ /* If the target table column is an "INTEGER PRIMARY KEY", add
+ ** "PRIMARY KEY" to the imposter table column declaration. */
+ zPk = "PRIMARY KEY ";
+ }
+ zSql = rbuMPrintf(p, "%z%s\"%w\" %s %sCOLLATE %Q%s",
+ zSql, zComma, zCol, pIter->azTblType[iCol], zPk, zColl,
+ (pIter->abNotNull[iCol] ? " NOT NULL" : "")
+ );
+ zComma = ", ";
+ }
+
+ if( pIter->eType==RBU_PK_WITHOUT_ROWID ){
+ char *zPk = rbuWithoutRowidPK(p, pIter);
+ if( zPk ){
+ zSql = rbuMPrintf(p, "%z, %z", zSql, zPk);
+ }
+ }
+
+ sqlite3_test_control(SQLITE_TESTCTRL_IMPOSTER, p->dbMain, "main", 1, tnum);
+ rbuMPrintfExec(p, p->dbMain, "CREATE TABLE \"rbu_imp_%w\"(%z)%s",
+ pIter->zTbl, zSql,
+ (pIter->eType==RBU_PK_WITHOUT_ROWID ? " WITHOUT ROWID" : "")
+ );
+ sqlite3_test_control(SQLITE_TESTCTRL_IMPOSTER, p->dbMain, "main", 0, 0);
+ }
+}
+
+/*
+** Prepare a statement used to insert rows into the "rbu_tmp_xxx" table.
+** Specifically a statement of the form:
+**
+** INSERT INTO rbu_tmp_xxx VALUES(?, ?, ? ...);
+**
+** The number of bound variables is equal to the number of columns in
+** the target table, plus one (for the rbu_control column), plus one more
+** (for the rbu_rowid column) if the target table has an external primary
+** key index or an implicit rowid (RBU_PK_EXTERNAL or RBU_PK_NONE).
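+**
+** For example (names illustrative only), for a data table "data_t1"
+** with columns (a, b) and an external primary key, the prepared
+** statement is of the form:
+**
+**   INSERT INTO stat.'rbu_tmp_data_t1'(rbu_control, "a", "b", rbu_rowid)
+**   VALUES(?, ?, ?, ?)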
+*/
+static void rbuObjIterPrepareTmpInsert(
+ sqlite3rbu *p,
+ RbuObjIter *pIter,
+ const char *zCollist,
+ const char *zRbuRowid
+){
+ int bRbuRowid = (pIter->eType==RBU_PK_EXTERNAL || pIter->eType==RBU_PK_NONE);
+ char *zBind = rbuObjIterGetBindlist(p, pIter->nTblCol + 1 + bRbuRowid);
+ if( zBind ){
+ assert( pIter->pTmpInsert==0 );
+ p->rc = prepareFreeAndCollectError(
+ p->dbRbu, &pIter->pTmpInsert, &p->zErrmsg, sqlite3_mprintf(
+ "INSERT INTO %s.'rbu_tmp_%q'(rbu_control,%s%s) VALUES(%z)",
+ p->zStateDb, pIter->zDataTbl, zCollist, zRbuRowid, zBind
+ ));
+ }
+}
+
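+/*
+** Implementation of the rbu_tmp_insert() SQL user function invoked by
+** the temp triggers created by rbuObjIterPrepareAll(). It binds its
+** arguments to the statement prepared by rbuObjIterPrepareTmpInsert()
+** and steps it, copying the row into the rbu_tmp_xxx table. The first
+** argument is the rbu_control value for the new row.
+*/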
+static void rbuTmpInsertFunc(
+ sqlite3_context *pCtx,
+ int nVal,
+ sqlite3_value **apVal
+){
+ sqlite3rbu *p = sqlite3_user_data(pCtx);
+ int rc = SQLITE_OK;
+ int i;
+
+ assert( sqlite3_value_int(apVal[0])!=0
+ || p->objiter.eType==RBU_PK_EXTERNAL
+ || p->objiter.eType==RBU_PK_NONE
+ );
+ if( sqlite3_value_int(apVal[0])!=0 ){
+ p->nPhaseOneStep += p->objiter.nIndex;
+ }
+
+ for(i=0; rc==SQLITE_OK && i<nVal; i++){
+ rc = sqlite3_bind_value(p->objiter.pTmpInsert, i+1, apVal[i]);
+ }
+ if( rc==SQLITE_OK ){
+ sqlite3_step(p->objiter.pTmpInsert);
+ rc = sqlite3_reset(p->objiter.pTmpInsert);
+ }
+
+ if( rc!=SQLITE_OK ){
+ sqlite3_result_error_code(pCtx, rc);
+ }
+}
+
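+/*
+** Locate the CREATE INDEX statement for the index that the iterator
+** currently points to within the sqlite_schema table. Populate the
+** pIter->aIdxCol[] array with the text spans of the indexed column
+** expressions and store a copy of the full SQL in pIter->zIdxSql.
+**
+** If the index is a partial index, return a copy of the text following
+** the parenthesized column list (the "WHERE ..." clause) in memory
+** obtained from sqlite3_malloc(). Otherwise, return NULL.
+*/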
+static char *rbuObjIterGetIndexWhere(sqlite3rbu *p, RbuObjIter *pIter){
+ sqlite3_stmt *pStmt = 0;
+ int rc = p->rc;
+ char *zRet = 0;
+
+ assert( pIter->zIdxSql==0 && pIter->nIdxCol==0 && pIter->aIdxCol==0 );
+
+ if( rc==SQLITE_OK ){
+ rc = prepareAndCollectError(p->dbMain, &pStmt, &p->zErrmsg,
+ "SELECT trim(sql) FROM sqlite_schema WHERE type='index' AND name=?"
+ );
+ }
+ if( rc==SQLITE_OK ){
+ int rc2;
+ rc = sqlite3_bind_text(pStmt, 1, pIter->zIdx, -1, SQLITE_STATIC);
+ if( rc==SQLITE_OK && SQLITE_ROW==sqlite3_step(pStmt) ){
+ char *zSql = (char*)sqlite3_column_text(pStmt, 0);
+ if( zSql ){
+ pIter->zIdxSql = zSql = rbuStrndup(zSql, &rc);
+ }
+ if( zSql ){
+ int nParen = 0; /* Number of open parenthesis */
+ int i;
+ int iIdxCol = 0;
+ int nIdxAlloc = 0;
+ for(i=0; zSql[i]; i++){
+ char c = zSql[i];
+
+ /* If necessary, grow the pIter->aIdxCol[] array */
+ if( iIdxCol==nIdxAlloc ){
+ RbuSpan *aIdxCol = (RbuSpan*)sqlite3_realloc(
+ pIter->aIdxCol, (nIdxAlloc+16)*sizeof(RbuSpan)
+ );
+ if( aIdxCol==0 ){
+ rc = SQLITE_NOMEM;
+ break;
+ }
+ pIter->aIdxCol = aIdxCol;
+ nIdxAlloc += 16;
+ }
+
+ if( c=='(' ){
+ if( nParen==0 ){
+ assert( iIdxCol==0 );
+ pIter->aIdxCol[0].zSpan = &zSql[i+1];
+ }
+ nParen++;
+ }
+ else if( c==')' ){
+ nParen--;
+ if( nParen==0 ){
+ int nSpan = &zSql[i] - pIter->aIdxCol[iIdxCol].zSpan;
+ pIter->aIdxCol[iIdxCol++].nSpan = nSpan;
+ i++;
+ break;
+ }
+ }else if( c==',' && nParen==1 ){
+ int nSpan = &zSql[i] - pIter->aIdxCol[iIdxCol].zSpan;
+ pIter->aIdxCol[iIdxCol++].nSpan = nSpan;
+ pIter->aIdxCol[iIdxCol].zSpan = &zSql[i+1];
+ }else if( c=='"' || c=='\'' || c=='`' ){
+ for(i++; 1; i++){
+ if( zSql[i]==c ){
+ if( zSql[i+1]!=c ) break;
+ i++;
+ }
+ }
+ }else if( c=='[' ){
+ for(i++; 1; i++){
+ if( zSql[i]==']' ) break;
+ }
+ }else if( c=='-' && zSql[i+1]=='-' ){
+ for(i=i+2; zSql[i] && zSql[i]!='\n'; i++);
+ if( zSql[i]=='\0' ) break;
+ }else if( c=='/' && zSql[i+1]=='*' ){
+ for(i=i+2; zSql[i] && (zSql[i]!='*' || zSql[i+1]!='/'); i++);
+ if( zSql[i]=='\0' ) break;
+ i++;
+ }
+ }
+ if( zSql[i] ){
+ zRet = rbuStrndup(&zSql[i], &rc);
+ }
+ pIter->nIdxCol = iIdxCol;
+ }
+ }
+
+ rc2 = sqlite3_finalize(pStmt);
+ if( rc==SQLITE_OK ) rc = rc2;
+ }
+
+ p->rc = rc;
+ return zRet;
+}
+
+/*
+** Ensure that the SQLite statement handles required to update the
+** target database object currently indicated by the iterator passed
+** as the second argument are available.
+*/
+static int rbuObjIterPrepareAll(
+ sqlite3rbu *p,
+ RbuObjIter *pIter,
+ int nOffset /* Add "LIMIT -1 OFFSET $nOffset" to SELECT */
+){
+ assert( pIter->bCleanup==0 );
+ if( pIter->pSelect==0 && rbuObjIterCacheTableInfo(p, pIter)==SQLITE_OK ){
+ const int tnum = pIter->iTnum;
+ char *zCollist = 0; /* List of indexed columns */
+ char **pz = &p->zErrmsg;
+ const char *zIdx = pIter->zIdx;
+ char *zLimit = 0;
+
+ if( nOffset ){
+ zLimit = sqlite3_mprintf(" LIMIT -1 OFFSET %d", nOffset);
+ if( !zLimit ) p->rc = SQLITE_NOMEM;
+ }
+
+ if( zIdx ){
+ const char *zTbl = pIter->zTbl;
+ char *zImposterCols = 0; /* Columns for imposter table */
+ char *zImposterPK = 0; /* Primary key declaration for imposter */
+ char *zWhere = 0; /* WHERE clause on PK columns */
+ char *zBind = 0;
+ char *zPart = 0;
+ int nBind = 0;
+
+ assert( pIter->eType!=RBU_PK_VTAB );
+ zPart = rbuObjIterGetIndexWhere(p, pIter);
+ zCollist = rbuObjIterGetIndexCols(
+ p, pIter, &zImposterCols, &zImposterPK, &zWhere, &nBind
+ );
+ zBind = rbuObjIterGetBindlist(p, nBind);
+
+ /* Create the imposter table used to write to this index. */
+ sqlite3_test_control(SQLITE_TESTCTRL_IMPOSTER, p->dbMain, "main", 0, 1);
+ sqlite3_test_control(SQLITE_TESTCTRL_IMPOSTER, p->dbMain, "main", 1,tnum);
+ rbuMPrintfExec(p, p->dbMain,
+ "CREATE TABLE \"rbu_imp_%w\"( %s, PRIMARY KEY( %s ) ) WITHOUT ROWID",
+ zTbl, zImposterCols, zImposterPK
+ );
+ sqlite3_test_control(SQLITE_TESTCTRL_IMPOSTER, p->dbMain, "main", 0, 0);
+
+ /* Create the statement to insert index entries */
+ pIter->nCol = nBind;
+ if( p->rc==SQLITE_OK ){
+ p->rc = prepareFreeAndCollectError(
+ p->dbMain, &pIter->pInsert, &p->zErrmsg,
+ sqlite3_mprintf("INSERT INTO \"rbu_imp_%w\" VALUES(%s)", zTbl, zBind)
+ );
+ }
+
+ /* And to delete index entries */
+ if( rbuIsVacuum(p)==0 && p->rc==SQLITE_OK ){
+ p->rc = prepareFreeAndCollectError(
+ p->dbMain, &pIter->pDelete, &p->zErrmsg,
+ sqlite3_mprintf("DELETE FROM \"rbu_imp_%w\" WHERE %s", zTbl, zWhere)
+ );
+ }
+
+ /* Create the SELECT statement to read keys in sorted order */
+ if( p->rc==SQLITE_OK ){
+ char *zSql;
+ if( rbuIsVacuum(p) ){
+ char *zStart = 0;
+ if( nOffset ){
+ zStart = rbuVacuumIndexStart(p, pIter);
+ if( zStart ){
+ sqlite3_free(zLimit);
+ zLimit = 0;
+ }
+ }
+
+ zSql = sqlite3_mprintf(
+ "SELECT %s, 0 AS rbu_control FROM '%q' %s %s %s ORDER BY %s%s",
+ zCollist,
+ pIter->zDataTbl,
+ zPart,
+ (zStart ? (zPart ? "AND" : "WHERE") : ""), zStart,
+ zCollist, zLimit
+ );
+ sqlite3_free(zStart);
+ }else
+
+ if( pIter->eType==RBU_PK_EXTERNAL || pIter->eType==RBU_PK_NONE ){
+ zSql = sqlite3_mprintf(
+ "SELECT %s, rbu_control FROM %s.'rbu_tmp_%q' %s ORDER BY %s%s",
+ zCollist, p->zStateDb, pIter->zDataTbl,
+ zPart, zCollist, zLimit
+ );
+ }else{
+ zSql = sqlite3_mprintf(
+ "SELECT %s, rbu_control FROM %s.'rbu_tmp_%q' %s "
+ "UNION ALL "
+ "SELECT %s, rbu_control FROM '%q' "
+ "%s %s typeof(rbu_control)='integer' AND rbu_control!=1 "
+ "ORDER BY %s%s",
+ zCollist, p->zStateDb, pIter->zDataTbl, zPart,
+ zCollist, pIter->zDataTbl,
+ zPart,
+ (zPart ? "AND" : "WHERE"),
+ zCollist, zLimit
+ );
+ }
+ if( p->rc==SQLITE_OK ){
+ p->rc = prepareFreeAndCollectError(p->dbRbu,&pIter->pSelect,pz,zSql);
+ }else{
+ sqlite3_free(zSql);
+ }
+ }
+
+ sqlite3_free(zImposterCols);
+ sqlite3_free(zImposterPK);
+ sqlite3_free(zWhere);
+ sqlite3_free(zBind);
+ sqlite3_free(zPart);
+ }else{
+ int bRbuRowid = (pIter->eType==RBU_PK_VTAB)
+ ||(pIter->eType==RBU_PK_NONE)
+ ||(pIter->eType==RBU_PK_EXTERNAL && rbuIsVacuum(p));
+ const char *zTbl = pIter->zTbl; /* Table this step applies to */
+ const char *zWrite; /* Imposter table name */
+
+ char *zBindings = rbuObjIterGetBindlist(p, pIter->nTblCol + bRbuRowid);
+ char *zWhere = rbuObjIterGetWhere(p, pIter);
+ char *zOldlist = rbuObjIterGetOldlist(p, pIter, "old");
+ char *zNewlist = rbuObjIterGetOldlist(p, pIter, "new");
+
+ zCollist = rbuObjIterGetCollist(p, pIter);
+ pIter->nCol = pIter->nTblCol;
+
+ /* Create the imposter table or tables (if required). */
+ rbuCreateImposterTable(p, pIter);
+ rbuCreateImposterTable2(p, pIter);
+ zWrite = (pIter->eType==RBU_PK_VTAB ? "" : "rbu_imp_");
+
+ /* Create the INSERT statement to write to the target PK b-tree */
+ if( p->rc==SQLITE_OK ){
+ p->rc = prepareFreeAndCollectError(p->dbMain, &pIter->pInsert, pz,
+ sqlite3_mprintf(
+ "INSERT INTO \"%s%w\"(%s%s) VALUES(%s)",
+ zWrite, zTbl, zCollist, (bRbuRowid ? ", _rowid_" : ""), zBindings
+ )
+ );
+ }
+
+ /* Create the DELETE statement to write to the target PK b-tree.
+ ** Because it only performs INSERT operations, this is not required for
+ ** an rbu vacuum handle. */
+ if( rbuIsVacuum(p)==0 && p->rc==SQLITE_OK ){
+ p->rc = prepareFreeAndCollectError(p->dbMain, &pIter->pDelete, pz,
+ sqlite3_mprintf(
+ "DELETE FROM \"%s%w\" WHERE %s", zWrite, zTbl, zWhere
+ )
+ );
+ }
+
+ if( rbuIsVacuum(p)==0 && pIter->abIndexed ){
+ const char *zRbuRowid = "";
+ if( pIter->eType==RBU_PK_EXTERNAL || pIter->eType==RBU_PK_NONE ){
+ zRbuRowid = ", rbu_rowid";
+ }
+
+ /* Create the rbu_tmp_xxx table and the triggers to populate it. */
+ rbuMPrintfExec(p, p->dbRbu,
+ "CREATE TABLE IF NOT EXISTS %s.'rbu_tmp_%q' AS "
+ "SELECT *%s FROM '%q' WHERE 0;"
+ , p->zStateDb, pIter->zDataTbl
+ , (pIter->eType==RBU_PK_EXTERNAL ? ", 0 AS rbu_rowid" : "")
+ , pIter->zDataTbl
+ );
+
+ rbuMPrintfExec(p, p->dbMain,
+ "CREATE TEMP TRIGGER rbu_delete_tr BEFORE DELETE ON \"%s%w\" "
+ "BEGIN "
+ " SELECT rbu_tmp_insert(3, %s);"
+ "END;"
+
+ "CREATE TEMP TRIGGER rbu_update1_tr BEFORE UPDATE ON \"%s%w\" "
+ "BEGIN "
+ " SELECT rbu_tmp_insert(3, %s);"
+ "END;"
+
+ "CREATE TEMP TRIGGER rbu_update2_tr AFTER UPDATE ON \"%s%w\" "
+ "BEGIN "
+ " SELECT rbu_tmp_insert(4, %s);"
+ "END;",
+ zWrite, zTbl, zOldlist,
+ zWrite, zTbl, zOldlist,
+ zWrite, zTbl, zNewlist
+ );
+
+ if( pIter->eType==RBU_PK_EXTERNAL || pIter->eType==RBU_PK_NONE ){
+ rbuMPrintfExec(p, p->dbMain,
+ "CREATE TEMP TRIGGER rbu_insert_tr AFTER INSERT ON \"%s%w\" "
+ "BEGIN "
+ " SELECT rbu_tmp_insert(0, %s);"
+ "END;",
+ zWrite, zTbl, zNewlist
+ );
+ }
+
+ rbuObjIterPrepareTmpInsert(p, pIter, zCollist, zRbuRowid);
+ }
+
+ /* Create the SELECT statement to read keys from data_xxx */
+ if( p->rc==SQLITE_OK ){
+ const char *zRbuRowid = "";
+ char *zStart = 0;
+ char *zOrder = 0;
+ if( bRbuRowid ){
+ zRbuRowid = rbuIsVacuum(p) ? ",_rowid_ " : ",rbu_rowid";
+ }
+
+ if( rbuIsVacuum(p) ){
+ if( nOffset ){
+ zStart = rbuVacuumTableStart(p, pIter, bRbuRowid, zWrite);
+ if( zStart ){
+ sqlite3_free(zLimit);
+ zLimit = 0;
+ }
+ }
+ if( bRbuRowid ){
+ zOrder = rbuMPrintf(p, "_rowid_");
+ }else{
+ zOrder = rbuObjIterGetPkList(p, pIter, "", ", ", "");
+ }
+ }
+
+ if( p->rc==SQLITE_OK ){
+ p->rc = prepareFreeAndCollectError(p->dbRbu, &pIter->pSelect, pz,
+ sqlite3_mprintf(
+ "SELECT %s,%s rbu_control%s FROM '%q'%s %s %s %s",
+ zCollist,
+ (rbuIsVacuum(p) ? "0 AS " : ""),
+ zRbuRowid,
+ pIter->zDataTbl, (zStart ? zStart : ""),
+ (zOrder ? "ORDER BY" : ""), zOrder,
+ zLimit
+ )
+ );
+ }
+ sqlite3_free(zStart);
+ sqlite3_free(zOrder);
+ }
+
+ sqlite3_free(zWhere);
+ sqlite3_free(zOldlist);
+ sqlite3_free(zNewlist);
+ sqlite3_free(zBindings);
+ }
+ sqlite3_free(zCollist);
+ sqlite3_free(zLimit);
+ }
+
+ return p->rc;
+}
+
+/*
+** Set output variable *ppStmt to point to an UPDATE statement that may
+** be used to update the imposter table for the main table b-tree of the
+** table object that pIter currently points to, assuming that the
+** rbu_control column of the data_xyz table contains zMask.
+**
+** If the zMask string does not specify any columns to update, then this
+** is not an error. Output variable *ppStmt is set to NULL in this case.
+*/
+static int rbuGetUpdateStmt(
+ sqlite3rbu *p, /* RBU handle */
+ RbuObjIter *pIter, /* Object iterator */
+ const char *zMask, /* rbu_control value ('x.x.') */
+ sqlite3_stmt **ppStmt /* OUT: UPDATE statement handle */
+){
+ RbuUpdateStmt **pp;
+ RbuUpdateStmt *pUp = 0;
+ int nUp = 0;
+
+ /* In case an error occurs */
+ *ppStmt = 0;
+
+ /* Search for an existing statement. If one is found, shift it to the front
+  ** of the LRU queue and return immediately. Otherwise, leave nUp set
+  ** to the number of statements currently in the cache and pUp pointing
+  ** to the last object in the list. */
+ for(pp=&pIter->pRbuUpdate; *pp; pp=&((*pp)->pNext)){
+ pUp = *pp;
+ if( strcmp(pUp->zMask, zMask)==0 ){
+ *pp = pUp->pNext;
+ pUp->pNext = pIter->pRbuUpdate;
+ pIter->pRbuUpdate = pUp;
+ *ppStmt = pUp->pUpdate;
+ return SQLITE_OK;
+ }
+ nUp++;
+ }
+ assert( pUp==0 || pUp->pNext==0 );
+
+ if( nUp>=SQLITE_RBU_UPDATE_CACHESIZE ){
+ for(pp=&pIter->pRbuUpdate; *pp!=pUp; pp=&((*pp)->pNext));
+ *pp = 0;
+ sqlite3_finalize(pUp->pUpdate);
+ pUp->pUpdate = 0;
+ }else{
+ pUp = (RbuUpdateStmt*)rbuMalloc(p, sizeof(RbuUpdateStmt)+pIter->nTblCol+1);
+ }
+
+ if( pUp ){
+ char *zWhere = rbuObjIterGetWhere(p, pIter);
+ char *zSet = rbuObjIterGetSetlist(p, pIter, zMask);
+ char *zUpdate = 0;
+
+ pUp->zMask = (char*)&pUp[1];
+ memcpy(pUp->zMask, zMask, pIter->nTblCol);
+ pUp->pNext = pIter->pRbuUpdate;
+ pIter->pRbuUpdate = pUp;
+
+ if( zSet ){
+ const char *zPrefix = "";
+
+ if( pIter->eType!=RBU_PK_VTAB ) zPrefix = "rbu_imp_";
+ zUpdate = sqlite3_mprintf("UPDATE \"%s%w\" SET %s WHERE %s",
+ zPrefix, pIter->zTbl, zSet, zWhere
+ );
+ p->rc = prepareFreeAndCollectError(
+ p->dbMain, &pUp->pUpdate, &p->zErrmsg, zUpdate
+ );
+ *ppStmt = pUp->pUpdate;
+ }
+ sqlite3_free(zWhere);
+ sqlite3_free(zSet);
+ }
+
+ return p->rc;
+}
+
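+/*
+** Open and return a new database handle on the file named zName. If
+** bUseVfs is true, open the handle using the rbu VFS (p->zVfsName).
+** If an error occurs, leave an error code and message in the RBU
+** handle and return NULL.
+*/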
+static sqlite3 *rbuOpenDbhandle(
+ sqlite3rbu *p,
+ const char *zName,
+ int bUseVfs
+){
+ sqlite3 *db = 0;
+ if( p->rc==SQLITE_OK ){
+ const int flags = SQLITE_OPEN_READWRITE|SQLITE_OPEN_CREATE|SQLITE_OPEN_URI;
+ p->rc = sqlite3_open_v2(zName, &db, flags, bUseVfs ? p->zVfsName : 0);
+ if( p->rc ){
+ p->zErrmsg = sqlite3_mprintf("%s", sqlite3_errmsg(db));
+ sqlite3_close(db);
+ db = 0;
+ }
+ }
+ return db;
+}
+
+/*
+** Free an RbuState object allocated by rbuLoadState().
+*/
+static void rbuFreeState(RbuState *p){
+ if( p ){
+ sqlite3_free(p->zTbl);
+ sqlite3_free(p->zDataTbl);
+ sqlite3_free(p->zIdx);
+ sqlite3_free(p);
+ }
+}
+
+/*
+** Allocate an RbuState object and load the contents of the rbu_state
+** table into it. Return a pointer to the new object. It is the
+** responsibility of the caller to eventually free the object using
+** rbuFreeState().
+**
+** If an error occurs, leave an error code and message in the rbu handle
+** and return NULL.
+*/
+static RbuState *rbuLoadState(sqlite3rbu *p){
+ RbuState *pRet = 0;
+ sqlite3_stmt *pStmt = 0;
+ int rc;
+ int rc2;
+
+ pRet = (RbuState*)rbuMalloc(p, sizeof(RbuState));
+ if( pRet==0 ) return 0;
+
+ rc = prepareFreeAndCollectError(p->dbRbu, &pStmt, &p->zErrmsg,
+ sqlite3_mprintf("SELECT k, v FROM %s.rbu_state", p->zStateDb)
+ );
+ while( rc==SQLITE_OK && SQLITE_ROW==sqlite3_step(pStmt) ){
+ switch( sqlite3_column_int(pStmt, 0) ){
+ case RBU_STATE_STAGE:
+ pRet->eStage = sqlite3_column_int(pStmt, 1);
+ if( pRet->eStage!=RBU_STAGE_OAL
+ && pRet->eStage!=RBU_STAGE_MOVE
+ && pRet->eStage!=RBU_STAGE_CKPT
+ ){
+ p->rc = SQLITE_CORRUPT;
+ }
+ break;
+
+ case RBU_STATE_TBL:
+ pRet->zTbl = rbuStrndup((char*)sqlite3_column_text(pStmt, 1), &rc);
+ break;
+
+ case RBU_STATE_IDX:
+ pRet->zIdx = rbuStrndup((char*)sqlite3_column_text(pStmt, 1), &rc);
+ break;
+
+ case RBU_STATE_ROW:
+ pRet->nRow = sqlite3_column_int(pStmt, 1);
+ break;
+
+ case RBU_STATE_PROGRESS:
+ pRet->nProgress = sqlite3_column_int64(pStmt, 1);
+ break;
+
+ case RBU_STATE_CKPT:
+ pRet->iWalCksum = sqlite3_column_int64(pStmt, 1);
+ break;
+
+ case RBU_STATE_COOKIE:
+ pRet->iCookie = (u32)sqlite3_column_int64(pStmt, 1);
+ break;
+
+ case RBU_STATE_OALSZ:
+ pRet->iOalSz = (u32)sqlite3_column_int64(pStmt, 1);
+ break;
+
+ case RBU_STATE_PHASEONESTEP:
+ pRet->nPhaseOneStep = sqlite3_column_int64(pStmt, 1);
+ break;
+
+ case RBU_STATE_DATATBL:
+ pRet->zDataTbl = rbuStrndup((char*)sqlite3_column_text(pStmt, 1), &rc);
+ break;
+
+ default:
+ rc = SQLITE_CORRUPT;
+ break;
+ }
+ }
+ rc2 = sqlite3_finalize(pStmt);
+ if( rc==SQLITE_OK ) rc = rc2;
+
+ p->rc = rc;
+ return pRet;
+}
+
+
+/*
+** Open the target database handle and the RBU database handle. If a
+** separate state database is in use, attach it to the RBU database
+** handle as "stat". If an error occurs, leave an error code and
+** message in the RBU handle.
+*/
+static void rbuOpenDatabase(sqlite3rbu *p, int *pbRetry){
+ assert( p->rc || (p->dbMain==0 && p->dbRbu==0) );
+ assert( p->rc || rbuIsVacuum(p) || p->zTarget!=0 );
+
+ /* Open the RBU database */
+ p->dbRbu = rbuOpenDbhandle(p, p->zRbu, 1);
+
+ if( p->rc==SQLITE_OK && rbuIsVacuum(p) ){
+ sqlite3_file_control(p->dbRbu, "main", SQLITE_FCNTL_RBUCNT, (void*)p);
+ if( p->zState==0 ){
+ const char *zFile = sqlite3_db_filename(p->dbRbu, "main");
+ p->zState = rbuMPrintf(p, "file://%s-vacuum?modeof=%s", zFile, zFile);
+ }
+ }
+
+ /* If using separate RBU and state databases, attach the state database to
+ ** the RBU db handle now. */
+ if( p->zState ){
+ rbuMPrintfExec(p, p->dbRbu, "ATTACH %Q AS stat", p->zState);
+ memcpy(p->zStateDb, "stat", 4);
+ }else{
+ memcpy(p->zStateDb, "main", 4);
+ }
+
+#if 0
+ if( p->rc==SQLITE_OK && rbuIsVacuum(p) ){
+ p->rc = sqlite3_exec(p->dbRbu, "BEGIN", 0, 0, 0);
+ }
+#endif
+
+ /* If it has not already been created, create the rbu_state table */
+ rbuMPrintfExec(p, p->dbRbu, RBU_CREATE_STATE, p->zStateDb);
+
+#if 0
+ if( rbuIsVacuum(p) ){
+ if( p->rc==SQLITE_OK ){
+ int rc2;
+ int bOk = 0;
+ sqlite3_stmt *pCnt = 0;
+ p->rc = prepareAndCollectError(p->dbRbu, &pCnt, &p->zErrmsg,
+ "SELECT count(*) FROM stat.sqlite_schema"
+ );
+ if( p->rc==SQLITE_OK
+ && sqlite3_step(pCnt)==SQLITE_ROW
+ && 1==sqlite3_column_int(pCnt, 0)
+ ){
+ bOk = 1;
+ }
+ rc2 = sqlite3_finalize(pCnt);
+ if( p->rc==SQLITE_OK ) p->rc = rc2;
+
+ if( p->rc==SQLITE_OK && bOk==0 ){
+ p->rc = SQLITE_ERROR;
+ p->zErrmsg = sqlite3_mprintf("invalid state database");
+ }
+
+ if( p->rc==SQLITE_OK ){
+ p->rc = sqlite3_exec(p->dbRbu, "COMMIT", 0, 0, 0);
+ }
+ }
+ }
+#endif
+
+ if( p->rc==SQLITE_OK && rbuIsVacuum(p) ){
+ int bOpen = 0;
+ int rc;
+ p->nRbu = 0;
+ p->pRbuFd = 0;
+ rc = sqlite3_file_control(p->dbRbu, "main", SQLITE_FCNTL_RBUCNT, (void*)p);
+ if( rc!=SQLITE_NOTFOUND ) p->rc = rc;
+ if( p->eStage>=RBU_STAGE_MOVE ){
+ bOpen = 1;
+ }else{
+ RbuState *pState = rbuLoadState(p);
+ if( pState ){
+ bOpen = (pState->eStage>=RBU_STAGE_MOVE);
+ rbuFreeState(pState);
+ }
+ }
+ if( bOpen ) p->dbMain = rbuOpenDbhandle(p, p->zRbu, p->nRbu<=1);
+ }
+
+ p->eStage = 0;
+ if( p->rc==SQLITE_OK && p->dbMain==0 ){
+ if( !rbuIsVacuum(p) ){
+ p->dbMain = rbuOpenDbhandle(p, p->zTarget, 1);
+ }else if( p->pRbuFd->pWalFd ){
+ if( pbRetry ){
+ p->pRbuFd->bNolock = 0;
+ sqlite3_close(p->dbRbu);
+ sqlite3_close(p->dbMain);
+ p->dbMain = 0;
+ p->dbRbu = 0;
+ *pbRetry = 1;
+ return;
+ }
+ p->rc = SQLITE_ERROR;
+ p->zErrmsg = sqlite3_mprintf("cannot vacuum wal mode database");
+ }else{
+ char *zTarget;
+ char *zExtra = 0;
+ if( strlen(p->zRbu)>=5 && 0==memcmp("file:", p->zRbu, 5) ){
+ zExtra = &p->zRbu[5];
+ while( *zExtra ){
+ if( *zExtra++=='?' ) break;
+ }
+ if( *zExtra=='\0' ) zExtra = 0;
+ }
+
+ zTarget = sqlite3_mprintf("file:%s-vactmp?rbu_memory=1%s%s",
+ sqlite3_db_filename(p->dbRbu, "main"),
+ (zExtra==0 ? "" : "&"), (zExtra==0 ? "" : zExtra)
+ );
+
+ if( zTarget==0 ){
+ p->rc = SQLITE_NOMEM;
+ return;
+ }
+ p->dbMain = rbuOpenDbhandle(p, zTarget, p->nRbu<=1);
+ sqlite3_free(zTarget);
+ }
+ }
+
+ if( p->rc==SQLITE_OK ){
+ p->rc = sqlite3_create_function(p->dbMain,
+ "rbu_tmp_insert", -1, SQLITE_UTF8, (void*)p, rbuTmpInsertFunc, 0, 0
+ );
+ }
+
+ if( p->rc==SQLITE_OK ){
+ p->rc = sqlite3_create_function(p->dbMain,
+ "rbu_fossil_delta", 2, SQLITE_UTF8, 0, rbuFossilDeltaFunc, 0, 0
+ );
+ }
+
+ if( p->rc==SQLITE_OK ){
+ p->rc = sqlite3_create_function(p->dbRbu,
+ "rbu_target_name", -1, SQLITE_UTF8, (void*)p, rbuTargetNameFunc, 0, 0
+ );
+ }
+
+ if( p->rc==SQLITE_OK ){
+ p->rc = sqlite3_file_control(p->dbMain, "main", SQLITE_FCNTL_RBU, (void*)p);
+ }
+ rbuMPrintfExec(p, p->dbMain, "SELECT * FROM sqlite_schema");
+
+ /* Mark the database file just opened as an RBU target database. If
+ ** this call returns SQLITE_NOTFOUND, then the RBU vfs is not in use.
+ ** This is an error. */
+ if( p->rc==SQLITE_OK ){
+ p->rc = sqlite3_file_control(p->dbMain, "main", SQLITE_FCNTL_RBU, (void*)p);
+ }
+
+ if( p->rc==SQLITE_NOTFOUND ){
+ p->rc = SQLITE_ERROR;
+ p->zErrmsg = sqlite3_mprintf("rbu vfs not found");
+ }
+}
+
+/*
+** This routine is a copy of the sqlite3FileSuffix3() routine from the core.
+** It is a no-op unless SQLITE_ENABLE_8_3_NAMES is defined.
+**
+** If SQLITE_ENABLE_8_3_NAMES is set at compile-time, if the database
+** filename in zBase is a URI with the "8_3_names=1" parameter, and if
+** the filename in z[] has a suffix (a.k.a. "extension") that is longer
+** than three characters, then shorten the suffix on z[] to be the last
+** three characters of the original suffix.
+**
+** If SQLITE_ENABLE_8_3_NAMES is set to 2 at compile-time, then always
+** do the suffix shortening regardless of the URI parameter.
+**
+** Examples:
+**
+** test.db-journal => test.nal
+** test.db-wal => test.wal
+** test.db-shm => test.shm
+** test.db-mj7f3319fa => test.9fa
+*/
+static void rbuFileSuffix3(const char *zBase, char *z){
+#ifdef SQLITE_ENABLE_8_3_NAMES
+#if SQLITE_ENABLE_8_3_NAMES<2
+ if( sqlite3_uri_boolean(zBase, "8_3_names", 0) )
+#endif
+ {
+ int i, sz;
+ sz = (int)strlen(z)&0xffffff;
+ for(i=sz-1; i>0 && z[i]!='/' && z[i]!='.'; i--){}
+ if( z[i]=='.' && sz>i+4 ) memmove(&z[i+1], &z[sz-3], 4);
+ }
+#endif
+}
+
+/*
+** Return the current wal-index header checksum for the target database
+** as a 64-bit integer.
+**
+** The checksum is stored in the first page of xShmMap memory as an 8-byte
+** blob starting at byte offset 40.
+*/
+static i64 rbuShmChecksum(sqlite3rbu *p){
+ i64 iRet = 0;
+ if( p->rc==SQLITE_OK ){
+ sqlite3_file *pDb = p->pTargetFd->pReal;
+ u32 volatile *ptr;
+ p->rc = pDb->pMethods->xShmMap(pDb, 0, 32*1024, 0, (void volatile**)&ptr);
+ if( p->rc==SQLITE_OK ){
+ iRet = ((i64)ptr[10] << 32) + ptr[11];
+ }
+ }
+ return iRet;
+}
+
+/*
+** This function is called as part of initializing or reinitializing an
+** incremental checkpoint.
+**
+** It populates the sqlite3rbu.aFrame[] array with the set of
+** (wal frame -> db page) copy operations required to checkpoint the
+** current wal file, and obtains the set of shm locks required to safely
+** perform the copy operations directly on the file-system.
+**
+** If argument pState is not NULL, then the incremental checkpoint is
+** being resumed. In this case, if the checksum of the wal-index-header
+** following recovery is not the same as the checksum saved in the RbuState
+** object, then the rbu handle is set to DONE state. This occurs if some
+** other client appends a transaction to the wal file in the middle of
+** an incremental checkpoint.
+*/
+static void rbuSetupCheckpoint(sqlite3rbu *p, RbuState *pState){
+
+  /* If pState is NULL, then the wal file may not have been opened and
+  ** recovered yet. Run a read-statement here to ensure that it is, so
+  ** that the recovery does not interfere with the "capture" process
+  ** below. */
+ if( pState==0 ){
+ p->eStage = 0;
+ if( p->rc==SQLITE_OK ){
+ p->rc = sqlite3_exec(p->dbMain, "SELECT * FROM sqlite_schema", 0, 0, 0);
+ }
+ }
+
+ /* Assuming no error has occurred, run a "restart" checkpoint with the
+ ** sqlite3rbu.eStage variable set to CAPTURE. This turns on the following
+ ** special behaviour in the rbu VFS:
+ **
+ ** * If the exclusive shm WRITER or READ0 lock cannot be obtained,
+ ** the checkpoint fails with SQLITE_BUSY (normally SQLite would
+ ** proceed with running a passive checkpoint instead of failing).
+ **
+ ** * Attempts to read from the *-wal file or write to the database file
+ ** do not perform any IO. Instead, the frame/page combinations that
+ ** would be read/written are recorded in the sqlite3rbu.aFrame[]
+ ** array.
+ **
+ ** * Calls to xShmLock(UNLOCK) to release the exclusive shm WRITER,
+ ** READ0 and CHECKPOINT locks taken as part of the checkpoint are
+ ** no-ops. These locks will not be released until the connection
+ ** is closed.
+ **
+ ** * Attempting to xSync() the database file causes an SQLITE_INTERNAL
+ ** error.
+ **
+ ** As a result, unless an error (i.e. OOM or SQLITE_BUSY) occurs, the
+ ** checkpoint below fails with SQLITE_INTERNAL, and leaves the aFrame[]
+ ** array populated with a set of (frame -> page) mappings. Because the
+ ** WRITER, CHECKPOINT and READ0 locks are still held, it is safe to copy
+ ** data from the wal file into the database file according to the
+ ** contents of aFrame[].
+ */
+ if( p->rc==SQLITE_OK ){
+ int rc2;
+ p->eStage = RBU_STAGE_CAPTURE;
+ rc2 = sqlite3_exec(p->dbMain, "PRAGMA main.wal_checkpoint=restart", 0, 0,0);
+ if( rc2!=SQLITE_INTERNAL ) p->rc = rc2;
+ }
+
+ if( p->rc==SQLITE_OK && p->nFrame>0 ){
+ p->eStage = RBU_STAGE_CKPT;
+ p->nStep = (pState ? pState->nRow : 0);
+ p->aBuf = rbuMalloc(p, p->pgsz);
+ p->iWalCksum = rbuShmChecksum(p);
+ }
+
+ if( p->rc==SQLITE_OK ){
+ if( p->nFrame==0 || (pState && pState->iWalCksum!=p->iWalCksum) ){
+ p->rc = SQLITE_DONE;
+ p->eStage = RBU_STAGE_DONE;
+ }else{
+ int nSectorSize;
+ sqlite3_file *pDb = p->pTargetFd->pReal;
+ sqlite3_file *pWal = p->pTargetFd->pWalFd->pReal;
+ assert( p->nPagePerSector==0 );
+ nSectorSize = pDb->pMethods->xSectorSize(pDb);
+ if( nSectorSize>p->pgsz ){
+ p->nPagePerSector = nSectorSize / p->pgsz;
+ }else{
+ p->nPagePerSector = 1;
+ }
+
+ /* Call xSync() on the wal file. This causes SQLite to sync the
+ ** directory in which the target database and the wal file reside, in
+ ** case it has not been synced since the rename() call in
+ ** rbuMoveOalFile(). */
+ p->rc = pWal->pMethods->xSync(pWal, SQLITE_SYNC_NORMAL);
+ }
+ }
+}
+
+/*
+** Called when iAmt bytes are read from offset iOff of the wal file while
+** the rbu object is in capture mode. Record the frame number of the frame
+** being read in the aFrame[] array.
+*/
+static int rbuCaptureWalRead(sqlite3rbu *pRbu, i64 iOff, int iAmt){
+ const u32 mReq = (1<<WAL_LOCK_WRITE)|(1<<WAL_LOCK_CKPT)|(1<<WAL_LOCK_READ0);
+ u32 iFrame;
+
+ if( pRbu->mLock!=mReq ){
+ pRbu->rc = SQLITE_BUSY;
+ return SQLITE_INTERNAL;
+ }
+
+ pRbu->pgsz = iAmt;
+ if( pRbu->nFrame==pRbu->nFrameAlloc ){
+ int nNew = (pRbu->nFrameAlloc ? pRbu->nFrameAlloc : 64) * 2;
+ RbuFrame *aNew;
+ aNew = (RbuFrame*)sqlite3_realloc64(pRbu->aFrame, nNew * sizeof(RbuFrame));
+ if( aNew==0 ) return SQLITE_NOMEM;
+ pRbu->aFrame = aNew;
+ pRbu->nFrameAlloc = nNew;
+ }
+
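+  /* Compute the frame number: the wal file begins with a 32-byte header,
+  ** and each frame consists of a 24-byte frame-header followed by one
+  ** page of iAmt bytes. */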
+ iFrame = (u32)((iOff-32) / (i64)(iAmt+24)) + 1;
+ if( pRbu->iMaxFrame<iFrame ) pRbu->iMaxFrame = iFrame;
+ pRbu->aFrame[pRbu->nFrame].iWalFrame = iFrame;
+ pRbu->aFrame[pRbu->nFrame].iDbPage = 0;
+ pRbu->nFrame++;
+ return SQLITE_OK;
+}
+
+/*
+** Called when a page of data is written to offset iOff of the database
+** file while the rbu handle is in capture mode. Record the page number
+** of the page being written in the aFrame[] array.
+*/
+static int rbuCaptureDbWrite(sqlite3rbu *pRbu, i64 iOff){
+ pRbu->aFrame[pRbu->nFrame-1].iDbPage = (u32)(iOff / pRbu->pgsz) + 1;
+ return SQLITE_OK;
+}
+
+/*
+** This is called as part of an incremental checkpoint operation. Copy
+** a single frame of data from the wal file into the database file, as
+** indicated by the RbuFrame object.
+*/
+static void rbuCheckpointFrame(sqlite3rbu *p, RbuFrame *pFrame){
+ sqlite3_file *pWal = p->pTargetFd->pWalFd->pReal;
+ sqlite3_file *pDb = p->pTargetFd->pReal;
+ i64 iOff;
+
+ assert( p->rc==SQLITE_OK );
+ iOff = (i64)(pFrame->iWalFrame-1) * (p->pgsz + 24) + 32 + 24;
+ p->rc = pWal->pMethods->xRead(pWal, p->aBuf, p->pgsz, iOff);
+ if( p->rc ) return;
+
+ iOff = (i64)(pFrame->iDbPage-1) * p->pgsz;
+ p->rc = pDb->pMethods->xWrite(pDb, p->aBuf, p->pgsz, iOff);
+}
+
+
+/*
+** Take an EXCLUSIVE lock on the database file.
+*/
+static void rbuLockDatabase(sqlite3rbu *p){
+ sqlite3_file *pReal = p->pTargetFd->pReal;
+ assert( p->rc==SQLITE_OK );
+ p->rc = pReal->pMethods->xLock(pReal, SQLITE_LOCK_SHARED);
+ if( p->rc==SQLITE_OK ){
+ p->rc = pReal->pMethods->xLock(pReal, SQLITE_LOCK_EXCLUSIVE);
+ }
+}
+
+#if defined(_WIN32_WCE)
+static LPWSTR rbuWinUtf8ToUnicode(const char *zFilename){
+ int nChar;
+ LPWSTR zWideFilename;
+
+ nChar = MultiByteToWideChar(CP_UTF8, 0, zFilename, -1, NULL, 0);
+ if( nChar==0 ){
+ return 0;
+ }
+ zWideFilename = sqlite3_malloc64( nChar*sizeof(zWideFilename[0]) );
+ if( zWideFilename==0 ){
+ return 0;
+ }
+ memset(zWideFilename, 0, nChar*sizeof(zWideFilename[0]));
+ nChar = MultiByteToWideChar(CP_UTF8, 0, zFilename, -1, zWideFilename,
+ nChar);
+ if( nChar==0 ){
+ sqlite3_free(zWideFilename);
+ zWideFilename = 0;
+ }
+ return zWideFilename;
+}
+#endif
+
+/*
+** The RBU handle is currently in RBU_STAGE_OAL state, with a SHARED lock
+** on the database file. This function moves the *-oal file to the *-wal path,
+** then reopens the database file (this time in vanilla, non-oal, WAL mode).
+** If an error occurs, leave an error code and error message in the rbu
+** handle.
+*/
+static void rbuMoveOalFile(sqlite3rbu *p){
+ const char *zBase = sqlite3_db_filename(p->dbMain, "main");
+ const char *zMove = zBase;
+ char *zOal;
+ char *zWal;
+
+ if( rbuIsVacuum(p) ){
+ zMove = sqlite3_db_filename(p->dbRbu, "main");
+ }
+ zOal = sqlite3_mprintf("%s-oal", zMove);
+ zWal = sqlite3_mprintf("%s-wal", zMove);
+
+ assert( p->eStage==RBU_STAGE_MOVE );
+ assert( p->rc==SQLITE_OK && p->zErrmsg==0 );
+ if( zWal==0 || zOal==0 ){
+ p->rc = SQLITE_NOMEM;
+ }else{
+ /* Move the *-oal file to *-wal. At this point connection p->db is
+ ** holding a SHARED lock on the target database file (because it is
+ ** in WAL mode). So no other connection may be writing the db.
+ **
+ ** In order to ensure that there are no database readers, an EXCLUSIVE
+ ** lock is obtained here before the *-oal is moved to *-wal.
+ */
+ rbuLockDatabase(p);
+ if( p->rc==SQLITE_OK ){
+ rbuFileSuffix3(zBase, zWal);
+ rbuFileSuffix3(zBase, zOal);
+
+ /* Re-open the databases. */
+ rbuObjIterFinalize(&p->objiter);
+ sqlite3_close(p->dbRbu);
+ sqlite3_close(p->dbMain);
+ p->dbMain = 0;
+ p->dbRbu = 0;
+
+#if defined(_WIN32_WCE)
+ {
+ LPWSTR zWideOal;
+ LPWSTR zWideWal;
+
+ zWideOal = rbuWinUtf8ToUnicode(zOal);
+ if( zWideOal ){
+ zWideWal = rbuWinUtf8ToUnicode(zWal);
+ if( zWideWal ){
+ if( MoveFileW(zWideOal, zWideWal) ){
+ p->rc = SQLITE_OK;
+ }else{
+ p->rc = SQLITE_IOERR;
+ }
+ sqlite3_free(zWideWal);
+ }else{
+ p->rc = SQLITE_IOERR_NOMEM;
+ }
+ sqlite3_free(zWideOal);
+ }else{
+ p->rc = SQLITE_IOERR_NOMEM;
+ }
+ }
+#else
+ p->rc = rename(zOal, zWal) ? SQLITE_IOERR : SQLITE_OK;
+#endif
+
+ if( p->rc==SQLITE_OK ){
+ rbuOpenDatabase(p, 0);
+ rbuSetupCheckpoint(p, 0);
+ }
+ }
+ }
+
+ sqlite3_free(zWal);
+ sqlite3_free(zOal);
+}
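+
+/* Concretely (file names hypothetical): for a target database "test.db",
+** rbuMoveOalFile() renames "test.db-oal" to "test.db-wal", so the content
+** already written to the *-oal file becomes an ordinary wal file for the
+** checkpoint stage that follows to copy into "test.db". */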
+
+/*
+** The SELECT statement iterating through the keys for the current object
+** (p->objiter.pSelect) currently points to a valid row. This function
+** determines the type of operation requested by this row and returns
+** one of the following values to indicate the result:
+**
+** * RBU_INSERT
+** * RBU_DELETE
+** * RBU_IDX_DELETE
+** * RBU_UPDATE
+**
+** If RBU_UPDATE is returned, then output variable *pzMask is set to
+** point to the text value indicating the columns to update.
+**
+** If the rbu_control field contains an invalid value, an error code and
+** message are left in the RBU handle and zero returned.
+*/
+static int rbuStepType(sqlite3rbu *p, const char **pzMask){
+ int iCol = p->objiter.nCol; /* Index of rbu_control column */
+ int res = 0; /* Return value */
+
+ switch( sqlite3_column_type(p->objiter.pSelect, iCol) ){
+ case SQLITE_INTEGER: {
+ int iVal = sqlite3_column_int(p->objiter.pSelect, iCol);
+ switch( iVal ){
+ case 0: res = RBU_INSERT; break;
+ case 1: res = RBU_DELETE; break;
+ case 2: res = RBU_REPLACE; break;
+ case 3: res = RBU_IDX_DELETE; break;
+ case 4: res = RBU_IDX_INSERT; break;
+ }
+ break;
+ }
+
+ case SQLITE_TEXT: {
+ const unsigned char *z = sqlite3_column_text(p->objiter.pSelect, iCol);
+ if( z==0 ){
+ p->rc = SQLITE_NOMEM;
+ }else{
+ *pzMask = (const char*)z;
+ }
+ res = RBU_UPDATE;
+
+ break;
+ }
+
+ default:
+ break;
+ }
+
+ if( res==0 ){
+ rbuBadControlError(p);
+ }
+ return res;
+}
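+
+/* For reference, a sketch of the data_xxx rows that drive the mapping
+** above (table and column names are hypothetical; the 0/1/text forms are
+** the ones an external RBU database normally contains):
+**
+**   INSERT INTO data_t1(a, b, rbu_control) VALUES(1, 'one', 0);    -- insert
+**   INSERT INTO data_t1(a, b, rbu_control) VALUES(2, NULL, 1);     -- delete
+**   INSERT INTO data_t1(a, b, rbu_control) VALUES(3, 'iii', '.x'); -- update
+**
+** In the text form each character corresponds to a column of the target
+** table: '.' means the column is unchanged, 'x' means it is updated. */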
+
+#ifdef SQLITE_DEBUG
+/*
+** Assert that column iCol of statement pStmt is named zName.
+*/
+static void assertColumnName(sqlite3_stmt *pStmt, int iCol, const char *zName){
+ const char *zCol = sqlite3_column_name(pStmt, iCol);
+ assert( 0==sqlite3_stricmp(zName, zCol) );
+}
+#else
+# define assertColumnName(x,y,z)
+#endif
+
+/*
+** Argument eType must be one of RBU_INSERT, RBU_DELETE, RBU_IDX_INSERT or
+** RBU_IDX_DELETE. This function performs the work of a single
+** sqlite3rbu_step() call for the type of operation specified by eType.
+*/
+static void rbuStepOneOp(sqlite3rbu *p, int eType){
+ RbuObjIter *pIter = &p->objiter;
+ sqlite3_value *pVal;
+ sqlite3_stmt *pWriter;
+ int i;
+
+ assert( p->rc==SQLITE_OK );
+ assert( eType!=RBU_DELETE || pIter->zIdx==0 );
+ assert( eType==RBU_DELETE || eType==RBU_IDX_DELETE
+ || eType==RBU_INSERT || eType==RBU_IDX_INSERT
+ );
+
+ /* If this is a delete, decrement nPhaseOneStep by nIndex. If the DELETE
+ ** statement below does actually delete a row, nPhaseOneStep will be
+ ** incremented by the same amount when SQL function rbu_tmp_insert()
+ ** is invoked by the trigger. */
+ if( eType==RBU_DELETE ){
+ p->nPhaseOneStep -= p->objiter.nIndex;
+ }
+
+ if( eType==RBU_IDX_DELETE || eType==RBU_DELETE ){
+ pWriter = pIter->pDelete;
+ }else{
+ pWriter = pIter->pInsert;
+ }
+
+ for(i=0; i<pIter->nCol; i++){
+ /* If this is an INSERT into a table b-tree and the table has an
+ ** explicit INTEGER PRIMARY KEY, check that this is not an attempt
+ ** to write a NULL into the IPK column. That is not permitted. */
+ if( eType==RBU_INSERT
+ && pIter->zIdx==0 && pIter->eType==RBU_PK_IPK && pIter->abTblPk[i]
+ && sqlite3_column_type(pIter->pSelect, i)==SQLITE_NULL
+ ){
+ p->rc = SQLITE_MISMATCH;
+ p->zErrmsg = sqlite3_mprintf("datatype mismatch");
+ return;
+ }
+
+ if( eType==RBU_DELETE && pIter->abTblPk[i]==0 ){
+ continue;
+ }
+
+ pVal = sqlite3_column_value(pIter->pSelect, i);
+ p->rc = sqlite3_bind_value(pWriter, i+1, pVal);
+ if( p->rc ) return;
+ }
+ if( pIter->zIdx==0 ){
+ if( pIter->eType==RBU_PK_VTAB
+ || pIter->eType==RBU_PK_NONE
+ || (pIter->eType==RBU_PK_EXTERNAL && rbuIsVacuum(p))
+ ){
+ /* For a virtual table, or a table with no primary key, the
+ ** SELECT statement is:
+ **
+ ** SELECT <cols>, rbu_control, rbu_rowid FROM ....
+ **
+ ** Hence column_value(pIter->nCol+1).
+ */
+ assertColumnName(pIter->pSelect, pIter->nCol+1,
+ rbuIsVacuum(p) ? "rowid" : "rbu_rowid"
+ );
+ pVal = sqlite3_column_value(pIter->pSelect, pIter->nCol+1);
+ p->rc = sqlite3_bind_value(pWriter, pIter->nCol+1, pVal);
+ }
+ }
+ if( p->rc==SQLITE_OK ){
+ sqlite3_step(pWriter);
+ p->rc = resetAndCollectError(pWriter, &p->zErrmsg);
+ }
+}
+
+/*
+** This function does the work for an sqlite3rbu_step() call.
+**
+** The object-iterator (p->objiter) currently points to a valid object,
+** and the input cursor (p->objiter.pSelect) currently points to a valid
+** input row. Perform whatever processing is required and return.
+**
+** If no error occurs, SQLITE_OK is returned. Otherwise, an error code
+** and message is left in the RBU handle and a copy of the error code
+** returned.
+*/
+static int rbuStep(sqlite3rbu *p){
+ RbuObjIter *pIter = &p->objiter;
+ const char *zMask = 0;
+ int eType = rbuStepType(p, &zMask);
+
+ if( eType ){
+ assert( eType==RBU_INSERT || eType==RBU_DELETE
+ || eType==RBU_REPLACE || eType==RBU_IDX_DELETE
+ || eType==RBU_IDX_INSERT || eType==RBU_UPDATE
+ );
+ assert( eType!=RBU_UPDATE || pIter->zIdx==0 );
+
+ if( pIter->zIdx==0 && (eType==RBU_IDX_DELETE || eType==RBU_IDX_INSERT) ){
+ rbuBadControlError(p);
+ }
+ else if( eType==RBU_REPLACE ){
+ if( pIter->zIdx==0 ){
+ p->nPhaseOneStep += p->objiter.nIndex;
+ rbuStepOneOp(p, RBU_DELETE);
+ }
+ if( p->rc==SQLITE_OK ) rbuStepOneOp(p, RBU_INSERT);
+ }
+ else if( eType!=RBU_UPDATE ){
+ rbuStepOneOp(p, eType);
+ }
+ else{
+ sqlite3_value *pVal;
+ sqlite3_stmt *pUpdate = 0;
+ assert( eType==RBU_UPDATE );
+ p->nPhaseOneStep -= p->objiter.nIndex;
+ rbuGetUpdateStmt(p, pIter, zMask, &pUpdate);
+ if( pUpdate ){
+ int i;
+ for(i=0; p->rc==SQLITE_OK && i<pIter->nCol; i++){
+ char c = zMask[pIter->aiSrcOrder[i]];
+ pVal = sqlite3_column_value(pIter->pSelect, i);
+ if( pIter->abTblPk[i] || c!='.' ){
+ p->rc = sqlite3_bind_value(pUpdate, i+1, pVal);
+ }
+ }
+ if( p->rc==SQLITE_OK
+ && (pIter->eType==RBU_PK_VTAB || pIter->eType==RBU_PK_NONE)
+ ){
+ /* Bind the rbu_rowid value to column _rowid_ */
+ assertColumnName(pIter->pSelect, pIter->nCol+1, "rbu_rowid");
+ pVal = sqlite3_column_value(pIter->pSelect, pIter->nCol+1);
+ p->rc = sqlite3_bind_value(pUpdate, pIter->nCol+1, pVal);
+ }
+ if( p->rc==SQLITE_OK ){
+ sqlite3_step(pUpdate);
+ p->rc = resetAndCollectError(pUpdate, &p->zErrmsg);
+ }
+ }
+ }
+ }
+ return p->rc;
+}
+
+/*
+** Increment the schema cookie of the main database opened by p->dbMain.
+**
+** Or, if this is an RBU vacuum, set the schema cookie of the main db
+** opened by p->dbMain to one more than the schema cookie of the main
+** db opened by p->dbRbu.
+*/
+static void rbuIncrSchemaCookie(sqlite3rbu *p){
+ if( p->rc==SQLITE_OK ){
+ sqlite3 *dbread = (rbuIsVacuum(p) ? p->dbRbu : p->dbMain);
+ int iCookie = 1000000;
+ sqlite3_stmt *pStmt;
+
+ p->rc = prepareAndCollectError(dbread, &pStmt, &p->zErrmsg,
+ "PRAGMA schema_version"
+ );
+ if( p->rc==SQLITE_OK ){
+ /* Coverage: it may be that this sqlite3_step() cannot fail. There
+ ** is already a transaction open, so the prepared statement cannot
+ ** throw an SQLITE_SCHEMA exception. The only database page the
+ ** statement reads is page 1, which is guaranteed to be in the cache.
+ ** And no memory allocations are required. */
+ if( SQLITE_ROW==sqlite3_step(pStmt) ){
+ iCookie = sqlite3_column_int(pStmt, 0);
+ }
+ rbuFinalize(p, pStmt);
+ }
+ if( p->rc==SQLITE_OK ){
+ rbuMPrintfExec(p, p->dbMain, "PRAGMA schema_version = %d", iCookie+1);
+ }
+ }
+}
+
+/*
+** Update the contents of the rbu_state table within the rbu database. The
+** value stored in the RBU_STATE_STAGE column is eStage. All other values
+** are determined by inspecting the rbu handle passed as the first argument.
+*/
+static void rbuSaveState(sqlite3rbu *p, int eStage){
+ if( p->rc==SQLITE_OK || p->rc==SQLITE_DONE ){
+ sqlite3_stmt *pInsert = 0;
+ rbu_file *pFd = (rbuIsVacuum(p) ? p->pRbuFd : p->pTargetFd);
+ int rc;
+
+ assert( p->zErrmsg==0 );
+ rc = prepareFreeAndCollectError(p->dbRbu, &pInsert, &p->zErrmsg,
+ sqlite3_mprintf(
+ "INSERT OR REPLACE INTO %s.rbu_state(k, v) VALUES "
+ "(%d, %d), "
+ "(%d, %Q), "
+ "(%d, %Q), "
+ "(%d, %d), "
+ "(%d, %d), "
+ "(%d, %lld), "
+ "(%d, %lld), "
+ "(%d, %lld), "
+ "(%d, %lld), "
+ "(%d, %Q) ",
+ p->zStateDb,
+ RBU_STATE_STAGE, eStage,
+ RBU_STATE_TBL, p->objiter.zTbl,
+ RBU_STATE_IDX, p->objiter.zIdx,
+ RBU_STATE_ROW, p->nStep,
+ RBU_STATE_PROGRESS, p->nProgress,
+ RBU_STATE_CKPT, p->iWalCksum,
+ RBU_STATE_COOKIE, (i64)pFd->iCookie,
+ RBU_STATE_OALSZ, p->iOalSz,
+ RBU_STATE_PHASEONESTEP, p->nPhaseOneStep,
+ RBU_STATE_DATATBL, p->objiter.zDataTbl
+ )
+ );
+ assert( pInsert==0 || rc==SQLITE_OK );
+
+ if( rc==SQLITE_OK ){
+ sqlite3_step(pInsert);
+ rc = sqlite3_finalize(pInsert);
+ }
+ if( rc!=SQLITE_OK ) p->rc = rc;
+ }
+}
+
+
+/*
+** The second argument passed to this function is the name of a PRAGMA
+** setting - "page_size", "auto_vacuum", "user_version" or "application_id".
+** This function executes the following on sqlite3rbu.dbRbu:
+**
+** "PRAGMA main.$zPragma"
+**
+** where $zPragma is the string passed as the second argument, then
+** on sqlite3rbu.dbMain:
+**
+** "PRAGMA main.$zPragma = $val"
+**
+** where $val is the value returned by the first PRAGMA invocation.
+**
+** In short, it copies the value of the specified PRAGMA setting from
+** dbRbu to dbMain.
+*/
+static void rbuCopyPragma(sqlite3rbu *p, const char *zPragma){
+ if( p->rc==SQLITE_OK ){
+ sqlite3_stmt *pPragma = 0;
+ p->rc = prepareFreeAndCollectError(p->dbRbu, &pPragma, &p->zErrmsg,
+ sqlite3_mprintf("PRAGMA main.%s", zPragma)
+ );
+ if( p->rc==SQLITE_OK && SQLITE_ROW==sqlite3_step(pPragma) ){
+ p->rc = rbuMPrintfExec(p, p->dbMain, "PRAGMA main.%s = %d",
+ zPragma, sqlite3_column_int(pPragma, 0)
+ );
+ }
+ rbuFinalize(p, pPragma);
+ }
+}
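+
+/* Example: if "PRAGMA main.page_size" evaluated against dbRbu returns
+** 4096, the code above executes "PRAGMA main.page_size = 4096" against
+** dbMain. */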
+
+/*
+** The RBU handle passed as the only argument has just been opened and
+** the state database is empty. If this RBU handle was opened for an
+** RBU vacuum operation, create the schema in the target db.
+*/
+static void rbuCreateTargetSchema(sqlite3rbu *p){
+ sqlite3_stmt *pSql = 0;
+ sqlite3_stmt *pInsert = 0;
+
+ assert( rbuIsVacuum(p) );
+ p->rc = sqlite3_exec(p->dbMain, "PRAGMA writable_schema=1", 0,0, &p->zErrmsg);
+ if( p->rc==SQLITE_OK ){
+ p->rc = prepareAndCollectError(p->dbRbu, &pSql, &p->zErrmsg,
+ "SELECT sql FROM sqlite_schema WHERE sql!='' AND rootpage!=0"
+ " AND name!='sqlite_sequence' "
+ " ORDER BY type DESC"
+ );
+ }
+
+ while( p->rc==SQLITE_OK && sqlite3_step(pSql)==SQLITE_ROW ){
+ const char *zSql = (const char*)sqlite3_column_text(pSql, 0);
+ p->rc = sqlite3_exec(p->dbMain, zSql, 0, 0, &p->zErrmsg);
+ }
+ rbuFinalize(p, pSql);
+ if( p->rc!=SQLITE_OK ) return;
+
+ if( p->rc==SQLITE_OK ){
+ p->rc = prepareAndCollectError(p->dbRbu, &pSql, &p->zErrmsg,
+ "SELECT * FROM sqlite_schema WHERE rootpage=0 OR rootpage IS NULL"
+ );
+ }
+
+ if( p->rc==SQLITE_OK ){
+ p->rc = prepareAndCollectError(p->dbMain, &pInsert, &p->zErrmsg,
+ "INSERT INTO sqlite_schema VALUES(?,?,?,?,?)"
+ );
+ }
+
+ while( p->rc==SQLITE_OK && sqlite3_step(pSql)==SQLITE_ROW ){
+ int i;
+ for(i=0; i<5; i++){
+ sqlite3_bind_value(pInsert, i+1, sqlite3_column_value(pSql, i));
+ }
+ sqlite3_step(pInsert);
+ p->rc = sqlite3_reset(pInsert);
+ }
+ if( p->rc==SQLITE_OK ){
+ p->rc = sqlite3_exec(p->dbMain, "PRAGMA writable_schema=0",0,0,&p->zErrmsg);
+ }
+
+ rbuFinalize(p, pSql);
+ rbuFinalize(p, pInsert);
+}
+
+/*
+** Step the RBU object.
+*/
+SQLITE_API int sqlite3rbu_step(sqlite3rbu *p){
+ if( p ){
+ switch( p->eStage ){
+ case RBU_STAGE_OAL: {
+ RbuObjIter *pIter = &p->objiter;
+
+ /* If this is an RBU vacuum operation and the state table was empty
+ ** when this handle was opened, create the target database schema. */
+ if( rbuIsVacuum(p) && p->nProgress==0 && p->rc==SQLITE_OK ){
+ rbuCreateTargetSchema(p);
+ rbuCopyPragma(p, "user_version");
+ rbuCopyPragma(p, "application_id");
+ }
+
+ while( p->rc==SQLITE_OK && pIter->zTbl ){
+
+ if( pIter->bCleanup ){
+ /* Clean up the rbu_tmp_xxx table for the previous table. It
+ ** cannot be dropped as there are currently active SQL statements.
+ ** But the contents can be deleted. */
+ if( rbuIsVacuum(p)==0 && pIter->abIndexed ){
+ rbuMPrintfExec(p, p->dbRbu,
+ "DELETE FROM %s.'rbu_tmp_%q'", p->zStateDb, pIter->zDataTbl
+ );
+ }
+ }else{
+ rbuObjIterPrepareAll(p, pIter, 0);
+
+ /* Advance to the next row to process. */
+ if( p->rc==SQLITE_OK ){
+ int rc = sqlite3_step(pIter->pSelect);
+ if( rc==SQLITE_ROW ){
+ p->nProgress++;
+ p->nStep++;
+ return rbuStep(p);
+ }
+ p->rc = sqlite3_reset(pIter->pSelect);
+ p->nStep = 0;
+ }
+ }
+
+ rbuObjIterNext(p, pIter);
+ }
+
+ if( p->rc==SQLITE_OK ){
+ assert( pIter->zTbl==0 );
+ rbuSaveState(p, RBU_STAGE_MOVE);
+ rbuIncrSchemaCookie(p);
+ if( p->rc==SQLITE_OK ){
+ p->rc = sqlite3_exec(p->dbMain, "COMMIT", 0, 0, &p->zErrmsg);
+ }
+ if( p->rc==SQLITE_OK ){
+ p->rc = sqlite3_exec(p->dbRbu, "COMMIT", 0, 0, &p->zErrmsg);
+ }
+ p->eStage = RBU_STAGE_MOVE;
+ }
+ break;
+ }
+
+ case RBU_STAGE_MOVE: {
+ if( p->rc==SQLITE_OK ){
+ rbuMoveOalFile(p);
+ p->nProgress++;
+ }
+ break;
+ }
+
+ case RBU_STAGE_CKPT: {
+ if( p->rc==SQLITE_OK ){
+ if( p->nStep>=p->nFrame ){
+ sqlite3_file *pDb = p->pTargetFd->pReal;
+
+ /* Sync the db file */
+ p->rc = pDb->pMethods->xSync(pDb, SQLITE_SYNC_NORMAL);
+
+ /* Update nBackfill */
+ if( p->rc==SQLITE_OK ){
+ void volatile *ptr;
+ p->rc = pDb->pMethods->xShmMap(pDb, 0, 32*1024, 0, &ptr);
+ if( p->rc==SQLITE_OK ){
+ ((u32 volatile*)ptr)[24] = p->iMaxFrame;
+ }
+ }
+
+ if( p->rc==SQLITE_OK ){
+ p->eStage = RBU_STAGE_DONE;
+ p->rc = SQLITE_DONE;
+ }
+ }else{
+          /* At one point the following block copied a single frame from the
+          ** wal file to the database file, so that each call to
+          ** sqlite3rbu_step() checkpointed exactly one frame.
+ **
+ ** However, if the sector-size is larger than the page-size, and the
+ ** application calls sqlite3rbu_savestate() or close() immediately
+ ** after this step, then rbu_step() again, then a power failure occurs,
+ ** then the database page written here may be damaged. Work around
+ ** this by checkpointing frames until the next page in the aFrame[]
+ ** lies on a different disk sector to the current one. */
+ u32 iSector;
+ do{
+ RbuFrame *pFrame = &p->aFrame[p->nStep];
+ iSector = (pFrame->iDbPage-1) / p->nPagePerSector;
+ rbuCheckpointFrame(p, pFrame);
+ p->nStep++;
+ }while( p->nStep<p->nFrame
+ && iSector==((p->aFrame[p->nStep].iDbPage-1) / p->nPagePerSector)
+ && p->rc==SQLITE_OK
+ );
+ }
+ p->nProgress++;
+ }
+ break;
+ }
+
+ default:
+ break;
+ }
+ return p->rc;
+ }else{
+ return SQLITE_NOMEM;
+ }
+}
+
+/*
+** Compare strings z1 and z2, returning 0 if they are identical, or non-zero
+** otherwise. Either or both argument may be NULL. Two NULL values are
+** considered equal, and NULL is considered distinct from all other values.
+*/
+static int rbuStrCompare(const char *z1, const char *z2){
+ if( z1==0 && z2==0 ) return 0;
+ if( z1==0 || z2==0 ) return 1;
+ return (sqlite3_stricmp(z1, z2)!=0);
+}
+
+/*
+** This function is called as part of sqlite3rbu_open() when initializing
+** an rbu handle in OAL stage. If the rbu update has not started (i.e.
+** the rbu_state table was empty) it is a no-op. Otherwise, it arranges
+** things so that the next call to sqlite3rbu_step() continues on from
+** where the previous rbu handle left off.
+**
+** If an error occurs, an error code and error message are left in the
+** rbu handle passed as the first argument.
+*/
+static void rbuSetupOal(sqlite3rbu *p, RbuState *pState){
+ assert( p->rc==SQLITE_OK );
+ if( pState->zTbl ){
+ RbuObjIter *pIter = &p->objiter;
+ int rc = SQLITE_OK;
+
+ while( rc==SQLITE_OK && pIter->zTbl && (pIter->bCleanup
+ || rbuStrCompare(pIter->zIdx, pState->zIdx)
+ || (pState->zDataTbl==0 && rbuStrCompare(pIter->zTbl, pState->zTbl))
+ || (pState->zDataTbl && rbuStrCompare(pIter->zDataTbl, pState->zDataTbl))
+ )){
+ rc = rbuObjIterNext(p, pIter);
+ }
+
+ if( rc==SQLITE_OK && !pIter->zTbl ){
+ rc = SQLITE_ERROR;
+ p->zErrmsg = sqlite3_mprintf("rbu_state mismatch error");
+ }
+
+ if( rc==SQLITE_OK ){
+ p->nStep = pState->nRow;
+ rc = rbuObjIterPrepareAll(p, &p->objiter, p->nStep);
+ }
+
+ p->rc = rc;
+ }
+}
+
+/*
+** If there is a "*-oal" file in the file-system corresponding to the
+** target database, delete it. If an error occurs,
+** leave an error code and error message in the rbu handle.
+*/
+static void rbuDeleteOalFile(sqlite3rbu *p){
+ char *zOal = rbuMPrintf(p, "%s-oal", p->zTarget);
+ if( zOal ){
+ sqlite3_vfs *pVfs = sqlite3_vfs_find(0);
+ assert( pVfs && p->rc==SQLITE_OK && p->zErrmsg==0 );
+ pVfs->xDelete(pVfs, zOal, 0);
+ sqlite3_free(zOal);
+ }
+}
+
+/*
+** Allocate a private rbu VFS for the rbu handle passed as the only
+** argument. This VFS will be used unless the call to sqlite3rbu_open()
+** specified a URI with a vfs=? option in place of a target database
+** file name.
+*/
+static void rbuCreateVfs(sqlite3rbu *p){
+ int rnd;
+ char zRnd[64];
+
+ assert( p->rc==SQLITE_OK );
+ sqlite3_randomness(sizeof(int), (void*)&rnd);
+ sqlite3_snprintf(sizeof(zRnd), zRnd, "rbu_vfs_%d", rnd);
+ p->rc = sqlite3rbu_create_vfs(zRnd, 0);
+ if( p->rc==SQLITE_OK ){
+ sqlite3_vfs *pVfs = sqlite3_vfs_find(zRnd);
+ assert( pVfs );
+ p->zVfsName = pVfs->zName;
+ ((rbu_vfs*)pVfs)->pRbu = p;
+ }
+}
+
+/*
+** Destroy the private VFS created for the rbu handle passed as the only
+** argument by an earlier call to rbuCreateVfs().
+*/
+static void rbuDeleteVfs(sqlite3rbu *p){
+ if( p->zVfsName ){
+ sqlite3rbu_destroy_vfs(p->zVfsName);
+ p->zVfsName = 0;
+ }
+}
+
+/*
+** This user-defined SQL function is invoked with a single argument - the
+** name of a table expected to appear in the target database. It returns
+** the number of auxiliary indexes on the table.
+*/
+static void rbuIndexCntFunc(
+ sqlite3_context *pCtx,
+ int nVal,
+ sqlite3_value **apVal
+){
+ sqlite3rbu *p = (sqlite3rbu*)sqlite3_user_data(pCtx);
+ sqlite3_stmt *pStmt = 0;
+ char *zErrmsg = 0;
+ int rc;
+ sqlite3 *db = (rbuIsVacuum(p) ? p->dbRbu : p->dbMain);
+
+ assert( nVal==1 );
+
+ rc = prepareFreeAndCollectError(db, &pStmt, &zErrmsg,
+ sqlite3_mprintf("SELECT count(*) FROM sqlite_schema "
+ "WHERE type='index' AND tbl_name = %Q", sqlite3_value_text(apVal[0]))
+ );
+ if( rc!=SQLITE_OK ){
+ sqlite3_result_error(pCtx, zErrmsg, -1);
+ }else{
+ int nIndex = 0;
+ if( SQLITE_ROW==sqlite3_step(pStmt) ){
+ nIndex = sqlite3_column_int(pStmt, 0);
+ }
+ rc = sqlite3_finalize(pStmt);
+ if( rc==SQLITE_OK ){
+ sqlite3_result_int(pCtx, nIndex);
+ }else{
+ sqlite3_result_error(pCtx, sqlite3_errmsg(db), -1);
+ }
+ }
+
+ sqlite3_free(zErrmsg);
+}
+
+/*
+** If the RBU database contains the rbu_count table, use it to initialize
+** the sqlite3rbu.nPhaseOneStep variable. The schema of the rbu_count table
+** is assumed to contain the same columns as:
+**
+** CREATE TABLE rbu_count(tbl TEXT PRIMARY KEY, cnt INTEGER) WITHOUT ROWID;
+**
+** There should be one row in the table for each data_xxx table in the
+** database. The 'tbl' column should contain the name of a data_xxx table,
+** and the cnt column the number of rows it contains.
+**
+** sqlite3rbu.nPhaseOneStep is initialized to the sum of (1 + nIndex) * cnt
+** for all rows in the rbu_count table, where nIndex is the number of
+** indexes on the corresponding target database table.
+*/
+static void rbuInitPhaseOneSteps(sqlite3rbu *p){
+ if( p->rc==SQLITE_OK ){
+ sqlite3_stmt *pStmt = 0;
+ int bExists = 0; /* True if rbu_count exists */
+
+ p->nPhaseOneStep = -1;
+
+ p->rc = sqlite3_create_function(p->dbRbu,
+ "rbu_index_cnt", 1, SQLITE_UTF8, (void*)p, rbuIndexCntFunc, 0, 0
+ );
+
+ /* Check for the rbu_count table. If it does not exist, or if an error
+ ** occurs, nPhaseOneStep will be left set to -1. */
+ if( p->rc==SQLITE_OK ){
+ p->rc = prepareAndCollectError(p->dbRbu, &pStmt, &p->zErrmsg,
+ "SELECT 1 FROM sqlite_schema WHERE tbl_name = 'rbu_count'"
+ );
+ }
+ if( p->rc==SQLITE_OK ){
+ if( SQLITE_ROW==sqlite3_step(pStmt) ){
+ bExists = 1;
+ }
+ p->rc = sqlite3_finalize(pStmt);
+ }
+
+ if( p->rc==SQLITE_OK && bExists ){
+ p->rc = prepareAndCollectError(p->dbRbu, &pStmt, &p->zErrmsg,
+ "SELECT sum(cnt * (1 + rbu_index_cnt(rbu_target_name(tbl))))"
+ "FROM rbu_count"
+ );
+ if( p->rc==SQLITE_OK ){
+ if( SQLITE_ROW==sqlite3_step(pStmt) ){
+ p->nPhaseOneStep = sqlite3_column_int64(pStmt, 0);
+ }
+ p->rc = sqlite3_finalize(pStmt);
+ }
+ }
+ }
+}
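+
+/* A worked example of the rbu_count arithmetic above (all names and
+** figures hypothetical): if the RBU database contains
+**
+**   INSERT INTO rbu_count VALUES('data_t1', 1000);  -- target t1: 2 indexes
+**   INSERT INTO rbu_count VALUES('data_t2', 500);   -- target t2: 0 indexes
+**
+** then nPhaseOneStep = (1+2)*1000 + (1+0)*500 = 3500. */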
+
+
+static sqlite3rbu *openRbuHandle(
+ const char *zTarget,
+ const char *zRbu,
+ const char *zState
+){
+ sqlite3rbu *p;
+ size_t nTarget = zTarget ? strlen(zTarget) : 0;
+ size_t nRbu = strlen(zRbu);
+ size_t nByte = sizeof(sqlite3rbu) + nTarget+1 + nRbu+1;
+
+ p = (sqlite3rbu*)sqlite3_malloc64(nByte);
+ if( p ){
+ RbuState *pState = 0;
+
+ /* Create the custom VFS. */
+ memset(p, 0, sizeof(sqlite3rbu));
+ rbuCreateVfs(p);
+
+ /* Open the target, RBU and state databases */
+ if( p->rc==SQLITE_OK ){
+ char *pCsr = (char*)&p[1];
+ int bRetry = 0;
+ if( zTarget ){
+ p->zTarget = pCsr;
+ memcpy(p->zTarget, zTarget, nTarget+1);
+ pCsr += nTarget+1;
+ }
+ p->zRbu = pCsr;
+ memcpy(p->zRbu, zRbu, nRbu+1);
+ pCsr += nRbu+1;
+ if( zState ){
+ p->zState = rbuMPrintf(p, "%s", zState);
+ }
+
+ /* If the first attempt to open the database file fails and the bRetry
+      ** flag is set, this means that the db was not opened because it seemed
+ ** to be a wal-mode db. But, this may have happened due to an earlier
+ ** RBU vacuum operation leaving an old wal file in the directory.
+ ** If this is the case, it will have been checkpointed and deleted
+ ** when the handle was closed and a second attempt to open the
+ ** database may succeed. */
+ rbuOpenDatabase(p, &bRetry);
+ if( bRetry ){
+ rbuOpenDatabase(p, 0);
+ }
+ }
+
+ if( p->rc==SQLITE_OK ){
+ pState = rbuLoadState(p);
+ assert( pState || p->rc!=SQLITE_OK );
+ if( p->rc==SQLITE_OK ){
+
+ if( pState->eStage==0 ){
+ rbuDeleteOalFile(p);
+ rbuInitPhaseOneSteps(p);
+ p->eStage = RBU_STAGE_OAL;
+ }else{
+ p->eStage = pState->eStage;
+ p->nPhaseOneStep = pState->nPhaseOneStep;
+ }
+ p->nProgress = pState->nProgress;
+ p->iOalSz = pState->iOalSz;
+ }
+ }
+ assert( p->rc!=SQLITE_OK || p->eStage!=0 );
+
+ if( p->rc==SQLITE_OK && p->pTargetFd->pWalFd ){
+ if( p->eStage==RBU_STAGE_OAL ){
+ p->rc = SQLITE_ERROR;
+ p->zErrmsg = sqlite3_mprintf("cannot update wal mode database");
+ }else if( p->eStage==RBU_STAGE_MOVE ){
+ p->eStage = RBU_STAGE_CKPT;
+ p->nStep = 0;
+ }
+ }
+
+ if( p->rc==SQLITE_OK
+ && (p->eStage==RBU_STAGE_OAL || p->eStage==RBU_STAGE_MOVE)
+ && pState->eStage!=0
+ ){
+ rbu_file *pFd = (rbuIsVacuum(p) ? p->pRbuFd : p->pTargetFd);
+ if( pFd->iCookie!=pState->iCookie ){
+ /* At this point (pTargetFd->iCookie) contains the value of the
+ ** change-counter cookie (the thing that gets incremented when a
+ ** transaction is committed in rollback mode) currently stored on
+ ** page 1 of the database file. */
+ p->rc = SQLITE_BUSY;
+ p->zErrmsg = sqlite3_mprintf("database modified during rbu %s",
+ (rbuIsVacuum(p) ? "vacuum" : "update")
+ );
+ }
+ }
+
+ if( p->rc==SQLITE_OK ){
+ if( p->eStage==RBU_STAGE_OAL ){
+ sqlite3 *db = p->dbMain;
+ p->rc = sqlite3_exec(p->dbRbu, "BEGIN", 0, 0, &p->zErrmsg);
+
+ /* Point the object iterator at the first object */
+ if( p->rc==SQLITE_OK ){
+ p->rc = rbuObjIterFirst(p, &p->objiter);
+ }
+
+ /* If the RBU database contains no data_xxx tables, declare the RBU
+ ** update finished. */
+ if( p->rc==SQLITE_OK && p->objiter.zTbl==0 ){
+ p->rc = SQLITE_DONE;
+ p->eStage = RBU_STAGE_DONE;
+ }else{
+ if( p->rc==SQLITE_OK && pState->eStage==0 && rbuIsVacuum(p) ){
+ rbuCopyPragma(p, "page_size");
+ rbuCopyPragma(p, "auto_vacuum");
+ }
+
+        /* Open transactions on both databases. The *-oal file is opened or
+ ** created at this point. */
+ if( p->rc==SQLITE_OK ){
+ p->rc = sqlite3_exec(db, "BEGIN IMMEDIATE", 0, 0, &p->zErrmsg);
+ }
+
+ /* Check if the main database is a zipvfs db. If it is, set the upper
+ ** level pager to use "journal_mode=off". This prevents it from
+ ** generating a large journal using a temp file. */
+ if( p->rc==SQLITE_OK ){
+ int frc = sqlite3_file_control(db, "main", SQLITE_FCNTL_ZIPVFS, 0);
+ if( frc==SQLITE_OK ){
+ p->rc = sqlite3_exec(
+ db, "PRAGMA journal_mode=off",0,0,&p->zErrmsg);
+ }
+ }
+
+ if( p->rc==SQLITE_OK ){
+ rbuSetupOal(p, pState);
+ }
+ }
+ }else if( p->eStage==RBU_STAGE_MOVE ){
+ /* no-op */
+ }else if( p->eStage==RBU_STAGE_CKPT ){
+ rbuSetupCheckpoint(p, pState);
+ }else if( p->eStage==RBU_STAGE_DONE ){
+ p->rc = SQLITE_DONE;
+ }else{
+ p->rc = SQLITE_CORRUPT;
+ }
+ }
+
+ rbuFreeState(pState);
+ }
+
+ return p;
+}
+
+/*
+** Allocate and return an RBU handle with all fields zeroed except for the
+** error code, which is set to SQLITE_MISUSE.
+*/
+static sqlite3rbu *rbuMisuseError(void){
+ sqlite3rbu *pRet;
+ pRet = sqlite3_malloc64(sizeof(sqlite3rbu));
+ if( pRet ){
+ memset(pRet, 0, sizeof(sqlite3rbu));
+ pRet->rc = SQLITE_MISUSE;
+ }
+ return pRet;
+}
+
+/*
+** Open and return a new RBU handle.
+*/
+SQLITE_API sqlite3rbu *sqlite3rbu_open(
+ const char *zTarget,
+ const char *zRbu,
+ const char *zState
+){
+ if( zTarget==0 || zRbu==0 ){ return rbuMisuseError(); }
+ return openRbuHandle(zTarget, zRbu, zState);
+}
+
+/*
+** Open a handle to begin or resume an RBU VACUUM operation.
+*/
+SQLITE_API sqlite3rbu *sqlite3rbu_vacuum(
+ const char *zTarget,
+ const char *zState
+){
+ if( zTarget==0 ){ return rbuMisuseError(); }
+ if( zState ){
+ int n = strlen(zState);
+ if( n>=7 && 0==memcmp("-vactmp", &zState[n-7], 7) ){
+ return rbuMisuseError();
+ }
+ }
+ return openRbuHandle(0, zTarget, zState);
+}
+
+/*
+** Return the database handle used by pRbu.
+*/
+SQLITE_API sqlite3 *sqlite3rbu_db(sqlite3rbu *pRbu, int bRbu){
+ sqlite3 *db = 0;
+ if( pRbu ){
+ db = (bRbu ? pRbu->dbRbu : pRbu->dbMain);
+ }
+ return db;
+}
+
+
+/*
+** If the error code currently stored in the RBU handle is SQLITE_CONSTRAINT,
+** then edit any error message string so as to remove all occurrences of
+** the pattern "rbu_imp_[0-9]*".
+*/
+static void rbuEditErrmsg(sqlite3rbu *p){
+ if( p->rc==SQLITE_CONSTRAINT && p->zErrmsg ){
+ unsigned int i;
+ size_t nErrmsg = strlen(p->zErrmsg);
+    for(i=0; nErrmsg>8 && i<(nErrmsg-8); i++){
+ if( memcmp(&p->zErrmsg[i], "rbu_imp_", 8)==0 ){
+ int nDel = 8;
+ while( p->zErrmsg[i+nDel]>='0' && p->zErrmsg[i+nDel]<='9' ) nDel++;
+ memmove(&p->zErrmsg[i], &p->zErrmsg[i+nDel], nErrmsg + 1 - i - nDel);
+ nErrmsg -= nDel;
+ }
+ }
+ }
+}
+
+/*
+** Close the RBU handle.
+*/
+SQLITE_API int sqlite3rbu_close(sqlite3rbu *p, char **pzErrmsg){
+ int rc;
+ if( p ){
+
+ /* Commit the transaction to the *-oal file. */
+ if( p->rc==SQLITE_OK && p->eStage==RBU_STAGE_OAL ){
+ p->rc = sqlite3_exec(p->dbMain, "COMMIT", 0, 0, &p->zErrmsg);
+ }
+
+ /* Sync the db file if currently doing an incremental checkpoint */
+ if( p->rc==SQLITE_OK && p->eStage==RBU_STAGE_CKPT ){
+ sqlite3_file *pDb = p->pTargetFd->pReal;
+ p->rc = pDb->pMethods->xSync(pDb, SQLITE_SYNC_NORMAL);
+ }
+
+ rbuSaveState(p, p->eStage);
+
+ if( p->rc==SQLITE_OK && p->eStage==RBU_STAGE_OAL ){
+ p->rc = sqlite3_exec(p->dbRbu, "COMMIT", 0, 0, &p->zErrmsg);
+ }
+
+ /* Close any open statement handles. */
+ rbuObjIterFinalize(&p->objiter);
+
+ /* If this is an RBU vacuum handle and the vacuum has either finished
+ ** successfully or encountered an error, delete the contents of the
+ ** state table. This causes the next call to sqlite3rbu_vacuum()
+ ** specifying the current target and state databases to start a new
+ ** vacuum from scratch. */
+ if( rbuIsVacuum(p) && p->rc!=SQLITE_OK && p->dbRbu ){
+ int rc2 = sqlite3_exec(p->dbRbu, "DELETE FROM stat.rbu_state", 0, 0, 0);
+ if( p->rc==SQLITE_DONE && rc2!=SQLITE_OK ) p->rc = rc2;
+ }
+
+ /* Close the open database handle and VFS object. */
+ sqlite3_close(p->dbRbu);
+ sqlite3_close(p->dbMain);
+ assert( p->szTemp==0 );
+ rbuDeleteVfs(p);
+ sqlite3_free(p->aBuf);
+ sqlite3_free(p->aFrame);
+
+ rbuEditErrmsg(p);
+ rc = p->rc;
+ if( pzErrmsg ){
+ *pzErrmsg = p->zErrmsg;
+ }else{
+ sqlite3_free(p->zErrmsg);
+ }
+ sqlite3_free(p->zState);
+ sqlite3_free(p);
+ }else{
+ rc = SQLITE_NOMEM;
+ *pzErrmsg = 0;
+ }
+ return rc;
+}
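+
+/* A minimal usage sketch for the API above (database paths hypothetical).
+** sqlite3rbu_step() returns SQLITE_OK while there is more work to do, and
+** on success sqlite3rbu_close() returns SQLITE_DONE:
+**
+**   char *zErrmsg = 0;
+**   sqlite3rbu *pRbu = sqlite3rbu_open("target.db", "update.db", 0);
+**   while( sqlite3rbu_step(pRbu)==SQLITE_OK ){ }
+**   if( sqlite3rbu_close(pRbu, &zErrmsg)!=SQLITE_DONE ){
+**     fprintf(stderr, "rbu error: %s\n", zErrmsg ? zErrmsg : "OOM");
+**   }
+**   sqlite3_free(zErrmsg);
+*/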
+
+/*
+** Return the total number of key-value operations (inserts, deletes or
+** updates) that have been performed on the target database since the
+** current RBU update was started.
+*/
+SQLITE_API sqlite3_int64 sqlite3rbu_progress(sqlite3rbu *pRbu){
+ return pRbu->nProgress;
+}
+
+/*
+** Return permyriadage progress indications for the two main stages of
+** an RBU update.
+*/
+SQLITE_API void sqlite3rbu_bp_progress(sqlite3rbu *p, int *pnOne, int *pnTwo){
+ const int MAX_PROGRESS = 10000;
+ switch( p->eStage ){
+ case RBU_STAGE_OAL:
+ if( p->nPhaseOneStep>0 ){
+ *pnOne = (int)(MAX_PROGRESS * (i64)p->nProgress/(i64)p->nPhaseOneStep);
+ }else{
+ *pnOne = -1;
+ }
+ *pnTwo = 0;
+ break;
+
+ case RBU_STAGE_MOVE:
+ *pnOne = MAX_PROGRESS;
+ *pnTwo = 0;
+ break;
+
+ case RBU_STAGE_CKPT:
+ *pnOne = MAX_PROGRESS;
+ *pnTwo = (int)(MAX_PROGRESS * (i64)p->nStep / (i64)p->nFrame);
+ break;
+
+ case RBU_STAGE_DONE:
+ *pnOne = MAX_PROGRESS;
+ *pnTwo = MAX_PROGRESS;
+ break;
+
+ default:
+ assert( 0 );
+ }
+}
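+
+/* Usage sketch (handle p hypothetical): both outputs are permyriadage
+** values in the range 0..10000, i.e. hundredths of a percent:
+**
+**   int nOne, nTwo;
+**   sqlite3rbu_bp_progress(p, &nOne, &nTwo);
+**   printf("phase one %.2f%%, phase two %.2f%%\n", nOne/100.0, nTwo/100.0);
+**
+** nOne is reported as -1 during the first stage if the RBU database
+** contains no usable rbu_count table. */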
+
+/*
+** Return the current state of the RBU vacuum or update operation.
+*/
+SQLITE_API int sqlite3rbu_state(sqlite3rbu *p){
+ int aRes[] = {
+ 0, SQLITE_RBU_STATE_OAL, SQLITE_RBU_STATE_MOVE,
+ 0, SQLITE_RBU_STATE_CHECKPOINT, SQLITE_RBU_STATE_DONE
+ };
+
+ assert( RBU_STAGE_OAL==1 );
+ assert( RBU_STAGE_MOVE==2 );
+ assert( RBU_STAGE_CKPT==4 );
+ assert( RBU_STAGE_DONE==5 );
+ assert( aRes[RBU_STAGE_OAL]==SQLITE_RBU_STATE_OAL );
+ assert( aRes[RBU_STAGE_MOVE]==SQLITE_RBU_STATE_MOVE );
+ assert( aRes[RBU_STAGE_CKPT]==SQLITE_RBU_STATE_CHECKPOINT );
+ assert( aRes[RBU_STAGE_DONE]==SQLITE_RBU_STATE_DONE );
+
+ if( p->rc!=SQLITE_OK && p->rc!=SQLITE_DONE ){
+ return SQLITE_RBU_STATE_ERROR;
+ }else{
+ assert( p->rc!=SQLITE_DONE || p->eStage==RBU_STAGE_DONE );
+ assert( p->eStage==RBU_STAGE_OAL
+ || p->eStage==RBU_STAGE_MOVE
+ || p->eStage==RBU_STAGE_CKPT
+ || p->eStage==RBU_STAGE_DONE
+ );
+ return aRes[p->eStage];
+ }
+}
+
+SQLITE_API int sqlite3rbu_savestate(sqlite3rbu *p){
+ int rc = p->rc;
+ if( rc==SQLITE_DONE ) return SQLITE_OK;
+
+ assert( p->eStage>=RBU_STAGE_OAL && p->eStage<=RBU_STAGE_DONE );
+ if( p->eStage==RBU_STAGE_OAL ){
+ assert( rc!=SQLITE_DONE );
+ if( rc==SQLITE_OK ) rc = sqlite3_exec(p->dbMain, "COMMIT", 0, 0, 0);
+ }
+
+ /* Sync the db file */
+ if( rc==SQLITE_OK && p->eStage==RBU_STAGE_CKPT ){
+ sqlite3_file *pDb = p->pTargetFd->pReal;
+ rc = pDb->pMethods->xSync(pDb, SQLITE_SYNC_NORMAL);
+ }
+
+ p->rc = rc;
+ rbuSaveState(p, p->eStage);
+ rc = p->rc;
+
+ if( p->eStage==RBU_STAGE_OAL ){
+ assert( rc!=SQLITE_DONE );
+ if( rc==SQLITE_OK ) rc = sqlite3_exec(p->dbRbu, "COMMIT", 0, 0, 0);
+ if( rc==SQLITE_OK ){
+ const char *zBegin = rbuIsVacuum(p) ? "BEGIN" : "BEGIN IMMEDIATE";
+ rc = sqlite3_exec(p->dbRbu, zBegin, 0, 0, 0);
+ }
+ if( rc==SQLITE_OK ) rc = sqlite3_exec(p->dbMain, "BEGIN IMMEDIATE", 0, 0,0);
+ }
+
+ p->rc = rc;
+ return rc;
+}
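+
+/* A hedged sketch of incremental use (the step budget of 1000 is
+** arbitrary): run a bounded number of steps, then persist the current
+** state so that a later process can resume from the same point:
+**
+**   int i, rc = SQLITE_OK;
+**   for(i=0; i<1000 && rc==SQLITE_OK; i++) rc = sqlite3rbu_step(pRbu);
+**   if( rc==SQLITE_OK ) rc = sqlite3rbu_savestate(pRbu);
+**
+** The handle may then be closed and reopened later, with the same state
+** database, to continue the update. */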
+
+/**************************************************************************
+** Beginning of RBU VFS shim methods. The VFS shim modifies the behaviour
+** of a standard VFS in the following ways:
+**
+** 1. Whenever the first page of a main database file is read or
+** written, the value of the change-counter cookie is stored in
+** rbu_file.iCookie. Similarly, the value of the "write-version"
+** database header field is stored in rbu_file.iWriteVer. This ensures
+** that the values are always trustworthy within an open transaction.
+**
+** 2. Whenever an SQLITE_OPEN_WAL file is opened, the (rbu_file.pWalFd)
+** member variable of the associated database file descriptor is set
+** to point to the new file. A mutex protected linked list of all main
+** db fds opened using a particular RBU VFS is maintained at
+** rbu_vfs.pMain to facilitate this.
+**
+** 3. Using a new file-control "SQLITE_FCNTL_RBU", a main db rbu_file
+** object can be marked as the target database of an RBU update. This
+** turns on the following extra special behaviour:
+**
+** 3a. If xAccess() is called to check if there exists a *-wal file
+** associated with an RBU target database currently in RBU_STAGE_OAL
+** stage (preparing the *-oal file), the following special handling
+** applies:
+**
+** * if the *-wal file does exist, return SQLITE_CANTOPEN. An RBU
+** target database may not be in wal mode already.
+**
+** * if the *-wal file does not exist, set the output parameter to
+** non-zero (to tell SQLite that it does exist) anyway.
+**
+** Then, when xOpen() is called to open the *-wal file associated with
+** the RBU target in RBU_STAGE_OAL stage, instead of opening the *-wal
+** file, the rbu vfs opens the corresponding *-oal file instead.
+**
+** 3b. The *-shm pages returned by xShmMap() for a target db file in
+** RBU_STAGE_OAL mode are actually stored in heap memory. This is to
+** avoid creating a *-shm file on disk. Additionally, xShmLock() calls
+** are no-ops on target database files in RBU_STAGE_OAL mode. This is
+** because assert() statements in some VFS implementations fail if
+** xShmLock() is called before xShmMap().
+**
+** 3c. If an EXCLUSIVE lock is attempted on a target database file in any
+** mode except RBU_STAGE_DONE (all work completed and checkpointed), it
+** fails with an SQLITE_BUSY error. This is to stop RBU connections
+** from automatically checkpointing a *-wal (or *-oal) file from within
+** sqlite3_close().
+**
+** 3d. In RBU_STAGE_CAPTURE mode, all xRead() calls on the wal file, and
+** all xWrite() calls on the target database file perform no IO.
+** Instead the frame and page numbers that would be read and written
+** are recorded. Additionally, successful attempts to obtain exclusive
+** xShmLock() WRITER, CHECKPOINTER and READ0 locks on the target
+** database file are recorded. xShmLock() calls to unlock the same
+** locks are no-ops (so that once obtained, these locks are never
+** relinquished). Finally, calls to xSync() on the target database
+** file fail with SQLITE_INTERNAL errors.
+*/
+
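+/*
+** Release any exclusive shm locks held by this RBU connection on the
+** main database file-handle passed as the only argument, as recorded by
+** the sqlite3rbu.mLock bitmask, then clear the bitmask.
+*/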
+static void rbuUnlockShm(rbu_file *p){
+ assert( p->openFlags & SQLITE_OPEN_MAIN_DB );
+ if( p->pRbu ){
+ int (*xShmLock)(sqlite3_file*,int,int,int) = p->pReal->pMethods->xShmLock;
+ int i;
+ for(i=0; i<SQLITE_SHM_NLOCK;i++){
+ if( (1<<i) & p->pRbu->mLock ){
+ xShmLock(p->pReal, i, 1, SQLITE_SHM_UNLOCK|SQLITE_SHM_EXCLUSIVE);
+ }
+ }
+ p->pRbu->mLock = 0;
+ }
+}
+
+/*
+** Adjust the accounting of the total size of all temporary files created
+** by this RBU handle: add the difference between the new and previous
+** sizes of file pFd to sqlite3rbu.szTemp. Return SQLITE_FULL if the new
+** total exceeds the configured limit (sqlite3rbu.szTempLimit), or
+** SQLITE_OK otherwise.
+*/
+static int rbuUpdateTempSize(rbu_file *pFd, sqlite3_int64 nNew){
+ sqlite3rbu *pRbu = pFd->pRbu;
+ i64 nDiff = nNew - pFd->sz;
+ pRbu->szTemp += nDiff;
+ pFd->sz = nNew;
+ assert( pRbu->szTemp>=0 );
+ if( pRbu->szTempLimit && pRbu->szTemp>pRbu->szTempLimit ) return SQLITE_FULL;
+ return SQLITE_OK;
+}
+
+/*
+** Add an item to the main-db lists, if it is not already present.
+**
+** There are two main-db lists. One for all file descriptors, and one
+** for all file descriptors with rbu_file.pDb!=0. If the argument has
+** rbu_file.pDb!=0, then it is assumed to already be present on the
+** main list and is only added to the pDb!=0 list.
+*/
+static void rbuMainlistAdd(rbu_file *p){
+ rbu_vfs *pRbuVfs = p->pRbuVfs;
+ rbu_file *pIter;
+ assert( (p->openFlags & SQLITE_OPEN_MAIN_DB) );
+ sqlite3_mutex_enter(pRbuVfs->mutex);
+ if( p->pRbu==0 ){
+    for(pIter=pRbuVfs->pMain; pIter; pIter=pIter->pMainNext){}
+ p->pMainNext = pRbuVfs->pMain;
+ pRbuVfs->pMain = p;
+ }else{
+ for(pIter=pRbuVfs->pMainRbu; pIter && pIter!=p; pIter=pIter->pMainRbuNext){}
+ if( pIter==0 ){
+ p->pMainRbuNext = pRbuVfs->pMainRbu;
+ pRbuVfs->pMainRbu = p;
+ }
+ }
+ sqlite3_mutex_leave(pRbuVfs->mutex);
+}
+
+/*
+** Remove an item from the main-db lists.
+*/
+static void rbuMainlistRemove(rbu_file *p){
+ rbu_file **pp;
+ sqlite3_mutex_enter(p->pRbuVfs->mutex);
+ for(pp=&p->pRbuVfs->pMain; *pp && *pp!=p; pp=&((*pp)->pMainNext)){}
+ if( *pp ) *pp = p->pMainNext;
+ p->pMainNext = 0;
+ for(pp=&p->pRbuVfs->pMainRbu; *pp && *pp!=p; pp=&((*pp)->pMainRbuNext)){}
+ if( *pp ) *pp = p->pMainRbuNext;
+ p->pMainRbuNext = 0;
+ sqlite3_mutex_leave(p->pRbuVfs->mutex);
+}
+
+/*
+** Given that zWal points to a buffer containing a wal file name passed to
+** either the xOpen() or xAccess() VFS method, search the main-db list for
+** a file-handle opened by the same database connection on the corresponding
+** database file.
+**
+** If parameter bRbu is true, only search for file-descriptors with
+** rbu_file.pDb!=0.
+*/
+static rbu_file *rbuFindMaindb(rbu_vfs *pRbuVfs, const char *zWal, int bRbu){
+ rbu_file *pDb;
+ sqlite3_mutex_enter(pRbuVfs->mutex);
+ if( bRbu ){
+ for(pDb=pRbuVfs->pMainRbu; pDb && pDb->zWal!=zWal; pDb=pDb->pMainRbuNext){}
+ }else{
+ for(pDb=pRbuVfs->pMain; pDb && pDb->zWal!=zWal; pDb=pDb->pMainNext){}
+ }
+ sqlite3_mutex_leave(pRbuVfs->mutex);
+ return pDb;
+}
+
+/*
+** Close an rbu file.
+*/
+static int rbuVfsClose(sqlite3_file *pFile){
+ rbu_file *p = (rbu_file*)pFile;
+ int rc;
+ int i;
+
+ /* Free the contents of the apShm[] array. And the array itself. */
+ for(i=0; i<p->nShm; i++){
+ sqlite3_free(p->apShm[i]);
+ }
+ sqlite3_free(p->apShm);
+ p->apShm = 0;
+ sqlite3_free(p->zDel);
+
+ if( p->openFlags & SQLITE_OPEN_MAIN_DB ){
+ rbuMainlistRemove(p);
+ rbuUnlockShm(p);
+ p->pReal->pMethods->xShmUnmap(p->pReal, 0);
+ }
+ else if( (p->openFlags & SQLITE_OPEN_DELETEONCLOSE) && p->pRbu ){
+ rbuUpdateTempSize(p, 0);
+ }
+ assert( p->pMainNext==0 && p->pRbuVfs->pMain!=p );
+
+ /* Close the underlying file handle */
+ rc = p->pReal->pMethods->xClose(p->pReal);
+ return rc;
+}
+
+
+/*
+** Read and return an unsigned 32-bit big-endian integer from the buffer
+** passed as the only argument.
+*/
+static u32 rbuGetU32(u8 *aBuf){
+ return ((u32)aBuf[0] << 24)
+ + ((u32)aBuf[1] << 16)
+ + ((u32)aBuf[2] << 8)
+ + ((u32)aBuf[3]);
+}
+
+/*
+** Write an unsigned 32-bit value in big-endian format to the supplied
+** buffer.
+*/
+static void rbuPutU32(u8 *aBuf, u32 iVal){
+ aBuf[0] = (iVal >> 24) & 0xFF;
+ aBuf[1] = (iVal >> 16) & 0xFF;
+ aBuf[2] = (iVal >> 8) & 0xFF;
+ aBuf[3] = (iVal >> 0) & 0xFF;
+}
+
+static void rbuPutU16(u8 *aBuf, u16 iVal){
+ aBuf[0] = (iVal >> 8) & 0xFF;
+ aBuf[1] = (iVal >> 0) & 0xFF;
+}
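+
+/* Round-trip sketch for the helpers above: rbuPutU32() followed by
+** rbuGetU32() is the identity on any host, since both fix big-endian
+** byte order explicitly:
+**
+**   u8 a[4];
+**   rbuPutU32(a, 0x01020304);   (a[] now holds 0x01, 0x02, 0x03, 0x04)
+**   assert( rbuGetU32(a)==0x01020304 );
+*/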
+
+/*
+** Read data from an rbuVfs-file.
+*/
+static int rbuVfsRead(
+ sqlite3_file *pFile,
+ void *zBuf,
+ int iAmt,
+ sqlite_int64 iOfst
+){
+ rbu_file *p = (rbu_file*)pFile;
+ sqlite3rbu *pRbu = p->pRbu;
+ int rc;
+
+ if( pRbu && pRbu->eStage==RBU_STAGE_CAPTURE ){
+ assert( p->openFlags & SQLITE_OPEN_WAL );
+ rc = rbuCaptureWalRead(p->pRbu, iOfst, iAmt);
+ }else{
+ if( pRbu && pRbu->eStage==RBU_STAGE_OAL
+ && (p->openFlags & SQLITE_OPEN_WAL)
+ && iOfst>=pRbu->iOalSz
+ ){
+ rc = SQLITE_OK;
+ memset(zBuf, 0, iAmt);
+ }else{
+ rc = p->pReal->pMethods->xRead(p->pReal, zBuf, iAmt, iOfst);
+#if 1
+ /* If this is being called to read the first page of the target
+ ** database as part of an rbu vacuum operation, synthesize the
+ ** contents of the first page if it does not yet exist. Otherwise,
+ ** SQLite will not check for a *-wal file. */
+ if( pRbu && rbuIsVacuum(pRbu)
+ && rc==SQLITE_IOERR_SHORT_READ && iOfst==0
+ && (p->openFlags & SQLITE_OPEN_MAIN_DB)
+ && pRbu->rc==SQLITE_OK
+ ){
+ sqlite3_file *pFd = (sqlite3_file*)pRbu->pRbuFd;
+ rc = pFd->pMethods->xRead(pFd, zBuf, iAmt, iOfst);
+ if( rc==SQLITE_OK ){
+ u8 *aBuf = (u8*)zBuf;
+ u32 iRoot = rbuGetU32(&aBuf[52]) ? 1 : 0;
+ rbuPutU32(&aBuf[52], iRoot); /* largest root page number */
+ rbuPutU32(&aBuf[36], 0); /* number of free pages */
+ rbuPutU32(&aBuf[32], 0); /* first page on free list trunk */
+ rbuPutU32(&aBuf[28], 1); /* size of db file in pages */
+ rbuPutU32(&aBuf[24], pRbu->pRbuFd->iCookie+1); /* Change counter */
+
+ if( iAmt>100 ){
+ memset(&aBuf[100], 0, iAmt-100);
+ rbuPutU16(&aBuf[105], iAmt & 0xFFFF);
+ aBuf[100] = 0x0D;
+ }
+ }
+ }
+#endif
+ }
+ if( rc==SQLITE_OK && iOfst==0 && (p->openFlags & SQLITE_OPEN_MAIN_DB) ){
+ /* These look like magic numbers. But they are stable, as they are part
+ ** of the definition of the SQLite file format, which may not change. */
+ u8 *pBuf = (u8*)zBuf;
+ p->iCookie = rbuGetU32(&pBuf[24]);
+ p->iWriteVer = pBuf[19];
+ }
+ }
+ return rc;
+}
+
+/*
+** Write data to an rbuVfs-file.
+*/
+static int rbuVfsWrite(
+ sqlite3_file *pFile,
+ const void *zBuf,
+ int iAmt,
+ sqlite_int64 iOfst
+){
+ rbu_file *p = (rbu_file*)pFile;
+ sqlite3rbu *pRbu = p->pRbu;
+ int rc;
+
+ if( pRbu && pRbu->eStage==RBU_STAGE_CAPTURE ){
+ assert( p->openFlags & SQLITE_OPEN_MAIN_DB );
+ rc = rbuCaptureDbWrite(p->pRbu, iOfst);
+ }else{
+ if( pRbu ){
+ if( pRbu->eStage==RBU_STAGE_OAL
+ && (p->openFlags & SQLITE_OPEN_WAL)
+ && iOfst>=pRbu->iOalSz
+ ){
+ pRbu->iOalSz = iAmt + iOfst;
+ }else if( p->openFlags & SQLITE_OPEN_DELETEONCLOSE ){
+ i64 szNew = iAmt+iOfst;
+ if( szNew>p->sz ){
+ rc = rbuUpdateTempSize(p, szNew);
+ if( rc!=SQLITE_OK ) return rc;
+ }
+ }
+ }
+ rc = p->pReal->pMethods->xWrite(p->pReal, zBuf, iAmt, iOfst);
+ if( rc==SQLITE_OK && iOfst==0 && (p->openFlags & SQLITE_OPEN_MAIN_DB) ){
+ /* These look like magic numbers. But they are stable, as they are part
+ ** of the definition of the SQLite file format, which may not change. */
+ u8 *pBuf = (u8*)zBuf;
+ p->iCookie = rbuGetU32(&pBuf[24]);
+ p->iWriteVer = pBuf[19];
+ }
+ }
+ return rc;
+}
+
+/*
+** Truncate an rbuVfs-file.
+*/
+static int rbuVfsTruncate(sqlite3_file *pFile, sqlite_int64 size){
+ rbu_file *p = (rbu_file*)pFile;
+ if( (p->openFlags & SQLITE_OPEN_DELETEONCLOSE) && p->pRbu ){
+ int rc = rbuUpdateTempSize(p, size);
+ if( rc!=SQLITE_OK ) return rc;
+ }
+ return p->pReal->pMethods->xTruncate(p->pReal, size);
+}
+
+/*
+** Sync an rbuVfs-file.
+*/
+static int rbuVfsSync(sqlite3_file *pFile, int flags){
+ rbu_file *p = (rbu_file *)pFile;
+ if( p->pRbu && p->pRbu->eStage==RBU_STAGE_CAPTURE ){
+ if( p->openFlags & SQLITE_OPEN_MAIN_DB ){
+ return SQLITE_INTERNAL;
+ }
+ return SQLITE_OK;
+ }
+ return p->pReal->pMethods->xSync(p->pReal, flags);
+}
+
+/*
+** Return the current file-size of an rbuVfs-file.
+*/
+static int rbuVfsFileSize(sqlite3_file *pFile, sqlite_int64 *pSize){
+ rbu_file *p = (rbu_file *)pFile;
+ int rc;
+ rc = p->pReal->pMethods->xFileSize(p->pReal, pSize);
+
+ /* If this is an RBU vacuum operation and this is the target database,
+ ** pretend that it has at least one page. Otherwise, SQLite will not
+  ** check for the existence of a *-wal file. rbuVfsRead() contains
+ ** similar logic. */
+ if( rc==SQLITE_OK && *pSize==0
+ && p->pRbu && rbuIsVacuum(p->pRbu)
+ && (p->openFlags & SQLITE_OPEN_MAIN_DB)
+ ){
+ *pSize = 1024;
+ }
+ return rc;
+}
+
+/*
+** Lock an rbuVfs-file.
+*/
+static int rbuVfsLock(sqlite3_file *pFile, int eLock){
+ rbu_file *p = (rbu_file*)pFile;
+ sqlite3rbu *pRbu = p->pRbu;
+ int rc = SQLITE_OK;
+
+ assert( p->openFlags & (SQLITE_OPEN_MAIN_DB|SQLITE_OPEN_TEMP_DB) );
+ if( eLock==SQLITE_LOCK_EXCLUSIVE
+ && (p->bNolock || (pRbu && pRbu->eStage!=RBU_STAGE_DONE))
+ ){
+    /* Do not allow EXCLUSIVE locks. Preventing SQLite from taking an
+    ** EXCLUSIVE lock stops it from checkpointing the database from within
+    ** sqlite3_close(). */
+ rc = SQLITE_BUSY;
+ }else{
+ rc = p->pReal->pMethods->xLock(p->pReal, eLock);
+ }
+
+ return rc;
+}
+
+/*
+** Unlock an rbuVfs-file.
+*/
+static int rbuVfsUnlock(sqlite3_file *pFile, int eLock){
+ rbu_file *p = (rbu_file *)pFile;
+ return p->pReal->pMethods->xUnlock(p->pReal, eLock);
+}
+
+/*
+** Check if another file-handle holds a RESERVED lock on an rbuVfs-file.
+*/
+static int rbuVfsCheckReservedLock(sqlite3_file *pFile, int *pResOut){
+ rbu_file *p = (rbu_file *)pFile;
+ return p->pReal->pMethods->xCheckReservedLock(p->pReal, pResOut);
+}
+
+/*
+** File control method. For custom operations on an rbuVfs-file.
+*/
+static int rbuVfsFileControl(sqlite3_file *pFile, int op, void *pArg){
+ rbu_file *p = (rbu_file *)pFile;
+ int (*xControl)(sqlite3_file*,int,void*) = p->pReal->pMethods->xFileControl;
+ int rc;
+
+ assert( p->openFlags & (SQLITE_OPEN_MAIN_DB|SQLITE_OPEN_TEMP_DB)
+ || p->openFlags & (SQLITE_OPEN_TRANSIENT_DB|SQLITE_OPEN_TEMP_JOURNAL)
+ );
+ if( op==SQLITE_FCNTL_RBU ){
+ sqlite3rbu *pRbu = (sqlite3rbu*)pArg;
+
+ /* First try to find another RBU vfs lower down in the vfs stack. If
+ ** one is found, this vfs will operate in pass-through mode. The lower
+ ** level vfs will do the special RBU handling. */
+ rc = xControl(p->pReal, op, pArg);
+
+ if( rc==SQLITE_NOTFOUND ){
+ /* Now search for a zipvfs instance lower down in the VFS stack. If
+ ** one is found, this is an error. */
+ void *dummy = 0;
+ rc = xControl(p->pReal, SQLITE_FCNTL_ZIPVFS, &dummy);
+ if( rc==SQLITE_OK ){
+ rc = SQLITE_ERROR;
+ pRbu->zErrmsg = sqlite3_mprintf("rbu/zipvfs setup error");
+ }else if( rc==SQLITE_NOTFOUND ){
+ pRbu->pTargetFd = p;
+ p->pRbu = pRbu;
+ rbuMainlistAdd(p);
+ if( p->pWalFd ) p->pWalFd->pRbu = pRbu;
+ rc = SQLITE_OK;
+ }
+ }
+ return rc;
+ }
+ else if( op==SQLITE_FCNTL_RBUCNT ){
+ sqlite3rbu *pRbu = (sqlite3rbu*)pArg;
+ pRbu->nRbu++;
+ pRbu->pRbuFd = p;
+ p->bNolock = 1;
+ }
+
+ rc = xControl(p->pReal, op, pArg);
+ if( rc==SQLITE_OK && op==SQLITE_FCNTL_VFSNAME ){
+ rbu_vfs *pRbuVfs = p->pRbuVfs;
+ char *zIn = *(char**)pArg;
+ char *zOut = sqlite3_mprintf("rbu(%s)/%z", pRbuVfs->base.zName, zIn);
+ *(char**)pArg = zOut;
+ if( zOut==0 ) rc = SQLITE_NOMEM;
+ }
+
+ return rc;
+}
+
+/*
+** Return the sector-size in bytes for an rbuVfs-file.
+*/
+static int rbuVfsSectorSize(sqlite3_file *pFile){
+ rbu_file *p = (rbu_file *)pFile;
+ return p->pReal->pMethods->xSectorSize(p->pReal);
+}
+
+/*
+** Return the device characteristic flags supported by an rbuVfs-file.
+*/
+static int rbuVfsDeviceCharacteristics(sqlite3_file *pFile){
+ rbu_file *p = (rbu_file *)pFile;
+ return p->pReal->pMethods->xDeviceCharacteristics(p->pReal);
+}
+
+/*
+** Take or release a shared-memory lock.
+*/
+static int rbuVfsShmLock(sqlite3_file *pFile, int ofst, int n, int flags){
+ rbu_file *p = (rbu_file*)pFile;
+ sqlite3rbu *pRbu = p->pRbu;
+ int rc = SQLITE_OK;
+
+#ifdef SQLITE_AMALGAMATION
+ assert( WAL_CKPT_LOCK==1 );
+#endif
+
+ assert( p->openFlags & (SQLITE_OPEN_MAIN_DB|SQLITE_OPEN_TEMP_DB) );
+ if( pRbu && (pRbu->eStage==RBU_STAGE_OAL || pRbu->eStage==RBU_STAGE_MOVE) ){
+ /* Magic number 1 is the WAL_CKPT_LOCK lock. Preventing SQLite from
+ ** taking this lock also prevents any checkpoints from occurring.
+ ** todo: really, it's not clear why this might occur, as
+ ** wal_autocheckpoint ought to be turned off. */
+ if( ofst==WAL_LOCK_CKPT && n==1 ) rc = SQLITE_BUSY;
+ }else{
+ int bCapture = 0;
+ if( pRbu && pRbu->eStage==RBU_STAGE_CAPTURE ){
+ bCapture = 1;
+ }
+
+ if( bCapture==0 || 0==(flags & SQLITE_SHM_UNLOCK) ){
+ rc = p->pReal->pMethods->xShmLock(p->pReal, ofst, n, flags);
+ if( bCapture && rc==SQLITE_OK ){
+ pRbu->mLock |= (1 << ofst);
+ }
+ }
+ }
+
+ return rc;
+}
+
+/*
+** Obtain a pointer to a mapping of a single 32KiB page of the *-shm file.
+*/
+static int rbuVfsShmMap(
+ sqlite3_file *pFile,
+ int iRegion,
+ int szRegion,
+ int isWrite,
+ void volatile **pp
+){
+ rbu_file *p = (rbu_file*)pFile;
+ int rc = SQLITE_OK;
+ int eStage = (p->pRbu ? p->pRbu->eStage : 0);
+
+ /* If not in RBU_STAGE_OAL, allow this call to pass through. Or, if this
+ ** rbu is in the RBU_STAGE_OAL state, use heap memory for *-shm space
+ ** instead of a file on disk. */
+ assert( p->openFlags & (SQLITE_OPEN_MAIN_DB|SQLITE_OPEN_TEMP_DB) );
+ if( eStage==RBU_STAGE_OAL ){
+ sqlite3_int64 nByte = (iRegion+1) * sizeof(char*);
+ char **apNew = (char**)sqlite3_realloc64(p->apShm, nByte);
+
+ /* This is an RBU connection that uses its own heap memory for the
+ ** pages of the *-shm file. Since no other process can have run
+ ** recovery, the connection must request *-shm pages in order
+ ** from start to finish. */
+ assert( iRegion==p->nShm );
+ if( apNew==0 ){
+ rc = SQLITE_NOMEM;
+ }else{
+ memset(&apNew[p->nShm], 0, sizeof(char*) * (1 + iRegion - p->nShm));
+ p->apShm = apNew;
+ p->nShm = iRegion+1;
+ }
+
+ if( rc==SQLITE_OK ){
+ char *pNew = (char*)sqlite3_malloc64(szRegion);
+ if( pNew==0 ){
+ rc = SQLITE_NOMEM;
+ }else{
+ memset(pNew, 0, szRegion);
+ p->apShm[iRegion] = pNew;
+ }
+ }
+
+ if( rc==SQLITE_OK ){
+ *pp = p->apShm[iRegion];
+ }else{
+ *pp = 0;
+ }
+ }else{
+ assert( p->apShm==0 );
+ rc = p->pReal->pMethods->xShmMap(p->pReal, iRegion, szRegion, isWrite, pp);
+ }
+
+ return rc;
+}
+
+/*
+** Memory barrier.
+*/
+static void rbuVfsShmBarrier(sqlite3_file *pFile){
+ rbu_file *p = (rbu_file *)pFile;
+ p->pReal->pMethods->xShmBarrier(p->pReal);
+}
+
+/*
+** The xShmUnmap method.
+*/
+static int rbuVfsShmUnmap(sqlite3_file *pFile, int delFlag){
+ rbu_file *p = (rbu_file*)pFile;
+ int rc = SQLITE_OK;
+ int eStage = (p->pRbu ? p->pRbu->eStage : 0);
+
+ assert( p->openFlags & (SQLITE_OPEN_MAIN_DB|SQLITE_OPEN_TEMP_DB) );
+ if( eStage==RBU_STAGE_OAL || eStage==RBU_STAGE_MOVE ){
+ /* no-op */
+ }else{
+ /* Release the checkpointer and writer locks */
+ rbuUnlockShm(p);
+ rc = p->pReal->pMethods->xShmUnmap(p->pReal, delFlag);
+ }
+ return rc;
+}
+
+/*
+** Open an rbu file handle.
+*/
+static int rbuVfsOpen(
+ sqlite3_vfs *pVfs,
+ const char *zName,
+ sqlite3_file *pFile,
+ int flags,
+ int *pOutFlags
+){
+ static sqlite3_io_methods rbuvfs_io_methods = {
+ 2, /* iVersion */
+ rbuVfsClose, /* xClose */
+ rbuVfsRead, /* xRead */
+ rbuVfsWrite, /* xWrite */
+ rbuVfsTruncate, /* xTruncate */
+ rbuVfsSync, /* xSync */
+ rbuVfsFileSize, /* xFileSize */
+ rbuVfsLock, /* xLock */
+ rbuVfsUnlock, /* xUnlock */
+ rbuVfsCheckReservedLock, /* xCheckReservedLock */
+ rbuVfsFileControl, /* xFileControl */
+ rbuVfsSectorSize, /* xSectorSize */
+ rbuVfsDeviceCharacteristics, /* xDeviceCharacteristics */
+ rbuVfsShmMap, /* xShmMap */
+ rbuVfsShmLock, /* xShmLock */
+ rbuVfsShmBarrier, /* xShmBarrier */
+ rbuVfsShmUnmap, /* xShmUnmap */
+ 0, 0 /* xFetch, xUnfetch */
+ };
+ rbu_vfs *pRbuVfs = (rbu_vfs*)pVfs;
+ sqlite3_vfs *pRealVfs = pRbuVfs->pRealVfs;
+ rbu_file *pFd = (rbu_file *)pFile;
+ int rc = SQLITE_OK;
+ const char *zOpen = zName;
+ int oflags = flags;
+
+ memset(pFd, 0, sizeof(rbu_file));
+ pFd->pReal = (sqlite3_file*)&pFd[1];
+ pFd->pRbuVfs = pRbuVfs;
+ pFd->openFlags = flags;
+ if( zName ){
+ if( flags & SQLITE_OPEN_MAIN_DB ){
+ /* A main database has just been opened. The following block sets
+ ** (pFd->zWal) to point to a buffer owned by SQLite that contains
+ ** the name of the *-wal file this db connection will use. SQLite
+ ** happens to pass a pointer to this buffer when using xAccess()
+ ** or xOpen() to operate on the *-wal file. */
+ pFd->zWal = sqlite3_filename_wal(zName);
+ }
+ else if( flags & SQLITE_OPEN_WAL ){
+ rbu_file *pDb = rbuFindMaindb(pRbuVfs, zName, 0);
+ if( pDb ){
+ if( pDb->pRbu && pDb->pRbu->eStage==RBU_STAGE_OAL ){
+          /* This call is to open a *-wal file. Instead, open the *-oal. This
+ ** code ensures that the string passed to xOpen() is terminated by a
+ ** pair of '\0' bytes in case the VFS attempts to extract a URI
+ ** parameter from it. */
+ const char *zBase = zName;
+ size_t nCopy;
+ char *zCopy;
+ if( rbuIsVacuum(pDb->pRbu) ){
+ zBase = sqlite3_db_filename(pDb->pRbu->dbRbu, "main");
+ zBase = sqlite3_filename_wal(zBase);
+ }
+ nCopy = strlen(zBase);
+ zCopy = sqlite3_malloc64(nCopy+2);
+ if( zCopy ){
+ memcpy(zCopy, zBase, nCopy);
+ zCopy[nCopy-3] = 'o';
+ zCopy[nCopy] = '\0';
+ zCopy[nCopy+1] = '\0';
+ zOpen = (const char*)(pFd->zDel = zCopy);
+ }else{
+ rc = SQLITE_NOMEM;
+ }
+ pFd->pRbu = pDb->pRbu;
+ }
+ pDb->pWalFd = pFd;
+ }
+ }
+ }else{
+ pFd->pRbu = pRbuVfs->pRbu;
+ }
+
+ if( oflags & SQLITE_OPEN_MAIN_DB
+ && sqlite3_uri_boolean(zName, "rbu_memory", 0)
+ ){
+ assert( oflags & SQLITE_OPEN_MAIN_DB );
+ oflags = SQLITE_OPEN_TEMP_DB | SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE |
+ SQLITE_OPEN_EXCLUSIVE | SQLITE_OPEN_DELETEONCLOSE;
+ zOpen = 0;
+ }
+
+ if( rc==SQLITE_OK ){
+ rc = pRealVfs->xOpen(pRealVfs, zOpen, pFd->pReal, oflags, pOutFlags);
+ }
+ if( pFd->pReal->pMethods ){
+ /* The xOpen() operation has succeeded. Set the sqlite3_file.pMethods
+ ** pointer and, if the file is a main database file, link it into the
+ ** mutex protected linked list of all such files. */
+ pFile->pMethods = &rbuvfs_io_methods;
+ if( flags & SQLITE_OPEN_MAIN_DB ){
+ rbuMainlistAdd(pFd);
+ }
+ }else{
+ sqlite3_free(pFd->zDel);
+ }
+
+ return rc;
+}
+
+/*
+** Delete the file located at zPath.
+*/
+static int rbuVfsDelete(sqlite3_vfs *pVfs, const char *zPath, int dirSync){
+ sqlite3_vfs *pRealVfs = ((rbu_vfs*)pVfs)->pRealVfs;
+ return pRealVfs->xDelete(pRealVfs, zPath, dirSync);
+}
+
+/*
+** Test for access permissions. Set *pResOut to non-zero if the requested
+** permission is available, or to zero otherwise.
+*/
+static int rbuVfsAccess(
+ sqlite3_vfs *pVfs,
+ const char *zPath,
+ int flags,
+ int *pResOut
+){
+ rbu_vfs *pRbuVfs = (rbu_vfs*)pVfs;
+ sqlite3_vfs *pRealVfs = pRbuVfs->pRealVfs;
+ int rc;
+
+ rc = pRealVfs->xAccess(pRealVfs, zPath, flags, pResOut);
+
+ /* If this call is to check if a *-wal file associated with an RBU target
+ ** database connection exists, and the RBU update is in RBU_STAGE_OAL,
+ ** the following special handling is activated:
+ **
+ ** a) if the *-wal file does exist, return SQLITE_CANTOPEN. This
+ ** ensures that the RBU extension never tries to update a database
+ ** in wal mode, even if the first page of the database file has
+ ** been damaged.
+ **
+ ** b) if the *-wal file does not exist, claim that it does anyway,
+ ** causing SQLite to call xOpen() to open it. This call will also
+ ** be intercepted (see the rbuVfsOpen() function) and the *-oal
+ ** file opened instead.
+ */
+ if( rc==SQLITE_OK && flags==SQLITE_ACCESS_EXISTS ){
+ rbu_file *pDb = rbuFindMaindb(pRbuVfs, zPath, 1);
+ if( pDb && pDb->pRbu->eStage==RBU_STAGE_OAL ){
+ assert( pDb->pRbu );
+ if( *pResOut ){
+ rc = SQLITE_CANTOPEN;
+ }else{
+ sqlite3_int64 sz = 0;
+ rc = rbuVfsFileSize(&pDb->base, &sz);
+ *pResOut = (sz>0);
+ }
+ }
+ }
+
+ return rc;
+}
+
+/*
+** Populate buffer zOut with the full canonical pathname corresponding
+** to the pathname in zPath. zOut is guaranteed to point to a buffer
+** of at least nOut bytes.
+*/
+static int rbuVfsFullPathname(
+ sqlite3_vfs *pVfs,
+ const char *zPath,
+ int nOut,
+ char *zOut
+){
+ sqlite3_vfs *pRealVfs = ((rbu_vfs*)pVfs)->pRealVfs;
+ return pRealVfs->xFullPathname(pRealVfs, zPath, nOut, zOut);
+}
+
+#ifndef SQLITE_OMIT_LOAD_EXTENSION
+/*
+** Open the dynamic library located at zPath and return a handle.
+*/
+static void *rbuVfsDlOpen(sqlite3_vfs *pVfs, const char *zPath){
+ sqlite3_vfs *pRealVfs = ((rbu_vfs*)pVfs)->pRealVfs;
+ return pRealVfs->xDlOpen(pRealVfs, zPath);
+}
+
+/*
+** Populate the buffer zErrMsg (size nByte bytes) with a human readable
+** utf-8 string describing the most recent error encountered associated
+** with dynamic libraries.
+*/
+static void rbuVfsDlError(sqlite3_vfs *pVfs, int nByte, char *zErrMsg){
+ sqlite3_vfs *pRealVfs = ((rbu_vfs*)pVfs)->pRealVfs;
+ pRealVfs->xDlError(pRealVfs, nByte, zErrMsg);
+}
+
+/*
+** Return a pointer to the symbol zSymbol in the dynamic library pHandle.
+*/
+static void (*rbuVfsDlSym(
+ sqlite3_vfs *pVfs,
+ void *pArg,
+ const char *zSym
+))(void){
+ sqlite3_vfs *pRealVfs = ((rbu_vfs*)pVfs)->pRealVfs;
+ return pRealVfs->xDlSym(pRealVfs, pArg, zSym);
+}
+
+/*
+** Close the dynamic library handle pHandle.
+*/
+static void rbuVfsDlClose(sqlite3_vfs *pVfs, void *pHandle){
+ sqlite3_vfs *pRealVfs = ((rbu_vfs*)pVfs)->pRealVfs;
+ pRealVfs->xDlClose(pRealVfs, pHandle);
+}
+#endif /* SQLITE_OMIT_LOAD_EXTENSION */
+
+/*
+** Populate the buffer pointed to by zBufOut with nByte bytes of
+** random data.
+*/
+static int rbuVfsRandomness(sqlite3_vfs *pVfs, int nByte, char *zBufOut){
+ sqlite3_vfs *pRealVfs = ((rbu_vfs*)pVfs)->pRealVfs;
+ return pRealVfs->xRandomness(pRealVfs, nByte, zBufOut);
+}
+
+/*
+** Sleep for nMicro microseconds. Return the number of microseconds
+** actually slept.
+*/
+static int rbuVfsSleep(sqlite3_vfs *pVfs, int nMicro){
+ sqlite3_vfs *pRealVfs = ((rbu_vfs*)pVfs)->pRealVfs;
+ return pRealVfs->xSleep(pRealVfs, nMicro);
+}
+
+/*
+** Return the current time as a Julian Day number in *pTimeOut.
+*/
+static int rbuVfsCurrentTime(sqlite3_vfs *pVfs, double *pTimeOut){
+ sqlite3_vfs *pRealVfs = ((rbu_vfs*)pVfs)->pRealVfs;
+ return pRealVfs->xCurrentTime(pRealVfs, pTimeOut);
+}
+
+/*
+** No-op.
+*/
+static int rbuVfsGetLastError(sqlite3_vfs *pVfs, int a, char *b){
+ return 0;
+}
+
+/*
+** Deregister and destroy an RBU vfs created by an earlier call to
+** sqlite3rbu_create_vfs().
+*/
+SQLITE_API void sqlite3rbu_destroy_vfs(const char *zName){
+ sqlite3_vfs *pVfs = sqlite3_vfs_find(zName);
+ if( pVfs && pVfs->xOpen==rbuVfsOpen ){
+ sqlite3_mutex_free(((rbu_vfs*)pVfs)->mutex);
+ sqlite3_vfs_unregister(pVfs);
+ sqlite3_free(pVfs);
+ }
+}
+
+/*
+** Create an RBU VFS named zName that accesses the underlying file-system
+** via existing VFS zParent. The new object is registered as a non-default
+** VFS with SQLite before returning.
+*/
+SQLITE_API int sqlite3rbu_create_vfs(const char *zName, const char *zParent){
+
+ /* Template for VFS */
+ static sqlite3_vfs vfs_template = {
+ 1, /* iVersion */
+ 0, /* szOsFile */
+ 0, /* mxPathname */
+ 0, /* pNext */
+ 0, /* zName */
+ 0, /* pAppData */
+ rbuVfsOpen, /* xOpen */
+ rbuVfsDelete, /* xDelete */
+ rbuVfsAccess, /* xAccess */
+ rbuVfsFullPathname, /* xFullPathname */
+
+#ifndef SQLITE_OMIT_LOAD_EXTENSION
+ rbuVfsDlOpen, /* xDlOpen */
+ rbuVfsDlError, /* xDlError */
+ rbuVfsDlSym, /* xDlSym */
+ rbuVfsDlClose, /* xDlClose */
+#else
+ 0, 0, 0, 0,
+#endif
+
+ rbuVfsRandomness, /* xRandomness */
+ rbuVfsSleep, /* xSleep */
+ rbuVfsCurrentTime, /* xCurrentTime */
+ rbuVfsGetLastError, /* xGetLastError */
+ 0, /* xCurrentTimeInt64 (version 2) */
+ 0, 0, 0 /* Unimplemented version 3 methods */
+ };
+
+ rbu_vfs *pNew = 0; /* Newly allocated VFS */
+ int rc = SQLITE_OK;
+ size_t nName;
+ size_t nByte;
+
+ nName = strlen(zName);
+ nByte = sizeof(rbu_vfs) + nName + 1;
+ pNew = (rbu_vfs*)sqlite3_malloc64(nByte);
+ if( pNew==0 ){
+ rc = SQLITE_NOMEM;
+ }else{
+ sqlite3_vfs *pParent; /* Parent VFS */
+ memset(pNew, 0, nByte);
+ pParent = sqlite3_vfs_find(zParent);
+ if( pParent==0 ){
+ rc = SQLITE_NOTFOUND;
+ }else{
+ char *zSpace;
+ memcpy(&pNew->base, &vfs_template, sizeof(sqlite3_vfs));
+ pNew->base.mxPathname = pParent->mxPathname;
+ pNew->base.szOsFile = sizeof(rbu_file) + pParent->szOsFile;
+ pNew->pRealVfs = pParent;
+ pNew->base.zName = (const char*)(zSpace = (char*)&pNew[1]);
+ memcpy(zSpace, zName, nName);
+
+ /* Allocate the mutex and register the new VFS (not as the default) */
+ pNew->mutex = sqlite3_mutex_alloc(SQLITE_MUTEX_RECURSIVE);
+ if( pNew->mutex==0 ){
+ rc = SQLITE_NOMEM;
+ }else{
+ rc = sqlite3_vfs_register(&pNew->base, 0);
+ }
+ }
+
+ if( rc!=SQLITE_OK ){
+ sqlite3_mutex_free(pNew->mutex);
+ sqlite3_free(pNew);
+ }
+ }
+
+ return rc;
+}
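+
+/*
+** For example, a caller might create an RBU VFS on top of the default
+** VFS and route a database connection through it as follows. This is a
+** minimal sketch only: error checking is omitted, and the VFS name
+** "my-rbu" is an arbitrary example, not a name used by the library.
+**
+**   sqlite3 *db;
+**   sqlite3rbu_create_vfs("my-rbu", 0);      /* 0 means "use default VFS" */
+**   sqlite3_open_v2("test.db", &db,
+**       SQLITE_OPEN_READWRITE, "my-rbu");    /* route I/O through the VFS */
+**   /* ... use the connection ... */
+**   sqlite3_close(db);
+**   sqlite3rbu_destroy_vfs("my-rbu");
+*/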
+
+/*
+** Configure the aggregate temp file size limit for this RBU handle.
+*/
+SQLITE_API sqlite3_int64 sqlite3rbu_temp_size_limit(sqlite3rbu *pRbu, sqlite3_int64 n){
+ if( n>=0 ){
+ pRbu->szTempLimit = n;
+ }
+ return pRbu->szTempLimit;
+}
+
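+/*
+** Return the number of bytes of temporary file space currently used by
+** this RBU handle (the quantity limited by sqlite3rbu_temp_size_limit()).
+*/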
+SQLITE_API sqlite3_int64 sqlite3rbu_temp_size(sqlite3rbu *pRbu){
+ return pRbu->szTemp;
+}
+
+
+/**************************************************************************/
+
+#endif /* !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_RBU) */
+
+/************** End of sqlite3rbu.c ******************************************/
+/************** Begin file dbstat.c ******************************************/
+/*
+** 2010 July 12
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+******************************************************************************
+**
+** This file contains an implementation of the "dbstat" virtual table.
+**
+** The dbstat virtual table is used to extract low-level storage
+** information from an SQLite database in order to implement the
+** "sqlite3_analyzer" utility. See the ../tool/spaceanal.tcl script
+** for an example implementation.
+**
+** Additional information is available on the "dbstat.html" page of the
+** official SQLite documentation.
+*/
+
+/* #include "sqliteInt.h" ** Requires access to internal data structures ** */
+#if (defined(SQLITE_ENABLE_DBSTAT_VTAB) || defined(SQLITE_TEST)) \
+ && !defined(SQLITE_OMIT_VIRTUALTABLE)
+
+/*
+** Page paths:
+**
+** The value of the 'path' column describes the path taken from the
+** root-node of the b-tree structure to each page. The value of the
+** root-node path is '/'.
+**
+** The value of the path for the left-most child page of the root of
+** a b-tree is '/000/'. (Btrees store content ordered from left to right
+** so the pages to the left have smaller keys than the pages to the right.)
+** The next-to-left-most child of the root page is
+** '/001/', and so on, with each sibling page identified by a 3-digit hex
+** value. The children of the 451st left-most sibling have paths such
+** as '/1c2/000/', '/1c2/001/' etc.
+**
+** Overflow pages are specified by appending a '+' character and a
+** six-digit hexadecimal value to the path to the cell they are linked
+** from. For example, the three overflow pages in a chain linked from
+** the left-most cell of the 451st child of the root page are identified
+** by the paths:
+**
+** '/1c2/000+000000' // First page in overflow chain
+** '/1c2/000+000001' // Second page in overflow chain
+** '/1c2/000+000002' // Third page in overflow chain
+**
+** If the paths are sorted using the BINARY collation sequence, then
+** the overflow pages associated with a cell will appear earlier in the
+** sort-order than its child page:
+**
+** '/1c2/000/' // Left-most child of 451st child of root
+*/
+static const char zDbstatSchema[] =
+ "CREATE TABLE x("
+ " name TEXT," /* 0 Name of table or index */
+ " path TEXT," /* 1 Path to page from root (NULL for agg) */
+ " pageno INTEGER," /* 2 Page number (page count for aggregates) */
+ " pagetype TEXT," /* 3 'internal', 'leaf', 'overflow', or NULL */
+ " ncell INTEGER," /* 4 Cells on page (0 for overflow) */
+ " payload INTEGER," /* 5 Bytes of payload on this page */
+ " unused INTEGER," /* 6 Bytes of unused space on this page */
+ " mx_payload INTEGER," /* 7 Largest payload size of all cells */
+ " pgoffset INTEGER," /* 8 Offset of page in file (NULL for agg) */
+ " pgsize INTEGER," /* 9 Size of the page (sum for aggregate) */
+ " schema TEXT HIDDEN," /* 10 Database schema being analyzed */
+ " aggregate BOOLEAN HIDDEN" /* 11 aggregate info for each table */
+ ")"
+;
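+
+/*
+** As an illustrative sketch (the table name below is an example only),
+** the per-page rows for a single btree can be read with:
+**
+**   SELECT pageno, pagetype, ncell, payload, unused
+**     FROM dbstat WHERE name='mytable';
+**
+** and constraining the hidden column with "WHERE aggregate=1" collapses
+** the output to a single summary row per btree.
+*/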
+
+/* Forward references to data structures used in this module */
+typedef struct StatTable StatTable;
+typedef struct StatCursor StatCursor;
+typedef struct StatPage StatPage;
+typedef struct StatCell StatCell;
+
+/* Size information for a single cell within a btree page */
+struct StatCell {
+ int nLocal; /* Bytes of local payload */
+ u32 iChildPg; /* Child node (or 0 if this is a leaf) */
+ int nOvfl; /* Entries in aOvfl[] */
+ u32 *aOvfl; /* Array of overflow page numbers */
+ int nLastOvfl; /* Bytes of payload on final overflow page */
+ int iOvfl; /* Iterates through aOvfl[] */
+};
+
+/* Size information for a single btree page */
+struct StatPage {
+ u32 iPgno; /* Page number */
+ DbPage *pPg; /* Page content */
+ int iCell; /* Current cell */
+
+ char *zPath; /* Path to this page */
+
+ /* Variables populated by statDecodePage(): */
+ u8 flags; /* Copy of flags byte */
+ int nCell; /* Number of cells on page */
+ int nUnused; /* Number of unused bytes on page */
+ StatCell *aCell; /* Array of parsed cells */
+ u32 iRightChildPg; /* Right-child page number (or 0) */
+ int nMxPayload; /* Largest payload of any cell on the page */
+};
+
+/* The cursor for scanning the dbstat virtual table */
+struct StatCursor {
+ sqlite3_vtab_cursor base; /* base class. MUST BE FIRST! */
+ sqlite3_stmt *pStmt; /* Iterates through set of root pages */
+ u8 isEof; /* After pStmt has returned SQLITE_DONE */
+ u8 isAgg; /* Aggregate results for each table */
+ int iDb; /* Schema used for this query */
+
+ StatPage aPage[32]; /* Pages in path to current page */
+ int iPage; /* Current entry in aPage[] */
+
+ /* Values to return. */
+ u32 iPageno; /* Value of 'pageno' column */
+ char *zName; /* Value of 'name' column */
+ char *zPath; /* Value of 'path' column */
+ char *zPagetype; /* Value of 'pagetype' column */
+ int nPage; /* Number of pages in current btree */
+ int nCell; /* Value of 'ncell' column */
+ int nMxPayload; /* Value of 'mx_payload' column */
+ i64 nUnused; /* Value of 'unused' column */
+ i64 nPayload; /* Value of 'payload' column */
+ i64 iOffset; /* Value of 'pgOffset' column */
+ i64 szPage; /* Value of 'pgSize' column */
+};
+
+/* An instance of the DBSTAT virtual table */
+struct StatTable {
+ sqlite3_vtab base; /* base class. MUST BE FIRST! */
+ sqlite3 *db; /* Database connection that owns this vtab */
+ int iDb; /* Index of database to analyze */
+};
+
+#ifndef get2byte
+# define get2byte(x) ((x)[0]<<8 | (x)[1])
+#endif
+
+/*
+** Connect to or create a new DBSTAT virtual table.
+*/
+static int statConnect(
+ sqlite3 *db,
+ void *pAux __maybe_unused,
+ int argc, const char *const*argv,
+ sqlite3_vtab **ppVtab,
+ char **pzErr
+){
+ StatTable *pTab = 0;
+ int rc = SQLITE_OK;
+ int iDb;
+
+ if( argc>=4 ){
+ Token nm;
+ sqlite3TokenInit(&nm, (char*)argv[3]);
+ iDb = sqlite3FindDb(db, &nm);
+ if( iDb<0 ){
+ *pzErr = sqlite3_mprintf("no such database: %s", argv[3]);
+ return SQLITE_ERROR;
+ }
+ }else{
+ iDb = 0;
+ }
+ sqlite3_vtab_config(db, SQLITE_VTAB_DIRECTONLY);
+ rc = sqlite3_declare_vtab(db, zDbstatSchema);
+ if( rc==SQLITE_OK ){
+ pTab = (StatTable *)sqlite3_malloc64(sizeof(StatTable));
+ if( pTab==0 ) rc = SQLITE_NOMEM_BKPT;
+ }
+
+ assert( rc==SQLITE_OK || pTab==0 );
+ if( rc==SQLITE_OK ){
+ memset(pTab, 0, sizeof(StatTable));
+ pTab->db = db;
+ pTab->iDb = iDb;
+ }
+
+ *ppVtab = (sqlite3_vtab*)pTab;
+ return rc;
+}
+
+/*
+** Disconnect from or destroy the DBSTAT virtual table.
+*/
+static int statDisconnect(sqlite3_vtab *pVtab){
+ sqlite3_free(pVtab);
+ return SQLITE_OK;
+}
+
+/*
+** Compute the best query strategy and return the result in idxNum.
+**
+** idxNum-Bit Meaning
+** ---------- ----------------------------------------------
+** 0x01 There is a schema=? term in the WHERE clause
+** 0x02 There is a name=? term in the WHERE clause
+** 0x04 There is an aggregate=? term in the WHERE clause
+** 0x08 Output should be ordered by name and path
+*/
+static int statBestIndex(sqlite3_vtab *tab __maybe_unused, sqlite3_index_info *pIdxInfo){
+ int i;
+ int iSchema = -1;
+ int iName = -1;
+ int iAgg = -1;
+
+  /* Look for valid schema=?, name=? and aggregate=? constraints. If found,
+  ** set the corresponding idxNum bits and request that the values of those
+  ** constraints be sent to xFilter. Also lower the cost estimate to
+  ** encourage the constrained version to be used.
+  */
+ for(i=0; i<pIdxInfo->nConstraint; i++){
+ if( pIdxInfo->aConstraint[i].op!=SQLITE_INDEX_CONSTRAINT_EQ ) continue;
+ if( pIdxInfo->aConstraint[i].usable==0 ){
+      /* Force the DBSTAT table to be the right-most table in a join */
+ return SQLITE_CONSTRAINT;
+ }
+ switch( pIdxInfo->aConstraint[i].iColumn ){
+ case 0: { /* name */
+ iName = i;
+ break;
+ }
+ case 10: { /* schema */
+ iSchema = i;
+ break;
+ }
+ case 11: { /* aggregate */
+ iAgg = i;
+ break;
+ }
+ }
+ }
+ i = 0;
+ if( iSchema>=0 ){
+ pIdxInfo->aConstraintUsage[iSchema].argvIndex = ++i;
+ pIdxInfo->aConstraintUsage[iSchema].omit = 1;
+ pIdxInfo->idxNum |= 0x01;
+ }
+ if( iName>=0 ){
+ pIdxInfo->aConstraintUsage[iName].argvIndex = ++i;
+ pIdxInfo->idxNum |= 0x02;
+ }
+ if( iAgg>=0 ){
+ pIdxInfo->aConstraintUsage[iAgg].argvIndex = ++i;
+ pIdxInfo->idxNum |= 0x04;
+ }
+ pIdxInfo->estimatedCost = 1.0;
+
+ /* Records are always returned in ascending order of (name, path).
+ ** If this will satisfy the client, set the orderByConsumed flag so that
+ ** SQLite does not do an external sort.
+ */
+ if( ( pIdxInfo->nOrderBy==1
+ && pIdxInfo->aOrderBy[0].iColumn==0
+ && pIdxInfo->aOrderBy[0].desc==0
+ ) ||
+ ( pIdxInfo->nOrderBy==2
+ && pIdxInfo->aOrderBy[0].iColumn==0
+ && pIdxInfo->aOrderBy[0].desc==0
+ && pIdxInfo->aOrderBy[1].iColumn==1
+ && pIdxInfo->aOrderBy[1].desc==0
+ )
+ ){
+ pIdxInfo->orderByConsumed = 1;
+ pIdxInfo->idxNum |= 0x08;
+ }
+
+ return SQLITE_OK;
+}
+
+/*
+** Open a new DBSTAT cursor.
+*/
+static int statOpen(sqlite3_vtab *pVTab, sqlite3_vtab_cursor **ppCursor){
+ StatTable *pTab = (StatTable *)pVTab;
+ StatCursor *pCsr;
+
+ pCsr = (StatCursor *)sqlite3_malloc64(sizeof(StatCursor));
+ if( pCsr==0 ){
+ return SQLITE_NOMEM_BKPT;
+ }else{
+ memset(pCsr, 0, sizeof(StatCursor));
+ pCsr->base.pVtab = pVTab;
+ pCsr->iDb = pTab->iDb;
+ }
+
+ *ppCursor = (sqlite3_vtab_cursor *)pCsr;
+ return SQLITE_OK;
+}
+
+static void statClearCells(StatPage *p){
+ int i;
+ if( p->aCell ){
+ for(i=0; i<p->nCell; i++){
+ sqlite3_free(p->aCell[i].aOvfl);
+ }
+ sqlite3_free(p->aCell);
+ }
+ p->nCell = 0;
+ p->aCell = 0;
+}
+
+static void statClearPage(StatPage *p){
+ statClearCells(p);
+ sqlite3PagerUnref(p->pPg);
+ sqlite3_free(p->zPath);
+ memset(p, 0, sizeof(StatPage));
+}
+
+static void statResetCsr(StatCursor *pCsr){
+ int i;
+ sqlite3_reset(pCsr->pStmt);
+ for(i=0; i<ArraySize(pCsr->aPage); i++){
+ statClearPage(&pCsr->aPage[i]);
+ }
+ pCsr->iPage = 0;
+ sqlite3_free(pCsr->zPath);
+ pCsr->zPath = 0;
+ pCsr->isEof = 0;
+}
+
+/* Reset the space-used counters inside the cursor */
+static void statResetCounts(StatCursor *pCsr){
+ pCsr->nCell = 0;
+ pCsr->nMxPayload = 0;
+ pCsr->nUnused = 0;
+ pCsr->nPayload = 0;
+ pCsr->szPage = 0;
+ pCsr->nPage = 0;
+}
+
+/*
+** Close a DBSTAT cursor.
+*/
+static int statClose(sqlite3_vtab_cursor *pCursor){
+ StatCursor *pCsr = (StatCursor *)pCursor;
+ statResetCsr(pCsr);
+ sqlite3_finalize(pCsr->pStmt);
+ sqlite3_free(pCsr);
+ return SQLITE_OK;
+}
+
+/*
+** For a single cell on a btree page, compute the number of bytes of
+** content (payload) stored on that page. That is to say, compute the
+** number of bytes of content not found on overflow pages.
+*/
+static int getLocalPayload(
+ int nUsable, /* Usable bytes per page */
+ u8 flags, /* Page flags */
+ int nTotal /* Total record (payload) size */
+){
+ int nLocal;
+ int nMinLocal;
+ int nMaxLocal;
+
+ if( flags==0x0D ){ /* Table leaf node */
+ nMinLocal = (nUsable - 12) * 32 / 255 - 23;
+ nMaxLocal = nUsable - 35;
+ }else{ /* Index interior and leaf nodes */
+ nMinLocal = (nUsable - 12) * 32 / 255 - 23;
+ nMaxLocal = (nUsable - 12) * 64 / 255 - 23;
+ }
+
+ nLocal = nMinLocal + (nTotal - nMinLocal) % (nUsable - 4);
+ if( nLocal>nMaxLocal ) nLocal = nMinLocal;
+ return nLocal;
+}
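+
+/*
+** A worked example of the computation above, assuming a 4096-byte page
+** with no reserved space (nUsable==4096) and a table leaf page
+** (flags==0x0D):
+**
+**   nMinLocal = (4096-12)*32/255 - 23  =  489
+**   nMaxLocal =  4096-35               = 4061
+**
+** For a record of nTotal==10000 bytes:
+**
+**   nLocal = 489 + (10000-489) % (4096-4) = 1816
+**
+** Since 1816 <= nMaxLocal, 1816 bytes of payload are stored locally and
+** the remaining 8184 bytes spill onto two 4092-byte overflow pages.
+*/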
+
+/* Populate the StatPage object with information about all the
+** cells found on the page currently under analysis.
+*/
+static int statDecodePage(Btree *pBt, StatPage *p){
+ int nUnused;
+ int iOff;
+ int nHdr;
+ int isLeaf;
+ int szPage;
+
+ u8 *aData = sqlite3PagerGetData(p->pPg);
+ u8 *aHdr = &aData[p->iPgno==1 ? 100 : 0];
+
+ p->flags = aHdr[0];
+ if( p->flags==0x0A || p->flags==0x0D ){
+ isLeaf = 1;
+ nHdr = 8;
+ }else if( p->flags==0x05 || p->flags==0x02 ){
+ isLeaf = 0;
+ nHdr = 12;
+ }else{
+ goto statPageIsCorrupt;
+ }
+ if( p->iPgno==1 ) nHdr += 100;
+ p->nCell = get2byte(&aHdr[3]);
+ p->nMxPayload = 0;
+ szPage = sqlite3BtreeGetPageSize(pBt);
+
+ nUnused = get2byte(&aHdr[5]) - nHdr - 2*p->nCell;
+ nUnused += (int)aHdr[7];
+ iOff = get2byte(&aHdr[1]);
+ while( iOff ){
+ int iNext;
+ if( iOff>=szPage ) goto statPageIsCorrupt;
+ nUnused += get2byte(&aData[iOff+2]);
+ iNext = get2byte(&aData[iOff]);
+ if( iNext<iOff+4 && iNext>0 ) goto statPageIsCorrupt;
+ iOff = iNext;
+ }
+ p->nUnused = nUnused;
+ p->iRightChildPg = isLeaf ? 0 : sqlite3Get4byte(&aHdr[8]);
+
+ if( p->nCell ){
+ int i; /* Used to iterate through cells */
+ int nUsable; /* Usable bytes per page */
+
+ sqlite3BtreeEnter(pBt);
+ nUsable = szPage - sqlite3BtreeGetReserveNoMutex(pBt);
+ sqlite3BtreeLeave(pBt);
+ p->aCell = sqlite3_malloc64((p->nCell+1) * sizeof(StatCell));
+ if( p->aCell==0 ) return SQLITE_NOMEM_BKPT;
+ memset(p->aCell, 0, (p->nCell+1) * sizeof(StatCell));
+
+ for(i=0; i<p->nCell; i++){
+ StatCell *pCell = &p->aCell[i];
+
+ iOff = get2byte(&aData[nHdr+i*2]);
+ if( iOff<nHdr || iOff>=szPage ) goto statPageIsCorrupt;
+ if( !isLeaf ){
+ pCell->iChildPg = sqlite3Get4byte(&aData[iOff]);
+ iOff += 4;
+ }
+ if( p->flags==0x05 ){
+ /* A table interior node. nPayload==0. */
+ }else{
+ u32 nPayload; /* Bytes of payload total (local+overflow) */
+ int nLocal; /* Bytes of payload stored locally */
+ iOff += getVarint32(&aData[iOff], nPayload);
+ if( p->flags==0x0D ){
+ u64 dummy;
+ iOff += sqlite3GetVarint(&aData[iOff], &dummy);
+ }
+ if( nPayload>(u32)p->nMxPayload ) p->nMxPayload = nPayload;
+ nLocal = getLocalPayload(nUsable, p->flags, nPayload);
+ if( nLocal<0 ) goto statPageIsCorrupt;
+ pCell->nLocal = nLocal;
+ assert( nPayload>=(u32)nLocal );
+ assert( nLocal<=(nUsable-35) );
+ if( nPayload>(u32)nLocal ){
+ int j;
+ int nOvfl = ((nPayload - nLocal) + nUsable-4 - 1) / (nUsable - 4);
+ if( iOff+nLocal>nUsable || nPayload>0x7fffffff ){
+ goto statPageIsCorrupt;
+ }
+ pCell->nLastOvfl = (nPayload-nLocal) - (nOvfl-1) * (nUsable-4);
+ pCell->nOvfl = nOvfl;
+ pCell->aOvfl = sqlite3_malloc64(sizeof(u32)*nOvfl);
+ if( pCell->aOvfl==0 ) return SQLITE_NOMEM_BKPT;
+ pCell->aOvfl[0] = sqlite3Get4byte(&aData[iOff+nLocal]);
+ for(j=1; j<nOvfl; j++){
+ int rc;
+ u32 iPrev = pCell->aOvfl[j-1];
+ DbPage *pPg = 0;
+ rc = sqlite3PagerGet(sqlite3BtreePager(pBt), iPrev, &pPg, 0);
+ if( rc!=SQLITE_OK ){
+ assert( pPg==0 );
+ return rc;
+ }
+ pCell->aOvfl[j] = sqlite3Get4byte(sqlite3PagerGetData(pPg));
+ sqlite3PagerUnref(pPg);
+ }
+ }
+ }
+ }
+ }
+
+ return SQLITE_OK;
+
+statPageIsCorrupt:
+ p->flags = 0;
+ statClearCells(p);
+ return SQLITE_OK;
+}
+
+/*
+** Populate the pCsr->iOffset and pCsr->szPage member variables based on
+** the current value of pCsr->iPageno.
+*/
+static void statSizeAndOffset(StatCursor *pCsr){
+ StatTable *pTab = (StatTable *)((sqlite3_vtab_cursor *)pCsr)->pVtab;
+ Btree *pBt = pTab->db->aDb[pTab->iDb].pBt;
+ Pager *pPager = sqlite3BtreePager(pBt);
+ sqlite3_file *fd;
+ sqlite3_int64 x[2];
+
+ /* If connected to a ZIPVFS backend, find the page size and
+ ** offset from ZIPVFS.
+ */
+ fd = sqlite3PagerFile(pPager);
+ x[0] = pCsr->iPageno;
+ if( sqlite3OsFileControl(fd, 230440, &x)==SQLITE_OK ){
+ pCsr->iOffset = x[0];
+ pCsr->szPage += x[1];
+ }else{
+ /* Not ZIPVFS: The default page size and offset */
+ pCsr->szPage += sqlite3BtreeGetPageSize(pBt);
+ pCsr->iOffset = (i64)pCsr->szPage * (pCsr->iPageno - 1);
+ }
+}
+
+/*
+** Move a DBSTAT cursor to the next entry. Normally, the next
+** entry will be the next page, but in aggregated mode (pCsr->isAgg!=0),
+** the next entry is the next btree.
+*/
+static int statNext(sqlite3_vtab_cursor *pCursor){
+ int rc;
+ int nPayload;
+ char *z;
+ StatCursor *pCsr = (StatCursor *)pCursor;
+ StatTable *pTab = (StatTable *)pCursor->pVtab;
+ Btree *pBt = pTab->db->aDb[pCsr->iDb].pBt;
+ Pager *pPager = sqlite3BtreePager(pBt);
+
+ sqlite3_free(pCsr->zPath);
+ pCsr->zPath = 0;
+
+statNextRestart:
+ if( pCsr->aPage[0].pPg==0 ){
+ /* Start measuring space on the next btree */
+ statResetCounts(pCsr);
+ rc = sqlite3_step(pCsr->pStmt);
+ if( rc==SQLITE_ROW ){
+ int nPage;
+ u32 iRoot = (u32)sqlite3_column_int64(pCsr->pStmt, 1);
+ sqlite3PagerPagecount(pPager, &nPage);
+ if( nPage==0 ){
+ pCsr->isEof = 1;
+ return sqlite3_reset(pCsr->pStmt);
+ }
+ rc = sqlite3PagerGet(pPager, iRoot, &pCsr->aPage[0].pPg, 0);
+ pCsr->aPage[0].iPgno = iRoot;
+ pCsr->aPage[0].iCell = 0;
+ if( !pCsr->isAgg ){
+ pCsr->aPage[0].zPath = z = sqlite3_mprintf("/");
+ if( z==0 ) rc = SQLITE_NOMEM_BKPT;
+ }
+ pCsr->iPage = 0;
+ pCsr->nPage = 1;
+ }else{
+ pCsr->isEof = 1;
+ return sqlite3_reset(pCsr->pStmt);
+ }
+ }else{
+ /* Continue analyzing the btree previously started */
+ StatPage *p = &pCsr->aPage[pCsr->iPage];
+ if( !pCsr->isAgg ) statResetCounts(pCsr);
+ while( p->iCell<p->nCell ){
+ StatCell *pCell = &p->aCell[p->iCell];
+ while( pCell->iOvfl<pCell->nOvfl ){
+ int nUsable, iOvfl;
+ sqlite3BtreeEnter(pBt);
+ nUsable = sqlite3BtreeGetPageSize(pBt) -
+ sqlite3BtreeGetReserveNoMutex(pBt);
+ sqlite3BtreeLeave(pBt);
+ pCsr->nPage++;
+ statSizeAndOffset(pCsr);
+ if( pCell->iOvfl<pCell->nOvfl-1 ){
+ pCsr->nPayload += nUsable - 4;
+ }else{
+ pCsr->nPayload += pCell->nLastOvfl;
+ pCsr->nUnused += nUsable - 4 - pCell->nLastOvfl;
+ }
+ iOvfl = pCell->iOvfl;
+ pCell->iOvfl++;
+ if( !pCsr->isAgg ){
+ pCsr->zName = (char *)sqlite3_column_text(pCsr->pStmt, 0);
+ pCsr->iPageno = pCell->aOvfl[iOvfl];
+ pCsr->zPagetype = "overflow";
+ pCsr->zPath = z = sqlite3_mprintf(
+ "%s%.3x+%.6x", p->zPath, p->iCell, iOvfl
+ );
+ return z==0 ? SQLITE_NOMEM_BKPT : SQLITE_OK;
+ }
+ }
+ if( p->iRightChildPg ) break;
+ p->iCell++;
+ }
+
+ if( !p->iRightChildPg || p->iCell>p->nCell ){
+ statClearPage(p);
+ if( pCsr->iPage>0 ){
+ pCsr->iPage--;
+ }else if( pCsr->isAgg ){
+ /* label-statNext-done: When computing aggregate space usage over
+ ** an entire btree, this is the exit point from this function */
+ return SQLITE_OK;
+ }
+ goto statNextRestart; /* Tail recursion */
+ }
+ pCsr->iPage++;
+ if( pCsr->iPage>=ArraySize(pCsr->aPage) ){
+ statResetCsr(pCsr);
+ return SQLITE_CORRUPT_BKPT;
+ }
+ assert( p==&pCsr->aPage[pCsr->iPage-1] );
+
+ if( p->iCell==p->nCell ){
+ p[1].iPgno = p->iRightChildPg;
+ }else{
+ p[1].iPgno = p->aCell[p->iCell].iChildPg;
+ }
+ rc = sqlite3PagerGet(pPager, p[1].iPgno, &p[1].pPg, 0);
+ pCsr->nPage++;
+ p[1].iCell = 0;
+ if( !pCsr->isAgg ){
+ p[1].zPath = z = sqlite3_mprintf("%s%.3x/", p->zPath, p->iCell);
+ if( z==0 ) rc = SQLITE_NOMEM_BKPT;
+ }
+ p->iCell++;
+ }
+
+
+ /* Populate the StatCursor fields with the values to be returned
+ ** by the xColumn() and xRowid() methods.
+ */
+ if( rc==SQLITE_OK ){
+ int i;
+ StatPage *p = &pCsr->aPage[pCsr->iPage];
+ pCsr->zName = (char *)sqlite3_column_text(pCsr->pStmt, 0);
+ pCsr->iPageno = p->iPgno;
+
+ rc = statDecodePage(pBt, p);
+ if( rc==SQLITE_OK ){
+ statSizeAndOffset(pCsr);
+
+ switch( p->flags ){
+ case 0x05: /* table internal */
+ case 0x02: /* index internal */
+ pCsr->zPagetype = "internal";
+ break;
+ case 0x0D: /* table leaf */
+ case 0x0A: /* index leaf */
+ pCsr->zPagetype = "leaf";
+ break;
+ default:
+ pCsr->zPagetype = "corrupted";
+ break;
+ }
+ pCsr->nCell += p->nCell;
+ pCsr->nUnused += p->nUnused;
+ if( p->nMxPayload>pCsr->nMxPayload ) pCsr->nMxPayload = p->nMxPayload;
+ if( !pCsr->isAgg ){
+ pCsr->zPath = z = sqlite3_mprintf("%s", p->zPath);
+ if( z==0 ) rc = SQLITE_NOMEM_BKPT;
+ }
+ nPayload = 0;
+ for(i=0; i<p->nCell; i++){
+ nPayload += p->aCell[i].nLocal;
+ }
+ pCsr->nPayload += nPayload;
+
+ /* If computing aggregate space usage by btree, continue with the
+ ** next page. The loop will exit via the return at label-statNext-done
+ */
+ if( pCsr->isAgg ) goto statNextRestart;
+ }
+ }
+
+ return rc;
+}
+
+static int statEof(sqlite3_vtab_cursor *pCursor){
+ StatCursor *pCsr = (StatCursor *)pCursor;
+ return pCsr->isEof;
+}
+
+/* Initialize a cursor according to the query plan idxNum using the
+** arguments in argv[]. See statBestIndex() for a description of the
+** meaning of the bits in idxNum.
+*/
+static int statFilter(
+ sqlite3_vtab_cursor *pCursor,
+ int idxNum, const char *idxStr __maybe_unused,
+ int argc __maybe_unused, sqlite3_value **argv
+){
+ StatCursor *pCsr = (StatCursor *)pCursor;
+ StatTable *pTab = (StatTable*)(pCursor->pVtab);
+ sqlite3_str *pSql; /* Query of btrees to analyze */
+ char *zSql; /* String value of pSql */
+ int iArg = 0; /* Count of argv[] parameters used so far */
+ int rc = SQLITE_OK; /* Result of this operation */
+ const char *zName = 0; /* Only provide analysis of this table */
+
+ statResetCsr(pCsr);
+ sqlite3_finalize(pCsr->pStmt);
+ pCsr->pStmt = 0;
+ if( idxNum & 0x01 ){
+ /* schema=? constraint is present. Get its value */
+ const char *zDbase = (const char*)sqlite3_value_text(argv[iArg++]);
+ pCsr->iDb = sqlite3FindDbName(pTab->db, zDbase);
+ if( pCsr->iDb<0 ){
+ pCsr->iDb = 0;
+ pCsr->isEof = 1;
+ return SQLITE_OK;
+ }
+ }else{
+ pCsr->iDb = pTab->iDb;
+ }
+ if( idxNum & 0x02 ){
+ /* name=? constraint is present */
+ zName = (const char*)sqlite3_value_text(argv[iArg++]);
+ }
+ if( idxNum & 0x04 ){
+ /* aggregate=? constraint is present */
+ pCsr->isAgg = sqlite3_value_double(argv[iArg++])!=0.0;
+ }else{
+ pCsr->isAgg = 0;
+ }
+ pSql = sqlite3_str_new(pTab->db);
+ sqlite3_str_appendf(pSql,
+ "SELECT * FROM ("
+ "SELECT 'sqlite_schema' AS name,1 AS rootpage,'table' AS type"
+ " UNION ALL "
+ "SELECT name,rootpage,type"
+ " FROM \"%w\".sqlite_schema WHERE rootpage!=0)",
+ pTab->db->aDb[pCsr->iDb].zDbSName);
+ if( zName ){
+ sqlite3_str_appendf(pSql, "WHERE name=%Q", zName);
+ }
+ if( idxNum & 0x08 ){
+ sqlite3_str_appendf(pSql, " ORDER BY name");
+ }
+ zSql = sqlite3_str_finish(pSql);
+ if( zSql==0 ){
+ return SQLITE_NOMEM_BKPT;
+ }else{
+ rc = sqlite3_prepare_v2(pTab->db, zSql, -1, &pCsr->pStmt, 0);
+ sqlite3_free(zSql);
+ }
+
+ if( rc==SQLITE_OK ){
+ rc = statNext(pCursor);
+ }
+ return rc;
+}
+
+static int statColumn(
+ sqlite3_vtab_cursor *pCursor,
+ sqlite3_context *ctx,
+ int i
+){
+ StatCursor *pCsr = (StatCursor *)pCursor;
+ switch( i ){
+ case 0: /* name */
+ sqlite3_result_text(ctx, pCsr->zName, -1, SQLITE_TRANSIENT);
+ break;
+ case 1: /* path */
+ if( !pCsr->isAgg ){
+ sqlite3_result_text(ctx, pCsr->zPath, -1, SQLITE_TRANSIENT);
+ }
+ break;
+ case 2: /* pageno */
+ if( pCsr->isAgg ){
+ sqlite3_result_int64(ctx, pCsr->nPage);
+ }else{
+ sqlite3_result_int64(ctx, pCsr->iPageno);
+ }
+ break;
+ case 3: /* pagetype */
+ if( !pCsr->isAgg ){
+ sqlite3_result_text(ctx, pCsr->zPagetype, -1, SQLITE_STATIC);
+ }
+ break;
+ case 4: /* ncell */
+ sqlite3_result_int(ctx, pCsr->nCell);
+ break;
+ case 5: /* payload */
+ sqlite3_result_int(ctx, pCsr->nPayload);
+ break;
+ case 6: /* unused */
+ sqlite3_result_int(ctx, pCsr->nUnused);
+ break;
+ case 7: /* mx_payload */
+ sqlite3_result_int(ctx, pCsr->nMxPayload);
+ break;
+ case 8: /* pgoffset */
+ if( !pCsr->isAgg ){
+ sqlite3_result_int64(ctx, pCsr->iOffset);
+ }
+ break;
+ case 9: /* pgsize */
+ sqlite3_result_int(ctx, pCsr->szPage);
+ break;
+ case 10: { /* schema */
+ sqlite3 *db = sqlite3_context_db_handle(ctx);
+ int iDb = pCsr->iDb;
+ sqlite3_result_text(ctx, db->aDb[iDb].zDbSName, -1, SQLITE_STATIC);
+ break;
+ }
+ default: { /* aggregate */
+ sqlite3_result_int(ctx, pCsr->isAgg);
+ break;
+ }
+ }
+ return SQLITE_OK;
+}
+
+static int statRowid(sqlite3_vtab_cursor *pCursor, sqlite_int64 *pRowid){
+ StatCursor *pCsr = (StatCursor *)pCursor;
+ *pRowid = pCsr->iPageno;
+ return SQLITE_OK;
+}
+
+/*
+** Invoke this routine to register the "dbstat" virtual table module
+*/
+SQLITE_PRIVATE int sqlite3DbstatRegister(sqlite3 *db){
+ static sqlite3_module dbstat_module = {
+ 0, /* iVersion */
+ statConnect, /* xCreate */
+ statConnect, /* xConnect */
+ statBestIndex, /* xBestIndex */
+ statDisconnect, /* xDisconnect */
+ statDisconnect, /* xDestroy */
+ statOpen, /* xOpen - open a cursor */
+ statClose, /* xClose - close a cursor */
+ statFilter, /* xFilter - configure scan constraints */
+ statNext, /* xNext - advance a cursor */
+ statEof, /* xEof - check for end of scan */
+ statColumn, /* xColumn - read data */
+ statRowid, /* xRowid - read data */
+ 0, /* xUpdate */
+ 0, /* xBegin */
+ 0, /* xSync */
+ 0, /* xCommit */
+ 0, /* xRollback */
+ 0, /* xFindMethod */
+ 0, /* xRename */
+ 0, /* xSavepoint */
+ 0, /* xRelease */
+ 0, /* xRollbackTo */
+ 0 /* xShadowName */
+ };
+ return sqlite3_create_module(db, "dbstat", &dbstat_module, 0);
+}
+#elif defined(SQLITE_ENABLE_DBSTAT_VTAB)
+SQLITE_PRIVATE int sqlite3DbstatRegister(sqlite3 *db){ return SQLITE_OK; }
+#endif /* SQLITE_ENABLE_DBSTAT_VTAB */
+
+/************** End of dbstat.c **********************************************/
+/************** Begin file dbpage.c ******************************************/
+/*
+** 2017-10-11
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+******************************************************************************
+**
+** This file contains an implementation of the "sqlite_dbpage" virtual table.
+**
+** The sqlite_dbpage virtual table is used to read or write whole raw
+** pages of the database file. The pager interface is used so that
+** uncommitted changes and changes recorded in the WAL file are correctly
+** retrieved.
+**
+** Usage example:
+**
+** SELECT data FROM sqlite_dbpage('aux1') WHERE pgno=123;
+**
+** This is an eponymous virtual table so it does not need to be created before
+** use. The optional argument to the sqlite_dbpage() table name is the
+** schema for the database file that is to be read. The default schema is
+** "main".
+**
+** The data field of sqlite_dbpage table can be updated. The new
+** value must be a BLOB which is the correct page size, otherwise the
+** update fails. Rows may not be deleted or inserted.
+*/
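+
+/*
+** As a further illustrative sketch (the page number is an arbitrary
+** example), a page of the "main" database may be overwritten with:
+**
+**   UPDATE sqlite_dbpage SET data=?1 WHERE pgno=123;
+**
+** where ?1 is bound to a BLOB of exactly one page in size. The update
+** is refused if the connection is in defensive mode.
+*/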
+
+/* #include "sqliteInt.h" ** Requires access to internal data structures ** */
+#if (defined(SQLITE_ENABLE_DBPAGE_VTAB) || defined(SQLITE_TEST)) \
+ && !defined(SQLITE_OMIT_VIRTUALTABLE)
+
+typedef struct DbpageTable DbpageTable;
+typedef struct DbpageCursor DbpageCursor;
+
+struct DbpageCursor {
+ sqlite3_vtab_cursor base; /* Base class. Must be first */
+ int pgno; /* Current page number */
+ int mxPgno; /* Last page to visit on this scan */
+ Pager *pPager; /* Pager being read/written */
+ DbPage *pPage1; /* Page 1 of the database */
+ int iDb; /* Index of database to analyze */
+ int szPage; /* Size of each page in bytes */
+};
+
+struct DbpageTable {
+ sqlite3_vtab base; /* Base class. Must be first */
+ sqlite3 *db; /* The database */
+};
+
+/* Columns */
+#define DBPAGE_COLUMN_PGNO 0
+#define DBPAGE_COLUMN_DATA 1
+#define DBPAGE_COLUMN_SCHEMA 2
+
+
+
+/*
+** Connect to or create an sqlite_dbpage virtual table.
+*/
+static int dbpageConnect(
+ sqlite3 *db,
+ void *pAux,
+ int argc, const char *const*argv,
+ sqlite3_vtab **ppVtab,
+ char **pzErr
+){
+ DbpageTable *pTab = 0;
+ int rc = SQLITE_OK;
+
+ sqlite3_vtab_config(db, SQLITE_VTAB_DIRECTONLY);
+ rc = sqlite3_declare_vtab(db,
+ "CREATE TABLE x(pgno INTEGER PRIMARY KEY, data BLOB, schema HIDDEN)");
+ if( rc==SQLITE_OK ){
+ pTab = (DbpageTable *)sqlite3_malloc64(sizeof(DbpageTable));
+ if( pTab==0 ) rc = SQLITE_NOMEM_BKPT;
+ }
+
+ assert( rc==SQLITE_OK || pTab==0 );
+ if( rc==SQLITE_OK ){
+ memset(pTab, 0, sizeof(DbpageTable));
+ pTab->db = db;
+ }
+
+ *ppVtab = (sqlite3_vtab*)pTab;
+ return rc;
+}
+
+/*
+** Disconnect from or destroy an sqlite_dbpage virtual table.
+*/
+static int dbpageDisconnect(sqlite3_vtab *pVtab){
+ sqlite3_free(pVtab);
+ return SQLITE_OK;
+}
+
+/*
+** idxNum:
+**
+** 0 schema=main, full table scan
+** 1 schema=main, pgno=?1
+** 2 schema=?1, full table scan
+** 3 schema=?1, pgno=?2
+*/
+static int dbpageBestIndex(sqlite3_vtab *tab, sqlite3_index_info *pIdxInfo){
+ int i;
+ int iPlan = 0;
+
+  /* If there is a schema= constraint, it must be honored. If a schema=
+  ** constraint is present but not usable, there is no viable query plan,
+  ** so return SQLITE_CONSTRAINT.
+  */
+ for(i=0; i<pIdxInfo->nConstraint; i++){
+ struct sqlite3_index_constraint *p = &pIdxInfo->aConstraint[i];
+ if( p->iColumn!=DBPAGE_COLUMN_SCHEMA ) continue;
+ if( p->op!=SQLITE_INDEX_CONSTRAINT_EQ ) continue;
+ if( !p->usable ){
+ /* No solution. */
+ return SQLITE_CONSTRAINT;
+ }
+ iPlan = 2;
+ pIdxInfo->aConstraintUsage[i].argvIndex = 1;
+ pIdxInfo->aConstraintUsage[i].omit = 1;
+ break;
+ }
+
+ /* If we reach this point, it means that either there is no schema=
+ ** constraint (in which case we use the "main" schema) or else the
+ ** schema constraint was accepted. Lower the estimated cost accordingly
+ */
+ pIdxInfo->estimatedCost = 1.0e6;
+
+ /* Check for constraints against pgno */
+ for(i=0; i<pIdxInfo->nConstraint; i++){
+ struct sqlite3_index_constraint *p = &pIdxInfo->aConstraint[i];
+ if( p->usable && p->iColumn<=0 && p->op==SQLITE_INDEX_CONSTRAINT_EQ ){
+ pIdxInfo->estimatedRows = 1;
+ pIdxInfo->idxFlags = SQLITE_INDEX_SCAN_UNIQUE;
+ pIdxInfo->estimatedCost = 1.0;
+ pIdxInfo->aConstraintUsage[i].argvIndex = iPlan ? 2 : 1;
+ pIdxInfo->aConstraintUsage[i].omit = 1;
+ iPlan |= 1;
+ break;
+ }
+ }
+ pIdxInfo->idxNum = iPlan;
+
+ if( pIdxInfo->nOrderBy>=1
+ && pIdxInfo->aOrderBy[0].iColumn<=0
+ && pIdxInfo->aOrderBy[0].desc==0
+ ){
+ pIdxInfo->orderByConsumed = 1;
+ }
+ return SQLITE_OK;
+}
+
+/*
+** Open a new sqlite_dbpage cursor.
+*/
+static int dbpageOpen(sqlite3_vtab *pVTab, sqlite3_vtab_cursor **ppCursor){
+ DbpageCursor *pCsr;
+
+ pCsr = (DbpageCursor *)sqlite3_malloc64(sizeof(DbpageCursor));
+ if( pCsr==0 ){
+ return SQLITE_NOMEM_BKPT;
+ }else{
+ memset(pCsr, 0, sizeof(DbpageCursor));
+ pCsr->base.pVtab = pVTab;
+ pCsr->pgno = -1;
+ }
+
+ *ppCursor = (sqlite3_vtab_cursor *)pCsr;
+ return SQLITE_OK;
+}
+
+/*
+** Close an sqlite_dbpage cursor.
+*/
+static int dbpageClose(sqlite3_vtab_cursor *pCursor){
+ DbpageCursor *pCsr = (DbpageCursor *)pCursor;
+ if( pCsr->pPage1 ) sqlite3PagerUnrefPageOne(pCsr->pPage1);
+ sqlite3_free(pCsr);
+ return SQLITE_OK;
+}
+
+/*
+** Move an sqlite_dbpage cursor to the next entry in the file.
+*/
+static int dbpageNext(sqlite3_vtab_cursor *pCursor){
+ int rc = SQLITE_OK;
+ DbpageCursor *pCsr = (DbpageCursor *)pCursor;
+ pCsr->pgno++;
+ return rc;
+}
+
+static int dbpageEof(sqlite3_vtab_cursor *pCursor){
+ DbpageCursor *pCsr = (DbpageCursor *)pCursor;
+ return pCsr->pgno > pCsr->mxPgno;
+}
+
+/*
+** idxNum:
+**
+** 0 schema=main, full table scan
+** 1 schema=main, pgno=?1
+** 2 schema=?1, full table scan
+** 3 schema=?1, pgno=?2
+**
+** idxStr is not used
+*/
+static int dbpageFilter(
+ sqlite3_vtab_cursor *pCursor,
+ int idxNum, const char *idxStr,
+ int argc, sqlite3_value **argv
+){
+ DbpageCursor *pCsr = (DbpageCursor *)pCursor;
+ DbpageTable *pTab = (DbpageTable *)pCursor->pVtab;
+ int rc;
+ sqlite3 *db = pTab->db;
+ Btree *pBt;
+
+  /* By default, the scan returns no rows */
+ pCsr->pgno = 1;
+ pCsr->mxPgno = 0;
+
+ if( idxNum & 2 ){
+ const char *zSchema;
+ assert( argc>=1 );
+ zSchema = (const char*)sqlite3_value_text(argv[0]);
+ pCsr->iDb = sqlite3FindDbName(db, zSchema);
+ if( pCsr->iDb<0 ) return SQLITE_OK;
+ }else{
+ pCsr->iDb = 0;
+ }
+ pBt = db->aDb[pCsr->iDb].pBt;
+ if( pBt==0 ) return SQLITE_OK;
+ pCsr->pPager = sqlite3BtreePager(pBt);
+ pCsr->szPage = sqlite3BtreeGetPageSize(pBt);
+ pCsr->mxPgno = sqlite3BtreeLastPage(pBt);
+ if( idxNum & 1 ){
+ assert( argc>(idxNum>>1) );
+ pCsr->pgno = sqlite3_value_int(argv[idxNum>>1]);
+ if( pCsr->pgno<1 || pCsr->pgno>pCsr->mxPgno ){
+ pCsr->pgno = 1;
+ pCsr->mxPgno = 0;
+ }else{
+ pCsr->mxPgno = pCsr->pgno;
+ }
+ }else{
+ assert( pCsr->pgno==1 );
+ }
+ if( pCsr->pPage1 ) sqlite3PagerUnrefPageOne(pCsr->pPage1);
+ rc = sqlite3PagerGet(pCsr->pPager, 1, &pCsr->pPage1, 0);
+ return rc;
+}
+
+static int dbpageColumn(
+ sqlite3_vtab_cursor *pCursor,
+ sqlite3_context *ctx,
+ int i
+){
+ DbpageCursor *pCsr = (DbpageCursor *)pCursor;
+ int rc = SQLITE_OK;
+ switch( i ){
+ case 0: { /* pgno */
+ sqlite3_result_int(ctx, pCsr->pgno);
+ break;
+ }
+ case 1: { /* data */
+ DbPage *pDbPage = 0;
+ rc = sqlite3PagerGet(pCsr->pPager, pCsr->pgno, (DbPage**)&pDbPage, 0);
+ if( rc==SQLITE_OK ){
+ sqlite3_result_blob(ctx, sqlite3PagerGetData(pDbPage), pCsr->szPage,
+ SQLITE_TRANSIENT);
+ }
+ sqlite3PagerUnref(pDbPage);
+ break;
+ }
+ default: { /* schema */
+ sqlite3 *db = sqlite3_context_db_handle(ctx);
+ sqlite3_result_text(ctx, db->aDb[pCsr->iDb].zDbSName, -1, SQLITE_STATIC);
+ break;
+ }
+ }
+  return rc;
+}
+
+static int dbpageRowid(sqlite3_vtab_cursor *pCursor, sqlite_int64 *pRowid){
+ DbpageCursor *pCsr = (DbpageCursor *)pCursor;
+ *pRowid = pCsr->pgno;
+ return SQLITE_OK;
+}
+
+static int dbpageUpdate(
+ sqlite3_vtab *pVtab,
+ int argc,
+ sqlite3_value **argv,
+ sqlite_int64 *pRowid
+){
+ DbpageTable *pTab = (DbpageTable *)pVtab;
+ Pgno pgno;
+ DbPage *pDbPage = 0;
+ int rc = SQLITE_OK;
+ char *zErr = 0;
+ const char *zSchema;
+ int iDb;
+ Btree *pBt;
+ Pager *pPager;
+ int szPage;
+
+ if( pTab->db->flags & SQLITE_Defensive ){
+ zErr = "read-only";
+ goto update_fail;
+ }
+ if( argc==1 ){
+ zErr = "cannot delete";
+ goto update_fail;
+ }
+ pgno = sqlite3_value_int(argv[0]);
+ if( (Pgno)sqlite3_value_int(argv[1])!=pgno ){
+ zErr = "cannot insert";
+ goto update_fail;
+ }
+ zSchema = (const char*)sqlite3_value_text(argv[4]);
+ iDb = zSchema ? sqlite3FindDbName(pTab->db, zSchema) : -1;
+ if( iDb<0 ){
+ zErr = "no such schema";
+ goto update_fail;
+ }
+ pBt = pTab->db->aDb[iDb].pBt;
+ if( pgno<1 || pBt==0 || pgno>(int)sqlite3BtreeLastPage(pBt) ){
+ zErr = "bad page number";
+ goto update_fail;
+ }
+ szPage = sqlite3BtreeGetPageSize(pBt);
+ if( sqlite3_value_type(argv[3])!=SQLITE_BLOB
+ || sqlite3_value_bytes(argv[3])!=szPage
+ ){
+ zErr = "bad page value";
+ goto update_fail;
+ }
+ pPager = sqlite3BtreePager(pBt);
+ rc = sqlite3PagerGet(pPager, pgno, (DbPage**)&pDbPage, 0);
+ if( rc==SQLITE_OK ){
+ rc = sqlite3PagerWrite(pDbPage);
+ if( rc==SQLITE_OK ){
+ memcpy(sqlite3PagerGetData(pDbPage),
+ sqlite3_value_blob(argv[3]),
+ szPage);
+ }
+ }
+ sqlite3PagerUnref(pDbPage);
+ return rc;
+
+update_fail:
+ sqlite3_free(pVtab->zErrMsg);
+ pVtab->zErrMsg = sqlite3_mprintf("%s", zErr);
+ return SQLITE_ERROR;
+}
+
+/* Since we do not know in advance which database files will be
+** written by the sqlite_dbpage virtual table, start a write transaction
+** on them all.
+*/
+static int dbpageBegin(sqlite3_vtab *pVtab){
+ DbpageTable *pTab = (DbpageTable *)pVtab;
+ sqlite3 *db = pTab->db;
+ int i;
+ for(i=0; i<db->nDb; i++){
+ Btree *pBt = db->aDb[i].pBt;
+ if( pBt ) sqlite3BtreeBeginTrans(pBt, 1, 0);
+ }
+ return SQLITE_OK;
+}
+
+
+/*
+** Invoke this routine to register the "dbpage" virtual table module
+*/
+SQLITE_PRIVATE int sqlite3DbpageRegister(sqlite3 *db){
+ static sqlite3_module dbpage_module = {
+ 0, /* iVersion */
+ dbpageConnect, /* xCreate */
+ dbpageConnect, /* xConnect */
+ dbpageBestIndex, /* xBestIndex */
+ dbpageDisconnect, /* xDisconnect */
+ dbpageDisconnect, /* xDestroy */
+ dbpageOpen, /* xOpen - open a cursor */
+ dbpageClose, /* xClose - close a cursor */
+ dbpageFilter, /* xFilter - configure scan constraints */
+ dbpageNext, /* xNext - advance a cursor */
+ dbpageEof, /* xEof - check for end of scan */
+ dbpageColumn, /* xColumn - read data */
+ dbpageRowid, /* xRowid - read data */
+ dbpageUpdate, /* xUpdate */
+ dbpageBegin, /* xBegin */
+ 0, /* xSync */
+ 0, /* xCommit */
+ 0, /* xRollback */
+ 0, /* xFindMethod */
+ 0, /* xRename */
+ 0, /* xSavepoint */
+ 0, /* xRelease */
+ 0, /* xRollbackTo */
+ 0 /* xShadowName */
+ };
+ return sqlite3_create_module(db, "sqlite_dbpage", &dbpage_module, 0);
+}
+#elif defined(SQLITE_ENABLE_DBPAGE_VTAB)
+SQLITE_PRIVATE int sqlite3DbpageRegister(sqlite3 *db){ return SQLITE_OK; }
+#endif /* SQLITE_ENABLE_DBPAGE_VTAB */
+
+/************** End of dbpage.c **********************************************/
+/************** Begin file sqlite3session.c **********************************/
+
+#if defined(SQLITE_ENABLE_SESSION) && defined(SQLITE_ENABLE_PREUPDATE_HOOK)
+/* #include "sqlite3session.h" */
+/* #include <assert.h> */
+/* #include <string.h> */
+
+#ifndef SQLITE_AMALGAMATION
+/* # include "sqliteInt.h" */
+/* # include "vdbeInt.h" */
+#endif
+
+typedef struct SessionTable SessionTable;
+typedef struct SessionChange SessionChange;
+typedef struct SessionBuffer SessionBuffer;
+typedef struct SessionInput SessionInput;
+
+/*
+** Minimum chunk size used by streaming versions of functions.
+*/
+#ifndef SESSIONS_STRM_CHUNK_SIZE
+# ifdef SQLITE_TEST
+# define SESSIONS_STRM_CHUNK_SIZE 64
+# else
+# define SESSIONS_STRM_CHUNK_SIZE 1024
+# endif
+#endif
+
+static int sessions_strm_chunk_size = SESSIONS_STRM_CHUNK_SIZE;
+
+typedef struct SessionHook SessionHook;
+struct SessionHook {
+ void *pCtx;
+ int (*xOld)(void*,int,sqlite3_value**);
+ int (*xNew)(void*,int,sqlite3_value**);
+ int (*xCount)(void*);
+ int (*xDepth)(void*);
+};
+
+/*
+** Session handle structure.
+*/
+struct sqlite3_session {
+ sqlite3 *db; /* Database handle session is attached to */
+ char *zDb; /* Name of database session is attached to */
+ int bEnable; /* True if currently recording */
+ int bIndirect; /* True if all changes are indirect */
+ int bAutoAttach; /* True to auto-attach tables */
+ int rc; /* Non-zero if an error has occurred */
+ void *pFilterCtx; /* First argument to pass to xTableFilter */
+ int (*xTableFilter)(void *pCtx, const char *zTab);
+ sqlite3_value *pZeroBlob; /* Value containing X'' */
+ sqlite3_session *pNext; /* Next session object on same db. */
+ SessionTable *pTable; /* List of attached tables */
+ SessionHook hook; /* APIs to grab new and old data with */
+};
+
+/*
+** Instances of this structure are used to build strings or binary records.
+*/
+struct SessionBuffer {
+ u8 *aBuf; /* Pointer to changeset buffer */
+ int nBuf; /* Size of buffer aBuf */
+ int nAlloc; /* Size of allocation containing aBuf */
+};
+
+/*
+** An object of this type is used internally as an abstraction for
+** input data. Input data may be supplied either as a single large buffer
+** (e.g. sqlite3changeset_start()) or using a stream function (e.g.
+** sqlite3changeset_start_strm()).
+*/
+struct SessionInput {
+ int bNoDiscard; /* If true, do not discard in InputBuffer() */
+ int iCurrent; /* Offset in aData[] of current change */
+ int iNext; /* Offset in aData[] of next change */
+ u8 *aData; /* Pointer to buffer containing changeset */
+ int nData; /* Number of bytes in aData */
+
+ SessionBuffer buf; /* Current read buffer */
+ int (*xInput)(void*, void*, int*); /* Input stream call (or NULL) */
+ void *pIn; /* First argument to xInput */
+ int bEof; /* Set to true after xInput finished */
+};
+
+/*
+** Structure for changeset iterators.
+*/
+struct sqlite3_changeset_iter {
+ SessionInput in; /* Input buffer or stream */
+  SessionBuffer tblhdr;          /* Buffer to hold apValue/zTab/abPK */
+ int bPatchset; /* True if this is a patchset */
+ int bInvert; /* True to invert changeset */
+ int rc; /* Iterator error code */
+ sqlite3_stmt *pConflict; /* Points to conflicting row, if any */
+ char *zTab; /* Current table */
+ int nCol; /* Number of columns in zTab */
+ int op; /* Current operation */
+ int bIndirect; /* True if current change was indirect */
+ u8 *abPK; /* Primary key array */
+ sqlite3_value **apValue; /* old.* and new.* values */
+};
+
+/*
+** Each session object maintains a set of the following structures, one
+** for each table the session object is monitoring. The structures are
+** stored in a linked list starting at sqlite3_session.pTable.
+**
+** The keys of the SessionTable.aChange[] hash table are all rows that have
+** been modified in any way since the session object was attached to the
+** table.
+**
+** The data associated with each hash-table entry is a structure containing
+** a subset of the initial values that the modified row contained at the
+** start of the session, or no initial values if the row was inserted.
+*/
+struct SessionTable {
+ SessionTable *pNext;
+ char *zName; /* Local name of table */
+ int nCol; /* Number of columns in table zName */
+ int bStat1; /* True if this is sqlite_stat1 */
+ const char **azCol; /* Column names */
+ u8 *abPK; /* Array of primary key flags */
+ int nEntry; /* Total number of entries in hash table */
+ int nChange; /* Size of apChange[] array */
+ SessionChange **apChange; /* Hash table buckets */
+};
+
+/*
+** RECORD FORMAT:
+**
+** The following record format is similar to (but not compatible with) that
+** used in SQLite database files. This format is used as part of the
+** change-set binary format, and so must be architecture independent.
+**
+** Unlike the SQLite database record format, each field is self-contained -
+** there is no separation of header and data. Each field begins with a
+** single byte describing its type, as follows:
+**
+** 0x00: Undefined value.
+** 0x01: Integer value.
+** 0x02: Real value.
+** 0x03: Text value.
+** 0x04: Blob value.
+** 0x05: SQL NULL value.
+**
+** Note that the above match the definitions of SQLITE_INTEGER, SQLITE_TEXT
+** and so on in sqlite3.h. For undefined and NULL values, the field consists
+** only of the single type byte. For other types of values, the type byte
+** is followed by:
+**
+** Text values:
+**     A varint containing the number of bytes in the UTF-8 encoding of
+**     the value, followed by a buffer containing that UTF-8 text. There
+**     is no nul terminator.
+**
+** Blob values:
+** A varint containing the number of bytes in the value, followed by
+** a buffer containing the value itself.
+**
+** Integer values:
+** An 8-byte big-endian integer value.
+**
+** Real values:
+** An 8-byte big-endian IEEE 754-2008 real value.
+**
+** Varint values are encoded in the same way as varints in the SQLite
+** record format.
+**
+** CHANGESET FORMAT:
+**
+** A changeset is a collection of DELETE, UPDATE and INSERT operations on
+** one or more tables. Operations on a single table are grouped together,
+** but may occur in any order (i.e. deletes, updates and inserts are all
+** mixed together).
+**
+** Each group of changes begins with a table header:
+**
+** 1 byte: Constant 0x54 (capital 'T')
+** Varint: Number of columns in the table.
+** nCol bytes: 0x01 for PK columns, 0x00 otherwise.
+** N bytes: Unqualified table name (encoded using UTF-8). Nul-terminated.
+**
+** Followed by one or more changes to the table.
+**
+** 1 byte: Either SQLITE_INSERT (0x12), UPDATE (0x17) or DELETE (0x09).
+** 1 byte: The "indirect-change" flag.
+** old.* record: (delete and update only)
+** new.* record: (insert and update only)
+**
+** The "old.*" and "new.*" records, if present, are N field records in the
+** format described above under "RECORD FORMAT", where N is the number of
+** columns in the table. The i'th field of each record is associated with
+** the i'th column of the table, counting from left to right in the order
+** in which columns were declared in the CREATE TABLE statement.
+**
+** The new.* record that is part of each INSERT change contains the values
+** that make up the new row. Similarly, the old.* record that is part of each
+** DELETE change contains the values that made up the row that was deleted
+** from the database. In the changeset format, the records that are part
+** of INSERT or DELETE changes never contain any undefined (type byte 0x00)
+** fields.
+**
+** Within the old.* record associated with an UPDATE change, all fields
+** associated with table columns that are not PRIMARY KEY columns and are
+** not modified by the UPDATE change are set to "undefined". Other fields
+** are set to the values that made up the row before the UPDATE that the
+** change records took place. Within the new.* record, fields associated
+** with table columns modified by the UPDATE change contain the new
+** values. Fields associated with table columns that are not modified
+** are set to "undefined".
+**
+** PATCHSET FORMAT:
+**
+** A patchset is also a collection of changes. It is similar to a changeset,
+** but leaves undefined those fields that are not useful if no conflict
+** resolution is required when applying the changeset.
+**
+** Each group of changes begins with a table header:
+**
+** 1 byte: Constant 0x50 (capital 'P')
+** Varint: Number of columns in the table.
+** nCol bytes: 0x01 for PK columns, 0x00 otherwise.
+** N bytes: Unqualified table name (encoded using UTF-8). Nul-terminated.
+**
+** Followed by one or more changes to the table.
+**
+** 1 byte: Either SQLITE_INSERT (0x12), UPDATE (0x17) or DELETE (0x09).
+** 1 byte: The "indirect-change" flag.
+** single record: (PK fields for DELETE, PK and modified fields for UPDATE,
+** full record for INSERT).
+**
+** As in the changeset format, each field of the single record that is part
+** of a patchset change is associated with the correspondingly positioned
+** table column, counting from left to right within the CREATE TABLE
+** statement.
+**
+** For a DELETE change, all fields within the record except those associated
+** with PRIMARY KEY columns are omitted. The PRIMARY KEY fields contain the
+** values identifying the row to delete.
+**
+** For an UPDATE change, all fields except those associated with PRIMARY KEY
+** columns and columns that are modified by the UPDATE are set to "undefined".
+** PRIMARY KEY fields contain the values identifying the table row to update,
+** and fields associated with modified columns contain the new column values.
+**
+** The records associated with INSERT changes are in the same format as for
+** changesets. It is not possible for a record associated with an INSERT
+** change to contain a field set to "undefined".
+**
+** REBASE BLOB FORMAT:
+**
+** A rebase blob may be output by sqlite3changeset_apply_v2() and its
+** streaming equivalent for use with the sqlite3_rebaser APIs to rebase
+** existing changesets. A rebase blob contains one entry for each conflict
+** resolved using either the OMIT or REPLACE strategies within the apply_v2()
+** call.
+**
+** The format used for a rebase blob is very similar to that used for
+** changesets. All entries related to a single table are grouped together.
+**
+** Each group of entries begins with a table header in changeset format:
+**
+** 1 byte: Constant 0x54 (capital 'T')
+** Varint: Number of columns in the table.
+** nCol bytes: 0x01 for PK columns, 0x00 otherwise.
+** N bytes: Unqualified table name (encoded using UTF-8). Nul-terminated.
+**
+** Followed by one or more entries associated with the table.
+**
+**   1 byte: Either SQLITE_INSERT (0x12) or DELETE (0x09).
+** 1 byte: Flag. 0x01 for REPLACE, 0x00 for OMIT.
+** record: (in the record format defined above).
+**
+** In a rebase blob, the first field is set to SQLITE_INSERT if the change
+** that caused the conflict was an INSERT or UPDATE, or to SQLITE_DELETE if
+** it was a DELETE. The second field is set to 0x01 if the conflict
+** resolution strategy was REPLACE, or 0x00 if it was OMIT.
+**
+** If the change that caused the conflict was a DELETE, then the single
+** record is a copy of the old.* record from the original changeset. If it
+** was an INSERT, then the single record is a copy of the new.* record. If
+** the conflicting change was an UPDATE, then the single record is a copy
+** of the new.* record with the PK fields filled in based on the original
+** old.* record.
+*/
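+
+/*
+** As a concrete illustration of the RECORD FORMAT above (an example,
+** not part of the specification itself): the text value 'abc' is
+** serialized as the five bytes
+**
+**   0x03 0x03 0x61 0x62 0x63
+**
+** (type byte SQLITE_TEXT, varint length 3, then the UTF-8 bytes), and
+** the integer value 1 is serialized as the type byte 0x01 followed by
+** the 8-byte big-endian value 0x00 00 00 00 00 00 00 01.
+*/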
+
+/*
+** For each row modified during a session, there exists a single instance of
+** this structure stored in a SessionTable.aChange[] hash table.
+*/
+struct SessionChange {
+ int op; /* One of UPDATE, DELETE, INSERT */
+ int bIndirect; /* True if this change is "indirect" */
+ int nRecord; /* Number of bytes in buffer aRecord[] */
+ u8 *aRecord; /* Buffer containing old.* record */
+ SessionChange *pNext; /* For hash-table collisions */
+};
+
+/*
+** Write a varint with value iVal into the buffer at aBuf. Return the
+** number of bytes written.
+*/
+static int sessionVarintPut(u8 *aBuf, int iVal){
+ return putVarint32(aBuf, iVal);
+}
+
+/*
+** Return the number of bytes required to store value iVal as a varint.
+*/
+static int sessionVarintLen(int iVal){
+ return sqlite3VarintLen(iVal);
+}
+
+/*
+** Read a varint value from aBuf[] into *piVal. Return the number of
+** bytes read.
+*/
+static int sessionVarintGet(u8 *aBuf, int *piVal){
+ return getVarint32(aBuf, *piVal);
+}
+
+/* Load an unaligned and unsigned 32-bit integer */
+#define SESSION_UINT32(x) (((u32)(x)[0]<<24)|((x)[1]<<16)|((x)[2]<<8)|(x)[3])
+
+/*
+** Read a 64-bit big-endian integer value from buffer aRec[]. Return
+** the value read.
+*/
+static sqlite3_int64 sessionGetI64(u8 *aRec){
+ u64 x = SESSION_UINT32(aRec);
+ u32 y = SESSION_UINT32(aRec+4);
+ x = (x<<32) + y;
+ return (sqlite3_int64)x;
+}
+
+/*
+** Write a 64-bit big-endian integer value to the buffer aBuf[].
+*/
+static void sessionPutI64(u8 *aBuf, sqlite3_int64 i){
+ aBuf[0] = (i>>56) & 0xFF;
+ aBuf[1] = (i>>48) & 0xFF;
+ aBuf[2] = (i>>40) & 0xFF;
+ aBuf[3] = (i>>32) & 0xFF;
+ aBuf[4] = (i>>24) & 0xFF;
+ aBuf[5] = (i>>16) & 0xFF;
+ aBuf[6] = (i>> 8) & 0xFF;
+ aBuf[7] = (i>> 0) & 0xFF;
+}
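+
+/*
+** For example, sessionPutI64(aBuf, 0x0102030405060708) writes the bytes
+** 0x01 0x02 ... 0x08 to aBuf[] in that order, and sessionGetI64() on the
+** same buffer recovers the original value regardless of host endianness.
+*/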
+
+/*
+** This function is used to serialize the contents of value pValue (see
+** comment titled "RECORD FORMAT" above).
+**
+** If it is non-NULL, the serialized form of the value is written to
+** buffer aBuf. *pnWrite is set to the number of bytes written before
+** returning. Or, if aBuf is NULL, the only thing this function does is
+** set *pnWrite.
+**
+** If no error occurs, SQLITE_OK is returned. Or, if an OOM error occurs
+** within a call to sqlite3_value_text() (which may fail if the db encoding
+** is utf-16), SQLITE_NOMEM is returned.
+*/
+static int sessionSerializeValue(
+ u8 *aBuf, /* If non-NULL, write serialized value here */
+ sqlite3_value *pValue, /* Value to serialize */
+ sqlite3_int64 *pnWrite /* IN/OUT: Increment by bytes written */
+){
+ int nByte; /* Size of serialized value in bytes */
+
+ if( pValue ){
+ int eType; /* Value type (SQLITE_NULL, TEXT etc.) */
+
+ eType = sqlite3_value_type(pValue);
+ if( aBuf ) aBuf[0] = eType;
+
+ switch( eType ){
+ case SQLITE_NULL:
+ nByte = 1;
+ break;
+
+ case SQLITE_INTEGER:
+ case SQLITE_FLOAT:
+ if( aBuf ){
+ /* TODO: SQLite does something special to deal with mixed-endian
+ ** floating point values (e.g. ARM7). This code probably should
+ ** too. */
+ u64 i;
+ if( eType==SQLITE_INTEGER ){
+ i = (u64)sqlite3_value_int64(pValue);
+ }else{
+ double r;
+ assert( sizeof(double)==8 && sizeof(u64)==8 );
+ r = sqlite3_value_double(pValue);
+ memcpy(&i, &r, 8);
+ }
+ sessionPutI64(&aBuf[1], i);
+ }
+ nByte = 9;
+ break;
+
+ default: {
+ u8 *z;
+ int n;
+ int nVarint;
+
+ assert( eType==SQLITE_TEXT || eType==SQLITE_BLOB );
+ if( eType==SQLITE_TEXT ){
+ z = (u8 *)sqlite3_value_text(pValue);
+ }else{
+ z = (u8 *)sqlite3_value_blob(pValue);
+ }
+ n = sqlite3_value_bytes(pValue);
+ if( z==0 && (eType!=SQLITE_BLOB || n>0) ) return SQLITE_NOMEM;
+ nVarint = sessionVarintLen(n);
+
+ if( aBuf ){
+ sessionVarintPut(&aBuf[1], n);
+ if( n ) memcpy(&aBuf[nVarint + 1], z, n);
+ }
+
+ nByte = 1 + nVarint + n;
+ break;
+ }
+ }
+ }else{
+ nByte = 1;
+ if( aBuf ) aBuf[0] = '\0';
+ }
+
+ if( pnWrite ) *pnWrite += nByte;
+ return SQLITE_OK;
+}
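+
+/* Worked examples of the serialized forms produced above (see the
+** comment titled "RECORD FORMAT"):
+**
+**   NULL value:   0x05                          (1 byte)
+**   integer 7:    0x01 00 00 00 00 00 00 00 07  (9 bytes)
+**   text 'ab':    0x03 0x02 'a' 'b'             (4 bytes)
+**
+** A NULL pValue pointer serializes as the single "no value" byte 0x00.
+*/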
+
+
+/*
+** This macro is used to calculate hash key values for data structures. To
+** use it, the entire data structure must be represented as a series of
+** unsigned integers. For a data structure represented as three such
+** integers, the macro may be used as follows:
+**
+** int hash_key_value;
+** hash_key_value = HASH_APPEND(0, <value 1>);
+** hash_key_value = HASH_APPEND(hash_key_value, <value 2>);
+** hash_key_value = HASH_APPEND(hash_key_value, <value 3>);
+**
+** In practice, the data structures this macro is used for are the primary
+** key values of modified rows.
+*/
+#define HASH_APPEND(hash, add) (((hash) << 3) ^ (hash) ^ (unsigned int)(add))
+
+/*
+** Append the hash of the 64-bit integer passed as the second argument to the
+** hash-key value passed as the first. Return the new hash-key value.
+*/
+static unsigned int sessionHashAppendI64(unsigned int h, i64 i){
+ h = HASH_APPEND(h, i & 0xFFFFFFFF);
+ return HASH_APPEND(h, (i>>32)&0xFFFFFFFF);
+}
+
+/*
+** Append the hash of the blob passed via the second and third arguments to
+** the hash-key value passed as the first. Return the new hash-key value.
+*/
+static unsigned int sessionHashAppendBlob(unsigned int h, int n, const u8 *z){
+ int i;
+ for(i=0; i<n; i++) h = HASH_APPEND(h, z[i]);
+ return h;
+}
+
+/*
+** Append the hash of the data type passed as the second argument to the
+** hash-key value passed as the first. Return the new hash-key value.
+*/
+static unsigned int sessionHashAppendType(unsigned int h, int eType){
+ return HASH_APPEND(h, eType);
+}
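+
+/* Taken together, the three helpers above hash a value as its type
+** followed by its content, mirroring their use in sessionPreupdateHash()
+** below. For an integer PK value of 7, for example:
+**
+**   h = sessionHashAppendType(h, SQLITE_INTEGER);
+**   h = sessionHashAppendI64(h, 7);
+*/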
+
+/*
+** This function may only be called from within a pre-update callback.
+** It calculates a hash based on the primary key values of the old.* or
+** new.* row currently available and, assuming no error occurs, writes it to
+** *piHash before returning. If the primary key contains one or more NULL
+** values, *pbNullPK is set to true before returning.
+**
+** If an error occurs, an SQLite error code is returned and the final values
+** of *piHash and *pbNullPK are undefined. Otherwise, SQLITE_OK is returned
+** and the output variables are set as described above.
+*/
+static int sessionPreupdateHash(
+ sqlite3_session *pSession, /* Session object that owns pTab */
+ SessionTable *pTab, /* Session table handle */
+ int bNew, /* True to hash the new.* PK */
+ int *piHash, /* OUT: Hash value */
+ int *pbNullPK /* OUT: True if there are NULL values in PK */
+){
+ unsigned int h = 0; /* Hash value to return */
+ int i; /* Used to iterate through columns */
+
+ assert( *pbNullPK==0 );
+ assert( pTab->nCol==pSession->hook.xCount(pSession->hook.pCtx) );
+ for(i=0; i<pTab->nCol; i++){
+ if( pTab->abPK[i] ){
+ int rc;
+ int eType;
+ sqlite3_value *pVal;
+
+ if( bNew ){
+ rc = pSession->hook.xNew(pSession->hook.pCtx, i, &pVal);
+ }else{
+ rc = pSession->hook.xOld(pSession->hook.pCtx, i, &pVal);
+ }
+ if( rc!=SQLITE_OK ) return rc;
+
+ eType = sqlite3_value_type(pVal);
+ h = sessionHashAppendType(h, eType);
+ if( eType==SQLITE_INTEGER || eType==SQLITE_FLOAT ){
+ i64 iVal;
+ if( eType==SQLITE_INTEGER ){
+ iVal = sqlite3_value_int64(pVal);
+ }else{
+ double rVal = sqlite3_value_double(pVal);
+ assert( sizeof(iVal)==8 && sizeof(rVal)==8 );
+ memcpy(&iVal, &rVal, 8);
+ }
+ h = sessionHashAppendI64(h, iVal);
+ }else if( eType==SQLITE_TEXT || eType==SQLITE_BLOB ){
+ const u8 *z;
+ int n;
+ if( eType==SQLITE_TEXT ){
+ z = (const u8 *)sqlite3_value_text(pVal);
+ }else{
+ z = (const u8 *)sqlite3_value_blob(pVal);
+ }
+ n = sqlite3_value_bytes(pVal);
+ if( !z && (eType!=SQLITE_BLOB || n>0) ) return SQLITE_NOMEM;
+ h = sessionHashAppendBlob(h, n, z);
+ }else{
+ assert( eType==SQLITE_NULL );
+ assert( pTab->bStat1==0 || i!=1 );
+ *pbNullPK = 1;
+ }
+ }
+ }
+
+ *piHash = (h % pTab->nChange);
+ return SQLITE_OK;
+}
+
+/*
+** The buffer that the argument points to contains a serialized SQL value.
+** Return the number of bytes of space occupied by the value (including
+** the type byte).
+*/
+static int sessionSerialLen(u8 *a){
+ int e = *a;
+ int n;
+ if( e==0 || e==0xFF ) return 1;
+ if( e==SQLITE_NULL ) return 1;
+ if( e==SQLITE_INTEGER || e==SQLITE_FLOAT ) return 9;
+ return sessionVarintGet(&a[1], &n) + 1 + n;
+}
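+
+/* For example, given the serialized text value 0x03 0x03 'a' 'b' 'c',
+** sessionSerialLen() returns 5 - one type byte, one varint byte and
+** three bytes of content.
+*/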
+
+/*
+** Based on the primary key values stored in change aRecord, calculate a
+** hash key. Assume the hash table has nBucket buckets. The hash keys
+** calculated by this function are compatible with those calculated by
+** sessionPreupdateHash().
+**
+** The bPkOnly argument is non-zero if the record at aRecord[] is from
+** a patchset DELETE. In this case the non-PK fields are omitted entirely.
+*/
+static unsigned int sessionChangeHash(
+ SessionTable *pTab, /* Table handle */
+ int bPkOnly, /* Record consists of PK fields only */
+ u8 *aRecord, /* Change record */
+ int nBucket /* Assume this many buckets in hash table */
+){
+ unsigned int h = 0; /* Value to return */
+ int i; /* Used to iterate through columns */
+ u8 *a = aRecord; /* Used to iterate through change record */
+
+ for(i=0; i<pTab->nCol; i++){
+ int eType = *a;
+ int isPK = pTab->abPK[i];
+ if( bPkOnly && isPK==0 ) continue;
+
+ /* It is not possible for eType to be SQLITE_NULL here. The session
+ ** module does not record changes for rows with NULL values stored in
+ ** primary key columns. */
+ assert( eType==SQLITE_INTEGER || eType==SQLITE_FLOAT
+ || eType==SQLITE_TEXT || eType==SQLITE_BLOB
+ || eType==SQLITE_NULL || eType==0
+ );
+ assert( !isPK || (eType!=0 && eType!=SQLITE_NULL) );
+
+ if( isPK ){
+ a++;
+ h = sessionHashAppendType(h, eType);
+ if( eType==SQLITE_INTEGER || eType==SQLITE_FLOAT ){
+ h = sessionHashAppendI64(h, sessionGetI64(a));
+ a += 8;
+ }else{
+ int n;
+ a += sessionVarintGet(a, &n);
+ h = sessionHashAppendBlob(h, n, a);
+ a += n;
+ }
+ }else{
+ a += sessionSerialLen(a);
+ }
+ }
+ return (h % nBucket);
+}
+
+/*
+** Arguments aLeft and aRight are pointers to change records for table pTab.
+** This function returns true if the two records apply to the same row (i.e.
+** have the same values stored in the primary key columns), or false
+** otherwise.
+*/
+static int sessionChangeEqual(
+ SessionTable *pTab, /* Table used for PK definition */
+ int bLeftPkOnly, /* True if aLeft[] contains PK fields only */
+ u8 *aLeft, /* Change record */
+ int bRightPkOnly, /* True if aRight[] contains PK fields only */
+ u8 *aRight /* Change record */
+){
+ u8 *a1 = aLeft; /* Cursor to iterate through aLeft */
+ u8 *a2 = aRight; /* Cursor to iterate through aRight */
+ int iCol; /* Used to iterate through table columns */
+
+ for(iCol=0; iCol<pTab->nCol; iCol++){
+ if( pTab->abPK[iCol] ){
+ int n1 = sessionSerialLen(a1);
+ int n2 = sessionSerialLen(a2);
+
+ if( n1!=n2 || memcmp(a1, a2, n1) ){
+ return 0;
+ }
+ a1 += n1;
+ a2 += n2;
+ }else{
+ if( bLeftPkOnly==0 ) a1 += sessionSerialLen(a1);
+ if( bRightPkOnly==0 ) a2 += sessionSerialLen(a2);
+ }
+ }
+
+ return 1;
+}
+
+/*
+** Arguments aLeft and aRight both point to buffers containing change
+** records with nCol columns. This function "merges" the two records into
+** a single record, which is written to the buffer at *paOut. *paOut is
+** then set to point to one byte after the last byte written before
+** returning.
+**
+** The merging of records is done as follows: For each column, if the
+** aRight record contains a value for the column, copy the value from
+** there. Otherwise, if aLeft contains a value, copy it. If neither
+** record contains a value for a given column, then neither does the
+** output record.
+*/
+static void sessionMergeRecord(
+ u8 **paOut,
+ int nCol,
+ u8 *aLeft,
+ u8 *aRight
+){
+ u8 *a1 = aLeft; /* Cursor used to iterate through aLeft */
+ u8 *a2 = aRight; /* Cursor used to iterate through aRight */
+ u8 *aOut = *paOut; /* Output cursor */
+ int iCol; /* Used to iterate from 0 to nCol */
+
+ for(iCol=0; iCol<nCol; iCol++){
+ int n1 = sessionSerialLen(a1);
+ int n2 = sessionSerialLen(a2);
+ if( *a2 ){
+ memcpy(aOut, a2, n2);
+ aOut += n2;
+ }else{
+ memcpy(aOut, a1, n1);
+ aOut += n1;
+ }
+ a1 += n1;
+ a2 += n2;
+ }
+
+ *paOut = aOut;
+}
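+
+/* For example, with "-" standing for the 0x00 "no value" placeholder,
+** merging the three-column records aLeft=(1, -, 3) and aRight=(-, 2, -)
+** produces the output record (1, 2, 3): wherever aRight carries a value
+** it wins, and aLeft fills the remaining columns.
+*/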
+
+/*
+** This is a helper function used by sessionMergeUpdate().
+**
+** When this function is called, both *paOne and *paTwo point to a value
+** within a change record. Before it returns, both have been advanced so
+** as to point to the next value in the record.
+**
+** If, when this function is called, *paTwo points to a valid value (i.e.
+** *paTwo[0] is not 0x00 - the "no value" placeholder), a copy of the *paTwo
+** pointer is returned and *pnVal is set to the number of bytes in the
+** serialized value. Otherwise, a copy of *paOne is returned and *pnVal
+** set to the number of bytes in the value at *paOne. If *paOne points
+** to the "no value" placeholder, *pnVal is set to 1. In other words:
+**
+** if( *paTwo is valid ) return *paTwo;
+** return *paOne;
+**
+*/
+static u8 *sessionMergeValue(
+ u8 **paOne, /* IN/OUT: Left-hand buffer pointer */
+ u8 **paTwo, /* IN/OUT: Right-hand buffer pointer */
+ int *pnVal /* OUT: Bytes in returned value */
+){
+ u8 *a1 = *paOne;
+ u8 *a2 = *paTwo;
+ u8 *pRet = 0;
+ int n1;
+
+ assert( a1 );
+ if( a2 ){
+ int n2 = sessionSerialLen(a2);
+ if( *a2 ){
+ *pnVal = n2;
+ pRet = a2;
+ }
+ *paTwo = &a2[n2];
+ }
+
+ n1 = sessionSerialLen(a1);
+ if( pRet==0 ){
+ *pnVal = n1;
+ pRet = a1;
+ }
+ *paOne = &a1[n1];
+
+ return pRet;
+}
+
+/*
+** This function is used by changeset_concat() to merge two UPDATE changes
+** on the same row.
+*/
+static int sessionMergeUpdate(
+ u8 **paOut, /* IN/OUT: Pointer to output buffer */
+ SessionTable *pTab, /* Table change pertains to */
+ int bPatchset, /* True if records are patchset records */
+ u8 *aOldRecord1, /* old.* record for first change */
+ u8 *aOldRecord2, /* old.* record for second change */
+ u8 *aNewRecord1, /* new.* record for first change */
+ u8 *aNewRecord2 /* new.* record for second change */
+){
+ u8 *aOld1 = aOldRecord1;
+ u8 *aOld2 = aOldRecord2;
+ u8 *aNew1 = aNewRecord1;
+ u8 *aNew2 = aNewRecord2;
+
+ u8 *aOut = *paOut;
+ int i;
+
+ if( bPatchset==0 ){
+ int bRequired = 0;
+
+ assert( aOldRecord1 && aNewRecord1 );
+
+ /* Write the old.* vector first. */
+ for(i=0; i<pTab->nCol; i++){
+ int nOld;
+ u8 *aOld;
+ int nNew;
+ u8 *aNew;
+
+ aOld = sessionMergeValue(&aOld1, &aOld2, &nOld);
+ aNew = sessionMergeValue(&aNew1, &aNew2, &nNew);
+ if( pTab->abPK[i] || nOld!=nNew || memcmp(aOld, aNew, nNew) ){
+ if( pTab->abPK[i]==0 ) bRequired = 1;
+ memcpy(aOut, aOld, nOld);
+ aOut += nOld;
+ }else{
+ *(aOut++) = '\0';
+ }
+ }
+
+ if( !bRequired ) return 0;
+ }
+
+ /* Write the new.* vector */
+ aOld1 = aOldRecord1;
+ aOld2 = aOldRecord2;
+ aNew1 = aNewRecord1;
+ aNew2 = aNewRecord2;
+ for(i=0; i<pTab->nCol; i++){
+ int nOld;
+ u8 *aOld;
+ int nNew;
+ u8 *aNew;
+
+ aOld = sessionMergeValue(&aOld1, &aOld2, &nOld);
+ aNew = sessionMergeValue(&aNew1, &aNew2, &nNew);
+ if( bPatchset==0
+ && (pTab->abPK[i] || (nOld==nNew && 0==memcmp(aOld, aNew, nNew)))
+ ){
+ *(aOut++) = '\0';
+ }else{
+ memcpy(aOut, aNew, nNew);
+ aOut += nNew;
+ }
+ }
+
+ *paOut = aOut;
+ return 1;
+}
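+
+/* For example, if changeset_concat() merges an UPDATE changing a field
+** from 1 to 2 with a second UPDATE changing the same field from 2 to 3,
+** the merged change records 1 as the old.* value and 3 as the new.*
+** value. In changeset mode, if no non-PK field differs between the
+** merged old.* and new.* vectors, zero is returned and *paOut is left
+** unmodified - the merged change is a no-op.
+*/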
+
+/*
+** This function is only called from within a pre-update-hook callback.
+** It determines if the current pre-update-hook change affects the same row
+** as the change stored in argument pChange. If so, it returns true;
+** otherwise, it returns false.
+*/
+static int sessionPreupdateEqual(
+ sqlite3_session *pSession, /* Session object that owns SessionTable */
+ SessionTable *pTab, /* Table associated with change */
+ SessionChange *pChange, /* Change to compare to */
+ int op /* Current pre-update operation */
+){
+ int iCol; /* Used to iterate through columns */
+ u8 *a = pChange->aRecord; /* Cursor used to scan change record */
+
+ assert( op==SQLITE_INSERT || op==SQLITE_UPDATE || op==SQLITE_DELETE );
+ for(iCol=0; iCol<pTab->nCol; iCol++){
+ if( !pTab->abPK[iCol] ){
+ a += sessionSerialLen(a);
+ }else{
+ sqlite3_value *pVal; /* Value returned by preupdate_new/old */
+ int rc; /* Error code from preupdate_new/old */
+ int eType = *a++; /* Type of value from change record */
+
+      /* The following calls to preupdate_new() and preupdate_old() cannot
+ ** fail. This is because they cache their return values, and by the
+ ** time control flows to here they have already been called once from
+ ** within sessionPreupdateHash(). The first two asserts below verify
+ ** this (that the method has already been called). */
+ if( op==SQLITE_INSERT ){
+ /* assert( db->pPreUpdate->pNewUnpacked || db->pPreUpdate->aNew ); */
+ rc = pSession->hook.xNew(pSession->hook.pCtx, iCol, &pVal);
+ }else{
+ /* assert( db->pPreUpdate->pUnpacked ); */
+ rc = pSession->hook.xOld(pSession->hook.pCtx, iCol, &pVal);
+ }
+ assert( rc==SQLITE_OK );
+ if( sqlite3_value_type(pVal)!=eType ) return 0;
+
+ /* A SessionChange object never has a NULL value in a PK column */
+ assert( eType==SQLITE_INTEGER || eType==SQLITE_FLOAT
+ || eType==SQLITE_BLOB || eType==SQLITE_TEXT
+ );
+
+ if( eType==SQLITE_INTEGER || eType==SQLITE_FLOAT ){
+ i64 iVal = sessionGetI64(a);
+ a += 8;
+ if( eType==SQLITE_INTEGER ){
+ if( sqlite3_value_int64(pVal)!=iVal ) return 0;
+ }else{
+ double rVal;
+ assert( sizeof(iVal)==8 && sizeof(rVal)==8 );
+ memcpy(&rVal, &iVal, 8);
+ if( sqlite3_value_double(pVal)!=rVal ) return 0;
+ }
+ }else{
+ int n;
+ const u8 *z;
+ a += sessionVarintGet(a, &n);
+ if( sqlite3_value_bytes(pVal)!=n ) return 0;
+ if( eType==SQLITE_TEXT ){
+ z = sqlite3_value_text(pVal);
+ }else{
+ z = sqlite3_value_blob(pVal);
+ }
+ if( n>0 && memcmp(a, z, n) ) return 0;
+ a += n;
+ }
+ }
+ }
+
+ return 1;
+}
+
+/*
+** If required, grow the hash table used to store changes on table pTab
+** (part of the session pSession). If a fatal OOM error occurs, return
+** SQLITE_ERROR so that the caller can mark the session object as failed.
+** Otherwise, return SQLITE_OK.
+**
+** It is possible that a non-fatal OOM error occurs in this function. In
+** that case the hash-table does not grow, but SQLITE_OK is returned anyway.
+** Growing the hash table in this case is a performance optimization only,
+** it is not required for correct operation.
+*/
+static int sessionGrowHash(int bPatchset, SessionTable *pTab){
+ if( pTab->nChange==0 || pTab->nEntry>=(pTab->nChange/2) ){
+ int i;
+ SessionChange **apNew;
+ sqlite3_int64 nNew = 2*(sqlite3_int64)(pTab->nChange ? pTab->nChange : 128);
+
+ apNew = (SessionChange **)sqlite3_malloc64(sizeof(SessionChange *) * nNew);
+ if( apNew==0 ){
+ if( pTab->nChange==0 ){
+ return SQLITE_ERROR;
+ }
+ return SQLITE_OK;
+ }
+ memset(apNew, 0, sizeof(SessionChange *) * nNew);
+
+ for(i=0; i<pTab->nChange; i++){
+ SessionChange *p;
+ SessionChange *pNext;
+ for(p=pTab->apChange[i]; p; p=pNext){
+ int bPkOnly = (p->op==SQLITE_DELETE && bPatchset);
+ int iHash = sessionChangeHash(pTab, bPkOnly, p->aRecord, nNew);
+ pNext = p->pNext;
+ p->pNext = apNew[iHash];
+ apNew[iHash] = p;
+ }
+ }
+
+ sqlite3_free(pTab->apChange);
+ pTab->nChange = nNew;
+ pTab->apChange = apNew;
+ }
+
+ return SQLITE_OK;
+}
+
+/*
+** This function queries the database for the names of the columns of table
+** zThis, in schema zDb.
+**
+** If successful, variable *pnCol is set to the number of columns in the
+** database table and variable *pzTab (if not NULL) is set to point to a
+** nul-terminated copy of the table name. *pazCol is set to point to an
+** array of pointers to column names. And *pabPK is set to point to an
+** array of booleans - true if the corresponding column is part of the
+** primary key.
+**
+** For example, if the table is declared as:
+**
+** CREATE TABLE tbl1(w, x, y, z, PRIMARY KEY(w, z));
+**
+** Then the four output variables are populated as follows:
+**
+** *pnCol = 4
+** *pzTab = "tbl1"
+** *pazCol = {"w", "x", "y", "z"}
+** *pabPK = {1, 0, 0, 1}
+**
+** All returned buffers are part of the same single allocation, which must
+** be freed using sqlite3_free() by the caller.
+*/
+static int sessionTableInfo(
+ sqlite3 *db, /* Database connection */
+ const char *zDb, /* Name of attached database (e.g. "main") */
+ const char *zThis, /* Table name */
+ int *pnCol, /* OUT: number of columns */
+ const char **pzTab, /* OUT: Copy of zThis */
+ const char ***pazCol, /* OUT: Array of column names for table */
+ u8 **pabPK /* OUT: Array of booleans - true for PK col */
+){
+ char *zPragma;
+ sqlite3_stmt *pStmt;
+ int rc;
+ sqlite3_int64 nByte;
+ int nDbCol = 0;
+ int nThis;
+ int i;
+ u8 *pAlloc = 0;
+ char **azCol = 0;
+ u8 *abPK = 0;
+
+ assert( pazCol && pabPK );
+
+ nThis = sqlite3Strlen30(zThis);
+ if( nThis==12 && 0==sqlite3_stricmp("sqlite_stat1", zThis) ){
+ rc = sqlite3_table_column_metadata(db, zDb, zThis, 0, 0, 0, 0, 0, 0);
+ if( rc==SQLITE_OK ){
+ /* For sqlite_stat1, pretend that (tbl,idx) is the PRIMARY KEY. */
+ zPragma = sqlite3_mprintf(
+ "SELECT 0, 'tbl', '', 0, '', 1 UNION ALL "
+ "SELECT 1, 'idx', '', 0, '', 2 UNION ALL "
+ "SELECT 2, 'stat', '', 0, '', 0"
+ );
+ }else if( rc==SQLITE_ERROR ){
+ zPragma = sqlite3_mprintf("");
+ }else{
+ return rc;
+ }
+ }else{
+ zPragma = sqlite3_mprintf("PRAGMA '%q'.table_info('%q')", zDb, zThis);
+ }
+ if( !zPragma ) return SQLITE_NOMEM;
+
+ rc = sqlite3_prepare_v2(db, zPragma, -1, &pStmt, 0);
+ sqlite3_free(zPragma);
+ if( rc!=SQLITE_OK ) return rc;
+
+ nByte = nThis + 1;
+ while( SQLITE_ROW==sqlite3_step(pStmt) ){
+ nByte += sqlite3_column_bytes(pStmt, 1);
+ nDbCol++;
+ }
+ rc = sqlite3_reset(pStmt);
+
+ if( rc==SQLITE_OK ){
+ nByte += nDbCol * (sizeof(const char *) + sizeof(u8) + 1);
+ pAlloc = sqlite3_malloc64(nByte);
+ if( pAlloc==0 ){
+ rc = SQLITE_NOMEM;
+ }
+ }
+ if( rc==SQLITE_OK ){
+ azCol = (char **)pAlloc;
+ pAlloc = (u8 *)&azCol[nDbCol];
+ abPK = (u8 *)pAlloc;
+ pAlloc = &abPK[nDbCol];
+ if( pzTab ){
+ memcpy(pAlloc, zThis, nThis+1);
+ *pzTab = (char *)pAlloc;
+ pAlloc += nThis+1;
+ }
+
+ i = 0;
+ while( SQLITE_ROW==sqlite3_step(pStmt) ){
+ int nName = sqlite3_column_bytes(pStmt, 1);
+ const unsigned char *zName = sqlite3_column_text(pStmt, 1);
+ if( zName==0 ) break;
+ memcpy(pAlloc, zName, nName+1);
+ azCol[i] = (char *)pAlloc;
+ pAlloc += nName+1;
+ abPK[i] = sqlite3_column_int(pStmt, 5);
+ i++;
+ }
+ rc = sqlite3_reset(pStmt);
+
+ }
+
+ /* If successful, populate the output variables. Otherwise, zero them and
+ ** free any allocation made. An error code will be returned in this case.
+ */
+ if( rc==SQLITE_OK ){
+ *pazCol = (const char **)azCol;
+ *pabPK = abPK;
+ *pnCol = nDbCol;
+ }else{
+ *pazCol = 0;
+ *pabPK = 0;
+ *pnCol = 0;
+ if( pzTab ) *pzTab = 0;
+ sqlite3_free(azCol);
+ }
+ sqlite3_finalize(pStmt);
+ return rc;
+}
+
+/*
+** This function is only called from within a pre-update handler for a
+** write to table pTab, part of session pSession. If this is the first
+** write to this table, initialize the SessionTable.nCol, azCol[] and
+** abPK[] arrays accordingly.
+**
+** If an error occurs, an error code is stored in sqlite3_session.rc and
+** non-zero is returned. Or, if no error occurs but the table has no primary
+** key, sqlite3_session.rc is left set to SQLITE_OK and non-zero is returned
+** to indicate that updates on this table should be ignored. SessionTable.abPK
+** is set to NULL in this case.
+*/
+static int sessionInitTable(sqlite3_session *pSession, SessionTable *pTab){
+ if( pTab->nCol==0 ){
+ u8 *abPK;
+ assert( pTab->azCol==0 || pTab->abPK==0 );
+ pSession->rc = sessionTableInfo(pSession->db, pSession->zDb,
+ pTab->zName, &pTab->nCol, 0, &pTab->azCol, &abPK
+ );
+ if( pSession->rc==SQLITE_OK ){
+ int i;
+ for(i=0; i<pTab->nCol; i++){
+ if( abPK[i] ){
+ pTab->abPK = abPK;
+ break;
+ }
+ }
+ if( 0==sqlite3_stricmp("sqlite_stat1", pTab->zName) ){
+ pTab->bStat1 = 1;
+ }
+ }
+ }
+ return (pSession->rc || pTab->abPK==0);
+}
+
+/*
+** Versions of the four methods in object SessionHook for use with the
+** sqlite_stat1 table. The purpose of this is to substitute a zero-length
+** blob each time a NULL value is read from the "idx" column of the
+** sqlite_stat1 table.
+*/
+typedef struct SessionStat1Ctx SessionStat1Ctx;
+struct SessionStat1Ctx {
+ SessionHook hook;
+ sqlite3_session *pSession;
+};
+static int sessionStat1Old(void *pCtx, int iCol, sqlite3_value **ppVal){
+ SessionStat1Ctx *p = (SessionStat1Ctx*)pCtx;
+ sqlite3_value *pVal = 0;
+ int rc = p->hook.xOld(p->hook.pCtx, iCol, &pVal);
+ if( rc==SQLITE_OK && iCol==1 && sqlite3_value_type(pVal)==SQLITE_NULL ){
+ pVal = p->pSession->pZeroBlob;
+ }
+ *ppVal = pVal;
+ return rc;
+}
+static int sessionStat1New(void *pCtx, int iCol, sqlite3_value **ppVal){
+ SessionStat1Ctx *p = (SessionStat1Ctx*)pCtx;
+ sqlite3_value *pVal = 0;
+ int rc = p->hook.xNew(p->hook.pCtx, iCol, &pVal);
+ if( rc==SQLITE_OK && iCol==1 && sqlite3_value_type(pVal)==SQLITE_NULL ){
+ pVal = p->pSession->pZeroBlob;
+ }
+ *ppVal = pVal;
+ return rc;
+}
+static int sessionStat1Count(void *pCtx){
+ SessionStat1Ctx *p = (SessionStat1Ctx*)pCtx;
+ return p->hook.xCount(p->hook.pCtx);
+}
+static int sessionStat1Depth(void *pCtx){
+ SessionStat1Ctx *p = (SessionStat1Ctx*)pCtx;
+ return p->hook.xDepth(p->hook.pCtx);
+}
+
+
+/*
+** This function is only called from within a pre-update-hook reporting a
+** change on table pTab (attached to session pSession). The type of change
+** (UPDATE, INSERT, DELETE) is specified by the first argument.
+**
+** Unless one is already present or an error occurs, an entry is added
+** to the changed-rows hash table associated with table pTab.
+*/
+static void sessionPreupdateOneChange(
+ int op, /* One of SQLITE_UPDATE, INSERT, DELETE */
+ sqlite3_session *pSession, /* Session object pTab is attached to */
+ SessionTable *pTab /* Table that change applies to */
+){
+ int iHash;
+ int bNull = 0;
+ int rc = SQLITE_OK;
+ SessionStat1Ctx stat1 = {{0,0,0,0,0},0};
+
+ if( pSession->rc ) return;
+
+ /* Load table details if required */
+ if( sessionInitTable(pSession, pTab) ) return;
+
+ /* Check the number of columns in this xPreUpdate call matches the
+ ** number of columns in the table. */
+ if( pTab->nCol!=pSession->hook.xCount(pSession->hook.pCtx) ){
+ pSession->rc = SQLITE_SCHEMA;
+ return;
+ }
+
+ /* Grow the hash table if required */
+ if( sessionGrowHash(0, pTab) ){
+ pSession->rc = SQLITE_NOMEM;
+ return;
+ }
+
+ if( pTab->bStat1 ){
+ stat1.hook = pSession->hook;
+ stat1.pSession = pSession;
+ pSession->hook.pCtx = (void*)&stat1;
+ pSession->hook.xNew = sessionStat1New;
+ pSession->hook.xOld = sessionStat1Old;
+ pSession->hook.xCount = sessionStat1Count;
+ pSession->hook.xDepth = sessionStat1Depth;
+ if( pSession->pZeroBlob==0 ){
+ sqlite3_value *p = sqlite3ValueNew(0);
+ if( p==0 ){
+ rc = SQLITE_NOMEM;
+ goto error_out;
+ }
+ sqlite3ValueSetStr(p, 0, "", 0, SQLITE_STATIC);
+ pSession->pZeroBlob = p;
+ }
+ }
+
+ /* Calculate the hash-key for this change. If the primary key of the row
+ ** includes a NULL value, exit early. Such changes are ignored by the
+ ** session module. */
+ rc = sessionPreupdateHash(pSession, pTab, op==SQLITE_INSERT, &iHash, &bNull);
+ if( rc!=SQLITE_OK ) goto error_out;
+
+ if( bNull==0 ){
+ /* Search the hash table for an existing record for this row. */
+ SessionChange *pC;
+ for(pC=pTab->apChange[iHash]; pC; pC=pC->pNext){
+ if( sessionPreupdateEqual(pSession, pTab, pC, op) ) break;
+ }
+
+ if( pC==0 ){
+ /* Create a new change object containing all the old values (if
+ ** this is an SQLITE_UPDATE or SQLITE_DELETE), or just the PK
+ ** values (if this is an INSERT). */
+ SessionChange *pChange; /* New change object */
+ sqlite3_int64 nByte; /* Number of bytes to allocate */
+ int i; /* Used to iterate through columns */
+
+ assert( rc==SQLITE_OK );
+ pTab->nEntry++;
+
+ /* Figure out how large an allocation is required */
+ nByte = sizeof(SessionChange);
+ for(i=0; i<pTab->nCol; i++){
+ sqlite3_value *p = 0;
+ if( op!=SQLITE_INSERT ){
+ TESTONLY(int trc = ) pSession->hook.xOld(pSession->hook.pCtx, i, &p);
+ assert( trc==SQLITE_OK );
+ }else if( pTab->abPK[i] ){
+ TESTONLY(int trc = ) pSession->hook.xNew(pSession->hook.pCtx, i, &p);
+ assert( trc==SQLITE_OK );
+ }
+
+ /* This may fail if SQLite value p contains a utf-16 string that must
+ ** be converted to utf-8 and an OOM error occurs while doing so. */
+ rc = sessionSerializeValue(0, p, &nByte);
+ if( rc!=SQLITE_OK ) goto error_out;
+ }
+
+ /* Allocate the change object */
+ pChange = (SessionChange *)sqlite3_malloc64(nByte);
+ if( !pChange ){
+ rc = SQLITE_NOMEM;
+ goto error_out;
+ }else{
+ memset(pChange, 0, sizeof(SessionChange));
+ pChange->aRecord = (u8 *)&pChange[1];
+ }
+
+ /* Populate the change object. None of the preupdate_old(),
+ ** preupdate_new() or SerializeValue() calls below may fail as all
+ ** required values and encodings have already been cached in memory.
+ ** It is not possible for an OOM to occur in this block. */
+ nByte = 0;
+ for(i=0; i<pTab->nCol; i++){
+ sqlite3_value *p = 0;
+ if( op!=SQLITE_INSERT ){
+ pSession->hook.xOld(pSession->hook.pCtx, i, &p);
+ }else if( pTab->abPK[i] ){
+ pSession->hook.xNew(pSession->hook.pCtx, i, &p);
+ }
+ sessionSerializeValue(&pChange->aRecord[nByte], p, &nByte);
+ }
+
+ /* Add the change to the hash-table */
+ if( pSession->bIndirect || pSession->hook.xDepth(pSession->hook.pCtx) ){
+ pChange->bIndirect = 1;
+ }
+ pChange->nRecord = nByte;
+ pChange->op = op;
+ pChange->pNext = pTab->apChange[iHash];
+ pTab->apChange[iHash] = pChange;
+
+ }else if( pC->bIndirect ){
+ /* If the existing change is considered "indirect", but this current
+ ** change is "direct", mark the change object as direct. */
+ if( pSession->hook.xDepth(pSession->hook.pCtx)==0
+ && pSession->bIndirect==0
+ ){
+ pC->bIndirect = 0;
+ }
+ }
+ }
+
+ /* If an error has occurred, mark the session object as failed. */
+ error_out:
+ if( pTab->bStat1 ){
+ pSession->hook = stat1.hook;
+ }
+ if( rc!=SQLITE_OK ){
+ pSession->rc = rc;
+ }
+}
+
+static int sessionFindTable(
+ sqlite3_session *pSession,
+ const char *zName,
+ SessionTable **ppTab
+){
+ int rc = SQLITE_OK;
+ int nName = sqlite3Strlen30(zName);
+ SessionTable *pRet;
+
+ /* Search for an existing table */
+ for(pRet=pSession->pTable; pRet; pRet=pRet->pNext){
+ if( 0==sqlite3_strnicmp(pRet->zName, zName, nName+1) ) break;
+ }
+
+ if( pRet==0 && pSession->bAutoAttach ){
+ /* If there is a table-filter configured, invoke it. If it returns 0,
+ ** do not automatically add the new table. */
+ if( pSession->xTableFilter==0
+ || pSession->xTableFilter(pSession->pFilterCtx, zName)
+ ){
+ rc = sqlite3session_attach(pSession, zName);
+ if( rc==SQLITE_OK ){
+ for(pRet=pSession->pTable; pRet->pNext; pRet=pRet->pNext);
+ assert( 0==sqlite3_strnicmp(pRet->zName, zName, nName+1) );
+ }
+ }
+ }
+
+ assert( rc==SQLITE_OK || pRet==0 );
+ *ppTab = pRet;
+ return rc;
+}
+
+/*
+** The 'pre-update' hook registered by this module with SQLite databases.
+*/
+static void xPreUpdate(
+ void *pCtx, /* Copy of third arg to preupdate_hook() */
+ sqlite3 *db, /* Database handle */
+ int op, /* SQLITE_UPDATE, DELETE or INSERT */
+ char const *zDb, /* Database name */
+ char const *zName, /* Table name */
+ sqlite3_int64 iKey1, /* Rowid of row about to be deleted/updated */
+ sqlite3_int64 iKey2 /* New rowid value (for a rowid UPDATE) */
+){
+ sqlite3_session *pSession;
+ int nDb = sqlite3Strlen30(zDb);
+
+ assert( sqlite3_mutex_held(db->mutex) );
+
+ for(pSession=(sqlite3_session *)pCtx; pSession; pSession=pSession->pNext){
+ SessionTable *pTab;
+
+ /* If this session is attached to a different database ("main", "temp"
+ ** etc.), or if it is not currently enabled, there is nothing to do. Skip
+ ** to the next session object attached to this database. */
+ if( pSession->bEnable==0 ) continue;
+ if( pSession->rc ) continue;
+ if( sqlite3_strnicmp(zDb, pSession->zDb, nDb+1) ) continue;
+
+ pSession->rc = sessionFindTable(pSession, zName, &pTab);
+ if( pTab ){
+ assert( pSession->rc==SQLITE_OK );
+ sessionPreupdateOneChange(op, pSession, pTab);
+ if( op==SQLITE_UPDATE ){
+ sessionPreupdateOneChange(SQLITE_INSERT, pSession, pTab);
+ }
+ }
+ }
+}
+
+/*
+** The pre-update hook implementations.
+*/
+static int sessionPreupdateOld(void *pCtx, int iVal, sqlite3_value **ppVal){
+ return sqlite3_preupdate_old((sqlite3*)pCtx, iVal, ppVal);
+}
+static int sessionPreupdateNew(void *pCtx, int iVal, sqlite3_value **ppVal){
+ return sqlite3_preupdate_new((sqlite3*)pCtx, iVal, ppVal);
+}
+static int sessionPreupdateCount(void *pCtx){
+ return sqlite3_preupdate_count((sqlite3*)pCtx);
+}
+static int sessionPreupdateDepth(void *pCtx){
+ return sqlite3_preupdate_depth((sqlite3*)pCtx);
+}
+
+/*
+** Install the pre-update hooks on the session object passed as the only
+** argument.
+*/
+static void sessionPreupdateHooks(
+ sqlite3_session *pSession
+){
+ pSession->hook.pCtx = (void*)pSession->db;
+ pSession->hook.xOld = sessionPreupdateOld;
+ pSession->hook.xNew = sessionPreupdateNew;
+ pSession->hook.xCount = sessionPreupdateCount;
+ pSession->hook.xDepth = sessionPreupdateDepth;
+}
+
+typedef struct SessionDiffCtx SessionDiffCtx;
+struct SessionDiffCtx {
+ sqlite3_stmt *pStmt;
+ int nOldOff;
+};
+
+/*
+** The diff hook implementations.
+*/
+static int sessionDiffOld(void *pCtx, int iVal, sqlite3_value **ppVal){
+ SessionDiffCtx *p = (SessionDiffCtx*)pCtx;
+ *ppVal = sqlite3_column_value(p->pStmt, iVal+p->nOldOff);
+ return SQLITE_OK;
+}
+static int sessionDiffNew(void *pCtx, int iVal, sqlite3_value **ppVal){
+ SessionDiffCtx *p = (SessionDiffCtx*)pCtx;
+ *ppVal = sqlite3_column_value(p->pStmt, iVal);
+ return SQLITE_OK;
+}
+static int sessionDiffCount(void *pCtx){
+ SessionDiffCtx *p = (SessionDiffCtx*)pCtx;
+ return p->nOldOff ? p->nOldOff : sqlite3_column_count(p->pStmt);
+}
+static int sessionDiffDepth(void *pCtx){
+ return 0;
+}
+
+/*
+** Install the diff hooks on the session object passed as the only
+** argument.
+*/
+static void sessionDiffHooks(
+ sqlite3_session *pSession,
+ SessionDiffCtx *pDiffCtx
+){
+ pSession->hook.pCtx = (void*)pDiffCtx;
+ pSession->hook.xOld = sessionDiffOld;
+ pSession->hook.xNew = sessionDiffNew;
+ pSession->hook.xCount = sessionDiffCount;
+ pSession->hook.xDepth = sessionDiffDepth;
+}
+
+static char *sessionExprComparePK(
+ int nCol,
+ const char *zDb1, const char *zDb2,
+ const char *zTab,
+ const char **azCol, u8 *abPK
+){
+ int i;
+ const char *zSep = "";
+ char *zRet = 0;
+
+ for(i=0; i<nCol; i++){
+ if( abPK[i] ){
+ zRet = sqlite3_mprintf("%z%s\"%w\".\"%w\".\"%w\"=\"%w\".\"%w\".\"%w\"",
+ zRet, zSep, zDb1, zTab, azCol[i], zDb2, zTab, azCol[i]
+ );
+ zSep = " AND ";
+ if( zRet==0 ) break;
+ }
+ }
+
+ return zRet;
+}
+
+static char *sessionExprCompareOther(
+ int nCol,
+ const char *zDb1, const char *zDb2,
+ const char *zTab,
+ const char **azCol, u8 *abPK
+){
+ int i;
+ const char *zSep = "";
+ char *zRet = 0;
+ int bHave = 0;
+
+ for(i=0; i<nCol; i++){
+ if( abPK[i]==0 ){
+ bHave = 1;
+ zRet = sqlite3_mprintf(
+ "%z%s\"%w\".\"%w\".\"%w\" IS NOT \"%w\".\"%w\".\"%w\"",
+ zRet, zSep, zDb1, zTab, azCol[i], zDb2, zTab, azCol[i]
+ );
+ zSep = " OR ";
+ if( zRet==0 ) break;
+ }
+ }
+
+ if( bHave==0 ){
+ assert( zRet==0 );
+ zRet = sqlite3_mprintf("0");
+ }
+
+ return zRet;
+}
+
+static char *sessionSelectFindNew(
+ int nCol,
+ const char *zDb1, /* Pick rows in this db only */
+ const char *zDb2, /* But not in this one */
+ const char *zTbl, /* Table name */
+ const char *zExpr
+){
+ char *zRet = sqlite3_mprintf(
+ "SELECT * FROM \"%w\".\"%w\" WHERE NOT EXISTS ("
+ " SELECT 1 FROM \"%w\".\"%w\" WHERE %s"
+ ")",
+ zDb1, zTbl, zDb2, zTbl, zExpr
+ );
+ return zRet;
+}
+
+static int sessionDiffFindNew(
+ int op,
+ sqlite3_session *pSession,
+ SessionTable *pTab,
+ const char *zDb1,
+ const char *zDb2,
+ char *zExpr
+){
+ int rc = SQLITE_OK;
+ char *zStmt = sessionSelectFindNew(pTab->nCol, zDb1, zDb2, pTab->zName,zExpr);
+
+ if( zStmt==0 ){
+ rc = SQLITE_NOMEM;
+ }else{
+ sqlite3_stmt *pStmt;
+ rc = sqlite3_prepare(pSession->db, zStmt, -1, &pStmt, 0);
+ if( rc==SQLITE_OK ){
+ SessionDiffCtx *pDiffCtx = (SessionDiffCtx*)pSession->hook.pCtx;
+ pDiffCtx->pStmt = pStmt;
+ pDiffCtx->nOldOff = 0;
+ while( SQLITE_ROW==sqlite3_step(pStmt) ){
+ sessionPreupdateOneChange(op, pSession, pTab);
+ }
+ rc = sqlite3_finalize(pStmt);
+ }
+ sqlite3_free(zStmt);
+ }
+
+ return rc;
+}
+
+static int sessionDiffFindModified(
+ sqlite3_session *pSession,
+ SessionTable *pTab,
+ const char *zFrom,
+ const char *zExpr
+){
+ int rc = SQLITE_OK;
+
+ char *zExpr2 = sessionExprCompareOther(pTab->nCol,
+ pSession->zDb, zFrom, pTab->zName, pTab->azCol, pTab->abPK
+ );
+ if( zExpr2==0 ){
+ rc = SQLITE_NOMEM;
+ }else{
+ char *zStmt = sqlite3_mprintf(
+ "SELECT * FROM \"%w\".\"%w\", \"%w\".\"%w\" WHERE %s AND (%z)",
+ pSession->zDb, pTab->zName, zFrom, pTab->zName, zExpr, zExpr2
+ );
+ if( zStmt==0 ){
+ rc = SQLITE_NOMEM;
+ }else{
+ sqlite3_stmt *pStmt;
+ rc = sqlite3_prepare(pSession->db, zStmt, -1, &pStmt, 0);
+
+ if( rc==SQLITE_OK ){
+ SessionDiffCtx *pDiffCtx = (SessionDiffCtx*)pSession->hook.pCtx;
+ pDiffCtx->pStmt = pStmt;
+ pDiffCtx->nOldOff = pTab->nCol;
+ while( SQLITE_ROW==sqlite3_step(pStmt) ){
+ sessionPreupdateOneChange(SQLITE_UPDATE, pSession, pTab);
+ }
+ rc = sqlite3_finalize(pStmt);
+ }
+ sqlite3_free(zStmt);
+ }
+ }
+
+ return rc;
+}
+
+SQLITE_API int sqlite3session_diff(
+ sqlite3_session *pSession,
+ const char *zFrom,
+ const char *zTbl,
+ char **pzErrMsg
+){
+ const char *zDb = pSession->zDb;
+ int rc = pSession->rc;
+ SessionDiffCtx d;
+
+ memset(&d, 0, sizeof(d));
+ sessionDiffHooks(pSession, &d);
+
+ sqlite3_mutex_enter(sqlite3_db_mutex(pSession->db));
+ if( pzErrMsg ) *pzErrMsg = 0;
+ if( rc==SQLITE_OK ){
+ char *zExpr = 0;
+ sqlite3 *db = pSession->db;
+ SessionTable *pTo; /* Table zTbl */
+
+ /* Locate and if necessary initialize the target table object */
+ rc = sessionFindTable(pSession, zTbl, &pTo);
+ if( pTo==0 ) goto diff_out;
+ if( sessionInitTable(pSession, pTo) ){
+ rc = pSession->rc;
+ goto diff_out;
+ }
+
+ /* Check the table schemas match */
+ if( rc==SQLITE_OK ){
+ int bHasPk = 0;
+ int bMismatch = 0;
+ int nCol; /* Columns in zFrom.zTbl */
+ u8 *abPK;
+ const char **azCol = 0;
+ rc = sessionTableInfo(db, zFrom, zTbl, &nCol, 0, &azCol, &abPK);
+ if( rc==SQLITE_OK ){
+ if( pTo->nCol!=nCol ){
+ bMismatch = 1;
+ }else{
+ int i;
+ for(i=0; i<nCol; i++){
+ if( pTo->abPK[i]!=abPK[i] ) bMismatch = 1;
+ if( sqlite3_stricmp(azCol[i], pTo->azCol[i]) ) bMismatch = 1;
+ if( abPK[i] ) bHasPk = 1;
+ }
+ }
+ }
+ sqlite3_free((char*)azCol);
+ if( bMismatch ){
+ if( pzErrMsg ){
+ *pzErrMsg = sqlite3_mprintf("table schemas do not match");
+ }
+ rc = SQLITE_SCHEMA;
+ }
+ if( bHasPk==0 ){
+ /* Ignore tables with no primary keys */
+ goto diff_out;
+ }
+ }
+
+ if( rc==SQLITE_OK ){
+ zExpr = sessionExprComparePK(pTo->nCol,
+ zDb, zFrom, pTo->zName, pTo->azCol, pTo->abPK
+ );
+ }
+
+ /* Find new rows */
+ if( rc==SQLITE_OK ){
+ rc = sessionDiffFindNew(SQLITE_INSERT, pSession, pTo, zDb, zFrom, zExpr);
+ }
+
+ /* Find old rows */
+ if( rc==SQLITE_OK ){
+ rc = sessionDiffFindNew(SQLITE_DELETE, pSession, pTo, zFrom, zDb, zExpr);
+ }
+
+ /* Find modified rows */
+ if( rc==SQLITE_OK ){
+ rc = sessionDiffFindModified(pSession, pTo, zFrom, zExpr);
+ }
+
+ sqlite3_free(zExpr);
+ }
+
+ diff_out:
+ sessionPreupdateHooks(pSession);
+ sqlite3_mutex_leave(sqlite3_db_mutex(pSession->db));
+ return rc;
+}
+
+/*
+** Create a session object. This session object will record changes to
+** database zDb attached to connection db.
+*/
+SQLITE_API int sqlite3session_create(
+ sqlite3 *db, /* Database handle */
+ const char *zDb, /* Name of db (e.g. "main") */
+ sqlite3_session **ppSession /* OUT: New session object */
+){
+ sqlite3_session *pNew; /* Newly allocated session object */
+ sqlite3_session *pOld; /* Session object already attached to db */
+ int nDb = sqlite3Strlen30(zDb); /* Length of zDb in bytes */
+
+ /* Zero the output value in case an error occurs. */
+ *ppSession = 0;
+
+ /* Allocate and populate the new session object. */
+ pNew = (sqlite3_session *)sqlite3_malloc64(sizeof(sqlite3_session) + nDb + 1);
+ if( !pNew ) return SQLITE_NOMEM;
+ memset(pNew, 0, sizeof(sqlite3_session));
+ pNew->db = db;
+ pNew->zDb = (char *)&pNew[1];
+ pNew->bEnable = 1;
+ memcpy(pNew->zDb, zDb, nDb+1);
+ sessionPreupdateHooks(pNew);
+
+ /* Add the new session object to the linked list of session objects
+ ** attached to database handle $db. Do this under the cover of the db
+ ** handle mutex. */
+ sqlite3_mutex_enter(sqlite3_db_mutex(db));
+ pOld = (sqlite3_session*)sqlite3_preupdate_hook(db, xPreUpdate, (void*)pNew);
+ pNew->pNext = pOld;
+ sqlite3_mutex_leave(sqlite3_db_mutex(db));
+
+ *ppSession = pNew;
+ return SQLITE_OK;
+}
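+
+/* A minimal usage sketch for the public session API (error handling
+** omitted; the schema and table names are illustrative only):
+**
+**   sqlite3_session *pSession;
+**   int nChangeset;
+**   void *pChangeset;
+**
+**   sqlite3session_create(db, "main", &pSession);
+**   sqlite3session_attach(pSession, "tbl1");
+**   ...  make changes to "main"."tbl1" via db  ...
+**   sqlite3session_changeset(pSession, &nChangeset, &pChangeset);
+**   ...  use the changeset  ...
+**   sqlite3_free(pChangeset);
+**   sqlite3session_delete(pSession);
+*/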
+
+/*
+** Free the list of table objects passed as the first argument. The contents
+** of the changed-rows hash tables are also deleted.
+*/
+static void sessionDeleteTable(SessionTable *pList){
+ SessionTable *pNext;
+ SessionTable *pTab;
+
+ for(pTab=pList; pTab; pTab=pNext){
+ int i;
+ pNext = pTab->pNext;
+ for(i=0; i<pTab->nChange; i++){
+ SessionChange *p;
+ SessionChange *pNextChange;
+ for(p=pTab->apChange[i]; p; p=pNextChange){
+ pNextChange = p->pNext;
+ sqlite3_free(p);
+ }
+ }
+ sqlite3_free((char*)pTab->azCol); /* cast works around VC++ bug */
+ sqlite3_free(pTab->apChange);
+ sqlite3_free(pTab);
+ }
+}
+
+/*
+** Delete a session object previously allocated using sqlite3session_create().
+*/
+SQLITE_API void sqlite3session_delete(sqlite3_session *pSession){
+ sqlite3 *db = pSession->db;
+ sqlite3_session *pHead;
+ sqlite3_session **pp;
+
+ /* Unlink the session from the linked list of sessions attached to the
+ ** database handle. Hold the db mutex while doing so. */
+ sqlite3_mutex_enter(sqlite3_db_mutex(db));
+ pHead = (sqlite3_session*)sqlite3_preupdate_hook(db, 0, 0);
+ for(pp=&pHead; ALWAYS((*pp)!=0); pp=&((*pp)->pNext)){
+ if( (*pp)==pSession ){
+ *pp = (*pp)->pNext;
+ if( pHead ) sqlite3_preupdate_hook(db, xPreUpdate, (void*)pHead);
+ break;
+ }
+ }
+ sqlite3_mutex_leave(sqlite3_db_mutex(db));
+ sqlite3ValueFree(pSession->pZeroBlob);
+
+ /* Delete all attached table objects. And the contents of their
+ ** associated hash-tables. */
+ sessionDeleteTable(pSession->pTable);
+
+ /* Free the session object itself. */
+ sqlite3_free(pSession);
+}
+
+/*
+** Set a table filter on a session object.
+*/
+SQLITE_API void sqlite3session_table_filter(
+ sqlite3_session *pSession,
+ int(*xFilter)(void*, const char*),
+ void *pCtx /* First argument passed to xFilter */
+){
+ pSession->bAutoAttach = 1;
+ pSession->pFilterCtx = pCtx;
+ pSession->xTableFilter = xFilter;
+}
+
+/*
+** Attach a table to a session. All subsequent changes made to the table
+** while the session object is enabled will be recorded.
+**
+** Only tables that have a PRIMARY KEY defined may be attached. It does
+** not matter if the PRIMARY KEY is an "INTEGER PRIMARY KEY" (rowid alias)
+** or not.
+*/
+SQLITE_API int sqlite3session_attach(
+ sqlite3_session *pSession, /* Session object */
+ const char *zName /* Table name */
+){
+ int rc = SQLITE_OK;
+ sqlite3_mutex_enter(sqlite3_db_mutex(pSession->db));
+
+ if( !zName ){
+ pSession->bAutoAttach = 1;
+ }else{
+ SessionTable *pTab; /* New table object (if required) */
+ int nName; /* Number of bytes in string zName */
+
+ /* First search for an existing entry. If one is found, this call is
+ ** a no-op. Return early. */
+ nName = sqlite3Strlen30(zName);
+ for(pTab=pSession->pTable; pTab; pTab=pTab->pNext){
+ if( 0==sqlite3_strnicmp(pTab->zName, zName, nName+1) ) break;
+ }
+
+ if( !pTab ){
+ /* Allocate new SessionTable object. */
+ pTab = (SessionTable *)sqlite3_malloc64(sizeof(SessionTable) + nName + 1);
+ if( !pTab ){
+ rc = SQLITE_NOMEM;
+ }else{
+ /* Populate the new SessionTable object and link it into the list.
+ ** The new object must be linked onto the end of the list, not
+ ** simply added to the start of it in order to ensure that tables
+ ** appear in the correct order when a changeset or patchset is
+ ** eventually generated. */
+ SessionTable **ppTab;
+ memset(pTab, 0, sizeof(SessionTable));
+ pTab->zName = (char *)&pTab[1];
+ memcpy(pTab->zName, zName, nName+1);
+ for(ppTab=&pSession->pTable; *ppTab; ppTab=&(*ppTab)->pNext);
+ *ppTab = pTab;
+ }
+ }
+ }
+
+ sqlite3_mutex_leave(sqlite3_db_mutex(pSession->db));
+ return rc;
+}
+
+/*
+** Ensure that there is room in the buffer to append nByte bytes of data.
+** If not, use sqlite3_realloc() to grow the buffer so that there is.
+**
+** If successful, return zero. Otherwise, if an OOM condition is encountered,
+** set *pRc to SQLITE_NOMEM and return non-zero.
+*/
+static int sessionBufferGrow(SessionBuffer *p, size_t nByte, int *pRc){
+ if( *pRc==SQLITE_OK && (size_t)(p->nAlloc-p->nBuf)<nByte ){
+ u8 *aNew;
+ i64 nNew = p->nAlloc ? p->nAlloc : 128;
+ do {
+ nNew = nNew*2;
+ }while( (size_t)(nNew-p->nBuf)<nByte );
+
+ aNew = (u8 *)sqlite3_realloc64(p->aBuf, nNew);
+ if( 0==aNew ){
+ *pRc = SQLITE_NOMEM;
+ }else{
+ p->aBuf = aNew;
+ p->nAlloc = nNew;
+ }
+ }
+ return (*pRc!=SQLITE_OK);
+}
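+
+/* The candidate allocation starts at 128 bytes (or at the current
+** allocation size) and is doubled until it can hold the existing
+** contents plus nByte new bytes. For example, appending 1000 bytes to
+** an empty buffer grows the allocation 128 -> 256 -> 512 -> 1024.
+*/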
+
+/*
+** Append the value passed as the second argument to the buffer passed
+** as the first.
+**
+** This function is a no-op if *pRc is non-zero when it is called.
+** Otherwise, if an error occurs, *pRc is set to an SQLite error code
+** before returning.
+*/
+static void sessionAppendValue(SessionBuffer *p, sqlite3_value *pVal, int *pRc){
+ int rc = *pRc;
+ if( rc==SQLITE_OK ){
+ sqlite3_int64 nByte = 0;
+ rc = sessionSerializeValue(0, pVal, &nByte);
+ sessionBufferGrow(p, nByte, &rc);
+ if( rc==SQLITE_OK ){
+ rc = sessionSerializeValue(&p->aBuf[p->nBuf], pVal, 0);
+ p->nBuf += nByte;
+ }else{
+ *pRc = rc;
+ }
+ }
+}
+
+/*
+** This function is a no-op if *pRc is other than SQLITE_OK when it is
+** called. Otherwise, append a single byte to the buffer.
+**
+** If an OOM condition is encountered, set *pRc to SQLITE_NOMEM before
+** returning.
+*/
+static void sessionAppendByte(SessionBuffer *p, u8 v, int *pRc){
+ if( 0==sessionBufferGrow(p, 1, pRc) ){
+ p->aBuf[p->nBuf++] = v;
+ }
+}
+
+/*
+** This function is a no-op if *pRc is other than SQLITE_OK when it is
+** called. Otherwise, append a single varint to the buffer.
+**
+** If an OOM condition is encountered, set *pRc to SQLITE_NOMEM before
+** returning.
+*/
+static void sessionAppendVarint(SessionBuffer *p, int v, int *pRc){
+ if( 0==sessionBufferGrow(p, 9, pRc) ){
+ p->nBuf += sessionVarintPut(&p->aBuf[p->nBuf], v);
+ }
+}
+
+/*
+** This function is a no-op if *pRc is other than SQLITE_OK when it is
+** called. Otherwise, append a blob of data to the buffer.
+**
+** If an OOM condition is encountered, set *pRc to SQLITE_NOMEM before
+** returning.
+*/
+static void sessionAppendBlob(
+ SessionBuffer *p,
+ const u8 *aBlob,
+ int nBlob,
+ int *pRc
+){
+ if( nBlob>0 && 0==sessionBufferGrow(p, nBlob, pRc) ){
+ memcpy(&p->aBuf[p->nBuf], aBlob, nBlob);
+ p->nBuf += nBlob;
+ }
+}
+
+/*
+** This function is a no-op if *pRc is other than SQLITE_OK when it is
+** called. Otherwise, append a string to the buffer. All bytes in the string
+** up to (but not including) the nul-terminator are written to the buffer.
+**
+** If an OOM condition is encountered, set *pRc to SQLITE_NOMEM before
+** returning.
+*/
+static void sessionAppendStr(
+ SessionBuffer *p,
+ const char *zStr,
+ int *pRc
+){
+ int nStr = sqlite3Strlen30(zStr);
+ if( 0==sessionBufferGrow(p, nStr, pRc) ){
+ memcpy(&p->aBuf[p->nBuf], zStr, nStr);
+ p->nBuf += nStr;
+ }
+}
+
+/*
+** This function is a no-op if *pRc is other than SQLITE_OK when it is
+** called. Otherwise, append the string representation of integer iVal
+** to the buffer. No nul-terminator is written.
+**
+** If an OOM condition is encountered, set *pRc to SQLITE_NOMEM before
+** returning.
+*/
+static void sessionAppendInteger(
+ SessionBuffer *p, /* Buffer to append to */
+ int iVal, /* Value to write the string rep. of */
+ int *pRc /* IN/OUT: Error code */
+){
+ char aBuf[24];
+ sqlite3_snprintf(sizeof(aBuf)-1, aBuf, "%d", iVal);
+ sessionAppendStr(p, aBuf, pRc);
+}
+
+/*
+** This function is a no-op if *pRc is other than SQLITE_OK when it is
+** called. Otherwise, append the string zStr enclosed in quotes (") and
+** with any embedded quote characters escaped to the buffer. No
+** nul-terminator byte is written.
+**
+** If an OOM condition is encountered, set *pRc to SQLITE_NOMEM before
+** returning.
+*/
+static void sessionAppendIdent(
+  SessionBuffer *p,              /* Buffer to append to */
+ const char *zStr, /* String to quote, escape and append */
+ int *pRc /* IN/OUT: Error code */
+){
+ int nStr = sqlite3Strlen30(zStr)*2 + 2 + 1;
+ if( 0==sessionBufferGrow(p, nStr, pRc) ){
+ char *zOut = (char *)&p->aBuf[p->nBuf];
+ const char *zIn = zStr;
+ *zOut++ = '"';
+ while( *zIn ){
+ if( *zIn=='"' ) *zOut++ = '"';
+ *zOut++ = *(zIn++);
+ }
+ *zOut++ = '"';
+ p->nBuf = (int)((u8 *)zOut - p->aBuf);
+ }
+}
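+
+/* For example, the identifier my"tbl is appended as "my""tbl" - the
+** embedded quote character is doubled and the whole identifier is
+** wrapped in double-quotes.
+*/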
+
+/*
+** This function is a no-op if *pRc is other than SQLITE_OK when it is
+** called. Otherwise, it appends to the buffer the serialized version of
+** the value stored in column iCol of the row that SQL statement pStmt
+** currently points to.
+*/
+static void sessionAppendCol(
+ SessionBuffer *p, /* Buffer to append to */
+ sqlite3_stmt *pStmt, /* Handle pointing to row containing value */
+ int iCol, /* Column to read value from */
+ int *pRc /* IN/OUT: Error code */
+){
+ if( *pRc==SQLITE_OK ){
+ int eType = sqlite3_column_type(pStmt, iCol);
+ sessionAppendByte(p, (u8)eType, pRc);
+ if( eType==SQLITE_INTEGER || eType==SQLITE_FLOAT ){
+ sqlite3_int64 i;
+ u8 aBuf[8];
+ if( eType==SQLITE_INTEGER ){
+ i = sqlite3_column_int64(pStmt, iCol);
+ }else{
+ double r = sqlite3_column_double(pStmt, iCol);
+ memcpy(&i, &r, 8);
+ }
+ sessionPutI64(aBuf, i);
+ sessionAppendBlob(p, aBuf, 8, pRc);
+ }
+ if( eType==SQLITE_BLOB || eType==SQLITE_TEXT ){
+ u8 *z;
+ int nByte;
+ if( eType==SQLITE_BLOB ){
+ z = (u8 *)sqlite3_column_blob(pStmt, iCol);
+ }else{
+ z = (u8 *)sqlite3_column_text(pStmt, iCol);
+ }
+ nByte = sqlite3_column_bytes(pStmt, iCol);
+ if( z || (eType==SQLITE_BLOB && nByte==0) ){
+ sessionAppendVarint(p, nByte, pRc);
+ sessionAppendBlob(p, z, nByte, pRc);
+ }else{
+ *pRc = SQLITE_NOMEM;
+ }
+ }
+ }
+}
+
+/*
+** This function appends an update change to the buffer (see the comments
+** under "CHANGESET FORMAT" at the top of the file). An update change
+** consists of:
+**
+** 1 byte: SQLITE_UPDATE (0x17)
+** n bytes: old.* record (see RECORD FORMAT)
+** m bytes: new.* record (see RECORD FORMAT)
+**
+** The SessionChange object passed as the third argument contains the
+** values that were stored in the row when the session began (the old.*
+** values). The statement handle passed as the second argument points
+** at the current version of the row (the new.* values).
+**
+** If all of the old.* values are equal to their corresponding new.* value
+** (i.e. nothing has changed), then no data at all is appended to the buffer.
+**
+** Otherwise, the old.* record contains all primary key values and the
+** original values of any fields that have been modified. The new.* record
+** contains the new values of only those fields that have been modified.
+*/
+static int sessionAppendUpdate(
+ SessionBuffer *pBuf, /* Buffer to append to */
+ int bPatchset, /* True for "patchset", 0 for "changeset" */
+ sqlite3_stmt *pStmt, /* Statement handle pointing at new row */
+ SessionChange *p, /* Object containing old values */
+ u8 *abPK /* Boolean array - true for PK columns */
+){
+ int rc = SQLITE_OK;
+ SessionBuffer buf2 = {0,0,0}; /* Buffer to accumulate new.* record in */
+ int bNoop = 1; /* Set to zero if any values are modified */
+  int nRewind = pBuf->nBuf;     /* Buffer size on entry - rewind point if no-op */
+ int i; /* Used to iterate through columns */
+ u8 *pCsr = p->aRecord; /* Used to iterate through old.* values */
+
+ sessionAppendByte(pBuf, SQLITE_UPDATE, &rc);
+ sessionAppendByte(pBuf, p->bIndirect, &rc);
+ for(i=0; i<sqlite3_column_count(pStmt); i++){
+ int bChanged = 0;
+ int nAdvance;
+ int eType = *pCsr;
+ switch( eType ){
+ case SQLITE_NULL:
+ nAdvance = 1;
+ if( sqlite3_column_type(pStmt, i)!=SQLITE_NULL ){
+ bChanged = 1;
+ }
+ break;
+
+ case SQLITE_FLOAT:
+ case SQLITE_INTEGER: {
+ nAdvance = 9;
+ if( eType==sqlite3_column_type(pStmt, i) ){
+ sqlite3_int64 iVal = sessionGetI64(&pCsr[1]);
+ if( eType==SQLITE_INTEGER ){
+ if( iVal==sqlite3_column_int64(pStmt, i) ) break;
+ }else{
+ double dVal;
+ memcpy(&dVal, &iVal, 8);
+ if( dVal==sqlite3_column_double(pStmt, i) ) break;
+ }
+ }
+ bChanged = 1;
+ break;
+ }
+
+ default: {
+ int n;
+ int nHdr = 1 + sessionVarintGet(&pCsr[1], &n);
+ assert( eType==SQLITE_TEXT || eType==SQLITE_BLOB );
+ nAdvance = nHdr + n;
+ if( eType==sqlite3_column_type(pStmt, i)
+ && n==sqlite3_column_bytes(pStmt, i)
+ && (n==0 || 0==memcmp(&pCsr[nHdr], sqlite3_column_blob(pStmt, i), n))
+ ){
+ break;
+ }
+ bChanged = 1;
+ }
+ }
+
+ /* If at least one field has been modified, this is not a no-op. */
+ if( bChanged ) bNoop = 0;
+
+    /* Add a field to the old.* record. This is omitted if this module is
+    ** currently generating a patchset. */
+ if( bPatchset==0 ){
+ if( bChanged || abPK[i] ){
+ sessionAppendBlob(pBuf, pCsr, nAdvance, &rc);
+ }else{
+ sessionAppendByte(pBuf, 0, &rc);
+ }
+ }
+
+    /* Add a field to the new.* record - or to the only record, if
+    ** currently generating a patchset. */
+ if( bChanged || (bPatchset && abPK[i]) ){
+ sessionAppendCol(&buf2, pStmt, i, &rc);
+ }else{
+ sessionAppendByte(&buf2, 0, &rc);
+ }
+
+ pCsr += nAdvance;
+ }
+
+ if( bNoop ){
+ pBuf->nBuf = nRewind;
+ }else{
+ sessionAppendBlob(pBuf, buf2.aBuf, buf2.nBuf, &rc);
+ }
+ sqlite3_free(buf2.aBuf);
+
+ return rc;
+}
+
+/*
+** Append a DELETE change to the buffer passed as the first argument. Use
+** the changeset format if argument bPatchset is zero, or the patchset
+** format otherwise.
+*/
+static int sessionAppendDelete(
+ SessionBuffer *pBuf, /* Buffer to append to */
+ int bPatchset, /* True for "patchset", 0 for "changeset" */
+ SessionChange *p, /* Object containing old values */
+ int nCol, /* Number of columns in table */
+ u8 *abPK /* Boolean array - true for PK columns */
+){
+ int rc = SQLITE_OK;
+
+ sessionAppendByte(pBuf, SQLITE_DELETE, &rc);
+ sessionAppendByte(pBuf, p->bIndirect, &rc);
+
+ if( bPatchset==0 ){
+ sessionAppendBlob(pBuf, p->aRecord, p->nRecord, &rc);
+ }else{
+ int i;
+ u8 *a = p->aRecord;
+ for(i=0; i<nCol; i++){
+ u8 *pStart = a;
+ int eType = *a++;
+
+ switch( eType ){
+ case 0:
+ case SQLITE_NULL:
+ assert( abPK[i]==0 );
+ break;
+
+ case SQLITE_FLOAT:
+ case SQLITE_INTEGER:
+ a += 8;
+ break;
+
+ default: {
+ int n;
+ a += sessionVarintGet(a, &n);
+ a += n;
+ break;
+ }
+ }
+ if( abPK[i] ){
+ sessionAppendBlob(pBuf, pStart, (int)(a-pStart), &rc);
+ }
+ }
+ assert( (a - p->aRecord)==p->nRecord );
+ }
+
+ return rc;
+}
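+
+/* In patchset mode only the PK fields of the old.* record are emitted.
+** For the tbl1 example used elsewhere in this file (PRIMARY KEY(w, z)),
+** a DELETE of the row (1, 2, 3, 4) is therefore encoded as the
+** serialized values of columns "w" and "z" only, preceded by the
+** SQLITE_DELETE and indirect-flag bytes.
+*/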
+
+/*
+** Formulate and prepare a SELECT statement to retrieve a row from table
+** zTab in database zDb based on its primary key. i.e.
+**
+** SELECT * FROM zDb.zTab WHERE pk1 = ? AND pk2 = ? AND ...
+*/
+static int sessionSelectStmt(
+ sqlite3 *db, /* Database handle */
+ const char *zDb, /* Database name */
+ const char *zTab, /* Table name */
+ int nCol, /* Number of columns in table */
+ const char **azCol, /* Names of table columns */
+ u8 *abPK, /* PRIMARY KEY array */
+ sqlite3_stmt **ppStmt /* OUT: Prepared SELECT statement */
+){
+ int rc = SQLITE_OK;
+ char *zSql = 0;
+ int nSql = -1;
+
+ if( 0==sqlite3_stricmp("sqlite_stat1", zTab) ){
+ zSql = sqlite3_mprintf(
+ "SELECT tbl, ?2, stat FROM %Q.sqlite_stat1 WHERE tbl IS ?1 AND "
+ "idx IS (CASE WHEN ?2=X'' THEN NULL ELSE ?2 END)", zDb
+ );
+ if( zSql==0 ) rc = SQLITE_NOMEM;
+ }else{
+ int i;
+ const char *zSep = "";
+ SessionBuffer buf = {0, 0, 0};
+
+ sessionAppendStr(&buf, "SELECT * FROM ", &rc);
+ sessionAppendIdent(&buf, zDb, &rc);
+ sessionAppendStr(&buf, ".", &rc);
+ sessionAppendIdent(&buf, zTab, &rc);
+ sessionAppendStr(&buf, " WHERE ", &rc);
+ for(i=0; i<nCol; i++){
+ if( abPK[i] ){
+ sessionAppendStr(&buf, zSep, &rc);
+ sessionAppendIdent(&buf, azCol[i], &rc);
+ sessionAppendStr(&buf, " IS ?", &rc);
+ sessionAppendInteger(&buf, i+1, &rc);
+ zSep = " AND ";
+ }
+ }
+ zSql = (char*)buf.aBuf;
+ nSql = buf.nBuf;
+ }
+
+ if( rc==SQLITE_OK ){
+ rc = sqlite3_prepare_v2(db, zSql, nSql, ppStmt, 0);
+ }
+ sqlite3_free(zSql);
+ return rc;
+}
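+
+/* For the example table used in the sessionTableInfo() comment:
+**
+**   CREATE TABLE tbl1(w, x, y, z, PRIMARY KEY(w, z));
+**
+** the statement prepared here (for database "main") is:
+**
+**   SELECT * FROM "main"."tbl1" WHERE "w" IS ?1 AND "z" IS ?4
+**
+** Parameter numbers follow column positions, so non-PK columns leave
+** gaps in the numbering - this matches sessionSelectBind(), which also
+** binds parameter i+1 for column i.
+*/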
+
+/*
+** Bind the PRIMARY KEY values from the change passed in argument pChange
+** to the SELECT statement passed as the first argument. The SELECT statement
+** is as prepared by function sessionSelectStmt().
+**
+** Return SQLITE_OK if all PK values are successfully bound, or an SQLite
+** error code (e.g. SQLITE_NOMEM) otherwise.
+*/
+static int sessionSelectBind(
+ sqlite3_stmt *pSelect, /* SELECT from sessionSelectStmt() */
+ int nCol, /* Number of columns in table */
+ u8 *abPK, /* PRIMARY KEY array */
+ SessionChange *pChange /* Change structure */
+){
+ int i;
+ int rc = SQLITE_OK;
+ u8 *a = pChange->aRecord;
+
+ for(i=0; i<nCol && rc==SQLITE_OK; i++){
+ int eType = *a++;
+
+ switch( eType ){
+ case 0:
+ case SQLITE_NULL:
+ assert( abPK[i]==0 );
+ break;
+
+ case SQLITE_INTEGER: {
+ if( abPK[i] ){
+ i64 iVal = sessionGetI64(a);
+ rc = sqlite3_bind_int64(pSelect, i+1, iVal);
+ }
+ a += 8;
+ break;
+ }
+
+ case SQLITE_FLOAT: {
+ if( abPK[i] ){
+ double rVal;
+ i64 iVal = sessionGetI64(a);
+ memcpy(&rVal, &iVal, 8);
+ rc = sqlite3_bind_double(pSelect, i+1, rVal);
+ }
+ a += 8;
+ break;
+ }
+
+ case SQLITE_TEXT: {
+ int n;
+ a += sessionVarintGet(a, &n);
+ if( abPK[i] ){
+ rc = sqlite3_bind_text(pSelect, i+1, (char *)a, n, SQLITE_TRANSIENT);
+ }
+ a += n;
+ break;
+ }
+
+ default: {
+ int n;
+ assert( eType==SQLITE_BLOB );
+ a += sessionVarintGet(a, &n);
+ if( abPK[i] ){
+ rc = sqlite3_bind_blob(pSelect, i+1, a, n, SQLITE_TRANSIENT);
+ }
+ a += n;
+ break;
+ }
+ }
+ }
+
+ return rc;
+}
+
+/*
+** This function is a no-op if *pRc is set to other than SQLITE_OK when it
+** is called. Otherwise, append a serialized table header (part of the binary
+** changeset format) to buffer *pBuf. If an error occurs, set *pRc to an
+** SQLite error code before returning.
+*/
+static void sessionAppendTableHdr(
+ SessionBuffer *pBuf, /* Append header to this buffer */
+ int bPatchset, /* Use the patchset format if true */
+ SessionTable *pTab, /* Table object to append header for */
+ int *pRc /* IN/OUT: Error code */
+){
+ /* Write a table header */
+ sessionAppendByte(pBuf, (bPatchset ? 'P' : 'T'), pRc);
+ sessionAppendVarint(pBuf, pTab->nCol, pRc);
+ sessionAppendBlob(pBuf, pTab->abPK, pTab->nCol, pRc);
+ sessionAppendBlob(pBuf, (u8 *)pTab->zName, (int)strlen(pTab->zName)+1, pRc);
+}
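+
+/* For example, the changeset-format header for the tbl1 table used in
+** earlier comments (four columns, PRIMARY KEY(w, z)) is the byte
+** sequence:
+**
+**   'T' 0x04 0x01 0x00 0x00 0x01 't' 'b' 'l' '1' 0x00
+**
+** i.e. the format byte, the column count as a varint, one PK flag per
+** column, then the nul-terminated table name.
+*/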
+
+/*
+** Generate either a changeset (if argument bPatchset is zero) or a patchset
+** (if it is non-zero) based on the current contents of the session object
+** passed as the first argument.
+**
+** If no error occurs, SQLITE_OK is returned and the new changeset/patchset
+** stored in output variables *pnChangeset and *ppChangeset. Or, if an error
+** occurs, an SQLite error code is returned and both output variables set
+** to 0.
+*/
+static int sessionGenerateChangeset(
+ sqlite3_session *pSession, /* Session object */
+ int bPatchset, /* True for patchset, false for changeset */
+ int (*xOutput)(void *pOut, const void *pData, int nData),
+ void *pOut, /* First argument for xOutput */
+ int *pnChangeset, /* OUT: Size of buffer at *ppChangeset */
+ void **ppChangeset /* OUT: Buffer containing changeset */
+){
+ sqlite3 *db = pSession->db; /* Source database handle */
+ SessionTable *pTab; /* Used to iterate through attached tables */
+  SessionBuffer buf = {0,0,0}; /* Buffer in which to accumulate changeset */
+ int rc; /* Return code */
+
+ assert( xOutput==0 || (pnChangeset==0 && ppChangeset==0 ) );
+
+ /* Zero the output variables in case an error occurs. If this session
+ ** object is already in the error state (sqlite3_session.rc != SQLITE_OK),
+ ** this call will be a no-op. */
+ if( xOutput==0 ){
+ *pnChangeset = 0;
+ *ppChangeset = 0;
+ }
+
+ if( pSession->rc ) return pSession->rc;
+ rc = sqlite3_exec(pSession->db, "SAVEPOINT changeset", 0, 0, 0);
+ if( rc!=SQLITE_OK ) return rc;
+
+ sqlite3_mutex_enter(sqlite3_db_mutex(db));
+
+ for(pTab=pSession->pTable; rc==SQLITE_OK && pTab; pTab=pTab->pNext){
+ if( pTab->nEntry ){
+ const char *zName = pTab->zName;
+ int nCol; /* Number of columns in table */
+ u8 *abPK; /* Primary key array */
+ const char **azCol = 0; /* Table columns */
+ int i; /* Used to iterate through hash buckets */
+ sqlite3_stmt *pSel = 0; /* SELECT statement to query table pTab */
+ int nRewind = buf.nBuf; /* Initial size of write buffer */
+ int nNoop; /* Size of buffer after writing tbl header */
+
+      /* Check that the table schema is still OK. */
+ rc = sessionTableInfo(db, pSession->zDb, zName, &nCol, 0, &azCol, &abPK);
+ if( !rc && (pTab->nCol!=nCol || memcmp(abPK, pTab->abPK, nCol)) ){
+ rc = SQLITE_SCHEMA;
+ }
+
+ /* Write a table header */
+ sessionAppendTableHdr(&buf, bPatchset, pTab, &rc);
+
+      /* Build and compile the SELECT statement used to query table pTab. */
+ if( rc==SQLITE_OK ){
+ rc = sessionSelectStmt(
+ db, pSession->zDb, zName, nCol, azCol, abPK, &pSel);
+ }
+
+ nNoop = buf.nBuf;
+ for(i=0; i<pTab->nChange && rc==SQLITE_OK; i++){
+ SessionChange *p; /* Used to iterate through changes */
+
+ for(p=pTab->apChange[i]; rc==SQLITE_OK && p; p=p->pNext){
+ rc = sessionSelectBind(pSel, nCol, abPK, p);
+ if( rc!=SQLITE_OK ) continue;
+ if( sqlite3_step(pSel)==SQLITE_ROW ){
+ if( p->op==SQLITE_INSERT ){
+ int iCol;
+ sessionAppendByte(&buf, SQLITE_INSERT, &rc);
+ sessionAppendByte(&buf, p->bIndirect, &rc);
+ for(iCol=0; iCol<nCol; iCol++){
+ sessionAppendCol(&buf, pSel, iCol, &rc);
+ }
+ }else{
+ rc = sessionAppendUpdate(&buf, bPatchset, pSel, p, abPK);
+ }
+ }else if( p->op!=SQLITE_INSERT ){
+ rc = sessionAppendDelete(&buf, bPatchset, p, nCol, abPK);
+ }
+ if( rc==SQLITE_OK ){
+ rc = sqlite3_reset(pSel);
+ }
+
+ /* If the buffer is now larger than sessions_strm_chunk_size, pass
+ ** its contents to the xOutput() callback. */
+ if( xOutput
+ && rc==SQLITE_OK
+ && buf.nBuf>nNoop
+ && buf.nBuf>sessions_strm_chunk_size
+ ){
+ rc = xOutput(pOut, (void*)buf.aBuf, buf.nBuf);
+ nNoop = -1;
+ buf.nBuf = 0;
+ }
+
+ }
+ }
+
+ sqlite3_finalize(pSel);
+ if( buf.nBuf==nNoop ){
+ buf.nBuf = nRewind;
+ }
+ sqlite3_free((char*)azCol); /* cast works around VC++ bug */
+ }
+ }
+
+ if( rc==SQLITE_OK ){
+ if( xOutput==0 ){
+ *pnChangeset = buf.nBuf;
+ *ppChangeset = buf.aBuf;
+ buf.aBuf = 0;
+ }else if( buf.nBuf>0 ){
+ rc = xOutput(pOut, (void*)buf.aBuf, buf.nBuf);
+ }
+ }
+
+ sqlite3_free(buf.aBuf);
+ sqlite3_exec(db, "RELEASE changeset", 0, 0, 0);
+ sqlite3_mutex_leave(sqlite3_db_mutex(db));
+ return rc;
+}
+
+/*
+** Obtain a changeset object containing all changes recorded by the
+** session object passed as the first argument.
+**
+** It is the responsibility of the caller to eventually free the buffer
+** using sqlite3_free().
+*/
+SQLITE_API int sqlite3session_changeset(
+ sqlite3_session *pSession, /* Session object */
+ int *pnChangeset, /* OUT: Size of buffer at *ppChangeset */
+ void **ppChangeset /* OUT: Buffer containing changeset */
+){
+ return sessionGenerateChangeset(pSession, 0, 0, 0, pnChangeset, ppChangeset);
+}
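+
+/*
+** EXAMPLE: a minimal usage sketch (illustrative only; both
+** exampleSaveChangeset() and writeToSomewhere() are hypothetical names,
+** not part of this file). The buffer returned via *ppChangeset must be
+** released with sqlite3_free():
+**
+**   static int exampleSaveChangeset(sqlite3_session *pSession){
+**     int nChangeset = 0;
+**     void *pChangeset = 0;
+**     int rc = sqlite3session_changeset(pSession, &nChangeset, &pChangeset);
+**     if( rc==SQLITE_OK ){
+**       writeToSomewhere(pChangeset, nChangeset);
+**       sqlite3_free(pChangeset);
+**     }
+**     return rc;
+**   }
+*/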
+
+/*
+** Streaming version of sqlite3session_changeset().
+*/
+SQLITE_API int sqlite3session_changeset_strm(
+ sqlite3_session *pSession,
+ int (*xOutput)(void *pOut, const void *pData, int nData),
+ void *pOut
+){
+ return sessionGenerateChangeset(pSession, 0, xOutput, pOut, 0, 0);
+}
+
+/*
+** Streaming version of sqlite3session_patchset().
+*/
+SQLITE_API int sqlite3session_patchset_strm(
+ sqlite3_session *pSession,
+ int (*xOutput)(void *pOut, const void *pData, int nData),
+ void *pOut
+){
+ return sessionGenerateChangeset(pSession, 1, xOutput, pOut, 0, 0);
+}
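+
+/*
+** EXAMPLE: a minimal xOutput() implementation for the two streaming
+** interfaces above (an illustrative sketch assuming <stdio.h>, not part
+** of the original sources). Returning any value other than SQLITE_OK
+** causes the streaming operation to be abandoned:
+**
+**   static int xFileOutput(void *pOut, const void *pData, int nData){
+**     FILE *pFile = (FILE*)pOut;
+**     if( fwrite(pData, 1, (size_t)nData, pFile)!=(size_t)nData ){
+**       return SQLITE_IOERR;
+**     }
+**     return SQLITE_OK;
+**   }
+*/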
+
+/*
+** Obtain a patchset object containing all changes recorded by the
+** session object passed as the first argument.
+**
+** It is the responsibility of the caller to eventually free the buffer
+** using sqlite3_free().
+*/
+SQLITE_API int sqlite3session_patchset(
+ sqlite3_session *pSession, /* Session object */
+ int *pnPatchset, /* OUT: Size of buffer at *ppChangeset */
+ void **ppPatchset /* OUT: Buffer containing changeset */
+){
+ return sessionGenerateChangeset(pSession, 1, 0, 0, pnPatchset, ppPatchset);
+}
+
+/*
+** Enable or disable the session object passed as the first argument.
+*/
+SQLITE_API int sqlite3session_enable(sqlite3_session *pSession, int bEnable){
+ int ret;
+ sqlite3_mutex_enter(sqlite3_db_mutex(pSession->db));
+ if( bEnable>=0 ){
+ pSession->bEnable = bEnable;
+ }
+ ret = pSession->bEnable;
+ sqlite3_mutex_leave(sqlite3_db_mutex(pSession->db));
+ return ret;
+}
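+
+/*
+** For example (illustrative only): because negative values of bEnable
+** leave the flag unchanged, the current state may be queried without
+** modifying it:
+**
+**   int bCurrent = sqlite3session_enable(pSession, -1);
+*/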
+
+/*
+** Set or clear the indirect-change flag on the session object passed as
+** the first argument.
+*/
+SQLITE_API int sqlite3session_indirect(sqlite3_session *pSession, int bIndirect){
+ int ret;
+ sqlite3_mutex_enter(sqlite3_db_mutex(pSession->db));
+ if( bIndirect>=0 ){
+ pSession->bIndirect = bIndirect;
+ }
+ ret = pSession->bIndirect;
+ sqlite3_mutex_leave(sqlite3_db_mutex(pSession->db));
+ return ret;
+}
+
+/*
+** Return true if there have been no changes to monitored tables recorded
+** by the session object passed as the only argument.
+*/
+SQLITE_API int sqlite3session_isempty(sqlite3_session *pSession){
+ int ret = 0;
+ SessionTable *pTab;
+
+ sqlite3_mutex_enter(sqlite3_db_mutex(pSession->db));
+ for(pTab=pSession->pTable; pTab && ret==0; pTab=pTab->pNext){
+ ret = (pTab->nEntry>0);
+ }
+ sqlite3_mutex_leave(sqlite3_db_mutex(pSession->db));
+
+ return (ret==0);
+}
+
+/*
+** Do the work for either sqlite3changeset_start() or start_strm().
+*/
+static int sessionChangesetStart(
+ sqlite3_changeset_iter **pp, /* OUT: Changeset iterator handle */
+ int (*xInput)(void *pIn, void *pData, int *pnData),
+ void *pIn,
+ int nChangeset, /* Size of buffer pChangeset in bytes */
+ void *pChangeset, /* Pointer to buffer containing changeset */
+ int bInvert /* True to invert changeset */
+){
+ sqlite3_changeset_iter *pRet; /* Iterator to return */
+ int nByte; /* Number of bytes to allocate for iterator */
+
+ assert( xInput==0 || (pChangeset==0 && nChangeset==0) );
+
+ /* Zero the output variable in case an error occurs. */
+ *pp = 0;
+
+ /* Allocate and initialize the iterator structure. */
+ nByte = sizeof(sqlite3_changeset_iter);
+ pRet = (sqlite3_changeset_iter *)sqlite3_malloc(nByte);
+ if( !pRet ) return SQLITE_NOMEM;
+ memset(pRet, 0, sizeof(sqlite3_changeset_iter));
+ pRet->in.aData = (u8 *)pChangeset;
+ pRet->in.nData = nChangeset;
+ pRet->in.xInput = xInput;
+ pRet->in.pIn = pIn;
+ pRet->in.bEof = (xInput ? 0 : 1);
+ pRet->bInvert = bInvert;
+
+ /* Populate the output variable and return success. */
+ *pp = pRet;
+ return SQLITE_OK;
+}
+
+/*
+** Create an iterator used to iterate through the contents of a changeset.
+*/
+SQLITE_API int sqlite3changeset_start(
+ sqlite3_changeset_iter **pp, /* OUT: Changeset iterator handle */
+ int nChangeset, /* Size of buffer pChangeset in bytes */
+ void *pChangeset /* Pointer to buffer containing changeset */
+){
+ return sessionChangesetStart(pp, 0, 0, nChangeset, pChangeset, 0);
+}
+SQLITE_API int sqlite3changeset_start_v2(
+ sqlite3_changeset_iter **pp, /* OUT: Changeset iterator handle */
+ int nChangeset, /* Size of buffer pChangeset in bytes */
+ void *pChangeset, /* Pointer to buffer containing changeset */
+ int flags
+){
+ int bInvert = !!(flags & SQLITE_CHANGESETSTART_INVERT);
+ return sessionChangesetStart(pp, 0, 0, nChangeset, pChangeset, bInvert);
+}
+
+/*
+** Streaming version of sqlite3changeset_start().
+*/
+SQLITE_API int sqlite3changeset_start_strm(
+ sqlite3_changeset_iter **pp, /* OUT: Changeset iterator handle */
+ int (*xInput)(void *pIn, void *pData, int *pnData),
+ void *pIn
+){
+ return sessionChangesetStart(pp, xInput, pIn, 0, 0, 0);
+}
+SQLITE_API int sqlite3changeset_start_v2_strm(
+ sqlite3_changeset_iter **pp, /* OUT: Changeset iterator handle */
+ int (*xInput)(void *pIn, void *pData, int *pnData),
+ void *pIn,
+ int flags
+){
+ int bInvert = !!(flags & SQLITE_CHANGESETSTART_INVERT);
+ return sessionChangesetStart(pp, xInput, pIn, 0, 0, bInvert);
+}
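+
+/*
+** EXAMPLE: a minimal xInput() implementation for the streaming starters
+** above (an illustrative sketch assuming <stdio.h>, not part of the
+** original sources). On entry, *pnData holds the maximum number of bytes
+** to read; the callback overwrites it with the number actually read,
+** where 0 indicates end-of-input:
+**
+**   static int xFileInput(void *pIn, void *pData, int *pnData){
+**     FILE *pFile = (FILE*)pIn;
+**     *pnData = (int)fread(pData, 1, (size_t)(*pnData), pFile);
+**     return ferror(pFile) ? SQLITE_IOERR : SQLITE_OK;
+**   }
+*/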
+
+/*
+** If the SessionInput object passed as the only argument is a streaming
+** object and the buffer is full, discard some data to free up space.
+*/
+static void sessionDiscardData(SessionInput *pIn){
+ if( pIn->xInput && pIn->iNext>=sessions_strm_chunk_size ){
+ int nMove = pIn->buf.nBuf - pIn->iNext;
+ assert( nMove>=0 );
+ if( nMove>0 ){
+ memmove(pIn->buf.aBuf, &pIn->buf.aBuf[pIn->iNext], nMove);
+ }
+ pIn->buf.nBuf -= pIn->iNext;
+ pIn->iNext = 0;
+ pIn->nData = pIn->buf.nBuf;
+ }
+}
+
+/*
+** Ensure that there are at least nByte bytes available in the buffer. Or,
+** if there are not nByte bytes remaining in the input, that all available
+** data is in the buffer.
+**
+** Return an SQLite error code if an error occurs, or SQLITE_OK otherwise.
+*/
+static int sessionInputBuffer(SessionInput *pIn, int nByte){
+ int rc = SQLITE_OK;
+ if( pIn->xInput ){
+ while( !pIn->bEof && (pIn->iNext+nByte)>=pIn->nData && rc==SQLITE_OK ){
+ int nNew = sessions_strm_chunk_size;
+
+ if( pIn->bNoDiscard==0 ) sessionDiscardData(pIn);
+ if( SQLITE_OK==sessionBufferGrow(&pIn->buf, nNew, &rc) ){
+ rc = pIn->xInput(pIn->pIn, &pIn->buf.aBuf[pIn->buf.nBuf], &nNew);
+ if( nNew==0 ){
+ pIn->bEof = 1;
+ }else{
+ pIn->buf.nBuf += nNew;
+ }
+ }
+
+ pIn->aData = pIn->buf.aBuf;
+ pIn->nData = pIn->buf.nBuf;
+ }
+ }
+ return rc;
+}
+
+/*
+** When this function is called, *ppRec points to the start of a record
+** that contains nCol values. This function advances the pointer *ppRec
+** until it points to the byte immediately following that record.
+*/
+static void sessionSkipRecord(
+ u8 **ppRec, /* IN/OUT: Record pointer */
+ int nCol /* Number of values in record */
+){
+ u8 *aRec = *ppRec;
+ int i;
+ for(i=0; i<nCol; i++){
+ int eType = *aRec++;
+ if( eType==SQLITE_TEXT || eType==SQLITE_BLOB ){
+ int nByte;
+ aRec += sessionVarintGet((u8*)aRec, &nByte);
+ aRec += nByte;
+ }else if( eType==SQLITE_INTEGER || eType==SQLITE_FLOAT ){
+ aRec += 8;
+ }
+ }
+
+ *ppRec = aRec;
+}
+
+/*
+** This function sets the value of the sqlite3_value object passed as the
+** first argument to a copy of the string or blob held in the aData[]
+** buffer. SQLITE_OK is returned if successful, or SQLITE_NOMEM if an OOM
+** error occurs.
+*/
+static int sessionValueSetStr(
+ sqlite3_value *pVal, /* Set the value of this object */
+ u8 *aData, /* Buffer containing string or blob data */
+ int nData, /* Size of buffer aData[] in bytes */
+ u8 enc /* String encoding (0 for blobs) */
+){
+ /* In theory this code could just pass SQLITE_TRANSIENT as the final
+ ** argument to sqlite3ValueSetStr() and have the copy created
+ ** automatically. But doing so makes it difficult to detect any OOM
+ ** error. Hence the code to create the copy externally. */
+ u8 *aCopy = sqlite3_malloc64((sqlite3_int64)nData+1);
+ if( aCopy==0 ) return SQLITE_NOMEM;
+ memcpy(aCopy, aData, nData);
+ sqlite3ValueSetStr(pVal, nData, (char*)aCopy, enc, sqlite3_free);
+ return SQLITE_OK;
+}
+
+/*
+** Deserialize a single record from a buffer in memory. See "RECORD FORMAT"
+** for details.
+**
+** When this function is called, *paChange points to the start of the record
+** to deserialize. Assuming no error occurs, *paChange is set to point to
+** one byte after the end of the same record before this function returns.
+** If the argument abPK is NULL, then the record contains nCol values. Or,
+** if abPK is other than NULL, then the record contains only the PK fields
+** (in other words, it is a patchset DELETE record).
+**
+** If successful, each element of the apOut[] array (allocated by the caller)
+** is set to point to an sqlite3_value object containing the value read
+** from the corresponding position in the record. If that value is not
+** included in the record (i.e. because the record is part of an UPDATE change
+** and the field was not modified), the corresponding element of apOut[] is
+** set to NULL.
+**
+** It is the responsibility of the caller to free all sqlite3_value
+** structures using sqlite3ValueFree().
+**
+** If an error occurs, an SQLite error code (e.g. SQLITE_NOMEM) is returned.
+** The apOut[] array may have been partially populated in this case.
+*/
+static int sessionReadRecord(
+ SessionInput *pIn, /* Input data */
+ int nCol, /* Number of values in record */
+ u8 *abPK, /* Array of primary key flags, or NULL */
+ sqlite3_value **apOut /* Write values to this array */
+){
+ int i; /* Used to iterate through columns */
+ int rc = SQLITE_OK;
+
+ for(i=0; i<nCol && rc==SQLITE_OK; i++){
+ int eType = 0; /* Type of value (SQLITE_NULL, TEXT etc.) */
+ if( abPK && abPK[i]==0 ) continue;
+ rc = sessionInputBuffer(pIn, 9);
+ if( rc==SQLITE_OK ){
+ if( pIn->iNext>=pIn->nData ){
+ rc = SQLITE_CORRUPT_BKPT;
+ }else{
+ eType = pIn->aData[pIn->iNext++];
+ assert( apOut[i]==0 );
+ if( eType ){
+ apOut[i] = sqlite3ValueNew(0);
+ if( !apOut[i] ) rc = SQLITE_NOMEM;
+ }
+ }
+ }
+
+ if( rc==SQLITE_OK ){
+ u8 *aVal = &pIn->aData[pIn->iNext];
+ if( eType==SQLITE_TEXT || eType==SQLITE_BLOB ){
+ int nByte;
+ pIn->iNext += sessionVarintGet(aVal, &nByte);
+ rc = sessionInputBuffer(pIn, nByte);
+ if( rc==SQLITE_OK ){
+ if( nByte<0 || nByte>pIn->nData-pIn->iNext ){
+ rc = SQLITE_CORRUPT_BKPT;
+ }else{
+ u8 enc = (eType==SQLITE_TEXT ? SQLITE_UTF8 : 0);
+ rc = sessionValueSetStr(apOut[i],&pIn->aData[pIn->iNext],nByte,enc);
+ pIn->iNext += nByte;
+ }
+ }
+ }
+ if( eType==SQLITE_INTEGER || eType==SQLITE_FLOAT ){
+ sqlite3_int64 v = sessionGetI64(aVal);
+ if( eType==SQLITE_INTEGER ){
+ sqlite3VdbeMemSetInt64(apOut[i], v);
+ }else{
+ double d;
+ memcpy(&d, &v, 8);
+ sqlite3VdbeMemSetDouble(apOut[i], d);
+ }
+ pIn->iNext += 8;
+ }
+ }
+ }
+
+ return rc;
+}
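+
+/*
+** As a worked example of the record format consumed above (illustrative
+** only): a two-field record holding the integer 5 followed by the text
+** 'abc' is serialized as:
+**
+**   0x01  0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x05
+**         (SQLITE_INTEGER type byte, then 8 bytes, big-endian)
+**   0x03  0x03  'a' 'b' 'c'
+**         (SQLITE_TEXT type byte, varint length, then utf-8 data)
+**
+** A 0x00 type byte denotes an "undefined" field and 0x05 (SQLITE_NULL)
+** an SQL NULL; neither is followed by any payload.
+*/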
+
+/*
+** The input pointer currently points to the second byte of a table-header.
+** Specifically, to the following:
+**
+** + number of columns in table (varint)
+** + array of PK flags (1 byte per column),
+** + table name (nul terminated).
+**
+** This function ensures that all of the above is present in the input
+** buffer (i.e. that it can be accessed without any calls to xInput()).
+** If successful, SQLITE_OK is returned. Otherwise, an SQLite error code.
+** The input pointer is not moved.
+*/
+static int sessionChangesetBufferTblhdr(SessionInput *pIn, int *pnByte){
+ int rc = SQLITE_OK;
+ int nCol = 0;
+ int nRead = 0;
+
+ rc = sessionInputBuffer(pIn, 9);
+ if( rc==SQLITE_OK ){
+ nRead += sessionVarintGet(&pIn->aData[pIn->iNext + nRead], &nCol);
+    /* The hard upper limit for the number of columns in an SQLite
+    ** database table is, according to sqliteLimit.h, 32767. So
+    ** consider any table-header that purports to have more than 65536
+    ** columns to be corrupt. This is convenient because otherwise,
+    ** if the (nCol>65536) condition below were omitted, a sufficiently
+    ** large value for nCol may cause nRead to wrap around and become
+    ** negative, leading to a crash. */
+ if( nCol<0 || nCol>65536 ){
+ rc = SQLITE_CORRUPT_BKPT;
+ }else{
+ rc = sessionInputBuffer(pIn, nRead+nCol+100);
+ nRead += nCol;
+ }
+ }
+
+ while( rc==SQLITE_OK ){
+ while( (pIn->iNext + nRead)<pIn->nData && pIn->aData[pIn->iNext + nRead] ){
+ nRead++;
+ }
+ if( (pIn->iNext + nRead)<pIn->nData ) break;
+ rc = sessionInputBuffer(pIn, nRead + 100);
+ }
+ *pnByte = nRead+1;
+ return rc;
+}
+
+/*
+** The input pointer currently points to the first byte of the first field
+** of a record consisting of nCol columns. This function ensures the entire
+** record is buffered. It does not move the input pointer.
+**
+** If successful, SQLITE_OK is returned and *pnByte is set to the size of
+** the record in bytes. Otherwise, an SQLite error code is returned. The
+** final value of *pnByte is undefined in this case.
+*/
+static int sessionChangesetBufferRecord(
+ SessionInput *pIn, /* Input data */
+ int nCol, /* Number of columns in record */
+ int *pnByte /* OUT: Size of record in bytes */
+){
+ int rc = SQLITE_OK;
+ int nByte = 0;
+ int i;
+ for(i=0; rc==SQLITE_OK && i<nCol; i++){
+ int eType;
+ rc = sessionInputBuffer(pIn, nByte + 10);
+ if( rc==SQLITE_OK ){
+ eType = pIn->aData[pIn->iNext + nByte++];
+ if( eType==SQLITE_TEXT || eType==SQLITE_BLOB ){
+ int n;
+ nByte += sessionVarintGet(&pIn->aData[pIn->iNext+nByte], &n);
+ nByte += n;
+ rc = sessionInputBuffer(pIn, nByte);
+ }else if( eType==SQLITE_INTEGER || eType==SQLITE_FLOAT ){
+ nByte += 8;
+ }
+ }
+ }
+ *pnByte = nByte;
+ return rc;
+}
+
+/*
+** The input pointer currently points to the second byte of a table-header.
+** Specifically, to the following:
+**
+** + number of columns in table (varint)
+** + array of PK flags (1 byte per column),
+** + table name (nul terminated).
+**
+** This function decodes the table-header and populates the p->nCol,
+** p->zTab and p->abPK[] variables accordingly. The p->apValue[] array is
+** also allocated or resized according to the new value of p->nCol. The
+** input pointer is left pointing to the byte following the table header.
+**
+** If successful, SQLITE_OK is returned. Otherwise, an SQLite error code
+** is returned and the final values of the various fields enumerated above
+** are undefined.
+*/
+static int sessionChangesetReadTblhdr(sqlite3_changeset_iter *p){
+ int rc;
+ int nCopy;
+ assert( p->rc==SQLITE_OK );
+
+ rc = sessionChangesetBufferTblhdr(&p->in, &nCopy);
+ if( rc==SQLITE_OK ){
+ int nByte;
+ int nVarint;
+ nVarint = sessionVarintGet(&p->in.aData[p->in.iNext], &p->nCol);
+ if( p->nCol>0 ){
+ nCopy -= nVarint;
+ p->in.iNext += nVarint;
+ nByte = p->nCol * sizeof(sqlite3_value*) * 2 + nCopy;
+ p->tblhdr.nBuf = 0;
+ sessionBufferGrow(&p->tblhdr, nByte, &rc);
+ }else{
+ rc = SQLITE_CORRUPT_BKPT;
+ }
+ }
+
+ if( rc==SQLITE_OK ){
+ size_t iPK = sizeof(sqlite3_value*)*p->nCol*2;
+ memset(p->tblhdr.aBuf, 0, iPK);
+ memcpy(&p->tblhdr.aBuf[iPK], &p->in.aData[p->in.iNext], nCopy);
+ p->in.iNext += nCopy;
+ }
+
+ p->apValue = (sqlite3_value**)p->tblhdr.aBuf;
+ if( p->apValue==0 ){
+ p->abPK = 0;
+ p->zTab = 0;
+ }else{
+ p->abPK = (u8*)&p->apValue[p->nCol*2];
+ p->zTab = p->abPK ? (char*)&p->abPK[p->nCol] : 0;
+ }
+ return (p->rc = rc);
+}
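+
+/*
+** For clarity, the single allocation populated above is laid out as
+** follows (a sketch of the code above, not additional state):
+**
+**   p->tblhdr.aBuf:
+**     apValue[0 .. nCol*2-1]   old.* and new.* sqlite3_value pointers
+**     abPK[0 .. nCol-1]        PK flag bytes copied from the header
+**     zTab                     nul-terminated table name
+*/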
+
+/*
+** Advance the changeset iterator to the next change.
+**
+** If both paRec and pnRec are NULL, then this function works like the public
+** API sqlite3changeset_next(). If SQLITE_ROW is returned, then the
+** sqlite3changeset_new() and old() APIs may be used to query for values.
+**
+** Otherwise, if paRec and pnRec are not NULL, then a pointer to the change
+** record is written to *paRec before returning and the number of bytes in
+** the record to *pnRec.
+**
+** Either way, this function returns SQLITE_ROW if the iterator is
+** successfully advanced to the next change in the changeset, an SQLite
+** error code if an error occurs, or SQLITE_DONE if there are no further
+** changes in the changeset.
+*/
+static int sessionChangesetNext(
+ sqlite3_changeset_iter *p, /* Changeset iterator */
+ u8 **paRec, /* If non-NULL, store record pointer here */
+ int *pnRec, /* If non-NULL, store size of record here */
+ int *pbNew /* If non-NULL, true if new table */
+){
+ int i;
+ u8 op;
+
+ assert( (paRec==0 && pnRec==0) || (paRec && pnRec) );
+
+ /* If the iterator is in the error-state, return immediately. */
+ if( p->rc!=SQLITE_OK ) return p->rc;
+
+ /* Free the current contents of p->apValue[], if any. */
+ if( p->apValue ){
+ for(i=0; i<p->nCol*2; i++){
+ sqlite3ValueFree(p->apValue[i]);
+ }
+ memset(p->apValue, 0, sizeof(sqlite3_value*)*p->nCol*2);
+ }
+
+  /* Make sure the buffer contains at least 2 bytes of input data, or all
+  ** remaining data if fewer than 2 bytes are available. This is sufficient
+  ** either for the 'T' or 'P' byte that introduces a table header, or for
+  ** the op-type and indirect-flag bytes that begin each change record. */
+ p->rc = sessionInputBuffer(&p->in, 2);
+ if( p->rc!=SQLITE_OK ) return p->rc;
+
+ /* If the iterator is already at the end of the changeset, return DONE. */
+ if( p->in.iNext>=p->in.nData ){
+ return SQLITE_DONE;
+ }
+
+ sessionDiscardData(&p->in);
+ p->in.iCurrent = p->in.iNext;
+
+ op = p->in.aData[p->in.iNext++];
+ while( op=='T' || op=='P' ){
+ if( pbNew ) *pbNew = 1;
+ p->bPatchset = (op=='P');
+ if( sessionChangesetReadTblhdr(p) ) return p->rc;
+ if( (p->rc = sessionInputBuffer(&p->in, 2)) ) return p->rc;
+ p->in.iCurrent = p->in.iNext;
+ if( p->in.iNext>=p->in.nData ) return SQLITE_DONE;
+ op = p->in.aData[p->in.iNext++];
+ }
+
+ if( p->zTab==0 || (p->bPatchset && p->bInvert) ){
+    /* Either the first record in the changeset is not a table header, or
+    ** this is a patchset being iterated in inverted form, which is not
+    ** supported. Treat the changeset as corrupt in both cases. */
+ assert( p->in.iNext==1 || p->zTab );
+ return (p->rc = SQLITE_CORRUPT_BKPT);
+ }
+
+ p->op = op;
+ p->bIndirect = p->in.aData[p->in.iNext++];
+ if( p->op!=SQLITE_UPDATE && p->op!=SQLITE_DELETE && p->op!=SQLITE_INSERT ){
+ return (p->rc = SQLITE_CORRUPT_BKPT);
+ }
+
+ if( paRec ){
+ int nVal; /* Number of values to buffer */
+ if( p->bPatchset==0 && op==SQLITE_UPDATE ){
+ nVal = p->nCol * 2;
+ }else if( p->bPatchset && op==SQLITE_DELETE ){
+ nVal = 0;
+ for(i=0; i<p->nCol; i++) if( p->abPK[i] ) nVal++;
+ }else{
+ nVal = p->nCol;
+ }
+ p->rc = sessionChangesetBufferRecord(&p->in, nVal, pnRec);
+ if( p->rc!=SQLITE_OK ) return p->rc;
+ *paRec = &p->in.aData[p->in.iNext];
+ p->in.iNext += *pnRec;
+ }else{
+ sqlite3_value **apOld = (p->bInvert ? &p->apValue[p->nCol] : p->apValue);
+ sqlite3_value **apNew = (p->bInvert ? p->apValue : &p->apValue[p->nCol]);
+
+ /* If this is an UPDATE or DELETE, read the old.* record. */
+ if( p->op!=SQLITE_INSERT && (p->bPatchset==0 || p->op==SQLITE_DELETE) ){
+ u8 *abPK = p->bPatchset ? p->abPK : 0;
+ p->rc = sessionReadRecord(&p->in, p->nCol, abPK, apOld);
+ if( p->rc!=SQLITE_OK ) return p->rc;
+ }
+
+ /* If this is an INSERT or UPDATE, read the new.* record. */
+ if( p->op!=SQLITE_DELETE ){
+ p->rc = sessionReadRecord(&p->in, p->nCol, 0, apNew);
+ if( p->rc!=SQLITE_OK ) return p->rc;
+ }
+
+ if( (p->bPatchset || p->bInvert) && p->op==SQLITE_UPDATE ){
+ /* If this is an UPDATE that is part of a patchset, then all PK and
+ ** modified fields are present in the new.* record. The old.* record
+ ** is currently completely empty. This block shifts the PK fields from
+ ** new.* to old.*, to accommodate the code that reads these arrays. */
+ for(i=0; i<p->nCol; i++){
+ assert( p->bPatchset==0 || p->apValue[i]==0 );
+ if( p->abPK[i] ){
+ assert( p->apValue[i]==0 );
+ p->apValue[i] = p->apValue[i+p->nCol];
+ if( p->apValue[i]==0 ) return (p->rc = SQLITE_CORRUPT_BKPT);
+ p->apValue[i+p->nCol] = 0;
+ }
+ }
+ }else if( p->bInvert ){
+ if( p->op==SQLITE_INSERT ) p->op = SQLITE_DELETE;
+ else if( p->op==SQLITE_DELETE ) p->op = SQLITE_INSERT;
+ }
+ }
+
+ return SQLITE_ROW;
+}
+
+/*
+** Advance an iterator created by sqlite3changeset_start() to the next
+** change in the changeset. This function may return SQLITE_ROW, SQLITE_DONE
+** or SQLITE_CORRUPT.
+**
+** This function may not be called on iterators passed to a conflict handler
+** callback by changeset_apply().
+*/
+SQLITE_API int sqlite3changeset_next(sqlite3_changeset_iter *p){
+ return sessionChangesetNext(p, 0, 0, 0);
+}
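+
+/*
+** EXAMPLE: the canonical iteration loop (an illustrative sketch, not
+** part of the original sources), assuming a changeset blob in the
+** hypothetical variables nChangeset/pChangeset. Values for the current
+** change may be read inside the loop with sqlite3changeset_old() and
+** sqlite3changeset_new():
+**
+**   sqlite3_changeset_iter *pIter = 0;
+**   int rc = sqlite3changeset_start(&pIter, nChangeset, pChangeset);
+**   if( rc==SQLITE_OK ){
+**     while( SQLITE_ROW==sqlite3changeset_next(pIter) ){
+**       const char *zTab; int nCol; int op;
+**       sqlite3changeset_op(pIter, &zTab, &nCol, &op, 0);
+**     }
+**     rc = sqlite3changeset_finalize(pIter);
+**   }
+*/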
+
+/*
+** The following function extracts information on the current change
+** from a changeset iterator. It may only be called after changeset_next()
+** has returned SQLITE_ROW.
+*/
+SQLITE_API int sqlite3changeset_op(
+ sqlite3_changeset_iter *pIter, /* Iterator handle */
+ const char **pzTab, /* OUT: Pointer to table name */
+ int *pnCol, /* OUT: Number of columns in table */
+ int *pOp, /* OUT: SQLITE_INSERT, DELETE or UPDATE */
+ int *pbIndirect /* OUT: True if change is indirect */
+){
+ *pOp = pIter->op;
+ *pnCol = pIter->nCol;
+ *pzTab = pIter->zTab;
+ if( pbIndirect ) *pbIndirect = pIter->bIndirect;
+ return SQLITE_OK;
+}
+
+/*
+** Return information regarding the PRIMARY KEY and number of columns in
+** the database table affected by the change that pIter currently points
+** to. This function may only be called after changeset_next() returns
+** SQLITE_ROW.
+*/
+SQLITE_API int sqlite3changeset_pk(
+ sqlite3_changeset_iter *pIter, /* Iterator object */
+ unsigned char **pabPK, /* OUT: Array of boolean - true for PK cols */
+ int *pnCol /* OUT: Number of entries in output array */
+){
+ *pabPK = pIter->abPK;
+ if( pnCol ) *pnCol = pIter->nCol;
+ return SQLITE_OK;
+}
+
+/*
+** This function may only be called while the iterator is pointing to an
+** SQLITE_UPDATE or SQLITE_DELETE change (see sqlite3changeset_op()).
+** Otherwise, SQLITE_MISUSE is returned.
+**
+** It sets *ppValue to point to an sqlite3_value structure containing the
+** iVal'th value in the old.* record. Or, if that particular value is not
+** included in the record (because the change is an UPDATE and the field
+** was not modified and is not a PK column), set *ppValue to NULL.
+**
+** If value iVal is out-of-range, SQLITE_RANGE is returned and *ppValue is
+** not modified. Otherwise, SQLITE_OK.
+*/
+SQLITE_API int sqlite3changeset_old(
+ sqlite3_changeset_iter *pIter, /* Changeset iterator */
+ int iVal, /* Index of old.* value to retrieve */
+ sqlite3_value **ppValue /* OUT: Old value (or NULL pointer) */
+){
+ if( pIter->op!=SQLITE_UPDATE && pIter->op!=SQLITE_DELETE ){
+ return SQLITE_MISUSE;
+ }
+ if( iVal<0 || iVal>=pIter->nCol ){
+ return SQLITE_RANGE;
+ }
+ *ppValue = pIter->apValue[iVal];
+ return SQLITE_OK;
+}
+
+/*
+** This function may only be called while the iterator is pointing to an
+** SQLITE_UPDATE or SQLITE_INSERT change (see sqlite3changeset_op()).
+** Otherwise, SQLITE_MISUSE is returned.
+**
+** It sets *ppValue to point to an sqlite3_value structure containing the
+** iVal'th value in the new.* record. Or, if that particular value is not
+** included in the record (because the change is an UPDATE and the field
+** was not modified), set *ppValue to NULL.
+**
+** If value iVal is out-of-range, SQLITE_RANGE is returned and *ppValue is
+** not modified. Otherwise, SQLITE_OK.
+*/
+SQLITE_API int sqlite3changeset_new(
+ sqlite3_changeset_iter *pIter, /* Changeset iterator */
+ int iVal, /* Index of new.* value to retrieve */
+ sqlite3_value **ppValue /* OUT: New value (or NULL pointer) */
+){
+ if( pIter->op!=SQLITE_UPDATE && pIter->op!=SQLITE_INSERT ){
+ return SQLITE_MISUSE;
+ }
+ if( iVal<0 || iVal>=pIter->nCol ){
+ return SQLITE_RANGE;
+ }
+ *ppValue = pIter->apValue[pIter->nCol+iVal];
+ return SQLITE_OK;
+}
+
+/*
+** The following two macros are used internally. They are similar to the
+** sqlite3changeset_new() and sqlite3changeset_old() functions, except that
+** they omit all error checking and return a pointer to the requested value.
+*/
+#define sessionChangesetNew(pIter, iVal) (pIter)->apValue[(pIter)->nCol+(iVal)]
+#define sessionChangesetOld(pIter, iVal) (pIter)->apValue[(iVal)]
+
+/*
+** This function may only be called with a changeset iterator that has been
+** passed to an SQLITE_CHANGESET_DATA or SQLITE_CHANGESET_CONFLICT
+** conflict-handler function. Otherwise, SQLITE_MISUSE is returned.
+**
+** If successful, *ppValue is set to point to an sqlite3_value structure
+** containing the iVal'th value of the conflicting record.
+**
+** If value iVal is out-of-range or some other error occurs, an SQLite error
+** code is returned. Otherwise, SQLITE_OK.
+*/
+SQLITE_API int sqlite3changeset_conflict(
+ sqlite3_changeset_iter *pIter, /* Changeset iterator */
+ int iVal, /* Index of conflict record value to fetch */
+ sqlite3_value **ppValue /* OUT: Value from conflicting row */
+){
+ if( !pIter->pConflict ){
+ return SQLITE_MISUSE;
+ }
+ if( iVal<0 || iVal>=pIter->nCol ){
+ return SQLITE_RANGE;
+ }
+ *ppValue = sqlite3_column_value(pIter->pConflict, iVal);
+ return SQLITE_OK;
+}
+
+/*
+** This function may only be called with an iterator passed to an
+** SQLITE_CHANGESET_FOREIGN_KEY conflict handler callback. In this case
+** it sets the output variable to the total number of known foreign key
+** violations in the destination database and returns SQLITE_OK.
+**
+** In all other cases this function returns SQLITE_MISUSE.
+*/
+SQLITE_API int sqlite3changeset_fk_conflicts(
+ sqlite3_changeset_iter *pIter, /* Changeset iterator */
+ int *pnOut /* OUT: Number of FK violations */
+){
+ if( pIter->pConflict || pIter->apValue ){
+ return SQLITE_MISUSE;
+ }
+ *pnOut = pIter->nCol;
+ return SQLITE_OK;
+}
+
+
+/*
+** Finalize an iterator allocated with sqlite3changeset_start().
+**
+** This function may not be called on iterators passed to a conflict handler
+** callback by changeset_apply().
+*/
+SQLITE_API int sqlite3changeset_finalize(sqlite3_changeset_iter *p){
+ int rc = SQLITE_OK;
+ if( p ){
+ int i; /* Used to iterate through p->apValue[] */
+ rc = p->rc;
+ if( p->apValue ){
+ for(i=0; i<p->nCol*2; i++) sqlite3ValueFree(p->apValue[i]);
+ }
+ sqlite3_free(p->tblhdr.aBuf);
+ sqlite3_free(p->in.buf.aBuf);
+ sqlite3_free(p);
+ }
+ return rc;
+}
+
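+/*
+** Worker for sqlite3changeset_invert() and sqlite3changeset_invert_strm().
+** Read a changeset from pInput and write its inverse either to the
+** xOutput callback (if xOutput is not NULL) or else to a buffer returned
+** via *pnInverted and *ppInverted.
+*/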
+static int sessionChangesetInvert(
+ SessionInput *pInput, /* Input changeset */
+ int (*xOutput)(void *pOut, const void *pData, int nData),
+ void *pOut,
+ int *pnInverted, /* OUT: Number of bytes in output changeset */
+ void **ppInverted /* OUT: Inverse of pChangeset */
+){
+ int rc = SQLITE_OK; /* Return value */
+ SessionBuffer sOut; /* Output buffer */
+ int nCol = 0; /* Number of cols in current table */
+ u8 *abPK = 0; /* PK array for current table */
+ sqlite3_value **apVal = 0; /* Space for values for UPDATE inversion */
+ SessionBuffer sPK = {0, 0, 0}; /* PK array for current table */
+
+ /* Initialize the output buffer */
+ memset(&sOut, 0, sizeof(SessionBuffer));
+
+ /* Zero the output variables in case an error occurs. */
+ if( ppInverted ){
+ *ppInverted = 0;
+ *pnInverted = 0;
+ }
+
+ while( 1 ){
+ u8 eType;
+
+ /* Test for EOF. */
+ if( (rc = sessionInputBuffer(pInput, 2)) ) goto finished_invert;
+ if( pInput->iNext>=pInput->nData ) break;
+ eType = pInput->aData[pInput->iNext];
+
+ switch( eType ){
+ case 'T': {
+ /* A 'table' record consists of:
+ **
+ ** * A constant 'T' character,
+ ** * Number of columns in said table (a varint),
+ ** * An array of nCol bytes (sPK),
+ ** * A nul-terminated table name.
+ */
+ int nByte;
+ int nVar;
+ pInput->iNext++;
+ if( (rc = sessionChangesetBufferTblhdr(pInput, &nByte)) ){
+ goto finished_invert;
+ }
+ nVar = sessionVarintGet(&pInput->aData[pInput->iNext], &nCol);
+ sPK.nBuf = 0;
+ sessionAppendBlob(&sPK, &pInput->aData[pInput->iNext+nVar], nCol, &rc);
+ sessionAppendByte(&sOut, eType, &rc);
+ sessionAppendBlob(&sOut, &pInput->aData[pInput->iNext], nByte, &rc);
+ if( rc ) goto finished_invert;
+
+ pInput->iNext += nByte;
+ sqlite3_free(apVal);
+ apVal = 0;
+ abPK = sPK.aBuf;
+ break;
+ }
+
+ case SQLITE_INSERT:
+ case SQLITE_DELETE: {
+ int nByte;
+ int bIndirect = pInput->aData[pInput->iNext+1];
+ int eType2 = (eType==SQLITE_DELETE ? SQLITE_INSERT : SQLITE_DELETE);
+ pInput->iNext += 2;
+ assert( rc==SQLITE_OK );
+ rc = sessionChangesetBufferRecord(pInput, nCol, &nByte);
+ sessionAppendByte(&sOut, eType2, &rc);
+ sessionAppendByte(&sOut, bIndirect, &rc);
+ sessionAppendBlob(&sOut, &pInput->aData[pInput->iNext], nByte, &rc);
+ pInput->iNext += nByte;
+ if( rc ) goto finished_invert;
+ break;
+ }
+
+ case SQLITE_UPDATE: {
+ int iCol;
+
+ if( 0==apVal ){
+ apVal = (sqlite3_value **)sqlite3_malloc64(sizeof(apVal[0])*nCol*2);
+ if( 0==apVal ){
+ rc = SQLITE_NOMEM;
+ goto finished_invert;
+ }
+ memset(apVal, 0, sizeof(apVal[0])*nCol*2);
+ }
+
+ /* Write the header for the new UPDATE change. Same as the original. */
+ sessionAppendByte(&sOut, eType, &rc);
+ sessionAppendByte(&sOut, pInput->aData[pInput->iNext+1], &rc);
+
+ /* Read the old.* and new.* records for the update change. */
+ pInput->iNext += 2;
+ rc = sessionReadRecord(pInput, nCol, 0, &apVal[0]);
+ if( rc==SQLITE_OK ){
+ rc = sessionReadRecord(pInput, nCol, 0, &apVal[nCol]);
+ }
+
+ /* Write the new old.* record. Consists of the PK columns from the
+ ** original old.* record, and the other values from the original
+ ** new.* record. */
+ for(iCol=0; iCol<nCol; iCol++){
+ sqlite3_value *pVal = apVal[iCol + (abPK[iCol] ? 0 : nCol)];
+ sessionAppendValue(&sOut, pVal, &rc);
+ }
+
+ /* Write the new new.* record. Consists of a copy of all values
+ ** from the original old.* record, except for the PK columns, which
+ ** are set to "undefined". */
+ for(iCol=0; iCol<nCol; iCol++){
+ sqlite3_value *pVal = (abPK[iCol] ? 0 : apVal[iCol]);
+ sessionAppendValue(&sOut, pVal, &rc);
+ }
+
+ for(iCol=0; iCol<nCol*2; iCol++){
+ sqlite3ValueFree(apVal[iCol]);
+ }
+ memset(apVal, 0, sizeof(apVal[0])*nCol*2);
+ if( rc!=SQLITE_OK ){
+ goto finished_invert;
+ }
+
+ break;
+ }
+
+ default:
+ rc = SQLITE_CORRUPT_BKPT;
+ goto finished_invert;
+ }
+
+ assert( rc==SQLITE_OK );
+ if( xOutput && sOut.nBuf>=sessions_strm_chunk_size ){
+ rc = xOutput(pOut, sOut.aBuf, sOut.nBuf);
+ sOut.nBuf = 0;
+ if( rc!=SQLITE_OK ) goto finished_invert;
+ }
+ }
+
+ assert( rc==SQLITE_OK );
+ if( pnInverted ){
+ *pnInverted = sOut.nBuf;
+ *ppInverted = sOut.aBuf;
+ sOut.aBuf = 0;
+ }else if( sOut.nBuf>0 ){
+ rc = xOutput(pOut, sOut.aBuf, sOut.nBuf);
+ }
+
+ finished_invert:
+ sqlite3_free(sOut.aBuf);
+ sqlite3_free(apVal);
+ sqlite3_free(sPK.aBuf);
+ return rc;
+}
+
+
+/*
+** Invert a changeset object.
+*/
+SQLITE_API int sqlite3changeset_invert(
+ int nChangeset, /* Number of bytes in input */
+ const void *pChangeset, /* Input changeset */
+ int *pnInverted, /* OUT: Number of bytes in output changeset */
+ void **ppInverted /* OUT: Inverse of pChangeset */
+){
+ SessionInput sInput;
+
+ /* Set up the input stream */
+ memset(&sInput, 0, sizeof(SessionInput));
+ sInput.nData = nChangeset;
+ sInput.aData = (u8*)pChangeset;
+
+ return sessionChangesetInvert(&sInput, 0, 0, pnInverted, ppInverted);
+}
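+
+/*
+** EXAMPLE (illustrative sketch; useInvertedSomehow() is a hypothetical
+** name, not part of this file): inverting an in-memory changeset and
+** releasing the returned buffer:
+**
+**   int nInv = 0;
+**   void *pInv = 0;
+**   int rc = sqlite3changeset_invert(nChangeset, pChangeset, &nInv, &pInv);
+**   if( rc==SQLITE_OK ){
+**     useInvertedSomehow(pInv, nInv);
+**     sqlite3_free(pInv);
+**   }
+*/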
+
+/*
+** Streaming version of sqlite3changeset_invert().
+*/
+SQLITE_API int sqlite3changeset_invert_strm(
+ int (*xInput)(void *pIn, void *pData, int *pnData),
+ void *pIn,
+ int (*xOutput)(void *pOut, const void *pData, int nData),
+ void *pOut
+){
+ SessionInput sInput;
+ int rc;
+
+ /* Set up the input stream */
+ memset(&sInput, 0, sizeof(SessionInput));
+ sInput.xInput = xInput;
+ sInput.pIn = pIn;
+
+ rc = sessionChangesetInvert(&sInput, xOutput, pOut, 0, 0);
+ sqlite3_free(sInput.buf.aBuf);
+ return rc;
+}
+
+typedef struct SessionApplyCtx SessionApplyCtx;
+struct SessionApplyCtx {
+ sqlite3 *db;
+ sqlite3_stmt *pDelete; /* DELETE statement */
+ sqlite3_stmt *pUpdate; /* UPDATE statement */
+ sqlite3_stmt *pInsert; /* INSERT statement */
+ sqlite3_stmt *pSelect; /* SELECT statement */
+ int nCol; /* Size of azCol[] and abPK[] arrays */
+ const char **azCol; /* Array of column names */
+ u8 *abPK; /* Boolean array - true if column is in PK */
+ int bStat1; /* True if table is sqlite_stat1 */
+ int bDeferConstraints; /* True to defer constraints */
+ int bInvertConstraints; /* Invert when iterating constraints buffer */
+ SessionBuffer constraints; /* Deferred constraints are stored here */
+ SessionBuffer rebase; /* Rebase information (if any) here */
+ u8 bRebaseStarted; /* If table header is already in rebase */
+ u8 bRebase; /* True to collect rebase information */
+};
+
+/*
+** Formulate a statement to DELETE a row from database db. Assuming a table
+** structure like this:
+**
+** CREATE TABLE x(a, b, c, d, PRIMARY KEY(a, c));
+**
+** The DELETE statement looks like this:
+**
+** DELETE FROM x WHERE a = :1 AND c = :3 AND (:5 OR b IS :2 AND d IS :4)
+**
+** Variable :5 (nCol+1) is a boolean. It should be set to 0 if we require
+** matching b and d values, or 1 otherwise. The second case comes up if the
+** conflict handler is invoked with NOTFOUND and returns CHANGESET_REPLACE.
+**
+** If successful, SQLITE_OK is returned and SessionApplyCtx.pDelete is left
+** pointing to the prepared version of the SQL statement.
+*/
+static int sessionDeleteRow(
+ sqlite3 *db, /* Database handle */
+ const char *zTab, /* Table name */
+ SessionApplyCtx *p /* Session changeset-apply context */
+){
+ int i;
+ const char *zSep = "";
+ int rc = SQLITE_OK;
+ SessionBuffer buf = {0, 0, 0};
+ int nPk = 0;
+
+ sessionAppendStr(&buf, "DELETE FROM main.", &rc);
+ sessionAppendIdent(&buf, zTab, &rc);
+ sessionAppendStr(&buf, " WHERE ", &rc);
+
+ for(i=0; i<p->nCol; i++){
+ if( p->abPK[i] ){
+ nPk++;
+ sessionAppendStr(&buf, zSep, &rc);
+ sessionAppendIdent(&buf, p->azCol[i], &rc);
+ sessionAppendStr(&buf, " = ?", &rc);
+ sessionAppendInteger(&buf, i+1, &rc);
+ zSep = " AND ";
+ }
+ }
+
+ if( nPk<p->nCol ){
+ sessionAppendStr(&buf, " AND (?", &rc);
+ sessionAppendInteger(&buf, p->nCol+1, &rc);
+ sessionAppendStr(&buf, " OR ", &rc);
+
+ zSep = "";
+ for(i=0; i<p->nCol; i++){
+ if( !p->abPK[i] ){
+ sessionAppendStr(&buf, zSep, &rc);
+ sessionAppendIdent(&buf, p->azCol[i], &rc);
+ sessionAppendStr(&buf, " IS ?", &rc);
+ sessionAppendInteger(&buf, i+1, &rc);
+ zSep = "AND ";
+ }
+ }
+ sessionAppendStr(&buf, ")", &rc);
+ }
+
+ if( rc==SQLITE_OK ){
+ rc = sqlite3_prepare_v2(db, (char *)buf.aBuf, buf.nBuf, &p->pDelete, 0);
+ }
+ sqlite3_free(buf.aBuf);
+
+ return rc;
+}
+
+/*
+** Formulate and prepare a statement to UPDATE a row from database db.
+** Assuming a table structure like this:
+**
+** CREATE TABLE x(a, b, c, d, PRIMARY KEY(a, c));
+**
+** The UPDATE statement looks like this:
+**
+** UPDATE x SET
+** a = CASE WHEN ?2 THEN ?3 ELSE a END,
+** b = CASE WHEN ?5 THEN ?6 ELSE b END,
+** c = CASE WHEN ?8 THEN ?9 ELSE c END,
+** d = CASE WHEN ?11 THEN ?12 ELSE d END
+** WHERE a = ?1 AND c = ?7 AND (?13 OR
+**     (?5==0 OR b IS ?4) AND (?11==0 OR d IS ?10)
+** )
+**
+** For each column in the table, there are three variables to bind:
+**
+** ?(i*3+1) The old.* value of the column, if any.
+** ?(i*3+2) A boolean flag indicating that the value is being modified.
+** ?(i*3+3) The new.* value of the column, if any.
+**
+** Also bound is a boolean flag, variable ?(nCol*3+1), that, if set to
+** true, causes the statement to update a row even if the non-PK values
+** do not match. This is required if the conflict-handler is invoked with
+** CHANGESET_DATA and returns CHANGESET_REPLACE.
+**
+** If successful, SQLITE_OK is returned and SessionApplyCtx.pUpdate is left
+** pointing to the prepared version of the SQL statement.
+*/
+static int sessionUpdateRow(
+ sqlite3 *db, /* Database handle */
+ const char *zTab, /* Table name */
+ SessionApplyCtx *p /* Session changeset-apply context */
+){
+ int rc = SQLITE_OK;
+ int i;
+ const char *zSep = "";
+ SessionBuffer buf = {0, 0, 0};
+
+ /* Append "UPDATE tbl SET " */
+ sessionAppendStr(&buf, "UPDATE main.", &rc);
+ sessionAppendIdent(&buf, zTab, &rc);
+ sessionAppendStr(&buf, " SET ", &rc);
+
+ /* Append the assignments */
+ for(i=0; i<p->nCol; i++){
+ sessionAppendStr(&buf, zSep, &rc);
+ sessionAppendIdent(&buf, p->azCol[i], &rc);
+ sessionAppendStr(&buf, " = CASE WHEN ?", &rc);
+ sessionAppendInteger(&buf, i*3+2, &rc);
+ sessionAppendStr(&buf, " THEN ?", &rc);
+ sessionAppendInteger(&buf, i*3+3, &rc);
+ sessionAppendStr(&buf, " ELSE ", &rc);
+ sessionAppendIdent(&buf, p->azCol[i], &rc);
+ sessionAppendStr(&buf, " END", &rc);
+ zSep = ", ";
+ }
+
+ /* Append the PK part of the WHERE clause */
+ sessionAppendStr(&buf, " WHERE ", &rc);
+ for(i=0; i<p->nCol; i++){
+ if( p->abPK[i] ){
+ sessionAppendIdent(&buf, p->azCol[i], &rc);
+ sessionAppendStr(&buf, " = ?", &rc);
+ sessionAppendInteger(&buf, i*3+1, &rc);
+ sessionAppendStr(&buf, " AND ", &rc);
+ }
+ }
+
+ /* Append the non-PK part of the WHERE clause */
+ sessionAppendStr(&buf, " (?", &rc);
+ sessionAppendInteger(&buf, p->nCol*3+1, &rc);
+ sessionAppendStr(&buf, " OR 1", &rc);
+ for(i=0; i<p->nCol; i++){
+ if( !p->abPK[i] ){
+ sessionAppendStr(&buf, " AND (?", &rc);
+ sessionAppendInteger(&buf, i*3+2, &rc);
+ sessionAppendStr(&buf, "=0 OR ", &rc);
+ sessionAppendIdent(&buf, p->azCol[i], &rc);
+ sessionAppendStr(&buf, " IS ?", &rc);
+ sessionAppendInteger(&buf, i*3+1, &rc);
+ sessionAppendStr(&buf, ")", &rc);
+ }
+ }
+ sessionAppendStr(&buf, ")", &rc);
+
+ if( rc==SQLITE_OK ){
+ rc = sqlite3_prepare_v2(db, (char *)buf.aBuf, buf.nBuf, &p->pUpdate, 0);
+ }
+ sqlite3_free(buf.aBuf);
+
+ return rc;
+}
+
+
+/*
+** Formulate and prepare an SQL statement to query table zTab by primary
+** key. Assuming the following table structure:
+**
+** CREATE TABLE x(a, b, c, d, PRIMARY KEY(a, c));
+**
+** The SELECT statement looks like this:
+**
+** SELECT * FROM x WHERE a = ?1 AND c = ?3
+**
+** If successful, SQLITE_OK is returned and SessionApplyCtx.pSelect is left
+** pointing to the prepared version of the SQL statement.
+*/
+static int sessionSelectRow(
+ sqlite3 *db, /* Database handle */
+ const char *zTab, /* Table name */
+ SessionApplyCtx *p /* Session changeset-apply context */
+){
+ return sessionSelectStmt(
+ db, "main", zTab, p->nCol, p->azCol, p->abPK, &p->pSelect);
+}
+
+/*
+** Formulate and prepare an INSERT statement to add a record to table zTab.
+** For example:
+**
+**   INSERT INTO main."zTab"("a", "b", "c", "d") VALUES(?, ?, ?, ?);
+**
+** If successful, SQLITE_OK is returned and SessionApplyCtx.pInsert is left
+** pointing to the prepared version of the SQL statement.
+*/
+static int sessionInsertRow(
+ sqlite3 *db, /* Database handle */
+ const char *zTab, /* Table name */
+ SessionApplyCtx *p /* Session changeset-apply context */
+){
+ int rc = SQLITE_OK;
+ int i;
+ SessionBuffer buf = {0, 0, 0};
+
+ sessionAppendStr(&buf, "INSERT INTO main.", &rc);
+ sessionAppendIdent(&buf, zTab, &rc);
+ sessionAppendStr(&buf, "(", &rc);
+ for(i=0; i<p->nCol; i++){
+ if( i!=0 ) sessionAppendStr(&buf, ", ", &rc);
+ sessionAppendIdent(&buf, p->azCol[i], &rc);
+ }
+
+ sessionAppendStr(&buf, ") VALUES(?", &rc);
+ for(i=1; i<p->nCol; i++){
+ sessionAppendStr(&buf, ", ?", &rc);
+ }
+ sessionAppendStr(&buf, ")", &rc);
+
+ if( rc==SQLITE_OK ){
+ rc = sqlite3_prepare_v2(db, (char *)buf.aBuf, buf.nBuf, &p->pInsert, 0);
+ }
+ sqlite3_free(buf.aBuf);
+ return rc;
+}
+
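+/*
+** Convenience wrapper around sqlite3_prepare_v2() for preparing a
+** nul-terminated SQL statement zSql against database handle db.
+*/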
+static int sessionPrepare(sqlite3 *db, sqlite3_stmt **pp, const char *zSql){
+ return sqlite3_prepare_v2(db, zSql, -1, pp, 0);
+}
+
+/*
+** Prepare statements for applying changes to the sqlite_stat1 table.
+** These are similar to those created by sessionSelectRow(),
+** sessionInsertRow(), sessionUpdateRow() and sessionDeleteRow() for
+** other tables.
+*/
+static int sessionStat1Sql(sqlite3 *db, SessionApplyCtx *p){
+ int rc = sessionSelectRow(db, "sqlite_stat1", p);
+ if( rc==SQLITE_OK ){
+ rc = sessionPrepare(db, &p->pInsert,
+ "INSERT INTO main.sqlite_stat1 VALUES(?1, "
+ "CASE WHEN length(?2)=0 AND typeof(?2)='blob' THEN NULL ELSE ?2 END, "
+ "?3)"
+ );
+ }
+ if( rc==SQLITE_OK ){
+ rc = sessionPrepare(db, &p->pUpdate,
+ "UPDATE main.sqlite_stat1 SET "
+ "tbl = CASE WHEN ?2 THEN ?3 ELSE tbl END, "
+ "idx = CASE WHEN ?5 THEN ?6 ELSE idx END, "
+ "stat = CASE WHEN ?8 THEN ?9 ELSE stat END "
+ "WHERE tbl=?1 AND idx IS "
+ "CASE WHEN length(?4)=0 AND typeof(?4)='blob' THEN NULL ELSE ?4 END "
+ "AND (?10 OR ?8=0 OR stat IS ?7)"
+ );
+ }
+ if( rc==SQLITE_OK ){
+ rc = sessionPrepare(db, &p->pDelete,
+ "DELETE FROM main.sqlite_stat1 WHERE tbl=?1 AND idx IS "
+ "CASE WHEN length(?2)=0 AND typeof(?2)='blob' THEN NULL ELSE ?2 END "
+ "AND (?4 OR stat IS ?3)"
+ );
+ }
+ return rc;
+}
+
+/*
+** A wrapper around sqlite3_bind_value() that detects an extra problem.
+** See comments in the body of this function for details.
+*/
+static int sessionBindValue(
+ sqlite3_stmt *pStmt, /* Statement to bind value to */
+ int i, /* Parameter number to bind to */
+ sqlite3_value *pVal /* Value to bind */
+){
+ int eType = sqlite3_value_type(pVal);
+ /* COVERAGE: The (pVal->z==0) branch is never true using current versions
+ ** of SQLite. If a malloc fails in an sqlite3_value_xxx() function, either
+ ** the (pVal->z) variable remains as it was or the type of the value is
+ ** set to SQLITE_NULL. */
+ if( (eType==SQLITE_TEXT || eType==SQLITE_BLOB) && pVal->z==0 ){
+ /* This condition occurs when an earlier OOM in a call to
+ ** sqlite3_value_text() or sqlite3_value_blob() (perhaps from within
+ ** a conflict-handler) has zeroed the pVal->z pointer. Return NOMEM. */
+ return SQLITE_NOMEM;
+ }
+ return sqlite3_bind_value(pStmt, i, pVal);
+}
+
+/*
+** Iterator pIter must point to an SQLITE_INSERT entry. This function
+** transfers new.* values from the current iterator entry to statement
+** pStmt. The table being inserted into has nCol columns.
+**
+** New.* value $i from the iterator is bound to variable ($i+1) of
+** statement pStmt. If parameter abPK is NULL, all values from 0 to (nCol-1)
+** are transferred to the statement. Otherwise, if abPK is not NULL, it points
+** to an array nCol elements in size. In this case only those values for
+** which abPK[$i] is true are read from the iterator and bound to the
+** statement.
+**
+** An SQLite error code is returned if an error occurs. Otherwise, SQLITE_OK.
+*/
+static int sessionBindRow(
+ sqlite3_changeset_iter *pIter, /* Iterator to read values from */
+ int(*xValue)(sqlite3_changeset_iter *, int, sqlite3_value **),
+ int nCol, /* Number of columns */
+ u8 *abPK, /* If not NULL, bind only if true */
+ sqlite3_stmt *pStmt /* Bind values to this statement */
+){
+ int i;
+ int rc = SQLITE_OK;
+
+  /* Neither sqlite3changeset_old() nor sqlite3changeset_new() can fail if
+  ** the argument iterator points to a suitable entry. Make sure that xValue
+  ** is one of these to guarantee that it is safe to ignore the return value
+  ** in the code below. */
+ assert( xValue==sqlite3changeset_old || xValue==sqlite3changeset_new );
+
+ for(i=0; rc==SQLITE_OK && i<nCol; i++){
+ if( !abPK || abPK[i] ){
+ sqlite3_value *pVal;
+ (void)xValue(pIter, i, &pVal);
+ if( pVal==0 ){
+ /* The value in the changeset was "undefined". This indicates a
+ ** corrupt changeset blob. */
+ rc = SQLITE_CORRUPT_BKPT;
+ }else{
+ rc = sessionBindValue(pStmt, i+1, pVal);
+ }
+ }
+ }
+ return rc;
+}
+
+/*
+** SQL statement pSelect is as generated by the sessionSelectRow() function.
+** This function binds the primary key values from the current change of
+** changeset iterator pIter to the SELECT statement and attempts to seek
+** to the corresponding table entry. If a row is found, the SELECT
+** statement is left pointing at the row and SQLITE_ROW is returned.
+** Otherwise, if no row is found and no error has occurred, the statement
+** is reset and SQLITE_OK is returned. If an error occurs, the statement
+** is reset and an SQLite error code is returned.
+**
+** If this function returns SQLITE_ROW, the caller must eventually reset()
+** statement pSelect. If any other value is returned, the statement does
+** not require a reset().
+**
+** If the iterator currently points to an INSERT record, bind values from the
+** new.* record to the SELECT statement. Or, if it points to a DELETE or
+** UPDATE, bind values from the old.* record.
+*/
+static int sessionSeekToRow(
+ sqlite3 *db, /* Database handle */
+ sqlite3_changeset_iter *pIter, /* Changeset iterator */
+ u8 *abPK, /* Primary key flags array */
+ sqlite3_stmt *pSelect /* SELECT statement from sessionSelectRow() */
+){
+ int rc; /* Return code */
+ int nCol; /* Number of columns in table */
+  int op;                         /* Changeset operation (SQLITE_UPDATE etc.) */
+ const char *zDummy; /* Unused */
+
+ sqlite3changeset_op(pIter, &zDummy, &nCol, &op, 0);
+ rc = sessionBindRow(pIter,
+ op==SQLITE_INSERT ? sqlite3changeset_new : sqlite3changeset_old,
+ nCol, abPK, pSelect
+ );
+
+ if( rc==SQLITE_OK ){
+ rc = sqlite3_step(pSelect);
+ if( rc!=SQLITE_ROW ) rc = sqlite3_reset(pSelect);
+ }
+
+ return rc;
+}
+
+/*
+** This function is called from within sqlite3changeset_apply_v2() when
+** a conflict is encountered and resolved using conflict resolution
+** mode eType (either SQLITE_CHANGESET_OMIT or SQLITE_CHANGESET_REPLACE).
+** It adds a conflict resolution record to the buffer in
+** SessionApplyCtx.rebase, which will eventually be returned to the caller
+** of apply_v2() as the "rebase" buffer.
+**
+** Return SQLITE_OK if successful, or an SQLite error code otherwise.
+*/
+static int sessionRebaseAdd(
+ SessionApplyCtx *p, /* Apply context */
+ int eType, /* Conflict resolution (OMIT or REPLACE) */
+ sqlite3_changeset_iter *pIter /* Iterator pointing at current change */
+){
+ int rc = SQLITE_OK;
+ if( p->bRebase ){
+ int i;
+ int eOp = pIter->op;
+ if( p->bRebaseStarted==0 ){
+ /* Append a table-header to the rebase buffer */
+ const char *zTab = pIter->zTab;
+ sessionAppendByte(&p->rebase, 'T', &rc);
+ sessionAppendVarint(&p->rebase, p->nCol, &rc);
+ sessionAppendBlob(&p->rebase, p->abPK, p->nCol, &rc);
+ sessionAppendBlob(&p->rebase, (u8*)zTab, (int)strlen(zTab)+1, &rc);
+ p->bRebaseStarted = 1;
+ }
+
+ assert( eType==SQLITE_CHANGESET_REPLACE||eType==SQLITE_CHANGESET_OMIT );
+ assert( eOp==SQLITE_DELETE || eOp==SQLITE_INSERT || eOp==SQLITE_UPDATE );
+
+ sessionAppendByte(&p->rebase,
+ (eOp==SQLITE_DELETE ? SQLITE_DELETE : SQLITE_INSERT), &rc
+ );
+ sessionAppendByte(&p->rebase, (eType==SQLITE_CHANGESET_REPLACE), &rc);
+ for(i=0; i<p->nCol; i++){
+ sqlite3_value *pVal = 0;
+ if( eOp==SQLITE_DELETE || (eOp==SQLITE_UPDATE && p->abPK[i]) ){
+ sqlite3changeset_old(pIter, i, &pVal);
+ }else{
+ sqlite3changeset_new(pIter, i, &pVal);
+ }
+ sessionAppendValue(&p->rebase, pVal, &rc);
+ }
+ }
+ return rc;
+}
+
+/*
+** Invoke the conflict handler for the change that the changeset iterator
+** currently points to.
+**
+** Argument eType must be either CHANGESET_DATA or CHANGESET_CONFLICT.
+** If argument pbReplace is NULL, then the type of conflict handler invoked
+** depends solely on eType, as follows:
+**
+** eType value Value passed to xConflict
+** -------------------------------------------------
+** CHANGESET_DATA CHANGESET_NOTFOUND
+** CHANGESET_CONFLICT CHANGESET_CONSTRAINT
+**
+** Or, if pbReplace is not NULL, then an attempt is made to find an existing
+** record with the same primary key as the record about to be deleted, updated
+** or inserted. If such a record can be found, it is available to the conflict
+** handler as the "conflicting" record. In this case the type of conflict
+** handler invoked is as follows:
+**
+** eType value PK Record found? Value passed to xConflict
+** ----------------------------------------------------------------
+** CHANGESET_DATA Yes CHANGESET_DATA
+** CHANGESET_DATA No CHANGESET_NOTFOUND
+** CHANGESET_CONFLICT Yes CHANGESET_CONFLICT
+** CHANGESET_CONFLICT No CHANGESET_CONSTRAINT
+**
+** If pbReplace is not NULL, and a record with a matching PK is found, and
+** the conflict handler function returns SQLITE_CHANGESET_REPLACE, *pbReplace
+** is set to non-zero before returning SQLITE_OK.
+**
+** If the conflict handler returns SQLITE_CHANGESET_ABORT, SQLITE_ABORT is
+** returned. Or, if the conflict handler returns an invalid value,
+** SQLITE_MISUSE. If the conflict handler returns SQLITE_CHANGESET_OMIT,
+** this function returns SQLITE_OK.
+*/
+static int sessionConflictHandler(
+ int eType, /* Either CHANGESET_DATA or CONFLICT */
+ SessionApplyCtx *p, /* changeset_apply() context */
+ sqlite3_changeset_iter *pIter, /* Changeset iterator */
+ int(*xConflict)(void *, int, sqlite3_changeset_iter*),
+ void *pCtx, /* First argument for conflict handler */
+ int *pbReplace /* OUT: Set to true if PK row is found */
+){
+ int res = 0; /* Value returned by conflict handler */
+ int rc;
+ int nCol;
+ int op;
+ const char *zDummy;
+
+ sqlite3changeset_op(pIter, &zDummy, &nCol, &op, 0);
+
+ assert( eType==SQLITE_CHANGESET_CONFLICT || eType==SQLITE_CHANGESET_DATA );
+ assert( SQLITE_CHANGESET_CONFLICT+1==SQLITE_CHANGESET_CONSTRAINT );
+ assert( SQLITE_CHANGESET_DATA+1==SQLITE_CHANGESET_NOTFOUND );
+
+ /* Bind the new.* PRIMARY KEY values to the SELECT statement. */
+ if( pbReplace ){
+ rc = sessionSeekToRow(p->db, pIter, p->abPK, p->pSelect);
+ }else{
+ rc = SQLITE_OK;
+ }
+
+ if( rc==SQLITE_ROW ){
+ /* There exists another row with the new.* primary key. */
+ pIter->pConflict = p->pSelect;
+ res = xConflict(pCtx, eType, pIter);
+ pIter->pConflict = 0;
+ rc = sqlite3_reset(p->pSelect);
+ }else if( rc==SQLITE_OK ){
+ if( p->bDeferConstraints && eType==SQLITE_CHANGESET_CONFLICT ){
+ /* Instead of invoking the conflict handler, append the change blob
+ ** to the SessionApplyCtx.constraints buffer. */
+ u8 *aBlob = &pIter->in.aData[pIter->in.iCurrent];
+ int nBlob = pIter->in.iNext - pIter->in.iCurrent;
+ sessionAppendBlob(&p->constraints, aBlob, nBlob, &rc);
+ return SQLITE_OK;
+ }else{
+ /* No other row with the new.* primary key. */
+ res = xConflict(pCtx, eType+1, pIter);
+ if( res==SQLITE_CHANGESET_REPLACE ) rc = SQLITE_MISUSE;
+ }
+ }
+
+ if( rc==SQLITE_OK ){
+ switch( res ){
+ case SQLITE_CHANGESET_REPLACE:
+ assert( pbReplace );
+ *pbReplace = 1;
+ break;
+
+ case SQLITE_CHANGESET_OMIT:
+ break;
+
+ case SQLITE_CHANGESET_ABORT:
+ rc = SQLITE_ABORT;
+ break;
+
+ default:
+ rc = SQLITE_MISUSE;
+ break;
+ }
+ if( rc==SQLITE_OK ){
+ rc = sessionRebaseAdd(p, res, pIter);
+ }
+ }
+
+ return rc;
+}
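+
+/*
+** EXAMPLE: a minimal conflict-handler of the kind invoked above (an
+** illustrative sketch, not part of the original sources). It overwrites
+** the local row when a DATA conflict is reported and omits the change in
+** all other cases:
+**
+**   static int xExampleConflict(
+**     void *pCtx,
+**     int eConflict,
+**     sqlite3_changeset_iter *pIter
+**   ){
+**     if( eConflict==SQLITE_CHANGESET_DATA ){
+**       return SQLITE_CHANGESET_REPLACE;
+**     }
+**     return SQLITE_CHANGESET_OMIT;
+**   }
+*/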
+
+/*
+** Attempt to apply to the database the change that the iterator passed as
+** the first argument currently points to. If a conflict is encountered,
+** invoke the conflict handler callback.
+**
+** If argument pbRetry is NULL, then ignore any CHANGESET_DATA conflict. If
+** one is encountered, update or delete the row with the matching primary key
+** instead. Or, if pbRetry is not NULL and a CHANGESET_DATA conflict occurs,
+** invoke the conflict handler. If it returns CHANGESET_REPLACE, set *pbRetry
+** to true before returning. In this case the caller will invoke this function
+** again, this time with pbRetry set to NULL.
+**
+** If argument pbReplace is NULL and a CHANGESET_CONFLICT conflict is
+** encountered invoke the conflict handler with CHANGESET_CONSTRAINT instead.
+** Or, if pbReplace is not NULL, invoke it with CHANGESET_CONFLICT. If such
+** an invocation returns SQLITE_CHANGESET_REPLACE, set *pbReplace to true
+** before retrying. In this case the caller attempts to remove the conflicting
+** row before invoking this function again, this time with pbReplace set
+** to NULL.
+**
+** If any conflict handler returns SQLITE_CHANGESET_ABORT, this function
+** returns SQLITE_ABORT. Otherwise, if no error occurs, SQLITE_OK is
+** returned.
+*/
+static int sessionApplyOneOp(
+ sqlite3_changeset_iter *pIter, /* Changeset iterator */
+ SessionApplyCtx *p, /* changeset_apply() context */
+ int(*xConflict)(void *, int, sqlite3_changeset_iter *),
+ void *pCtx, /* First argument for the conflict handler */
+ int *pbReplace, /* OUT: True to remove PK row and retry */
+ int *pbRetry /* OUT: True to retry. */
+){
+ const char *zDummy;
+ int op;
+ int nCol;
+ int rc = SQLITE_OK;
+
+ assert( p->pDelete && p->pUpdate && p->pInsert && p->pSelect );
+ assert( p->azCol && p->abPK );
+ assert( !pbReplace || *pbReplace==0 );
+
+ sqlite3changeset_op(pIter, &zDummy, &nCol, &op, 0);
+
+ if( op==SQLITE_DELETE ){
+
+ /* Bind values to the DELETE statement. If conflict handling is required,
+ ** bind values for all columns and set bound variable (nCol+1) to true.
+ ** Or, if conflict handling is not required, bind just the PK column
+ ** values and, if it exists, set (nCol+1) to false. Conflict handling
+ ** is not required if:
+ **
+ ** * this is a patchset, or
+ ** * (pbRetry==0), or
+ ** * all columns of the table are PK columns (in this case there is
+ ** no (nCol+1) variable to bind to).
+ */
+ u8 *abPK = (pIter->bPatchset ? p->abPK : 0);
+ rc = sessionBindRow(pIter, sqlite3changeset_old, nCol, abPK, p->pDelete);
+ if( rc==SQLITE_OK && sqlite3_bind_parameter_count(p->pDelete)>nCol ){
+ rc = sqlite3_bind_int(p->pDelete, nCol+1, (pbRetry==0 || abPK));
+ }
+ if( rc!=SQLITE_OK ) return rc;
+
+ sqlite3_step(p->pDelete);
+ rc = sqlite3_reset(p->pDelete);
+ if( rc==SQLITE_OK && sqlite3_changes(p->db)==0 ){
+ rc = sessionConflictHandler(
+ SQLITE_CHANGESET_DATA, p, pIter, xConflict, pCtx, pbRetry
+ );
+ }else if( (rc&0xff)==SQLITE_CONSTRAINT ){
+ rc = sessionConflictHandler(
+ SQLITE_CHANGESET_CONFLICT, p, pIter, xConflict, pCtx, 0
+ );
+ }
+
+ }else if( op==SQLITE_UPDATE ){
+ int i;
+
+ /* Bind values to the UPDATE statement. */
+ for(i=0; rc==SQLITE_OK && i<nCol; i++){
+ sqlite3_value *pOld = sessionChangesetOld(pIter, i);
+ sqlite3_value *pNew = sessionChangesetNew(pIter, i);
+
+ sqlite3_bind_int(p->pUpdate, i*3+2, !!pNew);
+ if( pOld ){
+ rc = sessionBindValue(p->pUpdate, i*3+1, pOld);
+ }
+ if( rc==SQLITE_OK && pNew ){
+ rc = sessionBindValue(p->pUpdate, i*3+3, pNew);
+ }
+ }
+ if( rc==SQLITE_OK ){
+ sqlite3_bind_int(p->pUpdate, nCol*3+1, pbRetry==0 || pIter->bPatchset);
+ }
+ if( rc!=SQLITE_OK ) return rc;
+
+ /* Attempt the UPDATE. In the case of a NOTFOUND or DATA conflict,
+ ** the result will be SQLITE_OK with 0 rows modified. */
+ sqlite3_step(p->pUpdate);
+ rc = sqlite3_reset(p->pUpdate);
+
+ if( rc==SQLITE_OK && sqlite3_changes(p->db)==0 ){
+ /* A NOTFOUND or DATA error. Search the table to see if it contains
+ ** a row with a matching primary key. If so, this is a DATA conflict.
+ ** Otherwise, if there is no primary key match, it is a NOTFOUND. */
+
+ rc = sessionConflictHandler(
+ SQLITE_CHANGESET_DATA, p, pIter, xConflict, pCtx, pbRetry
+ );
+
+ }else if( (rc&0xff)==SQLITE_CONSTRAINT ){
+ /* This is always a CONSTRAINT conflict. */
+ rc = sessionConflictHandler(
+ SQLITE_CHANGESET_CONFLICT, p, pIter, xConflict, pCtx, 0
+ );
+ }
+
+ }else{
+ assert( op==SQLITE_INSERT );
+ if( p->bStat1 ){
+ /* Check if there is a conflicting row. For sqlite_stat1, this needs
+ ** to be done using a SELECT, as there is no PRIMARY KEY in the
+ ** database schema to throw an exception if a duplicate is inserted. */
+ rc = sessionSeekToRow(p->db, pIter, p->abPK, p->pSelect);
+ if( rc==SQLITE_ROW ){
+ rc = SQLITE_CONSTRAINT;
+ sqlite3_reset(p->pSelect);
+ }
+ }
+
+ if( rc==SQLITE_OK ){
+ rc = sessionBindRow(pIter, sqlite3changeset_new, nCol, 0, p->pInsert);
+ if( rc!=SQLITE_OK ) return rc;
+
+ sqlite3_step(p->pInsert);
+ rc = sqlite3_reset(p->pInsert);
+ }
+
+ if( (rc&0xff)==SQLITE_CONSTRAINT ){
+ rc = sessionConflictHandler(
+ SQLITE_CHANGESET_CONFLICT, p, pIter, xConflict, pCtx, pbReplace
+ );
+ }
+ }
+
+ return rc;
+}
+
+/*
+** Attempt to apply to the database the change that the iterator passed
+** as the first argument currently points to. If a conflict is
+** encountered, invoke the conflict handler callback.
+**
+** The difference between this function and sessionApplyOneOp() is that
+** this function handles the case where the conflict-handler is invoked
+** and returns SQLITE_CHANGESET_REPLACE - indicating that the change
+** should be retried in some manner.
+*/
+static int sessionApplyOneWithRetry(
+ sqlite3 *db, /* Apply change to "main" db of this handle */
+ sqlite3_changeset_iter *pIter, /* Changeset iterator to read change from */
+ SessionApplyCtx *pApply, /* Apply context */
+ int(*xConflict)(void*, int, sqlite3_changeset_iter*),
+ void *pCtx /* First argument passed to xConflict */
+){
+ int bReplace = 0;
+ int bRetry = 0;
+ int rc;
+
+ rc = sessionApplyOneOp(pIter, pApply, xConflict, pCtx, &bReplace, &bRetry);
+ if( rc==SQLITE_OK ){
+ /* If the bRetry flag is set, the change has not been applied due to an
+ ** SQLITE_CHANGESET_DATA problem (i.e. this is an UPDATE or DELETE and
+ ** a row with the correct PK is present in the db, but one or more other
+ ** fields do not contain the expected values) and the conflict handler
+ ** returned SQLITE_CHANGESET_REPLACE. In this case retry the operation,
+ ** but pass NULL as the final argument so that sessionApplyOneOp() ignores
+ ** the SQLITE_CHANGESET_DATA problem. */
+ if( bRetry ){
+ assert( pIter->op==SQLITE_UPDATE || pIter->op==SQLITE_DELETE );
+ rc = sessionApplyOneOp(pIter, pApply, xConflict, pCtx, 0, 0);
+ }
+
+ /* If the bReplace flag is set, the change is an INSERT that has not
+ ** been performed because the database already contains a row with the
+ ** specified primary key and the conflict handler returned
+ ** SQLITE_CHANGESET_REPLACE. In this case remove the conflicting row
+ ** before reattempting the INSERT. */
+ else if( bReplace ){
+ assert( pIter->op==SQLITE_INSERT );
+ rc = sqlite3_exec(db, "SAVEPOINT replace_op", 0, 0, 0);
+ if( rc==SQLITE_OK ){
+ rc = sessionBindRow(pIter,
+ sqlite3changeset_new, pApply->nCol, pApply->abPK, pApply->pDelete);
+ sqlite3_bind_int(pApply->pDelete, pApply->nCol+1, 1);
+ }
+ if( rc==SQLITE_OK ){
+ sqlite3_step(pApply->pDelete);
+ rc = sqlite3_reset(pApply->pDelete);
+ }
+ if( rc==SQLITE_OK ){
+ rc = sessionApplyOneOp(pIter, pApply, xConflict, pCtx, 0, 0);
+ }
+ if( rc==SQLITE_OK ){
+ rc = sqlite3_exec(db, "RELEASE replace_op", 0, 0, 0);
+ }
+ }
+ }
+
+ return rc;
+}
+
+/*
+** Retry the changes accumulated in the pApply->constraints buffer.
+*/
+static int sessionRetryConstraints(
+ sqlite3 *db,
+ int bPatchset,
+ const char *zTab,
+ SessionApplyCtx *pApply,
+ int(*xConflict)(void*, int, sqlite3_changeset_iter*),
+ void *pCtx /* First argument passed to xConflict */
+){
+ int rc = SQLITE_OK;
+
+ while( pApply->constraints.nBuf ){
+ sqlite3_changeset_iter *pIter2 = 0;
+ SessionBuffer cons = pApply->constraints;
+ memset(&pApply->constraints, 0, sizeof(SessionBuffer));
+
+ rc = sessionChangesetStart(
+ &pIter2, 0, 0, cons.nBuf, cons.aBuf, pApply->bInvertConstraints
+ );
+ if( rc==SQLITE_OK ){
+ size_t nByte = 2*pApply->nCol*sizeof(sqlite3_value*);
+ int rc2;
+ pIter2->bPatchset = bPatchset;
+ pIter2->zTab = (char*)zTab;
+ pIter2->nCol = pApply->nCol;
+ pIter2->abPK = pApply->abPK;
+ sessionBufferGrow(&pIter2->tblhdr, nByte, &rc);
+ pIter2->apValue = (sqlite3_value**)pIter2->tblhdr.aBuf;
+ if( rc==SQLITE_OK ) memset(pIter2->apValue, 0, nByte);
+
+ while( rc==SQLITE_OK && SQLITE_ROW==sqlite3changeset_next(pIter2) ){
+ rc = sessionApplyOneWithRetry(db, pIter2, pApply, xConflict, pCtx);
+ }
+
+ rc2 = sqlite3changeset_finalize(pIter2);
+ if( rc==SQLITE_OK ) rc = rc2;
+ }
+ assert( pApply->bDeferConstraints || pApply->constraints.nBuf==0 );
+
+ sqlite3_free(cons.aBuf);
+ if( rc!=SQLITE_OK ) break;
+ if( pApply->constraints.nBuf>=cons.nBuf ){
+ /* No progress was made on the last round. */
+ pApply->bDeferConstraints = 0;
+ }
+ }
+
+ return rc;
+}
+
+/*
+** Argument pIter is a changeset iterator that has been initialized, but
+** not yet passed to sqlite3changeset_next(). This function applies the
+** changeset to the main database attached to handle "db". The supplied
+** conflict handler callback is invoked to resolve any conflicts encountered
+** while applying the change.
+*/
+static int sessionChangesetApply(
+ sqlite3 *db, /* Apply change to "main" db of this handle */
+ sqlite3_changeset_iter *pIter, /* Changeset to apply */
+ int(*xFilter)(
+ void *pCtx, /* Copy of sixth arg to _apply() */
+ const char *zTab /* Table name */
+ ),
+ int(*xConflict)(
+ void *pCtx, /* Copy of fifth arg to _apply() */
+ int eConflict, /* DATA, MISSING, CONFLICT, CONSTRAINT */
+ sqlite3_changeset_iter *p /* Handle describing change and conflict */
+ ),
+ void *pCtx, /* First argument passed to xConflict */
+ void **ppRebase, int *pnRebase, /* OUT: Rebase information */
+ int flags /* SESSION_APPLY_XXX flags */
+){
+ int schemaMismatch = 0;
+ int rc = SQLITE_OK; /* Return code */
+ const char *zTab = 0; /* Name of current table */
+ int nTab = 0; /* Result of sqlite3Strlen30(zTab) */
+ SessionApplyCtx sApply; /* changeset_apply() context object */
+ int bPatchset;
+
+ assert( xConflict!=0 );
+
+ pIter->in.bNoDiscard = 1;
+ memset(&sApply, 0, sizeof(sApply));
+ sApply.bRebase = (ppRebase && pnRebase);
+ sApply.bInvertConstraints = !!(flags & SQLITE_CHANGESETAPPLY_INVERT);
+ sqlite3_mutex_enter(sqlite3_db_mutex(db));
+ if( (flags & SQLITE_CHANGESETAPPLY_NOSAVEPOINT)==0 ){
+ rc = sqlite3_exec(db, "SAVEPOINT changeset_apply", 0, 0, 0);
+ }
+ if( rc==SQLITE_OK ){
+ rc = sqlite3_exec(db, "PRAGMA defer_foreign_keys = 1", 0, 0, 0);
+ }
+ while( rc==SQLITE_OK && SQLITE_ROW==sqlite3changeset_next(pIter) ){
+ int nCol;
+ int op;
+ const char *zNew;
+
+ sqlite3changeset_op(pIter, &zNew, &nCol, &op, 0);
+
+ if( zTab==0 || sqlite3_strnicmp(zNew, zTab, nTab+1) ){
+ u8 *abPK;
+
+ rc = sessionRetryConstraints(
+ db, pIter->bPatchset, zTab, &sApply, xConflict, pCtx
+ );
+ if( rc!=SQLITE_OK ) break;
+
+ sqlite3_free((char*)sApply.azCol); /* cast works around VC++ bug */
+ sqlite3_finalize(sApply.pDelete);
+ sqlite3_finalize(sApply.pUpdate);
+ sqlite3_finalize(sApply.pInsert);
+ sqlite3_finalize(sApply.pSelect);
+ sApply.db = db;
+ sApply.pDelete = 0;
+ sApply.pUpdate = 0;
+ sApply.pInsert = 0;
+ sApply.pSelect = 0;
+ sApply.nCol = 0;
+ sApply.azCol = 0;
+ sApply.abPK = 0;
+ sApply.bStat1 = 0;
+ sApply.bDeferConstraints = 1;
+ sApply.bRebaseStarted = 0;
+ memset(&sApply.constraints, 0, sizeof(SessionBuffer));
+
+ /* If an xFilter() callback was specified, invoke it now. If the
+ ** xFilter callback returns zero, skip this table. If it returns
+ ** non-zero, proceed. */
+ schemaMismatch = (xFilter && (0==xFilter(pCtx, zNew)));
+ if( schemaMismatch ){
+ zTab = sqlite3_mprintf("%s", zNew);
+ if( zTab==0 ){
+ rc = SQLITE_NOMEM;
+ break;
+ }
+ nTab = (int)strlen(zTab);
+ sApply.azCol = (const char **)zTab;
+ }else{
+ int nMinCol = 0;
+ int i;
+
+ sqlite3changeset_pk(pIter, &abPK, 0);
+ rc = sessionTableInfo(
+ db, "main", zNew, &sApply.nCol, &zTab, &sApply.azCol, &sApply.abPK
+ );
+ if( rc!=SQLITE_OK ) break;
+ for(i=0; i<sApply.nCol; i++){
+ if( sApply.abPK[i] ) nMinCol = i+1;
+ }
+
+ if( sApply.nCol==0 ){
+ schemaMismatch = 1;
+ sqlite3_log(SQLITE_SCHEMA,
+ "sqlite3changeset_apply(): no such table: %s", zTab
+ );
+ }
+ else if( sApply.nCol<nCol ){
+ schemaMismatch = 1;
+ sqlite3_log(SQLITE_SCHEMA,
+ "sqlite3changeset_apply(): table %s has %d columns, "
+ "expected %d or more",
+ zTab, sApply.nCol, nCol
+ );
+ }
+ else if( nCol<nMinCol || memcmp(sApply.abPK, abPK, nCol)!=0 ){
+ schemaMismatch = 1;
+ sqlite3_log(SQLITE_SCHEMA, "sqlite3changeset_apply(): "
+ "primary key mismatch for table %s", zTab
+ );
+ }
+ else{
+ sApply.nCol = nCol;
+ if( 0==sqlite3_stricmp(zTab, "sqlite_stat1") ){
+ if( (rc = sessionStat1Sql(db, &sApply) ) ){
+ break;
+ }
+ sApply.bStat1 = 1;
+ }else{
+ if((rc = sessionSelectRow(db, zTab, &sApply))
+ || (rc = sessionUpdateRow(db, zTab, &sApply))
+ || (rc = sessionDeleteRow(db, zTab, &sApply))
+ || (rc = sessionInsertRow(db, zTab, &sApply))
+ ){
+ break;
+ }
+ sApply.bStat1 = 0;
+ }
+ }
+ nTab = sqlite3Strlen30(zTab);
+ }
+ }
+
+ /* If there is a schema mismatch on the current table, proceed to the
+ ** next change. A log message has already been issued. */
+ if( schemaMismatch ) continue;
+
+ rc = sessionApplyOneWithRetry(db, pIter, &sApply, xConflict, pCtx);
+ }
+
+ bPatchset = pIter->bPatchset;
+ if( rc==SQLITE_OK ){
+ rc = sqlite3changeset_finalize(pIter);
+ }else{
+ sqlite3changeset_finalize(pIter);
+ }
+
+ if( rc==SQLITE_OK ){
+ rc = sessionRetryConstraints(db, bPatchset, zTab, &sApply, xConflict, pCtx);
+ }
+
+ if( rc==SQLITE_OK ){
+ int nFk, notUsed;
+ sqlite3_db_status(db, SQLITE_DBSTATUS_DEFERRED_FKS, &nFk, &notUsed, 0);
+ if( nFk!=0 ){
+ int res = SQLITE_CHANGESET_ABORT;
+ sqlite3_changeset_iter sIter;
+ memset(&sIter, 0, sizeof(sIter));
+ sIter.nCol = nFk;
+ res = xConflict(pCtx, SQLITE_CHANGESET_FOREIGN_KEY, &sIter);
+ if( res!=SQLITE_CHANGESET_OMIT ){
+ rc = SQLITE_CONSTRAINT;
+ }
+ }
+ }
+ sqlite3_exec(db, "PRAGMA defer_foreign_keys = 0", 0, 0, 0);
+
+ if( (flags & SQLITE_CHANGESETAPPLY_NOSAVEPOINT)==0 ){
+ if( rc==SQLITE_OK ){
+ rc = sqlite3_exec(db, "RELEASE changeset_apply", 0, 0, 0);
+ }else{
+ sqlite3_exec(db, "ROLLBACK TO changeset_apply", 0, 0, 0);
+ sqlite3_exec(db, "RELEASE changeset_apply", 0, 0, 0);
+ }
+ }
+
+ assert( sApply.bRebase || sApply.rebase.nBuf==0 );
+ if( rc==SQLITE_OK && bPatchset==0 && sApply.bRebase ){
+ *ppRebase = (void*)sApply.rebase.aBuf;
+ *pnRebase = sApply.rebase.nBuf;
+ sApply.rebase.aBuf = 0;
+ }
+ sqlite3_finalize(sApply.pInsert);
+ sqlite3_finalize(sApply.pDelete);
+ sqlite3_finalize(sApply.pUpdate);
+ sqlite3_finalize(sApply.pSelect);
+ sqlite3_free((char*)sApply.azCol); /* cast works around VC++ bug */
+ sqlite3_free((char*)sApply.constraints.aBuf);
+ sqlite3_free((char*)sApply.rebase.aBuf);
+ sqlite3_mutex_leave(sqlite3_db_mutex(db));
+ return rc;
+}
+
+/*
+** Apply the changeset passed via pChangeset/nChangeset to the main
+** database attached to handle "db".
+*/
+SQLITE_API int sqlite3changeset_apply_v2(
+ sqlite3 *db, /* Apply change to "main" db of this handle */
+ int nChangeset, /* Size of changeset in bytes */
+ void *pChangeset, /* Changeset blob */
+ int(*xFilter)(
+ void *pCtx, /* Copy of sixth arg to _apply() */
+ const char *zTab /* Table name */
+ ),
+ int(*xConflict)(
+ void *pCtx, /* Copy of sixth arg to _apply() */
+ int eConflict, /* DATA, MISSING, CONFLICT, CONSTRAINT */
+ sqlite3_changeset_iter *p /* Handle describing change and conflict */
+ ),
+ void *pCtx, /* First argument passed to xConflict */
+ void **ppRebase, int *pnRebase,
+ int flags
+){
+ sqlite3_changeset_iter *pIter; /* Iterator to skip through changeset */
+ int bInverse = !!(flags & SQLITE_CHANGESETAPPLY_INVERT);
+ int rc = sessionChangesetStart(&pIter, 0, 0, nChangeset, pChangeset,bInverse);
+ if( rc==SQLITE_OK ){
+ rc = sessionChangesetApply(
+ db, pIter, xFilter, xConflict, pCtx, ppRebase, pnRebase, flags
+ );
+ }
+ return rc;
+}
+
+/*
+** Apply the changeset passed via pChangeset/nChangeset to the main database
+** attached to handle "db". Invoke the supplied conflict handler callback
+** to resolve any conflicts encountered while applying the change.
+*/
+SQLITE_API int sqlite3changeset_apply(
+ sqlite3 *db, /* Apply change to "main" db of this handle */
+ int nChangeset, /* Size of changeset in bytes */
+ void *pChangeset, /* Changeset blob */
+ int(*xFilter)(
+ void *pCtx, /* Copy of sixth arg to _apply() */
+ const char *zTab /* Table name */
+ ),
+ int(*xConflict)(
+    void *pCtx,                   /* Copy of sixth arg to _apply() */
+ int eConflict, /* DATA, MISSING, CONFLICT, CONSTRAINT */
+ sqlite3_changeset_iter *p /* Handle describing change and conflict */
+ ),
+ void *pCtx /* First argument passed to xConflict */
+){
+ return sqlite3changeset_apply_v2(
+ db, nChangeset, pChangeset, xFilter, xConflict, pCtx, 0, 0, 0
+ );
+}
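+
+/*
+** For example, an application might apply a changeset received from a
+** peer as follows. This is a sketch only: xMyConflict() and applyChanges()
+** are hypothetical names, and a real application would choose conflict
+** resolutions to suit its own replication scheme.
+**
+**   static int xMyConflict(void *pCtx, int eConflict, sqlite3_changeset_iter *p){
+**     // Prefer the incoming change when the local row has drifted.
+**     if( eConflict==SQLITE_CHANGESET_DATA ) return SQLITE_CHANGESET_REPLACE;
+**     // Skip changes that cannot be applied for any other reason.
+**     return SQLITE_CHANGESET_OMIT;
+**   }
+**
+**   static int applyChanges(sqlite3 *db, int nChangeset, void *pChangeset){
+**     return sqlite3changeset_apply(db, nChangeset, pChangeset,
+**         0,            // xFilter - NULL means apply changes to all tables
+**         xMyConflict,  // conflict handler sketched above
+**         0             // first argument passed to xMyConflict
+**     );
+**   }
+*/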
+
+/*
+** Apply the changeset passed via xInput/pIn to the main database
+** attached to handle "db". Invoke the supplied conflict handler callback
+** to resolve any conflicts encountered while applying the change.
+*/
+SQLITE_API int sqlite3changeset_apply_v2_strm(
+ sqlite3 *db, /* Apply change to "main" db of this handle */
+ int (*xInput)(void *pIn, void *pData, int *pnData), /* Input function */
+ void *pIn, /* First arg for xInput */
+ int(*xFilter)(
+ void *pCtx, /* Copy of sixth arg to _apply() */
+ const char *zTab /* Table name */
+ ),
+ int(*xConflict)(
+ void *pCtx, /* Copy of sixth arg to _apply() */
+ int eConflict, /* DATA, MISSING, CONFLICT, CONSTRAINT */
+ sqlite3_changeset_iter *p /* Handle describing change and conflict */
+ ),
+ void *pCtx, /* First argument passed to xConflict */
+ void **ppRebase, int *pnRebase,
+ int flags
+){
+ sqlite3_changeset_iter *pIter; /* Iterator to skip through changeset */
+ int bInverse = !!(flags & SQLITE_CHANGESETAPPLY_INVERT);
+ int rc = sessionChangesetStart(&pIter, xInput, pIn, 0, 0, bInverse);
+ if( rc==SQLITE_OK ){
+ rc = sessionChangesetApply(
+ db, pIter, xFilter, xConflict, pCtx, ppRebase, pnRebase, flags
+ );
+ }
+ return rc;
+}
+SQLITE_API int sqlite3changeset_apply_strm(
+ sqlite3 *db, /* Apply change to "main" db of this handle */
+ int (*xInput)(void *pIn, void *pData, int *pnData), /* Input function */
+ void *pIn, /* First arg for xInput */
+ int(*xFilter)(
+ void *pCtx, /* Copy of sixth arg to _apply() */
+ const char *zTab /* Table name */
+ ),
+ int(*xConflict)(
+ void *pCtx, /* Copy of sixth arg to _apply() */
+ int eConflict, /* DATA, MISSING, CONFLICT, CONSTRAINT */
+ sqlite3_changeset_iter *p /* Handle describing change and conflict */
+ ),
+ void *pCtx /* First argument passed to xConflict */
+){
+ return sqlite3changeset_apply_v2_strm(
+ db, xInput, pIn, xFilter, xConflict, pCtx, 0, 0, 0
+ );
+}
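+
+/*
+** The xInput callback supplies changeset data to the streaming interfaces.
+** On entry, *pnData is the maximum number of bytes requested; the callback
+** writes at most that many bytes into pData and sets *pnData to the number
+** actually written, with fewer bytes than requested (including zero)
+** signalling end-of-input. As a sketch only (fileInput() is a hypothetical
+** name), an input function reading from a standard C FILE* might be:
+**
+**   static int fileInput(void *pIn, void *pData, int *pnData){
+**     FILE *pFile = (FILE*)pIn;
+**     size_t nRead = fread(pData, 1, (size_t)(*pnData), pFile);
+**     if( nRead<(size_t)(*pnData) && ferror(pFile) ) return SQLITE_IOERR_READ;
+**     *pnData = (int)nRead;   // fewer bytes than requested signals EOF
+**     return SQLITE_OK;
+**   }
+*/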
+
+/*
+** sqlite3_changegroup handle.
+*/
+struct sqlite3_changegroup {
+ int rc; /* Error code */
+ int bPatch; /* True to accumulate patchsets */
+ SessionTable *pList; /* List of tables in current patch */
+};
+
+/*
+** This function is called to merge two changes to the same row together as
+** part of an sqlite3changeset_concat() operation. A new change object is
+** allocated and a pointer to it stored in *ppNew.
+*/
+static int sessionChangeMerge(
+ SessionTable *pTab, /* Table structure */
+ int bRebase, /* True for a rebase hash-table */
+ int bPatchset, /* True for patchsets */
+ SessionChange *pExist, /* Existing change */
+ int op2, /* Second change operation */
+ int bIndirect, /* True if second change is indirect */
+ u8 *aRec, /* Second change record */
+ int nRec, /* Number of bytes in aRec */
+ SessionChange **ppNew /* OUT: Merged change */
+){
+ SessionChange *pNew = 0;
+ int rc = SQLITE_OK;
+
+ if( !pExist ){
+ pNew = (SessionChange *)sqlite3_malloc64(sizeof(SessionChange) + nRec);
+ if( !pNew ){
+ return SQLITE_NOMEM;
+ }
+ memset(pNew, 0, sizeof(SessionChange));
+ pNew->op = op2;
+ pNew->bIndirect = bIndirect;
+ pNew->aRecord = (u8*)&pNew[1];
+ if( bIndirect==0 || bRebase==0 ){
+ pNew->nRecord = nRec;
+ memcpy(pNew->aRecord, aRec, nRec);
+ }else{
+ int i;
+ u8 *pIn = aRec;
+ u8 *pOut = pNew->aRecord;
+ for(i=0; i<pTab->nCol; i++){
+ int nIn = sessionSerialLen(pIn);
+ if( *pIn==0 ){
+ *pOut++ = 0;
+ }else if( pTab->abPK[i]==0 ){
+ *pOut++ = 0xFF;
+ }else{
+ memcpy(pOut, pIn, nIn);
+ pOut += nIn;
+ }
+ pIn += nIn;
+ }
+ pNew->nRecord = pOut - pNew->aRecord;
+ }
+ }else if( bRebase ){
+ if( pExist->op==SQLITE_DELETE && pExist->bIndirect ){
+ *ppNew = pExist;
+ }else{
+ sqlite3_int64 nByte = nRec + pExist->nRecord + sizeof(SessionChange);
+ pNew = (SessionChange*)sqlite3_malloc64(nByte);
+ if( pNew==0 ){
+ rc = SQLITE_NOMEM;
+ }else{
+ int i;
+ u8 *a1 = pExist->aRecord;
+ u8 *a2 = aRec;
+ u8 *pOut;
+
+ memset(pNew, 0, nByte);
+ pNew->bIndirect = bIndirect || pExist->bIndirect;
+ pNew->op = op2;
+ pOut = pNew->aRecord = (u8*)&pNew[1];
+
+ for(i=0; i<pTab->nCol; i++){
+ int n1 = sessionSerialLen(a1);
+ int n2 = sessionSerialLen(a2);
+ if( *a1==0xFF || (pTab->abPK[i]==0 && bIndirect) ){
+ *pOut++ = 0xFF;
+ }else if( *a2==0 ){
+ memcpy(pOut, a1, n1);
+ pOut += n1;
+ }else{
+ memcpy(pOut, a2, n2);
+ pOut += n2;
+ }
+ a1 += n1;
+ a2 += n2;
+ }
+ pNew->nRecord = pOut - pNew->aRecord;
+ }
+ sqlite3_free(pExist);
+ }
+ }else{
+ int op1 = pExist->op;
+
+ /*
+ ** op1=INSERT, op2=INSERT -> Unsupported. Discard op2.
+ ** op1=INSERT, op2=UPDATE -> INSERT.
+ ** op1=INSERT, op2=DELETE -> (none)
+ **
+ ** op1=UPDATE, op2=INSERT -> Unsupported. Discard op2.
+ ** op1=UPDATE, op2=UPDATE -> UPDATE.
+ ** op1=UPDATE, op2=DELETE -> DELETE.
+ **
+ ** op1=DELETE, op2=INSERT -> UPDATE.
+ ** op1=DELETE, op2=UPDATE -> Unsupported. Discard op2.
+ ** op1=DELETE, op2=DELETE -> Unsupported. Discard op2.
+ */
+ if( (op1==SQLITE_INSERT && op2==SQLITE_INSERT)
+ || (op1==SQLITE_UPDATE && op2==SQLITE_INSERT)
+ || (op1==SQLITE_DELETE && op2==SQLITE_UPDATE)
+ || (op1==SQLITE_DELETE && op2==SQLITE_DELETE)
+ ){
+ pNew = pExist;
+ }else if( op1==SQLITE_INSERT && op2==SQLITE_DELETE ){
+ sqlite3_free(pExist);
+ assert( pNew==0 );
+ }else{
+ u8 *aExist = pExist->aRecord;
+ sqlite3_int64 nByte;
+ u8 *aCsr;
+
+ /* Allocate a new SessionChange object. Ensure that the aRecord[]
+ ** buffer of the new object is large enough to hold any record that
+ ** may be generated by combining the input records. */
+ nByte = sizeof(SessionChange) + pExist->nRecord + nRec;
+ pNew = (SessionChange *)sqlite3_malloc64(nByte);
+ if( !pNew ){
+ sqlite3_free(pExist);
+ return SQLITE_NOMEM;
+ }
+ memset(pNew, 0, sizeof(SessionChange));
+ pNew->bIndirect = (bIndirect && pExist->bIndirect);
+ aCsr = pNew->aRecord = (u8 *)&pNew[1];
+
+ if( op1==SQLITE_INSERT ){ /* INSERT + UPDATE */
+ u8 *a1 = aRec;
+ assert( op2==SQLITE_UPDATE );
+ pNew->op = SQLITE_INSERT;
+ if( bPatchset==0 ) sessionSkipRecord(&a1, pTab->nCol);
+ sessionMergeRecord(&aCsr, pTab->nCol, aExist, a1);
+ }else if( op1==SQLITE_DELETE ){ /* DELETE + INSERT */
+ assert( op2==SQLITE_INSERT );
+ pNew->op = SQLITE_UPDATE;
+ if( bPatchset ){
+ memcpy(aCsr, aRec, nRec);
+ aCsr += nRec;
+ }else{
+ if( 0==sessionMergeUpdate(&aCsr, pTab, bPatchset, aExist, 0,aRec,0) ){
+ sqlite3_free(pNew);
+ pNew = 0;
+ }
+ }
+ }else if( op2==SQLITE_UPDATE ){ /* UPDATE + UPDATE */
+ u8 *a1 = aExist;
+ u8 *a2 = aRec;
+ assert( op1==SQLITE_UPDATE );
+ if( bPatchset==0 ){
+ sessionSkipRecord(&a1, pTab->nCol);
+ sessionSkipRecord(&a2, pTab->nCol);
+ }
+ pNew->op = SQLITE_UPDATE;
+ if( 0==sessionMergeUpdate(&aCsr, pTab, bPatchset, aRec, aExist,a1,a2) ){
+ sqlite3_free(pNew);
+ pNew = 0;
+ }
+ }else{ /* UPDATE + DELETE */
+ assert( op1==SQLITE_UPDATE && op2==SQLITE_DELETE );
+ pNew->op = SQLITE_DELETE;
+ if( bPatchset ){
+ memcpy(aCsr, aRec, nRec);
+ aCsr += nRec;
+ }else{
+ sessionMergeRecord(&aCsr, pTab->nCol, aRec, aExist);
+ }
+ }
+
+ if( pNew ){
+ pNew->nRecord = (int)(aCsr - pNew->aRecord);
+ }
+ sqlite3_free(pExist);
+ }
+ }
+
+ *ppNew = pNew;
+ return rc;
+}
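+
+/*
+** As a concrete illustration of the op1/op2 rules in sessionChangeMerge()
+** above: for a single row, INSERT(a,b) merged with UPDATE(->(a,c)) yields
+** INSERT(a,c), and merging that result with a DELETE yields no change at
+** all - the INSERT and DELETE cancel out.
+*/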
+
+/*
+** Add all changes in the changeset traversed by the iterator passed as
+** the first argument to the changegroup hash tables.
+*/
+static int sessionChangesetToHash(
+ sqlite3_changeset_iter *pIter, /* Iterator to read from */
+ sqlite3_changegroup *pGrp, /* Changegroup object to add changeset to */
+ int bRebase /* True if hash table is for rebasing */
+){
+ u8 *aRec;
+ int nRec;
+ int rc = SQLITE_OK;
+ SessionTable *pTab = 0;
+
+ while( SQLITE_ROW==sessionChangesetNext(pIter, &aRec, &nRec, 0) ){
+ const char *zNew;
+ int nCol;
+ int op;
+ int iHash;
+ int bIndirect;
+ SessionChange *pChange;
+ SessionChange *pExist = 0;
+ SessionChange **pp;
+
+ if( pGrp->pList==0 ){
+ pGrp->bPatch = pIter->bPatchset;
+ }else if( pIter->bPatchset!=pGrp->bPatch ){
+ rc = SQLITE_ERROR;
+ break;
+ }
+
+ sqlite3changeset_op(pIter, &zNew, &nCol, &op, &bIndirect);
+ if( !pTab || sqlite3_stricmp(zNew, pTab->zName) ){
+ /* Search the list for a matching table */
+ int nNew = (int)strlen(zNew);
+ u8 *abPK;
+
+ sqlite3changeset_pk(pIter, &abPK, 0);
+ for(pTab = pGrp->pList; pTab; pTab=pTab->pNext){
+ if( 0==sqlite3_strnicmp(pTab->zName, zNew, nNew+1) ) break;
+ }
+ if( !pTab ){
+ SessionTable **ppTab;
+
+ pTab = sqlite3_malloc64(sizeof(SessionTable) + nCol + nNew+1);
+ if( !pTab ){
+ rc = SQLITE_NOMEM;
+ break;
+ }
+ memset(pTab, 0, sizeof(SessionTable));
+ pTab->nCol = nCol;
+ pTab->abPK = (u8*)&pTab[1];
+ memcpy(pTab->abPK, abPK, nCol);
+ pTab->zName = (char*)&pTab->abPK[nCol];
+ memcpy(pTab->zName, zNew, nNew+1);
+
+ /* The new object must be linked on to the end of the list, not
+ ** simply added to the start of it. This is to ensure that the
+ ** tables within the output of sqlite3changegroup_output() are in
+ ** the right order. */
+ for(ppTab=&pGrp->pList; *ppTab; ppTab=&(*ppTab)->pNext);
+ *ppTab = pTab;
+ }else if( pTab->nCol!=nCol || memcmp(pTab->abPK, abPK, nCol) ){
+ rc = SQLITE_SCHEMA;
+ break;
+ }
+ }
+
+ if( sessionGrowHash(pIter->bPatchset, pTab) ){
+ rc = SQLITE_NOMEM;
+ break;
+ }
+ iHash = sessionChangeHash(
+ pTab, (pIter->bPatchset && op==SQLITE_DELETE), aRec, pTab->nChange
+ );
+
+ /* Search for existing entry. If found, remove it from the hash table.
+ ** Code below may link it back in.
+ */
+ for(pp=&pTab->apChange[iHash]; *pp; pp=&(*pp)->pNext){
+ int bPkOnly1 = 0;
+ int bPkOnly2 = 0;
+ if( pIter->bPatchset ){
+ bPkOnly1 = (*pp)->op==SQLITE_DELETE;
+ bPkOnly2 = op==SQLITE_DELETE;
+ }
+ if( sessionChangeEqual(pTab, bPkOnly1, (*pp)->aRecord, bPkOnly2, aRec) ){
+ pExist = *pp;
+ *pp = (*pp)->pNext;
+ pTab->nEntry--;
+ break;
+ }
+ }
+
+ rc = sessionChangeMerge(pTab, bRebase,
+ pIter->bPatchset, pExist, op, bIndirect, aRec, nRec, &pChange
+ );
+ if( rc ) break;
+ if( pChange ){
+ pChange->pNext = pTab->apChange[iHash];
+ pTab->apChange[iHash] = pChange;
+ pTab->nEntry++;
+ }
+ }
+
+ if( rc==SQLITE_OK ) rc = pIter->rc;
+ return rc;
+}
+
+/*
+** Serialize a changeset (or patchset) based on all changesets (or patchsets)
+** added to the changegroup object passed as the first argument.
+**
+** If xOutput is not NULL, then the changeset/patchset is returned to the
+** user via one or more calls to xOutput, as with the other streaming
+** interfaces.
+**
+** Or, if xOutput is NULL, then (*ppOut) is populated with a pointer to a
+** buffer containing the output changeset before this function returns. In
+** this case (*pnOut) is set to the size of the output buffer in bytes. It
+** is the responsibility of the caller to free the output buffer using
+** sqlite3_free() when it is no longer required.
+**
+** If successful, SQLITE_OK is returned. Or, if an error occurs, an SQLite
+** error code. If an error occurs and xOutput is NULL, (*ppOut) and (*pnOut)
+** are both set to 0 before returning.
+*/
+static int sessionChangegroupOutput(
+ sqlite3_changegroup *pGrp,
+ int (*xOutput)(void *pOut, const void *pData, int nData),
+ void *pOut,
+ int *pnOut,
+ void **ppOut
+){
+ int rc = SQLITE_OK;
+ SessionBuffer buf = {0, 0, 0};
+ SessionTable *pTab;
+ assert( xOutput==0 || (ppOut==0 && pnOut==0) );
+
+ /* Create the serialized output changeset based on the contents of the
+ ** hash tables attached to the SessionTable objects in list p->pList.
+ */
+ for(pTab=pGrp->pList; rc==SQLITE_OK && pTab; pTab=pTab->pNext){
+ int i;
+ if( pTab->nEntry==0 ) continue;
+
+ sessionAppendTableHdr(&buf, pGrp->bPatch, pTab, &rc);
+ for(i=0; i<pTab->nChange; i++){
+ SessionChange *p;
+ for(p=pTab->apChange[i]; p; p=p->pNext){
+ sessionAppendByte(&buf, p->op, &rc);
+ sessionAppendByte(&buf, p->bIndirect, &rc);
+ sessionAppendBlob(&buf, p->aRecord, p->nRecord, &rc);
+ if( rc==SQLITE_OK && xOutput && buf.nBuf>=sessions_strm_chunk_size ){
+ rc = xOutput(pOut, buf.aBuf, buf.nBuf);
+ buf.nBuf = 0;
+ }
+ }
+ }
+ }
+
+ if( rc==SQLITE_OK ){
+ if( xOutput ){
+ if( buf.nBuf>0 ) rc = xOutput(pOut, buf.aBuf, buf.nBuf);
+ }else{
+ *ppOut = buf.aBuf;
+ *pnOut = buf.nBuf;
+ buf.aBuf = 0;
+ }
+ }
+ sqlite3_free(buf.aBuf);
+
+ return rc;
+}
+
+/*
+** Allocate a new, empty, sqlite3_changegroup.
+*/
+SQLITE_API int sqlite3changegroup_new(sqlite3_changegroup **pp){
+ int rc = SQLITE_OK; /* Return code */
+ sqlite3_changegroup *p; /* New object */
+ p = (sqlite3_changegroup*)sqlite3_malloc(sizeof(sqlite3_changegroup));
+ if( p==0 ){
+ rc = SQLITE_NOMEM;
+ }else{
+ memset(p, 0, sizeof(sqlite3_changegroup));
+ }
+ *pp = p;
+ return rc;
+}
+
+/*
+** Add the changeset currently stored in buffer pData, size nData bytes,
+** to changegroup pGrp.
+*/
+SQLITE_API int sqlite3changegroup_add(sqlite3_changegroup *pGrp, int nData, void *pData){
+ sqlite3_changeset_iter *pIter; /* Iterator opened on pData/nData */
+ int rc; /* Return code */
+
+ rc = sqlite3changeset_start(&pIter, nData, pData);
+ if( rc==SQLITE_OK ){
+ rc = sessionChangesetToHash(pIter, pGrp, 0);
+ }
+ sqlite3changeset_finalize(pIter);
+ return rc;
+}
+
+/*
+** Obtain a buffer containing a changeset representing the concatenation
+** of all changesets added to the group so far.
+*/
+SQLITE_API int sqlite3changegroup_output(
+ sqlite3_changegroup *pGrp,
+ int *pnData,
+ void **ppData
+){
+ return sessionChangegroupOutput(pGrp, 0, 0, pnData, ppData);
+}
+
+/*
+** Streaming versions of changegroup_add().
+*/
+SQLITE_API int sqlite3changegroup_add_strm(
+ sqlite3_changegroup *pGrp,
+ int (*xInput)(void *pIn, void *pData, int *pnData),
+ void *pIn
+){
+ sqlite3_changeset_iter *pIter; /* Iterator opened on pData/nData */
+ int rc; /* Return code */
+
+ rc = sqlite3changeset_start_strm(&pIter, xInput, pIn);
+ if( rc==SQLITE_OK ){
+ rc = sessionChangesetToHash(pIter, pGrp, 0);
+ }
+ sqlite3changeset_finalize(pIter);
+ return rc;
+}
+
+/*
+** Streaming version of sqlite3changegroup_output().
+*/
+SQLITE_API int sqlite3changegroup_output_strm(
+ sqlite3_changegroup *pGrp,
+ int (*xOutput)(void *pOut, const void *pData, int nData),
+ void *pOut
+){
+ return sessionChangegroupOutput(pGrp, xOutput, pOut, 0, 0);
+}
+
+/*
+** Delete a changegroup object.
+*/
+SQLITE_API void sqlite3changegroup_delete(sqlite3_changegroup *pGrp){
+ if( pGrp ){
+ sessionDeleteTable(pGrp->pList);
+ sqlite3_free(pGrp);
+ }
+}
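+
+/*
+** Taken together, the functions above support the following pattern for
+** merging any number of changesets. This is a sketch only - combineAll()
+** is a hypothetical name, and the caller must eventually release *ppOut
+** using sqlite3_free():
+**
+**   static int combineAll(int n, int *anData, void **apData, int *pnOut, void **ppOut){
+**     sqlite3_changegroup *pGrp = 0;
+**     int i;
+**     int rc = sqlite3changegroup_new(&pGrp);
+**     for(i=0; rc==SQLITE_OK && i<n; i++){
+**       rc = sqlite3changegroup_add(pGrp, anData[i], apData[i]);
+**     }
+**     if( rc==SQLITE_OK ){
+**       rc = sqlite3changegroup_output(pGrp, pnOut, ppOut);
+**     }
+**     sqlite3changegroup_delete(pGrp);
+**     return rc;
+**   }
+**
+** sqlite3changeset_concat() below is exactly this pattern specialized
+** to two inputs.
+*/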
+
+/*
+** Combine two changesets together.
+*/
+SQLITE_API int sqlite3changeset_concat(
+ int nLeft, /* Number of bytes in lhs input */
+ void *pLeft, /* Lhs input changeset */
+ int nRight /* Number of bytes in rhs input */,
+ void *pRight, /* Rhs input changeset */
+ int *pnOut, /* OUT: Number of bytes in output changeset */
+ void **ppOut /* OUT: changeset (left <concat> right) */
+){
+ sqlite3_changegroup *pGrp;
+ int rc;
+
+ rc = sqlite3changegroup_new(&pGrp);
+ if( rc==SQLITE_OK ){
+ rc = sqlite3changegroup_add(pGrp, nLeft, pLeft);
+ }
+ if( rc==SQLITE_OK ){
+ rc = sqlite3changegroup_add(pGrp, nRight, pRight);
+ }
+ if( rc==SQLITE_OK ){
+ rc = sqlite3changegroup_output(pGrp, pnOut, ppOut);
+ }
+ sqlite3changegroup_delete(pGrp);
+
+ return rc;
+}
+
+/*
+** Streaming version of sqlite3changeset_concat().
+*/
+SQLITE_API int sqlite3changeset_concat_strm(
+ int (*xInputA)(void *pIn, void *pData, int *pnData),
+ void *pInA,
+ int (*xInputB)(void *pIn, void *pData, int *pnData),
+ void *pInB,
+ int (*xOutput)(void *pOut, const void *pData, int nData),
+ void *pOut
+){
+ sqlite3_changegroup *pGrp;
+ int rc;
+
+ rc = sqlite3changegroup_new(&pGrp);
+ if( rc==SQLITE_OK ){
+ rc = sqlite3changegroup_add_strm(pGrp, xInputA, pInA);
+ }
+ if( rc==SQLITE_OK ){
+ rc = sqlite3changegroup_add_strm(pGrp, xInputB, pInB);
+ }
+ if( rc==SQLITE_OK ){
+ rc = sqlite3changegroup_output_strm(pGrp, xOutput, pOut);
+ }
+ sqlite3changegroup_delete(pGrp);
+
+ return rc;
+}
+
+/*
+** Changeset rebaser handle.
+*/
+struct sqlite3_rebaser {
+ sqlite3_changegroup grp; /* Hash table */
+};
+
+/*
+** Buffers a1 and a2 must each contain a sessions module record of nCol
+** fields. This function appends to buffer pBuf an nCol field sessions
+** module record that is a copy of a1, except that each field that is
+** undefined (0x00) or marked "replaced" (0xFF) in a1[] is taken from
+** a2[] instead.
+*/
+static void sessionAppendRecordMerge(
+ SessionBuffer *pBuf, /* Buffer to append to */
+ int nCol, /* Number of columns in each record */
+ u8 *a1, int n1, /* Record 1 */
+ u8 *a2, int n2, /* Record 2 */
+ int *pRc /* IN/OUT: error code */
+){
+ sessionBufferGrow(pBuf, n1+n2, pRc);
+ if( *pRc==SQLITE_OK ){
+ int i;
+ u8 *pOut = &pBuf->aBuf[pBuf->nBuf];
+ for(i=0; i<nCol; i++){
+ int nn1 = sessionSerialLen(a1);
+ int nn2 = sessionSerialLen(a2);
+ if( *a1==0 || *a1==0xFF ){
+ memcpy(pOut, a2, nn2);
+ pOut += nn2;
+ }else{
+ memcpy(pOut, a1, nn1);
+ pOut += nn1;
+ }
+ a1 += nn1;
+ a2 += nn2;
+ }
+
+ pBuf->nBuf = pOut-pBuf->aBuf;
+ assert( pBuf->nBuf<=pBuf->nAlloc );
+ }
+}
+
+/*
+** This function is called when rebasing a local UPDATE change against one
+** or more remote UPDATE changes. The aRec/nRec buffer contains the current
+** old.* and new.* records for the change. The rebase buffer (a single
+** record) is in aChange/nChange. The rebased change is appended to buffer
+** pBuf.
+**
+** Rebasing the UPDATE involves:
+**
+** * Removing any changes to fields for which the corresponding field
+** in the rebase buffer is set to "replaced" (type 0xFF). If this
+** means the UPDATE change updates no fields, nothing is appended
+** to the output buffer.
+**
+** * For each field modified by the local change for which the
+** corresponding field in the rebase buffer is not "undefined" (0x00)
+** or "replaced" (0xFF), the old.* value is replaced by the value
+** in the rebase buffer.
+*/
+static void sessionAppendPartialUpdate(
+ SessionBuffer *pBuf, /* Append record here */
+ sqlite3_changeset_iter *pIter, /* Iterator pointed at local change */
+ u8 *aRec, int nRec, /* Local change */
+ u8 *aChange, int nChange, /* Record to rebase against */
+ int *pRc /* IN/OUT: Return Code */
+){
+ sessionBufferGrow(pBuf, 2+nRec+nChange, pRc);
+ if( *pRc==SQLITE_OK ){
+ int bData = 0;
+ u8 *pOut = &pBuf->aBuf[pBuf->nBuf];
+ int i;
+ u8 *a1 = aRec;
+ u8 *a2 = aChange;
+
+ *pOut++ = SQLITE_UPDATE;
+ *pOut++ = pIter->bIndirect;
+ for(i=0; i<pIter->nCol; i++){
+ int n1 = sessionSerialLen(a1);
+ int n2 = sessionSerialLen(a2);
+ if( pIter->abPK[i] || a2[0]==0 ){
+ if( !pIter->abPK[i] ) bData = 1;
+ memcpy(pOut, a1, n1);
+ pOut += n1;
+ }else if( a2[0]!=0xFF ){
+ bData = 1;
+ memcpy(pOut, a2, n2);
+ pOut += n2;
+ }else{
+ *pOut++ = '\0';
+ }
+ a1 += n1;
+ a2 += n2;
+ }
+ if( bData ){
+ a2 = aChange;
+ for(i=0; i<pIter->nCol; i++){
+ int n1 = sessionSerialLen(a1);
+ int n2 = sessionSerialLen(a2);
+ if( pIter->abPK[i] || a2[0]!=0xFF ){
+ memcpy(pOut, a1, n1);
+ pOut += n1;
+ }else{
+ *pOut++ = '\0';
+ }
+ a1 += n1;
+ a2 += n2;
+ }
+ pBuf->nBuf = (pOut - pBuf->aBuf);
+ }
+ }
+}
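+
+/*
+** Worked illustration of the rules above, for a table with two columns
+** and a PRIMARY KEY on column 0: a local UPDATE that modifies only
+** column 1, rebased against a record that marks column 1 as "replaced"
+** (0xFF), has its only modification removed; no modified fields remain,
+** so nothing at all is appended to pBuf. If the rebase record instead
+** holds a concrete value for column 1, that value is swapped in as the
+** old.* value for the column and the rebased UPDATE is appended as usual.
+*/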
+
+/*
+** pIter is configured to iterate through a changeset. This function rebases
+** that changeset according to the current configuration of the rebaser
+** object passed as the first argument. If no error occurs and argument xOutput
+** is not NULL, then the changeset is returned to the caller by invoking
+** xOutput zero or more times and SQLITE_OK returned. Or, if xOutput is NULL,
+** then (*ppOut) is set to point to a buffer containing the rebased changeset
+** before this function returns. In this case (*pnOut) is set to the size of
+** the buffer in bytes. It is the responsibility of the caller to eventually
+** free the (*ppOut) buffer using sqlite3_free().
+**
+** If an error occurs, an SQLite error code is returned. If ppOut and
+** pnOut are not NULL, then the two output parameters are set to 0 before
+** returning.
+*/
+static int sessionRebase(
+ sqlite3_rebaser *p, /* Rebaser hash table */
+ sqlite3_changeset_iter *pIter, /* Input data */
+ int (*xOutput)(void *pOut, const void *pData, int nData),
+ void *pOut, /* Context for xOutput callback */
+ int *pnOut, /* OUT: Number of bytes in output changeset */
+ void **ppOut /* OUT: Inverse of pChangeset */
+){
+ int rc = SQLITE_OK;
+ u8 *aRec = 0;
+ int nRec = 0;
+ int bNew = 0;
+ SessionTable *pTab = 0;
+ SessionBuffer sOut = {0,0,0};
+
+ while( SQLITE_ROW==sessionChangesetNext(pIter, &aRec, &nRec, &bNew) ){
+ SessionChange *pChange = 0;
+ int bDone = 0;
+
+ if( bNew ){
+ const char *zTab = pIter->zTab;
+ for(pTab=p->grp.pList; pTab; pTab=pTab->pNext){
+ if( 0==sqlite3_stricmp(pTab->zName, zTab) ) break;
+ }
+ bNew = 0;
+
+ /* A patchset may not be rebased */
+ if( pIter->bPatchset ){
+ rc = SQLITE_ERROR;
+ }
+
+ /* Append a table header to the output for this new table */
+ sessionAppendByte(&sOut, pIter->bPatchset ? 'P' : 'T', &rc);
+ sessionAppendVarint(&sOut, pIter->nCol, &rc);
+ sessionAppendBlob(&sOut, pIter->abPK, pIter->nCol, &rc);
+ sessionAppendBlob(&sOut,(u8*)pIter->zTab,(int)strlen(pIter->zTab)+1,&rc);
+ }
+
+ if( pTab && rc==SQLITE_OK ){
+ int iHash = sessionChangeHash(pTab, 0, aRec, pTab->nChange);
+
+ for(pChange=pTab->apChange[iHash]; pChange; pChange=pChange->pNext){
+ if( sessionChangeEqual(pTab, 0, aRec, 0, pChange->aRecord) ){
+ break;
+ }
+ }
+ }
+
+ if( pChange ){
+ assert( pChange->op==SQLITE_DELETE || pChange->op==SQLITE_INSERT );
+ switch( pIter->op ){
+ case SQLITE_INSERT:
+ if( pChange->op==SQLITE_INSERT ){
+ bDone = 1;
+ if( pChange->bIndirect==0 ){
+ sessionAppendByte(&sOut, SQLITE_UPDATE, &rc);
+ sessionAppendByte(&sOut, pIter->bIndirect, &rc);
+ sessionAppendBlob(&sOut, pChange->aRecord, pChange->nRecord, &rc);
+ sessionAppendBlob(&sOut, aRec, nRec, &rc);
+ }
+ }
+ break;
+
+ case SQLITE_UPDATE:
+ bDone = 1;
+ if( pChange->op==SQLITE_DELETE ){
+ if( pChange->bIndirect==0 ){
+ u8 *pCsr = aRec;
+ sessionSkipRecord(&pCsr, pIter->nCol);
+ sessionAppendByte(&sOut, SQLITE_INSERT, &rc);
+ sessionAppendByte(&sOut, pIter->bIndirect, &rc);
+ sessionAppendRecordMerge(&sOut, pIter->nCol,
+ pCsr, nRec-(pCsr-aRec),
+ pChange->aRecord, pChange->nRecord, &rc
+ );
+ }
+ }else{
+ sessionAppendPartialUpdate(&sOut, pIter,
+ aRec, nRec, pChange->aRecord, pChange->nRecord, &rc
+ );
+ }
+ break;
+
+ default:
+ assert( pIter->op==SQLITE_DELETE );
+ bDone = 1;
+ if( pChange->op==SQLITE_INSERT ){
+ sessionAppendByte(&sOut, SQLITE_DELETE, &rc);
+ sessionAppendByte(&sOut, pIter->bIndirect, &rc);
+ sessionAppendRecordMerge(&sOut, pIter->nCol,
+ pChange->aRecord, pChange->nRecord, aRec, nRec, &rc
+ );
+ }
+ break;
+ }
+ }
+
+ if( bDone==0 ){
+ sessionAppendByte(&sOut, pIter->op, &rc);
+ sessionAppendByte(&sOut, pIter->bIndirect, &rc);
+ sessionAppendBlob(&sOut, aRec, nRec, &rc);
+ }
+ if( rc==SQLITE_OK && xOutput && sOut.nBuf>sessions_strm_chunk_size ){
+ rc = xOutput(pOut, sOut.aBuf, sOut.nBuf);
+ sOut.nBuf = 0;
+ }
+ if( rc ) break;
+ }
+
+ if( rc!=SQLITE_OK ){
+ sqlite3_free(sOut.aBuf);
+ memset(&sOut, 0, sizeof(sOut));
+ }
+
+ if( rc==SQLITE_OK ){
+ if( xOutput ){
+ if( sOut.nBuf>0 ){
+ rc = xOutput(pOut, sOut.aBuf, sOut.nBuf);
+ }
+ }else{
+ *ppOut = (void*)sOut.aBuf;
+ *pnOut = sOut.nBuf;
+ sOut.aBuf = 0;
+ }
+ }
+ sqlite3_free(sOut.aBuf);
+ return rc;
+}
+
+/*
+** Create a new rebaser object.
+*/
+SQLITE_API int sqlite3rebaser_create(sqlite3_rebaser **ppNew){
+ int rc = SQLITE_OK;
+ sqlite3_rebaser *pNew;
+
+ pNew = sqlite3_malloc(sizeof(sqlite3_rebaser));
+ if( pNew==0 ){
+ rc = SQLITE_NOMEM;
+ }else{
+ memset(pNew, 0, sizeof(sqlite3_rebaser));
+ }
+ *ppNew = pNew;
+ return rc;
+}
+
+/*
+** Call this one or more times to configure a rebaser.
+*/
+SQLITE_API int sqlite3rebaser_configure(
+ sqlite3_rebaser *p,
+ int nRebase, const void *pRebase
+){
+ sqlite3_changeset_iter *pIter = 0; /* Iterator opened on pData/nData */
+ int rc; /* Return code */
+ rc = sqlite3changeset_start(&pIter, nRebase, (void*)pRebase);
+ if( rc==SQLITE_OK ){
+ rc = sessionChangesetToHash(pIter, &p->grp, 1);
+ }
+ sqlite3changeset_finalize(pIter);
+ return rc;
+}
+
+/*
+** Rebase a changeset according to the current rebaser configuration.
+*/
+SQLITE_API int sqlite3rebaser_rebase(
+ sqlite3_rebaser *p,
+ int nIn, const void *pIn,
+ int *pnOut, void **ppOut
+){
+ sqlite3_changeset_iter *pIter = 0; /* Iterator to skip through input */
+ int rc = sqlite3changeset_start(&pIter, nIn, (void*)pIn);
+
+ if( rc==SQLITE_OK ){
+ rc = sessionRebase(p, pIter, 0, 0, pnOut, ppOut);
+ sqlite3changeset_finalize(pIter);
+ }
+
+ return rc;
+}
+
+/*
+** Rebase a changeset according to the current rebaser configuration.
+*/
+SQLITE_API int sqlite3rebaser_rebase_strm(
+ sqlite3_rebaser *p,
+ int (*xInput)(void *pIn, void *pData, int *pnData),
+ void *pIn,
+ int (*xOutput)(void *pOut, const void *pData, int nData),
+ void *pOut
+){
+ sqlite3_changeset_iter *pIter = 0; /* Iterator to skip through input */
+ int rc = sqlite3changeset_start_strm(&pIter, xInput, pIn);
+
+ if( rc==SQLITE_OK ){
+ rc = sessionRebase(p, pIter, xOutput, pOut, 0, 0);
+ sqlite3changeset_finalize(pIter);
+ }
+
+ return rc;
+}
+
+/*
+** Destroy a rebaser object.
+*/
+SQLITE_API void sqlite3rebaser_delete(sqlite3_rebaser *p){
+ if( p ){
+ sessionDeleteTable(p->grp.pList);
+ sqlite3_free(p);
+ }
+}
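+
+/*
+** The rebaser ties into sqlite3changeset_apply_v2() as follows. This is a
+** sketch only: rebaseLocal() is a hypothetical name, and xMyConflict() is
+** an application-defined conflict handler such as the one sketched after
+** sqlite3changeset_apply() above.
+**
+**   static int rebaseLocal(
+**     sqlite3 *db,
+**     int nRemote, void *pRemote,    // changeset received from a peer
+**     int nLocal, void *pLocal,      // concurrent local changeset
+**     int *pnOut, void **ppOut       // OUT: rebased local changeset
+**   ){
+**     void *pRebase = 0;
+**     int nRebase = 0;
+**     sqlite3_rebaser *pRebaser = 0;
+**     int rc = sqlite3changeset_apply_v2(db, nRemote, pRemote,
+**         0, xMyConflict, 0, &pRebase, &nRebase, 0
+**     );
+**     if( rc==SQLITE_OK ) rc = sqlite3rebaser_create(&pRebaser);
+**     if( rc==SQLITE_OK ) rc = sqlite3rebaser_configure(pRebaser, nRebase, pRebase);
+**     if( rc==SQLITE_OK ) rc = sqlite3rebaser_rebase(pRebaser, nLocal, pLocal, pnOut, ppOut);
+**     sqlite3rebaser_delete(pRebaser);
+**     sqlite3_free(pRebase);
+**     return rc;
+**   }
+*/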
+
+/*
+** Global configuration
+*/
+SQLITE_API int sqlite3session_config(int op, void *pArg){
+ int rc = SQLITE_OK;
+ switch( op ){
+ case SQLITE_SESSION_CONFIG_STRMSIZE: {
+ int *pInt = (int*)pArg;
+ if( *pInt>0 ){
+ sessions_strm_chunk_size = *pInt;
+ }
+ *pInt = sessions_strm_chunk_size;
+ break;
+ }
+ default:
+ rc = SQLITE_MISUSE;
+ break;
+ }
+ return rc;
+}
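+
+/*
+** For example, to raise the chunk size used by the streaming interfaces
+** to 64KiB (this should be done at start-up, before the sessions module
+** is used from any thread):
+**
+**   int nChunk = 64*1024;
+**   sqlite3session_config(SQLITE_SESSION_CONFIG_STRMSIZE, (void*)&nChunk);
+**   // on return, nChunk holds the value now in use
+*/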
+
+#endif /* SQLITE_ENABLE_SESSION && SQLITE_ENABLE_PREUPDATE_HOOK */
+
+/************** End of sqlite3session.c **************************************/
+/************** Begin file fts5.c ********************************************/
+
+
+#if !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS5)
+
+#if !defined(NDEBUG) && !defined(SQLITE_DEBUG)
+# define NDEBUG 1
+#endif
+#if defined(NDEBUG) && defined(SQLITE_DEBUG)
+# undef NDEBUG
+#endif
+
+/*
+** 2014 May 31
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+******************************************************************************
+**
+** Interfaces to extend FTS5. Using the interfaces defined in this file,
+** FTS5 may be extended with:
+**
+** * custom tokenizers, and
+** * custom auxiliary functions.
+*/
+
+
+#ifndef _FTS5_H
+#define _FTS5_H
+
+/* #include "sqlite3.h" */
+
+#if 0
+extern "C" {
+#endif
+
+/*************************************************************************
+** CUSTOM AUXILIARY FUNCTIONS
+**
+** Virtual table implementations may overload SQL functions by implementing
+** the sqlite3_module.xFindFunction() method.
+*/
+
+typedef struct Fts5ExtensionApi Fts5ExtensionApi;
+typedef struct Fts5Context Fts5Context;
+typedef struct Fts5PhraseIter Fts5PhraseIter;
+
+typedef void (*fts5_extension_function)(
+ const Fts5ExtensionApi *pApi, /* API offered by current FTS version */
+ Fts5Context *pFts, /* First arg to pass to pApi functions */
+ sqlite3_context *pCtx, /* Context for returning result/error */
+ int nVal, /* Number of values in apVal[] array */
+ sqlite3_value **apVal /* Array of trailing arguments */
+);
+
+struct Fts5PhraseIter {
+ const unsigned char *a;
+ const unsigned char *b;
+};
+
+/*
+** EXTENSION API FUNCTIONS
+**
+** xUserData(pFts):
+** Return a copy of the context pointer the extension function was
+** registered with.
+**
+** xColumnTotalSize(pFts, iCol, pnToken):
+** If parameter iCol is less than zero, set output variable *pnToken
+** to the total number of tokens in the FTS5 table. Or, if iCol is
+** non-negative but less than the number of columns in the table, set
+** *pnToken to the total number of tokens in column iCol, considering
+** all rows in the FTS5 table.
+**
+** If parameter iCol is greater than or equal to the number of columns
+** in the table, SQLITE_RANGE is returned. Or, if an error occurs (e.g.
+** an OOM condition or IO error), an appropriate SQLite error code is
+** returned.
+**
+** xColumnCount(pFts):
+** Return the number of columns in the table.
+**
+** xColumnSize(pFts, iCol, pnToken):
+** If parameter iCol is less than zero, set output variable *pnToken
+** to the total number of tokens in the current row. Or, if iCol is
+** non-negative but less than the number of columns in the table, set
+** *pnToken to the number of tokens in column iCol of the current row.
+**
+** If parameter iCol is greater than or equal to the number of columns
+** in the table, SQLITE_RANGE is returned. Or, if an error occurs (e.g.
+** an OOM condition or IO error), an appropriate SQLite error code is
+** returned.
+**
+** This function may be quite inefficient if used with an FTS5 table
+** created with the "columnsize=0" option.
+**
+** xColumnText:
+** This function attempts to retrieve the text of column iCol of the
+** current document. If successful, (*pz) is set to point to a buffer
+** containing the text in utf-8 encoding, (*pn) is set to the size in bytes
+** (not characters) of the buffer and SQLITE_OK is returned. Otherwise,
+** if an error occurs, an SQLite error code is returned and the final values
+** of (*pz) and (*pn) are undefined.
+**
+** xPhraseCount:
+** Returns the number of phrases in the current query expression.
+**
+** xPhraseSize:
+** Returns the number of tokens in phrase iPhrase of the query. Phrases
+** are numbered starting from zero.
+**
+** xInstCount:
+** Set *pnInst to the total number of occurrences of all phrases within
+** the query within the current row. Return SQLITE_OK if successful, or
+** an error code (e.g. SQLITE_NOMEM) if an error occurs.
+**
+** This API can be quite slow if used with an FTS5 table created with the
+** "detail=none" or "detail=column" option. If the FTS5 table is created
+** with either "detail=none" or "detail=column" together with the
+** "content=" option (i.e. if it is a contentless table), then this API
+** always returns 0.
+**
+** xInst:
+** Query for the details of phrase match iIdx within the current row.
+** Phrase matches are numbered starting from zero, so the iIdx argument
+** should be greater than or equal to zero and smaller than the value
+** output by xInstCount().
+**
+** Usually, output parameter *piPhrase is set to the phrase number, *piCol
+** to the column in which it occurs and *piOff the token offset of the
+** first token of the phrase. Returns SQLITE_OK if successful, or an error
+** code (e.g. SQLITE_NOMEM) if an error occurs.
+**
+** This API can be quite slow if used with an FTS5 table created with the
+** "detail=none" or "detail=column" option.
+**
+** xRowid:
+** Returns the rowid of the current row.
+**
+** xTokenize:
+** Tokenize text using the tokenizer belonging to the FTS5 table.
+**
+** xQueryPhrase(pFts5, iPhrase, pUserData, xCallback):
+** This API function is used to query the FTS table for phrase iPhrase
+** of the current query. Specifically, a query equivalent to:
+**
+** ... FROM ftstable WHERE ftstable MATCH $p ORDER BY rowid
+**
+** with $p set to a phrase equivalent to the phrase iPhrase of the
+** current query is executed. Any column filter that applies to
+** phrase iPhrase of the current query is included in $p. For each
+** row visited, the callback function passed as the fourth argument
+** is invoked. The context and API objects passed to the callback
+** function may be used to access the properties of each matched row.
+** Invoking Api.xUserData() returns a copy of the pointer passed as
+** the third argument (pUserData) to xQueryPhrase().
+**
+** If the callback function returns any value other than SQLITE_OK, the
+** query is abandoned and the xQueryPhrase function returns immediately.
+** If the returned value is SQLITE_DONE, xQueryPhrase returns SQLITE_OK.
+** Otherwise, the error code is propagated upwards.
+**
+** If the query runs to completion without incident, SQLITE_OK is returned.
+** Or, if some error occurs before the query completes or is aborted by
+** the callback, an SQLite error code is returned.
+**
+**
+** xSetAuxdata(pFts5, pAux, xDelete)
+**
+** Save the pointer passed as the second argument as the extension function's
+** "auxiliary data". The pointer may then be retrieved by the current or any
+** future invocation of the same fts5 extension function made as part of
+** the same MATCH query using the xGetAuxdata() API.
+**
+** Each extension function is allocated a single auxiliary data slot for
+** each FTS query (MATCH expression). If the extension function is invoked
+** more than once for a single FTS query, then all invocations share a
+** single auxiliary data context.
+**
+** If there is already an auxiliary data pointer when this function is
+** invoked, then it is replaced by the new pointer. If an xDelete callback
+** was specified along with the original pointer, it is invoked at this
+** point.
+**
+** The xDelete callback, if one is specified, is also invoked on the
+** auxiliary data pointer after the FTS5 query has finished.
+**
+** If an error (e.g. an OOM condition) occurs within this function,
+** the auxiliary data is set to NULL and an error code returned. If the
+** xDelete parameter was not NULL, it is invoked on the auxiliary data
+** pointer before returning.
+**
+**
+** xGetAuxdata(pFts5, bClear)
+**
+** Returns the current auxiliary data pointer for the fts5 extension
+** function. See the xSetAuxdata() method for details.
+**
+** If the bClear argument is non-zero, then the auxiliary data is cleared
+** (set to NULL) before this function returns. In this case the xDelete
+** callback, if any, is not invoked.
+**
+**
+** xRowCount(pFts5, pnRow)
+**
+** This function is used to retrieve the total number of rows in the table.
+** In other words, the same value that would be returned by:
+**
+** SELECT count(*) FROM ftstable;
+**
+** xPhraseFirst()
+** This function is used, along with type Fts5PhraseIter and the xPhraseNext
+** method, to iterate through all instances of a single query phrase within
+** the current row. This is the same information as is accessible via the
+** xInstCount/xInst APIs. While the xInstCount/xInst APIs are more convenient
+** to use, this API may be faster under some circumstances. To iterate
+** through instances of phrase iPhrase, use the following code:
+**
+** Fts5PhraseIter iter;
+** int iCol, iOff;
+** for(pApi->xPhraseFirst(pFts, iPhrase, &iter, &iCol, &iOff);
+** iCol>=0;
+** pApi->xPhraseNext(pFts, &iter, &iCol, &iOff)
+** ){
+** // An instance of phrase iPhrase at offset iOff of column iCol
+** }
+**
+** The Fts5PhraseIter structure is defined above. Applications should not
+** modify this structure directly - it should only be used as shown above
+** with the xPhraseFirst() and xPhraseNext() API methods (and by
+** xPhraseFirstColumn() and xPhraseNextColumn() as illustrated below).
+**
+** This API can be quite slow if used with an FTS5 table created with the
+** "detail=none" or "detail=column" option. If the FTS5 table is created
+** with either "detail=none" or "detail=column" together with the
+** "content=" option (i.e. if it is a contentless table), then this API
+** always iterates
+** through an empty set (all calls to xPhraseFirst() set iCol to -1).
+**
+** xPhraseNext()
+** See xPhraseFirst above.
+**
+** xPhraseFirstColumn()
+** This function and xPhraseNextColumn() are similar to the xPhraseFirst()
+** and xPhraseNext() APIs described above. The difference is that instead
+** of iterating through all instances of a phrase in the current row, these
+** APIs are used to iterate through the set of columns in the current row
+** that contain one or more instances of a specified phrase. For example:
+**
+** Fts5PhraseIter iter;
+** int iCol;
+** for(pApi->xPhraseFirstColumn(pFts, iPhrase, &iter, &iCol);
+** iCol>=0;
+** pApi->xPhraseNextColumn(pFts, &iter, &iCol)
+** ){
+** // Column iCol contains at least one instance of phrase iPhrase
+** }
+**
+** This API can be quite slow if used with an FTS5 table created with the
+** "detail=none" option. If the FTS5 table is created with both the
+** "detail=none" and "content=" options (i.e. if it is a contentless table),
+** then this API always iterates through an empty set (all calls to
+** xPhraseFirstColumn() set iCol to -1).
+**
+** The information accessed using this API and its companion
+** xPhraseFirstColumn() may also be obtained using xPhraseFirst/xPhraseNext
+** (or xInst/xInstCount). The chief advantage of this API is that it is
+** significantly more efficient than those alternatives when used with
+** "detail=column" tables.
+**
+** xPhraseNextColumn()
+** See xPhraseFirstColumn above.
+*/
+struct Fts5ExtensionApi {
+ int iVersion; /* Currently always set to 3 */
+
+ void *(*xUserData)(Fts5Context*);
+
+ int (*xColumnCount)(Fts5Context*);
+ int (*xRowCount)(Fts5Context*, sqlite3_int64 *pnRow);
+ int (*xColumnTotalSize)(Fts5Context*, int iCol, sqlite3_int64 *pnToken);
+
+ int (*xTokenize)(Fts5Context*,
+ const char *pText, int nText, /* Text to tokenize */
+ void *pCtx, /* Context passed to xToken() */
+ int (*xToken)(void*, int, const char*, int, int, int) /* Callback */
+ );
+
+ int (*xPhraseCount)(Fts5Context*);
+ int (*xPhraseSize)(Fts5Context*, int iPhrase);
+
+ int (*xInstCount)(Fts5Context*, int *pnInst);
+ int (*xInst)(Fts5Context*, int iIdx, int *piPhrase, int *piCol, int *piOff);
+
+ sqlite3_int64 (*xRowid)(Fts5Context*);
+ int (*xColumnText)(Fts5Context*, int iCol, const char **pz, int *pn);
+ int (*xColumnSize)(Fts5Context*, int iCol, int *pnToken);
+
+ int (*xQueryPhrase)(Fts5Context*, int iPhrase, void *pUserData,
+ int(*)(const Fts5ExtensionApi*,Fts5Context*,void*)
+ );
+ int (*xSetAuxdata)(Fts5Context*, void *pAux, void(*xDelete)(void*));
+ void *(*xGetAuxdata)(Fts5Context*, int bClear);
+
+ int (*xPhraseFirst)(Fts5Context*, int iPhrase, Fts5PhraseIter*, int*, int*);
+ void (*xPhraseNext)(Fts5Context*, Fts5PhraseIter*, int *piCol, int *piOff);
+
+ int (*xPhraseFirstColumn)(Fts5Context*, int iPhrase, Fts5PhraseIter*, int*);
+ void (*xPhraseNextColumn)(Fts5Context*, Fts5PhraseIter*, int *piCol);
+};
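+
+/*
+** As an illustrative sketch only (the name matchCountFunc is hypothetical),
+** a minimal auxiliary function that returns the number of phrase matches
+** in the current row could be implemented as:
+**
+**   static void matchCountFunc(
+**     const Fts5ExtensionApi *pApi,  // API offered by current FTS version
+**     Fts5Context *pFts,             // First arg to pass to pApi functions
+**     sqlite3_context *pCtx,         // Context for returning result/error
+**     int nVal,                      // Number of values in apVal[] array
+**     sqlite3_value **apVal          // Array of trailing arguments
+**   ){
+**     int nInst = 0;
+**     int rc = pApi->xInstCount(pFts, &nInst);
+**     if( rc==SQLITE_OK ){
+**       sqlite3_result_int(pCtx, nInst);
+**     }else{
+**       sqlite3_result_error_code(pCtx, rc);
+**     }
+**   }
+**
+** Once registered via fts5_api.xCreateFunction(), it may be invoked as,
+** for example, "SELECT matchcount(t) FROM t WHERE t MATCH $query".
+*/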
+
+/*
+** CUSTOM AUXILIARY FUNCTIONS
+*************************************************************************/
+
+/*************************************************************************
+** CUSTOM TOKENIZERS
+**
+** Applications may also register custom tokenizer types. A tokenizer
+** is registered by providing fts5 with a populated instance of the
+** following structure. All structure methods must be defined; setting
+** any member of the fts5_tokenizer struct to NULL leads to undefined
+** behaviour. The structure methods are expected to function as follows:
+**
+** xCreate:
+** This function is used to allocate and initialize a tokenizer instance.
+** A tokenizer instance is required to actually tokenize text.
+**
+** The first argument passed to this function is a copy of the (void*)
+** pointer provided by the application when the fts5_tokenizer object
+** was registered with FTS5 (the third argument to xCreateTokenizer()).
+** The second and third arguments are an array of nul-terminated strings
+** containing the tokenizer arguments, if any, specified following the
+** tokenizer name as part of the CREATE VIRTUAL TABLE statement used
+** to create the FTS5 table.
+**
+** The final argument is an output variable. If successful, (*ppOut)
+** should be set to point to the new tokenizer handle and SQLITE_OK
+** returned. If an error occurs, some value other than SQLITE_OK should
+** be returned. In this case, fts5 assumes that the final value of *ppOut
+** is undefined.
+**
+** xDelete:
+** This function is invoked to delete a tokenizer handle previously
+** allocated using xCreate(). Fts5 guarantees that this function will
+** be invoked exactly once for each successful call to xCreate().
+**
+** xTokenize:
+** This function is expected to tokenize the nText byte string indicated
+** by argument pText. pText may or may not be nul-terminated. The first
+** argument passed to this function is a pointer to an Fts5Tokenizer object
+** returned by an earlier call to xCreate().
+**
+** The second argument indicates the reason that FTS5 is requesting
+** tokenization of the supplied text. This is always one of the following
+** four values:
+**
+** <ul><li> <b>FTS5_TOKENIZE_DOCUMENT</b> - A document is being inserted into
+** or removed from the FTS table. The tokenizer is being invoked to
+** determine the set of tokens to add to (or delete from) the
+** FTS index.
+**
+** <li> <b>FTS5_TOKENIZE_QUERY</b> - A MATCH query is being executed
+** against the FTS index. The tokenizer is being called to tokenize
+** a bareword or quoted string specified as part of the query.
+**
+** <li> <b>(FTS5_TOKENIZE_QUERY | FTS5_TOKENIZE_PREFIX)</b> - Same as
+** FTS5_TOKENIZE_QUERY, except that the bareword or quoted string is
+** followed by a "*" character, indicating that the last token
+** returned by the tokenizer will be treated as a token prefix.
+**
+** <li> <b>FTS5_TOKENIZE_AUX</b> - The tokenizer is being invoked to
+** satisfy an fts5_api.xTokenize() request made by an auxiliary
+** function. Or an fts5_api.xColumnSize() request made by the same
+** on a columnsize=0 database.
+** </ul>
+**
+** For each token in the input string, the supplied callback xToken() must
+** be invoked. The first argument to it should be a copy of the pointer
+** passed as the second argument to xTokenize(). The third and fourth
+** arguments are a pointer to a buffer containing the token text, and the
+** size of the token in bytes. The fifth and sixth arguments are the byte
+** offsets of the first byte of, and of the first byte immediately
+** following, the text from which the token is derived within the input.
+**
+** The second argument passed to the xToken() callback ("tflags") should
+** normally be set to 0. The exception is if the tokenizer supports
+** synonyms. In this case see the discussion below for details.
+**
+** FTS5 assumes the xToken() callback is invoked for each token in the
+** order that they occur within the input text.
+**
+** If an xToken() callback returns any value other than SQLITE_OK, then
+** the tokenization should be abandoned and the xTokenize() method should
+** immediately return a copy of the xToken() return value. Or, if the
+** input buffer is exhausted, xTokenize() should return SQLITE_OK. Finally,
+** if an error occurs with the xTokenize() implementation itself, it
+** may abandon the tokenization and return any error code other than
+** SQLITE_OK or SQLITE_DONE.
+**
+** SYNONYM SUPPORT
+**
+** Custom tokenizers may also support synonyms. Consider a case in which a
+** user wishes to query for a phrase such as "first place". Using the
+** built-in tokenizers, the FTS5 query 'first + place' will match instances
+** of "first place" within the document set, but not alternative forms
+** such as "1st place". In some applications, it would be better to match
+** all instances of "first place" or "1st place" regardless of which form
+** the user specified in the MATCH query text.
+**
+** There are several ways to approach this in FTS5:
+**
+** <ol><li> By mapping all synonyms to a single token. In this case, using
+** the above example, this means that the tokenizer returns the
+** same token for inputs "first" and "1st". Say that token is in
+** fact "first", so that when the user inserts the document "I won
+** 1st place" entries are added to the index for tokens "i", "won",
+** "first" and "place". If the user then queries for '1st + place',
+** the tokenizer substitutes "first" for "1st" and the query works
+** as expected.
+**
+** <li> By querying the index for all synonyms of each query term
+** separately. In this case, when tokenizing query text, the
+** tokenizer may provide multiple synonyms for a single term
+** within the document. FTS5 then queries the index for each
+** synonym individually. For example, faced with the query:
+**
+** <codeblock>
+** ... MATCH 'first place'</codeblock>
+**
+** the tokenizer offers both "1st" and "first" as synonyms for the
+** first token in the MATCH query and FTS5 effectively runs a query
+** similar to:
+**
+** <codeblock>
+** ... MATCH '(first OR 1st) place'</codeblock>
+**
+** except that, for the purposes of auxiliary functions, the query
+** still appears to contain just two phrases - "(first OR 1st)"
+** being treated as a single phrase.
+**
+** <li> By adding multiple synonyms for a single term to the FTS index.
+** Using this method, when tokenizing document text, the tokenizer
+** provides multiple synonyms for each token. So that when a
+** document such as "I won first place" is tokenized, entries are
+** added to the FTS index for "i", "won", "first", "1st" and
+** "place".
+**
+** This way, even if the tokenizer does not provide synonyms
+** when tokenizing query text (it should not - to do so would be
+** inefficient), it doesn't matter if the user queries for
+** 'first + place' or '1st + place', as there are entries in the
+** FTS index corresponding to both forms of the first token.
+** </ol>
+**
+** Whether it is parsing document or query text, any call to xToken that
+** specifies a <i>tflags</i> argument with the FTS5_TOKEN_COLOCATED bit
+** is considered to supply a synonym for the previous token. For example,
+** when parsing the document "I won first place", a tokenizer that supports
+** synonyms would call xToken() 5 times, as follows:
+**
+** <codeblock>
+** xToken(pCtx, 0, "i", 1, 0, 1);
+** xToken(pCtx, 0, "won", 3, 2, 5);
+** xToken(pCtx, 0, "first", 5, 6, 11);
+** xToken(pCtx, FTS5_TOKEN_COLOCATED, "1st", 3, 6, 11);
+** xToken(pCtx, 0, "place", 5, 12, 17);
+**</codeblock>
+**
+** It is an error to specify the FTS5_TOKEN_COLOCATED flag the first time
+** xToken() is called. Multiple synonyms may be specified for a single token
+** by making multiple calls to xToken(FTS5_TOKEN_COLOCATED) in sequence.
+** There is no limit to the number of synonyms that may be provided for a
+** single token.
+**
+** In many cases, method (1) above is the best approach. It does not add
+** extra data to the FTS index or require FTS5 to query for multiple terms,
+** so it is efficient in terms of disk space and query speed. However, it
+** does not support prefix queries very well. If, as suggested above, the
+** token "first" is substituted for "1st" by the tokenizer, then the query:
+**
+** <codeblock>
+** ... MATCH '1s*'</codeblock>
+**
+** will not match documents that contain the token "1st" (as the tokenizer
+** will probably not map "1s" to any prefix of "first").
+**
+** For full prefix support, method (3) may be preferred. In this case,
+** because the index contains entries for both "first" and "1st", prefix
+** queries such as 'fi*' or '1s*' will match correctly. However, because
+** extra entries are added to the FTS index, this method uses more space
+** within the database.
+**
+** Method (2) offers a midpoint between (1) and (3). Using this method,
+** a query such as '1s*' will match documents that contain the literal
+** token "1st", but not "first" (assuming the tokenizer is not able to
+** provide synonyms for prefixes). However, a non-prefix query like '1st'
+** will match against "1st" and "first". This method does not require
+** extra disk space, as no extra entries are added to the FTS index.
+** On the other hand, it may require more CPU cycles to run MATCH queries,
+** as separate queries of the FTS index are required for each synonym.
+**
+** When using methods (2) or (3), it is important that the tokenizer only
+** provide synonyms when tokenizing document text (method (2)) or query
+** text (method (3)), not both. Doing so will not cause any errors, but is
+** inefficient.
+*/
+typedef struct Fts5Tokenizer Fts5Tokenizer;
+typedef struct fts5_tokenizer fts5_tokenizer;
+struct fts5_tokenizer {
+ int (*xCreate)(void*, const char **azArg, int nArg, Fts5Tokenizer **ppOut);
+ void (*xDelete)(Fts5Tokenizer*);
+ int (*xTokenize)(Fts5Tokenizer*,
+ void *pCtx,
+ int flags, /* Mask of FTS5_TOKENIZE_* flags */
+ const char *pText, int nText,
+ int (*xToken)(
+ void *pCtx, /* Copy of 2nd argument to xTokenize() */
+ int tflags, /* Mask of FTS5_TOKEN_* flags */
+ const char *pToken, /* Pointer to buffer containing token */
+ int nToken, /* Size of token in bytes */
+ int iStart, /* Byte offset of token within input text */
+ int iEnd /* Byte offset of end of token within input text */
+ )
+ );
+};
+
+/* Flags that may be passed as the third argument to xTokenize() */
+#define FTS5_TOKENIZE_QUERY 0x0001
+#define FTS5_TOKENIZE_PREFIX 0x0002
+#define FTS5_TOKENIZE_DOCUMENT 0x0004
+#define FTS5_TOKENIZE_AUX 0x0008
+
+/* Flags that may be passed by the tokenizer implementation back to FTS5
+** as the third argument to the supplied xToken callback. */
+#define FTS5_TOKEN_COLOCATED 0x0001 /* Same position as prev. token */
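+
+/*
+** As an illustrative sketch only (not part of this header): an xTokenize()
+** implementation using method (3) above might emit the tokens of the
+** document "I won 1st place" with "first" supplied as a colocated synonym
+** for "1st". The tokenizing logic is elided; the xToken() call sequence
+** and byte offsets are the point:
+**
+** <codeblock>
+**   static int exampleTokenize(
+**     Fts5Tokenizer *pUnused, void *pCtx, int flags,
+**     const char *pText, int nText,
+**     int (*xToken)(void*, int, const char*, int, int, int)
+**   ){
+**     int rc;
+**     // ... for each token found in pText, invoke xToken(). For the
+**     // input "I won 1st place", the token "1st" occupies bytes 6..9:
+**     rc = xToken(pCtx, 0, "1st", 3, 6, 9);
+**     if( rc==SQLITE_OK ){
+**       // Same position as the previous token:
+**       rc = xToken(pCtx, FTS5_TOKEN_COLOCATED, "first", 5, 6, 9);
+**     }
+**     // ... remaining tokens ...
+**     return rc;
+**   }</codeblock>
+*/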
+
+/*
+** END OF CUSTOM TOKENIZERS
+*************************************************************************/
+
+/*************************************************************************
+** FTS5 EXTENSION REGISTRATION API
+*/
+typedef struct fts5_api fts5_api;
+struct fts5_api {
+ int iVersion; /* Currently always set to 2 */
+
+ /* Create a new tokenizer */
+ int (*xCreateTokenizer)(
+ fts5_api *pApi,
+ const char *zName,
+ void *pContext,
+ fts5_tokenizer *pTokenizer,
+ void (*xDestroy)(void*)
+ );
+
+ /* Find an existing tokenizer */
+ int (*xFindTokenizer)(
+ fts5_api *pApi,
+ const char *zName,
+ void **ppContext,
+ fts5_tokenizer *pTokenizer
+ );
+
+ /* Create a new auxiliary function */
+ int (*xCreateFunction)(
+ fts5_api *pApi,
+ const char *zName,
+ void *pContext,
+ fts5_extension_function xFunction,
+ void (*xDestroy)(void*)
+ );
+};
+
+/*
+** END OF REGISTRATION API
+*************************************************************************/
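+
+/*
+** As a usage sketch (not part of this header): the documented way for an
+** application to obtain the fts5_api pointer from a database handle is the
+** "SELECT fts5(?1)" pattern together with sqlite3_bind_pointer() (available
+** since SQLite 3.20.0). The helper name exampleFts5Api() and the tokenizer
+** name "example" below are illustrative assumptions only:
+**
+** <codeblock>
+**   static fts5_api *exampleFts5Api(sqlite3 *db){
+**     fts5_api *pRet = 0;
+**     sqlite3_stmt *pStmt = 0;
+**     if( SQLITE_OK==sqlite3_prepare(db, "SELECT fts5(?1)", -1, &pStmt, 0) ){
+**       sqlite3_bind_pointer(pStmt, 1, (void*)&pRet, "fts5_api_ptr", 0);
+**       sqlite3_step(pStmt);
+**     }
+**     sqlite3_finalize(pStmt);
+**     return pRet;
+**   }</codeblock>
+**
+** A tokenizer implementing the fts5_tokenizer methods above would then be
+** registered with something like:
+**
+** <codeblock>
+**   fts5_api *pApi = exampleFts5Api(db);
+**   if( pApi ){
+**     rc = pApi->xCreateTokenizer(pApi, "example", pCtx, &tokenizer, 0);
+**   }</codeblock>
+*/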
+
+#if 0
+} /* end of the 'extern "C"' block */
+#endif
+
+#endif /* _FTS5_H */
+
+/*
+** 2014 May 31
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+******************************************************************************
+**
+*/
+#ifndef _FTS5INT_H
+#define _FTS5INT_H
+
+/* #include "fts5.h" */
+/* #include "sqlite3ext.h" */
+SQLITE_EXTENSION_INIT1
+
+/* #include <string.h> */
+/* #include <assert.h> */
+
+#ifndef SQLITE_AMALGAMATION
+
+typedef unsigned char u8;
+typedef unsigned int u32;
+typedef unsigned short u16;
+typedef short i16;
+typedef sqlite3_int64 i64;
+typedef sqlite3_uint64 u64;
+
+#ifndef ArraySize
+# define ArraySize(x) ((int)(sizeof(x) / sizeof(x[0])))
+#endif
+
+#define testcase(x)
+#define ALWAYS(x) 1
+#define NEVER(x) 0
+
+#define MIN(x,y) (((x) < (y)) ? (x) : (y))
+#define MAX(x,y) (((x) > (y)) ? (x) : (y))
+
+/*
+** Constants for the largest and smallest possible 64-bit signed integers.
+*/
+# define LARGEST_INT64 (0xffffffff|(((i64)0x7fffffff)<<32))
+# define SMALLEST_INT64 (((i64)-1) - LARGEST_INT64)
+
+#endif
+
+/* Truncate very long tokens to this many bytes. Hard limit is
+** (65536-1-1-4-9)==65521 bytes. The limiting factor is the 16-bit offset
+** field that occurs at the start of each leaf page (see fts5_index.c). */
+#define FTS5_MAX_TOKEN_SIZE 32768
+
+/*
+** Maximum number of prefix indexes on a single FTS5 table. This must be
+** less than 32. If it is set to anything larger than that, an #error
+** directive in fts5_index.c will cause the build to fail.
+*/
+#define FTS5_MAX_PREFIX_INDEXES 31
+
+/*
+** Maximum segments permitted in a single index
+*/
+#define FTS5_MAX_SEGMENT 2000
+
+#define FTS5_DEFAULT_NEARDIST 10
+#define FTS5_DEFAULT_RANK "bm25"
+
+/* Name of rank and rowid columns */
+#define FTS5_RANK_NAME "rank"
+#define FTS5_ROWID_NAME "rowid"
+
+#ifdef SQLITE_DEBUG
+# define FTS5_CORRUPT sqlite3Fts5Corrupt()
+static int sqlite3Fts5Corrupt(void);
+#else
+# define FTS5_CORRUPT SQLITE_CORRUPT_VTAB
+#endif
+
+/*
+** The assert_nc() macro is similar to the assert() macro, except that it
+** is used for assert() conditions that are true only if it can be
+** guaranteed that the database is not corrupt.
+*/
+#ifdef SQLITE_DEBUG
+SQLITE_API extern int sqlite3_fts5_may_be_corrupt;
+# define assert_nc(x) assert(sqlite3_fts5_may_be_corrupt || (x))
+#else
+# define assert_nc(x) assert(x)
+#endif
+
+/*
+** A version of memcmp() that does not cause asan errors if one of the pointer
+** parameters is NULL and the number of bytes to compare is zero.
+*/
+#define fts5Memcmp(s1, s2, n) ((n)==0 ? 0 : memcmp((s1), (s2), (n)))
+
+/* Mark a function parameter as unused, to suppress nuisance compiler
+** warnings. */
+#ifndef UNUSED_PARAM
+# define UNUSED_PARAM(X) (void)(X)
+#endif
+
+#ifndef UNUSED_PARAM2
+# define UNUSED_PARAM2(X, Y) (void)(X), (void)(Y)
+#endif
+
+typedef struct Fts5Global Fts5Global;
+typedef struct Fts5Colset Fts5Colset;
+
+/* If a NEAR() clump or phrase may only match a specific set of columns,
+** then an object of the following type is used to record the set of columns.
+** Each entry in the aiCol[] array is a column that may be matched.
+**
+** This object is used by fts5_expr.c and fts5_index.c.
+*/
+struct Fts5Colset {
+ int nCol;
+ int aiCol[1];
+};
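+
+/* Because aiCol[] is declared with a single element but used as a
+** variable-length array, instances are allocated with extra space for the
+** columns they hold. A sketch (illustration only):
+**
+**   Fts5Colset *p = (Fts5Colset*)sqlite3_malloc(
+**       sizeof(Fts5Colset) + sizeof(int)*(nCol-1)
+**   );
+**   if( p ) p->nCol = nCol;
+*/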
+
+
+
+/**************************************************************************
+** Interface to code in fts5_config.c. fts5_config.c contains code
+** to parse the arguments passed to the CREATE VIRTUAL TABLE statement.
+*/
+
+typedef struct Fts5Config Fts5Config;
+
+/*
+** An instance of the following structure encodes all information that can
+** be gleaned from the CREATE VIRTUAL TABLE statement, together with all
+** information loaded from the %_config table.
+**
+** nAutomerge:
+** The minimum number of segments that an auto-merge operation should
+** attempt to merge together. A value of 1 sets the object to use the
+** compile-time default. Zero disables auto-merge altogether.
+**
+** zContent:
+**
+** zContentRowid:
+** The value of the content_rowid= option, if one was specified. Or
+** the string "rowid" otherwise. This text is not quoted - if it is
+** used as part of an SQL statement it needs to be quoted appropriately.
+**
+** zContentExprlist:
+**
+** pzErrmsg:
+** This exists in order to allow the fts5_index.c module to return a
+** decent error message if it encounters a file-format version it does
+** not understand.
+**
+** bColumnsize:
+** True if the %_docsize table is created.
+**
+** bPrefixIndex:
+** This is only used for debugging. If set to false, any prefix indexes
+** are ignored. This value is configured using:
+**
+** INSERT INTO tbl(tbl, rank) VALUES('prefix-index', $bPrefixIndex);
+**
+*/
+struct Fts5Config {
+ sqlite3 *db; /* Database handle */
+ char *zDb; /* Database holding FTS index (e.g. "main") */
+ char *zName; /* Name of FTS index */
+ int nCol; /* Number of columns */
+ char **azCol; /* Column names */
+ u8 *abUnindexed; /* True for unindexed columns */
+ int nPrefix; /* Number of prefix indexes */
+ int *aPrefix; /* Sizes in bytes of nPrefix prefix indexes */
+ int eContent; /* An FTS5_CONTENT value */
+ char *zContent; /* content table */
+ char *zContentRowid; /* "content_rowid=" option value */
+ int bColumnsize; /* "columnsize=" option value (dflt==1) */
+ int eDetail; /* FTS5_DETAIL_XXX value */
+ char *zContentExprlist;
+ Fts5Tokenizer *pTok;
+ fts5_tokenizer *pTokApi;
+ int bLock; /* True when table is preparing statement */
+
+ /* Values loaded from the %_config table */
+ int iCookie; /* Incremented when %_config is modified */
+ int pgsz; /* Approximate page size used in %_data */
+ int nAutomerge; /* 'automerge' setting */
+ int nCrisisMerge; /* Maximum allowed segments per level */
+ int nUsermerge; /* 'usermerge' setting */
+ int nHashSize; /* Bytes of memory for in-memory hash */
+ char *zRank; /* Name of rank function */
+ char *zRankArgs; /* Arguments to rank function */
+
+ /* If non-NULL, points to sqlite3_vtab.base.zErrmsg. Often NULL. */
+ char **pzErrmsg;
+
+#ifdef SQLITE_DEBUG
+ int bPrefixIndex; /* True to use prefix-indexes */
+#endif
+};
+
+/* Current expected value of %_config table 'version' field */
+#define FTS5_CURRENT_VERSION 4
+
+#define FTS5_CONTENT_NORMAL 0
+#define FTS5_CONTENT_NONE 1
+#define FTS5_CONTENT_EXTERNAL 2
+
+#define FTS5_DETAIL_FULL 0
+#define FTS5_DETAIL_NONE 1
+#define FTS5_DETAIL_COLUMNS 2
+
+
+
+static int sqlite3Fts5ConfigParse(
+ Fts5Global*, sqlite3*, int, const char **, Fts5Config**, char**
+);
+static void sqlite3Fts5ConfigFree(Fts5Config*);
+
+static int sqlite3Fts5ConfigDeclareVtab(Fts5Config *pConfig);
+
+static int sqlite3Fts5Tokenize(
+ Fts5Config *pConfig, /* FTS5 Configuration object */
+ int flags, /* FTS5_TOKENIZE_* flags */
+ const char *pText, int nText, /* Text to tokenize */
+ void *pCtx, /* Context passed to xToken() */
+ int (*xToken)(void*, int, const char*, int, int, int) /* Callback */
+);
+
+static void sqlite3Fts5Dequote(char *z);
+
+/* Load the contents of the %_config table */
+static int sqlite3Fts5ConfigLoad(Fts5Config*, int);
+
+/* Set the value of a single config attribute */
+static int sqlite3Fts5ConfigSetValue(Fts5Config*, const char*, sqlite3_value*, int*);
+
+static int sqlite3Fts5ConfigParseRank(const char*, char**, char**);
+
+/*
+** End of interface to code in fts5_config.c.
+**************************************************************************/
+
+/**************************************************************************
+** Interface to code in fts5_buffer.c.
+*/
+
+/*
+** Buffer object for the incremental building of string data.
+*/
+typedef struct Fts5Buffer Fts5Buffer;
+struct Fts5Buffer {
+ u8 *p;
+ int n;
+ int nSpace;
+};
+
+static int sqlite3Fts5BufferSize(int*, Fts5Buffer*, u32);
+static void sqlite3Fts5BufferAppendVarint(int*, Fts5Buffer*, i64);
+static void sqlite3Fts5BufferAppendBlob(int*, Fts5Buffer*, u32, const u8*);
+static void sqlite3Fts5BufferAppendString(int *, Fts5Buffer*, const char*);
+static void sqlite3Fts5BufferFree(Fts5Buffer*);
+static void sqlite3Fts5BufferZero(Fts5Buffer*);
+static void sqlite3Fts5BufferSet(int*, Fts5Buffer*, int, const u8*);
+static void sqlite3Fts5BufferAppendPrintf(int *, Fts5Buffer*, char *zFmt, ...);
+
+static char *sqlite3Fts5Mprintf(int *pRc, const char *zFmt, ...);
+
+#define fts5BufferZero(x) sqlite3Fts5BufferZero(x)
+#define fts5BufferAppendVarint(a,b,c) sqlite3Fts5BufferAppendVarint(a,b,c)
+#define fts5BufferFree(a) sqlite3Fts5BufferFree(a)
+#define fts5BufferAppendBlob(a,b,c,d) sqlite3Fts5BufferAppendBlob(a,b,c,d)
+#define fts5BufferSet(a,b,c,d) sqlite3Fts5BufferSet(a,b,c,d)
+
+#define fts5BufferGrow(pRc,pBuf,nn) ( \
+ (u32)((pBuf)->n) + (u32)(nn) <= (u32)((pBuf)->nSpace) ? 0 : \
+ sqlite3Fts5BufferSize((pRc),(pBuf),(nn)+(pBuf)->n) \
+)
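+
+/* The buffer routines above report allocation failure through their int*
+** first argument, which allows a sequence of appends to be written with a
+** single error check at the end. A sketch (illustration only):
+**
+**   int rc = SQLITE_OK;
+**   Fts5Buffer buf = {0, 0, 0};
+**   sqlite3Fts5BufferAppendString(&rc, &buf, "hello");
+**   sqlite3Fts5BufferAppendVarint(&rc, &buf, 42);
+**   if( rc==SQLITE_OK ){
+**     // ... use buf.p / buf.n ...
+**   }
+**   sqlite3Fts5BufferFree(&buf);
+*/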
+
+/* Write and decode big-endian 32-bit integer values */
+static void sqlite3Fts5Put32(u8*, int);
+static int sqlite3Fts5Get32(const u8*);
+
+#define FTS5_POS2COLUMN(iPos) (int)(iPos >> 32)
+#define FTS5_POS2OFFSET(iPos) (int)(iPos & 0x7FFFFFFF)
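+
+/* For example, a position value encoding column 2, token offset 35 is
+** ((i64)2<<32)+35; FTS5_POS2COLUMN() then recovers 2 and FTS5_POS2OFFSET()
+** recovers 35. */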
+
+typedef struct Fts5PoslistReader Fts5PoslistReader;
+struct Fts5PoslistReader {
+ /* Variables used only by sqlite3Fts5PoslistIterXXX() functions. */
+ const u8 *a; /* Position list to iterate through */
+ int n; /* Size of buffer at a[] in bytes */
+ int i; /* Current offset in a[] */
+
+ u8 bFlag; /* For client use (any custom purpose) */
+
+ /* Output variables */
+ u8 bEof; /* Set to true at EOF */
+ i64 iPos; /* (iCol<<32) + iPos */
+};
+static int sqlite3Fts5PoslistReaderInit(
+ const u8 *a, int n, /* Poslist buffer to iterate through */
+ Fts5PoslistReader *pIter /* Iterator object to initialize */
+);
+static int sqlite3Fts5PoslistReaderNext(Fts5PoslistReader*);
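+
+/* A typical iteration over a position list, sketched on the assumption
+** that a[] holds a valid n-byte poslist (illustration only):
+**
+**   Fts5PoslistReader reader;
+**   for(sqlite3Fts5PoslistReaderInit(a, n, &reader);
+**       reader.bEof==0;
+**       sqlite3Fts5PoslistReaderNext(&reader)
+**   ){
+**     int iCol = FTS5_POS2COLUMN(reader.iPos);
+**     int iOff = FTS5_POS2OFFSET(reader.iPos);
+**     // ... use (iCol, iOff) ...
+**   }
+*/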
+
+typedef struct Fts5PoslistWriter Fts5PoslistWriter;
+struct Fts5PoslistWriter {
+ i64 iPrev;
+};
+static int sqlite3Fts5PoslistWriterAppend(Fts5Buffer*, Fts5PoslistWriter*, i64);
+static void sqlite3Fts5PoslistSafeAppend(Fts5Buffer*, i64*, i64);
+
+static int sqlite3Fts5PoslistNext64(
+ const u8 *a, int n, /* Buffer containing poslist */
+ int *pi, /* IN/OUT: Offset within a[] */
+ i64 *piOff /* IN/OUT: Current offset */
+);
+
+/* Malloc utility */
+static void *sqlite3Fts5MallocZero(int *pRc, sqlite3_int64 nByte);
+static char *sqlite3Fts5Strndup(int *pRc, const char *pIn, int nIn);
+
+/* Character set tests (like isspace(), isalpha() etc.) */
+static int sqlite3Fts5IsBareword(char t);
+
+
+/* Bucket of terms object used by the integrity-check in offsets=0 mode. */
+typedef struct Fts5Termset Fts5Termset;
+static int sqlite3Fts5TermsetNew(Fts5Termset**);
+static int sqlite3Fts5TermsetAdd(Fts5Termset*, int, const char*, int, int *pbPresent);
+static void sqlite3Fts5TermsetFree(Fts5Termset*);
+
+/*
+** End of interface to code in fts5_buffer.c.
+**************************************************************************/
+
+/**************************************************************************
+** Interface to code in fts5_index.c. fts5_index.c contains code
+** to access the data stored in the %_data table.
+*/
+
+typedef struct Fts5Index Fts5Index;
+typedef struct Fts5IndexIter Fts5IndexIter;
+
+struct Fts5IndexIter {
+ i64 iRowid;
+ const u8 *pData;
+ int nData;
+ u8 bEof;
+};
+
+#define sqlite3Fts5IterEof(x) ((x)->bEof)
+
+/*
+** Values used as part of the flags argument passed to IndexQuery().
+*/
+#define FTS5INDEX_QUERY_PREFIX 0x0001 /* Prefix query */
+#define FTS5INDEX_QUERY_DESC 0x0002 /* Docs in descending rowid order */
+#define FTS5INDEX_QUERY_TEST_NOIDX 0x0004 /* Do not use prefix index */
+#define FTS5INDEX_QUERY_SCAN 0x0008 /* Scan query (fts5vocab) */
+
+/* The following are used internally by the fts5_index.c module. They are
+** defined here only to make it easier to avoid clashes with the flags
+** above. */
+#define FTS5INDEX_QUERY_SKIPEMPTY 0x0010
+#define FTS5INDEX_QUERY_NOOUTPUT 0x0020
+
+/*
+** Create/destroy an Fts5Index object.
+*/
+static int sqlite3Fts5IndexOpen(Fts5Config *pConfig, int bCreate, Fts5Index**, char**);
+static int sqlite3Fts5IndexClose(Fts5Index *p);
+
+/*
+** Return a simple checksum value based on the arguments.
+*/
+static u64 sqlite3Fts5IndexEntryCksum(
+ i64 iRowid,
+ int iCol,
+ int iPos,
+ int iIdx,
+ const char *pTerm,
+ int nTerm
+);
+
+/*
+** Argument p points to a buffer containing UTF-8 text that is nByte bytes
+** in size. Return the number of bytes in the nChar-character prefix of the
+** buffer, or 0 if there are fewer than nChar characters in total.
+*/
+static int sqlite3Fts5IndexCharlenToBytelen(
+ const char *p,
+ int nByte,
+ int nChar
+);
+
+/*
+** Open a new iterator to iterate though all rowids that match the
+** specified token or token prefix.
+*/
+static int sqlite3Fts5IndexQuery(
+ Fts5Index *p, /* FTS index to query */
+ const char *pToken, int nToken, /* Token (or prefix) to query for */
+ int flags, /* Mask of FTS5INDEX_QUERY_X flags */
+ Fts5Colset *pColset, /* Match these columns only */
+ Fts5IndexIter **ppIter /* OUT: New iterator object */
+);
+
+/*
+** The various operations on open token or token prefix iterators opened
+** using sqlite3Fts5IndexQuery().
+*/
+static int sqlite3Fts5IterNext(Fts5IndexIter*);
+static int sqlite3Fts5IterNextFrom(Fts5IndexIter*, i64 iMatch);
+
+/*
+** Close an iterator opened by sqlite3Fts5IndexQuery().
+*/
+static void sqlite3Fts5IterClose(Fts5IndexIter*);
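+
+/* Sketch of querying the index and visiting each matching rowid
+** (illustration only; error handling abbreviated):
+**
+**   Fts5IndexIter *pIter = 0;
+**   int rc = sqlite3Fts5IndexQuery(p, "abc", 3,
+**       FTS5INDEX_QUERY_PREFIX, 0, &pIter);
+**   while( rc==SQLITE_OK && 0==sqlite3Fts5IterEof(pIter) ){
+**     i64 iRowid = pIter->iRowid;   // matching document
+**     rc = sqlite3Fts5IterNext(pIter);
+**   }
+**   sqlite3Fts5IterClose(pIter);
+*/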
+
+/*
+** Close the reader blob handle, if it is open.
+*/
+static void sqlite3Fts5IndexCloseReader(Fts5Index*);
+
+/*
+** This interface is used by the fts5vocab module.
+*/
+static const char *sqlite3Fts5IterTerm(Fts5IndexIter*, int*);
+static int sqlite3Fts5IterNextScan(Fts5IndexIter*);
+
+
+/*
+** Insert or remove data to or from the index. Each time a document is
+** added to or removed from the index, this function is called one or more
+** times.
+**
+** For an insert, it must be called once for each token in the new document.
+** If the operation is a delete, it must be called (at least) once for each
+** unique token in the document with an iCol value less than zero. The iPos
+** argument is ignored for a delete.
+*/
+static int sqlite3Fts5IndexWrite(
+ Fts5Index *p, /* Index to write to */
+ int iCol, /* Column token appears in (-ve -> delete) */
+ int iPos, /* Position of token within column */
+ const char *pToken, int nToken /* Token to add or remove to or from index */
+);
+
+/*
+** Indicate that subsequent calls to sqlite3Fts5IndexWrite() pertain to
+** document iDocid.
+*/
+static int sqlite3Fts5IndexBeginWrite(
+ Fts5Index *p, /* Index to write to */
+ int bDelete, /* True if current operation is a delete */
+ i64 iDocid /* Docid to add or remove data from */
+);
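+
+/* Taken together with sqlite3Fts5IndexWrite() above and
+** sqlite3Fts5IndexSync() below, a document insert is sketched as follows
+** (assuming the caller iterates over the document's tokens; illustration
+** only):
+**
+**   rc = sqlite3Fts5IndexBeginWrite(p, 0, iDocid);
+**   // then, for each token in the document:
+**   if( rc==SQLITE_OK ) rc = sqlite3Fts5IndexWrite(p, iCol, iPos, z, n);
+**   // and finally, at transaction boundaries:
+**   if( rc==SQLITE_OK ) rc = sqlite3Fts5IndexSync(p);
+*/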
+
+/*
+** Flush any data stored in the in-memory hash tables to the database.
+** Also close any open blob handles.
+*/
+static int sqlite3Fts5IndexSync(Fts5Index *p);
+
+/*
+** Discard any data stored in the in-memory hash tables. Do not write it
+** to the database. Additionally, assume that the contents of the %_data
+** table may have changed on disk. So any in-memory caches of %_data
+** records must be invalidated.
+*/
+static int sqlite3Fts5IndexRollback(Fts5Index *p);
+
+/*
+** Get or set the "averages" values.
+*/
+static int sqlite3Fts5IndexGetAverages(Fts5Index *p, i64 *pnRow, i64 *anSize);
+static int sqlite3Fts5IndexSetAverages(Fts5Index *p, const u8*, int);
+
+/*
+** Functions called by the storage module as part of integrity-check.
+*/
+static int sqlite3Fts5IndexIntegrityCheck(Fts5Index*, u64 cksum);
+
+/*
+** Called during virtual-table module initialization to register the
+** fts5_decode() UDF with SQLite.
+*/
+static int sqlite3Fts5IndexInit(sqlite3*);
+
+static int sqlite3Fts5IndexSetCookie(Fts5Index*, int);
+
+/*
+** Return the total number of entries read from the %_data table by
+** this connection since it was created.
+*/
+static int sqlite3Fts5IndexReads(Fts5Index *p);
+
+static int sqlite3Fts5IndexReinit(Fts5Index *p);
+static int sqlite3Fts5IndexOptimize(Fts5Index *p);
+static int sqlite3Fts5IndexMerge(Fts5Index *p, int nMerge);
+static int sqlite3Fts5IndexReset(Fts5Index *p);
+
+static int sqlite3Fts5IndexLoadConfig(Fts5Index *p);
+
+/*
+** End of interface to code in fts5_index.c.
+**************************************************************************/
+
+/**************************************************************************
+** Interface to code in fts5_varint.c.
+*/
+static int sqlite3Fts5GetVarint32(const unsigned char *p, u32 *v);
+static int sqlite3Fts5GetVarintLen(u32 iVal);
+static u8 sqlite3Fts5GetVarint(const unsigned char*, u64*);
+static int sqlite3Fts5PutVarint(unsigned char *p, u64 v);
+
+#define fts5GetVarint32(a,b) sqlite3Fts5GetVarint32(a,(u32*)&b)
+#define fts5GetVarint sqlite3Fts5GetVarint
+
+#define fts5FastGetVarint32(a, iOff, nVal) { \
+ nVal = (a)[iOff++]; \
+ if( nVal & 0x80 ){ \
+ iOff--; \
+ iOff += fts5GetVarint32(&(a)[iOff], nVal); \
+ } \
+}
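+
+/* For example (illustration only), a value round-trips through the varint
+** encoder and decoder, and both calls report the same byte count:
+**
+**   u8 buf[9];
+**   u64 v = 0;
+**   int n = sqlite3Fts5PutVarint(buf, 123456);
+**   assert( sqlite3Fts5GetVarint(buf, &v)==n && v==123456 );
+*/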
+
+
+/*
+** End of interface to code in fts5_varint.c.
+**************************************************************************/
+
+
+/**************************************************************************
+** Interface to code in fts5_main.c.
+*/
+
+/*
+** Virtual-table object.
+*/
+typedef struct Fts5Table Fts5Table;
+struct Fts5Table {
+ sqlite3_vtab base; /* Base class used by SQLite core */
+ Fts5Config *pConfig; /* Virtual table configuration */
+ Fts5Index *pIndex; /* Full-text index */
+};
+
+static int sqlite3Fts5GetTokenizer(
+ Fts5Global*,
+ const char **azArg,
+ int nArg,
+ Fts5Tokenizer**,
+ fts5_tokenizer**,
+ char **pzErr
+);
+
+static Fts5Table *sqlite3Fts5TableFromCsrid(Fts5Global*, i64);
+
+static int sqlite3Fts5FlushToDisk(Fts5Table*);
+
+/*
+** End of interface to code in fts5.c.
+**************************************************************************/
+
+/**************************************************************************
+** Interface to code in fts5_hash.c.
+*/
+typedef struct Fts5Hash Fts5Hash;
+
+/*
+** Create a hash table, free a hash table.
+*/
+static int sqlite3Fts5HashNew(Fts5Config*, Fts5Hash**, int *pnSize);
+static void sqlite3Fts5HashFree(Fts5Hash*);
+
+static int sqlite3Fts5HashWrite(
+ Fts5Hash*,
+ i64 iRowid, /* Rowid for this entry */
+ int iCol, /* Column token appears in (-ve -> delete) */
+ int iPos, /* Position of token within column */
+ char bByte,
+ const char *pToken, int nToken /* Token to add or remove to or from index */
+);
+
+/*
+** Empty (but do not delete) a hash table.
+*/
+static void sqlite3Fts5HashClear(Fts5Hash*);
+
+static int sqlite3Fts5HashQuery(
+ Fts5Hash*, /* Hash table to query */
+ int nPre,
+ const char *pTerm, int nTerm, /* Query term */
+ void **ppObj, /* OUT: Pointer to doclist for pTerm */
+ int *pnDoclist /* OUT: Size of doclist in bytes */
+);
+
+static int sqlite3Fts5HashScanInit(
+ Fts5Hash*, /* Hash table to query */
+ const char *pTerm, int nTerm /* Query prefix */
+);
+static void sqlite3Fts5HashScanNext(Fts5Hash*);
+static int sqlite3Fts5HashScanEof(Fts5Hash*);
+static void sqlite3Fts5HashScanEntry(Fts5Hash *,
+ const char **pzTerm, /* OUT: term (nul-terminated) */
+ const u8 **ppDoclist, /* OUT: pointer to doclist */
+ int *pnDoclist /* OUT: size of doclist in bytes */
+);
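+
+/* A sketch of scanning every hash entry whose term matches a prefix
+** (illustration only):
+**
+**   const char *zTerm; const u8 *pDoclist; int nDoclist;
+**   for(sqlite3Fts5HashScanInit(pHash, "abc", 3);
+**       0==sqlite3Fts5HashScanEof(pHash);
+**       sqlite3Fts5HashScanNext(pHash)
+**   ){
+**     sqlite3Fts5HashScanEntry(pHash, &zTerm, &pDoclist, &nDoclist);
+**     // ... use the entry ...
+**   }
+*/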
+
+
+/*
+** End of interface to code in fts5_hash.c.
+**************************************************************************/
+
+/**************************************************************************
+** Interface to code in fts5_storage.c. fts5_storage.c contains
+** code to access the data stored in the %_content and %_docsize tables.
+*/
+
+#define FTS5_STMT_SCAN_ASC 0 /* SELECT rowid, * FROM ... ORDER BY 1 ASC */
+#define FTS5_STMT_SCAN_DESC 1 /* SELECT rowid, * FROM ... ORDER BY 1 DESC */
+#define FTS5_STMT_LOOKUP 2 /* SELECT rowid, * FROM ... WHERE rowid=? */
+
+typedef struct Fts5Storage Fts5Storage;
+
+static int sqlite3Fts5StorageOpen(Fts5Config*, Fts5Index*, int, Fts5Storage**, char**);
+static int sqlite3Fts5StorageClose(Fts5Storage *p);
+static int sqlite3Fts5StorageRename(Fts5Storage*, const char *zName);
+
+static int sqlite3Fts5DropAll(Fts5Config*);
+static int sqlite3Fts5CreateTable(Fts5Config*, const char*, const char*, int, char **);
+
+static int sqlite3Fts5StorageDelete(Fts5Storage *p, i64, sqlite3_value**);
+static int sqlite3Fts5StorageContentInsert(Fts5Storage *p, sqlite3_value**, i64*);
+static int sqlite3Fts5StorageIndexInsert(Fts5Storage *p, sqlite3_value**, i64);
+
+static int sqlite3Fts5StorageIntegrity(Fts5Storage *p);
+
+static int sqlite3Fts5StorageStmt(Fts5Storage *p, int eStmt, sqlite3_stmt**, char**);
+static void sqlite3Fts5StorageStmtRelease(Fts5Storage *p, int eStmt, sqlite3_stmt*);
+
+static int sqlite3Fts5StorageDocsize(Fts5Storage *p, i64 iRowid, int *aCol);
+static int sqlite3Fts5StorageSize(Fts5Storage *p, int iCol, i64 *pnAvg);
+static int sqlite3Fts5StorageRowCount(Fts5Storage *p, i64 *pnRow);
+
+static int sqlite3Fts5StorageSync(Fts5Storage *p);
+static int sqlite3Fts5StorageRollback(Fts5Storage *p);
+
+static int sqlite3Fts5StorageConfigValue(
+ Fts5Storage *p, const char*, sqlite3_value*, int
+);
+
+static int sqlite3Fts5StorageDeleteAll(Fts5Storage *p);
+static int sqlite3Fts5StorageRebuild(Fts5Storage *p);
+static int sqlite3Fts5StorageOptimize(Fts5Storage *p);
+static int sqlite3Fts5StorageMerge(Fts5Storage *p, int nMerge);
+static int sqlite3Fts5StorageReset(Fts5Storage *p);
+
+/*
+** End of interface to code in fts5_storage.c.
+**************************************************************************/
+
+
+/**************************************************************************
+** Interface to code in fts5_expr.c.
+*/
+typedef struct Fts5Expr Fts5Expr;
+typedef struct Fts5ExprNode Fts5ExprNode;
+typedef struct Fts5Parse Fts5Parse;
+typedef struct Fts5Token Fts5Token;
+typedef struct Fts5ExprPhrase Fts5ExprPhrase;
+typedef struct Fts5ExprNearset Fts5ExprNearset;
+
+struct Fts5Token {
+ const char *p; /* Token text (not NULL terminated) */
+ int n; /* Size of buffer p in bytes */
+};
+
+/* Parse a MATCH expression. */
+static int sqlite3Fts5ExprNew(
+ Fts5Config *pConfig,
+ int iCol, /* Column on LHS of MATCH operator */
+ const char *zExpr,
+ Fts5Expr **ppNew,
+ char **pzErr
+);
+
+/*
+** for(rc = sqlite3Fts5ExprFirst(pExpr, pIdx, bDesc);
+** rc==SQLITE_OK && 0==sqlite3Fts5ExprEof(pExpr);
+** rc = sqlite3Fts5ExprNext(pExpr)
+** ){
+** // The document with rowid iRowid matches the expression!
+** i64 iRowid = sqlite3Fts5ExprRowid(pExpr);
+** }
+*/
+static int sqlite3Fts5ExprFirst(Fts5Expr*, Fts5Index *pIdx, i64 iMin, int bDesc);
+static int sqlite3Fts5ExprNext(Fts5Expr*, i64 iMax);
+static int sqlite3Fts5ExprEof(Fts5Expr*);
+static i64 sqlite3Fts5ExprRowid(Fts5Expr*);
+
+static void sqlite3Fts5ExprFree(Fts5Expr*);
+static int sqlite3Fts5ExprAnd(Fts5Expr **pp1, Fts5Expr *p2);
+
+/* Called during startup to register a UDF with SQLite */
+static int sqlite3Fts5ExprInit(Fts5Global*, sqlite3*);
+
+static int sqlite3Fts5ExprPhraseCount(Fts5Expr*);
+static int sqlite3Fts5ExprPhraseSize(Fts5Expr*, int iPhrase);
+static int sqlite3Fts5ExprPoslist(Fts5Expr*, int, const u8 **);
+
+typedef struct Fts5PoslistPopulator Fts5PoslistPopulator;
+static Fts5PoslistPopulator *sqlite3Fts5ExprClearPoslists(Fts5Expr*, int);
+static int sqlite3Fts5ExprPopulatePoslists(
+ Fts5Config*, Fts5Expr*, Fts5PoslistPopulator*, int, const char*, int
+);
+static void sqlite3Fts5ExprCheckPoslists(Fts5Expr*, i64);
+
+static int sqlite3Fts5ExprClonePhrase(Fts5Expr*, int, Fts5Expr**);
+
+static int sqlite3Fts5ExprPhraseCollist(Fts5Expr *, int, const u8 **, int *);
+
+/*******************************************
+** The fts5_expr.c API above this point is used by the other hand-written
+** C code in this module. The interfaces below this point are called by
+** the parser code in fts5parse.y. */
+
+static void sqlite3Fts5ParseError(Fts5Parse *pParse, const char *zFmt, ...);
+
+static Fts5ExprNode *sqlite3Fts5ParseNode(
+ Fts5Parse *pParse,
+ int eType,
+ Fts5ExprNode *pLeft,
+ Fts5ExprNode *pRight,
+ Fts5ExprNearset *pNear
+);
+
+static Fts5ExprNode *sqlite3Fts5ParseImplicitAnd(
+ Fts5Parse *pParse,
+ Fts5ExprNode *pLeft,
+ Fts5ExprNode *pRight
+);
+
+static Fts5ExprPhrase *sqlite3Fts5ParseTerm(
+ Fts5Parse *pParse,
+ Fts5ExprPhrase *pPhrase,
+ Fts5Token *pToken,
+ int bPrefix
+);
+
+static void sqlite3Fts5ParseSetCaret(Fts5ExprPhrase*);
+
+static Fts5ExprNearset *sqlite3Fts5ParseNearset(
+ Fts5Parse*,
+ Fts5ExprNearset*,
+ Fts5ExprPhrase*
+);
+
+static Fts5Colset *sqlite3Fts5ParseColset(
+ Fts5Parse*,
+ Fts5Colset*,
+ Fts5Token *
+);
+
+static void sqlite3Fts5ParsePhraseFree(Fts5ExprPhrase*);
+static void sqlite3Fts5ParseNearsetFree(Fts5ExprNearset*);
+static void sqlite3Fts5ParseNodeFree(Fts5ExprNode*);
+
+static void sqlite3Fts5ParseSetDistance(Fts5Parse*, Fts5ExprNearset*, Fts5Token*);
+static void sqlite3Fts5ParseSetColset(Fts5Parse*, Fts5ExprNode*, Fts5Colset*);
+static Fts5Colset *sqlite3Fts5ParseColsetInvert(Fts5Parse*, Fts5Colset*);
+static void sqlite3Fts5ParseFinished(Fts5Parse *pParse, Fts5ExprNode *p);
+static void sqlite3Fts5ParseNear(Fts5Parse *pParse, Fts5Token*);
+
+/*
+** End of interface to code in fts5_expr.c.
+**************************************************************************/
+
+
+
+/**************************************************************************
+** Interface to code in fts5_aux.c.
+*/
+
+static int sqlite3Fts5AuxInit(fts5_api*);
+/*
+** End of interface to code in fts5_aux.c.
+**************************************************************************/
+
+/**************************************************************************
+** Interface to code in fts5_tokenizer.c.
+*/
+
+static int sqlite3Fts5TokenizerInit(fts5_api*);
+/*
+** End of interface to code in fts5_tokenizer.c.
+**************************************************************************/
+
+/**************************************************************************
+** Interface to code in fts5_vocab.c.
+*/
+
+static int sqlite3Fts5VocabInit(Fts5Global*, sqlite3*);
+
+/*
+** End of interface to code in fts5_vocab.c.
+**************************************************************************/
+
+
+/**************************************************************************
+** Interface to automatically generated code in fts5_unicode2.c.
+*/
+static int sqlite3Fts5UnicodeIsdiacritic(int c);
+static int sqlite3Fts5UnicodeFold(int c, int bRemoveDiacritic);
+
+static int sqlite3Fts5UnicodeCatParse(const char*, u8*);
+static int sqlite3Fts5UnicodeCategory(u32 iCode);
+static void sqlite3Fts5UnicodeAscii(u8*, u8*);
+/*
+** End of interface to code in fts5_unicode2.c.
+**************************************************************************/
+
+#endif
+
+#define FTS5_OR 1
+#define FTS5_AND 2
+#define FTS5_NOT 3
+#define FTS5_TERM 4
+#define FTS5_COLON 5
+#define FTS5_MINUS 6
+#define FTS5_LCP 7
+#define FTS5_RCP 8
+#define FTS5_STRING 9
+#define FTS5_LP 10
+#define FTS5_RP 11
+#define FTS5_CARET 12
+#define FTS5_COMMA 13
+#define FTS5_PLUS 14
+#define FTS5_STAR 15
+
+/*
+** 2000-05-29
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+** Driver template for the LEMON parser generator.
+**
+** The "lemon" program processes an LALR(1) input grammar file, then uses
+** this template to construct a parser. The "lemon" program inserts text
+** at each "%%" line. Also, any "P-a-r-s-e" identifer prefix (without the
+** interstitial "-" characters) contained in this template is changed into
+** the value of the %name directive from the grammar. Otherwise, the content
+** of this template is copied straight through into the generate parser
+** source file.
+**
+** The following is the concatenation of all %include directives from the
+** input grammar file:
+*/
+/* #include <stdio.h> */
+/* #include <assert.h> */
+/************ Begin %include sections from the grammar ************************/
+
+/* #include "fts5Int.h" */
+/* #include "fts5parse.h" */
+
+/*
+** Disable all error recovery processing in the parser push-down
+** automaton.
+*/
+#define fts5YYNOERRORRECOVERY 1
+
+/*
+** Make fts5yytestcase() the same as testcase()
+*/
+#define fts5yytestcase(X) testcase(X)
+
+/*
+** Indicate that sqlite3ParserFree() will never be called with a null
+** pointer.
+*/
+#define fts5YYPARSEFREENOTNULL 1
+
+/*
+** Alternative datatype for the argument to the malloc() routine passed
+** into sqlite3ParserAlloc(). The default is size_t.
+*/
+#define fts5YYMALLOCARGTYPE u64
+
+/**************** End of %include directives **********************************/
+/* These constants specify the various numeric values for terminal symbols
+** in a format understandable to "makeheaders". This section is blank unless
+** "lemon" is run with the "-m" command-line option.
+***************** Begin makeheaders token definitions *************************/
+/**************** End makeheaders token definitions ***************************/
+
+/* The next section is a series of control #defines that govern
+** various aspects of the generated parser.
+** fts5YYCODETYPE is the data type used to store the integer codes
+** that represent terminal and non-terminal symbols.
+** "unsigned char" is used if there are fewer than
+** 256 symbols. Larger types otherwise.
+** fts5YYNOCODE is a number of type fts5YYCODETYPE that is not used for
+** any terminal or nonterminal symbol.
+** fts5YYFALLBACK If defined, this indicates that one or more tokens
+** (also known as: "terminal symbols") have fall-back
+** values which should be used if the original symbol
+** would not parse. This permits keywords to sometimes
+** be used as identifiers, for example.
+** fts5YYACTIONTYPE is the data type used for "action codes" - numbers
+** that indicate what to do in response to the next
+** token.
+** sqlite3Fts5ParserFTS5TOKENTYPE is the data type used for the minor type of
+** terminal symbols. Background: A "minor type" is a semantic
+** value associated with a terminal or non-terminal
+** symbol. For example, for an "ID" terminal symbol,
+** the minor type might be the name of the identifier.
+** Each non-terminal can have a different minor type.
+** Terminal symbols all have the same minor type, though.
+** This macro defines the minor type for terminal
+** symbols.
+** fts5YYMINORTYPE is the data type used for all minor types.
+** This is typically a union of many types, one of
+** which is sqlite3Fts5ParserFTS5TOKENTYPE. The entry in the union
+** for terminal symbols is called "fts5yy0".
+** fts5YYSTACKDEPTH is the maximum depth of the parser's stack. If
+** zero, the stack is dynamically sized using realloc().
+** sqlite3Fts5ParserARG_SDECL A static variable declaration for the %extra_argument
+** sqlite3Fts5ParserARG_PDECL A parameter declaration for the %extra_argument
+** sqlite3Fts5ParserARG_PARAM Code to pass %extra_argument as a subroutine parameter
+** sqlite3Fts5ParserARG_STORE Code to store %extra_argument into fts5yypParser
+** sqlite3Fts5ParserARG_FETCH Code to extract %extra_argument from fts5yypParser
+** sqlite3Fts5ParserCTX_* As sqlite3Fts5ParserARG_ except for %extra_context
+** fts5YYERRORSYMBOL is the code number of the error symbol. If not
+** defined, then do no error processing.
+** fts5YYNSTATE the combined number of states.
+** fts5YYNRULE the number of rules in the grammar
+** fts5YYNFTS5TOKEN Number of terminal symbols
+** fts5YY_MAX_SHIFT Maximum value for shift actions
+** fts5YY_MIN_SHIFTREDUCE Minimum value for shift-reduce actions
+** fts5YY_MAX_SHIFTREDUCE Maximum value for shift-reduce actions
+** fts5YY_ERROR_ACTION The fts5yy_action[] code for syntax error
+** fts5YY_ACCEPT_ACTION The fts5yy_action[] code for accept
+** fts5YY_NO_ACTION The fts5yy_action[] code for no-op
+** fts5YY_MIN_REDUCE Minimum value for reduce actions
+** fts5YY_MAX_REDUCE Maximum value for reduce actions
+*/
+#ifndef INTERFACE
+# define INTERFACE 1
+#endif
+/************* Begin control #defines *****************************************/
+#define fts5YYCODETYPE unsigned char
+#define fts5YYNOCODE 27
+#define fts5YYACTIONTYPE unsigned char
+#define sqlite3Fts5ParserFTS5TOKENTYPE Fts5Token
+typedef union {
+ int fts5yyinit;
+ sqlite3Fts5ParserFTS5TOKENTYPE fts5yy0;
+ int fts5yy4;
+ Fts5Colset* fts5yy11;
+ Fts5ExprNode* fts5yy24;
+ Fts5ExprNearset* fts5yy46;
+ Fts5ExprPhrase* fts5yy53;
+} fts5YYMINORTYPE;
+#ifndef fts5YYSTACKDEPTH
+#define fts5YYSTACKDEPTH 100
+#endif
+#define sqlite3Fts5ParserARG_SDECL Fts5Parse *pParse;
+#define sqlite3Fts5ParserARG_PDECL ,Fts5Parse *pParse
+#define sqlite3Fts5ParserARG_PARAM ,pParse
+#define sqlite3Fts5ParserARG_FETCH Fts5Parse *pParse=fts5yypParser->pParse;
+#define sqlite3Fts5ParserARG_STORE fts5yypParser->pParse=pParse;
+#define sqlite3Fts5ParserCTX_SDECL
+#define sqlite3Fts5ParserCTX_PDECL
+#define sqlite3Fts5ParserCTX_PARAM
+#define sqlite3Fts5ParserCTX_FETCH
+#define sqlite3Fts5ParserCTX_STORE
+#define fts5YYNSTATE 35
+#define fts5YYNRULE 28
+#define fts5YYNRULE_WITH_ACTION 28
+#define fts5YYNFTS5TOKEN 16
+#define fts5YY_MAX_SHIFT 34
+#define fts5YY_MIN_SHIFTREDUCE 52
+#define fts5YY_MAX_SHIFTREDUCE 79
+#define fts5YY_ERROR_ACTION 80
+#define fts5YY_ACCEPT_ACTION 81
+#define fts5YY_NO_ACTION 82
+#define fts5YY_MIN_REDUCE 83
+#define fts5YY_MAX_REDUCE 110
+/************* End control #defines *******************************************/
+#define fts5YY_NLOOKAHEAD ((int)(sizeof(fts5yy_lookahead)/sizeof(fts5yy_lookahead[0])))
+
+/* Define the fts5yytestcase() macro to be a no-op if it has not already
+** been defined elsewhere.
+**
+** Applications can choose to define fts5yytestcase() in the %include section
+** to a macro that can assist in verifying code coverage. For production
+** code the fts5yytestcase() macro should be turned off. But it is useful
+** for testing.
+*/
+#ifndef fts5yytestcase
+# define fts5yytestcase(X)
+#endif
+
+
+/* Next are the tables used to determine what action to take based on the
+** current state and lookahead token. These tables are used to implement
+** functions that take a state number and lookahead value and return an
+** action integer.
+**
+** Suppose the action integer is N. Then the action is determined as
+** follows
+**
+** 0 <= N <= fts5YY_MAX_SHIFT Shift N. That is, push the lookahead
+** token onto the stack and goto state N.
+**
+** N between fts5YY_MIN_SHIFTREDUCE Shift to an arbitrary state then
+** and fts5YY_MAX_SHIFTREDUCE reduce by rule N-fts5YY_MIN_SHIFTREDUCE.
+**
+** N == fts5YY_ERROR_ACTION A syntax error has occurred.
+**
+** N == fts5YY_ACCEPT_ACTION The parser accepts its input.
+**
+** N == fts5YY_NO_ACTION No such action. Denotes unused
+** slots in the fts5yy_action[] table.
+**
+** N between fts5YY_MIN_REDUCE Reduce by rule N-fts5YY_MIN_REDUCE
+** and fts5YY_MAX_REDUCE
+**
+** The action table is constructed as a single large table named fts5yy_action[].
+** Given state S and lookahead X, the action is computed as either:
+**
+** (A) N = fts5yy_action[ fts5yy_shift_ofst[S] + X ]
+** (B) N = fts5yy_default[S]
+**
+** The (A) formula is preferred. The (B) formula is used instead if
+** fts5yy_lookahead[fts5yy_shift_ofst[S]+X] is not equal to X.
+**
+** The formulas above are for computing the action when the lookahead is
+** a terminal symbol. If the lookahead is a non-terminal (as occurs after
+** a reduce action) then the fts5yy_reduce_ofst[] array is used in place of
+** the fts5yy_shift_ofst[] array.
+**
+** The following are the tables generated in this section:
+**
+** fts5yy_action[] A single table containing all actions.
+** fts5yy_lookahead[] A table containing the lookahead for each entry in
+** fts5yy_action. Used to detect hash collisions.
+** fts5yy_shift_ofst[] For each state, the offset into fts5yy_action for
+** shifting terminals.
+** fts5yy_reduce_ofst[] For each state, the offset into fts5yy_action for
+** shifting non-terminals after a reduce.
+** fts5yy_default[] Default action for each state.
+**
+*********** Begin parsing tables **********************************************/
+#define fts5YY_ACTTAB_COUNT (105)
+static const fts5YYACTIONTYPE fts5yy_action[] = {
+ /* 0 */ 81, 20, 96, 6, 28, 99, 98, 26, 26, 18,
+ /* 10 */ 96, 6, 28, 17, 98, 56, 26, 19, 96, 6,
+ /* 20 */ 28, 14, 98, 14, 26, 31, 92, 96, 6, 28,
+ /* 30 */ 108, 98, 25, 26, 21, 96, 6, 28, 78, 98,
+ /* 40 */ 58, 26, 29, 96, 6, 28, 107, 98, 22, 26,
+ /* 50 */ 24, 16, 12, 11, 1, 13, 13, 24, 16, 23,
+ /* 60 */ 11, 33, 34, 13, 97, 8, 27, 32, 98, 7,
+ /* 70 */ 26, 3, 4, 5, 3, 4, 5, 3, 83, 4,
+ /* 80 */ 5, 3, 63, 5, 3, 62, 12, 2, 86, 13,
+ /* 90 */ 9, 30, 10, 10, 54, 57, 75, 78, 78, 53,
+ /* 100 */ 57, 15, 82, 82, 71,
+};
+static const fts5YYCODETYPE fts5yy_lookahead[] = {
+ /* 0 */ 16, 17, 18, 19, 20, 22, 22, 24, 24, 17,
+ /* 10 */ 18, 19, 20, 7, 22, 9, 24, 17, 18, 19,
+ /* 20 */ 20, 9, 22, 9, 24, 13, 17, 18, 19, 20,
+ /* 30 */ 26, 22, 24, 24, 17, 18, 19, 20, 15, 22,
+ /* 40 */ 9, 24, 17, 18, 19, 20, 26, 22, 21, 24,
+ /* 50 */ 6, 7, 9, 9, 10, 12, 12, 6, 7, 21,
+ /* 60 */ 9, 24, 25, 12, 18, 5, 20, 14, 22, 5,
+ /* 70 */ 24, 3, 1, 2, 3, 1, 2, 3, 0, 1,
+ /* 80 */ 2, 3, 11, 2, 3, 11, 9, 10, 5, 12,
+ /* 90 */ 23, 24, 10, 10, 8, 9, 9, 15, 15, 8,
+ /* 100 */ 9, 9, 27, 27, 11, 27, 27, 27, 27, 27,
+ /* 110 */ 27, 27, 27, 27, 27, 27, 27, 27, 27, 27,
+ /* 120 */ 27,
+};
+#define fts5YY_SHIFT_COUNT (34)
+#define fts5YY_SHIFT_MIN (0)
+#define fts5YY_SHIFT_MAX (93)
+static const unsigned char fts5yy_shift_ofst[] = {
+ /* 0 */ 44, 44, 44, 44, 44, 44, 51, 77, 43, 12,
+ /* 10 */ 14, 83, 82, 14, 23, 23, 31, 31, 71, 74,
+ /* 20 */ 78, 81, 86, 91, 6, 53, 53, 60, 64, 68,
+ /* 30 */ 53, 87, 92, 53, 93,
+};
+#define fts5YY_REDUCE_COUNT (17)
+#define fts5YY_REDUCE_MIN (-17)
+#define fts5YY_REDUCE_MAX (67)
+static const signed char fts5yy_reduce_ofst[] = {
+ /* 0 */ -16, -8, 0, 9, 17, 25, 46, -17, -17, 37,
+ /* 10 */ 67, 4, 4, 8, 4, 20, 27, 38,
+};
+static const fts5YYACTIONTYPE fts5yy_default[] = {
+ /* 0 */ 80, 80, 80, 80, 80, 80, 95, 80, 80, 105,
+ /* 10 */ 80, 110, 110, 80, 110, 110, 80, 80, 80, 80,
+ /* 20 */ 80, 91, 80, 80, 80, 101, 100, 80, 80, 90,
+ /* 30 */ 103, 80, 80, 104, 80,
+};
+/********** End of lemon-generated parsing tables *****************************/
+
+/* The next table maps tokens (terminal symbols) into fallback tokens.
+** If a construct like the following:
+**
+** %fallback ID X Y Z.
+**
+** appears in the grammar, then ID becomes a fallback token for X, Y,
+** and Z. Whenever one of the tokens X, Y, or Z is input to the parser
+** but it does not parse, the type of the token is changed to ID and
+** the parse is retried before an error is thrown.
+**
+** This feature can be used, for example, to cause some keywords in a language
+** to revert to identifiers if the keyword does not apply in the context where
+** it appears.
+*/
+#ifdef fts5YYFALLBACK
+static const fts5YYCODETYPE fts5yyFallback[] = {
+};
+#endif /* fts5YYFALLBACK */
+
+/* The following structure represents a single element of the
+** parser's stack. Information stored includes:
+**
+** + The state number for the parser at this level of the stack.
+**
+** + The value of the token stored at this level of the stack.
+** (In other words, the "major" token.)
+**
+** + The semantic value stored at this level of the stack. This is
+** the information used by the action routines in the grammar.
+** It is sometimes called the "minor" token.
+**
+** After the "shift" half of a SHIFTREDUCE action, the stateno field
+** actually contains the reduce action for the second half of the
+** SHIFTREDUCE.
+*/
+struct fts5yyStackEntry {
+ fts5YYACTIONTYPE stateno; /* The state-number, or reduce action in SHIFTREDUCE */
+ fts5YYCODETYPE major; /* The major token value. This is the code
+ ** number for the token at this stack level */
+ fts5YYMINORTYPE minor; /* The user-supplied minor token value. This
+ ** is the value of the token */
+};
+typedef struct fts5yyStackEntry fts5yyStackEntry;
+
+/* The state of the parser is completely contained in an instance of
+** the following structure */
+struct fts5yyParser {
+ fts5yyStackEntry *fts5yytos; /* Pointer to top element of the stack */
+#ifdef fts5YYTRACKMAXSTACKDEPTH
+ int fts5yyhwm; /* High-water mark of the stack */
+#endif
+#ifndef fts5YYNOERRORRECOVERY
+ int fts5yyerrcnt; /* Shifts left before leaving error-recovery mode */
+#endif
+ sqlite3Fts5ParserARG_SDECL /* A place to hold %extra_argument */
+ sqlite3Fts5ParserCTX_SDECL /* A place to hold %extra_context */
+#if fts5YYSTACKDEPTH<=0
+ int fts5yystksz; /* Current size of the stack */
+ fts5yyStackEntry *fts5yystack; /* The parser's stack */
+ fts5yyStackEntry fts5yystk0; /* First stack entry */
+#else
+ fts5yyStackEntry fts5yystack[fts5YYSTACKDEPTH]; /* The parser's stack */
+ fts5yyStackEntry *fts5yystackEnd; /* Last entry in the stack */
+#endif
+};
+typedef struct fts5yyParser fts5yyParser;
+
+#ifndef NDEBUG
+/* #include <stdio.h> */
+static FILE *fts5yyTraceFILE = 0;
+static char *fts5yyTracePrompt = 0;
+#endif /* NDEBUG */
+
+#ifndef NDEBUG
+/*
+** Turn parser tracing on by giving a stream to which to write the trace
+** and a prompt to preface each trace message. Tracing is turned off
+** by making either argument NULL.
+**
+** Inputs:
+** <ul>
+** <li> A FILE* to which trace output should be written.
+** If NULL, then tracing is turned off.
+** <li> A prefix string written at the beginning of every
+** line of trace output. If NULL, then tracing is
+** turned off.
+** </ul>
+**
+** Outputs:
+** None.
+*/
+static void sqlite3Fts5ParserTrace(FILE *TraceFILE, char *zTracePrompt){
+ fts5yyTraceFILE = TraceFILE;
+ fts5yyTracePrompt = zTracePrompt;
+ if( fts5yyTraceFILE==0 ) fts5yyTracePrompt = 0;
+ else if( fts5yyTracePrompt==0 ) fts5yyTraceFILE = 0;
+}
+#endif /* NDEBUG */
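+
+/* For example (illustration only), debug builds can enable tracing with:
+**
+**   sqlite3Fts5ParserTrace(stderr, "fts5parse: ");
+*/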
+
+#if defined(fts5YYCOVERAGE) || !defined(NDEBUG)
+/* For tracing shifts, the names of all terminals and nonterminals
+** are required. The following table supplies these names */
+static const char *const fts5yyTokenName[] = {
+ /* 0 */ "$",
+ /* 1 */ "OR",
+ /* 2 */ "AND",
+ /* 3 */ "NOT",
+ /* 4 */ "TERM",
+ /* 5 */ "COLON",
+ /* 6 */ "MINUS",
+ /* 7 */ "LCP",
+ /* 8 */ "RCP",
+ /* 9 */ "STRING",
+ /* 10 */ "LP",
+ /* 11 */ "RP",
+ /* 12 */ "CARET",
+ /* 13 */ "COMMA",
+ /* 14 */ "PLUS",
+ /* 15 */ "STAR",
+ /* 16 */ "input",
+ /* 17 */ "expr",
+ /* 18 */ "cnearset",
+ /* 19 */ "exprlist",
+ /* 20 */ "colset",
+ /* 21 */ "colsetlist",
+ /* 22 */ "nearset",
+ /* 23 */ "nearphrases",
+ /* 24 */ "phrase",
+ /* 25 */ "neardist_opt",
+ /* 26 */ "star_opt",
+};
+#endif /* defined(fts5YYCOVERAGE) || !defined(NDEBUG) */
+
+#ifndef NDEBUG
+/* For tracing reduce actions, the names of all rules are required.
+*/
+static const char *const fts5yyRuleName[] = {
+ /* 0 */ "input ::= expr",
+ /* 1 */ "colset ::= MINUS LCP colsetlist RCP",
+ /* 2 */ "colset ::= LCP colsetlist RCP",
+ /* 3 */ "colset ::= STRING",
+ /* 4 */ "colset ::= MINUS STRING",
+ /* 5 */ "colsetlist ::= colsetlist STRING",
+ /* 6 */ "colsetlist ::= STRING",
+ /* 7 */ "expr ::= expr AND expr",
+ /* 8 */ "expr ::= expr OR expr",
+ /* 9 */ "expr ::= expr NOT expr",
+ /* 10 */ "expr ::= colset COLON LP expr RP",
+ /* 11 */ "expr ::= LP expr RP",
+ /* 12 */ "expr ::= exprlist",
+ /* 13 */ "exprlist ::= cnearset",
+ /* 14 */ "exprlist ::= exprlist cnearset",
+ /* 15 */ "cnearset ::= nearset",
+ /* 16 */ "cnearset ::= colset COLON nearset",
+ /* 17 */ "nearset ::= phrase",
+ /* 18 */ "nearset ::= CARET phrase",
+ /* 19 */ "nearset ::= STRING LP nearphrases neardist_opt RP",
+ /* 20 */ "nearphrases ::= phrase",
+ /* 21 */ "nearphrases ::= nearphrases phrase",
+ /* 22 */ "neardist_opt ::=",
+ /* 23 */ "neardist_opt ::= COMMA STRING",
+ /* 24 */ "phrase ::= phrase PLUS STRING star_opt",
+ /* 25 */ "phrase ::= STRING star_opt",
+ /* 26 */ "star_opt ::= STAR",
+ /* 27 */ "star_opt ::=",
+};
+#endif /* NDEBUG */
+
+
+#if fts5YYSTACKDEPTH<=0
+/*
+** Try to increase the size of the parser stack. Return the number
+** of errors. Return 0 on success.
+*/
+static int fts5yyGrowStack(fts5yyParser *p){
+ int newSize;
+ int idx;
+ fts5yyStackEntry *pNew;
+
+ newSize = p->fts5yystksz*2 + 100;
+ idx = p->fts5yytos ? (int)(p->fts5yytos - p->fts5yystack) : 0;
+ if( p->fts5yystack==&p->fts5yystk0 ){
+ pNew = malloc(newSize*sizeof(pNew[0]));
+ if( pNew ) pNew[0] = p->fts5yystk0;
+ }else{
+ pNew = realloc(p->fts5yystack, newSize*sizeof(pNew[0]));
+ }
+ if( pNew ){
+ p->fts5yystack = pNew;
+ p->fts5yytos = &p->fts5yystack[idx];
+#ifndef NDEBUG
+ if( fts5yyTraceFILE ){
+ fprintf(fts5yyTraceFILE,"%sStack grows from %d to %d entries.\n",
+ fts5yyTracePrompt, p->fts5yystksz, newSize);
+ }
+#endif
+ p->fts5yystksz = newSize;
+ }
+ return pNew==0;
+}
+#endif
+
+/* Datatype of the argument to the memory allocator passed as the
+** second argument to sqlite3Fts5ParserAlloc() below. This can be changed by
+** putting an appropriate #define in the %include section of the input
+** grammar.
+*/
+#ifndef fts5YYMALLOCARGTYPE
+# define fts5YYMALLOCARGTYPE size_t
+#endif
+
+/* Initialize a new parser that has already been allocated.
+*/
+static void sqlite3Fts5ParserInit(void *fts5yypRawParser sqlite3Fts5ParserCTX_PDECL){
+ fts5yyParser *fts5yypParser = (fts5yyParser*)fts5yypRawParser;
+ sqlite3Fts5ParserCTX_STORE
+#ifdef fts5YYTRACKMAXSTACKDEPTH
+ fts5yypParser->fts5yyhwm = 0;
+#endif
+#if fts5YYSTACKDEPTH<=0
+ fts5yypParser->fts5yytos = NULL;
+ fts5yypParser->fts5yystack = NULL;
+ fts5yypParser->fts5yystksz = 0;
+ if( fts5yyGrowStack(fts5yypParser) ){
+ fts5yypParser->fts5yystack = &fts5yypParser->fts5yystk0;
+ fts5yypParser->fts5yystksz = 1;
+ }
+#endif
+#ifndef fts5YYNOERRORRECOVERY
+ fts5yypParser->fts5yyerrcnt = -1;
+#endif
+ fts5yypParser->fts5yytos = fts5yypParser->fts5yystack;
+ fts5yypParser->fts5yystack[0].stateno = 0;
+ fts5yypParser->fts5yystack[0].major = 0;
+#if fts5YYSTACKDEPTH>0
+ fts5yypParser->fts5yystackEnd = &fts5yypParser->fts5yystack[fts5YYSTACKDEPTH-1];
+#endif
+}
+
+#ifndef sqlite3Fts5Parser_ENGINEALWAYSONSTACK
+/*
+** This function allocates a new parser.
+** The only argument is a pointer to a function which works like
+** malloc.
+**
+** Inputs:
+** A pointer to the function used to allocate memory.
+**
+** Outputs:
+** A pointer to a parser. This pointer is used in subsequent calls
+** to sqlite3Fts5Parser and sqlite3Fts5ParserFree.
+*/
+static void *sqlite3Fts5ParserAlloc(void *(*mallocProc)(fts5YYMALLOCARGTYPE) sqlite3Fts5ParserCTX_PDECL){
+ fts5yyParser *fts5yypParser;
+ fts5yypParser = (fts5yyParser*)(*mallocProc)( (fts5YYMALLOCARGTYPE)sizeof(fts5yyParser) );
+ if( fts5yypParser ){
+ sqlite3Fts5ParserCTX_STORE
+ sqlite3Fts5ParserInit(fts5yypParser sqlite3Fts5ParserCTX_PARAM);
+ }
+ return (void*)fts5yypParser;
+}
+#endif /* sqlite3Fts5Parser_ENGINEALWAYSONSTACK */
+
+
+/* The following function deletes the "minor type" or semantic value
+** associated with a symbol. The symbol can be either a terminal
+** or nonterminal. "fts5yymajor" is the symbol code, and "fts5yypminor" is
+** a pointer to the value to be deleted. The code used to do the
+** deletions is derived from the %destructor and/or %token_destructor
+** directives of the input grammar.
+*/
+static void fts5yy_destructor(
+ fts5yyParser *fts5yypParser, /* The parser */
+ fts5YYCODETYPE fts5yymajor, /* Type code for object to destroy */
+ fts5YYMINORTYPE *fts5yypminor /* The object to be destroyed */
+){
+ sqlite3Fts5ParserARG_FETCH
+ sqlite3Fts5ParserCTX_FETCH
+ switch( fts5yymajor ){
+ /* Here are inserted the actions that take place when a
+ ** terminal or non-terminal is destroyed. This can happen
+ ** when the symbol is popped from the stack during a
+ ** reduce or during error processing or when a parser is
+ ** being destroyed before it is finished parsing.
+ **
+ ** Note: during a reduce, the only symbols destroyed are those
+ ** which appear on the RHS of the rule, but which are *not* used
+ ** inside the C code.
+ */
+/********* Begin destructor definitions ***************************************/
+ case 16: /* input */
+{
+ (void)pParse;
+}
+ break;
+ case 17: /* expr */
+ case 18: /* cnearset */
+ case 19: /* exprlist */
+{
+ sqlite3Fts5ParseNodeFree((fts5yypminor->fts5yy24));
+}
+ break;
+ case 20: /* colset */
+ case 21: /* colsetlist */
+{
+ sqlite3_free((fts5yypminor->fts5yy11));
+}
+ break;
+ case 22: /* nearset */
+ case 23: /* nearphrases */
+{
+ sqlite3Fts5ParseNearsetFree((fts5yypminor->fts5yy46));
+}
+ break;
+ case 24: /* phrase */
+{
+ sqlite3Fts5ParsePhraseFree((fts5yypminor->fts5yy53));
+}
+ break;
+/********* End destructor definitions *****************************************/
+ default: break; /* If no destructor action specified: do nothing */
+ }
+}
+
+/*
+** Pop the parser's stack once.
+**
+** If there is a destructor routine associated with the token which
+** is popped from the stack, then call it.
+*/
+static void fts5yy_pop_parser_stack(fts5yyParser *pParser){
+ fts5yyStackEntry *fts5yytos;
+ assert( pParser->fts5yytos!=0 );
+ assert( pParser->fts5yytos > pParser->fts5yystack );
+ fts5yytos = pParser->fts5yytos--;
+#ifndef NDEBUG
+ if( fts5yyTraceFILE ){
+ fprintf(fts5yyTraceFILE,"%sPopping %s\n",
+ fts5yyTracePrompt,
+ fts5yyTokenName[fts5yytos->major]);
+ }
+#endif
+ fts5yy_destructor(pParser, fts5yytos->major, &fts5yytos->minor);
+}
+
+/*
+** Clear all secondary memory allocations from the parser
+*/
+static void sqlite3Fts5ParserFinalize(void *p){
+ fts5yyParser *pParser = (fts5yyParser*)p;
+ while( pParser->fts5yytos>pParser->fts5yystack ) fts5yy_pop_parser_stack(pParser);
+#if fts5YYSTACKDEPTH<=0
+ if( pParser->fts5yystack!=&pParser->fts5yystk0 ) free(pParser->fts5yystack);
+#endif
+}
+
+#ifndef sqlite3Fts5Parser_ENGINEALWAYSONSTACK
+/*
+** Deallocate and destroy a parser. Destructors are called for
+** all stack elements before shutting the parser down.
+**
+** If the fts5YYPARSEFREENEVERNULL macro exists (for example because it
+** is defined in a %include section of the input grammar) then it is
+** assumed that the input pointer is never NULL.
+*/
+static void sqlite3Fts5ParserFree(
+ void *p, /* The parser to be deleted */
+ void (*freeProc)(void*) /* Function used to reclaim memory */
+){
+#ifndef fts5YYPARSEFREENEVERNULL
+ if( p==0 ) return;
+#endif
+ sqlite3Fts5ParserFinalize(p);
+ (*freeProc)(p);
+}
+#endif /* sqlite3Fts5Parser_ENGINEALWAYSONSTACK */
+
+/*
+** Return the peak depth of the stack for a parser.
+*/
+#ifdef fts5YYTRACKMAXSTACKDEPTH
+static int sqlite3Fts5ParserStackPeak(void *p){
+ fts5yyParser *pParser = (fts5yyParser*)p;
+ return pParser->fts5yyhwm;
+}
+#endif
+
+/* This array of booleans keeps track of the parser statement
+** coverage. The element fts5yycoverage[X][Y] is set when the parser
+** is in state X and has a lookahead token Y. In a well-tested
+** system, every element of this matrix should end up being set.
+*/
+#if defined(fts5YYCOVERAGE)
+static unsigned char fts5yycoverage[fts5YYNSTATE][fts5YYNFTS5TOKEN];
+#endif
+
+/*
+** Write into out a description of every state/lookahead combination that
+**
+** (1) has not been used by the parser, and
+** (2) is not a syntax error.
+**
+** Return the number of missed state/lookahead combinations.
+*/
+#if defined(fts5YYCOVERAGE)
+static int sqlite3Fts5ParserCoverage(FILE *out){
+ int stateno, iLookAhead, i;
+ int nMissed = 0;
+ for(stateno=0; stateno<fts5YYNSTATE; stateno++){
+ i = fts5yy_shift_ofst[stateno];
+ for(iLookAhead=0; iLookAhead<fts5YYNFTS5TOKEN; iLookAhead++){
+ if( fts5yy_lookahead[i+iLookAhead]!=iLookAhead ) continue;
+ if( fts5yycoverage[stateno][iLookAhead]==0 ) nMissed++;
+ if( out ){
+ fprintf(out,"State %d lookahead %s %s\n", stateno,
+ fts5yyTokenName[iLookAhead],
+ fts5yycoverage[stateno][iLookAhead] ? "ok" : "missed");
+ }
+ }
+ }
+ return nMissed;
+}
+#endif
+
+/*
+** Find the appropriate action for a parser given the terminal
+** look-ahead token iLookAhead.
+*/
+static fts5YYACTIONTYPE fts5yy_find_shift_action(
+ fts5YYCODETYPE iLookAhead, /* The look-ahead token */
+ fts5YYACTIONTYPE stateno /* Current state number */
+){
+ int i;
+
+ if( stateno>fts5YY_MAX_SHIFT ) return stateno;
+ assert( stateno <= fts5YY_SHIFT_COUNT );
+#if defined(fts5YYCOVERAGE)
+ fts5yycoverage[stateno][iLookAhead] = 1;
+#endif
+ do{
+ i = fts5yy_shift_ofst[stateno];
+ assert( i>=0 );
+ assert( i<=fts5YY_ACTTAB_COUNT );
+ assert( i+fts5YYNFTS5TOKEN<=(int)fts5YY_NLOOKAHEAD );
+ assert( iLookAhead!=fts5YYNOCODE );
+ assert( iLookAhead < fts5YYNFTS5TOKEN );
+ i += iLookAhead;
+ assert( i<(int)fts5YY_NLOOKAHEAD );
+ if( fts5yy_lookahead[i]!=iLookAhead ){
+#ifdef fts5YYFALLBACK
+ fts5YYCODETYPE iFallback; /* Fallback token */
+ assert( iLookAhead<sizeof(fts5yyFallback)/sizeof(fts5yyFallback[0]) );
+ iFallback = fts5yyFallback[iLookAhead];
+ if( iFallback!=0 ){
+#ifndef NDEBUG
+ if( fts5yyTraceFILE ){
+ fprintf(fts5yyTraceFILE, "%sFALLBACK %s => %s\n",
+ fts5yyTracePrompt, fts5yyTokenName[iLookAhead], fts5yyTokenName[iFallback]);
+ }
+#endif
+ assert( fts5yyFallback[iFallback]==0 ); /* Fallback loop must terminate */
+ iLookAhead = iFallback;
+ continue;
+ }
+#endif
+#ifdef fts5YYWILDCARD
+ {
+ int j = i - iLookAhead + fts5YYWILDCARD;
+ assert( j<(int)(sizeof(fts5yy_lookahead)/sizeof(fts5yy_lookahead[0])) );
+ if( fts5yy_lookahead[j]==fts5YYWILDCARD && iLookAhead>0 ){
+#ifndef NDEBUG
+ if( fts5yyTraceFILE ){
+ fprintf(fts5yyTraceFILE, "%sWILDCARD %s => %s\n",
+ fts5yyTracePrompt, fts5yyTokenName[iLookAhead],
+ fts5yyTokenName[fts5YYWILDCARD]);
+ }
+#endif /* NDEBUG */
+ return fts5yy_action[j];
+ }
+ }
+#endif /* fts5YYWILDCARD */
+ return fts5yy_default[stateno];
+ }else{
+ assert( i>=0 && i<sizeof(fts5yy_action)/sizeof(fts5yy_action[0]) );
+ return fts5yy_action[i];
+ }
+ }while(1);
+}
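+
+/* A simplified sketch (comment only, not compiled) of the lookup above:
+** with the fallback and wildcard special cases stripped away, the probe
+** reduces to a single offset-table access:
+**
+**   i = fts5yy_shift_ofst[stateno] + iLookAhead;
+**   act = fts5yy_lookahead[i]==iLookAhead
+**           ? fts5yy_action[i]            (hit: explicit action)
+**           : fts5yy_default[stateno];    (miss: default reduction)
+**
+** The do/while loop exists only so that a fallback token can retry the
+** probe with a substitute lookahead code.
+*/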
+
+/*
+** Find the appropriate action for a parser given the non-terminal
+** look-ahead token iLookAhead.
+*/
+static fts5YYACTIONTYPE fts5yy_find_reduce_action(
+ fts5YYACTIONTYPE stateno, /* Current state number */
+ fts5YYCODETYPE iLookAhead /* The look-ahead token */
+){
+ int i;
+#ifdef fts5YYERRORSYMBOL
+ if( stateno>fts5YY_REDUCE_COUNT ){
+ return fts5yy_default[stateno];
+ }
+#else
+ assert( stateno<=fts5YY_REDUCE_COUNT );
+#endif
+ i = fts5yy_reduce_ofst[stateno];
+ assert( iLookAhead!=fts5YYNOCODE );
+ i += iLookAhead;
+#ifdef fts5YYERRORSYMBOL
+ if( i<0 || i>=fts5YY_ACTTAB_COUNT || fts5yy_lookahead[i]!=iLookAhead ){
+ return fts5yy_default[stateno];
+ }
+#else
+ assert( i>=0 && i<fts5YY_ACTTAB_COUNT );
+ assert( fts5yy_lookahead[i]==iLookAhead );
+#endif
+ return fts5yy_action[i];
+}
+
+/*
+** The following routine is called if the stack overflows.
+*/
+static void fts5yyStackOverflow(fts5yyParser *fts5yypParser){
+ sqlite3Fts5ParserARG_FETCH
+ sqlite3Fts5ParserCTX_FETCH
+#ifndef NDEBUG
+ if( fts5yyTraceFILE ){
+ fprintf(fts5yyTraceFILE,"%sStack Overflow!\n",fts5yyTracePrompt);
+ }
+#endif
+ while( fts5yypParser->fts5yytos>fts5yypParser->fts5yystack ) fts5yy_pop_parser_stack(fts5yypParser);
+ /* Here code is inserted which will execute if the parser
+  ** stack ever overflows */
+/******** Begin %stack_overflow code ******************************************/
+
+ sqlite3Fts5ParseError(pParse, "fts5: parser stack overflow");
+/******** End %stack_overflow code ********************************************/
+ sqlite3Fts5ParserARG_STORE /* Suppress warning about unused %extra_argument var */
+ sqlite3Fts5ParserCTX_STORE
+}
+
+/*
+** Print tracing information for a SHIFT action
+*/
+#ifndef NDEBUG
+static void fts5yyTraceShift(fts5yyParser *fts5yypParser, int fts5yyNewState, const char *zTag){
+ if( fts5yyTraceFILE ){
+ if( fts5yyNewState<fts5YYNSTATE ){
+ fprintf(fts5yyTraceFILE,"%s%s '%s', go to state %d\n",
+ fts5yyTracePrompt, zTag, fts5yyTokenName[fts5yypParser->fts5yytos->major],
+ fts5yyNewState);
+ }else{
+ fprintf(fts5yyTraceFILE,"%s%s '%s', pending reduce %d\n",
+ fts5yyTracePrompt, zTag, fts5yyTokenName[fts5yypParser->fts5yytos->major],
+ fts5yyNewState - fts5YY_MIN_REDUCE);
+ }
+ }
+}
+#else
+# define fts5yyTraceShift(X,Y,Z)
+#endif
+
+/*
+** Perform a shift action.
+*/
+static void fts5yy_shift(
+ fts5yyParser *fts5yypParser, /* The parser to be shifted */
+ fts5YYACTIONTYPE fts5yyNewState, /* The new state to shift in */
+ fts5YYCODETYPE fts5yyMajor, /* The major token to shift in */
+ sqlite3Fts5ParserFTS5TOKENTYPE fts5yyMinor /* The minor token to shift in */
+){
+ fts5yyStackEntry *fts5yytos;
+ fts5yypParser->fts5yytos++;
+#ifdef fts5YYTRACKMAXSTACKDEPTH
+ if( (int)(fts5yypParser->fts5yytos - fts5yypParser->fts5yystack)>fts5yypParser->fts5yyhwm ){
+ fts5yypParser->fts5yyhwm++;
+ assert( fts5yypParser->fts5yyhwm == (int)(fts5yypParser->fts5yytos - fts5yypParser->fts5yystack) );
+ }
+#endif
+#if fts5YYSTACKDEPTH>0
+ if( fts5yypParser->fts5yytos>fts5yypParser->fts5yystackEnd ){
+ fts5yypParser->fts5yytos--;
+ fts5yyStackOverflow(fts5yypParser);
+ return;
+ }
+#else
+ if( fts5yypParser->fts5yytos>=&fts5yypParser->fts5yystack[fts5yypParser->fts5yystksz] ){
+ if( fts5yyGrowStack(fts5yypParser) ){
+ fts5yypParser->fts5yytos--;
+ fts5yyStackOverflow(fts5yypParser);
+ return;
+ }
+ }
+#endif
+ if( fts5yyNewState > fts5YY_MAX_SHIFT ){
+ fts5yyNewState += fts5YY_MIN_REDUCE - fts5YY_MIN_SHIFTREDUCE;
+ }
+ fts5yytos = fts5yypParser->fts5yytos;
+ fts5yytos->stateno = fts5yyNewState;
+ fts5yytos->major = fts5yyMajor;
+ fts5yytos->minor.fts5yy0 = fts5yyMinor;
+ fts5yyTraceShift(fts5yypParser, fts5yyNewState, "Shift");
+}
+
+/* For rule J, fts5yyRuleInfoLhs[J] contains the symbol on the left-hand side
+** of that rule */
+static const fts5YYCODETYPE fts5yyRuleInfoLhs[] = {
+ 16, /* (0) input ::= expr */
+ 20, /* (1) colset ::= MINUS LCP colsetlist RCP */
+ 20, /* (2) colset ::= LCP colsetlist RCP */
+ 20, /* (3) colset ::= STRING */
+ 20, /* (4) colset ::= MINUS STRING */
+ 21, /* (5) colsetlist ::= colsetlist STRING */
+ 21, /* (6) colsetlist ::= STRING */
+ 17, /* (7) expr ::= expr AND expr */
+ 17, /* (8) expr ::= expr OR expr */
+ 17, /* (9) expr ::= expr NOT expr */
+ 17, /* (10) expr ::= colset COLON LP expr RP */
+ 17, /* (11) expr ::= LP expr RP */
+ 17, /* (12) expr ::= exprlist */
+ 19, /* (13) exprlist ::= cnearset */
+ 19, /* (14) exprlist ::= exprlist cnearset */
+ 18, /* (15) cnearset ::= nearset */
+ 18, /* (16) cnearset ::= colset COLON nearset */
+ 22, /* (17) nearset ::= phrase */
+ 22, /* (18) nearset ::= CARET phrase */
+ 22, /* (19) nearset ::= STRING LP nearphrases neardist_opt RP */
+ 23, /* (20) nearphrases ::= phrase */
+ 23, /* (21) nearphrases ::= nearphrases phrase */
+ 25, /* (22) neardist_opt ::= */
+ 25, /* (23) neardist_opt ::= COMMA STRING */
+ 24, /* (24) phrase ::= phrase PLUS STRING star_opt */
+ 24, /* (25) phrase ::= STRING star_opt */
+ 26, /* (26) star_opt ::= STAR */
+ 26, /* (27) star_opt ::= */
+};
+
+/* For rule J, fts5yyRuleInfoNRhs[J] contains the negative of the number
+** of symbols on the right-hand side of that rule. */
+static const signed char fts5yyRuleInfoNRhs[] = {
+ -1, /* (0) input ::= expr */
+ -4, /* (1) colset ::= MINUS LCP colsetlist RCP */
+ -3, /* (2) colset ::= LCP colsetlist RCP */
+ -1, /* (3) colset ::= STRING */
+ -2, /* (4) colset ::= MINUS STRING */
+ -2, /* (5) colsetlist ::= colsetlist STRING */
+ -1, /* (6) colsetlist ::= STRING */
+ -3, /* (7) expr ::= expr AND expr */
+ -3, /* (8) expr ::= expr OR expr */
+ -3, /* (9) expr ::= expr NOT expr */
+ -5, /* (10) expr ::= colset COLON LP expr RP */
+ -3, /* (11) expr ::= LP expr RP */
+ -1, /* (12) expr ::= exprlist */
+ -1, /* (13) exprlist ::= cnearset */
+ -2, /* (14) exprlist ::= exprlist cnearset */
+ -1, /* (15) cnearset ::= nearset */
+ -3, /* (16) cnearset ::= colset COLON nearset */
+ -1, /* (17) nearset ::= phrase */
+ -2, /* (18) nearset ::= CARET phrase */
+ -5, /* (19) nearset ::= STRING LP nearphrases neardist_opt RP */
+ -1, /* (20) nearphrases ::= phrase */
+ -2, /* (21) nearphrases ::= nearphrases phrase */
+ 0, /* (22) neardist_opt ::= */
+ -2, /* (23) neardist_opt ::= COMMA STRING */
+ -4, /* (24) phrase ::= phrase PLUS STRING star_opt */
+ -2, /* (25) phrase ::= STRING star_opt */
+ -1, /* (26) star_opt ::= STAR */
+ 0, /* (27) star_opt ::= */
+};
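+
+/* Worked example of how the two tables above drive a reduce: for rule
+** (7) "expr ::= expr AND expr", fts5yyRuleInfoNRhs[7]==-3, so three
+** entries are popped from the stack, and fts5yyRuleInfoLhs[7]==17 (the
+** code for "expr"), so symbol 17 is pushed in their place before
+** fts5yy_find_reduce_action() selects the next state.
+*/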
+
+static void fts5yy_accept(fts5yyParser*); /* Forward Declaration */
+
+/*
+** Perform a reduce action and the shift that must immediately
+** follow the reduce.
+**
+** The fts5yyLookahead and fts5yyLookaheadToken parameters provide reduce actions
+** access to the lookahead token (if any). The fts5yyLookahead will be fts5YYNOCODE
+** if the lookahead token has already been consumed. As this procedure is
+** only called from one place, optimizing compilers will in-line it, which
+** means that the extra parameters have no performance impact.
+*/
+static fts5YYACTIONTYPE fts5yy_reduce(
+ fts5yyParser *fts5yypParser, /* The parser */
+ unsigned int fts5yyruleno, /* Number of the rule by which to reduce */
+ int fts5yyLookahead, /* Lookahead token, or fts5YYNOCODE if none */
+ sqlite3Fts5ParserFTS5TOKENTYPE fts5yyLookaheadToken /* Value of the lookahead token */
+ sqlite3Fts5ParserCTX_PDECL /* %extra_context */
+){
+ int fts5yygoto; /* The next state */
+ fts5YYACTIONTYPE fts5yyact; /* The next action */
+ fts5yyStackEntry *fts5yymsp; /* The top of the parser's stack */
+ int fts5yysize; /* Amount to pop the stack */
+ sqlite3Fts5ParserARG_FETCH
+ (void)fts5yyLookahead;
+ (void)fts5yyLookaheadToken;
+ fts5yymsp = fts5yypParser->fts5yytos;
+#ifndef NDEBUG
+ if( fts5yyTraceFILE && fts5yyruleno<(int)(sizeof(fts5yyRuleName)/sizeof(fts5yyRuleName[0])) ){
+ fts5yysize = fts5yyRuleInfoNRhs[fts5yyruleno];
+ if( fts5yysize ){
+ fprintf(fts5yyTraceFILE, "%sReduce %d [%s]%s, pop back to state %d.\n",
+ fts5yyTracePrompt,
+ fts5yyruleno, fts5yyRuleName[fts5yyruleno],
+ fts5yyruleno<fts5YYNRULE_WITH_ACTION ? "" : " without external action",
+ fts5yymsp[fts5yysize].stateno);
+ }else{
+ fprintf(fts5yyTraceFILE, "%sReduce %d [%s]%s.\n",
+ fts5yyTracePrompt, fts5yyruleno, fts5yyRuleName[fts5yyruleno],
+ fts5yyruleno<fts5YYNRULE_WITH_ACTION ? "" : " without external action");
+ }
+ }
+#endif /* NDEBUG */
+
+ /* Check that the stack is large enough to grow by a single entry
+ ** if the RHS of the rule is empty. This ensures that there is room
+ ** enough on the stack to push the LHS value */
+ if( fts5yyRuleInfoNRhs[fts5yyruleno]==0 ){
+#ifdef fts5YYTRACKMAXSTACKDEPTH
+ if( (int)(fts5yypParser->fts5yytos - fts5yypParser->fts5yystack)>fts5yypParser->fts5yyhwm ){
+ fts5yypParser->fts5yyhwm++;
+ assert( fts5yypParser->fts5yyhwm == (int)(fts5yypParser->fts5yytos - fts5yypParser->fts5yystack));
+ }
+#endif
+#if fts5YYSTACKDEPTH>0
+ if( fts5yypParser->fts5yytos>=fts5yypParser->fts5yystackEnd ){
+ fts5yyStackOverflow(fts5yypParser);
+ /* The call to fts5yyStackOverflow() above pops the stack until it is
+ ** empty, causing the main parser loop to exit. So the return value
+ ** is never used and does not matter. */
+ return 0;
+ }
+#else
+ if( fts5yypParser->fts5yytos>=&fts5yypParser->fts5yystack[fts5yypParser->fts5yystksz-1] ){
+ if( fts5yyGrowStack(fts5yypParser) ){
+ fts5yyStackOverflow(fts5yypParser);
+ /* The call to fts5yyStackOverflow() above pops the stack until it is
+ ** empty, causing the main parser loop to exit. So the return value
+ ** is never used and does not matter. */
+ return 0;
+ }
+ fts5yymsp = fts5yypParser->fts5yytos;
+ }
+#endif
+ }
+
+ switch( fts5yyruleno ){
+ /* Beginning here are the reduction cases. A typical example
+ ** follows:
+ ** case 0:
+ ** #line <lineno> <grammarfile>
+ ** { ... } // User supplied code
+ ** #line <lineno> <thisfile>
+ ** break;
+ */
+/********** Begin reduce actions **********************************************/
+ fts5YYMINORTYPE fts5yylhsminor;
+ case 0: /* input ::= expr */
+{ sqlite3Fts5ParseFinished(pParse, fts5yymsp[0].minor.fts5yy24); }
+ break;
+ case 1: /* colset ::= MINUS LCP colsetlist RCP */
+{
+ fts5yymsp[-3].minor.fts5yy11 = sqlite3Fts5ParseColsetInvert(pParse, fts5yymsp[-1].minor.fts5yy11);
+}
+ break;
+ case 2: /* colset ::= LCP colsetlist RCP */
+{ fts5yymsp[-2].minor.fts5yy11 = fts5yymsp[-1].minor.fts5yy11; }
+ break;
+ case 3: /* colset ::= STRING */
+{
+ fts5yylhsminor.fts5yy11 = sqlite3Fts5ParseColset(pParse, 0, &fts5yymsp[0].minor.fts5yy0);
+}
+ fts5yymsp[0].minor.fts5yy11 = fts5yylhsminor.fts5yy11;
+ break;
+ case 4: /* colset ::= MINUS STRING */
+{
+ fts5yymsp[-1].minor.fts5yy11 = sqlite3Fts5ParseColset(pParse, 0, &fts5yymsp[0].minor.fts5yy0);
+ fts5yymsp[-1].minor.fts5yy11 = sqlite3Fts5ParseColsetInvert(pParse, fts5yymsp[-1].minor.fts5yy11);
+}
+ break;
+ case 5: /* colsetlist ::= colsetlist STRING */
+{
+ fts5yylhsminor.fts5yy11 = sqlite3Fts5ParseColset(pParse, fts5yymsp[-1].minor.fts5yy11, &fts5yymsp[0].minor.fts5yy0); }
+ fts5yymsp[-1].minor.fts5yy11 = fts5yylhsminor.fts5yy11;
+ break;
+ case 6: /* colsetlist ::= STRING */
+{
+ fts5yylhsminor.fts5yy11 = sqlite3Fts5ParseColset(pParse, 0, &fts5yymsp[0].minor.fts5yy0);
+}
+ fts5yymsp[0].minor.fts5yy11 = fts5yylhsminor.fts5yy11;
+ break;
+ case 7: /* expr ::= expr AND expr */
+{
+ fts5yylhsminor.fts5yy24 = sqlite3Fts5ParseNode(pParse, FTS5_AND, fts5yymsp[-2].minor.fts5yy24, fts5yymsp[0].minor.fts5yy24, 0);
+}
+ fts5yymsp[-2].minor.fts5yy24 = fts5yylhsminor.fts5yy24;
+ break;
+ case 8: /* expr ::= expr OR expr */
+{
+ fts5yylhsminor.fts5yy24 = sqlite3Fts5ParseNode(pParse, FTS5_OR, fts5yymsp[-2].minor.fts5yy24, fts5yymsp[0].minor.fts5yy24, 0);
+}
+ fts5yymsp[-2].minor.fts5yy24 = fts5yylhsminor.fts5yy24;
+ break;
+ case 9: /* expr ::= expr NOT expr */
+{
+ fts5yylhsminor.fts5yy24 = sqlite3Fts5ParseNode(pParse, FTS5_NOT, fts5yymsp[-2].minor.fts5yy24, fts5yymsp[0].minor.fts5yy24, 0);
+}
+ fts5yymsp[-2].minor.fts5yy24 = fts5yylhsminor.fts5yy24;
+ break;
+ case 10: /* expr ::= colset COLON LP expr RP */
+{
+ sqlite3Fts5ParseSetColset(pParse, fts5yymsp[-1].minor.fts5yy24, fts5yymsp[-4].minor.fts5yy11);
+ fts5yylhsminor.fts5yy24 = fts5yymsp[-1].minor.fts5yy24;
+}
+ fts5yymsp[-4].minor.fts5yy24 = fts5yylhsminor.fts5yy24;
+ break;
+ case 11: /* expr ::= LP expr RP */
+{fts5yymsp[-2].minor.fts5yy24 = fts5yymsp[-1].minor.fts5yy24;}
+ break;
+ case 12: /* expr ::= exprlist */
+ case 13: /* exprlist ::= cnearset */ fts5yytestcase(fts5yyruleno==13);
+{fts5yylhsminor.fts5yy24 = fts5yymsp[0].minor.fts5yy24;}
+ fts5yymsp[0].minor.fts5yy24 = fts5yylhsminor.fts5yy24;
+ break;
+ case 14: /* exprlist ::= exprlist cnearset */
+{
+ fts5yylhsminor.fts5yy24 = sqlite3Fts5ParseImplicitAnd(pParse, fts5yymsp[-1].minor.fts5yy24, fts5yymsp[0].minor.fts5yy24);
+}
+ fts5yymsp[-1].minor.fts5yy24 = fts5yylhsminor.fts5yy24;
+ break;
+ case 15: /* cnearset ::= nearset */
+{
+ fts5yylhsminor.fts5yy24 = sqlite3Fts5ParseNode(pParse, FTS5_STRING, 0, 0, fts5yymsp[0].minor.fts5yy46);
+}
+ fts5yymsp[0].minor.fts5yy24 = fts5yylhsminor.fts5yy24;
+ break;
+ case 16: /* cnearset ::= colset COLON nearset */
+{
+ fts5yylhsminor.fts5yy24 = sqlite3Fts5ParseNode(pParse, FTS5_STRING, 0, 0, fts5yymsp[0].minor.fts5yy46);
+ sqlite3Fts5ParseSetColset(pParse, fts5yylhsminor.fts5yy24, fts5yymsp[-2].minor.fts5yy11);
+}
+ fts5yymsp[-2].minor.fts5yy24 = fts5yylhsminor.fts5yy24;
+ break;
+ case 17: /* nearset ::= phrase */
+{ fts5yylhsminor.fts5yy46 = sqlite3Fts5ParseNearset(pParse, 0, fts5yymsp[0].minor.fts5yy53); }
+ fts5yymsp[0].minor.fts5yy46 = fts5yylhsminor.fts5yy46;
+ break;
+ case 18: /* nearset ::= CARET phrase */
+{
+ sqlite3Fts5ParseSetCaret(fts5yymsp[0].minor.fts5yy53);
+ fts5yymsp[-1].minor.fts5yy46 = sqlite3Fts5ParseNearset(pParse, 0, fts5yymsp[0].minor.fts5yy53);
+}
+ break;
+ case 19: /* nearset ::= STRING LP nearphrases neardist_opt RP */
+{
+ sqlite3Fts5ParseNear(pParse, &fts5yymsp[-4].minor.fts5yy0);
+ sqlite3Fts5ParseSetDistance(pParse, fts5yymsp[-2].minor.fts5yy46, &fts5yymsp[-1].minor.fts5yy0);
+ fts5yylhsminor.fts5yy46 = fts5yymsp[-2].minor.fts5yy46;
+}
+ fts5yymsp[-4].minor.fts5yy46 = fts5yylhsminor.fts5yy46;
+ break;
+ case 20: /* nearphrases ::= phrase */
+{
+ fts5yylhsminor.fts5yy46 = sqlite3Fts5ParseNearset(pParse, 0, fts5yymsp[0].minor.fts5yy53);
+}
+ fts5yymsp[0].minor.fts5yy46 = fts5yylhsminor.fts5yy46;
+ break;
+ case 21: /* nearphrases ::= nearphrases phrase */
+{
+ fts5yylhsminor.fts5yy46 = sqlite3Fts5ParseNearset(pParse, fts5yymsp[-1].minor.fts5yy46, fts5yymsp[0].minor.fts5yy53);
+}
+ fts5yymsp[-1].minor.fts5yy46 = fts5yylhsminor.fts5yy46;
+ break;
+ case 22: /* neardist_opt ::= */
+{ fts5yymsp[1].minor.fts5yy0.p = 0; fts5yymsp[1].minor.fts5yy0.n = 0; }
+ break;
+ case 23: /* neardist_opt ::= COMMA STRING */
+{ fts5yymsp[-1].minor.fts5yy0 = fts5yymsp[0].minor.fts5yy0; }
+ break;
+ case 24: /* phrase ::= phrase PLUS STRING star_opt */
+{
+ fts5yylhsminor.fts5yy53 = sqlite3Fts5ParseTerm(pParse, fts5yymsp[-3].minor.fts5yy53, &fts5yymsp[-1].minor.fts5yy0, fts5yymsp[0].minor.fts5yy4);
+}
+ fts5yymsp[-3].minor.fts5yy53 = fts5yylhsminor.fts5yy53;
+ break;
+ case 25: /* phrase ::= STRING star_opt */
+{
+ fts5yylhsminor.fts5yy53 = sqlite3Fts5ParseTerm(pParse, 0, &fts5yymsp[-1].minor.fts5yy0, fts5yymsp[0].minor.fts5yy4);
+}
+ fts5yymsp[-1].minor.fts5yy53 = fts5yylhsminor.fts5yy53;
+ break;
+ case 26: /* star_opt ::= STAR */
+{ fts5yymsp[0].minor.fts5yy4 = 1; }
+ break;
+ case 27: /* star_opt ::= */
+{ fts5yymsp[1].minor.fts5yy4 = 0; }
+ break;
+ default:
+ break;
+/********** End reduce actions ************************************************/
+ };
+ assert( fts5yyruleno<sizeof(fts5yyRuleInfoLhs)/sizeof(fts5yyRuleInfoLhs[0]) );
+ fts5yygoto = fts5yyRuleInfoLhs[fts5yyruleno];
+ fts5yysize = fts5yyRuleInfoNRhs[fts5yyruleno];
+ fts5yyact = fts5yy_find_reduce_action(fts5yymsp[fts5yysize].stateno,(fts5YYCODETYPE)fts5yygoto);
+
+ /* There are no SHIFTREDUCE actions on nonterminals because the table
+ ** generator has simplified them to pure REDUCE actions. */
+ assert( !(fts5yyact>fts5YY_MAX_SHIFT && fts5yyact<=fts5YY_MAX_SHIFTREDUCE) );
+
+ /* It is not possible for a REDUCE to be followed by an error */
+ assert( fts5yyact!=fts5YY_ERROR_ACTION );
+
+ fts5yymsp += fts5yysize+1;
+ fts5yypParser->fts5yytos = fts5yymsp;
+ fts5yymsp->stateno = (fts5YYACTIONTYPE)fts5yyact;
+ fts5yymsp->major = (fts5YYCODETYPE)fts5yygoto;
+ fts5yyTraceShift(fts5yypParser, fts5yyact, "... then shift");
+ return fts5yyact;
+}
+
+/*
+** The following code executes when the parse fails
+*/
+#ifndef fts5YYNOERRORRECOVERY
+static void fts5yy_parse_failed(
+ fts5yyParser *fts5yypParser /* The parser */
+){
+ sqlite3Fts5ParserARG_FETCH
+ sqlite3Fts5ParserCTX_FETCH
+#ifndef NDEBUG
+ if( fts5yyTraceFILE ){
+ fprintf(fts5yyTraceFILE,"%sFail!\n",fts5yyTracePrompt);
+ }
+#endif
+ while( fts5yypParser->fts5yytos>fts5yypParser->fts5yystack ) fts5yy_pop_parser_stack(fts5yypParser);
+ /* Here code is inserted which will be executed whenever the
+ ** parser fails */
+/************ Begin %parse_failure code ***************************************/
+/************ End %parse_failure code *****************************************/
+ sqlite3Fts5ParserARG_STORE /* Suppress warning about unused %extra_argument variable */
+ sqlite3Fts5ParserCTX_STORE
+}
+#endif /* fts5YYNOERRORRECOVERY */
+
+/*
+** The following code executes when a syntax error first occurs.
+*/
+static void fts5yy_syntax_error(
+ fts5yyParser *fts5yypParser, /* The parser */
+ int fts5yymajor, /* The major type of the error token */
+ sqlite3Fts5ParserFTS5TOKENTYPE fts5yyminor /* The minor type of the error token */
+){
+ sqlite3Fts5ParserARG_FETCH
+ sqlite3Fts5ParserCTX_FETCH
+#define FTS5TOKEN fts5yyminor
+/************ Begin %syntax_error code ****************************************/
+
+ UNUSED_PARAM(fts5yymajor); /* Silence a compiler warning */
+ sqlite3Fts5ParseError(
+ pParse, "fts5: syntax error near \"%.*s\"",FTS5TOKEN.n,FTS5TOKEN.p
+ );
+/************ End %syntax_error code ******************************************/
+ sqlite3Fts5ParserARG_STORE /* Suppress warning about unused %extra_argument variable */
+ sqlite3Fts5ParserCTX_STORE
+}
+
+/*
+** The following is executed when the parser accepts
+*/
+static void fts5yy_accept(
+ fts5yyParser *fts5yypParser /* The parser */
+){
+ sqlite3Fts5ParserARG_FETCH
+ sqlite3Fts5ParserCTX_FETCH
+#ifndef NDEBUG
+ if( fts5yyTraceFILE ){
+ fprintf(fts5yyTraceFILE,"%sAccept!\n",fts5yyTracePrompt);
+ }
+#endif
+#ifndef fts5YYNOERRORRECOVERY
+ fts5yypParser->fts5yyerrcnt = -1;
+#endif
+ assert( fts5yypParser->fts5yytos==fts5yypParser->fts5yystack );
+ /* Here code is inserted which will be executed whenever the
+ ** parser accepts */
+/*********** Begin %parse_accept code *****************************************/
+/*********** End %parse_accept code *******************************************/
+ sqlite3Fts5ParserARG_STORE /* Suppress warning about unused %extra_argument variable */
+ sqlite3Fts5ParserCTX_STORE
+}
+
+/* The main parser program.
+** The first argument is a pointer to a structure obtained from
+** "sqlite3Fts5ParserAlloc" which describes the current state of the parser.
+** The second argument is the major token number. The third is
+** the minor token. The fourth optional argument is whatever the
+** user wants (as specified in the grammar) and is available for
+** use by the action routines.
+**
+** Inputs:
+** <ul>
+** <li> A pointer to the parser (an opaque structure.)
+** <li> The major token number.
+** <li> The minor token number.
+** <li> An optional argument of a grammar-specified type.
+** </ul>
+**
+** Outputs:
+** None.
+*/
+static void sqlite3Fts5Parser(
+ void *fts5yyp, /* The parser */
+ int fts5yymajor, /* The major token code number */
+ sqlite3Fts5ParserFTS5TOKENTYPE fts5yyminor /* The value for the token */
+ sqlite3Fts5ParserARG_PDECL /* Optional %extra_argument parameter */
+){
+ fts5YYMINORTYPE fts5yyminorunion;
+ fts5YYACTIONTYPE fts5yyact; /* The parser action. */
+#if !defined(fts5YYERRORSYMBOL) && !defined(fts5YYNOERRORRECOVERY)
+ int fts5yyendofinput; /* True if we are at the end of input */
+#endif
+#ifdef fts5YYERRORSYMBOL
+ int fts5yyerrorhit = 0; /* True if fts5yymajor has invoked an error */
+#endif
+ fts5yyParser *fts5yypParser = (fts5yyParser*)fts5yyp; /* The parser */
+ sqlite3Fts5ParserCTX_FETCH
+ sqlite3Fts5ParserARG_STORE
+
+ assert( fts5yypParser->fts5yytos!=0 );
+#if !defined(fts5YYERRORSYMBOL) && !defined(fts5YYNOERRORRECOVERY)
+ fts5yyendofinput = (fts5yymajor==0);
+#endif
+
+ fts5yyact = fts5yypParser->fts5yytos->stateno;
+#ifndef NDEBUG
+ if( fts5yyTraceFILE ){
+ if( fts5yyact < fts5YY_MIN_REDUCE ){
+ fprintf(fts5yyTraceFILE,"%sInput '%s' in state %d\n",
+ fts5yyTracePrompt,fts5yyTokenName[fts5yymajor],fts5yyact);
+ }else{
+ fprintf(fts5yyTraceFILE,"%sInput '%s' with pending reduce %d\n",
+ fts5yyTracePrompt,fts5yyTokenName[fts5yymajor],fts5yyact-fts5YY_MIN_REDUCE);
+ }
+ }
+#endif
+
+ do{
+ assert( fts5yyact==fts5yypParser->fts5yytos->stateno );
+ fts5yyact = fts5yy_find_shift_action((fts5YYCODETYPE)fts5yymajor,fts5yyact);
+ if( fts5yyact >= fts5YY_MIN_REDUCE ){
+ fts5yyact = fts5yy_reduce(fts5yypParser,fts5yyact-fts5YY_MIN_REDUCE,fts5yymajor,
+ fts5yyminor sqlite3Fts5ParserCTX_PARAM);
+ }else if( fts5yyact <= fts5YY_MAX_SHIFTREDUCE ){
+ fts5yy_shift(fts5yypParser,fts5yyact,(fts5YYCODETYPE)fts5yymajor,fts5yyminor);
+#ifndef fts5YYNOERRORRECOVERY
+ fts5yypParser->fts5yyerrcnt--;
+#endif
+ break;
+ }else if( fts5yyact==fts5YY_ACCEPT_ACTION ){
+ fts5yypParser->fts5yytos--;
+ fts5yy_accept(fts5yypParser);
+ return;
+ }else{
+ assert( fts5yyact == fts5YY_ERROR_ACTION );
+ fts5yyminorunion.fts5yy0 = fts5yyminor;
+#ifdef fts5YYERRORSYMBOL
+ int fts5yymx;
+#endif
+#ifndef NDEBUG
+ if( fts5yyTraceFILE ){
+ fprintf(fts5yyTraceFILE,"%sSyntax Error!\n",fts5yyTracePrompt);
+ }
+#endif
+#ifdef fts5YYERRORSYMBOL
+ /* A syntax error has occurred.
+ ** The response to an error depends upon whether or not the
+ ** grammar defines an error token "ERROR".
+ **
+ ** This is what we do if the grammar does define ERROR:
+ **
+ ** * Call the %syntax_error function.
+ **
+ ** * Begin popping the stack until we enter a state where
+ ** it is legal to shift the error symbol, then shift
+ ** the error symbol.
+ **
+ ** * Set the error count to three.
+ **
+ ** * Begin accepting and shifting new tokens. No new error
+ ** processing will occur until three tokens have been
+ ** shifted successfully.
+ **
+ */
+ if( fts5yypParser->fts5yyerrcnt<0 ){
+ fts5yy_syntax_error(fts5yypParser,fts5yymajor,fts5yyminor);
+ }
+ fts5yymx = fts5yypParser->fts5yytos->major;
+ if( fts5yymx==fts5YYERRORSYMBOL || fts5yyerrorhit ){
+#ifndef NDEBUG
+ if( fts5yyTraceFILE ){
+ fprintf(fts5yyTraceFILE,"%sDiscard input token %s\n",
+ fts5yyTracePrompt,fts5yyTokenName[fts5yymajor]);
+ }
+#endif
+ fts5yy_destructor(fts5yypParser, (fts5YYCODETYPE)fts5yymajor, &fts5yyminorunion);
+ fts5yymajor = fts5YYNOCODE;
+ }else{
+ while( fts5yypParser->fts5yytos >= fts5yypParser->fts5yystack
+ && (fts5yyact = fts5yy_find_reduce_action(
+ fts5yypParser->fts5yytos->stateno,
+ fts5YYERRORSYMBOL)) > fts5YY_MAX_SHIFTREDUCE
+ ){
+ fts5yy_pop_parser_stack(fts5yypParser);
+ }
+ if( fts5yypParser->fts5yytos < fts5yypParser->fts5yystack || fts5yymajor==0 ){
+ fts5yy_destructor(fts5yypParser,(fts5YYCODETYPE)fts5yymajor,&fts5yyminorunion);
+ fts5yy_parse_failed(fts5yypParser);
+#ifndef fts5YYNOERRORRECOVERY
+ fts5yypParser->fts5yyerrcnt = -1;
+#endif
+ fts5yymajor = fts5YYNOCODE;
+ }else if( fts5yymx!=fts5YYERRORSYMBOL ){
+ fts5yy_shift(fts5yypParser,fts5yyact,fts5YYERRORSYMBOL,fts5yyminor);
+ }
+ }
+ fts5yypParser->fts5yyerrcnt = 3;
+ fts5yyerrorhit = 1;
+ if( fts5yymajor==fts5YYNOCODE ) break;
+ fts5yyact = fts5yypParser->fts5yytos->stateno;
+#elif defined(fts5YYNOERRORRECOVERY)
+ /* If the fts5YYNOERRORRECOVERY macro is defined, then do not attempt to
+ ** do any kind of error recovery. Instead, simply invoke the syntax
+ ** error routine and continue going as if nothing had happened.
+ **
+ ** Applications can set this macro (for example inside %include) if
+ ** they intend to abandon the parse upon the first syntax error seen.
+ */
+ fts5yy_syntax_error(fts5yypParser,fts5yymajor, fts5yyminor);
+ fts5yy_destructor(fts5yypParser,(fts5YYCODETYPE)fts5yymajor,&fts5yyminorunion);
+ break;
+#else /* fts5YYERRORSYMBOL is not defined */
+ /* This is what we do if the grammar does not define ERROR:
+ **
+ ** * Report an error message, and throw away the input token.
+ **
+ ** * If the input token is $, then fail the parse.
+ **
+ ** As before, subsequent error messages are suppressed until
+ ** three input tokens have been successfully shifted.
+ */
+ if( fts5yypParser->fts5yyerrcnt<=0 ){
+ fts5yy_syntax_error(fts5yypParser,fts5yymajor, fts5yyminor);
+ }
+ fts5yypParser->fts5yyerrcnt = 3;
+ fts5yy_destructor(fts5yypParser,(fts5YYCODETYPE)fts5yymajor,&fts5yyminorunion);
+ if( fts5yyendofinput ){
+ fts5yy_parse_failed(fts5yypParser);
+#ifndef fts5YYNOERRORRECOVERY
+ fts5yypParser->fts5yyerrcnt = -1;
+#endif
+ }
+ break;
+#endif
+ }
+ }while( fts5yypParser->fts5yytos>fts5yypParser->fts5yystack );
+#ifndef NDEBUG
+ if( fts5yyTraceFILE ){
+ fts5yyStackEntry *i;
+ char cDiv = '[';
+ fprintf(fts5yyTraceFILE,"%sReturn. Stack=",fts5yyTracePrompt);
+ for(i=&fts5yypParser->fts5yystack[1]; i<=fts5yypParser->fts5yytos; i++){
+ fprintf(fts5yyTraceFILE,"%c%s", cDiv, fts5yyTokenName[i->major]);
+ cDiv = ' ';
+ }
+ fprintf(fts5yyTraceFILE,"]\n");
+ }
+#endif
+ return;
+}
+
+/*
+** Return the fallback token corresponding to canonical token iToken, or
+** 0 if iToken has no fallback.
+*/
+static int sqlite3Fts5ParserFallback(int iToken){
+#ifdef fts5YYFALLBACK
+ assert( iToken<(int)(sizeof(fts5yyFallback)/sizeof(fts5yyFallback[0])) );
+ return fts5yyFallback[iToken];
+#else
+ (void)iToken;
+ return 0;
+#endif
+}
+
+/*
+** 2014 May 31
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+******************************************************************************
+*/
+
+
+/* #include "fts5Int.h" */
+#include <math.h> /* amalgamator: keep */
+
+/*
+** Object used to iterate through all "coalesced phrase instances" in
+** a single column of the current row. If the phrase instances in the
+** column being considered do not overlap, this object simply iterates
+** through them. Or, if they do overlap (share one or more tokens in
+** common), each set of overlapping instances is treated as a single
+** match. See documentation for the highlight() auxiliary function for
+** details.
+**
+** Usage is:
+**
+**   for(rc = fts5CInstIterInit(pApi, pFts, iCol, &iter);
+**       rc==SQLITE_OK && iter.iStart>=0;
+**       rc = fts5CInstIterNext(&iter)
+** ){
+** printf("instance starts at %d, ends at %d\n", iter.iStart, iter.iEnd);
+** }
+**
+*/
+typedef struct CInstIter CInstIter;
+struct CInstIter {
+ const Fts5ExtensionApi *pApi; /* API offered by current FTS version */
+ Fts5Context *pFts; /* First arg to pass to pApi functions */
+ int iCol; /* Column to search */
+ int iInst; /* Next phrase instance index */
+ int nInst; /* Total number of phrase instances */
+
+ /* Output variables */
+ int iStart; /* First token in coalesced phrase instance */
+ int iEnd; /* Last token in coalesced phrase instance */
+};
+
+/*
+** Advance the iterator to the next coalesced phrase instance. Return
+** an SQLite error code if an error occurs, or SQLITE_OK otherwise.
+*/
+static int fts5CInstIterNext(CInstIter *pIter){
+ int rc = SQLITE_OK;
+ pIter->iStart = -1;
+ pIter->iEnd = -1;
+
+ while( rc==SQLITE_OK && pIter->iInst<pIter->nInst ){
+ int ip; int ic; int io;
+ rc = pIter->pApi->xInst(pIter->pFts, pIter->iInst, &ip, &ic, &io);
+ if( rc==SQLITE_OK ){
+ if( ic==pIter->iCol ){
+ int iEnd = io - 1 + pIter->pApi->xPhraseSize(pIter->pFts, ip);
+ if( pIter->iStart<0 ){
+ pIter->iStart = io;
+ pIter->iEnd = iEnd;
+ }else if( io<=pIter->iEnd ){
+ if( iEnd>pIter->iEnd ) pIter->iEnd = iEnd;
+ }else{
+ break;
+ }
+ }
+ pIter->iInst++;
+ }
+ }
+
+ return rc;
+}
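+
+/* Worked example of the coalescing rule above: if the instances in the
+** column occupy tokens [3..4] and [4..6], the second starts at token 4,
+** which is <= the current iEnd of 4, so the two are merged and the
+** iterator reports a single instance [3..6].  A later instance at
+** [8..8] does not overlap and is reported by the next call.
+*/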
+
+/*
+** Initialize the iterator object indicated by the final parameter to
+** iterate through coalesced phrase instances in column iCol.
+*/
+static int fts5CInstIterInit(
+ const Fts5ExtensionApi *pApi,
+ Fts5Context *pFts,
+ int iCol,
+ CInstIter *pIter
+){
+ int rc;
+
+ memset(pIter, 0, sizeof(CInstIter));
+ pIter->pApi = pApi;
+ pIter->pFts = pFts;
+ pIter->iCol = iCol;
+ rc = pApi->xInstCount(pFts, &pIter->nInst);
+
+ if( rc==SQLITE_OK ){
+ rc = fts5CInstIterNext(pIter);
+ }
+
+ return rc;
+}
+
+
+
+/*************************************************************************
+** Start of highlight() implementation.
+*/
+typedef struct HighlightContext HighlightContext;
+struct HighlightContext {
+ CInstIter iter; /* Coalesced Instance Iterator */
+ int iPos; /* Current token offset in zIn[] */
+ int iRangeStart; /* First token to include */
+ int iRangeEnd; /* If non-zero, last token to include */
+ const char *zOpen; /* Opening highlight */
+ const char *zClose; /* Closing highlight */
+ const char *zIn; /* Input text */
+ int nIn; /* Size of input text in bytes */
+ int iOff; /* Current offset within zIn[] */
+ char *zOut; /* Output value */
+};
+
+/*
+** Append text to the HighlightContext output string - p->zOut. Argument
+** z points to a buffer containing n bytes of text to append. If n is
+** negative, everything up until the first '\0' is appended to the output.
+**
+** If *pRc is set to any value other than SQLITE_OK when this function is
+** called, it is a no-op. If an error (i.e. an OOM condition) is encountered,
+** *pRc is set to an error code before returning.
+*/
+static void fts5HighlightAppend(
+ int *pRc,
+ HighlightContext *p,
+ const char *z, int n
+){
+ if( *pRc==SQLITE_OK && z ){
+ if( n<0 ) n = (int)strlen(z);
+ p->zOut = sqlite3_mprintf("%z%.*s", p->zOut, n, z);
+ if( p->zOut==0 ) *pRc = SQLITE_NOMEM;
+ }
+}
+
+/*
+** Tokenizer callback used by implementation of highlight() function.
+*/
+static int fts5HighlightCb(
+ void *pContext, /* Pointer to HighlightContext object */
+ int tflags, /* Mask of FTS5_TOKEN_* flags */
+ const char *pToken, /* Buffer containing token */
+ int nToken, /* Size of token in bytes */
+ int iStartOff, /* Start offset of token */
+ int iEndOff /* End offset of token */
+){
+ HighlightContext *p = (HighlightContext*)pContext;
+ int rc = SQLITE_OK;
+ int iPos;
+
+ UNUSED_PARAM2(pToken, nToken);
+
+ if( tflags & FTS5_TOKEN_COLOCATED ) return SQLITE_OK;
+ iPos = p->iPos++;
+
+ if( p->iRangeEnd>0 ){
+ if( iPos<p->iRangeStart || iPos>p->iRangeEnd ) return SQLITE_OK;
+ if( p->iRangeStart && iPos==p->iRangeStart ) p->iOff = iStartOff;
+ }
+
+ if( iPos==p->iter.iStart ){
+ fts5HighlightAppend(&rc, p, &p->zIn[p->iOff], iStartOff - p->iOff);
+ fts5HighlightAppend(&rc, p, p->zOpen, -1);
+ p->iOff = iStartOff;
+ }
+
+ if( iPos==p->iter.iEnd ){
+ if( p->iRangeEnd && p->iter.iStart<p->iRangeStart ){
+ fts5HighlightAppend(&rc, p, p->zOpen, -1);
+ }
+ fts5HighlightAppend(&rc, p, &p->zIn[p->iOff], iEndOff - p->iOff);
+ fts5HighlightAppend(&rc, p, p->zClose, -1);
+ p->iOff = iEndOff;
+ if( rc==SQLITE_OK ){
+ rc = fts5CInstIterNext(&p->iter);
+ }
+ }
+
+ if( p->iRangeEnd>0 && iPos==p->iRangeEnd ){
+ fts5HighlightAppend(&rc, p, &p->zIn[p->iOff], iEndOff - p->iOff);
+ p->iOff = iEndOff;
+ if( iPos>=p->iter.iStart && iPos<p->iter.iEnd ){
+ fts5HighlightAppend(&rc, p, p->zClose, -1);
+ }
+ }
+
+ return rc;
+}
+
+/*
+** Implementation of highlight() function.
+*/
+static void fts5HighlightFunction(
+ const Fts5ExtensionApi *pApi, /* API offered by current FTS version */
+ Fts5Context *pFts, /* First arg to pass to pApi functions */
+ sqlite3_context *pCtx, /* Context for returning result/error */
+ int nVal, /* Number of values in apVal[] array */
+ sqlite3_value **apVal /* Array of trailing arguments */
+){
+ HighlightContext ctx;
+ int rc;
+ int iCol;
+
+ if( nVal!=3 ){
+ const char *zErr = "wrong number of arguments to function highlight()";
+ sqlite3_result_error(pCtx, zErr, -1);
+ return;
+ }
+
+ iCol = sqlite3_value_int(apVal[0]);
+ memset(&ctx, 0, sizeof(HighlightContext));
+ ctx.zOpen = (const char*)sqlite3_value_text(apVal[1]);
+ ctx.zClose = (const char*)sqlite3_value_text(apVal[2]);
+ rc = pApi->xColumnText(pFts, iCol, &ctx.zIn, &ctx.nIn);
+
+ if( ctx.zIn ){
+ if( rc==SQLITE_OK ){
+ rc = fts5CInstIterInit(pApi, pFts, iCol, &ctx.iter);
+ }
+
+ if( rc==SQLITE_OK ){
+ rc = pApi->xTokenize(pFts, ctx.zIn, ctx.nIn, (void*)&ctx,fts5HighlightCb);
+ }
+ fts5HighlightAppend(&rc, &ctx, &ctx.zIn[ctx.iOff], ctx.nIn - ctx.iOff);
+
+ if( rc==SQLITE_OK ){
+ sqlite3_result_text(pCtx, (const char*)ctx.zOut, -1, SQLITE_TRANSIENT);
+ }
+ sqlite3_free(ctx.zOut);
+ }
+ if( rc!=SQLITE_OK ){
+ sqlite3_result_error_code(pCtx, rc);
+ }
+}
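+
+/* Usage note (illustrative): from SQL, the function above is invoked
+** as, for example:
+**
+**   SELECT highlight(ft, 0, '<b>', '</b>') FROM ft WHERE ft MATCH 'xyz';
+**
+** where "ft" is a hypothetical fts5 table.  Argument 0 selects the
+** column to mark up and the final two arguments become ctx.zOpen and
+** ctx.zClose.
+*/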
+/*
+** End of highlight() implementation.
+**************************************************************************/
+
+/*
+** Context object passed to the fts5SentenceFinderCb() function.
+*/
+typedef struct Fts5SFinder Fts5SFinder;
+struct Fts5SFinder {
+ int iPos; /* Current token position */
+ int nFirstAlloc; /* Allocated size of aFirst[] */
+ int nFirst; /* Number of entries in aFirst[] */
+ int *aFirst; /* Array of first token in each sentence */
+ const char *zDoc; /* Document being tokenized */
+};
+
+/*
+** Add an entry to the Fts5SFinder.aFirst[] array. Grow the array if
+** necessary. Return SQLITE_OK if successful, or SQLITE_NOMEM if an
+** error occurs.
+*/
+static int fts5SentenceFinderAdd(Fts5SFinder *p, int iAdd){
+ if( p->nFirstAlloc==p->nFirst ){
+ int nNew = p->nFirstAlloc ? p->nFirstAlloc*2 : 64;
+ int *aNew;
+
+ aNew = (int*)sqlite3_realloc64(p->aFirst, nNew*sizeof(int));
+ if( aNew==0 ) return SQLITE_NOMEM;
+ p->aFirst = aNew;
+ p->nFirstAlloc = nNew;
+ }
+ p->aFirst[p->nFirst++] = iAdd;
+ return SQLITE_OK;
+}
+
+/*
+** This function is an xTokenize() callback used by the auxiliary snippet()
+** function. Its job is to identify tokens that are the first in a sentence.
+** For each such token, an entry is added to the SFinder.aFirst[] array.
+*/
+static int fts5SentenceFinderCb(
+ void *pContext, /* Pointer to HighlightContext object */
+ int tflags, /* Mask of FTS5_TOKEN_* flags */
+ const char *pToken, /* Buffer containing token */
+ int nToken, /* Size of token in bytes */
+ int iStartOff, /* Start offset of token */
+ int iEndOff /* End offset of token */
+){
+ int rc = SQLITE_OK;
+
+ UNUSED_PARAM2(pToken, nToken);
+ UNUSED_PARAM(iEndOff);
+
+ if( (tflags & FTS5_TOKEN_COLOCATED)==0 ){
+ Fts5SFinder *p = (Fts5SFinder*)pContext;
+ if( p->iPos>0 ){
+ int i;
+ char c = 0;
+ for(i=iStartOff-1; i>=0; i--){
+ c = p->zDoc[i];
+ if( c!=' ' && c!='\t' && c!='\n' && c!='\r' ) break;
+ }
+ if( i!=iStartOff-1 && (c=='.' || c==':') ){
+ rc = fts5SentenceFinderAdd(p, p->iPos);
+ }
+ }else{
+ rc = fts5SentenceFinderAdd(p, 0);
+ }
+ p->iPos++;
+ }
+ return rc;
+}
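+
+/* Worked example of the heuristic above: for the document
+** "First one. Second one", token 0 ("First") is recorded because
+** p->iPos==0, and token 2 ("Second") is recorded because the first
+** non-whitespace character preceding it is '.'.  Tokens 1 and 3 are
+** not sentence starts and are skipped.
+*/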
+
+static int fts5SnippetScore(
+ const Fts5ExtensionApi *pApi, /* API offered by current FTS version */
+ Fts5Context *pFts, /* First arg to pass to pApi functions */
+ int nDocsize, /* Size of column in tokens */
+ unsigned char *aSeen, /* Array with one element per query phrase */
+ int iCol, /* Column to score */
+ int iPos, /* Starting offset to score */
+ int nToken, /* Max tokens per snippet */
+ int *pnScore, /* OUT: Score */
+ int *piPos /* OUT: Adjusted offset */
+){
+ int rc;
+ int i;
+ int ip = 0;
+ int ic = 0;
+ int iOff = 0;
+ int iFirst = -1;
+ int nInst;
+ int nScore = 0;
+ int iLast = 0;
+ sqlite3_int64 iEnd = (sqlite3_int64)iPos + nToken;
+
+ rc = pApi->xInstCount(pFts, &nInst);
+ for(i=0; i<nInst && rc==SQLITE_OK; i++){
+ rc = pApi->xInst(pFts, i, &ip, &ic, &iOff);
+ if( rc==SQLITE_OK && ic==iCol && iOff>=iPos && iOff<iEnd ){
+ nScore += (aSeen[ip] ? 1 : 1000);
+ aSeen[ip] = 1;
+ if( iFirst<0 ) iFirst = iOff;
+ iLast = iOff + pApi->xPhraseSize(pFts, ip);
+ }
+ }
+
+ *pnScore = nScore;
+ if( piPos ){
+ sqlite3_int64 iAdj = iFirst - (nToken - (iLast-iFirst)) / 2;
+ if( (iAdj+nToken)>nDocsize ) iAdj = nDocsize - nToken;
+ if( iAdj<0 ) iAdj = 0;
+ *piPos = (int)iAdj;
+ }
+
+ return rc;
+}
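+
+/* Worked example of the scoring above: within a candidate window, the
+** first occurrence of each query phrase adds 1000 to nScore and each
+** repeat adds 1 (the aSeen[] flags distinguish the two).  A window
+** containing phrase instances {0,1,0} therefore scores 2001, beating
+** a window containing {0,0,0}, which scores 1002.
+*/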
+
+/*
+** Return the value in pVal interpreted as utf-8 text. Except, if pVal
+** contains a NULL value, return a pointer to a static string zero
+** bytes in length instead of a NULL pointer.
+*/
+static const char *fts5ValueToText(sqlite3_value *pVal){
+ const char *zRet = (const char*)sqlite3_value_text(pVal);
+ return zRet ? zRet : "";
+}
+
+/*
+** Implementation of snippet() function.
+*/
+static void fts5SnippetFunction(
+ const Fts5ExtensionApi *pApi, /* API offered by current FTS version */
+ Fts5Context *pFts, /* First arg to pass to pApi functions */
+ sqlite3_context *pCtx, /* Context for returning result/error */
+ int nVal, /* Number of values in apVal[] array */
+ sqlite3_value **apVal /* Array of trailing arguments */
+){
+ HighlightContext ctx;
+ int rc = SQLITE_OK; /* Return code */
+ int iCol; /* 1st argument to snippet() */
+ const char *zEllips; /* 4th argument to snippet() */
+ int nToken; /* 5th argument to snippet() */
+ int nInst = 0; /* Number of instance matches this row */
+ int i; /* Used to iterate through instances */
+ int nPhrase; /* Number of phrases in query */
+ unsigned char *aSeen; /* Array of "seen instance" flags */
+ int iBestCol; /* Column containing best snippet */
+ int iBestStart = 0; /* First token of best snippet */
+ int nBestScore = 0; /* Score of best snippet */
+ int nColSize = 0; /* Total size of iBestCol in tokens */
+ Fts5SFinder sFinder; /* Used to find the beginnings of sentences */
+ int nCol;
+
+ if( nVal!=5 ){
+ const char *zErr = "wrong number of arguments to function snippet()";
+ sqlite3_result_error(pCtx, zErr, -1);
+ return;
+ }
+
+ nCol = pApi->xColumnCount(pFts);
+ memset(&ctx, 0, sizeof(HighlightContext));
+ iCol = sqlite3_value_int(apVal[0]);
+ ctx.zOpen = fts5ValueToText(apVal[1]);
+ ctx.zClose = fts5ValueToText(apVal[2]);
+ zEllips = fts5ValueToText(apVal[3]);
+ nToken = sqlite3_value_int(apVal[4]);
+
+ iBestCol = (iCol>=0 ? iCol : 0);
+ nPhrase = pApi->xPhraseCount(pFts);
+ aSeen = sqlite3_malloc(nPhrase);
+ if( aSeen==0 ){
+ rc = SQLITE_NOMEM;
+ }
+ if( rc==SQLITE_OK ){
+ rc = pApi->xInstCount(pFts, &nInst);
+ }
+
+ memset(&sFinder, 0, sizeof(Fts5SFinder));
+ for(i=0; i<nCol; i++){
+ if( iCol<0 || iCol==i ){
+ int nDoc;
+ int nDocsize;
+ int ii;
+ sFinder.iPos = 0;
+ sFinder.nFirst = 0;
+ rc = pApi->xColumnText(pFts, i, &sFinder.zDoc, &nDoc);
+ if( rc!=SQLITE_OK ) break;
+ rc = pApi->xTokenize(pFts,
+ sFinder.zDoc, nDoc, (void*)&sFinder,fts5SentenceFinderCb
+ );
+ if( rc!=SQLITE_OK ) break;
+ rc = pApi->xColumnSize(pFts, i, &nDocsize);
+ if( rc!=SQLITE_OK ) break;
+
+ for(ii=0; rc==SQLITE_OK && ii<nInst; ii++){
+ int ip, ic, io;
+ int iAdj;
+ int nScore;
+ int jj;
+
+ rc = pApi->xInst(pFts, ii, &ip, &ic, &io);
+ if( ic!=i ) continue;
+ if( io>nDocsize ) rc = FTS5_CORRUPT;
+ if( rc!=SQLITE_OK ) continue;
+ memset(aSeen, 0, nPhrase);
+ rc = fts5SnippetScore(pApi, pFts, nDocsize, aSeen, i,
+ io, nToken, &nScore, &iAdj
+ );
+ if( rc==SQLITE_OK && nScore>nBestScore ){
+ nBestScore = nScore;
+ iBestCol = i;
+ iBestStart = iAdj;
+ nColSize = nDocsize;
+ }
+
+ if( rc==SQLITE_OK && sFinder.nFirst && nDocsize>nToken ){
+ for(jj=0; jj<(sFinder.nFirst-1); jj++){
+ if( sFinder.aFirst[jj+1]>io ) break;
+ }
+
+ if( sFinder.aFirst[jj]<io ){
+ memset(aSeen, 0, nPhrase);
+ rc = fts5SnippetScore(pApi, pFts, nDocsize, aSeen, i,
+ sFinder.aFirst[jj], nToken, &nScore, 0
+ );
+
+ nScore += (sFinder.aFirst[jj]==0 ? 120 : 100);
+ if( rc==SQLITE_OK && nScore>nBestScore ){
+ nBestScore = nScore;
+ iBestCol = i;
+ iBestStart = sFinder.aFirst[jj];
+ nColSize = nDocsize;
+ }
+ }
+ }
+ }
+ }
+ }
+
+ if( rc==SQLITE_OK ){
+ rc = pApi->xColumnText(pFts, iBestCol, &ctx.zIn, &ctx.nIn);
+ }
+ if( rc==SQLITE_OK && nColSize==0 ){
+ rc = pApi->xColumnSize(pFts, iBestCol, &nColSize);
+ }
+ if( ctx.zIn ){
+ if( rc==SQLITE_OK ){
+ rc = fts5CInstIterInit(pApi, pFts, iBestCol, &ctx.iter);
+ }
+
+ ctx.iRangeStart = iBestStart;
+ ctx.iRangeEnd = iBestStart + nToken - 1;
+
+ if( iBestStart>0 ){
+ fts5HighlightAppend(&rc, &ctx, zEllips, -1);
+ }
+
+ /* Advance iterator ctx.iter so that it points to the first coalesced
+ ** phrase instance at or following position iBestStart. */
+ while( ctx.iter.iStart>=0 && ctx.iter.iStart<iBestStart && rc==SQLITE_OK ){
+ rc = fts5CInstIterNext(&ctx.iter);
+ }
+
+ if( rc==SQLITE_OK ){
+ rc = pApi->xTokenize(pFts, ctx.zIn, ctx.nIn, (void*)&ctx,fts5HighlightCb);
+ }
+ if( ctx.iRangeEnd>=(nColSize-1) ){
+ fts5HighlightAppend(&rc, &ctx, &ctx.zIn[ctx.iOff], ctx.nIn - ctx.iOff);
+ }else{
+ fts5HighlightAppend(&rc, &ctx, zEllips, -1);
+ }
+ }
+ if( rc==SQLITE_OK ){
+ sqlite3_result_text(pCtx, (const char*)ctx.zOut, -1, SQLITE_TRANSIENT);
+ }else{
+ sqlite3_result_error_code(pCtx, rc);
+ }
+ sqlite3_free(ctx.zOut);
+ sqlite3_free(aSeen);
+ sqlite3_free(sFinder.aFirst);
+}
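+
+/* Usage note (illustrative): from SQL, the function above is invoked
+** as, for example:
+**
+**   SELECT snippet(ft, -1, '<b>', '</b>', '...', 10)
+**   FROM ft WHERE ft MATCH 'xyz';
+**
+** where "ft" is a hypothetical fts5 table.  A column index of -1 asks
+** the function to choose the best column itself, and 10 is the maximum
+** snippet size in tokens.
+*/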
+
+/************************************************************************/
+
+/*
+** The first time the bm25() function is called for a query, an instance
+** of the following structure is allocated and populated.
+*/
+typedef struct Fts5Bm25Data Fts5Bm25Data;
+struct Fts5Bm25Data {
+ int nPhrase; /* Number of phrases in query */
+ double avgdl; /* Average number of tokens in each row */
+ double *aIDF; /* IDF for each phrase */
+ double *aFreq; /* Array used to calculate phrase freq. */
+};
+
+/*
+** Callback used by fts5Bm25GetData() to count the number of rows in the
+** table matched by each individual phrase within the query.
+*/
+static int fts5CountCb(
+ const Fts5ExtensionApi *pApi,
+ Fts5Context *pFts,
+ void *pUserData /* Pointer to sqlite3_int64 variable */
+){
+ sqlite3_int64 *pn = (sqlite3_int64*)pUserData;
+ UNUSED_PARAM2(pApi, pFts);
+ (*pn)++;
+ return SQLITE_OK;
+}
+
+/*
+** Set *ppData to point to the Fts5Bm25Data object for the current query.
+** If the object has not already been allocated, allocate and populate it
+** now.
+*/
+static int fts5Bm25GetData(
+ const Fts5ExtensionApi *pApi,
+ Fts5Context *pFts,
+ Fts5Bm25Data **ppData /* OUT: bm25-data object for this query */
+){
+ int rc = SQLITE_OK; /* Return code */
+ Fts5Bm25Data *p; /* Object to return */
+
+ p = pApi->xGetAuxdata(pFts, 0);
+ if( p==0 ){
+ int nPhrase; /* Number of phrases in query */
+ sqlite3_int64 nRow = 0; /* Number of rows in table */
+ sqlite3_int64 nToken = 0; /* Number of tokens in table */
+ sqlite3_int64 nByte; /* Bytes of space to allocate */
+ int i;
+
+ /* Allocate the Fts5Bm25Data object */
+ nPhrase = pApi->xPhraseCount(pFts);
+ nByte = sizeof(Fts5Bm25Data) + nPhrase*2*sizeof(double);
+ p = (Fts5Bm25Data*)sqlite3_malloc64(nByte);
+ if( p==0 ){
+ rc = SQLITE_NOMEM;
+ }else{
+ memset(p, 0, (size_t)nByte);
+ p->nPhrase = nPhrase;
+ p->aIDF = (double*)&p[1];
+ p->aFreq = &p->aIDF[nPhrase];
+ }
+
+ /* Calculate the average document length for this FTS5 table */
+ if( rc==SQLITE_OK ) rc = pApi->xRowCount(pFts, &nRow);
+ assert( rc!=SQLITE_OK || nRow>0 );
+ if( rc==SQLITE_OK ) rc = pApi->xColumnTotalSize(pFts, -1, &nToken);
+ if( rc==SQLITE_OK ) p->avgdl = (double)nToken / (double)nRow;
+
+ /* Calculate an IDF for each phrase in the query */
+ for(i=0; rc==SQLITE_OK && i<nPhrase; i++){
+ sqlite3_int64 nHit = 0;
+ rc = pApi->xQueryPhrase(pFts, i, (void*)&nHit, fts5CountCb);
+ if( rc==SQLITE_OK ){
+ /* Calculate the IDF (Inverse Document Frequency) for phrase i.
+ ** This is done using the standard BM25 formula as found on wikipedia:
+ **
+ ** IDF = log( (N - nHit + 0.5) / (nHit + 0.5) )
+ **
+ ** where "N" is the total number of documents in the set and nHit
+ ** is the number that contain at least one instance of the phrase
+ ** under consideration.
+ **
+ ** The problem with this is that if (N < 2*nHit), the IDF is
+      ** negative, which is undesirable. So the minimum allowable IDF is
+      ** (1e-6) - roughly the same as a term that appears in just over
+      ** half of a set of 5,000,000 documents.  */
+ double idf = log( (nRow - nHit + 0.5) / (nHit + 0.5) );
+ if( idf<=0.0 ) idf = 1e-6;
+ p->aIDF[i] = idf;
+ }
+ }
+
+ if( rc!=SQLITE_OK ){
+ sqlite3_free(p);
+ }else{
+ rc = pApi->xSetAuxdata(pFts, p, sqlite3_free);
+ }
+ if( rc!=SQLITE_OK ) p = 0;
+ }
+ *ppData = p;
+ return rc;
+}
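+
+/* Numeric example of the IDF calculation above: in a table of
+** nRow==1000 rows where a phrase matches nHit==10 of them,
+**
+**   idf = log( (1000 - 10 + 0.5) / (10 + 0.5) ) = log(94.33..) ~= 4.55
+**
+** using the natural logarithm.  A phrase matching 900 of the 1000 rows
+** would produce a negative value, which the clamp above replaces with
+** the minimum of 1e-6.
+*/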
+
+/*
+** Implementation of bm25() function.
+*/
+static void fts5Bm25Function(
+ const Fts5ExtensionApi *pApi, /* API offered by current FTS version */
+ Fts5Context *pFts, /* First arg to pass to pApi functions */
+ sqlite3_context *pCtx, /* Context for returning result/error */
+ int nVal, /* Number of values in apVal[] array */
+ sqlite3_value **apVal /* Array of trailing arguments */
+){
+ const double k1 = 1.2; /* Constant "k1" from BM25 formula */
+ const double b = 0.75; /* Constant "b" from BM25 formula */
+ int rc = SQLITE_OK; /* Error code */
+ double score = 0.0; /* SQL function return value */
+ Fts5Bm25Data *pData; /* Values allocated/calculated once only */
+ int i; /* Iterator variable */
+ int nInst = 0; /* Value returned by xInstCount() */
+ double D = 0.0; /* Total number of tokens in row */
+ double *aFreq = 0; /* Array of phrase freq. for current row */
+
+ /* Calculate the phrase frequency (symbol "f(qi,D)" in the documentation)
+ ** for each phrase in the query for the current row. */
+ rc = fts5Bm25GetData(pApi, pFts, &pData);
+ if( rc==SQLITE_OK ){
+ aFreq = pData->aFreq;
+ memset(aFreq, 0, sizeof(double) * pData->nPhrase);
+ rc = pApi->xInstCount(pFts, &nInst);
+ }
+ for(i=0; rc==SQLITE_OK && i<nInst; i++){
+ int ip; int ic; int io;
+ rc = pApi->xInst(pFts, i, &ip, &ic, &io);
+ if( rc==SQLITE_OK ){
+ double w = (nVal > ic) ? sqlite3_value_double(apVal[ic]) : 1.0;
+ aFreq[ip] += w;
+ }
+ }
+
+ /* Figure out the total size of the current row in tokens. */
+ if( rc==SQLITE_OK ){
+ int nTok;
+ rc = pApi->xColumnSize(pFts, -1, &nTok);
+ D = (double)nTok;
+ }
+
+ /* Determine the BM25 score for the current row. */
+ for(i=0; rc==SQLITE_OK && i<pData->nPhrase; i++){
+ score += pData->aIDF[i] * (
+ ( aFreq[i] * (k1 + 1.0) ) /
+ ( aFreq[i] + k1 * (1 - b + b * D / pData->avgdl) )
+ );
+ }
+
+ /* If no error has occurred, return the calculated score. Otherwise,
+ ** throw an SQL exception. */
+ if( rc==SQLITE_OK ){
+ sqlite3_result_double(pCtx, -1.0 * score);
+ }else{
+ sqlite3_result_error_code(pCtx, rc);
+ }
+}
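+
+/* Usage note (illustrative): from SQL, the function above is invoked
+** as, for example:
+**
+**   SELECT * FROM ft WHERE ft MATCH 'xyz' ORDER BY bm25(ft);
+**
+** where "ft" is a hypothetical fts5 table.  Because the score is
+** returned negated, better matches are more negative and an ascending
+** ORDER BY lists them first.  Optional trailing arguments supply
+** per-column weights, e.g. bm25(ft, 10.0, 5.0); they are read from
+** apVal[ic] in the loop above.
+*/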
+
+static int sqlite3Fts5AuxInit(fts5_api *pApi){
+ struct Builtin {
+ const char *zFunc; /* Function name (nul-terminated) */
+ void *pUserData; /* User-data pointer */
+ fts5_extension_function xFunc;/* Callback function */
+ void (*xDestroy)(void*); /* Destructor function */
+ } aBuiltin [] = {
+ { "snippet", 0, fts5SnippetFunction, 0 },
+ { "highlight", 0, fts5HighlightFunction, 0 },
+ { "bm25", 0, fts5Bm25Function, 0 },
+ };
+ int rc = SQLITE_OK; /* Return code */
+ int i; /* To iterate through builtin functions */
+
+ for(i=0; rc==SQLITE_OK && i<ArraySize(aBuiltin); i++){
+ rc = pApi->xCreateFunction(pApi,
+ aBuiltin[i].zFunc,
+ aBuiltin[i].pUserData,
+ aBuiltin[i].xFunc,
+ aBuiltin[i].xDestroy
+ );
+ }
+
+ return rc;
+}
+
+/*
+** 2014 May 31
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+******************************************************************************
+*/
+
+
+
+/* #include "fts5Int.h" */
+
+static int sqlite3Fts5BufferSize(int *pRc, Fts5Buffer *pBuf, u32 nByte){
+ if( (u32)pBuf->nSpace<nByte ){
+ u64 nNew = pBuf->nSpace ? pBuf->nSpace : 64;
+ u8 *pNew;
+ while( nNew<nByte ){
+ nNew = nNew * 2;
+ }
+ pNew = sqlite3_realloc64(pBuf->p, nNew);
+ if( pNew==0 ){
+ *pRc = SQLITE_NOMEM;
+ return 1;
+ }else{
+ pBuf->nSpace = (int)nNew;
+ pBuf->p = pNew;
+ }
+ }
+ return 0;
+}
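+
+/* Worked example of the growth policy above: a buffer with nSpace==64
+** that is asked to hold nByte==100 doubles once (64 -> 128) before the
+** single sqlite3_realloc64() call, so a long run of small appends costs
+** amortized constant time per byte.
+*/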
+
+
+/*
+** Encode value iVal as an SQLite varint and append it to the buffer object
+** pBuf. If an OOM error occurs, set the error code in *pRc.
+*/
+static void sqlite3Fts5BufferAppendVarint(int *pRc, Fts5Buffer *pBuf, i64 iVal){
+ if( fts5BufferGrow(pRc, pBuf, 9) ) return;
+ pBuf->n += sqlite3Fts5PutVarint(&pBuf->p[pBuf->n], iVal);
+}
+
+static void sqlite3Fts5Put32(u8 *aBuf, int iVal){
+ aBuf[0] = (iVal>>24) & 0x00FF;
+ aBuf[1] = (iVal>>16) & 0x00FF;
+ aBuf[2] = (iVal>> 8) & 0x00FF;
+ aBuf[3] = (iVal>> 0) & 0x00FF;
+}
+
+static int sqlite3Fts5Get32(const u8 *aBuf){
+ return (int)((((u32)aBuf[0])<<24) + (aBuf[1]<<16) + (aBuf[2]<<8) + aBuf[3]);
+}
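+
+/* Round-trip example for the two helpers above: sqlite3Fts5Put32()
+** stores iVal==0x01020304 as the big-endian bytes {0x01,0x02,0x03,0x04}
+** and sqlite3Fts5Get32() reassembles those bytes into 0x01020304.
+*/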
+
+/*
+** Append buffer nData/pData to buffer pBuf. If an OOM error occurs, set
+** the error code in *pRc. If an error has already occurred when this function
+** is called, it is a no-op.
+*/
+static void sqlite3Fts5BufferAppendBlob(
+ int *pRc,
+ Fts5Buffer *pBuf,
+ u32 nData,
+ const u8 *pData
+){
+ assert_nc( *pRc || nData>=0 );
+ if( nData ){
+ if( fts5BufferGrow(pRc, pBuf, nData) ) return;
+ memcpy(&pBuf->p[pBuf->n], pData, nData);
+ pBuf->n += nData;
+ }
+}
+
+/*
+** Append the nul-terminated string zStr to the buffer pBuf. This function
+** ensures that the byte following the buffer data is set to 0x00, even
+** though this byte is not included in the pBuf->n count.
+*/
+static void sqlite3Fts5BufferAppendString(
+ int *pRc,
+ Fts5Buffer *pBuf,
+ const char *zStr
+){
+ int nStr = (int)strlen(zStr);
+ sqlite3Fts5BufferAppendBlob(pRc, pBuf, nStr+1, (const u8*)zStr);
+ pBuf->n--;
+}
+
+/*
+** Argument zFmt is a printf() style format string. This function performs
+** the printf() style processing, then appends the results to buffer pBuf.
+**
+** Like sqlite3Fts5BufferAppendString(), this function ensures that the byte
+** following the buffer data is set to 0x00, even though this byte is not
+** included in the pBuf->n count.
+*/
+static void sqlite3Fts5BufferAppendPrintf(
+ int *pRc,
+ Fts5Buffer *pBuf,
+ char *zFmt, ...
+){
+ if( *pRc==SQLITE_OK ){
+ char *zTmp;
+ va_list ap;
+ va_start(ap, zFmt);
+ zTmp = sqlite3_vmprintf(zFmt, ap);
+ va_end(ap);
+
+ if( zTmp==0 ){
+ *pRc = SQLITE_NOMEM;
+ }else{
+ sqlite3Fts5BufferAppendString(pRc, pBuf, zTmp);
+ sqlite3_free(zTmp);
+ }
+ }
+}
+
+static char *sqlite3Fts5Mprintf(int *pRc, const char *zFmt, ...){
+ char *zRet = 0;
+ if( *pRc==SQLITE_OK ){
+ va_list ap;
+ va_start(ap, zFmt);
+ zRet = sqlite3_vmprintf(zFmt, ap);
+ va_end(ap);
+ if( zRet==0 ){
+ *pRc = SQLITE_NOMEM;
+ }
+ }
+ return zRet;
+}
+
+
+/*
+** Free any buffer allocated by pBuf. Zero the structure before returning.
+*/
+static void sqlite3Fts5BufferFree(Fts5Buffer *pBuf){
+ sqlite3_free(pBuf->p);
+ memset(pBuf, 0, sizeof(Fts5Buffer));
+}
+
+/*
+** Zero the contents of the buffer object. But do not free the associated
+** memory allocation.
+*/
+static void sqlite3Fts5BufferZero(Fts5Buffer *pBuf){
+ pBuf->n = 0;
+}
+
+/*
+** Set the buffer to contain nData/pData. If an OOM error occurs, leave an
+** error code in *pRc. If an error has already occurred when this function
+** is called, it is a no-op.
+*/
+static void sqlite3Fts5BufferSet(
+ int *pRc,
+ Fts5Buffer *pBuf,
+ int nData,
+ const u8 *pData
+){
+ pBuf->n = 0;
+ sqlite3Fts5BufferAppendBlob(pRc, pBuf, nData, pData);
+}
+
+static int sqlite3Fts5PoslistNext64(
+ const u8 *a, int n, /* Buffer containing poslist */
+ int *pi, /* IN/OUT: Offset within a[] */
+ i64 *piOff /* IN/OUT: Current offset */
+){
+ int i = *pi;
+ if( i>=n ){
+ /* EOF */
+ *piOff = -1;
+ return 1;
+ }else{
+ i64 iOff = *piOff;
+ int iVal;
+ fts5FastGetVarint32(a, i, iVal);
+ if( iVal<=1 ){
+ if( iVal==0 ){
+ *pi = i;
+ return 0;
+ }
+ fts5FastGetVarint32(a, i, iVal);
+ iOff = ((i64)iVal) << 32;
+ fts5FastGetVarint32(a, i, iVal);
+ if( iVal<2 ){
+ /* This is a corrupt record. So stop parsing it here. */
+ *piOff = -1;
+ return 1;
+ }
+ }
+ *piOff = iOff + ((iVal-2) & 0x7FFFFFFF);
+ *pi = i;
+ return 0;
+ }
+}
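+
+/* Format note with a worked example: a position list is a sequence of
+** varints.  Each position is stored as (delta from the previous
+** position)+2, and the reserved value 0x01 marks a column change and
+** is followed by a varint holding the new column number.  For example,
+** positions {1, 4} in column 0 followed by position {2} in column 1
+** encode as the five varints:
+**
+**   0x03 0x05 0x01 0x01 0x04
+**
+** Decoded by the routine above, the iPos values ((col<<32)+offset) are
+** 1, 4 and (1<<32)+2, after which *piOff is set to -1 to signal EOF.
+*/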
+
+
+/*
+** Advance the iterator object passed as the only argument. Return true
+** if the iterator reaches EOF, or false otherwise.
+*/
+static int sqlite3Fts5PoslistReaderNext(Fts5PoslistReader *pIter){
+ if( sqlite3Fts5PoslistNext64(pIter->a, pIter->n, &pIter->i, &pIter->iPos) ){
+ pIter->bEof = 1;
+ }
+ return pIter->bEof;
+}
+
+static int sqlite3Fts5PoslistReaderInit(
+ const u8 *a, int n, /* Poslist buffer to iterate through */
+ Fts5PoslistReader *pIter /* Iterator object to initialize */
+){
+ memset(pIter, 0, sizeof(*pIter));
+ pIter->a = a;
+ pIter->n = n;
+ sqlite3Fts5PoslistReaderNext(pIter);
+ return pIter->bEof;
+}
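+
+/* A minimal, compiled-out usage sketch for the reader above.  The
+** fts5PoslistDump() name and the stdio output are hypothetical; the
+** field accesses match how Fts5PoslistReader is used in this file. */
+#if 0
+#include <stdio.h>
+static void fts5PoslistDump(const u8 *a, int n){
+  Fts5PoslistReader r;
+  for(sqlite3Fts5PoslistReaderInit(a, n, &r); r.bEof==0;
+      sqlite3Fts5PoslistReaderNext(&r)
+  ){
+    printf("col=%d off=%d\n", (int)(r.iPos>>32), (int)(r.iPos&0x7FFFFFFF));
+  }
+}
+#endif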
+
+/*
+** Append position iPos to the position list being accumulated in buffer
+** pBuf, which must already be large enough to hold the new data.
+** The previous position written to this list is *piPrev. *piPrev is set
+** to iPos before returning.
+*/
+static void sqlite3Fts5PoslistSafeAppend(
+ Fts5Buffer *pBuf,
+ i64 *piPrev,
+ i64 iPos
+){
+ static const i64 colmask = ((i64)(0x7FFFFFFF)) << 32;
+ if( (iPos & colmask) != (*piPrev & colmask) ){
+ pBuf->p[pBuf->n++] = 1;
+ pBuf->n += sqlite3Fts5PutVarint(&pBuf->p[pBuf->n], (iPos>>32));
+ *piPrev = (iPos & colmask);
+ }
+ pBuf->n += sqlite3Fts5PutVarint(&pBuf->p[pBuf->n], (iPos-*piPrev)+2);
+ *piPrev = iPos;
+}
+
+static int sqlite3Fts5PoslistWriterAppend(
+ Fts5Buffer *pBuf,
+ Fts5PoslistWriter *pWriter,
+ i64 iPos
+){
+ int rc = 0; /* Initialized only to suppress erroneous warning from Clang */
+ if( fts5BufferGrow(&rc, pBuf, 5+5+5) ) return rc;
+ sqlite3Fts5PoslistSafeAppend(pBuf, &pWriter->iPrev, iPos);
+ return SQLITE_OK;
+}
+
+static void *sqlite3Fts5MallocZero(int *pRc, sqlite3_int64 nByte){
+ void *pRet = 0;
+ if( *pRc==SQLITE_OK ){
+ pRet = sqlite3_malloc64(nByte);
+ if( pRet==0 ){
+ if( nByte>0 ) *pRc = SQLITE_NOMEM;
+ }else{
+ memset(pRet, 0, (size_t)nByte);
+ }
+ }
+ return pRet;
+}
+
+/*
+** Return a nul-terminated copy of the string indicated by pIn. If nIn
+** is non-negative, then it is the length of the string in bytes. Otherwise,
+** the length of the string is determined using strlen().
+**
+** It is the responsibility of the caller to eventually free the returned
+** buffer using sqlite3_free(). If an OOM error occurs, NULL is returned.
+*/
+static char *sqlite3Fts5Strndup(int *pRc, const char *pIn, int nIn){
+ char *zRet = 0;
+ if( *pRc==SQLITE_OK ){
+ if( nIn<0 ){
+ nIn = (int)strlen(pIn);
+ }
+ zRet = (char*)sqlite3_malloc(nIn+1);
+ if( zRet ){
+ memcpy(zRet, pIn, nIn);
+ zRet[nIn] = '\0';
+ }else{
+ *pRc = SQLITE_NOMEM;
+ }
+ }
+ return zRet;
+}
+
+
+/*
+** Return true if character 't' may be part of an FTS5 bareword, or false
+** otherwise. Characters that may be part of barewords:
+**
+** * All non-ASCII characters,
+** * The 52 upper and lower case ASCII characters,
+** * The 10 decimal digit ASCII characters,
+** * The underscore character "_" (0x5F), and
+** * The unicode "substitute" character (0x1A).
+*/
+static int sqlite3Fts5IsBareword(char t){
+ u8 aBareword[128] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 0x00 .. 0x0F */
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, /* 0x10 .. 0x1F */
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 0x20 .. 0x2F */
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, /* 0x30 .. 0x3F */
+ 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 0x40 .. 0x4F */
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, /* 0x50 .. 0x5F */
+ 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 0x60 .. 0x6F */
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0 /* 0x70 .. 0x7F */
+ };
+
+ return (t & 0x80) || aBareword[(int)t];
+}
+
+
+/*************************************************************************
+*/
+typedef struct Fts5TermsetEntry Fts5TermsetEntry;
+struct Fts5TermsetEntry {
+ char *pTerm;
+ int nTerm;
+ int iIdx; /* Index (main or aPrefix[] entry) */
+ Fts5TermsetEntry *pNext;
+};
+
+struct Fts5Termset {
+ Fts5TermsetEntry *apHash[512];
+};
+
+static int sqlite3Fts5TermsetNew(Fts5Termset **pp){
+ int rc = SQLITE_OK;
+ *pp = sqlite3Fts5MallocZero(&rc, sizeof(Fts5Termset));
+ return rc;
+}
+
+static int sqlite3Fts5TermsetAdd(
+ Fts5Termset *p,
+ int iIdx,
+ const char *pTerm, int nTerm,
+ int *pbPresent
+){
+ int rc = SQLITE_OK;
+ *pbPresent = 0;
+ if( p ){
+ int i;
+ u32 hash = 13;
+ Fts5TermsetEntry *pEntry;
+
+ /* Calculate a hash value for this term. This is the same hash checksum
+ ** used by the fts5_hash.c module. This is not important for correct
+ ** operation of the module, but is necessary to ensure that some tests
+ ** designed to produce hash table collisions really do work. */
+ for(i=nTerm-1; i>=0; i--){
+ hash = (hash << 3) ^ hash ^ pTerm[i];
+ }
+ hash = (hash << 3) ^ hash ^ iIdx;
+ hash = hash % ArraySize(p->apHash);
+
+ for(pEntry=p->apHash[hash]; pEntry; pEntry=pEntry->pNext){
+ if( pEntry->iIdx==iIdx
+ && pEntry->nTerm==nTerm
+ && memcmp(pEntry->pTerm, pTerm, nTerm)==0
+ ){
+ *pbPresent = 1;
+ break;
+ }
+ }
+
+ if( pEntry==0 ){
+ pEntry = sqlite3Fts5MallocZero(&rc, sizeof(Fts5TermsetEntry) + nTerm);
+ if( pEntry ){
+ pEntry->pTerm = (char*)&pEntry[1];
+ pEntry->nTerm = nTerm;
+ pEntry->iIdx = iIdx;
+ memcpy(pEntry->pTerm, pTerm, nTerm);
+ pEntry->pNext = p->apHash[hash];
+ p->apHash[hash] = pEntry;
+ }
+ }
+ }
+
+ return rc;
+}
+
+static void sqlite3Fts5TermsetFree(Fts5Termset *p){
+ if( p ){
+ u32 i;
+ for(i=0; i<ArraySize(p->apHash); i++){
+ Fts5TermsetEntry *pEntry = p->apHash[i];
+ while( pEntry ){
+ Fts5TermsetEntry *pDel = pEntry;
+ pEntry = pEntry->pNext;
+ sqlite3_free(pDel);
+ }
+ }
+ sqlite3_free(p);
+ }
+}
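+
+/*
+** A minimal sketch of the termset lifecycle (the term and index values
+** are hypothetical):
+**
+**   Fts5Termset *pSet = 0;
+**   int bPresent = 0;
+**   int rc = sqlite3Fts5TermsetNew(&pSet);
+**   if( rc==SQLITE_OK ) rc = sqlite3Fts5TermsetAdd(pSet, 0, "abc", 3, &bPresent);
+**   /* bPresent==0: first time "abc" was added for index 0 */
+**   if( rc==SQLITE_OK ) rc = sqlite3Fts5TermsetAdd(pSet, 0, "abc", 3, &bPresent);
+**   /* bPresent==1: duplicate detected */
+**   sqlite3Fts5TermsetFree(pSet);
+*/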
+
+/*
+** 2014 Jun 09
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+******************************************************************************
+**
+** This is an SQLite module implementing full-text search.
+*/
+
+
+/* #include "fts5Int.h" */
+
+#define FTS5_DEFAULT_PAGE_SIZE 4050
+#define FTS5_DEFAULT_AUTOMERGE 4
+#define FTS5_DEFAULT_USERMERGE 4
+#define FTS5_DEFAULT_CRISISMERGE 16
+#define FTS5_DEFAULT_HASHSIZE (1024*1024)
+
+/* Maximum allowed page size */
+#define FTS5_MAX_PAGE_SIZE (64*1024)
+
+static int fts5_iswhitespace(char x){
+ return (x==' ');
+}
+
+static int fts5_isopenquote(char x){
+ return (x=='"' || x=='\'' || x=='[' || x=='`');
+}
+
+/*
+** Argument pIn points to a character that is part of a nul-terminated
+** string. Return a pointer to the first character following *pIn in
+** the string that is not a white-space character.
+*/
+static const char *fts5ConfigSkipWhitespace(const char *pIn){
+ const char *p = pIn;
+ if( p ){
+ while( fts5_iswhitespace(*p) ){ p++; }
+ }
+ return p;
+}
+
+/*
+** Argument pIn points to a character that is part of a nul-terminated
+** string. Return a pointer to the first character following *pIn in
+** the string that is not a "bareword" character.
+*/
+static const char *fts5ConfigSkipBareword(const char *pIn){
+ const char *p = pIn;
+ while ( sqlite3Fts5IsBareword(*p) ) p++;
+ if( p==pIn ) p = 0;
+ return p;
+}
+
+static int fts5_isdigit(char a){
+ return (a>='0' && a<='9');
+}
+
+
+
+static const char *fts5ConfigSkipLiteral(const char *pIn){
+ const char *p = pIn;
+ switch( *p ){
+ case 'n': case 'N':
+ if( sqlite3_strnicmp("null", p, 4)==0 ){
+ p = &p[4];
+ }else{
+ p = 0;
+ }
+ break;
+
+ case 'x': case 'X':
+ p++;
+ if( *p=='\'' ){
+ p++;
+ while( (*p>='a' && *p<='f')
+ || (*p>='A' && *p<='F')
+ || (*p>='0' && *p<='9')
+ ){
+ p++;
+ }
+ if( *p=='\'' && 0==((p-pIn)%2) ){
+ p++;
+ }else{
+ p = 0;
+ }
+ }else{
+ p = 0;
+ }
+ break;
+
+ case '\'':
+ p++;
+ while( p ){
+ if( *p=='\'' ){
+ p++;
+ if( *p!='\'' ) break;
+ }
+ p++;
+ if( *p==0 ) p = 0;
+ }
+ break;
+
+ default:
+ /* maybe a number */
+ if( *p=='+' || *p=='-' ) p++;
+ while( fts5_isdigit(*p) ) p++;
+
+ /* At this point, if the literal was an integer, the parse is
+ ** finished. Or, if it is a floating point value, it may continue
+ ** with a decimal point (exponents are not handled here). */
+ if( *p=='.' && fts5_isdigit(p[1]) ){
+ p += 2;
+ while( fts5_isdigit(*p) ) p++;
+ }
+ if( p==pIn ) p = 0;
+
+ break;
+ }
+
+ return p;
+}
+
+/*
+** The first character of the string pointed to by argument z is guaranteed
+** to be an open-quote character (see function fts5_isopenquote()).
+**
+** This function searches for the corresponding close-quote character within
+** the string and, if found, dequotes the string in place and adds a new
+** nul-terminator byte.
+**
+** If the close-quote is found, the value returned is the byte offset of
+** the character immediately following it. Or, if the close-quote is not
+** found, -1 is returned. If -1 is returned, the buffer is left in an
+** undefined state.
+*/
+static int fts5Dequote(char *z){
+ char q;
+ int iIn = 1;
+ int iOut = 0;
+ q = z[0];
+
+ /* Set stack variable q to the close-quote character */
+ assert( q=='[' || q=='\'' || q=='"' || q=='`' );
+ if( q=='[' ) q = ']';
+
+ while( z[iIn] ){
+ if( z[iIn]==q ){
+ if( z[iIn+1]!=q ){
+ /* Character iIn was the close quote. */
+ iIn++;
+ break;
+ }else{
+ /* Character iIn and iIn+1 form an escaped quote character. Skip
+ ** the input cursor past both and copy a single quote character
+ ** to the output buffer. */
+ iIn += 2;
+ z[iOut++] = q;
+ }
+ }else{
+ z[iOut++] = z[iIn++];
+ }
+ }
+
+ z[iOut] = '\0';
+ return iIn;
+}
+
+/*
+** Convert an SQL-style quoted string into a normal string by removing
+** the quote characters. The conversion is done in-place. If the
+** input does not begin with a quote character, then this routine
+** is a no-op.
+**
+** Examples:
+**
+** "abc" becomes abc
+** 'xyz' becomes xyz
+** [pqr] becomes pqr
+** `mno` becomes mno
+*/
+static void sqlite3Fts5Dequote(char *z){
+ char quote; /* Quote character (if any) */
+
+ assert( 0==fts5_iswhitespace(z[0]) );
+ quote = z[0];
+ if( quote=='[' || quote=='\'' || quote=='"' || quote=='`' ){
+ fts5Dequote(z);
+ }
+}
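+
+/*
+** An embedded quote character is escaped by doubling it. For example,
+** the same in-place conversion gives:
+**
+**   'it''s' becomes it's
+**   "a""b" becomes a"b
+*/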
+
+
+struct Fts5Enum {
+ const char *zName;
+ int eVal;
+};
+typedef struct Fts5Enum Fts5Enum;
+
+static int fts5ConfigSetEnum(
+ const Fts5Enum *aEnum,
+ const char *zEnum,
+ int *peVal
+){
+ int nEnum = (int)strlen(zEnum);
+ int i;
+ int iVal = -1;
+
+ for(i=0; aEnum[i].zName; i++){
+ if( sqlite3_strnicmp(aEnum[i].zName, zEnum, nEnum)==0 ){
+ if( iVal>=0 ) return SQLITE_ERROR;
+ iVal = aEnum[i].eVal;
+ }
+ }
+
+ *peVal = iVal;
+ return iVal<0 ? SQLITE_ERROR : SQLITE_OK;
+}
+
+/*
+** Parse a "special" CREATE VIRTUAL TABLE directive and update
+** configuration object pConfig as appropriate.
+**
+** If successful, object pConfig is updated and SQLITE_OK returned. If
+** an error occurs, an SQLite error code is returned and an error message
+** may be left in *pzErr. It is the responsibility of the caller to
+** eventually free any such error message using sqlite3_free().
+*/
+static int fts5ConfigParseSpecial(
+ Fts5Global *pGlobal,
+ Fts5Config *pConfig, /* Configuration object to update */
+ const char *zCmd, /* Special command to parse */
+ const char *zArg, /* Argument to parse */
+ char **pzErr /* OUT: Error message */
+){
+ int rc = SQLITE_OK;
+ int nCmd = (int)strlen(zCmd);
+ if( sqlite3_strnicmp("prefix", zCmd, nCmd)==0 ){
+ const int nByte = sizeof(int) * FTS5_MAX_PREFIX_INDEXES;
+ const char *p;
+ int bFirst = 1;
+ if( pConfig->aPrefix==0 ){
+ pConfig->aPrefix = sqlite3Fts5MallocZero(&rc, nByte);
+ if( rc ) return rc;
+ }
+
+ p = zArg;
+ while( 1 ){
+ int nPre = 0;
+
+ while( p[0]==' ' ) p++;
+ if( bFirst==0 && p[0]==',' ){
+ p++;
+ while( p[0]==' ' ) p++;
+ }else if( p[0]=='\0' ){
+ break;
+ }
+ if( p[0]<'0' || p[0]>'9' ){
+ *pzErr = sqlite3_mprintf("malformed prefix=... directive");
+ rc = SQLITE_ERROR;
+ break;
+ }
+
+ if( pConfig->nPrefix==FTS5_MAX_PREFIX_INDEXES ){
+ *pzErr = sqlite3_mprintf(
+ "too many prefix indexes (max %d)", FTS5_MAX_PREFIX_INDEXES
+ );
+ rc = SQLITE_ERROR;
+ break;
+ }
+
+ while( p[0]>='0' && p[0]<='9' && nPre<1000 ){
+ nPre = nPre*10 + (p[0] - '0');
+ p++;
+ }
+
+ if( nPre<=0 || nPre>=1000 ){
+ *pzErr = sqlite3_mprintf("prefix length out of range (max 999)");
+ rc = SQLITE_ERROR;
+ break;
+ }
+
+ pConfig->aPrefix[pConfig->nPrefix] = nPre;
+ pConfig->nPrefix++;
+ bFirst = 0;
+ }
+ assert( pConfig->nPrefix<=FTS5_MAX_PREFIX_INDEXES );
+ return rc;
+ }
+
+ if( sqlite3_strnicmp("tokenize", zCmd, nCmd)==0 ){
+ const char *p = (const char*)zArg;
+ sqlite3_int64 nArg = strlen(zArg) + 1;
+ char **azArg = sqlite3Fts5MallocZero(&rc, sizeof(char*) * nArg);
+ char *pDel = sqlite3Fts5MallocZero(&rc, nArg * 2);
+ char *pSpace = pDel;
+
+ if( azArg && pSpace ){
+ if( pConfig->pTok ){
+ *pzErr = sqlite3_mprintf("multiple tokenize=... directives");
+ rc = SQLITE_ERROR;
+ }else{
+ for(nArg=0; p && *p; nArg++){
+ const char *p2 = fts5ConfigSkipWhitespace(p);
+ if( *p2=='\'' ){
+ p = fts5ConfigSkipLiteral(p2);
+ }else{
+ p = fts5ConfigSkipBareword(p2);
+ }
+ if( p ){
+ memcpy(pSpace, p2, p-p2);
+ azArg[nArg] = pSpace;
+ sqlite3Fts5Dequote(pSpace);
+ pSpace += (p - p2) + 1;
+ p = fts5ConfigSkipWhitespace(p);
+ }
+ }
+ if( p==0 ){
+ *pzErr = sqlite3_mprintf("parse error in tokenize directive");
+ rc = SQLITE_ERROR;
+ }else{
+ rc = sqlite3Fts5GetTokenizer(pGlobal,
+ (const char**)azArg, (int)nArg, &pConfig->pTok, &pConfig->pTokApi,
+ pzErr
+ );
+ }
+ }
+ }
+
+ sqlite3_free(azArg);
+ sqlite3_free(pDel);
+ return rc;
+ }
+
+ if( sqlite3_strnicmp("content", zCmd, nCmd)==0 ){
+ if( pConfig->eContent!=FTS5_CONTENT_NORMAL ){
+ *pzErr = sqlite3_mprintf("multiple content=... directives");
+ rc = SQLITE_ERROR;
+ }else{
+ if( zArg[0] ){
+ pConfig->eContent = FTS5_CONTENT_EXTERNAL;
+ pConfig->zContent = sqlite3Fts5Mprintf(&rc, "%Q.%Q", pConfig->zDb,zArg);
+ }else{
+ pConfig->eContent = FTS5_CONTENT_NONE;
+ }
+ }
+ return rc;
+ }
+
+ if( sqlite3_strnicmp("content_rowid", zCmd, nCmd)==0 ){
+ if( pConfig->zContentRowid ){
+ *pzErr = sqlite3_mprintf("multiple content_rowid=... directives");
+ rc = SQLITE_ERROR;
+ }else{
+ pConfig->zContentRowid = sqlite3Fts5Strndup(&rc, zArg, -1);
+ }
+ return rc;
+ }
+
+ if( sqlite3_strnicmp("columnsize", zCmd, nCmd)==0 ){
+ if( (zArg[0]!='0' && zArg[0]!='1') || zArg[1]!='\0' ){
+ *pzErr = sqlite3_mprintf("malformed columnsize=... directive");
+ rc = SQLITE_ERROR;
+ }else{
+ pConfig->bColumnsize = (zArg[0]=='1');
+ }
+ return rc;
+ }
+
+ if( sqlite3_strnicmp("detail", zCmd, nCmd)==0 ){
+ const Fts5Enum aDetail[] = {
+ { "none", FTS5_DETAIL_NONE },
+ { "full", FTS5_DETAIL_FULL },
+ { "columns", FTS5_DETAIL_COLUMNS },
+ { 0, 0 }
+ };
+
+ if( (rc = fts5ConfigSetEnum(aDetail, zArg, &pConfig->eDetail)) ){
+ *pzErr = sqlite3_mprintf("malformed detail=... directive");
+ }
+ return rc;
+ }
+
+ *pzErr = sqlite3_mprintf("unrecognized option: \"%.*s\"", nCmd, zCmd);
+ return SQLITE_ERROR;
+}
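+
+/*
+** For illustration, the directives handled above appear in a CREATE
+** VIRTUAL TABLE statement as follows (a hypothetical schema):
+**
+**   CREATE VIRTUAL TABLE ft USING fts5(
+**     a, b,
+**     prefix = '2 3',
+**     tokenize = 'porter unicode61',
+**     detail = none,
+**     columnsize = 0
+**   );
+*/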
+
+/*
+** Allocate an instance of the default tokenizer ("unicode61") at
+** Fts5Config.pTok. Return SQLITE_OK if successful, or an SQLite error
+** code if an error occurs.
+*/
+static int fts5ConfigDefaultTokenizer(Fts5Global *pGlobal, Fts5Config *pConfig){
+ assert( pConfig->pTok==0 && pConfig->pTokApi==0 );
+ return sqlite3Fts5GetTokenizer(
+ pGlobal, 0, 0, &pConfig->pTok, &pConfig->pTokApi, 0
+ );
+}
+
+/*
+** Gobble up the first bareword or quoted word from the input buffer zIn.
+** Return a pointer to the character immediately following the last in
+** the gobbled word if successful, or a NULL pointer otherwise (failed
+** to find close-quote character).
+**
+** Before returning, set pzOut to point to a new buffer containing a
+** nul-terminated, dequoted copy of the gobbled word. If the word was
+** quoted, *pbQuoted is also set to 1 before returning.
+**
+** If *pRc is other than SQLITE_OK when this function is called, it is
+** a no-op (NULL is returned). Otherwise, if an OOM occurs within this
+** function, *pRc is set to SQLITE_NOMEM before returning. *pRc is *not*
+** set if a parse error (failed to find close quote) occurs.
+*/
+static const char *fts5ConfigGobbleWord(
+ int *pRc, /* IN/OUT: Error code */
+ const char *zIn, /* Buffer to gobble string/bareword from */
+ char **pzOut, /* OUT: malloc'd buffer containing str/bw */
+ int *pbQuoted /* OUT: Set to true if dequoting required */
+){
+ const char *zRet = 0;
+
+ sqlite3_int64 nIn = strlen(zIn);
+ char *zOut = sqlite3_malloc64(nIn+1);
+
+ assert( *pRc==SQLITE_OK );
+ *pbQuoted = 0;
+ *pzOut = 0;
+
+ if( zOut==0 ){
+ *pRc = SQLITE_NOMEM;
+ }else{
+ memcpy(zOut, zIn, (size_t)(nIn+1));
+ if( fts5_isopenquote(zOut[0]) ){
+ int ii = fts5Dequote(zOut);
+ zRet = &zIn[ii];
+ *pbQuoted = 1;
+ }else{
+ zRet = fts5ConfigSkipBareword(zIn);
+ if( zRet ){
+ zOut[zRet-zIn] = '\0';
+ }
+ }
+ }
+
+ if( zRet==0 ){
+ sqlite3_free(zOut);
+ }else{
+ *pzOut = zOut;
+ }
+
+ return zRet;
+}
+
+static int fts5ConfigParseColumn(
+ Fts5Config *p,
+ char *zCol,
+ char *zArg,
+ char **pzErr
+){
+ int rc = SQLITE_OK;
+ if( 0==sqlite3_stricmp(zCol, FTS5_RANK_NAME)
+ || 0==sqlite3_stricmp(zCol, FTS5_ROWID_NAME)
+ ){
+ *pzErr = sqlite3_mprintf("reserved fts5 column name: %s", zCol);
+ rc = SQLITE_ERROR;
+ }else if( zArg ){
+ if( 0==sqlite3_stricmp(zArg, "unindexed") ){
+ p->abUnindexed[p->nCol] = 1;
+ }else{
+ *pzErr = sqlite3_mprintf("unrecognized column option: %s", zArg);
+ rc = SQLITE_ERROR;
+ }
+ }
+
+ p->azCol[p->nCol++] = zCol;
+ return rc;
+}
+
+/*
+** Populate the Fts5Config.zContentExprlist string.
+*/
+static int fts5ConfigMakeExprlist(Fts5Config *p){
+ int i;
+ int rc = SQLITE_OK;
+ Fts5Buffer buf = {0, 0, 0};
+
+ sqlite3Fts5BufferAppendPrintf(&rc, &buf, "T.%Q", p->zContentRowid);
+ if( p->eContent!=FTS5_CONTENT_NONE ){
+ for(i=0; i<p->nCol; i++){
+ if( p->eContent==FTS5_CONTENT_EXTERNAL ){
+ sqlite3Fts5BufferAppendPrintf(&rc, &buf, ", T.%Q", p->azCol[i]);
+ }else{
+ sqlite3Fts5BufferAppendPrintf(&rc, &buf, ", T.c%d", i);
+ }
+ }
+ }
+
+ assert( p->zContentExprlist==0 );
+ p->zContentExprlist = (char*)buf.p;
+ return rc;
+}
+
+/*
+** Arguments nArg/azArg contain the string arguments passed to the xCreate
+** or xConnect method of the virtual table. This function attempts to
+** allocate an instance of Fts5Config containing the results of parsing
+** those arguments.
+**
+** If successful, SQLITE_OK is returned and *ppOut is set to point to the
+** new Fts5Config object. If an error occurs, an SQLite error code is
+** returned, *ppOut is set to NULL and an error message may be left in
+** *pzErr. It is the responsibility of the caller to eventually free any
+** such error message using sqlite3_free().
+*/
+static int sqlite3Fts5ConfigParse(
+ Fts5Global *pGlobal,
+ sqlite3 *db,
+ int nArg, /* Number of arguments */
+ const char **azArg, /* Array of nArg CREATE VIRTUAL TABLE args */
+ Fts5Config **ppOut, /* OUT: Results of parse */
+ char **pzErr /* OUT: Error message */
+){
+ int rc = SQLITE_OK; /* Return code */
+ Fts5Config *pRet; /* New object to return */
+ int i;
+ sqlite3_int64 nByte;
+
+ *ppOut = pRet = (Fts5Config*)sqlite3_malloc(sizeof(Fts5Config));
+ if( pRet==0 ) return SQLITE_NOMEM;
+ memset(pRet, 0, sizeof(Fts5Config));
+ pRet->db = db;
+ pRet->iCookie = -1;
+
+ nByte = nArg * (sizeof(char*) + sizeof(u8));
+ pRet->azCol = (char**)sqlite3Fts5MallocZero(&rc, nByte);
+ pRet->abUnindexed = (u8*)&pRet->azCol[nArg];
+ pRet->zDb = sqlite3Fts5Strndup(&rc, azArg[1], -1);
+ pRet->zName = sqlite3Fts5Strndup(&rc, azArg[2], -1);
+ pRet->bColumnsize = 1;
+ pRet->eDetail = FTS5_DETAIL_FULL;
+#ifdef SQLITE_DEBUG
+ pRet->bPrefixIndex = 1;
+#endif
+ if( rc==SQLITE_OK && sqlite3_stricmp(pRet->zName, FTS5_RANK_NAME)==0 ){
+ *pzErr = sqlite3_mprintf("reserved fts5 table name: %s", pRet->zName);
+ rc = SQLITE_ERROR;
+ }
+
+ for(i=3; rc==SQLITE_OK && i<nArg; i++){
+ const char *zOrig = azArg[i];
+ const char *z;
+ char *zOne = 0;
+ char *zTwo = 0;
+ int bOption = 0;
+ int bMustBeCol = 0;
+
+ z = fts5ConfigGobbleWord(&rc, zOrig, &zOne, &bMustBeCol);
+ z = fts5ConfigSkipWhitespace(z);
+ if( z && *z=='=' ){
+ bOption = 1;
+ z++;
+ if( bMustBeCol ) z = 0;
+ }
+ z = fts5ConfigSkipWhitespace(z);
+ if( z && z[0] ){
+ int bDummy;
+ z = fts5ConfigGobbleWord(&rc, z, &zTwo, &bDummy);
+ if( z && z[0] ) z = 0;
+ }
+
+ if( rc==SQLITE_OK ){
+ if( z==0 ){
+ *pzErr = sqlite3_mprintf("parse error in \"%s\"", zOrig);
+ rc = SQLITE_ERROR;
+ }else{
+ if( bOption ){
+ rc = fts5ConfigParseSpecial(pGlobal, pRet, zOne, zTwo?zTwo:"", pzErr);
+ }else{
+ rc = fts5ConfigParseColumn(pRet, zOne, zTwo, pzErr);
+ zOne = 0;
+ }
+ }
+ }
+
+ sqlite3_free(zOne);
+ sqlite3_free(zTwo);
+ }
+
+ /* If a tokenizer= option was successfully parsed, the tokenizer has
+ ** already been allocated. Otherwise, allocate an instance of the default
+ ** tokenizer (unicode61) now. */
+ if( rc==SQLITE_OK && pRet->pTok==0 ){
+ rc = fts5ConfigDefaultTokenizer(pGlobal, pRet);
+ }
+
+ /* If no zContent option was specified, fill in the default values. */
+ if( rc==SQLITE_OK && pRet->zContent==0 ){
+ const char *zTail = 0;
+ assert( pRet->eContent==FTS5_CONTENT_NORMAL
+ || pRet->eContent==FTS5_CONTENT_NONE
+ );
+ if( pRet->eContent==FTS5_CONTENT_NORMAL ){
+ zTail = "content";
+ }else if( pRet->bColumnsize ){
+ zTail = "docsize";
+ }
+
+ if( zTail ){
+ pRet->zContent = sqlite3Fts5Mprintf(
+ &rc, "%Q.'%q_%s'", pRet->zDb, pRet->zName, zTail
+ );
+ }
+ }
+
+ if( rc==SQLITE_OK && pRet->zContentRowid==0 ){
+ pRet->zContentRowid = sqlite3Fts5Strndup(&rc, "rowid", -1);
+ }
+
+ /* Formulate the zContentExprlist text */
+ if( rc==SQLITE_OK ){
+ rc = fts5ConfigMakeExprlist(pRet);
+ }
+
+ if( rc!=SQLITE_OK ){
+ sqlite3Fts5ConfigFree(pRet);
+ *ppOut = 0;
+ }
+ return rc;
+}
+
+/*
+** Free the configuration object passed as the only argument.
+*/
+static void sqlite3Fts5ConfigFree(Fts5Config *pConfig){
+ if( pConfig ){
+ int i;
+ if( pConfig->pTok ){
+ pConfig->pTokApi->xDelete(pConfig->pTok);
+ }
+ sqlite3_free(pConfig->zDb);
+ sqlite3_free(pConfig->zName);
+ for(i=0; i<pConfig->nCol; i++){
+ sqlite3_free(pConfig->azCol[i]);
+ }
+ sqlite3_free(pConfig->azCol);
+ sqlite3_free(pConfig->aPrefix);
+ sqlite3_free(pConfig->zRank);
+ sqlite3_free(pConfig->zRankArgs);
+ sqlite3_free(pConfig->zContent);
+ sqlite3_free(pConfig->zContentRowid);
+ sqlite3_free(pConfig->zContentExprlist);
+ sqlite3_free(pConfig);
+ }
+}
+
+/*
+** Call sqlite3_declare_vtab() based on the contents of the configuration
+** object passed as the only argument. Return SQLITE_OK if successful, or
+** an SQLite error code if an error occurs.
+*/
+static int sqlite3Fts5ConfigDeclareVtab(Fts5Config *pConfig){
+ int i;
+ int rc = SQLITE_OK;
+ char *zSql;
+
+ zSql = sqlite3Fts5Mprintf(&rc, "CREATE TABLE x(");
+ for(i=0; zSql && i<pConfig->nCol; i++){
+ const char *zSep = (i==0?"":", ");
+ zSql = sqlite3Fts5Mprintf(&rc, "%z%s%Q", zSql, zSep, pConfig->azCol[i]);
+ }
+ zSql = sqlite3Fts5Mprintf(&rc, "%z, %Q HIDDEN, %s HIDDEN)",
+ zSql, pConfig->zName, FTS5_RANK_NAME
+ );
+
+ assert( zSql || rc==SQLITE_NOMEM );
+ if( zSql ){
+ rc = sqlite3_declare_vtab(pConfig->db, zSql);
+ sqlite3_free(zSql);
+ }
+
+ return rc;
+}
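+
+/*
+** For example, for a table created with "fts5(a, b)" and named "ft",
+** the statement passed to sqlite3_declare_vtab() above reads:
+**
+**   CREATE TABLE x('a', 'b', 'ft' HIDDEN, rank HIDDEN)
+*/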
+
+/*
+** Tokenize the text passed via the second and third arguments.
+**
+** The callback is invoked once for each token in the input text. The
+** arguments passed to it are, in order:
+**
+** void *pCtx // Copy of 5th argument to sqlite3Fts5Tokenize()
+** int tflags // Mask of FTS5_TOKEN_* flags
+** const char *pToken // Pointer to buffer containing token
+** int nToken // Size of token in bytes
+** int iStart // Byte offset of start of token within input text
+** int iEnd // Byte offset of end of token within input text
+**
+** If the callback returns a non-zero value the tokenization is abandoned
+** and no further callbacks are issued.
+**
+** This function returns SQLITE_OK if successful or an SQLite error code
+** if an error occurs. If the tokenization was abandoned early because
+** the callback returned SQLITE_DONE, this is not an error and this function
+** still returns SQLITE_OK. Or, if the tokenization was abandoned early
+** because the callback returned another non-zero value, it is assumed
+** to be an SQLite error code and returned to the caller.
+*/
+static int sqlite3Fts5Tokenize(
+ Fts5Config *pConfig, /* FTS5 Configuration object */
+ int flags, /* FTS5_TOKENIZE_* flags */
+ const char *pText, int nText, /* Text to tokenize */
+ void *pCtx, /* Context passed to xToken() */
+ int (*xToken)(void*, int, const char*, int, int, int) /* Callback */
+){
+ if( pText==0 ) return SQLITE_OK;
+ return pConfig->pTokApi->xTokenize(
+ pConfig->pTok, pCtx, flags, pText, nText, xToken
+ );
+}
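+
+/*
+** A minimal sketch of an xToken callback compatible with the signature
+** above (the names countTokenCb and nTok are hypothetical):
+**
+**   static int countTokenCb(
+**     void *pCtx, int tflags,
+**     const char *pToken, int nToken,
+**     int iStart, int iEnd
+**   ){
+**     (*(int*)pCtx)++;              /* count each token reported */
+**     return SQLITE_OK;             /* non-zero abandons tokenization */
+**   }
+**
+**   int nTok = 0;
+**   int rc = sqlite3Fts5Tokenize(pConfig, FTS5_TOKENIZE_AUX,
+**       zText, (int)strlen(zText), (void*)&nTok, countTokenCb);
+*/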
+
+/*
+** Argument pIn points to the first character in what is expected to be
+** a comma-separated list of SQL literals followed by a ')' character.
+** If it actually is this, return a pointer to the ')'. Otherwise, return
+** NULL to indicate a parse error.
+*/
+static const char *fts5ConfigSkipArgs(const char *pIn){
+ const char *p = pIn;
+
+ while( 1 ){
+ p = fts5ConfigSkipWhitespace(p);
+ p = fts5ConfigSkipLiteral(p);
+ p = fts5ConfigSkipWhitespace(p);
+ if( p==0 || *p==')' ) break;
+ if( *p!=',' ){
+ p = 0;
+ break;
+ }
+ p++;
+ }
+
+ return p;
+}
+
+/*
+** Parameter zIn contains a rank() function specification. The format of
+** this is:
+**
+** + Bareword (function name)
+** + Open parenthesis - "("
+** + Zero or more SQL literals in a comma separated list
+** + Close parenthesis - ")"
+*/
+static int sqlite3Fts5ConfigParseRank(
+ const char *zIn, /* Input string */
+ char **pzRank, /* OUT: Rank function name */
+ char **pzRankArgs /* OUT: Rank function arguments */
+){
+ const char *p = zIn;
+ const char *pRank;
+ char *zRank = 0;
+ char *zRankArgs = 0;
+ int rc = SQLITE_OK;
+
+ *pzRank = 0;
+ *pzRankArgs = 0;
+
+ if( p==0 ){
+ rc = SQLITE_ERROR;
+ }else{
+ p = fts5ConfigSkipWhitespace(p);
+ pRank = p;
+ p = fts5ConfigSkipBareword(p);
+
+ if( p ){
+ zRank = sqlite3Fts5MallocZero(&rc, 1 + p - pRank);
+ if( zRank ) memcpy(zRank, pRank, p-pRank);
+ }else{
+ rc = SQLITE_ERROR;
+ }
+
+ if( rc==SQLITE_OK ){
+ p = fts5ConfigSkipWhitespace(p);
+ if( *p!='(' ) rc = SQLITE_ERROR;
+ p++;
+ }
+ if( rc==SQLITE_OK ){
+ const char *pArgs;
+ p = fts5ConfigSkipWhitespace(p);
+ pArgs = p;
+ if( *p!=')' ){
+ p = fts5ConfigSkipArgs(p);
+ if( p==0 ){
+ rc = SQLITE_ERROR;
+ }else{
+ zRankArgs = sqlite3Fts5MallocZero(&rc, 1 + p - pArgs);
+ if( zRankArgs ) memcpy(zRankArgs, pArgs, p-pArgs);
+ }
+ }
+ }
+ }
+
+ if( rc!=SQLITE_OK ){
+ sqlite3_free(zRank);
+ assert( zRankArgs==0 );
+ }else{
+ *pzRank = zRank;
+ *pzRankArgs = zRankArgs;
+ }
+ return rc;
+}
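+
+/*
+** For example, parsing the rank specification
+**
+**   bm25(10.0, 5.0)
+**
+** sets *pzRank to "bm25" and *pzRankArgs to "10.0, 5.0". For a
+** specification with no arguments - "bm25()" - *pzRankArgs is left NULL.
+*/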
+
+static int sqlite3Fts5ConfigSetValue(
+ Fts5Config *pConfig,
+ const char *zKey,
+ sqlite3_value *pVal,
+ int *pbBadkey
+){
+ int rc = SQLITE_OK;
+
+ if( 0==sqlite3_stricmp(zKey, "pgsz") ){
+ int pgsz = 0;
+ if( SQLITE_INTEGER==sqlite3_value_numeric_type(pVal) ){
+ pgsz = sqlite3_value_int(pVal);
+ }
+ if( pgsz<32 || pgsz>FTS5_MAX_PAGE_SIZE ){
+ *pbBadkey = 1;
+ }else{
+ pConfig->pgsz = pgsz;
+ }
+ }
+
+ else if( 0==sqlite3_stricmp(zKey, "hashsize") ){
+ int nHashSize = -1;
+ if( SQLITE_INTEGER==sqlite3_value_numeric_type(pVal) ){
+ nHashSize = sqlite3_value_int(pVal);
+ }
+ if( nHashSize<=0 ){
+ *pbBadkey = 1;
+ }else{
+ pConfig->nHashSize = nHashSize;
+ }
+ }
+
+ else if( 0==sqlite3_stricmp(zKey, "automerge") ){
+ int nAutomerge = -1;
+ if( SQLITE_INTEGER==sqlite3_value_numeric_type(pVal) ){
+ nAutomerge = sqlite3_value_int(pVal);
+ }
+ if( nAutomerge<0 || nAutomerge>64 ){
+ *pbBadkey = 1;
+ }else{
+ if( nAutomerge==1 ) nAutomerge = FTS5_DEFAULT_AUTOMERGE;
+ pConfig->nAutomerge = nAutomerge;
+ }
+ }
+
+ else if( 0==sqlite3_stricmp(zKey, "usermerge") ){
+ int nUsermerge = -1;
+ if( SQLITE_INTEGER==sqlite3_value_numeric_type(pVal) ){
+ nUsermerge = sqlite3_value_int(pVal);
+ }
+ if( nUsermerge<2 || nUsermerge>16 ){
+ *pbBadkey = 1;
+ }else{
+ pConfig->nUsermerge = nUsermerge;
+ }
+ }
+
+ else if( 0==sqlite3_stricmp(zKey, "crisismerge") ){
+ int nCrisisMerge = -1;
+ if( SQLITE_INTEGER==sqlite3_value_numeric_type(pVal) ){
+ nCrisisMerge = sqlite3_value_int(pVal);
+ }
+ if( nCrisisMerge<0 ){
+ *pbBadkey = 1;
+ }else{
+ if( nCrisisMerge<=1 ) nCrisisMerge = FTS5_DEFAULT_CRISISMERGE;
+ if( nCrisisMerge>=FTS5_MAX_SEGMENT ) nCrisisMerge = FTS5_MAX_SEGMENT-1;
+ pConfig->nCrisisMerge = nCrisisMerge;
+ }
+ }
+
+ else if( 0==sqlite3_stricmp(zKey, "rank") ){
+ const char *zIn = (const char*)sqlite3_value_text(pVal);
+ char *zRank;
+ char *zRankArgs;
+ rc = sqlite3Fts5ConfigParseRank(zIn, &zRank, &zRankArgs);
+ if( rc==SQLITE_OK ){
+ sqlite3_free(pConfig->zRank);
+ sqlite3_free(pConfig->zRankArgs);
+ pConfig->zRank = zRank;
+ pConfig->zRankArgs = zRankArgs;
+ }else if( rc==SQLITE_ERROR ){
+ rc = SQLITE_OK;
+ *pbBadkey = 1;
+ }
+ }else{
+ *pbBadkey = 1;
+ }
+ return rc;
+}
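+
+/*
+** These keys are normally set from SQL by inserting into the table's
+** shadow-name column, e.g. (the table name "ft" is hypothetical):
+**
+**   INSERT INTO ft(ft, rank) VALUES('pgsz', 8192);
+**   INSERT INTO ft(ft, rank) VALUES('usermerge', 4);
+*/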
+
+/*
+** Load the contents of the %_config table into memory.
+*/
+static int sqlite3Fts5ConfigLoad(Fts5Config *pConfig, int iCookie){
+ const char *zSelect = "SELECT k, v FROM %Q.'%q_config'";
+ char *zSql;
+ sqlite3_stmt *p = 0;
+ int rc = SQLITE_OK;
+ int iVersion = 0;
+
+ /* Set default values */
+ pConfig->pgsz = FTS5_DEFAULT_PAGE_SIZE;
+ pConfig->nAutomerge = FTS5_DEFAULT_AUTOMERGE;
+ pConfig->nUsermerge = FTS5_DEFAULT_USERMERGE;
+ pConfig->nCrisisMerge = FTS5_DEFAULT_CRISISMERGE;
+ pConfig->nHashSize = FTS5_DEFAULT_HASHSIZE;
+
+ zSql = sqlite3Fts5Mprintf(&rc, zSelect, pConfig->zDb, pConfig->zName);
+ if( zSql ){
+ rc = sqlite3_prepare_v2(pConfig->db, zSql, -1, &p, 0);
+ sqlite3_free(zSql);
+ }
+
+ assert( rc==SQLITE_OK || p==0 );
+ if( rc==SQLITE_OK ){
+ while( SQLITE_ROW==sqlite3_step(p) ){
+ const char *zK = (const char*)sqlite3_column_text(p, 0);
+ sqlite3_value *pVal = sqlite3_column_value(p, 1);
+ if( 0==sqlite3_stricmp(zK, "version") ){
+ iVersion = sqlite3_value_int(pVal);
+ }else{
+ int bDummy = 0;
+ sqlite3Fts5ConfigSetValue(pConfig, zK, pVal, &bDummy);
+ }
+ }
+ rc = sqlite3_finalize(p);
+ }
+
+ if( rc==SQLITE_OK && iVersion!=FTS5_CURRENT_VERSION ){
+ rc = SQLITE_ERROR;
+ if( pConfig->pzErrmsg ){
+ assert( 0==*pConfig->pzErrmsg );
+ *pConfig->pzErrmsg = sqlite3_mprintf(
+ "invalid fts5 file format (found %d, expected %d) - run 'rebuild'",
+ iVersion, FTS5_CURRENT_VERSION
+ );
+ }
+ }
+
+ if( rc==SQLITE_OK ){
+ pConfig->iCookie = iCookie;
+ }
+ return rc;
+}
+
+/*
+** 2014 May 31
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+******************************************************************************
+**
+*/
+
+
+
+/* #include "fts5Int.h" */
+/* #include "fts5parse.h" */
+
+/*
+** All token types in the generated fts5parse.h file are greater than 0.
+*/
+#define FTS5_EOF 0
+
+#define FTS5_LARGEST_INT64 (0xffffffff|(((i64)0x7fffffff)<<32))
+
+typedef struct Fts5ExprTerm Fts5ExprTerm;
+
+/*
+** Functions generated by lemon from fts5parse.y.
+*/
+static void *sqlite3Fts5ParserAlloc(void *(*mallocProc)(u64));
+static void sqlite3Fts5ParserFree(void*, void (*freeProc)(void*));
+static void sqlite3Fts5Parser(void*, int, Fts5Token, Fts5Parse*);
+#ifndef NDEBUG
+/* #include <stdio.h> */
+static void sqlite3Fts5ParserTrace(FILE*, char*);
+#endif
+static int sqlite3Fts5ParserFallback(int);
+
+
+struct Fts5Expr {
+ Fts5Index *pIndex;
+ Fts5Config *pConfig;
+ Fts5ExprNode *pRoot;
+ int bDesc; /* Iterate in descending rowid order */
+ int nPhrase; /* Number of phrases in expression */
+ Fts5ExprPhrase **apExprPhrase; /* Pointers to phrase objects */
+};
+
+/*
+** eType:
+** Expression node type. Always one of:
+**
+** FTS5_AND (nChild, apChild valid)
+** FTS5_OR (nChild, apChild valid)
+** FTS5_NOT (nChild, apChild valid)
+** FTS5_STRING (pNear valid)
+** FTS5_TERM (pNear valid)
+*/
+struct Fts5ExprNode {
+ int eType; /* Node type */
+ int bEof; /* True at EOF */
+ int bNomatch; /* True if entry is not a match */
+
+ /* Next method for this node. */
+ int (*xNext)(Fts5Expr*, Fts5ExprNode*, int, i64);
+
+ i64 iRowid; /* Current rowid */
+ Fts5ExprNearset *pNear; /* For FTS5_STRING - cluster of phrases */
+
+ /* Child nodes. For a NOT node, this array always contains 2 entries. For
+ ** AND or OR nodes, it contains 2 or more entries. */
+ int nChild; /* Number of child nodes */
+ Fts5ExprNode *apChild[1]; /* Array of child nodes */
+};
+
+#define Fts5NodeIsString(p) ((p)->eType==FTS5_TERM || (p)->eType==FTS5_STRING)
+
+/*
+** Invoke the xNext method of an Fts5ExprNode object. This macro should be
+** used as if it has the same signature as the xNext() methods themselves.
+*/
+#define fts5ExprNodeNext(a,b,c,d) (b)->xNext((a), (b), (c), (d))
+
+/*
+** An instance of the following structure represents a single search term
+** or term prefix.
+*/
+struct Fts5ExprTerm {
+ u8 bPrefix; /* True for a prefix term */
+ u8 bFirst; /* True if token must be first in column */
+ char *zTerm; /* nul-terminated term */
+ Fts5IndexIter *pIter; /* Iterator for this term */
+ Fts5ExprTerm *pSynonym; /* Pointer to first in list of synonyms */
+};
+
+/*
+** A phrase. One or more terms that must appear in a contiguous sequence
+** within a document for it to match.
+*/
+struct Fts5ExprPhrase {
+ Fts5ExprNode *pNode; /* FTS5_STRING node this phrase is part of */
+ Fts5Buffer poslist; /* Current position list */
+ int nTerm; /* Number of entries in aTerm[] */
+ Fts5ExprTerm aTerm[1]; /* Terms that make up this phrase */
+};
+
+/*
+** One or more phrases that must appear within a certain token distance of
+** each other within each matching document.
+*/
+struct Fts5ExprNearset {
+ int nNear; /* NEAR parameter */
+ Fts5Colset *pColset; /* Columns to search (NULL -> all columns) */
+ int nPhrase; /* Number of entries in aPhrase[] array */
+ Fts5ExprPhrase *apPhrase[1]; /* Array of phrase pointers */
+};
+
+
+/*
+** Parse context.
+*/
+struct Fts5Parse {
+ Fts5Config *pConfig;
+ char *zErr;
+ int rc;
+ int nPhrase; /* Size of apPhrase array */
+ Fts5ExprPhrase **apPhrase; /* Array of all phrases */
+ Fts5ExprNode *pExpr; /* Result of a successful parse */
+};
+
+static void sqlite3Fts5ParseError(Fts5Parse *pParse, const char *zFmt, ...){
+ va_list ap;
+ va_start(ap, zFmt);
+ if( pParse->rc==SQLITE_OK ){
+ pParse->zErr = sqlite3_vmprintf(zFmt, ap);
+ pParse->rc = SQLITE_ERROR;
+ }
+ va_end(ap);
+}
+
+static int fts5ExprIsspace(char t){
+ return t==' ' || t=='\t' || t=='\n' || t=='\r';
+}
+
+/*
+** Read the first token from the nul-terminated string at *pz.
+*/
+static int fts5ExprGetToken(
+ Fts5Parse *pParse,
+ const char **pz, /* IN/OUT: Pointer into buffer */
+ Fts5Token *pToken
+){
+ const char *z = *pz;
+ int tok;
+
+ /* Skip past any whitespace */
+ while( fts5ExprIsspace(*z) ) z++;
+
+ pToken->p = z;
+ pToken->n = 1;
+ switch( *z ){
+ case '(': tok = FTS5_LP; break;
+ case ')': tok = FTS5_RP; break;
+ case '{': tok = FTS5_LCP; break;
+ case '}': tok = FTS5_RCP; break;
+ case ':': tok = FTS5_COLON; break;
+ case ',': tok = FTS5_COMMA; break;
+ case '+': tok = FTS5_PLUS; break;
+ case '*': tok = FTS5_STAR; break;
+ case '-': tok = FTS5_MINUS; break;
+ case '^': tok = FTS5_CARET; break;
+ case '\0': tok = FTS5_EOF; break;
+
+ case '"': {
+ const char *z2;
+ tok = FTS5_STRING;
+
+ for(z2=&z[1]; 1; z2++){
+ if( z2[0]=='"' ){
+ z2++;
+ if( z2[0]!='"' ) break;
+ }
+ if( z2[0]=='\0' ){
+ sqlite3Fts5ParseError(pParse, "unterminated string");
+ return FTS5_EOF;
+ }
+ }
+ pToken->n = (z2 - z);
+ break;
+ }
+
+ default: {
+ const char *z2;
+ if( sqlite3Fts5IsBareword(z[0])==0 ){
+ sqlite3Fts5ParseError(pParse, "fts5: syntax error near \"%.1s\"", z);
+ return FTS5_EOF;
+ }
+ tok = FTS5_STRING;
+ for(z2=&z[1]; sqlite3Fts5IsBareword(*z2); z2++);
+ pToken->n = (z2 - z);
+ if( pToken->n==2 && memcmp(pToken->p, "OR", 2)==0 ) tok = FTS5_OR;
+ if( pToken->n==3 && memcmp(pToken->p, "NOT", 3)==0 ) tok = FTS5_NOT;
+ if( pToken->n==3 && memcmp(pToken->p, "AND", 3)==0 ) tok = FTS5_AND;
+ break;
+ }
+ }
+
+ *pz = &pToken->p[pToken->n];
+ return tok;
+}
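+
+/*
+** For example, tokenizing the query string
+**
+**   a1 OR name : "x y" *
+**
+** produces the sequence FTS5_STRING("a1"), FTS5_OR, FTS5_STRING("name"),
+** FTS5_COLON, FTS5_STRING("\"x y\""), FTS5_STAR, FTS5_EOF. Quoted strings
+** keep their quote characters at this stage; they are dequoted later.
+*/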
+
+static void *fts5ParseAlloc(u64 t){ return sqlite3_malloc64((sqlite3_int64)t);}
+static void fts5ParseFree(void *p){ sqlite3_free(p); }
+
+static int sqlite3Fts5ExprNew(
+ Fts5Config *pConfig, /* FTS5 Configuration */
+ int iCol,
+ const char *zExpr, /* Expression text */
+ Fts5Expr **ppNew,
+ char **pzErr
+){
+ Fts5Parse sParse;
+ Fts5Token token;
+ const char *z = zExpr;
+ int t; /* Next token type */
+ void *pEngine;
+ Fts5Expr *pNew;
+
+ *ppNew = 0;
+ *pzErr = 0;
+ memset(&sParse, 0, sizeof(sParse));
+ pEngine = sqlite3Fts5ParserAlloc(fts5ParseAlloc);
+ if( pEngine==0 ){ return SQLITE_NOMEM; }
+ sParse.pConfig = pConfig;
+
+ do {
+ t = fts5ExprGetToken(&sParse, &z, &token);
+ sqlite3Fts5Parser(pEngine, t, token, &sParse);
+ }while( sParse.rc==SQLITE_OK && t!=FTS5_EOF );
+ sqlite3Fts5ParserFree(pEngine, fts5ParseFree);
+
+ /* If the LHS of the MATCH expression was a user column, apply the
+ ** implicit column-filter. */
+ if( iCol<pConfig->nCol && sParse.pExpr && sParse.rc==SQLITE_OK ){
+ int n = sizeof(Fts5Colset);
+ Fts5Colset *pColset = (Fts5Colset*)sqlite3Fts5MallocZero(&sParse.rc, n);
+ if( pColset ){
+ pColset->nCol = 1;
+ pColset->aiCol[0] = iCol;
+ sqlite3Fts5ParseSetColset(&sParse, sParse.pExpr, pColset);
+ }
+ }
+
+ assert( sParse.rc!=SQLITE_OK || sParse.zErr==0 );
+ if( sParse.rc==SQLITE_OK ){
+ *ppNew = pNew = sqlite3_malloc(sizeof(Fts5Expr));
+ if( pNew==0 ){
+ sParse.rc = SQLITE_NOMEM;
+ sqlite3Fts5ParseNodeFree(sParse.pExpr);
+ }else{
+ if( !sParse.pExpr ){
+ const int nByte = sizeof(Fts5ExprNode);
+ pNew->pRoot = (Fts5ExprNode*)sqlite3Fts5MallocZero(&sParse.rc, nByte);
+ if( pNew->pRoot ){
+ pNew->pRoot->bEof = 1;
+ }
+ }else{
+ pNew->pRoot = sParse.pExpr;
+ }
+ pNew->pIndex = 0;
+ pNew->pConfig = pConfig;
+ pNew->apExprPhrase = sParse.apPhrase;
+ pNew->nPhrase = sParse.nPhrase;
+ sParse.apPhrase = 0;
+ }
+ }else{
+ sqlite3Fts5ParseNodeFree(sParse.pExpr);
+ }
+
+ sqlite3_free(sParse.apPhrase);
+ *pzErr = sParse.zErr;
+ return sParse.rc;
+}
+
+/*
+** Free the expression node object passed as the only argument.
+*/
+static void sqlite3Fts5ParseNodeFree(Fts5ExprNode *p){
+ if( p ){
+ int i;
+ for(i=0; i<p->nChild; i++){
+ sqlite3Fts5ParseNodeFree(p->apChild[i]);
+ }
+ sqlite3Fts5ParseNearsetFree(p->pNear);
+ sqlite3_free(p);
+ }
+}
+
+/*
+** Free the expression object passed as the only argument.
+*/
+static void sqlite3Fts5ExprFree(Fts5Expr *p){
+ if( p ){
+ sqlite3Fts5ParseNodeFree(p->pRoot);
+ sqlite3_free(p->apExprPhrase);
+ sqlite3_free(p);
+ }
+}
+
+static int sqlite3Fts5ExprAnd(Fts5Expr **pp1, Fts5Expr *p2){
+ Fts5Parse sParse;
+ memset(&sParse, 0, sizeof(sParse));
+
+ if( *pp1 ){
+ Fts5Expr *p1 = *pp1;
+ int nPhrase = p1->nPhrase + p2->nPhrase;
+
+ p1->pRoot = sqlite3Fts5ParseNode(&sParse, FTS5_AND, p1->pRoot, p2->pRoot,0);
+ p2->pRoot = 0;
+
+ if( sParse.rc==SQLITE_OK ){
+ Fts5ExprPhrase **ap = (Fts5ExprPhrase**)sqlite3_realloc(
+ p1->apExprPhrase, nPhrase * sizeof(Fts5ExprPhrase*)
+ );
+ if( ap==0 ){
+ sParse.rc = SQLITE_NOMEM;
+ }else{
+ int i;
+ memmove(&ap[p2->nPhrase], ap, p1->nPhrase*sizeof(Fts5ExprPhrase*));
+ for(i=0; i<p2->nPhrase; i++){
+ ap[i] = p2->apExprPhrase[i];
+ }
+ p1->nPhrase = nPhrase;
+ p1->apExprPhrase = ap;
+ }
+ }
+ sqlite3_free(p2->apExprPhrase);
+ sqlite3_free(p2);
+ }else{
+ *pp1 = p2;
+ }
+
+ return sParse.rc;
+}
+
+/*
+** Argument pTerm must be a synonym iterator. Return the current rowid
+** that it points to.
+*/
+static i64 fts5ExprSynonymRowid(Fts5ExprTerm *pTerm, int bDesc, int *pbEof){
+ i64 iRet = 0;
+ int bRetValid = 0;
+ Fts5ExprTerm *p;
+
+ assert( pTerm->pSynonym );
+ assert( bDesc==0 || bDesc==1 );
+ for(p=pTerm; p; p=p->pSynonym){
+ if( 0==sqlite3Fts5IterEof(p->pIter) ){
+ i64 iRowid = p->pIter->iRowid;
+ if( bRetValid==0 || (bDesc!=(iRowid<iRet)) ){
+ iRet = iRowid;
+ bRetValid = 1;
+ }
+ }
+ }
+
+ if( pbEof && bRetValid==0 ) *pbEof = 1;
+ return iRet;
+}
+
+/*
+** Argument pTerm must be a synonym iterator.
+*/
+static int fts5ExprSynonymList(
+ Fts5ExprTerm *pTerm,
+ i64 iRowid,
+ Fts5Buffer *pBuf, /* Use this buffer for space if required */
+ u8 **pa, int *pn
+){
+ Fts5PoslistReader aStatic[4];
+ Fts5PoslistReader *aIter = aStatic;
+ int nIter = 0;
+ int nAlloc = 4;
+ int rc = SQLITE_OK;
+ Fts5ExprTerm *p;
+
+ assert( pTerm->pSynonym );
+ for(p=pTerm; p; p=p->pSynonym){
+ Fts5IndexIter *pIter = p->pIter;
+ if( sqlite3Fts5IterEof(pIter)==0 && pIter->iRowid==iRowid ){
+ if( pIter->nData==0 ) continue;
+ if( nIter==nAlloc ){
+ sqlite3_int64 nByte = sizeof(Fts5PoslistReader) * nAlloc * 2;
+ Fts5PoslistReader *aNew = (Fts5PoslistReader*)sqlite3_malloc64(nByte);
+ if( aNew==0 ){
+ rc = SQLITE_NOMEM;
+ goto synonym_poslist_out;
+ }
+ memcpy(aNew, aIter, sizeof(Fts5PoslistReader) * nIter);
+ nAlloc = nAlloc*2;
+ if( aIter!=aStatic ) sqlite3_free(aIter);
+ aIter = aNew;
+ }
+ sqlite3Fts5PoslistReaderInit(pIter->pData, pIter->nData, &aIter[nIter]);
+ assert( aIter[nIter].bEof==0 );
+ nIter++;
+ }
+ }
+
+ if( nIter==1 ){
+ *pa = (u8*)aIter[0].a;
+ *pn = aIter[0].n;
+ }else{
+ Fts5PoslistWriter writer = {0};
+ i64 iPrev = -1;
+ fts5BufferZero(pBuf);
+ while( 1 ){
+ int i;
+ i64 iMin = FTS5_LARGEST_INT64;
+ for(i=0; i<nIter; i++){
+ if( aIter[i].bEof==0 ){
+ if( aIter[i].iPos==iPrev ){
+ if( sqlite3Fts5PoslistReaderNext(&aIter[i]) ) continue;
+ }
+ if( aIter[i].iPos<iMin ){
+ iMin = aIter[i].iPos;
+ }
+ }
+ }
+ if( iMin==FTS5_LARGEST_INT64 || rc!=SQLITE_OK ) break;
+ rc = sqlite3Fts5PoslistWriterAppend(pBuf, &writer, iMin);
+ iPrev = iMin;
+ }
+ if( rc==SQLITE_OK ){
+ *pa = pBuf->p;
+ *pn = pBuf->n;
+ }
+ }
+
+ synonym_poslist_out:
+ if( aIter!=aStatic ) sqlite3_free(aIter);
+ return rc;
+}
+
+
+/*
+** All individual term iterators in pPhrase are guaranteed to be valid and
+** pointing to the same rowid when this function is called. This function
+** checks if the current rowid really is a match, and if so populates
+** the pPhrase->poslist buffer accordingly. Output parameter *pbMatch
+** is set to true if this is really a match, or false otherwise.
+**
+** SQLITE_OK is returned if no error occurs, or an SQLite error code
+** otherwise. It is not considered an error if the current rowid is
+** not a match.
+*/
+static int fts5ExprPhraseIsMatch(
+ Fts5ExprNode *pNode, /* Node pPhrase belongs to */
+ Fts5ExprPhrase *pPhrase, /* Phrase object to initialize */
+ int *pbMatch /* OUT: Set to true if really a match */
+){
+ Fts5PoslistWriter writer = {0};
+ Fts5PoslistReader aStatic[4];
+ Fts5PoslistReader *aIter = aStatic;
+ int i;
+ int rc = SQLITE_OK;
+ int bFirst = pPhrase->aTerm[0].bFirst;
+
+ fts5BufferZero(&pPhrase->poslist);
+
+ /* If the aStatic[] array is not large enough, allocate a large array
+ ** using sqlite3_malloc(). This approach could be improved upon. */
+ if( pPhrase->nTerm>ArraySize(aStatic) ){
+ sqlite3_int64 nByte = sizeof(Fts5PoslistReader) * pPhrase->nTerm;
+ aIter = (Fts5PoslistReader*)sqlite3_malloc64(nByte);
+ if( !aIter ) return SQLITE_NOMEM;
+ }
+ memset(aIter, 0, sizeof(Fts5PoslistReader) * pPhrase->nTerm);
+
+ /* Initialize a term iterator for each term in the phrase */
+ for(i=0; i<pPhrase->nTerm; i++){
+ Fts5ExprTerm *pTerm = &pPhrase->aTerm[i];
+ int n = 0;
+ int bFlag = 0;
+ u8 *a = 0;
+ if( pTerm->pSynonym ){
+ Fts5Buffer buf = {0, 0, 0};
+ rc = fts5ExprSynonymList(pTerm, pNode->iRowid, &buf, &a, &n);
+ if( rc ){
+ sqlite3_free(a);
+ goto ismatch_out;
+ }
+ if( a==buf.p ) bFlag = 1;
+ }else{
+ a = (u8*)pTerm->pIter->pData;
+ n = pTerm->pIter->nData;
+ }
+ sqlite3Fts5PoslistReaderInit(a, n, &aIter[i]);
+ aIter[i].bFlag = (u8)bFlag;
+ if( aIter[i].bEof ) goto ismatch_out;
+ }
+
+ while( 1 ){
+ int bMatch;
+ i64 iPos = aIter[0].iPos;
+ do {
+ bMatch = 1;
+ for(i=0; i<pPhrase->nTerm; i++){
+ Fts5PoslistReader *pPos = &aIter[i];
+ i64 iAdj = iPos + i;
+ if( pPos->iPos!=iAdj ){
+ bMatch = 0;
+ while( pPos->iPos<iAdj ){
+ if( sqlite3Fts5PoslistReaderNext(pPos) ) goto ismatch_out;
+ }
+ if( pPos->iPos>iAdj ) iPos = pPos->iPos-i;
+ }
+ }
+ }while( bMatch==0 );
+
+ /* Append position iPos to the output */
+ if( bFirst==0 || FTS5_POS2OFFSET(iPos)==0 ){
+ rc = sqlite3Fts5PoslistWriterAppend(&pPhrase->poslist, &writer, iPos);
+ if( rc!=SQLITE_OK ) goto ismatch_out;
+ }
+
+ for(i=0; i<pPhrase->nTerm; i++){
+ if( sqlite3Fts5PoslistReaderNext(&aIter[i]) ) goto ismatch_out;
+ }
+ }
+
+ ismatch_out:
+ *pbMatch = (pPhrase->poslist.n>0);
+ for(i=0; i<pPhrase->nTerm; i++){
+ if( aIter[i].bFlag ) sqlite3_free((u8*)aIter[i].a);
+ }
+ if( aIter!=aStatic ) sqlite3_free(aIter);
+ return rc;
+}
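+
+/*
+** For illustration: for the two-term phrase "a b", if term "a" occurs in
+** the current row at positions {2, 7} and term "b" at {3, 5}, the loop
+** above keeps only position 2 ("b" matches at 3==2+1, but there is no
+** "b" at 8), so pPhrase->poslist ends up containing the single entry 2.
+*/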
+
+typedef struct Fts5LookaheadReader Fts5LookaheadReader;
+struct Fts5LookaheadReader {
+ const u8 *a; /* Buffer containing position list */
+ int n; /* Size of buffer a[] in bytes */
+ int i; /* Current offset in position list */
+ i64 iPos; /* Current position */
+ i64 iLookahead; /* Next position */
+};
+
+#define FTS5_LOOKAHEAD_EOF (((i64)1) << 62)
+
+static int fts5LookaheadReaderNext(Fts5LookaheadReader *p){
+ p->iPos = p->iLookahead;
+ if( sqlite3Fts5PoslistNext64(p->a, p->n, &p->i, &p->iLookahead) ){
+ p->iLookahead = FTS5_LOOKAHEAD_EOF;
+ }
+ return (p->iPos==FTS5_LOOKAHEAD_EOF);
+}
+
+static int fts5LookaheadReaderInit(
+ const u8 *a, int n, /* Buffer to read position list from */
+ Fts5LookaheadReader *p /* Iterator object to initialize */
+){
+ memset(p, 0, sizeof(Fts5LookaheadReader));
+ p->a = a;
+ p->n = n;
+ fts5LookaheadReaderNext(p);
+ return fts5LookaheadReaderNext(p);
+}
+
+typedef struct Fts5NearTrimmer Fts5NearTrimmer;
+struct Fts5NearTrimmer {
+ Fts5LookaheadReader reader; /* Input iterator */
+ Fts5PoslistWriter writer; /* Writer context */
+ Fts5Buffer *pOut; /* Output poslist */
+};
+
+/*
+** The near-set object passed as the first argument contains more than
+** one phrase. All phrases currently point to the same row. The
+** Fts5ExprPhrase.poslist buffers are populated accordingly. This function
+** tests if the current row contains instances of each phrase sufficiently
+** close together to meet the NEAR constraint. Non-zero is returned if it
+** does, or zero otherwise.
+**
+** If in/out parameter (*pRc) is set to other than SQLITE_OK when this
+** function is called, it is a no-op. Or, if an error (e.g. SQLITE_NOMEM)
+** occurs within this function, (*pRc) is set accordingly before returning.
+** The return value is undefined in both these cases.
+**
+** If no error occurs and non-zero (a match) is returned, the position-list
+** of each phrase object is edited to contain only those entries that
+** meet the constraint before returning.
+*/
+static int fts5ExprNearIsMatch(int *pRc, Fts5ExprNearset *pNear){
+ Fts5NearTrimmer aStatic[4];
+ Fts5NearTrimmer *a = aStatic;
+ Fts5ExprPhrase **apPhrase = pNear->apPhrase;
+
+ int i;
+ int rc = *pRc;
+ int bMatch;
+
+ assert( pNear->nPhrase>1 );
+
+ /* If the aStatic[] array is not large enough, allocate a large array
+ ** using sqlite3_malloc(). This approach could be improved upon. */
+ if( pNear->nPhrase>ArraySize(aStatic) ){
+ sqlite3_int64 nByte = sizeof(Fts5NearTrimmer) * pNear->nPhrase;
+ a = (Fts5NearTrimmer*)sqlite3Fts5MallocZero(&rc, nByte);
+ }else{
+ memset(aStatic, 0, sizeof(aStatic));
+ }
+ if( rc!=SQLITE_OK ){
+ *pRc = rc;
+ return 0;
+ }
+
+ /* Initialize a lookahead iterator for each phrase. After passing the
+ ** buffer and buffer size to the lookaside-reader init function, zero
+ ** the phrase poslist buffer. The new poslist for the phrase (containing
+ ** the same entries as the original with some entries removed on account
+ ** of the NEAR constraint) is written over the original even as it is
+ ** being read. This is safe as the entries for the new poslist are a
+ ** subset of the old, so it is not possible for data yet to be read to
+ ** be overwritten. */
+ for(i=0; i<pNear->nPhrase; i++){
+ Fts5Buffer *pPoslist = &apPhrase[i]->poslist;
+ fts5LookaheadReaderInit(pPoslist->p, pPoslist->n, &a[i].reader);
+ pPoslist->n = 0;
+ a[i].pOut = pPoslist;
+ }
+
+ while( 1 ){
+ int iAdv;
+ i64 iMin;
+ i64 iMax;
+
+ /* This block advances the phrase iterators until they point to a set of
+ ** entries that together comprise a match. */
+ iMax = a[0].reader.iPos;
+ do {
+ bMatch = 1;
+ for(i=0; i<pNear->nPhrase; i++){
+ Fts5LookaheadReader *pPos = &a[i].reader;
+ iMin = iMax - pNear->apPhrase[i]->nTerm - pNear->nNear;
+ if( pPos->iPos<iMin || pPos->iPos>iMax ){
+ bMatch = 0;
+ while( pPos->iPos<iMin ){
+ if( fts5LookaheadReaderNext(pPos) ) goto ismatch_out;
+ }
+ if( pPos->iPos>iMax ) iMax = pPos->iPos;
+ }
+ }
+ }while( bMatch==0 );
+
+ /* Add an entry to each output position list */
+ for(i=0; i<pNear->nPhrase; i++){
+ i64 iPos = a[i].reader.iPos;
+ Fts5PoslistWriter *pWriter = &a[i].writer;
+ if( a[i].pOut->n==0 || iPos!=pWriter->iPrev ){
+ sqlite3Fts5PoslistWriterAppend(a[i].pOut, pWriter, iPos);
+ }
+ }
+
+ iAdv = 0;
+ iMin = a[0].reader.iLookahead;
+ for(i=0; i<pNear->nPhrase; i++){
+ if( a[i].reader.iLookahead < iMin ){
+ iMin = a[i].reader.iLookahead;
+ iAdv = i;
+ }
+ }
+ if( fts5LookaheadReaderNext(&a[iAdv].reader) ) goto ismatch_out;
+ }
+
+ ismatch_out: {
+ int bRet = a[0].pOut->n>0;
+ *pRc = rc;
+ if( a!=aStatic ) sqlite3_free(a);
+ return bRet;
+ }
+}
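+
+/*
+** For illustration of the function above: with two single-term phrases
+** and nNear==2, a row in which p1 occurs at position 1 and p2 at
+** position 4 satisfies the NEAR constraint (each position falls within a
+** window of nTerm+nNear of the other), while positions 1 and 5 do not.
+*/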
+
+/*
+** Advance iterator pIter until it points to a value equal to or later in
+** the iteration order than the initial value of *piLast. If this means the
+** iterator points to a value past *piLast, update *piLast to that value.
+**
+** If the iterator reaches EOF, set *pbEof to true before returning. If
+** an error occurs, set *pRc to an error code. If either *pbEof or *pRc
+** are set, return a non-zero value. Otherwise, return zero.
+*/
+static int fts5ExprAdvanceto(
+ Fts5IndexIter *pIter, /* Iterator to advance */
+ int bDesc, /* True if iterator is "rowid DESC" */
+ i64 *piLast, /* IN/OUT: Latest rowid seen so far */
+ int *pRc, /* OUT: Error code */
+ int *pbEof /* OUT: Set to true if EOF */
+){
+ i64 iLast = *piLast;
+ i64 iRowid;
+
+ iRowid = pIter->iRowid;
+ if( (bDesc==0 && iLast>iRowid) || (bDesc && iLast<iRowid) ){
+ int rc = sqlite3Fts5IterNextFrom(pIter, iLast);
+ if( rc || sqlite3Fts5IterEof(pIter) ){
+ *pRc = rc;
+ *pbEof = 1;
+ return 1;
+ }
+ iRowid = pIter->iRowid;
+ assert( (bDesc==0 && iRowid>=iLast) || (bDesc==1 && iRowid<=iLast) );
+ }
+ *piLast = iRowid;
+
+ return 0;
+}
+
+static int fts5ExprSynonymAdvanceto(
+ Fts5ExprTerm *pTerm, /* Term iterator to advance */
+ int bDesc, /* True if iterator is "rowid DESC" */
+ i64 *piLast, /* IN/OUT: Latest rowid seen so far */
+ int *pRc /* OUT: Error code */
+){
+ int rc = SQLITE_OK;
+ i64 iLast = *piLast;
+ Fts5ExprTerm *p;
+ int bEof = 0;
+
+ for(p=pTerm; rc==SQLITE_OK && p; p=p->pSynonym){
+ if( sqlite3Fts5IterEof(p->pIter)==0 ){
+ i64 iRowid = p->pIter->iRowid;
+ if( (bDesc==0 && iLast>iRowid) || (bDesc && iLast<iRowid) ){
+ rc = sqlite3Fts5IterNextFrom(p->pIter, iLast);
+ }
+ }
+ }
+
+ if( rc!=SQLITE_OK ){
+ *pRc = rc;
+ bEof = 1;
+ }else{
+ *piLast = fts5ExprSynonymRowid(pTerm, bDesc, &bEof);
+ }
+ return bEof;
+}
+
+
+static int fts5ExprNearTest(
+ int *pRc,
+ Fts5Expr *pExpr, /* Expression that pNear is a part of */
+ Fts5ExprNode *pNode /* The "NEAR" node (FTS5_STRING) */
+){
+ Fts5ExprNearset *pNear = pNode->pNear;
+ int rc = *pRc;
+
+ if( pExpr->pConfig->eDetail!=FTS5_DETAIL_FULL ){
+ Fts5ExprTerm *pTerm;
+ Fts5ExprPhrase *pPhrase = pNear->apPhrase[0];
+ pPhrase->poslist.n = 0;
+ for(pTerm=&pPhrase->aTerm[0]; pTerm; pTerm=pTerm->pSynonym){
+ Fts5IndexIter *pIter = pTerm->pIter;
+ if( sqlite3Fts5IterEof(pIter)==0 ){
+ if( pIter->iRowid==pNode->iRowid && pIter->nData>0 ){
+ pPhrase->poslist.n = 1;
+ }
+ }
+ }
+ return pPhrase->poslist.n;
+ }else{
+ int i;
+
+ /* Check that each phrase in the nearset matches the current row.
+ ** Populate the pPhrase->poslist buffers at the same time. If any
+ ** phrase is not a match, break out of the loop early. */
+ for(i=0; rc==SQLITE_OK && i<pNear->nPhrase; i++){
+ Fts5ExprPhrase *pPhrase = pNear->apPhrase[i];
+ if( pPhrase->nTerm>1 || pPhrase->aTerm[0].pSynonym
+ || pNear->pColset || pPhrase->aTerm[0].bFirst
+ ){
+ int bMatch = 0;
+ rc = fts5ExprPhraseIsMatch(pNode, pPhrase, &bMatch);
+ if( bMatch==0 ) break;
+ }else{
+ Fts5IndexIter *pIter = pPhrase->aTerm[0].pIter;
+ fts5BufferSet(&rc, &pPhrase->poslist, pIter->nData, pIter->pData);
+ }
+ }
+
+ *pRc = rc;
+ if( i==pNear->nPhrase && (i==1 || fts5ExprNearIsMatch(pRc, pNear)) ){
+ return 1;
+ }
+ return 0;
+ }
+}
+
+
+/*
+** Initialize all term iterators in the pNear object. If any term is found
+** to match no documents at all, return immediately without initializing any
+** further iterators.
+**
+** If an error occurs, return an SQLite error code. Otherwise, return
+** SQLITE_OK. It is not considered an error if some term matches zero
+** documents.
+*/
+static int fts5ExprNearInitAll(
+ Fts5Expr *pExpr,
+ Fts5ExprNode *pNode
+){
+ Fts5ExprNearset *pNear = pNode->pNear;
+ int i;
+
+ assert( pNode->bNomatch==0 );
+ for(i=0; i<pNear->nPhrase; i++){
+ Fts5ExprPhrase *pPhrase = pNear->apPhrase[i];
+ if( pPhrase->nTerm==0 ){
+ pNode->bEof = 1;
+ return SQLITE_OK;
+ }else{
+ int j;
+ for(j=0; j<pPhrase->nTerm; j++){
+ Fts5ExprTerm *pTerm = &pPhrase->aTerm[j];
+ Fts5ExprTerm *p;
+ int bHit = 0;
+
+ for(p=pTerm; p; p=p->pSynonym){
+ int rc;
+ if( p->pIter ){
+ sqlite3Fts5IterClose(p->pIter);
+ p->pIter = 0;
+ }
+ rc = sqlite3Fts5IndexQuery(
+ pExpr->pIndex, p->zTerm, (int)strlen(p->zTerm),
+ (pTerm->bPrefix ? FTS5INDEX_QUERY_PREFIX : 0) |
+ (pExpr->bDesc ? FTS5INDEX_QUERY_DESC : 0),
+ pNear->pColset,
+ &p->pIter
+ );
+ assert( (rc==SQLITE_OK)==(p->pIter!=0) );
+ if( rc!=SQLITE_OK ) return rc;
+ if( 0==sqlite3Fts5IterEof(p->pIter) ){
+ bHit = 1;
+ }
+ }
+
+ if( bHit==0 ){
+ pNode->bEof = 1;
+ return SQLITE_OK;
+ }
+ }
+ }
+ }
+
+ pNode->bEof = 0;
+ return SQLITE_OK;
+}
+
+/*
+** If pExpr is an ASC iterator, this function returns a value with the
+** same sign as:
+**
+** (iLhs - iRhs)
+**
+** Otherwise, if this is a DESC iterator, the opposite is returned:
+**
+** (iRhs - iLhs)
+*/
+static int fts5RowidCmp(
+ Fts5Expr *pExpr,
+ i64 iLhs,
+ i64 iRhs
+){
+ assert( pExpr->bDesc==0 || pExpr->bDesc==1 );
+ if( pExpr->bDesc==0 ){
+ if( iLhs<iRhs ) return -1;
+ return (iLhs > iRhs);
+ }else{
+ if( iLhs>iRhs ) return -1;
+ return (iLhs < iRhs);
+ }
+}
+
+static void fts5ExprSetEof(Fts5ExprNode *pNode){
+ int i;
+ pNode->bEof = 1;
+ pNode->bNomatch = 0;
+ for(i=0; i<pNode->nChild; i++){
+ fts5ExprSetEof(pNode->apChild[i]);
+ }
+}
+
+static void fts5ExprNodeZeroPoslist(Fts5ExprNode *pNode){
+ if( pNode->eType==FTS5_STRING || pNode->eType==FTS5_TERM ){
+ Fts5ExprNearset *pNear = pNode->pNear;
+ int i;
+ for(i=0; i<pNear->nPhrase; i++){
+ Fts5ExprPhrase *pPhrase = pNear->apPhrase[i];
+ pPhrase->poslist.n = 0;
+ }
+ }else{
+ int i;
+ for(i=0; i<pNode->nChild; i++){
+ fts5ExprNodeZeroPoslist(pNode->apChild[i]);
+ }
+ }
+}
+
+
+
+/*
+** Compare the values currently indicated by the two nodes as follows:
+**
+** res = (*p1) - (*p2)
+**
+** Nodes that point to values that come later in the iteration order are
+** considered to be larger. Nodes at EOF are the largest of all.
+**
+** This means that if the iteration order is ASC, then numerically larger
+** rowids are considered larger. Or, if the iteration order is DESC,
+** numerically smaller rowids are considered larger.
+*/
+static int fts5NodeCompare(
+ Fts5Expr *pExpr,
+ Fts5ExprNode *p1,
+ Fts5ExprNode *p2
+){
+ if( p2->bEof ) return -1;
+ if( p1->bEof ) return +1;
+ return fts5RowidCmp(pExpr, p1->iRowid, p2->iRowid);
+}
+
+/*
+** All individual term iterators in pNear are guaranteed to be valid when
+** this function is called. This function checks if all term iterators
+** point to the same rowid, and if not, advances them until they do.
+** If an EOF is reached before this happens, *pbEof is set to true before
+** returning.
+**
+** SQLITE_OK is returned if no error occurs, or an SQLite error code
+** otherwise. It is not considered an error if an iterator reaches
+** EOF.
+*/
+static int fts5ExprNodeTest_STRING(
+ Fts5Expr *pExpr, /* Expression pPhrase belongs to */
+ Fts5ExprNode *pNode
+){
+ Fts5ExprNearset *pNear = pNode->pNear;
+ Fts5ExprPhrase *pLeft = pNear->apPhrase[0];
+ int rc = SQLITE_OK;
+ i64 iLast; /* Latest rowid any iterator points to */
+ int i, j; /* Phrase and token index, respectively */
+ int bMatch; /* True if all terms are at the same rowid */
+ const int bDesc = pExpr->bDesc;
+
+ /* Check that this node should not be FTS5_TERM */
+ assert( pNear->nPhrase>1
+ || pNear->apPhrase[0]->nTerm>1
+ || pNear->apPhrase[0]->aTerm[0].pSynonym
+ || pNear->apPhrase[0]->aTerm[0].bFirst
+ );
+
+ /* Initialize iLast, the "latest" rowid any iterator points to. If the
+ ** iterator skips through rowids in the default ascending order, this means
+ ** the maximum rowid. Or, if the iterator is "ORDER BY rowid DESC", then it
+ ** means the minimum rowid. */
+ if( pLeft->aTerm[0].pSynonym ){
+ iLast = fts5ExprSynonymRowid(&pLeft->aTerm[0], bDesc, 0);
+ }else{
+ iLast = pLeft->aTerm[0].pIter->iRowid;
+ }
+
+ do {
+ bMatch = 1;
+ for(i=0; i<pNear->nPhrase; i++){
+ Fts5ExprPhrase *pPhrase = pNear->apPhrase[i];
+ for(j=0; j<pPhrase->nTerm; j++){
+ Fts5ExprTerm *pTerm = &pPhrase->aTerm[j];
+ if( pTerm->pSynonym ){
+ i64 iRowid = fts5ExprSynonymRowid(pTerm, bDesc, 0);
+ if( iRowid==iLast ) continue;
+ bMatch = 0;
+ if( fts5ExprSynonymAdvanceto(pTerm, bDesc, &iLast, &rc) ){
+ pNode->bNomatch = 0;
+ pNode->bEof = 1;
+ return rc;
+ }
+ }else{
+ Fts5IndexIter *pIter = pPhrase->aTerm[j].pIter;
+ if( pIter->iRowid==iLast || pIter->bEof ) continue;
+ bMatch = 0;
+ if( fts5ExprAdvanceto(pIter, bDesc, &iLast, &rc, &pNode->bEof) ){
+ return rc;
+ }
+ }
+ }
+ }
+ }while( bMatch==0 );
+
+ pNode->iRowid = iLast;
+ pNode->bNomatch = ((0==fts5ExprNearTest(&rc, pExpr, pNode)) && rc==SQLITE_OK);
+ assert( pNode->bEof==0 || pNode->bNomatch==0 );
+
+ return rc;
+}
+
+/*
+** Advance the first term iterator in the first phrase of pNear. Set output
+** variable *pbEof to true if it reaches EOF or if an error occurs.
+**
+** Return SQLITE_OK if successful, or an SQLite error code if an error
+** occurs.
+*/
+static int fts5ExprNodeNext_STRING(
+ Fts5Expr *pExpr, /* Expression pPhrase belongs to */
+ Fts5ExprNode *pNode, /* FTS5_STRING or FTS5_TERM node */
+ int bFromValid,
+ i64 iFrom
+){
+ Fts5ExprTerm *pTerm = &pNode->pNear->apPhrase[0]->aTerm[0];
+ int rc = SQLITE_OK;
+
+ pNode->bNomatch = 0;
+ if( pTerm->pSynonym ){
+ int bEof = 1;
+ Fts5ExprTerm *p;
+
+    /* Find the first rowid, in iteration order, any synonym points to. */
+ i64 iRowid = fts5ExprSynonymRowid(pTerm, pExpr->bDesc, 0);
+
+ /* Advance each iterator that currently points to iRowid. Or, if iFrom
+ ** is valid - each iterator that points to a rowid before iFrom. */
+ for(p=pTerm; p; p=p->pSynonym){
+ if( sqlite3Fts5IterEof(p->pIter)==0 ){
+ i64 ii = p->pIter->iRowid;
+ if( ii==iRowid
+ || (bFromValid && ii!=iFrom && (ii>iFrom)==pExpr->bDesc)
+ ){
+ if( bFromValid ){
+ rc = sqlite3Fts5IterNextFrom(p->pIter, iFrom);
+ }else{
+ rc = sqlite3Fts5IterNext(p->pIter);
+ }
+ if( rc!=SQLITE_OK ) break;
+ if( sqlite3Fts5IterEof(p->pIter)==0 ){
+ bEof = 0;
+ }
+ }else{
+ bEof = 0;
+ }
+ }
+ }
+
+ /* Set the EOF flag if either all synonym iterators are at EOF or an
+ ** error has occurred. */
+ pNode->bEof = (rc || bEof);
+ }else{
+ Fts5IndexIter *pIter = pTerm->pIter;
+
+ assert( Fts5NodeIsString(pNode) );
+ if( bFromValid ){
+ rc = sqlite3Fts5IterNextFrom(pIter, iFrom);
+ }else{
+ rc = sqlite3Fts5IterNext(pIter);
+ }
+
+ pNode->bEof = (rc || sqlite3Fts5IterEof(pIter));
+ }
+
+ if( pNode->bEof==0 ){
+ assert( rc==SQLITE_OK );
+ rc = fts5ExprNodeTest_STRING(pExpr, pNode);
+ }
+
+ return rc;
+}
+
+
+static int fts5ExprNodeTest_TERM(
+ Fts5Expr *pExpr, /* Expression that pNear is a part of */
+ Fts5ExprNode *pNode /* The "NEAR" node (FTS5_TERM) */
+){
+ /* As this "NEAR" object is actually a single phrase that consists
+ ** of a single term only, grab pointers into the poslist managed by the
+ ** fts5_index.c iterator object. This is much faster than synthesizing
+ ** a new poslist the way we have to for more complicated phrase or NEAR
+ ** expressions. */
+ Fts5ExprPhrase *pPhrase = pNode->pNear->apPhrase[0];
+ Fts5IndexIter *pIter = pPhrase->aTerm[0].pIter;
+
+ assert( pNode->eType==FTS5_TERM );
+ assert( pNode->pNear->nPhrase==1 && pPhrase->nTerm==1 );
+ assert( pPhrase->aTerm[0].pSynonym==0 );
+
+ pPhrase->poslist.n = pIter->nData;
+ if( pExpr->pConfig->eDetail==FTS5_DETAIL_FULL ){
+ pPhrase->poslist.p = (u8*)pIter->pData;
+ }
+ pNode->iRowid = pIter->iRowid;
+ pNode->bNomatch = (pPhrase->poslist.n==0);
+ return SQLITE_OK;
+}
+
+/*
+** xNext() method for a node of type FTS5_TERM.
+*/
+static int fts5ExprNodeNext_TERM(
+ Fts5Expr *pExpr,
+ Fts5ExprNode *pNode,
+ int bFromValid,
+ i64 iFrom
+){
+ int rc;
+ Fts5IndexIter *pIter = pNode->pNear->apPhrase[0]->aTerm[0].pIter;
+
+ assert( pNode->bEof==0 );
+ if( bFromValid ){
+ rc = sqlite3Fts5IterNextFrom(pIter, iFrom);
+ }else{
+ rc = sqlite3Fts5IterNext(pIter);
+ }
+ if( rc==SQLITE_OK && sqlite3Fts5IterEof(pIter)==0 ){
+ rc = fts5ExprNodeTest_TERM(pExpr, pNode);
+ }else{
+ pNode->bEof = 1;
+ pNode->bNomatch = 0;
+ }
+ return rc;
+}
+
+static void fts5ExprNodeTest_OR(
+ Fts5Expr *pExpr, /* Expression of which pNode is a part */
+ Fts5ExprNode *pNode /* Expression node to test */
+){
+ Fts5ExprNode *pNext = pNode->apChild[0];
+ int i;
+
+ for(i=1; i<pNode->nChild; i++){
+ Fts5ExprNode *pChild = pNode->apChild[i];
+ int cmp = fts5NodeCompare(pExpr, pNext, pChild);
+ if( cmp>0 || (cmp==0 && pChild->bNomatch==0) ){
+ pNext = pChild;
+ }
+ }
+ pNode->iRowid = pNext->iRowid;
+ pNode->bEof = pNext->bEof;
+ pNode->bNomatch = pNext->bNomatch;
+}
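+
+/* For example (hypothetical state): with three children positioned at
+** rowids 9 and 5 and at EOF, under ascending order the loop above
+** selects the child at rowid 5, so the OR node reports iRowid==5. On a
+** rowid tie, a child with bNomatch==0 is preferred, so a real match at
+** a rowid masks a same-rowid near-miss. */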
+
+static int fts5ExprNodeNext_OR(
+ Fts5Expr *pExpr,
+ Fts5ExprNode *pNode,
+ int bFromValid,
+ i64 iFrom
+){
+ int i;
+ i64 iLast = pNode->iRowid;
+
+ for(i=0; i<pNode->nChild; i++){
+ Fts5ExprNode *p1 = pNode->apChild[i];
+ assert( p1->bEof || fts5RowidCmp(pExpr, p1->iRowid, iLast)>=0 );
+ if( p1->bEof==0 ){
+ if( (p1->iRowid==iLast)
+ || (bFromValid && fts5RowidCmp(pExpr, p1->iRowid, iFrom)<0)
+ ){
+ int rc = fts5ExprNodeNext(pExpr, p1, bFromValid, iFrom);
+ if( rc!=SQLITE_OK ){
+ pNode->bNomatch = 0;
+ return rc;
+ }
+ }
+ }
+ }
+
+ fts5ExprNodeTest_OR(pExpr, pNode);
+ return SQLITE_OK;
+}
+
+/*
+** Argument pNode is an FTS5_AND node.
+*/
+static int fts5ExprNodeTest_AND(
+  Fts5Expr *pExpr,                /* Expression pAnd belongs to */
+ Fts5ExprNode *pAnd /* FTS5_AND node to advance */
+){
+ int iChild;
+ i64 iLast = pAnd->iRowid;
+ int rc = SQLITE_OK;
+ int bMatch;
+
+ assert( pAnd->bEof==0 );
+ do {
+ pAnd->bNomatch = 0;
+ bMatch = 1;
+ for(iChild=0; iChild<pAnd->nChild; iChild++){
+ Fts5ExprNode *pChild = pAnd->apChild[iChild];
+ int cmp = fts5RowidCmp(pExpr, iLast, pChild->iRowid);
+ if( cmp>0 ){
+        /* Advance pChild until it points to iLast or later */
+ rc = fts5ExprNodeNext(pExpr, pChild, 1, iLast);
+ if( rc!=SQLITE_OK ){
+ pAnd->bNomatch = 0;
+ return rc;
+ }
+ }
+
+ /* If the child node is now at EOF, so is the parent AND node. Otherwise,
+ ** the child node is guaranteed to have advanced at least as far as
+ ** rowid iLast. So if it is not at exactly iLast, pChild->iRowid is the
+      ** new latest rowid seen so far. */
+ assert( pChild->bEof || fts5RowidCmp(pExpr, iLast, pChild->iRowid)<=0 );
+ if( pChild->bEof ){
+ fts5ExprSetEof(pAnd);
+ bMatch = 1;
+ break;
+ }else if( iLast!=pChild->iRowid ){
+ bMatch = 0;
+ iLast = pChild->iRowid;
+ }
+
+ if( pChild->bNomatch ){
+ pAnd->bNomatch = 1;
+ }
+ }
+ }while( bMatch==0 );
+
+ if( pAnd->bNomatch && pAnd!=pExpr->pRoot ){
+ fts5ExprNodeZeroPoslist(pAnd);
+ }
+ pAnd->iRowid = iLast;
+ return SQLITE_OK;
+}
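+
+/* A sketch on hypothetical data, ascending order: child 1 at rowid 1
+** with list {1,4,7}, child 2 at rowid 2 with list {2,4,9}, iLast==1:
+**
+**   pass 1: child 2 is at 2, not at 1, so iLast becomes 2 (no match)
+**   pass 2: child 1 advances to 4 (the first rowid >= 2), so iLast
+**           becomes 4; child 2 then advances to 4
+**   pass 3: both children are at 4, so pAnd->iRowid is set to 4
+**
+** If any child reaches EOF while catching up, fts5ExprSetEof() marks the
+** whole AND node as at EOF. */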
+
+static int fts5ExprNodeNext_AND(
+ Fts5Expr *pExpr,
+ Fts5ExprNode *pNode,
+ int bFromValid,
+ i64 iFrom
+){
+ int rc = fts5ExprNodeNext(pExpr, pNode->apChild[0], bFromValid, iFrom);
+ if( rc==SQLITE_OK ){
+ rc = fts5ExprNodeTest_AND(pExpr, pNode);
+ }else{
+ pNode->bNomatch = 0;
+ }
+ return rc;
+}
+
+static int fts5ExprNodeTest_NOT(
+  Fts5Expr *pExpr,                /* Expression pNode belongs to */
+ Fts5ExprNode *pNode /* FTS5_NOT node to advance */
+){
+ int rc = SQLITE_OK;
+ Fts5ExprNode *p1 = pNode->apChild[0];
+ Fts5ExprNode *p2 = pNode->apChild[1];
+ assert( pNode->nChild==2 );
+
+ while( rc==SQLITE_OK && p1->bEof==0 ){
+ int cmp = fts5NodeCompare(pExpr, p1, p2);
+ if( cmp>0 ){
+ rc = fts5ExprNodeNext(pExpr, p2, 1, p1->iRowid);
+ cmp = fts5NodeCompare(pExpr, p1, p2);
+ }
+ assert( rc!=SQLITE_OK || cmp<=0 );
+ if( cmp || p2->bNomatch ) break;
+ rc = fts5ExprNodeNext(pExpr, p1, 0, 0);
+ }
+ pNode->bEof = p1->bEof;
+ pNode->bNomatch = p1->bNomatch;
+ pNode->iRowid = p1->iRowid;
+ if( p1->bEof ){
+ fts5ExprNodeZeroPoslist(p2);
+ }
+ return rc;
+}
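+
+/* Example (hypothetical data): if p1 matches rowids {1,3,5} and p2
+** matches {3}, the loop above leaves the NOT node on rowid 1, and a
+** later call yields 5. Rowid 3 is skipped because p2 catches up to it
+** exactly, which forces p1 to advance past it. */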
+
+static int fts5ExprNodeNext_NOT(
+ Fts5Expr *pExpr,
+ Fts5ExprNode *pNode,
+ int bFromValid,
+ i64 iFrom
+){
+ int rc = fts5ExprNodeNext(pExpr, pNode->apChild[0], bFromValid, iFrom);
+ if( rc==SQLITE_OK ){
+ rc = fts5ExprNodeTest_NOT(pExpr, pNode);
+ }
+ if( rc!=SQLITE_OK ){
+ pNode->bNomatch = 0;
+ }
+ return rc;
+}
+
+/*
+** If pNode currently points to a match, this function returns SQLITE_OK
+** without modifying it. Otherwise, pNode is advanced until it does point
+** to a match or EOF is reached.
+*/
+static int fts5ExprNodeTest(
+ Fts5Expr *pExpr, /* Expression of which pNode is a part */
+ Fts5ExprNode *pNode /* Expression node to test */
+){
+ int rc = SQLITE_OK;
+ if( pNode->bEof==0 ){
+ switch( pNode->eType ){
+
+ case FTS5_STRING: {
+ rc = fts5ExprNodeTest_STRING(pExpr, pNode);
+ break;
+ }
+
+ case FTS5_TERM: {
+ rc = fts5ExprNodeTest_TERM(pExpr, pNode);
+ break;
+ }
+
+ case FTS5_AND: {
+ rc = fts5ExprNodeTest_AND(pExpr, pNode);
+ break;
+ }
+
+ case FTS5_OR: {
+ fts5ExprNodeTest_OR(pExpr, pNode);
+ break;
+ }
+
+ default: assert( pNode->eType==FTS5_NOT ); {
+ rc = fts5ExprNodeTest_NOT(pExpr, pNode);
+ break;
+ }
+ }
+ }
+ return rc;
+}
+
+
+/*
+** Set node pNode, which is part of expression pExpr, to point to the first
+** match. If there are no matches, set the pNode->bEof flag to indicate EOF.
+**
+** Return an SQLite error code if an error occurs, or SQLITE_OK otherwise.
+** It is not an error if there are no matches.
+*/
+static int fts5ExprNodeFirst(Fts5Expr *pExpr, Fts5ExprNode *pNode){
+ int rc = SQLITE_OK;
+ pNode->bEof = 0;
+ pNode->bNomatch = 0;
+
+ if( Fts5NodeIsString(pNode) ){
+ /* Initialize all term iterators in the NEAR object. */
+ rc = fts5ExprNearInitAll(pExpr, pNode);
+ }else if( pNode->xNext==0 ){
+ pNode->bEof = 1;
+ }else{
+ int i;
+ int nEof = 0;
+ for(i=0; i<pNode->nChild && rc==SQLITE_OK; i++){
+ Fts5ExprNode *pChild = pNode->apChild[i];
+ rc = fts5ExprNodeFirst(pExpr, pNode->apChild[i]);
+ assert( pChild->bEof==0 || pChild->bEof==1 );
+ nEof += pChild->bEof;
+ }
+ pNode->iRowid = pNode->apChild[0]->iRowid;
+
+ switch( pNode->eType ){
+ case FTS5_AND:
+ if( nEof>0 ) fts5ExprSetEof(pNode);
+ break;
+
+ case FTS5_OR:
+ if( pNode->nChild==nEof ) fts5ExprSetEof(pNode);
+ break;
+
+ default:
+ assert( pNode->eType==FTS5_NOT );
+ pNode->bEof = pNode->apChild[0]->bEof;
+ break;
+ }
+ }
+
+ if( rc==SQLITE_OK ){
+ rc = fts5ExprNodeTest(pExpr, pNode);
+ }
+ return rc;
+}
+
+
+/*
+** Begin iterating through the set of documents in index pIdx matched by
+** the MATCH expression passed as the first argument. If the "bDesc"
+** parameter is passed a non-zero value, iteration is in descending rowid
+** order. Or, if it is zero, in ascending order.
+**
+** If iterating in ascending rowid order (bDesc==0), the first document
+** visited is that with the smallest rowid that is larger than or equal
+** to parameter iFirst. Or, if iterating in descending order (bDesc==1),
+** then the first document visited must have a rowid smaller than or
+** equal to iFirst.
+**
+** Return SQLITE_OK if successful, or an SQLite error code otherwise. It
+** is not considered an error if the query does not match any documents.
+*/
+static int sqlite3Fts5ExprFirst(Fts5Expr *p, Fts5Index *pIdx, i64 iFirst, int bDesc){
+ Fts5ExprNode *pRoot = p->pRoot;
+ int rc; /* Return code */
+
+ p->pIndex = pIdx;
+ p->bDesc = bDesc;
+ rc = fts5ExprNodeFirst(p, pRoot);
+
+ /* If not at EOF but the current rowid occurs earlier than iFirst in
+ ** the iteration order, move to document iFirst or later. */
+ if( rc==SQLITE_OK
+ && 0==pRoot->bEof
+ && fts5RowidCmp(p, pRoot->iRowid, iFirst)<0
+ ){
+ rc = fts5ExprNodeNext(p, pRoot, 1, iFirst);
+ }
+
+ /* If the iterator is not at a real match, skip forward until it is. */
+ while( pRoot->bNomatch ){
+ assert( pRoot->bEof==0 && rc==SQLITE_OK );
+ rc = fts5ExprNodeNext(p, pRoot, 0, 0);
+ }
+ return rc;
+}
+
+/*
+** Move to the next document.
+**
+** Return SQLITE_OK if successful, or an SQLite error code otherwise. It
+** is not considered an error if the query does not match any documents.
+*/
+static int sqlite3Fts5ExprNext(Fts5Expr *p, i64 iLast){
+ int rc;
+ Fts5ExprNode *pRoot = p->pRoot;
+ assert( pRoot->bEof==0 && pRoot->bNomatch==0 );
+ do {
+ rc = fts5ExprNodeNext(p, pRoot, 0, 0);
+ assert( pRoot->bNomatch==0 || (rc==SQLITE_OK && pRoot->bEof==0) );
+ }while( pRoot->bNomatch );
+ if( fts5RowidCmp(p, pRoot->iRowid, iLast)>0 ){
+ pRoot->bEof = 1;
+ }
+ return rc;
+}
+
+static int sqlite3Fts5ExprEof(Fts5Expr *p){
+ return p->pRoot->bEof;
+}
+
+static i64 sqlite3Fts5ExprRowid(Fts5Expr *p){
+ return p->pRoot->iRowid;
+}
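+
+/* A minimal usage sketch for the four routines above (illustrative; the
+** iSmallestRowid/iLargestRowid bounds and the expression setup are
+** assumed, not defined here):
+**
+**   rc = sqlite3Fts5ExprFirst(pExpr, pIdx, iSmallestRowid, 0);
+**   while( rc==SQLITE_OK && 0==sqlite3Fts5ExprEof(pExpr) ){
+**     i64 iRowid = sqlite3Fts5ExprRowid(pExpr);
+**     // ... process matching document iRowid ...
+**     rc = sqlite3Fts5ExprNext(pExpr, iLargestRowid);
+**   }
+*/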
+
+static int fts5ParseStringFromToken(Fts5Token *pToken, char **pz){
+ int rc = SQLITE_OK;
+ *pz = sqlite3Fts5Strndup(&rc, pToken->p, pToken->n);
+ return rc;
+}
+
+/*
+** Free the phrase object passed as the only argument.
+*/
+static void fts5ExprPhraseFree(Fts5ExprPhrase *pPhrase){
+ if( pPhrase ){
+ int i;
+ for(i=0; i<pPhrase->nTerm; i++){
+ Fts5ExprTerm *pSyn;
+ Fts5ExprTerm *pNext;
+ Fts5ExprTerm *pTerm = &pPhrase->aTerm[i];
+ sqlite3_free(pTerm->zTerm);
+ sqlite3Fts5IterClose(pTerm->pIter);
+ for(pSyn=pTerm->pSynonym; pSyn; pSyn=pNext){
+ pNext = pSyn->pSynonym;
+ sqlite3Fts5IterClose(pSyn->pIter);
+ fts5BufferFree((Fts5Buffer*)&pSyn[1]);
+ sqlite3_free(pSyn);
+ }
+ }
+ if( pPhrase->poslist.nSpace>0 ) fts5BufferFree(&pPhrase->poslist);
+ sqlite3_free(pPhrase);
+ }
+}
+
+/*
+** Set the "bFirst" flag on the first token of the phrase passed as the
+** only argument.
+*/
+static void sqlite3Fts5ParseSetCaret(Fts5ExprPhrase *pPhrase){
+ if( pPhrase && pPhrase->nTerm ){
+ pPhrase->aTerm[0].bFirst = 1;
+ }
+}
+
+/*
+** If argument pNear is NULL, then a new Fts5ExprNearset object is allocated
+** and populated with pPhrase. Or, if pNear is not NULL, phrase pPhrase is
+** appended to it and the results returned.
+**
+** If an OOM error occurs, both the pNear and pPhrase objects are freed and
+** NULL returned.
+*/
+static Fts5ExprNearset *sqlite3Fts5ParseNearset(
+ Fts5Parse *pParse, /* Parse context */
+ Fts5ExprNearset *pNear, /* Existing nearset, or NULL */
+ Fts5ExprPhrase *pPhrase /* Recently parsed phrase */
+){
+ const int SZALLOC = 8;
+ Fts5ExprNearset *pRet = 0;
+
+ if( pParse->rc==SQLITE_OK ){
+ if( pPhrase==0 ){
+ return pNear;
+ }
+ if( pNear==0 ){
+ sqlite3_int64 nByte;
+ nByte = sizeof(Fts5ExprNearset) + SZALLOC * sizeof(Fts5ExprPhrase*);
+ pRet = sqlite3_malloc64(nByte);
+ if( pRet==0 ){
+ pParse->rc = SQLITE_NOMEM;
+ }else{
+ memset(pRet, 0, (size_t)nByte);
+ }
+ }else if( (pNear->nPhrase % SZALLOC)==0 ){
+ int nNew = pNear->nPhrase + SZALLOC;
+ sqlite3_int64 nByte;
+
+ nByte = sizeof(Fts5ExprNearset) + nNew * sizeof(Fts5ExprPhrase*);
+ pRet = (Fts5ExprNearset*)sqlite3_realloc64(pNear, nByte);
+ if( pRet==0 ){
+ pParse->rc = SQLITE_NOMEM;
+ }
+ }else{
+ pRet = pNear;
+ }
+ }
+
+ if( pRet==0 ){
+ assert( pParse->rc!=SQLITE_OK );
+ sqlite3Fts5ParseNearsetFree(pNear);
+ sqlite3Fts5ParsePhraseFree(pPhrase);
+ }else{
+ if( pRet->nPhrase>0 ){
+ Fts5ExprPhrase *pLast = pRet->apPhrase[pRet->nPhrase-1];
+ assert( pLast==pParse->apPhrase[pParse->nPhrase-2] );
+ if( pPhrase->nTerm==0 ){
+ fts5ExprPhraseFree(pPhrase);
+ pRet->nPhrase--;
+ pParse->nPhrase--;
+ pPhrase = pLast;
+ }else if( pLast->nTerm==0 ){
+ fts5ExprPhraseFree(pLast);
+ pParse->apPhrase[pParse->nPhrase-2] = pPhrase;
+ pParse->nPhrase--;
+ pRet->nPhrase--;
+ }
+ }
+ pRet->apPhrase[pRet->nPhrase++] = pPhrase;
+ }
+ return pRet;
+}
+
+typedef struct TokenCtx TokenCtx;
+struct TokenCtx {
+ Fts5ExprPhrase *pPhrase;
+ int rc;
+};
+
+/*
+** Tokenizer callback used by sqlite3Fts5ParseTerm() and
+** sqlite3Fts5ExprClonePhrase().
+*/
+static int fts5ParseTokenize(
+  void *pContext,                 /* Pointer to TokenCtx object */
+ int tflags, /* Mask of FTS5_TOKEN_* flags */
+ const char *pToken, /* Buffer containing token */
+ int nToken, /* Size of token in bytes */
+ int iUnused1, /* Start offset of token */
+ int iUnused2 /* End offset of token */
+){
+ int rc = SQLITE_OK;
+ const int SZALLOC = 8;
+ TokenCtx *pCtx = (TokenCtx*)pContext;
+ Fts5ExprPhrase *pPhrase = pCtx->pPhrase;
+
+ UNUSED_PARAM2(iUnused1, iUnused2);
+
+ /* If an error has already occurred, this is a no-op */
+ if( pCtx->rc!=SQLITE_OK ) return pCtx->rc;
+ if( nToken>FTS5_MAX_TOKEN_SIZE ) nToken = FTS5_MAX_TOKEN_SIZE;
+
+ if( pPhrase && pPhrase->nTerm>0 && (tflags & FTS5_TOKEN_COLOCATED) ){
+ Fts5ExprTerm *pSyn;
+ sqlite3_int64 nByte = sizeof(Fts5ExprTerm) + sizeof(Fts5Buffer) + nToken+1;
+ pSyn = (Fts5ExprTerm*)sqlite3_malloc64(nByte);
+ if( pSyn==0 ){
+ rc = SQLITE_NOMEM;
+ }else{
+ memset(pSyn, 0, (size_t)nByte);
+ pSyn->zTerm = ((char*)pSyn) + sizeof(Fts5ExprTerm) + sizeof(Fts5Buffer);
+ memcpy(pSyn->zTerm, pToken, nToken);
+ pSyn->pSynonym = pPhrase->aTerm[pPhrase->nTerm-1].pSynonym;
+ pPhrase->aTerm[pPhrase->nTerm-1].pSynonym = pSyn;
+ }
+ }else{
+ Fts5ExprTerm *pTerm;
+ if( pPhrase==0 || (pPhrase->nTerm % SZALLOC)==0 ){
+ Fts5ExprPhrase *pNew;
+ int nNew = SZALLOC + (pPhrase ? pPhrase->nTerm : 0);
+
+ pNew = (Fts5ExprPhrase*)sqlite3_realloc64(pPhrase,
+ sizeof(Fts5ExprPhrase) + sizeof(Fts5ExprTerm) * nNew
+ );
+ if( pNew==0 ){
+ rc = SQLITE_NOMEM;
+ }else{
+ if( pPhrase==0 ) memset(pNew, 0, sizeof(Fts5ExprPhrase));
+ pCtx->pPhrase = pPhrase = pNew;
+ pNew->nTerm = nNew - SZALLOC;
+ }
+ }
+
+ if( rc==SQLITE_OK ){
+ pTerm = &pPhrase->aTerm[pPhrase->nTerm++];
+ memset(pTerm, 0, sizeof(Fts5ExprTerm));
+ pTerm->zTerm = sqlite3Fts5Strndup(&rc, pToken, nToken);
+ }
+ }
+
+ pCtx->rc = rc;
+ return rc;
+}
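+
+/* For instance, if a tokenizer expands the token "first" by invoking
+** this callback a second time with "1st" and FTS5_TOKEN_COLOCATED set
+** (hypothetical tokens, to illustrate the first branch above), the
+** second token is not appended to the phrase as a new term; it is linked
+** into the pSynonym list of the most recently added term instead. */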
+
+
+/*
+** Free the phrase object passed as the only argument.
+*/
+static void sqlite3Fts5ParsePhraseFree(Fts5ExprPhrase *pPhrase){
+ fts5ExprPhraseFree(pPhrase);
+}
+
+/*
+** Free the nearset object passed as the only argument.
+*/
+static void sqlite3Fts5ParseNearsetFree(Fts5ExprNearset *pNear){
+ if( pNear ){
+ int i;
+ for(i=0; i<pNear->nPhrase; i++){
+ fts5ExprPhraseFree(pNear->apPhrase[i]);
+ }
+ sqlite3_free(pNear->pColset);
+ sqlite3_free(pNear);
+ }
+}
+
+static void sqlite3Fts5ParseFinished(Fts5Parse *pParse, Fts5ExprNode *p){
+ assert( pParse->pExpr==0 );
+ pParse->pExpr = p;
+}
+
+/*
+** This function is called by the parser to process a string token. The
+** string may or may not be quoted. In any case it is tokenized and a
+** phrase object consisting of all tokens is returned.
+*/
+static Fts5ExprPhrase *sqlite3Fts5ParseTerm(
+ Fts5Parse *pParse, /* Parse context */
+ Fts5ExprPhrase *pAppend, /* Phrase to append to */
+ Fts5Token *pToken, /* String to tokenize */
+ int bPrefix /* True if there is a trailing "*" */
+){
+ Fts5Config *pConfig = pParse->pConfig;
+ TokenCtx sCtx; /* Context object passed to callback */
+ int rc; /* Tokenize return code */
+ char *z = 0;
+
+ memset(&sCtx, 0, sizeof(TokenCtx));
+ sCtx.pPhrase = pAppend;
+
+ rc = fts5ParseStringFromToken(pToken, &z);
+ if( rc==SQLITE_OK ){
+ int flags = FTS5_TOKENIZE_QUERY | (bPrefix ? FTS5_TOKENIZE_PREFIX : 0);
+ int n;
+ sqlite3Fts5Dequote(z);
+ n = (int)strlen(z);
+ rc = sqlite3Fts5Tokenize(pConfig, flags, z, n, &sCtx, fts5ParseTokenize);
+ }
+ sqlite3_free(z);
+ if( rc || (rc = sCtx.rc) ){
+ pParse->rc = rc;
+ fts5ExprPhraseFree(sCtx.pPhrase);
+ sCtx.pPhrase = 0;
+ }else{
+
+ if( pAppend==0 ){
+ if( (pParse->nPhrase % 8)==0 ){
+ sqlite3_int64 nByte = sizeof(Fts5ExprPhrase*) * (pParse->nPhrase + 8);
+ Fts5ExprPhrase **apNew;
+ apNew = (Fts5ExprPhrase**)sqlite3_realloc64(pParse->apPhrase, nByte);
+ if( apNew==0 ){
+ pParse->rc = SQLITE_NOMEM;
+ fts5ExprPhraseFree(sCtx.pPhrase);
+ return 0;
+ }
+ pParse->apPhrase = apNew;
+ }
+ pParse->nPhrase++;
+ }
+
+ if( sCtx.pPhrase==0 ){
+ /* This happens when parsing a token or quoted phrase that contains
+      ** no token characters at all. (e.g. ... MATCH '""'). */
+ sCtx.pPhrase = sqlite3Fts5MallocZero(&pParse->rc, sizeof(Fts5ExprPhrase));
+ }else if( sCtx.pPhrase->nTerm ){
+ sCtx.pPhrase->aTerm[sCtx.pPhrase->nTerm-1].bPrefix = (u8)bPrefix;
+ }
+ pParse->apPhrase[pParse->nPhrase-1] = sCtx.pPhrase;
+ }
+
+ return sCtx.pPhrase;
+}
+
+/*
+** Create a new FTS5 expression by cloning phrase iPhrase of the
+** expression passed as the second argument.
+*/
+static int sqlite3Fts5ExprClonePhrase(
+ Fts5Expr *pExpr,
+ int iPhrase,
+ Fts5Expr **ppNew
+){
+ int rc = SQLITE_OK; /* Return code */
+ Fts5ExprPhrase *pOrig; /* The phrase extracted from pExpr */
+ Fts5Expr *pNew = 0; /* Expression to return via *ppNew */
+ TokenCtx sCtx = {0,0}; /* Context object for fts5ParseTokenize */
+
+ pOrig = pExpr->apExprPhrase[iPhrase];
+ pNew = (Fts5Expr*)sqlite3Fts5MallocZero(&rc, sizeof(Fts5Expr));
+ if( rc==SQLITE_OK ){
+ pNew->apExprPhrase = (Fts5ExprPhrase**)sqlite3Fts5MallocZero(&rc,
+ sizeof(Fts5ExprPhrase*));
+ }
+ if( rc==SQLITE_OK ){
+ pNew->pRoot = (Fts5ExprNode*)sqlite3Fts5MallocZero(&rc,
+ sizeof(Fts5ExprNode));
+ }
+ if( rc==SQLITE_OK ){
+ pNew->pRoot->pNear = (Fts5ExprNearset*)sqlite3Fts5MallocZero(&rc,
+ sizeof(Fts5ExprNearset) + sizeof(Fts5ExprPhrase*));
+ }
+ if( rc==SQLITE_OK ){
+ Fts5Colset *pColsetOrig = pOrig->pNode->pNear->pColset;
+ if( pColsetOrig ){
+ sqlite3_int64 nByte;
+ Fts5Colset *pColset;
+ nByte = sizeof(Fts5Colset) + (pColsetOrig->nCol-1) * sizeof(int);
+ pColset = (Fts5Colset*)sqlite3Fts5MallocZero(&rc, nByte);
+ if( pColset ){
+ memcpy(pColset, pColsetOrig, (size_t)nByte);
+ }
+ pNew->pRoot->pNear->pColset = pColset;
+ }
+ }
+
+ if( pOrig->nTerm ){
+ int i; /* Used to iterate through phrase terms */
+ for(i=0; rc==SQLITE_OK && i<pOrig->nTerm; i++){
+ int tflags = 0;
+ Fts5ExprTerm *p;
+ for(p=&pOrig->aTerm[i]; p && rc==SQLITE_OK; p=p->pSynonym){
+ const char *zTerm = p->zTerm;
+ rc = fts5ParseTokenize((void*)&sCtx, tflags, zTerm, (int)strlen(zTerm),
+ 0, 0);
+ tflags = FTS5_TOKEN_COLOCATED;
+ }
+ if( rc==SQLITE_OK ){
+ sCtx.pPhrase->aTerm[i].bPrefix = pOrig->aTerm[i].bPrefix;
+ sCtx.pPhrase->aTerm[i].bFirst = pOrig->aTerm[i].bFirst;
+ }
+ }
+ }else{
+ /* This happens when parsing a token or quoted phrase that contains
+    ** no token characters at all. (e.g. ... MATCH '""'). */
+ sCtx.pPhrase = sqlite3Fts5MallocZero(&rc, sizeof(Fts5ExprPhrase));
+ }
+
+ if( rc==SQLITE_OK ){
+ /* All the allocations succeeded. Put the expression object together. */
+ pNew->pIndex = pExpr->pIndex;
+ pNew->pConfig = pExpr->pConfig;
+ pNew->nPhrase = 1;
+ pNew->apExprPhrase[0] = sCtx.pPhrase;
+ pNew->pRoot->pNear->apPhrase[0] = sCtx.pPhrase;
+ pNew->pRoot->pNear->nPhrase = 1;
+ sCtx.pPhrase->pNode = pNew->pRoot;
+
+ if( pOrig->nTerm==1
+ && pOrig->aTerm[0].pSynonym==0
+ && pOrig->aTerm[0].bFirst==0
+ ){
+ pNew->pRoot->eType = FTS5_TERM;
+ pNew->pRoot->xNext = fts5ExprNodeNext_TERM;
+ }else{
+ pNew->pRoot->eType = FTS5_STRING;
+ pNew->pRoot->xNext = fts5ExprNodeNext_STRING;
+ }
+ }else{
+ sqlite3Fts5ExprFree(pNew);
+ fts5ExprPhraseFree(sCtx.pPhrase);
+ pNew = 0;
+ }
+
+ *ppNew = pNew;
+ return rc;
+}
+
+
+/*
+** Token pTok has appeared in a MATCH expression where the NEAR operator
+** is expected. If token pTok does not contain "NEAR", store an error
+** in the pParse object.
+*/
+static void sqlite3Fts5ParseNear(Fts5Parse *pParse, Fts5Token *pTok){
+ if( pTok->n!=4 || memcmp("NEAR", pTok->p, 4) ){
+ sqlite3Fts5ParseError(
+ pParse, "fts5: syntax error near \"%.*s\"", pTok->n, pTok->p
+ );
+ }
+}
+
+static void sqlite3Fts5ParseSetDistance(
+ Fts5Parse *pParse,
+ Fts5ExprNearset *pNear,
+ Fts5Token *p
+){
+ if( pNear ){
+ int nNear = 0;
+ int i;
+ if( p->n ){
+ for(i=0; i<p->n; i++){
+ char c = (char)p->p[i];
+ if( c<'0' || c>'9' ){
+ sqlite3Fts5ParseError(
+ pParse, "expected integer, got \"%.*s\"", p->n, p->p
+ );
+ return;
+ }
+ nNear = nNear * 10 + (p->p[i] - '0');
+ }
+ }else{
+ nNear = FTS5_DEFAULT_NEARDIST;
+ }
+ pNear->nNear = nNear;
+ }
+}
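+
+/* Examples of the above (p is the token between the parentheses of a
+** NEAR() expression, after the phrases themselves):
+**
+**   NEAR(a b, 10)  -> p contains "10", so pNear->nNear is set to 10
+**   NEAR(a b)      -> p is empty, so FTS5_DEFAULT_NEARDIST (10) is used
+**   NEAR(a b, 1x)  -> "expected integer, got "1x"" error
+*/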
+
+/*
+** The second argument passed to this function may be NULL, or it may be
+** an existing Fts5Colset object. This function returns a pointer to
+** a new colset object containing the contents of (p) with the new column
+** number iCol added, keeping the array sorted.
+**
+** If an OOM error occurs, store an error code in pParse and return NULL.
+** The old colset object (if any) is not freed in this case.
+*/
+static Fts5Colset *fts5ParseColset(
+ Fts5Parse *pParse, /* Store SQLITE_NOMEM here if required */
+ Fts5Colset *p, /* Existing colset object */
+ int iCol /* New column to add to colset object */
+){
+ int nCol = p ? p->nCol : 0; /* Num. columns already in colset object */
+ Fts5Colset *pNew; /* New colset object to return */
+
+ assert( pParse->rc==SQLITE_OK );
+ assert( iCol>=0 && iCol<pParse->pConfig->nCol );
+
+ pNew = sqlite3_realloc64(p, sizeof(Fts5Colset) + sizeof(int)*nCol);
+ if( pNew==0 ){
+ pParse->rc = SQLITE_NOMEM;
+ }else{
+ int *aiCol = pNew->aiCol;
+ int i, j;
+ for(i=0; i<nCol; i++){
+ if( aiCol[i]==iCol ) return pNew;
+ if( aiCol[i]>iCol ) break;
+ }
+ for(j=nCol; j>i; j--){
+ aiCol[j] = aiCol[j-1];
+ }
+ aiCol[i] = iCol;
+ pNew->nCol = nCol+1;
+
+#ifndef NDEBUG
+ /* Check that the array is in order and contains no duplicate entries. */
+ for(i=1; i<pNew->nCol; i++) assert( pNew->aiCol[i]>pNew->aiCol[i-1] );
+#endif
+ }
+
+ return pNew;
+}
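+
+/* For example (hypothetical values): starting from colset {0,3}, adding
+** iCol==2 yields {0,2,3}; adding iCol==3 again returns early without
+** inserting a duplicate, since the sorted array is scanned first. */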
+
+/*
+** Allocate and return an Fts5Colset object specifying the inverse of
+** the colset passed as the second argument. Free the colset passed
+** as the second argument before returning.
+*/
+static Fts5Colset *sqlite3Fts5ParseColsetInvert(Fts5Parse *pParse, Fts5Colset *p){
+ Fts5Colset *pRet;
+ int nCol = pParse->pConfig->nCol;
+
+ pRet = (Fts5Colset*)sqlite3Fts5MallocZero(&pParse->rc,
+ sizeof(Fts5Colset) + sizeof(int)*nCol
+ );
+ if( pRet ){
+ int i;
+ int iOld = 0;
+ for(i=0; i<nCol; i++){
+ if( iOld>=p->nCol || p->aiCol[iOld]!=i ){
+ pRet->aiCol[pRet->nCol++] = i;
+ }else{
+ iOld++;
+ }
+ }
+ }
+
+ sqlite3_free(p);
+ return pRet;
+}
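+
+/* For example, with a 5 column table (nCol==5), inverting the colset
+** {1,3} produces {0,2,4}. This supports the query form in which a
+** column filter names the columns to be excluded rather than included. */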
+
+static Fts5Colset *sqlite3Fts5ParseColset(
+ Fts5Parse *pParse, /* Store SQLITE_NOMEM here if required */
+ Fts5Colset *pColset, /* Existing colset object */
+ Fts5Token *p
+){
+ Fts5Colset *pRet = 0;
+ int iCol;
+ char *z; /* Dequoted copy of token p */
+
+ z = sqlite3Fts5Strndup(&pParse->rc, p->p, p->n);
+ if( pParse->rc==SQLITE_OK ){
+ Fts5Config *pConfig = pParse->pConfig;
+ sqlite3Fts5Dequote(z);
+ for(iCol=0; iCol<pConfig->nCol; iCol++){
+ if( 0==sqlite3_stricmp(pConfig->azCol[iCol], z) ) break;
+ }
+ if( iCol==pConfig->nCol ){
+ sqlite3Fts5ParseError(pParse, "no such column: %s", z);
+ }else{
+ pRet = fts5ParseColset(pParse, pColset, iCol);
+ }
+ sqlite3_free(z);
+ }
+
+ if( pRet==0 ){
+ assert( pParse->rc!=SQLITE_OK );
+ sqlite3_free(pColset);
+ }
+
+ return pRet;
+}
+
+/*
+** If argument pOrig is NULL, or if (*pRc) is set to anything other than
+** SQLITE_OK when this function is called, NULL is returned.
+**
+** Otherwise, a copy of (*pOrig) is made into memory obtained from
+** sqlite3Fts5MallocZero() and a pointer to it returned. If the allocation
+** fails, (*pRc) is set to SQLITE_NOMEM and NULL is returned.
+*/
+static Fts5Colset *fts5CloneColset(int *pRc, Fts5Colset *pOrig){
+ Fts5Colset *pRet;
+ if( pOrig ){
+ sqlite3_int64 nByte = sizeof(Fts5Colset) + (pOrig->nCol-1) * sizeof(int);
+ pRet = (Fts5Colset*)sqlite3Fts5MallocZero(pRc, nByte);
+ if( pRet ){
+ memcpy(pRet, pOrig, (size_t)nByte);
+ }
+ }else{
+ pRet = 0;
+ }
+ return pRet;
+}
+
+/*
+** Remove from colset pColset any columns that are not also in colset pMerge.
+*/
+static void fts5MergeColset(Fts5Colset *pColset, Fts5Colset *pMerge){
+ int iIn = 0; /* Next input in pColset */
+ int iMerge = 0; /* Next input in pMerge */
+ int iOut = 0; /* Next output slot in pColset */
+
+ while( iIn<pColset->nCol && iMerge<pMerge->nCol ){
+ int iDiff = pColset->aiCol[iIn] - pMerge->aiCol[iMerge];
+ if( iDiff==0 ){
+ pColset->aiCol[iOut++] = pMerge->aiCol[iMerge];
+ iMerge++;
+ iIn++;
+ }else if( iDiff>0 ){
+ iMerge++;
+ }else{
+ iIn++;
+ }
+ }
+ pColset->nCol = iOut;
+}
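+
+/* Example: merging pColset {0,2,4} with pMerge {2,3,4} rewrites pColset
+** in place as {2,4} - the sorted intersection of the two sets. */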
+
+/*
+** Recursively apply colset pColset to expression node pNode and all of
+** its descendants. If (*ppFree) is not NULL, it contains a spare copy
+** of pColset. This function may use the spare copy and set (*ppFree) to
+** zero, or it may create copies of pColset using fts5CloneColset().
+*/
+static void fts5ParseSetColset(
+ Fts5Parse *pParse,
+ Fts5ExprNode *pNode,
+ Fts5Colset *pColset,
+ Fts5Colset **ppFree
+){
+ if( pParse->rc==SQLITE_OK ){
+ assert( pNode->eType==FTS5_TERM || pNode->eType==FTS5_STRING
+ || pNode->eType==FTS5_AND || pNode->eType==FTS5_OR
+ || pNode->eType==FTS5_NOT || pNode->eType==FTS5_EOF
+ );
+ if( pNode->eType==FTS5_STRING || pNode->eType==FTS5_TERM ){
+ Fts5ExprNearset *pNear = pNode->pNear;
+ if( pNear->pColset ){
+ fts5MergeColset(pNear->pColset, pColset);
+ if( pNear->pColset->nCol==0 ){
+ pNode->eType = FTS5_EOF;
+ pNode->xNext = 0;
+ }
+ }else if( *ppFree ){
+ pNear->pColset = pColset;
+ *ppFree = 0;
+ }else{
+ pNear->pColset = fts5CloneColset(&pParse->rc, pColset);
+ }
+ }else{
+ int i;
+ assert( pNode->eType!=FTS5_EOF || pNode->nChild==0 );
+ for(i=0; i<pNode->nChild; i++){
+ fts5ParseSetColset(pParse, pNode->apChild[i], pColset, ppFree);
+ }
+ }
+ }
+}
+
+/*
+** Apply colset pColset to expression node pExpr and all of its descendants.
+*/
+static void sqlite3Fts5ParseSetColset(
+ Fts5Parse *pParse,
+ Fts5ExprNode *pExpr,
+ Fts5Colset *pColset
+){
+ Fts5Colset *pFree = pColset;
+ if( pParse->pConfig->eDetail==FTS5_DETAIL_NONE ){
+ pParse->rc = SQLITE_ERROR;
+ pParse->zErr = sqlite3_mprintf(
+ "fts5: column queries are not supported (detail=none)"
+ );
+ }else{
+ fts5ParseSetColset(pParse, pExpr, pColset, &pFree);
+ }
+ sqlite3_free(pFree);
+}
+
+static void fts5ExprAssignXNext(Fts5ExprNode *pNode){
+ switch( pNode->eType ){
+ case FTS5_STRING: {
+ Fts5ExprNearset *pNear = pNode->pNear;
+ if( pNear->nPhrase==1 && pNear->apPhrase[0]->nTerm==1
+ && pNear->apPhrase[0]->aTerm[0].pSynonym==0
+ && pNear->apPhrase[0]->aTerm[0].bFirst==0
+ ){
+ pNode->eType = FTS5_TERM;
+ pNode->xNext = fts5ExprNodeNext_TERM;
+ }else{
+ pNode->xNext = fts5ExprNodeNext_STRING;
+ }
+ break;
+ };
+
+ case FTS5_OR: {
+ pNode->xNext = fts5ExprNodeNext_OR;
+ break;
+ };
+
+ case FTS5_AND: {
+ pNode->xNext = fts5ExprNodeNext_AND;
+ break;
+ };
+
+ default: assert( pNode->eType==FTS5_NOT ); {
+ pNode->xNext = fts5ExprNodeNext_NOT;
+ break;
+ };
+ }
+}
+
+static void fts5ExprAddChildren(Fts5ExprNode *p, Fts5ExprNode *pSub){
+ if( p->eType!=FTS5_NOT && pSub->eType==p->eType ){
+ int nByte = sizeof(Fts5ExprNode*) * pSub->nChild;
+ memcpy(&p->apChild[p->nChild], pSub->apChild, nByte);
+ p->nChild += pSub->nChild;
+ sqlite3_free(pSub);
+ }else{
+ p->apChild[p->nChild++] = pSub;
+ }
+}
+
+/*
+** Allocate and return a new expression object. If anything goes wrong (i.e.
+** OOM error), leave an error code in pParse and return NULL.
+*/
+static Fts5ExprNode *sqlite3Fts5ParseNode(
+ Fts5Parse *pParse, /* Parse context */
+ int eType, /* FTS5_STRING, AND, OR or NOT */
+ Fts5ExprNode *pLeft, /* Left hand child expression */
+ Fts5ExprNode *pRight, /* Right hand child expression */
+ Fts5ExprNearset *pNear /* For STRING expressions, the near cluster */
+){
+ Fts5ExprNode *pRet = 0;
+
+ if( pParse->rc==SQLITE_OK ){
+ int nChild = 0; /* Number of children of returned node */
+ sqlite3_int64 nByte; /* Bytes of space to allocate for this node */
+
+ assert( (eType!=FTS5_STRING && !pNear)
+ || (eType==FTS5_STRING && !pLeft && !pRight)
+ );
+ if( eType==FTS5_STRING && pNear==0 ) return 0;
+ if( eType!=FTS5_STRING && pLeft==0 ) return pRight;
+ if( eType!=FTS5_STRING && pRight==0 ) return pLeft;
+
+ if( eType==FTS5_NOT ){
+ nChild = 2;
+ }else if( eType==FTS5_AND || eType==FTS5_OR ){
+ nChild = 2;
+ if( pLeft->eType==eType ) nChild += pLeft->nChild-1;
+ if( pRight->eType==eType ) nChild += pRight->nChild-1;
+ }
+
+ nByte = sizeof(Fts5ExprNode) + sizeof(Fts5ExprNode*)*(nChild-1);
+ pRet = (Fts5ExprNode*)sqlite3Fts5MallocZero(&pParse->rc, nByte);
+
+ if( pRet ){
+ pRet->eType = eType;
+ pRet->pNear = pNear;
+ fts5ExprAssignXNext(pRet);
+ if( eType==FTS5_STRING ){
+ int iPhrase;
+ for(iPhrase=0; iPhrase<pNear->nPhrase; iPhrase++){
+ pNear->apPhrase[iPhrase]->pNode = pRet;
+ if( pNear->apPhrase[iPhrase]->nTerm==0 ){
+ pRet->xNext = 0;
+ pRet->eType = FTS5_EOF;
+ }
+ }
+
+ if( pParse->pConfig->eDetail!=FTS5_DETAIL_FULL ){
+ Fts5ExprPhrase *pPhrase = pNear->apPhrase[0];
+ if( pNear->nPhrase!=1
+ || pPhrase->nTerm>1
+ || (pPhrase->nTerm>0 && pPhrase->aTerm[0].bFirst)
+ ){
+ assert( pParse->rc==SQLITE_OK );
+ pParse->rc = SQLITE_ERROR;
+ assert( pParse->zErr==0 );
+ pParse->zErr = sqlite3_mprintf(
+ "fts5: %s queries are not supported (detail!=full)",
+ pNear->nPhrase==1 ? "phrase": "NEAR"
+ );
+ sqlite3_free(pRet);
+ pRet = 0;
+ }
+ }
+ }else{
+ fts5ExprAddChildren(pRet, pLeft);
+ fts5ExprAddChildren(pRet, pRight);
+ }
+ }
+ }
+
+ if( pRet==0 ){
+ assert( pParse->rc!=SQLITE_OK );
+ sqlite3Fts5ParseNodeFree(pLeft);
+ sqlite3Fts5ParseNodeFree(pRight);
+ sqlite3Fts5ParseNearsetFree(pNear);
+ }
+ return pRet;
+}
+
+static Fts5ExprNode *sqlite3Fts5ParseImplicitAnd(
+ Fts5Parse *pParse, /* Parse context */
+ Fts5ExprNode *pLeft, /* Left hand child expression */
+ Fts5ExprNode *pRight /* Right hand child expression */
+){
+ Fts5ExprNode *pRet = 0;
+ Fts5ExprNode *pPrev;
+
+ if( pParse->rc ){
+ sqlite3Fts5ParseNodeFree(pLeft);
+ sqlite3Fts5ParseNodeFree(pRight);
+ }else{
+
+ assert( pLeft->eType==FTS5_STRING
+ || pLeft->eType==FTS5_TERM
+ || pLeft->eType==FTS5_EOF
+ || pLeft->eType==FTS5_AND
+ );
+ assert( pRight->eType==FTS5_STRING
+ || pRight->eType==FTS5_TERM
+ || pRight->eType==FTS5_EOF
+ );
+
+ if( pLeft->eType==FTS5_AND ){
+ pPrev = pLeft->apChild[pLeft->nChild-1];
+ }else{
+ pPrev = pLeft;
+ }
+ assert( pPrev->eType==FTS5_STRING
+ || pPrev->eType==FTS5_TERM
+ || pPrev->eType==FTS5_EOF
+ );
+
+ if( pRight->eType==FTS5_EOF ){
+ assert( pParse->apPhrase[pParse->nPhrase-1]==pRight->pNear->apPhrase[0] );
+ sqlite3Fts5ParseNodeFree(pRight);
+ pRet = pLeft;
+ pParse->nPhrase--;
+ }
+ else if( pPrev->eType==FTS5_EOF ){
+ Fts5ExprPhrase **ap;
+
+ if( pPrev==pLeft ){
+ pRet = pRight;
+ }else{
+ pLeft->apChild[pLeft->nChild-1] = pRight;
+ pRet = pLeft;
+ }
+
+ ap = &pParse->apPhrase[pParse->nPhrase-1-pRight->pNear->nPhrase];
+ assert( ap[0]==pPrev->pNear->apPhrase[0] );
+ memmove(ap, &ap[1], sizeof(Fts5ExprPhrase*)*pRight->pNear->nPhrase);
+ pParse->nPhrase--;
+
+ sqlite3Fts5ParseNodeFree(pPrev);
+ }
+ else{
+ pRet = sqlite3Fts5ParseNode(pParse, FTS5_AND, pLeft, pRight, 0);
+ }
+ }
+
+ return pRet;
+}
+
+static char *fts5ExprTermPrint(Fts5ExprTerm *pTerm){
+ sqlite3_int64 nByte = 0;
+ Fts5ExprTerm *p;
+ char *zQuoted;
+
+ /* Determine the maximum amount of space required. */
+ for(p=pTerm; p; p=p->pSynonym){
+    /* Note p->zTerm, not pTerm->zTerm: count each synonym's own length */
+    nByte += (int)strlen(p->zTerm) * 2 + 3 + 2;
+ }
+ zQuoted = sqlite3_malloc64(nByte);
+
+ if( zQuoted ){
+ int i = 0;
+ for(p=pTerm; p; p=p->pSynonym){
+ char *zIn = p->zTerm;
+ zQuoted[i++] = '"';
+ while( *zIn ){
+ if( *zIn=='"' ) zQuoted[i++] = '"';
+ zQuoted[i++] = *zIn++;
+ }
+ zQuoted[i++] = '"';
+ if( p->pSynonym ) zQuoted[i++] = '|';
+ }
+ if( pTerm->bPrefix ){
+ zQuoted[i++] = ' ';
+ zQuoted[i++] = '*';
+ }
+ zQuoted[i++] = '\0';
+ }
+ return zQuoted;
+}
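+
+/* Example outputs (hypothetical terms): the term abc"def is rendered as
+** "abc""def" (embedded quotes are doubled); a chain of synonyms is
+** rendered as "one"|"two"; and a prefix term is followed by " *". */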
+
+static char *fts5PrintfAppend(char *zApp, const char *zFmt, ...){
+ char *zNew;
+ va_list ap;
+ va_start(ap, zFmt);
+ zNew = sqlite3_vmprintf(zFmt, ap);
+ va_end(ap);
+ if( zApp && zNew ){
+ char *zNew2 = sqlite3_mprintf("%s%s", zApp, zNew);
+ sqlite3_free(zNew);
+ zNew = zNew2;
+ }
+ sqlite3_free(zApp);
+ return zNew;
+}
+
+/*
+** Compose a tcl-readable representation of expression pExpr. Return a
+** pointer to a buffer containing that representation. It is the
+** responsibility of the caller to at some point free the buffer using
+** sqlite3_free().
+*/
+static char *fts5ExprPrintTcl(
+ Fts5Config *pConfig,
+ const char *zNearsetCmd,
+ Fts5ExprNode *pExpr
+){
+ char *zRet = 0;
+ if( pExpr->eType==FTS5_STRING || pExpr->eType==FTS5_TERM ){
+ Fts5ExprNearset *pNear = pExpr->pNear;
+ int i;
+ int iTerm;
+
+ zRet = fts5PrintfAppend(zRet, "%s ", zNearsetCmd);
+ if( zRet==0 ) return 0;
+ if( pNear->pColset ){
+ int *aiCol = pNear->pColset->aiCol;
+ int nCol = pNear->pColset->nCol;
+ if( nCol==1 ){
+ zRet = fts5PrintfAppend(zRet, "-col %d ", aiCol[0]);
+ }else{
+ zRet = fts5PrintfAppend(zRet, "-col {%d", aiCol[0]);
+ for(i=1; i<pNear->pColset->nCol; i++){
+ zRet = fts5PrintfAppend(zRet, " %d", aiCol[i]);
+ }
+ zRet = fts5PrintfAppend(zRet, "} ");
+ }
+ if( zRet==0 ) return 0;
+ }
+
+ if( pNear->nPhrase>1 ){
+ zRet = fts5PrintfAppend(zRet, "-near %d ", pNear->nNear);
+ if( zRet==0 ) return 0;
+ }
+
+ zRet = fts5PrintfAppend(zRet, "--");
+ if( zRet==0 ) return 0;
+
+ for(i=0; i<pNear->nPhrase; i++){
+ Fts5ExprPhrase *pPhrase = pNear->apPhrase[i];
+
+ zRet = fts5PrintfAppend(zRet, " {");
+ for(iTerm=0; zRet && iTerm<pPhrase->nTerm; iTerm++){
+ char *zTerm = pPhrase->aTerm[iTerm].zTerm;
+ zRet = fts5PrintfAppend(zRet, "%s%s", iTerm==0?"":" ", zTerm);
+ if( pPhrase->aTerm[iTerm].bPrefix ){
+ zRet = fts5PrintfAppend(zRet, "*");
+ }
+ }
+
+ if( zRet ) zRet = fts5PrintfAppend(zRet, "}");
+ if( zRet==0 ) return 0;
+ }
+
+ }else{
+ char const *zOp = 0;
+ int i;
+ switch( pExpr->eType ){
+ case FTS5_AND: zOp = "AND"; break;
+ case FTS5_NOT: zOp = "NOT"; break;
+ default:
+ assert( pExpr->eType==FTS5_OR );
+ zOp = "OR";
+ break;
+ }
+
+ zRet = sqlite3_mprintf("%s", zOp);
+ for(i=0; zRet && i<pExpr->nChild; i++){
+ char *z = fts5ExprPrintTcl(pConfig, zNearsetCmd, pExpr->apChild[i]);
+ if( !z ){
+ sqlite3_free(zRet);
+ zRet = 0;
+ }else{
+ zRet = fts5PrintfAppend(zRet, " [%z]", z);
+ }
+ }
+ }
+
+ return zRet;
+}
+
+static char *fts5ExprPrint(Fts5Config *pConfig, Fts5ExprNode *pExpr){
+ char *zRet = 0;
+ if( pExpr->eType==0 ){
+ return sqlite3_mprintf("\"\"");
+ }else
+ if( pExpr->eType==FTS5_STRING || pExpr->eType==FTS5_TERM ){
+ Fts5ExprNearset *pNear = pExpr->pNear;
+ int i;
+ int iTerm;
+
+ if( pNear->pColset ){
+ int iCol = pNear->pColset->aiCol[0];
+ zRet = fts5PrintfAppend(zRet, "%s : ", pConfig->azCol[iCol]);
+ if( zRet==0 ) return 0;
+ }
+
+ if( pNear->nPhrase>1 ){
+ zRet = fts5PrintfAppend(zRet, "NEAR(");
+ if( zRet==0 ) return 0;
+ }
+
+ for(i=0; i<pNear->nPhrase; i++){
+ Fts5ExprPhrase *pPhrase = pNear->apPhrase[i];
+ if( i!=0 ){
+ zRet = fts5PrintfAppend(zRet, " ");
+ if( zRet==0 ) return 0;
+ }
+ for(iTerm=0; iTerm<pPhrase->nTerm; iTerm++){
+ char *zTerm = fts5ExprTermPrint(&pPhrase->aTerm[iTerm]);
+ if( zTerm ){
+ zRet = fts5PrintfAppend(zRet, "%s%s", iTerm==0?"":" + ", zTerm);
+ sqlite3_free(zTerm);
+ }
+ if( zTerm==0 || zRet==0 ){
+ sqlite3_free(zRet);
+ return 0;
+ }
+ }
+ }
+
+ if( pNear->nPhrase>1 ){
+ zRet = fts5PrintfAppend(zRet, ", %d)", pNear->nNear);
+ if( zRet==0 ) return 0;
+ }
+
+ }else{
+ char const *zOp = 0;
+ int i;
+
+ switch( pExpr->eType ){
+ case FTS5_AND: zOp = " AND "; break;
+ case FTS5_NOT: zOp = " NOT "; break;
+ default:
+ assert( pExpr->eType==FTS5_OR );
+ zOp = " OR ";
+ break;
+ }
+
+ for(i=0; i<pExpr->nChild; i++){
+ char *z = fts5ExprPrint(pConfig, pExpr->apChild[i]);
+ if( z==0 ){
+ sqlite3_free(zRet);
+ zRet = 0;
+ }else{
+ int e = pExpr->apChild[i]->eType;
+ int b = (e!=FTS5_STRING && e!=FTS5_TERM && e!=FTS5_EOF);
+ zRet = fts5PrintfAppend(zRet, "%s%s%z%s",
+ (i==0 ? "" : zOp),
+ (b?"(":""), z, (b?")":"")
+ );
+ }
+ if( zRet==0 ) break;
+ }
+ }
+
+ return zRet;
+}
+
+/*
+** The implementation of user-defined scalar functions fts5_expr() (bTcl==0)
+** and fts5_expr_tcl() (bTcl!=0).
+*/
+static void fts5ExprFunction(
+ sqlite3_context *pCtx, /* Function call context */
+ int nArg, /* Number of args */
+ sqlite3_value **apVal, /* Function arguments */
+ int bTcl
+){
+ Fts5Global *pGlobal = (Fts5Global*)sqlite3_user_data(pCtx);
+ sqlite3 *db = sqlite3_context_db_handle(pCtx);
+ const char *zExpr = 0;
+ char *zErr = 0;
+ Fts5Expr *pExpr = 0;
+ int rc;
+ int i;
+
+ const char **azConfig; /* Array of arguments for Fts5Config */
+ const char *zNearsetCmd = "nearset";
+ int nConfig; /* Size of azConfig[] */
+ Fts5Config *pConfig = 0;
+ int iArg = 1;
+
+ if( nArg<1 ){
+ zErr = sqlite3_mprintf("wrong number of arguments to function %s",
+ bTcl ? "fts5_expr_tcl" : "fts5_expr"
+ );
+ sqlite3_result_error(pCtx, zErr, -1);
+ sqlite3_free(zErr);
+ return;
+ }
+
+ if( bTcl && nArg>1 ){
+ zNearsetCmd = (const char*)sqlite3_value_text(apVal[1]);
+ iArg = 2;
+ }
+
+ nConfig = 3 + (nArg-iArg);
+ azConfig = (const char**)sqlite3_malloc64(sizeof(char*) * nConfig);
+ if( azConfig==0 ){
+ sqlite3_result_error_nomem(pCtx);
+ return;
+ }
+ azConfig[0] = 0;
+ azConfig[1] = "main";
+ azConfig[2] = "tbl";
+ for(i=3; iArg<nArg; iArg++){
+ const char *z = (const char*)sqlite3_value_text(apVal[iArg]);
+ azConfig[i++] = (z ? z : "");
+ }
+
+ zExpr = (const char*)sqlite3_value_text(apVal[0]);
+ if( zExpr==0 ) zExpr = "";
+
+ rc = sqlite3Fts5ConfigParse(pGlobal, db, nConfig, azConfig, &pConfig, &zErr);
+ if( rc==SQLITE_OK ){
+ rc = sqlite3Fts5ExprNew(pConfig, pConfig->nCol, zExpr, &pExpr, &zErr);
+ }
+ if( rc==SQLITE_OK ){
+ char *zText;
+ if( pExpr->pRoot->xNext==0 ){
+ zText = sqlite3_mprintf("");
+ }else if( bTcl ){
+ zText = fts5ExprPrintTcl(pConfig, zNearsetCmd, pExpr->pRoot);
+ }else{
+ zText = fts5ExprPrint(pConfig, pExpr->pRoot);
+ }
+ if( zText==0 ){
+ rc = SQLITE_NOMEM;
+ }else{
+ sqlite3_result_text(pCtx, zText, -1, SQLITE_TRANSIENT);
+ sqlite3_free(zText);
+ }
+ }
+
+ if( rc!=SQLITE_OK ){
+ if( zErr ){
+ sqlite3_result_error(pCtx, zErr, -1);
+ sqlite3_free(zErr);
+ }else{
+ sqlite3_result_error_code(pCtx, rc);
+ }
+ }
+ sqlite3_free((void *)azConfig);
+ sqlite3Fts5ConfigFree(pConfig);
+ sqlite3Fts5ExprFree(pExpr);
+}
+
+static void fts5ExprFunctionHr(
+ sqlite3_context *pCtx, /* Function call context */
+ int nArg, /* Number of args */
+ sqlite3_value **apVal /* Function arguments */
+){
+ fts5ExprFunction(pCtx, nArg, apVal, 0);
+}
+static void fts5ExprFunctionTcl(
+ sqlite3_context *pCtx, /* Function call context */
+ int nArg, /* Number of args */
+ sqlite3_value **apVal /* Function arguments */
+){
+ fts5ExprFunction(pCtx, nArg, apVal, 1);
+}
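+
+/* Example invocations of the two SQL wrappers above (results shown for
+** illustration; the exact text is produced by fts5ExprPrint()):
+**
+**   SELECT fts5_expr('a AND b');       -- '"a" AND "b"'
+**   SELECT fts5_expr('NEAR(a b, 5)');  -- 'NEAR("a" "b", 5)'
+*/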
+
+/*
+** The implementation of an SQLite user-defined-function that accepts a
+** single integer as an argument. If the integer is an alpha-numeric
+** unicode code point, 1 is returned. Otherwise 0.
+*/
+static void fts5ExprIsAlnum(
+ sqlite3_context *pCtx, /* Function call context */
+ int nArg, /* Number of args */
+ sqlite3_value **apVal /* Function arguments */
+){
+ int iCode;
+ u8 aArr[32];
+ if( nArg!=1 ){
+ sqlite3_result_error(pCtx,
+ "wrong number of arguments to function fts5_isalnum", -1
+ );
+ return;
+ }
+ memset(aArr, 0, sizeof(aArr));
+ sqlite3Fts5UnicodeCatParse("L*", aArr);
+ sqlite3Fts5UnicodeCatParse("N*", aArr);
+ sqlite3Fts5UnicodeCatParse("Co", aArr);
+ iCode = sqlite3_value_int(apVal[0]);
+ sqlite3_result_int(pCtx, aArr[sqlite3Fts5UnicodeCategory((u32)iCode)]);
+}
+
+static void fts5ExprFold(
+ sqlite3_context *pCtx, /* Function call context */
+ int nArg, /* Number of args */
+ sqlite3_value **apVal /* Function arguments */
+){
+ if( nArg!=1 && nArg!=2 ){
+ sqlite3_result_error(pCtx,
+ "wrong number of arguments to function fts5_fold", -1
+ );
+ }else{
+ int iCode;
+ int bRemoveDiacritics = 0;
+ iCode = sqlite3_value_int(apVal[0]);
+ if( nArg==2 ) bRemoveDiacritics = sqlite3_value_int(apVal[1]);
+ sqlite3_result_int(pCtx, sqlite3Fts5UnicodeFold(iCode, bRemoveDiacritics));
+ }
+}
+
+/*
+** This is called during initialization to register the fts5_expr() and
+** related scalar UDFs with the database handle passed as the second argument.
+*/
+static int sqlite3Fts5ExprInit(Fts5Global *pGlobal, sqlite3 *db){
+ struct Fts5ExprFunc {
+ const char *z;
+ void (*x)(sqlite3_context*,int,sqlite3_value**);
+ } aFunc[] = {
+ { "fts5_expr", fts5ExprFunctionHr },
+ { "fts5_expr_tcl", fts5ExprFunctionTcl },
+ { "fts5_isalnum", fts5ExprIsAlnum },
+ { "fts5_fold", fts5ExprFold },
+ };
+ int i;
+ int rc = SQLITE_OK;
+ void *pCtx = (void*)pGlobal;
+
+ for(i=0; rc==SQLITE_OK && i<ArraySize(aFunc); i++){
+ struct Fts5ExprFunc *p = &aFunc[i];
+ rc = sqlite3_create_function(db, p->z, -1, SQLITE_UTF8, pCtx, p->x, 0, 0);
+ }
+
+ /* Avoid warnings indicating that sqlite3Fts5ParserTrace() and
+ ** sqlite3Fts5ParserFallback() are unused */
+#ifndef NDEBUG
+ (void)sqlite3Fts5ParserTrace;
+#endif
+ (void)sqlite3Fts5ParserFallback;
+
+ return rc;
+}
+
+/*
+** Return the number of phrases in expression pExpr.
+*/
+static int sqlite3Fts5ExprPhraseCount(Fts5Expr *pExpr){
+ return (pExpr ? pExpr->nPhrase : 0);
+}
+
+/*
+** Return the number of terms in the iPhrase'th phrase in pExpr.
+*/
+static int sqlite3Fts5ExprPhraseSize(Fts5Expr *pExpr, int iPhrase){
+ if( iPhrase<0 || iPhrase>=pExpr->nPhrase ) return 0;
+ return pExpr->apExprPhrase[iPhrase]->nTerm;
+}
+
+/*
+** This function is used to access the current position list for phrase
+** iPhrase.
+*/
+static int sqlite3Fts5ExprPoslist(Fts5Expr *pExpr, int iPhrase, const u8 **pa){
+ int nRet;
+ Fts5ExprPhrase *pPhrase = pExpr->apExprPhrase[iPhrase];
+ Fts5ExprNode *pNode = pPhrase->pNode;
+ if( pNode->bEof==0 && pNode->iRowid==pExpr->pRoot->iRowid ){
+ *pa = pPhrase->poslist.p;
+ nRet = pPhrase->poslist.n;
+ }else{
+ *pa = 0;
+ nRet = 0;
+ }
+ return nRet;
+}
+
+struct Fts5PoslistPopulator {
+ Fts5PoslistWriter writer;
+ int bOk; /* True if ok to populate */
+ int bMiss;
+};
+
+static Fts5PoslistPopulator *sqlite3Fts5ExprClearPoslists(Fts5Expr *pExpr, int bLive){
+ Fts5PoslistPopulator *pRet;
+ pRet = sqlite3_malloc64(sizeof(Fts5PoslistPopulator)*pExpr->nPhrase);
+ if( pRet ){
+ int i;
+ memset(pRet, 0, sizeof(Fts5PoslistPopulator)*pExpr->nPhrase);
+ for(i=0; i<pExpr->nPhrase; i++){
+ Fts5Buffer *pBuf = &pExpr->apExprPhrase[i]->poslist;
+ Fts5ExprNode *pNode = pExpr->apExprPhrase[i]->pNode;
+ assert( pExpr->apExprPhrase[i]->nTerm==1 );
+ if( bLive &&
+ (pBuf->n==0 || pNode->iRowid!=pExpr->pRoot->iRowid || pNode->bEof)
+ ){
+ pRet[i].bMiss = 1;
+ }else{
+ pBuf->n = 0;
+ }
+ }
+ }
+ return pRet;
+}
+
+struct Fts5ExprCtx {
+ Fts5Expr *pExpr;
+ Fts5PoslistPopulator *aPopulator;
+ i64 iOff;
+};
+typedef struct Fts5ExprCtx Fts5ExprCtx;
+
+/*
+** TODO: Make this more efficient!
+*/
+static int fts5ExprColsetTest(Fts5Colset *pColset, int iCol){
+ int i;
+ for(i=0; i<pColset->nCol; i++){
+ if( pColset->aiCol[i]==iCol ) return 1;
+ }
+ return 0;
+}
+
+static int fts5ExprPopulatePoslistsCb(
+ void *pCtx, /* Copy of 2nd argument to xTokenize() */
+ int tflags, /* Mask of FTS5_TOKEN_* flags */
+ const char *pToken, /* Pointer to buffer containing token */
+ int nToken, /* Size of token in bytes */
+ int iUnused1, /* Byte offset of token within input text */
+ int iUnused2 /* Byte offset of end of token within input text */
+){
+ Fts5ExprCtx *p = (Fts5ExprCtx*)pCtx;
+ Fts5Expr *pExpr = p->pExpr;
+ int i;
+
+ UNUSED_PARAM2(iUnused1, iUnused2);
+
+ if( nToken>FTS5_MAX_TOKEN_SIZE ) nToken = FTS5_MAX_TOKEN_SIZE;
+ if( (tflags & FTS5_TOKEN_COLOCATED)==0 ) p->iOff++;
+ for(i=0; i<pExpr->nPhrase; i++){
+ Fts5ExprTerm *pTerm;
+ if( p->aPopulator[i].bOk==0 ) continue;
+ for(pTerm=&pExpr->apExprPhrase[i]->aTerm[0]; pTerm; pTerm=pTerm->pSynonym){
+ int nTerm = (int)strlen(pTerm->zTerm);
+ if( (nTerm==nToken || (nTerm<nToken && pTerm->bPrefix))
+ && memcmp(pTerm->zTerm, pToken, nTerm)==0
+ ){
+ int rc = sqlite3Fts5PoslistWriterAppend(
+ &pExpr->apExprPhrase[i]->poslist, &p->aPopulator[i].writer, p->iOff
+ );
+ if( rc ) return rc;
+ break;
+ }
+ }
+ }
+ return SQLITE_OK;
+}
+
+static int sqlite3Fts5ExprPopulatePoslists(
+ Fts5Config *pConfig,
+ Fts5Expr *pExpr,
+ Fts5PoslistPopulator *aPopulator,
+ int iCol,
+ const char *z, int n
+){
+ int i;
+ Fts5ExprCtx sCtx;
+ sCtx.pExpr = pExpr;
+ sCtx.aPopulator = aPopulator;
+ sCtx.iOff = (((i64)iCol) << 32) - 1;
+
+ for(i=0; i<pExpr->nPhrase; i++){
+ Fts5ExprNode *pNode = pExpr->apExprPhrase[i]->pNode;
+ Fts5Colset *pColset = pNode->pNear->pColset;
+ if( (pColset && 0==fts5ExprColsetTest(pColset, iCol))
+ || aPopulator[i].bMiss
+ ){
+ aPopulator[i].bOk = 0;
+ }else{
+ aPopulator[i].bOk = 1;
+ }
+ }
+
+ return sqlite3Fts5Tokenize(pConfig,
+ FTS5_TOKENIZE_DOCUMENT, z, n, (void*)&sCtx, fts5ExprPopulatePoslistsCb
+ );
+}
+
+static void fts5ExprClearPoslists(Fts5ExprNode *pNode){
+ if( pNode->eType==FTS5_TERM || pNode->eType==FTS5_STRING ){
+ pNode->pNear->apPhrase[0]->poslist.n = 0;
+ }else{
+ int i;
+ for(i=0; i<pNode->nChild; i++){
+ fts5ExprClearPoslists(pNode->apChild[i]);
+ }
+ }
+}
+
+static int fts5ExprCheckPoslists(Fts5ExprNode *pNode, i64 iRowid){
+ pNode->iRowid = iRowid;
+ pNode->bEof = 0;
+ switch( pNode->eType ){
+ case FTS5_TERM:
+ case FTS5_STRING:
+ return (pNode->pNear->apPhrase[0]->poslist.n>0);
+
+ case FTS5_AND: {
+ int i;
+ for(i=0; i<pNode->nChild; i++){
+ if( fts5ExprCheckPoslists(pNode->apChild[i], iRowid)==0 ){
+ fts5ExprClearPoslists(pNode);
+ return 0;
+ }
+ }
+ break;
+ }
+
+ case FTS5_OR: {
+ int i;
+ int bRet = 0;
+ for(i=0; i<pNode->nChild; i++){
+ if( fts5ExprCheckPoslists(pNode->apChild[i], iRowid) ){
+ bRet = 1;
+ }
+ }
+ return bRet;
+ }
+
+ default: {
+ assert( pNode->eType==FTS5_NOT );
+ if( 0==fts5ExprCheckPoslists(pNode->apChild[0], iRowid)
+ || 0!=fts5ExprCheckPoslists(pNode->apChild[1], iRowid)
+ ){
+ fts5ExprClearPoslists(pNode);
+ return 0;
+ }
+ break;
+ }
+ }
+ return 1;
+}
+
+static void sqlite3Fts5ExprCheckPoslists(Fts5Expr *pExpr, i64 iRowid){
+ fts5ExprCheckPoslists(pExpr->pRoot, iRowid);
+}
+
+/*
+** This function is only called for detail=columns tables.
+*/
+static int sqlite3Fts5ExprPhraseCollist(
+ Fts5Expr *pExpr,
+ int iPhrase,
+ const u8 **ppCollist,
+ int *pnCollist
+){
+ Fts5ExprPhrase *pPhrase = pExpr->apExprPhrase[iPhrase];
+ Fts5ExprNode *pNode = pPhrase->pNode;
+ int rc = SQLITE_OK;
+
+ assert( iPhrase>=0 && iPhrase<pExpr->nPhrase );
+ assert( pExpr->pConfig->eDetail==FTS5_DETAIL_COLUMNS );
+
+ if( pNode->bEof==0
+ && pNode->iRowid==pExpr->pRoot->iRowid
+ && pPhrase->poslist.n>0
+ ){
+ Fts5ExprTerm *pTerm = &pPhrase->aTerm[0];
+ if( pTerm->pSynonym ){
+ Fts5Buffer *pBuf = (Fts5Buffer*)&pTerm->pSynonym[1];
+ rc = fts5ExprSynonymList(
+ pTerm, pNode->iRowid, pBuf, (u8**)ppCollist, pnCollist
+ );
+ }else{
+ *ppCollist = pPhrase->aTerm[0].pIter->pData;
+ *pnCollist = pPhrase->aTerm[0].pIter->nData;
+ }
+ }else{
+ *ppCollist = 0;
+ *pnCollist = 0;
+ }
+
+ return rc;
+}
+
+/*
+** 2014 August 11
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+******************************************************************************
+**
+*/
+
+
+
+/* #include "fts5Int.h" */
+
+typedef struct Fts5HashEntry Fts5HashEntry;
+
+/*
+** This file contains the implementation of an in-memory hash table used
+** to accumulate "term -> doclist" content before it is flushed to a level-0
+** segment.
+*/
+
+
+struct Fts5Hash {
+ int eDetail; /* Copy of Fts5Config.eDetail */
+ int *pnByte; /* Pointer to bytes counter */
+ int nEntry; /* Number of entries currently in hash */
+ int nSlot; /* Size of aSlot[] array */
+ Fts5HashEntry *pScan; /* Current ordered scan item */
+ Fts5HashEntry **aSlot; /* Array of hash slots */
+};
+
+/*
+** Each entry in the hash table is represented by an object of the
+** following type. Each object, its key (a nul-terminated string) and
+** its current data are stored in a single memory allocation. The
+** key immediately follows the object in memory. The position list
+** data immediately follows the key data in memory.
+**
+** The data that follows the key is in a similar, but not identical format
+** to the doclist data stored in the database. It is:
+**
+** * Rowid, as a varint
+** * Position list, without 0x00 terminator.
+** * Size of previous position list and rowid, as a 4 byte
+** big-endian integer.
+**
+** iSzPoslist:
+**   Offset of the space reserved for the poslist-size field within the
+**   data area. Relative to the first byte of the structure.
+**
+** nData:
+**   Total bytes of data written so far, including the structure itself.
+*/
+struct Fts5HashEntry {
+ Fts5HashEntry *pHashNext; /* Next hash entry with same hash-key */
+ Fts5HashEntry *pScanNext; /* Next entry in sorted order */
+
+ int nAlloc; /* Total size of allocation */
+ int iSzPoslist; /* Offset of space for 4-byte poslist size */
+ int nData; /* Total bytes of data (incl. structure) */
+ int nKey; /* Length of key in bytes */
+ u8 bDel; /* Set delete-flag @ iSzPoslist */
+ u8 bContent; /* Set content-flag (detail=none mode) */
+ i16 iCol; /* Column of last value written */
+ int iPos; /* Position of last value written */
+ i64 iRowid; /* Rowid of last value written */
+};
+
+/*
+** Equivalent to:
+**
+**   char *fts5EntryKey(Fts5HashEntry *pEntry){ return (char*)&pEntry[1]; }
+*/
+#define fts5EntryKey(p) ( ((char *)(&(p)[1])) )
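+
+/* A sketch of the single allocation that backs each entry:
+**
+**   p                 fts5EntryKey(p)
+**   |                 |
+**   v                 v
+**   [Fts5HashEntry...][key, nul-terminated][rowid/poslist data...]
+**   |<------------------ p->nAlloc bytes ----------------------->|
+**
+** p->nData is the offset of the first unused byte of the data area,
+** measured from the start of the structure. */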
+
+
+/*
+** Allocate a new hash table.
+*/
+static int sqlite3Fts5HashNew(Fts5Config *pConfig, Fts5Hash **ppNew, int *pnByte){
+ int rc = SQLITE_OK;
+ Fts5Hash *pNew;
+
+ *ppNew = pNew = (Fts5Hash*)sqlite3_malloc(sizeof(Fts5Hash));
+ if( pNew==0 ){
+ rc = SQLITE_NOMEM;
+ }else{
+ sqlite3_int64 nByte;
+ memset(pNew, 0, sizeof(Fts5Hash));
+ pNew->pnByte = pnByte;
+ pNew->eDetail = pConfig->eDetail;
+
+ pNew->nSlot = 1024;
+ nByte = sizeof(Fts5HashEntry*) * pNew->nSlot;
+ pNew->aSlot = (Fts5HashEntry**)sqlite3_malloc64(nByte);
+ if( pNew->aSlot==0 ){
+ sqlite3_free(pNew);
+ *ppNew = 0;
+ rc = SQLITE_NOMEM;
+ }else{
+ memset(pNew->aSlot, 0, (size_t)nByte);
+ }
+ }
+ return rc;
+}
+
+/*
+** Free a hash table object.
+*/
+static void sqlite3Fts5HashFree(Fts5Hash *pHash){
+ if( pHash ){
+ sqlite3Fts5HashClear(pHash);
+ sqlite3_free(pHash->aSlot);
+ sqlite3_free(pHash);
+ }
+}
+
+/*
+** Empty (but do not delete) a hash table.
+*/
+static void sqlite3Fts5HashClear(Fts5Hash *pHash){
+ int i;
+ for(i=0; i<pHash->nSlot; i++){
+ Fts5HashEntry *pNext;
+ Fts5HashEntry *pSlot;
+ for(pSlot=pHash->aSlot[i]; pSlot; pSlot=pNext){
+ pNext = pSlot->pHashNext;
+ sqlite3_free(pSlot);
+ }
+ }
+ memset(pHash->aSlot, 0, pHash->nSlot * sizeof(Fts5HashEntry*));
+ pHash->nEntry = 0;
+}
+
+static unsigned int fts5HashKey(int nSlot, const u8 *p, int n){
+ int i;
+ unsigned int h = 13;
+ for(i=n-1; i>=0; i--){
+ h = (h << 3) ^ h ^ p[i];
+ }
+ return (h % nSlot);
+}
+
+static unsigned int fts5HashKey2(int nSlot, u8 b, const u8 *p, int n){
+ int i;
+ unsigned int h = 13;
+ for(i=n-1; i>=0; i--){
+ h = (h << 3) ^ h ^ p[i];
+ }
+ h = (h << 3) ^ h ^ b;
+ return (h % nSlot);
+}
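+
+/* fts5HashKey2(nSlot, b, p, n) computes the same slot as fts5HashKey()
+** would for the (n+1)-byte concatenation of b and p, without having to
+** build that concatenation. sqlite3Fts5HashWrite() below relies on (and
+** asserts) this equivalence when it stores b as the first key byte. */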
+
+/*
+** Resize the hash table by doubling the number of slots.
+*/
+static int fts5HashResize(Fts5Hash *pHash){
+ int nNew = pHash->nSlot*2;
+ int i;
+ Fts5HashEntry **apNew;
+ Fts5HashEntry **apOld = pHash->aSlot;
+
+ apNew = (Fts5HashEntry**)sqlite3_malloc64(nNew*sizeof(Fts5HashEntry*));
+ if( !apNew ) return SQLITE_NOMEM;
+ memset(apNew, 0, nNew*sizeof(Fts5HashEntry*));
+
+ for(i=0; i<pHash->nSlot; i++){
+ while( apOld[i] ){
+ unsigned int iHash;
+ Fts5HashEntry *p = apOld[i];
+ apOld[i] = p->pHashNext;
+ iHash = fts5HashKey(nNew, (u8*)fts5EntryKey(p),
+ (int)strlen(fts5EntryKey(p)));
+ p->pHashNext = apNew[iHash];
+ apNew[iHash] = p;
+ }
+ }
+
+ sqlite3_free(apOld);
+ pHash->nSlot = nNew;
+ pHash->aSlot = apNew;
+ return SQLITE_OK;
+}
+
+static int fts5HashAddPoslistSize(
+ Fts5Hash *pHash,
+ Fts5HashEntry *p,
+ Fts5HashEntry *p2
+){
+ int nRet = 0;
+ if( p->iSzPoslist ){
+ u8 *pPtr = p2 ? (u8*)p2 : (u8*)p;
+ int nData = p->nData;
+ if( pHash->eDetail==FTS5_DETAIL_NONE ){
+ assert( nData==p->iSzPoslist );
+ if( p->bDel ){
+ pPtr[nData++] = 0x00;
+ if( p->bContent ){
+ pPtr[nData++] = 0x00;
+ }
+ }
+ }else{
+ int nSz = (nData - p->iSzPoslist - 1); /* Size in bytes */
+ int nPos = nSz*2 + p->bDel; /* Value of nPos field */
+
+ assert( p->bDel==0 || p->bDel==1 );
+ if( nPos<=127 ){
+ pPtr[p->iSzPoslist] = (u8)nPos;
+ }else{
+ int nByte = sqlite3Fts5GetVarintLen((u32)nPos);
+ memmove(&pPtr[p->iSzPoslist + nByte], &pPtr[p->iSzPoslist + 1], nSz);
+ sqlite3Fts5PutVarint(&pPtr[p->iSzPoslist], nPos);
+ nData += (nByte-1);
+ }
+ }
+
+ nRet = nData - p->nData;
+ if( p2==0 ){
+ p->iSzPoslist = 0;
+ p->bDel = 0;
+ p->bContent = 0;
+ p->nData = nData;
+ }
+ }
+ return nRet;
+}
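+
+/* A worked example of the varint fix-up above (detail=full): if 200
+** bytes of position data were appended after the 1-byte placeholder,
+** then nSz==200 and nPos==400 (401 if the delete flag is set). 400 does
+** not fit in one varint byte, so the 200 data bytes are shifted up by
+** memmove() to make room for the 2-byte varint before it is written. */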
+
+/*
+** Add an entry to the in-memory hash table. The key is the concatenation
+** of bByte and (pToken/nToken). The value is (iRowid/iCol/iPos).
+**
+** (bByte || pToken) -> (iRowid,iCol,iPos)
+**
+** Or, if iCol is negative, then the value is a delete marker.
+*/
+static int sqlite3Fts5HashWrite(
+ Fts5Hash *pHash,
+ i64 iRowid, /* Rowid for this entry */
+ int iCol, /* Column token appears in (-ve -> delete) */
+ int iPos, /* Position of token within column */
+ char bByte, /* First byte of token */
+ const char *pToken, int nToken /* Token to add or remove to or from index */
+){
+ unsigned int iHash;
+ Fts5HashEntry *p;
+ u8 *pPtr;
+ int nIncr = 0; /* Amount to increment (*pHash->pnByte) by */
+ int bNew; /* If non-delete entry should be written */
+
+ bNew = (pHash->eDetail==FTS5_DETAIL_FULL);
+
+ /* Attempt to locate an existing hash entry */
+ iHash = fts5HashKey2(pHash->nSlot, (u8)bByte, (const u8*)pToken, nToken);
+ for(p=pHash->aSlot[iHash]; p; p=p->pHashNext){
+ char *zKey = fts5EntryKey(p);
+ if( zKey[0]==bByte
+ && p->nKey==nToken
+ && memcmp(&zKey[1], pToken, nToken)==0
+ ){
+ break;
+ }
+ }
+
+ /* If an existing hash entry cannot be found, create a new one. */
+ if( p==0 ){
+ /* Figure out how much space to allocate */
+ char *zKey;
+ sqlite3_int64 nByte = sizeof(Fts5HashEntry) + (nToken+1) + 1 + 64;
+ if( nByte<128 ) nByte = 128;
+
+ /* Grow the Fts5Hash.aSlot[] array if necessary. */
+ if( (pHash->nEntry*2)>=pHash->nSlot ){
+ int rc = fts5HashResize(pHash);
+ if( rc!=SQLITE_OK ) return rc;
+ iHash = fts5HashKey2(pHash->nSlot, (u8)bByte, (const u8*)pToken, nToken);
+ }
+
+ /* Allocate new Fts5HashEntry and add it to the hash table. */
+ p = (Fts5HashEntry*)sqlite3_malloc64(nByte);
+ if( !p ) return SQLITE_NOMEM;
+ memset(p, 0, sizeof(Fts5HashEntry));
+ p->nAlloc = (int)nByte;
+ zKey = fts5EntryKey(p);
+ zKey[0] = bByte;
+ memcpy(&zKey[1], pToken, nToken);
+ assert( iHash==fts5HashKey(pHash->nSlot, (u8*)zKey, nToken+1) );
+ p->nKey = nToken;
+ zKey[nToken+1] = '\0';
+ p->nData = nToken+1 + 1 + sizeof(Fts5HashEntry);
+ p->pHashNext = pHash->aSlot[iHash];
+ pHash->aSlot[iHash] = p;
+ pHash->nEntry++;
+
+ /* Add the first rowid field to the hash-entry */
+ p->nData += sqlite3Fts5PutVarint(&((u8*)p)[p->nData], iRowid);
+ p->iRowid = iRowid;
+
+ p->iSzPoslist = p->nData;
+ if( pHash->eDetail!=FTS5_DETAIL_NONE ){
+ p->nData += 1;
+ p->iCol = (pHash->eDetail==FTS5_DETAIL_FULL ? 0 : -1);
+ }
+
+ nIncr += p->nData;
+ }else{
+
+ /* Appending to an existing hash-entry. Check that there is enough
+ ** space to append the largest possible new entry. Worst case scenario
+ ** is:
+ **
+ ** + 9 bytes for a new rowid,
+    **     + 4 bytes reserved for the "poslist size" varint,
+ ** + 1 byte for a "new column" byte,
+ ** + 3 bytes for a new column number (16-bit max) as a varint,
+ ** + 5 bytes for the new position offset (32-bit max).
+ */
+ if( (p->nAlloc - p->nData) < (9 + 4 + 1 + 3 + 5) ){
+ sqlite3_int64 nNew = p->nAlloc * 2;
+ Fts5HashEntry *pNew;
+ Fts5HashEntry **pp;
+ pNew = (Fts5HashEntry*)sqlite3_realloc64(p, nNew);
+ if( pNew==0 ) return SQLITE_NOMEM;
+ pNew->nAlloc = (int)nNew;
+ for(pp=&pHash->aSlot[iHash]; *pp!=p; pp=&(*pp)->pHashNext);
+ *pp = pNew;
+ p = pNew;
+ }
+ nIncr -= p->nData;
+ }
+ assert( (p->nAlloc - p->nData) >= (9 + 4 + 1 + 3 + 5) );
+
+ pPtr = (u8*)p;
+
+ /* If this is a new rowid, append the 4-byte size field for the previous
+ ** entry, and the new rowid for this entry. */
+ if( iRowid!=p->iRowid ){
+ fts5HashAddPoslistSize(pHash, p, 0);
+ p->nData += sqlite3Fts5PutVarint(&pPtr[p->nData], iRowid - p->iRowid);
+ p->iRowid = iRowid;
+ bNew = 1;
+ p->iSzPoslist = p->nData;
+ if( pHash->eDetail!=FTS5_DETAIL_NONE ){
+ p->nData += 1;
+ p->iCol = (pHash->eDetail==FTS5_DETAIL_FULL ? 0 : -1);
+ p->iPos = 0;
+ }
+ }
+
+ if( iCol>=0 ){
+ if( pHash->eDetail==FTS5_DETAIL_NONE ){
+ p->bContent = 1;
+ }else{
+ /* Append a new column value, if necessary */
+ assert( iCol>=p->iCol );
+ if( iCol!=p->iCol ){
+ if( pHash->eDetail==FTS5_DETAIL_FULL ){
+ pPtr[p->nData++] = 0x01;
+ p->nData += sqlite3Fts5PutVarint(&pPtr[p->nData], iCol);
+ p->iCol = (i16)iCol;
+ p->iPos = 0;
+ }else{
+ bNew = 1;
+ p->iCol = (i16)(iPos = iCol);
+ }
+ }
+
+ /* Append the new position offset, if necessary */
+ if( bNew ){
+ p->nData += sqlite3Fts5PutVarint(&pPtr[p->nData], iPos - p->iPos + 2);
+ p->iPos = iPos;
+ }
+ }
+ }else{
+ /* This is a delete. Set the delete flag. */
+ p->bDel = 1;
+ }
+
+ nIncr += p->nData;
+ *pHash->pnByte += nIncr;
+ return SQLITE_OK;
+}
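+
+/*
+** Illustrative sketch (not part of the fts5 sources): the write-side
+** calling pattern for the hash table above. The real callers live in the
+** index code later in this file; the rowids and tokens here are made up.
+** The bByte value '0' is the prefix used for the main (non-prefix) term
+** index (see FTS5_MAIN_PREFIX below).
+*/
+#if 0
+static int exampleHashWrites(Fts5Hash *pHash){
+  int rc;
+  /* Rowid 1, "one two": two tokens in column 0, positions 0 and 1 */
+  rc = sqlite3Fts5HashWrite(pHash, 1, 0, 0, '0', "one", 3);
+  if( rc==SQLITE_OK ){
+    rc = sqlite3Fts5HashWrite(pHash, 1, 0, 1, '0', "two", 3);
+  }
+  /* Rowid 2: a delete marker for token "one" (negative iCol) */
+  if( rc==SQLITE_OK ){
+    rc = sqlite3Fts5HashWrite(pHash, 2, -1, 0, '0', "one", 3);
+  }
+  return rc;
+}
+#endif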
+
+
+/*
+** Arguments pLeft and pRight point to linked-lists of hash-entry objects,
+** each sorted in key order. This function merges the two lists into a
+** single list and returns a pointer to its first element.
+*/
+static Fts5HashEntry *fts5HashEntryMerge(
+ Fts5HashEntry *pLeft,
+ Fts5HashEntry *pRight
+){
+ Fts5HashEntry *p1 = pLeft;
+ Fts5HashEntry *p2 = pRight;
+ Fts5HashEntry *pRet = 0;
+ Fts5HashEntry **ppOut = &pRet;
+
+ while( p1 || p2 ){
+ if( p1==0 ){
+ *ppOut = p2;
+ p2 = 0;
+ }else if( p2==0 ){
+ *ppOut = p1;
+ p1 = 0;
+ }else{
+ int i = 0;
+ char *zKey1 = fts5EntryKey(p1);
+ char *zKey2 = fts5EntryKey(p2);
+ while( zKey1[i]==zKey2[i] ) i++;
+
+ if( ((u8)zKey1[i])>((u8)zKey2[i]) ){
+ /* p2 is smaller */
+ *ppOut = p2;
+ ppOut = &p2->pScanNext;
+ p2 = p2->pScanNext;
+ }else{
+ /* p1 is smaller */
+ *ppOut = p1;
+ ppOut = &p1->pScanNext;
+ p1 = p1->pScanNext;
+ }
+ *ppOut = 0;
+ }
+ }
+
+ return pRet;
+}
+
+/*
+** Extract all tokens from hash table iHash and link them into a list
+** in sorted order. The hash table is cleared before returning. It is
+** the responsibility of the caller to free the elements of the returned
+** list.
+*/
+static int fts5HashEntrySort(
+ Fts5Hash *pHash,
+ const char *pTerm, int nTerm, /* Query prefix, if any */
+ Fts5HashEntry **ppSorted
+){
+ const int nMergeSlot = 32;
+ Fts5HashEntry **ap;
+ Fts5HashEntry *pList;
+ int iSlot;
+ int i;
+
+ *ppSorted = 0;
+ ap = sqlite3_malloc64(sizeof(Fts5HashEntry*) * nMergeSlot);
+ if( !ap ) return SQLITE_NOMEM;
+ memset(ap, 0, sizeof(Fts5HashEntry*) * nMergeSlot);
+
+ for(iSlot=0; iSlot<pHash->nSlot; iSlot++){
+ Fts5HashEntry *pIter;
+ for(pIter=pHash->aSlot[iSlot]; pIter; pIter=pIter->pHashNext){
+ if( pTerm==0
+ || (pIter->nKey+1>=nTerm && 0==memcmp(fts5EntryKey(pIter), pTerm, nTerm))
+ ){
+ Fts5HashEntry *pEntry = pIter;
+ pEntry->pScanNext = 0;
+ for(i=0; ap[i]; i++){
+ pEntry = fts5HashEntryMerge(pEntry, ap[i]);
+ ap[i] = 0;
+ }
+ ap[i] = pEntry;
+ }
+ }
+ }
+
+ pList = 0;
+ for(i=0; i<nMergeSlot; i++){
+ pList = fts5HashEntryMerge(pList, ap[i]);
+ }
+
+ pHash->nEntry = 0;
+ sqlite3_free(ap);
+ *ppSorted = pList;
+ return SQLITE_OK;
+}
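+
+/*
+** Illustrative sketch (not part of the fts5 sources): the merge strategy
+** used by fts5HashEntrySort() above. The ap[] array acts as a binary
+** counter - slot i holds a sorted list built from roughly 2^i entries,
+** and inserting a one-element list cascades merges the way adding 1
+** ripples carries through a binary number, giving O(N log N) work in
+** total. The ExampleNode type and helper names are hypothetical.
+*/
+#if 0
+typedef struct ExampleNode ExampleNode;
+struct ExampleNode { int iVal; ExampleNode *pNext; };
+
+static ExampleNode *exampleMerge(ExampleNode *p1, ExampleNode *p2){
+  ExampleNode *pRet = 0;
+  ExampleNode **pp = &pRet;
+  while( p1 && p2 ){
+    if( p1->iVal<=p2->iVal ){ *pp = p1; p1 = p1->pNext; }
+    else                    { *pp = p2; p2 = p2->pNext; }
+    pp = &(*pp)->pNext;
+  }
+  *pp = p1 ? p1 : p2;
+  return pRet;
+}
+
+static ExampleNode *exampleSort(ExampleNode *pIn){
+  ExampleNode *ap[32];            /* Binary counter of sorted sub-lists */
+  ExampleNode *pList = 0;
+  int i;
+  memset(ap, 0, sizeof(ap));
+  while( pIn ){
+    ExampleNode *pEntry = pIn;
+    pIn = pIn->pNext;
+    pEntry->pNext = 0;
+    for(i=0; ap[i]; i++){         /* "Carry": merge equal-sized lists */
+      pEntry = exampleMerge(pEntry, ap[i]);
+      ap[i] = 0;
+    }
+    ap[i] = pEntry;
+  }
+  for(i=0; i<32; i++) pList = exampleMerge(pList, ap[i]);
+  return pList;
+}
+#endif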
+
+/*
+** Query the hash table for a doclist associated with term pTerm/nTerm.
+*/
+static int sqlite3Fts5HashQuery(
+ Fts5Hash *pHash, /* Hash table to query */
+  int nPre,                       /* Bytes of space to allocate before doclist */
+ const char *pTerm, int nTerm, /* Query term */
+ void **ppOut, /* OUT: Pointer to new object */
+ int *pnDoclist /* OUT: Size of doclist in bytes */
+){
+ unsigned int iHash = fts5HashKey(pHash->nSlot, (const u8*)pTerm, nTerm);
+ char *zKey = 0;
+ Fts5HashEntry *p;
+
+ for(p=pHash->aSlot[iHash]; p; p=p->pHashNext){
+ zKey = fts5EntryKey(p);
+ assert( p->nKey+1==(int)strlen(zKey) );
+ if( nTerm==p->nKey+1 && memcmp(zKey, pTerm, nTerm)==0 ) break;
+ }
+
+ if( p ){
+ int nHashPre = sizeof(Fts5HashEntry) + nTerm + 1;
+ int nList = p->nData - nHashPre;
+ u8 *pRet = (u8*)(*ppOut = sqlite3_malloc64(nPre + nList + 10));
+ if( pRet ){
+ Fts5HashEntry *pFaux = (Fts5HashEntry*)&pRet[nPre-nHashPre];
+ memcpy(&pRet[nPre], &((u8*)p)[nHashPre], nList);
+ nList += fts5HashAddPoslistSize(pHash, p, pFaux);
+ *pnDoclist = nList;
+ }else{
+ *pnDoclist = 0;
+ return SQLITE_NOMEM;
+ }
+ }else{
+ *ppOut = 0;
+ *pnDoclist = 0;
+ }
+
+ return SQLITE_OK;
+}
+
+static int sqlite3Fts5HashScanInit(
+ Fts5Hash *p, /* Hash table to query */
+ const char *pTerm, int nTerm /* Query prefix */
+){
+ return fts5HashEntrySort(p, pTerm, nTerm, &p->pScan);
+}
+
+static void sqlite3Fts5HashScanNext(Fts5Hash *p){
+ assert( !sqlite3Fts5HashScanEof(p) );
+ p->pScan = p->pScan->pScanNext;
+}
+
+static int sqlite3Fts5HashScanEof(Fts5Hash *p){
+ return (p->pScan==0);
+}
+
+static void sqlite3Fts5HashScanEntry(
+ Fts5Hash *pHash,
+ const char **pzTerm, /* OUT: term (nul-terminated) */
+ const u8 **ppDoclist, /* OUT: pointer to doclist */
+ int *pnDoclist /* OUT: size of doclist in bytes */
+){
+ Fts5HashEntry *p;
+ if( (p = pHash->pScan) ){
+ char *zKey = fts5EntryKey(p);
+ int nTerm = (int)strlen(zKey);
+ fts5HashAddPoslistSize(pHash, p, 0);
+ *pzTerm = zKey;
+ *ppDoclist = (const u8*)&zKey[nTerm+1];
+ *pnDoclist = p->nData - (sizeof(Fts5HashEntry) + nTerm + 1);
+ }else{
+ *pzTerm = 0;
+ *ppDoclist = 0;
+ *pnDoclist = 0;
+ }
+}
+
+/*
+** 2014 May 31
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+******************************************************************************
+**
+** Low level access to the FTS index stored in the database file. The
+** routines in this file implement all read and write access to the
+** %_data table. Other parts of the system access this functionality via
+** the interface defined in fts5Int.h.
+*/
+
+
+/* #include "fts5Int.h" */
+
+/*
+** Overview:
+**
+** The %_data table contains all the FTS indexes for an FTS5 virtual table.
+** As well as the main term index, there may be up to 31 prefix indexes.
+** The format is similar to FTS3/4, except that:
+**
+** * all segment b-tree leaf data is stored in fixed size page records
+** (e.g. 1000 bytes). A single doclist may span multiple pages. Care is
+** taken to ensure it is possible to iterate in either direction through
+** the entries in a doclist, or to seek to a specific entry within a
+** doclist, without loading it into memory.
+**
+** * large doclists that span many pages have associated "doclist index"
+** records that contain a copy of the first rowid on each page spanned by
+** the doclist. This is used to speed up seek operations, and merges of
+** large doclists with very small doclists.
+**
+** * extra fields in the "structure record" record the state of ongoing
+** incremental merge operations.
+**
+*/
+
+
+#define FTS5_OPT_WORK_UNIT 1000 /* Number of leaf pages per optimize step */
+#define FTS5_WORK_UNIT 64 /* Number of leaf pages in unit of work */
+
+#define FTS5_MIN_DLIDX_SIZE 4 /* Add dlidx if this many empty pages */
+
+#define FTS5_MAIN_PREFIX '0'
+
+#if FTS5_MAX_PREFIX_INDEXES > 31
+# error "FTS5_MAX_PREFIX_INDEXES is too large"
+#endif
+
+/*
+** Details:
+**
+** The %_data table managed by this module,
+**
+** CREATE TABLE %_data(id INTEGER PRIMARY KEY, block BLOB);
+**
+** , contains the following four types of records. See the comments
+** surrounding the FTS5_*_ROWID macros below for a description of how %_data
+** rowids are assigned to each of them.
+**
+** 1. Structure Records:
+**
+** The set of segments that make up an index - the index structure - are
+** recorded in a single record within the %_data table. The record consists
+** of a single 32-bit configuration cookie value followed by a list of
+** SQLite varints. If the FTS table features more than one index (because
+** there are one or more prefix indexes), it is guaranteed that all share
+** the same cookie value.
+**
+** Immediately following the configuration cookie, the record continues with
+** three varints:
+**
+** + number of levels,
+** + total number of segments on all levels,
+** + value of write counter.
+**
+** Then, for each level from 0 to nMax:
+**
+** + number of input segments in ongoing merge.
+** + total number of segments in level.
+** + for each segment from oldest to newest:
+** + segment id (always > 0)
+** + first leaf page number (often 1, always greater than 0)
+** + final leaf page number
+**
+** 2. The Averages Record:
+**
+** A single record within the %_data table. The data is a list of varints.
+** The first value is the number of rows in the index. Then, for each column
+** from left to right, the total number of tokens in the column for all
+** rows of the table.
+**
+** 3. Segment leaves:
+**
+** TERM/DOCLIST FORMAT:
+**
+** Most of each segment leaf is taken up by term/doclist data. The
+** general format of term/doclist, starting with the first term
+** on the leaf page, is:
+**
+** varint : size of first term
+** blob: first term data
+** doclist: first doclist
+** zero-or-more {
+** varint: number of bytes in common with previous term
+** varint: number of bytes of new term data (nNew)
+** blob: nNew bytes of new term data
+** doclist: next doclist
+** }
+**
+** doclist format:
+**
+** varint: first rowid
+** poslist: first poslist
+** zero-or-more {
+** varint: rowid delta (always > 0)
+** poslist: next poslist
+** }
+**
+** poslist format:
+**
+** varint: size of poslist in bytes multiplied by 2, not including
+** this field. Plus 1 if this entry carries the "delete" flag.
+** collist: collist for column 0
+** zero-or-more {
+** 0x01 byte
+** varint: column number (I)
+** collist: collist for column I
+** }
+**
+** collist format:
+**
+** varint: first offset + 2
+** zero-or-more {
+** varint: offset delta + 2
+** }
+**
+** PAGE FORMAT
+**
+** Each leaf page begins with a 4-byte header containing 2 16-bit
+** unsigned integer fields in big-endian format. They are:
+**
+** * The byte offset of the first rowid on the page, if it exists
+** and occurs before the first term (otherwise 0).
+**
+** * The byte offset of the start of the page footer. If the page
+** footer is 0 bytes in size, then this field is the same as the
+** size of the leaf page in bytes.
+**
+** The page footer consists of a single varint for each term located
+** on the page. Each varint is the byte offset of the current term
+** within the page, delta-compressed against the previous value. In
+** other words, the first varint in the footer is the byte offset of
+** the first term, the second is the byte offset of the second less that
+** of the first, and so on.
+**
+** The term/doclist format described above is accurate if the entire
+** term/doclist data fits on a single leaf page. If this is not the case,
+** the format is changed in two ways:
+**
+** + if the first rowid on a page occurs before the first term, it
+** is stored as a literal value:
+**
+** varint: first rowid
+**
+** + the first term on each page is stored in the same way as the
+** very first term of the segment:
+**
+** varint : size of first term
+** blob: first term data
+**
+** 4. Segment doclist indexes:
+**
+** Doclist indexes are themselves b-trees, however they usually consist of
+** a single leaf record only. The format of each doclist index leaf page
+** is:
+**
+** * Flags byte. Bits are:
+** 0x01: Clear if leaf is also the root page, otherwise set.
+**
+** * Page number of fts index leaf page. As a varint.
+**
+** * First rowid on page indicated by previous field. As a varint.
+**
+** * A list of varints, one for each subsequent termless page. A
+** positive delta if the termless page contains at least one rowid,
+**     or a 0x00 byte otherwise.
+**
+** Internal doclist index nodes are:
+**
+** * Flags byte. Bits are:
+** 0x01: Clear for root page, otherwise set.
+**
+** * Page number of first child page. As a varint.
+**
+** * Copy of first rowid on page indicated by previous field. As a varint.
+**
+** * A list of delta-encoded varints - the first rowid on each subsequent
+** child page.
+**
+*/
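+
+/*
+** Illustrative worked example (not part of the fts5 sources): under the
+** term/doclist format above, the doclist for a single row - rowid 22, in
+** which the current term appears at column 0 offset 0 and at column 2
+** offsets 1 and 3 - encodes as the seven bytes below.
+*/
+#if 0
+static const u8 aExampleDoclist[] = {
+  0x16,             /* varint: first rowid (22) */
+  0x0A,             /* varint: poslist size (5 bytes * 2, delete bit clear) */
+  0x02,             /*   collist, column 0: first offset (0) + 2 */
+  0x01,             /*   0x01 byte: a column number follows */
+  0x02,             /*   varint: column number (2) */
+  0x03,             /*   collist, column 2: first offset (1) + 2 */
+  0x04              /*   varint: offset delta (3-1) + 2 */
+};
+#endif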
+
+/*
+** Rowids for the averages and structure records in the %_data table.
+*/
+#define FTS5_AVERAGES_ROWID 1 /* Rowid used for the averages record */
+#define FTS5_STRUCTURE_ROWID 10 /* The structure record */
+
+/*
+** Macros determining the rowids used by segment leaves and dlidx leaves
+** and nodes. All nodes and leaves are stored in the %_data table with large
+** positive rowids.
+**
+** Each segment has a unique non-zero 16-bit id.
+**
+** The rowid for each segment leaf is found by passing the segment id and
+** the leaf page number to the FTS5_SEGMENT_ROWID macro. Leaves are numbered
+** sequentially starting from 1.
+*/
+#define FTS5_DATA_ID_B 16 /* Max seg id number 65535 */
+#define FTS5_DATA_DLI_B 1 /* Doclist-index flag (1 bit) */
+#define FTS5_DATA_HEIGHT_B 5 /* Max dlidx tree height of 32 */
+#define FTS5_DATA_PAGE_B 31 /* Max page number of 2147483648 */
+
+#define fts5_dri(segid, dlidx, height, pgno) ( \
+ ((i64)(segid) << (FTS5_DATA_PAGE_B+FTS5_DATA_HEIGHT_B+FTS5_DATA_DLI_B)) + \
+ ((i64)(dlidx) << (FTS5_DATA_PAGE_B + FTS5_DATA_HEIGHT_B)) + \
+ ((i64)(height) << (FTS5_DATA_PAGE_B)) + \
+ ((i64)(pgno)) \
+)
+
+#define FTS5_SEGMENT_ROWID(segid, pgno) fts5_dri(segid, 0, 0, pgno)
+#define FTS5_DLIDX_ROWID(segid, height, pgno) fts5_dri(segid, 1, height, pgno)
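+
+/*
+** Illustrative sketch (not part of the fts5 sources): how the bit-fields
+** above pack into a %_data rowid. For a segment leaf both the dlidx flag
+** and the height are zero, so the rowid reduces to (segid << 37) + pgno;
+** a dlidx leaf additionally sets the flag bit at position 36.
+*/
+#if 0
+static void exampleRowids(void){
+  i64 iLeaf  = FTS5_SEGMENT_ROWID(4, 1);   /* Segment 4, leaf page 1 */
+  i64 iDlidx = FTS5_DLIDX_ROWID(4, 0, 7);  /* Segment 4, dlidx, page 7 */
+  assert( iLeaf==(((i64)4<<37) + 1) );
+  assert( iDlidx==(((i64)4<<37) + ((i64)1<<36) + 7) );
+}
+#endif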
+
+#ifdef SQLITE_DEBUG
+static int sqlite3Fts5Corrupt() { return SQLITE_CORRUPT_VTAB; }
+#endif
+
+
+/*
+** Each time a blob is read from the %_data table, it is padded with this
+** many zero bytes. This makes it easier to decode the various record formats
+** without overreading if the records are corrupt.
+*/
+#define FTS5_DATA_ZERO_PADDING 8
+#define FTS5_DATA_PADDING 20
+
+typedef struct Fts5Data Fts5Data;
+typedef struct Fts5DlidxIter Fts5DlidxIter;
+typedef struct Fts5DlidxLvl Fts5DlidxLvl;
+typedef struct Fts5DlidxWriter Fts5DlidxWriter;
+typedef struct Fts5Iter Fts5Iter;
+typedef struct Fts5PageWriter Fts5PageWriter;
+typedef struct Fts5SegIter Fts5SegIter;
+typedef struct Fts5DoclistIter Fts5DoclistIter;
+typedef struct Fts5SegWriter Fts5SegWriter;
+typedef struct Fts5Structure Fts5Structure;
+typedef struct Fts5StructureLevel Fts5StructureLevel;
+typedef struct Fts5StructureSegment Fts5StructureSegment;
+
+struct Fts5Data {
+ u8 *p; /* Pointer to buffer containing record */
+ int nn; /* Size of record in bytes */
+ int szLeaf; /* Size of leaf without page-index */
+};
+
+/*
+** One object per %_data table.
+*/
+struct Fts5Index {
+ Fts5Config *pConfig; /* Virtual table configuration */
+ char *zDataTbl; /* Name of %_data table */
+ int nWorkUnit; /* Leaf pages in a "unit" of work */
+
+ /*
+ ** Variables related to the accumulation of tokens and doclists within the
+ ** in-memory hash tables before they are flushed to disk.
+ */
+ Fts5Hash *pHash; /* Hash table for in-memory data */
+ int nPendingData; /* Current bytes of pending data */
+ i64 iWriteRowid; /* Rowid for current doc being written */
+ int bDelete; /* Current write is a delete */
+
+ /* Error state. */
+ int rc; /* Current error code */
+
+ /* State used by the fts5DataXXX() functions. */
+ sqlite3_blob *pReader; /* RO incr-blob open on %_data table */
+ sqlite3_stmt *pWriter; /* "INSERT ... %_data VALUES(?,?)" */
+ sqlite3_stmt *pDeleter; /* "DELETE FROM %_data ... id>=? AND id<=?" */
+ sqlite3_stmt *pIdxWriter; /* "INSERT ... %_idx VALUES(?,?,?,?)" */
+ sqlite3_stmt *pIdxDeleter; /* "DELETE FROM %_idx WHERE segid=? */
+ sqlite3_stmt *pIdxSelect;
+ int nRead; /* Total number of blocks read */
+
+ sqlite3_stmt *pDataVersion;
+ i64 iStructVersion; /* data_version when pStruct read */
+ Fts5Structure *pStruct; /* Current db structure (or NULL) */
+};
+
+struct Fts5DoclistIter {
+ u8 *aEof; /* Pointer to 1 byte past end of doclist */
+
+ /* Output variables. aPoslist==0 at EOF */
+ i64 iRowid;
+ u8 *aPoslist;
+ int nPoslist;
+ int nSize;
+};
+
+/*
+** The contents of the "structure" record for each index are represented
+** using an Fts5Structure record in memory, which uses instances of the
+** other Fts5StructureXXX types as components.
+*/
+struct Fts5StructureSegment {
+ int iSegid; /* Segment id */
+ int pgnoFirst; /* First leaf page number in segment */
+ int pgnoLast; /* Last leaf page number in segment */
+};
+struct Fts5StructureLevel {
+ int nMerge; /* Number of segments in incr-merge */
+ int nSeg; /* Total number of segments on level */
+ Fts5StructureSegment *aSeg; /* Array of segments. aSeg[0] is oldest. */
+};
+struct Fts5Structure {
+ int nRef; /* Object reference count */
+ u64 nWriteCounter; /* Total leaves written to level 0 */
+ int nSegment; /* Total segments in this structure */
+ int nLevel; /* Number of levels in this index */
+ Fts5StructureLevel aLevel[1]; /* Array of nLevel level objects */
+};
+
+/*
+** An object of type Fts5SegWriter is used to write to segments.
+*/
+struct Fts5PageWriter {
+ int pgno; /* Page number for this page */
+ int iPrevPgidx; /* Previous value written into pgidx */
+ Fts5Buffer buf; /* Buffer containing leaf data */
+ Fts5Buffer pgidx; /* Buffer containing page-index */
+ Fts5Buffer term; /* Buffer containing previous term on page */
+};
+struct Fts5DlidxWriter {
+ int pgno; /* Page number for this page */
+ int bPrevValid; /* True if iPrev is valid */
+ i64 iPrev; /* Previous rowid value written to page */
+ Fts5Buffer buf; /* Buffer containing page data */
+};
+struct Fts5SegWriter {
+ int iSegid; /* Segid to write to */
+ Fts5PageWriter writer; /* PageWriter object */
+ i64 iPrevRowid; /* Previous rowid written to current leaf */
+ u8 bFirstRowidInDoclist; /* True if next rowid is first in doclist */
+ u8 bFirstRowidInPage; /* True if next rowid is first in page */
+ /* TODO1: Can use (writer.pgidx.n==0) instead of bFirstTermInPage */
+ u8 bFirstTermInPage; /* True if next term will be first in leaf */
+ int nLeafWritten; /* Number of leaf pages written */
+ int nEmpty; /* Number of contiguous term-less nodes */
+
+ int nDlidx; /* Allocated size of aDlidx[] array */
+ Fts5DlidxWriter *aDlidx; /* Array of Fts5DlidxWriter objects */
+
+ /* Values to insert into the %_idx table */
+ Fts5Buffer btterm; /* Next term to insert into %_idx table */
+ int iBtPage; /* Page number corresponding to btterm */
+};
+
+typedef struct Fts5CResult Fts5CResult;
+struct Fts5CResult {
+  u16 iFirst;                     /* aSeg[] index of iterator ranked first */
+ u8 bTermEq; /* True if the terms are equal */
+};
+
+/*
+** Object for iterating through a single segment, visiting each term/rowid
+** pair in the segment.
+**
+** pSeg:
+** The segment to iterate through.
+**
+** iLeafPgno:
+** Current leaf page number within segment.
+**
+** iLeafOffset:
+**   Byte offset within the current leaf of the first byte of the current
+**   entry's position-list data (one byte past the position-list size
+**   field). The exception is when the rowid for the current entry is the
+**   last thing on the leaf page.
+**
+** pLeaf:
+** Buffer containing current leaf page data. Set to NULL at EOF.
+**
+** iTermLeafPgno, iTermLeafOffset:
+** Leaf page number containing the last term read from the segment. And
+** the offset immediately following the term data.
+**
+** flags:
+** Mask of FTS5_SEGITER_XXX values. Interpreted as follows:
+**
+** FTS5_SEGITER_ONETERM:
+** If set, set the iterator to point to EOF after the current doclist
+** has been exhausted. Do not proceed to the next term in the segment.
+**
+** FTS5_SEGITER_REVERSE:
+** This flag is only ever set if FTS5_SEGITER_ONETERM is also set. If
+**     it is set, iterate through rowids in descending order instead of the
+** default ascending order.
+**
+** iRowidOffset/nRowidOffset/aRowidOffset:
+** These are used if the FTS5_SEGITER_REVERSE flag is set.
+**
+** For each rowid on the page corresponding to the current term, the
+** corresponding aRowidOffset[] entry is set to the byte offset of the
+** start of the "position-list-size" field within the page.
+**
+** iTermIdx:
+** Index of current term on iTermLeafPgno.
+*/
+struct Fts5SegIter {
+ Fts5StructureSegment *pSeg; /* Segment to iterate through */
+ int flags; /* Mask of configuration flags */
+ int iLeafPgno; /* Current leaf page number */
+ Fts5Data *pLeaf; /* Current leaf data */
+ Fts5Data *pNextLeaf; /* Leaf page (iLeafPgno+1) */
+ int iLeafOffset; /* Byte offset within current leaf */
+
+ /* Next method */
+ void (*xNext)(Fts5Index*, Fts5SegIter*, int*);
+
+ /* The page and offset from which the current term was read. The offset
+ ** is the offset of the first rowid in the current doclist. */
+ int iTermLeafPgno;
+ int iTermLeafOffset;
+
+ int iPgidxOff; /* Next offset in pgidx */
+ int iEndofDoclist;
+
+ /* The following are only used if the FTS5_SEGITER_REVERSE flag is set. */
+ int iRowidOffset; /* Current entry in aRowidOffset[] */
+ int nRowidOffset; /* Allocated size of aRowidOffset[] array */
+ int *aRowidOffset; /* Array of offset to rowid fields */
+
+ Fts5DlidxIter *pDlidx; /* If there is a doclist-index */
+
+ /* Variables populated based on current entry. */
+ Fts5Buffer term; /* Current term */
+ i64 iRowid; /* Current rowid */
+ int nPos; /* Number of bytes in current position list */
+ u8 bDel; /* True if the delete flag is set */
+};
+
+/*
+** Argument is a pointer to an Fts5Data structure that contains a
+** leaf page.
+*/
+#define ASSERT_SZLEAF_OK(x) assert( \
+ (x)->szLeaf==(x)->nn || (x)->szLeaf==fts5GetU16(&(x)->p[2]) \
+)
+
+#define FTS5_SEGITER_ONETERM 0x01
+#define FTS5_SEGITER_REVERSE 0x02
+
+/*
+** Argument is a pointer to an Fts5Data structure that contains a leaf
+** page. This macro evaluates to true if the leaf contains no terms, or
+** false if it contains at least one term.
+*/
+#define fts5LeafIsTermless(x) ((x)->szLeaf >= (x)->nn)
+
+#define fts5LeafTermOff(x, i) (fts5GetU16(&(x)->p[(x)->szLeaf + (i)*2]))
+
+#define fts5LeafFirstRowidOff(x) (fts5GetU16((x)->p))
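+
+/*
+** Illustrative sketch (not part of the fts5 sources): decoding the leaf
+** page header and footer described in the PAGE FORMAT section above,
+** using the macros just defined. Fts5Data.szLeaf is assumed to have been
+** set from the second header field, as fts5DataRead() below does.
+*/
+#if 0
+static void exampleReadLeafHeader(Fts5Data *pLeaf){
+  int iRowidOff = fts5LeafFirstRowidOff(pLeaf); /* 0 if no pre-term rowid */
+  int iPgidx = pLeaf->szLeaf;     /* Page footer begins at szLeaf */
+  int iTermOff = 0;               /* Absolute offset of current term */
+  int nTerm = 0;                  /* Number of terms on the page */
+  int iDelta;
+  while( iPgidx<pLeaf->nn ){
+    iPgidx += fts5GetVarint32(&pLeaf->p[iPgidx], iDelta);
+    iTermOff += iDelta;           /* Footer varints are delta-compressed */
+    nTerm++;
+  }
+  (void)iRowidOff; (void)iTermOff; (void)nTerm;
+}
+#endif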
+
+/*
+** Object for iterating through the merged results of one or more segments,
+** visiting each term/rowid pair in the merged data.
+**
+** nSeg is always a power of two greater than or equal to the number of
+** segments that this object is merging data from. Both the aSeg[] and
+** aFirst[] arrays are sized at nSeg entries. The aSeg[] array is padded
+** with zeroed objects - these are handled as if they were iterators opened
+** on empty segments.
+**
+** The result of comparing segments aSeg[N] and aSeg[N+1], where N is an
+** even number, is stored in aFirst[(nSeg+N)/2]. The "result" of the
+** comparison in this context is the index of the iterator that currently
+** points to the smaller term/rowid combination. Iterators at EOF are
+** considered to be greater than all other iterators.
+**
+** aFirst[1] contains the index in aSeg[] of the iterator that points to
+** the smallest key overall. aFirst[0] is unused.
+**
+** poslist:
+** Used by sqlite3Fts5IterPoslist() when the poslist needs to be buffered.
+** There is no way to tell if this is populated or not.
+*/
+struct Fts5Iter {
+ Fts5IndexIter base; /* Base class containing output vars */
+
+ Fts5Index *pIndex; /* Index that owns this iterator */
+ Fts5Buffer poslist; /* Buffer containing current poslist */
+ Fts5Colset *pColset; /* Restrict matches to these columns */
+
+ /* Invoked to set output variables. */
+ void (*xSetOutputs)(Fts5Iter*, Fts5SegIter*);
+
+ int nSeg; /* Size of aSeg[] array */
+ int bRev; /* True to iterate in reverse order */
+ u8 bSkipEmpty; /* True to skip deleted entries */
+
+  i64 iSwitchRowid;               /* First (in iter order) rowid other than aFirst[1] */
+ Fts5CResult *aFirst; /* Current merge state (see above) */
+ Fts5SegIter aSeg[1]; /* Array of segment iterators */
+};
+
+
+/*
+** An instance of the following type is used to iterate through the contents
+** of a doclist-index record.
+**
+** pData:
+** Record containing the doclist-index data.
+**
+** bEof:
+** Set to true once iterator has reached EOF.
+**
+** iOff:
+** Set to the current offset within record pData.
+*/
+struct Fts5DlidxLvl {
+ Fts5Data *pData; /* Data for current page of this level */
+ int iOff; /* Current offset into pData */
+ int bEof; /* At EOF already */
+ int iFirstOff; /* Used by reverse iterators */
+
+ /* Output variables */
+ int iLeafPgno; /* Page number of current leaf page */
+ i64 iRowid; /* First rowid on leaf iLeafPgno */
+};
+struct Fts5DlidxIter {
+ int nLvl;
+ int iSegid;
+ Fts5DlidxLvl aLvl[1];
+};
+
+static void fts5PutU16(u8 *aOut, u16 iVal){
+ aOut[0] = (iVal>>8);
+ aOut[1] = (iVal&0xFF);
+}
+
+static u16 fts5GetU16(const u8 *aIn){
+ return ((u16)aIn[0] << 8) + aIn[1];
+}
+
+/*
+** Allocate and return a buffer at least nByte bytes in size.
+**
+** If an OOM error is encountered, return NULL and set the error code in
+** the Fts5Index handle passed as the first argument.
+*/
+static void *fts5IdxMalloc(Fts5Index *p, sqlite3_int64 nByte){
+ return sqlite3Fts5MallocZero(&p->rc, nByte);
+}
+
+/*
+** Compare the contents of the pLeft buffer with the pRight/nRight blob.
+**
+** Return -ve if pLeft is smaller than pRight, 0 if they are equal or
+** +ve if pRight is smaller than pLeft. In other words:
+**
+** res = *pLeft - *pRight
+*/
+#ifdef SQLITE_DEBUG
+static int fts5BufferCompareBlob(
+ Fts5Buffer *pLeft, /* Left hand side of comparison */
+ const u8 *pRight, int nRight /* Right hand side of comparison */
+){
+ int nCmp = MIN(pLeft->n, nRight);
+ int res = memcmp(pLeft->p, pRight, nCmp);
+ return (res==0 ? (pLeft->n - nRight) : res);
+}
+#endif
+
+/*
+** Compare the contents of the two buffers using memcmp(). If one buffer
+** is a prefix of the other, it is considered the lesser.
+**
+** Return -ve if pLeft is smaller than pRight, 0 if they are equal or
+** +ve if pRight is smaller than pLeft. In other words:
+**
+** res = *pLeft - *pRight
+*/
+static int fts5BufferCompare(Fts5Buffer *pLeft, Fts5Buffer *pRight){
+ int nCmp = MIN(pLeft->n, pRight->n);
+ int res = fts5Memcmp(pLeft->p, pRight->p, nCmp);
+ return (res==0 ? (pLeft->n - pRight->n) : res);
+}
+
+static int fts5LeafFirstTermOff(Fts5Data *pLeaf){
+ int ret;
+ fts5GetVarint32(&pLeaf->p[pLeaf->szLeaf], ret);
+ return ret;
+}
+
+/*
+** Close the read-only blob handle, if it is open.
+*/
+static void sqlite3Fts5IndexCloseReader(Fts5Index *p){
+ if( p->pReader ){
+ sqlite3_blob *pReader = p->pReader;
+ p->pReader = 0;
+ sqlite3_blob_close(pReader);
+ }
+}
+
+/*
+** Retrieve a record from the %_data table.
+**
+** If an error occurs, NULL is returned and an error left in the
+** Fts5Index object.
+*/
+static Fts5Data *fts5DataRead(Fts5Index *p, i64 iRowid){
+ Fts5Data *pRet = 0;
+ if( p->rc==SQLITE_OK ){
+ int rc = SQLITE_OK;
+
+ if( p->pReader ){
+ /* This call may return SQLITE_ABORT if there has been a savepoint
+ ** rollback since it was last used. In this case a new blob handle
+ ** is required. */
+ sqlite3_blob *pBlob = p->pReader;
+ p->pReader = 0;
+ rc = sqlite3_blob_reopen(pBlob, iRowid);
+ assert( p->pReader==0 );
+ p->pReader = pBlob;
+ if( rc!=SQLITE_OK ){
+ sqlite3Fts5IndexCloseReader(p);
+ }
+ if( rc==SQLITE_ABORT ) rc = SQLITE_OK;
+ }
+
+ /* If the blob handle is not open at this point, open it and seek
+ ** to the requested entry. */
+ if( p->pReader==0 && rc==SQLITE_OK ){
+ Fts5Config *pConfig = p->pConfig;
+ rc = sqlite3_blob_open(pConfig->db,
+ pConfig->zDb, p->zDataTbl, "block", iRowid, 0, &p->pReader
+ );
+ }
+
+ /* If either of the sqlite3_blob_open() or sqlite3_blob_reopen() calls
+ ** above returned SQLITE_ERROR, return SQLITE_CORRUPT_VTAB instead.
+ ** All the reasons those functions might return SQLITE_ERROR - missing
+ ** table, missing row, non-blob/text in block column - indicate
+ ** backing store corruption. */
+ if( rc==SQLITE_ERROR ) rc = FTS5_CORRUPT;
+
+ if( rc==SQLITE_OK ){
+ u8 *aOut = 0; /* Read blob data into this buffer */
+ int nByte = sqlite3_blob_bytes(p->pReader);
+ sqlite3_int64 nAlloc = sizeof(Fts5Data) + nByte + FTS5_DATA_PADDING;
+ pRet = (Fts5Data*)sqlite3_malloc64(nAlloc);
+ if( pRet ){
+ pRet->nn = nByte;
+ aOut = pRet->p = (u8*)&pRet[1];
+ }else{
+ rc = SQLITE_NOMEM;
+ }
+
+ if( rc==SQLITE_OK ){
+ rc = sqlite3_blob_read(p->pReader, aOut, nByte, 0);
+ }
+ if( rc!=SQLITE_OK ){
+ sqlite3_free(pRet);
+ pRet = 0;
+ }else{
+ /* TODO1: Fix this */
+ pRet->p[nByte] = 0x00;
+ pRet->p[nByte+1] = 0x00;
+ pRet->szLeaf = fts5GetU16(&pRet->p[2]);
+ }
+ }
+ p->rc = rc;
+ p->nRead++;
+ }
+
+ assert( (pRet==0)==(p->rc!=SQLITE_OK) );
+ return pRet;
+}
+
+/*
+** Release a reference to data record returned by an earlier call to
+** fts5DataRead().
+*/
+static void fts5DataRelease(Fts5Data *pData){
+ sqlite3_free(pData);
+}
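+
+/*
+** Illustrative sketch (not part of the fts5 sources): the read/release
+** calling pattern used throughout this file. fts5DataRead() returns NULL
+** if and only if an error code is stored in Fts5Index.rc, so a caller
+** can test the pointer alone and report the sticky error code once at
+** the end. The helper name is hypothetical.
+*/
+#if 0
+static int exampleReadAverages(Fts5Index *p){
+  Fts5Data *pData = fts5DataRead(p, FTS5_AVERAGES_ROWID);
+  if( pData ){
+    /* ...decode the varints in pData->p[0..pData->nn-1] here... */
+    fts5DataRelease(pData);
+  }
+  return p->rc;                   /* SQLITE_OK unless a read failed */
+}
+#endif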
+
+static Fts5Data *fts5LeafRead(Fts5Index *p, i64 iRowid){
+ Fts5Data *pRet = fts5DataRead(p, iRowid);
+ if( pRet ){
+ if( pRet->nn<4 || pRet->szLeaf>pRet->nn ){
+ p->rc = FTS5_CORRUPT;
+ fts5DataRelease(pRet);
+ pRet = 0;
+ }
+ }
+ return pRet;
+}
+
+static int fts5IndexPrepareStmt(
+ Fts5Index *p,
+ sqlite3_stmt **ppStmt,
+ char *zSql
+){
+ if( p->rc==SQLITE_OK ){
+ if( zSql ){
+ p->rc = sqlite3_prepare_v3(p->pConfig->db, zSql, -1,
+ SQLITE_PREPARE_PERSISTENT|SQLITE_PREPARE_NO_VTAB,
+ ppStmt, 0);
+ }else{
+ p->rc = SQLITE_NOMEM;
+ }
+ }
+ sqlite3_free(zSql);
+ return p->rc;
+}
+
+
+/*
+** INSERT OR REPLACE a record into the %_data table.
+*/
+static void fts5DataWrite(Fts5Index *p, i64 iRowid, const u8 *pData, int nData){
+ if( p->rc!=SQLITE_OK ) return;
+
+ if( p->pWriter==0 ){
+ Fts5Config *pConfig = p->pConfig;
+ fts5IndexPrepareStmt(p, &p->pWriter, sqlite3_mprintf(
+ "REPLACE INTO '%q'.'%q_data'(id, block) VALUES(?,?)",
+ pConfig->zDb, pConfig->zName
+ ));
+ if( p->rc ) return;
+ }
+
+ sqlite3_bind_int64(p->pWriter, 1, iRowid);
+ sqlite3_bind_blob(p->pWriter, 2, pData, nData, SQLITE_STATIC);
+ sqlite3_step(p->pWriter);
+ p->rc = sqlite3_reset(p->pWriter);
+ sqlite3_bind_null(p->pWriter, 2);
+}
+
+/*
+** Execute the following SQL:
+**
+** DELETE FROM %_data WHERE id BETWEEN $iFirst AND $iLast
+*/
+static void fts5DataDelete(Fts5Index *p, i64 iFirst, i64 iLast){
+ if( p->rc!=SQLITE_OK ) return;
+
+ if( p->pDeleter==0 ){
+ Fts5Config *pConfig = p->pConfig;
+ char *zSql = sqlite3_mprintf(
+ "DELETE FROM '%q'.'%q_data' WHERE id>=? AND id<=?",
+ pConfig->zDb, pConfig->zName
+ );
+ if( fts5IndexPrepareStmt(p, &p->pDeleter, zSql) ) return;
+ }
+
+ sqlite3_bind_int64(p->pDeleter, 1, iFirst);
+ sqlite3_bind_int64(p->pDeleter, 2, iLast);
+ sqlite3_step(p->pDeleter);
+ p->rc = sqlite3_reset(p->pDeleter);
+}
+
+/*
+** Remove all records associated with segment iSegid.
+*/
+static void fts5DataRemoveSegment(Fts5Index *p, int iSegid){
+ i64 iFirst = FTS5_SEGMENT_ROWID(iSegid, 0);
+ i64 iLast = FTS5_SEGMENT_ROWID(iSegid+1, 0)-1;
+ fts5DataDelete(p, iFirst, iLast);
+ if( p->pIdxDeleter==0 ){
+ Fts5Config *pConfig = p->pConfig;
+ fts5IndexPrepareStmt(p, &p->pIdxDeleter, sqlite3_mprintf(
+ "DELETE FROM '%q'.'%q_idx' WHERE segid=?",
+ pConfig->zDb, pConfig->zName
+ ));
+ }
+ if( p->rc==SQLITE_OK ){
+ sqlite3_bind_int(p->pIdxDeleter, 1, iSegid);
+ sqlite3_step(p->pIdxDeleter);
+ p->rc = sqlite3_reset(p->pIdxDeleter);
+ }
+}
+
+/*
+** Release a reference to an Fts5Structure object returned by an earlier
+** call to fts5StructureRead() or fts5StructureDecode().
+*/
+static void fts5StructureRelease(Fts5Structure *pStruct){
+ if( pStruct && 0>=(--pStruct->nRef) ){
+ int i;
+ assert( pStruct->nRef==0 );
+ for(i=0; i<pStruct->nLevel; i++){
+ sqlite3_free(pStruct->aLevel[i].aSeg);
+ }
+ sqlite3_free(pStruct);
+ }
+}
+
+static void fts5StructureRef(Fts5Structure *pStruct){
+ pStruct->nRef++;
+}
+
+/*
+** Deserialize and return the structure record currently stored in serialized
+** form within buffer pData/nData.
+**
+** The Fts5Structure.aLevel[] and each Fts5StructureLevel.aSeg[] array
+** are over-allocated by one slot. This allows the structure contents
+** to be more easily edited.
+**
+** If an error occurs, *ppOut is set to NULL and an SQLite error code
+** returned. Otherwise, *ppOut is set to point to the new object and
+** SQLITE_OK returned.
+*/
+static int fts5StructureDecode(
+ const u8 *pData, /* Buffer containing serialized structure */
+ int nData, /* Size of buffer pData in bytes */
+ int *piCookie, /* Configuration cookie value */
+ Fts5Structure **ppOut /* OUT: Deserialized object */
+){
+ int rc = SQLITE_OK;
+ int i = 0;
+ int iLvl;
+ int nLevel = 0;
+ int nSegment = 0;
+ sqlite3_int64 nByte; /* Bytes of space to allocate at pRet */
+ Fts5Structure *pRet = 0; /* Structure object to return */
+
+ /* Grab the cookie value */
+ if( piCookie ) *piCookie = sqlite3Fts5Get32(pData);
+ i = 4;
+
+ /* Read the total number of levels and segments from the start of the
+ ** structure record. */
+ i += fts5GetVarint32(&pData[i], nLevel);
+ i += fts5GetVarint32(&pData[i], nSegment);
+ if( nLevel>FTS5_MAX_SEGMENT || nLevel<0
+ || nSegment>FTS5_MAX_SEGMENT || nSegment<0
+ ){
+ return FTS5_CORRUPT;
+ }
+ nByte = (
+ sizeof(Fts5Structure) + /* Main structure */
+ sizeof(Fts5StructureLevel) * (nLevel-1) /* aLevel[] array */
+ );
+ pRet = (Fts5Structure*)sqlite3Fts5MallocZero(&rc, nByte);
+
+ if( pRet ){
+ pRet->nRef = 1;
+ pRet->nLevel = nLevel;
+ pRet->nSegment = nSegment;
+ i += sqlite3Fts5GetVarint(&pData[i], &pRet->nWriteCounter);
+
+ for(iLvl=0; rc==SQLITE_OK && iLvl<nLevel; iLvl++){
+ Fts5StructureLevel *pLvl = &pRet->aLevel[iLvl];
+ int nTotal = 0;
+ int iSeg;
+
+ if( i>=nData ){
+ rc = FTS5_CORRUPT;
+ }else{
+ i += fts5GetVarint32(&pData[i], pLvl->nMerge);
+ i += fts5GetVarint32(&pData[i], nTotal);
+ if( nTotal<pLvl->nMerge ) rc = FTS5_CORRUPT;
+ pLvl->aSeg = (Fts5StructureSegment*)sqlite3Fts5MallocZero(&rc,
+ nTotal * sizeof(Fts5StructureSegment)
+ );
+ nSegment -= nTotal;
+ }
+
+ if( rc==SQLITE_OK ){
+ pLvl->nSeg = nTotal;
+ for(iSeg=0; iSeg<nTotal; iSeg++){
+ Fts5StructureSegment *pSeg = &pLvl->aSeg[iSeg];
+ if( i>=nData ){
+ rc = FTS5_CORRUPT;
+ break;
+ }
+ i += fts5GetVarint32(&pData[i], pSeg->iSegid);
+ i += fts5GetVarint32(&pData[i], pSeg->pgnoFirst);
+ i += fts5GetVarint32(&pData[i], pSeg->pgnoLast);
+ if( pSeg->pgnoLast<pSeg->pgnoFirst ){
+ rc = FTS5_CORRUPT;
+ break;
+ }
+ }
+ if( iLvl>0 && pLvl[-1].nMerge && nTotal==0 ) rc = FTS5_CORRUPT;
+ if( iLvl==nLevel-1 && pLvl->nMerge ) rc = FTS5_CORRUPT;
+ }
+ }
+ if( nSegment!=0 && rc==SQLITE_OK ) rc = FTS5_CORRUPT;
+
+ if( rc!=SQLITE_OK ){
+ fts5StructureRelease(pRet);
+ pRet = 0;
+ }
+ }
+
+ *ppOut = pRet;
+ return rc;
+}
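+
+/*
+** Illustrative worked example (not part of the fts5 sources): the
+** smallest interesting structure record - configuration cookie 0, one
+** level holding a single segment (id 1, leaf pages 1 to 3), write
+** counter 1 - serializes to the twelve bytes below, exactly the layout
+** parsed by fts5StructureDecode() above.
+*/
+#if 0
+static const u8 aExampleStructure[] = {
+  0x00, 0x00, 0x00, 0x00,   /* 32-bit configuration cookie (0) */
+  0x01,                     /* varint: number of levels (1) */
+  0x01,                     /* varint: total number of segments (1) */
+  0x01,                     /* varint: write counter (1) */
+  0x00,                     /* level 0: segments in ongoing merge (0) */
+  0x01,                     /* level 0: total segments (1) */
+  0x01,                     /* segment id */
+  0x01,                     /* first leaf page number */
+  0x03                      /* last leaf page number */
+};
+#endif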
+
+/*
+** Add a new, initially empty, level to the index structure *ppStruct. If
+** an OOM occurs, set *pRc to SQLITE_NOMEM and leave the structure
+** unmodified.
+*/
+static void fts5StructureAddLevel(int *pRc, Fts5Structure **ppStruct){
+ if( *pRc==SQLITE_OK ){
+ Fts5Structure *pStruct = *ppStruct;
+ int nLevel = pStruct->nLevel;
+ sqlite3_int64 nByte = (
+ sizeof(Fts5Structure) + /* Main structure */
+ sizeof(Fts5StructureLevel) * (nLevel+1) /* aLevel[] array */
+ );
+
+ pStruct = sqlite3_realloc64(pStruct, nByte);
+ if( pStruct ){
+ memset(&pStruct->aLevel[nLevel], 0, sizeof(Fts5StructureLevel));
+ pStruct->nLevel++;
+ *ppStruct = pStruct;
+ }else{
+ *pRc = SQLITE_NOMEM;
+ }
+ }
+}
+
+/*
+** Extend level iLvl so that there is room for at least nExtra more
+** segments.
+*/
+static void fts5StructureExtendLevel(
+ int *pRc,
+ Fts5Structure *pStruct,
+ int iLvl,
+ int nExtra,
+ int bInsert
+){
+ if( *pRc==SQLITE_OK ){
+ Fts5StructureLevel *pLvl = &pStruct->aLevel[iLvl];
+ Fts5StructureSegment *aNew;
+ sqlite3_int64 nByte;
+
+ nByte = (pLvl->nSeg + nExtra) * sizeof(Fts5StructureSegment);
+ aNew = sqlite3_realloc64(pLvl->aSeg, nByte);
+ if( aNew ){
+ if( bInsert==0 ){
+ memset(&aNew[pLvl->nSeg], 0, sizeof(Fts5StructureSegment) * nExtra);
+ }else{
+ int nMove = pLvl->nSeg * sizeof(Fts5StructureSegment);
+ memmove(&aNew[nExtra], aNew, nMove);
+ memset(aNew, 0, sizeof(Fts5StructureSegment) * nExtra);
+ }
+ pLvl->aSeg = aNew;
+ }else{
+ *pRc = SQLITE_NOMEM;
+ }
+ }
+}
+
+static Fts5Structure *fts5StructureReadUncached(Fts5Index *p){
+ Fts5Structure *pRet = 0;
+ Fts5Config *pConfig = p->pConfig;
+ int iCookie; /* Configuration cookie */
+ Fts5Data *pData;
+
+ pData = fts5DataRead(p, FTS5_STRUCTURE_ROWID);
+ if( p->rc==SQLITE_OK ){
+ /* TODO: Do we need this if the leaf-index is appended? Probably... */
+ memset(&pData->p[pData->nn], 0, FTS5_DATA_PADDING);
+ p->rc = fts5StructureDecode(pData->p, pData->nn, &iCookie, &pRet);
+ if( p->rc==SQLITE_OK && (pConfig->pgsz==0 || pConfig->iCookie!=iCookie) ){
+ p->rc = sqlite3Fts5ConfigLoad(pConfig, iCookie);
+ }
+ fts5DataRelease(pData);
+ if( p->rc!=SQLITE_OK ){
+ fts5StructureRelease(pRet);
+ pRet = 0;
+ }
+ }
+
+ return pRet;
+}
+
+static i64 fts5IndexDataVersion(Fts5Index *p){
+ i64 iVersion = 0;
+
+ if( p->rc==SQLITE_OK ){
+ if( p->pDataVersion==0 ){
+ p->rc = fts5IndexPrepareStmt(p, &p->pDataVersion,
+ sqlite3_mprintf("PRAGMA %Q.data_version", p->pConfig->zDb)
+ );
+ if( p->rc ) return 0;
+ }
+
+ if( SQLITE_ROW==sqlite3_step(p->pDataVersion) ){
+ iVersion = sqlite3_column_int64(p->pDataVersion, 0);
+ }
+ p->rc = sqlite3_reset(p->pDataVersion);
+ }
+
+ return iVersion;
+}
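+
+/*
+** Illustrative sketch (not part of the fts5 sources): how the value
+** returned by fts5IndexDataVersion() can validate the cached structure.
+** "PRAGMA data_version" changes whenever the database file is modified
+** by another connection, so a mismatch means the cached Fts5Structure
+** may be stale and must be discarded. The helper name is hypothetical;
+** fts5StructureInvalidate() is defined below.
+*/
+#if 0
+static void exampleValidateCache(Fts5Index *p){
+  if( p->pStruct && fts5IndexDataVersion(p)!=p->iStructVersion ){
+    fts5StructureInvalidate(p);   /* Drop the cache; re-read on demand */
+  }
+}
+#endif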
+
+/*
+** Read, deserialize and return the structure record.
+**
+** The Fts5Structure.aLevel[] and each Fts5StructureLevel.aSeg[] array
+** are over-allocated as described for function fts5StructureDecode()
+** above.
+**
+** If an error occurs, NULL is returned and an error code left in the
+** Fts5Index handle. If an error has already occurred when this function
+** is called, it is a no-op.
+*/
+static Fts5Structure *fts5StructureRead(Fts5Index *p){
+
+ if( p->pStruct==0 ){
+ p->iStructVersion = fts5IndexDataVersion(p);
+ if( p->rc==SQLITE_OK ){
+ p->pStruct = fts5StructureReadUncached(p);
+ }
+ }
+
+#if 0
+ else{
+ Fts5Structure *pTest = fts5StructureReadUncached(p);
+ if( pTest ){
+ int i, j;
+ assert_nc( p->pStruct->nSegment==pTest->nSegment );
+ assert_nc( p->pStruct->nLevel==pTest->nLevel );
+ for(i=0; i<pTest->nLevel; i++){
+ assert_nc( p->pStruct->aLevel[i].nMerge==pTest->aLevel[i].nMerge );
+ assert_nc( p->pStruct->aLevel[i].nSeg==pTest->aLevel[i].nSeg );
+ for(j=0; j<pTest->aLevel[i].nSeg; j++){
+ Fts5StructureSegment *p1 = &pTest->aLevel[i].aSeg[j];
+ Fts5StructureSegment *p2 = &p->pStruct->aLevel[i].aSeg[j];
+ assert_nc( p1->iSegid==p2->iSegid );
+ assert_nc( p1->pgnoFirst==p2->pgnoFirst );
+ assert_nc( p1->pgnoLast==p2->pgnoLast );
+ }
+ }
+ fts5StructureRelease(pTest);
+ }
+ }
+#endif
+
+ if( p->rc!=SQLITE_OK ) return 0;
+ assert( p->iStructVersion!=0 );
+ assert( p->pStruct!=0 );
+ fts5StructureRef(p->pStruct);
+ return p->pStruct;
+}
+
+static void fts5StructureInvalidate(Fts5Index *p){
+ if( p->pStruct ){
+ fts5StructureRelease(p->pStruct);
+ p->pStruct = 0;
+ }
+}
+
+/*
+** Return the total number of segments in index structure pStruct. This
+** function is only ever used as part of assert() conditions.
+*/
+#ifdef SQLITE_DEBUG
+static int fts5StructureCountSegments(Fts5Structure *pStruct){
+ int nSegment = 0; /* Total number of segments */
+ if( pStruct ){
+ int iLvl; /* Used to iterate through levels */
+ for(iLvl=0; iLvl<pStruct->nLevel; iLvl++){
+ nSegment += pStruct->aLevel[iLvl].nSeg;
+ }
+ }
+
+ return nSegment;
+}
+#endif
+
+#define fts5BufferSafeAppendBlob(pBuf, pBlob, nBlob) { \
+ assert( (pBuf)->nSpace>=((pBuf)->n+nBlob) ); \
+ memcpy(&(pBuf)->p[(pBuf)->n], pBlob, nBlob); \
+ (pBuf)->n += nBlob; \
+}
+
+#define fts5BufferSafeAppendVarint(pBuf, iVal) { \
+ (pBuf)->n += sqlite3Fts5PutVarint(&(pBuf)->p[(pBuf)->n], (iVal)); \
+ assert( (pBuf)->nSpace>=(pBuf)->n ); \
+}
+
+
+/*
+** Serialize and store the "structure" record.
+**
+** If an error occurs, leave an error code in the Fts5Index object. If an
+** error has already occurred, this function is a no-op.
+*/
+static void fts5StructureWrite(Fts5Index *p, Fts5Structure *pStruct){
+ if( p->rc==SQLITE_OK ){
+ Fts5Buffer buf; /* Buffer to serialize record into */
+ int iLvl; /* Used to iterate through levels */
+ int iCookie; /* Cookie value to store */
+
+ assert( pStruct->nSegment==fts5StructureCountSegments(pStruct) );
+ memset(&buf, 0, sizeof(Fts5Buffer));
+
+ /* Append the current configuration cookie */
+ iCookie = p->pConfig->iCookie;
+ if( iCookie<0 ) iCookie = 0;
+
+ if( 0==sqlite3Fts5BufferSize(&p->rc, &buf, 4+9+9+9) ){
+ sqlite3Fts5Put32(buf.p, iCookie);
+ buf.n = 4;
+ fts5BufferSafeAppendVarint(&buf, pStruct->nLevel);
+ fts5BufferSafeAppendVarint(&buf, pStruct->nSegment);
+ fts5BufferSafeAppendVarint(&buf, (i64)pStruct->nWriteCounter);
+ }
+
+ for(iLvl=0; iLvl<pStruct->nLevel; iLvl++){
+ int iSeg; /* Used to iterate through segments */
+ Fts5StructureLevel *pLvl = &pStruct->aLevel[iLvl];
+ fts5BufferAppendVarint(&p->rc, &buf, pLvl->nMerge);
+ fts5BufferAppendVarint(&p->rc, &buf, pLvl->nSeg);
+ assert( pLvl->nMerge<=pLvl->nSeg );
+
+ for(iSeg=0; iSeg<pLvl->nSeg; iSeg++){
+ fts5BufferAppendVarint(&p->rc, &buf, pLvl->aSeg[iSeg].iSegid);
+ fts5BufferAppendVarint(&p->rc, &buf, pLvl->aSeg[iSeg].pgnoFirst);
+ fts5BufferAppendVarint(&p->rc, &buf, pLvl->aSeg[iSeg].pgnoLast);
+ }
+ }
+
+ fts5DataWrite(p, FTS5_STRUCTURE_ROWID, buf.p, buf.n);
+ fts5BufferFree(&buf);
+ }
+}
+
+#if 0
+static void fts5DebugStructure(int*,Fts5Buffer*,Fts5Structure*);
+static void fts5PrintStructure(const char *zCaption, Fts5Structure *pStruct){
+ int rc = SQLITE_OK;
+ Fts5Buffer buf;
+ memset(&buf, 0, sizeof(buf));
+ fts5DebugStructure(&rc, &buf, pStruct);
+ fprintf(stdout, "%s: %s\n", zCaption, buf.p);
+ fflush(stdout);
+ fts5BufferFree(&buf);
+}
+#else
+# define fts5PrintStructure(x,y)
+#endif
+
+static int fts5SegmentSize(Fts5StructureSegment *pSeg){
+ return 1 + pSeg->pgnoLast - pSeg->pgnoFirst;
+}
+
+/*
+** Promote as many segments as possible from levels numbered greater than
+** iPromote to level iPromote of structure pStruct. Only segments of size
+** szPromote pages or smaller are moved. If an error occurs, it is left in
+** the Fts5Index object.
+*/
+static void fts5StructurePromoteTo(
+ Fts5Index *p,
+ int iPromote,
+ int szPromote,
+ Fts5Structure *pStruct
+){
+ int il, is;
+ Fts5StructureLevel *pOut = &pStruct->aLevel[iPromote];
+
+ if( pOut->nMerge==0 ){
+ for(il=iPromote+1; il<pStruct->nLevel; il++){
+ Fts5StructureLevel *pLvl = &pStruct->aLevel[il];
+ if( pLvl->nMerge ) return;
+ for(is=pLvl->nSeg-1; is>=0; is--){
+ int sz = fts5SegmentSize(&pLvl->aSeg[is]);
+ if( sz>szPromote ) return;
+ fts5StructureExtendLevel(&p->rc, pStruct, iPromote, 1, 1);
+ if( p->rc ) return;
+ memcpy(pOut->aSeg, &pLvl->aSeg[is], sizeof(Fts5StructureSegment));
+ pOut->nSeg++;
+ pLvl->nSeg--;
+ }
+ }
+ }
+}
+
+/*
+** A new segment has just been written to level iLvl of index structure
+** pStruct. This function determines if any segments should be promoted
+** as a result. Segments are promoted in two scenarios:
+**
+** a) If the segment just written is smaller than one or more segments
+** within the previous populated level, it is promoted to the previous
+** populated level.
+**
+** b) If the segment just written is larger than the newest segment on
+** the next populated level, then that segment, and any other adjacent
+** segments that are also smaller than the one just written, are
+** promoted.
+**
+** If one or more segments are promoted, the structure object is updated
+** to reflect this.
+*/
+static void fts5StructurePromote(
+ Fts5Index *p, /* FTS5 backend object */
+ int iLvl, /* Index level just updated */
+ Fts5Structure *pStruct /* Index structure */
+){
+ if( p->rc==SQLITE_OK ){
+ int iTst;
+ int iPromote = -1;
+ int szPromote = 0; /* Promote anything this size or smaller */
+ Fts5StructureSegment *pSeg; /* Segment just written */
+ int szSeg; /* Size of segment just written */
+ int nSeg = pStruct->aLevel[iLvl].nSeg;
+
+ if( nSeg==0 ) return;
+ pSeg = &pStruct->aLevel[iLvl].aSeg[pStruct->aLevel[iLvl].nSeg-1];
+ szSeg = (1 + pSeg->pgnoLast - pSeg->pgnoFirst);
+
+ /* Check for condition (a) */
+ for(iTst=iLvl-1; iTst>=0 && pStruct->aLevel[iTst].nSeg==0; iTst--);
+ if( iTst>=0 ){
+ int i;
+ int szMax = 0;
+ Fts5StructureLevel *pTst = &pStruct->aLevel[iTst];
+ assert( pTst->nMerge==0 );
+ for(i=0; i<pTst->nSeg; i++){
+ int sz = pTst->aSeg[i].pgnoLast - pTst->aSeg[i].pgnoFirst + 1;
+ if( sz>szMax ) szMax = sz;
+ }
+ if( szMax>=szSeg ){
+ /* Condition (a) is true. Promote the newest segment on level
+ ** iLvl to level iTst. */
+ iPromote = iTst;
+ szPromote = szMax;
+ }
+ }
+
+ /* If condition (a) is not met, assume (b) is true. StructurePromoteTo()
+ ** is a no-op if it is not. */
+ if( iPromote<0 ){
+ iPromote = iLvl;
+ szPromote = szSeg;
+ }
+ fts5StructurePromoteTo(p, iPromote, szPromote, pStruct);
+ }
+}
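+
+/*
+** Illustrative sketch (not part of the fts5 sources): the decision made
+** by fts5StructurePromote() above, restated over plain arrays. aNSeg[i]
+** is the number of segments on level i and aSz[i][j] the size in pages
+** of segment j on that level; both names are hypothetical. The return
+** value is the level to pass to fts5StructurePromoteTo() as iPromote.
+*/
+#if 0
+static int examplePromoteLevel(int iLvl, const int *aNSeg, int aSz[][8]){
+  int szSeg = aSz[iLvl][aNSeg[iLvl]-1];   /* Size of segment just written */
+  int iTst;
+  /* Condition (a): find the previous populated level */
+  for(iTst=iLvl-1; iTst>=0 && aNSeg[iTst]==0; iTst--);
+  if( iTst>=0 ){
+    int i, szMax = 0;
+    for(i=0; i<aNSeg[iTst]; i++){
+      if( aSz[iTst][i]>szMax ) szMax = aSz[iTst][i];
+    }
+    if( szMax>=szSeg ) return iTst;       /* Promote up to level iTst */
+  }
+  return iLvl;                  /* Otherwise try condition (b) in place */
+}
+#endif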
+
+
+/*
+** Advance the iterator passed as the only argument. If the end of the
+** doclist-index page is reached, return non-zero.
+*/
+static int fts5DlidxLvlNext(Fts5DlidxLvl *pLvl){
+ Fts5Data *pData = pLvl->pData;
+
+ if( pLvl->iOff==0 ){
+ assert( pLvl->bEof==0 );
+ pLvl->iOff = 1;
+ pLvl->iOff += fts5GetVarint32(&pData->p[1], pLvl->iLeafPgno);
+ pLvl->iOff += fts5GetVarint(&pData->p[pLvl->iOff], (u64*)&pLvl->iRowid);
+ pLvl->iFirstOff = pLvl->iOff;
+ }else{
+ int iOff;
+ for(iOff=pLvl->iOff; iOff<pData->nn; iOff++){
+ if( pData->p[iOff] ) break;
+ }
+
+ if( iOff<pData->nn ){
+ i64 iVal;
+ pLvl->iLeafPgno += (iOff - pLvl->iOff) + 1;
+ iOff += fts5GetVarint(&pData->p[iOff], (u64*)&iVal);
+ pLvl->iRowid += iVal;
+ pLvl->iOff = iOff;
+ }else{
+ pLvl->bEof = 1;
+ }
+ }
+
+ return pLvl->bEof;
+}
+
+/*
+** Advance the iterator passed as the only argument.
+*/
+static int fts5DlidxIterNextR(Fts5Index *p, Fts5DlidxIter *pIter, int iLvl){
+ Fts5DlidxLvl *pLvl = &pIter->aLvl[iLvl];
+
+ assert( iLvl<pIter->nLvl );
+ if( fts5DlidxLvlNext(pLvl) ){
+ if( (iLvl+1) < pIter->nLvl ){
+ fts5DlidxIterNextR(p, pIter, iLvl+1);
+ if( pLvl[1].bEof==0 ){
+ fts5DataRelease(pLvl->pData);
+ memset(pLvl, 0, sizeof(Fts5DlidxLvl));
+ pLvl->pData = fts5DataRead(p,
+ FTS5_DLIDX_ROWID(pIter->iSegid, iLvl, pLvl[1].iLeafPgno)
+ );
+ if( pLvl->pData ) fts5DlidxLvlNext(pLvl);
+ }
+ }
+ }
+
+ return pIter->aLvl[0].bEof;
+}
+static int fts5DlidxIterNext(Fts5Index *p, Fts5DlidxIter *pIter){
+ return fts5DlidxIterNextR(p, pIter, 0);
+}
+
+/*
+** The iterator passed as the first argument has the following fields set
+** as follows. This function sets up the rest of the iterator so that it
+** points to the first rowid in the doclist-index.
+**
+** pData:
+** pointer to doclist-index record,
+**
+** When this function is called pIter->iLeafPgno is the page number the
+** doclist is associated with (the one featuring the term).
+*/
+static int fts5DlidxIterFirst(Fts5DlidxIter *pIter){
+ int i;
+ for(i=0; i<pIter->nLvl; i++){
+ fts5DlidxLvlNext(&pIter->aLvl[i]);
+ }
+ return pIter->aLvl[0].bEof;
+}
+
+
+static int fts5DlidxIterEof(Fts5Index *p, Fts5DlidxIter *pIter){
+ return p->rc!=SQLITE_OK || pIter->aLvl[0].bEof;
+}
+
+static void fts5DlidxIterLast(Fts5Index *p, Fts5DlidxIter *pIter){
+ int i;
+
+ /* Advance each level to the last entry on the last page */
+ for(i=pIter->nLvl-1; p->rc==SQLITE_OK && i>=0; i--){
+ Fts5DlidxLvl *pLvl = &pIter->aLvl[i];
+ while( fts5DlidxLvlNext(pLvl)==0 );
+ pLvl->bEof = 0;
+
+ if( i>0 ){
+ Fts5DlidxLvl *pChild = &pLvl[-1];
+ fts5DataRelease(pChild->pData);
+ memset(pChild, 0, sizeof(Fts5DlidxLvl));
+ pChild->pData = fts5DataRead(p,
+ FTS5_DLIDX_ROWID(pIter->iSegid, i-1, pLvl->iLeafPgno)
+ );
+ }
+ }
+}
+
+/*
+** Move the iterator passed as the only argument to the previous entry.
+*/
+static int fts5DlidxLvlPrev(Fts5DlidxLvl *pLvl){
+ int iOff = pLvl->iOff;
+
+ assert( pLvl->bEof==0 );
+ if( iOff<=pLvl->iFirstOff ){
+ pLvl->bEof = 1;
+ }else{
+ u8 *a = pLvl->pData->p;
+ i64 iVal;
+ int iLimit;
+ int ii;
+ int nZero = 0;
+
+ /* Currently iOff points to the first byte of a varint. This block
+ ** decrements iOff until it points to the first byte of the previous
+    ** varint, taking care not to read any memory locations before the
+    ** start of the buffer. */
+ iLimit = (iOff>9 ? iOff-9 : 0);
+ for(iOff--; iOff>iLimit; iOff--){
+ if( (a[iOff-1] & 0x80)==0 ) break;
+ }
+
+ fts5GetVarint(&a[iOff], (u64*)&iVal);
+ pLvl->iRowid -= iVal;
+ pLvl->iLeafPgno--;
+
+ /* Skip backwards past any 0x00 varints. */
+ for(ii=iOff-1; ii>=pLvl->iFirstOff && a[ii]==0x00; ii--){
+ nZero++;
+ }
+ if( ii>=pLvl->iFirstOff && (a[ii] & 0x80) ){
+ /* The byte immediately before the last 0x00 byte has the 0x80 bit
+ ** set. So the last 0x00 is only a varint 0 if there are 8 more 0x80
+ ** bytes before a[ii]. */
+ int bZero = 0; /* True if last 0x00 counts */
+ if( (ii-8)>=pLvl->iFirstOff ){
+ int j;
+ for(j=1; j<=8 && (a[ii-j] & 0x80); j++);
+ bZero = (j>8);
+ }
+ if( bZero==0 ) nZero--;
+ }
+ pLvl->iLeafPgno -= nZero;
+ pLvl->iOff = iOff - nZero;
+ }
+
+ return pLvl->bEof;
+}
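+
+/*
+** Illustrative sketch (not part of the fts5 sources): the backwards
+** varint scan used by fts5DlidxLvlPrev() above. A varint sets the 0x80
+** continuation bit on every byte except its last, so the first byte of
+** the varint ending just before offset iOff is found by stepping back
+** while the preceding byte has that bit set. The scan is capped at nine
+** bytes, the maximum varint length.
+*/
+#if 0
+static int examplePrevVarint(const u8 *a, int iOff){
+  int iLimit = (iOff>9 ? iOff-9 : 0);
+  for(iOff--; iOff>iLimit; iOff--){
+    if( (a[iOff-1] & 0x80)==0 ) break;  /* Previous byte ends a varint */
+  }
+  return iOff;                  /* First byte of the previous varint */
+}
+#endif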
+
+static int fts5DlidxIterPrevR(Fts5Index *p, Fts5DlidxIter *pIter, int iLvl){
+ Fts5DlidxLvl *pLvl = &pIter->aLvl[iLvl];
+
+ assert( iLvl<pIter->nLvl );
+ if( fts5DlidxLvlPrev(pLvl) ){
+ if( (iLvl+1) < pIter->nLvl ){
+ fts5DlidxIterPrevR(p, pIter, iLvl+1);
+ if( pLvl[1].bEof==0 ){
+ fts5DataRelease(pLvl->pData);
+ memset(pLvl, 0, sizeof(Fts5DlidxLvl));
+ pLvl->pData = fts5DataRead(p,
+ FTS5_DLIDX_ROWID(pIter->iSegid, iLvl, pLvl[1].iLeafPgno)
+ );
+ if( pLvl->pData ){
+ while( fts5DlidxLvlNext(pLvl)==0 );
+ pLvl->bEof = 0;
+ }
+ }
+ }
+ }
+
+ return pIter->aLvl[0].bEof;
+}
+static int fts5DlidxIterPrev(Fts5Index *p, Fts5DlidxIter *pIter){
+ return fts5DlidxIterPrevR(p, pIter, 0);
+}
+
+/*
+** Free a doclist-index iterator object allocated by fts5DlidxIterInit().
+*/
+static void fts5DlidxIterFree(Fts5DlidxIter *pIter){
+ if( pIter ){
+ int i;
+ for(i=0; i<pIter->nLvl; i++){
+ fts5DataRelease(pIter->aLvl[i].pData);
+ }
+ sqlite3_free(pIter);
+ }
+}
+
+static Fts5DlidxIter *fts5DlidxIterInit(
+ Fts5Index *p, /* Fts5 Backend to iterate within */
+  int bRev,                       /* True to iterate in reverse (rowid DESC) order */
+ int iSegid, /* Segment id */
+ int iLeafPg /* Leaf page number to load dlidx for */
+){
+ Fts5DlidxIter *pIter = 0;
+ int i;
+ int bDone = 0;
+
+ for(i=0; p->rc==SQLITE_OK && bDone==0; i++){
+ sqlite3_int64 nByte = sizeof(Fts5DlidxIter) + i * sizeof(Fts5DlidxLvl);
+ Fts5DlidxIter *pNew;
+
+ pNew = (Fts5DlidxIter*)sqlite3_realloc64(pIter, nByte);
+ if( pNew==0 ){
+ p->rc = SQLITE_NOMEM;
+ }else{
+ i64 iRowid = FTS5_DLIDX_ROWID(iSegid, i, iLeafPg);
+ Fts5DlidxLvl *pLvl = &pNew->aLvl[i];
+ pIter = pNew;
+ memset(pLvl, 0, sizeof(Fts5DlidxLvl));
+ pLvl->pData = fts5DataRead(p, iRowid);
+ if( pLvl->pData && (pLvl->pData->p[0] & 0x0001)==0 ){
+ bDone = 1;
+ }
+ pIter->nLvl = i+1;
+ }
+ }
+
+ if( p->rc==SQLITE_OK ){
+ pIter->iSegid = iSegid;
+ if( bRev==0 ){
+ fts5DlidxIterFirst(pIter);
+ }else{
+ fts5DlidxIterLast(p, pIter);
+ }
+ }
+
+ if( p->rc!=SQLITE_OK ){
+ fts5DlidxIterFree(pIter);
+ pIter = 0;
+ }
+
+ return pIter;
+}
+
+static i64 fts5DlidxIterRowid(Fts5DlidxIter *pIter){
+ return pIter->aLvl[0].iRowid;
+}
+static int fts5DlidxIterPgno(Fts5DlidxIter *pIter){
+ return pIter->aLvl[0].iLeafPgno;
+}
+
+/*
+** Load the next leaf page into the segment iterator.
+*/
+static void fts5SegIterNextPage(
+ Fts5Index *p, /* FTS5 backend object */
+ Fts5SegIter *pIter /* Iterator to advance to next page */
+){
+ Fts5Data *pLeaf;
+ Fts5StructureSegment *pSeg = pIter->pSeg;
+ fts5DataRelease(pIter->pLeaf);
+ pIter->iLeafPgno++;
+ if( pIter->pNextLeaf ){
+ pIter->pLeaf = pIter->pNextLeaf;
+ pIter->pNextLeaf = 0;
+ }else if( pIter->iLeafPgno<=pSeg->pgnoLast ){
+ pIter->pLeaf = fts5LeafRead(p,
+ FTS5_SEGMENT_ROWID(pSeg->iSegid, pIter->iLeafPgno)
+ );
+ }else{
+ pIter->pLeaf = 0;
+ }
+ pLeaf = pIter->pLeaf;
+
+ if( pLeaf ){
+ pIter->iPgidxOff = pLeaf->szLeaf;
+ if( fts5LeafIsTermless(pLeaf) ){
+ pIter->iEndofDoclist = pLeaf->nn+1;
+ }else{
+ pIter->iPgidxOff += fts5GetVarint32(&pLeaf->p[pIter->iPgidxOff],
+ pIter->iEndofDoclist
+ );
+ }
+ }
+}
+
+/*
+** Argument p points to a buffer containing a varint to be interpreted as a
+** position list size field. Read the varint and return the number of bytes
+** read. Before returning, set *pnSz to the number of bytes in the position
+** list, and *pbDel to true if the delete flag is set, or false otherwise.
+*/
+static int fts5GetPoslistSize(const u8 *p, int *pnSz, int *pbDel){
+ int nSz;
+ int n = 0;
+ fts5FastGetVarint32(p, n, nSz);
+ assert_nc( nSz>=0 );
+ *pnSz = nSz/2;
+ *pbDel = nSz & 0x0001;
+ return n;
+}
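+
+/*
+** Illustrative sketch (not part of the fts5 sources): the packing that
+** fts5GetPoslistSize() above reverses. The on-disk field is
+** (size-in-bytes << 1) | delete-flag, so both values round-trip through
+** a single varint.
+*/
+#if 0
+static void exampleSizeFieldRoundTrip(void){
+  int nBytes = 5;               /* Position list is 5 bytes long */
+  int bDel = 1;                 /* Entry carries the delete flag */
+  int nSz = nBytes*2 + bDel;    /* Value written to disk: 11 */
+  assert( nSz/2==nBytes && (nSz & 0x0001)==bDel );
+}
+#endif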
+
+/*
+** Fts5SegIter.iLeafOffset currently points to the first byte of a
+** position-list size field. Read the value of the field and store it
+** in the following variables:
+**
+** Fts5SegIter.nPos
+** Fts5SegIter.bDel
+**
+** Leave Fts5SegIter.iLeafOffset pointing to the first byte of the
+** position list content (if any).
+*/
+static void fts5SegIterLoadNPos(Fts5Index *p, Fts5SegIter *pIter){
+ if( p->rc==SQLITE_OK ){
+ int iOff = pIter->iLeafOffset; /* Offset to read at */
+ ASSERT_SZLEAF_OK(pIter->pLeaf);
+ if( p->pConfig->eDetail==FTS5_DETAIL_NONE ){
+ int iEod = MIN(pIter->iEndofDoclist, pIter->pLeaf->szLeaf);
+ pIter->bDel = 0;
+ pIter->nPos = 1;
+ if( iOff<iEod && pIter->pLeaf->p[iOff]==0 ){
+ pIter->bDel = 1;
+ iOff++;
+ if( iOff<iEod && pIter->pLeaf->p[iOff]==0 ){
+ pIter->nPos = 1;
+ iOff++;
+ }else{
+ pIter->nPos = 0;
+ }
+ }
+ }else{
+ int nSz;
+ fts5FastGetVarint32(pIter->pLeaf->p, iOff, nSz);
+ pIter->bDel = (nSz & 0x0001);
+ pIter->nPos = nSz>>1;
+ assert_nc( pIter->nPos>=0 );
+ }
+ pIter->iLeafOffset = iOff;
+ }
+}
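+
+/*
+** Illustrative worked example (not part of the fts5 sources): doclist
+** entries under detail=none, as decoded by fts5SegIterLoadNPos() above.
+** An ordinary entry is just a rowid delta with no payload; a single
+** 0x00 byte marks a delete, and two 0x00 bytes mark a delete for a
+** rowid that also carries content. Rowid deltas are always non-zero,
+** which is what makes the 0x00 flag bytes unambiguous. The rowids here
+** are made up.
+*/
+#if 0
+static const u8 aExampleDetailNone[] = {
+  0x05,             /* varint: first rowid (5) - present, no payload */
+  0x02,             /* varint: rowid delta (+2, rowid 7) */
+  0x00,             /* 0x00 byte: rowid 7 is a delete */
+  0x03              /* varint: rowid delta (+3, rowid 10) - present */
+};
+#endif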
+
+static void fts5SegIterLoadRowid(Fts5Index *p, Fts5SegIter *pIter){
+ u8 *a = pIter->pLeaf->p; /* Buffer to read data from */
+ int iOff = pIter->iLeafOffset;
+
+ ASSERT_SZLEAF_OK(pIter->pLeaf);
+ if( iOff>=pIter->pLeaf->szLeaf ){
+ fts5SegIterNextPage(p, pIter);
+ if( pIter->pLeaf==0 ){
+ if( p->rc==SQLITE_OK ) p->rc = FTS5_CORRUPT;
+ return;
+ }
+ iOff = 4;
+ a = pIter->pLeaf->p;
+ }
+ iOff += sqlite3Fts5GetVarint(&a[iOff], (u64*)&pIter->iRowid);
+ pIter->iLeafOffset = iOff;
+}
+
+/*
+** Fts5SegIter.iLeafOffset currently points to the first byte of the
+** "nSuffix" field of a term. Function parameter nKeep contains the value
+** of the "nPrefix" field (if there was one - it is passed 0 if this is
+** the first term in the segment).
+**
+** This function populates:
+**
+** Fts5SegIter.term
+**   Fts5SegIter.iRowid
+**
+** accordingly, and leaves Fts5SegIter.iLeafOffset pointing to the first
+** byte of the first position list - the position list belonging to
+** document Fts5SegIter.iRowid.
+*/
+static void fts5SegIterLoadTerm(Fts5Index *p, Fts5SegIter *pIter, int nKeep){
+ u8 *a = pIter->pLeaf->p; /* Buffer to read data from */
+ int iOff = pIter->iLeafOffset; /* Offset to read at */
+ int nNew; /* Bytes of new data */
+
+ iOff += fts5GetVarint32(&a[iOff], nNew);
+ if( iOff+nNew>pIter->pLeaf->szLeaf || nKeep>pIter->term.n || nNew==0 ){
+ p->rc = FTS5_CORRUPT;
+ return;
+ }
+ pIter->term.n = nKeep;
+ fts5BufferAppendBlob(&p->rc, &pIter->term, nNew, &a[iOff]);
+ assert( pIter->term.n<=pIter->term.nSpace );
+ iOff += nNew;
+ pIter->iTermLeafOffset = iOff;
+ pIter->iTermLeafPgno = pIter->iLeafPgno;
+ pIter->iLeafOffset = iOff;
+
+ if( pIter->iPgidxOff>=pIter->pLeaf->nn ){
+ pIter->iEndofDoclist = pIter->pLeaf->nn+1;
+ }else{
+ int nExtra;
+ pIter->iPgidxOff += fts5GetVarint32(&a[pIter->iPgidxOff], nExtra);
+ pIter->iEndofDoclist += nExtra;
+ }
+
+ fts5SegIterLoadRowid(p, pIter);
+}
+
+static void fts5SegIterNext(Fts5Index*, Fts5SegIter*, int*);
+static void fts5SegIterNext_Reverse(Fts5Index*, Fts5SegIter*, int*);
+static void fts5SegIterNext_None(Fts5Index*, Fts5SegIter*, int*);
+
+static void fts5SegIterSetNext(Fts5Index *p, Fts5SegIter *pIter){
+ if( pIter->flags & FTS5_SEGITER_REVERSE ){
+ pIter->xNext = fts5SegIterNext_Reverse;
+ }else if( p->pConfig->eDetail==FTS5_DETAIL_NONE ){
+ pIter->xNext = fts5SegIterNext_None;
+ }else{
+ pIter->xNext = fts5SegIterNext;
+ }
+}
+
+/*
+** Initialize the iterator object pIter to iterate through the entries in
+** segment pSeg. The iterator is left pointing to the first entry when
+** this function returns.
+**
+** If an error occurs, Fts5Index.rc is set to an appropriate error code. If
+** an error has already occurred when this function is called, it is a no-op.
+*/
+static void fts5SegIterInit(
+ Fts5Index *p, /* FTS index object */
+ Fts5StructureSegment *pSeg, /* Description of segment */
+ Fts5SegIter *pIter /* Object to populate */
+){
+ if( pSeg->pgnoFirst==0 ){
+ /* This happens if the segment is being used as an input to an incremental
+ ** merge and all data has already been "trimmed". See function
+ ** fts5TrimSegments() for details. In this case leave the iterator empty.
+    ** The caller will see that (pIter->pLeaf==0) and assume the iterator
+    ** is already at EOF. */
+ assert( pIter->pLeaf==0 );
+ return;
+ }
+
+ if( p->rc==SQLITE_OK ){
+ memset(pIter, 0, sizeof(*pIter));
+ fts5SegIterSetNext(p, pIter);
+ pIter->pSeg = pSeg;
+ pIter->iLeafPgno = pSeg->pgnoFirst-1;
+ fts5SegIterNextPage(p, pIter);
+ }
+
+ if( p->rc==SQLITE_OK ){
+ pIter->iLeafOffset = 4;
+ assert_nc( pIter->pLeaf->nn>4 );
+ assert_nc( fts5LeafFirstTermOff(pIter->pLeaf)==4 );
+ pIter->iPgidxOff = pIter->pLeaf->szLeaf+1;
+ fts5SegIterLoadTerm(p, pIter, 0);
+ fts5SegIterLoadNPos(p, pIter);
+ }
+}
+
+/*
+** This function is only ever called on iterators created by calls to
+** Fts5IndexQuery() with the FTS5INDEX_QUERY_DESC flag set.
+**
+** The iterator is in an unusual state when this function is called: the
+** Fts5SegIter.iLeafOffset variable is set to the offset of the start of
+** the position-list size field for the first relevant rowid on the page.
+** Fts5SegIter.iRowid is set, but nPos and bDel are not.
+**
+** This function advances the iterator so that it points to the last
+** relevant rowid on the page and, if necessary, initializes the
+** aRowidOffset[] and iRowidOffset variables. At this point the iterator
+** is in its regular state - Fts5SegIter.iLeafOffset points to the first
+** byte of the position list content associated with said rowid.
+*/
+static void fts5SegIterReverseInitPage(Fts5Index *p, Fts5SegIter *pIter){
+ int eDetail = p->pConfig->eDetail;
+ int n = pIter->pLeaf->szLeaf;
+ int i = pIter->iLeafOffset;
+ u8 *a = pIter->pLeaf->p;
+ int iRowidOffset = 0;
+
+ if( n>pIter->iEndofDoclist ){
+ n = pIter->iEndofDoclist;
+ }
+
+ ASSERT_SZLEAF_OK(pIter->pLeaf);
+ while( 1 ){
+ i64 iDelta = 0;
+
+ if( eDetail==FTS5_DETAIL_NONE ){
+ /* todo */
+ if( i<n && a[i]==0 ){
+ i++;
+ if( i<n && a[i]==0 ) i++;
+ }
+ }else{
+ int nPos;
+ int bDummy;
+ i += fts5GetPoslistSize(&a[i], &nPos, &bDummy);
+ i += nPos;
+ }
+ if( i>=n ) break;
+ i += fts5GetVarint(&a[i], (u64*)&iDelta);
+ pIter->iRowid += iDelta;
+
+ /* If necessary, grow the pIter->aRowidOffset[] array. */
+ if( iRowidOffset>=pIter->nRowidOffset ){
+ int nNew = pIter->nRowidOffset + 8;
+ int *aNew = (int*)sqlite3_realloc64(pIter->aRowidOffset,nNew*sizeof(int));
+ if( aNew==0 ){
+ p->rc = SQLITE_NOMEM;
+ break;
+ }
+ pIter->aRowidOffset = aNew;
+ pIter->nRowidOffset = nNew;
+ }
+
+ pIter->aRowidOffset[iRowidOffset++] = pIter->iLeafOffset;
+ pIter->iLeafOffset = i;
+ }
+ pIter->iRowidOffset = iRowidOffset;
+ fts5SegIterLoadNPos(p, pIter);
+}
+
+/*
+** Move the reverse iterator passed as the second argument to the previous
+** leaf page (if any) that contains rowids belonging to the current term,
+** and leave it pointing to the last relevant rowid on that page. If there
+** is no such page, the iterator is left at EOF (pIter->pLeaf==0).
+*/
+static void fts5SegIterReverseNewPage(Fts5Index *p, Fts5SegIter *pIter){
+ assert( pIter->flags & FTS5_SEGITER_REVERSE );
+ assert( pIter->flags & FTS5_SEGITER_ONETERM );
+
+ fts5DataRelease(pIter->pLeaf);
+ pIter->pLeaf = 0;
+ while( p->rc==SQLITE_OK && pIter->iLeafPgno>pIter->iTermLeafPgno ){
+ Fts5Data *pNew;
+ pIter->iLeafPgno--;
+ pNew = fts5DataRead(p, FTS5_SEGMENT_ROWID(
+ pIter->pSeg->iSegid, pIter->iLeafPgno
+ ));
+ if( pNew ){
+ /* iTermLeafOffset may be equal to szLeaf if the term is the last
+ ** thing on the page - i.e. the first rowid is on the following page.
+      ** In this case leave pIter->pLeaf==0; this iterator is at EOF. */
+ if( pIter->iLeafPgno==pIter->iTermLeafPgno ){
+ assert( pIter->pLeaf==0 );
+ if( pIter->iTermLeafOffset<pNew->szLeaf ){
+ pIter->pLeaf = pNew;
+ pIter->iLeafOffset = pIter->iTermLeafOffset;
+ }
+ }else{
+ int iRowidOff;
+ iRowidOff = fts5LeafFirstRowidOff(pNew);
+ if( iRowidOff ){
+ pIter->pLeaf = pNew;
+ pIter->iLeafOffset = iRowidOff;
+ }
+ }
+
+ if( pIter->pLeaf ){
+ u8 *a = &pIter->pLeaf->p[pIter->iLeafOffset];
+ pIter->iLeafOffset += fts5GetVarint(a, (u64*)&pIter->iRowid);
+ break;
+ }else{
+ fts5DataRelease(pNew);
+ }
+ }
+ }
+
+ if( pIter->pLeaf ){
+ pIter->iEndofDoclist = pIter->pLeaf->nn+1;
+ fts5SegIterReverseInitPage(p, pIter);
+ }
+}
+
+/*
+** Return true if the iterator passed as the second argument currently
+** points to a delete marker. A delete marker is an entry with a 0 byte
+** position-list.
+*/
+static int fts5MultiIterIsEmpty(Fts5Index *p, Fts5Iter *pIter){
+ Fts5SegIter *pSeg = &pIter->aSeg[pIter->aFirst[1].iFirst];
+ return (p->rc==SQLITE_OK && pSeg->pLeaf && pSeg->nPos==0);
+}
+
+/*
+** Advance iterator pIter to the next entry.
+**
+** This version of fts5SegIterNext() is only used by reverse iterators.
+*/
+static void fts5SegIterNext_Reverse(
+ Fts5Index *p, /* FTS5 backend object */
+ Fts5SegIter *pIter, /* Iterator to advance */
+ int *pbUnused /* Unused */
+){
+ assert( pIter->flags & FTS5_SEGITER_REVERSE );
+ assert( pIter->pNextLeaf==0 );
+ UNUSED_PARAM(pbUnused);
+
+ if( pIter->iRowidOffset>0 ){
+ u8 *a = pIter->pLeaf->p;
+ int iOff;
+ i64 iDelta;
+
+ pIter->iRowidOffset--;
+ pIter->iLeafOffset = pIter->aRowidOffset[pIter->iRowidOffset];
+ fts5SegIterLoadNPos(p, pIter);
+ iOff = pIter->iLeafOffset;
+ if( p->pConfig->eDetail!=FTS5_DETAIL_NONE ){
+ iOff += pIter->nPos;
+ }
+ fts5GetVarint(&a[iOff], (u64*)&iDelta);
+ pIter->iRowid -= iDelta;
+ }else{
+ fts5SegIterReverseNewPage(p, pIter);
+ }
+}
+
+/*
+** Advance iterator pIter to the next entry.
+**
+** This version of fts5SegIterNext() is only used if detail=none and the
+** iterator is not a reverse direction iterator.
+*/
+static void fts5SegIterNext_None(
+ Fts5Index *p, /* FTS5 backend object */
+ Fts5SegIter *pIter, /* Iterator to advance */
+ int *pbNewTerm /* OUT: Set for new term */
+){
+ int iOff;
+
+ assert( p->rc==SQLITE_OK );
+ assert( (pIter->flags & FTS5_SEGITER_REVERSE)==0 );
+ assert( p->pConfig->eDetail==FTS5_DETAIL_NONE );
+
+ ASSERT_SZLEAF_OK(pIter->pLeaf);
+ iOff = pIter->iLeafOffset;
+
+ /* Next entry is on the next page */
+ if( pIter->pSeg && iOff>=pIter->pLeaf->szLeaf ){
+ fts5SegIterNextPage(p, pIter);
+ if( p->rc || pIter->pLeaf==0 ) return;
+ pIter->iRowid = 0;
+ iOff = 4;
+ }
+
+ if( iOff<pIter->iEndofDoclist ){
+ /* Next entry is on the current page */
+ i64 iDelta;
+ iOff += sqlite3Fts5GetVarint(&pIter->pLeaf->p[iOff], (u64*)&iDelta);
+ pIter->iLeafOffset = iOff;
+ pIter->iRowid += iDelta;
+ }else if( (pIter->flags & FTS5_SEGITER_ONETERM)==0 ){
+ if( pIter->pSeg ){
+ int nKeep = 0;
+ if( iOff!=fts5LeafFirstTermOff(pIter->pLeaf) ){
+ iOff += fts5GetVarint32(&pIter->pLeaf->p[iOff], nKeep);
+ }
+ pIter->iLeafOffset = iOff;
+ fts5SegIterLoadTerm(p, pIter, nKeep);
+ }else{
+ const u8 *pList = 0;
+ const char *zTerm = 0;
+ int nList;
+ sqlite3Fts5HashScanNext(p->pHash);
+ sqlite3Fts5HashScanEntry(p->pHash, &zTerm, &pList, &nList);
+ if( pList==0 ) goto next_none_eof;
+ pIter->pLeaf->p = (u8*)pList;
+ pIter->pLeaf->nn = nList;
+ pIter->pLeaf->szLeaf = nList;
+ pIter->iEndofDoclist = nList;
+ sqlite3Fts5BufferSet(&p->rc,&pIter->term, (int)strlen(zTerm), (u8*)zTerm);
+ pIter->iLeafOffset = fts5GetVarint(pList, (u64*)&pIter->iRowid);
+ }
+
+ if( pbNewTerm ) *pbNewTerm = 1;
+ }else{
+ goto next_none_eof;
+ }
+
+ fts5SegIterLoadNPos(p, pIter);
+
+ return;
+ next_none_eof:
+ fts5DataRelease(pIter->pLeaf);
+ pIter->pLeaf = 0;
+}
+
+
+/*
+** Advance iterator pIter to the next entry.
+**
+** If an error occurs, Fts5Index.rc is set to an appropriate error code. It
+** is not considered an error if the iterator reaches EOF. If an error has
+** already occurred when this function is called, it is a no-op.
+*/
+static void fts5SegIterNext(
+ Fts5Index *p, /* FTS5 backend object */
+ Fts5SegIter *pIter, /* Iterator to advance */
+ int *pbNewTerm /* OUT: Set for new term */
+){
+ Fts5Data *pLeaf = pIter->pLeaf;
+ int iOff;
+ int bNewTerm = 0;
+ int nKeep = 0;
+ u8 *a;
+ int n;
+
+ assert( pbNewTerm==0 || *pbNewTerm==0 );
+ assert( p->pConfig->eDetail!=FTS5_DETAIL_NONE );
+
+ /* Search for the end of the position list within the current page. */
+ a = pLeaf->p;
+ n = pLeaf->szLeaf;
+
+ ASSERT_SZLEAF_OK(pLeaf);
+ iOff = pIter->iLeafOffset + pIter->nPos;
+
+ if( iOff<n ){
+ /* The next entry is on the current page. */
+ assert_nc( iOff<=pIter->iEndofDoclist );
+ if( iOff>=pIter->iEndofDoclist ){
+ bNewTerm = 1;
+ if( iOff!=fts5LeafFirstTermOff(pLeaf) ){
+ iOff += fts5GetVarint32(&a[iOff], nKeep);
+ }
+ }else{
+ u64 iDelta;
+ iOff += sqlite3Fts5GetVarint(&a[iOff], &iDelta);
+ pIter->iRowid += iDelta;
+ assert_nc( iDelta>0 );
+ }
+ pIter->iLeafOffset = iOff;
+
+ }else if( pIter->pSeg==0 ){
+ const u8 *pList = 0;
+ const char *zTerm = 0;
+ int nList = 0;
+ assert( (pIter->flags & FTS5_SEGITER_ONETERM) || pbNewTerm );
+ if( 0==(pIter->flags & FTS5_SEGITER_ONETERM) ){
+ sqlite3Fts5HashScanNext(p->pHash);
+ sqlite3Fts5HashScanEntry(p->pHash, &zTerm, &pList, &nList);
+ }
+ if( pList==0 ){
+ fts5DataRelease(pIter->pLeaf);
+ pIter->pLeaf = 0;
+ }else{
+ pIter->pLeaf->p = (u8*)pList;
+ pIter->pLeaf->nn = nList;
+ pIter->pLeaf->szLeaf = nList;
+ pIter->iEndofDoclist = nList+1;
+ sqlite3Fts5BufferSet(&p->rc, &pIter->term, (int)strlen(zTerm),
+ (u8*)zTerm);
+ pIter->iLeafOffset = fts5GetVarint(pList, (u64*)&pIter->iRowid);
+ *pbNewTerm = 1;
+ }
+ }else{
+ iOff = 0;
+ /* Next entry is not on the current page */
+ while( iOff==0 ){
+ fts5SegIterNextPage(p, pIter);
+ pLeaf = pIter->pLeaf;
+ if( pLeaf==0 ) break;
+ ASSERT_SZLEAF_OK(pLeaf);
+ if( (iOff = fts5LeafFirstRowidOff(pLeaf)) && iOff<pLeaf->szLeaf ){
+ iOff += sqlite3Fts5GetVarint(&pLeaf->p[iOff], (u64*)&pIter->iRowid);
+ pIter->iLeafOffset = iOff;
+
+ if( pLeaf->nn>pLeaf->szLeaf ){
+ pIter->iPgidxOff = pLeaf->szLeaf + fts5GetVarint32(
+ &pLeaf->p[pLeaf->szLeaf], pIter->iEndofDoclist
+ );
+ }
+ }
+ else if( pLeaf->nn>pLeaf->szLeaf ){
+ pIter->iPgidxOff = pLeaf->szLeaf + fts5GetVarint32(
+ &pLeaf->p[pLeaf->szLeaf], iOff
+ );
+ pIter->iLeafOffset = iOff;
+ pIter->iEndofDoclist = iOff;
+ bNewTerm = 1;
+ }
+ assert_nc( iOff<pLeaf->szLeaf );
+ if( iOff>pLeaf->szLeaf ){
+ p->rc = FTS5_CORRUPT;
+ return;
+ }
+ }
+ }
+
+ /* Check if the iterator is now at EOF. If so, return early. */
+ if( pIter->pLeaf ){
+ if( bNewTerm ){
+ if( pIter->flags & FTS5_SEGITER_ONETERM ){
+ fts5DataRelease(pIter->pLeaf);
+ pIter->pLeaf = 0;
+ }else{
+ fts5SegIterLoadTerm(p, pIter, nKeep);
+ fts5SegIterLoadNPos(p, pIter);
+ if( pbNewTerm ) *pbNewTerm = 1;
+ }
+ }else{
+ /* The following could be done by calling fts5SegIterLoadNPos(). But
+ ** this block is particularly performance critical, so equivalent
+ ** code is inlined.
+ **
+ ** Later: Switched back to fts5SegIterLoadNPos() because it supports
+ ** detail=none mode. Not ideal.
+ */
+ int nSz;
+ assert( p->rc==SQLITE_OK );
+ assert( pIter->iLeafOffset<=pIter->pLeaf->nn );
+ fts5FastGetVarint32(pIter->pLeaf->p, pIter->iLeafOffset, nSz);
+ pIter->bDel = (nSz & 0x0001);
+ pIter->nPos = nSz>>1;
+ assert_nc( pIter->nPos>=0 );
+ }
+ }
+}
+
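+/* Exchange the values of the two variables of type T passed as the second
+** and third arguments. */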
+#define SWAPVAL(T, a, b) { T tmp; tmp=a; a=b; b=tmp; }
+
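+/* Advance offset iOff past a single varint in buffer a[], reading at most
+** the 9 bytes a varint may occupy, without decoding its value. */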
+#define fts5IndexSkipVarint(a, iOff) { \
+ int iEnd = iOff+9; \
+ while( (a[iOff++] & 0x80) && iOff<iEnd ); \
+}
+
+/*
+** Iterator pIter currently points to the first rowid in a doclist. This
+** function sets the iterator up so that it iterates in reverse order
+** through the doclist.
+*/
+static void fts5SegIterReverse(Fts5Index *p, Fts5SegIter *pIter){
+ Fts5DlidxIter *pDlidx = pIter->pDlidx;
+ Fts5Data *pLast = 0;
+ int pgnoLast = 0;
+
+ if( pDlidx ){
+ int iSegid = pIter->pSeg->iSegid;
+ pgnoLast = fts5DlidxIterPgno(pDlidx);
+ pLast = fts5DataRead(p, FTS5_SEGMENT_ROWID(iSegid, pgnoLast));
+ }else{
+ Fts5Data *pLeaf = pIter->pLeaf; /* Current leaf data */
+
+ /* Currently, Fts5SegIter.iLeafOffset points to the first byte of
+ ** position-list content for the current rowid. Back it up so that it
+ ** points to the start of the position-list size field. */
+ int iPoslist;
+ if( pIter->iTermLeafPgno==pIter->iLeafPgno ){
+ iPoslist = pIter->iTermLeafOffset;
+ }else{
+ iPoslist = 4;
+ }
+ fts5IndexSkipVarint(pLeaf->p, iPoslist);
+ pIter->iLeafOffset = iPoslist;
+
+ /* If this condition is true then the largest rowid for the current
+ ** term may not be stored on the current page. So search forward to
+ ** see where said rowid really is. */
+ if( pIter->iEndofDoclist>=pLeaf->szLeaf ){
+ int pgno;
+ Fts5StructureSegment *pSeg = pIter->pSeg;
+
+ /* The last rowid in the doclist may not be on the current page. Search
+ ** forward to find the page containing the last rowid. */
+ for(pgno=pIter->iLeafPgno+1; !p->rc && pgno<=pSeg->pgnoLast; pgno++){
+ i64 iAbs = FTS5_SEGMENT_ROWID(pSeg->iSegid, pgno);
+ Fts5Data *pNew = fts5DataRead(p, iAbs);
+ if( pNew ){
+ int iRowid, bTermless;
+ iRowid = fts5LeafFirstRowidOff(pNew);
+ bTermless = fts5LeafIsTermless(pNew);
+ if( iRowid ){
+ SWAPVAL(Fts5Data*, pNew, pLast);
+ pgnoLast = pgno;
+ }
+ fts5DataRelease(pNew);
+ if( bTermless==0 ) break;
+ }
+ }
+ }
+ }
+
+ /* If pLast is NULL at this point, then the last rowid for this doclist
+ ** lies on the page currently indicated by the iterator. In this case
+ ** pIter->iLeafOffset is already set to point to the position-list size
+ ** field associated with the first relevant rowid on the page.
+ **
+ ** Or, if pLast is non-NULL, then it is the page that contains the last
+ ** rowid. In this case configure the iterator so that it points to the
+ ** first rowid on this page.
+ */
+ if( pLast ){
+ int iOff;
+ fts5DataRelease(pIter->pLeaf);
+ pIter->pLeaf = pLast;
+ pIter->iLeafPgno = pgnoLast;
+ iOff = fts5LeafFirstRowidOff(pLast);
+ iOff += fts5GetVarint(&pLast->p[iOff], (u64*)&pIter->iRowid);
+ pIter->iLeafOffset = iOff;
+
+ if( fts5LeafIsTermless(pLast) ){
+ pIter->iEndofDoclist = pLast->nn+1;
+ }else{
+ pIter->iEndofDoclist = fts5LeafFirstTermOff(pLast);
+ }
+
+ }
+
+ fts5SegIterReverseInitPage(p, pIter);
+}
+
+/*
+** Iterator pIter currently points to the first rowid of a doclist.
+** There is a doclist-index associated with the final term on the current
+** page. If the current term is the last term on the page, load the
+** doclist-index from disk and initialize an iterator at (pIter->pDlidx).
+*/
+static void fts5SegIterLoadDlidx(Fts5Index *p, Fts5SegIter *pIter){
+ int iSeg = pIter->pSeg->iSegid;
+ int bRev = (pIter->flags & FTS5_SEGITER_REVERSE);
+ Fts5Data *pLeaf = pIter->pLeaf; /* Current leaf data */
+
+ assert( pIter->flags & FTS5_SEGITER_ONETERM );
+ assert( pIter->pDlidx==0 );
+
+ /* Check if the current doclist ends on this page. If it does, return
+ ** early without loading the doclist-index (as it belongs to a different
+  ** term). */
+ if( pIter->iTermLeafPgno==pIter->iLeafPgno
+ && pIter->iEndofDoclist<pLeaf->szLeaf
+ ){
+ return;
+ }
+
+ pIter->pDlidx = fts5DlidxIterInit(p, bRev, iSeg, pIter->iTermLeafPgno);
+}
+
+/*
+** The iterator object passed as the second argument currently contains
+** no valid values except for the Fts5SegIter.pLeaf member variable. This
+** function searches the leaf page for a term matching (pTerm/nTerm).
+**
+** If the specified term is found on the page, then the iterator is left
+** pointing to it. If argument bGe is zero and the term is not found,
+** the iterator is left pointing at EOF.
+**
+** If bGe is non-zero and the specified term is not found, then the
+** iterator is left pointing to the smallest term in the segment that
+** is larger than the specified term, even if this term is not on the
+** current page.
+*/
+static void fts5LeafSeek(
+ Fts5Index *p, /* Leave any error code here */
+ int bGe, /* True for a >= search */
+ Fts5SegIter *pIter, /* Iterator to seek */
+ const u8 *pTerm, int nTerm /* Term to search for */
+){
+ int iOff;
+ const u8 *a = pIter->pLeaf->p;
+ int szLeaf = pIter->pLeaf->szLeaf;
+ int n = pIter->pLeaf->nn;
+
+ u32 nMatch = 0;
+ u32 nKeep = 0;
+ u32 nNew = 0;
+ u32 iTermOff;
+ int iPgidx; /* Current offset in pgidx */
+ int bEndOfPage = 0;
+
+ assert( p->rc==SQLITE_OK );
+
+ iPgidx = szLeaf;
+ iPgidx += fts5GetVarint32(&a[iPgidx], iTermOff);
+ iOff = iTermOff;
+ if( iOff>n ){
+ p->rc = FTS5_CORRUPT;
+ return;
+ }
+
+ while( 1 ){
+
+ /* Figure out how many new bytes are in this term */
+ fts5FastGetVarint32(a, iOff, nNew);
+ if( nKeep<nMatch ){
+ goto search_failed;
+ }
+
+ assert( nKeep>=nMatch );
+ if( nKeep==nMatch ){
+ u32 nCmp;
+ u32 i;
+ nCmp = (u32)MIN(nNew, nTerm-nMatch);
+ for(i=0; i<nCmp; i++){
+ if( a[iOff+i]!=pTerm[nMatch+i] ) break;
+ }
+ nMatch += i;
+
+ if( (u32)nTerm==nMatch ){
+ if( i==nNew ){
+ goto search_success;
+ }else{
+ goto search_failed;
+ }
+ }else if( i<nNew && a[iOff+i]>pTerm[nMatch] ){
+ goto search_failed;
+ }
+ }
+
+ if( iPgidx>=n ){
+ bEndOfPage = 1;
+ break;
+ }
+
+ iPgidx += fts5GetVarint32(&a[iPgidx], nKeep);
+ iTermOff += nKeep;
+ iOff = iTermOff;
+
+ if( iOff>=n ){
+ p->rc = FTS5_CORRUPT;
+ return;
+ }
+
+ /* Read the nKeep field of the next term. */
+ fts5FastGetVarint32(a, iOff, nKeep);
+ }
+
+ search_failed:
+ if( bGe==0 ){
+ fts5DataRelease(pIter->pLeaf);
+ pIter->pLeaf = 0;
+ return;
+ }else if( bEndOfPage ){
+ do {
+ fts5SegIterNextPage(p, pIter);
+ if( pIter->pLeaf==0 ) return;
+ a = pIter->pLeaf->p;
+ if( fts5LeafIsTermless(pIter->pLeaf)==0 ){
+ iPgidx = pIter->pLeaf->szLeaf;
+ iPgidx += fts5GetVarint32(&pIter->pLeaf->p[iPgidx], iOff);
+ if( iOff<4 || iOff>=pIter->pLeaf->szLeaf ){
+ p->rc = FTS5_CORRUPT;
+ return;
+ }else{
+ nKeep = 0;
+ iTermOff = iOff;
+ n = pIter->pLeaf->nn;
+ iOff += fts5GetVarint32(&a[iOff], nNew);
+ break;
+ }
+ }
+ }while( 1 );
+ }
+
+ search_success:
+ if( (i64)iOff+nNew>n || nNew<1 ){
+ p->rc = FTS5_CORRUPT;
+ return;
+ }
+ pIter->iLeafOffset = iOff + nNew;
+ pIter->iTermLeafOffset = pIter->iLeafOffset;
+ pIter->iTermLeafPgno = pIter->iLeafPgno;
+
+ fts5BufferSet(&p->rc, &pIter->term, nKeep, pTerm);
+ fts5BufferAppendBlob(&p->rc, &pIter->term, nNew, &a[iOff]);
+
+ if( iPgidx>=n ){
+ pIter->iEndofDoclist = pIter->pLeaf->nn+1;
+ }else{
+ int nExtra;
+ iPgidx += fts5GetVarint32(&a[iPgidx], nExtra);
+ pIter->iEndofDoclist = iTermOff + nExtra;
+ }
+ pIter->iPgidxOff = iPgidx;
+
+ fts5SegIterLoadRowid(p, pIter);
+ fts5SegIterLoadNPos(p, pIter);
+}
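+/* Illustrative sketch (not part of FTS5): terms within a leaf page are
+** prefix-compressed. Each term after the first is stored as an (nKeep,
+** nNew, suffix) triple - retain the first nKeep bytes of the previous
+** term, then append nNew new bytes - which is exactly what the
+** fts5BufferSet()/fts5BufferAppendBlob() pair above does. The helper
+** below (hypothetical) applies one such step to a caller-owned buffer. */
+static int fts5ExamplePrefixStep(
+  u8 *aTerm,                    /* IN/OUT: previous term, becomes new term */
+  int nKeep,                    /* Bytes to retain from the previous term */
+  const u8 *aSuffix, int nNew   /* Suffix bytes to append */
+){
+  memcpy(&aTerm[nKeep], aSuffix, nNew);
+  return nKeep + nNew;          /* Size of the reconstructed term */
+}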
+
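+/*
+** Return the prepared statement used to query the %_idx table, preparing
+** it on first use. If an error occurs, an error code is left in
+** Fts5Index.rc and NULL may be returned.
+*/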
+static sqlite3_stmt *fts5IdxSelectStmt(Fts5Index *p){
+ if( p->pIdxSelect==0 ){
+ Fts5Config *pConfig = p->pConfig;
+ fts5IndexPrepareStmt(p, &p->pIdxSelect, sqlite3_mprintf(
+ "SELECT pgno FROM '%q'.'%q_idx' WHERE "
+ "segid=? AND term<=? ORDER BY term DESC LIMIT 1",
+ pConfig->zDb, pConfig->zName
+ ));
+ }
+ return p->pIdxSelect;
+}
+
+/*
+** Initialize the object pIter to point to term pTerm/nTerm within segment
+** pSeg. If there is no such term in the index, the iterator is set to EOF.
+**
+** If an error occurs, Fts5Index.rc is set to an appropriate error code. If
+** an error has already occurred when this function is called, it is a no-op.
+*/
+static void fts5SegIterSeekInit(
+ Fts5Index *p, /* FTS5 backend */
+ const u8 *pTerm, int nTerm, /* Term to seek to */
+ int flags, /* Mask of FTS5INDEX_XXX flags */
+ Fts5StructureSegment *pSeg, /* Description of segment */
+ Fts5SegIter *pIter /* Object to populate */
+){
+ int iPg = 1;
+ int bGe = (flags & FTS5INDEX_QUERY_SCAN);
+ int bDlidx = 0; /* True if there is a doclist-index */
+ sqlite3_stmt *pIdxSelect = 0;
+
+ assert( bGe==0 || (flags & FTS5INDEX_QUERY_DESC)==0 );
+ assert( pTerm && nTerm );
+ memset(pIter, 0, sizeof(*pIter));
+ pIter->pSeg = pSeg;
+
+ /* This block sets stack variable iPg to the leaf page number that may
+ ** contain term (pTerm/nTerm), if it is present in the segment. */
+ pIdxSelect = fts5IdxSelectStmt(p);
+ if( p->rc ) return;
+ sqlite3_bind_int(pIdxSelect, 1, pSeg->iSegid);
+ sqlite3_bind_blob(pIdxSelect, 2, pTerm, nTerm, SQLITE_STATIC);
+ if( SQLITE_ROW==sqlite3_step(pIdxSelect) ){
+ i64 val = sqlite3_column_int(pIdxSelect, 0);
+ iPg = (int)(val>>1);
+ bDlidx = (val & 0x0001);
+ }
+ p->rc = sqlite3_reset(pIdxSelect);
+ sqlite3_bind_null(pIdxSelect, 2);
+
+ if( iPg<pSeg->pgnoFirst ){
+ iPg = pSeg->pgnoFirst;
+ bDlidx = 0;
+ }
+
+ pIter->iLeafPgno = iPg - 1;
+ fts5SegIterNextPage(p, pIter);
+
+ if( pIter->pLeaf ){
+ fts5LeafSeek(p, bGe, pIter, pTerm, nTerm);
+ }
+
+ if( p->rc==SQLITE_OK && bGe==0 ){
+ pIter->flags |= FTS5_SEGITER_ONETERM;
+ if( pIter->pLeaf ){
+ if( flags & FTS5INDEX_QUERY_DESC ){
+ pIter->flags |= FTS5_SEGITER_REVERSE;
+ }
+ if( bDlidx ){
+ fts5SegIterLoadDlidx(p, pIter);
+ }
+ if( flags & FTS5INDEX_QUERY_DESC ){
+ fts5SegIterReverse(p, pIter);
+ }
+ }
+ }
+
+ fts5SegIterSetNext(p, pIter);
+
+ /* Either:
+ **
+ ** 1) an error has occurred, or
+ ** 2) the iterator points to EOF, or
+ ** 3) the iterator points to an entry with term (pTerm/nTerm), or
+ ** 4) the FTS5INDEX_QUERY_SCAN flag was set and the iterator points
+ ** to an entry with a term greater than or equal to (pTerm/nTerm).
+ */
+ assert_nc( p->rc!=SQLITE_OK /* 1 */
+ || pIter->pLeaf==0 /* 2 */
+ || fts5BufferCompareBlob(&pIter->term, pTerm, nTerm)==0 /* 3 */
+ || (bGe && fts5BufferCompareBlob(&pIter->term, pTerm, nTerm)>0) /* 4 */
+ );
+}
+
+/*
+** Initialize the object pIter to point to term pTerm/nTerm within the
+** in-memory hash table. If there is no such term in the hash-table, the
+** iterator is set to EOF.
+**
+** If an error occurs, Fts5Index.rc is set to an appropriate error code. If
+** an error has already occurred when this function is called, it is a no-op.
+*/
+static void fts5SegIterHashInit(
+ Fts5Index *p, /* FTS5 backend */
+ const u8 *pTerm, int nTerm, /* Term to seek to */
+ int flags, /* Mask of FTS5INDEX_XXX flags */
+ Fts5SegIter *pIter /* Object to populate */
+){
+ int nList = 0;
+ const u8 *z = 0;
+ int n = 0;
+ Fts5Data *pLeaf = 0;
+
+ assert( p->pHash );
+ assert( p->rc==SQLITE_OK );
+
+ if( pTerm==0 || (flags & FTS5INDEX_QUERY_SCAN) ){
+ const u8 *pList = 0;
+
+ p->rc = sqlite3Fts5HashScanInit(p->pHash, (const char*)pTerm, nTerm);
+ sqlite3Fts5HashScanEntry(p->pHash, (const char**)&z, &pList, &nList);
+ n = (z ? (int)strlen((const char*)z) : 0);
+ if( pList ){
+ pLeaf = fts5IdxMalloc(p, sizeof(Fts5Data));
+ if( pLeaf ){
+ pLeaf->p = (u8*)pList;
+ }
+ }
+ }else{
+ p->rc = sqlite3Fts5HashQuery(p->pHash, sizeof(Fts5Data),
+ (const char*)pTerm, nTerm, (void**)&pLeaf, &nList
+ );
+ if( pLeaf ){
+ pLeaf->p = (u8*)&pLeaf[1];
+ }
+ z = pTerm;
+ n = nTerm;
+ pIter->flags |= FTS5_SEGITER_ONETERM;
+ }
+
+ if( pLeaf ){
+ sqlite3Fts5BufferSet(&p->rc, &pIter->term, n, z);
+ pLeaf->nn = pLeaf->szLeaf = nList;
+ pIter->pLeaf = pLeaf;
+ pIter->iLeafOffset = fts5GetVarint(pLeaf->p, (u64*)&pIter->iRowid);
+ pIter->iEndofDoclist = pLeaf->nn;
+
+ if( flags & FTS5INDEX_QUERY_DESC ){
+ pIter->flags |= FTS5_SEGITER_REVERSE;
+ fts5SegIterReverseInitPage(p, pIter);
+ }else{
+ fts5SegIterLoadNPos(p, pIter);
+ }
+ }
+
+ fts5SegIterSetNext(p, pIter);
+}
+
+/*
+** Zero the iterator passed as the only argument.
+*/
+static void fts5SegIterClear(Fts5SegIter *pIter){
+ fts5BufferFree(&pIter->term);
+ fts5DataRelease(pIter->pLeaf);
+ fts5DataRelease(pIter->pNextLeaf);
+ fts5DlidxIterFree(pIter->pDlidx);
+ sqlite3_free(pIter->aRowidOffset);
+ memset(pIter, 0, sizeof(Fts5SegIter));
+}
+
+#ifdef SQLITE_DEBUG
+
+/*
+** This function is used as part of the big assert() procedure implemented by
+** fts5AssertMultiIterSetup(). It ensures that the result currently stored
+** in *pRes is the correct result of comparing the current positions of the
+** two iterators.
+*/
+static void fts5AssertComparisonResult(
+ Fts5Iter *pIter,
+ Fts5SegIter *p1,
+ Fts5SegIter *p2,
+ Fts5CResult *pRes
+){
+ int i1 = p1 - pIter->aSeg;
+ int i2 = p2 - pIter->aSeg;
+
+ if( p1->pLeaf || p2->pLeaf ){
+ if( p1->pLeaf==0 ){
+ assert( pRes->iFirst==i2 );
+ }else if( p2->pLeaf==0 ){
+ assert( pRes->iFirst==i1 );
+ }else{
+ int nMin = MIN(p1->term.n, p2->term.n);
+ int res = fts5Memcmp(p1->term.p, p2->term.p, nMin);
+ if( res==0 ) res = p1->term.n - p2->term.n;
+
+ if( res==0 ){
+ assert( pRes->bTermEq==1 );
+ assert( p1->iRowid!=p2->iRowid );
+ res = ((p1->iRowid > p2->iRowid)==pIter->bRev) ? -1 : 1;
+ }else{
+ assert( pRes->bTermEq==0 );
+ }
+
+ if( res<0 ){
+ assert( pRes->iFirst==i1 );
+ }else{
+ assert( pRes->iFirst==i2 );
+ }
+ }
+ }
+}
+
+/*
+** This function is a no-op unless SQLITE_DEBUG is defined when this module
+** is compiled. In that case, this function is essentially an assert()
+** statement used to verify that the contents of the pIter->aFirst[] array
+** are correct.
+*/
+static void fts5AssertMultiIterSetup(Fts5Index *p, Fts5Iter *pIter){
+ if( p->rc==SQLITE_OK ){
+ Fts5SegIter *pFirst = &pIter->aSeg[ pIter->aFirst[1].iFirst ];
+ int i;
+
+ assert( (pFirst->pLeaf==0)==pIter->base.bEof );
+
+ /* Check that pIter->iSwitchRowid is set correctly. */
+ for(i=0; i<pIter->nSeg; i++){
+ Fts5SegIter *p1 = &pIter->aSeg[i];
+ assert( p1==pFirst
+ || p1->pLeaf==0
+ || fts5BufferCompare(&pFirst->term, &p1->term)
+ || p1->iRowid==pIter->iSwitchRowid
+ || (p1->iRowid<pIter->iSwitchRowid)==pIter->bRev
+ );
+ }
+
+ for(i=0; i<pIter->nSeg; i+=2){
+ Fts5SegIter *p1 = &pIter->aSeg[i];
+ Fts5SegIter *p2 = &pIter->aSeg[i+1];
+ Fts5CResult *pRes = &pIter->aFirst[(pIter->nSeg + i) / 2];
+ fts5AssertComparisonResult(pIter, p1, p2, pRes);
+ }
+
+ for(i=1; i<(pIter->nSeg / 2); i+=2){
+ Fts5SegIter *p1 = &pIter->aSeg[ pIter->aFirst[i*2].iFirst ];
+ Fts5SegIter *p2 = &pIter->aSeg[ pIter->aFirst[i*2+1].iFirst ];
+ Fts5CResult *pRes = &pIter->aFirst[i];
+ fts5AssertComparisonResult(pIter, p1, p2, pRes);
+ }
+ }
+}
+#else
+# define fts5AssertMultiIterSetup(x,y)
+#endif
+
+/*
+** Do the comparison necessary to populate pIter->aFirst[iOut].
+**
+** If the returned value is non-zero, then it is the index of an entry
+** in the pIter->aSeg[] array that is (a) not at EOF, and (b) pointing
+** to a key that is a duplicate of another, higher priority,
+** segment-iterator in the pIter->aSeg[] array.
+*/
+static int fts5MultiIterDoCompare(Fts5Iter *pIter, int iOut){
+ int i1; /* Index of left-hand Fts5SegIter */
+ int i2; /* Index of right-hand Fts5SegIter */
+ int iRes;
+ Fts5SegIter *p1; /* Left-hand Fts5SegIter */
+ Fts5SegIter *p2; /* Right-hand Fts5SegIter */
+ Fts5CResult *pRes = &pIter->aFirst[iOut];
+
+ assert( iOut<pIter->nSeg && iOut>0 );
+ assert( pIter->bRev==0 || pIter->bRev==1 );
+
+ if( iOut>=(pIter->nSeg/2) ){
+ i1 = (iOut - pIter->nSeg/2) * 2;
+ i2 = i1 + 1;
+ }else{
+ i1 = pIter->aFirst[iOut*2].iFirst;
+ i2 = pIter->aFirst[iOut*2+1].iFirst;
+ }
+ p1 = &pIter->aSeg[i1];
+ p2 = &pIter->aSeg[i2];
+
+ pRes->bTermEq = 0;
+ if( p1->pLeaf==0 ){ /* If p1 is at EOF */
+ iRes = i2;
+ }else if( p2->pLeaf==0 ){ /* If p2 is at EOF */
+ iRes = i1;
+ }else{
+ int res = fts5BufferCompare(&p1->term, &p2->term);
+ if( res==0 ){
+ assert_nc( i2>i1 );
+ assert_nc( i2!=0 );
+ pRes->bTermEq = 1;
+ if( p1->iRowid==p2->iRowid ){
+ p1->bDel = p2->bDel;
+ return i2;
+ }
+ res = ((p1->iRowid > p2->iRowid)==pIter->bRev) ? -1 : +1;
+ }
+ assert( res!=0 );
+ if( res<0 ){
+ iRes = i1;
+ }else{
+ iRes = i2;
+ }
+ }
+
+ pRes->iFirst = (u16)iRes;
+ return 0;
+}
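+/* Illustrative sketch (not part of FTS5): aFirst[] forms a tournament
+** tree over the nSeg (always a power of two) entries of aSeg[], with
+** aFirst[1] as the root. For a node i in the upper half of the array the
+** children are aSeg[(i - nSeg/2)*2] and aSeg[(i - nSeg/2)*2 + 1]; for
+** lower nodes they are the winners stored in aFirst[2*i] and
+** aFirst[2*i+1]. After sub-iterator iChanged advances, only the nodes on
+** its path to the root need recomputing, as the hypothetical walk below
+** counts: */
+static int fts5ExampleTournamentPathLen(int nSeg, int iChanged){
+  int i;
+  int nNode = 0;
+  for(i=(nSeg+iChanged)/2; i>=1; i=i/2){
+    nNode++;            /* aFirst[i] lies on the path and is recomputed */
+  }
+  return nNode;         /* log2(nSeg) comparisons per advance */
+}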
+
+/*
+** Move the seg-iter so that it points to the first rowid on page iLeafPgno.
+** It is an error if leaf iLeafPgno does not exist or contains no rowids.
+*/
+static void fts5SegIterGotoPage(
+ Fts5Index *p, /* FTS5 backend object */
+ Fts5SegIter *pIter, /* Iterator to advance */
+ int iLeafPgno
+){
+ assert( iLeafPgno>pIter->iLeafPgno );
+
+ if( iLeafPgno>pIter->pSeg->pgnoLast ){
+ p->rc = FTS5_CORRUPT;
+ }else{
+ fts5DataRelease(pIter->pNextLeaf);
+ pIter->pNextLeaf = 0;
+ pIter->iLeafPgno = iLeafPgno-1;
+ fts5SegIterNextPage(p, pIter);
+ assert( p->rc!=SQLITE_OK || pIter->iLeafPgno==iLeafPgno );
+
+ if( p->rc==SQLITE_OK ){
+ int iOff;
+ u8 *a = pIter->pLeaf->p;
+ int n = pIter->pLeaf->szLeaf;
+
+ iOff = fts5LeafFirstRowidOff(pIter->pLeaf);
+ if( iOff<4 || iOff>=n ){
+ p->rc = FTS5_CORRUPT;
+ }else{
+ iOff += fts5GetVarint(&a[iOff], (u64*)&pIter->iRowid);
+ pIter->iLeafOffset = iOff;
+ fts5SegIterLoadNPos(p, pIter);
+ }
+ }
+ }
+}
+
+/*
+** Advance the iterator passed as the second argument until it is at or
+** past rowid iFrom. Regardless of the value of iFrom, the iterator is
+** always advanced at least once.
+*/
+static void fts5SegIterNextFrom(
+ Fts5Index *p, /* FTS5 backend object */
+ Fts5SegIter *pIter, /* Iterator to advance */
+ i64 iMatch /* Advance iterator at least this far */
+){
+ int bRev = (pIter->flags & FTS5_SEGITER_REVERSE);
+ Fts5DlidxIter *pDlidx = pIter->pDlidx;
+ int iLeafPgno = pIter->iLeafPgno;
+ int bMove = 1;
+
+ assert( pIter->flags & FTS5_SEGITER_ONETERM );
+ assert( pIter->pDlidx );
+ assert( pIter->pLeaf );
+
+ if( bRev==0 ){
+ while( !fts5DlidxIterEof(p, pDlidx) && iMatch>fts5DlidxIterRowid(pDlidx) ){
+ iLeafPgno = fts5DlidxIterPgno(pDlidx);
+ fts5DlidxIterNext(p, pDlidx);
+ }
+ assert_nc( iLeafPgno>=pIter->iLeafPgno || p->rc );
+ if( iLeafPgno>pIter->iLeafPgno ){
+ fts5SegIterGotoPage(p, pIter, iLeafPgno);
+ bMove = 0;
+ }
+ }else{
+ assert( pIter->pNextLeaf==0 );
+ assert( iMatch<pIter->iRowid );
+ while( !fts5DlidxIterEof(p, pDlidx) && iMatch<fts5DlidxIterRowid(pDlidx) ){
+ fts5DlidxIterPrev(p, pDlidx);
+ }
+ iLeafPgno = fts5DlidxIterPgno(pDlidx);
+
+ assert( fts5DlidxIterEof(p, pDlidx) || iLeafPgno<=pIter->iLeafPgno );
+
+ if( iLeafPgno<pIter->iLeafPgno ){
+ pIter->iLeafPgno = iLeafPgno+1;
+ fts5SegIterReverseNewPage(p, pIter);
+ bMove = 0;
+ }
+ }
+
+ do{
+ if( bMove && p->rc==SQLITE_OK ) pIter->xNext(p, pIter, 0);
+ if( pIter->pLeaf==0 ) break;
+ if( bRev==0 && pIter->iRowid>=iMatch ) break;
+ if( bRev!=0 && pIter->iRowid<=iMatch ) break;
+ bMove = 1;
+ }while( p->rc==SQLITE_OK );
+}
+
+
+/*
+** Free the iterator object passed as the second argument.
+*/
+static void fts5MultiIterFree(Fts5Iter *pIter){
+ if( pIter ){
+ int i;
+ for(i=0; i<pIter->nSeg; i++){
+ fts5SegIterClear(&pIter->aSeg[i]);
+ }
+ fts5BufferFree(&pIter->poslist);
+ sqlite3_free(pIter);
+ }
+}
+
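+/*
+** Sub-iterator iChanged of pIter has just been advanced. Recompute the
+** aFirst[] tournament-tree entries on the path from that sub-iterator up
+** to entry iMinset. If a comparison finds two sub-iterators positioned on
+** the same term and rowid, the lower-priority one is advanced again and
+** the affected portion of the tree recomputed.
+*/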
+static void fts5MultiIterAdvanced(
+ Fts5Index *p, /* FTS5 backend to iterate within */
+ Fts5Iter *pIter, /* Iterator to update aFirst[] array for */
+ int iChanged, /* Index of sub-iterator just advanced */
+ int iMinset /* Minimum entry in aFirst[] to set */
+){
+ int i;
+ for(i=(pIter->nSeg+iChanged)/2; i>=iMinset && p->rc==SQLITE_OK; i=i/2){
+ int iEq;
+ if( (iEq = fts5MultiIterDoCompare(pIter, i)) ){
+ Fts5SegIter *pSeg = &pIter->aSeg[iEq];
+ assert( p->rc==SQLITE_OK );
+ pSeg->xNext(p, pSeg, 0);
+ i = pIter->nSeg + iEq;
+ }
+ }
+}
+
+/*
+** Sub-iterator iChanged of iterator pIter has just been advanced. It still
+** points to the same term though - just a different rowid. This function
+** attempts to update the contents of the pIter->aFirst[] array accordingly.
+** If it does so successfully, 0 is returned. Otherwise 1.
+**
+** If non-zero is returned, the caller should call fts5MultiIterAdvanced()
+** on the iterator instead. That function does the same as this one, except
+** that it deals with more complicated cases as well.
+*/
+static int fts5MultiIterAdvanceRowid(
+ Fts5Iter *pIter, /* Iterator to update aFirst[] array for */
+ int iChanged, /* Index of sub-iterator just advanced */
+ Fts5SegIter **ppFirst
+){
+ Fts5SegIter *pNew = &pIter->aSeg[iChanged];
+
+ if( pNew->iRowid==pIter->iSwitchRowid
+ || (pNew->iRowid<pIter->iSwitchRowid)==pIter->bRev
+ ){
+ int i;
+ Fts5SegIter *pOther = &pIter->aSeg[iChanged ^ 0x0001];
+ pIter->iSwitchRowid = pIter->bRev ? SMALLEST_INT64 : LARGEST_INT64;
+ for(i=(pIter->nSeg+iChanged)/2; 1; i=i/2){
+ Fts5CResult *pRes = &pIter->aFirst[i];
+
+ assert( pNew->pLeaf );
+ assert( pRes->bTermEq==0 || pOther->pLeaf );
+
+ if( pRes->bTermEq ){
+ if( pNew->iRowid==pOther->iRowid ){
+ return 1;
+ }else if( (pOther->iRowid>pNew->iRowid)==pIter->bRev ){
+ pIter->iSwitchRowid = pOther->iRowid;
+ pNew = pOther;
+ }else if( (pOther->iRowid>pIter->iSwitchRowid)==pIter->bRev ){
+ pIter->iSwitchRowid = pOther->iRowid;
+ }
+ }
+ pRes->iFirst = (u16)(pNew - pIter->aSeg);
+ if( i==1 ) break;
+
+ pOther = &pIter->aSeg[ pIter->aFirst[i ^ 0x0001].iFirst ];
+ }
+ }
+
+ *ppFirst = pNew;
+ return 0;
+}
+
+/*
+** Set the pIter->bEof variable based on the state of the sub-iterators.
+*/
+static void fts5MultiIterSetEof(Fts5Iter *pIter){
+ Fts5SegIter *pSeg = &pIter->aSeg[ pIter->aFirst[1].iFirst ];
+ pIter->base.bEof = pSeg->pLeaf==0;
+ pIter->iSwitchRowid = pSeg->iRowid;
+}
+
+/*
+** Move the iterator to the next entry.
+**
+** If an error occurs, an error code is left in Fts5Index.rc. It is not
+** considered an error if the iterator reaches EOF, or if it is already at
+** EOF when this function is called.
+*/
+static void fts5MultiIterNext(
+ Fts5Index *p,
+ Fts5Iter *pIter,
+ int bFrom, /* True if argument iFrom is valid */
+ i64 iFrom /* Advance at least as far as this */
+){
+ int bUseFrom = bFrom;
+ assert( pIter->base.bEof==0 );
+ while( p->rc==SQLITE_OK ){
+ int iFirst = pIter->aFirst[1].iFirst;
+ int bNewTerm = 0;
+ Fts5SegIter *pSeg = &pIter->aSeg[iFirst];
+ assert( p->rc==SQLITE_OK );
+ if( bUseFrom && pSeg->pDlidx ){
+ fts5SegIterNextFrom(p, pSeg, iFrom);
+ }else{
+ pSeg->xNext(p, pSeg, &bNewTerm);
+ }
+
+ if( pSeg->pLeaf==0 || bNewTerm
+ || fts5MultiIterAdvanceRowid(pIter, iFirst, &pSeg)
+ ){
+ fts5MultiIterAdvanced(p, pIter, iFirst, 1);
+ fts5MultiIterSetEof(pIter);
+ pSeg = &pIter->aSeg[pIter->aFirst[1].iFirst];
+ if( pSeg->pLeaf==0 ) return;
+ }
+
+ fts5AssertMultiIterSetup(p, pIter);
+ assert( pSeg==&pIter->aSeg[pIter->aFirst[1].iFirst] && pSeg->pLeaf );
+ if( pIter->bSkipEmpty==0 || pSeg->nPos ){
+ pIter->xSetOutputs(pIter, pSeg);
+ return;
+ }
+ bUseFrom = 0;
+ }
+}
+
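+/*
+** Move iterator pIter to the next entry, skipping over any entries that
+** consist of a delete marker only. Output parameter *pbNewTerm is set to
+** true if the iterator may have advanced to a new term.
+*/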
+static void fts5MultiIterNext2(
+ Fts5Index *p,
+ Fts5Iter *pIter,
+ int *pbNewTerm /* OUT: True if *might* be new term */
+){
+ assert( pIter->bSkipEmpty );
+ if( p->rc==SQLITE_OK ){
+ *pbNewTerm = 0;
+ do{
+ int iFirst = pIter->aFirst[1].iFirst;
+ Fts5SegIter *pSeg = &pIter->aSeg[iFirst];
+ int bNewTerm = 0;
+
+ assert( p->rc==SQLITE_OK );
+ pSeg->xNext(p, pSeg, &bNewTerm);
+ if( pSeg->pLeaf==0 || bNewTerm
+ || fts5MultiIterAdvanceRowid(pIter, iFirst, &pSeg)
+ ){
+ fts5MultiIterAdvanced(p, pIter, iFirst, 1);
+ fts5MultiIterSetEof(pIter);
+ *pbNewTerm = 1;
+ }
+ fts5AssertMultiIterSetup(p, pIter);
+
+ }while( fts5MultiIterIsEmpty(p, pIter) );
+ }
+}
+
+static void fts5IterSetOutputs_Noop(Fts5Iter *pUnused1, Fts5SegIter *pUnused2){
+ UNUSED_PARAM2(pUnused1, pUnused2);
+}
+
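+/*
+** Allocate a new Fts5Iter object with space for nSeg segment iterators.
+** The slot count is rounded up to a power of two so that aFirst[] can
+** form a complete binary tree, and the aSeg[] and aFirst[] arrays are
+** allocated as part of the same block as the Fts5Iter itself.
+*/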
+static Fts5Iter *fts5MultiIterAlloc(
+ Fts5Index *p, /* FTS5 backend to iterate within */
+ int nSeg
+){
+ Fts5Iter *pNew;
+ int nSlot; /* Power of two >= nSeg */
+
+ for(nSlot=2; nSlot<nSeg; nSlot=nSlot*2);
+ pNew = fts5IdxMalloc(p,
+ sizeof(Fts5Iter) + /* pNew */
+ sizeof(Fts5SegIter) * (nSlot-1) + /* pNew->aSeg[] */
+ sizeof(Fts5CResult) * nSlot /* pNew->aFirst[] */
+ );
+ if( pNew ){
+ pNew->nSeg = nSlot;
+ pNew->aFirst = (Fts5CResult*)&pNew->aSeg[nSlot];
+ pNew->pIndex = p;
+ pNew->xSetOutputs = fts5IterSetOutputs_Noop;
+ }
+ return pNew;
+}
+
+static void fts5PoslistCallback(
+ Fts5Index *pUnused,
+ void *pContext,
+ const u8 *pChunk, int nChunk
+){
+ UNUSED_PARAM(pUnused);
+ assert_nc( nChunk>=0 );
+ if( nChunk>0 ){
+ fts5BufferSafeAppendBlob((Fts5Buffer*)pContext, pChunk, nChunk);
+ }
+}
+
+typedef struct PoslistCallbackCtx PoslistCallbackCtx;
+struct PoslistCallbackCtx {
+ Fts5Buffer *pBuf; /* Append to this buffer */
+ Fts5Colset *pColset; /* Restrict matches to this column */
+ int eState; /* See above */
+};
+
+typedef struct PoslistOffsetsCtx PoslistOffsetsCtx;
+struct PoslistOffsetsCtx {
+ Fts5Buffer *pBuf; /* Append to this buffer */
+ Fts5Colset *pColset; /* Restrict matches to this column */
+ int iRead;
+ int iWrite;
+};
+
+/*
+** TODO: Make this more efficient!
+*/
+static int fts5IndexColsetTest(Fts5Colset *pColset, int iCol){
+ int i;
+ for(i=0; i<pColset->nCol; i++){
+ if( pColset->aiCol[i]==iCol ) return 1;
+ }
+ return 0;
+}
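+/* A possible alternative (hypothetical, not part of FTS5): since
+** Fts5Colset.aiCol[] holds column numbers in ascending order (the
+** detail=col output paths below rely on this), the linear scan above
+** could be replaced by a binary search when the column set is large. */
+static int fts5ExampleColsetTestBsearch(Fts5Colset *pColset, int iCol){
+  int iLo = 0;
+  int iHi = pColset->nCol - 1;
+  while( iLo<=iHi ){
+    int iMid = (iLo+iHi)/2;
+    if( pColset->aiCol[iMid]==iCol ) return 1;
+    if( pColset->aiCol[iMid]<iCol ){
+      iLo = iMid+1;
+    }else{
+      iHi = iMid-1;
+    }
+  }
+  return 0;
+}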
+
+static void fts5PoslistOffsetsCallback(
+ Fts5Index *pUnused,
+ void *pContext,
+ const u8 *pChunk, int nChunk
+){
+ PoslistOffsetsCtx *pCtx = (PoslistOffsetsCtx*)pContext;
+ UNUSED_PARAM(pUnused);
+ assert_nc( nChunk>=0 );
+ if( nChunk>0 ){
+ int i = 0;
+ while( i<nChunk ){
+ int iVal;
+ i += fts5GetVarint32(&pChunk[i], iVal);
+ iVal += pCtx->iRead - 2;
+ pCtx->iRead = iVal;
+ if( fts5IndexColsetTest(pCtx->pColset, iVal) ){
+ fts5BufferSafeAppendVarint(pCtx->pBuf, iVal + 2 - pCtx->iWrite);
+ pCtx->iWrite = iVal;
+ }
+ }
+ }
+}
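+/* Illustrative sketch (not part of FTS5): in detail=col mode each stored
+** value is the delta from the previous column number (starting from 0),
+** offset by 2 so that the reserved byte values 0x00 and 0x01 can never
+** appear inside a column list. The callback above decodes with
+** (iVal + iRead - 2); the hypothetical encoder below is its inverse.
+** For example, columns {0, 3, 7} are stored as the values {2, 5, 6}. */
+static void fts5ExampleEncodeColDeltas(
+  const int *aCol, int nCol,    /* Ascending column numbers to encode */
+  int *aOut                     /* OUT: nCol stored values */
+){
+  int i;
+  int iPrev = 0;
+  for(i=0; i<nCol; i++){
+    aOut[i] = aCol[i] - iPrev + 2;   /* Delta from previous column, plus 2 */
+    iPrev = aCol[i];
+  }
+}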
+
+static void fts5PoslistFilterCallback(
+ Fts5Index *pUnused,
+ void *pContext,
+ const u8 *pChunk, int nChunk
+){
+ PoslistCallbackCtx *pCtx = (PoslistCallbackCtx*)pContext;
+ UNUSED_PARAM(pUnused);
+ assert_nc( nChunk>=0 );
+ if( nChunk>0 ){
+ /* Search through to find the first varint with value 1. This is the
+    ** start of the next column's hits. */
+ int i = 0;
+ int iStart = 0;
+
+ if( pCtx->eState==2 ){
+ int iCol;
+ fts5FastGetVarint32(pChunk, i, iCol);
+ if( fts5IndexColsetTest(pCtx->pColset, iCol) ){
+ pCtx->eState = 1;
+ fts5BufferSafeAppendVarint(pCtx->pBuf, 1);
+ }else{
+ pCtx->eState = 0;
+ }
+ }
+
+ do {
+ while( i<nChunk && pChunk[i]!=0x01 ){
+ while( pChunk[i] & 0x80 ) i++;
+ i++;
+ }
+ if( pCtx->eState ){
+ fts5BufferSafeAppendBlob(pCtx->pBuf, &pChunk[iStart], i-iStart);
+ }
+ if( i<nChunk ){
+ int iCol;
+ iStart = i;
+ i++;
+ if( i>=nChunk ){
+ pCtx->eState = 2;
+ }else{
+ fts5FastGetVarint32(pChunk, i, iCol);
+ pCtx->eState = fts5IndexColsetTest(pCtx->pColset, iCol);
+ if( pCtx->eState ){
+ fts5BufferSafeAppendBlob(pCtx->pBuf, &pChunk[iStart], i-iStart);
+ iStart = i;
+ }
+ }
+ }
+ }while( i<nChunk );
+ }
+}
+
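+/*
+** Invoke the xChunk callback once for each contiguous chunk of the
+** position list that iterator pSeg currently points to. The position
+** list may span multiple leaf pages; pages after the first are loaded
+** from disk as required.
+*/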
+static void fts5ChunkIterate(
+ Fts5Index *p, /* Index object */
+ Fts5SegIter *pSeg, /* Poslist of this iterator */
+ void *pCtx, /* Context pointer for xChunk callback */
+ void (*xChunk)(Fts5Index*, void*, const u8*, int)
+){
+ int nRem = pSeg->nPos; /* Number of bytes still to come */
+ Fts5Data *pData = 0;
+ u8 *pChunk = &pSeg->pLeaf->p[pSeg->iLeafOffset];
+ int nChunk = MIN(nRem, pSeg->pLeaf->szLeaf - pSeg->iLeafOffset);
+ int pgno = pSeg->iLeafPgno;
+ int pgnoSave = 0;
+
+  /* This function does not work with detail=none databases. */
+ assert( p->pConfig->eDetail!=FTS5_DETAIL_NONE );
+
+ if( (pSeg->flags & FTS5_SEGITER_REVERSE)==0 ){
+ pgnoSave = pgno+1;
+ }
+
+ while( 1 ){
+ xChunk(p, pCtx, pChunk, nChunk);
+ nRem -= nChunk;
+ fts5DataRelease(pData);
+ if( nRem<=0 ){
+ break;
+ }else{
+ pgno++;
+ pData = fts5LeafRead(p, FTS5_SEGMENT_ROWID(pSeg->pSeg->iSegid, pgno));
+ if( pData==0 ) break;
+ pChunk = &pData->p[4];
+ nChunk = MIN(nRem, pData->szLeaf - 4);
+ if( pgno==pgnoSave ){
+ assert( pSeg->pNextLeaf==0 );
+ pSeg->pNextLeaf = pData;
+ pData = 0;
+ }
+ }
+ }
+}
+
+/*
+** Iterator pIter currently points to a valid entry (not EOF). This
+** function appends the position list data for the current entry to
+** buffer pBuf. It does not make a copy of the position-list size
+** field.
+*/
+static void fts5SegiterPoslist(
+ Fts5Index *p,
+ Fts5SegIter *pSeg,
+ Fts5Colset *pColset,
+ Fts5Buffer *pBuf
+){
+ if( 0==fts5BufferGrow(&p->rc, pBuf, pSeg->nPos+FTS5_DATA_ZERO_PADDING) ){
+ memset(&pBuf->p[pBuf->n+pSeg->nPos], 0, FTS5_DATA_ZERO_PADDING);
+ if( pColset==0 ){
+ fts5ChunkIterate(p, pSeg, (void*)pBuf, fts5PoslistCallback);
+ }else{
+ if( p->pConfig->eDetail==FTS5_DETAIL_FULL ){
+ PoslistCallbackCtx sCtx;
+ sCtx.pBuf = pBuf;
+ sCtx.pColset = pColset;
+ sCtx.eState = fts5IndexColsetTest(pColset, 0);
+ assert( sCtx.eState==0 || sCtx.eState==1 );
+ fts5ChunkIterate(p, pSeg, (void*)&sCtx, fts5PoslistFilterCallback);
+ }else{
+ PoslistOffsetsCtx sCtx;
+ memset(&sCtx, 0, sizeof(sCtx));
+ sCtx.pBuf = pBuf;
+ sCtx.pColset = pColset;
+ fts5ChunkIterate(p, pSeg, (void*)&sCtx, fts5PoslistOffsetsCallback);
+ }
+ }
+ }
+}
+
+/*
+** IN/OUT parameter (*pa) points to a position list n bytes in size. If
+** the position list contains entries for column iCol, then (*pa) is set
+** to point to the sub-position-list for that column and the number of
+** bytes in it is returned. Or, if the argument position list does not
+** contain any entries for column iCol, return 0.
+*/
+static int fts5IndexExtractCol(
+ const u8 **pa, /* IN/OUT: Pointer to poslist */
+ int n, /* IN: Size of poslist in bytes */
+ int iCol /* Column to extract from poslist */
+){
+ int iCurrent = 0; /* Anything before the first 0x01 is col 0 */
+ const u8 *p = *pa;
+ const u8 *pEnd = &p[n]; /* One byte past end of position list */
+
+ while( iCol>iCurrent ){
+ /* Advance pointer p until it points to pEnd or an 0x01 byte that is
+ ** not part of a varint. Note that it is not possible for a negative
+ ** or extremely large varint to occur within an uncorrupted position
+ ** list. So the last byte of each varint may be assumed to have a clear
+ ** 0x80 bit. */
+ while( *p!=0x01 ){
+ while( *p++ & 0x80 );
+ if( p>=pEnd ) return 0;
+ }
+ *pa = p++;
+ iCurrent = *p++;
+ if( iCurrent & 0x80 ){
+ p--;
+ p += fts5GetVarint32(p, iCurrent);
+ }
+ }
+ if( iCol!=iCurrent ) return 0;
+
+ /* Advance pointer p until it points to pEnd or an 0x01 byte that is
+ ** not part of a varint */
+ while( p<pEnd && *p!=0x01 ){
+ while( *p++ & 0x80 );
+ }
+
+ return p - (*pa);
+}
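+/* Illustrative usage (hypothetical): extract the sub-position-list for a
+** single column from a full-detail position list (pPos/nPos). On success
+** *ppSub points at the sub-list and its size in bytes is returned; if the
+** list has no entries for that column, 0 is returned. */
+static int fts5ExampleExtractOneCol(
+  const u8 *pPos, int nPos,     /* Full position list */
+  int iCol,                     /* Column to extract */
+  const u8 **ppSub              /* OUT: sub-list for column iCol */
+){
+  *ppSub = pPos;
+  return fts5IndexExtractCol(ppSub, nPos, iCol);
+}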
+
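+/*
+** Copy the sub-position-lists for each column in pColset from position
+** list (pPos/nPos) into output buffer pBuf. If an error occurs, leave an
+** error code in *pRc. A no-op if *pRc is other than SQLITE_OK on entry.
+*/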
+static void fts5IndexExtractColset(
+ int *pRc,
+ Fts5Colset *pColset, /* Colset to filter on */
+ const u8 *pPos, int nPos, /* Position list */
+ Fts5Buffer *pBuf /* Output buffer */
+){
+ if( *pRc==SQLITE_OK ){
+ int i;
+ fts5BufferZero(pBuf);
+ for(i=0; i<pColset->nCol; i++){
+ const u8 *pSub = pPos;
+ int nSub = fts5IndexExtractCol(&pSub, nPos, pColset->aiCol[i]);
+ if( nSub ){
+ fts5BufferAppendBlob(pRc, pBuf, nSub, pSub);
+ }
+ }
+ }
+}
+
+/*
+** xSetOutputs callback used by detail=none tables.
+*/
+static void fts5IterSetOutputs_None(Fts5Iter *pIter, Fts5SegIter *pSeg){
+ assert( pIter->pIndex->pConfig->eDetail==FTS5_DETAIL_NONE );
+ pIter->base.iRowid = pSeg->iRowid;
+ pIter->base.nData = pSeg->nPos;
+}
+
+/*
+** xSetOutputs callback used by detail=full and detail=col tables when no
+** column filters are specified.
+*/
+static void fts5IterSetOutputs_Nocolset(Fts5Iter *pIter, Fts5SegIter *pSeg){
+ pIter->base.iRowid = pSeg->iRowid;
+ pIter->base.nData = pSeg->nPos;
+
+ assert( pIter->pIndex->pConfig->eDetail!=FTS5_DETAIL_NONE );
+ assert( pIter->pColset==0 );
+
+ if( pSeg->iLeafOffset+pSeg->nPos<=pSeg->pLeaf->szLeaf ){
+ /* All data is stored on the current page. Populate the output
+ ** variables to point into the body of the page object. */
+ pIter->base.pData = &pSeg->pLeaf->p[pSeg->iLeafOffset];
+ }else{
+ /* The data is distributed over two or more pages. Copy it into the
+ ** Fts5Iter.poslist buffer and then set the output pointer to point
+ ** to this buffer. */
+ fts5BufferZero(&pIter->poslist);
+ fts5SegiterPoslist(pIter->pIndex, pSeg, 0, &pIter->poslist);
+ pIter->base.pData = pIter->poslist.p;
+ }
+}
+
+/*
+** xSetOutputs callback used when the Fts5Colset object has nCol==0 (match
+** against no columns at all).
+*/
+static void fts5IterSetOutputs_ZeroColset(Fts5Iter *pIter, Fts5SegIter *pSeg){
+ UNUSED_PARAM(pSeg);
+ pIter->base.nData = 0;
+}
+
+/*
+** xSetOutputs callback used by detail=col when there is a column filter
+** and there are 100 or more columns. Also called as a fallback from
+** fts5IterSetOutputs_Col100 if the column-list spans more than one page.
+*/
+static void fts5IterSetOutputs_Col(Fts5Iter *pIter, Fts5SegIter *pSeg){
+ fts5BufferZero(&pIter->poslist);
+ fts5SegiterPoslist(pIter->pIndex, pSeg, pIter->pColset, &pIter->poslist);
+ pIter->base.iRowid = pSeg->iRowid;
+ pIter->base.pData = pIter->poslist.p;
+ pIter->base.nData = pIter->poslist.n;
+}
+
+/*
+** xSetOutputs callback used when:
+**
+** * detail=col,
+** * there is a column filter, and
+** * the table contains 100 or fewer columns.
+**
+** The last point is to ensure all column numbers are stored as
+** single-byte varints.
+*/
+static void fts5IterSetOutputs_Col100(Fts5Iter *pIter, Fts5SegIter *pSeg){
+
+ assert( pIter->pIndex->pConfig->eDetail==FTS5_DETAIL_COLUMNS );
+ assert( pIter->pColset );
+
+ if( pSeg->iLeafOffset+pSeg->nPos>pSeg->pLeaf->szLeaf ){
+ fts5IterSetOutputs_Col(pIter, pSeg);
+ }else{
+ u8 *a = (u8*)&pSeg->pLeaf->p[pSeg->iLeafOffset];
+ u8 *pEnd = (u8*)&a[pSeg->nPos];
+ int iPrev = 0;
+ int *aiCol = pIter->pColset->aiCol;
+ int *aiColEnd = &aiCol[pIter->pColset->nCol];
+
+ u8 *aOut = pIter->poslist.p;
+ int iPrevOut = 0;
+
+ pIter->base.iRowid = pSeg->iRowid;
+
+ while( a<pEnd ){
+ iPrev += (int)a++[0] - 2;
+ while( *aiCol<iPrev ){
+ aiCol++;
+ if( aiCol==aiColEnd ) goto setoutputs_col_out;
+ }
+ if( *aiCol==iPrev ){
+ *aOut++ = (u8)((iPrev - iPrevOut) + 2);
+ iPrevOut = iPrev;
+ }
+ }
+
+setoutputs_col_out:
+ pIter->base.pData = pIter->poslist.p;
+ pIter->base.nData = aOut - pIter->poslist.p;
+ }
+}
+
+/*
+** xSetOutputs callback used by detail=full when there is a column filter.
+*/
+static void fts5IterSetOutputs_Full(Fts5Iter *pIter, Fts5SegIter *pSeg){
+ Fts5Colset *pColset = pIter->pColset;
+ pIter->base.iRowid = pSeg->iRowid;
+
+ assert( pIter->pIndex->pConfig->eDetail==FTS5_DETAIL_FULL );
+ assert( pColset );
+
+ if( pSeg->iLeafOffset+pSeg->nPos<=pSeg->pLeaf->szLeaf ){
+ /* All data is stored on the current page. Populate the output
+ ** variables to point into the body of the page object. */
+ const u8 *a = &pSeg->pLeaf->p[pSeg->iLeafOffset];
+ if( pColset->nCol==1 ){
+ pIter->base.nData = fts5IndexExtractCol(&a, pSeg->nPos,pColset->aiCol[0]);
+ pIter->base.pData = a;
+ }else{
+ int *pRc = &pIter->pIndex->rc;
+ fts5BufferZero(&pIter->poslist);
+ fts5IndexExtractColset(pRc, pColset, a, pSeg->nPos, &pIter->poslist);
+ pIter->base.pData = pIter->poslist.p;
+ pIter->base.nData = pIter->poslist.n;
+ }
+ }else{
+ /* The data is distributed over two or more pages. Copy it into the
+ ** Fts5Iter.poslist buffer and then set the output pointer to point
+ ** to this buffer. */
+ fts5BufferZero(&pIter->poslist);
+ fts5SegiterPoslist(pIter->pIndex, pSeg, pColset, &pIter->poslist);
+ pIter->base.pData = pIter->poslist.p;
+ pIter->base.nData = pIter->poslist.n;
+ }
+}
+
+static void fts5IterSetOutputCb(int *pRc, Fts5Iter *pIter){
+ if( *pRc==SQLITE_OK ){
+ Fts5Config *pConfig = pIter->pIndex->pConfig;
+ if( pConfig->eDetail==FTS5_DETAIL_NONE ){
+ pIter->xSetOutputs = fts5IterSetOutputs_None;
+ }
+
+ else if( pIter->pColset==0 ){
+ pIter->xSetOutputs = fts5IterSetOutputs_Nocolset;
+ }
+
+ else if( pIter->pColset->nCol==0 ){
+ pIter->xSetOutputs = fts5IterSetOutputs_ZeroColset;
+ }
+
+ else if( pConfig->eDetail==FTS5_DETAIL_FULL ){
+ pIter->xSetOutputs = fts5IterSetOutputs_Full;
+ }
+
+ else{
+ assert( pConfig->eDetail==FTS5_DETAIL_COLUMNS );
+ if( pConfig->nCol<=100 ){
+ pIter->xSetOutputs = fts5IterSetOutputs_Col100;
+ sqlite3Fts5BufferSize(pRc, &pIter->poslist, pConfig->nCol);
+ }else{
+ pIter->xSetOutputs = fts5IterSetOutputs_Col;
+ }
+ }
+ }
+}
+
+
+/*
+** Allocate a new Fts5Iter object.
+**
+** The new object will be used to iterate through data in structure pStruct.
+** If iLevel is negative, then all data in all segments is merged. Or, if
+** iLevel is zero or greater, data from the first nSegment segments on
+** level iLevel is merged.
+**
+** The iterator initially points to the first term/rowid entry in the
+** iterated data.
+*/
+static void fts5MultiIterNew(
+ Fts5Index *p, /* FTS5 backend to iterate within */
+ Fts5Structure *pStruct, /* Structure of specific index */
+ int flags, /* FTS5INDEX_QUERY_XXX flags */
+ Fts5Colset *pColset, /* Colset to filter on (or NULL) */
+ const u8 *pTerm, int nTerm, /* Term to seek to (or NULL/0) */
+ int iLevel, /* Level to iterate (-1 for all) */
+ int nSegment, /* Number of segments to merge (iLevel>=0) */
+ Fts5Iter **ppOut /* New object */
+){
+ int nSeg = 0; /* Number of segment-iters in use */
+  int iIter = 0;              /* Index of next unused entry in pNew->aSeg[] */
+ int iSeg; /* Used to iterate through segments */
+ Fts5StructureLevel *pLvl;
+ Fts5Iter *pNew;
+
+ assert( (pTerm==0 && nTerm==0) || iLevel<0 );
+
+ /* Allocate space for the new multi-seg-iterator. */
+ if( p->rc==SQLITE_OK ){
+ if( iLevel<0 ){
+ assert( pStruct->nSegment==fts5StructureCountSegments(pStruct) );
+ nSeg = pStruct->nSegment;
+ nSeg += (p->pHash ? 1 : 0);
+ }else{
+ nSeg = MIN(pStruct->aLevel[iLevel].nSeg, nSegment);
+ }
+ }
+ *ppOut = pNew = fts5MultiIterAlloc(p, nSeg);
+ if( pNew==0 ) return;
+ pNew->bRev = (0!=(flags & FTS5INDEX_QUERY_DESC));
+ pNew->bSkipEmpty = (0!=(flags & FTS5INDEX_QUERY_SKIPEMPTY));
+ pNew->pColset = pColset;
+ if( (flags & FTS5INDEX_QUERY_NOOUTPUT)==0 ){
+ fts5IterSetOutputCb(&p->rc, pNew);
+ }
+
+ /* Initialize each of the component segment iterators. */
+ if( p->rc==SQLITE_OK ){
+ if( iLevel<0 ){
+ Fts5StructureLevel *pEnd = &pStruct->aLevel[pStruct->nLevel];
+ if( p->pHash ){
+ /* Add a segment iterator for the current contents of the hash table. */
+ Fts5SegIter *pIter = &pNew->aSeg[iIter++];
+ fts5SegIterHashInit(p, pTerm, nTerm, flags, pIter);
+ }
+ for(pLvl=&pStruct->aLevel[0]; pLvl<pEnd; pLvl++){
+ for(iSeg=pLvl->nSeg-1; iSeg>=0; iSeg--){
+ Fts5StructureSegment *pSeg = &pLvl->aSeg[iSeg];
+ Fts5SegIter *pIter = &pNew->aSeg[iIter++];
+ if( pTerm==0 ){
+ fts5SegIterInit(p, pSeg, pIter);
+ }else{
+ fts5SegIterSeekInit(p, pTerm, nTerm, flags, pSeg, pIter);
+ }
+ }
+ }
+ }else{
+ pLvl = &pStruct->aLevel[iLevel];
+ for(iSeg=nSeg-1; iSeg>=0; iSeg--){
+ fts5SegIterInit(p, &pLvl->aSeg[iSeg], &pNew->aSeg[iIter++]);
+ }
+ }
+ assert( iIter==nSeg );
+ }
+
+  /* If the above was successful, each component iterator now points
+ ** to the first entry in its segment. In this case initialize the
+ ** aFirst[] array. Or, if an error has occurred, free the iterator
+ ** object and set the output variable to NULL. */
+ if( p->rc==SQLITE_OK ){
+ for(iIter=pNew->nSeg-1; iIter>0; iIter--){
+ int iEq;
+ if( (iEq = fts5MultiIterDoCompare(pNew, iIter)) ){
+ Fts5SegIter *pSeg = &pNew->aSeg[iEq];
+ if( p->rc==SQLITE_OK ) pSeg->xNext(p, pSeg, 0);
+ fts5MultiIterAdvanced(p, pNew, iEq, iIter);
+ }
+ }
+ fts5MultiIterSetEof(pNew);
+ fts5AssertMultiIterSetup(p, pNew);
+
+ if( pNew->bSkipEmpty && fts5MultiIterIsEmpty(p, pNew) ){
+ fts5MultiIterNext(p, pNew, 0, 0);
+ }else if( pNew->base.bEof==0 ){
+ Fts5SegIter *pSeg = &pNew->aSeg[pNew->aFirst[1].iFirst];
+ pNew->xSetOutputs(pNew, pSeg);
+ }
+
+ }else{
+ fts5MultiIterFree(pNew);
+ *ppOut = 0;
+ }
+}
+
+/*
+** Create an Fts5Iter that iterates through the doclist provided
+** as the second argument.
+*/
+static void fts5MultiIterNew2(
+ Fts5Index *p, /* FTS5 backend to iterate within */
+ Fts5Data *pData, /* Doclist to iterate through */
+ int bDesc, /* True for descending rowid order */
+ Fts5Iter **ppOut /* New object */
+){
+ Fts5Iter *pNew;
+ pNew = fts5MultiIterAlloc(p, 2);
+ if( pNew ){
+ Fts5SegIter *pIter = &pNew->aSeg[1];
+
+ pIter->flags = FTS5_SEGITER_ONETERM;
+ if( pData->szLeaf>0 ){
+ pIter->pLeaf = pData;
+ pIter->iLeafOffset = fts5GetVarint(pData->p, (u64*)&pIter->iRowid);
+ pIter->iEndofDoclist = pData->nn;
+ pNew->aFirst[1].iFirst = 1;
+ if( bDesc ){
+ pNew->bRev = 1;
+ pIter->flags |= FTS5_SEGITER_REVERSE;
+ fts5SegIterReverseInitPage(p, pIter);
+ }else{
+ fts5SegIterLoadNPos(p, pIter);
+ }
+ pData = 0;
+ }else{
+ pNew->base.bEof = 1;
+ }
+ fts5SegIterSetNext(p, pIter);
+
+ *ppOut = pNew;
+ }
+
+ fts5DataRelease(pData);
+}
+
+/*
+** Return true if the iterator is at EOF or if an error has occurred.
+** False otherwise.
+*/
+static int fts5MultiIterEof(Fts5Index *p, Fts5Iter *pIter){
+ assert( p->rc
+ || (pIter->aSeg[ pIter->aFirst[1].iFirst ].pLeaf==0)==pIter->base.bEof
+ );
+ return (p->rc || pIter->base.bEof);
+}
+
+/*
+** Return the rowid of the entry that the iterator currently points
+** to. If the iterator points to EOF when this function is called the
+** results are undefined.
+*/
+static i64 fts5MultiIterRowid(Fts5Iter *pIter){
+ assert( pIter->aSeg[ pIter->aFirst[1].iFirst ].pLeaf );
+ return pIter->aSeg[ pIter->aFirst[1].iFirst ].iRowid;
+}
+
+/*
+** Move the iterator to the next entry at or following iMatch.
+*/
+static void fts5MultiIterNextFrom(
+ Fts5Index *p,
+ Fts5Iter *pIter,
+ i64 iMatch
+){
+ while( 1 ){
+ i64 iRowid;
+ fts5MultiIterNext(p, pIter, 1, iMatch);
+ if( fts5MultiIterEof(p, pIter) ) break;
+ iRowid = fts5MultiIterRowid(pIter);
+ if( pIter->bRev==0 && iRowid>=iMatch ) break;
+ if( pIter->bRev!=0 && iRowid<=iMatch ) break;
+ }
+}
+
+/*
+** Return a pointer to a buffer containing the term associated with the
+** entry that the iterator currently points to.
+*/
+static const u8 *fts5MultiIterTerm(Fts5Iter *pIter, int *pn){
+ Fts5SegIter *p = &pIter->aSeg[ pIter->aFirst[1].iFirst ];
+ *pn = p->term.n;
+ return p->term.p;
+}
+
+/*
+** Allocate a new segment-id for the structure pStruct. The new segment
+** id must be between 1 and 65535 inclusive, and must not be used by
+** any currently existing segment. If a free segment id cannot be found,
+** SQLITE_FULL is returned.
+**
+** If an error has already occurred, this function is a no-op. 0 is
+** returned in this case.
+*/
+static int fts5AllocateSegid(Fts5Index *p, Fts5Structure *pStruct){
+ int iSegid = 0;
+
+ if( p->rc==SQLITE_OK ){
+ if( pStruct->nSegment>=FTS5_MAX_SEGMENT ){
+ p->rc = SQLITE_FULL;
+ }else{
+ /* FTS5_MAX_SEGMENT is currently defined as 2000. So the following
+ ** array is 63 elements, or 252 bytes, in size. */
+ u32 aUsed[(FTS5_MAX_SEGMENT+31) / 32];
+ int iLvl, iSeg;
+ int i;
+ u32 mask;
+ memset(aUsed, 0, sizeof(aUsed));
+ for(iLvl=0; iLvl<pStruct->nLevel; iLvl++){
+ for(iSeg=0; iSeg<pStruct->aLevel[iLvl].nSeg; iSeg++){
+ int iId = pStruct->aLevel[iLvl].aSeg[iSeg].iSegid;
+ if( iId<=FTS5_MAX_SEGMENT && iId>0 ){
+ aUsed[(iId-1) / 32] |= (u32)1 << ((iId-1) % 32);
+ }
+ }
+ }
+
+ for(i=0; aUsed[i]==0xFFFFFFFF; i++);
+ mask = aUsed[i];
+ for(iSegid=0; mask & ((u32)1 << iSegid); iSegid++);
+ iSegid += 1 + i*32;
+
+#ifdef SQLITE_DEBUG
+ for(iLvl=0; iLvl<pStruct->nLevel; iLvl++){
+ for(iSeg=0; iSeg<pStruct->aLevel[iLvl].nSeg; iSeg++){
+ assert_nc( iSegid!=pStruct->aLevel[iLvl].aSeg[iSeg].iSegid );
+ }
+ }
+ assert_nc( iSegid>0 && iSegid<=FTS5_MAX_SEGMENT );
+
+ {
+ sqlite3_stmt *pIdxSelect = fts5IdxSelectStmt(p);
+ if( p->rc==SQLITE_OK ){
+ u8 aBlob[2] = {0xff, 0xff};
+ sqlite3_bind_int(pIdxSelect, 1, iSegid);
+ sqlite3_bind_blob(pIdxSelect, 2, aBlob, 2, SQLITE_STATIC);
+ assert_nc( sqlite3_step(pIdxSelect)!=SQLITE_ROW );
+ p->rc = sqlite3_reset(pIdxSelect);
+ sqlite3_bind_null(pIdxSelect, 2);
+ }
+ }
+#endif
+ }
+ }
+
+ return iSegid;
+}
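+/* Illustrative sketch (not part of FTS5): the segid search above is a
+** first-clear-bit scan over a bitmap of used ids. A hypothetical
+** standalone version, returning 0 if every id in the map is taken
+** (a case the caller above excludes by checking nSegment first): */
+static int fts5ExampleFirstFreeId(const u32 *aUsed, int nWord){
+  int i, iBit;
+  u32 mask;
+  for(i=0; i<nWord && aUsed[i]==0xFFFFFFFF; i++);
+  if( i==nWord ) return 0;            /* All ids in use */
+  mask = aUsed[i];
+  for(iBit=0; mask & ((u32)1 << iBit); iBit++);
+  return 1 + iBit + i*32;             /* Segment ids are 1-based */
+}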
+
+/*
+** Discard all data currently cached in the in-memory hash table.
+*/
+static void fts5IndexDiscardData(Fts5Index *p){
+ assert( p->pHash || p->nPendingData==0 );
+ if( p->pHash ){
+ sqlite3Fts5HashClear(p->pHash);
+ p->nPendingData = 0;
+ }
+}
+
+/*
+** Return the size of the prefix, in bytes, that buffer
+** (pNew/<length-unknown>) shares with buffer (pOld/nOld).
+**
+** Buffer (pNew/<length-unknown>) is guaranteed to be greater
+** than buffer (pOld/nOld).
+*/
+static int fts5PrefixCompress(int nOld, const u8 *pOld, const u8 *pNew){
+ int i;
+ for(i=0; i<nOld; i++){
+ if( pOld[i]!=pNew[i] ) break;
+ }
+ return i;
+}
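+/* Illustrative usage (hypothetical): writing term "alpine" immediately
+** after "alphabet" stores nKeep=3 (the shared prefix "alp") and the
+** 3-byte suffix "ine". */
+static int fts5ExamplePrefixCompress(void){
+  const u8 *pOld = (const u8*)"alphabet";
+  const u8 *pNew = (const u8*)"alpine";
+  return fts5PrefixCompress(8, pOld, pNew);   /* ==3: shared prefix "alp" */
+}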
+
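+/*
+** Reset the doclist-index writers in the pWriter->aDlidx[] array. If
+** parameter bFlush is true, first write each non-empty doclist-index
+** page to disk.
+*/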
+static void fts5WriteDlidxClear(
+ Fts5Index *p,
+ Fts5SegWriter *pWriter,
+ int bFlush /* If true, write dlidx to disk */
+){
+ int i;
+ assert( bFlush==0 || (pWriter->nDlidx>0 && pWriter->aDlidx[0].buf.n>0) );
+ for(i=0; i<pWriter->nDlidx; i++){
+ Fts5DlidxWriter *pDlidx = &pWriter->aDlidx[i];
+ if( pDlidx->buf.n==0 ) break;
+ if( bFlush ){
+ assert( pDlidx->pgno!=0 );
+ fts5DataWrite(p,
+ FTS5_DLIDX_ROWID(pWriter->iSegid, i, pDlidx->pgno),
+ pDlidx->buf.p, pDlidx->buf.n
+ );
+ }
+ sqlite3Fts5BufferZero(&pDlidx->buf);
+ pDlidx->bPrevValid = 0;
+ }
+}
+
+/*
+** Grow the pWriter->aDlidx[] array to at least nLvl elements in size.
+** Any new array elements are zeroed before returning.
+*/
+static int fts5WriteDlidxGrow(
+ Fts5Index *p,
+ Fts5SegWriter *pWriter,
+ int nLvl
+){
+ if( p->rc==SQLITE_OK && nLvl>=pWriter->nDlidx ){
+ Fts5DlidxWriter *aDlidx = (Fts5DlidxWriter*)sqlite3_realloc64(
+ pWriter->aDlidx, sizeof(Fts5DlidxWriter) * nLvl
+ );
+ if( aDlidx==0 ){
+ p->rc = SQLITE_NOMEM;
+ }else{
+ size_t nByte = sizeof(Fts5DlidxWriter) * (nLvl - pWriter->nDlidx);
+ memset(&aDlidx[pWriter->nDlidx], 0, nByte);
+ pWriter->aDlidx = aDlidx;
+ pWriter->nDlidx = nLvl;
+ }
+ }
+ return p->rc;
+}
+
+/*
+** If the current doclist-index accumulating in pWriter->aDlidx[] is large
+** enough, flush it to disk and return 1. Otherwise discard it and return
+** zero.
+*/
+static int fts5WriteFlushDlidx(Fts5Index *p, Fts5SegWriter *pWriter){
+ int bFlag = 0;
+
+ /* If there were FTS5_MIN_DLIDX_SIZE or more empty leaf pages written
+ ** to the database, also write the doclist-index to disk. */
+ if( pWriter->aDlidx[0].buf.n>0 && pWriter->nEmpty>=FTS5_MIN_DLIDX_SIZE ){
+ bFlag = 1;
+ }
+ fts5WriteDlidxClear(p, pWriter, bFlag);
+ pWriter->nEmpty = 0;
+ return bFlag;
+}
+
+/*
+** This function is called whenever processing of the doclist for the
+** last term on leaf page (pWriter->iBtPage) is completed.
+**
+** The doclist-index for that term is currently stored in-memory within the
+** Fts5SegWriter.aDlidx[] array. If it is large enough, this function
+** writes it out to disk. Or, if it is too small to bother with, discards
+** it.
+**
+** Fts5SegWriter.btterm currently contains the first term on page iBtPage.
+*/
+static void fts5WriteFlushBtree(Fts5Index *p, Fts5SegWriter *pWriter){
+ int bFlag;
+
+ assert( pWriter->iBtPage || pWriter->nEmpty==0 );
+ if( pWriter->iBtPage==0 ) return;
+ bFlag = fts5WriteFlushDlidx(p, pWriter);
+
+ if( p->rc==SQLITE_OK ){
+ const char *z = (pWriter->btterm.n>0?(const char*)pWriter->btterm.p:"");
+ /* The following was already done in fts5WriteInit(): */
+ /* sqlite3_bind_int(p->pIdxWriter, 1, pWriter->iSegid); */
+ sqlite3_bind_blob(p->pIdxWriter, 2, z, pWriter->btterm.n, SQLITE_STATIC);
+ sqlite3_bind_int64(p->pIdxWriter, 3, bFlag + ((i64)pWriter->iBtPage<<1));
+ sqlite3_step(p->pIdxWriter);
+ p->rc = sqlite3_reset(p->pIdxWriter);
+ sqlite3_bind_null(p->pIdxWriter, 2);
+ }
+ pWriter->iBtPage = 0;
+}
+
+/*
+** This is called once for each leaf page that contains at least one term,
+** except for the first such page. Argument (nTerm/pTerm) is the split-key -
+** a term that is larger than all terms written to earlier leaves, and equal
+** to or smaller than the first term on the new leaf.
+**
+** If an error occurs, an error code is left in Fts5Index.rc. If an error
+** has already occurred when this function is called, it is a no-op.
+*/
+static void fts5WriteBtreeTerm(
+ Fts5Index *p, /* FTS5 backend object */
+ Fts5SegWriter *pWriter, /* Writer object */
+ int nTerm, const u8 *pTerm /* First term on new page */
+){
+ fts5WriteFlushBtree(p, pWriter);
+ if( p->rc==SQLITE_OK ){
+ fts5BufferSet(&p->rc, &pWriter->btterm, nTerm, pTerm);
+ pWriter->iBtPage = pWriter->writer.pgno;
+ }
+}
+
+/*
+** This function is called when flushing a leaf page that contains no
+** terms at all to disk.
+*/
+static void fts5WriteBtreeNoTerm(
+ Fts5Index *p, /* FTS5 backend object */
+ Fts5SegWriter *pWriter /* Writer object */
+){
+ /* If there were no rowids on the leaf page either and the doclist-index
+  ** has already been started, append a 0x00 byte to it. */
+ if( pWriter->bFirstRowidInPage && pWriter->aDlidx[0].buf.n>0 ){
+ Fts5DlidxWriter *pDlidx = &pWriter->aDlidx[0];
+ assert( pDlidx->bPrevValid );
+ sqlite3Fts5BufferAppendVarint(&p->rc, &pDlidx->buf, 0);
+ }
+
+ /* Increment the "number of sequential leaves without a term" counter. */
+ pWriter->nEmpty++;
+}
+
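+/*
+** Buffer pBuf contains a doclist-index node. Skip past the flags byte at
+** the start of the node and the page-number varint that follows it, then
+** decode and return the first rowid stored in the node.
+*/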
+static i64 fts5DlidxExtractFirstRowid(Fts5Buffer *pBuf){
+ i64 iRowid;
+ int iOff;
+
+ iOff = 1 + fts5GetVarint(&pBuf->p[1], (u64*)&iRowid);
+ fts5GetVarint(&pBuf->p[iOff], (u64*)&iRowid);
+ return iRowid;
+}
+
+/*
+** Rowid iRowid has just been appended to the current leaf page. It is the
+** first on the page. This function appends an appropriate entry to the current
+** doclist-index.
+*/
+static void fts5WriteDlidxAppend(
+ Fts5Index *p,
+ Fts5SegWriter *pWriter,
+ i64 iRowid
+){
+ int i;
+ int bDone = 0;
+
+ for(i=0; p->rc==SQLITE_OK && bDone==0; i++){
+ i64 iVal;
+ Fts5DlidxWriter *pDlidx = &pWriter->aDlidx[i];
+
+ if( pDlidx->buf.n>=p->pConfig->pgsz ){
+ /* The current doclist-index page is full. Write it to disk and push
+ ** a copy of iRowid (which will become the first rowid on the next
+ ** doclist-index leaf page) up into the next level of the b-tree
+ ** hierarchy. If the node being flushed is currently the root node,
+ ** also push its first rowid upwards. */
+ pDlidx->buf.p[0] = 0x01; /* Not the root node */
+ fts5DataWrite(p,
+ FTS5_DLIDX_ROWID(pWriter->iSegid, i, pDlidx->pgno),
+ pDlidx->buf.p, pDlidx->buf.n
+ );
+ fts5WriteDlidxGrow(p, pWriter, i+2);
+ pDlidx = &pWriter->aDlidx[i];
+ if( p->rc==SQLITE_OK && pDlidx[1].buf.n==0 ){
+ i64 iFirst = fts5DlidxExtractFirstRowid(&pDlidx->buf);
+
+ /* This was the root node. Push its first rowid up to the new root. */
+ pDlidx[1].pgno = pDlidx->pgno;
+ sqlite3Fts5BufferAppendVarint(&p->rc, &pDlidx[1].buf, 0);
+ sqlite3Fts5BufferAppendVarint(&p->rc, &pDlidx[1].buf, pDlidx->pgno);
+ sqlite3Fts5BufferAppendVarint(&p->rc, &pDlidx[1].buf, iFirst);
+ pDlidx[1].bPrevValid = 1;
+ pDlidx[1].iPrev = iFirst;
+ }
+
+ sqlite3Fts5BufferZero(&pDlidx->buf);
+ pDlidx->bPrevValid = 0;
+ pDlidx->pgno++;
+ }else{
+ bDone = 1;
+ }
+
+ if( pDlidx->bPrevValid ){
+ iVal = iRowid - pDlidx->iPrev;
+ }else{
+ i64 iPgno = (i==0 ? pWriter->writer.pgno : pDlidx[-1].pgno);
+ assert( pDlidx->buf.n==0 );
+ sqlite3Fts5BufferAppendVarint(&p->rc, &pDlidx->buf, !bDone);
+ sqlite3Fts5BufferAppendVarint(&p->rc, &pDlidx->buf, iPgno);
+ iVal = iRowid;
+ }
+
+ sqlite3Fts5BufferAppendVarint(&p->rc, &pDlidx->buf, iVal);
+ pDlidx->bPrevValid = 1;
+ pDlidx->iPrev = iRowid;
+ }
+}
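+
+/* Note on the encoding used above: within each doclist-index node the first
+** rowid is stored as a full varint, and every subsequent entry stores only
+** the delta (iRowid - pDlidx->iPrev). This keeps nodes compact when rowids
+** are mostly ascending. */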
+
+static void fts5WriteFlushLeaf(Fts5Index *p, Fts5SegWriter *pWriter){
+ static const u8 zero[] = { 0x00, 0x00, 0x00, 0x00 };
+ Fts5PageWriter *pPage = &pWriter->writer;
+ i64 iRowid;
+
+ assert( (pPage->pgidx.n==0)==(pWriter->bFirstTermInPage) );
+
+ /* Set the szLeaf header field. */
+ assert( 0==fts5GetU16(&pPage->buf.p[2]) );
+ fts5PutU16(&pPage->buf.p[2], (u16)pPage->buf.n);
+
+ if( pWriter->bFirstTermInPage ){
+ /* No term was written to this page. */
+ assert( pPage->pgidx.n==0 );
+ fts5WriteBtreeNoTerm(p, pWriter);
+ }else{
+    /* Append the pgidx to the page buffer (szLeaf was already set above). */
+ fts5BufferAppendBlob(&p->rc, &pPage->buf, pPage->pgidx.n, pPage->pgidx.p);
+ }
+
+ /* Write the page out to disk */
+ iRowid = FTS5_SEGMENT_ROWID(pWriter->iSegid, pPage->pgno);
+ fts5DataWrite(p, iRowid, pPage->buf.p, pPage->buf.n);
+
+ /* Initialize the next page. */
+ fts5BufferZero(&pPage->buf);
+ fts5BufferZero(&pPage->pgidx);
+ fts5BufferAppendBlob(&p->rc, &pPage->buf, 4, zero);
+ pPage->iPrevPgidx = 0;
+ pPage->pgno++;
+
+ /* Increase the leaves written counter */
+ pWriter->nLeafWritten++;
+
+ /* The new leaf holds no terms or rowids */
+ pWriter->bFirstTermInPage = 1;
+ pWriter->bFirstRowidInPage = 1;
+}
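+
+/* In outline, the leaf page written above is laid out as follows: bytes 0-1
+** hold the offset of the first rowid on the page (or 0 if there is none),
+** bytes 2-3 hold "szLeaf" - the size in bytes of the header plus all term
+** and rowid data - and the page-index (pgidx) follows immediately after
+** the first szLeaf bytes. */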
+
+/*
+** Append term pTerm/nTerm to the segment being written by the writer passed
+** as the second argument.
+**
+** If an error occurs, set the Fts5Index.rc error code. If an error has
+** already occurred, this function is a no-op.
+*/
+static void fts5WriteAppendTerm(
+ Fts5Index *p,
+ Fts5SegWriter *pWriter,
+ int nTerm, const u8 *pTerm
+){
+ int nPrefix; /* Bytes of prefix compression for term */
+ Fts5PageWriter *pPage = &pWriter->writer;
+ Fts5Buffer *pPgidx = &pWriter->writer.pgidx;
+ int nMin = MIN(pPage->term.n, nTerm);
+
+ assert( p->rc==SQLITE_OK );
+ assert( pPage->buf.n>=4 );
+ assert( pPage->buf.n>4 || pWriter->bFirstTermInPage );
+
+ /* If the current leaf page is full, flush it to disk. */
+ if( (pPage->buf.n + pPgidx->n + nTerm + 2)>=p->pConfig->pgsz ){
+ if( pPage->buf.n>4 ){
+ fts5WriteFlushLeaf(p, pWriter);
+ if( p->rc!=SQLITE_OK ) return;
+ }
+ fts5BufferGrow(&p->rc, &pPage->buf, nTerm+FTS5_DATA_PADDING);
+ }
+
+ /* TODO1: Updating pgidx here. */
+ pPgidx->n += sqlite3Fts5PutVarint(
+ &pPgidx->p[pPgidx->n], pPage->buf.n - pPage->iPrevPgidx
+ );
+ pPage->iPrevPgidx = pPage->buf.n;
+#if 0
+ fts5PutU16(&pPgidx->p[pPgidx->n], pPage->buf.n);
+ pPgidx->n += 2;
+#endif
+
+ if( pWriter->bFirstTermInPage ){
+ nPrefix = 0;
+ if( pPage->pgno!=1 ){
+ /* This is the first term on a leaf that is not the leftmost leaf in
+ ** the segment b-tree. In this case it is necessary to add a term to
+ ** the b-tree hierarchy that is (a) larger than the largest term
+ ** already written to the segment and (b) smaller than or equal to
+ ** this term. In other words, a prefix of (pTerm/nTerm) that is one
+ ** byte longer than the longest prefix (pTerm/nTerm) shares with the
+ ** previous term.
+ **
+ ** Usually, the previous term is available in pPage->term. The exception
+ ** is if this is the first term written in an incremental-merge step.
+ ** In this case the previous term is not available, so just write a
+ ** copy of (pTerm/nTerm) into the parent node. This is slightly
+ ** inefficient, but still correct. */
+ int n = nTerm;
+ if( pPage->term.n ){
+ n = 1 + fts5PrefixCompress(nMin, pPage->term.p, pTerm);
+ }
+ fts5WriteBtreeTerm(p, pWriter, n, pTerm);
+ if( p->rc!=SQLITE_OK ) return;
+ pPage = &pWriter->writer;
+ }
+ }else{
+ nPrefix = fts5PrefixCompress(nMin, pPage->term.p, pTerm);
+ fts5BufferAppendVarint(&p->rc, &pPage->buf, nPrefix);
+ }
+
+ /* Append the number of bytes of new data, then the term data itself
+ ** to the page. */
+ fts5BufferAppendVarint(&p->rc, &pPage->buf, nTerm - nPrefix);
+ fts5BufferAppendBlob(&p->rc, &pPage->buf, nTerm - nPrefix, &pTerm[nPrefix]);
+
+ /* Update the Fts5PageWriter.term field. */
+ fts5BufferSet(&p->rc, &pPage->term, nTerm, pTerm);
+ pWriter->bFirstTermInPage = 0;
+
+ pWriter->bFirstRowidInPage = 0;
+ pWriter->bFirstRowidInDoclist = 1;
+
+ assert( p->rc || (pWriter->nDlidx>0 && pWriter->aDlidx[0].buf.n==0) );
+ pWriter->aDlidx[0].pgno = pPage->pgno;
+}
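+
+/* Worked example: if the previous term on the page is "alpha" and the new
+** term is "alphabet", then nPrefix==5 and the page receives the varint 5,
+** the varint 3 (number of new bytes), and the three suffix bytes "bet". */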
+
+/*
+** Append a rowid and position-list size field to the writer's output.
+*/
+static void fts5WriteAppendRowid(
+ Fts5Index *p,
+ Fts5SegWriter *pWriter,
+ i64 iRowid
+){
+ if( p->rc==SQLITE_OK ){
+ Fts5PageWriter *pPage = &pWriter->writer;
+
+ if( (pPage->buf.n + pPage->pgidx.n)>=p->pConfig->pgsz ){
+ fts5WriteFlushLeaf(p, pWriter);
+ }
+
+ /* If this is to be the first rowid written to the page, set the
+ ** rowid-pointer in the page-header. Also append a value to the dlidx
+ ** buffer, in case a doclist-index is required. */
+ if( pWriter->bFirstRowidInPage ){
+ fts5PutU16(pPage->buf.p, (u16)pPage->buf.n);
+ fts5WriteDlidxAppend(p, pWriter, iRowid);
+ }
+
+ /* Write the rowid. */
+ if( pWriter->bFirstRowidInDoclist || pWriter->bFirstRowidInPage ){
+ fts5BufferAppendVarint(&p->rc, &pPage->buf, iRowid);
+ }else{
+ assert_nc( p->rc || iRowid>pWriter->iPrevRowid );
+ fts5BufferAppendVarint(&p->rc, &pPage->buf, iRowid - pWriter->iPrevRowid);
+ }
+ pWriter->iPrevRowid = iRowid;
+ pWriter->bFirstRowidInDoclist = 0;
+ pWriter->bFirstRowidInPage = 0;
+ }
+}
+
+static void fts5WriteAppendPoslistData(
+ Fts5Index *p,
+ Fts5SegWriter *pWriter,
+ const u8 *aData,
+ int nData
+){
+ Fts5PageWriter *pPage = &pWriter->writer;
+ const u8 *a = aData;
+ int n = nData;
+
+ assert( p->pConfig->pgsz>0 );
+ while( p->rc==SQLITE_OK
+ && (pPage->buf.n + pPage->pgidx.n + n)>=p->pConfig->pgsz
+ ){
+ int nReq = p->pConfig->pgsz - pPage->buf.n - pPage->pgidx.n;
+ int nCopy = 0;
+ while( nCopy<nReq ){
+ i64 dummy;
+ nCopy += fts5GetVarint(&a[nCopy], (u64*)&dummy);
+ }
+ fts5BufferAppendBlob(&p->rc, &pPage->buf, nCopy, a);
+ a += nCopy;
+ n -= nCopy;
+ fts5WriteFlushLeaf(p, pWriter);
+ }
+ if( n>0 ){
+ fts5BufferAppendBlob(&p->rc, &pPage->buf, n, a);
+ }
+}
+
+/*
+** Flush any data cached by the writer object to the database. Free any
+** allocations associated with the writer.
+*/
+static void fts5WriteFinish(
+ Fts5Index *p,
+ Fts5SegWriter *pWriter, /* Writer object */
+ int *pnLeaf /* OUT: Number of leaf pages in b-tree */
+){
+ int i;
+ Fts5PageWriter *pLeaf = &pWriter->writer;
+ if( p->rc==SQLITE_OK ){
+ assert( pLeaf->pgno>=1 );
+ if( pLeaf->buf.n>4 ){
+ fts5WriteFlushLeaf(p, pWriter);
+ }
+ *pnLeaf = pLeaf->pgno-1;
+ if( pLeaf->pgno>1 ){
+ fts5WriteFlushBtree(p, pWriter);
+ }
+ }
+ fts5BufferFree(&pLeaf->term);
+ fts5BufferFree(&pLeaf->buf);
+ fts5BufferFree(&pLeaf->pgidx);
+ fts5BufferFree(&pWriter->btterm);
+
+ for(i=0; i<pWriter->nDlidx; i++){
+ sqlite3Fts5BufferFree(&pWriter->aDlidx[i].buf);
+ }
+ sqlite3_free(pWriter->aDlidx);
+}
+
+static void fts5WriteInit(
+ Fts5Index *p,
+ Fts5SegWriter *pWriter,
+ int iSegid
+){
+ const int nBuffer = p->pConfig->pgsz + FTS5_DATA_PADDING;
+
+ memset(pWriter, 0, sizeof(Fts5SegWriter));
+ pWriter->iSegid = iSegid;
+
+ fts5WriteDlidxGrow(p, pWriter, 1);
+ pWriter->writer.pgno = 1;
+ pWriter->bFirstTermInPage = 1;
+ pWriter->iBtPage = 1;
+
+ assert( pWriter->writer.buf.n==0 );
+ assert( pWriter->writer.pgidx.n==0 );
+
+ /* Grow the two buffers to pgsz + padding bytes in size. */
+ sqlite3Fts5BufferSize(&p->rc, &pWriter->writer.pgidx, nBuffer);
+ sqlite3Fts5BufferSize(&p->rc, &pWriter->writer.buf, nBuffer);
+
+ if( p->pIdxWriter==0 ){
+ Fts5Config *pConfig = p->pConfig;
+ fts5IndexPrepareStmt(p, &p->pIdxWriter, sqlite3_mprintf(
+ "INSERT INTO '%q'.'%q_idx'(segid,term,pgno) VALUES(?,?,?)",
+ pConfig->zDb, pConfig->zName
+ ));
+ }
+
+ if( p->rc==SQLITE_OK ){
+ /* Initialize the 4-byte leaf-page header to 0x00. */
+ memset(pWriter->writer.buf.p, 0, 4);
+ pWriter->writer.buf.n = 4;
+
+ /* Bind the current output segment id to the index-writer. This is an
+ ** optimization over binding the same value over and over as rows are
+ ** inserted into %_idx by the current writer. */
+ sqlite3_bind_int(p->pIdxWriter, 1, pWriter->iSegid);
+ }
+}
+
+/*
+** Iterator pIter was used to iterate through the input segments of an
+** incremental merge operation. This function is called if the incremental
+** merge step has finished but the input has not been completely exhausted.
+*/
+static void fts5TrimSegments(Fts5Index *p, Fts5Iter *pIter){
+ int i;
+ Fts5Buffer buf;
+ memset(&buf, 0, sizeof(Fts5Buffer));
+ for(i=0; i<pIter->nSeg && p->rc==SQLITE_OK; i++){
+ Fts5SegIter *pSeg = &pIter->aSeg[i];
+ if( pSeg->pSeg==0 ){
+ /* no-op */
+ }else if( pSeg->pLeaf==0 ){
+      /* All keys from this input segment have been transferred to the output.
+ ** Set both the first and last page-numbers to 0 to indicate that the
+ ** segment is now empty. */
+ pSeg->pSeg->pgnoLast = 0;
+ pSeg->pSeg->pgnoFirst = 0;
+ }else{
+ int iOff = pSeg->iTermLeafOffset; /* Offset on new first leaf page */
+ i64 iLeafRowid;
+ Fts5Data *pData;
+ int iId = pSeg->pSeg->iSegid;
+ u8 aHdr[4] = {0x00, 0x00, 0x00, 0x00};
+
+ iLeafRowid = FTS5_SEGMENT_ROWID(iId, pSeg->iTermLeafPgno);
+ pData = fts5LeafRead(p, iLeafRowid);
+ if( pData ){
+ if( iOff>pData->szLeaf ){
+ /* This can occur if the pages that the segments occupy overlap - if
+ ** a single page has been assigned to more than one segment. In
+ ** this case a prior iteration of this loop may have corrupted the
+ ** segment currently being trimmed. */
+ p->rc = FTS5_CORRUPT;
+ }else{
+ fts5BufferZero(&buf);
+ fts5BufferGrow(&p->rc, &buf, pData->nn);
+ fts5BufferAppendBlob(&p->rc, &buf, sizeof(aHdr), aHdr);
+ fts5BufferAppendVarint(&p->rc, &buf, pSeg->term.n);
+ fts5BufferAppendBlob(&p->rc, &buf, pSeg->term.n, pSeg->term.p);
+ fts5BufferAppendBlob(&p->rc, &buf, pData->szLeaf-iOff,&pData->p[iOff]);
+ if( p->rc==SQLITE_OK ){
+ /* Set the szLeaf field */
+ fts5PutU16(&buf.p[2], (u16)buf.n);
+ }
+
+ /* Set up the new page-index array */
+ fts5BufferAppendVarint(&p->rc, &buf, 4);
+ if( pSeg->iLeafPgno==pSeg->iTermLeafPgno
+ && pSeg->iEndofDoclist<pData->szLeaf
+ && pSeg->iPgidxOff<=pData->nn
+ ){
+ int nDiff = pData->szLeaf - pSeg->iEndofDoclist;
+ fts5BufferAppendVarint(&p->rc, &buf, buf.n - 1 - nDiff - 4);
+ fts5BufferAppendBlob(&p->rc, &buf,
+ pData->nn - pSeg->iPgidxOff, &pData->p[pSeg->iPgidxOff]
+ );
+ }
+
+ pSeg->pSeg->pgnoFirst = pSeg->iTermLeafPgno;
+ fts5DataDelete(p, FTS5_SEGMENT_ROWID(iId, 1), iLeafRowid);
+ fts5DataWrite(p, iLeafRowid, buf.p, buf.n);
+ }
+ fts5DataRelease(pData);
+ }
+ }
+ }
+ fts5BufferFree(&buf);
+}
+
+static void fts5MergeChunkCallback(
+ Fts5Index *p,
+ void *pCtx,
+ const u8 *pChunk, int nChunk
+){
+ Fts5SegWriter *pWriter = (Fts5SegWriter*)pCtx;
+ fts5WriteAppendPoslistData(p, pWriter, pChunk, nChunk);
+}
+
+/*
+** Merge the input segments on level iLvl of the index structure into a
+** single segment on level iLvl+1. If pnRem is not NULL, write no more than
+** (roughly) *pnRem output leaf pages before returning, leaving the input
+** level in a part-merged state, and decrement *pnRem by the number of
+** leaves actually written.
+*/
+static void fts5IndexMergeLevel(
+ Fts5Index *p, /* FTS5 backend object */
+  Fts5Structure **ppStruct,       /* IN/OUT: Structure of index */
+ int iLvl, /* Level to read input from */
+ int *pnRem /* Write up to this many output leaves */
+){
+ Fts5Structure *pStruct = *ppStruct;
+ Fts5StructureLevel *pLvl = &pStruct->aLevel[iLvl];
+ Fts5StructureLevel *pLvlOut;
+ Fts5Iter *pIter = 0; /* Iterator to read input data */
+ int nRem = pnRem ? *pnRem : 0; /* Output leaf pages left to write */
+ int nInput; /* Number of input segments */
+ Fts5SegWriter writer; /* Writer object */
+ Fts5StructureSegment *pSeg; /* Output segment */
+ Fts5Buffer term;
+ int bOldest; /* True if the output segment is the oldest */
+ int eDetail = p->pConfig->eDetail;
+ const int flags = FTS5INDEX_QUERY_NOOUTPUT;
+ int bTermWritten = 0; /* True if current term already output */
+
+ assert( iLvl<pStruct->nLevel );
+ assert( pLvl->nMerge<=pLvl->nSeg );
+
+ memset(&writer, 0, sizeof(Fts5SegWriter));
+ memset(&term, 0, sizeof(Fts5Buffer));
+ if( pLvl->nMerge ){
+ pLvlOut = &pStruct->aLevel[iLvl+1];
+ assert( pLvlOut->nSeg>0 );
+ nInput = pLvl->nMerge;
+ pSeg = &pLvlOut->aSeg[pLvlOut->nSeg-1];
+
+ fts5WriteInit(p, &writer, pSeg->iSegid);
+ writer.writer.pgno = pSeg->pgnoLast+1;
+ writer.iBtPage = 0;
+ }else{
+ int iSegid = fts5AllocateSegid(p, pStruct);
+
+ /* Extend the Fts5Structure object as required to ensure the output
+ ** segment exists. */
+ if( iLvl==pStruct->nLevel-1 ){
+ fts5StructureAddLevel(&p->rc, ppStruct);
+ pStruct = *ppStruct;
+ }
+ fts5StructureExtendLevel(&p->rc, pStruct, iLvl+1, 1, 0);
+ if( p->rc ) return;
+ pLvl = &pStruct->aLevel[iLvl];
+ pLvlOut = &pStruct->aLevel[iLvl+1];
+
+ fts5WriteInit(p, &writer, iSegid);
+
+ /* Add the new segment to the output level */
+ pSeg = &pLvlOut->aSeg[pLvlOut->nSeg];
+ pLvlOut->nSeg++;
+ pSeg->pgnoFirst = 1;
+ pSeg->iSegid = iSegid;
+ pStruct->nSegment++;
+
+ /* Read input from all segments in the input level */
+ nInput = pLvl->nSeg;
+ }
+ bOldest = (pLvlOut->nSeg==1 && pStruct->nLevel==iLvl+2);
+
+ assert( iLvl>=0 );
+ for(fts5MultiIterNew(p, pStruct, flags, 0, 0, 0, iLvl, nInput, &pIter);
+ fts5MultiIterEof(p, pIter)==0;
+ fts5MultiIterNext(p, pIter, 0, 0)
+ ){
+ Fts5SegIter *pSegIter = &pIter->aSeg[ pIter->aFirst[1].iFirst ];
+ int nPos; /* position-list size field value */
+ int nTerm;
+ const u8 *pTerm;
+
+ pTerm = fts5MultiIterTerm(pIter, &nTerm);
+ if( nTerm!=term.n || fts5Memcmp(pTerm, term.p, nTerm) ){
+ if( pnRem && writer.nLeafWritten>nRem ){
+ break;
+ }
+ fts5BufferSet(&p->rc, &term, nTerm, pTerm);
+      bTermWritten = 0;
+ }
+
+ /* Check for key annihilation. */
+ if( pSegIter->nPos==0 && (bOldest || pSegIter->bDel==0) ) continue;
+
+ if( p->rc==SQLITE_OK && bTermWritten==0 ){
+ /* This is a new term. Append a term to the output segment. */
+ fts5WriteAppendTerm(p, &writer, nTerm, pTerm);
+ bTermWritten = 1;
+ }
+
+ /* Append the rowid to the output */
+ /* WRITEPOSLISTSIZE */
+ fts5WriteAppendRowid(p, &writer, fts5MultiIterRowid(pIter));
+
+ if( eDetail==FTS5_DETAIL_NONE ){
+ if( pSegIter->bDel ){
+ fts5BufferAppendVarint(&p->rc, &writer.writer.buf, 0);
+ if( pSegIter->nPos>0 ){
+ fts5BufferAppendVarint(&p->rc, &writer.writer.buf, 0);
+ }
+ }
+ }else{
+ /* Append the position-list data to the output */
+ nPos = pSegIter->nPos*2 + pSegIter->bDel;
+ fts5BufferAppendVarint(&p->rc, &writer.writer.buf, nPos);
+ fts5ChunkIterate(p, pSegIter, (void*)&writer, fts5MergeChunkCallback);
+ }
+ }
+
+ /* Flush the last leaf page to disk. Set the output segment b-tree height
+ ** and last leaf page number at the same time. */
+ fts5WriteFinish(p, &writer, &pSeg->pgnoLast);
+
+ if( fts5MultiIterEof(p, pIter) ){
+ int i;
+
+ /* Remove the redundant segments from the %_data table */
+ for(i=0; i<nInput; i++){
+ fts5DataRemoveSegment(p, pLvl->aSeg[i].iSegid);
+ }
+
+ /* Remove the redundant segments from the input level */
+ if( pLvl->nSeg!=nInput ){
+ int nMove = (pLvl->nSeg - nInput) * sizeof(Fts5StructureSegment);
+ memmove(pLvl->aSeg, &pLvl->aSeg[nInput], nMove);
+ }
+ pStruct->nSegment -= nInput;
+ pLvl->nSeg -= nInput;
+ pLvl->nMerge = 0;
+ if( pSeg->pgnoLast==0 ){
+ pLvlOut->nSeg--;
+ pStruct->nSegment--;
+ }
+ }else{
+ assert( pSeg->pgnoLast>0 );
+ fts5TrimSegments(p, pIter);
+ pLvl->nMerge = nInput;
+ }
+
+ fts5MultiIterFree(pIter);
+ fts5BufferFree(&term);
+ if( pnRem ) *pnRem -= writer.nLeafWritten;
+}
+
+/*
+** Do up to nPg pages of automerge work on the index.
+**
+** Return true if any changes were actually made, or false otherwise.
+*/
+static int fts5IndexMerge(
+ Fts5Index *p, /* FTS5 backend object */
+ Fts5Structure **ppStruct, /* IN/OUT: Current structure of index */
+ int nPg, /* Pages of work to do */
+ int nMin /* Minimum number of segments to merge */
+){
+ int nRem = nPg;
+ int bRet = 0;
+ Fts5Structure *pStruct = *ppStruct;
+ while( nRem>0 && p->rc==SQLITE_OK ){
+ int iLvl; /* To iterate through levels */
+ int iBestLvl = 0; /* Level offering the most input segments */
+ int nBest = 0; /* Number of input segments on best level */
+
+ /* Set iBestLvl to the level to read input segments from. */
+ assert( pStruct->nLevel>0 );
+ for(iLvl=0; iLvl<pStruct->nLevel; iLvl++){
+ Fts5StructureLevel *pLvl = &pStruct->aLevel[iLvl];
+ if( pLvl->nMerge ){
+ if( pLvl->nMerge>nBest ){
+ iBestLvl = iLvl;
+ nBest = pLvl->nMerge;
+ }
+ break;
+ }
+ if( pLvl->nSeg>nBest ){
+ nBest = pLvl->nSeg;
+ iBestLvl = iLvl;
+ }
+ }
+
+ /* If nBest is still 0, then the index must be empty. */
+#ifdef SQLITE_DEBUG
+ for(iLvl=0; nBest==0 && iLvl<pStruct->nLevel; iLvl++){
+ assert( pStruct->aLevel[iLvl].nSeg==0 );
+ }
+#endif
+
+ if( nBest<nMin && pStruct->aLevel[iBestLvl].nMerge==0 ){
+ break;
+ }
+ bRet = 1;
+ fts5IndexMergeLevel(p, &pStruct, iBestLvl, &nRem);
+ if( p->rc==SQLITE_OK && pStruct->aLevel[iBestLvl].nMerge==0 ){
+ fts5StructurePromote(p, iBestLvl+1, pStruct);
+ }
+ }
+ *ppStruct = pStruct;
+ return bRet;
+}
+
+/*
+** A total of nLeaf leaf pages of data have just been flushed to a level-0
+** segment. This function updates the write-counter accordingly and, if
+** necessary, performs incremental merge work.
+**
+** If an error occurs, set the Fts5Index.rc error code. If an error has
+** already occurred, this function is a no-op.
+*/
+static void fts5IndexAutomerge(
+ Fts5Index *p, /* FTS5 backend object */
+ Fts5Structure **ppStruct, /* IN/OUT: Current structure of index */
+ int nLeaf /* Number of output leaves just written */
+){
+ if( p->rc==SQLITE_OK && p->pConfig->nAutomerge>0 ){
+ Fts5Structure *pStruct = *ppStruct;
+ u64 nWrite; /* Initial value of write-counter */
+ int nWork; /* Number of work-quanta to perform */
+ int nRem; /* Number of leaf pages left to write */
+
+ /* Update the write-counter. While doing so, set nWork. */
+ nWrite = pStruct->nWriteCounter;
+ nWork = (int)(((nWrite + nLeaf) / p->nWorkUnit) - (nWrite / p->nWorkUnit));
+ pStruct->nWriteCounter += nLeaf;
+ nRem = (int)(p->nWorkUnit * nWork * pStruct->nLevel);
+
+ fts5IndexMerge(p, ppStruct, nRem, p->pConfig->nAutomerge);
+ }
+}
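+
+/* Example of the work calculation above, assuming the default work-unit of
+** 64 leaf pages: with a write-counter of 100 and nLeaf==30,
+** nWork = (130/64) - (100/64) = 2 - 1 = 1, so nRem becomes 64 pages
+** multiplied by the number of levels in the structure. */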
+
+static void fts5IndexCrisismerge(
+ Fts5Index *p, /* FTS5 backend object */
+ Fts5Structure **ppStruct /* IN/OUT: Current structure of index */
+){
+ const int nCrisis = p->pConfig->nCrisisMerge;
+ Fts5Structure *pStruct = *ppStruct;
+ int iLvl = 0;
+
+ assert( p->rc!=SQLITE_OK || pStruct->nLevel>0 );
+ while( p->rc==SQLITE_OK && pStruct->aLevel[iLvl].nSeg>=nCrisis ){
+ fts5IndexMergeLevel(p, &pStruct, iLvl, 0);
+ assert( p->rc!=SQLITE_OK || pStruct->nLevel>(iLvl+1) );
+ fts5StructurePromote(p, iLvl+1, pStruct);
+ iLvl++;
+ }
+ *ppStruct = pStruct;
+}
+
+static int fts5IndexReturn(Fts5Index *p){
+ int rc = p->rc;
+ p->rc = SQLITE_OK;
+ return rc;
+}
+
+typedef struct Fts5FlushCtx Fts5FlushCtx;
+struct Fts5FlushCtx {
+ Fts5Index *pIdx;
+ Fts5SegWriter writer;
+};
+
+/*
+** Buffer aBuf[] contains a list of varints, all small enough to fit
+** in a 32-bit integer. Return the size of the largest prefix of this
+** list nMax bytes or less in size.
+*/
+static int fts5PoslistPrefix(const u8 *aBuf, int nMax){
+ int ret;
+ u32 dummy;
+ ret = fts5GetVarint32(aBuf, dummy);
+ if( ret<nMax ){
+ while( 1 ){
+ int i = fts5GetVarint32(&aBuf[ret], dummy);
+ if( (ret + i) > nMax ) break;
+ ret += i;
+ }
+ }
+ return ret;
+}
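+
+/* For example: if aBuf holds the varints for the values 200, 5 and 300
+** (2, 1 and 2 bytes respectively) and nMax==4, this function returns 3 -
+** the size of the largest prefix of whole varints that fits in 4 bytes. */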
+
+/*
+** Flush the contents of in-memory hash table iHash to a new level-0
+** segment on disk. Also update the corresponding structure record.
+**
+** If an error occurs, set the Fts5Index.rc error code. If an error has
+** already occurred, this function is a no-op.
+*/
+static void fts5FlushOneHash(Fts5Index *p){
+ Fts5Hash *pHash = p->pHash;
+ Fts5Structure *pStruct;
+ int iSegid;
+ int pgnoLast = 0; /* Last leaf page number in segment */
+
+ /* Obtain a reference to the index structure and allocate a new segment-id
+ ** for the new level-0 segment. */
+ pStruct = fts5StructureRead(p);
+ iSegid = fts5AllocateSegid(p, pStruct);
+ fts5StructureInvalidate(p);
+
+ if( iSegid ){
+ const int pgsz = p->pConfig->pgsz;
+ int eDetail = p->pConfig->eDetail;
+ Fts5StructureSegment *pSeg; /* New segment within pStruct */
+ Fts5Buffer *pBuf; /* Buffer in which to assemble leaf page */
+ Fts5Buffer *pPgidx; /* Buffer in which to assemble pgidx */
+
+ Fts5SegWriter writer;
+ fts5WriteInit(p, &writer, iSegid);
+
+ pBuf = &writer.writer.buf;
+ pPgidx = &writer.writer.pgidx;
+
+ /* fts5WriteInit() should have initialized the buffers to (most likely)
+ ** the maximum space required. */
+ assert( p->rc || pBuf->nSpace>=(pgsz + FTS5_DATA_PADDING) );
+ assert( p->rc || pPgidx->nSpace>=(pgsz + FTS5_DATA_PADDING) );
+
+ /* Begin scanning through hash table entries. This loop runs once for each
+ ** term/doclist currently stored within the hash table. */
+ if( p->rc==SQLITE_OK ){
+ p->rc = sqlite3Fts5HashScanInit(pHash, 0, 0);
+ }
+ while( p->rc==SQLITE_OK && 0==sqlite3Fts5HashScanEof(pHash) ){
+ const char *zTerm; /* Buffer containing term */
+ const u8 *pDoclist; /* Pointer to doclist for this term */
+ int nDoclist; /* Size of doclist in bytes */
+
+ /* Write the term for this entry to disk. */
+ sqlite3Fts5HashScanEntry(pHash, &zTerm, &pDoclist, &nDoclist);
+ fts5WriteAppendTerm(p, &writer, (int)strlen(zTerm), (const u8*)zTerm);
+ if( p->rc!=SQLITE_OK ) break;
+
+ assert( writer.bFirstRowidInPage==0 );
+ if( pgsz>=(pBuf->n + pPgidx->n + nDoclist + 1) ){
+ /* The entire doclist will fit on the current leaf. */
+ fts5BufferSafeAppendBlob(pBuf, pDoclist, nDoclist);
+ }else{
+ i64 iRowid = 0;
+ i64 iDelta = 0;
+ int iOff = 0;
+
+ /* The entire doclist will not fit on this leaf. The following
+ ** loop iterates through the poslists that make up the current
+ ** doclist. */
+ while( p->rc==SQLITE_OK && iOff<nDoclist ){
+ iOff += fts5GetVarint(&pDoclist[iOff], (u64*)&iDelta);
+ iRowid += iDelta;
+
+ if( writer.bFirstRowidInPage ){
+ fts5PutU16(&pBuf->p[0], (u16)pBuf->n); /* first rowid on page */
+ pBuf->n += sqlite3Fts5PutVarint(&pBuf->p[pBuf->n], iRowid);
+ writer.bFirstRowidInPage = 0;
+ fts5WriteDlidxAppend(p, &writer, iRowid);
+ if( p->rc!=SQLITE_OK ) break;
+ }else{
+ pBuf->n += sqlite3Fts5PutVarint(&pBuf->p[pBuf->n], iDelta);
+ }
+ assert( pBuf->n<=pBuf->nSpace );
+
+ if( eDetail==FTS5_DETAIL_NONE ){
+ if( iOff<nDoclist && pDoclist[iOff]==0 ){
+ pBuf->p[pBuf->n++] = 0;
+ iOff++;
+ if( iOff<nDoclist && pDoclist[iOff]==0 ){
+ pBuf->p[pBuf->n++] = 0;
+ iOff++;
+ }
+ }
+ if( (pBuf->n + pPgidx->n)>=pgsz ){
+ fts5WriteFlushLeaf(p, &writer);
+ }
+ }else{
+ int bDummy;
+ int nPos;
+ int nCopy = fts5GetPoslistSize(&pDoclist[iOff], &nPos, &bDummy);
+ nCopy += nPos;
+ if( (pBuf->n + pPgidx->n + nCopy) <= pgsz ){
+ /* The entire poslist will fit on the current leaf. So copy
+ ** it in one go. */
+ fts5BufferSafeAppendBlob(pBuf, &pDoclist[iOff], nCopy);
+ }else{
+          /* The entire poslist will not fit on this leaf, so it needs to
+          ** be broken into sections, the only qualification being that
+          ** each varint must be stored contiguously. */
+ const u8 *pPoslist = &pDoclist[iOff];
+ int iPos = 0;
+ while( p->rc==SQLITE_OK ){
+ int nSpace = pgsz - pBuf->n - pPgidx->n;
+ int n = 0;
+ if( (nCopy - iPos)<=nSpace ){
+ n = nCopy - iPos;
+ }else{
+ n = fts5PoslistPrefix(&pPoslist[iPos], nSpace);
+ }
+ assert( n>0 );
+ fts5BufferSafeAppendBlob(pBuf, &pPoslist[iPos], n);
+ iPos += n;
+ if( (pBuf->n + pPgidx->n)>=pgsz ){
+ fts5WriteFlushLeaf(p, &writer);
+ }
+ if( iPos>=nCopy ) break;
+ }
+ }
+ iOff += nCopy;
+ }
+ }
+ }
+
+ /* TODO2: Doclist terminator written here. */
+ /* pBuf->p[pBuf->n++] = '\0'; */
+ assert( pBuf->n<=pBuf->nSpace );
+ if( p->rc==SQLITE_OK ) sqlite3Fts5HashScanNext(pHash);
+ }
+ sqlite3Fts5HashClear(pHash);
+ fts5WriteFinish(p, &writer, &pgnoLast);
+
+ /* Update the Fts5Structure. It is written back to the database by the
+ ** fts5StructureRelease() call below. */
+ if( pStruct->nLevel==0 ){
+ fts5StructureAddLevel(&p->rc, &pStruct);
+ }
+ fts5StructureExtendLevel(&p->rc, pStruct, 0, 1, 0);
+ if( p->rc==SQLITE_OK ){
+ pSeg = &pStruct->aLevel[0].aSeg[ pStruct->aLevel[0].nSeg++ ];
+ pSeg->iSegid = iSegid;
+ pSeg->pgnoFirst = 1;
+ pSeg->pgnoLast = pgnoLast;
+ pStruct->nSegment++;
+ }
+ fts5StructurePromote(p, 0, pStruct);
+ }
+
+ fts5IndexAutomerge(p, &pStruct, pgnoLast);
+ fts5IndexCrisismerge(p, &pStruct);
+ fts5StructureWrite(p, pStruct);
+ fts5StructureRelease(pStruct);
+}
+
+/*
+** Flush any data stored in the in-memory hash tables to the database.
+*/
+static void fts5IndexFlush(Fts5Index *p){
+ /* Unless it is empty, flush the hash table to disk */
+ if( p->nPendingData ){
+ assert( p->pHash );
+ p->nPendingData = 0;
+ fts5FlushOneHash(p);
+ }
+}
+
+static Fts5Structure *fts5IndexOptimizeStruct(
+ Fts5Index *p,
+ Fts5Structure *pStruct
+){
+ Fts5Structure *pNew = 0;
+ sqlite3_int64 nByte = sizeof(Fts5Structure);
+ int nSeg = pStruct->nSegment;
+ int i;
+
+ /* Figure out if this structure requires optimization. A structure does
+ ** not require optimization if either:
+ **
+ ** + it consists of fewer than two segments, or
+ ** + all segments are on the same level, or
+ ** + all segments except one are currently inputs to a merge operation.
+ **
+  ** In the first case, return NULL. In the latter two cases, increment the
+  ** ref-count on *pStruct and return a copy of the pointer to it.
+ */
+ if( nSeg<2 ) return 0;
+ for(i=0; i<pStruct->nLevel; i++){
+ int nThis = pStruct->aLevel[i].nSeg;
+ if( nThis==nSeg || (nThis==nSeg-1 && pStruct->aLevel[i].nMerge==nThis) ){
+ fts5StructureRef(pStruct);
+ return pStruct;
+ }
+ assert( pStruct->aLevel[i].nMerge<=nThis );
+ }
+
+ nByte += (pStruct->nLevel+1) * sizeof(Fts5StructureLevel);
+ pNew = (Fts5Structure*)sqlite3Fts5MallocZero(&p->rc, nByte);
+
+ if( pNew ){
+ Fts5StructureLevel *pLvl;
+ nByte = nSeg * sizeof(Fts5StructureSegment);
+ pNew->nLevel = pStruct->nLevel+1;
+ pNew->nRef = 1;
+ pNew->nWriteCounter = pStruct->nWriteCounter;
+ pLvl = &pNew->aLevel[pStruct->nLevel];
+ pLvl->aSeg = (Fts5StructureSegment*)sqlite3Fts5MallocZero(&p->rc, nByte);
+ if( pLvl->aSeg ){
+ int iLvl, iSeg;
+ int iSegOut = 0;
+ /* Iterate through all segments, from oldest to newest. Add them to
+ ** the new Fts5Level object so that pLvl->aSeg[0] is the oldest
+ ** segment in the data structure. */
+ for(iLvl=pStruct->nLevel-1; iLvl>=0; iLvl--){
+ for(iSeg=0; iSeg<pStruct->aLevel[iLvl].nSeg; iSeg++){
+ pLvl->aSeg[iSegOut] = pStruct->aLevel[iLvl].aSeg[iSeg];
+ iSegOut++;
+ }
+ }
+ pNew->nSegment = pLvl->nSeg = nSeg;
+ }else{
+ sqlite3_free(pNew);
+ pNew = 0;
+ }
+ }
+
+ return pNew;
+}
+
+static int sqlite3Fts5IndexOptimize(Fts5Index *p){
+ Fts5Structure *pStruct;
+ Fts5Structure *pNew = 0;
+
+ assert( p->rc==SQLITE_OK );
+ fts5IndexFlush(p);
+ pStruct = fts5StructureRead(p);
+ fts5StructureInvalidate(p);
+
+ if( pStruct ){
+ pNew = fts5IndexOptimizeStruct(p, pStruct);
+ }
+ fts5StructureRelease(pStruct);
+
+ assert( pNew==0 || pNew->nSegment>0 );
+ if( pNew ){
+ int iLvl;
+ for(iLvl=0; pNew->aLevel[iLvl].nSeg==0; iLvl++){}
+ while( p->rc==SQLITE_OK && pNew->aLevel[iLvl].nSeg>0 ){
+ int nRem = FTS5_OPT_WORK_UNIT;
+ fts5IndexMergeLevel(p, &pNew, iLvl, &nRem);
+ }
+
+ fts5StructureWrite(p, pNew);
+ fts5StructureRelease(pNew);
+ }
+
+ return fts5IndexReturn(p);
+}
+
+/*
+** This is called to implement the special "VALUES('merge', $nMerge)"
+** INSERT command.
+*/
+static int sqlite3Fts5IndexMerge(Fts5Index *p, int nMerge){
+ Fts5Structure *pStruct = fts5StructureRead(p);
+ if( pStruct ){
+ int nMin = p->pConfig->nUsermerge;
+ fts5StructureInvalidate(p);
+ if( nMerge<0 ){
+ Fts5Structure *pNew = fts5IndexOptimizeStruct(p, pStruct);
+ fts5StructureRelease(pStruct);
+ pStruct = pNew;
+ nMin = 2;
+ nMerge = nMerge*-1;
+ }
+ if( pStruct && pStruct->nLevel ){
+ if( fts5IndexMerge(p, &pStruct, nMerge, nMin) ){
+ fts5StructureWrite(p, pStruct);
+ }
+ }
+ fts5StructureRelease(pStruct);
+ }
+ return fts5IndexReturn(p);
+}
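+
+/* A note on the handling above: a negative nMerge requests optimize-style
+** merging. In that case the structure is first collapsed using
+** fts5IndexOptimizeStruct(), and the merge then runs with nMin==2 and the
+** absolute value of nMerge as its page budget. */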
+
+static void fts5AppendRowid(
+ Fts5Index *p,
+ i64 iDelta,
+ Fts5Iter *pUnused,
+ Fts5Buffer *pBuf
+){
+ UNUSED_PARAM(pUnused);
+ fts5BufferAppendVarint(&p->rc, pBuf, iDelta);
+}
+
+static void fts5AppendPoslist(
+ Fts5Index *p,
+ i64 iDelta,
+ Fts5Iter *pMulti,
+ Fts5Buffer *pBuf
+){
+ int nData = pMulti->base.nData;
+ int nByte = nData + 9 + 9 + FTS5_DATA_ZERO_PADDING;
+ assert( nData>0 );
+ if( p->rc==SQLITE_OK && 0==fts5BufferGrow(&p->rc, pBuf, nByte) ){
+ fts5BufferSafeAppendVarint(pBuf, iDelta);
+ fts5BufferSafeAppendVarint(pBuf, nData*2);
+ fts5BufferSafeAppendBlob(pBuf, pMulti->base.pData, nData);
+ memset(&pBuf->p[pBuf->n], 0, FTS5_DATA_ZERO_PADDING);
+ }
+}
+
+
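+/*
+** Advance iterator pIter to the next entry in its doclist. Each entry is a
+** rowid delta varint followed by a position-list size field. The size field
+** is a single byte if its 0x80 bit is clear, or a varint otherwise; the low
+** bit of the decoded value is the "delete" flag, and the remaining bits
+** give the size of the position-list in bytes.
+*/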
+static void fts5DoclistIterNext(Fts5DoclistIter *pIter){
+ u8 *p = pIter->aPoslist + pIter->nSize + pIter->nPoslist;
+
+ assert( pIter->aPoslist );
+ if( p>=pIter->aEof ){
+ pIter->aPoslist = 0;
+ }else{
+ i64 iDelta;
+
+ p += fts5GetVarint(p, (u64*)&iDelta);
+ pIter->iRowid += iDelta;
+
+ /* Read position list size */
+ if( p[0] & 0x80 ){
+ int nPos;
+ pIter->nSize = fts5GetVarint32(p, nPos);
+ pIter->nPoslist = (nPos>>1);
+ }else{
+ pIter->nPoslist = ((int)(p[0])) >> 1;
+ pIter->nSize = 1;
+ }
+
+ pIter->aPoslist = p;
+ }
+}
+
+static void fts5DoclistIterInit(
+ Fts5Buffer *pBuf,
+ Fts5DoclistIter *pIter
+){
+ memset(pIter, 0, sizeof(*pIter));
+ pIter->aPoslist = pBuf->p;
+ pIter->aEof = &pBuf->p[pBuf->n];
+ fts5DoclistIterNext(pIter);
+}
+
+#if 0
+/*
+** Append a docid to buffer pBuf, delta-encoded against *piLastRowid.
+**
+** This function assumes that space within the buffer has already been
+** allocated.
+*/
+static void fts5MergeAppendDocid(
+ Fts5Buffer *pBuf, /* Buffer to write to */
+ i64 *piLastRowid, /* IN/OUT: Previous rowid written (if any) */
+ i64 iRowid /* Rowid to append */
+){
+ assert( pBuf->n!=0 || (*piLastRowid)==0 );
+ fts5BufferSafeAppendVarint(pBuf, iRowid - *piLastRowid);
+ *piLastRowid = iRowid;
+}
+#endif
+
+#define fts5MergeAppendDocid(pBuf, iLastRowid, iRowid) { \
+ assert( (pBuf)->n!=0 || (iLastRowid)==0 ); \
+ fts5BufferSafeAppendVarint((pBuf), (iRowid) - (iLastRowid)); \
+ (iLastRowid) = (iRowid); \
+}
+
+/*
+** Swap the contents of buffer *p1 with that of *p2.
+*/
+static void fts5BufferSwap(Fts5Buffer *p1, Fts5Buffer *p2){
+ Fts5Buffer tmp = *p1;
+ *p1 = *p2;
+ *p2 = tmp;
+}
+
+static void fts5NextRowid(Fts5Buffer *pBuf, int *piOff, i64 *piRowid){
+ int i = *piOff;
+ if( i>=pBuf->n ){
+ *piOff = -1;
+ }else{
+ u64 iVal;
+ *piOff = i + sqlite3Fts5GetVarint(&pBuf->p[i], &iVal);
+ *piRowid += iVal;
+ }
+}
+
+/*
+** This is the equivalent of fts5MergePrefixLists() for detail=none mode.
+** In this case the buffers consist of a delta-encoded list of rowids only.
+*/
+static void fts5MergeRowidLists(
+ Fts5Index *p, /* FTS5 backend object */
+ Fts5Buffer *p1, /* First list to merge */
+ Fts5Buffer *p2 /* Second list to merge */
+){
+ int i1 = 0;
+ int i2 = 0;
+ i64 iRowid1 = 0;
+ i64 iRowid2 = 0;
+ i64 iOut = 0;
+
+ Fts5Buffer out;
+ memset(&out, 0, sizeof(out));
+ sqlite3Fts5BufferSize(&p->rc, &out, p1->n + p2->n);
+ if( p->rc ) return;
+
+ fts5NextRowid(p1, &i1, &iRowid1);
+ fts5NextRowid(p2, &i2, &iRowid2);
+ while( i1>=0 || i2>=0 ){
+ if( i1>=0 && (i2<0 || iRowid1<iRowid2) ){
+ assert( iOut==0 || iRowid1>iOut );
+ fts5BufferSafeAppendVarint(&out, iRowid1 - iOut);
+ iOut = iRowid1;
+ fts5NextRowid(p1, &i1, &iRowid1);
+ }else{
+ assert( iOut==0 || iRowid2>iOut );
+ fts5BufferSafeAppendVarint(&out, iRowid2 - iOut);
+ iOut = iRowid2;
+ if( i1>=0 && iRowid1==iRowid2 ){
+ fts5NextRowid(p1, &i1, &iRowid1);
+ }
+ fts5NextRowid(p2, &i2, &iRowid2);
+ }
+ }
+
+ fts5BufferSwap(&out, p1);
+ fts5BufferFree(&out);
+}
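+
+/* Worked example: merging the rowid lists {1,3,7} and {3,5} (stored as the
+** deltas 1,2,4 and 3,2) produces {1,3,5,7}, written back to p1 as the
+** deltas 1,2,2,2. A rowid present in both inputs appears only once in the
+** output. */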
+
+/*
+** Buffers p1 and p2 contain doclists. This function merges the content
+** of the two doclists together and sets buffer p1 to the result before
+** returning.
+**
+** If an error occurs, an error code is left in p->rc. If an error has
+** already occurred, this function is a no-op.
+*/
+static void fts5MergePrefixLists(
+ Fts5Index *p, /* FTS5 backend object */
+ Fts5Buffer *p1, /* First list to merge */
+ Fts5Buffer *p2 /* Second list to merge */
+){
+ if( p2->n ){
+ i64 iLastRowid = 0;
+ Fts5DoclistIter i1;
+ Fts5DoclistIter i2;
+ Fts5Buffer out = {0, 0, 0};
+ Fts5Buffer tmp = {0, 0, 0};
+
+ /* The maximum size of the output is equal to the sum of the two
+ ** input sizes + 1 varint (9 bytes). The extra varint is because if the
+ ** first rowid in one input is a large negative number, and the first in
+ ** the other a non-negative number, the delta for the non-negative
+ ** number will be larger on disk than the literal integer value
+ ** was.
+ **
+ ** Or, if the input position-lists are corrupt, then the output might
+ ** include up to 2 extra 10-byte positions created by interpreting -1
+ ** (the value PoslistNext64() uses for EOF) as a position and appending
+ ** it to the output. This can happen at most once for each input
+ ** position-list, hence two 10 byte paddings. */
+ if( sqlite3Fts5BufferSize(&p->rc, &out, p1->n + p2->n + 9+10+10) ) return;
+ fts5DoclistIterInit(p1, &i1);
+ fts5DoclistIterInit(p2, &i2);
+
+ while( 1 ){
+ if( i1.iRowid<i2.iRowid ){
+ /* Copy entry from i1 */
+ fts5MergeAppendDocid(&out, iLastRowid, i1.iRowid);
+ fts5BufferSafeAppendBlob(&out, i1.aPoslist, i1.nPoslist+i1.nSize);
+ fts5DoclistIterNext(&i1);
+ if( i1.aPoslist==0 ) break;
+ assert( out.n<=((i1.aPoslist-p1->p) + (i2.aPoslist-p2->p)+9+10+10) );
+ }
+ else if( i2.iRowid!=i1.iRowid ){
+ /* Copy entry from i2 */
+ fts5MergeAppendDocid(&out, iLastRowid, i2.iRowid);
+ fts5BufferSafeAppendBlob(&out, i2.aPoslist, i2.nPoslist+i2.nSize);
+ fts5DoclistIterNext(&i2);
+ if( i2.aPoslist==0 ) break;
+ assert( out.n<=((i1.aPoslist-p1->p) + (i2.aPoslist-p2->p)+9+10+10) );
+ }
+ else{
+ /* Merge the two position lists. */
+ i64 iPos1 = 0;
+ i64 iPos2 = 0;
+ int iOff1 = 0;
+ int iOff2 = 0;
+ u8 *a1 = &i1.aPoslist[i1.nSize];
+ u8 *a2 = &i2.aPoslist[i2.nSize];
+ int nCopy;
+ u8 *aCopy;
+
+ i64 iPrev = 0;
+ Fts5PoslistWriter writer;
+ memset(&writer, 0, sizeof(writer));
+
+ /* See the earlier comment in this function for an explanation of why
+ ** corrupt input position lists might cause the output to consume
+ ** at most 20 bytes of unexpected space. */
+ fts5MergeAppendDocid(&out, iLastRowid, i2.iRowid);
+ fts5BufferZero(&tmp);
+ sqlite3Fts5BufferSize(&p->rc, &tmp, i1.nPoslist + i2.nPoslist + 10 + 10);
+ if( p->rc ) break;
+
+ sqlite3Fts5PoslistNext64(a1, i1.nPoslist, &iOff1, &iPos1);
+ sqlite3Fts5PoslistNext64(a2, i2.nPoslist, &iOff2, &iPos2);
+ assert_nc( iPos1>=0 && iPos2>=0 );
+
+ if( iPos1<iPos2 ){
+ sqlite3Fts5PoslistSafeAppend(&tmp, &iPrev, iPos1);
+ sqlite3Fts5PoslistNext64(a1, i1.nPoslist, &iOff1, &iPos1);
+ }else{
+ sqlite3Fts5PoslistSafeAppend(&tmp, &iPrev, iPos2);
+ sqlite3Fts5PoslistNext64(a2, i2.nPoslist, &iOff2, &iPos2);
+ }
+ if( iPos1>=0 && iPos2>=0 ){
+ while( 1 ){
+ if( iPos1<iPos2 ){
+ if( iPos1!=iPrev ){
+ sqlite3Fts5PoslistSafeAppend(&tmp, &iPrev, iPos1);
+ }
+ sqlite3Fts5PoslistNext64(a1, i1.nPoslist, &iOff1, &iPos1);
+ if( iPos1<0 ) break;
+ }else{
+ assert_nc( iPos2!=iPrev );
+ sqlite3Fts5PoslistSafeAppend(&tmp, &iPrev, iPos2);
+ sqlite3Fts5PoslistNext64(a2, i2.nPoslist, &iOff2, &iPos2);
+ if( iPos2<0 ) break;
+ }
+ }
+ }
+
+ if( iPos1>=0 ){
+ if( iPos1!=iPrev ){
+ sqlite3Fts5PoslistSafeAppend(&tmp, &iPrev, iPos1);
+ }
+ aCopy = &a1[iOff1];
+ nCopy = i1.nPoslist - iOff1;
+ }else{
+ assert_nc( iPos2>=0 && iPos2!=iPrev );
+ sqlite3Fts5PoslistSafeAppend(&tmp, &iPrev, iPos2);
+ aCopy = &a2[iOff2];
+ nCopy = i2.nPoslist - iOff2;
+ }
+ if( nCopy>0 ){
+ fts5BufferSafeAppendBlob(&tmp, aCopy, nCopy);
+ }
+
+ /* WRITEPOSLISTSIZE */
+ assert_nc( tmp.n<=i1.nPoslist+i2.nPoslist );
+ assert( tmp.n<=i1.nPoslist+i2.nPoslist+10+10 );
+ if( tmp.n>i1.nPoslist+i2.nPoslist ){
+ if( p->rc==SQLITE_OK ) p->rc = FTS5_CORRUPT;
+ break;
+ }
+ fts5BufferSafeAppendVarint(&out, tmp.n * 2);
+ fts5BufferSafeAppendBlob(&out, tmp.p, tmp.n);
+ fts5DoclistIterNext(&i1);
+ fts5DoclistIterNext(&i2);
+ assert_nc( out.n<=(p1->n+p2->n+9) );
+ if( i1.aPoslist==0 || i2.aPoslist==0 ) break;
+ assert( out.n<=((i1.aPoslist-p1->p) + (i2.aPoslist-p2->p)+9+10+10) );
+ }
+ }
+
+ if( i1.aPoslist ){
+ fts5MergeAppendDocid(&out, iLastRowid, i1.iRowid);
+ fts5BufferSafeAppendBlob(&out, i1.aPoslist, i1.aEof - i1.aPoslist);
+ }
+ else if( i2.aPoslist ){
+ fts5MergeAppendDocid(&out, iLastRowid, i2.iRowid);
+ fts5BufferSafeAppendBlob(&out, i2.aPoslist, i2.aEof - i2.aPoslist);
+ }
+ assert_nc( out.n<=(p1->n+p2->n+9) );
+
+ fts5BufferSet(&p->rc, p1, out.n, out.p);
+ fts5BufferFree(&tmp);
+ fts5BufferFree(&out);
+ }
+}
+
+static void fts5SetupPrefixIter(
+ Fts5Index *p, /* Index to read from */
+ int bDesc, /* True for "ORDER BY rowid DESC" */
+ const u8 *pToken, /* Buffer containing prefix to match */
+ int nToken, /* Size of buffer pToken in bytes */
+ Fts5Colset *pColset, /* Restrict matches to these columns */
+ Fts5Iter **ppIter /* OUT: New iterator */
+){
+ Fts5Structure *pStruct;
+ Fts5Buffer *aBuf;
+ const int nBuf = 32;
+
+ void (*xMerge)(Fts5Index*, Fts5Buffer*, Fts5Buffer*);
+ void (*xAppend)(Fts5Index*, i64, Fts5Iter*, Fts5Buffer*);
+ if( p->pConfig->eDetail==FTS5_DETAIL_NONE ){
+ xMerge = fts5MergeRowidLists;
+ xAppend = fts5AppendRowid;
+ }else{
+ xMerge = fts5MergePrefixLists;
+ xAppend = fts5AppendPoslist;
+ }
+
+ aBuf = (Fts5Buffer*)fts5IdxMalloc(p, sizeof(Fts5Buffer)*nBuf);
+ pStruct = fts5StructureRead(p);
+
+ if( aBuf && pStruct ){
+ const int flags = FTS5INDEX_QUERY_SCAN
+ | FTS5INDEX_QUERY_SKIPEMPTY
+ | FTS5INDEX_QUERY_NOOUTPUT;
+ int i;
+ i64 iLastRowid = 0;
+ Fts5Iter *p1 = 0; /* Iterator used to gather data from index */
+ Fts5Data *pData;
+ Fts5Buffer doclist;
+ int bNewTerm = 1;
+
+ memset(&doclist, 0, sizeof(doclist));
+ fts5MultiIterNew(p, pStruct, flags, pColset, pToken, nToken, -1, 0, &p1);
+ fts5IterSetOutputCb(&p->rc, p1);
+ for( /* no-op */ ;
+ fts5MultiIterEof(p, p1)==0;
+ fts5MultiIterNext2(p, p1, &bNewTerm)
+ ){
+ Fts5SegIter *pSeg = &p1->aSeg[ p1->aFirst[1].iFirst ];
+ int nTerm = pSeg->term.n;
+ const u8 *pTerm = pSeg->term.p;
+ p1->xSetOutputs(p1, pSeg);
+
+ assert_nc( memcmp(pToken, pTerm, MIN(nToken, nTerm))<=0 );
+ if( bNewTerm ){
+ if( nTerm<nToken || memcmp(pToken, pTerm, nToken) ) break;
+ }
+
+ if( p1->base.nData==0 ) continue;
+
+ if( p1->base.iRowid<=iLastRowid && doclist.n>0 ){
+ for(i=0; p->rc==SQLITE_OK && doclist.n; i++){
+ assert( i<nBuf );
+ if( aBuf[i].n==0 ){
+ fts5BufferSwap(&doclist, &aBuf[i]);
+ fts5BufferZero(&doclist);
+ }else{
+ xMerge(p, &doclist, &aBuf[i]);
+ fts5BufferZero(&aBuf[i]);
+ }
+ }
+ iLastRowid = 0;
+ }
+
+ xAppend(p, p1->base.iRowid-iLastRowid, p1, &doclist);
+ iLastRowid = p1->base.iRowid;
+ }
+
+ for(i=0; i<nBuf; i++){
+ if( p->rc==SQLITE_OK ){
+ xMerge(p, &doclist, &aBuf[i]);
+ }
+ fts5BufferFree(&aBuf[i]);
+ }
+ fts5MultiIterFree(p1);
+
+ pData = fts5IdxMalloc(p, sizeof(Fts5Data)+doclist.n+FTS5_DATA_ZERO_PADDING);
+ if( pData ){
+ pData->p = (u8*)&pData[1];
+ pData->nn = pData->szLeaf = doclist.n;
+ if( doclist.n ) memcpy(pData->p, doclist.p, doclist.n);
+ fts5MultiIterNew2(p, pData, bDesc, ppIter);
+ }
+ fts5BufferFree(&doclist);
+ }
+
+ fts5StructureRelease(pStruct);
+ sqlite3_free(aBuf);
+}
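+
+/* A note on the merge strategy above: doclists gathered from the index are
+** accumulated in "doclist" and, whenever the rowids stop ascending, spilled
+** into aBuf[] in the manner of a binary counter - slot i holds the merge of
+** roughly 2^i earlier spills - so each entry is merged O(log N) times
+** rather than O(N). */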
+
+
+/*
+** Indicate that all subsequent calls to sqlite3Fts5IndexWrite() pertain
+** to the document with rowid iRowid.
+*/
+static int sqlite3Fts5IndexBeginWrite(Fts5Index *p, int bDelete, i64 iRowid){
+ assert( p->rc==SQLITE_OK );
+
+ /* Allocate the hash table if it has not already been allocated */
+ if( p->pHash==0 ){
+ p->rc = sqlite3Fts5HashNew(p->pConfig, &p->pHash, &p->nPendingData);
+ }
+
+ /* Flush the hash table to disk if required */
+ if( iRowid<p->iWriteRowid
+ || (iRowid==p->iWriteRowid && p->bDelete==0)
+ || (p->nPendingData > p->pConfig->nHashSize)
+ ){
+ fts5IndexFlush(p);
+ }
+
+ p->iWriteRowid = iRowid;
+ p->bDelete = bDelete;
+ return fts5IndexReturn(p);
+}
+
+/*
+** Commit data to disk.
+*/
+static int sqlite3Fts5IndexSync(Fts5Index *p){
+ assert( p->rc==SQLITE_OK );
+ fts5IndexFlush(p);
+ sqlite3Fts5IndexCloseReader(p);
+ return fts5IndexReturn(p);
+}
+
+/*
+** Discard any data stored in the in-memory hash tables. Do not write it
+** to the database. Additionally, assume that the contents of the %_data
+** table may have changed on disk. So any in-memory caches of %_data
+** records must be invalidated.
+*/
+static int sqlite3Fts5IndexRollback(Fts5Index *p){
+ sqlite3Fts5IndexCloseReader(p);
+ fts5IndexDiscardData(p);
+ fts5StructureInvalidate(p);
+ /* assert( p->rc==SQLITE_OK ); */
+ return SQLITE_OK;
+}
+
+/*
+** The %_data table is completely empty when this function is called. This
+** function populates it with the initial structure objects for each index,
+** and the initial version of the "averages" record (a zero-byte blob).
+*/
+static int sqlite3Fts5IndexReinit(Fts5Index *p){
+ Fts5Structure s;
+ fts5StructureInvalidate(p);
+ fts5IndexDiscardData(p);
+ memset(&s, 0, sizeof(Fts5Structure));
+ fts5DataWrite(p, FTS5_AVERAGES_ROWID, (const u8*)"", 0);
+ fts5StructureWrite(p, &s);
+ return fts5IndexReturn(p);
+}
+
+/*
+** Open a new Fts5Index handle. If the bCreate argument is true, create
+** and initialize the underlying %_data table.
+**
+** If successful, set *pp to point to the new object and return SQLITE_OK.
+** Otherwise, set *pp to NULL and return an SQLite error code.
+*/
+static int sqlite3Fts5IndexOpen(
+ Fts5Config *pConfig,
+ int bCreate,
+ Fts5Index **pp,
+ char **pzErr
+){
+ int rc = SQLITE_OK;
+ Fts5Index *p; /* New object */
+
+ *pp = p = (Fts5Index*)sqlite3Fts5MallocZero(&rc, sizeof(Fts5Index));
+ if( rc==SQLITE_OK ){
+ p->pConfig = pConfig;
+ p->nWorkUnit = FTS5_WORK_UNIT;
+ p->zDataTbl = sqlite3Fts5Mprintf(&rc, "%s_data", pConfig->zName);
+ if( p->zDataTbl && bCreate ){
+ rc = sqlite3Fts5CreateTable(
+ pConfig, "data", "id INTEGER PRIMARY KEY, block BLOB", 0, pzErr
+ );
+ if( rc==SQLITE_OK ){
+ rc = sqlite3Fts5CreateTable(pConfig, "idx",
+ "segid, term, pgno, PRIMARY KEY(segid, term)",
+ 1, pzErr
+ );
+ }
+ if( rc==SQLITE_OK ){
+ rc = sqlite3Fts5IndexReinit(p);
+ }
+ }
+ }
+
+ assert( rc!=SQLITE_OK || p->rc==SQLITE_OK );
+ if( rc ){
+ sqlite3Fts5IndexClose(p);
+ *pp = 0;
+ }
+ return rc;
+}
+
+/*
+** Close a handle opened by an earlier call to sqlite3Fts5IndexOpen().
+*/
+static int sqlite3Fts5IndexClose(Fts5Index *p){
+ int rc = SQLITE_OK;
+ if( p ){
+ assert( p->pReader==0 );
+ fts5StructureInvalidate(p);
+ sqlite3_finalize(p->pWriter);
+ sqlite3_finalize(p->pDeleter);
+ sqlite3_finalize(p->pIdxWriter);
+ sqlite3_finalize(p->pIdxDeleter);
+ sqlite3_finalize(p->pIdxSelect);
+ sqlite3_finalize(p->pDataVersion);
+ sqlite3Fts5HashFree(p->pHash);
+ sqlite3_free(p->zDataTbl);
+ sqlite3_free(p);
+ }
+ return rc;
+}
+
+/*
+** Argument p points to a buffer containing utf-8 text that is nByte bytes
+** in size. Return the number of bytes in the nChar-character prefix of the
+** buffer, or 0 if there are fewer than nChar characters in total.
+*/
+static int sqlite3Fts5IndexCharlenToBytelen(
+ const char *p,
+ int nByte,
+ int nChar
+){
+ int n = 0;
+ int i;
+ for(i=0; i<nChar; i++){
+ if( n>=nByte ) return 0; /* Input contains fewer than nChar chars */
+ if( (unsigned char)p[n++]>=0xc0 ){
+ if( n>=nByte ) return 0;
+ while( (p[n] & 0xc0)==0x80 ){
+ n++;
+ if( n>=nByte ){
+ if( i+1==nChar ) break;
+ return 0;
+ }
+ }
+ }
+ }
+ return n;
+}
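+
+/* For example: given the 4-byte UTF-8 string "a\xC3\xA9b" (three
+** characters), nChar==2 returns 3 - the byte length of "a\xC3\xA9" - while
+** nChar==4 returns 0, because the buffer holds only three characters. */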
+
+/*
+** pIn is a UTF-8 encoded string, nIn bytes in size. Return the number of
+** unicode characters in the string.
+*/
+static int fts5IndexCharlen(const char *pIn, int nIn){
+ int nChar = 0;
+ int i = 0;
+ while( i<nIn ){
+ if( (unsigned char)pIn[i++]>=0xc0 ){
+ while( i<nIn && (pIn[i] & 0xc0)==0x80 ) i++;
+ }
+ nChar++;
+ }
+ return nChar;
+}
+
+/*
+** Insert or remove data to or from the index. Each time a document is
+** added to or removed from the index, this function is called one or more
+** times.
+**
+** For an insert, it must be called once for each token in the new document.
+** If the operation is a delete, it must be called (at least) once for each
+** unique token in the document with an iCol value less than zero. The iPos
+** argument is ignored for a delete.
+*/
+static int sqlite3Fts5IndexWrite(
+ Fts5Index *p, /* Index to write to */
+ int iCol, /* Column token appears in (-ve -> delete) */
+ int iPos, /* Position of token within column */
+ const char *pToken, int nToken /* Token to add or remove to or from index */
+){
+ int i; /* Used to iterate through indexes */
+ int rc = SQLITE_OK; /* Return code */
+ Fts5Config *pConfig = p->pConfig;
+
+ assert( p->rc==SQLITE_OK );
+ assert( (iCol<0)==p->bDelete );
+
+ /* Add the entry to the main terms index. */
+ rc = sqlite3Fts5HashWrite(
+ p->pHash, p->iWriteRowid, iCol, iPos, FTS5_MAIN_PREFIX, pToken, nToken
+ );
+
+ for(i=0; i<pConfig->nPrefix && rc==SQLITE_OK; i++){
+ const int nChar = pConfig->aPrefix[i];
+ int nByte = sqlite3Fts5IndexCharlenToBytelen(pToken, nToken, nChar);
+ if( nByte ){
+ rc = sqlite3Fts5HashWrite(p->pHash,
+ p->iWriteRowid, iCol, iPos, (char)(FTS5_MAIN_PREFIX+i+1), pToken,
+ nByte
+ );
+ }
+ }
+
+ return rc;
+}
+
+/*
+** Open a new iterator to iterate through all rowids that match the
+** specified token or token prefix.
+*/
+static int sqlite3Fts5IndexQuery(
+ Fts5Index *p, /* FTS index to query */
+ const char *pToken, int nToken, /* Token (or prefix) to query for */
+ int flags, /* Mask of FTS5INDEX_QUERY_X flags */
+ Fts5Colset *pColset, /* Match these columns only */
+ Fts5IndexIter **ppIter /* OUT: New iterator object */
+){
+ Fts5Config *pConfig = p->pConfig;
+ Fts5Iter *pRet = 0;
+ Fts5Buffer buf = {0, 0, 0};
+
+ /* If the QUERY_SCAN flag is set, all other flags must be clear. */
+ assert( (flags & FTS5INDEX_QUERY_SCAN)==0 || flags==FTS5INDEX_QUERY_SCAN );
+
+ if( sqlite3Fts5BufferSize(&p->rc, &buf, nToken+1)==0 ){
+ int iIdx = 0; /* Index to search */
+ if( nToken ) memcpy(&buf.p[1], pToken, nToken);
+
+ /* Figure out which index to search and set iIdx accordingly. If this
+ ** is a prefix query for which there is no prefix index, set iIdx to
+ ** greater than pConfig->nPrefix to indicate that the query will be
+ ** satisfied by scanning multiple terms in the main index.
+ **
+ ** If the QUERY_TEST_NOIDX flag was specified, then this must be a
+ ** prefix-query. Instead of using a prefix-index (if one exists),
+ ** evaluate the prefix query using the main FTS index. This is used
+ ** for internal sanity checking by the integrity-check in debug
+ ** mode only. */
+#ifdef SQLITE_DEBUG
+ if( pConfig->bPrefixIndex==0 || (flags & FTS5INDEX_QUERY_TEST_NOIDX) ){
+ assert( flags & FTS5INDEX_QUERY_PREFIX );
+ iIdx = 1+pConfig->nPrefix;
+ }else
+#endif
+ if( flags & FTS5INDEX_QUERY_PREFIX ){
+ int nChar = fts5IndexCharlen(pToken, nToken);
+ for(iIdx=1; iIdx<=pConfig->nPrefix; iIdx++){
+ if( pConfig->aPrefix[iIdx-1]==nChar ) break;
+ }
+ }
+
+ if( iIdx<=pConfig->nPrefix ){
+ /* Straight index lookup */
+ Fts5Structure *pStruct = fts5StructureRead(p);
+ buf.p[0] = (u8)(FTS5_MAIN_PREFIX + iIdx);
+ if( pStruct ){
+ fts5MultiIterNew(p, pStruct, flags | FTS5INDEX_QUERY_SKIPEMPTY,
+ pColset, buf.p, nToken+1, -1, 0, &pRet
+ );
+ fts5StructureRelease(pStruct);
+ }
+ }else{
+ /* Scan multiple terms in the main index */
+ int bDesc = (flags & FTS5INDEX_QUERY_DESC)!=0;
+ buf.p[0] = FTS5_MAIN_PREFIX;
+ fts5SetupPrefixIter(p, bDesc, buf.p, nToken+1, pColset, &pRet);
+ assert( p->rc!=SQLITE_OK || pRet->pColset==0 );
+ fts5IterSetOutputCb(&p->rc, pRet);
+ if( p->rc==SQLITE_OK ){
+ Fts5SegIter *pSeg = &pRet->aSeg[pRet->aFirst[1].iFirst];
+ if( pSeg->pLeaf ) pRet->xSetOutputs(pRet, pSeg);
+ }
+ }
+
+ if( p->rc ){
+ sqlite3Fts5IterClose((Fts5IndexIter*)pRet);
+ pRet = 0;
+ sqlite3Fts5IndexCloseReader(p);
+ }
+
+ *ppIter = (Fts5IndexIter*)pRet;
+ sqlite3Fts5BufferFree(&buf);
+ }
+ return fts5IndexReturn(p);
+}
+
+/*
+** Move to the next matching rowid.
+*/
+static int sqlite3Fts5IterNext(Fts5IndexIter *pIndexIter){
+ Fts5Iter *pIter = (Fts5Iter*)pIndexIter;
+ assert( pIter->pIndex->rc==SQLITE_OK );
+ fts5MultiIterNext(pIter->pIndex, pIter, 0, 0);
+ return fts5IndexReturn(pIter->pIndex);
+}
+
+/*
+** Move to the next matching term/rowid. Used by the fts5vocab module.
+*/
+static int sqlite3Fts5IterNextScan(Fts5IndexIter *pIndexIter){
+ Fts5Iter *pIter = (Fts5Iter*)pIndexIter;
+ Fts5Index *p = pIter->pIndex;
+
+ assert( pIter->pIndex->rc==SQLITE_OK );
+
+ fts5MultiIterNext(p, pIter, 0, 0);
+ if( p->rc==SQLITE_OK ){
+ Fts5SegIter *pSeg = &pIter->aSeg[ pIter->aFirst[1].iFirst ];
+ if( pSeg->pLeaf && pSeg->term.p[0]!=FTS5_MAIN_PREFIX ){
+ fts5DataRelease(pSeg->pLeaf);
+ pSeg->pLeaf = 0;
+ pIter->base.bEof = 1;
+ }
+ }
+
+ return fts5IndexReturn(pIter->pIndex);
+}
+
+/*
+** Move to the next matching rowid that occurs at or after iMatch. The
+** definition of "at or after" depends on whether this iterator iterates
+** in ascending or descending rowid order.
+*/
+static int sqlite3Fts5IterNextFrom(Fts5IndexIter *pIndexIter, i64 iMatch){
+ Fts5Iter *pIter = (Fts5Iter*)pIndexIter;
+ fts5MultiIterNextFrom(pIter->pIndex, pIter, iMatch);
+ return fts5IndexReturn(pIter->pIndex);
+}
+
+/*
+** Return the current term.
+*/
+static const char *sqlite3Fts5IterTerm(Fts5IndexIter *pIndexIter, int *pn){
+ int n;
+ const char *z = (const char*)fts5MultiIterTerm((Fts5Iter*)pIndexIter, &n);
+ *pn = n-1;
+ return &z[1];
+}
+
+/*
+** Close an iterator opened by an earlier call to sqlite3Fts5IndexQuery().
+*/
+static void sqlite3Fts5IterClose(Fts5IndexIter *pIndexIter){
+ if( pIndexIter ){
+ Fts5Iter *pIter = (Fts5Iter*)pIndexIter;
+ Fts5Index *pIndex = pIter->pIndex;
+ fts5MultiIterFree(pIter);
+ sqlite3Fts5IndexCloseReader(pIndex);
+ }
+}
+
+/*
+** Read and decode the "averages" record from the database.
+**
+** Parameter anSize must point to an array of size nCol, where nCol is
+** the number of user defined columns in the FTS table.
+*/
+static int sqlite3Fts5IndexGetAverages(Fts5Index *p, i64 *pnRow, i64 *anSize){
+ int nCol = p->pConfig->nCol;
+ Fts5Data *pData;
+
+ *pnRow = 0;
+ memset(anSize, 0, sizeof(i64) * nCol);
+ pData = fts5DataRead(p, FTS5_AVERAGES_ROWID);
+ if( p->rc==SQLITE_OK && pData->nn ){
+ int i = 0;
+ int iCol;
+ i += fts5GetVarint(&pData->p[i], (u64*)pnRow);
+ for(iCol=0; i<pData->nn && iCol<nCol; iCol++){
+ i += fts5GetVarint(&pData->p[i], (u64*)&anSize[iCol]);
+ }
+ }
+
+ fts5DataRelease(pData);
+ return fts5IndexReturn(p);
+}
+
+/*
+** Replace the current "averages" record with the contents of the buffer
+** supplied as the second argument.
+*/
+static int sqlite3Fts5IndexSetAverages(Fts5Index *p, const u8 *pData, int nData){
+ assert( p->rc==SQLITE_OK );
+ fts5DataWrite(p, FTS5_AVERAGES_ROWID, pData, nData);
+ return fts5IndexReturn(p);
+}
+
+/*
+** Return the total number of blocks this module has read from the %_data
+** table since it was created.
+*/
+static int sqlite3Fts5IndexReads(Fts5Index *p){
+ return p->nRead;
+}
+
+/*
+** Set the 32-bit cookie value stored at the start of all structure
+** records to the value passed as the second argument.
+**
+** Return SQLITE_OK if successful, or an SQLite error code if an error
+** occurs.
+*/
+static int sqlite3Fts5IndexSetCookie(Fts5Index *p, int iNew){
+ int rc; /* Return code */
+ Fts5Config *pConfig = p->pConfig; /* Configuration object */
+ u8 aCookie[4]; /* Binary representation of iNew */
+ sqlite3_blob *pBlob = 0;
+
+ assert( p->rc==SQLITE_OK );
+ sqlite3Fts5Put32(aCookie, iNew);
+
+ rc = sqlite3_blob_open(pConfig->db, pConfig->zDb, p->zDataTbl,
+ "block", FTS5_STRUCTURE_ROWID, 1, &pBlob
+ );
+ if( rc==SQLITE_OK ){
+ sqlite3_blob_write(pBlob, aCookie, 4, 0);
+ rc = sqlite3_blob_close(pBlob);
+ }
+
+ return rc;
+}
+
+static int sqlite3Fts5IndexLoadConfig(Fts5Index *p){
+ Fts5Structure *pStruct;
+ pStruct = fts5StructureRead(p);
+ fts5StructureRelease(pStruct);
+ return fts5IndexReturn(p);
+}
+
+
+/*************************************************************************
+**************************************************************************
+** Below this point is the implementation of the integrity-check
+** functionality.
+*/
+
+/*
+** Return a simple checksum value based on the arguments.
+*/
+static u64 sqlite3Fts5IndexEntryCksum(
+ i64 iRowid,
+ int iCol,
+ int iPos,
+ int iIdx,
+ const char *pTerm,
+ int nTerm
+){
+ int i;
+ u64 ret = iRowid;
+ ret += (ret<<3) + iCol;
+ ret += (ret<<3) + iPos;
+ if( iIdx>=0 ) ret += (ret<<3) + (FTS5_MAIN_PREFIX + iIdx);
+ for(i=0; i<nTerm; i++) ret += (ret<<3) + pTerm[i];
+ return ret;
+}
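+
+/* The values returned by this function are combined using XOR by the
+** integrity-check code (see fts5QueryCksum() below), so the aggregate
+** checksum does not depend on the order in which entries are visited. */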
+
+#ifdef SQLITE_DEBUG
+/*
+** This function is purely an internal test. It does not contribute to
+** FTS functionality, or even the integrity-check, in any way.
+**
+** Instead, it tests that the same set of pgno/rowid combinations are
+** visited regardless of whether the doclist-index identified by parameters
+** iSegid/iLeaf is iterated in forwards or reverse order.
+*/
+static void fts5TestDlidxReverse(
+ Fts5Index *p,
+ int iSegid, /* Segment id to load from */
+ int iLeaf /* Load doclist-index for this leaf */
+){
+ Fts5DlidxIter *pDlidx = 0;
+ u64 cksum1 = 13;
+ u64 cksum2 = 13;
+
+ for(pDlidx=fts5DlidxIterInit(p, 0, iSegid, iLeaf);
+ fts5DlidxIterEof(p, pDlidx)==0;
+ fts5DlidxIterNext(p, pDlidx)
+ ){
+ i64 iRowid = fts5DlidxIterRowid(pDlidx);
+ int pgno = fts5DlidxIterPgno(pDlidx);
+ assert( pgno>iLeaf );
+ cksum1 += iRowid + ((i64)pgno<<32);
+ }
+ fts5DlidxIterFree(pDlidx);
+ pDlidx = 0;
+
+ for(pDlidx=fts5DlidxIterInit(p, 1, iSegid, iLeaf);
+ fts5DlidxIterEof(p, pDlidx)==0;
+ fts5DlidxIterPrev(p, pDlidx)
+ ){
+ i64 iRowid = fts5DlidxIterRowid(pDlidx);
+ int pgno = fts5DlidxIterPgno(pDlidx);
+ assert( fts5DlidxIterPgno(pDlidx)>iLeaf );
+ cksum2 += iRowid + ((i64)pgno<<32);
+ }
+ fts5DlidxIterFree(pDlidx);
+ pDlidx = 0;
+
+ if( p->rc==SQLITE_OK && cksum1!=cksum2 ) p->rc = FTS5_CORRUPT;
+}
+
+static int fts5QueryCksum(
+ Fts5Index *p, /* Fts5 index object */
+ int iIdx,
+ const char *z, /* Index key to query for */
+ int n, /* Size of index key in bytes */
+ int flags, /* Flags for Fts5IndexQuery */
+ u64 *pCksum /* IN/OUT: Checksum value */
+){
+ int eDetail = p->pConfig->eDetail;
+ u64 cksum = *pCksum;
+ Fts5IndexIter *pIter = 0;
+ int rc = sqlite3Fts5IndexQuery(p, z, n, flags, 0, &pIter);
+
+ while( rc==SQLITE_OK && 0==sqlite3Fts5IterEof(pIter) ){
+ i64 rowid = pIter->iRowid;
+
+ if( eDetail==FTS5_DETAIL_NONE ){
+ cksum ^= sqlite3Fts5IndexEntryCksum(rowid, 0, 0, iIdx, z, n);
+ }else{
+ Fts5PoslistReader sReader;
+ for(sqlite3Fts5PoslistReaderInit(pIter->pData, pIter->nData, &sReader);
+ sReader.bEof==0;
+ sqlite3Fts5PoslistReaderNext(&sReader)
+ ){
+ int iCol = FTS5_POS2COLUMN(sReader.iPos);
+ int iOff = FTS5_POS2OFFSET(sReader.iPos);
+ cksum ^= sqlite3Fts5IndexEntryCksum(rowid, iCol, iOff, iIdx, z, n);
+ }
+ }
+ if( rc==SQLITE_OK ){
+ rc = sqlite3Fts5IterNext(pIter);
+ }
+ }
+ sqlite3Fts5IterClose(pIter);
+
+ *pCksum = cksum;
+ return rc;
+}
+
+/*
+** Check if buffer z[], size n bytes, contains a series of valid utf-8
+** encoded codepoints. If so, return 0. Otherwise, if the buffer does not
+** contain valid utf-8, return non-zero.
+*/
+static int fts5TestUtf8(const char *z, int n){
+ int i = 0;
+ assert_nc( n>0 );
+ while( i<n ){
+ if( (z[i] & 0x80)==0x00 ){
+ i++;
+ }else
+ if( (z[i] & 0xE0)==0xC0 ){
+ if( i+1>=n || (z[i+1] & 0xC0)!=0x80 ) return 1;
+ i += 2;
+ }else
+ if( (z[i] & 0xF0)==0xE0 ){
+ if( i+2>=n || (z[i+1] & 0xC0)!=0x80 || (z[i+2] & 0xC0)!=0x80 ) return 1;
+ i += 3;
+ }else
+ if( (z[i] & 0xF8)==0xF0 ){
+      if( i+3>=n || (z[i+1] & 0xC0)!=0x80 || (z[i+2] & 0xC0)!=0x80 ) return 1;
+      if( (z[i+3] & 0xC0)!=0x80 ) return 1;
+      i += 4;
+ }else{
+ return 1;
+ }
+ }
+
+ return 0;
+}
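+
+/* For example (illustrative only): the two-byte sequence 0xC3 0xA9
+** (U+00E9, "e" with acute accent) is accepted because (0xC3 & 0xE0)==0xC0
+** and (0xA9 & 0xC0)==0x80, whereas a lone continuation byte such as 0x80,
+** or a lead byte truncated by the end of the buffer, makes the function
+** return non-zero.
+*/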
+
+/*
+** This function is also purely an internal test. It does not contribute to
+** FTS functionality, or even the integrity-check, in any way.
+*/
+static void fts5TestTerm(
+ Fts5Index *p,
+ Fts5Buffer *pPrev, /* Previous term */
+ const char *z, int n, /* Possibly new term to test */
+ u64 expected,
+ u64 *pCksum
+){
+ int rc = p->rc;
+ if( pPrev->n==0 ){
+ fts5BufferSet(&rc, pPrev, n, (const u8*)z);
+ }else
+ if( rc==SQLITE_OK && (pPrev->n!=n || memcmp(pPrev->p, z, n)) ){
+ u64 cksum3 = *pCksum;
+ const char *zTerm = (const char*)&pPrev->p[1]; /* term sans prefix-byte */
+ int nTerm = pPrev->n-1; /* Size of zTerm in bytes */
+ int iIdx = (pPrev->p[0] - FTS5_MAIN_PREFIX);
+ int flags = (iIdx==0 ? 0 : FTS5INDEX_QUERY_PREFIX);
+ u64 ck1 = 0;
+ u64 ck2 = 0;
+
+ /* Check that the results returned for ASC and DESC queries are
+ ** the same. If not, call this corruption. */
+ rc = fts5QueryCksum(p, iIdx, zTerm, nTerm, flags, &ck1);
+ if( rc==SQLITE_OK ){
+ int f = flags|FTS5INDEX_QUERY_DESC;
+ rc = fts5QueryCksum(p, iIdx, zTerm, nTerm, f, &ck2);
+ }
+ if( rc==SQLITE_OK && ck1!=ck2 ) rc = FTS5_CORRUPT;
+
+ /* If this is a prefix query, check that the results returned if the
+    ** index is disabled are the same. In both ASC and DESC order.
+ **
+ ** This check may only be performed if the hash table is empty. This
+ ** is because the hash table only supports a single scan query at
+ ** a time, and the multi-iter loop from which this function is called
+ ** is already performing such a scan.
+ **
+ ** Also only do this if buffer zTerm contains nTerm bytes of valid
+ ** utf-8. Otherwise, the last part of the buffer contents might contain
+ ** a non-utf-8 sequence that happens to be a prefix of a valid utf-8
+ ** character stored in the main fts index, which will cause the
+ ** test to fail. */
+ if( p->nPendingData==0 && 0==fts5TestUtf8(zTerm, nTerm) ){
+ if( iIdx>0 && rc==SQLITE_OK ){
+ int f = flags|FTS5INDEX_QUERY_TEST_NOIDX;
+ ck2 = 0;
+ rc = fts5QueryCksum(p, iIdx, zTerm, nTerm, f, &ck2);
+ if( rc==SQLITE_OK && ck1!=ck2 ) rc = FTS5_CORRUPT;
+ }
+ if( iIdx>0 && rc==SQLITE_OK ){
+ int f = flags|FTS5INDEX_QUERY_TEST_NOIDX|FTS5INDEX_QUERY_DESC;
+ ck2 = 0;
+ rc = fts5QueryCksum(p, iIdx, zTerm, nTerm, f, &ck2);
+ if( rc==SQLITE_OK && ck1!=ck2 ) rc = FTS5_CORRUPT;
+ }
+ }
+
+ cksum3 ^= ck1;
+ fts5BufferSet(&rc, pPrev, n, (const u8*)z);
+
+ if( rc==SQLITE_OK && cksum3!=expected ){
+ rc = FTS5_CORRUPT;
+ }
+ *pCksum = cksum3;
+ }
+ p->rc = rc;
+}
+
+#else
+# define fts5TestDlidxReverse(x,y,z)
+# define fts5TestTerm(u,v,w,x,y,z)
+#endif
+
+/*
+** Check that:
+**
+** 1) All leaves of pSeg between iFirst and iLast (inclusive) exist and
+** contain zero terms.
+** 2) All leaves of pSeg between iNoRowid and iLast (inclusive) exist and
+** contain zero rowids.
+*/
+static void fts5IndexIntegrityCheckEmpty(
+ Fts5Index *p,
+ Fts5StructureSegment *pSeg, /* Segment to check internal consistency */
+ int iFirst,
+ int iNoRowid,
+ int iLast
+){
+ int i;
+
+ /* Now check that the iter.nEmpty leaves following the current leaf
+ ** (a) exist and (b) contain no terms. */
+ for(i=iFirst; p->rc==SQLITE_OK && i<=iLast; i++){
+ Fts5Data *pLeaf = fts5DataRead(p, FTS5_SEGMENT_ROWID(pSeg->iSegid, i));
+ if( pLeaf ){
+ if( !fts5LeafIsTermless(pLeaf) ) p->rc = FTS5_CORRUPT;
+ if( i>=iNoRowid && 0!=fts5LeafFirstRowidOff(pLeaf) ) p->rc = FTS5_CORRUPT;
+ }
+ fts5DataRelease(pLeaf);
+ }
+}
+
+static void fts5IntegrityCheckPgidx(Fts5Index *p, Fts5Data *pLeaf){
+ int iTermOff = 0;
+ int ii;
+
+ Fts5Buffer buf1 = {0,0,0};
+ Fts5Buffer buf2 = {0,0,0};
+
+ ii = pLeaf->szLeaf;
+ while( ii<pLeaf->nn && p->rc==SQLITE_OK ){
+ int res;
+ int iOff;
+ int nIncr;
+
+ ii += fts5GetVarint32(&pLeaf->p[ii], nIncr);
+ iTermOff += nIncr;
+ iOff = iTermOff;
+
+ if( iOff>=pLeaf->szLeaf ){
+ p->rc = FTS5_CORRUPT;
+ }else if( iTermOff==nIncr ){
+ int nByte;
+ iOff += fts5GetVarint32(&pLeaf->p[iOff], nByte);
+ if( (iOff+nByte)>pLeaf->szLeaf ){
+ p->rc = FTS5_CORRUPT;
+ }else{
+ fts5BufferSet(&p->rc, &buf1, nByte, &pLeaf->p[iOff]);
+ }
+ }else{
+ int nKeep, nByte;
+ iOff += fts5GetVarint32(&pLeaf->p[iOff], nKeep);
+ iOff += fts5GetVarint32(&pLeaf->p[iOff], nByte);
+ if( nKeep>buf1.n || (iOff+nByte)>pLeaf->szLeaf ){
+ p->rc = FTS5_CORRUPT;
+ }else{
+ buf1.n = nKeep;
+ fts5BufferAppendBlob(&p->rc, &buf1, nByte, &pLeaf->p[iOff]);
+ }
+
+ if( p->rc==SQLITE_OK ){
+ res = fts5BufferCompare(&buf1, &buf2);
+ if( res<=0 ) p->rc = FTS5_CORRUPT;
+ }
+ }
+ fts5BufferSet(&p->rc, &buf2, buf1.n, buf1.p);
+ }
+
+ fts5BufferFree(&buf1);
+ fts5BufferFree(&buf2);
+}
+
+static void fts5IndexIntegrityCheckSegment(
+ Fts5Index *p, /* FTS5 backend object */
+ Fts5StructureSegment *pSeg /* Segment to check internal consistency */
+){
+ Fts5Config *pConfig = p->pConfig;
+ sqlite3_stmt *pStmt = 0;
+ int rc2;
+ int iIdxPrevLeaf = pSeg->pgnoFirst-1;
+ int iDlidxPrevLeaf = pSeg->pgnoLast;
+
+ if( pSeg->pgnoFirst==0 ) return;
+
+ fts5IndexPrepareStmt(p, &pStmt, sqlite3_mprintf(
+ "SELECT segid, term, (pgno>>1), (pgno&1) FROM %Q.'%q_idx' WHERE segid=%d "
+ "ORDER BY 1, 2",
+ pConfig->zDb, pConfig->zName, pSeg->iSegid
+ ));
+
+ /* Iterate through the b-tree hierarchy. */
+ while( p->rc==SQLITE_OK && SQLITE_ROW==sqlite3_step(pStmt) ){
+ i64 iRow; /* Rowid for this leaf */
+ Fts5Data *pLeaf; /* Data for this leaf */
+
+ const char *zIdxTerm = (const char*)sqlite3_column_blob(pStmt, 1);
+ int nIdxTerm = sqlite3_column_bytes(pStmt, 1);
+ int iIdxLeaf = sqlite3_column_int(pStmt, 2);
+ int bIdxDlidx = sqlite3_column_int(pStmt, 3);
+
+ /* If the leaf in question has already been trimmed from the segment,
+ ** ignore this b-tree entry. Otherwise, load it into memory. */
+ if( iIdxLeaf<pSeg->pgnoFirst ) continue;
+ iRow = FTS5_SEGMENT_ROWID(pSeg->iSegid, iIdxLeaf);
+ pLeaf = fts5LeafRead(p, iRow);
+ if( pLeaf==0 ) break;
+
+ /* Check that the leaf contains at least one term, and that it is equal
+ ** to or larger than the split-key in zIdxTerm. Also check that if there
+ ** is also a rowid pointer within the leaf page header, it points to a
+ ** location before the term. */
+ if( pLeaf->nn<=pLeaf->szLeaf ){
+ p->rc = FTS5_CORRUPT;
+ }else{
+ int iOff; /* Offset of first term on leaf */
+ int iRowidOff; /* Offset of first rowid on leaf */
+ int nTerm; /* Size of term on leaf in bytes */
+ int res; /* Comparison of term and split-key */
+
+ iOff = fts5LeafFirstTermOff(pLeaf);
+ iRowidOff = fts5LeafFirstRowidOff(pLeaf);
+ if( iRowidOff>=iOff || iOff>=pLeaf->szLeaf ){
+ p->rc = FTS5_CORRUPT;
+ }else{
+ iOff += fts5GetVarint32(&pLeaf->p[iOff], nTerm);
+ res = fts5Memcmp(&pLeaf->p[iOff], zIdxTerm, MIN(nTerm, nIdxTerm));
+ if( res==0 ) res = nTerm - nIdxTerm;
+ if( res<0 ) p->rc = FTS5_CORRUPT;
+ }
+
+ fts5IntegrityCheckPgidx(p, pLeaf);
+ }
+ fts5DataRelease(pLeaf);
+ if( p->rc ) break;
+
+ /* Now check that the iter.nEmpty leaves following the current leaf
+ ** (a) exist and (b) contain no terms. */
+ fts5IndexIntegrityCheckEmpty(
+ p, pSeg, iIdxPrevLeaf+1, iDlidxPrevLeaf+1, iIdxLeaf-1
+ );
+ if( p->rc ) break;
+
+ /* If there is a doclist-index, check that it looks right. */
+ if( bIdxDlidx ){
+ Fts5DlidxIter *pDlidx = 0; /* For iterating through doclist index */
+ int iPrevLeaf = iIdxLeaf;
+ int iSegid = pSeg->iSegid;
+ int iPg = 0;
+ i64 iKey;
+
+ for(pDlidx=fts5DlidxIterInit(p, 0, iSegid, iIdxLeaf);
+ fts5DlidxIterEof(p, pDlidx)==0;
+ fts5DlidxIterNext(p, pDlidx)
+ ){
+
+ /* Check any rowid-less pages that occur before the current leaf. */
+ for(iPg=iPrevLeaf+1; iPg<fts5DlidxIterPgno(pDlidx); iPg++){
+ iKey = FTS5_SEGMENT_ROWID(iSegid, iPg);
+ pLeaf = fts5DataRead(p, iKey);
+ if( pLeaf ){
+ if( fts5LeafFirstRowidOff(pLeaf)!=0 ) p->rc = FTS5_CORRUPT;
+ fts5DataRelease(pLeaf);
+ }
+ }
+ iPrevLeaf = fts5DlidxIterPgno(pDlidx);
+
+ /* Check that the leaf page indicated by the iterator really does
+ ** contain the rowid suggested by the same. */
+ iKey = FTS5_SEGMENT_ROWID(iSegid, iPrevLeaf);
+ pLeaf = fts5DataRead(p, iKey);
+ if( pLeaf ){
+ i64 iRowid;
+ int iRowidOff = fts5LeafFirstRowidOff(pLeaf);
+ ASSERT_SZLEAF_OK(pLeaf);
+ if( iRowidOff>=pLeaf->szLeaf ){
+ p->rc = FTS5_CORRUPT;
+ }else{
+ fts5GetVarint(&pLeaf->p[iRowidOff], (u64*)&iRowid);
+ if( iRowid!=fts5DlidxIterRowid(pDlidx) ) p->rc = FTS5_CORRUPT;
+ }
+ fts5DataRelease(pLeaf);
+ }
+ }
+
+ iDlidxPrevLeaf = iPg;
+ fts5DlidxIterFree(pDlidx);
+ fts5TestDlidxReverse(p, iSegid, iIdxLeaf);
+ }else{
+ iDlidxPrevLeaf = pSeg->pgnoLast;
+ /* TODO: Check there is no doclist index */
+ }
+
+ iIdxPrevLeaf = iIdxLeaf;
+ }
+
+ rc2 = sqlite3_finalize(pStmt);
+ if( p->rc==SQLITE_OK ) p->rc = rc2;
+
+ /* Page iter.iLeaf must now be the rightmost leaf-page in the segment */
+#if 0
+ if( p->rc==SQLITE_OK && iter.iLeaf!=pSeg->pgnoLast ){
+ p->rc = FTS5_CORRUPT;
+ }
+#endif
+}
+
+
+/*
+** Run internal checks to ensure that the FTS index (a) is internally
+** consistent and (b) contains entries for which the XOR of the checksums
+** as calculated by sqlite3Fts5IndexEntryCksum() is cksum.
+**
+** Return SQLITE_CORRUPT if any of the internal checks fail, or if the
+** checksum does not match. Return SQLITE_OK if all checks pass without
+** error, or some other SQLite error code if another error (e.g. OOM)
+** occurs.
+*/
+static int sqlite3Fts5IndexIntegrityCheck(Fts5Index *p, u64 cksum){
+ int eDetail = p->pConfig->eDetail;
+ u64 cksum2 = 0; /* Checksum based on contents of indexes */
+ Fts5Buffer poslist = {0,0,0}; /* Buffer used to hold a poslist */
+ Fts5Iter *pIter; /* Used to iterate through entire index */
+ Fts5Structure *pStruct; /* Index structure */
+
+#ifdef SQLITE_DEBUG
+ /* Used by extra internal tests only run if NDEBUG is not defined */
+  u64 cksum3 = 0;                 /* Checksum based on per-term queries */
+ Fts5Buffer term = {0,0,0}; /* Buffer used to hold most recent term */
+#endif
+ const int flags = FTS5INDEX_QUERY_NOOUTPUT;
+
+ /* Load the FTS index structure */
+ pStruct = fts5StructureRead(p);
+
+ /* Check that the internal nodes of each segment match the leaves */
+ if( pStruct ){
+ int iLvl, iSeg;
+ for(iLvl=0; iLvl<pStruct->nLevel; iLvl++){
+ for(iSeg=0; iSeg<pStruct->aLevel[iLvl].nSeg; iSeg++){
+ Fts5StructureSegment *pSeg = &pStruct->aLevel[iLvl].aSeg[iSeg];
+ fts5IndexIntegrityCheckSegment(p, pSeg);
+ }
+ }
+ }
+
+ /* The cksum argument passed to this function is a checksum calculated
+ ** based on all expected entries in the FTS index (including prefix index
+ ** entries). This block checks that a checksum calculated based on the
+ ** actual contents of FTS index is identical.
+ **
+ ** Two versions of the same checksum are calculated. The first (stack
+ ** variable cksum2) based on entries extracted from the full-text index
+ ** while doing a linear scan of each individual index in turn.
+ **
+  ** As each term is visited by the linear scans, a separate query for the
+ ** same term is performed. cksum3 is calculated based on the entries
+ ** extracted by these queries.
+ */
+ for(fts5MultiIterNew(p, pStruct, flags, 0, 0, 0, -1, 0, &pIter);
+ fts5MultiIterEof(p, pIter)==0;
+ fts5MultiIterNext(p, pIter, 0, 0)
+ ){
+ int n; /* Size of term in bytes */
+ i64 iPos = 0; /* Position read from poslist */
+ int iOff = 0; /* Offset within poslist */
+ i64 iRowid = fts5MultiIterRowid(pIter);
+ char *z = (char*)fts5MultiIterTerm(pIter, &n);
+
+ /* If this is a new term, query for it. Update cksum3 with the results. */
+ fts5TestTerm(p, &term, z, n, cksum2, &cksum3);
+
+ if( eDetail==FTS5_DETAIL_NONE ){
+ if( 0==fts5MultiIterIsEmpty(p, pIter) ){
+ cksum2 ^= sqlite3Fts5IndexEntryCksum(iRowid, 0, 0, -1, z, n);
+ }
+ }else{
+ poslist.n = 0;
+ fts5SegiterPoslist(p, &pIter->aSeg[pIter->aFirst[1].iFirst], 0, &poslist);
+ while( 0==sqlite3Fts5PoslistNext64(poslist.p, poslist.n, &iOff, &iPos) ){
+ int iCol = FTS5_POS2COLUMN(iPos);
+ int iTokOff = FTS5_POS2OFFSET(iPos);
+ cksum2 ^= sqlite3Fts5IndexEntryCksum(iRowid, iCol, iTokOff, -1, z, n);
+ }
+ }
+ }
+ fts5TestTerm(p, &term, 0, 0, cksum2, &cksum3);
+
+ fts5MultiIterFree(pIter);
+ if( p->rc==SQLITE_OK && cksum!=cksum2 ) p->rc = FTS5_CORRUPT;
+
+ fts5StructureRelease(pStruct);
+#ifdef SQLITE_DEBUG
+ fts5BufferFree(&term);
+#endif
+ fts5BufferFree(&poslist);
+ return fts5IndexReturn(p);
+}
+
+/*************************************************************************
+**************************************************************************
+** Below this point is the implementation of the fts5_decode() scalar
+** function only.
+*/
+
+/*
+** Decode a segment-data rowid from the %_data table. This function is
+** the opposite of macro FTS5_SEGMENT_ROWID().
+*/
+static void fts5DecodeRowid(
+ i64 iRowid, /* Rowid from %_data table */
+ int *piSegid, /* OUT: Segment id */
+ int *pbDlidx, /* OUT: Dlidx flag */
+ int *piHeight, /* OUT: Height */
+ int *piPgno /* OUT: Page number */
+){
+ *piPgno = (int)(iRowid & (((i64)1 << FTS5_DATA_PAGE_B) - 1));
+ iRowid >>= FTS5_DATA_PAGE_B;
+
+ *piHeight = (int)(iRowid & (((i64)1 << FTS5_DATA_HEIGHT_B) - 1));
+ iRowid >>= FTS5_DATA_HEIGHT_B;
+
+ *pbDlidx = (int)(iRowid & 0x0001);
+ iRowid >>= FTS5_DATA_DLI_B;
+
+ *piSegid = (int)(iRowid & (((i64)1 << FTS5_DATA_ID_B) - 1));
+}
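+
+/* The layout decoded above, from least- to most-significant bits of the
+** 64-bit rowid (bit widths taken from the FTS5_DATA_*_B constants used by
+** the code, so this is a sketch of the encoding rather than a separate
+** specification):
+**
+**   pgno     FTS5_DATA_PAGE_B bits
+**   height   FTS5_DATA_HEIGHT_B bits
+**   dlidx    FTS5_DATA_DLI_B bit
+**   segid    FTS5_DATA_ID_B bits
+*/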
+
+static void fts5DebugRowid(int *pRc, Fts5Buffer *pBuf, i64 iKey){
+  int iSegid, iHeight, iPgno, bDlidx;   /* Rowid components */
+ fts5DecodeRowid(iKey, &iSegid, &bDlidx, &iHeight, &iPgno);
+
+ if( iSegid==0 ){
+ if( iKey==FTS5_AVERAGES_ROWID ){
+ sqlite3Fts5BufferAppendPrintf(pRc, pBuf, "{averages} ");
+ }else{
+ sqlite3Fts5BufferAppendPrintf(pRc, pBuf, "{structure}");
+ }
+ }
+ else{
+ sqlite3Fts5BufferAppendPrintf(pRc, pBuf, "{%ssegid=%d h=%d pgno=%d}",
+ bDlidx ? "dlidx " : "", iSegid, iHeight, iPgno
+ );
+ }
+}
+
+static void fts5DebugStructure(
+ int *pRc, /* IN/OUT: error code */
+ Fts5Buffer *pBuf,
+ Fts5Structure *p
+){
+ int iLvl, iSeg; /* Iterate through levels, segments */
+
+ for(iLvl=0; iLvl<p->nLevel; iLvl++){
+ Fts5StructureLevel *pLvl = &p->aLevel[iLvl];
+ sqlite3Fts5BufferAppendPrintf(pRc, pBuf,
+ " {lvl=%d nMerge=%d nSeg=%d", iLvl, pLvl->nMerge, pLvl->nSeg
+ );
+ for(iSeg=0; iSeg<pLvl->nSeg; iSeg++){
+ Fts5StructureSegment *pSeg = &pLvl->aSeg[iSeg];
+ sqlite3Fts5BufferAppendPrintf(pRc, pBuf, " {id=%d leaves=%d..%d}",
+ pSeg->iSegid, pSeg->pgnoFirst, pSeg->pgnoLast
+ );
+ }
+ sqlite3Fts5BufferAppendPrintf(pRc, pBuf, "}");
+ }
+}
+
+/*
+** This is part of the fts5_decode() debugging aid.
+**
+** Arguments pBlob/nBlob contain a serialized Fts5Structure object. This
+** function appends a human-readable representation of the same object
+** to the buffer passed as the second argument.
+*/
+static void fts5DecodeStructure(
+ int *pRc, /* IN/OUT: error code */
+ Fts5Buffer *pBuf,
+ const u8 *pBlob, int nBlob
+){
+ int rc; /* Return code */
+ Fts5Structure *p = 0; /* Decoded structure object */
+
+ rc = fts5StructureDecode(pBlob, nBlob, 0, &p);
+ if( rc!=SQLITE_OK ){
+ *pRc = rc;
+ return;
+ }
+
+ fts5DebugStructure(pRc, pBuf, p);
+ fts5StructureRelease(p);
+}
+
+/*
+** This is part of the fts5_decode() debugging aid.
+**
+** Arguments pBlob/nBlob contain an "averages" record. This function
+** appends a human-readable representation of record to the buffer passed
+** as the second argument.
+*/
+static void fts5DecodeAverages(
+ int *pRc, /* IN/OUT: error code */
+ Fts5Buffer *pBuf,
+ const u8 *pBlob, int nBlob
+){
+ int i = 0;
+ const char *zSpace = "";
+
+ while( i<nBlob ){
+ u64 iVal;
+ i += sqlite3Fts5GetVarint(&pBlob[i], &iVal);
+ sqlite3Fts5BufferAppendPrintf(pRc, pBuf, "%s%d", zSpace, (int)iVal);
+ zSpace = " ";
+ }
+}
+
+/*
+** Buffer (a/n) is assumed to contain a list of serialized varints. Read
+** each varint and append its string representation to buffer pBuf. Return
+** after either the input buffer is exhausted or a 0 value is read.
+**
+** The return value is the number of bytes read from the input buffer.
+*/
+static int fts5DecodePoslist(int *pRc, Fts5Buffer *pBuf, const u8 *a, int n){
+ int iOff = 0;
+ while( iOff<n ){
+ int iVal;
+ iOff += fts5GetVarint32(&a[iOff], iVal);
+ sqlite3Fts5BufferAppendPrintf(pRc, pBuf, " %d", iVal);
+ }
+ return iOff;
+}
+
+/*
+** The start of buffer (a/n) contains the start of a doclist. The doclist
+** may or may not finish within the buffer. This function appends a text
+** representation of the part of the doclist that is present to buffer
+** pBuf.
+**
+** The return value is the number of bytes read from the input buffer.
+*/
+static int fts5DecodeDoclist(int *pRc, Fts5Buffer *pBuf, const u8 *a, int n){
+ i64 iDocid = 0;
+ int iOff = 0;
+
+ if( n>0 ){
+ iOff = sqlite3Fts5GetVarint(a, (u64*)&iDocid);
+ sqlite3Fts5BufferAppendPrintf(pRc, pBuf, " id=%lld", iDocid);
+ }
+ while( iOff<n ){
+ int nPos;
+ int bDel;
+ iOff += fts5GetPoslistSize(&a[iOff], &nPos, &bDel);
+ sqlite3Fts5BufferAppendPrintf(pRc, pBuf, " nPos=%d%s", nPos, bDel?"*":"");
+ iOff += fts5DecodePoslist(pRc, pBuf, &a[iOff], MIN(n-iOff, nPos));
+ if( iOff<n ){
+ i64 iDelta;
+ iOff += sqlite3Fts5GetVarint(&a[iOff], (u64*)&iDelta);
+ iDocid += iDelta;
+ sqlite3Fts5BufferAppendPrintf(pRc, pBuf, " id=%lld", iDocid);
+ }
+ }
+
+ return iOff;
+}
+
+/*
+** This function is part of the fts5_decode() debugging function. It is
+** only ever used with detail=none tables.
+**
+** Buffer (pData/nData) contains a doclist in the format used by detail=none
+** tables. This function appends a human-readable version of that list to
+** buffer pBuf.
+**
+** If *pRc is other than SQLITE_OK when this function is called, it is a
+** no-op. If an OOM or other error occurs within this function, *pRc is
+** set to an SQLite error code before returning. The final state of buffer
+** pBuf is undefined in this case.
+*/
+static void fts5DecodeRowidList(
+ int *pRc, /* IN/OUT: Error code */
+ Fts5Buffer *pBuf, /* Buffer to append text to */
+ const u8 *pData, int nData /* Data to decode list-of-rowids from */
+){
+ int i = 0;
+ i64 iRowid = 0;
+
+ while( i<nData ){
+ const char *zApp = "";
+ u64 iVal;
+ i += sqlite3Fts5GetVarint(&pData[i], &iVal);
+ iRowid += iVal;
+
+ if( i<nData && pData[i]==0x00 ){
+ i++;
+ if( i<nData && pData[i]==0x00 ){
+ i++;
+ zApp = "+";
+ }else{
+ zApp = "*";
+ }
+ }
+
+ sqlite3Fts5BufferAppendPrintf(pRc, pBuf, " %lld%s", iRowid, zApp);
+ }
+}
+
+/*
+** The implementation of user-defined scalar function fts5_decode().
+*/
+static void fts5DecodeFunction(
+ sqlite3_context *pCtx, /* Function call context */
+ int nArg, /* Number of args (always 2) */
+ sqlite3_value **apVal /* Function arguments */
+){
+ i64 iRowid; /* Rowid for record being decoded */
+ int iSegid,iHeight,iPgno,bDlidx;/* Rowid components */
+ const u8 *aBlob; int n; /* Record to decode */
+ u8 *a = 0;
+ Fts5Buffer s; /* Build up text to return here */
+ int rc = SQLITE_OK; /* Return code */
+ sqlite3_int64 nSpace = 0;
+ int eDetailNone = (sqlite3_user_data(pCtx)!=0);
+
+ assert( nArg==2 );
+ UNUSED_PARAM(nArg);
+ memset(&s, 0, sizeof(Fts5Buffer));
+ iRowid = sqlite3_value_int64(apVal[0]);
+
+ /* Make a copy of the second argument (a blob) in aBlob[]. The aBlob[]
+ ** copy is followed by FTS5_DATA_ZERO_PADDING 0x00 bytes, which prevents
+ ** buffer overreads even if the record is corrupt. */
+ n = sqlite3_value_bytes(apVal[1]);
+ aBlob = sqlite3_value_blob(apVal[1]);
+ nSpace = n + FTS5_DATA_ZERO_PADDING;
+ a = (u8*)sqlite3Fts5MallocZero(&rc, nSpace);
+ if( a==0 ) goto decode_out;
+ if( n>0 ) memcpy(a, aBlob, n);
+
+ fts5DecodeRowid(iRowid, &iSegid, &bDlidx, &iHeight, &iPgno);
+
+ fts5DebugRowid(&rc, &s, iRowid);
+ if( bDlidx ){
+ Fts5Data dlidx;
+ Fts5DlidxLvl lvl;
+
+ dlidx.p = a;
+ dlidx.nn = n;
+
+ memset(&lvl, 0, sizeof(Fts5DlidxLvl));
+ lvl.pData = &dlidx;
+ lvl.iLeafPgno = iPgno;
+
+ for(fts5DlidxLvlNext(&lvl); lvl.bEof==0; fts5DlidxLvlNext(&lvl)){
+ sqlite3Fts5BufferAppendPrintf(&rc, &s,
+ " %d(%lld)", lvl.iLeafPgno, lvl.iRowid
+ );
+ }
+ }else if( iSegid==0 ){
+ if( iRowid==FTS5_AVERAGES_ROWID ){
+ fts5DecodeAverages(&rc, &s, a, n);
+ }else{
+ fts5DecodeStructure(&rc, &s, a, n);
+ }
+ }else if( eDetailNone ){
+ Fts5Buffer term; /* Current term read from page */
+ int szLeaf;
+ int iPgidxOff = szLeaf = fts5GetU16(&a[2]);
+ int iTermOff;
+ int nKeep = 0;
+ int iOff;
+
+ memset(&term, 0, sizeof(Fts5Buffer));
+
+ /* Decode any entries that occur before the first term. */
+ if( szLeaf<n ){
+ iPgidxOff += fts5GetVarint32(&a[iPgidxOff], iTermOff);
+ }else{
+ iTermOff = szLeaf;
+ }
+ fts5DecodeRowidList(&rc, &s, &a[4], iTermOff-4);
+
+ iOff = iTermOff;
+ while( iOff<szLeaf ){
+ int nAppend;
+
+      /* Read the term data for the next term */
+ iOff += fts5GetVarint32(&a[iOff], nAppend);
+ term.n = nKeep;
+ fts5BufferAppendBlob(&rc, &term, nAppend, &a[iOff]);
+ sqlite3Fts5BufferAppendPrintf(
+ &rc, &s, " term=%.*s", term.n, (const char*)term.p
+ );
+ iOff += nAppend;
+
+ /* Figure out where the doclist for this term ends */
+ if( iPgidxOff<n ){
+ int nIncr;
+ iPgidxOff += fts5GetVarint32(&a[iPgidxOff], nIncr);
+ iTermOff += nIncr;
+ }else{
+ iTermOff = szLeaf;
+ }
+
+ fts5DecodeRowidList(&rc, &s, &a[iOff], iTermOff-iOff);
+ iOff = iTermOff;
+ if( iOff<szLeaf ){
+ iOff += fts5GetVarint32(&a[iOff], nKeep);
+ }
+ }
+
+ fts5BufferFree(&term);
+ }else{
+ Fts5Buffer term; /* Current term read from page */
+ int szLeaf; /* Offset of pgidx in a[] */
+ int iPgidxOff;
+ int iPgidxPrev = 0; /* Previous value read from pgidx */
+ int iTermOff = 0;
+ int iRowidOff = 0;
+ int iOff;
+ int nDoclist;
+
+ memset(&term, 0, sizeof(Fts5Buffer));
+
+ if( n<4 ){
+ sqlite3Fts5BufferSet(&rc, &s, 7, (const u8*)"corrupt");
+ goto decode_out;
+ }else{
+ iRowidOff = fts5GetU16(&a[0]);
+ iPgidxOff = szLeaf = fts5GetU16(&a[2]);
+ if( iPgidxOff<n ){
+ fts5GetVarint32(&a[iPgidxOff], iTermOff);
+ }else if( iPgidxOff>n ){
+ rc = FTS5_CORRUPT;
+ goto decode_out;
+ }
+ }
+
+ /* Decode the position list tail at the start of the page */
+ if( iRowidOff!=0 ){
+ iOff = iRowidOff;
+ }else if( iTermOff!=0 ){
+ iOff = iTermOff;
+ }else{
+ iOff = szLeaf;
+ }
+ if( iOff>n ){
+ rc = FTS5_CORRUPT;
+ goto decode_out;
+ }
+ fts5DecodePoslist(&rc, &s, &a[4], iOff-4);
+
+ /* Decode any more doclist data that appears on the page before the
+ ** first term. */
+ nDoclist = (iTermOff ? iTermOff : szLeaf) - iOff;
+ if( nDoclist+iOff>n ){
+ rc = FTS5_CORRUPT;
+ goto decode_out;
+ }
+ fts5DecodeDoclist(&rc, &s, &a[iOff], nDoclist);
+
+ while( iPgidxOff<n && rc==SQLITE_OK ){
+ int bFirst = (iPgidxOff==szLeaf); /* True for first term on page */
+ int nByte; /* Bytes of data */
+ int iEnd;
+
+ iPgidxOff += fts5GetVarint32(&a[iPgidxOff], nByte);
+ iPgidxPrev += nByte;
+ iOff = iPgidxPrev;
+
+ if( iPgidxOff<n ){
+ fts5GetVarint32(&a[iPgidxOff], nByte);
+ iEnd = iPgidxPrev + nByte;
+ }else{
+ iEnd = szLeaf;
+ }
+ if( iEnd>szLeaf ){
+ rc = FTS5_CORRUPT;
+ break;
+ }
+
+ if( bFirst==0 ){
+ iOff += fts5GetVarint32(&a[iOff], nByte);
+ if( nByte>term.n ){
+ rc = FTS5_CORRUPT;
+ break;
+ }
+ term.n = nByte;
+ }
+ iOff += fts5GetVarint32(&a[iOff], nByte);
+ if( iOff+nByte>n ){
+ rc = FTS5_CORRUPT;
+ break;
+ }
+ fts5BufferAppendBlob(&rc, &term, nByte, &a[iOff]);
+ iOff += nByte;
+
+ sqlite3Fts5BufferAppendPrintf(
+ &rc, &s, " term=%.*s", term.n, (const char*)term.p
+ );
+ iOff += fts5DecodeDoclist(&rc, &s, &a[iOff], iEnd-iOff);
+ }
+
+ fts5BufferFree(&term);
+ }
+
+ decode_out:
+ sqlite3_free(a);
+ if( rc==SQLITE_OK ){
+ sqlite3_result_text(pCtx, (const char*)s.p, s.n, SQLITE_TRANSIENT);
+ }else{
+ sqlite3_result_error_code(pCtx, rc);
+ }
+ fts5BufferFree(&s);
+}
+
+/*
+** The implementation of user-defined scalar function fts5_rowid().
+*/
+static void fts5RowidFunction(
+ sqlite3_context *pCtx, /* Function call context */
+  int nArg,                       /* Number of arguments */
+ sqlite3_value **apVal /* Function arguments */
+){
+ const char *zArg;
+ if( nArg==0 ){
+ sqlite3_result_error(pCtx, "should be: fts5_rowid(subject, ....)", -1);
+ }else{
+ zArg = (const char*)sqlite3_value_text(apVal[0]);
+ if( 0==sqlite3_stricmp(zArg, "segment") ){
+ i64 iRowid;
+ int segid, pgno;
+ if( nArg!=3 ){
+ sqlite3_result_error(pCtx,
+ "should be: fts5_rowid('segment', segid, pgno))", -1
+ );
+ }else{
+ segid = sqlite3_value_int(apVal[1]);
+ pgno = sqlite3_value_int(apVal[2]);
+ iRowid = FTS5_SEGMENT_ROWID(segid, pgno);
+ sqlite3_result_int64(pCtx, iRowid);
+ }
+ }else{
+ sqlite3_result_error(pCtx,
+ "first arg to fts5_rowid() must be 'segment'" , -1
+ );
+ }
+ }
+}
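+
+/* Example usage (a sketch; the value returned depends on the
+** FTS5_SEGMENT_ROWID() encoding):
+**
+**   SELECT fts5_rowid('segment', 4, 1);
+**
+** evaluates to the %_data rowid of leaf page 1 of segment 4, which may
+** then be passed to fts5_decode() along with the corresponding block.
+*/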
+
+/*
+** This is called as part of registering the FTS5 module with database
+** connection db. It registers several user-defined scalar functions useful
+** with FTS5.
+**
+** If successful, SQLITE_OK is returned. If an error occurs, some other
+** SQLite error code is returned instead.
+*/
+static int sqlite3Fts5IndexInit(sqlite3 *db){
+ int rc = sqlite3_create_function(
+ db, "fts5_decode", 2, SQLITE_UTF8, 0, fts5DecodeFunction, 0, 0
+ );
+
+ if( rc==SQLITE_OK ){
+ rc = sqlite3_create_function(
+ db, "fts5_decode_none", 2,
+ SQLITE_UTF8, (void*)db, fts5DecodeFunction, 0, 0
+ );
+ }
+
+ if( rc==SQLITE_OK ){
+ rc = sqlite3_create_function(
+ db, "fts5_rowid", -1, SQLITE_UTF8, 0, fts5RowidFunction, 0, 0
+ );
+ }
+ return rc;
+}
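+
+/* For example, assuming an FTS5 table named "ft" (so that its shadow
+** table is "ft_data"), the on-disk records may be inspected with:
+**
+**   SELECT rowid, fts5_decode(rowid, block) FROM ft_data;
+**
+** fts5_decode_none interprets its arguments in the same way, except that
+** doclists are decoded as if the table were created with detail=none.
+*/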
+
+
+static int sqlite3Fts5IndexReset(Fts5Index *p){
+ assert( p->pStruct==0 || p->iStructVersion!=0 );
+ if( fts5IndexDataVersion(p)!=p->iStructVersion ){
+ fts5StructureInvalidate(p);
+ }
+ return fts5IndexReturn(p);
+}
+
+/*
+** 2014 Jun 09
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+******************************************************************************
+**
+** This is an SQLite module implementing full-text search.
+*/
+
+
+/* #include "fts5Int.h" */
+
+/*
+** This variable is set to false when running tests for which the on-disk
+** structures are guaranteed not to be corrupt. Otherwise, it is true. When
+** it is false, extra assert() conditions in the fts5 code are activated -
+** conditions that only hold if the fts5 database is not corrupt.
+*/
+SQLITE_API int sqlite3_fts5_may_be_corrupt = 1;
+
+
+typedef struct Fts5Auxdata Fts5Auxdata;
+typedef struct Fts5Auxiliary Fts5Auxiliary;
+typedef struct Fts5Cursor Fts5Cursor;
+typedef struct Fts5FullTable Fts5FullTable;
+typedef struct Fts5Sorter Fts5Sorter;
+typedef struct Fts5TokenizerModule Fts5TokenizerModule;
+
+/*
+** NOTES ON TRANSACTIONS:
+**
+** SQLite invokes the following virtual table methods as transactions are
+** opened and closed by the user:
+**
+** xBegin(): Start of a new transaction.
+** xSync(): Initial part of two-phase commit.
+** xCommit(): Final part of two-phase commit.
+** xRollback(): Rollback the transaction.
+**
+** Anything that is required as part of a commit that may fail is performed
+** in the xSync() callback. Current versions of SQLite ignore any errors
+** returned by xCommit().
+**
+** And as sub-transactions are opened/closed:
+**
+** xSavepoint(int S): Open savepoint S.
+** xRelease(int S): Commit and close savepoint S.
+** xRollbackTo(int S): Rollback to start of savepoint S.
+**
+** During a write-transaction the fts5_index.c module may cache some data
+** in-memory. It is flushed to disk whenever xSync(), xRelease() or
+** xSavepoint() is called. And discarded whenever xRollback() or xRollbackTo()
+** is called.
+**
+** Additionally, if SQLITE_DEBUG is defined, an instance of the following
+** structure is used to record the current transaction state. This information
+** is not required, but it is used in the assert() statements executed by
+** function fts5CheckTransactionState() (see below).
+*/
+struct Fts5TransactionState {
+ int eState; /* 0==closed, 1==open, 2==synced */
+ int iSavepoint; /* Number of open savepoints (0 -> none) */
+};
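+
+/* A sketch of the call sequences implied by the notes above. For an
+** ordinary write transaction:
+**
+**   xBegin() -> xSync() -> xCommit()           (successful commit)
+**   xBegin() -> xRollback()                    (rollback)
+**
+** and with a sub-transaction that is committed along the way:
+**
+**   xBegin() -> xSavepoint(0) -> xRelease(0) -> xSync() -> xCommit()
+*/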
+
+/*
+** A single object of this type is allocated when the FTS5 module is
+** registered with a database handle. It is used to store pointers to
+** all registered FTS5 extensions - tokenizers and auxiliary functions.
+*/
+struct Fts5Global {
+ fts5_api api; /* User visible part of object (see fts5.h) */
+ sqlite3 *db; /* Associated database connection */
+ i64 iNextId; /* Used to allocate unique cursor ids */
+ Fts5Auxiliary *pAux; /* First in list of all aux. functions */
+ Fts5TokenizerModule *pTok; /* First in list of all tokenizer modules */
+ Fts5TokenizerModule *pDfltTok; /* Default tokenizer module */
+ Fts5Cursor *pCsr; /* First in list of all open cursors */
+};
+
+/*
+** Each auxiliary function registered with the FTS5 module is represented
+** by an object of the following type. All such objects are stored as part
+** of the Fts5Global.pAux list.
+*/
+struct Fts5Auxiliary {
+ Fts5Global *pGlobal; /* Global context for this function */
+ char *zFunc; /* Function name (nul-terminated) */
+ void *pUserData; /* User-data pointer */
+ fts5_extension_function xFunc; /* Callback function */
+ void (*xDestroy)(void*); /* Destructor function */
+ Fts5Auxiliary *pNext; /* Next registered auxiliary function */
+};
+
+/*
+** Each tokenizer module registered with the FTS5 module is represented
+** by an object of the following type. All such objects are stored as part
+** of the Fts5Global.pTok list.
+*/
+struct Fts5TokenizerModule {
+ char *zName; /* Name of tokenizer */
+ void *pUserData; /* User pointer passed to xCreate() */
+ fts5_tokenizer x; /* Tokenizer functions */
+ void (*xDestroy)(void*); /* Destructor function */
+ Fts5TokenizerModule *pNext; /* Next registered tokenizer module */
+};
+
+struct Fts5FullTable {
+ Fts5Table p; /* Public class members from fts5Int.h */
+ Fts5Storage *pStorage; /* Document store */
+ Fts5Global *pGlobal; /* Global (connection wide) data */
+ Fts5Cursor *pSortCsr; /* Sort data from this cursor */
+#ifdef SQLITE_DEBUG
+ struct Fts5TransactionState ts;
+#endif
+};
+
+struct Fts5MatchPhrase {
+ Fts5Buffer *pPoslist; /* Pointer to current poslist */
+ int nTerm; /* Size of phrase in terms */
+};
+
+/*
+** pStmt:
+** SELECT rowid, <fts> FROM <fts> ORDER BY +rank;
+**
+** aIdx[]:
+** There is one entry in the aIdx[] array for each phrase in the query,
+** the value of which is the offset within aPoslist[] following the last
+** byte of the position list for the corresponding phrase.
+*/
+struct Fts5Sorter {
+ sqlite3_stmt *pStmt;
+ i64 iRowid; /* Current rowid */
+ const u8 *aPoslist; /* Position lists for current row */
+ int nIdx; /* Number of entries in aIdx[] */
+ int aIdx[1]; /* Offsets into aPoslist for current row */
+};
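+
+/* A sketch, assuming a two-phrase query: aIdx[0] holds the offset within
+** aPoslist[] of the first byte past the position list for phrase 0, and
+** aIdx[1] the offset past the position list for phrase 1 (the end of the
+** aPoslist[] data for the current row).
+*/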
+
+
+/*
+** Virtual-table cursor object.
+**
+** iSpecial:
+** If this is a 'special' query (refer to function fts5SpecialMatch()),
+** then this variable contains the result of the query.
+**
+** iFirstRowid, iLastRowid:
+** These variables are only used for FTS5_PLAN_MATCH cursors. Assuming the
+** cursor iterates in ascending order of rowids, iFirstRowid is the lower
+** limit of rowids to return, and iLastRowid the upper. In other words, the
+** WHERE clause in the user's query might have been:
+**
+** <tbl> MATCH <expr> AND rowid BETWEEN $iFirstRowid AND $iLastRowid
+**
+** If the cursor iterates in descending order of rowid, iFirstRowid
+** is the upper limit (i.e. the "first" rowid visited) and iLastRowid
+** the lower.
+*/
+struct Fts5Cursor {
+ sqlite3_vtab_cursor base; /* Base class used by SQLite core */
+ Fts5Cursor *pNext; /* Next cursor in Fts5Cursor.pCsr list */
+ int *aColumnSize; /* Values for xColumnSize() */
+ i64 iCsrId; /* Cursor id */
+
+ /* Zero from this point onwards on cursor reset */
+ int ePlan; /* FTS5_PLAN_XXX value */
+ int bDesc; /* True for "ORDER BY rowid DESC" queries */
+ i64 iFirstRowid; /* Return no rowids earlier than this */
+ i64 iLastRowid; /* Return no rowids later than this */
+ sqlite3_stmt *pStmt; /* Statement used to read %_content */
+ Fts5Expr *pExpr; /* Expression for MATCH queries */
+ Fts5Sorter *pSorter; /* Sorter for "ORDER BY rank" queries */
+ int csrflags; /* Mask of cursor flags (see below) */
+ i64 iSpecial; /* Result of special query */
+
+ /* "rank" function. Populated on demand from vtab.xColumn(). */
+ char *zRank; /* Custom rank function */
+ char *zRankArgs; /* Custom rank function args */
+ Fts5Auxiliary *pRank; /* Rank callback (or NULL) */
+ int nRankArg; /* Number of trailing arguments for rank() */
+ sqlite3_value **apRankArg; /* Array of trailing arguments */
+ sqlite3_stmt *pRankArgStmt; /* Origin of objects in apRankArg[] */
+
+ /* Auxiliary data storage */
+ Fts5Auxiliary *pAux; /* Currently executing extension function */
+ Fts5Auxdata *pAuxdata; /* First in linked list of saved aux-data */
+
+ /* Cache used by auxiliary functions xInst() and xInstCount() */
+ Fts5PoslistReader *aInstIter; /* One for each phrase */
+ int nInstAlloc; /* Size of aInst[] array (entries / 3) */
+ int nInstCount; /* Number of phrase instances */
+ int *aInst; /* 3 integers per phrase instance */
+};
+
+/*
+** Bits that make up the "idxNum" parameter passed indirectly by
+** xBestIndex() to xFilter().
+*/
+#define FTS5_BI_MATCH 0x0001 /* <tbl> MATCH ? */
+#define FTS5_BI_RANK 0x0002 /* rank MATCH ? */
+#define FTS5_BI_ROWID_EQ 0x0004 /* rowid == ? */
+#define FTS5_BI_ROWID_LE 0x0008 /* rowid <= ? */
+#define FTS5_BI_ROWID_GE 0x0010 /* rowid >= ? */
+
+#define FTS5_BI_ORDER_RANK 0x0020
+#define FTS5_BI_ORDER_ROWID 0x0040
+#define FTS5_BI_ORDER_DESC 0x0080
+
+/*
+** Values for Fts5Cursor.csrflags
+*/
+#define FTS5CSR_EOF 0x01
+#define FTS5CSR_REQUIRE_CONTENT 0x02
+#define FTS5CSR_REQUIRE_DOCSIZE 0x04
+#define FTS5CSR_REQUIRE_INST 0x08
+#define FTS5CSR_FREE_ZRANK 0x10
+#define FTS5CSR_REQUIRE_RESEEK 0x20
+#define FTS5CSR_REQUIRE_POSLIST 0x40
+
+#define BitFlagAllTest(x,y) (((x) & (y))==(y))
+#define BitFlagTest(x,y) (((x) & (y))!=0)
+
+
+/*
+** Macros to Set(), Clear() and Test() cursor flags.
+*/
+#define CsrFlagSet(pCsr, flag) ((pCsr)->csrflags |= (flag))
+#define CsrFlagClear(pCsr, flag) ((pCsr)->csrflags &= ~(flag))
+#define CsrFlagTest(pCsr, flag) ((pCsr)->csrflags & (flag))
+
+struct Fts5Auxdata {
+ Fts5Auxiliary *pAux; /* Extension to which this belongs */
+ void *pPtr; /* Pointer value */
+ void(*xDelete)(void*); /* Destructor */
+ Fts5Auxdata *pNext; /* Next object in linked list */
+};
+
+#ifdef SQLITE_DEBUG
+#define FTS5_BEGIN 1
+#define FTS5_SYNC 2
+#define FTS5_COMMIT 3
+#define FTS5_ROLLBACK 4
+#define FTS5_SAVEPOINT 5
+#define FTS5_RELEASE 6
+#define FTS5_ROLLBACKTO 7
+static void fts5CheckTransactionState(Fts5FullTable *p, int op, int iSavepoint){
+ switch( op ){
+ case FTS5_BEGIN:
+ assert( p->ts.eState==0 );
+ p->ts.eState = 1;
+ p->ts.iSavepoint = -1;
+ break;
+
+ case FTS5_SYNC:
+ assert( p->ts.eState==1 );
+ p->ts.eState = 2;
+ break;
+
+ case FTS5_COMMIT:
+ assert( p->ts.eState==2 );
+ p->ts.eState = 0;
+ break;
+
+ case FTS5_ROLLBACK:
+ assert( p->ts.eState==1 || p->ts.eState==2 || p->ts.eState==0 );
+ p->ts.eState = 0;
+ break;
+
+ case FTS5_SAVEPOINT:
+ assert( p->ts.eState==1 );
+ assert( iSavepoint>=0 );
+ assert( iSavepoint>=p->ts.iSavepoint );
+ p->ts.iSavepoint = iSavepoint;
+ break;
+
+ case FTS5_RELEASE:
+ assert( p->ts.eState==1 );
+ assert( iSavepoint>=0 );
+ assert( iSavepoint<=p->ts.iSavepoint );
+ p->ts.iSavepoint = iSavepoint-1;
+ break;
+
+ case FTS5_ROLLBACKTO:
+ assert( p->ts.eState==1 );
+ assert( iSavepoint>=-1 );
+ /* The following assert() can fail if another vtab strikes an error
+ ** within an xSavepoint() call then SQLite calls xRollbackTo() - without
+ ** having called xSavepoint() on this vtab. */
+ /* assert( iSavepoint<=p->ts.iSavepoint ); */
+ p->ts.iSavepoint = iSavepoint;
+ break;
+ }
+}
+#else
+# define fts5CheckTransactionState(x,y,z)
+#endif
+
+/*
+** Return true if pTab is a contentless table.
+*/
+static int fts5IsContentless(Fts5FullTable *pTab){
+ return pTab->p.pConfig->eContent==FTS5_CONTENT_NONE;
+}
+
+/*
+** Delete a virtual table handle allocated by fts5InitVtab().
+*/
+static void fts5FreeVtab(Fts5FullTable *pTab){
+ if( pTab ){
+ sqlite3Fts5IndexClose(pTab->p.pIndex);
+ sqlite3Fts5StorageClose(pTab->pStorage);
+ sqlite3Fts5ConfigFree(pTab->p.pConfig);
+ sqlite3_free(pTab);
+ }
+}
+
+/*
+** The xDisconnect() virtual table method.
+*/
+static int fts5DisconnectMethod(sqlite3_vtab *pVtab){
+ fts5FreeVtab((Fts5FullTable*)pVtab);
+ return SQLITE_OK;
+}
+
+/*
+** The xDestroy() virtual table method.
+*/
+static int fts5DestroyMethod(sqlite3_vtab *pVtab){
+ Fts5Table *pTab = (Fts5Table*)pVtab;
+ int rc = sqlite3Fts5DropAll(pTab->pConfig);
+ if( rc==SQLITE_OK ){
+ fts5FreeVtab((Fts5FullTable*)pVtab);
+ }
+ return rc;
+}
+
+/*
+** This function is the implementation of both the xConnect and xCreate
+** methods of the FTS5 virtual table.
+**
+** The argv[] array contains the following:
+**
+** argv[0] -> module name ("fts5")
+** argv[1] -> database name
+** argv[2] -> table name
+** argv[...] -> "column name" and other module argument fields.
+*/
+static int fts5InitVtab(
+ int bCreate, /* True for xCreate, false for xConnect */
+ sqlite3 *db, /* The SQLite database connection */
+ void *pAux, /* Hash table containing tokenizers */
+ int argc, /* Number of elements in argv array */
+ const char * const *argv, /* xCreate/xConnect argument array */
+ sqlite3_vtab **ppVTab, /* Write the resulting vtab structure here */
+ char **pzErr /* Write any error message here */
+){
+ Fts5Global *pGlobal = (Fts5Global*)pAux;
+ const char **azConfig = (const char**)argv;
+ int rc = SQLITE_OK; /* Return code */
+ Fts5Config *pConfig = 0; /* Results of parsing argc/argv */
+ Fts5FullTable *pTab = 0; /* New virtual table object */
+
+ /* Allocate the new vtab object and parse the configuration */
+ pTab = (Fts5FullTable*)sqlite3Fts5MallocZero(&rc, sizeof(Fts5FullTable));
+ if( rc==SQLITE_OK ){
+ rc = sqlite3Fts5ConfigParse(pGlobal, db, argc, azConfig, &pConfig, pzErr);
+ assert( (rc==SQLITE_OK && *pzErr==0) || pConfig==0 );
+ }
+ if( rc==SQLITE_OK ){
+ pTab->p.pConfig = pConfig;
+ pTab->pGlobal = pGlobal;
+ }
+
+ /* Open the index sub-system */
+ if( rc==SQLITE_OK ){
+ rc = sqlite3Fts5IndexOpen(pConfig, bCreate, &pTab->p.pIndex, pzErr);
+ }
+
+ /* Open the storage sub-system */
+ if( rc==SQLITE_OK ){
+ rc = sqlite3Fts5StorageOpen(
+ pConfig, pTab->p.pIndex, bCreate, &pTab->pStorage, pzErr
+ );
+ }
+
+ /* Call sqlite3_declare_vtab() */
+ if( rc==SQLITE_OK ){
+ rc = sqlite3Fts5ConfigDeclareVtab(pConfig);
+ }
+
+ /* Load the initial configuration */
+ if( rc==SQLITE_OK ){
+ assert( pConfig->pzErrmsg==0 );
+ pConfig->pzErrmsg = pzErr;
+ rc = sqlite3Fts5IndexLoadConfig(pTab->p.pIndex);
+ sqlite3Fts5IndexRollback(pTab->p.pIndex);
+ pConfig->pzErrmsg = 0;
+ }
+
+ if( rc!=SQLITE_OK ){
+ fts5FreeVtab(pTab);
+ pTab = 0;
+ }else if( bCreate ){
+ fts5CheckTransactionState(pTab, FTS5_BEGIN, 0);
+ }
+ *ppVTab = (sqlite3_vtab*)pTab;
+ return rc;
+}
+
+/*
+** The xConnect() and xCreate() methods for the virtual table. All the
+** work is done in function fts5InitVtab().
+*/
+static int fts5ConnectMethod(
+ sqlite3 *db, /* Database connection */
+ void *pAux, /* Pointer to tokenizer hash table */
+ int argc, /* Number of elements in argv array */
+ const char * const *argv, /* xCreate/xConnect argument array */
+ sqlite3_vtab **ppVtab, /* OUT: New sqlite3_vtab object */
+ char **pzErr /* OUT: sqlite3_malloc'd error message */
+){
+ return fts5InitVtab(0, db, pAux, argc, argv, ppVtab, pzErr);
+}
+static int fts5CreateMethod(
+ sqlite3 *db, /* Database connection */
+ void *pAux, /* Pointer to tokenizer hash table */
+ int argc, /* Number of elements in argv array */
+ const char * const *argv, /* xCreate/xConnect argument array */
+ sqlite3_vtab **ppVtab, /* OUT: New sqlite3_vtab object */
+ char **pzErr /* OUT: sqlite3_malloc'd error message */
+){
+ return fts5InitVtab(1, db, pAux, argc, argv, ppVtab, pzErr);
+}
+
+/*
+** The different query plans.
+*/
+#define FTS5_PLAN_MATCH 1 /* (<tbl> MATCH ?) */
+#define FTS5_PLAN_SOURCE 2 /* A source cursor for SORTED_MATCH */
+#define FTS5_PLAN_SPECIAL 3 /* An internal query */
+#define FTS5_PLAN_SORTED_MATCH 4 /* (<tbl> MATCH ? ORDER BY rank) */
+#define FTS5_PLAN_SCAN 5 /* No usable constraint */
+#define FTS5_PLAN_ROWID 6 /* (rowid = ?) */
+
+/*
+** Set the SQLITE_INDEX_SCAN_UNIQUE flag in pIdxInfo->flags, unless this
+** extension is currently being used by a version of SQLite too old to
+** support index-info flags, in which case this function is a no-op.
+*/
+static void fts5SetUniqueFlag(sqlite3_index_info *pIdxInfo){
+#if SQLITE_VERSION_NUMBER>=3008012
+#ifndef SQLITE_CORE
+ if( sqlite3_libversion_number()>=3008012 )
+#endif
+ {
+ pIdxInfo->idxFlags |= SQLITE_INDEX_SCAN_UNIQUE;
+ }
+#endif
+}
+
+/*
+** Implementation of the xBestIndex method for FTS5 tables. Within the
+** WHERE constraint, it searches for the following:
+**
+** 1. A MATCH constraint against the table column.
+** 2. A MATCH constraint against the "rank" column.
+** 3. A MATCH constraint against some other column.
+** 4. An == constraint against the rowid column.
+** 5. A < or <= constraint against the rowid column.
+** 6. A > or >= constraint against the rowid column.
+**
+** Within the ORDER BY, the following are supported:
+**
+**   1. ORDER BY rank [ASC|DESC]
+**   2. ORDER BY rowid [ASC|DESC]
+**
+** Information for the xFilter call is passed via both the idxNum and
+** idxStr variables. Specifically, idxNum is a bitmask of the following
+** flags used to encode the ORDER BY clause:
+**
+** FTS5_BI_ORDER_RANK
+** FTS5_BI_ORDER_ROWID
+** FTS5_BI_ORDER_DESC
+**
+** idxStr is used to encode data from the WHERE clause. For each argument
+** passed to the xFilter method, the following is appended to idxStr:
+**
+** Match against table column: "m"
+** Match against rank column: "r"
+** Match against other column: "<column-number>"
+** Equality constraint against the rowid: "="
+** A < or <= against the rowid: "<"
+** A > or >= against the rowid: ">"
+**
+** This function ensures that there is at most one "r" or "=". And that if
+** there exists an "=" then there is no "<" or ">".
+**
+** Costs are assigned as follows:
+**
+** a) If an unusable MATCH operator is present in the WHERE clause, the
+** cost is unconditionally set to 1e50 (a really big number).
+**
+**  b) If a MATCH operator is present, the cost depends on the other
+** constraints also present. As follows:
+**
+** * No other constraints: cost=1000.0
+** * One rowid range constraint: cost=750.0
+** * Both rowid range constraints: cost=500.0
+** * An == rowid constraint: cost=100.0
+**
+**  c) Otherwise, if there is no MATCH:
+**
+** * No other constraints: cost=1000000.0
+** * One rowid range constraint: cost=750000.0
+** * Both rowid range constraints: cost=250000.0
+** * An == rowid constraint: cost=10.0
+**
+** Costs are not modified by the ORDER BY clause.
+*/
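+/* For example (an illustrative sketch of the encoding described above):
+** for the query
+**
+**   SELECT * FROM tbl WHERE tbl MATCH ? AND rowid > ? ORDER BY rank;
+**
+** idxStr is set to "m>" and idxNum has the FTS5_BI_ORDER_RANK bit set.
+*/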
+static int fts5BestIndexMethod(sqlite3_vtab *pVTab, sqlite3_index_info *pInfo){
+ Fts5Table *pTab = (Fts5Table*)pVTab;
+ Fts5Config *pConfig = pTab->pConfig;
+ const int nCol = pConfig->nCol;
+ int idxFlags = 0; /* Parameter passed through to xFilter() */
+ int i;
+
+ char *idxStr;
+ int iIdxStr = 0;
+ int iCons = 0;
+
+ int bSeenEq = 0;
+ int bSeenGt = 0;
+ int bSeenLt = 0;
+ int bSeenMatch = 0;
+ int bSeenRank = 0;
+
+
+ assert( SQLITE_INDEX_CONSTRAINT_EQ<SQLITE_INDEX_CONSTRAINT_MATCH );
+ assert( SQLITE_INDEX_CONSTRAINT_GT<SQLITE_INDEX_CONSTRAINT_MATCH );
+ assert( SQLITE_INDEX_CONSTRAINT_LE<SQLITE_INDEX_CONSTRAINT_MATCH );
+ assert( SQLITE_INDEX_CONSTRAINT_GE<SQLITE_INDEX_CONSTRAINT_MATCH );
+  assert( SQLITE_INDEX_CONSTRAINT_LT<SQLITE_INDEX_CONSTRAINT_MATCH );
+
+ if( pConfig->bLock ){
+ pTab->base.zErrMsg = sqlite3_mprintf(
+ "recursively defined fts5 content table"
+ );
+ return SQLITE_ERROR;
+ }
+
+ idxStr = (char*)sqlite3_malloc(pInfo->nConstraint * 6 + 1);
+ if( idxStr==0 ) return SQLITE_NOMEM;
+ pInfo->idxStr = idxStr;
+ pInfo->needToFreeIdxStr = 1;
+
+ for(i=0; i<pInfo->nConstraint; i++){
+ struct sqlite3_index_constraint *p = &pInfo->aConstraint[i];
+ int iCol = p->iColumn;
+ if( p->op==SQLITE_INDEX_CONSTRAINT_MATCH
+ || (p->op==SQLITE_INDEX_CONSTRAINT_EQ && iCol>=nCol)
+ ){
+ /* A MATCH operator or equivalent */
+ if( p->usable==0 || iCol<0 ){
+ /* As there exists an unusable MATCH constraint this is an
+ ** unusable plan. Set a prohibitively high cost. */
+ pInfo->estimatedCost = 1e50;
+ assert( iIdxStr < pInfo->nConstraint*6 + 1 );
+ idxStr[iIdxStr] = 0;
+ return SQLITE_OK;
+ }else{
+ if( iCol==nCol+1 ){
+ if( bSeenRank ) continue;
+ idxStr[iIdxStr++] = 'r';
+ bSeenRank = 1;
+ }else{
+ bSeenMatch = 1;
+ idxStr[iIdxStr++] = 'm';
+ if( iCol<nCol ){
+ sqlite3_snprintf(6, &idxStr[iIdxStr], "%d", iCol);
+ idxStr += strlen(&idxStr[iIdxStr]);
+ assert( idxStr[iIdxStr]=='\0' );
+ }
+ }
+ pInfo->aConstraintUsage[i].argvIndex = ++iCons;
+ pInfo->aConstraintUsage[i].omit = 1;
+ }
+ }
+ else if( p->usable && bSeenEq==0
+ && p->op==SQLITE_INDEX_CONSTRAINT_EQ && iCol<0
+ ){
+ idxStr[iIdxStr++] = '=';
+ bSeenEq = 1;
+ pInfo->aConstraintUsage[i].argvIndex = ++iCons;
+ }
+ }
+
+ if( bSeenEq==0 ){
+ for(i=0; i<pInfo->nConstraint; i++){
+ struct sqlite3_index_constraint *p = &pInfo->aConstraint[i];
+ if( p->iColumn<0 && p->usable ){
+ int op = p->op;
+ if( op==SQLITE_INDEX_CONSTRAINT_LT || op==SQLITE_INDEX_CONSTRAINT_LE ){
+ if( bSeenLt ) continue;
+ idxStr[iIdxStr++] = '<';
+ pInfo->aConstraintUsage[i].argvIndex = ++iCons;
+ bSeenLt = 1;
+ }else
+ if( op==SQLITE_INDEX_CONSTRAINT_GT || op==SQLITE_INDEX_CONSTRAINT_GE ){
+ if( bSeenGt ) continue;
+ idxStr[iIdxStr++] = '>';
+ pInfo->aConstraintUsage[i].argvIndex = ++iCons;
+ bSeenGt = 1;
+ }
+ }
+ }
+ }
+ idxStr[iIdxStr] = '\0';
+
+ /* Set idxFlags flags for the ORDER BY clause */
+ if( pInfo->nOrderBy==1 ){
+ int iSort = pInfo->aOrderBy[0].iColumn;
+ if( iSort==(pConfig->nCol+1) && bSeenMatch ){
+ idxFlags |= FTS5_BI_ORDER_RANK;
+ }else if( iSort==-1 ){
+ idxFlags |= FTS5_BI_ORDER_ROWID;
+ }
+ if( BitFlagTest(idxFlags, FTS5_BI_ORDER_RANK|FTS5_BI_ORDER_ROWID) ){
+ pInfo->orderByConsumed = 1;
+ if( pInfo->aOrderBy[0].desc ){
+ idxFlags |= FTS5_BI_ORDER_DESC;
+ }
+ }
+ }
+
+ /* Calculate the estimated cost based on the flags set in idxFlags. */
+ if( bSeenEq ){
+ pInfo->estimatedCost = bSeenMatch ? 100.0 : 10.0;
+ if( bSeenMatch==0 ) fts5SetUniqueFlag(pInfo);
+ }else if( bSeenLt && bSeenGt ){
+ pInfo->estimatedCost = bSeenMatch ? 500.0 : 250000.0;
+ }else if( bSeenLt || bSeenGt ){
+ pInfo->estimatedCost = bSeenMatch ? 750.0 : 750000.0;
+ }else{
+ pInfo->estimatedCost = bSeenMatch ? 1000.0 : 1000000.0;
+ }
+
+ pInfo->idxNum = idxFlags;
+ return SQLITE_OK;
+}
+
+static int fts5NewTransaction(Fts5FullTable *pTab){
+ Fts5Cursor *pCsr;
+ for(pCsr=pTab->pGlobal->pCsr; pCsr; pCsr=pCsr->pNext){
+ if( pCsr->base.pVtab==(sqlite3_vtab*)pTab ) return SQLITE_OK;
+ }
+ return sqlite3Fts5StorageReset(pTab->pStorage);
+}
+
+/*
+** Implementation of xOpen method.
+*/
+static int fts5OpenMethod(sqlite3_vtab *pVTab, sqlite3_vtab_cursor **ppCsr){
+ Fts5FullTable *pTab = (Fts5FullTable*)pVTab;
+ Fts5Config *pConfig = pTab->p.pConfig;
+ Fts5Cursor *pCsr = 0; /* New cursor object */
+ sqlite3_int64 nByte; /* Bytes of space to allocate */
+ int rc; /* Return code */
+
+ rc = fts5NewTransaction(pTab);
+ if( rc==SQLITE_OK ){
+ nByte = sizeof(Fts5Cursor) + pConfig->nCol * sizeof(int);
+ pCsr = (Fts5Cursor*)sqlite3_malloc64(nByte);
+ if( pCsr ){
+ Fts5Global *pGlobal = pTab->pGlobal;
+ memset(pCsr, 0, (size_t)nByte);
+ pCsr->aColumnSize = (int*)&pCsr[1];
+ pCsr->pNext = pGlobal->pCsr;
+ pGlobal->pCsr = pCsr;
+ pCsr->iCsrId = ++pGlobal->iNextId;
+ }else{
+ rc = SQLITE_NOMEM;
+ }
+ }
+ *ppCsr = (sqlite3_vtab_cursor*)pCsr;
+ return rc;
+}
+
+static int fts5StmtType(Fts5Cursor *pCsr){
+ if( pCsr->ePlan==FTS5_PLAN_SCAN ){
+ return (pCsr->bDesc) ? FTS5_STMT_SCAN_DESC : FTS5_STMT_SCAN_ASC;
+ }
+ return FTS5_STMT_LOOKUP;
+}
+
+/*
+** This function is called after the cursor passed as the only argument
+** is moved to point at a different row. It clears all cached data
+** specific to the previous row stored by the cursor object.
+*/
+static void fts5CsrNewrow(Fts5Cursor *pCsr){
+ CsrFlagSet(pCsr,
+ FTS5CSR_REQUIRE_CONTENT
+ | FTS5CSR_REQUIRE_DOCSIZE
+ | FTS5CSR_REQUIRE_INST
+ | FTS5CSR_REQUIRE_POSLIST
+ );
+}
+
+static void fts5FreeCursorComponents(Fts5Cursor *pCsr){
+ Fts5FullTable *pTab = (Fts5FullTable*)(pCsr->base.pVtab);
+ Fts5Auxdata *pData;
+ Fts5Auxdata *pNext;
+
+ sqlite3_free(pCsr->aInstIter);
+ sqlite3_free(pCsr->aInst);
+ if( pCsr->pStmt ){
+ int eStmt = fts5StmtType(pCsr);
+ sqlite3Fts5StorageStmtRelease(pTab->pStorage, eStmt, pCsr->pStmt);
+ }
+ if( pCsr->pSorter ){
+ Fts5Sorter *pSorter = pCsr->pSorter;
+ sqlite3_finalize(pSorter->pStmt);
+ sqlite3_free(pSorter);
+ }
+
+ if( pCsr->ePlan!=FTS5_PLAN_SOURCE ){
+ sqlite3Fts5ExprFree(pCsr->pExpr);
+ }
+
+ for(pData=pCsr->pAuxdata; pData; pData=pNext){
+ pNext = pData->pNext;
+ if( pData->xDelete ) pData->xDelete(pData->pPtr);
+ sqlite3_free(pData);
+ }
+
+ sqlite3_finalize(pCsr->pRankArgStmt);
+ sqlite3_free(pCsr->apRankArg);
+
+ if( CsrFlagTest(pCsr, FTS5CSR_FREE_ZRANK) ){
+ sqlite3_free(pCsr->zRank);
+ sqlite3_free(pCsr->zRankArgs);
+ }
+
+ sqlite3Fts5IndexCloseReader(pTab->p.pIndex);
+ memset(&pCsr->ePlan, 0, sizeof(Fts5Cursor) - ((u8*)&pCsr->ePlan - (u8*)pCsr));
+}
+
+
+/*
+** Close the cursor. For additional information see the documentation
+** on the xClose method of the virtual table interface.
+*/
+static int fts5CloseMethod(sqlite3_vtab_cursor *pCursor){
+ if( pCursor ){
+ Fts5FullTable *pTab = (Fts5FullTable*)(pCursor->pVtab);
+ Fts5Cursor *pCsr = (Fts5Cursor*)pCursor;
+ Fts5Cursor **pp;
+
+ fts5FreeCursorComponents(pCsr);
+ /* Remove the cursor from the Fts5Global.pCsr list */
+ for(pp=&pTab->pGlobal->pCsr; (*pp)!=pCsr; pp=&(*pp)->pNext);
+ *pp = pCsr->pNext;
+
+ sqlite3_free(pCsr);
+ }
+ return SQLITE_OK;
+}
+
+static int fts5SorterNext(Fts5Cursor *pCsr){
+ Fts5Sorter *pSorter = pCsr->pSorter;
+ int rc;
+
+ rc = sqlite3_step(pSorter->pStmt);
+ if( rc==SQLITE_DONE ){
+ rc = SQLITE_OK;
+ CsrFlagSet(pCsr, FTS5CSR_EOF);
+ }else if( rc==SQLITE_ROW ){
+ const u8 *a;
+ const u8 *aBlob;
+ int nBlob;
+ int i;
+ int iOff = 0;
+ rc = SQLITE_OK;
+
+ pSorter->iRowid = sqlite3_column_int64(pSorter->pStmt, 0);
+ nBlob = sqlite3_column_bytes(pSorter->pStmt, 1);
+ aBlob = a = sqlite3_column_blob(pSorter->pStmt, 1);
+
+ /* nBlob==0 in detail=none mode. */
+ if( nBlob>0 ){
+ for(i=0; i<(pSorter->nIdx-1); i++){
+ int iVal;
+ a += fts5GetVarint32(a, iVal);
+ iOff += iVal;
+ pSorter->aIdx[i] = iOff;
+ }
+ pSorter->aIdx[i] = &aBlob[nBlob] - a;
+ pSorter->aPoslist = a;
+ }
+
+ fts5CsrNewrow(pCsr);
+ }
+
+ return rc;
+}
+
+
+/*
+** Set the FTS5CSR_REQUIRE_RESEEK flag on all FTS5_PLAN_MATCH cursors
+** open on table pTab.
+*/
+static void fts5TripCursors(Fts5FullTable *pTab){
+ Fts5Cursor *pCsr;
+ for(pCsr=pTab->pGlobal->pCsr; pCsr; pCsr=pCsr->pNext){
+ if( pCsr->ePlan==FTS5_PLAN_MATCH
+ && pCsr->base.pVtab==(sqlite3_vtab*)pTab
+ ){
+ CsrFlagSet(pCsr, FTS5CSR_REQUIRE_RESEEK);
+ }
+ }
+}
+
+/*
+** If the REQUIRE_RESEEK flag is set on the cursor passed as the first
+** argument, close and reopen all Fts5IndexIter iterators that the cursor
+** is using. Then attempt to move the cursor to a rowid equal to or later
+** (in the cursor's sort order - ASC or DESC) than the current rowid.
+**
+** If the new rowid is not equal to the old, set output parameter *pbSkip
+** to 1 before returning. Otherwise, leave it unchanged.
+**
+** Return SQLITE_OK if successful or if no reseek was required, or an
+** error code if an error occurred.
+*/
+static int fts5CursorReseek(Fts5Cursor *pCsr, int *pbSkip){
+ int rc = SQLITE_OK;
+ assert( *pbSkip==0 );
+ if( CsrFlagTest(pCsr, FTS5CSR_REQUIRE_RESEEK) ){
+ Fts5FullTable *pTab = (Fts5FullTable*)(pCsr->base.pVtab);
+ int bDesc = pCsr->bDesc;
+ i64 iRowid = sqlite3Fts5ExprRowid(pCsr->pExpr);
+
+ rc = sqlite3Fts5ExprFirst(pCsr->pExpr, pTab->p.pIndex, iRowid, bDesc);
+ if( rc==SQLITE_OK && iRowid!=sqlite3Fts5ExprRowid(pCsr->pExpr) ){
+ *pbSkip = 1;
+ }
+
+ CsrFlagClear(pCsr, FTS5CSR_REQUIRE_RESEEK);
+ fts5CsrNewrow(pCsr);
+ if( sqlite3Fts5ExprEof(pCsr->pExpr) ){
+ CsrFlagSet(pCsr, FTS5CSR_EOF);
+ *pbSkip = 1;
+ }
+ }
+ return rc;
+}
+
+
+/*
+** Advance the cursor to the next row in the table that matches the
+** search criteria.
+**
+** Return SQLITE_OK if nothing goes wrong. SQLITE_OK is returned
+** even if we reach end-of-file. The fts5EofMethod() will be called
+** subsequently to determine whether or not an EOF was hit.
+*/
+static int fts5NextMethod(sqlite3_vtab_cursor *pCursor){
+ Fts5Cursor *pCsr = (Fts5Cursor*)pCursor;
+ int rc;
+
+ assert( (pCsr->ePlan<3)==
+ (pCsr->ePlan==FTS5_PLAN_MATCH || pCsr->ePlan==FTS5_PLAN_SOURCE)
+ );
+ assert( !CsrFlagTest(pCsr, FTS5CSR_EOF) );
+
+ if( pCsr->ePlan<3 ){
+ int bSkip = 0;
+ if( (rc = fts5CursorReseek(pCsr, &bSkip)) || bSkip ) return rc;
+ rc = sqlite3Fts5ExprNext(pCsr->pExpr, pCsr->iLastRowid);
+ CsrFlagSet(pCsr, sqlite3Fts5ExprEof(pCsr->pExpr));
+ fts5CsrNewrow(pCsr);
+ }else{
+ switch( pCsr->ePlan ){
+ case FTS5_PLAN_SPECIAL: {
+ CsrFlagSet(pCsr, FTS5CSR_EOF);
+ rc = SQLITE_OK;
+ break;
+ }
+
+ case FTS5_PLAN_SORTED_MATCH: {
+ rc = fts5SorterNext(pCsr);
+ break;
+ }
+
+ default: {
+ Fts5Config *pConfig = ((Fts5Table*)pCursor->pVtab)->pConfig;
+ pConfig->bLock++;
+ rc = sqlite3_step(pCsr->pStmt);
+ pConfig->bLock--;
+ if( rc!=SQLITE_ROW ){
+ CsrFlagSet(pCsr, FTS5CSR_EOF);
+ rc = sqlite3_reset(pCsr->pStmt);
+ if( rc!=SQLITE_OK ){
+ pCursor->pVtab->zErrMsg = sqlite3_mprintf(
+ "%s", sqlite3_errmsg(pConfig->db)
+ );
+ }
+ }else{
+ rc = SQLITE_OK;
+ }
+ break;
+ }
+ }
+ }
+
+ return rc;
+}
+
+
+static int fts5PrepareStatement(
+ sqlite3_stmt **ppStmt,
+ Fts5Config *pConfig,
+ const char *zFmt,
+ ...
+){
+ sqlite3_stmt *pRet = 0;
+ int rc;
+ char *zSql;
+ va_list ap;
+
+ va_start(ap, zFmt);
+ zSql = sqlite3_vmprintf(zFmt, ap);
+ if( zSql==0 ){
+ rc = SQLITE_NOMEM;
+ }else{
+ rc = sqlite3_prepare_v3(pConfig->db, zSql, -1,
+ SQLITE_PREPARE_PERSISTENT, &pRet, 0);
+ if( rc!=SQLITE_OK ){
+ *pConfig->pzErrmsg = sqlite3_mprintf("%s", sqlite3_errmsg(pConfig->db));
+ }
+ sqlite3_free(zSql);
+ }
+
+ va_end(ap);
+ *ppStmt = pRet;
+ return rc;
+}
+
+static int fts5CursorFirstSorted(
+ Fts5FullTable *pTab,
+ Fts5Cursor *pCsr,
+ int bDesc
+){
+ Fts5Config *pConfig = pTab->p.pConfig;
+ Fts5Sorter *pSorter;
+ int nPhrase;
+ sqlite3_int64 nByte;
+ int rc;
+ const char *zRank = pCsr->zRank;
+ const char *zRankArgs = pCsr->zRankArgs;
+
+ nPhrase = sqlite3Fts5ExprPhraseCount(pCsr->pExpr);
+ nByte = sizeof(Fts5Sorter) + sizeof(int) * (nPhrase-1);
+ pSorter = (Fts5Sorter*)sqlite3_malloc64(nByte);
+ if( pSorter==0 ) return SQLITE_NOMEM;
+ memset(pSorter, 0, (size_t)nByte);
+ pSorter->nIdx = nPhrase;
+
+ /* TODO: It would be better to have some system for reusing statement
+ ** handles here, rather than preparing a new one for each query. But that
+ ** is not possible as SQLite reference counts the virtual table objects.
+ ** And since the statement required here reads from this very virtual
+ ** table, saving it creates a circular reference.
+ **
+ ** If SQLite had a built-in statement cache, this wouldn't be a problem. */
+ rc = fts5PrepareStatement(&pSorter->pStmt, pConfig,
+ "SELECT rowid, rank FROM %Q.%Q ORDER BY %s(\"%w\"%s%s) %s",
+ pConfig->zDb, pConfig->zName, zRank, pConfig->zName,
+ (zRankArgs ? ", " : ""),
+ (zRankArgs ? zRankArgs : ""),
+ bDesc ? "DESC" : "ASC"
+ );
+
+ pCsr->pSorter = pSorter;
+ if( rc==SQLITE_OK ){
+ assert( pTab->pSortCsr==0 );
+ pTab->pSortCsr = pCsr;
+ rc = fts5SorterNext(pCsr);
+ pTab->pSortCsr = 0;
+ }
+
+ if( rc!=SQLITE_OK ){
+ sqlite3_finalize(pSorter->pStmt);
+ sqlite3_free(pSorter);
+ pCsr->pSorter = 0;
+ }
+
+ return rc;
+}
+
+static int fts5CursorFirst(Fts5FullTable *pTab, Fts5Cursor *pCsr, int bDesc){
+ int rc;
+ Fts5Expr *pExpr = pCsr->pExpr;
+ rc = sqlite3Fts5ExprFirst(pExpr, pTab->p.pIndex, pCsr->iFirstRowid, bDesc);
+ if( sqlite3Fts5ExprEof(pExpr) ){
+ CsrFlagSet(pCsr, FTS5CSR_EOF);
+ }
+ fts5CsrNewrow(pCsr);
+ return rc;
+}
+
+/*
+** Process a "special" query. A special query is identified as one with a
+** MATCH expression that begins with a '*' character. The remainder of
+** the text passed to the MATCH operator is used as the special query
+** parameters.
+*/
+static int fts5SpecialMatch(
+ Fts5FullTable *pTab,
+ Fts5Cursor *pCsr,
+ const char *zQuery
+){
+ int rc = SQLITE_OK; /* Return code */
+ const char *z = zQuery; /* Special query text */
+ int n; /* Number of bytes in text at z */
+
+ while( z[0]==' ' ) z++;
+ for(n=0; z[n] && z[n]!=' '; n++);
+
+ assert( pTab->p.base.zErrMsg==0 );
+ pCsr->ePlan = FTS5_PLAN_SPECIAL;
+
+ if( n==5 && 0==sqlite3_strnicmp("reads", z, n) ){
+ pCsr->iSpecial = sqlite3Fts5IndexReads(pTab->p.pIndex);
+ }
+ else if( n==2 && 0==sqlite3_strnicmp("id", z, n) ){
+ pCsr->iSpecial = pCsr->iCsrId;
+ }
+ else{
+ /* An unrecognized directive. Return an error message. */
+ pTab->p.base.zErrMsg = sqlite3_mprintf("unknown special query: %.*s", n, z);
+ rc = SQLITE_ERROR;
+ }
+
+ return rc;
+}
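+
+/* Usage illustration (a sketch, not part of the module): assuming an
+** fts5 table named "ft", the special queries handled above are issued
+** through the MATCH operator, e.g.:
+**
+**   SELECT ft FROM ft WHERE ft MATCH '*reads';   -- index read count
+**   SELECT ft FROM ft WHERE ft MATCH '*id';      -- cursor id
+**
+** The value is returned via the hidden column that shares the table's
+** name (see pCsr->iSpecial and fts5ColumnMethod() below). The table
+** name "ft" is a placeholder. */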
+
+/*
+** Search for an auxiliary function named zName that can be used with table
+** pTab. If one is found, return a pointer to the corresponding Fts5Auxiliary
+** structure. Otherwise, if no such function exists, return NULL.
+*/
+static Fts5Auxiliary *fts5FindAuxiliary(Fts5FullTable *pTab, const char *zName){
+ Fts5Auxiliary *pAux;
+
+ for(pAux=pTab->pGlobal->pAux; pAux; pAux=pAux->pNext){
+ if( sqlite3_stricmp(zName, pAux->zFunc)==0 ) return pAux;
+ }
+
+ /* No function of the specified name was found. Return 0. */
+ return 0;
+}
+
+
+static int fts5FindRankFunction(Fts5Cursor *pCsr){
+ Fts5FullTable *pTab = (Fts5FullTable*)(pCsr->base.pVtab);
+ Fts5Config *pConfig = pTab->p.pConfig;
+ int rc = SQLITE_OK;
+ Fts5Auxiliary *pAux = 0;
+ const char *zRank = pCsr->zRank;
+ const char *zRankArgs = pCsr->zRankArgs;
+
+ if( zRankArgs ){
+ char *zSql = sqlite3Fts5Mprintf(&rc, "SELECT %s", zRankArgs);
+ if( zSql ){
+ sqlite3_stmt *pStmt = 0;
+ rc = sqlite3_prepare_v3(pConfig->db, zSql, -1,
+ SQLITE_PREPARE_PERSISTENT, &pStmt, 0);
+ sqlite3_free(zSql);
+ assert( rc==SQLITE_OK || pCsr->pRankArgStmt==0 );
+ if( rc==SQLITE_OK ){
+ if( SQLITE_ROW==sqlite3_step(pStmt) ){
+ sqlite3_int64 nByte;
+ pCsr->nRankArg = sqlite3_column_count(pStmt);
+ nByte = sizeof(sqlite3_value*)*pCsr->nRankArg;
+ pCsr->apRankArg = (sqlite3_value**)sqlite3Fts5MallocZero(&rc, nByte);
+ if( rc==SQLITE_OK ){
+ int i;
+ for(i=0; i<pCsr->nRankArg; i++){
+ pCsr->apRankArg[i] = sqlite3_column_value(pStmt, i);
+ }
+ }
+ pCsr->pRankArgStmt = pStmt;
+ }else{
+ rc = sqlite3_finalize(pStmt);
+ assert( rc!=SQLITE_OK );
+ }
+ }
+ }
+ }
+
+ if( rc==SQLITE_OK ){
+ pAux = fts5FindAuxiliary(pTab, zRank);
+ if( pAux==0 ){
+ assert( pTab->p.base.zErrMsg==0 );
+ pTab->p.base.zErrMsg = sqlite3_mprintf("no such function: %s", zRank);
+ rc = SQLITE_ERROR;
+ }
+ }
+
+ pCsr->pRank = pAux;
+ return rc;
+}
+
+
+static int fts5CursorParseRank(
+ Fts5Config *pConfig,
+ Fts5Cursor *pCsr,
+ sqlite3_value *pRank
+){
+ int rc = SQLITE_OK;
+ if( pRank ){
+ const char *z = (const char*)sqlite3_value_text(pRank);
+ char *zRank = 0;
+ char *zRankArgs = 0;
+
+ if( z==0 ){
+ if( sqlite3_value_type(pRank)==SQLITE_NULL ) rc = SQLITE_ERROR;
+ }else{
+ rc = sqlite3Fts5ConfigParseRank(z, &zRank, &zRankArgs);
+ }
+ if( rc==SQLITE_OK ){
+ pCsr->zRank = zRank;
+ pCsr->zRankArgs = zRankArgs;
+ CsrFlagSet(pCsr, FTS5CSR_FREE_ZRANK);
+ }else if( rc==SQLITE_ERROR ){
+ pCsr->base.pVtab->zErrMsg = sqlite3_mprintf(
+ "parse error in rank function: %s", z
+ );
+ }
+ }else{
+ if( pConfig->zRank ){
+ pCsr->zRank = (char*)pConfig->zRank;
+ pCsr->zRankArgs = (char*)pConfig->zRankArgs;
+ }else{
+ pCsr->zRank = (char*)FTS5_DEFAULT_RANK;
+ pCsr->zRankArgs = 0;
+ }
+ }
+ return rc;
+}
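+
+/* Illustration (sketch): a per-query rank function may be supplied with
+** a "rank MATCH ?" constraint, for example:
+**
+**   SELECT * FROM ft WHERE ft MATCH ?1 AND rank MATCH 'bm25(10.0, 5.0)'
+**   ORDER BY rank;
+**
+** Here sqlite3Fts5ConfigParseRank() splits the text into zRank=="bm25"
+** and zRankArgs=="10.0, 5.0". The table name "ft" is a placeholder. */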
+
+static i64 fts5GetRowidLimit(sqlite3_value *pVal, i64 iDefault){
+ if( pVal ){
+ int eType = sqlite3_value_numeric_type(pVal);
+ if( eType==SQLITE_INTEGER ){
+ return sqlite3_value_int64(pVal);
+ }
+ }
+ return iDefault;
+}
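+
+/* Illustration (a sketch based on the decoding loop in fts5FilterMethod()
+** below): idxStr is a compact string with one code per element of
+** apVal[]; the 'm' code may be followed by a column number in decimal
+** digits. For example, idxStr=="m1<" would mean:
+**
+**   apVal[0] -> MATCH expression, restricted to column 1
+**   apVal[1] -> upper rowid bound (rowid <= ?)
+**
+** Codes 'r', '=' and '>' identify the rank argument, a rowid equality
+** constraint and a lower rowid bound respectively. */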
+
+/*
+** This is the xFilter interface for the virtual table. See
+** the virtual table xFilter method documentation for additional
+** information.
+**
+** There are three possible query strategies:
+**
+** 1. Full-text search using a MATCH operator.
+** 2. A by-rowid lookup.
+** 3. A full-table scan.
+*/
+static int fts5FilterMethod(
+ sqlite3_vtab_cursor *pCursor, /* The cursor used for this query */
+ int idxNum, /* Strategy index */
+ const char *idxStr, /* Strategy string from xBestIndex() */
+ int nVal, /* Number of elements in apVal */
+ sqlite3_value **apVal /* Arguments for the indexing scheme */
+){
+ Fts5FullTable *pTab = (Fts5FullTable*)(pCursor->pVtab);
+ Fts5Config *pConfig = pTab->p.pConfig;
+ Fts5Cursor *pCsr = (Fts5Cursor*)pCursor;
+ int rc = SQLITE_OK; /* Error code */
+ int bDesc; /* True if ORDER BY [rank|rowid] DESC */
+ int bOrderByRank; /* True if ORDER BY rank */
+ sqlite3_value *pRank = 0; /* rank MATCH ? expression (or NULL) */
+ sqlite3_value *pRowidEq = 0; /* rowid = ? expression (or NULL) */
+ sqlite3_value *pRowidLe = 0; /* rowid <= ? expression (or NULL) */
+ sqlite3_value *pRowidGe = 0; /* rowid >= ? expression (or NULL) */
+ int iCol; /* Column on LHS of MATCH operator */
+ char **pzErrmsg = pConfig->pzErrmsg;
+ int i;
+ int iIdxStr = 0;
+ Fts5Expr *pExpr = 0;
+
+ if( pConfig->bLock ){
+ pTab->p.base.zErrMsg = sqlite3_mprintf(
+ "recursively defined fts5 content table"
+ );
+ return SQLITE_ERROR;
+ }
+
+ if( pCsr->ePlan ){
+ fts5FreeCursorComponents(pCsr);
+ memset(&pCsr->ePlan, 0, sizeof(Fts5Cursor) - ((u8*)&pCsr->ePlan-(u8*)pCsr));
+ }
+
+ assert( pCsr->pStmt==0 );
+ assert( pCsr->pExpr==0 );
+ assert( pCsr->csrflags==0 );
+ assert( pCsr->pRank==0 );
+ assert( pCsr->zRank==0 );
+ assert( pCsr->zRankArgs==0 );
+ assert( pTab->pSortCsr==0 || nVal==0 );
+
+ assert( pzErrmsg==0 || pzErrmsg==&pTab->p.base.zErrMsg );
+ pConfig->pzErrmsg = &pTab->p.base.zErrMsg;
+
+ /* Decode the arguments passed through to this function. */
+ for(i=0; i<nVal; i++){
+ switch( idxStr[iIdxStr++] ){
+ case 'r':
+ pRank = apVal[i];
+ break;
+ case 'm': {
+ const char *zText = (const char*)sqlite3_value_text(apVal[i]);
+ if( zText==0 ) zText = "";
+
+ if( idxStr[iIdxStr]>='0' && idxStr[iIdxStr]<='9' ){
+ iCol = 0;
+ do{
+ iCol = iCol*10 + (idxStr[iIdxStr]-'0');
+ iIdxStr++;
+ }while( idxStr[iIdxStr]>='0' && idxStr[iIdxStr]<='9' );
+ }else{
+ iCol = pConfig->nCol;
+ }
+
+ if( zText[0]=='*' ){
+ /* The user has issued a query of the form "MATCH '*...'". This
+ ** indicates that the MATCH expression is not a full text query,
+ ** but a request for an internal parameter. */
+ rc = fts5SpecialMatch(pTab, pCsr, &zText[1]);
+ goto filter_out;
+ }else{
+ char **pzErr = &pTab->p.base.zErrMsg;
+ rc = sqlite3Fts5ExprNew(pConfig, iCol, zText, &pExpr, pzErr);
+ if( rc==SQLITE_OK ){
+ rc = sqlite3Fts5ExprAnd(&pCsr->pExpr, pExpr);
+ pExpr = 0;
+ }
+ if( rc!=SQLITE_OK ) goto filter_out;
+ }
+
+ break;
+ }
+ case '=':
+ pRowidEq = apVal[i];
+ break;
+ case '<':
+ pRowidLe = apVal[i];
+ break;
+ default: assert( idxStr[iIdxStr-1]=='>' );
+ pRowidGe = apVal[i];
+ break;
+ }
+ }
+ bOrderByRank = ((idxNum & FTS5_BI_ORDER_RANK) ? 1 : 0);
+ pCsr->bDesc = bDesc = ((idxNum & FTS5_BI_ORDER_DESC) ? 1 : 0);
+
+ /* Set the cursor upper and lower rowid limits. Only some strategies
+ ** actually use them. This is ok, as the xBestIndex() method leaves the
+ ** sqlite3_index_constraint.omit flag clear for range constraints
+ ** on the rowid field. */
+ if( pRowidEq ){
+ pRowidLe = pRowidGe = pRowidEq;
+ }
+ if( bDesc ){
+ pCsr->iFirstRowid = fts5GetRowidLimit(pRowidLe, LARGEST_INT64);
+ pCsr->iLastRowid = fts5GetRowidLimit(pRowidGe, SMALLEST_INT64);
+ }else{
+ pCsr->iLastRowid = fts5GetRowidLimit(pRowidLe, LARGEST_INT64);
+ pCsr->iFirstRowid = fts5GetRowidLimit(pRowidGe, SMALLEST_INT64);
+ }
+
+ if( pTab->pSortCsr ){
+ /* If pSortCsr is non-NULL, then this call is being made as part of
+ ** processing for a "... MATCH <expr> ORDER BY rank" query (ePlan is
+ ** set to FTS5_PLAN_SORTED_MATCH). pSortCsr is the cursor that will
+ ** return results to the user for this query. The current cursor
+ ** (pCursor) is used to execute the query issued by function
+ ** fts5CursorFirstSorted() above. */
+ assert( pRowidEq==0 && pRowidLe==0 && pRowidGe==0 && pRank==0 );
+ assert( nVal==0 && bOrderByRank==0 && bDesc==0 );
+ assert( pCsr->iLastRowid==LARGEST_INT64 );
+ assert( pCsr->iFirstRowid==SMALLEST_INT64 );
+ if( pTab->pSortCsr->bDesc ){
+ pCsr->iLastRowid = pTab->pSortCsr->iFirstRowid;
+ pCsr->iFirstRowid = pTab->pSortCsr->iLastRowid;
+ }else{
+ pCsr->iLastRowid = pTab->pSortCsr->iLastRowid;
+ pCsr->iFirstRowid = pTab->pSortCsr->iFirstRowid;
+ }
+ pCsr->ePlan = FTS5_PLAN_SOURCE;
+ pCsr->pExpr = pTab->pSortCsr->pExpr;
+ rc = fts5CursorFirst(pTab, pCsr, bDesc);
+ }else if( pCsr->pExpr ){
+ rc = fts5CursorParseRank(pConfig, pCsr, pRank);
+ if( rc==SQLITE_OK ){
+ if( bOrderByRank ){
+ pCsr->ePlan = FTS5_PLAN_SORTED_MATCH;
+ rc = fts5CursorFirstSorted(pTab, pCsr, bDesc);
+ }else{
+ pCsr->ePlan = FTS5_PLAN_MATCH;
+ rc = fts5CursorFirst(pTab, pCsr, bDesc);
+ }
+ }
+ }else if( pConfig->zContent==0 ){
+ *pConfig->pzErrmsg = sqlite3_mprintf(
+ "%s: table does not support scanning", pConfig->zName
+ );
+ rc = SQLITE_ERROR;
+ }else{
+ /* This is either a full-table scan (ePlan==FTS5_PLAN_SCAN) or a lookup
+ ** by rowid (ePlan==FTS5_PLAN_ROWID). */
+ pCsr->ePlan = (pRowidEq ? FTS5_PLAN_ROWID : FTS5_PLAN_SCAN);
+ rc = sqlite3Fts5StorageStmt(
+ pTab->pStorage, fts5StmtType(pCsr), &pCsr->pStmt, &pTab->p.base.zErrMsg
+ );
+ if( rc==SQLITE_OK ){
+ if( pCsr->ePlan==FTS5_PLAN_ROWID ){
+ sqlite3_bind_value(pCsr->pStmt, 1, pRowidEq);
+ }else{
+ sqlite3_bind_int64(pCsr->pStmt, 1, pCsr->iFirstRowid);
+ sqlite3_bind_int64(pCsr->pStmt, 2, pCsr->iLastRowid);
+ }
+ rc = fts5NextMethod(pCursor);
+ }
+ }
+
+ filter_out:
+ sqlite3Fts5ExprFree(pExpr);
+ pConfig->pzErrmsg = pzErrmsg;
+ return rc;
+}
+
+/*
+** This is the xEof method of the virtual table. SQLite calls this
+** routine to find out if it has reached the end of a result set.
+*/
+static int fts5EofMethod(sqlite3_vtab_cursor *pCursor){
+ Fts5Cursor *pCsr = (Fts5Cursor*)pCursor;
+ return (CsrFlagTest(pCsr, FTS5CSR_EOF) ? 1 : 0);
+}
+
+/*
+** Return the rowid that the cursor currently points to.
+*/
+static i64 fts5CursorRowid(Fts5Cursor *pCsr){
+ assert( pCsr->ePlan==FTS5_PLAN_MATCH
+ || pCsr->ePlan==FTS5_PLAN_SORTED_MATCH
+ || pCsr->ePlan==FTS5_PLAN_SOURCE
+ );
+ if( pCsr->pSorter ){
+ return pCsr->pSorter->iRowid;
+ }else{
+ return sqlite3Fts5ExprRowid(pCsr->pExpr);
+ }
+}
+
+/*
+** This is the xRowid method. The SQLite core calls this routine to
+** retrieve the rowid for the current row of the result set. fts5
+** exposes %_content.rowid as the rowid for the virtual table. The
+** rowid should be written to *pRowid.
+*/
+static int fts5RowidMethod(sqlite3_vtab_cursor *pCursor, sqlite_int64 *pRowid){
+ Fts5Cursor *pCsr = (Fts5Cursor*)pCursor;
+ int ePlan = pCsr->ePlan;
+
+ assert( CsrFlagTest(pCsr, FTS5CSR_EOF)==0 );
+ switch( ePlan ){
+ case FTS5_PLAN_SPECIAL:
+ *pRowid = 0;
+ break;
+
+ case FTS5_PLAN_SOURCE:
+ case FTS5_PLAN_MATCH:
+ case FTS5_PLAN_SORTED_MATCH:
+ *pRowid = fts5CursorRowid(pCsr);
+ break;
+
+ default:
+ *pRowid = sqlite3_column_int64(pCsr->pStmt, 0);
+ break;
+ }
+
+ return SQLITE_OK;
+}
+
+/*
+** If the cursor requires seeking (the FTS5CSR_REQUIRE_CONTENT flag is
+** set), seek it.
+** Return SQLITE_OK if no error occurs, or an SQLite error code otherwise.
+**
+** If argument bErrormsg is true and an error occurs, an error message may
+** be left in sqlite3_vtab.zErrMsg.
+*/
+static int fts5SeekCursor(Fts5Cursor *pCsr, int bErrormsg){
+ int rc = SQLITE_OK;
+
+ /* If the cursor does not yet have a statement handle, obtain one now. */
+ if( pCsr->pStmt==0 ){
+ Fts5FullTable *pTab = (Fts5FullTable*)(pCsr->base.pVtab);
+ int eStmt = fts5StmtType(pCsr);
+ rc = sqlite3Fts5StorageStmt(
+ pTab->pStorage, eStmt, &pCsr->pStmt, (bErrormsg?&pTab->p.base.zErrMsg:0)
+ );
+ assert( rc!=SQLITE_OK || pTab->p.base.zErrMsg==0 );
+ assert( CsrFlagTest(pCsr, FTS5CSR_REQUIRE_CONTENT) );
+ }
+
+ if( rc==SQLITE_OK && CsrFlagTest(pCsr, FTS5CSR_REQUIRE_CONTENT) ){
+ Fts5Table *pTab = (Fts5Table*)(pCsr->base.pVtab);
+ assert( pCsr->pExpr );
+ sqlite3_reset(pCsr->pStmt);
+ sqlite3_bind_int64(pCsr->pStmt, 1, fts5CursorRowid(pCsr));
+ pTab->pConfig->bLock++;
+ rc = sqlite3_step(pCsr->pStmt);
+ pTab->pConfig->bLock--;
+ if( rc==SQLITE_ROW ){
+ rc = SQLITE_OK;
+ CsrFlagClear(pCsr, FTS5CSR_REQUIRE_CONTENT);
+ }else{
+ rc = sqlite3_reset(pCsr->pStmt);
+ if( rc==SQLITE_OK ){
+ rc = FTS5_CORRUPT;
+ }else if( pTab->pConfig->pzErrmsg ){
+ *pTab->pConfig->pzErrmsg = sqlite3_mprintf(
+ "%s", sqlite3_errmsg(pTab->pConfig->db)
+ );
+ }
+ }
+ }
+ return rc;
+}
+
+static void fts5SetVtabError(Fts5FullTable *p, const char *zFormat, ...){
+ va_list ap; /* ... printf arguments */
+ va_start(ap, zFormat);
+ assert( p->p.base.zErrMsg==0 );
+ p->p.base.zErrMsg = sqlite3_vmprintf(zFormat, ap);
+ va_end(ap);
+}
+
+/*
+** This function is called to handle an FTS INSERT command. In other words,
+** an INSERT statement of the form:
+**
+** INSERT INTO fts(fts) VALUES($pCmd)
+** INSERT INTO fts(fts, rank) VALUES($pCmd, $pVal)
+**
+** Argument pVal is the value assigned to column "fts" by the INSERT
+** statement. This function returns SQLITE_OK if successful, or an SQLite
+** error code if an error occurs.
+**
+** The commands implemented by this function are documented in the "Special
+** INSERT Directives" section of the documentation. It should be updated if
+** more commands are added to this function.
+*/
+static int fts5SpecialInsert(
+ Fts5FullTable *pTab, /* Fts5 table object */
+ const char *zCmd, /* Text inserted into table-name column */
+ sqlite3_value *pVal /* Value inserted into rank column */
+){
+ Fts5Config *pConfig = pTab->p.pConfig;
+ int rc = SQLITE_OK;
+ int bError = 0;
+
+ if( 0==sqlite3_stricmp("delete-all", zCmd) ){
+ if( pConfig->eContent==FTS5_CONTENT_NORMAL ){
+ fts5SetVtabError(pTab,
+ "'delete-all' may only be used with a "
+ "contentless or external content fts5 table"
+ );
+ rc = SQLITE_ERROR;
+ }else{
+ rc = sqlite3Fts5StorageDeleteAll(pTab->pStorage);
+ }
+ }else if( 0==sqlite3_stricmp("rebuild", zCmd) ){
+ if( pConfig->eContent==FTS5_CONTENT_NONE ){
+ fts5SetVtabError(pTab,
+ "'rebuild' may not be used with a contentless fts5 table"
+ );
+ rc = SQLITE_ERROR;
+ }else{
+ rc = sqlite3Fts5StorageRebuild(pTab->pStorage);
+ }
+ }else if( 0==sqlite3_stricmp("optimize", zCmd) ){
+ rc = sqlite3Fts5StorageOptimize(pTab->pStorage);
+ }else if( 0==sqlite3_stricmp("merge", zCmd) ){
+ int nMerge = sqlite3_value_int(pVal);
+ rc = sqlite3Fts5StorageMerge(pTab->pStorage, nMerge);
+ }else if( 0==sqlite3_stricmp("integrity-check", zCmd) ){
+ rc = sqlite3Fts5StorageIntegrity(pTab->pStorage);
+#ifdef SQLITE_DEBUG
+ }else if( 0==sqlite3_stricmp("prefix-index", zCmd) ){
+ pConfig->bPrefixIndex = sqlite3_value_int(pVal);
+#endif
+ }else{
+ rc = sqlite3Fts5IndexLoadConfig(pTab->p.pIndex);
+ if( rc==SQLITE_OK ){
+ rc = sqlite3Fts5ConfigSetValue(pTab->p.pConfig, zCmd, pVal, &bError);
+ }
+ if( rc==SQLITE_OK ){
+ if( bError ){
+ rc = SQLITE_ERROR;
+ }else{
+ rc = sqlite3Fts5StorageConfigValue(pTab->pStorage, zCmd, pVal, 0);
+ }
+ }
+ }
+ return rc;
+}
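+
+/* Usage illustration (sketch): assuming an fts5 table named "ft", the
+** directives handled above are issued as ordinary INSERT statements on
+** the hidden column that shares the table's name, e.g.:
+**
+**   INSERT INTO ft(ft) VALUES('optimize');
+**   INSERT INTO ft(ft, rank) VALUES('merge', 500);
+**   INSERT INTO ft(ft) VALUES('integrity-check');
+**
+** Unrecognized commands fall through to sqlite3Fts5ConfigSetValue(),
+** which is how configuration options are set at runtime. */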
+
+static int fts5SpecialDelete(
+ Fts5FullTable *pTab,
+ sqlite3_value **apVal
+){
+ int rc = SQLITE_OK;
+ int eType1 = sqlite3_value_type(apVal[1]);
+ if( eType1==SQLITE_INTEGER ){
+ sqlite3_int64 iDel = sqlite3_value_int64(apVal[1]);
+ rc = sqlite3Fts5StorageDelete(pTab->pStorage, iDel, &apVal[2]);
+ }
+ return rc;
+}
+
+static void fts5StorageInsert(
+ int *pRc,
+ Fts5FullTable *pTab,
+ sqlite3_value **apVal,
+ i64 *piRowid
+){
+ int rc = *pRc;
+ if( rc==SQLITE_OK ){
+ rc = sqlite3Fts5StorageContentInsert(pTab->pStorage, apVal, piRowid);
+ }
+ if( rc==SQLITE_OK ){
+ rc = sqlite3Fts5StorageIndexInsert(pTab->pStorage, apVal, *piRowid);
+ }
+ *pRc = rc;
+}
+
+/*
+** This function is the implementation of the xUpdate callback used by
+** FTS5 virtual tables. It is invoked by SQLite each time a row is to be
+** inserted, updated or deleted.
+**
+** A delete specifies a single argument - the rowid of the row to remove.
+**
+** Update and insert operations pass:
+**
+** 1. The "old" rowid, or NULL.
+** 2. The "new" rowid.
+** 3. Values for each of the nCol matchable columns.
+** 4. Values for the two hidden columns (<tablename> and "rank").
+*/
+static int fts5UpdateMethod(
+ sqlite3_vtab *pVtab, /* Virtual table handle */
+ int nArg, /* Size of argument array */
+ sqlite3_value **apVal, /* Array of arguments */
+ sqlite_int64 *pRowid /* OUT: The affected (or effected) rowid */
+){
+ Fts5FullTable *pTab = (Fts5FullTable*)pVtab;
+ Fts5Config *pConfig = pTab->p.pConfig;
+ int eType0; /* value_type() of apVal[0] */
+ int rc = SQLITE_OK; /* Return code */
+
+ /* A transaction must be open when this is called. */
+ assert( pTab->ts.eState==1 );
+
+ assert( pVtab->zErrMsg==0 );
+ assert( nArg==1 || nArg==(2+pConfig->nCol+2) );
+ assert( sqlite3_value_type(apVal[0])==SQLITE_INTEGER
+ || sqlite3_value_type(apVal[0])==SQLITE_NULL
+ );
+ assert( pTab->p.pConfig->pzErrmsg==0 );
+ pTab->p.pConfig->pzErrmsg = &pTab->p.base.zErrMsg;
+
+ /* Put any active cursors into REQUIRE_SEEK state. */
+ fts5TripCursors(pTab);
+
+ eType0 = sqlite3_value_type(apVal[0]);
+ if( eType0==SQLITE_NULL
+ && sqlite3_value_type(apVal[2+pConfig->nCol])!=SQLITE_NULL
+ ){
+ /* A "special" INSERT op. These are handled separately. */
+ const char *z = (const char*)sqlite3_value_text(apVal[2+pConfig->nCol]);
+ if( pConfig->eContent!=FTS5_CONTENT_NORMAL
+ && 0==sqlite3_stricmp("delete", z)
+ ){
+ rc = fts5SpecialDelete(pTab, apVal);
+ }else{
+ rc = fts5SpecialInsert(pTab, z, apVal[2 + pConfig->nCol + 1]);
+ }
+ }else{
+ /* A regular INSERT, UPDATE or DELETE statement. The trick here is that
+ ** any conflict on the rowid value must be detected before any
+ ** modifications are made to the database file. There are 4 cases:
+ **
+ ** 1) DELETE
+ ** 2) UPDATE (rowid not modified)
+ ** 3) UPDATE (rowid modified)
+ ** 4) INSERT
+ **
+ ** Cases 3 and 4 may violate the rowid constraint.
+ */
+ int eConflict = SQLITE_ABORT;
+ if( pConfig->eContent==FTS5_CONTENT_NORMAL ){
+ eConflict = sqlite3_vtab_on_conflict(pConfig->db);
+ }
+
+ assert( eType0==SQLITE_INTEGER || eType0==SQLITE_NULL );
+ assert( nArg!=1 || eType0==SQLITE_INTEGER );
+
+ /* Filter out attempts to run UPDATE or DELETE on contentless tables.
+ ** This is not supported. */
+ if( eType0==SQLITE_INTEGER && fts5IsContentless(pTab) ){
+ pTab->p.base.zErrMsg = sqlite3_mprintf(
+ "cannot %s contentless fts5 table: %s",
+ (nArg>1 ? "UPDATE" : "DELETE from"), pConfig->zName
+ );
+ rc = SQLITE_ERROR;
+ }
+
+ /* DELETE */
+ else if( nArg==1 ){
+ i64 iDel = sqlite3_value_int64(apVal[0]); /* Rowid to delete */
+ rc = sqlite3Fts5StorageDelete(pTab->pStorage, iDel, 0);
+ }
+
+ /* INSERT or UPDATE */
+ else{
+ int eType1 = sqlite3_value_numeric_type(apVal[1]);
+
+ if( eType1!=SQLITE_INTEGER && eType1!=SQLITE_NULL ){
+ rc = SQLITE_MISMATCH;
+ }
+
+ else if( eType0!=SQLITE_INTEGER ){
+ /* If this is a REPLACE, first remove the current entry (if any) */
+ if( eConflict==SQLITE_REPLACE && eType1==SQLITE_INTEGER ){
+ i64 iNew = sqlite3_value_int64(apVal[1]); /* Rowid to delete */
+ rc = sqlite3Fts5StorageDelete(pTab->pStorage, iNew, 0);
+ }
+ fts5StorageInsert(&rc, pTab, apVal, pRowid);
+ }
+
+ /* UPDATE */
+ else{
+ i64 iOld = sqlite3_value_int64(apVal[0]); /* Old rowid */
+ i64 iNew = sqlite3_value_int64(apVal[1]); /* New rowid */
+ if( eType1==SQLITE_INTEGER && iOld!=iNew ){
+ if( eConflict==SQLITE_REPLACE ){
+ rc = sqlite3Fts5StorageDelete(pTab->pStorage, iOld, 0);
+ if( rc==SQLITE_OK ){
+ rc = sqlite3Fts5StorageDelete(pTab->pStorage, iNew, 0);
+ }
+ fts5StorageInsert(&rc, pTab, apVal, pRowid);
+ }else{
+ rc = sqlite3Fts5StorageContentInsert(pTab->pStorage, apVal, pRowid);
+ if( rc==SQLITE_OK ){
+ rc = sqlite3Fts5StorageDelete(pTab->pStorage, iOld, 0);
+ }
+ if( rc==SQLITE_OK ){
+ rc = sqlite3Fts5StorageIndexInsert(pTab->pStorage, apVal,*pRowid);
+ }
+ }
+ }else{
+ rc = sqlite3Fts5StorageDelete(pTab->pStorage, iOld, 0);
+ fts5StorageInsert(&rc, pTab, apVal, pRowid);
+ }
+ }
+ }
+ }
+
+ pTab->p.pConfig->pzErrmsg = 0;
+ return rc;
+}
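+
+/* Argument layout illustration (sketch): for a two-column table created
+** with "CREATE VIRTUAL TABLE ft USING fts5(a, b)" (nCol==2), a DELETE
+** passes nArg==1 (the rowid to delete), while an INSERT or UPDATE passes
+** nArg==6:
+**
+**   apVal[0]  old rowid (or NULL for an INSERT)
+**   apVal[1]  new rowid
+**   apVal[2]  value for column "a"
+**   apVal[3]  value for column "b"
+**   apVal[4]  value for hidden column "ft"
+**   apVal[5]  value for hidden column "rank"
+*/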
+
+/*
+** Implementation of xSync() method.
+*/
+static int fts5SyncMethod(sqlite3_vtab *pVtab){
+ int rc;
+ Fts5FullTable *pTab = (Fts5FullTable*)pVtab;
+ fts5CheckTransactionState(pTab, FTS5_SYNC, 0);
+ pTab->p.pConfig->pzErrmsg = &pTab->p.base.zErrMsg;
+ fts5TripCursors(pTab);
+ rc = sqlite3Fts5StorageSync(pTab->pStorage);
+ pTab->p.pConfig->pzErrmsg = 0;
+ return rc;
+}
+
+/*
+** Implementation of xBegin() method.
+*/
+static int fts5BeginMethod(sqlite3_vtab *pVtab){
+ fts5CheckTransactionState((Fts5FullTable*)pVtab, FTS5_BEGIN, 0);
+ fts5NewTransaction((Fts5FullTable*)pVtab);
+ return SQLITE_OK;
+}
+
+/*
+** Implementation of xCommit() method. This is a no-op. The contents of
+** the pending-terms hash-table have already been flushed into the database
+** by fts5SyncMethod().
+*/
+static int fts5CommitMethod(sqlite3_vtab *pVtab){
+ UNUSED_PARAM(pVtab); /* Call below is a no-op for NDEBUG builds */
+ fts5CheckTransactionState((Fts5FullTable*)pVtab, FTS5_COMMIT, 0);
+ return SQLITE_OK;
+}
+
+/*
+** Implementation of xRollback(). Discard the contents of the pending-terms
+** hash-table. Any changes made to the database are reverted by SQLite.
+*/
+static int fts5RollbackMethod(sqlite3_vtab *pVtab){
+ int rc;
+ Fts5FullTable *pTab = (Fts5FullTable*)pVtab;
+ fts5CheckTransactionState(pTab, FTS5_ROLLBACK, 0);
+ rc = sqlite3Fts5StorageRollback(pTab->pStorage);
+ return rc;
+}
+
+static int fts5CsrPoslist(Fts5Cursor*, int, const u8**, int*);
+
+static void *fts5ApiUserData(Fts5Context *pCtx){
+ Fts5Cursor *pCsr = (Fts5Cursor*)pCtx;
+ return pCsr->pAux->pUserData;
+}
+
+static int fts5ApiColumnCount(Fts5Context *pCtx){
+ Fts5Cursor *pCsr = (Fts5Cursor*)pCtx;
+ return ((Fts5Table*)(pCsr->base.pVtab))->pConfig->nCol;
+}
+
+static int fts5ApiColumnTotalSize(
+ Fts5Context *pCtx,
+ int iCol,
+ sqlite3_int64 *pnToken
+){
+ Fts5Cursor *pCsr = (Fts5Cursor*)pCtx;
+ Fts5FullTable *pTab = (Fts5FullTable*)(pCsr->base.pVtab);
+ return sqlite3Fts5StorageSize(pTab->pStorage, iCol, pnToken);
+}
+
+static int fts5ApiRowCount(Fts5Context *pCtx, i64 *pnRow){
+ Fts5Cursor *pCsr = (Fts5Cursor*)pCtx;
+ Fts5FullTable *pTab = (Fts5FullTable*)(pCsr->base.pVtab);
+ return sqlite3Fts5StorageRowCount(pTab->pStorage, pnRow);
+}
+
+static int fts5ApiTokenize(
+ Fts5Context *pCtx,
+ const char *pText, int nText,
+ void *pUserData,
+ int (*xToken)(void*, int, const char*, int, int, int)
+){
+ Fts5Cursor *pCsr = (Fts5Cursor*)pCtx;
+ Fts5Table *pTab = (Fts5Table*)(pCsr->base.pVtab);
+ return sqlite3Fts5Tokenize(
+ pTab->pConfig, FTS5_TOKENIZE_AUX, pText, nText, pUserData, xToken
+ );
+}
+
+static int fts5ApiPhraseCount(Fts5Context *pCtx){
+ Fts5Cursor *pCsr = (Fts5Cursor*)pCtx;
+ return sqlite3Fts5ExprPhraseCount(pCsr->pExpr);
+}
+
+static int fts5ApiPhraseSize(Fts5Context *pCtx, int iPhrase){
+ Fts5Cursor *pCsr = (Fts5Cursor*)pCtx;
+ return sqlite3Fts5ExprPhraseSize(pCsr->pExpr, iPhrase);
+}
+
+static int fts5ApiColumnText(
+ Fts5Context *pCtx,
+ int iCol,
+ const char **pz,
+ int *pn
+){
+ int rc = SQLITE_OK;
+ Fts5Cursor *pCsr = (Fts5Cursor*)pCtx;
+ if( fts5IsContentless((Fts5FullTable*)(pCsr->base.pVtab))
+ || pCsr->ePlan==FTS5_PLAN_SPECIAL
+ ){
+ *pz = 0;
+ *pn = 0;
+ }else{
+ rc = fts5SeekCursor(pCsr, 0);
+ if( rc==SQLITE_OK ){
+ *pz = (const char*)sqlite3_column_text(pCsr->pStmt, iCol+1);
+ *pn = sqlite3_column_bytes(pCsr->pStmt, iCol+1);
+ }
+ }
+ return rc;
+}
+
+static int fts5CsrPoslist(
+ Fts5Cursor *pCsr,
+ int iPhrase,
+ const u8 **pa,
+ int *pn
+){
+ Fts5Config *pConfig = ((Fts5Table*)(pCsr->base.pVtab))->pConfig;
+ int rc = SQLITE_OK;
+ int bLive = (pCsr->pSorter==0);
+
+ if( CsrFlagTest(pCsr, FTS5CSR_REQUIRE_POSLIST) ){
+
+ if( pConfig->eDetail!=FTS5_DETAIL_FULL ){
+ Fts5PoslistPopulator *aPopulator;
+ int i;
+ aPopulator = sqlite3Fts5ExprClearPoslists(pCsr->pExpr, bLive);
+ if( aPopulator==0 ) rc = SQLITE_NOMEM;
+ for(i=0; i<pConfig->nCol && rc==SQLITE_OK; i++){
+ int n; const char *z;
+ rc = fts5ApiColumnText((Fts5Context*)pCsr, i, &z, &n);
+ if( rc==SQLITE_OK ){
+ rc = sqlite3Fts5ExprPopulatePoslists(
+ pConfig, pCsr->pExpr, aPopulator, i, z, n
+ );
+ }
+ }
+ sqlite3_free(aPopulator);
+
+ if( pCsr->pSorter ){
+ sqlite3Fts5ExprCheckPoslists(pCsr->pExpr, pCsr->pSorter->iRowid);
+ }
+ }
+ CsrFlagClear(pCsr, FTS5CSR_REQUIRE_POSLIST);
+ }
+
+ if( pCsr->pSorter && pConfig->eDetail==FTS5_DETAIL_FULL ){
+ Fts5Sorter *pSorter = pCsr->pSorter;
+ int i1 = (iPhrase==0 ? 0 : pSorter->aIdx[iPhrase-1]);
+ *pn = pSorter->aIdx[iPhrase] - i1;
+ *pa = &pSorter->aPoslist[i1];
+ }else{
+ *pn = sqlite3Fts5ExprPoslist(pCsr->pExpr, iPhrase, pa);
+ }
+
+ return rc;
+}
+
+/*
+** Ensure that the Fts5Cursor.nInstCount and aInst[] variables are populated
+** correctly for the current row. Return SQLITE_OK if successful, or an
+** SQLite error code otherwise.
+*/
+static int fts5CacheInstArray(Fts5Cursor *pCsr){
+ int rc = SQLITE_OK;
+ Fts5PoslistReader *aIter; /* One iterator for each phrase */
+ int nIter; /* Number of iterators/phrases */
+ int nCol = ((Fts5Table*)pCsr->base.pVtab)->pConfig->nCol;
+
+ nIter = sqlite3Fts5ExprPhraseCount(pCsr->pExpr);
+ if( pCsr->aInstIter==0 ){
+ sqlite3_int64 nByte = sizeof(Fts5PoslistReader) * nIter;
+ pCsr->aInstIter = (Fts5PoslistReader*)sqlite3Fts5MallocZero(&rc, nByte);
+ }
+ aIter = pCsr->aInstIter;
+
+ if( aIter ){
+ int nInst = 0; /* Number of instances seen so far */
+ int i;
+
+ /* Initialize all iterators */
+ for(i=0; i<nIter && rc==SQLITE_OK; i++){
+ const u8 *a;
+ int n;
+ rc = fts5CsrPoslist(pCsr, i, &a, &n);
+ if( rc==SQLITE_OK ){
+ sqlite3Fts5PoslistReaderInit(a, n, &aIter[i]);
+ }
+ }
+
+ if( rc==SQLITE_OK ){
+ while( 1 ){
+ int *aInst;
+ int iBest = -1;
+ for(i=0; i<nIter; i++){
+ if( (aIter[i].bEof==0)
+ && (iBest<0 || aIter[i].iPos<aIter[iBest].iPos)
+ ){
+ iBest = i;
+ }
+ }
+ if( iBest<0 ) break;
+
+ nInst++;
+ if( nInst>=pCsr->nInstAlloc ){
+ pCsr->nInstAlloc = pCsr->nInstAlloc ? pCsr->nInstAlloc*2 : 32;
+ aInst = (int*)sqlite3_realloc64(
+ pCsr->aInst, pCsr->nInstAlloc*sizeof(int)*3
+ );
+ if( aInst ){
+ pCsr->aInst = aInst;
+ }else{
+ rc = SQLITE_NOMEM;
+ break;
+ }
+ }
+
+ aInst = &pCsr->aInst[3 * (nInst-1)];
+ aInst[0] = iBest;
+ aInst[1] = FTS5_POS2COLUMN(aIter[iBest].iPos);
+ aInst[2] = FTS5_POS2OFFSET(aIter[iBest].iPos);
+ if( aInst[1]<0 || aInst[1]>=nCol ){
+ rc = FTS5_CORRUPT;
+ break;
+ }
+ sqlite3Fts5PoslistReaderNext(&aIter[iBest]);
+ }
+ }
+
+ pCsr->nInstCount = nInst;
+ CsrFlagClear(pCsr, FTS5CSR_REQUIRE_INST);
+ }
+ return rc;
+}
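+
+/* Layout illustration (sketch): aInst[] stores one (phrase, column,
+** offset) triple per instance, so instance i occupies entries
+** aInst[3*i] through aInst[3*i+2]. For example, with nInstCount==2 the
+** array {0,1,4, 2,0,9} would describe phrase 0 at column 1, token
+** offset 4, followed by phrase 2 at column 0, token offset 9. */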
+
+static int fts5ApiInstCount(Fts5Context *pCtx, int *pnInst){
+ Fts5Cursor *pCsr = (Fts5Cursor*)pCtx;
+ int rc = SQLITE_OK;
+ if( CsrFlagTest(pCsr, FTS5CSR_REQUIRE_INST)==0
+ || SQLITE_OK==(rc = fts5CacheInstArray(pCsr)) ){
+ *pnInst = pCsr->nInstCount;
+ }
+ return rc;
+}
+
+static int fts5ApiInst(
+ Fts5Context *pCtx,
+ int iIdx,
+ int *piPhrase,
+ int *piCol,
+ int *piOff
+){
+ Fts5Cursor *pCsr = (Fts5Cursor*)pCtx;
+ int rc = SQLITE_OK;
+ if( CsrFlagTest(pCsr, FTS5CSR_REQUIRE_INST)==0
+ || SQLITE_OK==(rc = fts5CacheInstArray(pCsr))
+ ){
+ if( iIdx<0 || iIdx>=pCsr->nInstCount ){
+ rc = SQLITE_RANGE;
+#if 0
+ }else if( fts5IsOffsetless((Fts5Table*)pCsr->base.pVtab) ){
+ *piPhrase = pCsr->aInst[iIdx*3];
+ *piCol = pCsr->aInst[iIdx*3 + 2];
+ *piOff = -1;
+#endif
+ }else{
+ *piPhrase = pCsr->aInst[iIdx*3];
+ *piCol = pCsr->aInst[iIdx*3 + 1];
+ *piOff = pCsr->aInst[iIdx*3 + 2];
+ }
+ }
+ return rc;
+}
+
+static sqlite3_int64 fts5ApiRowid(Fts5Context *pCtx){
+ return fts5CursorRowid((Fts5Cursor*)pCtx);
+}
+
+static int fts5ColumnSizeCb(
+ void *pContext, /* Pointer to int */
+ int tflags,
+ const char *pUnused, /* Buffer containing token */
+ int nUnused, /* Size of token in bytes */
+ int iUnused1, /* Start offset of token */
+ int iUnused2 /* End offset of token */
+){
+ int *pCnt = (int*)pContext;
+ UNUSED_PARAM2(pUnused, nUnused);
+ UNUSED_PARAM2(iUnused1, iUnused2);
+ if( (tflags & FTS5_TOKEN_COLOCATED)==0 ){
+ (*pCnt)++;
+ }
+ return SQLITE_OK;
+}
+
+static int fts5ApiColumnSize(Fts5Context *pCtx, int iCol, int *pnToken){
+ Fts5Cursor *pCsr = (Fts5Cursor*)pCtx;
+ Fts5FullTable *pTab = (Fts5FullTable*)(pCsr->base.pVtab);
+ Fts5Config *pConfig = pTab->p.pConfig;
+ int rc = SQLITE_OK;
+
+ if( CsrFlagTest(pCsr, FTS5CSR_REQUIRE_DOCSIZE) ){
+ if( pConfig->bColumnsize ){
+ i64 iRowid = fts5CursorRowid(pCsr);
+ rc = sqlite3Fts5StorageDocsize(pTab->pStorage, iRowid, pCsr->aColumnSize);
+ }else if( pConfig->zContent==0 ){
+ int i;
+ for(i=0; i<pConfig->nCol; i++){
+ if( pConfig->abUnindexed[i]==0 ){
+ pCsr->aColumnSize[i] = -1;
+ }
+ }
+ }else{
+ int i;
+ for(i=0; rc==SQLITE_OK && i<pConfig->nCol; i++){
+ if( pConfig->abUnindexed[i]==0 ){
+ const char *z; int n;
+ void *p = (void*)(&pCsr->aColumnSize[i]);
+ pCsr->aColumnSize[i] = 0;
+ rc = fts5ApiColumnText(pCtx, i, &z, &n);
+ if( rc==SQLITE_OK ){
+ rc = sqlite3Fts5Tokenize(
+ pConfig, FTS5_TOKENIZE_AUX, z, n, p, fts5ColumnSizeCb
+ );
+ }
+ }
+ }
+ }
+ CsrFlagClear(pCsr, FTS5CSR_REQUIRE_DOCSIZE);
+ }
+ if( iCol<0 ){
+ int i;
+ *pnToken = 0;
+ for(i=0; i<pConfig->nCol; i++){
+ *pnToken += pCsr->aColumnSize[i];
+ }
+ }else if( iCol<pConfig->nCol ){
+ *pnToken = pCsr->aColumnSize[iCol];
+ }else{
+ *pnToken = 0;
+ rc = SQLITE_RANGE;
+ }
+ return rc;
+}
+
+/*
+** Implementation of the xSetAuxdata() method.
+*/
+static int fts5ApiSetAuxdata(
+ Fts5Context *pCtx, /* Fts5 context */
+ void *pPtr, /* Pointer to save as auxdata */
+ void(*xDelete)(void*) /* Destructor for pPtr (or NULL) */
+){
+ Fts5Cursor *pCsr = (Fts5Cursor*)pCtx;
+ Fts5Auxdata *pData;
+
+ /* Search through the cursor's list of Fts5Auxdata objects for one that
+ ** corresponds to the currently executing auxiliary function. */
+ for(pData=pCsr->pAuxdata; pData; pData=pData->pNext){
+ if( pData->pAux==pCsr->pAux ) break;
+ }
+
+ if( pData ){
+ if( pData->xDelete ){
+ pData->xDelete(pData->pPtr);
+ }
+ }else{
+ int rc = SQLITE_OK;
+ pData = (Fts5Auxdata*)sqlite3Fts5MallocZero(&rc, sizeof(Fts5Auxdata));
+ if( pData==0 ){
+ if( xDelete ) xDelete(pPtr);
+ return rc;
+ }
+ pData->pAux = pCsr->pAux;
+ pData->pNext = pCsr->pAuxdata;
+ pCsr->pAuxdata = pData;
+ }
+
+ pData->xDelete = xDelete;
+ pData->pPtr = pPtr;
+ return SQLITE_OK;
+}
+
+static void *fts5ApiGetAuxdata(Fts5Context *pCtx, int bClear){
+ Fts5Cursor *pCsr = (Fts5Cursor*)pCtx;
+ Fts5Auxdata *pData;
+ void *pRet = 0;
+
+ for(pData=pCsr->pAuxdata; pData; pData=pData->pNext){
+ if( pData->pAux==pCsr->pAux ) break;
+ }
+
+ if( pData ){
+ pRet = pData->pPtr;
+ if( bClear ){
+ pData->pPtr = 0;
+ pData->xDelete = 0;
+ }
+ }
+
+ return pRet;
+}
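+
+/* Usage sketch (illustrative; "MyData", "pApi" and the surrounding aux
+** function are hypothetical): an auxiliary function can cache state for
+** the duration of a query using the two methods above, e.g.:
+**
+**   MyData *p = (MyData*)pApi->xGetAuxdata(pFts, 0);
+**   if( p==0 ){
+**     p = (MyData*)sqlite3_malloc(sizeof(MyData));
+**     if( p==0 ) return;
+**     memset(p, 0, sizeof(MyData));
+**     pApi->xSetAuxdata(pFts, (void*)p, sqlite3_free);
+**   }
+**
+** The same pointer is then returned by xGetAuxdata() on each subsequent
+** row visited by the same cursor/auxiliary-function pair. */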
+
+static void fts5ApiPhraseNext(
+ Fts5Context *pUnused,
+ Fts5PhraseIter *pIter,
+ int *piCol, int *piOff
+){
+ UNUSED_PARAM(pUnused);
+ if( pIter->a>=pIter->b ){
+ *piCol = -1;
+ *piOff = -1;
+ }else{
+ int iVal;
+ pIter->a += fts5GetVarint32(pIter->a, iVal);
+ if( iVal==1 ){
+ pIter->a += fts5GetVarint32(pIter->a, iVal);
+ *piCol = iVal;
+ *piOff = 0;
+ pIter->a += fts5GetVarint32(pIter->a, iVal);
+ }
+ *piOff += (iVal-2);
+ }
+}
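+
+/* Encoding note (inferred from the decoder above): with detail=full, a
+** position list is a sequence of varints. A varint of value 1 marks a
+** column change and is followed by the new column number; any other
+** varint v advances the token offset by (v-2) from the previous offset,
+** which starts at 0 for each new column. */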
+
+static int fts5ApiPhraseFirst(
+ Fts5Context *pCtx,
+ int iPhrase,
+ Fts5PhraseIter *pIter,
+ int *piCol, int *piOff
+){
+ Fts5Cursor *pCsr = (Fts5Cursor*)pCtx;
+ int n;
+ int rc = fts5CsrPoslist(pCsr, iPhrase, &pIter->a, &n);
+ if( rc==SQLITE_OK ){
+ pIter->b = &pIter->a[n];
+ *piCol = 0;
+ *piOff = 0;
+ fts5ApiPhraseNext(pCtx, pIter, piCol, piOff);
+ }
+ return rc;
+}
+
+static void fts5ApiPhraseNextColumn(
+ Fts5Context *pCtx,
+ Fts5PhraseIter *pIter,
+ int *piCol
+){
+ Fts5Cursor *pCsr = (Fts5Cursor*)pCtx;
+ Fts5Config *pConfig = ((Fts5Table*)(pCsr->base.pVtab))->pConfig;
+
+ if( pConfig->eDetail==FTS5_DETAIL_COLUMNS ){
+ if( pIter->a>=pIter->b ){
+ *piCol = -1;
+ }else{
+ int iIncr;
+ pIter->a += fts5GetVarint32(&pIter->a[0], iIncr);
+ *piCol += (iIncr-2);
+ }
+ }else{
+ while( 1 ){
+ int dummy;
+ if( pIter->a>=pIter->b ){
+ *piCol = -1;
+ return;
+ }
+ if( pIter->a[0]==0x01 ) break;
+ pIter->a += fts5GetVarint32(pIter->a, dummy);
+ }
+ pIter->a += 1 + fts5GetVarint32(&pIter->a[1], *piCol);
+ }
+}
+
+static int fts5ApiPhraseFirstColumn(
+ Fts5Context *pCtx,
+ int iPhrase,
+ Fts5PhraseIter *pIter,
+ int *piCol
+){
+ int rc = SQLITE_OK;
+ Fts5Cursor *pCsr = (Fts5Cursor*)pCtx;
+ Fts5Config *pConfig = ((Fts5Table*)(pCsr->base.pVtab))->pConfig;
+
+ if( pConfig->eDetail==FTS5_DETAIL_COLUMNS ){
+ Fts5Sorter *pSorter = pCsr->pSorter;
+ int n;
+ if( pSorter ){
+ int i1 = (iPhrase==0 ? 0 : pSorter->aIdx[iPhrase-1]);
+ n = pSorter->aIdx[iPhrase] - i1;
+ pIter->a = &pSorter->aPoslist[i1];
+ }else{
+ rc = sqlite3Fts5ExprPhraseCollist(pCsr->pExpr, iPhrase, &pIter->a, &n);
+ }
+ if( rc==SQLITE_OK ){
+ pIter->b = &pIter->a[n];
+ *piCol = 0;
+ fts5ApiPhraseNextColumn(pCtx, pIter, piCol);
+ }
+ }else{
+ int n;
+ rc = fts5CsrPoslist(pCsr, iPhrase, &pIter->a, &n);
+ if( rc==SQLITE_OK ){
+ pIter->b = &pIter->a[n];
+ if( n<=0 ){
+ *piCol = -1;
+ }else if( pIter->a[0]==0x01 ){
+ pIter->a += 1 + fts5GetVarint32(&pIter->a[1], *piCol);
+ }else{
+ *piCol = 0;
+ }
+ }
+ }
+
+ return rc;
+}
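+
+/* Iteration sketch (illustrative): from inside an auxiliary function,
+** the phrase-iterator methods above are typically driven as:
+**
+**   Fts5PhraseIter iter;
+**   int iCol, iOff;
+**   for(pApi->xPhraseFirst(pFts, iPhrase, &iter, &iCol, &iOff);
+**       iCol>=0;
+**       pApi->xPhraseNext(pFts, &iter, &iCol, &iOff)
+**   ){
+**     ... handle an instance of phrase iPhrase at (iCol, iOff) ...
+**   }
+**
+** xPhraseFirstColumn()/xPhraseNextColumn() follow the same pattern but
+** report columns only. */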
+
+
+static int fts5ApiQueryPhrase(Fts5Context*, int, void*,
+ int(*)(const Fts5ExtensionApi*, Fts5Context*, void*)
+);
+
+static const Fts5ExtensionApi sFts5Api = {
+ 2, /* iVersion */
+ fts5ApiUserData,
+ fts5ApiColumnCount,
+ fts5ApiRowCount,
+ fts5ApiColumnTotalSize,
+ fts5ApiTokenize,
+ fts5ApiPhraseCount,
+ fts5ApiPhraseSize,
+ fts5ApiInstCount,
+ fts5ApiInst,
+ fts5ApiRowid,
+ fts5ApiColumnText,
+ fts5ApiColumnSize,
+ fts5ApiQueryPhrase,
+ fts5ApiSetAuxdata,
+ fts5ApiGetAuxdata,
+ fts5ApiPhraseFirst,
+ fts5ApiPhraseNext,
+ fts5ApiPhraseFirstColumn,
+ fts5ApiPhraseNextColumn,
+};
+
+/*
+** Implementation of API function xQueryPhrase().
+*/
+static int fts5ApiQueryPhrase(
+ Fts5Context *pCtx,
+ int iPhrase,
+ void *pUserData,
+ int(*xCallback)(const Fts5ExtensionApi*, Fts5Context*, void*)
+){
+ Fts5Cursor *pCsr = (Fts5Cursor*)pCtx;
+ Fts5FullTable *pTab = (Fts5FullTable*)(pCsr->base.pVtab);
+ int rc;
+ Fts5Cursor *pNew = 0;
+
+ rc = fts5OpenMethod(pCsr->base.pVtab, (sqlite3_vtab_cursor**)&pNew);
+ if( rc==SQLITE_OK ){
+ pNew->ePlan = FTS5_PLAN_MATCH;
+ pNew->iFirstRowid = SMALLEST_INT64;
+ pNew->iLastRowid = LARGEST_INT64;
+ pNew->base.pVtab = (sqlite3_vtab*)pTab;
+ rc = sqlite3Fts5ExprClonePhrase(pCsr->pExpr, iPhrase, &pNew->pExpr);
+ }
+
+ if( rc==SQLITE_OK ){
+ for(rc = fts5CursorFirst(pTab, pNew, 0);
+ rc==SQLITE_OK && CsrFlagTest(pNew, FTS5CSR_EOF)==0;
+ rc = fts5NextMethod((sqlite3_vtab_cursor*)pNew)
+ ){
+ rc = xCallback(&sFts5Api, (Fts5Context*)pNew, pUserData);
+ if( rc!=SQLITE_OK ){
+ if( rc==SQLITE_DONE ) rc = SQLITE_OK;
+ break;
+ }
+ }
+ }
+
+ fts5CloseMethod((sqlite3_vtab_cursor*)pNew);
+ return rc;
+}
+
+static void fts5ApiInvoke(
+ Fts5Auxiliary *pAux,
+ Fts5Cursor *pCsr,
+ sqlite3_context *context,
+ int argc,
+ sqlite3_value **argv
+){
+ assert( pCsr->pAux==0 );
+ pCsr->pAux = pAux;
+ pAux->xFunc(&sFts5Api, (Fts5Context*)pCsr, context, argc, argv);
+ pCsr->pAux = 0;
+}
+
+static Fts5Cursor *fts5CursorFromCsrid(Fts5Global *pGlobal, i64 iCsrId){
+ Fts5Cursor *pCsr;
+ for(pCsr=pGlobal->pCsr; pCsr; pCsr=pCsr->pNext){
+ if( pCsr->iCsrId==iCsrId ) break;
+ }
+ return pCsr;
+}
+
+static void fts5ApiCallback(
+ sqlite3_context *context,
+ int argc,
+ sqlite3_value **argv
+){
+
+ Fts5Auxiliary *pAux;
+ Fts5Cursor *pCsr;
+ i64 iCsrId;
+
+ assert( argc>=1 );
+ pAux = (Fts5Auxiliary*)sqlite3_user_data(context);
+ iCsrId = sqlite3_value_int64(argv[0]);
+
+ pCsr = fts5CursorFromCsrid(pAux->pGlobal, iCsrId);
+ if( pCsr==0 || pCsr->ePlan==0 ){
+ char *zErr = sqlite3_mprintf("no such cursor: %lld", iCsrId);
+ sqlite3_result_error(context, zErr, -1);
+ sqlite3_free(zErr);
+ }else{
+ fts5ApiInvoke(pAux, pCsr, context, argc-1, &argv[1]);
+ }
+}
+
+
+/*
+** Given cursor id iCsrId, return a pointer to the corresponding Fts5Table
+** object, or NULL if the cursor id does not exist.
+*/
+static Fts5Table *sqlite3Fts5TableFromCsrid(
+ Fts5Global *pGlobal, /* FTS5 global context for db handle */
+ i64 iCsrId /* Id of cursor to find */
+){
+ Fts5Cursor *pCsr;
+ pCsr = fts5CursorFromCsrid(pGlobal, iCsrId);
+ if( pCsr ){
+ return (Fts5Table*)pCsr->base.pVtab;
+ }
+ return 0;
+}
+
+/*
+** Return a "position-list blob" corresponding to the current position of
+** cursor pCsr via sqlite3_result_blob(). A position-list blob contains
+** the current position-list for each phrase in the query associated with
+** cursor pCsr.
+**
+** A position-list blob begins with (nPhrase-1) varints, where nPhrase is
+** the number of phrases in the query. Following the varints are the
+** concatenated position lists for each phrase, in order.
+**
+** The first varint (if it exists) contains the size of the position list
+** for phrase 0. The second (same disclaimer) contains the size of position
+** list 1. And so on. There is no size field for the final position list,
+** as it can be derived from the total size of the blob.
+*/
+static int fts5PoslistBlob(sqlite3_context *pCtx, Fts5Cursor *pCsr){
+ int i;
+ int rc = SQLITE_OK;
+ int nPhrase = sqlite3Fts5ExprPhraseCount(pCsr->pExpr);
+ Fts5Buffer val;
+
+ memset(&val, 0, sizeof(Fts5Buffer));
+ switch( ((Fts5Table*)(pCsr->base.pVtab))->pConfig->eDetail ){
+ case FTS5_DETAIL_FULL:
+
+ /* Append the varints */
+ for(i=0; i<(nPhrase-1); i++){
+ const u8 *dummy;
+ int nByte = sqlite3Fts5ExprPoslist(pCsr->pExpr, i, &dummy);
+ sqlite3Fts5BufferAppendVarint(&rc, &val, nByte);
+ }
+
+ /* Append the position lists */
+ for(i=0; i<nPhrase; i++){
+ const u8 *pPoslist;
+ int nPoslist;
+ nPoslist = sqlite3Fts5ExprPoslist(pCsr->pExpr, i, &pPoslist);
+ sqlite3Fts5BufferAppendBlob(&rc, &val, nPoslist, pPoslist);
+ }
+ break;
+
+ case FTS5_DETAIL_COLUMNS:
+
+ /* Append the varints */
+ for(i=0; rc==SQLITE_OK && i<(nPhrase-1); i++){
+ const u8 *dummy;
+ int nByte;
+ rc = sqlite3Fts5ExprPhraseCollist(pCsr->pExpr, i, &dummy, &nByte);
+ sqlite3Fts5BufferAppendVarint(&rc, &val, nByte);
+ }
+
+ /* Append the position lists */
+ for(i=0; rc==SQLITE_OK && i<nPhrase; i++){
+ const u8 *pPoslist;
+ int nPoslist;
+ rc = sqlite3Fts5ExprPhraseCollist(pCsr->pExpr, i, &pPoslist, &nPoslist);
+ sqlite3Fts5BufferAppendBlob(&rc, &val, nPoslist, pPoslist);
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ sqlite3_result_blob(pCtx, val.p, val.n, sqlite3_free);
+ return rc;
+}
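+
+/*
+** Illustrative sketch (not part of the module; the function name is
+** hypothetical): decode a position-list blob produced in detail=full
+** mode by fts5PoslistBlob() above, locating the poslist for phrase
+** iPhrase. This mirrors the aIdx[] logic in fts5SorterNext().
+*/
+#if 0
+static void fts5DemoPoslistFromBlob(
+  const u8 *aBlob, int nBlob,     /* Blob and its size in bytes */
+  int nPhrase,                    /* Number of phrases in the query */
+  int iPhrase,                    /* Phrase to locate (0..nPhrase-1) */
+  const u8 **pa, int *pn          /* OUT: poslist for phrase iPhrase */
+){
+  const u8 *a = aBlob;
+  int i;
+  int iOff = 0;                   /* Offset of target within poslist body */
+  int nTarget = -1;               /* Size of target, or -1 for "the rest" */
+
+  /* Read the (nPhrase-1) leading size varints. Sum the sizes of the
+  ** poslists that precede phrase iPhrase; remember the size of the
+  ** target itself unless it is the final (implicitly sized) poslist. */
+  for(i=0; i<(nPhrase-1); i++){
+    int iVal;
+    a += fts5GetVarint32(a, iVal);
+    if( i<iPhrase ) iOff += iVal;
+    if( i==iPhrase ) nTarget = iVal;
+  }
+
+  /* a now points at the first byte of poslist 0 */
+  *pa = &a[iOff];
+  *pn = (nTarget>=0) ? nTarget : (int)(&aBlob[nBlob] - *pa);
+}
+#endif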
+
+/*
+** This is the xColumn method, called by SQLite to request a value from
+** the row that the supplied cursor currently points to.
+*/
+static int fts5ColumnMethod(
+ sqlite3_vtab_cursor *pCursor, /* Cursor to retrieve value from */
+ sqlite3_context *pCtx, /* Context for sqlite3_result_xxx() calls */
+ int iCol /* Index of column to read value from */
+){
+ Fts5FullTable *pTab = (Fts5FullTable*)(pCursor->pVtab);
+ Fts5Config *pConfig = pTab->p.pConfig;
+ Fts5Cursor *pCsr = (Fts5Cursor*)pCursor;
+ int rc = SQLITE_OK;
+
+ assert( CsrFlagTest(pCsr, FTS5CSR_EOF)==0 );
+
+ if( pCsr->ePlan==FTS5_PLAN_SPECIAL ){
+ if( iCol==pConfig->nCol ){
+ sqlite3_result_int64(pCtx, pCsr->iSpecial);
+ }
+ }else
+
+ if( iCol==pConfig->nCol ){
+ /* User is requesting the value of the special column with the same name
+ ** as the table. Return the cursor integer id number. This value is only
+ ** useful in that it may be passed as the first argument to an FTS5
+ ** auxiliary function. */
+ sqlite3_result_int64(pCtx, pCsr->iCsrId);
+ }else if( iCol==pConfig->nCol+1 ){
+
+ /* The value of the "rank" column. */
+ if( pCsr->ePlan==FTS5_PLAN_SOURCE ){
+ fts5PoslistBlob(pCtx, pCsr);
+ }else if(
+ pCsr->ePlan==FTS5_PLAN_MATCH
+ || pCsr->ePlan==FTS5_PLAN_SORTED_MATCH
+ ){
+ if( pCsr->pRank || SQLITE_OK==(rc = fts5FindRankFunction(pCsr)) ){
+ fts5ApiInvoke(pCsr->pRank, pCsr, pCtx, pCsr->nRankArg, pCsr->apRankArg);
+ }
+ }
+ }else if( !fts5IsContentless(pTab) ){
+ pConfig->pzErrmsg = &pTab->p.base.zErrMsg;
+ rc = fts5SeekCursor(pCsr, 1);
+ if( rc==SQLITE_OK ){
+ sqlite3_result_value(pCtx, sqlite3_column_value(pCsr->pStmt, iCol+1));
+ }
+ pConfig->pzErrmsg = 0;
+ }
+ return rc;
+}
+
+
+/*
+** This routine implements the xFindFunction method for the FTS5
+** virtual table.
+*/
+static int fts5FindFunctionMethod(
+ sqlite3_vtab *pVtab, /* Virtual table handle */
+ int nUnused, /* Number of SQL function arguments */
+ const char *zName, /* Name of SQL function */
+ void (**pxFunc)(sqlite3_context*,int,sqlite3_value**), /* OUT: Result */
+ void **ppArg /* OUT: User data for *pxFunc */
+){
+ Fts5FullTable *pTab = (Fts5FullTable*)pVtab;
+ Fts5Auxiliary *pAux;
+
+ UNUSED_PARAM(nUnused);
+ pAux = fts5FindAuxiliary(pTab, zName);
+ if( pAux ){
+ *pxFunc = fts5ApiCallback;
+ *ppArg = (void*)pAux;
+ return 1;
+ }
+
+ /* No function of the specified name was found. Return 0. */
+ return 0;
+}
+
+/*
+** Implementation of FTS5 xRename method. Rename an fts5 table.
+*/
+static int fts5RenameMethod(
+ sqlite3_vtab *pVtab, /* Virtual table handle */
+ const char *zName /* New name of table */
+){
+ Fts5FullTable *pTab = (Fts5FullTable*)pVtab;
+ return sqlite3Fts5StorageRename(pTab->pStorage, zName);
+}
+
+static int sqlite3Fts5FlushToDisk(Fts5Table *pTab){
+ fts5TripCursors((Fts5FullTable*)pTab);
+ return sqlite3Fts5StorageSync(((Fts5FullTable*)pTab)->pStorage);
+}
+
+/*
+** The xSavepoint() method.
+**
+** Flush the contents of the pending-terms table to disk.
+*/
+static int fts5SavepointMethod(sqlite3_vtab *pVtab, int iSavepoint){
+ UNUSED_PARAM(iSavepoint); /* Call below is a no-op for NDEBUG builds */
+ fts5CheckTransactionState((Fts5FullTable*)pVtab, FTS5_SAVEPOINT, iSavepoint);
+ return sqlite3Fts5FlushToDisk((Fts5Table*)pVtab);
+}
+
+/*
+** The xRelease() method.
+**
+** This is a no-op.
+*/
+static int fts5ReleaseMethod(sqlite3_vtab *pVtab, int iSavepoint){
+ UNUSED_PARAM(iSavepoint); /* Call below is a no-op for NDEBUG builds */
+ fts5CheckTransactionState((Fts5FullTable*)pVtab, FTS5_RELEASE, iSavepoint);
+ return sqlite3Fts5FlushToDisk((Fts5Table*)pVtab);
+}
+
+/*
+** The xRollbackTo() method.
+**
+** Discard the contents of the pending terms table.
+*/
+static int fts5RollbackToMethod(sqlite3_vtab *pVtab, int iSavepoint){
+ Fts5FullTable *pTab = (Fts5FullTable*)pVtab;
+ UNUSED_PARAM(iSavepoint); /* Call below is a no-op for NDEBUG builds */
+ fts5CheckTransactionState(pTab, FTS5_ROLLBACKTO, iSavepoint);
+ fts5TripCursors(pTab);
+ return sqlite3Fts5StorageRollback(pTab->pStorage);
+}
+
+/*
+** Register a new auxiliary function with global context pGlobal.
+*/
+static int fts5CreateAux(
+ fts5_api *pApi, /* Global context (one per db handle) */
+ const char *zName, /* Name of new function */
+ void *pUserData, /* User data for aux. function */
+ fts5_extension_function xFunc, /* Aux. function implementation */
+ void(*xDestroy)(void*) /* Destructor for pUserData */
+){
+ Fts5Global *pGlobal = (Fts5Global*)pApi;
+ int rc = sqlite3_overload_function(pGlobal->db, zName, -1);
+ if( rc==SQLITE_OK ){
+ Fts5Auxiliary *pAux;
+ sqlite3_int64 nName; /* Size of zName in bytes, including \0 */
+ sqlite3_int64 nByte; /* Bytes of space to allocate */
+
+ nName = strlen(zName) + 1;
+ nByte = sizeof(Fts5Auxiliary) + nName;
+ pAux = (Fts5Auxiliary*)sqlite3_malloc64(nByte);
+ if( pAux ){
+ memset(pAux, 0, (size_t)nByte);
+ pAux->zFunc = (char*)&pAux[1];
+ memcpy(pAux->zFunc, zName, nName);
+ pAux->pGlobal = pGlobal;
+ pAux->pUserData = pUserData;
+ pAux->xFunc = xFunc;
+ pAux->xDestroy = xDestroy;
+ pAux->pNext = pGlobal->pAux;
+ pGlobal->pAux = pAux;
+ }else{
+ rc = SQLITE_NOMEM;
+ }
+ }
+
+ return rc;
+}
+
+/*
+** Register a new tokenizer. This is the implementation of the
+** fts5_api.xCreateTokenizer() method.
+*/
+static int fts5CreateTokenizer(
+ fts5_api *pApi, /* Global context (one per db handle) */
+ const char *zName, /* Name of new tokenizer */
+ void *pUserData, /* User data for tokenizer */
+ fts5_tokenizer *pTokenizer, /* Tokenizer implementation */
+ void(*xDestroy)(void*) /* Destructor for pUserData */
+){
+ Fts5Global *pGlobal = (Fts5Global*)pApi;
+ Fts5TokenizerModule *pNew;
+ sqlite3_int64 nName; /* Size of zName and its \0 terminator */
+ sqlite3_int64 nByte; /* Bytes of space to allocate */
+ int rc = SQLITE_OK;
+
+ nName = strlen(zName) + 1;
+ nByte = sizeof(Fts5TokenizerModule) + nName;
+ pNew = (Fts5TokenizerModule*)sqlite3_malloc64(nByte);
+ if( pNew ){
+ memset(pNew, 0, (size_t)nByte);
+ pNew->zName = (char*)&pNew[1];
+ memcpy(pNew->zName, zName, nName);
+ pNew->pUserData = pUserData;
+ pNew->x = *pTokenizer;
+ pNew->xDestroy = xDestroy;
+ pNew->pNext = pGlobal->pTok;
+ pGlobal->pTok = pNew;
+ if( pNew->pNext==0 ){
+ pGlobal->pDfltTok = pNew;
+ }
+ }else{
+ rc = SQLITE_NOMEM;
+ }
+
+ return rc;
+}
+
+static Fts5TokenizerModule *fts5LocateTokenizer(
+ Fts5Global *pGlobal,
+ const char *zName
+){
+ Fts5TokenizerModule *pMod = 0;
+
+ if( zName==0 ){
+ pMod = pGlobal->pDfltTok;
+ }else{
+ for(pMod=pGlobal->pTok; pMod; pMod=pMod->pNext){
+ if( sqlite3_stricmp(zName, pMod->zName)==0 ) break;
+ }
+ }
+
+ return pMod;
+}
+
+/*
+** Find a tokenizer. This is the implementation of the
+** fts5_api.xFindTokenizer() method.
+*/
+static int fts5FindTokenizer(
+ fts5_api *pApi, /* Global context (one per db handle) */
+ const char *zName, /* Name of tokenizer to find */
+ void **ppUserData,
+ fts5_tokenizer *pTokenizer /* Populate this object */
+){
+ int rc = SQLITE_OK;
+ Fts5TokenizerModule *pMod;
+
+ pMod = fts5LocateTokenizer((Fts5Global*)pApi, zName);
+ if( pMod ){
+ *pTokenizer = pMod->x;
+ *ppUserData = pMod->pUserData;
+ }else{
+ memset(pTokenizer, 0, sizeof(fts5_tokenizer));
+ rc = SQLITE_ERROR;
+ }
+
+ return rc;
+}
+
+static int sqlite3Fts5GetTokenizer(
+ Fts5Global *pGlobal,
+ const char **azArg,
+ int nArg,
+ Fts5Tokenizer **ppTok,
+ fts5_tokenizer **ppTokApi,
+ char **pzErr
+){
+ Fts5TokenizerModule *pMod;
+ int rc = SQLITE_OK;
+
+ pMod = fts5LocateTokenizer(pGlobal, nArg==0 ? 0 : azArg[0]);
+ if( pMod==0 ){
+ assert( nArg>0 );
+ rc = SQLITE_ERROR;
+ *pzErr = sqlite3_mprintf("no such tokenizer: %s", azArg[0]);
+ }else{
+ rc = pMod->x.xCreate(pMod->pUserData, &azArg[1], (nArg?nArg-1:0), ppTok);
+ *ppTokApi = &pMod->x;
+ if( rc!=SQLITE_OK && pzErr ){
+ *pzErr = sqlite3_mprintf("error in tokenizer constructor");
+ }
+ }
+
+ if( rc!=SQLITE_OK ){
+ *ppTokApi = 0;
+ *ppTok = 0;
+ }
+
+ return rc;
+}
+
+static void fts5ModuleDestroy(void *pCtx){
+ Fts5TokenizerModule *pTok, *pNextTok;
+ Fts5Auxiliary *pAux, *pNextAux;
+ Fts5Global *pGlobal = (Fts5Global*)pCtx;
+
+ for(pAux=pGlobal->pAux; pAux; pAux=pNextAux){
+ pNextAux = pAux->pNext;
+ if( pAux->xDestroy ) pAux->xDestroy(pAux->pUserData);
+ sqlite3_free(pAux);
+ }
+
+ for(pTok=pGlobal->pTok; pTok; pTok=pNextTok){
+ pNextTok = pTok->pNext;
+ if( pTok->xDestroy ) pTok->xDestroy(pTok->pUserData);
+ sqlite3_free(pTok);
+ }
+
+ sqlite3_free(pGlobal);
+}
+
+static void fts5Fts5Func(
+ sqlite3_context *pCtx, /* Function call context */
+ int nArg, /* Number of args */
+ sqlite3_value **apArg /* Function arguments */
+){
+ Fts5Global *pGlobal = (Fts5Global*)sqlite3_user_data(pCtx);
+ fts5_api **ppApi;
+ UNUSED_PARAM(nArg);
+ assert( nArg==1 );
+ ppApi = (fts5_api**)sqlite3_value_pointer(apArg[0], "fts5_api_ptr");
+ if( ppApi ) *ppApi = &pGlobal->api;
+}
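+
+/* Retrieval sketch (illustrative): an application obtains the fts5_api
+** pointer exported above by binding a pointer of type "fts5_api_ptr",
+** e.g.:
+**
+**   fts5_api *pApi = 0;
+**   sqlite3_stmt *pStmt = 0;
+**   if( SQLITE_OK==sqlite3_prepare(db, "SELECT fts5(?1)", -1, &pStmt, 0) ){
+**     sqlite3_bind_pointer(pStmt, 1, (void*)&pApi, "fts5_api_ptr", 0);
+**     sqlite3_step(pStmt);
+**     sqlite3_finalize(pStmt);
+**   }
+**
+** On success pApi points at Fts5Global.api and may be used to register
+** auxiliary functions and tokenizers. */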
+
+/*
+** Implementation of fts5_source_id() function.
+*/
+static void fts5SourceIdFunc(
+ sqlite3_context *pCtx, /* Function call context */
+ int nArg, /* Number of args */
+ sqlite3_value **apUnused /* Function arguments */
+){
+ assert( nArg==0 );
+ UNUSED_PARAM2(nArg, apUnused);
+ sqlite3_result_text(pCtx, "fts5: 2020-08-14 13:23:32 fca8dc8b578f215a969cd899336378966156154710873e68b3d9ac5881b0ff3f", -1, SQLITE_TRANSIENT);
+}
+
+/*
+** Return true if zName is the extension on one of the shadow tables used
+** by this module.
+*/
+static int fts5ShadowName(const char *zName){
+ static const char *azName[] = {
+ "config", "content", "data", "docsize", "idx"
+ };
+ unsigned int i;
+ for(i=0; i<sizeof(azName)/sizeof(azName[0]); i++){
+ if( sqlite3_stricmp(zName, azName[i])==0 ) return 1;
+ }
+ return 0;
+}
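+
+/* For example, an fts5 table named "ft" may own shadow tables
+** "ft_config", "ft_data" and "ft_idx", plus "ft_content" (normal
+** content mode only) and "ft_docsize" (unless columnsize=0). */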
+
+static int fts5Init(sqlite3 *db){
+ static const sqlite3_module fts5Mod = {
+ /* iVersion */ 3,
+ /* xCreate */ fts5CreateMethod,
+ /* xConnect */ fts5ConnectMethod,
+ /* xBestIndex */ fts5BestIndexMethod,
+ /* xDisconnect */ fts5DisconnectMethod,
+ /* xDestroy */ fts5DestroyMethod,
+ /* xOpen */ fts5OpenMethod,
+ /* xClose */ fts5CloseMethod,
+ /* xFilter */ fts5FilterMethod,
+ /* xNext */ fts5NextMethod,
+ /* xEof */ fts5EofMethod,
+ /* xColumn */ fts5ColumnMethod,
+ /* xRowid */ fts5RowidMethod,
+ /* xUpdate */ fts5UpdateMethod,
+ /* xBegin */ fts5BeginMethod,
+ /* xSync */ fts5SyncMethod,
+ /* xCommit */ fts5CommitMethod,
+ /* xRollback */ fts5RollbackMethod,
+ /* xFindFunction */ fts5FindFunctionMethod,
+ /* xRename */ fts5RenameMethod,
+ /* xSavepoint */ fts5SavepointMethod,
+ /* xRelease */ fts5ReleaseMethod,
+ /* xRollbackTo */ fts5RollbackToMethod,
+ /* xShadowName */ fts5ShadowName
+ };
+
+ int rc;
+ Fts5Global *pGlobal = 0;
+
+ pGlobal = (Fts5Global*)sqlite3_malloc(sizeof(Fts5Global));
+ if( pGlobal==0 ){
+ rc = SQLITE_NOMEM;
+ }else{
+ void *p = (void*)pGlobal;
+ memset(pGlobal, 0, sizeof(Fts5Global));
+ pGlobal->db = db;
+ pGlobal->api.iVersion = 2;
+ pGlobal->api.xCreateFunction = fts5CreateAux;
+ pGlobal->api.xCreateTokenizer = fts5CreateTokenizer;
+ pGlobal->api.xFindTokenizer = fts5FindTokenizer;
+ rc = sqlite3_create_module_v2(db, "fts5", &fts5Mod, p, fts5ModuleDestroy);
+ if( rc==SQLITE_OK ) rc = sqlite3Fts5IndexInit(db);
+ if( rc==SQLITE_OK ) rc = sqlite3Fts5ExprInit(pGlobal, db);
+ if( rc==SQLITE_OK ) rc = sqlite3Fts5AuxInit(&pGlobal->api);
+ if( rc==SQLITE_OK ) rc = sqlite3Fts5TokenizerInit(&pGlobal->api);
+ if( rc==SQLITE_OK ) rc = sqlite3Fts5VocabInit(pGlobal, db);
+ if( rc==SQLITE_OK ){
+ rc = sqlite3_create_function(
+ db, "fts5", 1, SQLITE_UTF8, p, fts5Fts5Func, 0, 0
+ );
+ }
+ if( rc==SQLITE_OK ){
+ rc = sqlite3_create_function(
+ db, "fts5_source_id", 0, SQLITE_UTF8, p, fts5SourceIdFunc, 0, 0
+ );
+ }
+ }
+
+ /* If SQLITE_FTS5_ENABLE_TEST_MI is defined, assume that the file
+ ** fts5_test_mi.c is compiled and linked into the executable. And call
+ ** its entry point to enable the matchinfo() demo. */
+#ifdef SQLITE_FTS5_ENABLE_TEST_MI
+ if( rc==SQLITE_OK ){
+ extern int sqlite3Fts5TestRegisterMatchinfo(sqlite3*);
+ rc = sqlite3Fts5TestRegisterMatchinfo(db);
+ }
+#endif
+
+ return rc;
+}
+
+/*
+** The following functions are used to register the module with SQLite. If
+** this module is being built as part of the SQLite core (SQLITE_CORE is
+** defined), then sqlite3_open() will call sqlite3Fts5Init() directly.
+**
+** Or, if this module is being built as a loadable extension,
+** sqlite3Fts5Init() is omitted and the two standard entry points
+** sqlite3_fts_init() and sqlite3_fts5_init() are defined instead.
+*/
+#ifndef SQLITE_CORE
+#ifdef _WIN32
+__declspec(dllexport)
+#endif
+SQLITE_API int sqlite3_fts_init(
+ sqlite3 *db,
+ char **pzErrMsg,
+ const sqlite3_api_routines *pApi
+){
+ SQLITE_EXTENSION_INIT2(pApi);
+ (void)pzErrMsg; /* Unused parameter */
+ return fts5Init(db);
+}
+
+#ifdef _WIN32
+__declspec(dllexport)
+#endif
+SQLITE_API int sqlite3_fts5_init(
+ sqlite3 *db,
+ char **pzErrMsg,
+ const sqlite3_api_routines *pApi
+){
+ SQLITE_EXTENSION_INIT2(pApi);
+ (void)pzErrMsg; /* Unused parameter */
+ return fts5Init(db);
+}
+#else
+SQLITE_PRIVATE int sqlite3Fts5Init(sqlite3 *db){
+ return fts5Init(db);
+}
+#endif
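+
+/* Loading sketch (illustrative): when built as a loadable extension the
+** module can be loaded from the sqlite3 shell with:
+**
+**   .load ./fts5
+**
+** or from SQL with: SELECT load_extension('./fts5');
+** (The shared-library name and path are placeholders.) */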
+
+/*
+** 2014 May 31
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+******************************************************************************
+**
+*/
+
+
+
+/* #include "fts5Int.h" */
+
+struct Fts5Storage {
+ Fts5Config *pConfig;
+ Fts5Index *pIndex;
+ int bTotalsValid; /* True if nTotalRow/aTotalSize[] are valid */
+ i64 nTotalRow; /* Total number of rows in FTS table */
+ i64 *aTotalSize; /* Total sizes of each column */
+ sqlite3_stmt *aStmt[11];
+};
+
+
+#if FTS5_STMT_SCAN_ASC!=0
+# error "FTS5_STMT_SCAN_ASC mismatch"
+#endif
+#if FTS5_STMT_SCAN_DESC!=1
+# error "FTS5_STMT_SCAN_DESC mismatch"
+#endif
+#if FTS5_STMT_LOOKUP!=2
+# error "FTS5_STMT_LOOKUP mismatch"
+#endif
+
+#define FTS5_STMT_INSERT_CONTENT 3
+#define FTS5_STMT_REPLACE_CONTENT 4
+#define FTS5_STMT_DELETE_CONTENT 5
+#define FTS5_STMT_REPLACE_DOCSIZE 6
+#define FTS5_STMT_DELETE_DOCSIZE 7
+#define FTS5_STMT_LOOKUP_DOCSIZE 8
+#define FTS5_STMT_REPLACE_CONFIG 9
+#define FTS5_STMT_SCAN 10
+
+/*
+** Return, via *ppStmt, the statement identified by eStmt (one of the
+** FTS5_STMT_XXX constants), preparing it and caching it in the
+** Fts5Storage.aStmt[] array if it has not already been prepared.
+** Return SQLITE_OK if successful, or an SQLite error code if an error
+** occurs.
+*/
+static int fts5StorageGetStmt(
+ Fts5Storage *p, /* Storage handle */
+ int eStmt, /* FTS5_STMT_XXX constant */
+ sqlite3_stmt **ppStmt, /* OUT: Prepared statement handle */
+ char **pzErrMsg /* OUT: Error message (if any) */
+){
+ int rc = SQLITE_OK;
+
+ /* If there is no %_docsize table, there should be no requests for
+ ** statements to operate on it. */
+ assert( p->pConfig->bColumnsize || (
+ eStmt!=FTS5_STMT_REPLACE_DOCSIZE
+ && eStmt!=FTS5_STMT_DELETE_DOCSIZE
+ && eStmt!=FTS5_STMT_LOOKUP_DOCSIZE
+ ));
+
+ assert( eStmt>=0 && eStmt<ArraySize(p->aStmt) );
+ if( p->aStmt[eStmt]==0 ){
+ const char *azStmt[] = {
+ "SELECT %s FROM %s T WHERE T.%Q >= ? AND T.%Q <= ? ORDER BY T.%Q ASC",
+ "SELECT %s FROM %s T WHERE T.%Q <= ? AND T.%Q >= ? ORDER BY T.%Q DESC",
+ "SELECT %s FROM %s T WHERE T.%Q=?", /* LOOKUP */
+
+ "INSERT INTO %Q.'%q_content' VALUES(%s)", /* INSERT_CONTENT */
+ "REPLACE INTO %Q.'%q_content' VALUES(%s)", /* REPLACE_CONTENT */
+ "DELETE FROM %Q.'%q_content' WHERE id=?", /* DELETE_CONTENT */
+ "REPLACE INTO %Q.'%q_docsize' VALUES(?,?)", /* REPLACE_DOCSIZE */
+ "DELETE FROM %Q.'%q_docsize' WHERE id=?", /* DELETE_DOCSIZE */
+
+ "SELECT sz FROM %Q.'%q_docsize' WHERE id=?", /* LOOKUP_DOCSIZE */
+
+ "REPLACE INTO %Q.'%q_config' VALUES(?,?)", /* REPLACE_CONFIG */
+ "SELECT %s FROM %s AS T", /* SCAN */
+ };
+ Fts5Config *pC = p->pConfig;
+ char *zSql = 0;
+
+ switch( eStmt ){
+ case FTS5_STMT_SCAN:
+ zSql = sqlite3_mprintf(azStmt[eStmt],
+ pC->zContentExprlist, pC->zContent
+ );
+ break;
+
+ case FTS5_STMT_SCAN_ASC:
+ case FTS5_STMT_SCAN_DESC:
+ zSql = sqlite3_mprintf(azStmt[eStmt], pC->zContentExprlist,
+ pC->zContent, pC->zContentRowid, pC->zContentRowid,
+ pC->zContentRowid
+ );
+ break;
+
+ case FTS5_STMT_LOOKUP:
+ zSql = sqlite3_mprintf(azStmt[eStmt],
+ pC->zContentExprlist, pC->zContent, pC->zContentRowid
+ );
+ break;
+
+ case FTS5_STMT_INSERT_CONTENT:
+ case FTS5_STMT_REPLACE_CONTENT: {
+ int nCol = pC->nCol + 1;
+ char *zBind;
+ int i;
+
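+        /* Build a nul-terminated list of "?" placeholders separated by
+        ** commas - one for the rowid plus one for each user column (nCol
+        ** is pC->nCol+1 here). The trailing ',' written by the loop below
+        ** is overwritten with the nul-terminator. */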
+ zBind = sqlite3_malloc64(1 + nCol*2);
+ if( zBind ){
+ for(i=0; i<nCol; i++){
+ zBind[i*2] = '?';
+ zBind[i*2 + 1] = ',';
+ }
+ zBind[i*2-1] = '\0';
+ zSql = sqlite3_mprintf(azStmt[eStmt], pC->zDb, pC->zName, zBind);
+ sqlite3_free(zBind);
+ }
+ break;
+ }
+
+ default:
+ zSql = sqlite3_mprintf(azStmt[eStmt], pC->zDb, pC->zName);
+ break;
+ }
+
+ if( zSql==0 ){
+ rc = SQLITE_NOMEM;
+ }else{
+ int f = SQLITE_PREPARE_PERSISTENT;
+ if( eStmt>FTS5_STMT_LOOKUP ) f |= SQLITE_PREPARE_NO_VTAB;
+ p->pConfig->bLock++;
+ rc = sqlite3_prepare_v3(pC->db, zSql, -1, f, &p->aStmt[eStmt], 0);
+ p->pConfig->bLock--;
+ sqlite3_free(zSql);
+ if( rc!=SQLITE_OK && pzErrMsg ){
+ *pzErrMsg = sqlite3_mprintf("%s", sqlite3_errmsg(pC->db));
+ }
+ }
+ }
+
+ *ppStmt = p->aStmt[eStmt];
+ sqlite3_reset(*ppStmt);
+ return rc;
+}
+
+
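+/*
+** Format an SQL statement using the printf-style format string zFormat
+** and the trailing arguments, then execute it against database handle
+** db. If an error occurs and pzErr is not NULL, *pzErr may be set to
+** point to a buffer containing an English language error message.
+** Return SQLITE_OK if successful, or an SQLite error code otherwise.
+*/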
+static int fts5ExecPrintf(
+ sqlite3 *db,
+ char **pzErr,
+ const char *zFormat,
+ ...
+){
+ int rc;
+ va_list ap; /* ... printf arguments */
+ char *zSql;
+
+ va_start(ap, zFormat);
+ zSql = sqlite3_vmprintf(zFormat, ap);
+
+ if( zSql==0 ){
+ rc = SQLITE_NOMEM;
+ }else{
+ rc = sqlite3_exec(db, zSql, 0, 0, pzErr);
+ sqlite3_free(zSql);
+ }
+
+ va_end(ap);
+ return rc;
+}
+
+/*
+** Drop all shadow tables. Return SQLITE_OK if successful or an SQLite error
+** code otherwise.
+*/
+static int sqlite3Fts5DropAll(Fts5Config *pConfig){
+ int rc = fts5ExecPrintf(pConfig->db, 0,
+ "DROP TABLE IF EXISTS %Q.'%q_data';"
+ "DROP TABLE IF EXISTS %Q.'%q_idx';"
+ "DROP TABLE IF EXISTS %Q.'%q_config';",
+ pConfig->zDb, pConfig->zName,
+ pConfig->zDb, pConfig->zName,
+ pConfig->zDb, pConfig->zName
+ );
+ if( rc==SQLITE_OK && pConfig->bColumnsize ){
+ rc = fts5ExecPrintf(pConfig->db, 0,
+ "DROP TABLE IF EXISTS %Q.'%q_docsize';",
+ pConfig->zDb, pConfig->zName
+ );
+ }
+ if( rc==SQLITE_OK && pConfig->eContent==FTS5_CONTENT_NORMAL ){
+ rc = fts5ExecPrintf(pConfig->db, 0,
+ "DROP TABLE IF EXISTS %Q.'%q_content';",
+ pConfig->zDb, pConfig->zName
+ );
+ }
+ return rc;
+}
+
+static void fts5StorageRenameOne(
+ Fts5Config *pConfig, /* Current FTS5 configuration */
+ int *pRc, /* IN/OUT: Error code */
+ const char *zTail, /* Tail of table name e.g. "data", "config" */
+ const char *zName /* New name of FTS5 table */
+){
+ if( *pRc==SQLITE_OK ){
+ *pRc = fts5ExecPrintf(pConfig->db, 0,
+ "ALTER TABLE %Q.'%q_%s' RENAME TO '%q_%s';",
+ pConfig->zDb, pConfig->zName, zTail, zName, zTail
+ );
+ }
+}
+
+static int sqlite3Fts5StorageRename(Fts5Storage *pStorage, const char *zName){
+ Fts5Config *pConfig = pStorage->pConfig;
+ int rc = sqlite3Fts5StorageSync(pStorage);
+
+ fts5StorageRenameOne(pConfig, &rc, "data", zName);
+ fts5StorageRenameOne(pConfig, &rc, "idx", zName);
+ fts5StorageRenameOne(pConfig, &rc, "config", zName);
+ if( pConfig->bColumnsize ){
+ fts5StorageRenameOne(pConfig, &rc, "docsize", zName);
+ }
+ if( pConfig->eContent==FTS5_CONTENT_NORMAL ){
+ fts5StorageRenameOne(pConfig, &rc, "content", zName);
+ }
+ return rc;
+}
+
+/*
+** Create the shadow table named zPost, with definition zDefn. Return
+** SQLITE_OK if successful, or an SQLite error code otherwise.
+*/
+static int sqlite3Fts5CreateTable(
+ Fts5Config *pConfig, /* FTS5 configuration */
+ const char *zPost, /* Shadow table to create (e.g. "content") */
+ const char *zDefn, /* Columns etc. for shadow table */
+ int bWithout, /* True for without rowid */
+ char **pzErr /* OUT: Error message */
+){
+ int rc;
+ char *zErr = 0;
+
+ rc = fts5ExecPrintf(pConfig->db, &zErr, "CREATE TABLE %Q.'%q_%q'(%s)%s",
+ pConfig->zDb, pConfig->zName, zPost, zDefn,
+#ifndef SQLITE_FTS5_NO_WITHOUT_ROWID
+ bWithout?" WITHOUT ROWID":
+#endif
+ ""
+ );
+ if( zErr ){
+ *pzErr = sqlite3_mprintf(
+ "fts5: error creating shadow table %q_%s: %s",
+ pConfig->zName, zPost, zErr
+ );
+ sqlite3_free(zErr);
+ }
+
+ return rc;
+}
+
+/*
+** Open a new Fts5Storage handle. If the bCreate argument is true, create
+** and initialize the underlying shadow tables.
+**
+** If successful, set *pp to point to the new object and return SQLITE_OK.
+** Otherwise, set *pp to NULL and return an SQLite error code.
+*/
+static int sqlite3Fts5StorageOpen(
+ Fts5Config *pConfig,
+ Fts5Index *pIndex,
+ int bCreate,
+ Fts5Storage **pp,
+ char **pzErr /* OUT: Error message */
+){
+ int rc = SQLITE_OK;
+ Fts5Storage *p; /* New object */
+ sqlite3_int64 nByte; /* Bytes of space to allocate */
+
+ nByte = sizeof(Fts5Storage) /* Fts5Storage object */
+ + pConfig->nCol * sizeof(i64); /* Fts5Storage.aTotalSize[] */
+ *pp = p = (Fts5Storage*)sqlite3_malloc64(nByte);
+ if( !p ) return SQLITE_NOMEM;
+
+ memset(p, 0, (size_t)nByte);
+ p->aTotalSize = (i64*)&p[1];
+ p->pConfig = pConfig;
+ p->pIndex = pIndex;
+
+ if( bCreate ){
+ if( pConfig->eContent==FTS5_CONTENT_NORMAL ){
+ int nDefn = 32 + pConfig->nCol*10;
+ char *zDefn = sqlite3_malloc64(32 + (sqlite3_int64)pConfig->nCol * 10);
+ if( zDefn==0 ){
+ rc = SQLITE_NOMEM;
+ }else{
+ int i;
+ int iOff;
+ sqlite3_snprintf(nDefn, zDefn, "id INTEGER PRIMARY KEY");
+ iOff = (int)strlen(zDefn);
+ for(i=0; i<pConfig->nCol; i++){
+ sqlite3_snprintf(nDefn-iOff, &zDefn[iOff], ", c%d", i);
+ iOff += (int)strlen(&zDefn[iOff]);
+ }
+ rc = sqlite3Fts5CreateTable(pConfig, "content", zDefn, 0, pzErr);
+ }
+ sqlite3_free(zDefn);
+ }
+
+ if( rc==SQLITE_OK && pConfig->bColumnsize ){
+ rc = sqlite3Fts5CreateTable(
+ pConfig, "docsize", "id INTEGER PRIMARY KEY, sz BLOB", 0, pzErr
+ );
+ }
+ if( rc==SQLITE_OK ){
+ rc = sqlite3Fts5CreateTable(
+ pConfig, "config", "k PRIMARY KEY, v", 1, pzErr
+ );
+ }
+ if( rc==SQLITE_OK ){
+ rc = sqlite3Fts5StorageConfigValue(p, "version", 0, FTS5_CURRENT_VERSION);
+ }
+ }
+
+ if( rc ){
+ sqlite3Fts5StorageClose(p);
+ *pp = 0;
+ }
+ return rc;
+}
+
+/*
+** Close a handle opened by an earlier call to sqlite3Fts5StorageOpen().
+*/
+static int sqlite3Fts5StorageClose(Fts5Storage *p){
+ int rc = SQLITE_OK;
+ if( p ){
+ int i;
+
+ /* Finalize all SQL statements */
+ for(i=0; i<ArraySize(p->aStmt); i++){
+ sqlite3_finalize(p->aStmt[i]);
+ }
+
+ sqlite3_free(p);
+ }
+ return rc;
+}
+
+typedef struct Fts5InsertCtx Fts5InsertCtx;
+struct Fts5InsertCtx {
+ Fts5Storage *pStorage;
+ int iCol;
+ int szCol; /* Size of column value in tokens */
+};
+
+/*
+** Tokenization callback used when inserting tokens into the FTS index.
+*/
+static int fts5StorageInsertCallback(
+ void *pContext, /* Pointer to Fts5InsertCtx object */
+ int tflags,
+ const char *pToken, /* Buffer containing token */
+ int nToken, /* Size of token in bytes */
+ int iUnused1, /* Start offset of token */
+ int iUnused2 /* End offset of token */
+){
+ Fts5InsertCtx *pCtx = (Fts5InsertCtx*)pContext;
+ Fts5Index *pIdx = pCtx->pStorage->pIndex;
+ UNUSED_PARAM2(iUnused1, iUnused2);
+ if( nToken>FTS5_MAX_TOKEN_SIZE ) nToken = FTS5_MAX_TOKEN_SIZE;
+ if( (tflags & FTS5_TOKEN_COLOCATED)==0 || pCtx->szCol==0 ){
+ pCtx->szCol++;
+ }
+ return sqlite3Fts5IndexWrite(pIdx, pCtx->iCol, pCtx->szCol-1, pToken, nToken);
+}
+
+/*
+** If a row with rowid iDel is present in the %_content table, add the
+** delete-markers to the FTS index necessary to delete it. Do not actually
+** remove the %_content row at this time though.
+*/
+static int fts5StorageDeleteFromIndex(
+ Fts5Storage *p,
+ i64 iDel,
+ sqlite3_value **apVal
+){
+ Fts5Config *pConfig = p->pConfig;
+ sqlite3_stmt *pSeek = 0; /* SELECT to read row iDel from %_data */
+ int rc; /* Return code */
+ int rc2; /* sqlite3_reset() return code */
+ int iCol;
+ Fts5InsertCtx ctx;
+
+ if( apVal==0 ){
+ rc = fts5StorageGetStmt(p, FTS5_STMT_LOOKUP, &pSeek, 0);
+ if( rc!=SQLITE_OK ) return rc;
+ sqlite3_bind_int64(pSeek, 1, iDel);
+ if( sqlite3_step(pSeek)!=SQLITE_ROW ){
+ return sqlite3_reset(pSeek);
+ }
+ }
+
+ ctx.pStorage = p;
+ ctx.iCol = -1;
+ rc = sqlite3Fts5IndexBeginWrite(p->pIndex, 1, iDel);
+ for(iCol=1; rc==SQLITE_OK && iCol<=pConfig->nCol; iCol++){
+ if( pConfig->abUnindexed[iCol-1]==0 ){
+ const char *zText;
+ int nText;
+ if( pSeek ){
+ zText = (const char*)sqlite3_column_text(pSeek, iCol);
+ nText = sqlite3_column_bytes(pSeek, iCol);
+ }else{
+ zText = (const char*)sqlite3_value_text(apVal[iCol-1]);
+ nText = sqlite3_value_bytes(apVal[iCol-1]);
+ }
+ ctx.szCol = 0;
+ rc = sqlite3Fts5Tokenize(pConfig, FTS5_TOKENIZE_DOCUMENT,
+ zText, nText, (void*)&ctx, fts5StorageInsertCallback
+ );
+ p->aTotalSize[iCol-1] -= (i64)ctx.szCol;
+ }
+ }
+ p->nTotalRow--;
+
+ rc2 = sqlite3_reset(pSeek);
+ if( rc==SQLITE_OK ) rc = rc2;
+ return rc;
+}
+
+
+/*
+** Insert a record into the %_docsize table. Specifically, do:
+**
+** INSERT OR REPLACE INTO %_docsize(id, sz) VALUES(iRowid, pBuf);
+**
+** If there is no %_docsize table (as happens if the columnsize=0 option
+** is specified when the FTS5 table is created), this function is a no-op.
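+**
+** The sz value is a blob of varints - one for each table column - each
+** containing the number of tokens in the corresponding column of the row.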
+*/
+static int fts5StorageInsertDocsize(
+ Fts5Storage *p, /* Storage module to write to */
+ i64 iRowid, /* id value */
+ Fts5Buffer *pBuf /* sz value */
+){
+ int rc = SQLITE_OK;
+ if( p->pConfig->bColumnsize ){
+ sqlite3_stmt *pReplace = 0;
+ rc = fts5StorageGetStmt(p, FTS5_STMT_REPLACE_DOCSIZE, &pReplace, 0);
+ if( rc==SQLITE_OK ){
+ sqlite3_bind_int64(pReplace, 1, iRowid);
+ sqlite3_bind_blob(pReplace, 2, pBuf->p, pBuf->n, SQLITE_STATIC);
+ sqlite3_step(pReplace);
+ rc = sqlite3_reset(pReplace);
+ sqlite3_bind_null(pReplace, 2);
+ }
+ }
+ return rc;
+}
+
+/*
+** Load the contents of the "averages" record from disk into the
+** p->nTotalRow and p->aTotalSize[] variables. If successful, and if
+** argument bCache is true, set the p->bTotalsValid flag to indicate
+** that the contents of aTotalSize[] and nTotalRow are valid until
+** further notice.
+**
+** Return SQLITE_OK if successful, or an SQLite error code if an error
+** occurs.
+*/
+static int fts5StorageLoadTotals(Fts5Storage *p, int bCache){
+ int rc = SQLITE_OK;
+ if( p->bTotalsValid==0 ){
+ rc = sqlite3Fts5IndexGetAverages(p->pIndex, &p->nTotalRow, p->aTotalSize);
+ p->bTotalsValid = bCache;
+ }
+ return rc;
+}
+
+/*
+** Store the current contents of the p->nTotalRow and p->aTotalSize[]
+** variables in the "averages" record on disk.
+**
+** Return SQLITE_OK if successful, or an SQLite error code if an error
+** occurs.
+*/
+static int fts5StorageSaveTotals(Fts5Storage *p){
+ int nCol = p->pConfig->nCol;
+ int i;
+ Fts5Buffer buf;
+ int rc = SQLITE_OK;
+ memset(&buf, 0, sizeof(buf));
+
+ sqlite3Fts5BufferAppendVarint(&rc, &buf, p->nTotalRow);
+ for(i=0; i<nCol; i++){
+ sqlite3Fts5BufferAppendVarint(&rc, &buf, p->aTotalSize[i]);
+ }
+ if( rc==SQLITE_OK ){
+ rc = sqlite3Fts5IndexSetAverages(p->pIndex, buf.p, buf.n);
+ }
+ sqlite3_free(buf.p);
+
+ return rc;
+}
+
+/*
+** Remove a row from the FTS table.
+*/
+static int sqlite3Fts5StorageDelete(Fts5Storage *p, i64 iDel, sqlite3_value **apVal){
+ Fts5Config *pConfig = p->pConfig;
+ int rc;
+ sqlite3_stmt *pDel = 0;
+
+ assert( pConfig->eContent!=FTS5_CONTENT_NORMAL || apVal==0 );
+ rc = fts5StorageLoadTotals(p, 1);
+
+ /* Delete the index records */
+ if( rc==SQLITE_OK ){
+ rc = fts5StorageDeleteFromIndex(p, iDel, apVal);
+ }
+
+ /* Delete the %_docsize record */
+ if( rc==SQLITE_OK && pConfig->bColumnsize ){
+ rc = fts5StorageGetStmt(p, FTS5_STMT_DELETE_DOCSIZE, &pDel, 0);
+ if( rc==SQLITE_OK ){
+ sqlite3_bind_int64(pDel, 1, iDel);
+ sqlite3_step(pDel);
+ rc = sqlite3_reset(pDel);
+ }
+ }
+
+ /* Delete the %_content record */
+ if( pConfig->eContent==FTS5_CONTENT_NORMAL ){
+ if( rc==SQLITE_OK ){
+ rc = fts5StorageGetStmt(p, FTS5_STMT_DELETE_CONTENT, &pDel, 0);
+ }
+ if( rc==SQLITE_OK ){
+ sqlite3_bind_int64(pDel, 1, iDel);
+ sqlite3_step(pDel);
+ rc = sqlite3_reset(pDel);
+ }
+ }
+
+ return rc;
+}
+
+/*
+** Delete all entries in the FTS5 index.
+*/
+static int sqlite3Fts5StorageDeleteAll(Fts5Storage *p){
+ Fts5Config *pConfig = p->pConfig;
+ int rc;
+
+ p->bTotalsValid = 0;
+
+ /* Delete the contents of the %_data and %_docsize tables. */
+ rc = fts5ExecPrintf(pConfig->db, 0,
+ "DELETE FROM %Q.'%q_data';"
+ "DELETE FROM %Q.'%q_idx';",
+ pConfig->zDb, pConfig->zName,
+ pConfig->zDb, pConfig->zName
+ );
+ if( rc==SQLITE_OK && pConfig->bColumnsize ){
+ rc = fts5ExecPrintf(pConfig->db, 0,
+ "DELETE FROM %Q.'%q_docsize';",
+ pConfig->zDb, pConfig->zName
+ );
+ }
+
+ /* Reinitialize the %_data table. This call creates the initial structure
+ ** and averages records. */
+ if( rc==SQLITE_OK ){
+ rc = sqlite3Fts5IndexReinit(p->pIndex);
+ }
+ if( rc==SQLITE_OK ){
+ rc = sqlite3Fts5StorageConfigValue(p, "version", 0, FTS5_CURRENT_VERSION);
+ }
+ return rc;
+}
+
+static int sqlite3Fts5StorageRebuild(Fts5Storage *p){
+ Fts5Buffer buf = {0,0,0};
+ Fts5Config *pConfig = p->pConfig;
+ sqlite3_stmt *pScan = 0;
+ Fts5InsertCtx ctx;
+ int rc, rc2;
+
+ memset(&ctx, 0, sizeof(Fts5InsertCtx));
+ ctx.pStorage = p;
+ rc = sqlite3Fts5StorageDeleteAll(p);
+ if( rc==SQLITE_OK ){
+ rc = fts5StorageLoadTotals(p, 1);
+ }
+
+ if( rc==SQLITE_OK ){
+ rc = fts5StorageGetStmt(p, FTS5_STMT_SCAN, &pScan, 0);
+ }
+
+ while( rc==SQLITE_OK && SQLITE_ROW==sqlite3_step(pScan) ){
+ i64 iRowid = sqlite3_column_int64(pScan, 0);
+
+ sqlite3Fts5BufferZero(&buf);
+ rc = sqlite3Fts5IndexBeginWrite(p->pIndex, 0, iRowid);
+ for(ctx.iCol=0; rc==SQLITE_OK && ctx.iCol<pConfig->nCol; ctx.iCol++){
+ ctx.szCol = 0;
+ if( pConfig->abUnindexed[ctx.iCol]==0 ){
+ const char *zText = (const char*)sqlite3_column_text(pScan, ctx.iCol+1);
+ int nText = sqlite3_column_bytes(pScan, ctx.iCol+1);
+ rc = sqlite3Fts5Tokenize(pConfig,
+ FTS5_TOKENIZE_DOCUMENT,
+ zText, nText,
+ (void*)&ctx,
+ fts5StorageInsertCallback
+ );
+ }
+ sqlite3Fts5BufferAppendVarint(&rc, &buf, ctx.szCol);
+ p->aTotalSize[ctx.iCol] += (i64)ctx.szCol;
+ }
+ p->nTotalRow++;
+
+ if( rc==SQLITE_OK ){
+ rc = fts5StorageInsertDocsize(p, iRowid, &buf);
+ }
+ }
+ sqlite3_free(buf.p);
+ rc2 = sqlite3_reset(pScan);
+ if( rc==SQLITE_OK ) rc = rc2;
+
+ /* Write the averages record */
+ if( rc==SQLITE_OK ){
+ rc = fts5StorageSaveTotals(p);
+ }
+ return rc;
+}
+
+static int sqlite3Fts5StorageOptimize(Fts5Storage *p){
+ return sqlite3Fts5IndexOptimize(p->pIndex);
+}
+
+static int sqlite3Fts5StorageMerge(Fts5Storage *p, int nMerge){
+ return sqlite3Fts5IndexMerge(p->pIndex, nMerge);
+}
+
+static int sqlite3Fts5StorageReset(Fts5Storage *p){
+ return sqlite3Fts5IndexReset(p->pIndex);
+}
+
+/*
+** Allocate a new rowid. This is used for "external content" tables when
+** a NULL value is inserted into the rowid column. The new rowid is allocated
+** by inserting a dummy row into the %_docsize table. The dummy will be
+** overwritten later.
+**
+** If the %_docsize table does not exist, SQLITE_MISMATCH is returned. In
+** this case the user is required to provide a rowid explicitly.
+*/
+static int fts5StorageNewRowid(Fts5Storage *p, i64 *piRowid){
+ int rc = SQLITE_MISMATCH;
+ if( p->pConfig->bColumnsize ){
+ sqlite3_stmt *pReplace = 0;
+ rc = fts5StorageGetStmt(p, FTS5_STMT_REPLACE_DOCSIZE, &pReplace, 0);
+ if( rc==SQLITE_OK ){
+ sqlite3_bind_null(pReplace, 1);
+ sqlite3_bind_null(pReplace, 2);
+ sqlite3_step(pReplace);
+ rc = sqlite3_reset(pReplace);
+ }
+ if( rc==SQLITE_OK ){
+ *piRowid = sqlite3_last_insert_rowid(p->pConfig->db);
+ }
+ }
+ return rc;
+}
+
+/*
+** Insert a new row into the FTS content table.
+*/
+static int sqlite3Fts5StorageContentInsert(
+ Fts5Storage *p,
+ sqlite3_value **apVal,
+ i64 *piRowid
+){
+ Fts5Config *pConfig = p->pConfig;
+ int rc = SQLITE_OK;
+
+ /* Insert the new row into the %_content table. */
+ if( pConfig->eContent!=FTS5_CONTENT_NORMAL ){
+ if( sqlite3_value_type(apVal[1])==SQLITE_INTEGER ){
+ *piRowid = sqlite3_value_int64(apVal[1]);
+ }else{
+ rc = fts5StorageNewRowid(p, piRowid);
+ }
+ }else{
+ sqlite3_stmt *pInsert = 0; /* Statement to write %_content table */
+ int i; /* Counter variable */
+ rc = fts5StorageGetStmt(p, FTS5_STMT_INSERT_CONTENT, &pInsert, 0);
+ for(i=1; rc==SQLITE_OK && i<=pConfig->nCol+1; i++){
+ rc = sqlite3_bind_value(pInsert, i, apVal[i]);
+ }
+ if( rc==SQLITE_OK ){
+ sqlite3_step(pInsert);
+ rc = sqlite3_reset(pInsert);
+ }
+ *piRowid = sqlite3_last_insert_rowid(pConfig->db);
+ }
+
+ return rc;
+}
+
+/*
+** Insert new entries into the FTS index and %_docsize table.
+*/
+static int sqlite3Fts5StorageIndexInsert(
+ Fts5Storage *p,
+ sqlite3_value **apVal,
+ i64 iRowid
+){
+ Fts5Config *pConfig = p->pConfig;
+ int rc = SQLITE_OK; /* Return code */
+ Fts5InsertCtx ctx; /* Tokenization callback context object */
+ Fts5Buffer buf; /* Buffer used to build up %_docsize blob */
+
+ memset(&buf, 0, sizeof(Fts5Buffer));
+ ctx.pStorage = p;
+ rc = fts5StorageLoadTotals(p, 1);
+
+ if( rc==SQLITE_OK ){
+ rc = sqlite3Fts5IndexBeginWrite(p->pIndex, 0, iRowid);
+ }
+ for(ctx.iCol=0; rc==SQLITE_OK && ctx.iCol<pConfig->nCol; ctx.iCol++){
+ ctx.szCol = 0;
+ if( pConfig->abUnindexed[ctx.iCol]==0 ){
+ const char *zText = (const char*)sqlite3_value_text(apVal[ctx.iCol+2]);
+ int nText = sqlite3_value_bytes(apVal[ctx.iCol+2]);
+ rc = sqlite3Fts5Tokenize(pConfig,
+ FTS5_TOKENIZE_DOCUMENT,
+ zText, nText,
+ (void*)&ctx,
+ fts5StorageInsertCallback
+ );
+ }
+ sqlite3Fts5BufferAppendVarint(&rc, &buf, ctx.szCol);
+ p->aTotalSize[ctx.iCol] += (i64)ctx.szCol;
+ }
+ p->nTotalRow++;
+
+ /* Write the %_docsize record */
+ if( rc==SQLITE_OK ){
+ rc = fts5StorageInsertDocsize(p, iRowid, &buf);
+ }
+ sqlite3_free(buf.p);
+
+ return rc;
+}
+
+static int fts5StorageCount(Fts5Storage *p, const char *zSuffix, i64 *pnRow){
+ Fts5Config *pConfig = p->pConfig;
+ char *zSql;
+ int rc;
+
+ zSql = sqlite3_mprintf("SELECT count(*) FROM %Q.'%q_%s'",
+ pConfig->zDb, pConfig->zName, zSuffix
+ );
+ if( zSql==0 ){
+ rc = SQLITE_NOMEM;
+ }else{
+ sqlite3_stmt *pCnt = 0;
+ rc = sqlite3_prepare_v2(pConfig->db, zSql, -1, &pCnt, 0);
+ if( rc==SQLITE_OK ){
+ if( SQLITE_ROW==sqlite3_step(pCnt) ){
+ *pnRow = sqlite3_column_int64(pCnt, 0);
+ }
+ rc = sqlite3_finalize(pCnt);
+ }
+ }
+
+ sqlite3_free(zSql);
+ return rc;
+}
+
+/*
+** Context object used by sqlite3Fts5StorageIntegrity().
+*/
+typedef struct Fts5IntegrityCtx Fts5IntegrityCtx;
+struct Fts5IntegrityCtx {
+ i64 iRowid;
+ int iCol;
+ int szCol;
+ u64 cksum;
+ Fts5Termset *pTermset;
+ Fts5Config *pConfig;
+};
+
+
+/*
+** Tokenization callback used by integrity check.
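+**
+** This computes the expected index checksum by XOR-ing together one
+** entry checksum for each (rowid, column, position, term) posting,
+** including entries for any configured prefix indexes. Tokens already
+** seen (as recorded by the Fts5Termset) are skipped so that each entry
+** contributes at most once. Because XOR is used, the result is
+** independent of the order in which the entries are visited.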
+*/
+static int fts5StorageIntegrityCallback(
+ void *pContext, /* Pointer to Fts5IntegrityCtx object */
+ int tflags,
+ const char *pToken, /* Buffer containing token */
+ int nToken, /* Size of token in bytes */
+ int iUnused1, /* Start offset of token */
+ int iUnused2 /* End offset of token */
+){
+ Fts5IntegrityCtx *pCtx = (Fts5IntegrityCtx*)pContext;
+ Fts5Termset *pTermset = pCtx->pTermset;
+ int bPresent;
+ int ii;
+ int rc = SQLITE_OK;
+ int iPos;
+ int iCol;
+
+ UNUSED_PARAM2(iUnused1, iUnused2);
+ if( nToken>FTS5_MAX_TOKEN_SIZE ) nToken = FTS5_MAX_TOKEN_SIZE;
+
+ if( (tflags & FTS5_TOKEN_COLOCATED)==0 || pCtx->szCol==0 ){
+ pCtx->szCol++;
+ }
+
+ switch( pCtx->pConfig->eDetail ){
+ case FTS5_DETAIL_FULL:
+ iPos = pCtx->szCol-1;
+ iCol = pCtx->iCol;
+ break;
+
+ case FTS5_DETAIL_COLUMNS:
+ iPos = pCtx->iCol;
+ iCol = 0;
+ break;
+
+ default:
+ assert( pCtx->pConfig->eDetail==FTS5_DETAIL_NONE );
+ iPos = 0;
+ iCol = 0;
+ break;
+ }
+
+ rc = sqlite3Fts5TermsetAdd(pTermset, 0, pToken, nToken, &bPresent);
+ if( rc==SQLITE_OK && bPresent==0 ){
+ pCtx->cksum ^= sqlite3Fts5IndexEntryCksum(
+ pCtx->iRowid, iCol, iPos, 0, pToken, nToken
+ );
+ }
+
+ for(ii=0; rc==SQLITE_OK && ii<pCtx->pConfig->nPrefix; ii++){
+ const int nChar = pCtx->pConfig->aPrefix[ii];
+ int nByte = sqlite3Fts5IndexCharlenToBytelen(pToken, nToken, nChar);
+ if( nByte ){
+ rc = sqlite3Fts5TermsetAdd(pTermset, ii+1, pToken, nByte, &bPresent);
+ if( bPresent==0 ){
+ pCtx->cksum ^= sqlite3Fts5IndexEntryCksum(
+ pCtx->iRowid, iCol, iPos, ii+1, pToken, nByte
+ );
+ }
+ }
+ }
+
+ return rc;
+}
+
+/*
+** Check that the contents of the FTS index match those of the %_content
+** table. Return SQLITE_OK if they do, or SQLITE_CORRUPT if not. Return
+** some other SQLite error code if an error occurs while attempting to
+** determine this.
+*/
+static int sqlite3Fts5StorageIntegrity(Fts5Storage *p){
+ Fts5Config *pConfig = p->pConfig;
+ int rc; /* Return code */
+ int *aColSize; /* Array of size pConfig->nCol */
+ i64 *aTotalSize; /* Array of size pConfig->nCol */
+ Fts5IntegrityCtx ctx;
+ sqlite3_stmt *pScan;
+
+ memset(&ctx, 0, sizeof(Fts5IntegrityCtx));
+ ctx.pConfig = p->pConfig;
+ aTotalSize = (i64*)sqlite3_malloc64(pConfig->nCol*(sizeof(int)+sizeof(i64)));
+ if( !aTotalSize ) return SQLITE_NOMEM;
+ aColSize = (int*)&aTotalSize[pConfig->nCol];
+ memset(aTotalSize, 0, sizeof(i64) * pConfig->nCol);
+
+ /* Generate the expected index checksum based on the contents of the
+ ** %_content table. This block stores the checksum in ctx.cksum. */
+ rc = fts5StorageGetStmt(p, FTS5_STMT_SCAN, &pScan, 0);
+ if( rc==SQLITE_OK ){
+ int rc2;
+ while( SQLITE_ROW==sqlite3_step(pScan) ){
+ int i;
+ ctx.iRowid = sqlite3_column_int64(pScan, 0);
+ ctx.szCol = 0;
+ if( pConfig->bColumnsize ){
+ rc = sqlite3Fts5StorageDocsize(p, ctx.iRowid, aColSize);
+ }
+ if( rc==SQLITE_OK && pConfig->eDetail==FTS5_DETAIL_NONE ){
+ rc = sqlite3Fts5TermsetNew(&ctx.pTermset);
+ }
+ for(i=0; rc==SQLITE_OK && i<pConfig->nCol; i++){
+ if( pConfig->abUnindexed[i] ) continue;
+ ctx.iCol = i;
+ ctx.szCol = 0;
+ if( pConfig->eDetail==FTS5_DETAIL_COLUMNS ){
+ rc = sqlite3Fts5TermsetNew(&ctx.pTermset);
+ }
+ if( rc==SQLITE_OK ){
+ const char *zText = (const char*)sqlite3_column_text(pScan, i+1);
+ int nText = sqlite3_column_bytes(pScan, i+1);
+ rc = sqlite3Fts5Tokenize(pConfig,
+ FTS5_TOKENIZE_DOCUMENT,
+ zText, nText,
+ (void*)&ctx,
+ fts5StorageIntegrityCallback
+ );
+ }
+ if( rc==SQLITE_OK && pConfig->bColumnsize && ctx.szCol!=aColSize[i] ){
+ rc = FTS5_CORRUPT;
+ }
+ aTotalSize[i] += ctx.szCol;
+ if( pConfig->eDetail==FTS5_DETAIL_COLUMNS ){
+ sqlite3Fts5TermsetFree(ctx.pTermset);
+ ctx.pTermset = 0;
+ }
+ }
+ sqlite3Fts5TermsetFree(ctx.pTermset);
+ ctx.pTermset = 0;
+
+ if( rc!=SQLITE_OK ) break;
+ }
+ rc2 = sqlite3_reset(pScan);
+ if( rc==SQLITE_OK ) rc = rc2;
+ }
+
+  /* Test that the "totals" (sometimes called "averages") record looks OK */
+ if( rc==SQLITE_OK ){
+ int i;
+ rc = fts5StorageLoadTotals(p, 0);
+ for(i=0; rc==SQLITE_OK && i<pConfig->nCol; i++){
+ if( p->aTotalSize[i]!=aTotalSize[i] ) rc = FTS5_CORRUPT;
+ }
+ }
+
+ /* Check that the %_docsize and %_content tables contain the expected
+ ** number of rows. */
+ if( rc==SQLITE_OK && pConfig->eContent==FTS5_CONTENT_NORMAL ){
+ i64 nRow = 0;
+ rc = fts5StorageCount(p, "content", &nRow);
+ if( rc==SQLITE_OK && nRow!=p->nTotalRow ) rc = FTS5_CORRUPT;
+ }
+ if( rc==SQLITE_OK && pConfig->bColumnsize ){
+ i64 nRow = 0;
+ rc = fts5StorageCount(p, "docsize", &nRow);
+ if( rc==SQLITE_OK && nRow!=p->nTotalRow ) rc = FTS5_CORRUPT;
+ }
+
+ /* Pass the expected checksum down to the FTS index module. It will
+ ** verify, amongst other things, that it matches the checksum generated by
+ ** inspecting the index itself. */
+ if( rc==SQLITE_OK ){
+ rc = sqlite3Fts5IndexIntegrityCheck(p->pIndex, ctx.cksum);
+ }
+
+ sqlite3_free(aTotalSize);
+ return rc;
+}
+
+/*
+** Obtain an SQLite statement handle that may be used to read data from the
+** %_content table.
+*/
+static int sqlite3Fts5StorageStmt(
+ Fts5Storage *p,
+ int eStmt,
+ sqlite3_stmt **pp,
+ char **pzErrMsg
+){
+ int rc;
+ assert( eStmt==FTS5_STMT_SCAN_ASC
+ || eStmt==FTS5_STMT_SCAN_DESC
+ || eStmt==FTS5_STMT_LOOKUP
+ );
+ rc = fts5StorageGetStmt(p, eStmt, pp, pzErrMsg);
+ if( rc==SQLITE_OK ){
+ assert( p->aStmt[eStmt]==*pp );
+ p->aStmt[eStmt] = 0;
+ }
+ return rc;
+}
+
+/*
+** Release an SQLite statement handle obtained via an earlier call to
+** sqlite3Fts5StorageStmt(). The eStmt parameter passed to this function
+** must match that passed to the sqlite3Fts5StorageStmt() call.
+*/
+static void sqlite3Fts5StorageStmtRelease(
+ Fts5Storage *p,
+ int eStmt,
+ sqlite3_stmt *pStmt
+){
+ assert( eStmt==FTS5_STMT_SCAN_ASC
+ || eStmt==FTS5_STMT_SCAN_DESC
+ || eStmt==FTS5_STMT_LOOKUP
+ );
+ if( p->aStmt[eStmt]==0 ){
+ sqlite3_reset(pStmt);
+ p->aStmt[eStmt] = pStmt;
+ }else{
+ sqlite3_finalize(pStmt);
+ }
+}
+
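+/*
+** Decode the blob of varints stored in a %_docsize record (aBlob/nBlob)
+** into the aCol[] array - one entry for each of the nCol table columns.
+** Return 0 if the blob contains exactly nCol varints, or non-zero if
+** it is malformed.
+*/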
+static int fts5StorageDecodeSizeArray(
+ int *aCol, int nCol, /* Array to populate */
+ const u8 *aBlob, int nBlob /* Record to read varints from */
+){
+ int i;
+ int iOff = 0;
+ for(i=0; i<nCol; i++){
+ if( iOff>=nBlob ) return 1;
+ iOff += fts5GetVarint32(&aBlob[iOff], aCol[i]);
+ }
+ return (iOff!=nBlob);
+}
+
+/*
+** Argument aCol points to an array of integers containing one entry for
+** each table column. This function reads the %_docsize record for the
+** specified rowid and populates aCol[] with the results.
+**
+** An SQLite error code is returned if an error occurs, or SQLITE_OK
+** otherwise.
+*/
+static int sqlite3Fts5StorageDocsize(Fts5Storage *p, i64 iRowid, int *aCol){
+ int nCol = p->pConfig->nCol; /* Number of user columns in table */
+ sqlite3_stmt *pLookup = 0; /* Statement to query %_docsize */
+ int rc; /* Return Code */
+
+ assert( p->pConfig->bColumnsize );
+ rc = fts5StorageGetStmt(p, FTS5_STMT_LOOKUP_DOCSIZE, &pLookup, 0);
+ if( rc==SQLITE_OK ){
+ int bCorrupt = 1;
+ sqlite3_bind_int64(pLookup, 1, iRowid);
+ if( SQLITE_ROW==sqlite3_step(pLookup) ){
+ const u8 *aBlob = sqlite3_column_blob(pLookup, 0);
+ int nBlob = sqlite3_column_bytes(pLookup, 0);
+ if( 0==fts5StorageDecodeSizeArray(aCol, nCol, aBlob, nBlob) ){
+ bCorrupt = 0;
+ }
+ }
+ rc = sqlite3_reset(pLookup);
+ if( bCorrupt && rc==SQLITE_OK ){
+ rc = FTS5_CORRUPT;
+ }
+ }
+
+ return rc;
+}
+
+static int sqlite3Fts5StorageSize(Fts5Storage *p, int iCol, i64 *pnToken){
+ int rc = fts5StorageLoadTotals(p, 0);
+ if( rc==SQLITE_OK ){
+ *pnToken = 0;
+ if( iCol<0 ){
+ int i;
+ for(i=0; i<p->pConfig->nCol; i++){
+ *pnToken += p->aTotalSize[i];
+ }
+ }else if( iCol<p->pConfig->nCol ){
+ *pnToken = p->aTotalSize[iCol];
+ }else{
+ rc = SQLITE_RANGE;
+ }
+ }
+ return rc;
+}
+
+static int sqlite3Fts5StorageRowCount(Fts5Storage *p, i64 *pnRow){
+ int rc = fts5StorageLoadTotals(p, 0);
+ if( rc==SQLITE_OK ){
+ /* nTotalRow being zero does not necessarily indicate a corrupt
+ ** database - it might be that the FTS5 table really does contain zero
+ ** rows. However this function is only called from the xRowCount() API,
+ ** and there is no way for that API to be invoked if the table contains
+ ** no rows. Hence the FTS5_CORRUPT return. */
+ *pnRow = p->nTotalRow;
+ if( p->nTotalRow<=0 ) rc = FTS5_CORRUPT;
+ }
+ return rc;
+}
+
+/*
+** Flush any data currently held in-memory to disk.
+*/
+static int sqlite3Fts5StorageSync(Fts5Storage *p){
+ int rc = SQLITE_OK;
+ i64 iLastRowid = sqlite3_last_insert_rowid(p->pConfig->db);
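+  /* The writes performed below may change the last-insert-rowid value
+  ** visible to the user, so it is saved above and restored before this
+  ** function returns. */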
+ if( p->bTotalsValid ){
+ rc = fts5StorageSaveTotals(p);
+ p->bTotalsValid = 0;
+ }
+ if( rc==SQLITE_OK ){
+ rc = sqlite3Fts5IndexSync(p->pIndex);
+ }
+ sqlite3_set_last_insert_rowid(p->pConfig->db, iLastRowid);
+ return rc;
+}
+
+static int sqlite3Fts5StorageRollback(Fts5Storage *p){
+ p->bTotalsValid = 0;
+ return sqlite3Fts5IndexRollback(p->pIndex);
+}
+
+static int sqlite3Fts5StorageConfigValue(
+ Fts5Storage *p,
+ const char *z,
+ sqlite3_value *pVal,
+ int iVal
+){
+ sqlite3_stmt *pReplace = 0;
+ int rc = fts5StorageGetStmt(p, FTS5_STMT_REPLACE_CONFIG, &pReplace, 0);
+ if( rc==SQLITE_OK ){
+ sqlite3_bind_text(pReplace, 1, z, -1, SQLITE_STATIC);
+ if( pVal ){
+ sqlite3_bind_value(pReplace, 2, pVal);
+ }else{
+ sqlite3_bind_int(pReplace, 2, iVal);
+ }
+ sqlite3_step(pReplace);
+ rc = sqlite3_reset(pReplace);
+ sqlite3_bind_null(pReplace, 1);
+ }
+ if( rc==SQLITE_OK && pVal ){
+ int iNew = p->pConfig->iCookie + 1;
+ rc = sqlite3Fts5IndexSetCookie(p->pIndex, iNew);
+ if( rc==SQLITE_OK ){
+ p->pConfig->iCookie = iNew;
+ }
+ }
+ return rc;
+}
+
+/*
+** 2014 May 31
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+******************************************************************************
+*/
+
+
+/* #include "fts5Int.h" */
+
+/**************************************************************************
+** Start of ascii tokenizer implementation.
+*/
+
+/*
+** For tokenizers with no "unicode" modifier, the set of token characters
+** is the same as the set of ASCII range alphanumeric characters.
+*/
+static unsigned char aAsciiTokenChar[128] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 0x00..0x0F */
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 0x10..0x1F */
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 0x20..0x2F */
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, /* 0x30..0x3F */
+ 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 0x40..0x4F */
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, /* 0x50..0x5F */
+ 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 0x60..0x6F */
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, /* 0x70..0x7F */
+};
+
+typedef struct AsciiTokenizer AsciiTokenizer;
+struct AsciiTokenizer {
+ unsigned char aTokenChar[128];
+};
+
+static void fts5AsciiAddExceptions(
+ AsciiTokenizer *p,
+ const char *zArg,
+ int bTokenChars
+){
+ int i;
+ for(i=0; zArg[i]; i++){
+ if( (zArg[i] & 0x80)==0 ){
+ p->aTokenChar[(int)zArg[i]] = (unsigned char)bTokenChars;
+ }
+ }
+}
+
+/*
+** Delete a "ascii" tokenizer.
+*/
+static void fts5AsciiDelete(Fts5Tokenizer *p){
+ sqlite3_free(p);
+}
+
+/*
+** Create an "ascii" tokenizer.
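+**
+** The azArg[]/nArg parameters contain any option key/value pairs passed
+** to the tokenizer - only "tokenchars" and "separators" are accepted.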
+*/
+static int fts5AsciiCreate(
+ void *pUnused,
+ const char **azArg, int nArg,
+ Fts5Tokenizer **ppOut
+){
+ int rc = SQLITE_OK;
+ AsciiTokenizer *p = 0;
+ UNUSED_PARAM(pUnused);
+ if( nArg%2 ){
+ rc = SQLITE_ERROR;
+ }else{
+ p = sqlite3_malloc(sizeof(AsciiTokenizer));
+ if( p==0 ){
+ rc = SQLITE_NOMEM;
+ }else{
+ int i;
+ memset(p, 0, sizeof(AsciiTokenizer));
+ memcpy(p->aTokenChar, aAsciiTokenChar, sizeof(aAsciiTokenChar));
+ for(i=0; rc==SQLITE_OK && i<nArg; i+=2){
+ const char *zArg = azArg[i+1];
+ if( 0==sqlite3_stricmp(azArg[i], "tokenchars") ){
+ fts5AsciiAddExceptions(p, zArg, 1);
+ }else
+ if( 0==sqlite3_stricmp(azArg[i], "separators") ){
+ fts5AsciiAddExceptions(p, zArg, 0);
+ }else{
+ rc = SQLITE_ERROR;
+ }
+ }
+ if( rc!=SQLITE_OK ){
+ fts5AsciiDelete((Fts5Tokenizer*)p);
+ p = 0;
+ }
+ }
+ }
+
+ *ppOut = (Fts5Tokenizer*)p;
+ return rc;
+}
+
+
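+/*
+** Copy nByte bytes of text from aIn[] to aOut[], folding ASCII
+** upper-case characters to lower-case along the way.
+*/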
+static void asciiFold(char *aOut, const char *aIn, int nByte){
+ int i;
+ for(i=0; i<nByte; i++){
+ char c = aIn[i];
+ if( c>='A' && c<='Z' ) c += 32;
+ aOut[i] = c;
+ }
+}
+
+/*
+** Tokenize some text using the ascii tokenizer.
+*/
+static int fts5AsciiTokenize(
+ Fts5Tokenizer *pTokenizer,
+ void *pCtx,
+ int iUnused,
+ const char *pText, int nText,
+ int (*xToken)(void*, int, const char*, int nToken, int iStart, int iEnd)
+){
+ AsciiTokenizer *p = (AsciiTokenizer*)pTokenizer;
+ int rc = SQLITE_OK;
+ int ie;
+ int is = 0;
+
+ char aFold[64];
+ int nFold = sizeof(aFold);
+ char *pFold = aFold;
+ unsigned char *a = p->aTokenChar;
+
+ UNUSED_PARAM(iUnused);
+
+ while( is<nText && rc==SQLITE_OK ){
+ int nByte;
+
+ /* Skip any leading divider characters. */
+ while( is<nText && ((pText[is]&0x80)==0 && a[(int)pText[is]]==0) ){
+ is++;
+ }
+ if( is==nText ) break;
+
+ /* Count the token characters */
+ ie = is+1;
+ while( ie<nText && ((pText[ie]&0x80) || a[(int)pText[ie]] ) ){
+ ie++;
+ }
+
+ /* Fold to lower case */
+ nByte = ie-is;
+ if( nByte>nFold ){
+ if( pFold!=aFold ) sqlite3_free(pFold);
+ pFold = sqlite3_malloc64((sqlite3_int64)nByte*2);
+ if( pFold==0 ){
+ rc = SQLITE_NOMEM;
+ break;
+ }
+ nFold = nByte*2;
+ }
+ asciiFold(pFold, &pText[is], nByte);
+
+ /* Invoke the token callback */
+ rc = xToken(pCtx, 0, pFold, nByte, is, ie);
+ is = ie+1;
+ }
+
+ if( pFold!=aFold ) sqlite3_free(pFold);
+ if( rc==SQLITE_DONE ) rc = SQLITE_OK;
+ return rc;
+}
+
+/**************************************************************************
+** Start of unicode61 tokenizer implementation.
+*/
+
+
+/*
+** The following two macros - READ_UTF8 and WRITE_UTF8 - have been copied
+** from the sqlite3 source file utf.c. If this file is compiled as part
+** of the amalgamation, they are not required.
+*/
+#ifndef SQLITE_AMALGAMATION
+
+static const unsigned char sqlite3Utf8Trans1[] = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x00, 0x01, 0x02, 0x03, 0x00, 0x01, 0x00, 0x00,
+};
+
+#define READ_UTF8(zIn, zTerm, c) \
+ c = *(zIn++); \
+ if( c>=0xc0 ){ \
+ c = sqlite3Utf8Trans1[c-0xc0]; \
+ while( zIn!=zTerm && (*zIn & 0xc0)==0x80 ){ \
+ c = (c<<6) + (0x3f & *(zIn++)); \
+ } \
+ if( c<0x80 \
+ || (c&0xFFFFF800)==0xD800 \
+ || (c&0xFFFFFFFE)==0xFFFE ){ c = 0xFFFD; } \
+ }
+
+
+#define WRITE_UTF8(zOut, c) { \
+ if( c<0x00080 ){ \
+ *zOut++ = (unsigned char)(c&0xFF); \
+ } \
+ else if( c<0x00800 ){ \
+ *zOut++ = 0xC0 + (unsigned char)((c>>6)&0x1F); \
+ *zOut++ = 0x80 + (unsigned char)(c & 0x3F); \
+ } \
+ else if( c<0x10000 ){ \
+ *zOut++ = 0xE0 + (unsigned char)((c>>12)&0x0F); \
+ *zOut++ = 0x80 + (unsigned char)((c>>6) & 0x3F); \
+ *zOut++ = 0x80 + (unsigned char)(c & 0x3F); \
+ }else{ \
+ *zOut++ = 0xF0 + (unsigned char)((c>>18) & 0x07); \
+ *zOut++ = 0x80 + (unsigned char)((c>>12) & 0x3F); \
+ *zOut++ = 0x80 + (unsigned char)((c>>6) & 0x3F); \
+ *zOut++ = 0x80 + (unsigned char)(c & 0x3F); \
+ } \
+}
+
+#endif /* ifndef SQLITE_AMALGAMATION */
+
+typedef struct Unicode61Tokenizer Unicode61Tokenizer;
+struct Unicode61Tokenizer {
+ unsigned char aTokenChar[128]; /* ASCII range token characters */
+ char *aFold; /* Buffer to fold text into */
+ int nFold; /* Size of aFold[] in bytes */
+ int eRemoveDiacritic; /* True if remove_diacritics=1 is set */
+ int nException;
+ int *aiException;
+
+ unsigned char aCategory[32]; /* True for token char categories */
+};
+
+/* Values for eRemoveDiacritic (must match internals of fts5_unicode2.c) */
+#define FTS5_REMOVE_DIACRITICS_NONE 0
+#define FTS5_REMOVE_DIACRITICS_SIMPLE 1
+#define FTS5_REMOVE_DIACRITICS_COMPLEX 2
+
+static int fts5UnicodeAddExceptions(
+ Unicode61Tokenizer *p, /* Tokenizer object */
+ const char *z, /* Characters to treat as exceptions */
+ int bTokenChars /* 1 for 'tokenchars', 0 for 'separators' */
+){
+ int rc = SQLITE_OK;
+ int n = (int)strlen(z);
+ int *aNew;
+
+ if( n>0 ){
+ aNew = (int*)sqlite3_realloc64(p->aiException,
+ (n+p->nException)*sizeof(int));
+ if( aNew ){
+ int nNew = p->nException;
+ const unsigned char *zCsr = (const unsigned char*)z;
+ const unsigned char *zTerm = (const unsigned char*)&z[n];
+ while( zCsr<zTerm ){
+ u32 iCode;
+ int bToken;
+ READ_UTF8(zCsr, zTerm, iCode);
+ if( iCode<128 ){
+ p->aTokenChar[iCode] = (unsigned char)bTokenChars;
+ }else{
+ bToken = p->aCategory[sqlite3Fts5UnicodeCategory(iCode)];
+ assert( (bToken==0 || bToken==1) );
+ assert( (bTokenChars==0 || bTokenChars==1) );
+ if( bToken!=bTokenChars && sqlite3Fts5UnicodeIsdiacritic(iCode)==0 ){
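+          /* Insert iCode into the aiException[] array, keeping the
+          ** array sorted so that fts5UnicodeIsException() can use a
+          ** binary search. */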
+ int i;
+ for(i=0; i<nNew; i++){
+ if( (u32)aNew[i]>iCode ) break;
+ }
+ memmove(&aNew[i+1], &aNew[i], (nNew-i)*sizeof(int));
+ aNew[i] = iCode;
+ nNew++;
+ }
+ }
+ }
+ p->aiException = aNew;
+ p->nException = nNew;
+ }else{
+ rc = SQLITE_NOMEM;
+ }
+ }
+
+ return rc;
+}
+
+/*
+** Return true if the p->aiException[] array contains the value iCode.
+*/
+static int fts5UnicodeIsException(Unicode61Tokenizer *p, int iCode){
+ if( p->nException>0 ){
+ int *a = p->aiException;
+ int iLo = 0;
+ int iHi = p->nException-1;
+
+ while( iHi>=iLo ){
+ int iTest = (iHi + iLo) / 2;
+ if( iCode==a[iTest] ){
+ return 1;
+ }else if( iCode>a[iTest] ){
+ iLo = iTest+1;
+ }else{
+ iHi = iTest-1;
+ }
+ }
+ }
+
+ return 0;
+}
+
+/*
+** Delete a "unicode61" tokenizer.
+*/
+static void fts5UnicodeDelete(Fts5Tokenizer *pTok){
+ if( pTok ){
+ Unicode61Tokenizer *p = (Unicode61Tokenizer*)pTok;
+ sqlite3_free(p->aiException);
+ sqlite3_free(p->aFold);
+ sqlite3_free(p);
+ }
+ return;
+}
+
+static int unicodeSetCategories(Unicode61Tokenizer *p, const char *zCat){
+ const char *z = zCat;
+
+ while( *z ){
+ while( *z==' ' || *z=='\t' ) z++;
+ if( *z && sqlite3Fts5UnicodeCatParse(z, p->aCategory) ){
+ return SQLITE_ERROR;
+ }
+ while( *z!=' ' && *z!='\t' && *z!='\0' ) z++;
+ }
+
+ sqlite3Fts5UnicodeAscii(p->aCategory, p->aTokenChar);
+ return SQLITE_OK;
+}
+
+/*
+** Create a "unicode61" tokenizer.
+*/
+static int fts5UnicodeCreate(
+ void *pUnused,
+ const char **azArg, int nArg,
+ Fts5Tokenizer **ppOut
+){
+ int rc = SQLITE_OK; /* Return code */
+ Unicode61Tokenizer *p = 0; /* New tokenizer object */
+
+ UNUSED_PARAM(pUnused);
+
+ if( nArg%2 ){
+ rc = SQLITE_ERROR;
+ }else{
+ p = (Unicode61Tokenizer*)sqlite3_malloc(sizeof(Unicode61Tokenizer));
+ if( p ){
+ const char *zCat = "L* N* Co";
+ int i;
+ memset(p, 0, sizeof(Unicode61Tokenizer));
+
+ p->eRemoveDiacritic = FTS5_REMOVE_DIACRITICS_SIMPLE;
+ p->nFold = 64;
+ p->aFold = sqlite3_malloc64(p->nFold * sizeof(char));
+ if( p->aFold==0 ){
+ rc = SQLITE_NOMEM;
+ }
+
+ /* Search for a "categories" argument */
+ for(i=0; rc==SQLITE_OK && i<nArg; i+=2){
+ if( 0==sqlite3_stricmp(azArg[i], "categories") ){
+ zCat = azArg[i+1];
+ }
+ }
+
+ if( rc==SQLITE_OK ){
+ rc = unicodeSetCategories(p, zCat);
+ }
+
+ for(i=0; rc==SQLITE_OK && i<nArg; i+=2){
+ const char *zArg = azArg[i+1];
+ if( 0==sqlite3_stricmp(azArg[i], "remove_diacritics") ){
+ if( (zArg[0]!='0' && zArg[0]!='1' && zArg[0]!='2') || zArg[1] ){
+ rc = SQLITE_ERROR;
+ }else{
+ p->eRemoveDiacritic = (zArg[0] - '0');
+ assert( p->eRemoveDiacritic==FTS5_REMOVE_DIACRITICS_NONE
+ || p->eRemoveDiacritic==FTS5_REMOVE_DIACRITICS_SIMPLE
+ || p->eRemoveDiacritic==FTS5_REMOVE_DIACRITICS_COMPLEX
+ );
+ }
+ }else
+ if( 0==sqlite3_stricmp(azArg[i], "tokenchars") ){
+ rc = fts5UnicodeAddExceptions(p, zArg, 1);
+ }else
+ if( 0==sqlite3_stricmp(azArg[i], "separators") ){
+ rc = fts5UnicodeAddExceptions(p, zArg, 0);
+ }else
+ if( 0==sqlite3_stricmp(azArg[i], "categories") ){
+ /* no-op */
+ }else{
+ rc = SQLITE_ERROR;
+ }
+ }
+
+ }else{
+ rc = SQLITE_NOMEM;
+ }
+ if( rc!=SQLITE_OK ){
+ fts5UnicodeDelete((Fts5Tokenizer*)p);
+ p = 0;
+ }
+ *ppOut = (Fts5Tokenizer*)p;
+ }
+ return rc;
+}
+
+/*
+** Return true if, for the purposes of tokenizing with the tokenizer
+** passed as the first argument, codepoint iCode is considered a token
+** character (not a separator).
+*/
+static int fts5UnicodeIsAlnum(Unicode61Tokenizer *p, int iCode){
+ return (
+ p->aCategory[sqlite3Fts5UnicodeCategory((u32)iCode)]
+ ^ fts5UnicodeIsException(p, iCode)
+ );
+}
+
+static int fts5UnicodeTokenize(
+ Fts5Tokenizer *pTokenizer,
+ void *pCtx,
+ int iUnused,
+ const char *pText, int nText,
+ int (*xToken)(void*, int, const char*, int nToken, int iStart, int iEnd)
+){
+ Unicode61Tokenizer *p = (Unicode61Tokenizer*)pTokenizer;
+ int rc = SQLITE_OK;
+ unsigned char *a = p->aTokenChar;
+
+ unsigned char *zTerm = (unsigned char*)&pText[nText];
+ unsigned char *zCsr = (unsigned char *)pText;
+
+ /* Output buffer */
+ char *aFold = p->aFold;
+ int nFold = p->nFold;
+ const char *pEnd = &aFold[nFold-6];
+
+ UNUSED_PARAM(iUnused);
+
+ /* Each iteration of this loop gobbles up a contiguous run of separators,
+ ** then the next token. */
+ while( rc==SQLITE_OK ){
+ u32 iCode; /* non-ASCII codepoint read from input */
+ char *zOut = aFold;
+ int is;
+ int ie;
+
+ /* Skip any separator characters. */
+ while( 1 ){
+ if( zCsr>=zTerm ) goto tokenize_done;
+ if( *zCsr & 0x80 ) {
+        /* A character outside of the ascii range. Skip past it if it is
+        ** a separator character, or break out of the loop if it is not. */
+ is = zCsr - (unsigned char*)pText;
+ READ_UTF8(zCsr, zTerm, iCode);
+ if( fts5UnicodeIsAlnum(p, iCode) ){
+ goto non_ascii_tokenchar;
+ }
+ }else{
+ if( a[*zCsr] ){
+ is = zCsr - (unsigned char*)pText;
+ goto ascii_tokenchar;
+ }
+ zCsr++;
+ }
+ }
+
+ /* Run through the tokenchars. Fold them into the output buffer along
+ ** the way. */
+ while( zCsr<zTerm ){
+
+ /* Grow the output buffer so that there is sufficient space to fit the
+ ** largest possible utf-8 character. */
+ if( zOut>pEnd ){
+ aFold = sqlite3_malloc64((sqlite3_int64)nFold*2);
+ if( aFold==0 ){
+ rc = SQLITE_NOMEM;
+ goto tokenize_done;
+ }
+ zOut = &aFold[zOut - p->aFold];
+ memcpy(aFold, p->aFold, nFold);
+ sqlite3_free(p->aFold);
+ p->aFold = aFold;
+ p->nFold = nFold = nFold*2;
+ pEnd = &aFold[nFold-6];
+ }
+
+ if( *zCsr & 0x80 ){
+      /* A non-ascii-range character. Fold it into the output buffer if
+ ** it is a token character, or break out of the loop if it is not. */
+ READ_UTF8(zCsr, zTerm, iCode);
+ if( fts5UnicodeIsAlnum(p,iCode)||sqlite3Fts5UnicodeIsdiacritic(iCode) ){
+ non_ascii_tokenchar:
+ iCode = sqlite3Fts5UnicodeFold(iCode, p->eRemoveDiacritic);
+ if( iCode ) WRITE_UTF8(zOut, iCode);
+ }else{
+ break;
+ }
+ }else if( a[*zCsr]==0 ){
+ /* An ascii-range separator character. End of token. */
+ break;
+ }else{
+ ascii_tokenchar:
+ if( *zCsr>='A' && *zCsr<='Z' ){
+ *zOut++ = *zCsr + 32;
+ }else{
+ *zOut++ = *zCsr;
+ }
+ zCsr++;
+ }
+ ie = zCsr - (unsigned char*)pText;
+ }
+
+ /* Invoke the token callback */
+ rc = xToken(pCtx, 0, aFold, zOut-aFold, is, ie);
+ }
+
+ tokenize_done:
+ if( rc==SQLITE_DONE ) rc = SQLITE_OK;
+ return rc;
+}
+
+/**************************************************************************
+** Start of porter stemmer implementation.
+*/
+
+/* Any tokens larger than this (in bytes) are passed through without
+** stemming. */
+#define FTS5_PORTER_MAX_TOKEN 64
+
+typedef struct PorterTokenizer PorterTokenizer;
+struct PorterTokenizer {
+ fts5_tokenizer tokenizer; /* Parent tokenizer module */
+ Fts5Tokenizer *pTokenizer; /* Parent tokenizer instance */
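+  /* Buffer used while stemming. Some rules replace a suffix with a
+  ** longer one (e.g. "at" -> "ate"), so space beyond
+  ** FTS5_PORTER_MAX_TOKEN bytes is required. */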
+ char aBuf[FTS5_PORTER_MAX_TOKEN + 64];
+};
+
+/*
+** Delete a "porter" tokenizer.
+*/
+static void fts5PorterDelete(Fts5Tokenizer *pTok){
+ if( pTok ){
+ PorterTokenizer *p = (PorterTokenizer*)pTok;
+ if( p->pTokenizer ){
+ p->tokenizer.xDelete(p->pTokenizer);
+ }
+ sqlite3_free(p);
+ }
+}
+
+/*
+** Create a "porter" tokenizer.
+*/
+static int fts5PorterCreate(
+ void *pCtx,
+ const char **azArg, int nArg,
+ Fts5Tokenizer **ppOut
+){
+ fts5_api *pApi = (fts5_api*)pCtx;
+ int rc = SQLITE_OK;
+ PorterTokenizer *pRet;
+ void *pUserdata = 0;
+ const char *zBase = "unicode61";
+
+ if( nArg>0 ){
+ zBase = azArg[0];
+ }
+
+ pRet = (PorterTokenizer*)sqlite3_malloc(sizeof(PorterTokenizer));
+ if( pRet ){
+ memset(pRet, 0, sizeof(PorterTokenizer));
+ rc = pApi->xFindTokenizer(pApi, zBase, &pUserdata, &pRet->tokenizer);
+ }else{
+ rc = SQLITE_NOMEM;
+ }
+ if( rc==SQLITE_OK ){
+ int nArg2 = (nArg>0 ? nArg-1 : 0);
+ const char **azArg2 = (nArg2 ? &azArg[1] : 0);
+ rc = pRet->tokenizer.xCreate(pUserdata, azArg2, nArg2, &pRet->pTokenizer);
+ }
+
+ if( rc!=SQLITE_OK ){
+ fts5PorterDelete((Fts5Tokenizer*)pRet);
+ pRet = 0;
+ }
+ *ppOut = (Fts5Tokenizer*)pRet;
+ return rc;
+}
+
+typedef struct PorterContext PorterContext;
+struct PorterContext {
+ void *pCtx;
+ int (*xToken)(void*, int, const char*, int, int, int);
+ char *aBuf;
+};
+
+typedef struct PorterRule PorterRule;
+struct PorterRule {
+ const char *zSuffix;
+ int nSuffix;
+ int (*xCond)(char *zStem, int nStem);
+ const char *zOutput;
+ int nOutput;
+};
+
+#if 0
+static int fts5PorterApply(char *aBuf, int *pnBuf, PorterRule *aRule){
+ int ret = -1;
+ int nBuf = *pnBuf;
+ PorterRule *p;
+
+ for(p=aRule; p->zSuffix; p++){
+ assert( strlen(p->zSuffix)==p->nSuffix );
+ assert( strlen(p->zOutput)==p->nOutput );
+ if( nBuf<p->nSuffix ) continue;
+ if( 0==memcmp(&aBuf[nBuf - p->nSuffix], p->zSuffix, p->nSuffix) ) break;
+ }
+
+ if( p->zSuffix ){
+ int nStem = nBuf - p->nSuffix;
+ if( p->xCond==0 || p->xCond(aBuf, nStem) ){
+ memcpy(&aBuf[nStem], p->zOutput, p->nOutput);
+ *pnBuf = nStem + p->nOutput;
+ ret = p - aRule;
+ }
+ }
+
+ return ret;
+}
+#endif
+
+static int fts5PorterIsVowel(char c, int bYIsVowel){
+ return (
+ c=='a' || c=='e' || c=='i' || c=='o' || c=='u' || (bYIsVowel && c=='y')
+ );
+}
+
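+/*
+** Helper for the Porter "measure" conditions below. Starting with
+** bPrevCons as the consonant/vowel state of the character preceding
+** zStem, consume a single vowel-consonant (VC) sequence from the start
+** of zStem/nStem. Return the number of bytes consumed, or 0 if no
+** complete VC sequence is present. The Porter measure m of a stem is
+** the number of VC sequences it contains.
+*/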
+static int fts5PorterGobbleVC(char *zStem, int nStem, int bPrevCons){
+ int i;
+ int bCons = bPrevCons;
+
+ /* Scan for a vowel */
+ for(i=0; i<nStem; i++){
+ if( 0==(bCons = !fts5PorterIsVowel(zStem[i], bCons)) ) break;
+ }
+
+  /* Scan for a consonant */
+ for(i++; i<nStem; i++){
+ if( (bCons = !fts5PorterIsVowel(zStem[i], bCons)) ) return i+1;
+ }
+ return 0;
+}
+
+/* porter rule condition: (m > 0) */
+static int fts5Porter_MGt0(char *zStem, int nStem){
+ return !!fts5PorterGobbleVC(zStem, nStem, 0);
+}
+
+/* porter rule condition: (m > 1) */
+static int fts5Porter_MGt1(char *zStem, int nStem){
+ int n;
+ n = fts5PorterGobbleVC(zStem, nStem, 0);
+ if( n && fts5PorterGobbleVC(&zStem[n], nStem-n, 1) ){
+ return 1;
+ }
+ return 0;
+}
+
+/* porter rule condition: (m = 1) */
+static int fts5Porter_MEq1(char *zStem, int nStem){
+ int n;
+ n = fts5PorterGobbleVC(zStem, nStem, 0);
+ if( n && 0==fts5PorterGobbleVC(&zStem[n], nStem-n, 1) ){
+ return 1;
+ }
+ return 0;
+}
+
+/* porter rule condition: (*o) */
+static int fts5Porter_Ostar(char *zStem, int nStem){
+ if( zStem[nStem-1]=='w' || zStem[nStem-1]=='x' || zStem[nStem-1]=='y' ){
+ return 0;
+ }else{
+ int i;
+ int mask = 0;
+ int bCons = 0;
+ for(i=0; i<nStem; i++){
+ bCons = !fts5PorterIsVowel(zStem[i], bCons);
+ assert( bCons==0 || bCons==1 );
+ mask = (mask << 1) + bCons;
+ }
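+    /* The low three bits of mask now hold the consonant/vowel pattern
+    ** of the last three characters of the stem - 0x5 (binary 101) means
+    ** consonant-vowel-consonant, i.e. the stem ends *o. */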
+ return ((mask & 0x0007)==0x0005);
+ }
+}
+
+/* porter rule condition: (m > 1 and (*S or *T)) */
+static int fts5Porter_MGt1_and_S_or_T(char *zStem, int nStem){
+ assert( nStem>0 );
+ return (zStem[nStem-1]=='s' || zStem[nStem-1]=='t')
+ && fts5Porter_MGt1(zStem, nStem);
+}
+
+/* porter rule condition: (*v*) */
+static int fts5Porter_Vowel(char *zStem, int nStem){
+ int i;
+ for(i=0; i<nStem; i++){
+ if( fts5PorterIsVowel(zStem[i], i>0) ){
+ return 1;
+ }
+ }
+ return 0;
+}
+
+
+/**************************************************************************
+***************************************************************************
+** GENERATED CODE STARTS HERE (mkportersteps.tcl)
+*/
+
+static int fts5PorterStep4(char *aBuf, int *pnBuf){
+ int ret = 0;
+ int nBuf = *pnBuf;
+ switch( aBuf[nBuf-2] ){
+
+ case 'a':
+ if( nBuf>2 && 0==memcmp("al", &aBuf[nBuf-2], 2) ){
+ if( fts5Porter_MGt1(aBuf, nBuf-2) ){
+ *pnBuf = nBuf - 2;
+ }
+ }
+ break;
+
+ case 'c':
+ if( nBuf>4 && 0==memcmp("ance", &aBuf[nBuf-4], 4) ){
+ if( fts5Porter_MGt1(aBuf, nBuf-4) ){
+ *pnBuf = nBuf - 4;
+ }
+ }else if( nBuf>4 && 0==memcmp("ence", &aBuf[nBuf-4], 4) ){
+ if( fts5Porter_MGt1(aBuf, nBuf-4) ){
+ *pnBuf = nBuf - 4;
+ }
+ }
+ break;
+
+ case 'e':
+ if( nBuf>2 && 0==memcmp("er", &aBuf[nBuf-2], 2) ){
+ if( fts5Porter_MGt1(aBuf, nBuf-2) ){
+ *pnBuf = nBuf - 2;
+ }
+ }
+ break;
+
+ case 'i':
+ if( nBuf>2 && 0==memcmp("ic", &aBuf[nBuf-2], 2) ){
+ if( fts5Porter_MGt1(aBuf, nBuf-2) ){
+ *pnBuf = nBuf - 2;
+ }
+ }
+ break;
+
+ case 'l':
+ if( nBuf>4 && 0==memcmp("able", &aBuf[nBuf-4], 4) ){
+ if( fts5Porter_MGt1(aBuf, nBuf-4) ){
+ *pnBuf = nBuf - 4;
+ }
+ }else if( nBuf>4 && 0==memcmp("ible", &aBuf[nBuf-4], 4) ){
+ if( fts5Porter_MGt1(aBuf, nBuf-4) ){
+ *pnBuf = nBuf - 4;
+ }
+ }
+ break;
+
+ case 'n':
+ if( nBuf>3 && 0==memcmp("ant", &aBuf[nBuf-3], 3) ){
+ if( fts5Porter_MGt1(aBuf, nBuf-3) ){
+ *pnBuf = nBuf - 3;
+ }
+ }else if( nBuf>5 && 0==memcmp("ement", &aBuf[nBuf-5], 5) ){
+ if( fts5Porter_MGt1(aBuf, nBuf-5) ){
+ *pnBuf = nBuf - 5;
+ }
+ }else if( nBuf>4 && 0==memcmp("ment", &aBuf[nBuf-4], 4) ){
+ if( fts5Porter_MGt1(aBuf, nBuf-4) ){
+ *pnBuf = nBuf - 4;
+ }
+ }else if( nBuf>3 && 0==memcmp("ent", &aBuf[nBuf-3], 3) ){
+ if( fts5Porter_MGt1(aBuf, nBuf-3) ){
+ *pnBuf = nBuf - 3;
+ }
+ }
+ break;
+
+ case 'o':
+ if( nBuf>3 && 0==memcmp("ion", &aBuf[nBuf-3], 3) ){
+ if( fts5Porter_MGt1_and_S_or_T(aBuf, nBuf-3) ){
+ *pnBuf = nBuf - 3;
+ }
+ }else if( nBuf>2 && 0==memcmp("ou", &aBuf[nBuf-2], 2) ){
+ if( fts5Porter_MGt1(aBuf, nBuf-2) ){
+ *pnBuf = nBuf - 2;
+ }
+ }
+ break;
+
+ case 's':
+ if( nBuf>3 && 0==memcmp("ism", &aBuf[nBuf-3], 3) ){
+ if( fts5Porter_MGt1(aBuf, nBuf-3) ){
+ *pnBuf = nBuf - 3;
+ }
+ }
+ break;
+
+ case 't':
+ if( nBuf>3 && 0==memcmp("ate", &aBuf[nBuf-3], 3) ){
+ if( fts5Porter_MGt1(aBuf, nBuf-3) ){
+ *pnBuf = nBuf - 3;
+ }
+ }else if( nBuf>3 && 0==memcmp("iti", &aBuf[nBuf-3], 3) ){
+ if( fts5Porter_MGt1(aBuf, nBuf-3) ){
+ *pnBuf = nBuf - 3;
+ }
+ }
+ break;
+
+ case 'u':
+ if( nBuf>3 && 0==memcmp("ous", &aBuf[nBuf-3], 3) ){
+ if( fts5Porter_MGt1(aBuf, nBuf-3) ){
+ *pnBuf = nBuf - 3;
+ }
+ }
+ break;
+
+ case 'v':
+ if( nBuf>3 && 0==memcmp("ive", &aBuf[nBuf-3], 3) ){
+ if( fts5Porter_MGt1(aBuf, nBuf-3) ){
+ *pnBuf = nBuf - 3;
+ }
+ }
+ break;
+
+ case 'z':
+ if( nBuf>3 && 0==memcmp("ize", &aBuf[nBuf-3], 3) ){
+ if( fts5Porter_MGt1(aBuf, nBuf-3) ){
+ *pnBuf = nBuf - 3;
+ }
+ }
+ break;
+
+ }
+ return ret;
+}
+
+
+static int fts5PorterStep1B2(char *aBuf, int *pnBuf){
+ int ret = 0;
+ int nBuf = *pnBuf;
+ switch( aBuf[nBuf-2] ){
+
+ case 'a':
+ if( nBuf>2 && 0==memcmp("at", &aBuf[nBuf-2], 2) ){
+ memcpy(&aBuf[nBuf-2], "ate", 3);
+ *pnBuf = nBuf - 2 + 3;
+ ret = 1;
+ }
+ break;
+
+ case 'b':
+ if( nBuf>2 && 0==memcmp("bl", &aBuf[nBuf-2], 2) ){
+ memcpy(&aBuf[nBuf-2], "ble", 3);
+ *pnBuf = nBuf - 2 + 3;
+ ret = 1;
+ }
+ break;
+
+ case 'i':
+ if( nBuf>2 && 0==memcmp("iz", &aBuf[nBuf-2], 2) ){
+ memcpy(&aBuf[nBuf-2], "ize", 3);
+ *pnBuf = nBuf - 2 + 3;
+ ret = 1;
+ }
+ break;
+
+ }
+ return ret;
+}
+
+
+static int fts5PorterStep2(char *aBuf, int *pnBuf){
+ int ret = 0;
+ int nBuf = *pnBuf;
+ switch( aBuf[nBuf-2] ){
+
+ case 'a':
+ if( nBuf>7 && 0==memcmp("ational", &aBuf[nBuf-7], 7) ){
+ if( fts5Porter_MGt0(aBuf, nBuf-7) ){
+ memcpy(&aBuf[nBuf-7], "ate", 3);
+ *pnBuf = nBuf - 7 + 3;
+ }
+ }else if( nBuf>6 && 0==memcmp("tional", &aBuf[nBuf-6], 6) ){
+ if( fts5Porter_MGt0(aBuf, nBuf-6) ){
+ memcpy(&aBuf[nBuf-6], "tion", 4);
+ *pnBuf = nBuf - 6 + 4;
+ }
+ }
+ break;
+
+ case 'c':
+ if( nBuf>4 && 0==memcmp("enci", &aBuf[nBuf-4], 4) ){
+ if( fts5Porter_MGt0(aBuf, nBuf-4) ){
+ memcpy(&aBuf[nBuf-4], "ence", 4);
+ *pnBuf = nBuf - 4 + 4;
+ }
+ }else if( nBuf>4 && 0==memcmp("anci", &aBuf[nBuf-4], 4) ){
+ if( fts5Porter_MGt0(aBuf, nBuf-4) ){
+ memcpy(&aBuf[nBuf-4], "ance", 4);
+ *pnBuf = nBuf - 4 + 4;
+ }
+ }
+ break;
+
+ case 'e':
+ if( nBuf>4 && 0==memcmp("izer", &aBuf[nBuf-4], 4) ){
+ if( fts5Porter_MGt0(aBuf, nBuf-4) ){
+ memcpy(&aBuf[nBuf-4], "ize", 3);
+ *pnBuf = nBuf - 4 + 3;
+ }
+ }
+ break;
+
+ case 'g':
+ if( nBuf>4 && 0==memcmp("logi", &aBuf[nBuf-4], 4) ){
+ if( fts5Porter_MGt0(aBuf, nBuf-4) ){
+ memcpy(&aBuf[nBuf-4], "log", 3);
+ *pnBuf = nBuf - 4 + 3;
+ }
+ }
+ break;
+
+ case 'l':
+ if( nBuf>3 && 0==memcmp("bli", &aBuf[nBuf-3], 3) ){
+ if( fts5Porter_MGt0(aBuf, nBuf-3) ){
+ memcpy(&aBuf[nBuf-3], "ble", 3);
+ *pnBuf = nBuf - 3 + 3;
+ }
+ }else if( nBuf>4 && 0==memcmp("alli", &aBuf[nBuf-4], 4) ){
+ if( fts5Porter_MGt0(aBuf, nBuf-4) ){
+ memcpy(&aBuf[nBuf-4], "al", 2);
+ *pnBuf = nBuf - 4 + 2;
+ }
+ }else if( nBuf>5 && 0==memcmp("entli", &aBuf[nBuf-5], 5) ){
+ if( fts5Porter_MGt0(aBuf, nBuf-5) ){
+ memcpy(&aBuf[nBuf-5], "ent", 3);
+ *pnBuf = nBuf - 5 + 3;
+ }
+ }else if( nBuf>3 && 0==memcmp("eli", &aBuf[nBuf-3], 3) ){
+ if( fts5Porter_MGt0(aBuf, nBuf-3) ){
+ memcpy(&aBuf[nBuf-3], "e", 1);
+ *pnBuf = nBuf - 3 + 1;
+ }
+ }else if( nBuf>5 && 0==memcmp("ousli", &aBuf[nBuf-5], 5) ){
+ if( fts5Porter_MGt0(aBuf, nBuf-5) ){
+ memcpy(&aBuf[nBuf-5], "ous", 3);
+ *pnBuf = nBuf - 5 + 3;
+ }
+ }
+ break;
+
+ case 'o':
+ if( nBuf>7 && 0==memcmp("ization", &aBuf[nBuf-7], 7) ){
+ if( fts5Porter_MGt0(aBuf, nBuf-7) ){
+ memcpy(&aBuf[nBuf-7], "ize", 3);
+ *pnBuf = nBuf - 7 + 3;
+ }
+ }else if( nBuf>5 && 0==memcmp("ation", &aBuf[nBuf-5], 5) ){
+ if( fts5Porter_MGt0(aBuf, nBuf-5) ){
+ memcpy(&aBuf[nBuf-5], "ate", 3);
+ *pnBuf = nBuf - 5 + 3;
+ }
+ }else if( nBuf>4 && 0==memcmp("ator", &aBuf[nBuf-4], 4) ){
+ if( fts5Porter_MGt0(aBuf, nBuf-4) ){
+ memcpy(&aBuf[nBuf-4], "ate", 3);
+ *pnBuf = nBuf - 4 + 3;
+ }
+ }
+ break;
+
+ case 's':
+ if( nBuf>5 && 0==memcmp("alism", &aBuf[nBuf-5], 5) ){
+ if( fts5Porter_MGt0(aBuf, nBuf-5) ){
+ memcpy(&aBuf[nBuf-5], "al", 2);
+ *pnBuf = nBuf - 5 + 2;
+ }
+ }else if( nBuf>7 && 0==memcmp("iveness", &aBuf[nBuf-7], 7) ){
+ if( fts5Porter_MGt0(aBuf, nBuf-7) ){
+ memcpy(&aBuf[nBuf-7], "ive", 3);
+ *pnBuf = nBuf - 7 + 3;
+ }
+ }else if( nBuf>7 && 0==memcmp("fulness", &aBuf[nBuf-7], 7) ){
+ if( fts5Porter_MGt0(aBuf, nBuf-7) ){
+ memcpy(&aBuf[nBuf-7], "ful", 3);
+ *pnBuf = nBuf - 7 + 3;
+ }
+ }else if( nBuf>7 && 0==memcmp("ousness", &aBuf[nBuf-7], 7) ){
+ if( fts5Porter_MGt0(aBuf, nBuf-7) ){
+ memcpy(&aBuf[nBuf-7], "ous", 3);
+ *pnBuf = nBuf - 7 + 3;
+ }
+ }
+ break;
+
+ case 't':
+ if( nBuf>5 && 0==memcmp("aliti", &aBuf[nBuf-5], 5) ){
+ if( fts5Porter_MGt0(aBuf, nBuf-5) ){
+ memcpy(&aBuf[nBuf-5], "al", 2);
+ *pnBuf = nBuf - 5 + 2;
+ }
+ }else if( nBuf>5 && 0==memcmp("iviti", &aBuf[nBuf-5], 5) ){
+ if( fts5Porter_MGt0(aBuf, nBuf-5) ){
+ memcpy(&aBuf[nBuf-5], "ive", 3);
+ *pnBuf = nBuf - 5 + 3;
+ }
+ }else if( nBuf>6 && 0==memcmp("biliti", &aBuf[nBuf-6], 6) ){
+ if( fts5Porter_MGt0(aBuf, nBuf-6) ){
+ memcpy(&aBuf[nBuf-6], "ble", 3);
+ *pnBuf = nBuf - 6 + 3;
+ }
+ }
+ break;
+
+ }
+ return ret;
+}
+
+
+static int fts5PorterStep3(char *aBuf, int *pnBuf){
+ int ret = 0;
+ int nBuf = *pnBuf;
+ switch( aBuf[nBuf-2] ){
+
+ case 'a':
+ if( nBuf>4 && 0==memcmp("ical", &aBuf[nBuf-4], 4) ){
+ if( fts5Porter_MGt0(aBuf, nBuf-4) ){
+ memcpy(&aBuf[nBuf-4], "ic", 2);
+ *pnBuf = nBuf - 4 + 2;
+ }
+ }
+ break;
+
+ case 's':
+ if( nBuf>4 && 0==memcmp("ness", &aBuf[nBuf-4], 4) ){
+ if( fts5Porter_MGt0(aBuf, nBuf-4) ){
+ *pnBuf = nBuf - 4;
+ }
+ }
+ break;
+
+ case 't':
+ if( nBuf>5 && 0==memcmp("icate", &aBuf[nBuf-5], 5) ){
+ if( fts5Porter_MGt0(aBuf, nBuf-5) ){
+ memcpy(&aBuf[nBuf-5], "ic", 2);
+ *pnBuf = nBuf - 5 + 2;
+ }
+ }else if( nBuf>5 && 0==memcmp("iciti", &aBuf[nBuf-5], 5) ){
+ if( fts5Porter_MGt0(aBuf, nBuf-5) ){
+ memcpy(&aBuf[nBuf-5], "ic", 2);
+ *pnBuf = nBuf - 5 + 2;
+ }
+ }
+ break;
+
+ case 'u':
+ if( nBuf>3 && 0==memcmp("ful", &aBuf[nBuf-3], 3) ){
+ if( fts5Porter_MGt0(aBuf, nBuf-3) ){
+ *pnBuf = nBuf - 3;
+ }
+ }
+ break;
+
+ case 'v':
+ if( nBuf>5 && 0==memcmp("ative", &aBuf[nBuf-5], 5) ){
+ if( fts5Porter_MGt0(aBuf, nBuf-5) ){
+ *pnBuf = nBuf - 5;
+ }
+ }
+ break;
+
+ case 'z':
+ if( nBuf>5 && 0==memcmp("alize", &aBuf[nBuf-5], 5) ){
+ if( fts5Porter_MGt0(aBuf, nBuf-5) ){
+ memcpy(&aBuf[nBuf-5], "al", 2);
+ *pnBuf = nBuf - 5 + 2;
+ }
+ }
+ break;
+
+ }
+ return ret;
+}
+
+
+static int fts5PorterStep1B(char *aBuf, int *pnBuf){
+ int ret = 0;
+ int nBuf = *pnBuf;
+ switch( aBuf[nBuf-2] ){
+
+ case 'e':
+ if( nBuf>3 && 0==memcmp("eed", &aBuf[nBuf-3], 3) ){
+ if( fts5Porter_MGt0(aBuf, nBuf-3) ){
+ memcpy(&aBuf[nBuf-3], "ee", 2);
+ *pnBuf = nBuf - 3 + 2;
+ }
+ }else if( nBuf>2 && 0==memcmp("ed", &aBuf[nBuf-2], 2) ){
+ if( fts5Porter_Vowel(aBuf, nBuf-2) ){
+ *pnBuf = nBuf - 2;
+ ret = 1;
+ }
+ }
+ break;
+
+ case 'n':
+ if( nBuf>3 && 0==memcmp("ing", &aBuf[nBuf-3], 3) ){
+ if( fts5Porter_Vowel(aBuf, nBuf-3) ){
+ *pnBuf = nBuf - 3;
+ ret = 1;
+ }
+ }
+ break;
+
+ }
+ return ret;
+}
+
+/*
+** GENERATED CODE ENDS HERE (mkportersteps.tcl)
+***************************************************************************
+**************************************************************************/
+
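+/*
+** Porter stemmer step 1a - remove plural suffixes: "sses" becomes "ss",
+** "ies" becomes "i", and a trailing "s" (unless preceded by another "s")
+** is removed.
+*/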
+static void fts5PorterStep1A(char *aBuf, int *pnBuf){
+ int nBuf = *pnBuf;
+ if( aBuf[nBuf-1]=='s' ){
+ if( aBuf[nBuf-2]=='e' ){
+ if( (nBuf>4 && aBuf[nBuf-4]=='s' && aBuf[nBuf-3]=='s')
+ || (nBuf>3 && aBuf[nBuf-3]=='i' )
+ ){
+ *pnBuf = nBuf-2;
+ }else{
+ *pnBuf = nBuf-1;
+ }
+ }
+ else if( aBuf[nBuf-2]!='s' ){
+ *pnBuf = nBuf-1;
+ }
+ }
+}
+
+static int fts5PorterCb(
+ void *pCtx,
+ int tflags,
+ const char *pToken,
+ int nToken,
+ int iStart,
+ int iEnd
+){
+ PorterContext *p = (PorterContext*)pCtx;
+
+ char *aBuf;
+ int nBuf;
+
+ if( nToken>FTS5_PORTER_MAX_TOKEN || nToken<3 ) goto pass_through;
+ aBuf = p->aBuf;
+ nBuf = nToken;
+ memcpy(aBuf, pToken, nBuf);
+
+ /* Step 1. */
+ fts5PorterStep1A(aBuf, &nBuf);
+ if( fts5PorterStep1B(aBuf, &nBuf) ){
+ if( fts5PorterStep1B2(aBuf, &nBuf)==0 ){
+ char c = aBuf[nBuf-1];
+ if( fts5PorterIsVowel(c, 0)==0
+ && c!='l' && c!='s' && c!='z' && c==aBuf[nBuf-2]
+ ){
+ nBuf--;
+ }else if( fts5Porter_MEq1(aBuf, nBuf) && fts5Porter_Ostar(aBuf, nBuf) ){
+ aBuf[nBuf++] = 'e';
+ }
+ }
+ }
+
+ /* Step 1C. */
+ if( aBuf[nBuf-1]=='y' && fts5Porter_Vowel(aBuf, nBuf-1) ){
+ aBuf[nBuf-1] = 'i';
+ }
+
+ /* Steps 2 through 4. */
+ fts5PorterStep2(aBuf, &nBuf);
+ fts5PorterStep3(aBuf, &nBuf);
+ fts5PorterStep4(aBuf, &nBuf);
+
+ /* Step 5a. */
+ assert( nBuf>0 );
+ if( aBuf[nBuf-1]=='e' ){
+ if( fts5Porter_MGt1(aBuf, nBuf-1)
+ || (fts5Porter_MEq1(aBuf, nBuf-1) && !fts5Porter_Ostar(aBuf, nBuf-1))
+ ){
+ nBuf--;
+ }
+ }
+
+ /* Step 5b. */
+ if( nBuf>1 && aBuf[nBuf-1]=='l'
+ && aBuf[nBuf-2]=='l' && fts5Porter_MGt1(aBuf, nBuf-1)
+ ){
+ nBuf--;
+ }
+
+ return p->xToken(p->pCtx, tflags, aBuf, nBuf, iStart, iEnd);
+
+ pass_through:
+ return p->xToken(p->pCtx, tflags, pToken, nToken, iStart, iEnd);
+}
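+
+/*
+** Illustrative walk-through (editor's note, not part of the original
+** source): the classic example from Porter's paper, as it flows through
+** the callback above:
+**
+**   "generalizations"                           (input token)
+**     -> "generalization"  (step 1A strips the trailing "s")
+**     -> "generalize"      (step 2 rewrites "ization" as "ize")
+**     -> "general"         (step 3 rewrites "alize" as "al")
+**     -> "gener"           (step 4 drops "al", as m > 1)
+*/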
+
+/*
+** Tokenize using the porter tokenizer.
+*/
+static int fts5PorterTokenize(
+ Fts5Tokenizer *pTokenizer,
+ void *pCtx,
+ int flags,
+ const char *pText, int nText,
+ int (*xToken)(void*, int, const char*, int nToken, int iStart, int iEnd)
+){
+ PorterTokenizer *p = (PorterTokenizer*)pTokenizer;
+ PorterContext sCtx;
+ sCtx.xToken = xToken;
+ sCtx.pCtx = pCtx;
+ sCtx.aBuf = p->aBuf;
+ return p->tokenizer.xTokenize(
+ p->pTokenizer, (void*)&sCtx, flags, pText, nText, fts5PorterCb
+ );
+}
+
+/*
+** Register all built-in tokenizers with FTS5.
+*/
+static int sqlite3Fts5TokenizerInit(fts5_api *pApi){
+ struct BuiltinTokenizer {
+ const char *zName;
+ fts5_tokenizer x;
+ } aBuiltin[] = {
+ { "unicode61", {fts5UnicodeCreate, fts5UnicodeDelete, fts5UnicodeTokenize}},
+ { "ascii", {fts5AsciiCreate, fts5AsciiDelete, fts5AsciiTokenize }},
+ { "porter", {fts5PorterCreate, fts5PorterDelete, fts5PorterTokenize }},
+ };
+
+ int rc = SQLITE_OK; /* Return code */
+ int i; /* To iterate through builtin functions */
+
+ for(i=0; rc==SQLITE_OK && i<ArraySize(aBuiltin); i++){
+ rc = pApi->xCreateTokenizer(pApi,
+ aBuiltin[i].zName,
+ (void*)pApi,
+ &aBuiltin[i].x,
+ 0
+ );
+ }
+
+ return rc;
+}
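+
+/*
+** Usage sketch (editor's note, not part of the original source): once
+** registered, any of the tokenizers above can be selected per table via
+** the standard FTS5 "tokenize" option, for example:
+**
+**   CREATE VIRTUAL TABLE docs USING fts5(body, tokenize='porter unicode61');
+**   INSERT INTO docs VALUES('generalizations are useful');
+**   SELECT * FROM docs WHERE docs MATCH 'generalize';  -- both sides stem
+*/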
+
+/*
+** 2012-05-25
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+******************************************************************************
+*/
+
+/*
+** DO NOT EDIT THIS MACHINE GENERATED FILE.
+*/
+
+
+/* #include <assert.h> */
+
+
+
+/*
+** If the argument is a codepoint corresponding to a lowercase letter
+** in the ASCII range with a diacritic added, return the codepoint
+** of the ASCII letter only. For example, if passed 235 - "LATIN
+** SMALL LETTER E WITH DIAERESIS" - return 101 ("LATIN SMALL LETTER
+** E"). The results of passing a codepoint that corresponds to an
+** uppercase letter are undefined.
+*/
+static int fts5_remove_diacritic(int c, int bComplex){
+ unsigned short aDia[] = {
+ 0, 1797, 1848, 1859, 1891, 1928, 1940, 1995,
+ 2024, 2040, 2060, 2110, 2168, 2206, 2264, 2286,
+ 2344, 2383, 2472, 2488, 2516, 2596, 2668, 2732,
+ 2782, 2842, 2894, 2954, 2984, 3000, 3028, 3336,
+ 3456, 3696, 3712, 3728, 3744, 3766, 3832, 3896,
+ 3912, 3928, 3944, 3968, 4008, 4040, 4056, 4106,
+ 4138, 4170, 4202, 4234, 4266, 4296, 4312, 4344,
+ 4408, 4424, 4442, 4472, 4488, 4504, 6148, 6198,
+ 6264, 6280, 6360, 6429, 6505, 6529, 61448, 61468,
+ 61512, 61534, 61592, 61610, 61642, 61672, 61688, 61704,
+ 61726, 61784, 61800, 61816, 61836, 61880, 61896, 61914,
+ 61948, 61998, 62062, 62122, 62154, 62184, 62200, 62218,
+ 62252, 62302, 62364, 62410, 62442, 62478, 62536, 62554,
+ 62584, 62604, 62640, 62648, 62656, 62664, 62730, 62766,
+ 62830, 62890, 62924, 62974, 63032, 63050, 63082, 63118,
+ 63182, 63242, 63274, 63310, 63368, 63390,
+ };
+#define HIBIT ((unsigned char)0x80)
+ unsigned char aChar[] = {
+ '\0', 'a', 'c', 'e', 'i', 'n',
+ 'o', 'u', 'y', 'y', 'a', 'c',
+ 'd', 'e', 'e', 'g', 'h', 'i',
+ 'j', 'k', 'l', 'n', 'o', 'r',
+ 's', 't', 'u', 'u', 'w', 'y',
+ 'z', 'o', 'u', 'a', 'i', 'o',
+ 'u', 'u'|HIBIT, 'a'|HIBIT, 'g', 'k', 'o',
+ 'o'|HIBIT, 'j', 'g', 'n', 'a'|HIBIT, 'a',
+ 'e', 'i', 'o', 'r', 'u', 's',
+ 't', 'h', 'a', 'e', 'o'|HIBIT, 'o',
+ 'o'|HIBIT, 'y', '\0', '\0', '\0', '\0',
+ '\0', '\0', '\0', '\0', 'a', 'b',
+ 'c'|HIBIT, 'd', 'd', 'e'|HIBIT, 'e', 'e'|HIBIT,
+ 'f', 'g', 'h', 'h', 'i', 'i'|HIBIT,
+ 'k', 'l', 'l'|HIBIT, 'l', 'm', 'n',
+ 'o'|HIBIT, 'p', 'r', 'r'|HIBIT, 'r', 's',
+ 's'|HIBIT, 't', 'u', 'u'|HIBIT, 'v', 'w',
+ 'w', 'x', 'y', 'z', 'h', 't',
+ 'w', 'y', 'a', 'a'|HIBIT, 'a'|HIBIT, 'a'|HIBIT,
+ 'e', 'e'|HIBIT, 'e'|HIBIT, 'i', 'o', 'o'|HIBIT,
+ 'o'|HIBIT, 'o'|HIBIT, 'u', 'u'|HIBIT, 'u'|HIBIT, 'y',
+ };
+
+ unsigned int key = (((unsigned int)c)<<3) | 0x00000007;
+ int iRes = 0;
+ int iHi = sizeof(aDia)/sizeof(aDia[0]) - 1;
+ int iLo = 0;
+ while( iHi>=iLo ){
+ int iTest = (iHi + iLo) / 2;
+ if( key >= aDia[iTest] ){
+ iRes = iTest;
+ iLo = iTest+1;
+ }else{
+ iHi = iTest-1;
+ }
+ }
+ assert( key>=aDia[iRes] );
+ if( bComplex==0 && (aChar[iRes] & 0x80) ) return c;
+ return (c > (aDia[iRes]>>3) + (aDia[iRes]&0x07)) ? c : ((int)aChar[iRes] & 0x7F);
+}
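+
+/*
+** Worked example (editor's annotation): each aDia[] entry packs a base
+** codepoint in its upper 13 bits and a run length in its low 3 bits.
+** For c==235 ("LATIN SMALL LETTER E WITH DIAERESIS"), key is
+** (235<<3)|7 == 1887; the binary search settles on aDia[3]==1859, which
+** decodes to the run 232..235, and aChar[3]=='e', so 101 is returned.
+*/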
+
+
+/*
+** Return true if the argument interpreted as a unicode codepoint
+** is a diacritical modifier character.
+*/
+static int sqlite3Fts5UnicodeIsdiacritic(int c){
+ unsigned int mask0 = 0x08029FDF;
+ unsigned int mask1 = 0x000361F8;
+ if( c<768 || c>817 ) return 0;
+ return (c < 768+32) ?
+ (mask0 & ((unsigned int)1 << (c-768))) :
+ (mask1 & ((unsigned int)1 << (c-768-32)));
+}
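+
+/*
+** Example (editor's annotation): the two masks above cover the range
+** 768..817. For c==769 ("COMBINING ACUTE ACCENT"), bit 1 of mask0 is
+** set, so the function reports a diacritic.
+*/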
+
+
+/*
+** Interpret the argument as a unicode codepoint. If the codepoint
+** is an upper case character that has a lower case equivalent,
+** return the codepoint corresponding to the lower case version.
+** Otherwise, return a copy of the argument.
+**
+** The results are undefined if the value passed to this function
+** is less than zero.
+*/
+static int sqlite3Fts5UnicodeFold(int c, int eRemoveDiacritic){
+ /* Each entry in the following array defines a rule for folding a range
+ ** of codepoints to lower case. The rule applies to a range of nRange
+ ** codepoints starting at codepoint iCode.
+ **
+ ** If the least significant bit in flags is clear, then the rule applies
+ ** to all nRange codepoints (i.e. all nRange codepoints are upper case and
+ ** need to be folded). Or, if it is set, then the rule only applies to
+ ** every second codepoint in the range, starting with codepoint C.
+ **
+ ** The 7 most significant bits in flags are an index into the aiOff[]
+ ** array. If a specific codepoint C does require folding, then its lower
+ ** case equivalent is ((C + aiOff[flags>>1]) & 0xFFFF).
+ **
+ ** The contents of this array are generated by parsing the CaseFolding.txt
+ ** file distributed as part of the "Unicode Character Database". See
+ ** http://www.unicode.org for details.
+ */
+ static const struct TableEntry {
+ unsigned short iCode;
+ unsigned char flags;
+ unsigned char nRange;
+ } aEntry[] = {
+ {65, 14, 26}, {181, 64, 1}, {192, 14, 23},
+ {216, 14, 7}, {256, 1, 48}, {306, 1, 6},
+ {313, 1, 16}, {330, 1, 46}, {376, 116, 1},
+ {377, 1, 6}, {383, 104, 1}, {385, 50, 1},
+ {386, 1, 4}, {390, 44, 1}, {391, 0, 1},
+ {393, 42, 2}, {395, 0, 1}, {398, 32, 1},
+ {399, 38, 1}, {400, 40, 1}, {401, 0, 1},
+ {403, 42, 1}, {404, 46, 1}, {406, 52, 1},
+ {407, 48, 1}, {408, 0, 1}, {412, 52, 1},
+ {413, 54, 1}, {415, 56, 1}, {416, 1, 6},
+ {422, 60, 1}, {423, 0, 1}, {425, 60, 1},
+ {428, 0, 1}, {430, 60, 1}, {431, 0, 1},
+ {433, 58, 2}, {435, 1, 4}, {439, 62, 1},
+ {440, 0, 1}, {444, 0, 1}, {452, 2, 1},
+ {453, 0, 1}, {455, 2, 1}, {456, 0, 1},
+ {458, 2, 1}, {459, 1, 18}, {478, 1, 18},
+ {497, 2, 1}, {498, 1, 4}, {502, 122, 1},
+ {503, 134, 1}, {504, 1, 40}, {544, 110, 1},
+ {546, 1, 18}, {570, 70, 1}, {571, 0, 1},
+ {573, 108, 1}, {574, 68, 1}, {577, 0, 1},
+ {579, 106, 1}, {580, 28, 1}, {581, 30, 1},
+ {582, 1, 10}, {837, 36, 1}, {880, 1, 4},
+ {886, 0, 1}, {902, 18, 1}, {904, 16, 3},
+ {908, 26, 1}, {910, 24, 2}, {913, 14, 17},
+ {931, 14, 9}, {962, 0, 1}, {975, 4, 1},
+ {976, 140, 1}, {977, 142, 1}, {981, 146, 1},
+ {982, 144, 1}, {984, 1, 24}, {1008, 136, 1},
+ {1009, 138, 1}, {1012, 130, 1}, {1013, 128, 1},
+ {1015, 0, 1}, {1017, 152, 1}, {1018, 0, 1},
+ {1021, 110, 3}, {1024, 34, 16}, {1040, 14, 32},
+ {1120, 1, 34}, {1162, 1, 54}, {1216, 6, 1},
+ {1217, 1, 14}, {1232, 1, 88}, {1329, 22, 38},
+ {4256, 66, 38}, {4295, 66, 1}, {4301, 66, 1},
+ {7680, 1, 150}, {7835, 132, 1}, {7838, 96, 1},
+ {7840, 1, 96}, {7944, 150, 8}, {7960, 150, 6},
+ {7976, 150, 8}, {7992, 150, 8}, {8008, 150, 6},
+ {8025, 151, 8}, {8040, 150, 8}, {8072, 150, 8},
+ {8088, 150, 8}, {8104, 150, 8}, {8120, 150, 2},
+ {8122, 126, 2}, {8124, 148, 1}, {8126, 100, 1},
+ {8136, 124, 4}, {8140, 148, 1}, {8152, 150, 2},
+ {8154, 120, 2}, {8168, 150, 2}, {8170, 118, 2},
+ {8172, 152, 1}, {8184, 112, 2}, {8186, 114, 2},
+ {8188, 148, 1}, {8486, 98, 1}, {8490, 92, 1},
+ {8491, 94, 1}, {8498, 12, 1}, {8544, 8, 16},
+ {8579, 0, 1}, {9398, 10, 26}, {11264, 22, 47},
+ {11360, 0, 1}, {11362, 88, 1}, {11363, 102, 1},
+ {11364, 90, 1}, {11367, 1, 6}, {11373, 84, 1},
+ {11374, 86, 1}, {11375, 80, 1}, {11376, 82, 1},
+ {11378, 0, 1}, {11381, 0, 1}, {11390, 78, 2},
+ {11392, 1, 100}, {11499, 1, 4}, {11506, 0, 1},
+ {42560, 1, 46}, {42624, 1, 24}, {42786, 1, 14},
+ {42802, 1, 62}, {42873, 1, 4}, {42877, 76, 1},
+ {42878, 1, 10}, {42891, 0, 1}, {42893, 74, 1},
+ {42896, 1, 4}, {42912, 1, 10}, {42922, 72, 1},
+ {65313, 14, 26},
+ };
+ static const unsigned short aiOff[] = {
+ 1, 2, 8, 15, 16, 26, 28, 32,
+ 37, 38, 40, 48, 63, 64, 69, 71,
+ 79, 80, 116, 202, 203, 205, 206, 207,
+ 209, 210, 211, 213, 214, 217, 218, 219,
+ 775, 7264, 10792, 10795, 23228, 23256, 30204, 54721,
+ 54753, 54754, 54756, 54787, 54793, 54809, 57153, 57274,
+ 57921, 58019, 58363, 61722, 65268, 65341, 65373, 65406,
+ 65408, 65410, 65415, 65424, 65436, 65439, 65450, 65462,
+ 65472, 65476, 65478, 65480, 65482, 65488, 65506, 65511,
+ 65514, 65521, 65527, 65528, 65529,
+ };
+
+ int ret = c;
+
+ assert( sizeof(unsigned short)==2 && sizeof(unsigned char)==1 );
+
+ if( c<128 ){
+ if( c>='A' && c<='Z' ) ret = c + ('a' - 'A');
+ }else if( c<65536 ){
+ const struct TableEntry *p;
+ int iHi = sizeof(aEntry)/sizeof(aEntry[0]) - 1;
+ int iLo = 0;
+ int iRes = -1;
+
+ assert( c>aEntry[0].iCode );
+ while( iHi>=iLo ){
+ int iTest = (iHi + iLo) / 2;
+ int cmp = (c - aEntry[iTest].iCode);
+ if( cmp>=0 ){
+ iRes = iTest;
+ iLo = iTest+1;
+ }else{
+ iHi = iTest-1;
+ }
+ }
+
+ assert( iRes>=0 && c>=aEntry[iRes].iCode );
+ p = &aEntry[iRes];
+ if( c<(p->iCode + p->nRange) && 0==(0x01 & p->flags & (p->iCode ^ c)) ){
+ ret = (c + (aiOff[p->flags>>1])) & 0x0000FFFF;
+ assert( ret>0 );
+ }
+
+ if( eRemoveDiacritic ){
+ ret = fts5_remove_diacritic(ret, eRemoveDiacritic==2);
+ }
+ }
+
+ else if( c>=66560 && c<66600 ){
+ ret = c + 40;
+ }
+
+ return ret;
+}
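+
+/*
+** Worked example (editor's annotation): for c==192 ("LATIN CAPITAL
+** LETTER A WITH GRAVE") the binary search selects the entry
+** {192, 14, 23}. The flags value 14 has its low bit clear, so the rule
+** covers all 23 codepoints in the range, and aiOff[14>>1]==32, giving
+** (192+32)&0xFFFF == 224 - "LATIN SMALL LETTER A WITH GRAVE".
+*/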
+
+
+static int sqlite3Fts5UnicodeCatParse(const char *zCat, u8 *aArray){
+ aArray[0] = 1;
+ switch( zCat[0] ){
+ case 'C':
+ switch( zCat[1] ){
+ case 'c': aArray[1] = 1; break;
+ case 'f': aArray[2] = 1; break;
+ case 'n': aArray[3] = 1; break;
+ case 's': aArray[4] = 1; break;
+ case 'o': aArray[31] = 1; break;
+ case '*':
+ aArray[1] = 1;
+ aArray[2] = 1;
+ aArray[3] = 1;
+ aArray[4] = 1;
+ aArray[31] = 1;
+ break;
+ default: return 1; }
+ break;
+
+ case 'L':
+ switch( zCat[1] ){
+ case 'l': aArray[5] = 1; break;
+ case 'm': aArray[6] = 1; break;
+ case 'o': aArray[7] = 1; break;
+ case 't': aArray[8] = 1; break;
+ case 'u': aArray[9] = 1; break;
+ case 'C': aArray[30] = 1; break;
+ case '*':
+ aArray[5] = 1;
+ aArray[6] = 1;
+ aArray[7] = 1;
+ aArray[8] = 1;
+ aArray[9] = 1;
+ aArray[30] = 1;
+ break;
+ default: return 1; }
+ break;
+
+ case 'M':
+ switch( zCat[1] ){
+ case 'c': aArray[10] = 1; break;
+ case 'e': aArray[11] = 1; break;
+ case 'n': aArray[12] = 1; break;
+ case '*':
+ aArray[10] = 1;
+ aArray[11] = 1;
+ aArray[12] = 1;
+ break;
+ default: return 1; }
+ break;
+
+ case 'N':
+ switch( zCat[1] ){
+ case 'd': aArray[13] = 1; break;
+ case 'l': aArray[14] = 1; break;
+ case 'o': aArray[15] = 1; break;
+ case '*':
+ aArray[13] = 1;
+ aArray[14] = 1;
+ aArray[15] = 1;
+ break;
+ default: return 1; }
+ break;
+
+ case 'P':
+ switch( zCat[1] ){
+ case 'c': aArray[16] = 1; break;
+ case 'd': aArray[17] = 1; break;
+ case 'e': aArray[18] = 1; break;
+ case 'f': aArray[19] = 1; break;
+ case 'i': aArray[20] = 1; break;
+ case 'o': aArray[21] = 1; break;
+ case 's': aArray[22] = 1; break;
+ case '*':
+ aArray[16] = 1;
+ aArray[17] = 1;
+ aArray[18] = 1;
+ aArray[19] = 1;
+ aArray[20] = 1;
+ aArray[21] = 1;
+ aArray[22] = 1;
+ break;
+ default: return 1; }
+ break;
+
+ case 'S':
+ switch( zCat[1] ){
+ case 'c': aArray[23] = 1; break;
+ case 'k': aArray[24] = 1; break;
+ case 'm': aArray[25] = 1; break;
+ case 'o': aArray[26] = 1; break;
+ case '*':
+ aArray[23] = 1;
+ aArray[24] = 1;
+ aArray[25] = 1;
+ aArray[26] = 1;
+ break;
+ default: return 1; }
+ break;
+
+ case 'Z':
+ switch( zCat[1] ){
+ case 'l': aArray[27] = 1; break;
+ case 'p': aArray[28] = 1; break;
+ case 's': aArray[29] = 1; break;
+ case '*':
+ aArray[27] = 1;
+ aArray[28] = 1;
+ aArray[29] = 1;
+ break;
+ default: return 1; }
+ break;
+
+ }
+ return 0;
+}
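+
+/*
+** Example (editor's annotation): parsing the category string "L*" sets
+** aArray[5..9] (Ll, Lm, Lo, Lt, Lu) as well as aArray[30] (the LC
+** slot), while a string with an unknown second character, e.g. "Lz",
+** makes the function return 1 so the caller can flag an error.
+*/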
+
+static u16 aFts5UnicodeBlock[] = {
+ 0, 1471, 1753, 1760, 1760, 1760, 1760, 1760, 1760, 1760,
+ 1760, 1760, 1760, 1760, 1760, 1763, 1765,
+ };
+static u16 aFts5UnicodeMap[] = {
+ 0, 32, 33, 36, 37, 40, 41, 42, 43, 44,
+ 45, 46, 48, 58, 60, 63, 65, 91, 92, 93,
+ 94, 95, 96, 97, 123, 124, 125, 126, 127, 160,
+ 161, 162, 166, 167, 168, 169, 170, 171, 172, 173,
+ 174, 175, 176, 177, 178, 180, 181, 182, 184, 185,
+ 186, 187, 188, 191, 192, 215, 216, 223, 247, 248,
+ 256, 312, 313, 329, 330, 377, 383, 385, 387, 388,
+ 391, 394, 396, 398, 402, 403, 405, 406, 409, 412,
+ 414, 415, 417, 418, 423, 427, 428, 431, 434, 436,
+ 437, 440, 442, 443, 444, 446, 448, 452, 453, 454,
+ 455, 456, 457, 458, 459, 460, 461, 477, 478, 496,
+ 497, 498, 499, 500, 503, 505, 506, 564, 570, 572,
+ 573, 575, 577, 580, 583, 584, 592, 660, 661, 688,
+ 706, 710, 722, 736, 741, 748, 749, 750, 751, 768,
+ 880, 884, 885, 886, 890, 891, 894, 900, 902, 903,
+ 904, 908, 910, 912, 913, 931, 940, 975, 977, 978,
+ 981, 984, 1008, 1012, 1014, 1015, 1018, 1020, 1021, 1072,
+ 1120, 1154, 1155, 1160, 1162, 1217, 1231, 1232, 1329, 1369,
+ 1370, 1377, 1417, 1418, 1423, 1425, 1470, 1471, 1472, 1473,
+ 1475, 1476, 1478, 1479, 1488, 1520, 1523, 1536, 1542, 1545,
+ 1547, 1548, 1550, 1552, 1563, 1566, 1568, 1600, 1601, 1611,
+ 1632, 1642, 1646, 1648, 1649, 1748, 1749, 1750, 1757, 1758,
+ 1759, 1765, 1767, 1769, 1770, 1774, 1776, 1786, 1789, 1791,
+ 1792, 1807, 1808, 1809, 1810, 1840, 1869, 1958, 1969, 1984,
+ 1994, 2027, 2036, 2038, 2039, 2042, 2048, 2070, 2074, 2075,
+ 2084, 2085, 2088, 2089, 2096, 2112, 2137, 2142, 2208, 2210,
+ 2276, 2304, 2307, 2308, 2362, 2363, 2364, 2365, 2366, 2369,
+ 2377, 2381, 2382, 2384, 2385, 2392, 2402, 2404, 2406, 2416,
+ 2417, 2418, 2425, 2433, 2434, 2437, 2447, 2451, 2474, 2482,
+ 2486, 2492, 2493, 2494, 2497, 2503, 2507, 2509, 2510, 2519,
+ 2524, 2527, 2530, 2534, 2544, 2546, 2548, 2554, 2555, 2561,
+ 2563, 2565, 2575, 2579, 2602, 2610, 2613, 2616, 2620, 2622,
+ 2625, 2631, 2635, 2641, 2649, 2654, 2662, 2672, 2674, 2677,
+ 2689, 2691, 2693, 2703, 2707, 2730, 2738, 2741, 2748, 2749,
+ 2750, 2753, 2759, 2761, 2763, 2765, 2768, 2784, 2786, 2790,
+ 2800, 2801, 2817, 2818, 2821, 2831, 2835, 2858, 2866, 2869,
+ 2876, 2877, 2878, 2879, 2880, 2881, 2887, 2891, 2893, 2902,
+ 2903, 2908, 2911, 2914, 2918, 2928, 2929, 2930, 2946, 2947,
+ 2949, 2958, 2962, 2969, 2972, 2974, 2979, 2984, 2990, 3006,
+ 3008, 3009, 3014, 3018, 3021, 3024, 3031, 3046, 3056, 3059,
+ 3065, 3066, 3073, 3077, 3086, 3090, 3114, 3125, 3133, 3134,
+ 3137, 3142, 3146, 3157, 3160, 3168, 3170, 3174, 3192, 3199,
+ 3202, 3205, 3214, 3218, 3242, 3253, 3260, 3261, 3262, 3263,
+ 3264, 3270, 3271, 3274, 3276, 3285, 3294, 3296, 3298, 3302,
+ 3313, 3330, 3333, 3342, 3346, 3389, 3390, 3393, 3398, 3402,
+ 3405, 3406, 3415, 3424, 3426, 3430, 3440, 3449, 3450, 3458,
+ 3461, 3482, 3507, 3517, 3520, 3530, 3535, 3538, 3542, 3544,
+ 3570, 3572, 3585, 3633, 3634, 3636, 3647, 3648, 3654, 3655,
+ 3663, 3664, 3674, 3713, 3716, 3719, 3722, 3725, 3732, 3737,
+ 3745, 3749, 3751, 3754, 3757, 3761, 3762, 3764, 3771, 3773,
+ 3776, 3782, 3784, 3792, 3804, 3840, 3841, 3844, 3859, 3860,
+ 3861, 3864, 3866, 3872, 3882, 3892, 3893, 3894, 3895, 3896,
+ 3897, 3898, 3899, 3900, 3901, 3902, 3904, 3913, 3953, 3967,
+ 3968, 3973, 3974, 3976, 3981, 3993, 4030, 4038, 4039, 4046,
+ 4048, 4053, 4057, 4096, 4139, 4141, 4145, 4146, 4152, 4153,
+ 4155, 4157, 4159, 4160, 4170, 4176, 4182, 4184, 4186, 4190,
+ 4193, 4194, 4197, 4199, 4206, 4209, 4213, 4226, 4227, 4229,
+ 4231, 4237, 4238, 4239, 4240, 4250, 4253, 4254, 4256, 4295,
+ 4301, 4304, 4347, 4348, 4349, 4682, 4688, 4696, 4698, 4704,
+ 4746, 4752, 4786, 4792, 4800, 4802, 4808, 4824, 4882, 4888,
+ 4957, 4960, 4969, 4992, 5008, 5024, 5120, 5121, 5741, 5743,
+ 5760, 5761, 5787, 5788, 5792, 5867, 5870, 5888, 5902, 5906,
+ 5920, 5938, 5941, 5952, 5970, 5984, 5998, 6002, 6016, 6068,
+ 6070, 6071, 6078, 6086, 6087, 6089, 6100, 6103, 6104, 6107,
+ 6108, 6109, 6112, 6128, 6144, 6150, 6151, 6155, 6158, 6160,
+ 6176, 6211, 6212, 6272, 6313, 6314, 6320, 6400, 6432, 6435,
+ 6439, 6441, 6448, 6450, 6451, 6457, 6464, 6468, 6470, 6480,
+ 6512, 6528, 6576, 6593, 6600, 6608, 6618, 6622, 6656, 6679,
+ 6681, 6686, 6688, 6741, 6742, 6743, 6744, 6752, 6753, 6754,
+ 6755, 6757, 6765, 6771, 6783, 6784, 6800, 6816, 6823, 6824,
+ 6912, 6916, 6917, 6964, 6965, 6966, 6971, 6972, 6973, 6978,
+ 6979, 6981, 6992, 7002, 7009, 7019, 7028, 7040, 7042, 7043,
+ 7073, 7074, 7078, 7080, 7082, 7083, 7084, 7086, 7088, 7098,
+ 7142, 7143, 7144, 7146, 7149, 7150, 7151, 7154, 7164, 7168,
+ 7204, 7212, 7220, 7222, 7227, 7232, 7245, 7248, 7258, 7288,
+ 7294, 7360, 7376, 7379, 7380, 7393, 7394, 7401, 7405, 7406,
+ 7410, 7412, 7413, 7424, 7468, 7531, 7544, 7545, 7579, 7616,
+ 7676, 7680, 7830, 7838, 7936, 7944, 7952, 7960, 7968, 7976,
+ 7984, 7992, 8000, 8008, 8016, 8025, 8027, 8029, 8031, 8033,
+ 8040, 8048, 8064, 8072, 8080, 8088, 8096, 8104, 8112, 8118,
+ 8120, 8124, 8125, 8126, 8127, 8130, 8134, 8136, 8140, 8141,
+ 8144, 8150, 8152, 8157, 8160, 8168, 8173, 8178, 8182, 8184,
+ 8188, 8189, 8192, 8203, 8208, 8214, 8216, 8217, 8218, 8219,
+ 8221, 8222, 8223, 8224, 8232, 8233, 8234, 8239, 8240, 8249,
+ 8250, 8251, 8255, 8257, 8260, 8261, 8262, 8263, 8274, 8275,
+ 8276, 8277, 8287, 8288, 8298, 8304, 8305, 8308, 8314, 8317,
+ 8318, 8319, 8320, 8330, 8333, 8334, 8336, 8352, 8400, 8413,
+ 8417, 8418, 8421, 8448, 8450, 8451, 8455, 8456, 8458, 8459,
+ 8462, 8464, 8467, 8468, 8469, 8470, 8472, 8473, 8478, 8484,
+ 8485, 8486, 8487, 8488, 8489, 8490, 8494, 8495, 8496, 8500,
+ 8501, 8505, 8506, 8508, 8510, 8512, 8517, 8519, 8522, 8523,
+ 8524, 8526, 8527, 8528, 8544, 8579, 8581, 8585, 8592, 8597,
+ 8602, 8604, 8608, 8609, 8611, 8612, 8614, 8615, 8622, 8623,
+ 8654, 8656, 8658, 8659, 8660, 8661, 8692, 8960, 8968, 8972,
+ 8992, 8994, 9001, 9002, 9003, 9084, 9085, 9115, 9140, 9180,
+ 9186, 9216, 9280, 9312, 9372, 9450, 9472, 9655, 9656, 9665,
+ 9666, 9720, 9728, 9839, 9840, 9985, 10088, 10089, 10090, 10091,
+ 10092, 10093, 10094, 10095, 10096, 10097, 10098, 10099, 10100, 10101,
+ 10102, 10132, 10176, 10181, 10182, 10183, 10214, 10215, 10216, 10217,
+ 10218, 10219, 10220, 10221, 10222, 10223, 10224, 10240, 10496, 10627,
+ 10628, 10629, 10630, 10631, 10632, 10633, 10634, 10635, 10636, 10637,
+ 10638, 10639, 10640, 10641, 10642, 10643, 10644, 10645, 10646, 10647,
+ 10648, 10649, 10712, 10713, 10714, 10715, 10716, 10748, 10749, 10750,
+ 11008, 11056, 11077, 11079, 11088, 11264, 11312, 11360, 11363, 11365,
+ 11367, 11374, 11377, 11378, 11380, 11381, 11383, 11388, 11390, 11393,
+ 11394, 11492, 11493, 11499, 11503, 11506, 11513, 11517, 11518, 11520,
+ 11559, 11565, 11568, 11631, 11632, 11647, 11648, 11680, 11688, 11696,
+ 11704, 11712, 11720, 11728, 11736, 11744, 11776, 11778, 11779, 11780,
+ 11781, 11782, 11785, 11786, 11787, 11788, 11789, 11790, 11799, 11800,
+ 11802, 11803, 11804, 11805, 11806, 11808, 11809, 11810, 11811, 11812,
+ 11813, 11814, 11815, 11816, 11817, 11818, 11823, 11824, 11834, 11904,
+ 11931, 12032, 12272, 12288, 12289, 12292, 12293, 12294, 12295, 12296,
+ 12297, 12298, 12299, 12300, 12301, 12302, 12303, 12304, 12305, 12306,
+ 12308, 12309, 12310, 12311, 12312, 12313, 12314, 12315, 12316, 12317,
+ 12318, 12320, 12321, 12330, 12334, 12336, 12337, 12342, 12344, 12347,
+ 12348, 12349, 12350, 12353, 12441, 12443, 12445, 12447, 12448, 12449,
+ 12539, 12540, 12543, 12549, 12593, 12688, 12690, 12694, 12704, 12736,
+ 12784, 12800, 12832, 12842, 12872, 12880, 12881, 12896, 12928, 12938,
+ 12977, 12992, 13056, 13312, 19893, 19904, 19968, 40908, 40960, 40981,
+ 40982, 42128, 42192, 42232, 42238, 42240, 42508, 42509, 42512, 42528,
+ 42538, 42560, 42606, 42607, 42608, 42611, 42612, 42622, 42623, 42624,
+ 42655, 42656, 42726, 42736, 42738, 42752, 42775, 42784, 42786, 42800,
+ 42802, 42864, 42865, 42873, 42878, 42888, 42889, 42891, 42896, 42912,
+ 43000, 43002, 43003, 43010, 43011, 43014, 43015, 43019, 43020, 43043,
+ 43045, 43047, 43048, 43056, 43062, 43064, 43065, 43072, 43124, 43136,
+ 43138, 43188, 43204, 43214, 43216, 43232, 43250, 43256, 43259, 43264,
+ 43274, 43302, 43310, 43312, 43335, 43346, 43359, 43360, 43392, 43395,
+ 43396, 43443, 43444, 43446, 43450, 43452, 43453, 43457, 43471, 43472,
+ 43486, 43520, 43561, 43567, 43569, 43571, 43573, 43584, 43587, 43588,
+ 43596, 43597, 43600, 43612, 43616, 43632, 43633, 43639, 43642, 43643,
+ 43648, 43696, 43697, 43698, 43701, 43703, 43705, 43710, 43712, 43713,
+ 43714, 43739, 43741, 43742, 43744, 43755, 43756, 43758, 43760, 43762,
+ 43763, 43765, 43766, 43777, 43785, 43793, 43808, 43816, 43968, 44003,
+ 44005, 44006, 44008, 44009, 44011, 44012, 44013, 44016, 44032, 55203,
+ 55216, 55243, 55296, 56191, 56319, 57343, 57344, 63743, 63744, 64112,
+ 64256, 64275, 64285, 64286, 64287, 64297, 64298, 64312, 64318, 64320,
+ 64323, 64326, 64434, 64467, 64830, 64831, 64848, 64914, 65008, 65020,
+ 65021, 65024, 65040, 65047, 65048, 65049, 65056, 65072, 65073, 65075,
+ 65077, 65078, 65079, 65080, 65081, 65082, 65083, 65084, 65085, 65086,
+ 65087, 65088, 65089, 65090, 65091, 65092, 65093, 65095, 65096, 65097,
+ 65101, 65104, 65108, 65112, 65113, 65114, 65115, 65116, 65117, 65118,
+ 65119, 65122, 65123, 65124, 65128, 65129, 65130, 65136, 65142, 65279,
+ 65281, 65284, 65285, 65288, 65289, 65290, 65291, 65292, 65293, 65294,
+ 65296, 65306, 65308, 65311, 65313, 65339, 65340, 65341, 65342, 65343,
+ 65344, 65345, 65371, 65372, 65373, 65374, 65375, 65376, 65377, 65378,
+ 65379, 65380, 65382, 65392, 65393, 65438, 65440, 65474, 65482, 65490,
+ 65498, 65504, 65506, 65507, 65508, 65509, 65512, 65513, 65517, 65529,
+ 65532, 0, 13, 40, 60, 63, 80, 128, 256, 263,
+ 311, 320, 373, 377, 394, 400, 464, 509, 640, 672,
+ 768, 800, 816, 833, 834, 842, 896, 927, 928, 968,
+ 976, 977, 1024, 1064, 1104, 1184, 2048, 2056, 2058, 2103,
+ 2108, 2111, 2135, 2136, 2304, 2326, 2335, 2336, 2367, 2432,
+ 2494, 2560, 2561, 2565, 2572, 2576, 2581, 2585, 2616, 2623,
+ 2624, 2640, 2656, 2685, 2687, 2816, 2873, 2880, 2904, 2912,
+ 2936, 3072, 3680, 4096, 4097, 4098, 4099, 4152, 4167, 4178,
+ 4198, 4224, 4226, 4227, 4272, 4275, 4279, 4281, 4283, 4285,
+ 4286, 4304, 4336, 4352, 4355, 4391, 4396, 4397, 4406, 4416,
+ 4480, 4482, 4483, 4531, 4534, 4543, 4545, 4549, 4560, 5760,
+ 5803, 5804, 5805, 5806, 5808, 5814, 5815, 5824, 8192, 9216,
+ 9328, 12288, 26624, 28416, 28496, 28497, 28559, 28563, 45056, 53248,
+ 53504, 53545, 53605, 53607, 53610, 53613, 53619, 53627, 53635, 53637,
+ 53644, 53674, 53678, 53760, 53826, 53829, 54016, 54112, 54272, 54298,
+ 54324, 54350, 54358, 54376, 54402, 54428, 54430, 54434, 54437, 54441,
+ 54446, 54454, 54459, 54461, 54469, 54480, 54506, 54532, 54535, 54541,
+ 54550, 54558, 54584, 54587, 54592, 54598, 54602, 54610, 54636, 54662,
+ 54688, 54714, 54740, 54766, 54792, 54818, 54844, 54870, 54896, 54922,
+ 54952, 54977, 54978, 55003, 55004, 55010, 55035, 55036, 55061, 55062,
+ 55068, 55093, 55094, 55119, 55120, 55126, 55151, 55152, 55177, 55178,
+ 55184, 55209, 55210, 55235, 55236, 55242, 55246, 60928, 60933, 60961,
+ 60964, 60967, 60969, 60980, 60985, 60987, 60994, 60999, 61001, 61003,
+ 61005, 61009, 61012, 61015, 61017, 61019, 61021, 61023, 61025, 61028,
+ 61031, 61036, 61044, 61049, 61054, 61056, 61067, 61089, 61093, 61099,
+ 61168, 61440, 61488, 61600, 61617, 61633, 61649, 61696, 61712, 61744,
+ 61808, 61926, 61968, 62016, 62032, 62208, 62256, 62263, 62336, 62368,
+ 62406, 62432, 62464, 62528, 62530, 62713, 62720, 62784, 62800, 62971,
+ 63045, 63104, 63232, 0, 42710, 42752, 46900, 46912, 47133, 63488,
+ 1, 32, 256, 0, 65533,
+ };
+static u16 aFts5UnicodeData[] = {
+ 1025, 61, 117, 55, 117, 54, 50, 53, 57, 53,
+ 49, 85, 333, 85, 121, 85, 841, 54, 53, 50,
+ 56, 48, 56, 837, 54, 57, 50, 57, 1057, 61,
+ 53, 151, 58, 53, 56, 58, 39, 52, 57, 34,
+ 58, 56, 58, 57, 79, 56, 37, 85, 56, 47,
+ 39, 51, 111, 53, 745, 57, 233, 773, 57, 261,
+ 1822, 37, 542, 37, 1534, 222, 69, 73, 37, 126,
+ 126, 73, 69, 137, 37, 73, 37, 105, 101, 73,
+ 37, 73, 37, 190, 158, 37, 126, 126, 73, 37,
+ 126, 94, 37, 39, 94, 69, 135, 41, 40, 37,
+ 41, 40, 37, 41, 40, 37, 542, 37, 606, 37,
+ 41, 40, 37, 126, 73, 37, 1886, 197, 73, 37,
+ 73, 69, 126, 105, 37, 286, 2181, 39, 869, 582,
+ 152, 390, 472, 166, 248, 38, 56, 38, 568, 3596,
+ 158, 38, 56, 94, 38, 101, 53, 88, 41, 53,
+ 105, 41, 73, 37, 553, 297, 1125, 94, 37, 105,
+ 101, 798, 133, 94, 57, 126, 94, 37, 1641, 1541,
+ 1118, 58, 172, 75, 1790, 478, 37, 2846, 1225, 38,
+ 213, 1253, 53, 49, 55, 1452, 49, 44, 53, 76,
+ 53, 76, 53, 44, 871, 103, 85, 162, 121, 85,
+ 55, 85, 90, 364, 53, 85, 1031, 38, 327, 684,
+ 333, 149, 71, 44, 3175, 53, 39, 236, 34, 58,
+ 204, 70, 76, 58, 140, 71, 333, 103, 90, 39,
+ 469, 34, 39, 44, 967, 876, 2855, 364, 39, 333,
+ 1063, 300, 70, 58, 117, 38, 711, 140, 38, 300,
+ 38, 108, 38, 172, 501, 807, 108, 53, 39, 359,
+ 876, 108, 42, 1735, 44, 42, 44, 39, 106, 268,
+ 138, 44, 74, 39, 236, 327, 76, 85, 333, 53,
+ 38, 199, 231, 44, 74, 263, 71, 711, 231, 39,
+ 135, 44, 39, 106, 140, 74, 74, 44, 39, 42,
+ 71, 103, 76, 333, 71, 87, 207, 58, 55, 76,
+ 42, 199, 71, 711, 231, 71, 71, 71, 44, 106,
+ 76, 76, 108, 44, 135, 39, 333, 76, 103, 44,
+ 76, 42, 295, 103, 711, 231, 71, 167, 44, 39,
+ 106, 172, 76, 42, 74, 44, 39, 71, 76, 333,
+ 53, 55, 44, 74, 263, 71, 711, 231, 71, 167,
+ 44, 39, 42, 44, 42, 140, 74, 74, 44, 44,
+ 42, 71, 103, 76, 333, 58, 39, 207, 44, 39,
+ 199, 103, 135, 71, 39, 71, 71, 103, 391, 74,
+ 44, 74, 106, 106, 44, 39, 42, 333, 111, 218,
+ 55, 58, 106, 263, 103, 743, 327, 167, 39, 108,
+ 138, 108, 140, 76, 71, 71, 76, 333, 239, 58,
+ 74, 263, 103, 743, 327, 167, 44, 39, 42, 44,
+ 170, 44, 74, 74, 76, 74, 39, 71, 76, 333,
+ 71, 74, 263, 103, 1319, 39, 106, 140, 106, 106,
+ 44, 39, 42, 71, 76, 333, 207, 58, 199, 74,
+ 583, 775, 295, 39, 231, 44, 106, 108, 44, 266,
+ 74, 53, 1543, 44, 71, 236, 55, 199, 38, 268,
+ 53, 333, 85, 71, 39, 71, 39, 39, 135, 231,
+ 103, 39, 39, 71, 135, 44, 71, 204, 76, 39,
+ 167, 38, 204, 333, 135, 39, 122, 501, 58, 53,
+ 122, 76, 218, 333, 335, 58, 44, 58, 44, 58,
+ 44, 54, 50, 54, 50, 74, 263, 1159, 460, 42,
+ 172, 53, 76, 167, 364, 1164, 282, 44, 218, 90,
+ 181, 154, 85, 1383, 74, 140, 42, 204, 42, 76,
+ 74, 76, 39, 333, 213, 199, 74, 76, 135, 108,
+ 39, 106, 71, 234, 103, 140, 423, 44, 74, 76,
+ 202, 44, 39, 42, 333, 106, 44, 90, 1225, 41,
+ 41, 1383, 53, 38, 10631, 135, 231, 39, 135, 1319,
+ 135, 1063, 135, 231, 39, 135, 487, 1831, 135, 2151,
+ 108, 309, 655, 519, 346, 2727, 49, 19847, 85, 551,
+ 61, 839, 54, 50, 2407, 117, 110, 423, 135, 108,
+ 583, 108, 85, 583, 76, 423, 103, 76, 1671, 76,
+ 42, 236, 266, 44, 74, 364, 117, 38, 117, 55,
+ 39, 44, 333, 335, 213, 49, 149, 108, 61, 333,
+ 1127, 38, 1671, 1319, 44, 39, 2247, 935, 108, 138,
+ 76, 106, 74, 44, 202, 108, 58, 85, 333, 967,
+ 167, 1415, 554, 231, 74, 333, 47, 1114, 743, 76,
+ 106, 85, 1703, 42, 44, 42, 236, 44, 42, 44,
+ 74, 268, 202, 332, 44, 333, 333, 245, 38, 213,
+ 140, 42, 1511, 44, 42, 172, 42, 44, 170, 44,
+ 74, 231, 333, 245, 346, 300, 314, 76, 42, 967,
+ 42, 140, 74, 76, 42, 44, 74, 71, 333, 1415,
+ 44, 42, 76, 106, 44, 42, 108, 74, 149, 1159,
+ 266, 268, 74, 76, 181, 333, 103, 333, 967, 198,
+ 85, 277, 108, 53, 428, 42, 236, 135, 44, 135,
+ 74, 44, 71, 1413, 2022, 421, 38, 1093, 1190, 1260,
+ 140, 4830, 261, 3166, 261, 265, 197, 201, 261, 265,
+ 261, 265, 197, 201, 261, 41, 41, 41, 94, 229,
+ 265, 453, 261, 264, 261, 264, 261, 264, 165, 69,
+ 137, 40, 56, 37, 120, 101, 69, 137, 40, 120,
+ 133, 69, 137, 120, 261, 169, 120, 101, 69, 137,
+ 40, 88, 381, 162, 209, 85, 52, 51, 54, 84,
+ 51, 54, 52, 277, 59, 60, 162, 61, 309, 52,
+ 51, 149, 80, 117, 57, 54, 50, 373, 57, 53,
+ 48, 341, 61, 162, 194, 47, 38, 207, 121, 54,
+ 50, 38, 335, 121, 54, 50, 422, 855, 428, 139,
+ 44, 107, 396, 90, 41, 154, 41, 90, 37, 105,
+ 69, 105, 37, 58, 41, 90, 57, 169, 218, 41,
+ 58, 41, 58, 41, 58, 137, 58, 37, 137, 37,
+ 135, 37, 90, 69, 73, 185, 94, 101, 58, 57,
+ 90, 37, 58, 527, 1134, 94, 142, 47, 185, 186,
+ 89, 154, 57, 90, 57, 90, 57, 250, 57, 1018,
+ 89, 90, 57, 58, 57, 1018, 8601, 282, 153, 666,
+ 89, 250, 54, 50, 2618, 57, 986, 825, 1306, 217,
+ 602, 1274, 378, 1935, 2522, 719, 5882, 57, 314, 57,
+ 1754, 281, 3578, 57, 4634, 3322, 54, 50, 54, 50,
+ 54, 50, 54, 50, 54, 50, 54, 50, 54, 50,
+ 975, 1434, 185, 54, 50, 1017, 54, 50, 54, 50,
+ 54, 50, 54, 50, 54, 50, 537, 8218, 4217, 54,
+ 50, 54, 50, 54, 50, 54, 50, 54, 50, 54,
+ 50, 54, 50, 54, 50, 54, 50, 54, 50, 54,
+ 50, 2041, 54, 50, 54, 50, 1049, 54, 50, 8281,
+ 1562, 697, 90, 217, 346, 1513, 1509, 126, 73, 69,
+ 254, 105, 37, 94, 37, 94, 165, 70, 105, 37,
+ 3166, 37, 218, 158, 108, 94, 149, 47, 85, 1221,
+ 37, 37, 1799, 38, 53, 44, 743, 231, 231, 231,
+ 231, 231, 231, 231, 231, 1036, 85, 52, 51, 52,
+ 51, 117, 52, 51, 53, 52, 51, 309, 49, 85,
+ 49, 53, 52, 51, 85, 52, 51, 54, 50, 54,
+ 50, 54, 50, 54, 50, 181, 38, 341, 81, 858,
+ 2874, 6874, 410, 61, 117, 58, 38, 39, 46, 54,
+ 50, 54, 50, 54, 50, 54, 50, 54, 50, 90,
+ 54, 50, 54, 50, 54, 50, 54, 50, 49, 54,
+ 82, 58, 302, 140, 74, 49, 166, 90, 110, 38,
+ 39, 53, 90, 2759, 76, 88, 70, 39, 49, 2887,
+ 53, 102, 39, 1319, 3015, 90, 143, 346, 871, 1178,
+ 519, 1018, 335, 986, 271, 58, 495, 1050, 335, 1274,
+ 495, 2042, 8218, 39, 39, 2074, 39, 39, 679, 38,
+ 36583, 1786, 1287, 198, 85, 8583, 38, 117, 519, 333,
+ 71, 1502, 39, 44, 107, 53, 332, 53, 38, 798,
+ 44, 2247, 334, 76, 213, 760, 294, 88, 478, 69,
+ 2014, 38, 261, 190, 350, 38, 88, 158, 158, 382,
+ 70, 37, 231, 44, 103, 44, 135, 44, 743, 74,
+ 76, 42, 154, 207, 90, 55, 58, 1671, 149, 74,
+ 1607, 522, 44, 85, 333, 588, 199, 117, 39, 333,
+ 903, 268, 85, 743, 364, 74, 53, 935, 108, 42,
+ 1511, 44, 74, 140, 74, 44, 138, 437, 38, 333,
+ 85, 1319, 204, 74, 76, 74, 76, 103, 44, 263,
+ 44, 42, 333, 149, 519, 38, 199, 122, 39, 42,
+ 1543, 44, 39, 108, 71, 76, 167, 76, 39, 44,
+ 39, 71, 38, 85, 359, 42, 76, 74, 85, 39,
+ 70, 42, 44, 199, 199, 199, 231, 231, 1127, 74,
+ 44, 74, 44, 74, 53, 42, 44, 333, 39, 39,
+ 743, 1575, 36, 68, 68, 36, 63, 63, 11719, 3399,
+ 229, 165, 39, 44, 327, 57, 423, 167, 39, 71,
+ 71, 3463, 536, 11623, 54, 50, 2055, 1735, 391, 55,
+ 58, 524, 245, 54, 50, 53, 236, 53, 81, 80,
+ 54, 50, 54, 50, 54, 50, 54, 50, 54, 50,
+ 54, 50, 54, 50, 54, 50, 85, 54, 50, 149,
+ 112, 117, 149, 49, 54, 50, 54, 50, 54, 50,
+ 117, 57, 49, 121, 53, 55, 85, 167, 4327, 34,
+ 117, 55, 117, 54, 50, 53, 57, 53, 49, 85,
+ 333, 85, 121, 85, 841, 54, 53, 50, 56, 48,
+ 56, 837, 54, 57, 50, 57, 54, 50, 53, 54,
+ 50, 85, 327, 38, 1447, 70, 999, 199, 199, 199,
+ 103, 87, 57, 56, 58, 87, 58, 153, 90, 98,
+ 90, 391, 839, 615, 71, 487, 455, 3943, 117, 1455,
+ 314, 1710, 143, 570, 47, 410, 1466, 44, 935, 1575,
+ 999, 143, 551, 46, 263, 46, 967, 53, 1159, 263,
+ 53, 174, 1289, 1285, 2503, 333, 199, 39, 1415, 71,
+ 39, 743, 53, 271, 711, 207, 53, 839, 53, 1799,
+ 71, 39, 108, 76, 140, 135, 103, 871, 108, 44,
+ 271, 309, 935, 79, 53, 1735, 245, 711, 271, 615,
+ 271, 2343, 1007, 42, 44, 42, 1703, 492, 245, 655,
+ 333, 76, 42, 1447, 106, 140, 74, 76, 85, 34,
+ 149, 807, 333, 108, 1159, 172, 42, 268, 333, 149,
+ 76, 42, 1543, 106, 300, 74, 135, 149, 333, 1383,
+ 44, 42, 44, 74, 204, 42, 44, 333, 28135, 3182,
+ 149, 34279, 18215, 2215, 39, 1482, 140, 422, 71, 7898,
+ 1274, 1946, 74, 108, 122, 202, 258, 268, 90, 236,
+ 986, 140, 1562, 2138, 108, 58, 2810, 591, 841, 837,
+ 841, 229, 581, 841, 837, 41, 73, 41, 73, 137,
+ 265, 133, 37, 229, 357, 841, 837, 73, 137, 265,
+ 233, 837, 73, 137, 169, 41, 233, 837, 841, 837,
+ 841, 837, 841, 837, 841, 837, 841, 837, 841, 901,
+ 809, 57, 805, 57, 197, 809, 57, 805, 57, 197,
+ 809, 57, 805, 57, 197, 809, 57, 805, 57, 197,
+ 809, 57, 805, 57, 197, 94, 1613, 135, 871, 71,
+ 39, 39, 327, 135, 39, 39, 39, 39, 39, 39,
+ 103, 71, 39, 39, 39, 39, 39, 39, 71, 39,
+ 135, 231, 135, 135, 39, 327, 551, 103, 167, 551,
+ 89, 1434, 3226, 506, 474, 506, 506, 367, 1018, 1946,
+ 1402, 954, 1402, 314, 90, 1082, 218, 2266, 666, 1210,
+ 186, 570, 2042, 58, 5850, 154, 2010, 154, 794, 2266,
+ 378, 2266, 3738, 39, 39, 39, 39, 39, 39, 17351,
+ 34, 3074, 7692, 63, 63,
+ };
+
+static int sqlite3Fts5UnicodeCategory(u32 iCode) {
+ int iRes = -1;
+ int iHi;
+ int iLo;
+ int ret;
+ u16 iKey;
+
+ if( iCode>=(1<<20) ){
+ return 0;
+ }
+ iLo = aFts5UnicodeBlock[(iCode>>16)];
+ iHi = aFts5UnicodeBlock[1+(iCode>>16)];
+ iKey = (iCode & 0xFFFF);
+ while( iHi>iLo ){
+ int iTest = (iHi + iLo) / 2;
+ assert( iTest>=iLo && iTest<iHi );
+ if( iKey>=aFts5UnicodeMap[iTest] ){
+ iRes = iTest;
+ iLo = iTest+1;
+ }else{
+ iHi = iTest;
+ }
+ }
+
+ if( iRes<0 ) return 0;
+ if( iKey>=(aFts5UnicodeMap[iRes]+(aFts5UnicodeData[iRes]>>5)) ) return 0;
+ ret = aFts5UnicodeData[iRes] & 0x1F;
+ if( ret!=30 ) return ret;
+ return ((iKey - aFts5UnicodeMap[iRes]) & 0x01) ? 5 : 9;
+}
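+
+/*
+** Editor's note on the encoding (inferred from the lookups above): each
+** aFts5UnicodeData[] entry describes a run of codepoints, with the run
+** length in its upper 11 bits (>>5) and the category code in its low 5
+** bits (&0x1F). Category code 30 is special: it marks runs whose
+** codepoints alternate between categories 9 (Lu) and 5 (Ll), starting
+** with 9.
+*/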
+
+static void sqlite3Fts5UnicodeAscii(u8 *aArray, u8 *aAscii){
+ int i = 0;
+ int iTbl = 0;
+ while( i<128 ){
+ int bToken = aArray[ aFts5UnicodeData[iTbl] & 0x1F ];
+ int n = (aFts5UnicodeData[iTbl] >> 5) + i;
+ for(; i<128 && i<n; i++){
+ aAscii[i] = (u8)bToken;
+ }
+ iTbl++;
+ }
+}
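+
+/*
+** Editor's note: the loop above expands the same run-length encoding
+** into a 128-entry table, so callers can classify plain ASCII bytes
+** with a single array lookup instead of a binary search.
+*/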
+
+/*
+** 2015 May 30
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+******************************************************************************
+**
+** Routines for varint serialization and deserialization.
+*/
+
+
+/* #include "fts5Int.h" */
+
+/*
+** This is a copy of the sqlite3GetVarint32() routine from the SQLite core.
+** Unlike the core version, however, this one also handles the single-byte
+** case, which the core routine expects callers to have dealt with before
+** it is called.
+*/
+static int sqlite3Fts5GetVarint32(const unsigned char *p, u32 *v){
+ u32 a,b;
+
+ /* The 1-byte case. Overwhelmingly the most common. */
+ a = *p;
+ /* a: p0 (unmasked) */
+ if (!(a&0x80))
+ {
+ /* Values between 0 and 127 */
+ *v = a;
+ return 1;
+ }
+
+ /* The 2-byte case */
+ p++;
+ b = *p;
+ /* b: p1 (unmasked) */
+ if (!(b&0x80))
+ {
+ /* Values between 128 and 16383 */
+ a &= 0x7f;
+ a = a<<7;
+ *v = a | b;
+ return 2;
+ }
+
+ /* The 3-byte case */
+ p++;
+ a = a<<14;
+ a |= *p;
+ /* a: p0<<14 | p2 (unmasked) */
+ if (!(a&0x80))
+ {
+ /* Values between 16384 and 2097151 */
+ a &= (0x7f<<14)|(0x7f);
+ b &= 0x7f;
+ b = b<<7;
+ *v = a | b;
+ return 3;
+ }
+
+ /* A 32-bit varint is used to store size information in btrees.
+  ** Objects are rarely larger than the 2MiB limit of a 3-byte varint.
+ ** A 3-byte varint is sufficient, for example, to record the size
+ ** of a 1048569-byte BLOB or string.
+ **
+  ** We only unroll the first 1-, 2-, and 3-byte cases. The very
+ ** rare larger cases can be handled by the slower 64-bit varint
+ ** routine.
+ */
+ {
+ u64 v64;
+ u8 n;
+ p -= 2;
+ n = sqlite3Fts5GetVarint(p, &v64);
+ *v = ((u32)v64) & 0x7FFFFFFF;
+ assert( n>3 && n<=9 );
+ return n;
+ }
+}
+
+
+/*
+** Bitmasks used by sqlite3GetVarint(). These precomputed constants
+** are defined here rather than simply putting the constant expressions
+** inline in order to work around bugs in the RVT compiler.
+**
+** SLOT_2_0 A mask for (0x7f<<14) | 0x7f
+**
+** SLOT_4_2_0 A mask for (0xfU<<28) | SLOT_2_0
+*/
+#define SLOT_2_0 0x001fc07f
+#define SLOT_4_2_0 0xf01fc07f
+
+/*
+** Read a 64-bit variable-length integer from memory starting at p[0].
+** Return the number of bytes read. The value is stored in *v.
+*/
+static u8 sqlite3Fts5GetVarint(const unsigned char *p, u64 *v){
+ u32 a,b,s;
+
+ a = *p;
+ /* a: p0 (unmasked) */
+ if (!(a&0x80))
+ {
+ *v = a;
+ return 1;
+ }
+
+ p++;
+ b = *p;
+ /* b: p1 (unmasked) */
+ if (!(b&0x80))
+ {
+ a &= 0x7f;
+ a = a<<7;
+ a |= b;
+ *v = a;
+ return 2;
+ }
+
+ /* Verify that constants are precomputed correctly */
+ assert( SLOT_2_0 == ((0x7f<<14) | (0x7f)) );
+ assert( SLOT_4_2_0 == ((0xfU<<28) | (0x7f<<14) | (0x7f)) );
+
+ p++;
+ a = a<<14;
+ a |= *p;
+ /* a: p0<<14 | p2 (unmasked) */
+ if (!(a&0x80))
+ {
+ a &= SLOT_2_0;
+ b &= 0x7f;
+ b = b<<7;
+ a |= b;
+ *v = a;
+ return 3;
+ }
+
+ /* CSE1 from below */
+ a &= SLOT_2_0;
+ p++;
+ b = b<<14;
+ b |= *p;
+ /* b: p1<<14 | p3 (unmasked) */
+ if (!(b&0x80))
+ {
+ b &= SLOT_2_0;
+ /* moved CSE1 up */
+ /* a &= (0x7f<<14)|(0x7f); */
+ a = a<<7;
+ a |= b;
+ *v = a;
+ return 4;
+ }
+
+ /* a: p0<<14 | p2 (masked) */
+ /* b: p1<<14 | p3 (unmasked) */
+ /* 1:save off p0<<21 | p1<<14 | p2<<7 | p3 (masked) */
+ /* moved CSE1 up */
+ /* a &= (0x7f<<14)|(0x7f); */
+ b &= SLOT_2_0;
+ s = a;
+ /* s: p0<<14 | p2 (masked) */
+
+ p++;
+ a = a<<14;
+ a |= *p;
+ /* a: p0<<28 | p2<<14 | p4 (unmasked) */
+ if (!(a&0x80))
+ {
+    /* we can skip these because they were (effectively) done above in calc'ing s */
+ /* a &= (0x7f<<28)|(0x7f<<14)|(0x7f); */
+ /* b &= (0x7f<<14)|(0x7f); */
+ b = b<<7;
+ a |= b;
+ s = s>>18;
+ *v = ((u64)s)<<32 | a;
+ return 5;
+ }
+
+ /* 2:save off p0<<21 | p1<<14 | p2<<7 | p3 (masked) */
+ s = s<<7;
+ s |= b;
+ /* s: p0<<21 | p1<<14 | p2<<7 | p3 (masked) */
+
+ p++;
+ b = b<<14;
+ b |= *p;
+ /* b: p1<<28 | p3<<14 | p5 (unmasked) */
+ if (!(b&0x80))
+ {
+  /* we can skip this because it was (effectively) done above in calc'ing s */
+ /* b &= (0x7f<<28)|(0x7f<<14)|(0x7f); */
+ a &= SLOT_2_0;
+ a = a<<7;
+ a |= b;
+ s = s>>18;
+ *v = ((u64)s)<<32 | a;
+ return 6;
+ }
+
+ p++;
+ a = a<<14;
+ a |= *p;
+ /* a: p2<<28 | p4<<14 | p6 (unmasked) */
+ if (!(a&0x80))
+ {
+ a &= SLOT_4_2_0;
+ b &= SLOT_2_0;
+ b = b<<7;
+ a |= b;
+ s = s>>11;
+ *v = ((u64)s)<<32 | a;
+ return 7;
+ }
+
+ /* CSE2 from below */
+ a &= SLOT_2_0;
+ p++;
+ b = b<<14;
+ b |= *p;
+ /* b: p3<<28 | p5<<14 | p7 (unmasked) */
+ if (!(b&0x80))
+ {
+ b &= SLOT_4_2_0;
+ /* moved CSE2 up */
+ /* a &= (0x7f<<14)|(0x7f); */
+ a = a<<7;
+ a |= b;
+ s = s>>4;
+ *v = ((u64)s)<<32 | a;
+ return 8;
+ }
+
+ p++;
+ a = a<<15;
+ a |= *p;
+ /* a: p4<<29 | p6<<15 | p8 (unmasked) */
+
+ /* moved CSE2 up */
+ /* a &= (0x7f<<29)|(0x7f<<15)|(0xff); */
+ b &= SLOT_2_0;
+ b = b<<8;
+ a |= b;
+
+ s = s<<4;
+ b = p[-4];
+ b &= 0x7f;
+ b = b>>3;
+ s |= b;
+
+ *v = ((u64)s)<<32 | a;
+
+ return 9;
+}
+
+/*
+** The variable-length integer encoding is as follows:
+**
+** KEY:
+** A = 0xxxxxxx 7 bits of data and one flag bit
+** B = 1xxxxxxx 7 bits of data and one flag bit
+** C = xxxxxxxx 8 bits of data
+**
+** 7 bits - A
+** 14 bits - BA
+** 21 bits - BBA
+** 28 bits - BBBA
+** 35 bits - BBBBA
+** 42 bits - BBBBBA
+** 49 bits - BBBBBBA
+** 56 bits - BBBBBBBA
+** 64 bits - BBBBBBBBC
+*/
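+
+/*
+** Worked example (editor's annotation): the value 300 splits into the
+** 7-bit groups 0000010 and 0101100, so it is stored big-endian as the
+** two bytes 0x82 0x2C ("BA" in the key above). Decoding reverses this:
+** ((0x82 & 0x7f)<<7) | 0x2C == 300.
+*/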
+
+#ifdef SQLITE_NOINLINE
+# define FTS5_NOINLINE SQLITE_NOINLINE
+#else
+# define FTS5_NOINLINE
+#endif
+
+/*
+** Write a 64-bit variable-length integer to memory starting at p[0].
+** The length of the data written will be between 1 and 9 bytes. The number
+** of bytes written is returned.
+**
+** A variable-length integer consists of the lower 7 bits of each byte
+** for all bytes that have the 8th bit set and one byte with the 8th
+** bit clear. Except, if we get to the 9th byte, it stores the full
+** 8 bits and is the last byte.
+*/
+static int FTS5_NOINLINE fts5PutVarint64(unsigned char *p, u64 v){
+ int i, j, n;
+ u8 buf[10];
+ if( v & (((u64)0xff000000)<<32) ){
+ p[8] = (u8)v;
+ v >>= 8;
+ for(i=7; i>=0; i--){
+ p[i] = (u8)((v & 0x7f) | 0x80);
+ v >>= 7;
+ }
+ return 9;
+ }
+ n = 0;
+ do{
+ buf[n++] = (u8)((v & 0x7f) | 0x80);
+ v >>= 7;
+ }while( v!=0 );
+ buf[0] &= 0x7f;
+ assert( n<=9 );
+ for(i=0, j=n-1; j>=0; j--, i++){
+ p[i] = buf[j];
+ }
+ return n;
+}
+
+static int sqlite3Fts5PutVarint(unsigned char *p, u64 v){
+ if( v<=0x7f ){
+ p[0] = v&0x7f;
+ return 1;
+ }
+ if( v<=0x3fff ){
+ p[0] = ((v>>7)&0x7f)|0x80;
+ p[1] = v&0x7f;
+ return 2;
+ }
+ return fts5PutVarint64(p,v);
+}
+
+
+static int sqlite3Fts5GetVarintLen(u32 iVal){
+#if 0
+ if( iVal<(1 << 7 ) ) return 1;
+#endif
+ assert( iVal>=(1 << 7) );
+ if( iVal<(1 << 14) ) return 2;
+ if( iVal<(1 << 21) ) return 3;
+ if( iVal<(1 << 28) ) return 4;
+ return 5;
+}
+
+/*
+** 2015 May 08
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+******************************************************************************
+**
+** This is an SQLite virtual table module implementing direct access to an
+** existing FTS5 index. The module may create several different types of
+** tables:
+**
+** col:
+** CREATE TABLE vocab(term, col, doc, cnt, PRIMARY KEY(term, col));
+**
+** One row for each term/column combination. The value of $doc is set to
+** the number of fts5 rows that contain at least one instance of term
+** $term within column $col. Field $cnt is set to the total number of
+** instances of term $term in column $col (in any row of the fts5 table).
+**
+** row:
+** CREATE TABLE vocab(term, doc, cnt, PRIMARY KEY(term));
+**
+** One row for each term in the database. The value of $doc is set to
+** the number of fts5 rows that contain at least one instance of term
+** $term. Field $cnt is set to the total number of instances of term
+** $term in the database.
+**
+** instance:
+** CREATE TABLE vocab(term, doc, col, offset, PRIMARY KEY(<all-fields>));
+**
+** One row for each term instance in the database.
+*/
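+
+/*
+** Usage sketch (editor's note, not part of the original source):
+**
+**   CREATE VIRTUAL TABLE ft USING fts5(a, b);
+**   CREATE VIRTUAL TABLE ft_v USING fts5vocab('ft', 'row');
+**   SELECT term, doc, cnt FROM ft_v;   -- per-term statistics
+**
+** The TEMP-schema form takes an extra leading argument naming the
+** database that contains the fts5 table:
+**
+**   CREATE VIRTUAL TABLE temp.ft_v USING fts5vocab('main', 'ft', 'row');
+*/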
+
+
+/* #include "fts5Int.h" */
+
+
+typedef struct Fts5VocabTable Fts5VocabTable;
+typedef struct Fts5VocabCursor Fts5VocabCursor;
+
+struct Fts5VocabTable {
+ sqlite3_vtab base;
+ char *zFts5Tbl; /* Name of fts5 table */
+ char *zFts5Db; /* Db containing fts5 table */
+ sqlite3 *db; /* Database handle */
+ Fts5Global *pGlobal; /* FTS5 global object for this database */
+ int eType; /* FTS5_VOCAB_COL, ROW or INSTANCE */
+ unsigned bBusy; /* True if busy */
+};
+
+struct Fts5VocabCursor {
+ sqlite3_vtab_cursor base;
+ sqlite3_stmt *pStmt; /* Statement holding lock on pIndex */
+ Fts5Table *pFts5; /* Associated FTS5 table */
+
+ int bEof; /* True if this cursor is at EOF */
+ Fts5IndexIter *pIter; /* Term/rowid iterator object */
+
+ int nLeTerm; /* Size of zLeTerm in bytes */
+  char *zLeTerm;                  /* (term <= $zLeTerm) parameter, or NULL */
+
+ /* These are used by 'col' tables only */
+ int iCol;
+ i64 *aCnt;
+ i64 *aDoc;
+
+ /* Output values used by all tables. */
+ i64 rowid; /* This table's current rowid value */
+ Fts5Buffer term; /* Current value of 'term' column */
+
+  /* Output values used by 'instance' tables only */
+ i64 iInstPos;
+ int iInstOff;
+};
+
+#define FTS5_VOCAB_COL 0
+#define FTS5_VOCAB_ROW 1
+#define FTS5_VOCAB_INSTANCE 2
+
+#define FTS5_VOCAB_COL_SCHEMA "term, col, doc, cnt"
+#define FTS5_VOCAB_ROW_SCHEMA "term, doc, cnt"
+#define FTS5_VOCAB_INST_SCHEMA "term, doc, col, offset"
+
+/*
+** Bits for the mask used as the idxNum value by xBestIndex/xFilter.
+*/
+#define FTS5_VOCAB_TERM_EQ 0x01
+#define FTS5_VOCAB_TERM_GE 0x02
+#define FTS5_VOCAB_TERM_LE 0x04
+
+
+/*
+** Translate a string containing an fts5vocab table type to an
+** FTS5_VOCAB_XXX constant. If successful, set *peType to the output
+** value and return SQLITE_OK. Otherwise, set *pzErr to an error message
+** and return SQLITE_ERROR.
+*/
+static int fts5VocabTableType(const char *zType, char **pzErr, int *peType){
+ int rc = SQLITE_OK;
+ char *zCopy = sqlite3Fts5Strndup(&rc, zType, -1);
+ if( rc==SQLITE_OK ){
+ sqlite3Fts5Dequote(zCopy);
+ if( sqlite3_stricmp(zCopy, "col")==0 ){
+ *peType = FTS5_VOCAB_COL;
+    }else if( sqlite3_stricmp(zCopy, "row")==0 ){
+      *peType = FTS5_VOCAB_ROW;
+    }else if( sqlite3_stricmp(zCopy, "instance")==0 ){
+      *peType = FTS5_VOCAB_INSTANCE;
+    }else{
+ *pzErr = sqlite3_mprintf("fts5vocab: unknown table type: %Q", zCopy);
+ rc = SQLITE_ERROR;
+ }
+ sqlite3_free(zCopy);
+ }
+
+ return rc;
+}
+
+
+/*
+** The xDisconnect() virtual table method.
+*/
+static int fts5VocabDisconnectMethod(sqlite3_vtab *pVtab){
+ Fts5VocabTable *pTab = (Fts5VocabTable*)pVtab;
+ sqlite3_free(pTab);
+ return SQLITE_OK;
+}
+
+/*
+** The xDestroy() virtual table method.
+*/
+static int fts5VocabDestroyMethod(sqlite3_vtab *pVtab){
+ Fts5VocabTable *pTab = (Fts5VocabTable*)pVtab;
+ sqlite3_free(pTab);
+ return SQLITE_OK;
+}
+
+/*
+** This function is the implementation of both the xConnect and xCreate
+** methods of the fts5vocab virtual table.
+**
+** The argv[] array contains the following:
+**
+** argv[0] -> module name ("fts5vocab")
+** argv[1] -> database name
+** argv[2] -> table name
+**
+** then:
+**
+** argv[3] -> name of fts5 table
+** argv[4] -> type of fts5vocab table
+**
+** or, for tables in the TEMP schema only:
+**
+** argv[3] -> name of the database containing the fts5 table
+** argv[4] -> name of fts5 table
+** argv[5] -> type of fts5vocab table
+*/
+static int fts5VocabInitVtab(
+ sqlite3 *db, /* The SQLite database connection */
+ void *pAux, /* Pointer to Fts5Global object */
+ int argc, /* Number of elements in argv array */
+ const char * const *argv, /* xCreate/xConnect argument array */
+ sqlite3_vtab **ppVTab, /* Write the resulting vtab structure here */
+ char **pzErr /* Write any error message here */
+){
+ const char *azSchema[] = {
+ "CREATE TABlE vocab(" FTS5_VOCAB_COL_SCHEMA ")",
+ "CREATE TABlE vocab(" FTS5_VOCAB_ROW_SCHEMA ")",
+ "CREATE TABlE vocab(" FTS5_VOCAB_INST_SCHEMA ")"
+ };
+
+ Fts5VocabTable *pRet = 0;
+ int rc = SQLITE_OK; /* Return code */
+ int bDb;
+
+ bDb = (argc==6 && strlen(argv[1])==4 && memcmp("temp", argv[1], 4)==0);
+
+ if( argc!=5 && bDb==0 ){
+ *pzErr = sqlite3_mprintf("wrong number of vtable arguments");
+ rc = SQLITE_ERROR;
+ }else{
+ int nByte; /* Bytes of space to allocate */
+ const char *zDb = bDb ? argv[3] : argv[1];
+ const char *zTab = bDb ? argv[4] : argv[3];
+ const char *zType = bDb ? argv[5] : argv[4];
+ int nDb = (int)strlen(zDb)+1;
+ int nTab = (int)strlen(zTab)+1;
+ int eType = 0;
+
+ rc = fts5VocabTableType(zType, pzErr, &eType);
+ if( rc==SQLITE_OK ){
+ assert( eType>=0 && eType<ArraySize(azSchema) );
+ rc = sqlite3_declare_vtab(db, azSchema[eType]);
+ }
+
+ nByte = sizeof(Fts5VocabTable) + nDb + nTab;
+ pRet = sqlite3Fts5MallocZero(&rc, nByte);
+ if( pRet ){
+ pRet->pGlobal = (Fts5Global*)pAux;
+ pRet->eType = eType;
+ pRet->db = db;
+ pRet->zFts5Tbl = (char*)&pRet[1];
+ pRet->zFts5Db = &pRet->zFts5Tbl[nTab];
+ memcpy(pRet->zFts5Tbl, zTab, nTab);
+ memcpy(pRet->zFts5Db, zDb, nDb);
+ sqlite3Fts5Dequote(pRet->zFts5Tbl);
+ sqlite3Fts5Dequote(pRet->zFts5Db);
+ }
+ }
+
+ *ppVTab = (sqlite3_vtab*)pRet;
+ return rc;
+}
+
+
+/*
+** The xConnect() and xCreate() methods for the virtual table. All the
+** work is done in function fts5VocabInitVtab().
+*/
+static int fts5VocabConnectMethod(
+ sqlite3 *db, /* Database connection */
+ void *pAux, /* Pointer to tokenizer hash table */
+ int argc, /* Number of elements in argv array */
+ const char * const *argv, /* xCreate/xConnect argument array */
+ sqlite3_vtab **ppVtab, /* OUT: New sqlite3_vtab object */
+ char **pzErr /* OUT: sqlite3_malloc'd error message */
+){
+ return fts5VocabInitVtab(db, pAux, argc, argv, ppVtab, pzErr);
+}
+static int fts5VocabCreateMethod(
+ sqlite3 *db, /* Database connection */
+ void *pAux, /* Pointer to tokenizer hash table */
+ int argc, /* Number of elements in argv array */
+ const char * const *argv, /* xCreate/xConnect argument array */
+ sqlite3_vtab **ppVtab, /* OUT: New sqlite3_vtab object */
+ char **pzErr /* OUT: sqlite3_malloc'd error message */
+){
+ return fts5VocabInitVtab(db, pAux, argc, argv, ppVtab, pzErr);
+}
+
+/*
+** Implementation of the xBestIndex method.
+**
+** Only constraints of the form:
+**
+** term <= ?
+** term == ?
+** term >= ?
+**
+** are interpreted. Less-than and less-than-or-equal are treated
+** identically, as are greater-than and greater-than-or-equal.
+*/
+static int fts5VocabBestIndexMethod(
+ sqlite3_vtab *pUnused,
+ sqlite3_index_info *pInfo
+){
+ int i;
+ int iTermEq = -1;
+ int iTermGe = -1;
+ int iTermLe = -1;
+ int idxNum = 0;
+ int nArg = 0;
+
+ UNUSED_PARAM(pUnused);
+
+ for(i=0; i<pInfo->nConstraint; i++){
+ struct sqlite3_index_constraint *p = &pInfo->aConstraint[i];
+ if( p->usable==0 ) continue;
+ if( p->iColumn==0 ){ /* term column */
+ if( p->op==SQLITE_INDEX_CONSTRAINT_EQ ) iTermEq = i;
+ if( p->op==SQLITE_INDEX_CONSTRAINT_LE ) iTermLe = i;
+ if( p->op==SQLITE_INDEX_CONSTRAINT_LT ) iTermLe = i;
+ if( p->op==SQLITE_INDEX_CONSTRAINT_GE ) iTermGe = i;
+ if( p->op==SQLITE_INDEX_CONSTRAINT_GT ) iTermGe = i;
+ }
+ }
+
+ if( iTermEq>=0 ){
+ idxNum |= FTS5_VOCAB_TERM_EQ;
+ pInfo->aConstraintUsage[iTermEq].argvIndex = ++nArg;
+ pInfo->estimatedCost = 100;
+ }else{
+ pInfo->estimatedCost = 1000000;
+ if( iTermGe>=0 ){
+ idxNum |= FTS5_VOCAB_TERM_GE;
+ pInfo->aConstraintUsage[iTermGe].argvIndex = ++nArg;
+ pInfo->estimatedCost = pInfo->estimatedCost / 2;
+ }
+ if( iTermLe>=0 ){
+ idxNum |= FTS5_VOCAB_TERM_LE;
+ pInfo->aConstraintUsage[iTermLe].argvIndex = ++nArg;
+ pInfo->estimatedCost = pInfo->estimatedCost / 2;
+ }
+ }
+
+ /* This virtual table always delivers results in ascending order of
+ ** the "term" column (column 0). So if the user has requested this
+ ** specifically - "ORDER BY term" or "ORDER BY term ASC" - set the
+ ** sqlite3_index_info.orderByConsumed flag to tell the core the results
+ ** are already in sorted order. */
+ if( pInfo->nOrderBy==1
+ && pInfo->aOrderBy[0].iColumn==0
+ && pInfo->aOrderBy[0].desc==0
+ ){
+ pInfo->orderByConsumed = 1;
+ }
+
+ pInfo->idxNum = idxNum;
+ return SQLITE_OK;
+}
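+
+/*
+** Example (editor's annotation): for a query such as
+**
+**   SELECT * FROM ft_v WHERE term >= 'a' AND term < 'm';
+**
+** both constraints are usable on column 0, so idxNum becomes
+** FTS5_VOCAB_TERM_GE|FTS5_VOCAB_TERM_LE (0x06) and xFilter receives the
+** two bound values as apVal[0] and apVal[1].
+*/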
+
+/*
+** Implementation of xOpen method.
+*/
+static int fts5VocabOpenMethod(
+ sqlite3_vtab *pVTab,
+ sqlite3_vtab_cursor **ppCsr
+){
+ Fts5VocabTable *pTab = (Fts5VocabTable*)pVTab;
+ Fts5Table *pFts5 = 0;
+ Fts5VocabCursor *pCsr = 0;
+ int rc = SQLITE_OK;
+ sqlite3_stmt *pStmt = 0;
+ char *zSql = 0;
+
+ if( pTab->bBusy ){
+ pVTab->zErrMsg = sqlite3_mprintf(
+ "recursive definition for %s.%s", pTab->zFts5Db, pTab->zFts5Tbl
+ );
+ return SQLITE_ERROR;
+ }
+ zSql = sqlite3Fts5Mprintf(&rc,
+ "SELECT t.%Q FROM %Q.%Q AS t WHERE t.%Q MATCH '*id'",
+ pTab->zFts5Tbl, pTab->zFts5Db, pTab->zFts5Tbl, pTab->zFts5Tbl
+ );
+ if( zSql ){
+ rc = sqlite3_prepare_v2(pTab->db, zSql, -1, &pStmt, 0);
+ }
+ sqlite3_free(zSql);
+ assert( rc==SQLITE_OK || pStmt==0 );
+ if( rc==SQLITE_ERROR ) rc = SQLITE_OK;
+
+ pTab->bBusy = 1;
+ if( pStmt && sqlite3_step(pStmt)==SQLITE_ROW ){
+ i64 iId = sqlite3_column_int64(pStmt, 0);
+ pFts5 = sqlite3Fts5TableFromCsrid(pTab->pGlobal, iId);
+ }
+ pTab->bBusy = 0;
+
+ if( rc==SQLITE_OK ){
+ if( pFts5==0 ){
+ rc = sqlite3_finalize(pStmt);
+ pStmt = 0;
+ if( rc==SQLITE_OK ){
+ pVTab->zErrMsg = sqlite3_mprintf(
+ "no such fts5 table: %s.%s", pTab->zFts5Db, pTab->zFts5Tbl
+ );
+ rc = SQLITE_ERROR;
+ }
+ }else{
+ rc = sqlite3Fts5FlushToDisk(pFts5);
+ }
+ }
+
+ if( rc==SQLITE_OK ){
+ int nByte = pFts5->pConfig->nCol * sizeof(i64)*2 + sizeof(Fts5VocabCursor);
+ pCsr = (Fts5VocabCursor*)sqlite3Fts5MallocZero(&rc, nByte);
+ }
+
+ if( pCsr ){
+ pCsr->pFts5 = pFts5;
+ pCsr->pStmt = pStmt;
+ pCsr->aCnt = (i64*)&pCsr[1];
+ pCsr->aDoc = &pCsr->aCnt[pFts5->pConfig->nCol];
+ }else{
+ sqlite3_finalize(pStmt);
+ }
+
+ *ppCsr = (sqlite3_vtab_cursor*)pCsr;
+ return rc;
+}
+
+static void fts5VocabResetCursor(Fts5VocabCursor *pCsr){
+ pCsr->rowid = 0;
+ sqlite3Fts5IterClose(pCsr->pIter);
+ pCsr->pIter = 0;
+ sqlite3_free(pCsr->zLeTerm);
+ pCsr->nLeTerm = -1;
+ pCsr->zLeTerm = 0;
+ pCsr->bEof = 0;
+}
+
+/*
+** Close the cursor. For additional information see the documentation
+** on the xClose method of the virtual table interface.
+*/
+static int fts5VocabCloseMethod(sqlite3_vtab_cursor *pCursor){
+ Fts5VocabCursor *pCsr = (Fts5VocabCursor*)pCursor;
+ fts5VocabResetCursor(pCsr);
+ sqlite3Fts5BufferFree(&pCsr->term);
+ sqlite3_finalize(pCsr->pStmt);
+ sqlite3_free(pCsr);
+ return SQLITE_OK;
+}
+
+static int fts5VocabInstanceNewTerm(Fts5VocabCursor *pCsr){
+ int rc = SQLITE_OK;
+
+ if( sqlite3Fts5IterEof(pCsr->pIter) ){
+ pCsr->bEof = 1;
+ }else{
+ const char *zTerm;
+ int nTerm;
+ zTerm = sqlite3Fts5IterTerm(pCsr->pIter, &nTerm);
+ if( pCsr->nLeTerm>=0 ){
+ int nCmp = MIN(nTerm, pCsr->nLeTerm);
+ int bCmp = memcmp(pCsr->zLeTerm, zTerm, nCmp);
+ if( bCmp<0 || (bCmp==0 && pCsr->nLeTerm<nTerm) ){
+ pCsr->bEof = 1;
+ }
+ }
+
+ sqlite3Fts5BufferSet(&rc, &pCsr->term, nTerm, (const u8*)zTerm);
+ }
+ return rc;
+}
+
+static int fts5VocabInstanceNext(Fts5VocabCursor *pCsr){
+ int eDetail = pCsr->pFts5->pConfig->eDetail;
+ int rc = SQLITE_OK;
+ Fts5IndexIter *pIter = pCsr->pIter;
+ i64 *pp = &pCsr->iInstPos;
+ int *po = &pCsr->iInstOff;
+
+ assert( sqlite3Fts5IterEof(pIter)==0 );
+ assert( pCsr->bEof==0 );
+ while( eDetail==FTS5_DETAIL_NONE
+ || sqlite3Fts5PoslistNext64(pIter->pData, pIter->nData, po, pp)
+ ){
+ pCsr->iInstPos = 0;
+ pCsr->iInstOff = 0;
+
+ rc = sqlite3Fts5IterNextScan(pCsr->pIter);
+ if( rc==SQLITE_OK ){
+ rc = fts5VocabInstanceNewTerm(pCsr);
+ if( pCsr->bEof || eDetail==FTS5_DETAIL_NONE ) break;
+ }
+ if( rc ){
+ pCsr->bEof = 1;
+ break;
+ }
+ }
+
+ return rc;
+}
+
+/*
+** Advance the cursor to the next row in the table.
+*/
+static int fts5VocabNextMethod(sqlite3_vtab_cursor *pCursor){
+ Fts5VocabCursor *pCsr = (Fts5VocabCursor*)pCursor;
+ Fts5VocabTable *pTab = (Fts5VocabTable*)pCursor->pVtab;
+ int rc = SQLITE_OK;
+ int nCol = pCsr->pFts5->pConfig->nCol;
+
+ pCsr->rowid++;
+
+ if( pTab->eType==FTS5_VOCAB_INSTANCE ){
+ return fts5VocabInstanceNext(pCsr);
+ }
+
+ if( pTab->eType==FTS5_VOCAB_COL ){
+ for(pCsr->iCol++; pCsr->iCol<nCol; pCsr->iCol++){
+ if( pCsr->aDoc[pCsr->iCol] ) break;
+ }
+ }
+
+ if( pTab->eType!=FTS5_VOCAB_COL || pCsr->iCol>=nCol ){
+ if( sqlite3Fts5IterEof(pCsr->pIter) ){
+ pCsr->bEof = 1;
+ }else{
+ const char *zTerm;
+ int nTerm;
+
+ zTerm = sqlite3Fts5IterTerm(pCsr->pIter, &nTerm);
+ assert( nTerm>=0 );
+ if( pCsr->nLeTerm>=0 ){
+ int nCmp = MIN(nTerm, pCsr->nLeTerm);
+ int bCmp = memcmp(pCsr->zLeTerm, zTerm, nCmp);
+ if( bCmp<0 || (bCmp==0 && pCsr->nLeTerm<nTerm) ){
+ pCsr->bEof = 1;
+ return SQLITE_OK;
+ }
+ }
+
+ sqlite3Fts5BufferSet(&rc, &pCsr->term, nTerm, (const u8*)zTerm);
+ memset(pCsr->aCnt, 0, nCol * sizeof(i64));
+ memset(pCsr->aDoc, 0, nCol * sizeof(i64));
+ pCsr->iCol = 0;
+
+ assert( pTab->eType==FTS5_VOCAB_COL || pTab->eType==FTS5_VOCAB_ROW );
+ while( rc==SQLITE_OK ){
+ int eDetail = pCsr->pFts5->pConfig->eDetail;
+ const u8 *pPos; int nPos; /* Position list */
+ i64 iPos = 0; /* 64-bit position read from poslist */
+ int iOff = 0; /* Current offset within position list */
+
+ pPos = pCsr->pIter->pData;
+ nPos = pCsr->pIter->nData;
+
+ switch( pTab->eType ){
+ case FTS5_VOCAB_ROW:
+ if( eDetail==FTS5_DETAIL_FULL ){
+ while( 0==sqlite3Fts5PoslistNext64(pPos, nPos, &iOff, &iPos) ){
+ pCsr->aCnt[0]++;
+ }
+ }
+ pCsr->aDoc[0]++;
+ break;
+
+ case FTS5_VOCAB_COL:
+ if( eDetail==FTS5_DETAIL_FULL ){
+ int iCol = -1;
+ while( 0==sqlite3Fts5PoslistNext64(pPos, nPos, &iOff, &iPos) ){
+ int ii = FTS5_POS2COLUMN(iPos);
+ if( iCol!=ii ){
+ if( ii>=nCol ){
+ rc = FTS5_CORRUPT;
+ break;
+ }
+ pCsr->aDoc[ii]++;
+ iCol = ii;
+ }
+ pCsr->aCnt[ii]++;
+ }
+ }else if( eDetail==FTS5_DETAIL_COLUMNS ){
+ while( 0==sqlite3Fts5PoslistNext64(pPos, nPos, &iOff,&iPos) ){
+ assert_nc( iPos>=0 && iPos<nCol );
+ if( iPos>=nCol ){
+ rc = FTS5_CORRUPT;
+ break;
+ }
+ pCsr->aDoc[iPos]++;
+ }
+ }else{
+ assert( eDetail==FTS5_DETAIL_NONE );
+ pCsr->aDoc[0]++;
+ }
+ break;
+
+ default:
+ assert( pTab->eType==FTS5_VOCAB_INSTANCE );
+ break;
+ }
+
+ if( rc==SQLITE_OK ){
+ rc = sqlite3Fts5IterNextScan(pCsr->pIter);
+ }
+ if( pTab->eType==FTS5_VOCAB_INSTANCE ) break;
+
+ if( rc==SQLITE_OK ){
+ zTerm = sqlite3Fts5IterTerm(pCsr->pIter, &nTerm);
+ if( nTerm!=pCsr->term.n
+ || (nTerm>0 && memcmp(zTerm, pCsr->term.p, nTerm))
+ ){
+ break;
+ }
+ if( sqlite3Fts5IterEof(pCsr->pIter) ) break;
+ }
+ }
+ }
+ }
+
+ if( rc==SQLITE_OK && pCsr->bEof==0 && pTab->eType==FTS5_VOCAB_COL ){
+ for(/* noop */; pCsr->iCol<nCol && pCsr->aDoc[pCsr->iCol]==0; pCsr->iCol++);
+ if( pCsr->iCol==nCol ){
+ rc = FTS5_CORRUPT;
+ }
+ }
+ return rc;
+}
+
+/*
+** This is the xFilter implementation for the virtual table.
+*/
+static int fts5VocabFilterMethod(
+ sqlite3_vtab_cursor *pCursor, /* The cursor used for this query */
+ int idxNum, /* Strategy index */
+ const char *zUnused, /* Unused */
+ int nUnused, /* Number of elements in apVal */
+ sqlite3_value **apVal /* Arguments for the indexing scheme */
+){
+ Fts5VocabTable *pTab = (Fts5VocabTable*)pCursor->pVtab;
+ Fts5VocabCursor *pCsr = (Fts5VocabCursor*)pCursor;
+ int eType = pTab->eType;
+ int rc = SQLITE_OK;
+
+ int iVal = 0;
+ int f = FTS5INDEX_QUERY_SCAN;
+ const char *zTerm = 0;
+ int nTerm = 0;
+
+ sqlite3_value *pEq = 0;
+ sqlite3_value *pGe = 0;
+ sqlite3_value *pLe = 0;
+
+ UNUSED_PARAM2(zUnused, nUnused);
+
+ fts5VocabResetCursor(pCsr);
+ if( idxNum & FTS5_VOCAB_TERM_EQ ) pEq = apVal[iVal++];
+ if( idxNum & FTS5_VOCAB_TERM_GE ) pGe = apVal[iVal++];
+ if( idxNum & FTS5_VOCAB_TERM_LE ) pLe = apVal[iVal++];
+
+ if( pEq ){
+ zTerm = (const char *)sqlite3_value_text(pEq);
+ nTerm = sqlite3_value_bytes(pEq);
+ f = 0;
+ }else{
+ if( pGe ){
+ zTerm = (const char *)sqlite3_value_text(pGe);
+ nTerm = sqlite3_value_bytes(pGe);
+ }
+ if( pLe ){
+ const char *zCopy = (const char *)sqlite3_value_text(pLe);
+ if( zCopy==0 ) zCopy = "";
+ pCsr->nLeTerm = sqlite3_value_bytes(pLe);
+ pCsr->zLeTerm = sqlite3_malloc(pCsr->nLeTerm+1);
+ if( pCsr->zLeTerm==0 ){
+ rc = SQLITE_NOMEM;
+ }else{
+ memcpy(pCsr->zLeTerm, zCopy, pCsr->nLeTerm+1);
+ }
+ }
+ }
+
+ if( rc==SQLITE_OK ){
+ Fts5Index *pIndex = pCsr->pFts5->pIndex;
+ rc = sqlite3Fts5IndexQuery(pIndex, zTerm, nTerm, f, 0, &pCsr->pIter);
+ }
+ if( rc==SQLITE_OK && eType==FTS5_VOCAB_INSTANCE ){
+ rc = fts5VocabInstanceNewTerm(pCsr);
+ }
+ if( rc==SQLITE_OK && !pCsr->bEof
+ && (eType!=FTS5_VOCAB_INSTANCE
+ || pCsr->pFts5->pConfig->eDetail!=FTS5_DETAIL_NONE)
+ ){
+ rc = fts5VocabNextMethod(pCursor);
+ }
+
+ return rc;
+}
+
+/*
+** This is the xEof method of the virtual table. SQLite calls this
+** routine to find out if it has reached the end of a result set.
+*/
+static int fts5VocabEofMethod(sqlite3_vtab_cursor *pCursor){
+ Fts5VocabCursor *pCsr = (Fts5VocabCursor*)pCursor;
+ return pCsr->bEof;
+}
+
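+/*
+** This is the xColumn method of the fts5vocab virtual table.  It returns
+** the value of column iCol of the row the cursor currently points at.
+*/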
+static int fts5VocabColumnMethod(
+ sqlite3_vtab_cursor *pCursor, /* Cursor to retrieve value from */
+ sqlite3_context *pCtx, /* Context for sqlite3_result_xxx() calls */
+ int iCol /* Index of column to read value from */
+){
+ Fts5VocabCursor *pCsr = (Fts5VocabCursor*)pCursor;
+ int eDetail = pCsr->pFts5->pConfig->eDetail;
+ int eType = ((Fts5VocabTable*)(pCursor->pVtab))->eType;
+ i64 iVal = 0;
+
+ if( iCol==0 ){
+ sqlite3_result_text(
+ pCtx, (const char*)pCsr->term.p, pCsr->term.n, SQLITE_TRANSIENT
+ );
+ }else if( eType==FTS5_VOCAB_COL ){
+ assert( iCol==1 || iCol==2 || iCol==3 );
+ if( iCol==1 ){
+ if( eDetail!=FTS5_DETAIL_NONE ){
+ const char *z = pCsr->pFts5->pConfig->azCol[pCsr->iCol];
+ sqlite3_result_text(pCtx, z, -1, SQLITE_STATIC);
+ }
+ }else if( iCol==2 ){
+ iVal = pCsr->aDoc[pCsr->iCol];
+ }else{
+ iVal = pCsr->aCnt[pCsr->iCol];
+ }
+ }else if( eType==FTS5_VOCAB_ROW ){
+ assert( iCol==1 || iCol==2 );
+ if( iCol==1 ){
+ iVal = pCsr->aDoc[0];
+ }else{
+ iVal = pCsr->aCnt[0];
+ }
+ }else{
+ assert( eType==FTS5_VOCAB_INSTANCE );
+ switch( iCol ){
+ case 1:
+ sqlite3_result_int64(pCtx, pCsr->pIter->iRowid);
+ break;
+ case 2: {
+ int ii = -1;
+ if( eDetail==FTS5_DETAIL_FULL ){
+ ii = FTS5_POS2COLUMN(pCsr->iInstPos);
+ }else if( eDetail==FTS5_DETAIL_COLUMNS ){
+ ii = (int)pCsr->iInstPos;
+ }
+ if( ii>=0 && ii<pCsr->pFts5->pConfig->nCol ){
+ const char *z = pCsr->pFts5->pConfig->azCol[ii];
+ sqlite3_result_text(pCtx, z, -1, SQLITE_STATIC);
+ }
+ break;
+ }
+ default: {
+ assert( iCol==3 );
+ if( eDetail==FTS5_DETAIL_FULL ){
+ int ii = FTS5_POS2OFFSET(pCsr->iInstPos);
+ sqlite3_result_int(pCtx, ii);
+ }
+ break;
+ }
+ }
+ }
+
+ if( iVal>0 ) sqlite3_result_int64(pCtx, iVal);
+ return SQLITE_OK;
+}
+
+/*
+** This is the xRowid method. The SQLite core calls this routine to
+** retrieve the rowid for the current row of the result set. The
+** rowid should be written to *pRowid.
+*/
+static int fts5VocabRowidMethod(
+ sqlite3_vtab_cursor *pCursor,
+ sqlite_int64 *pRowid
+){
+ Fts5VocabCursor *pCsr = (Fts5VocabCursor*)pCursor;
+ *pRowid = pCsr->rowid;
+ return SQLITE_OK;
+}
+
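+/*
+** Register the "fts5vocab" virtual table module with database handle db.
+*/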
+static int sqlite3Fts5VocabInit(Fts5Global *pGlobal, sqlite3 *db){
+ static const sqlite3_module fts5Vocab = {
+ /* iVersion */ 2,
+ /* xCreate */ fts5VocabCreateMethod,
+ /* xConnect */ fts5VocabConnectMethod,
+ /* xBestIndex */ fts5VocabBestIndexMethod,
+ /* xDisconnect */ fts5VocabDisconnectMethod,
+ /* xDestroy */ fts5VocabDestroyMethod,
+ /* xOpen */ fts5VocabOpenMethod,
+ /* xClose */ fts5VocabCloseMethod,
+ /* xFilter */ fts5VocabFilterMethod,
+ /* xNext */ fts5VocabNextMethod,
+ /* xEof */ fts5VocabEofMethod,
+ /* xColumn */ fts5VocabColumnMethod,
+ /* xRowid */ fts5VocabRowidMethod,
+ /* xUpdate */ 0,
+ /* xBegin */ 0,
+ /* xSync */ 0,
+ /* xCommit */ 0,
+ /* xRollback */ 0,
+ /* xFindFunction */ 0,
+ /* xRename */ 0,
+ /* xSavepoint */ 0,
+ /* xRelease */ 0,
+ /* xRollbackTo */ 0,
+ /* xShadowName */ 0
+ };
+ void *p = (void*)pGlobal;
+
+ return sqlite3_create_module_v2(db, "fts5vocab", &fts5Vocab, p, 0);
+}
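+
+/*
+** Usage sketch (editor's note, not part of the upstream sources): once
+** registered, an fts5vocab table is created against an existing fts5
+** table and queried with ordinary SQL.  The table name "ft" below is
+** hypothetical; a "row" type vocabulary table exposes one row per
+** distinct term with term, doc and cnt columns.
+**
+**   CREATE VIRTUAL TABLE ft USING fts5(a, b);
+**   CREATE VIRTUAL TABLE ft_v USING fts5vocab('ft', 'row');
+**   SELECT term, doc, cnt FROM ft_v;
+*/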
+
+
+
+#endif /* !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS5) */
+
+/************** End of fts5.c ************************************************/
+/************** Begin file stmt.c ********************************************/
+/*
+** 2017-05-31
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+**
+** This file demonstrates an eponymous virtual table that returns information
+** about all prepared statements for the database connection.
+**
+** Usage example:
+**
+** .load ./stmt
+** .mode line
+** .header on
+** SELECT * FROM stmt;
+*/
+#if !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_STMTVTAB)
+#if !defined(SQLITEINT_H)
+/* #include "sqlite3ext.h" */
+#endif
+SQLITE_EXTENSION_INIT1
+/* #include <assert.h> */
+/* #include <string.h> */
+
+#ifndef SQLITE_OMIT_VIRTUALTABLE
+
+/* stmt_vtab is a subclass of sqlite3_vtab which will
+** serve as the underlying representation of a stmt virtual table
+*/
+typedef struct stmt_vtab stmt_vtab;
+struct stmt_vtab {
+ sqlite3_vtab base; /* Base class - must be first */
+ sqlite3 *db; /* Database connection for this stmt vtab */
+};
+
+/* stmt_cursor is a subclass of sqlite3_vtab_cursor which will
+** serve as the underlying representation of a cursor that scans
+** over rows of the result
+*/
+typedef struct stmt_cursor stmt_cursor;
+struct stmt_cursor {
+ sqlite3_vtab_cursor base; /* Base class - must be first */
+ sqlite3 *db; /* Database connection for this cursor */
+ sqlite3_stmt *pStmt; /* Statement cursor is currently pointing at */
+ sqlite3_int64 iRowid; /* The rowid */
+};
+
+/*
+** The stmtConnect() method is invoked to create a new
+** stmt_vtab that describes the stmt virtual table.
+**
+** Think of this routine as the constructor for stmt_vtab objects.
+**
+** All this routine needs to do is:
+**
+** (1) Allocate the stmt_vtab object and initialize all fields.
+**
+** (2) Tell SQLite (via the sqlite3_declare_vtab() interface) what the
+** result set of queries against stmt will look like.
+*/
+static int stmtConnect(
+ sqlite3 *db,
+ void *pAux,
+ int argc, const char *const*argv,
+ sqlite3_vtab **ppVtab,
+ char **pzErr
+){
+ stmt_vtab *pNew;
+ int rc;
+
+/* Column numbers */
+#define STMT_COLUMN_SQL 0 /* SQL for the statement */
+#define STMT_COLUMN_NCOL 1 /* Number of result columns */
+#define STMT_COLUMN_RO 2 /* True if read-only */
+#define STMT_COLUMN_BUSY 3 /* True if currently busy */
+#define STMT_COLUMN_NSCAN 4 /* SQLITE_STMTSTATUS_FULLSCAN_STEP */
+#define STMT_COLUMN_NSORT 5 /* SQLITE_STMTSTATUS_SORT */
+#define STMT_COLUMN_NAIDX 6 /* SQLITE_STMTSTATUS_AUTOINDEX */
+#define STMT_COLUMN_NSTEP 7 /* SQLITE_STMTSTATUS_VM_STEP */
+#define STMT_COLUMN_REPREP 8 /* SQLITE_STMTSTATUS_REPREPARE */
+#define STMT_COLUMN_RUN 9 /* SQLITE_STMTSTATUS_RUN */
+#define STMT_COLUMN_MEM 10 /* SQLITE_STMTSTATUS_MEMUSED */
+
+
+ rc = sqlite3_declare_vtab(db,
+ "CREATE TABLE x(sql,ncol,ro,busy,nscan,nsort,naidx,nstep,"
+ "reprep,run,mem)");
+ if( rc==SQLITE_OK ){
+ pNew = sqlite3_malloc( sizeof(*pNew) );
+ *ppVtab = (sqlite3_vtab*)pNew;
+ if( pNew==0 ) return SQLITE_NOMEM;
+ memset(pNew, 0, sizeof(*pNew));
+ pNew->db = db;
+ }
+ return rc;
+}
+
+/*
+** This method is the destructor for stmt_vtab objects.
+*/
+static int stmtDisconnect(sqlite3_vtab *pVtab){
+ sqlite3_free(pVtab);
+ return SQLITE_OK;
+}
+
+/*
+** Constructor for a new stmt_cursor object.
+*/
+static int stmtOpen(sqlite3_vtab *p, sqlite3_vtab_cursor **ppCursor){
+ stmt_cursor *pCur;
+ pCur = sqlite3_malloc( sizeof(*pCur) );
+ if( pCur==0 ) return SQLITE_NOMEM;
+ memset(pCur, 0, sizeof(*pCur));
+ pCur->db = ((stmt_vtab*)p)->db;
+ *ppCursor = &pCur->base;
+ return SQLITE_OK;
+}
+
+/*
+** Destructor for a stmt_cursor.
+*/
+static int stmtClose(sqlite3_vtab_cursor *cur){
+ sqlite3_free(cur);
+ return SQLITE_OK;
+}
+
+
+/*
+** Advance a stmt_cursor to its next row of output.
+*/
+static int stmtNext(sqlite3_vtab_cursor *cur){
+ stmt_cursor *pCur = (stmt_cursor*)cur;
+ pCur->iRowid++;
+ pCur->pStmt = sqlite3_next_stmt(pCur->db, pCur->pStmt);
+ return SQLITE_OK;
+}
+
+/*
+** Return values of columns for the row at which the stmt_cursor
+** is currently pointing.
+*/
+static int stmtColumn(
+ sqlite3_vtab_cursor *cur, /* The cursor */
+ sqlite3_context *ctx, /* First argument to sqlite3_result_...() */
+ int i /* Which column to return */
+){
+ stmt_cursor *pCur = (stmt_cursor*)cur;
+ switch( i ){
+ case STMT_COLUMN_SQL: {
+ sqlite3_result_text(ctx, sqlite3_sql(pCur->pStmt), -1, SQLITE_TRANSIENT);
+ break;
+ }
+ case STMT_COLUMN_NCOL: {
+ sqlite3_result_int(ctx, sqlite3_column_count(pCur->pStmt));
+ break;
+ }
+ case STMT_COLUMN_RO: {
+ sqlite3_result_int(ctx, sqlite3_stmt_readonly(pCur->pStmt));
+ break;
+ }
+ case STMT_COLUMN_BUSY: {
+ sqlite3_result_int(ctx, sqlite3_stmt_busy(pCur->pStmt));
+ break;
+ }
+ default: {
+ assert( i==STMT_COLUMN_MEM );
+ i = SQLITE_STMTSTATUS_MEMUSED +
+ STMT_COLUMN_NSCAN - SQLITE_STMTSTATUS_FULLSCAN_STEP;
+ /* Fall thru */
+ }
+ case STMT_COLUMN_NSCAN:
+ case STMT_COLUMN_NSORT:
+ case STMT_COLUMN_NAIDX:
+ case STMT_COLUMN_NSTEP:
+ case STMT_COLUMN_REPREP:
+ case STMT_COLUMN_RUN: {
+ sqlite3_result_int(ctx, sqlite3_stmt_status(pCur->pStmt,
+ i-STMT_COLUMN_NSCAN+SQLITE_STMTSTATUS_FULLSCAN_STEP, 0));
+ break;
+ }
+ }
+ return SQLITE_OK;
+}
+
+/*
+** Return the rowid for the current row. In this implementation, the
+** rowid is the same as the output value.
+*/
+static int stmtRowid(sqlite3_vtab_cursor *cur, sqlite_int64 *pRowid){
+ stmt_cursor *pCur = (stmt_cursor*)cur;
+ *pRowid = pCur->iRowid;
+ return SQLITE_OK;
+}
+
+/*
+** Return TRUE if the cursor has been moved off of the last
+** row of output.
+*/
+static int stmtEof(sqlite3_vtab_cursor *cur){
+ stmt_cursor *pCur = (stmt_cursor*)cur;
+ return pCur->pStmt==0;
+}
+
+/*
+** This method is called to "rewind" the stmt_cursor object back
+** to the first row of output. This method is always called at least
+** once prior to any call to stmtColumn() or stmtRowid() or
+** stmtEof().
+*/
+static int stmtFilter(
+ sqlite3_vtab_cursor *pVtabCursor,
+ int idxNum, const char *idxStr,
+ int argc, sqlite3_value **argv
+){
+ stmt_cursor *pCur = (stmt_cursor *)pVtabCursor;
+ pCur->pStmt = 0;
+ pCur->iRowid = 0;
+ return stmtNext(pVtabCursor);
+}
+
+/*
+** SQLite will invoke this method one or more times while planning a query
+** that uses the stmt virtual table. This routine needs to create
+** a query plan for each invocation and compute an estimated cost for that
+** plan.
+*/
+static int stmtBestIndex(
+ sqlite3_vtab *tab,
+ sqlite3_index_info *pIdxInfo
+){
+ pIdxInfo->estimatedCost = (double)500;
+ pIdxInfo->estimatedRows = 500;
+ return SQLITE_OK;
+}
+
+/*
+** The following structure defines all the methods for the
+** stmt virtual table.
+*/
+static sqlite3_module stmtModule = {
+ 0, /* iVersion */
+ 0, /* xCreate */
+ stmtConnect, /* xConnect */
+ stmtBestIndex, /* xBestIndex */
+ stmtDisconnect, /* xDisconnect */
+ 0, /* xDestroy */
+ stmtOpen, /* xOpen - open a cursor */
+ stmtClose, /* xClose - close a cursor */
+ stmtFilter, /* xFilter - configure scan constraints */
+ stmtNext, /* xNext - advance a cursor */
+ stmtEof, /* xEof - check for end of scan */
+ stmtColumn, /* xColumn - read data */
+ stmtRowid, /* xRowid - read data */
+ 0, /* xUpdate */
+ 0, /* xBegin */
+ 0, /* xSync */
+ 0, /* xCommit */
+ 0, /* xRollback */
+ 0, /* xFindMethod */
+ 0, /* xRename */
+ 0, /* xSavepoint */
+ 0, /* xRelease */
+ 0, /* xRollbackTo */
+ 0, /* xShadowName */
+};
+
+#endif /* SQLITE_OMIT_VIRTUALTABLE */
+
+SQLITE_PRIVATE int sqlite3StmtVtabInit(sqlite3 *db){
+ int rc = SQLITE_OK;
+#ifndef SQLITE_OMIT_VIRTUALTABLE
+ rc = sqlite3_create_module(db, "sqlite_stmt", &stmtModule, 0);
+#endif
+ return rc;
+}
+
+#ifndef SQLITE_CORE
+#ifdef _WIN32
+__declspec(dllexport)
+#endif
+SQLITE_API int sqlite3_stmt_init(
+ sqlite3 *db,
+ char **pzErrMsg,
+ const sqlite3_api_routines *pApi
+){
+ int rc = SQLITE_OK;
+ SQLITE_EXTENSION_INIT2(pApi);
+#ifndef SQLITE_OMIT_VIRTUALTABLE
+ rc = sqlite3StmtVtabInit(db);
+#endif
+ return rc;
+}
+#endif /* SQLITE_CORE */
+#endif /* !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_STMTVTAB) */
+
+/************** End of stmt.c ************************************************/
+#if __LINE__!=230527
+#undef SQLITE_SOURCE_ID
+#define SQLITE_SOURCE_ID "2020-08-14 13:23:32 fca8dc8b578f215a969cd899336378966156154710873e68b3d9ac5881b0alt2"
+#endif
+/* Return the source-id for this library */
+SQLITE_API const char *sqlite3_sourceid(void){ return SQLITE_SOURCE_ID; }
+/************************** End of sqlite3.c ******************************/
diff --git a/database/sqlite/sqlite3.h b/database/sqlite/sqlite3.h
new file mode 100644
index 0000000..d200c7a
--- /dev/null
+++ b/database/sqlite/sqlite3.h
@@ -0,0 +1,12174 @@
+/*
+** 2001-09-15
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+** This header file defines the interface that the SQLite library
+** presents to client programs. If a C-function, structure, datatype,
+** or constant definition does not appear in this file, then it is
+** not a published API of SQLite, is subject to change without
+** notice, and should not be referenced by programs that use SQLite.
+**
+** Some of the definitions that are in this file are marked as
+** "experimental". Experimental interfaces are normally new
+** features recently added to SQLite. We do not anticipate changes
+** to experimental interfaces but reserve the right to make minor changes
+** if experience from use "in the wild" suggests such changes are prudent.
+**
+** The official C-language API documentation for SQLite is derived
+** from comments in this file. This file is the authoritative source
+** on how SQLite interfaces are supposed to operate.
+**
+** The name of this file under configuration management is "sqlite.h.in".
+** The makefile makes some minor changes to this file (such as inserting
+** the version number) and changes its name to "sqlite3.h" as
+** part of the build process.
+*/
+#ifndef SQLITE3_H
+#define SQLITE3_H
+#include <stdarg.h> /* Needed for the definition of va_list */
+
+/*
+** Make sure we can call this stuff from C++.
+*/
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+/*
+** Provide the ability to override linkage features of the interface.
+*/
+#ifndef SQLITE_EXTERN
+# define SQLITE_EXTERN extern
+#endif
+#ifndef SQLITE_API
+# define SQLITE_API
+#endif
+#ifndef SQLITE_CDECL
+# define SQLITE_CDECL
+#endif
+#ifndef SQLITE_APICALL
+# define SQLITE_APICALL
+#endif
+#ifndef SQLITE_STDCALL
+# define SQLITE_STDCALL SQLITE_APICALL
+#endif
+#ifndef SQLITE_CALLBACK
+# define SQLITE_CALLBACK
+#endif
+#ifndef SQLITE_SYSAPI
+# define SQLITE_SYSAPI
+#endif
+
+/*
+** These no-op macros are used in front of interfaces to mark those
+** interfaces as either deprecated or experimental. New applications
+** should not use deprecated interfaces - they are supported for backwards
+** compatibility only. Application writers should be aware that
+** experimental interfaces are subject to change in point releases.
+**
+** These macros used to resolve to various kinds of compiler magic that
+** would generate warning messages when they were used. But that
+** compiler magic ended up generating such a flurry of bug reports
+** that we have taken it all out and gone back to using simple
+** noop macros.
+*/
+#define SQLITE_DEPRECATED
+#define SQLITE_EXPERIMENTAL
+
+/*
+** Ensure these symbols were not defined by some previous header file.
+*/
+#ifdef SQLITE_VERSION
+# undef SQLITE_VERSION
+#endif
+#ifdef SQLITE_VERSION_NUMBER
+# undef SQLITE_VERSION_NUMBER
+#endif
+
+/*
+** CAPI3REF: Compile-Time Library Version Numbers
+**
+** ^(The [SQLITE_VERSION] C preprocessor macro in the sqlite3.h header
+** evaluates to a string literal that is the SQLite version in the
+** format "X.Y.Z" where X is the major version number (always 3 for
+** SQLite3) and Y is the minor version number and Z is the release number.)^
+** ^(The [SQLITE_VERSION_NUMBER] C preprocessor macro resolves to an integer
+** with the value (X*1000000 + Y*1000 + Z) where X, Y, and Z are the same
+** numbers used in [SQLITE_VERSION].)^
+** The SQLITE_VERSION_NUMBER for any given release of SQLite will always
+** be larger than the release from which it is derived. Either Y will
+** be held constant and Z will be incremented or else Y will be incremented
+** and Z will be reset to zero.
+**
+** Since [version 3.6.18] ([dateof:3.6.18]),
+** SQLite source code has been stored in the
+** <a href="http://www.fossil-scm.org/">Fossil configuration management
+** system</a>. ^The SQLITE_SOURCE_ID macro evaluates to
+** a string which identifies a particular check-in of SQLite
+** within its configuration management system. ^The SQLITE_SOURCE_ID
+** string contains the date and time of the check-in (UTC) and a SHA1
+** or SHA3-256 hash of the entire source tree. If the source code has
+** been edited in any way since it was last checked in, then the last
+** four hexadecimal digits of the hash may be modified.
+**
+** See also: [sqlite3_libversion()],
+** [sqlite3_libversion_number()], [sqlite3_sourceid()],
+** [sqlite_version()] and [sqlite_source_id()].
+*/
+#define SQLITE_VERSION "3.33.0"
+#define SQLITE_VERSION_NUMBER 3033000
+#define SQLITE_SOURCE_ID "2020-08-14 13:23:32 fca8dc8b578f215a969cd899336378966156154710873e68b3d9ac5881b0alt1"
+
+/*
+** CAPI3REF: Run-Time Library Version Numbers
+** KEYWORDS: sqlite3_version sqlite3_sourceid
+**
+** These interfaces provide the same information as the [SQLITE_VERSION],
+** [SQLITE_VERSION_NUMBER], and [SQLITE_SOURCE_ID] C preprocessor macros
+** but are associated with the library instead of the header file. ^(Cautious
+** programmers might include assert() statements in their application to
+** verify that values returned by these interfaces match the macros in
+** the header, and thus ensure that the application is
+** compiled with matching library and header files.
+**
+** <blockquote><pre>
+** assert( sqlite3_libversion_number()==SQLITE_VERSION_NUMBER );
+** assert( strncmp(sqlite3_sourceid(),SQLITE_SOURCE_ID,80)==0 );
+** assert( strcmp(sqlite3_libversion(),SQLITE_VERSION)==0 );
+** </pre></blockquote>)^
+**
+** ^The sqlite3_version[] string constant contains the text of [SQLITE_VERSION]
+** macro.  ^The sqlite3_libversion() function returns a pointer
+** to the sqlite3_version[] string constant. The sqlite3_libversion()
+** function is provided for use in DLLs since DLL users usually do not have
+** direct access to string constants within the DLL. ^The
+** sqlite3_libversion_number() function returns an integer equal to
+** [SQLITE_VERSION_NUMBER]. ^(The sqlite3_sourceid() function returns
+** a pointer to a string constant whose value is the same as the
+** [SQLITE_SOURCE_ID] C preprocessor macro. Except if SQLite is built
+** using an edited copy of [the amalgamation], then the last four characters
+** of the hash might be different from [SQLITE_SOURCE_ID].)^
+**
+** See also: [sqlite_version()] and [sqlite_source_id()].
+*/
+SQLITE_API SQLITE_EXTERN const char sqlite3_version[];
+SQLITE_API const char *sqlite3_libversion(void);
+SQLITE_API const char *sqlite3_sourceid(void);
+SQLITE_API int sqlite3_libversion_number(void);
+
+/*
+** CAPI3REF: Run-Time Library Compilation Options Diagnostics
+**
+** ^The sqlite3_compileoption_used() function returns 0 or 1
+** indicating whether the specified option was defined at
+** compile time. ^The SQLITE_ prefix may be omitted from the
+** option name passed to sqlite3_compileoption_used().
+**
+** ^The sqlite3_compileoption_get() function allows iterating
+** over the list of options that were defined at compile time by
+** returning the N-th compile time option string. ^If N is out of range,
+** sqlite3_compileoption_get() returns a NULL pointer. ^The SQLITE_
+** prefix is omitted from any strings returned by
+** sqlite3_compileoption_get().
+**
+** ^Support for the diagnostic functions sqlite3_compileoption_used()
+** and sqlite3_compileoption_get() may be omitted by specifying the
+** [SQLITE_OMIT_COMPILEOPTION_DIAGS] option at compile time.
+**
+** See also: SQL functions [sqlite_compileoption_used()] and
+** [sqlite_compileoption_get()] and the [compile_options pragma].
+*/
+#ifndef SQLITE_OMIT_COMPILEOPTION_DIAGS
+SQLITE_API int sqlite3_compileoption_used(const char *zOptName);
+SQLITE_API const char *sqlite3_compileoption_get(int N);
+#else
+# define sqlite3_compileoption_used(X) 0
+# define sqlite3_compileoption_get(X) ((void*)0)
+#endif
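+
+/*
+** Usage sketch (editor's note, not part of the upstream API docs): list
+** every option the linked library was compiled with.  The SQLITE_ prefix
+** is re-attached for display because sqlite3_compileoption_get() omits it.
+**
+**   int i;
+**   const char *zOpt;
+**   for(i=0; (zOpt = sqlite3_compileoption_get(i))!=0; i++){
+**     printf("SQLITE_%s\n", zOpt);
+**   }
+*/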
+
+/*
+** CAPI3REF: Test To See If The Library Is Threadsafe
+**
+** ^The sqlite3_threadsafe() function returns zero if and only if
+** SQLite was compiled with mutexing code omitted due to the
+** [SQLITE_THREADSAFE] compile-time option being set to 0.
+**
+** SQLite can be compiled with or without mutexes. When
+** the [SQLITE_THREADSAFE] C preprocessor macro is 1 or 2, mutexes
+** are enabled and SQLite is threadsafe. When the
+** [SQLITE_THREADSAFE] macro is 0,
+** the mutexes are omitted. Without the mutexes, it is not safe
+** to use SQLite concurrently from more than one thread.
+**
+** Enabling mutexes incurs a measurable performance penalty.
+** So if speed is of utmost importance, it makes sense to disable
+** the mutexes. But for maximum safety, mutexes should be enabled.
+** ^The default behavior is for mutexes to be enabled.
+**
+** This interface can be used by an application to make sure that the
+** version of SQLite that it is linking against was compiled with
+** the desired setting of the [SQLITE_THREADSAFE] macro.
+**
+** This interface only reports on the compile-time mutex setting
+** of the [SQLITE_THREADSAFE] flag. If SQLite is compiled with
+** SQLITE_THREADSAFE=1 or =2 then mutexes are enabled by default but
+** can be fully or partially disabled using a call to [sqlite3_config()]
+** with the verbs [SQLITE_CONFIG_SINGLETHREAD], [SQLITE_CONFIG_MULTITHREAD],
+** or [SQLITE_CONFIG_SERIALIZED]. ^(The return value of the
+** sqlite3_threadsafe() function shows only the compile-time setting of
+** thread safety, not any run-time changes to that setting made by
+** sqlite3_config(). In other words, the return value from sqlite3_threadsafe()
+** is unchanged by calls to sqlite3_config().)^
+**
+** See the [threading mode] documentation for additional information.
+*/
+SQLITE_API int sqlite3_threadsafe(void);
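+
+/*
+** Usage sketch (editor's note, not part of the upstream API docs): a
+** startup guard for an application that intends to use SQLite from more
+** than one thread.
+**
+**   if( sqlite3_threadsafe()==0 ){
+**     fprintf(stderr, "SQLite built with SQLITE_THREADSAFE=0\n");
+**     exit(1);
+**   }
+*/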
+
+/*
+** CAPI3REF: Database Connection Handle
+** KEYWORDS: {database connection} {database connections}
+**
+** Each open SQLite database is represented by a pointer to an instance of
+** the opaque structure named "sqlite3". It is useful to think of an sqlite3
+** pointer as an object. The [sqlite3_open()], [sqlite3_open16()], and
+** [sqlite3_open_v2()] interfaces are its constructors, and [sqlite3_close()]
+** and [sqlite3_close_v2()] are its destructors. There are many other
+** interfaces (such as
+** [sqlite3_prepare_v2()], [sqlite3_create_function()], and
+** [sqlite3_busy_timeout()] to name but three) that are methods on an
+** sqlite3 object.
+*/
+typedef struct sqlite3 sqlite3;
+
+/*
+** CAPI3REF: 64-Bit Integer Types
+** KEYWORDS: sqlite_int64 sqlite_uint64
+**
+** Because there is no cross-platform way to specify 64-bit integer types,
+** SQLite includes typedefs for 64-bit signed and unsigned integers.
+**
+** The sqlite3_int64 and sqlite3_uint64 are the preferred type definitions.
+** The sqlite_int64 and sqlite_uint64 types are supported for backwards
+** compatibility only.
+**
+** ^The sqlite3_int64 and sqlite_int64 types can store integer values
+** between -9223372036854775808 and +9223372036854775807 inclusive. ^The
+** sqlite3_uint64 and sqlite_uint64 types can store integer values
+** between 0 and +18446744073709551615 inclusive.
+*/
+#ifdef SQLITE_INT64_TYPE
+ typedef SQLITE_INT64_TYPE sqlite_int64;
+# ifdef SQLITE_UINT64_TYPE
+ typedef SQLITE_UINT64_TYPE sqlite_uint64;
+# else
+ typedef unsigned SQLITE_INT64_TYPE sqlite_uint64;
+# endif
+#elif defined(_MSC_VER) || defined(__BORLANDC__)
+ typedef __int64 sqlite_int64;
+ typedef unsigned __int64 sqlite_uint64;
+#else
+ typedef long long int sqlite_int64;
+ typedef unsigned long long int sqlite_uint64;
+#endif
+typedef sqlite_int64 sqlite3_int64;
+typedef sqlite_uint64 sqlite3_uint64;
+
+/*
+** If compiling for a processor that lacks floating point support,
+** substitute integer for floating-point.
+*/
+#ifdef SQLITE_OMIT_FLOATING_POINT
+# define double sqlite3_int64
+#endif
+
+/*
+** CAPI3REF: Closing A Database Connection
+** DESTRUCTOR: sqlite3
+**
+** ^The sqlite3_close() and sqlite3_close_v2() routines are destructors
+** for the [sqlite3] object.
+** ^Calls to sqlite3_close() and sqlite3_close_v2() return [SQLITE_OK] if
+** the [sqlite3] object is successfully destroyed and all associated
+** resources are deallocated.
+**
+** Ideally, applications should [sqlite3_finalize | finalize] all
+** [prepared statements], [sqlite3_blob_close | close] all [BLOB handles], and
+** [sqlite3_backup_finish | finish] all [sqlite3_backup] objects associated
+** with the [sqlite3] object prior to attempting to close the object.
+** ^If the database connection is associated with unfinalized prepared
+** statements, BLOB handlers, and/or unfinished sqlite3_backup objects then
+** sqlite3_close() will leave the database connection open and return
+** [SQLITE_BUSY]. ^If sqlite3_close_v2() is called with unfinalized prepared
+** statements, unclosed BLOB handlers, and/or unfinished sqlite3_backups,
+** it returns [SQLITE_OK] regardless, but instead of deallocating the database
+** connection immediately, it marks the database connection as an unusable
+** "zombie" and makes arrangements to automatically deallocate the database
+** connection after all prepared statements are finalized, all BLOB handles
+** are closed, and all backups have finished. The sqlite3_close_v2() interface
+** is intended for use with host languages that are garbage collected, and
+** where the order in which destructors are called is arbitrary.
+**
+** ^If an [sqlite3] object is destroyed while a transaction is open,
+** the transaction is automatically rolled back.
+**
+** The C parameter to [sqlite3_close(C)] and [sqlite3_close_v2(C)]
+** must be either a NULL
+** pointer or an [sqlite3] object pointer obtained
+** from [sqlite3_open()], [sqlite3_open16()], or
+** [sqlite3_open_v2()], and not previously closed.
+** ^Calling sqlite3_close() or sqlite3_close_v2() with a NULL pointer
+** argument is a harmless no-op.
+*/
+SQLITE_API int sqlite3_close(sqlite3*);
+SQLITE_API int sqlite3_close_v2(sqlite3*);
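+
+/*
+** Usage sketch (editor's note, not part of the upstream API docs):
+** finalize any prepared statements still outstanding on a connection so
+** that sqlite3_close() does not return [SQLITE_BUSY].  sqlite3_next_stmt()
+** is declared later in this file.
+**
+**   sqlite3_stmt *pStmt;
+**   while( (pStmt = sqlite3_next_stmt(db, 0))!=0 ){
+**     sqlite3_finalize(pStmt);
+**   }
+**   sqlite3_close(db);
+*/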
+
+/*
+** The type for a callback function.
+** This is legacy and deprecated. It is included for historical
+** compatibility and is not documented.
+*/
+typedef int (*sqlite3_callback)(void*,int,char**, char**);
+
+/*
+** CAPI3REF: One-Step Query Execution Interface
+** METHOD: sqlite3
+**
+** The sqlite3_exec() interface is a convenience wrapper around
+** [sqlite3_prepare_v2()], [sqlite3_step()], and [sqlite3_finalize()],
+** that allows an application to run multiple statements of SQL
+** without having to use a lot of C code.
+**
+** ^The sqlite3_exec() interface runs zero or more UTF-8 encoded,
+** semicolon-separated SQL statements passed into its 2nd argument,
+** in the context of the [database connection] passed in as its 1st
+** argument. ^If the callback function of the 3rd argument to
+** sqlite3_exec() is not NULL, then it is invoked for each result row
+** coming out of the evaluated SQL statements. ^The 4th argument to
+** sqlite3_exec() is relayed through to the 1st argument of each
+** callback invocation. ^If the callback pointer to sqlite3_exec()
+** is NULL, then no callback is ever invoked and result rows are
+** ignored.
+**
+** ^If an error occurs while evaluating the SQL statements passed into
+** sqlite3_exec(), then execution of the current statement stops and
+** subsequent statements are skipped. ^If the 5th parameter to sqlite3_exec()
+** is not NULL then any error message is written into memory obtained
+** from [sqlite3_malloc()] and passed back through the 5th parameter.
+** To avoid memory leaks, the application should invoke [sqlite3_free()]
+** on error message strings returned through the 5th parameter of
+** sqlite3_exec() after the error message string is no longer needed.
+** ^If the 5th parameter to sqlite3_exec() is not NULL and no errors
+** occur, then sqlite3_exec() sets the pointer in its 5th parameter to
+** NULL before returning.
+**
+** ^If an sqlite3_exec() callback returns non-zero, the sqlite3_exec()
+** routine returns SQLITE_ABORT without invoking the callback again and
+** without running any subsequent SQL statements.
+**
+** ^The 2nd argument to the sqlite3_exec() callback function is the
+** number of columns in the result. ^The 3rd argument to the sqlite3_exec()
+** callback is an array of pointers to strings obtained as if from
+** [sqlite3_column_text()], one for each column. ^If an element of a
+** result row is NULL then the corresponding string pointer for the
+** sqlite3_exec() callback is a NULL pointer. ^The 4th argument to the
+** sqlite3_exec() callback is an array of pointers to strings where each
+** entry represents the name of the corresponding result column as obtained
+** from [sqlite3_column_name()].
+**
+** ^If the 2nd parameter to sqlite3_exec() is a NULL pointer, a pointer
+** to an empty string, or a pointer that contains only whitespace and/or
+** SQL comments, then no SQL statements are evaluated and the database
+** is not changed.
+**
+** Restrictions:
+**
+** <ul>
+** <li> The application must ensure that the 1st parameter to sqlite3_exec()
+** is a valid and open [database connection].
+** <li> The application must not close the [database connection] specified by
+** the 1st parameter to sqlite3_exec() while sqlite3_exec() is running.
+** <li> The application must not modify the SQL statement text passed into
+** the 2nd parameter of sqlite3_exec() while sqlite3_exec() is running.
+** </ul>
+*/
+SQLITE_API int sqlite3_exec(
+ sqlite3*, /* An open database */
+ const char *sql, /* SQL to be evaluated */
+ int (*callback)(void*,int,char**,char**), /* Callback function */
+ void *, /* 1st argument to callback */
+ char **errmsg /* Error msg written here */
+);
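+
+/*
+** Usage sketch (editor's note, not part of the upstream API docs): run a
+** query and print each row through the callback.  "db" is assumed to be
+** an open [database connection] and print_row is a hypothetical name.
+** Returning non-zero from the callback would make sqlite3_exec() return
+** SQLITE_ABORT.
+**
+**   static int print_row(void *pArg, int nCol, char **azVal, char **azName){
+**     int i;
+**     for(i=0; i<nCol; i++){
+**       printf("%s = %s\n", azName[i], azVal[i] ? azVal[i] : "NULL");
+**     }
+**     return 0;
+**   }
+**
+**   char *zErr = 0;
+**   if( sqlite3_exec(db, "SELECT * FROM t1;", print_row, 0, &zErr)!=SQLITE_OK ){
+**     fprintf(stderr, "sqlite3_exec: %s\n", zErr);
+**     sqlite3_free(zErr);
+**   }
+*/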
+
+/*
+** CAPI3REF: Result Codes
+** KEYWORDS: {result code definitions}
+**
+** Many SQLite functions return an integer result code from the set shown
+** here in order to indicate success or failure.
+**
+** New error codes may be added in future versions of SQLite.
+**
+** See also: [extended result code definitions]
+*/
+#define SQLITE_OK 0 /* Successful result */
+/* beginning-of-error-codes */
+#define SQLITE_ERROR 1 /* Generic error */
+#define SQLITE_INTERNAL 2 /* Internal logic error in SQLite */
+#define SQLITE_PERM 3 /* Access permission denied */
+#define SQLITE_ABORT 4 /* Callback routine requested an abort */
+#define SQLITE_BUSY 5 /* The database file is locked */
+#define SQLITE_LOCKED 6 /* A table in the database is locked */
+#define SQLITE_NOMEM 7 /* A malloc() failed */
+#define SQLITE_READONLY 8 /* Attempt to write a readonly database */
+#define SQLITE_INTERRUPT 9 /* Operation terminated by sqlite3_interrupt()*/
+#define SQLITE_IOERR 10 /* Some kind of disk I/O error occurred */
+#define SQLITE_CORRUPT 11 /* The database disk image is malformed */
+#define SQLITE_NOTFOUND 12 /* Unknown opcode in sqlite3_file_control() */
+#define SQLITE_FULL 13 /* Insertion failed because database is full */
+#define SQLITE_CANTOPEN 14 /* Unable to open the database file */
+#define SQLITE_PROTOCOL 15 /* Database lock protocol error */
+#define SQLITE_EMPTY 16 /* Internal use only */
+#define SQLITE_SCHEMA 17 /* The database schema changed */
+#define SQLITE_TOOBIG 18 /* String or BLOB exceeds size limit */
+#define SQLITE_CONSTRAINT 19 /* Abort due to constraint violation */
+#define SQLITE_MISMATCH 20 /* Data type mismatch */
+#define SQLITE_MISUSE 21 /* Library used incorrectly */
+#define SQLITE_NOLFS 22 /* Uses OS features not supported on host */
+#define SQLITE_AUTH 23 /* Authorization denied */
+#define SQLITE_FORMAT 24 /* Not used */
+#define SQLITE_RANGE 25 /* 2nd parameter to sqlite3_bind out of range */
+#define SQLITE_NOTADB 26 /* File opened that is not a database file */
+#define SQLITE_NOTICE 27 /* Notifications from sqlite3_log() */
+#define SQLITE_WARNING 28 /* Warnings from sqlite3_log() */
+#define SQLITE_ROW 100 /* sqlite3_step() has another row ready */
+#define SQLITE_DONE 101 /* sqlite3_step() has finished executing */
+/* end-of-error-codes */
+
+/*
+** CAPI3REF: Extended Result Codes
+** KEYWORDS: {extended result code definitions}
+**
+** In its default configuration, SQLite API routines return one of 30 integer
+** [result codes]. However, experience has shown that many of
+** these result codes are too coarse-grained. They do not provide as
+** much information about problems as programmers might like. In an effort to
+** address this, newer versions of SQLite (version 3.3.8 [dateof:3.3.8]
+** and later) include
+** support for additional result codes that provide more detailed information
+** about errors. These [extended result codes] are enabled or disabled
+** on a per database connection basis using the
+** [sqlite3_extended_result_codes()] API. Or, the extended code for
+** the most recent error can be obtained using
+** [sqlite3_extended_errcode()].
+*/
+#define SQLITE_ERROR_MISSING_COLLSEQ (SQLITE_ERROR | (1<<8))
+#define SQLITE_ERROR_RETRY (SQLITE_ERROR | (2<<8))
+#define SQLITE_ERROR_SNAPSHOT (SQLITE_ERROR | (3<<8))
+#define SQLITE_IOERR_READ (SQLITE_IOERR | (1<<8))
+#define SQLITE_IOERR_SHORT_READ (SQLITE_IOERR | (2<<8))
+#define SQLITE_IOERR_WRITE (SQLITE_IOERR | (3<<8))
+#define SQLITE_IOERR_FSYNC (SQLITE_IOERR | (4<<8))
+#define SQLITE_IOERR_DIR_FSYNC (SQLITE_IOERR | (5<<8))
+#define SQLITE_IOERR_TRUNCATE (SQLITE_IOERR | (6<<8))
+#define SQLITE_IOERR_FSTAT (SQLITE_IOERR | (7<<8))
+#define SQLITE_IOERR_UNLOCK (SQLITE_IOERR | (8<<8))
+#define SQLITE_IOERR_RDLOCK (SQLITE_IOERR | (9<<8))
+#define SQLITE_IOERR_DELETE (SQLITE_IOERR | (10<<8))
+#define SQLITE_IOERR_BLOCKED (SQLITE_IOERR | (11<<8))
+#define SQLITE_IOERR_NOMEM (SQLITE_IOERR | (12<<8))
+#define SQLITE_IOERR_ACCESS (SQLITE_IOERR | (13<<8))
+#define SQLITE_IOERR_CHECKRESERVEDLOCK (SQLITE_IOERR | (14<<8))
+#define SQLITE_IOERR_LOCK (SQLITE_IOERR | (15<<8))
+#define SQLITE_IOERR_CLOSE (SQLITE_IOERR | (16<<8))
+#define SQLITE_IOERR_DIR_CLOSE (SQLITE_IOERR | (17<<8))
+#define SQLITE_IOERR_SHMOPEN (SQLITE_IOERR | (18<<8))
+#define SQLITE_IOERR_SHMSIZE (SQLITE_IOERR | (19<<8))
+#define SQLITE_IOERR_SHMLOCK (SQLITE_IOERR | (20<<8))
+#define SQLITE_IOERR_SHMMAP (SQLITE_IOERR | (21<<8))
+#define SQLITE_IOERR_SEEK (SQLITE_IOERR | (22<<8))
+#define SQLITE_IOERR_DELETE_NOENT (SQLITE_IOERR | (23<<8))
+#define SQLITE_IOERR_MMAP (SQLITE_IOERR | (24<<8))
+#define SQLITE_IOERR_GETTEMPPATH (SQLITE_IOERR | (25<<8))
+#define SQLITE_IOERR_CONVPATH (SQLITE_IOERR | (26<<8))
+#define SQLITE_IOERR_VNODE (SQLITE_IOERR | (27<<8))
+#define SQLITE_IOERR_AUTH (SQLITE_IOERR | (28<<8))
+#define SQLITE_IOERR_BEGIN_ATOMIC (SQLITE_IOERR | (29<<8))
+#define SQLITE_IOERR_COMMIT_ATOMIC (SQLITE_IOERR | (30<<8))
+#define SQLITE_IOERR_ROLLBACK_ATOMIC (SQLITE_IOERR | (31<<8))
+#define SQLITE_IOERR_DATA (SQLITE_IOERR | (32<<8))
+#define SQLITE_LOCKED_SHAREDCACHE (SQLITE_LOCKED | (1<<8))
+#define SQLITE_LOCKED_VTAB (SQLITE_LOCKED | (2<<8))
+#define SQLITE_BUSY_RECOVERY (SQLITE_BUSY | (1<<8))
+#define SQLITE_BUSY_SNAPSHOT (SQLITE_BUSY | (2<<8))
+#define SQLITE_BUSY_TIMEOUT (SQLITE_BUSY | (3<<8))
+#define SQLITE_CANTOPEN_NOTEMPDIR (SQLITE_CANTOPEN | (1<<8))
+#define SQLITE_CANTOPEN_ISDIR (SQLITE_CANTOPEN | (2<<8))
+#define SQLITE_CANTOPEN_FULLPATH (SQLITE_CANTOPEN | (3<<8))
+#define SQLITE_CANTOPEN_CONVPATH (SQLITE_CANTOPEN | (4<<8))
+#define SQLITE_CANTOPEN_DIRTYWAL (SQLITE_CANTOPEN | (5<<8)) /* Not Used */
+#define SQLITE_CANTOPEN_SYMLINK (SQLITE_CANTOPEN | (6<<8))
+#define SQLITE_CORRUPT_VTAB (SQLITE_CORRUPT | (1<<8))
+#define SQLITE_CORRUPT_SEQUENCE (SQLITE_CORRUPT | (2<<8))
+#define SQLITE_CORRUPT_INDEX (SQLITE_CORRUPT | (3<<8))
+#define SQLITE_READONLY_RECOVERY (SQLITE_READONLY | (1<<8))
+#define SQLITE_READONLY_CANTLOCK (SQLITE_READONLY | (2<<8))
+#define SQLITE_READONLY_ROLLBACK (SQLITE_READONLY | (3<<8))
+#define SQLITE_READONLY_DBMOVED (SQLITE_READONLY | (4<<8))
+#define SQLITE_READONLY_CANTINIT (SQLITE_READONLY | (5<<8))
+#define SQLITE_READONLY_DIRECTORY (SQLITE_READONLY | (6<<8))
+#define SQLITE_ABORT_ROLLBACK (SQLITE_ABORT | (2<<8))
+#define SQLITE_CONSTRAINT_CHECK (SQLITE_CONSTRAINT | (1<<8))
+#define SQLITE_CONSTRAINT_COMMITHOOK (SQLITE_CONSTRAINT | (2<<8))
+#define SQLITE_CONSTRAINT_FOREIGNKEY (SQLITE_CONSTRAINT | (3<<8))
+#define SQLITE_CONSTRAINT_FUNCTION (SQLITE_CONSTRAINT | (4<<8))
+#define SQLITE_CONSTRAINT_NOTNULL (SQLITE_CONSTRAINT | (5<<8))
+#define SQLITE_CONSTRAINT_PRIMARYKEY (SQLITE_CONSTRAINT | (6<<8))
+#define SQLITE_CONSTRAINT_TRIGGER (SQLITE_CONSTRAINT | (7<<8))
+#define SQLITE_CONSTRAINT_UNIQUE (SQLITE_CONSTRAINT | (8<<8))
+#define SQLITE_CONSTRAINT_VTAB (SQLITE_CONSTRAINT | (9<<8))
+#define SQLITE_CONSTRAINT_ROWID (SQLITE_CONSTRAINT |(10<<8))
+#define SQLITE_CONSTRAINT_PINNED (SQLITE_CONSTRAINT |(11<<8))
+#define SQLITE_NOTICE_RECOVER_WAL (SQLITE_NOTICE | (1<<8))
+#define SQLITE_NOTICE_RECOVER_ROLLBACK (SQLITE_NOTICE | (2<<8))
+#define SQLITE_WARNING_AUTOINDEX (SQLITE_WARNING | (1<<8))
+#define SQLITE_AUTH_USER (SQLITE_AUTH | (1<<8))
+#define SQLITE_OK_LOAD_PERMANENTLY (SQLITE_OK | (1<<8))
+#define SQLITE_OK_SYMLINK (SQLITE_OK | (2<<8))
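+
+/*
+** Usage sketch (editor's note, not part of the upstream API docs): enable
+** extended result codes on a connection, then reduce an extended code to
+** its primary [result code] by masking off all but the low byte.
+**
+**   int rc;
+**   sqlite3_extended_result_codes(db, 1);
+**   rc = sqlite3_exec(db, "SELECT * FROM t1;", 0, 0, 0);
+**   if( (rc & 0xff)==SQLITE_IOERR ){
+**     fprintf(stderr, "disk I/O error, extended code %d\n", rc);
+**   }
+*/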
+
+/*
+** CAPI3REF: Flags For File Open Operations
+**
+** These bit values are intended for use in the
+** 3rd parameter to the [sqlite3_open_v2()] interface and
+** in the 4th parameter to the [sqlite3_vfs.xOpen] method.
+*/
+#define SQLITE_OPEN_READONLY 0x00000001 /* Ok for sqlite3_open_v2() */
+#define SQLITE_OPEN_READWRITE 0x00000002 /* Ok for sqlite3_open_v2() */
+#define SQLITE_OPEN_CREATE 0x00000004 /* Ok for sqlite3_open_v2() */
+#define SQLITE_OPEN_DELETEONCLOSE 0x00000008 /* VFS only */
+#define SQLITE_OPEN_EXCLUSIVE 0x00000010 /* VFS only */
+#define SQLITE_OPEN_AUTOPROXY 0x00000020 /* VFS only */
+#define SQLITE_OPEN_URI 0x00000040 /* Ok for sqlite3_open_v2() */
+#define SQLITE_OPEN_MEMORY 0x00000080 /* Ok for sqlite3_open_v2() */
+#define SQLITE_OPEN_MAIN_DB 0x00000100 /* VFS only */
+#define SQLITE_OPEN_TEMP_DB 0x00000200 /* VFS only */
+#define SQLITE_OPEN_TRANSIENT_DB 0x00000400 /* VFS only */
+#define SQLITE_OPEN_MAIN_JOURNAL 0x00000800 /* VFS only */
+#define SQLITE_OPEN_TEMP_JOURNAL 0x00001000 /* VFS only */
+#define SQLITE_OPEN_SUBJOURNAL 0x00002000 /* VFS only */
+#define SQLITE_OPEN_SUPER_JOURNAL 0x00004000 /* VFS only */
+#define SQLITE_OPEN_NOMUTEX 0x00008000 /* Ok for sqlite3_open_v2() */
+#define SQLITE_OPEN_FULLMUTEX 0x00010000 /* Ok for sqlite3_open_v2() */
+#define SQLITE_OPEN_SHAREDCACHE 0x00020000 /* Ok for sqlite3_open_v2() */
+#define SQLITE_OPEN_PRIVATECACHE 0x00040000 /* Ok for sqlite3_open_v2() */
+#define SQLITE_OPEN_WAL 0x00080000 /* VFS only */
+#define SQLITE_OPEN_NOFOLLOW 0x01000000 /* Ok for sqlite3_open_v2() */
+
+/* Reserved: 0x00F00000 */
+/* Legacy compatibility: */
+#define SQLITE_OPEN_MASTER_JOURNAL 0x00004000 /* VFS only */
+
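+/*
+** Usage sketch (editor's note, not part of the upstream API docs): open,
+** creating if necessary, a database file with sqlite3_open_v2() and the
+** flags above.  "data.db" is a hypothetical filename; a zero final
+** argument selects the default [VFS].
+**
+**   sqlite3 *db;
+**   int rc = sqlite3_open_v2("data.db", &db,
+**       SQLITE_OPEN_READWRITE|SQLITE_OPEN_CREATE|SQLITE_OPEN_NOMUTEX, 0);
+**   if( rc!=SQLITE_OK ){
+**     fprintf(stderr, "cannot open: %s\n", sqlite3_errmsg(db));
+**   }
+*/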
+
+/*
+** CAPI3REF: Device Characteristics
+**
+** The xDeviceCharacteristics method of the [sqlite3_io_methods]
+** object returns an integer which is a vector of these
+** bit values expressing I/O characteristics of the mass storage
+** device that holds the file that the [sqlite3_io_methods]
+** refers to.
+**
+** The SQLITE_IOCAP_ATOMIC property means that all writes of
+** any size are atomic. The SQLITE_IOCAP_ATOMICnnn values
+** mean that writes of blocks that are nnn bytes in size and
+** are aligned to an address which is an integer multiple of
+** nnn are atomic. The SQLITE_IOCAP_SAFE_APPEND value means
+** that when data is appended to a file, the data is appended
+** first then the size of the file is extended, never the other
+** way around. The SQLITE_IOCAP_SEQUENTIAL property means that
+** information is written to disk in the same order as calls
+** to xWrite(). The SQLITE_IOCAP_POWERSAFE_OVERWRITE property means that
+** after reboot following a crash or power loss, the only bytes in a
+** file that were written at the application level might have changed
+** and that adjacent bytes, even bytes within the same sector, are
+** guaranteed to be unchanged. The SQLITE_IOCAP_UNDELETABLE_WHEN_OPEN
+** flag indicates that a file cannot be deleted when open. The
+** SQLITE_IOCAP_IMMUTABLE flag indicates that the file is on
+** read-only media and cannot be changed even by processes with
+** elevated privileges.
+**
+** The SQLITE_IOCAP_BATCH_ATOMIC property means that the underlying
+** filesystem supports doing multiple write operations atomically when those
+** write operations are bracketed by [SQLITE_FCNTL_BEGIN_ATOMIC_WRITE] and
+** [SQLITE_FCNTL_COMMIT_ATOMIC_WRITE].
+*/
+#define SQLITE_IOCAP_ATOMIC 0x00000001
+#define SQLITE_IOCAP_ATOMIC512 0x00000002
+#define SQLITE_IOCAP_ATOMIC1K 0x00000004
+#define SQLITE_IOCAP_ATOMIC2K 0x00000008
+#define SQLITE_IOCAP_ATOMIC4K 0x00000010
+#define SQLITE_IOCAP_ATOMIC8K 0x00000020
+#define SQLITE_IOCAP_ATOMIC16K 0x00000040
+#define SQLITE_IOCAP_ATOMIC32K 0x00000080
+#define SQLITE_IOCAP_ATOMIC64K 0x00000100
+#define SQLITE_IOCAP_SAFE_APPEND 0x00000200
+#define SQLITE_IOCAP_SEQUENTIAL 0x00000400
+#define SQLITE_IOCAP_UNDELETABLE_WHEN_OPEN 0x00000800
+#define SQLITE_IOCAP_POWERSAFE_OVERWRITE 0x00001000
+#define SQLITE_IOCAP_IMMUTABLE 0x00002000
+#define SQLITE_IOCAP_BATCH_ATOMIC 0x00004000
+
+/*
+** CAPI3REF: File Locking Levels
+**
+** SQLite uses one of these integer values as the second
+** argument to calls it makes to the xLock() and xUnlock() methods
+** of an [sqlite3_io_methods] object.
+*/
+#define SQLITE_LOCK_NONE 0
+#define SQLITE_LOCK_SHARED 1
+#define SQLITE_LOCK_RESERVED 2
+#define SQLITE_LOCK_PENDING 3
+#define SQLITE_LOCK_EXCLUSIVE 4
+
+/*
+** CAPI3REF: Synchronization Type Flags
+**
+** When SQLite invokes the xSync() method of an
+** [sqlite3_io_methods] object it uses a combination of
+** these integer values as the second argument.
+**
+** When the SQLITE_SYNC_DATAONLY flag is used, it means that the
+** sync operation only needs to flush data to mass storage. Inode
+** information need not be flushed. If the lower four bits of the flag
+** equal SQLITE_SYNC_NORMAL, that means to use normal fsync() semantics.
+** If the lower four bits equal SQLITE_SYNC_FULL, that means
+** to use Mac OS X style fullsync instead of fsync().
+**
+** Do not confuse the SQLITE_SYNC_NORMAL and SQLITE_SYNC_FULL flags
+** with the [PRAGMA synchronous]=NORMAL and [PRAGMA synchronous]=FULL
+** settings. The [synchronous pragma] determines when calls to the
+** xSync VFS method occur and applies uniformly across all platforms.
+** The SQLITE_SYNC_NORMAL and SQLITE_SYNC_FULL flags determine how
+** energetic or rigorous or forceful the sync operations are and
+** only make a difference on Mac OSX for the default SQLite code.
+** (Third-party VFS implementations might also make the distinction
+** between SQLITE_SYNC_NORMAL and SQLITE_SYNC_FULL, but among the
+** operating systems natively supported by SQLite, only Mac OSX
+** cares about the difference.)
+*/
+#define SQLITE_SYNC_NORMAL 0x00002
+#define SQLITE_SYNC_FULL 0x00003
+#define SQLITE_SYNC_DATAONLY 0x00010
+
+/*
+** CAPI3REF: OS Interface Open File Handle
+**
+** An [sqlite3_file] object represents an open file in the
+** [sqlite3_vfs | OS interface layer]. Individual OS interface
+** implementations will
+** want to subclass this object by appending additional fields
+** for their own use. The pMethods entry is a pointer to an
+** [sqlite3_io_methods] object that defines methods for performing
+** I/O operations on the open file.
+*/
+typedef struct sqlite3_file sqlite3_file;
+struct sqlite3_file {
+ const struct sqlite3_io_methods *pMethods; /* Methods for an open file */
+};
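+
+/*
+** Sketch (editor's note, not part of the upstream API docs): VFS
+** implementations subclass sqlite3_file by making it the first field of
+** their own structure.  "DemoFile", wrapping a POSIX file descriptor,
+** is a hypothetical example.
+**
+**   typedef struct DemoFile DemoFile;
+**   struct DemoFile {
+**     sqlite3_file base;
+**     int fd;
+**   };
+**
+** The base field must come first so that a DemoFile* can be passed
+** wherever an sqlite3_file* is expected; fd is the underlying file
+** descriptor.
+*/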
+
+/*
+** CAPI3REF: OS Interface File Virtual Methods Object
+**
+** Every file opened by the [sqlite3_vfs.xOpen] method populates an
+** [sqlite3_file] object (or, more commonly, a subclass of the
+** [sqlite3_file] object) with a pointer to an instance of this object.
+** This object defines the methods used to perform various operations
+** against the open file represented by the [sqlite3_file] object.
+**
+** If the [sqlite3_vfs.xOpen] method sets the sqlite3_file.pMethods element
+** to a non-NULL pointer, then the sqlite3_io_methods.xClose method
+** may be invoked even if the [sqlite3_vfs.xOpen] reported that it failed. The
+** only way to prevent a call to xClose following a failed [sqlite3_vfs.xOpen]
+** is for the [sqlite3_vfs.xOpen] to set the sqlite3_file.pMethods element
+** to NULL.
+**
+** The flags argument to xSync may be one of [SQLITE_SYNC_NORMAL] or
+** [SQLITE_SYNC_FULL]. The first choice is the normal fsync().
+** The second choice is a Mac OS X style fullsync. The [SQLITE_SYNC_DATAONLY]
+** flag may be ORed in to indicate that only the data of the file
+** and not its inode needs to be synced.
+**
+** The integer values to xLock() and xUnlock() are one of
+** <ul>
+** <li> [SQLITE_LOCK_NONE],
+** <li> [SQLITE_LOCK_SHARED],
+** <li> [SQLITE_LOCK_RESERVED],
+** <li> [SQLITE_LOCK_PENDING], or
+** <li> [SQLITE_LOCK_EXCLUSIVE].
+** </ul>
+** xLock() increases the lock. xUnlock() decreases the lock.
+** The xCheckReservedLock() method checks whether any database connection,
+** either in this process or in some other process, is holding a RESERVED,
+** PENDING, or EXCLUSIVE lock on the file. It returns true
+** if such a lock exists and false otherwise.
+**
+** The xFileControl() method is a generic interface that allows custom
+** VFS implementations to directly control an open file using the
+** [sqlite3_file_control()] interface. The second "op" argument is an
+** integer opcode. The third argument is a generic pointer intended to
+** point to a structure that may contain arguments or space in which to
+** write return values. Potential uses for xFileControl() might be
+** functions to enable blocking locks with timeouts, to change the
+** locking strategy (for example to use dot-file locks), to inquire
+** about the status of a lock, or to break stale locks. The SQLite
+** core reserves all opcodes less than 100 for its own use.
+** A [file control opcodes | list of opcodes] less than 100 is available.
+** Applications that define a custom xFileControl method should use opcodes
+** greater than 100 to avoid conflicts. VFS implementations should
+** return [SQLITE_NOTFOUND] for file control opcodes that they do not
+** recognize.
+**
+** The xSectorSize() method returns the sector size of the
+** device that underlies the file. The sector size is the
+** minimum write that can be performed without disturbing
+** other bytes in the file. The xDeviceCharacteristics()
+** method returns a bit vector describing behaviors of the
+** underlying device:
+**
+** <ul>
+** <li> [SQLITE_IOCAP_ATOMIC]
+** <li> [SQLITE_IOCAP_ATOMIC512]
+** <li> [SQLITE_IOCAP_ATOMIC1K]
+** <li> [SQLITE_IOCAP_ATOMIC2K]
+** <li> [SQLITE_IOCAP_ATOMIC4K]
+** <li> [SQLITE_IOCAP_ATOMIC8K]
+** <li> [SQLITE_IOCAP_ATOMIC16K]
+** <li> [SQLITE_IOCAP_ATOMIC32K]
+** <li> [SQLITE_IOCAP_ATOMIC64K]
+** <li> [SQLITE_IOCAP_SAFE_APPEND]
+** <li> [SQLITE_IOCAP_SEQUENTIAL]
+** <li> [SQLITE_IOCAP_UNDELETABLE_WHEN_OPEN]
+** <li> [SQLITE_IOCAP_POWERSAFE_OVERWRITE]
+** <li> [SQLITE_IOCAP_IMMUTABLE]
+** <li> [SQLITE_IOCAP_BATCH_ATOMIC]
+** </ul>
+**
+** The SQLITE_IOCAP_ATOMIC property means that all writes of
+** any size are atomic. The SQLITE_IOCAP_ATOMICnnn values
+** mean that writes of blocks that are nnn bytes in size and
+** are aligned to an address which is an integer multiple of
+** nnn are atomic. The SQLITE_IOCAP_SAFE_APPEND value means
+** that when data is appended to a file, the data is appended
+** first then the size of the file is extended, never the other
+** way around. The SQLITE_IOCAP_SEQUENTIAL property means that
+** information is written to disk in the same order as calls
+** to xWrite().
+**
+** If xRead() returns SQLITE_IOERR_SHORT_READ it must also fill
+** in the unread portions of the buffer with zeros. A VFS that
+** fails to zero-fill short reads might seem to work. However,
+** failure to zero-fill short reads will eventually lead to
+** database corruption.
+*/
+typedef struct sqlite3_io_methods sqlite3_io_methods;
+struct sqlite3_io_methods {
+ int iVersion;
+ int (*xClose)(sqlite3_file*);
+ int (*xRead)(sqlite3_file*, void*, int iAmt, sqlite3_int64 iOfst);
+ int (*xWrite)(sqlite3_file*, const void*, int iAmt, sqlite3_int64 iOfst);
+ int (*xTruncate)(sqlite3_file*, sqlite3_int64 size);
+ int (*xSync)(sqlite3_file*, int flags);
+ int (*xFileSize)(sqlite3_file*, sqlite3_int64 *pSize);
+ int (*xLock)(sqlite3_file*, int);
+ int (*xUnlock)(sqlite3_file*, int);
+ int (*xCheckReservedLock)(sqlite3_file*, int *pResOut);
+ int (*xFileControl)(sqlite3_file*, int op, void *pArg);
+ int (*xSectorSize)(sqlite3_file*);
+ int (*xDeviceCharacteristics)(sqlite3_file*);
+ /* Methods above are valid for version 1 */
+ int (*xShmMap)(sqlite3_file*, int iPg, int pgsz, int, void volatile**);
+ int (*xShmLock)(sqlite3_file*, int offset, int n, int flags);
+ void (*xShmBarrier)(sqlite3_file*);
+ int (*xShmUnmap)(sqlite3_file*, int deleteFlag);
+ /* Methods above are valid for version 2 */
+ int (*xFetch)(sqlite3_file*, sqlite3_int64 iOfst, int iAmt, void **pp);
+ int (*xUnfetch)(sqlite3_file*, sqlite3_int64 iOfst, void *p);
+ /* Methods above are valid for version 3 */
+ /* Additional methods may be added in future releases */
+};
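+
+/*
+** Sketch (editor's note, not part of the upstream API docs): the shape of
+** an xRead implementation that honors the zero-fill requirement above.
+** DemoFile is the hypothetical sqlite3_file subclass sketched earlier;
+** pread() is POSIX and <string.h>/<unistd.h> are assumed.
+**
+**   static int demoRead(sqlite3_file *pFile, void *zBuf,
+**                       int iAmt, sqlite3_int64 iOfst){
+**     DemoFile *p = (DemoFile*)pFile;
+**     ssize_t nRead = pread(p->fd, zBuf, (size_t)iAmt, (off_t)iOfst);
+**     if( nRead<0 ) return SQLITE_IOERR_READ;
+**     if( nRead<iAmt ){
+**       memset(&((char*)zBuf)[nRead], 0, (size_t)(iAmt - nRead));
+**       return SQLITE_IOERR_SHORT_READ;
+**     }
+**     return SQLITE_OK;
+**   }
+*/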
+
+/*
+** CAPI3REF: Standard File Control Opcodes
+** KEYWORDS: {file control opcodes} {file control opcode}
+**
+** These integer constants are opcodes for the xFileControl method
+** of the [sqlite3_io_methods] object and for the [sqlite3_file_control()]
+** interface.
+**
+** <ul>
+** <li>[[SQLITE_FCNTL_LOCKSTATE]]
+** The [SQLITE_FCNTL_LOCKSTATE] opcode is used for debugging. This
+** opcode causes the xFileControl method to write the current state of
+** the lock (one of [SQLITE_LOCK_NONE], [SQLITE_LOCK_SHARED],
+** [SQLITE_LOCK_RESERVED], [SQLITE_LOCK_PENDING], or [SQLITE_LOCK_EXCLUSIVE])
+** into an integer that the pArg argument points to. This capability
+** is used during testing and is only available when the SQLITE_TEST
+** compile-time option is used.
+**
+** <li>[[SQLITE_FCNTL_SIZE_HINT]]
+** The [SQLITE_FCNTL_SIZE_HINT] opcode is used by SQLite to give the VFS
+** layer a hint of how large the database file will grow to be during the
+** current transaction. This hint is not guaranteed to be accurate but it
+** is often close. The underlying VFS might choose to preallocate database
+** file space based on this hint in order to help writes to the database
+** file run faster.
+**
+** <li>[[SQLITE_FCNTL_SIZE_LIMIT]]
+** The [SQLITE_FCNTL_SIZE_LIMIT] opcode is used by an in-memory VFS that
+** implements [sqlite3_deserialize()] to set an upper bound on the size
+** of the in-memory database. The argument is a pointer to a [sqlite3_int64].
+** If the integer pointed to is negative, then it is filled in with the
+** current limit. Otherwise the limit is set to the larger of the value
+** of the integer pointed to and the current database size. The integer
+** pointed to is set to the new limit.
+**
+** <li>[[SQLITE_FCNTL_CHUNK_SIZE]]
+** The [SQLITE_FCNTL_CHUNK_SIZE] opcode is used to request that the VFS
+** extend and truncate the database file in chunks of a size specified
+** by the user. The fourth argument to [sqlite3_file_control()] should
+** point to an integer (type int) containing the new chunk-size to use
+** for the nominated database. Allocating database file space in large
+** chunks (say 1MB at a time), may reduce file-system fragmentation and
+** improve performance on some systems.
+**
+** <li>[[SQLITE_FCNTL_FILE_POINTER]]
+** The [SQLITE_FCNTL_FILE_POINTER] opcode is used to obtain a pointer
+** to the [sqlite3_file] object associated with a particular database
+** connection. See also [SQLITE_FCNTL_JOURNAL_POINTER].
+**
+** <li>[[SQLITE_FCNTL_JOURNAL_POINTER]]
+** The [SQLITE_FCNTL_JOURNAL_POINTER] opcode is used to obtain a pointer
+** to the [sqlite3_file] object associated with the journal file (either
+** the [rollback journal] or the [write-ahead log]) for a particular database
+** connection. See also [SQLITE_FCNTL_FILE_POINTER].
+**
+** <li>[[SQLITE_FCNTL_SYNC_OMITTED]]
+** No longer in use.
+**
+** <li>[[SQLITE_FCNTL_SYNC]]
+** The [SQLITE_FCNTL_SYNC] opcode is generated internally by SQLite and
+** sent to the VFS immediately before the xSync method is invoked on a
+** database file descriptor. Or, if the xSync method is not invoked
+** because the user has configured SQLite with
+** [PRAGMA synchronous | PRAGMA synchronous=OFF] it is invoked in place
+** of the xSync method. In most cases, the pointer argument passed with
+** this file-control is NULL. However, if the database file is being synced
+** as part of a multi-database commit, the argument points to a nul-terminated
+** string containing the transaction's super-journal file name. VFSes that
+** do not need this signal should silently ignore this opcode. Applications
+** should not call [sqlite3_file_control()] with this opcode as doing so may
+** disrupt the operation of the specialized VFSes that do require it.
+**
+** <li>[[SQLITE_FCNTL_COMMIT_PHASETWO]]
+** The [SQLITE_FCNTL_COMMIT_PHASETWO] opcode is generated internally by SQLite
+** and sent to the VFS after a transaction has been committed immediately
+** but before the database is unlocked. VFSes that do not need this signal
+** should silently ignore this opcode. Applications should not call
+** [sqlite3_file_control()] with this opcode as doing so may disrupt the
+** operation of the specialized VFSes that do require it.
+**
+** <li>[[SQLITE_FCNTL_WIN32_AV_RETRY]]
+** ^The [SQLITE_FCNTL_WIN32_AV_RETRY] opcode is used to configure automatic
+** retry counts and intervals for certain disk I/O operations for the
+** windows [VFS] in order to provide robustness in the presence of
+** anti-virus programs. By default, the windows VFS will retry file read,
+** file write, and file delete operations up to 10 times, with a delay
+** of 25 milliseconds before the first retry and with the delay increasing
+** by an additional 25 milliseconds with each subsequent retry. This
+** opcode allows these two values (10 retries and 25 milliseconds of delay)
+** to be adjusted. The values are changed for all database connections
+** within the same process. The argument is a pointer to an array of two
+** integers where the first integer is the new retry count and the second
+** integer is the delay. If either integer is negative, then the setting
+** is not changed but instead the prior value of that setting is written
+** into the array entry, allowing the current retry settings to be
+** interrogated. The zDbName parameter is ignored.
+**
+** <li>[[SQLITE_FCNTL_PERSIST_WAL]]
+** ^The [SQLITE_FCNTL_PERSIST_WAL] opcode is used to set or query the
+** persistent [WAL | Write Ahead Log] setting. By default, the auxiliary
+** write ahead log ([WAL file]) and shared memory
+** files used for transaction control
+** are automatically deleted when the latest connection to the database
+** closes. Setting persistent WAL mode causes those files to persist after
+** close. Persisting the files is useful when other processes that do not
+** have write permission on the directory containing the database file want
+** to read the database file, as the WAL and shared memory files must exist
+** in order for the database to be readable. The fourth parameter to
+** [sqlite3_file_control()] for this opcode should be a pointer to an integer.
+** That integer is 0 to disable persistent WAL mode or 1 to enable persistent
+** WAL mode. If the integer is -1, then it is overwritten with the current
+** WAL persistence setting.
+**
+** <li>[[SQLITE_FCNTL_POWERSAFE_OVERWRITE]]
+** ^The [SQLITE_FCNTL_POWERSAFE_OVERWRITE] opcode is used to set or query the
+** persistent "powersafe-overwrite" or "PSOW" setting. The PSOW setting
+** determines the [SQLITE_IOCAP_POWERSAFE_OVERWRITE] bit of the
+** xDeviceCharacteristics methods. The fourth parameter to
+** [sqlite3_file_control()] for this opcode should be a pointer to an integer.
+** That integer is 0 to disable zero-damage mode or 1 to enable zero-damage
+** mode. If the integer is -1, then it is overwritten with the current
+** zero-damage mode setting.
+**
+** <li>[[SQLITE_FCNTL_OVERWRITE]]
+** ^The [SQLITE_FCNTL_OVERWRITE] opcode is invoked by SQLite after opening
+** a write transaction to indicate that, unless it is rolled back for some
+** reason, the entire database file will be overwritten by the current
+** transaction. This is used by VACUUM operations.
+**
+** <li>[[SQLITE_FCNTL_VFSNAME]]
+** ^The [SQLITE_FCNTL_VFSNAME] opcode can be used to obtain the names of
+** all [VFSes] in the VFS stack. The names of all VFS shims and of the
+** final bottom-level VFS are written into memory obtained from
+** [sqlite3_malloc()] and the result is stored in the char* variable
+** that the fourth parameter of [sqlite3_file_control()] points to.
+** The caller is responsible for freeing the memory when done. As with
+** all file-control actions, there is no guarantee that this will actually
+** do anything. Callers should initialize the char* variable to a NULL
+** pointer in case this file-control is not implemented. This file-control
+** is intended for diagnostic use only.
+**
+** <li>[[SQLITE_FCNTL_VFS_POINTER]]
+** ^The [SQLITE_FCNTL_VFS_POINTER] opcode finds a pointer to the top-level
+** [VFS] currently in use. ^(The argument X in
+** sqlite3_file_control(db,SQLITE_FCNTL_VFS_POINTER,X) must be
+** of type "[sqlite3_vfs] **". This opcodes will set *X
+** to a pointer to the top-level VFS.)^
+** ^When there are multiple VFS shims in the stack, this opcode finds the
+** upper-most shim only.
+**
+** <li>[[SQLITE_FCNTL_PRAGMA]]
+** ^Whenever a [PRAGMA] statement is parsed, an [SQLITE_FCNTL_PRAGMA]
+** file control is sent to the open [sqlite3_file] object corresponding
+** to the database file to which the pragma statement refers. ^The argument
+** to the [SQLITE_FCNTL_PRAGMA] file control is an array of
+** pointers to strings (char**) in which the second element of the array
+** is the name of the pragma and the third element is the argument to the
+** pragma or NULL if the pragma has no argument. ^The handler for an
+** [SQLITE_FCNTL_PRAGMA] file control can optionally make the first element
+** of the char** argument point to a string obtained from [sqlite3_mprintf()]
+** or the equivalent and that string will become the result of the pragma or
+** the error message if the pragma fails. ^If the
+** [SQLITE_FCNTL_PRAGMA] file control returns [SQLITE_NOTFOUND], then normal
+** [PRAGMA] processing continues. ^If the [SQLITE_FCNTL_PRAGMA]
+** file control returns [SQLITE_OK], then the parser assumes that the
+** VFS has handled the PRAGMA itself and the parser generates a no-op
+** prepared statement if the result string is NULL, or one that returns a copy
+** of the result string if the string is non-NULL.
+** ^If the [SQLITE_FCNTL_PRAGMA] file control returns
+** any result code other than [SQLITE_OK] or [SQLITE_NOTFOUND], that means
+** that the VFS encountered an error while handling the [PRAGMA] and the
+** compilation of the PRAGMA fails with an error. ^The [SQLITE_FCNTL_PRAGMA]
+** file control occurs at the beginning of pragma statement analysis and so
+** it is able to override built-in [PRAGMA] statements.
+**
+** <li>[[SQLITE_FCNTL_BUSYHANDLER]]
+** ^The [SQLITE_FCNTL_BUSYHANDLER]
+** file-control may be invoked by SQLite on the database file handle
+** shortly after it is opened in order to provide a custom VFS with access
+** to the connection's busy-handler callback. The argument is of type (void**)
+** - an array of two (void *) values. The first (void *) actually points
+** to a function of type (int (*)(void *)). In order to invoke the connection's
+** busy-handler, this function should be invoked with the second (void *) in
+** the array as the only argument. If it returns non-zero, then the operation
+** should be retried. If it returns zero, the custom VFS should abandon the
+** current operation.
+**
+** <li>[[SQLITE_FCNTL_TEMPFILENAME]]
+** ^Applications can invoke the [SQLITE_FCNTL_TEMPFILENAME] file-control
+** to have SQLite generate a
+** temporary filename using the same algorithm that is followed to generate
+** temporary filenames for TEMP tables and other internal uses. The
+** argument should be a char** which will be filled with the filename
+** written into memory obtained from [sqlite3_malloc()]. The caller should
+** invoke [sqlite3_free()] on the result to avoid a memory leak.
+**
+** <li>[[SQLITE_FCNTL_MMAP_SIZE]]
+** The [SQLITE_FCNTL_MMAP_SIZE] file control is used to query or set the
+** maximum number of bytes that will be used for memory-mapped I/O.
+** The argument is a pointer to a value of type sqlite3_int64 that
+** is an advisory maximum number of bytes in the file to memory map. The
+** pointer is overwritten with the old value. The limit is not changed if
+** the value originally pointed to is negative, and so the current limit
+** can be queried by passing in a pointer to a negative number. This
+** file-control is used internally to implement [PRAGMA mmap_size].
+**
+** <li>[[SQLITE_FCNTL_TRACE]]
+** The [SQLITE_FCNTL_TRACE] file control provides advisory information
+** to the VFS about what the higher layers of the SQLite stack are doing.
+** This file control is used by some VFS activity tracing [shims].
+** The argument is a zero-terminated string. Higher layers in the
+** SQLite stack may generate instances of this file control if
+** the [SQLITE_USE_FCNTL_TRACE] compile-time option is enabled.
+**
+** <li>[[SQLITE_FCNTL_HAS_MOVED]]
+** The [SQLITE_FCNTL_HAS_MOVED] file control interprets its argument as a
+** pointer to an integer and writes a boolean into that integer depending
+** on whether or not the file has been renamed, moved, or deleted since it
+** was first opened.
+**
+** <li>[[SQLITE_FCNTL_WIN32_GET_HANDLE]]
+** The [SQLITE_FCNTL_WIN32_GET_HANDLE] opcode can be used to obtain the
+** underlying native file handle associated with a file handle. This file
+** control interprets its argument as a pointer to a native file handle and
+** writes the resulting value there.
+**
+** <li>[[SQLITE_FCNTL_WIN32_SET_HANDLE]]
+** The [SQLITE_FCNTL_WIN32_SET_HANDLE] opcode is used for debugging. This
+** opcode causes the xFileControl method to swap the file handle with the one
+** pointed to by the pArg argument. This capability is used during testing
+** and only needs to be supported when SQLITE_TEST is defined.
+**
+** <li>[[SQLITE_FCNTL_WAL_BLOCK]]
+** The [SQLITE_FCNTL_WAL_BLOCK] opcode is a signal to the VFS layer that it
+** might be advantageous to block on the next WAL lock if the lock is not
+** immediately available. The WAL subsystem issues this signal during rare
+** circumstances in order to fix a problem with priority inversion.
+** Applications should <em>not</em> use this file-control.
+**
+** <li>[[SQLITE_FCNTL_ZIPVFS]]
+** The [SQLITE_FCNTL_ZIPVFS] opcode is implemented by zipvfs only. All other
+** VFSes should return SQLITE_NOTFOUND for this opcode.
+**
+** <li>[[SQLITE_FCNTL_RBU]]
+** The [SQLITE_FCNTL_RBU] opcode is implemented by the special VFS used by
+** the RBU extension only. All other VFSes should return SQLITE_NOTFOUND for
+** this opcode.
+**
+** <li>[[SQLITE_FCNTL_BEGIN_ATOMIC_WRITE]]
+** If the [SQLITE_FCNTL_BEGIN_ATOMIC_WRITE] opcode returns SQLITE_OK, then
+** the file descriptor is placed in "batch write mode", which
+** means all subsequent write operations will be deferred and done
+** atomically at the next [SQLITE_FCNTL_COMMIT_ATOMIC_WRITE]. Systems
+** that do not support batch atomic writes will return SQLITE_NOTFOUND.
+** ^Following a successful SQLITE_FCNTL_BEGIN_ATOMIC_WRITE and prior to
+** the closing [SQLITE_FCNTL_COMMIT_ATOMIC_WRITE] or
+** [SQLITE_FCNTL_ROLLBACK_ATOMIC_WRITE], SQLite will make
+** no VFS interface calls on the same [sqlite3_file] file descriptor
+** except for calls to the xWrite method and the xFileControl method
+** with [SQLITE_FCNTL_SIZE_HINT].
+**
+** <li>[[SQLITE_FCNTL_COMMIT_ATOMIC_WRITE]]
+** The [SQLITE_FCNTL_COMMIT_ATOMIC_WRITE] opcode causes all write
+** operations since the previous successful call to
+** [SQLITE_FCNTL_BEGIN_ATOMIC_WRITE] to be performed atomically.
+** This file control returns [SQLITE_OK] if and only if the writes were
+** all performed successfully and have been committed to persistent storage.
+** ^Regardless of whether or not it is successful, this file control takes
+** the file descriptor out of batch write mode so that all subsequent
+** write operations are independent.
+** ^SQLite will never invoke SQLITE_FCNTL_COMMIT_ATOMIC_WRITE without
+** a prior successful call to [SQLITE_FCNTL_BEGIN_ATOMIC_WRITE].
+**
+** <li>[[SQLITE_FCNTL_ROLLBACK_ATOMIC_WRITE]]
+** The [SQLITE_FCNTL_ROLLBACK_ATOMIC_WRITE] opcode causes all write
+** operations since the previous successful call to
+** [SQLITE_FCNTL_BEGIN_ATOMIC_WRITE] to be rolled back.
+** ^This file control takes the file descriptor out of batch write mode
+** so that all subsequent write operations are independent.
+** ^SQLite will never invoke SQLITE_FCNTL_ROLLBACK_ATOMIC_WRITE without
+** a prior successful call to [SQLITE_FCNTL_BEGIN_ATOMIC_WRITE].
+**
+** <li>[[SQLITE_FCNTL_LOCK_TIMEOUT]]
+** The [SQLITE_FCNTL_LOCK_TIMEOUT] opcode is used to configure a VFS
+** to block for up to M milliseconds before failing when attempting to
+** obtain a file lock using the xLock or xShmLock methods of the VFS.
+** The parameter is a pointer to a 32-bit signed integer that contains
+** the value that M is to be set to. Before returning, the 32-bit signed
+** integer is overwritten with the previous value of M.
+**
+** <li>[[SQLITE_FCNTL_DATA_VERSION]]
+** The [SQLITE_FCNTL_DATA_VERSION] opcode is used to detect changes to
+** a database file. The argument is a pointer to a 32-bit unsigned integer.
+** The "data version" for the pager is written into the pointer. The
+** "data version" changes whenever any change occurs to the corresponding
+** database file, either through SQL statements on the same database
+** connection or through transactions committed by separate database
+** connections possibly in other processes. The [sqlite3_total_changes()]
+** interface can be used to find if any database on the connection has changed,
+** but that interface responds to changes on TEMP as well as MAIN and does
+** not provide a mechanism to detect changes to MAIN only. Also, the
+** [sqlite3_total_changes()] interface responds to internal changes only and
+** omits changes made by other database connections. The
+** [PRAGMA data_version] command provides a mechanism to detect changes to
+** a single attached database that occur due to other database connections,
+** but omits changes implemented by the database connection on which it is
+** called. This file control is the only mechanism to detect changes that
+** happen either internally or externally and that are associated with
+** a particular attached database.
+**
+** <li>[[SQLITE_FCNTL_CKPT_START]]
+** The [SQLITE_FCNTL_CKPT_START] opcode is invoked from within a checkpoint
+** in wal mode before the client starts to copy pages from the wal
+** file to the database file.
+**
+** <li>[[SQLITE_FCNTL_CKPT_DONE]]
+** The [SQLITE_FCNTL_CKPT_DONE] opcode is invoked from within a checkpoint
+** in wal mode after the client has finished copying pages from the wal
+** file to the database file, but before the *-shm file is updated to
+** record the fact that the pages have been checkpointed.
+** </ul>
+*/
+#define SQLITE_FCNTL_LOCKSTATE 1
+#define SQLITE_FCNTL_GET_LOCKPROXYFILE 2
+#define SQLITE_FCNTL_SET_LOCKPROXYFILE 3
+#define SQLITE_FCNTL_LAST_ERRNO 4
+#define SQLITE_FCNTL_SIZE_HINT 5
+#define SQLITE_FCNTL_CHUNK_SIZE 6
+#define SQLITE_FCNTL_FILE_POINTER 7
+#define SQLITE_FCNTL_SYNC_OMITTED 8
+#define SQLITE_FCNTL_WIN32_AV_RETRY 9
+#define SQLITE_FCNTL_PERSIST_WAL 10
+#define SQLITE_FCNTL_OVERWRITE 11
+#define SQLITE_FCNTL_VFSNAME 12
+#define SQLITE_FCNTL_POWERSAFE_OVERWRITE 13
+#define SQLITE_FCNTL_PRAGMA 14
+#define SQLITE_FCNTL_BUSYHANDLER 15
+#define SQLITE_FCNTL_TEMPFILENAME 16
+#define SQLITE_FCNTL_MMAP_SIZE 18
+#define SQLITE_FCNTL_TRACE 19
+#define SQLITE_FCNTL_HAS_MOVED 20
+#define SQLITE_FCNTL_SYNC 21
+#define SQLITE_FCNTL_COMMIT_PHASETWO 22
+#define SQLITE_FCNTL_WIN32_SET_HANDLE 23
+#define SQLITE_FCNTL_WAL_BLOCK 24
+#define SQLITE_FCNTL_ZIPVFS 25
+#define SQLITE_FCNTL_RBU 26
+#define SQLITE_FCNTL_VFS_POINTER 27
+#define SQLITE_FCNTL_JOURNAL_POINTER 28
+#define SQLITE_FCNTL_WIN32_GET_HANDLE 29
+#define SQLITE_FCNTL_PDB 30
+#define SQLITE_FCNTL_BEGIN_ATOMIC_WRITE 31
+#define SQLITE_FCNTL_COMMIT_ATOMIC_WRITE 32
+#define SQLITE_FCNTL_ROLLBACK_ATOMIC_WRITE 33
+#define SQLITE_FCNTL_LOCK_TIMEOUT 34
+#define SQLITE_FCNTL_DATA_VERSION 35
+#define SQLITE_FCNTL_SIZE_LIMIT 36
+#define SQLITE_FCNTL_CKPT_DONE 37
+#define SQLITE_FCNTL_RESERVE_BYTES 38
+#define SQLITE_FCNTL_CKPT_START 39
+
+/* deprecated names */
+#define SQLITE_GET_LOCKPROXYFILE SQLITE_FCNTL_GET_LOCKPROXYFILE
+#define SQLITE_SET_LOCKPROXYFILE SQLITE_FCNTL_SET_LOCKPROXYFILE
+#define SQLITE_LAST_ERRNO SQLITE_FCNTL_LAST_ERRNO
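+
+/*
+** Usage sketch (hypothetical application code, not part of this header):
+** querying two of the file-controls described above on the "main" database
+** of an open connection.  The helper name and output format are invented;
+** only sqlite3_file_control(), sqlite3_free(), and the two opcodes are
+** taken from this header.
+**
+**   #include <stdio.h>
+**   #include "sqlite3.h"
+**
+**   static void show_file_control_state(sqlite3 *db){
+**     int persist = -1;       /* -1 means "query without changing" */
+**     char *zVfs = 0;         /* must start as NULL per VFSNAME docs */
+**     sqlite3_file_control(db, "main", SQLITE_FCNTL_PERSIST_WAL, &persist);
+**     sqlite3_file_control(db, "main", SQLITE_FCNTL_VFSNAME, &zVfs);
+**     printf("persist-WAL=%d vfs-stack=%s\n", persist, zVfs?zVfs:"(n/a)");
+**     sqlite3_free(zVfs);     /* the caller frees the VFSNAME result */
+**   }
+*/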
+
+
+/*
+** CAPI3REF: Mutex Handle
+**
+** The mutex module within SQLite defines [sqlite3_mutex] to be an
+** abstract type for a mutex object. The SQLite core never looks
+** at the internal representation of an [sqlite3_mutex]. It only
+** deals with pointers to the [sqlite3_mutex] object.
+**
+** Mutexes are created using [sqlite3_mutex_alloc()].
+*/
+typedef struct sqlite3_mutex sqlite3_mutex;
+
+/*
+** CAPI3REF: Loadable Extension Thunk
+**
+** A pointer to the opaque sqlite3_api_routines structure is passed as
+** the third parameter to entry points of [loadable extensions]. This
+** structure must be typedefed in order to work around compiler warnings
+** on some platforms.
+*/
+typedef struct sqlite3_api_routines sqlite3_api_routines;
+
+/*
+** CAPI3REF: OS Interface Object
+**
+** An instance of the sqlite3_vfs object defines the interface between
+** the SQLite core and the underlying operating system. The "vfs"
+** in the name of the object stands for "virtual file system". See
+** the [VFS | VFS documentation] for further information.
+**
+** The VFS interface is sometimes extended by adding new methods onto
+** the end. Each time such an extension occurs, the iVersion field
+** is incremented. The iVersion value started out as 1 in
+** SQLite [version 3.5.0] on [dateof:3.5.0], then increased to 2
+** with SQLite [version 3.7.0] on [dateof:3.7.0], and then increased
+** to 3 with SQLite [version 3.7.6] on [dateof:3.7.6]. Additional fields
+** may be appended to the sqlite3_vfs object and the iVersion value
+** may increase again in future versions of SQLite.
+** Note that due to an oversight, the structure
+** of the sqlite3_vfs object changed in the transition from
+** SQLite [version 3.5.9] to [version 3.6.0] on [dateof:3.6.0]
+** and yet the iVersion field was not increased.
+**
+** The szOsFile field is the size of the subclassed [sqlite3_file]
+** structure used by this VFS. mxPathname is the maximum length of
+** a pathname in this VFS.
+**
+** Registered sqlite3_vfs objects are kept on a linked list formed by
+** the pNext pointer. The [sqlite3_vfs_register()]
+** and [sqlite3_vfs_unregister()] interfaces manage this list
+** in a thread-safe way. The [sqlite3_vfs_find()] interface
+** searches the list. Neither the application code nor the VFS
+** implementation should use the pNext pointer.
+**
+** The pNext field is the only field in the sqlite3_vfs
+** structure that SQLite will ever modify. SQLite will only access
+** or modify this field while holding a particular static mutex.
+** The application should never modify anything within the sqlite3_vfs
+** object once the object has been registered.
+**
+** The zName field holds the name of the VFS module. The name must
+** be unique across all VFS modules.
+**
+** [[sqlite3_vfs.xOpen]]
+** ^SQLite guarantees that the zFilename parameter to xOpen
+** is either a NULL pointer or a string obtained
+** from xFullPathname() with an optional suffix added.
+** ^If a suffix is added to the zFilename parameter, it will
+** consist of a single "-" character followed by no more than
+** 11 alphanumeric and/or "-" characters.
+** ^SQLite further guarantees that
+** the string will be valid and unchanged until xClose() is
+** called. Because of the previous sentence,
+** the [sqlite3_file] can safely store a pointer to the
+** filename if it needs to remember the filename for some reason.
+** If the zFilename parameter to xOpen is a NULL pointer then xOpen
+** must invent its own temporary name for the file. ^Whenever the
+** zFilename parameter is NULL, it will also be the case that the
+** flags parameter will include [SQLITE_OPEN_DELETEONCLOSE].
+**
+** The flags argument to xOpen() includes all bits set in
+** the flags argument to [sqlite3_open_v2()]. Or if [sqlite3_open()]
+** or [sqlite3_open16()] is used, then flags includes at least
+** [SQLITE_OPEN_READWRITE] | [SQLITE_OPEN_CREATE].
+** If xOpen() opens a file read-only then it sets *pOutFlags to
+** include [SQLITE_OPEN_READONLY]. Other bits in *pOutFlags may be set.
+**
+** ^(SQLite will also add one of the following flags to the xOpen()
+** call, depending on the object being opened:
+**
+** <ul>
+** <li> [SQLITE_OPEN_MAIN_DB]
+** <li> [SQLITE_OPEN_MAIN_JOURNAL]
+** <li> [SQLITE_OPEN_TEMP_DB]
+** <li> [SQLITE_OPEN_TEMP_JOURNAL]
+** <li> [SQLITE_OPEN_TRANSIENT_DB]
+** <li> [SQLITE_OPEN_SUBJOURNAL]
+** <li> [SQLITE_OPEN_SUPER_JOURNAL]
+** <li> [SQLITE_OPEN_WAL]
+** </ul>)^
+**
+** The file I/O implementation can use the object type flags to
+** change the way it deals with files. For example, an application
+** that does not care about crash recovery or rollback might make
+** the open of a journal file a no-op. Writes to this journal would
+** also be no-ops, and any attempt to read the journal would return
+** SQLITE_IOERR. Or the implementation might recognize that a database
+** file will be doing page-aligned sector reads and writes in a random
+** order and set up its I/O subsystem accordingly.
+**
+** SQLite might also add one of the following flags to the xOpen method:
+**
+** <ul>
+** <li> [SQLITE_OPEN_DELETEONCLOSE]
+** <li> [SQLITE_OPEN_EXCLUSIVE]
+** </ul>
+**
+** The [SQLITE_OPEN_DELETEONCLOSE] flag means the file should be
+** deleted when it is closed. ^The [SQLITE_OPEN_DELETEONCLOSE]
+** will be set for TEMP databases and their journals, transient
+** databases, and subjournals.
+**
+** ^The [SQLITE_OPEN_EXCLUSIVE] flag is always used in conjunction
+** with the [SQLITE_OPEN_CREATE] flag, which are both directly
+** analogous to the O_EXCL and O_CREAT flags of the POSIX open()
+** API. The SQLITE_OPEN_EXCLUSIVE flag, when paired with
+** SQLITE_OPEN_CREATE, is used to indicate that the file should always
+** be created, and that it is an error if it already exists.
+** It is <i>not</i> used to indicate the file should be opened
+** for exclusive access.
+**
+** ^At least szOsFile bytes of memory are allocated by SQLite
+** to hold the [sqlite3_file] structure passed as the third
+** argument to xOpen. The xOpen method does not have to
+** allocate the structure; it should just fill it in. Note that
+** the xOpen method must set the sqlite3_file.pMethods to either
+** a valid [sqlite3_io_methods] object or to NULL. xOpen must do
+** this even if the open fails. SQLite expects that the sqlite3_file.pMethods
+** element will be valid after xOpen returns regardless of the success
+** or failure of the xOpen call.
+**
+** [[sqlite3_vfs.xAccess]]
+** ^The flags argument to xAccess() may be [SQLITE_ACCESS_EXISTS]
+** to test for the existence of a file, or [SQLITE_ACCESS_READWRITE] to
+** test whether a file is readable and writable, or [SQLITE_ACCESS_READ]
+** to test whether a file is at least readable. The SQLITE_ACCESS_READ
+** flag is never actually used and is not implemented in the built-in
+** VFSes of SQLite. The file is named by the second argument and can be a
+** directory. The xAccess method returns [SQLITE_OK] on success or some
+** non-zero error code if there is an I/O error or if the name of
+** the file given in the second argument is illegal. If SQLITE_OK
+** is returned, then non-zero or zero is written into *pResOut to indicate
+** whether or not the file is accessible.
+**
+** ^SQLite will always allocate at least mxPathname+1 bytes for the
+** output buffer xFullPathname. The exact size of the output buffer
+** is also passed as a parameter to both methods. If the output buffer
+** is not large enough, [SQLITE_CANTOPEN] should be returned. Since this is
+** handled as a fatal error by SQLite, VFS implementations should endeavor
+** to prevent this by setting mxPathname to a sufficiently large value.
+**
+** The xRandomness(), xSleep(), xCurrentTime(), and xCurrentTimeInt64()
+** interfaces are not strictly a part of the filesystem, but they are
+** included in the VFS structure for completeness.
+** The xRandomness() function attempts to return nBytes bytes
+** of good-quality randomness into zOut. The return value is
+** the actual number of bytes of randomness obtained.
+** The xSleep() method causes the calling thread to sleep for at
+** least the number of microseconds given. ^The xCurrentTime()
+** method returns a Julian Day Number for the current date and time as
+** a floating point value.
+** ^The xCurrentTimeInt64() method returns, as an integer, the Julian
+** Day Number multiplied by 86400000 (the number of milliseconds in
+** a 24-hour day).
+** ^SQLite will use the xCurrentTimeInt64() method to get the current
+** date and time if that method is available (if iVersion is 2 or
+** greater and the function pointer is not NULL) and will fall back
+** to xCurrentTime() if xCurrentTimeInt64() is unavailable.
+**
+** ^The xSetSystemCall(), xGetSystemCall(), and xNestSystemCall() interfaces
+** are not used by the SQLite core. These optional interfaces are provided
+** by some VFSes to facilitate testing of the VFS code. By overriding
+** system calls with functions under its control, a test program can
+** simulate faults and error conditions that would otherwise be difficult
+** or impossible to induce. The set of system calls that can be overridden
+** varies from one VFS to another, and from one version of the same VFS to the
+** next. Applications that use these interfaces must be prepared for any
+** or all of these interfaces to be NULL or for their behavior to change
+** from one release to the next. Applications must not attempt to access
+** any of these methods if the iVersion of the VFS is less than 3.
+*/
+typedef struct sqlite3_vfs sqlite3_vfs;
+typedef void (*sqlite3_syscall_ptr)(void);
+struct sqlite3_vfs {
+ int iVersion; /* Structure version number (currently 3) */
+ int szOsFile; /* Size of subclassed sqlite3_file */
+ int mxPathname; /* Maximum file pathname length */
+ sqlite3_vfs *pNext; /* Next registered VFS */
+ const char *zName; /* Name of this virtual file system */
+ void *pAppData; /* Pointer to application-specific data */
+ int (*xOpen)(sqlite3_vfs*, const char *zName, sqlite3_file*,
+ int flags, int *pOutFlags);
+ int (*xDelete)(sqlite3_vfs*, const char *zName, int syncDir);
+ int (*xAccess)(sqlite3_vfs*, const char *zName, int flags, int *pResOut);
+ int (*xFullPathname)(sqlite3_vfs*, const char *zName, int nOut, char *zOut);
+ void *(*xDlOpen)(sqlite3_vfs*, const char *zFilename);
+ void (*xDlError)(sqlite3_vfs*, int nByte, char *zErrMsg);
+ void (*(*xDlSym)(sqlite3_vfs*,void*, const char *zSymbol))(void);
+ void (*xDlClose)(sqlite3_vfs*, void*);
+ int (*xRandomness)(sqlite3_vfs*, int nByte, char *zOut);
+ int (*xSleep)(sqlite3_vfs*, int microseconds);
+ int (*xCurrentTime)(sqlite3_vfs*, double*);
+ int (*xGetLastError)(sqlite3_vfs*, int, char *);
+ /*
+ ** The methods above are in version 1 of the sqlite_vfs object
+ ** definition. Those that follow are added in version 2 or later
+ */
+ int (*xCurrentTimeInt64)(sqlite3_vfs*, sqlite3_int64*);
+ /*
+ ** The methods above are in versions 1 and 2 of the sqlite_vfs object.
+ ** Those below are for version 3 and greater.
+ */
+ int (*xSetSystemCall)(sqlite3_vfs*, const char *zName, sqlite3_syscall_ptr);
+ sqlite3_syscall_ptr (*xGetSystemCall)(sqlite3_vfs*, const char *zName);
+ const char *(*xNextSystemCall)(sqlite3_vfs*, const char *zName);
+ /*
+ ** The methods above are in versions 1 through 3 of the sqlite_vfs object.
+ ** New fields may be appended in future versions. The iVersion
+ ** value will increment whenever this happens.
+ */
+};
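+
+/*
+** Example (a minimal sketch of application code, not part of the API
+** surface defined here): reading a few fields of the default VFS.
+** sqlite3_vfs_find() is declared elsewhere in this header; passing a
+** NULL name returns the default VFS.
+**
+**   #include <stdio.h>
+**   #include "sqlite3.h"
+**
+**   static void show_default_vfs(void){
+**     sqlite3_vfs *pVfs = sqlite3_vfs_find(0);   /* 0 = default VFS */
+**     if( pVfs ){
+**       printf("vfs=%s iVersion=%d mxPathname=%d\n",
+**              pVfs->zName, pVfs->iVersion, pVfs->mxPathname);
+**     }
+**   }
+*/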
+
+/*
+** CAPI3REF: Flags for the xAccess VFS method
+**
+** These integer constants can be used as the third parameter to
+** the xAccess method of an [sqlite3_vfs] object. They determine
+** what kind of permissions the xAccess method is looking for.
+** With SQLITE_ACCESS_EXISTS, the xAccess method
+** simply checks whether the file exists.
+** With SQLITE_ACCESS_READWRITE, the xAccess method
+** checks whether the named directory is both readable and writable
+** (in other words, if files can be added, removed, and renamed within
+** the directory).
+** The SQLITE_ACCESS_READWRITE constant is currently used only by the
+** [temp_store_directory pragma], though this could change in a future
+** release of SQLite.
+** With SQLITE_ACCESS_READ, the xAccess method
+** checks whether the file is readable. The SQLITE_ACCESS_READ constant is
+** currently unused, though it might be used in a future release of
+** SQLite.
+*/
+#define SQLITE_ACCESS_EXISTS 0
+#define SQLITE_ACCESS_READWRITE 1 /* Used by PRAGMA temp_store_directory */
+#define SQLITE_ACCESS_READ 2 /* Unused */
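+
+/*
+** Sketch of how a custom VFS might map these flags onto POSIX access(2).
+** This is a hypothetical illustration (the name demoAccess is invented);
+** the built-in Unix VFS in os_unix.c is considerably more careful.
+**
+**   #include <unistd.h>   /* access(), F_OK, R_OK, W_OK */
+**   #include "sqlite3.h"
+**
+**   static int demoAccess(sqlite3_vfs *pVfs, const char *zPath,
+**                         int flags, int *pResOut){
+**     int mode = F_OK;                          /* SQLITE_ACCESS_EXISTS */
+**     if( flags==SQLITE_ACCESS_READWRITE ) mode = R_OK|W_OK;
+**     if( flags==SQLITE_ACCESS_READ )      mode = R_OK;
+**     *pResOut = access(zPath, mode)==0;
+**     return SQLITE_OK;
+**   }
+*/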
+
+/*
+** CAPI3REF: Flags for the xShmLock VFS method
+**
+** These integer constants define the various locking operations
+** allowed by the xShmLock method of [sqlite3_io_methods]. The
+** following are the only legal combinations of flags to the
+** xShmLock method:
+**
+** <ul>
+** <li> SQLITE_SHM_LOCK | SQLITE_SHM_SHARED
+** <li> SQLITE_SHM_LOCK | SQLITE_SHM_EXCLUSIVE
+** <li> SQLITE_SHM_UNLOCK | SQLITE_SHM_SHARED
+** <li> SQLITE_SHM_UNLOCK | SQLITE_SHM_EXCLUSIVE
+** </ul>
+**
+** When unlocking, the same SHARED or EXCLUSIVE flag must be supplied as
+** was given on the corresponding lock.
+**
+** The xShmLock method can transition between unlocked and SHARED or
+** between unlocked and EXCLUSIVE. It cannot transition between SHARED
+** and EXCLUSIVE.
+*/
+#define SQLITE_SHM_UNLOCK 1
+#define SQLITE_SHM_LOCK 2
+#define SQLITE_SHM_SHARED 4
+#define SQLITE_SHM_EXCLUSIVE 8
+
+/*
+** CAPI3REF: Maximum xShmLock index
+**
+** The xShmLock method on [sqlite3_io_methods] may use values
+** between 0 and this upper bound as its "offset" argument.
+** The SQLite core will never attempt to acquire or release a
+** lock outside of this range.
+*/
+#define SQLITE_SHM_NLOCK 8
+
+
+/*
+** CAPI3REF: Initialize The SQLite Library
+**
+** ^The sqlite3_initialize() routine initializes the
+** SQLite library. ^The sqlite3_shutdown() routine
+** deallocates any resources that were allocated by sqlite3_initialize().
+** These routines are designed to aid in process initialization and
+** shutdown on embedded systems. Workstation applications using
+** SQLite normally do not need to invoke either of these routines.
+**
+** A call to sqlite3_initialize() is an "effective" call if it is
+** the first time sqlite3_initialize() is invoked during the lifetime of
+** the process, or if it is the first time sqlite3_initialize() is invoked
+** following a call to sqlite3_shutdown(). ^(Only an effective call
+** of sqlite3_initialize() does any initialization. All other calls
+** are harmless no-ops.)^
+**
+** A call to sqlite3_shutdown() is an "effective" call if it is the first
+** call to sqlite3_shutdown() since the last sqlite3_initialize(). ^(Only
+** an effective call to sqlite3_shutdown() does any deinitialization.
+** All other valid calls to sqlite3_shutdown() are harmless no-ops.)^
+**
+** The sqlite3_initialize() interface is threadsafe, but sqlite3_shutdown()
+** is not. The sqlite3_shutdown() interface must only be called from a
+** single thread. All open [database connections] must be closed and all
+** other SQLite resources must be deallocated prior to invoking
+** sqlite3_shutdown().
+**
+** Among other things, ^sqlite3_initialize() will invoke
+** sqlite3_os_init(). Similarly, ^sqlite3_shutdown()
+** will invoke sqlite3_os_end().
+**
+** ^The sqlite3_initialize() routine returns [SQLITE_OK] on success.
+** ^If for some reason, sqlite3_initialize() is unable to initialize
+** the library (perhaps it is unable to allocate a needed resource such
+** as a mutex) it returns an [error code] other than [SQLITE_OK].
+**
+** ^The sqlite3_initialize() routine is called internally by many other
+** SQLite interfaces so that an application usually does not need to
+** invoke sqlite3_initialize() directly. For example, [sqlite3_open()]
+** calls sqlite3_initialize() so the SQLite library will be automatically
+** initialized when [sqlite3_open()] is called if it has not been initialized
+** already. ^However, if SQLite is compiled with the [SQLITE_OMIT_AUTOINIT]
+** compile-time option, then the automatic calls to sqlite3_initialize()
+** are omitted and the application must call sqlite3_initialize() directly
+** prior to using any other SQLite interface. For maximum portability,
+** it is recommended that applications always invoke sqlite3_initialize()
+** directly prior to using any other SQLite interface. Future releases
+** of SQLite may require this. In other words, the behavior exhibited
+** when SQLite is compiled with [SQLITE_OMIT_AUTOINIT] might become the
+** default behavior in some future release of SQLite.
+**
+** The sqlite3_os_init() routine does operating-system specific
+** initialization of the SQLite library. The sqlite3_os_end()
+** routine undoes the effect of sqlite3_os_init(). Typical tasks
+** performed by these routines include allocation or deallocation
+** of static resources, initialization of global variables,
+** setting up a default [sqlite3_vfs] module, or setting up
+** a default configuration using [sqlite3_config()].
+**
+** The application should never invoke either sqlite3_os_init()
+** or sqlite3_os_end() directly. The application should only invoke
+** sqlite3_initialize() and sqlite3_shutdown(). The sqlite3_os_init()
+** interface is called automatically by sqlite3_initialize() and
+** sqlite3_os_end() is called by sqlite3_shutdown(). Appropriate
+** implementations for sqlite3_os_init() and sqlite3_os_end()
+** are built into SQLite when it is compiled for Unix, Windows, or OS/2.
+** When [custom builds | built for other platforms]
+** (using the [SQLITE_OS_OTHER=1] compile-time
+** option) the application must supply a suitable implementation for
+** sqlite3_os_init() and sqlite3_os_end(). An application-supplied
+** implementation of sqlite3_os_init() or sqlite3_os_end()
+** must return [SQLITE_OK] on success and some other [error code] upon
+** failure.
+*/
+SQLITE_API int sqlite3_initialize(void);
+SQLITE_API int sqlite3_shutdown(void);
+SQLITE_API int sqlite3_os_init(void);
+SQLITE_API int sqlite3_os_end(void);
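+
+/*
+** A minimal portable lifecycle, following the recommendation above
+** (a sketch of application code, not part of this header):
+**
+**   #include "sqlite3.h"
+**
+**   int main(void){
+**     if( sqlite3_initialize()!=SQLITE_OK ) return 1;
+**     /* ... open connections and use other SQLite interfaces ... */
+**     sqlite3_shutdown();    /* only after every connection is closed */
+**     return 0;
+**   }
+*/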
+
+/*
+** CAPI3REF: Configuring The SQLite Library
+**
+** The sqlite3_config() interface is used to make global configuration
+** changes to SQLite in order to tune SQLite to the specific needs of
+** the application. The default configuration is recommended for most
+** applications and so this routine is usually not necessary. It is
+** provided to support rare applications with unusual needs.
+**
+** <b>The sqlite3_config() interface is not threadsafe. The application
+** must ensure that no other SQLite interfaces are invoked by other
+** threads while sqlite3_config() is running.</b>
+**
+** The sqlite3_config() interface
+** may only be invoked prior to library initialization using
+** [sqlite3_initialize()] or after shutdown by [sqlite3_shutdown()].
+** ^If sqlite3_config() is called after [sqlite3_initialize()] and before
+** [sqlite3_shutdown()] then it will return SQLITE_MISUSE.
+** Note, however, that ^sqlite3_config() can be called as part of the
+** implementation of an application-defined [sqlite3_os_init()].
+**
+** The first argument to sqlite3_config() is an integer
+** [configuration option] that determines
+** what property of SQLite is to be configured. Subsequent arguments
+** vary depending on the [configuration option]
+** in the first argument.
+**
+** ^When a configuration option is set, sqlite3_config() returns [SQLITE_OK].
+** ^If the option is unknown or SQLite is unable to set the option
+** then this routine returns a non-zero [error code].
+*/
+SQLITE_API int sqlite3_config(int, ...);
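+
+/*
+** Typical ordering (sketch): configuration must happen before
+** initialization.  Both options shown are documented below; the
+** surrounding code is hypothetical.
+**
+**   sqlite3_config(SQLITE_CONFIG_SERIALIZED);    /* threading mode  */
+**   sqlite3_config(SQLITE_CONFIG_MEMSTATUS, 0);  /* no memory stats */
+**   if( sqlite3_initialize()!=SQLITE_OK ){ /* handle the error */ }
+*/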
+
+/*
+** CAPI3REF: Configure database connections
+** METHOD: sqlite3
+**
+** The sqlite3_db_config() interface is used to make configuration
+** changes to a [database connection]. The interface is similar to
+** [sqlite3_config()] except that the changes apply to a single
+** [database connection] (specified in the first argument).
+**
+** The second argument to sqlite3_db_config(D,V,...) is the
+** [SQLITE_DBCONFIG_LOOKASIDE | configuration verb] - an integer code
+** that indicates what aspect of the [database connection] is being configured.
+** Subsequent arguments vary depending on the configuration verb.
+**
+** ^Calls to sqlite3_db_config() return SQLITE_OK if and only if
+** the call is considered successful.
+*/
+SQLITE_API int sqlite3_db_config(sqlite3*, int op, ...);
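+
+/*
+** Example (sketch, assuming an open connection "db"): resizing the
+** lookaside allocator for one connection with the
+** [SQLITE_DBCONFIG_LOOKASIDE] verb, which takes a buffer pointer, a
+** slot size, and a slot count.  Passing a NULL buffer lets SQLite
+** allocate the memory itself.
+**
+**   sqlite3_db_config(db, SQLITE_DBCONFIG_LOOKASIDE, (void*)0, 512, 256);
+*/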
+
+/*
+** CAPI3REF: Memory Allocation Routines
+**
+** An instance of this object defines the interface between SQLite
+** and low-level memory allocation routines.
+**
+** This object is used in only one place in the SQLite interface.
+** A pointer to an instance of this object is the argument to
+** [sqlite3_config()] when the configuration option is
+** [SQLITE_CONFIG_MALLOC] or [SQLITE_CONFIG_GETMALLOC].
+** By creating an instance of this object
+** and passing it to [sqlite3_config]([SQLITE_CONFIG_MALLOC])
+** during configuration, an application can specify an alternative
+** memory allocation subsystem for SQLite to use for all of its
+** dynamic memory needs.
+**
+** Note that SQLite comes with several [built-in memory allocators]
+** that are perfectly adequate for the overwhelming majority of applications
+** and that this object is only useful to a tiny minority of applications
+** with specialized memory allocation requirements. This object is
+** also used during testing of SQLite in order to specify an alternative
+** memory allocator that simulates memory out-of-memory conditions in
+** order to verify that SQLite recovers gracefully from such
+** conditions.
+**
+** The xMalloc, xRealloc, and xFree methods must work like the
+** malloc(), realloc() and free() functions from the standard C library.
+** ^SQLite guarantees that the second argument to
+** xRealloc is always a value returned by a prior call to xRoundup.
+**
+** xSize should return the allocated size of a memory allocation
+** previously obtained from xMalloc or xRealloc. The allocated size
+** is always at least as big as the requested size but may be larger.
+**
+** The xRoundup method returns what would be the allocated size of
+** a memory allocation given a particular requested size. Most memory
+** allocators round up memory allocations at least to the next multiple
+** of 8. Some allocators round up to a larger multiple or to a power of 2.
+** Every memory allocation request coming in through [sqlite3_malloc()]
+** or [sqlite3_realloc()] first calls xRoundup. If xRoundup returns 0,
+** that causes the corresponding memory allocation to fail.
+**
+** The xInit method initializes the memory allocator. For example,
+** it might allocate any required mutexes or initialize internal data
+** structures. The xShutdown method is invoked (indirectly) by
+** [sqlite3_shutdown()] and should deallocate any resources acquired
+** by xInit. The pAppData pointer is used as the only parameter to
+** xInit and xShutdown.
+**
+** SQLite holds the [SQLITE_MUTEX_STATIC_MAIN] mutex when it invokes
+** the xInit method, so the xInit method need not be threadsafe. The
+** xShutdown method is only called from [sqlite3_shutdown()] so it does
+** not need to be threadsafe either. For all other methods, SQLite
+** holds the [SQLITE_MUTEX_STATIC_MEM] mutex as long as the
+** [SQLITE_CONFIG_MEMSTATUS] configuration option is turned on (which
+** it is by default) and so the methods are automatically serialized.
+** However, if [SQLITE_CONFIG_MEMSTATUS] is disabled, then the other
+** methods must be threadsafe or else make their own arrangements for
+** serialization.
+**
+** SQLite will never invoke xInit() more than once without an intervening
+** call to xShutdown().
+*/
+typedef struct sqlite3_mem_methods sqlite3_mem_methods;
+struct sqlite3_mem_methods {
+ void *(*xMalloc)(int); /* Memory allocation function */
+ void (*xFree)(void*); /* Free a prior allocation */
+ void *(*xRealloc)(void*,int); /* Resize an allocation */
+ int (*xSize)(void*); /* Return the size of an allocation */
+ int (*xRoundup)(int); /* Round up request size to allocation size */
+ int (*xInit)(void*); /* Initialize the memory allocator */
+ void (*xShutdown)(void*); /* Deinitialize the memory allocator */
+ void *pAppData; /* Argument to xInit() and xShutdown() */
+};
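+
+/*
+** Sketch of a wrapper allocator built on this object: fetch the current
+** methods with [SQLITE_CONFIG_GETMALLOC], override two of them, and
+** install the result with [SQLITE_CONFIG_MALLOC] before initialization.
+** The counter and function names are hypothetical; the counter relies on
+** the serialization provided when [SQLITE_CONFIG_MEMSTATUS] is on.
+**
+**   #include "sqlite3.h"
+**
+**   static sqlite3_mem_methods base;     /* the original allocator  */
+**   static int nLiveAlloc = 0;           /* crude live-object count */
+**
+**   static void *countMalloc(int n){ nLiveAlloc++; return base.xMalloc(n); }
+**   static void countFree(void *p){ if( p ) nLiveAlloc--; base.xFree(p); }
+**
+**   static void install_counting_allocator(void){  /* call pre-init */
+**     static sqlite3_mem_methods wrapped;
+**     sqlite3_config(SQLITE_CONFIG_GETMALLOC, &base);
+**     wrapped = base;                    /* keep xRealloc, xSize, ... */
+**     wrapped.xMalloc = countMalloc;
+**     wrapped.xFree = countFree;
+**     sqlite3_config(SQLITE_CONFIG_MALLOC, &wrapped);
+**   }
+*/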
+
+/*
+** CAPI3REF: Configuration Options
+** KEYWORDS: {configuration option}
+**
+** These constants are the available integer configuration options that
+** can be passed as the first argument to the [sqlite3_config()] interface.
+**
+** New configuration options may be added in future releases of SQLite.
+** Existing configuration options might be discontinued. Applications
+** should check the return code from [sqlite3_config()] to make sure that
+** the call worked. The [sqlite3_config()] interface will return a
+** non-zero [error code] if a discontinued or unsupported configuration option
+** is invoked.
+**
+** <dl>
+** [[SQLITE_CONFIG_SINGLETHREAD]] <dt>SQLITE_CONFIG_SINGLETHREAD</dt>
+** <dd>There are no arguments to this option. ^This option sets the
+** [threading mode] to Single-thread. In other words, it disables
+** all mutexing and puts SQLite into a mode where it can only be used
+** by a single thread. ^If SQLite is compiled with
+** the [SQLITE_THREADSAFE | SQLITE_THREADSAFE=0] compile-time option then
+** it is not possible to change the [threading mode] from its default
+** value of Single-thread and so [sqlite3_config()] will return
+** [SQLITE_ERROR] if called with the SQLITE_CONFIG_SINGLETHREAD
+** configuration option.</dd>
+**
+** [[SQLITE_CONFIG_MULTITHREAD]] <dt>SQLITE_CONFIG_MULTITHREAD</dt>
+** <dd>There are no arguments to this option. ^This option sets the
+** [threading mode] to Multi-thread. In other words, it disables
+** mutexing on [database connection] and [prepared statement] objects.
+** The application is responsible for serializing access to
+** [database connections] and [prepared statements]. But other mutexes
+** are enabled so that SQLite will be safe to use in a multi-threaded
+** environment as long as no two threads attempt to use the same
+** [database connection] at the same time. ^If SQLite is compiled with
+** the [SQLITE_THREADSAFE | SQLITE_THREADSAFE=0] compile-time option then
+** it is not possible to set the Multi-thread [threading mode] and
+** [sqlite3_config()] will return [SQLITE_ERROR] if called with the
+** SQLITE_CONFIG_MULTITHREAD configuration option.</dd>
+**
+** [[SQLITE_CONFIG_SERIALIZED]] <dt>SQLITE_CONFIG_SERIALIZED</dt>
+** <dd>There are no arguments to this option. ^This option sets the
+** [threading mode] to Serialized. In other words, this option enables
+** all mutexes including the recursive
+** mutexes on [database connection] and [prepared statement] objects.
+** In this mode (which is the default when SQLite is compiled with
+** [SQLITE_THREADSAFE=1]) the SQLite library will itself serialize access
+** to [database connections] and [prepared statements] so that the
+** application is free to use the same [database connection] or the
+** same [prepared statement] in different threads at the same time.
+** ^If SQLite is compiled with
+** the [SQLITE_THREADSAFE | SQLITE_THREADSAFE=0] compile-time option then
+** it is not possible to set the Serialized [threading mode] and
+** [sqlite3_config()] will return [SQLITE_ERROR] if called with the
+** SQLITE_CONFIG_SERIALIZED configuration option.</dd>
+**
+** [[SQLITE_CONFIG_MALLOC]] <dt>SQLITE_CONFIG_MALLOC</dt>
+** <dd> ^(The SQLITE_CONFIG_MALLOC option takes a single argument which is
+** a pointer to an instance of the [sqlite3_mem_methods] structure.
+** The argument specifies
+** alternative low-level memory allocation routines to be used in place of
+** the memory allocation routines built into SQLite.)^ ^SQLite makes
+** its own private copy of the content of the [sqlite3_mem_methods] structure
+** before the [sqlite3_config()] call returns.</dd>
+**
+** [[SQLITE_CONFIG_GETMALLOC]] <dt>SQLITE_CONFIG_GETMALLOC</dt>
+** <dd> ^(The SQLITE_CONFIG_GETMALLOC option takes a single argument which
+** is a pointer to an instance of the [sqlite3_mem_methods] structure.
+** The [sqlite3_mem_methods]
+** structure is filled with the currently defined memory allocation routines.)^
+** This option can be used to overload the default memory allocation
+** routines with a wrapper that simulates memory allocation failure or
+** tracks memory usage, for example. </dd>
+**
+** [[SQLITE_CONFIG_SMALL_MALLOC]] <dt>SQLITE_CONFIG_SMALL_MALLOC</dt>
+** <dd> ^The SQLITE_CONFIG_SMALL_MALLOC option takes single argument of
+** type int, interpreted as a boolean, which if true provides a hint to
+** SQLite that it should avoid large memory allocations if possible.
+** SQLite will run faster if it is free to make large memory allocations,
+** but some applications might prefer to run slower in exchange for
+** guarantees about memory fragmentation that are possible if large
+** allocations are avoided. This hint is normally off.
+** </dd>
+**
+** [[SQLITE_CONFIG_MEMSTATUS]] <dt>SQLITE_CONFIG_MEMSTATUS</dt>
+** <dd> ^The SQLITE_CONFIG_MEMSTATUS option takes single argument of type int,
+** interpreted as a boolean, which enables or disables the collection of
+** memory allocation statistics. ^(When memory allocation statistics are
+** disabled, the following SQLite interfaces become non-operational:
+** <ul>
+** <li> [sqlite3_hard_heap_limit64()]
+** <li> [sqlite3_memory_used()]
+** <li> [sqlite3_memory_highwater()]
+** <li> [sqlite3_soft_heap_limit64()]
+** <li> [sqlite3_status64()]
+** </ul>)^
+** ^Memory allocation statistics are enabled by default unless SQLite is
+** compiled with [SQLITE_DEFAULT_MEMSTATUS]=0 in which case memory
+** allocation statistics are disabled by default.
+** </dd>
+**
+** [[SQLITE_CONFIG_SCRATCH]] <dt>SQLITE_CONFIG_SCRATCH</dt>
+** <dd> The SQLITE_CONFIG_SCRATCH option is no longer used.
+** </dd>
+**
+** [[SQLITE_CONFIG_PAGECACHE]] <dt>SQLITE_CONFIG_PAGECACHE</dt>
+** <dd> ^The SQLITE_CONFIG_PAGECACHE option specifies a memory pool
+** that SQLite can use for the database page cache with the default page
+** cache implementation.
+** This configuration option is a no-op if an application-defined page
+** cache implementation is loaded using the [SQLITE_CONFIG_PCACHE2] option.
+** ^There are three arguments to SQLITE_CONFIG_PAGECACHE: A pointer to
+** 8-byte aligned memory (pMem), the size of each page cache line (sz),
+** and the number of cache lines (N).
+** The sz argument should be the size of the largest database page
+** (a power of two between 512 and 65536) plus some extra bytes for each
+** page header. ^The number of extra bytes needed by the page header
+** can be determined using [SQLITE_CONFIG_PCACHE_HDRSZ].
+** ^It is harmless, apart from the wasted memory,
+** for the sz parameter to be larger than necessary. The pMem
+** argument must be either a NULL pointer or a pointer to an 8-byte
+** aligned block of memory of at least sz*N bytes, otherwise
+** subsequent behavior is undefined.
+** ^When pMem is not NULL, SQLite will strive to use the memory provided
+** to satisfy page cache needs, falling back to [sqlite3_malloc()] if
+** a page cache line is larger than sz bytes or if all of the pMem buffer
+** is exhausted.
+** ^If pMem is NULL and N is non-zero, then each database connection
+** does an initial bulk allocation for page cache memory
+** from [sqlite3_malloc()] sufficient for N cache lines if N is positive or
+** of -1024*N bytes if N is negative. ^If additional
+** page cache memory is needed beyond what is provided by the initial
+** allocation, then SQLite goes to [sqlite3_malloc()] separately for each
+** additional cache line. </dd>
+**
+** [[SQLITE_CONFIG_HEAP]] <dt>SQLITE_CONFIG_HEAP</dt>
+** <dd> ^The SQLITE_CONFIG_HEAP option specifies a static memory buffer
+** that SQLite will use for all of its dynamic memory allocation needs
+** beyond those provided for by [SQLITE_CONFIG_PAGECACHE].
+** ^The SQLITE_CONFIG_HEAP option is only available if SQLite is compiled
+** with either [SQLITE_ENABLE_MEMSYS3] or [SQLITE_ENABLE_MEMSYS5] and returns
+** [SQLITE_ERROR] if invoked otherwise.
+** ^There are three arguments to SQLITE_CONFIG_HEAP:
+** An 8-byte aligned pointer to the memory,
+** the number of bytes in the memory buffer, and the minimum allocation size.
+** ^If the first pointer (the memory pointer) is NULL, then SQLite reverts
+** to using its default memory allocator (the system malloc() implementation),
+** undoing any prior invocation of [SQLITE_CONFIG_MALLOC]. ^If the
+** memory pointer is not NULL then the alternative memory
+** allocator is engaged to handle all of SQLite's memory allocation needs.
+** The first pointer (the memory pointer) must be aligned to an 8-byte
+** boundary or subsequent behavior of SQLite will be undefined.
+** The minimum allocation size is capped at 2**12. Reasonable values
+** for the minimum allocation size are 2**5 through 2**8.</dd>
+**
+** [[SQLITE_CONFIG_MUTEX]] <dt>SQLITE_CONFIG_MUTEX</dt>
+** <dd> ^(The SQLITE_CONFIG_MUTEX option takes a single argument which is a
+** pointer to an instance of the [sqlite3_mutex_methods] structure.
+** The argument specifies alternative low-level mutex routines to be used
+** in place of the mutex routines built into SQLite.)^ ^SQLite makes a copy of
+** the content of the [sqlite3_mutex_methods] structure before the call to
+** [sqlite3_config()] returns. ^If SQLite is compiled with
+** the [SQLITE_THREADSAFE | SQLITE_THREADSAFE=0] compile-time option then
+** the entire mutexing subsystem is omitted from the build and hence calls to
+** [sqlite3_config()] with the SQLITE_CONFIG_MUTEX configuration option will
+** return [SQLITE_ERROR].</dd>
+**
+** [[SQLITE_CONFIG_GETMUTEX]] <dt>SQLITE_CONFIG_GETMUTEX</dt>
+** <dd> ^(The SQLITE_CONFIG_GETMUTEX option takes a single argument which
+** is a pointer to an instance of the [sqlite3_mutex_methods] structure. The
+** [sqlite3_mutex_methods]
+** structure is filled with the currently defined mutex routines.)^
+** This option can be used to overload the default mutex allocation
+** routines with a wrapper used to track mutex usage for performance
+** profiling or testing, for example. ^If SQLite is compiled with
+** the [SQLITE_THREADSAFE | SQLITE_THREADSAFE=0] compile-time option then
+** the entire mutexing subsystem is omitted from the build and hence calls to
+** [sqlite3_config()] with the SQLITE_CONFIG_GETMUTEX configuration option will
+** return [SQLITE_ERROR].</dd>
+**
+** [[SQLITE_CONFIG_LOOKASIDE]] <dt>SQLITE_CONFIG_LOOKASIDE</dt>
+** <dd> ^(The SQLITE_CONFIG_LOOKASIDE option takes two arguments that determine
+** the default size of lookaside memory on each [database connection].
+** The first argument is the
+** size of each lookaside buffer slot and the second is the number of
+** slots allocated to each database connection.)^ ^(SQLITE_CONFIG_LOOKASIDE
+** sets the <i>default</i> lookaside size. The [SQLITE_DBCONFIG_LOOKASIDE]
+** option to [sqlite3_db_config()] can be used to change the lookaside
+** configuration on individual connections.)^ </dd>
+**
+** [[SQLITE_CONFIG_PCACHE2]] <dt>SQLITE_CONFIG_PCACHE2</dt>
+** <dd> ^(The SQLITE_CONFIG_PCACHE2 option takes a single argument which is
+** a pointer to an [sqlite3_pcache_methods2] object. This object specifies
+** the interface to a custom page cache implementation.)^
+** ^SQLite makes a copy of the [sqlite3_pcache_methods2] object.</dd>
+**
+** [[SQLITE_CONFIG_GETPCACHE2]] <dt>SQLITE_CONFIG_GETPCACHE2</dt>
+** <dd> ^(The SQLITE_CONFIG_GETPCACHE2 option takes a single argument which
+** is a pointer to an [sqlite3_pcache_methods2] object. SQLite copies
+** the current page cache implementation into that object.)^ </dd>
+**
+** [[SQLITE_CONFIG_LOG]] <dt>SQLITE_CONFIG_LOG</dt>
+** <dd> The SQLITE_CONFIG_LOG option is used to configure the SQLite
+** global [error log].
+** (^The SQLITE_CONFIG_LOG option takes two arguments: a pointer to a
+** function with a call signature of void(*)(void*,int,const char*),
+** and a pointer to void. ^If the function pointer is not NULL, it is
+** invoked by [sqlite3_log()] to process each logging event. ^If the
+** function pointer is NULL, the [sqlite3_log()] interface becomes a no-op.
+** ^The void pointer that is the second argument to SQLITE_CONFIG_LOG is
+** passed through as the first parameter to the application-defined logger
+** function whenever that function is invoked. ^The second parameter to
+** the logger function is a copy of the first parameter to the corresponding
+** [sqlite3_log()] call and is intended to be a [result code] or an
+** [extended result code]. ^The third parameter passed to the logger is
+** the log message after formatting via [sqlite3_snprintf()]; a usage
+** sketch appears after this list of options.
+** The SQLite logging interface is not reentrant; the logger function
+** supplied by the application must not invoke any SQLite interface.
+** In a multi-threaded application, the application-defined logger
+** function must be threadsafe. </dd>
+**
+** [[SQLITE_CONFIG_URI]] <dt>SQLITE_CONFIG_URI
+** <dd>^(The SQLITE_CONFIG_URI option takes a single argument of type int.
+** If non-zero, then URI handling is globally enabled. If the parameter is zero,
+** then URI handling is globally disabled.)^ ^If URI handling is globally
+** enabled, all filenames passed to [sqlite3_open()], [sqlite3_open_v2()],
+** [sqlite3_open16()] or
+** specified as part of [ATTACH] commands are interpreted as URIs, regardless
+** of whether or not the [SQLITE_OPEN_URI] flag is set when the database
+** connection is opened. ^If it is globally disabled, filenames are
+** only interpreted as URIs if the SQLITE_OPEN_URI flag is set when the
+** database connection is opened. ^(By default, URI handling is globally
+** disabled. The default value may be changed by compiling with the
+** [SQLITE_USE_URI] symbol defined.)^
+**
+** [[SQLITE_CONFIG_COVERING_INDEX_SCAN]] <dt>SQLITE_CONFIG_COVERING_INDEX_SCAN
+** <dd>^The SQLITE_CONFIG_COVERING_INDEX_SCAN option takes a single integer
+** argument which is interpreted as a boolean in order to enable or disable
+** the use of covering indices for full table scans in the query optimizer.
+** ^The default setting is determined
+** by the [SQLITE_ALLOW_COVERING_INDEX_SCAN] compile-time option, or is "on"
+** if that compile-time option is omitted.
+** The ability to disable the use of covering indices for full table scans
+** exists because some incorrectly coded legacy applications might malfunction
+** when the optimization is enabled. Providing the ability to
+** disable the optimization allows the older, buggy application code to work
+** without change even with newer versions of SQLite.
+**
+** [[SQLITE_CONFIG_PCACHE]] [[SQLITE_CONFIG_GETPCACHE]]
+** <dt>SQLITE_CONFIG_PCACHE and SQLITE_CONFIG_GETPCACHE
+** <dd> These options are obsolete and should not be used by new code.
+** They are retained for backwards compatibility but are now no-ops.
+** </dd>
+**
+** [[SQLITE_CONFIG_SQLLOG]]
+** <dt>SQLITE_CONFIG_SQLLOG
+** <dd>This option is only available if sqlite is compiled with the
+** [SQLITE_ENABLE_SQLLOG] pre-processor macro defined. The first argument should
+** be a pointer to a function of type void(*)(void*,sqlite3*,const char*, int).
+** The second should be of type (void*). The callback is invoked by the library
+** in three separate circumstances, identified by the value passed as the
+** fourth parameter. If the fourth parameter is 0, then the database connection
+** passed as the second argument has just been opened. The third argument
+** points to a buffer containing the name of the main database file. If the
+** fourth parameter is 1, then the SQL statement that the third parameter
+** points to has just been executed. Or, if the fourth parameter is 2, then
+** the connection being passed as the second parameter is being closed. The
+** third parameter is passed NULL in this case. An example of using this
+** configuration option can be seen in the "test_sqllog.c" source file in
+** the canonical SQLite source tree.</dd>
+**
+** [[SQLITE_CONFIG_MMAP_SIZE]]
+** <dt>SQLITE_CONFIG_MMAP_SIZE
+** <dd>^SQLITE_CONFIG_MMAP_SIZE takes two 64-bit integer (sqlite3_int64) values
+** that are the default mmap size limit (the default setting for
+** [PRAGMA mmap_size]) and the maximum allowed mmap size limit.
+** ^The default setting can be overridden by each database connection using
+** either the [PRAGMA mmap_size] command, or by using the
+** [SQLITE_FCNTL_MMAP_SIZE] file control. ^(The maximum allowed mmap size
+** will be silently truncated if necessary so that it does not exceed the
+** compile-time maximum mmap size set by the
+** [SQLITE_MAX_MMAP_SIZE] compile-time option.)^
+** ^If either argument to this option is negative, then that argument is
+** changed to its compile-time default.
+**
+** [[SQLITE_CONFIG_WIN32_HEAPSIZE]]
+** <dt>SQLITE_CONFIG_WIN32_HEAPSIZE
+** <dd>^The SQLITE_CONFIG_WIN32_HEAPSIZE option is only available if SQLite is
+** compiled for Windows with the [SQLITE_WIN32_MALLOC] pre-processor macro
+** defined. ^SQLITE_CONFIG_WIN32_HEAPSIZE takes a 32-bit unsigned integer value
+** that specifies the maximum size of the created heap.
+**
+** [[SQLITE_CONFIG_PCACHE_HDRSZ]]
+** <dt>SQLITE_CONFIG_PCACHE_HDRSZ
+** <dd>^The SQLITE_CONFIG_PCACHE_HDRSZ option takes a single parameter which
+** is a pointer to an integer and writes into that integer the number of extra
+** bytes per page required for each page in [SQLITE_CONFIG_PAGECACHE].
+** The amount of extra space required can change depending on the compiler,
+** target platform, and SQLite version.
+**
+** [[SQLITE_CONFIG_PMASZ]]
+** <dt>SQLITE_CONFIG_PMASZ
+** <dd>^The SQLITE_CONFIG_PMASZ option takes a single parameter which
+** is an unsigned integer and sets the "Minimum PMA Size" for the multithreaded
+** sorter to that integer. The default minimum PMA Size is set by the
+** [SQLITE_SORTER_PMASZ] compile-time option. New threads are launched
+** to help with sort operations when multithreaded sorting
+** is enabled (using the [PRAGMA threads] command) and the amount of content
+** to be sorted exceeds the page size times the minimum of the
+** [PRAGMA cache_size] setting and this value.
+**
+** [[SQLITE_CONFIG_STMTJRNL_SPILL]]
+** <dt>SQLITE_CONFIG_STMTJRNL_SPILL
+** <dd>^The SQLITE_CONFIG_STMTJRNL_SPILL option takes a single parameter which
+** becomes the [statement journal] spill-to-disk threshold.
+** [Statement journals] are held in memory until their size (in bytes)
+** exceeds this threshold, at which point they are written to disk.
+** Or if the threshold is -1, statement journals are always held
+** exclusively in memory.
+** Since many statement journals never become large, setting the spill
+** threshold to a value such as 64KiB can greatly reduce the amount of
+** I/O required to support statement rollback.
+** The default value for this setting is controlled by the
+** [SQLITE_STMTJRNL_SPILL] compile-time option.
+**
+** [[SQLITE_CONFIG_SORTERREF_SIZE]]
+** <dt>SQLITE_CONFIG_SORTERREF_SIZE
+** <dd>The SQLITE_CONFIG_SORTERREF_SIZE option accepts a single parameter
+** of type (int) - the new value of the sorter-reference size threshold.
+** Usually, when SQLite uses an external sort to order records according
+** to an ORDER BY clause, all fields required by the caller are present in the
+** sorted records. However, if SQLite determines based on the declared type
+** of a table column that its values are likely to be very large - larger
+** than the configured sorter-reference size threshold - then a reference
+** is stored in each sorted record and the required column values loaded
+** from the database as records are returned in sorted order. The default
+** value for this option is to never use this optimization. Specifying a
+** negative value for this option restores the default behaviour.
+** This option is only available if SQLite is compiled with the
+** [SQLITE_ENABLE_SORTER_REFERENCES] compile-time option.
+**
+** [[SQLITE_CONFIG_MEMDB_MAXSIZE]]
+** <dt>SQLITE_CONFIG_MEMDB_MAXSIZE
+** <dd>The SQLITE_CONFIG_MEMDB_MAXSIZE option accepts a single
+** [sqlite3_int64] parameter which is the default maximum size for an in-memory
+** database created using [sqlite3_deserialize()]. This default maximum
+** size can be adjusted up or down for individual databases using the
+** [SQLITE_FCNTL_SIZE_LIMIT] [sqlite3_file_control|file-control]. If this
+** configuration setting is never used, then the default maximum is determined
+** by the [SQLITE_MEMDB_DEFAULT_MAXSIZE] compile-time option. If that
+** compile-time option is not set, then the default maximum is 1073741824.
+** </dl>
+*/
+#define SQLITE_CONFIG_SINGLETHREAD 1 /* nil */
+#define SQLITE_CONFIG_MULTITHREAD 2 /* nil */
+#define SQLITE_CONFIG_SERIALIZED 3 /* nil */
+#define SQLITE_CONFIG_MALLOC 4 /* sqlite3_mem_methods* */
+#define SQLITE_CONFIG_GETMALLOC 5 /* sqlite3_mem_methods* */
+#define SQLITE_CONFIG_SCRATCH 6 /* No longer used */
+#define SQLITE_CONFIG_PAGECACHE 7 /* void*, int sz, int N */
+#define SQLITE_CONFIG_HEAP 8 /* void*, int nByte, int min */
+#define SQLITE_CONFIG_MEMSTATUS 9 /* boolean */
+#define SQLITE_CONFIG_MUTEX 10 /* sqlite3_mutex_methods* */
+#define SQLITE_CONFIG_GETMUTEX 11 /* sqlite3_mutex_methods* */
+/* previously SQLITE_CONFIG_CHUNKALLOC 12 which is now unused. */
+#define SQLITE_CONFIG_LOOKASIDE 13 /* int int */
+#define SQLITE_CONFIG_PCACHE 14 /* no-op */
+#define SQLITE_CONFIG_GETPCACHE 15 /* no-op */
+#define SQLITE_CONFIG_LOG 16 /* xFunc, void* */
+#define SQLITE_CONFIG_URI 17 /* int */
+#define SQLITE_CONFIG_PCACHE2 18 /* sqlite3_pcache_methods2* */
+#define SQLITE_CONFIG_GETPCACHE2 19 /* sqlite3_pcache_methods2* */
+#define SQLITE_CONFIG_COVERING_INDEX_SCAN 20 /* int */
+#define SQLITE_CONFIG_SQLLOG 21 /* xSqllog, void* */
+#define SQLITE_CONFIG_MMAP_SIZE 22 /* sqlite3_int64, sqlite3_int64 */
+#define SQLITE_CONFIG_WIN32_HEAPSIZE 23 /* int nByte */
+#define SQLITE_CONFIG_PCACHE_HDRSZ 24 /* int *psz */
+#define SQLITE_CONFIG_PMASZ 25 /* unsigned int szPma */
+#define SQLITE_CONFIG_STMTJRNL_SPILL 26 /* int nByte */
+#define SQLITE_CONFIG_SMALL_MALLOC 27 /* boolean */
+#define SQLITE_CONFIG_SORTERREF_SIZE 28 /* int nByte */
+#define SQLITE_CONFIG_MEMDB_MAXSIZE 29 /* sqlite3_int64 */
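+
+/*
+** Usage sketch (illustrative only; not part of the canonical SQLite
+** documentation, and the limits shown are hypothetical). An application
+** that wants a process-wide cap on memory-mapped I/O might invoke
+** [sqlite3_config()] with SQLITE_CONFIG_MMAP_SIZE before the library
+** is initialized:
+**
+** <blockquote><pre>
+** // Call once at startup, before any other SQLite API; returns SQLITE_OK
+** // on success, or SQLITE_MISUSE if the library is already initialized.
+** int configure_sqlite(void){
+**   return sqlite3_config(SQLITE_CONFIG_MMAP_SIZE,
+**                         (sqlite3_int64)64*1024*1024,     // default limit
+**                         (sqlite3_int64)256*1024*1024);   // maximum limit
+** }
+** </pre></blockquote>
+*/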
+
+/*
+** CAPI3REF: Database Connection Configuration Options
+**
+** These constants are the available integer configuration options that
+** can be passed as the second argument to the [sqlite3_db_config()] interface.
+**
+** New configuration options may be added in future releases of SQLite.
+** Existing configuration options might be discontinued. Applications
+** should check the return code from [sqlite3_db_config()] to make sure that
+** the call worked. ^The [sqlite3_db_config()] interface will return a
+** non-zero [error code] if a discontinued or unsupported configuration option
+** is invoked.
+**
+** <dl>
+** [[SQLITE_DBCONFIG_LOOKASIDE]]
+** <dt>SQLITE_DBCONFIG_LOOKASIDE</dt>
+** <dd> ^This option takes three additional arguments that determine the
+** [lookaside memory allocator] configuration for the [database connection].
+** ^The first argument (the third parameter to [sqlite3_db_config()]) is a
+** pointer to a memory buffer to use for lookaside memory.
+** ^The first argument after the SQLITE_DBCONFIG_LOOKASIDE verb
+** may be NULL in which case SQLite will allocate the
+** lookaside buffer itself using [sqlite3_malloc()]. ^The second argument is the
+** size of each lookaside buffer slot. ^The third argument is the number of
+** slots. The size of the buffer in the first argument must be greater than
+** or equal to the product of the second and third arguments. The buffer
+** must be aligned to an 8-byte boundary. ^If the second argument to
+** SQLITE_DBCONFIG_LOOKASIDE is not a multiple of 8, it is internally
+** rounded down to the next smaller multiple of 8. ^(The lookaside memory
+** configuration for a database connection can only be changed when that
+** connection is not currently using lookaside memory, or in other words
+** when the "current value" returned by
+** [sqlite3_db_status](D,[SQLITE_DBSTATUS_LOOKASIDE_USED],...) is zero.
+** Any attempt to change the lookaside memory configuration when lookaside
+** memory is in use leaves the configuration unchanged and returns
+** [SQLITE_BUSY].)^</dd>
+**
+** [[SQLITE_DBCONFIG_ENABLE_FKEY]]
+** <dt>SQLITE_DBCONFIG_ENABLE_FKEY</dt>
+** <dd> ^This option is used to enable or disable the enforcement of
+** [foreign key constraints]. There should be two additional arguments.
+** The first argument is an integer which is 0 to disable FK enforcement,
+** positive to enable FK enforcement or negative to leave FK enforcement
+** unchanged. The second parameter is a pointer to an integer into which
+** is written 0 or 1 to indicate whether FK enforcement is off or on
+** following this call. The second parameter may be a NULL pointer, in
+** which case the FK enforcement setting is not reported back. </dd>
+**
+** [[SQLITE_DBCONFIG_ENABLE_TRIGGER]]
+** <dt>SQLITE_DBCONFIG_ENABLE_TRIGGER</dt>
+** <dd> ^This option is used to enable or disable [CREATE TRIGGER | triggers].
+** There should be two additional arguments.
+** The first argument is an integer which is 0 to disable triggers,
+** positive to enable triggers or negative to leave the setting unchanged.
+** The second parameter is a pointer to an integer into which
+** is written 0 or 1 to indicate whether triggers are disabled or enabled
+** following this call. The second parameter may be a NULL pointer, in
+** which case the trigger setting is not reported back. </dd>
+**
+** [[SQLITE_DBCONFIG_ENABLE_VIEW]]
+** <dt>SQLITE_DBCONFIG_ENABLE_VIEW</dt>
+** <dd> ^This option is used to enable or disable [CREATE VIEW | views].
+** There should be two additional arguments.
+** The first argument is an integer which is 0 to disable views,
+** positive to enable views or negative to leave the setting unchanged.
+** The second parameter is a pointer to an integer into which
+** is written 0 or 1 to indicate whether views are disabled or enabled
+** following this call. The second parameter may be a NULL pointer, in
+** which case the view setting is not reported back. </dd>
+**
+** [[SQLITE_DBCONFIG_ENABLE_FTS3_TOKENIZER]]
+** <dt>SQLITE_DBCONFIG_ENABLE_FTS3_TOKENIZER</dt>
+** <dd> ^This option is used to enable or disable the
+** [fts3_tokenizer()] function which is part of the
+** [FTS3] full-text search engine extension.
+** There should be two additional arguments.
+** The first argument is an integer which is 0 to disable fts3_tokenizer() or
+** positive to enable fts3_tokenizer() or negative to leave the setting
+** unchanged.
+** The second parameter is a pointer to an integer into which
+** is written 0 or 1 to indicate whether fts3_tokenizer is disabled or enabled
+** following this call. The second parameter may be a NULL pointer, in
+** which case the new setting is not reported back. </dd>
+**
+** [[SQLITE_DBCONFIG_ENABLE_LOAD_EXTENSION]]
+** <dt>SQLITE_DBCONFIG_ENABLE_LOAD_EXTENSION</dt>
+** <dd> ^This option is used to enable or disable the [sqlite3_load_extension()]
+** interface independently of the [load_extension()] SQL function.
+** The [sqlite3_enable_load_extension()] API enables or disables both the
+** C-API [sqlite3_load_extension()] and the SQL function [load_extension()].
+** There should be two additional arguments.
+** When the first argument to this interface is 1, then only the C-API is
+** enabled and the SQL function remains disabled. If the first argument to
+** this interface is 0, then both the C-API and the SQL function are disabled.
+** If the first argument is -1, then no changes are made to state of either the
+** C-API or the SQL function.
+** The second parameter is a pointer to an integer into which
+** is written 0 or 1 to indicate whether [sqlite3_load_extension()] interface
+** is disabled or enabled following this call. The second parameter may
+** be a NULL pointer, in which case the new setting is not reported back.
+** </dd>
+**
+** [[SQLITE_DBCONFIG_MAINDBNAME]] <dt>SQLITE_DBCONFIG_MAINDBNAME</dt>
+** <dd> ^This option is used to change the name of the "main" database
+** schema. ^The sole argument is a pointer to a constant UTF8 string
+** which will become the new schema name in place of "main". ^SQLite
+** does not make a copy of the new main schema name string, so the application
+** must ensure that the argument passed into this DBCONFIG option is unchanged
+** until after the database connection closes.
+** </dd>
+**
+** [[SQLITE_DBCONFIG_NO_CKPT_ON_CLOSE]]
+** <dt>SQLITE_DBCONFIG_NO_CKPT_ON_CLOSE</dt>
+** <dd> Usually, when a database in wal mode is closed or detached from a
+** database handle, SQLite checks if this will mean that there are now no
+** connections at all to the database. If so, it performs a checkpoint
+** operation before closing the connection. This option may be used to
+** override this behaviour. The first parameter passed to this operation
+** is an integer - positive to disable checkpoints-on-close, or zero (the
+** default) to enable them, and negative to leave the setting unchanged.
+** The second parameter is a pointer to an integer
+** into which is written 0 or 1 to indicate whether checkpoints-on-close
+** have been disabled - 0 if they are not disabled, 1 if they are.
+** </dd>
+**
+** [[SQLITE_DBCONFIG_ENABLE_QPSG]] <dt>SQLITE_DBCONFIG_ENABLE_QPSG</dt>
+** <dd>^(The SQLITE_DBCONFIG_ENABLE_QPSG option activates or deactivates
+** the [query planner stability guarantee] (QPSG). When the QPSG is active,
+** a single SQL query statement will always use the same algorithm regardless
+** of values of [bound parameters].)^ The QPSG disables some query optimizations
+** that look at the values of bound parameters, which can make some queries
+** slower. But the QPSG has the advantage of more predictable behavior. With
+** the QPSG active, SQLite will always use the same query plan in the field as
+** was used during testing in the lab.
+** The first argument to this setting is an integer which is 0 to disable
+** the QPSG, positive to enable QPSG, or negative to leave the setting
+** unchanged. The second parameter is a pointer to an integer into which
+** is written 0 or 1 to indicate whether the QPSG is disabled or enabled
+** following this call.
+** </dd>
+**
+** [[SQLITE_DBCONFIG_TRIGGER_EQP]] <dt>SQLITE_DBCONFIG_TRIGGER_EQP</dt>
+** <dd> By default, the output of EXPLAIN QUERY PLAN commands does not
+** include output for any operations performed by trigger programs. This
+** option is used to set or clear (the default) a flag that governs this
+** behavior. The first parameter passed to this operation is an integer -
+** positive to enable output for trigger programs, or zero to disable it,
+** or negative to leave the setting unchanged.
+** The second parameter is a pointer to an integer into which is written
+** 0 or 1 to indicate whether output-for-triggers has been disabled - 0 if
+** it is not disabled, 1 if it is.
+** </dd>
+**
+** [[SQLITE_DBCONFIG_RESET_DATABASE]] <dt>SQLITE_DBCONFIG_RESET_DATABASE</dt>
+** <dd> Set the SQLITE_DBCONFIG_RESET_DATABASE flag and then run
+** [VACUUM] in order to reset a database back to an empty database
+** with no schema and no content. The following process works even for
+** a badly corrupted database file:
+** <ol>
+** <li> If the database connection is newly opened, make sure it has read the
+** database schema by preparing then discarding some query against the
+** database, or calling sqlite3_table_column_metadata(), ignoring any
+** errors. This step is only necessary if the application desires to keep
+** the database in WAL mode after the reset if it was in WAL mode before
+** the reset.
+** <li> sqlite3_db_config(db, SQLITE_DBCONFIG_RESET_DATABASE, 1, 0);
+** <li> [sqlite3_exec](db, "[VACUUM]", 0, 0, 0);
+** <li> sqlite3_db_config(db, SQLITE_DBCONFIG_RESET_DATABASE, 0, 0);
+** </ol>
+** Because resetting a database is destructive and irreversible, the
+** process requires the use of this obscure API and multiple steps to help
+** ensure that it does not happen by accident.
+** </dd>
+**
+** [[SQLITE_DBCONFIG_DEFENSIVE]] <dt>SQLITE_DBCONFIG_DEFENSIVE</dt>
+** <dd>The SQLITE_DBCONFIG_DEFENSIVE option activates or deactivates the
+** "defensive" flag for a database connection. When the defensive
+** flag is enabled, language features that allow ordinary SQL to
+** deliberately corrupt the database file are disabled. The disabled
+** features include but are not limited to the following:
+** <ul>
+** <li> The [PRAGMA writable_schema=ON] statement.
+** <li> The [PRAGMA journal_mode=OFF] statement.
+** <li> Writes to the [sqlite_dbpage] virtual table.
+** <li> Direct writes to [shadow tables].
+** </ul>
+** </dd>
+**
+** [[SQLITE_DBCONFIG_WRITABLE_SCHEMA]] <dt>SQLITE_DBCONFIG_WRITABLE_SCHEMA</dt>
+** <dd>The SQLITE_DBCONFIG_WRITABLE_SCHEMA option activates or deactivates the
+** "writable_schema" flag. This has the same effect and is logically equivalent
+** to setting [PRAGMA writable_schema=ON] or [PRAGMA writable_schema=OFF].
+** The first argument to this setting is an integer which is 0 to disable
+** the writable_schema, positive to enable writable_schema, or negative to
+** leave the setting unchanged. The second parameter is a pointer to an
+** integer into which is written 0 or 1 to indicate whether the writable_schema
+** is enabled or disabled following this call.
+** </dd>
+**
+** [[SQLITE_DBCONFIG_LEGACY_ALTER_TABLE]]
+** <dt>SQLITE_DBCONFIG_LEGACY_ALTER_TABLE</dt>
+** <dd>The SQLITE_DBCONFIG_LEGACY_ALTER_TABLE option activates or deactivates
+** the legacy behavior of the [ALTER TABLE RENAME] command such that it
+** behaves as it did prior to [version 3.24.0] (2018-06-04). See the
+** "Compatibility Notice" on the [ALTER TABLE RENAME documentation] for
+** additional information. This feature can also be turned on and off
+** using the [PRAGMA legacy_alter_table] statement.
+** </dd>
+**
+** [[SQLITE_DBCONFIG_DQS_DML]]
+** <dt>SQLITE_DBCONFIG_DQS_DML</dt>
+** <dd>The SQLITE_DBCONFIG_DQS_DML option activates or deactivates
+** the legacy [double-quoted string literal] misfeature for DML statements
+** only, that is DELETE, INSERT, SELECT, and UPDATE statements. The
+** default value of this setting is determined by the [-DSQLITE_DQS]
+** compile-time option.
+** </dd>
+**
+** [[SQLITE_DBCONFIG_DQS_DDL]]
+** <dt>SQLITE_DBCONFIG_DQS_DDL</dt>
+** <dd>The SQLITE_DBCONFIG_DQS_DDL option activates or deactivates
+** the legacy [double-quoted string literal] misfeature for DDL statements,
+** such as CREATE TABLE and CREATE INDEX. The
+** default value of this setting is determined by the [-DSQLITE_DQS]
+** compile-time option.
+** </dd>
+**
+** [[SQLITE_DBCONFIG_TRUSTED_SCHEMA]]
+** <dt>SQLITE_DBCONFIG_TRUSTED_SCHEMA</dt>
+** <dd>The SQLITE_DBCONFIG_TRUSTED_SCHEMA option tells SQLite to
+** assume that database schemas are untainted by malicious content.
+** When the SQLITE_DBCONFIG_TRUSTED_SCHEMA option is disabled, SQLite
+** takes additional defensive steps to protect the application from harm
+** including:
+** <ul>
+** <li> Prohibit the use of SQL functions inside triggers, views,
+** CHECK constraints, DEFAULT clauses, expression indexes,
+** partial indexes, or generated columns
+** unless those functions are tagged with [SQLITE_INNOCUOUS].
+** <li> Prohibit the use of virtual tables inside of triggers or views
+** unless those virtual tables are tagged with [SQLITE_VTAB_INNOCUOUS].
+** </ul>
+** This setting defaults to "on" for legacy compatibility, however
+** all applications are advised to turn it off if possible. This setting
+** can also be controlled using the [PRAGMA trusted_schema] statement.
+** </dd>
+**
+** [[SQLITE_DBCONFIG_LEGACY_FILE_FORMAT]]
+** <dt>SQLITE_DBCONFIG_LEGACY_FILE_FORMAT</dt>
+** <dd>The SQLITE_DBCONFIG_LEGACY_FILE_FORMAT option activates or deactivates
+** the legacy file format flag. When activated, this flag causes all newly
+** created database files to have a schema format version number (the 4-byte
+** integer found at offset 44 into the database header) of 1. This in turn
+** means that the resulting database file will be readable and writable by
+** any SQLite version back to 3.0.0 ([dateof:3.0.0]). Without this setting,
+** newly created databases are generally not understandable by SQLite versions
+** prior to 3.3.0 ([dateof:3.3.0]). As these words are written, there
+** is now scarcely any need to generate database files that are compatible
+** all the way back to version 3.0.0, and so this setting is of little
+** practical use, but is provided so that SQLite can continue to claim the
+** ability to generate new database files that are compatible with version
+** 3.0.0.
+** <p>Note that when the SQLITE_DBCONFIG_LEGACY_FILE_FORMAT setting is on,
+** the [VACUUM] command will fail with an obscure error when attempting to
+** process a table with generated columns and a descending index. This is
+** not considered a bug since SQLite versions 3.3.0 and earlier do not support
+** either generated columns or descending indexes.
+** </dd>
+** </dl>
+*/
+#define SQLITE_DBCONFIG_MAINDBNAME 1000 /* const char* */
+#define SQLITE_DBCONFIG_LOOKASIDE 1001 /* void* int int */
+#define SQLITE_DBCONFIG_ENABLE_FKEY 1002 /* int int* */
+#define SQLITE_DBCONFIG_ENABLE_TRIGGER 1003 /* int int* */
+#define SQLITE_DBCONFIG_ENABLE_FTS3_TOKENIZER 1004 /* int int* */
+#define SQLITE_DBCONFIG_ENABLE_LOAD_EXTENSION 1005 /* int int* */
+#define SQLITE_DBCONFIG_NO_CKPT_ON_CLOSE 1006 /* int int* */
+#define SQLITE_DBCONFIG_ENABLE_QPSG 1007 /* int int* */
+#define SQLITE_DBCONFIG_TRIGGER_EQP 1008 /* int int* */
+#define SQLITE_DBCONFIG_RESET_DATABASE 1009 /* int int* */
+#define SQLITE_DBCONFIG_DEFENSIVE 1010 /* int int* */
+#define SQLITE_DBCONFIG_WRITABLE_SCHEMA 1011 /* int int* */
+#define SQLITE_DBCONFIG_LEGACY_ALTER_TABLE 1012 /* int int* */
+#define SQLITE_DBCONFIG_DQS_DML 1013 /* int int* */
+#define SQLITE_DBCONFIG_DQS_DDL 1014 /* int int* */
+#define SQLITE_DBCONFIG_ENABLE_VIEW 1015 /* int int* */
+#define SQLITE_DBCONFIG_LEGACY_FILE_FORMAT 1016 /* int int* */
+#define SQLITE_DBCONFIG_TRUSTED_SCHEMA 1017 /* int int* */
+#define SQLITE_DBCONFIG_MAX 1017 /* Largest DBCONFIG */
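+
+/*
+** Usage sketch (illustrative only; not part of the canonical SQLite
+** documentation). A connection might enable the defensive flag and read
+** back the resulting state as follows:
+**
+** <blockquote><pre>
+** int enable_defensive(sqlite3 *db){
+**   int isDefensive = 0;
+**   int rc = sqlite3_db_config(db, SQLITE_DBCONFIG_DEFENSIVE,
+**                              1, &isDefensive);
+**   // On SQLITE_OK, isDefensive is 1 and SQL that can deliberately
+**   // corrupt the database file is now rejected.
+**   return rc;
+** }
+** </pre></blockquote>
+*/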
+
+/*
+** CAPI3REF: Enable Or Disable Extended Result Codes
+** METHOD: sqlite3
+**
+** ^The sqlite3_extended_result_codes() routine enables or disables the
+** [extended result codes] feature of SQLite. ^The extended result
+** codes are disabled by default for historical compatibility.
+*/
+SQLITE_API int sqlite3_extended_result_codes(sqlite3*, int onoff);
+
+/*
+** CAPI3REF: Last Insert Rowid
+** METHOD: sqlite3
+**
+** ^Each entry in most SQLite tables (except for [WITHOUT ROWID] tables)
+** has a unique 64-bit signed
+** integer key called the [ROWID | "rowid"]. ^The rowid is always available
+** as an undeclared column named ROWID, OID, or _ROWID_ as long as those
+** names are not also used by explicitly declared columns. ^If
+** the table has a column of type [INTEGER PRIMARY KEY] then that column
+** is another alias for the rowid.
+**
+** ^The sqlite3_last_insert_rowid(D) interface usually returns the [rowid] of
+** the most recent successful [INSERT] into a rowid table or [virtual table]
+** on database connection D. ^Inserts into [WITHOUT ROWID] tables are not
+** recorded. ^If no successful [INSERT]s into rowid tables have ever occurred
+** on the database connection D, then sqlite3_last_insert_rowid(D) returns
+** zero.
+**
+** As well as being set automatically as rows are inserted into database
+** tables, the value returned by this function may be set explicitly by
+** [sqlite3_set_last_insert_rowid()].
+**
+** Some virtual table implementations may INSERT rows into rowid tables as
+** part of committing a transaction (e.g. to flush data accumulated in memory
+** to disk). In this case subsequent calls to this function return the rowid
+** associated with these internal INSERT operations, which leads to
+** unintuitive results. Virtual table implementations that do write to rowid
+** tables in this way can avoid this problem by restoring the original
+** rowid value using [sqlite3_set_last_insert_rowid()] before returning
+** control to the user.
+**
+** ^(If an [INSERT] occurs within a trigger then this routine will
+** return the [rowid] of the inserted row as long as the trigger is
+** running. Once the trigger program ends, the value returned
+** by this routine reverts to what it was before the trigger was fired.)^
+**
+** ^An [INSERT] that fails due to a constraint violation is not a
+** successful [INSERT] and does not change the value returned by this
+** routine. ^Thus INSERT OR FAIL, INSERT OR IGNORE, INSERT OR ROLLBACK,
+** and INSERT OR ABORT make no changes to the return value of this
+** routine when their insertion fails. ^(When INSERT OR REPLACE
+** encounters a constraint violation, it does not fail. The
+** INSERT continues to completion after deleting rows that caused
+** the constraint problem so INSERT OR REPLACE will always change
+** the return value of this interface.)^
+**
+** ^For the purposes of this routine, an [INSERT] is considered to
+** be successful even if it is subsequently rolled back.
+**
+** This function is accessible to SQL statements via the
+** [last_insert_rowid() SQL function].
+**
+** If a separate thread performs a new [INSERT] on the same
+** database connection while the [sqlite3_last_insert_rowid()]
+** function is running and thus changes the last insert [rowid],
+** then the value returned by [sqlite3_last_insert_rowid()] is
+** unpredictable and might not equal either the old or the new
+** last insert [rowid].
+*/
+SQLITE_API sqlite3_int64 sqlite3_last_insert_rowid(sqlite3*);
+
+/*
+** CAPI3REF: Set the Last Insert Rowid value.
+** METHOD: sqlite3
+**
+** The sqlite3_set_last_insert_rowid(D, R) method allows the application to
+** set the value returned by calling sqlite3_last_insert_rowid(D) to R
+** without inserting a row into the database.
+*/
+SQLITE_API void sqlite3_set_last_insert_rowid(sqlite3*,sqlite3_int64);
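+
+/*
+** Usage sketch (illustrative only; the "users" table is hypothetical).
+** A typical pattern is to run an INSERT and immediately read back the
+** rowid of the new row on the same connection:
+**
+** <blockquote><pre>
+** // Error handling elided for brevity.
+** sqlite3_int64 insert_user(sqlite3 *db){
+**   sqlite3_exec(db, "INSERT INTO users(name) VALUES('alice')", 0, 0, 0);
+**   return sqlite3_last_insert_rowid(db);
+** }
+** </pre></blockquote>
+*/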
+
+/*
+** CAPI3REF: Count The Number Of Rows Modified
+** METHOD: sqlite3
+**
+** ^This function returns the number of rows modified, inserted or
+** deleted by the most recently completed INSERT, UPDATE or DELETE
+** statement on the database connection specified by the only parameter.
+** ^Executing any other type of SQL statement does not modify the value
+** returned by this function.
+**
+** ^Only changes made directly by the INSERT, UPDATE or DELETE statement are
+** considered - auxiliary changes caused by [CREATE TRIGGER | triggers],
+** [foreign key actions] or [REPLACE] constraint resolution are not counted.
+**
+** Changes to a view that are intercepted by
+** [INSTEAD OF trigger | INSTEAD OF triggers] are not counted. ^The value
+** returned by sqlite3_changes() immediately after an INSERT, UPDATE or
+** DELETE statement run on a view is always zero. Only changes made to real
+** tables are counted.
+**
+** Things are more complicated if the sqlite3_changes() function is
+** executed while a trigger program is running. This may happen if the
+** program uses the [changes() SQL function], or if some other callback
+** function invokes sqlite3_changes() directly. Essentially:
+**
+** <ul>
+** <li> ^(Before entering a trigger program the value returned by the
+** sqlite3_changes() function is saved. After the trigger program
+** has finished, the original value is restored.)^
+**
+** <li> ^(Within a trigger program each INSERT, UPDATE and DELETE
+** statement sets the value returned by sqlite3_changes()
+** upon completion as normal. Of course, this value will not include
+** any changes performed by sub-triggers, as the sqlite3_changes()
+** value will be saved and restored after each sub-trigger has run.)^
+** </ul>
+**
+** ^This means that if the changes() SQL function (or similar) is used
+** by the first INSERT, UPDATE or DELETE statement within a trigger, it
+** returns the value as set when the calling statement began executing.
+** ^If it is used by the second or subsequent such statement within a trigger
+** program, the value returned reflects the number of rows modified by the
+** previous INSERT, UPDATE or DELETE statement within the same trigger.
+**
+** If a separate thread makes changes on the same database connection
+** while [sqlite3_changes()] is running then the value returned
+** is unpredictable and not meaningful.
+**
+** See also:
+** <ul>
+** <li> the [sqlite3_total_changes()] interface
+** <li> the [count_changes pragma]
+** <li> the [changes() SQL function]
+** <li> the [data_version pragma]
+** </ul>
+*/
+SQLITE_API int sqlite3_changes(sqlite3*);
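+
+/*
+** Usage sketch (illustrative only; the "users" table is hypothetical).
+** Reading sqlite3_changes() immediately after a single UPDATE reports
+** how many rows that statement touched:
+**
+** <blockquote><pre>
+** int deactivate_user(sqlite3 *db, const char *zName){
+**   char *zSql = sqlite3_mprintf(
+**       "UPDATE users SET active=0 WHERE name=%Q", zName);
+**   int rc = sqlite3_exec(db, zSql, 0, 0, 0);
+**   sqlite3_free(zSql);
+**   return rc==SQLITE_OK ? sqlite3_changes(db) : -1;
+** }
+** </pre></blockquote>
+*/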
+
+/*
+** CAPI3REF: Total Number Of Rows Modified
+** METHOD: sqlite3
+**
+** ^This function returns the total number of rows inserted, modified or
+** deleted by all [INSERT], [UPDATE] or [DELETE] statements completed
+** since the database connection was opened, including those executed as
+** part of trigger programs. ^Executing any other type of SQL statement
+** does not affect the value returned by sqlite3_total_changes().
+**
+** ^Changes made as part of [foreign key actions] are included in the
+** count, but those made as part of REPLACE constraint resolution are
+** not. ^Changes to a view that are intercepted by INSTEAD OF triggers
+** are not counted.
+**
+** The [sqlite3_total_changes(D)] interface only reports the number
+** of rows that changed due to SQL statements run against database
+** connection D. Any changes by other database connections are ignored.
+** To detect changes against a database file from other database
+** connections use the [PRAGMA data_version] command or the
+** [SQLITE_FCNTL_DATA_VERSION] [file control].
+**
+** If a separate thread makes changes on the same database connection
+** while [sqlite3_total_changes()] is running then the value
+** returned is unpredictable and not meaningful.
+**
+** See also:
+** <ul>
+** <li> the [sqlite3_changes()] interface
+** <li> the [count_changes pragma]
+** <li> the [changes() SQL function]
+** <li> the [data_version pragma]
+** <li> the [SQLITE_FCNTL_DATA_VERSION] [file control]
+** </ul>
+*/
+SQLITE_API int sqlite3_total_changes(sqlite3*);
+
+/*
+** CAPI3REF: Interrupt A Long-Running Query
+** METHOD: sqlite3
+**
+** ^This function causes any pending database operation to abort and
+** return at its earliest opportunity. This routine is typically
+** called in response to a user action such as pressing "Cancel"
+** or Ctrl-C where the user wants a long query operation to halt
+** immediately.
+**
+** ^It is safe to call this routine from a thread different from the
+** thread that is currently running the database operation. But it
+** is not safe to call this routine with a [database connection] that
+** is closed or might close before sqlite3_interrupt() returns.
+**
+** ^If an SQL operation is very nearly finished at the time when
+** sqlite3_interrupt() is called, then it might not have an opportunity
+** to be interrupted and might continue to completion.
+**
+** ^An SQL operation that is interrupted will return [SQLITE_INTERRUPT].
+** ^If the interrupted SQL operation is an INSERT, UPDATE, or DELETE
+** that is inside an explicit transaction, then the entire transaction
+** will be rolled back automatically.
+**
+** ^The sqlite3_interrupt(D) call is in effect until all currently running
+** SQL statements on [database connection] D complete. ^Any new SQL statements
+** that are started after the sqlite3_interrupt() call and before the
+** running statement count reaches zero are interrupted as if they had been
+** running prior to the sqlite3_interrupt() call. ^New SQL statements
+** that are started after the running statement count reaches zero are
+** not affected by the sqlite3_interrupt().
+** ^A call to sqlite3_interrupt(D) that occurs when there are no running
+** SQL statements is a no-op and has no effect on SQL statements
+** that are started after the sqlite3_interrupt() call returns.
+*/
+SQLITE_API void sqlite3_interrupt(sqlite3*);
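+
+/*
+** Usage sketch (illustrative only; assumes POSIX threads and that the
+** connection remains open until the watchdog thread has finished). A
+** second thread can cancel a long-running query after a time budget:
+**
+** <blockquote><pre>
+** #include &lt;pthread.h&gt;
+** #include &lt;unistd.h&gt;
+** static void *watchdog(void *p){
+**   sqlite3 *db = (sqlite3*)p;
+**   sleep(5);               // hypothetical 5-second budget
+**   sqlite3_interrupt(db);  // pending statements get SQLITE_INTERRUPT
+**   return 0;
+** }
+** </pre></blockquote>
+*/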
+
+/*
+** CAPI3REF: Determine If An SQL Statement Is Complete
+**
+** These routines are useful during command-line input to determine if the
+** currently entered text seems to form a complete SQL statement or
+** if additional input is needed before sending the text into
+** SQLite for parsing. ^These routines return 1 if the input string
+** appears to be a complete SQL statement. ^A statement is judged to be
+** complete if it ends with a semicolon token and is not a prefix of a
+** well-formed CREATE TRIGGER statement. ^Semicolons that are embedded within
+** string literals or quoted identifier names or comments are not
+** independent tokens (they are part of the token in which they are
+** embedded) and thus do not count as a statement terminator. ^Whitespace
+** and comments that follow the final semicolon are ignored.
+**
+** ^These routines return 0 if the statement is incomplete. ^If a
+** memory allocation fails, then SQLITE_NOMEM is returned.
+**
+** ^These routines do not parse the SQL statements and thus
+** will not detect syntactically incorrect SQL.
+**
+** ^(If SQLite has not been initialized using [sqlite3_initialize()] prior
+** to invoking sqlite3_complete16() then sqlite3_initialize() is invoked
+** automatically by sqlite3_complete16(). If that initialization fails,
+** then the return value from sqlite3_complete16() will be non-zero
+** regardless of whether or not the input SQL is complete.)^
+**
+** The input to [sqlite3_complete()] must be a zero-terminated
+** UTF-8 string.
+**
+** The input to [sqlite3_complete16()] must be a zero-terminated
+** UTF-16 string in native byte order.
+*/
+SQLITE_API int sqlite3_complete(const char *sql);
+SQLITE_API int sqlite3_complete16(const void *sql);
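+
+/*
+** Usage sketch (illustrative only; run_sql() is a hypothetical helper
+** and buffer management is simplified). A line-oriented shell can keep
+** accumulating input until sqlite3_complete() reports a full statement:
+**
+** <blockquote><pre>
+** char zBuf&#91;4096]; size_t n = 0;
+** while( fgets(zBuf+n, (int)(sizeof(zBuf)-n), stdin) ){
+**   n += strlen(zBuf+n);
+**   if( sqlite3_complete(zBuf) ){
+**     run_sql(db, zBuf);      // hypothetical helper
+**     n = 0; zBuf&#91;0] = 0;
+**   }
+** }
+** </pre></blockquote>
+*/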
+
+/*
+** CAPI3REF: Register A Callback To Handle SQLITE_BUSY Errors
+** KEYWORDS: {busy-handler callback} {busy handler}
+** METHOD: sqlite3
+**
+** ^The sqlite3_busy_handler(D,X,P) routine sets a callback function X
+** that might be invoked with argument P whenever
+** an attempt is made to access a database table associated with
+** [database connection] D when another thread
+** or process has the table locked.
+** The sqlite3_busy_handler() interface is used to implement
+** [sqlite3_busy_timeout()] and [PRAGMA busy_timeout].
+**
+** ^If the busy callback is NULL, then [SQLITE_BUSY]
+** is returned immediately upon encountering the lock. ^If the busy callback
+** is not NULL, then the callback might be invoked with two arguments.
+**
+** ^The first argument to the busy handler is a copy of the void* pointer which
+** is the third argument to sqlite3_busy_handler(). ^The second argument to
+** the busy handler callback is the number of times that the busy handler has
+** been invoked previously for the same locking event. ^If the
+** busy callback returns 0, then no additional attempts are made to
+** access the database and [SQLITE_BUSY] is returned
+** to the application.
+** ^If the callback returns non-zero, then another attempt
+** is made to access the database and the cycle repeats.
+**
+** The presence of a busy handler does not guarantee that it will be invoked
+** when there is lock contention. ^If SQLite determines that invoking the busy
+** handler could result in a deadlock, it will go ahead and return [SQLITE_BUSY]
+** to the application instead of invoking the
+** busy handler.
+** Consider a scenario where one process is holding a read lock that
+** it is trying to promote to a reserved lock and
+** a second process is holding a reserved lock that it is trying
+** to promote to an exclusive lock. The first process cannot proceed
+** because it is blocked by the second and the second process cannot
+** proceed because it is blocked by the first. If both processes
+** invoke the busy handlers, neither will make any progress. Therefore,
+** SQLite returns [SQLITE_BUSY] for the first process, hoping that this
+** will induce the first process to release its read lock and allow
+** the second process to proceed.
+**
+** ^The default busy callback is NULL.
+**
+** ^(There can only be a single busy handler defined for each
+** [database connection]. Setting a new busy handler clears any
+** previously set handler.)^ ^Note that calling [sqlite3_busy_timeout()]
+** or evaluating [PRAGMA busy_timeout=N] will change the
+** busy handler and thus clear any previously set busy handler.
+**
+** The busy callback should not take any actions which modify the
+** database connection that invoked the busy handler. In other words,
+** the busy handler is not reentrant. Any such actions
+** result in undefined behavior.
+**
+** A busy handler must not close the database connection
+** or [prepared statement] that invoked the busy handler.
+*/
+SQLITE_API int sqlite3_busy_handler(sqlite3*,int(*)(void*,int),void*);
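+
+/*
+** Usage sketch (illustrative only; the retry count and delay are
+** hypothetical). A handler that retries a limited number of times with
+** a short pause between attempts:
+**
+** <blockquote><pre>
+** static int busy_cb(void *pArg, int nPrior){
+**   (void)pArg;
+**   if( nPrior>=10 ) return 0;   // give up: caller sees SQLITE_BUSY
+**   sqlite3_sleep(50);           // pause 50ms, then let SQLite retry
+**   return 1;
+** }
+** // ...
+** sqlite3_busy_handler(db, busy_cb, 0);
+** </pre></blockquote>
+*/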
+
+/*
+** CAPI3REF: Set A Busy Timeout
+** METHOD: sqlite3
+**
+** ^This routine sets a [sqlite3_busy_handler | busy handler] that sleeps
+** for a specified amount of time when a table is locked. ^The handler
+** will sleep multiple times until at least "ms" milliseconds of sleeping
+** have accumulated. ^After at least "ms" milliseconds of sleeping,
+** the handler returns 0 which causes [sqlite3_step()] to return
+** [SQLITE_BUSY].
+**
+** ^Calling this routine with an argument less than or equal to zero
+** turns off all busy handlers.
+**
+** ^(There can only be a single busy handler for a particular
+** [database connection] at any given moment. If another busy handler
+** was defined (using [sqlite3_busy_handler()]) prior to calling
+** this routine, that other busy handler is cleared.)^
+**
+** See also: [PRAGMA busy_timeout]
+*/
+SQLITE_API int sqlite3_busy_timeout(sqlite3*, int ms);
+
+/*
+** CAPI3REF: Convenience Routines For Running Queries
+** METHOD: sqlite3
+**
+** This is a legacy interface that is preserved for backwards compatibility.
+** Use of this interface is not recommended.
+**
+** Definition: A <b>result table</b> is a memory data structure created by the
+** [sqlite3_get_table()] interface. A result table records the
+** complete query results from one or more queries.
+**
+** The table conceptually has a number of rows and columns. But
+** these numbers are not part of the result table itself. These
+** numbers are obtained separately. Let N be the number of rows
+** and M be the number of columns.
+**
+** A result table is an array of pointers to zero-terminated UTF-8 strings.
+** There are (N+1)*M elements in the array. The first M pointers point
+** to zero-terminated strings that contain the names of the columns.
+** The remaining entries all point to query results. NULL values result
+** in NULL pointers. All other values are in their UTF-8 zero-terminated
+** string representation as returned by [sqlite3_column_text()].
+**
+** A result table might consist of one or more memory allocations.
+** It is not safe to pass a result table directly to [sqlite3_free()].
+** A result table should be deallocated using [sqlite3_free_table()].
+**
+** ^(As an example of the result table format, suppose a query result
+** is as follows:
+**
+** <blockquote><pre>
+** Name | Age
+** -----------------------
+** Alice | 43
+** Bob | 28
+** Cindy | 21
+** </pre></blockquote>
+**
+** There are two columns (M==2) and three rows (N==3). Thus the
+** result table has 8 entries. Suppose the result table is stored
+** in an array named azResult. Then azResult holds this content:
+**
+** <blockquote><pre>
+** azResult&#91;0] = "Name";
+** azResult&#91;1] = "Age";
+** azResult&#91;2] = "Alice";
+** azResult&#91;3] = "43";
+** azResult&#91;4] = "Bob";
+** azResult&#91;5] = "28";
+** azResult&#91;6] = "Cindy";
+** azResult&#91;7] = "21";
+** </pre></blockquote>)^
+**
+** ^The sqlite3_get_table() function evaluates one or more
+** semicolon-separated SQL statements in the zero-terminated UTF-8
+** string of its 2nd parameter and returns a result table to the
+** pointer given in its 3rd parameter.
+**
+** After the application has finished with the result from sqlite3_get_table(),
+** it must pass the result table pointer to sqlite3_free_table() in order to
+** release the memory that was malloced. Because of the way the
+** [sqlite3_malloc()] happens within sqlite3_get_table(), the calling
+** function must not try to call [sqlite3_free()] directly. Only
+** [sqlite3_free_table()] is able to release the memory properly and safely.
+**
+** The sqlite3_get_table() interface is implemented as a wrapper around
+** [sqlite3_exec()]. The sqlite3_get_table() routine does not have access
+** to any internal data structures of SQLite. It uses only the public
+** interface defined here. As a consequence, errors that occur in the
+** wrapper layer outside of the internal [sqlite3_exec()] call are not
+** reflected in subsequent calls to [sqlite3_errcode()] or
+** [sqlite3_errmsg()].
+*/
+SQLITE_API int sqlite3_get_table(
+ sqlite3 *db, /* An open database */
+ const char *zSql, /* SQL to be evaluated */
+ char ***pazResult, /* Results of the query */
+ int *pnRow, /* Number of result rows written here */
+ int *pnColumn, /* Number of result columns written here */
+ char **pzErrmsg /* Error msg written here */
+);
+SQLITE_API void sqlite3_free_table(char **result);
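+
+/*
+** Usage sketch (illustrative only; the "people" table is hypothetical
+** and result values are assumed non-NULL for brevity). New code should
+** prefer [sqlite3_prepare_v2()], but the legacy interface is used like so:
+**
+** <blockquote><pre>
+** char **azResult = 0; int nRow = 0, nCol = 0; char *zErr = 0;
+** int rc = sqlite3_get_table(db, "SELECT name, age FROM people",
+**                            &azResult, &nRow, &nCol, &zErr);
+** if( rc==SQLITE_OK ){
+**   for(int i=1; i&lt;=nRow; i++){
+**     // row i starts at index i*nCol; indices 0..nCol-1 hold column names
+**     printf("%s is %s\n", azResult&#91;i*nCol], azResult&#91;i*nCol+1]);
+**   }
+** }
+** sqlite3_free_table(azResult);
+** sqlite3_free(zErr);
+** </pre></blockquote>
+*/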
+
+/*
+** CAPI3REF: Formatted String Printing Functions
+**
+** These routines are work-alikes of the "printf()" family of functions
+** from the standard C library.
+** These routines understand most of the common formatting options from
+** the standard library printf()
+** plus some additional non-standard formats ([%q], [%Q], [%w], and [%z]).
+** See the [built-in printf()] documentation for details.
+**
+** ^The sqlite3_mprintf() and sqlite3_vmprintf() routines write their
+** results into memory obtained from [sqlite3_malloc64()].
+** The strings returned by these two routines should be
+** released by [sqlite3_free()]. ^Both routines return a
+** NULL pointer if [sqlite3_malloc64()] is unable to allocate enough
+** memory to hold the resulting string.
+**
+** ^(The sqlite3_snprintf() routine is similar to "snprintf()" from
+** the standard C library. The result is written into the
+** buffer supplied as the second parameter whose size is given by
+** the first parameter. Note that the order of the
+** first two parameters is reversed from snprintf().)^ This is an
+** historical accident that cannot be fixed without breaking
+** backwards compatibility. ^(Note also that sqlite3_snprintf()
+** returns a pointer to its buffer instead of the number of
+** characters actually written into the buffer.)^ We admit that
+** the number of characters written would be a more useful return
+** value but we cannot change the implementation of sqlite3_snprintf()
+** now without breaking compatibility.
+**
+** ^As long as the buffer size is greater than zero, sqlite3_snprintf()
+** guarantees that the buffer is always zero-terminated. ^The first
+** parameter "n" is the total size of the buffer, including space for
+** the zero terminator. So the longest string that can be completely
+** written will be n-1 characters.
+**
+** ^The sqlite3_vsnprintf() routine works like sqlite3_snprintf() except
+** that it accepts a va_list argument in place of a variable argument list.
+**
+** See also: [built-in printf()], [printf() SQL function]
+*/
+SQLITE_API char *sqlite3_mprintf(const char*,...);
+SQLITE_API char *sqlite3_vmprintf(const char*, va_list);
+SQLITE_API char *sqlite3_snprintf(int,char*,const char*, ...);
+SQLITE_API char *sqlite3_vsnprintf(int,char*,const char*, va_list);
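+
+/*
+** Usage sketch (illustrative only; zUserText and the "notes" table are
+** hypothetical). The %q format doubles embedded quote characters so the
+** value can safely appear inside '...' in an SQL literal:
+**
+** <blockquote><pre>
+** char *zSql = sqlite3_mprintf(
+**     "INSERT INTO notes(body) VALUES('%q')", zUserText);
+** if( zSql ){
+**   sqlite3_exec(db, zSql, 0, 0, 0);
+**   sqlite3_free(zSql);   // release memory from sqlite3_malloc64()
+** }
+** </pre></blockquote>
+*/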
+
+/*
+** CAPI3REF: Memory Allocation Subsystem
+**
+** The SQLite core uses these three routines for all of its own
+** internal memory allocation needs. "Core" in the previous sentence
+** does not include operating-system specific [VFS] implementation. The
+** Windows VFS uses native malloc() and free() for some operations.
+**
+** ^The sqlite3_malloc() routine returns a pointer to a block
+** of memory at least N bytes in length, where N is the parameter.
+** ^If sqlite3_malloc() is unable to obtain sufficient free
+** memory, it returns a NULL pointer. ^If the parameter N to
+** sqlite3_malloc() is zero or negative then sqlite3_malloc() returns
+** a NULL pointer.
+**
+** ^The sqlite3_malloc64(N) routine works just like
+** sqlite3_malloc(N) except that N is an unsigned 64-bit integer instead
+** of a signed 32-bit integer.
+**
+** ^Calling sqlite3_free() with a pointer previously returned
+** by sqlite3_malloc() or sqlite3_realloc() releases that memory so
+** that it might be reused. ^The sqlite3_free() routine is
+** a no-op if it is called with a NULL pointer. Passing a NULL pointer
+** to sqlite3_free() is harmless. After being freed, memory
+** should neither be read nor written. Even reading previously freed
+** memory might result in a segmentation fault or other severe error.
+** Memory corruption, a segmentation fault, or other severe error
+** might result if sqlite3_free() is called with a non-NULL pointer that
+** was not obtained from sqlite3_malloc() or sqlite3_realloc().
+**
+** ^The sqlite3_realloc(X,N) interface attempts to resize a
+** prior memory allocation X to be at least N bytes.
+** ^If the X parameter to sqlite3_realloc(X,N)
+** is a NULL pointer then its behavior is identical to calling
+** sqlite3_malloc(N).
+** ^If the N parameter to sqlite3_realloc(X,N) is zero or
+** negative then the behavior is exactly the same as calling
+** sqlite3_free(X).
+** ^sqlite3_realloc(X,N) returns a pointer to a memory allocation
+** of at least N bytes in size or NULL if insufficient memory is available.
+** ^If M is the size of the prior allocation, then min(N,M) bytes
+** of the prior allocation are copied into the beginning of buffer returned
+** by sqlite3_realloc(X,N) and the prior allocation is freed.
+** ^If sqlite3_realloc(X,N) returns NULL and N is positive, then the
+** prior allocation is not freed.
+**
+** ^The sqlite3_realloc64(X,N) interfaces works the same as
+** sqlite3_realloc(X,N) except that N is a 64-bit unsigned integer instead
+** of a 32-bit signed integer.
+**
+** ^If X is a memory allocation previously obtained from sqlite3_malloc(),
+** sqlite3_malloc64(), sqlite3_realloc(), or sqlite3_realloc64(), then
+** sqlite3_msize(X) returns the size of that memory allocation in bytes.
+** ^The value returned by sqlite3_msize(X) might be larger than the number
+** of bytes requested when X was allocated. ^If X is a NULL pointer then
+** sqlite3_msize(X) returns zero. If X points to something that is not
+** the beginning of a memory allocation, or if it points to a formerly
+** valid memory allocation that has now been freed, then the behavior
+** of sqlite3_msize(X) is undefined and possibly harmful.
+**
+** ^The memory returned by sqlite3_malloc(), sqlite3_realloc(),
+** sqlite3_malloc64(), and sqlite3_realloc64()
+** is always aligned to at least an 8 byte boundary, or to a
+** 4 byte boundary if the [SQLITE_4_BYTE_ALIGNED_MALLOC] compile-time
+** option is used.
+**
+** The pointer arguments to [sqlite3_free()] and [sqlite3_realloc()]
+** must be either NULL or else pointers obtained from a prior
+** invocation of [sqlite3_malloc()] or [sqlite3_realloc()] that have
+** not yet been released.
+**
+** The application must not read or write any part of
+** a block of memory after it has been released using
+** [sqlite3_free()] or [sqlite3_realloc()].
+*/
+SQLITE_API void *sqlite3_malloc(int);
+SQLITE_API void *sqlite3_malloc64(sqlite3_uint64);
+SQLITE_API void *sqlite3_realloc(void*, int);
+SQLITE_API void *sqlite3_realloc64(void*, sqlite3_uint64);
+SQLITE_API void sqlite3_free(void*);
+SQLITE_API sqlite3_uint64 sqlite3_msize(void*);
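+
+/*
+** Usage sketch (illustrative only; the sizes are hypothetical).
+** Allocate, grow, inspect, and release a buffer with the SQLite
+** allocator:
+**
+** <blockquote><pre>
+** char *p = sqlite3_malloc(100);
+** if( p ){
+**   char *p2 = sqlite3_realloc(p, 200);
+**   if( p2 ){
+**     p = p2;
+**     // sqlite3_msize() may report more than the 200 bytes requested
+**     sqlite3_uint64 sz = sqlite3_msize(p);
+**     (void)sz;
+**   }
+**   sqlite3_free(p);
+** }
+** </pre></blockquote>
+*/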
+
+/*
+** CAPI3REF: Memory Allocator Statistics
+**
+** SQLite provides these two interfaces for reporting on the status
+** of the [sqlite3_malloc()], [sqlite3_free()], and [sqlite3_realloc()]
+** routines, which form the built-in memory allocation subsystem.
+**
+** ^The [sqlite3_memory_used()] routine returns the number of bytes
+** of memory currently outstanding (malloced but not freed).
+** ^The [sqlite3_memory_highwater()] routine returns the maximum
+** value of [sqlite3_memory_used()] since the high-water mark
+** was last reset. ^The values returned by [sqlite3_memory_used()] and
+** [sqlite3_memory_highwater()] include any overhead
+** added by SQLite in its implementation of [sqlite3_malloc()],
+** but not overhead added by any underlying system library
+** routines that [sqlite3_malloc()] may call.
+**
+** ^The memory high-water mark is reset to the current value of
+** [sqlite3_memory_used()] if and only if the parameter to
+** [sqlite3_memory_highwater()] is true. ^The value returned
+** by [sqlite3_memory_highwater(1)] is the high-water mark
+** prior to the reset.
+*/
+SQLITE_API sqlite3_int64 sqlite3_memory_used(void);
+SQLITE_API sqlite3_int64 sqlite3_memory_highwater(int resetFlag);
+
+/*
+** CAPI3REF: Pseudo-Random Number Generator
+**
+** SQLite contains a high-quality pseudo-random number generator (PRNG) used to
+** select random [ROWID | ROWIDs] when inserting new records into a table that
+** already uses the largest possible [ROWID]. The PRNG is also used for
+** the built-in random() and randomblob() SQL functions. This interface allows
+** applications to access the same PRNG for other purposes.
+**
+** ^A call to this routine stores N bytes of randomness into buffer P.
+** ^The P parameter can be a NULL pointer.
+**
+** ^If this routine has not been previously called or if the previous
+** call had N less than one or a NULL pointer for P, then the PRNG is
+** seeded using randomness obtained from the xRandomness method of
+** the default [sqlite3_vfs] object.
+** ^If the previous call to this routine had an N of 1 or more and a
+** non-NULL P then the pseudo-randomness is generated
+** internally and without recourse to the [sqlite3_vfs] xRandomness
+** method.
+*/
+SQLITE_API void sqlite3_randomness(int N, void *P);
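+
+/*
+** Usage sketch (illustrative only). Filling a small buffer with
+** randomness from the built-in PRNG, e.g. for a session nonce:
+**
+** <blockquote><pre>
+** unsigned char aNonce&#91;16];
+** sqlite3_randomness((int)sizeof(aNonce), aNonce);
+** </pre></blockquote>
+*/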
+
+/*
+** CAPI3REF: Compile-Time Authorization Callbacks
+** METHOD: sqlite3
+** KEYWORDS: {authorizer callback}
+**
+** ^This routine registers an authorizer callback with a particular
+** [database connection], supplied in the first argument.
+** ^The authorizer callback is invoked as SQL statements are being compiled
+** by [sqlite3_prepare()] or its variants [sqlite3_prepare_v2()],
+** [sqlite3_prepare_v3()], [sqlite3_prepare16()], [sqlite3_prepare16_v2()],
+** and [sqlite3_prepare16_v3()]. ^At various
+** points during the compilation process, as logic is being created
+** to perform various actions, the authorizer callback is invoked to
+** see if those actions are allowed. ^The authorizer callback should
+** return [SQLITE_OK] to allow the action, [SQLITE_IGNORE] to disallow the
+** specific action but allow the SQL statement to continue to be
+** compiled, or [SQLITE_DENY] to cause the entire SQL statement to be
+** rejected with an error. ^If the authorizer callback returns
+** any value other than [SQLITE_IGNORE], [SQLITE_OK], or [SQLITE_DENY]
+** then the [sqlite3_prepare_v2()] or equivalent call that triggered
+** the authorizer will fail with an error message.
+**
+** When the callback returns [SQLITE_OK], that means the operation
+** requested is ok. ^When the callback returns [SQLITE_DENY], the
+** [sqlite3_prepare_v2()] or equivalent call that triggered the
+** authorizer will fail with an error message explaining that
+** access is denied.
+**
+** ^The first parameter to the authorizer callback is a copy of the third
+** parameter to the sqlite3_set_authorizer() interface. ^The second parameter
+** to the callback is an integer [SQLITE_COPY | action code] that specifies
+** the particular action to be authorized. ^The third through sixth parameters
+** to the callback are either NULL pointers or zero-terminated strings
+** that contain additional details about the action to be authorized.
+** Applications must always be prepared to encounter a NULL pointer in any
+** of the third through the sixth parameters of the authorization callback.
+**
+** ^If the action code is [SQLITE_READ]
+** and the callback returns [SQLITE_IGNORE] then the
+** [prepared statement] is constructed to substitute
+** a NULL value in place of the table column that would have
+** been read if [SQLITE_OK] had been returned. The [SQLITE_IGNORE]
+** return can be used to deny an untrusted user access to individual
+** columns of a table.
+** ^When a table is referenced by a [SELECT] but no column values are
+** extracted from that table (for example in a query like
+** "SELECT count(*) FROM tab") then the [SQLITE_READ] authorizer callback
+** is invoked once for that table with a column name that is an empty string.
+** ^If the action code is [SQLITE_DELETE] and the callback returns
+** [SQLITE_IGNORE] then the [DELETE] operation proceeds but the
+** [truncate optimization] is disabled and all rows are deleted individually.
+**
+** An authorizer is used when [sqlite3_prepare | preparing]
+** SQL statements from an untrusted source, to ensure that the SQL statements
+** do not try to access data they are not allowed to see, or that they do not
+** try to execute malicious statements that damage the database. For
+** example, an application may allow a user to enter arbitrary
+** SQL queries for evaluation by a database. But the application does
+** not want the user to be able to make arbitrary changes to the
+** database. An authorizer could then be put in place while the
+** user-entered SQL is being [sqlite3_prepare | prepared] that
+** disallows everything except [SELECT] statements.
+**
+** Applications that need to process SQL from untrusted sources
+** might also consider lowering resource limits using [sqlite3_limit()]
+** and limiting database size using the [max_page_count] [PRAGMA]
+** in addition to using an authorizer.
+**
+** ^(Only a single authorizer can be in place on a database connection
+** at a time. Each call to sqlite3_set_authorizer overrides the
+** previous call.)^ ^Disable the authorizer by installing a NULL callback.
+** The authorizer is disabled by default.
+**
+** The authorizer callback must not do anything that will modify
+** the database connection that invoked the authorizer callback.
+** Note that [sqlite3_prepare_v2()] and [sqlite3_step()] both modify their
+** database connections for the meaning of "modify" in this paragraph.
+**
+** ^When [sqlite3_prepare_v2()] is used to prepare a statement, the
+** statement might be re-prepared during [sqlite3_step()] due to a
+** schema change. Hence, the application should ensure that the
+** correct authorizer callback remains in place during the [sqlite3_step()].
+**
+** ^Note that the authorizer callback is invoked only during
+** [sqlite3_prepare()] or its variants. Authorization is not
+** performed during statement evaluation in [sqlite3_step()], unless
+** as stated in the previous paragraph, sqlite3_step() invokes
+** sqlite3_prepare_v2() to reprepare a statement after a schema change.
+*/
+SQLITE_API int sqlite3_set_authorizer(
+ sqlite3*,
+ int (*xAuth)(void*,int,const char*,const char*,const char*,const char*),
+ void *pUserData
+);
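+
+/*
+** Usage sketch (illustrative only; the policy shown is a deliberately
+** strict example). An authorizer that permits only read-only queries,
+** allowing SELECT, column reads, and function use while denying
+** everything else:
+**
+** <blockquote><pre>
+** static int auth_cb(void *pArg, int op,
+**                    const char *z1, const char *z2,
+**                    const char *zDb, const char *zTrigger){
+**   (void)pArg; (void)z1; (void)z2; (void)zDb; (void)zTrigger;
+**   if( op==SQLITE_SELECT || op==SQLITE_READ || op==SQLITE_FUNCTION ){
+**     return SQLITE_OK;
+**   }
+**   return SQLITE_DENY;
+** }
+** // ...
+** sqlite3_set_authorizer(db, auth_cb, 0);
+** </pre></blockquote>
+*/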
+
+/*
+** CAPI3REF: Authorizer Return Codes
+**
+** The [sqlite3_set_authorizer | authorizer callback function] must
+** return either [SQLITE_OK] or one of these two constants in order
+** to signal SQLite whether or not the action is permitted. See the
+** [sqlite3_set_authorizer | authorizer documentation] for additional
+** information.
+**
+** Note that SQLITE_IGNORE is also used as a [conflict resolution mode]
+** returned from the [sqlite3_vtab_on_conflict()] interface.
+*/
+#define SQLITE_DENY 1 /* Abort the SQL statement with an error */
+#define SQLITE_IGNORE 2 /* Don't allow access, but don't generate an error */
+
+/*
+** CAPI3REF: Authorizer Action Codes
+**
+** The [sqlite3_set_authorizer()] interface registers a callback function
+** that is invoked to authorize certain SQL statement actions. The
+** second parameter to the callback is an integer code that specifies
+** what action is being authorized. These are the integer action codes that
+** the authorizer callback may be passed.
+**
+** These action code values signify what kind of operation is to be
+** authorized. The 3rd and 4th parameters to the authorization
+** callback function will be parameters or NULL depending on which of these
+** codes is used as the second parameter. ^(The 5th parameter to the
+** authorizer callback is the name of the database ("main", "temp",
+** etc.) if applicable.)^ ^The 6th parameter to the authorizer callback
+** is the name of the inner-most trigger or view that is responsible for
+** the access attempt or NULL if this access attempt is directly from
+** top-level SQL code.
+*/
+/******************************************* 3rd ************ 4th ***********/
+#define SQLITE_CREATE_INDEX 1 /* Index Name Table Name */
+#define SQLITE_CREATE_TABLE 2 /* Table Name NULL */
+#define SQLITE_CREATE_TEMP_INDEX 3 /* Index Name Table Name */
+#define SQLITE_CREATE_TEMP_TABLE 4 /* Table Name NULL */
+#define SQLITE_CREATE_TEMP_TRIGGER 5 /* Trigger Name Table Name */
+#define SQLITE_CREATE_TEMP_VIEW 6 /* View Name NULL */
+#define SQLITE_CREATE_TRIGGER 7 /* Trigger Name Table Name */
+#define SQLITE_CREATE_VIEW 8 /* View Name NULL */
+#define SQLITE_DELETE 9 /* Table Name NULL */
+#define SQLITE_DROP_INDEX 10 /* Index Name Table Name */
+#define SQLITE_DROP_TABLE 11 /* Table Name NULL */
+#define SQLITE_DROP_TEMP_INDEX 12 /* Index Name Table Name */
+#define SQLITE_DROP_TEMP_TABLE 13 /* Table Name NULL */
+#define SQLITE_DROP_TEMP_TRIGGER 14 /* Trigger Name Table Name */
+#define SQLITE_DROP_TEMP_VIEW 15 /* View Name NULL */
+#define SQLITE_DROP_TRIGGER 16 /* Trigger Name Table Name */
+#define SQLITE_DROP_VIEW 17 /* View Name NULL */
+#define SQLITE_INSERT 18 /* Table Name NULL */
+#define SQLITE_PRAGMA 19 /* Pragma Name 1st arg or NULL */
+#define SQLITE_READ 20 /* Table Name Column Name */
+#define SQLITE_SELECT 21 /* NULL NULL */
+#define SQLITE_TRANSACTION 22 /* Operation NULL */
+#define SQLITE_UPDATE 23 /* Table Name Column Name */
+#define SQLITE_ATTACH 24 /* Filename NULL */
+#define SQLITE_DETACH 25 /* Database Name NULL */
+#define SQLITE_ALTER_TABLE 26 /* Database Name Table Name */
+#define SQLITE_REINDEX 27 /* Index Name NULL */
+#define SQLITE_ANALYZE 28 /* Table Name NULL */
+#define SQLITE_CREATE_VTABLE 29 /* Table Name Module Name */
+#define SQLITE_DROP_VTABLE 30 /* Table Name Module Name */
+#define SQLITE_FUNCTION 31 /* NULL Function Name */
+#define SQLITE_SAVEPOINT 32 /* Operation Savepoint Name */
+#define SQLITE_COPY 0 /* No longer used */
+#define SQLITE_RECURSIVE 33 /* NULL NULL */
+
+/*
+** CAPI3REF: Tracing And Profiling Functions
+** METHOD: sqlite3
+**
+** These routines are deprecated. Use the [sqlite3_trace_v2()] interface
+** instead of the routines described here.
+**
+** These routines register callback functions that can be used for
+** tracing and profiling the execution of SQL statements.
+**
+** ^The callback function registered by sqlite3_trace() is invoked at
+** various times when an SQL statement is being run by [sqlite3_step()].
+** ^The sqlite3_trace() callback is invoked with a UTF-8 rendering of the
+** SQL statement text as the statement first begins executing.
+** ^(Additional sqlite3_trace() callbacks might occur
+** as each triggered subprogram is entered. The callbacks for triggers
+** contain a UTF-8 SQL comment that identifies the trigger.)^
+**
+** The [SQLITE_TRACE_SIZE_LIMIT] compile-time option can be used to limit
+** the length of [bound parameter] expansion in the output of sqlite3_trace().
+**
+** ^The callback function registered by sqlite3_profile() is invoked
+** as each SQL statement finishes. ^The profile callback contains
+** the original statement text and an estimate of wall-clock time
+** of how long that statement took to run. ^The profile callback
+** time is in units of nanoseconds, however the current implementation
+** is only capable of millisecond resolution so the six least significant
+** digits in the time are meaningless. Future versions of SQLite
+** might provide greater resolution on the profiler callback. Invoking
+** either [sqlite3_trace()] or [sqlite3_trace_v2()] will cancel the
+** profile callback.
+*/
+SQLITE_API SQLITE_DEPRECATED void *sqlite3_trace(sqlite3*,
+ void(*xTrace)(void*,const char*), void*);
+SQLITE_API SQLITE_DEPRECATED void *sqlite3_profile(sqlite3*,
+ void(*xProfile)(void*,const char*,sqlite3_uint64), void*);
+
+/*
+** CAPI3REF: SQL Trace Event Codes
+** KEYWORDS: SQLITE_TRACE
+**
+** These constants identify classes of events that can be monitored
+** using the [sqlite3_trace_v2()] tracing logic. The M argument
+** to [sqlite3_trace_v2(D,M,X,P)] is an OR-ed combination of one or more of
+** the following constants. ^The first argument to the trace callback
+** is one of the following constants.
+**
+** New tracing constants may be added in future releases.
+**
+** ^A trace callback has four arguments: xCallback(T,C,P,X).
+** ^The T argument is one of the integer type codes above.
+** ^The C argument is a copy of the context pointer passed in as the
+** fourth argument to [sqlite3_trace_v2()].
+** The P and X arguments are pointers whose meanings depend on T.
+**
+** <dl>
+** [[SQLITE_TRACE_STMT]] <dt>SQLITE_TRACE_STMT</dt>
+** <dd>^An SQLITE_TRACE_STMT callback is invoked when a prepared statement
+** first begins running and possibly at other times during the
+** execution of the prepared statement, such as at the start of each
+** trigger subprogram. ^The P argument is a pointer to the
+** [prepared statement]. ^The X argument is a pointer to a string which
+** is the unexpanded SQL text of the prepared statement or an SQL comment
+** that indicates the invocation of a trigger. ^The callback can compute
+** the same text that would have been returned by the legacy [sqlite3_trace()]
+** interface by using the X argument when X begins with "--" and invoking
+** [sqlite3_expanded_sql(P)] otherwise.
+**
+** [[SQLITE_TRACE_PROFILE]] <dt>SQLITE_TRACE_PROFILE</dt>
+** <dd>^An SQLITE_TRACE_PROFILE callback provides approximately the same
+** information as is provided by the [sqlite3_profile()] callback.
+** ^The P argument is a pointer to the [prepared statement] and the
+** X argument points to a 64-bit integer which is an estimate of
+** the number of nanoseconds that the prepared statement took to run.
+** ^The SQLITE_TRACE_PROFILE callback is invoked when the statement finishes.
+**
+** [[SQLITE_TRACE_ROW]] <dt>SQLITE_TRACE_ROW</dt>
+** <dd>^An SQLITE_TRACE_ROW callback is invoked whenever a prepared
+** statement generates a single row of result.
+** ^The P argument is a pointer to the [prepared statement] and the
+** X argument is unused.
+**
+** [[SQLITE_TRACE_CLOSE]] <dt>SQLITE_TRACE_CLOSE</dt>
+** <dd>^An SQLITE_TRACE_CLOSE callback is invoked when a database
+** connection closes.
+** ^The P argument is a pointer to the [database connection] object
+** and the X argument is unused.
+** </dl>
+*/
+#define SQLITE_TRACE_STMT 0x01
+#define SQLITE_TRACE_PROFILE 0x02
+#define SQLITE_TRACE_ROW 0x04
+#define SQLITE_TRACE_CLOSE 0x08
+
+/*
+** CAPI3REF: SQL Trace Hook
+** METHOD: sqlite3
+**
+** ^The sqlite3_trace_v2(D,M,X,P) interface registers a trace callback
+** function X against [database connection] D, using property mask M
+** and context pointer P. ^If the X callback is
+** NULL or if the M mask is zero, then tracing is disabled. The
+** M argument should be the bitwise OR-ed combination of
+** zero or more [SQLITE_TRACE] constants.
+**
+** ^Each call to either sqlite3_trace() or sqlite3_trace_v2() overrides
+** (cancels) any prior calls to sqlite3_trace() or sqlite3_trace_v2().
+**
+** ^The X callback is invoked whenever any of the events identified by
+** mask M occur. ^The integer return value from the callback is currently
+** ignored, though this may change in future releases. Callback
+** implementations should return zero to ensure future compatibility.
+**
+** ^A trace callback is invoked with four arguments: callback(T,C,P,X).
+** ^The T argument is one of the [SQLITE_TRACE]
+** constants to indicate why the callback was invoked.
+** ^The C argument is a copy of the context pointer.
+** The P and X arguments are pointers whose meanings depend on T.
+**
+** The sqlite3_trace_v2() interface is intended to replace the legacy
+** interfaces [sqlite3_trace()] and [sqlite3_profile()], both of which
+** are deprecated.
+*/
+SQLITE_API int sqlite3_trace_v2(
+ sqlite3*,
+ unsigned uMask,
+ int(*xCallback)(unsigned,void*,void*,void*),
+ void *pCtx
+);
+
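+/*
+** A minimal sketch of a trace callback (assuming an open connection "db"
+** and &lt;stdio.h&gt;; the name xTrace is arbitrary) that logs the unexpanded
+** text of each statement as it begins running:
+**
+** <blockquote><pre>
+** static int xTrace(unsigned T, void *C, void *P, void *X){
+**   if( T==SQLITE_TRACE_STMT ){
+**     fprintf(stderr, "SQL: %s\n", (const char*)X);  // X is the SQL text
+**   }
+**   return 0;  // return zero for future compatibility
+** }
+** // ...
+** sqlite3_trace_v2(db, SQLITE_TRACE_STMT, xTrace, 0);
+** </pre></blockquote>
+*/
+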
+/*
+** CAPI3REF: Query Progress Callbacks
+** METHOD: sqlite3
+**
+** ^The sqlite3_progress_handler(D,N,X,P) interface causes the callback
+** function X to be invoked periodically during long running calls to
+** [sqlite3_exec()], [sqlite3_step()] and [sqlite3_get_table()] for
+** database connection D. An example use for this
+** interface is to keep a GUI updated during a large query.
+**
+** ^The parameter P is passed through as the only parameter to the
+** callback function X. ^The parameter N is the approximate number of
+** [virtual machine instructions] that are evaluated between successive
+** invocations of the callback X. ^If N is less than one then the progress
+** handler is disabled.
+**
+** ^Only a single progress handler may be defined at one time per
+** [database connection]; setting a new progress handler cancels the
+** old one. ^Setting parameter X to NULL disables the progress handler.
+** ^The progress handler is also disabled by setting N to a value less
+** than 1.
+**
+** ^If the progress callback returns non-zero, the operation is
+** interrupted. This feature can be used to implement a
+** "Cancel" button on a GUI progress dialog box.
+**
+** The progress handler callback must not do anything that will modify
+** the database connection that invoked the progress handler.
+** Note that [sqlite3_prepare_v2()] and [sqlite3_step()] both modify their
+** database connections in the sense in which "modify" is used in this
+** paragraph.
+**
+*/
+SQLITE_API void sqlite3_progress_handler(sqlite3*, int, int(*)(void*), void*);
+
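+/*
+** A minimal sketch of a progress handler implementing a "Cancel" flag
+** (assuming an open connection "db"; the names g_bCancel and xProgress
+** are arbitrary):
+**
+** <blockquote><pre>
+** static volatile int g_bCancel = 0;  // set from the GUI to cancel
+** static int xProgress(void *pArg){
+**   return g_bCancel;  // non-zero interrupts the running statement
+** }
+** // Invoke xProgress roughly every 1000 virtual machine instructions:
+** sqlite3_progress_handler(db, 1000, xProgress, 0);
+** </pre></blockquote>
+*/
+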
+/*
+** CAPI3REF: Opening A New Database Connection
+** CONSTRUCTOR: sqlite3
+**
+** ^These routines open an SQLite database file as specified by the
+** filename argument. ^The filename argument is interpreted as UTF-8 for
+** sqlite3_open() and sqlite3_open_v2() and as UTF-16 in the native byte
+** order for sqlite3_open16(). ^(A [database connection] handle is usually
+** returned in *ppDb, even if an error occurs. The only exception is that
+** if SQLite is unable to allocate memory to hold the [sqlite3] object,
+** a NULL will be written into *ppDb instead of a pointer to the [sqlite3]
+** object.)^ ^(If the database is opened (and/or created) successfully, then
+** [SQLITE_OK] is returned. Otherwise an [error code] is returned.)^ ^The
+** [sqlite3_errmsg()] or [sqlite3_errmsg16()] routines can be used to obtain
+** an English language description of the error following a failure of any
+** of the sqlite3_open() routines.
+**
+** ^The default encoding will be UTF-8 for databases created using
+** sqlite3_open() or sqlite3_open_v2(). ^The default encoding for databases
+** created using sqlite3_open16() will be UTF-16 in the native byte order.
+**
+** Whether or not an error occurs when it is opened, resources
+** associated with the [database connection] handle should be released by
+** passing it to [sqlite3_close()] when it is no longer required.
+**
+** The sqlite3_open_v2() interface works like sqlite3_open()
+** except that it accepts two additional parameters for additional control
+** over the new database connection. ^(The flags parameter to
+** sqlite3_open_v2() must include, at a minimum, one of the following
+** three flag combinations:)^
+**
+** <dl>
+** ^(<dt>[SQLITE_OPEN_READONLY]</dt>
+** <dd>The database is opened in read-only mode. If the database does not
+** already exist, an error is returned.</dd>)^
+**
+** ^(<dt>[SQLITE_OPEN_READWRITE]</dt>
+** <dd>The database is opened for reading and writing if possible, or reading
+** only if the file is write protected by the operating system. In either
+** case the database must already exist, otherwise an error is returned.</dd>)^
+**
+** ^(<dt>[SQLITE_OPEN_READWRITE] | [SQLITE_OPEN_CREATE]</dt>
+** <dd>The database is opened for reading and writing, and is created if
+** it does not already exist. This is the behavior that is always used for
+** sqlite3_open() and sqlite3_open16().</dd>)^
+** </dl>
+**
+** In addition to the required flags, the following optional flags are
+** also supported:
+**
+** <dl>
+** ^(<dt>[SQLITE_OPEN_URI]</dt>
+** <dd>The filename can be interpreted as a URI if this flag is set.</dd>)^
+**
+** ^(<dt>[SQLITE_OPEN_MEMORY]</dt>
+** <dd>The database will be opened as an in-memory database. The database
+** is named by the "filename" argument for the purposes of cache-sharing,
+** if shared cache mode is enabled, but the "filename" is otherwise ignored.
+** </dd>)^
+**
+** ^(<dt>[SQLITE_OPEN_NOMUTEX]</dt>
+** <dd>The new database connection will use the "multi-thread"
+** [threading mode].)^ This means that separate threads are allowed
+** to use SQLite at the same time, as long as each thread is using
+** a different [database connection].
+**
+** ^(<dt>[SQLITE_OPEN_FULLMUTEX]</dt>
+** <dd>The new database connection will use the "serialized"
+** [threading mode].)^ This means that multiple threads can safely
+** attempt to use the same database connection at the same time.
+** (Mutexes will block any actual concurrency, but in this mode
+** there is no harm in trying.)
+**
+** ^(<dt>[SQLITE_OPEN_SHAREDCACHE]</dt>
+** <dd>The database is opened [shared cache] enabled, overriding
+** the default shared cache setting provided by
+** [sqlite3_enable_shared_cache()].)^
+**
+** ^(<dt>[SQLITE_OPEN_PRIVATECACHE]</dt>
+** <dd>The database is opened [shared cache] disabled, overriding
+** the default shared cache setting provided by
+** [sqlite3_enable_shared_cache()].)^
+**
+** [[OPEN_NOFOLLOW]] ^(<dt>[SQLITE_OPEN_NOFOLLOW]</dt>
+** <dd>The database filename is not allowed to be a symbolic link</dd>
+** </dl>)^
+**
+** If the 3rd parameter to sqlite3_open_v2() is not one of the
+** required combinations shown above optionally combined with other
+** [SQLITE_OPEN_READONLY | SQLITE_OPEN_* bits]
+** then the behavior is undefined.
+**
+** ^The fourth parameter to sqlite3_open_v2() is the name of the
+** [sqlite3_vfs] object that defines the operating system interface that
+** the new database connection should use. ^If the fourth parameter is
+** a NULL pointer then the default [sqlite3_vfs] object is used.
+**
+** ^If the filename is ":memory:", then a private, temporary in-memory database
+** is created for the connection. ^This in-memory database will vanish when
+** the database connection is closed. Future versions of SQLite might
+** make use of additional special filenames that begin with the ":" character.
+** If a database filename actually does begin with a ":" character, it is
+** recommended that you prefix the filename with a pathname such as
+** "./" to avoid ambiguity.
+**
+** ^If the filename is an empty string, then a private, temporary
+** on-disk database will be created. ^This private database will be
+** automatically deleted as soon as the database connection is closed.
+**
+** [[URI filenames in sqlite3_open()]] <h3>URI Filenames</h3>
+**
+** ^If [URI filename] interpretation is enabled, and the filename argument
+** begins with "file:", then the filename is interpreted as a URI. ^URI
+** filename interpretation is enabled if the [SQLITE_OPEN_URI] flag is
+** set in the third argument to sqlite3_open_v2(), or if it has
+** been enabled globally using the [SQLITE_CONFIG_URI] option with the
+** [sqlite3_config()] method or by the [SQLITE_USE_URI] compile-time option.
+** URI filename interpretation is turned off
+** by default, but future releases of SQLite might enable URI filename
+** interpretation by default. See "[URI filenames]" for additional
+** information.
+**
+** URI filenames are parsed according to RFC 3986. ^If the URI contains an
+** authority, then it must be either an empty string or the string
+** "localhost". ^If the authority is not an empty string or "localhost", an
+** error is returned to the caller. ^The fragment component of a URI, if
+** present, is ignored.
+**
+** ^SQLite uses the path component of the URI as the name of the disk file
+** which contains the database. ^If the path begins with a '/' character,
+** then it is interpreted as an absolute path. ^If the path does not begin
+** with a '/' (meaning that the authority section is omitted from the URI)
+** then the path is interpreted as a relative path.
+** ^(On Windows, the first component of an absolute path
+** is a drive specification (e.g. "C:").)^
+**
+** [[core URI query parameters]]
+** The query component of a URI may contain parameters that are interpreted
+** either by SQLite itself, or by a [VFS | custom VFS implementation].
+** SQLite and its built-in [VFSes] interpret the
+** following query parameters:
+**
+** <ul>
+** <li> <b>vfs</b>: ^The "vfs" parameter may be used to specify the name of
+** a VFS object that provides the operating system interface that should
+** be used to access the database file on disk. ^If this option is set to
+** an empty string the default VFS object is used. ^Specifying an unknown
+** VFS is an error. ^If sqlite3_open_v2() is used and the vfs option is
+** present, then the VFS specified by the option takes precedence over
+** the value passed as the fourth parameter to sqlite3_open_v2().
+**
+** <li> <b>mode</b>: ^(The mode parameter may be set to either "ro", "rw",
+** "rwc", or "memory". Attempting to set it to any other value is
+** an error)^.
+** ^If "ro" is specified, then the database is opened for read-only
+** access, just as if the [SQLITE_OPEN_READONLY] flag had been set in the
+** third argument to sqlite3_open_v2(). ^If the mode option is set to
+** "rw", then the database is opened for read-write (but not create)
+** access, as if SQLITE_OPEN_READWRITE (but not SQLITE_OPEN_CREATE) had
+** been set. ^Value "rwc" is equivalent to setting both
+** SQLITE_OPEN_READWRITE and SQLITE_OPEN_CREATE. ^If the mode option is
+** set to "memory" then a pure [in-memory database] that never reads
+** or writes from disk is used. ^It is an error to specify a value for
+** the mode parameter that is less restrictive than that specified by
+** the flags passed in the third parameter to sqlite3_open_v2().
+**
+** <li> <b>cache</b>: ^The cache parameter may be set to either "shared" or
+** "private". ^Setting it to "shared" is equivalent to setting the
+** SQLITE_OPEN_SHAREDCACHE bit in the flags argument passed to
+** sqlite3_open_v2(). ^Setting the cache parameter to "private" is
+** equivalent to setting the SQLITE_OPEN_PRIVATECACHE bit.
+** ^If sqlite3_open_v2() is used and the "cache" parameter is present in
+** a URI filename, its value overrides any behavior requested by setting
+** SQLITE_OPEN_PRIVATECACHE or SQLITE_OPEN_SHAREDCACHE flag.
+**
+** <li> <b>psow</b>: ^The psow parameter indicates whether or not the
+** [powersafe overwrite] property does or does not apply to the
+** storage media on which the database file resides.
+**
+** <li> <b>nolock</b>: ^The nolock parameter is a boolean query parameter
+** which if set disables file locking in rollback journal modes. This
+** is useful for accessing a database on a filesystem that does not
+** support locking. Caution: Database corruption might result if two
+** or more processes write to the same database and any one of those
+** processes uses nolock=1.
+**
+** <li> <b>immutable</b>: ^The immutable parameter is a boolean query
+** parameter that indicates that the database file is stored on
+** read-only media. ^When immutable is set, SQLite assumes that the
+** database file cannot be changed, even by a process with higher
+** privilege, and so the database is opened read-only and all locking
+** and change detection is disabled. Caution: Setting the immutable
+** property on a database file that does in fact change can result
+** in incorrect query results and/or [SQLITE_CORRUPT] errors.
+** See also: [SQLITE_IOCAP_IMMUTABLE].
+**
+** </ul>
+**
+** ^Specifying an unknown parameter in the query component of a URI is not an
+** error. Future versions of SQLite might understand additional query
+** parameters. See "[query parameters with special meaning to SQLite]" for
+** additional information.
+**
+** [[URI filename examples]] <h3>URI filename examples</h3>
+**
+** <table border="1" align=center cellpadding=5>
+** <tr><th> URI filenames <th> Results
+** <tr><td> file:data.db <td>
+** Open the file "data.db" in the current directory.
+** <tr><td> file:/home/fred/data.db<br>
+** file:///home/fred/data.db <br>
+** file://localhost/home/fred/data.db <br> <td>
+** Open the database file "/home/fred/data.db".
+** <tr><td> file://darkstar/home/fred/data.db <td>
+** An error. "darkstar" is not a recognized authority.
+** <tr><td style="white-space:nowrap">
+** file:///C:/Documents%20and%20Settings/fred/Desktop/data.db
+** <td> Windows only: Open the file "data.db" on fred's desktop on drive
+** C:. Note that the %20 escaping in this example is not strictly
+** necessary - space characters can be used literally
+** in URI filenames.
+** <tr><td> file:data.db?mode=ro&cache=private <td>
+** Open file "data.db" in the current directory for read-only access.
+** Regardless of whether or not shared-cache mode is enabled by
+** default, use a private cache.
+** <tr><td> file:/home/fred/data.db?vfs=unix-dotfile <td>
+** Open file "/home/fred/data.db". Use the special VFS "unix-dotfile"
+** that uses dot-files in place of posix advisory locking.
+** <tr><td> file:data.db?mode=readonly <td>
+** An error. "readonly" is not a valid option for the "mode" parameter.
+** </table>
+**
+** ^URI hexadecimal escape sequences (%HH) are supported within the path and
+** query components of a URI. A hexadecimal escape sequence consists of a
+** percent sign - "%" - followed by exactly two hexadecimal digits
+** specifying an octet value. ^Before the path or query components of a
+** URI filename are interpreted, they are encoded using UTF-8 and all
+** hexadecimal escape sequences replaced by a single byte containing the
+** corresponding octet. If this process generates an invalid UTF-8 encoding,
+** the results are undefined.
+**
+** <b>Note to Windows users:</b> The encoding used for the filename argument
+** of sqlite3_open() and sqlite3_open_v2() must be UTF-8, not whatever
+** codepage is currently defined. Filenames containing international
+** characters must be converted to UTF-8 prior to passing them into
+** sqlite3_open() or sqlite3_open_v2().
+**
+** <b>Note to Windows Runtime users:</b> The temporary directory must be set
+** prior to calling sqlite3_open() or sqlite3_open_v2(). Otherwise, various
+** features that require the use of temporary files may fail.
+**
+** See also: [sqlite3_temp_directory]
+*/
+SQLITE_API int sqlite3_open(
+ const char *filename, /* Database filename (UTF-8) */
+ sqlite3 **ppDb /* OUT: SQLite db handle */
+);
+SQLITE_API int sqlite3_open16(
+ const void *filename, /* Database filename (UTF-16) */
+ sqlite3 **ppDb /* OUT: SQLite db handle */
+);
+SQLITE_API int sqlite3_open_v2(
+ const char *filename, /* Database filename (UTF-8) */
+ sqlite3 **ppDb, /* OUT: SQLite db handle */
+ int flags, /* Flags */
+ const char *zVfs /* Name of VFS module to use */
+);
+
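+/*
+** A minimal sketch of opening (and, if necessary, creating) a database
+** with URI filename interpretation enabled; the filename "data.db" is
+** arbitrary:
+**
+** <blockquote><pre>
+** sqlite3 *db = 0;
+** int rc = sqlite3_open_v2("file:data.db?cache=private", &db,
+**            SQLITE_OPEN_READWRITE|SQLITE_OPEN_CREATE|SQLITE_OPEN_URI, 0);
+** if( rc!=SQLITE_OK ){
+**   fprintf(stderr, "cannot open: %s\n", sqlite3_errmsg(db));
+** }
+** sqlite3_close(db);  // release the handle whether or not open succeeded
+** </pre></blockquote>
+*/
+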
+/*
+** CAPI3REF: Obtain Values For URI Parameters
+**
+** These are utility routines, useful to [VFS|custom VFS implementations],
+** that check whether a database file was opened via a URI that contained
+** a specific query parameter, and if so obtain the value of that query
+** parameter.
+**
+** The first parameter to these interfaces (hereafter referred to
+** as F) must be one of:
+** <ul>
+** <li> A database filename pointer created by the SQLite core and
+** passed into the xOpen() method of a VFS implementation, or
+** <li> A filename obtained from [sqlite3_db_filename()], or
+** <li> A new filename constructed using [sqlite3_create_filename()].
+** </ul>
+** If the F parameter is not one of the above, then the behavior is
+** undefined and probably undesirable. Older versions of SQLite were
+** more tolerant of invalid F parameters than newer versions.
+**
+** If F is a suitable filename (as described in the previous paragraph)
+** and if P is the name of the query parameter, then
+** sqlite3_uri_parameter(F,P) returns the value of the P
+** parameter if it exists or a NULL pointer if P does not appear as a
+** query parameter on F. If P is a query parameter of F and it
+** has no explicit value, then sqlite3_uri_parameter(F,P) returns
+** a pointer to an empty string.
+**
+** The sqlite3_uri_boolean(F,P,B) routine assumes that P is a boolean
+** parameter and returns true (1) or false (0) according to the value
+** of P. The sqlite3_uri_boolean(F,P,B) routine returns true (1) if the
+** value of query parameter P is one of "yes", "true", or "on" in any
+** case or if the value begins with a non-zero number. The
+** sqlite3_uri_boolean(F,P,B) routine returns false (0) if the value of
+** query parameter P is one of "no", "false", or "off" in any case or
+** if the value begins with a numeric zero. If P is not a query
+** parameter on F or if the value of P does not match any of the
+** above, then sqlite3_uri_boolean(F,P,B) returns (B!=0).
+**
+** The sqlite3_uri_int64(F,P,D) routine converts the value of P into a
+** 64-bit signed integer and returns that integer, or D if P does not
+** exist. If the value of P is something other than an integer, then
+** zero is returned.
+**
+** The sqlite3_uri_key(F,N) routine returns a pointer to the name (not
+** the value) of the N-th query parameter for filename F, or a NULL
+** pointer if N is less than zero or greater than the number of query
+** parameters minus 1. The N value is zero-based so N should be 0 to obtain
+** the name of the first query parameter, 1 for the second parameter, and
+** so forth.
+**
+** If F is a NULL pointer, then sqlite3_uri_parameter(F,P) returns NULL and
+** sqlite3_uri_boolean(F,P,B) returns B. If F is not a NULL pointer and
+** is not a database file pathname pointer that the SQLite core passed
+** into the xOpen VFS method, then the behavior of this routine is undefined
+** and probably undesirable.
+**
+** Beginning with SQLite [version 3.31.0] ([dateof:3.31.0]) the input F
+** parameter can also be the name of a rollback journal file or WAL file
+** in addition to the main database file. Prior to version 3.31.0, these
+** routines would only work if F was the name of the main database file.
+** When the F parameter is the name of the rollback journal or WAL file,
+** it has access to all the same query parameters as were found on the
+** main database file.
+**
+** See the [URI filename] documentation for additional information.
+*/
+SQLITE_API const char *sqlite3_uri_parameter(const char *zFilename, const char *zParam);
+SQLITE_API int sqlite3_uri_boolean(const char *zFile, const char *zParam, int bDefault);
+SQLITE_API sqlite3_int64 sqlite3_uri_int64(const char*, const char*, sqlite3_int64);
+SQLITE_API const char *sqlite3_uri_key(const char *zFilename, int N);
+
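+/*
+** A minimal sketch of how a hypothetical VFS shim's xOpen method might
+** consult URI query parameters; the names myShimOpen, "verbose", and
+** "key" are arbitrary and not defined by SQLite:
+**
+** <blockquote><pre>
+** static int myShimOpen(sqlite3_vfs *pVfs, const char *zName,
+**                       sqlite3_file *pFile, int flags, int *pOutFlags){
+**   int bVerbose = sqlite3_uri_boolean(zName, "verbose", 0);
+**   const char *zKey = sqlite3_uri_parameter(zName, "key");
+**   // ...open the file, honoring bVerbose and zKey as desired...
+**   return SQLITE_OK;
+** }
+** </pre></blockquote>
+*/
+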
+/*
+** CAPI3REF: Translate filenames
+**
+** These routines are available to [VFS|custom VFS implementations] for
+** translating filenames between the main database file, the journal file,
+** and the WAL file.
+**
+** If F is the name of an sqlite database file, journal file, or WAL file
+** passed by the SQLite core into the VFS, then sqlite3_filename_database(F)
+** returns the name of the corresponding database file.
+**
+** If F is the name of an sqlite database file, journal file, or WAL file
+** passed by the SQLite core into the VFS, or if F is a database filename
+** obtained from [sqlite3_db_filename()], then sqlite3_filename_journal(F)
+** returns the name of the corresponding rollback journal file.
+**
+** If F is the name of an sqlite database file, journal file, or WAL file
+** that was passed by the SQLite core into the VFS, or if F is a database
+** filename obtained from [sqlite3_db_filename()], then
+** sqlite3_filename_wal(F) returns the name of the corresponding
+** WAL file.
+**
+** In all of the above, if F is not the name of a database, journal or WAL
+** filename passed into the VFS from the SQLite core and F is not the
+** return value from [sqlite3_db_filename()], then the result is
+** undefined and is likely a memory access violation.
+*/
+SQLITE_API const char *sqlite3_filename_database(const char*);
+SQLITE_API const char *sqlite3_filename_journal(const char*);
+SQLITE_API const char *sqlite3_filename_wal(const char*);
+
+/*
+** CAPI3REF: Database File Corresponding To A Journal
+**
+** ^If X is the name of a rollback or WAL-mode journal file that is
+** passed into the xOpen method of [sqlite3_vfs], then
+** sqlite3_database_file_object(X) returns a pointer to the [sqlite3_file]
+** object that represents the main database file.
+**
+** This routine is intended for use in custom [VFS] implementations
+** only. It is not a general-purpose interface.
+** The X argument to sqlite3_database_file_object(X) must be a filename
+** pointer that has been passed into the [sqlite3_vfs].xOpen method where the
+** flags parameter to xOpen contains one of the bits
+** [SQLITE_OPEN_MAIN_JOURNAL] or [SQLITE_OPEN_WAL]. Any other use
+** of this routine results in undefined and probably undesirable
+** behavior.
+*/
+SQLITE_API sqlite3_file *sqlite3_database_file_object(const char*);
+
+/*
+** CAPI3REF: Create and Destroy VFS Filenames
+**
+** These interfaces are provided for use by [VFS shim] implementations and
+** are not useful outside of that context.
+**
+** The sqlite3_create_filename(D,J,W,N,P) interface allocates memory to hold
+** a version of database filename D with corresponding journal file J and
+** WAL file W and with N URI parameter key/value pairs in the array P. The
+** result from
+** sqlite3_create_filename(D,J,W,N,P) is a pointer to a database filename that
+** is safe to pass to routines like:
+** <ul>
+** <li> [sqlite3_uri_parameter()],
+** <li> [sqlite3_uri_boolean()],
+** <li> [sqlite3_uri_int64()],
+** <li> [sqlite3_uri_key()],
+** <li> [sqlite3_filename_database()],
+** <li> [sqlite3_filename_journal()], or
+** <li> [sqlite3_filename_wal()].
+** </ul>
+** If a memory allocation error occurs, sqlite3_create_filename() might
+** return a NULL pointer. The memory obtained from sqlite3_create_filename(X)
+** must be released by a corresponding call to sqlite3_free_filename(Y).
+**
+** The P parameter in sqlite3_create_filename(D,J,W,N,P) should be an array
+** of 2*N pointers to strings. Each pair of pointers in this array corresponds
+** to a key and value for a query parameter. The P parameter may be a NULL
+** pointer if N is zero. None of the 2*N pointers in the P array may be
+** NULL pointers and key pointers should not be empty strings.
+** None of the D, J, or W parameters to sqlite3_create_filename(D,J,W,N,P) may
+** be NULL pointers, though they can be empty strings.
+**
+** The sqlite3_free_filename(Y) routine releases a memory allocation
+** previously obtained from sqlite3_create_filename(). Invoking
+** sqlite3_free_filename(Y) where Y is a NULL pointer is a harmless no-op.
+**
+** If the Y parameter to sqlite3_free_filename(Y) is anything other
+** than a NULL pointer or a pointer previously acquired from
+** sqlite3_create_filename(), then bad things such as heap
+** corruption or segfaults may occur. The value Y should not be
+** used again after sqlite3_free_filename(Y) has been called. This means
+** that if the [sqlite3_vfs.xOpen()] method of a VFS has been called using Y,
+** then the corresponding [sqlite3_io_methods.xClose()] method should also be
+** invoked prior to calling sqlite3_free_filename(Y).
+*/
+SQLITE_API char *sqlite3_create_filename(
+ const char *zDatabase,
+ const char *zJournal,
+ const char *zWal,
+ int nParam,
+ const char **azParam
+);
+SQLITE_API void sqlite3_free_filename(char*);
+
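+/*
+** A minimal sketch of a VFS shim constructing and releasing a filename
+** with one extra URI parameter; the key/value pair shown is arbitrary:
+**
+** <blockquote><pre>
+** const char *azParam[] = { "myshim_level", "3" };  // one key/value pair
+** char *zNew = sqlite3_create_filename("main.db", "main.db-journal",
+**                                      "main.db-wal", 1, azParam);
+** if( zNew ){
+**   // zNew may now be passed to sqlite3_uri_parameter() and friends
+**   sqlite3_free_filename(zNew);
+** }
+** </pre></blockquote>
+*/
+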
+/*
+** CAPI3REF: Error Codes And Messages
+** METHOD: sqlite3
+**
+** ^If the most recent sqlite3_* API call associated with
+** [database connection] D failed, then the sqlite3_errcode(D) interface
+** returns the numeric [result code] or [extended result code] for that
+** API call.
+** ^The sqlite3_extended_errcode()
+** interface is the same except that it always returns the
+** [extended result code] even when extended result codes are
+** disabled.
+**
+** The values returned by sqlite3_errcode() and/or
+** sqlite3_extended_errcode() might change with each API call.
+** Except, there are some interfaces that are guaranteed to never
+** change the value of the error code. The error-code preserving
+** interfaces are:
+**
+** <ul>
+** <li> sqlite3_errcode()
+** <li> sqlite3_extended_errcode()
+** <li> sqlite3_errmsg()
+** <li> sqlite3_errmsg16()
+** </ul>
+**
+** ^The sqlite3_errmsg() and sqlite3_errmsg16() return English-language
+** text that describes the error, as either UTF-8 or UTF-16 respectively.
+** ^(Memory to hold the error message string is managed internally.
+** The application does not need to worry about freeing the result.
+** However, the error string might be overwritten or deallocated by
+** subsequent calls to other SQLite interface functions.)^
+**
+** ^The sqlite3_errstr() interface returns the English-language text
+** that describes the [result code], as UTF-8.
+** ^(Memory to hold the error message string is managed internally
+** and must not be freed by the application)^.
+**
+** When the serialized [threading mode] is in use, it might be the
+** case that a second error occurs on a separate thread in between
+** the time of the first error and the call to these interfaces.
+** When that happens, the second error will be reported since these
+** interfaces always report the most recent result. To avoid
+** this, each thread can obtain exclusive use of the [database connection] D
+** by invoking [sqlite3_mutex_enter]([sqlite3_db_mutex](D)) before beginning
+** to use D and invoking [sqlite3_mutex_leave]([sqlite3_db_mutex](D)) after
+** all calls to the interfaces listed here are completed.
+**
+** If an interface fails with SQLITE_MISUSE, that means the interface
+** was invoked incorrectly by the application. In that case, the
+** error code and message may or may not be set.
+*/
+SQLITE_API int sqlite3_errcode(sqlite3 *db);
+SQLITE_API int sqlite3_extended_errcode(sqlite3 *db);
+SQLITE_API const char *sqlite3_errmsg(sqlite3*);
+SQLITE_API const void *sqlite3_errmsg16(sqlite3*);
+SQLITE_API const char *sqlite3_errstr(int);
+
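+/*
+** A minimal sketch of reporting an error after a failed API call
+** (assuming an open connection "db" and a table "t1"):
+**
+** <blockquote><pre>
+** if( sqlite3_exec(db, "INSERT INTO t1 VALUES(1)", 0, 0, 0)!=SQLITE_OK ){
+**   fprintf(stderr, "error %d: %s\n",
+**           sqlite3_extended_errcode(db), sqlite3_errmsg(db));
+** }
+** </pre></blockquote>
+*/
+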
+/*
+** CAPI3REF: Prepared Statement Object
+** KEYWORDS: {prepared statement} {prepared statements}
+**
+** An instance of this object represents a single SQL statement that
+** has been compiled into binary form and is ready to be evaluated.
+**
+** Think of each SQL statement as a separate computer program. The
+** original SQL text is source code. A prepared statement object
+** is the compiled object code. All SQL must be converted into a
+** prepared statement before it can be run.
+**
+** The life-cycle of a prepared statement object usually goes like this:
+**
+** <ol>
+** <li> Create the prepared statement object using [sqlite3_prepare_v2()].
+** <li> Bind values to [parameters] using the sqlite3_bind_*()
+** interfaces.
+** <li> Run the SQL by calling [sqlite3_step()] one or more times.
+** <li> Reset the prepared statement using [sqlite3_reset()] then go back
+** to step 2. Do this zero or more times.
+** <li> Destroy the object using [sqlite3_finalize()].
+** </ol>
+*/
+typedef struct sqlite3_stmt sqlite3_stmt;
+
+/*
+** CAPI3REF: Run-time Limits
+** METHOD: sqlite3
+**
+** ^(This interface allows the size of various constructs to be limited
+** on a connection by connection basis. The first parameter is the
+** [database connection] whose limit is to be set or queried. The
+** second parameter is one of the [limit categories] that define a
+** class of constructs to be size limited. The third parameter is the
+** new limit for that construct.)^
+**
+** ^If the new limit is a negative number, the limit is unchanged.
+** ^(For each limit category SQLITE_LIMIT_<i>NAME</i> there is a
+** [limits | hard upper bound]
+** set at compile-time by a C preprocessor macro called
+** [limits | SQLITE_MAX_<i>NAME</i>].
+** (The "_LIMIT_" in the name is changed to "_MAX_".))^
+** ^Attempts to increase a limit above its hard upper bound are
+** silently truncated to the hard upper bound.
+**
+** ^Regardless of whether or not the limit was changed, the
+** [sqlite3_limit()] interface returns the prior value of the limit.
+** ^Hence, to find the current value of a limit without changing it,
+** simply invoke this interface with the third parameter set to -1.
+**
+** Run-time limits are intended for use in applications that manage
+** both their own internal database and also databases that are controlled
+** by untrusted external sources. An example application might be a
+** web browser that has its own databases for storing history and
+** separate databases controlled by JavaScript applications downloaded
+** off the Internet. The internal databases can be given the
+** large, default limits. Databases managed by external sources can
+** be given much smaller limits designed to prevent a denial of service
+** attack. Developers might also want to use the [sqlite3_set_authorizer()]
+** interface to further control untrusted SQL. The size of the database
+** created by an untrusted script can be contained using the
+** [max_page_count] [PRAGMA].
+**
+** New run-time limit categories may be added in future releases.
+*/
+SQLITE_API int sqlite3_limit(sqlite3*, int id, int newVal);
+
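+/*
+** A minimal sketch of querying a limit without changing it, and of
+** lowering the same limit for a connection that will run untrusted SQL
+** (assuming an open connection "db"):
+**
+** <blockquote><pre>
+** int cur = sqlite3_limit(db, SQLITE_LIMIT_LENGTH, -1);  // query only
+** sqlite3_limit(db, SQLITE_LIMIT_LENGTH, 1000000);       // lower the cap
+** </pre></blockquote>
+*/
+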
+/*
+** CAPI3REF: Run-Time Limit Categories
+** KEYWORDS: {limit category} {*limit categories}
+**
+** These constants define various performance limits
+** that can be lowered at run-time using [sqlite3_limit()].
+** The synopsis of the meanings of the various limits is shown below.
+** Additional information is available at [limits | Limits in SQLite].
+**
+** <dl>
+** [[SQLITE_LIMIT_LENGTH]] ^(<dt>SQLITE_LIMIT_LENGTH</dt>
+** <dd>The maximum size of any string or BLOB or table row, in bytes.</dd>)^
+**
+** [[SQLITE_LIMIT_SQL_LENGTH]] ^(<dt>SQLITE_LIMIT_SQL_LENGTH</dt>
+** <dd>The maximum length of an SQL statement, in bytes.</dd>)^
+**
+** [[SQLITE_LIMIT_COLUMN]] ^(<dt>SQLITE_LIMIT_COLUMN</dt>
+** <dd>The maximum number of columns in a table definition or in the
+** result set of a [SELECT] or the maximum number of columns in an index
+** or in an ORDER BY or GROUP BY clause.</dd>)^
+**
+** [[SQLITE_LIMIT_EXPR_DEPTH]] ^(<dt>SQLITE_LIMIT_EXPR_DEPTH</dt>
+** <dd>The maximum depth of the parse tree on any expression.</dd>)^
+**
+** [[SQLITE_LIMIT_COMPOUND_SELECT]] ^(<dt>SQLITE_LIMIT_COMPOUND_SELECT</dt>
+** <dd>The maximum number of terms in a compound SELECT statement.</dd>)^
+**
+** [[SQLITE_LIMIT_VDBE_OP]] ^(<dt>SQLITE_LIMIT_VDBE_OP</dt>
+** <dd>The maximum number of instructions in a virtual machine program
+** used to implement an SQL statement. If [sqlite3_prepare_v2()] or
+** the equivalent tries to allocate space for more than this many opcodes
+** in a single prepared statement, an SQLITE_NOMEM error is returned.</dd>)^
+**
+** [[SQLITE_LIMIT_FUNCTION_ARG]] ^(<dt>SQLITE_LIMIT_FUNCTION_ARG</dt>
+** <dd>The maximum number of arguments on a function.</dd>)^
+**
+** [[SQLITE_LIMIT_ATTACHED]] ^(<dt>SQLITE_LIMIT_ATTACHED</dt>
+** <dd>The maximum number of [ATTACH | attached databases].)^</dd>
+**
+** [[SQLITE_LIMIT_LIKE_PATTERN_LENGTH]]
+** ^(<dt>SQLITE_LIMIT_LIKE_PATTERN_LENGTH</dt>
+** <dd>The maximum length of the pattern argument to the [LIKE] or
+** [GLOB] operators.</dd>)^
+**
+** [[SQLITE_LIMIT_VARIABLE_NUMBER]]
+** ^(<dt>SQLITE_LIMIT_VARIABLE_NUMBER</dt>
+** <dd>The maximum index number of any [parameter] in an SQL statement.)^</dd>
+**
+** [[SQLITE_LIMIT_TRIGGER_DEPTH]] ^(<dt>SQLITE_LIMIT_TRIGGER_DEPTH</dt>
+** <dd>The maximum depth of recursion for triggers.</dd>)^
+**
+** [[SQLITE_LIMIT_WORKER_THREADS]] ^(<dt>SQLITE_LIMIT_WORKER_THREADS</dt>
+** <dd>The maximum number of auxiliary worker threads that a single
+** [prepared statement] may start.</dd>)^
+** </dl>
+*/
+#define SQLITE_LIMIT_LENGTH 0
+#define SQLITE_LIMIT_SQL_LENGTH 1
+#define SQLITE_LIMIT_COLUMN 2
+#define SQLITE_LIMIT_EXPR_DEPTH 3
+#define SQLITE_LIMIT_COMPOUND_SELECT 4
+#define SQLITE_LIMIT_VDBE_OP 5
+#define SQLITE_LIMIT_FUNCTION_ARG 6
+#define SQLITE_LIMIT_ATTACHED 7
+#define SQLITE_LIMIT_LIKE_PATTERN_LENGTH 8
+#define SQLITE_LIMIT_VARIABLE_NUMBER 9
+#define SQLITE_LIMIT_TRIGGER_DEPTH 10
+#define SQLITE_LIMIT_WORKER_THREADS 11
+
+/*
+** CAPI3REF: Prepare Flags
+**
+** These constants define various flags that can be passed into
+** "prepFlags" parameter of the [sqlite3_prepare_v3()] and
+** [sqlite3_prepare16_v3()] interfaces.
+**
+** New flags may be added in future releases of SQLite.
+**
+** <dl>
+** [[SQLITE_PREPARE_PERSISTENT]] ^(<dt>SQLITE_PREPARE_PERSISTENT</dt>
+** <dd>The SQLITE_PREPARE_PERSISTENT flag is a hint to the query planner
+** that the prepared statement will be retained for a long time and
+** probably reused many times.)^ ^Without this flag, [sqlite3_prepare_v3()]
+** and [sqlite3_prepare16_v3()] assume that the prepared statement will
+** be used just once or at most a few times and then destroyed using
+** [sqlite3_finalize()] relatively soon. The current implementation acts
+** on this hint by avoiding the use of [lookaside memory] so as not to
+** deplete the limited store of lookaside memory. Future versions of
+** SQLite may act on this hint differently.
+**
+** [[SQLITE_PREPARE_NORMALIZE]] <dt>SQLITE_PREPARE_NORMALIZE</dt>
+** <dd>The SQLITE_PREPARE_NORMALIZE flag is a no-op. This flag used
+** to be required for any prepared statement that wanted to use the
+** [sqlite3_normalized_sql()] interface. However, the
+** [sqlite3_normalized_sql()] interface is now available to all
+** prepared statements, regardless of whether or not they use this
+** flag.
+**
+** [[SQLITE_PREPARE_NO_VTAB]] <dt>SQLITE_PREPARE_NO_VTAB</dt>
+** <dd>The SQLITE_PREPARE_NO_VTAB flag causes the SQL compiler
+** to return an error (error code SQLITE_ERROR) if the statement uses
+** any virtual tables.
+** </dl>
+*/
+#define SQLITE_PREPARE_PERSISTENT 0x01
+#define SQLITE_PREPARE_NORMALIZE 0x02
+#define SQLITE_PREPARE_NO_VTAB 0x04
+
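+/*
+** A minimal sketch of passing a prepare flag through sqlite3_prepare_v3()
+** for a statement that will be cached and reused (assuming an open
+** connection "db" and a table "t1"):
+**
+** <blockquote><pre>
+** sqlite3_stmt *pStmt = 0;
+** int rc = sqlite3_prepare_v3(db, "SELECT * FROM t1 WHERE a=?1", -1,
+**                             SQLITE_PREPARE_PERSISTENT, &pStmt, 0);
+** </pre></blockquote>
+*/
+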
+/*
+** CAPI3REF: Compiling An SQL Statement
+** KEYWORDS: {SQL statement compiler}
+** METHOD: sqlite3
+** CONSTRUCTOR: sqlite3_stmt
+**
+** To execute an SQL statement, it must first be compiled into a byte-code
+** program using one of these routines. Or, in other words, these routines
+** are constructors for the [prepared statement] object.
+**
+** The preferred routine to use is [sqlite3_prepare_v2()]. The
+** [sqlite3_prepare()] interface is legacy and should be avoided.
+** [sqlite3_prepare_v3()] has an extra "prepFlags" option that is used
+** for special purposes.
+**
+** The use of the UTF-8 interfaces is preferred, as SQLite currently
+** does all parsing using UTF-8. The UTF-16 interfaces are provided
+** as a convenience. The UTF-16 interfaces work by converting the
+** input text into UTF-8, then invoking the corresponding UTF-8 interface.
+**
+** The first argument, "db", is a [database connection] obtained from a
+** prior successful call to [sqlite3_open()], [sqlite3_open_v2()] or
+** [sqlite3_open16()]. The database connection must not have been closed.
+**
+** The second argument, "zSql", is the statement to be compiled, encoded
+** as either UTF-8 or UTF-16. The sqlite3_prepare(), sqlite3_prepare_v2(),
+** and sqlite3_prepare_v3()
+** interfaces use UTF-8, and sqlite3_prepare16(), sqlite3_prepare16_v2(),
+** and sqlite3_prepare16_v3() use UTF-16.
+**
+** ^If the nByte argument is negative, then zSql is read up to the
+** first zero terminator. ^If nByte is positive, then it is the
+** number of bytes read from zSql. ^If nByte is zero, then no prepared
+** statement is generated.
+** If the caller knows that the supplied string is nul-terminated, then
+** there is a small performance advantage to passing an nByte parameter that
+** is the number of bytes in the input string <i>including</i>
+** the nul-terminator.
+**
+** ^If pzTail is not NULL then *pzTail is made to point to the first byte
+** past the end of the first SQL statement in zSql. These routines only
+** compile the first statement in zSql, so *pzTail is left pointing to
+** what remains uncompiled.
+**
+** ^*ppStmt is left pointing to a compiled [prepared statement] that can be
+** executed using [sqlite3_step()]. ^If there is an error, *ppStmt is set
+** to NULL. ^If the input text contains no SQL (if the input is an empty
+** string or a comment) then *ppStmt is set to NULL.
+** The calling procedure is responsible for deleting the compiled
+** SQL statement using [sqlite3_finalize()] after it has finished with it.
+** ppStmt may not be NULL.
+**
+** ^On success, the sqlite3_prepare() family of routines return [SQLITE_OK];
+** otherwise an [error code] is returned.
+**
+** The sqlite3_prepare_v2(), sqlite3_prepare_v3(), sqlite3_prepare16_v2(),
+** and sqlite3_prepare16_v3() interfaces are recommended for all new programs.
+** The older interfaces (sqlite3_prepare() and sqlite3_prepare16())
+** are retained for backwards compatibility, but their use is discouraged.
+** ^In the "vX" interfaces, the prepared statement
+** that is returned (the [sqlite3_stmt] object) contains a copy of the
+** original SQL text. This causes the [sqlite3_step()] interface to
+** behave differently in three ways:
+**
+** <ol>
+** <li>
+** ^If the database schema changes, instead of returning [SQLITE_SCHEMA] as it
+** always used to do, [sqlite3_step()] will automatically recompile the SQL
+** statement and try to run it again. As many as [SQLITE_MAX_SCHEMA_RETRY]
+** retries will occur before sqlite3_step() gives up and returns an error.
+** </li>
+**
+** <li>
+** ^When an error occurs, [sqlite3_step()] will return one of the detailed
+** [error codes] or [extended error codes]. ^The legacy behavior was that
+** [sqlite3_step()] would only return a generic [SQLITE_ERROR] result code
+** and the application would have to make a second call to [sqlite3_reset()]
+** in order to find the underlying cause of the problem. With the "v2" prepare
+** interfaces, the underlying reason for the error is returned immediately.
+** </li>
+**
+** <li>
+** ^If the specific value bound to a [parameter | host parameter] in the
+** WHERE clause might influence the choice of query plan for a statement,
+** then the statement will be automatically recompiled, as if there had been
+** a schema change, on the first [sqlite3_step()] call following any change
+** to the [sqlite3_bind_text | bindings] of that [parameter].
+** ^The specific value of a WHERE-clause [parameter] might influence the
+** choice of query plan if the parameter is the left-hand side of a [LIKE]
+** or [GLOB] operator or if the parameter is compared to an indexed column
+** and the [SQLITE_ENABLE_STAT4] compile-time option is enabled.
+** </li>
+** </ol>
+**
+** <p>^sqlite3_prepare_v3() differs from sqlite3_prepare_v2() only in having
+** the extra prepFlags parameter, which is a bit array consisting of zero or
+** more of the [SQLITE_PREPARE_PERSISTENT|SQLITE_PREPARE_*] flags. ^The
+** sqlite3_prepare_v2() interface works exactly the same as
+** sqlite3_prepare_v3() with a zero prepFlags parameter.
+*/
+SQLITE_API int sqlite3_prepare(
+ sqlite3 *db, /* Database handle */
+ const char *zSql, /* SQL statement, UTF-8 encoded */
+ int nByte, /* Maximum length of zSql in bytes. */
+ sqlite3_stmt **ppStmt, /* OUT: Statement handle */
+ const char **pzTail /* OUT: Pointer to unused portion of zSql */
+);
+SQLITE_API int sqlite3_prepare_v2(
+ sqlite3 *db, /* Database handle */
+ const char *zSql, /* SQL statement, UTF-8 encoded */
+ int nByte, /* Maximum length of zSql in bytes. */
+ sqlite3_stmt **ppStmt, /* OUT: Statement handle */
+ const char **pzTail /* OUT: Pointer to unused portion of zSql */
+);
+SQLITE_API int sqlite3_prepare_v3(
+ sqlite3 *db, /* Database handle */
+ const char *zSql, /* SQL statement, UTF-8 encoded */
+ int nByte, /* Maximum length of zSql in bytes. */
+ unsigned int prepFlags, /* Zero or more SQLITE_PREPARE_ flags */
+ sqlite3_stmt **ppStmt, /* OUT: Statement handle */
+ const char **pzTail /* OUT: Pointer to unused portion of zSql */
+);
+SQLITE_API int sqlite3_prepare16(
+ sqlite3 *db, /* Database handle */
+ const void *zSql, /* SQL statement, UTF-16 encoded */
+ int nByte, /* Maximum length of zSql in bytes. */
+ sqlite3_stmt **ppStmt, /* OUT: Statement handle */
+ const void **pzTail /* OUT: Pointer to unused portion of zSql */
+);
+SQLITE_API int sqlite3_prepare16_v2(
+ sqlite3 *db, /* Database handle */
+ const void *zSql, /* SQL statement, UTF-16 encoded */
+ int nByte, /* Maximum length of zSql in bytes. */
+ sqlite3_stmt **ppStmt, /* OUT: Statement handle */
+ const void **pzTail /* OUT: Pointer to unused portion of zSql */
+);
+SQLITE_API int sqlite3_prepare16_v3(
+ sqlite3 *db, /* Database handle */
+ const void *zSql, /* SQL statement, UTF-16 encoded */
+ int nByte, /* Maximum length of zSql in bytes. */
+ unsigned int prepFlags, /* Zero or more SQLITE_PREPARE_ flags */
+ sqlite3_stmt **ppStmt, /* OUT: Statement handle */
+ const void **pzTail /* OUT: Pointer to unused portion of zSql */
+);
+
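+/*
+** A minimal sketch of the full compile/run/destroy cycle (assuming an
+** open connection "db", a table "t1", and &lt;stdio.h&gt;):
+**
+** <blockquote><pre>
+** sqlite3_stmt *pStmt = 0;
+** int rc = sqlite3_prepare_v2(db, "SELECT a FROM t1", -1, &pStmt, 0);
+** if( rc==SQLITE_OK ){
+**   while( sqlite3_step(pStmt)==SQLITE_ROW ){
+**     printf("%d\n", sqlite3_column_int(pStmt, 0));
+**   }
+** }
+** sqlite3_finalize(pStmt);  // harmless no-op if pStmt is NULL
+** </pre></blockquote>
+*/
+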
+/*
+** CAPI3REF: Retrieving Statement SQL
+** METHOD: sqlite3_stmt
+**
+** ^The sqlite3_sql(P) interface returns a pointer to a copy of the UTF-8
+** SQL text used to create [prepared statement] P if P was
+** created by [sqlite3_prepare_v2()], [sqlite3_prepare_v3()],
+** [sqlite3_prepare16_v2()], or [sqlite3_prepare16_v3()].
+** ^The sqlite3_expanded_sql(P) interface returns a pointer to a UTF-8
+** string containing the SQL text of prepared statement P with
+** [bound parameters] expanded.
+** ^The sqlite3_normalized_sql(P) interface returns a pointer to a UTF-8
+** string containing the normalized SQL text of prepared statement P. The
+** semantics used to normalize a SQL statement are unspecified and subject
+** to change. At a minimum, literal values will be replaced with suitable
+** placeholders.
+**
+** ^(For example, if a prepared statement is created using the SQL
+** text "SELECT $abc,:xyz" and if parameter $abc is bound to integer 2345
+** and parameter :xyz is unbound, then sqlite3_sql() will return
+** the original string, "SELECT $abc,:xyz" but sqlite3_expanded_sql()
+** will return "SELECT 2345,NULL".)^
+**
+** ^The sqlite3_expanded_sql() interface returns NULL if insufficient memory
+** is available to hold the result, or if the result would exceed the
+** maximum string length determined by the [SQLITE_LIMIT_LENGTH].
+**
+** ^The [SQLITE_TRACE_SIZE_LIMIT] compile-time option limits the size of
+** bound parameter expansions. ^The [SQLITE_OMIT_TRACE] compile-time
+** option causes sqlite3_expanded_sql() to always return NULL.
+**
+** ^The strings returned by sqlite3_sql(P) and sqlite3_normalized_sql(P)
+** are managed by SQLite and are automatically freed when the prepared
+** statement is finalized.
+** ^The string returned by sqlite3_expanded_sql(P), on the other hand,
+** is obtained from [sqlite3_malloc()] and must be freed by the application
+** by passing it to [sqlite3_free()].
+*/
+SQLITE_API const char *sqlite3_sql(sqlite3_stmt *pStmt);
+SQLITE_API char *sqlite3_expanded_sql(sqlite3_stmt *pStmt);
+SQLITE_API const char *sqlite3_normalized_sql(sqlite3_stmt *pStmt);
+
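+/*
+** A minimal sketch contrasting the original and expanded text (assuming
+** pStmt was prepared from "SELECT $abc" and $abc was bound to 2345):
+**
+** <blockquote><pre>
+** const char *zSql = sqlite3_sql(pStmt);     // "SELECT $abc"; do not free
+** char *zExp = sqlite3_expanded_sql(pStmt);  // "SELECT 2345"
+** if( zExp ){
+**   sqlite3_free(zExp);  // expanded text must be freed by the caller
+** }
+** </pre></blockquote>
+*/
+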
+/*
+** CAPI3REF: Determine If An SQL Statement Writes The Database
+** METHOD: sqlite3_stmt
+**
+** ^The sqlite3_stmt_readonly(X) interface returns true (non-zero) if
+** and only if the [prepared statement] X makes no direct changes to
+** the content of the database file.
+**
+** Note that [application-defined SQL functions] or
+** [virtual tables] might change the database indirectly as a side effect.
+** ^(For example, if an application defines a function "eval()" that
+** calls [sqlite3_exec()], then the following SQL statement would
+** change the database file through side-effects:
+**
+** <blockquote><pre>
+** SELECT eval('DELETE FROM t1') FROM t2;
+** </pre></blockquote>
+**
+** But because the [SELECT] statement does not change the database file
+** directly, sqlite3_stmt_readonly() would still return true.)^
+**
+** ^Transaction control statements such as [BEGIN], [COMMIT], [ROLLBACK],
+** [SAVEPOINT], and [RELEASE] cause sqlite3_stmt_readonly() to return true,
+** since the statements themselves do not actually modify the database but
+** rather they control the timing of when other statements modify the
+** database. ^The [ATTACH] and [DETACH] statements also cause
+** sqlite3_stmt_readonly() to return true since, while those statements
+** change the configuration of a database connection, they do not make
+** changes to the content of the database files on disk.
+** ^The sqlite3_stmt_readonly() interface returns true for [BEGIN] since
+** [BEGIN] merely sets internal flags, but the [BEGIN|BEGIN IMMEDIATE] and
+** [BEGIN|BEGIN EXCLUSIVE] commands do touch the database and so
+** sqlite3_stmt_readonly() returns false for those commands.
+*/
+SQLITE_API int sqlite3_stmt_readonly(sqlite3_stmt *pStmt);
+
+/*
+** CAPI3REF: Query The EXPLAIN Setting For A Prepared Statement
+** METHOD: sqlite3_stmt
+**
+** ^The sqlite3_stmt_isexplain(S) interface returns 1 if the
+** prepared statement S is an EXPLAIN statement, or 2 if the
+** statement S is an EXPLAIN QUERY PLAN.
+** ^The sqlite3_stmt_isexplain(S) interface returns 0 if S is
+** an ordinary statement or a NULL pointer.
+*/
+SQLITE_API int sqlite3_stmt_isexplain(sqlite3_stmt *pStmt);
+
+/*
+** CAPI3REF: Determine If A Prepared Statement Has Been Reset
+** METHOD: sqlite3_stmt
+**
+** ^The sqlite3_stmt_busy(S) interface returns true (non-zero) if the
+** [prepared statement] S has been stepped at least once using
+** [sqlite3_step(S)] but has neither run to completion (returned
+** [SQLITE_DONE] from [sqlite3_step(S)]) nor
+** been reset using [sqlite3_reset(S)]. ^The sqlite3_stmt_busy(S)
+** interface returns false if S is a NULL pointer. If S is not a
+** NULL pointer and is not a pointer to a valid [prepared statement]
+** object, then the behavior is undefined and probably undesirable.
+**
+** This interface can be used in combination with [sqlite3_next_stmt()]
+** to locate all prepared statements associated with a database
+** connection that are in need of being reset. This can be used,
+** for example, in diagnostic routines to search for prepared
+** statements that are holding a transaction open.
+*/
+SQLITE_API int sqlite3_stmt_busy(sqlite3_stmt*);
+
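+/*
+** A minimal diagnostic sketch using sqlite3_stmt_busy() together with
+** [sqlite3_next_stmt()] to list statements that still need a reset
+** (assuming an open connection "db" and &lt;stdio.h&gt;):
+**
+** <blockquote><pre>
+** sqlite3_stmt *p;
+** for(p=sqlite3_next_stmt(db, 0); p; p=sqlite3_next_stmt(db, p)){
+**   if( sqlite3_stmt_busy(p) ){
+**     fprintf(stderr, "unreset: %s\n", sqlite3_sql(p));
+**   }
+** }
+** </pre></blockquote>
+*/
+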
+/*
+** CAPI3REF: Dynamically Typed Value Object
+** KEYWORDS: {protected sqlite3_value} {unprotected sqlite3_value}
+**
+** SQLite uses the sqlite3_value object to represent all values
+** that can be stored in a database table. SQLite uses dynamic typing
+** for the values it stores. ^Values stored in sqlite3_value objects
+** can be integers, floating point values, strings, BLOBs, or NULL.
+**
+** An sqlite3_value object may be either "protected" or "unprotected".
+** Some interfaces require a protected sqlite3_value. Other interfaces
+** will accept either a protected or an unprotected sqlite3_value.
+** Every interface that accepts sqlite3_value arguments specifies
+** whether or not it requires a protected sqlite3_value. The
+** [sqlite3_value_dup()] interface can be used to construct a new
+** protected sqlite3_value from an unprotected sqlite3_value.
+**
+** The terms "protected" and "unprotected" refer to whether or not
+** a mutex is held. An internal mutex is held for a protected
+** sqlite3_value object but no mutex is held for an unprotected
+** sqlite3_value object. If SQLite is compiled to be single-threaded
+** (with [SQLITE_THREADSAFE=0] and with [sqlite3_threadsafe()] returning 0)
+** or if SQLite is run in one of reduced mutex modes
+** [SQLITE_CONFIG_SINGLETHREAD] or [SQLITE_CONFIG_MULTITHREAD]
+** then there is no distinction between protected and unprotected
+** sqlite3_value objects and they can be used interchangeably. However,
+** for maximum code portability it is recommended that applications
+** still make the distinction between protected and unprotected
+** sqlite3_value objects even when not strictly required.
+**
+** ^The sqlite3_value objects that are passed as parameters into the
+** implementation of [application-defined SQL functions] are protected.
+** ^The sqlite3_value object returned by
+** [sqlite3_column_value()] is unprotected.
+** Unprotected sqlite3_value objects may only be used as arguments
+** to [sqlite3_result_value()], [sqlite3_bind_value()], and
+** [sqlite3_value_dup()].
+** The [sqlite3_value_blob | sqlite3_value_type()] family of
+** interfaces require protected sqlite3_value objects.
+*/
+typedef struct sqlite3_value sqlite3_value;
+
+/*
+** CAPI3REF: SQL Function Context Object
+**
+** The context in which an SQL function executes is stored in an
+** sqlite3_context object. ^A pointer to an sqlite3_context object
+** is always the first parameter to [application-defined SQL functions].
+** The application-defined SQL function implementation will pass this
+** pointer through into calls to [sqlite3_result_int | sqlite3_result()],
+** [sqlite3_aggregate_context()], [sqlite3_user_data()],
+** [sqlite3_context_db_handle()], [sqlite3_get_auxdata()],
+** and/or [sqlite3_set_auxdata()].
+*/
+typedef struct sqlite3_context sqlite3_context;
+
+/*
+** CAPI3REF: Binding Values To Prepared Statements
+** KEYWORDS: {host parameter} {host parameters} {host parameter name}
+** KEYWORDS: {SQL parameter} {SQL parameters} {parameter binding}
+** METHOD: sqlite3_stmt
+**
+** ^(In the SQL statement text input to [sqlite3_prepare_v2()] and its variants,
+** literals may be replaced by a [parameter] that matches one of following
+** templates:
+**
+** <ul>
+** <li> ?
+** <li> ?NNN
+** <li> :VVV
+** <li> @VVV
+** <li> $VVV
+** </ul>
+**
+** In the templates above, NNN represents an integer literal,
+** and VVV represents an alphanumeric identifier.)^ ^The values of these
+** parameters (also called "host parameter names" or "SQL parameters")
+** can be set using the sqlite3_bind_*() routines defined here.
+**
+** ^The first argument to the sqlite3_bind_*() routines is always
+** a pointer to the [sqlite3_stmt] object returned from
+** [sqlite3_prepare_v2()] or its variants.
+**
+** ^The second argument is the index of the SQL parameter to be set.
+** ^The leftmost SQL parameter has an index of 1. ^When the same named
+** SQL parameter is used more than once, second and subsequent
+** occurrences have the same index as the first occurrence.
+** ^The index for named parameters can be looked up using the
+** [sqlite3_bind_parameter_index()] API if desired. ^The index
+** for "?NNN" parameters is the value of NNN.
+** ^The NNN value must be between 1 and the [sqlite3_limit()]
+** parameter [SQLITE_LIMIT_VARIABLE_NUMBER] (default value: 32766).
+**
+** ^The third argument is the value to bind to the parameter.
+** ^If the third parameter to sqlite3_bind_text() or sqlite3_bind_text16()
+** or sqlite3_bind_blob() is a NULL pointer then the fourth parameter
+** is ignored and the end result is the same as sqlite3_bind_null().
+** ^If the third parameter to sqlite3_bind_text() is not NULL, then
+** it should be a pointer to well-formed UTF8 text.
+** ^If the third parameter to sqlite3_bind_text16() is not NULL, then
+** it should be a pointer to well-formed UTF16 text.
+** ^If the third parameter to sqlite3_bind_text64() is not NULL, then
+** it should be a pointer to a well-formed unicode string that is
+** either UTF8 if the sixth parameter is SQLITE_UTF8, or UTF16
+** otherwise.
+**
+** [[byte-order determination rules]] ^(The byte-order of
+** UTF16 input text is determined by the byte-order mark (BOM, U+FEFF)
+** found in the first character, which is removed, or in the absence of a BOM
+** the byte order is the native byte order of the host
+** machine for sqlite3_bind_text16() or the byte order specified in
+** the sixth parameter for sqlite3_bind_text64().)^
+** ^If UTF16 input text contains invalid unicode
+** characters, then SQLite might change those invalid characters
+** into the unicode replacement character: U+FFFD.
+**
+** ^(In those routines that have a fourth argument, its value is the
+** number of bytes in the parameter. To be clear: the value is the
+** number of <u>bytes</u> in the value, not the number of characters.)^
+** ^If the fourth parameter to sqlite3_bind_text() or sqlite3_bind_text16()
+** is negative, then the length of the string is
+** the number of bytes up to the first zero terminator.
+** If the fourth parameter to sqlite3_bind_blob() is negative, then
+** the behavior is undefined.
+** If a non-negative fourth parameter is provided to sqlite3_bind_text()
+** or sqlite3_bind_text16() or sqlite3_bind_text64() then
+** that parameter must be the byte offset
+** where the NUL terminator would occur assuming the string were NUL
+** terminated. If any NUL characters occur at byte offsets less than
+** the value of the fourth parameter then the resulting string value will
+** contain embedded NULs. The result of expressions involving strings
+** with embedded NULs is undefined.
+**
+** ^The fifth argument to the BLOB and string binding interfaces
+** is a destructor used to dispose of the BLOB or
+** string after SQLite has finished with it. ^The destructor is called
+** to dispose of the BLOB or string even if the call to the bind API fails,
+** except the destructor is not called if the third parameter is a NULL
+** pointer or the fourth parameter is negative.
+** ^If the fifth argument is
+** the special value [SQLITE_STATIC], then SQLite assumes that the
+** information is in static, unmanaged space and does not need to be freed.
+** ^If the fifth argument has the value [SQLITE_TRANSIENT], then
+** SQLite makes its own private copy of the data immediately, before
+** the sqlite3_bind_*() routine returns.
+**
+** ^The sixth argument to sqlite3_bind_text64() must be one of
+** [SQLITE_UTF8], [SQLITE_UTF16], [SQLITE_UTF16BE], or [SQLITE_UTF16LE]
+** to specify the encoding of the text in the third parameter. If
+** the sixth argument to sqlite3_bind_text64() is not one of the
+** allowed values shown above, or if the text encoding is different
+** from the encoding specified by the sixth parameter, then the behavior
+** is undefined.
+**
+** ^The sqlite3_bind_zeroblob() routine binds a BLOB of length N that
+** is filled with zeroes. ^A zeroblob uses a fixed amount of memory
+** (just an integer to hold its size) while it is being processed.
+** Zeroblobs are intended to serve as placeholders for BLOBs whose
+** content is later written using
+** [sqlite3_blob_open | incremental BLOB I/O] routines.
+** ^A negative value for the zeroblob results in a zero-length BLOB.
+**
+** ^The sqlite3_bind_pointer(S,I,P,T,D) routine causes the I-th parameter in
+** [prepared statement] S to have an SQL value of NULL, but to also be
+** associated with the pointer P of type T. ^D is either a NULL pointer or
+** a pointer to a destructor function for P. ^SQLite will invoke the
+** destructor D with a single argument of P when it is finished using
+** P. The T parameter should be a static string, preferably a string
+** literal. The sqlite3_bind_pointer() routine is part of the
+** [pointer passing interface] added for SQLite 3.20.0.
+**
+** ^If any of the sqlite3_bind_*() routines are called with a NULL pointer
+** for the [prepared statement] or with a prepared statement for which
+** [sqlite3_step()] has been called more recently than [sqlite3_reset()],
+** then the call will return [SQLITE_MISUSE]. If any sqlite3_bind_*()
+** routine is passed a [prepared statement] that has been finalized, the
+** result is undefined and probably harmful.
+**
+** ^Bindings are not cleared by the [sqlite3_reset()] routine.
+** ^Unbound parameters are interpreted as NULL.
+**
+** ^The sqlite3_bind_* routines return [SQLITE_OK] on success or an
+** [error code] if anything goes wrong.
+** ^[SQLITE_TOOBIG] might be returned if the size of a string or BLOB
+** exceeds limits imposed by [sqlite3_limit]([SQLITE_LIMIT_LENGTH]) or
+** [SQLITE_MAX_LENGTH].
+** ^[SQLITE_RANGE] is returned if the parameter
+** index is out of range. ^[SQLITE_NOMEM] is returned if malloc() fails.
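+**
+** The following is an illustrative sketch only (error checking is
+** abbreviated; the table and SQL text are hypothetical):
+**
+** <blockquote><pre>
+**    sqlite3_stmt *pStmt;
+**    /* Compile a statement with one named and one nameless parameter */
+**    sqlite3_prepare_v2(db, "INSERT INTO t(a,b) VALUES(:name, ?)",
+**                       -1, &pStmt, 0);
+**    /* Bind UTF-8 text; SQLITE_TRANSIENT forces a private copy */
+**    sqlite3_bind_text(pStmt, 1, "hello", -1, SQLITE_TRANSIENT);
+**    sqlite3_bind_int(pStmt, 2, 42);
+**    sqlite3_step(pStmt);
+**    sqlite3_finalize(pStmt);
+** </pre></blockquote>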
+**
+** See also: [sqlite3_bind_parameter_count()],
+** [sqlite3_bind_parameter_name()], and [sqlite3_bind_parameter_index()].
+*/
+SQLITE_API int sqlite3_bind_blob(sqlite3_stmt*, int, const void*, int n, void(*)(void*));
+SQLITE_API int sqlite3_bind_blob64(sqlite3_stmt*, int, const void*, sqlite3_uint64,
+ void(*)(void*));
+SQLITE_API int sqlite3_bind_double(sqlite3_stmt*, int, double);
+SQLITE_API int sqlite3_bind_int(sqlite3_stmt*, int, int);
+SQLITE_API int sqlite3_bind_int64(sqlite3_stmt*, int, sqlite3_int64);
+SQLITE_API int sqlite3_bind_null(sqlite3_stmt*, int);
+SQLITE_API int sqlite3_bind_text(sqlite3_stmt*,int,const char*,int,void(*)(void*));
+SQLITE_API int sqlite3_bind_text16(sqlite3_stmt*, int, const void*, int, void(*)(void*));
+SQLITE_API int sqlite3_bind_text64(sqlite3_stmt*, int, const char*, sqlite3_uint64,
+ void(*)(void*), unsigned char encoding);
+SQLITE_API int sqlite3_bind_value(sqlite3_stmt*, int, const sqlite3_value*);
+SQLITE_API int sqlite3_bind_pointer(sqlite3_stmt*, int, void*, const char*,void(*)(void*));
+SQLITE_API int sqlite3_bind_zeroblob(sqlite3_stmt*, int, int n);
+SQLITE_API int sqlite3_bind_zeroblob64(sqlite3_stmt*, int, sqlite3_uint64);
+
+/*
+** CAPI3REF: Number Of SQL Parameters
+** METHOD: sqlite3_stmt
+**
+** ^This routine can be used to find the number of [SQL parameters]
+** in a [prepared statement]. SQL parameters are tokens of the
+** form "?", "?NNN", ":AAA", "$AAA", or "@AAA" that serve as
+** placeholders for values that are [sqlite3_bind_blob | bound]
+** to the parameters at a later time.
+**
+** ^(This routine actually returns the index of the largest (rightmost)
+** parameter. For all forms except ?NNN, this will correspond to the
+** number of unique parameters. If parameters of the ?NNN form are used,
+** there may be gaps in the list.)^
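+**
+** For example (an illustrative sketch; the SQL text is hypothetical),
+** a statement that uses only ?1 and ?7 still reports a count of 7,
+** because 7 is the largest parameter index:
+**
+** <blockquote><pre>
+**    sqlite3_stmt *pStmt;
+**    sqlite3_prepare_v2(db, "SELECT ?1, ?7", -1, &pStmt, 0);
+**    int n = sqlite3_bind_parameter_count(pStmt);  /* n==7 */
+** </pre></blockquote>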
+**
+** See also: [sqlite3_bind_blob|sqlite3_bind()],
+** [sqlite3_bind_parameter_name()], and
+** [sqlite3_bind_parameter_index()].
+*/
+SQLITE_API int sqlite3_bind_parameter_count(sqlite3_stmt*);
+
+/*
+** CAPI3REF: Name Of A Host Parameter
+** METHOD: sqlite3_stmt
+**
+** ^The sqlite3_bind_parameter_name(P,N) interface returns
+** the name of the N-th [SQL parameter] in the [prepared statement] P.
+** ^(SQL parameters of the form "?NNN" or ":AAA" or "@AAA" or "$AAA"
+** have a name which is the string "?NNN" or ":AAA" or "@AAA" or "$AAA"
+** respectively.
+** In other words, the initial ":" or "$" or "@" or "?"
+** is included as part of the name.)^
+** ^Parameters of the form "?" without a following integer have no name
+** and are referred to as "nameless" or "anonymous parameters".
+**
+** ^The first host parameter has an index of 1, not 0.
+**
+** ^If the value N is out of range or if the N-th parameter is
+** nameless, then NULL is returned. ^The returned string is
+** always in UTF-8 encoding even if the named parameter was
+** originally specified as UTF-16 in [sqlite3_prepare16()],
+** [sqlite3_prepare16_v2()], or [sqlite3_prepare16_v3()].
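+**
+** An illustrative sketch (hypothetical SQL text; error checks omitted):
+**
+** <blockquote><pre>
+**    sqlite3_stmt *pStmt;
+**    sqlite3_prepare_v2(db, "SELECT :aaa, ?", -1, &pStmt, 0);
+**    const char *z1 = sqlite3_bind_parameter_name(pStmt, 1); /* ":aaa" */
+**    const char *z2 = sqlite3_bind_parameter_name(pStmt, 2); /* NULL: nameless */
+** </pre></blockquote>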
+**
+** See also: [sqlite3_bind_blob|sqlite3_bind()],
+** [sqlite3_bind_parameter_count()], and
+** [sqlite3_bind_parameter_index()].
+*/
+SQLITE_API const char *sqlite3_bind_parameter_name(sqlite3_stmt*, int);
+
+/*
+** CAPI3REF: Index Of A Parameter With A Given Name
+** METHOD: sqlite3_stmt
+**
+** ^Return the index of an SQL parameter given its name. ^The
+** index value returned is suitable for use as the second
+** parameter to [sqlite3_bind_blob|sqlite3_bind()]. ^A zero
+** is returned if no matching parameter is found. ^The parameter
+** name must be given in UTF-8 even if the original statement
+** was prepared from UTF-16 text using [sqlite3_prepare16_v2()] or
+** [sqlite3_prepare16_v3()].
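+**
+** An illustrative sketch (hypothetical SQL text) pairing this routine
+** with [sqlite3_bind_int()]; note that the leading "$" is part of the name:
+**
+** <blockquote><pre>
+**    sqlite3_stmt *pStmt;
+**    sqlite3_prepare_v2(db, "SELECT * FROM t WHERE a=$val", -1, &pStmt, 0);
+**    int i = sqlite3_bind_parameter_index(pStmt, "$val");  /* i==1 */
+**    if( i>0 ) sqlite3_bind_int(pStmt, i, 7);
+** </pre></blockquote>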
+**
+** See also: [sqlite3_bind_blob|sqlite3_bind()],
+** [sqlite3_bind_parameter_count()], and
+** [sqlite3_bind_parameter_name()].
+*/
+SQLITE_API int sqlite3_bind_parameter_index(sqlite3_stmt*, const char *zName);
+
+/*
+** CAPI3REF: Reset All Bindings On A Prepared Statement
+** METHOD: sqlite3_stmt
+**
+** ^Contrary to the intuition of many, [sqlite3_reset()] does not reset
+** the [sqlite3_bind_blob | bindings] on a [prepared statement].
+** ^Use this routine to reset all host parameters to NULL.
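+**
+** A minimal sketch of the usual reuse pattern (statement handle assumed
+** already prepared):
+**
+** <blockquote><pre>
+**    sqlite3_reset(pStmt);           /* rewind the program; keeps bindings */
+**    sqlite3_clear_bindings(pStmt);  /* now set every parameter to NULL */
+** </pre></blockquote>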
+*/
+SQLITE_API int sqlite3_clear_bindings(sqlite3_stmt*);
+
+/*
+** CAPI3REF: Number Of Columns In A Result Set
+** METHOD: sqlite3_stmt
+**
+** ^Return the number of columns in the result set returned by the
+** [prepared statement]. ^If this routine returns 0, that means the
+** [prepared statement] returns no data (for example an [UPDATE]).
+** ^However, just because this routine returns a positive number does not
+** mean that one or more rows of data will be returned. ^A SELECT statement
+** will always have a positive sqlite3_column_count() but depending on the
+** WHERE clause constraints and the table content, it might return no rows.
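+**
+** A brief sketch (hypothetical SQL text): a statement compiled from a
+** two-column SELECT reports a column count of 2 even before any row
+** is produced:
+**
+** <blockquote><pre>
+**    sqlite3_stmt *pStmt;
+**    sqlite3_prepare_v2(db, "SELECT a, b FROM t", -1, &pStmt, 0);
+**    int nCol = sqlite3_column_count(pStmt);  /* nCol==2 */
+** </pre></blockquote>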
+**
+** See also: [sqlite3_data_count()]
+*/
+SQLITE_API int sqlite3_column_count(sqlite3_stmt *pStmt);
+
+/*
+** CAPI3REF: Column Names In A Result Set
+** METHOD: sqlite3_stmt
+**
+** ^These routines return the name assigned to a particular column
+** in the result set of a [SELECT] statement. ^The sqlite3_column_name()
+** interface returns a pointer to a zero-terminated UTF-8 string
+** and sqlite3_column_name16() returns a pointer to a zero-terminated
+** UTF-16 string. ^The first parameter is the [prepared statement]
+** that implements the [SELECT] statement. ^The second parameter is the
+** column number. ^The leftmost column is number 0.
+**
+** ^The returned string pointer is valid until either the [prepared statement]
+** is destroyed by [sqlite3_finalize()] or until the statement is automatically
+** reprepared by the first call to [sqlite3_step()] for a particular run
+** or until the next call to
+** sqlite3_column_name() or sqlite3_column_name16() on the same column.
+**
+** ^If sqlite3_malloc() fails during the processing of either routine
+** (for example during a conversion from UTF-8 to UTF-16) then a
+** NULL pointer is returned.
+**
+** ^The name of a result column is the value of the "AS" clause for
+** that column, if there is an AS clause. If there is no AS clause
+** then the name of the column is unspecified and may change from
+** one release of SQLite to the next.
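+**
+** An illustrative sketch (hypothetical SQL text) showing the effect of
+** an AS clause on the reported name:
+**
+** <blockquote><pre>
+**    sqlite3_stmt *pStmt;
+**    sqlite3_prepare_v2(db, "SELECT a AS alias, b FROM t", -1, &pStmt, 0);
+**    const char *z = sqlite3_column_name(pStmt, 0);  /* "alias" */
+** </pre></blockquote>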
+*/
+SQLITE_API const char *sqlite3_column_name(sqlite3_stmt*, int N);
+SQLITE_API const void *sqlite3_column_name16(sqlite3_stmt*, int N);
+
+/*
+** CAPI3REF: Source Of Data In A Query Result
+** METHOD: sqlite3_stmt
+**
+** ^These routines provide a means to determine the database, table, and
+** table column that is the origin of a particular result column in
+** a [SELECT] statement.
+** ^The name of the database or table or column can be returned as
+** either a UTF-8 or UTF-16 string. ^The _database_ routines return
+** the database name, the _table_ routines return the table name, and
+** the _origin_ routines return the column name.
+** ^The returned string is valid until the [prepared statement] is destroyed
+** using [sqlite3_finalize()] or until the statement is automatically
+** reprepared by the first call to [sqlite3_step()] for a particular run
+** or until the same information is requested
+** again in a different encoding.
+**
+** ^The names returned are the original un-aliased names of the
+** database, table, and column.
+**
+** ^The first argument to these interfaces is a [prepared statement].
+** ^These functions return information about the Nth result column returned by
+** the statement, where N is the second function argument.
+** ^The left-most column is column 0 for these routines.
+**
+** ^If the Nth column returned by the statement is an expression or
+** subquery and is not a column value, then all of these functions return
+** NULL. ^These routines might also return NULL if a memory allocation error
+** occurs. ^Otherwise, they return the name of the attached database, table,
+** or column that the query result column was extracted from.
+**
+** ^As with all other SQLite APIs, those whose names end with "16" return
+** UTF-16 encoded strings and the other functions return UTF-8.
+**
+** ^These APIs are only available if the library was compiled with the
+** [SQLITE_ENABLE_COLUMN_METADATA] C-preprocessor symbol.
+**
+** If two or more threads call one or more
+** [sqlite3_column_database_name | column metadata interfaces]
+** for the same [prepared statement] and result column
+** at the same time then the results are undefined.
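+**
+** A brief sketch, assuming the library was built with
+** [SQLITE_ENABLE_COLUMN_METADATA] (the schema is hypothetical):
+**
+** <blockquote><pre>
+**    sqlite3_stmt *pStmt;
+**    sqlite3_prepare_v2(db, "SELECT a AS alias FROM t", -1, &pStmt, 0);
+**    /* Returns the un-aliased origin names: "main", "t", "a" */
+**    const char *zDb  = sqlite3_column_database_name(pStmt, 0);
+**    const char *zTab = sqlite3_column_table_name(pStmt, 0);
+**    const char *zCol = sqlite3_column_origin_name(pStmt, 0);
+** </pre></blockquote>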
+*/
+SQLITE_API const char *sqlite3_column_database_name(sqlite3_stmt*,int);
+SQLITE_API const void *sqlite3_column_database_name16(sqlite3_stmt*,int);
+SQLITE_API const char *sqlite3_column_table_name(sqlite3_stmt*,int);
+SQLITE_API const void *sqlite3_column_table_name16(sqlite3_stmt*,int);
+SQLITE_API const char *sqlite3_column_origin_name(sqlite3_stmt*,int);
+SQLITE_API const void *sqlite3_column_origin_name16(sqlite3_stmt*,int);
+
+/*
+** CAPI3REF: Declared Datatype Of A Query Result
+** METHOD: sqlite3_stmt
+**
+** ^(The first parameter is a [prepared statement].
+** If this statement is a [SELECT] statement and the Nth column of the
+** returned result set of that [SELECT] is a table column (not an
+** expression or subquery) then the declared type of the table
+** column is returned.)^ ^If the Nth column of the result set is an
+** expression or subquery, then a NULL pointer is returned.
+** ^The returned string is always UTF-8 encoded.
+**
+** ^(For example, given the database schema:
+**
+** CREATE TABLE t1(c1 VARIANT);
+**
+** and the following statement to be compiled:
+**
+** SELECT c1 + 1, c1 FROM t1;
+**
+** this routine would return the string "VARIANT" for the second result
+** column (i==1), and a NULL pointer for the first result column (i==0).)^
+**
+** ^SQLite uses dynamic run-time typing. ^So just because a column
+** is declared to contain a particular type does not mean that the
+** data stored in that column is of the declared type. SQLite is
+** strongly typed, but the typing is dynamic, not static. ^Type
+** is associated with individual values, not with the containers
+** used to hold those values.
+*/
+SQLITE_API const char *sqlite3_column_decltype(sqlite3_stmt*,int);
+SQLITE_API const void *sqlite3_column_decltype16(sqlite3_stmt*,int);
+
+/*
+** CAPI3REF: Evaluate An SQL Statement
+** METHOD: sqlite3_stmt
+**
+** After a [prepared statement] has been prepared using any of
+** [sqlite3_prepare_v2()], [sqlite3_prepare_v3()], [sqlite3_prepare16_v2()],
+** or [sqlite3_prepare16_v3()] or one of the legacy
+** interfaces [sqlite3_prepare()] or [sqlite3_prepare16()], this function
+** must be called one or more times to evaluate the statement.
+**
+** The details of the behavior of the sqlite3_step() interface depend
+** on whether the statement was prepared using the newer "vX" interfaces
+** [sqlite3_prepare_v3()], [sqlite3_prepare_v2()], [sqlite3_prepare16_v3()],
+** [sqlite3_prepare16_v2()] or the older legacy
+** interfaces [sqlite3_prepare()] and [sqlite3_prepare16()]. The use of the
+** new "vX" interface is recommended for new applications but the legacy
+** interface will continue to be supported.
+**
+** ^In the legacy interface, the return value will be either [SQLITE_BUSY],
+** [SQLITE_DONE], [SQLITE_ROW], [SQLITE_ERROR], or [SQLITE_MISUSE].
+** ^With the "v2" interface, any of the other [result codes] or
+** [extended result codes] might be returned as well.
+**
+** ^[SQLITE_BUSY] means that the database engine was unable to acquire the
+** database locks it needs to do its job. ^If the statement is a [COMMIT]
+** or occurs outside of an explicit transaction, then you can retry the
+** statement. If the statement is not a [COMMIT] and occurs within an
+** explicit transaction then you should rollback the transaction before
+** continuing.
+**
+** ^[SQLITE_DONE] means that the statement has finished executing
+** successfully. sqlite3_step() should not be called again on this virtual
+** machine without first calling [sqlite3_reset()] to reset the virtual
+** machine back to its initial state.
+**
+** ^If the SQL statement being executed returns any data, then [SQLITE_ROW]
+** is returned each time a new row of data is ready for processing by the
+** caller. The values may be accessed using the [column access functions].
+** sqlite3_step() is called again to retrieve the next row of data.
+**
+** ^[SQLITE_ERROR] means that a run-time error (such as a constraint
+** violation) has occurred. sqlite3_step() should not be called again on
+** the VM. More information may be found by calling [sqlite3_errmsg()].
+** ^With the legacy interface, a more specific error code (for example,
+** [SQLITE_INTERRUPT], [SQLITE_SCHEMA], [SQLITE_CORRUPT], and so forth)
+** can be obtained by calling [sqlite3_reset()] on the
+** [prepared statement]. ^In the "v2" interface,
+** the more specific error code is returned directly by sqlite3_step().
+**
+** [SQLITE_MISUSE] means that this routine was called inappropriately.
+** Perhaps it was called on a [prepared statement] that has
+** already been [sqlite3_finalize | finalized] or on one that had
+** previously returned [SQLITE_ERROR] or [SQLITE_DONE]. Or it could
+** be the case that the same database connection is being used by two or
+** more threads at the same moment in time.
+**
+** For all versions of SQLite up to and including 3.6.23.1, a call to
+** [sqlite3_reset()] was required after sqlite3_step() returned anything
+** other than [SQLITE_ROW] before any subsequent invocation of
+** sqlite3_step(). Failure to reset the prepared statement using
+** [sqlite3_reset()] would result in an [SQLITE_MISUSE] return from
+** sqlite3_step(). But after [version 3.6.23.1] ([dateof:3.6.23.1]),
+** sqlite3_step() began
+** calling [sqlite3_reset()] automatically in this circumstance rather
+** than returning [SQLITE_MISUSE]. This is not considered a compatibility
+** break because any application that ever receives an SQLITE_MISUSE error
+** is broken by definition. The [SQLITE_OMIT_AUTORESET] compile-time option
+** can be used to restore the legacy behavior.
+**
+** <b>Goofy Interface Alert:</b> In the legacy interface, the sqlite3_step()
+** API always returns a generic error code, [SQLITE_ERROR], following any
+** error other than [SQLITE_BUSY] and [SQLITE_MISUSE]. You must call
+** [sqlite3_reset()] or [sqlite3_finalize()] in order to find one of the
+** specific [error codes] that better describes the error.
+** We admit that this is a goofy design. The problem has been fixed
+** with the "v2" interface. If you prepare all of your SQL statements
+** using [sqlite3_prepare_v3()] or [sqlite3_prepare_v2()]
+** or [sqlite3_prepare16_v2()] or [sqlite3_prepare16_v3()] instead
+** of the legacy [sqlite3_prepare()] and [sqlite3_prepare16()] interfaces,
+** then the more specific [error codes] are returned directly
+** by sqlite3_step(). The use of the "vX" interfaces is recommended.
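+**
+** A minimal sketch of the canonical row loop for a "v2" statement
+** (error handling abbreviated; hypothetical SQL text):
+**
+** <blockquote><pre>
+**    sqlite3_stmt *pStmt;
+**    int rc = sqlite3_prepare_v2(db, "SELECT a FROM t", -1, &pStmt, 0);
+**    while( (rc = sqlite3_step(pStmt))==SQLITE_ROW ){
+**      /* One row is ready; read it with the column access functions */
+**      int a = sqlite3_column_int(pStmt, 0);
+**    }
+**    if( rc!=SQLITE_DONE ){ /* consult sqlite3_errmsg(db) */ }
+**    sqlite3_finalize(pStmt);
+** </pre></blockquote>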
+*/
+SQLITE_API int sqlite3_step(sqlite3_stmt*);
+
+/*
+** CAPI3REF: Number of columns in a result set
+** METHOD: sqlite3_stmt
+**
+** ^The sqlite3_data_count(P) interface returns the number of columns in the
+** current row of the result set of [prepared statement] P.
+** ^If prepared statement P does not have results ready to return
+** (via calls to the [sqlite3_column_int | sqlite3_column()] family of
+** interfaces) then sqlite3_data_count(P) returns 0.
+** ^The sqlite3_data_count(P) routine also returns 0 if P is a NULL pointer.
+** ^The sqlite3_data_count(P) routine returns 0 if the previous call to
+** [sqlite3_step](P) returned [SQLITE_DONE]. ^The sqlite3_data_count(P)
+** will return non-zero if the previous call to [sqlite3_step](P) returned
+** [SQLITE_ROW], except in the case of the [PRAGMA incremental_vacuum]
+** where it always returns zero since each step of that multi-step
+** pragma returns 0 columns of data.
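+**
+** A short sketch contrasting this routine with [sqlite3_column_count()]
+** (statement handle assumed already prepared):
+**
+** <blockquote><pre>
+**    int nCol = sqlite3_column_count(pStmt);   /* fixed for the statement */
+**    if( sqlite3_step(pStmt)==SQLITE_ROW ){
+**      int nData = sqlite3_data_count(pStmt);  /* ==nCol while on a row */
+**    }
+** </pre></blockquote>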
+**
+** See also: [sqlite3_column_count()]
+*/
+SQLITE_API int sqlite3_data_count(sqlite3_stmt *pStmt);
+
+/*
+** CAPI3REF: Fundamental Datatypes
+** KEYWORDS: SQLITE_TEXT
+**
+** ^(Every value in SQLite has one of five fundamental datatypes:
+**
+** <ul>
+** <li> 64-bit signed integer
+** <li> 64-bit IEEE floating point number
+** <li> string
+** <li> BLOB
+** <li> NULL
+** </ul>)^
+**
+** These constants are codes for each of those types.
+**
+** Note that the SQLITE_TEXT constant was also used in SQLite version 2
+** for a completely different meaning. Software that links against both
+** SQLite version 2 and SQLite version 3 should use SQLITE3_TEXT, not
+** SQLITE_TEXT.
+*/
+#define SQLITE_INTEGER 1
+#define SQLITE_FLOAT 2
+#define SQLITE_BLOB 4
+#define SQLITE_NULL 5
+#ifdef SQLITE_TEXT
+# undef SQLITE_TEXT
+#else
+# define SQLITE_TEXT 3
+#endif
+#define SQLITE3_TEXT 3
+
+/*
+** CAPI3REF: Result Values From A Query
+** KEYWORDS: {column access functions}
+** METHOD: sqlite3_stmt
+**
+** <b>Summary:</b>
+** <blockquote><table border=0 cellpadding=0 cellspacing=0>
+** <tr><td><b>sqlite3_column_blob</b><td>&rarr;<td>BLOB result
+** <tr><td><b>sqlite3_column_double</b><td>&rarr;<td>REAL result
+** <tr><td><b>sqlite3_column_int</b><td>&rarr;<td>32-bit INTEGER result
+** <tr><td><b>sqlite3_column_int64</b><td>&rarr;<td>64-bit INTEGER result
+** <tr><td><b>sqlite3_column_text</b><td>&rarr;<td>UTF-8 TEXT result
+** <tr><td><b>sqlite3_column_text16</b><td>&rarr;<td>UTF-16 TEXT result
+** <tr><td><b>sqlite3_column_value</b><td>&rarr;<td>The result as an
+** [sqlite3_value|unprotected sqlite3_value] object.
+** <tr><td>&nbsp;<td>&nbsp;<td>&nbsp;
+** <tr><td><b>sqlite3_column_bytes</b><td>&rarr;<td>Size of a BLOB
+** or a UTF-8 TEXT result in bytes
+** <tr><td><b>sqlite3_column_bytes16&nbsp;&nbsp;</b>
+** <td>&rarr;&nbsp;&nbsp;<td>Size of UTF-16
+** TEXT in bytes
+** <tr><td><b>sqlite3_column_type</b><td>&rarr;<td>Default
+** datatype of the result
+** </table></blockquote>
+**
+** <b>Details:</b>
+**
+** ^These routines return information about a single column of the current
+** result row of a query. ^In every case the first argument is a pointer
+** to the [prepared statement] that is being evaluated (the [sqlite3_stmt*]
+** that was returned from [sqlite3_prepare_v2()] or one of its variants)
+** and the second argument is the index of the column for which information
+** should be returned. ^The leftmost column of the result set has the index 0.
+** ^The number of columns in the result can be determined using
+** [sqlite3_column_count()].
+**
+** If the SQL statement does not currently point to a valid row, or if the
+** column index is out of range, the result is undefined.
+** These routines may only be called when the most recent call to
+** [sqlite3_step()] has returned [SQLITE_ROW] and neither
+** [sqlite3_reset()] nor [sqlite3_finalize()] have been called subsequently.
+** If any of these routines are called after [sqlite3_reset()] or
+** [sqlite3_finalize()] or after [sqlite3_step()] has returned
+** something other than [SQLITE_ROW], the results are undefined.
+** If [sqlite3_step()] or [sqlite3_reset()] or [sqlite3_finalize()]
+** are called from a different thread while any of these routines
+** are pending, then the results are undefined.
+**
+** The first six interfaces (_blob, _double, _int, _int64, _text, and _text16)
+** each return the value of a result column in a specific data format. If
+** the result column is not initially in the requested format (for example,
+** if the query returns an integer but the sqlite3_column_text() interface
+** is used to extract the value) then an automatic type conversion is performed.
+**
+** ^The sqlite3_column_type() routine returns the
+** [SQLITE_INTEGER | datatype code] for the initial data type
+** of the result column. ^The returned value is one of [SQLITE_INTEGER],
+** [SQLITE_FLOAT], [SQLITE_TEXT], [SQLITE_BLOB], or [SQLITE_NULL].
+** The return value of sqlite3_column_type() can be used to decide which
+** of the first six interfaces should be used to extract the column value.
+** The value returned by sqlite3_column_type() is only meaningful if no
+** automatic type conversions have occurred for the value in question.
+** After a type conversion, the result of calling sqlite3_column_type()
+** is undefined, though harmless. Future
+** versions of SQLite may change the behavior of sqlite3_column_type()
+** following a type conversion.
+**
+** If the result is a BLOB or a TEXT string, then the sqlite3_column_bytes()
+** or sqlite3_column_bytes16() interfaces can be used to determine the size
+** of that BLOB or string.
+**
+** ^If the result is a BLOB or UTF-8 string then the sqlite3_column_bytes()
+** routine returns the number of bytes in that BLOB or string.
+** ^If the result is a UTF-16 string, then sqlite3_column_bytes() converts
+** the string to UTF-8 and then returns the number of bytes.
+** ^If the result is a numeric value then sqlite3_column_bytes() uses
+** [sqlite3_snprintf()] to convert that value to a UTF-8 string and returns
+** the number of bytes in that string.
+** ^If the result is NULL, then sqlite3_column_bytes() returns zero.
+**
+** ^If the result is a BLOB or UTF-16 string then the sqlite3_column_bytes16()
+** routine returns the number of bytes in that BLOB or string.
+** ^If the result is a UTF-8 string, then sqlite3_column_bytes16() converts
+** the string to UTF-16 and then returns the number of bytes.
+** ^If the result is a numeric value then sqlite3_column_bytes16() uses
+** [sqlite3_snprintf()] to convert that value to a UTF-16 string and returns
+** the number of bytes in that string.
+** ^If the result is NULL, then sqlite3_column_bytes16() returns zero.
+**
+** ^The values returned by [sqlite3_column_bytes()] and
+** [sqlite3_column_bytes16()] do not include the zero terminators at the end
+** of the string. ^For clarity: the values returned by
+** [sqlite3_column_bytes()] and [sqlite3_column_bytes16()] are the number of
+** bytes in the string, not the number of characters.
+**
+** ^Strings returned by sqlite3_column_text() and sqlite3_column_text16(),
+** even empty strings, are always zero-terminated. ^The return
+** value from sqlite3_column_blob() for a zero-length BLOB is a NULL pointer.
+**
+** <b>Warning:</b> ^The object returned by [sqlite3_column_value()] is an
+** [unprotected sqlite3_value] object. In a multithreaded environment,
+** an unprotected sqlite3_value object may only be used safely with
+** [sqlite3_bind_value()] and [sqlite3_result_value()].
+** If the [unprotected sqlite3_value] object returned by
+** [sqlite3_column_value()] is used in any other way, including calls
+** to routines like [sqlite3_value_int()], [sqlite3_value_text()],
+** or [sqlite3_value_bytes()], the behavior is not threadsafe.
+** Hence, the sqlite3_column_value() interface
+** is normally only useful within the implementation of
+** [application-defined SQL functions] or [virtual tables], not within
+** top-level application code.
+**
+** These routines may attempt to convert the datatype of the result.
+** ^For example, if the internal representation is FLOAT and a text result
+** is requested, [sqlite3_snprintf()] is used internally to perform the
+** conversion automatically. ^(The following table details the conversions
+** that are applied:
+**
+** <blockquote>
+** <table border="1">
+** <tr><th> Internal<br>Type <th> Requested<br>Type <th> Conversion
+**
+** <tr><td> NULL <td> INTEGER <td> Result is 0
+** <tr><td> NULL <td> FLOAT <td> Result is 0.0
+** <tr><td> NULL <td> TEXT <td> Result is a NULL pointer
+** <tr><td> NULL <td> BLOB <td> Result is a NULL pointer
+** <tr><td> INTEGER <td> FLOAT <td> Convert from integer to float
+** <tr><td> INTEGER <td> TEXT <td> ASCII rendering of the integer
+** <tr><td> INTEGER <td> BLOB <td> Same as INTEGER->TEXT
+** <tr><td> FLOAT <td> INTEGER <td> [CAST] to INTEGER
+** <tr><td> FLOAT <td> TEXT <td> ASCII rendering of the float
+** <tr><td> FLOAT <td> BLOB <td> [CAST] to BLOB
+** <tr><td> TEXT <td> INTEGER <td> [CAST] to INTEGER
+** <tr><td> TEXT <td> FLOAT <td> [CAST] to REAL
+** <tr><td> TEXT <td> BLOB <td> No change
+** <tr><td> BLOB <td> INTEGER <td> [CAST] to INTEGER
+** <tr><td> BLOB <td> FLOAT <td> [CAST] to REAL
+** <tr><td> BLOB <td> TEXT <td> Add a zero terminator if needed
+** </table>
+** </blockquote>)^
+**
+** Note that when type conversions occur, pointers returned by prior
+** calls to sqlite3_column_blob(), sqlite3_column_text(), and/or
+** sqlite3_column_text16() may be invalidated.
+** Type conversions and pointer invalidations might occur
+** in the following cases:
+**
+** <ul>
+** <li> The initial content is a BLOB and sqlite3_column_text() or
+** sqlite3_column_text16() is called. A zero-terminator might
+** need to be added to the string.</li>
+** <li> The initial content is UTF-8 text and sqlite3_column_bytes16() or
+** sqlite3_column_text16() is called. The content must be converted
+** to UTF-16.</li>
+** <li> The initial content is UTF-16 text and sqlite3_column_bytes() or
+** sqlite3_column_text() is called. The content must be converted
+** to UTF-8.</li>
+** </ul>
+**
+** ^Conversions between UTF-16be and UTF-16le are always done in place and do
+** not invalidate a prior pointer, though of course the content of the buffer
+** that the prior pointer references will have been modified. Other kinds
+** of conversion are done in place when it is possible, but sometimes they
+** are not possible and in those cases prior pointers are invalidated.
+**
+** The safest policy is to invoke these routines
+** in one of the following ways:
+**
+** <ul>
+** <li>sqlite3_column_text() followed by sqlite3_column_bytes()</li>
+** <li>sqlite3_column_blob() followed by sqlite3_column_bytes()</li>
+** <li>sqlite3_column_text16() followed by sqlite3_column_bytes16()</li>
+** </ul>
+**
+** In other words, you should call sqlite3_column_text(),
+** sqlite3_column_blob(), or sqlite3_column_text16() first to force the result
+** into the desired format, then invoke sqlite3_column_bytes() or
+** sqlite3_column_bytes16() to find the size of the result. Do not mix calls
+** to sqlite3_column_text() or sqlite3_column_blob() with calls to
+** sqlite3_column_bytes16(), and do not mix calls to sqlite3_column_text16()
+** with calls to sqlite3_column_bytes().
+**
+** ^The pointers returned are valid until a type conversion occurs as
+** described above, or until [sqlite3_step()] or [sqlite3_reset()] or
+** [sqlite3_finalize()] is called. ^The memory space used to hold strings
+** and BLOBs is freed automatically. Do not pass the pointers returned
+** from [sqlite3_column_blob()], [sqlite3_column_text()], etc. into
+** [sqlite3_free()].
+**
+** As long as the input parameters are correct, these routines will only
+** fail if an out-of-memory error occurs during a format conversion.
+** Only the following subset of interfaces is subject to out-of-memory
+** errors:
+**
+** <ul>
+** <li> sqlite3_column_blob()
+** <li> sqlite3_column_text()
+** <li> sqlite3_column_text16()
+** <li> sqlite3_column_bytes()
+** <li> sqlite3_column_bytes16()
+** </ul>
+**
+** If an out-of-memory error occurs, then the return value from these
+** routines is the same as if the column had contained an SQL NULL value.
+** Valid SQL NULL returns can be distinguished from out-of-memory errors
+** by invoking [sqlite3_errcode()] immediately after the suspect
+** return value is obtained and before any
+** other SQLite interface is called on the same [database connection].
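+**
+** A brief sketch of the "safest policy" described above (the statement
+** is assumed to be on a valid row):
+**
+** <blockquote><pre>
+**    /* Force the value into UTF-8 first, then measure it */
+**    const unsigned char *zTxt = sqlite3_column_text(pStmt, 0);
+**    int nByte = sqlite3_column_bytes(pStmt, 0);
+**    /* zTxt is valid until the next step()/reset()/finalize() or a
+**    ** later type conversion; do not pass it to sqlite3_free() */
+** </pre></blockquote>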
+*/
+SQLITE_API const void *sqlite3_column_blob(sqlite3_stmt*, int iCol);
+SQLITE_API double sqlite3_column_double(sqlite3_stmt*, int iCol);
+SQLITE_API int sqlite3_column_int(sqlite3_stmt*, int iCol);
+SQLITE_API sqlite3_int64 sqlite3_column_int64(sqlite3_stmt*, int iCol);
+SQLITE_API const unsigned char *sqlite3_column_text(sqlite3_stmt*, int iCol);
+SQLITE_API const void *sqlite3_column_text16(sqlite3_stmt*, int iCol);
+SQLITE_API sqlite3_value *sqlite3_column_value(sqlite3_stmt*, int iCol);
+SQLITE_API int sqlite3_column_bytes(sqlite3_stmt*, int iCol);
+SQLITE_API int sqlite3_column_bytes16(sqlite3_stmt*, int iCol);
+SQLITE_API int sqlite3_column_type(sqlite3_stmt*, int iCol);
+
+/*
+** CAPI3REF: Destroy A Prepared Statement Object
+** DESTRUCTOR: sqlite3_stmt
+**
+** ^The sqlite3_finalize() function is called to delete a [prepared statement].
+** ^If the most recent evaluation of the statement encountered no errors
+** or if the statement has never been evaluated, then sqlite3_finalize() returns
+** SQLITE_OK. ^If the most recent evaluation of statement S failed, then
+** sqlite3_finalize(S) returns the appropriate [error code] or
+** [extended error code].
+**
+** ^The sqlite3_finalize(S) routine can be called at any point during
+** the life cycle of [prepared statement] S:
+** before statement S is ever evaluated, after
+** one or more calls to [sqlite3_reset()], or after any call
+** to [sqlite3_step()] regardless of whether or not the statement has
+** completed execution.
+**
+** ^Invoking sqlite3_finalize() on a NULL pointer is a harmless no-op.
+**
+** The application must finalize every [prepared statement] in order to avoid
+** resource leaks. It is a grievous error for the application to try to use
+** a prepared statement after it has been finalized. Any use of a prepared
+** statement after it has been finalized can result in undefined and
+** undesirable behavior such as segfaults and heap corruption.
+*/
+SQLITE_API int sqlite3_finalize(sqlite3_stmt *pStmt);
+
+/*
+** CAPI3REF: Reset A Prepared Statement Object
+** METHOD: sqlite3_stmt
+**
+** The sqlite3_reset() function is called to reset a [prepared statement]
+** object back to its initial state, ready to be re-executed.
+** ^Any SQL statement variables that had values bound to them using
+** the [sqlite3_bind_blob | sqlite3_bind_*() API] retain their values.
+** Use [sqlite3_clear_bindings()] to reset the bindings.
+**
+** ^The [sqlite3_reset(S)] interface resets the [prepared statement] S
+** back to the beginning of its program.
+**
+** ^If the most recent call to [sqlite3_step(S)] for the
+** [prepared statement] S returned [SQLITE_ROW] or [SQLITE_DONE],
+** or if [sqlite3_step(S)] has never before been called on S,
+** then [sqlite3_reset(S)] returns [SQLITE_OK].
+**
+** ^If the most recent call to [sqlite3_step(S)] for the
+** [prepared statement] S indicated an error, then
+** [sqlite3_reset(S)] returns an appropriate [error code].
+**
+** ^The [sqlite3_reset(S)] interface does not change the values
+** of any [sqlite3_bind_blob|bindings] on the [prepared statement] S.
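+**
+** A minimal sketch of the usual reuse pattern (aValue[] and nRow are
+** hypothetical; error checks omitted):
+**
+** <blockquote><pre>
+**    for(int i=0; i<nRow; i++){
+**      sqlite3_bind_int(pStmt, 1, aValue[i]);
+**      while( sqlite3_step(pStmt)==SQLITE_ROW ){ /* read columns */ }
+**      sqlite3_reset(pStmt);  /* rewind the statement; bindings persist */
+**    }
+** </pre></blockquote>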
+*/
+SQLITE_API int sqlite3_reset(sqlite3_stmt *pStmt);
+
+/*
+** CAPI3REF: Create Or Redefine SQL Functions
+** KEYWORDS: {function creation routines}
+** METHOD: sqlite3
+**
+** ^These functions (collectively known as "function creation routines")
+** are used to add SQL functions or aggregates or to redefine the behavior
+** of existing SQL functions or aggregates. The only differences between
+** the three "sqlite3_create_function*" routines are the text encoding
+** expected for the second parameter (the name of the function being
+** created) and the presence or absence of a destructor callback for
+** the application data pointer. Function sqlite3_create_window_function()
+** is similar, but allows the user to supply the extra callback functions
+** needed by [aggregate window functions].
+**
+** ^The first parameter is the [database connection] to which the SQL
+** function is to be added. ^If an application uses more than one database
+** connection then application-defined SQL functions must be added
+** to each database connection separately.
+**
+** ^The second parameter is the name of the SQL function to be created or
+** redefined. ^The length of the name is limited to 255 bytes in a UTF-8
+** representation, exclusive of the zero-terminator. ^Note that the name
+** length limit is in UTF-8 bytes, not in characters or UTF-16 bytes.
+** ^Any attempt to create a function with a longer name
+** will result in [SQLITE_MISUSE] being returned.
+**
+** ^The third parameter (nArg)
+** is the number of arguments that the SQL function or
+** aggregate takes. ^If this parameter is -1, then the SQL function or
+** aggregate may take any number of arguments between 0 and the limit
+** set by [sqlite3_limit]([SQLITE_LIMIT_FUNCTION_ARG]). If the third
+** parameter is less than -1 or greater than 127 then the behavior is
+** undefined.
+**
+** ^The fourth parameter, eTextRep, specifies what
+** [SQLITE_UTF8 | text encoding] this SQL function prefers for
+** its parameters. The application should set this parameter to
+** [SQLITE_UTF16LE] if the function implementation invokes
+** [sqlite3_value_text16le()] on an input, or [SQLITE_UTF16BE] if the
+** implementation invokes [sqlite3_value_text16be()] on an input, or
+** [SQLITE_UTF16] if [sqlite3_value_text16()] is used, or [SQLITE_UTF8]
+** otherwise. ^The same SQL function may be registered multiple times using
+** different preferred text encodings, with different implementations for
+** each encoding.
+** ^When multiple implementations of the same function are available, SQLite
+** will pick the one that involves the least amount of data conversion.
+**
+** ^The fourth parameter may optionally be ORed with [SQLITE_DETERMINISTIC]
+** to signal that the function will always return the same result given
+** the same inputs within a single SQL statement. Most SQL functions are
+** deterministic. The built-in [random()] SQL function is an example of a
+** function that is not deterministic. The SQLite query planner is able to
+** perform additional optimizations on deterministic functions, so use
+** of the [SQLITE_DETERMINISTIC] flag is recommended where possible.
+**
+** ^The fourth parameter may also optionally include the [SQLITE_DIRECTONLY]
+** flag, which if present prevents the function from being invoked from
+** within VIEWs, TRIGGERs, CHECK constraints, generated column expressions,
+** index expressions, or the WHERE clause of partial indexes.
+**
+** <span style="background-color:#ffff90;">
+** For best security, the [SQLITE_DIRECTONLY] flag is recommended for
+** all application-defined SQL functions that do not need to be
+** used inside of triggers, views, CHECK constraints, or other elements of
+** the database schema. This flag is especially recommended for SQL
+** functions that have side effects or reveal internal application state.
+** Without this flag, an attacker might be able to modify the schema of
+** a database file to include invocations of the function with parameters
+** chosen by the attacker, which the application will then execute when
+** the database file is opened and read.
+** </span>
+**
+** ^(The fifth parameter is an arbitrary pointer. The implementation of the
+** function can gain access to this pointer using [sqlite3_user_data()].)^
+**
+** ^The sixth, seventh and eighth parameters passed to the three
+** "sqlite3_create_function*" functions, xFunc, xStep and xFinal, are
+** pointers to C-language functions that implement the SQL function or
+** aggregate. ^A scalar SQL function requires an implementation of the xFunc
+** callback only; NULL pointers must be passed as the xStep and xFinal
+** parameters. ^An aggregate SQL function requires an implementation of xStep
+** and xFinal, and a NULL pointer must be passed for xFunc. ^To delete an existing
+** SQL function or aggregate, pass NULL pointers for all three function
+** callbacks.
+**
+** ^The sixth, seventh, eighth and ninth parameters (xStep, xFinal, xValue
+** and xInverse) passed to sqlite3_create_window_function are pointers to
+** C-language callbacks that implement the new function. xStep and xFinal
+** must both be non-NULL. xValue and xInverse may either both be NULL, in
+** which case a regular aggregate function is created, or must both be
+** non-NULL, in which case the new function may be used as either an aggregate
+** or aggregate window function. More details regarding the implementation
+** of aggregate window functions are
+** [user-defined window functions|available here].
+**
+** ^(If the final parameter to sqlite3_create_function_v2() or
+** sqlite3_create_window_function() is not NULL, then it is the destructor for
+** the application data pointer. The destructor is invoked when the function
+** is deleted, either by being overloaded or when the database connection
+** closes.)^ ^The destructor is also invoked if the call to
+** sqlite3_create_function_v2() fails. ^When the destructor callback is
+** invoked, it is passed a single argument which is a copy of the application
+** data pointer which was the fifth parameter to sqlite3_create_function_v2().
+**
+** ^It is permitted to register multiple implementations of the same
+** functions with the same name but with either differing numbers of
+** arguments or differing preferred text encodings. ^SQLite will use
+** the implementation that most closely matches the way in which the
+** SQL function is used. ^A function implementation with a non-negative
+** nArg parameter is a better match than a function implementation with
+** a negative nArg. ^A function where the preferred text encoding
+** matches the database encoding is a better
+** match than a function where the encoding is different.
+** ^A function where the encoding difference is between UTF16le and UTF16be
+** is a closer match than a function where the encoding difference is
+** between UTF8 and UTF16.
+**
+** ^Built-in functions may be overloaded by new application-defined functions.
+**
+** ^An application-defined function is permitted to call other
+** SQLite interfaces. However, such calls must not
+** close the database connection nor finalize or reset the prepared
+** statement in which the function is running.
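+**
+** The following sketch registers a hypothetical scalar function
+** half(X) that returns X divided by two (error handling omitted):
+**
+** <blockquote><pre>
+**    static void halfFunc(sqlite3_context *ctx, int argc, sqlite3_value **argv){
+**      sqlite3_result_double(ctx, 0.5*sqlite3_value_double(argv[0]));
+**    }
+**    ...
+**    sqlite3_create_function_v2(db, "half", 1,
+**        SQLITE_UTF8|SQLITE_DETERMINISTIC|SQLITE_INNOCUOUS,
+**        0, halfFunc, 0, 0, 0);
+** </pre></blockquote>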
+*/
+SQLITE_API int sqlite3_create_function(
+ sqlite3 *db,
+ const char *zFunctionName,
+ int nArg,
+ int eTextRep,
+ void *pApp,
+ void (*xFunc)(sqlite3_context*,int,sqlite3_value**),
+ void (*xStep)(sqlite3_context*,int,sqlite3_value**),
+ void (*xFinal)(sqlite3_context*)
+);
+SQLITE_API int sqlite3_create_function16(
+ sqlite3 *db,
+ const void *zFunctionName,
+ int nArg,
+ int eTextRep,
+ void *pApp,
+ void (*xFunc)(sqlite3_context*,int,sqlite3_value**),
+ void (*xStep)(sqlite3_context*,int,sqlite3_value**),
+ void (*xFinal)(sqlite3_context*)
+);
+SQLITE_API int sqlite3_create_function_v2(
+ sqlite3 *db,
+ const char *zFunctionName,
+ int nArg,
+ int eTextRep,
+ void *pApp,
+ void (*xFunc)(sqlite3_context*,int,sqlite3_value**),
+ void (*xStep)(sqlite3_context*,int,sqlite3_value**),
+ void (*xFinal)(sqlite3_context*),
+ void(*xDestroy)(void*)
+);
+SQLITE_API int sqlite3_create_window_function(
+ sqlite3 *db,
+ const char *zFunctionName,
+ int nArg,
+ int eTextRep,
+ void *pApp,
+ void (*xStep)(sqlite3_context*,int,sqlite3_value**),
+ void (*xFinal)(sqlite3_context*),
+ void (*xValue)(sqlite3_context*),
+ void (*xInverse)(sqlite3_context*,int,sqlite3_value**),
+ void(*xDestroy)(void*)
+);
+
+/*
+** CAPI3REF: Text Encodings
+**
+** These constants define integer codes that represent the various
+** text encodings supported by SQLite.
+*/
+#define SQLITE_UTF8 1 /* IMP: R-37514-35566 */
+#define SQLITE_UTF16LE 2 /* IMP: R-03371-37637 */
+#define SQLITE_UTF16BE 3 /* IMP: R-51971-34154 */
+#define SQLITE_UTF16 4 /* Use native byte order */
+#define SQLITE_ANY 5 /* Deprecated */
+#define SQLITE_UTF16_ALIGNED 8 /* sqlite3_create_collation only */
+
+/*
+** CAPI3REF: Function Flags
+**
+** These constants may be ORed together with the
+** [SQLITE_UTF8 | preferred text encoding] as the fourth argument
+** to [sqlite3_create_function()], [sqlite3_create_function16()], or
+** [sqlite3_create_function_v2()].
+**
+** <dl>
+** [[SQLITE_DETERMINISTIC]] <dt>SQLITE_DETERMINISTIC</dt><dd>
+** The SQLITE_DETERMINISTIC flag means that the new function always gives
+** the same output when the input parameters are the same.
+** The [abs|abs() function] is deterministic, for example, but
+** [randomblob|randomblob()] is not. Functions must
+** be deterministic in order to be used in certain contexts such as
+** with the WHERE clause of [partial indexes] or in [generated columns].
+** SQLite might also optimize deterministic functions by factoring them
+** out of inner loops.
+** </dd>
+**
+** [[SQLITE_DIRECTONLY]] <dt>SQLITE_DIRECTONLY</dt><dd>
+** The SQLITE_DIRECTONLY flag means that the function may only be invoked
+** from top-level SQL, and cannot be used in VIEWs or TRIGGERs nor in
+** schema structures such as [CHECK constraints], [DEFAULT clauses],
+** [expression indexes], [partial indexes], or [generated columns].
+** The SQLITE_DIRECTONLY flag is a security feature which is recommended
+** for all [application-defined SQL functions], and especially for functions
+** that have side-effects or that could potentially leak sensitive
+** information.
+** </dd>
+**
+** [[SQLITE_INNOCUOUS]] <dt>SQLITE_INNOCUOUS</dt><dd>
+** The SQLITE_INNOCUOUS flag means that the function is unlikely
+** to cause problems even if misused. An innocuous function should have
+** no side effects and should not depend on any values other than its
+** input parameters. The [abs|abs() function] is an example of an
+** innocuous function.
+** The [load_extension() SQL function] is not innocuous because of its
+** side effects.
+** <p> SQLITE_INNOCUOUS is similar to SQLITE_DETERMINISTIC, but is not
+** exactly the same. The [random|random() function] is an example of a
+** function that is innocuous but not deterministic.
+** <p>Some heightened security settings
+** ([SQLITE_DBCONFIG_TRUSTED_SCHEMA] and [PRAGMA trusted_schema=OFF])
+** disable the use of SQL functions inside views and triggers and in
+** schema structures such as [CHECK constraints], [DEFAULT clauses],
+** [expression indexes], [partial indexes], and [generated columns] unless
+** the function is tagged with SQLITE_INNOCUOUS. Most built-in functions
+** are innocuous. Developers are advised to avoid using the
+** SQLITE_INNOCUOUS flag for application-defined functions unless the
+** function has been carefully audited and found to be free of potentially
+** security-adverse side-effects and information-leaks.
+** </dd>
+**
+** [[SQLITE_SUBTYPE]] <dt>SQLITE_SUBTYPE</dt><dd>
+** The SQLITE_SUBTYPE flag indicates to SQLite that a function may call
+** [sqlite3_value_subtype()] to inspect the sub-types of its arguments.
+** Specifying this flag makes no difference for scalar or aggregate user
+** functions. However, if it is not specified for a user-defined window
+** function, then any sub-types belonging to arguments passed to the window
+** function may be discarded before the window function is called (i.e.
+** sqlite3_value_subtype() will always return 0).
+** </dd>
+** </dl>
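+**
+** As a brief illustration (the function name and its implementation
+** secretFunc are hypothetical), the flags are ORed into the fourth
+** argument of the function creation routines:
+**
+** <blockquote><pre>
+**    sqlite3_create_function_v2(db, "app_secret", 0,
+**        SQLITE_UTF8|SQLITE_DIRECTONLY, 0, secretFunc, 0, 0, 0);
+** </pre></blockquote>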
+*/
+#define SQLITE_DETERMINISTIC 0x000000800
+#define SQLITE_DIRECTONLY 0x000080000
+#define SQLITE_SUBTYPE 0x000100000
+#define SQLITE_INNOCUOUS 0x000200000
+
+/*
+** CAPI3REF: Deprecated Functions
+** DEPRECATED
+**
+** These functions are [deprecated]. In order to maintain
+** backwards compatibility with older code, these functions continue
+** to be supported. However, new applications should avoid
+** the use of these functions. To encourage programmers to avoid
+** these functions, we will not explain what they do.
+*/
+#ifndef SQLITE_OMIT_DEPRECATED
+SQLITE_API SQLITE_DEPRECATED int sqlite3_aggregate_count(sqlite3_context*);
+SQLITE_API SQLITE_DEPRECATED int sqlite3_expired(sqlite3_stmt*);
+SQLITE_API SQLITE_DEPRECATED int sqlite3_transfer_bindings(sqlite3_stmt*, sqlite3_stmt*);
+SQLITE_API SQLITE_DEPRECATED int sqlite3_global_recover(void);
+SQLITE_API SQLITE_DEPRECATED void sqlite3_thread_cleanup(void);
+SQLITE_API SQLITE_DEPRECATED int sqlite3_memory_alarm(void(*)(void*,sqlite3_int64,int),
+ void*,sqlite3_int64);
+#endif
+
+/*
+** CAPI3REF: Obtaining SQL Values
+** METHOD: sqlite3_value
+**
+** <b>Summary:</b>
+** <blockquote><table border=0 cellpadding=0 cellspacing=0>
+** <tr><td><b>sqlite3_value_blob</b><td>&rarr;<td>BLOB value
+** <tr><td><b>sqlite3_value_double</b><td>&rarr;<td>REAL value
+** <tr><td><b>sqlite3_value_int</b><td>&rarr;<td>32-bit INTEGER value
+** <tr><td><b>sqlite3_value_int64</b><td>&rarr;<td>64-bit INTEGER value
+** <tr><td><b>sqlite3_value_pointer</b><td>&rarr;<td>Pointer value
+** <tr><td><b>sqlite3_value_text</b><td>&rarr;<td>UTF-8 TEXT value
+** <tr><td><b>sqlite3_value_text16</b><td>&rarr;<td>UTF-16 TEXT value in
+** the native byteorder
+** <tr><td><b>sqlite3_value_text16be</b><td>&rarr;<td>UTF-16be TEXT value
+** <tr><td><b>sqlite3_value_text16le</b><td>&rarr;<td>UTF-16le TEXT value
+** <tr><td>&nbsp;<td>&nbsp;<td>&nbsp;
+** <tr><td><b>sqlite3_value_bytes</b><td>&rarr;<td>Size of a BLOB
+** or a UTF-8 TEXT in bytes
+** <tr><td><b>sqlite3_value_bytes16&nbsp;&nbsp;</b>
+** <td>&rarr;&nbsp;&nbsp;<td>Size of UTF-16
+** TEXT in bytes
+** <tr><td><b>sqlite3_value_type</b><td>&rarr;<td>Default
+** datatype of the value
+** <tr><td><b>sqlite3_value_numeric_type&nbsp;&nbsp;</b>
+** <td>&rarr;&nbsp;&nbsp;<td>Best numeric datatype of the value
+** <tr><td><b>sqlite3_value_nochange&nbsp;&nbsp;</b>
+** <td>&rarr;&nbsp;&nbsp;<td>True if the column is unchanged in an UPDATE
+** against a virtual table.
+** <tr><td><b>sqlite3_value_frombind&nbsp;&nbsp;</b>
+** <td>&rarr;&nbsp;&nbsp;<td>True if value originated from a [bound parameter]
+** </table></blockquote>
+**
+** <b>Details:</b>
+**
+** These routines extract type, size, and content information from
+** [protected sqlite3_value] objects. Protected sqlite3_value objects
+** are used to pass parameter information into the functions that
+** implement [application-defined SQL functions] and [virtual tables].
+**
+** These routines work only with [protected sqlite3_value] objects.
+** Any attempt to use these routines on an [unprotected sqlite3_value]
+** is not threadsafe.
+**
+** ^These routines work just like the corresponding [column access functions]
+** except that these routines take a single [protected sqlite3_value] object
+** pointer instead of a [sqlite3_stmt*] pointer and an integer column number.
+**
+** ^The sqlite3_value_text16() interface extracts a UTF-16 string
+** in the native byte-order of the host machine. ^The
+** sqlite3_value_text16be() and sqlite3_value_text16le() interfaces
+** extract UTF-16 strings as big-endian and little-endian respectively.
+**
+** ^If [sqlite3_value] object V was initialized
+** using [sqlite3_bind_pointer(S,I,P,X,D)] or [sqlite3_result_pointer(C,P,X,D)]
+** and if X and Y are strings that compare equal according to strcmp(X,Y),
+** then sqlite3_value_pointer(V,Y) will return the pointer P. ^Otherwise,
+** sqlite3_value_pointer(V,Y) returns a NULL. The sqlite3_bind_pointer()
+** routine is part of the [pointer passing interface] added for SQLite 3.20.0.
+**
+** ^(The sqlite3_value_type(V) interface returns the
+** [SQLITE_INTEGER | datatype code] for the initial datatype of the
+** [sqlite3_value] object V. The returned value is one of [SQLITE_INTEGER],
+** [SQLITE_FLOAT], [SQLITE_TEXT], [SQLITE_BLOB], or [SQLITE_NULL].)^
+** Other interfaces might change the datatype for an sqlite3_value object.
+** For example, if the datatype is initially SQLITE_INTEGER and
+** sqlite3_value_text(V) is called to extract a text value for that
+** integer, then subsequent calls to sqlite3_value_type(V) might return
+** SQLITE_TEXT. Whether or not a persistent internal datatype conversion
+** occurs is undefined and may change from one release of SQLite to the next.
+**
+** ^(The sqlite3_value_numeric_type() interface attempts to apply
+** numeric affinity to the value. This means that an attempt is
+** made to convert the value to an integer or floating point. If
+** such a conversion is possible without loss of information (in other
+** words, if the value is a string that looks like a number)
+** then the conversion is performed. Otherwise no conversion occurs.
+** The [SQLITE_INTEGER | datatype] after conversion is returned.)^
+**
+** ^Within the [xUpdate] method of a [virtual table], the
+** sqlite3_value_nochange(X) interface returns true if and only if
+** the column corresponding to X is unchanged by the UPDATE operation
+** that the xUpdate method call was invoked to implement, and if
+** the prior [xColumn] method call that was invoked to extract
+** the value for that column returned without setting a result (probably
+** because it queried [sqlite3_vtab_nochange()] and found that the column
+** was unchanging). ^Within an [xUpdate] method, any value for which
+** sqlite3_value_nochange(X) is true will in all other respects appear
+** to be a NULL value. If sqlite3_value_nochange(X) is invoked anywhere other
+** than within an [xUpdate] method call for an UPDATE statement, then
+** the return value is arbitrary and meaningless.
+**
+** ^The sqlite3_value_frombind(X) interface returns non-zero if the
+** value X originated from one of the [sqlite3_bind_int|sqlite3_bind()]
+** interfaces. ^If X comes from an SQL literal value, or a table column,
+** or an expression, then sqlite3_value_frombind(X) returns zero.
+**
+** Please pay particular attention to the fact that the pointer returned
+** from [sqlite3_value_blob()], [sqlite3_value_text()], or
+** [sqlite3_value_text16()] can be invalidated by a subsequent call to
+** [sqlite3_value_bytes()], [sqlite3_value_bytes16()], [sqlite3_value_text()],
+** or [sqlite3_value_text16()].
+**
+** These routines must be called from the same thread as
+** the SQL function that supplied the [sqlite3_value*] parameters.
+**
+** As long as the input parameter is correct, these routines can only
+** fail if an out-of-memory error occurs during a format conversion.
+** Only the following subset of interfaces is subject to out-of-memory
+** errors:
+**
+** <ul>
+** <li> sqlite3_value_blob()
+** <li> sqlite3_value_text()
+** <li> sqlite3_value_text16()
+** <li> sqlite3_value_text16le()
+** <li> sqlite3_value_text16be()
+** <li> sqlite3_value_bytes()
+** <li> sqlite3_value_bytes16()
+** </ul>
+**
+** If an out-of-memory error occurs, then the return value from these
+** routines is the same as if the column had contained an SQL NULL value.
+** Valid SQL NULL returns can be distinguished from out-of-memory errors
+** by invoking [sqlite3_errcode()] immediately after the suspect
+** return value is obtained and before any
+** other SQLite interface is called on the same [database connection].
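+**
+** A brief sketch of type dispatch inside a hypothetical
+** [application-defined SQL function] implementation:
+**
+** <blockquote><pre>
+**    static void myFunc(sqlite3_context *ctx, int argc, sqlite3_value **argv){
+**      switch( sqlite3_value_type(argv[0]) ){
+**        case SQLITE_INTEGER: { sqlite3_int64 v = sqlite3_value_int64(argv[0]); break; }
+**        case SQLITE_NULL:    { /* handle NULL */ break; }
+**        default:             { const unsigned char *z = sqlite3_value_text(argv[0]); break; }
+**      }
+**    }
+** </pre></blockquote>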
+*/
+SQLITE_API const void *sqlite3_value_blob(sqlite3_value*);
+SQLITE_API double sqlite3_value_double(sqlite3_value*);
+SQLITE_API int sqlite3_value_int(sqlite3_value*);
+SQLITE_API sqlite3_int64 sqlite3_value_int64(sqlite3_value*);
+SQLITE_API void *sqlite3_value_pointer(sqlite3_value*, const char*);
+SQLITE_API const unsigned char *sqlite3_value_text(sqlite3_value*);
+SQLITE_API const void *sqlite3_value_text16(sqlite3_value*);
+SQLITE_API const void *sqlite3_value_text16le(sqlite3_value*);
+SQLITE_API const void *sqlite3_value_text16be(sqlite3_value*);
+SQLITE_API int sqlite3_value_bytes(sqlite3_value*);
+SQLITE_API int sqlite3_value_bytes16(sqlite3_value*);
+SQLITE_API int sqlite3_value_type(sqlite3_value*);
+SQLITE_API int sqlite3_value_numeric_type(sqlite3_value*);
+SQLITE_API int sqlite3_value_nochange(sqlite3_value*);
+SQLITE_API int sqlite3_value_frombind(sqlite3_value*);
+
+/*
+** CAPI3REF: Finding The Subtype Of SQL Values
+** METHOD: sqlite3_value
+**
+** The sqlite3_value_subtype(V) function returns the subtype for
+** an [application-defined SQL function] argument V. The subtype
+** information can be used to pass a limited amount of context from
+** one SQL function to another. Use the [sqlite3_result_subtype()]
+** routine to set the subtype for the return value of an SQL function.
+*/
+SQLITE_API unsigned int sqlite3_value_subtype(sqlite3_value*);
+
+/*
+** CAPI3REF: Copy And Free SQL Values
+** METHOD: sqlite3_value
+**
+** ^The sqlite3_value_dup(V) interface makes a copy of the [sqlite3_value]
+** object V and returns a pointer to that copy. ^The [sqlite3_value] returned
+** is a [protected sqlite3_value] object even if the input is not.
+** ^The sqlite3_value_dup(V) interface returns NULL if V is NULL or if a
+** memory allocation fails.
+**
+** ^The sqlite3_value_free(V) interface frees an [sqlite3_value] object
+** previously obtained from [sqlite3_value_dup()]. ^If V is a NULL pointer
+** then sqlite3_value_free(V) is a harmless no-op.
+*/
+SQLITE_API sqlite3_value *sqlite3_value_dup(const sqlite3_value*);
+SQLITE_API void sqlite3_value_free(sqlite3_value*);
+
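+/*
+** A minimal sketch of retaining an argument value beyond the duration of
+** a function call (the surrounding function body is assumed; "ctx" and
+** "argv" are the usual sqlite3_context and argument-array parameters):
+**
+** <blockquote><pre>
+** sqlite3_value *pCopy = sqlite3_value_dup(*argv);
+** if( pCopy==0 ){
+** &nbsp; sqlite3_result_error_nomem(ctx);   // the allocation failed
+** &nbsp; return;
+** }
+** // ... retain pCopy across calls; eventually release it with:
+** sqlite3_value_free(pCopy);
+** </pre></blockquote>
+*/
+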
+/*
+** CAPI3REF: Obtain Aggregate Function Context
+** METHOD: sqlite3_context
+**
+** Implementations of aggregate SQL functions use this
+** routine to allocate memory for storing their state.
+**
+** ^The first time the sqlite3_aggregate_context(C,N) routine is called
+** for a particular aggregate function, SQLite allocates
+** N bytes of memory, zeroes out that memory, and returns a pointer
+** to the new memory. ^On second and subsequent calls to
+** sqlite3_aggregate_context() for the same aggregate function instance,
+** the same buffer is returned. The sqlite3_aggregate_context() routine is
+** normally called once for each invocation of the xStep callback and then one
+** last time when the xFinal callback is invoked. ^(When no rows match
+** an aggregate query, the xStep() callback of the aggregate function
+** implementation is never called and xFinal() is called exactly once.
+** In those cases, sqlite3_aggregate_context() might be called for the
+** first time from within xFinal().)^
+**
+** ^The sqlite3_aggregate_context(C,N) routine returns a NULL pointer
+** when first called if N is less than or equal to zero or if a memory
+** allocation error occurs.
+**
+** ^(The amount of space allocated by sqlite3_aggregate_context(C,N) is
+** determined by the N parameter on first successful call. Changing the
+** value of N in any subsequent call to sqlite3_aggregate_context() within
+** the same aggregate function instance will not resize the memory
+** allocation.)^ Within the xFinal callback, it is customary to set
+** N=0 in calls to sqlite3_aggregate_context(C,N) so that no
+** pointless memory allocations occur.
+**
+** ^SQLite automatically frees the memory allocated by
+** sqlite3_aggregate_context() when the aggregate query concludes.
+**
+** The first parameter must be a copy of the
+** [sqlite3_context | SQL function context] that is the first parameter
+** to the xStep or xFinal callback routine that implements the aggregate
+** function.
+**
+** This routine must be called from the same thread in which
+** the aggregate SQL function is running.
+*/
+SQLITE_API void *sqlite3_aggregate_context(sqlite3_context*, int nBytes);
+
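+/*
+** The following sketch of a simplified SUM()-style aggregate (the names
+** are illustrative) shows the usual pattern: request sizeof(state) in
+** xStep, and pass N=0 in xFinal to avoid a pointless allocation:
+**
+** <blockquote><pre>
+** typedef struct SumCtx { double rSum; } SumCtx;
+** static void sumStep(sqlite3_context *ctx, int argc, sqlite3_value **argv){
+** &nbsp; SumCtx *p = (SumCtx*)sqlite3_aggregate_context(ctx, sizeof(SumCtx));
+** &nbsp; if( p==0 ){ sqlite3_result_error_nomem(ctx); return; }
+** &nbsp; p->rSum += sqlite3_value_double(*argv);
+** }
+** static void sumFinal(sqlite3_context *ctx){
+** &nbsp; SumCtx *p = (SumCtx*)sqlite3_aggregate_context(ctx, 0);
+** &nbsp; sqlite3_result_double(ctx, p ? p->rSum : 0.0);  // no rows: p may be 0
+** }
+** </pre></blockquote>
+*/
+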
+/*
+** CAPI3REF: User Data For Functions
+** METHOD: sqlite3_context
+**
+** ^The sqlite3_user_data() interface returns a copy of
+** the pointer that was the pUserData parameter (the 5th parameter)
+** of the [sqlite3_create_function()]
+** and [sqlite3_create_function16()] routines that originally
+** registered the application defined function.
+**
+** This routine must be called from the same thread in which
+** the application-defined function is running.
+*/
+SQLITE_API void *sqlite3_user_data(sqlite3_context*);
+
+/*
+** CAPI3REF: Database Connection For Functions
+** METHOD: sqlite3_context
+**
+** ^The sqlite3_context_db_handle() interface returns a copy of
+** the pointer to the [database connection] (the 1st parameter)
+** of the [sqlite3_create_function()]
+** and [sqlite3_create_function16()] routines that originally
+** registered the application defined function.
+*/
+SQLITE_API sqlite3 *sqlite3_context_db_handle(sqlite3_context*);
+
+/*
+** CAPI3REF: Function Auxiliary Data
+** METHOD: sqlite3_context
+**
+** These functions may be used by (non-aggregate) SQL functions to
+** associate metadata with argument values. If the same value is passed to
+** multiple invocations of the same SQL function during query execution, under
+** some circumstances the associated metadata may be preserved. An example
+** of where this might be useful is in a regular-expression matching
+** function. The compiled version of the regular expression can be stored as
+** metadata associated with the pattern string.
+** Then as long as the pattern string remains the same,
+** the compiled regular expression can be reused on multiple
+** invocations of the same function.
+**
+** ^The sqlite3_get_auxdata(C,N) interface returns a pointer to the metadata
+** associated by the sqlite3_set_auxdata(C,N,P,X) function with the Nth argument
+** value to the application-defined function. ^N is zero for the left-most
+** function argument. ^If there is no metadata
+** associated with the function argument, the sqlite3_get_auxdata(C,N) interface
+** returns a NULL pointer.
+**
+** ^The sqlite3_set_auxdata(C,N,P,X) interface saves P as metadata for the N-th
+** argument of the application-defined function. ^Subsequent
+** calls to sqlite3_get_auxdata(C,N) return P from the most recent
+** sqlite3_set_auxdata(C,N,P,X) call if the metadata is still valid or
+** NULL if the metadata has been discarded.
+** ^After each call to sqlite3_set_auxdata(C,N,P,X) where X is not NULL,
+** SQLite will invoke the destructor function X with parameter P exactly
+** once, when the metadata is discarded.
+** SQLite is free to discard the metadata at any time, including: <ul>
+** <li> ^(when the corresponding function parameter changes)^, or
+** <li> ^(when [sqlite3_reset()] or [sqlite3_finalize()] is called for the
+** SQL statement)^, or
+** <li> ^(when sqlite3_set_auxdata() is invoked again on the same
+** parameter)^, or
+** <li> ^(during the original sqlite3_set_auxdata() call when a memory
+** allocation error occurs.)^ </ul>
+**
+** Note the last bullet in particular. The destructor X in
+** sqlite3_set_auxdata(C,N,P,X) might be called immediately, before the
+** sqlite3_set_auxdata() interface even returns. Hence sqlite3_set_auxdata()
+** should be called near the end of the function implementation and the
+** function implementation should not make any use of P after
+** sqlite3_set_auxdata() has been called.
+**
+** ^(In practice, metadata is preserved between function calls for
+** function parameters that are compile-time constants, including literal
+** values and [parameters] and expressions composed from the same.)^
+**
+** The value of the N parameter to these interfaces should be non-negative.
+** Future enhancements may make use of negative N values to define new
+** kinds of function caching behavior.
+**
+** These routines must be called from the same thread in which
+** the SQL function is running.
+*/
+SQLITE_API void *sqlite3_get_auxdata(sqlite3_context*, int N);
+SQLITE_API void sqlite3_set_auxdata(sqlite3_context*, int N, void*, void (*)(void*));
+
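+/*
+** A sketch of the regular-expression caching pattern described above,
+** using the POSIX &lt;regex.h&gt; API for illustration (a real extension might
+** use a different engine; note the re-fetch after sqlite3_set_auxdata(),
+** since the metadata may be discarded before that call returns):
+**
+** <blockquote><pre>
+** static void reDel(void *p){ regfree((regex_t*)p); sqlite3_free(p); }
+** static void regexpFunc(sqlite3_context *ctx, int argc, sqlite3_value **argv){
+** &nbsp; regex_t *pRe = (regex_t*)sqlite3_get_auxdata(ctx, 0);
+** &nbsp; if( pRe==0 ){
+** &nbsp;&nbsp; const char *zPat = (const char*)sqlite3_value_text(*argv);
+** &nbsp;&nbsp; if( zPat==0 ) return;   // NULL pattern: leave the result NULL
+** &nbsp;&nbsp; pRe = (regex_t*)sqlite3_malloc(sizeof(*pRe));
+** &nbsp;&nbsp; if( pRe==0 ){ sqlite3_result_error_nomem(ctx); return; }
+** &nbsp;&nbsp; if( regcomp(pRe, zPat, REG_EXTENDED) ){
+** &nbsp;&nbsp;&nbsp; sqlite3_free(pRe);
+** &nbsp;&nbsp;&nbsp; sqlite3_result_error(ctx, "bad pattern", -1);
+** &nbsp;&nbsp;&nbsp; return;
+** &nbsp;&nbsp; }
+** &nbsp;&nbsp; sqlite3_set_auxdata(ctx, 0, pRe, reDel);
+** &nbsp;&nbsp; pRe = (regex_t*)sqlite3_get_auxdata(ctx, 0);
+** &nbsp;&nbsp; if( pRe==0 ){ sqlite3_result_error_nomem(ctx); return; }
+** &nbsp; }
+** &nbsp; // ... match the remaining argument(s) against pRe ...
+** }
+** </pre></blockquote>
+*/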
+
+/*
+** CAPI3REF: Constants Defining Special Destructor Behavior
+**
+** These are special values for the destructor that is passed in as the
+** final argument to routines like [sqlite3_result_blob()]. ^If the destructor
+** argument is SQLITE_STATIC, it means that the content pointer is constant
+** and will never change. It does not need to be destroyed. ^The
+** SQLITE_TRANSIENT value means that the content will likely change in
+** the near future and that SQLite should make its own private copy of
+** the content before returning.
+**
+** The typedef is necessary to work around problems in certain
+** C++ compilers.
+*/
+typedef void (*sqlite3_destructor_type)(void*);
+#define SQLITE_STATIC ((sqlite3_destructor_type)0)
+#define SQLITE_TRANSIENT ((sqlite3_destructor_type)-1)
+
+/*
+** CAPI3REF: Setting The Result Of An SQL Function
+** METHOD: sqlite3_context
+**
+** These routines are used by the xFunc or xFinal callbacks that
+** implement SQL functions and aggregates. See
+** [sqlite3_create_function()] and [sqlite3_create_function16()]
+** for additional information.
+**
+** These functions work very much like the [parameter binding] family of
+** functions used to bind values to host parameters in prepared statements.
+** Refer to the [SQL parameter] documentation for additional information.
+**
+** ^The sqlite3_result_blob() interface sets the result from
+** an application-defined function to be the BLOB whose content is pointed
+** to by the second parameter and which is N bytes long where N is the
+** third parameter.
+**
+** ^The sqlite3_result_zeroblob(C,N) and sqlite3_result_zeroblob64(C,N)
+** interfaces set the result of the application-defined function to be
+** a BLOB containing all zero bytes and N bytes in size.
+**
+** ^The sqlite3_result_double() interface sets the result from
+** an application-defined function to be a floating point value specified
+** by its 2nd argument.
+**
+** ^The sqlite3_result_error() and sqlite3_result_error16() functions
+** cause the implemented SQL function to throw an exception.
+** ^SQLite uses the string pointed to by the
+** 2nd parameter of sqlite3_result_error() or sqlite3_result_error16()
+** as the text of an error message. ^SQLite interprets the error
+** message string from sqlite3_result_error() as UTF-8. ^SQLite
+** interprets the string from sqlite3_result_error16() as UTF-16 using
+** the same [byte-order determination rules] as [sqlite3_bind_text16()].
+** ^If the third parameter to sqlite3_result_error()
+** or sqlite3_result_error16() is negative then SQLite takes as the error
+** message all text up through the first zero character.
+** ^If the third parameter to sqlite3_result_error() or
+** sqlite3_result_error16() is non-negative then SQLite takes that many
+** bytes (not characters) from the 2nd parameter as the error message.
+** ^The sqlite3_result_error() and sqlite3_result_error16()
+** routines make a private copy of the error message text before
+** they return. Hence, the calling function can deallocate or
+** modify the text after they return without harm.
+** ^The sqlite3_result_error_code() function changes the error code
+** returned by SQLite as a result of an error in a function. ^By default,
+** the error code is SQLITE_ERROR. ^A subsequent call to sqlite3_result_error()
+** or sqlite3_result_error16() resets the error code to SQLITE_ERROR.
+**
+** ^The sqlite3_result_error_toobig() interface causes SQLite to throw an
+** error indicating that a string or BLOB is too long to represent.
+**
+** ^The sqlite3_result_error_nomem() interface causes SQLite to throw an
+** error indicating that a memory allocation failed.
+**
+** ^The sqlite3_result_int() interface sets the return value
+** of the application-defined function to be the 32-bit signed integer
+** value given in the 2nd argument.
+** ^The sqlite3_result_int64() interface sets the return value
+** of the application-defined function to be the 64-bit signed integer
+** value given in the 2nd argument.
+**
+** ^The sqlite3_result_null() interface sets the return value
+** of the application-defined function to be NULL.
+**
+** ^The sqlite3_result_text(), sqlite3_result_text16(),
+** sqlite3_result_text16le(), and sqlite3_result_text16be() interfaces
+** set the return value of the application-defined function to be
+** a text string which is represented as UTF-8, UTF-16 native byte order,
+** UTF-16 little endian, or UTF-16 big endian, respectively.
+** ^The sqlite3_result_text64() interface sets the return value of an
+** application-defined function to be a text string in an encoding
+** specified by the fifth (and last) parameter, which must be one
+** of [SQLITE_UTF8], [SQLITE_UTF16], [SQLITE_UTF16BE], or [SQLITE_UTF16LE].
+** ^SQLite takes the text result from the application from
+** the 2nd parameter of the sqlite3_result_text* interfaces.
+** ^If the 3rd parameter to the sqlite3_result_text* interfaces
+** is negative, then SQLite takes result text from the 2nd parameter
+** through the first zero character.
+** ^If the 3rd parameter to the sqlite3_result_text* interfaces
+** is non-negative, then as many bytes (not characters) of the text
+** pointed to by the 2nd parameter are taken as the application-defined
+** function result. If the 3rd parameter is non-negative, then it
+** must be the byte offset into the string where the NUL terminator would
+** appear if the string were NUL terminated. If any NUL characters occur
+** in the string at a byte offset that is less than the value of the 3rd
+** parameter, then the resulting string will contain embedded NULs and the
+** result of expressions operating on strings with embedded NULs is undefined.
+** ^If the 4th parameter to the sqlite3_result_text* interfaces
+** or sqlite3_result_blob is a non-NULL pointer, then SQLite calls that
+** function as the destructor on the text or BLOB result when it has
+** finished using that result.
+** ^If the 4th parameter to the sqlite3_result_text* interfaces or to
+** sqlite3_result_blob is the special constant SQLITE_STATIC, then SQLite
+** assumes that the text or BLOB result is in constant space and does not
+** copy the content of the parameter nor call a destructor on the content
+** when it has finished using that result.
+** ^If the 4th parameter to the sqlite3_result_text* interfaces
+** or sqlite3_result_blob is the special constant SQLITE_TRANSIENT
+** then SQLite makes a copy of the result into space obtained
+** from [sqlite3_malloc()] before it returns.
+**
+** ^For the sqlite3_result_text16(), sqlite3_result_text16le(), and
+** sqlite3_result_text16be() routines, and for sqlite3_result_text64()
+** when the encoding is not UTF8, if the input UTF16 begins with a
+** byte-order mark (BOM, U+FEFF) then the BOM is removed from the
+** string and the rest of the string is interpreted according to the
+** byte-order specified by the BOM. ^The byte-order specified by
+** the BOM at the beginning of the text overrides the byte-order
+** specified by the interface procedure. ^So, for example, if
+** sqlite3_result_text16le() is invoked with text that begins
+** with bytes 0xfe, 0xff (a big-endian byte-order mark) then the
+** first two bytes of input are skipped and the remaining input
+** is interpreted as UTF16BE text.
+**
+** ^For UTF16 input text to the sqlite3_result_text16(),
+** sqlite3_result_text16be(), sqlite3_result_text16le(), and
+** sqlite3_result_text64() routines, if the text contains invalid
+** UTF16 characters, the invalid characters might be converted
+** into the Unicode replacement character, U+FFFD.
+**
+** ^The sqlite3_result_value() interface sets the result of
+** the application-defined function to be a copy of the
+** [unprotected sqlite3_value] object specified by the 2nd parameter. ^The
+** sqlite3_result_value() interface makes a copy of the [sqlite3_value]
+** so that the [sqlite3_value] specified in the parameter may change or
+** be deallocated after sqlite3_result_value() returns without harm.
+** ^A [protected sqlite3_value] object may always be used where an
+** [unprotected sqlite3_value] object is required, so either
+** kind of [sqlite3_value] object can be used with this interface.
+**
+** ^The sqlite3_result_pointer(C,P,T,D) interface sets the result to an
+** SQL NULL value, just like [sqlite3_result_null(C)], except that it
+** also associates the host-language pointer P of type T with that
+** NULL value such that the pointer can be retrieved within an
+** [application-defined SQL function] using [sqlite3_value_pointer()].
+** ^If the D parameter is not NULL, then it is a pointer to a destructor
+** for the P parameter. ^SQLite invokes D with P as its only argument
+** when SQLite is finished with P. The T parameter should be a static
+** string and preferably a string literal. The sqlite3_result_pointer()
+** routine is part of the [pointer passing interface] added for SQLite 3.20.0.
+**
+** If these routines are called from within a different thread
+** than the one containing the application-defined function that received
+** the [sqlite3_context] pointer, the results are undefined.
+*/
+SQLITE_API void sqlite3_result_blob(sqlite3_context*, const void*, int, void(*)(void*));
+SQLITE_API void sqlite3_result_blob64(sqlite3_context*,const void*,
+ sqlite3_uint64,void(*)(void*));
+SQLITE_API void sqlite3_result_double(sqlite3_context*, double);
+SQLITE_API void sqlite3_result_error(sqlite3_context*, const char*, int);
+SQLITE_API void sqlite3_result_error16(sqlite3_context*, const void*, int);
+SQLITE_API void sqlite3_result_error_toobig(sqlite3_context*);
+SQLITE_API void sqlite3_result_error_nomem(sqlite3_context*);
+SQLITE_API void sqlite3_result_error_code(sqlite3_context*, int);
+SQLITE_API void sqlite3_result_int(sqlite3_context*, int);
+SQLITE_API void sqlite3_result_int64(sqlite3_context*, sqlite3_int64);
+SQLITE_API void sqlite3_result_null(sqlite3_context*);
+SQLITE_API void sqlite3_result_text(sqlite3_context*, const char*, int, void(*)(void*));
+SQLITE_API void sqlite3_result_text64(sqlite3_context*, const char*,sqlite3_uint64,
+ void(*)(void*), unsigned char encoding);
+SQLITE_API void sqlite3_result_text16(sqlite3_context*, const void*, int, void(*)(void*));
+SQLITE_API void sqlite3_result_text16le(sqlite3_context*, const void*, int,void(*)(void*));
+SQLITE_API void sqlite3_result_text16be(sqlite3_context*, const void*, int,void(*)(void*));
+SQLITE_API void sqlite3_result_value(sqlite3_context*, sqlite3_value*);
+SQLITE_API void sqlite3_result_pointer(sqlite3_context*, void*,const char*,void(*)(void*));
+SQLITE_API void sqlite3_result_zeroblob(sqlite3_context*, int n);
+SQLITE_API int sqlite3_result_zeroblob64(sqlite3_context*, sqlite3_uint64 n);
+
+
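+/*
+** A sketch of the destructor conventions for text results (the function
+** name is illustrative): memory obtained from sqlite3_mprintf() is owned
+** by sqlite3_malloc(), so sqlite3_free() is the appropriate destructor;
+** a string literal would instead be passed with SQLITE_STATIC, and a
+** stack buffer with SQLITE_TRANSIENT:
+**
+** <blockquote><pre>
+** static void greetFunc(sqlite3_context *ctx, int argc, sqlite3_value **argv){
+** &nbsp; char *z = sqlite3_mprintf("hello, %s",
+** &nbsp;&nbsp;&nbsp;&nbsp; (const char*)sqlite3_value_text(*argv));
+** &nbsp; if( z==0 ){ sqlite3_result_error_nomem(ctx); return; }
+** &nbsp; sqlite3_result_text(ctx, z, -1, sqlite3_free);
+** }
+** </pre></blockquote>
+*/
+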
+/*
+** CAPI3REF: Setting The Subtype Of An SQL Function
+** METHOD: sqlite3_context
+**
+** The sqlite3_result_subtype(C,T) function causes the subtype of
+** the result from the [application-defined SQL function] with
+** [sqlite3_context] C to be the value T. Only the lower 8 bits
+** of the subtype T are preserved in current versions of SQLite;
+** higher order bits are discarded.
+** The number of subtype bytes preserved by SQLite might increase
+** in future releases of SQLite.
+*/
+SQLITE_API void sqlite3_result_subtype(sqlite3_context*,unsigned int);
+
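+/*
+** A sketch of a producer/consumer pair using subtypes (the tag value is
+** arbitrary; only its low 8 bits are preserved):
+**
+** <blockquote><pre>
+** #define MY_SUBTYPE 0x4a
+** static void tagFunc(sqlite3_context *ctx, int argc, sqlite3_value **argv){
+** &nbsp; sqlite3_result_value(ctx, *argv);
+** &nbsp; sqlite3_result_subtype(ctx, MY_SUBTYPE);
+** }
+** static void checkFunc(sqlite3_context *ctx, int argc, sqlite3_value **argv){
+** &nbsp; sqlite3_result_int(ctx, sqlite3_value_subtype(*argv)==MY_SUBTYPE);
+** }
+** </pre></blockquote>
+*/
+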
+/*
+** CAPI3REF: Define New Collating Sequences
+** METHOD: sqlite3
+**
+** ^These functions add, remove, or modify a [collation] associated
+** with the [database connection] specified as the first argument.
+**
+** ^The name of the collation is a UTF-8 string
+** for sqlite3_create_collation() and sqlite3_create_collation_v2()
+** and a UTF-16 string in native byte order for sqlite3_create_collation16().
+** ^Collation names that compare equal according to [sqlite3_strnicmp()] are
+** considered to be the same name.
+**
+** ^(The third argument (eTextRep) must be one of the constants:
+** <ul>
+** <li> [SQLITE_UTF8],
+** <li> [SQLITE_UTF16LE],
+** <li> [SQLITE_UTF16BE],
+** <li> [SQLITE_UTF16], or
+** <li> [SQLITE_UTF16_ALIGNED].
+** </ul>)^
+** ^The eTextRep argument determines the encoding of strings passed
+** to the collating function callback, xCompare.
+** ^The [SQLITE_UTF16] and [SQLITE_UTF16_ALIGNED] values for eTextRep
+** force strings to be UTF16 with native byte order.
+** ^The [SQLITE_UTF16_ALIGNED] value for eTextRep forces strings to begin
+** on an even byte address.
+**
+** ^The fourth argument, pArg, is an application data pointer that is passed
+** through as the first argument to the collating function callback.
+**
+** ^The fifth argument, xCompare, is a pointer to the collating function.
+** ^Multiple collating functions can be registered using the same name but
+** with different eTextRep parameters and SQLite will use whichever
+** function requires the least amount of data transformation.
+** ^If the xCompare argument is NULL then the collating function is
+** deleted. ^When all collating functions having the same name are deleted,
+** that collation is no longer usable.
+**
+** ^The collating function callback is invoked with a copy of the pArg
+** application data pointer and with two strings in the encoding specified
+** by the eTextRep argument. The two integer parameters to the collating
+** function callback are the lengths of the two strings, in bytes. The collating
+** function must return an integer that is negative, zero, or positive
+** if the first string is less than, equal to, or greater than the second,
+** respectively. A collating function must always return the same answer
+** given the same inputs. If two or more collating functions are registered
+** to the same collation name (using different eTextRep values) then all
+** must give an equivalent answer when invoked with equivalent strings.
+** The collating function must obey the following properties for all
+** strings A, B, and C:
+**
+** <ol>
+** <li> If A==B then B==A.
+** <li> If A==B and B==C then A==C.
+** <li> If A&lt;B then B&gt;A.
+** <li> If A&lt;B and B&lt;C then A&lt;C.
+** </ol>
+**
+** If a collating function fails any of the above constraints and that
+** collating function is registered and used, then the behavior of SQLite
+** is undefined.
+**
+** ^The sqlite3_create_collation_v2() interface works like sqlite3_create_collation()
+** with the addition that the xDestroy callback is invoked on pArg when
+** the collating function is deleted.
+** ^Collating functions are deleted when they are overridden by later
+** calls to the collation creation functions or when the
+** [database connection] is closed using [sqlite3_close()].
+**
+** ^The xDestroy callback is <u>not</u> called if the
+** sqlite3_create_collation_v2() function fails. Applications that invoke
+** sqlite3_create_collation_v2() with a non-NULL xDestroy argument should
+** check the return code and dispose of the application data pointer
+** themselves rather than expecting SQLite to deal with it for them.
+** This is different from every other SQLite interface. The inconsistency
+** is unfortunate but cannot be changed without breaking backwards
+** compatibility.
+**
+** See also: [sqlite3_collation_needed()] and [sqlite3_collation_needed16()].
+*/
+SQLITE_API int sqlite3_create_collation(
+ sqlite3*,
+ const char *zName,
+ int eTextRep,
+ void *pArg,
+ int(*xCompare)(void*,int,const void*,int,const void*)
+);
+SQLITE_API int sqlite3_create_collation_v2(
+ sqlite3*,
+ const char *zName,
+ int eTextRep,
+ void *pArg,
+ int(*xCompare)(void*,int,const void*,int,const void*),
+ void(*xDestroy)(void*)
+);
+SQLITE_API int sqlite3_create_collation16(
+ sqlite3*,
+ const void *zName,
+ int eTextRep,
+ void *pArg,
+ int(*xCompare)(void*,int,const void*,int,const void*)
+);
+
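+/*
+** A sketch of a case-insensitive collation built on [sqlite3_strnicmp()]
+** (which folds ASCII case only; the collation name "mynocase" is
+** arbitrary). Ties on a common prefix are broken by length so that the
+** required ordering properties hold:
+**
+** <blockquote><pre>
+** static int nocaseCmp(void *pArg, int n1, const void *z1,
+** &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; int n2, const void *z2){
+** &nbsp; int n = n2>n1 ? n1 : n2;
+** &nbsp; int c = sqlite3_strnicmp((const char*)z1, (const char*)z2, n);
+** &nbsp; return c ? c : n1-n2;
+** }
+**
+** rc = sqlite3_create_collation(db, "mynocase", SQLITE_UTF8, 0, nocaseCmp);
+** </pre></blockquote>
+*/
+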
+/*
+** CAPI3REF: Collation Needed Callbacks
+** METHOD: sqlite3
+**
+** ^To avoid having to register all collation sequences before a database
+** can be used, a single callback function may be registered with the
+** [database connection] to be invoked whenever an undefined collation
+** sequence is required.
+**
+** ^If the function is registered using the sqlite3_collation_needed() API,
+** then it is passed the names of undefined collation sequences as strings
+** encoded in UTF-8. ^If sqlite3_collation_needed16() is used,
+** the names are passed as UTF-16 in machine native byte order.
+** ^A call to either function replaces the existing collation-needed callback.
+**
+** ^(When the callback is invoked, the first argument passed is a copy
+** of the second argument to sqlite3_collation_needed() or
+** sqlite3_collation_needed16(). The second argument is the database
+** connection. The third argument is one of [SQLITE_UTF8], [SQLITE_UTF16BE],
+** or [SQLITE_UTF16LE], indicating the most desirable form of the collation
+** sequence function required. The fourth parameter is the name of the
+** required collation sequence.)^
+**
+** The callback function should register the desired collation using
+** [sqlite3_create_collation()], [sqlite3_create_collation16()], or
+** [sqlite3_create_collation_v2()].
+*/
+SQLITE_API int sqlite3_collation_needed(
+ sqlite3*,
+ void*,
+ void(*)(void*,sqlite3*,int eTextRep,const char*)
+);
+SQLITE_API int sqlite3_collation_needed16(
+ sqlite3*,
+ void*,
+ void(*)(void*,sqlite3*,int eTextRep,const void*)
+);
+
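+/*
+** A sketch of registering a collation on demand, reusing the nocaseCmp
+** comparator from the sketch above (names illustrative):
+**
+** <blockquote><pre>
+** static void needCollation(void *pNotUsed, sqlite3 *db,
+** &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; int eTextRep, const char *zName){
+** &nbsp; if( sqlite3_stricmp(zName, "mynocase")==0 ){
+** &nbsp;&nbsp; sqlite3_create_collation(db, zName, SQLITE_UTF8, 0, nocaseCmp);
+** &nbsp; }
+** }
+**
+** sqlite3_collation_needed(db, 0, needCollation);
+** </pre></blockquote>
+*/
+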
+#ifdef SQLITE_ENABLE_CEROD
+/*
+** Specify the activation key for a CEROD database. Unless
+** activated, none of the CEROD routines will work.
+*/
+SQLITE_API void sqlite3_activate_cerod(
+ const char *zPassPhrase /* Activation phrase */
+);
+#endif
+
+/*
+** CAPI3REF: Suspend Execution For A Short Time
+**
+** The sqlite3_sleep() function causes the current thread to suspend execution
+** for at least the number of milliseconds specified in its parameter.
+**
+** If the operating system does not support sleep requests with
+** millisecond time resolution, then the time will be rounded up to
+** the nearest second. The number of milliseconds of sleep actually
+** requested from the operating system is returned.
+**
+** ^SQLite implements this interface by calling the xSleep()
+** method of the default [sqlite3_vfs] object. If the xSleep() method
+** of the default VFS is not implemented correctly, or not implemented at
+** all, then the behavior of sqlite3_sleep() may deviate from the description
+** in the previous paragraphs.
+*/
+SQLITE_API int sqlite3_sleep(int);
+
+/*
+** CAPI3REF: Name Of The Folder Holding Temporary Files
+**
+** ^(If this global variable is made to point to a string which is
+** the name of a folder (a.k.a. directory), then all temporary files
+** created by SQLite when using a built-in [sqlite3_vfs | VFS]
+** will be placed in that directory.)^ ^If this variable
+** is a NULL pointer, then SQLite performs a search for an appropriate
+** temporary file directory.
+**
+** Applications are strongly discouraged from using this global variable.
+** It is required to set a temporary folder on Windows Runtime (WinRT).
+** But for all other platforms, it is highly recommended that applications
+** neither read nor write this variable. This global variable is a relic
+** that exists for backwards compatibility of legacy applications and should
+** be avoided in new projects.
+**
+** It is not safe to read or modify this variable in more than one
+** thread at a time. It is not safe to read or modify this variable
+** if a [database connection] is being used at the same time in a separate
+** thread.
+** It is intended that this variable be set once
+** as part of process initialization and before any SQLite interface
+** routines have been called and that this variable remain unchanged
+** thereafter.
+**
+** ^The [temp_store_directory pragma] may modify this variable and cause
+** it to point to memory obtained from [sqlite3_malloc]. ^Furthermore,
+** the [temp_store_directory pragma] always assumes that any string
+** that this variable points to is held in memory obtained from
+** [sqlite3_malloc] and the pragma may attempt to free that memory
+** using [sqlite3_free].
+** Hence, if this variable is modified directly, either it should be
+** made NULL or made to point to memory obtained from [sqlite3_malloc]
+** or else the use of the [temp_store_directory pragma] should be avoided.
+** Except when requested by the [temp_store_directory pragma], SQLite
+** does not free the memory that sqlite3_temp_directory points to. If
+** the application wants that memory to be freed, it must do
+** so itself, taking care to only do so after all [database connection]
+** objects have been destroyed.
+**
+** <b>Note to Windows Runtime users:</b> The temporary directory must be set
+** prior to calling [sqlite3_open] or [sqlite3_open_v2]. Otherwise, various
+** features that require the use of temporary files may fail. Here is an
+** example of how to do this using C++ with the Windows Runtime:
+**
+** <blockquote><pre>
+** LPCWSTR zPath = Windows::Storage::ApplicationData::Current->
+** &nbsp; TemporaryFolder->Path->Data();
+** char zPathBuf&#91;MAX_PATH + 1&#93;;
+** memset(zPathBuf, 0, sizeof(zPathBuf));
+** WideCharToMultiByte(CP_UTF8, 0, zPath, -1, zPathBuf, sizeof(zPathBuf),
+** &nbsp; NULL, NULL);
+** sqlite3_temp_directory = sqlite3_mprintf("%s", zPathBuf);
+** </pre></blockquote>
+*/
+SQLITE_API SQLITE_EXTERN char *sqlite3_temp_directory;
+
+/*
+** CAPI3REF: Name Of The Folder Holding Database Files
+**
+** ^(If this global variable is made to point to a string which is
+** the name of a folder (a.k.a. directory), then all database files
+** specified with a relative pathname and created or accessed by
+** SQLite when using a built-in windows [sqlite3_vfs | VFS] will be assumed
+** to be relative to that directory.)^ ^If this variable is a NULL
+** pointer, then SQLite assumes that all database files specified
+** with a relative pathname are relative to the current directory
+** for the process. Only the windows VFS makes use of this global
+** variable; it is ignored by the unix VFS.
+**
+** Changing the value of this variable while a database connection is
+** open can result in a corrupt database.
+**
+** It is not safe to read or modify this variable in more than one
+** thread at a time. It is not safe to read or modify this variable
+** if a [database connection] is being used at the same time in a separate
+** thread.
+** It is intended that this variable be set once
+** as part of process initialization and before any SQLite interface
+** routines have been called and that this variable remain unchanged
+** thereafter.
+**
+** ^The [data_store_directory pragma] may modify this variable and cause
+** it to point to memory obtained from [sqlite3_malloc]. ^Furthermore,
+** the [data_store_directory pragma] always assumes that any string
+** that this variable points to is held in memory obtained from
+** [sqlite3_malloc] and the pragma may attempt to free that memory
+** using [sqlite3_free].
+** Hence, if this variable is modified directly, either it should be
+** made NULL or made to point to memory obtained from [sqlite3_malloc]
+** or else the use of the [data_store_directory pragma] should be avoided.
+*/
+SQLITE_API SQLITE_EXTERN char *sqlite3_data_directory;
+
+/*
+** CAPI3REF: Win32 Specific Interface
+**
+** These interfaces are available only on Windows. The
+** [sqlite3_win32_set_directory] interface is used to set the value associated
+** with the [sqlite3_temp_directory] or [sqlite3_data_directory] variable, to
+** zValue, depending on the value of the type parameter. The zValue parameter
+** should be NULL to cause the previous value to be freed via [sqlite3_free];
+** a non-NULL value will be copied into memory obtained from [sqlite3_malloc]
+** prior to being used. The [sqlite3_win32_set_directory] interface returns
+** [SQLITE_OK] to indicate success, [SQLITE_ERROR] if the type is unsupported,
+** or [SQLITE_NOMEM] if memory could not be allocated. The value of the
+** [sqlite3_data_directory] variable is intended to act as a replacement for
+** the current directory on the sub-platforms of Win32 where that concept is
+** not present, e.g. WinRT and UWP. The [sqlite3_win32_set_directory8] and
+** [sqlite3_win32_set_directory16] interfaces behave exactly the same as the
+** sqlite3_win32_set_directory interface except the string parameter must be
+** UTF-8 or UTF-16, respectively.
+*/
+SQLITE_API int sqlite3_win32_set_directory(
+ unsigned long type, /* Identifier for directory being set or reset */
+ void *zValue /* New value for directory being set or reset */
+);
+SQLITE_API int sqlite3_win32_set_directory8(unsigned long type, const char *zValue);
+SQLITE_API int sqlite3_win32_set_directory16(unsigned long type, const void *zValue);
+
+/*
+** CAPI3REF: Win32 Directory Types
+**
+** These macros are only available on Windows. They define the allowed values
+** for the type argument to the [sqlite3_win32_set_directory] interface.
+*/
+#define SQLITE_WIN32_DATA_DIRECTORY_TYPE 1
+#define SQLITE_WIN32_TEMP_DIRECTORY_TYPE 2
+
+/*
+** CAPI3REF: Test For Auto-Commit Mode
+** KEYWORDS: {autocommit mode}
+** METHOD: sqlite3
+**
+** ^The sqlite3_get_autocommit() interface returns non-zero or
+** zero if the given database connection is or is not in autocommit mode,
+** respectively. ^Autocommit mode is on by default.
+** ^Autocommit mode is disabled by a [BEGIN] statement.
+** ^Autocommit mode is re-enabled by a [COMMIT] or [ROLLBACK].
+**
+** If certain kinds of errors occur on a statement within a multi-statement
+** transaction (errors including [SQLITE_FULL], [SQLITE_IOERR],
+** [SQLITE_NOMEM], [SQLITE_BUSY], and [SQLITE_INTERRUPT]) then the
+** transaction might be rolled back automatically. The only way to
+** find out whether SQLite automatically rolled back the transaction after
+** an error is to use this function.
+**
+** If another thread changes the autocommit status of the database
+** connection while this routine is running, then the return value
+** is undefined.
+*/
+SQLITE_API int sqlite3_get_autocommit(sqlite3*);
+
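+/*
+** A sketch of using this interface to detect an automatic rollback after
+** a failed statement inside an explicit transaction (zSql stands in for
+** application SQL):
+**
+** <blockquote><pre>
+** sqlite3_exec(db, "BEGIN", 0, 0, 0);
+** int rc = sqlite3_exec(db, zSql, 0, 0, 0);
+** if( rc!=SQLITE_OK && sqlite3_get_autocommit(db) ){
+** &nbsp; // the error rolled the transaction back automatically
+** }
+** </pre></blockquote>
+*/
+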
+/*
+** CAPI3REF: Find The Database Handle Of A Prepared Statement
+** METHOD: sqlite3_stmt
+**
+** ^The sqlite3_db_handle interface returns the [database connection] handle
+** to which a [prepared statement] belongs. ^The [database connection]
+** returned by sqlite3_db_handle is the same [database connection]
+** that was the first argument
+** to the [sqlite3_prepare_v2()] call (or its variants) that was used to
+** create the statement in the first place.
+*/
+SQLITE_API sqlite3 *sqlite3_db_handle(sqlite3_stmt*);
+
+/*
+** CAPI3REF: Return The Filename For A Database Connection
+** METHOD: sqlite3
+**
+** ^The sqlite3_db_filename(D,N) interface returns a pointer to the filename
+** associated with database N of connection D.
+** ^If there is no attached database N on the database
+** connection D, or if database N is a temporary or in-memory database, then
+** this function will return either a NULL pointer or an empty string.
+**
+** ^The string value returned by this routine is owned and managed by
+** the database connection. ^The value will be valid until the database N
+** is [DETACH]-ed or until the database connection closes.
+**
+** ^The filename returned by this function is the output of the
+** xFullPathname method of the [VFS]. ^In other words, the filename
+** will be an absolute pathname, even if the filename used
+** to open the database originally was a URI or relative pathname.
+**
+** If the filename pointer returned by this routine is not NULL, then it
+** can be used as the filename input parameter to these routines:
+** <ul>
+** <li> [sqlite3_uri_parameter()]
+** <li> [sqlite3_uri_boolean()]
+** <li> [sqlite3_uri_int64()]
+** <li> [sqlite3_filename_database()]
+** <li> [sqlite3_filename_journal()]
+** <li> [sqlite3_filename_wal()]
+** </ul>
+*/
+SQLITE_API const char *sqlite3_db_filename(sqlite3 *db, const char *zDbName);
+
+/*
+** CAPI3REF: Determine if a database is read-only
+** METHOD: sqlite3
+**
+** ^The sqlite3_db_readonly(D,N) interface returns 1 if the database N
+** of connection D is read-only, 0 if it is read/write, or -1 if N is not
+** the name of a database on connection D.
+*/
+SQLITE_API int sqlite3_db_readonly(sqlite3 *db, const char *zDbName);
+
+/*
+** CAPI3REF: Find the next prepared statement
+** METHOD: sqlite3
+**
+** ^This interface returns a pointer to the next [prepared statement] after
+** pStmt associated with the [database connection] pDb. ^If pStmt is NULL
+** then this interface returns a pointer to the first prepared statement
+** associated with the database connection pDb. ^If no prepared statement
+** satisfies the conditions of this routine, it returns NULL.
+**
+** The [database connection] pointer D in a call to
+** [sqlite3_next_stmt(D,S)] must refer to an open database
+** connection and in particular must not be a NULL pointer.
+*/
+SQLITE_API sqlite3_stmt *sqlite3_next_stmt(sqlite3 *pDb, sqlite3_stmt *pStmt);
+
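+/*
+** A common use of this interface is to finalize every outstanding
+** prepared statement before closing a connection. Because finalizing a
+** statement removes it from the list, the sketch below restarts the
+** scan from the beginning on each iteration:
+**
+** <blockquote><pre>
+** sqlite3_stmt *pStmt;
+** while( (pStmt = sqlite3_next_stmt(db, 0))!=0 ){
+** &nbsp; sqlite3_finalize(pStmt);
+** }
+** </pre></blockquote>
+*/
+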
+/*
+** CAPI3REF: Commit And Rollback Notification Callbacks
+** METHOD: sqlite3
+**
+** ^The sqlite3_commit_hook() interface registers a callback
+** function to be invoked whenever a transaction is [COMMIT | committed].
+** ^Any callback set by a previous call to sqlite3_commit_hook()
+** for the same database connection is overridden.
+** ^The sqlite3_rollback_hook() interface registers a callback
+** function to be invoked whenever a transaction is [ROLLBACK | rolled back].
+** ^Any callback set by a previous call to sqlite3_rollback_hook()
+** for the same database connection is overridden.
+** ^The pArg argument is passed through to the callback.
+** ^If the callback on a commit hook function returns non-zero,
+** then the commit is converted into a rollback.
+**
+** ^The sqlite3_commit_hook(D,C,P) and sqlite3_rollback_hook(D,C,P) functions
+** return the P argument from the previous call of the same function
+** on the same [database connection] D, or NULL for
+** the first call for each function on D.
+**
+** The commit and rollback hook callbacks are not reentrant.
+** The callback implementation must not do anything that will modify
+** the database connection that invoked the callback. Any actions
+** to modify the database connection must be deferred until after the
+** completion of the [sqlite3_step()] call that triggered the commit
+** or rollback hook in the first place.
+** Note that running any other SQL statements, including SELECT statements,
+** or merely calling [sqlite3_prepare_v2()] and [sqlite3_step()], counts as
+** "modifying" the database connection for the purposes of this paragraph.
+**
+** ^Registering a NULL function disables the callback.
+**
+** ^When the commit hook callback routine returns zero, the [COMMIT]
+** operation is allowed to continue normally. ^If the commit hook
+** returns non-zero, then the [COMMIT] is converted into a [ROLLBACK].
+** ^The rollback hook is invoked on a rollback that results from a commit
+** hook returning non-zero, just as it would be with any other rollback.
+**
+** ^For the purposes of this API, a transaction is said to have been
+** rolled back if an explicit "ROLLBACK" statement is executed, or
+** an error or constraint causes an implicit rollback to occur.
+** ^The rollback callback is not invoked if a transaction is
+** automatically rolled back because the database connection is closed.
+**
+** See also the [sqlite3_update_hook()] interface.
+*/
+SQLITE_API void *sqlite3_commit_hook(sqlite3*, int(*)(void*), void*);
+SQLITE_API void *sqlite3_rollback_hook(sqlite3*, void(*)(void *), void*);
+
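+/*
+** A sketch of a commit counter (names illustrative); returning zero from
+** the hook allows each COMMIT to proceed:
+**
+** <blockquote><pre>
+** static int nCommit = 0;
+** static int onCommit(void *pArg){
+** &nbsp; int *pn = (int*)pArg;
+** &nbsp; (*pn)++;
+** &nbsp; return 0;   // non-zero would turn the COMMIT into a ROLLBACK
+** }
+**
+** sqlite3_commit_hook(db, onCommit, &nCommit);
+** </pre></blockquote>
+*/
+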
+/*
+** CAPI3REF: Data Change Notification Callbacks
+** METHOD: sqlite3
+**
+** ^The sqlite3_update_hook() interface registers a callback function
+** with the [database connection] identified by the first argument
+** to be invoked whenever a row is updated, inserted or deleted in
+** a [rowid table].
+** ^Any callback set by a previous call to this function
+** for the same database connection is overridden.
+**
+** ^The second argument is a pointer to the function to invoke when a
+** row is updated, inserted or deleted in a rowid table.
+** ^The first argument to the callback is a copy of the third argument
+** to sqlite3_update_hook().
+** ^The second callback argument is one of [SQLITE_INSERT], [SQLITE_DELETE],
+** or [SQLITE_UPDATE], depending on the operation that caused the callback
+** to be invoked.
+** ^The third and fourth arguments to the callback contain pointers to the
+** database and table name containing the affected row.
+** ^The final callback parameter is the [rowid] of the row.
+** ^In the case of an update, this is the [rowid] after the update takes place.
+**
+** ^(The update hook is not invoked when internal system tables are
+** modified (i.e. sqlite_sequence).)^
+** ^The update hook is not invoked when [WITHOUT ROWID] tables are modified.
+**
+** ^In the current implementation, the update hook
+** is not invoked when conflicting rows are deleted because of an
+** [ON CONFLICT | ON CONFLICT REPLACE] clause. ^Nor is the update hook
+** invoked when rows are deleted using the [truncate optimization].
+** The exceptions defined in this paragraph might change in a future
+** release of SQLite.
+**
+** The update hook implementation must not do anything that will modify
+** the database connection that invoked the update hook. Any actions
+** to modify the database connection must be deferred until after the
+** completion of the [sqlite3_step()] call that triggered the update hook.
+** Note that calling [sqlite3_prepare_v2()] or [sqlite3_step()] also counts as
+** "modifying" the database connection for the purposes of this paragraph.
+**
+** ^The sqlite3_update_hook(D,C,P) function
+** returns the P argument from the previous call
+** on the same [database connection] D, or NULL for
+** the first call on D.
+**
+** See also the [sqlite3_commit_hook()], [sqlite3_rollback_hook()],
+** and [sqlite3_preupdate_hook()] interfaces.
+*/
+SQLITE_API void *sqlite3_update_hook(
+ sqlite3*,
+ void(*)(void *,int ,char const *,char const *,sqlite3_int64),
+ void*
+);
+
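+/*
+** A sketch of an update-hook callback that merely logs each change
+** (assuming &lt;stdio.h&gt;); logging is safe because the hook must not
+** modify the database connection that invoked it:
+**
+** <blockquote><pre>
+** static void onRowChange(void *pArg, int op, const char *zDb,
+** &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; const char *zTbl, sqlite3_int64 iRowid){
+** &nbsp; const char *zOp = op==SQLITE_INSERT ? "INSERT" :
+** &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; op==SQLITE_DELETE ? "DELETE" : "UPDATE";
+** &nbsp; fprintf(stderr, "%s %s.%s rowid=%lld\n", zOp, zDb, zTbl, (long long)iRowid);
+** }
+**
+** sqlite3_update_hook(db, onRowChange, 0);
+** </pre></blockquote>
+*/
+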
+/*
+** CAPI3REF: Enable Or Disable Shared Pager Cache
+**
+** ^(This routine enables or disables the sharing of the database cache
+** and schema data structures between [database connection | connections]
+** to the same database. Sharing is enabled if the argument is true
+** and disabled if the argument is false.)^
+**
+** ^Cache sharing is enabled and disabled for an entire process.
+** This is a change as of SQLite [version 3.5.0] ([dateof:3.5.0]).
+** In prior versions of SQLite,
+** sharing was enabled or disabled for each thread separately.
+**
+** ^(The cache sharing mode set by this interface affects all subsequent
+** calls to [sqlite3_open()], [sqlite3_open_v2()], and [sqlite3_open16()].
+** Existing database connections continue to use the sharing mode
+** that was in effect at the time they were opened.)^
+**
+** ^(This routine returns [SQLITE_OK] if shared cache was enabled or disabled
+** successfully. An [error code] is returned otherwise.)^
+**
+** ^Shared cache is disabled by default. It is recommended that it stay
+** that way. In other words, do not use this routine. This interface
+** continues to be provided for historical compatibility, but its use is
+** discouraged. Any use of shared cache is discouraged. If shared cache
+** must be used, it is recommended that shared cache only be enabled for
+** individual database connections using the [sqlite3_open_v2()] interface
+** with the [SQLITE_OPEN_SHAREDCACHE] flag.
+**
+** Note: This method is disabled on Mac OS X 10.7 and iOS version 5.0
+** and will always return SQLITE_MISUSE. On those systems,
+** shared cache mode should be enabled per-database connection via
+** [sqlite3_open_v2()] with [SQLITE_OPEN_SHAREDCACHE].
+**
+** This interface is threadsafe on processors where writing a
+** 32-bit integer is atomic.
+**
+** See Also: [SQLite Shared-Cache Mode]
+*/
+SQLITE_API int sqlite3_enable_shared_cache(int);
+
+/*
+** CAPI3REF: Attempt To Free Heap Memory
+**
+** ^The sqlite3_release_memory() interface attempts to free N bytes
+** of heap memory by deallocating non-essential memory allocations
+** held by the database library. Memory used to cache database
+** pages to improve performance is an example of non-essential memory.
+** ^sqlite3_release_memory() returns the number of bytes actually freed,
+** which might be more or less than the amount requested.
+** ^The sqlite3_release_memory() routine is a no-op returning zero
+** if SQLite is not compiled with [SQLITE_ENABLE_MEMORY_MANAGEMENT].
+**
+** See also: [sqlite3_db_release_memory()]
+*/
+SQLITE_API int sqlite3_release_memory(int);
+
+/*
+** CAPI3REF: Free Memory Used By A Database Connection
+** METHOD: sqlite3
+**
+** ^The sqlite3_db_release_memory(D) interface attempts to free as much heap
+** memory as possible from database connection D. Unlike the
+** [sqlite3_release_memory()] interface, this interface is in effect even
+** when the [SQLITE_ENABLE_MEMORY_MANAGEMENT] compile-time option is
+** omitted.
+**
+** See also: [sqlite3_release_memory()]
+*/
+SQLITE_API int sqlite3_db_release_memory(sqlite3*);
+
+/*
+** CAPI3REF: Impose A Limit On Heap Size
+**
+** These interfaces impose limits on the amount of heap memory that will be
+** allocated by all database connections within a single process.
+**
+** ^The sqlite3_soft_heap_limit64() interface sets and/or queries the
+** soft limit on the amount of heap memory that may be allocated by SQLite.
+** ^SQLite strives to keep heap memory utilization below the soft heap
+** limit by reducing the number of pages held in the page cache
+** as heap memory usage approaches the limit.
+** ^The soft heap limit is "soft" because even though SQLite strives to stay
+** below the limit, it will exceed the limit rather than generate
+** an [SQLITE_NOMEM] error. In other words, the soft heap limit
+** is advisory only.
+**
+** ^The sqlite3_hard_heap_limit64(N) interface sets a hard upper bound of
+** N bytes on the amount of memory that will be allocated. ^The
+** sqlite3_hard_heap_limit64(N) interface is similar to
+** sqlite3_soft_heap_limit64(N) except that memory allocations will fail
+** when the hard heap limit is reached.
+**
+** ^The return value from both sqlite3_soft_heap_limit64() and
+** sqlite3_hard_heap_limit64() is the size of
+** the heap limit prior to the call, or negative in the case of an
+** error. ^If the argument N is negative
+** then no change is made to the heap limit. Hence, the current
+** size of heap limits can be determined by invoking
+** sqlite3_soft_heap_limit64(-1) or sqlite3_hard_heap_limit64(-1).
+**
+** ^Setting the heap limits to zero disables the heap limiter mechanism.
+**
+** ^The soft heap limit may not be greater than the hard heap limit.
+** ^If the hard heap limit is enabled and if sqlite3_soft_heap_limit64(N)
+** is invoked with a value of N that is greater than the hard heap limit,
+** then the soft heap limit is set to the value of the hard heap limit.
+** ^The soft heap limit is automatically enabled whenever the hard heap
+** limit is enabled. ^When sqlite3_hard_heap_limit64(N) is invoked and
+** the soft heap limit is outside the range of 1..N, then the soft heap
+** limit is set to N. ^Invoking sqlite3_soft_heap_limit64(0) when the
+** hard heap limit is enabled makes the soft heap limit equal to the
+** hard heap limit.
+**
+** The memory allocation limits can also be adjusted using
+** [PRAGMA soft_heap_limit] and [PRAGMA hard_heap_limit].
+**
+** ^(The heap limits are not enforced in the current implementation
+** if one or more of following conditions are true:
+**
+** <ul>
+** <li> The limit value is set to zero.
+** <li> Memory accounting is disabled using a combination of the
+** [sqlite3_config]([SQLITE_CONFIG_MEMSTATUS],...) start-time option and
+** the [SQLITE_DEFAULT_MEMSTATUS] compile-time option.
+** <li> An alternative page cache implementation is specified using
+** [sqlite3_config]([SQLITE_CONFIG_PCACHE2],...).
+** <li> The page cache allocates from its own memory pool supplied
+** by [sqlite3_config]([SQLITE_CONFIG_PAGECACHE],...) rather than
+** from the heap.
+** </ul>)^
+**
+** The circumstances under which SQLite will enforce the heap limits may
+** change in future releases of SQLite.
+*/
+SQLITE_API sqlite3_int64 sqlite3_soft_heap_limit64(sqlite3_int64 N);
+SQLITE_API sqlite3_int64 sqlite3_hard_heap_limit64(sqlite3_int64 N);
+
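+/*
+** A sketch of querying and setting the limits (the byte counts are
+** illustrative only):
+**
+** <blockquote><pre>
+** sqlite3_int64 prior = sqlite3_soft_heap_limit64(-1);  // negative N: query only
+** sqlite3_hard_heap_limit64(512*1024*1024);             // hard cap, in bytes
+** sqlite3_soft_heap_limit64(256*1024*1024);             // advisory target
+** </pre></blockquote>
+*/
+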
+/*
+** CAPI3REF: Deprecated Soft Heap Limit Interface
+** DEPRECATED
+**
+** This is a deprecated version of the [sqlite3_soft_heap_limit64()]
+** interface. This routine is provided for historical compatibility
+** only. All new applications should use the
+** [sqlite3_soft_heap_limit64()] interface rather than this one.
+*/
+SQLITE_API SQLITE_DEPRECATED void sqlite3_soft_heap_limit(int N);
+
+
+/*
+** CAPI3REF: Extract Metadata About A Column Of A Table
+** METHOD: sqlite3
+**
+** ^(The sqlite3_table_column_metadata(X,D,T,C,....) routine returns
+** information about column C of table T in database D
+** on [database connection] X.)^ ^The sqlite3_table_column_metadata()
+** interface returns SQLITE_OK and fills in the non-NULL pointers in
+** the final five arguments with appropriate values if the specified
+** column exists. ^The sqlite3_table_column_metadata() interface returns
+** SQLITE_ERROR if the specified column does not exist.
+** ^If the column-name parameter to sqlite3_table_column_metadata() is a
+** NULL pointer, then this routine simply checks for the existence of the
+** table and returns SQLITE_OK if the table exists and SQLITE_ERROR if it
+** does not. If the table name parameter T in a call to
+** sqlite3_table_column_metadata(X,D,T,C,...) is NULL then the behavior is
+** undefined.
+**
+** ^The column is identified by the second, third and fourth parameters to
+** this function. ^(The second parameter is either the name of the database
+** (i.e. "main", "temp", or an attached database) containing the specified
+** table or NULL.)^ ^If it is NULL, then all attached databases are searched
+** for the table using the same algorithm used by the database engine to
+** resolve unqualified table references.
+**
+** ^The third and fourth parameters to this function are the table and column
+** name of the desired column, respectively.
+**
+** ^Metadata is returned by writing to the memory locations passed as the 5th
+** and subsequent parameters to this function. ^Any of these arguments may be
+** NULL, in which case the corresponding element of metadata is omitted.
+**
+** ^(<blockquote>
+** <table border="1">
+** <tr><th> Parameter <th> Output<br>Type <th> Description
+**
+** <tr><td> 5th <td> const char* <td> Data type
+** <tr><td> 6th <td> const char* <td> Name of default collation sequence
+** <tr><td> 7th <td> int <td> True if column has a NOT NULL constraint
+** <tr><td> 8th <td> int <td> True if column is part of the PRIMARY KEY
+** <tr><td> 9th <td> int <td> True if column is [AUTOINCREMENT]
+** </table>
+** </blockquote>)^
+**
+** ^The memory pointed to by the character pointers returned for the
+** declaration type and collation sequence is valid until the next
+** call to any SQLite API function.
+**
+** ^If the specified table is actually a view, an [error code] is returned.
+**
+** ^If the specified column is "rowid", "oid" or "_rowid_" and the table
+** is not a [WITHOUT ROWID] table and an
+** [INTEGER PRIMARY KEY] column has been explicitly declared, then the output
+** parameters are set for the explicitly declared column. ^(If there is no
+** [INTEGER PRIMARY KEY] column, then the outputs
+** for the [rowid] are set as follows:
+**
+** <pre>
+** data type: "INTEGER"
+** collation sequence: "BINARY"
+** not null: 0
+** primary key: 1
+** auto increment: 0
+** </pre>)^
+**
+** ^This function causes all database schemas to be read from disk and
+** parsed, if that has not already been done, and returns an error if
+** any errors are encountered while loading the schema.
+*/
+SQLITE_API int sqlite3_table_column_metadata(
+ sqlite3 *db, /* Connection handle */
+ const char *zDbName, /* Database name or NULL */
+ const char *zTableName, /* Table name */
+ const char *zColumnName, /* Column name */
+ char const **pzDataType, /* OUTPUT: Declared data type */
+ char const **pzCollSeq, /* OUTPUT: Collation sequence name */
+ int *pNotNull, /* OUTPUT: True if NOT NULL constraint exists */
+ int *pPrimaryKey, /* OUTPUT: True if column part of PK */
+ int *pAutoinc /* OUTPUT: True if column is auto-increment */
+);
+
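+/*
+** A sketch of querying the metadata (the table and column names "users"
+** and "id" are hypothetical):
+**
+** <blockquote><pre>
+** const char *zType, *zColl;
+** int notnull, pk, autoinc;
+** int rc = sqlite3_table_column_metadata(db, "main", "users", "id",
+** &nbsp;&nbsp;&nbsp;&nbsp; &zType, &zColl, &notnull, &pk, &autoinc);
+** if( rc==SQLITE_OK && pk ){
+** &nbsp; // "id" participates in the PRIMARY KEY of main.users
+** }
+** </pre></blockquote>
+*/
+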
+/*
+** CAPI3REF: Load An Extension
+** METHOD: sqlite3
+**
+** ^This interface loads an SQLite extension library from the named file.
+**
+** ^The sqlite3_load_extension() interface attempts to load an
+** [SQLite extension] library contained in the file zFile. If
+** the file cannot be loaded directly, attempts are made to load
+** with various operating-system specific extensions added.
+** So for example, if "samplelib" cannot be loaded, then names like
+** "samplelib.so" or "samplelib.dylib" or "samplelib.dll" might
+** be tried also.
+**
+** ^The entry point is zProc.
+** ^(zProc may be 0, in which case SQLite will try to come up with an
+** entry point name on its own. It first tries "sqlite3_extension_init".
+** If that does not work, it constructs a name "sqlite3_X_init" where the
+** X consists of the lower-case equivalent of all ASCII alphabetic
+** characters in the filename from the last "/" to the first following
+** "." and omitting any initial "lib".)^
+** ^The sqlite3_load_extension() interface returns
+** [SQLITE_OK] on success and [SQLITE_ERROR] if something goes wrong.
+** ^If an error occurs and pzErrMsg is not 0, then the
+** [sqlite3_load_extension()] interface shall attempt to
+** fill *pzErrMsg with error message text stored in memory
+** obtained from [sqlite3_malloc()]. The calling function
+** should free this memory by calling [sqlite3_free()].
+**
+** ^Extension loading must be enabled using
+** [sqlite3_enable_load_extension()] or
+** [sqlite3_db_config](db,[SQLITE_DBCONFIG_ENABLE_LOAD_EXTENSION],1,NULL)
+** prior to calling this API,
+** otherwise an error will be returned.
+**
+** <b>Security warning:</b> It is recommended that the
+** [SQLITE_DBCONFIG_ENABLE_LOAD_EXTENSION] method be used to enable only this
+** interface. The use of the [sqlite3_enable_load_extension()] interface
+** should be avoided. This will keep the SQL function [load_extension()]
+** disabled and prevent SQL injections from giving attackers
+** access to extension loading capabilities.
+**
+** See also the [load_extension() SQL function].
+*/
+SQLITE_API int sqlite3_load_extension(
+ sqlite3 *db, /* Load the extension into this database connection */
+ const char *zFile, /* Name of the shared library containing extension */
+ const char *zProc, /* Entry point. Derived from zFile if 0 */
+ char **pzErrMsg /* Put error message here if not 0 */
+);
+
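+/*
+** A sketch of loading an extension per the recommendation above: the
+** C-API-only switch is enabled just long enough for the load, keeping
+** the SQL load_extension() function disabled throughout (assumes
+** &lt;stdio.h&gt;; "./samplelib" is an illustrative file name):
+**
+** <blockquote><pre>
+** char *zErrMsg = 0;
+** sqlite3_db_config(db, SQLITE_DBCONFIG_ENABLE_LOAD_EXTENSION, 1, (int*)0);
+** int rc = sqlite3_load_extension(db, "./samplelib", 0, &zErrMsg);
+** sqlite3_db_config(db, SQLITE_DBCONFIG_ENABLE_LOAD_EXTENSION, 0, (int*)0);
+** if( rc!=SQLITE_OK ){
+** &nbsp; fprintf(stderr, "load failed: %s\n", zErrMsg ? zErrMsg : "?");
+** &nbsp; sqlite3_free(zErrMsg);
+** }
+** </pre></blockquote>
+*/
+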
+/*
+** CAPI3REF: Enable Or Disable Extension Loading
+** METHOD: sqlite3
+**
+** ^So as not to open security holes in older applications that are
+** unprepared to deal with [extension loading], and as a means of disabling
+** [extension loading] while evaluating user-entered SQL, the following API
+** is provided to turn the [sqlite3_load_extension()] mechanism on and off.
+**
+** ^Extension loading is off by default.
+** ^Call the sqlite3_enable_load_extension() routine with onoff==1
+** to turn extension loading on and call it with onoff==0 to turn
+** it back off again.
+**
+** ^This interface enables or disables both the C-API
+** [sqlite3_load_extension()] and the SQL function [load_extension()].
+** ^(Use [sqlite3_db_config](db,[SQLITE_DBCONFIG_ENABLE_LOAD_EXTENSION],..)
+** to enable or disable only the C-API.)^
+**
+** <b>Security warning:</b> It is recommended that extension loading
+** be enabled using the [SQLITE_DBCONFIG_ENABLE_LOAD_EXTENSION] method
+** rather than this interface, so the [load_extension()] SQL function
+** remains disabled. This will prevent SQL injections from giving attackers
+** access to extension loading capabilities.
+*/
+SQLITE_API int sqlite3_enable_load_extension(sqlite3 *db, int onoff);
+
+/*
+** CAPI3REF: Automatically Load Statically Linked Extensions
+**
+** ^This interface causes the xEntryPoint() function to be invoked for
+** each new [database connection] that is created. The idea here is that
+** xEntryPoint() is the entry point for a statically linked [SQLite extension]
+** that is to be automatically loaded into all new database connections.
+**
+** ^(Even though the function prototype shows that xEntryPoint() takes
+** no arguments and returns void, SQLite invokes xEntryPoint() with three
+** arguments and expects an integer result as if the signature of the
+** entry point were as follows:
+**
+** <blockquote><pre>
+** &nbsp; int xEntryPoint(
+** &nbsp; sqlite3 *db,
+** &nbsp; const char **pzErrMsg,
+** &nbsp; const struct sqlite3_api_routines *pThunk
+** &nbsp; );
+** </pre></blockquote>)^
+**
+** If the xEntryPoint routine encounters an error, it should make *pzErrMsg
+** point to an appropriate error message (obtained from [sqlite3_mprintf()])
+** and return an appropriate [error code]. ^SQLite ensures that *pzErrMsg
+** is NULL before calling the xEntryPoint(). ^SQLite will invoke
+** [sqlite3_free()] on *pzErrMsg after xEntryPoint() returns. ^If any
+** xEntryPoint() returns an error, the [sqlite3_open()], [sqlite3_open16()],
+** or [sqlite3_open_v2()] call that provoked the xEntryPoint() will fail.
+**
+** ^Calling sqlite3_auto_extension(X) with an entry point X that is already
+** on the list of automatic extensions is a harmless no-op. ^No entry point
+** will be called more than once for each database connection that is opened.
+**
+** See also: [sqlite3_reset_auto_extension()]
+** and [sqlite3_cancel_auto_extension()]
+*/
+SQLITE_API int sqlite3_auto_extension(void(*xEntryPoint)(void));
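+
+/*
+** Example (illustrative sketch): automatically load a statically linked
+** extension into every new connection.  "sqlite3_myext_init" is a
+** hypothetical entry point with the standard extension signature shown
+** above.
+**
+** <blockquote><pre>
+** &nbsp; extern int sqlite3_myext_init(sqlite3*, char**,
+** &nbsp;                               const sqlite3_api_routines*);
+** &nbsp; sqlite3_auto_extension( (void(*)(void))sqlite3_myext_init );
+** </pre></blockquote>
+*/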
+
+/*
+** CAPI3REF: Cancel Automatic Extension Loading
+**
+** ^The [sqlite3_cancel_auto_extension(X)] interface unregisters the
+** initialization routine X that was registered using a prior call to
+** [sqlite3_auto_extension(X)]. ^The [sqlite3_cancel_auto_extension(X)]
+** routine returns 1 if initialization routine X was successfully
+** unregistered and it returns 0 if X was not on the list of initialization
+** routines.
+*/
+SQLITE_API int sqlite3_cancel_auto_extension(void(*xEntryPoint)(void));
+
+/*
+** CAPI3REF: Reset Automatic Extension Loading
+**
+** ^This interface disables all automatic extensions previously
+** registered using [sqlite3_auto_extension()].
+*/
+SQLITE_API void sqlite3_reset_auto_extension(void);
+
+/*
+** The interface to the virtual-table mechanism is currently considered
+** to be experimental. The interface might change in incompatible ways.
+** If this is a problem for you, do not use the interface at this time.
+**
+** When the virtual-table mechanism stabilizes, we will declare the
+** interface fixed, support it indefinitely, and remove this comment.
+*/
+
+/*
+** Structures used by the virtual table interface
+*/
+typedef struct sqlite3_vtab sqlite3_vtab;
+typedef struct sqlite3_index_info sqlite3_index_info;
+typedef struct sqlite3_vtab_cursor sqlite3_vtab_cursor;
+typedef struct sqlite3_module sqlite3_module;
+
+/*
+** CAPI3REF: Virtual Table Object
+** KEYWORDS: sqlite3_module {virtual table module}
+**
+** This structure, sometimes called a "virtual table module",
+** defines the implementation of a [virtual table].
+** This structure consists mostly of methods for the module.
+**
+** ^A virtual table module is created by filling in a persistent
+** instance of this structure and passing a pointer to that instance
+** to [sqlite3_create_module()] or [sqlite3_create_module_v2()].
+** ^The registration remains valid until it is replaced by a different
+** module or until the [database connection] closes. The content
+** of this structure must not change while it is registered with
+** any database connection.
+*/
+struct sqlite3_module {
+ int iVersion;
+ int (*xCreate)(sqlite3*, void *pAux,
+ int argc, const char *const*argv,
+ sqlite3_vtab **ppVTab, char**);
+ int (*xConnect)(sqlite3*, void *pAux,
+ int argc, const char *const*argv,
+ sqlite3_vtab **ppVTab, char**);
+ int (*xBestIndex)(sqlite3_vtab *pVTab, sqlite3_index_info*);
+ int (*xDisconnect)(sqlite3_vtab *pVTab);
+ int (*xDestroy)(sqlite3_vtab *pVTab);
+ int (*xOpen)(sqlite3_vtab *pVTab, sqlite3_vtab_cursor **ppCursor);
+ int (*xClose)(sqlite3_vtab_cursor*);
+ int (*xFilter)(sqlite3_vtab_cursor*, int idxNum, const char *idxStr,
+ int argc, sqlite3_value **argv);
+ int (*xNext)(sqlite3_vtab_cursor*);
+ int (*xEof)(sqlite3_vtab_cursor*);
+ int (*xColumn)(sqlite3_vtab_cursor*, sqlite3_context*, int);
+ int (*xRowid)(sqlite3_vtab_cursor*, sqlite3_int64 *pRowid);
+ int (*xUpdate)(sqlite3_vtab *, int, sqlite3_value **, sqlite3_int64 *);
+ int (*xBegin)(sqlite3_vtab *pVTab);
+ int (*xSync)(sqlite3_vtab *pVTab);
+ int (*xCommit)(sqlite3_vtab *pVTab);
+ int (*xRollback)(sqlite3_vtab *pVTab);
+ int (*xFindFunction)(sqlite3_vtab *pVtab, int nArg, const char *zName,
+ void (**pxFunc)(sqlite3_context*,int,sqlite3_value**),
+ void **ppArg);
+ int (*xRename)(sqlite3_vtab *pVtab, const char *zNew);
+ /* The methods above are in version 1 of the sqlite_module object. Those
+ ** below are for version 2 and greater. */
+ int (*xSavepoint)(sqlite3_vtab *pVTab, int);
+ int (*xRelease)(sqlite3_vtab *pVTab, int);
+ int (*xRollbackTo)(sqlite3_vtab *pVTab, int);
+ /* The methods above are in versions 1 and 2 of the sqlite_module object.
+ ** Those below are for version 3 and greater. */
+ int (*xShadowName)(const char*);
+};
+
+/*
+** CAPI3REF: Virtual Table Indexing Information
+** KEYWORDS: sqlite3_index_info
+**
+** The sqlite3_index_info structure and its substructures are used as part
+** of the [virtual table] interface to
+** pass information into and receive the reply from the [xBestIndex]
+** method of a [virtual table module]. The fields under **Inputs** are the
+** inputs to xBestIndex and are read-only. xBestIndex inserts its
+** results into the **Outputs** fields.
+**
+** ^(The aConstraint[] array records WHERE clause constraints of the form:
+**
+** <blockquote>column OP expr</blockquote>
+**
+** where OP is =, &lt;, &lt;=, &gt;, or &gt;=.)^ ^(The particular operator is
+** stored in aConstraint[].op using one of the
+** [SQLITE_INDEX_CONSTRAINT_EQ | SQLITE_INDEX_CONSTRAINT_ values].)^
+** ^(The index of the column is stored in
+** aConstraint[].iColumn.)^ ^(aConstraint[].usable is TRUE if the
+** expr on the right-hand side can be evaluated (and thus the constraint
+** is usable) and false if it cannot.)^
+**
+** ^The optimizer automatically inverts terms of the form "expr OP column"
+** and makes other simplifications to the WHERE clause in an attempt to
+** get as many WHERE clause terms into the form shown above as possible.
+** ^The aConstraint[] array only reports WHERE clause terms that are
+** relevant to the particular virtual table being queried.
+**
+** ^Information about the ORDER BY clause is stored in aOrderBy[].
+** ^Each term of aOrderBy records a column of the ORDER BY clause.
+**
+** The colUsed field indicates which columns of the virtual table may be
+** required by the current scan. Virtual table columns are numbered from
+** zero in the order in which they appear within the CREATE TABLE statement
+** passed to sqlite3_declare_vtab(). For the first 63 columns (columns 0-62),
+** the corresponding bit is set within the colUsed mask if the column may be
+** required by SQLite. If the table has at least 64 columns and any column
+** to the right of the first 63 is required, then bit 63 of colUsed is also
+** set. In other words, column iCol may be required if the expression
+** (colUsed & ((sqlite3_uint64)1 << (iCol>=63 ? 63 : iCol))) evaluates to
+** non-zero.
+**
+** The [xBestIndex] method must fill aConstraintUsage[] with information
+** about what parameters to pass to xFilter. ^If argvIndex>0 then
+** the right-hand side of the corresponding aConstraint[] is evaluated
+** and becomes the argvIndex-th entry in argv. ^(If aConstraintUsage[].omit
+** is true, then the constraint is assumed to be fully handled by the
+** virtual table and might not be checked again by the byte code.)^ ^(The
+** aConstraintUsage[].omit flag is an optimization hint. When the omit flag
+** is left in its default setting of false, the constraint will always be
+** checked separately in byte code. If the omit flag is changed to true, then
+** the constraint may or may not be checked in byte code. In other words,
+** when the omit flag is true there is no guarantee that the constraint will
+** not be checked again using byte code.)^
+**
+** ^The idxNum and idxStr values are recorded and passed into the
+** [xFilter] method.
+** ^[sqlite3_free()] is used to free idxStr if and only if
+** needToFreeIdxStr is true.
+**
+** ^Setting orderByConsumed to true indicates that output from
+** [xFilter]/[xNext] will occur in the correct order to satisfy the
+** ORDER BY clause, so that no separate sorting step is required.
+**
+** ^The estimatedCost value is an estimate of the cost of a particular
+** strategy. A cost of N indicates that the cost of the strategy is similar
+** to a linear scan of an SQLite table with N rows. A cost of log(N)
+** indicates that the expense of the operation is similar to that of a
+** binary search on a unique indexed field of an SQLite table with N rows.
+**
+** ^The estimatedRows value is an estimate of the number of rows that
+** will be returned by the strategy.
+**
+** The xBestIndex method may optionally populate the idxFlags field with a
+** mask of SQLITE_INDEX_SCAN_* flags. Currently there is only one such flag -
+** SQLITE_INDEX_SCAN_UNIQUE. If the xBestIndex method sets this flag, SQLite
+** assumes that the strategy may visit at most one row.
+**
+** Additionally, if xBestIndex sets the SQLITE_INDEX_SCAN_UNIQUE flag, then
+** SQLite also assumes that if a call to the xUpdate() method is made as
+** part of the same statement to delete or update a virtual table row and the
+** implementation returns SQLITE_CONSTRAINT, then there is no need to rollback
+** any database changes. In other words, if the xUpdate() returns
+** SQLITE_CONSTRAINT, the database contents must be exactly as they were
+** before xUpdate was called. By contrast, if SQLITE_INDEX_SCAN_UNIQUE is not
+** set and xUpdate returns SQLITE_CONSTRAINT, any database changes made by
+** the xUpdate method are automatically rolled back by SQLite.
+**
+** IMPORTANT: The estimatedRows field was added to the sqlite3_index_info
+** structure for SQLite [version 3.8.2] ([dateof:3.8.2]).
+** If a virtual table extension is
+** used with an SQLite version earlier than 3.8.2, the results of attempting
+** to read or write the estimatedRows field are undefined (but are likely
+** to include crashing the application). The estimatedRows field should
+** therefore only be used if [sqlite3_libversion_number()] returns a
+** value greater than or equal to 3008002. Similarly, the idxFlags field
+** was added for [version 3.9.0] ([dateof:3.9.0]).
+** It may therefore only be used if
+** sqlite3_libversion_number() returns a value greater than or equal to
+** 3009000.
+*/
+struct sqlite3_index_info {
+ /* Inputs */
+ int nConstraint; /* Number of entries in aConstraint */
+ struct sqlite3_index_constraint {
+ int iColumn; /* Column constrained. -1 for ROWID */
+ unsigned char op; /* Constraint operator */
+ unsigned char usable; /* True if this constraint is usable */
+ int iTermOffset; /* Used internally - xBestIndex should ignore */
+ } *aConstraint; /* Table of WHERE clause constraints */
+ int nOrderBy; /* Number of terms in the ORDER BY clause */
+ struct sqlite3_index_orderby {
+ int iColumn; /* Column number */
+ unsigned char desc; /* True for DESC. False for ASC. */
+ } *aOrderBy; /* The ORDER BY clause */
+ /* Outputs */
+ struct sqlite3_index_constraint_usage {
+ int argvIndex; /* if >0, constraint is part of argv to xFilter */
+ unsigned char omit; /* Do not code a test for this constraint */
+ } *aConstraintUsage;
+ int idxNum; /* Number used to identify the index */
+ char *idxStr; /* String, possibly obtained from sqlite3_malloc */
+ int needToFreeIdxStr; /* Free idxStr using sqlite3_free() if true */
+ int orderByConsumed; /* True if output is already ordered */
+ double estimatedCost; /* Estimated cost of using this index */
+ /* Fields below are only available in SQLite 3.8.2 and later */
+ sqlite3_int64 estimatedRows; /* Estimated number of rows returned */
+ /* Fields below are only available in SQLite 3.9.0 and later */
+ int idxFlags; /* Mask of SQLITE_INDEX_SCAN_* flags */
+ /* Fields below are only available in SQLite 3.10.0 and later */
+ sqlite3_uint64 colUsed; /* Input: Mask of columns used by statement */
+};
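+
+/*
+** Example (illustrative sketch): a minimal [xBestIndex] implementation
+** that looks for a usable equality constraint on column 0 and requests
+** its right-hand side as the first argument to [xFilter].  The cost
+** numbers are arbitrary placeholders.
+**
+** <blockquote><pre>
+** &nbsp; static int myBestIndex(sqlite3_vtab *pVTab, sqlite3_index_info *p){
+** &nbsp;   int i;
+** &nbsp;   for(i=0; i<p->nConstraint; i++){
+** &nbsp;     if( p->aConstraint[i].usable
+** &nbsp;      && p->aConstraint[i].iColumn==0
+** &nbsp;      && p->aConstraint[i].op==SQLITE_INDEX_CONSTRAINT_EQ ){
+** &nbsp;       p->aConstraintUsage[i].argvIndex = 1;
+** &nbsp;       p->aConstraintUsage[i].omit = 1;
+** &nbsp;       p->idxNum = 1;
+** &nbsp;       p->estimatedCost = 10.0;
+** &nbsp;       return SQLITE_OK;
+** &nbsp;     }
+** &nbsp;   }
+** &nbsp;   p->idxNum = 0;
+** &nbsp;   p->estimatedCost = 1000000.0;
+** &nbsp;   return SQLITE_OK;
+** &nbsp; }
+** </pre></blockquote>
+*/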
+
+/*
+** CAPI3REF: Virtual Table Scan Flags
+**
+** Virtual table implementations are allowed to set the
+** [sqlite3_index_info].idxFlags field to some combination of
+** these bits.
+*/
+#define SQLITE_INDEX_SCAN_UNIQUE 1 /* Scan visits at most 1 row */
+
+/*
+** CAPI3REF: Virtual Table Constraint Operator Codes
+**
+** These macros define the allowed values for the
+** [sqlite3_index_info].aConstraint[].op field. Each value represents
+** an operator that is part of a constraint term in the WHERE clause of
+** a query that uses a [virtual table].
+*/
+#define SQLITE_INDEX_CONSTRAINT_EQ 2
+#define SQLITE_INDEX_CONSTRAINT_GT 4
+#define SQLITE_INDEX_CONSTRAINT_LE 8
+#define SQLITE_INDEX_CONSTRAINT_LT 16
+#define SQLITE_INDEX_CONSTRAINT_GE 32
+#define SQLITE_INDEX_CONSTRAINT_MATCH 64
+#define SQLITE_INDEX_CONSTRAINT_LIKE 65
+#define SQLITE_INDEX_CONSTRAINT_GLOB 66
+#define SQLITE_INDEX_CONSTRAINT_REGEXP 67
+#define SQLITE_INDEX_CONSTRAINT_NE 68
+#define SQLITE_INDEX_CONSTRAINT_ISNOT 69
+#define SQLITE_INDEX_CONSTRAINT_ISNOTNULL 70
+#define SQLITE_INDEX_CONSTRAINT_ISNULL 71
+#define SQLITE_INDEX_CONSTRAINT_IS 72
+#define SQLITE_INDEX_CONSTRAINT_FUNCTION 150
+
+/*
+** CAPI3REF: Register A Virtual Table Implementation
+** METHOD: sqlite3
+**
+** ^These routines are used to register a new [virtual table module] name.
+** ^Module names must be registered before
+** creating a new [virtual table] using the module and before using a
+** preexisting [virtual table] for the module.
+**
+** ^The module name is registered on the [database connection] specified
+** by the first parameter. ^The name of the module is given by the
+** second parameter. ^The third parameter is a pointer to
+** the implementation of the [virtual table module]. ^The fourth
+** parameter is an arbitrary client data pointer that is passed through
+** into the [xCreate] and [xConnect] methods of the virtual table module
+** when a new virtual table is being created or reinitialized.
+**
+** ^The sqlite3_create_module_v2() interface has a fifth parameter which
+** is a pointer to a destructor for the pClientData. ^SQLite will
+** invoke the destructor function (if it is not NULL) when SQLite
+** no longer needs the pClientData pointer. ^The destructor will also
+** be invoked if the call to sqlite3_create_module_v2() fails.
+** ^The sqlite3_create_module()
+** interface is equivalent to sqlite3_create_module_v2() with a NULL
+** destructor.
+**
+** ^If the third parameter (the pointer to the sqlite3_module object) is
+** NULL then no new module is created and any existing modules with the
+** same name are dropped.
+**
+** See also: [sqlite3_drop_modules()]
+*/
+SQLITE_API int sqlite3_create_module(
+ sqlite3 *db, /* SQLite connection to register module with */
+ const char *zName, /* Name of the module */
+ const sqlite3_module *p, /* Methods for the module */
+ void *pClientData /* Client data for xCreate/xConnect */
+);
+SQLITE_API int sqlite3_create_module_v2(
+ sqlite3 *db, /* SQLite connection to register module with */
+ const char *zName, /* Name of the module */
+ const sqlite3_module *p, /* Methods for the module */
+ void *pClientData, /* Client data for xCreate/xConnect */
+ void(*xDestroy)(void*) /* Module destructor function */
+);
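+
+/*
+** Example (illustrative sketch): register a [virtual table module] named
+** "mytab".  The myModule object is assumed to be a fully populated,
+** persistent sqlite3_module; pAppData and destroyAppData are
+** hypothetical client data and destructor.
+**
+** <blockquote><pre>
+** &nbsp; rc = sqlite3_create_module_v2(db, "mytab", &myModule,
+** &nbsp;                               pAppData, destroyAppData);
+** </pre></blockquote>
+*/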
+
+/*
+** CAPI3REF: Remove Unnecessary Virtual Table Implementations
+** METHOD: sqlite3
+**
+** ^The sqlite3_drop_modules(D,L) interface removes all virtual
+** table modules from database connection D except those named on list L.
+** The L parameter must be either NULL or a pointer to an array of pointers
+** to strings where the array is terminated by a single NULL pointer.
+** ^If the L parameter is NULL, then all virtual table modules are removed.
+**
+** See also: [sqlite3_create_module()]
+*/
+SQLITE_API int sqlite3_drop_modules(
+ sqlite3 *db, /* Remove modules from this connection */
+ const char **azKeep /* Except, do not remove the ones named here */
+);
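+
+/*
+** Example (illustrative sketch): remove every virtual table module from
+** connection db except the hypothetical module "mytab".
+**
+** <blockquote><pre>
+** &nbsp; static const char *azKeep[] = { "mytab", 0 };
+** &nbsp; sqlite3_drop_modules(db, azKeep);
+** </pre></blockquote>
+*/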
+
+/*
+** CAPI3REF: Virtual Table Instance Object
+** KEYWORDS: sqlite3_vtab
+**
+** Every [virtual table module] implementation uses a subclass
+** of this object to describe a particular instance
+** of the [virtual table]. Each subclass will
+** be tailored to the specific needs of the module implementation.
+** The purpose of this superclass is to define certain fields that are
+** common to all module implementations.
+**
+** ^Virtual table methods can set an error message by assigning a
+** string obtained from [sqlite3_mprintf()] to zErrMsg. The method should
+** take care that any prior string is freed by a call to [sqlite3_free()]
+** prior to assigning a new string to zErrMsg. ^After the error message
+** is delivered up to the client application, the string will be automatically
+** freed by sqlite3_free() and the zErrMsg field will be zeroed.
+*/
+struct sqlite3_vtab {
+ const sqlite3_module *pModule; /* The module for this virtual table */
+ int nRef; /* Number of open cursors */
+ char *zErrMsg; /* Error message from sqlite3_mprintf() */
+ /* Virtual table implementations will typically add additional fields */
+};
+
+/*
+** CAPI3REF: Virtual Table Cursor Object
+** KEYWORDS: sqlite3_vtab_cursor {virtual table cursor}
+**
+** Every [virtual table module] implementation uses a subclass of the
+** following structure to describe cursors that point into the
+** [virtual table] and are used
+** to loop through the virtual table. Cursors are created using the
+** [sqlite3_module.xOpen | xOpen] method of the module and are destroyed
+** by the [sqlite3_module.xClose | xClose] method. Cursors are used
+** by the [xFilter], [xNext], [xEof], [xColumn], and [xRowid] methods
+** of the module. Each module implementation will define
+** the content of a cursor structure to suit its own needs.
+**
+** This superclass exists in order to define fields of the cursor that
+** are common to all implementations.
+*/
+struct sqlite3_vtab_cursor {
+ sqlite3_vtab *pVtab; /* Virtual table of this cursor */
+ /* Virtual table implementations will typically add additional fields */
+};
+
+/*
+** CAPI3REF: Declare The Schema Of A Virtual Table
+**
+** ^The [xCreate] and [xConnect] methods of a
+** [virtual table module] call this interface
+** to declare the format (the names and datatypes of the columns) of
+** the virtual tables they implement.
+*/
+SQLITE_API int sqlite3_declare_vtab(sqlite3*, const char *zSQL);
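+
+/*
+** Example (illustrative sketch): a typical call from inside [xCreate] or
+** [xConnect].  The column names are hypothetical.
+**
+** <blockquote><pre>
+** &nbsp; rc = sqlite3_declare_vtab(db, "CREATE TABLE x(a INTEGER, b TEXT)");
+** </pre></blockquote>
+*/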
+
+/*
+** CAPI3REF: Overload A Function For A Virtual Table
+** METHOD: sqlite3
+**
+** ^(Virtual tables can provide alternative implementations of functions
+** using the [xFindFunction] method of the [virtual table module].
+** But global versions of those functions
+** must exist in order to be overloaded.)^
+**
+** ^(This API makes sure a global version of a function with a particular
+** name and number of parameters exists. If no such function exists
+** before this API is called, a new function is created.)^ ^The implementation
+** of the new function always causes an exception to be thrown. So
+** the new function is not good for anything by itself. Its only
+** purpose is to be a placeholder function that can be overloaded
+** by a [virtual table].
+*/
+SQLITE_API int sqlite3_overload_function(sqlite3*, const char *zFuncName, int nArg);
+
+/*
+** The interface to the virtual-table mechanism defined above (back up
+** to a comment remarkably similar to this one) is currently considered
+** to be experimental. The interface might change in incompatible ways.
+** If this is a problem for you, do not use the interface at this time.
+**
+** When the virtual-table mechanism stabilizes, we will declare the
+** interface fixed, support it indefinitely, and remove this comment.
+*/
+
+/*
+** CAPI3REF: A Handle To An Open BLOB
+** KEYWORDS: {BLOB handle} {BLOB handles}
+**
+** An instance of this object represents an open BLOB on which
+** [sqlite3_blob_open | incremental BLOB I/O] can be performed.
+** ^Objects of this type are created by [sqlite3_blob_open()]
+** and destroyed by [sqlite3_blob_close()].
+** ^The [sqlite3_blob_read()] and [sqlite3_blob_write()] interfaces
+** can be used to read or write small subsections of the BLOB.
+** ^The [sqlite3_blob_bytes()] interface returns the size of the BLOB in bytes.
+*/
+typedef struct sqlite3_blob sqlite3_blob;
+
+/*
+** CAPI3REF: Open A BLOB For Incremental I/O
+** METHOD: sqlite3
+** CONSTRUCTOR: sqlite3_blob
+**
+** ^(This interface opens a [BLOB handle | handle] to the BLOB located
+** in row iRow, column zColumn, table zTable in database zDb;
+** in other words, the same BLOB that would be selected by:
+**
+** <pre>
+** SELECT zColumn FROM zDb.zTable WHERE [rowid] = iRow;
+** </pre>)^
+**
+** ^(Parameter zDb is not the filename that contains the database, but
+** rather the symbolic name of the database. For attached databases, this is
+** the name that appears after the AS keyword in the [ATTACH] statement.
+** For the main database file, the database name is "main". For TEMP
+** tables, the database name is "temp".)^
+**
+** ^If the flags parameter is non-zero, then the BLOB is opened for read
+** and write access. ^If the flags parameter is zero, the BLOB is opened for
+** read-only access.
+**
+** ^(On success, [SQLITE_OK] is returned and the new [BLOB handle] is stored
+** in *ppBlob. Otherwise an [error code] is returned and, unless the error
+** code is SQLITE_MISUSE, *ppBlob is set to NULL.)^ ^This means that, provided
+** the API is not misused, it is always safe to call [sqlite3_blob_close()]
+** on *ppBlob after this function returns.
+**
+** This function fails with SQLITE_ERROR if any of the following are true:
+** <ul>
+** <li> ^(Database zDb does not exist)^,
+** <li> ^(Table zTable does not exist within database zDb)^,
+** <li> ^(Table zTable is a WITHOUT ROWID table)^,
+** <li> ^(Column zColumn does not exist)^,
+** <li> ^(Row iRow is not present in the table)^,
+** <li> ^(The specified column of row iRow contains a value that is not
+** a TEXT or BLOB value)^,
+** <li> ^(Column zColumn is part of an index, PRIMARY KEY or UNIQUE
+** constraint and the blob is being opened for read/write access)^,
+** <li> ^([foreign key constraints | Foreign key constraints] are enabled,
+** column zColumn is part of a [child key] definition and the blob is
+** being opened for read/write access)^.
+** </ul>
+**
+** ^Unless it returns SQLITE_MISUSE, this function sets the
+** [database connection] error code and message accessible via
+** [sqlite3_errcode()] and [sqlite3_errmsg()] and related functions.
+**
+** A BLOB referenced by sqlite3_blob_open() may be read using the
+** [sqlite3_blob_read()] interface and modified by using
+** [sqlite3_blob_write()]. The [BLOB handle] can be moved to a
+** different row of the same table using the [sqlite3_blob_reopen()]
+** interface. However, the column, table, or database of a [BLOB handle]
+** cannot be changed after the [BLOB handle] is opened.
+**
+** ^(If the row that a BLOB handle points to is modified by an
+** [UPDATE], [DELETE], or by [ON CONFLICT] side-effects
+** then the BLOB handle is marked as "expired".
+** This is true if any column of the row is changed, even a column
+** other than the one the BLOB handle is open on.)^
+** ^Calls to [sqlite3_blob_read()] and [sqlite3_blob_write()] for
+** an expired BLOB handle fail with a return code of [SQLITE_ABORT].
+** ^(Changes written into a BLOB prior to the BLOB expiring are not
+** rolled back by the expiration of the BLOB. Such changes will eventually
+** commit if the transaction continues to completion.)^
+**
+** ^Use the [sqlite3_blob_bytes()] interface to determine the size of
+** the opened blob. ^The size of a blob may not be changed by this
+** interface. Use the [UPDATE] SQL command to change the size of a
+** blob.
+**
+** ^The [sqlite3_bind_zeroblob()] and [sqlite3_result_zeroblob()] interfaces
+** and the built-in [zeroblob] SQL function may be used to create a
+** zero-filled blob to read or write using the incremental-blob interface.
+**
+** To avoid a resource leak, every open [BLOB handle] should eventually
+** be released by a call to [sqlite3_blob_close()].
+**
+** See also: [sqlite3_blob_close()],
+** [sqlite3_blob_reopen()], [sqlite3_blob_read()],
+** [sqlite3_blob_bytes()], [sqlite3_blob_write()].
+*/
+SQLITE_API int sqlite3_blob_open(
+ sqlite3*,
+ const char *zDb,
+ const char *zTable,
+ const char *zColumn,
+ sqlite3_int64 iRow,
+ int flags,
+ sqlite3_blob **ppBlob
+);
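+
+/*
+** Example (illustrative sketch): open the BLOB stored in row 1, column
+** "data" of a hypothetical table "images" for read-only access, read its
+** first bytes, and close the handle again.  Assumes an open [database
+** connection] db.
+**
+** <blockquote><pre>
+** &nbsp; sqlite3_blob *pBlob = 0;
+** &nbsp; char zBuf[16];
+** &nbsp; if( sqlite3_blob_open(db, "main", "images", "data",
+** &nbsp;                       1, 0, &pBlob)==SQLITE_OK ){
+** &nbsp;   int n = sqlite3_blob_bytes(pBlob);
+** &nbsp;   if( n>16 ) n = 16;
+** &nbsp;   sqlite3_blob_read(pBlob, zBuf, n, 0);
+** &nbsp;   sqlite3_blob_close(pBlob);
+** &nbsp; }
+** </pre></blockquote>
+*/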
+
+/*
+** CAPI3REF: Move a BLOB Handle to a New Row
+** METHOD: sqlite3_blob
+**
+** ^This function is used to move an existing [BLOB handle] so that it points
+** to a different row of the same database table. ^The new row is identified
+** by the rowid value passed as the second argument. Only the row can be
+** changed. ^The database, table and column on which the blob handle is open
+** remain the same. Moving an existing [BLOB handle] to a new row is
+** faster than closing the existing handle and opening a new one.
+**
+** ^(The new row must meet the same criteria as for [sqlite3_blob_open()] -
+** it must exist and there must be either a blob or text value stored in
+** the nominated column.)^ ^If the new row is not present in the table, or if
+** it does not contain a blob or text value, or if another error occurs, an
+** SQLite error code is returned and the blob handle is considered aborted.
+** ^All subsequent calls to [sqlite3_blob_read()], [sqlite3_blob_write()] or
+** [sqlite3_blob_reopen()] on an aborted blob handle immediately return
+** SQLITE_ABORT. ^Calling [sqlite3_blob_bytes()] on an aborted blob handle
+** always returns zero.
+**
+** ^This function sets the database handle error code and message.
+*/
+SQLITE_API int sqlite3_blob_reopen(sqlite3_blob *, sqlite3_int64);
+
+/*
+** CAPI3REF: Close A BLOB Handle
+** DESTRUCTOR: sqlite3_blob
+**
+** ^This function closes an open [BLOB handle]. ^(The BLOB handle is closed
+** unconditionally. Even if this routine returns an error code, the
+** handle is still closed.)^
+**
+** ^If the blob handle being closed was opened for read-write access, and if
+** the database is in auto-commit mode and there are no other open read-write
+** blob handles or active write statements, the current transaction is
+** committed. ^If an error occurs while committing the transaction, an error
+** code is returned and the transaction rolled back.
+**
+** Calling this function with an argument that is not a NULL pointer or an
+** open blob handle results in undefined behaviour. ^Calling this routine
+** with a null pointer (such as would be returned by a failed call to
+** [sqlite3_blob_open()]) is a harmless no-op. ^Otherwise, if this function
+** is passed a valid open blob handle, the values returned by the
+** sqlite3_errcode() and sqlite3_errmsg() functions are set before returning.
+*/
+SQLITE_API int sqlite3_blob_close(sqlite3_blob *);
+
+/*
+** CAPI3REF: Return The Size Of An Open BLOB
+** METHOD: sqlite3_blob
+**
+** ^Returns the size in bytes of the BLOB accessible via the
+** successfully opened [BLOB handle] in its only argument. ^The
+** incremental blob I/O routines can only read or overwrite existing
+** blob content; they cannot change the size of a blob.
+**
+** This routine only works on a [BLOB handle] which has been created
+** by a prior successful call to [sqlite3_blob_open()] and which has not
+** been closed by [sqlite3_blob_close()]. Passing any other pointer in
+** to this routine results in undefined and probably undesirable behavior.
+*/
+SQLITE_API int sqlite3_blob_bytes(sqlite3_blob *);
+
+/*
+** CAPI3REF: Read Data From A BLOB Incrementally
+** METHOD: sqlite3_blob
+**
+** ^(This function is used to read data from an open [BLOB handle] into a
+** caller-supplied buffer. N bytes of data are copied into buffer Z
+** from the open BLOB, starting at offset iOffset.)^
+**
+** ^If offset iOffset is less than N bytes from the end of the BLOB,
+** [SQLITE_ERROR] is returned and no data is read. ^If N or iOffset is
+** less than zero, [SQLITE_ERROR] is returned and no data is read.
+** ^The size of the blob (and hence the maximum value of N+iOffset)
+** can be determined using the [sqlite3_blob_bytes()] interface.
+**
+** ^An attempt to read from an expired [BLOB handle] fails with an
+** error code of [SQLITE_ABORT].
+**
+** ^(On success, sqlite3_blob_read() returns SQLITE_OK.
+** Otherwise, an [error code] or an [extended error code] is returned.)^
+**
+** This routine only works on a [BLOB handle] which has been created
+** by a prior successful call to [sqlite3_blob_open()] and which has not
+** been closed by [sqlite3_blob_close()]. Passing any other pointer in
+** to this routine results in undefined and probably undesirable behavior.
+**
+** See also: [sqlite3_blob_write()].
+*/
+SQLITE_API int sqlite3_blob_read(sqlite3_blob *, void *Z, int N, int iOffset);
+
+/*
+** CAPI3REF: Write Data Into A BLOB Incrementally
+** METHOD: sqlite3_blob
+**
+** ^(This function is used to write data into an open [BLOB handle] from a
+** caller-supplied buffer. N bytes of data are copied from the buffer Z
+** into the open BLOB, starting at offset iOffset.)^
+**
+** ^(On success, sqlite3_blob_write() returns SQLITE_OK.
+** Otherwise, an [error code] or an [extended error code] is returned.)^
+** ^Unless SQLITE_MISUSE is returned, this function sets the
+** [database connection] error code and message accessible via
+** [sqlite3_errcode()] and [sqlite3_errmsg()] and related functions.
+**
+** ^If the [BLOB handle] passed as the first argument was not opened for
+** writing (the flags parameter to [sqlite3_blob_open()] was zero),
+** this function returns [SQLITE_READONLY].
+**
+** This function may only modify the contents of the BLOB; it is
+** not possible to increase the size of a BLOB using this API.
+** ^If offset iOffset is less than N bytes from the end of the BLOB,
+** [SQLITE_ERROR] is returned and no data is written. The size of the
+** BLOB (and hence the maximum value of N+iOffset) can be determined
+** using the [sqlite3_blob_bytes()] interface. ^If N or iOffset are less
+** than zero, [SQLITE_ERROR] is returned and no data is written.
+**
+** ^An attempt to write to an expired [BLOB handle] fails with an
+** error code of [SQLITE_ABORT]. ^Writes to the BLOB that occurred
+** before the [BLOB handle] expired are not rolled back by the
+** expiration of the handle, though of course those changes might
+** have been overwritten by the statement that expired the BLOB handle
+** or by other independent statements.
+**
+** This routine only works on a [BLOB handle] which has been created
+** by a prior successful call to [sqlite3_blob_open()] and which has not
+** been closed by [sqlite3_blob_close()]. Passing any other pointer in
+** to this routine results in undefined and probably undesirable behavior.
+**
+** See also: [sqlite3_blob_read()].
+*/
+SQLITE_API int sqlite3_blob_write(sqlite3_blob *, const void *z, int n, int iOffset);
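+
+/*
+** Example (illustrative sketch): reserve space with the [zeroblob] SQL
+** function, then fill it using incremental writes.  The table "images"
+** and the aPayload buffer are hypothetical; error handling is omitted.
+**
+** <blockquote><pre>
+** &nbsp; sqlite3_blob *pBlob = 0;
+** &nbsp; sqlite3_exec(db,
+** &nbsp;     "INSERT INTO images(rowid,data) VALUES(7, zeroblob(100))",
+** &nbsp;     0, 0, 0);
+** &nbsp; sqlite3_blob_open(db, "main", "images", "data", 7, 1, &pBlob);
+** &nbsp; sqlite3_blob_write(pBlob, aPayload, 100, 0);
+** &nbsp; sqlite3_blob_close(pBlob);
+** </pre></blockquote>
+*/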
+
+/*
+** CAPI3REF: Virtual File System Objects
+**
+** A virtual filesystem (VFS) is an [sqlite3_vfs] object
+** that SQLite uses to interact
+** with the underlying operating system. Most SQLite builds come with a
+** single default VFS that is appropriate for the host computer.
+** New VFSes can be registered and existing VFSes can be unregistered.
+** The following interfaces are provided.
+**
+** ^The sqlite3_vfs_find() interface returns a pointer to a VFS given its name.
+** ^Names are case sensitive.
+** ^Names are zero-terminated UTF-8 strings.
+** ^If there is no match, a NULL pointer is returned.
+** ^If zVfsName is NULL then the default VFS is returned.
+**
+** ^New VFSes are registered with sqlite3_vfs_register().
+** ^Each new VFS becomes the default VFS if the makeDflt flag is set.
+** ^The same VFS can be registered multiple times without injury.
+** ^To make an existing VFS into the default VFS, register it again
+** with the makeDflt flag set. If two different VFSes with the
+** same name are registered, the behavior is undefined. If a
+** VFS is registered with a name that is NULL or an empty string,
+** then the behavior is undefined.
+**
+** ^Unregister a VFS with the sqlite3_vfs_unregister() interface.
+** ^(If the default VFS is unregistered, another VFS is chosen as
+** the default. The choice for the new VFS is arbitrary.)^
+*/
+SQLITE_API sqlite3_vfs *sqlite3_vfs_find(const char *zVfsName);
+SQLITE_API int sqlite3_vfs_register(sqlite3_vfs*, int makeDflt);
+SQLITE_API int sqlite3_vfs_unregister(sqlite3_vfs*);
+
+/*
+** CAPI3REF: Mutexes
+**
+** The SQLite core uses these routines for thread
+** synchronization. Though they are intended for internal
+** use by SQLite, code that links against SQLite is
+** permitted to use any of these routines.
+**
+** The SQLite source code contains multiple implementations
+** of these mutex routines. An appropriate implementation
+** is selected automatically at compile-time. The following
+** implementations are available in the SQLite core:
+**
+** <ul>
+** <li> SQLITE_MUTEX_PTHREADS
+** <li> SQLITE_MUTEX_W32
+** <li> SQLITE_MUTEX_NOOP
+** </ul>
+**
+** The SQLITE_MUTEX_NOOP implementation is a set of routines
+** that does no real locking and is appropriate for use in
+** a single-threaded application. The SQLITE_MUTEX_PTHREADS and
+** SQLITE_MUTEX_W32 implementations are appropriate for use on Unix
+** and Windows.
+**
+** If SQLite is compiled with the SQLITE_MUTEX_APPDEF preprocessor
+** macro defined (with "-DSQLITE_MUTEX_APPDEF=1"), then no mutex
+** implementation is included with the library. In this case the
+** application must supply a custom mutex implementation using the
+** [SQLITE_CONFIG_MUTEX] option of the sqlite3_config() function
+** before calling sqlite3_initialize() or any other public sqlite3_
+** function that calls sqlite3_initialize().
+**
+** ^The sqlite3_mutex_alloc() routine allocates a new
+** mutex and returns a pointer to it. ^The sqlite3_mutex_alloc()
+** routine returns NULL if it is unable to allocate the requested
+** mutex. The argument to sqlite3_mutex_alloc() must be one of these
+** integer constants:
+**
+** <ul>
+** <li> SQLITE_MUTEX_FAST
+** <li> SQLITE_MUTEX_RECURSIVE
+** <li> SQLITE_MUTEX_STATIC_MAIN
+** <li> SQLITE_MUTEX_STATIC_MEM
+** <li> SQLITE_MUTEX_STATIC_OPEN
+** <li> SQLITE_MUTEX_STATIC_PRNG
+** <li> SQLITE_MUTEX_STATIC_LRU
+** <li> SQLITE_MUTEX_STATIC_PMEM
+** <li> SQLITE_MUTEX_STATIC_APP1
+** <li> SQLITE_MUTEX_STATIC_APP2
+** <li> SQLITE_MUTEX_STATIC_APP3
+** <li> SQLITE_MUTEX_STATIC_VFS1
+** <li> SQLITE_MUTEX_STATIC_VFS2
+** <li> SQLITE_MUTEX_STATIC_VFS3
+** </ul>
+**
+** ^The first two constants (SQLITE_MUTEX_FAST and SQLITE_MUTEX_RECURSIVE)
+** cause sqlite3_mutex_alloc() to create
+** a new mutex. ^The new mutex is recursive when SQLITE_MUTEX_RECURSIVE
+** is used but not necessarily so when SQLITE_MUTEX_FAST is used.
+** The mutex implementation does not need to make a distinction
+** between SQLITE_MUTEX_RECURSIVE and SQLITE_MUTEX_FAST if it does
+** not want to. SQLite will only request a recursive mutex in
+** cases where it really needs one. If a faster non-recursive mutex
+** implementation is available on the host platform, the mutex subsystem
+** might return such a mutex in response to SQLITE_MUTEX_FAST.
+**
+** ^The other allowed parameters to sqlite3_mutex_alloc() (anything other
+** than SQLITE_MUTEX_FAST and SQLITE_MUTEX_RECURSIVE) each return
+** a pointer to a static preexisting mutex. ^Twelve static mutexes (the
+** SQLITE_MUTEX_STATIC_* values listed above) are used by the current
+** version of SQLite. Future versions of SQLite
+** may add additional static mutexes. Static mutexes are for internal
+** use by SQLite only. Applications that use SQLite mutexes should
+** use only the dynamic mutexes returned by SQLITE_MUTEX_FAST or
+** SQLITE_MUTEX_RECURSIVE.
+**
+** ^Note that if one of the dynamic mutex parameters (SQLITE_MUTEX_FAST
+** or SQLITE_MUTEX_RECURSIVE) is used then sqlite3_mutex_alloc()
+** returns a different mutex on every call. ^For the static
+** mutex types, the same mutex is returned on every call that has
+** the same type number.
+**
+** ^The sqlite3_mutex_free() routine deallocates a previously
+** allocated dynamic mutex. Attempting to deallocate a static
+** mutex results in undefined behavior.
+**
+** ^The sqlite3_mutex_enter() and sqlite3_mutex_try() routines attempt
+** to enter a mutex. ^If another thread is already within the mutex,
+** sqlite3_mutex_enter() will block and sqlite3_mutex_try() will return
+** SQLITE_BUSY. ^The sqlite3_mutex_try() interface returns [SQLITE_OK]
+** upon successful entry. ^(Mutexes created using
+** SQLITE_MUTEX_RECURSIVE can be entered multiple times by the same thread.
+** In such cases, the
+** mutex must be exited an equal number of times before another thread
+** can enter.)^ If the same thread tries to enter any mutex other
+** than an SQLITE_MUTEX_RECURSIVE more than once, the behavior is undefined.
+**
+** ^(Some systems (for example, Windows 95) do not support the operation
+** implemented by sqlite3_mutex_try(). On those systems, sqlite3_mutex_try()
+** will always return SQLITE_BUSY. The SQLite core only ever uses
+** sqlite3_mutex_try() as an optimization so this is acceptable
+** behavior.)^
+**
+** ^The sqlite3_mutex_leave() routine exits a mutex that was
+** previously entered by the same thread. The behavior
+** is undefined if the mutex is not currently entered by the
+** calling thread or is not currently allocated.
+**
+** ^If the argument to sqlite3_mutex_enter(), sqlite3_mutex_try(), or
+** sqlite3_mutex_leave() is a NULL pointer, then all three routines
+** behave as no-ops.
+**
+** See also: [sqlite3_mutex_held()] and [sqlite3_mutex_notheld()].
+*/
+SQLITE_API sqlite3_mutex *sqlite3_mutex_alloc(int);
+SQLITE_API void sqlite3_mutex_free(sqlite3_mutex*);
+SQLITE_API void sqlite3_mutex_enter(sqlite3_mutex*);
+SQLITE_API int sqlite3_mutex_try(sqlite3_mutex*);
+SQLITE_API void sqlite3_mutex_leave(sqlite3_mutex*);
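+
+/*
+** Example (illustrative sketch): allocate a dynamic mutex, use it to
+** guard a critical section, then release it.  The function
+** workProtectedByMutex() is a hypothetical stand-in for the protected
+** work.
+**
+** <blockquote><pre>
+** &nbsp; sqlite3_mutex *pMutex = sqlite3_mutex_alloc(SQLITE_MUTEX_FAST);
+** &nbsp; if( pMutex ){
+** &nbsp;   sqlite3_mutex_enter(pMutex);
+** &nbsp;   workProtectedByMutex();
+** &nbsp;   sqlite3_mutex_leave(pMutex);
+** &nbsp;   sqlite3_mutex_free(pMutex);
+** &nbsp; }
+** </pre></blockquote>
+*/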
+
+/*
+** CAPI3REF: Mutex Methods Object
+**
+** An instance of this structure defines the low-level routines
+** used to allocate and use mutexes.
+**
+** Usually, the default mutex implementations provided by SQLite are
+** sufficient, however the application has the option of substituting a custom
+** implementation for specialized deployments or systems for which SQLite
+** does not provide a suitable implementation. In this case, the application
+** creates and populates an instance of this structure to pass
+** to sqlite3_config() along with the [SQLITE_CONFIG_MUTEX] option.
+** Additionally, an instance of this structure can be used as an
+** output variable when querying the system for the current mutex
+** implementation, using the [SQLITE_CONFIG_GETMUTEX] option.
+**
+** ^The xMutexInit method defined by this structure is invoked as
+** part of system initialization by the sqlite3_initialize() function.
+** ^The xMutexInit routine is called by SQLite exactly once for each
+** effective call to [sqlite3_initialize()].
+**
+** ^The xMutexEnd method defined by this structure is invoked as
+** part of system shutdown by the sqlite3_shutdown() function. The
+** implementation of this method is expected to release all outstanding
+** resources obtained by the mutex methods implementation, especially
+** those obtained by the xMutexInit method. ^The xMutexEnd()
+** interface is invoked exactly once for each call to [sqlite3_shutdown()].
+**
+** ^(The remaining seven methods defined by this structure (xMutexAlloc,
+** xMutexFree, xMutexEnter, xMutexTry, xMutexLeave, xMutexHeld and
+** xMutexNotheld) implement the following interfaces (respectively):
+**
+** <ul>
+** <li> [sqlite3_mutex_alloc()] </li>
+** <li> [sqlite3_mutex_free()] </li>
+** <li> [sqlite3_mutex_enter()] </li>
+** <li> [sqlite3_mutex_try()] </li>
+** <li> [sqlite3_mutex_leave()] </li>
+** <li> [sqlite3_mutex_held()] </li>
+** <li> [sqlite3_mutex_notheld()] </li>
+** </ul>)^
+**
+** The only difference is that the public sqlite3_XXX functions enumerated
+** above silently ignore any invocations that pass a NULL pointer instead
+** of a valid mutex handle. The implementations of the methods defined
+** by this structure are not required to handle this case. The results
+** of passing a NULL pointer instead of a valid mutex handle are undefined
+** (i.e. it is acceptable to provide an implementation that segfaults if
+** it is passed a NULL pointer).
+**
+** The xMutexInit() method must be threadsafe. It must be harmless to
+** invoke xMutexInit() multiple times within the same process and without
+** intervening calls to xMutexEnd(). Second and subsequent calls to
+** xMutexInit() must be no-ops.
+**
+** xMutexInit() must not use SQLite memory allocation ([sqlite3_malloc()]
+** and its associates). Similarly, xMutexAlloc() must not use SQLite memory
+** allocation for a static mutex. ^However xMutexAlloc() may use SQLite
+** memory allocation for a fast or recursive mutex.
+**
+** ^SQLite will invoke the xMutexEnd() method when [sqlite3_shutdown()] is
+** called, but only if the prior call to xMutexInit returned SQLITE_OK.
+** If xMutexInit fails in any way, it is expected to clean up after itself
+** prior to returning.
+*/
+typedef struct sqlite3_mutex_methods sqlite3_mutex_methods;
+struct sqlite3_mutex_methods {
+ int (*xMutexInit)(void);
+ int (*xMutexEnd)(void);
+ sqlite3_mutex *(*xMutexAlloc)(int);
+ void (*xMutexFree)(sqlite3_mutex *);
+ void (*xMutexEnter)(sqlite3_mutex *);
+ int (*xMutexTry)(sqlite3_mutex *);
+ void (*xMutexLeave)(sqlite3_mutex *);
+ int (*xMutexHeld)(sqlite3_mutex *);
+ int (*xMutexNotheld)(sqlite3_mutex *);
+};
+
+/*
+** CAPI3REF: Mutex Verification Routines
+**
+** The sqlite3_mutex_held() and sqlite3_mutex_notheld() routines
+** are intended for use inside assert() statements. The SQLite core
+** never uses these routines except inside an assert() and applications
+** are advised to follow the lead of the core. The SQLite core only
+** provides implementations for these routines when it is compiled
+** with the SQLITE_DEBUG flag. External mutex implementations
+** are only required to provide these routines if SQLITE_DEBUG is
+** defined and if NDEBUG is not defined.
+**
+** These routines should return true if the mutex in their argument
+** is held or not held, respectively, by the calling thread.
+**
+** The implementation is not required to provide versions of these
+** routines that actually work. If the implementation does not provide working
+** versions of these routines, it should at least provide stubs that always
+** return true so that one does not get spurious assertion failures.
+**
+** If the argument to sqlite3_mutex_held() is a NULL pointer then
+** the routine should return 1. This seems counter-intuitive since
+** clearly the mutex cannot be held if it does not exist. But
+** the reason the mutex does not exist is because the build is not
+** using mutexes. And we do not want the assert() containing the
+** call to sqlite3_mutex_held() to fail, so a non-zero return is
+** the appropriate thing to do. The sqlite3_mutex_notheld()
+** interface should also return 1 when given a NULL pointer.
+*/
+#ifndef NDEBUG
+SQLITE_API int sqlite3_mutex_held(sqlite3_mutex*);
+SQLITE_API int sqlite3_mutex_notheld(sqlite3_mutex*);
+#endif
+
+/*
+** CAPI3REF: Mutex Types
+**
+** The [sqlite3_mutex_alloc()] interface takes a single argument
+** which is one of these integer constants.
+**
+** The set of static mutexes may change from one SQLite release to the
+** next. Applications that override the built-in mutex logic must be
+** prepared to accommodate additional static mutexes.
+*/
+#define SQLITE_MUTEX_FAST 0
+#define SQLITE_MUTEX_RECURSIVE 1
+#define SQLITE_MUTEX_STATIC_MAIN 2
+#define SQLITE_MUTEX_STATIC_MEM 3 /* sqlite3_malloc() */
+#define SQLITE_MUTEX_STATIC_MEM2 4 /* NOT USED */
+#define SQLITE_MUTEX_STATIC_OPEN 4 /* sqlite3BtreeOpen() */
+#define SQLITE_MUTEX_STATIC_PRNG 5 /* sqlite3_randomness() */
+#define SQLITE_MUTEX_STATIC_LRU 6 /* lru page list */
+#define SQLITE_MUTEX_STATIC_LRU2 7 /* NOT USED */
+#define SQLITE_MUTEX_STATIC_PMEM 7 /* sqlite3PageMalloc() */
+#define SQLITE_MUTEX_STATIC_APP1 8 /* For use by application */
+#define SQLITE_MUTEX_STATIC_APP2 9 /* For use by application */
+#define SQLITE_MUTEX_STATIC_APP3 10 /* For use by application */
+#define SQLITE_MUTEX_STATIC_VFS1 11 /* For use by built-in VFS */
+#define SQLITE_MUTEX_STATIC_VFS2 12 /* For use by extension VFS */
+#define SQLITE_MUTEX_STATIC_VFS3 13 /* For use by application VFS */
+
+/* Legacy compatibility: */
+#define SQLITE_MUTEX_STATIC_MASTER 2
+
+
+/*
+** CAPI3REF: Retrieve the mutex for a database connection
+** METHOD: sqlite3
+**
+** ^This interface returns a pointer to the [sqlite3_mutex] object that
+** serializes access to the [database connection] given in the argument
+** when the [threading mode] is Serialized.
+** ^If the [threading mode] is Single-thread or Multi-thread then this
+** routine returns a NULL pointer.
+*/
+SQLITE_API sqlite3_mutex *sqlite3_db_mutex(sqlite3*);
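+
+/*
+** Example (illustrative sketch): hold the connection mutex across a pair
+** of calls so that no other thread can use db between them.  The table
+** "t" is hypothetical.  In Single-thread or Multi-thread mode
+** sqlite3_db_mutex() returns NULL, which the mutex routines treat as a
+** no-op.
+**
+** <blockquote><pre>
+** &nbsp; int nChanged;
+** &nbsp; sqlite3_mutex_enter( sqlite3_db_mutex(db) );
+** &nbsp; sqlite3_exec(db, "UPDATE t SET x=x+1", 0, 0, 0);
+** &nbsp; nChanged = sqlite3_changes(db);
+** &nbsp; sqlite3_mutex_leave( sqlite3_db_mutex(db) );
+** </pre></blockquote>
+*/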
+
+/*
+** CAPI3REF: Low-Level Control Of Database Files
+** METHOD: sqlite3
+** KEYWORDS: {file control}
+**
+** ^The [sqlite3_file_control()] interface makes a direct call to the
+** xFileControl method for the [sqlite3_io_methods] object associated
+** with a particular database identified by the second argument. ^The
+** name of the database is "main" for the main database or "temp" for the
+** TEMP database, or the name that appears after the AS keyword for
+** databases that are added using the [ATTACH] SQL command.
+** ^A NULL pointer can be used in place of "main" to refer to the
+** main database file.
+** ^The third and fourth parameters to this routine
+** are passed directly through to the second and third parameters of
+** the xFileControl method. ^The return value of the xFileControl
+** method becomes the return value of this routine.
+**
+** A few opcodes for [sqlite3_file_control()] are handled directly
+** by the SQLite core and never invoke the
+** sqlite3_io_methods.xFileControl method.
+** ^The [SQLITE_FCNTL_FILE_POINTER] value for the op parameter causes
+** a pointer to the underlying [sqlite3_file] object to be written into
+** the space pointed to by the 4th parameter. The
+** [SQLITE_FCNTL_JOURNAL_POINTER] works similarly except that it returns
+** the [sqlite3_file] object associated with the journal file instead of
+** the main database. The [SQLITE_FCNTL_VFS_POINTER] opcode returns
+** a pointer to the underlying [sqlite3_vfs] object for the file.
+** The [SQLITE_FCNTL_DATA_VERSION] returns the data version counter
+** from the pager.
+**
+** ^If the second parameter (zDbName) does not match the name of any
+** open database file, then SQLITE_ERROR is returned. ^This error
+** code is not remembered and will not be recalled by [sqlite3_errcode()]
+** or [sqlite3_errmsg()]. The underlying xFileControl method might
+** also return SQLITE_ERROR. There is no way to distinguish between
+** an incorrect zDbName and an SQLITE_ERROR return from the underlying
+** xFileControl method.
+**
+** See also: [file control opcodes]
+*/
+SQLITE_API int sqlite3_file_control(sqlite3*, const char *zDbName, int op, void*);
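+
+/*
+** Example (illustrative sketch): use the [SQLITE_FCNTL_FILE_POINTER]
+** opcode described above to obtain the [sqlite3_file] object for the
+** main database file.
+**
+** <blockquote><pre>
+** &nbsp; sqlite3_file *pFile = 0;
+** &nbsp; int rc = sqlite3_file_control(db, "main",
+** &nbsp;                               SQLITE_FCNTL_FILE_POINTER, &pFile);
+** </pre></blockquote>
+*/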
+
+/*
+** CAPI3REF: Testing Interface
+**
+** ^The sqlite3_test_control() interface is used to read out internal
+** state of SQLite and to inject faults into SQLite for testing
+** purposes. ^The first parameter is an operation code that determines
+** the number, meaning, and operation of all subsequent parameters.
+**
+** This interface is not for use by applications. It exists solely
+** for verifying the correct operation of the SQLite library. Depending
+** on how the SQLite library is compiled, this interface might not exist.
+**
+** The details of the operation codes, their meanings, the parameters
+** they take, and what they do are all subject to change without notice.
+** Unlike most of the SQLite API, this function is not guaranteed to
+** operate consistently from one release to the next.
+*/
+SQLITE_API int sqlite3_test_control(int op, ...);
+
+/*
+** CAPI3REF: Testing Interface Operation Codes
+**
+** These constants are the valid operation code parameters used
+** as the first argument to [sqlite3_test_control()].
+**
+** These parameters and their meanings are subject to change
+** without notice. These values are for testing purposes only.
+** Applications should not use any of these parameters or the
+** [sqlite3_test_control()] interface.
+*/
+#define SQLITE_TESTCTRL_FIRST 5
+#define SQLITE_TESTCTRL_PRNG_SAVE 5
+#define SQLITE_TESTCTRL_PRNG_RESTORE 6
+#define SQLITE_TESTCTRL_PRNG_RESET 7 /* NOT USED */
+#define SQLITE_TESTCTRL_BITVEC_TEST 8
+#define SQLITE_TESTCTRL_FAULT_INSTALL 9
+#define SQLITE_TESTCTRL_BENIGN_MALLOC_HOOKS 10
+#define SQLITE_TESTCTRL_PENDING_BYTE 11
+#define SQLITE_TESTCTRL_ASSERT 12
+#define SQLITE_TESTCTRL_ALWAYS 13
+#define SQLITE_TESTCTRL_RESERVE 14 /* NOT USED */
+#define SQLITE_TESTCTRL_OPTIMIZATIONS 15
+#define SQLITE_TESTCTRL_ISKEYWORD 16 /* NOT USED */
+#define SQLITE_TESTCTRL_SCRATCHMALLOC 17 /* NOT USED */
+#define SQLITE_TESTCTRL_INTERNAL_FUNCTIONS 17
+#define SQLITE_TESTCTRL_LOCALTIME_FAULT 18
+#define SQLITE_TESTCTRL_EXPLAIN_STMT 19 /* NOT USED */
+#define SQLITE_TESTCTRL_ONCE_RESET_THRESHOLD 19
+#define SQLITE_TESTCTRL_NEVER_CORRUPT 20
+#define SQLITE_TESTCTRL_VDBE_COVERAGE 21
+#define SQLITE_TESTCTRL_BYTEORDER 22
+#define SQLITE_TESTCTRL_ISINIT 23
+#define SQLITE_TESTCTRL_SORTER_MMAP 24
+#define SQLITE_TESTCTRL_IMPOSTER 25
+#define SQLITE_TESTCTRL_PARSER_COVERAGE 26
+#define SQLITE_TESTCTRL_RESULT_INTREAL 27
+#define SQLITE_TESTCTRL_PRNG_SEED 28
+#define SQLITE_TESTCTRL_EXTRA_SCHEMA_CHECKS 29
+#define SQLITE_TESTCTRL_LAST 29 /* Largest TESTCTRL */
+
+/*
+** CAPI3REF: SQL Keyword Checking
+**
+** These routines provide access to the set of SQL language keywords
+** recognized by SQLite. Applications can use these routines to determine
+** whether or not a specific identifier needs to be escaped (for example,
+** by enclosing in double-quotes) so as not to confuse the parser.
+**
+** The sqlite3_keyword_count() interface returns the number of distinct
+** keywords understood by SQLite.
+**
+** The sqlite3_keyword_name(N,Z,L) interface finds the N-th keyword and
+** makes *Z point to that keyword expressed as UTF8 and writes the number
+** of bytes in the keyword into *L. The string that *Z points to is not
+** zero-terminated. The sqlite3_keyword_name(N,Z,L) routine returns
+** SQLITE_OK if N is within bounds and SQLITE_ERROR if not. If either Z
+** or L are NULL or invalid pointers then calls to
+** sqlite3_keyword_name(N,Z,L) result in undefined behavior.
+**
+** The sqlite3_keyword_check(Z,L) interface checks to see whether or not
+** the L-byte UTF8 identifier that Z points to is a keyword, returning non-zero
+** if it is and zero if not.
+**
+** The parser used by SQLite is forgiving. It is often possible to use
+** a keyword as an identifier as long as such use does not result in a
+** parsing ambiguity. For example, the statement
+** "CREATE TABLE BEGIN(REPLACE,PRAGMA,END);" is accepted by SQLite, and
+** creates a new table named "BEGIN" with three columns named
+** "REPLACE", "PRAGMA", and "END". Nevertheless, best practice is to avoid
+** using keywords as identifiers. Common techniques used to avoid keyword
+** name collisions include:
+** <ul>
+** <li> Put all identifier names inside double-quotes. This is the official
+** SQL way to escape identifier names.
+** <li> Put identifier names inside &#91;...&#93;. This is not standard SQL,
+** but it is what SQL Server does and so lots of programmers use this
+** technique.
+** <li> Begin every identifier with the letter "Z" as no SQL keywords start
+** with "Z".
+** <li> Include a digit somewhere in every identifier name.
+** </ul>
+**
+** Note that the number of keywords understood by SQLite can depend on
+** compile-time options. For example, "VACUUM" is not a keyword if
+** SQLite is compiled with the [-DSQLITE_OMIT_VACUUM] option. Also,
+** new keywords may be added to future releases of SQLite.
+*/
+SQLITE_API int sqlite3_keyword_count(void);
+SQLITE_API int sqlite3_keyword_name(int,const char**,int*);
+SQLITE_API int sqlite3_keyword_check(const char*,int);
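+
+/*
+** Example (illustrative sketch): print every keyword known to this build
+** of SQLite, then test a candidate identifier.
+**
+** <blockquote><pre>
+** &nbsp; int i, n = sqlite3_keyword_count();
+** &nbsp; for(i=0; i<n; i++){
+** &nbsp;   const char *z; int len;
+** &nbsp;   if( sqlite3_keyword_name(i, &z, &len)==SQLITE_OK ){
+** &nbsp;     printf("%.*s\n", len, z);
+** &nbsp;   }
+** &nbsp; }
+** &nbsp; if( sqlite3_keyword_check("vacuum", 6) ){
+** &nbsp;   printf("identifier needs quoting\n");
+** &nbsp; }
+** </pre></blockquote>
+*/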
+
+/*
+** CAPI3REF: Dynamic String Object
+** KEYWORDS: {dynamic string}
+**
+** An instance of the sqlite3_str object contains a dynamically-sized
+** string under construction.
+**
+** The lifecycle of an sqlite3_str object is as follows:
+** <ol>
+** <li> ^The sqlite3_str object is created using [sqlite3_str_new()].
+** <li> ^Text is appended to the sqlite3_str object using various
+** methods, such as [sqlite3_str_appendf()].
+** <li> ^The sqlite3_str object is destroyed and the string it created
+** is returned using the [sqlite3_str_finish()] interface.
+** </ol>
+*/
+typedef struct sqlite3_str sqlite3_str;
+
+/*
+** CAPI3REF: Create A New Dynamic String Object
+** CONSTRUCTOR: sqlite3_str
+**
+** ^The [sqlite3_str_new(D)] interface allocates and initializes
+** a new [sqlite3_str] object. To avoid memory leaks, the object returned by
+** [sqlite3_str_new()] must be freed by a subsequent call to
+** [sqlite3_str_finish(X)].
+**
+** ^The [sqlite3_str_new(D)] interface always returns a pointer to a
+** valid [sqlite3_str] object, though in the event of an out-of-memory
+** error the returned object might be a special singleton that will
+** silently reject new text, always return SQLITE_NOMEM from
+** [sqlite3_str_errcode()], always return 0 for
+** [sqlite3_str_length()], and always return NULL from
+** [sqlite3_str_finish(X)]. It is always safe to use the value
+** returned by [sqlite3_str_new(D)] as the sqlite3_str parameter
+** to any of the other [sqlite3_str] methods.
+**
+** The D parameter to [sqlite3_str_new(D)] may be NULL. If the
+** D parameter in [sqlite3_str_new(D)] is not NULL, then the maximum
+** length of the string contained in the [sqlite3_str] object will be
+** the value set for [sqlite3_limit](D,[SQLITE_LIMIT_LENGTH]) instead
+** of [SQLITE_MAX_LENGTH].
+*/
+SQLITE_API sqlite3_str *sqlite3_str_new(sqlite3*);
+
+/*
+** CAPI3REF: Finalize A Dynamic String
+** DESTRUCTOR: sqlite3_str
+**
+** ^The [sqlite3_str_finish(X)] interface destroys the sqlite3_str object X
+** and returns a pointer to a memory buffer obtained from [sqlite3_malloc64()]
+** that contains the constructed string. The calling application should
+** pass the returned value to [sqlite3_free()] to avoid a memory leak.
+** ^The [sqlite3_str_finish(X)] interface may return a NULL pointer if any
+** errors were encountered during construction of the string. ^The
+** [sqlite3_str_finish(X)] interface will also return a NULL pointer if the
+** string in [sqlite3_str] object X is zero bytes long.
+*/
+SQLITE_API char *sqlite3_str_finish(sqlite3_str*);
+
+/*
+** CAPI3REF: Add Content To A Dynamic String
+** METHOD: sqlite3_str
+**
+** These interfaces add content to an sqlite3_str object previously obtained
+** from [sqlite3_str_new()].
+**
+** ^The [sqlite3_str_appendf(X,F,...)] and
+** [sqlite3_str_vappendf(X,F,V)] interfaces use the [built-in printf]
+** functionality of SQLite to append formatted text onto the end of
+** [sqlite3_str] object X.
+**
+** ^The [sqlite3_str_append(X,S,N)] method appends exactly N bytes from string S
+** onto the end of the [sqlite3_str] object X. N must be non-negative.
+** S must contain at least N non-zero bytes of content. To append a
+** zero-terminated string in its entirety, use the [sqlite3_str_appendall()]
+** method instead.
+**
+** ^The [sqlite3_str_appendall(X,S)] method appends the complete content of
+** zero-terminated string S onto the end of [sqlite3_str] object X.
+**
+** ^The [sqlite3_str_appendchar(X,N,C)] method appends N copies of the
+** single-byte character C onto the end of [sqlite3_str] object X.
+** ^This method can be used, for example, to add whitespace indentation.
+**
+** ^The [sqlite3_str_reset(X)] method resets the string under construction
+** inside [sqlite3_str] object X back to zero bytes in length.
+**
+** These methods do not return a result code. ^If an error occurs, that fact
+** is recorded in the [sqlite3_str] object and can be recovered by a
+** subsequent call to [sqlite3_str_errcode(X)].
+*/
+SQLITE_API void sqlite3_str_appendf(sqlite3_str*, const char *zFormat, ...);
+SQLITE_API void sqlite3_str_vappendf(sqlite3_str*, const char *zFormat, va_list);
+SQLITE_API void sqlite3_str_append(sqlite3_str*, const char *zIn, int N);
+SQLITE_API void sqlite3_str_appendall(sqlite3_str*, const char *zIn);
+SQLITE_API void sqlite3_str_appendchar(sqlite3_str*, int N, char C);
+SQLITE_API void sqlite3_str_reset(sqlite3_str*);
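+
+/*
+** Example (illustrative sketch): build a quoted SQL literal with the
+** [built-in printf] %Q conversion and retrieve the finished string.  The
+** table "log" and the zMsg string are hypothetical.
+**
+** <blockquote><pre>
+** &nbsp; char *zSql;
+** &nbsp; sqlite3_str *pStr = sqlite3_str_new(db);
+** &nbsp; sqlite3_str_appendf(pStr, "INSERT INTO log VALUES(%Q)", zMsg);
+** &nbsp; zSql = sqlite3_str_finish(pStr);
+** &nbsp; if( zSql ){
+** &nbsp;   sqlite3_exec(db, zSql, 0, 0, 0);
+** &nbsp;   sqlite3_free(zSql);
+** &nbsp; }
+** </pre></blockquote>
+*/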
+
+/*
+** CAPI3REF: Status Of A Dynamic String
+** METHOD: sqlite3_str
+**
+** These interfaces return the current status of an [sqlite3_str] object.
+**
+** ^If any prior errors have occurred while constructing the dynamic string
+** in sqlite3_str X, then the [sqlite3_str_errcode(X)] method will return
+** an appropriate error code. ^The [sqlite3_str_errcode(X)] method returns
+** [SQLITE_NOMEM] following any out-of-memory error, or
+** [SQLITE_TOOBIG] if the size of the dynamic string exceeds
+** [SQLITE_MAX_LENGTH], or [SQLITE_OK] if there have been no errors.
+**
+** ^The [sqlite3_str_length(X)] method returns the current length, in bytes,
+** of the dynamic string under construction in [sqlite3_str] object X.
+** ^The length returned by [sqlite3_str_length(X)] does not include the
+** zero-termination byte.
+**
+** ^The [sqlite3_str_value(X)] method returns a pointer to the current
+** content of the dynamic string under construction in X. The value
+** returned by [sqlite3_str_value(X)] is managed by the sqlite3_str object X
+** and might be freed or altered by any subsequent method on the same
+** [sqlite3_str] object. Applications must not use the pointer returned by
+** [sqlite3_str_value(X)] after any subsequent method call on the same
+** object. ^Applications may change the content of the string returned
+** by [sqlite3_str_value(X)] as long as they do not write into any bytes
+** outside the range of 0 to [sqlite3_str_length(X)] and do not read or
+** write any byte after any subsequent sqlite3_str method call.
+*/
+SQLITE_API int sqlite3_str_errcode(sqlite3_str*);
+SQLITE_API int sqlite3_str_length(sqlite3_str*);
+SQLITE_API char *sqlite3_str_value(sqlite3_str*);
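+
+/*
+** The following is a minimal sketch of a typical sqlite3_str life cycle,
+** shown here for illustration only; the helper name build_query() is
+** hypothetical and not part of this header. It assumes the caller frees
+** the result with [sqlite3_free()].
+**
+** <blockquote><pre>
+**  static char *build_query(int x){
+**    sqlite3_str *s = sqlite3_str_new(0);    // 0: default length limits
+**    sqlite3_str_appendall(s, "SELECT * FROM t WHERE x=");
+**    sqlite3_str_appendf(s, "%d", x);
+**    if( sqlite3_str_errcode(s)!=SQLITE_OK ){
+**      sqlite3_free(sqlite3_str_finish(s));  // finish may return NULL here
+**      return 0;
+**    }
+**    return sqlite3_str_finish(s);           // caller must sqlite3_free()
+**  }
+** </pre></blockquote>
+*/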
+
+/*
+** CAPI3REF: SQLite Runtime Status
+**
+** ^These interfaces are used to retrieve runtime status information
+** about the performance of SQLite, and optionally to reset various
+** highwater marks. ^The first argument is an integer code for
+** the specific parameter to measure. ^(Recognized integer codes
+** are of the form [status parameters | SQLITE_STATUS_...].)^
+** ^The current value of the parameter is returned into *pCurrent.
+** ^The highest recorded value is returned in *pHighwater. ^If the
+** resetFlag is true, then the highest recorded value is reset after
+** *pHighwater is written. ^(Some parameters do not record the highest
+** value. For those parameters
+** nothing is written into *pHighwater and the resetFlag is ignored.)^
+** ^(Other parameters record only the highwater mark and not the current
+** value. For these latter parameters nothing is written into *pCurrent.)^
+**
+** ^The sqlite3_status() and sqlite3_status64() routines return
+** SQLITE_OK on success and a non-zero [error code] on failure.
+**
+** If either the current value or the highwater mark is too large to
+** be represented by a 32-bit integer, then the values returned by
+** sqlite3_status() are undefined.
+**
+** See also: [sqlite3_db_status()]
+*/
+SQLITE_API int sqlite3_status(int op, int *pCurrent, int *pHighwater, int resetFlag);
+SQLITE_API int sqlite3_status64(
+ int op,
+ sqlite3_int64 *pCurrent,
+ sqlite3_int64 *pHighwater,
+ int resetFlag
+);
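+
+/*
+** A minimal sketch of querying a runtime status parameter; the function
+** name report_memory() is illustrative only. It assumes the
+** SQLITE_STATUS_MEMORY_USED constant defined later in this header and a
+** prior #include &lt;stdio.h&gt;.
+**
+** <blockquote><pre>
+**  static void report_memory(void){
+**    sqlite3_int64 cur = 0, peak = 0;
+**    // last argument 1: reset the highwater mark after reading it
+**    if( sqlite3_status64(SQLITE_STATUS_MEMORY_USED, &cur, &peak, 1)==SQLITE_OK ){
+**      printf("memory used: %lld bytes (peak %lld)\n",
+**             (long long)cur, (long long)peak);
+**    }
+**  }
+** </pre></blockquote>
+*/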
+
+
+/*
+** CAPI3REF: Status Parameters
+** KEYWORDS: {status parameters}
+**
+** These integer constants designate various run-time status parameters
+** that can be returned by [sqlite3_status()].
+**
+** <dl>
+** [[SQLITE_STATUS_MEMORY_USED]] ^(<dt>SQLITE_STATUS_MEMORY_USED</dt>
+** <dd>This parameter is the current amount of memory checked out
+** using [sqlite3_malloc()], either directly or indirectly. The
+** figure includes calls made to [sqlite3_malloc()] by the application
+** and internal memory usage by the SQLite library. Auxiliary page-cache
+** memory controlled by [SQLITE_CONFIG_PAGECACHE] is not included in
+** this parameter. The amount returned is the sum of the allocation
+** sizes as reported by the xSize method in [sqlite3_mem_methods].</dd>)^
+**
+** [[SQLITE_STATUS_MALLOC_SIZE]] ^(<dt>SQLITE_STATUS_MALLOC_SIZE</dt>
+** <dd>This parameter records the largest memory allocation request
+** handed to [sqlite3_malloc()] or [sqlite3_realloc()] (or their
+** internal equivalents). Only the value returned in the
+** *pHighwater parameter to [sqlite3_status()] is of interest.
+** The value written into the *pCurrent parameter is undefined.</dd>)^
+**
+** [[SQLITE_STATUS_MALLOC_COUNT]] ^(<dt>SQLITE_STATUS_MALLOC_COUNT</dt>
+** <dd>This parameter records the number of separate memory allocations
+** currently checked out.</dd>)^
+**
+** [[SQLITE_STATUS_PAGECACHE_USED]] ^(<dt>SQLITE_STATUS_PAGECACHE_USED</dt>
+** <dd>This parameter returns the number of pages used out of the
+** [pagecache memory allocator] that was configured using
+** [SQLITE_CONFIG_PAGECACHE]. The
+** value returned is in pages, not in bytes.</dd>)^
+**
+** [[SQLITE_STATUS_PAGECACHE_OVERFLOW]]
+** ^(<dt>SQLITE_STATUS_PAGECACHE_OVERFLOW</dt>
+** <dd>This parameter returns the number of bytes of page cache
+** allocation which could not be satisfied by the [SQLITE_CONFIG_PAGECACHE]
+** buffer and were forced to overflow to [sqlite3_malloc()]. The
+** returned value includes allocations that overflowed because they
+** were too large (they were larger than the "sz" parameter to
+** [SQLITE_CONFIG_PAGECACHE]) and allocations that overflowed because
+** no space was left in the page cache.</dd>)^
+**
+** [[SQLITE_STATUS_PAGECACHE_SIZE]] ^(<dt>SQLITE_STATUS_PAGECACHE_SIZE</dt>
+** <dd>This parameter records the largest memory allocation request
+** handed to the [pagecache memory allocator]. Only the value returned in the
+** *pHighwater parameter to [sqlite3_status()] is of interest.
+** The value written into the *pCurrent parameter is undefined.</dd>)^
+**
+** [[SQLITE_STATUS_SCRATCH_USED]] <dt>SQLITE_STATUS_SCRATCH_USED</dt>
+** <dd>No longer used.</dd>
+**
+** [[SQLITE_STATUS_SCRATCH_OVERFLOW]] ^(<dt>SQLITE_STATUS_SCRATCH_OVERFLOW</dt>
+** <dd>No longer used.</dd>
+**
+** [[SQLITE_STATUS_SCRATCH_SIZE]] <dt>SQLITE_STATUS_SCRATCH_SIZE</dt>
+** <dd>No longer used.</dd>
+**
+** [[SQLITE_STATUS_PARSER_STACK]] ^(<dt>SQLITE_STATUS_PARSER_STACK</dt>
+** <dd>The *pHighwater parameter records the deepest parser stack.
+** The *pCurrent value is undefined. The *pHighwater value is only
+** meaningful if SQLite is compiled with [YYTRACKMAXSTACKDEPTH].</dd>)^
+** </dl>
+**
+** New status parameters may be added from time to time.
+*/
+#define SQLITE_STATUS_MEMORY_USED 0
+#define SQLITE_STATUS_PAGECACHE_USED 1
+#define SQLITE_STATUS_PAGECACHE_OVERFLOW 2
+#define SQLITE_STATUS_SCRATCH_USED 3 /* NOT USED */
+#define SQLITE_STATUS_SCRATCH_OVERFLOW 4 /* NOT USED */
+#define SQLITE_STATUS_MALLOC_SIZE 5
+#define SQLITE_STATUS_PARSER_STACK 6
+#define SQLITE_STATUS_PAGECACHE_SIZE 7
+#define SQLITE_STATUS_SCRATCH_SIZE 8 /* NOT USED */
+#define SQLITE_STATUS_MALLOC_COUNT 9
+
+/*
+** CAPI3REF: Database Connection Status
+** METHOD: sqlite3
+**
+** ^This interface is used to retrieve runtime status information
+** about a single [database connection]. ^The first argument is the
+** database connection object to be interrogated. ^The second argument
+** is an integer constant, taken from the set of
+** [SQLITE_DBSTATUS options], that
+** determines the parameter to interrogate. The set of
+** [SQLITE_DBSTATUS options] is likely
+** to grow in future releases of SQLite.
+**
+** ^The current value of the requested parameter is written into *pCur
+** and the highest instantaneous value is written into *pHiwtr. ^If
+** the resetFlg is true, then the highest instantaneous value is
+** reset back down to the current value.
+**
+** ^The sqlite3_db_status() routine returns SQLITE_OK on success and a
+** non-zero [error code] on failure.
+**
+** See also: [sqlite3_status()] and [sqlite3_stmt_status()].
+*/
+SQLITE_API int sqlite3_db_status(sqlite3*, int op, int *pCur, int *pHiwtr, int resetFlg);
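+
+/*
+** A brief illustrative sketch (report_cache() is a hypothetical name,
+** and #include &lt;stdio.h&gt; is assumed): query the pager-cache heap usage
+** of one open connection using the SQLITE_DBSTATUS_CACHE_USED verb
+** defined below.
+**
+** <blockquote><pre>
+**  static void report_cache(sqlite3 *db){
+**    int cur = 0, hi = 0;   // highwater is always 0 for this verb
+**    if( sqlite3_db_status(db, SQLITE_DBSTATUS_CACHE_USED,
+**                          &cur, &hi, 0)==SQLITE_OK ){
+**      printf("pager caches use about %d bytes\n", cur);
+**    }
+**  }
+** </pre></blockquote>
+*/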
+
+/*
+** CAPI3REF: Status Parameters for database connections
+** KEYWORDS: {SQLITE_DBSTATUS options}
+**
+** These constants are the available integer "verbs" that can be passed as
+** the second argument to the [sqlite3_db_status()] interface.
+**
+** New verbs may be added in future releases of SQLite. Existing verbs
+** might be discontinued. Applications should check the return code from
+** [sqlite3_db_status()] to make sure that the call worked.
+** The [sqlite3_db_status()] interface will return a non-zero error code
+** if a discontinued or unsupported verb is invoked.
+**
+** <dl>
+** [[SQLITE_DBSTATUS_LOOKASIDE_USED]] ^(<dt>SQLITE_DBSTATUS_LOOKASIDE_USED</dt>
+** <dd>This parameter returns the number of lookaside memory slots currently
+** checked out.</dd>)^
+**
+** [[SQLITE_DBSTATUS_LOOKASIDE_HIT]] ^(<dt>SQLITE_DBSTATUS_LOOKASIDE_HIT</dt>
+** <dd>This parameter returns the number of malloc attempts that were
+** satisfied using lookaside memory. Only the high-water value is meaningful;
+** the current value is always zero.</dd>)^
+**
+** [[SQLITE_DBSTATUS_LOOKASIDE_MISS_SIZE]]
+** ^(<dt>SQLITE_DBSTATUS_LOOKASIDE_MISS_SIZE</dt>
+** <dd>This parameter returns the number of malloc attempts that might have
+** been satisfied using lookaside memory but failed due to the amount of
+** memory requested being larger than the lookaside slot size.
+** Only the high-water value is meaningful;
+** the current value is always zero.</dd>)^
+**
+** [[SQLITE_DBSTATUS_LOOKASIDE_MISS_FULL]]
+** ^(<dt>SQLITE_DBSTATUS_LOOKASIDE_MISS_FULL</dt>
+** <dd>This parameter returns the number of malloc attempts that might have
+** been satisfied using lookaside memory but failed due to all lookaside
+** memory already being in use.
+** Only the high-water value is meaningful;
+** the current value is always zero.</dd>)^
+**
+** [[SQLITE_DBSTATUS_CACHE_USED]] ^(<dt>SQLITE_DBSTATUS_CACHE_USED</dt>
+** <dd>This parameter returns the approximate number of bytes of heap
+** memory used by all pager caches associated with the database connection.)^
+** ^The highwater mark associated with SQLITE_DBSTATUS_CACHE_USED is always 0.
+**
+** [[SQLITE_DBSTATUS_CACHE_USED_SHARED]]
+** ^(<dt>SQLITE_DBSTATUS_CACHE_USED_SHARED</dt>
+** <dd>This parameter is similar to DBSTATUS_CACHE_USED, except that if a
+** pager cache is shared between two or more connections the bytes of heap
+** memory used by that pager cache is divided evenly between the attached
+** connections.)^ In other words, if none of the pager caches associated
+** with the database connection are shared, this request returns the same
+** value as DBSTATUS_CACHE_USED. Or, if one or more of the pager caches are
+** shared, the value returned by this call will be smaller than that returned
+** by DBSTATUS_CACHE_USED. ^The highwater mark associated with
+** SQLITE_DBSTATUS_CACHE_USED_SHARED is always 0.
+**
+** [[SQLITE_DBSTATUS_SCHEMA_USED]] ^(<dt>SQLITE_DBSTATUS_SCHEMA_USED</dt>
+** <dd>This parameter returns the approximate number of bytes of heap
+** memory used to store the schema for all databases associated
+** with the connection - main, temp, and any [ATTACH]-ed databases.)^
+** ^The full amount of memory used by the schemas is reported, even if the
+** schema memory is shared with other database connections due to
+** [shared cache mode] being enabled.
+** ^The highwater mark associated with SQLITE_DBSTATUS_SCHEMA_USED is always 0.
+**
+** [[SQLITE_DBSTATUS_STMT_USED]] ^(<dt>SQLITE_DBSTATUS_STMT_USED</dt>
+** <dd>This parameter returns the approximate number of bytes of heap
+** and lookaside memory used by all prepared statements associated with
+** the database connection.)^
+** ^The highwater mark associated with SQLITE_DBSTATUS_STMT_USED is always 0.
+** </dd>
+**
+** [[SQLITE_DBSTATUS_CACHE_HIT]] ^(<dt>SQLITE_DBSTATUS_CACHE_HIT</dt>
+** <dd>This parameter returns the number of pager cache hits that have
+** occurred.)^ ^The highwater mark associated with SQLITE_DBSTATUS_CACHE_HIT
+** is always 0.
+** </dd>
+**
+** [[SQLITE_DBSTATUS_CACHE_MISS]] ^(<dt>SQLITE_DBSTATUS_CACHE_MISS</dt>
+** <dd>This parameter returns the number of pager cache misses that have
+** occurred.)^ ^The highwater mark associated with SQLITE_DBSTATUS_CACHE_MISS
+** is always 0.
+** </dd>
+**
+** [[SQLITE_DBSTATUS_CACHE_WRITE]] ^(<dt>SQLITE_DBSTATUS_CACHE_WRITE</dt>
+** <dd>This parameter returns the number of dirty cache entries that have
+** been written to disk. Specifically, the number of pages written to the
+** wal file in wal mode databases, or the number of pages written to the
+** database file in rollback mode databases. Any pages written as part of
+** transaction rollback or database recovery operations are not included.
+** If an IO or other error occurs while writing a page to disk, the effect
+** on subsequent SQLITE_DBSTATUS_CACHE_WRITE requests is undefined.)^ ^The
+** highwater mark associated with SQLITE_DBSTATUS_CACHE_WRITE is always 0.
+** </dd>
+**
+** [[SQLITE_DBSTATUS_CACHE_SPILL]] ^(<dt>SQLITE_DBSTATUS_CACHE_SPILL</dt>
+** <dd>This parameter returns the number of dirty cache entries that have
+** been written to disk in the middle of a transaction due to the page
+** cache overflowing. Transactions are more efficient if they are written
+** to disk all at once. When pages spill mid-transaction, that introduces
+** additional overhead. This parameter can be used to help identify
+** inefficiencies that can be resolved by increasing the cache size.
+** </dd>
+**
+** [[SQLITE_DBSTATUS_DEFERRED_FKS]] ^(<dt>SQLITE_DBSTATUS_DEFERRED_FKS</dt>
+** <dd>This parameter returns zero for the current value if and only if
+** all foreign key constraints (deferred or immediate) have been
+** resolved.)^ ^The highwater mark is always 0.
+** </dd>
+** </dl>
+*/
+#define SQLITE_DBSTATUS_LOOKASIDE_USED 0
+#define SQLITE_DBSTATUS_CACHE_USED 1
+#define SQLITE_DBSTATUS_SCHEMA_USED 2
+#define SQLITE_DBSTATUS_STMT_USED 3
+#define SQLITE_DBSTATUS_LOOKASIDE_HIT 4
+#define SQLITE_DBSTATUS_LOOKASIDE_MISS_SIZE 5
+#define SQLITE_DBSTATUS_LOOKASIDE_MISS_FULL 6
+#define SQLITE_DBSTATUS_CACHE_HIT 7
+#define SQLITE_DBSTATUS_CACHE_MISS 8
+#define SQLITE_DBSTATUS_CACHE_WRITE 9
+#define SQLITE_DBSTATUS_DEFERRED_FKS 10
+#define SQLITE_DBSTATUS_CACHE_USED_SHARED 11
+#define SQLITE_DBSTATUS_CACHE_SPILL 12
+#define SQLITE_DBSTATUS_MAX 12 /* Largest defined DBSTATUS */
+
+
+/*
+** CAPI3REF: Prepared Statement Status
+** METHOD: sqlite3_stmt
+**
+** ^(Each prepared statement maintains various
+** [SQLITE_STMTSTATUS counters] that measure the number
+** of times it has performed specific operations.)^ These counters can
+** be used to monitor the performance characteristics of the prepared
+** statements. For example, if the number of table steps greatly exceeds
+** the number of table searches or result rows, that would tend to indicate
+** that the prepared statement is using a full table scan rather than
+** an index.
+**
+** ^(This interface is used to retrieve and reset counter values from
+** a [prepared statement]. The first argument is the prepared statement
+** object to be interrogated. The second argument
+** is an integer code for a specific [SQLITE_STMTSTATUS counter]
+** to be interrogated.)^
+** ^The current value of the requested counter is returned.
+** ^If the resetFlg is true, then the counter is reset to zero after this
+** interface call returns.
+**
+** See also: [sqlite3_status()] and [sqlite3_db_status()].
+*/
+SQLITE_API int sqlite3_stmt_status(sqlite3_stmt*, int op,int resetFlg);
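+
+/*
+** An illustrative sketch of the counter pattern described above; the
+** helper name check_scan() is hypothetical and #include &lt;stdio.h&gt; is
+** assumed.
+**
+** <blockquote><pre>
+**  static void check_scan(sqlite3_stmt *pStmt){
+**    // read and reset the full-scan step counter for this statement
+**    int n = sqlite3_stmt_status(pStmt, SQLITE_STMTSTATUS_FULLSCAN_STEP, 1);
+**    if( n>0 ){
+**      fprintf(stderr, "%d full-scan steps; an index may help\n", n);
+**    }
+**  }
+** </pre></blockquote>
+*/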
+
+/*
+** CAPI3REF: Status Parameters for prepared statements
+** KEYWORDS: {SQLITE_STMTSTATUS counter} {SQLITE_STMTSTATUS counters}
+**
+** These preprocessor macros define integer codes that name counter
+** values associated with the [sqlite3_stmt_status()] interface.
+** The meanings of the various counters are as follows:
+**
+** <dl>
+** [[SQLITE_STMTSTATUS_FULLSCAN_STEP]] <dt>SQLITE_STMTSTATUS_FULLSCAN_STEP</dt>
+** <dd>^This is the number of times that SQLite has stepped forward in
+** a table as part of a full table scan. Large numbers for this counter
+** may indicate opportunities for performance improvement through
+** careful use of indices.</dd>
+**
+** [[SQLITE_STMTSTATUS_SORT]] <dt>SQLITE_STMTSTATUS_SORT</dt>
+** <dd>^This is the number of sort operations that have occurred.
+** A non-zero value in this counter may indicate an opportunity to
+** improve performance through careful use of indices.</dd>
+**
+** [[SQLITE_STMTSTATUS_AUTOINDEX]] <dt>SQLITE_STMTSTATUS_AUTOINDEX</dt>
+** <dd>^This is the number of rows inserted into transient indices that
+** were created automatically in order to help joins run faster.
+** A non-zero value in this counter may indicate an opportunity to
+** improve performance by adding permanent indices that do not
+** need to be reinitialized each time the statement is run.</dd>
+**
+** [[SQLITE_STMTSTATUS_VM_STEP]] <dt>SQLITE_STMTSTATUS_VM_STEP</dt>
+** <dd>^This is the number of virtual machine operations executed
+** by the prepared statement if that number is less than or equal
+** to 2147483647. The number of virtual machine operations can be
+** used as a proxy for the total work done by the prepared statement.
+** If the number of virtual machine operations exceeds 2147483647
+** then the value returned by this statement status code is undefined.</dd>
+**
+** [[SQLITE_STMTSTATUS_REPREPARE]] <dt>SQLITE_STMTSTATUS_REPREPARE</dt>
+** <dd>^This is the number of times that the prepared statement has been
+** automatically regenerated due to schema changes or changes to
+** [bound parameters] that might affect the query plan.</dd>
+**
+** [[SQLITE_STMTSTATUS_RUN]] <dt>SQLITE_STMTSTATUS_RUN</dt>
+** <dd>^This is the number of times that the prepared statement has
+** been run. A single "run" for the purposes of this counter is one
+** or more calls to [sqlite3_step()] followed by a call to [sqlite3_reset()].
+** The counter is incremented on the first [sqlite3_step()] call of each
+** cycle.</dd>
+**
+** [[SQLITE_STMTSTATUS_MEMUSED]] <dt>SQLITE_STMTSTATUS_MEMUSED</dt>
+** <dd>^This is the approximate number of bytes of heap memory
+** used to store the prepared statement. ^This value is not actually
+** a counter, and so the resetFlg parameter to sqlite3_stmt_status()
+** is ignored when the opcode is SQLITE_STMTSTATUS_MEMUSED.
+** </dd>
+** </dl>
+*/
+#define SQLITE_STMTSTATUS_FULLSCAN_STEP 1
+#define SQLITE_STMTSTATUS_SORT 2
+#define SQLITE_STMTSTATUS_AUTOINDEX 3
+#define SQLITE_STMTSTATUS_VM_STEP 4
+#define SQLITE_STMTSTATUS_REPREPARE 5
+#define SQLITE_STMTSTATUS_RUN 6
+#define SQLITE_STMTSTATUS_MEMUSED 99
+
+/*
+** CAPI3REF: Custom Page Cache Object
+**
+** The sqlite3_pcache type is opaque. It is implemented by
+** the pluggable module. The SQLite core has no knowledge of
+** its size or internal structure and never deals with the
+** sqlite3_pcache object except by holding and passing pointers
+** to the object.
+**
+** See [sqlite3_pcache_methods2] for additional information.
+*/
+typedef struct sqlite3_pcache sqlite3_pcache;
+
+/*
+** CAPI3REF: Custom Page Cache Object
+**
+** The sqlite3_pcache_page object represents a single page in the
+** page cache. The page cache will allocate instances of this
+** object. Various methods of the page cache use pointers to instances
+** of this object as parameters or as their return value.
+**
+** See [sqlite3_pcache_methods2] for additional information.
+*/
+typedef struct sqlite3_pcache_page sqlite3_pcache_page;
+struct sqlite3_pcache_page {
+ void *pBuf; /* The content of the page */
+ void *pExtra; /* Extra information associated with the page */
+};
+
+/*
+** CAPI3REF: Application Defined Page Cache.
+** KEYWORDS: {page cache}
+**
+** ^(The [sqlite3_config]([SQLITE_CONFIG_PCACHE2], ...) interface can
+** register an alternative page cache implementation by passing in an
+** instance of the sqlite3_pcache_methods2 structure.)^
+** In many applications, most of the heap memory allocated by
+** SQLite is used for the page cache.
+** By implementing a
+** custom page cache using this API, an application can better control
+** the amount of memory consumed by SQLite, the way in which
+** that memory is allocated and released, and the policies used to
+** determine exactly which parts of a database file are cached and for
+** how long.
+**
+** The alternative page cache mechanism is an
+** extreme measure that is only needed by the most demanding applications.
+** The built-in page cache is recommended for most uses.
+**
+** ^(The contents of the sqlite3_pcache_methods2 structure are copied to an
+** internal buffer by SQLite within the call to [sqlite3_config]. Hence
+** the application may discard the parameter after the call to
+** [sqlite3_config()] returns.)^
+**
+** [[the xInit() page cache method]]
+** ^(The xInit() method is called once for each effective
+** call to [sqlite3_initialize()])^
+** (usually only once during the lifetime of the process). ^(The xInit()
+** method is passed a copy of the sqlite3_pcache_methods2.pArg value.)^
+** The intent of the xInit() method is to set up global data structures
+** required by the custom page cache implementation.
+** ^(If the xInit() method is NULL, then the
+** built-in default page cache is used instead of the application defined
+** page cache.)^
+**
+** [[the xShutdown() page cache method]]
+** ^The xShutdown() method is called by [sqlite3_shutdown()].
+** It can be used to clean up
+** any outstanding resources before process shutdown, if required.
+** ^The xShutdown() method may be NULL.
+**
+** ^SQLite automatically serializes calls to the xInit method,
+** so the xInit method need not be threadsafe. ^The
+** xShutdown method is only called from [sqlite3_shutdown()] so it does
+** not need to be threadsafe either. All other methods must be threadsafe
+** in multithreaded applications.
+**
+** ^SQLite will never invoke xInit() more than once without an intervening
+** call to xShutdown().
+**
+** [[the xCreate() page cache methods]]
+** ^SQLite invokes the xCreate() method to construct a new cache instance.
+** SQLite will typically create one cache instance for each open database file,
+** though this is not guaranteed. ^The
+** first parameter, szPage, is the size in bytes of the pages that must
+** be allocated by the cache. ^szPage will always be a power of two. ^The
+** second parameter szExtra is a number of bytes of extra storage
+** associated with each page cache entry. ^The szExtra parameter will be
+** a number less than 250. SQLite will use the
+** extra szExtra bytes on each page to store metadata about the underlying
+** database page on disk. The value passed into szExtra depends
+** on the SQLite version, the target platform, and how SQLite was compiled.
+** ^The third argument to xCreate(), bPurgeable, is true if the cache being
+** created will be used to cache database pages of a file stored on disk, or
+** false if it is used for an in-memory database. The cache implementation
+** does not have to do anything special based on the value of bPurgeable;
+** it is purely advisory. ^On a cache where bPurgeable is false, SQLite will
+** never invoke xUnpin() except to deliberately delete a page.
+** ^In other words, calls to xUnpin() on a cache with bPurgeable set to
+** false will always have the "discard" flag set to true.
+** ^Hence, a cache created with bPurgeable false will
+** never contain any unpinned pages.
+**
+** [[the xCachesize() page cache method]]
+** ^(The xCachesize() method may be called at any time by SQLite to set the
+** suggested maximum cache-size (the number of pages stored) for the cache
+** instance passed as the first argument. This is the value configured using
+** the SQLite "[PRAGMA cache_size]" command.)^ As with the bPurgeable
+** parameter, the implementation is not required to do anything with this
+** value; it is advisory only.
+**
+** [[the xPagecount() page cache methods]]
+** The xPagecount() method must return the number of pages currently
+** stored in the cache, both pinned and unpinned.
+**
+** [[the xFetch() page cache methods]]
+** The xFetch() method locates a page in the cache and returns a pointer to
+** an sqlite3_pcache_page object associated with that page, or a NULL pointer.
+** The pBuf element of the returned sqlite3_pcache_page object will be a
+** pointer to a buffer of szPage bytes used to store the content of a
+** single database page. The pExtra element of sqlite3_pcache_page will be
+** a pointer to the szExtra bytes of extra storage that SQLite has requested
+** for each entry in the page cache.
+**
+** The page to be fetched is determined by the key. ^The minimum key value
+** is 1. After it has been retrieved using xFetch, the page is considered
+** to be "pinned".
+**
+** If the requested page is already in the page cache, then the page cache
+** implementation must return a pointer to the page buffer with its content
+** intact. If the requested page is not already in the cache, then the
+** cache implementation should use the value of the createFlag
+** parameter to help it determine what action to take:
+**
+** <table border=1 width=85% align=center>
+** <tr><th> createFlag <th> Behavior when page is not already in cache
+** <tr><td> 0 <td> Do not allocate a new page. Return NULL.
+** <tr><td> 1 <td> Allocate a new page if it is easy and convenient to do so.
+** Otherwise return NULL.
+** <tr><td> 2 <td> Make every effort to allocate a new page. Only return
+** NULL if allocating a new page is effectively impossible.
+** </table>
+**
+** ^(SQLite will normally invoke xFetch() with a createFlag of 0 or 1. SQLite
+** will only use a createFlag of 2 after a prior call with a createFlag of 1
+** failed.)^ In between the xFetch() calls, SQLite may
+** attempt to unpin one or more cache pages by spilling the content of
+** pinned pages to disk and synching the operating system disk cache.
+**
+** [[the xUnpin() page cache method]]
+** ^xUnpin() is called by SQLite with a pointer to a currently pinned page
+** as its second argument. If the third parameter, discard, is non-zero,
+** then the page must be evicted from the cache.
+** ^If the discard parameter is
+** zero, then the page may be discarded or retained at the discretion of
+** page cache implementation. ^The page cache implementation
+** may choose to evict unpinned pages at any time.
+**
+** The cache must not perform any reference counting. A single
+** call to xUnpin() unpins the page regardless of the number of prior calls
+** to xFetch().
+**
+** [[the xRekey() page cache methods]]
+** The xRekey() method is used to change the key value associated with the
+** page passed as the second argument. If the cache
+** previously contains an entry associated with newKey, it must be
+** discarded. ^Any prior cache entry associated with newKey is guaranteed not
+** to be pinned.
+**
+** When SQLite calls the xTruncate() method, the cache must discard all
+** existing cache entries with page numbers (keys) greater than or equal
+** to the value of the iLimit parameter passed to xTruncate(). If any
+** of these pages are pinned, they are implicitly unpinned, meaning that
+** they can be safely discarded.
+**
+** [[the xDestroy() page cache method]]
+** ^The xDestroy() method is used to delete a cache allocated by xCreate().
+** All resources associated with the specified cache should be freed. ^After
+** calling the xDestroy() method, SQLite considers the [sqlite3_pcache*]
+** handle invalid, and will not use it with any other sqlite3_pcache_methods2
+** functions.
+**
+** [[the xShrink() page cache method]]
+** ^SQLite invokes the xShrink() method when it wants the page cache to
+** free up as much heap memory as possible. The page cache implementation
+** is not obligated to free any memory, but well-behaved implementations should
+** do their best.
+*/
+typedef struct sqlite3_pcache_methods2 sqlite3_pcache_methods2;
+struct sqlite3_pcache_methods2 {
+ int iVersion;
+ void *pArg;
+ int (*xInit)(void*);
+ void (*xShutdown)(void*);
+ sqlite3_pcache *(*xCreate)(int szPage, int szExtra, int bPurgeable);
+ void (*xCachesize)(sqlite3_pcache*, int nCachesize);
+ int (*xPagecount)(sqlite3_pcache*);
+ sqlite3_pcache_page *(*xFetch)(sqlite3_pcache*, unsigned key, int createFlag);
+ void (*xUnpin)(sqlite3_pcache*, sqlite3_pcache_page*, int discard);
+ void (*xRekey)(sqlite3_pcache*, sqlite3_pcache_page*,
+ unsigned oldKey, unsigned newKey);
+ void (*xTruncate)(sqlite3_pcache*, unsigned iLimit);
+ void (*xDestroy)(sqlite3_pcache*);
+ void (*xShrink)(sqlite3_pcache*);
+};
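+
+/*
+** A deliberately small registration sketch, not a working page cache: it
+** retrieves the built-in methods with SQLITE_CONFIG_GETPCACHE2 (defined
+** elsewhere in this header) and re-registers them unchanged, marking the
+** spot where a real implementation would substitute its own methods. The
+** helper name install_pcache() is hypothetical, and the call must happen
+** before sqlite3_initialize().
+**
+** <blockquote><pre>
+**  static int install_pcache(void){
+**    static sqlite3_pcache_methods2 base;
+**    int rc = sqlite3_config(SQLITE_CONFIG_GETPCACHE2, &base);
+**    if( rc!=SQLITE_OK ) return rc;
+**    // ...replace selected methods in "base" with custom versions...
+**    return sqlite3_config(SQLITE_CONFIG_PCACHE2, &base);
+**  }
+** </pre></blockquote>
+*/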
+
+/*
+** This is the obsolete pcache_methods object that has now been replaced
+** by sqlite3_pcache_methods2. This object is not used by SQLite. It is
+** retained in the header file for backwards compatibility only.
+*/
+typedef struct sqlite3_pcache_methods sqlite3_pcache_methods;
+struct sqlite3_pcache_methods {
+ void *pArg;
+ int (*xInit)(void*);
+ void (*xShutdown)(void*);
+ sqlite3_pcache *(*xCreate)(int szPage, int bPurgeable);
+ void (*xCachesize)(sqlite3_pcache*, int nCachesize);
+ int (*xPagecount)(sqlite3_pcache*);
+ void *(*xFetch)(sqlite3_pcache*, unsigned key, int createFlag);
+ void (*xUnpin)(sqlite3_pcache*, void*, int discard);
+ void (*xRekey)(sqlite3_pcache*, void*, unsigned oldKey, unsigned newKey);
+ void (*xTruncate)(sqlite3_pcache*, unsigned iLimit);
+ void (*xDestroy)(sqlite3_pcache*);
+};
+
+
+/*
+** CAPI3REF: Online Backup Object
+**
+** The sqlite3_backup object records state information about an ongoing
+** online backup operation. ^The sqlite3_backup object is created by
+** a call to [sqlite3_backup_init()] and is destroyed by a call to
+** [sqlite3_backup_finish()].
+**
+** See Also: [Using the SQLite Online Backup API]
+*/
+typedef struct sqlite3_backup sqlite3_backup;
+
+/*
+** CAPI3REF: Online Backup API.
+**
+** The backup API copies the content of one database into another.
+** It is useful either for creating backups of databases or
+** for copying in-memory databases to or from persistent files.
+**
+** See Also: [Using the SQLite Online Backup API]
+**
+** ^SQLite holds a write transaction open on the destination database file
+** for the duration of the backup operation.
+** ^The source database is read-locked only while it is being read;
+** it is not locked continuously for the entire backup operation.
+** ^Thus, the backup may be performed on a live source database without
+** preventing other database connections from
+** reading or writing to the source database while the backup is underway.
+**
+** ^(To perform a backup operation:
+** <ol>
+** <li><b>sqlite3_backup_init()</b> is called once to initialize the
+** backup,
+** <li><b>sqlite3_backup_step()</b> is called one or more times to transfer
+** the data between the two databases, and finally
+** <li><b>sqlite3_backup_finish()</b> is called to release all resources
+** associated with the backup operation.
+** </ol>)^
+** There should be exactly one call to sqlite3_backup_finish() for each
+** successful call to sqlite3_backup_init().
+**
+** [[sqlite3_backup_init()]] <b>sqlite3_backup_init()</b>
+**
+** ^The D and N arguments to sqlite3_backup_init(D,N,S,M) are the
+** [database connection] associated with the destination database
+** and the database name, respectively.
+** ^The database name is "main" for the main database, "temp" for the
+** temporary database, or the name specified after the AS keyword in
+** an [ATTACH] statement for an attached database.
+** ^The S and M arguments passed to
+** sqlite3_backup_init(D,N,S,M) identify the [database connection]
+** and database name of the source database, respectively.
+** ^The source and destination [database connections] (parameters S and D)
+** must be different or else sqlite3_backup_init(D,N,S,M) will fail with
+** an error.
+**
+** ^A call to sqlite3_backup_init() will fail, returning NULL, if
+** there is already a read or read-write transaction open on the
+** destination database.
+**
+** ^If an error occurs within sqlite3_backup_init(D,N,S,M), then NULL is
+** returned and an error code and error message are stored in the
+** destination [database connection] D.
+** ^The error code and message for the failed call to sqlite3_backup_init()
+** can be retrieved using the [sqlite3_errcode()], [sqlite3_errmsg()], and/or
+** [sqlite3_errmsg16()] functions.
+** ^A successful call to sqlite3_backup_init() returns a pointer to an
+** [sqlite3_backup] object.
+** ^The [sqlite3_backup] object may be used with the sqlite3_backup_step() and
+** sqlite3_backup_finish() functions to perform the specified backup
+** operation.
+**
+** [[sqlite3_backup_step()]] <b>sqlite3_backup_step()</b>
+**
+** ^Function sqlite3_backup_step(B,N) will copy up to N pages between
+** the source and destination databases specified by [sqlite3_backup] object B.
+** ^If N is negative, all remaining source pages are copied.
+** ^If sqlite3_backup_step(B,N) successfully copies N pages and there
+** are still more pages to be copied, then the function returns [SQLITE_OK].
+** ^If sqlite3_backup_step(B,N) successfully finishes copying all pages
+** from source to destination, then it returns [SQLITE_DONE].
+** ^If an error occurs while running sqlite3_backup_step(B,N),
+** then an [error code] is returned. ^As well as [SQLITE_OK] and
+** [SQLITE_DONE], a call to sqlite3_backup_step() may return [SQLITE_READONLY],
+** [SQLITE_NOMEM], [SQLITE_BUSY], [SQLITE_LOCKED], or an
+** [SQLITE_IOERR_ACCESS | SQLITE_IOERR_XXX] extended error code.
+**
+** ^(The sqlite3_backup_step() might return [SQLITE_READONLY] if
+** <ol>
+** <li> the destination database was opened read-only, or
+** <li> the destination database is using write-ahead-log journaling
+** and the destination and source page sizes differ, or
+** <li> the destination database is an in-memory database and the
+** destination and source page sizes differ.
+** </ol>)^
+**
+** ^If sqlite3_backup_step() cannot obtain a required file-system lock, then
+** the [sqlite3_busy_handler | busy-handler function]
+** is invoked (if one is specified). ^If the
+** busy-handler returns non-zero before the lock is available, then
+** [SQLITE_BUSY] is returned to the caller. ^In this case the call to
+** sqlite3_backup_step() can be retried later. ^If the source
+** [database connection]
+** is being used to write to the source database when sqlite3_backup_step()
+** is called, then [SQLITE_LOCKED] is returned immediately. ^Again, in this
+** case the call to sqlite3_backup_step() can be retried later on. ^(If
+** [SQLITE_IOERR_ACCESS | SQLITE_IOERR_XXX], [SQLITE_NOMEM], or
+** [SQLITE_READONLY] is returned, then
+** there is no point in retrying the call to sqlite3_backup_step(). These
+** errors are considered fatal.)^ The application must accept
+** that the backup operation has failed and pass the backup operation handle
+** to sqlite3_backup_finish() to release associated resources.
+**
+** ^The first call to sqlite3_backup_step() obtains an exclusive lock
+** on the destination file. ^The exclusive lock is not released until either
+** sqlite3_backup_finish() is called or the backup operation is complete
+** and sqlite3_backup_step() returns [SQLITE_DONE]. ^Every call to
+** sqlite3_backup_step() obtains a [shared lock] on the source database that
+** lasts for the duration of the sqlite3_backup_step() call.
+** ^Because the source database is not locked between calls to
+** sqlite3_backup_step(), the source database may be modified mid-way
+** through the backup process. ^If the source database is modified by an
+** external process or via a database connection other than the one being
+** used by the backup operation, then the backup will be automatically
+** restarted by the next call to sqlite3_backup_step(). ^If the source
+** database is modified using the same database connection as is used
+** by the backup operation, then the backup database is automatically
+** updated at the same time.
+**
+** [[sqlite3_backup_finish()]] <b>sqlite3_backup_finish()</b>
+**
+** When sqlite3_backup_step() has returned [SQLITE_DONE], or when the
+** application wishes to abandon the backup operation, the application
+** should destroy the [sqlite3_backup] by passing it to sqlite3_backup_finish().
+** ^The sqlite3_backup_finish() interface releases all
+** resources associated with the [sqlite3_backup] object.
+** ^If sqlite3_backup_step() has not yet returned [SQLITE_DONE], then any
+** active write-transaction on the destination database is rolled back.
+** The [sqlite3_backup] object is invalid
+** and may not be used following a call to sqlite3_backup_finish().
+**
+** ^The value returned by sqlite3_backup_finish is [SQLITE_OK] if no
+** sqlite3_backup_step() errors occurred, regardless of whether or not
+** sqlite3_backup_step() completed.
+** ^If an out-of-memory condition or IO error occurred during any prior
+** sqlite3_backup_step() call on the same [sqlite3_backup] object, then
+** sqlite3_backup_finish() returns the corresponding [error code].
+**
+** ^A return of [SQLITE_BUSY] or [SQLITE_LOCKED] from sqlite3_backup_step()
+** is not a permanent error and does not affect the return value of
+** sqlite3_backup_finish().
+**
+** [[sqlite3_backup_remaining()]] [[sqlite3_backup_pagecount()]]
+** <b>sqlite3_backup_remaining() and sqlite3_backup_pagecount()</b>
+**
+** ^The sqlite3_backup_remaining() routine returns the number of pages still
+** to be backed up at the conclusion of the most recent sqlite3_backup_step().
+** ^The sqlite3_backup_pagecount() routine returns the total number of pages
+** in the source database at the conclusion of the most recent
+** sqlite3_backup_step().
+** ^(The values returned by these functions are only updated by
+** sqlite3_backup_step(). If the source database is modified in a way that
+** changes the size of the source database or the number of pages remaining,
+** those changes are not reflected in the output of sqlite3_backup_pagecount()
+** and sqlite3_backup_remaining() until after the next
+** sqlite3_backup_step().)^
+**
+** <b>Concurrent Usage of Database Handles</b>
+**
+** ^The source [database connection] may be used by the application for other
+** purposes while a backup operation is underway or being initialized.
+** ^If SQLite is compiled and configured to support threadsafe database
+** connections, then the source database connection may be used concurrently
+** from within other threads.
+**
+** However, the application must guarantee that the destination
+** [database connection] is not passed to any other API (by any thread) after
+** sqlite3_backup_init() is called and before the corresponding call to
+** sqlite3_backup_finish(). SQLite does not currently check to see
+** if the application incorrectly accesses the destination [database connection]
+** and so no error code is reported, but the operations may malfunction
+** nevertheless. Use of the destination database connection while a
+** backup is in progress might also cause a mutex deadlock.
+**
+** If running in [shared cache mode], the application must
+** guarantee that the shared cache used by the destination database
+** is not accessed while the backup is running. In practice this means
+** that the application must guarantee that the disk file being
+** backed up to is not accessed by any connection within the process,
+** not just the specific connection that was passed to sqlite3_backup_init().
+**
+** The [sqlite3_backup] object itself is partially threadsafe. Multiple
+** threads may safely make multiple concurrent calls to sqlite3_backup_step().
+** However, the sqlite3_backup_remaining() and sqlite3_backup_pagecount()
+** APIs are not strictly speaking threadsafe. If they are invoked at the
+** same time as another thread is invoking sqlite3_backup_step() it is
+** possible that they return invalid values.
+*/
+SQLITE_API sqlite3_backup *sqlite3_backup_init(
+ sqlite3 *pDest, /* Destination database handle */
+ const char *zDestName, /* Destination database name */
+ sqlite3 *pSource, /* Source database handle */
+ const char *zSourceName /* Source database name */
+);
+SQLITE_API int sqlite3_backup_step(sqlite3_backup *p, int nPage);
+SQLITE_API int sqlite3_backup_finish(sqlite3_backup *p);
+SQLITE_API int sqlite3_backup_remaining(sqlite3_backup *p);
+SQLITE_API int sqlite3_backup_pagecount(sqlite3_backup *p);
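+
+/*
+** A sketch of the loop described above; do_backup() is an illustrative
+** name. Copying a few pages per step and sleeping between steps lets
+** other connections use the source database while the backup runs.
+**
+** <blockquote><pre>
+**  static int do_backup(sqlite3 *pDest, sqlite3 *pSrc){
+**    int rc;
+**    sqlite3_backup *pBk = sqlite3_backup_init(pDest, "main", pSrc, "main");
+**    if( pBk==0 ) return sqlite3_errcode(pDest);
+**    do{
+**      rc = sqlite3_backup_step(pBk, 5);  // copy up to 5 pages
+**      if( rc==SQLITE_OK || rc==SQLITE_BUSY || rc==SQLITE_LOCKED ){
+**        sqlite3_sleep(250);              // yield to other writers
+**      }
+**    }while( rc==SQLITE_OK || rc==SQLITE_BUSY || rc==SQLITE_LOCKED );
+**    return sqlite3_backup_finish(pBk);   // SQLITE_OK if fully copied
+**  }
+** </pre></blockquote>
+*/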
+
+/*
+** CAPI3REF: Unlock Notification
+** METHOD: sqlite3
+**
+** ^When running in shared-cache mode, a database operation may fail with
+** an [SQLITE_LOCKED] error if the required locks on the shared-cache or
+** individual tables within the shared-cache cannot be obtained. See
+** [SQLite Shared-Cache Mode] for a description of shared-cache locking.
+** ^This API may be used to register a callback that SQLite will invoke
+** when the connection currently holding the required lock relinquishes it.
+** ^This API is only available if the library was compiled with the
+** [SQLITE_ENABLE_UNLOCK_NOTIFY] C-preprocessor symbol defined.
+**
+** See Also: [Using the SQLite Unlock Notification Feature].
+**
+** ^Shared-cache locks are released when a database connection concludes
+** its current transaction, either by committing it or rolling it back.
+**
+** ^When a connection (known as the blocked connection) fails to obtain a
+** shared-cache lock and SQLITE_LOCKED is returned to the caller, the
+** identity of the database connection (the blocking connection) that
+** has locked the required resource is stored internally. ^After an
+** application receives an SQLITE_LOCKED error, it may call the
+** sqlite3_unlock_notify() method with the blocked connection handle as
+** the first argument to register for a callback that will be invoked
+** when the blocking connection's current transaction is concluded. ^The
+** callback is invoked from within the [sqlite3_step] or [sqlite3_close]
+** call that concludes the blocking connection's transaction.
+**
+** ^(If sqlite3_unlock_notify() is called in a multi-threaded application,
+** there is a chance that the blocking connection will have already
+** concluded its transaction by the time sqlite3_unlock_notify() is invoked.
+** If this happens, then the specified callback is invoked immediately,
+** from within the call to sqlite3_unlock_notify().)^
+**
+** ^If the blocked connection is attempting to obtain a write-lock on a
+** shared-cache table, and more than one other connection currently holds
+** a read-lock on the same table, then SQLite arbitrarily selects one of
+** the other connections to use as the blocking connection.
+**
+** ^(There may be at most one unlock-notify callback registered by a
+** blocked connection. If sqlite3_unlock_notify() is called when the
+** blocked connection already has a registered unlock-notify callback,
+** then the new callback replaces the old.)^ ^If sqlite3_unlock_notify() is
+** called with a NULL pointer as its second argument, then any existing
+** unlock-notify callback is canceled. ^The blocked connection's
+** unlock-notify callback may also be canceled by closing the blocked
+** connection using [sqlite3_close()].
+**
+** The unlock-notify callback is not reentrant. If an application invokes
+** any sqlite3_xxx API functions from within an unlock-notify callback, a
+** crash or deadlock may be the result.
+**
+** ^Unless deadlock is detected (see below), sqlite3_unlock_notify() always
+** returns SQLITE_OK.
+**
+** <b>Callback Invocation Details</b>
+**
+** When an unlock-notify callback is registered, the application provides a
+** single void* pointer that is passed to the callback when it is invoked.
+** However, the signature of the callback function allows SQLite to pass
+** it an array of void* context pointers. The first argument passed to
+** an unlock-notify callback is a pointer to an array of void* pointers,
+** and the second is the number of entries in the array.
+**
+** When a blocking connection's transaction is concluded, there may be
+** more than one blocked connection that has registered for an unlock-notify
+** callback. ^If two or more such blocked connections have specified the
+** same callback function, then instead of invoking the callback function
+** multiple times, it is invoked once with the set of void* context pointers
+** specified by the blocked connections bundled together into an array.
+** This gives the application an opportunity to prioritize any actions
+** related to the set of unblocked database connections.
+**
+** <b>Deadlock Detection</b>
+**
+** Assuming that after registering for an unlock-notify callback a
+** database waits for the callback to be issued before taking any further
+** action (a reasonable assumption), then using this API may cause the
+** application to deadlock. For example, if connection X is waiting for
+** connection Y's transaction to be concluded, and similarly connection
+** Y is waiting on connection X's transaction, then neither connection
+** will proceed and the system may remain deadlocked indefinitely.
+**
+** To avoid this scenario, the sqlite3_unlock_notify() performs deadlock
+** detection. ^If a given call to sqlite3_unlock_notify() would put the
+** system in a deadlocked state, then SQLITE_LOCKED is returned and no
+** unlock-notify callback is registered. The system is said to be in
+** a deadlocked state if connection A has registered for an unlock-notify
+** callback on the conclusion of connection B's transaction, and connection
+** B has itself registered for an unlock-notify callback when connection
+** A's transaction is concluded. ^Indirect deadlock is also detected, so
+** the system is also considered to be deadlocked if connection B has
+** registered for an unlock-notify callback on the conclusion of connection
+** C's transaction, where connection C is waiting on connection A. ^Any
+** number of levels of indirection are allowed.
+**
+** <b>The "DROP TABLE" Exception</b>
+**
+** When a call to [sqlite3_step()] returns SQLITE_LOCKED, it is almost
+** always appropriate to call sqlite3_unlock_notify(). There is however,
+** one exception. When executing a "DROP TABLE" or "DROP INDEX" statement,
+** SQLite checks if there are any currently executing SELECT statements
+** that belong to the same connection. If there are, SQLITE_LOCKED is
+** returned. In this case there is no "blocking connection", so invoking
+** sqlite3_unlock_notify() results in the unlock-notify callback being
+** invoked immediately. If the application then re-attempts the "DROP TABLE"
+** or "DROP INDEX" query, an infinite loop might be the result.
+**
+** One way around this problem is to check the extended error code returned
+** by an sqlite3_step() call. ^(If there is a blocking connection, then the
+** extended error code is set to SQLITE_LOCKED_SHAREDCACHE. Otherwise, in
+** the special "DROP TABLE/INDEX" case, the extended error code is just
+** SQLITE_LOCKED.)^
+*/
+SQLITE_API int sqlite3_unlock_notify(
+ sqlite3 *pBlocked, /* Waiting connection */
+ void (*xNotify)(void **apArg, int nArg), /* Callback function to invoke */
+ void *pNotifyArg /* Argument to pass to xNotify */
+);
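+
+/*
+** A sketch of the blocking pattern described above, assuming POSIX
+** threads (#include &lt;pthread.h&gt;) and a library built with
+** [SQLITE_ENABLE_UNLOCK_NOTIFY]; the names UnlockWait, unlock_cb() and
+** wait_for_unlock() are illustrative only.
+**
+** <blockquote><pre>
+**  typedef struct UnlockWait {
+**    pthread_mutex_t mu;
+**    pthread_cond_t cv;
+**    int fired;
+**  } UnlockWait;
+**
+**  static void unlock_cb(void **apArg, int nArg){
+**    int i;
+**    for(i=0; i<nArg; i++){
+**      UnlockWait *w = (UnlockWait*)apArg[i];
+**      pthread_mutex_lock(&w->mu);
+**      w->fired = 1;
+**      pthread_cond_signal(&w->cv);
+**      pthread_mutex_unlock(&w->mu);
+**    }
+**  }
+**
+**  // Returns SQLITE_OK after the lock holder finishes, or
+**  // SQLITE_LOCKED at once if waiting would deadlock.
+**  static int wait_for_unlock(sqlite3 *db){
+**    UnlockWait w = {PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 0};
+**    int rc = sqlite3_unlock_notify(db, unlock_cb, &w);
+**    if( rc==SQLITE_OK ){
+**      pthread_mutex_lock(&w.mu);
+**      while( !w.fired ) pthread_cond_wait(&w.cv, &w.mu);
+**      pthread_mutex_unlock(&w.mu);
+**    }
+**    return rc;
+**  }
+** </pre></blockquote>
+*/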
+
+
+/*
+** CAPI3REF: String Comparison
+**
+** ^The [sqlite3_stricmp()] and [sqlite3_strnicmp()] APIs allow applications
+** and extensions to compare the contents of two buffers containing UTF-8
+** strings in a case-independent fashion, using the same definition of "case
+** independence" that SQLite uses internally when comparing identifiers.
+*/
+SQLITE_API int sqlite3_stricmp(const char *, const char *);
+SQLITE_API int sqlite3_strnicmp(const char *, const char *, int);
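+
+/*
+** For illustration (is_rowid_name() is a hypothetical helper): this is
+** the same case folding SQLite applies to identifiers.
+**
+** <blockquote><pre>
+**  static int is_rowid_name(const char *zName){
+**    return sqlite3_stricmp(zName, "ROWID")==0;  // "rowid", "RowId", ...
+**  }
+** </pre></blockquote>
+*/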
+
+/*
+** CAPI3REF: String Globbing
+**
+** ^The [sqlite3_strglob(P,X)] interface returns zero if and only if
+** string X matches the [GLOB] pattern P.
+** ^The definition of [GLOB] pattern matching used in
+** [sqlite3_strglob(P,X)] is the same as for the "X GLOB P" operator in the
+** SQL dialect understood by SQLite. ^The [sqlite3_strglob(P,X)] function
+** is case sensitive.
+**
+** Note that this routine returns zero on a match and non-zero if the strings
+** do not match, the same as [sqlite3_stricmp()] and [sqlite3_strnicmp()].
+**
+** See also: [sqlite3_strlike()].
+*/
+SQLITE_API int sqlite3_strglob(const char *zGlob, const char *zStr);
+
+/*
+** CAPI3REF: String LIKE Matching
+**
+** ^The [sqlite3_strlike(P,X,E)] interface returns zero if and only if
+** string X matches the [LIKE] pattern P with escape character E.
+** ^The definition of [LIKE] pattern matching used in
+** [sqlite3_strlike(P,X,E)] is the same as for the "X LIKE P ESCAPE E"
+** operator in the SQL dialect understood by SQLite. ^For "X LIKE P" without
+** the ESCAPE clause, set the E parameter of [sqlite3_strlike(P,X,E)] to 0.
+** ^As with the LIKE operator, the [sqlite3_strlike(P,X,E)] function is case
+** insensitive - equivalent upper and lower case ASCII characters match
+** one another.
+**
+** ^The [sqlite3_strlike(P,X,E)] function matches Unicode characters, though
+** only ASCII characters are case folded.
+**
+** Note that this routine returns zero on a match and non-zero if the strings
+** do not match, the same as [sqlite3_stricmp()] and [sqlite3_strnicmp()].
+**
+** See also: [sqlite3_strglob()].
+*/
+SQLITE_API int sqlite3_strlike(const char *zGlob, const char *zStr, unsigned int cEsc);
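+
+/*
+** A small illustrative sketch (looks_temporary() is a hypothetical
+** name); note that for both routines zero means "matched".
+**
+** <blockquote><pre>
+**  static int looks_temporary(const char *zFile){
+**    return sqlite3_strglob("*.tmp", zFile)==0    // GLOB: case sensitive
+**        || sqlite3_strlike("%~", zFile, 0)==0;   // LIKE: ASCII case-blind
+**  }
+** </pre></blockquote>
+*/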
+
+/*
+** CAPI3REF: Error Logging Interface
+**
+** ^The [sqlite3_log()] interface writes a message into the [error log]
+** established by the [SQLITE_CONFIG_LOG] option to [sqlite3_config()].
+** ^If logging is enabled, the zFormat string and subsequent arguments are
+** used with [sqlite3_snprintf()] to generate the final output string.
+**
+** The sqlite3_log() interface is intended for use by extensions such as
+** virtual tables, collating functions, and SQL functions. While there is
+** nothing to prevent an application from calling sqlite3_log(), doing so
+** is considered bad form.
+**
+** The zFormat string must not be NULL.
+**
+** To avoid deadlocks and other threading problems, the sqlite3_log() routine
+** will not use dynamically allocated memory. The log message is stored in
+** a fixed-length buffer on the stack. If the log message is longer than
+** a few hundred characters, it will be truncated to the length of the
+** buffer.
+*/
+SQLITE_API void sqlite3_log(int iErrCode, const char *zFormat, ...);
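+
+/*
+** A sketch of enabling the error log; log_cb() and setup_logging() are
+** illustrative names and #include &lt;stdio.h&gt; is assumed. The callback
+** should be fast and should avoid calling back into SQLite.
+**
+** <blockquote><pre>
+**  static void log_cb(void *pArg, int iErrCode, const char *zMsg){
+**    fprintf(stderr, "sqlite(%d): %s\n", iErrCode, zMsg);
+**  }
+**  // must run before sqlite3_initialize():
+**  static int setup_logging(void){
+**    return sqlite3_config(SQLITE_CONFIG_LOG, log_cb, (void*)0);
+**  }
+** </pre></blockquote>
+*/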
+
+/*
+** CAPI3REF: Write-Ahead Log Commit Hook
+** METHOD: sqlite3
+**
+** ^The [sqlite3_wal_hook()] function is used to register a callback that
+** is invoked each time data is committed to a database in wal mode.
+**
+** ^(The callback is invoked by SQLite after the commit has taken place and
+** the associated write-lock on the database released)^, so the implementation
+** may read, write or [checkpoint] the database as required.
+**
+** ^The first parameter passed to the callback function when it is invoked
+** is a copy of the third parameter passed to sqlite3_wal_hook() when
+** registering the callback. ^The second is a copy of the database handle.
+** ^The third parameter is the name of the database that was written to -
+** either "main" or the name of an [ATTACH]-ed database. ^The fourth parameter
+** is the number of pages currently in the write-ahead log file,
+** including those that were just committed.
+**
+** The callback function should normally return [SQLITE_OK]. ^If an error
+** code is returned, that error will propagate back up through the
+** SQLite code base to cause the statement that provoked the callback
+** to report an error, though the commit will have still occurred. If the
+** callback returns [SQLITE_ROW] or [SQLITE_DONE], or if it returns a value
+** that does not correspond to any valid SQLite error code, the results
+** are undefined.
+**
+** A single database handle may have at most a single write-ahead log callback
+** registered at one time. ^Calling [sqlite3_wal_hook()] replaces any
+** previously registered write-ahead log callback. ^Note that the
+** [sqlite3_wal_autocheckpoint()] interface and the
+** [wal_autocheckpoint pragma] both invoke [sqlite3_wal_hook()] and will
+** overwrite any prior [sqlite3_wal_hook()] settings.
+*/
+SQLITE_API void *sqlite3_wal_hook(
+ sqlite3*,
+ int(*)(void *,sqlite3*,const char*,int),
+ void*
+);
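+
+/*
+** A sketch of a WAL hook that checkpoints once the log grows past a
+** threshold; wal_cb() and the 4000-frame figure are illustrative. The
+** SQLITE_CHECKPOINT_PASSIVE constant is defined later in this header.
+**
+** <blockquote><pre>
+**  static int wal_cb(void *pArg, sqlite3 *db, const char *zDb, int nFrame){
+**    if( nFrame>=4000 ){
+**      sqlite3_wal_checkpoint_v2(db, zDb, SQLITE_CHECKPOINT_PASSIVE, 0, 0);
+**    }
+**    return SQLITE_OK;   // never propagate an error from the hook
+**  }
+**  // registration, given an open handle db:
+**  //   sqlite3_wal_hook(db, wal_cb, 0);
+** </pre></blockquote>
+*/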
+
+/*
+** CAPI3REF: Configure an auto-checkpoint
+** METHOD: sqlite3
+**
+** ^The [sqlite3_wal_autocheckpoint(D,N)] is a wrapper around
+** [sqlite3_wal_hook()] that causes any database on [database connection] D
+** to automatically [checkpoint]
+** after committing a transaction if there are N or
+** more frames in the [write-ahead log] file. ^Passing zero or
+** a negative value as the nFrame parameter disables automatic
+** checkpoints entirely.
+**
+** ^The callback registered by this function replaces any existing callback
+** registered using [sqlite3_wal_hook()]. ^Likewise, registering a callback
+** using [sqlite3_wal_hook()] disables the automatic checkpoint mechanism
+** configured by this function.
+**
+** ^The [wal_autocheckpoint pragma] can be used to invoke this interface
+** from SQL.
+**
+** ^Checkpoints initiated by this mechanism are
+** [sqlite3_wal_checkpoint_v2|PASSIVE].
+**
+** ^Every new [database connection] defaults to having the auto-checkpoint
+** enabled with a threshold of 1000 or [SQLITE_DEFAULT_WAL_AUTOCHECKPOINT]
+** pages. The use of this interface
+** is only necessary if the default setting is found to be suboptimal
+** for a particular application.
+*/
+SQLITE_API int sqlite3_wal_autocheckpoint(sqlite3 *db, int N);
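+
+/*
+** For illustration, given an open handle db (the 100-frame threshold is
+** an arbitrary example value):
+**
+** <blockquote><pre>
+**  sqlite3_wal_autocheckpoint(db, 100);  // checkpoint at ~100 WAL frames
+**  sqlite3_wal_autocheckpoint(db, 0);    // disable automatic checkpoints
+** </pre></blockquote>
+*/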
+
+/*
+** CAPI3REF: Checkpoint a database
+** METHOD: sqlite3
+**
+** ^(The sqlite3_wal_checkpoint(D,X) is equivalent to
+** [sqlite3_wal_checkpoint_v2](D,X,[SQLITE_CHECKPOINT_PASSIVE],0,0).)^
+**
+** In brief, sqlite3_wal_checkpoint(D,X) causes the content in the
+** [write-ahead log] for database X on [database connection] D to be
+** transferred into the database file and for the write-ahead log to
+** be reset. See the [checkpointing] documentation for additional
+** information.
+**
+** This interface used to be the only way to cause a checkpoint to
+** occur. But then the newer and more powerful [sqlite3_wal_checkpoint_v2()]
+** interface was added. This interface is retained for backwards
+** compatibility and as a convenience for applications that need to manually
+** start a checkpoint but which do not need the full power (and corresponding
+** complication) of [sqlite3_wal_checkpoint_v2()].
+*/
+SQLITE_API int sqlite3_wal_checkpoint(sqlite3 *db, const char *zDb);
+
+/*
+** CAPI3REF: Checkpoint a database
+** METHOD: sqlite3
+**
+** ^(The sqlite3_wal_checkpoint_v2(D,X,M,L,C) interface runs a checkpoint
+** operation on database X of [database connection] D in mode M. Status
+** information is written back into integers pointed to by L and C.)^
+** ^(The M parameter must be a valid [checkpoint mode]:)^
+**
+** <dl>
+** <dt>SQLITE_CHECKPOINT_PASSIVE<dd>
+** ^Checkpoint as many frames as possible without waiting for any database
+** readers or writers to finish, then sync the database file if all frames
+** in the log were checkpointed. ^The [busy-handler callback]
+** is never invoked in the SQLITE_CHECKPOINT_PASSIVE mode.
+** ^On the other hand, passive mode might leave the checkpoint unfinished
+** if there are concurrent readers or writers.
+**
+** <dt>SQLITE_CHECKPOINT_FULL<dd>
+** ^This mode blocks (it invokes the
+** [sqlite3_busy_handler|busy-handler callback]) until there is no
+** database writer and all readers are reading from the most recent database
+** snapshot. ^It then checkpoints all frames in the log file and syncs the
+** database file. ^This mode blocks new database writers while it is pending,
+** but new database readers are allowed to continue unimpeded.
+**
+** <dt>SQLITE_CHECKPOINT_RESTART<dd>
+** ^This mode works the same way as SQLITE_CHECKPOINT_FULL with the addition
+** that after checkpointing the log file it blocks (calls the
+** [busy-handler callback])
+** until all readers are reading from the database file only. ^This ensures
+** that the next writer will restart the log file from the beginning.
+** ^Like SQLITE_CHECKPOINT_FULL, this mode blocks new
+** database writer attempts while it is pending, but does not impede readers.
+**
+** <dt>SQLITE_CHECKPOINT_TRUNCATE<dd>
+** ^This mode works the same way as SQLITE_CHECKPOINT_RESTART with the
+** addition that it also truncates the log file to zero bytes just prior
+** to a successful return.
+** </dl>
+**
+** ^If pnLog is not NULL, then *pnLog is set to the total number of frames in
+** the log file or to -1 if the checkpoint could not run because
+** of an error or because the database is not in [WAL mode]. ^If pnCkpt is not
+** NULL,then *pnCkpt is set to the total number of checkpointed frames in the
+** log file (including any that were already checkpointed before the function
+** was called) or to -1 if the checkpoint could not run due to an error or
+** because the database is not in WAL mode. ^Note that upon successful
+** completion of an SQLITE_CHECKPOINT_TRUNCATE, the log file will have been
+** truncated to zero bytes and so both *pnLog and *pnCkpt will be set to zero.
+**
+** ^All calls obtain an exclusive "checkpoint" lock on the database file. ^If
+** any other process is running a checkpoint operation at the same time, the
+** lock cannot be obtained and SQLITE_BUSY is returned. ^Even if there is a
+** busy-handler configured, it will not be invoked in this case.
+**
+** ^The SQLITE_CHECKPOINT_FULL, RESTART and TRUNCATE modes also obtain the
+** exclusive "writer" lock on the database file. ^If the writer lock cannot be
+** obtained immediately, and a busy-handler is configured, it is invoked and
+** the writer lock retried until either the busy-handler returns 0 or the lock
+** is successfully obtained. ^The busy-handler is also invoked while waiting for
+** database readers as described above. ^If the busy-handler returns 0 before
+** the writer lock is obtained or while waiting for database readers, the
+** checkpoint operation proceeds from that point in the same way as
+** SQLITE_CHECKPOINT_PASSIVE - checkpointing as many frames as possible
+** without blocking any further. ^SQLITE_BUSY is returned in this case.
+**
+** ^If parameter zDb is NULL or points to a zero length string, then the
+** specified operation is attempted on all WAL databases [attached] to
+** [database connection] db. In this case the
+** values written to output parameters *pnLog and *pnCkpt are undefined. ^If
+** an SQLITE_BUSY error is encountered when processing one or more of the
+** attached WAL databases, the operation is still attempted on any remaining
+** attached databases and SQLITE_BUSY is returned at the end. ^If any other
+** error occurs while processing an attached database, processing is abandoned
+** and the error code is returned to the caller immediately. ^If no error
+** (SQLITE_BUSY or otherwise) is encountered while processing the attached
+** databases, SQLITE_OK is returned.
+**
+** ^If database zDb is the name of an attached database that is not in WAL
+** mode, SQLITE_OK is returned and both *pnLog and *pnCkpt are set to -1. ^If
+** zDb is neither NULL nor a zero-length string, and is not the name of any
+** attached database, SQLITE_ERROR is returned to the caller.
+**
+** ^Unless it returns SQLITE_MISUSE,
+** the sqlite3_wal_checkpoint_v2() interface
+** sets the error information that is queried by
+** [sqlite3_errcode()] and [sqlite3_errmsg()].
+**
+** ^The [PRAGMA wal_checkpoint] command can be used to invoke this interface
+** from SQL.
+*/
+SQLITE_API int sqlite3_wal_checkpoint_v2(
+ sqlite3 *db, /* Database handle */
+ const char *zDb, /* Name of attached database (or NULL) */
+ int eMode, /* SQLITE_CHECKPOINT_* value */
+ int *pnLog, /* OUT: Size of WAL log in frames */
+ int *pnCkpt /* OUT: Total number of frames checkpointed */
+);
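+
+/*
+** A minimal usage sketch (the helper name checkpoint_main() is invented
+** for illustration): checkpoint and truncate the WAL of the "main" schema.
+**
+**   static int checkpoint_main(sqlite3 *db){
+**     int nLog = -1;    // frames in the WAL, or -1 on failure
+**     int nCkpt = -1;   // frames checkpointed, or -1 on failure
+**     int rc = sqlite3_wal_checkpoint_v2(db, "main",
+**                  SQLITE_CHECKPOINT_TRUNCATE, &nLog, &nCkpt);
+**     // On success in TRUNCATE mode, both counters come back zero.
+**     return rc;
+**   }
+*/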
+
+/*
+** CAPI3REF: Checkpoint Mode Values
+** KEYWORDS: {checkpoint mode}
+**
+** These constants define all valid values for the "checkpoint mode" passed
+** as the third parameter to the [sqlite3_wal_checkpoint_v2()] interface.
+** See the [sqlite3_wal_checkpoint_v2()] documentation for details on the
+** meaning of each of these checkpoint modes.
+*/
+#define SQLITE_CHECKPOINT_PASSIVE 0 /* Do as much as possible w/o blocking */
+#define SQLITE_CHECKPOINT_FULL 1 /* Wait for writers, then checkpoint */
+#define SQLITE_CHECKPOINT_RESTART 2 /* Like FULL but wait for readers */
+#define SQLITE_CHECKPOINT_TRUNCATE 3 /* Like RESTART but also truncate WAL */
+
+/*
+** CAPI3REF: Virtual Table Interface Configuration
+**
+** This function may be called by either the [xConnect] or [xCreate] method
+** of a [virtual table] implementation to configure
+** various facets of the virtual table interface.
+**
+** If this interface is invoked outside the context of an xConnect or
+** xCreate virtual table method then the behavior is undefined.
+**
+** In the call sqlite3_vtab_config(D,C,...) the D parameter is the
+** [database connection] in which the virtual table is being created and
+** which is passed in as the first argument to the [xConnect] or [xCreate]
+** method that is invoking sqlite3_vtab_config(). The C parameter is one
+** of the [virtual table configuration options]. The presence and meaning
+** of parameters after C depend on which [virtual table configuration option]
+** is used.
+*/
+SQLITE_API int sqlite3_vtab_config(sqlite3*, int op, ...);
+
+/*
+** CAPI3REF: Virtual Table Configuration Options
+** KEYWORDS: {virtual table configuration options}
+** KEYWORDS: {virtual table configuration option}
+**
+** These macros define the various options to the
+** [sqlite3_vtab_config()] interface that [virtual table] implementations
+** can use to customize and optimize their behavior.
+**
+** <dl>
+** [[SQLITE_VTAB_CONSTRAINT_SUPPORT]]
+** <dt>SQLITE_VTAB_CONSTRAINT_SUPPORT</dt>
+** <dd>Calls of the form
+** [sqlite3_vtab_config](db,SQLITE_VTAB_CONSTRAINT_SUPPORT,X) are supported,
+** where X is an integer. If X is zero, then the [virtual table] whose
+** [xCreate] or [xConnect] method invoked [sqlite3_vtab_config()] does not
+** support constraints. In this configuration (which is the default) if
+** a call to the [xUpdate] method returns [SQLITE_CONSTRAINT], then the entire
+** statement is rolled back as if [ON CONFLICT | OR ABORT] had been
+** specified as part of the user's SQL statement, regardless of the actual
+** ON CONFLICT mode specified.
+**
+** If X is non-zero, then the virtual table implementation guarantees
+** that if [xUpdate] returns [SQLITE_CONSTRAINT], it will do so before
+** any modifications to internal or persistent data structures have been made.
+** If the [ON CONFLICT] mode is ABORT, FAIL, IGNORE or ROLLBACK, SQLite
+** is able to roll back a statement or database transaction, and abandon
+** or continue processing the current SQL statement as appropriate.
+** If the ON CONFLICT mode is REPLACE and the [xUpdate] method returns
+** [SQLITE_CONSTRAINT], SQLite handles this as if the ON CONFLICT mode
+** had been ABORT.
+**
+** Virtual table implementations that are required to handle OR REPLACE
+** must do so within the [xUpdate] method. If a call to the
+** [sqlite3_vtab_on_conflict()] function indicates that the current ON
+** CONFLICT policy is REPLACE, the virtual table implementation should
+** silently replace the appropriate rows within the xUpdate callback and
+** return SQLITE_OK. Or, if this is not possible, it may return
+** SQLITE_CONSTRAINT, in which case SQLite falls back to OR ABORT
+** constraint handling.
+** </dd>
+**
+** [[SQLITE_VTAB_DIRECTONLY]]<dt>SQLITE_VTAB_DIRECTONLY</dt>
+** <dd>Calls of the form
+** [sqlite3_vtab_config](db,SQLITE_VTAB_DIRECTONLY) from within
+** the [xConnect] or [xCreate] methods of a [virtual table] implementation
+** prohibit that virtual table from being used from within triggers and
+** views.
+** </dd>
+**
+** [[SQLITE_VTAB_INNOCUOUS]]<dt>SQLITE_VTAB_INNOCUOUS</dt>
+** <dd>Calls of the form
+** [sqlite3_vtab_config](db,SQLITE_VTAB_INNOCUOUS) from within
+** the [xConnect] or [xCreate] methods of a [virtual table] implementation
+** identify that virtual table as being safe to use from within triggers
+** and views. Conceptually, the SQLITE_VTAB_INNOCUOUS tag means that the
+** virtual table can do no serious harm even if it is controlled by a
+** malicious hacker. Developers should avoid setting the SQLITE_VTAB_INNOCUOUS
+** flag unless absolutely necessary.
+** </dd>
+** </dl>
+*/
+#define SQLITE_VTAB_CONSTRAINT_SUPPORT 1
+#define SQLITE_VTAB_INNOCUOUS 2
+#define SQLITE_VTAB_DIRECTONLY 3
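+
+/*
+** A usage sketch (the method and schema names are invented for
+** illustration): an xCreate implementation that declares constraint
+** support so that SQLite honors the statement's ON CONFLICT mode.
+**
+**   static int exampleCreate(sqlite3 *db, void *pAux,
+**                            int argc, const char *const *argv,
+**                            sqlite3_vtab **ppVtab, char **pzErr){
+**     int rc = sqlite3_declare_vtab(db, "CREATE TABLE x(a,b)");
+**     if( rc==SQLITE_OK ){
+**       // Promise that xUpdate reports SQLITE_CONSTRAINT before
+**       // making any changes to internal data structures:
+**       rc = sqlite3_vtab_config(db, SQLITE_VTAB_CONSTRAINT_SUPPORT, 1);
+**     }
+**     // ... allocate and store the sqlite3_vtab object in *ppVtab ...
+**     return rc;
+**   }
+*/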
+
+/*
+** CAPI3REF: Determine The Virtual Table Conflict Policy
+**
+** This function may only be called from within a call to the [xUpdate] method
+** of a [virtual table] implementation for an INSERT or UPDATE operation. ^The
+** value returned is one of [SQLITE_ROLLBACK], [SQLITE_IGNORE], [SQLITE_FAIL],
+** [SQLITE_ABORT], or [SQLITE_REPLACE], according to the [ON CONFLICT] mode
+** of the SQL statement that triggered the call to the [xUpdate] method of the
+** [virtual table].
+*/
+SQLITE_API int sqlite3_vtab_on_conflict(sqlite3 *);
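+
+/*
+** A fragment-level sketch (illustrative only) of how an xUpdate method
+** might consult the current conflict mode:
+**
+**   int mode = sqlite3_vtab_on_conflict(db);
+**   if( mode==SQLITE_REPLACE ){
+**     // silently replace the conflicting row, then return SQLITE_OK
+**   }else{
+**     return SQLITE_CONSTRAINT;   // let SQLite apply the ON CONFLICT mode
+**   }
+*/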
+
+/*
+** CAPI3REF: Determine If Virtual Table Column Access Is For UPDATE
+**
+** If the sqlite3_vtab_nochange(X) routine is called within the [xColumn]
+** method of a [virtual table], then it returns true if and only if the
+** column is being fetched as part of an UPDATE operation during which the
+** column value will not change. Applications might use this to substitute
+** a return value that is less expensive to compute and that the corresponding
+** [xUpdate] method understands as a "no-change" value.
+**
+** If the [xColumn] method calls sqlite3_vtab_nochange() and finds that
+** the column is not changed by the UPDATE statement, then the xColumn
+** method can optionally return without setting a result, without calling
+** any of the [sqlite3_result_int|sqlite3_result_xxxxx() interfaces].
+** In that case, [sqlite3_value_nochange(X)] will return true for the
+** same column in the [xUpdate] method.
+*/
+SQLITE_API int sqlite3_vtab_nochange(sqlite3_context*);
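+
+/*
+** A sketch of the pattern described above (the cursor type and the
+** EXPENSIVE_COLUMN index are invented for illustration):
+**
+**   static int exampleColumn(sqlite3_vtab_cursor *pCur,
+**                            sqlite3_context *ctx, int i){
+**     if( i==EXPENSIVE_COLUMN && sqlite3_vtab_nochange(ctx) ){
+**       return SQLITE_OK;   // leave the result unset; xUpdate will see
+**                           // sqlite3_value_nochange()==true
+**     }
+**     // ... otherwise compute the value and call sqlite3_result_xxx() ...
+**     return SQLITE_OK;
+**   }
+*/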
+
+/*
+** CAPI3REF: Determine The Collation For a Virtual Table Constraint
+**
+** This function may only be called from within a call to the [xBestIndex]
+** method of a [virtual table].
+**
+** The first argument must be the sqlite3_index_info object that is the
+** first parameter to the xBestIndex() method. The second argument must be
+** an index into the aConstraint[] array belonging to the sqlite3_index_info
+** structure passed to xBestIndex. This function returns a pointer to a buffer
+** containing the name of the collation sequence for the corresponding
+** constraint.
+*/
+SQLITE_API SQLITE_EXPERIMENTAL const char *sqlite3_vtab_collation(sqlite3_index_info*,int);
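+
+/*
+** A fragment-level sketch (illustrative only): inside xBestIndex, test
+** whether the first constraint uses the built-in BINARY collation.
+**
+**   // pInfo is the sqlite3_index_info* passed to xBestIndex
+**   if( pInfo->nConstraint>0 ){
+**     const char *zColl = sqlite3_vtab_collation(pInfo, 0);
+**     if( zColl && sqlite3_stricmp(zColl, "BINARY")==0 ){
+**       // constraint 0 can be satisfied with plain memcmp()-style
+**       // comparisons
+**     }
+**   }
+*/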
+
+/*
+** CAPI3REF: Conflict resolution modes
+** KEYWORDS: {conflict resolution mode}
+**
+** These constants are returned by [sqlite3_vtab_on_conflict()] to
+** inform a [virtual table] implementation what the [ON CONFLICT] mode
+** is for the SQL statement being evaluated.
+**
+** Note that the [SQLITE_IGNORE] constant is also used as a potential
+** return value from the [sqlite3_set_authorizer()] callback and that
+** [SQLITE_ABORT] is also a [result code].
+*/
+#define SQLITE_ROLLBACK 1
+/* #define SQLITE_IGNORE 2 // Also used by sqlite3_authorizer() callback */
+#define SQLITE_FAIL 3
+/* #define SQLITE_ABORT 4 // Also an error code */
+#define SQLITE_REPLACE 5
+
+/*
+** CAPI3REF: Prepared Statement Scan Status Opcodes
+** KEYWORDS: {scanstatus options}
+**
+** The following constants can be used for the T parameter to the
+** [sqlite3_stmt_scanstatus(S,X,T,V)] interface. Each constant designates a
+** different metric for sqlite3_stmt_scanstatus() to return.
+**
+** When the value returned to V is a string, space to hold that string is
+** managed by the prepared statement S and will be automatically freed when
+** S is finalized.
+**
+** <dl>
+** [[SQLITE_SCANSTAT_NLOOP]] <dt>SQLITE_SCANSTAT_NLOOP</dt>
+** <dd>^The [sqlite3_int64] variable pointed to by the V parameter will be
+** set to the total number of times that the X-th loop has run.</dd>
+**
+** [[SQLITE_SCANSTAT_NVISIT]] <dt>SQLITE_SCANSTAT_NVISIT</dt>
+** <dd>^The [sqlite3_int64] variable pointed to by the V parameter will be set
+** to the total number of rows examined by all iterations of the X-th loop.</dd>
+**
+** [[SQLITE_SCANSTAT_EST]] <dt>SQLITE_SCANSTAT_EST</dt>
+** <dd>^The "double" variable pointed to by the V parameter will be set to the
+** query planner's estimate for the average number of rows output from each
+** iteration of the X-th loop. If the query planner's estimate was accurate,
+** then this value will approximate the quotient NVISIT/NLOOP and the
+** product of this value for all prior loops with the same SELECTID will
+** be the NLOOP value for the current loop.
+**
+** [[SQLITE_SCANSTAT_NAME]] <dt>SQLITE_SCANSTAT_NAME</dt>
+** <dd>^The "const char *" variable pointed to by the V parameter will be set
+** to a zero-terminated UTF-8 string containing the name of the index or table
+** used for the X-th loop.
+**
+** [[SQLITE_SCANSTAT_EXPLAIN]] <dt>SQLITE_SCANSTAT_EXPLAIN</dt>
+** <dd>^The "const char *" variable pointed to by the V parameter will be set
+** to a zero-terminated UTF-8 string containing the [EXPLAIN QUERY PLAN]
+** description for the X-th loop.
+**
+** [[SQLITE_SCANSTAT_SELECTID]] <dt>SQLITE_SCANSTAT_SELECTID</dt>
+** <dd>^The "int" variable pointed to by the V parameter will be set to the
+** "select-id" for the X-th loop. The select-id identifies which query or
+** subquery the loop is part of. The main query has a select-id of zero.
+** The select-id is the same value as is output in the first column
+** of an [EXPLAIN QUERY PLAN] query.
+** </dl>
+*/
+#define SQLITE_SCANSTAT_NLOOP 0
+#define SQLITE_SCANSTAT_NVISIT 1
+#define SQLITE_SCANSTAT_EST 2
+#define SQLITE_SCANSTAT_NAME 3
+#define SQLITE_SCANSTAT_EXPLAIN 4
+#define SQLITE_SCANSTAT_SELECTID 5
+
+/*
+** CAPI3REF: Prepared Statement Scan Status
+** METHOD: sqlite3_stmt
+**
+** This interface returns information about the predicted and measured
+** performance for pStmt. Advanced applications can use this
+** interface to compare the predicted and the measured performance and
+** issue warnings and/or rerun [ANALYZE] if discrepancies are found.
+**
+** Since this interface is expected to be rarely used, it is only
+** available if SQLite is compiled using the [SQLITE_ENABLE_STMT_SCANSTATUS]
+** compile-time option.
+**
+** The "iScanStatusOp" parameter determines which status information to return.
+** The "iScanStatusOp" must be one of the [scanstatus options] or the behavior
+** of this interface is undefined.
+** ^The requested measurement is written into a variable pointed to by
+** the "pOut" parameter.
+** Parameter "idx" identifies the specific loop to retrieve statistics for.
+** Loops are numbered starting from zero. ^If idx is out of range - less than
+** zero or greater than or equal to the total number of loops used to implement
+** the statement - a non-zero value is returned and the variable that pOut
+** points to is unchanged.
+**
+** ^Statistics might not be available for all loops in all statements. ^In cases
+** where there exist loops with no available statistics, this function behaves
+** as if the loop did not exist - it returns non-zero and leaves the variable
+** that pOut points to unchanged.
+**
+** See also: [sqlite3_stmt_scanstatus_reset()]
+*/
+SQLITE_API int sqlite3_stmt_scanstatus(
+ sqlite3_stmt *pStmt, /* Prepared statement for which info desired */
+ int idx, /* Index of loop to report on */
+ int iScanStatusOp, /* Information desired. SQLITE_SCANSTAT_* */
+ void *pOut /* Result written here */
+);
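+
+/*
+** A usage sketch (the function name report_scans() is invented; assumes
+** <stdio.h> and a build with SQLITE_ENABLE_STMT_SCANSTATUS):
+**
+**   static void report_scans(sqlite3_stmt *pStmt){
+**     int idx;
+**     for(idx=0; ; idx++){   // stops when idx goes out of range
+**       sqlite3_int64 nLoop = 0, nVisit = 0;
+**       const char *zExplain = 0;
+**       if( sqlite3_stmt_scanstatus(pStmt, idx,
+**               SQLITE_SCANSTAT_NLOOP, (void*)&nLoop) ) break;
+**       sqlite3_stmt_scanstatus(pStmt, idx,
+**               SQLITE_SCANSTAT_NVISIT, (void*)&nVisit);
+**       sqlite3_stmt_scanstatus(pStmt, idx,
+**               SQLITE_SCANSTAT_EXPLAIN, (void*)&zExplain);
+**       printf("loop %d: %s (%lld loops, %lld rows)\n", idx,
+**              zExplain ? zExplain : "?", nLoop, nVisit);
+**     }
+**   }
+*/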
+
+/*
+** CAPI3REF: Zero Scan-Status Counters
+** METHOD: sqlite3_stmt
+**
+** ^Zero all [sqlite3_stmt_scanstatus()] related event counters.
+**
+** This API is only available if the library is built with pre-processor
+** symbol [SQLITE_ENABLE_STMT_SCANSTATUS] defined.
+*/
+SQLITE_API void sqlite3_stmt_scanstatus_reset(sqlite3_stmt*);
+
+/*
+** CAPI3REF: Flush caches to disk mid-transaction
+**
+** ^If a write-transaction is open on [database connection] D when the
+** [sqlite3_db_cacheflush(D)] interface is invoked, any dirty
+** pages in the pager-cache that are not currently in use are written out
+** to disk. A dirty page may be in use if a database cursor created by an
+** active SQL statement is reading from it, or if it is page 1 of a database
+** file (page 1 is always "in use"). ^The [sqlite3_db_cacheflush(D)]
+** interface flushes caches for all schemas - "main", "temp", and
+** any [attached] databases.
+**
+** ^If this function needs to obtain extra database locks before dirty pages
+** can be flushed to disk, it does so. ^If those locks cannot be obtained
+** immediately and there is a busy-handler callback configured, it is invoked
+** in the usual manner. ^If the required lock still cannot be obtained, then
+** the database is skipped and an attempt made to flush any dirty pages
+** belonging to the next (if any) database. ^If any databases are skipped
+** because locks cannot be obtained, but no other error occurs, this
+** function returns SQLITE_BUSY.
+**
+** ^If any other error occurs while flushing dirty pages to disk (for
+** example an IO error or out-of-memory condition), then processing is
+** abandoned and an SQLite [error code] is returned to the caller immediately.
+**
+** ^Otherwise, if no error occurs, [sqlite3_db_cacheflush()] returns SQLITE_OK.
+**
+** ^This function does not set the database handle error code or message
+** returned by the [sqlite3_errcode()] and [sqlite3_errmsg()] functions.
+*/
+SQLITE_API int sqlite3_db_cacheflush(sqlite3*);
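+
+/*
+** A fragment-level sketch (illustrative only): flush mid-transaction and
+** treat SQLITE_BUSY (some databases skipped) as non-fatal.
+**
+**   int rc = sqlite3_db_cacheflush(db);
+**   if( rc!=SQLITE_OK && rc!=SQLITE_BUSY ){
+**     // hard failure, e.g. an I/O error or out-of-memory condition
+**   }
+*/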
+
+/*
+** CAPI3REF: The pre-update hook.
+**
+** ^These interfaces are only available if SQLite is compiled using the
+** [SQLITE_ENABLE_PREUPDATE_HOOK] compile-time option.
+**
+** ^The [sqlite3_preupdate_hook()] interface registers a callback function
+** that is invoked prior to each [INSERT], [UPDATE], and [DELETE] operation
+** on a database table.
+** ^At most one preupdate hook may be registered at a time on a single
+** [database connection]; each call to [sqlite3_preupdate_hook()] overrides
+** the previous setting.
+** ^The preupdate hook is disabled by invoking [sqlite3_preupdate_hook()]
+** with a NULL pointer as the second parameter.
+** ^The third parameter to [sqlite3_preupdate_hook()] is passed through as
+** the first parameter to callbacks.
+**
+** ^The preupdate hook only fires for changes to real database tables; the
+** preupdate hook is not invoked for changes to [virtual tables] or to
+** system tables like sqlite_sequence or sqlite_stat1.
+**
+** ^The second parameter to the preupdate callback is a pointer to
+** the [database connection] that registered the preupdate hook.
+** ^The third parameter to the preupdate callback is one of the constants
+** [SQLITE_INSERT], [SQLITE_DELETE], or [SQLITE_UPDATE] to identify the
+** kind of update operation that is about to occur.
+** ^(The fourth parameter to the preupdate callback is the name of the
+** database within the database connection that is being modified. This
+** will be "main" for the main database or "temp" for TEMP tables or
+** the name given after the AS keyword in the [ATTACH] statement for attached
+** databases.)^
+** ^The fifth parameter to the preupdate callback is the name of the
+** table that is being modified.
+**
+** For an UPDATE or DELETE operation on a [rowid table], the sixth
+** parameter passed to the preupdate callback is the initial [rowid] of the
+** row being modified or deleted. For an INSERT operation on a rowid table,
+** or any operation on a WITHOUT ROWID table, the value of the sixth
+** parameter is undefined. For an INSERT or UPDATE on a rowid table the
+** seventh parameter is the final rowid value of the row being inserted
+** or updated. The value of the seventh parameter passed to the callback
+** function is not defined for operations on WITHOUT ROWID tables, or for
+** INSERT operations on rowid tables.
+**
+** The [sqlite3_preupdate_old()], [sqlite3_preupdate_new()],
+** [sqlite3_preupdate_count()], and [sqlite3_preupdate_depth()] interfaces
+** provide additional information about a preupdate event. These routines
+** may only be called from within a preupdate callback. Invoking any of
+** these routines from outside of a preupdate callback or with a
+** [database connection] pointer that is different from the one supplied
+** to the preupdate callback results in undefined and probably undesirable
+** behavior.
+**
+** ^The [sqlite3_preupdate_count(D)] interface returns the number of columns
+** in the row that is being inserted, updated, or deleted.
+**
+** ^The [sqlite3_preupdate_old(D,N,P)] interface writes into P a pointer to
+** a [protected sqlite3_value] that contains the value of the Nth column of
+** the table row before it is updated. The N parameter must be between 0
+** and one less than the number of columns or the behavior will be
+** undefined. This must only be used within SQLITE_UPDATE and SQLITE_DELETE
+** preupdate callbacks; if it is used by an SQLITE_INSERT callback then the
+** behavior is undefined. The [sqlite3_value] that P points to
+** will be destroyed when the preupdate callback returns.
+**
+** ^The [sqlite3_preupdate_new(D,N,P)] interface writes into P a pointer to
+** a [protected sqlite3_value] that contains the value of the Nth column of
+** the table row after it is updated. The N parameter must be between 0
+** and one less than the number of columns or the behavior will be
+** undefined. This must only be used within SQLITE_INSERT and SQLITE_UPDATE
+** preupdate callbacks; if it is used by an SQLITE_DELETE callback then the
+** behavior is undefined. The [sqlite3_value] that P points to
+** will be destroyed when the preupdate callback returns.
+**
+** ^The [sqlite3_preupdate_depth(D)] interface returns 0 if the preupdate
+** callback was invoked as a result of a direct insert, update, or delete
+** operation; or 1 for inserts, updates, or deletes invoked by top-level
+** triggers; or 2 for changes resulting from triggers called by top-level
+** triggers; and so forth.
+**
+** See also: [sqlite3_update_hook()]
+*/
+#if defined(SQLITE_ENABLE_PREUPDATE_HOOK)
+SQLITE_API void *sqlite3_preupdate_hook(
+ sqlite3 *db,
+ void(*xPreUpdate)(
+ void *pCtx, /* Copy of third arg to preupdate_hook() */
+ sqlite3 *db, /* Database handle */
+ int op, /* SQLITE_UPDATE, DELETE or INSERT */
+ char const *zDb, /* Database name */
+ char const *zName, /* Table name */
+ sqlite3_int64 iKey1, /* Rowid of row about to be deleted/updated */
+ sqlite3_int64 iKey2 /* New rowid value (for a rowid UPDATE) */
+ ),
+ void*
+);
+SQLITE_API int sqlite3_preupdate_old(sqlite3 *, int, sqlite3_value **);
+SQLITE_API int sqlite3_preupdate_count(sqlite3 *);
+SQLITE_API int sqlite3_preupdate_depth(sqlite3 *);
+SQLITE_API int sqlite3_preupdate_new(sqlite3 *, int, sqlite3_value **);
+#endif
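+
+/*
+** A usage sketch (the callback name onPreUpdate() is invented; assumes
+** <stdio.h> and a build with SQLITE_ENABLE_PREUPDATE_HOOK):
+**
+**   static void onPreUpdate(void *pCtx, sqlite3 *db, int op,
+**                           const char *zDb, const char *zName,
+**                           sqlite3_int64 iKey1, sqlite3_int64 iKey2){
+**     const char *zOp = op==SQLITE_INSERT ? "INSERT" :
+**                       op==SQLITE_DELETE ? "DELETE" : "UPDATE";
+**     fprintf(stderr, "%s on %s.%s (%d columns)\n",
+**             zOp, zDb, zName, sqlite3_preupdate_count(db));
+**   }
+**
+**   // registration:  sqlite3_preupdate_hook(db, onPreUpdate, 0);
+*/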
+
+/*
+** CAPI3REF: Low-level system error code
+**
+** ^Attempt to return the underlying operating system error code or error
+** number that caused the most recent I/O error or failure to open a file.
+** The return value is OS-dependent. For example, on unix systems, after
+** [sqlite3_open_v2()] returns [SQLITE_CANTOPEN], this interface could be
+** called to get back the underlying "errno" that caused the problem, such
+** as ENOSPC, EAUTH, EISDIR, and so forth.
+*/
+SQLITE_API int sqlite3_system_errno(sqlite3*);
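+
+/*
+** A fragment-level sketch (the filename is arbitrary; assumes <stdio.h>):
+**
+**   sqlite3 *db = 0;
+**   int rc = sqlite3_open_v2("data.db", &db, SQLITE_OPEN_READWRITE, 0);
+**   if( rc==SQLITE_CANTOPEN && db ){
+**     int err = sqlite3_system_errno(db);   // e.g. ENOENT or EACCES
+**     fprintf(stderr, "open failed, system errno %d\n", err);
+**   }
+*/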
+
+/*
+** CAPI3REF: Database Snapshot
+** KEYWORDS: {snapshot} {sqlite3_snapshot}
+**
+** An instance of the snapshot object records the state of a [WAL mode]
+** database for some specific point in history.
+**
+** In [WAL mode], multiple [database connections] that are open on the
+** same database file can each be reading a different historical version
+** of the database file. When a [database connection] begins a read
+** transaction, that connection sees an unchanging copy of the database
+** as it existed for the point in time when the transaction first started.
+** Subsequent changes to the database from other connections are not seen
+** by the reader until a new read transaction is started.
+**
+** The sqlite3_snapshot object records state information about an historical
+** version of the database file so that it is possible to later open a new read
+** transaction that sees that historical version of the database rather than
+** the most recent version.
+*/
+typedef struct sqlite3_snapshot {
+ unsigned char hidden[48];
+} sqlite3_snapshot;
+
+/*
+** CAPI3REF: Record A Database Snapshot
+** CONSTRUCTOR: sqlite3_snapshot
+**
+** ^The [sqlite3_snapshot_get(D,S,P)] interface attempts to make a
+** new [sqlite3_snapshot] object that records the current state of
+** schema S in database connection D. ^On success, the
+** [sqlite3_snapshot_get(D,S,P)] interface writes a pointer to the newly
+** created [sqlite3_snapshot] object into *P and returns SQLITE_OK.
+** If there is not already a read-transaction open on schema S when
+** this function is called, one is opened automatically.
+**
+** The following must be true for this function to succeed. If any of
+** the following statements are false when sqlite3_snapshot_get() is
+** called, SQLITE_ERROR is returned. The final value of *P is undefined
+** in this case.
+**
+** <ul>
+** <li> The database handle must not be in [autocommit mode].
+**
+** <li> Schema S of [database connection] D must be a [WAL mode] database.
+**
+** <li> There must not be a write transaction open on schema S of database
+** connection D.
+**
+** <li> One or more transactions must have been written to the current wal
+** file since it was created on disk (by any connection). This means
+** that a snapshot cannot be taken on a wal mode database with no wal
+** file immediately after it is first opened. At least one transaction
+** must be written to it first.
+** </ul>
+**
+** This function may also return SQLITE_NOMEM. If it is called with the
+** database handle in autocommit mode but fails for some other reason,
+** whether or not a read transaction is opened on schema S is undefined.
+**
+** The [sqlite3_snapshot] object returned from a successful call to
+** [sqlite3_snapshot_get()] must be freed using [sqlite3_snapshot_free()]
+** to avoid a memory leak.
+**
+** The [sqlite3_snapshot_get()] interface is only available when the
+** [SQLITE_ENABLE_SNAPSHOT] compile-time option is used.
+*/
+SQLITE_API SQLITE_EXPERIMENTAL int sqlite3_snapshot_get(
+ sqlite3 *db,
+ const char *zSchema,
+ sqlite3_snapshot **ppSnapshot
+);
+
+/*
+** CAPI3REF: Start a read transaction on an historical snapshot
+** METHOD: sqlite3_snapshot
+**
+** ^The [sqlite3_snapshot_open(D,S,P)] interface either starts a new read
+** transaction or upgrades an existing one for schema S of
+** [database connection] D such that the read transaction refers to
+** historical [snapshot] P, rather than the most recent change to the
+** database. ^The [sqlite3_snapshot_open()] interface returns SQLITE_OK
+** on success or an appropriate [error code] if it fails.
+**
+** ^In order to succeed, the database connection must not be in
+** [autocommit mode] when [sqlite3_snapshot_open(D,S,P)] is called. If there
+** is already a read transaction open on schema S, then the database handle
+** must have no active statements (SELECT statements that have been passed
+** to sqlite3_step() but not sqlite3_reset() or sqlite3_finalize()).
+** SQLITE_ERROR is returned if either of these conditions is violated, or
+** if schema S does not exist, or if the snapshot object is invalid.
+**
+** ^A call to sqlite3_snapshot_open() will fail if the specified
+** snapshot has been overwritten by a [checkpoint]. In this case
+** SQLITE_ERROR_SNAPSHOT is returned.
+**
+** If there is already a read transaction open when this function is
+** invoked, then the same read transaction remains open (on the same
+** database snapshot) if SQLITE_ERROR, SQLITE_BUSY or SQLITE_ERROR_SNAPSHOT
+** is returned. If another error code - for example SQLITE_PROTOCOL or an
+** SQLITE_IOERR error code - is returned, then the final state of the
+** read transaction is undefined. If SQLITE_OK is returned, then the
+** read transaction is now open on database snapshot P.
+**
+** ^(A call to [sqlite3_snapshot_open(D,S,P)] will fail if the
+** database connection D does not know that the database file for
+** schema S is in [WAL mode]. A database connection might not know
+** that the database file is in [WAL mode] if there has been no prior
+** I/O on that database connection, or if the database entered [WAL mode]
+** after the most recent I/O on the database connection.)^
+** (Hint: Run "[PRAGMA application_id]" against a newly opened
+** database connection in order to make it ready to use snapshots.)
+**
+** The [sqlite3_snapshot_open()] interface is only available when the
+** [SQLITE_ENABLE_SNAPSHOT] compile-time option is used.
+*/
+SQLITE_API SQLITE_EXPERIMENTAL int sqlite3_snapshot_open(
+ sqlite3 *db,
+ const char *zSchema,
+ sqlite3_snapshot *pSnapshot
+);
+
+/*
+** CAPI3REF: Destroy a snapshot
+** DESTRUCTOR: sqlite3_snapshot
+**
+** ^The [sqlite3_snapshot_free(P)] interface destroys [sqlite3_snapshot] P.
+** The application must eventually free every [sqlite3_snapshot] object
+** using this routine to avoid a memory leak.
+**
+** The [sqlite3_snapshot_free()] interface is only available when the
+** [SQLITE_ENABLE_SNAPSHOT] compile-time option is used.
+*/
+SQLITE_API SQLITE_EXPERIMENTAL void sqlite3_snapshot_free(sqlite3_snapshot*);
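+
+/*
+** A usage sketch covering sqlite3_snapshot_get(), _open() and _free()
+** (illustrative only; error handling abbreviated; requires a WAL-mode
+** database and a build with SQLITE_ENABLE_SNAPSHOT):
+**
+**   sqlite3_snapshot *pSnap = 0;
+**   sqlite3_exec(db, "BEGIN", 0, 0, 0);       // leave autocommit mode
+**   int rc = sqlite3_snapshot_get(db, "main", &pSnap);
+**   sqlite3_exec(db, "COMMIT", 0, 0, 0);
+**   if( rc==SQLITE_OK ){
+**     // ... later, read the database as of the snapshot:
+**     sqlite3_exec(db, "BEGIN", 0, 0, 0);
+**     rc = sqlite3_snapshot_open(db, "main", pSnap);
+**     // ... run SELECT statements against the historical version ...
+**     sqlite3_exec(db, "COMMIT", 0, 0, 0);
+**     sqlite3_snapshot_free(pSnap);
+**   }
+*/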
+
+/*
+** CAPI3REF: Compare the ages of two snapshot handles.
+** METHOD: sqlite3_snapshot
+**
+** The sqlite3_snapshot_cmp(P1, P2) interface is used to compare the ages
+** of two valid snapshot handles.
+**
+** If the two snapshot handles are not associated with the same database
+** file, the result of the comparison is undefined.
+**
+** Additionally, the result of the comparison is only valid if both of the
+** snapshot handles were obtained by calling sqlite3_snapshot_get() since the
+** last time the wal file was deleted. The wal file is deleted when the
+** database is changed back to rollback mode or when the number of database
+** clients drops to zero. If either snapshot handle was obtained before the
+** wal file was last deleted, the value returned by this function
+** is undefined.
+**
+** Otherwise, this API returns a negative value if P1 refers to an older
+** snapshot than P2, zero if the two handles refer to the same database
+** snapshot, and a positive value if P1 is a newer snapshot than P2.
+**
+** This interface is only available if SQLite is compiled with the
+** [SQLITE_ENABLE_SNAPSHOT] option.
+*/
+SQLITE_API SQLITE_EXPERIMENTAL int sqlite3_snapshot_cmp(
+ sqlite3_snapshot *p1,
+ sqlite3_snapshot *p2
+);
+
+/*
+** CAPI3REF: Recover snapshots from a wal file
+** METHOD: sqlite3_snapshot
+**
+** If a [WAL file] remains on disk after all database connections close
+** (either through the use of the [SQLITE_FCNTL_PERSIST_WAL] [file control]
+** or because the last process to have the database opened exited without
+** calling [sqlite3_close()]) and a new connection is subsequently opened
+** on that database and [WAL file], the [sqlite3_snapshot_open()] interface
+** will only be able to open the last transaction added to the WAL file
+** even though the WAL file contains other valid transactions.
+**
+** This function attempts to scan the WAL file associated with database zDb
+** of database handle db and make all valid snapshots available to
+** sqlite3_snapshot_open(). It is an error if there is already a read
+** transaction open on the database, or if the database is not a WAL mode
+** database.
+**
+** SQLITE_OK is returned if successful, or an SQLite error code otherwise.
+**
+** This interface is only available if SQLite is compiled with the
+** [SQLITE_ENABLE_SNAPSHOT] option.
+*/
+SQLITE_API SQLITE_EXPERIMENTAL int sqlite3_snapshot_recover(sqlite3 *db, const char *zDb);
+
+/*
+** CAPI3REF: Serialize a database
+**
+** The sqlite3_serialize(D,S,P,F) interface returns a pointer to memory
+** that is a serialization of the S database on [database connection] D.
+** If P is not a NULL pointer, then the size of the database in bytes
+** is written into *P.
+**
+** For an ordinary on-disk database file, the serialization is just a
+** copy of the disk file. For an in-memory database or a "TEMP" database,
+** the serialization is the same sequence of bytes which would be written
+** to disk if that database were backed up to disk.
+**
+** The usual case is that sqlite3_serialize() copies the serialization of
+** the database into memory obtained from [sqlite3_malloc64()] and returns
+** a pointer to that memory. The caller is responsible for freeing the
+** returned value to avoid a memory leak. However, if the F argument
+** contains the SQLITE_SERIALIZE_NOCOPY bit, then no memory allocations
+** are made, and the sqlite3_serialize() function will return a pointer
+** to the contiguous memory representation of the database that SQLite
+** is currently using for that database, or NULL if no such contiguous
+** memory representation of the database exists. A contiguous memory
+** representation of the database will usually only exist if there has
+** been a prior call to [sqlite3_deserialize(D,S,...)] with the same
+** values of D and S.
+** The size of the database is written into *P even if the
+** SQLITE_SERIALIZE_NOCOPY bit is set but no contiguous copy
+** of the database exists.
+**
+** A call to sqlite3_serialize(D,S,P,F) might return NULL even if the
+** SQLITE_SERIALIZE_NOCOPY bit is omitted from argument F if a memory
+** allocation error occurs.
+**
+** This interface is only available if SQLite is compiled with the
+** [SQLITE_ENABLE_DESERIALIZE] option.
+*/
+SQLITE_API unsigned char *sqlite3_serialize(
+ sqlite3 *db, /* The database connection */
+ const char *zSchema, /* Which DB to serialize. ex: "main", "temp", ... */
+ sqlite3_int64 *piSize, /* Write size of the DB here, if not NULL */
+ unsigned int mFlags /* Zero or more SQLITE_SERIALIZE_* flags */
+);
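+
+/*
+** A fragment-level sketch (illustrative only; requires a build with
+** SQLITE_ENABLE_DESERIALIZE): copy the "main" schema into memory.
+**
+**   sqlite3_int64 nByte = 0;
+**   unsigned char *pData = sqlite3_serialize(db, "main", &nByte, 0);
+**   if( pData ){
+**     // ... use the nByte-byte database image ...
+**     sqlite3_free(pData);   // caller owns the copy
+**   }
+*/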
+
+/*
+** CAPI3REF: Flags for sqlite3_serialize
+**
+** Zero or more of the following constants can be OR-ed together for
+** the F argument to [sqlite3_serialize(D,S,P,F)].
+**
+** SQLITE_SERIALIZE_NOCOPY means that [sqlite3_serialize()] will return
+** a pointer to contiguous in-memory database that it is currently using,
+** without making a copy of the database. If SQLite is not currently using
+** a contiguous in-memory database, then this option causes
+** [sqlite3_serialize()] to return a NULL pointer. SQLite will only be
+** using a contiguous in-memory database if it has been initialized by a
+** prior call to [sqlite3_deserialize()].
+*/
+#define SQLITE_SERIALIZE_NOCOPY 0x001 /* Do no memory allocations */
+
+/*
+** CAPI3REF: Deserialize a database
+**
+** The sqlite3_deserialize(D,S,P,N,M,F) interface causes the
+** [database connection] D to disconnect from database S and then
+** reopen S as an in-memory database based on the serialization contained
+** in P. The serialized database P is N bytes in size. M is the size of
+** the buffer P, which might be larger than N. If M is larger than N, and
+** the SQLITE_DESERIALIZE_READONLY bit is not set in F, then SQLite is
+** permitted to add content to the in-memory database as long as the total
+** size does not exceed M bytes.
+**
+** If the SQLITE_DESERIALIZE_FREEONCLOSE bit is set in F, then SQLite will
+** invoke sqlite3_free() on the serialization buffer when the database
+** connection closes. If the SQLITE_DESERIALIZE_RESIZEABLE bit is set, then
+** SQLite will try to increase the buffer size using sqlite3_realloc64()
+** if writes on the database cause it to grow larger than M bytes.
+**
+** The sqlite3_deserialize() interface will fail with SQLITE_BUSY if the
+** database is currently in a read transaction or is involved in a backup
+** operation.
+**
+** If sqlite3_deserialize(D,S,P,N,M,F) fails for any reason and if the
+** SQLITE_DESERIALIZE_FREEONCLOSE bit is set in argument F, then
+** [sqlite3_free()] is invoked on argument P prior to returning.
+**
+** This interface is only available if SQLite is compiled with the
+** [SQLITE_ENABLE_DESERIALIZE] option.
+*/
+SQLITE_API int sqlite3_deserialize(
+ sqlite3 *db, /* The database connection */
+ const char *zSchema, /* Which DB to reopen with the deserialization */
+ unsigned char *pData, /* The serialized database content */
+ sqlite3_int64 szDb, /* Number bytes in the deserialization */
+ sqlite3_int64 szBuf, /* Total size of buffer pData[] */
+ unsigned mFlags /* Zero or more SQLITE_DESERIALIZE_* flags */
+);
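+
+/*
+** A fragment-level sketch (illustrative only): hand a serialized image
+** back to a connection, letting SQLite own and grow the buffer. Here
+** pData/nByte are assumed to come from sqlite3_serialize() or
+** sqlite3_malloc64().
+**
+**   int rc = sqlite3_deserialize(db, "main", pData, nByte, nByte,
+**                SQLITE_DESERIALIZE_FREEONCLOSE |
+**                SQLITE_DESERIALIZE_RESIZEABLE);
+**   // On failure, FREEONCLOSE means pData has already been freed.
+*/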
+
+/*
+** CAPI3REF: Flags for sqlite3_deserialize()
+**
+** The following are allowed values for the 6th argument (the F argument) to
+** the [sqlite3_deserialize(D,S,P,N,M,F)] interface.
+**
+** The SQLITE_DESERIALIZE_FREEONCLOSE flag means that the database serialization
+** in the P argument is held in memory obtained from [sqlite3_malloc64()]
+** and that SQLite should take ownership of this memory and automatically
+** free it when it has finished using it. Without this flag, the caller
+** is responsible for freeing any dynamically allocated memory.
+**
+** The SQLITE_DESERIALIZE_RESIZEABLE flag means that SQLite is allowed to
+** grow the size of the database using calls to [sqlite3_realloc64()]. This
+** flag should only be used if SQLITE_DESERIALIZE_FREEONCLOSE is also used.
+** Without this flag, the deserialized database cannot increase in size beyond
+** the number of bytes specified by the M parameter.
+**
+** The SQLITE_DESERIALIZE_READONLY flag means that the deserialized database
+** should be treated as read-only.
+*/
+#define SQLITE_DESERIALIZE_FREEONCLOSE 1 /* Call sqlite3_free() on close */
+#define SQLITE_DESERIALIZE_RESIZEABLE 2 /* Resize using sqlite3_realloc64() */
+#define SQLITE_DESERIALIZE_READONLY 4 /* Database is read-only */
+
+/*
+** Undo the hack that converts floating point types to integer for
+** builds on processors without floating point support.
+*/
+#ifdef SQLITE_OMIT_FLOATING_POINT
+# undef double
+#endif
+
+#ifdef __cplusplus
+} /* End of the 'extern "C"' block */
+#endif
+#endif /* SQLITE3_H */
+
+/******** Begin file sqlite3rtree.h *********/
+/*
+** 2010 August 30
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+*/
+
+#ifndef _SQLITE3RTREE_H_
+#define _SQLITE3RTREE_H_
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef struct sqlite3_rtree_geometry sqlite3_rtree_geometry;
+typedef struct sqlite3_rtree_query_info sqlite3_rtree_query_info;
+
+/* The double-precision datatype used by RTree depends on the
+** SQLITE_RTREE_INT_ONLY compile-time option.
+*/
+#ifdef SQLITE_RTREE_INT_ONLY
+ typedef sqlite3_int64 sqlite3_rtree_dbl;
+#else
+ typedef double sqlite3_rtree_dbl;
+#endif
+
+/*
+** Register a geometry callback named zGeom that can be used as part of an
+** R-Tree geometry query as follows:
+**
+** SELECT ... FROM <rtree> WHERE <rtree col> MATCH $zGeom(... params ...)
+*/
+SQLITE_API int sqlite3_rtree_geometry_callback(
+ sqlite3 *db,
+ const char *zGeom,
+ int (*xGeom)(sqlite3_rtree_geometry*, int, sqlite3_rtree_dbl*,int*),
+ void *pContext
+);
+
+
+/*
+** A pointer to a structure of the following type is passed as the first
+** argument to callbacks registered using rtree_geometry_callback().
+*/
+struct sqlite3_rtree_geometry {
+ void *pContext; /* Copy of pContext passed to s_r_g_c() */
+ int nParam; /* Size of array aParam[] */
+ sqlite3_rtree_dbl *aParam; /* Parameters passed to SQL geom function */
+ void *pUser; /* Callback implementation user data */
+ void (*xDelUser)(void *); /* Called by SQLite to clean up pUser */
+};
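+
+/*
+** A sketch of a geometry callback (the function name and the parameter
+** convention aParam[] = (xmin,xmax,ymin,ymax) are invented for
+** illustration): a 2-D bounding-box overlap test.
+**
+**   static int boxGeom(sqlite3_rtree_geometry *p, int nCoord,
+**                      sqlite3_rtree_dbl *aCoord, int *pRes){
+**     if( p->nParam!=4 || nCoord!=4 ) return SQLITE_ERROR;
+**     *pRes = aCoord[0]<=p->aParam[1] && aCoord[1]>=p->aParam[0]
+**          && aCoord[2]<=p->aParam[3] && aCoord[3]>=p->aParam[2];
+**     return SQLITE_OK;
+**   }
+**
+**   // registration: sqlite3_rtree_geometry_callback(db,"box",boxGeom,0);
+**   // query:        ... WHERE coords MATCH box(0.0, 10.0, 0.0, 10.0)
+*/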
+
+/*
+** Register a 2nd-generation geometry callback named zQueryFunc that can be
+** used as part of an R-Tree geometry query as follows:
+**
+** SELECT ... FROM <rtree> WHERE <rtree col> MATCH $zQueryFunc(... params ...)
+*/
+SQLITE_API int sqlite3_rtree_query_callback(
+ sqlite3 *db,
+ const char *zQueryFunc,
+ int (*xQueryFunc)(sqlite3_rtree_query_info*),
+ void *pContext,
+ void (*xDestructor)(void*)
+);
+
+
+/*
+** A pointer to a structure of the following type is passed as the
+** argument to scored geometry callbacks registered using
+** sqlite3_rtree_query_callback().
+**
+** Note that the first 5 fields of this structure are identical to
+** sqlite3_rtree_geometry. This structure is a subclass of
+** sqlite3_rtree_geometry.
+*/
+struct sqlite3_rtree_query_info {
+ void *pContext; /* pContext from when function registered */
+ int nParam; /* Number of function parameters */
+ sqlite3_rtree_dbl *aParam; /* value of function parameters */
+ void *pUser; /* callback can use this, if desired */
+ void (*xDelUser)(void*); /* function to free pUser */
+ sqlite3_rtree_dbl *aCoord; /* Coordinates of node or entry to check */
+ unsigned int *anQueue; /* Number of pending entries in the queue */
+ int nCoord; /* Number of coordinates */
+ int iLevel; /* Level of current node or entry */
+ int mxLevel; /* The largest iLevel value in the tree */
+ sqlite3_int64 iRowid; /* Rowid for current entry */
+ sqlite3_rtree_dbl rParentScore; /* Score of parent node */
+ int eParentWithin; /* Visibility of parent node */
+ int eWithin; /* OUT: Visibility */
+ sqlite3_rtree_dbl rScore; /* OUT: Write the score here */
+ /* The following fields are only available in 3.8.11 and later */
+ sqlite3_value **apSqlParam; /* Original SQL values of parameters */
+};
+
+/*
+** Allowed values for sqlite3_rtree_query.eWithin and .eParentWithin.
+*/
+#define NOT_WITHIN 0 /* Object completely outside of query region */
+#define PARTLY_WITHIN 1 /* Object partially overlaps query region */
+#define FULLY_WITHIN 2 /* Object fully contained within query region */
+
+
+#ifdef __cplusplus
+} /* end of the 'extern "C"' block */
+#endif
+
+#endif /* ifndef _SQLITE3RTREE_H_ */
+
+/******** End of sqlite3rtree.h *********/
+/******** Begin file sqlite3session.h *********/
+
+#if !defined(__SQLITESESSION_H_) && defined(SQLITE_ENABLE_SESSION)
+#define __SQLITESESSION_H_ 1
+
+/*
+** Make sure we can call this stuff from C++.
+*/
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+/*
+** CAPI3REF: Session Object Handle
+**
+** An instance of this object is a [session] that can be used to
+** record changes to a database.
+*/
+typedef struct sqlite3_session sqlite3_session;
+
+/*
+** CAPI3REF: Changeset Iterator Handle
+**
+** An instance of this object acts as a cursor for iterating
+** over the elements of a [changeset] or [patchset].
+*/
+typedef struct sqlite3_changeset_iter sqlite3_changeset_iter;
+
+/*
+** CAPI3REF: Create A New Session Object
+** CONSTRUCTOR: sqlite3_session
+**
+** Create a new session object attached to database handle db. If successful,
+** a pointer to the new object is written to *ppSession and SQLITE_OK is
+** returned. If an error occurs, *ppSession is set to NULL and an SQLite
+** error code (e.g. SQLITE_NOMEM) is returned.
+**
+** It is possible to create multiple session objects attached to a single
+** database handle.
+**
+** Session objects created using this function should be deleted using the
+** [sqlite3session_delete()] function before the database handle that they
+** are attached to is itself closed. If the database handle is closed before
+** the session object is deleted, then the results of calling any session
+** module function, including [sqlite3session_delete()] on the session object
+** are undefined.
+**
+** Because the session module uses the [sqlite3_preupdate_hook()] API, it
+** is not possible for an application to register a pre-update hook on a
+** database handle that has one or more session objects attached. Nor is
+** it possible to create a session object attached to a database handle for
+** which a pre-update hook is already defined. The results of attempting
+** either of these things are undefined.
+**
+** The session object will be used to create changesets for tables in
+** database zDb, where zDb is either "main", or "temp", or the name of an
+** attached database. It is not an error if database zDb is not attached
+** to the database when the session object is created.
+*/
+SQLITE_API int sqlite3session_create(
+ sqlite3 *db, /* Database handle */
+ const char *zDb, /* Name of db (e.g. "main") */
+ sqlite3_session **ppSession /* OUT: New session object */
+);
+
+/*
+** CAPI3REF: Delete A Session Object
+** DESTRUCTOR: sqlite3_session
+**
+** Delete a session object previously allocated using
+** [sqlite3session_create()]. Once a session object has been deleted, the
+** results of attempting to use pSession with any other session module
+** function are undefined.
+**
+** Session objects must be deleted before the database handle to which they
+** are attached is closed. Refer to the documentation for
+** [sqlite3session_create()] for details.
+*/
+SQLITE_API void sqlite3session_delete(sqlite3_session *pSession);
+
+
+/*
+** CAPI3REF: Enable Or Disable A Session Object
+** METHOD: sqlite3_session
+**
+** Enable or disable the recording of changes by a session object. When
+** enabled, a session object records changes made to the database. When
+** disabled, it does not. A newly created session object is enabled.
+** Refer to the documentation for [sqlite3session_changeset()] for further
+** details regarding how enabling and disabling a session object affects
+** the eventual changesets.
+**
+** Passing zero to this function disables the session. Passing a value
+** greater than zero enables it. Passing a value less than zero is a
+** no-op, and may be used to query the current state of the session.
+**
+** The return value indicates the final state of the session object: 0 if
+** the session is disabled, or 1 if it is enabled.
+*/
+SQLITE_API int sqlite3session_enable(sqlite3_session *pSession, int bEnable);
+
+/*
+** CAPI3REF: Set Or Clear the Indirect Change Flag
+** METHOD: sqlite3_session
+**
+** Each change recorded by a session object is marked as either direct or
+** indirect. A change is marked as indirect if either:
+**
+** <ul>
+** <li> The session object "indirect" flag is set when the change is
+** made, or
+** <li> The change is made by an SQL trigger or foreign key action
+** instead of directly as a result of a user's SQL statement.
+** </ul>
+**
+** If a single row is affected by more than one operation within a session,
+** then the change is considered indirect if all operations meet the criteria
+** for an indirect change above, or direct otherwise.
+**
+** This function is used to set, clear or query the session object indirect
+** flag. If the second argument passed to this function is zero, then the
+** indirect flag is cleared. If it is greater than zero, the indirect flag
+** is set. Passing a value less than zero does not modify the current value
+** of the indirect flag, and may be used to query the current state of the
+** indirect flag for the specified session object.
+**
+** The return value indicates the final state of the indirect flag: 0 if
+** it is clear, or 1 if it is set.
+*/
+SQLITE_API int sqlite3session_indirect(sqlite3_session *pSession, int bIndirect);
+
+/*
+** CAPI3REF: Attach A Table To A Session Object
+** METHOD: sqlite3_session
+**
+** If argument zTab is not NULL, then it is the name of a table to attach
+** to the session object passed as the first argument. All subsequent changes
+** made to the table while the session object is enabled will be recorded. See
+** documentation for [sqlite3session_changeset()] for further details.
+**
+** Or, if argument zTab is NULL, then changes are recorded for all tables
+** in the database. If additional tables are added to the database (by
+** executing "CREATE TABLE" statements) after this call is made, changes for
+** the new tables are also recorded.
+**
+** Changes can only be recorded for tables that have a PRIMARY KEY explicitly
+** defined as part of their CREATE TABLE statement. It does not matter if the
+** PRIMARY KEY is an "INTEGER PRIMARY KEY" (rowid alias) or not. The PRIMARY
+** KEY may consist of a single column, or may be a composite key.
+**
+** It is not an error if the named table does not exist in the database. Nor
+** is it an error if the named table does not have a PRIMARY KEY. However,
+** no changes will be recorded in either of these scenarios.
+**
+** Changes are not recorded for individual rows that have NULL values stored
+** in one or more of their PRIMARY KEY columns.
+**
+** SQLITE_OK is returned if the call completes without error. Or, if an error
+** occurs, an SQLite error code (e.g. SQLITE_NOMEM) is returned.
+**
+** <h3>Special sqlite_stat1 Handling</h3>
+**
+** As of SQLite version 3.22.0, the "sqlite_stat1" table is an exception to
+** some of the rules above. In SQLite, the schema of sqlite_stat1 is:
+** <pre>
+** &nbsp; CREATE TABLE sqlite_stat1(tbl,idx,stat)
+** </pre>
+**
+** Even though sqlite_stat1 does not have a PRIMARY KEY, changes are
+** recorded for it as if the PRIMARY KEY is (tbl,idx). Additionally, changes
+** are recorded for rows for which (idx IS NULL) is true. However, for such
+** rows a zero-length blob (SQL value X'') is stored in the changeset or
+** patchset instead of a NULL value. This allows such changesets to be
+** manipulated by legacy implementations of sqlite3changeset_invert(),
+** concat() and similar.
+**
+** The sqlite3changeset_apply() function automatically converts the
+** zero-length blob back to a NULL value when updating the sqlite_stat1
+** table. However, if the application calls sqlite3changeset_new(),
+** sqlite3changeset_old() or sqlite3changeset_conflict() on a changeset
+** iterator directly (including on a changeset iterator passed to a
+** conflict-handler callback) then the X'' value is returned. The application
+** must translate X'' to NULL itself if required.
+**
+** Legacy (older than 3.22.0) versions of the sessions module cannot capture
+** changes made to the sqlite_stat1 table. Legacy versions of the
+** sqlite3changeset_apply() function silently ignore any modifications to the
+** sqlite_stat1 table that are part of a changeset or patchset.
+*/
+SQLITE_API int sqlite3session_attach(
+ sqlite3_session *pSession, /* Session object */
+ const char *zTab /* Table name */
+);
+
+/*
+** CAPI3REF: Set a table filter on a Session Object.
+** METHOD: sqlite3_session
+**
+** The second argument (xFilter) is the "filter callback". For changes to rows
+** in tables that are not attached to the Session object, the filter is called
+** to determine whether changes to the table's rows should be tracked or not.
+** If xFilter returns 0, changes are not tracked. Note that once a table is
+** attached, xFilter will not be called again.
+*/
+SQLITE_API void sqlite3session_table_filter(
+ sqlite3_session *pSession, /* Session object */
+ int(*xFilter)(
+ void *pCtx, /* Copy of third arg to _filter_table() */
+ const char *zTab /* Table name */
+ ),
+ void *pCtx /* First argument passed to xFilter */
+);
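+
+/*
+** A usage sketch (the prefix "app_" and callback name are invented for
+** illustration; assumes <string.h>): track only tables whose names start
+** with "app_".
+**
+**   static int filterCb(void *pCtx, const char *zTab){
+**     return strncmp(zTab, "app_", 4)==0;   // non-zero => track changes
+**   }
+**
+**   // sqlite3session_table_filter(pSession, filterCb, 0);
+*/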
+
+/*
+** CAPI3REF: Generate A Changeset From A Session Object
+** METHOD: sqlite3_session
+**
+** Obtain a changeset containing changes to the tables attached to the
+** session object passed as the first argument. If successful,
+** set *ppChangeset to point to a buffer containing the changeset
+** and *pnChangeset to the size of the changeset in bytes before returning
+** SQLITE_OK. If an error occurs, set both *ppChangeset and *pnChangeset to
+** zero and return an SQLite error code.
+**
+** A changeset consists of zero or more INSERT, UPDATE and/or DELETE changes,
+** each representing a change to a single row of an attached table. An INSERT
+** change contains the values of each field of a new database row. A DELETE
+** contains the original values of each field of a deleted database row. An
+** UPDATE change contains the original values of each field of an updated
+** database row along with the updated values for each updated non-primary-key
+** column. It is not possible for an UPDATE change to represent a change that
+** modifies the values of primary key columns. If such a change is made, it
+** is represented in a changeset as a DELETE followed by an INSERT.
+**
+** Changes are not recorded for rows that have NULL values stored in one or
+** more of their PRIMARY KEY columns. If such a row is inserted or deleted,
+** no corresponding change is present in the changesets returned by this
+** function. If an existing row with one or more NULL values stored in
+** PRIMARY KEY columns is updated so that all PRIMARY KEY columns are non-NULL,
+** only an INSERT change appears in the changeset. Similarly, if an existing row
+** with non-NULL PRIMARY KEY values is updated so that one or more of its
+** PRIMARY KEY columns are set to NULL, the resulting changeset contains a
+** DELETE change only.
+**
+** The contents of a changeset may be traversed using an iterator created
+** using the [sqlite3changeset_start()] API. A changeset may be applied to
+** a database with a compatible schema using the [sqlite3changeset_apply()]
+** API.
+**
+** Within a changeset generated by this function, all changes related to a
+** single table are grouped together. In other words, when iterating through
+** a changeset or when applying a changeset to a database, all changes related
+** to a single table are processed before moving on to the next table. Tables
+** are sorted in the same order in which they were attached (or auto-attached)
+** to the sqlite3_session object. The order in which the changes related to
+** a single table are stored is undefined.
+**
+** Following a successful call to this function, it is the responsibility of
+** the caller to eventually free the buffer that *ppChangeset points to using
+** [sqlite3_free()].
+**
+** <h3>Changeset Generation</h3>
+**
+** Once a table has been attached to a session object, the session object
+** records the primary key values of all new rows inserted into the table.
+** It also records the original primary key and other column values of any
+** deleted or updated rows. For each unique primary key value, data is only
+** recorded once - the first time a row with said primary key is inserted,
+** updated or deleted in the lifetime of the session.
+**
+** There is one exception to the previous paragraph: when a row is inserted,
+** updated or deleted, if one or more of its primary key columns contain a
+** NULL value, no record of the change is made.
+**
+** The session object therefore accumulates two types of records - those
+** that consist of primary key values only (created when the user inserts
+** a new record) and those that consist of the primary key values and the
+** original values of other table columns (created when the user deletes
+** or updates a record).
+**
+** When this function is called, the requested changeset is created using
+** both the accumulated records and the current contents of the database
+** file. Specifically:
+**
+** <ul>
+** <li> For each record generated by an insert, the database is queried
+** for a row with a matching primary key. If one is found, an INSERT
+** change is added to the changeset. If no such row is found, no change
+** is added to the changeset.
+**
+** <li> For each record generated by an update or delete, the database is
+** queried for a row with a matching primary key. If such a row is
+** found and one or more of the non-primary key fields have been
+** modified from their original values, an UPDATE change is added to
+** the changeset. Or, if no such row is found in the table, a DELETE
+** change is added to the changeset. If there is a row with a matching
+** primary key in the database, but all fields contain their original
+** values, no change is added to the changeset.
+** </ul>
+**
+** This means, amongst other things, that if a row is inserted and then later
+** deleted while a session object is active, neither the insert nor the delete
+** will be present in the changeset. Or if a row is deleted and then later a
+** row with the same primary key values inserted while a session object is
+** active, the resulting changeset will contain an UPDATE change instead of
+** a DELETE and an INSERT.
+**
+** When a session object is disabled (see the [sqlite3session_enable()] API),
+** it does not accumulate records when rows are inserted, updated or deleted.
+** This may appear to have some counter-intuitive effects if a single row
+** is written to more than once during a session. For example, if a row
+** is inserted while a session object is enabled, then later deleted while
+** the same session object is disabled, no INSERT record will appear in the
+** changeset, even though the delete took place while the session was disabled.
+** Or, if one field of a row is updated while a session is disabled, and
+** another field of the same row is updated while the session is enabled, the
+** resulting changeset will contain an UPDATE change that updates both fields.
+*/
+SQLITE_API int sqlite3session_changeset(
+ sqlite3_session *pSession, /* Session object */
+ int *pnChangeset, /* OUT: Size of buffer at *ppChangeset */
+ void **ppChangeset /* OUT: Buffer containing changeset */
+);
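+
+/*
+** An end-to-end sketch (illustrative only; the table name t1 is invented
+** and error handling is abbreviated): record changes and collect a
+** changeset.
+**
+**   sqlite3_session *pSession = 0;
+**   int nChangeset = 0;
+**   void *pChangeset = 0;
+**
+**   sqlite3session_create(db, "main", &pSession);
+**   sqlite3session_attach(pSession, "t1");
+**   // ... execute INSERT/UPDATE/DELETE statements against t1 ...
+**   sqlite3session_changeset(pSession, &nChangeset, &pChangeset);
+**   // ... store or transmit the nChangeset-byte blob ...
+**   sqlite3_free(pChangeset);
+**   sqlite3session_delete(pSession);
+*/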
+
+/*
+** CAPI3REF: Load The Difference Between Tables Into A Session
+** METHOD: sqlite3_session
+**
+** If it is not already attached to the session object passed as the first
+** argument, this function attaches table zTbl in the same manner as the
+** [sqlite3session_attach()] function. If zTbl does not exist, or if it
+** does not have a primary key, this function is a no-op (but does not return
+** an error).
+**
+** Argument zFromDb must be the name of a database ("main", "temp" etc.)
+** attached to the same database handle as the session object that contains
+** a table compatible with the table attached to the session by this function.
+** A table is considered compatible if it:
+**
+** <ul>
+** <li> Has the same name,
+** <li> Has the same set of columns declared in the same order, and
+** <li> Has the same PRIMARY KEY definition.
+** </ul>
+**
+** If the tables are not compatible, SQLITE_SCHEMA is returned. If the tables
+** are compatible but do not have any PRIMARY KEY columns, it is not an error
+** but no changes are added to the session object. As with other session
+** APIs, tables without PRIMARY KEYs are simply ignored.
+**
+** This function adds a set of changes to the session object that could be
+** used to update the table in database zFrom (call this the "from-table")
+** so that its content is the same as the table attached to the session
+** object (call this the "to-table"). Specifically:
+**
+** <ul>
+** <li> For each row (primary key) that exists in the to-table but not in
+** the from-table, an INSERT record is added to the session object.
+**
+** <li> For each row (primary key) that exists in the from-table but not
+** in the to-table, a DELETE record is added to the session object.
+**
+** <li> For each row (primary key) that exists in both tables, but features
+** different non-PK values in each, an UPDATE record is added to the
+** session.
+** </ul>
+**
+** To clarify, if this function is called and then a changeset constructed
+** using [sqlite3session_changeset()], then after applying that changeset to
+** database zFrom the contents of the two compatible tables would be
+** identical.
+**
+** It is an error if database zFrom does not exist or does not contain the
+** required compatible table.
+**
+** If the operation is successful, SQLITE_OK is returned. Otherwise, an SQLite
+** error code. In this case, if argument pzErrMsg is not NULL, *pzErrMsg
+** may be set to point to a buffer containing an English language error
+** message. It is the responsibility of the caller to free this buffer using
+** sqlite3_free().
+*/
+SQLITE_API int sqlite3session_diff(
+ sqlite3_session *pSession,
+ const char *zFromDb,
+ const char *zTbl,
+ char **pzErrMsg
+);
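+
+/*
+** A sketch of loading the difference between table "t1" in the session
+** database and its counterpart in an attached database named "other"
+** (hypothetical names; error handling abbreviated):
+**
+** <pre>
+**   char *zErr = 0;
+**   int rc = sqlite3session_attach(pSession, "t1");
+**   if( rc==SQLITE_OK ){
+**     rc = sqlite3session_diff(pSession, "other", "t1", &zErr);
+**   }
+**   if( rc!=SQLITE_OK ){
+**     // zErr may point to an English language error message
+**     sqlite3_free(zErr);
+**   }
+** </pre>
+*/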
+
+
+/*
+** CAPI3REF: Generate A Patchset From A Session Object
+** METHOD: sqlite3_session
+**
+** The differences between a patchset and a changeset are that:
+**
+** <ul>
+** <li> DELETE records consist of the primary key fields only. The
+** original values of other fields are omitted.
+** <li> The original values of any modified fields are omitted from
+** UPDATE records.
+** </ul>
+**
+** A patchset blob may be used with up-to-date versions of all
+** sqlite3changeset_xxx API functions except for sqlite3changeset_invert(),
+** which returns SQLITE_CORRUPT if it is passed a patchset. Similarly,
+** attempting to use a patchset blob with old versions of the
+** sqlite3changeset_xxx APIs also provokes an SQLITE_CORRUPT error.
+**
+** Because the non-primary key "old.*" fields are omitted, no
+** SQLITE_CHANGESET_DATA conflicts can be detected or reported if a patchset
+** is passed to the sqlite3changeset_apply() API. Other conflict types work
+** in the same way as for changesets.
+**
+** Changes within a patchset are ordered in the same way as for changesets
+** generated by the sqlite3session_changeset() function (i.e. all changes for
+** a single table are grouped together, tables appear in the order in which
+** they were attached to the session object).
+*/
+SQLITE_API int sqlite3session_patchset(
+ sqlite3_session *pSession, /* Session object */
+ int *pnPatchset, /* OUT: Size of buffer at *ppPatchset */
+ void **ppPatchset /* OUT: Buffer containing patchset */
+);
+
+/*
+** CAPI3REF: Test if a changeset has recorded any changes.
+**
+** Return non-zero if no changes to attached tables have been recorded by
+** the session object passed as the first argument. Otherwise, if one or
+** more changes have been recorded, return zero.
+**
+** Even if this function returns zero, it is possible that calling
+** [sqlite3session_changeset()] on the session handle may still return a
+** changeset that contains no changes. This can happen when a row in
+** an attached table is modified and then later on the original values
+** are restored. However, if this function returns non-zero, then it is
+** guaranteed that a call to sqlite3session_changeset() will return a
+** changeset containing zero changes.
+*/
+SQLITE_API int sqlite3session_isempty(sqlite3_session *pSession);
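+
+/*
+** For example, an application might use this function to avoid building
+** changesets that are known in advance to be empty (a sketch, assuming
+** pSession, nChangeset and pChangeset as in the examples above):
+**
+** <pre>
+**   if( 0==sqlite3session_isempty(pSession) ){
+**     rc = sqlite3session_changeset(pSession, &nChangeset, &pChangeset);
+**   }
+** </pre>
+*/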
+
+/*
+** CAPI3REF: Create An Iterator To Traverse A Changeset
+** CONSTRUCTOR: sqlite3_changeset_iter
+**
+** Create an iterator used to iterate through the contents of a changeset.
+** If successful, *pp is set to point to the iterator handle and SQLITE_OK
+** is returned. Otherwise, if an error occurs, *pp is set to zero and an
+** SQLite error code is returned.
+**
+** The following functions can be used to advance and query a changeset
+** iterator created by this function:
+**
+** <ul>
+** <li> [sqlite3changeset_next()]
+** <li> [sqlite3changeset_op()]
+** <li> [sqlite3changeset_new()]
+** <li> [sqlite3changeset_old()]
+** </ul>
+**
+** It is the responsibility of the caller to eventually destroy the iterator
+** by passing it to [sqlite3changeset_finalize()]. The buffer containing the
+** changeset (pChangeset) must remain valid until after the iterator is
+** destroyed.
+**
+** Assuming the changeset blob was created by one of the
+** [sqlite3session_changeset()], [sqlite3changeset_concat()] or
+** [sqlite3changeset_invert()] functions, all changes within the changeset
+** that apply to a single table are grouped together. This means that when
+** an application iterates through a changeset using an iterator created by
+** this function, all changes that relate to a single table are visited
+** consecutively. There is no chance that the iterator will visit a change
+** that applies to table X, then one for table Y, and then later on visit
+** another change for table X.
+**
+** The behavior of sqlite3changeset_start_v2() and its streaming equivalent
+** may be modified by passing a combination of
+** [SQLITE_CHANGESETSTART_INVERT | supported flags] as the 4th parameter.
+**
+** Note that the sqlite3changeset_start_v2() API is still <b>experimental</b>
+** and therefore subject to change.
+*/
+SQLITE_API int sqlite3changeset_start(
+ sqlite3_changeset_iter **pp, /* OUT: New changeset iterator handle */
+ int nChangeset, /* Size of changeset blob in bytes */
+ void *pChangeset /* Pointer to blob containing changeset */
+);
+SQLITE_API int sqlite3changeset_start_v2(
+ sqlite3_changeset_iter **pp, /* OUT: New changeset iterator handle */
+ int nChangeset, /* Size of changeset blob in bytes */
+ void *pChangeset, /* Pointer to blob containing changeset */
+ int flags /* SESSION_CHANGESETSTART_* flags */
+);
+
+/*
+** CAPI3REF: Flags for sqlite3changeset_start_v2
+**
+** The following flags may be passed via the 4th parameter to
+** [sqlite3changeset_start_v2] and [sqlite3changeset_start_v2_strm]:
+**
+** <dl>
+** <dt>SQLITE_CHANGESETSTART_INVERT <dd>
+** Invert the changeset while iterating through it. This is equivalent to
+** inverting a changeset using sqlite3changeset_invert() before iterating
+** through it. It is an error to specify this flag with a patchset.
+** </dl>
+*/
+#define SQLITE_CHANGESETSTART_INVERT 0x0002
+
+
+/*
+** CAPI3REF: Advance A Changeset Iterator
+** METHOD: sqlite3_changeset_iter
+**
+** This function may only be used with iterators created by the function
+** [sqlite3changeset_start()]. If it is called on an iterator passed to
+** a conflict-handler callback by [sqlite3changeset_apply()], SQLITE_MISUSE
+** is returned and the call has no effect.
+**
+** Immediately after an iterator is created by sqlite3changeset_start(), it
+** does not point to any change in the changeset. Assuming the changeset
+** is not empty, the first call to this function advances the iterator to
+** point to the first change in the changeset. Each subsequent call advances
+** the iterator to point to the next change in the changeset (if any). If
+** no error occurs and the iterator points to a valid change after a call
+** to sqlite3changeset_next() has advanced it, SQLITE_ROW is returned.
+** Otherwise, if all changes in the changeset have already been visited,
+** SQLITE_DONE is returned.
+**
+** If an error occurs, an SQLite error code is returned. Possible error
+** codes include SQLITE_CORRUPT (if the changeset buffer is corrupt) or
+** SQLITE_NOMEM.
+*/
+SQLITE_API int sqlite3changeset_next(sqlite3_changeset_iter *pIter);
+
+/*
+** CAPI3REF: Obtain The Current Operation From A Changeset Iterator
+** METHOD: sqlite3_changeset_iter
+**
+** The pIter argument passed to this function may either be an iterator
+** passed to a conflict-handler by [sqlite3changeset_apply()], or an iterator
+** created by [sqlite3changeset_start()]. In the latter case, the most recent
+** call to [sqlite3changeset_next()] must have returned [SQLITE_ROW]. If this
+** is not the case, this function returns [SQLITE_MISUSE].
+**
+** If argument pzTab is not NULL, then *pzTab is set to point to a
+** nul-terminated utf-8 encoded string containing the name of the table
+** affected by the current change. The buffer remains valid until either
+** sqlite3changeset_next() is called on the iterator or until the
+** conflict-handler function returns. If pnCol is not NULL, then *pnCol is
+** set to the number of columns in the table affected by the change. If
+** pbIndirect is not NULL, then *pbIndirect is set to true (1) if the change
+** is an indirect change, or false (0) otherwise. See the documentation for
+** [sqlite3session_indirect()] for a description of direct and indirect
+** changes. Finally, if pOp is not NULL, then *pOp is set to one of
+** [SQLITE_INSERT], [SQLITE_DELETE] or [SQLITE_UPDATE], depending on the
+** type of change that the iterator currently points to.
+**
+** If no error occurs, SQLITE_OK is returned. If an error does occur, an
+** SQLite error code is returned. The values of the output variables may not
+** be trusted in this case.
+*/
+SQLITE_API int sqlite3changeset_op(
+ sqlite3_changeset_iter *pIter, /* Iterator object */
+ const char **pzTab, /* OUT: Pointer to table name */
+ int *pnCol, /* OUT: Number of columns in table */
+ int *pOp, /* OUT: SQLITE_INSERT, DELETE or UPDATE */
+ int *pbIndirect /* OUT: True for an 'indirect' change */
+);
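+
+/*
+** Together with [sqlite3changeset_start()], [sqlite3changeset_next()] and
+** [sqlite3changeset_finalize()], this function allows the contents of a
+** changeset to be enumerated. A minimal sketch (hypothetical buffer
+** pChangeset of nChangeset bytes; error handling abbreviated):
+**
+** <pre>
+**   sqlite3_changeset_iter *pIter = 0;
+**   int rc = sqlite3changeset_start(&pIter, nChangeset, pChangeset);
+**   if( rc==SQLITE_OK ){
+**     while( SQLITE_ROW==sqlite3changeset_next(pIter) ){
+**       const char *zTab; int nCol; int op; int bIndirect;
+**       sqlite3changeset_op(pIter, &zTab, &nCol, &op, &bIndirect);
+**       // op is one of SQLITE_INSERT, SQLITE_UPDATE or SQLITE_DELETE
+**     }
+**     rc = sqlite3changeset_finalize(pIter);
+**   }
+** </pre>
+*/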
+
+/*
+** CAPI3REF: Obtain The Primary Key Definition Of A Table
+** METHOD: sqlite3_changeset_iter
+**
+** For each modified table, a changeset includes the following:
+**
+** <ul>
+** <li> The number of columns in the table, and
+** <li> Which of those columns make up the table's PRIMARY KEY.
+** </ul>
+**
+** This function is used to find which columns comprise the PRIMARY KEY of
+** the table modified by the change that iterator pIter currently points to.
+** If successful, *pabPK is set to point to an array of nCol entries, where
+** nCol is the number of columns in the table. Elements of *pabPK are set to
+** 0x01 if the corresponding column is part of the table's primary key, or
+** 0x00 if it is not.
+**
+** If argument pnCol is not NULL, then *pnCol is set to the number of columns
+** in the table.
+**
+** If this function is called when the iterator does not point to a valid
+** entry, SQLITE_MISUSE is returned and the output variables zeroed. Otherwise,
+** SQLITE_OK is returned and the output variables populated as described
+** above.
+*/
+SQLITE_API int sqlite3changeset_pk(
+ sqlite3_changeset_iter *pIter, /* Iterator object */
+ unsigned char **pabPK, /* OUT: Array of boolean - true for PK cols */
+ int *pnCol /* OUT: Number of entries in output array */
+);
+
+/*
+** CAPI3REF: Obtain old.* Values From A Changeset Iterator
+** METHOD: sqlite3_changeset_iter
+**
+** The pIter argument passed to this function may either be an iterator
+** passed to a conflict-handler by [sqlite3changeset_apply()], or an iterator
+** created by [sqlite3changeset_start()]. In the latter case, the most recent
+** call to [sqlite3changeset_next()] must have returned SQLITE_ROW.
+** Furthermore, it may only be called if the type of change that the iterator
+** currently points to is either [SQLITE_DELETE] or [SQLITE_UPDATE]. Otherwise,
+** this function returns [SQLITE_MISUSE] and sets *ppValue to NULL.
+**
+** Argument iVal must be greater than or equal to 0, and less than the number
+** of columns in the table affected by the current change. Otherwise,
+** [SQLITE_RANGE] is returned and *ppValue is set to NULL.
+**
+** If successful, this function sets *ppValue to point to a protected
+** sqlite3_value object containing the iVal'th value from the vector of
+** original row values stored as part of the UPDATE or DELETE change and
+** returns SQLITE_OK. The name of the function comes from the fact that this
+** is similar to the "old.*" columns available to update or delete triggers.
+**
+** If some other error occurs (e.g. an OOM condition), an SQLite error code
+** is returned and *ppValue is set to NULL.
+*/
+SQLITE_API int sqlite3changeset_old(
+ sqlite3_changeset_iter *pIter, /* Changeset iterator */
+ int iVal, /* Column number */
+ sqlite3_value **ppValue /* OUT: Old value (or NULL pointer) */
+);
+
+/*
+** CAPI3REF: Obtain new.* Values From A Changeset Iterator
+** METHOD: sqlite3_changeset_iter
+**
+** The pIter argument passed to this function may either be an iterator
+** passed to a conflict-handler by [sqlite3changeset_apply()], or an iterator
+** created by [sqlite3changeset_start()]. In the latter case, the most recent
+** call to [sqlite3changeset_next()] must have returned SQLITE_ROW.
+** Furthermore, it may only be called if the type of change that the iterator
+** currently points to is either [SQLITE_UPDATE] or [SQLITE_INSERT]. Otherwise,
+** this function returns [SQLITE_MISUSE] and sets *ppValue to NULL.
+**
+** Argument iVal must be greater than or equal to 0, and less than the number
+** of columns in the table affected by the current change. Otherwise,
+** [SQLITE_RANGE] is returned and *ppValue is set to NULL.
+**
+** If successful, this function sets *ppValue to point to a protected
+** sqlite3_value object containing the iVal'th value from the vector of
+** new row values stored as part of the UPDATE or INSERT change and
+** returns SQLITE_OK. If the change is an UPDATE and does not include
+** a new value for the requested column, *ppValue is set to NULL and
+** SQLITE_OK returned. The name of the function comes from the fact that
+** this is similar to the "new.*" columns available to update or insert
+** triggers.
+**
+** If some other error occurs (e.g. an OOM condition), an SQLite error code
+** is returned and *ppValue is set to NULL.
+*/
+SQLITE_API int sqlite3changeset_new(
+ sqlite3_changeset_iter *pIter, /* Changeset iterator */
+ int iVal, /* Column number */
+ sqlite3_value **ppValue /* OUT: New value (or NULL pointer) */
+);
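+
+/*
+** Within an iteration loop like the one sketched under
+** sqlite3changeset_op() above, old.* and new.* values might be read as
+** follows (a sketch; iVal is a column index and op the value reported
+** by sqlite3changeset_op()):
+**
+** <pre>
+**   sqlite3_value *pOld = 0;
+**   sqlite3_value *pNew = 0;
+**   if( op==SQLITE_DELETE || op==SQLITE_UPDATE ){
+**     sqlite3changeset_old(pIter, iVal, &pOld);
+**   }
+**   if( op==SQLITE_INSERT || op==SQLITE_UPDATE ){
+**     sqlite3changeset_new(pIter, iVal, &pNew);  // pNew may be set to NULL
+**   }
+** </pre>
+*/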
+
+/*
+** CAPI3REF: Obtain Conflicting Row Values From A Changeset Iterator
+** METHOD: sqlite3_changeset_iter
+**
+** This function should only be used with iterator objects passed to a
+** conflict-handler callback by [sqlite3changeset_apply()] with either
+** [SQLITE_CHANGESET_DATA] or [SQLITE_CHANGESET_CONFLICT]. If this function
+** is called on any other iterator, [SQLITE_MISUSE] is returned and *ppValue
+** is set to NULL.
+**
+** Argument iVal must be greater than or equal to 0, and less than the number
+** of columns in the table affected by the current change. Otherwise,
+** [SQLITE_RANGE] is returned and *ppValue is set to NULL.
+**
+** If successful, this function sets *ppValue to point to a protected
+** sqlite3_value object containing the iVal'th value from the
+** "conflicting row" associated with the current conflict-handler callback
+** and returns SQLITE_OK.
+**
+** If some other error occurs (e.g. an OOM condition), an SQLite error code
+** is returned and *ppValue is set to NULL.
+*/
+SQLITE_API int sqlite3changeset_conflict(
+ sqlite3_changeset_iter *pIter, /* Changeset iterator */
+ int iVal, /* Column number */
+ sqlite3_value **ppValue /* OUT: Value from conflicting row */
+);
+
+/*
+** CAPI3REF: Determine The Number Of Foreign Key Constraint Violations
+** METHOD: sqlite3_changeset_iter
+**
+** This function may only be called with an iterator passed to an
+** SQLITE_CHANGESET_FOREIGN_KEY conflict handler callback. In this case
+** it sets the output variable to the total number of known foreign key
+** violations in the destination database and returns SQLITE_OK.
+**
+** In all other cases this function returns SQLITE_MISUSE.
+*/
+SQLITE_API int sqlite3changeset_fk_conflicts(
+ sqlite3_changeset_iter *pIter, /* Changeset iterator */
+ int *pnOut /* OUT: Number of FK violations */
+);
+
+
+/*
+** CAPI3REF: Finalize A Changeset Iterator
+** METHOD: sqlite3_changeset_iter
+**
+** This function is used to finalize an iterator allocated with
+** [sqlite3changeset_start()].
+**
+** This function should only be called on iterators created using the
+** [sqlite3changeset_start()] function. If an application calls this
+** function with an iterator passed to a conflict-handler by
+** [sqlite3changeset_apply()], [SQLITE_MISUSE] is immediately returned and the
+** call has no effect.
+**
+** If an error was encountered within a call to an sqlite3changeset_xxx()
+** function (for example an [SQLITE_CORRUPT] in [sqlite3changeset_next()] or an
+** [SQLITE_NOMEM] in [sqlite3changeset_new()]) then an error code corresponding
+** to that error is returned by this function. Otherwise, SQLITE_OK is
+** returned. This is to allow the following pattern (pseudo-code):
+**
+** <pre>
+** sqlite3changeset_start();
+** while( SQLITE_ROW==sqlite3changeset_next() ){
+** // Do something with change.
+** }
+** rc = sqlite3changeset_finalize();
+** if( rc!=SQLITE_OK ){
+** // An error has occurred
+** }
+** </pre>
+*/
+SQLITE_API int sqlite3changeset_finalize(sqlite3_changeset_iter *pIter);
+
+/*
+** CAPI3REF: Invert A Changeset
+**
+** This function is used to "invert" a changeset object. Applying an inverted
+** changeset to a database reverses the effects of applying the uninverted
+** changeset. Specifically:
+**
+** <ul>
+** <li> Each DELETE change is changed to an INSERT, and
+** <li> Each INSERT change is changed to a DELETE, and
+** <li> For each UPDATE change, the old.* and new.* values are exchanged.
+** </ul>
+**
+** This function does not change the order in which changes appear within
+** the changeset. It merely reverses the sense of each individual change.
+**
+** If successful, a pointer to a buffer containing the inverted changeset
+** is stored in *ppOut, the size of the same buffer is stored in *pnOut, and
+** SQLITE_OK is returned. If an error occurs, both *pnOut and *ppOut are
+** zeroed and an SQLite error code returned.
+**
+** It is the responsibility of the caller to eventually call sqlite3_free()
+** on the *ppOut pointer to free the buffer allocation following a successful
+** call to this function.
+**
+** WARNING/TODO: This function currently assumes that the input is a valid
+** changeset. If it is not, the results are undefined.
+*/
+SQLITE_API int sqlite3changeset_invert(
+ int nIn, const void *pIn, /* Input changeset */
+ int *pnOut, void **ppOut /* OUT: Inverse of input */
+);
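+
+/*
+** For example, an "undo" blob for a recorded changeset might be produced
+** as follows (a sketch; pChangeset/nChangeset as in the examples above):
+**
+** <pre>
+**   int nUndo = 0;
+**   void *pUndo = 0;
+**   rc = sqlite3changeset_invert(nChangeset, pChangeset, &nUndo, &pUndo);
+**   // applying pUndo via sqlite3changeset_apply() reverses the changes
+**   sqlite3_free(pUndo);
+** </pre>
+*/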
+
+/*
+** CAPI3REF: Concatenate Two Changeset Objects
+**
+** This function is used to concatenate two changesets, A and B, into a
+** single changeset. The result is a changeset equivalent to applying
+** changeset A followed by changeset B.
+**
+** This function combines the two input changesets using an
+** sqlite3_changegroup object. Calling it produces results similar to those
+** of the following code fragment:
+**
+** <pre>
+** sqlite3_changegroup *pGrp;
+** rc = sqlite3_changegroup_new(&pGrp);
+** if( rc==SQLITE_OK ) rc = sqlite3changegroup_add(pGrp, nA, pA);
+** if( rc==SQLITE_OK ) rc = sqlite3changegroup_add(pGrp, nB, pB);
+** if( rc==SQLITE_OK ){
+** rc = sqlite3changegroup_output(pGrp, pnOut, ppOut);
+** }else{
+** *ppOut = 0;
+** *pnOut = 0;
+** }
+** </pre>
+**
+** Refer to the sqlite3_changegroup documentation below for details.
+*/
+SQLITE_API int sqlite3changeset_concat(
+ int nA, /* Number of bytes in buffer pA */
+ void *pA, /* Pointer to buffer containing changeset A */
+ int nB, /* Number of bytes in buffer pB */
+ void *pB, /* Pointer to buffer containing changeset B */
+ int *pnOut, /* OUT: Number of bytes in output changeset */
+ void **ppOut /* OUT: Buffer containing output changeset */
+);
+
+
+/*
+** CAPI3REF: Changegroup Handle
+**
+** A changegroup is an object used to combine two or more
+** [changesets] or [patchsets]
+*/
+typedef struct sqlite3_changegroup sqlite3_changegroup;
+
+/*
+** CAPI3REF: Create A New Changegroup Object
+** CONSTRUCTOR: sqlite3_changegroup
+**
+** An sqlite3_changegroup object is used to combine two or more changesets
+** (or patchsets) into a single changeset (or patchset). A single changegroup
+** object may combine changesets or patchsets, but not both. The output is
+** always in the same format as the input.
+**
+** If successful, this function returns SQLITE_OK and populates (*pp) with
+** a pointer to a new sqlite3_changegroup object before returning. The caller
+** should eventually free the returned object using a call to
+** sqlite3changegroup_delete(). If an error occurs, an SQLite error code
+** (e.g. SQLITE_NOMEM) is returned and *pp is set to NULL.
+**
+** The usual usage pattern for an sqlite3_changegroup object is as follows:
+**
+** <ul>
+** <li> It is created using a call to sqlite3changegroup_new().
+**
+** <li> Zero or more changesets (or patchsets) are added to the object
+** by calling sqlite3changegroup_add().
+**
+** <li> The result of combining all input changesets together is obtained
+** by the application via a call to sqlite3changegroup_output().
+**
+** <li> The object is deleted using a call to sqlite3changegroup_delete().
+** </ul>
+**
+** Any number of calls to add() and output() may be made between the calls to
+** new() and delete(), and in any order.
+**
+** As well as the regular sqlite3changegroup_add() and
+** sqlite3changegroup_output() functions, also available are the streaming
+** versions sqlite3changegroup_add_strm() and sqlite3changegroup_output_strm().
+*/
+SQLITE_API int sqlite3changegroup_new(sqlite3_changegroup **pp);
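+
+/*
+** In code, the pattern described above might look as follows (a sketch;
+** the input changesets pA/nA and pB/nB and outputs pOut/nOut are
+** hypothetical; error handling abbreviated):
+**
+** <pre>
+**   sqlite3_changegroup *pGrp = 0;
+**   int rc = sqlite3changegroup_new(&pGrp);
+**   if( rc==SQLITE_OK ) rc = sqlite3changegroup_add(pGrp, nA, pA);
+**   if( rc==SQLITE_OK ) rc = sqlite3changegroup_add(pGrp, nB, pB);
+**   if( rc==SQLITE_OK ) rc = sqlite3changegroup_output(pGrp, &nOut, &pOut);
+**   sqlite3changegroup_delete(pGrp);
+**   // ... use the combined changeset, then call sqlite3_free(pOut) ...
+** </pre>
+*/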
+
+/*
+** CAPI3REF: Add A Changeset To A Changegroup
+** METHOD: sqlite3_changegroup
+**
+** Add all changes within the changeset (or patchset) in buffer pData (size
+** nData bytes) to the changegroup.
+**
+** If the buffer contains a patchset, then all prior calls to this function
+** on the same changegroup object must also have specified patchsets. Or, if
+** the buffer contains a changeset, so must have the earlier calls to this
+** function. Otherwise, SQLITE_ERROR is returned and no changes are added
+** to the changegroup.
+**
+** Rows within the changeset and changegroup are identified by the values in
+** their PRIMARY KEY columns. A change in the changeset is considered to
+** apply to the same row as a change already present in the changegroup if
+** the two rows have the same primary key.
+**
+** Changes to rows that do not already appear in the changegroup are
+** simply copied into it. Or, if both the new changeset and the changegroup
+** contain changes that apply to a single row, the final contents of the
+** changegroup depends on the type of each change, as follows:
+**
+** <table border=1 style="margin-left:8ex;margin-right:8ex">
+** <tr><th style="white-space:pre">Existing Change </th>
+** <th style="white-space:pre">New Change </th>
+** <th>Output Change
+** <tr><td>INSERT <td>INSERT <td>
+** The new change is ignored. This case does not occur if the new
+** changeset was recorded immediately after the changesets already
+** added to the changegroup.
+** <tr><td>INSERT <td>UPDATE <td>
+** The INSERT change remains in the changegroup. The values in the
+** INSERT change are modified as if the row was inserted by the
+** existing change and then updated according to the new change.
+** <tr><td>INSERT <td>DELETE <td>
+** The existing INSERT is removed from the changegroup. The DELETE is
+** not added.
+** <tr><td>UPDATE <td>INSERT <td>
+** The new change is ignored. This case does not occur if the new
+** changeset was recorded immediately after the changesets already
+** added to the changegroup.
+** <tr><td>UPDATE <td>UPDATE <td>
+** The existing UPDATE remains within the changegroup. It is amended
+** so that the accompanying values are as if the row was updated once
+** by the existing change and then again by the new change.
+** <tr><td>UPDATE <td>DELETE <td>
+** The existing UPDATE is replaced by the new DELETE within the
+** changegroup.
+** <tr><td>DELETE <td>INSERT <td>
+** If one or more of the column values in the row inserted by the
+** new change differ from those in the row deleted by the existing
+** change, the existing DELETE is replaced by an UPDATE within the
+** changegroup. Otherwise, if the inserted row is exactly the same
+** as the deleted row, the existing DELETE is simply discarded.
+** <tr><td>DELETE <td>UPDATE <td>
+** The new change is ignored. This case does not occur if the new
+** changeset was recorded immediately after the changesets already
+** added to the changegroup.
+** <tr><td>DELETE <td>DELETE <td>
+** The new change is ignored. This case does not occur if the new
+** changeset was recorded immediately after the changesets already
+** added to the changegroup.
+** </table>
+**
+** If the new changeset contains changes to a table that is already present
+** in the changegroup, then the number of columns and the position of the
+** primary key columns for the table must be consistent. If this is not the
+** case, this function fails with SQLITE_SCHEMA. If the input changeset
+** appears to be corrupt and the corruption is detected, SQLITE_CORRUPT is
+** returned. Or, if an out-of-memory condition occurs during processing, this
+** function returns SQLITE_NOMEM. In all cases, if an error occurs the state
+** of the final contents of the changegroup is undefined.
+**
+** If no error occurs, SQLITE_OK is returned.
+*/
+SQLITE_API int sqlite3changegroup_add(sqlite3_changegroup*, int nData, void *pData);
+
+/*
+** CAPI3REF: Obtain A Composite Changeset From A Changegroup
+** METHOD: sqlite3_changegroup
+**
+** Obtain a buffer containing a changeset (or patchset) representing the
+** current contents of the changegroup. If the inputs to the changegroup
+** were themselves changesets, the output is a changeset. Or, if the
+** inputs were patchsets, the output is also a patchset.
+**
+** As with the output of the sqlite3session_changeset() and
+** sqlite3session_patchset() functions, all changes related to a single
+** table are grouped together in the output of this function. Tables appear
+** in the same order as for the very first changeset added to the changegroup.
+** If the second or subsequent changesets added to the changegroup contain
+** changes for tables that do not appear in the first changeset, they are
+** appended onto the end of the output changeset, again in the order in
+** which they are first encountered.
+**
+** If an error occurs, an SQLite error code is returned and the output
+** variables (*pnData) and (*ppData) are set to 0. Otherwise, SQLITE_OK
+** is returned and the output variables are set to the size of and a
+** pointer to the output buffer, respectively. In this case it is the
+** responsibility of the caller to eventually free the buffer using a
+** call to sqlite3_free().
+*/
+SQLITE_API int sqlite3changegroup_output(
+ sqlite3_changegroup*,
+ int *pnData, /* OUT: Size of output buffer in bytes */
+ void **ppData /* OUT: Pointer to output buffer */
+);
+
+/*
+** CAPI3REF: Delete A Changegroup Object
+** DESTRUCTOR: sqlite3_changegroup
+*/
+SQLITE_API void sqlite3changegroup_delete(sqlite3_changegroup*);
+
+/*
+** CAPI3REF: Apply A Changeset To A Database
+**
+** Apply a changeset or patchset to a database. These functions attempt to
+** update the "main" database attached to handle db with the changes found in
+** the changeset passed via the second and third arguments.
+**
+** The fourth argument (xFilter) passed to these functions is the "filter
+** callback". If it is not NULL, then for each table affected by at least one
+** change in the changeset, the filter callback is invoked with
+** the table name as the second argument, and a copy of the context pointer
+** passed as the sixth argument as the first. If the "filter callback"
+** returns zero, then no attempt is made to apply any changes to the table.
+** Otherwise, if the return value is non-zero or the xFilter argument
+** is NULL, all changes related to the table are attempted.
+**
+** For each table that is not excluded by the filter callback, this function
+** tests that the target database contains a compatible table. A table is
+** considered compatible if all of the following are true:
+**
+** <ul>
+** <li> The table has the same name as the name recorded in the
+** changeset, and
+** <li> The table has at least as many columns as recorded in the
+** changeset, and
+** <li> The table has primary key columns in the same position as
+** recorded in the changeset.
+** </ul>
+**
+** If there is no compatible table, it is not an error, but none of the
+** changes associated with the table are applied. A warning message is issued
+** via the sqlite3_log() mechanism with the error code SQLITE_SCHEMA. At most
+** one such warning is issued for each table in the changeset.
+**
+** For each change for which there is a compatible table, an attempt is made
+** to modify the table contents according to the UPDATE, INSERT or DELETE
+** change. If a change cannot be applied cleanly, the conflict handler
+** function passed as the fifth argument to sqlite3changeset_apply() may be
+** invoked. A description of exactly when the conflict handler is invoked for
+** each type of change is given below.
+**
+** Unlike the xFilter argument, xConflict may not be passed NULL. The results
+** of passing anything other than a valid function pointer as the xConflict
+** argument are undefined.
+**
+** Each time the conflict handler function is invoked, it must return one
+** of [SQLITE_CHANGESET_OMIT], [SQLITE_CHANGESET_ABORT] or
+** [SQLITE_CHANGESET_REPLACE]. SQLITE_CHANGESET_REPLACE may only be returned
+** if the second argument passed to the conflict handler is either
+** SQLITE_CHANGESET_DATA or SQLITE_CHANGESET_CONFLICT. If the conflict-handler
+** returns an illegal value, any changes already made are rolled back and
+** the call to sqlite3changeset_apply() returns SQLITE_MISUSE. Different
+** actions are taken by sqlite3changeset_apply() depending on the value
+** returned by each invocation of the conflict-handler function. Refer to
+** the documentation for the three
+** [SQLITE_CHANGESET_OMIT|available return values] for details.
+**
+** <dl>
+** <dt>DELETE Changes<dd>
+** For each DELETE change, the function checks if the target database
+** contains a row with the same primary key value (or values) as the
+** original row values stored in the changeset. If it does, and the values
+** stored in all non-primary key columns also match the values stored in
+** the changeset, the row is deleted from the target database.
+**
+** If a row with matching primary key values is found, but one or more of
+** the non-primary key fields contains a value different from the original
+** row value stored in the changeset, the conflict-handler function is
+** invoked with [SQLITE_CHANGESET_DATA] as the second argument. If the
+** database table has more columns than are recorded in the changeset,
+** only the values of those non-primary key fields are compared against
+** the current database contents - any trailing database table columns
+** are ignored.
+**
+** If no row with matching primary key values is found in the database,
+** the conflict-handler function is invoked with [SQLITE_CHANGESET_NOTFOUND]
+** passed as the second argument.
+**
+** If the DELETE operation is attempted, but SQLite returns SQLITE_CONSTRAINT
+** (which can only happen if a foreign key constraint is violated), the
+** conflict-handler function is invoked with [SQLITE_CHANGESET_CONSTRAINT]
+** passed as the second argument. This includes the case where the DELETE
+** operation is attempted because an earlier call to the conflict handler
+** function returned [SQLITE_CHANGESET_REPLACE].
+**
+** <dt>INSERT Changes<dd>
+** For each INSERT change, an attempt is made to insert the new row into
+** the database. If the changeset row contains fewer fields than the
+** database table, the trailing fields are populated with their default
+** values.
+**
+** If the attempt to insert the row fails because the database already
+** contains a row with the same primary key values, the conflict handler
+** function is invoked with the second argument set to
+** [SQLITE_CHANGESET_CONFLICT].
+**
+** If the attempt to insert the row fails because of some other constraint
+** violation (e.g. NOT NULL or UNIQUE), the conflict handler function is
+** invoked with the second argument set to [SQLITE_CHANGESET_CONSTRAINT].
+** This includes the case where the INSERT operation is re-attempted because
+** an earlier call to the conflict handler function returned
+** [SQLITE_CHANGESET_REPLACE].
+**
+** <dt>UPDATE Changes<dd>
+** For each UPDATE change, the function checks if the target database
+** contains a row with the same primary key value (or values) as the
+** original row values stored in the changeset. If it does, and the values
+** stored in all modified non-primary key columns also match the values
+** stored in the changeset, the row is updated within the target database.
+**
+** If a row with matching primary key values is found, but one or more of
+** the modified non-primary key fields contains a value different from an
+** original row value stored in the changeset, the conflict-handler function
+** is invoked with [SQLITE_CHANGESET_DATA] as the second argument. Since
+** UPDATE changes only contain values for non-primary key fields that are
+** to be modified, only those fields need to match the original values to
+** avoid the SQLITE_CHANGESET_DATA conflict-handler callback.
+**
+** If no row with matching primary key values is found in the database,
+** the conflict-handler function is invoked with [SQLITE_CHANGESET_NOTFOUND]
+** passed as the second argument.
+**
+** If the UPDATE operation is attempted, but SQLite returns
+** SQLITE_CONSTRAINT, the conflict-handler function is invoked with
+** [SQLITE_CHANGESET_CONSTRAINT] passed as the second argument.
+** This includes the case where the UPDATE operation is attempted after
+** an earlier call to the conflict handler function returned
+** [SQLITE_CHANGESET_REPLACE].
+** </dl>
+**
+** It is safe to execute SQL statements, including those that write to the
+** table that the callback related to, from within the xConflict callback.
+** This can be used to further customize the application's conflict
+** resolution strategy.
+**
+** All changes made by these functions are enclosed in a savepoint transaction.
+** If any other error (aside from a constraint failure when attempting to
+** write to the target database) occurs, then the savepoint transaction is
+** rolled back, restoring the target database to its original state, and an
+** SQLite error code returned.
+**
+** If the output parameters (ppRebase) and (pnRebase) are non-NULL and
+** the input is a changeset (not a patchset), then sqlite3changeset_apply_v2()
+** may set (*ppRebase) to point to a "rebase" buffer that may be used with
+** the sqlite3_rebaser APIs before returning. In this case (*pnRebase)
+** is set to the size of the buffer in bytes. It is the responsibility of the
+** caller to eventually free any such buffer using sqlite3_free(). The buffer
+** is only allocated and populated if one or more conflicts were encountered
+** while applying the changeset. See comments surrounding the sqlite3_rebaser
+** APIs for further details.
+**
+** The behavior of sqlite3changeset_apply_v2() and its streaming equivalent
+** may be modified by passing a combination of
+** [SQLITE_CHANGESETAPPLY_NOSAVEPOINT | supported flags] as the 9th parameter.
+**
+** Note that the sqlite3changeset_apply_v2() API is still <b>experimental</b>
+** and therefore subject to change.
+*/
+SQLITE_API int sqlite3changeset_apply(
+ sqlite3 *db, /* Apply change to "main" db of this handle */
+ int nChangeset, /* Size of changeset in bytes */
+ void *pChangeset, /* Changeset blob */
+ int(*xFilter)(
+ void *pCtx, /* Copy of sixth arg to _apply() */
+ const char *zTab /* Table name */
+ ),
+ int(*xConflict)(
+ void *pCtx, /* Copy of sixth arg to _apply() */
+ int eConflict, /* DATA, NOTFOUND, CONFLICT, CONSTRAINT */
+ sqlite3_changeset_iter *p /* Handle describing change and conflict */
+ ),
+ void *pCtx /* First argument passed to xConflict */
+);
+SQLITE_API int sqlite3changeset_apply_v2(
+ sqlite3 *db, /* Apply change to "main" db of this handle */
+ int nChangeset, /* Size of changeset in bytes */
+ void *pChangeset, /* Changeset blob */
+ int(*xFilter)(
+ void *pCtx, /* Copy of sixth arg to _apply() */
+ const char *zTab /* Table name */
+ ),
+ int(*xConflict)(
+ void *pCtx, /* Copy of sixth arg to _apply() */
+ int eConflict, /* DATA, NOTFOUND, CONFLICT, CONSTRAINT */
+ sqlite3_changeset_iter *p /* Handle describing change and conflict */
+ ),
+ void *pCtx, /* First argument passed to xConflict */
+ void **ppRebase, int *pnRebase, /* OUT: Rebase data */
+ int flags /* SESSION_CHANGESETAPPLY_* flags */
+);
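+
+/*
+** A minimal sketch of applying a changeset (hypothetical names; the
+** conflict handler shown simply omits every conflicting change, which is
+** only one possible policy):
+**
+** <pre>
+**   static int xConflict(void *pCtx, int eConflict, sqlite3_changeset_iter *p){
+**     return SQLITE_CHANGESET_OMIT;
+**   }
+**
+**   int rc = sqlite3changeset_apply(db, nChangeset, pChangeset,
+**                                   0, xConflict, 0);
+** </pre>
+*/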
+
+/*
+** CAPI3REF: Flags for sqlite3changeset_apply_v2
+**
+** The following flags may be passed via the 9th parameter to
+** [sqlite3changeset_apply_v2] and [sqlite3changeset_apply_v2_strm]:
+**
+** <dl>
+** <dt>SQLITE_CHANGESETAPPLY_NOSAVEPOINT <dd>
+** Usually, the sessions module encloses all operations performed by
+** a single call to apply_v2() or apply_v2_strm() in a [SAVEPOINT]. The
+** SAVEPOINT is committed if the changeset or patchset is successfully
+** applied, or rolled back if an error occurs. Specifying this flag
+** causes the sessions module to omit this savepoint. In this case, if the
+** caller has an open transaction or savepoint when apply_v2() is called,
+** it may revert the partially applied changeset by rolling it back.
+**
+** <dt>SQLITE_CHANGESETAPPLY_INVERT <dd>
+** Invert the changeset before applying it. This is equivalent to inverting
+** a changeset using sqlite3changeset_invert() before applying it. It is
+** an error to specify this flag with a patchset.
+*/
+#define SQLITE_CHANGESETAPPLY_NOSAVEPOINT 0x0001
+#define SQLITE_CHANGESETAPPLY_INVERT 0x0002
+
+/*
+** CAPI3REF: Constants Passed To The Conflict Handler
+**
+** Values that may be passed as the second argument to a conflict-handler.
+**
+** <dl>
+** <dt>SQLITE_CHANGESET_DATA<dd>
+** The conflict handler is invoked with CHANGESET_DATA as the second argument
+** when processing a DELETE or UPDATE change if a row with the required
+** PRIMARY KEY fields is present in the database, but one or more other
+** (non primary-key) fields modified by the update do not contain the
+** expected "before" values.
+**
+** The conflicting row, in this case, is the database row with the matching
+** primary key.
+**
+** <dt>SQLITE_CHANGESET_NOTFOUND<dd>
+** The conflict handler is invoked with CHANGESET_NOTFOUND as the second
+** argument when processing a DELETE or UPDATE change if a row with the
+** required PRIMARY KEY fields is not present in the database.
+**
+** There is no conflicting row in this case. The results of invoking the
+** sqlite3changeset_conflict() API are undefined.
+**
+** <dt>SQLITE_CHANGESET_CONFLICT<dd>
+** CHANGESET_CONFLICT is passed as the second argument to the conflict
+** handler while processing an INSERT change if the operation would result
+** in duplicate primary key values.
+**
+** The conflicting row in this case is the database row with the matching
+** primary key.
+**
+** <dt>SQLITE_CHANGESET_FOREIGN_KEY<dd>
+** If foreign key handling is enabled, and applying a changeset leaves the
+** database in a state containing foreign key violations, the conflict
+** handler is invoked with CHANGESET_FOREIGN_KEY as the second argument
+** exactly once before the changeset is committed. If the conflict handler
+** returns CHANGESET_OMIT, the changes, including those that caused the
+** foreign key constraint violation, are committed. Or, if it returns
+** CHANGESET_ABORT, the changeset is rolled back.
+**
+** No current or conflicting row information is provided. The only function
+** it is possible to call on the supplied sqlite3_changeset_iter handle
+** is sqlite3changeset_fk_conflicts().
+**
+** <dt>SQLITE_CHANGESET_CONSTRAINT<dd>
+** If any other constraint violation occurs while applying a change (i.e.
+** a UNIQUE, CHECK or NOT NULL constraint), the conflict handler is
+** invoked with CHANGESET_CONSTRAINT as the second argument.
+**
+** There is no conflicting row in this case. The results of invoking the
+** sqlite3changeset_conflict() API are undefined.
+**
+** </dl>
+*/
+#define SQLITE_CHANGESET_DATA 1
+#define SQLITE_CHANGESET_NOTFOUND 2
+#define SQLITE_CHANGESET_CONFLICT 3
+#define SQLITE_CHANGESET_CONSTRAINT 4
+#define SQLITE_CHANGESET_FOREIGN_KEY 5
+
+/*
+** CAPI3REF: Constants Returned By The Conflict Handler
+**
+** A conflict handler callback must return one of the following three values.
+**
+** <dl>
+** <dt>SQLITE_CHANGESET_OMIT<dd>
+** If a conflict handler returns this value no special action is taken. The
+** change that caused the conflict is not applied. The session module
+** continues to the next change in the changeset.
+**
+** <dt>SQLITE_CHANGESET_REPLACE<dd>
+** This value may only be returned if the second argument to the conflict
+** handler was SQLITE_CHANGESET_DATA or SQLITE_CHANGESET_CONFLICT. If this
+** is not the case, any changes applied so far are rolled back and the
+** call to sqlite3changeset_apply() returns SQLITE_MISUSE.
+**
+** If CHANGESET_REPLACE is returned by an SQLITE_CHANGESET_DATA conflict
+** handler, then the conflicting row is either updated or deleted, depending
+** on the type of change.
+**
+** If CHANGESET_REPLACE is returned by an SQLITE_CHANGESET_CONFLICT conflict
+** handler, then the conflicting row is removed from the database and a
+** second attempt to apply the change is made. If this second attempt fails,
+** the original row is restored to the database before continuing.
+**
+** <dt>SQLITE_CHANGESET_ABORT<dd>
+** If this value is returned, any changes applied so far are rolled back
+** and the call to sqlite3changeset_apply() returns SQLITE_ABORT.
+** </dl>
+*/
+#define SQLITE_CHANGESET_OMIT 0
+#define SQLITE_CHANGESET_REPLACE 1
+#define SQLITE_CHANGESET_ABORT 2
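+
+/*
+** A conflict handler typically switches on its second argument. For
+** example (a sketch; the policy shown - REPLACE for row conflicts,
+** OMIT for everything else - is only an illustration):
+**
+** <pre>
+**   static int xConflict(void *pCtx, int eConflict, sqlite3_changeset_iter *p){
+**     switch( eConflict ){
+**       case SQLITE_CHANGESET_DATA:
+**       case SQLITE_CHANGESET_CONFLICT:
+**         return SQLITE_CHANGESET_REPLACE;
+**       default:
+**         return SQLITE_CHANGESET_OMIT;
+**     }
+**   }
+** </pre>
+*/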
+
+/*
+** CAPI3REF: Rebasing changesets
+** EXPERIMENTAL
+**
+** Suppose there is a site hosting a database in state S0. Modifications
+** are then made that move that database to state S1, and a changeset is
+** recorded (the "local" changeset). Then, a changeset based
+** on S0 is received from another site (the "remote" changeset) and
+** applied to the database. The database is then in state
+** (S1+"remote"), where the exact state depends on any conflict
+** resolution decisions (OMIT or REPLACE) made while applying "remote".
+** Rebasing a changeset means updating it to take those conflict
+** resolution decisions into account, so that the same conflicts
+** do not have to be resolved elsewhere in the network.
+**
+** For example, if both the local and remote changesets contain an
+** INSERT of the same key on "CREATE TABLE t1(a PRIMARY KEY, b)":
+**
+** local: INSERT INTO t1 VALUES(1, 'v1');
+** remote: INSERT INTO t1 VALUES(1, 'v2');
+**
+** and the conflict resolution is REPLACE, then the INSERT change is
+** removed from the local changeset (it was overridden). Or, if the
+** conflict resolution was "OMIT", then the local changeset is modified
+** to instead contain:
+**
+** UPDATE t1 SET b = 'v2' WHERE a=1;
+**
+** Changes within the local changeset are rebased as follows:
+**
+** <dl>
+** <dt>Local INSERT<dd>
+** This may only conflict with a remote INSERT. If the conflict
+** resolution was OMIT, then add an UPDATE change to the rebased
+** changeset. Or, if the conflict resolution was REPLACE, add
+** nothing to the rebased changeset.
+**
+** <dt>Local DELETE<dd>
+** This may conflict with a remote UPDATE or DELETE. In both cases the
+** only possible resolution is OMIT. If the remote operation was a
+** DELETE, then add no change to the rebased changeset. If the remote
+** operation was an UPDATE, then the old.* fields of the change are updated
+** to reflect the new.* values in the UPDATE.
+**
+** <dt>Local UPDATE<dd>
+** This may conflict with a remote UPDATE or DELETE. If it conflicts
+** with a DELETE, and the conflict resolution was OMIT, then the update
+** is changed into an INSERT. Any undefined values in the new.* record
+** from the update change are filled in using the old.* values from
+** the conflicting DELETE. Or, if the conflict resolution was REPLACE,
+** the UPDATE change is simply omitted from the rebased changeset.
+**
+** If the conflict is with a remote UPDATE and the resolution is OMIT, then
+** the old.* values are rebased using the new.* values in the remote
+** change. Or, if the resolution is REPLACE, then the change is copied
+** into the rebased changeset with updates to columns also updated by
+** the conflicting remote UPDATE removed. If this means no columns would
+** be updated, the change is omitted.
+** </dl>
+**
+** A local change may be rebased against multiple remote changes
+** simultaneously. If a single key is modified by multiple remote
+** changesets, they are combined as follows before the local changeset
+** is rebased:
+**
+** <ul>
+** <li> If there has been one or more REPLACE resolutions on a
+** key, it is rebased according to a REPLACE.
+**
+** <li> If there have been no REPLACE resolutions on a key, then
+** the local changeset is rebased according to the most recent
+** of the OMIT resolutions.
+** </ul>
+**
+** Note that conflict resolutions from multiple remote changesets are
+** combined on a per-field basis, not per-row. This means that in the
+** case of multiple remote UPDATE operations, some fields of a single
+** local change may be rebased for REPLACE while others are rebased for
+** OMIT.
+**
+** In order to rebase a local changeset, the remote changeset must first
+** be applied to the local database using sqlite3changeset_apply_v2() and
+** the buffer of rebase information captured. Then:
+**
+** <ol>
+** <li> An sqlite3_rebaser object is created by calling
+** sqlite3rebaser_create().
+** <li> The new object is configured with the rebase buffer obtained from
+** sqlite3changeset_apply_v2() by calling sqlite3rebaser_configure().
+** If the local changeset is to be rebased against multiple remote
+** changesets, then sqlite3rebaser_configure() should be called
+** multiple times, in the same order that the multiple
+** sqlite3changeset_apply_v2() calls were made.
+** <li> Each local changeset is rebased by calling sqlite3rebaser_rebase().
+** <li> The sqlite3_rebaser object is deleted by calling
+** sqlite3rebaser_delete().
+** </ol>
+*/
+typedef struct sqlite3_rebaser sqlite3_rebaser;
+
+/*
+** CAPI3REF: Create a changeset rebaser object.
+** EXPERIMENTAL
+**
+** Allocate a new changeset rebaser object. If successful, set (*ppNew) to
+** point to the new object and return SQLITE_OK. Otherwise, if an error
+** occurs, return an SQLite error code (e.g. SQLITE_NOMEM) and set (*ppNew)
+** to NULL.
+*/
+SQLITE_API int sqlite3rebaser_create(sqlite3_rebaser **ppNew);
+
+/*
+** CAPI3REF: Configure a changeset rebaser object.
+** EXPERIMENTAL
+**
+** Configure the changeset rebaser object to rebase changesets according
+** to the conflict resolutions described by buffer pRebase (size nRebase
+** bytes), which must have been obtained from a previous call to
+** sqlite3changeset_apply_v2().
+*/
+SQLITE_API int sqlite3rebaser_configure(
+ sqlite3_rebaser*,
+ int nRebase, const void *pRebase
+);
+
+/*
+** CAPI3REF: Rebase a changeset
+** EXPERIMENTAL
+**
+** Argument pIn must point to a buffer containing a changeset nIn bytes
+** in size. This function allocates and populates a buffer with a copy
+** of the changeset rebased according to the configuration of the
+** rebaser object passed as the first argument. If successful, (*ppOut)
+** is set to point to the new buffer containing the rebased changeset and
+** (*pnOut) to its size in bytes and SQLITE_OK returned. It is the
+** responsibility of the caller to eventually free the new buffer using
+** sqlite3_free(). Otherwise, if an error occurs, (*ppOut) and (*pnOut)
+** are set to zero and an SQLite error code returned.
+*/
+SQLITE_API int sqlite3rebaser_rebase(
+ sqlite3_rebaser*,
+ int nIn, const void *pIn,
+ int *pnOut, void **ppOut
+);
+
+/*
+** CAPI3REF: Delete a changeset rebaser object.
+** EXPERIMENTAL
+**
+** Delete the changeset rebaser object and all associated resources. There
+** should be one call to this function for each successful invocation
+** of sqlite3rebaser_create().
+*/
+SQLITE_API void sqlite3rebaser_delete(sqlite3_rebaser *p);
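+
+/*
+** The complete rebase workflow described above might look as follows (a
+** sketch; pRebase/nRebase were obtained from sqlite3changeset_apply_v2()
+** and pLocal/nLocal is the local changeset; error handling abbreviated):
+**
+** <pre>
+**   sqlite3_rebaser *pRebaser = 0;
+**   int rc = sqlite3rebaser_create(&pRebaser);
+**   if( rc==SQLITE_OK ){
+**     rc = sqlite3rebaser_configure(pRebaser, nRebase, pRebase);
+**   }
+**   if( rc==SQLITE_OK ){
+**     rc = sqlite3rebaser_rebase(pRebaser, nLocal, pLocal, &nOut, &pOut);
+**   }
+**   sqlite3rebaser_delete(pRebaser);
+**   // ... use the rebased changeset, then call sqlite3_free(pOut) ...
+** </pre>
+*/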
+
+/*
+** CAPI3REF: Streaming Versions of API functions.
+**
+** The streaming API xxx_strm() functions serve similar purposes to the
+** corresponding non-streaming API functions:
+**
+** <table border=1 style="margin-left:8ex;margin-right:8ex">
+** <tr><th>Streaming function<th>Non-streaming equivalent</th>
+** <tr><td>sqlite3changeset_apply_strm<td>[sqlite3changeset_apply]
+** <tr><td>sqlite3changeset_apply_v2_strm<td>[sqlite3changeset_apply_v2]
+** <tr><td>sqlite3changeset_concat_strm<td>[sqlite3changeset_concat]
+** <tr><td>sqlite3changeset_invert_strm<td>[sqlite3changeset_invert]
+** <tr><td>sqlite3changeset_start_strm<td>[sqlite3changeset_start]
+** <tr><td>sqlite3session_changeset_strm<td>[sqlite3session_changeset]
+** <tr><td>sqlite3session_patchset_strm<td>[sqlite3session_patchset]
+** </table>
+**
+** Non-streaming functions that accept changesets (or patchsets) as input
+** require that the entire changeset be stored in a single buffer in memory.
+** Similarly, those that return a changeset or patchset do so by returning
+** a pointer to a single large buffer allocated using sqlite3_malloc().
+** Normally this is convenient. However, if an application running in a
+** low-memory environment is required to handle very large changesets, the
+** large contiguous memory allocations required can become onerous.
+**
+** In order to avoid this problem, instead of a single large buffer, input
+** is passed to the streaming API functions by way of a callback function that
+** the sessions module invokes to incrementally request input data as it is
+** required. In all cases, a pair of API function parameters such as
+**
+** <pre>
+** &nbsp; int nChangeset,
+** &nbsp; void *pChangeset,
+** </pre>
+**
+** is replaced by:
+**
+** <pre>
+** &nbsp; int (*xInput)(void *pIn, void *pData, int *pnData),
+** &nbsp; void *pIn,
+** </pre>
+**
+** Each time the xInput callback is invoked by the sessions module, the first
+** argument passed is a copy of the supplied pIn context pointer. The second
+** argument, pData, points to a buffer (*pnData) bytes in size. Assuming no
+** error occurs, the xInput method should copy up to (*pnData) bytes of data
+** into the buffer and set (*pnData) to the actual number of bytes copied
+** before returning SQLITE_OK. If the input is completely exhausted, (*pnData)
+** should be set to zero to indicate this. Or, if an error occurs, an SQLite
+** error code should be returned. In all cases, if an xInput callback returns
+** an error, all processing is abandoned and the streaming API function
+** returns a copy of the error code to the caller.
+**
+** In the case of sqlite3changeset_start_strm(), the xInput callback may be
+** invoked by the sessions module at any point during the lifetime of the
+** iterator. If such an xInput callback returns an error, the iterator enters
+** an error state, whereby all subsequent calls to iterator functions
+** immediately fail with the same error code as returned by xInput.
+**
+** Similarly, streaming API functions that return changesets (or patchsets)
+** return them in chunks by way of a callback function instead of via a
+** pointer to a single large buffer. In this case, a pair of parameters such
+** as:
+**
+** <pre>
+** &nbsp; int *pnChangeset,
+** &nbsp; void **ppChangeset,
+** </pre>
+**
+** is replaced by:
+**
+** <pre>
+** &nbsp; int (*xOutput)(void *pOut, const void *pData, int nData),
+** &nbsp; void *pOut
+** </pre>
+**
+** The xOutput callback is invoked zero or more times to return data to
+** the application. The first parameter passed to each call is a copy of the
+** pOut pointer supplied by the application. The second parameter, pData,
+** points to a buffer nData bytes in size containing the chunk of output
+** data being returned. If the xOutput callback successfully processes the
+** supplied data, it should return SQLITE_OK to indicate success. Otherwise,
+** it should return some other SQLite error code. In this case processing
+** is immediately abandoned and the streaming API function returns a copy
+** of the xOutput error code to the application.
+**
+** The sessions module never invokes an xOutput callback with the third
+** parameter set to a value less than or equal to zero. Other than this,
+** no guarantees are made as to the size of the chunks of data returned.
+*/
+SQLITE_API int sqlite3changeset_apply_strm(
+ sqlite3 *db, /* Apply change to "main" db of this handle */
+ int (*xInput)(void *pIn, void *pData, int *pnData), /* Input function */
+ void *pIn, /* First arg for xInput */
+ int(*xFilter)(
+ void *pCtx, /* Copy of sixth arg to _apply() */
+ const char *zTab /* Table name */
+ ),
+ int(*xConflict)(
+ void *pCtx, /* Copy of sixth arg to _apply() */
+ int eConflict, /* DATA, NOTFOUND, CONFLICT, CONSTRAINT */
+ sqlite3_changeset_iter *p /* Handle describing change and conflict */
+ ),
+ void *pCtx /* First argument passed to xConflict */
+);
+SQLITE_API int sqlite3changeset_apply_v2_strm(
+ sqlite3 *db, /* Apply change to "main" db of this handle */
+ int (*xInput)(void *pIn, void *pData, int *pnData), /* Input function */
+ void *pIn, /* First arg for xInput */
+ int(*xFilter)(
+ void *pCtx, /* Copy of sixth arg to _apply() */
+ const char *zTab /* Table name */
+ ),
+ int(*xConflict)(
+ void *pCtx, /* Copy of sixth arg to _apply() */
+ int eConflict, /* DATA, NOTFOUND, CONFLICT, CONSTRAINT */
+ sqlite3_changeset_iter *p /* Handle describing change and conflict */
+ ),
+ void *pCtx, /* First argument passed to xConflict */
+ void **ppRebase, int *pnRebase,
+ int flags
+);
+SQLITE_API int sqlite3changeset_concat_strm(
+ int (*xInputA)(void *pIn, void *pData, int *pnData),
+ void *pInA,
+ int (*xInputB)(void *pIn, void *pData, int *pnData),
+ void *pInB,
+ int (*xOutput)(void *pOut, const void *pData, int nData),
+ void *pOut
+);
+SQLITE_API int sqlite3changeset_invert_strm(
+ int (*xInput)(void *pIn, void *pData, int *pnData),
+ void *pIn,
+ int (*xOutput)(void *pOut, const void *pData, int nData),
+ void *pOut
+);
+SQLITE_API int sqlite3changeset_start_strm(
+ sqlite3_changeset_iter **pp,
+ int (*xInput)(void *pIn, void *pData, int *pnData),
+ void *pIn
+);
+SQLITE_API int sqlite3changeset_start_v2_strm(
+ sqlite3_changeset_iter **pp,
+ int (*xInput)(void *pIn, void *pData, int *pnData),
+ void *pIn,
+ int flags
+);
+SQLITE_API int sqlite3session_changeset_strm(
+ sqlite3_session *pSession,
+ int (*xOutput)(void *pOut, const void *pData, int nData),
+ void *pOut
+);
+SQLITE_API int sqlite3session_patchset_strm(
+ sqlite3_session *pSession,
+ int (*xOutput)(void *pOut, const void *pData, int nData),
+ void *pOut
+);
+SQLITE_API int sqlite3changegroup_add_strm(sqlite3_changegroup*,
+ int (*xInput)(void *pIn, void *pData, int *pnData),
+ void *pIn
+);
+SQLITE_API int sqlite3changegroup_output_strm(sqlite3_changegroup*,
+ int (*xOutput)(void *pOut, const void *pData, int nData),
+ void *pOut
+);
+SQLITE_API int sqlite3rebaser_rebase_strm(
+ sqlite3_rebaser *pRebaser,
+ int (*xInput)(void *pIn, void *pData, int *pnData),
+ void *pIn,
+ int (*xOutput)(void *pOut, const void *pData, int nData),
+ void *pOut
+);
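+
+/*
+** As an illustration, an xInput callback might read from an open stdio
+** file (a sketch; the use of stdio here is purely hypothetical):
+**
+** <pre>
+**   static int xInput(void *pIn, void *pData, int *pnData){
+**     FILE *f = (FILE*)pIn;
+**     size_t n = fread(pData, 1, (size_t)(*pnData), f);
+**     if( n==0 && ferror(f) ) return SQLITE_IOERR_READ;
+**     *pnData = (int)n;   // setting *pnData to 0 signals end of input
+**     return SQLITE_OK;
+**   }
+** </pre>
+*/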
+
+/*
+** CAPI3REF: Configure global parameters
+**
+** The sqlite3session_config() interface is used to make global configuration
+** changes to the sessions module in order to tune it to the specific needs
+** of the application.
+**
+** The sqlite3session_config() interface is not threadsafe. If it is invoked
+** while any other thread is inside any other sessions method then the
+** results are undefined. Furthermore, if it is invoked after any sessions
+** related objects have been created, the results are also undefined.
+**
+** The first argument to the sqlite3session_config() function must be one
+** of the SQLITE_SESSION_CONFIG_XXX constants defined below. The
+** interpretation of the (void*) value passed as the second parameter and
+** the effect of calling this function depends on the value of the first
+** parameter.
+**
+** <dl>
+** <dt>SQLITE_SESSION_CONFIG_STRMSIZE<dd>
+** By default, the sessions module streaming interfaces attempt to input
+** and output data in approximately 1 KiB chunks. This operand may be used
+** to set and query the value of this configuration setting. The pointer
+** passed as the second argument must point to a value of type (int).
+** If this value is greater than 0, it is used as the new streaming data
+** chunk size for both input and output. Before returning, the (int) value
+** pointed to by pArg is set to the final value of the streaming interface
+** chunk size.
+** </dl>
+**
+** This function returns SQLITE_OK if successful, or an SQLite error code
+** otherwise.
+*/
+SQLITE_API int sqlite3session_config(int op, void *pArg);
+
+/*
+** CAPI3REF: Values for sqlite3session_config().
+*/
+#define SQLITE_SESSION_CONFIG_STRMSIZE 1
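+
+/*
+** Editor's note (illustrative sketch): reading the current streaming chunk
+** size and then raising it. Per the documentation above, this must run
+** before any sessions objects are created and with no other thread inside
+** the sessions module:
+**
+**   int sz = 0;   // values <= 0 leave the setting unchanged
+**   sqlite3session_config(SQLITE_SESSION_CONFIG_STRMSIZE, &sz);
+**   sz = 8192;    // request 8 KiB chunks
+**   int rc = sqlite3session_config(SQLITE_SESSION_CONFIG_STRMSIZE, &sz);
+**   // on return, sz holds the chunk size now in effect
+*/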
+
+/*
+** Make sure we can call this stuff from C++.
+*/
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* !defined(__SQLITESESSION_H_) && defined(SQLITE_ENABLE_SESSION) */
+
+/******** End of sqlite3session.h *********/
+/******** Begin file fts5.h *********/
+/*
+** 2014 May 31
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+******************************************************************************
+**
+** Interfaces to extend FTS5. Using the interfaces defined in this file,
+** FTS5 may be extended with:
+**
+** * custom tokenizers, and
+** * custom auxiliary functions.
+*/
+
+
+#ifndef _FTS5_H
+#define _FTS5_H
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*************************************************************************
+** CUSTOM AUXILIARY FUNCTIONS
+**
+** Virtual table implementations may overload SQL functions by implementing
+** the sqlite3_module.xFindFunction() method.
+*/
+
+typedef struct Fts5ExtensionApi Fts5ExtensionApi;
+typedef struct Fts5Context Fts5Context;
+typedef struct Fts5PhraseIter Fts5PhraseIter;
+
+typedef void (*fts5_extension_function)(
+ const Fts5ExtensionApi *pApi, /* API offered by current FTS version */
+ Fts5Context *pFts, /* First arg to pass to pApi functions */
+ sqlite3_context *pCtx, /* Context for returning result/error */
+ int nVal, /* Number of values in apVal[] array */
+ sqlite3_value **apVal /* Array of trailing arguments */
+);
+
+struct Fts5PhraseIter {
+ const unsigned char *a;
+ const unsigned char *b;
+};
+
+/*
+** EXTENSION API FUNCTIONS
+**
+** xUserData(pFts):
+** Return a copy of the context pointer the extension function was
+** registered with.
+**
+** xColumnTotalSize(pFts, iCol, pnToken):
+** If parameter iCol is less than zero, set output variable *pnToken
+** to the total number of tokens in the FTS5 table. Or, if iCol is
+** non-negative but less than the number of columns in the table, set
+** *pnToken to the total number of tokens in column iCol, considering
+** all rows in the FTS5 table.
+**
+** If parameter iCol is greater than or equal to the number of columns
+** in the table, SQLITE_RANGE is returned. Or, if an error occurs (e.g.
+** an OOM condition or IO error), an appropriate SQLite error code is
+** returned.
+**
+** xColumnCount(pFts):
+** Return the number of columns in the table.
+**
+** xColumnSize(pFts, iCol, pnToken):
+** If parameter iCol is less than zero, set output variable *pnToken
+** to the total number of tokens in the current row. Or, if iCol is
+** non-negative but less than the number of columns in the table, set
+** *pnToken to the number of tokens in column iCol of the current row.
+**
+** If parameter iCol is greater than or equal to the number of columns
+** in the table, SQLITE_RANGE is returned. Or, if an error occurs (e.g.
+** an OOM condition or IO error), an appropriate SQLite error code is
+** returned.
+**
+** This function may be quite inefficient if used with an FTS5 table
+** created with the "columnsize=0" option.
+**
+** xColumnText:
+** This function attempts to retrieve the text of column iCol of the
+** current document. If successful, (*pz) is set to point to a buffer
+** containing the text in utf-8 encoding, (*pn) is set to the size in bytes
+** (not characters) of the buffer and SQLITE_OK is returned. Otherwise,
+** if an error occurs, an SQLite error code is returned and the final values
+** of (*pz) and (*pn) are undefined.
+**
+** xPhraseCount:
+** Returns the number of phrases in the current query expression.
+**
+** xPhraseSize:
+** Returns the number of tokens in phrase iPhrase of the query. Phrases
+** are numbered starting from zero.
+**
+** xInstCount:
+** Set *pnInst to the total number of occurrences of all phrases within
+** the query within the current row. Return SQLITE_OK if successful, or
+** an error code (e.g. SQLITE_NOMEM) if an error occurs.
+**
+** This API can be quite slow if used with an FTS5 table created with the
+** "detail=none" or "detail=column" option. If the FTS5 table is created
+** with either "detail=none" or "detail=column" and "content=" option
+** (i.e. if it is a contentless table), then this API always returns 0.
+**
+** xInst:
+** Query for the details of phrase match iIdx within the current row.
+** Phrase matches are numbered starting from zero, so the iIdx argument
+** should be greater than or equal to zero and smaller than the value
+** output by xInstCount().
+**
+** Usually, output parameter *piPhrase is set to the phrase number, *piCol
+** to the column in which it occurs and *piOff the token offset of the
+** first token of the phrase. Returns SQLITE_OK if successful, or an error
+** code (e.g. SQLITE_NOMEM) if an error occurs.
+**
+** This API can be quite slow if used with an FTS5 table created with the
+** "detail=none" or "detail=column" option.
+**
+** xRowid:
+** Returns the rowid of the current row.
+**
+** xTokenize:
+** Tokenize text using the tokenizer belonging to the FTS5 table.
+**
+** xQueryPhrase(pFts5, iPhrase, pUserData, xCallback):
+** This API function is used to query the FTS table for phrase iPhrase
+** of the current query. Specifically, a query equivalent to:
+**
+** ... FROM ftstable WHERE ftstable MATCH $p ORDER BY rowid
+**
+** with $p set to a phrase equivalent to the phrase iPhrase of the
+** current query is executed. Any column filter that applies to
+** phrase iPhrase of the current query is included in $p. For each
+** row visited, the callback function passed as the fourth argument
+** is invoked. The context and API objects passed to the callback
+** function may be used to access the properties of each matched row.
+** Invoking Api.xUserData() returns a copy of the pointer passed as
+** the third argument (pUserData) to xQueryPhrase().
+**
+** If the callback function returns any value other than SQLITE_OK, the
+** query is abandoned and the xQueryPhrase function returns immediately.
+** If the returned value is SQLITE_DONE, xQueryPhrase returns SQLITE_OK.
+** Otherwise, the error code is propagated upwards.
+**
+** If the query runs to completion without incident, SQLITE_OK is returned.
+** Or, if some error occurs before the query completes or is aborted by
+** the callback, an SQLite error code is returned.
+**
+**
+** xSetAuxdata(pFts5, pAux, xDelete)
+**
+** Save the pointer passed as the second argument as the extension function's
+** "auxiliary data". The pointer may then be retrieved by the current or any
+** future invocation of the same fts5 extension function made as part of
+** the same MATCH query using the xGetAuxdata() API.
+**
+** Each extension function is allocated a single auxiliary data slot for
+** each FTS query (MATCH expression). If the extension function is invoked
+** more than once for a single FTS query, then all invocations share a
+** single auxiliary data context.
+**
+** If there is already an auxiliary data pointer when this function is
+** invoked, then it is replaced by the new pointer. If an xDelete callback
+** was specified along with the original pointer, it is invoked at this
+** point.
+**
+** The xDelete callback, if one is specified, is also invoked on the
+** auxiliary data pointer after the FTS5 query has finished.
+**
+** If an error (e.g. an OOM condition) occurs within this function,
+** the auxiliary data is set to NULL and an error code returned. If the
+** xDelete parameter was not NULL, it is invoked on the auxiliary data
+** pointer before returning.
+**
+**
+** xGetAuxdata(pFts5, bClear)
+**
+** Returns the current auxiliary data pointer for the fts5 extension
+** function. See the xSetAuxdata() method for details.
+**
+** If the bClear argument is non-zero, then the auxiliary data is cleared
+** (set to NULL) before this function returns. In this case the xDelete
+** callback, if any, is not invoked.
+**
+**
+** xRowCount(pFts5, pnRow)
+**
+** This function is used to retrieve the total number of rows in the table.
+** In other words, the same value that would be returned by:
+**
+** SELECT count(*) FROM ftstable;
+**
+** xPhraseFirst()
+** This function is used, along with type Fts5PhraseIter and the xPhraseNext
+** method, to iterate through all instances of a single query phrase within
+** the current row. This is the same information as is accessible via the
+** xInstCount/xInst APIs. While the xInstCount/xInst APIs are more convenient
+** to use, this API may be faster under some circumstances. To iterate
+** through instances of phrase iPhrase, use the following code:
+**
+** Fts5PhraseIter iter;
+** int iCol, iOff;
+** for(pApi->xPhraseFirst(pFts, iPhrase, &iter, &iCol, &iOff);
+** iCol>=0;
+** pApi->xPhraseNext(pFts, &iter, &iCol, &iOff)
+** ){
+** // An instance of phrase iPhrase at offset iOff of column iCol
+** }
+**
+** The Fts5PhraseIter structure is defined above. Applications should not
+** modify this structure directly - it should only be used as shown above
+** with the xPhraseFirst() and xPhraseNext() API methods (and by
+** xPhraseFirstColumn() and xPhraseNextColumn() as illustrated below).
+**
+** This API can be quite slow if used with an FTS5 table created with the
+** "detail=none" or "detail=column" option. If the FTS5 table is created
+** with either "detail=none" or "detail=column" and "content=" option
+** (i.e. if it is a contentless table), then this API always iterates
+** through an empty set (all calls to xPhraseFirst() set iCol to -1).
+**
+** xPhraseNext()
+** See xPhraseFirst above.
+**
+** xPhraseFirstColumn()
+** This function and xPhraseNextColumn() are similar to the xPhraseFirst()
+** and xPhraseNext() APIs described above. The difference is that instead
+** of iterating through all instances of a phrase in the current row, these
+** APIs are used to iterate through the set of columns in the current row
+** that contain one or more instances of a specified phrase. For example:
+**
+** Fts5PhraseIter iter;
+** int iCol;
+** for(pApi->xPhraseFirstColumn(pFts, iPhrase, &iter, &iCol);
+** iCol>=0;
+** pApi->xPhraseNextColumn(pFts, &iter, &iCol)
+** ){
+** // Column iCol contains at least one instance of phrase iPhrase
+** }
+**
+** This API can be quite slow if used with an FTS5 table created with the
+** "detail=none" option. If the FTS5 table is created with either
+** "detail=none" "content=" option (i.e. if it is a contentless table),
+** then this API always iterates through an empty set (all calls to
+** xPhraseFirstColumn() set iCol to -1).
+**
+** The information accessed using this API and its companion
+** xPhraseNextColumn() may also be obtained using xPhraseFirst/xPhraseNext
+** (or xInst/xInstCount). The chief advantage of this API is that it is
+** significantly more efficient than those alternatives when used with
+** "detail=column" tables.
+**
+** xPhraseNextColumn()
+** See xPhraseFirstColumn above.
+*/
+struct Fts5ExtensionApi {
+ int iVersion; /* Currently always set to 3 */
+
+ void *(*xUserData)(Fts5Context*);
+
+ int (*xColumnCount)(Fts5Context*);
+ int (*xRowCount)(Fts5Context*, sqlite3_int64 *pnRow);
+ int (*xColumnTotalSize)(Fts5Context*, int iCol, sqlite3_int64 *pnToken);
+
+ int (*xTokenize)(Fts5Context*,
+ const char *pText, int nText, /* Text to tokenize */
+ void *pCtx, /* Context passed to xToken() */
+ int (*xToken)(void*, int, const char*, int, int, int) /* Callback */
+ );
+
+ int (*xPhraseCount)(Fts5Context*);
+ int (*xPhraseSize)(Fts5Context*, int iPhrase);
+
+ int (*xInstCount)(Fts5Context*, int *pnInst);
+ int (*xInst)(Fts5Context*, int iIdx, int *piPhrase, int *piCol, int *piOff);
+
+ sqlite3_int64 (*xRowid)(Fts5Context*);
+ int (*xColumnText)(Fts5Context*, int iCol, const char **pz, int *pn);
+ int (*xColumnSize)(Fts5Context*, int iCol, int *pnToken);
+
+ int (*xQueryPhrase)(Fts5Context*, int iPhrase, void *pUserData,
+ int(*)(const Fts5ExtensionApi*,Fts5Context*,void*)
+ );
+ int (*xSetAuxdata)(Fts5Context*, void *pAux, void(*xDelete)(void*));
+ void *(*xGetAuxdata)(Fts5Context*, int bClear);
+
+ int (*xPhraseFirst)(Fts5Context*, int iPhrase, Fts5PhraseIter*, int*, int*);
+ void (*xPhraseNext)(Fts5Context*, Fts5PhraseIter*, int *piCol, int *piOff);
+
+ int (*xPhraseFirstColumn)(Fts5Context*, int iPhrase, Fts5PhraseIter*, int*);
+ void (*xPhraseNextColumn)(Fts5Context*, Fts5PhraseIter*, int *piCol);
+};
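+
+/*
+** Editor's note (illustrative sketch, not from the upstream docs): a
+** minimal auxiliary function built on this API. It reports the number of
+** phrase matches in the current row via xInstCount(); the function name
+** is hypothetical:
+**
+**   static void matchCount(
+**     const Fts5ExtensionApi *pApi, Fts5Context *pFts,
+**     sqlite3_context *pCtx, int nVal, sqlite3_value **apVal
+**   ){
+**     (void)nVal; (void)apVal;   // no trailing arguments used here
+**     int nInst = 0;
+**     int rc = pApi->xInstCount(pFts, &nInst);
+**     if( rc==SQLITE_OK ) sqlite3_result_int(pCtx, nInst);
+**     else sqlite3_result_error_code(pCtx, rc);
+**   }
+**
+** Once registered through fts5_api.xCreateFunction() it can be invoked as
+** matchcount(ftstable) in the select list of a MATCH query.
+*/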
+
+/*
+** CUSTOM AUXILIARY FUNCTIONS
+*************************************************************************/
+
+/*************************************************************************
+** CUSTOM TOKENIZERS
+**
+** Applications may also register custom tokenizer types. A tokenizer
+** is registered by providing fts5 with a populated instance of the
+** following structure. All structure methods must be defined; setting
+** any member of the fts5_tokenizer struct to NULL leads to undefined
+** behaviour. The structure methods are expected to function as follows:
+**
+** xCreate:
+** This function is used to allocate and initialize a tokenizer instance.
+** A tokenizer instance is required to actually tokenize text.
+**
+** The first argument passed to this function is a copy of the (void*)
+** pointer provided by the application when the fts5_tokenizer object
+** was registered with FTS5 (the third argument to xCreateTokenizer()).
+** The second and third arguments are an array of nul-terminated strings
+** containing the tokenizer arguments, if any, specified following the
+** tokenizer name as part of the CREATE VIRTUAL TABLE statement used
+** to create the FTS5 table.
+**
+** The final argument is an output variable. If successful, (*ppOut)
+** should be set to point to the new tokenizer handle and SQLITE_OK
+** returned. If an error occurs, some value other than SQLITE_OK should
+** be returned. In this case, fts5 assumes that the final value of *ppOut
+** is undefined.
+**
+** xDelete:
+** This function is invoked to delete a tokenizer handle previously
+** allocated using xCreate(). Fts5 guarantees that this function will
+** be invoked exactly once for each successful call to xCreate().
+**
+** xTokenize:
+** This function is expected to tokenize the nText byte string indicated
+** by argument pText. pText may or may not be nul-terminated. The first
+** argument passed to this function is a pointer to an Fts5Tokenizer object
+** returned by an earlier call to xCreate().
+**
+** The second argument indicates the reason that FTS5 is requesting
+** tokenization of the supplied text. This is always one of the following
+** four values:
+**
+** <ul><li> <b>FTS5_TOKENIZE_DOCUMENT</b> - A document is being inserted into
+** or removed from the FTS table. The tokenizer is being invoked to
+** determine the set of tokens to add to (or delete from) the
+** FTS index.
+**
+** <li> <b>FTS5_TOKENIZE_QUERY</b> - A MATCH query is being executed
+** against the FTS index. The tokenizer is being called to tokenize
+** a bareword or quoted string specified as part of the query.
+**
+** <li> <b>(FTS5_TOKENIZE_QUERY | FTS5_TOKENIZE_PREFIX)</b> - Same as
+** FTS5_TOKENIZE_QUERY, except that the bareword or quoted string is
+** followed by a "*" character, indicating that the last token
+** returned by the tokenizer will be treated as a token prefix.
+**
+** <li> <b>FTS5_TOKENIZE_AUX</b> - The tokenizer is being invoked to
+** satisfy an fts5_api.xTokenize() request made by an auxiliary
+** function. Or an fts5_api.xColumnSize() request made by the same
+** on a columnsize=0 database.
+** </ul>
+**
+** For each token in the input string, the supplied callback xToken() must
+** be invoked. The first argument to it should be a copy of the pointer
+** passed as the second argument to xTokenize(). The third and fourth
+** arguments are a pointer to a buffer containing the token text, and the
+** size of the token in bytes. The fifth and sixth arguments are the byte
+** offsets of the first byte of the token text and of the first byte
+** immediately following it, within the input.
+**
+** The second argument passed to the xToken() callback ("tflags") should
+** normally be set to 0. The exception is if the tokenizer supports
+** synonyms. In this case see the discussion below for details.
+**
+** FTS5 assumes the xToken() callback is invoked for each token in the
+** order that they occur within the input text.
+**
+** If an xToken() callback returns any value other than SQLITE_OK, then
+** the tokenization should be abandoned and the xTokenize() method should
+** immediately return a copy of the xToken() return value. Or, if the
+** input buffer is exhausted, xTokenize() should return SQLITE_OK. Finally,
+** if an error occurs with the xTokenize() implementation itself, it
+** may abandon the tokenization and return any error code other than
+** SQLITE_OK or SQLITE_DONE.
+**
+** SYNONYM SUPPORT
+**
+** Custom tokenizers may also support synonyms. Consider a case in which a
+** user wishes to query for a phrase such as "first place". Using the
+** built-in tokenizers, the FTS5 query 'first + place' will match instances
+** of "first place" within the document set, but not alternative forms
+** such as "1st place". In some applications, it would be better to match
+** all instances of "first place" or "1st place" regardless of which form
+** the user specified in the MATCH query text.
+**
+** There are several ways to approach this in FTS5:
+**
+** <ol><li> By mapping all synonyms to a single token. In this case, using
+** the above example, this means that the tokenizer returns the
+** same token for inputs "first" and "1st". Say that token is in
+** fact "first", so that when the user inserts the document "I won
+** 1st place" entries are added to the index for tokens "i", "won",
+** "first" and "place". If the user then queries for '1st + place',
+** the tokenizer substitutes "first" for "1st" and the query works
+** as expected.
+**
+** <li> By querying the index for all synonyms of each query term
+** separately. In this case, when tokenizing query text, the
+** tokenizer may provide multiple synonyms for a single term
+** within the document. FTS5 then queries the index for each
+** synonym individually. For example, faced with the query:
+**
+** <codeblock>
+** ... MATCH 'first place'</codeblock>
+**
+** the tokenizer offers both "1st" and "first" as synonyms for the
+** first token in the MATCH query and FTS5 effectively runs a query
+** similar to:
+**
+** <codeblock>
+** ... MATCH '(first OR 1st) place'</codeblock>
+**
+** except that, for the purposes of auxiliary functions, the query
+** still appears to contain just two phrases - "(first OR 1st)"
+** being treated as a single phrase.
+**
+** <li> By adding multiple synonyms for a single term to the FTS index.
+** Using this method, when tokenizing document text, the tokenizer
+** provides multiple synonyms for each token. So that when a
+** document such as "I won first place" is tokenized, entries are
+** added to the FTS index for "i", "won", "first", "1st" and
+** "place".
+**
+** This way, even if the tokenizer does not provide synonyms
+** when tokenizing query text (it should not - to do so would be
+** inefficient), it doesn't matter if the user queries for
+** 'first + place' or '1st + place', as there are entries in the
+** FTS index corresponding to both forms of the first token.
+** </ol>
+**
+** Whether it is parsing document or query text, any call to xToken that
+** specifies a <i>tflags</i> argument with the FTS5_TOKEN_COLOCATED bit
+** is considered to supply a synonym for the previous token. For example,
+** when parsing the document "I won first place", a tokenizer that supports
+** synonyms would call xToken() 5 times, as follows:
+**
+** <codeblock>
+** xToken(pCtx, 0, "i", 1, 0, 1);
+** xToken(pCtx, 0, "won", 3, 2, 5);
+** xToken(pCtx, 0, "first", 5, 6, 11);
+** xToken(pCtx, FTS5_TOKEN_COLOCATED, "1st", 3, 6, 11);
+** xToken(pCtx, 0, "place", 5, 12, 17);
+**</codeblock>
+**
+** It is an error to specify the FTS5_TOKEN_COLOCATED flag the first time
+** xToken() is called. Multiple synonyms may be specified for a single token
+** by making multiple calls to xToken(FTS5_TOKEN_COLOCATED) in sequence.
+** There is no limit to the number of synonyms that may be provided for a
+** single token.
+**
+** In many cases, method (1) above is the best approach. It does not add
+** extra data to the FTS index or require FTS5 to query for multiple terms,
+** so it is efficient in terms of disk space and query speed. However, it
+** does not support prefix queries very well. If, as suggested above, the
+** token "first" is substituted for "1st" by the tokenizer, then the query:
+**
+** <codeblock>
+** ... MATCH '1s*'</codeblock>
+**
+** will not match documents that contain the token "1st" (as the tokenizer
+** will probably not map "1s" to any prefix of "first").
+**
+** For full prefix support, method (3) may be preferred. In this case,
+** because the index contains entries for both "first" and "1st", prefix
+** queries such as 'fi*' or '1s*' will match correctly. However, because
+** extra entries are added to the FTS index, this method uses more space
+** within the database.
+**
+** Method (2) offers a midpoint between (1) and (3). Using this method,
+** a query such as '1s*' will match documents that contain the literal
+** token "1st", but not "first" (assuming the tokenizer is not able to
+** provide synonyms for prefixes). However, a non-prefix query like '1st'
+** will match against "1st" and "first". This method does not require
+** extra disk space, as no extra entries are added to the FTS index.
+** On the other hand, it may require more CPU cycles to run MATCH queries,
+** as separate queries of the FTS index are required for each synonym.
+**
+** When using methods (2) or (3), it is important that the tokenizer only
+** provide synonyms when tokenizing document text (method (2)) or query
+** text (method (3)), not both. Doing so will not cause any errors, but is
+** inefficient.
+*/
+typedef struct Fts5Tokenizer Fts5Tokenizer;
+typedef struct fts5_tokenizer fts5_tokenizer;
+struct fts5_tokenizer {
+ int (*xCreate)(void*, const char **azArg, int nArg, Fts5Tokenizer **ppOut);
+ void (*xDelete)(Fts5Tokenizer*);
+ int (*xTokenize)(Fts5Tokenizer*,
+ void *pCtx,
+ int flags, /* Mask of FTS5_TOKENIZE_* flags */
+ const char *pText, int nText,
+ int (*xToken)(
+ void *pCtx, /* Copy of 2nd argument to xTokenize() */
+ int tflags, /* Mask of FTS5_TOKEN_* flags */
+ const char *pToken, /* Pointer to buffer containing token */
+ int nToken, /* Size of token in bytes */
+ int iStart, /* Byte offset of token within input text */
+ int iEnd /* Byte offset of end of token within input text */
+ )
+ );
+};
+
+/* Flags that may be passed as the third argument to xTokenize() */
+#define FTS5_TOKENIZE_QUERY 0x0001
+#define FTS5_TOKENIZE_PREFIX 0x0002
+#define FTS5_TOKENIZE_DOCUMENT 0x0004
+#define FTS5_TOKENIZE_AUX 0x0008
+
+/* Flags that may be passed by the tokenizer implementation back to FTS5
+** as the third argument to the supplied xToken callback. */
+#define FTS5_TOKEN_COLOCATED 0x0001 /* Same position as prev. token */
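+
+/*
+** Editor's note (illustrative sketch): the xTokenize() contract for a
+** trivial space-separated tokenizer - scan the input and invoke xToken()
+** once per token with tflags 0, the token text and its byte offsets. The
+** function name is hypothetical:
+**
+**   static int wsTokenize(
+**     Fts5Tokenizer *pTok, void *pCtx, int flags,
+**     const char *pText, int nText,
+**     int (*xToken)(void*, int, const char*, int, int, int)
+**   ){
+**     int i = 0;
+**     while( i<nText ){
+**       while( i<nText && pText[i]==' ' ) i++;   // skip separators
+**       int iStart = i;
+**       while( i<nText && pText[i]!=' ' ) i++;   // span the token
+**       if( i>iStart ){
+**         int rc = xToken(pCtx, 0, &pText[iStart], i-iStart, iStart, i);
+**         if( rc!=SQLITE_OK ) return rc;
+**       }
+**     }
+**     return SQLITE_OK;
+**   }
+*/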
+
+/*
+** END OF CUSTOM TOKENIZERS
+*************************************************************************/
+
+/*************************************************************************
+** FTS5 EXTENSION REGISTRATION API
+*/
+typedef struct fts5_api fts5_api;
+struct fts5_api {
+ int iVersion; /* Currently always set to 2 */
+
+ /* Create a new tokenizer */
+ int (*xCreateTokenizer)(
+ fts5_api *pApi,
+ const char *zName,
+ void *pContext,
+ fts5_tokenizer *pTokenizer,
+ void (*xDestroy)(void*)
+ );
+
+ /* Find an existing tokenizer */
+ int (*xFindTokenizer)(
+ fts5_api *pApi,
+ const char *zName,
+ void **ppContext,
+ fts5_tokenizer *pTokenizer
+ );
+
+ /* Create a new auxiliary function */
+ int (*xCreateFunction)(
+ fts5_api *pApi,
+ const char *zName,
+ void *pContext,
+ fts5_extension_function xFunction,
+ void (*xDestroy)(void*)
+ );
+};
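+
+/*
+** Editor's note: the documented way to obtain the fts5_api pointer from an
+** open database handle (SQLite 3.20+) is the "fts5" SQL function combined
+** with a pointer binding; a sketch with error handling trimmed:
+**
+**   static fts5_api *fts5_api_from_db(sqlite3 *db){
+**     fts5_api *pRet = 0;
+**     sqlite3_stmt *pStmt = 0;
+**     if( sqlite3_prepare_v2(db, "SELECT fts5(?1)", -1, &pStmt, 0)==SQLITE_OK ){
+**       sqlite3_bind_pointer(pStmt, 1, (void*)&pRet, "fts5_api_ptr", 0);
+**       sqlite3_step(pStmt);
+**     }
+**     sqlite3_finalize(pStmt);
+**     return pRet;
+**   }
+*/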
+
+/*
+** END OF REGISTRATION API
+*************************************************************************/
+
+#ifdef __cplusplus
+} /* end of the 'extern "C"' block */
+#endif
+
+#endif /* _FTS5_H */
+
+/******** End of fts5.h *********/
diff --git a/database/sqlite/sqlite_functions.c b/database/sqlite/sqlite_functions.c
new file mode 100644
index 0000000..ab6c59c
--- /dev/null
+++ b/database/sqlite/sqlite_functions.c
@@ -0,0 +1,1022 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "sqlite_functions.h"
+
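+// Statements executed in order when the metadata database is opened:
+// pragmas, schema and index creation, then cleanup of stale rows.
+// The list is NULL-terminated.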
+const char *database_config[] = {
+ "PRAGMA auto_vacuum=incremental; PRAGMA synchronous=1 ; PRAGMA journal_mode=WAL; PRAGMA temp_store=MEMORY;",
+ "PRAGMA journal_size_limit=16777216;",
+ "CREATE TABLE IF NOT EXISTS host(host_id blob PRIMARY KEY, hostname text, "
+ "registry_hostname text, update_every int, os text, timezone text, tags text);",
+ "CREATE TABLE IF NOT EXISTS chart(chart_id blob PRIMARY KEY, host_id blob, type text, id text, name text, "
+ "family text, context text, title text, unit text, plugin text, module text, priority int, update_every int, "
+ "chart_type int, memory_mode int, history_entries);",
+ "CREATE TABLE IF NOT EXISTS dimension(dim_id blob PRIMARY KEY, chart_id blob, id text, name text, "
+ "multiplier int, divisor int , algorithm int, options text);",
+ "CREATE TABLE IF NOT EXISTS chart_active(chart_id blob PRIMARY KEY, date_created int);",
+ "CREATE TABLE IF NOT EXISTS dimension_active(dim_id blob primary key, date_created int);",
+ "CREATE TABLE IF NOT EXISTS metadata_migration(filename text, file_size, date_created int);",
+ "CREATE INDEX IF NOT EXISTS ind_d1 on dimension (chart_id, id, name);",
+ "CREATE INDEX IF NOT EXISTS ind_c1 on chart (host_id, id, type, name);",
+
+ "delete from chart_active;",
+ "delete from dimension_active;",
+
+ "delete from chart where chart_id not in (select chart_id from dimension);",
+ "delete from host where host_id not in (select host_id from chart);",
+ NULL
+};
+
+sqlite3 *db_meta = NULL;
+
+static uv_mutex_t sqlite_transaction_lock;
+
+static int execute_insert(sqlite3_stmt *res)
+{
+ int rc;
+
+ // Retry busy/locked steps, but give up once the agent is shutting down
+ while ((rc = sqlite3_step(res)) != SQLITE_DONE && unlikely(!netdata_exit)) {
+ if (likely(rc == SQLITE_BUSY || rc == SQLITE_LOCKED))
+ usleep(SQLITE_INSERT_DELAY * USEC_PER_MS);
+ else {
+ error_report("SQLite error %d", rc);
+ break;
+ }
+ }
+
+ return rc;
+}
+
+/*
+ * Store a chart or dimension UUID in chart_active or dimension_active;
+ * which table is used is determined by the prepared statement passed in.
+ */
+
+static int store_active_uuid_object(sqlite3_stmt **res, char *statement, uuid_t *uuid)
+{
+ int rc;
+
+ // Prepare the statement on first use
+ if (!*res) {
+ rc = sqlite3_prepare_v2(db_meta, statement, -1, res, 0);
+ if (unlikely(rc != SQLITE_OK)) {
+ error_report("Failed to prepare statement to store active object, rc = %d", rc);
+ return rc;
+ }
+ }
+
+ rc = sqlite3_bind_blob(*res, 1, uuid, sizeof(*uuid), SQLITE_STATIC);
+ if (unlikely(rc != SQLITE_OK))
+ error_report("Failed to bind input parameter to store active object, rc = %d", rc);
+ else
+ rc = execute_insert(*res);
+ return rc;
+}
+
+/*
+ * Marks a chart with UUID as active
+ * Input: UUID
+ */
+void store_active_chart(uuid_t *chart_uuid)
+{
+ sqlite3_stmt *res = NULL;
+ int rc;
+
+ if (unlikely(!db_meta)) {
+ error_report("Database has not been initialized");
+ return;
+ }
+
+ if (unlikely(!chart_uuid))
+ return;
+
+ rc = store_active_uuid_object(&res, SQL_STORE_ACTIVE_CHART, chart_uuid);
+ if (rc != SQLITE_DONE)
+ error_report("Failed to store active chart, rc = %d", rc);
+
+ rc = sqlite3_finalize(res);
+ if (unlikely(rc != SQLITE_OK))
+ error_report("Failed to finalize statement in store active chart, rc = %d", rc);
+ return;
+}
+
+/*
+ * Marks a dimension with UUID as active
+ * Input: UUID
+ */
+void store_active_dimension(uuid_t *dimension_uuid)
+{
+ sqlite3_stmt *res = NULL;
+ int rc;
+
+ if (unlikely(!db_meta)) {
+ error_report("Database has not been initialized");
+ return;
+ }
+
+ if (unlikely(!dimension_uuid))
+ return;
+
+ rc = store_active_uuid_object(&res, SQL_STORE_ACTIVE_DIMENSION, dimension_uuid);
+ if (rc != SQLITE_DONE)
+ error_report("Failed to store active dimension, rc = %d", rc);
+
+ rc = sqlite3_finalize(res);
+ if (unlikely(rc != SQLITE_OK))
+ error_report("Failed to finalize statement in store active dimension, rc = %d", rc);
+ return;
+}
+
+/*
+ * Initialize the SQLite database
+ * Return 0 on success
+ */
+int sql_init_database(void)
+{
+ char *err_msg = NULL;
+ char sqlite_database[FILENAME_MAX + 1];
+ int rc;
+
+ fatal_assert(0 == uv_mutex_init(&sqlite_transaction_lock));
+
+ snprintfz(sqlite_database, FILENAME_MAX, "%s/netdata-meta.db", netdata_configured_cache_dir);
+ rc = sqlite3_open(sqlite_database, &db_meta);
+ if (rc != SQLITE_OK) {
+ error_report("Failed to initialize database at %s", sqlite_database);
+ return 1;
+ }
+
+ info("SQLite database %s initialization", sqlite_database);
+
+ for (int i = 0; database_config[i]; i++) {
+ debug(D_METADATALOG, "Executing %s", database_config[i]);
+ rc = sqlite3_exec(db_meta, database_config[i], 0, 0, &err_msg);
+ if (rc != SQLITE_OK) {
+ error_report("SQLite error during database setup, rc = %d (%s)", rc, err_msg);
+ error_report("SQLite failed statement %s", database_config[i]);
+ sqlite3_free(err_msg);
+ return 1;
+ }
+ }
+ info("SQLite database initialization completed");
+ return 0;
+}
+
+/*
+ * Close the sqlite database
+ */
+
+void sql_close_database(void)
+{
+ int rc;
+ if (unlikely(!db_meta))
+ return;
+
+ info("Closing SQLite database");
+ rc = sqlite3_close(db_meta);
+ if (unlikely(rc != SQLITE_OK))
+ error_report("Error %d while closing the SQLite database", rc);
+ return;
+}
+
+#define FIND_UUID_TYPE "select 1 from host where host_id = @uuid union select 2 from chart where chart_id = @uuid union select 3 from dimension where dim_id = @uuid;"
+
+int find_uuid_type(uuid_t *uuid)
+{
+ static __thread sqlite3_stmt *res = NULL;
+ int rc;
+ int uuid_type = 3;
+
+ if (unlikely(!res)) {
+ rc = sqlite3_prepare_v2(db_meta, FIND_UUID_TYPE, -1, &res, 0);
+ if (rc != SQLITE_OK) {
+ error_report("Failed to bind prepare statement to find UUID type in the database");
+ return 0;
+ }
+ }
+
+ rc = sqlite3_bind_blob(res, 1, uuid, sizeof(*uuid), SQLITE_STATIC);
+ if (unlikely(rc != SQLITE_OK))
+ goto bind_fail;
+
+ rc = sqlite3_step(res);
+ if (likely(rc == SQLITE_ROW))
+ uuid_type = sqlite3_column_int(res, 0);
+
+ rc = sqlite3_reset(res);
+ if (unlikely(rc != SQLITE_OK))
+ error_report("Failed to reset statement during find uuid type, rc = %d", rc);
+
+ return uuid_type;
+
+bind_fail:
+ error_report("Failed to bind input parameter to find UUID type, rc = %d", rc);
+ rc = sqlite3_reset(res);
+ if (unlikely(rc != SQLITE_OK))
+ error_report("Failed to reset statement during find uuid type, rc = %d", rc);
+ return 0;
+}
+
+uuid_t *find_dimension_uuid(RRDSET *st, RRDDIM *rd)
+{
+ static __thread sqlite3_stmt *res = NULL;
+ uuid_t *uuid = NULL;
+ int rc;
+
+ if (unlikely(!res)) {
+ rc = sqlite3_prepare_v2(db_meta, SQL_FIND_DIMENSION_UUID, -1, &res, 0);
+ if (rc != SQLITE_OK) {
+ error_report("Failed to bind prepare statement to lookup dimension UUID in the database");
+ return NULL;
+ }
+ }
+
+ rc = sqlite3_bind_blob(res, 1, st->chart_uuid, sizeof(*st->chart_uuid), SQLITE_STATIC);
+ if (unlikely(rc != SQLITE_OK))
+ goto bind_fail;
+
+ rc = sqlite3_bind_text(res, 2, rd->id, -1, SQLITE_STATIC);
+ if (unlikely(rc != SQLITE_OK))
+ goto bind_fail;
+
+ rc = sqlite3_bind_text(res, 3, rd->name, -1, SQLITE_STATIC);
+ if (unlikely(rc != SQLITE_OK))
+ goto bind_fail;
+
+ rc = sqlite3_step(res);
+ if (likely(rc == SQLITE_ROW)) {
+ uuid = mallocz(sizeof(uuid_t));
+ uuid_copy(*uuid, sqlite3_column_blob(res, 0));
+ }
+
+ rc = sqlite3_reset(res);
+ if (unlikely(rc != SQLITE_OK))
+ error_report("Failed to reset statement find dimension uuid, rc = %d", rc);
+
+#ifdef NETDATA_INTERNAL_CHECKS
+ char uuid_str[GUID_LEN + 1];
+ if (likely(uuid)) {
+ uuid_unparse_lower(*uuid, uuid_str);
+ debug(D_METADATALOG, "Found UUID %s for dimension %s", uuid_str, rd->name);
+ }
+ else
+ debug(D_METADATALOG, "UUID not found for dimension %s", rd->name);
+#endif
+ return uuid;
+
+bind_fail:
+ error_report("Failed to bind input parameter to perform dimension UUID database lookup, rc = %d", rc);
+ return NULL;
+}
+
+uuid_t *create_dimension_uuid(RRDSET *st, RRDDIM *rd)
+{
+ uuid_t *uuid = NULL;
+ int rc;
+
+ uuid = mallocz(sizeof(uuid_t));
+ uuid_generate(*uuid);
+
+#ifdef NETDATA_INTERNAL_CHECKS
+ char uuid_str[GUID_LEN + 1];
+ uuid_unparse_lower(*uuid, uuid_str);
+ debug(D_METADATALOG,"Generating uuid [%s] for dimension %s under chart %s", uuid_str, rd->name, st->id);
+#endif
+
+ rc = sql_store_dimension(uuid, st->chart_uuid, rd->id, rd->name, rd->multiplier, rd->divisor, rd->algorithm);
+ if (unlikely(rc))
+ error_report("Failed to store dimension metadata in the database");
+
+ return uuid;
+}
+
+#define DELETE_DIMENSION_UUID "delete from dimension where dim_id = @uuid;"
+
+void delete_dimension_uuid(uuid_t *dimension_uuid)
+{
+ static __thread sqlite3_stmt *res = NULL;
+ int rc;
+
+#ifdef NETDATA_INTERNAL_CHECKS
+ char uuid_str[GUID_LEN + 1];
+ uuid_unparse_lower(*dimension_uuid, uuid_str);
+ debug(D_METADATALOG,"Deleting dimension uuid %s", uuid_str);
+#endif
+
+ if (unlikely(!res)) {
+ rc = sqlite3_prepare_v2(db_meta, DELETE_DIMENSION_UUID, -1, &res, 0);
+ if (rc != SQLITE_OK) {
+ error_report("Failed to prepare statement to delete a dimension uuid");
+ return;
+ }
+ }
+
+ rc = sqlite3_bind_blob(res, 1, dimension_uuid, sizeof(*dimension_uuid), SQLITE_STATIC);
+ if (unlikely(rc != SQLITE_OK))
+ goto bind_fail;
+
+ rc = sqlite3_step(res);
+ if (unlikely(rc != SQLITE_DONE))
+ error_report("Failed to delete dimension uuid, rc = %d", rc);
+
+bind_fail:
+ rc = sqlite3_reset(res);
+ if (unlikely(rc != SQLITE_OK))
+ error_report("Failed to reset statement when deleting dimension UUID, rc = %d", rc);
+ return;
+}
+
+/*
+ * Do a database lookup to find the UUID of a chart
+ */
+uuid_t *find_chart_uuid(RRDHOST *host, const char *type, const char *id, const char *name)
+{
+ static __thread sqlite3_stmt *res = NULL;
+ uuid_t *uuid = NULL;
+ int rc;
+
+ if (unlikely(!res)) {
+ rc = sqlite3_prepare_v2(db_meta, SQL_FIND_CHART_UUID, -1, &res, 0);
+ if (rc != SQLITE_OK) {
+ error_report("Failed to prepare statement to lookup chart UUID in the database");
+ return NULL;
+ }
+ }
+
+ rc = sqlite3_bind_blob(res, 1, &host->host_uuid, sizeof(host->host_uuid), SQLITE_STATIC);
+ if (unlikely(rc != SQLITE_OK))
+ goto bind_fail;
+
+ rc = sqlite3_bind_text(res, 2, type, -1, SQLITE_STATIC);
+ if (unlikely(rc != SQLITE_OK))
+ goto bind_fail;
+
+ rc = sqlite3_bind_text(res, 3, id, -1, SQLITE_STATIC);
+ if (unlikely(rc != SQLITE_OK))
+ goto bind_fail;
+
+ rc = sqlite3_bind_text(res, 4, name ? name : id, -1, SQLITE_STATIC);
+ if (unlikely(rc != SQLITE_OK))
+ goto bind_fail;
+
+ rc = sqlite3_step(res);
+ if (likely(rc == SQLITE_ROW)) {
+ uuid = mallocz(sizeof(uuid_t));
+ uuid_copy(*uuid, sqlite3_column_blob(res, 0));
+ }
+
+ rc = sqlite3_reset(res);
+ if (unlikely(rc != SQLITE_OK))
+ error_report("Failed to reset statement when searching for a chart UUID, rc = %d", rc);
+
+#ifdef NETDATA_INTERNAL_CHECKS
+ char uuid_str[GUID_LEN + 1];
+ if (likely(uuid)) {
+ uuid_unparse_lower(*uuid, uuid_str);
+ debug(D_METADATALOG, "Found UUID %s for chart %s.%s", uuid_str, type, name ? name : id);
+ }
+ else
+ debug(D_METADATALOG, "UUID not found for chart %s.%s", type, name ? name : id);
+#endif
+ return uuid;
+
+bind_fail:
+ error_report("Failed to bind input parameter to perform chart UUID database lookup, rc = %d", rc);
+ rc = sqlite3_reset(res);
+ if (unlikely(rc != SQLITE_OK))
+ error_report("Failed to reset statement when searching for a chart UUID, rc = %d", rc);
+ return NULL;
+}
+
+int update_chart_metadata(uuid_t *chart_uuid, RRDSET *st, const char *id, const char *name)
+{
+ int rc;
+
+ rc = sql_store_chart(
+ chart_uuid, &st->rrdhost->host_uuid, st->type, id, name, st->family, st->context, st->title, st->units, st->plugin_name,
+ st->module_name, st->priority, st->update_every, st->chart_type, st->rrd_memory_mode, st->entries);
+
+ return rc;
+}
+
+uuid_t *create_chart_uuid(RRDSET *st, const char *id, const char *name)
+{
+ uuid_t *uuid = NULL;
+ int rc;
+
+ uuid = mallocz(sizeof(uuid_t));
+ uuid_generate(*uuid);
+
+#ifdef NETDATA_INTERNAL_CHECKS
+ char uuid_str[GUID_LEN + 1];
+ uuid_unparse_lower(*uuid, uuid_str);
+ debug(D_METADATALOG,"Generating uuid [%s] for chart %s under host %s", uuid_str, st->id, st->rrdhost->hostname);
+#endif
+
+ rc = update_chart_metadata(uuid, st, id, name);
+
+ if (unlikely(rc))
+ error_report("Failed to store chart metadata in the database");
+
+ return uuid;
+}
+
+// Functions to create host, chart, dimension in the database
+
+int sql_store_host(
+ uuid_t *host_uuid, const char *hostname, const char *registry_hostname, int update_every, const char *os,
+ const char *tzone, const char *tags)
+{
+ static __thread sqlite3_stmt *res = NULL;
+ int rc;
+
+ if (unlikely(!db_meta)) {
+ error_report("Database has not been initialized");
+ return 1;
+ }
+
+ if (unlikely((!res))) {
+ rc = sqlite3_prepare_v2(db_meta, SQL_STORE_HOST, -1, &res, 0);
+ if (unlikely(rc != SQLITE_OK)) {
+ error_report("Failed to prepare statement to store host, rc = %d", rc);
+ return 1;
+ }
+ }
+
+ rc = sqlite3_bind_blob(res, 1, host_uuid, sizeof(*host_uuid), SQLITE_STATIC);
+ if (unlikely(rc != SQLITE_OK))
+ goto bind_fail;
+
+ rc = sqlite3_bind_text(res, 2, hostname, -1, SQLITE_STATIC);
+ if (unlikely(rc != SQLITE_OK))
+ goto bind_fail;
+
+ rc = sqlite3_bind_text(res, 3, registry_hostname, -1, SQLITE_STATIC);
+ if (unlikely(rc != SQLITE_OK))
+ goto bind_fail;
+
+ rc = sqlite3_bind_int(res, 4, update_every);
+ if (unlikely(rc != SQLITE_OK))
+ goto bind_fail;
+
+ rc = sqlite3_bind_text(res, 5, os, -1, SQLITE_STATIC);
+ if (unlikely(rc != SQLITE_OK))
+ goto bind_fail;
+
+ rc = sqlite3_bind_text(res, 6, tzone, -1, SQLITE_STATIC);
+ if (unlikely(rc != SQLITE_OK))
+ goto bind_fail;
+
+ rc = sqlite3_bind_text(res, 7, tags, -1, SQLITE_STATIC);
+ if (unlikely(rc != SQLITE_OK))
+ goto bind_fail;
+
+ int store_rc = sqlite3_step(res);
+ if (unlikely(store_rc != SQLITE_DONE))
+ error_report("Failed to store host %s, rc = %d", hostname, rc);
+
+ rc = sqlite3_reset(res);
+ if (unlikely(rc != SQLITE_OK))
+ error_report("Failed to reset statement to store host %s, rc = %d", hostname, rc);
+
+ return store_rc != SQLITE_DONE;
+bind_fail:
+ error_report("Failed to bind parameter to store host %s, rc = %d", hostname, rc);
+ rc = sqlite3_reset(res);
+ if (unlikely(rc != SQLITE_OK))
+ error_report("Failed to reset statement to store host %s, rc = %d", hostname, rc);
+ return 1;
+}
+
+/*
+ * Store a chart in the database
+ */
+
+int sql_store_chart(
+ uuid_t *chart_uuid, uuid_t *host_uuid, const char *type, const char *id, const char *name, const char *family,
+ const char *context, const char *title, const char *units, const char *plugin, const char *module, long priority,
+ int update_every, int chart_type, int memory_mode, long history_entries)
+{
+ static __thread sqlite3_stmt *res = NULL;
+ int rc, param = 0;
+
+ if (unlikely(!db_meta)) {
+ error_report("Database has not been initialized");
+ return 1;
+ }
+
+ if (unlikely(!res)) {
+ rc = sqlite3_prepare_v2(db_meta, SQL_STORE_CHART, -1, &res, 0);
+ if (unlikely(rc != SQLITE_OK)) {
+ error_report("Failed to prepare statement to store chart, rc = %d", rc);
+ return 1;
+ }
+ }
+
+ param++;
+ rc = sqlite3_bind_blob(res, 1, chart_uuid, sizeof(*chart_uuid), SQLITE_STATIC);
+ if (unlikely(rc != SQLITE_OK))
+ goto bind_fail;
+
+ param++;
+ rc = sqlite3_bind_blob(res, 2, host_uuid, sizeof(*host_uuid), SQLITE_STATIC);
+ if (unlikely(rc != SQLITE_OK))
+ goto bind_fail;
+
+ param++;
+ rc = sqlite3_bind_text(res, 3, type, -1, SQLITE_STATIC);
+ if (unlikely(rc != SQLITE_OK))
+ goto bind_fail;
+
+ param++;
+ rc = sqlite3_bind_text(res, 4, id, -1, SQLITE_STATIC);
+ if (unlikely(rc != SQLITE_OK))
+ goto bind_fail;
+
+ param++;
+ if (name) {
+ rc = sqlite3_bind_text(res, 5, name, -1, SQLITE_STATIC);
+ if (unlikely(rc != SQLITE_OK))
+ goto bind_fail;
+ }
+
+ param++;
+ rc = sqlite3_bind_text(res, 6, family, -1, SQLITE_STATIC);
+ if (unlikely(rc != SQLITE_OK))
+ goto bind_fail;
+
+ param++;
+ rc = sqlite3_bind_text(res, 7, context, -1, SQLITE_STATIC);
+ if (unlikely(rc != SQLITE_OK))
+ goto bind_fail;
+
+ param++;
+ rc = sqlite3_bind_text(res, 8, title, -1, SQLITE_STATIC);
+ if (unlikely(rc != SQLITE_OK))
+ goto bind_fail;
+
+ param++;
+ rc = sqlite3_bind_text(res, 9, units, -1, SQLITE_STATIC);
+ if (unlikely(rc != SQLITE_OK))
+ goto bind_fail;
+
+ param++;
+ rc = sqlite3_bind_text(res, 10, plugin, -1, SQLITE_STATIC);
+ if (unlikely(rc != SQLITE_OK))
+ goto bind_fail;
+
+ param++;
+ rc = sqlite3_bind_text(res, 11, module, -1, SQLITE_STATIC);
+ if (unlikely(rc != SQLITE_OK))
+ goto bind_fail;
+
+ param++;
+ rc = sqlite3_bind_int(res, 12, priority);
+ if (unlikely(rc != SQLITE_OK))
+ goto bind_fail;
+
+ param++;
+ rc = sqlite3_bind_int(res, 13, update_every);
+ if (unlikely(rc != SQLITE_OK))
+ goto bind_fail;
+
+ param++;
+ rc = sqlite3_bind_int(res, 14, chart_type);
+ if (unlikely(rc != SQLITE_OK))
+ goto bind_fail;
+
+ param++;
+ rc = sqlite3_bind_int(res, 15, memory_mode);
+ if (unlikely(rc != SQLITE_OK))
+ goto bind_fail;
+
+ param++;
+ rc = sqlite3_bind_int(res, 16, history_entries);
+ if (unlikely(rc != SQLITE_OK))
+ goto bind_fail;
+
+ rc = execute_insert(res);
+ if (unlikely(rc != SQLITE_DONE))
+ error_report("Failed to store chart, rc = %d", rc);
+
+ rc = sqlite3_reset(res);
+ if (unlikely(rc != SQLITE_OK))
+ error_report("Failed to reset statement in chart store function, rc = %d", rc);
+
+ return 0;
+
+bind_fail:
+ error_report("Failed to bind parameter %d to store chart, rc = %d", param, rc);
+ rc = sqlite3_reset(res);
+ if (unlikely(rc != SQLITE_OK))
+ error_report("Failed to reset statement in chart store function, rc = %d", rc);
+ return 1;
+}
+
+/*
+ * Store a dimension
+ */
+int sql_store_dimension(
+ uuid_t *dim_uuid, uuid_t *chart_uuid, const char *id, const char *name, collected_number multiplier,
+ collected_number divisor, int algorithm)
+{
+ static __thread sqlite3_stmt *res = NULL;
+ int rc;
+
+ if (unlikely(!db_meta)) {
+ error_report("Database has not been initialized");
+ return 1;
+ }
+
+ if (unlikely(!res)) {
+ rc = sqlite3_prepare_v2(db_meta, SQL_STORE_DIMENSION, -1, &res, 0);
+ if (unlikely(rc != SQLITE_OK)) {
+ error_report("Failed to prepare statement to store dimension, rc = %d", rc);
+ return 1;
+ }
+ }
+
+ rc = sqlite3_bind_blob(res, 1, dim_uuid, sizeof(*dim_uuid), SQLITE_STATIC);
+ if (unlikely(rc != SQLITE_OK))
+ goto bind_fail;
+
+ rc = sqlite3_bind_blob(res, 2, chart_uuid, sizeof(*chart_uuid), SQLITE_STATIC);
+ if (unlikely(rc != SQLITE_OK))
+ goto bind_fail;
+
+ rc = sqlite3_bind_text(res, 3, id, -1, SQLITE_STATIC);
+ if (unlikely(rc != SQLITE_OK))
+ goto bind_fail;
+
+ rc = sqlite3_bind_text(res, 4, name, -1, SQLITE_STATIC);
+ if (unlikely(rc != SQLITE_OK))
+ goto bind_fail;
+
+ rc = sqlite3_bind_int(res, 5, multiplier);
+ if (unlikely(rc != SQLITE_OK))
+ goto bind_fail;
+
+ rc = sqlite3_bind_int(res, 6, divisor);
+ if (unlikely(rc != SQLITE_OK))
+ goto bind_fail;
+
+ rc = sqlite3_bind_int(res, 7, algorithm);
+ if (unlikely(rc != SQLITE_OK))
+ goto bind_fail;
+
+ rc = execute_insert(res);
+ if (unlikely(rc != SQLITE_DONE))
+ error_report("Failed to store dimension, rc = %d", rc);
+
+ rc = sqlite3_reset(res);
+ if (unlikely(rc != SQLITE_OK))
+ error_report("Failed to reset statement in store dimension, rc = %d", rc);
+ return 0;
+
+bind_fail:
+ error_report("Failed to bind parameter to store dimension, rc = %d", rc);
+ rc = sqlite3_reset(res);
+ if (unlikely(rc != SQLITE_OK))
+ error_report("Failed to reset statement in store dimension, rc = %d", rc);
+ return 1;
+}
+
+
+//
+// Support for archived charts
+//
+#define SELECT_DIMENSION "select d.id, d.name from dimension d where d.chart_id = @chart_uuid;"
+
+void sql_rrdim2json(sqlite3_stmt *res_dim, uuid_t *chart_uuid, BUFFER *wb, size_t *dimensions_count)
+{
+ int rc;
+
+ rc = sqlite3_bind_blob(res_dim, 1, chart_uuid, sizeof(*chart_uuid), SQLITE_STATIC);
+ if (rc != SQLITE_OK) {
+ error_report("Failed to bind chart UUID to fetch archived dimensions, rc = %d", rc);
+ return;
+ }
+
+ int dimensions = 0;
+ buffer_sprintf(wb, "\t\t\t\"dimensions\": {\n");
+
+ while (sqlite3_step(res_dim) == SQLITE_ROW) {
+ if (dimensions)
+ buffer_strcat(wb, ",\n\t\t\t\t\"");
+ else
+ buffer_strcat(wb, "\t\t\t\t\"");
+ buffer_strcat_jsonescape(wb, (const char *) sqlite3_column_text(res_dim, 0));
+ buffer_strcat(wb, "\": { \"name\": \"");
+ buffer_strcat_jsonescape(wb, (const char *) sqlite3_column_text(res_dim, 1));
+ buffer_strcat(wb, "\" }");
+ dimensions++;
+ }
+ *dimensions_count += dimensions;
+ buffer_sprintf(wb, "\n\t\t\t}");
+}
+
+#define SELECT_CHART "select chart_id, id, name, type, family, context, title, priority, plugin, " \
+ "module, unit, chart_type, update_every from chart " \
+ "where host_id = @host_uuid and chart_id not in (select chart_id from chart_active) order by chart_id asc;"
+
+void sql_rrdset2json(RRDHOST *host, BUFFER *wb)
+{
+ // time_t first_entry_t = 0; //= rrdset_first_entry_t(st);
+ // time_t last_entry_t = 0; //rrdset_last_entry_t(st);
+ static char *custom_dashboard_info_js_filename = NULL;
+ int rc;
+
+ sqlite3_stmt *res_chart = NULL;
+ sqlite3_stmt *res_dim = NULL;
+ time_t now = now_realtime_sec();
+
+ rc = sqlite3_prepare_v2(db_meta, SELECT_CHART, -1, &res_chart, 0);
+ if (unlikely(rc != SQLITE_OK)) {
+ error_report("Failed to prepare statement to fetch host archived charts");
+ return;
+ }
+
+ rc = sqlite3_bind_blob(res_chart, 1, &host->host_uuid, sizeof(host->host_uuid), SQLITE_STATIC);
+ if (unlikely(rc != SQLITE_OK)) {
+ error_report("Failed to bind host parameter to fetch archived charts");
+ goto failed;
+ }
+
+ rc = sqlite3_prepare_v2(db_meta, SELECT_DIMENSION, -1, &res_dim, 0);
+ if (unlikely(rc != SQLITE_OK)) {
+ error_report("Failed to prepare statement to fetch chart archived dimensions");
+ goto failed;
+ }
+
+ if(unlikely(!custom_dashboard_info_js_filename))
+ custom_dashboard_info_js_filename = config_get(CONFIG_SECTION_WEB, "custom dashboard_info.js", "");
+
+ buffer_sprintf(wb, "{\n"
+ "\t\"hostname\": \"%s\""
+ ",\n\t\"version\": \"%s\""
+ ",\n\t\"release_channel\": \"%s\""
+ ",\n\t\"os\": \"%s\""
+ ",\n\t\"timezone\": \"%s\""
+ ",\n\t\"update_every\": %d"
+ ",\n\t\"history\": %ld"
+ ",\n\t\"memory_mode\": \"%s\""
+ ",\n\t\"custom_info\": \"%s\""
+ ",\n\t\"charts\": {"
+ , host->hostname
+ , host->program_version
+ , get_release_channel()
+ , host->os
+ , host->timezone
+ , host->rrd_update_every
+ , host->rrd_history_entries
+ , rrd_memory_mode_name(host->rrd_memory_mode)
+ , custom_dashboard_info_js_filename
+ );
+
+ size_t c = 0;
+ size_t dimensions = 0;
+
+ while (sqlite3_step(res_chart) == SQLITE_ROW) {
+ char id[512];
+ snprintfz(id, 511, "%s.%s", sqlite3_column_text(res_chart, 3), sqlite3_column_text(res_chart, 1));
+ RRDSET *st = rrdset_find(host, id);
+ if (st && !rrdset_flag_check(st, RRDSET_FLAG_ARCHIVED))
+ continue;
+
+ if (c)
+ buffer_strcat(wb, ",\n\t\t\"");
+ else
+ buffer_strcat(wb, "\n\t\t\"");
+ c++;
+
+ buffer_strcat(wb, id);
+ buffer_strcat(wb, "\": ");
+
+ buffer_sprintf(
+ wb,
+ "\t\t{\n"
+ "\t\t\t\"id\": \"%s\",\n"
+ "\t\t\t\"name\": \"%s\",\n"
+ "\t\t\t\"type\": \"%s\",\n"
+ "\t\t\t\"family\": \"%s\",\n"
+ "\t\t\t\"context\": \"%s\",\n"
+ "\t\t\t\"title\": \"%s (%s)\",\n"
+ "\t\t\t\"priority\": %ld,\n"
+ "\t\t\t\"plugin\": \"%s\",\n"
+ "\t\t\t\"module\": \"%s\",\n"
+ "\t\t\t\"enabled\": %s,\n"
+ "\t\t\t\"units\": \"%s\",\n"
+ "\t\t\t\"data_url\": \"/api/v1/data?chart=%s\",\n"
+ "\t\t\t\"chart_type\": \"%s\",\n",
+ id //sqlite3_column_text(res_chart, 1)
+ ,
+ id // sqlite3_column_text(res_chart, 2)
+ ,
+ sqlite3_column_text(res_chart, 3), sqlite3_column_text(res_chart, 4), sqlite3_column_text(res_chart, 5),
+ sqlite3_column_text(res_chart, 6), id //sqlite3_column_text(res_chart, 2)
+ ,
+ (long ) sqlite3_column_int(res_chart, 7),
+ (const char *) sqlite3_column_text(res_chart, 8) ? (const char *) sqlite3_column_text(res_chart, 8) : (char *) "",
+ (const char *) sqlite3_column_text(res_chart, 9) ? (const char *) sqlite3_column_text(res_chart, 9) : (char *) "", (char *) "false",
+ (const char *) sqlite3_column_text(res_chart, 10), id //sqlite3_column_text(res_chart, 2)
+ ,
+ rrdset_type_name(sqlite3_column_int(res_chart, 11)));
+
+ sql_rrdim2json(res_dim, (uuid_t *) sqlite3_column_blob(res_chart, 0), wb, &dimensions);
+
+ rc = sqlite3_reset(res_dim);
+ if (unlikely(rc != SQLITE_OK))
+ error_report("Failed to reset the prepared statement when reading archived chart dimensions");
+ buffer_strcat(wb, "\n\t\t}");
+ }
+
+ buffer_sprintf(wb
+ , "\n\t}"
+ ",\n\t\"charts_count\": %zu"
+ ",\n\t\"dimensions_count\": %zu"
+ ",\n\t\"alarms_count\": %zu"
+ ",\n\t\"rrd_memory_bytes\": %zu"
+ ",\n\t\"hosts_count\": %zu"
+ ",\n\t\"hosts\": ["
+ , c
+ , dimensions
+ , (size_t) 0
+ , (size_t) 0
+ , rrd_hosts_available
+ );
+
+ if(unlikely(rrd_hosts_available > 1)) {
+ rrd_rdlock();
+
+ size_t found = 0;
+ RRDHOST *h;
+ rrdhost_foreach_read(h) {
+ if(!rrdhost_should_be_removed(h, host, now) && !rrdhost_flag_check(h, RRDHOST_FLAG_ARCHIVED)) {
+ buffer_sprintf(wb
+ , "%s\n\t\t{"
+ "\n\t\t\t\"hostname\": \"%s\""
+ "\n\t\t}"
+ , (found > 0) ? "," : ""
+ , h->hostname
+ );
+
+ found++;
+ }
+ }
+
+ rrd_unlock();
+ }
+ else {
+ buffer_sprintf(wb
+ , "\n\t\t{"
+ "\n\t\t\t\"hostname\": \"%s\""
+ "\n\t\t}"
+ , host->hostname
+ );
+ }
+
+ buffer_sprintf(wb, "\n\t]\n}\n");
+
+ rc = sqlite3_finalize(res_dim);
+ if (unlikely(rc != SQLITE_OK))
+ error_report("Failed to finalize the prepared statement when reading archived chart dimensions");
+
+failed:
+ rc = sqlite3_finalize(res_chart);
+ if (unlikely(rc != SQLITE_OK))
+ error_report("Failed to finalize the prepared statement when reading archived charts");
+
+ return;
+}
+
+#define SELECT_HOST "select host_id, registry_hostname, update_every, os, timezone, tags from host where hostname = @hostname;"
+
+RRDHOST *sql_create_host_by_uuid(char *hostname)
+{
+ int rc;
+ RRDHOST *host = NULL;
+
+ sqlite3_stmt *res = NULL;
+
+ rc = sqlite3_prepare_v2(db_meta, SELECT_HOST, -1, &res, 0);
+ if (unlikely(rc != SQLITE_OK)) {
+ error_report("Failed to prepare statement to fetch host");
+ return NULL;
+ }
+
+ rc = sqlite3_bind_text(res, 1, hostname, -1, SQLITE_STATIC);
+ if (unlikely(rc != SQLITE_OK)) {
+ error_report("Failed to bind hostname parameter to fetch host information");
+ goto failed;
+ }
+
+ rc = sqlite3_step(res);
+ if (unlikely(rc != SQLITE_ROW)) {
+ error_report("Failed to find hostname %s", hostname);
+ goto failed;
+ }
+
+ char uuid_str[GUID_LEN + 1];
+ uuid_unparse_lower(*((uuid_t *) sqlite3_column_blob(res, 0)), uuid_str);
+
+ host = callocz(1, sizeof(RRDHOST));
+
+ set_host_properties(host, sqlite3_column_int(res, 2), RRD_MEMORY_MODE_DBENGINE, hostname,
+ (char *) sqlite3_column_text(res, 1), (const char *) uuid_str,
+ (char *) sqlite3_column_text(res, 3), (char *) sqlite3_column_text(res, 5),
+ (char *) sqlite3_column_text(res, 4), NULL, NULL);
+
+ uuid_copy(host->host_uuid, *((uuid_t *) sqlite3_column_blob(res, 0)));
+
+ host->system_info = NULL;
+
+failed:
+ rc = sqlite3_finalize(res);
+ if (unlikely(rc != SQLITE_OK))
+ error_report("Failed to finalize the prepared statement when reading host information");
+
+ return host;
+}
+
+void db_execute(char *cmd)
+{
+ int rc;
+ char *err_msg;
+ rc = sqlite3_exec(db_meta, cmd, 0, 0, &err_msg);
+ if (rc != SQLITE_OK) {
+ error_report("Failed to execute '%s', rc = %d (%s)", cmd, rc, err_msg);
+ sqlite3_free(err_msg);
+ }
+
+ return;
+}
+
+void db_lock(void)
+{
+ uv_mutex_lock(&sqlite_transaction_lock);
+ return;
+}
+
+void db_unlock(void)
+{
+ uv_mutex_unlock(&sqlite_transaction_lock);
+ return;
+}
+
+
+#define SELECT_MIGRATED_FILE "select 1 from metadata_migration where filename = @path;"
+
+int file_is_migrated(char *path)
+{
+ sqlite3_stmt *res = NULL;
+ int rc;
+
+ rc = sqlite3_prepare_v2(db_meta, SELECT_MIGRATED_FILE, -1, &res, 0);
+ if (unlikely(rc != SQLITE_OK)) {
+ error_report("Failed to prepare statement to fetch host");
+ return 0;
+ }
+
+ rc = sqlite3_bind_text(res, 1, path, -1, SQLITE_STATIC);
+ if (unlikely(rc != SQLITE_OK)) {
+ error_report("Failed to bind filename parameter to check migration");
+ if (unlikely(sqlite3_finalize(res) != SQLITE_OK))
+ error_report("Failed to finalize the prepared statement when checking if metadata file is migrated");
+ return 0;
+ }
+
+ rc = sqlite3_step(res);
+
+ if (unlikely(sqlite3_finalize(res) != SQLITE_OK))
+ error_report("Failed to finalize the prepared statement when checking if metadata file is migrated");
+
+ return (rc == SQLITE_ROW);
+}
+
+#define STORE_MIGRATED_FILE "insert or replace into metadata_migration (filename, file_size, date_created) " \
+ "values (@file, @size, strftime('%s'));"
+
+void add_migrated_file(char *path, uint64_t file_size)
+{
+ sqlite3_stmt *res = NULL;
+ int rc;
+
+ rc = sqlite3_prepare_v2(db_meta, STORE_MIGRATED_FILE, -1, &res, 0);
+ if (unlikely(rc != SQLITE_OK)) {
+ error_report("Failed to prepare statement to fetch host");
+ return;
+ }
+
+ rc = sqlite3_bind_text(res, 1, path, -1, SQLITE_STATIC);
+ if (unlikely(rc != SQLITE_OK)) {
+ error_report("Failed to bind filename parameter to store migration information");
+ goto done;
+ }
+
+ rc = sqlite3_bind_int64(res, 2, file_size);
+ if (unlikely(rc != SQLITE_OK)) {
+ error_report("Failed to bind size parameter to store migration information");
+ goto done;
+ }
+
+ rc = execute_insert(res);
+ if (unlikely(rc != SQLITE_DONE))
+ error_report("Failed to store migrated file, rc = %d", rc);
+
+done:
+ if (unlikely(sqlite3_finalize(res) != SQLITE_OK))
+ error_report("Failed to finalize the prepared statement when storing migrated file");
+
+ return;
+}
diff --git a/database/sqlite/sqlite_functions.h b/database/sqlite/sqlite_functions.h
new file mode 100644
index 0000000..f0b4b77
--- /dev/null
+++ b/database/sqlite/sqlite_functions.h
@@ -0,0 +1,62 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_SQLITE_FUNCTIONS_H
+#define NETDATA_SQLITE_FUNCTIONS_H
+
+#include "../../daemon/common.h"
+#include "sqlite3.h"
+
+#define SQLITE_INSERT_DELAY (50) // Insert delay in case of lock
+
+#define SQL_STORE_HOST "insert or replace into host (host_id,hostname,registry_hostname,update_every,os,timezone,tags) values (?1,?2,?3,?4,?5,?6,?7);"
+
+#define SQL_STORE_CHART "insert or replace into chart (chart_id, host_id, type, id, " \
+ "name, family, context, title, unit, plugin, module, priority, update_every , chart_type , memory_mode , " \
+ "history_entries) values (?1,?2,?3,?4,?5,?6,?7,?8,?9,?10,?11,?12,?13,?14,?15,?16);"
+
+#define SQL_FIND_CHART_UUID \
+ "select chart_id from chart where host_id = @host and type=@type and id=@id and (name is null or name=@name);"
+
+#define SQL_STORE_ACTIVE_CHART \
+ "insert or replace into chart_active (chart_id, date_created) values (@id, strftime('%s'));"
+
+#define SQL_STORE_DIMENSION \
+ "INSERT OR REPLACE into dimension (dim_id, chart_id, id, name, multiplier, divisor , algorithm) values (?0001,?0002,?0003,?0004,?0005,?0006,?0007);"
+
+#define SQL_FIND_DIMENSION_UUID "select dim_id from dimension where chart_id=@chart and id=@id and name=@name;"
+
+#define SQL_STORE_ACTIVE_DIMENSION \
+ "insert or replace into dimension_active (dim_id, date_created) values (@id, strftime('%s'));"
+
+extern int sql_init_database(void);
+extern void sql_close_database(void);
+
+extern int sql_store_host(uuid_t *guid, const char *hostname, const char *registry_hostname, int update_every, const char *os, const char *timezone, const char *tags);
+extern int sql_store_chart(
+ uuid_t *chart_uuid, uuid_t *host_uuid, const char *type, const char *id, const char *name, const char *family,
+ const char *context, const char *title, const char *units, const char *plugin, const char *module, long priority,
+ int update_every, int chart_type, int memory_mode, long history_entries);
+extern int sql_store_dimension(uuid_t *dim_uuid, uuid_t *chart_uuid, const char *id, const char *name, collected_number multiplier,
+ collected_number divisor, int algorithm);
+
+extern uuid_t *find_dimension_uuid(RRDSET *st, RRDDIM *rd);
+extern uuid_t *create_dimension_uuid(RRDSET *st, RRDDIM *rd);
+extern void store_active_dimension(uuid_t *dimension_uuid);
+
+extern uuid_t *find_chart_uuid(RRDHOST *host, const char *type, const char *id, const char *name);
+extern uuid_t *create_chart_uuid(RRDSET *st, const char *id, const char *name);
+extern int update_chart_metadata(uuid_t *chart_uuid, RRDSET *st, const char *id, const char *name);
+extern void store_active_chart(uuid_t *chart_uuid);
+
+extern int find_uuid_type(uuid_t *uuid);
+
+extern void sql_rrdset2json(RRDHOST *host, BUFFER *wb);
+
+extern RRDHOST *sql_create_host_by_uuid(char *guid);
+extern void db_execute(char *cmd);
+extern int file_is_migrated(char *path);
+extern void add_migrated_file(char *path, uint64_t file_size);
+extern void db_unlock(void);
+extern void db_lock(void);
+extern void delete_dimension_uuid(uuid_t *dimension_uuid);
+
+#endif //NETDATA_SQLITE_FUNCTIONS_H
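
One detail worth noting about the statements above: they mix positional parameters (?1, ?0001) with named ones (@host, @id). Both forms are standard SQLite; a named parameter can be resolved to its 1-based index at bind time with sqlite3_bind_parameter_index(), so call sites do not depend on the order the parameters appear in the SQL text. A minimal sketch of that pattern (the helper name and the fixed 16-byte uuid size are illustrative assumptions, not part of this patch):

    #include <sqlite3.h>

    // Bind a 16-byte uuid blob to a named parameter such as "@host".
    // Returns an SQLite result code; SQLITE_ERROR if the name is absent.
    static int bind_named_uuid(sqlite3_stmt *res, const char *name, const void *uuid)
    {
        int idx = sqlite3_bind_parameter_index(res, name);
        if (!idx)
            return SQLITE_ERROR;
        return sqlite3_bind_blob(res, idx, uuid, 16, SQLITE_STATIC);
    }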
diff --git a/diagrams/Makefile.am b/diagrams/Makefile.am
new file mode 100644
index 0000000..475ca89
--- /dev/null
+++ b/diagrams/Makefile.am
@@ -0,0 +1,24 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+MAINTAINERCLEANFILES= $(srcdir)/Makefile.in
+
+dist_noinst_DATA = \
+ config.puml \
+ registry.puml \
+ netdata-for-ephemeral-nodes.xml \
+ netdata-proxies-example.xml \
+ netdata-overview.xml \
+ data_structures/netdata_config.svg \
+ data_structures/README.md \
+ data_structures/registry.svg \
+ data_structures/rrd.svg \
+ data_structures/web.svg \
+ data_structures/src/netdata_config.xml \
+ data_structures/src/registry.xml \
+ data_structures/src/rrd.xml \
+ data_structures/src/web.xml \
+ $(NULL)
+
+dist_noinst_SCRIPTS = \
+ build.sh \
+ $(NULL)
diff --git a/diagrams/build.sh b/diagrams/build.sh
new file mode 100755
index 0000000..9b3963e
--- /dev/null
+++ b/diagrams/build.sh
@@ -0,0 +1,21 @@
+#!/usr/bin/env bash
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+path=$(dirname "$0")
+cd "${path}" || exit 1
+
+if [ ! -f "plantuml.jar" ]
+then
+ echo >&2 "Please download 'plantuml.jar' from http://plantuml.com/ and put it the same folder with me."
+ exit 1
+fi
+
+for x in *.puml
+do
+ [ "${x}" = "config.puml" ] && continue
+
+ echo >&2 "Working on ${x}..."
+ java -jar plantuml.jar -tpng "${x}"
+ java -jar plantuml.jar -tsvg "${x}"
+ # java -jar plantuml.jar -ttxt "${x}"
+done
diff --git a/diagrams/config.puml b/diagrams/config.puml
new file mode 100644
index 0000000..0ce0932
--- /dev/null
+++ b/diagrams/config.puml
@@ -0,0 +1,46 @@
+
+skinparam handwritten true
+skinparam monochrome true
+skinparam roundcorner 15
+
+skinparam sequence {
+ ArrowThickness 3
+
+ DividerFontColor Black
+ DividerFontName Comic Sans MS
+ DividerFontSize 15
+ DividerFontStyle Italic
+
+ DelayFontColor Black
+ DelayFontName Comic Sans MS
+ DelayFontSize 15
+ DelayFontStyle Italic
+
+ TitleFontColor Black
+ TitleFontName Comic Sans MS
+ TitleFontStyle Italic
+ TitleFontSize 25
+
+ ArrowColor DeepSkyBlue
+ ArrowFontColor Black
+ ArrowFontName Comic Sans MS
+ ArrowFontStyle Regular
+ ArrowFontSize 19
+
+ ActorBorderColor DeepSkyBlue
+
+ LifeLineBorderColor blue
+ LifeLineBackgroundColor #A9DCDF
+
+ ParticipantBorderColor DeepSkyBlue
+ ParticipantBackgroundColor LightBlue
+ ParticipantFontName Comic Sans MS
+ ParticipantFontSize 20
+ ParticipantFontColor Black
+
+ ActorBackgroundColor aqua
+ ActorFontColor Black
+ ActorFontSize 20
+ ActorFontName Comic Sans MS
+}
+
diff --git a/diagrams/data_structures/README.md b/diagrams/data_structures/README.md
new file mode 100644
index 0000000..27705c5
--- /dev/null
+++ b/diagrams/data_structures/README.md
@@ -0,0 +1,18 @@
+<!--
+title: "Data structures"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/diagrams/data_structures/README.md
+-->
+
+# Data structures
+
+These are the main internal data structures of `netdata`, created with `draw.io`.
+
+![Config](https://raw.githubusercontent.com/netdata/netdata/master/diagrams/data_structures/netdata_config.svg?sanitize=true)
+
+![Registry](https://raw.githubusercontent.com/netdata/netdata/master/diagrams/data_structures/registry.svg?sanitize=true)
+
+![RRD](https://raw.githubusercontent.com/netdata/netdata/master/diagrams/data_structures/rrd.svg?sanitize=true)
+
+![Web](https://raw.githubusercontent.com/netdata/netdata/master/diagrams/data_structures/web.svg?sanitize=true)
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdiagrams%2Fdata_structures%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/diagrams/data_structures/netdata_config.svg b/diagrams/data_structures/netdata_config.svg
new file mode 100644
index 0000000..5b2ed8d
--- /dev/null
+++ b/diagrams/data_structures/netdata_config.svg
@@ -0,0 +1,2 @@
+<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">
+<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" version="1.1" width="905px" height="511px" viewBox="-0.5 -0.5 905 511" style="background-color: rgb(255, 255, 255);"><defs><clipPath id="mx-clip-7-34-132-26-0"><rect x="7" y="34" width="132" height="26"/></clipPath><clipPath id="mx-clip-7-60-132-26-0"><rect x="7" y="60" width="132" height="26"/></clipPath><clipPath id="mx-clip-7-86-132-26-0"><rect x="7" y="86" width="132" height="26"/></clipPath><clipPath id="mx-clip-167-154-202-26-0"><rect x="167" y="154" width="202" height="26"/></clipPath><clipPath id="mx-clip-167-180-202-26-0"><rect x="167" y="180" width="202" height="26"/></clipPath><clipPath id="mx-clip-167-206-202-26-0"><rect x="167" y="206" width="202" height="26"/></clipPath><clipPath id="mx-clip-167-232-202-26-0"><rect x="167" y="232" width="202" height="26"/></clipPath><clipPath id="mx-clip-167-258-202-26-0"><rect x="167" y="258" width="202" height="26"/></clipPath><clipPath id="mx-clip-167-284-202-26-0"><rect x="167" y="284" width="202" height="26"/></clipPath><clipPath id="mx-clip-167-310-202-26-0"><rect x="167" y="310" width="202" height="26"/></clipPath><clipPath id="mx-clip-517-68-132-26-0"><rect x="517" y="68" width="132" height="26"/></clipPath><clipPath id="mx-clip-517-94-132-26-0"><rect x="517" y="94" width="132" height="26"/></clipPath><clipPath id="mx-clip-597-181-162-26-0"><rect x="597" y="181" width="162" height="26"/></clipPath><clipPath id="mx-clip-597-207-162-26-0"><rect x="597" y="207" width="162" height="26"/></clipPath><clipPath id="mx-clip-737-271-132-26-0"><rect x="737" y="271" width="132" height="26"/></clipPath><clipPath id="mx-clip-737-297-132-26-0"><rect x="737" y="297" width="132" height="26"/></clipPath><clipPath id="mx-clip-427-353-142-26-0"><rect x="427" y="353" width="142" height="26"/></clipPath><clipPath id="mx-clip-427-379-142-26-0"><rect x="427" y="379" width="142" height="26"/></clipPath><clipPath id="mx-clip-427-405-142-26-0"><rect x="427" y="405" width="142" height="26"/></clipPath><clipPath id="mx-clip-427-431-142-26-0"><rect x="427" y="431" width="142" height="26"/></clipPath><clipPath id="mx-clip-427-457-142-26-0"><rect x="427" y="457" width="142" height="26"/></clipPath><clipPath id="mx-clip-427-483-142-26-0"><rect x="427" y="483" width="142" height="26"/></clipPath></defs><path d="M 3 29 L 3 3 L 143 3 L 143 29" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 3 29 L 3 107 L 143 107 L 143 29" fill="#ffffff" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 3 29 L 143 29" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><g fill="#000000" font-family="Helvetica" text-anchor="middle" font-size="12px"><text x="72.5" y="20.5">struct config</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-7-34-132-26-0)" font-size="12px"><text x="8.5" y="46.5">struct section *sections</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-7-60-132-26-0)" font-size="12px"><text x="8.5" y="72.5">netdata_mutex_t mutex</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-7-86-132-26-0)" font-size="12px"><text x="8.5" y="98.5">avl_tree_lock index</text></g><path d="M 163 149 L 163 123 L 373 123 L 373 149" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 163 149 L 163 331 L 373 331 L 373 149" fill="#ffffff" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 163 149 L 373 
149" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><g fill="#000000" font-family="Helvetica" text-anchor="middle" font-size="12px"><text x="267.5" y="140.5">struct section</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-167-154-202-26-0)" font-size="12px"><text x="168.5" y="166.5">avl avl</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-167-180-202-26-0)" font-size="12px"><text x="168.5" y="192.5">uint32_t hash</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-167-206-202-26-0)" font-size="12px"><text x="168.5" y="218.5">char *name</text></g><path d="M 373 240 L 393 240 L 393 103 L 268 103 L 268 116.63" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 268 121.88 L 264.5 114.88 L 268 116.63 L 271.5 114.88 Z" fill="#000000" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-167-232-202-26-0)" font-size="12px"><text x="168.5" y="244.5">struct section *next</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-167-258-202-26-0)" font-size="12px"><text x="168.5" y="270.5">struct config_option *values</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-167-284-202-26-0)" font-size="12px"><text x="168.5" y="296.5">avl_tree_lock values_index</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-167-310-202-26-0)" font-size="12px"><text x="168.5" y="322.5">netdata_mutex_t mutex</text></g><path d="M 513 63 L 513 37 L 653 37 L 653 63" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 513 63 L 513 115 L 653 115 L 653 63" fill="#ffffff" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 513 63 L 653 63" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><g fill="#000000" font-family="Helvetica" text-anchor="middle" font-size="12px"><text x="582.5" y="54.5">struct avl_tree_lock</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-517-68-132-26-0)" font-size="12px"><text x="518.5" y="80.5">avl_tree avl_tree</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-517-94-132-26-0)" font-size="12px"><text x="518.5" y="106.5">netdata_mutex_t mutex</text></g><path d="M 593 176 L 593 150 L 763 150 L 763 176" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 593 176 L 593 228 L 763 228 L 763 176" fill="#ffffff" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 593 176 L 763 176" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><g fill="#000000" font-family="Helvetica" text-anchor="middle" font-size="12px"><text x="677.5" y="167.5">struct avl_tree</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-597-181-162-26-0)" font-size="12px"><text x="598.5" y="193.5">avl *root</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-597-207-162-26-0)" font-size="12px"><text x="598.5" y="219.5">int (*compar)(void *a, void *b)</text></g><path d="M 143 42 L 268 42 L 268 116.63" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 268 121.88 L 264.5 114.88 L 268 116.63 L 271.5 114.88 Z" fill="#000000" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><g transform="translate(217.5,36.5)"><switch><foreignObject style="overflow:visible;" 
pointer-events="all" width="57" height="11" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility"><div xmlns="http://www.w3.org/1999/xhtml" style="display: inline-block; font-size: 11px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.2; vertical-align: top; white-space: nowrap; text-align: center;"><div xmlns="http://www.w3.org/1999/xhtml" style="display:inline-block;text-align:inherit;text-decoration:inherit;background-color:#ffffff;">linked list of</div></div></foreignObject><text x="29" y="11" fill="#000000" text-anchor="middle" font-size="11px" font-family="Helvetica">linked list of</text></switch></g><path d="M 653 76 L 678 76 L 678 143.63" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 678 148.88 L 674.5 141.88 L 678 143.63 L 681.5 141.88 Z" fill="#000000" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 733 266 L 733 240 L 873 240 L 873 266" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 733 266 L 733 318 L 873 318 L 873 266" fill="#ffffff" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 733 266 L 873 266" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><g fill="#000000" font-family="Helvetica" text-anchor="middle" font-size="12px"><text x="802.5" y="257.5">struct avl</text></g><path d="M 873 279 L 893 279 L 893 220 L 803 220 L 803 233.63" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 803 238.88 L 799.5 231.88 L 803 233.63 L 806.5 231.88 Z" fill="#000000" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-737-271-132-26-0)" font-size="12px"><text x="738.5" y="283.5">struct avl *avl_link[2]</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-737-297-132-26-0)" font-size="12px"><text x="738.5" y="309.5">signed char avl_balance</text></g><path d="M 763 189 L 803 189 L 803 233.63" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 803 238.88 L 799.5 231.88 L 803 233.63 L 806.5 231.88 Z" fill="#000000" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 423 348 L 423 322 L 573 322 L 573 348" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 423 348 L 423 504 L 573 504 L 573 348" fill="#ffffff" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 423 348 L 573 348" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><g fill="#000000" font-family="Helvetica" text-anchor="middle" font-size="12px"><text x="497.5" y="339.5">struct config_options</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-427-353-142-26-0)" font-size="12px"><text x="428.5" y="365.5">avl avl</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-427-379-142-26-0)" font-size="12px"><text x="428.5" y="391.5">uint8_t flags</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-427-405-142-26-0)" font-size="12px"><text x="428.5" y="417.5">uint32_t hash</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-427-431-142-26-0)" font-size="12px"><text x="428.5" y="443.5">char *name</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-427-457-142-26-0)" font-size="12px"><text x="428.5" y="469.5">char *value</text></g><path d="M 573 491 L 593 491 L 593 302 L 498 302 L 
498 315.63" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 498 320.88 L 494.5 313.88 L 498 315.63 L 501.5 313.88 Z" fill="#000000" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-427-483-142-26-0)" font-size="12px"><text x="428.5" y="495.5">struct *config_option_next</text></g><path d="M 143 94 L 268 94 L 268 116.63" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 268 121.88 L 264.5 114.88 L 268 116.63 L 271.5 114.88 Z" fill="#000000" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><g transform="translate(195.5,88.5)"><switch><foreignObject style="overflow:visible;" pointer-events="all" width="48" height="11" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility"><div xmlns="http://www.w3.org/1999/xhtml" style="display: inline-block; font-size: 11px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.2; vertical-align: top; white-space: nowrap; text-align: center;"><div xmlns="http://www.w3.org/1999/xhtml" style="display:inline-block;text-align:inherit;text-decoration:inherit;background-color:#ffffff;">avl tree of</div></div></foreignObject><text x="24" y="11" fill="#000000" text-anchor="middle" font-size="11px" font-family="Helvetica">avl tree of</text></switch></g><path d="M 373 292 L 498 292 L 498 315.63" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 498 320.88 L 494.5 313.88 L 498 315.63 L 501.5 313.88 Z" fill="#000000" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><g transform="translate(426.5,286.5)"><switch><foreignObject style="overflow:visible;" pointer-events="all" width="48" height="11" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility"><div xmlns="http://www.w3.org/1999/xhtml" style="display: inline-block; font-size: 11px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.2; vertical-align: top; white-space: nowrap; text-align: center;"><div xmlns="http://www.w3.org/1999/xhtml" style="display:inline-block;text-align:inherit;text-decoration:inherit;background-color:#ffffff;">avl tree of</div></div></foreignObject><text x="24" y="11" fill="#000000" text-anchor="middle" font-size="11px" font-family="Helvetica">avl tree of</text></switch></g><path d="M 373 266 L 498 266 L 498 315.63" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 498 320.88 L 494.5 313.88 L 498 315.63 L 501.5 313.88 Z" fill="#000000" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><g transform="translate(435.5,260.5)"><switch><foreignObject style="overflow:visible;" pointer-events="all" width="57" height="11" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility"><div xmlns="http://www.w3.org/1999/xhtml" style="display: inline-block; font-size: 11px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.2; vertical-align: top; white-space: nowrap; text-align: center;"><div xmlns="http://www.w3.org/1999/xhtml" style="display:inline-block;text-align:inherit;text-decoration:inherit;background-color:#ffffff;">linked list of</div></div></foreignObject><text x="29" y="11" fill="#000000" text-anchor="middle" font-size="11px" font-family="Helvetica">linked list of</text></switch></g></svg> \ No newline at end of file
diff --git a/diagrams/data_structures/registry.svg b/diagrams/data_structures/registry.svg
new file mode 100644
index 0000000..2363e66
--- /dev/null
+++ b/diagrams/data_structures/registry.svg
@@ -0,0 +1,2 @@
+<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">
+<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" version="1.1" width="1377px" height="1061px" viewBox="-0.5 -0.5 1377 1061" style="background-color: rgb(255, 255, 255);"><defs><clipPath id="mx-clip-7-39-202-26-0"><rect x="7" y="39" width="202" height="26"/></clipPath><clipPath id="mx-clip-7-91-202-26-0"><rect x="7" y="91" width="202" height="26"/></clipPath><clipPath id="mx-clip-7-117-202-26-0"><rect x="7" y="117" width="202" height="26"/></clipPath><clipPath id="mx-clip-7-143-202-26-0"><rect x="7" y="143" width="202" height="26"/></clipPath><clipPath id="mx-clip-7-169-202-26-0"><rect x="7" y="169" width="202" height="26"/></clipPath><clipPath id="mx-clip-7-195-202-26-0"><rect x="7" y="195" width="202" height="26"/></clipPath><clipPath id="mx-clip-7-221-202-26-0"><rect x="7" y="221" width="202" height="26"/></clipPath><clipPath id="mx-clip-7-247-202-26-0"><rect x="7" y="247" width="202" height="26"/></clipPath><clipPath id="mx-clip-7-299-202-26-0"><rect x="7" y="299" width="202" height="26"/></clipPath><clipPath id="mx-clip-7-325-202-26-0"><rect x="7" y="325" width="202" height="26"/></clipPath><clipPath id="mx-clip-7-351-202-26-0"><rect x="7" y="351" width="202" height="26"/></clipPath><clipPath id="mx-clip-7-377-202-26-0"><rect x="7" y="377" width="202" height="26"/></clipPath><clipPath id="mx-clip-7-403-202-26-0"><rect x="7" y="403" width="202" height="26"/></clipPath><clipPath id="mx-clip-7-455-202-26-0"><rect x="7" y="455" width="202" height="26"/></clipPath><clipPath id="mx-clip-7-481-202-26-0"><rect x="7" y="481" width="202" height="26"/></clipPath><clipPath id="mx-clip-7-507-202-26-0"><rect x="7" y="507" width="202" height="26"/></clipPath><clipPath id="mx-clip-7-533-202-26-0"><rect x="7" y="533" width="202" height="26"/></clipPath><clipPath id="mx-clip-7-559-202-26-0"><rect x="7" y="559" width="202" height="26"/></clipPath><clipPath id="mx-clip-7-585-202-26-0"><rect x="7" y="585" width="202" height="26"/></clipPath><clipPath id="mx-clip-7-611-202-26-0"><rect x="7" y="611" width="202" height="26"/></clipPath><clipPath id="mx-clip-7-637-202-26-0"><rect x="7" y="637" width="202" height="26"/></clipPath><clipPath id="mx-clip-7-689-202-26-0"><rect x="7" y="689" width="202" height="26"/></clipPath><clipPath id="mx-clip-7-715-202-26-0"><rect x="7" y="715" width="202" height="26"/></clipPath><clipPath id="mx-clip-7-741-202-26-0"><rect x="7" y="741" width="202" height="26"/></clipPath><clipPath id="mx-clip-7-767-202-26-0"><rect x="7" y="767" width="202" height="26"/></clipPath><clipPath id="mx-clip-7-793-202-26-0"><rect x="7" y="793" width="202" height="26"/></clipPath><clipPath id="mx-clip-7-819-202-26-0"><rect x="7" y="819" width="202" height="26"/></clipPath><clipPath id="mx-clip-7-845-202-26-0"><rect x="7" y="845" width="202" height="26"/></clipPath><clipPath id="mx-clip-7-871-202-26-0"><rect x="7" y="871" width="202" height="26"/></clipPath><clipPath id="mx-clip-7-897-202-26-0"><rect x="7" y="897" width="202" height="26"/></clipPath><clipPath id="mx-clip-897-52-202-26-0"><rect x="897" y="52" width="202" height="26"/></clipPath><clipPath id="mx-clip-897-78-202-26-0"><rect x="897" y="78" width="202" height="26"/></clipPath><clipPath id="mx-clip-897-104-202-26-0"><rect x="897" y="104" width="202" height="26"/></clipPath><clipPath id="mx-clip-897-130-202-26-0"><rect x="897" y="130" width="202" height="26"/></clipPath><clipPath id="mx-clip-942-513-162-26-0"><rect x="942" y="513" width="162" height="26"/></clipPath><clipPath 
id="mx-clip-942-539-162-26-0"><rect x="942" y="539" width="162" height="26"/></clipPath><clipPath id="mx-clip-1062-626-132-26-0"><rect x="1062" y="626" width="132" height="26"/></clipPath><clipPath id="mx-clip-1062-652-132-26-0"><rect x="1062" y="652" width="132" height="26"/></clipPath><clipPath id="mx-clip-1027-286-202-26-0"><rect x="1027" y="286" width="202" height="26"/></clipPath><clipPath id="mx-clip-1027-312-202-26-0"><rect x="1027" y="312" width="202" height="26"/></clipPath><clipPath id="mx-clip-1027-338-202-26-0"><rect x="1027" y="338" width="202" height="26"/></clipPath><clipPath id="mx-clip-1027-364-202-26-0"><rect x="1027" y="364" width="202" height="26"/></clipPath><clipPath id="mx-clip-1167-130-202-26-0"><rect x="1167" y="130" width="202" height="26"/></clipPath><clipPath id="mx-clip-1167-156-202-26-0"><rect x="1167" y="156" width="202" height="26"/></clipPath><clipPath id="mx-clip-1167-182-202-26-0"><rect x="1167" y="182" width="202" height="26"/></clipPath><clipPath id="mx-clip-1167-208-202-26-0"><rect x="1167" y="208" width="202" height="26"/></clipPath><clipPath id="mx-clip-377-61-202-26-0"><rect x="377" y="61" width="202" height="26"/></clipPath><clipPath id="mx-clip-377-87-202-26-0"><rect x="377" y="87" width="202" height="26"/></clipPath><clipPath id="mx-clip-377-113-202-26-0"><rect x="377" y="113" width="202" height="26"/></clipPath><clipPath id="mx-clip-377-139-202-26-0"><rect x="377" y="139" width="202" height="26"/></clipPath><clipPath id="mx-clip-377-165-202-26-0"><rect x="377" y="165" width="202" height="26"/></clipPath><clipPath id="mx-clip-527-286-202-26-0"><rect x="527" y="286" width="202" height="26"/></clipPath><clipPath id="mx-clip-527-312-202-26-0"><rect x="527" y="312" width="202" height="26"/></clipPath><clipPath id="mx-clip-527-338-202-26-0"><rect x="527" y="338" width="202" height="26"/></clipPath><clipPath id="mx-clip-527-364-202-26-0"><rect x="527" y="364" width="202" height="26"/></clipPath><clipPath id="mx-clip-527-390-202-26-0"><rect x="527" y="390" width="202" height="26"/></clipPath><clipPath id="mx-clip-527-416-202-26-0"><rect x="527" y="416" width="202" height="26"/></clipPath><clipPath id="mx-clip-527-442-202-26-0"><rect x="527" y="442" width="202" height="26"/></clipPath><clipPath id="mx-clip-527-468-202-26-0"><rect x="527" y="468" width="202" height="26"/></clipPath><clipPath id="mx-clip-677-929-202-26-0"><rect x="677" y="929" width="202" height="26"/></clipPath><clipPath id="mx-clip-677-955-202-26-0"><rect x="677" y="955" width="202" height="26"/></clipPath><clipPath id="mx-clip-677-981-202-26-0"><rect x="677" y="981" width="202" height="26"/></clipPath><clipPath id="mx-clip-677-1007-202-26-0"><rect x="677" y="1007" width="202" height="26"/></clipPath><clipPath id="mx-clip-677-1033-202-26-0"><rect x="677" y="1033" width="202" height="26"/></clipPath><clipPath id="mx-clip-317-587-202-26-0"><rect x="317" y="587" width="202" height="26"/></clipPath><clipPath id="mx-clip-317-613-202-26-0"><rect x="317" y="613" width="202" height="26"/></clipPath><clipPath id="mx-clip-317-639-202-26-0"><rect x="317" y="639" width="202" height="26"/></clipPath><clipPath id="mx-clip-317-665-202-26-0"><rect x="317" y="665" width="202" height="26"/></clipPath><clipPath id="mx-clip-317-691-202-26-0"><rect x="317" y="691" width="202" height="26"/></clipPath><clipPath id="mx-clip-317-717-202-26-0"><rect x="317" y="717" width="202" height="26"/></clipPath><clipPath id="mx-clip-547-704-202-26-0"><rect x="547" y="704" width="202" height="26"/></clipPath><clipPath 
id="mx-clip-547-730-202-26-0"><rect x="547" y="730" width="202" height="26"/></clipPath><clipPath id="mx-clip-547-756-202-26-0"><rect x="547" y="756" width="202" height="26"/></clipPath><clipPath id="mx-clip-547-782-202-26-0"><rect x="547" y="782" width="202" height="26"/></clipPath><clipPath id="mx-clip-547-808-202-26-0"><rect x="547" y="808" width="202" height="26"/></clipPath></defs><path d="M 213 827 L 213 801 L 253 801 L 253 10 L 478 10 L 478 23.63" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 478 28.88 L 474.5 21.88 L 478 23.63 L 481.5 21.88 Z" fill="#000000" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><g transform="translate(223.5,310.5)"><switch><foreignObject style="overflow:visible;" pointer-events="all" width="59" height="11" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility"><div xmlns="http://www.w3.org/1999/xhtml" style="display: inline-block; font-size: 11px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.2; vertical-align: top; white-space: nowrap; text-align: center;"><div xmlns="http://www.w3.org/1999/xhtml" style="display:inline-block;text-align:inherit;text-decoration:inherit;background-color:#ffffff;">dictionary of</div></div></foreignObject><text x="30" y="11" fill="#000000" text-anchor="middle" font-size="11px" font-family="Helvetica">dictionary of</text></switch></g><path d="M 213 879 L 778 878 L 778 891.63" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 778 896.88 L 774.5 889.88 L 778 891.63 L 781.5 889.88 Z" fill="#000000" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><g transform="translate(480.5,872.5)"><switch><foreignObject style="overflow:visible;" pointer-events="all" width="48" height="11" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility"><div xmlns="http://www.w3.org/1999/xhtml" style="display: inline-block; font-size: 11px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.2; vertical-align: top; white-space: nowrap; text-align: center;"><div xmlns="http://www.w3.org/1999/xhtml" style="display:inline-block;text-align:inherit;text-decoration:inherit;background-color:#ffffff;">avl tree of</div></div></foreignObject><text x="24" y="11" fill="#000000" text-anchor="middle" font-size="11px" font-family="Helvetica">avl tree of</text></switch></g><path d="M 3 33.5 L 3 7.5 L 213 7.5 L 213 33.5" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 3 33.5 L 3 917.5 L 213 917.5 L 213 33.5" fill="#ffffff" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 3 33.5 L 213 33.5" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><g fill="#000000" font-family="Helvetica" text-anchor="middle" font-size="12px"><text x="107.5" y="25">struct registry</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-7-39-202-26-0)" font-size="12px"><text x="8.5" y="51">int enabled</text></g><path d="M 3 85.5 L 3 59.5 L 213 59.5 L 213 85.5" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 3 85.5 L 3 267.5 L 213 267.5 L 213 85.5" fill="#ffffff" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 3 85.5 L 213 85.5" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><g fill="#000000" font-family="Helvetica" text-anchor="middle" font-size="12px"><text x="107.5" y="77">entries counters</text></g><g fill="#000000" font-family="Helvetica" 
clip-path="url(#mx-clip-7-91-202-26-0)" font-size="12px"><text x="8.5" y="103">unsigned long long persons_count</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-7-117-202-26-0)" font-size="12px"><text x="8.5" y="129">unsigned long long machines_count</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-7-143-202-26-0)" font-size="12px"><text x="8.5" y="155">unsigned long long usages count</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-7-169-202-26-0)" font-size="12px"><text x="8.5" y="181">unsigned long long urls_count</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-7-195-202-26-0)" font-size="12px"><text x="8.5" y="207">unsigned long long persons_urls_count</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-7-221-202-26-0)" font-size="12px"><text x="8.5" y="233">unsigned long long machines_urls_count</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-7-247-202-26-0)" font-size="12px"><text x="8.5" y="259">unsigned long long log_count</text></g><path d="M 3 293.5 L 3 267.5 L 213 267.5 L 213 293.5" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 3 293.5 L 3 423.5 L 213 423.5 L 213 293.5" fill="#ffffff" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 3 293.5 L 213 293.5" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><g fill="#000000" font-family="Helvetica" text-anchor="middle" font-size="12px"><text x="107.5" y="285">memory counters</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-7-299-202-26-0)" font-size="12px"><text x="8.5" y="311">unsigned long long persons memory</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-7-325-202-26-0)" font-size="12px"><text x="8.5" y="337">unsigned long long machines_memory</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-7-351-202-26-0)" font-size="12px"><text x="8.5" y="363">unsigned long long urls_memory</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-7-377-202-26-0)" font-size="12px"><text x="8.5" y="389">unsigned long long persons_urls_memory</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-7-403-202-26-0)" font-size="12px"><text x="8.5" y="415">unsigned long longmachines_urls_memory</text></g><path d="M 3 449.5 L 3 423.5 L 213 423.5 L 213 449.5" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 3 449.5 L 3 657.5 L 213 657.5 L 213 449.5" fill="#ffffff" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 3 449.5 L 213 449.5" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><g fill="#000000" font-family="Helvetica" text-anchor="middle" font-size="12px"><text x="107.5" y="441">configuration</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-7-455-202-26-0)" font-size="12px"><text x="8.5" y="467">unsigned long long save_registry_every_entries</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-7-481-202-26-0)" font-size="12px"><text x="8.5" y="493">char *registry_domain</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-7-507-202-26-0)" font-size="12px"><text x="8.5" y="519">char *hostname</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-7-533-202-26-0)" font-size="12px"><text x="8.5" y="545">char 
*registry_to_announce</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-7-559-202-26-0)" font-size="12px"><text x="8.5" y="571">time_t persons_expiration</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-7-585-202-26-0)" font-size="12px"><text x="8.5" y="597">int verify_cookies_redirects</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-7-611-202-26-0)" font-size="12px"><text x="8.5" y="623">size_t max_url_length</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-7-637-202-26-0)" font-size="12px"><text x="8.5" y="649">size_t max_name_length</text></g><path d="M 3 683.5 L 3 657.5 L 213 657.5 L 213 683.5" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 3 683.5 L 3 787.5 L 213 787.5 L 213 683.5" fill="#ffffff" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 3 683.5 L 213 683.5" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><g fill="#000000" font-family="Helvetica" text-anchor="middle" font-size="12px"><text x="107.5" y="675">path names</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-7-689-202-26-0)" font-size="12px"><text x="8.5" y="701">char *pathname</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-7-715-202-26-0)" font-size="12px"><text x="8.5" y="727">char *db_filename</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-7-741-202-26-0)" font-size="12px"><text x="8.5" y="753">char *log_filename</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-7-767-202-26-0)" font-size="12px"><text x="8.5" y="779">char *machine_guid_filename</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-7-793-202-26-0)" font-size="12px"><text x="8.5" y="805">FILE *log_fp</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-7-819-202-26-0)" font-size="12px"><text x="8.5" y="831">DICTIONARY *persons</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-7-845-202-26-0)" font-size="12px"><text x="8.5" y="857">DICTIONARY *machines</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-7-871-202-26-0)" font-size="12px"><text x="8.5" y="883">avl_tree registry_urls_root_index</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-7-897-202-26-0)" font-size="12px"><text x="8.5" y="909">netdata_mutex_t lock</text></g><path d="M 893 46.5 L 893 20.5 L 1103 20.5 L 1103 46.5" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 893 46.5 L 893 150.5 L 1103 150.5 L 1103 46.5" fill="#ffffff" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 893 46.5 L 1103 46.5" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><g fill="#000000" font-family="Helvetica" text-anchor="middle" font-size="12px"><text x="997.5" y="38">DICTIONARY</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-897-52-202-26-0)" font-size="12px"><text x="898.5" y="64">avl_tree values_index</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-897-78-202-26-0)" font-size="12px"><text x="898.5" y="90">uint8_t flags</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-897-104-202-26-0)" font-size="12px"><text x="898.5" y="116">struct dictionary_stats *stats</text></g><g fill="#000000" font-family="Helvetica" 
clip-path="url(#mx-clip-897-130-202-26-0)" font-size="12px"><text x="898.5" y="142">netdata_rwlock_t *rwlock</text></g><path d="M 938 507.5 L 938 481.5 L 1108 481.5 L 1108 507.5" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 938 507.5 L 938 559.5 L 1108 559.5 L 1108 507.5" fill="#ffffff" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 938 507.5 L 1108 507.5" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><g fill="#000000" font-family="Helvetica" text-anchor="middle" font-size="12px"><text x="1022.5" y="499">struct avl_tree</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-942-513-162-26-0)" font-size="12px"><text x="943.5" y="525">avl *root</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-942-539-162-26-0)" font-size="12px"><text x="943.5" y="551">int (*compar)(void *a, void *b)</text></g><path d="M 1058 620.5 L 1058 594.5 L 1198 594.5 L 1198 620.5" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 1058 620.5 L 1058 672.5 L 1198 672.5 L 1198 620.5" fill="#ffffff" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 1058 620.5 L 1198 620.5" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><g fill="#000000" font-family="Helvetica" text-anchor="middle" font-size="12px"><text x="1127.5" y="612">struct avl</text></g><path d="M 1198 634 L 1218 634 L 1218 575 L 1128 575 L 1128 588.63" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 1128 593.88 L 1124.5 586.88 L 1128 588.63 L 1131.5 586.88 Z" fill="#000000" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-1062-626-132-26-0)" font-size="12px"><text x="1063.5" y="638">struct avl *avl_link[2]</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-1062-652-132-26-0)" font-size="12px"><text x="1063.5" y="664">signed char avl_balance</text></g><path d="M 1108 521 L 1128 521 L 1128 588.63" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 1128 593.88 L 1124.5 586.88 L 1128 588.63 L 1131.5 586.88 Z" fill="#000000" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 1023 280.5 L 1023 254.5 L 1233 254.5 L 1233 280.5" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 1023 280.5 L 1023 384.5 L 1233 384.5 L 1233 280.5" fill="#ffffff" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 1023 280.5 L 1233 280.5" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><g fill="#000000" font-family="Helvetica" text-anchor="middle" font-size="12px"><text x="1127.5" y="272">struct dictionary_stats</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-1027-286-202-26-0)" font-size="12px"><text x="1028.5" y="298">unsigned long long inserts</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-1027-312-202-26-0)" font-size="12px"><text x="1028.5" y="324">unsigned long long deletes</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-1027-338-202-26-0)" font-size="12px"><text x="1028.5" y="350">unsigned long long searches</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-1027-364-202-26-0)" font-size="12px"><text x="1028.5" y="376">unsigned long long entries</text></g><path d="M 1103 
112 L 1128 112 L 1128 248.63" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 1128 253.88 L 1124.5 246.88 L 1128 248.63 L 1131.5 246.88 Z" fill="#000000" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 1163 124.5 L 1163 98.5 L 1373 98.5 L 1373 124.5" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 1163 124.5 L 1163 228.5 L 1373 228.5 L 1373 124.5" fill="#ffffff" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 1163 124.5 L 1373 124.5" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><g fill="#000000" font-family="Helvetica" text-anchor="middle" font-size="12px"><text x="1267.5" y="116">NAME_VALUE</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-1167-130-202-26-0)" font-size="12px"><text x="1168.5" y="142">avl avl</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-1167-156-202-26-0)" font-size="12px"><text x="1168.5" y="168">uint32_t hash</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-1167-182-202-26-0)" font-size="12px"><text x="1168.5" y="194">char *name</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-1167-208-202-26-0)" font-size="12px"><text x="1168.5" y="220">void *value</text></g><path d="M 373 55.5 L 373 29.5 L 583 29.5 L 583 55.5" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 373 55.5 L 373 185.5 L 583 185.5 L 583 55.5" fill="#ffffff" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 373 55.5 L 583 55.5" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><g fill="#000000" font-family="Helvetica" text-anchor="middle" font-size="12px"><text x="477.5" y="47">REGISTRY_PERSON</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-377-61-202-26-0)" font-size="12px"><text x="378.5" y="73">char guid[GUID_LEN + 1]</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-377-87-202-26-0)" font-size="12px"><text x="378.5" y="99">avl_tree person_urls</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-377-113-202-26-0)" font-size="12px"><text x="378.5" y="125">uint32_t fist_t</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-377-139-202-26-0)" font-size="12px"><text x="378.5" y="151">uint32_t last_t</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-377-165-202-26-0)" font-size="12px"><text x="378.5" y="177">uint32_t usages</text></g><path d="M 523 280.5 L 523 254.5 L 733 254.5 L 733 280.5" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 523 280.5 L 523 488.5 L 733 488.5 L 733 280.5" fill="#ffffff" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 523 280.5 L 733 280.5" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><g fill="#000000" font-family="Helvetica" text-anchor="middle" font-size="12px"><text x="627.5" y="272">REGISTRY_PERSON_URL</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-527-286-202-26-0)" font-size="12px"><text x="528.5" y="298">avl avl</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-527-312-202-26-0)" font-size="12px"><text x="528.5" y="324">REGISTRY_URL *url</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-527-338-202-26-0)" font-size="12px"><text 
x="528.5" y="350">REGISTRY_MACHINE *machine</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-527-364-202-26-0)" font-size="12px"><text x="528.5" y="376">uint8_t flags</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-527-390-202-26-0)" font-size="12px"><text x="528.5" y="402">uint32_t first_t</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-527-416-202-26-0)" font-size="12px"><text x="528.5" y="428">uint32_t last_t</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-527-442-202-26-0)" font-size="12px"><text x="528.5" y="454">uint32_t usages</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-527-468-202-26-0)" font-size="12px"><text x="528.5" y="480">char machine_name[1]</text></g><path d="M 673 924 L 673 898 L 883 898 L 883 924" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 673 924 L 673 1054 L 883 1054 L 883 924" fill="#ffffff" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 673 924 L 883 924" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><g fill="#000000" font-family="Helvetica" text-anchor="middle" font-size="12px"><text x="777.5" y="915.5">REGISTRY_URL</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-677-929-202-26-0)" font-size="12px"><text x="678.5" y="941.5">avl avl</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-677-955-202-26-0)" font-size="12px"><text x="678.5" y="967.5">uint32_t hash</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-677-981-202-26-0)" font-size="12px"><text x="678.5" y="993.5">uint32_t links</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-677-1007-202-26-0)" font-size="12px"><text x="678.5" y="1019.5">uint16_t len</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-677-1033-202-26-0)" font-size="12px"><text x="678.5" y="1045.5">char url[1]</text></g><path d="M 733 320 L 778 320 L 778 891.63" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 778 896.88 L 774.5 889.88 L 778 891.63 L 781.5 889.88 Z" fill="#000000" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 313 581.5 L 313 555.5 L 523 555.5 L 523 581.5" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 313 581.5 L 313 737.5 L 523 737.5 L 523 581.5" fill="#ffffff" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 313 581.5 L 523 581.5" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><g fill="#000000" font-family="Helvetica" text-anchor="middle" font-size="12px"><text x="417.5" y="573">REGISTRY_MACHINE</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-317-587-202-26-0)" font-size="12px"><text x="318.5" y="599">char guid[GUID_LEN + 1]</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-317-613-202-26-0)" font-size="12px"><text x="318.5" y="625">uint32_t links</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-317-639-202-26-0)" font-size="12px"><text x="318.5" y="651">DICTIONARY *machine_urls</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-317-665-202-26-0)" font-size="12px"><text x="318.5" y="677">uint32_t first_t</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-317-691-202-26-0)" 
font-size="12px"><text x="318.5" y="703">uint32_t last_t</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-317-717-202-26-0)" font-size="12px"><text x="318.5" y="729">uint32_t usages</text></g><path d="M 543 698.5 L 543 672.5 L 753 672.5 L 753 698.5" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 543 698.5 L 543 828.5 L 753 828.5 L 753 698.5" fill="#ffffff" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 543 698.5 L 753 698.5" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><g fill="#000000" font-family="Helvetica" text-anchor="middle" font-size="12px"><text x="647.5" y="690">REGISTRY_MACHINE_URL</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-547-704-202-26-0)" font-size="12px"><text x="548.5" y="716">REGISTRY_URL *url</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-547-730-202-26-0)" font-size="12px"><text x="548.5" y="742">uint8_t flags</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-547-756-202-26-0)" font-size="12px"><text x="548.5" y="768">uint32_t first_t</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-547-782-202-26-0)" font-size="12px"><text x="548.5" y="794">uint32_t last_t</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-547-808-202-26-0)" font-size="12px"><text x="548.5" y="820">uint32_t usages</text></g><path d="M 753 712 L 778 712 L 778 891.63" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 778 896.88 L 774.5 889.88 L 778 891.63 L 781.5 889.88 Z" fill="#000000" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 523 647 L 648 647 L 648 666.63" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 648 671.88 L 644.5 664.88 L 648 666.63 L 651.5 664.88 Z" fill="#000000" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><g transform="translate(569.5,641.5)"><switch><foreignObject style="overflow:visible;" pointer-events="all" width="59" height="11" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility"><div xmlns="http://www.w3.org/1999/xhtml" style="display: inline-block; font-size: 11px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.2; vertical-align: top; white-space: nowrap; text-align: center;"><div xmlns="http://www.w3.org/1999/xhtml" style="display:inline-block;text-align:inherit;text-decoration:inherit;background-color:#ffffff;">dictionary of</div></div></foreignObject><text x="30" y="11" fill="#000000" text-anchor="middle" font-size="11px" font-family="Helvetica">dictionary of</text></switch></g><path d="M 583 95 L 628 95 L 628 248.63" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 628 253.88 L 624.5 246.88 L 628 248.63 L 631.5 246.88 Z" fill="#000000" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><g transform="translate(603.5,147.5)"><switch><foreignObject style="overflow:visible;" pointer-events="all" width="48" height="11" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility"><div xmlns="http://www.w3.org/1999/xhtml" style="display: inline-block; font-size: 11px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.2; vertical-align: top; white-space: nowrap; text-align: center;"><div xmlns="http://www.w3.org/1999/xhtml" 
style="display:inline-block;text-align:inherit;text-decoration:inherit;background-color:#ffffff;">avl tree of</div></div></foreignObject><text x="24" y="11" fill="#000000" text-anchor="middle" font-size="11px" font-family="Helvetica">avl tree of</text></switch></g><path d="M 1103 60 L 1268 60 L 1268 92.63" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 1268 97.88 L 1264.5 90.88 L 1268 92.63 L 1271.5 90.88 Z" fill="#000000" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><g transform="translate(1180.5,54.5)"><switch><foreignObject style="overflow:visible;" pointer-events="all" width="48" height="11" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility"><div xmlns="http://www.w3.org/1999/xhtml" style="display: inline-block; font-size: 11px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.2; vertical-align: top; white-space: nowrap; text-align: center;"><div xmlns="http://www.w3.org/1999/xhtml" style="display:inline-block;text-align:inherit;text-decoration:inherit;background-color:#ffffff;">avl tree of</div></div></foreignObject><text x="24" y="11" fill="#000000" text-anchor="middle" font-size="11px" font-family="Helvetica">avl tree of</text></switch></g><path d="M 213 853 L 213 827 L 273 827 L 273 536 L 418 536 L 418 549.63" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 418 554.88 L 414.5 547.88 L 418 549.63 L 421.5 547.88 Z" fill="#000000" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><g transform="translate(243.5,636.5)"><switch><foreignObject style="overflow:visible;" pointer-events="all" width="59" height="11" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility"><div xmlns="http://www.w3.org/1999/xhtml" style="display: inline-block; font-size: 11px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.2; vertical-align: top; white-space: nowrap; text-align: center;"><div xmlns="http://www.w3.org/1999/xhtml" style="display:inline-block;text-align:inherit;text-decoration:inherit;background-color:#ffffff;">dictionary of</div></div></foreignObject><text x="30" y="11" fill="#000000" text-anchor="middle" font-size="11px" font-family="Helvetica">dictionary of</text></switch></g><path d="M 733 346 L 753 346 L 753 518 L 418 518 L 418 549.63" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 418 554.88 L 414.5 547.88 L 418 549.63 L 421.5 547.88 Z" fill="#000000" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/></svg> \ No newline at end of file
diff --git a/diagrams/data_structures/rrd.svg b/diagrams/data_structures/rrd.svg
new file mode 100644
index 0000000..8b5014a
--- /dev/null
+++ b/diagrams/data_structures/rrd.svg
@@ -0,0 +1,2 @@
+<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">
+<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" version="1.1" width="1787px" height="1646px" viewBox="-0.5 -0.5 1787 1646" style="background-color: rgb(255, 255, 255);"><defs><clipPath id="mx-clip-577-717-262-26-0"><rect x="577" y="717" width="262" height="26"/></clipPath><clipPath id="mx-clip-577-795-262-26-0"><rect x="577" y="795" width="262" height="26"/></clipPath><clipPath id="mx-clip-577-821-262-26-0"><rect x="577" y="821" width="262" height="26"/></clipPath><clipPath id="mx-clip-577-873-262-26-0"><rect x="577" y="873" width="262" height="26"/></clipPath><clipPath id="mx-clip-577-899-262-26-0"><rect x="577" y="899" width="262" height="26"/></clipPath><clipPath id="mx-clip-367-145-242-26-0"><rect x="367" y="145" width="242" height="26"/></clipPath><clipPath id="mx-clip-367-171-242-26-0"><rect x="367" y="171" width="242" height="26"/></clipPath><clipPath id="mx-clip-367-249-242-26-0"><rect x="367" y="249" width="242" height="26"/></clipPath><clipPath id="mx-clip-367-275-242-26-0"><rect x="367" y="275" width="242" height="26"/></clipPath><clipPath id="mx-clip-367-301-242-26-0"><rect x="367" y="301" width="242" height="26"/></clipPath><clipPath id="mx-clip-367-353-242-26-0"><rect x="367" y="353" width="242" height="26"/></clipPath><clipPath id="mx-clip-367-379-242-26-0"><rect x="367" y="379" width="242" height="26"/></clipPath><clipPath id="mx-clip-367-405-242-26-0"><rect x="367" y="405" width="242" height="26"/></clipPath><clipPath id="mx-clip-367-457-242-26-0"><rect x="367" y="457" width="242" height="26"/></clipPath><clipPath id="mx-clip-367-483-242-26-0"><rect x="367" y="483" width="242" height="26"/></clipPath><clipPath id="mx-clip-1247-165-172-26-0"><rect x="1247" y="165" width="172" height="26"/></clipPath><clipPath id="mx-clip-1247-191-172-26-0"><rect x="1247" y="191" width="172" height="26"/></clipPath><clipPath id="mx-clip-1247-217-172-26-0"><rect x="1247" y="217" width="172" height="26"/></clipPath><clipPath id="mx-clip-1247-243-172-26-0"><rect x="1247" y="243" width="172" height="26"/></clipPath><clipPath id="mx-clip-1247-269-172-26-0"><rect x="1247" y="269" width="172" height="26"/></clipPath><clipPath id="mx-clip-7-178-242-26-0"><rect x="7" y="178" width="242" height="26"/></clipPath><clipPath id="mx-clip-7-282-242-26-0"><rect x="7" y="282" width="242" height="26"/></clipPath><clipPath id="mx-clip-7-334-242-26-0"><rect x="7" y="334" width="242" height="26"/></clipPath><clipPath id="mx-clip-7-360-242-26-0"><rect x="7" y="360" width="242" height="26"/></clipPath><clipPath id="mx-clip-7-386-242-26-0"><rect x="7" y="386" width="242" height="26"/></clipPath><clipPath id="mx-clip-7-412-242-26-0"><rect x="7" y="412" width="242" height="26"/></clipPath><clipPath id="mx-clip-7-438-242-26-0"><rect x="7" y="438" width="242" height="26"/></clipPath><clipPath id="mx-clip-7-464-242-26-0"><rect x="7" y="464" width="242" height="26"/></clipPath><clipPath id="mx-clip-7-490-242-26-0"><rect x="7" y="490" width="242" height="26"/></clipPath><clipPath id="mx-clip-7-542-242-26-0"><rect x="7" y="542" width="242" height="26"/></clipPath><clipPath id="mx-clip-7-568-242-26-0"><rect x="7" y="568" width="242" height="26"/></clipPath><clipPath id="mx-clip-7-594-242-26-0"><rect x="7" y="594" width="242" height="26"/></clipPath><clipPath id="mx-clip-7-620-242-26-0"><rect x="7" y="620" width="242" height="26"/></clipPath><clipPath id="mx-clip-7-646-242-26-0"><rect x="7" y="646" width="242" height="26"/></clipPath><clipPath id="mx-clip-1617-477-162-26-0"><rect x="1617" 
y="477" width="162" height="26"/></clipPath><clipPath id="mx-clip-1617-503-162-26-0"><rect x="1617" y="503" width="162" height="26"/></clipPath><clipPath id="mx-clip-1617-529-162-26-0"><rect x="1617" y="529" width="162" height="26"/></clipPath><clipPath id="mx-clip-1617-555-162-26-0"><rect x="1617" y="555" width="162" height="26"/></clipPath><clipPath id="mx-clip-1617-581-162-26-0"><rect x="1617" y="581" width="162" height="26"/></clipPath><clipPath id="mx-clip-1617-607-162-26-0"><rect x="1617" y="607" width="162" height="26"/></clipPath><clipPath id="mx-clip-1617-633-162-26-0"><rect x="1617" y="633" width="162" height="26"/></clipPath><clipPath id="mx-clip-1317-555-212-26-0"><rect x="1317" y="555" width="212" height="26"/></clipPath><clipPath id="mx-clip-1317-581-212-26-0"><rect x="1317" y="581" width="212" height="26"/></clipPath><clipPath id="mx-clip-1317-607-212-26-0"><rect x="1317" y="607" width="212" height="26"/></clipPath><clipPath id="mx-clip-1317-633-212-26-0"><rect x="1317" y="633" width="212" height="26"/></clipPath><clipPath id="mx-clip-1317-659-212-26-0"><rect x="1317" y="659" width="212" height="26"/></clipPath><clipPath id="mx-clip-1317-685-212-26-0"><rect x="1317" y="685" width="212" height="26"/></clipPath><clipPath id="mx-clip-1317-711-212-26-0"><rect x="1317" y="711" width="212" height="26"/></clipPath><clipPath id="mx-clip-1317-737-212-26-0"><rect x="1317" y="737" width="212" height="26"/></clipPath><clipPath id="mx-clip-1317-763-212-26-0"><rect x="1317" y="763" width="212" height="26"/></clipPath><clipPath id="mx-clip-1317-789-212-26-0"><rect x="1317" y="789" width="212" height="26"/></clipPath><clipPath id="mx-clip-1317-815-212-26-0"><rect x="1317" y="815" width="212" height="26"/></clipPath><clipPath id="mx-clip-1317-841-212-26-0"><rect x="1317" y="841" width="212" height="26"/></clipPath><clipPath id="mx-clip-1317-867-212-26-0"><rect x="1317" y="867" width="212" height="26"/></clipPath><clipPath id="mx-clip-1317-893-212-26-0"><rect x="1317" y="893" width="212" height="26"/></clipPath><clipPath id="mx-clip-967-730-272-26-0"><rect x="967" y="730" width="272" height="26"/></clipPath><clipPath id="mx-clip-967-756-272-26-0"><rect x="967" y="756" width="272" height="26"/></clipPath><clipPath id="mx-clip-967-782-272-26-0"><rect x="967" y="782" width="272" height="26"/></clipPath><clipPath id="mx-clip-967-808-272-26-0"><rect x="967" y="808" width="272" height="26"/></clipPath><clipPath id="mx-clip-967-834-272-26-0"><rect x="967" y="834" width="272" height="26"/></clipPath><clipPath id="mx-clip-967-860-272-26-0"><rect x="967" y="860" width="272" height="26"/></clipPath><clipPath id="mx-clip-967-886-272-26-0"><rect x="967" y="886" width="272" height="26"/></clipPath><clipPath id="mx-clip-967-912-272-26-0"><rect x="967" y="912" width="272" height="26"/></clipPath><clipPath id="mx-clip-967-938-272-26-0"><rect x="967" y="938" width="272" height="26"/></clipPath><clipPath id="mx-clip-967-964-272-26-0"><rect x="967" y="964" width="272" height="26"/></clipPath><clipPath id="mx-clip-967-990-272-26-0"><rect x="967" y="990" width="272" height="26"/></clipPath><clipPath id="mx-clip-967-1016-272-26-0"><rect x="967" y="1016" width="272" height="26"/></clipPath><clipPath id="mx-clip-967-1042-272-26-0"><rect x="967" y="1042" width="272" height="26"/></clipPath><clipPath id="mx-clip-967-1068-272-26-0"><rect x="967" y="1068" width="272" height="26"/></clipPath><clipPath id="mx-clip-967-1094-272-26-0"><rect x="967" y="1094" width="272" height="26"/></clipPath><clipPath 
id="mx-clip-967-1120-272-26-0"><rect x="967" y="1120" width="272" height="26"/></clipPath><clipPath id="mx-clip-967-1146-272-26-0"><rect x="967" y="1146" width="272" height="26"/></clipPath><clipPath id="mx-clip-967-1172-272-26-0"><rect x="967" y="1172" width="272" height="26"/></clipPath><clipPath id="mx-clip-967-1198-272-26-0"><rect x="967" y="1198" width="272" height="26"/></clipPath><clipPath id="mx-clip-967-1224-272-26-0"><rect x="967" y="1224" width="272" height="26"/></clipPath><clipPath id="mx-clip-967-1250-272-26-0"><rect x="967" y="1250" width="272" height="26"/></clipPath><clipPath id="mx-clip-967-1276-272-26-0"><rect x="967" y="1276" width="272" height="26"/></clipPath><clipPath id="mx-clip-967-1302-272-26-0"><rect x="967" y="1302" width="272" height="26"/></clipPath><clipPath id="mx-clip-967-1328-272-26-0"><rect x="967" y="1328" width="272" height="26"/></clipPath><clipPath id="mx-clip-967-1354-272-26-0"><rect x="967" y="1354" width="272" height="26"/></clipPath><clipPath id="mx-clip-67-815-212-26-0"><rect x="67" y="815" width="212" height="26"/></clipPath><clipPath id="mx-clip-67-841-212-26-0"><rect x="67" y="841" width="212" height="26"/></clipPath><clipPath id="mx-clip-67-867-212-26-0"><rect x="67" y="867" width="212" height="26"/></clipPath><clipPath id="mx-clip-67-893-212-26-0"><rect x="67" y="893" width="212" height="26"/></clipPath><clipPath id="mx-clip-67-919-212-26-0"><rect x="67" y="919" width="212" height="26"/></clipPath><clipPath id="mx-clip-67-945-212-26-0"><rect x="67" y="945" width="212" height="26"/></clipPath><clipPath id="mx-clip-347-1068-132-26-0"><rect x="347" y="1068" width="132" height="26"/></clipPath><clipPath id="mx-clip-347-1094-132-26-0"><rect x="347" y="1094" width="132" height="26"/></clipPath><clipPath id="mx-clip-37-1117-212-26-0"><rect x="37" y="1117" width="212" height="26"/></clipPath><clipPath id="mx-clip-37-1143-212-26-0"><rect x="37" y="1143" width="212" height="26"/></clipPath><clipPath id="mx-clip-37-1169-212-26-0"><rect x="37" y="1169" width="212" height="26"/></clipPath><clipPath id="mx-clip-37-1221-212-26-0"><rect x="37" y="1221" width="212" height="26"/></clipPath><clipPath id="mx-clip-37-1247-212-26-0"><rect x="37" y="1247" width="212" height="26"/></clipPath><clipPath id="mx-clip-37-1273-212-26-0"><rect x="37" y="1273" width="212" height="26"/></clipPath><clipPath id="mx-clip-37-1299-212-26-0"><rect x="37" y="1299" width="212" height="26"/></clipPath><clipPath id="mx-clip-37-1325-212-26-0"><rect x="37" y="1325" width="212" height="26"/></clipPath><clipPath id="mx-clip-37-1351-212-26-0"><rect x="37" y="1351" width="212" height="26"/></clipPath><clipPath id="mx-clip-37-1377-212-26-0"><rect x="37" y="1377" width="212" height="26"/></clipPath><clipPath id="mx-clip-37-1403-212-26-0"><rect x="37" y="1403" width="212" height="26"/></clipPath><clipPath id="mx-clip-587-1065-242-26-0"><rect x="587" y="1065" width="242" height="26"/></clipPath><clipPath id="mx-clip-587-1091-242-26-0"><rect x="587" y="1091" width="242" height="26"/></clipPath><clipPath id="mx-clip-587-1117-242-26-0"><rect x="587" y="1117" width="242" height="26"/></clipPath><clipPath id="mx-clip-587-1143-242-26-0"><rect x="587" y="1143" width="242" height="26"/></clipPath><clipPath id="mx-clip-587-1169-242-26-0"><rect x="587" y="1169" width="242" height="26"/></clipPath><clipPath id="mx-clip-377-1280-162-26-0"><rect x="377" y="1280" width="162" height="26"/></clipPath><clipPath id="mx-clip-377-1306-162-26-0"><rect x="377" y="1306" width="162" height="26"/></clipPath><clipPath 
id="mx-clip-377-1332-162-26-0"><rect x="377" y="1332" width="162" height="26"/></clipPath><clipPath id="mx-clip-377-1358-162-26-0"><rect x="377" y="1358" width="162" height="26"/></clipPath><clipPath id="mx-clip-377-1384-162-26-0"><rect x="377" y="1384" width="162" height="26"/></clipPath><clipPath id="mx-clip-377-1410-162-26-0"><rect x="377" y="1410" width="162" height="26"/></clipPath><clipPath id="mx-clip-377-1436-162-26-0"><rect x="377" y="1436" width="162" height="26"/></clipPath><clipPath id="mx-clip-377-1462-162-26-0"><rect x="377" y="1462" width="162" height="26"/></clipPath><clipPath id="mx-clip-377-1488-162-26-0"><rect x="377" y="1488" width="162" height="26"/></clipPath><clipPath id="mx-clip-377-1514-162-26-0"><rect x="377" y="1514" width="162" height="26"/></clipPath><clipPath id="mx-clip-377-1540-162-26-0"><rect x="377" y="1540" width="162" height="26"/></clipPath><clipPath id="mx-clip-607-1280-132-26-0"><rect x="607" y="1280" width="132" height="26"/></clipPath><clipPath id="mx-clip-607-1306-132-26-0"><rect x="607" y="1306" width="132" height="26"/></clipPath><clipPath id="mx-clip-607-1332-132-26-0"><rect x="607" y="1332" width="132" height="26"/></clipPath><clipPath id="mx-clip-607-1358-132-26-0"><rect x="607" y="1358" width="132" height="26"/></clipPath><clipPath id="mx-clip-607-1384-132-26-0"><rect x="607" y="1384" width="132" height="26"/></clipPath><clipPath id="mx-clip-677-1462-172-26-0"><rect x="677" y="1462" width="172" height="26"/></clipPath><clipPath id="mx-clip-677-1488-172-26-0"><rect x="677" y="1488" width="172" height="26"/></clipPath><clipPath id="mx-clip-677-1514-172-26-0"><rect x="677" y="1514" width="172" height="26"/></clipPath><clipPath id="mx-clip-877-1566-142-26-0"><rect x="877" y="1566" width="142" height="26"/></clipPath><clipPath id="mx-clip-877-1592-142-26-0"><rect x="877" y="1592" width="142" height="26"/></clipPath><clipPath id="mx-clip-877-1618-142-26-0"><rect x="877" y="1618" width="142" height="26"/></clipPath></defs><path d="M 573 712 L 573 686 L 843 686 L 843 712" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 573 712 L 573 920 L 843 920 L 843 712" fill="#ffffff" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 573 712 L 843 712" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><g fill="#000000" font-family="Helvetica" text-anchor="middle" font-size="12px"><text x="707.5" y="703.5">RRDDIM</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-577-717-262-26-0)" font-size="12px"><text x="578.5" y="729.5">avl avl</text></g><path d="M 573 764 L 573 738 L 843 738 L 843 764" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 573 764 L 843 764" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><g fill="#000000" font-family="Helvetica" text-anchor="middle" font-size="12px"><text x="707.5" y="754.5">... dimension definition</text></g><path d="M 573 790 L 573 764 L 843 764 L 843 790" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 573 790 L 843 790" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><g fill="#000000" font-family="Helvetica" text-anchor="middle" font-size="12px"><text x="707.5" y="780.5">... 
temporary data</text></g><path d="M 843 803 L 863 803 L 863 660 L 708 660 L 708 679.63" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 708 684.88 L 704.5 677.88 L 708 679.63 L 711.5 677.88 Z" fill="#000000" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-577-795-262-26-0)" font-size="12px"><text x="578.5" y="807.5">struct rrddim *next</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-577-821-262-26-0)" font-size="12px"><text x="578.5" y="833.5">struct rrdsset *rrdset</text></g><path d="M 573 868 L 573 842 L 843 842 L 843 868" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 573 868 L 843 868" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><g fill="#000000" font-family="Helvetica" text-anchor="middle" font-size="12px"><text x="707.5" y="858.5">... disk data checking</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-577-873-262-26-0)" font-size="12px"><text x="578.5" y="885.5">struct rrddimvar *variables</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-577-899-262-26-0)" font-size="12px"><text x="578.5" y="911.5">storage_number values[]</text></g><path d="M 363 139.79 L 363 113.79 L 613 113.79 L 613 139.79" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 363 139.79 L 363 503.79 L 613 503.79 L 613 139.79" fill="#ffffff" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 363 139.79 L 613 139.79" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><g fill="#000000" font-family="Helvetica" text-anchor="middle" font-size="12px"><text x="487.5" y="131.29">RRDSET</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-367-145-242-26-0)" font-size="12px"><text x="368.5" y="157.29">avl avl</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-367-171-242-26-0)" font-size="12px"><text x="368.5" y="183.29">avl avlname</text></g><path d="M 363 217.79 L 363 191.79 L 613 191.79 L 613 217.79" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 363 217.79 L 613 217.79" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><g fill="#000000" font-family="Helvetica" text-anchor="middle" font-size="12px"><text x="487.5" y="208.29">... set configuration</text></g><path d="M 363 243.79 L 363 217.79 L 613 217.79 L 613 243.79" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 363 243.79 L 613 243.79" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><g fill="#000000" font-family="Helvetica" text-anchor="middle" font-size="12px"><text x="487.5" y="234.29">... 
temporary data</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-367-249-242-26-0)" font-size="12px"><text x="368.5" y="261.29">RRDFAMILY *rrdfamily</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-367-275-242-26-0)" font-size="12px"><text x="368.5" y="287.29">RRDHOST *rrdhost</text></g><path d="M 613 309 L 633 309 L 633 66 L 488 66 L 488 107.63" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 488 112.88 L 484.5 105.88 L 488 107.63 L 491.5 105.88 Z" fill="#000000" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-367-301-242-26-0)" font-size="12px"><text x="368.5" y="313.29">struct rrdset *next</text></g><path d="M 363 347.79 L 363 321.79 L 613 321.79 L 613 347.79" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 363 347.79 L 613 347.79" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><g fill="#000000" font-family="Helvetica" text-anchor="middle" font-size="12px"><text x="487.5" y="338.29">... local variables</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-367-353-242-26-0)" font-size="12px"><text x="368.5" y="365.29">avl_tree_lock rrdvar_root_index</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-367-379-242-26-0)" font-size="12px"><text x="368.5" y="391.29">RRDSETVAR *variables</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-367-405-242-26-0)" font-size="12px"><text x="368.5" y="417.29">RRDCALC *alarms</text></g><path d="M 363 451.79 L 363 425.79 L 613 425.79 L 613 451.79" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 363 451.79 L 613 451.79" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><g fill="#000000" font-family="Helvetica" text-anchor="middle" font-size="12px"><text x="487.5" y="442.29">... 
disk data checking</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-367-457-242-26-0)" font-size="12px"><text x="368.5" y="469.29">avl_tree_lock dimensions_index</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-367-483-242-26-0)" font-size="12px"><text x="368.5" y="495.29">RRDDIM *dimensions</text></g><path d="M 253 550 L 308 550 L 308 94 L 488 94 L 488 107.63" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 488 112.88 L 484.5 105.88 L 488 107.63 L 491.5 105.88 Z" fill="#000000" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><g transform="translate(283.5,243.5)"><switch><foreignObject style="overflow:visible;" pointer-events="all" width="48" height="11" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility"><div xmlns="http://www.w3.org/1999/xhtml" style="display: inline-block; font-size: 11px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.2; vertical-align: top; white-space: nowrap; text-align: center;"><div xmlns="http://www.w3.org/1999/xhtml" style="display:inline-block;text-align:inherit;text-decoration:inherit;background-color:#ffffff;">avl tree of</div></div></foreignObject><text x="24" y="11" fill="#000000" text-anchor="middle" font-size="11px" font-family="Helvetica">avl tree of</text></switch></g><path d="M 253 628 L 933 628 L 933 427 L 1698 427 L 1698 440.63" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 1698 445.88 L 1694.5 438.88 L 1698 440.63 L 1701.5 438.88 Z" fill="#000000" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><g transform="translate(908.5,469.5)"><switch><foreignObject style="overflow:visible;" pointer-events="all" width="48" height="11" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility"><div xmlns="http://www.w3.org/1999/xhtml" style="display: inline-block; font-size: 11px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.2; vertical-align: top; white-space: nowrap; text-align: center;"><div xmlns="http://www.w3.org/1999/xhtml" style="display:inline-block;text-align:inherit;text-decoration:inherit;background-color:#ffffff;">avl tree of</div></div></foreignObject><text x="24" y="11" fill="#000000" text-anchor="middle" font-size="11px" font-family="Helvetica">avl tree of</text></switch></g><path d="M 253 602 L 930 602 C 930 598.1 936 598.1 936 602 L 936 602 L 1003 602 L 1003 430 C 1006.9 430 1006.9 424 1003 424 L 1003 424 L 1003 114 L 1333 114 L 1333 127.63" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 1333 132.88 L 1329.5 125.88 L 1333 127.63 L 1336.5 125.88 Z" fill="#000000" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><g transform="translate(978.5,552.5)"><switch><foreignObject style="overflow:visible;" pointer-events="all" width="48" height="11" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility"><div xmlns="http://www.w3.org/1999/xhtml" style="display: inline-block; font-size: 11px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.2; vertical-align: top; white-space: nowrap; text-align: center;"><div xmlns="http://www.w3.org/1999/xhtml" style="display:inline-block;text-align:inherit;text-decoration:inherit;background-color:#ffffff;">avl tree of</div></div></foreignObject><text x="24" y="11" fill="#000000" text-anchor="middle" font-size="11px" font-family="Helvetica">avl tree of</text></switch></g><path d="M 1243 160 L 1243 134 L 1423 134 L 1423 160" fill="none" 
stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 1243 160 L 1243 290 L 1423 290 L 1423 160" fill="#ffffff" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 1243 160 L 1423 160" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><g fill="#000000" font-family="Helvetica" text-anchor="middle" font-size="12px"><text x="1332.5" y="151.5">RRDFAMILY</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-1247-165-172-26-0)" font-size="12px"><text x="1248.5" y="177.5">avl avl</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-1247-191-172-26-0)" font-size="12px"><text x="1248.5" y="203.5">const char *family</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-1247-217-172-26-0)" font-size="12px"><text x="1248.5" y="229.5">uint32_t hash_family</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-1247-243-172-26-0)" font-size="12px"><text x="1248.5" y="255.5">size_t use_count</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-1247-269-172-26-0)" font-size="12px"><text x="1248.5" y="281.5">avl_tree_lock rrdvar_root_index</text></g><path d="M 3 173 L 3 147 L 253 147 L 253 173" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 3 173 L 3 667 L 253 667 L 253 173" fill="#ffffff" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 3 173 L 253 173" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><g fill="#000000" font-family="Helvetica" text-anchor="middle" font-size="12px"><text x="127.5" y="164.5">RRDHOST</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-7-178-242-26-0)" font-size="12px"><text x="8.5" y="190.5">avl avl</text></g><path d="M 3 225 L 3 199 L 253 199 L 253 225" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 3 225 L 253 225" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><g fill="#000000" font-family="Helvetica" text-anchor="middle" font-size="12px"><text x="127.5" y="215.5">... host information</text></g><path d="M 3 251 L 3 225 L 253 225 L 253 251" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 3 251 L 253 251" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><g fill="#000000" font-family="Helvetica" text-anchor="middle" font-size="12px"><text x="127.5" y="241.5">... streaming</text></g><path d="M 3 277 L 3 251 L 253 251 L 253 277" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 3 277 L 253 277" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><g fill="#000000" font-family="Helvetica" text-anchor="middle" font-size="12px"><text x="127.5" y="267.5">... health monitoring options</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-7-282-242-26-0)" font-size="12px"><text x="8.5" y="294.5">RRDCALC *alarms</text></g><path d="M 3 329 L 3 303 L 253 303 L 253 329" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 3 329 L 3 433 L 253 433 L 253 329" fill="#ffffff" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 3 329 L 253 329" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><g fill="#000000" font-family="Helvetica" text-anchor="middle" font-size="12px"><text x="127.5" y="320.5">... 
health monitoring options</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-7-334-242-26-0)" font-size="12px"><text x="8.5" y="346.5">ALARM_LOG health_log</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-7-360-242-26-0)" font-size="12px"><text x="8.5" y="372.5">uint32_t health_last_processed_id</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-7-386-242-26-0)" font-size="12px"><text x="8.5" y="398.5">uint32_t health_max_unique_id</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-7-412-242-26-0)" font-size="12px"><text x="8.5" y="424.5">uint32_t health_max_alarm_id</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-7-438-242-26-0)" font-size="12px"><text x="8.5" y="450.5">RRDCALCTEMPLATE *templates</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-7-464-242-26-0)" font-size="12px"><text x="8.5" y="476.5">RRDSET *rrdset_root</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-7-490-242-26-0)" font-size="12px"><text x="8.5" y="502.5">netdata_rwlock_t rrdhost_rwlock</text></g><path d="M 3 537 L 3 511 L 253 511 L 253 537" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 3 537 L 3 667 L 253 667 L 253 537" fill="#ffffff" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 3 537 L 253 537" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><g fill="#000000" font-family="Helvetica" text-anchor="middle" font-size="12px"><text x="127.5" y="528.5">... indexes</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-7-542-242-26-0)" font-size="12px"><text x="8.5" y="554.5">avl_tree_lock rrdset_root_index</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-7-568-242-26-0)" font-size="12px"><text x="8.5" y="580.5">avl_tree_lock rrdset_root_index_name</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-7-594-242-26-0)" font-size="12px"><text x="8.5" y="606.5">avl_tree_lock rrdfamily_root_index</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-7-620-242-26-0)" font-size="12px"><text x="8.5" y="632.5">avl_tree_lock rrdvar_root_index</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-7-646-242-26-0)" font-size="12px"><text x="8.5" y="658.5">struct rrdhost *next</text></g><path d="M 253 654 L 273 654 L 273 631 C 276.9 631 276.9 625 273 625 L 273 625 L 273 605 C 276.9 605 276.9 599 273 599 L 273 599 L 273 553 C 276.9 553 276.9 547 273 547 L 273 547 L 273 127 L 128 127 L 128 140.63" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 128 145.88 L 124.5 138.88 L 128 140.63 L 131.5 138.88 Z" fill="#000000" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 1613 472 L 1613 447 L 1783 447 L 1783 472" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 1613 472 L 1613 654 L 1783 654 L 1783 472" fill="#ffffff" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 1613 472 L 1783 472" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><g fill="#000000" font-family="Helvetica" text-anchor="middle" font-size="12px"><text x="1697.5" y="464">RRDVAR</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-1617-477-162-26-0)" font-size="12px"><text x="1618.5" y="489.5">avl avl</text></g><g 
fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-1617-503-162-26-0)" font-size="12px"><text x="1618.5" y="515.5">char *name</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-1617-529-162-26-0)" font-size="12px"><text x="1618.5" y="541.5">uint32_t hash</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-1617-555-162-26-0)" font-size="12px"><text x="1618.5" y="567.5">RRDVAR_TYPE type</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-1617-581-162-26-0)" font-size="12px"><text x="1618.5" y="593.5">RRDVAR_OPTIONS options</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-1617-607-162-26-0)" font-size="12px"><text x="1618.5" y="619.5">void *value</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-1617-633-162-26-0)" font-size="12px"><text x="1618.5" y="645.5">time_t last_updated</text></g><path d="M 1313 550 L 1313 524 L 1533 524 L 1533 550" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 1313 550 L 1313 914 L 1533 914 L 1533 550" fill="#ffffff" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 1313 550 L 1533 550" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><g fill="#000000" font-family="Helvetica" text-anchor="middle" font-size="12px"><text x="1422.5" y="541.5">RRDSETVAR</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-1317-555-212-26-0)" font-size="12px"><text x="1318.5" y="567.5">char *variable</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-1317-581-212-26-0)" font-size="12px"><text x="1318.5" y="593.5">uint32_t hash</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-1317-607-212-26-0)" font-size="12px"><text x="1318.5" y="619.5">char *key_fullid</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-1317-633-212-26-0)" font-size="12px"><text x="1318.5" y="645.5">char *key_fullname</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-1317-659-212-26-0)" font-size="12px"><text x="1318.5" y="671.5">RRDVAR_TYPE type</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-1317-685-212-26-0)" font-size="12px"><text x="1318.5" y="697.5">void *value</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-1317-711-212-26-0)" font-size="12px"><text x="1318.5" y="723.5">RRDVAR_OPTIONS options</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-1317-737-212-26-0)" font-size="12px"><text x="1318.5" y="749.5">RRDVAR *var_local</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-1317-763-212-26-0)" font-size="12px"><text x="1318.5" y="775.5">RRDVAR *var_family</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-1317-789-212-26-0)" font-size="12px"><text x="1318.5" y="801.5">RRDVAR *var_host</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-1317-815-212-26-0)" font-size="12px"><text x="1318.5" y="827.5">RRDVAR *var_family_name</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-1317-841-212-26-0)" font-size="12px"><text x="1318.5" y="853.5">RRDVAR *var_host_name</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-1317-867-212-26-0)" font-size="12px"><text x="1318.5" y="879.5">struct rrdset *rrdset</text></g><path d="M 1533 901 L 1553 901 L 1553 504 L 1423 504 L 1423 
517.63" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 1423 522.88 L 1419.5 515.88 L 1423 517.63 L 1426.5 515.88 Z" fill="#000000" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-1317-893-212-26-0)" font-size="12px"><text x="1318.5" y="905.5">struct rrdsetvar *next</text></g><path d="M 963 725 L 963 699 L 1243 699 L 1243 725" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 963 725 L 963 1375 L 1243 1375 L 1243 725" fill="#ffffff" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 963 725 L 1243 725" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><g fill="#000000" font-family="Helvetica" text-anchor="middle" font-size="12px"><text x="1102.5" y="716.5">RRDDIMVAR</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-967-730-272-26-0)" font-size="12px"><text x="968.5" y="742.5">char *prefix</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-967-756-272-26-0)" font-size="12px"><text x="968.5" y="768.5">char *suffix</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-967-782-272-26-0)" font-size="12px"><text x="968.5" y="794.5">char *key_id</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-967-808-272-26-0)" font-size="12px"><text x="968.5" y="820.5">char *key_name</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-967-834-272-26-0)" font-size="12px"><text x="968.5" y="846.5">char *key_contextid</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-967-860-272-26-0)" font-size="12px"><text x="968.5" y="872.5">char *key_contexname</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-967-886-272-26-0)" font-size="12px"><text x="968.5" y="898.5">char *key_fullidid</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-967-912-272-26-0)" font-size="12px"><text x="968.5" y="924.5">char *key_fullidname</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-967-938-272-26-0)" font-size="12px"><text x="968.5" y="950.5">char *key_fullnameid</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-967-964-272-26-0)" font-size="12px"><text x="968.5" y="976.5">char *key_fullnamename</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-967-990-272-26-0)" font-size="12px"><text x="968.5" y="1002.5">RRDVAR_TYPE type</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-967-1016-272-26-0)" font-size="12px"><text x="968.5" y="1028.5">void *value</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-967-1042-272-26-0)" font-size="12px"><text x="968.5" y="1054.5">RRDVAR_OPTIONS options</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-967-1068-272-26-0)" font-size="12px"><text x="968.5" y="1080.5">RRDVAR *var_local_id</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-967-1094-272-26-0)" font-size="12px"><text x="968.5" y="1106.5">RRDVAR *var_local_name</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-967-1120-272-26-0)" font-size="12px"><text x="968.5" y="1132.5">RRDVAR *var_family_id</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-967-1146-272-26-0)" font-size="12px"><text x="968.5" y="1158.5">RRDVAR 
*var_family_name</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-967-1172-272-26-0)" font-size="12px"><text x="968.5" y="1184.5">RRDVAR *var_family_contextid</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-967-1198-272-26-0)" font-size="12px"><text x="968.5" y="1210.5">RRDVAR *var_family_contextname</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-967-1224-272-26-0)" font-size="12px"><text x="968.5" y="1236.5">RRDVAR *var_host_chartidid</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-967-1250-272-26-0)" font-size="12px"><text x="968.5" y="1262.5">RRDVAR *var_host_chartidname</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-967-1276-272-26-0)" font-size="12px"><text x="968.5" y="1288.5">RRDVAR *var_host_chartnameid</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-967-1302-272-26-0)" font-size="12px"><text x="968.5" y="1314.5">RRDVAR *var_host_chartnamename</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-967-1328-272-26-0)" font-size="12px"><text x="968.5" y="1340.5">struct rrddim *rrddim</text></g><path d="M 1243 1362 L 1263 1362 L 1263 679 L 1103 679 L 1103 692.63" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 1103 697.88 L 1099.5 690.88 L 1103 692.63 L 1106.5 690.88 Z" fill="#000000" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-967-1354-272-26-0)" font-size="12px"><text x="968.5" y="1366.5">struct rrddimvar *next</text></g><path d="M 843 881 L 903 881 L 903 679 L 1103 679 L 1103 692.63" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 1103 697.88 L 1099.5 690.88 L 1103 692.63 L 1106.5 690.88 Z" fill="#000000" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><g transform="translate(874.5,694.5)"><switch><foreignObject style="overflow:visible;" pointer-events="all" width="57" height="11" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility"><div xmlns="http://www.w3.org/1999/xhtml" style="display: inline-block; font-size: 11px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.2; vertical-align: top; white-space: nowrap; text-align: center;"><div xmlns="http://www.w3.org/1999/xhtml" style="display:inline-block;text-align:inherit;text-decoration:inherit;background-color:#ffffff;">linked list of</div></div></foreignObject><text x="29" y="11" fill="#000000" text-anchor="middle" font-size="11px" font-family="Helvetica">linked list of</text></switch></g><path d="M 843 829 L 843 739 L 860 739 C 860 735.1 866 735.1 866 739 L 866 739 L 893 739 L 893 631 C 896.9 631 896.9 625 893 625 L 893 625 L 893 605 C 896.9 605 896.9 599 893 599 L 893 599 L 893 89 L 636 89 C 636 85.1 630 85.1 630 89 L 630 89 L 488 89 L 488 89 L 488 107.63" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 488 112.88 L 484.5 105.88 L 488 107.63 L 491.5 105.88 Z" fill="#000000" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 253 576 L 270 576 C 270 572.1 276 572.1 276 576 L 276 576 L 308 576 L 308 94 L 488 94 L 488 107.63" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 488 112.88 L 484.5 105.88 L 488 107.63 L 491.5 105.88 Z" fill="#000000" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><g 
transform="translate(283.5,256.5)"><switch><foreignObject style="overflow:visible;" pointer-events="all" width="48" height="11" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility"><div xmlns="http://www.w3.org/1999/xhtml" style="display: inline-block; font-size: 11px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.2; vertical-align: top; white-space: nowrap; text-align: center;"><div xmlns="http://www.w3.org/1999/xhtml" style="display:inline-block;text-align:inherit;text-decoration:inherit;background-color:#ffffff;">avl tree of</div></div></foreignObject><text x="24" y="11" fill="#000000" text-anchor="middle" font-size="11px" font-family="Helvetica">avl tree of</text></switch></g><path d="M 613 465 L 708 465 L 708 599 C 711.9 599 711.9 605 708 605 L 708 605 L 708 625 C 711.9 625 711.9 631 708 631 L 708 631 L 708 679.63" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 708 684.88 L 704.5 677.88 L 708 679.63 L 711.5 677.88 Z" fill="#000000" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><g transform="translate(683.5,522.5)"><switch><foreignObject style="overflow:visible;" pointer-events="all" width="48" height="11" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility"><div xmlns="http://www.w3.org/1999/xhtml" style="display: inline-block; font-size: 11px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.2; vertical-align: top; white-space: nowrap; text-align: center;"><div xmlns="http://www.w3.org/1999/xhtml" style="display:inline-block;text-align:inherit;text-decoration:inherit;background-color:#ffffff;">avl tree of</div></div></foreignObject><text x="24" y="11" fill="#000000" text-anchor="middle" font-size="11px" font-family="Helvetica">avl tree of</text></switch></g><path d="M 613 491 L 708 491 L 708 491 L 708 599 C 711.9 599 711.9 605 708 605 L 708 605 L 708 625 C 711.9 625 711.9 631 708 631 L 708 631 L 708 679.63" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 708 684.88 L 704.5 677.88 L 708 679.63 L 711.5 677.88 Z" fill="#000000" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><g transform="translate(679.5,535.5)"><switch><foreignObject style="overflow:visible;" pointer-events="all" width="57" height="11" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility"><div xmlns="http://www.w3.org/1999/xhtml" style="display: inline-block; font-size: 11px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.2; vertical-align: top; white-space: nowrap; text-align: center;"><div xmlns="http://www.w3.org/1999/xhtml" style="display:inline-block;text-align:inherit;text-decoration:inherit;background-color:#ffffff;">linked list of</div></div></foreignObject><text x="29" y="11" fill="#000000" text-anchor="middle" font-size="11px" font-family="Helvetica">linked list of</text></switch></g><path d="M 613 387 L 890 387 C 890 383.1 896 383.1 896 387 L 896 387 L 1000 387 C 1000 383.1 1006 383.1 1006 387 L 1006 387 L 1423 387 L 1423 424 C 1426.9 424 1426.9 430 1423 430 L 1423 430 L 1423 517.63" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 1423 522.88 L 1419.5 515.88 L 1423 517.63 L 1426.5 515.88 Z" fill="#000000" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><g transform="translate(1058.5,381.5)"><switch><foreignObject style="overflow:visible;" pointer-events="all" width="57" height="11" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility"><div 
xmlns="http://www.w3.org/1999/xhtml" style="display: inline-block; font-size: 11px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.2; vertical-align: top; white-space: nowrap; text-align: center;"><div xmlns="http://www.w3.org/1999/xhtml" style="display:inline-block;text-align:inherit;text-decoration:inherit;background-color:#ffffff;">linked list of</div></div></foreignObject><text x="29" y="11" fill="#000000" text-anchor="middle" font-size="11px" font-family="Helvetica">linked list of</text></switch></g><path d="M 613 361 L 890 361 C 890 357.1 896 357.1 896 361 L 896 361 L 1000 361 C 1000 357.1 1006 357.1 1006 361 L 1006 361 L 1698 361 L 1698 440.63" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 1698 445.88 L 1694.5 438.88 L 1698 440.63 L 1701.5 438.88 Z" fill="#000000" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><g transform="translate(1174.5,355.5)"><switch><foreignObject style="overflow:visible;" pointer-events="all" width="48" height="11" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility"><div xmlns="http://www.w3.org/1999/xhtml" style="display: inline-block; font-size: 11px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.2; vertical-align: top; white-space: nowrap; text-align: center;"><div xmlns="http://www.w3.org/1999/xhtml" style="display:inline-block;text-align:inherit;text-decoration:inherit;background-color:#ffffff;">avl tree of</div></div></foreignObject><text x="24" y="11" fill="#000000" text-anchor="middle" font-size="11px" font-family="Helvetica">avl tree of</text></switch></g><path d="M 613 283 L 613 277 L 630 277 C 630 273.1 636 273.1 636 277 L 636 277 L 703 277 L 703 92 C 706.9 92 706.9 86 703 86 L 703 86 L 703 10 L 128 10 L 128 140.63" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 128 145.88 L 124.5 138.88 L 128 140.63 L 131.5 138.88 Z" fill="#000000" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 613 257 L 630 257 C 630 253.1 636 253.1 636 257 L 636 257 L 700 257 C 700 253.1 706 253.1 706 257 L 706 257 L 890 257 C 890 253.1 896 253.1 896 257 L 896 257 L 1003 257 L 1003 257 L 1003 114 L 1333 114 L 1333 127.63" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 1333 132.88 L 1329.5 125.88 L 1333 127.63 L 1336.5 125.88 Z" fill="#000000" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 63 810 L 63 784 L 283 784 L 283 810" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 63 810 L 63 966 L 283 966 L 283 810" fill="#ffffff" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 63 810 L 283 810" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><g fill="#000000" font-family="Helvetica" text-anchor="middle" font-size="12px"><text x="172.5" y="801.5">ALARM_LOG</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-67-815-212-26-0)" font-size="12px"><text x="68.5" y="827.5">uint32_t next_log_id</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-67-841-212-26-0)" font-size="12px"><text x="68.5" y="853.5">uint32_t next_alarm_id</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-67-867-212-26-0)" font-size="12px"><text x="68.5" y="879.5">unsigned int count</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-67-893-212-26-0)" font-size="12px"><text x="68.5" y="905.5">unsigned int 
max</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-67-919-212-26-0)" font-size="12px"><text x="68.5" y="931.5">ALARM_ENTRY *alarms</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-67-945-212-26-0)" font-size="12px"><text x="68.5" y="957.5">netdata_rwlock_t alarm_log_rwlock</text></g><path d="M 283 927 L 413 927 L 413 1030.63" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 413 1035.88 L 409.5 1028.88 L 413 1030.63 L 416.5 1028.88 Z" fill="#000000" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><g transform="translate(374.5,921.5)"><switch><foreignObject style="overflow:visible;" pointer-events="all" width="57" height="11" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility"><div xmlns="http://www.w3.org/1999/xhtml" style="display: inline-block; font-size: 11px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.2; vertical-align: top; white-space: nowrap; text-align: center;"><div xmlns="http://www.w3.org/1999/xhtml" style="display:inline-block;text-align:inherit;text-decoration:inherit;background-color:#ffffff;">linked list of</div></div></foreignObject><text x="29" y="11" fill="#000000" text-anchor="middle" font-size="11px" font-family="Helvetica">linked list of</text></switch></g><path d="M 343 1063 L 343 1037 L 483 1037 L 483 1063" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 343 1063 L 343 1115 L 483 1115 L 483 1063" fill="#ffffff" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 343 1063 L 483 1063" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><g fill="#000000" font-family="Helvetica" text-anchor="middle" font-size="12px"><text x="412.5" y="1054.5">ALARM_ENTRY *</text></g><path d="M 483 1102 L 503 1102 L 503 1017 L 413 1017 L 413 1030.63" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 413 1035.88 L 409.5 1028.88 L 413 1030.63 L 416.5 1028.88 Z" fill="#000000" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-347-1068-132-26-0)" font-size="12px"><text x="348.5" y="1080.5">...</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-347-1094-132-26-0)" font-size="12px"><text x="348.5" y="1106.5">struct alarm_entry *next</text></g><path d="M 253 342 L 270 342 C 270 338.1 276 338.1 276 342 L 276 342 L 305 342 C 305 338.1 311 338.1 311 342 L 311 342 L 333 342 L 333 599 C 336.9 599 336.9 605 333 605 L 333 605 L 333 625 C 336.9 625 336.9 631 333 631 L 333 631 L 333 710 L 173 710 L 173 777.63" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 173 782.88 L 169.5 775.88 L 173 777.63 L 176.5 775.88 Z" fill="#000000" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 33 1086 L 33 1060 L 253 1060 L 253 1086" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 33 1086 L 33 1424 L 253 1424 L 253 1086" fill="#ffffff" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 33 1086 L 253 1086" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><g fill="#000000" font-family="Helvetica" text-anchor="middle" font-size="12px"><text x="142.5" y="1077.5">RRDCALC</text></g><path d="M 33 1112 L 33 1086 L 253 1086 L 253 1112" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 33 1112 
L 253 1112" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><g fill="#000000" font-family="Helvetica" text-anchor="middle" font-size="12px"><text x="142.5" y="1102.5">...</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-37-1117-212-26-0)" font-size="12px"><text x="38.5" y="1129.5">EVAL_EXPRESSION *calculation</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-37-1143-212-26-0)" font-size="12px"><text x="38.5" y="1155.5">EVAL_EXPRESSION *warning</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-37-1169-212-26-0)" font-size="12px"><text x="38.5" y="1181.5">EVAL_EXPRESSION *critical</text></g><path d="M 33 1216 L 33 1190 L 253 1190 L 253 1216" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 33 1216 L 253 1216" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><g fill="#000000" font-family="Helvetica" text-anchor="middle" font-size="12px"><text x="142.5" y="1206.5">...</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-37-1221-212-26-0)" font-size="12px"><text x="38.5" y="1233.5">RRDVAR *local</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-37-1247-212-26-0)" font-size="12px"><text x="38.5" y="1259.5">RRDVAR *family</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-37-1273-212-26-0)" font-size="12px"><text x="38.5" y="1285.5">RRDVAR *hostid</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-37-1299-212-26-0)" font-size="12px"><text x="38.5" y="1311.5">RRDVAR *hostname</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-37-1325-212-26-0)" font-size="12px"><text x="38.5" y="1337.5">struct rrdset *rrdset</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-37-1351-212-26-0)" font-size="12px"><text x="38.5" y="1363.5">struct rrdcalc *rrdset_next</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-37-1377-212-26-0)" font-size="12px"><text x="38.5" y="1389.5">struct rrdcalc *rrdset_prev</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-37-1403-212-26-0)" font-size="12px"><text x="38.5" y="1415.5">struct rrdcalc *next</text></g><path d="M 253 1359 L 273 1359 L 273 1040 L 143 1040 L 143 1053.63" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 143 1058.88 L 139.5 1051.88 L 143 1053.63 L 146.5 1051.88 Z" fill="#000000" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 253 1385 L 273 1385 L 273 1040 L 143 1040 L 143 1053.63" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 143 1058.88 L 139.5 1051.88 L 143 1053.63 L 146.5 1051.88 Z" fill="#000000" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 253 1411 L 273 1411 L 273 1040 L 143 1040 L 143 1053.63" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 143 1058.88 L 139.5 1051.88 L 143 1053.63 L 146.5 1051.88 Z" fill="#000000" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 583 1060 L 583 1034 L 833 1034 L 833 1060" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 583 1060 L 583 1190 L 833 1190 L 833 1060" fill="#ffffff" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 583 1060 L 833 1060" fill="none" stroke="#000000" stroke-miterlimit="10" 
pointer-events="none"/><g fill="#000000" font-family="Helvetica" text-anchor="middle" font-size="12px"><text x="707.5" y="1051.5">RRDCALCTEMPLATE *</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-587-1065-242-26-0)" font-size="12px"><text x="588.5" y="1077.5">...</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-587-1091-242-26-0)" font-size="12px"><text x="588.5" y="1103.5">EVAL_EXPRESSION *calculation</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-587-1117-242-26-0)" font-size="12px"><text x="588.5" y="1129.5">EVAL_EXPRESSION *warning</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-587-1143-242-26-0)" font-size="12px"><text x="588.5" y="1155.5">EVAL_EXPRESSION *critical</text></g><path d="M 833 1177 L 853 1177 L 853 1014 L 708 1014 L 708 1027.63" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 708 1032.88 L 704.5 1025.88 L 708 1027.63 L 711.5 1025.88 Z" fill="#000000" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-587-1169-242-26-0)" font-size="12px"><text x="588.5" y="1181.5">struct rrdcalctemplate *next</text></g><path d="M 253 290 L 270 290 C 270 286.1 276 286.1 276 290 L 276 290 L 293 290 L 293 339 C 296.9 339 296.9 345 293 345 L 293 345 L 293 547 C 296.9 547 296.9 553 293 553 L 293 553 L 293 573 C 296.9 573 296.9 579 293 579 L 293 579 L 293 599 C 296.9 599 296.9 605 293 605 L 293 605 L 293 625 C 296.9 625 296.9 631 293 631 L 293 631 L 293 682 L 143 682 L 143 1053.63" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 143 1058.88 L 139.5 1051.88 L 143 1053.63 L 146.5 1051.88 Z" fill="#000000" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><g transform="translate(216.5,676.5)"><switch><foreignObject style="overflow:visible;" pointer-events="all" width="57" height="11" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility"><div xmlns="http://www.w3.org/1999/xhtml" style="display: inline-block; font-size: 11px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.2; vertical-align: top; white-space: nowrap; text-align: center;"><div xmlns="http://www.w3.org/1999/xhtml" style="display:inline-block;text-align:inherit;text-decoration:inherit;background-color:#ffffff;">linked list of</div></div></foreignObject><text x="29" y="11" fill="#000000" text-anchor="middle" font-size="11px" font-family="Helvetica">linked list of</text></switch></g><path d="M 253 1333 L 270 1333 C 270 1329.1 276 1329.1 276 1333 L 276 1333 L 323 1333 L 323 930 C 326.9 930 326.9 924 323 924 L 323 924 L 323 713 C 326.9 713 326.9 707 323 707 L 323 707 L 323 631 C 326.9 631 326.9 625 323 625 L 323 625 L 323 605 C 326.9 605 326.9 599 323 599 L 323 599 L 323 345 C 326.9 345 326.9 339 323 339 L 323 339 L 323 97 C 326.9 97 326.9 91 323 91 L 323 91 L 323 60 L 488 60 L 488 107.63" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 488 112.88 L 484.5 105.88 L 488 107.63 L 491.5 105.88 Z" fill="#000000" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 613 413 L 613 407 L 633 407 L 633 462 C 636.9 462 636.9 468 633 468 L 633 468 L 633 488 C 636.9 488 636.9 494 633 494 L 633 494 L 633 599 C 636.9 599 636.9 605 633 605 L 633 605 L 633 625 C 636.9 625 636.9 631 633 631 L 633 631 L 633 670 L 336 670 C 336 666.1 330 666.1 330 670 L 330 670 L 326 670 C 326 666.1 320 666.1 320 670 L 
320 670 L 296 670 C 296 666.1 290 666.1 290 670 L 290 670 L 173 670 L 173 679 C 176.9 679 176.9 685 173 685 L 173 685 L 173 777.63" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 173 782.88 L 169.5 775.88 L 173 777.63 L 176.5 775.88 Z" fill="#000000" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><g transform="translate(442.5,664.5)"><switch><foreignObject style="overflow:visible;" pointer-events="all" width="94" height="11" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility"><div xmlns="http://www.w3.org/1999/xhtml" style="display: inline-block; font-size: 11px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.2; vertical-align: top; white-space: nowrap; text-align: center;"><div xmlns="http://www.w3.org/1999/xhtml" style="display:inline-block;text-align:inherit;text-decoration:inherit;background-color:#ffffff;">double linked list of</div></div></foreignObject><text x="47" y="11" fill="#000000" text-anchor="middle" font-size="11px" font-family="Helvetica">double linked list of</text></switch></g><path d="M 253 472 L 343 472 L 343 30 L 488 30 L 488 107.63" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 488 112.88 L 484.5 105.88 L 488 107.63 L 491.5 105.88 Z" fill="#000000" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><g transform="translate(314.5,175.5)"><switch><foreignObject style="overflow:visible;" pointer-events="all" width="57" height="11" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility"><div xmlns="http://www.w3.org/1999/xhtml" style="display: inline-block; font-size: 11px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.2; vertical-align: top; white-space: nowrap; text-align: center;"><div xmlns="http://www.w3.org/1999/xhtml" style="display:inline-block;text-align:inherit;text-decoration:inherit;background-color:#ffffff;">linked list of</div></div></foreignObject><text x="29" y="11" fill="#000000" text-anchor="middle" font-size="11px" font-family="Helvetica">linked list of</text></switch></g><path d="M 253 446 L 353 446 L 353 800 L 523 800 L 523 970 L 708 970 L 708 1027.63" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 708 1032.88 L 704.5 1025.88 L 708 1027.63 L 711.5 1025.88 Z" fill="#000000" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><g transform="translate(392.5,794.5)"><switch><foreignObject style="overflow:visible;" pointer-events="all" width="57" height="11" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility"><div xmlns="http://www.w3.org/1999/xhtml" style="display: inline-block; font-size: 11px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.2; vertical-align: top; white-space: nowrap; text-align: center;"><div xmlns="http://www.w3.org/1999/xhtml" style="display:inline-block;text-align:inherit;text-decoration:inherit;background-color:#ffffff;">linked list of</div></div></foreignObject><text x="29" y="11" fill="#000000" text-anchor="middle" font-size="11px" font-family="Helvetica">linked list of</text></switch></g><path d="M 373 1275 L 373 1249 L 543 1249 L 543 1275" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 373 1275 L 373 1561 L 543 1561 L 543 1275" fill="#ffffff" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 373 1275 L 543 1275" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><g fill="#000000" font-family="Helvetica" 
text-anchor="middle" font-size="12px"><text x="457.5" y="1266.5">EVAL_EXPRESSION</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-377-1280-162-26-0)" font-size="12px"><text x="378.5" y="1292.5">const char *source</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-377-1306-162-26-0)" font-size="12px"><text x="378.5" y="1318.5">const char *parsed_as</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-377-1332-162-26-0)" font-size="12px"><text x="378.5" y="1344.5">RRDCALC_STATUS *status</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-377-1358-162-26-0)" font-size="12px"><text x="378.5" y="1370.5">calculated_number *this</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-377-1384-162-26-0)" font-size="12px"><text x="378.5" y="1396.5">time_t *after</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-377-1410-162-26-0)" font-size="12px"><text x="378.5" y="1422.5">time_t *before</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-377-1436-162-26-0)" font-size="12px"><text x="378.5" y="1448.5">calculated_number result</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-377-1462-162-26-0)" font-size="12px"><text x="378.5" y="1474.5">int error</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-377-1488-162-26-0)" font-size="12px"><text x="378.5" y="1500.5">BUFFER *error_msg</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-377-1514-162-26-0)" font-size="12px"><text x="378.5" y="1526.5">void *nodes</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-377-1540-162-26-0)" font-size="12px"><text x="378.5" y="1552.5">struct rrdcalc_rrdcalc</text></g><path d="M 603 1275 L 603 1249 L 743 1249 L 743 1275" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 603 1275 L 603 1405 L 743 1405 L 743 1275" fill="#ffffff" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 603 1275 L 743 1275" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><g fill="#000000" font-family="Helvetica" text-anchor="middle" font-size="12px"><text x="672.5" y="1266.5">EVAL_NODE</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-607-1280-132-26-0)" font-size="12px"><text x="608.5" y="1292.5">int id</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-607-1306-132-26-0)" font-size="12px"><text x="608.5" y="1318.5">unsigned char operator</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-607-1332-132-26-0)" font-size="12px"><text x="608.5" y="1344.5">in precedence</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-607-1358-132-26-0)" font-size="12px"><text x="608.5" y="1370.5">int count</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-607-1384-132-26-0)" font-size="12px"><text x="608.5" y="1396.5">EVAL_VALUE ops[]</text></g><path d="M 673 1457 L 673 1431 L 853 1431 L 853 1457" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 673 1457 L 673 1535 L 853 1535 L 853 1457" fill="#ffffff" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 673 1457 L 853 1457" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><g fill="#000000" font-family="Helvetica" text-anchor="middle" 
font-size="12px"><text x="762.5" y="1448.5">EVAL_VALUE</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-677-1462-172-26-0)" font-size="12px"><text x="678.5" y="1474.5">calculated_number number</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-677-1488-172-26-0)" font-size="12px"><text x="678.5" y="1500.5">EVAL_VARIABLE *variable</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-677-1514-172-26-0)" font-size="12px"><text x="678.5" y="1526.5">struct eval_node *expression</text></g><path d="M 873 1561 L 873 1535 L 1023 1535 L 1023 1561" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 873 1561 L 873 1639 L 1023 1639 L 1023 1561" fill="#ffffff" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 873 1561 L 1023 1561" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><g fill="#000000" font-family="Helvetica" text-anchor="middle" font-size="12px"><text x="947.5" y="1552.5">EVAL_VARIABLE</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-877-1566-142-26-0)" font-size="12px"><text x="878.5" y="1578.5">char *name</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-877-1592-142-26-0)" font-size="12px"><text x="878.5" y="1604.5">uint32_t hash</text></g><path d="M 1023 1626 L 1043 1626 L 1043 1515 L 948 1515 L 948 1528.63" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 948 1533.88 L 944.5 1526.88 L 948 1528.63 L 951.5 1526.88 Z" fill="#000000" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-877-1618-142-26-0)" font-size="12px"><text x="878.5" y="1630.5">struct eval_variable *next</text></g><path d="M 743 1392 L 763 1392 L 763 1424.63" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 763 1429.88 L 759.5 1422.88 L 763 1424.63 L 766.5 1422.88 Z" fill="#000000" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 853 1522 L 873 1522 L 873 1229 L 673 1229 L 673 1242.63" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 673 1247.88 L 669.5 1240.88 L 673 1242.63 L 676.5 1240.88 Z" fill="#000000" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 543 1522 L 573 1522 L 573 1229 L 673 1229 L 673 1242.63" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 673 1247.88 L 669.5 1240.88 L 673 1242.63 L 676.5 1240.88 Z" fill="#000000" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 853 1496 L 948 1496 L 948 1528.63" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 948 1533.88 L 944.5 1526.88 L 948 1528.63 L 951.5 1526.88 Z" fill="#000000" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 1423 277 L 1698 277 L 1698 440.63" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 1698 445.88 L 1694.5 438.88 L 1698 440.63 L 1701.5 438.88 Z" fill="#000000" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><g transform="translate(1621.5,271.5)"><switch><foreignObject style="overflow:visible;" pointer-events="all" width="48" height="11" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility"><div xmlns="http://www.w3.org/1999/xhtml" style="display: inline-block; font-size: 11px; font-family: Helvetica; color: 
rgb(0, 0, 0); line-height: 1.2; vertical-align: top; white-space: nowrap; text-align: center;"><div xmlns="http://www.w3.org/1999/xhtml" style="display:inline-block;text-align:inherit;text-decoration:inherit;background-color:#ffffff;">avl tree of</div></div></foreignObject><text x="24" y="11" fill="#000000" text-anchor="middle" font-size="11px" font-family="Helvetica">avl tree of</text></switch></g></svg> \ No newline at end of file
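The SVG above closes the health diagram: an RRDCALCTEMPLATE carries three EVAL_EXPRESSION pointers (calculation, warning, critical), each expression hides its parse tree behind void *nodes, every EVAL_NODE holds an operator plus a flexible array of EVAL_VALUE operands, and each operand is either a literal number, an EVAL_VARIABLE, or a nested eval_node. A minimal C sketch of that shape, transcribed from the diagram labels only; the authoritative definitions live in netdata's eval headers and may differ in detail (in particular, calculated_number is assumed here to be a floating-point typedef):

#include <stdint.h>
#include <time.h>

typedef double calculated_number;        /* assumption: netdata picks the type per build options */
typedef int RRDCALC_STATUS;              /* assumption: an enum in the real headers */
typedef struct buffer BUFFER;            /* opaque growable buffer, declared elsewhere */

typedef struct eval_variable {
    char *name;
    uint32_t hash;                       /* hash of name, for fast lookups */
    struct eval_variable *next;          /* linked list of variables */
} EVAL_VARIABLE;

typedef struct eval_value {              /* one operand: exactly one member is meaningful */
    calculated_number number;            /* a literal constant, or           */
    EVAL_VARIABLE *variable;             /* a named variable, or             */
    struct eval_node *expression;        /* a nested sub-expression          */
} EVAL_VALUE;

typedef struct eval_node {
    int id;
    unsigned char operator;              /* valid in C; 'operator' is only reserved in C++ */
    int precedence;
    int count;                           /* number of entries in ops[] */
    EVAL_VALUE ops[];                    /* flexible array member: the operands */
} EVAL_NODE;

typedef struct eval_expression {
    const char *source;                  /* expression text as configured */
    const char *parsed_as;               /* normalized form after parsing */
    RRDCALC_STATUS *status;
    calculated_number *this;             /* valid in C; 'this' is only reserved in C++ */
    time_t *after, *before;
    calculated_number result;
    int error;
    BUFFER *error_msg;
    void *nodes;                         /* the root EVAL_NODE, kept opaque by the API */
    struct rrdcalc *rrdcalc;             /* back-pointer to the owning alarm */
} EVAL_EXPRESSION;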
diff --git a/diagrams/data_structures/src/netdata_config.xml b/diagrams/data_structures/src/netdata_config.xml
new file mode 100644
index 0000000..dbc3e48
--- /dev/null
+++ b/diagrams/data_structures/src/netdata_config.xml
@@ -0,0 +1 @@
+<mxfile userAgent="Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36" version="9.2.3" editor="www.draw.io" type="device"><diagram id="84bb3b1a-4913-5d87-532d-429ace37c991" name="Page-1">7VxZb9s4EP41BtoAu7BOO4+1k3YfXKBoHrZ9MmiLlrihRYOik7i/focSJUsifWx0tN0oMBJxNLzmmxkOh3RGznz78omjXfSZBZiO7HHwMnLuRrZt+Y4LfyTlkFGmjpcRQk4CxXQkPJAfWBHHironAU4qjIIxKsiuSlyzOMZrUaEhztlzlW3DaLXXHQqxRnhYI6pT/yaBiBTV8m+PL/7CJIxU11N7kr1YofVjyNk+Vv2NbGeT/mSvtyhvS000iVDAnksk537kzDljInvavswxlbLNxZbV+3jibTFujmNxTQU7q/CE6F5NPRF8DxJNZbshoRqmOOSiSZ7JlqIYSrMNi8WDejOG8joiNFigA9vLvhMBsshLs4hx8gP4EYVXFhDgNRcKeduXrRFK54wyDoSYpR0cKz3IxlQ3HCdQ7Us+R6tG+oxeKowLlIh8gIxStEvIKh2yrLhFPCTxjAnBtoopn+DH0niOMB7fL0ict6JLXQHxhLnALyWSQuETZlss+AFY1FtHKYQymFw/nkva5ypaVFI8a6wsDSmND4uWi86+goWgOIQ5F73Zl3ubGnoDnCqdISowj5HAM6nxSVnT4KE0zSMp1T+zLjqndTGBKRAWw9ONekw0xQRBi1SvOHvENUUy6BaiJIyhSPFGVpNIEXAAHxRZsJ1sbIfWJA4XKc+de6R8VUKRJAZ1NzS14ogEAY6l6jGBBFoVprFjJBap1LwZfEC28/Gf3siDgc+hbB3L8JHsXMxhkoIjkqoUBiV+xlKRDcpmX61shyqOl7SrjndZuSpAn0HV01CNsQhAOMvtHoa7lPCmTwOgrwfUs/sD1NUARU90KTjGS8rASQNnHAxwNoFzMu0PznMhQOF2hxigGgNEYksVawtrv3VbXY4LrEv4F6FxBf/xtAUF0NddMGjZqPw9GHEu6zZWWTOKbVix7pX3MCnHTtfXCCXRAGUDKA3ra3dQ6hHTOkJcxr4x2uIBxwY4GhbWznD0phpUOAhxvljCTCMWshjR+yN1lqYOcKCkp5YZuUjhFyK+lZ6/SxaQHJRiGNi3avG7auAfLMRBLbBoL5hEreh3wSTWJzawuWwTtudrXJUKrNohrvHJqZ1FgGOKBHmqJlhO7lo/cI4OJQalR8eWv0hCaUNb2z9b1qSM1UX+P+p5khq/NfbO8cNDNuLX7n1z0V7Y/MbS1gcH8HoHUORLevHkk9OgZtm1Jdvl0KZMQ1ajEbjOtXFzG+BOTWFzZR+cQboctsONgfX6DKVvNWCHjFUHmE57jKlzx2DyxDWrHVIdXaY6PLcad3n6cmzMdE1aSHQ4lqYEOfYlNRiMukC3sJpf8mDB0ROXg59uH9I+jxYcPYuluenBQ3frof3a2bDlT3QFmHTlovXcV5aLvkmvRAx2XNixezWuZ1yzCcZW7FjPZmTZlHc3a7aFebx/98SAGXBFUhrAqYqr9wPIDUA2OeuuQHZ1Z01J/IgljpQkEm620dBskAK9Npl5LiF60kGW85zqQKyHNKeWZ/T9qvfNV9q8hWxMqlINq/+WcXT93xMbSwcnt5OuwbHGNXRuJ52hcyZ1aDqXHYKgVoOgSQ1o2zMEQV3tUz2vTcvs++Ao19yy0bpT3Whzvp98cGTVkHZuy1BdZL90bjQ5y9742MjVk9BVNwEBFuyb5LoM0YidxiVDeFXTwF8zveHqaegEZJyGV+o+gIR2hcArroesVRNY+0xx5H215Nw7ceGmsMvrw4NfK0T/TDq/crCqn6e++UDp5NcXNHsxKMXJgMn1asuoWkcrNuUZbKo4BvoffIHBNx4wnIjY35aDvqBc/tisXWfWXZMyteGgff1YQd6knKbHCRuKwrd+R6MZlKa1tjMo9ZvNw6XY9qA0fUuhMyj1ZONwKbYlHE134roD8vTt5ow0INkAScMFuO6Q/O3uN9c2KibRVnc9vuHC8ykI2t/knLkSfFPZ5SyHa8FNDcdwwbA7w9Fz+9k2Qd1EavfU7KcbliGb4OpmZdt9mZWeV3tj0rcN2fjevNpET910e2rcLQIUrTD9whKSXuN37tbQCuYl/7moMWzBI9Kyv9Vq5B634HwlypP+UIbi8V+WZImT4/+Fce7/BQ==</diagram></mxfile> \ No newline at end of file
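The src/*.xml sources added in this directory store their drawing compressed: by draw.io convention (an assumption here, though it matches the editor versions named in the userAgent attributes), the text inside <diagram>...</diagram> is base64-encoded raw-DEFLATE data that inflates to URL-encoded mxGraph XML. A small zlib-based C filter sketching the inflate step on already-base64-decoded input, e.g. run as: extract the payload, pipe through base64 -d, then through ./inflate_diagram:

/* inflate_diagram.c - inflate a raw DEFLATE stream (no zlib/gzip header),
 * as used by draw.io <diagram> payloads after base64 decoding.
 * Build: cc inflate_diagram.c -o inflate_diagram -lz
 * The inflated output is URL-encoded XML; run it through a URL decoder to read it. */
#include <stdio.h>
#include <zlib.h>

int main(void) {
    unsigned char in[16384], out[16384];
    z_stream zs = {0};                       /* zalloc/zfree/opaque default to Z_NULL */

    /* negative windowBits tells zlib to expect a raw DEFLATE stream */
    if (inflateInit2(&zs, -15) != Z_OK) return 1;

    int ret = Z_OK;
    while (ret != Z_STREAM_END) {
        zs.avail_in = (uInt) fread(in, 1, sizeof in, stdin);
        if (zs.avail_in == 0) break;         /* input exhausted or truncated */
        zs.next_in = in;
        do {                                 /* drain all output for this input chunk */
            zs.avail_out = sizeof out;
            zs.next_out = out;
            ret = inflate(&zs, Z_NO_FLUSH);
            if (ret != Z_OK && ret != Z_STREAM_END) { inflateEnd(&zs); return 1; }
            fwrite(out, 1, sizeof out - zs.avail_out, stdout);
        } while (zs.avail_out == 0);
    }
    inflateEnd(&zs);
    return ret == Z_STREAM_END ? 0 : 1;
}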
diff --git a/diagrams/data_structures/src/registry.xml b/diagrams/data_structures/src/registry.xml
new file mode 100644
index 0000000..5274ba8
--- /dev/null
+++ b/diagrams/data_structures/src/registry.xml
@@ -0,0 +1 @@
+<mxfile userAgent="Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) draw.io/8.8.0 Chrome/61.0.3163.100 Electron/2.0.2 Safari/537.36" version="9.3.4" editor="www.draw.io" type="device"><diagram id="84bb3b1a-4913-5d87-532d-429ace37c991" name="Page-1">7Z1bb9u4EoB/TYDdAGehu63HJE27AdLuIt2es30yGJmx1UiiIdG57K9fUiJ1MWlXtmTG7ZmiaC2asiR+w+HMkByduVfpy4ccrZYfyRwnZ441fzlz3505jh0EAfuPl7xWJaHvVQWLPJ6LSk3B5/gfLAotUbqO57joVKSEJDRedQsjkmU4op0ylOfkuVvtgSTdq67QAisFnyOUqKX/i+d0WZVOnUlT/juOF0t5ZTsIq2/uUfS4yMk6E9c7c9yH8k/1dYrkb4kHLZZoTp5bRe71mXuVE0KrT+nLFU5428pmq857v+Xb+r5znNFeJ/jT6pQnlKzFw8/jiMYkQ/krKyfizgv6KhsHz1lbiUOS0yVZsMrJdVN6WTYA5pew2NGSpgn7aLOP+CWmf7c+f+VVfvP5UUbz17+7h1/FD3zDlL4KIUFrSlhRc91bQlbiF7+t05W8sYxk/E4iksaR+Bm1bURzFWSdR+LhAl+IG8oXWFaz3KqQP3nrTNGkHzBJMS1bK8cJovFTV4qQEMZFXU+cepHn6LVVYUXijBatX/6TF7AKol85oZCaVymPbhfud+q7zs7qvu3vqM4+VPcrj1oP3hSV4rVF1AJHETX0xGvQHONxRK2Nuy0NKI82BHGHuNV9ch/Z2y7Z/cRuohE7xzuK2CngXWtDTjyr+xPVnYqzOiKxrxCoIlDQfB3R8iEWcSHusyMExXOcJqjszg8ko5JpyXsZJ/Nb9ErWvM0KyrSvPLpckjz+h9VHkjn7OqcCpRPwX4uT5IokJG/0RXPSZ/5j4jI5Lthpf0qE9kbRR/TSqXiLCloLZJKgVRHfl7fMT0wZ4Ti7JJSSVAqceMD3rftpBo7m+9s4k7+yVaiecE7xy05pEd96XehO+Jvo/s/NiFcPyMvWaBfa1ncl7I6NyihbJHi/69lTzfWcoHs5lFCcZ4jiS973i4ES6SoSWfUNnCEGba5II2tdWgpTTh7xhvRoBAol8SJjhwl+4KdxPDGzMy5EMeUK5LJYoSjOFrdlnXdeU3InWoEXEXbuQ1IqpmU8n+Os1H4UUXRf9wcxgrAb9S/ZX9aYV1wJ+ezGr9ix3Ryzv7x6Tq9Ixp4FxaUcYSa5z5hLr0bCnN4S9toF9z2B2gTclqcO2Z2jS6CA5Fo9ZmYktxPXGROaAnSLols6cnzGxwL+ZxwB8J2eAmBN99Io+/78URWILcfuluSts4L1b649rIRki+a/FRNC1uFmpUCCbmlGr7r7nqZ2mVj9GaeIqY4MA+QRIB/axQ+DbPeHvC7Qohlazvj1XbtSm62PgP5w9JOpSfSqY7IdfZ5A3x4BsG15Jgmrhv53R2kgPRJp1zJJ2jtgrAbUI6H2jZplfn/UCVkA4BEAT42aZKpXn+KUlDcETv2bOPVO0FOZ16pgP6/+ULtvZK9+sr9Xz8cTIZygYaQQTE7bsZ8e4tgD5eGUjXr20308e24JAuHhhI068NIq2du9A9LDSZv15Kd9PfmucwegRwBt1JGfqo58RLKHeLHOEV+hBIa/UcPfs/saZq53iOF/sBYZ2fKf7hFUKNATnsmVKzPMGpP9K6edQdNITVP35BN1AtQoQ7REOSs5r9nOScoeFaAOgWrW5lcdeAl1SQqaoRQDzSE0zdr36jplpYtSMkNZxsaACMgOImvYng8VtDRO8Yy2XDX8soq3GH1A9mQN+FANrlWLK9kNxw+vs4iQR2YpMQNqHufMIASbaRhco3NvoRpT4y5N2W1T9MKd7lmCswW7E6A6hKrRCbdQjaN1qHKzCbCO4eBYJs2nUA2lrPheNMfiQGEK1WwkJfD7TqHK4XoI/EAdhaXpzGUAHKFuVw5POlYRqKOuhDm/nzE0GHgO5WkyTKHZFCl58gVLDVBYVTwKW5NBi0CdqJJsxdTUjG90h147ClmjQYtAtafe39xe1912BSwPN48mU4NBikCd7Xl3c/XXzR+fLu6+ljaSXNwFQA8FOrVMWkjqZE4XqFwWAEQHEHVN2kjqTA56SmYibUQd/i9XefDUKbM4m7PHALyH4/VNmknq1E6G6Zw1zyxdsxsuY08JiR6B6ACiU4PWUX17ehUMsaZurEnkpdmZo6h/Xo9Qxozksh2nb2KPUaJNmoQaLWVdlhagoDe789792WTMydFsqGMPNS0V80OCFmBKDUFpMtzkqN5OnYCpyfs2Y6qUT8Za5+ID0D2YrsmAk6O6PtKSyp+5BVX22PPqM1AdQNVosMlV9W/daZuxFayqo1pVk25yRN9ydfnLJhoRmOxIbtRbBFS9XeVOPC9zhUJXlmDdMWbvtBhH6cmqgq4WRP1yHpGUPcevvzwRVplxRbw1WE1xeP8rQB4AWWNjHQ2yp4ar2uoaNPVxNbVthV1VHdiBTlV7R1LVvq8QPtmMyWpzS+FtJ6f1RKu0k9PKem+cEtkOusEON2yj+m71/2ymx96oP9lZfXBKZE+NfHY1BRsGmH2XxNkj05lOqT1hENiQwGEjvU4LjDIIqIva6y1kYhEAR3uPmGKE7QqDsOrG9mNhldcaSbkfRYVr0ou7mqz242vwvo2o2Zm1IwgF1tJxraWNIdGZ6Kyl480XaDZ2bd1tG2cFezCIRza8p2MMgkcLXGk2E2xlO8cJprAoYxBbkzMJ4R75NAuM8mgJcAfBNTmRoNkushUupDsYztbodEL4QwcoNNato7Fup29m3Yaqdfvp4uP17L8Xt1+uwZw9sjnrbLzLiBUYNWdD1ZytIjm6yO//r4oMT9tuVYM3fMGL65Tz50tUwN7XISiNpoGR+kCzCQf23AwEaTQDjKXufJSTo1URkDycpNmML5bqO95df7j5/Nfd19mf13ef//gEdtJx7STP7ppJvt3bStqV1bu/BGhSO1Zame+IZB3gw5ebd7PbayYI7B7YE7PeD7NfbeJ1HzpJC8q2tAuW5DLwak9duWMHmA5hataUUlcvtczih7igM1iKNginWYNK84aDBmeCAOdQnGatKlu/TUPwlC+sA54DeBrNnmerQ+iGlTz7cncLlvJxLWX/8PnxnXmw+4uBOupCRFEDtu4up2kQ2+poW/fmshtb58wcBqBDgBq1hm010l8D/Xhx9fvNpzIdjMg2AWCHgDVqF9v66D9sdx0Jplmr2NG84KvttObg5gwGatQsdtR5AHBbx+VpNI+04+ziCW7rCDyNZpDWJAARoX2Z9LCcd+XtABH9gQ6M0RzSmjQgXQ8GAhHHDERMNqbsQqfnqDvOjJ0mcwiEITRYnTE2oB+xE++cw4GlTUNhGg1BOLtncOLsE
SynQTSNxh0czU4o9lh2UNHE8OqjQSwNhx3UIJKwgnmUF4zfEXgajTq4tkLrx940UU9ItHdN1BjMb5uwZUbhnfF0cDGO6WK4ftfF8CeT3ssCZYxhmAxsjR3AssB+erTuRafpfmhS2YHFOiZNo/6HJiudNtU+LPQczNWoJ6JJUwdzZuPyNOuNuLDU89g8zXoj+lABzJmNx9PoHKirRgs2fR+YYjGw1nPa9X+C0DO7LUqmLYNFgr17dtgb8lv4O55+sQqsJRsJplF3x9OkPAKzeFSeRt0cb3cwAqziwTjNejmeGo4Aq3hcnka9HC9QaP3gcy7SvjuVORdf1YBNAlZWTh5+NgK6TLh1R3gLAtuWVYmd8j8fAUtHQK59fAsC2reL/sQE5ALdNoDwzdLl2b4a+/hhddC3dbqSN4byqC8R+crirlJyxkZSnrrvGxbcjWWnU2/3KxY26/viVbLb6nu+v6v+4Hcs2DJR7QD5iUgaR1IUFMSDpatYonlpBx5hkYfm1R2nIlmTcOPdHcFuSdms73u7X96hSFa3/r6SxQ7Ld221qudotfxI5pjX+Bc=</diagram></mxfile> \ No newline at end of file
diff --git a/diagrams/data_structures/src/rrd.xml b/diagrams/data_structures/src/rrd.xml
new file mode 100644
index 0000000..87abaeb
--- /dev/null
+++ b/diagrams/data_structures/src/rrd.xml
@@ -0,0 +1 @@
+<mxfile userAgent="Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) draw.io/8.8.0 Chrome/61.0.3163.100 Electron/2.0.2 Safari/537.36" version="9.2.7" editor="www.draw.io" type="device"><diagram id="4eaf3d97-8d15-fa6f-3370-04444ee55a0b" name="Page-1">7Z1bc9s6koB/jap2U5WUSOr66MROjqvsJGX7nD15YjESLHFDkVpefJlfvwBJUBTR1MUNwkrcUzMTmaZoCR8aaPS153xaPX2JvfXyOpqzoGf3508957xn29ZkPOD/iCvP8oo1LK4sYn9eXttcuPX/w8qL/fJq5s9ZsnVjGkVB6q+3L86iMGSzdOuaF8fR4/Zt91Gw/VfX3oIpF25nXqBe/R9/ni7Lq9ZouvnFX8xfLMs/PbHHxS9+erNfizjKwvLv9WznPv9P8euVJ59VftFk6c2jx9ol56LnfIqjKC1erZ4+sUAMrhy24n2fW35bfe6YhekhbxiUX+jBC7Lyu9/cnJ9fXpcfL32WQ5I8+qvAC/lPH++jML0tf9PnP8+WfjC/8p6jTPzNJOVjIH/6uIxi/z/8fi/gv7L4Bf7rOC2J2yPxND8IPkVBFPMLYZT/gc2bbsXDyj8Ts4S/7bv8blbj0rX3tHXjlZek8gNGQeCtE/9n/pHFG1devPDDj1GaRqvyJvkFP9c+zwaf81Ed2nK0H1icsqfapXKov7BoxdL4md9S/nYk53cpF441Kn5+3Ewye1zes6zNL9spJcorJ/aievYGLn9R8oVZD/sKa+9B3JD/fwM3/0ZpTiuOfrEGHoCYF/iLkP8YsHvxNjEkPhens/JyGq3Fw9bezA8XV/k954PNlZvym4pLEX/vfZDLxNKfz1kogEapl3o/qwm3jvwwzUdi+JH/l3+vT/0Pw96Qf/BP/Gdr8zP/r7g9Tj9FIf8unp+zY3xqPDIxPQCqlUjsx1pitA+lOMJDnFgKxA8fPoi11l+xMPGjULxm937op/kPJMb7xLgVuNhf8r/I5odMg6GNnwblw2/4puaFC/49j396f7L9eC9IWRx6KfsotqVEmWzVFzlsEVHn30yIlhispRfzf97xW2k1kZNrcvgm8QqriZxUO2iG3ooRTwRPHcvCwTwdSJ1zz66+fLu5vPvrWjw+WPBlOV2uCCoC6nhiEOoAhHp9cf3t5od7/e38gv8yjufuiq2i+Nld8fMYwUXAtfoDg3SHwBIcBFwBYHM3zFY/mViGV5k4/Aa++IHIvpys0zdIdnQI2bn/4CcRYUVhHZrUmcYK1ox/K8d2hdp0H3iLhGBiYE4MKkwTdfUtjtMpW/Ev6uUfbM6HS0FKB+nODtI6lKv2g/ShTx9YjcdrPkhPdi0jSy9Z0iqyWUWGLTPrNE7RqhG9gdKlUzSSp8lT9Eg1lEtryMybLZnL4TAiiiRq8gg9Uq2WYkvM5bPUyX3+Vd0ZX9lTOmOhuBo9PY9UA2YFNgszoXDwEejng0FMX87U5LnZGqvrbxYmfJw5TX5bmMNdc608/7nnnPH/t4gvhq/JA7Q1VldjhS97WkcJ8dXF1+SZ2hoDi3IaZzPBNfVXjF/nrwL+Fd2NQUz8ghhjTjx9gwqVNVY9TXykZ1ngbVk3a9eKO4kwgrBjULOyxqrbCSJciDFh1mm6MKpsAf6nNsxJGsWEWAfiiVF96yBH1OYS8cXydSyj+pbqkQL4NvQtgoyG7BhVuFR/AahwbQBHQUYqNY7wyKjCpboRIMJyEya8aLxTk4rWxFFgsfmCSccx/67LaBGFXnCxufoxTyXJvaZi/P43W63l/V4841eW6Uo6ktmTn/5be/1DvImPJv8p5J/13+0ff8hHsjR9Lt3PXpZGgmT1Sa4iwT9/ojre0tubRFk8Y/JLlqOXevGCNW4U33Ynlpjxqe4/1G/a4dw9i2PvuXZDObk2T/4eFaakci7Ykra0iVjjOr+9978vZ0Hb/ZYz3HU/f1F84pc6jKuxhcwqcTyf+6ueiNcVCwCtCi/P5jDqv7CAfI461CRh4tU78ZIRVxRXkz6MibqXyzSd5FdPhhT1Z0s2+8WHVgFL0UWHEz8yukiLr6M9vOjgx1fzsZv4oqm6WwSRmGl9sf/7jIIUa6rjSaf8TdUtou4DdRn/2M9EE0HTZHDR1FY3fBmxsGIr8ZpYIliaDCuq3KpqpNjKW/gz/vUFz+j+v4qk+ouvt5ffvrrXZ18uP/13T6Tm86/ctyg6BYfcrMYOhRxtH8MeilhB/o8vBo12WpTabjJ0Yap6tYW5zVuwjf0t/12SDw6JLapsgsmAhRGYGXt7cacgpKOXwvboQieDftN2Nhx9sPvDycR2hvZ0YEGFT4YA+spEi2KvRjJQ4ROVciUiqFMQRFGLAKtxChVEisVHgoQOQF2BtCZqOEJhGytsnbMovPcXWexR/ZrDVudW3BrS7o6dBMel3UFPHwzs7cfrNYuNgFiY4sTmi1QBUVjh8ty9uvj65e4vflD7l05p8BqzkeLT3C2gkBgqbKMXqMldYwSEv8icvHzDcJMiiYuQYpDq2AMORgrEu5RI0+c1ySYKJGgT64okkLAlSd57Kz8g7wSOJeS57owlUOBPSqWfBiSWOJSQd7gzlK3eiXdZ6Kdkl8ahhAzTnaEE0q026k9KMV/Yswlki+4MpmqPbNYLIag6oEL5cl1BtR1VCSo8DO7dj+8X5cEzdUm1xdsRTKpDqtmZgm8084SS4Trjqdr/KCxOH0sw8a0zlqqRr2Q5y2LxNdw81YGIoohCWW6dEd1ZFI9qa+JpQhltndFUDXzF3rnw1u7jkoVuECWp66escPYlrveTDyoRRhGGktq6IgykJJUr8Dr2RZl5WnxRMAeHxsloccwDgeZUDPe1vfJaDPk70lUOffxg2mhBpdcvb01Vowh1OThicamE9yT98ba1w4CZ11Sd+zHhxOA06Y23LaA4VIGzUSF3+PHz5dXF17PrC4qm0YrbpKfetlQrWMhSoQq48WMQzX65Ml+ZpeUVoouha9R9b1uqUaxWHplKIuNxmvTg25ZqF2vidOcCCzHFMDXpyueHR4WpqJiaM80re3mzGUsSKqSqg6xJzz7/gO1kszX/lsxbufzU+hzOiC3+0GPS0V+1+QZOPesgW/ghNQbBEzXq5bdVG5kkuormWcCIqAbDhEltybbbtaWqhcSIzqtYqCa9/HxRUKBSdy2NNI36+W17f8AcrbpopCYd/fzJKtKEzYpVV7xI/HDG3PxoU4RaEV0UXZOO/+ohB3QAqRr5EF0EXZNOf9sG7EvU36VzyEaDAbi+pkBO+QAFUKnx/BcEFwVXpr2bgQvYnLbhNqWXCKMJj0zqV32V8M3N+eez68urH72ioDClZvYODNk5Fb8OmGHy17fbu5LoMkooZQjF06xPx1Zg/dYdAeR41zsC2Fa5UNY7AsgbX7sjgMxbl/Snwzq/vfe/H
0x23m/Zw133ozsCVGPbVjy+qB1PHQGwq4JRf2BftUcXUbhBNMsPVu11RSkM93DaR4bhavEbtofhHvx4GfbVTRSu3T+sreciZowq7Wxm2UZoTzP6tg+EawJYY7LD4aAaraMIdHjwHgI35bLpFhGYQgngu4UbR1Hq+lxtfCK8mHKnRiMN+qqVtagn8M/ZTY/Ki+tiajTWoK8aVTnTT2dXnwRRL/DiFeFE4TQZZWBNVZzU5ek09HUtAQrt+vrBj+9WX7emUIp1wiVdaHL9MheX2sqo5vtJyzQ7EX29NW5U7SsjKgxRRxnNtI0m0/XVaImmIj/3V4zLtaiRQHo8VkkwGukkjcoNne/88lqofBuuRBRD1GR0kwXUkC9aUQiJ5f9E9wrM39qvU+pfdbfOoBzvbr06hxNRz8pvjshYJSJrKrwGkbaeO/qIGBt/Ff1hREYqEVs3kBc5Pq3+YNuTaTt2ne7+N4iGTrvf4TSdpY134L2f0LYqA1zooL3voH14/FLlzZ40iTrqmajqUr/V6nioYdtVPVPUxEvFerTLAjjXggx1aE6AF6rRlIVi0rA8gZNrZzyBHI5GWg7xxPIE6rx0xnNHGZAsYW5eO4JYvpwlVNWlM5jgkYi8w52xBUKBO2MLuhGLQGDSfLVrvsrZR+qi+6xNQ1n/AdWTmvTeQ7rGa9B7u7MYqmfVwk9cBOz3/fA+ilfU8PRAAW5jfaSLWIePp91DfOjTO+53uqPupph8lDa/vTXoOD53t4zsL4NATDUwNRrFqZ6hpXzGbOHz7/hMUHVANVkvFainWYVv8E07ZO4iy9tNf/m7aDbdo8gNTZiNFk4FCm02F+Qt4AQWAdZkqi1QbbNpqY4oYgOF02SmLVBis4kzpdZCSKBmkyT3A/VX7D9U5Bh5ujFaLVXNZqbmXzphmi2UqsAs4kNECxfqnanN+mAye0k1JZUJBoLpkp9VRV8eaqOpAavRWqnUeMksXbO1U9vtS9R3SQtNo2VTWw1LIh048H8STixOo3VSVftR1R4gjhaxt6K6xmigRkujAnajBlD+yRPIz0tMT7USqmo8Kpz3RWsWyus26bPX4cVp99kf+vRqTenGZ++o1q1sk9RdHaPXWbJ0ExbO+ZlLyOqc1pRN+N60Zb6dhCPfaW8is8V1zr+eH8JxQcT2NB36jmr4Atl6a9/9xcj8heFqtAeqoyrrD5HI0st3kJ0LNItdPuCPXIj5IJ71hJufsL8cu9nmqI6q08vet+mSa4BzF8Bd/IYoYygbra3tqFr+EdL9FIVhXiifJFwPe6N1uAeqvg2v4dHsF6MEKBRYo1ECA1XPPkKoWRxHHPoyoo1b1xnLaDiBDKx/Gf3/5UNH2PVgNxp4MFDd1FJfW2X8E0Pq2s/s/p7/k/+eUKOsKCaVtoHqugZlee2vGR8Mm4KrkXTNxiWoruuPf3/+fHHTtKVUAkxwEXDNhiUMVE92bXOuyg7M5MGqBE1xRSjERmMVBqrBTATl5lxLmu7cTzaIqUUklq/J4AVH1bPKpGbmBfyv2/1VFPppFOce0n60TsEyl+QrVcBr8pVqMZO2O0sPf3y/0xrYDpAV2zzbFVNy4yelY50y6ZwXtE4wuNioumC1l5Rw5yzwnt1s7aYRYUVgNeosbY9nrKjee1mQuuyJzQgrAqtJXyngKm3BGrOZv/YZlRVDsTXqEAX8oQ24QbRwOSJGsatYriZdoIAHtDqG18CW+T3uY+ynKTUBxPE16eYEvJyfL68umnK7JqIYoib9m4B7k7qJaSwRZzYCZdDWTIzsKR3ZU/DrcjVDjjSdvHTd19ztd6ieqs+uzm6u3atvX7Z2fVpFNlbZwUmXmpRdFtpqEpVI+Td013E0Y0nC5i6VJsIBNtotbLi76FQBeOU9uVno/1/GCC4SrtEg86GqBMBwc+2O2CLZGo4kV5fmUl+/u7j+fnV2lx/GUrZai77spLpjqjsbjVQBvBNFY85eEamSsDSvsk9EX07UaHgKEDoqYwjjR9E9QQYRitKs5SWCi4BrMjAFCBAtDtp5Dwxg3aUjtQbCh4am7OwVtuNI/VLDm94jNRCRCnRdkfsBdV1pTKvBSQchAAGpe+FSARcsYZOHayAqFSBcNDUjCdbD1+T5GghJpaZYXcI1esAGPCj8i2Wzmq4uDmSh4EdQEVCNlmKXbcX/lE7R5STd6hRdXqt3ii4vmW/dPVVV2Jub83/ObhQMnZyLhr/1uejo3nLWSAqO3A7HimxZY0i2+mMNrEGNtkfN5bapViJx+LFkeCBFDSvktD2Ilg4fSI5DyyDHvd1kCCUC5cELqw6UYINWvoe6dz++Cy9P+rwmwcTQtPpqD9bucKqhsSXOb9/vLr99ve21x2MR1COgypRHE1CtvhoQ+xAJj7oopi0uEUoEyqFBDciSmXxQelge61R0HJkTUgzSiUFlyOrb0Jp7e3Fn7CT6e3vojj+JOo2TqDUEbHc2wFsph/wy4O2dih+82M/lgsS3wlvJB8pVBuHUI757exUTTAxMyCvWHcz2JjK/2LN7nwUBRSLieEJesO54tluMJE+yHGGJgq6v7pCqxiOyOGgGCrm9ugOqmpDodKqPJZSB1R1Lsh8ZwgpFAXaG1VINSNJTmp9ZREiJR3403KEFKhncHVEwo7pGtAj9IqQopFA54O6QgnakGtIiLIiAYgwLJhUjS7UTQTJKgbd4rlASVXdc2+OONoJKVNFUwUSq7qiqpqN6TGbC8pBM+Yq4IrhCOVTdcR0rtE42KnPH8NajMC3ZVbQehlndaT4Qs/o87bLzUIZ5UUQzVnig9LTuhAcMDzq/vCbfZke+zb4M9pC8gUgTewLwHo21AFcNQJu+0OzepySTGlxLS4EdCKYW4bVVq49kmWT3xBLJEnJsdsZyRyNe4QgjpyaOJeTU7IwlkNZfZ0kHRyxN0KHZHU7VHFDHOeNaCodHEopkCvk0u2O6O46kYEqCioYKOTe7g7o/mMSfk5wikUKOze6Qttfv3yAlOUUfWyDfZndQ2+v214O+SFKRUCHvZndQ2w0Ldagkq3gTg1FFqS2yhKL5tAGFnJvdAVUNDRTNp40l6NLsjqWtsKRovk6wQh7N7uy6+2K/8mg+sglioUKets6gDvcFCxVQST9CY52a1I+GbfpRbzsGjIQVR3VgmVSShvtWYIrs08VV6jBmuO6L7Cu5kk1fD9yRSbVJ+msPhEuyi8Y7NapAga0NmlG5wuaUkpUfH/tgVIkC25G0oCW5RcMdGNWl2vKBVbhk9dcAd2xUoWorLwfDJdlF450aValUp0497nvur3pFzkT+iri+nOvINqlLjSyF1u+UMyGHdytnQrrFtnIm5J2vkDMxUi1DDdmhnAldwjM0qa2O1EUx8MNfoqZfP/Dzyv7RPVq+1ALurylxkLzZpyVvk8GfP+ay7mF9zGVXAn1Dnr/1LI6959oNpcxvnvxdXOhViS1TGb9ZeVemdXx7739vl1+u7Q1DWcm85Q38RfGZN9PluI5Z1qStBr3oyvI2pFrGxZ7gBHMG2/ztsnZY23xp3v/eHu9+gzrBtt+An2BA
ffy3NsEkta0ZJmPHXmHXAErdv8XNfArI/etRsYFsqDdIpWqsciJp4LZjv/n1y5oCx0zpB3kNJPieVKc+5LYFDLn2c8aLdJJmy6T3U2enijEeIO93SqtC2xus0XDnG9A6TBVU/idPOFklsT7htK+6L5pwVr85I2QVqtYZobzDHu7Wgy1nvPsd+EkE2JXPrs5urt2rb1+U6UVlJ9om7VFlJ7aQDqwDq4xU6V6oXQqwhdZqsAsDqBtECwqSqzPeCMlJ1tWvzAI7mHqBF6+IKpKqyQL79kiNkMvChI9zfvYq9okZ383JY4FiarLIvj1S7ZoK05VH1WJQRI0W2bdHqiWxUKAuvt7d/OC/eJevvZT2g2Nqss6+PVJNkSFL53yE3PhR9ITPN9ZiTxXaUnGRCKMIm6y+b4/UaKnTMWseeoTedQxvPalsHa9HgAvAll51zQds1csz2T4JDZstw4pPVb6rAffIYy4QKdBYpRXYdNjFH3adBuLRCGi4OwCEXGplKCEf/2EGsmoWb8mwjDLtWIYPH3VVx/3w4QNtjipGlDkBFBste6OcUFC4XKHz5PO5RwFzGsgCJoUOyb4BF9UQ8J9X6uhrB84o5vxJnd/e+53Bbp9T5Z+D78d7C8ZgetCns6tPpD51oD418I+gfHqw+a60HWJWCwdI0YU2cgINUxXW2fzpbL51dWvDKASqUzdC+fAbNku9cBGw459epSDKx3tByuLQS9lHsTckyqw6blVxgHzSmv/izTstgJlVn0OlnJ6EK8oBstCarij2wD86uaKQVE26ohwgSEBWFKQ8QixJkw4oR6abUQv2bmAa9T05MvEHkEv2xGaEEoXSpMvJAWI4JMqYzfy1z968ox/L06SDyQGiNyTPPAefWKJYmuzp7EBRG9ubpktM8ecUk12dHSBso+rYVJgTCSYGpsl+zg4QryFhZqGfvvXYG7QBwagi1N6SwA/vI0KJQmmye7MDBF0U7opsPfdSJixB5RMJ6AuBGm3cXLmj6rLpBbMs4DTnbpitfjIhqIuY8cEjrhiuJhs3O2PVKgRxjRkZbXFUTXYUdoA4DrmTzv0VCxPqRoAGeqhHVg9Q1UZU7KeLOMrWRBJDEqxq3h1J1TpUkPzJ7qOYzp44lFAh8+5QqsahAqV3n4pdk0giSEJVy7sjqZqEamY+6t6jAyhUp7yzJI+pakq4+Ofsyr349/vNxe3t5bevQheSmi6n+8bhApFJJ5sdO1VtCwDbRy8O+cgSVwxXowEKfdXEAMls7OcECCwGrNlghbFqpKdwUrPhpFpCGtrjSQ9+fKUGdBRQOlY3/kInn7PAe3aztTvPYtrx96tzhcyeRmjpuM2bUECdR48hYdWC1Whs6UTd8OtYV94TUdVC1agaN1E9Cny8vBrXLEj9deCTiQRH1awOJ8OulPyvW/f27uzu79ucpJdmZCfBUTUaezoBYk8B9x81JcdzNRqDOlG9DBDXKJi7xBbP1mhMKtAhoWasjuO5IO3eB96ClmLcGcdoWOpkpw+CsOrDajRAdaLavlJ/xXKoAf9mbhERNyemOHOEUa1JNTJVTPNE14IpIUUhNRqwOlFNTNtiWpxtRLZHuCCyKLJmI1enqpWpIjv/6VKsBp6n0YjVqWpfqvOkMCoNQI0Gq05Vy9IGqPTapJErLvJVeEXhjji4RgNXp22BqxXZWRbHlOGMhGo2hnXaFsNa88cRVh1YjcazTtviWQusQg0mnCicJoNabWDpvbk5/+fshl97F0QUQ4WNoTJathxYczc0772VH7z1fEosTpNGfBtYazc4l1GSvvkqaUicRo33NtBUeBsnFUtDAzVptrcBB2pVCTuO5wkTL97JV8QVwdWk6d4GnKd1rsLHtgHrUpFzNF2TVnwb8KHuoLuO2QPRxdA1asm3AVcqRJeEFo3VqEFfNlj/U1oTVAO+1ZxgAjUnkHea79Xi9IdvYtShLlevN+q2jBz4w0d9fGKjrrqAy94VdxfX36/O7i561Aasmz4WI7ltVG3AgFPcENhaKusajrzqLKaOVFsCXMkGqsE1RFCPYeWwpGBK5NfDFsr/6o6tKpyUyN8NVygDrDuu9kEyS4n8GsCCSWCdkZVRkn+M+ioHfFt9dQD1Vd75Guqr6sht2j1StloHecgx2T80CBWUg9eVUDl9dRvsoCG1MSlTBbz1GFGXuoEFCJ3+M2P+1qObNzrbh5j35ZRqbd7YuF+aSdvutyY77+9hmzc6/TfQ/VN67OoTSLZWee35M9rmOy0/V+v8adz/3rF2T7ihPdz5Bg0zSPWHzqPsZ14DpoPV6oWz7dAZtWtWHjbbZHSBiV6ze5u9DuTP8hHFpyrftZkGx87bUbMJ7XjnLFRu7++etUrP2r7uSavmaf3OO+thM9MBHA2nsg429jmrv3tfbN7/fl8XZHUd1NwG2emreWJvYEpBXhT9B6KXzanp9hwpVcbWKdW4feDsmVEj3P2jPWvgeDDcdT9+wso1dpcxpjFbye8BnxcBQWn1ewwae9tE1jCrHSmtMXCkrEIqUGdKC2jrIE7M/BL1ujoioL+geIwrBISqxU4gJ8YOpvyzJ2zuelQhAoUV8IJ0iBVM0xDO6U1hrXdUWQtPFfCBdEj1sAJM79KlT1hRWCEPSIdc1fjhKln5HdUSwNMETO8d0lTjhTc0qZKABpxAjlyHOIFmoGAzsyQL3rqLDAsWyJbrEGxbVW8WxxGtuLgzDJAo1yFJ1X728e/Pny9uxIqb03RXyVsP9sESBTLluiNqqwamh0hkr/bfhdGckXaLtDCY1Ids1WzUjDRxq0wbworACqTFdYfVUQ8tuQ3467fzC4UjWX/x1t/R4ADr7wDArSXq3XHUY02hLr35ogL7xLaQlKPsvRBGPVKrnmayMOHjnDsbS5NvtGaxl5IOjOMKGXy74wodZsQHjtmM8fEipwwOJmTn7Q5mW2uiWZRR9TMcSNCy2xnJgXqOyZUk/r+/L/KVNsnH5ZygoqBCBt7uoLakGJVQSfXVrvqOG4EPVn9kqcAnkO4rxR1H3FaIQzZg+YJEeYcoD45OBAXB6pFk1U1eSvLN5dnHqzyD+8GL/Xy8CCsCK6QMd4d1R50txi+6woaY24afuH6cJJThi6QLacfd0W0xPG2ElnZg7TvwtLkDW6OhyhysuaBnB1atTzI+jaog7hff4y1QnWUmDgAL1KYl1dJLlgQTAxPaaTuDOcTnABpOS2hkFoBju52p4AyAtKxWCvqLag1U0962NrPRUClPGy8/kC7T3WKIr31wSvID5PlUJrBt6TFWkk6i+5NHGEhSbrWYdTDC1h8/wjY0hw2OsP3Hj/AAKI9icI8FSq17D+KONGZiX/390i+PJQBUVyy6Q+gffv5jHEVpPf0w9tbL62jOxB3/Dw==</diagram></mxfile> \ No newline at end of file
diff --git a/diagrams/data_structures/src/web.xml b/diagrams/data_structures/src/web.xml
new file mode 100644
index 0000000..dfcaa2f
--- /dev/null
+++ b/diagrams/data_structures/src/web.xml
@@ -0,0 +1 @@
+<mxfile userAgent="Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) draw.io/8.8.0 Chrome/61.0.3163.100 Electron/2.0.2 Safari/537.36" version="9.3.1" editor="www.draw.io" type="device"><diagram id="a1159e6c-5a01-6707-8aae-70da91d3c1f3" name="Page-1">7Z1rc9q4Gsc/TWZ6MtMOBgzhZSCkzS65nJCe7e4bj2IL0EZYHFskIZ/+SL5xkcyhyHK7zdPpTGz5gqWfZUt//R/rpDWYv36O0GJ2zQJMT5qN4PWkdXHSbDpn3bb4I1NWaUrv7CxNmEYkyHZaJ4zJG84SG1nqkgQ43tqRM0Y5WWwn+iwMsc+30lAUsZft3SaMbv/qAk2xkjD2EVVT/yABn2WpTqe33vAFk+ks++mzZjfd8Ij8p2nElmH2eyfN1iT5l26eo/xcWUbjGQrYy0ZSa3jSGkSM8XRp/jrAVJZtXmzpcZclW4vrjnDIDzmgmR7wjOgyy/oLfvR8SuTx6RXyVV4q8QuZUxSKtf6EhXycbWmIdX9GaDBCK7aUPxtzUQz5Wn/GIvIm9kdUbHJEgtgc8Qx6syPPRigdMMoikRCy5AfWB43lybKfiXAsDrvLs+fsJF2j160dRyjm+QUyStEiJo/JJcsD5yiakrDPOGfzbKc8g5cb17Mm2OqrpZsV+DOOOH7dSMpK+zNmc8yjldgl29rNwGcVw81WX9Z3WauVpc02brBuM6tAKLuzp8WZ13TFQgZYD7ulwF6GMZmGOBCplIXT9R9xyC59kUGewIvYE96hpQGIqDizWKV4Ig+TJUREBTvPkjlbyJMtkE/C6SjZ56K9TrnPsi6TmDh2QpNaMiNBgEPJl3HE0WNx/y0YCXlSNG5f/BclOGh8ck9cceEDse6s18V/uXvEBywUeUEkQYnFnfKC5d2igdw8GHJGVdzUB1HN9zOB2lag/jHse4PR1fDmwbscnX8eywcgRdMYiB5P1G3WR9TdR/T69mIoXyTilQs8j+fZPauPZ2cfz/PBSJ7ep0DzeJpOo10fzq6CUzY1PJ78FApw5Im8xCIhIhieuSZUWwc2jqqgenYgVSqy6MlNQNaArFtjA6mnkJVZEh1Kf+H5LHoCkAYgz2psF+XigEKSTKC3YtJbadTYGHKcEogMIBpBbNXYBHJU2cifoUgKcoly5JGFKIGbK+/6/NuX2/HDSVMUR/cC+JpICjU2hhxVKdrmKzNbEB4P7/+TEHaBsAnhsxobRY4qG2WEA+yzAAfeMqKS8PDh4vzh3JM91vvhv78Oxw/e1/uRN776a5gwB+BHA2/lj9FagKuqUgY86dEcSBtquBHwVp0NLVV2Evla+knXh8yxSJdLzx4JZWEki5Ho6a6AsAHhTp2tMFWJyt/SjD0R7JTU6MHt7e9XQ6jUFSHv1dkwU2WqLeRNQF4D8rZTZ0tN1a8y5CwiU/Hw1hO/vb/6fHXjfRmeXwzvAXxF4Ns1ttiaqtyVgT9dxjjy0FTn0QCah9Ps1Ngca6q6V9Eci3C8ENnEW4vA9WiuvRobYcXlaQaOYlFKsWhU+5g8ix7144rDgKCZCaPGppbGLbUDNpZKGEA1hdqu0yzVVlDhYIpzJ6PI6YxNWYjocJ3aT+ycOMhKb8bnuY8RvxL+bWP5T7mLKDmxFooL+7a9+md2gr8x56vM/YiWnElqxe+OmGSdnLG0bGO2jPzs6nP7F0fRFG/vJjO2F0CEKeLiybSxk2GVUUXF4h23aSttnC4i/AyVxqDSdGtsiOYV9NepNO5PVGnaapc+YEt54zUblIRPqTmXxOkYaVml2SjgxPpceudebNQcXxQSjjR1Zy5qQUKRokdM+4Wn/TBPdH6/KLdz4dfPru5k0/OuM0t/bHxyOrnFILv1P2arB4PIzn7H0nHmfBc2mcSYK6SKizjsiaeq6iVPvFBygife8U+8Xp2dNVU7DzEPRPF4fCZF8qQVmC4C1OOhdpp19tRUufyZyWdI8qhNHw7RMgyJDIAApkczPTSkpRKme2ybC0bF+3PCvJgyePSaEO3W2EPT2DVVogIQ9tliBWhNwyAaNfYjWqqgXSp3QqihAve7Qw3d9nasoeOqb9vCVrYJu/ApGNFWBe/+18vL4b1sDaeWeqi5BdyibpjEGGphVlJzVZF7l6UnKuBiCQ9jE6SaIEN7SFV5e41U9nSApAFJTXihPZKq6pp2ZnyIETWjqAsrtIdRIyXlDd+IiuICkgYkNaGE9khqnJfFyCF4NgxJakIH7ZFUJaP00foGrR1zkprYQXskVaHozRNZw2guceZLgPP4/ogmitAeTlUl6q84nkiWj8vJRPQtd22R47vbm/HQ+2t01fcGX77e/J6aIsEQaUZdE3ZojXpb46HLX6xv8GY1FhRqbCPlRlotyhl6ho6LEUpNKKE9lHs/K5a1l0hIOBFM3mQikD2arC5m0B7ZrsLqH2T8yWNTN40/+bckN4w/OYMf4PzRRHYkfpHY85E/g0GROgZF3M6hj8pGBYMi+dj4BvIFSd0kC/jk4qaHrHcw2B8wHuJqvOF629cyhheeEdY6x0RcVUkvGqUSpOeLdxt0Mkxw1jkw4h5szkTPiMAHGE241jpU4u4R2BOUUFHNgdY5YuLu+aZmhOEdasqyzjETd48lE1HKfMQBpxnOOgdOXLWTekBUyj9bJsib9z9FfFBH7TL++gB+pgCtjipVy3LHoRcz/wlzNfoXdBpznabX2DGvapzKzbbmoVcopEbM93TwfRZOiJwj4zRfgncZ36kqRqqNjmol3+FXuw2+zJpkmn1YJSXqxdjnhIHlyoSsTrixRlbzJbQdsgGeoCXl3iMJA0+8ngDt8Wh1Io41tGp3Yiky5XSSDkVONfkeLSA9HqlWv7HGtOyT/HL2NMrgjWpEUifc2CLZVTsnRWefLXAIPX0zljrhxhrLPe6qCSIUWJqx1Kk21liqfdb0CTsJYpH36/Nv3uhq/DC88S4vxuCBNOzD6Jyv1siWRfucCrReiOYYAFcOWGdytQZYlR6Kquvx1QL4WhAh6mwyqaPSa74TNCeUAOLqEescsdYQq0LT3e1odHVzeatA/OW14hJSGnm4VAt2mrumPU191X21ovjCrxFNVVySNH+77cu37uKdV8v/AzatCt8l/1qbt3PPwDR8R8YQpE7ttQayTD569zPImTHUybrWplst68bIMVXZygWSBiS1aq41lKUzS52uZwMEmgY0dYquNZqaQdJtmjDWYspTp+pa46m2XuXsX17mZJBf48VJAAoAPR6oTtq1BlRtxhZAk6n8ivkmAKpR10Sn6lqDqjZpt6Emc00AUCOgOhXX2qTzewZGRQV9hgiFCrSDGptFvT2Do6JuQmhYBTx1Eq01nk2Fp7QVtZrpcDdF0/c+qY8ZTO13CqzBVIfMnpkMp258OA0w9USpU2kt+hcgNUGqmb3YHtIyfejDa
STfnkC0CqKa2YrtES0b9/xwGsv3JxCtgqhmMmJ7RFWpKHvswgeETUHqphi2B/JMYWU7fqzdcU42Isg+Nj41Gs085Q5HovObTE11RGzZToSYrti3o816mUS2GW1Whqfy6LKeJvQ5jzTK57s4gfmjKqhRmrmb7ek5ave/8AzsMAQDSJFUbgBx8/darrg6rgrT1cAspow1ormv80+ZJgIUKuYG27QyfJcHRIeyinrpNPbocvCdCUOSOhOIPZJ7KuWcvPfYTTOQOieIPZCqFLcGKd49APJ4kFojiD2Se6ZMp2ROoAFrxFJnA7HHUhXgNnwD8wUV/TMvwv9digx6cgt799MhGOLVuULs4VXVuAIvCSgGpFUg1flC7CFV5bh1jZ1h/8nDIjsrIGrSV9GZQuwR3ePdkguRNxd8SIx9FgbQCTUCqzOH2AOrerhy5TzlCvq5sahQa2NJtW9tibiTBO0E6qgZU51BxBpTR6MU6YR58ReoGlDVOkXsUdWoRjqqExKJfs0kwhCIYgRX5xmxB1dVksZX13ejoXd3/vAwvL+RZJHv4zj20k9nAtrj0erMI/bQllq8Tj+cogD8IxVB1flH7EFVNaZN3x4Y96qBqvWS2INa7vMC515lSHVmBntIVWUJrHuVI+3U2lRSpaXi0cvnETCthmmvxjZSr6ew+sm/Ar/Xqaf5LHxXY9QrI1C5Uc9x3o9LUlP2hR7yQ1ySjlP/zd12uz9P4Tv1Fb5YjRjjG9s+R2gxu2aiSS4S/wc=</diagram></mxfile> \ No newline at end of file
diff --git a/diagrams/data_structures/web.svg b/diagrams/data_structures/web.svg
new file mode 100644
index 0000000..bf05698
--- /dev/null
+++ b/diagrams/data_structures/web.svg
@@ -0,0 +1,2 @@
+<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">
+<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" version="1.1" width="1805px" height="765px" viewBox="-0.5 -0.5 1805 765" style="background-color: rgb(255, 255, 255);"><defs><clipPath id="mx-clip-7-61-322-26-0"><rect x="7" y="61" width="322" height="26"/></clipPath><clipPath id="mx-clip-7-87-322-26-0"><rect x="7" y="87" width="322" height="26"/></clipPath><clipPath id="mx-clip-7-113-322-26-0"><rect x="7" y="113" width="322" height="26"/></clipPath><clipPath id="mx-clip-7-139-322-26-0"><rect x="7" y="139" width="322" height="26"/></clipPath><clipPath id="mx-clip-7-165-322-26-0"><rect x="7" y="165" width="322" height="26"/></clipPath><clipPath id="mx-clip-7-191-322-26-0"><rect x="7" y="191" width="322" height="26"/></clipPath><clipPath id="mx-clip-7-217-322-26-0"><rect x="7" y="217" width="322" height="26"/></clipPath><clipPath id="mx-clip-7-243-322-26-0"><rect x="7" y="243" width="322" height="26"/></clipPath><clipPath id="mx-clip-7-269-322-26-0"><rect x="7" y="269" width="322" height="26"/></clipPath><clipPath id="mx-clip-7-295-322-26-0"><rect x="7" y="295" width="322" height="26"/></clipPath><clipPath id="mx-clip-7-321-322-26-0"><rect x="7" y="321" width="322" height="26"/></clipPath><clipPath id="mx-clip-7-347-322-26-0"><rect x="7" y="347" width="322" height="26"/></clipPath><clipPath id="mx-clip-7-373-322-26-0"><rect x="7" y="373" width="322" height="26"/></clipPath><clipPath id="mx-clip-7-399-322-26-0"><rect x="7" y="399" width="322" height="26"/></clipPath><clipPath id="mx-clip-7-425-322-26-0"><rect x="7" y="425" width="322" height="26"/></clipPath><clipPath id="mx-clip-7-451-322-26-0"><rect x="7" y="451" width="322" height="26"/></clipPath><clipPath id="mx-clip-7-477-322-26-0"><rect x="7" y="477" width="322" height="26"/></clipPath><clipPath id="mx-clip-7-503-322-26-0"><rect x="7" y="503" width="322" height="26"/></clipPath><clipPath id="mx-clip-7-529-322-26-0"><rect x="7" y="529" width="322" height="26"/></clipPath><clipPath id="mx-clip-7-555-322-26-0"><rect x="7" y="555" width="322" height="26"/></clipPath><clipPath id="mx-clip-7-581-322-26-0"><rect x="7" y="581" width="322" height="26"/></clipPath><clipPath id="mx-clip-7-607-322-26-0"><rect x="7" y="607" width="322" height="26"/></clipPath><clipPath id="mx-clip-7-633-322-26-0"><rect x="7" y="633" width="322" height="26"/></clipPath><clipPath id="mx-clip-7-659-322-26-0"><rect x="7" y="659" width="322" height="26"/></clipPath><clipPath id="mx-clip-7-685-322-26-0"><rect x="7" y="685" width="322" height="26"/></clipPath><clipPath id="mx-clip-7-711-322-26-0"><rect x="7" y="711" width="322" height="26"/></clipPath><clipPath id="mx-clip-7-737-322-26-0"><rect x="7" y="737" width="322" height="26"/></clipPath><clipPath id="mx-clip-477-165-252-26-0"><rect x="477" y="165" width="252" height="26"/></clipPath><clipPath id="mx-clip-477-191-252-26-0"><rect x="477" y="191" width="252" height="26"/></clipPath><clipPath id="mx-clip-477-217-252-26-0"><rect x="477" y="217" width="252" height="26"/></clipPath><clipPath id="mx-clip-477-243-252-26-0"><rect x="477" y="243" width="252" height="26"/></clipPath><clipPath id="mx-clip-477-269-252-26-0"><rect x="477" y="269" width="252" height="26"/></clipPath><clipPath id="mx-clip-477-295-252-26-0"><rect x="477" y="295" width="252" height="26"/></clipPath><clipPath id="mx-clip-477-321-252-26-0"><rect x="477" y="321" width="252" height="26"/></clipPath><clipPath id="mx-clip-477-347-252-26-0"><rect x="477" y="347" width="252" height="26"/></clipPath><clipPath 
id="mx-clip-477-373-252-26-0"><rect x="477" y="373" width="252" height="26"/></clipPath><clipPath id="mx-clip-477-399-252-26-0"><rect x="477" y="399" width="252" height="26"/></clipPath><clipPath id="mx-clip-477-425-252-26-0"><rect x="477" y="425" width="252" height="26"/></clipPath><clipPath id="mx-clip-477-451-252-26-0"><rect x="477" y="451" width="252" height="26"/></clipPath><clipPath id="mx-clip-477-577-252-26-0"><rect x="477" y="577" width="252" height="26"/></clipPath><clipPath id="mx-clip-477-603-252-26-0"><rect x="477" y="603" width="252" height="26"/></clipPath><clipPath id="mx-clip-477-629-252-26-0"><rect x="477" y="629" width="252" height="26"/></clipPath><clipPath id="mx-clip-477-655-252-26-0"><rect x="477" y="655" width="252" height="26"/></clipPath><clipPath id="mx-clip-477-681-252-26-0"><rect x="477" y="681" width="252" height="26"/></clipPath><clipPath id="mx-clip-477-707-252-26-0"><rect x="477" y="707" width="252" height="26"/></clipPath><clipPath id="mx-clip-477-733-252-26-0"><rect x="477" y="733" width="252" height="26"/></clipPath><clipPath id="mx-clip-837-113-232-26-0"><rect x="837" y="113" width="232" height="26"/></clipPath><clipPath id="mx-clip-837-139-232-26-0"><rect x="837" y="139" width="232" height="26"/></clipPath><clipPath id="mx-clip-837-165-232-26-0"><rect x="837" y="165" width="232" height="26"/></clipPath><clipPath id="mx-clip-837-191-232-26-0"><rect x="837" y="191" width="232" height="26"/></clipPath><clipPath id="mx-clip-837-217-232-26-0"><rect x="837" y="217" width="232" height="26"/></clipPath><clipPath id="mx-clip-837-243-232-26-0"><rect x="837" y="243" width="232" height="26"/></clipPath><clipPath id="mx-clip-837-269-232-26-0"><rect x="837" y="269" width="232" height="26"/></clipPath><clipPath id="mx-clip-837-295-232-26-0"><rect x="837" y="295" width="232" height="26"/></clipPath><clipPath id="mx-clip-837-321-232-26-0"><rect x="837" y="321" width="232" height="26"/></clipPath><clipPath id="mx-clip-837-347-232-26-0"><rect x="837" y="347" width="232" height="26"/></clipPath><clipPath id="mx-clip-837-373-232-26-0"><rect x="837" y="373" width="232" height="26"/></clipPath><clipPath id="mx-clip-1177-61-222-26-0"><rect x="1177" y="61" width="222" height="26"/></clipPath><clipPath id="mx-clip-1177-87-222-26-0"><rect x="1177" y="87" width="222" height="26"/></clipPath><clipPath id="mx-clip-1177-113-222-26-0"><rect x="1177" y="113" width="222" height="26"/></clipPath><clipPath id="mx-clip-1177-139-222-26-0"><rect x="1177" y="139" width="222" height="26"/></clipPath><clipPath id="mx-clip-1177-165-222-26-0"><rect x="1177" y="165" width="222" height="26"/></clipPath><clipPath id="mx-clip-1177-191-222-26-0"><rect x="1177" y="191" width="222" height="26"/></clipPath><clipPath id="mx-clip-1177-217-222-26-0"><rect x="1177" y="217" width="222" height="26"/></clipPath><clipPath id="mx-clip-1177-243-222-26-0"><rect x="1177" y="243" width="222" height="26"/></clipPath><clipPath id="mx-clip-1177-269-222-26-0"><rect x="1177" y="269" width="222" height="26"/></clipPath><clipPath id="mx-clip-1177-295-222-26-0"><rect x="1177" y="295" width="222" height="26"/></clipPath><clipPath id="mx-clip-1177-321-222-26-0"><rect x="1177" y="321" width="222" height="26"/></clipPath><clipPath id="mx-clip-1177-347-222-26-0"><rect x="1177" y="347" width="222" height="26"/></clipPath><clipPath id="mx-clip-1177-373-222-26-0"><rect x="1177" y="373" width="222" height="26"/></clipPath><clipPath id="mx-clip-1177-399-222-26-0"><rect x="1177" y="399" width="222" height="26"/></clipPath><clipPath 
id="mx-clip-1177-425-222-26-0"><rect x="1177" y="425" width="222" height="26"/></clipPath><clipPath id="mx-clip-1177-451-222-26-0"><rect x="1177" y="451" width="222" height="26"/></clipPath><clipPath id="mx-clip-1177-477-222-26-0"><rect x="1177" y="477" width="222" height="26"/></clipPath><clipPath id="mx-clip-1527-126-242-26-0"><rect x="1527" y="126" width="242" height="26"/></clipPath><clipPath id="mx-clip-1527-152-242-26-0"><rect x="1527" y="152" width="242" height="26"/></clipPath><clipPath id="mx-clip-1527-178-242-26-0"><rect x="1527" y="178" width="242" height="26"/></clipPath><clipPath id="mx-clip-1527-204-242-26-0"><rect x="1527" y="204" width="242" height="26"/></clipPath><clipPath id="mx-clip-1527-230-242-26-0"><rect x="1527" y="230" width="242" height="26"/></clipPath><clipPath id="mx-clip-1527-256-242-26-0"><rect x="1527" y="256" width="242" height="26"/></clipPath><clipPath id="mx-clip-1527-282-242-26-0"><rect x="1527" y="282" width="242" height="26"/></clipPath><clipPath id="mx-clip-1527-308-242-26-0"><rect x="1527" y="308" width="242" height="26"/></clipPath><clipPath id="mx-clip-1527-334-242-26-0"><rect x="1527" y="334" width="242" height="26"/></clipPath><clipPath id="mx-clip-1527-360-242-26-0"><rect x="1527" y="360" width="242" height="26"/></clipPath><clipPath id="mx-clip-1527-386-242-26-0"><rect x="1527" y="386" width="242" height="26"/></clipPath><clipPath id="mx-clip-1527-412-242-26-0"><rect x="1527" y="412" width="242" height="26"/></clipPath><clipPath id="mx-clip-1527-438-242-26-0"><rect x="1527" y="438" width="242" height="26"/></clipPath><clipPath id="mx-clip-1527-464-242-26-0"><rect x="1527" y="464" width="242" height="26"/></clipPath><clipPath id="mx-clip-1527-490-242-26-0"><rect x="1527" y="490" width="242" height="26"/></clipPath><clipPath id="mx-clip-1527-516-242-26-0"><rect x="1527" y="516" width="242" height="26"/></clipPath><clipPath id="mx-clip-1527-542-242-26-0"><rect x="1527" y="542" width="242" height="26"/></clipPath><clipPath id="mx-clip-1527-568-242-26-0"><rect x="1527" y="568" width="242" height="26"/></clipPath><clipPath id="mx-clip-1527-594-242-26-0"><rect x="1527" y="594" width="242" height="26"/></clipPath></defs><path d="M 3 56 L 3 30 L 333 30 L 333 56" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 3 56 L 3 758 L 333 758 L 333 56" fill="#ffffff" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 3 56 L 333 56" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><g fill="#000000" font-family="Helvetica" text-anchor="middle" font-size="12px"><text x="167.5" y="47.5">web_client</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-7-61-322-26-0)" font-size="12px"><text x="8.5" y="73.5">unsigned long long id</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-7-87-322-26-0)" font-size="12px"><text x="8.5" y="99.5">WEB_CLIENT_FLAGS flags</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-7-113-322-26-0)" font-size="12px"><text x="8.5" y="125.5">WEB_CLIENT_MODE mode</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-7-139-322-26-0)" font-size="12px"><text x="8.5" y="151.5">WEB_CLIENT_ACL acl</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-7-165-322-26-0)" font-size="12px"><text x="8.5" y="177.5">size_t header_parse_tries</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-7-191-322-26-0)" font-size="12px"><text x="8.5" 
y="203.5">size_t header_parse_last_size</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-7-217-322-26-0)" font-size="12px"><text x="8.5" y="229.5">int tcp_cork</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-7-243-322-26-0)" font-size="12px"><text x="8.5" y="255.5">int ifd</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-7-269-322-26-0)" font-size="12px"><text x="8.5" y="281.5">int ofd</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-7-295-322-26-0)" font-size="12px"><text x="8.5" y="307.5">char client_ip[NI_MAXHOST+1}</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-7-321-322-26-0)" font-size="12px"><text x="8.5" y="333.5">char client_port[NI_MAXSERV+1]</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-7-347-322-26-0)" font-size="12px"><text x="8.5" y="359.5">char decoded_url[NETDATA_WEB_REQUEST_URL_SIZE+1</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-7-373-322-26-0)" font-size="12px"><text x="8.5" y="385.5">char last_url[NETDATA_WEB_REQUEST_URL_SIZE+1]</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-7-399-322-26-0)" font-size="12px"><text x="8.5" y="411.5">struct timeval tv_in, tv_ready</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-7-425-322-26-0)" font-size="12px"><text x="8.5" y="437.5">char cookie1[NETDATA_WEB_REQUEST_COOKIE_SIZE+1]</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-7-451-322-26-0)" font-size="12px"><text x="8.5" y="463.5">char cookie2[NETDATA_WEB_REQUEST_COOKIE_SIZE+1]</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-7-477-322-26-0)" font-size="12px"><text x="8.5" y="489.5">char origin[NETDATA_WEB_REQUEST_ORIGIN_HEADER_SIZE+1]</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-7-503-322-26-0)" font-size="12px"><text x="8.5" y="515.5">char *user_agent</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-7-529-322-26-0)" font-size="12px"><text x="8.5" y="541.5">struct response response</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-7-555-322-26-0)" font-size="12px"><text x="8.5" y="567.5">size_t stats_received_bytes</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-7-581-322-26-0)" font-size="12px"><text x="8.5" y="593.5">size_t stats_sent_bytes</text></g><path d="M 333 615 L 353 615 L 353 10 L 168 10 L 168 23.63" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 168 28.88 L 164.5 21.88 L 168 23.63 L 171.5 21.88 Z" fill="#000000" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-7-607-322-26-0)" font-size="12px"><text x="8.5" y="619.5">struct web_client *prev</text></g><path d="M 333 641 L 353 641 L 353 10 L 168 10 L 168 23.63" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 168 28.88 L 164.5 21.88 L 168 23.63 L 171.5 21.88 Z" fill="#000000" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><g transform="translate(304.5,299.5)"><switch><foreignObject style="overflow:visible;" pointer-events="all" width="102" height="12" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility"><div xmlns="http://www.w3.org/1999/xhtml" style="display: inline-block; font-size: 12px; font-family: Helvetica; color: 
rgb(0, 0, 0); line-height: 1.2; vertical-align: top; white-space: nowrap; text-align: center;"><div xmlns="http://www.w3.org/1999/xhtml" style="display:inline-block;text-align:inherit;text-decoration:inherit;background-color:#ffffff;">double linked list of</div></div></foreignObject><text x="51" y="12" fill="#000000" text-anchor="middle" font-size="12px" font-family="Helvetica">double linked list of</text></switch></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-7-633-322-26-0)" font-size="12px"><text x="8.5" y="645.5">struct web_client *next</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-7-659-322-26-0)" font-size="12px"><text x="8.5" y="671.5">netdata_thread_t thread</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-7-685-322-26-0)" font-size="12px"><text x="8.5" y="697.5">volatile int running</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-7-711-322-26-0)" font-size="12px"><text x="8.5" y="723.5">size_t pollinfo_slot</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-7-737-322-26-0)" font-size="12px"><text x="8.5" y="749.5">size_t pollinfo_filecopy_slot</text></g><path d="M 473 160 L 473 134 L 733 134 L 733 160" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 473 160 L 473 472 L 733 472 L 733 160" fill="#ffffff" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 473 160 L 733 160" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><g fill="#000000" font-family="Helvetica" text-anchor="middle" font-size="12px"><text x="602.5" y="151.5">response</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-477-165-252-26-0)" font-size="12px"><text x="478.5" y="177.5">BUFFER *header</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-477-191-252-26-0)" font-size="12px"><text x="478.5" y="203.5">BUFFER *header_output</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-477-217-252-26-0)" font-size="12px"><text x="478.5" y="229.5">BUFFER *data</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-477-243-252-26-0)" font-size="12px"><text x="478.5" y="255.5">int code</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-477-269-252-26-0)" font-size="12px"><text x="478.5" y="281.5">size_t rlen</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-477-295-252-26-0)" font-size="12px"><text x="478.5" y="307.5">size_t sent</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-477-321-252-26-0)" font-size="12px"><text x="478.5" y="333.5">int zoutput</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-477-347-252-26-0)" font-size="12px"><text x="478.5" y="359.5">z_stream zstream</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-477-373-252-26-0)" font-size="12px"><text x="478.5" y="385.5">Bytef zbuffer[NETDATA_WEB_RESPONSE_ZLIB_CHUNK_SIZE]</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-477-399-252-26-0)" font-size="12px"><text x="478.5" y="411.5">size_t zsent</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-477-425-252-26-0)" font-size="12px"><text x="478.5" y="437.5">size_t zhave</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-477-451-252-26-0)" font-size="12px"><text x="478.5" y="463.5">unsigned int 
zinitialized</text></g><path d="M 333 537 L 403 537 L 403 114 L 603 114 L 603 127.63" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 603 132.88 L 599.5 125.88 L 603 127.63 L 606.5 125.88 Z" fill="#000000" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 473 572 L 473 546 L 733 546 L 733 572" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 473 572 L 473 754 L 733 754 L 733 572" fill="#ffffff" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 473 572 L 733 572" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><g fill="#000000" font-family="Helvetica" text-anchor="middle" font-size="12px"><text x="602.5" y="563.5">clients_cache</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-477-577-252-26-0)" font-size="12px"><text x="478.5" y="589.5">pid_t pid</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-477-603-252-26-0)" font-size="12px"><text x="478.5" y="615.5">struct web_client *used</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-477-629-252-26-0)" font-size="12px"><text x="478.5" y="641.5">size_t used_count</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-477-655-252-26-0)" font-size="12px"><text x="478.5" y="667.5">struct web_client *avail</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-477-681-252-26-0)" font-size="12px"><text x="478.5" y="693.5">size_t avail_count</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-477-707-252-26-0)" font-size="12px"><text x="478.5" y="719.5">size_t reused</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-477-733-252-26-0)" font-size="12px"><text x="478.5" y="745.5">size_t allocated</text></g><path d="M 733 611 L 753 611 L 753 10 L 168 10 L 168 23.63" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 168 28.88 L 164.5 21.88 L 168 23.63 L 171.5 21.88 Z" fill="#000000" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><g transform="translate(724.5,12.5)"><switch><foreignObject style="overflow:visible;" pointer-events="all" width="57" height="11" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility"><div xmlns="http://www.w3.org/1999/xhtml" style="display: inline-block; font-size: 11px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.2; vertical-align: top; white-space: nowrap; text-align: center;"><div xmlns="http://www.w3.org/1999/xhtml" style="display:inline-block;text-align:inherit;text-decoration:inherit;background-color:#ffffff;">linked list of</div></div></foreignObject><text x="29" y="11" fill="#000000" text-anchor="middle" font-size="11px" font-family="Helvetica">linked list of</text></switch></g><path d="M 733 663 L 753 663 L 753 10 L 168 10 L 168 23.63" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 168 28.88 L 164.5 21.88 L 168 23.63 L 171.5 21.88 Z" fill="#000000" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><g transform="translate(724.5,38.5)"><switch><foreignObject style="overflow:visible;" pointer-events="all" width="57" height="11" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility"><div xmlns="http://www.w3.org/1999/xhtml" style="display: inline-block; font-size: 11px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.2; vertical-align: top; white-space: 
nowrap; text-align: center;"><div xmlns="http://www.w3.org/1999/xhtml" style="display:inline-block;text-align:inherit;text-decoration:inherit;background-color:#ffffff;">linked list of</div></div></foreignObject><text x="29" y="11" fill="#000000" text-anchor="middle" font-size="11px" font-family="Helvetica">linked list of</text></switch></g><path d="M 833 108 L 833 82 L 1073 82 L 1073 108" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 833 108 L 833 394 L 1073 394 L 1073 108" fill="#ffffff" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 833 108 L 1073 108" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><g fill="#000000" font-family="Helvetica" text-anchor="middle" font-size="12px"><text x="952.5" y="99.5">listen_sockets</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-837-113-232-26-0)" font-size="12px"><text x="838.5" y="125.5">struct config *config</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-837-139-232-26-0)" font-size="12px"><text x="838.5" y="151.5">const char *config_section</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-837-165-232-26-0)" font-size="12px"><text x="838.5" y="177.5">const char *default_bind_to</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-837-191-232-26-0)" font-size="12px"><text x="838.5" y="203.5">uint16_t default_port</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-837-217-232-26-0)" font-size="12px"><text x="838.5" y="229.5">int backlog</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-837-243-232-26-0)" font-size="12px"><text x="838.5" y="255.5">size_t opened</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-837-269-232-26-0)" font-size="12px"><text x="838.5" y="281.5">size_t failed</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-837-295-232-26-0)" font-size="12px"><text x="838.5" y="307.5">int fds[MAX_LISTEN_FDS]</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-837-321-232-26-0)" font-size="12px"><text x="838.5" y="333.5">char *fds_names[MAX_LISTEN_FDS]</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-837-347-232-26-0)" font-size="12px"><text x="838.5" y="359.5">int fds_types[MAX_LISTEN_FDS]</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-837-373-232-26-0)" font-size="12px"><text x="838.5" y="385.5">int fds_families[MAX_LISTEN_FDS]</text></g><path d="M 1173 56 L 1173 30 L 1403 30 L 1403 56" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 1173 56 L 1173 498 L 1403 498 L 1403 56" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 1173 56 L 1403 56" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><g fill="#000000" font-family="Helvetica" text-anchor="middle" font-size="12px"><text x="1287.5" y="47.5">POLLINFO</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-1177-61-222-26-0)" font-size="12px"><text x="1178.5" y="73.5">POLLJOB *p</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-1177-87-222-26-0)" font-size="12px"><text x="1178.5" y="99.5">size_t slot</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-1177-113-222-26-0)" font-size="12px"><text x="1178.5" y="125.5">int fd</text></g><g fill="#000000" 
font-family="Helvetica" clip-path="url(#mx-clip-1177-139-222-26-0)" font-size="12px"><text x="1178.5" y="151.5">int socktype</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-1177-165-222-26-0)" font-size="12px"><text x="1178.5" y="177.5">char *client_ip</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-1177-191-222-26-0)" font-size="12px"><text x="1178.5" y="203.5">char *client_port</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-1177-217-222-26-0)" font-size="12px"><text x="1178.5" y="229.5">time_t connected_t</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-1177-243-222-26-0)" font-size="12px"><text x="1178.5" y="255.5">time_t last_received_t</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-1177-269-222-26-0)" font-size="12px"><text x="1178.5" y="281.5">time_t last_sent_t</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-1177-295-222-26-0)" font-size="12px"><text x="1178.5" y="307.5">size_t recv_count</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-1177-321-222-26-0)" font-size="12px"><text x="1178.5" y="333.5">size_t send_count</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-1177-347-222-26-0)" font-size="12px"><text x="1178.5" y="359.5">uint32_t flags</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-1177-373-222-26-0)" font-size="12px"><text x="1178.5" y="385.5">void (*del_callback)</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-1177-399-222-26-0)" font-size="12px"><text x="1178.5" y="411.5">int (*rcv_callback)</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-1177-425-222-26-0)" font-size="12px"><text x="1178.5" y="437.5">int (*snd_callback)</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-1177-451-222-26-0)" font-size="12px"><text x="1178.5" y="463.5">void *data</text></g><path d="M 1403 485 L 1423 485 L 1423 10 L 1279 10 L 1279 22.63" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 1279 27.88 L 1275.5 20.88 L 1279 22.63 L 1282.5 20.88 Z" fill="#000000" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-1177-477-222-26-0)" font-size="12px"><text x="1178.5" y="489.5">struct pollinfo *next</text></g><path d="M 1523 121 L 1523 95 L 1773 95 L 1773 121" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 1523 121 L 1523 615 L 1773 615 L 1773 121" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 1523 121 L 1773 121" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><g fill="#000000" font-family="Helvetica" text-anchor="middle" font-size="12px"><text x="1647.5" y="112.5">POLLJOB</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-1527-126-242-26-0)" font-size="12px"><text x="1528.5" y="138.5">size_t slots</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-1527-152-242-26-0)" font-size="12px"><text x="1528.5" y="164.5">size_t used</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-1527-178-242-26-0)" font-size="12px"><text x="1528.5" y="190.5">size_t min</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-1527-204-242-26-0)" font-size="12px"><text x="1528.5" 
y="216.5">size_t max</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-1527-230-242-26-0)" font-size="12px"><text x="1528.5" y="242.5">size_t limit</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-1527-256-242-26-0)" font-size="12px"><text x="1528.5" y="268.5">time_t complete_request_timeout</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-1527-282-242-26-0)" font-size="12px"><text x="1528.5" y="294.5">time_t idle_timeout</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-1527-308-242-26-0)" font-size="12px"><text x="1528.5" y="320.5">time_t check_every</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-1527-334-242-26-0)" font-size="12px"><text x="1528.5" y="346.5">time_t timer_milliseconds</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-1527-360-242-26-0)" font-size="12px"><text x="1528.5" y="372.5">void *timer_data</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-1527-386-242-26-0)" font-size="12px"><text x="1528.5" y="398.5">struct pollfd *fds</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-1527-412-242-26-0)" font-size="12px"><text x="1528.5" y="424.5">struct pollinfo *inf</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-1527-438-242-26-0)" font-size="12px"><text x="1528.5" y="450.5">struct pollinfo *first_free</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-1527-464-242-26-0)" font-size="12px"><text x="1528.5" y="476.5">SIMPLE_PATTERN *access_list</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-1527-490-242-26-0)" font-size="12px"><text x="1528.5" y="502.5">void *(*add_callback)</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-1527-516-242-26-0)" font-size="12px"><text x="1528.5" y="528.5">void (*dell_callback)</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-1527-542-242-26-0)" font-size="12px"><text x="1528.5" y="554.5">int (*rcv_callback)</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-1527-568-242-26-0)" font-size="12px"><text x="1528.5" y="580.5">int (*snd_callback)</text></g><g fill="#000000" font-family="Helvetica" clip-path="url(#mx-clip-1527-594-242-26-0)" font-size="12px"><text x="1528.5" y="606.5">void (*tmr_callback)</text></g><path d="M 1403 69 L 1648 69 L 1648 88.63" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 1648 93.88 L 1644.5 86.88 L 1648 88.63 L 1651.5 86.88 Z" fill="#000000" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 1773 420 L 1793 420 L 1793 10 L 1279 10 L 1279 22.63" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 1279 27.88 L 1275.5 20.88 L 1279 22.63 L 1282.5 20.88 Z" fill="#000000" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 1773 446 L 1793 446 L 1793 10 L 1278 10 L 1278 22.63" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 1278 27.88 L 1274.5 20.88 L 1278 22.63 L 1281.5 20.88 Z" fill="#000000" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/></svg> \ No newline at end of file
diff --git a/diagrams/netdata-for-ephemeral-nodes.xml b/diagrams/netdata-for-ephemeral-nodes.xml
new file mode 100644
index 0000000..8606170
--- /dev/null
+++ b/diagrams/netdata-for-ephemeral-nodes.xml
@@ -0,0 +1 @@
+<mxfile userAgent="Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36" version="6.2.4" editor="www.draw.io" type="github"><diagram name="Page-1">jLzH0qNMti58NT39A2+GeIRwwggzw3vvufpDqurr3n12nIi/BnpRCkGSudZjVqbqXyjXndIcjaU2pFn7LwRKz3+h/L+Q5x9JPH9Ay/WnBaYJ+k9LMVfp37b/NNjVnf1thP62blWaLf914joM7VqN/92YDH2fJet/tUXzPBz/fVo+tP991zEqsv/VYCdR+79bvSpdyz+tFA79p13OqqL8584w9PeTLvrn5L8NSxmlw/E/mlDhXyg3D8P656g7uawFo/fPuPzzvfX6pzP/Qtly7drnDfwc/j4W/x9fhv//fPnp/pz16/+83f/regSK/a8rZukzVH/fDvNaDsXQR63wn1Y22eY9S//ech62Pv29g/67L9lZrf7fZnAcgOP/Dwfv+nW+/P9+G/z9Vhot5b+vDd6Y0bpmc/9rQSDQuqzRvDIgCp7GpI2WpUr+aRar9t/379N/TuqHPvvT8vdz0Kc6W9frb2hG2zo8Tf95XHUYxr/XWdZ5aP4dJsi/RxmM0/8VTf8Z9n8matjmJPtnrP9O/dPPIvt7Hk7B/w6aJ92yocuewXhOmrM2Wqv9v+8Q/Q374t/n/Wdun4O/0/v/muq/t9+jdvsneP7vuX+ieQSHVfdLIHbP5rV60kaN4qw1h6VaqwFMRTys69A9J7TgAzZKmuIXB9zQDvPvUmj++/c/rsG0VQG+u4KRZaNl/JPYeXWC+WZ/t2T+aYX+aXmO02iN/oUyf94i4tgX/0K46ssa1gG9pWJgnn+67ZaCWzxHQfK88B3HBKD9Xwh7uA38HBWM0Aqfr4Ux2fWAFRtQre2rffhcUd5Mbpu3fXiaw7pGk7lo0ut7pmSjfpI1VTl+UY+XwFjvwoTAN+qCZQsOZ4o3m7NMIVjHZ7ArBQ3vot+IjBzJydhNMxUSVb5Vajc5M7+I7doG/LmHv/v2QaP6raE6mqAU99xYrPbnBQUv+z49Jxl1UC3vBXuBTn2/fntj9lz1HG7LBmM8p5W83EWWT80IP1XPSVnlIM+DifmaP68XeOly8vkANFLnRq5orIFwQcQN/F3r5wUHL6SwzhE6c7P6xcCF44uVrWhtyyOT6dUAV0qt50JUmTyHS9ZCUf8cYBZKD+DDCbyjD6kgCRjdPtJzau27KY06L1Lvnnd8sFRCNMOxapp/epDk+9vpaXB3+k6f1wCOwMBmHbhgbZLLDTqmXcnc+7FjcuT0kZ3KidqE76nz+R6bOwvx7n1lLc8veNYNHpFCm96/G7DXnz/PO3a67783ndrzY7KN+1aFhjlgrAfhoXCNWsM6Vc17ITpfQiv6Txtq1jV4+XikyTDKJWtpsJyK8dEe1cfqOYoVLRwSy2LAIhexbnMinrE2+u+00ZFYqFlKaCc7ExHOMKn73CU+CXQmMkxwwFTZW8+H1W5XKpi30e5uYgop30rZK/9ayjOAYl+s4W3OKbxtBBE9D8A+Y88ubxiu9hnTWvr9zvzx9Rb6pISCrJpPLOJ9zFnm/W1I+kulo+d8pywEOFSZtje53FKZGN9sjJxvEwwH2mWDQJIkzGJ5yS1ipuPj0PpI+UbEVmBdtGtV7u2bHXtEldyv/JbWnf7ZEkeeGf+5elEk2jdC3YKsj2eaRRGHW1k6Jj4mYju1cEI5MVl1nk+ajwXiRE20+X6eMoPIg+SieyY8vJGiqR8bL0Cs+vrGz2VnFwdxNnnUDRkjYtDxmHYQ0ufDEuOtkXAG6/dBOvS4bj7x+sAxS+4wpdk21XEJVFiS+cyyJhsEdQzwYc/jKxZ8bamfYILufFqP2VYu9YHLTdZtKSLxebfXZr2WkREoLJK/U9jN9eetVgKGwxuM5MGeDa3ea06DG3Nqrp66f9c2iBnaQPInW8XZMtKWSOb8/FZBnaS1hKESvD2fwDEf5Qh6PL30sKtpw0RIfZDlfQcvvATa1RvJCd/p6kjOQWT3dMYr5xigVflAHavZz1SKfnIHfd/GUcjP8LkmOc3BknCiMbpOkD/TPQsSW3bmX5hs6Zwhu4pvfnJuhkIdNY2G8EqQHzLtFSQcMrmE8PgYXSre9lwM0Rh/vilKG44J8P6FTKdNRWisvy8ogezkTzYtMko+aXerJEX1IB6Ht7sPR94td/O+pORlRdsxTbeWuuMXWcks93d0z9MlRXP/z0wFfo/SW3kk8hJPVqfIm1Qc0loLASJ/kDVtl4ee4u4OIie9iuZhEpGIlEPjuUV+kCAOJxwdy0SqVlaAHJwiLtSXn+uaFsIQZg3dRMKPtSZtW02N+Fssj52/b6h3G2tCYyKtw0RyhvcuKTVsYugMp4BTtnU9jpROH8xG87S6nfe7iB/xq7QX48mxwaR9kG08P29jlnN6nSCkgcg8Amd3hhaUVkZi54UOl2wEheq8tRjW8wQ1DslBkhy5I+Au6qwBJEdkGKisJBGuudIJ4qdBZT0dYJuSSlDTMTbEc6Lwjsg0gjf0ucu9wKyPQrl0vgFrpb0HkdJBruTrAJDsN6qyyAu5YUGimiHKLE+jNCA/oI6lstgzPoGPUDNPhFsPkiY2Eau4l39KPGu70ifBmlt+50uOkBuN46mGZeQjCtj2aOo17nA8r+srrYx6IFfM4F/oDMI9vO3l3glWh9hmYKZFiXUQ381aqIEU2wsP21Fa0/oK8F0D+B4JALLN2VRC42wIJV8OWvYlk0llnAq44hBvjd8oo8QC68hIAw7zmz8AELZULt+GIZXdUfE3FmhyWWC44SDnc0V0JfWxN3jD0+eiSOFZrZtTQ+U9RUi6HUxV6X9xJ65IKv+wt3J3mRwekt3pfkQQbtz1kcJ7N7YSC5ETSf1cgRaWkH4EvQKHG22ccwLpX0BjcUMKqVxicVaL6C2EAZzXDQY4c/FwmnrobbxldzRGI/zW25qZtTYl6PSbKJz2x1+sKRCKk6sMKJ/xHLvd/nQwR0kzB1AGm7IZoT0KpM5WZvXlMsGL9NAN2UkWyhFcb50nLGJuRTadwdgqdCeUeft4ehF7iJexVeE3EtQTNXMd4FQjrKPUeaNfEl3KAIs/FxF2yMb8aKl+fY4hIO4kc/wDqjH/yThW4b1oQOiELfSZ++jTikqSxH3GOkj4FQlcWRbqJLnVu4oruCysAnA2lJS9un0H4oXwyAc/HVE6v1/yXTTDLtvEQjWKR2AZzOqBvTGaMhhFEL1fLylC6o+KmNqlbit/6UWKhkjYfjH6JeeP2rB68auPsJYzDNZBx26mDluo9/fdX26dRGGL+70Xf0h4m6Bd02/soVzN29Ba9NSDMDzZQvLtJXEjEcWO9PK08jgZBuIhg4Zjnck9NfdTV4MIyWwgvtHMIpPuxj4FiM4eiojNtnwwKm/LD3kfXlmuM0KTzud0BAKdyojH3pM
2egCnA5VGiXoM9CehWgvupAlpRhj1yDiI9W3manDWLmFmz6sy9leUqEDnzI+hFk1t79lOrxu567zOhSfOhTu8PLwXfoXpCZXjdzZlMS+0m4UIxnknjnrUJHeUX10DUyDe93mVemIVaMP7aPUCt1j5wKnULfuKZA3koDi+itfAtHAvX0kXun535phwbBBmfjCzTIS9oC0sJkhsw4+wEWutROnP1ZPwp8v5QlP6Ah7omYuOR4pBypMZlWA7hOlDSpnHmfEH5jHG4xpyRc6q/9Ip0xBF6jJf4ggaifTTpPboOaAzk/N4CDcdCmTQFZpy2g82EMMHbgwcUHE3PrkPmQ7gskSpCfX7wTB/pWWKipdS8sJVjc2Y8cV7uTgoz0Mkqe/Rdw4bKGl/qROiDAUy+ERXKcPfSoU0e5TfzIucaz9WacvEtxCbbXpPkP7Fn9TxMqOGggQq0I44k5hXMl3McuZ5EELNLSBBs3mmwjmIyC8Oy2OBKk+CLg+B6VlYFuTlosUbq2A/dC5bPMKpLzpREF++te4lBCZ+CAPvoBA8lVBvBgnKhgxbDtdXKGTF73h7VyFiY2yBZgSTtWPsDUQy78euOOtADnBfCG2AdL9E8/XFIGqvYQoefhOiBrL7/WifMWlsO9of5oFcSXuixhmKJ8TY9Yw4H96zuoO7WY5femhSkwF/llFnG1nkcFpsfCBK62MG0zjzAO1H7K2LyFAm77LG9vOD5IbxbjjCor60pH1alKoOgWdpokSpmw/v7ZFMqom1F+Ci99iUHDAx6Vu8e/PjTq/SzjVplyD0cpiUPhX/zRAAzEdTStrx28CYd6FK+rEEW7Dce8ysMjgLGO9W9fZNFUpOzyrMF2Umg8gYaMQkxiUQRPeM9lU902Z/Bh6/z85aICWU6clr+wbIKmEtYleIq4e3VPaZYcAuApXmNHAJrljPPBk4M1+kb9+C+INrWsNy5qGg5iczG9caSYkVyq2ADv5+RT3A/bp822506kb+mo2pv+JMgw6rEBMo2Z3iiuq8zVEcreWLd+HUibp7aZZ+dOUsIm2g/T6tZEOfO7rrYyTFZqBeW9qz0fTChNuaCxSvXkADVWfUNSf/iJHIMzCuYOByU53NPZcugeTDNauFweJWfT8gonRFQEd64Hlo8yrrqwpU76t8IEDA8hMMoZFn70+SMC4AHXnDI+sIf7DfkhHVPmTOTJfH+oJsLJPh98IJAYNmGu6Xzsbd21z8/Bg1SWKsmC6tzftD7r3Hirwr0/LOChVFAjiyWMEkh6T4cruAs5bCN1SELP2BxIzj1LrERWX5xox9rosXIVi4T8NJ+wfb3hoYWnhooWM678XLPyRQAokhs1WhQgK0tDKRV4mqKG2BiODi1lxnxn2OguEIML8ECD/aLA30NSUvXteQX4o+ga6xE02cYdRJVdMHaLh3uq2ZY9tXryjEsUrSzlpl58z0hLZUMSOwAw6M3B7P3aWhx2232SmwfmnjjWVg6v0hXBxdrAj1tPTNFF0ni8CViPmoNxqKf8BNXztMD1YCxpi+G3KKltV4zZY5DwIHe/Zdu+9HsV4l7hTaGTWpEiP0LiVZbe77otG0wmKULmPoxOVazXOEAWhd5Xw8ykRodtSO563E58enlSsfcTwGnqMFvSaZ7KB1t7Xh2m2zYzqcDVf07Rs33Bv3gS/9cH69zrZ3tAvXCl017V6MuZs1K/MY2ZQeucpcRriCk4kULfMjojR+GtirTUQ5EmHoAxOBWY2hw7Tl6JeHM6/vUvgGlD4qlTVyXqKrEpB+PzDw9XSIvUo8r181gTVUrksf0zge0+aG1AtANhUXL7pWiNShqoSMfAhoSvlJP+52bp+/IY08EdSOg/BG+BzOvl3YUt/U38mFzzpVumx8GL/2aVVJP5LRw3HKfb1cFQay9W2f+ZPw8IzpqOge9251G0+Xj3VbYFQEGNo94c8mzFarEC+VNijErPhayF5AnkGn1LVCmSmoafjOPa0pS+BqZycU7E6Ufcx+Fjbu237hNtDh4wGlZ65psUJsiYLn/URiYUlUb3p8A+q//z6dBLMi4bL6ID5PVltxbOHa5W98SSDXiCXkfEU3BwURMTOE/5aAnL5lYOHZvaq9nW4dKG/p8D4eWAjaKu2AnoSh4D35KSrSkyzYEu/ZW5MwgECU3ZReocuVn205ijGQuRdEv8yORKnsJeifmKNOQL5e0am8V+6WV2KVifCvGkmQau3Vi0E4qNMc44vlKD0nSXlmTZaKhgDs+JJm5XP/D/F9opRFdiXmvOMZWxUaOKg1hyMuuQ+9l2UGrw8JmOtK3Jd/NiGu1xIFgEJ/dLpolef9ib4bimbea4TQPgDpSla8rX1AlYCMUti7odjRADrNqzp6SyZmSTM4u0Lq19gBDJJOo2hQNWP7LUs2HOaroeHKwtmJYjK7kpRcRmOubVCM9nWSslLLb3/mKKmiRb5F7SLz4UzEP12C4CcouglJpm4CjJs4+aLVfQA90ZqWeEkqhrdQ9Cg/AWi5AHxALxlezNpc3KyhM6bMXIGhmhWo5oi7+bJdGdmD3kp74iEFDH9aPUv5dJRtguOPt5YuQkhkvCcwinKl+/WkVCwzRRv5dgMGQjYq2E086U624Z3Xd71oqAZoHtIJ0iLgbV0yVSTImNBx+AxPQygonpnw+XMYWs4rdzDjN0YKDnBSbZLa2MVVb0kx2KS3gq/8rpqMpW6M1w6zHF43PoRVe/GCezMxJgQZr9ZZjk2suusqg1MvKlJenQJMDsM5uVnQjbi+ZAfQq2WrK8N890s5V4BTuu194HNJphVzVQL7fue5AimKR8yv+snqr6vCO31EUkSXzwn6kbW1ao5/sRzk5iSexmEmdDqrA4i9ipW6Oaeq58lb0zU9gfGJAaLcQ9z1d9Mm8iYRgmKvWzzA9yTnlooHEXt0fmg1rlr2lpR3ZSkLg6+Qt27I5RsmTbvQzBR/bStIHPv9OjgqpklaKs4L5cS8dDgxwAebFw7sYvKcPY+q0hbjyTdy9fyPTkqSyBc5w9lttY4K/Wlp9yJ9dXtyHmu5FKisMU85GuUt1p/u1URuCw2w3FCXZb8B9xegWgvtJ8fLnL6eCrd9MzG4MCNRHkdFChiyUKrsussk2KAsK730EU+qHtYwG4P52/0MnvyNqPFdKQ1pnq/kaIHIhlol5gMLCfldWNnky6tEPnJBizRXXuKmrBumso7D3iE9DSsJYytzaml1HSLYPkN4kwZ7FW7xC8oeuYQz3wD+o/1n78d/X43E4kWBxDX7ALgP7gWTmk2RFPJs3Y4pJENY3CyY7MHnkoSgrKGj+a1+GXGrBQyRCcAZF76NIfx7dLVk8X7AoveHv5Sn7Gadg6hRlMWX537NSABh8GWzzD+cb98rIP3nKL/d9iOi10ASQeso5EsDtVThE7F/bPeGvzo5ZAz1q+pSNP0kAvu2eklFzamtjPAIPnfr0WD+1207UT+ZHHYFVQVSWbN43iyaVMA1T6NrZe6TRr7CgwFwHnUC3amYXDv9wZpX856Z+05MYqGNnXiixz+/sEQS8dHSYjSSMMVFAJ
495xXxdcN5ZqEVOTTm9UZuKDlKtfY+Ypya1ySZOfcjtFxMC6LRlSLmi8J+oGb9HowsHXISBaVvhs9zQ8t4q5lhJyIuISMZa8SNGrAvvw8NsekMjjZJF53twMG4y/RBR7Mpyd0nlJI/GjExOjIJqqWE8f0RgiqE8FNibA9+bRe6JKKzNfi9gNHxy01uYYgH8Hf4QsvIh8UsCfVRw7YnjA5bi2+aMsvx2TjTIM1jdM1nPlk4nVGkrihiQscH+3IEs9WUDnbiLq6XtNbU44RfoPglel6353KnOxd7kKxVH/ELEPSBU7tRfpNurylBmQoM/VLHJgAbs0CxXVu1kj905VEC0yYVUXac+UqnmJtI648FDyMs3W97es29ngAsND5EUkPOyx5VaCqbF2ClfDt90niU9XLrVVbz9UvyJ48Amg5U2jWfHgXSMvPt6Y7emvuW1xb5nh/mUGkOebeEO7SrIyB1sQjJrA/rO8RQ/aUzb4OfrFddPd1upor/1IVsKjcb2YElfN7L6faTgpiGXn4AKyDwg+nSiwMgSTy8kJNLanbnDG9tBAdUTuWEPbqkTgVoiqX1Qmj2tIBq+l3b051r05Y0i6FSs0nBn+xaNxWZShRH0NM6a5PEFtNPS2bt8JjKge7tbya4tXVq11hNNWoHCxarNemrEMbmeLMuR/90rbTkuJEJTWesVm6KHzAm+hGdvUy+mPdoaVdQbYEa14Ya3WJKEeQYu0SCL7oBxwYSsBH/+tDwUc0THf9WbYJFtSxeGvsmiMkOJn073cxOtPZznUBB3Lb6fg49/0YdYa/UHWoeVM4oWT1sVkvreMnmlYVBPyg8DskpDh/6+naHMbM1cvtr8uaF5OKXLxYt7m7OZvZbcso24CeybbnCdsndXcZzZaObi3uZKOw9Pcr6OS48DUyGyHqbBThG1SO65rPks0oiqITWzZyOedT3+iiG1uohUfQ92/Rr9der3wwn8hS2dZwY59DAe5vJh1FdxC+uxRcpfAnOyl++aDpgppitMBNVqEeCQLVLcef9DDnREO63Hl6pGFmEXV5nbJDoW76541xq4lWgVVwuYm3fGZibmjC8YaPKwR19uAG2d/HhoE9BhgTIdAzxd3DpeZ2RR2b5eQsjcUyDAPNSRyXpnSYbQfxOL2fhZTijVw/odNZAwh0l86n/pB6aLjByzgL5luPlM/GFIRcCAdJm9mXnunl1q//k2px/uzFBgsi25STiF29OwIClQZ5aauLneyMFBr9JtR4f59AhECXps8dCuYfc252sEwCvyFoHzMh6B13BAjg7AvwNnbkeg5ozsop4BFgntPMCfTnyxK/DC8sU2sM95qMOdGTiq9Nsk/P7ctOvh5LDdgc3NW35d0TIRwjmXfUQgFJQ4tHmqvGJALHSfkefMevRiRMkqkdUmfJ9g7HIji/WzV+88i0vhyJ/Lck43upkkovQ+1LU2DWUEg1kbEpbSfeyYJgkgcruqHUwg4LVC7nb1dK2zdQXPaL4lHRdcqEYJuk3iJG3FzjhOnoIYs9rHFqyET/sCqPkBvd5PU1efU3pHW9TcX8oKxN9Gs3Wo+JAVY0MvzKJr3THm9MIrf0lPqBAaOHHTsIQfwymXupgXQE2LTjqMFobvTUhpIOQKSzVkBtEwfdDo6nbHocs5oqd4z5XW1STeOMLBxmD7ZKjLnuZtd9vR+pkisyks+HrgcgDviTrdS5EEqd7hMW0cTvt3FRp3ztWGZ5FKh3J/tr8eXtfy9xfTeahp9PHoOoUo2YchTmSeOvU48njeljID1dQiP/61uqrfJ72oLAUU+aplxMork7sku+yS+6VpOtdBRt3kPQxDRbdW8+C6AHzVa37qq2rabSrLRN7256ruZNy2+4IRJHULLPJO/YaSXCse+4UGi8M5cHamLFLyC2FFBTmIWAO8rx96g34QuPlKRFPFpfc0ayYFhVDBIyceszEoWar13qQGxZ0BJZltkaP3b8rIuLSo6FEL/oU1g9KxNQOxHPUxOa+HE1mz5t4DWiiBBTSnMDwiWPWmLM8zjc177mntulNXb8VLNYDoFgP8LLHmAILc1vavB2alvPK+ekuOoPRzt5lFF4zSljCVb/EExeXPkOR4E0LrTnSzbWbEbmT5p1zbt8Bd+5tHRsqXvcQCwg4O3eP8LQgnOMTXmwELfvw3lf+CZMAMeSuLeHfUojsQOisNGHXXPNb0/JX6cfnByxonC4Ne6E69U00xDiqkEfc1NYCzBIcR79KePS+VngFCQ3hcwT45jDjeE+JHCF9pctvbCbD/O5L2FfDrV+R6wvfIAzgXkBbiqv8VdNaWuJP3LfoMx/1K6ePmnuDjQVfUBXuUJjwstyIjP01aPtsfFBFOVFxsxsSQj/cV+S0YoJ5WVvFY/T0F6Pglup/QBlWAoHAi98GlrVMUsvudbomy5lCwOZ9mR1qxznQEiyiHaz29kS7HI1jS4+3d+b061HOILABAj+2WALi/kRuJ291OFjkWQdhX19Xzbxe84LOcZiCcQCaFCyiU0Xa82a0gO0B3+Kyi2/gudhX48AJ67e0jbpCdvCYPyJ7YIIkoLef3nNY6K4cJPs2ycaoJN9NwKER8NZQZGBEnueCWVoW1Kbzm7j0xd6mXxnP+bxDjNb25uNMWtsY/fXmyBGl9zF4EUAhIk9/LhtFZk9WEtciThbIxjcDDFgNjlm1+LQ7soVXlH5o4dynvd0FebVN4zRJUMmOX2jX6yRYIQ5jvc2fEVk6n9Zfkt3vCnh0BKxq06wywi2s2xt6f3ETlu7yol7tNfrsDazHAoqEM97OZqM55a1u6d6/Xfgg6wdcFTIdWB7XvxeoNM8xxzSuWHqfua07tR/R87rNMyScT9OyDkahlvrOE+Odwndq+AVwIp5l/B6m9aG7gx+mh8fyQPsnuSbPtPGYbvK1ylDI2UAW9B4ZgU6naXe7CCQDW672UIdCZHyB+hEd066ZA4qcYpAagw2NoFT4PHnAkWLa4tbk9o3a7aDukHcSLd7xFK0jrkAa7naBEIedMHfatzXwjnmn/kDP7WRueRBJ2kBMt2ebj41lGGnTbilZO4yaO+wBtfphzRk2nQ4l0UikKVCeyxDjx89gvioI7BpCxgJO75VIPM3FbmysLkjyz05lBxd/FQZH6OcVJSY3Mv2XRPRpjFJv2Y2wgsnvHK/8PYKtDCQbI1a2XiMJrv/GZBVathO/ETihYTT6ltdHbdcd3j8ZA+cEb3SGUHkXLC6NlBI4649BuVey/JIQbVlt67uLoDhEt9m8Ma5XWgkfAQLFIh7bvgWUOp9Rso/u9vzLrXi75WiNHIvXnEs8B9b8s1rgWPYjlt1u34ZZsJEnD/6LiPK2kRKcO+6BVX7lkJrI3850tuZFk2lbwbOYgcSNGHoBITnzBpjnt7gjyPvnJJPX29ia/ZAOI0YQIchrmoR7nATAoZPPaC8rgyOsyz75HY8vowrzNIrDhx1C5EnQ7U6pDZmx9ca2gXCWQTf8tldBOUQkQs/YRMvjoxfO8/bHyFeJghvaOywwaakzXN9heNXudfJavmZwg6hyuEhoO
6s8id7duaL4QRgumcUWe9o0YNMzeVJxOXjeze0288jRKH1pRllJkkXm4CVaMWcDlYK03rw0jXQQzg0fzAkLch7/dvcSV0veJn04JReIXvGra6U9ydSoATZWZMfm6stDG/ULQOtjApjWC+xligf0uRomf53sCY3kdntXkUC0qfrcvUxIHwZrd5ZPKM9mztH7pD+ejSiG8st84m9ySJs3Q1u3bOHrE1POfb/oevzyWoszARgPAZGYXhIaaRGxdfP31HgR0M1gW+x0pvvyS0wAGyYFLLNUc5j4Go950Qy/cAzKanEycg39QyYgCdYWpAPYy5N6VAxM0SOVPbtTcvXi9cMUE04WTBouUiAGB8Dx6zbXNHUTu7fE7Jar73J11XxnkD1UJiQ3r7FfW8qGycSoIsAJkWxtj7ZAdbhSb9inWgEJI0OJz5BjXr/KoOdSfTsWuL7n+o1EXxpEKY9VLkhWnofA2CLBToN1x2OgjMzMUXzuchQGfsaI5dhbl1Y5iZjnW5rgGd1YIMyQVmteNcmgcP5usmSVza6Z3SlvvSi/H9juLN/Die2hYEPZSTde6UfykHZk9hc+oEIjb1L27qdDi7MVHS4euqN0RQ4KWLizQVXVql8/swbWuTlHRL7WbZrwLS1a9drbyuTPnLQ+Q7R7mph5P/PNnQ67XM3CJsRjn+ZlbtDVRy2T1EzvsYUpmBT2kRhG7tT5utA940zoal9XYkSZruNejk28ST1o0BxE+D7jr3Ibb4QtNfusHM8wN9rf1LdtSoUXnl2NG4p4D5/fRpwhMKvjK0u7KIqGZ3ofdcQEC6Yy/Q4k7vLAPrfXPNfCQDFlGlZlUhyQvj02AkGxS6arYjLOPAXam0IMna18WV0Vm8HtvMWE1EAuZN73TWkBT2MyXpaSqtZjAbYt3u8cUIwER0H2K6g8Uvo2Y3WL0TxaV9SrIbKz0OjymM9OGwNE7YPq/pyVVlgMJIeN626g6i4M6jZlbzzAU5bJpUQBO8400qiXxc4kbrDPxghkXqBcuCSKuC6Mo9d3WSfeQEsiqs+i6wdBz9/22FRSzBxHafoA3kvrypmBGe7HtYygUJrKMU9OPx8lxmOnwQbPz7udPXJmXGlf4zcXs8wr0cA+tO/3Ll3hSvf1UR7uYDizc1qoTfUmL1dq8V22N3VYv12sEnisJz7D2A9V48S+a00vSELlwK4cPoDgETGlJiyL9mETKsJ1MDWDQmWxfkwq26p98fMRjAQBkWOmILcPtIRSGV3rb/dMkHvneo/h7K9gaYA8y763ZE6/Any8wH2c11CKgB2tnYSGn3S68/cGk3so0rBC/hZwIVJVH2fwCjeU/zz59C1So3JTASP00Zdrkpla9QQOJHhwRHRLlWhYsQy13aSPxK6t4+c67MWi1dGK01CBZW+uluRtqwqHzlRhyhRUnaJdfLB+8xkcyvECrM+si66BB5R/+LECUbF+Lj75xpJ0Mj4eagMUL6Rqb3+s9wXNfzi+kM3+YT4nELKhDQVr73O1jlUNRfDB1ZESnb+KRwNVOUc/1xWHRaYtrwQx+e1eSKPEbr9NMDwQS+ZRlQPvuUVmL/RH3J0wgBQpSxG1Ho418LHBRz4RbXUtmNUzAAU70Bk3Lrl6PmawSymcjEfbH9hqksj9ApGEkMpVz1cp0tdL75DPNrgmpbV8GpEIVqgoHEOH0HgTQjAWnVXHakhLbvEyj9be+aZqiax4tb7hVBy3BBSCP+iLkFofyERIjc2aHhFdwuMVQRE0SIGHg6hq2g6G4RjGdi32qwukgE8Pi96iyilYBAaPvT+r4xTRS/5ggAd4pUVQGALP4+serWa2oiQ+GQj+w0zFPXFSFelPuLcjFhudWaDfhJoIeEMn6eo7vY/xFJTGieBe0PxnCicWjXakWlpaA2WRayAfzPH0zxicjIdSUAdqNtXknqp0ZWmN6bpSm7bCUCT+9kx3e7HzmyGr32bhsXkGH1pfg2jN3AsUSt52OL+7KSVDY17q5wP2ANF3jK/4pkaH1kQ5R4RH+fdTiPim2c06aW7r5EDrgDeLnEnll62++24y1/q3ntO6+Y5QCCH2JYD4TgHGd8cPi5rep1Pfwhy/12eiLLqaidN5tGEIFvIWGzXdoR/y94UNy5hW8VUeSyaIBhUBrQyi4ONEKQectlLSdkQ0dooTYnWWsiEQAGb0+hE2EoWrzTkYZxxDJFUZGwX1rhA7bOVMqjHd5wDBEz1l1Gha4+AN9aFwRkGEWDnG2GvBTX/252XUNpPJtbDKhRM7wFYZUQ17F/8+6lV+mPuapJx2Og90PX0AmbpXjETCPC7Q5fyzkqCTkR53YCs82cr5Xv9qJ8IWHEF6PZBEha83NQkZzWqpsnZlHltbK/6wevXYfC2sDDy39hkuyW2VCASGUSAb2kB5OfW9mAi1k4qFrLtpBg/w7Jthj5J5nw45XZArAMSpzyuAXECKmU/eNIL/ir6uUBsNC33Nd4HtpTU5tZ04YjWbJmqbhNszxOTcmqWENyWF8+RzxdaPHnBjr0oStSW2Sy2/GgoSlWBXIg6EjGxQh7I9Hfj91uBxQUD0mNvkI7seAxmkMgA6ZDHyhth+zhiYC3qMwhh9wcwN9HTjOIqc8+vA2Et7bcVve+sLyMROmh6a/k5iHc12WaEZoZAh5ZdLn8uBtRinE+XwFBFAaBHrzTHl0bC79PFV6bxkv5Fzxi1TC9DBV9G0JpF5PN+WScGn8clyGVQbouuAwEIFAa9dgGyEDIaM4FD51K4DfpwvTlkqhqgmI/x2as5EH373BcXeoIeL5HKmTqE3Hi1J338Sg5divsbg7hQWIkMg6r5CkB1xM0tf05l3hECybx5/0J93oOEV1GUxHoAI0n2Qng2RW9uqfbbiFu4gtPjkWcYMsqM07cKpVSnvLjTm/qpPrMNz2bdvcwLRPsjkq/WHsFaEURHs82pZWAZx55bGpe+3QyKe49TLBtxXsul5vpaPLpV6fSw/dxCW88khGsXUFCAoL/g+UBU7HhyfGKuYFpLomdWsV2NQtfhxwrJPTZ1ubwhKkJrvRwfbESiuM5Fzn3h8PzYwIuS1mRKuIOG0/Kp/MMKPzRXR2axdaBijD8zzQIS8sxhZuHqBpADpmhssUqzSmec3NDkoQYtmbN5l8h5/QgPsxH9lS/OR37MfwcX4Lv02Rp28UferdR4rY0LXV5rXPMj0Cq7zxXnfdpXYbvKgJz0vnZxp5Qiuxqp5C21QRRbB+xoQVAH06IRkmMoZy60E/moThd2Vuq/PhjLeKa0E3x2GdsSQDwQl4ZvCluzPRoWV8tcKxD4S82PXmJ/6ZdVOroxgAS7nPV15sm4P/Th7cTUdhkCGrc7LCPwUKKtHep5jgq59R7HJctcOn/dvrXat1p6KKgzpaP6tYytmOKKkTUugq7HJHjkCRD0R5o48I2fr83lt0VYVfsIkFy0ffdno+H33SKF2q4AQ98cuFnNnRuBkOBD/j+Ez+dyZj6nu9Y7PvuMJL+syXNV6It/S3DkFAhzCsUP82/318zZbQX43eJPubCmLMf7o98CE+KkRj43k
BvKNBAjmOGosTWtxmUBtbNhs3CDKaWf4ki/oul6cWGPvmT17+EnVYCfvraWOd4Op+sXhEtgCnX8TJrYg+Zy0Re/NbPlVgADzDRLa+UfDP8R4ai/xCWeL3h4Qn97II0JsfKtgMMSCrxWCQ0hMztpugMshoTz3gFxOyjZNLDCePLtVAjbiEuewnWokOQwLCf+E67YA6oX+FEMyBJj+B2WbHb2jNaXSlD4ZXRxDsCq/+8Z2XTa18zCjw7L5MrqvguBasEMtnSVfasYIzoCNWfBqhmvfinsigjtAkQIJTV8Dr2aU2+xj9yua2xnpHi+T1zWoejYxjODImDvxiAQXaomaWgbhQMBjCuXv6bHa3e/3JWjsrzOExPcGVAyfoSGCF2OdorkMqPM0+xuPU5NLnB17YkDDIlN0+slpg8R3DD9DHrcGSev2GsPSOCltETPedZ02Nq0DoEb6MahgozwRGjhgNZ0ST6nsmxB0q8AknxlA1dbjKJLmtSeZt/tKhkr8tOVHLlMfA4qklHyM9nKFF4v7PHPLej0At8gRmJrjVGnoCmcZlXVeih8GI2yq+UJtGDPH0vNkUG7RRsyeC9c1RAtnvoFP3W/dQWNQOc7QFUe35fHUy+YJWfFCejL8/QjCcbbF824f3av6+s4DTLPFF9SYm5ekO/lxNGmj1wGSy0ZQfO610dD2DiAdoxkltG6IRXGCSLs7j2jyASPu9c0J+w4+1Y7bLU4d5nnSYFkINpNsJJ9Ys9lXsrAZfX49KZdRf/1SNLED63H8/IWc1taG5FjI3ktCBigxkHEl0SCuzSDDyCHk3zRmTC4gWFvjnnxqvq906bOy5zIJ0QrMVhupevXvKag9c/UsOJL9K3zJOgUVRk+ZSZrXcbqqsBE+4WMGkMdnPCQcKHedx77cCPazT19tmZhqT8YU8ZD0b6DvuMEk30py8IPwRsy87/ocpFwU6ODWlCPKcr4oQDJ8HmCkiSUuez/PEtdjULUQodKXY4kw935uEh7UTtr33ShpVy1TKioEidJDNGtdXksebuARUReImYdEhejEXeWV4XY9WIkSHUqdRmMUI+NEvHJhk3S5qDmyIHMmwXaBcMyM8BL699v4Pcvcf7VGR9zuDatKJ0ILUYlvqYkQbTFkvgwQer/P722EewzvkQfSWoO4JhUk62CSP9Iteyw5+edw70NFQ8e0fdQQllFRDE2Wu9Zy35CID6mBGNmeQSVFomVXRRr3hqxJl/q/vUvbZ2xX1SDJ3Nfwq8Gz+XFLyKMpriRrafMIzzKStBJAN245yYAhX4XurtUccmTmh2Yjeqn44GUfU8nWm+kStmbMp1SM6ODyCOoAapCCGfR1XypNfzSlhZPl9XBhpfz2Ch1aD0Ae/apY7J58qWgc3wltnVAOlFkTtLV+lZf2yjNW5n8wxsS+pK4TZ36SAf9WtoQAsK2KPmwdcNu5/AJhkPWOMejKJQdJe+UmLln8g5QUz1/gblDS3xXNGubjAWlojHCMkzxojzfS7zOM6oPWeqfZyu8bLIpRhm7xurNYIlza1rVrAjKARNQYvcqeZSk8G43vXb828KsK8bc6SYdU7nymjaaDPeId4iIeQfH4fxjsNAVyzo1d9tO5JaydDd6HA5V6uRl9Vwujl2weTuNcyLd5xECe89fWa3V6Qnqvi92LJkBNR0naltp0qnljOLQXLtNjwuusIOKxEDV+Ka5l3MaJeXXDjRM6KR3e+CEo2si/iuO5AZ3i+HjpkzYok6Qdqw31j4tmZ3zP2eeJJwWdJbZKGiahZBbLELtVp8w4LwQmM2S3HmvtzJB5fPrV21w37+ZPi78DKHB8/uSoGYgX/gSP0EppCkiigPGWPqKWdqbv7dRYwxj0C+jiF0jpr9l/Wfk6oas+HcXple/j7DI0AcUlfQlYJmrLYUTd5NX1izsxvdD3Anppw1a2J5XLZjtHEnNXr3NbMi/Yc0SiR+yEJjyEN47kePdMP2nyNeLrNDVRja+0H9HweOSwzNQ9f0sgYXUWx/HTzlXyXe0fybAqYdd71+RRyKlZobUPatGytp1Fvx1QIJxqDyJQziEldS723LSrIa3fNzA2oe6zQ9qB3RoXwFQgDdiFkMPyD4hd1xs6QGkKXCaoTfLEY+L3q+gwcf2aODCz9nlXZoiazV/rHK3IiQarYekI2cQc+yGoDUe2GdeSFsZf/aO4DP7+wE9CClA0TbvaMXlH8w7yedQWRcVE9hyP1j31ToRtjnxIn09MxOlpAPJ9mVlncIT1IdjaeMblFDQRg9YotPL6grYoI70TTCNOxG1PbYLUCrjc93bymWe7QzsRBWszd7DEuVApzbbxwvzSsbxXdH2QJj8iZ83y13G5iyyj1jvrpXyqxK4Ne2w778dsN3Blvtv17tx174LTV000X+BEjxW+4MFPbdgyq7v5shhX7xEdDbnOODSf4Y+xrcb9MzSDyX9QSinzhCn07Cr+0IV4k3bqZoa9v7umYKhakfHLlcsyNOPmsoDHy16Z1yB1A0to0sugOgUSxcGGqbmELUJvRqhLNyTUIn/Ty+Wr22sNDC4z+7V7P0pwpgp2650C8/kmH3GZYgWovjmACY13R+ELuAuTKbvAYvuEqW1op+pfPKw+3cCZ3n97FEL4bM6AQmfXOqz19125fEhwPyN/idOfglh6vUFWoZI3/mLFDyli27Mg+uC5yAYG7dEwZCB3gew5n9Ue6lOnrRQJ3DTqjFCHNgTY22gqw5AFnKSArSoWtKycBTNV1bii3q9+VbhvJ/SE96JC41QH7SSgZ3zxj5BClkWvjl+JWaIpJUkvRDuyyuxgfkOFAJBXxNjhKJprVwuBJWD7qDRbTXHeBe98wJzkr9px3tU0cXuP8Ixik31r9e6eQimKFxlvjshjhAeGtOX4pqC2tUxn7XgXpbsxZg5es7CzEpydoK30DBDP39YDf41GMfKVLxe8H0g6fqI83hEfFSYhezin2Q6T8rWbtjrPtEGLjyYYGubTO5beB/PgiWZ/sqD8L9Wpbzschx11aorQN/nKr5ulOeA65b41Tem+tVSVs8rK3MtuNPYN4UdcFa7NQhNHhTVcSPXgfuRr1x2TRfxMJps2S+UAnxohmu2pFwG1G6Fd6MrtLwUw2JNdGr1FlD6XfIBhK4dbIocp529qDYJ6wNZtUk84pOk38QWLsFhhbsTD6QhiRBwHmeAHoy3C9i9kp7EarICJ8e/Hak/7+0Yxrxw+D6s0cvTFGH/uPEalJpUHBPBgsSS9aRIJLT3Ymcc7Ekm6iYpmFQJleAXb0Ca8VDh+QxZ2taB7aYpDE00W4kAWp52+i9s5X071Pmymq88kHwbBNBmNn79R0i9v59HSY7jo5SXYEB5sBtVwkqG93wwafePypRnGRSZ5WYFfFre0hSmH/5PGKZLKNjUoe3/nyw2ecZNkQwVEiht+C0+vdVjPiiebPlirhCZ5mohqx11sIyarh8+AGCkb+ZGiRuC4sXqAOTb5CSFCC2XjaUP4Ai9WVUpiLJDZaaZMGc51bIHfGqH7GFE7U4ftvMijTtOq2IvC2QK7Rdj//Yw
4KBwDdT5EGXmW9YZeRyDumcBZurYU3TYHhk2DQq6rbE/mgaPvh1JhlFWhIVqPY1PvLToobchLB8OCZucJGZqAqM7W/UXKD2VSg5Bn+Xf//Siq/+43kTwzfAM5CLayGqcVcXTbb5MfyPjixS+/02YRD/yvwj2qTe1L1i+liy3FfuhrBS+7LQSeRAXVIftDYeB/ymDdXdoV0SHJQd8bN0/8/iHkNxrKuVaX2dI5BQNfTo0fhvkqTMpNQUWqurHQF0FiXK880sUlSocFj8PoBYa9ZWyIxki9yfyRwCs9I9u+EvAKivDoYfB1l2CgvSPL+6bp9qWdLRPl3hqzaVPkOOcAdOO3kcPZQWwd/X7pMvhpHAR9/DHQR5t+nOwlaTD1wiCjdl3V9wcmCjmsl+5dVWJ5bgjignQBVPfACmRnP1wOKk5Ov4UJBREtgYY4jKCvCOeVgOZBnbi3X70B4Dp3R4NaB8kpeXjO7DD4fCypatXGeExhIkTBcr+Y4GVTM2AQXtAQWKwyy7CrXA4ISwGN7x6P0i/hQsnbiasdVPf5H9/UMg5HWjgGVFaF4H+SEKk4OcDjyRjEbdmjX2YE97R5qde2W3RHx6tFvz7iiN53MpcL1BICNgrnbpoqAe8jxMpgVVUt2zBy7xTdYcpFW4ATttXSxPkJZflrFydHaJsnKwI6LYmpsXGCBn1mlKWsVfJS+zEQJ1xx+GKXyVKvJB/owyk04h6yJ1X2XJ/XFvxk9JkZ2CTre7E+xm1kx/qx0CEMSuG+jnVFWXPByW9EuejSjgujPuxgPZJ9rEbq4jkkgtkN9NdRZkRIwR4dOaup7Q6G06j/xlX34ItKYIfwgOzZopTIbKT6ExsM87qPaOJG6aj8VIyLfARUbdhR5lXkUbHs+dA/m3JT3tnf2vALx9ZKgTW0zg+xwjGpUbZrVopMd/hig2qAUWsb53iXJEP0SP0WYVS59v5ljZ6QJKzy7SdJb5bwLURf+Vufwlix1aWyb0OdSyUeGCMlhXqKsnqCsI1/Tc0f1UFr6IQN2z6zpRm0VVTbIZB6ohgn/4eqq1hwllmir4TLEie4B9jh7gR7+kvP92/uNjMh0FRXnXNK2puGv5LcrGgT3mHTsxLN/Dy3PySSy6JaoEJ0byv44BN3hY7roDDDWhldXxT+ws7du/ISmwboAizXCc/F09YzE8drcsnENUjGcfCfvncrGVtc9asy5AHaf4ZyQ1daYmhF5qh3L1JtzEUhsvWrS384XDIuVNANgnZx+6djqvAQSoKghb5wCWljf92xYCsp/PeJnvJEeZAYEOGHkcS/Usg1oL7VQzcQDE/INsjndbOnwT7JENKlua8MDKMbTE0YxNZXMtRQ119Oh8Vnn71e1VaKtXP42xCYX4QnIZKqYjQlm/nymnudh5O0YYg+G1oE+fbg1zsrDnZtscxB8ICY9xBxqV/fygh7v+hdEWtDtZ4/jaVE6QOcFosjXrAoFZGYeg9UI1YdLYvvO2n4lYwiLUVOJ4JYXpPoY+xIa4fyYm3Gvj8GKpJYBkBdgFkQdlM+zrstbZm+Zke/WJTbS5O8oJ5yUVewy5PA+7OjTP+bqsOgFSlXrAdKKVPPBcom1D3OUXTMtb38POI1nh7WfWYCdlQadOL7px2Bchcgz4xMVmVb2gMUrGGUwEoXPsVhOeVGf+kAj6i6XHfQ4RNrRcg7KzyBrIL+tQ7ZnveHcfdoIkZ9TGAlrlxwXdwGO1ke4pOxT7NYbEVZpoH4lfQYD/T+/B7nznLzWuC/XD+LwEdpqOW9wI/OW408Wq2sP7zfZ78in9NiQ8am2RKN7qkUw14Izqr1VkjNhsz5zmKsi1dQyXXE5Q+VIfIgmOvfPm5aGy9+NGbwbBY6XZ7NttxJjqmLwqP7Gn051hvd/A5keTbFMdGk79mfISsYAm/p5gosGnaykFpAeAhrBsv1zhlU7WdnVHZMC3N3jPNTgeOH+y+THqIPelcM+TINAj4h05c9qkllJBCOddSBl6oW8B+f4JqKoue9WbMsY+6tPzEDxfXA1K0/cY69GkBQI0mWSY39qnz2nLTviPTfgBqV7TB90AfjPJtzT0FFYodnpBhUn4cGNhGc1Q9KNBMbORzK+gkfv6GFq+GNeRbJNQY2VsVwGk/SDGYchND64BtExSRZLTJQug2WPMK4t9CaTEC7Ufa+NcsCIik0asgEbcBaZNLcSqecaMsbtlgJuwoUoU2R/e6hOKxjPET29mMKl17HbdgHzBVuyRCk6kUtdUb46gUK8aSa6jnmrrOSTeeJfm1QpLDFIkBFvHhX/QiT91ZOkHl+UBBDTPxZDijPYXuU/9pZf5D7Yo+geN3D88wwKY++8yD0LyDdKPbLn3qiK+yo/hhFcYbUkZ9e7tW0D7vU3V9TyG+5eiXEbyTqoK1CW2nSpX6uTa/DKB4n4JlzQPTJuS40+G7SjS0fxcO2OjXGhxK+ZUuV2+X1bQwSeCj+N5dAQx9spPAG3vX9N+JG+cFR2SMrDUAiHmjsyZwp9pOoPKc76wgqArQ2jG4UtvWj0MveYEZ+ua6Zo7GFQszVQq8x+u9djMpsBqh6wvPfyKmqm5XuA3ABzbb01BUkjE0+RvTcjXfv8tkLbhT+YoKiwQRkzCrMExYmrqGZiS3X8MfBhN7Y7rLKqUlf+YsFDd8OPSQxSZ9404bFOmDosfBjMMHFv9bTmt4bWNRTaqFFcBS6lJpGlj6CxMJKYfMFrkuz0uKYWNPUbh9iq6uLicadKvciYV0l5StbK+LtVbE5ZEtSAqx9oTqmLBR7sbbolo7xfqmsPI6OUixTm+h6mP1YWZKxDrBIs198S7u7MytN4Yy4hbybOXau3wKsQhSBKj24z1aZR9d4dp3RZ15mMNASGtt+8MjxTqz4UYekz3M3aJs+MdaHCJdE+0VEVFldvWLtzsEHJi5P/DEPrVJgIs1t2vD28HVa35HXK64w5b5wf9bNH4kQ5BTlIDL5IBkClEiS3nnzdbnkQlEghbr4GF2jGH1HRPyRKZ05M8B5oMagA09t1dvR4ku+frNGF3Swxaw7zbAvf5e0zYGnlG1iSpooiK+j58TqCjuehmZOyj84cen78ky+KDR8fXx665nQ0d7L0TVKD4G/a1FrhB/K5/p+MRD45Kf1USlYpKAoCmfw3w2WU6d+Sh7R+PY5hRuDVNeqAx+nuCoWp1xozc9c1iEdWFb70iyweWT59cxi+cdgZcMi1D8GEGNih1IWCOnZaimCtmB+QQh+5cvF9dGBC2Ov8KF5xs0Jn0PbIvnrH4s+nkeLX+nLarzP+PQYYN6Y7ZjvEO5aLK/LzGNW1usS1bpZ1kAe42TM0poY4AL5tt8GdrBjkivoH/jkSVMIzU2Ray30yDaSJlHLfPdIZiz4nnf4CwinixJHhW1vJ//UNF+KA31ED3cMqYSl94dgbs/Nz1DAEOoqk/J4w+CkGpzPtU7EjGR88Z7SFFKtg3KJRfMeHFQlCadezAItzBKP1sIF6sn8N9ZnrpxgeuzAcQz/dikmFL6TH72gwh8t+G+0j+PkVjJHkc+qfuo++FDM5R
WUe5PM5PKMHZEn4yB9eL/rX7UOA3QbzKXYn8z5BaXKe9atcoEHSER8YrshLE2e68L6YJZ/FHPmy+XMoHiRve8QKwfna1GLUMfRTWXJTI7VVy4vKDwU91oCAsCR+TOSAQ9XDLVDGfowBngSCn/ekC5+9RezgLQXeM2TLlq5T8QGqzf8PL2Et9127kjJDQUoakv18nh+7844n90rG4ssSxKfD3RNdCTyID87wKzC0HAFa9zMkrlGyzcO38e/ZQuSmSGizh6ye7Zp8IvQ6Lp1ZbFmDygoSDrGH8E2HONjxjBkiIx25QtMcO/XqP+agU1DFmbM8oTE7giXo8MW4hghxlSmrLmTzqQojeZ1J8pAhkaxJV73AGVcV5Hji07b1LxASeaucrac6VWJZIIVhQT+fakwkXFOPSnd6kyEA+UycLFNdg3IixCBPFLPhoAKWz8nJjTQn2AcW+smxJu539gsY4VC1xm6Awy+VZyZWzBoQxclN6c98nL639/MluLAsWNWy36URQRWImSxrttaOGd8loYscglztQ32DSyT7Aqb6h0VcCZcPuh6zmSBgumTAErLx5g3JHIBhNaVQJot07k+0NbdkJLfO5jeahGfXWLAVvyFRgF3eBYc/diPh+10jZGWvP4N5IL0m3EygkB8FUt9iK9qXY74MVHdGnsDAlHZkLFJ81Z82vLj96XJ/bDTHktDaKzZfrhGHVCY3c5KOkIcKvkkMsMyn4N8qJ4Lor4rFaSF1itM2RRKDDK6DljMd6OcE9uykE0nBiFeUghPWagOg+H8TZJox+lvLCnRg8bkOH3A2mWf0DojUA6q13bdD41jRpuUthrPJGfhwX+Nd9QAQDOn8O8ulJ/RYGNgjSBUy/LKvnYiJ7gs5NeCPmzTprjfe8T4c21x92hKl+NZU8ty/z7hg+8ILU1pQOVAQoGrwUizw8NDvPSGSzTC+/NjQSElhW8vI7NdzjzjUdNypDGvR2wzwqWon3sO+yqzbKStz/RS8VkETPTml2/vNU55buQlhnkekhXueXN66eXdlw7Nb+YzSrXwfeHxERMlBDN/qcHLRoReqnqB6MXyyUJgzC9IaOPWTOoKoq+ANrDw84EieUidMMK9jgqR1ZTxYsTK+zPDRc1Loc4GqPmNYY1ctMtS7k89VI0KiA/pKB7E2meoGm8c+qWjL0zjG5lKMO9EBKDNyhRU4IVyDg8QBuvtl6re99v/U57J3+/JloKG6CmjsFoG87PEtI+Pl3PDYuDbzPliOG6U6OtPWRKUu4wHsVwQw4aN8+fz7XkN8LkErmsWI36Dt5lmmko9l0F2gfN5asuIgLMCCrdGTd+gp6gkUt2yz/qciji26FnT0jDgTjLaMHBaaXvuelc8/oNm92G2YMwri5419H1UjgoercH2712hyBbnv/gE1VPslsJ/ZHgRe/ONFtK7QaZ1BBrRUdTVaY02tPaE6wd2mQEwh7gBGWEXrw2nEpCE4LENJovxXy/nT4tby1J6lL3BKxsshMbsMLvsvjf9kL97SN/bKzA34hFUBMLxeQsWxCctSuLrtJHWEXb0WcsGNHR5M4M/ykmR41Bk6KpRR9jDf2Xc+q702W67QssXsaiUTkh+vFTjHyQgpAwKuzKVpE+emRztTRz1kPAp8Tpc0Ca/9e64/iKQABw/LEhgQj4QuNhZLK5wwcWwwpy/sbz9zPqWHbFlOhlVSl9eCOm6rCg+7mBHw320XL6wh8K9ZDLoD1aqLyPc8O1vKgyYlCd8uD2T/W0ws0B49xw05n1NN79vpRufH1MKaNtsz0+1erP1jaJTD4DYyUkralBQCozU/bvSVzgqx18YxfUKsL8ZSKh29RgeQJ4Nsqyiqf96SY9BQXEOgj2LZY8/QMp2eZhQqJTlXi414FlQ6R+IuyUsHq+CssLnWyl/8+AoNVZsbbGokwGMnpUM+t0y+XcoPyn0xcPZM0eV7R/Mnlvoh1XB05MqAWCpztgP/yn29Ucu4I61rIsLDns+CIZZlhkO6ZxrI8iEsMFhlEJ6OY43adP3mdfv33SZ5Byz2vZs0HR07pNFIzlZ0ggtUvKXwqR9lo5/q8Ha8HitXZseF0V/4fOURtDXw5JymOsfPZQxwh2h0OLjGf30VyfJEbKvtwEF/Z0mfyH9XOoXi341kAn4vpC6u6zHO0fuMWbGUPdODnAt4l42DEEcwY/Q1ALU/osYS1a/IfsM9WWeNZ+qOAmYS68U/IsrypfCQ1kL0ppoMilfyLRwp2gd8xFMkW0BhVYYta2LNmKr1YwhEMwygDnpjTCv9zlW9Es3zAcHvg8g1H/TjLjvyW0dEzq5as14Cb7F/o16VZCzN+NHJz8XRKsz6TU3W7M6eVg2y1kAw09HU9FFU0q2Hcqqxm2XDJGlkCO60Z5KkvAVpJMm/xJxGqTpEbyIv9Kc/EhfgGvlr9abS9UX7a8AyKgtLNmCXz/crZmNn874rZSZyZoto1NbTUo1bC0GEq0IvSb7aj4+fZTW3+0KXDrr2BQJzuCLSAiHK5/BPy8EM7rASicvewWNwSROkb6iMZJPzgD+sKaLTz9SMIUXs14tNNnrD9hXe+gMw3wYVnEE0TfC9cY/3aF/BT2TrhdyIhMzgUiK9PTf9XfbKVi7r3ix2pM0T4ztb75T75QfR9XcjFV6bD16ohtJysZ8llE7p9AXorL4kwohwqe8knFPYy5dhPmbQoe2toLrf/oJcK1JZHAPRBqRkUgbyWu++1dvbs7P8RCfFaQDulTNOOXZs318tocfXrBcMeX8In42LRH4etcbXBtsPI41OLaUCv7BzBul+LEuR0GFltKHum1HDUzBSNzDTsBsMWj0SVrCkFDFIcJgsG9wg59TEiwSlYdCP9Oc4tlxaqqJtwJW9MBBHxNQy7C4JdZz48dDvAMHJwqIetrLt7zjxHST73MordTgabgA77+Xvf3o019D4wVGlD6Pnk1zT2RxRKaA4om6PwYuSHGzwKbvSrMN6STXxKnpbNBmWMcbxjXAqs/fp28vM5tpE6GmOrXaEv894h5U6YRiKJG16183aLX/wt8DJXXqDdXt4Y0OfgVXAiWJGrNEEuaSmpt6qAEB4k12hyNUe+MbRAk8dUenkge9uNYl4jZfznO5JzguPWnm/MG1GQJvwrJ/u8QJZFDg5mptlw/lU/MkWzpzFBmHpSmnqPRjQvmJmUIg6HBCdjWiA6DuyHkT6Bxky7GN1qlSN6q2jfwGpmWZEscEcItjL70udLX5wHev4thJKwluiRhVTaUG4YkYUC9JrUMM58BDUOODEf1yAB0gWNlf3Y3v1SGi3YpAMHsXlYUE1m58VNQsC1ayB0p9qyZLXbpsbLUGQIsBTzo3fJ9oznUrGVb3WC4OKFUU7Of5CAr8TBEBIzfxLiz4/5x+tsgff0zdYr2J48DDCP6avMtDTlcDhvb/VVO6MagccthtafeZ7BQW60GSRjiOGgDd+Vhc5iFLbgfp7BMDzTWtyLkeD3DTFV3HmbOHeh9jKTQLCWoXGgoNNSoj9OqaCjnOJUPZzSOXna8xfPAtJwXeMaX6+fr7a29DX
7lLAX7qKhl/M0Wj9bRdGVp+wf7Sr2cB5FCfe66dOM+K0c48j+TXMcGkIp25dX4GiZpAHgKzLb4FHRWvPf+0TJkaYYqQ2P5a8vVdLaHhxV8w8IFq1MftcMhIYczfKJI2OugPI7YX7QWS17+c9ejlZvR2alRWigK/z7MnLA5nfWbMJ3q/tyeYJbSeMlOU72exqxRx8rkAomsqsKzYRkwsVZu/WDk6tSbtQS4+EzAi+xhnyCJa3WEGutU13njsvw79UFn/ZnKd2agsM5KPRr0Yo5dPp1cC9DALMq3rklPvvuzLRe9QWBLxzY74vyWlTtcQvgt3KHy9frqHM0su8Mat05SLKiiAUynK1uVCIek4ae5dBmOJt1Si050mzx+GRSxFvPypo02wDWDJg3EBzcd4/3dewRMl/Mvq7+ebe9J9bl8ZjLogQGq1XSjHTJ8EoNy5IXs7J9s3ZiZoRPKlFU6ZN0N/+6DXM84NTTY+xA+wP/FF70Hsl4Cknv/CI4sXPU5RRZOF0pTPIdZI+ecbTgn5h0gq/NdFhoAl1rkY7JaR/HVduAfJUFW9VEikjc9jSM70Ug6wZKXZ5ADuURKF6YU781JjhHCS59ALFXjd7EjzAe7WzO7nBU7yRmCy5nYSST4AIktsJrcghpLmmCL0jpA0Rl+EcFdWLll/jmuHEfGFpqWwxA79tYktJEIVzXko0nmeYm3l0NqCWs+T1+9JviDl93J8wCa2IUdTBNXx1+iBbiQw/OzCBAYW9EiNHTPkOLrtbyGvJNxPFCidEFO0DYRKr7LXoteuoR/lb5wIuEVsBxy+5f4mwpeQahEFPBmyJjIncYPiv7l/qFP96+Jh8yJTPJkA0zKIih+DtP3SpC9iSUpgzWyjPNNHUlOarrKaVj6o4/PwBpXtk1GwTmue0ML2FzpqOHiUBAYzL9B/pZos7i8OTXmY4Y4faFzr101HUeR0IAIKnlQKMfK3cnHGt5DRdYXJutffZC4ybJKtMW1QtkBVygfb9I4poI6Db0XB0C9/5GtPhQyOn3eUj/fv6CX7omC1erkoPP30C4zBB2+zjAmHIZkPYeWjA5MZcFUW6Jy7qgjh/nG8EqA/mpZp5QLVRTivYLQRtH/TnPb9IhPn+juQRP6nVYBpJWLSEbl1+PfULxb/gUtCRlGqc2V+/PdPVP+QLy5to1xpcvILkS9HgxLOCsHfHzFE17TCSqs89vQgFjZGqitDxHxk2NBymRQH2x+xxx781hKROPfB/b8hvd6vFQeVzUfWb7fFGUI15coX+h6sBSazzR+ukqW22533166Jcs8k511mdL5F5V+yT4YFvc2sIGsTZFm6ot/0rPd36XThJfR+Ip8Kh55E4IA1UdV2oz6a7ViD9VQCFYpSTddUJLjD7F/NEmWXqgsSi/AAy+GUADrM2IW//jYMeldn2Ux+RhW67bEXeiYeTzVG8VALk2EfTdapFWSE/LNNi8bz6kVFccojaZs1hJG/kAGaSe1CAwBlxPCDhQbLom2VaO1LRh7PXj4OrvQ05mQSzvNnecw/XSikT7hv53CwL2ysfzTDpN5d/Hy9SqVBOIk1F6cop76ymc9wZ9/103o04Qt5UTxgTIV4LKqYTWbo/UH3xyJ/ODDok0xgI6Dv8UREHUW6A2bN2+P1gdoEJ8BTWGRpHhQsi90crglqvJxkuutTBCYWf6T2+WrZJBh4zNxtqp+JDScLEGQbF3AWVWaPzuh+itw5H2l8zshkpaF0LOSnOfMvpN8PLAUxQF0cJqOy9WD4nuXWOFa0A+4vAlYt1xWgjmw7/TXgRXtLv5etnru0ZHjQpAE4iTxO0S7F0o1VpK46IfC6TAcmrBFFke8DauK8vCB/yJ4GM5hbTjIwtdbgXkv4/N2DGCIZ4lr4hy2Cv5TeVUt/t4U4rFw8v9T4A67JS2gTJd3l6ZT1ZMx/vaUQ+YjWccHGQvBv863AbxjHAlEgxH8fT0AeVsRCAYrltAZp3lk5sdHRJ0R9Oq09HWmoPME9s9JZdHW65I+RhbYvvj6kSvRrmtt5MB0xPhM5N5uH3p6bXV1GwckXPZfOL5AkjDUtO9fEoFg3iHEtXXZse+ckdloEAJi0IHtJVFzHEkNObIiWW97kEzHpoFhFlAUKK0zxI5oaxaAUV48Heog9Akf3NMadvnYERSvNV6s60fQNxyZKSYIW5sAwTD3S5VjlJyn76zUCznXBpg4l/wsgvSo/11oqE9xyccUe0jN8Hf7ns3/zBXwtkSpc43V/fU7Sq5W6fZ6TkF5gI72x9twaiin5up5shYLo+rIb1boQa6XD6Vn0CDAZ4xPUZ9LaVbOU32iRiJcKv2xt1onibrWYC3dXvpf+Pq/c/KADzOWLizPRz6I6q/h859dZPPqKQS/UMfZ8f/eUhW376W8b/6maH64pmEZdAqnDr19WKAfUu7AbTYbLrpUSQKhPHfr7JnFaFYZpPjYv1GQeuJHvKcFOiniNPEUikcVymd7jFKDP8UaB02btEL7ZyvzSbTax5OVYu58m3g2IskWefInwoWFL+Zggqbw/EIab0ZYY40/1ltThcZPcF3XNTLUylhIv+6P9PXxVsWJu5pYNtXjGfSJEahKLMQR2f0PzxsiF2POgCcmdnV66Loz7YOU5Jx7G9iJUdVGVCFWW9twty9WEjz8hL1+wx7c5wohyIoNCA3f8BaVc80Ee3Wcqnvz63eLeSIPbQMuqgumWaZVBP4nIXJ9PZfzpPvrimOAMJMp/8smrHa5XudkydVSowVIELL105cmqO1WpvvEiIN3liu1plIzqRfIT4WNmzHs8zt51ipbbrXs2K0Nsssxw0VS17DeGbQfWdCSgF1+Hgl5+r7jiSH0WHpGdCo8n3JIr51ppYJ+NAiAQc0ocEExHqxJYeDYBW1LHyuH7cxKtPSwI/nNCX5WjfJt0gbjFEgQ/33dT3u0+dvqOiT/qlAF7gBfOZUB8bSneZnDTFygdabluZ4/lzjenrr8vFonCcI0yErniJlJP5fdrj9YhZAyRrGVuRbXsu+xLxMGyUFxPCFXs5T017pRGVHB7xKMjWNYWEBP3Ge3UJCndskhW/jvCSz3qi04KqDGrkgvtFhF/CLQUJwAAyoQU+tBEMJpqY7KBUvVFjtzKSiX1+mUQod/Udjdw19+d6myHv9J5+K3XbghnkfMs1eL/kvJboNtEViyevhulpNsLiI5+6EfZk3znoulzoyenmKzZZ63eMBkBD0vuYAa0mJzS9UvFBRXRjehZI4NXEqRjVhMmuV1fUQ1uEASACeSxhlrT7Jma7MIS2UFy1uZ2fsllCR74BxYs9MELPGNE52x7stlwNaYP8V8RjczfPIuRFXjwFHl9jo6LTKFp9MsDB/XrCG0pNbUnVkVVk1rxuiwZSixLeWFKuTBl6cn7Rr0h8XC35gqRlm8rIB5fBEgc/Y0Ymsph7ON+k4fCPEbYURiWijd4vKpRsMzrEZ4gvl6exWsUAB26KTKM4SXD7DlJ3VAT80zCGImmHbbmLURjXFR2knVMHOmXzdeQa7YOFrbSTFn25ztQKgPyG3qyLKu4
Q+uXprX1gAff0EM4jt/QeFCX5RppYvIdRGkViX4T5atBSLYSTTWRzeAHMjj272XreOyYS881+xhs3+7awiVw5vBruNjnWvT22/v0EMi6eHrmjWOFG2iw3WOOE/YrJ3AwRcw/gsXosWc3tVM7iMjttT2W7zm4Sn0JvzQSkA/1NbDF+nGRqUUnzwJ63mJ6/3QXnIULWMfGAhsQakA5rAhxn8KSTxwbIWkYm3HU+bM60/ns/AHnhjLXgqNQnbzq/ni6wkjeh+Q3LE/r8q+VR+XpCqJjvyCgixIqejmHxqFPY239rjFpNCGN+TFKZgIt7j/lyQg0II/q1LyCzyXgC8TTTlHdnF5Mk1YpPvDl01fBJijxDzn6irsjKhT6R39+kVobc6oZKF14ZSjO+x1nCccY0pl8KsT3nIgw553QOGl+IYsNfwfmCakPyy6dc+ZvQKmHLFUFAOZFMA5hlr3pmXFygPNHMsa41yaR7JBSMYkXAkIjYSjzcy3ePV3meEGBrldkNY90EmJwMLTMVL00whVJmz6LF70r29NxXII21QZzDNKb4WoJ6SlmUnUqfaspobEacWwylPjFIrwP5r851/7SC8nHVT8By40dn/Sd91O7vuNy36N9phT8P0FwdM7yB6sNOjE12TbGJo7IZODnWoinX1LZijvP7zBnz83aDU7TeNejF8hY/rJk1KLQnVomzVi13t9BS9jefOoWr5GqMtGjZYHgzIBTWEQG2wP5nuW7wxJmD6kplXVEgoOUBakDmFkjerNvAAj1wGLMkupd+FN7TyI5Udn+Slalj5aquDy/ajvpGT+2gViL8W1h8V+3wPyDrsePC/Kyve8M2aC6SxCgMlZtlgURlIo/DvaMUpp86eSrqfUjXRL6R5fxM694UFhmHiH8wosT1cpE6+YT5/2UPqQrRuP+miQtqKRWMISUox5HnAlegNBKdMkV9Al8qslZYkitZwI0zbKSXcM2H9VQLex8jSZrgFPUZyeCAzmOc29USid4ZmB2E0F8/xTvdjMdF4D70nOjGg9ko8ggev7x0YszX94g5lOWNRoeoYjRtjOrQ6Mci0PA2FSkulS1lkOwC7pXONBlZITYz3/ksnmBjHPfMjSyH1pS0wGVbgUqfZ25/tJ59w50dYOzSik8I8AL1SZtFnuXas/UIfBoft7OBuXvUWTmAO8sPn3HY8T6v/Osc8nIXgBAskmfhuVgzDOUip7UMoqr8uTSAy1H/QoLgA2/DySiz/V8mgJg2CkuQAR+dvG67KlALoE2aWOManjssfgnJoX9ElcScUYCsujpflBVPfGylYKLKjfGq/vyhrIOrf2BD3HghB7u6kauv6n6a1lmDt1BTqZUbSZOBY+b6H2dFD/BMy4cxYG6WlEQZPkkj7MzJZlkKMpUtJV8XfiDvQRYwSEUIcjPt+w87Iwsspw5fic8mnpJ2mZ7maVDUOcYsDlQERZn9fxsMN+cD6X7qNfttssuo4dkpBzSQ3WtAfBW8jwKd05AUSihE+TqeKHTyEjRJpYod+M8j6Uy/I3FJ7AFJ0lUjjOFXb18NnVGxCI8vKFM/rByEWo1NPjmjje9u/o1LlDyJ1OBic3VkATT8rngBDLZzDoeJrsa4QdyVJ0Zd5BlhpEaQz9R+6Uz73yAwfr2DNIVjtxMlKiRr2EXAfAkadzKuaxi80f0ofVc7L//jtg8ot30+gZTuMxc/oY7rMKLJI/Z6WIEvDYbgg5OzznwPjI9guxqW5ynq/0wco9LvsqqAENMj97VXo5Wq2lR1OB98endNdLlIEE8xmBPkj66Iev3aR42/BJlkGu7rVg3fc2z+5kQw3HgX/q98QypkvviWUekbZD2uFJfKF5/TAuDq+omnEUY7xXl9e0mqG5Z3ntR+2fqlTzd7r+mMmV6X5vj2R2mm9YblNDfSGszUlyUmWJtQMzAwdlzLxcU86ExFqOK1+daKEo87M7v1lj15Ydzr/MfaaxHitqOdEO5j48fRhvvp/7RmKjhqyFswZOl5yERXjHTVsEf0FeSPDfZ8so9AsbMuL8Z0HykEe974BLz4PxUpMCpFeK7U3yjRK83KOuk0qJnhVOSESjOU39Z1YJrE1NLqS/kfJBZRuDmQnI5s+7SF8Kav2FKLzQ/4sx1FKs6P2bBToNVAxfQo9AwdJworuf3hPRN402Lif12+ju4zlGNX2s+21XSlWT5TlmudgR8B5gFL06DTyJ/6uQ2a3FX6rJGeDyDehUSF6vTGO2JM456Z+oiul+1q2XTY5vjPymqcne4EAr4L+MIHEU2DiF91A6SjXsLctC7/EGwL/dTqcBGVcI8XgghSZU5hU2rJQyUFVUyVp9DtHRuJpqtszT4/nDqgYo/dOiwOWu8ktHKD62XEaezc0FIE8TGEY99al+1lP54wiZusCiAqz+ZQsXr6/LOrnLh26mVH6+g6L3PYpyYd5wizbaiP9hM9rYe0i01ImhbAm789s8UWvIUmOHrW3q2l8Nvl+6CtTD1XSDg4NcoQ/ZVuCjOfPc2mdx0ivQ1VMp/B8VQtsYn6VKd+MHT+Ck5f+mRuwoWK6SsSjRcipPEtTw/G/o3ELSluMj3jgpD4wfYnT8b6PLlVgqK0iiQCv6LII6lMHQU26thjJR4PlChSd49WAi3UYuKnkyLagWyk4hRIWWvvIjh3XOWpuNHCgVlNljsUXKDJESrcuykx1URqKbsz/vGRf/nrUysn2jrBVTjHZPZTH9Hj4AkCdAiKAFE+BMDtW8pnqxAZjT1QfyGdZZmqjIX1Qh9qZcfXyNIC9ZBZ3C5ZGeJHCFhvbXT6Kdc80klm10tg8GIWrcMbIAjqfqbttyGV0eNkbNY36jqWEvmzynT08turBhKZFzIiLZtOYmtCMMaYJBy8RMjQvyfav1V32WWZFW8DJ/w91DVrHt2JLXozaz6FBnlxelDfumz+Pj9cJPRpB+3VJTimUWRMzuGE0p1IE9T9mwnftuClUD5Tahj7WTJAwH9lJeil4H2d/LLyaMqEMIs/DTtY598KktDxf2sIBnxZSXMovJPBWMQcguqWNbMUv0NEq1OyEY51YUSAwl5LBP3rYcUpV24v5FouE/2n7Zqu68kwqU15xq8YU9gF7ATXDAbwNO46qLDw14Ke94cn6MQEfq2VlVcoJVaguyG/iVFF0KNg2GtQ2YwnjlVnjJM6YMI2f57xgLJGRBjW0BdwDfM6NRPkqOEI2VwHJmemsoUt+QZDYeWSLhoTNN1VxFbRv/qLGu0nx/OiEbfm/B4CrnIryfRZHrRnsU3sR1rdWl3I6PtHP907b6q5LOyH0gsSw0cgSUSf9P1fBd3hc3aAKpUz9IIXPyQib0KR4d6KmuvyegL+Inl5w/nQ5GR1/jT1gh+u+VonknMjOAJblo5JDTvSX5w9Iry2FgzqksRTZCPZXfENq2PKxhM/AGVnH8TutkJNbP4/JXpPYy59bSg0U+0+9p3eLvBXZBJ+4DykElV31vj/+rj9a8URUC+DHi8mlT/NGSFGrthhE5h1ZJtGItiC6yxAZwla7WU5we13qbZAXrl2Sub7ay/SSg
/fC9cpoOzCq5/PzCKLqcYbryUUS+cMUIc1GaLI/Wn1lFVtREr/XNvcJHvSnJKjeE4qoRjziKZrvVDap+rILCWGo4PxSgB6n7sW/a8o2M9vIZO2wuG7i9BWOiyBaYQ1Az0o6hdeko9bKEzrQX4uf7S7kj9PRohj/8KLSQJTDpne0oddpj/KGbOccTaaLtbExP4iy4jL0A4nxKbaEywIOr4zFm44YHOUbf83ueZdfdL8g8L4tvlQqd2jtLUWUiv1S+BShXj58AOeuXyz47JeAOirACgi5E9u/uXrNHPPOB5zOjtVL+ZVVJZYC0Hazptr1OtM1Fm32LfyjS9xsZKqeImSuHrh8U7Eld1DqdkXA4geLz4CKWKoIuQ47qV67mmkflTLFoG/jZ0sPKMsf6i89AeywqyGepn+BHuqs7rz53IrdIChZ57ZJAPpNUroL4B8XeOcdHMT5ddRmhTxkthTsNVi1M65NX8NJ+C4ucV7iMdgVAV6OFKjyBvLNUHNWRgoo31UHmQ8XpADiY2SNb4SUlG6B0jsBGg1JGmJNFobmutYfE3KpVEHhZcAXB7FHBkxDeEz0JYknGESap0Eh3ggUYSC/kW9mbmRR1AR1PnF2ODEjSxVBD0+0Ipuj6Ow/xLAUv/Umia5oBsmiiCqIlgeCNOFRLVqWZd8Wcqms08G4/G7s2IkjqGDjPWJWhBbY9ZP3ZImqSLZhO2GKxRJVaHxG1f31KFPXxwSRkduDTnf3tIX1K4oiRpPmULCoqfNMT0iAaJFui1JXPV6SwztWW27WctSC8o+N+51wZhNHJhpdviUnAXEV6JF9yhXN/28xEnJNl1jWxwM5+LjfRo5UKl38fXrgcHWrQaaCulwOfJ91ThHJR2O/4+g2q2j2jD0xMWEjXO865lpYU5f8c79LTBJfdy3T/cC5HQeg3tr7GZVWzLJBiQVhRxMKhGn+ijREUqztLCAx+DXIOCcT5Qwlr8b1hLDhttSoBzgkQ7P3XLg0938XWK4FtIJs0PH2DcvqnSRAqpOP6VrT1ZxL7Atg1JwcyxGuvSBnFvMxUsVNvmv5SdXukZxzgHKpy5a++58zKYbcw1Tggkjb1G0UVHbPmsaB9q3NixvsSk/qA5P/h6yDxqi9LzYMlNIAVH7Xd53rtbaS2jtriNBxxLPHc4nvZ+91lgPn6rArYQYGOJhGGcahRQnIO9+TsaBISTMC348PUfbojlpXz2woHRMXA80QlxRr/AGByx/Pxr3nfVVeHBLD0uqDSKktdc7Jo2UfqH4pU5V0ruFzOsP/3NEey9EcDG0RpWN4dWe41ded8I1xJ22xRIknK1LsyoZJKhFbt50DYK0BQv2BcvqaAQ2rL+O24ER/36UhR3Vp29QBltTqFi9E96iydUpl+MIgJkwTjTjdV+KnAj7xKlXw8c11Zf1GMEU2ZsqdhBZGCrb9sUt+DmaJj5oML6E1hxfjXm+jAlTsg4hZJF+ZIrILRe5OusQLAqxdIl8DmOEeSJeXRtUwyoZGweJ927bM+7u4mnlCFSgZz2E1mf3XCNpBh1tk4qn8uIT3PcdwdkZQ08VRsWN7Ff+UNIozj+3BMg/G01bAL4SezIrsq/Z3TFs6Na8R0BdmNW9qbr3qe7es1Wv9t1q4HzFcfb0Gw0F3DsKWK419vpI45/UyidPOaW1yykSJfVSEHP+/i83haoDedsTXRK77eQYEmIFc7llfVgfvIcfJb+/2d0hxMp/r2l9Z5XgcTicAqPx73hPTQSRYOE7b0clR3314lAGFCjI30vEPftBbKy1xBu6Km2T1Nl313lC4LdMmO7zxUrazDpQ+b5y5mYy/WTB9Mk6XlYD9Qqew228VlXdOQkget2FIGxv9oWrKN3LCn64AY1dFY8upnuyOgoxofhRhP8ueebBOJaEOwS3ssfR7fjao808apF/a/E8AXy9dfKE86FHebLiBnF8dij9XkVCjXi3z4xm6klEtzTsJcJQsjn2dSv5aX1UHC9KzY95jXUWcI2xpAj5Je8Vf+d0/3bMZD0gH5xgfme4mWERlHgYO/+zK0pbU6Udhkft1FwFIjYEwlJ9SQGc/HvhPw5kq/IcAdEBuxThEVr+sm74tuR21E5g/71ZIez8Vp2jET+Olu2QxTjOh+2R4Zc698p5kbrTWlWQxnKHG6kiRAg0cBhf/RGzBCQ9hKwTLu+yPgtfzJhneuL86wqLbXqLD+Us/BfqGV/8cUxAIrhEskvKAsWAX45gOP39c/hEWXv0wMJBnhXuJlJyNfsT5d8jsT6EmYuSfpR9VSYGwAVOZDU9VigO0Wd6uqfT7+PszcpDAxuZCFgJrfs0pLKYIvZl7WWqzOAKiUbXixxOFg3E2B5IVjL/pLlckU7QTqtB/UBqE+pJxQv4f7kyq8Id+v44mB2saaPkuiUFhBVIZ5i1kvp86kINR1ucYHOIkC9keq/7mvkwhE9f3vsSvIMRQ80LWn7hXdqTBmYHZuB2BITLC2X8Xc6nm7Bxa9uXXV8faWtEwVAYQLvcqiW2lFOeaqX7KC6DSaPINst40b5NTr5wa8x60XsI/NuTjHVpWzlLbOsCZTLTtiwivLviGfePV9InWKBD7Fqac7hqRBoF2iHaAxSiun7OVhrdTAwff8gkHsK8PAIfLi8rZ54sRdYqZD7U3ZBaCuMsr1GJeX4+0U8FjUyBTwiaRZ7Slhcua4TDxaEmHfUfVH7BdXMKB/L8iSxt3osPZnNTvBQH7m/aM1Dn4snpuuF7DR7wCA91PXylVufcZfoN4JPazavX+98QE0h7jtDcQiu+TlzBRTtJvO0BW5RwJVYLQKQp7c74ILI4Qf4UFMWZpQ+oF/KmbWTML7krN6oA33doajNh39/U7V/RAn3gQ7DVpfz6wgjwNyY0skTHOMjvtFis+ETU4D57MWfmvGZUdN9JOdlu4Aw1Do95txO7YXpnOABzg74V1kJqfAYLsUyU/TnnyY2G8+kK1AxlY0g3pS6HHBWrbQff0DPgBoSXoo1GryrujTLqkfLxmYqf+WHKBaPVTD+1JTSoZN9rkDeP3b+w0Zz2thlYEUA1cdJU3HXDsrrhIQwjmATe1ZoEWpT0AM3mCyh9+WryWhYoS+fLKh7BuiEONmV5F/s2dmkaOp+8BfXWuvw0wuGH2uF4SLtG3i3+l896PiirmHNJ316/hiygTOHuTSeSR3WNglqdQK7T3UQdvKe9gSXN0FRlL5Ths9W64SWq+qA96R9dBgZjta2oyUErKYDzVRIZ+O0gz/es+ImjDzhU7LBwXs1+Iq0UhytwAg+nHYBePLnZ9FcIMHwesdqFzLDs219UqL1XNq1gTD+TjAMUzFCL9iB0z/Ox4fxl9I5SmiRxQYXUrGspKEgGTLQ2Vjjif29OtFyuk+rs5TcZI8Y0Ya/vsyGvuarY7MhJ5iHNnjO9/VrC3KBPy26vl9KGpkiM7PRdVMVvwADqC8d9eBlw33khMuO4Ewt1MRSJIG/usquoMDeTL82wyEt8Dppet9QCqwOJQzgm3+uuEQFh6qCcCIIkj2lGDjDwRuUFGKnqXxpzfRD5uXbqw
xXznev4pDX6NMFLg0ydOyWSqpZdlm5Iz4muq3taWnu248yCNdeehs475xdP8nX6PHECKEhDpBYCYzR0+e/EsnsQgr7uR9Rg9+fBw3vLx/aCmkrVUEsveobPDhJCbzuhOjLr6GrAn0kIqUpNyo1QyF0bDpAmcLZmyzql2QXEV2sn8FDUU9/d6awYTduJpQOn/x6LL9o9Dg+w6lWnfp6AZI5mBAmYr5MVt31peIO5i3lsiSuhguvFeql6HWrFP+G2P7sP99V3vQku5dmfTTJXRsVACTlSb5lH279GHoGyJCwQeqRV/qsZbFRNV7AsBfFICT0xRWmmeWQZ7xOFaj1gIP6AjWSbzAEyeJF80SbVm0hjrg3lH5VwbQyAA/m7++0xOAiHz/dsW4hG+xSc6wbopgE3mMKkMT1UY3Ie6ZHx2yWE6JuxsFLaCTm/siXiPDW0xEy/KPNlNf6cytiH8kVaOw/s8nx76XKNhMFVYXPvJ/y40G5f11gBUxQHBVpuVMRh31vpZg/+QpaEhGeZSqc8/4IyYyUcyfgD9999Y8E05yIaMeo2uzfIRMp4Z6Cz6SIMz+0sKLw2DWexqYb6AihwRbkNFz5yxkdzJ2qumgZ4mck6bz/3H+Hsfn9915zxZPkCyndoJy/Bwl4pVZeIW1CxOg/hl0F6DchYZra5VB6ItSYxxbvz6h9MQAdS+2vowQ/n7Uf/X3SoeSVH6o28/jtXXy2oFFZAuw1jnr+1gR2/DDbzbOdkf80LMFziH80Vqqd7NEo49jmlWt+pDn2oL+uMC/SkoCeNEYuUBqi2o/C8+S28Qmy1xAQsu3WI1z0smezkRSA94K/7rKbfyddoYfnK4elzu8Wyah5NVN4WM1KG703sIUsKgy04BHpNaWsadIEy/wNu85XyzqDwzoMRuO73xcNUxnZEdO4xuZQVnwc99cFT2u7EoNduIvdFvHBOdQSCh92w7VglDGT6aoH0s8iv7TyS35J95PxVPPipw9rxRFwoezG+Sk54s6BLwouZGlAsbog1/n8fZotpa6Z3qsulKUDyzX8N85lCF+Am/HSz59MGgV7V6YrDKyUybUJscG9k6Pw77qDVToJHjKetcG+ZcStULoPmodhlzVBCcUkGR27Ye7mwV+nhYP8qLtiX4ZFRc/fAHptV2c1IO/ZVIsPOimW0OEVdXABAYIQwKd4QOvoFhWlYO1YhBBsjiz5+KeuGLNoRhKWkkcyvIa9Iaq9pS8EfJ1JPG/4zwfuMrQUDyHHVKIh8IMRhsoWvy3m88VrWbQzBERrh4rGciLcXjMVl3pJM9t61KGRNBqlEZAYmE4AsSWY2NCWK30I4m+o5VMf4qoHuqiw1xpDqbKfT8NSD/hG3eGk3rehfxocS/LjXw/N54vHP4N+pss3CwgBiZWyiSD+T98F4hFIVS9BLIw3kJKp2sdPavq5AZpZvyTDbY0tK6tmBHoWGPDtGhfp2trks8lPwYxB6nGciTciJF+QxbHBIrPxF0myVTUXYnf2M1oWi5Vr7Bv/0h/+iwfit//dwCxm44pA3oLmffZj0v5ELaDnm8qwpl7TWKb/YjeRLvmN/WBHcWl7Bqt/x2ySAQfNGW/KKLLwMtVI1gmf0q4b11YNDDag35QYia9jgY3TpYf3188kLbnRrKmjPXQVfhjRhq8SlasC3fPMiuQaUpTtHD2ZeCNkavrTZ5PMPnwZcJ24+yj2gTxn6abENiMWHj6ZP/kcP9+lmky2byc8gwvej3mTIJscOSj42D88Js7wEJFxcSNhv0Dl+AvlhA0R3gXa3R3uATzJzcGMh2sgsosLEh4NnPntNOtlnnpNuqf3oHbsNa35lX8oEfmxvcBPgF2H1D6rvWdtTkw7mIO9f+gcFez2KkESNKcrBA/4mvvgrbbDno3sE+d+WIM9BJSM6p70v604j/tsYvDl/B2U/NPKivwfU1e1LCmWRb9m3nF5TBJLnMR5w92drx9O3oqJiY7urroK52xZa+vUmFF61q8c8TQObQt6KxT2gVxzDtYAMvNkGjoNqGuvIe6YAlIrdK3hb+ld5YzkPxCH0CBsgRPY4vyP20EPgI+Q8OrTGcAw/rUpwQf6fI2g8ii9jG5zNCZJHWH5IPMulFAtmfbhN3vg04YHdHN5Sbs7MJeHyks8vpf6oGVxTHTcTlLwR6xQ1G2INJq9JKL5RdtmVsjdRZ9fNxaQzxfMrCONo27ACdpzQ/jCQexc6sOvwpuOpg2KfvprsviZNt7z8SEZRYijFRyjepyjdOYCBj2ILQTEg+V3PTT6ndiSmVLPVc19g5gD0thnofbw/npzt/gSc5mCfWK9UQucmjgq6SSH2R0JAiWO+7kbG5Dk+CYTSnnFILyU08z8JovlN6fpzbrw2HoUJNHNgzdQNEoKjGEkCxLvhpRc5TeivZPQClpH7TmE4Toev8GbrklF0dTgd+rB+GbHjN+jKYSxOPv9nLWhKpnFf41OD7MJ2eVWxOz+wAWJEXfRtrjl0gUOcc8vle3ICyFQ+WVKJitc7u1rUOXHNkZvWI3Y+4YDCabmDRZ/uda9rmaHZdAvzL9KXFTMsVR/2+dCek/0Vk9u2p+63ah4afX3NQSAKGCNDuF4YFvfpObj5SF9KUBrvywfMtf5aYMjCzpn+QQAgF29imTysumrJ0WiuKqyrSuc1TIx8Zn0LLbykCwIdNGBuxtVLwnPmpe+YT9S7gOp6oWaVkcrTwxHtJJtr30AY1n5VPwCcdDg73dpom1KVqc9SQa00TPh1E/oYod1r7cHBa1iiZ/TFcLTA3n8h6nG3G0N0EC9putXVQiPNghJvOTW1ls1OoOSTmbFnA2pCZHILb0wq2JuUMjbhN0WALJHhn0S0eKWCjCEVgTzbmdAsh06tGny9o2QurortDWS5V8I1pPamtigHAClg90waLUxaBZf2m8P3gU885oPJwjXzPh15inUVVr6tc3qSBgs+1BLeS7UrOEmH3G7VnDoLhwV8hsiYZIHMAO2tVGyQfHGlk4upX70j0pPfQWRt3TeM7LtVQkrYTfoZqRuS8mQTi8lhJ5lnAHKC97FSoa5aXUPoThoaVQzCu16IAr5Y5HDoNQp56CapeTfPZrZA6zn1PKAlCMyiLCp5FsYdPsD+Vz88eLj2N9A8QlFjOhylgpEPdz53OzPJycSIXgdRujrOqRkL+BS1msS1RjLX362zhH/Giq1VCIQ1Txr5ERbvWZ+LassmsaZh01VTaVybPQbPC95LXoT2en1LkL7+a4xIiMMo8rwDSQidjmoNu5moPsVClcgOdDMVNzt+xHeTbjZZA6BqCucSDa4VNl2p6ULk47tmZJKDFYv1axMnqe6m4Iy6JlrV/fIQPDgXTg1ssJ5FN4LRfQhfnkGsAzPm4Cys+4ztSs4gcw/ijD8jVXE3egGS5kAwoSCPqbJYRc1ioW5MB/PK5tTZ2F3JJy8zIh6OAf9/18F3LBNDAl6scheIaQd+L10VSz4LCyD1qqpk3MZ1J09zmPyAc0YSfFaL8LRGPKxpVqhd7FyFs49pa1Y3BfYsfKhc3gc1zobIu9RaxwZ+
MCVewvviX7W+5o048+nVLV0oQcvA91STBEuUKbb9lSFsWYuazdHQeoKEU13YS4/fNBWbvNIBPzapSS2dxtDxjy/fymLBLgeHmp2t6XHLbqhO9sjXmO/kG/sZA6C/5OKx65VzSiY7sZ7L4LwWI58XL2WsHaEketJY792kM1SDpACRuiTHh8haK/sq4pam7nkF6/bd4vf7+HA35dDWxUISPgREREIWC/LjCaKGNl5sjqTCR8neO12hSIabSA4J7J7CKRsr76ZW2a7j9fYQbtXxg9gbicfVTniZFysLrgcaZ6GAcsRd5fs3hRyHcZpbFPw0ZgmkRnjpgTBqP1PJm4b7rAjdvwKIS6Q2a3XkFPDU4KCQ7yc6Np7FJL2oqUhBkXIeA+l85jQRJyH4VggVw6oCQUNF7yQRMtvIIumExLVCV20T7srAXY2W+dO5z3kAiMKBG/IkemknTBAhgwxDTP9GKPz24kLzlr8lYlXQGLDV+o2A0696wJ3Oz9wbZP3cBcEEXibd+mxSUUDTt15tga/q908+v0OJzR+k6QdYDyAHyzhmP2cLj5iBu2cqGqT5qCs32EYVuBqiETba59tkZncvAC2168HIg1bv1UgnAvuXFlSJOTrWUEBmRaHqrx8n6D2HkIhF6Zo2BNwaPt7gzxC918c2JFYSc5She0aWGHzMSpNQgvPPDTAkH9ClEhOCnMiqolySWGBdvAomS7Cn5ZW4/MEEnf3txIISMdvWSE96dR4DCbO+7/qwCiHQTafl+421PdVL2fa0dtlrGcot+Bw8fgdnAOG0g44d+gkFyCdcCgSi5vruLJPUtsjm9Wlj2GCPWA9gtlOVsVOBqWPcwN+i+rVTsrlLT81gUUjXOmJNLrNxHtwZd2e37Md5etDcETO5+0HP+VEbARzHovYHFpAzPeY2nySHlYHNdCrH2uE3CKU4UVyTzYIQWgU3VB2PVea3n8eg2YRkBfDbqTn+nltJWC5eFs8CTfbF/s9kyaU4gQZ8ncXCC7Z3Smvr4ySQCtRGpPwPb7LHBO7rTYozdX64193m4LrpDDMPZC/9fMPzZSH3H909iEaeUYoVHaqDIWBSBWjw/jHzV7H9Dqwt7n7YsjKEfS1M/q87/he7Lo2NnHXH/2pLAFej7R5YbypxmLV8SzKV/ajx+5pzatOx1HxynE2Z6CJfG26vLmEiOZTDeLPqB+8KpH5DIrshp3veBORT9ZhtJ63XuXN5NGtYNaaajVtN3YeTTA5KdiPY4XU4k/NlL851WKqgtjXJIYjfP83bqdNSM9H9jRtp9iiShoSew+E7xbsFHHDpCiQCOYIm2q+v2/gsxIEQLKw8Y/MdsFl05EModcj3PkuwMQm/Vsh4qFdcwL59u+zR/2HRExo+/u733sbegLnVoYzcgjVIXxLdpwVrI8mmW0mZLHkfVWTOf+oBkmJ6OslIn72kIJMTMXryFlO6jJ0gKbcAl7EWUB6v0qD+w6wzizuZb6+tj7hS/8AkRSfm4mGdw4MiBc2t6G7nABOm8rsnRZvxOqB8M3lOe/Czd+SWcW/5z//3vjTcvrehQO8IK+JTT3pAiaMn9Cm9X0rnzIo65zPvvzC915+gr4r3jEeDfMyMOaDmb4hPicGjhLDSM9fEE2bHNnOXYoGYBSGfzc1e7Sh3kjWd9OeT1q+71O4t4BMlxZJ5DYKDC+xdH/GiqnllhpdQuuXW6NB5SFT/j1uzoPEaHy+iWLeV9YQe6Te74GAEWBlBwGw6c7H/bCa5eT0+S5uLUwmb/n41bMYyMOuI0sN5vSx5uRjuID/3kjS9t9cTetwYydGB2GY2ZZRfws8hXMjSftEl4ISbR9Cb1A6AQi6GQhHndBqZomuni+7kjqQQtvog2QFhASmyhSxsjb0hM3feiw6MDp2pe8Wj0WIbO2la6wGfvm7bCSGj+nfeCmOENxdsj+JUr3y/GI/zHwtIy3DvdUp857aKtP16m+0prF4Gtj8zlCSg9vKOy/iEDwcb/3i6bum4yZbP9drsZxjppS0rONnbHwLOVCf+3oUaF9ndjBQm+l+6+D+zgIjGzG4xHTIW+pdvDJ+MYyUu6MtRzm89dLqzU1A0D2fXx7/C6LMLQVAckPm/tckvN8npZ/kAsQtw8b0y9w9HzCIMncQmenZLvxW57cUvatnseUk/ehGYXCsuPO6bKyRW9a7dk2GdVd/V5dKQvxvpbhLgtgDNFsYZBhXdiNrPqWumnqUoYk3EVGwcpFw/VfYy3+nXGtioPEwj9PjaNzK7biZSsLR3jqp3435Kk+Rs8BxBGo34aAfY4F2q421yR3ubGBHaLOLV/x+GLM/wGeTEhQlVGa+RTwOZGF6Lh3B826cBY2sJJeqnYNpBDSXeUn3yqz3Wk9p3XEMdrfJ4nYKyd+ivMdwRDSkichoLseHGngVer8TlvzcfIQ684t2ENF2/G0tg5nswElOGjWJ5jfdgSjm916KCpbtuEKm2U4rucuiE1B8XpmsBbndHsanNmUHqEOgNbd/o3/330wlqxu67PbxvXocEnDyORC+FEQLocS/Fjd2g84IydbbacPPtm8MTzEGZOSbP+hXvXhlXACgtaBsgkXc21sCHnjauwbt3cya55Z5F9ZnXFJzVFpZzp174RvxvBkHc+9xCIYzMXn4onTyHdpneVNxrY6LWqjAVozzVJEW6aT4zdxyO7QeMjYeQObaMhTK/hkv+jT39066g2clzwVsrKPncq4roio0wAJJxI2wx6d9e0q+hgW1EcIKXCpGrdyUg9mTKPZQWjoWNY1RHa8JKXYu1nFI8W1Mr+2NjI8qSLfGpEJbWXZDbvIoxo1GXkoyDqe0rvErNi+8iRDMnG7nQETEvLJSNYuJG943BUkPb9GNHgitMYbo1vjlb9kNiCqOvyrDrdfHioISCnxNExv4TKG/0W3JcBWujNMLJH8DYu0XEnVAEslGdXlHDQz8nHEEUhAFewl0r1rpLU+VH1Zi5gkMFuEhXWyAkXdokMUE11+nNv3t4seDQiTkQgAVmrlqpVBWK780CNSIsLfvsEChM3fCul8OM4ibwE4akw5Gg2H4j8mofaJe99V9vEJS9a9qpiMPX+IYlJIYGjZt67ui24iBDf0zZlMxQtDj82hVRYNjY2XE3HtH3OHxeRC3yGzu2m4U4Gd6jXHwGGJOLxd4On9IhnK5SLR5kyc+8g7mdnAI7Ju/n5YBX9v8yn1acbbnJ2jXmhNI9gPY38SiTKxTaEcbs7GaXIeEEel6mFNberG7meTDwSdI0703yI70PXAvtASq58aAIIlL7LoQyGLs6VOZR6uruYzfy8PmsiAiwfR/QL70wwGgZ/oEXqt1H7c3jchEWCmh7ltKe8TD/VCaeDiazerriKGe7Ylx6jEwSVUKiGz375PwOEPgzG1V7/4TTBMwKw4eewLZrvC/nTXTLMEDCP0wIBLi11qoG5itRbXK7ZJuSn7+Uj6keZTGvFV7Kz4wpLSg3ICoIIOVyOSQbY9Jv9ryAinvEFHOh0R4D2JLiFOp2JKq0YL6I79vKEOcV300Uj109K6dvlizh4xjKTCH
MI3ABaiGkkh/Dy12dukDjl1GwGxzIt7kCxW9AyptEIxm0va7g431PPKyXQrE+ZCRxPuN8PExlJazMt8Wdps0kdUbLI0PRJVRKDOkKPS9YgSvDsHZL0/+I1YK6l16Lf3DAn2lXfEQkPp+Sfm8Nye8pPLfaYGZuPw6jzdanoM/kx5QfqBacGaM9BU67LaAnzrkVH/OGbDHwP6PcfTwtQeZ2QAjzW7TpXHv/Ha9Pf9qIg0htNMFL6yfofPrsCV8eFzb5ST+IOz90cL/gCZlQ9LQEW+ytnKzjkyO72ey7lpWBWNT42Z39vY43srgJjitRG0ScLGsplHJGpnNv6yazZ0w8iZDsxZv/Ld2GcjJAtJAaEX9iB+oph/IPCbRCEVAVgbgJw+8oTo2oVlRPBHJIkwk9t+r/So+u5vdgLvfxGtEJziXY7FBHh9kwEAJaG+0oVvftZz5chLRJ3S30zqKg6O0d/b2BqpD9V+FqQP7VEhopT+S8cp/hymXYWVe2Nbt6txV9TWPZQ6CZKJqBaZUlV4mDbon6seNcNviyVDCYPwrZg36t33uHnCUNsYtt7qLt7sCr3UhuzFVAJJDOa+beETiBzSOHm8zDkJm/pwv6zfTfI1JEHZQfGQcwZWSjde6zhKHn89nd/LxOQTw4U1Y7Nf8plDd75MRFVFW8PUajjVyAG+EetJQUhk8T7jjjoR/fNK7urGsMr1QwMXIV7cpDQQHmcSPp2ZUsxG3TXqpgA3YVK0vfmYmJITTArl5Ur6tIslQFwv7u0rmFtjFvDGrjT7L+vvuicen4pOUXlh/PY/HKnKh6acSSt/Pw7/gX1EkUHJ6/OqfyFF+i+Bi+iOJetyZ4b5ZlI63TLTCgjdqyT4iBFpoIMjpOj7INiNg6/LD7JIOEI5tg6aDgiNQbQ75JOyt6GvD/qSquXYjsewB+Bf+njWjzwAiOOcHyJHObWMtgFKefdt5jyzSrXqL6EZpr6YugFXcwrEq5sfvofmamE9EbZa/k+0mrA4ou9YIVudsKv9YuWv/AqsxI635QAy4EwOIEhvTUAkYckg3Cu4Quo9MvdHsg1/V+05SC5D+aaK9cqPk/GdiEZLo1/cg0Klz6vAtZ+UyB330vREY/e3xIId3jbqMo4jOrT/8B4QNUASnr4e7P6puljQqJkqex8NBCKM09HcP/7axZDubu3INClh411bShe7I/ooNQsM3L7zG0506Ys2SxsAHkgh2MECVj2YCxwdvpKf952BP8pVqxAsOuLa81cb7TnTaXtgx1ZKYT9Sw04JNZPRj3K08KRJykdttXgrtBEmRVlduVPzSak6LJTE5Dg1TPDVOV0Ds/jl925ddpqrkY1GMVr7arEWHKGGe0tSm8yXIspIAaB9wwY/XRWxIkkTMG6C6Kay2oX/cn7TsXfuPYz1EmfVbEgMCM9/SKmC3liOHZ/bap27NxkMs+pLvEV4t+s8xOAKIR8zamo9T/U6Aq36FMqIK8Bse0hoL2SCKTr8CKNS7yfe1lKxmW2eZBI0SPQC5fT5thXfkKoYpEY5fkO96x+857/GyaQ+/4B5UBnT4cwC845f+2ucD6iM0QP3Ja4Uq53gwScW/dE4QyMWKVS4mYkcYVgSNwlZdYyrCJEqVup9FxI86ZF0Ouol2CGtQrQDDJn2H30ZjF1+etmxEemzEJycE1OlVIqumprZoU3S/g1sYLH5es3PCk351pDRI5oeCEHiVvD8rrCnna/nFHjO/ginlkhFkCgUadKHERhjfznrTnrSlyYAsnNAWqmILYnfBjPmyEHQKeHtrsdH5hifil4LuFWcUe0N2IvvYBjjy2UW3l3UZ8o8sT7JLJwj9aGMNEYvN3LcCmEey4ww+WjNquaVa+eBj6ZamJ0V3FJHKReEIId0AOuOj8d62M7cBXFlDXMt/Y9y958+xFCio8yKDs8IYUqym7746QrdnHYTr0kWtLZ7E5I7GTKvIEXc8TgM4ZuDn8Ml7rrNzWG08og+OoYUwknDbZ3NlAsrS/aDqQM1QZscEKoKFzha8xZfVYGz1bsMv6cZW5FrLSrSXQk/d89TC1ZC0ew4g0oC123vACU46j4GcCeOyWITc11QQ45okyfy3jbMIyOxtJTB2hOmETLTLM7p7xl0HO9NuP/CU3E7FjtYINGht8OyxnSI810MxgQ+nvzkaG3xL4XRGljMhaxU3Q7YO7lt3Cc+rk9tNDi/m04of3+iF5oqtlm9y2uFf3kC3kbN3qxO4OA2+FbM6Xve3oGc+OMyoQGuuCw+VEnjNvFYl+rD8mcuvskKQnK9oGJrMmgdJvzoHmhmRh4d7YCO9cIet/M2mrklRGXPXJLg+Vc4cHmWTTbzcymh0wLPEt9VaqM5dvf5hkoR94fUQ5i3KDMWE+C/TElnoPTxcmr7yOMb85efuVAuC2u4GrSTqC9Ib+gePbtQ37RtrjgoPcfrVik3c5V0iDqTtCwTKCBkln7yJx9V7W1P5NaNZV/jbdDw8lZ+qr/6bNgH0TE2AhxmtzDCpj4kSjnAJWI0c18u/04k4S1miRzXdAZJhWrkmkodQ/MXW6JVwk9bH0dQgD30PPPfyIjfMiFVsyrh7LQ8LwohIZO2YJXOd3kknw1sc78tahW/BmY6U6GLj8kSgiHBLIFArPkxl+876tR/zmXplIdfy2iLAs0LTmNpaAt8WYE0ov5vgVlMTQZcQtpNyg9OW3o+4sVbmBwNRVMb5Pds5WEIOvEFy0mmp+UKS6bpUep5p+9fh9NsNtwBHPncC9l1hk6Dl1hw9+T37s1SPv+0Q5m+SLqpvXRjm342/ggDCuzuIBBZ7tAELfA+JOgKdUz2z00Us12+VbvIAcXt77ehcWXzi0pSgs5/d2nwCK2YTpGvwZFqvz1tAMCRUi+TiMZH+vDbit6xwCup7GW9SsKL8kZ/GTL8qay3Gukyez3wmPsliIZ/l8Y5/6VpN7sNwEv2GckHJHRP5XvvtoyEbZ/sW9F5jyOs32Jlm+yRS4l9FGnNYIC7EeBjtuNHaAgFjsmv68yXgZtbthZTupW1o+SqVzxXxc9uF0RpCliV+8Q8cnN1d5LOda1Xsf4pchbJjUn/5iDFBEnaLFcWK/GqYaxK8dE5cdRQlPXQaWmI6XYUiMUFvwVJB7DU8b5ybH351YoHIMHmrUEFsK0N/7WxPjIOn8xR08xJjc7D/8PJUK/rEx7c2Ren21pJpBJqzy+k5zJ8zNjctgh6Ucfl6e+TVqYq7SmbtkqFE3tWswukB0q9t16slrqLTtLXtjqrI1hhI0GYr3ntgseCL3qpFIxPFmxNVRQ6o+8msKBU1sZJ1k8VQVs33xkl7Tx8702fCA0d5XgEwpT0VejeFQ0k5yo6nAASkmbfdn5gaQ/H2ieVfkjWR16sQ3gTQzbuqmosbujfC1XRiX8V3awTVcCyqMDZTR9yFrylWXQCPqos5lYt0essYM4thN8X2Ojrk7TfL6SibubSaIo5DQP9acKR+IpQbCf1p1kNqEiZngLvS7a+MIVAoEtS1fiPt9JlXUN3NuCVON2G4OVPv8Wh7loHgvacyPfWdh6b
MpU+PZsjOjfRajrWMkJoC1wiaAzayxQH9GeOUzUZFzzdTOrkzwnC1HOeuDSdikMPUvW0rNbT3ryYK6fBVIiugvj71AVK5SQyLgRDoyPkuMcVurV4MntASVot93mueKtUAGa0tsGPOnGetsHiQ9mUFjruY0L6UCAAgBY94T29ZANoxBFdnaPWhtmCF2Qhk378oGcwfjW0Lp7vRdmsMBPMpkiJhcitacVCmrFXSIz4/H/2lZKs1/2WAbty17/lr5civvWyQa/U9ig2cPeAJQM09rro8mpz7by01aib5qc9DKAkvbdXrGOx04NNdR79T2cRsWN19O3zFHpK6T7p7BzyldQe1OeDYaIOk9ZDQODtkYCIYHP4OKAn0N6FA65nNYlY8gFNUSdcUGZMNSARoe2AW7u6SP1FRXmOT0B/ieeG3kV6D6JRrn9Qp/JbNw7a7hvcJMvmSFZoqUgIhsi1sXRfzh2QJnVnxxWb/QPgat9vqKUedoR3+tf3sO+6zFkph4y2PdfWbE3IfOaJnMQoneEwjgaIavgJXUe9f+wsdMv0uUPdWDzT9aug1PWB1tHiSNFEQZ/t0J30sM7Ah3P56bPB1tYD7wIj7nXr4ZCpOkCqPriJanB0/GKcGhEWAySo/qU+B9/6K1fJ6vV8vy/ky7otTa+NXXqQSZ/6BlniyovdXFLzfuAT8syX6hrgRiUr0as9VNFKAlO+u/AAhbDBwEj8Lbi9UYV6MTCgfkwZOQtZWco4V7mBK5Qb75hkBULEp6v25NEPawOaWTvga2kE3EUMqVqPowOzyh6byyHi/ovbXwrBl+2fsV5mKZi+4WWslbBjU09bfuZY+Z+Za/sR+0E96aCJl3PFDKP7HJRCSQvMc0DBZQxmlTWn9ATiAZ6GqQeh03mcsPjoOjuxdap9IcdEQSppIAawxJxtn+wUHVnSamjAnhZC64u+2dqhfTz/zN7au5l2vDT6OXPLxt9vLEMvIq6JoWlLd2+xC8U4GdSNG4jZi30Uo+7guHm4KyKi93MyqLvTEHBxMgqS/9JFrtxyuL5D7scrDf9Gm9ftsieavEZetVYhdZ2nXEijsuymT9WimaorV9wnrMBkBG69VneJ3TJm67qSl5e1wxRhFttfCGaEJ790ZP9A4eUr4fNAmfcy2/WGvUmTYRWJbbzf3Rql0tuLLM2ZakKjcFrGbjRGy9yaczfGBQTQInwfLB5qL5l6jhSZNWrXfAFj7sdEmk1X7crV1GhS27Eu4czuJkETNQIpGc7bu9sfyFlae9vkFn3+DRg9kUd8rkkD7gNxx6J6+yHQaf2n71SqQhgpnZYuKiFHjee7J5w3aXVII31PFyoaIsrFZPyvWWdHR6bMSXqOkm0j706LDDwOgmjapWa7VPKwq71KEgQpNTkc7U1TvqN/X7TDa95nGufZVocj4yR7eig/Lj2mcaDlOCew0QTIycR13t7gUaXSmUMoHjc6OnNHB1NHZxHusdFvoGmyfQ+cIVfQD8SOrROdQArwj9xcZzr4NyGM0GyaRrT84QSZxmqdUlz2XcyZG08EevdcYAdlA14CJmkds8uWg4+7qg3UKcfWt2sft0tm2veHVhrNoI0nG+zvBw/sRbG0hztsjqJB4XQXfwsanVmwyXof+7amxgiXhEfOMeIQnlm5SzgFcRBYrCdiEewi3zJUdcDQ14r4JO/oVUp1C4cAy8BuMV+6w2t7Q7YbXmHqOLUaw73seju/uuwZxey0NS40Wjh2iNj9yX1Z/O3c/SzQZc5MGcqu2l294uQao/ttTAeJY2jROFxrwQeMK+YQMDZQBBWTO9TlSN+57utF717H0HiIdmjrfn6TucAibyWBqX9B7fJc1s3qFZzl0P6v9BY820KoWeo48RscaLPjz0DKGfwKssTNbiuJjw75jo6LuA8so5S9kqRzO2kOWBAzjCBDCDj7hNVkH179VrN9Jayf7OAltSCKmjQup2HvFIxtiXRMgct7XHVMjfs2wVkxy7/fjrJ2rm/qq0/HCu0jwx8VDGfxBG/rnAL7Q9lx6NJ1DbPxaJmtn4s4LoxRljUo/rFED8FF+lUkzBW79W7KKuTBySeujMQJ0ABdCVvn4GMEBeG1rj9kLrh7sFRkP1BfU/nPK4ZiKd4Un99kfJZ0VxfDuQmfL94z19sSaI/X4XpHdbRg9JjiOzYtQfnIJ6Ms+f2/hN7rUxmv9RMVMme9AbnbutdYy8tKm0A7Yd0utU7Q2KF3UvxmZlTdBxgPhBbl4fNP+JTWGQpSbtpzWBam7vGvmAsntyq7CxIeRj4UYqcKiXwD5UHHapWj/LYq+cyLsp2roXr5d4F4TEyBRDYe5bY/anci59egnsnuPXVVW7JB2zxdrUSOvW2duHfZIgHRIbaPyj8nym/Iya+cmxCrv7vco19T9mnxHK3b4dr/dN1RGWEUFOLUWHh7zZXps9u43njqSbebL9MiErox6D+o26m6k8/vhmR9lTFJiHt5n8BIKVQNtpR0NyUHbXweiGS2ejJ2N2BiO3Sm+9nnMaVvCBQousHxjJE4gLsAmuCvGQZ1/o+Lr3S7jQi2tirGfGuHQpL7Sz57vDVeFDoULb0iH+jRjrTQcmkk1u3hYHnBH8602wq4d9RTqtB8adhTUMRCZpxLHnsMJl3bNv6q0tv5CqZ667nSotfcbptkpxtNIL82bP0yF9Oc8FiJj2R/ehFEujsFEmxladGX1aLFqsqkFEsKTIlMQ/0aiNhCjNhWFdcS9rUnvVjD6U7k0cKSlYsW6q9dXsAsVLKNc53srcMKGqM84dyiPx7s+d/vNhXeqUveXeY3ze/ekdtQXdyBiNzr8rwEHuDvGh8W+PL2ggFfhAD1zxQSqY+esd0+TgcB8ajE2OgeoI987yGrLvSIHF2FKy0dQHnN422UpoxZPa/oX9eqDmL+UMfb8nv+UnmYmjIQ/cCXKoR5vc10iIHvTvsgPOLY9oQmKoa4axsPhRKLn60qMefzCrGsuikXCUuY33ZUr/bz0Ucqdsek2Paod1zvg57ySYFGCxP+lo7rd2ywVpS7S4Y5wggahr5WYKmTN46caBIqwTaC37/vh/fasThCt079ESl/AX81lvreS1sTGPyRnfQjrYOgEfLY0XIpehOIkd6botoZ0oGrGb1JqrFJC7KUtyLfuNABJBm1cpNQRuHv5B6xqSCp358NE6PZdFcrNZ1ruhpwMDR7qOZSDyEOq3t6Y81eaTx7lwU19B50nHu/TVyEkckojfs+1eZqOWANHZoVaOCPbg3yPLRVFun3VVbwLnFyzxdCbK3qm/dwjJX05REoZcYIsIUTCe9vcqou40+3s89hgrd61x36vRdctYlTa07rqYaOOkblaNzPC3B0Al+nwR2o8kJUyv/naZSjyjugvJlhj5PIXKfw632CV8XQ3APmGhkKIC2UTIqgx8u0pRm6YOpuKrfaOvUlaf1Fn60C5m4PYfhze7wkxhbTobPi7J3wxeerZWg6bxBhqPNjudhPAXRQtJ3GvX+H5N4KfaZHtTaHyL754jwE5acSDUyYaALpG4vMopOFF9yUSH+70Sp
SazZWFcVq1purvJjx8Puqiy8vICKBlwhpdJdlOqW1Msfl8bZicQtXPkd+iPBnWd/LFEMmuvOD99R3RPugNYaG1vlvObAIky9P/hk8w0B67D42HHko0ry2rz+zwEfgafuho6Se0FpUx+I1JGHiAdSHw+zB+A8+NNkxn5HIxp/SBps0ac4ZmJjGyl+x66LtVz4p4XTna3u9aTcqGt2qnjdxpBKQOXNV37UpRfTNckCgg5LReFPiwD6fzxNRQ1sJb1v72xWLLuUStG08jie0i/By6wDWyo+h1hIefl3k4VFicmVYI4kDkLZVnX9HLfAKbaj/wvW5ZYDjLL3e2o8jYO2PtlJAeYcV4SeJbk5xmXOPpQ31MNVIxjMwNMipKL1sQvLz6xojCoYLZ9HWPtTYE27oF8k0dLWSc18d4J3Uxw1zRZImxAz74SSb2nWTxK5WUkQh1ly0BeIrknrFDeSc182Yn8nyR7zOaBeJ5zeTz8IwOm0oDzv3Z9n7TumYmXNBwMqsE3sM1xGMZFodsT+auDIjLqvzRlRHQGrPK3gr+329p1N8vnJhjVIrhb8OTA4a6qStmvYJf10cn9dT4bT4cwRXQk+gSyWI14dYu8z5XSxT5Iz5v/TWh/heml/TWrooPnQhnY47Fqc0K7Z7EZDKfRoR23NrkiPxcxsyobWPnT3HwQoLSU7GhYM/aIX2XbCn5lXHM0OmlnZ+R/ZgNbUKuqcNas/GF/4AX2WoN9MVYDfaFtpZy9zunezm3qWZrPtJHj60GcLg2Sf4NgnGNAizL0AHJa8sVdUo6DBaBi8NAaTSbWIXDEX3M2ZFFsxMRQvHZZzx65U9tdXF/nt4X3EbGb6eP+rhnmw7PryPjjsvamzMhmh4L/LvQZwBEvURsyyxi5331SCR+cyVfEXdFshPnw/QKR2TU+hEV1irZCfOrr38G+iFi0fEK/R0yHT8l0s32xV8vBhrSg2FU/mcLFSyilo/R3ahF3DsArbzU6TsAfE00hrOBr1Mxe4Z8UeOvjEIi0CXPzgHtH/TxDdtxT0d/Q68G7+UN97q6QbvOs4OTk2x2Aj19fFh/Dfae0tzInXOYCTqROiuEKfqhGWfkSPFhDBaJUpjD2nvkddRjpXexXHan1qmIFTND6CDFGm2hIpOeYUMR3CfysocUdNUnlYJ4OPaIXv/ggxaf1P6XPtlOGF19guQFQDIyodAyLT6gb7jvXw2730fXju1C0BRJPs8Ar+V2za5/np9bIuJt4gU/c3OELE9Hrq2u524A78YMzjYY9PmBQDXcj5qNYQhuIxTsPEoQm/CLEoV7GIXp9GwlKna015cHXHdnVZxyL2lyHX8U+dqSEBMUUBqCXNpsCDic9ttabQCR2xz2HYZM9d8EvAbQaiiHTduBRIdK1ia97tuhTXrKS5j3EkEfo5vKhCIgYq0J0T3ZYFMGv9wNruT5TrMhEvcni/iM0eye8hDKHPwOHNUWSanSA1L1w7IphCnANItcYwmjN0Ulo28AvUmwRYKH0QP8hcLrPDP8w1AC0/W0iSFrb7oLXKMsvY0CxH5wK9QWHRvHONEVyJvALrn8Ff6q3XBzCugWNVhcVJqP9RqHvMMlDfGSbotEz/iqUobvktm5dUxYxEQBs//Nbh+A2eNYlF9eO42PlJfzKBmXD7MeoyqQFaaGn8NN04xRS5a/ZSZTEO2uRH3H0tfLPCde4l+z7ztjetDfkC3K8izFh3WXdSd3y936Xt4ebTy5SG1KwTTVxp3AnPDYkVZSbwfCgxs+g8WF75EhZuh9tTDvG7p5N0c57NhggdAPH9Aj9S1x7/G/64j9UrHmwQg1WtcfAdY16wx8F0zenmL+41gn3rJqanRh0OTo9V3uwLvJIwCRU4bL3ZQOtCICe5n4tarsFafewOJsn/GXqPCS/HZgUci3UjaNVBBGDO/S3QQEM78U4Rd+UuDCH44Lz6b7qyDUrUVz6ga5JzIQNZImzbr26sEMHZCKPNihd5aNvxunutmSpj7ukNesN9wN53VMHfw2I0DuptsNUOt07YpYjJU/dCLA/VrRP5nfXu0lUJ+pDObgkPX1RiAiRvLM4e/fNKwvhBvRHfnZI8jHShCW7zR3Lt/y/Mib6q354+849RFg1d4VAo49VGByT96Wifl6x3tjzon1rtpU+lBROuitqZSZf389Wf6iSXvU57PFM/ZmJcQ1iKQZzr9dWfTbFuJD7x6AMnW6H6CDnnK/3Wiu5s9Kygs0oo8n9wFh3IreDOb6HOCJX14Os+egtfUo9ZuQyheA+yoHkw8qsATEj0lO5HHWm6yL2sdYdQvS3ayARssPwPEjA0xR9viMISRPe64VkaBje909YaDRafkGnPlNHmYZG4B6M8gNF7/pXlJzGTQtL/vq1iodreBjcNSPSIIkR49F7SdGV89azt2btgXCJFBvxHRCW1GLYkeBDYK2b9HE2GMxKsaUVydEiBrbQags91nj5cTAdiq+5gkK/kpuYPHTMijEEwVlFQzyvsaLcfiXGX+WPIYzJMZFjVsmlve4HS4w8QU99mNKevzrEEpVgsqVQ8vjduJf6fGhP9h+iYpSdzyqp3KLu6UJFy4toKQzrfBA7W/U0rQMAfU1O/7lqBo2H1rALO43dCPCSQazUkuRPz3iuyWwHZc7ghDiJRzhGGjxR6CWe7uoyz47ZRBeu8RicHIvW8P95hiF+IxGl+OHr+zxxFfq0ebaPbdaWr3Rd207wsSakeFua7YDqh8ZwnfPvGtnq72KhiU0KIXRlNsVqF8YL+Qfv24YYeIQnNe7Wj3GPTHm207ZN6mwH/xQkiw465BeXNdb5c1oZJcO0fk7z2GgDj3QHGAkAuDO+7tIJGLEYz5V/Vr/2Fe71SSybrPllVWH+63UAbllSv4TspQCS9y6/7Z0PSAbxQo6qYW2txfOe73PtrDAC2dsrSHXNv+mLJUrl4z3nSWKZkKzUyM5xndruz7O1Fxe6jCZQb284e16wdjkPIpPjg8q5+Oa/M5Limhy7n1b8jlfJP/oBuNPdi7LS4+DeFln8qJdRbEtVfO9DoK9o2/z/X6QTbnVgV8itGai10kzb1esIynJtvhxbmOmrEYhZZqjWkn2Ht9Wn1Z0O3/VQM8WUcxLkoXzswwp9vvB8kATwqYulwqCJytCiPGy8bgwNxHx13E7kyNrxgcz2qqBtUtkyKow3QKUmmtqS0iB1p/myKjnL7rl6jlKVQqtofRK+KnfWY1yxmuosHGrC4DC72WzfvF7Fr71q7DfDYYTfKvuXCbmHzETahFqMbR2IFcyScDFZG5p3rtTYBDcr2EPRdHnAitLmOTK1UwSnIrIHx9KiYpF/3JOJG/hSNK33V5Y0DF8OKV7rL8DUOdSsumw+Iq2qqPPF3QEF17/RsedVJOgFTETDxj3mu9i484gqHwLn5AS+taD92ZffrDakBgUNexMoPLy6PiouqZYJAbl9KcXqFnRLNxheddmx1uqUJcd8+c7Tb4nXONzebSR3C9Uyh3l483aaxd9JPnl36G41mMGPZveDPk6HT+5i7w1CnRWMXkckmFBJaYwY
2baNgSn3VzyKs2I+76wMEHH5bWukJTygdcCma9DkMybXWh1oJX0eyj4wteQa3HH5oKiD9cX9t8vQ0jrskBTy/MZxIyJIP71Kq0RPZnHUO71wP+G88DsZ5FztdaY2P7UD/sPppOmiPkDx3hZt0TaB0fqKQN6isQWpRzECDY20XB8OJwObN6Bg2Bz+Cu7Noz7vdzT+KIuXaVNrf5mESx07bvw98GIM+iht8+3CDR8ftuy/pQxJKDwnRKyqZGwVquyOqfy2ggMDuUTunnrQ/W/d9fHclvDw1bwD6t6ftgn+/Vpzm9VTdeJYKNNrPx10LXtdSfJZH6iF2UcGOkdbuC4ZZzYyjlkxhKQIVoIj6Odx69fGTA085ERXyt68KgbGrmgifKVzSYRTbUeOErshKwsOm4eAolgHKHNr4SmGc7IzI+UBzA1K/w3KUg8vmJj/xVwv7wZIr81cfB4HCcHMd10U/NVbu99UewH9qn8gc5L16DTA92ItsBXObjwU8xmnMCcdFs4ADjF+Brr4nk09FCzdIqO3Z+P6wy6ShMvYLvkX1UT4B+fBD9+/vGq5okthJoeFPFDlnMXN3umUAm/jdlm7QF+rOh7P6sQoKCP/D2VYjdeoBZD2bFAnG/Egj5Dyr2b7EfSK4b2mDQUR/Y3vQcSRlNNcWEAXadvJSb3vXzltGFoNH3jenCCS2GF7UUkv+EjXOgUEJ2m/iqaOh044S8+wcTUJ8W37mug0ozx4obXPsejJZcTP1ezgwh4owu/lPV5+HkzMXKaARhHSXHvijS2D/DO2blV71sgUoxoPAbp0pegr39bOQwO1OIWfnawuvCzSXVGmeEBZcT5dihs8UkBWIsHFebYwO7DyyYPhiJntAJSnKIq2vkYQgIXnBcr+dKoAC4HGFsxfz3nHvSNIASVfAfUMFWbDphfd9v+69wfPoAeGSu4wyEQdwCLVu0SgEXJYwDekyQtRSFDLDM9hlNoQwqEbhbMH76n+Jt6rtzngTF9jYxlNlDBaddZVsSN/ksgUdyHAvPCzlM0kpl+WcXjMixpLXoAsdjNwhYM0NWv/833bX6/sHTBRDQqrH3CPDypH4nSBR2MSuRRj6p68x3uR5Z87DNnciqNGYZBEwfK4wdZlytiBCP0eBMK98GFMPHuXUDM32VdUd/7zBRsY0/hYZVcQgR3PtOgEsBOtnzePxT2VlY1gb58z4JjQdDzBE5PuBEDUz2yeuQJzlKC3zKMhoqXuCfwMGEcGhgX2VILhtL5dykXT1fj0nhl2SCIaEYx/C6CPBNpE12Hcty4nrc0262Seivlcm/V3iS09I4AnjYQhk/TSPmd3kQcNfFJ0uE3IC7UycTiMdCbfNLYb/6HQGR3BxMU+pPlW38EYyBtcFTVC8+xMjJB0rpDGU4vuZ0sOR0VIOirXC/wPOuqMgIAvRdO7WCKb3lSdI1oO5DKHFjb9ZFy8gDp3bFqsbxEaQy1XgJoGzxJOyY+FK4lunGgWU4e5WenalZIvgal67i26GyJXISO5vWZB8cdqI8rzjUtCRobPT574KcpeSqo0t6naM+BvhkhdCUI2r9Pmmt2rVeX3tbZYGdo4lvk3tospjjEOVXPw1WJqHnSMph1BvgQjQXArLMYsNcc/0svwCzGUtw3ShKg3Q9X+mU9+f1VG4KOgZnGd/VoRKqiv4lcGY2gxWcnU5/f3ufOPmhuL05A0d94hdVAQVJ6vzYqybD0gbUolZ4pTqNA00EYiKTiPI9/+7N6t9xQ9szbPUFI7Nf0klHDaRUFKM80bPVd6pSNjQE6dtFuyzmTFb9MF72C2U/SgKCYjx34l4n0UiZnj80kjwS3ggISeH54QzRqrrCplh1MFRdx5N5ubnUQYZHaWNsrHdpfSex9q2TKBHPtNeAP2ITNqYAEelGJbBBap521ijALwWuOtAe3gq+ZaIje+7s+KlPPdePd34fYPMiZmaIxEdAulKZ4xWu2wSFEfhlVvG/orySIRRmY07kHk94ij35rC4Fci0swSk28JMtb1x1rP25LSP49y2/+9gNiAW/o2f6ujncCZP7F4ZQUgP1hvCfEv3pIjz2EgdFXbXJK27sOIp2QQHw3rkg0Bot8koYtVLS6VirxTglav5sQfr/EicjWJl+Iny9kGsxCj3jXtpCa3YfQKdl2N+f0Z8sJ535+c64rL+nAqfBFcKZzXIJq3EhqqhEfihoC/8qazkOGnTa7epxcg+FdxtFQVQHteJhi4iDxQ3bdmjvWc8S5+jCvoTwok8SsFnW45YHO95u5QyTt6f7EMuFV0+YtKEygeAepOz1T2D7Q7ZsHfW7HjkgvnMKoT8Pm10uHGK3A7ao/LJGM2URit22K2sVxT3/5rGxMdIln7OM5VIWCEoV5nBNvAmPyPn0QMYAfyHoVfAV5McUWlK7kej8SKZGK3wY5au6YcrE+6XF1p5mA57PPq3xuqB/htRj7vDHwx+DlDOSqtgStf0OxZKW5stUUEQeGpRbeI/SDBxjRKpHg8EDwj2G9XC8Wdqg3CMmrE1jVRXBemKvM2FsDhv+dLVkigUcVw+DLaFaYfXJ//nhgZgZjErq7Ap+o85lmozV+Yoz6K8fOm4OvPhjkMmZeIdqCGVkhLLFZg99hDxvkUh/unpwT9U+jiYPn9cVKj0V74I6XKoB0FHNr3Z1JYRuR7CJrjGRg5+JWwL+95/rb46qtEO8s+X/5lIrEfAegtuhBtuWIIFH/U/6yD3liL1AIgH7OTo+cuinjUz6HXlxZT24MKEcSk4qtS8UQWXNmcgFK4KTX6zi6J1CPBNL+FSg5ZMIa/Pdjk9wBzj3JXjZzEvILUZm6ynj1RL2JitRQOebti6q43H/1s36whnuB1dx8F0JxLA3nMuMPRK/rzaQFEqhpyrScWny//NEEolkph/1Q2ZtnxWpMU2OF/svUdSzJigTJr9k7heaI1rLQN7QWhYavX7LfrNnOaexZdxWdREa4h/D4/28clI4MWtSgN4IjWPg3kWNRPr6laf5y9bjgUnBUFh0tSBiLKcQe7KR7W+ZrY4Uss5iC8dcs3PevfADPVxc11+tiyu6UnK0mqVI3ODxw79CVHrnjnjOTA+GGl7EAScfB+7RxuuqcmIqCUGUOQEpM6P2NNmAEHBRa5FfY9F2z3wS7onOU0nPsnCqXiOzKUKyzXzIsqEh81oip1bguLVN5fTJ6SyWFD9eywSllYg1SIkg2ZtqIxqCxzYHVPufo93l3ZEwvdxcPet3RnfjqI1paUEmSIYL8DYBl0TMjKPrURfpX24MJcqDw44usfwzmc7dl5bwO56mND1SCq50Qp+4axke8b7wI4/Zv43UF4nlaGemL/BCyQK9dJB6MB19h6hi634OYky2xE0ilHXvZUzh9JPump6/FAq/JwE/1p5JMiHRGVXmxSbierChBQuzPJPMWFpd7uPb+U4gSGWDYE6pnKTo/svh7JqQKBzt2OMFiI7Ewz0JOTlW2ZYAT1p0wCcIUrOvJHMhrJgT9CuTLB6vqOcqNYKqc5kPch5kW6bhtUC4QSU7OOljzjZJ/T8g4NQVUzBgt
0ZLzkYB/rwq05AsKqh/qOQ7QINfIBppoYIhUaC5R50PjVNoi/1vODH9l+TneQ2R4pC0MCvsU3LNpYP6d41ZNIr9/mk86RpSgVCE57iPt5Ket9ZwS0Oj3Au7wIY4rPX9WYVwISjrEzIzHh/SA9mFOIcjz2MT7J6IOEK1idAOm29L6Q4D8TSLBQY5FSfOlgGLojILQVMgS8iKBWydR2pAO6YUjDB8Q2I7+kkkklgqEa0jfSaOqrb3LRGmzFFJkckiXtJuING2c1OL58iW4tLWaEBFD8WZMyxenyY8C65fmL2mWAw8olnxLVMyNn8POWBcNTtUvMCXYZrPOswKucY2PuBz9RkYJMfFMOuhfgsPa8UTsg8htatLm25Iwns1RQbtU5iTYYekvDtMR5nJea6iwDLoWp3Ht73bRYkAhjWlSJnfa//Wgv6dfMECIRcKkxeVzipBIJbbPZU17+N+PsHo5s21lIViMQRq53Yj7C+9fezIbxto7MA5GGLj7BUUuZmoUjulkVx8dYCU0hhGoFQbYC0BGraWzUJSWo3aU8bRKXQoLNitec8GMCkVqc7AA+vJXbFUyunhqCe55uBbLjMVO8otMyAgN7W5SwY+CTzI9Oy+t5B8sPxcEO1yYnxKvygXAyLObqoH9JSI05Tn/qdDXp305i71AR2cx79n1coO84UoRqGEy52TydZwRCMqEoGnvfVKBpEjsKkv0c9htzvXmaQmIOmjMvEWAsw4M2Ogo2FRCIF/GRNfmIb+CZcoi+f4pNcLn4IMOm2XTXWHpRf5bkCaaWXxdf5T3mwPhF5R7GUG1cXNvGdmRv56yVA0Ss7HxL/VPbrIYiLSeIWd8FEUTsQ1UoTr3t7OxhA1M8/U2XD5UGtt02NrP4PHD3NTuTh7a9+x7zbbFRxy3Y03rO28eNyZ5g5uTk0dJ7b0NDdP7Hq8RAlzdeHUcIFOJJYH0/czX5zrU27hOD0qgP/MkqyrrI/QkFRmmLBWQFPOrYbjZyp+Vp58bsqHDZqqLpG0MR6u5oEkSIT9bQpP9LLm2AwopQvfDp+VJojcIryugs6zEhVO/N08ORdJ68DphUWwj8dVzV0yUaVLTPfDnpVM+F7tMRBHbV7946anl13fcl2uexrJv0uc7HGlKS2WrCEjzMsoKZek/dMEq4C0zOYnO658WsaQ9rtYOpNTt38T9JR9RKvVoVKniyLUaTUT/k4XC7KBU9JOGb529qDN6sJTcRjI1qT0iYmdfwoL3DV4GADhV8d2/Y1tqcUoZghs6ONt5UJpEbD4M2L0LU7l4gHkLrWUh2x6f+BNPNRhlFyqBK+gXwrS0/qcIB6p0f2Z556YnGsSw0vlq2r4pYaSMXZAND85IIvrA3s/aiCXQOWdKgDkE+gRxXXtPVuOQm/9Pyw2zb2N2fKLmis5xx6dKQA3CSzItDk6Yi58kT8e2wz7M00YJC9s0LdOM4vCC91Hr8jFuxsBdbWql35U3Jw1I9ot6fRKypIFydlf1xx2DGjfe7bLYpRnDgNM/1s4t6FFDfSnFyxZtc8EIW6KjR+Ct1pxzFXAiWwmIAWMtEhmJNL7ADa6KcOjmNmipEZ2PQIxCVFxw3qab/UyUSVEremR1bkAgqXNzapSd5PCHlH6aE1TMe7g/6dNRUyJFOdlRZpN+nyIwK9rARk+o8GJXG+hTs99JfmmmgG79J43k6FvLCFGhdSd6J9N7/k9gGpRnlFAGX+RgFjstuuw8mVecYYMelydtmKxr8O9P8FGuymZiTmhRWM2eXJnhJdPULhb3pdJQu2mSpWHsrKGSA1HDuQI626jgqmfB/CJOFvWlgVzqPlzh2SDzz7VsW6No7D36Z5rFP2x1c/1VsNFhiNEh4i8FpGH1ke0djKU6gwjvEtPFWKdmtfFdTpo2TFLipZESipiPWsw7ioZ+z02PEbTzIrR8/dPjNMjSNVwdxyPC2s7KXq4tfQADr2TRQFtp7y2lam3E3JLfQrVBVZoPHIlfH0k1pFX+lsWxIVR90EVOaV6tbnpbUCEaMC788Mud53J0oAr8fptb+mcR3exBYl95x+24RG3w4nD2T3048P0g7W3k7EatkHnt9+Pmu3N37uIUK8jWnRlZRVkZBiVVGamKxBU2nHNvktDEZ0Ft2wJ5AS/5yPRDOmPuBz66Hqnr1wrUcy4UwPmaxzYOUtkyDfawgvthzvj0UqW/VqKqQ/zg0vmXryNFFv25vYhCIp2XLLJL49gPwxkPTrD1nMlyHHdpeUxjkUNDQ1s/jC8s8XeYe23sgwOHL/2uPJJitixZyr+xuq7p811SE5hYAYtr+HvtUeQHJK6EdcYyHSQNQHjeYhFnO5Sd0iOgb1KmWMD19Ra6s0HxYxQOIsFTTEWS4d11Vu9LSh8pFOovGVVcWNsiydiwqfJ1u8scN18ezeJ8Jjje++aF66f1Ynh5MmNAv4SlG2iKe3e8G1zhOkYzkf1z2c4xixOidLucJ+hJ4F8J+ql2bcmdxhVfhtK6psVd6W/dGgOiYthfQaqKVzRmwXBewrr52IaiND+iiWMUO11bY8kWEfYaXl58iN6Wo9dhAhp51twX6gYKvRHL5qTH5jKissqu17JfFxI9y4CxMJsfz8o6nNt04BcaZaD9iE9xL086E8I1gmTScfsu6XOkxxlnp1s2PIBN0A4UTAWqPY7qbwsHXyqUS/041bMqFbRlX4yHVIwTCfB5U6ItpwLsJvQgcsXHRBLHSs1RgfEjZEkwZ8uu3gcFWdVvWEtl16XWIXHnnhTzDcW4EV5u/0IM0OygU2QcLOcGlgC8l1hLQfcWtJsEFCN1VSZeSaEiZB6jEZu/v8U9Vi7Tn1/+0j+zAdc6IW3TtPgg1xG0L45RfOAxsw69KPhGy5pCN3SJe72I9/pR5q+R6X0NRbo20W7heEWfPvu35bGacjSo04w415dW9SXdNd+ia/OjrUVzmsqEiiz8GTfpZtednOiaYyAjDYgOGQHds830Xg/y3Hc8/WPbIIYYAE6Dy/RwKYK1rty+0DND0cpzb+cKD2N/YtuuUVnWl6jvja/7eog5YGbD5WOW9abC+hzzN10h+wF72hlzss8HfhT6a86c6hj0X8ozPaXZpThBinV5YGvibJ9MhvLs3zgIo+N5phIos1No2x4JCPnnBd80f1ERfnqFPyThEt0lbbZIpfQkJO1/yw5kkLXyw9rt0lY2SLnHNYvukpAxkbbZf3gGNAkSeUV2i8AkcVcfCbmBsZy8juClSrwXXpYR7n7Jghg5omXQwwtdNDI8islWR/1qudYwDaDsw8GFTMEy+BujOfvK5J96wmKVclaYcm4JToy30OsVfVw6gH9ixvuD+CON0yQK4mX9t9bpuro7bcu0QnfO3J1u094zJpU/eFPuT9NaxV+s8pEAgap0tSVrr9RQEi3TeFrxRb5j9bd842hpJ31yjKRjtPgbCnhwC7q/ecJZHfKHA7v3UFvCE+OxOZ/0cHHJqzDF104YzirpAcV+1PpIegU8Vsc87vfLVMDS7BLkHbY84QvLXbxNSjQ+EOY+I8h0RGVmfi3THvjR7l2Ea4c
Et4YdpitJu4wP1YPVz1aEhvdJqSy3sxh/pZYv3nj62Psi68ZR3DKsiv1oR/AlzyyX+fQUy/s5zyz9/ddi+6WCa9ofFfl2IG1RgwyPMT7EX8v1w2gMeCfz/QI+LT3QTeIZ1MlF2LTS+sUKlzH9SJYcQCNkKtxYhlvu6LgIcec+OGsIdFaJzWOMd3jVAKsVogARfqRnAoYcwevzmJa7SsPIYXZtGeO86C0zqM/RkH8WyxAIaM9jSSFEKfV+EtEoAQj6QtnXZsr3s6IwpZGzgMUQLaVUnTs0xz1hDPF5X/HRNAFjZah99jWXp3R4wze9tVxxfSkGCpcNxBHNXkEDyWjINjYEQqYH6TzIgHA5FX70LwUCH4IcdFelrSgSeuFAw6PnZGhi2RUPOsbYkWFg30MoGVBWaLcz/nIxkYaG1bE/u8omHcpRf/d3B82lYLoQxAjtXq5YkQ4PuoWQvw2sZx1swCoYX60m89f47NAQNzJe1k2FyVaBMm6o4quPqjTjZEK10oO2/tQYuQ2Kq3YfWnPCJFNrZHGO8qkJ9oPAX4yhE6z3MmTGt8pQVXKo6aNafeZx5PSSBoSOtUoF291wVtxAGogpEiNYy7wUyo11L5QyolShNKRPtHZMoyHaOemmLG1dFDeLXLn6w+Etu//UlAgXgz2cq1cs1yM+KJnJv6CvBpT4XYtfISwKhN+Fv60AyFkeZaFlxoM01ZdMEO2grdDNapIId4oOE9t6LpJ9ibJEuSiy5+TYmiIBFhMJKvHnK364ydmxfilZvX6FT/Y3nSCuK3sgW1dOhONlWDNcZCJGZeEV4lxsxF/XgMhdlGcGtHXRjJASu7H8bhqdGo/aoWC0ZMabuCcbURtRMJ3OUVzYPknJyVejfMFsdcuE3CSjFIQd9Bbba4S8bvSxbYY039dPU6w48cb08DX84tB1RO43mk/va77076H+2gbQUglzJnU4GXn3Lnf9KZW4oR0naZR8nSlHdqcE6jZbsjh9mYUkGmC+y/9ThtvAa4rwh+9BlcV9wRiT5BY3aWpF85jpVrB4pl6E8DbTlfH2IYvJUB1EIh6gBpMS6VFK9VJqB0EcB1njZc3N4z8xodMjcGFq/3R7vWdpSTlXM/R5DoDb7W+5kPP8QqkaygfBeRGOcNmCLnMSMe7KQ3LD+9m5Ryo7Hjhj1Cguc55eQDgk+ofU3dp51ETqTqxjO9koxD8pHKpj6LRm3g9ORvAG0V/wQy+r1VJIQT9OI02S+QXu5JvkJl79aVq+nDbS+hMCJzHqPX0Qk2IM7xuDSOHubnCh6uf0jcz/m2DlQYRCYOPM6Wckc/W9eMoFeujoyORyFGroC7WvyenR2WBRkfHJK36th5x0opH3RTXVhhP+dC/Ysxw+nUYpSjbZvhWd9MuX1HbQ4xAT+YzaR4w/gW8NuRrIxb/sd8VB1peHewqWkDSAs1O6SWmt6DZbfb6jL04EBWjJ/fiC3zKpTkYWC0rM9yb189isSxnf99VU2Qje0Rm5bsFZcHVPPMsaucjyY7GGhDWUASdEKP0GX4fBr6coiEQI26rk6GGT1aXGqxSxLLK14SAIqBZIWrAtX3l5F/onSSJXAQXat9bPvyxfpRRca03ow20bpYDIwPhQiH5a71u8Lsr2j4rUDisy6ZhxAeuyW57ndvz/KiggvCo08Y2R7Ei6eHBN1c/7/QE/mpkk9/yIXeA4qZvWqt+PNPWgwG+2LNiOmcTar2VQiJa2cKno7a0aCRl3f32i/nzmLlezJofg10Gwc8ZWvN5N6DlBczz4vgkbr0nKxxCOYaCLu5QhSLqjITPSWdugnsk/Ub4gGEEKWtBUtMgXyGNYM+LD/vXe3xTha5f/2Bv+MUBxwsMimQcLaAT+b+GYiBQasYmIcyJONB47UoDADuohiM47SYpqiFqg5YNmolDWCewOI+SLE47Y8zASt+y9V9oMYE/FdS2eAKtz0aKkuC0Wrh12htn7TKdHf7rCuJlP360wvvac/AmS21Q2B0x50RHyMBypiagjIXKBWlN9g4SNwyqQGp6iYFb9xfE8nH8/EBB8AlGMhyP/pQ3JtAbX0aYy89eCIOQ47wuV3K2HrOSeJ8jNOX3vRtJCfYR28fjCfNG5xFUlHLdHDw8cMEMaBhL+0kSf19FFpBL7q767mGc1hFqenvzUtIeO43/rhKHuY4+L2GNY2CwNb3KHFdbO6qbc6+JZLLLqnHqRqzigMM4rY7Hcp1nGjBzLAg5yYX+Swlpjghy7kE+L0eJfJ22kVvn3+Vwx3nixe9tvLdF6IaFv3Abp7ypL+XXYgHip0i7hZ3YLp12f//NPeZyylmlQmM8YMwLCT4b09Ba36tDTXn99JUgSMO819dA1Z0VXlGuHJqurqkkDeMPgDe2lgBQea31tawSkuBYG/KckVU5aWA1rPXA9MsikCRNRe/yXYI6kSLgkjcI3RNu8rUwhIbIzFKHKzqA4Z+OYt6iqePCslz5lDh5TEinhBgTHXc4175hpWdADmmvcI+vm8q/+x/0BsDZpo2nBW5DG3X1gqL2mKwZZ1xOGwNBLoE6Riq/iB9h17cmvDYA0b5bc6fixsootmhhsRGDwiDwDaimuvTNI1XzPGqDejWPtQ48jae2uo+7uqAbFewfZBhgz8bWT5sNi4kZN+ZYFOghUjnQd9PHzxoBlvGBvjrcAFD5b5KEiM3pmGdKvDM1Ysc5kEB2ixXkGXYskSBqb5OUQG21vuip7ckO0IfK1zLkVdblH5ln7W3rigCYXpm0sUIdmSLoMUNdOt9dj0fJ4hk4w/43mgrMpTGsM0xo97iFHSlY+EEBs/vZ6bRMNbTsiHSSClgM4Eg5DSYgbZXryIni9tCG1oTN9rgKUu43VEyRkqO91djEXeMX0N/TPskLzLwHxbO6IVlso5A+5gvXqurazW2+WGPqAThBgFACEtN70t9w1Vhb/6Kc8pL9htdQdoigD3r52Jh7baiwF9dtwskjheJTUPNSqYbAzpZowOX2SHNcHFyTK076QjDtpdee+VSQpleHglGWFLXBb09Gw64WzquoiUKmrMI8bEpYOzC8ji0WHBE+MRHPvTCbRm9VOi2U4V6mEf3y6QbIPLhAx8tdGqIyRNxg5JG/3i7A02TT/tB+utDWy9wHOr/oRLZCg6rbWo7bO6fquiP9d0eJeP+wlaFDI2d+/RvTm+3kDik5X6q2/lsbv9XooSikcd0m2jtYfB4vwZSlR9+evWvh6BptZ2KZhvd8yttlvzk9hXUL1dag+jC0Qhw/ysXjYwvnwcDCX5yvI4H5S9f5g3IBvOTZM+xfxNzRfv7C+jAUmB9RX//V9AY1ufIrv9TsGCz2lLoW5sDwX9gm9x5V7Aec2gZg02IKfOygi0EYmA924vUA/10sMYH3Qs+LYnlNm3aQYjnCBru1pxjPh7f0F2Ad8KWfrUDAVumBBrZC1Ryj9M8+FuRs+QhlNLYuP0/77R9NZv4AGfyAbKh6R9HXhoA/IqYQP8HosJbd1f9uw/Tcvfmr7NyIse9OQyc+TbP34FLAyOI+/s+D2OxyM/edRDOPG9SshjA
QleDZB+CuEFnSISpAiAl6m8lFh58+XmdQ4eyoshx5tDSflfCEvXjluCLuheCrAk33VcpGHDcHGSl3rq85pKl11WuSSsNaXulqDXmrUVU1iobp5rnMIk1FcVLqJbcse2+9HfJiK6uP+4rv/E2SvfmfiJ3Pw9H8qxr8POT4niqb7Pge9EoJQEHrLuRONO4ZXCu0Em6Jp2Mqie7qsEPHWSk41ITQ2O57ERpFsO4sJyRy6j1H0FP2lMwQU9IrdBIJjTWFtGw+Yr6HnOahkQzwREeLwUS8Eg34G4ojR5riQQ8xHRAB2fLN/JW7UyJpyLO5KFDPPG27GFdwugF2iDOiImvnv4DmGispJ9AbdxiiETTNfMix/rXU/NoMMUPW4RkkYNCKW4czpPzZk15ag5hJ3Hr/A8bCfckG/Iw3iI5gfKthJZ2x9qNqMh3flSEoj8a+zasAk2aNtVvxwlzO/lFMGqRcFx724VJPZTAvzrxEXZU9tIwcVjVtOo5hi3biLy2GHHUSdQS7ciby/QRrFlGR/jPlvrgdjgjSPePvCFmrGX5btb3/6ol8v/Gdg7X09Wip3tCKa1DPEobXTgkhvW5978PDxhg37PV9b6syP4vEg83kGBXUGo0be2MzG3xcT49VEh2aw+q5/+H5A7X2wO0s3C3o0VPLeyN91rgOWLqGb8buZlyojo8NJK6YNFP+s7/7agTuiAUTRLWj26GlGlKomgK7dLiWUElRvAznRETTwoli2dBFLECGen4M4KP6YkJyleAaV3skLo7Fs5OSgItMIuiigfC+EUrQ/IMD2iCxfHsPGtDPwayybj+os8e8QX8RED2jkfbRWjhjG4PMGQZIFxAaMyPTyFKHvD/Ezt7lJq6zJB4abh4wVPnbxCR7Wv5UEzLJ/m4321WhyWNQEvzyFsovMlZvEieTfMUYmd1Y6Wq6uyjr2tx8y/MO1GvvbQJe4UObfySwo48ezThCDBZ1MIZKk/D3IIfDaPoygPFuIC/RECgf1IGz+z9Pw4g02ajPhWM9YC+uTlqjhZ9xBjC1b98fur9tRhJ2s7PRban1v/9MUG9FNdU4bBNG5TW8mJNRIrlgkoz6Ubu5+PxPojCghJ1lRpEN+idKGiscrE0nY0PPijDpX6IVyWIe8vdbtknsGQnISipGTxIM+X8GRP40el1+1z64+YQ9raLbWV6QB9mqO0VpWZxmzlZxqOjGYeklgc0aHN3gVux4BPMBR2Qdc5aJHgQXShHYIvzIGFgvCmAi8w2vlcMswnQwz3IyOyAeKb/3SbM8/yoqCFF2eZxvJHmJB31nb0Z0VrzJtJC8QcamlO0vVSehLHeKac1FDMaur488MAaleZsMgifpGuZyGTCS7Sp7/spXoxsU05vegctpWo1gQZZh/bQi3tz8ogYulKMeNVH/B1aZCcXwCWmJhepmPQV26/GBNmTm84PZgSYnfR25bck6IO/2FBBfN41FPaHqiPxJJ3LHVvXRjdFr9nK0UAEhrJcZaveejdBjl0A0sr0seLMi+vbEySKiUSLaWXp6ZRzWOc8SU9sJoT2yzkR7dE7vGPuqyi3qnfykCsAEIO+aBwec4ioPtA/Bd+ngzswShJKQNOqAaeyvEF4ZQ5W/BJweq+bDf1iu8tH+TEvzT/8mku9zQrVvjGQL57WpBQ5Alc1Vf2cTT/X74HKAcLIyobPobW0cw25qECR/JwO9TxN0hJWu4pfdQ0GlxsCEHRH7/elRZFyN7rVl9NBZSahD9b2PSQw+ohPDkv55FfsP519bEjf6BRWR9wCIvthRrbTP7zbXCVu9HxQyqWmXdpDB2a7WMxEop41r2cmcNnjoSvxLWrJRSXCW4pvB6L7eovKcp/xI09KOqJoTX43vMOx2N+V8SM/TzIZ6daw713SN1XNWzH5sLe06o9dSaQ5jIn3393QAknIngKqyCok1fFYj1p824YT6NqG1px+Cqi6oK4pew76PCowKQYxT+qEesbPs9SqF5I1SLgiir2jn0YnhLESnYf54jBuWSacvHJYKhXeII0S/y5eyQP31InbMDWPANG2TZIt3JpIjxC8CASKU9nxJORMXROcjFW5ZwCEZwycahcOA83N2vhG9cHw1aH1NRPuxZxvX2UkNPAdbYtY3aer4pQfQ3I1I7LxN86bFpuDa61mhqJyHME8cfjrLJafS9+/Adpt03AAe/+wr0W7O9pxzLqkX5mC9uyvxQ6pUz+NNSpAIfmVFFvlTzN5GZg2s48T7vSb2ZP3EsSbKxHSryGtRbJDHegro5Zz5i3S/v0Htt6fL3qnf8F83Kj7ibteF05C8ld+yDZyIJzMX9jgr8vtSniLDaedELK2J/vtyrGMB8cyMQ4I+pQSfqIWeqOEiHAPK5iz9Uy4jI5T98ELY2PpbWe2XcDMOSP2gIOEI8N52cXilj3sqX/nhjChvB4HzNrwhE6ZhjTX69XCNnVDdKA5Kw5VXRN+fXuxRJZmHqPsHlnIIV+GOeJX4K4vP0OJcMQTDDMgY5cMeThK6KuhLUS80/vJNbp+FZ53UuNnR74Cl+owlrriH4Xpi7U+JB2BNrU0eJXgLGDsudmhfyyoLgO0Mc0xKPbuLsD0nj7xukDg8kgpAeR7xmzUu1HyG6WK8IpYoh/6b9Fku201KZArLCZEv9sszoNmZOENJNz58p3Eus4O6Cna/lIUs1ZJgav+4DtFyRnekQPz2T5Q2vWNH2cfj+esXjgaB2QjKez/nH537EEpsscAX7XiSOBctkHpTb6STvgTTUJwaVufFj1O7H7JAPgdwMgE8NyLPKt1FPf4lP3Y2cOMH4ZTDHYxeRZMpkUdM5NscbbAZJS3BJa8n6FR02oZAup7814Fk8eqFZ7FnG7wGyL4L+HWX3oT3BS7NzwheuJj9SLOS8a71sj1vOCBUk/No/zt5UPmaF2z74M+afgx56e9N9Inbg06f8TozIQOdej1wEhwuD648Yelp+o1/mZSP9/bu928AcOsd/m1CnhwpEF4KFwsZS6Os/cQCsunfe/U2yiAFJVEYIpvAK1Ri1fieUAIyboPH0embEsAveufTC+9qXx45zARufwlPY92ruRLXed8dhx2F6cvLp1DbdByFBqhdYVXUoNv4cqLpsT9oo55s71FvisS5gTtfcJ9SNfObx530UM/yZZNnYuEHK85lrQt2nrzeBqgka0R4/nelFf3X48+JbVU5/H+pAuWAs+GJjn+VzfxOXeCMVuyAS3Q+KY5SIXkw3hRoXnB3bASKyEasUzUp+E6S+v2ARXDNCyGVowmtMyZ4MCVJo5q8GUyHPyaC1nCqn/cf5JX6IJMVwXZLdvf6PVd/IvafxZ0JSusJ8l/I+r2eLe06m1ZJmYiuM/pYveZlaRKJcwZbpm6J7aD2bh40+9fLoR/6i/pRP+vSsGR54ka8/jM/oKEwM/HhEfCA39UodP0jKLQ7VeRi/eMkihJjOoANKE/Gma37Fy5fcIyh2BA4UtFnDTqmv6XbA9WU19Q/arVYVy2o2nuq8EU0lnjvbq+jjBdBDQ3/1J7CfZipqJaChU2kyLBr9ZN/YnvLsq
IeVkR3+BLT/tIU4ZCmoEF+yio2N76JVQmGFdsB80W7ndmnlp7Z3uJywsQZphW8OwTEha+ktCbtFQAuulcLWfyUorRVry60yiZtioCREEJVSZ7apMy0L0zm1VgexomwY/1VIT576N1g4W7gwGkCz5ja/tuGj4In6yJVSr4+CvrJjtpyULlYma5K1H0ou95dbVSG9LIKoPsvsPeO9+SyFDEO5D9xw+u+9EEVUUdXkm/kVlSz2umtQldzFYzqJqsZA98IPsAaLpm0tcq4jqkZFyjZhz7WR6G/8MaibT1wiqEbRRsTwbxS94n2sSRkPgoRGQRWR29hVaNvIPBszo1zjorLE+8ImS4Myst1rb0zOB/prCwSxiZy9QOhtFOWujyqDKRHgNiZbhlHK3finK+33/F78iVeLfzXDs4IcGO0PiQzfA8GITlEO6VygtlH2evSLcKytR4+buwnB/XuJYj1Nvu9lkmMpoH5c8zmKqwP5K08frHAfgg+NN/gTTnvvG95QGRjQvWHaUJMJJxM+073LiG8JzK4yjUKDkLwl/AWyPa5BSoTC2p/kh95nwHcTZP9UH3y4qgn4HtaO0BXpR12uvz0otgGBVIWsdd25Lcs6SNmRxORiAS0QzW4xSXAueEPVk+SZw/F3iGgvNi/r38xGiCQp6pDM4VX+OgmGOuDLSJguFAmB15tRCe/PxU8dnlRhHsbm1xndwe2tOidBlQUtf9tdqW3yNPC0aQ9EgNYTCg1QydFZS27YTGteN4IHE3ujUZTdZ8RkU8D9dTSS2coyxV7vK1z9PvCcTQVpVxoda+xWe22zCv3hG2ztHayGQEWA8OmZykIbf8koqM4bV7onE4Qp7v9mIQhivmZokIKfy4m0zP58BIXaxo8XsLghkGzdoBFdMXn+4L+UczU9YTuviy6b2y7nYvXrlbGYimKTTdaHUQi1Vhl40KNmyRicnoP/Qnf4xvsRwPEh8rP3u5QWe97DURU8zOQJgp7q+o4CFrtlVf327abYaON/v89DFqVsxd+HvsM+5mOOlOJsPNoRInyqmbmttM2k6hUlbWsjU771/dChswV69uSBMRjSD7p+MK3F1xkNAKYPlUjYJ9zDWSfjfm1JXu6gL3qWBonI56ECcizMhhoUA/dUxFaIQFTkoRimovnCI67h16tZ0GA2M6RAJICRBCbihLUurR+r56E5GVSt6D4GnXx6e+VRHS2s2PfXRtTFBnkdD/5J9tVTzkUk7gJKUGMCyWsvwIuLLeYcSrQu1rXIqF78YpztXWXJXxXOaym3nFJUh+bp8WLte1IQQH51FoEOtfw7R+incbJgHDzcVw+uZXAXg6XgduwsK3rByiD8KDoAk321/zTGDvZQMMcB7BcaoIcIk4JbtsI9anNc4udTISjSc1OBS2DKiLk8UR+vcXnNFv5l4Rk23P0RfhCKCxIZe9+VUB/r22znT8pIi8dMdEDm9pHqdj6eo+dG8fXfNCYm8zYZocVvmNGJuJUH4hkzfad/eV8cJe9RIOvTaiIV/6LclKf7JZMI+9OyLFB+o/5ZKeKzFdiNzyyf6dzrt+Yp0gHvWs3CWTLJbswgKEaXyj5G9nWGmKtnr0/h4gODbXBniEPKUmu6wqV5oim7kkg3JbyGEQ/yqGSDkcO0Qx+xxkUSeDdl1n2lxnI6wKG6lZZeMJIIwUBxzNrWNrkW07zOYRsPfWexMleQE/+iT0we6r8ONj1mk3jOgrpUeYmWy/uENJHklTC3Pj2QW2NRWobZKGbX506RcTVOiIp7ZY30XCwkNUv5w8yiIloXhRnewIEqta5T6OviHCXFtybuJZGRR/2vOyndjN/4odjLut63SRHEGR4OKqCp3hq/n5I7JHZ98C/c80Ugo1LaxvN2jGu9fMxscsmFhoGPej3f4eOQ57WkmOhWEH2MUMwMMQUQw5R2BuR/+iIIIFhPrrp2d9236snuicqEUzEqKbti7ydCIoBcYJEr8rs+QzhPVeIZl8M5X4K+WSjq6G3qf4SlOovxR/yScsmebyZnCX3Jrjf+mtb35eFaS6/83SOnPj4Vf1/oZrcnOuJuv/DzmVFgrg2OpDHIqRwtXEMajQ17mUkpusZ6CssNNanaamXx+zFlxdrWom7fPKWwwSpDAZlRk6oLGCYtkJmNAqzEGiGWvH3JfTrzmT/9tWCnabqi+Z63fbfNOAu7Q2lm9EV8PgmCZ093eNcbopqXrUC7CjvXI6p4R9SEo9W+6TYkh7VZLa+64opj1X74tccfn1kG1LuFNIP2IBCiT213kDObaW8ODSru670qC90/HMgo9mtb6O63P4Kp8jXZp6VMVjn4VxmnrT64I4LBrS+Bwclq6dw0NbRiEQ64u9Y527ZjwsGK4y+Y7+K5n+sgiOEZ4emWmiHzPof++vmaA1ufT/fSzJnSnSlMrP+WfEEkVcwInQzUJek/FsJctMnoAbm4wsLvm58FAFqmVBfsOHlx8tXwW+R/+yEcTvtRPZugqbhDmbPT2iiOZzKetDo65EK/fed+4e+/r/EUNMKh1Q/a16TtAZeNffjjplXmiSrELIjT3EUfZf52g31Bwp/2kA7DX4xPvy8/t8xliVNJBuuehUnqDlxwxQOfZvLFu2aSSvHmP3rC/cEXfK0W+0jIts3YJvqt9+/8rfOetSa+sjS3c1PGZXphvJhYA8G3TLe/LbPMlbXpt5yX+TNteernShZ5y3Eow+gb+5hHvz1A9czK7p7BojVlqnMxvj2FN+mskN1PrIXf+XI93Pn0wHsrQrF9E+lR2F+Y46QRv5BJoqUDXXnOJXtWuvKeo6zuWlm2f2NwOtEM97EfqLg20CTikEbj7YQc/1fJUaS48j44dKBRfjRrEOfs7ysO95gk4qO3PeeIix70dbybDL+mcnS7fNBGYs++hLla6TlxwJ/ZtHNIU7Nt4diAB+U+j6osQ8w82fsQ8u3a4P3xxCEe6k/rYylI0m7Fr4kCe+5/JbxtsTYbJOFswt7D/BqPSOJAH0LlRJdY9qHMTphMGdilm59xIAjs6vSdQfJSxLTvpnKrbbC0b6PoCLh+6jegIMPX3pllHiPhr36jcZqVBC9cCnQWMdhTcUbIQQbs+NoK+uFUAdVrbQM0CxPO8Bs/3LK8Tt9XQcfM2DTih+uCzKsZZotZLvbZpVHppu6ZPOEZeVOqjzvF5yRp3cddhnZR/NgIjmQT60aycDZ7/5OQotN9fBEqT2gx2FXTH0RKnK/LEAGphvClAyy94/lxi82LnQ7CEygVTBzh+A7s5zvhAA+gTyPWjFhYD/C+5M04RWhajcjJZYMuxI/+y+0zNcQdUdDy1aEG5OBNLDLcyAj9+b/ORXxyUC0TJOL8IcXqYkWA13CbQY+slWpdF3vZzMm8fEQsas1si0bhJUnuy9s8FPuWQSUV2puEF8pLQFo5+6a44z/2mdASfiZs2+m9RQ6hQR6PFvq/l5LlRLr2oGpxADDR1Rp55HDHIYMAhSomzEALC8ke9/qTaksy7xthReu1mKOpiCtvU2ORf7V1ElNboIjOIDUxk/51720Ka/jIOsm6rF9q
zBpjE5ONSITmhN74P3yisof7R0c3P8xtLmOZSKRLpJZ+L5IckW3+4WO0ulhkFIykiQ1Fwo//UgCHrf0CHf7yLM8lNO79AMYKnAl55cHzCzge9AAXhbdpTQdyF0gFrpU+/KjeD8AvFrNwnINXzp/fAErFt/hB/M9B0uhXpLvJAZLWDFZYXOCk2paJ/Fp+PUNTum34ICVIRYJMKaDBmdT1VqUdmxmayvH+OLH8Roo4ckrI3Ftm1rqBWBk+SZCD2hwUZeqI8771C7Jkr17dtlJkmU8cct9+LYaEtsCxqKuZwCTAozJ/s5vipm8voIaiQrj8FLsnX8a+cYjN2Bo1fSkyhcqpSK7y59VFcQlH22lnFy6vuYhGWy1VUImbDWN08F9W/1NPssUP5oYbuclL2e/noPcBuhVQ888Q2HmzQ1YLCJZbzq/za4fdQw7nkDHuTwg0vnrIe4o8RXBLMLXOFrp8u0wERz8pJe5BGhEN2eHkZPXlOMjOMfR6JbuYxEfqoHm3ZjeK/pDGI3o86/NHFoFkgdzztBw4Z8JdVooks/l647+x9ttACr0cpjlqXW3xEE3Pu/NiuGkTT/jFqUn6qY039v1qzkHZrTEL5Phb3Xhs3J+CQtFclOXGCp/cnyEVkm43pkVGkrQoZuOzdkuCeI/30zb82JQbaD8KYEx4+BVbO4bKx/C5hk44d6L8Ihtg+W5+X/O7dsB52yr9lPESy09p43k+q0cHNugKTfxZ8SPr4VAhHU003vP2fG2RfDtW+QJgkpfVECxPIfEIJ1mHRsYVDVmBZ/Kjy48LqwVOTPq/uMpYUUOaiP+NUVIYvwVDeYNBTOt+ZMw1GF4t+LKjk7b1unzDk+a7iyMj+BNpn18C0HrmOmDpYoRcBNu997egUYYjR9H3yeLR2ActpoWRRjMW68/3c0nVVyjP5SRdftq5ebRPhCNYFk1bnbnevx00cIVNmxvnN4qfRPBoVbPJ/PF76bEnuWhh1xl1rRWMJbv6lvRHRq28wepc7EbgOMjymfrolzByTRigCFQmjoxT0TdeHuf8oPX2bYxTfKklEIVhtkzAvgrO4a8nAt+MsXHqTa3dwO2d9fcRCN3BNEu8fqXls+ijhdIBufBENxXuX/csOO3nmxqiBRpQ3mjoF62yiub81fFsQY5YanU2xNG9mat84sjtAN38fTnjh7xfnNj8U/8ThuYojxnjo/Y+ROwwqhx7CCUxN7mHh7XuDAM0Omx6jQq1qiTx3ggCYjq+1pmeGMdeQnUnkgl6AHzuTxk83thC4rHu+9IGykX7Fy6D5Da0hJJ2kx/CjSGZKMAezg44NaUQfokTq9b4UU1maSNc7xGL4zANvnqikb1IxhurWDrmiVfRAL9Si4UNqA8D/oB1QQV6fMHgX+l7TTkw/MB9ZknXvo4nZ0XWTV06fZpWGFlZc21y9G+hquxqcJ1rYiepOkPQjLMWP062Yz9Ho0htDy6b39d/k3cA/ZovN98gio29rmKtnnkBY79weT7vmKM9mB2/qdBnRB8UOKQdxJAu302+41WG+hkTB3WLS+Q3QtjDGxeW+MsMW8fgMVVK00jhlStzNRLlYa93UZIoCYhVjzJrGiPA9pJDqeEkTC7onEJMc305VHdC6ZvyNwoTcloceiv5ouNpEDiVPGQYIWq794YqHPrb/4VYX9R3sKYJVea1OB3k3Ef1TnoX+zh/UQF8DUL+VvPmNxDuK6l29Z1Rtfn+NKZOHD0QbBDkeUcoVy9EiJ1pMiODNJ7WhL2TXPxTr0gbh/2key4ORDPBHVSVJjlctgqCWQJRTMZ0pvz7cKS8DX067q5ezu+9yhKH+y0cJy+Xe0BBvUE5fnh6nvyUiyHMKjT/JlaDMSmb0htRB0U4PbqwpxKWbUcCvTonQfOmzuXdhNyN56CcDoGQpgw/CysUTpmVdcL7dGoHuJZ+kyWsO1FuunPbb4DlSZUomjjG2c9+kosWiJZfu/1n74PFf94woeXwboyplnadih/xDiwMoAdtM45Ofpjb/A4B5ICOIEH+lV1SyyCBls52pljxL/sigoV+mn09jdn8FBoXzCD9j4Mm4x+b5aaHuWxp1szUPF7X54Hhyg2J70bEVWsm6cinZLlPe323FIH1oqDI4SnmPfy8UYjl98UNmwSAdgn9aUV5frCCvtHsp/CG2dqo0+q/RZhJz0DmP9cruJLKpR8D/zU6y1n+HuevFQaZ3tWXVduG4XrkIOzNhrNj2W4KtZQErAENC4FDAwLhUXG5ATj5LXkLgwr2ntNjmxmusFIPf9iDaRrSAFUT3dUcO8FGNJvgvNo6vLGVSdgP9oEculV1pf3528LfKPILla6X/qgC/NV+ecLV08CqsDzN0mUHKMaNpLgk7q5pa676th2d9V/bkuX6cPJhM4OnKAOiSP3mEF4/ATyFul9QrbUCe8eENseJb3Xde5U/lAouhrcT4G8EbX8W0Keg82krROts8/1OL7zg0qr4QhJaTAqkb4mDkDf8WlYP7CX62yvND1/+ot4rlbFJZXipftiuM6FHflO8FgfdHl/bZXE+NiNn4eoOo7q3ByusajsJB6jCx58XGAA7ZgwKNSBmIMoldHNK4WOWgyjfZdXncIslIsA9/Tly0eiX+9fx9HmRZReoPMWG/ed1Zf4HfBAV4bk7f9sNs+GcUdVsZijaje4CpbqX/NvFSZHhXwu5KPUvTHbmjb+vAXmJOwyfeuRI6A1z5m3K2DOZGD7waO+hPJnU51y4dX1Fe3e9CDVxT41XzftAMToDiywZpq9CAEzrRjw413ypABN5Z8y4Qk1OQc3GtpVOAPnE8PAznLF3oKBZ9ennIayMoG6rgdLpcp6lLF1VC39KZYcCim/u6GUz2TLptdny1OutS7mPq+yPmvMzhfSZ4GkM7/N7Q4MIZ71xhowKWXGOcCDQ5geerrcEMZG8boD7fMIYzC5TXGhPiNfHPkpCOc8dVfy6nMR9wAfbj+yzyDib0Uphx8Kv42B2bPSBnsRchPjQccY/ejBRsbM66f4IryuC9UeR56c+fTGUm6AU97Uf5SOiYI7E7xv/+CxZ3Z+JIZJwMkZk1LPvgfnlj/lJnbyYwkEqrsZHQHBJIJlFUJCEyy/7Wctbsy784Ai1flD7NwfqjYoVi/W85tGi7ok/+xD4fYaHkw0Om25EhcvCkfiRq8XSd4AGTuFgJ9MWrWgpzrcbASsH2W9WB2WaGPzvtqjlqgwpQQWaIp1mPkqZbzN9wTgfKJgj+jGzh3ByzoenGWSu8/FlEwwbZL5XkcfhCIBg5ADkM58PIbHF043ENX2ihadcrtTjv0H126vELGbQJmYdpUO/25/irYXNXDOjl8wmV+OLrLCI6phDoy5HSnzrjmXtQzdz9iQwk6tlYmL46zJJzJp/6zQB+H8jtS89lO+HvvHX+86SjcOc5YjPDx7/OtkT42qYTvwNh5vqkcrc8Td/eMlb2MMSlVjpjm4TREHqkGATnWRpmm6E7vFgYqV/jkDP9fefbpmiCVHwwdI3hilVpqVCd2cV0OlgVpR/NgIXcIAz3jnHuTp2tC0LlYW
GKWEF8ydw0jEJycaRFuYo1+FLZUqU/yiGE7085uOalUz4PCUKukRYLF/gVWIJWnrFwdc1xGJsEYXc+isU5GBhIewNdt/Tr/8268YYGCImNoU0t1DU2C7+X/Lea+d1bccSfpq67APlcKmcg62sm4asnGVl6el/Ta+9C+d01Q800IW6qYWNtdenz5ZlcpIcg5OTvANbvEokG9xX6aOafXm8Djmr+BqSN0lKVuGwjPVNUt0J8OqmKYyibafzZuqqOBCaWkvfB+kF32CMZ/fRprpstFB61UFamTQ20cLebOu0Gc403HrqrIbxQiBNLkzTzg/IGHAP7d6u+qwRz+Y+k4ev8YdTSeXgGhl/xW2FRxGqjz08i1DMLOtVM3Bq0VDSFwi016VjPLHgU03Qi8TVx1VFEAAq35N3VPYxleNWCXlz2IDUzZaktBHDW/w9wIlaplNH3ptNcBdiEHC/X/fvqK8B3xNw3Z/sIFUqv3H7QxAb1EYf2E8ATBExrBhr32FL46pUOcU2/WYWCN42GCpBNmlSfDN9MOMhO5nbkzBnEOeuoCdIayw0ctdRq1ZvLpft48j9EtXy+8zPyLEL7OvQQryaxZkKTM8NA/zyuqjTRtQ++vV9FB1wVAfWduvHxl3GH7djpcIXd4XfdEyld+4L45xhBNQ8ej0o/fpVkr3GhT9CkLSPXcv3Gb1/4db7WJtNDhJJ8B4PzPede0YHlFI53Cmv5pbrGhoCIhDPXGJHkA8X+ZEyODtH6Ji6jrVnNnEfTHYKGPZXQLbx5rOeExkrZMNNf08qSPsxhSPU52MEolmd33n0LdjXofBN3kEqnqOLm3oGJN6Vn+7qZmyIqTxSF0jKB5CQ/83GRoAL+iSVKa8UnTqJCdj6qd0bcrm/7kiZyvl1JXMNPp03J59MeokMX2Nlmu53tqN1aV996G+NgUE2EATEaXl9hbrU2oHP3VgFU4mGv96E8tvxM7zycSL69eJ1Sx5zJM1549XlFCeYuIyYLo29B2qM37U+GEhbuqvixQGRwclyuZ0sIJ62cDyQAFV+xdgKe6MFP23qqZBf4XozRVTlaxrWaaFSnRRs5E7iwSNbb8Moe+DthyJKomBz5Uo0z8OCNC6+RZ309n8Nw+PPUru7pus3wV745cUDyOfInf15vAuxZEZPWSMQ0sun+Fup0VmR8OGuKrzjWk/VNfiOmZWOuGfxHs69AaSmuuHMKO+TIg45bb9+XQ+jONiwotM0b6U145IP5lHDQgOZSaXmv9XSQ/Q6IqAgenzICaugwHJ4kTnQmvBd1CYfl9TBRVjEciGkOPrt32Z8YxTzbt4HSDggfDnKU4CGiujZX+0lSB+2LYVWAjMdxOThYyvm7F0VSqzgaA5xbi+1W73+YDj/uAw7zmkL7I77QcujX2gOxh7Ko152vkwQN5GWcGYyF6xHlbJtOWH9OwKzVk4/vAsewNJVzXNZTR5IR1ruMqHwKT4GqLdvqcn8Ghy8cAtCASX1iohgmp1SJxMby1K21UbFBVFGKKHcSK+XCzgpyAyTK63Wu/0mDs2Mfc+QmvUim19mzfGrdozV/az1zLAJuDXb7NuzDE4Lc+Dlr9zHvMHqXoYeKhFVQiLobMt+Db7PDvNzIuAgmvmaIrloruqhKo2g3Tt1QEsa87v9gJ88z7G0mRSZVD6/NoS/AspJw6UPRYFsgONZ1gfEHl4B8g0frpPhrQil7zdB3kojXxWFg54WrOMHg7rHeUDPhI9GwsVeQ/trquB5YVPht1qatw/NxvFFARU/51q/sw1T9uII37dlwn2Pxual1QFL1s5jGuncLZDBJx9CaDkGBI8U9m5K2yxc8YyEUywJt7eyGd8LHwCUtjzsjE70loieW/OrgAosKG/N7+YiUb9mM1osi+f7XYao3QHNnx6TvTpVRhVUVnFxMhpHoxtZmDmtXN+KzSLYBAPIzAmKsLkCZUVyqtwX8TVDU3HYGyTPsN1CbQmfAzvEh8d4X1SHGZWEEZglbwaVKPrKCm25hIslpFCd2K+2nV4Q0jj6ogPGxsGyzTTFhDkoqt5S2dvlpMxdw9cNbVQiNi6PET6wIRWbLBHf8MgIx0GJNNxh5MIFNwGd2yxHBbPt9rFhv5TCj1bpGWtvq2ln4g4tYNvKc9IGYx4kBSA12N95G003BdvFkMyvM0Td4eSx2gIoG7nd4rV+qUvHjsIm0gQMDfgd1X3MU1e3ESQBRO093r9pyJrmY5heqBOlaNUkIpH6hK/gDzPko33SjSUhtG5MH45Nz7yJIaBKXuS6TFfqzL+P17v8sZdHdQ9Z6yVvVcuw7hi4u+dLy8oEikKB2uXO6MgMx+ldGotkP84zx/boSt+DZjO8/nuoB2+iBlizHUqY2zEp3K/BU3dKtjz2d5kFRqNj7o40bWlpAYJ4acQVKmcn77gNdxcQwCWpoiU1ukPbX/I4LEqCAu+IJfDZLZL+sFfzwesSRe0HXGzUamR+Q8+DoK4wBfIKhLhHDLwjQ+IWE0GUbG9lWLfbSgHSS/bLeT/6Xo8DkVOwg4fLkHHvHAhgj0kAc3vCFz6Rm9m61rcogbuVEnJtPmnsgxWpIlxIm5jUUJ6FChwAozr5OxkH8n4ciVNobtZgK+nMAc1zYMTG1Bd6yIR9BvZvH9Kv6W8omzRiXjransXWhdc4H0Z5orKHJ1Rop127qDFJJedO2Hvasf5vcs6Wp+anqK7dwt4Pfe5N6vV5f7kDQzfAeh/XDzLyoJxDWNL9U90GqrKgiB3MRxeLst2W+D5HhMlA4YBIX2qhkGQOXoEuowFTBA8WmknD9imP1n4gM6q8e/PI0bDmzCyxtuWOez4KMaEkOvJzu6g0WIrEPM7tamLOcD/n8kJ3XmJkyW0+HgaWvyzxMVm5KOC0SerW4te1l+ClDlx8dJS4hYfJfAge6F/MP2bnkA2VHdVEahekeHdH3zo0HxZ1W0lvxcfefDJ0JTo81DYnJJ+YrVRcwRAb90uPKWLVRpIJBbtCVunJ9wKYYS3GS/0rnAKwDH7hvzOgrIE5lrCX9GWwNJ2ewrab1gBavFh+jF9QsqOydIU29gR3Vjio5foufepwDIirGBLyyHCQWUKbm7Qb4W305yMW/rCz2SboryzpxuNjhLThc4l5fdiXy4KEZ0ySnpMRMdSl6od55FAEpYIwxkE8bHT+KcoqMgIEovtuT24LgO4LvM4P+7fCTQ/wrOvqe419oXyrd9AHgCs7XobX/TsJWg9C1Mm0Uc885ea7u4WP4D1S5L1gMYNYZJbdyA0mq+yaeqvXeVcKBtges5QJHTNzYGknOD9NRWDTbnchkGzajYI693PetxwPjRfe4cJFhCKcJbve0Ru2LS9RlG7ywr8diqLE7HrUH08EApp7rpOG2jb675qXN7dkUF4nBqWNhyZ3UHyPplMxZUj8EvtMvnRQXKDW3wwxQMqpXULn2B+JGMv1a0v793/ir5NvYn82DgzQFev1bcPBouDQ3ekYd7HL8c6dAxyRMF0U1ZaEfdsWuev3F3hD/CQNXXy/jj3/QOcLmkOfuqPl/JUP7vm0T5/yRl1LaK
rf3VBMAR6jIgS12ktLioIPfxNkyOWpUdNlhT0YEcLi3ndq39aquVRS/Ox6nS5Ot45Kl3mY7Th4XHziZT+ws0rVJyZeBPznJDdbKxFeQj9bRv/JTrxS9trdTrXgvQS4ZrRSAupkMIKLfALzt/yUZoYlfPakQzbwCLezUl980AYguOKjbuV18ydGFWZWMKUPwEMyijkzxuWKb92NA21NoIcfeIiuG1a5+eRUeCNhHxxi+zzGQJF1wKA1RbBtPtDwizJJLDucAZsRJb0ZRpMvi41iVrzaFLQhpJiNZ4KjKosART5/lMXjf+sE7H2dKiOS6vn54NFkd9R3htQ04TaQfqqt8cYhY7akRkkau2nYrjBQhJMBHiwzSB3TzPi6AlpWlv+Qv13lCmk9TWfZRa9eOStg3QmqbGxnBZ6xSU2phjWPNnpqESxECVocSNW8Pot1FrdbMYKlRKU9n6AHsJjIZG6efhMhObWUH0O4H+XP3srwuc/BWGiNQFBlAGt1mRzNG0VL7bFbfLK4A9oa/X3bd1zSv3j9YFxwzjDA7+h7mhdCljPYNk3h9IPVbxLVFY5nNy7j0qSoyHIt58jOAv2RPeVhvdjg6qITWuRDGgMQBkYnwc3Xv8lzBsT7fxSWwLfsp5ZHThkFfuXqm0hzCxXTjYU3Ucbi2nxZBNygRvjWJZ3UJ/z9IYonpqpbOKHlIfMHGxCfwSxKgKNjBYC7u7PQPZoxD7gAClEeM01V8JHG597ox700v37ORMjIajdMvluFIVf5LmL3xy8u8rA6K5vplHTJM6oEVsWwMTrPzptPrgtToSXmfK18ZuOYF2KhU3LIHxDcDy//HjAfQ3guy6Vrp/xXRW8FDddg2Sese6xgm4pmTUAFO1uuyYI+BybDh1T1aJUmwkyFUYmutHjqNTJTPUTG8uwAh0uk+aAKypkexNsOIAv7jehqM47j/Pb0kGMoYlcrYNMrW4GgKC9Zs9PKWtq3bBoTsNQbEn8Nsd2c88wC6FnCzpoGGjq1cNl34/tgS+CyOxQxMZeSYnRvaaWUsr6d8IZFncL01iz99UhXUEc6SYhZ4l7LqtdtWWSy11bA6HZxCwlkpap0cj17kpx+jujDaH5+XmIswfWHdCg2AzGihZaTx2j6u4/XXB3D19Q4qV1MgNTjZNj88FmbP7F0I4oxi762bdZx76HQcvMcX4IezO7gUVlHE/MuzTQMD5L/SzKjcoAlPnp7KTmLQLXWF5KG1EB7yfieqvVyCJAYDOkaHD6LkDbpwFGgpCjtKUDaGTyvCzAa5gBjoL5hTvG5eaBNAFaQN0fGuyl/j7fCJuk/EU+iio2hqJTnfA7JnLUu8KUBZQ0MmQTThmamPzp3/uCRMrzC+TeQxrRqIlsGQtw4Sq+b/mFs2Ahz4xC+0TTpvXF9GPGFC74Pi0HAQ5ptS05zbz3Lfz570LP+LViCkp1uyHggrkzVuI8uq2efsDCUGSAaKKoC17+tk8U0nEv18yD0GGCqqxllw1cU720jXCBCe/ZRog9AZDeGb0IeQhhd/ZIEJ5OZaFYkVbbeqvVEBNr/6FU2bTmruTm+4MH82hQuTD22xGZm7oTeYyI3RbEGGHe3/UF6v7Y0a0qGcop5NnpgTepO+UMCKqW3C9kIMPeCAWyQp3vtqIeldVHRlLpBYMeXNtT7Lk9ivLDfYWjXpFaBGa9iYYiiZ0wcrY5fw7EPICYVc3f3FJhKV/Qs5IMtYrafQqz33Ob9p3UtSHisWDuHtJNsJOpQbrWa44CZuRhTp4TEXilNrOHyIH3ctcYV1ytzM1g+6E4s2PgkQbnhZcWAG+IJmmHGs9Y6+J2+hOzHMeRjXa+XW3AP8TbthJ2Czg2LbYwAjQIeXWgxGOBAzSbdWv1tdqvVn7Dz52+CpG2ZP9E3WhDZHOYismf9OOs0nRPQnA9/v5CN7IvBSfxw7YlFI6InxgWIAu/IlzssWwG915uH3MwCr+e6gZkh4FdWOxbeuQLZPe/zYzpYp3J/5QTZSE7MIuAbDoYmaHkPvnjcFbBucyPri73RYqTjtaEjCPAMasDNTa8fk1sq6Xyf6ZWRGKlNgG90ooVK44D6pQ4WwR5VZ6pxNdXheRhNN/eozcyMY4kQBF3JYkL+dPQWfU+uemLfEB41w49b7uFw+3UapmPY70XMkaYO0x/vy8aB54BiGzGqUoS/wQmK8tII+lzBaT9RN2nPqF7pOTJywLgIuSJApo0KexzdQQAcA8BHTdb++RQzgKOfHcSTdUW/rYsCxJVpv/bur9FWyyfg6LARsdteyewSw31V+tCOuhV4z/QBaqd1LRwtlKtwfze/WlMe9W+Mm+jWx29U4KvaUO0SDlRCjNcS2OYDRJDTA5bHlvgdXnfzmNF5iDTZu5IbN+45B/khozf9hUdnF88p/26+jzksKrH3r50yCH56m2YN+WdpiyihqjlVDfjrpNBz43u6jdHIoAfocK3s37Go7+z9gaUPa2jxozTkjobOSFxGJdfErgfcPBSJtjmB06v5wnHZGxt9VpURT8rt+uip8e6xyPj648P+Pwjp7+JBqwtu0Hzu6PzobXH5oO8hdGYwWlhs5h4GS7NoppEaW2NUZUwFmuoYGattJWXLKKGY10KBBNwGY/PA/xqTAU86b0BBxV+bySR+39+GN0xi8c8XjAcIHYydVJB/7ELUCujPK1ma+3ftAE3o2HiZx1wMHd4W5PkruauKYjfB3d9/3rw7QyaD9OeEIV9Ug03LDqTcFMZg7Z0mnYf+JxIXGWw8xiV5995hjywOX0yvKaIo+TcCqB2ZzWc2ahCv5BP091ydUhFK2+jo0mZO3myo08M7v670wKQEHl/SdGvGuwP8vwQnR6L7dtdjrDf7c6ICxtW2VI32mNPe527Nta+M+BXnH5ziWRFWv2+X0ozrCUKpcjDN9iW22RTpwx46z7QvnMGTwDCpNmqIDwFl3cSw7phUwH409UqbtyyDSoKqbqdqSrjakFq8fKVUmus2jxM9dK7CVI5Kw3oSBRKt24e2HpMC7pi+xCyYMYbhGMbx3qxv/aUqtluGhrTtsrWknZMiz2Z89lfbKVxgycv9Nn9Jvy4B8n8svJgjloo+J660ZFvOWll+pTH3eQ/T5QfuRVD0Lg3K0tuzxrMQgLsBLmD6PvPreVaA2KSviK2DlliqLXMliS51VlEzfbtFwf3cPqHiFTTEaH8lc1ecb2zOUZNQ1E9YMVldpXL1Em2zw5R34TDVTbUkORN3rmWkpW7o1aEctnUAx5VYgLtgTRXOdb/l3wnvP2kxsPI+RaFOr4LwMhzygTZ/BXjARdD3xfWGIeNRQ5vEb2zHZ1JxVKGwtCjapij+LNUMG80dKgUK9ARFvVLlKc03Hluyb5EkyhMxu7v01jb5zLQ9uCrVR6n8xHJMspnxASUmtoxTJvzsrZH/3HNMS8M2pU9Z09i8O2+MmUhMszbv1whDM8HTUG4G1j4Nk7ipdhbUvdZ7Lm2pqb2LfpBAPP0yfS14zMUofPfDPPTm2uFvZwIPvlF3A72tgTzOP67GhzvauYe5LQ8rJphq+p0m+
eDvnSRJ/L0NOAxkQwKy1MX8dL3fE0JMjfSGIABfNXBXTZ5KIdvx22N07wB4lzje58unlAXWJbB8DgU+136m2aN066BatCpsxb0pRKvS7GMcX1RhE9NBpehxcG/z8dzf8GG8fDVqF5J4YlVqnP4VhCNZyYCYu9WRrrnJYiYU+7w0OoLB1k7BMFu836+8qHvgSWfjWnGhlend7fGaNEX8hxADux/oqOMqt8/Pg/0SY1vVqGFhnxV8+exDeU3nezE8xM439QWHYgKrc7oqW0y7BXkun9NemU2peCYOeFWBvXNa1BnzRRy//iAU5eW045pPcLvxV2pfb45wsnNZ2xTevogAUbaAuvCHwX/dRu5yP+p3FiUqQCsYxoZHedgu15GoEYn0UkAL7jidW1Ffanssxn9F+6+ywhIe4MEwbsieylFg5atwXlljquJsViu8VTj2sviEOfjZnNQFOwxx9LL4hTQ3RqBagAqKB5TbfygAOyn8NyWqpDH7N2Z43uaevvMuf8Jjc31/E6WHphSzIUXXWytZVXphcg5cOAYNSv0RMNqFiehx2McJpVanf3uZwdDVnaFXYv8aBYH7+nuoty++77uwujM6/ag0yISIWoaCMqpQjuK+leD2ZaKS897DNN27E4n4EDh2C2LVXx/eeoLesndXiivG9FXsXLKjtjagJlMG3D1czQIzFh5eKzWWSJ0RkvfiSdjpD9l031xyXTZOryW//0pgAeZCgShomf6V8D/P8T0mVgo4JqKx9vE+FESBU3WPsJahF7hfcRWSHZBuRwXeH/r3Q5NSmzyIF1eQeqhaaT+ce3cXGW3I4hYlGM8N2sFFFPcZbC5SXe4rvktZrU2N8QGN07B5sAnTA9xb9+f6gN2F/dNEettTs9z/esSWbBpBLmQGb7MPwErbseNlRtIUM20+ImFKCel+gAAd87naonu/UAf+OHeRskBJuB5Y3CVaKeVewRu9OFmrLOPUc/1YkSG+s/2Y9M6Ll88vtCsjHsAppUKAigXAgRviDXuU+o6bU8Y6MhBHM+rC5QsE08IzIUTKRHEi2h0sj5nlu0b97lDUJH1be4nrYdT4RJFR8CvL/NA4YSbJrZHFYwA8qkWctMwcbZr45TnQRd9EEIfo9Rw8HQX7HvxshKQC/NZ4vzBdw4nD7B6KYVRN82IJXHhptCMtUYwyuw5oPWfJbxWntdW8pZrhLmxcoEM9IrI7loQZ32yNRV/e23mQyrXe9lWgshFd7E1FI8vfI62/lk2pFvcBxB/jk3OpYJPEu7s2Rzs2qNTCjqlAIVhudDCKLpzvHlNYqA5Y7AKdy7fzm2Fku3G4/EaL0nkuO+3YhZeHYnw5NR5KN5fAJ/Wv1Ms2sncZrc6RdaQspcwN9gRhHlNABGu6Te5+ebjGVuU9AulM8wGTv1ZWuXS4rwe0bSHFamhl/cGRFF5vaB6Q59dDPZfqbk3cc3lTHOC3l6lJHk/g1vqOpttZHti4ue3egvvh1QBndNPGmIqTdcuKUvv+dZRXNUhIU8ONH/xRzJQEHFHSFJX1cCFfSiXLGBbuoc2GpQ9dl/NAhb2yae5bO/sInV/0bx4x/8YSPnFdwD4uMGlc/A1WfJNasncleAltgVpV9n0uskAs6kOtn58aSbju5c6+enkKyrlTFBxpF/QAp67RxWPingCJ1tUYSBnVMeD9CP3HnqfB+pT2KsptdwIgqvJQjsgDDP/JjFqrXjKf5sz9DtUKQw30kV+b0VoQfnT335yJv0Lm1g8lhiwhvt60WZlRJleSD4XSgkpf2uFj4EUp6GQd3aiVanZAnl3CqDmcs5qXDNhCZtM9mrU1Cd++oUzFcYrFAqn1bZVlIC9NCELBLhXiwjo6r3j4DZYUpPk1leBEmBjamzT7BtuurzTETKAtA3P17frkhsNhgqjEYZ69XSTJ2mtC2r72DE4WE6y3t8LV+L/mNxE8TAxF7j3flehyqinBx9SUIFPLnfySf+DGDiUFpQ4wI1E4B6rj4qf2VkoftuESYREGaV0NQM5P00AMwF5EuXgRD9LkpyF8q4WpgghW2KCgXr8ZSq9Xef/qQwjAUMPMiLniZrxv8MBbtfZcQeg+MPvWUTexvSCS2jZ2EIcs0IYbj6CyMWeilAqRRwzX3irHQBIqnKtBf4Hgo1l/m0Q7fYevMeeXC8mS5FK3YQoGp/crf36/R1csvmlclhzgYpR/FgVrDYHSZOwaLCI4+9ovv466kBtl9A1/Su+Nf1WqIX46qhlw96i/Dm7gNca6xBybyZyeEyESgfiRrJlMCqdBpC+OyJR2gy8omzUNWcAKJuCY1aLE0TZ/Bz7hUiEZHDRqgT0tAX0iKH53f06QSe8vVtD7nlDXa0CQS0cdJfb79xZoCgoJAbZ6bLfSVmR9l3xOzCiNihzzc8vb72Rs/+q9u3F078bE0QoI2oD9RTO7WppPEhNHao3cvHVeTaSHm1c33yNzdElM81A5Xu91Uu1jMS35q1av0NAzbozkFmvpzWMzdR+hXwZW21hA3dRbdzU1NZaYHymEbz+1G30OhdJZ+3zR9XxDDXRbZcfpoY1Mpuzw52x+W0QNR1tSZ5TOx5R3LbwytsYhEW3MyRyPg+lrvk4a0L5v03xpCN5NCA4tCEG1Bz1xyv0JnCqP5V9dLDsXlNFYjzOzT6HA8/KX1kqCzAWHUEyycB+M4NkCOFHY4e8DQvfAbmOxeVusXOzKJOG/ZmMn5dLqQygNBg8+4hde4EqBPrInlqE8MMd8M9WfI2Cmj8cJQCRJzms0s5DYtQeW+9HPBvMWjIAOAzi1TwppbF85LEPysvIdkYUA0AHStfzMVtszJyiIb644VLV3Ybx5lssw/g7SXUapvSBoCWomHa8XvBibyTqQjlLRFaNxnSC0/RavgQoaUPmi9mCLWy44ThmNFZWj7nGOMWpcrQHThYKp/bAcriXUZTI699x8YFUD/Xn+jIyRsWC1zJZBsTQuKe7XpUVUhgBQj0xZPwOmupCXnx9+efzKwyGm0O95LHufKPMg7MYK1y+aheZtUdcZq3WFBQI9WOn1hODHbBaS/FwVfgXXyRqgZ4rYfPOws+5DqECnMzErKrOP2y+WCJBv93MmWrhrg0bT3K+U8e2x45V4lb1+wQATBfsY8ECGWIrcLfpSUS1i4GI7Cg3syYpFebgE1fltBvwW2jn5EenDkBmOLZcMsLaMORvuc8TfFzdCXET7zHh0XMSLm23nzCfsxut7KKvhPLFkx3WZKk+0pItT6wqyf1jB1YmuZ8An7tg4lfC69pZZDm/ep2k51ERswjH0lmTJZXdCqsH4c1OhhVEGDPwgL4V2LF21X5lLcI+TAmWgMc85tV1/TtVi12qNwL4tDbxqA/56A8+G3UGKftojY6G4m/eq2sR1M+pMSd4If8lkeAfFGi5g3X9lnVKTGH1R1YlCxk3/+mel1W8vUm5Hqygb2KZ/DF394nidWJE3t2vcgbSaDMl8imd7AZa5HKZ6Ne9UNm768ZmGXxKX5sgKlNUByxP5Y6UqvTd0kOoqH4u6hCj/JXholSlSY62SVB8fIvpB
o4SvA2iDDjGQYALk3CS8cZx+rtSyUZM7/FyAS53skjaVWt8ji/XFDdagv38R74KCE38ziVGN5C0Dqw+G6EUdpfXuBsUUH6eRM4FAxB4/5JFvjKAo2RUV3uaPAn+9UyE3pVou4c9ZQK+bH4RdsFoYrWTpyBWqrUAAfB8xquKF8jwFdkTOpqPyZ9g/+6uAas2SZhqxR25WqPdombfrUS07Ih6McT5w7iCtn3sPwH3WJqF1G6dvkTpgcTZZt/jEa+PRC62hdJo009F962vHqSZb6udWlBCG1ee1FPXvhPzubbJ5pcRNZomMFpbbiFRud/ub52u9Ntb2oR0P79mEvmFysnmAV2Pa90cOX15sB8ku7xiAbTX5yKLpmXcpJbS1NT6pv/k9GJGcZ8Z0UL+Gu+H3XesPXbPegh4d/G8GvQFM75ML+sXtiTVA6eNpL5R/H1f4RdAXEbJTYnPQJzZ6Dzs7Un0NwkaT3zBEoPTDHg+on9mwKdwDkYazidwwoOEt2YWX3DI9P4a+Q4P159mPMbfWITfkmgedLX1jnL5PFfMZFV7CBUQZPX0T2hgeFJMXW0dbni/wl8G4Z6t0HGMGLzygNaP6Vp1OmF0my0EmXkuUwQBshHXs5U1Q2IMBDumLvREnRax61kF4lCdvkVgqWXGfBpS7Rpc/KNrSt7Yk8xfrbL4ykqP64gbNbl26Vd+vysJG1uZDNRFR8EXPhZUk3WiOtpS75XPM8PoHXAW1KW9/CK+4idr2JixdH/3KrU4iOarvnrHpINuDLdnl50rE2ivbjL1HsRhf77zf6CKKtfz2tW0k7fnjA8wWJFkPfz8m6d5mN56BKQ1wFIsI4C7Dq5SXeuw4LfKlEe3gmssP5AbYcPXmLX4s91mEPchXsdsnyCdkeR/eY/Koi//anQonJS+adghKGHNhtgogeaiwMuZZPFn9yD81yW+PS38zmHsfVmiOXmVE2oMe9OJKrIfwKQdvA0jwMOO3Jipip+z3mpnTqvynHs1Ps3EPLQujKjF0TZ9cQ3I1V5R8vE7wl49R3buYPpskh1ZIgSyomD1oi9+xyCUpcYfxLUQb65WHeyWDTVk2ws4qs7XG2EiKqMDGBdDCa8OSHY2Kg5JTXENEDXDWi0bueNrjbx83q0pGSUPBbeDUW3xxGg6FysblgMnZ9DuGXHXQvhbeEz5y4sSgtjCyGtg9T7lI4gaBLJ7jT5/keCAsWl1h5CJfPuieoPXNGD5Bjd+EuAtNMV63IvsqG920qz37NGV9nfJAfNwKvpZBOgSXq5ABGsetmXFBgFqQCtBeXlGuMBCJ4p74EJ37a++zDdmMOxN9Edy9l3JvYcSQSakOsji5yyCfuJEWE1GHpDFO0kFysEbHpajwUSpDQdmJeABLVp5XyirAETJQEgOavb1BJJn5h/8XCEEcmYnFXjh8C3fTimro5EKsYVkXkgGzWKuo50u5CWYYmW/3R2/TsI9han+esHsHHup8eDQMPOXzlm+bmBCvr1A50d8TBJ1OTrkvYRVd/JTR2RocZfCnvZ+dgof19kqkgv9kqglr7buAyZMvQH6gbpSzR7C1unnGclHYyhZ2JWe1RB/fnV88+RBGxsp1nr1TqkI/8/7BhPh3ZHMHfRfm4lK5PuYWjmDZl4kPOXD/xwGShEpu2+9P4d1R9I0ebDl1aG3e3zbhz2e1gkWIw+jVrPVlEfpYbVhbObnNx9F4Y8MFh08c5Jw5xMB2QXLymtGFQTAfu/DQMx0i0foJRujEyw5a7hRg1epKqKnWrl8atnHfVuzPr9nxr2eu0ZXtLhkNaA4W7u1GmuBwCUFLBupGG+L8zNH9bL2bbZvzK51UB4m/6sF+57NDI32IflNk97o12KIGmDLbO8U8fCbwjQ0d29cQ0+CXEIRplTJyBi4rIk/4yWltlUs8YBlFWdxc/zA20rGU4wQ1XWZatiDPwqoglQ2v2rVrI70Y59IYREiP5u4W2kwUkrj5AgjYL2jEHGqZBhsqcMpZLHBkit0910dwpEnx12YRBK3APa/PUHTbvh62xkZbOUhnxMCxfhJX7+E3pj1fklZaNHDMXSDcUxwcXYdi+7Lxni259zw7GGeDvOYbzXG9pHEF7FQ1GCI5xZGQUG4y2Hk+ZhRqvMKSr9b3HzsMxlbo7VbdzhecmNk45EjoGLfKbMYGB/3H2vCjCB/fsAxeAykliXXk2jd+iYjwW9/yhL1hhAFQ3BeAZtKL98ck1kFmeVvvZUjQJad/rd3u2P75N4OGo8w3oYW8OdLpNllDe6ocUS1WsS5ejhNrZIlBkxiknjMBISKM177oI7Ry48ckqNaWlAUNLmdmW7VjCExc0smWBR/JjPX5BS2IxXq2JgziYRTauvZrNC9xb5Rg7U97PlnaEgSQWYC+jq0Cu8cJ2Z/SJDpgiktfOI7mLNdRHx0uvrfHenW8PvZtxUx72UeojOUYsMObqHjmk1ldVtnZzQg4dgIUCPZX2DBVQX9Ltgpq64a0avNrSyLPeDKRB9AnM9sURSrfWg/CRPuqxYeqlDoFzxRYWMDgRplxq6AksF0M530FohvzGrkPis6tOCnF34HtbYqp206Fww74EgobxI7kjpd6aTLiLZVb6y6giF3zMRxeAkNXv5xyMKbOrySqzy3jxURsTXdrQZ/kaV6f6C6FvPrWkUmy3FnyypfFaM+KaFmbr/Os61fNadZLC1rOqKcOGIid5Aqj5+qjObGD26Lpuk583Z5dkGsLur8+XyYP0ZWxAQFDHqfW4MRVnlFRw0UfBl+8tlH4YmnxykMGecMKhim3zvMkpYSyc5qAWIqGeYoSEHCoMm73sY907+vWl8xfKwUXpHtdkHpyGy+3S5PXSPJZeQy5zybONeKsALAqzmn4nRFuP424QxJ9iKeUSgP6a68QTBpTrkfcXZLDlDDxrt8fm3QhKVd+THKoBKalEhaB5vvIpJeX0w+BSb6Jo0M+a3dVnM65qGUH53+F3+iVfwP1H8CvEZSsIuBbzNc3LQVH/K7flxfq9mNdtRw6YKXvauMV9zxed2qun+vBkZ943BDynWXJEL/fdoHl2npYi6t/8Jp4oKnPpKxq7y7bfz62nVmGPEjdgBNS4sAqqpfHYxdo2xD89zGD5cODAxb3xnwIp6GvT7/cCWwh2ETe2gzbsarB6SSoPc6bq8llfudIru+TY/bb/4tOTjc1nm0fFo/bREGVYDeMyIXD0/HsfCXzMBBs/MMYS5V5v2a+CUxIWAKSkvLDvzO0ggOAUQHCIESU/9F+Prdr+wo/BgmJIXFLNU5yoWOxT8h6IO9dHkqK2g26uI/dqs6XtN7m4YnNg65tyN6DHbtaCMcGQtLcpUcKQIGwN7eWXXO6IVQ1hiCLzCC9PlCmnM6R30eI2+fkaTARdcOi27FhMJGlTw/bz/B+Cvxf8c6yWnaOLa0VQkdeaKod4x6pxb9taTh6zeeCLuQqfeVX8kXvMFXc1CDwFDdxmKcA/mJCFHiYhLavj3P4p8AqlmZzxTSxhONcr3RLDbc0bP/TfR2WzzMthZLTROg1DsVdlHZg3+FhYPBK34f
vhD27Vaguzf79pYa2hgcVkjZsxd43ZeJ3xuBFOj4kCiAe6/T/MzknO8aj3LYVK9dfKfAES9h6oCw77rV0HUaeVJp+aqJL5en8WqbLX/AYWyBwPFwUump6e/76nglTf3hW/CE6JGK/hH62XUFFxQtLt2k2sb62U2VhXyrdKHtNAEoTVWWUgL56LEJVowQCE8jlV7mWGJyN8G37hfb7i8DJbg7ai3djdSoWHww1AWDd7SYCbLslmd3L3dsh8lHZ21ZJQz7OMl7Bks0Lc58cZF/4UzbBnmDCkZjPpJVaeRKLAAQkKkMrG6gG8n45EXz/yDKm/CEwXJOaoLyUFopq37TXdTdfIeOLoteCgswWF+w+iAfV3nHnscMaJ0irGCxGoCB39ECtr/CrfCYtzXPwWYprMTRX6mAU5vAXZnvDe+1IuFmX1Nboxnh0jW9V3Oi5ATK8ByMiJlOo0d2Zlq7u8PmTcFLgyyOn6UXsjlE7C5twWZX5eARqhb736RCrTVFN4qJ1C761txgaJmO9f+RcJCNgb9H6NTKrH6/a4Ru0B4VCBgFVLtCGFINodmBl5AAAJrcXPpE6m3qG6gRZeH/PeXy/IvfB3EmI4JAPAhV5YZn/mXbvk9jVwQdw6G4mWD/mFwMEGkWkdSvODj1I2R+iSAfV2CEd5wruoVBZ+nKDScLDW+nNmybHSHRPoCIHHpQ4gTp/k2vvWppC306kw2q7yIqje6+Db5DRerPZ+out0kTtT/QtH+aAIdrUNa1YPeJ/yNL1XcyCg23muzcIagocxAs9d/fvihyFM45j04BD5hXRhscsSMc0oAcBWx9o+K3F+okVWLS/1R3rtGhZ+Ds8WDWE0om/0vFdCypKcgMuLjBfSrZiZbCUv0C8YT7aTjjIO9kPS/Iym6OsPYjbpa9r/E29vmmwUZ6r+wsojwoehFEHoe5xGIZGMm3LxghvBDsM2fujAlNRfcp84M+u8rXxOrCvx6SzEOuki7Jl+nVl9zjjjg97faCqpU0UFBmNY7Ya4/gYdi0JLX4We3RSbx1j5+wUgJ11wx8f8J6sM16+3Vewe6b0YulXO3QPhvOOM8KGXES2/YVzz1GWIn+RlFrkdh17YW0EwTkO7aHCZOmvrOkasaLIYNE/pxnSZ2esqIO7LwLyFgFfQEwWRzLsP/iUk6cg5iKPTSL3/RBhfQgk5vlvscD8JB11MYyqafXrVuUVM7Hlt2ussBGiYvEJ/UOyhR1FMEjWPxVrVFQtlsu5XKD2WLvXd9t3AQOJdh57pejPRCfHlif3gIfQcutFXJMXKGeUIaUq1uCPrEn08bke7qwyqLfp8jIdt4e2+IizUPLZVC4gu8oYBTsSV/khucP3g9PepzCFhmmh8IW4d/vzXY3ieVnr2Tvea2Gp+LtUaRTmytd2OiV1oVSpSYND9/XAc7IZk2iEhLXDCMJxGO4aLor0y1xiwY5a4gfr6osD+22KUa3Uln2/vxLALSa+D+J1wiW6XTsCSQTnYM7OJj4HmaYvoUy4xwWs5pk0+Rm/b2t1XkmoWskUovO7jQOUtqA4Kn5DZTVtF031N89BqkT3Pbhj6Et7QCawgHwyG3gM+wvcnL5qiUeOV8hnFTMIIYjm8HbcuKX2D2qaY0evEK2DQXce8XwYfy8pOv22Pi3MdYZFTt25yGU2YZm8RZsWjVg41DCa3GFOtIxx3hyFJB8ynXooZBH8JG3OgLcAgkNOysWMQhX5MxUd1dGT6SwIZ5lfaIqz26TNXsewuapjKtimQ1qp03pj6o2p1di9QLPz34xrj4gdX3vPtWQz+i1VcXzy6BVSEU11+yZLOYfyDr+sVPZ8BurOdYK7uJ7MgbAZTBi+DZpkV2yKdekbhTkF6RAXfu5p8u6t4rBm2R6yhQojRFxJd/peBPolGgDoaid6dKWlTsjzCkRLoMRXSJQIfp0q1Q07SP7FbhJCMeMXHllOiK30HX351y0iTVD/auZXbUU3yUq0qnQ58UTZDSdbfYxHPgxWV07X3yZFxQXmxvE2JbFndVK6RxsPABjzKo5EVe6J7jMnZJB/E0tjNx7sCyw0OTq9JnsXUGc75w2Ob5ik7/xpsKoeUI/KKFGgJeV0KXjrnyVhLVBC1okWVY7orCNRqO5tpA9+Csrqjfe9sFuxA4PEJp40r0ID8Xo0OdSJoD7fmlJ76Y6gfKEo3GlbC3E/XpHtijNX3aBBzqYMEy/n4UOZ1KMf026/LaQ3C1rmLWcROjpCi3+NEMAxLb2nweioYcFLr1+k1bF4umIPSWZxAQapXc4JuY2IhP6QuatTwVht+DlNdHfgAehLBZ8wULCyMwqSzrLgew5J8T2dd/5x4inP+l43ao2WVDjuCDJxgvFxTOMTpsM90DUc1VwzTNZciO3AWrRoGN+S3pSzur3u1ydkEmWrXqYAOWEBuwUHZiZK43wrGsGVwsODRN+VWYzb42EcHVMUFec7cJG53WrVLh8h8dm2DYIQPprUDrwh9hJSX0oUOwVMeb+Mj/7pZzMlt4Cns0oFDha6SFORa9Lhrc2drt7U12/m1h//a04GknuKH0Xtlb/ED/X1ydYQ6sWsrmAGVa7aheurus1lOXelWF0AN4fDe1rWq0OdUL5QZ/qEio2hhDOv9AYhESHdEbHCMGFB629r93Zs91ij9s+Ig8gxZzSZhqXLOB1n+/RkMS8tleULff0p6d5B12Ur0p4NNTwOPOhuC4I8C4f67+TCzEhcTvduv7YSRrhrc5v2cPNODl4CvDSNztJDaxvwqj4voVYA33bNMN4a8QRshXbKATqaOowdU048XLyovfDTYuIcAv36iNO0fW2Ij9L73t3md4K/9+Rj06lskLuPFwyp/kGNBDDT/k99ooh4BMF0mv5VnELdS9vUn6BHnaB/NOue8COSJDMXSP/VlpYuFrHHXIx959d+trYE/qrMo35DC3l9qQix0rZjvGWXOkRbb2YpnWwG/uo32VT6TWFVje12upO3cvSOJ3PLkUnqkk/S1ScDsrqp/xLxiNjst6/3M65ac9ZtONQ2zgiFn/mdhcSrhrS99SiDN5aHYL5PC6fHR8IpAB8uL/8KF74B+7q4ch/egBJzh61cVTSdJ8q/EzlNtcU67lnSuhSkjPkOrf1xkYOvOHRaAmKQmBhyiZLue102VWyXRT2/+Zt5zVwb0QqT+5idD84ny1vT1WuOkeOGaBS4Dr/IbOf2EyKsoMRw83rZrNcTLyelLricbJp7vV7+K1Ki1GsOp+9HwKDY8GFI7yl2gvYizpfqumZthOlVsJXKZDvUp1+tp5J4F/hR9S1ceo+iCMa9sg9tIuP31NlJC8Tbv4QUPfGDsz+feBX2mfReV1nH6qjl/fgi+6//6suS5G3dUS1TcpclOnnCsb4z/ypEZZmJJHydnSOkiHwi6C2ogheLCeXTNiBrDwuOZ2XBxNRkHiYtQWMeof2+tGjMDe338XtmPwhI0LxEqJqbO2M9iPD0almhQ/Y3THZS/Xx3eut+QwPt7ObNDEjWucLU3C2lasZJg7mz3ZWHfCGH2NnoaajJKQ5o1y7I7g4IVt
IbE/meAPtx4u2xNq64HEVw7ZtYtdHu/yroBad6M2Kl6Oy84+eh0o360oAJOBvMsH5hNRj/0PI3ThHbDU3tko5D6FeyhJHuKf2apfoD3OHp97Vd5UFolnDwnetNjIX1TJ67tGv5YFaEcC3DVA/is9rm4jtdrek/jvf9spzpkN+eKHovf/M0hg9hbfkKn15pQApM+6pBYspE1DwLWufrsERxZUOYi+qLcVHmZGwcAjitri8ZhvFi9S2I3rggscI+lKM8drYOZIN9VtN4ZPUY8yd+Sp2lihA0Vi37aVoHadm9y0wYIX4HNxTvqyve5xBXm6z1ZRVIB2foQzL1SOmqSC2d5KHanYxnru44+LsyS/JdMU1ck5rQYVSOeSD+Odq7dU+2aywr86IxYk9NTWsLXmUG1eRjOtsSUCZwjL/q+WNqyUsNqFOk12So+DpPLy7dhentW45jKunrq0G4SmZM6TIQ27AqObY5ktHf4dA4I9ycO8U/tfLS4OMqYT6435fP5MnKRUZ0vhGt1zq3VuyHaZ/K91aIKzwQt8Sgs609FGa+hqu2SDCT0Pl5eUGA1URaBhkFOc34zhssPM3qhppAiDlWpcYnHvP7ZVhBOpH3E20b0fhDn3X8zUA3hLQFy2F9Af0yZn+f/CUiARWAM/mEwNHi+/jX2dC/zk+HbQr221gKZ/WynkehMhjwR+hEt3W2V89x/wbGbj1OENrzec3Pf0OfNyDwn0tTMufD+k+XUOHfUK4/pXzs83V+ViH01xv+F4z99Z7rzwUEhv78fNTZWv11C/qv11R5XVZ/3xdG/lxMlj8Xyn+/+W/D7feRwMmcXN51fz/B798IVGd/3kMgf33annRb/ufanwsg7P51YamSCfyz7pPy+T9brf1zv4fAoSz47nWadExXl8NzbR2nf7qqJ5+8s8elXusR/PYzruvYPy/owC/YJG3LedyGjBu7cf59Flr8/jwv+X0Ys0x5Cr4v9FxJ/v6hqM88+/slz8/Vuk6PDP6UXIlpNiD/qB/nUNRDls//SEfgebJkBZEGXAd52ewRQz09DwBoBPI7MJ2PU5f/YxrK/wqtIiT0L1ol/jOtgs/9D1r9++L/i1ZxEv6/0mo2Hn/JNkuWCsj098M/6bf7PzSYPgLI5/8bFf/nK+PvpdSf5ZxM1T/2PE/6f6DZP/b+f3f1sJ3/RVYFSk7/Sfwo9R/FjxH/UfoE9F8hfOR/tvBh8l9kj0H/nbJH/2fLnkD/VfbIf6fssf/Zsoehf3U6GPbfKXz8f7jwcfxfhU/8dwqf/A/CT7Z1/F/LI41HxgiUT1Xe53MC3jWMGRhT+3/o5vnm678qYs6X+k4+vxcAHU1jPay/p8TZf8N5AIiez3hek//1huQvuXd5sf7/q+MBPfVQuuAH/v8j782WHUWyrOGnabPvv8g05uGSGcQMQgw3ZQgQ8yBmePofPxGRlZERlZVVFdld1X3MIs4RQuD4ntbavl37Jwzcp5/jz6JEwGVefTe7n6+K/TnhGPtOOIa/IKZfSwf5IdKhv2MaRAPmKAUNan8lBuK99OA4kMZPn+fzgpXQZyP45f3rr/zz74/rXLPafTnW9DEQ+TNu4i75+FDz6VTx12f96vDHIL4c/bdUC/RPUgua+EotCIT4Ri3oP0krCIj4ZrKzNM/czy/7cS76vO/iRvjrUTZZxvXDaX5IAlCX77nQbC/n4Fd/h+CUn3HwqruGGXz9Mvx8gY8XVjaW17MAh/u1l4Y/v7Di+Xq3+ziCQODoNMfjzIzjh2dPmniayuTLYbFsfhlUl345qeu77NORz++DO1XZPB+fBQw06Dr01znQ+g/F+HS78aL7/mcJIb9oApi839eDa677ZUw+n4VTnw9e48yz+ctB8vsaM2bNpY3r13f4nvw/Pno9KOi6+MsJn23kr1e2wIG/KiLym+ABf47cf9WlT1f8q2b9MrQ/5oKob/nY0S/fKOB3CPYfZcr/Beg5KIpoSlDZllzs9i+XAvxCbD+KrMY6G/+ix91fLpa5X/8+2O7Xpk38INNGsa+R6Pc8Pop+a9vwD3H5NP2nG/dnC/5s3l/e+fe331+5IALFvnJCP6Mw+rcc0T9r5OR3jPwTSfufNnL0M18R/8b5NPJ7p//LPoH4Qov/DB39VZhBqN/I+IJD/zHB5odqHfZvoXUU9Lta94tT/P75P0Dt4D9P7f4h1/hrRwR/jYZ+Rgj8/6iSIv+ikv5x/PttfuB/Xg/+gYD0v1wP/ga8+hP0AP031AOM+I0/oLD/o3qA/3fpAUx9Q1F+VzE+T9HfFP5/Ny39nGZ8Ndn++YLfoaofd/9yAEV+Jn/9Q3054Rcu9DNFob/6AUq5/fXDQKlf10i/cLLP4/90118RNejj54+p6h/Vnb9JvKCfIfSzIv1jwOYbJEL8Zl2awL++wic9/fyh30E0CPTbC5FfX+iTFfzOhb6c2L9eUzZ/o9X/KBf/Nln7jaL/reT4b5PZ8TYhP0/Z5Q7L+fhL3KV/KdNLUNeLn9NyzJK5H4+/TNm4lsm3qvFZX/7rX2fbv9QM/FJD8Hkp5u8k0r7k3/4lrk39Acfxd9cO/m5Jwde2DP/XbxYwviMYNP35dRmA8GFLXxnqr23ym6KCX+U9f0J/UDIERr8mAQSBfCMfAv0Z+VZC6I+QEP37TPPvefK/hnEMJ34VyK/X5JfA/tuo/E9kIyDoF6z36eo09AsK+FHZCPp7IfaHpBy/cXs/IV/bJPbblaa/4T//GZdG/z6n+wcETFNfCxj/W2jtnxAwBn+F6uCfYeQ/WL7o1+LF4T9RvP8aEvsVDP8043+O/SII/Bv7Rf+T5fuluu63S5Z/hnx/n4L98+Z7IcEfJl6aQn9jvejfTCT+J4gX+439on+ifLEfJd9PNPjPcc8E/Rvz/ZspuP8A8dK/ke5vV5V+oHS/PNYP4xNJ3w7LnH3QiS6bt36syy7/+dKP6yCO/ok84ifia5yK498uyOOfH/fXKPVLxeO/loj6Azzij87jlj0/Kn1/ntrpW/BPETxGACxynZyW19z86j0MYbFfUiH/YmH31xEERr+dTPQ7pAzFf8Rk/gGl/I8r64b/bln31Cdl3PzUZmkZ//SpQl78+PZZZpqy+S8o/tOXVe8fIWAI+/sCxv8kAX+Bm/+rBIz9XQHHaTpm0/TTs+/rn4axX8vr1OmnsvspAWMBp+AfUs/buGx+pLSJ31oz9a2wv1PP8OXYvybs75Xz/9gKtueXA3ETj5fTBAHzlwq1X958XeK+nuVjdN+pafx+SduPGeglKfD57CPKP49fDfCX0V3B8pO6QG08zV+V3j3/Y+ru/ow6Oxj/Gg7R3zqqX1KIv1Ze7EcU2n0hUr8ujv19BfqO9vwiv2GZCiDgaxhlcn2CA7fqPgDj5fjnC6d+X+rcH9DOv13TeQHorwzie5Wc3zv2z6rmf1Sl6J+gsQj0m2ru7+whgb+X0cZ/iMZ+bxPJP+bFPqbw7zvbLwfAFH73uuCNnz6JCFwWxob928v+jnJ9uvA3ivR33eGPeFr45+ut9LoV9OuwMTXxmv3YkPH92yMft
78Iw7OPx/R/ahTozx/3/BxT/0eGgH1MxNHFLfg2D+gDNU0XsCsBMIL613/nWPCfP4J48/opabL4yxCeU99kc/ZHB/K/2fOhv9lE9L2lvF82rXxVOAv9ENf3f5M3cuMxzXHDXTo5ftoRvH/aFRwPw0fXnj6/KIYed9cdxh+2Rxj7eqMk8Z0tS3/eHmEC+dtR7l8LR78Dfj7M+9d2/b1A9L2Y9R1H9IMH2Xd/iNn8jw34J+DE+48RzR+R/nJp8QfyjT/i3Bw/4yn7O6P6w+D3h44VFEBkvw7G07/pMLv0V8H632GEv8ctPl85+cVZ/vXDKA3TH98Z8ddD/8II4hY48e45DZ+u9P8+tq5ACRgSlHUfcfaL6czFx9/Pfs3+v78/hd+lTOMftLD/RiUZl+7DLc+AelbLBC6H62AuPvCcw+g/aqzfffrvnPd/CRLBxNfbBPHvbBP8bvoC/iH5CwT/Znr/ydX3//qHimD/iQ2C/+NbAf+1osav1wsJ5DPy/XfcIIj//i6Ob87/0bs4vmyo/peV8jt7VP8jtrF9q3u/mM5vylDwH6uBBPqvlub/jXoxmvj58nS//OC/WW/AvnqX/LK4+OMXtH956h+2oH3Rpua4osf0M4CpfxnKIWvK79RK/8CCWORr+/vpyyP8uuDyOxHjhyxkfwlP3yFWv7CH50V2P8FNkLL+6ULIJaAZ38Px3zCOXwV+AE4+Jrqcsz+MNcCH+uGayQkk5/6BDw0jmI0iW/5IiuivH6umD26V9snSflq++Qfu+4fQ7sfPt9Dt/8WfAX3z8cl2uOzzE1i9FG36A/j0/wzE+m29zE8o/Ach1g/JOiHfluT/ssADxWNSlOtHpvAb7fhD65G/Ov/Hrxv+H9QV4mtdgeFv4Tj8Pef6Q9Zm0P+G7WrfQqLfQeX/CdDon4Q/X5zA1wD8v21n4pfb/yO56B+Rav6+bX0/3fxVRvpLAvrDmaDMp5eI+JEu5soHazobpEp5D75I1HC9QvDAF8UG9vUfX3BMCH6HK4QF1x85IzSC/XAwJjtoLqQaN1BaegpoctxO8C2zaz+OBDkgmYTeuiLhSherbyGXgn5EqseG3I1hQtdTBK5wmbHjfYfkJRvDX851wiN9ZBmJ4rtM3Tp5xTNcXwL/0USPR/MInOahHRa30WiCGmd6/bt+7xhNwyi38Qq170NbqoZC1uepKZOpYjKCinlbhvqGMFtZq+6hrULi7Dg8Uw1UrpuDZzwekpXPs6ANTDJu3MDjRSyDXidtkEZN3ZJphjyfIDuvK/aik0S9yWbJGDM20CYrh3Ghm5WTMcrKP9nhRhcbh7OdjIUSXyaI04Tp7IU8yXKUwek1+GbPhOIxsrobsocSIbvJdv5WYprX6Rpd911XltIVGdCk/p3sqY03cZOT4dYoh6zkL3FTnfta0loZNh8N5N1c9m/pNGgn3HdnMEfPXePG7S1yLJ/CULC95ftNCEG/eFFctES1kIl7c5uNT6wOGmlIvYthk+KKsHyTIxW/DQOFN3pQiRCDwa1A+jG7mV7FDOHc19Q940mBcCLtfdqG3s4iq89ls0l8H3v6SFGufd47M65abr8dcMtVXO4GlIa7UzypD8oD3+P9urdAWbjYKyNVfwy3iSBfb4JxGOHI3mzLNDW7s3W0P+R2XDrO4xUHORX89KRsuMJPw3L1nQXf4rz38tq0nqZ59V0fsOkkRG1wOornMcGa9UcWRomQUSFagBWUg1zKXHpOoOGUjZlbEOJltU0qOqTdRBl2wEx58oxX3kuy552QK+o5r5fehMf51o67XLL3NcMWajDpgE8tCNeos8h6wzIxYuVldOrQo646+yX1lcO9sRsFK2LjQ5yjeI1yg9BRTbO7bgr4TbK9u8UWVhK+NNtaZ0yoWSiMIItjUOa5kJOv9KDx8NBhfT/jo0QvWDo+7lGBF5OA2aQR2We1YAk5+3tFNApjZJonxaNHlNe9BSFQeiYUly1nD8h08KR0WbZTIF73shDXuYbNM7+JJD7txEBy78zMvpR+smFcDffdpso9OPm+0inEEm6TAHlOEeIBdIvk1jwR/h6bTLSnfWZ7xL0Xb+GbebZKcZ5N1AqjLxFBnyELhDRN5mOp8+aUlKa1SZaUrToeaSAv5nkJHpH9aVThIro+fHAp80p5eX85tVc+JthapVNmqFGl5h70O1yUj2ZsJOoQpX5TDmIs3LcOFsKa3cZu2wPyz0gXziCo0ae6vhWBt3vvdukFV4ibJlo76zUdI4S4wIMmhRoNge5PDyisPloAQUhbeC/THioRdIN0JdeUxM4+BLfM5VZ71t6S7MfDSZvuFES2u3zmzW3b5A76TNyZVQrd3i0JfDSXUOH6gcWIxGT5VxtxA1SCboiK5PWqZL8DqzXCYLDft0gE3ZFTkUmyvUhfdeurRLHYO/A3goZKDS1x/v2svaYV5Xy0y9Z844OJFGgz88us547u9LruvK3iRT8wZNmMoUmbycFI+G2SwSMPGZZkHLbdxF5ejJ4zyheDXrrQtsGW6kOV7sbzQZAlu3/qIcUfs0GZTL69hjeHmfynr62uFoti+lKos/T2DBIriI2o2iXFvFWUM/Q0MfVOlxX0mtlVddP5nnKusOFHRHmwGmaHeX68OJ4HHTD4KB1D5L4KGb9zweJubyZ8J61T5IZ7RZkalhhPfts83PavrActCovjQsuC/zBUMRau15WYPKhjD3Nx62zt2dnnOn40ATsZhUap+yx37OqwmqXlSAX7ZHEGr2xL13XtZ6nHQPuWHF4xqp8t3alWWW/kHc2oW3pgaWGtKFzxZJimBrvn9MGKT+Lu2Xwvw6Gnz0aGUIr+evUVio4ld8ScDL6wG22Af2H7G25HuYKT2RP0K9wMw81N9J7e4ekpEqyf2Uqed5x5eQWpHdIzZDaYqwzIJ+DIDtOCF/X3xvSTVX8ShsjNd7te/AEeJLGIrmcD3yhf3Xxzg7yhb1Zw794PT8ouBqLTX1rC5F6FnLE9ZHYa3eL3fLZI4cVz0Y3GbuQvjGTQfLH8yIsmf3hCujfS4u3JqR9dlVYyyJzb5hEK1ZamAb5hvvWd4VQ22O9hmN+C+mA5odGZU7H3Amq5bIb587Ka5PY08pDjG2+78WwLPfWEa4PWZy6Y6x63sLIvZybU3EwTNhO2p8Dg3EOZt1Zk+AYLHrPj1Zjt8pgN+oEKOgLxUiDqEU3EWxPMwCLzYw0fkhwaGw6RixyQ2vokc8Ta22Rrc2jRZKGWpQsM2LaS3HRullfuVCn2zVLVwjxB7K2RPjAGtxSMh0qTjKxC1CZz8XY6ZQXZhVPXvasWUlw5g9/sA5aAJhZW+8kIqApDRwOXWiEYX+wIvAZdrb3sERu7W8E404IvTaV8BchzQFZKRyol0C6HNkoXwkNnhPHVRBEoRn/50Tsnu7KHnenQp/Sm8XwBIrQcPQ5B2rDG0+oUa4ZM8Ab/mDhHZoZWItQk8uREZW8Sq/IBU8aBznK9u9RYDxH6tGUSD7BdRSUKn6CUgSEc8I8EKmY7EZZ7fc8doEAIeJrSCd/15R4j0OlQLunOK1tkmJ+q
5I+6saPSPo2xmU6ouDZ1frYvN1dAG/bPui3XvWIhwgTJz3zR8Z4rUb+ly/nR5KHm2rhzgYrNPITw5siooEom5j6aLPDuetsyJyJ46duZRuoNiuGNU0WdOG+md6uflxPokhJZima858jaSAxdGuzkr2KYhplQ0rmOTZL4FOuNZH2FP/3YjGaDCdN86/gQGYmT1XhVLw4Tq1wvr1AOyzbGvMBKMLcBowOsuRzW61G5MJb22EZJYmhUMTq2CkTP4sadNiXQ8F5J9GpR2jvJX88HHV9A2U7uPJMUUOXyDBrkj/Rlvy3T9ybVOi+ngkChH0oxhjAY8TbhLPeexwYDhb7puR3PrbJhLmmei3L3jNUCTUFZtIDwfdGLUj+Gd2e7wRuMbzhviYm47iCyKeggSbt2iPKHTLATFOPlGNzeQgpjKt221vlKGCtPPlWWSPdKZ4LknplziikG31vLrYxt7F1QJNfPRMhkGU5pqvlU7mPTb3PyHjzildpPw2FP1awYPEGwx5isQ92neFI7LRM6736Gp8OrFiSPtXYLxNHmcaZZHKXeXeQZNtnx1PXCFMLQG7A0LVmeWsYtlrwqP1hCJYbhoHPkwH34fBW+7/oKass3R7IuTBpKcDPURau4A4KHVwRjbDtCXp0DQHC3lW8lBKARY1yHLW2lyQ2WHfY7M+FW44gh/CYM98nuhaWXLs6kq+Lc6Vc+WGwpMw+aASY8KSlL75z87LGaq93jUKYIAl0oVDgLnjc7uFiQtGBPxsDrhiMuitO1qh1t+cygQlzWZJAr9GYxos7uYduXyjyKrs5rLjFEb5K+SWV9xYx31fYGMirYh5VJuogEt7w84kiZdsfpFC729bF7Goi3rDqUD3J3SPrT9G8CZyfGZt6m3mYoGPI71Ikat749Ylwo7o4u2O75Kpm1mlJqDzwNfg6HUg76mSc2hDkvMeWZcp2d8qbZ3KmVfg9aTIk2f959KWT6flQ1jmFHdUw0EXVrXVZTaWC2poaQQDIhUJW0Y8ZFpuSJ35Xmcohrll+aS9+eVd9c9suFd9qKZE6/2RSfZ/J+bpomeohDN6UOJT6dMaMD57Q92r6tmwrF7nxQZvm54W5Am1xx4dmalSRotFZABjm9bEGvPao4bilEzmUrZDsnlqau059iX7a/SJZJE+Lii6Az6oKjte+Bnix3KJMdnLv7qUtb0z0xJWQGHQat5EIUbbnSi1QDQCm3Vr6HxjOPKvI9hthGR67+qk6h5UyPMMIZNmfDELf8HmtDqecqdNL+3KrbdDwV7u1mqEDrJHM++gtGG5WoioIZII8kCuLEuOWCWZYEF+V6cqqa57DsOXaht7a4d0XMHLfSYqtvic/zd+Zm2Jp3N3RRHMwMepI8yr3uBq+LJxfxdwFLWflWmaLzeCRFwOMvoWUnWGTmBOL86HaTEGPkCT3kcd2rWq867dnJEk2pE3+XBDe88FU9TKPoDELnCoNkJMgOs9KdsQmCRdl5AvDoonCXlxnZSYTqWIFgzpEEDjr2GopsdzFnzDV1wXee/Kw2zxQRwvLOjwZvFXB1E5KQRd5YSxAIA5HK/C7SiPQiJ6dpaeFul16GVF5Wck9jDCXaNa2i12fs513RzOs5aNWeHKeYJYWBWv1yxA5+BJXhwTpy60SIk3i9oXIZfTSEUnOXtnn9UsBCSMux9oiTC3WAVCZ2ud1CEphgu+0GaGKdth2ms8zFeh515FClvA00l4QVPG9PZ0F8n6HCoCtDdFchusReK0zU7oVaer14rdFegl7RxYldGMtnK+lVwesG+oLDNMWhNIxCfr6AFmCPpI6jSHlumGTcxwukOzIuK08vjZbMaSLfjTS1eu92m+qR+LjY/6S2vQCDbzmFgnvyXtuVbpx3PFuI3A7DNtxQt0e34qU8bxfWo042Olqg8YVwx3dOURjg8EU7QhfP1xTMzvtAyR/9OzojSnxDqvDGVstNF67C/boWpSZ5aJXcLJSKe1XMJoVrFs3FitWBZm/CQdzUoUFccuUhkQ2c66ZVirfPmc4OJo36N28hU/+6SEglde+GUf3Xe1zqJX6X/s3Vb+/+cbvI44crftsA9HTQvh8kDrqMiqPFvgrQqR41I6ol8p41VYCzQK8zQne6l0ceoUUVeETIcp8M7vui/OiJwyg/3vGRMrxN/vDu4g2BLH7UPHoppDNdUK4MLxpSLlYojEHfK4c+B0p4BwQAcTjKLC+/aYB5uwzvNKCq5lTTnaErhHvT4Ryi19GBBgd1DFzxAwfde0Ek0Gni4TVFTayS+uY+kR42cexNyo+uj4s+PdQdJoe33lbyaFrvhaLlNQH0rakPo9j6BH7wHvbI9GXrrZcRHG1nSqDmjY3ORT5A8NZ5XFX5J/Jq3DfsasiHwwvvprUHstkcc0HFKQ5bbW4Zs1eiDOHi7UjzNDdsPuXwBgTFwj1TDcfGj9Bkq8u7wFy0eBjKde/SxG8K5Zdx64auc4zFRxYntu8QjaRa0yba6PU5MQvQOfY6RbaO3KZN64y+pewiSfsYnKViEvhh1aX+S3YJMOapYSNCYMZ3anltPFF+Oro6XZ0HNMsUEnBYC3fN9p5VAIa7Ld7HMDuiklEpZMEgXDYwYILxHSKpt65Ee3H4nUGe2GuO9TdIXcv6AxDlW7JH60t70xrX7C5m+M2mr2GBU919uV/aJRpdckva8Zi5NxwX042YiuEEVa93wrFAVOtuK4mrC4/j17TdA1HL61xduS1QBqrTyHCT4M7yRYLShDhSqbgH7bXW2c94NOplooUNuefsar0REL+aQw1Jnvdw6mCnJu4FAO7oz1mkLeYVgfE7bgFDvkFWd6LPtxndbP9Bg+FgMz0r9zJ9K6woO1L07heL1HfTGdFHgN6nnTn1XlYEziDOLfLb7XkBLZBmJA+jivAWm578Ku2lgKpGEhr15qnWqxMzGKQ0WMaA551puQT0GuO19v56DOXL2HSKoSpNvZgyS3fCkdDk7I8zIw2rcmDQqZ3dcm42TQzvun7MzDLUpQ/BTqsGFueiklWLpnHD3bslDhGnwL1aTBWl43KnIKz2KBrUyrd0v5t06xMTZ427j631+RLnXiCqy8GW6lp1ies0oJMbbMlmQifbu7o7iUCqDxSIp3sFrPxEGfKiHMSFLapcoriMUYN74/UneZuIkIXOQ3tfyO2DYXKbb1V1q1TMRV9Rbg02s2BI+NERYr+ZD9V8F10VgmZsVdQ0J6tHlfoaLxtnThgzETJ1vAGtJ+sBQEAyQNGGtGJbc0RXgg5icimhJ6BB9jM1HdnHB3mazNmtS2+Lc1wmjUB9vQZkAlGGJzQPg2XB2KP0TWixjb4fzwI9MC2aEUGyjYm3RsLFvDTgqHuE7TkhuTlrCsPlHYCmiJb70skJwvYg7V6V3wbwk9bvGEggo6slhWEY5WrRHoRqGjqFmBfwNwbxzGiJLeRbgRlacBA2eBKVTbGeG48AJ/KTiHQo09DwTn7C+Qar5veN3A9IjglNDSokeT7hOwiLRjKIj4NVno6fMMvbxikBWB7+Npgd4hDSMlDWGpyoPavIGqpi/uQEfRT
kRWHpJTTnjkbMnbiXEfZQEuAktSjpJOtO3hfnQb8rxtRQvFpYTFnYNR2s9J44b/+4Y95cPUQeB41HXRHQTPJC9ZiYzR49bb7ernEjvIf7eLmiBX+n9uwvj+NRcefZyiUya0/fknzKryg5VCC7FPTyvg0XocXHullTNy4t9imHwR396GlXbKMxoMC98pcPChClVAWpdg48apLX8NG2Gomqw7X2bK7nl5JdtFLsI3OHzoUZQZJtMRfTkkji3RKmuUmuLFjke3sAR+Tqg13JGWsIrDNo6lAmx0PrDpzYcIQPnw3j1KneCbkXQSr7RPslHG4ialcdIw4XnYclJeRfJRUaIJfHIpuV3Fx+2ohyZRBCXo6WSTmVWvtcUjmtAcA2gfyRTbQbaHdJptqrmupBdaBwukDi9oqU0vnohGmanQeiWkWvJDmJ/TwwaXpTaGRJqLhkdBgyaqSscjF4JCHWqrG51XdJxUYgD7FhuDwF/CRIVvPAJiN7Hlxyx41923p4iS6qziDLwCR2UQ8QLblkv7y4SaQn/eG93WG8Z8oAQePrTl/BP4v4c1CIWDxn5Y12HRq490p59nKBF5uBtHFZTlZWwKDAgV0XuWWFG0o4GrTTme9pmaMU3BT2d8aaGx/ddeoivQDbdegLD9dbjEgF2fPSuWIiF+/qLMgwhMKvN/zwV68Di7wiaNn8KL2o8W4xdNaCqEHpBTznwKd0+V2gBaDZNiS4LkhmIlKggljtheLmISg/JBf9r1yHRgucLpYnoQ4z5DjjWGuJ3DyWtwflr9y4PR2dV8JS4BBJl4EBvsg1AR5jM57jhZpfo/TgPJXXKdje8wewKY8pO8NjbtnJs56/Dk0iScg4y6TQM21lvV3ydjQrvpUcLAlB5lbOycTIycePPTjPpE2esnTj5VHg5YVBswgJnKrg11shQnHFkOZ+8qiM1chKyIiqDCtD3SJHlL2QtgkdqXoVVjiArWpl2rRDVorOPR/UkLhEWXCEVS9Et1X5y+RLj4WxyR3rbdSLwszJxKAJgUwfxUyQTJ/plYLxReYPTD/6obtnnR72mHPGtxrAMSLhQ5RQsbsTK1JymJoQIGhF7+9+Je/yJheMWS8sVev00qtU5cr+PbPJi6udgsTDuFVmOHZo5YmVG+4trUjc0Ht6MXxfwGO6uDgacwayTVqPvJnPwhVXDYFLr7iHKDBwwF0XgZ3fBJ0HMV26aqToTbmokHG7GTx8m+fAQIh8wbbCl56JsoGccqKnJEsje+SzZOmty3LvTuAkhebVCdnwLjdiqZqVaLedRMdWok459fs3rrwCKllLhebULC+FtbPZML7nyfzGEL0rWZBTaHOpL08p7qBUe5pm4u1SSHHnNeg5wDyNo5f5+aoGwYLzgrERaQ0PpAyjrK8E/SY+NfukTZTrqYIbG83SCWCs8voiljBrSR4kRdaGORtvE4VXwuU1aSeT2W+oEsacoTi8MI6vIpMQTI5WLjwF6g6dED7McVj7iSGEz/iBIgMR8Jp8kfInffe8VH7EPleBy0wCS0WN6D6ISy0uxt+Xnm0tBOmJUY/XVK6Nu6UaZrBwuz7zt3uRP/WHBfFuj8UEO5q0NuoOYDCWqrdJz9rvJ69kfMAI+sspWr5NaT4MZoWhuNZyTJOtQQqQ62BkIyf4HhDZxUJvGh/AgygHhlYn7zC+xqNdeNU/QrFTXUu7KLdwQTXxbGKuraMl2mB5F5pg9lPYWYtEAHr5wAK2eOcEvBHA8c1vo/UK/VbqTgpzfVylscS4fCUP6fYAfmRxzoA1bvuq35lOHFq4pjRdY4m1V+vStR3XfV5igAYJ8dPQk3QQLcnbi8Eth2jgYGtLpAgqz+iw2UDvj7sxGr58cV3VQZyU0HI9RE96nK33hf2lPVjK9oLiRNJZnUCAtK3l46+o9YW9I0InfaVPWhCs1Jn3tzLlfLXrSX3uUi1sG2nQ3CGN+UeRGMeHTLKjNlTr0XNmq/O5L2hhnLnukNxs9OetIbkrxLS96m1YZFYFszmDfoMQi0y50zb9e52etV1FWJwwMseguclkh1KIU43TClgqYyLmxDJK9ThqYm/jUhqD4ighLRzLq7gF93iXa9ANWkn6kdkjs7YdmeEh/Na4VE+wtMAfarZ5jrc9ONo71ZM/jH6AesMmoQVbhyXCPUHInDD/lPvZtmEN3JhfTlhzKkNaGCvUcrMSe0DhEleGX04Om6OCFXfxzVB8fi+xo0zYu5EKaEHlVnFqIRtQt149p17zZMLMMaLbbd4JriE0qG0UAJEBlloYYHVRfSESiKkOWGFptuXdA/lGJSwMeLfzmMyHOnLnJTi9UKOa9VQG1hBWBXtLPGvVJKlXFBbyj/ertUOT7zJ2KwBB9jzR2zGxUrbS6w9UlWNy2MQ5zisqCqvtVvMHVUBWpfhngK/NIDGUhBiAsvAXME+AXrD26QRGIRmfcJpqlxSxuLdU7IT7HfSZTlHY89+tTqPtYGVtGAXNA6fEE+799NVMx/mOj9shvtEB6LlWenTEPdVeepc9odRE55PnCZyMdjosm+oq0VLvlxGZz9fwwPWsPO8ecksF0UswR35TUUc3yYDJPW/lrehguoyBSdvN57R41YO8v/VXjTnLJHws2xBWXE/xZQazRmKZvWQrziNKzI8beLgr6pqCaL3cCjkO7aH7j5AaAudGk6W4aTpQCMTZU81q/J5l611ANPUmXrSMva+dyU5Z1paStR8VxULDqyuHdOhsoX7dYbuGI2MZE4dM7UqAne5iZNqKu/Q5w/A9XG4Pn0CxE/GYu7dRyQlVe5Qk2KtFoPHYmRfJWM1x1/YNa+Png7V2sFp8PN83s4s6wprwcwexZ1XRDZ+OrigZZ2UsOeUyjUypBS5sddduyhVzNz+oAvoEHAA73O5mqp1sHMhCHoWkdEh2N/yj9gA5nKoCQP8LoIj+SIhY3t4oi8dNCSCNA2NeRmPcC2FwEMLtvHzp/FDrjTnikhqXukPVmSPOxzRGO+4G8jBxYs2jbi7pdVHHlq4wIJrxa9grjeiglgc2y0kObKn1yFG53QjnGV04sIzEltP4xGGzoNBZ8603RoRv3YI6L3SB62Sbx2eLx0aW1e27bEFOwEFfUcQW6Iurb8rk28R27I/DmiXq4ac+GnXzBwZ8xmWzU9ppeHgWlaNt3dPCp583Mt57y2i8l5gPeOFicXZzpCPk7HeYB4ARixRJNw/p5CxLP451NHkZ9Oiu1sQK1MuggpauoiX85ECAO59fl3oHzTPblxCyQLJvK9a1gmYa+E1Tv7d451M0DTa8suZxviiLxVzJv8AKtoy+mG3U62jEKxSdFrBHu2QnCMB07wAA8n54ebneZf7swyGWvaU4jbD9uDfL4z5HP90HnPdjx905dSwgw3XFh3esi9FkBFZ4ReMioR9DJarcH7Jv31hXbYKHsgS5Xd5o8wHM/YAid3sOu7ZdAd/Zoa5fS+uWbHWmem5/8TV2iJPA9fDbh/KIUKOWVfvUn/dwksPuJkID78SRFRmq1y6TFj+4Fju2gh3L2FT9SgAuCmQzun0dtBnaulsw2/
vbuswsvX0sJtrOXFsiXsfb4d6IKzA0dvMUrdK611s0OL6P8XPqj28VTh+Y8jRhJGojJ4gaHcrdXu4bFb7Zq5AUWC8FSQEaWzfKsjuUmBrBHBcwSOi5wpaLQO1ZdtD1+4bQVH7zrTIUKqzpUQKpAs9ASO5ZHkVtEaVhm8C1ZI93HpHyFrncUMJ0cLYrnCUqgb9tunwIjwtM4rLDLdY0Y4uAQpCoezBv8apzBT118CEsDuRK84lLMVyPl+3sHV/Pb6sP6nZDTZ8yvYsUgqoTwyDAVMmO6K4FUc305q5szqiFNO5hQGpqg6RKvolSeuKsXENCc1h4w9GIqcZrA0oI2UgboLt26dvxzF6InsbQlIElQLFVojh47KxVzFW4fix2ATr+SY0VAs/8xJxHkgSReAnBfJmu9eqlWd1XjBymFfIvRTZjA5AODnogTGlTulRMOruuZ2sU7/TxALVP8H6b9CWfTgFaFbWtvCBDJ2t6AoLPhjBxTzCss3e5NIWCDyAsyolMlxS/jPcax2LjMaHiA3zVnphP1kVKtMmJhuodtfo40KLDYyB0PvQiMlUih5pGq0dqmwzPOelsXfRiK+SwVUMjbqTEDyUTQgLVO+4urL0rpOgw5wlQkM2j6O1yXQ6qRnrCfrhNnTsgSZOmedxaZG3sg46Eimvc4iP37O36Tj1uddzuT6/YcIY/vdr0zgWpKelFx2jkl4KrKw9h8YsC5x8F1bo77SwKOy652k88dwTA/c6R4Vte04Bd+MYevFID6jXZv/Sz4PLHhbQqWllqVT6XVFtc8WVyqjsQfNRvsjYm/T05zf0R1mVkKnk4Gwn6DoO2d+IbZlJQA7PP1cZXOu69HPECvwLVpnrzKao/Dtpg7Obhe2DNwaVfuX94NxZqm2K5pe8A/yhfMLFOaB2xvsnU2Lmw7KlUVvLnAylXIa8u+t3d0Z2ip65brKeEINcMmtWCkRBwlRxQK9qSZF14kom+XVFiJG30finOhzE5COwX+tOgcS4jWoNG8MxJoLcHcNEUVjRMZdQn95pzz7sNPCqtAgXOF8DBrIFlsSIFDmVKsQSZIjRFSVeDm1ElT1XNOcC/QT6zCpJPD60J2A5CE55CWUKwr0xYR3IheVcAxRAYFn5c3PYyuhuOXvDOiuZfS6hnIrT0MAqJGiL2I4jJsEGOnfSSLUEPMJJ5V7ytHy5dyQrB3sv5fugv6pl3vDlCiGauW8eGGkbG0bTxWlAZOvmIyrm5y3lyKSnw4gyxZl2O9Rd3xvjxlle8m5ysplt3cR0zrbltbzCnPISAcgipvE30UmgLxMSr34rDrX6abuanDqNMF3zP7myGTbwXwNiF5SKDcyTbj15z84psBs9j2epfsB5Mne2hF2KG1a4J0rdSM0KWFXyC1NGHAE+LEmWDTDf8UmoABT/5CPBWE4OM2FLemub1bq/ZCIMCY4CwaBR8NtfdLcmQuT2rBE0+QqcOajEPvaZI8kSZxwibC5AOfSC+ccb71NUPfqeoWjWzbeYCD3IpJ19GCX5tlPda+wWPlo8LMdE7jZ5OLHYgOfkE/4kAsACtWqHbiG3PbEVr0d+XUb9thAccveA9ToUKLqXTX0CJlvcVyT+5vnXYxo8akz7Nu7rO1UCUy0Xk2HtfOu/MRj2YJnOje4CKoZsFMZQpMFlCoaBs6oHcrM2x9MLaW7W/u4vhMcFsZNwchEYQu8ujQFAmdlGGBC5WzYrqot/ScNcAzBAvKPkSR3V29o2NxCpEjEoCcAwdwKAt441AOBqs58eaQojQhQQy4U+OUzJlgm2kx6vik5H0DMMxjOs57MM6+2KnSjpYMKDuk0/UHyVremNZdUghySslSZxfn5I+0zgu0ygd3Eu2uL9er8L8NCmZQsprz5u5Vb3TBQFARysukDwtWDhKyBoiz5b/yAvyrzWxq52cMeuZshS3bSqyPgn91a7VNVgxeAnu9ggmhrJuSzWnzjW77pEDCiC+aM0RFg+EPDeUV3N7CMdgGEX2Ubib1+qpObE7HB/LgZE0iONRYYW7qKInP7YELOm/NR2qCAptAvBFoqLWUkdXy551rnYfASTX7jNgLR6eWnD4yhgXtdg5vUkCljSPQYZ5+JT8Oj4Z4NsROnNxMGotRmJZe4fbMBcUuo4E8CFnhicA+uQVq0/P1ynBxcKEJknLF6hakowoiO3DIyFbyXN4D6FghsSAuryi9mKoNQoe9+DOyyC4E+059wlEK7Les5BuZXlWgmKELbFCNtyXRRfIV+iaOSSDegORlNbXgk7mDBLQ14wzL2z4ZI0E0H521hyz7fYcnvnt7ixxBWo9xRvav3q+TBWuJlB8cLwTj/WGZhb1xdFDyMYbnu/zkqYer1RhxJZLLB41fVjuLbCYcHLXlVGfvnc0G/zwjIeWVmbAgCWZdrFC4vmgQVh5Z8los4+62imSB+65AoZKgv/Ym/Fs9aXqNAcu47edVPMFgwLOS18EfQeLXaWaGmIURAoB27GhNrSID3WrlcBl5xmptEvXhFMgH01kr5dqe3r6sQjMwsxKz936KiEIer7fGQkAZDOIVJlbMFha87itn0q0/SDZu4M1brMl8U455cUzSCvgUNwlTr5TXykONRf1cl1S9cxX1GBeMmgIbUE6Tzr3EcZrUruRVS81ZgxWPc4mWInkjWrSuQUe3MV63GsUqtDPSefXsaE6/JaDOi3eE4OOMChRD6inx+uS1YddXEvmfPaJcqPURfdPelRf+55phwI9yGnPUmKrg7zPUE/EL+4X4ubHGkwqII+sqep5ZYcAZNdTfzswKaHaBjYtlcLNnpFd4lIpSpvIam2oXLqeLxu9Adw3ureZgeoEcOPHuY9TTy3XldqePMdNPPFbZ/lL0jqv6ArB4kSC6AvKvJ7xJ5cwWjph7FcUhIidU8CI3upw3xtTjbpxnApWa7zUb/U+iWI24GeldrTFwpL5cnY3kN6YbXSLrriVo3tdd88Usvyoxl6+YXNtQQiDUb5R0U+4aL0Dh/duePwaIx4M6tMs15cg6mhIw+LTWVCAIedLmpUqjvvb1/ZXpkUFFnQRtRBQg4ZxVDLIUpDxkRPpvfBv0USANKGbv4q5fJbk/Vmt7oPJZAjPemuCrklmyyM9N0h7xpS+P19uZQ0K1fq4osw62b086mNx9twHDlGaVfFPfppXle7q3YZUSkMw9M7OR/tk9BUqBMCssbExsf1lQ6IcWKJQPJu5F8pHTONHnl2XyYVIheHIZ7DywPzn3rF3sovSuxikFpnfdv+N7j6lLSwGyaVgKXBwCegCB5r1KObH+6hDYOTdeLstltvWKn6cwVMOpGdLm5EfcwjSXBAy9cLUIN4rdKJ1o+ZXdMV1aEGvSBAR4i7rgVOQklc8ruhvBBV+K9fwYwFTZWbdHfsQdVHTk9c3eQW1rZoiHPIp6WlcmBwmcQ49Lr/LFrgf7yRn6FyQAW/FrftOFX02UHN6MAUdltahX2zGP71CHykd7pzZ5ICYaaozJuN8AiBKgwMu6lbafDv2nbyvLEscSXnfUC4IVXBh1ny+C4ydFRM4y/J1sujxURP9B
[binary file content omitted]
llJxTecdde05PquT+eV3xHNVzsJq/rZmMZkUz5tmOm/1wxWp/Du/y9fKVM/C6+G2btoAFDYpNH5/sLwe/fTQJwtxH8sQ41SNgYAmzFEq8cN+ggmESVUuoYBCuYlQob9Ytn5/vtj5F+6X8AFDYXO/A5VP84G8LYjoZZ6/21SqDyzlg8QbfwnTYdNAVwE5aXD72DXrPc6bhh35W+PJQ/FMPm8siODPTPI0idotDs8xg998UQmIPX2SQmSRNWXo1UMqgwWPdjjQDE4Y//7Wzoh6d9bGxy9Rsv/x7nT46job+qzlkONZK3dFyhyPtfRBHOTRFNsCMCAmTiJC4JIteZy0cgDbVN/PFO/CCWjHoXbMUeUH1ftUV8/ELoqDihSlYOy7q/tg77DetP6fw7u+sEXCSztyUvh9IZTf8Y7YS6LXnC7fPYEgfwuSoaSrMRqJa6e/QxpPln4ZJo9pFq+lCVmM+/TW1oY9D8zFjlKiZH/pRxPlL+I16xhjDg/+qCNTtKvwLq3fl6wmkFJ2EIGOtMkgF7U9dtILQUN61nT/jjULxtDGzy4EVWU9Waw8AY9H1md7HadL/8TeefX//3DtzccCE/2/TK0QHK10YQkTFTTy+WuNsrRRRq5/YVkLsqVe88xD805xvHxUTrs4IJdA0ivXcoKaOKEKEmYcsIDUulp49EaurzlnwmfVKYLOaDT1fVn0JqH2pv7vzaAeKbuGQVJBSBMZAHIMUkMurylbmrOvPSutmexdNxWkTISvTYa6+Fgpeqth0vBxaxnjB3el0Cq/Vc36hWcgHLWbHKpYjnQ3RbLWKP0UvEPyE+ed9+n9bbgEQMkk11xxplqwu4MpQn9bzTUYanSW1s/sUtcL2CCwLEttCniAEDwk2iWtF5V5VpYpXb8RhLcadRihuiN+ydMNB6UdwPWBI9Tc7PNw3WrCrbFwH9TCvQpeqFCOcEe7xqyQ4rxJexsN4I+w4hFbN7S+zU5LIH3RPIkoG0X2CZCM8wAu4O7cYg/fOKkjst7yCgj/EII+JuHj9uNMb0UfrptiZe+F+GYTvMh33sLjWO/XcnyeN9IEYpB3hFXQPRccxvx+9KP6b9PgV5RySqSjRQFQfLERmZFoFbmFBq8JRVoTklgv6I8X3Bzw6gisWUoUcccxvszPw3Cn4DaaKDMRYThfNIDHtoz/UpR5UHI6JUJyhxHTOxYVYgF/k2WjndtKWSAspunUor6Aw4MmCuV5PtWAkW3PVyTKmgH6gpLazQZLHC+x6etmlPFeiap5y3aeLyoJVJe/v+XHAqTvRp41LmKV1eHVwHx8Zyeu+k8xCEZxPXf6BiIWJC2sU0v2SVYvTjlE7ivPPSWSvSodVGWF1SEazJn4PftHH0v01OjNH1/z4hzleMLL6vp9UMW5abl6iHgbg6IrcnD1EcurbXoMlWpTBDmIoIsKzY1F/YloFRWRaCWnyVdrpdhwrdpxKNrE+DsuRhUzt9tVuT/hdq1AlX1HiGsm/2X2xJ0uDBQQk0cDZasqI5H+3RlI/BJbjrrJg+Ngq5Y4XOdFz9JAbP3Cv2C+8Z/73I4KkJAlHq9Iwn+AywcnC1T/3t21EPueVNAi6YrLt+pBc6/LVgjEUSQ0k5znIyGX+Hi4RVrjy+coaAYlzE7zPX3uw+AbahlcjsRmGr+XH/soj9VROTC9HmMatHyMsombnpASwW0d+DZpidvKfGJL+BmZTf/5THaz3NP3zmAXN4T1VUkpoIg3/2zB/AzqiOIl/KpBxlDhJ6W8+JA+HFgC0RQzW2TMzjFVw+FNX7+iRgzWMTBDQfr8xMuJHi5PAREMWOEyo/35fkLJyVDTttUcm7qquDQnGQ/KJJIkQTFb+vWsNb+r6fYpSuVFhYHDc3Ni+tvS6PwCf0KMYSG/lwGQy1Eci173zInTa172wCSiMoCHxYKK/dMsk9ubWvPwoSaOx0W6Vyo9Teh6SZvn4Kd0CHJqHIGIYxmzb8hXEMYsXoWKd/GiBmh76WWoTWQ9uaqzJTj91O5RNxSIPdw2jfxYfiiQeVB0o6npqo7XqKxfM/Whc4ilzgy2sMwVVjX/8yHLonY1PkUmk2bARljyL/DwWl29IvFubXr9K/vdCyZHXtS3KUXcC7TBiB4uD3tPnD5uo+gUSL1gPkzhhEZxUMwY1Xsz3oFwXkmF5M93OJAUs0jAXRgaEGPoUrsf+XKGs064ChlymVW+WAEt1eGL+QAJ5bLQLbyhlKVFzppzQ3XcLPZ7IbX33iae5DHNu1PgbflwTSV/OCeOiR6jfM4mju1S/d+rwxkuRhTp8OXWn/3WGmw+Ah7ruJeTrn9M1UFwKlW15OaiELTZOJCRiwGuoT4cq4RvPTTr2FMO3f+5Aj/QE2FMKCE+lk/kH/1tO/v9sTAYhsE8JJZ1PtO3OGu7HGljAqQAAXsZqDKMI+fc3AryI8mHuUQ0SwjlWFfrDes7ocEpqE4hgGx/LORXL6r6aGgFNec4YdQsKcTgf9mGUpJHcsUaU8JgnhwbkeT9FfvXP5r8ICatCwae4CimxJ43yOKgE7ZjaqJanWFLnoD7XOeRm3XV1PRkMx8VWSQd28h7LQ4uE1w1U9943cp5WhVT+OAzvJGhtkIgncoNJ2fE2xo7p2dBouPMaOGdZjNeR3sWNa63gtZ7gf/W4WtgMgpddmhmyHnlUS3f/ciZQ6sqYMrs8xR8f0fQbmlK6Q0SFJpe06NcCO65g9zKc4P+60Yrk/NrV/NNnrwu5SkpQff8cJzS8brHRw6cxJtgEw3U/tmsxHt9TyEukVjYA1nlACMXa+hKHQ9sI0c/xCL+mFWsjT7HgQovKUpK1IVpjp2RaBunAVYeIL/iIWomKILHW1WQJ0XEklApkDyUPhwF1Gtrr6YNDmCMQwvtrkQadAjyoHmGQQr82MtYpCvuNT3TArCiiiEBY149rVeRK/opMSyAUKnujbUgdwLtjWvofujENj0/Vlbrn2IDjk4PvPFUERcfgpAzAVg3gXviKNCaKVbqz/9lh2E6aG4EseAcwz+qiOtXkaueZspGVNr1unfN1mv3yYhyHDTbNLrTCxCUhikl2bvj1X/0TfumZNpIqYRERbbDA4l5n8n9/xWJfN/Ce81HMwXjYYTTQVaOyz3PcbhEWxxf5foJWIy0HRpdGnxEQjLOaR76oJj94d9QLVy20kqgfMHlKd5CUrZbo/gUNMmeLYTKupryoi2+fohGCG7hjSFLbUm6orfI/6Ykr9uTxVjBQZjU0sHeMfV0rv/tXickheMmo9Z2+/zIpMAVvhVLdqRJ5JDApLokVD5MxB0nagcaCBqJ0y20qMxupE/a5EZfIv/575JRY2FzazJZB55N7Rs7o5x26KVsDnJSuvtdeOTb1NWvVSk92UnaKvqNA4aY5aR8ilFwSQM1ajH+HD5xw7FsLexMbDRH8Fakgqw1/eZl/kUzZFWwm+fFFCHJZmUGi4Rz3GFH3GC4uKhiAStThtjX+kC5rHzb6/oK7QS7LWXGdIUobMv2InRQyzdBufl2XPZGD4vU5/PtZFyR2mec6nq11O/+FIX8IXaL+oP
mNQMK6ZOeHGZbWlXOST2vKt9Su9kWFH+97gO/t9hjyOC8Ma6GD/+4wxG8rrmDk9tckIAZoNhi2ZiB4/VeHubfFiRxwarCX8nB4Xooi/dKW6Hvygx7G+d8WkkYjMgsq/1YiRcNA1LwWdxzlfniQ5AaPa1CO47y79DQg7wSTck65D6Qp8RDxC388F2qgl+g2308Z03+hxT2of2nKbw37AatSmLCElipiHHPLd+zQyQoNGyu2f4wi09I+lbnOlD7T13323cJ7QF4nJ28BOK0KNLJgQp1fmdLUflrMFvWjfGXKxqRpi0/9fcS2lNTG75eQYplIGo5Hz7IoOKYcSlQuT5/Scj9l8MNFqjbRCVLF/cJ6ar0ypRAtbvPtAtJTAgmgNum8dNtTsoM55LtgypauvwdKU1bwg2Wdyr33DUWTzNMZXS43E0Z6HC9Woiv6ERw1JMX8xJNLjWRFyJ55baNggG/PC2vV9dLsqJpu1haGo66ji374NzGAOFUrO2kHvZImOBItry2YthTmgvj0yhKH83Md1JHo8LxUQoVf+N9MweAJ6HV47+7lCgMGexhExak/73RKEIXfCHYssHyZB0Fqv4yvI7Z72E2qcOyf9ShhSzb131pl32Jbh8Ft2nxhISuzBD7d39d9GRchQBIk9k58yeBk6FqjaQZBpecBWcBfaM56sBpXz3nHYStyEg11/N6biVXyoekjbV95MKS5i+lOa+2ARnxnvRAjPE5g1D8FtwgGLGFean60R+jiYEgtveUm+M5DKZpMMZ3G2GKEJL70agIhuoVJNILxGcwogQpdrP79ivB8vCsZe8BC8Sm3BoGtw4gpM9msxV8up35qmEqoSWWvXONRL7D0m2AhXsX396x1r5KRAk9WnU8MoFIy4WNdDq/kroDE0l8nL7yHGc2/9C1OiyJsaDdMbEdg9FnEzQ+q8abaBdHKeYbTfQ6m7HKDq0uG51RJUqlTRJ/4FVPI5MB8q+ixfgg7GjmkR36T6LGRp0u83ZOoDR/mldA4eT2IFbGZtP9aKXRHKVbi4FZk0gs2z7CfK0G3+8SiSyfY6PE5MzWpcifuODBo/pdlc/AVAFQWyRp/oaFTtLiSJKIgFh7khOaS+R1u+2B/K4U98nVqXxprje573CAwDip3UVrkjIANv2S/zZnWLFtT2yWsK6Q4SvgTs/qLb07ZodfIrGgYb7oTRie6PFu9mwuTt9SXL36roIj+Gafrr04md2b2i8ArB1FVupNOjpSBrM1RxYC3o++wuZWLJuYcHgjs3O/0Flc5UFx3TB/0gh+nRixtVQfJl/qsa8rA8Ewywnv+vo9PcEMGFulVen6VAONtRwwXEA5D0ICz98+QQ/D21x0UqLmlFFZLjSTn4ZqrjhtyQ6qNY01X8IMnmT3gDPAjXjvuM7A9PHpsOTwiayjdUuvtfM6oggyKR+5tYNp+1jZ6zGuyXkBMMRqeki0KB6GfTwYiiRblptpZsaTdxDMyH7KDQ/S8zwty+cRK/K7Xo1BEAcFH1fcEsEl9wvEp9PX9fDixY0mPRyXC4lU2JksV8Nu/nNxJL/qHfDBCTsGshf/m3oIJC4zNpeHjm3b3Nc3ok3AvCqt90GK/SU1x9TtVkTTi8XNLIyzTM4gr2lfTfygv8H/QY/3sQ1xoT+1pjJtCamL2CYY3HifOYiRZWPD4+hye0xI5q176Sv5eGg2Ztkli88sJYQbo4R/kXIUQ4XUN16C5pIX+llD2PAyCqdmVdE+Dqc7XXb2qLxEIwY8QOZRKs06c+v50rgHSbZWg0U22z2WaF1ChLHRbqruNsJ5AGlJ/wGnmXwqjZJu2C6GSte1kuPy8pz+zy/utXSPlPhOyBSE4gtKZxhsjCYKWeOrBQ3suRiwjR6as8UPn+xrSvwMXuERjdpj3aeuVZxyhLDvyS7KsLKZMIqf1paD0mWUpDjeELEUvMPBcBK6W31F+TXU+UgymyZJm4cWuLWsr0QgmpPdP+j77qilHd9sk5OfZf8lH7kXLDTNXtcfWyGNQkwgpAlQ+vz6qvTa33TxXGUFMiA/MO6OAeTCvqGG71E2JPb4dFTpnVWt0pRhUoGSaTwCPEL/XJf2CU/84l5QX4r0AmeSuvor3iPLtr1+nPmRF9Oxmt164vZZoJsB9x0TMhqnOXJ/f2vdYvGYtJLxc8eUPf6V7op2zejGzgfF8WrUzokv4/n6uVeYngie9ksaSwEPL7fcS2up8q8oqZnzrDwXoxJYNYjhQ4ydl0MzPSLyQ8H9HdwWxiO8XsWWZIzOocRiTVWx2kRWYT1bPI6VBb6DfNxSDjLH3tDGNfkz7iIBa6cmMOLoiJG9UFWp2g+lUw/lbcbmCOIDf85I3VVPa6swiCiBsYbhstMqftFoH4UsJrqBB7WVArUnBHzGtJZwhdW7OVOdyR+NGGc3AQmfb4wPoVT756X4WGlXugmMtscBnZfjalXui21hwv3dw6tFfeSCwrp/QQrbk7E1S6XdBug7/wNp+INlKW0amCp3rWc5rLoPq07KnSx0uvrKhhFUgHAobQRvnQudYzOgX7sRcRq+Hz9kIgQwdoX4ghvGXEVEzTSiYXp4R/0RCxaNooK3FWggZdJnRKlYDBWH+yqFYDAVdB0w/ZuMdPwYjHHAkJMzEd106PurtM+i2fFEIsnKfq2uLuU48xLSD1jn1zO/2fIl8uUrWXd/8zzMueNSbv5tgTJjIm9WADG7Tx4OJ2DDQThYiMyVDWyXQoo+oXpeBoww1DEp7rjWOyXOzJj4z/nUZ/VVpPN888lDao7DJNLcwe1gZsm7zblTGbAqpCxz7CHem7/XB4nFrLu8LhzE6Bvq1wGZWzCJj3PDjM1GvEGvGpTcx2LT1dMt8izsDPUdZuUgKzTgzLphewvINh6VGD3Z6+ZWdeDY7/Eh22qlxjpOXxQNJv+RGdZiQWo18u5Ml/DKzOuW1o9AjqGvvhO7Km7eKuPf+dKlf2eUQpoL37O4u6WF5CZn07WB+yZo6zfr06jNS13AsdXwxbEsK1a12c3/wQ8EakJeqOl79CoodeIsRuj0Bke4yR/caAkajmu3V151gdlOWXfIG3IIxPIUvEP1OtE32Hey1dBKkqbCIq/BlGJelBeP2GRpSA+Cd9Ohv6CzwGmY187XDeKIsa0Pwxcd0C+tHvgm0Pr0s/CuJY0w3V57w7+xQLLsbaOvazyHy/y3skrBwH9YRyb0SgwL8tS07cVi+tz+LqlpTWQs892t4RoXDlrgUOmdWT7a9JYOhaWIgv6PvqPCKja507feaIZkKf+VPIKgzuV5nytDT0bgHea9c6XRbiu6hlCWpERyyWc0Ji+BWdukaPr4H9if23KFttgjtHWbO0gLqunbsh5+Vop05W/tBUDUjciZ5YlQnEE5o33IlVWGFqBXWfAUmUqncqDd8kRpGaTDz8+KN19fO+ZxxoOV+CZbQmzishqy5afrazsJYf/VUmcRfZEm/pLQADGnfYoBEvS0r2RdvvTXk9il3E+FC29mqxYwbNzzXs3H8kIlaVblTumnU/BP+2DK0JmG0vzCrJEyfh4mrVRyXq
f6Ct5Vyi+wOnb7wlHOp/YSOEPXLAVVFYak22NPM2rx+Xwejj51v78IFgIoiwTNdqJwmE11Y/nmhP0PRv0hDwOnNnbCDIhQop8UuIdEfdye6KVxQ9fkB7VFH8U41A1O6SWPyDWyjcBA3o8yFBVHVZwn34t2ndz6N/u3ylofRpC8MIapftY1lyqakkJZb+Oh4U1NMP2NZ9fkklQGqW9ewMUFlPzLXX8emiZHP6Q2WGxuDb4CYuj+llxBY7qOq+TramnXb7fCw94P/W4bH+fA2aYN09WWjkr8rAzo41n8TjUJpVhdh/JLnkU0SpQ6tu6PQi6eLynpWF5ebfipnD8RyeNZQRg0M8XgBbG6c6UDwkL9zWDTwb/MIyd+45S7cQcAVd1ELIVcDg0E8oqb07lgDcZCKpxdGZeJ+Ogse5RdGhk+sRMJrHvJfL/HfyrEfVWVr/vIp4dXDbKEd1mGi8IETKLoWKFlg0WmorHy6O7eebXyNcX3YAObcJlVw7bl8h3fatImOqzPVQKuVI1Dp2ugAT5jnfMpOyPeXVEyy/LgV8plyMiFMEHcBZoy3ge8MfFyM94HMnflXzXuQ3gLLuQhRC/EBjWvcYv1Mlfu7ZHtMGk1oeKCzjxnGTm9NH7nv5VUqzacjaxLF8ErbxrYtoWjMOhdxqb/dpRihOAnIFjB6a9l4Io8xSxV2mlxscJr4M31G6GdWzt3OWFHuqMaFMqX6496qqpldCPSzI4OZDEGOuSXD/lToj1F9eIBlVU7U1lQM7xrKOBSrm9W00v+KG0paWa5IlUMLIe4BGCfWU4GiFg2W2Kw+3FeZcL/wyIJ4HAt0YqRgn89Pzg9TDtJveHPRc0VZ/u3E5kb+6jPnIpJYf1/C+IMYuXf5PyHeTm3+JqMLii6sDfJ6t6RFSnGOx8C+RzhrmhZyn6Rx0bkneg5jJ96rUZZS2nuay6JWxl3A+ruYm5o4HpDSM2G2q/TarZ08krBNjeCagwtmdfTIswhJ7gS/BvacWIlTcKhEPVwQSXOrjFIkqNve4rHhxaNuwTC4a4lXd8Ypallh5zje+JBfI7dHhJgEXZZ758qGkC1OHJNRuC78boDDzC4NQJPfh1PI1Fa5QMuEjIAq4gqybFy2x8SRjZywHWgfcXko/DRIRhBzBCZsNnWfCO2+JM0mvtA1r/tstvUhXjZUueET/USg2Yf+40IBbbvH8bdeLoKtcbY5NLup+vy3KY9BVQgmiwvbtQIbdRBlfcA5LZqef8/oJdze2TcoRf3rbGUUusKoD54XRg3EiBH8oR6IzS7Rv+/4FvE/boboKvih4vpLeFGTU3fIQFImduqAC4vW38/yVlkYraYrIuw21TjQZuhY69nfiGU/gt5khbWRCUmJNxpd9AvBLC8HdlLYOHKonenT8/JZoJ2ffzPk//ryBN4dcSo6sNE69pdmn9UxmpqyFbV9eHixuwW+lyCn5HZRh7BqU0Hq/vjZQLK3NVDIdmw9PQVWvzCGSBn6b00/oITEi05EBBLWCe8YtIc4aVAH16+9BJP4oN/zpeMpqwxzvxK9O0YdZXR/FfK2+j2vjIEvRD4/NngXPUmIESX4CAigv8xr+XS0ny1FBzDVpfS/6njtb8ncxU+Oc+iJUgQICb4OU+o/Xh+Mhe82YCshr7NznsXhutLfoKntmrNJLkarfzsdGOyzVUR5DwyouVB/9yXp9NLWy7/LKEyKdEKk3n9zeYyrhBA1ahx+hIhAbyH2CAkDqJMUrT7gapQvjY245jIrSaiGTnXn/qZ8MYYjuKihXIm4dHBsmFb4mx109YJMOfPWnSSp/p6FOBOnk7Im5Rqlf8tmt8aRJKYBWXYkDUqM//U8nlvleQq1gT/s6HneiXNMU4IVk4zIlGE0RMtWVrZ8no7uQ4Ix23h5s3A1p7xpVY3wiW+QigE+xGPGwZrhO91LXYSuh4HuQT3GYpTUsA33oj27XQ8nkn3B8SLiuDlAPd5k9sJ0oKuRAIVLlSp4F/cVoqs2Jjl/f1DOfFUWjRKvuEgzIri/IAZeGCq3bs8v7OqRJZLTSIRXfxd/C1NtMaUSQcPUSefD1U2D2VNRkoBwLVj/bM4gZgtL15iaCtj9UOdU3FID6zfi1q3zLPKimmgSLjBMBLaanXPFtKA2FT5jLwp6kP8hQmt+iJwar6e1AZHdHZ6VMYVKC0Hpaoborr8djuncfRNVKkmzjFjp40fCQSuJCiIBDrtobGxzLvZpV+lhpqISG2TpO9H/WzqP9l2GKTM4t4bex6OJ/WSEqBzLcu84hj1+49LCGzczQjR1qMzDjJfpu96Meskzx9cvP9vWRPT5PlPIgnQsDsDHQ6VfUwl8e+3+Y7z8vA1hjQvfy4rH6hOoyy76F4T3RosOKO+Uh0Si/YytVSVnllDsxa5BD/0j+r/2IDXxrXI9GiqyM9Jqx8Sh/u1aegVdMwU5L4poYaWC8+T7su65fq1ZxQi/gLzRBrZO9LnS2mZf2N1ZvL260Qt8/7OxsSVL7kna4i91vrp6lW5PnQrt2h/WjMKphZSMltSzcxZIujDGW+cvIZixFK9IJH5PqYSjh1JX5gd3+PRoyDd5pfpyCgumss1ploLXBkK7erSJ/7DiWvr52BuIS0O8bMeRt9i+iWEYjj98sCclU5F6B2xSTbsprUCizZOoSi4e4VQd+OGka7OineUn1ZG/WfGza+tMi+JqyPej8ajKnHn4lC8LcrfROnzZN1ka4iQkE9j7VgzTnWd/vpYr9g8yCKry8i5Hj1cXnHUJXtTMqfnA88nYmICf4pFp1C7S69roVHB0/5sv85NqYmr9uDzS6sxFdf5O3i9JP8k6bl7Uwl3xVy36HR/rhxmITVJSG4eyIG8Wt67n3Ps8YRk+4yqZAvWBmomcRHIeufReflOG8f2uiONplwvXo7Jz2AICE86XGX5o58tBx+Ir4nmDZlUGU3yiH3n8thgELOPkZST7YP/y2plkcfk9vbOyv0jt/WFReAriCKQkSqI6aWu4HLvjGrP3giffJOun2ZWnNxYv0UY7ltnWOh0fILtjcyN8Kk+wvx4M+lU9pEw7G65EyM0e/INvSOAwz3Ve3OZJzf2E4aaIn1DwzlONoAlU+nHQjRkl3Y/f3oPGDvSKbI1J6E4dvg87pRzixH+8YOvB79R91zaHU/ogJxpkTzb5asNhhvWaywumqKAP3lG1zRz8vm1vcwG3t2xlSfPihDkYi8xzMfP7ySpK2clxHInP+xbbUKYxDyxCGfIgJVLfd71FBVrZFn9FlVC9aN9e3JFr3XqLx5o6mO6sxGNWLBoPVwj2G8lXZYS0UBno39hFwzToYQyfW7EWcoXc2ITIZRu36jijS9zJI/noQhdjOzPWOwdde9IwY1RVdWUNO4LZGIF5GcZ0wsMJ7uBtSLfnrPlrvz4OKwGouKACnnEa1wlAGUsgXj8BhkeliOtvOkNo54LcIuVSvDtHf0HoHHZeKpmBv8/76ZYYwcYy3JKqZ0I71FUT1bUfCrOHq1rPHShNUpYMw2WwXpVeb6FYnwLHKhEJpKkZqEd1UnRluQM7zBsX
4qOxqLjYPY9t1Rjw8uLDOmKPeKMt0iVaeT40ZHMVP6jsEIB1zaO7BaynCNvE+GZH/8USppZxB5AadtGqE4tuMueGdKnu9Wjlb8G6nJ01Ox03LbI8FxvgOC26v93b/KHkPQ8KhZw7RRXRwm5SG9u/flBL/Fzw9PAPjSewRi3ye/BAFh74ObLYUYiPX72mfZpOqw3o7jnC6p8EEbdbfFBN/hNI2WNBkMgYzOmoUSJ0384KhXFxPnIrqVOlctWowfrmzrtRaSMUOczw1xmhf3B2e3qmvpeyAOww+ofJWl6j8tL8TZClTisicSfaSFVoS18Oxxv2CThyP5qWWRtgOw2jgRWzf2QEKh5whQ/1MZOqFNAwcTK+G5055Ww5UaR5vWdRx6nnuh4BiEWwRIMh6N7rvEkDT7n93Pk8HYOl2rroHAjF08uCiug1ObH6UUKP/AIeoQ2LjXggP6bzVYhWw4ze9wZ7sYjflsbb/nPF8LfHm+AKW1kpNR4rLYvrZ98KwCoU/mEVSbU9nMSKh67hj/jMP2e0FNEhU1VkPuzAZJ9v6PcmyFU4jyDWQEr65oWATk7G2f/6IU4jMvlDFz+OkSSZnQxXwHGq1q3fG+l7B6MM69vH5mfUMeso0HoEh1caOrnepagQTlPRDqLgFc14RZlO691K7MEnFra/CuPK15muW0ulSnEvLNDvlFI7PEfUF3j8V+E2A1vZob3o6UybCIwqu+sO4/Jf36nlbn+x4ee7DRcOYRvylTid+qtJX1rIvQwHiXqfRHPcjNOSNXKbcnhTHDA3kEtl1bG/anSfxon4MwJWBMbBgi+hf3JBieBiCfwHAZXZB5o053NynvchWIwKoiaQ9Z0Jz09h5cqwICSER1vC0Xv6183CfWA2Q+XK5kz5PrJrKsC0IWeG9VEaXV59pVq108bjbaG4RfrDxbthOSLbR14fYYcxhr8neQ0LN1iYXJvvCRow8eRwlqLXgg+ptU0G7pKc7GByiifEq1/Z7EWl6vjp0E11Nkh0iCOhDsy+9cR3GtjC89EICTMyEUl4rIfv0YbyfjdDfn5YVf1bAnKF7FcbG1DFtosreCCD/gE5bpWqTsZi+vomTBQWgujONikrYR7Qwm2TU62CyjOxOVacLTtrvnukDzh60M4VzvCxewK9ZJYunllSTeMtItGHENl86q8EGoTmUAJniLL2MLqc8bF6V4vopB/ij0l1Vf6hm0OcPlPS/bkSU3oOzkzAFnXmQDUVBFcTGvuVV1a42fuOgzrGtJMUvIOmLMQf4uiOf5sgvDynPrCspmmU9T2w+ogRU1w7YZBAa+2AKKo4CYrcgLuN3nvKy7bfM5LtLP6tdfRF/291Mar9ZE5sYcPvKyrvVSV/0G9taO7IysHhn4fPJJXRtKT75PnvqrVtG3a6GPQJQ82WpTpO15IJQugl2DFv2hThSwapZo2efaG6jCxsn8V8Jkpp7G3Qb/2Ko6ZRwz7f3yZAiwIVLoh2m2/xxQ2j3jOnjdFXObt5/AhFnwTbs9LWvNNgV5WCwx3XKzg0I4A3zY+9Q1aDVGIetS8CH59iMWO/j4lDbOQr8Tkfk7H3hFCzO320Mod+3D6mAl3R0UlS2rELps/DZy3iB7aVRNpdU6q14p6bSQlAJK3W7v6Qw+dSJ+4LU9G06DSVC2fpeLvJIkZQ964Q9fuHJImR1h/L7P5SyNYE8p87PFQY2dMIOaPohMT/CVBg8Fng9/AjuieAuJwNhnO0GIoRj6wysKv9TehD/NletjsSTRT+ElMX1fisqQzmn7HH29+CtdpAqztUiK78b9I04Zcf4aOc5X/xPCj9F1DdtZ9RoPjOSNlDazDcZJ09Z6iOAPnSyJImVQWBHVr/q8WvZDfsBXLvz0319wRC7V7OfX7ShdguYqBJ4QtcHaxlupk0YlnCJZtt/9Jqz8eRrhao1yP1XjH0ZV86g8NGQptboKKND+55i6e/skiocK42G1u4AlGVwu9fV+9vEhxXcWEvUyOCBCFzeKD1EXwy2rU6Nx+QP/51PF1C0DO1bUJ6NbHZ16xL+nElS7RHilz6beKwwa0EXxrTLA7l4m7k8JYRP2DOfBEZz+eno90JznyIPH+GYxLwAXdLwanUtz55RBKwoPq2ZR2Y1zw3P7vn0rj5FTFPZzNxpYOsg741/sVG7JOCT3AlAGveo2kiTiA8cILg+8YT26OLkwRSUQR5vAf+c72gmxXxUcr4l9elLW8jfMg+Nvpj7SA1cjIXa/YAIZVe/C8YvtM5/p/P3+kv7DAUac9gKygKb5uhoD+kIYgQwXGSJv6eGg7CubjblPHf8j1t7thFDYtMaHvHYosgIe0jhl0UBFpmMHWKR/C2frE56UUVPs2fcIFAvNvNlkyS1LzYP/JSqJiTww7JA2S0qis5Y2RTo1/6AQyEa9etfKGmijafK5zvzA3HTeD0ZufWMwFrWhfUsuF6ke1jGyTimrIERwMtMKxHtRMffODmem3ILprOwrr6oA3teX0NBv5lvoz0xTeSoymULp735I6tDe3Xf2EzNP+NUvKdtb9pO4P0+gqQAl9HzLTCn0gV2c1WPK5An2ZsvVcOd7kf5N2ZPw/3SSFAaD7YBkzF2EQr7J0Pw+ZyZsZsttwhosRCzxyK4/M1v7xSMg5PScrGVUcH3WQu8VMITR55/2PpOrYkRWLgL+HNEe8KT+FueArv3dcv2bO3eTPdNVQiKSIkpRTBD+UQhazOGN73FofLUVFZKZtKTsWxAvtow9/+afYnT+wl1Qf6KyrOsqWioe8HOT46og93gHdY1WeegrfwHRHcHqWUtPHiN1Erl/zKusx8q/IQQFLt7xJyXY/W4KdB0zJyKfHQ2WNZH+bJom++nypwMOch8kYMxpWFqkrxS5i+bg7DmHek4phbqHl5xwdG9/tFgSv9+XVi9kUEfcX2kib6cJKblKobOh7nHGJFZYRXf/At1nwJrqr0T+kb6Ha0qraLPR5oaycXmHtLytSNOYYExrM2kumwH92QaowK1PWTLEuaLFbU563aMp3UP3nbQoRueY0Te4wzhHviX+M+MT4qiE3yMWLY8nU1x77VH71ZJooivZgn/YJ/1dvaQXlqOfHYDXJnDT0l8cSrf07RtC4zG+GKfZ7VEAJBnQVygF/yISGMZ76MfM5poX0y86KJql1793NAH6t01T660WsNpzpSHnrqERp/nKJCYVLL9nIvUYgseZCGkxzjjXtqfCrpBWhHBrR7u+SZ1vFFeE3jd7mQx18+ftawCVjAa2+0cF3LudC+JO8Q0drBCkh9njdMhn/p8RUdP06LAThXefNiRLjFIHSOtpRHRxl2ui1uCEUiGNxcFemhpHGERywhrXcsCrJ5R6udMvDpx2sQNM7TaZY26BTyLldCFJG6EHZAStTa9uSMPXjT7ZUKzkLnfPKzqd+rfrrxSbAsiSx1I66SLH1chVxQBLCkv5xbSe8ituWOwbI8f+VtZDRa62nPorJ/k4iWEcRGCUxtWmGhZrBjS9CsajnSS+BRbKkjJKntYnsmAmP6Y1rpcCSeywr5loUPGAyubltoM4z
CsKojiF8CBECUUSF2gpQvix9/hT9i0tqg/la8wfDx6SkHP8fKCgUznVGc1ahidA5i+2vZ9lAYXO9FQR14tQJIPOr7JLKu8Vynb7dS5xi4but2xnA9w46sUFsIhcZ1eBGvv0bVvFo6dsBEDFXq0e1R5Qgf9OS7DDUhaPglSXPpdlnx8sMqXP/XpfoDTW/5pyWlJ/z8rK0VX+l6jin6UiEM3tMlnu91BKnFMMkhau+dkKZX7/McsiTdT8OExXHn/epT8NOiMh/cOhPrxqK8tkwHzE9v4GM9+JwJpy6lF9ZXRwMDYKkib8hOEG1/hqwxlfom5FJ3tSsLvJ0JRIE9BKx+hTI5L411NpD2Aoim9+XNHNNKLa1/Qq8LAN17mYT7gzmUZ8DSUnH/HQGDQ7zlzX8tMrpLrrb8LL8aoko1AVNLC1c7uMi7hToSWwUH00r/qBBc+78Z8M9qVJvTCfpfysfBiEMIgeP4LWmim9yKLd55p6PyvSbTAuppATK/aml8dkTsUijLKZJFSXog/f/vpBl2h+3dHSOYlyNY5GKYsdgOWqLBEqKsCaRrdx8Xech5CbpXnpEqmp04gMdlJhqGzdB/jfp+rcLO4ROeU0DAM+Ggin2LYakknrGRGXop6Caowfw4tvfSzAp0e4GbD3ltaE4CxzCc5yGL6e9WcY4elvXh1KyJl1hRBEYa+dfQv8HnxSfr0qTQyIwKx74PGYQ0H9egwquMLGUx+gZFbJyWfyLWhDi7aIMpEDfn0cyEjohjB6fyBXyDOXBq15AzA9pmyT9ySf7RKsAOJ/iuxfFMiPihjMGyHlqc2dQu5crd8yF+0nhM0gz7NI8/yKP23bIPA+swzFAvV6pHFyaw32K7IX96GDFcTfDr1c2KamjT61ekoKFmsJtYDDakJ6/m5CvmsfRjIcM5zk4TU196zTEpWzJVLr96daDxxbPVjx5p1MDLRDhIvieeXoVpbHJBZbpf9AsHTAE7/fZjSFm5F+/XMRudVThzKoV6xQM12nyBJnTQ+7LPeTZjehDHfRP0garQsN+3osHLZD+18rMgZmlNGcUkkc8G2qIM/D5kUsPoK+M/Yu9bczDBVXbSvx8LVR+x0an3NSlE0X/Syi4IZLB1kkb/sgBX8xHu6I5T9fDh6eyy7U4YUIWUPYz+ktUOOlaOxxjsi1IYh8XJSoqteJW+A+okTZZ513voz0gcwRYO6OVui9j3tvciVsYAazPE3jSuuQ8M0GIw7F2QKO2PQhZyPYb4jh6H6nG0uHySmclr2avPK7p/mbwRBdumg8igw4XP4RUi/HGTPNr/LnDXXjRfOvhXCNWz0kKX+6+RaSGOD4BQ/fUUr7oWTlwpfh+qFzPK+p7qdKtrqSxFETuANU31DxbC0W8IhjE/4uhdw6ODsA5UpNgDN7KCNAqzv6KUGSc7HwFhQvfkvcB8X0lmevLMASNLyf4SbuNTz9gSEQX3AcT0pRFAjYZHxG7TSCIIuvMpvAOLtoejl0ChuDk9ebfoVSTBldvXCDrGowfmvL7CfX+oGS/JY65Twlz+7x0ZmELMvyT5C6nS4LHbsVK9ibuX+wwWS+RGMKwege86lyuCKAnsd6vWWo0hx+XdzNk4UHk9C5edlq4XGzn9zHAqPnlFipCo1PLXSJQbswLgaGxmUCZTRcOSBQvj4zuPGhbKgJOrwRdLhCFMdLrjNzRuCjreSaomKv76ctxLqJcfx1d8yDsQZXm+7TSekaP7K0QMJKIvVzMg+2/2uF/8TQtl5zg/0hXapUUWQcV2Kb7wC0rLEklsoXsHJ4NyLKwXUxXl/CcjeGggFJC5Y3xlFXoXrpQrnj3nJJfvJOT9XKB9NzD1Vtp5hFPSJWBr10TQzE+2I9h1ti76cz32BfbA/XgtgrDKClhQ1UN+Uz/R6jHR1RazXHdhxct84Wnqn8k0PhcRC3vYEfM9QT6fTZE1ge6O2wSTKsFDYml+sZhnkQLSFMCMTOr7aJfQrcpnKM2pP9mU0phT34hKWnc7t0HF+fnFGob9Su+nAg9MJp3ZhhndodjgSS8n55MSo84PpZWW6B/R57pnDh654zEneydpxOCAGpBuou0YHK1JW59hIXzGnzCBy8thO2Mzp4kovyxNfzp8jAZkwxDkRU94oXsY2ZC9Y/NUCbX6cWueKMqt6GnhZ+UgZMs/q8oqhuUSdz1GmoH0IFyAFDFOPwpdLqIEBEFCV+mCeYFupE+hiBtWvfxs4b570Z3v4E03LL52J5QVmNOe9l+sMbCCYc8Bx7BSn20KJ3CzBIXyl0OVPOVzpQyUlGM1xo7acIAvGQnu7f6hwRel4QUQDau5nsIoa8QAg7LVBYGZ8nncb96DfiVWe2oaPTZ8JOsCoCUcLt5OojNNwmZxexsNQfXN+8Lv9tYRJQN/7H/9SAVAp67YICBhs7qYowXg8ii7YcND2hs3Q2gAhfMj8L8HJaEYmakvVwzeaT/I+vUan9Vh7IsY0aXNxE/fXDpB0yhtAoszYyYySXWC35AhBSwUJuMqmwOkGh8/mUP/WeEFb7hiTFf693W0TxJ7LpY6RYyI2IIQ+xZORCq7Kh/2C4KlcHG6q6vqei8520ZvhgPV84+EYADbpEEnJY+K+Mkj6R//lmyyAPmRTCP6k6dfFN++3vmzf5weXS/j7hIY3nY67/e7J10i3AkjosyiSNFFtGb2Tl9wpaq/tioWZZeKXp6wmzAE5u0xC+d61RfN7pYAh/X4N1MEIi3vO9Yi+YvAoYnAJw9Rv88XM7fPVXAbx0PPAQQuEx3HiLgqK1fvv2RJF3zdSY0gypyMdd+0XKnYcffql061Tq452NA7qLktWy4FH+7X615kpkckK9z2sxPCa9ukvz4HEeZmHvoI+Z5n/F3kn714Y1ckj5AHMsErpm0x4fpwqlBIltpbeSnPUO/xmvXtf6RcWGC7Ggh77RsKiqOCvsq/fFchDfCY4lR+6WzYgWt0O3r9TfT0Pf9D40JRIr3bl8XLWISXcuGe8XLRBhE40AKADGi67E92MYynefo27DC4JEdmJMrCdOP8ZG2RHPBj6Pyb5gWm+c1yPMpw5Izq3c5wqIjVv/gpU78jh+Lse1XXnPS6jgle1f+8V2T4RZPVBUQew0BTmQLjNMUs3Z7dKVqhP4lJhLZa/W1otX2nUPyL92DEfGs4p/fyNwCIxXNVEcKcg4yeYDVY4uZ9/WTGnwaSL0rjatL4yzRgPXsXlQp3COj+2RoN2liUII0H5INfVgCQsYBoAEMhli3WdT4Olg4oGU7LAZd0BgZdgvr/NoDAGVB0iXyQgy/oECzCfB3lfUb8b65gbVRUQ7ngj/6nwV+1A84cxrWHBtcgvqk9d+HsWB5oiFs4xBDtxLnDscGllkrUYlCqNXLKY3gelNiDWSqsAQYMdMH99q6XBPJbLVP2Q5yX3ZWnmnXMaRjv53KGL2xqQg2S6k8j1vHx6/s5fgbveZ/llUAduXYSeL3wbwP8N00NPVJG2mywXzGE+0x+X8EiOpH6Nx5KloOJ35LP2rmKHGj8w3uicLcgSm104Y2nBKrAC0XtKf/UiA
QVVvw7ePQfJXgIwRpK4vQtKnXP+fN+tdD8pYw1T2ltXf5IrwRMoUmcrsWsjVadtCkak1vD63zMSMk48Em8JqZjB6fMQ8RqVKVpO7+VzjGricjka++VVb6xXGWSEKHXAvYEJZNu6UYl6hQwGOAPppsMW59qR3K9mucEFLxeuUAYW2OXczFcUgqJWGTZHHv2uX5jz+DrRBshu7ATUuptoVOeTXSLA5dqWhcVyh3C64wOtd9NA9I77XoiqKkYULYph2nF9JPUz0r6cfVyEa3DciBFoe2fXvwEcBfoUWdBO0zLnnLvT1ggIAyyN7gZ/Gra9/9b0iiO1kjHi0FtxTa/fv0nAJjz/ZGZ8eUniLH/hrgE7i+P5/i0LDSYI59HE9W7uc1bf2EDy9G4wdmCl9tfCqGLNjJPN9/fsh7D2EXh7NSRf1hn+0YkxlOwnY3n/GFprc00nIdB46dkEDVu8JdzaUHT00fP8k8A2Qar36T+1L8ocjcRHtznQITlqZxbMynNsu2Xonu1E68TLSnwEBsf+o2313l61AuA+d8UdohQGN6i6hIBPeiUlYEuPdC2zO5pxCutoTqsi2/b9hzh6c9W1OBfz00t6QjUzhdlHaMFczOIXts72dvSmd6zeIe4MJi5WQznjM4IPJrhM0C6Ang3R/r3TPltzibHNM4+DO+P1DAwTJKFZBaXCdyxX2J3EH5pHqD9PQQy8zUAwXUdJLmdpA6I0D137lbniVrQ+D70RIIUtdZCSheLv+kNLnH5SejDP4S25hwLQd86dqwLE8M3T/7NtaehQ/hVwm8x77R2xNVv1VFk+UMRV7HerG2hz1/F9CCIpYvxlBIaJA2s2wPDkSESvoEaPxuG92NPDHuFCJfh69nS0SYfn+u976ULUjBlcfFVkTGRRyurqKyIlFZkOIZMklybep03jIfnL6wkTL49+drPK1l6suT9ooYF3kiRyDqhn6gViIHeucnY3oVDtnZYukkRSejYKwPbmfRWD59//g7lsifciMlTlyTdq/zhfQkhIRpklg4MyXr22ixdVnZku1aeoCy5GX6x9cbwc8bAB4w/aXjxpI8uTz1qCy6av0riFQXa50VuEHVA+aVDLZLv9vbzfeO2r9t255JOkY7h3+UiuYW4i1tWfM/LdL6X38chchkBIfF8QBA3FPH+t4FefGB82Wky3j/iq0DhbiLDlJSdZ/U8E42UL5a7/rUsc0ROuoiYHUTKZkjDc54ari6NXzUB6+VFP1lmDEKcRFzlexmjJqgj2qW34ZONe8s8fMr9tI/aubDSJVR7iy48JfPeSQUCcTXeF9qGoLMXKWLDoJukkN5m/DLm1QYtFAnX36jvny9T2Mv3YeoLMxbu/V+amOgaOYChjHqE7/SS/Ct3lRv2njH6YjRI54CdOqwP6t5xuH2ndvkJ2PH6FT2DzopEHBGvRxxxbFO59BABc3fPh3mgZgfxJhDuanbR3BUX7GJ5oP+nWaSm9UH0cSC2aHUYuduRMbvEHvf2/KZBJpSNAjJzftwvdYSORM0qPrvPQHR/40m+6Bq2JIru5tErJvyUr2CHjKRGx1I+n047znIMNZsDuZ8hHQLckAni4j3JW9dUEKNqXXmMeZR+Y6LCAC0o4k2Grps0SpqQ06kANdtSEYAxk0/M/kaheAm9bBn8/slQ9qh8dILwbBBLQIk6BXEb5w17CcNsZrkk0cxtoOu0vI7jcU0Z40EVISiOFH3pnuX9YlArXIUTN8V9nj3QcEjTmn/tUd1bn7PzyQGYozFYJG6bAlsUbJLTGel7a9p9ZS6adxw9b7CgS2Qk5msxzAd32jE2Vp0f2TXbsXHll5GFuOibiV9uMf+WDJlx9fMCuHiNjiQwWCI8iWgSm+fHKqHQBSFX6XuunXqdWO7kQq9VyxHOFKp7lTEKxKf0QyHTbWcrD4g3HZSlUcr2cn+b8hTBQWnnwOdzTVtyuu/dMTbPlIdfo4hc7Qoj4cMIuO/AyoznuxNRoMP8wTbXuPJlKkmAYlNxlYfs/XU31Zf5penPxEROttBtpumb9Znp+nfvIxVmCMbzx5lpmrZztEBM2uCmlx2C0tbXb3eecu9s/mqZPWps+HNqm6dWfdQsrLx6yaQWvlf8SZXQ5n0tQP6nS7PgczPQOQLapQ6cRpMarjjn4GzV2iqvqZ8DvNWDBJqU3EB/N4syDI3j+o0Kf4NjwPUk4lL37SolgbE4JQEaU/iWMcxT+dzRFegrMmdUGkH9H3JDFG7dnS+qfcmF0bSe5RhAmmLAepyk5Zn1oJDh5ah4n/krNF/1mwm2/sKtY6vxGn++67Wa8be/j/gmrYmlm3OKEj9TYXEwoauUs8s545hKcDCtltY2HrCu1RIzFCGgsBk6cTzeWDgNWWdn35rz39hhH2mX9b+Kd18uInykD+Oupqe47eLszhTZlj4IsqP611HFCCJZQxfKsMaRmGIae0JpoKTSKyFKB+y+TiHkxDOeejXDLDSjsR7ohgr9rTMrwWc9i2IgqBtwxlJ68wFdFUjXYoqI12bFm5o8TSCnRImvUQq3OZ9RGDMJhnA792EIyPc21SeXFOFRMIWXHdOcW61S+AbyIXmMXPaOCtIX++yrGUaulu7hW5B4zGUWoBUsrDtWGjPPhDNIukL46gBXGS1AU3WAK5FI/V6xe7u9RGXnM/lpEjqQP3duj962iMZa7ijUeZOYCu80Lt2v9JxRtzuCP678nejcTopeu5EKmA1nfSwDR0xyKz3QZZgyp52vAcg9iXBjZLXquKee/FhT8yG+l3LtiDtEXjpRKvMDnUaTQvdTl3mBivFmRbOC3cD3WLKl/hP1Hg1CUgrEcDagDWhvAIwJ5EVDwJI+BEy/soQCdzc1M7dYLKoJcqCfNP+AH/5DTfA0wUYJ03CmIHTleEA8iIeH1QES2ZnBH/tyUxZmLfUw/E2yiQaw75TVLK7yyvRZBhIFFsdQMo1djEaTw1n41v1AD7COtKHiyYTIeGi+52DBfKDk1MmAN5WZz8gGHJZqKCx/jPylaYzCT4+zYzDPeTqDeS62UKR7pYkLf3cty08D0ji0tmOQMRXnHV39mDB2K+MIvP+UYjUaGapiw7bn2UHRFQYb+7Y+mXmQIb/5RxWOJwVfsrQiIPc2Fy6mMiOCH+nvSrkT4yW9CB2zWCDu5KsVd9ngC2J2WpYd+lLCgoGKb9eucrh/+B8HGRYCMgy5qe9YMf7Nrz/ueOUM/kKtAYWoJMrd3OxXAQYPXOiJGu0Qe71k5sZ0v+56VgIKcKxkAgjyQPib3cc1LMGioEEthwEyjz82kFWtzesqM7cwLipr7kyAelZlsBhfVtexnwTBmqb0d99LDiFe29Mmgs7S+0Bbckidc5j8q4m0D72N24KtqHlrIAVN1DTKUaYYo6yBtV/1VoaJzEvIPKkxagO+Qm27HGqanB21m39oJ/aofOMFgSpKROxFyjk03hbzvs7pNWzGpb5/G8A03DinQ5I15+bVSKzp3UO/ySPnzZF9b05Wp1897VBb7oOnYV3NvhB1vBMXaaBT+ic9vq8ryIDpx9qR2ZN975zKf
iPkPi4V/jTV3LBtOrJl3vw2lHry9dmu4sN4vLLJfuP17Zc1IV1ZgHEq4PSXUvsGl8fzm2bBA2sBgEKYVLnWLtjZdW/GQhAa2EbMZRdJ09CusPbDKQQSGD41naND8+WiK6SKDdlYeOMcL7GMYckpEkDYk/I3lU1GQDD1Nw/njWUqDs3mp2+HA2h6zwRZBQSQMosE72kzggK3Z9LJVZSkGTT8LIuIArdOVAs2q3BtbovAUYLeUzWTK3YjRCtscOC1U0UxNdmEjx8C3Sem3ZwlDfOAE+vUDssXo/l034DT6NmEVoz51nVZltRlv6x6E6FlZazb0+tcNiKgaWT1B3dzO8KgAvtRCzRalAmtYYFizMCNKZHxOEc1txNDJ000vA6edviHh1qACHfmfvK0DDUB3sitK+Z+hxDk1l8WKkdnzgefKYJfizVF83cQf5tFWMpByOsfPV01N9/5NfYgQviA+DXSGEdrAqPv7vDg+KRPQ4Tz7i7y5lFq1nLjX5ozCevjTbGRLVLsozI+dWwE/YHzh6/Smw4NwOad+e90iKfx6DN/v7NWfwJcJ6mVPU1s8teP6gfir4AonIYfsC5Q5AF3HbPIlO5Ys+gAddz7ij6GtjMcZsCeg19Dwu6eoOokQrZY38NCzLAYeOckz5KjhKTgfhkr6kGWK/n3sMay5xP0cC6CKVLagmFtaUlyo+fRtEexHKrZHmaGnFnG4JU++CC/4My7IHP2b8iDWFl51+wtyXhgYKzWudpY/7nu/Hi+MYrfYbB7/es76sK2ZfwhddVz9Vfj1MmcNl7a+YtFwO1lHEWqNI1+M1bXGKUfIDceOtptRJx70O7rVBgc90TD3zObaK5o163ygeANm4L58J1EF8Mi9jd/wQOkvgR6O+TUv5oaGOzOLT8Vh72DskjsbwLxOkxf2KTWv9w24ECfH/ohVXO1SSRukt1yfX7c5b/1V4A7xfTEgutCSJONyd4MGIrS59MUYWmBRin/0nEirvJ8wvdQO4RDnUnv8Zextu0Pevi7TDiwUm7WpHA19Duq/WWDSOp9C3om6aZtruFzoJIcwTmCEOVfUyaJtXG4q2E/TJ86J15dDHBROrnPvuFTS8JoiFNO0O26zu65uOHa/5sAXlugNRglurN1eJ5ePBD6S4Lc5fFVRcxOtWP3haxXLwzqsMRoTExIlhG9fAOQEIlNxeMVZ7QSeB4syYQoJ82Wh+LzmZGNpiwnmvFtMSLp4B8SHyYVW2Qx1PC+pTZDSH08NQFDVbGtRiYp/xuODTJIpn/MdyFmzwWsmJ1l0Fz0ocS8p5rnBXMH0/MXEun0Qz3W03TRN4AXed2e6UcaH+v1QHioqM1z0NA/xispSm06dNp0m5Rqh/UbiynXRsqmnM3vbHVEYDleNELiKo5H7JK6cE9roR90Z/jmO/62q53FPjDd94SeUtHFDmudLBWRusI+feUzBopbxYSQlcFonyezCVbDtLhW1nxbjsPo2/FBhz06SVudNsyy3aRMJiOZsxF0uxD1y9WhGydvxuBkAhxAqtO2MX9wUk45TfBzqEsUpDVKh+zYWtL6OXP0QRLrdhoM/PC+lZRPA8jYWvvnc5J5Q69hd5BmSh4KnfXwhUFUxIxY9STslf/KHB7EPUuAyTPM4etuo8NuLGYvwTiVvfSbOBMFM5eLscFbJr9J++ejCkjEcd77836hHI2jwlAiSlQk3/G8JXv65Vc7FcZ+elgfHyPuPvzccz4kNmMzjAJiSxRbNqPJdCLcpJkX9KVMeqOXMEFfUqBJFwCWKc+coss0rp+E/rGqlkyZLINsWHlas2TvZfqX8p/c9zFiNoMLcIZI2uTzvm1OnR86ECRSFqXQiaT5U30t4TR6MipKPiFr9/qrH2dEQHPSyWYlxyss75IsXAyAkcvRYctXMzflxzukP/tjsJAsRPIgzuRbGH954HYII0QTTBDu/x8HCwJ0698j0rpEm1/LQV29OSaU7Hq3+mDIh/o778Nh68q5lJeEexZYuSquCGZ5/dAPOrfs2w4zbCfHG8FUQ8RROClMeU9UD+dNtEEugXX+teK/Tge89TdAPpB/aV5PVvAgjD9RfDhLDKIyIUbAgF2Bk4nI+fiWF/iVvcQVCkaCmADRwwS/HcSfp9u4+Y2VG8OFDwIqgg4z3n2SALK8CReRlXMPzQMobRITPP1lzcT5DTez5an7IdghjMdE+RXKNU3tPAu5Nee5D2mZ9v65Ylrt5/zD9Y8z/yCsAWykQRPS/Aavkld7XOR6IQYGNoNYy/7g3/E34xJCAMvnGw9OP065UH9lRDSYKQh/8JqF29FFroenKvxK8y3YF7iY55p+vpRWf5WEKRNXYtGVIsMcn6Y2kpAGD+Knw+Z9wNMjz9m0MjHmcn2xPMJ5Soj9AxAXgw5/Tv11MHm25zD9c2HRZSm74XgqJY9KyvDbuXUgB2VGN0aaz17Y5Inat5A8TnFWvEdDZg1LFslXWcIuEqvCG9J0chU5PuALrMJL6V7MyDTvOCEppJnN6ivelV59Ij7BDlGJ9mftpZVrFhGkQ8hK7dmufirXx8V5cxecq32kF1q/+N7jJ8JQqo9EFY49pDSnEVPZi2dGdVKNMcX4Q1STHb4qzUMlGzEoM4DzNaSMb+VoXkibZeK9/nK4cykZL4AKs2nmWxkYQoIrmcaM7lKqH4aqhS+sU/bvwi96XkRpX022XuD79DelBaiSYL0lKOsDUfol1EpVVILhzyaq00KiDpzZhZHRAmgl5vgrtY8/i2mMV0nPHctVN9Zq79CnHbfw84uJv4kNotBBVdFa0tGUf9dhvL8q+5PdopgDihIhNmvD0U5CnJZSQjOyg8Flg3FiF45VGLkceA9+DzuZMX1QEGEr5i/dWloWmV8x0HLoZ1qboJebGhnOrpKVb7OPtzc+jCkJT0x/OUHfr7YmctunuYM7qyMNS/T70uPfoa1Y7scaYt/YtMDplo/zCFKVxA3DsvwjKqnb+8iMvkOlBS8uOnf3rfBudIgzJ5uM/2q7GNF2wUc9vTrbIo0IOQTKBge/rfT3kWh/ZJNm7a+HQlN/irHMCzlRW2kcRmlF06nEndUQUQWN5SW9kOjrKfT4o73PXMpOaqEBBbK3yb/tTA0JOSVhfB73QKadTiQUBg8rmigUw8KU9syQkFli5qR2lVJB8BZElGKZTgEO6KEGI91NbyI+H58bwhAIJQPg0FSJAMnOwqbkVVMuk5IuLUJZF7+6hodi6W/PtRliYgF3/qXEarifD/PhSN0Rnv17PEOZCFj387SaWwyQrmKGBe8duAgHn6CLIISVWWa4Kpn0pOWFbGNwpuGzHCLtPJR6SX5P0WmbNiOpe3rfpNHPVlxxBuu1C1PglZ6HAsYly4PWEI6BPMO62XIZvpxqSpaewrAR98483Q0tsHnSPBzCRI0bFKSoEBL0n4w/3InPpG6YRx4G6E8fHqmyrfF1brauv2nCsArDtD/Z31a8Sf8uvYqStjgeXHylVRVQvTGbdYPITFRnhqF7B5LmV7fFdsJvheqI4TzfeTzklLeI31bfUIadFdDu6XxrP9oS
Vjhd2xXAZA/2uxFfdacM6XfCAvMRptH5HNqgpnAvq3JkHJmXhNhxz3+5m5d7aEe3Vawpc3jFOiD/MT0pkgRSniDNURhw+LuQkBn6XwPj7cw1jtSNyr56D/j9ZLpfLUHWzfJFKJDKLdcfS085Kew8qPz8KLiBQXLH8d3yTtAotuTs9xHlSPeGOEZqZA+ZIlCHyo1nYbV9L+kYCYh3nvj0zPeQ0e9aW6fnEeYRoF/lyIo5OJdPsufbEAxfX5lsc/gyyrMuXf2tZbFzn/dAwSFjcsczZ9y+UkjgVXlEE6UiM0ydA/XJomWWwy1dbVNYiAE/f8105ctM7IP6tNLKU1b0M+L92baVP+Q9zOkn+gRUr7gqVUdciQaZ2FhStSo9xmRmvyIIxCBVGI9gTy+ErHISmUQcAV4teAtzXSFIyym7iZONox3b1pQKaI9x5oSEJUosEyehIbCuVrQnGnZRFECfb3slAaUkamzmc8tU4K06QsBbTI7uNKDf5jhCxQcn3IPgBYjQhLyIZKyGbeWJ+H2eELb3sJhS0nabMv8FYwOzjK1Sxd+GNWhwY4kMGvOAVblMdCVJN8S8aQL0oMx83mszUqv5+MYL0ClwsCB7doOxtNCHUulj4dOZjqpvioTLlyLbvPfAT4C19O145EVR1hVpVTgmXDkp2q80g/6wdPB+OI/kyfdA+2sFji0gZ0+GQCXjJwmverzlk5ffdbiLY/RoeushDFkOIcMwFSN0wmssISvNvYfIK90cBFIghCc8+Coh6KYjyd9cdHvuJYIHBAG2llfGgVJQo/7NHiHjVPclUF2alX1f/3UFyrxt1KZ3dDBiyqMFKaW5VgMaqerD0IXEZi1a6Blvxa98a3Q9RaSPoGDhr/9DuRhrSLysKOYmL+pev9LVuZXC7J90rDBm+d2xGwEqY1SwqxpQyqQBNHzafnWka5EA8dsNL2aa0VsW1oF7O5CoZsUy+6PHVHpB/a7wMeewF3SHd3XLBTVRnvZ53ddl3J8r9uYarLGL3efNcAXTkuPP1p5VlDfvB8fy4VZBEaY+aUMQck0kaThtolwrwcrVwicpQbmpxAvqTjvMMIzxTlcDzhWZf1e/pjWgaiZQam+TKZUe8dCSLxtkh6eKZT/1Qe+n2okNWfOy/jagv3ABdz8mbCouUGy4xzs03iQbutCP8os1NwceKiHwOKLebfkO90gku7YKNSCdDSzkFsif6+gv40HHFjiKlHWHAPvsmMDIt3tmojA45NFX5nnSogLjWBR6RgIVHcu/uz90Ba38edRhsgMOgg7hDZe18kDTz+cQgQxdH8Jvzl8Jr7XRn0UWxfoD63fZdKDkAetRHTHQrZJk3DPST4NoK0xtkXawq492UYpXhOuJZmHhoCNmH5cdqewaI2F9NoRDUV3t0EsOQZp2SI7L7ya8tqeD5GvhrqTgsF/kgOyZ/nbnqwuAK9oGrWEo7NAiSNaQCAoDBAXXMHAgT7hvSYAN7exfrc3XDarjkxG2ZxEwTrm8dPSc0aI2MoICZGgrcTHX0PL3x/AfwnBBXMWl4xqoOsXKw2Dj3QACWzSev65jWlBClTxo5X3DcwhqYWL5G8T1iEP1fBn/a4N/3RomB90+JA19Zx1sIdF1WQQ5gdH+H7QxzQGn80pvSJro1BM44gQT3chZQBGJRAfjjX2bPCU7UJSf2JmEo8m/bpXLVPHw0o/zpIvaiZIJM6k+PjWiGx3w1ydD6eIDMdHKKqCN8gUF5Mzd26gumPo9KVR+//YlEX/zD5NT/r46eB4+XQpkWHEdeHGCvFwgp6A50gE9scW/PN22GaF05S3OiYQ0hIIGnbqL+UsF1XVs8A10Z+0nW6v1by+W5qwUw7/RuGDXhenvdI4Pbf67T/wdZWb8jC+cRhjCMbD1i4I5ujzDwCCd165aYVgRq6MDIjp2qnHL9qdt/tDU0glfm5nPqrVE6V5uvPm8gHejj0TvbuCvXNYykrsgYq364pZHhIpFmOtZVSGy0upJhif0aPKXcsJg3VU/sWFDJ5Cl2lc8BVDeBVUP8IbYtVwdfkS51vWSBylW+DhicfaR50uHa1q4GVQHlzf/KEAA9z5MZNf4CPohHHH3S5PfuEQOR59z9vvZ4EXYx7x9aU9tv13UybWg5cjM2UMLrpoAKp65Nccp4O1DLA4C5dG+9BINYaK1O54i8Wc39Jc4pj8ll6OtCLoaAszUeKa8JNGD/2S3WQphRLJUFeI5zgF5n3ROfM0353izRpd7JsMkWCfJBfPBAYlAWmNB8zvi52nGp2qjjnOzS0noqqQHIm0IaOtFJs8sKV4XKz6xpVtS/8bJvmnnSFWI98hMQpflADb/QtxsWsf3oEEilrFyxzNo06o8i3a79EU/vkoi5LtKJ0Z6zKHh5dfUBob6CTKhj4x4KSSN/vm1RQOixPn0JBYpgaJuf9Civr9QUj1HdenS+1bQrdcG255RSZveB/MI3T+m3Tdrf+XN1+jhy8RCG/i9b51/NzAe/ANS9YMEQUSAGmskQm77egqITVHafHlcdSH0ja0ICj0WDQbNIgS0Fn+lNfpvJranjvQbA0CQKEwMBoco+OR4L0JCou5AoDwYJiT6O/13JQUUD5ZSIkKi+mdd0CRIYej8FZXxOycMYyBTk7ZAVRxr55QYrCNCdi4yP+NffeYxUPgvUSJK9rHt4JlgKitvpOJpb8X81RSwwGwig2kfx4aTNDFQZsrM2zErCqXa+PVNfd8iZwEAz9lYFHliPi+8PxknZLywJ4QBbtVdHzRp2iFuywS2kVdJctFc7iNSBzGp+Geju5N8FJgHMtDLso5H3F+CeHqTtwUk20FxLe3L1DTWU/npbO1Sdl/dt0ti+7sJQzLt3nMn+Jt0PxSpfd8dk7alqrrv4RlrXPjXawmEyk2epo7mT89W7QzkFox0tfn9Eq6TWJvI3PPhF66OL+1eM8rtQ8fyGuY/exx2kcrskYW/E6hhCHfh9J/OP3YuIfztCsTt3vDsb+9qZTBhe59rlnJz/lgvJHczeS7GgBU3f3h+xqw2E7urL2CYpzA39+26TMN6phV+Fj/2DrZvRHuPetAGLEUpDwd/EvxZBw7XxWKGJYGyl2F5TdBmk50q788aS+pLSHtu/GHr98YskV3y7w5J02deRdMhDa0nIZI+YNOhDWmnn3Dr+VX+lJzxbFze3UruxvFM3gLulm7M0WzR1DVI2EUooJL7gh85nQUJUVzRT5xAOVHF/woaCqgJAR1S6w+l6uH75gdkP/6YKmDCesl26FIfNFPm6G7s92aAbaisDwJlQfgB+Wtys4LA54gzSHtB6RCyB88y8VmmRGJVr23ly6sz/GN23s9eQ6GTj7Xm9O/USHs4dpZZxQ6ZLlsSa+nLElm24R/DIc2KgENzsgR5R4jlMW+/iPPn5kTIHp2awJr8b1RfLiPW9SoCTAar+aAqUQKG4gB+O91+wH/D7vYYk0aXBE2EhgO6Qyo4QtSJVHnZHISyci7TcpFi10LzYOyG1IrBA2VzcTmGhd+rY/scNf8XLCBszw0dJDUBRaICGiUW2Cd+cQyOF5z
5h/x1iK7IHX6ahCT/LYX/h5IgePV7WaMg7hwh0yw+L64sTUF3Q70C35wLZ2ZCKqtoww9+hvXR9coeFhnOuRGH21nSF9HbBMq/4n4MH2rLBpAopc6/hST84OdJVx6Puz+GYlDndiHu/FoMpYbh8UOuNBJDq0nIyhC/ye4VQa/mcpHasvzVvkNV8sem0cOFRU8VxYHP2M/1sdm/6dSP6y+GCkjW5s8mUrgK7sp8qoFh/2K8rSP18i3r8m/E4ek1916TMSPCYBTEcyaaltzjb4JF+PspFTFy6ig8x+uNKirNRZ5OOYtHwmS2cxc3fB92wfTUu2GPJPOIP7+FvVTSyTCBlU9RToWBj2a/QFfYDdzqyxA0N5MRhCI2qOqXsjjEV6erHH7FEgiJsCYuTAy8aaW8Y0o4lInyBPdBdS3ZdeFoGGtfKPE85VuqpBQEx2sag/bs3WgvjhLSIBivW04A+F18J3g204N+DvEbYDjb02HMbKhiS+JEVeGjqsqh5cznsyS0/IM/+WUAhsSlvhnUqCVt8A4yBNtxykXiu9rXMzLclmnEN6eXhvbjIHlUoqfZ7yX0Jw9PevtRHVWzvUG2E/X5cgNcbeTfSLppCvblmjfPzGk+P9yX87WBZIJgnEfh5/ImMQL8t52saqIWTqO/7E3BdPWHHeIil7MZD/imgvIkK1p0iI6AjcDAsHHtr6ED8NGQxP4Gd2yFpodQBLA8BsRmKEGxGdtLgqZE5ThH1LQy7voaff3TkxbSkaf3qQXowy4n/jqaQKkbJmit6AVrXQtNigYn618mNXsGXIiyLjdcgmSckj9Tr5R08rfNsm/zILtKhzNstdZ1b2Q09AM+ailA0otJLoHxXsz3EBcKx8d+WpNe1ZiLkWqq0Xk0zKkIzcRy6xLxgw8oGPw89BuUs7B84HI4k53c7JivrxSEQtHVzbYzEK03QQYGLH4ShRgcRwLSBPIVERwy5kUKLnVQHh0SZ0n8QrqrdFyhya8BXCJYSCC5xqrMmMHu9pYERTl5Jy4KgSuq+/yiqHSafKqmWFOh9fTbNtpyBZf4nPDPBHE3NS7rl9xeRDI1iaGVNbIsgQGfEcKpfIGWxgVhhI1UitE4P1BLleksuRJ43+5r9F5pOt6ZPAUYcisYDVTZPVx4sPsvQCpaJ0202bh18Sr90jvTJoAog1uo67748uwR1+i64+6G5UeTZnPOHhWvgsuwM+xPh020KSVY8zb/cHvQD4YOnIBbWK61v3/3FZkRpr4GXPmtJc3+ldWQVRhrTBhJc+IOz2CZZZZ5JwB6XTzuYQhQanm05YtJwf8KH68NhFdaJQlxojePK0r83NoihH/s0o+nfksjXNb2gYcwDXyzJ91ik1bB5Qpj+70KcTDN0abUFmbX28KnEP76knSNnSfTjCjiWNP20uB/cu2EtYGFlaCb5zfO5J4+QbLZoFkCZ4JznHZzO1N8LiWSNG5qXkgkk9FcbcsSbUxNGy7mfr6pI4wDHhHD9ookCbM5NhpYYUo8HkpB9B+9rx061S5zNqAYX1MONMc6o6aiRxkCfH7yXhEqaWSo851xJCtgjkY2E8GwBCEmBekGo5RlWy/2UA6+CCG3vNhJ/7VLWLTKdubvwcS5HMvPXlmgjuQQk9RgGmxV46d0lnW1S5eEkvIvK6Wi+0/OEhAcTuqAP5/GpyHzNswvQUkb/fxp5DamZww47whtW3KQeZ6wpIdoE/VBb8BWnxv3nb8uVoCUnD1l490WTjBo1jQyBGY06WrzN9xhcq9FH2XrLKUKG28ge07FvJnf2Q9ziiHgaMUULVQDAPJLGGVZzpVOThbyHInAWmB8gShdMXoocx/n2tIP5eUvBSWAuDQe50F7YOXG+ySjeUIDxY9yIdR7BdzEMJd9wB0Yg5faOzLOXQR/T1p3ISvV3yrj1QHK2KrRX+qKBEGKOKwPrfPkKRMZq3uy357bIq72R9A0fsnWHwVH3VqQTVDApfIY3a8lf8oWcMYnVy3CnPCAW9UA9mLb/CHfxjkatpLbtSJhqDDzuDfcuewS8tWzF9f8zJSI9zmob+JnYP2e1Y3GDGaJSCT0RlRFSLARp6e7D163QmgvSuWJsED2PCXBMNN1Fg3ALXzp559T1LIi+xSm/sBYKNUV1K0nE43TLe5kAgUQrEEspXAcr+BQ3jyPasTjSVv5OHN/w3qzdPhC2fNBskhTJ0BTOr4WmQntAuJwzzWHREWWaDfyFKx3HBDi7NHAPmz9eUkATSVKeWB6KMZBgyTdMxfwK21f7UMEk2IJabcuDk3xmsRezXeeW6y08mk2h1hllcWFCIrO09k4pB+MXIby5JnsoJs4GcNjouhxQp9U9qQE5FX01zML4xUjFY7OL7HzXnnPUfAwbc1KJM3zemMVgPDtfg1LRg/JjQvpQ1fUfdE41O8OVkZXhkJ4dwZ/I5xtic9aROAaQfbf86Ujz+ymoB+ID1tuqAmRQ65YLPTK1xZ0JkNmjVVdzQQznArIYMz58RjqHx5uqFCNvH6hG6AObVAdJga5ZdSvb0hF/LZqjkOUIcZ2sbJOEbZif+sr1OYsvBjAoKX0vKYLws2cwLwv/T3305y/3cT0JxAvpJNBU5UPX6Dh54hhGJf1Bcr7/N09k6zLPkBJWmxiXO5KDEQcOLoG7u5PDnz8RJxH8VPah3H6rfKqklPhH0Tm22ShdVImJOzVkKCMEuXcR+cK3xEFZREX6V5pvbUhn5zs5gEC/GxQlpWnfNZ/qvar+poj+Mmvpcqk7jDxiBjf4H15Qtff2J7a0C51jauluf2tMVxezLUwiWYgkluKJTOt0CrIb7ms3rDVOba2ZKurg/pRZZCQQgV1jhvCOlii+Vv0mv0tn0hW9w1YpzwzDxK3X9WQ8ucqkqfIe3UwELqN1dWIf2sn66N8uLgb24GrIffzTc9C5iL3E3DSw67Nt3ENvp4M7/Wgbp1VA4/9s8FcGw2oa7+zj9snmiNx07lGNbP0WwtvSquhX0jJr32iy+GYE8/ZaHzdQkPd+K2vvO8lpdYSXH4+LRztla5RYvCQ/FUtRBtRRihR+2STgaY6QVYj1e09fOCLaUjBTpW7mi7O2F9aTh+vZ9RixRb2oIZwD2gkrmFKFCT7Zn5gTCwGyoxzHHRW8zP+xBfJxoLFyt0B3JT9gnrgAoyGJE/EejUudubkFvQkMqAZGRubNfy4j7vzzo8OlJIwT+3rc8M4E/D9IFsApCIpq5buDT5P/4QgQI74XuTrWleomdgyp+ew1ukXc0wF5ENOpkO+k3uObDn0Bnu3mD3s02s2oV7DOFLnZ0TeF5tykMBEipIx271/cFSAmi4KWxxHjSqX2dA4Nn/bkOlmFT/Hd9HEJW7+C9gudJFflI00yUq8cvPCPMJ4tpzM77wNEgsyso4HR4LzYapG3/4r772aWFeSNLFfM493Ax7EIxxhCEd44A3ee49fLxTv7dH0dI9WIU1vKFYnzolDFkCgTJovs7Iyxfa9YS9IGOo9l6M5aMIvVhdX/Wk+LEVJx53Ypre1+wb4eMN2QXYNY++2wh4RMrk5PEO0McFq3K
LD2C99TmjrlR5/e6rmFZAx1GINyO80pUGHw+hcPCYxxbNHzOjYPZEcAGm+wZaQyM8zrW7G2V0f2ot/WyoSc4kwFS4eo/pmVXQhzhpS34AYha/RR0xj3H0/SIZ+OHlqSGENjuXt7Bfd7c/Hxx/tBnzdQs9u+Itk2jNiXvihxVIDhEECktO+qVEjUuuAHqMqxsvUXkPfdV5wvHrlR5+wiqSbG5h4NHqSZHtSjH4CpRDSn18wWjTGupNUhpEuv1DkXiy18yCRlbDPKwymVkLf52Xsld7IlUSkKNg1oPwT+AZiOsMAUnPEelGbxsp9+QqGlXysyfFDpkAXNCECR/WnZBu7FxRkLM1v+13aygwOdbt5lGa+zKO4pFTFhnjt0Xk21bRTP4ri+j2Cnp11IAMt3tLqtkOW9M1rtkMmnfzZimp73ljS7mxWphnt41L1IlAfIBvTzWkEHNOs+Za+J9HR6yjEEKXqjsriqP6gFRp7UJn1FZmO9IXbeeD5y3kEWb3WWNXovsT0tGOmQcPOseQx4s5qpjrLwMpoUqGj2ToXmeGYiIYKru/b8ne6KHD/micZgr3C/NxUcR8C5/NAul+iL/82ppmDM3+FGbw04ruAsF4D1qfpjAYmS8jvb5x5M8prD9Blxa2EC5B7hiGtS7g1AoLbWxCgSrc53e/Wqzw/mI1hzNorOqmFwG8LdGir9Cpgo9YlvdmW/OM4ZoJym40lfGuJXWiYbPXtRHFMxl4U9NxrcAlxexilUBHy+6GTlFOreMNpWWxRtLhO+/ya8hG+4+kc+LEUg6QnSfsNk3bf+oeizl8vG69vO/nVsgqCX2Loac8EqmKxABd8osGkjtAqhw2tI6LRVLcLvN7FnAmjNZknPBVv7uAIa5pMuyC+33gsyaUN4asx3+GLCTZgUGwrrI9617ux/Aig0LoyZGW54yH9K3LmGsMigCetK1zh1xJlObcjuAqRmWWgnB5ZBOIT8K1/121X7oXzrPvo96FNFRasD/CTmoa+9wvN0oWnd5eEcMsdKr6KcyjCIpqOtv49C7LuZfn3M9l263uT5SzAgPStvjLYuw+8HEgNqcFAVxLSOpF03uPPlb9HSWyiT262H6U6ysTo0arfP3MOMLks9oUB0YkuWi/Uf3Pyc3PvtJ539Qmvqp/U48cuVDOZ5dfHqGaTAReB2TnxqhbsLKYWYVZtesVr6sDO4vxFeYyCzACSgaj0+G9bLotZXp0jAP8egPZbctpWAvS1Rq7EUpz1mHll/UqA7Z9c32g+DBK/9gXYIjqTEcObYziayezaVo3Arb/SnWBf8iw6sQxEl5OPi6aJr1ryMlGU5uL4ALcTkoPXD3AnDZe5zjg+3jYwPS6CMw+aQrxlr3Eyw4nn37Uqi3XK8HjoHF++Rc7CUviXylEqamB671cdwqyahbX2y2vATopZFxg/QMxMeiXndWoXdX02GP4YpVhXFQljzUERWvBRXPs36hOpUE4y7V4hrIIDnLbFS/amBMUKPOwvk/Y2ioWy3dPwwxlP4rriWUAXohtFW298tPDm93UJ8ecOsk1Ljob45FtnxKZy5MvpEDdZEeL3s8As3+nFkbyU6UA+O8WDDfc3yQ1aRcmyiOOD/a3dGip/2WD0l2nQFsWb/GarlzTE8xtDC37rQRYx6lX6KRWNZsyjMeUkKzLs3qnZsFoJHLy7OpKWUEOSBGGn1b3Aasbte4Msqu98bi7eCCgvdWF1S5yITrVXO0dHzObMyC2ICQwALWyc3orKfkOR3ZZd0TWpnu+3a81K9+o2LEsLAdGrdqa7Ca1Ipl+sje1AiN1bmiKzeDi2MUakr12tgA6QPIapLftVcxy8wNOLJGgbBCWyn3ehUTmIMhi7fu46qMj7GSlKkQ75jz2zyLey7SlEzfxFRi83o9HBmAAiF+EHAxqSujf5jhJrQhp+NtVTNWq86fJKG3xwn3aZFTre3rdK1EDrGhWrEHMYEP4+H1P1o5AJKViu3qFioIOeK5wp3jYadxO/vvaqjhpWWQUTgw3xJVbmkheuqlfJJGlOyH8rzol+hT+Zy9DXfkJyWMjuuzbbxzB+03lBcxP35UbstQgZSaM2Weg2qZT5SviZXoUzMvfBxHPeYj0GBN5NmEdxNkSug/NY5SkKeuS4KLPOlw6UfR9IPKSfdzVLh0RRZ0bHRe8fltFlzFRsUPmIh0ERBxgjby7JSjMglhhGmvMM7C/YaeSurjJp3nj3CGlAsp2xvzQCCNFaTX0lhcVI3WuTHf/tTDpA/Pr0Qt0yOJ857TVL6l9T8SDLHWYVpK0gqrswEwxfepuO+Fpk5XSRAZGWRJKOl5ZdA69AtKppARVHQLcVgjqg7wVDvjTp7OZHsfsBeh0H/wp1Sgn03JDXoU1u+lUZlRIV19ZMPutqlZj4xkVoAYfjax92WWP46vf1PkUDryQS+uDuo6utyFr3wocHmYSvKon3QnvPj8xqX7x9+YectDraGLj3MpJRuOWEYILXS0ZW3Zs9sQ+n3hMzKNraGTecg4XiNyaVcZakFzY+gCOdE9JjAxXCTsKKzD2E8DDGqVhtM6Fq95xxNP4rklb6wCBhd/EqpUK9/lAZj94oNwOXa4GeY44Tr4kYJixVOUq/NamE6u27R8+v0od0dIG5RbH7vN0vurhs4fOXHG1v2WfVwu+xxe74SVRJX0IBliEIYnCR30YDs5PTT1tIaZUax7cMSAaTdBakkNRVbsGuJLQ/06Gsqx3SasNsjkabTpmFQtB4+uqQ7w84u5gzD50IEE6/a5AgyX7JZWGSZBi91JBuCToZDf/UI/g0d12qPdJcPUQtWDkkxTjuqLnCnS9Nywd+eCZCf97Nq54ADI6EfZLHSRjtCXLVlvY+j7n6SSMX8c41KHHtC3/nmUPsuvFbCR8qoXtbr/suebKTKVrJrP5jTv6GqDT2CwGXtS/lwOPwfMhZR1zyuE+UY/HZeYR56eB5T6MLfcXm6NI7ZBbw7XVyVZ8xwyhKfrRXkkN/v0ujp+93r6a0tPdNJC3uo06AMy6OSorzN+ERgTVsqcvmhL+CWFvo+dJHkmq6JqKIgbQ5OO88M0mHvaIPBvlNZcOGW+ptRBdU0awwLHZqaJ17wU1DmTirbpAfHabMpCsd3ns0v/guEqFeqapts6yo5e/1rlaA2Wjti7njOMcq7mRw7bqv9lNnW8M05KdiZcUI8B5PH5lbetVXv1lNJayPZjaJcDbT7ocTKcsfVLEcm4zrtAk3GIRcIDsdrL8D+YevHFC7AXvkLVmJnnzfyFDctQwcJEGiariwf2ijiomxPCv2rpN+3rp6yH0Th+xf1aCk/XaPlMvVnngX9wnFJXRTe+CI20WLe4QtdOOJ3Z4ijx1ipsTvtGqPVFPdO79UoZEt8WOFl9sjy/I9tWO4VsT3TuvmZOboHfcZC4Pu6XhI9iTGAn0cyys7sbNHXb8YZIGzO3ylNxrxQcAgE8ZWQgOYVXC3b+bTOCCwrF1XFt4ZiuRDjKIWgA8idxg3yJJz5sghbnS8cFxDH4+apVLjsTx0Q3LNQGSzz3F0n0qjilnI2BgR2
VEe6LdKy05zzEGLVqO6cZkZQs2p7+1IKnJfkt5re3++evlmfNuaqgaMtqlqPjt15HMdbrB/PCkqGvvyrgPiyk6yVpCWjRXGU2sNO9Y92EMfLMI+5q5aINHsuoKafcy5DkEAglKZzZulNtyp+vxyrVL9Nl5dxSNCvCSpZIz34QALIevUQqiicOiuaWunKMAtX5bmqoyf654+Rrxt1dCtRYJT/bIFAkXtmQ0CpaVTFgSQW3AH49MgMUX/Gpk0tDf79MdzrBgVD+AvXycPZ/ZsYoGTlfX7sawsNvLUismlryijroo/o94m03mTwWcYO9B9ORsRWMTzKtqPNVrdMXFehYb6XlMq2OSsTiLcI6DWMemjBolKL387vu1b4zpMHwd38W5fO2i7qy9BfFEQXjFbIVL/5lkl1VdNw6XBt5Uz8aVlmr58z0GPDD7Kzo954tEIBD16RYPRBnM934x8pMG1822KNgj4QVR1gWmTXHNR2MWJCydh8ogdkdKYt8c2jpc6QuDqu8R0syLbMq7TGHPRh3ikIFnQKD72I6bbszvQJ3BmgZg0RtUuXwKx3UfFXlMmGCjKOm6w5vQSWcp2HPAaDHbN47jULJNWJbl56Ql2okDueAMN8yH2K01HA7Q0JfgJsZbOi0vIvwh0ZLxeTMN9mvGB2z0QnvaOzzCi3IyAHbhpX+3WlRRLkO9XVH7SNMQ+qz/szDKTZkgUqBuBudsqe/detuERr9ldWRLefhGyo0pF87gyUetiCAh4QRrxsbscpK5hM8BfkdQub6SfnXlF2tBPownjZnep/CYnQNxLLCFotMR4URIpPN93Z2TvPqeN+XU/t02tBpjcsvGGrQlwAOLtAGMEgmDSQMi2I0N+Q+B7jqftxcU29z1P8ZtseMF3k3RioIYbE/o7OIuDMxTGUR8Ro/V76wQoQHrYULwHOU0uHkD+ivLX7MlMdjABtsrsbYeowkHZZ+d1OJ+ai+5NOwvMhftOcGCpFyxZ0WSEN5y+oySQ3wwjgmRmopkwzhJkMZaEVS2WzKzQNVeL57cNBU6Q0um7Ic/j8OYqpxr2iSLtgfG943zk6xM8U18hzaeirRhQ/4OZ28paYO2VTLu8w4mpRMCPE4lRozdCyr9mUMLuzUa2iVzuJNP+qE4Ej7vlcConqIbHFKs4dXHKDq84+RyzaKUTVmFtC5ypvvaW3aPx8NVyzIfK1ApdLl2YvsD1NU9setHbJgOzYzG++Ud7e+8FcuNPbhlvMX3kSkyOnrwP0RVHZ75zOcdRjWkWSTSpjCGYbOLdwuGTJoJ6INzEgQuaUB96sbJtix/EdayXTg0ZsudxRONRYW6u88yt4xK/MA5nvjFeV/B35fvnUNr6ai+mH/KYb0lwvzXpEDGZYum6h3GcN5nErJgfbZtbEYKVI9su2XcYaEJLZJq9V/qiIv5aZjFqa9pYiXsKlWpyzd19VyJvKt3JnikmW5TSComBv30cifr7ID/z/fGFj5pMyPoxMwRsy9FRyIKVyDk9zI0kE5fbMv2o6ocHccdvIWIWwCH09KJpZtyf6WXc91TiOhblQctk+k6vb3ab7ti+7thMg00kYQKNo9un3Mq3I4IAm+sVYucWYX3h14vAIEwbEJpJAbRTsA2Brly7jXSVHcSPhIXy7XBsymwVMMr3s7DIMuz5dceitk3mPuAi9HZrAwEGcG6OC8ioxHTb8E4yj+xjY5S3DjljDqXkIwHhMS6r4JsbzxtldHtPjCPnx2mM+l8yfzoMK45IjTvRXKRnUyvHU2kCIbHrdrF79rOr+L7d5nOiU1wEBN366XsxBtk0oqzLnV+pTOvVRe0KTsBmn4kYfdITp3J7ZM318PN67xbRXeNYk/6ytiMao7Y37mOoUjz8MXXSKtN0mur7ebdJ5iDJxFuffRClmtPwoRCJvK4G1WhY1rqZ8/B6YJncupnh0qizxoVa+/7Wpsd36TiNhDy836E9pm/WjDXXr8i2tNSWCt7Ru4Of7uG9sHgiUoaKBJzrOaHVzVKZEOWLL8KMPPHDz7+TI66w4KCWwfttx8kak3mkHTVnb9zquhQ+dFQKojIdc0wI9hUxpoN/yteoGqDALjWnXsbxmYmtQgnyU4auGtVIFNU4JafRcKXU7Yei1YAqyVpRuA8TPnMGAo4mFb9Tn+iQzPx26LHTnQZ2ZCs1bRlqBnYbqcL1ssjjyJvwvGrIPO2Zslh8zqwXGgNfj+4+eCMYrHlNn1ELSJH0XeUZ79C9OGW3laG+4+2dNu5Ic55u/Ma7adha25q2jV1HBqLnenjXasbQxfL2aFWXBTrHqb7YStHSGfOFIDrJDdh77wI/Nkt5dpn38Znt05SvZLVimhO/du64fatvh6WE4yc0/aHJqaBCggolNrjiKAuhA2KiqdWkeKuJn8khMO5tjqOJ3hDSqt4tf60qFAsz8VZ8ddXOk3wCbo3VYLNIgphVT661PvS+KpeEtc9WsUJuHYxW0e+o+yg8iUhcBXv1PEpJ+KwO18EeF5OYLMieGvMcMhZ48XJM2ND2oEG29ZFPIMX3W3zgcUIdBaoHmIdePRso+b7hYMi+C09a6v4S2cACjiowTP1SYblvnRJvqj1Q1AEi3alR60KvsocdH+m6dr8wYzSgerfO5/v08N1krOpipHK0p6ngPSTe61UpZq3RI33UGpOab688SuNmW9+5mRljmUTXDwBYr32O6dukB4aNJL0Ps5EDXDMun5hYpRlx96koquRwFP+zHFNJJiC8vWUdHy4diNcAqcz4M+LP+qv/zQrIPi7uS1tda/pACGzKVgVJtRRSKGtw674yttmFOMmQs2cJiefvUQdCghiYiEaNcvJ3gCK/dK1etT0PwdGpBFtbDx6TH+FqJHPzSqGOyyqeaaLytK2gAiVXW7AbBdVxPFE3mbkocwYAmmon4qRF8acJPIN0bW+bVg4bGswP1lYHAXFf0t2Ot74KDNeNlbo8vFhTltAFD0MazJqeCAwPfAnPWGeHD77DjAWpQD0yhpE8IlQ1LWSSTwXVpmWXLHBewp8z/hjONadbe8nSDcNF6fcvjau9kTTwbiwdWcaKiPi4xMznH0M28SmJTU9Nh4muVJiPIu3WtE9zGDgSqkaAdfrQ6a0Q2DE4XtoM8PUYdJbcT5a0QsODBL8t+lEm0Q2wB7hGa3onZNWZhFlb3ya+PGbD6o7Ql6ESLZM/0MtUQRzk4EWfNfVkB3iFnSTev9bTFc+KBSJAqcl+VJ28yUXFeRVAEEviIC9K6p6h+zfdoE70VrRVWBQx+9qinyi9nYifdfw0r7jSr1l9Bda6fovn2ip+5u0RY67Fvi63gMi5KR6DocZkGR+EIcT8yo41n08GPvXHdHx9ERfpE49nYJb4TLkCUCLS77fyih9mAt6NEbIFmuavzL1aZBebSh3tlvgFoQOF/ArFWhfDxeo3U5IIvCZPg7ItqczDDP8meG+k0q096MV3Uff5J9rcMFskKZDuo10hm6gkOCRxAq1eLkVJ73jIZ1NZLud7MPGDFvwoWclvWykmnn9StOm6hw2cgoOTty17Qu//jA7C01NY
QSu5FW2FMYHjVPY3RuF/+QPX9hspysMeL0pF5LdiE5/uhiVk0FH3uO/cWircMWo7SJVlrfokw2f3/rZSR1QJXuGnn9RUApzEmV/E96qmDJIQFpEQtJvpQwa40zpPprbUBmYf60qGW7YxnFtffcy596DUH/U9zby8FkS7DScosE6QkublBZROB5QRkBugRrS1FucgNG8SQqO0QST7UNDIKrke4ZGVvO5T4sukcimmk98RhzKBqII1XiWCorXKvbJ9vcwtBHuXdjdQUsiNo9+uHyGZDlof1HxH9G8GK2LemJSbc4mpemKPzFy1rqpbDCqmH5QCIP/eUQjro6HHpXbhfTATO/iX7czw7X4ZXZwI43wxJBvPufmMAMt/KUgXi6i2KCqxJjvvMlNgF+7DkBl150tw9+DKm+KuypFPq4x8FsNGv5E8DyrYE0NVT0C6MC1nYbscLkYvY7VAhXRmlGF9FEYXU/UXHOSdTGm91XUyr7UJIQBbKd4NBieU1B6cnLN60VzLRrSHNMLj0RFbOx64PthXs0S8jY/D/kuvmrWSZuN1hj7DtfyaQhfBm+8imLNW4p4UDxKtOrCieJG5VLLjYVnUoRnZ5h1rVg3OQmNapjmhXtfyfXknzNjysR7vEh8mGDcOMBtICqltOhP3TH0e8YCNxRXD3eGGPRagBDc0URq4ydLQ3+nNIh1YM3RbrPThagqrmkuDl5colmek1N/aBuhmG2DP2vT0vb51y6wnTSe0x1aTSdKURuWcNa/hestqh0n0TVcJdJfqGA71zPQaTgH06QHEISP/Ul/s9qytnC1r2UKKX1/3sx5owfdnYKT2RGfa4+9oDVGPdENZEIBHZ6Me4w3/NmsqLU78WLRmhxfa3HfwY8/7aptF2ECOpi7fo2zhHmm1fv4ZFssIIBkkAGURW645COk2h1rJA3OPCdsgdxgQllrPx4RRWPhYgfcTIuD7u6/fOoiuTKLUdNuUUsaWDDOc9zNl9erIlvbMmzt5SWf23zJXAjf65gnPjb387c5Vq4z3MKxOsz6mHXuRJ8fUCz1KqfD5jvoBQ+8uozyszZnWqUh0ZPm9bMkEgcSlSExK0i69qLDm5XKDGCaRdKAFJgY8ddVpJ+SOtHZ+EtaZrJfoaGiYuAkI9axLwokTQPLNdURAROJ4Imd5+TsnquNlknDPSAzNjjssyzhBLPqn05wY7OaZyTxA7ajQ8yp19Yom7VeqCQ/cbr8f+6YPFtpa6OnK6Ta8Y4kN+mGXGjXNM5ksa9WaOaSHl1vnp71qQxshYQpfL+07HktRx/L3XToS8CxZ2df8FHi2UKcU+diLKSDu0tpIuJpHB6WLiH4Zzn976tQVZ1iOtrW8dTk3Z7GLvdqbw/uxvuGCiNWgxhqM0Kn00D+YA0xYvir6o+bnMv0ol6xpi3vtgTZMlf8YEIo6eo8iEqz7uEBJeWbLUmSI7v6rpA8EWCBs0U/iDPV0qw7oO2DM3Vvsl8CZisBtPv1GRSOnPtsrWeOmoUS+Id4HHkbEZC+lHAdEvLyxVuyjDtRHVikhfFJaAVer6ket9nLf2yJMGVpzJB4z3EedV3k+QrZ2A8mpOKDrarT8neH+gEC/uq1FVaXoRH4IiOqmPLrfqtIMC3BNsaTkmp72rgBnuEXgBqSVXWbRjnch37yErBxzFm5M/bjF8DDoYLNm9r2D4GCI3bSodHg1rB1rfne9L6jwIxWtyQnjIY3fTlOLisoEwKEieJUbUNAwfq0Pa60L5p6eJlaGbamOnRsKRIzaJ3DHoSRh+hQG/btxjre+wM71jmJ4naU2e/Be+uDcjmJK5iEajRHYey5Xh02WgbMpLP7WlrJOWiw2UTCOa31DD/N+M+FtUI9a9+WvutfixMzDfq/wewSo5qXsTC5zWZ81hH44YoNo/WgYxiECfL0zmUY90OWX42K0A0TQVpTDFSw36iVNNo0cC3HEnO+QJPe323xlEYEmYAOdi6zb1pKPnvUEC4JZ6OjnU9qVP9OTMahnPIxjpTJ9Tw2ut/uObr/6nL8SrsQks72jeIRjYsLnUq6ow4J3OwoYxPYCt9DRW+raQwwyEJzzaRzTfWcdZKr+/I685yUMbM5mRz1Y/sUalj+KQbnJsuAR33D1/qonjJfm2w2J0vTjVUtD99CTfsrOorO/SPiVpRkVFtbfUrWTnbIetC9fNa0UOh32jRhUfIw8infRCz35RdcbKHSXvha/qOU7XuIU6CQV05yuSYMd3JVOzfzOQ/vdBsT3+xG/MdHP6JlFyZws1qk1XQWRnMN8M9GJAKb/ckn6aNBCjno4GGQF5j+3OHvrN15LSNAQ4U2btpVPUGsb0Ho72mcbIOAo4Jx3qqS9KsJeO4l7swWN4lHp+B5yLDjTsXNw95x6jzfO9oukVUlcO1RbfBUxL3Pkls/tsoPlcjhnODUxonT7Qh04doRgzDN+ux5lclgyN4CJvy+ExPpN5SfplCxN+lirv7uDADVrNGCf1Z3bag2+WBIT+ahobpi5U3mjJwZ2LxgM6SdfX22V1Wm2QTpklnwd7LL9KoIgYD81Vr9qBQfgXVCrF6Ii9Vr7NSnkGhPfXSUNAiJrtcTNg/d2wBCOzwxSzUkGSPD2NXepf6PmnRAGOGP5OX/1ZWZ3X+P9ncOfNQHG0HQ8aNhNizKJ63pupNGdE1TxuYvjRdJXUKVdX9ibyXtPQVOPe2CgIVAZnoz+GHUKvK7RRmWPGIFbs9NKyO0zXYIdEh7hx/g1wlqg5tXz5sxpjaqFDy9X3MjXblX8rpjx2eA5bu/FnSgZ2vpGMEeM4rTVj2+MA/FMhEdihtsB9aN+Xf9zb1z1yzE1a7PPcPk9am+e8beROaPRUqvWmau1BKxNzR5X5dhOLpxiL14KCmtoA8oVlB9O1MW2kPLeapsE0WUjc0fe6Ou0zBpbT22LCLgRy1X+fWR+g6tisRYcWJNAzRTFDJrN0ESw+TVrLghTA+Fu+xjFcW6n+uT1bg0trkPzyUO4thmbCDmlwm5CHog4uODqsYpTbpDepE6pkIfUD0SJDWQ7QCiuppglbYE69GZqva6uBIEftbRo/S6zNhmr+yMeP5c3CHJsHOhHOmTvPUy/iKAs+Jrb8uGCQrLbkT788uQVC1d7W0y84FKshSih2eeYbBCNdv3NKmtDi008NjBwsGmJ6vLd+4wyghliR9dXKZKswEAPyqBc0RD1NxN1vsEfLXIL49f7VMcpQ3PH2ZwJB4H1GSX7Y7L0biuBykr0bEgaXX99YSSWA38oASl+x5uPYCicbeHeh1pRGhpXmKKNtjkhhU57jHL6/p2DRTgy4Tsa/CyGsnmoa84K3tGVqCuh4TuiBetEyYsQ+UAKjvrBp7tTEOxns7GZSdIaP1hRUmM+PtaSk1V78qqYCug4fwgID2JHcqi+iMAOpDISUZcZfTu9TER+eaWQ8t1CPMMjRbM6QjReauFBzrD+4A/zmg9kqB2yR5qrdu1N6OA9qaDsDgH+nRW4qPEr0kOobavy02HTbYajANyNEOelr9YrU3mc1CL1d5MHmqEKSCw9hWkODq9tslq
Lp6H8ldaK1fYzf8w5ootG2u8uk8dRmOPosUcjkUeW+xPz37AYCkKSsLNLatmsFlhALWAIfwgkjcyA/Paw381huC6s2WQHGaAe0svDaoW+H+7L6TuEa7OyyUAmlo7DiM3t4H6O+U6wy4G97QXFMXxvqSHjswrc1AH1Quc+dERl5pZyep/9i0zU18FkTAXjcXCFRUKpj+AAala8El09Cx3W/DRlVbHpv3HK3DB5pZ+5Tbr2pLASi6kob4UvO7DGILf+akVJPTQhfeu2736XMnSVqg0F+SpMiMeDPS4bFGo6G9pv6qKFhQeMbCfe3Ol3Z+ZNQgX2/KiRSZFw7twgtDL8N0r5vRd0/YL8LBwwR3Q9mK/W3LjywStssXmxYDOhR1zRYqGJQ+JuLNm+bPb5BvxxDzytxOD4CnzmxcVFSB8WJ45QsO31MYg3JbQme2XExwruz5zkIGkuSA4U5yUktbGDoxAw4ktUIiZ6MSqRalDdyuztH+2W5R5DHiyLrWlGJ4J7M+JtTKt9QnVAK9d5uMqqOXT954lQJujfghiqQYlpHEJVPc7yQHp4nMu/dGcJtNIaEvRaIxUIqFLPyONsTZGUoH7zv3ZKPTi3qhDgr/HOLXfmI1Ix4K4SyPqdV6akFbl82KVg0L5jvGl2BEUMFA3XoNUeKTLZENGKZCV6IDMdBtCWMLB2HAcPxLbV08gwMuDcm/IIO4ls/QyTfdY/yJwJcxo7i6+qldSivt/viMVwlWZm4Ug49BSD6ZONQJqtOkKyL8Z+fe15UVPkTLd6PpVhLsKKrWqwH3qvhgq1J/FaHnQsajN3IYp+NgReKETlX4FdjAqXcdvcHJx0vz2RQfJ0Z6n6pBsXWCv0Pe/nrzZ6NoodLnDMYVHiY68F9Ispv1wvsAQ8N12T72Ab/BQxTRyuD99afYjkyIstbPWR+ylqwUPNm6cNN5dNqlkhsDBWCvH3YV9pSJWNWiKW0tq3EOyRrdZkxeRtv5B9V5GNEspQg2zQY9+ns3M8WBlIipqtR10t8EgamRhx31WBP3Z6Y1+/c/0uhqSt/InFX01vzNClLH+fia+VfCkxnMDmtv3Z41+e8vZZC/yNEUEFduXhgbz2Luk8M2wy/z6QyYxyEGX6ZXIyQVcqEFhpMxmVGAEO7h507R6ATB6uhW4Ez2Xgb2bSflrmT8ILg7FHRIxQW6/YOrWGzGa+SCLJy+C4o4DHbLBrxLLLhRuD9waJKxhPHurSdLJgqqBBNgaVm6zzA5tuJtSLnT2Wz03/av9K8akCUrhyIZl9gd6u14elHeMY7tLzXT9aH2sgrpmG+eQh+u53YSU0wZZRft3zB4G0rjncKvW6GcPX2SPl0McwDcdZe8WO0jwI0+runv6a5Agf0lS/5bvBhaAY9ZXaNO8x58O0da5b75XlQsSdiKdp9JA7QBhckvnaVL/trOJUmPVGROU7+nlJLK+/X0tMY5H6ANzx89H7LL5Lu2UjfPYEmjPWDzJ9CHiLW+sBHZr5CiP8sYsAxYP9l1zggSb+Vb/JrJQ2kox/UGxCUVcnhZbCzV72mBfPVYrscy+yDs3TpAtia9R5DWQpNLvrFEhlkHiHUIvDGYFPnti8wB2ZX4PIt0uJeK9f4Nf3AlHjbx1UwwZwJgvm0zweXd3+aajoYsT0ozkYnvo2GtkWiAUW6YertnDfZyIi/PF8X+h3Y8qMpbJdYfprJ5NXxS+2bu7A5bj4wQv7CnIxkyBfD6OkChb+KByT0nmxb3P/891A20Fh0YXf7zuoFbCBgI72FTNw7Qm/uiNCDxmJfxM4yEb5ux9IPDAANclvp03azpuu5jN79wXlOYm9fMhwfr4ng3qUM0lZbpqw/hxExDeTXnWe593OU0M+RwTs/VK07KOgCIJqfQnEwmHNu0Mo5CW/cliL2OY/X7vN5F87BSHMzXaKeiuadI1z8zuDLPDbbPn461n8K99l3oVTn1pJcPTuTdODTxGp59bcS/gYzWqzgrC2Lyr/sNQF9sY+KhStuWGrsshGtu1lQo5Z8XWlBieQGX3u1DftYiodAs79EA/WVywUvUk/FS6+jd+D73WkiyA4QPZUtuYffG2Lgb5PpPDhv5JjP39hDNUISpvrhtJJA1Xl70dBdcZlPHB1j9BfHQkTe6WRwgQCKMlSkY+t7qxFvZFtTNxEeNYrQOIFgwXqn2lX37Gyo71XHyKIr+pFiLhSJPidaKAzZRnLLGh808ADdlV+dTJWcx0owpAsObuC/IKtRrusj0JCr1jl9KRsBYZ5QzmOuqYJ/eK8cs+8yQ8+vFaOPKNjs77wdlPjGWKtRvj1bQmygoaxBEnS3nKccIY76Jgu/hRdxefgSMqbPZlAjJEE7oLjGr7CzAPLXRySvbxfX1XiaUxib4bDWZqwVBdNiWPZpZviEyDL5G97vLIMJfTSH4WsSfbrlyj4GjaC3KsvdCBioIaHKUc7ymvFTD19Lkq3ud4b5X1Qw6R1LGygmqv5R9g+T2wP+0DyX3XoVTDRHW76aBZtj7h3boL6T4Yp/bpPQ31Aw367lXe2y/f2Vjh45dJgaQi1+3J9m8YRx6kwrsMJe2DDMPFpzr7AcUZZk1vddT2PF15NVBb0FrwJ6LrsHV0gdO1erxmN3xsHiky+Mc13KyGxfEaDhh43D6XHFA6QhG9AhwJO8TKDkkFtYsw3loHFX6iUDZQ+B0IrsUR36S3kAJyJfJXda7v3BlieqQlbncNGKtKYc0BCNudS+liBqm8qb5bvj7Yh8ZkVFh9GOvZm0aCoyTvrVaLL5DRT3ZfHwvGPcjjL8C1fTYbfCsqyuGo3AfTpGso+Tsvsl3ocX6KmtGSX+yG2cFnynTlTjgPq5wdGotLIWtwyXCjOaTAcGwTLPt0tgGq7DOwbv4SZPD86ZEFL+amxHDuxJLMzQqUPni4KK0cODQJpBpDTVfy0oTKWsy21+MZSTSrhEh4IL/N04xFjRt4vKNM3zeHNTiAlVI6V/pEZfjxTo7Z6iDthxyVwjfD+8CEz0p04PsBm7d+B4dAqknyYpRyyalEi+jjBtJHs53dMrXmsHesEMLY/VDtcO0BNqkTid1sIBvlYzCxl3MeJIB2LEfAE2xE8arOG6JsUV6NjlX/KeYjOwncfx626ZMr4UnewKbpKxGhyMe0ncaxr9EutDJQ+5PLdRL+i7eKEG+fBYQxwWkEoToA9AAiYM5SuZP1LQ8ldbAJYwUBw92vHEi2PLfoQHu5CXz/Zva4F5djlcYLBGJi/qtiPETsKxKbFPZzJ1SPyqZ8jMfPsZ/rA+Q7et3J4BeQl9sDV+eYsGKA5u/9TDg25VLpDAgLMPBQ/JCqd5VIoQg9JfqkkHgA/QyXqEZlYGnkmouiZGgXKIwUKTgKR4r7fDY0CR80MI+RO3tf6IugEntW9gH2R94+M+UtF5mT8wvD7F02ZUlUA+jGj9AJUBBsu2x3+ktkX/mNCl6CR1Je+f/lTECgH2mkkcMpsItgFvMWCtQdVi8Ka++3WgeH4996CyUi8PCwMmgMp1PRPbuug1jrT17/iD+C5goFNSey/rKAAvzOau8
qzNtceJCzuCMKhqQ/LSIypV/+I1ywnbxhQC6jUlR0KEPF7jiIRBlSHpHk9hp24THG1ofr4C+DIG43eFzVDP6+jlzTVTlAjDp6R7xTJKTroM8rEhFmpwLUQpGx2llmSA4tRAQhT++J3o4oEVKjEDm7pDWfuRbCkyM/1Wf9qs6JkDhKIlEijQyew1HIU0Okj3YDngHWw+nxwjdFRo/FnCbg3TeXizR3V62FNCjESy4TzP1W0EiU0N9CvX0EZWhB04Bnv1fTPq6qaK1zzK6wpHPnB3BwOOpOYQNIuYE4eLJBBv0xfBlE/is5OVfzXucxwX3dTe04u0OAPI5sOzs+NXBTFv6Hc7+/TtafH0J7Na3aCFgSB/2waoznr1//QhPL/hrLdKWRDl63zQxPQXz/4A8awP39z/dXwt+9Hla7lX4+gkD/byqwqyr89Fyb+bIyWPxuKf3/4L9j/90qA8E82a9u/9eD3GYGq9M/fEKBo4m8QUbtlf7b9G0K0zzuYtHpWHFrWq/3zAjFtA2h/Brv+EbWPYfhv6KMcoOQZajb/n9efT8Vf//+eE/+tYc6i9o+16h7pA6XRUsZDNKfL324DAOMffjr/5xbAhqBff9/639XVfHhW7Z89CFz4Y6nu7M/nwNh4/uNT8mEGywEmmIi68Wnp42X8+87/+Yb/zrdmD/DqsjkCy9oPafZMKPs/eeG/dhKXEeQQ+bNtyeYdBCBA8fVfTcq/L3qfrWm0PlAN6qLlzzf8l4QBMMl/eM1/NTDk7waEgJE87eXaPfzAwT+SfKY3in83QM/3caj69cdPOPNvOODxaFuHP5fg94O/5oFrsxw8CvB+lUQt/VfzOox/zkBS9YUNvnB/YH+tpPXXU7D/JsmB4tDfSQ6EgP5Rcrygf5Qcf/vd/zvBQfyvExztcPyRZmtUtT/ahtqhL8CYqmUdQLf/L+nkX0DpYLaq5C9G+6dE/R+J86ie1fivJcI/o+N/H0cxR3kEMkP83x/i/940j6J/ry0R5B9pHv8nNI/+N9A8DkcwQaIZTLxIDMKiP/4ZEzxmItAC2yP5lv+PLgbx37QYCAb/3WKQ5OsfFoP8J2uB/GvWgvwnAuk/zf9SRiP4WHVRkf39QvzzifxbqxLFWWs867BWA7gaD+s6dEA0gQtMlDTFPGx9yg7tAwLAu9D89+e55fcyehmzZP1rkaO/fcmr89GOf93yfC/XdVx+sgfYOUnaw/+jSoY+r/o0m/9HMgBT8E8d+QbtwKkIZm74Y6v+SIek+QO0/rFk6x+/T8sf8fVHGjXVHP9S+CEA6GrDWuXPmMBIlv8xPpL0X8Oqf5v+v4gDRv+RONB/Amv/1vbfTByv/w2JA/qfEscybc+C/dEOxfBHvD29+rXioATJozSWZJjTPx4TBHn96+gAJv8TGZD/SAbYPyED7F9CBtQ/kMG/o85/naj+C0b8PxfWv+9/dQ3+7+DNvwnqvy3K6x+RI/G/THBj0D8syr/Dnv8/LcrrP0EbEv1XadPn6zwARPvv14Q5Gkv1sR7BHf8H</diagram></mxfile> \ No newline at end of file
diff --git a/diagrams/netdata-overview.xml b/diagrams/netdata-overview.xml
new file mode 100644
index 0000000..4d9c3ba
--- /dev/null
+++ b/diagrams/netdata-overview.xml
@@ -0,0 +1 @@
+<mxfile modified="2019-07-10T07:30:20.643Z" host="www.draw.io" agent="Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.80 Safari/537.36" version="10.9.2" etag="JRR-u-yNVbUJwUu5Enbm" type="device"><diagram id="319b92f5-ce0c-3638-1d72-e7e1d323a6f9" name="Page-1">[... base64-encoded, deflate-compressed draw.io diagram payload omitted ...]
IsGjlivId6nkCQmOubRc5Evu+K5r81xbrqTcaCLuGrzsvDUI73dMVzDQcrV8rq1kXYGjHhUu8YB67S13eeIUWi5jgeMTzgDJscvToidFXOgN4q4Cy5LaNAY7j2JM/I6Urm0ht+Lu2SYgYCBgiZYkvuJux3FtrLS2sAnOj9w2T247oeAG/6TB/cXyKNSIs2Z+J7lpZI5jCF1BEuQmnFoMCCaB43KAcmwRkilGM5F7ke+WiInkkUhHVkhw2mJ6R3Y3RpIGUCbm+DrIgsKTHAUCIzdHaZ5OiBKGljCmltvvCLsaqL3CBJ3tpTaZRTx9ST03YaoRGFiOEqCuY6vPqMhRd4vUVltQ01bb5EfMRXsT9JsJTqM4AhbUkXJ5Wy1c4Fo2DyRyPGpD6OXYlSc2xZcIvpKxZufC37/ai0KjXGjsCEr8ZNK9ss0D15NK3gfCJ0F+5OYMpp1pgmb52zaEk3keG2vyq/0olM0ILgPqJPwoWr5TgiVwORZKteco0EGqB9Nzw6WEj97gScHrh6c/KRY9qj/Vnw9EffiP2tezkz5DawubACrIRK43THVSCUiqBGIBiSkNJGEuydHCVwDBKSGhtvAGI99YLbNFiU/NSdhtpFG2le0AaeQ2wF7g+La9LJgcjC2AlQoCLoaek6PhwVNjxJWtb3DwmpyCnxTRIb/YT6gQYM8Q4BGPJeSMsvUt6TOfBgC7MCB5NoREWTGTzeplJuMzj5gO+dWeQhrFAbArXOkl0mmp51uQB47APuZOnum0gqYJDpVeMWwvOcqSnOURCyAreAsN42ASRXGpYrlsfZzO8yk5zXpDP+ShbAylJncyIQjGxXGJfiD6zx9DVeI6FXeJQTPz+r7lQTP6V8lBM88zX+ifpZkvr9E8Md0l5qPl8S6xabAhZX22TdK4Sw1lWbWuT+D0mcDnjHfJ3nJyWsufNWevZJmBruBKzdaKrscL76wVjdhjadIS/bM8Z2kJs3AZsTucphkg5uD0qKWVkB2TehnYC7RvCLJ52h2/sGPWRbadzjMH4pOQzT8d2Su4jX/CGGd9Cxs3wzmdiMMMMzg/az43jCPJW5iYwSLnL6EQJJNshACuxVwPCEmlw0iO3n4sSMYCj1sTLLt/TbuwPFrt0He4f38ENQXWNTolDlypI+7LeReKsBbygPQI8zxu55ghZ8MUKW1DgpzBZRPz1MdI+asduyzy0hHEucDJOVYIWb4LJIHUFz7Ic/dsZ3nX4KT7tN0zfUeK3BYRPNo9E9sJApJwmFHpWdAVgEhMEGE5evKJEFmCZznclEcnchHWq7W421aCzzJtAg5dO+EuQQRYASGYcOAIx8lROy/07jO5DZE6Q+CG8zzI/avdoSxKtCFKTWPKkgnu0ApsxJSsFV6A/Bwd4nbKIW4LliE3NI7rziOxiv5qbyjDMwUecA8uy3MRBNByOAugJwMfI5ijN5SDFegdT51M0DsX/v7ViZOMzTZbLgfcSYc7GBXEd0VguzBH85zxdFQVLlJnE/OQDL3hckmEZ786dZLROcVB4CWS6QSl6i9AMEVE2izHzGhO0wrcMKrBNGo2l8xotoIT7GdurkFUj6TUHrIDR0trD8aba9e3fKBe+tIXyM5RO0OYdvwLYWBeaDDHci2wXnOsBjCVz8+6z8XnGsSH/aWS+oGhd1W2zP6TKmyjrh3hIg8DP7chtGa37lu6f3Hdk+ni79mIqBezMzaiF8Ai9PdaL4C1RY1IVUdCkhU1wmA7olwKsQ3S510DNL4luEQQTISXdqCAq8aRol9VZK+uFk0HCFYPm8b9yxPBpXjE2YYEl9KlPSLtKlw5agqSmhCCWDV+Qv9T9otdkd9S8MdSBX8Q2KasDmrwPZNcDNvtrdb9hqAQ5yxNTWGgJjF1HFs/Y9hAzXf4GX8ENb8lKKTuIJNzhU30NOVc5dKSnK3gSNwQSyCTZpJfLolZ3W9YlhRJa+m0dbhyLglN+bYWMwI/Q9+v4LncEIDxL0cY2SyEobSl8G6EwfSZ0j6yPBG2Xtro70IY2yiE8XTfGJD2lq+KMM5TW2pgr5YP9x6EcZNvNuHr8RbIeHbnYMl84Mush+e5f7jvyGZiasHmN0s05UYXmf5P30+778wtN/HqXnsd116q6JmKbB7FYsj7B50nK40a/5S50fGBkXT/bbSXZ826BmffKvNnP+mOl6ZaAHWzf1Ljc1+9xbeb7n+e3zJbQLZY6oSTki/DMeukHOlpxleKQfT7Uc/pJBkylQ8aBAHyPJP+8JmrC05zmsWbsgwX2ZyJbrMGFnlHKGMl/yJfw8njDXqNSaHneE8yYoDXFzjy7yz09srzpxZuIEP6balUQiWRknLkjyG/eDkbWCmqKBs4B/rZKV2GDM3UMDfs6HOUcMrkaPmzMTNLI2deY8eefKir9Z/spAbBl9IM9Uv7TgvNlSXPlTYhMkpKkvOYpoSuxW0hfEiQB524ODgNvxyQxZOqE5sGJMRnTYzlSJfN5F0ovKYmMs6fz+jOBXB3HsN3KFCDqvxYI+xlVfls881a1L5o862pzD92h7MJYK3Jsy7X+F/v1pLMzj+iPbPiPPpjEOdU/7eKyM6fiQjPKlcU+2YSaSG5DBAwKNcVPCo/tH7ma+wlmMr8oLFnPP8amZckoDcItTZbUrbhGsbvK1049qITW/up0z6DR/xSbRyNWJfC9aHtkiVtLCT2LK4xgFzX8xcj1j9BGyOQ9H6soY7X7961kiixV0g6+0+UvN6NJakcSDZL9NPK7eKSov+o916GTG2cv5Z66D/q5VrqSg2jnD9ZjV9VD9QB1/EfdLIbCHqRTl+MPM3VmxUZ6CWno7V8fMWEard/6exBMUseFgK7IArcu0HcVpm6FpbSgZC7rk9f2mgn/d04J6ccS8kW26DsTZPG07Onc9P1K/jk/pM3rzXKj6Pk37dtyFmkvDk6Jy1h1mvitzUShrGoAxtxQeDDREGKhNBiRLoexZwxL8cRGwylZurS55Li5WCtKdFr/bS9tZx3Xxmkacqm53ha+KSg+Eu9yoyDGRShw4JEzqGLgYUFhEQATpVxlGPOYbr3DAVZS8uUP/qOnvJrAFEt2rKxpQCBYbTc2qk5aQ96GhJer97pf6YpJscdpaBkBF+lJBvplpT26x7RLQYqioDqI+k7iSJ8bFOLOT63XUl87nmvW2XqLcrYaHl5ZHCcMh8baTjbXtsUeHtHu7vVbLRfXebLZ2W+PglSXS6V5WVR3wUcS46kyLHj9qLbxnO/7awLgBqKAN9Rxf9FCrUmnYa6c/WVdr8eKKpqPOjllk2nnpFO67mDl/YHZsjkAcYV0sXzhd2s9jRQbO0nNKovmcWUhFJa1XGcRb3icwoCSO+THhrqKuk908tZherG6l58nnUyDJe2P/N0RkNew95eSf0xSNaXsrc+JXjGkvUKyNAUhRrKMukW2bA9x3Xr/WY3xXsCrMV7WyTnZ2UzgbKNEsXeAmGh3jrARtRxBMqxCCozKAcLQ4qUqTsPzSVF6rtlf2cQ1tpD/Z0kBF9PdtteCIJI5gMOsPSTcxaFbyHGAwaxhDTPSbm
MJndRCBgcgqZpzOiTgn/xONZ1jM2tNABIBAbEqeMGie5RPoIWBsxxbUIC7sbenZWzDhcaeEXtjIzu1RywB1EyVGIbIs8mX/Q7Ssy+Q+W22r5cVrhBr92qD9LxkVkv3pWF3VdXLWrAWtFH4YsQhjp3BWhXR0nRkAGOgEU9WyIObYvxuNNGNOiZU8EZl8JTJliOviCcGVHEeNYPHgc3E2BCP0iTvoqvsF+X6sKyoQRBK42yzR50sz7KqKAUW8KFlApEFMpcP/Y4KtXp6v1TwIDH85wvzUnKXOMQmmbKmwZZviPcsiEwy+4cOu2R7PmtjCT78c7vVWD4rKYxsLmwMbCIzZTNSmzLwUDMUcgCz7IhEdzBxMYkxx5wNk339IS6x5wBh6beLT9X3GVx6NWUoJukUfjjPdtro1ApViQsHAAQ+DbTKHTioZ82V6ZiIAnEXHo4R1kIYbqYlhjqQ2AcLvyxgcAX965BVBWbwp79y7CHMKVcCT4Kke87jFgewHMJaEPILdfD0AFeQIHMEXs41QUTQ0NXtXgq7I/1mrwh/FzptMK+08hg8MenUq+LQQIpo8Ry7EA4PvYsDxE618IABZYExPUDwBiUOe44FuNO39DBxKCDyfr71w0UgE5DLZaTRB8CP957vCb6CMUYEmIJ6QVIqA1uABadJZEtFRgVNl0g1Pdz7BJKUFICIkOTUAgMAQyUSwVdxmuH4ga0PzxE/A7iE4gsjwqJPBQRH82J70hs2dLFujuwDVCOzbZskm7OZDNTcyZT2CCHpv4IrOCh/dbOJh+c87XUGUUP6Ui0eQSA/flIB0fTeLAF82xIOxSbwx21y+TKhJr9nUCbHsi4+ET/nTz96jPx6M7yNdJdpJgw3UPuU8VSDp24tdA7p4ptRoJso+72ZDhyUj5ABH58Jv7a4lmnHCrLEPtC6WDPCqCNYl80QJbv+NSzbZf4jp2jeGZ2xhdtyrIHhqBaesLkz7QM6z0vDb2fH+TAMNpc6ZRrzLignCPLVoYdk4G09JhrDSzbsn3mW1xAKTW0IM9R77N0toBpdJPJ9fyp+a6fKdsMLj/ZqaWx9ePT/9cUa1wABgCxuAIfUgaoheHM7WxbwqfUAshHrg+Y7wQ5zl5HHKTFGmVZAJryUsUnO/ySPSdmluBzVwn4ao+OFfrhU1OPila/47/RzeOret9nuWR2c0kueT04k23ilrTA50a8IcVv9borU5uNZNZiDjAVNs1ERwzDRk0j7t7RiXhj5WRL/TP+V70N0zj4be5BJS254mFLUImUTgZaWs51tRA8sIjPpSccLgKep3sQp6MjhgxT46Am8sna+qtaBmqieTWpYySb2jRwPiPkT9xsqCeb7f5SB8F2r7/mjX4liy8tcILD4esu2E1sFYjSk0i+uFXg93OLxtpP5Zbrvcrmc8vSAie55ec11qQMfCu3ZD3Ccd7m7/L+a8uCC2F5gU0cjpi2LGRsWTBiAcolUKKN01xHfoL01CZDgxHbYN6SHCY4o3hW2cb6/j86mCEOHoAdJJYDCHAHoPh9brObFoyzKZ5/KHaIwIJAxPTIj1QV4rsb73Ob7OiqVnViyBnhqfOqHdoOB9S2599J7cPya5mOoGmuSB7j8TIK3Y36A6ztIdjKLTlN05sC03CgfJqlZ4ken2RjZVZ+8UqUEXorySZjVDLmlU2RTfnNlcso0BTK8ooupv31AP+46GLWkO73HBnU02Y0et2M3krjz0bEtkiAbV8XtmAIwTwEJANmUezoAi0PMS/HglEI07b/YjOw7IM3RBYXLSG3wbtZ96SnvprG4G+Lb2sMckIsV0DGXF9qDML5BoQDZvkO8rgHPUrdPLNvaSa+TUkWhNSwBaHrb0E2FoNhRw8XdtvtDAzXi3VvBQwFIcISnue4DotgiOcwtF1ucce3ESCu6wU5tn8iGUkYN55d9oLECW/5p98a7Mx3DKH88dTXc2EV9ZHFHNt3lJZRUofw2Aui9CJhtqRC2pKDHDvk2DQZX0EgnkX5Vg0AW18GbYbAUeZzqOgcSqfnpXMg0C8reopQR4Fupso8m85QZ89RRymzgCddZYbZDAV5DkYV6ZZzPCtzFnIg/55zBpmzHa253kN9TC2CAHOYHVF/ngFjA9e1uMC+juna2M/R8woBxGnLJ25fu0R/YejLZa+vcr5I6Lh6wlUrPZBhHlH4bQKFESvgPgzmAiVOqsLMsZgPgkBgJHyZI6REqqIcM4MNwwxajPyk3dwzxkqdnr7PmszkpeBf5kCYAY4jS+2niB/MZJgzBxxE0CLCxwF0OAk8mSPgUkNmCM82FjT6D9Leto0G3Ms1a6MwTPctmJfE/zLoMQQsxAABZAY9d64+HQ9bHnSZg9R3YJ5NLSlMh60NUWtTEOBHNd17EXq9th4l6dWctL8A/zK31Qx+nFjS44EnQAJ+xPOtICA8kNRmgU3yLJlESfiR1ZKX39FSdWN9Vo7vdPrS+7fnKPSlUPjLvFYRCoX2U3FX+yY0CunceWrbyNbFQYHgLkaOyLV2MwlC+pyunEjeMWjgd3Q921gguu16K10jhH9ZHccMgVT3zFDScLaLjREoJA8sHwWAIOoBT+QoB3m2Ok0YNh3AVD38kxTxGwj0tTruZaTgj29h9a5mkwLr/X7JJR5Qr3XnUY/Ny9i5gqTLWOD4Gq8kx1imyJSzsawzhZnswfWL2DcWhjXf78tmJw3D3+fOhQAQbGmHpgtmypjMRaEn1Y7E4RKqDQP3ghy7aKC0SUgNdbqmGbpkiwRho96Xip7pUTZzc+O3YZAr6AXQBZ69jEFb3aTlU+Bi6QnukxxDCgSlLUJoSuY2hBRit+A2gHAk3X8b7YckBCH68Y6Z97Z+jkaOIOZ46oPEYC+bWtBDgStc38Esz45Cz3O8FhtkKAx7E9ugkXkuvcDfqsLJZNB+LGXXb4V+Pey/mI5rytz9Ugs1vr+EWCavB2a2JmkYglS+J8Q2MsHRtFP+5DSjD8IRGgvKFawGY71qurmuPs2g02lMNhad0e3+G93sv/GtJoH6uvDeGqAKjjKC07ChjtNTEhr8kz06H8WpbSwD9MO+s7lSM769JBZfd25uDRYxSo41goxkt9Q/UGKaFXhTPjheo765UIzvL4nF192cW4NFlO5SSLHJ1f0daPxSEOiq6TQEfryX8R1ba4hsC1HXx3NPN4sT1TmzbJcwDHQjVZhnrgPKdizi2UmaxLC3/lEdet+qfO+1x5Ow20iD8Df6GJUQslwMgHYyLoHQltKzOPYC3w2Q79Ecwy00NcsVYkPKKGSGnMF0z8sfjUG1Q5AZMfgbPYy6A6UACM3TVukiVR5Qizrch9KDAQryzLvJdsQihkiLcWoN2aLkh57s19SlfDed/0V/X66rLuXnFoEe9PkMh3HyfED0HCWK9Uxj2/d5joHn1MxViA2li/NBVSkUblHAT0HES+9P+Y/PfX2nn1t3fS7ZUHgEucujtW3PDyxPCASlzyFyc0Qh5ikUEpHdlMSNL37s1Jq3ahedpuukpeCPB+F7pCATlocZw7OMfxoXkSDqW8h2GHYdTDyRY8
a/nfbQ4GxzSmPvptc6JmysiyZsOr2+v7EemtntZSOP82z37XfSKOhntsgiC8htd9KEnfpDu99MZyTS35eZrUSiLSzEbTuYZWbTeU2KLXxgAQKYJ6XwZZBrIlhaJkLTPJPXestsqFA0R1AGLVeTf2Ol4uL+kszwS2IoENBU4JnALQk8m3W00n71DUZjfHtJ19HrknlrwChYZq41AVkX9rbr58EofEjH0ehv9F9jZFtqlwyRM/PazMumBLFtS5cqeRK7ngNBjvtlO40/appzyD9t1twXK+cRDGUr1G2ON1QePt9gkh1eHz27PRIxpZwZM8Fx2wXiQ/tfOe4MnTQK2O9z4nCouwxRR1BPaqHouotOICKwOLUJ4h6zPSfHHQsVmVRZYxkfNLVh/pwJnMUyqRfPx7vuU+H6nt9Uny7OrqxFjviPaw8TsQXenb1VHKAoh/bq1UL5cgRODh7au+rP+dVNrXjzoF5Viuqv/fHe7r0+7kxg8Vi/2LsrHN3enalX4ZX667T4cHiF77itf1soNooX1UuyK7EyLgvwzrsquId2ta/IUzpVR0TRKxR3j/gufqg9HHV3/ZG7eznYk3t/r45HZXLcP3osFMPCfuFi9/7iQoJOsBvePfU67qM4fbxS0qFUKd/hA2I753eKTRXsCp3qSYeeTvFuuDcI+P5w9zw8Orvd9Y4unLMj+2rXu7+4PzsiF7v2/eV9uEsuHkb3l+ShQEYP43slXgvt0eP4qDCuFR5GjzX1wLVSfbRf29sv1A7ru4ePe/ulh8O94sHjXkU96kHxYH+vXNytHBRvT3fLxaOzg6I82pVnuV4T16EoTDvjg/EBPTlFBVIqFQ87p3eP94+eDEoV9xh3vHPO/97au57n1e1W0NmX02n36FDWhvYVaxX7lfB6cmbX7o6Gx4fsAt/4l2CPe9UhOjjdP7Iv1FLq/48LTTIdXin0Fur1W+9y7+ZAvbQHRwE6PN1vH5yfXp5eUFmqsl2g1MTYPcfsYDi6b/FpwQ0PafX+5h5yG8IibFxrMdLYOy75t+R87DtNP2CX962z7tnFcbd1OLzoajEzaQ80JwYNtt/unLROdh2hrkiPLg9ZsTsq3dC7SXu8W2XeOe7s3gfqM3zQrw6bZ64Pzhqn/UJ3uMfEgz8uj552L4/V58w79cPzxt3DYfSu4DxO7k9HoxvnXpwMC8Oq3H8MLvRF4U3jHp1PeuPDxvXj6A5j0vBOb9VvgnL37JrfXKID/6ZUctv0JNjdr8rClXc09XePr3noDdqn5WIlYKe3g0fQ8R7GZ3eXtZpoDid706cBOVJnaU8hLVba49O7EW459tnpBRyBx8GIPI7379q359q8mVypv9vq/8ej0X7lftTsn0HmonEFjEJemdo8rA4bnFz69cPi/l1hUDnujILW9OyuxKtOkeARb43L52zK7+Rj8TF4Gh9Vm/hvs1P7yyqP6rSXt43xYb3Weaz7DcWWj9OpX7u7aB3QI0p6zG12a0qiFs6un2bMU725b7jtQrdSb3a8i73q4OG2bXcbe+fXY6lsn1L9tjjYh+zKDvZl5b44lU9lWXh0uxV+7MtyoFbNvyt3dgu7k8qAjm6OJNs9uaWHl3/PHfB3OqjvtlqXWtC6j0TWii3/4PySV0ndhntXh2Xa7kwJLEZ9HMb3e4dFWS1XLk9PydWepklR/RWtQK+Ex5eFY48Wu9Oi6ITnykgslR/6F53J5XWvwsrHYTUclAi7HOwWy8GhdzaunQbMbhYKrcvHG4nLx63R5Fqdr+TsOkqkFvynk6sB2+P4uiIPx6d2ZUz68Ar2bk4arpKJpWq5XDitKD1TuH869YpTgP+Gp/T+asqGxw8XgvS7j+fkktS4snULBe/s4XIfP6Lu2Z1i31bj4Bxc+3uV3UnXc+3biu/dQvJ0OAor9+rbvQpg9lVR7hXOyuf6MTv38p4OL3ebh/qpNUi86Z22Xpy9ysGovB+U9eOWptfjSn23enAucXE6wtVurbBbUdf0lFItnARVMDkqgHs4bfLxkfMXy4dr5wbjw85h16/c9cbXZ+JafbE2BNfdcHwjxkgL08fp/dVhsY1O9aUlQ7DhPRTAkV2sO54S2YXpXyWI7PrRzcUjoV1cIIVq+5Jclq4frx+dg8YU9GTXPxwPzoPbbumu/FRsHNZa1UHh5mC6d9UeFyqF0qmEtWtWVI9ewpPb6aRWfLJPoHMrT0andqlTJRfdx4MCuTskdHRFerI9Pj/fO/fp8Pp+/1RJjuuL6Z1kvnN6p8306+JJ/6LuyEOnMe5XgyY+Oru8buucl9JtMLk5G+lvwU7/4ObosCl2L6jnHP5VD3JQ6SBwePm4z3aLRfvmRkw0v+5LVuj0rgG/A57milG3XPZ2C+KgvidPxlpnHXUuZWRCFU4amNX3i4C7N85wXCqro7WL3cmBDI+qt/cjhZICdIsRblv3vSMtWp3J4OGKFK5643G5QYk/aQ12kX97al+TQ0fJOPdMfeugFD6K9un1lX97BS/ru+jeJsEE3j14zVEBXtTv0f3tdVUr08fBVfO8+KRgNS5el7vjnu8EF43RceOyfkHGdwR4/uhM2TRUH8CXznDSCr3ol8X+4Vgt7qE6//1ofO7dDYa2uj/kKGIdHD8eP/KSK+sj9dWnfrHS6o07o0tyULqsP0z+3j7qBYDs/sI9rd2eEwGuqqLLbo6QPyjtnQ9HjJxoecbGl6XgfNpFB5PyY/GhcuDdkE/Rjbnr4+PR9dPtuSjcBTJSJp5SU0rya6U0nZ7RSmUSCSK9DPbIfjx5OqiLCj0e8dLfYqVzrQ0dGZxKTO+0ZHk48Tx0QK9x9TisKd4snQ0ORdW7UdZtZ19DCp6PNTOE/Qv97rohXM6x2jPealV33Zpq+VxlPZ3OUJhyerdvDy6Gtfbwmh/33XpbixCN3epu76j6VL05Cvxqaz/AV3/vW6ey05oKKLVEnVyjqqieXu67x3+puNViZTKAvL+nm4QVPP140fOq/2nlDzViw/N+eailzVn/dujjmrauJkf100rb7d7gLr6l1VCz2nm/ctQ6GV444Go4uK2MHvktpNWp2z07JIzq35PrU3ooixUoGlrXQbUcYugcDJ+Ohk2uVSCcdCfwlnSDp6EOC5Y6waA7Lt0NryWowkbzTiDn6O/NEBGf9nsXQe+qOxhfawWKu4NeSzPDDUQHhRGskEn3xrl+OsBVuzrUNJr025cwmF6f3J+rU1679f69UjgK3aX96dHVCeM4kJp9K5fyAveql7VAFwZedx8n4PboBl9daIvgrlwYNxA77El2bYvjCq/uh60q7Wi9eXLtIXZ8gM/GF115Jqb0pq3lJ5tqoBzsnx6hI1kuDJ6elAxRJlGhP3pU8rxw+bfsjTk833cHD6JH5fi8XrmWJwfY6Vdunfuwuve3f/1E/eP9Sl8ee2zsn5Qr+ofnDO6djfcrgWZmpA2Zv2cV8nfqn+KuV9rvlHtaQE30X+zM89v3zdKDHD519u9OHq87t7f33erxtQBOdXJ4uleDjuO79RYa9mvw5AhWtVG2f3MylQ3ISrz8eHHPawPZn
V45YZUqst+Wwr1T0VG6/bx+dWKfQjrhNVh3z4fF/WGdPj1MmCzUO8gfVv/u++Vy6+SoUZWVE+7sFYL+U8WvdkqNq4PbOxEpKuiKYjh2KgcPA32/jwe1C9bvNod7drXu+hf+3eW9f3sjYXe4Xynaj5UjZwx3vaK6v1rr6qRfuC7ASghOu/2D3j0ZlTWM64+ef1u7RUpY1YeX+EyCcgiaf2ELnV+eXaIhdPcOj51OeFFzzwZPTdEBlV7jL78aP3SFPw29/dMSkqcj1KU+2NecUiAP+3bvENQG0y44CS+65+deudd4uD+B4WPdGe5Nodd97KBB6bhXvW3faGXtaVDjZqRrmtUubNpl93Rc2hPDg3IgQ4pGFDqtvVN4d1R2g6rTUyS/uR0MxnjQOizXtDYmbf9wvz/jyWMXjp9w96Gidomo0D0Dwd31kISdyfS0L58g5IfX+1Myxpc1eDf0taVmT8nTWNtsBds/PD5vTbo6N7SkydvssruwSVgHn1a74dWEiSHFe5oj9jzg26fH4qmGNe9MemI/OL9W0gy1n9i0dX14PZheDLpi6uIyKTdO8F6tMe0Wyz3nZKD1Krv6e6JBeYrqh7f7l+HB7t9xL8Tt6eVA1JrjQRPctDrtgye11fxbvau6juKVCjm/8U5bZ7fl3mXXvfD6rb7PZk/90EX3bIiephWPMX+/6TnO2fTauZ4cDRRxvCcMBrLoe8NSax/S5plzPKgJZ6B48+/Iv20Xw8Hwuh2M9KahVMWDznW53WpeXlSmd21/l9Z2K8P+3lmldVc+D1pHWvjhy2FnXChAX1OvNzhooP7jsM26xwPYPjw4pQfysn8zOH28u967fxSPe6w17d40a3u1fnl0yG6CbuiDU3x3dfCgqQRKnttFx71O4/YIXnXKe+PKoHLBWxN2GzyUqlV/XIPlG2XF4p5ri95p+Yjfe7vBnQPLjP4NHgsjbXHeugf7StQe9asNoYe/FUJy3Zs0mg4M3E5FENxxZQ9Ou3WkbZ1rZ6j31SMUlqrOEQoRdsvOo0bkyZ1alL8dBa/94Vm1wg5OvMPb0djpXkrQvTm6Dp+GhDwe/+WywNx7CIannStZPS8+hq1HaD+J9vno6d4fVGsDoi5ww8/gYXMaDMioA8rDE0HuKv3yFTz3j8dNqQyZQvlGsezhTfks8OSUnJcnuFxrkCIrsLvyUUXzhdMGj61O70mcXE/9p3rz7LA7PXSP7HG7yh/1+p3c3wwPTu7DMvZG43Yd3hWnJyfn4MY7uBwETyc38Kn9SMvtEB+hKi81KjXcPGfdB3zBIuaT3adRoXz5OA6loAedvaESDM2mrB8MhlqpUk0jl7r1xtO4dnxY5PSQPdz2J0RnbRUK7dug5u/5XrF1Xa5dhceYX3V8FuLj24tj76S0f4Lk4V/YLZ0eXo7oLTu/PNCcxcp00nJrRZcCJaK6w861N534NarA8ngRVC8PzrWY69pPt4fw9kr2L08Pe02sjKZjst85dknv/vyekvvBMZqcgwmW9Xu7pb5+3ejVT6o2x8XAa8ECPuOaW0/BPaj3cLn/+NBpi5Y7vLtoV29vcO1ijC73W/x8twsei05v6pc6RzaaBi6ZTImsCsUuxT1RkWSqrYzScaVxAjRstLjSdn65HE41lYfa/Sca15Nu/9h/PKpAuzP15O50t+vb3u3DUXEzLLb3XPO0cHZZIZXb+4fQ+XuK3e6RdmbtXt1Uy5cndO/+6Eh722JHZsZrafBtvpyNlmo/m00LXyTfJiI778vI7elhCc+fHfScTu2s7WsXevH/AQ==</diagram></mxfile> \ No newline at end of file
diff --git a/diagrams/netdata-proxies-example.xml b/diagrams/netdata-proxies-example.xml
new file mode 100644
index 0000000..956bdaf
--- /dev/null
+++ b/diagrams/netdata-proxies-example.xml
@@ -0,0 +1 @@
+<mxfile userAgent="Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36" version="6.2.4" editor="www.draw.io" type="github"><diagram name="Page-1">7V1dc6M4Fv01rtp9sAskPh+7k83Mw3RVV/XW7uy8pBSj2JpgxAicOPPrVwKBEYIY20AH2/3QMQIkOPege3R1ETN4t9n9wlC8/kYDHM6AEexm8H4GALANh/8RJe95ie/YecGKkSAvMvcFP8jfWBYasnRLApwoB6aUhimJ1cIljSK8TJUyxBh9Uw97pqHaaoxWWCv4sUShXvpfEqTrvNSzjX35r5is1kXLpiH3PKHly4rRbSTbmwH4nP3Ld29QUZc8PlmjgL5ViuC/ZvCOUZrmvza7OxwKbAvYivPS9+JaZ/DrOt2EfMPkP7PdDy0nm11O5nfHcJRWm2urz3Zlja8o3BY11psob9HgLQQoWeNAblRaDtETDr/ThKSERrxsya8AM77jFbOUcMP8VjvgiaYp3VQO+BKSldiR0piX8kZj0f5mtxIMXbxijDYLGCxeN48hiba7xrst7oZXiXeVInn3v2C6wSl754fIvXPHtvJzJNEdTxL9bU8bSz4M6wpjHGlHJIm6Kqvew81/SMTb0AfXjr7hKei7xpjowytH33aAij4YE33r2tE3HRV9a0z07StH33INFX1nRPR9XwM7kxxNAJfAFxvfUcoRjrISYIjSJEUs/SJUkzBAiJKELIviBxIWNeEoKA6KaITzErlfNPonTtN3KeXQNqW8iLJ0TVc04makwjyyHn6/v4uTFg60ioL/ZQXQhEXBd8wIh0aQIase70j6e+V3foInt+pHt9qY39QKp7UuHAeKHNTNznCIUvKqqsMmG8pTv1PC293TBbhqV+kAX60joVu2xPK0GhXK6+jEDsfQJWIrOyq2sIFXs4Xrt9niE5JqCHZYl8gOfQzQzo6jQK0+16atcgk49nS41JEd4BLZobvxAdhxRK8/WXaYl8gOeJJncbgQUnsDz5qOvQfxLPYFsgO6kx8TaAOABlO2jwlgISeLMYE/3pjAgd61ow9cBX3PHBN9/8rR53Cr6MMR0beMa0e/QLtA3x4T/enPAZyHvun5KvrumOjrmkyoih9yU0qfJtm+sCuKpthT1zMji60kZfQF39GQsuxeoJH9K/cUk3Hi2GdefXGkbPSZRqlsxSw1Z36zLiyjGbmCs0y3TYQ2irODqk4KoAFEnFUTcYXNBxBxljUonaqjAqNmEO4z2gwybRaeSifvAuhk/zQ6QXijk0In/wLo5IxGJ9+50ekjOlnGBdDJHYtO7t633ejUSKehAmBj0kkPwdx/+0Ofb+FDjVTlFcMJ+Rs9ZQcI5GNxOdkF2l9n9j0vERZMJMxiUw50Qvycto9/YrQk0erfmannIuTKaIrk2AmYNeNZPY2GbF+NBDRFwYo8uupoCPQyGtLjMNwE+nj0sk0g5lOUAemYJrA7BGPaIgD1ETt6S8AiwcstI+n7I4qCRxJwEPjGIiAML1PK3h8TzF7JEmtdmeyg+gAUeGqemd2Qa+M3AOo7fQAKGgB1QkG6gLwqwDp/bUXi6FdB77kk6Bd+RMbRci//tZJ/s1qe6gWCk431ih3z/BkQ1ZpWvNOrjXAaoBRlFVCGxe1HwUwk4bI3xMSvmNHde9EYv/+8PfUaeLF2Xbwsu2O1tCsILLPLB5fL8IokueHNQZtkGQobvKFZY1kVRoK4lxuy1TUR9tg3CCzBVu7+kkGbRSFimyS3P/8/94iSEnnf3Nz4cD12fqHdu+xBvCRU84Zt29R6FC7FGrqUXvro6aeunge/q84VuUXO1BgRW3v6mavnyhMVfDAm+NNPXD0LfFOdKHKtMbF3rht7p8Z7Z0zsrzw5wzRqPb43JviehvUtX7vTlJsSIyr67u5Wt1SRVc5udQ8gQcP+uIoe40f2EWn9V524fYAmzrE08Q7Y+HPRxDkmv/+aM7gP0MQ6XrhOiiajJPpfQCr3AZrAo2lieZPiiS4Eb0ndJ3gd92hJCt1J8eTKY0bAUMWkZ444gnCuPGZUTleV6cVjgn/lMaNyGqvMLh4T/CsPGsFaAoPnjgn+lUeNYG3e1/PHBF+PGt0yu2eDZHYrSs45WvEDWFP8RcDvCCVXe4NDq6JPJafHmW453sOkvanEOjriANxJEcvVI1O3bO8xiHV0YBwCa1LE0mNZt7zvMYh1dCj9ICs+GbF0PXvLAB+DWEdHy6ZGLD1adv/tD/1dzctORDYPh8wGy0NuWCCQW0B/vfGiLQAc1QL+qBboEDebWCY4qL3qbTfk8AyWCO42hcJuieBHJyo3g9CYBw6GbLExDZyhDf//HyFNBPK8cwDGkvGH5J9DXkk9Ndyb9ZoZ3tyonhgOS4rkPfXYieFH9t8D9Nf1gGO5jtgoaeHuyRHHihbmUtdvkcOfVNda3XRth1nsFunb8PJjLjUL3N0GiWyf+4JtZ7N7p4ZtzGsw+34Mp47fQJkQcUK6Q59EyWXWGURpyy1Qp1zteheTX0Q/o6UO80wTU4pzR4XP9CytJzeBp/fkJuxDKxbLcZ2hFeUc3fhqEbHlmigvw42jC9tueH9lG5RkR1Qk4pANB0+lOkKZeSMqvooyaJuNonSDYiFKk7fsb0hecE9ytO0q6oJU9JsbToL1+IDUZOpo7W5jTjqcjZIyyvEujQptPGSb5aPNDj1Zw4lwLTXgZ7yeWct4gcDRZbjRIMNNqw8d7o22ahKsR6UNAcukotIdZdyJissDg4grsAC+ZZquY7kGF1am+maY6ZmLfJfnAWgDv7ZmSY/KywM9Ky/ErfjOn9tkIXzmY0xiHJKoVWlVLbf/VpZuPq136OMpr60YWyziWnnInYZnvJgzOu8Rb8qmrekq8S0xnIVDUv4gzrlmJcLpGALYJ5RUBZKmhBqcA+85o6KMxhyjROiLuwOerXpWB+fT3uTen2FuNWEGEceQbu3YFi86BqT6noalXMvP5FVpaffieUZbEUt9vegyfc6BQAA4wy05QwQCutOkQ1LuiV7jry1ZviRyhY5P5zJg7SWActHxAz7D6WVM37TE+3g+IzMXSXGrzzCImK34KI5/kjPRHYfZ7Diux0PUPjQAoc5C4A3lI4r13a4rL/noaHFHDzegj/AbfMTZswrN45l5fWERu55B0OeYpcPHFk70Phu0XPPRymOIEYv4c/wZfZD2UZcymXOMgUsxC/CTnFDMxDWv8TY57IZa1xnryw3ZP9MNfYpVzGpf/YBQZyJsipP144j02EWKs/QJPc+ueO7JJvvyd9UCzQAefDEpe6Xpa/m576beIGvsSxLnXygX1kXFxjPZie7pq7ye+3Waik+bfxF3Dx6WQWQsCCfWM4kCzBZL3iInVjYJ8SDKk+Lvmi5f5m/8KPqWuPNiVzax9CAuLF7EWS92coeVHSBxLJLUHtCGhMLk/8Es4J1nP2yqpVa
ZDUtTNU+YgQ8yQbuTSQ/ISDLpcmdyZAIHyRRwOAjvM15K9sSYxiGeDn3mllcL53l6PG9IAunZjpJAujqZHIHMgwS6Y+9cood3QjnlHNrlPEJxLPb/Rlckmn/j5l7xKnpgVQOLVLKdTymrFpeHvj4N1Buj+CajYjZuL4uFQv3GRY044v8=</diagram></mxfile> \ No newline at end of file
diff --git a/diagrams/registry.puml b/diagrams/registry.puml
new file mode 100644
index 0000000..51a337f
--- /dev/null
+++ b/diagrams/registry.puml
@@ -0,0 +1,40 @@
+@startuml
+!include config.puml
+
+title netdata registry operation
+actor "web browser" as user
+participant "netdata 1" as n1
+participant "registry 1" as r1
+autonumber "<b>0."
+
+== standard dashboard communication ==
+
+user ->n1 : \
+ hi, give me the dashboard
+
+n1 --> user : \
+ welcome, here it is...
+
+... a few seconds later ...
+
+== registry related communication ==
+
+user -> n1 : \
+ now give me registry information
+
+n1 --> user: \
+ here it is, talk to <b>registry 1</b>
+
+note left of r1 #eee: \
+ only your web browser \n\
+ talks to the registry
+
+user -> r1 : \
+ Hey <b>registry 1</b>, \
+I am accessing <b>netdata 1</b>...
+
+r1 --> user : \
+	nice! Here are other netdata servers \
+you have accessed in the past
+
+@enduml
diff --git a/docs/Add-more-charts-to-netdata.md b/docs/Add-more-charts-to-netdata.md
new file mode 100644
index 0000000..a16f2e9
--- /dev/null
+++ b/docs/Add-more-charts-to-netdata.md
@@ -0,0 +1,16 @@
+<!--
+title: "Add more charts to Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/docs/Add-more-charts-to-netdata.md
+-->
+
+# Add more charts to Netdata
+
+This file has been deprecated. Please see our [collectors docs](/collectors/README.md) or the collectors [quickstart
+guide](/collectors/QUICKSTART.md) for more information.
+
+## Available data collection modules
+
+See the [list of supported collectors](/collectors/COLLECTORS.md) to see all the sources Netdata can collect metrics
+from.
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2FAdd-more-charts-to-netdata&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/docs/Demo-Sites.md b/docs/Demo-Sites.md
new file mode 100644
index 0000000..8af1282
--- /dev/null
+++ b/docs/Demo-Sites.md
@@ -0,0 +1,25 @@
+<!--
+title: "Demo sites"
+date: 2020-03-26
+custom_edit_url: https://github.com/netdata/netdata/edit/master/docs/Demo-Sites.md
+-->
+
+# Demo sites
+
+You can view live demos of Netdata at **[https://www.netdata.cloud](https://www.netdata.cloud/#live-demo)**.
+
+| Location | Netdata demo URL | 60 mins reqs | VM donated by |
+| :------------------ | :-------------------------------------------------------------------------------------------------------------------------------------------- | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| :------------------------------------------------- |
+| London (UK) | **[london.my-netdata.io](https://london.my-netdata.io)**<br/>(this is the global Netdata **registry** and has **named** and **mysql** charts) | [![Requests Per Second](https://london.my-netdata.io/api/v1/badge.svg?chart=netdata.requests&dimensions=requests&after=-3600&options=unaligned&group=sum&label=reqs&units=empty&value_color=blue&precision=0&v42)](https://london.my-netdata.io) | [DigitalOcean.com](https://m.do.co/c/83dc9f941745) |
+| Atlanta (USA) | **[cdn77.my-netdata.io](https://cdn77.my-netdata.io)**<br/>(with **named** and **mysql** charts) | [![Requests Per Second](https://cdn77.my-netdata.io/api/v1/badge.svg?chart=netdata.requests&dimensions=requests&after=-3600&options=unaligned&group=sum&label=reqs&units=empty&value_color=blue&precision=0&v42)](https://cdn77.my-netdata.io) | [CDN77.com](https://www.cdn77.com/) |
+| Israel | **[octopuscs.my-netdata.io](https://octopuscs.my-netdata.io)** | [![Requests Per Second](https://octopuscs.my-netdata.io/api/v1/badge.svg?chart=netdata.requests&dimensions=requests&after=-3600&options=unaligned&group=sum&label=reqs&units=empty&value_color=blue&precision=0&v42)](https://octopuscs.my-netdata.io) | [OctopusCS.com](https://www.octopuscs.com) |
+| Bangalore (India) | **[bangalore.my-netdata.io](https://bangalore.my-netdata.io)** | [![Requests Per Second](https://bangalore.my-netdata.io/api/v1/badge.svg?chart=netdata.requests&dimensions=requests&after=-3600&options=unaligned&group=sum&label=reqs&units=empty&value_color=blue&precision=0&v42)](https://bangalore.my-netdata.io) | [DigitalOcean.com](https://m.do.co/c/83dc9f941745) |
+| Frankfurt (Germany) | **[frankfurt.my-netdata.io](https://frankfurt.my-netdata.io)** | [![Requests Per Second](https://frankfurt.my-netdata.io/api/v1/badge.svg?chart=netdata.requests&dimensions=requests&after=-3600&options=unaligned&group=sum&label=reqs&units=empty&value_color=blue&precision=0&v42)](https://frankfurt.my-netdata.io) | [DigitalOcean.com](https://m.do.co/c/83dc9f941745) |
+| New York (USA) | **[newyork.my-netdata.io](https://newyork.my-netdata.io)** | [![Requests Per Second](https://newyork.my-netdata.io/api/v1/badge.svg?chart=netdata.requests&dimensions=requests&after=-3600&options=unaligned&group=sum&label=reqs&units=empty&value_color=blue&precision=0&v42)](https://newyork.my-netdata.io) | [DigitalOcean.com](https://m.do.co/c/83dc9f941745) |
+| San Francisco (USA) | **[sanfrancisco.my-netdata.io](https://sanfrancisco.my-netdata.io)** | [![Requests Per Second](https://sanfrancisco.my-netdata.io/api/v1/badge.svg?chart=netdata.requests&dimensions=requests&after=-3600&options=unaligned&group=sum&label=reqs&units=empty&value_color=blue&precision=0&v42)](https://sanfrancisco.my-netdata.io) | [DigitalOcean.com](https://m.do.co/c/83dc9f941745) |
+| Singapore | **[singapore.my-netdata.io](https://singapore.my-netdata.io)** | [![Requests Per Second](https://singapore.my-netdata.io/api/v1/badge.svg?chart=netdata.requests&dimensions=requests&after=-3600&options=unaligned&group=sum&label=reqs&units=empty&value_color=blue&precision=0&v42)](https://singapore.my-netdata.io) | [DigitalOcean.com](https://m.do.co/c/83dc9f941745) |
+| Toronto (Canada) | **[toronto.my-netdata.io](https://toronto.my-netdata.io)** | [![Requests Per Second](https://toronto.my-netdata.io/api/v1/badge.svg?chart=netdata.requests&dimensions=requests&after=-3600&options=unaligned&group=sum&label=reqs&units=empty&value_color=blue&precision=0&v42)](https://toronto.my-netdata.io) | [DigitalOcean.com](https://m.do.co/c/83dc9f941745) |
+
+Netdata dashboards are mobile- and touch-friendly.
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2FDemo-Sites&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/docs/Donations-netdata-has-received.md b/docs/Donations-netdata-has-received.md
new file mode 100644
index 0000000..df6c040
--- /dev/null
+++ b/docs/Donations-netdata-has-received.md
@@ -0,0 +1,29 @@
+<!--
+title: "Donations"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/docs/Donations-netdata-has-received.md
+-->
+
+# Donations
+
+This is a list of the donations we have received for Netdata (sorted alphabetically on their name):
+
+| what donated|related links|who donated|description of the donation|
+|-----------:|:-----------:|:---------:|:--------------------------|
+| Packages Distribution|-|**[PackageCloud.io](https://packagecloud.io/)**|**PackageCloud.io** donated a free open-source subscription to their awesome Package Distribution services.|
+| Cross Browser Testing|-|**[BrowserStack.com](https://www.browserstack.com/)**|**BrowserStack.com** donated a free subscription to their awesome Browser Testing services (all three of them: Live, Screenshots, Responsive).|
+| Cloud VM|[cdn77.my-netdata.io](http://cdn77.my-netdata.io)|**[CDN77.com](https://www.cdn77.com/)**|**CDN77.com** donated a VM with 2 CPU cores, 4GB RAM and 20GB HD, on their excellent CDN network.|
+| Localization Management|[Netdata localization project](https://crowdin.com/project/netdata) (check issue [#279](https://github.com/netdata/netdata/issues/279))|**[Crowdin.com](https://crowdin.com/)**|**Crowdin.com** donated an open source license to their Localization Management Platform.|
+| Cloud VMs|[london.my-netdata.io](https://london.my-netdata.io) (Several VMs)|**[DigitalOcean.com](https://www.digitalocean.com/)**|**DigitalOcean.com** donated 1000 USD to be used in their excellent Cloud Computing services. Many thanks to [Justin Paine](https://github.com/xxdesmus) for making this happen.|
+| Development IDE|-|**[JetBrains.com](https://www.jetbrains.com/)**|**JetBrains.com** donated an open source license for 4 developers for 1 year, to their excellent IDEs.|
+| Cloud VM|[octopuscs.my-netdata.io](https://octopuscs.my-netdata.io)|**[OctopusCS.com](https://octopuscs.com/)**|**OctopusCS.com** donated a VM with 4 CPU cores, 16GB RAM and 50GB HD in their excellent Cloud Computing services.|
+| Cloud VM|[stackscale.my-netdata.io](https://stackscale.my-netdata.io)|**[stackscale.com](https://www.stackscale.com/)**|**StackScale.com** donated a VM with 4 CPU cores, 16GB RAM and 100GB HD in their excellent Cloud Computing services.|
+
+Thank you!
+
+---
+
+**Do you want to donate?** We are thirsty for online services that can help us make Netdata better. We also try to build a network of demo sites (VMs) that can help us show the full potential of Netdata.
+
+Please contact me at costa@tsaousis.gr.
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2FDonations-netdata-has-received&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/docs/README.md b/docs/README.md
new file mode 100644
index 0000000..47950f3
--- /dev/null
+++ b/docs/README.md
@@ -0,0 +1,17 @@
+<!--
+title: "Read documentation on <https://learn.netdata.cloud>"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/docs/README.md
+-->
+
+# Read documentation on <https://learn.netdata.cloud>
+
+Welcome to the Netdata documentation! While you can read Netdata documentation here, or throughout the Netdata
+repository, our intention is that these pages are read on [learn.netdata.cloud](https://learn.netdata.cloud).
+
+Links between documentation pages will work fine here, but the formatting may not be perfect, as our documentation site
+uses a few extra Markdown features that GitHub doesn't support natively. Other things might be missing or look less than
+perfect.
+
+Now get out there and build an exceptional infrastructure.
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/docs/Running-behind-apache.md b/docs/Running-behind-apache.md
new file mode 100644
index 0000000..8810dc8
--- /dev/null
+++ b/docs/Running-behind-apache.md
@@ -0,0 +1,373 @@
+<!--
+title: "Netdata via apache's mod_proxy"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/docs/Running-behind-apache.md
+-->
+
+# Netdata via apache's mod_proxy
+
+Below you can find instructions for configuring an apache server to:
+
+1. proxy a single Netdata via an HTTP and HTTPS virtual host
+2. dynamically proxy any number of Netdata servers
+3. add user authentication
+4. adjust Netdata settings to get optimal results
+
+## Requirements
+
+Make sure your apache has `mod_proxy` and `mod_proxy_http` installed.
+
+On debian/ubuntu systems, install them with this:
+
+```sh
+sudo apt-get install apache2
+```
+
+Also make sure they are enabled:
+
+```sh
+sudo a2enmod proxy
+sudo a2enmod proxy_http
+```
+
+Ensure your rewrite module is enabled:
+
+```sh
+sudo a2enmod rewrite
+```
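+
+To confirm the modules are actually loaded, here is a quick check - a sketch assuming `apachectl` is on your `PATH` (on debian/ubuntu the command may be named `apache2ctl`):
+
+```sh
+# list loaded modules and filter for the proxy and rewrite ones
+apachectl -M | grep -E 'proxy|rewrite'
+```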
+
+---
+
+## Netdata on an existing virtual host
+
+On any **existing** and already **working** apache virtual host, you can redirect requests for URL `/netdata/` to one or more Netdata servers.
+
+### proxy one Netdata, running on the same server as apache
+
+Add the following on top of any existing virtual host. It will allow you to access Netdata as `http://virtual.host/netdata/`.
+
+```conf
+<VirtualHost *:80>
+
+ RewriteEngine On
+ ProxyRequests Off
+ ProxyPreserveHost On
+
+ <Proxy *>
+ Require all granted
+ </Proxy>
+
+ # Local Netdata server accessed with '/netdata/', at localhost:19999
+ ProxyPass "/netdata/" "http://localhost:19999/" connectiontimeout=5 timeout=30 keepalive=on
+ ProxyPassReverse "/netdata/" "http://localhost:19999/"
+
+ # if the user did not give the trailing /, add it
+ # for HTTP (if the virtualhost is HTTP, use this)
+ RewriteRule ^/netdata$ http://%{HTTP_HOST}/netdata/ [L,R=301]
+ # for HTTPS (if the virtualhost is HTTPS, use this)
+ #RewriteRule ^/netdata$ https://%{HTTP_HOST}/netdata/ [L,R=301]
+
+ # rest of virtual host config here
+
+</VirtualHost>
+```
+
+### proxy multiple Netdata running on multiple servers
+
+Add the following on top of any existing virtual host. It will allow you to access multiple Netdata servers as `http://virtual.host/netdata/HOSTNAME/`, where `HOSTNAME` is the hostname of any other Netdata server you have (to access the `localhost` Netdata, use `http://virtual.host/netdata/localhost/`).
+
+```conf
+<VirtualHost *:80>
+
+ RewriteEngine On
+ ProxyRequests Off
+ ProxyPreserveHost On
+
+ <Proxy *>
+ Require all granted
+ </Proxy>
+
+ # proxy any host, on port 19999
+ ProxyPassMatch "^/netdata/([A-Za-z0-9\._-]+)/(.*)" "http://$1:19999/$2" connectiontimeout=5 timeout=30 keepalive=on
+
+ # make sure the user did not forget to add a trailing /
+ # for HTTP (if the virtualhost is HTTP, use this)
+ RewriteRule "^/netdata/([A-Za-z0-9\._-]+)$" http://%{HTTP_HOST}/netdata/$1/ [L,R=301]
+ # for HTTPS (if the virtualhost is HTTPS, use this)
+ RewriteRule "^/netdata/([A-Za-z0-9\._-]+)$" https://%{HTTP_HOST}/netdata/$1/ [L,R=301]
+
+ # rest of virtual host config here
+
+</VirtualHost>
+```
+
+> IMPORTANT<br/>
+> The above config allows your apache users to connect to port 19999 on any server on your network.
+
+If you want to control the servers your users can connect to, replace the `ProxyPassMatch` line with the following. This allows only `server1`, `server2`, `server3` and `server4`.
+
+```
+ ProxyPassMatch "^/netdata/(server1|server2|server3|server4)/(.*)" "http://$1:19999/$2" connectiontimeout=5 timeout=30 keepalive=on
+```
+
+## Netdata on a dedicated virtual host
+
+You can proxy Netdata through apache, using a dedicated apache virtual host.
+
+Create a new apache site:
+
+```sh
+nano /etc/apache2/sites-available/netdata.conf
+```
+
+with this content:
+
+```conf
+<VirtualHost *:80>
+ RewriteEngine On
+ ProxyRequests Off
+ ProxyPreserveHost On
+
+ ServerName netdata.domain.tld
+
+ <Proxy *>
+ Require all granted
+ </Proxy>
+
+ ProxyPass "/" "http://localhost:19999/" connectiontimeout=5 timeout=30 keepalive=on
+ ProxyPassReverse "/" "http://localhost:19999/"
+
+ ErrorLog ${APACHE_LOG_DIR}/netdata-error.log
+ CustomLog ${APACHE_LOG_DIR}/netdata-access.log combined
+</VirtualHost>
+```
+
+Enable the VirtualHost:
+
+```sh
+sudo a2ensite netdata.conf && service apache2 reload
+```
+
+## Netdata proxy in Plesk
+
+_Assuming the main goal is to make Netdata run over HTTPS._
+
+1. Make a subdomain for Netdata on which you enable and force HTTPS - you can use a free Let's Encrypt certificate
+2. Go to "Apache & nginx Settings", and in the following section, add:
+
+```conf
+RewriteEngine on
+RewriteRule (.*) http://localhost:19999/$1 [P,L]
+```
+
+3. Optional: if your Netdata server is remote, replace `localhost` with its actual hostname or IP address.
+
+Repeat the operation for as many servers as you need.
+
+## Enable Basic Auth
+
+If you wish to add authentication (user/password) to access your Netdata, do the following:
+
+Install the package `apache2-utils`. On debian / ubuntu run `sudo apt-get install apache2-utils`.
+
+Then, generate a password for the user `netdata` using the `htpasswd` utility.
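+
+A minimal sketch, assuming the password file lives at `/etc/apache2/.htpasswd` as in the examples below; `-c` creates the file, so omit it when adding further users:
+
+```sh
+# create the password file and add the first user
+sudo htpasswd -c /etc/apache2/.htpasswd netdata
+# add another (hypothetical) user to the existing file - no -c, or the file is recreated
+sudo htpasswd /etc/apache2/.htpasswd anotheruser
+```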
+
+**Apache 2.2 Example:**\
+Modify the virtual host with these:
+
+```conf
+ # replace the <Proxy *> section
+ <Proxy *>
+ Order deny,allow
+ Allow from all
+ </Proxy>
+
+ # add a <Location /netdata/> section
+ <Location /netdata/>
+ AuthType Basic
+ AuthName "Protected site"
+ AuthUserFile /etc/apache2/.htpasswd
+ Require valid-user
+ Order deny,allow
+ Allow from all
+ </Location>
+```
+
+Specify `Location /` if Netdata is running on a dedicated virtual host.
+
+**Apache 2.4 (dedicated virtual host) Example:**
+
+```conf
+<VirtualHost *:80>
+ RewriteEngine On
+ ProxyRequests Off
+ ProxyPreserveHost On
+
+ ServerName netdata.domain.tld
+
+ <Proxy *>
+ AllowOverride None
+ AuthType Basic
+ AuthName "Protected site"
+ AuthUserFile /etc/apache2/.htpasswd
+ Require valid-user
+ </Proxy>
+
+ ProxyPass "/" "http://localhost:19999/" connectiontimeout=5 timeout=30 keepalive=on
+ ProxyPassReverse "/" "http://localhost:19999/"
+
+ ErrorLog ${APACHE_LOG_DIR}/netdata-error.log
+ CustomLog ${APACHE_LOG_DIR}/netdata-access.log combined
+</VirtualHost>
+```
+
+Note: Changes are applied by reloading or restarting Apache.
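+
+A minimal sketch for applying changes safely, assuming a systemd-based system (adjust the service name and commands to your distribution):
+
+```sh
+# validate the configuration first, then reload only if it parses cleanly
+sudo apachectl configtest && sudo systemctl reload apache2
+```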
+
+## Configuration of Content Security Policy
+
+If you want to enable a Content Security Policy (CSP) within your Apache, you should consider some special requirements of the headers. Modify your configuration like this:
+
+```
+ Header always set Content-Security-Policy "default-src http: 'unsafe-inline' 'self' 'unsafe-eval'; script-src http: 'unsafe-inline' 'self' 'unsafe-eval'; style-src http: 'self' 'unsafe-inline'"
+```
+
+Note: Changes are applied by reloading or restarting Apache.
+
+## Using Netdata with Apache's `mod_evasive` module
+
+The `mod_evasive` Apache module helps system administrators protect their web server from brute force and distributed
+denial of service (DDoS) attacks.
+
+Because Netdata sends a request to the web server for every chart update, it's normal to create 20-30 requests per
+second, per client. If you're using `mod_evasive` on your Apache web server, this volume of requests will trigger the
+module's protection, and your dashboard will become unresponsive. You may even begin to see 403 errors.
+
+To mitigate this issue, you will need to change the value of the `DOSPageCount` option in your `mod_evasive.conf` file,
+which can typically be found at `/etc/httpd/conf.d/mod_evasive.conf` or `/etc/apache2/mods-enabled/evasive.conf`.
+
+The `DOSPageCount` option sets the limit on the number of requests from a single IP address for the same page per page
+interval, which is usually 1 second. The default value is `2` requests per second. Clearly, Netdata's typical usage will
+exceed that threshold, and `mod_evasive` will add your IP address to a blocklist.
+
+Our users have found success by setting `DOSPageCount` to `30`. Try this, and raise the value if you continue to see 403
+errors while accessing the dashboard.
+
+```conf
+DOSPageCount 30
+```
+
+Restart Apache with `sudo service apache2 restart`, or the appropriate method to restart services on your system, to
+reload its configuration with your new values.
+
+
+### Virtual host
+
+To adjust the `DOSPageCount` for a specific virtual host, open your virtual host config, which can be found at
+`/etc/httpd/conf/sites-available/my-domain.conf` or `/etc/apache2/sites-available/my-domain.conf` and add the
+following:
+
+```conf
+<VirtualHost *:80>
+ ...
+ # Increase the DOSPageCount to prevent 403 errors and IP addresses being blocked.
+ <IfModule mod_evasive20.c>
+ DOSPageCount 30
+ </IfModule>
+</VirtualHost>
+```
+
+See issues [#2011](https://github.com/netdata/netdata/issues/2011) and
+[#7568](https://github.com/netdata/netdata/issues/7568) for more information.
+
+# Netdata configuration
+
+You might edit `/etc/netdata/netdata.conf` to optimize your setup a bit. To apply these changes, you need to restart Netdata.
+
+## Response compression
+
+If you plan to use Netdata exclusively via apache, you can gain some performance by preventing double compression of its output (Netdata compresses its response, apache re-compresses it) by editing `/etc/netdata/netdata.conf` and setting:
+
+```
+[web]
+ enable gzip compression = no
+```
+
+Once you disable compression in Netdata (and restart it), verify that you still receive compressed responses from apache (this is important - compressed responses make the charts snappier).
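+
+One way to verify this, sketched with the hypothetical `virtual.host` name used in the examples above - the response headers should include `Content-Encoding: gzip`, added by apache:
+
+```sh
+# request an API endpoint through apache and inspect only the response headers
+curl -s -o /dev/null -D - -H 'Accept-Encoding: gzip' http://virtual.host/netdata/api/v1/info | grep -i '^content-encoding'
+```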
+
+## Limit direct access to Netdata
+
+You would also need to instruct Netdata to listen only on `localhost`, `127.0.0.1` or `::1`.
+
+```
+[web]
+ bind to = localhost
+```
+
+or
+
+```
+[web]
+ bind to = 127.0.0.1
+```
+
+or
+
+```
+[web]
+ bind to = ::1
+```
+
+---
+
+You can also use a unix domain socket, which provides a faster route between apache and Netdata:
+
+```
+[web]
+ bind to = unix:/tmp/netdata.sock
+```
+
+Apache 2.4.24+ cannot read from `/tmp`, so create your socket in `/var/run/netdata`:
+
+```
+[web]
+ bind to = unix:/var/run/netdata/netdata.sock
+```
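+
+A minimal sketch for preparing that directory, assuming Netdata runs as the `netdata` user (adjust the owner to match your installation):
+
+```sh
+# create the runtime directory and let the netdata user create its socket there
+sudo mkdir -p /var/run/netdata
+sudo chown netdata:netdata /var/run/netdata
+```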
+
+_note: Netdata v1.8+ supports unix domain sockets_
+
+On the apache side, prefix the second argument of `ProxyPass` with `unix:/tmp/netdata.sock|`, like this:
+
+```
+ProxyPass "/netdata/" "unix:/tmp/netdata.sock|http://localhost:19999/" connectiontimeout=5 timeout=30 keepalive=on
+```
+
+---
+
+If your apache server is not on localhost, you can set:
+
+```
+[web]
+ bind to = *
+ allow connections from = IP_OF_APACHE_SERVER
+```
+
+*note: Netdata v1.9+ supports `allow connections from`*
+
+`allow connections from` accepts [Netdata simple patterns](/libnetdata/simple_pattern/README.md) to match against the connection IP address.
+
+## prevent the double access.log
+
+apache logs accesses and Netdata logs them too. You can prevent Netdata from generating its own access log by setting this in `/etc/netdata/netdata.conf`:
+
+```
+[global]
+ access log = none
+```
+
+## Troubleshooting mod_proxy
+
+Make sure the requests reach Netdata by examining `/var/log/netdata/access.log` (a sketch for doing this follows the list below).
+
+1. If the requests do not reach Netdata, your apache does not forward them.
+2. If the requests reach Netdata but the URLs are wrong, you have not rewritten them properly.
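+
+A minimal sketch for watching requests arrive, assuming the default log path and the hypothetical `virtual.host` name from the examples above:
+
+```sh
+# in one terminal, follow Netdata's access log
+sudo tail -f /var/log/netdata/access.log
+# in another, send a request through apache and watch for it in the log
+curl -s http://virtual.host/netdata/api/v1/info > /dev/null
+```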
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2FRunning-behind-apache&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/docs/Running-behind-caddy.md b/docs/Running-behind-caddy.md
new file mode 100644
index 0000000..c1d5750
--- /dev/null
+++ b/docs/Running-behind-caddy.md
@@ -0,0 +1,34 @@
+<!--
+title: "Netdata via Caddy"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/docs/Running-behind-caddy.md
+-->
+
+# Netdata via Caddy
+
+To run Netdata via [Caddy's proxying](https://caddyserver.com/docs/proxy), set your Caddyfile up like this:
+
+```caddyfile
+netdata.domain.tld {
+ proxy / localhost:19999
+}
+```
+
+Other directives can be added between the curly brackets as needed.
+
+To run Netdata in a subfolder:
+
+```caddyfile
+netdata.domain.tld {
+ proxy /netdata/ localhost:19999 {
+ without /netdata
+ }
+}
+```
+
+## limit direct access to Netdata
+
+You would also need to instruct Netdata to listen only to `127.0.0.1` or `::1`.
+
+To limit access to Netdata only from localhost, set `bind socket to IP = 127.0.0.1` or `bind socket to IP = ::1` in `/etc/netdata/netdata.conf`.
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2FRunning-behind-caddy&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/docs/Running-behind-haproxy.md b/docs/Running-behind-haproxy.md
new file mode 100644
index 0000000..d4b09f8
--- /dev/null
+++ b/docs/Running-behind-haproxy.md
@@ -0,0 +1,293 @@
+<!--
+title: "Netdata via HAProxy"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/docs/Running-behind-haproxy.md
+-->
+
+# Netdata via HAProxy
+
+> HAProxy is a free, very fast and reliable solution offering high availability, load balancing,
+> and proxying for TCP and HTTP-based applications. It is particularly suited for very high traffic web sites
+> and powers quite a number of the world's most visited ones.
+
+If Netdata is running on a host that also runs HAProxy, rather than connecting to Netdata via a port number, a domain name can
+be pointed at HAProxy, and HAProxy can redirect connections to the Netdata port. This makes it possible to reach
+Netdata at `https://example.com` or `https://example.com/netdata/`, which is a much nicer experience than
+`http://example.com:19999`.
+
+To proxy requests from [HAProxy](https://github.com/haproxy/haproxy) to Netdata,
+the following configuration can be used:
+
+## Default Configuration
+
+For all examples, set the mode to `http`:
+
+```conf
+defaults
+ mode http
+```
+
+## Simple Configuration
+
+A simple example where the base URL, say `http://example.com`, is used with no subpath:
+
+### Frontend
+
+Create a frontend to receive the request.
+
+```conf
+frontend http_frontend
+ ## HTTP ipv4 and ipv6 on all ips ##
+ bind :::80 v4v6
+
+ default_backend netdata_backend
+```
+
+### Backend
+
+Create the Netdata backend which will send requests to port `19999`.
+
+```conf
+backend netdata_backend
+ option forwardfor
+ server netdata_local 127.0.0.1:19999
+
+ http-request set-header Host %[src]
+ http-request set-header X-Forwarded-For %[src]
+ http-request set-header X-Forwarded-Port %[dst_port]
+ http-request set-header Connection "keep-alive"
+```
+
+## Configuration with subpath
+
+An example where the base URL is used with a subpath `/netdata/`:
+
+### Frontend
+
+To use a subpath, create an ACL, which will set a variable based on the subpath.
+
+```conf
+frontend http_frontend
+ ## HTTP ipv4 and ipv6 on all ips ##
+ bind :::80 v4v6
+
+ # URL begins with /netdata
+ acl is_netdata url_beg /netdata
+
+ # if trailing slash is missing, redirect to /netdata/
+ http-request redirect scheme https drop-query append-slash if is_netdata ! { path_beg /netdata/ }
+
+ ## Backends ##
+ use_backend netdata_backend if is_netdata
+
+ # Other requests go here (optional)
+ # put netdata_backend here if no others are used
+ default_backend www_backend
+```
+
+### Backend
+
+Same as the simple example, except that `/netdata/` is stripped from the request path with a regex.
+
+```conf
+backend netdata_backend
+ option forwardfor
+ server netdata_local 127.0.0.1:19999
+
+ http-request set-path %[path,regsub(^/netdata/,/)]
+
+ http-request set-header Host %[src]
+ http-request set-header X-Forwarded-For %[src]
+ http-request set-header X-Forwarded-Port %[dst_port]
+ http-request set-header Connection "keep-alive"
+```
+
+## Using TLS communication
+
+TLS can be used by adding port `443` and a cert to the frontend.
+This example will only use Netdata if the host matches `example.com` (replace it with your domain).
+
+### Frontend
+
+This frontend uses a certificate list.
+
+```conf
+frontend https_frontend
+ ## HTTP ##
+ bind :::80 v4v6
+ # Redirect all HTTP traffic to HTTPS with 301 redirect
+ redirect scheme https code 301 if !{ ssl_fc }
+
+ ## HTTPS ##
+ # Bind to all v4/v6 addresses, use a list of certs in file
+ bind :::443 v4v6 ssl crt-list /etc/letsencrypt/certslist.txt
+
+ ## ACL ##
+ # Optionally check host for Netdata
+ acl is_example_host hdr_sub(host) -i example.com
+
+ ## Backends ##
+ use_backend netdata_backend if is_example_host
+ # Other requests go here (optional)
+ default_backend www_backend
+```
+
+In the cert list file, place a mapping from a certificate file to the domain used:
+
+`/etc/letsencrypt/certslist.txt`:
+
+```txt
+example.com /etc/letsencrypt/live/example.com/example.com.pem
+```
+
+The file `/etc/letsencrypt/live/example.com/example.com.pem` should contain the certificate and
+key (in that order) concatenated into a `.pem` file:
+
+```sh
+cat /etc/letsencrypt/live/example.com/fullchain.pem \
+ /etc/letsencrypt/live/example.com/privkey.pem > \
+ /etc/letsencrypt/live/example.com/example.com.pem
+```
+
+### Backend
+
+Same as the simple example, except that the `X-Forwarded-Proto` header is set to `https`.
+
+```conf
+backend netdata_backend
+ option forwardfor
+ server netdata_local 127.0.0.1:19999
+
+ http-request add-header X-Forwarded-Proto https
+ http-request set-header Host %[src]
+ http-request set-header X-Forwarded-For %[src]
+ http-request set-header X-Forwarded-Port %[dst_port]
+ http-request set-header Connection "keep-alive"
+```
+
+## Enable authentication
+
+To use basic HTTP Authentication, create an authentication list:
+
+```conf
+# HTTP Auth
+userlist basic-auth-list
+ group is-admin
+ # Plaintext password
+ user admin password passwordhere groups is-admin
+```
+
+You can create a hashed password using the `mkpasswd` utility.
+
+```sh
+printf "passwordhere" | mkpasswd --stdin --method=sha-256
+$5$l7Gk0VPIpKO$f5iEcxvjfdF11khw.utzSKqP7W.0oq8wX9nJwPLwzy1
+```
+
+Replace `passwordhere` with the hash:
+
+```conf
+user admin password $5$l7Gk0VPIpKO$f5iEcxvjfdF11khw.utzSKqP7W.0oq8wX9nJwPLwzy1 groups is-admin
+```
+
+Now add at the top of the backend:
+
+```conf
+acl devops-auth http_auth_group(basic-auth-list) is-admin
+http-request auth realm netdata_local unless devops-auth
+```
+
+## Full Example
+
+Full example configuration with HTTP auth over TLS with subpath:
+
+```conf
+global
+ maxconn 20000
+
+ log /dev/log local0
+ log /dev/log local1 notice
+ user haproxy
+ group haproxy
+ pidfile /run/haproxy.pid
+
+ stats socket /run/haproxy/admin.sock mode 660 level admin expose-fd listeners
+ stats timeout 30s
+ daemon
+
+ tune.ssl.default-dh-param 4096 # Max size of DHE key
+
+ # Default ciphers to use on SSL-enabled listening sockets.
+ ssl-default-bind-ciphers ECDH+AESGCM:DH+AESGCM:ECDH+AES256:DH+AES256:ECDH+AES128:DH+AES:RSA+AESGCM:RSA+AES:!aNULL:!MD5:!DSS
+ ssl-default-bind-options no-sslv3
+
+defaults
+ log global
+ mode http
+ option httplog
+ option dontlognull
+ timeout connect 5000
+ timeout client 50000
+ timeout server 50000
+ errorfile 400 /etc/haproxy/errors/400.http
+ errorfile 403 /etc/haproxy/errors/403.http
+ errorfile 408 /etc/haproxy/errors/408.http
+ errorfile 500 /etc/haproxy/errors/500.http
+ errorfile 502 /etc/haproxy/errors/502.http
+ errorfile 503 /etc/haproxy/errors/503.http
+ errorfile 504 /etc/haproxy/errors/504.http
+
+frontend https_frontend
+ ## HTTP ##
+ bind :::80 v4v6
+ # Redirect all HTTP traffic to HTTPS with 301 redirect
+ redirect scheme https code 301 if !{ ssl_fc }
+
+ ## HTTPS ##
+ # Bind to all v4/v6 addresses, use a list of certs in file
+ bind :::443 v4v6 ssl crt-list /etc/letsencrypt/certslist.txt
+
+ ## ACL ##
+ # Optionally check host for Netdata
+ acl is_example_host hdr_sub(host) -i example.com
+ acl is_netdata url_beg /netdata
+
+ http-request redirect scheme https drop-query append-slash if is_netdata ! { path_beg /netdata/ }
+
+ ## Backends ##
+ use_backend netdata_backend if is_example_host is_netdata
+ default_backend www_backend
+
+# HTTP Auth
+userlist basic-auth-list
+ group is-admin
+ # Hashed password
+ user admin password $5$l7Gk0VPIpKO$f5iEcxvjfdF11khw.utzSKqP7W.0oq8wX9nJwPLwzy1 groups is-admin
+
+## Default server(s) (optional)##
+backend www_backend
+ mode http
+ balance roundrobin
+ timeout connect 5s
+ timeout server 30s
+ timeout queue 30s
+
+ http-request add-header 'X-Forwarded-Proto: https'
+ server other_server 111.111.111.111:80 check
+
+backend netdata_backend
+ acl devops-auth http_auth_group(basic-auth-list) is-admin
+ http-request auth realm netdata_local unless devops-auth
+
+ option forwardfor
+ server netdata_local 127.0.0.1:19999
+
+ http-request set-path %[path,regsub(^/netdata/,/)]
+
+ http-request add-header X-Forwarded-Proto https
+ http-request set-header Host %[src]
+ http-request set-header X-Forwarded-For %[src]
+ http-request set-header X-Forwarded-Port %[dst_port]
+ http-request set-header Connection "keep-alive"
+```
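+
+Before reloading HAProxy with a configuration like the above, it is worth checking it for syntax errors - a minimal sketch, assuming the standard configuration path:
+
+```sh
+# validate the configuration; haproxy exits non-zero and reports the offending line on errors
+haproxy -c -f /etc/haproxy/haproxy.cfg
+```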
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2FRunning-behind-haproxy&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/docs/Running-behind-lighttpd.md b/docs/Running-behind-lighttpd.md
new file mode 100644
index 0000000..8649158
--- /dev/null
+++ b/docs/Running-behind-lighttpd.md
@@ -0,0 +1,71 @@
+<!--
+title: "Netdata via lighttpd v1.4.x"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/docs/Running-behind-lighttpd.md
+-->
+
+# Netdata via lighttpd v1.4.x
+
+Here is a config for accessing Netdata in a suburl via lighttpd 1.4.46 and newer:
+
+```txt
+$HTTP["url"] =~ "^/netdata/" {
+ proxy.server = ( "" => ("netdata" => ( "host" => "127.0.0.1", "port" => 19999 )))
+ proxy.header = ( "map-urlpath" => ( "/netdata/" => "/") )
+}
+```
+
+If you have an older lighttpd you have to use a chain (such as below), as explained [in this stackoverflow answer](http://stackoverflow.com/questions/14536554/lighttpd-configuration-to-proxy-rewrite-from-one-domain-to-another).
+
+```txt
+$HTTP["url"] =~ "^/netdata/" {
+ proxy.server = ( "" => ("" => ( "host" => "127.0.0.1", "port" => 19998 )))
+}
+
+$SERVER["socket"] == ":19998" {
+ url.rewrite-once = ( "^/netdata(.*)$" => "/$1" )
+ proxy.server = ( "" => ( "" => ( "host" => "127.0.0.1", "port" => 19999 )))
+}
+```
+
+---
+
+If the only thing the server is exposing via the web is Netdata (and thus no suburl rewriting is required),
+then you can get away with just:
+
+```
+proxy.server = ( "" => ( ( "host" => "127.0.0.1", "port" => 19999 )))
+```
+
+Though if it's public facing, you might then want to put some authentication on it. htdigest support
+looks like this:
+
+```
+auth.backend = "htdigest"
+auth.backend.htdigest.userfile = "/etc/lighttpd/lighttpd.htdigest"
+auth.require = ( "" => ( "method" => "digest",
+ "realm" => "netdata",
+ "require" => "valid-user"
+ )
+ )
+```
+
+Other auth methods, and more info on htdigest, can be found in lighttpd's [mod_auth docs](http://redmine.lighttpd.net/projects/lighttpd/wiki/Docs_ModAuth).
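+
+A minimal sketch for creating that file with Apache's `htdigest` tool (shipped in `apache2-utils`), assuming its `user:realm:hash` output is accepted by your lighttpd build and using a hypothetical `admin` user for the `netdata` realm configured above:
+
+```sh
+# create the digest file and add a user for the "netdata" realm
+sudo htdigest -c /etc/lighttpd/lighttpd.htdigest netdata admin
+```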
+
+---
+
+It seems that lighttpd (or some versions of it) fails to proxy compressed web responses.
+To solve this issue, disable web response compression in Netdata.
+
+Open `/etc/netdata/netdata.conf` and set the following in the `[global]` section:
+
+```
+enable web responses gzip compression = no
+```
+
+## limit direct access to Netdata
+
+You would also need to instruct Netdata to listen only to `127.0.0.1` or `::1`.
+
+To limit access to Netdata only from localhost, set `bind socket to IP = 127.0.0.1` or `bind socket to IP = ::1` in `/etc/netdata/netdata.conf`.
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2FRunning-behind-lighttpd&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/docs/Running-behind-nginx.md b/docs/Running-behind-nginx.md
new file mode 100644
index 0000000..2f47447
--- /dev/null
+++ b/docs/Running-behind-nginx.md
@@ -0,0 +1,264 @@
+<!--
+title: "Running Netdata behind Nginx"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/docs/Running-behind-nginx.md
+-->
+
+# Running Netdata behind Nginx
+
+## Intro
+
+[Nginx](https://nginx.org/en/) is an HTTP and reverse proxy server, a mail proxy server, and a generic TCP/UDP proxy server used to host websites and applications of all sizes.
+
+The software is known for its low impact on memory resources, high scalability, and its modular, event-driven architecture which can offer secure, predictable performance.
+
+## Why Nginx
+
+- By default, Nginx is fast and lightweight out of the box.
+
+- Nginx is useful when you want to access different instances of Netdata from a single server.
+
+- You can password-protect access to Netdata until distributed authentication is implemented via the Netdata Cloud sign-in mechanism.
+
+- Until v1.16.0, which introduced TLS (HTTPS) support, a proxy was necessary to encrypt the communication to Netdata.
+
+## Nginx configuration file
+
+All Nginx configurations can be found in the `/etc/nginx/` directory. The main configuration file is `/etc/nginx/nginx.conf`. Website or app-specific configurations can be found in the `/etc/nginx/sites-available/` directory.
+
+Configuration options in Nginx are known as directives. Directives are organized into groups known as blocks or contexts. The two terms can be used interchangeably.
+
+Depending on your installation source, you’ll find an example configuration file at `/etc/nginx/conf.d/default.conf` or `/etc/nginx/sites-enabled/default`; in some cases you may have to manually create the `sites-available` and `sites-enabled` directories.
+
+You can edit the Nginx configuration file with Nano, Vim or any other text editors you are comfortable with.
+
+After making changes to the configuration files:
+
+- Test the Nginx configuration with `nginx -t`.
+
+- Restart Nginx to apply the changes with `/etc/init.d/nginx restart` or `service nginx restart`, as shown in the sketch below.
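+
+A quick sketch of that workflow (service manager commands vary by distribution):
+
+```sh
+# validate the configuration before applying it
+nginx -t
+
+# apply the changes with whichever command your init system provides
+service nginx restart
+```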
+
+## Ways to access Netdata via Nginx
+
+### As a virtual host
+
+With this method, instead of `SERVER_IP_ADDRESS:19999`, the Netdata dashboard can be accessed via a human-readable URL such as `netdata.example.com`, as used in the configuration below.
+
+```conf
+upstream backend {
+ # the Netdata server
+ server 127.0.0.1:19999;
+ keepalive 64;
+}
+
+server {
+ # nginx listens to this
+ listen 80;
+
+ # the virtual host name of this
+ server_name netdata.example.com;
+
+ location / {
+ proxy_set_header X-Forwarded-Host $host;
+ proxy_set_header X-Forwarded-Server $host;
+ proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+ proxy_pass http://backend;
+ proxy_http_version 1.1;
+ proxy_pass_request_headers on;
+ proxy_set_header Connection "keep-alive";
+ proxy_store off;
+ }
+}
+```
+
+### As a subfolder to an existing virtual host
+
+This method is recommended when Netdata is to be served from a subfolder (or directory).
+In this case, the virtual host `netdata.example.com` already exists and Netdata has to be accessed via `netdata.example.com/netdata/`.
+
+```conf
+upstream netdata {
+ server 127.0.0.1:19999;
+ keepalive 64;
+}
+
+server {
+ listen 80;
+
+ # the virtual host name where this subfolder should be exposed
+ #server_name netdata.example.com;
+
+ location = /netdata {
+ return 301 /netdata/;
+ }
+
+ location ~ /netdata/(?<ndpath>.*) {
+ proxy_redirect off;
+ proxy_set_header Host $host;
+
+ proxy_set_header X-Forwarded-Host $host;
+ proxy_set_header X-Forwarded-Server $host;
+ proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+ proxy_http_version 1.1;
+ proxy_pass_request_headers on;
+ proxy_set_header Connection "keep-alive";
+ proxy_store off;
+ proxy_pass http://netdata/$ndpath$is_args$args;
+
+ gzip on;
+ gzip_proxied any;
+ gzip_types *;
+ }
+}
+```
+
+### As a subfolder for multiple Netdata servers, via one Nginx
+
+This is the recommended configuration when one Nginx will be used to manage multiple Netdata servers via subfolders.
+
+```conf
+upstream backend-server1 {
+ server 10.1.1.103:19999;
+ keepalive 64;
+}
+upstream backend-server2 {
+ server 10.1.1.104:19999;
+ keepalive 64;
+}
+
+server {
+ listen 80;
+
+ # the virtual host name where this subfolder should be exposed
+ #server_name netdata.example.com;
+
+ location ~ /netdata/(?<behost>.*?)/(?<ndpath>.*) {
+ proxy_set_header X-Forwarded-Host $host;
+ proxy_set_header X-Forwarded-Server $host;
+ proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+ proxy_http_version 1.1;
+ proxy_pass_request_headers on;
+ proxy_set_header Connection "keep-alive";
+ proxy_store off;
+ proxy_pass http://backend-$behost/$ndpath$is_args$args;
+
+ gzip on;
+ gzip_proxied any;
+ gzip_types *;
+ }
+
+ # make sure there is a trailing slash at the browser
+ # or the URLs will be wrong
+ location ~ /netdata/(?<behost>.*) {
+ return 301 /netdata/$behost/;
+ }
+}
+```
+
+Of course, you can add as many backend servers as you like.
+
+Using the above configuration, you can access Netdata on the backend servers like this:
+
+- `http://netdata.example.com/netdata/server1/` to reach `backend-server1`
+- `http://netdata.example.com/netdata/server2/` to reach `backend-server2`
+
+### Encrypt the communication between Nginx and Netdata
+
+If Netdata's web server has been [configured to use TLS](/web/server/README.md#enabling-tls-support), you must
+specify inside the Nginx configuration that the final destination is using TLS. To do this, append the
+following parameters to your `nginx.conf`:
+
+```conf
+proxy_set_header X-Forwarded-Proto https;
+proxy_pass https://localhost:19999;
+```
+
+Optionally, you can also [enable TLS/SSL on Nginx](http://nginx.org/en/docs/http/configuring_https_servers.html). This way, you encrypt not only the communication between Nginx and Netdata, but also the communication between the user and Nginx.
+
+If Nginx is not configured as described here, you will probably receive the error `SSL_ERROR_RX_RECORD_TOO_LONG`.
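+
+As a hedged sketch, a minimal HTTPS-terminating server block might look like the following (the `server_name` and certificate paths are hypothetical; adjust them to your environment):
+
+```conf
+server {
+    listen 443 ssl;
+    server_name netdata.example.com;
+
+    # hypothetical certificate paths
+    ssl_certificate     /etc/nginx/ssl/netdata.example.com.crt;
+    ssl_certificate_key /etc/nginx/ssl/netdata.example.com.key;
+
+    location / {
+        # tell Netdata the original request was HTTPS
+        proxy_set_header X-Forwarded-Proto https;
+        # Netdata itself is listening with TLS enabled
+        proxy_pass https://127.0.0.1:19999;
+    }
+}
+```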
+
+### Enable authentication
+
+Create an authentication file to enable basic authentication via Nginx; this secures your Netdata dashboard.
+
+If you don't have an authentication file, you can use the following command:
+
+```sh
+printf "yourusername:$(openssl passwd -apr1)" > /etc/nginx/passwords
+```
+
+Then enable authentication inside your `server` block:
+
+```conf
+server {
+ # ...
+ auth_basic "Protected";
+ auth_basic_user_file passwords;
+ # ...
+}
+```
+
+## Limit direct access to Netdata
+
+If your Nginx is on `localhost`, you can use this to protect your Netdata:
+
+```
+[web]
+ bind to = 127.0.0.1 ::1
+```
+
+---
+
+You can also use a Unix domain socket, which provides a faster route between Nginx and Netdata:
+
+```
+[web]
+ bind to = unix:/var/run/netdata/netdata.sock
+```
+
+*Note: Netdata v1.8+ supports Unix domain sockets.*
+
+At the Nginx side, use something like this to use the same unix domain socket:
+
+```conf
+upstream backend {
+ server unix:/var/run/netdata/netdata.sock;
+ keepalive 64;
+}
+```
+
+---
+
+If your Nginx server is not on localhost, you can set:
+
+```
+[web]
+ bind to = *
+ allow connections from = IP_OF_NGINX_SERVER
+```
+
+*Note: Netdata v1.9+ supports `allow connections from`.*
+
+`allow connections from` accepts [Netdata simple patterns](/libnetdata/simple_pattern/README.md) to match against the
+connection IP address.
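+
+For example, a sketch that allows connections only from localhost and a hypothetical private range:
+
+```
+[web]
+    bind to = *
+    allow connections from = localhost 10.1.1.*
+```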
+
+## Prevent the double access.log
+
+Nginx logs accesses and Netdata logs them too. You can prevent Netdata from generating its access log by setting this in `/etc/netdata/netdata.conf`:
+
+```
+[global]
+ access log = none
+```
+
+## SELinux
+
+If you get a 502 Bad Gateway error, check your Nginx error log:
+
+```sh
+# cat /var/log/nginx/error.log:
+2016/09/09 12:34:05 [crit] 5731#5731: *1 connect() to 127.0.0.1:19999 failed (13: Permission denied) while connecting to upstream, client: 1.2.3.4, server: netdata.example.com, request: "GET / HTTP/2.0", upstream: "http://127.0.0.1:19999/", host: "netdata.example.com"
+```
+
+If you see something like the above, chances are high that SELinux prevents Nginx from connecting to the backend server. To fix that, use this policy: `setsebool -P httpd_can_network_connect true`.
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2FRunning-behind-nginx&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/docs/a-github-star-is-important.md b/docs/a-github-star-is-important.md
new file mode 100644
index 0000000..d309d39
--- /dev/null
+++ b/docs/a-github-star-is-important.md
@@ -0,0 +1,24 @@
+<!--
+title: "A GitHub star is important"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/docs/a-github-star-is-important.md
+-->
+
+# A GitHub star is important
+
+**GitHub stars** allow Netdata to expand its reach and its community, and especially to attract people with the
+skills and willingness to contribute to it.
+
+Compared to its first release, Netdata is now **twice as fast**, has settled all its known bugs, and offers a lot more
+functionality. This happened because a lot of people find it useful, use it daily at home and work, **rely on it** and
+**contribute to it**.
+
+**GitHub stars** also **motivate** us. They state that you find our work **useful**. They give us strength to continue,
+to work **harder** to make it even **better**.
+
+So, give Netdata a **GitHub star**, at the top right of this page.
+
+Thank you!
+
+Costa Tsaousis
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2Fa-github-star-is-important&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/docs/agent-cloud.md b/docs/agent-cloud.md
new file mode 100644
index 0000000..061b847
--- /dev/null
+++ b/docs/agent-cloud.md
@@ -0,0 +1,79 @@
+<!--
+title: "Use the Agent with Netdata Cloud"
+date: 2020-05-04
+custom_edit_url: https://github.com/netdata/netdata/edit/master/docs/agent-cloud.md
+-->
+
+# Use the Agent with Netdata Cloud
+
+While the Netdata Agent is an enormously powerful _distributed_ health monitoring and performance troubleshooting tool,
+many of its users need to monitor dozens or hundreds of systems at the same time. That's why we built Netdata Cloud, a
+hosted web interface that gives you real-time visibility into your entire infrastructure.
+
+There are two main ways to use your Agent(s) with Netdata Cloud. You can use both these methods simultaneously, or just
+one, based on your needs:
+
+- Use Netdata Cloud's web interface for monitoring an entire infrastructure, with any number of Agents, in one
+ centralized dashboard.
+- Use **Visited nodes** to quickly navigate between the dashboards of nodes you've recently visited.
+
+## Monitor an infrastructure with Netdata Cloud
+
+We designed Netdata Cloud to help you see health and performance metrics, plus active alarms, in a single interface.
+Here's what a small infrastructure might look like:
+
+![Animated GIF of Netdata
+Cloud](https://user-images.githubusercontent.com/1153921/80828986-1ebb3b00-8b9b-11ea-957f-2c8d0d009e44.gif)
+
+[Read more about Netdata Cloud](https://learn.netdata.cloud/docs/cloud/) to better understand how it gives you real-time
+visibility into your entire infrastructure, and why you might consider using it.
+
+Next, [get started in 5 minutes](https://learn.netdata.cloud/docs/cloud/get-started/), or read our [claiming
+reference](/claim/README.md) for a complete investigation of Cloud's security and encryption features, plus instructions
+for Docker containers.
+
+## Navigate between dashboards with Visited nodes
+
+If you don't want to use Netdata Cloud's web interface, you can still connect multiple nodes through the **Visited
+nodes** menu, which appears on the left-hand side of the dashboard.
+
+You can use the Visited nodes menu to navigate between the dashboards of many different Agent-monitored systems quickly.
+
+To add nodes to your Visited nodes menu, you first need to navigate to that node's dashboard, then click the **Sign in**
+button at the top of the dashboard. On the screen that appears, which states your node is requesting access to your
+Netdata Cloud account, sign in with your preferred method.
+
+Cloud redirects you back to your node's dashboard, which is now connected to your Netdata Cloud account. You can now see
+the Visited nodes menu, which is populated by a single node.
+
+![An Agent's dashboard with the Visited nodes
+menu](https://user-images.githubusercontent.com/1153921/80830383-b6ba2400-8b9d-11ea-9eb2-379c7eccd22f.png)
+
+If you previously went through the Cloud onboarding process to create a Space and War Room, you will also see these in
+the Visited nodes menu. You can click on your Space or any of your War Rooms to navigate to Netdata Cloud and continue
+monitoring your infrastructure from there.
+
+![An Agent's dashboard with the Visited nodes menu, plus Spaces and War
+Rooms](https://user-images.githubusercontent.com/1153921/80830382-b6218d80-8b9d-11ea-869c-1170b95eeb4a.png)
+
+To add more Agents to your Visited nodes menu, visit them and sign in again. This process connects that node to your
+Cloud account and further populates the menu.
+
+Once you've added more than one node, you can use the menu to switch between various dashboards without remembering IP
+addresses or hostnames or saving bookmarks for every node you want to monitor.
+
+![Switching between dashboards with Visited
+nodes](https://user-images.githubusercontent.com/1153921/80831018-e158ac80-8b9e-11ea-882e-1d82cdc028cd.gif)
+
+## What's next?
+
+The Agent-Cloud integration is highly adaptable to the needs of any infrastructure or user. If you want to learn more
+about how you might want to use or configure Cloud, we recommend the following:
+
+- Get an overview of Cloud's features by reading [Cloud documentation](https://learn.netdata.cloud/docs/cloud/).
+- Follow the 5-minute [get started with Cloud](https://learn.netdata.cloud/docs/cloud/get-started/) guide to finish
+ onboarding and claim your first nodes.
+- Better understand how agents connect securely to the Cloud with [claiming](/claim/README.md) and [Agent-Cloud
+ link](/aclk/README.md) documentation.
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2Fagent-cloud&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/docs/anonymous-statistics.md b/docs/anonymous-statistics.md
new file mode 100644
index 0000000..70c502d
--- /dev/null
+++ b/docs/anonymous-statistics.md
@@ -0,0 +1,109 @@
+<!--
+title: "Anonymous statistics"
+description: "The Netdata Agent collects anonymous usage information by default and sends it to Google Analytics for quality assurance and product decisions."
+custom_edit_url: https://github.com/netdata/netdata/edit/master/docs/anonymous-statistics.md
+-->
+
+# Anonymous statistics
+
+Starting with v1.12, Netdata collects anonymous usage information by default and sends it to Google Analytics. We use
+the statistics gathered from this information for two purposes:
+
+1. **Quality assurance**, to help us understand if Netdata behaves as expected, and to help us classify repeated
+ issues with certain distributions or environments.
+
+2. **Usage statistics**, to help us interpret how people use the Netdata agent in real-world environments, and to help
+ us identify how our development/design decisions influence the community.
+
+Netdata sends information to Google Analytics via two different channels:
+
+- Google Tag Manager fires when you access an agent's dashboard.
+- The Netdata daemon executes the [`anonymous-statistics.sh`
+ script](https://github.com/netdata/netdata/blob/6469cf92724644f5facf343e4bdd76ac0551a418/daemon/anonymous-statistics.sh.in)
+ when Netdata starts, stops cleanly, or fails.
+
+You can opt out of sending anonymous statistics to Netdata through three different [opt-out mechanisms](#opt-out).
+
+## Google Tag Manager
+
+Google Tag Manager (GTM) is the recommended way of collecting statistics for new implementations using GA. Unlike the
+older API, the logic of when to send information to GA and what information to send is controlled centrally.
+
+We have configured GTM to trigger the tag only when the variable `anonymous_statistics` is true. The value of this
+variable is controlled via the [opt-out mechanism](#opt-out).
+
+To ensure anonymity of the stored information, we have configured GTM's GA variable "Fields to set" as follows:
+
+| Field name | Value |
+| -------------- | -------------------------------------------------- |
+| page | netdata-dashboard |
+| hostname | dashboard.my-netdata.io |
+| anonymizeIp | true |
+| title | Netdata dashboard |
+| campaignSource | {{machine_guid}} |
+| campaignMedium | web |
+| referrer | <http://dashboard.my-netdata.io> |
+| Page URL | <http://dashboard.my-netdata.io/netdata-dashboard> |
+| Page Hostname | <http://dashboard.my-netdata.io> |
+| Page Path | /netdata-dashboard |
+| location | <http://dashboard.my-netdata.io> |
+
+In addition, the Netdata-generated unique machine guid is sent to GA via a custom dimension.
+You can verify the effect of these settings by examining the GA `collect` request parameters.
+
+The only thing that's impossible for us to prevent from being **sent** is the URL in the "Referrer" header of the
+browser request to GA. However, the settings above ensure that all **stored** URLs and host names are anonymized.
+
+## Anonymous Statistics Script
+
+Every time the daemon is started or stopped, and every time a fatal condition is encountered, Netdata uses the anonymous
+statistics script to collect system information and send it to GA via an HTTP call. The information collected for all
+events is:
+
+- Netdata version
+- OS name, version, id, id_like
+- Kernel name, version, architecture
+- Virtualization technology
+- Containerization technology
+
+Furthermore, the FATAL event sends the Netdata process & thread name, along with the source code function, source code
+filename and source code line number of the fatal error.
+
+Starting with v1.21, we additionally collect information about:
+
+- Failures to build the dependencies required to use Cloud features.
+- Unavailability of Cloud features in an agent.
+- Failures to connect to the Cloud in case the agent has been [claimed](/claim/README.md). This includes error codes
+ to inform the Netdata team about the reason why the connection failed.
+
+To see exactly what is collected and how, you can review the script template `daemon/anonymous-statistics.sh.in`. The
+template is converted to a bash script called `anonymous-statistics.sh`, installed under the Netdata `plugins
+directory`, which is usually `/usr/libexec/netdata/plugins.d`.
+
+## Opt-out
+
+You can opt out of sending anonymous statistics to Netdata through three different opt-out mechanisms:
+
+**Create a file called `.opt-out-from-anonymous-statistics`.** This empty file, stored in your Netdata configuration
+directory (usually `/etc/netdata`), immediately stops the statistics script from running, and works with any type of
+installation, including manual, offline, and macOS installations. Create the file by running `touch
+.opt-out-from-anonymous-statistics` from your Netdata configuration directory.
+
+**Pass the option `--disable-telemetry` to any of the installer scripts in the [installation
+docs](/packaging/installer/README.md).** You can append this option during the initial installation or a manual
+update. You can also export the environment variable `DO_NOT_TRACK` with a non-zero or non-empty value
+(e.g., `export DO_NOT_TRACK=1`).
+
+When using Docker, **set your `DO_NOT_TRACK` environment variable to `1`.** You can set this variable with the following
+command: `export DO_NOT_TRACK=1`. When creating a container using Netdata's [Docker
+image](/packaging/docker/README.md#run-the-agent-with-the-docker-command) for the first time, this variable will disable
+the anonymous statistics script inside of the container.
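+
+As a hedged sketch, creating the container could look like this (flags other than `-e DO_NOT_TRACK=1` are illustrative; see the Docker docs linked above for the authoritative command):
+
+```sh
+# disable the anonymous statistics script inside the container
+docker run -d --name=netdata \
+  -p 19999:19999 \
+  -e DO_NOT_TRACK=1 \
+  netdata/netdata
+```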
+
+Each of these opt-out processes does the following:
+
+- Prevents the daemon from executing the anonymous statistics script.
+- Forces the anonymous statistics script to exit immediately.
+- Stops the Google Tag Manager Javascript snippet, which remains on the dashboard, from firing and sending any data to
+ Google Analytics.
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2Fanonymous-statistics&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/docs/collect/application-metrics.md b/docs/collect/application-metrics.md
new file mode 100644
index 0000000..e5f9039
--- /dev/null
+++ b/docs/collect/application-metrics.md
@@ -0,0 +1,80 @@
+<!--
+title: "Collect application metrics with Netdata"
+sidebar_label: "Application metrics"
+description: "Monitor and troubleshoot every application on your infrastructure with per-second metrics, zero configuration, and meaningful charts."
+custom_edit_url: https://github.com/netdata/netdata/edit/master/docs/collect/application-metrics.md
+-->
+
+# Collect application metrics with Netdata
+
+Netdata instantly collects per-second metrics from many different types of applications running on your systems, such as
+web servers, databases, message brokers, email servers, search platforms, and much more. Metrics collectors are
+pre-installed with every Netdata Agent and usually require zero configuration. Netdata also collects and visualizes
+resource utilization per application on Linux systems using `apps.plugin`.
+
+[**apps.plugin**](/collectors/apps.plugin/README.md) looks at the Linux process tree every second, much like `top` or
+`ps fax`, and collects resource utilization information on every running process. By reading the process tree, Netdata
+shows CPU, disk, networking, processes, and eBPF for every application or Linux user. Unlike `top` or `ps fax`, Netdata
+adds a layer of meaningful visualization on top of the process tree metrics, such as grouping applications into useful
+dimensions, and then creates per-application charts under the **Applications** section of a Netdata dashboard, per-user
+charts under **Users**, and per-user group charts under **User Groups**.
+
+Our most popular application collectors:
+
+- [Prometheus endpoints](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/prometheus): Gathers
+ metrics from one or more Prometheus endpoints that use the OpenMetrics exposition format. Autodetects more than 600
+ endpoints.
+- [Web server logs (Apache, NGINX)](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/weblog/):
+ Tail access logs and provide very detailed web server performance statistics. This module is able to parse 200k+
+ rows in less than half a second.
+- [MySQL](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/mysql/): Collect database global,
+ replication, and per-user statistics.
+- [Redis](/collectors/python.d.plugin/redis/): Monitor database status by reading the server's response to the `INFO`
+ command.
+- [Apache](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/apache/): Collect Apache web
+ server performance metrics via the `server-status?auto` endpoint.
+- [Nginx](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/nginx/): Monitor web server
+ status information by gathering metrics via `ngx_http_stub_status_module`.
+- [Postgres](/collectors/python.d.plugin/postgres/README.md): Collect database health and performance metrics.
+- [ElasticSearch](/collectors/python.d.plugin/elasticsearch/README.md): Collect search engine performance and health
+ statistics. Optionally collects per-index metrics.
+- [PHP-FPM](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/phpfpm/): Collect application
+ summary and processes health metrics by scraping the status page (`/status?full`).
+
+Our [supported collectors list](/collectors/COLLECTORS.md#service-and-application-collectors) shows all Netdata's
+application metrics collectors, including those for containers/k8s clusters.
+
+## Collect metrics from applications running on Windows
+
+Netdata is fully capable of collecting and visualizing metrics from applications running on Windows systems. The only
+caveat is that you must [install the Agent](/docs/get/README.md) on a separate system or a compatible VM because there
+is no native Windows version of the Netdata Agent.
+
+Once you have the Agent running on that separate system, you can follow the [enable and configure
+doc](/docs/collect/enable-configure.md) to tell the collector to look for exposed metrics on the Windows system's IP
+address or hostname, plus the applicable port.
+
+For example, if you have a MySQL database with a root password of `my-secret-pw` running on a Windows system with the IP
+address `203.0.113.0`, you can configure the [MySQL
+collector](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/mysql) to look at `203.0.113.0:3306`:
+
+```yml
+jobs:
+ - name: local
+ dsn: root:my-secret-pw@tcp(203.0.113.0:3306)/
+```
+
+This same logic applies to any application in our [supported collectors
+list](/collectors/COLLECTORS.md#service-and-application-collectors) that can run on Windows.
+
+## What's next?
+
+If you haven't yet seen the [supported collectors list](/collectors/COLLECTORS.md) give it a once-over for any
+additional applications you may want to monitor using Netdata's native collectors, or the [generic Prometheus
+collector](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/prometheus).
+
+Collecting all the available metrics on your nodes, and across your entire infrastructure, is just one piece of the
+puzzle. Next, learn more about Netdata's famous real-time visualizations by [seeing an overview of your
+infrastructure](/docs/visualize/overview-infrastructure.md) using Netdata Cloud.
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2Fcollect%2Fapplication-metrics&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/docs/collect/container-metrics.md b/docs/collect/container-metrics.md
new file mode 100644
index 0000000..b5bb9da
--- /dev/null
+++ b/docs/collect/container-metrics.md
@@ -0,0 +1,99 @@
+<!--
+title: "Collect container metrics with Netdata"
+sidebar_label: "Container metrics"
+description: "Use Netdata to collect per-second utilization and application-level metrics from Linux/Docker containers and Kubernetes clusters."
+custom_edit_url: https://github.com/netdata/netdata/edit/master/docs/collect/container-metrics.md
+-->
+
+# Collect container metrics with Netdata
+
+Thanks to close integration with Linux cgroups and the virtual files it maintains under `/sys/fs/cgroup`, Netdata can
+monitor the health, status, and resource utilization of many different types of Linux containers.
+
+Netdata uses [cgroups.plugin](/collectors/cgroups.plugin/README.md) to poll `/sys/fs/cgroup` and convert the raw data
+into human-readable metrics and meaningful visualizations. Through cgroups, Netdata is compatible with **all Linux
+containers**, such as Docker, LXC, LXD, Libvirt, systemd-nspawn, and more. Read more about [Docker-specific
+monitoring](#collect-docker-metrics) below.
+
+Netdata also has robust **Kubernetes monitoring** support thanks to a
+[Helm chart](/packaging/installer/methods/kubernetes.md) to automate deployment, collectors for k8s agent services, and
+robust [service discovery](https://github.com/netdata/agent-service-discovery/#service-discovery) to monitor the
+services running inside of pods in your k8s cluster. Read more about [Kubernetes
+monitoring](#collect-kubernetes-metrics) below.
+
+A handful of additional collectors gather metrics from container-related services, such as
+[dockerd](/collectors/python.d.plugin/dockerd/README.md) or [Docker
+Engine](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/docker_engine/). You can find all
+container collectors in our supported collectors list under the
+[containers/VMs](/collectors/COLLECTORS.md#containers-and-vms) and
+[Kubernetes](/collectors/COLLECTORS.md#containers-and-vms) headings.
+
+## Collect Docker metrics
+
+Netdata has robust Docker monitoring thanks to the aforementioned
+[cgroups.plugin](/collectors/cgroups.plugin/README.md). By polling cgroups every second, Netdata can produce meaningful
+visualizations about the CPU, memory, disk, and network utilization of all running containers on the host system with
+zero configuration.
+
+Netdata also collects metrics from applications running inside of Docker containers. For example, if you create a MySQL
+database container using `docker run --name some-mysql -e MYSQL_ROOT_PASSWORD=my-secret-pw -d mysql:tag`, it exposes
+metrics on port 3306. You can configure the [MySQL
+collector](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/mysql) to look at `127.0.0.1:3306` for
+MySQL metrics:
+
+```yml
+jobs:
+ - name: local
+ dsn: root:my-secret-pw@tcp(127.0.0.1:3306)/
+```
+
+Netdata then collects metrics not only from the container itself, but also dozens of [MySQL-specific
+metrics](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/mysql#charts).
+
+### Collect metrics from applications running in Docker containers
+
+You could use this technique to monitor an entire infrastructure of Docker containers. The same [enable and
+configure](/docs/collect/enable-configure.md) procedures apply whether an application runs on the host system or inside
+a container. You may need to configure the target endpoint if it's not the application's default.
+
+Netdata can even [run in a Docker container](/packaging/docker/README.md) itself, and then collect metrics about the
+host system, its own container with cgroups, and any applications you want to monitor.
+
+See our [application metrics doc](/docs/collect/application-metrics.md) for details about Netdata's application metrics
+collection capabilities.
+
+## Collect Kubernetes metrics
+
+We already have a few complementary tools and collectors for monitoring the many layers of a Kubernetes cluster,
+_entirely for free_. These methods work together to help you troubleshoot performance or availability issues across
+your k8s infrastructure.
+
+- A [Helm chart](https://github.com/netdata/helmchart), which bootstraps a Netdata Agent pod on every node in your
+ cluster, plus an additional parent pod for storing metrics and managing alarm notifications.
+- A [service discovery plugin](https://github.com/netdata/agent-service-discovery), which discovers and creates
+ configuration files for [compatible
+ applications](https://github.com/netdata/helmchart#service-discovery-and-supported-services) and any endpoints
+ covered by our [generic Prometheus
+ collector](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/prometheus). With these
+ configuration files, Netdata collects metrics from any compatible applications as they run _inside_ of a pod.
+ Service discovery happens without manual intervention as pods are created, destroyed, or moved between nodes.
+- A [Kubelet collector](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/k8s_kubelet), which runs
+ on each node in a k8s cluster to monitor the number of pods/containers, the volume of operations on each container,
+ and more.
+- A [kube-proxy collector](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/k8s_kubeproxy), which
+ also runs on each node and monitors latency and the volume of HTTP requests to the proxy.
+- A [cgroups collector](/collectors/cgroups.plugin/README.md), which collects CPU, memory, and bandwidth metrics for
+ each container running on your k8s cluster.
+
+For a holistic view of Netdata's Kubernetes monitoring capabilities, see our guide: [_Monitor a Kubernetes (k8s) cluster
+with Netdata_](https://learn.netdata.cloud/guides/monitor/kubernetes-k8s-netdata).
+
+## What's next?
+
+Netdata is capable of collecting metrics from hundreds of applications, such as web servers, databases, messaging
+brokers, and more. See more in the [application metrics doc](/docs/collect/application-metrics.md).
+
+If you already have all the information you need about collecting metrics, move into Netdata's meaningful visualizations
+with [seeing an overview of your infrastructure](/docs/visualize/overview-infrastructure.md) using Netdata Cloud.
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2Fcollect%2Fcontainer-metrics&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/docs/collect/enable-configure.md b/docs/collect/enable-configure.md
new file mode 100644
index 0000000..33d7a7b
--- /dev/null
+++ b/docs/collect/enable-configure.md
@@ -0,0 +1,66 @@
+<!--
+title: "Enable or configure a collector"
+description: "Every collector is highly configurable, allowing them to collect metrics from any node and any infrastructure."
+custom_edit_url: https://github.com/netdata/netdata/edit/master/docs/collect/enable-configure.md
+-->
+
+# Enable or configure a collector
+
+When Netdata starts up, each collector searches for exposed metrics on the default endpoint established by that service
+or application's standard installation procedure. For example, the [Nginx
+collector](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/nginx) searches at
+`http://127.0.0.1/stub_status` for exposed metrics in the correct format. If an Nginx web server is running and exposes
+metrics on that endpoint, the collector begins gathering them.
+
+However, not every node or infrastructure uses standard ports, paths, files, or naming conventions. You may need to
+enable or configure a collector to gather all available metrics from your systems, containers, or applications.
+
+## Enable a collector or its orchestrator
+
+You can enable/disable collectors individually, or enable/disable entire orchestrators, using their configuration files.
+For example, you can change the behavior of the Go orchestrator, or any of its collectors, by editing `go.d.conf`.
+
+Use `edit-config` from your [Netdata config directory](/docs/configure/nodes.md#the-netdata-config-directory) to open
+the orchestrator primary configuration file:
+
+```bash
+cd /etc/netdata
+sudo ./edit-config go.d.conf
+```
+
+Within this file, you can either disable the orchestrator entirely (set `enabled: no`), or find a specific collector and
+enable/disable it with `yes` and `no` settings. Uncomment any line you change to ensure the Netdata daemon reads it on
+start.
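+
+For example, a hypothetical `go.d.conf` snippet (module names are illustrative; the stock file lists every available module):
+
+```yml
+# keep the Go orchestrator itself enabled
+enabled: yes
+
+modules:
+  # explicitly enable the nginx collector
+  nginx: yes
+  # explicitly disable the mysql collector
+  mysql: no
+```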
+
+After you make your changes, restart the Agent with `service netdata restart`.
+
+## Configure a collector
+
+First, [find the collector](/collectors/COLLECTORS.md) you want to edit and open its documentation. Some software has
+collectors written in multiple languages. In these cases, you should always pick the collector written in Go.
+
+Use `edit-config` from your [Netdata config directory](/docs/configure/nodes.md#the-netdata-config-directory) to open a
+collector's configuration file. For example, edit the Nginx collector with the following:
+
+```bash
+./edit-config go.d/nginx.conf
+```
+
+Each configuration file describes every available option and offers examples to help you tweak Netdata's settings
+according to your needs. In addition, every collector's documentation shows the exact command you need to run to
+configure that collector. Uncomment any line you change to ensure the collector's orchestrator or the Netdata daemon
+reads it on start.
+
+After you make your changes, restart the Agent with `service netdata restart`.
+
+## What's next?
+
+Read high-level overviews on how Netdata collects [system metrics](/docs/collect/system-metrics.md), [container
+metrics](/docs/collect/container-metrics.md), and [application metrics](/docs/collect/application-metrics.md).
+
+If you're already collecting all metrics from your systems, containers, and applications, it's time to move into
+Netdata's visualization features. [See an overview of your infrastructure](/docs/visualize/overview-infrastructure.md)
+using Netdata Cloud, or learn how to [interact with dashboards and
+charts](/docs/visualize/interact-dashboards-charts.md).
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2Fcollect%2Fenable-configure&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/docs/collect/how-collectors-work.md b/docs/collect/how-collectors-work.md
new file mode 100644
index 0000000..5ae444a
--- /dev/null
+++ b/docs/collect/how-collectors-work.md
@@ -0,0 +1,80 @@
+<!--
+title: "How Netdata's metrics collectors work"
+description: "When Netdata starts, and with zero configuration, it auto-detects thousands of data sources and immediately collects per-second metrics."
+custom_edit_url: https://github.com/netdata/netdata/edit/master/docs/collect/how-collectors-work.md
+-->
+
+# How Netdata's metrics collectors work
+
+When Netdata starts, and with zero configuration, it auto-detects thousands of data sources and immediately collects
+per-second metrics.
+
+Netdata can immediately collect metrics from these endpoints thanks to 300+ **collectors**, which all come pre-installed
+when you [install the Netdata Agent](/docs/get/README.md#install-the-netdata-agent).
+
+Every collector has two primary jobs:
+
+- Look for exposed metrics at a pre- or user-defined endpoint.
+- Gather exposed metrics and use additional logic to build meaningful, interactive visualizations.
+
+If the collector finds compatible metrics exposed on the configured endpoint, it begins a per-second collection job. The
+Netdata Agent gathers these metrics, sends them to the [database engine for
+storage](/docs/store/change-metrics-storage.md), and immediately [visualizes them
+meaningfully](/docs/visualize/interact-dashboards-charts.md) on dashboards.
+
+Each collector comes with a pre-defined configuration that matches the default setup for that application. The endpoint
+it watches can be a URL and port, a socket, a file, a web page, and more.
+
+For example, the [Nginx collector](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/nginx) searches
+at `http://127.0.0.1/stub_status`, which is the default endpoint for exposing Nginx metrics. The [web log collector for
+Nginx or Apache](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/weblog) searches at
+`/var/log/nginx/access.log` and `/var/log/apache2/access.log`, respectively, both of which are standard locations for
+access log files on Linux systems.
+
+The endpoint is user-configurable, as are many other specifics of what a given collector does.
+
+## What can Netdata collect?
+
+To quickly find your answer, see our [list of supported collectors](/collectors/COLLECTORS.md).
+
+Generally, Netdata's collectors can be grouped into three types:
+
+- [Systems](/docs/collect/system-metrics.md): Monitor CPU, memory, disk, networking, systemd, eBPF, and much more.
+ Every metric exposed by `/proc`, `/sys`, and other Linux kernel sources.
+- [Containers](/docs/collect/container-metrics.md): Gather metrics from container agents, like `dockerd` or `kubectl`,
+ along with the resource usage of containers and the applications they run.
+- [Applications](/docs/collect/application-metrics.md): Collect per-second metrics from web servers, databases, logs,
+ message brokers, APM tools, email servers, and much more.
+
+## Collector architecture and terminology
+
+**Collector** is a catch-all term for any Netdata process that gathers metrics from an endpoint.
+
+While we use _collector_ most often in documentation, release notes, and educational content, you may encounter other
+terms related to collecting metrics.
+
+- **Modules** are a type of collector.
+- **Orchestrators** are external plugins that run and manage one or more modules. They run as independent processes.
+ The Go orchestrator is in active development.
+ - [go.d.plugin](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/): An orchestrator for data
+ collection modules written in `go`.
+ - [python.d.plugin](/collectors/python.d.plugin/README.md): An orchestrator for data collection modules written in
+ `python` v2/v3.
+ - [charts.d.plugin](/collectors/charts.d.plugin/README.md): An orchestrator for data collection modules written in
+ `bash` v4+.
+ - [node.d.plugin](/collectors/node.d.plugin/README.md): An orchestrator for data collection modules written in
+ `node.js`.
+- **External plugins** gather metrics from external processes, such as a webserver or database, and run as independent
+ processes that communicate with the Netdata daemon via pipes.
+- **Internal plugins** gather metrics from `/proc`, `/sys`, and other Linux kernel sources. They are written in `C`,
+ and run as threads within the Netdata daemon.
+
+## What's next?
+
+[Enable or configure a collector](/docs/collect/enable-configure.md) if the default settings are not compatible with
+your infrastructure.
+
+See our [collectors reference](/collectors/REFERENCE.md) for detailed information on Netdata's collector architecture,
+troubleshooting a collector, developing a custom collector, and more.
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2Fcollect%2Fhow-collectors-work&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/docs/collect/system-metrics.md b/docs/collect/system-metrics.md
new file mode 100644
index 0000000..72aa571
--- /dev/null
+++ b/docs/collect/system-metrics.md
@@ -0,0 +1,65 @@
+<!--
+title: "Collect system metrics with Netdata"
+sidebar_label: "System metrics"
+description: "Netdata collects thousands of metrics from physical and virtual systems, IoT/edge devices, and containers with zero configuration."
+custom_edit_url: https://github.com/netdata/netdata/edit/master/docs/collect/system-metrics.md
+-->
+
+# Collect system metrics with Netdata
+
+Netdata collects thousands of metrics directly from the operating systems of physical and virtual systems, IoT/edge
+devices, and [containers](/docs/collect/container-metrics.md) with zero configuration.
+
+To gather system metrics, Netdata uses roughly a dozen plugins, each of which has one or more collectors for very
+specific metrics exposed by the host. The system metrics Netdata users interact with most for health monitoring and
+performance troubleshooting are collected and visualized by `proc.plugin`, `cgroups.plugin`, and `ebpf.plugin`.
+
+[**proc.plugin**](/collectors/proc.plugin/README.md) gathers metrics from the `/proc` and `/sys` folders in Linux
+systems, along with a few other endpoints, and is responsible for the bulk of the system metrics collected and
+visualized by Netdata. It collects CPU, memory, disks, load, networking, mount points, and more with zero configuration.
+It even allows Netdata to monitor its own resource utilization!
+
+[**cgroups.plugin**](/collectors/cgroups.plugin/README.md) collects rich metrics about containers and virtual machines
+using the virtual files under `/sys/fs/cgroup`. By reading cgroups, Netdata can instantly collect resource utilization
+metrics for systemd services, all containers (Docker, LXC, LXD, Libvirt, systemd-nspawn), and more. Learn more in the
+[collecting container metrics](/docs/collect/container-metrics.md) doc.
+
+[**ebpf.plugin**](/collectors/ebpf.plugin/README.md): Netdata's extended Berkeley Packet Filter (eBPF) collector
+monitors Linux kernel-level metrics for file descriptors, virtual filesystem IO, and process management. You can use our
+eBPF collector to analyze how and when a process accesses files, when it makes system calls, whether it leaks memory or
+creates zombie processes, and more.
+
+While the above plugins and associated collectors are the most important for system metrics, there are many others. You
+can find all system collectors in our [supported collectors list](/collectors/COLLECTORS.md#system-metrics).
+
+## Collect Windows system metrics
+
+Netdata is also capable of monitoring Windows systems. The [WMI
+collector](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/wmi) integrates with
+[windows_exporter](https://github.com/prometheus-community/windows_exporter), a small Go-based binary that you can run
+on Windows systems. The WMI collector then gathers metrics from an endpoint created by windows_exporter.
+
+First, [download windows_exporter](https://github.com/prometheus-community/windows_exporter#installation) and run it
+with the following collectors enabled, changing `0.14.0` to the version you downloaded.
+
+```powershell
+windows_exporter-0.14.0-amd64.exe --collectors.enabled="cpu,memory,net,logical_disk,os,system,logon"
+```
+
+Next, [configure the WMI
+collector](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/wmi#configuration) to point to the URL
+and port of your exposed endpoint. Restart Netdata with `service netdata restart` and you'll start seeing Windows system
+metrics, such as CPU utilization, memory, bandwidth per NIC, number of processes, and much more.
+
+For information about collecting metrics from applications _running on Windows systems_, see the [application metrics
+doc](/docs/collect/application-metrics.md#collect-metrics-from-applications-running-on-windows).
+
+## What's next?
+
+Because there's some overlap between system metrics and [container metrics](/docs/collect/container-metrics.md), you
+should investigate Netdata's container compatibility if you use them heavily in your infrastructure.
+
+If you don't use containers, skip ahead to collecting [application metrics](/docs/collect/application-metrics.md) with
+Netdata.
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2Fcollect%2Fsystem-metrics&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/docs/configure/common-changes.md b/docs/configure/common-changes.md
new file mode 100644
index 0000000..6749384
--- /dev/null
+++ b/docs/configure/common-changes.md
@@ -0,0 +1,214 @@
+<!--
+title: "Common configuration changes"
+description: "See the most popular configuration changes to make to the Netdata Agent, including longer metrics retention, reduce sampling, and more."
+custom_edit_url: https://github.com/netdata/netdata/edit/master/docs/configure/common-changes.md
+-->
+
+# Common configuration changes
+
+The Netdata Agent requires no configuration upon installation to collect thousands of per-second metrics from most
+systems, containers, and applications, but there are hundreds of settings to tweak if you want to exercise more control
+over your monitoring platform.
+
+This document assumes familiarity with using [`edit-config`](/docs/configure/nodes.md) from the Netdata config
+directory.
+
+## Change dashboards and visualizations
+
+The Netdata Agent's [local dashboard](/web/gui/README.md), accessible at `http://NODE:19999`, is highly configurable. If
+you use Netdata Cloud for [infrastructure monitoring](/docs/quickstart/infrastructure.md), you will see many of these
+changes reflected in those visualizations due to the way Netdata Cloud proxies metric data and metadata to your browser.
+
+### Increase the long-term metrics retention period
+
+Increase the values for the `page cache size` and `dbengine multihost disk space` settings in the [`[global]`
+section](/daemon/config/README.md#global-section-options) of `netdata.conf`.
+
+```conf
+[global]
+ page cache size = 128 # 128 MiB of memory for metrics storage
+ dbengine multihost disk space = 4096 # 4GiB of disk space for metrics storage
+```
+
+Read our doc on [increasing long-term metrics storage](/docs/store/change-metrics-storage.md) for details, including a
+[calculator](/docs/store/change-metrics-storage.md#calculate-the-system-resources-RAM-disk-space-needed-to-store-metrics)
+to help you determine the exact settings for your desired retention period.
+
+### Reduce the data collection frequency
+
+Change `update every` in the [`[global]` section](/daemon/config/README.md#global-section-options) of `netdata.conf` so
+that it is greater than `1`. An `update every` of `5` means the Netdata Agent enforces a _minimum_ collection frequency
+of 5 seconds.
+
+```conf
+[global]
+ update every = 5
+```
+
+Every collector and plugin has its own `update every` setting, which you can also change in the `go.d.conf`,
+`python.d.conf`, `node.d.conf`, or `charts.d.conf` files, or in individual collector configuration files. If the `update
+every` for an individual collector is less than the global, the Netdata Agent uses the global setting. See the [enable
+or configure a collector](/docs/collect/enable-configure.md) doc for details.
+
+### Disable a collector or plugin
+
+Turn off entire plugins in the [`[plugins]` section](/daemon/config/README.md#plugins-section-options) of
+`netdata.conf`.
+
+To disable specific collectors, open `go.d.conf`, `python.d.conf`, `node.d.conf`, or `charts.d.conf` and find the line
+for that specific module. Uncomment the line and change its value to `no`.
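+
+For example, a hypothetical `python.d.conf` snippet (the module name is illustrative):
+
+```yml
+# disable the example collector module
+example: no
+```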
+
+## Modify alarms and notifications
+
+Netdata's health monitoring watchdog uses hundreds of preconfigured health entities, with intelligent thresholds, to
+generate warning and critical alarms for most production systems and their applications without configuration. However,
+each alarm and notification method is completely customizable.
+
+### Add a new alarm
+
+To create a new alarm configuration file, create an empty file, with a filename that ends in `.conf`, in the
+`health.d/` directory. The Netdata Agent loads any valid alarm configuration file ending in `.conf` in that directory.
+Next, edit the new file with `edit-config`. For example, for a file called `example-alarm.conf`:
+
+```bash
+sudo touch health.d/example-alarm.conf
+sudo ./edit-config health.d/example-alarm.conf
+```
+
+Or, append your new alarm to an existing file by editing a relevant existing file in the `health.d/` directory.
+
+Read more about [configuring alarms](/docs/monitor/configure-alarms.md) to get started, and see the [health monitoring
+reference](/health/REFERENCE.md) for a full listing of options available in health entities.
+
+### Configure a specific alarm
+
+Tweak existing alarms by editing files in the `health.d/` directory. For example, edit `health.d/cpu.conf` to change how
+the Agent responds to anomalies related to CPU utilization.
+
+To see which configuration file you need to edit to configure a specific alarm, [view your active
+alarms](/docs/monitor/view-active-alarms.md) in Netdata Cloud or the local Agent dashboard and look for the **source**
+line. For example, it might read `source 4@/usr/lib/netdata/conf.d/health.d/cpu.conf`.
+
+Because the source path contains `health.d/cpu.conf`, run `sudo edit-config health.d/cpu.conf` to configure that alarm.
+
+### Disable a specific alarm
+
+Open the configuration file for that alarm and set the `to` line to `silent`.
+
+```conf
+template: disk_fill_rate
+ on: disk.space
+ lookup: max -1s at -30m unaligned of avail
+ calc: ($this - $avail) / (30 * 60)
+ every: 15s
+ to: silent
+```
+
+### Turn off all alarms and notifications
+
+Set `enabled` to `no` in the [`[health]` section](/daemon/config/README.md#health-section-options) of
+`netdata.conf`.
+
+### Enable alarm notifications
+
+Open `health_alarm_notify.conf` for editing. First, read the [enabling
+notifications](/docs/monitor/enable-notifications.md#netdata-agent) doc for an example of the process using Slack, then
+click on the link to your preferred notification method to find documentation for that specific endpoint.
+
+## Improve node security
+
+While the Netdata Agent is both [open and secure by design](https://www.netdata.cloud/blog/netdata-agent-dashboard/), we
+recommend every user take some action to administer and secure their nodes.
+
+Learn more about a few of the following changes in the [node security doc](/docs/configure/secure-nodes.md).
+
+### Disable the local Agent dashboard (`http://NODE:19999`)
+
+If you use Netdata Cloud to visualize metrics, stream metrics to a parent node, or otherwise don't need the local Agent
+dashboard, disabling it reduces the Agent's resource utilization and improves security.
+
+Change the `mode` setting to `none` in the [`[web]` section](/web/server/README.md#configuration) of `netdata.conf`.
+
+```conf
+[web]
+ mode = none
+```
+
+### Use access lists to restrict access to specific assets
+
+Allow access from only specific IP addresses, ranges of IP addresses, or hostnames using [access
+lists](/web/server/README.md#access-lists) and [simple patterns](/libnetdata/simple_pattern/README.md).
+
+See a quickstart to access lists in the [node security
+doc](/docs/configure/secure-nodes.md#restrict-access-to-the-local-dashboard).
+
+### Stop sending anonymous statistics to Google Analytics
+
+Create a file called `.opt-out-from-anonymous-statistics` inside of your Netdata config directory to immediately stop
+the statistics script.
+
+```bash
+sudo touch .opt-out-from-anonymous-statistics
+```
+
+Learn more about [why we collect anonymous statistics](/docs/anonymous-statistics.md).
+
+### Change the IP address/port Netdata listens to
+
+Change the `default port` setting in the `[web]` section to a port other than `19999`.
+
+```conf
+[web]
+ default port = 39999
+```
+
+Use the `bind to` setting to control which IP addresses and ports other assets, such as the [running `netdata.conf`
+configuration](/docs/configure/nodes.md#see-an-agents-running-configuration), the API, or streaming requests, listen to.
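+
+A hedged sketch of splitting assets across ports (the access-flag syntax shown is an assumption; check the [web server](/web/server/README.md) reference for the exact form your version supports):
+
+```conf
+[web]
+    # serve the dashboard on one port and streaming on another
+    bind to = 127.0.0.1:19999=dashboard 127.0.0.1:20000=streaming
+```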
+
+## Reduce resource usage
+
+Read our [performance optimization guide](/docs/guides/configure/performance.md) for a long list of specific changes
+that can reduce the Netdata Agent's CPU/memory footprint and IO requirements.
+
+## Organize nodes with host labels
+
+Beginning with v1.20, Netdata accepts user-defined **host labels**. These labels are sent during streaming, exporting,
+and as metadata to Netdata Cloud, and help you organize the metrics coming from complex infrastructure. Host labels are
+defined in the section `[host labels]`.
+
+For a quick introduction, read the [host label guide](/docs/guides/using-host-labels.md).
+
+The following restrictions apply to host label names:
+
+- Names cannot start with `_`, but underscores can appear elsewhere in the name.
+- Names accept only letters, numbers, dots, and dashes.
+
+The policy for values is more flexible, but you cannot use exclamation marks (`!`), whitespace (` `), single quotes
+(`'`), double quotes (`"`), or asterisks (`*`), because they are used to compare label values in health alarms and
+templates.
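+
+For example, a hypothetical `[host labels]` section in `netdata.conf` (label names and values are illustrative):
+
+```conf
+[host labels]
+    type = webserver
+    location = us-east-1
+    team = infra
+```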
+
+## What's next?
+
+If you haven't already, learn how to [secure your nodes](/docs/configure/secure-nodes.md).
+
+As mentioned at the top, there are hundreds of other settings to tweak beyond the common changes covered here.
+
+You can also take what you've learned about node configuration to tweak the Agent's behavior or enable new features:
+
+- [Enable new collectors](/docs/collect/enable-configure.md) or tweak their behavior.
+- [Configure existing health alarms](/docs/monitor/configure-alarms.md) or create new ones.
+- [Enable notifications](/docs/monitor/enable-notifications.md) to receive updates about the health of your
+ infrastructure.
+- Change [the long-term metrics retention period](/docs/store/change-metrics-storage.md) using the database engine.
+
+### Related reference documentation
+
+- [Netdata Agent · Daemon](/daemon/README.md)
+- [Netdata Agent · Daemon configuration](/daemon/config/README.md)
+- [Netdata Agent · Web server](/web/server/README.md)
+- [Netdata Agent · Local Agent dashboard](/web/gui/README.md)
+- [Netdata Agent · Health monitoring](/health/REFERENCE.md)
+- [Netdata Agent · Notifications](/health/notifications/README.md)
+- [Netdata Agent · Simple patterns](/libnetdata/simple_pattern/README.md)
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2Fconfigure%2Fcommon-changes&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/docs/configure/nodes.md b/docs/configure/nodes.md
new file mode 100644
index 0000000..d0a6fd7
--- /dev/null
+++ b/docs/configure/nodes.md
@@ -0,0 +1,165 @@
+<!--
+title: "Configure the Netdata Agent"
+description: "Netdata is zero-configuration for most users, but complex infrastructures may require you to tweak some of the Agent's granular settings."
+custom_edit_url: https://github.com/netdata/netdata/edit/master/docs/configure/nodes.md
+-->
+
+# Configure the Netdata Agent
+
+Netdata's zero-configuration collection, storage, and visualization features work for many users, infrastructures, and
+use cases, but there are some situations where you might want to configure the Netdata Agent running on your node(s),
+which can be a physical or virtual machine (VM), container, cloud deployment, or edge/IoT device.
+
+For example, you might want to increase metrics retention, configure a collector based on your infrastructure's unique
+setup, or secure the local dashboard by restricting it to only connections from `localhost`.
+
+Whatever the reason, Netdata users should know how to configure individual nodes to act decisively if an incident,
+anomaly, or change in infrastructure affects how their Agents should perform.
+
+## The Netdata config directory
+
+On most Linux systems, using our [recommended one-line installation](/docs/get/README.md#install-the-netdata-agent), the
+**Netdata config directory** is `/etc/netdata/`. The config directory contains several configuration files with the
+`.conf` extension, a few directories, and a shell script named `edit-config`.
+
+> Some operating systems will use `/opt/netdata/etc/netdata/` as the config directory. If you're not sure where yours
+> is, navigate to `http://NODE:19999/netdata.conf` in your browser, replacing `NODE` with the IP address or hostname of
+> your node, and find the `# config directory = ` setting. The value listed is the config directory for your system.
+
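+If you have shell access to the node, you can also query the running Agent directly. This is a quick sketch, assuming
+the Agent is listening on `localhost:19999`:
+
+```bash
+# Print the config directory reported by the running Agent
+curl -s http://localhost:19999/netdata.conf | grep 'config directory'
+```
+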
+All of Netdata's documentation assumes that your config directory is at `/etc/netdata`, and that you're running any
+scripts from inside that directory.
+
+## Netdata's configuration files
+
+Upon installation, the Netdata config directory contains a few files and directories. It's okay if you don't see all
+these files in your own Netdata config directory, as the next section describes how to edit any that might not already
+exist.
+
+- `netdata.conf` is the main configuration file. This is where you'll find most configuration options. Read descriptions
+ for each in the [daemon config](/daemon/config/README.md) doc.
+- `edit-config` is a shell script used for [editing configuration files](#use-edit-config-to-edit-configuration-files).
+- Various configuration files ending in `.conf` for [configuring plugins or
+  collectors](/docs/collect/enable-configure.md#enable-a-collector-or-its-orchestrator). Examples: `go.d.conf`,
+ `python.d.conf`, and `ebpf.conf`.
+- Various directories ending in `.d`, which contain other configuration files, each ending in `.conf`, for [configuring
+ specific collectors](/docs/collect/enable-configure.md#configure-a-collector).
+- `apps_groups.conf` is a configuration file for changing how applications/processes are grouped when viewing the
+ **Application** charts from [`apps.plugin`](/collectors/apps.plugin/README.md) or
+ [`ebpf.plugin`](/collectors/ebpf.plugin/README.md).
+- `health.d/` is a directory that contains [health configuration files](/docs/monitor/configure-alarms.md).
+- `health_alarm_notify.conf` enables and configures [alarm notifications](/docs/monitor/enable-notifications.md).
+- `statsd.d/` is a directory for configuring Netdata's [statsd collector](/collectors/statsd.plugin/README.md).
+- `stream.conf` configures [parent-child streaming](/streaming/README.md) between separate nodes running the Agent.
+- `.environment` is a hidden file that describes the environment in which the Netdata Agent is installed, including the
+ `PATH` and any installation options. Useful for [reinstalling](/packaging/installer/REINSTALL.md) or
+ [uninstalling](/packaging/installer/UNINSTALL.md) the Agent.
+
+The Netdata config directory also contains one symlink:
+
+- `orig` is a symbolic link to the directory `/usr/lib/netdata/conf.d`, which contains stock configuration files. Stock
+ versions are copied into the config directory when opened with `edit-config`. _Do not edit the files in
+ `/usr/lib/netdata/conf.d`, as they are overwritten by updates to the Netdata Agent._
+
+## Use `edit-config` to edit configuration files
+
+The **recommended way to easily and safely edit Netdata's configuration** is with the `edit-config` script. This script
+opens existing Netdata configuration files using your system's `$EDITOR`. If the file doesn't yet exist in your config
+directory, the script copies the stock version from `/usr/lib/netdata/conf.d` and opens it for editing.
+
+Run `edit-config` without any options to see details on its usage and a list of all the configuration files you can
+edit.
+
+```bash
+./edit-config
+USAGE:
+ ./edit-config FILENAME
+
+ Copy and edit the stock config file named: FILENAME
+ if FILENAME is already copied, it will be edited as-is.
+
+ The EDITOR shell variable is used to define the editor to be used.
+
+ Stock config files at: '/usr/lib/netdata/conf.d'
+ User config files at: '/etc/netdata'
+
+ Available files in '/usr/lib/netdata/conf.d' to copy and edit:
+
+./apps_groups.conf ./health.d/phpfpm.conf
+./aws_kinesis.conf ./health.d/pihole.conf
+./charts.d/ap.conf ./health.d/portcheck.conf
+./charts.d/apcupsd.conf ./health.d/postgres.conf
+...
+```
+
+To edit `netdata.conf`, run `./edit-config netdata.conf`. You may need to elevate your privileges with `sudo` or another
+method for `edit-config` to write into the config directory. Use your `$EDITOR`, make your changes, and save the file.
+
+> `edit-config` uses the `EDITOR` environment variable on your system to edit the file. On many systems, this defaults
+> to `vim` or `nano`. Use `export EDITOR=` to change the editor temporarily, or edit your shell configuration file to
+> change it permanently.
+
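+For example, to use `nano` for a single `edit-config` session, you might run the following. This is a sketch: it
+assumes `nano` is installed, and depending on your `sudoers` policy you may need `sudo -E` to preserve the variable.
+
+```bash
+export EDITOR=nano
+sudo -E ./edit-config netdata.conf
+```
+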
+After you make your changes, you need to [restart the Agent](/docs/configure/start-stop-restart.md) with `sudo systemctl
+restart netdata` or the appropriate method for your system.
+
+Here's an example of editing the node's hostname, which appears in both the local dashboard and in Netdata Cloud.
+
+![Animated GIF of editing the hostname option in
+netdata.conf](https://user-images.githubusercontent.com/1153921/80994808-1c065300-8df2-11ea-81af-d28dc3ba27c8.gif)
+
+### Other configuration files
+
+You can edit any Netdata configuration file using `edit-config`. A few examples:
+
+```bash
+./edit-config apps_groups.conf
+./edit-config ebpf.conf
+./edit-config health.d/load.conf
+./edit-config go.d/prometheus.conf
+```
+
+The documentation for each of Netdata's components explains which file(s) to edit to achieve the desired behavior.
+
+## See an Agent's running configuration
+
+On start, the Netdata Agent daemon attempts to load `netdata.conf`. If that file is missing, incomplete, or contains
+invalid settings, the daemon falls back to sane defaults instead. In other words, the state of `netdata.conf` on your
+filesystem may be different from the state of the Netdata Agent itself.
+
+To see the _running configuration_, navigate to `http://NODE:19999/netdata.conf` in your browser, replacing `NODE` with
+the IP address or hostname of your node. The file displayed here is exactly the settings running live in the Netdata
+Agent.
+
+If you're having issues with configuring the Agent, apply the running configuration to `netdata.conf` by downloading the
+file to the Netdata config directory. Use `sudo` to elevate privileges.
+
+```bash
+wget -O /etc/netdata/netdata.conf http://localhost:19999/netdata.conf
+# or
+curl -o /etc/netdata/netdata.conf http://NODE:19999/netdata.conf
+```
+
+## What's next?
+
+Learn more about [starting, stopping, or restarting](/docs/configure/start-stop-restart.md) the Netdata daemon to apply
+configuration changes.
+
+Apply some [common configuration changes](/docs/configure/common-changes.md) to quickly tweak the Agent's behavior.
+
+[Add security to your node](/docs/configure/secure-nodes.md) with what you've learned about the Netdata config directory
+and `edit-config`. We put together a few security best practices based on how you use Netdata.
+
+You can also take what you've learned about node configuration to enable or enhance features:
+
+- [Enable new collectors](/docs/collect/enable-configure.md) or tweak their behavior.
+- [Configure existing health alarms](/docs/monitor/configure-alarms.md) or create new ones.
+- [Enable notifications](/docs/monitor/enable-notifications.md) to receive updates about the health of your
+ infrastructure.
+- Change [the long-term metrics retention period](/docs/store/change-metrics-storage.md) using the database engine.
+
+### Related reference documentation
+
+- [Netdata Agent · Daemon](/daemon/README.md)
+- [Netdata Agent · Health monitoring](/health/README.md)
+- [Netdata Agent · Notifications](/health/notifications/README.md)
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2Fconfigure%2Fnodes&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/docs/configure/secure-nodes.md b/docs/configure/secure-nodes.md
new file mode 100644
index 0000000..704db35
--- /dev/null
+++ b/docs/configure/secure-nodes.md
@@ -0,0 +1,123 @@
+<!--
+title: "Secure your nodes"
+description: "Your data and systems are safe with Netdata, but we recommend a few easy ways to improve the security of your infrastructure."
+custom_edit_url: https://github.com/netdata/netdata/edit/master/docs/configure/secure-nodes.md
+-->
+
+# Secure your nodes
+
+Upon installation, the Netdata Agent serves the **local dashboard** at port `19999`. If the node is accessible to the
+internet at large, anyone can access the dashboard and your node's metrics at `http://NODE:19999`. We made this decision
+so that the local dashboard is immediately accessible to users, and so that we don't dictate how professionals set up
+and secure their infrastructures.
+
+Despite this design decision, your [data](/docs/netdata-security.md#your-data-are-safe-with-netdata) and your
+[systems](/docs/netdata-security.md#your-systems-are-safe-with-netdata) are safe with Netdata. Netdata is read-only,
+cannot do anything other than present metrics, and runs without special/`sudo` privileges. Also, the local dashboard
+only exposes chart metadata and metric values, not raw data.
+
+While Netdata is secure by design, we believe you should [protect your
+nodes](/docs/netdata-security.md#why-netdata-should-be-protected). If left accessible to the internet at large, the
+local dashboard could reveal sensitive information about your infrastructure. For example, an attacker can view which
+applications you run (databases, webservers, and so on), or see every user account on a node.
+
+Instead of dictating how to secure your infrastructure, we give you many options to establish security best practices
+that align with your goals and your organization's standards.
+
+- [Disable the local dashboard](#disable-the-local-dashboard): **Simplest and recommended method** for those who have
+ added nodes to Netdata Cloud and view dashboards and metrics there.
+- [Restrict access to the local dashboard](#restrict-access-to-the-local-dashboard): Allow local dashboard access from
+ only certain IP addresses, such as a trusted static IP or connections from behind a management LAN. Full support for
+ Netdata Cloud.
+- [Use a reverse proxy](#use-a-reverse-proxy): Password-protect a local dashboard and enable TLS to secure it. Full
+ support for Netdata Cloud.
+
+## Disable the local dashboard
+
+This is the _recommended method for those who have claimed their nodes to Netdata Cloud_ and prefer viewing real-time
+metrics using the War Room Overview, Nodes view, and Cloud dashboards.
+
+You can disable the local dashboard (and API) but retain the encrypted Agent-Cloud link ([ACLK](/aclk/README.md)) that
+allows you to stream metrics on demand from your nodes via the Netdata Cloud interface. This change mitigates all
+concerns about revealing metrics and system design to the internet at large, while keeping all the functionality you
+need to view metrics and troubleshoot issues with Netdata Cloud.
+
+Open `netdata.conf` with `./edit-config netdata.conf`. Scroll down to the `[web]` section, and find the `mode =
+static-threaded` setting, and change it to `none`.
+
+```conf
+[web]
+ mode = none
+```
+
+Save and close the editor, then [restart your Agent](/docs/configure/start-stop-restart.md) using `sudo systemctl
+restart netdata`. If you try to visit the local dashboard at `http://NODE:19999` again, the connection will fail because
+that node no longer serves its local dashboard.
+
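+You can verify the change from the node itself. A quick sketch, assuming the Agent previously listened on
+`localhost:19999`:
+
+```bash
+# With `mode = none`, this should fail with a connection error
+curl -I http://localhost:19999
+```
+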
+> See the [configuration basics doc](/docs/configure/nodes.md) for details on how to find `netdata.conf` and use
+> `edit-config`.
+
+## Restrict access to the local dashboard
+
+If you want to keep using the local dashboard, but don't want it exposed to the internet, you can restrict access with
+[access lists](/web/server/README.md#access-lists). This method also fully retains the ability to stream metrics
+on-demand through Netdata Cloud.
+
+The `allow connections from` setting helps you allow only certain IP addresses or FQDN/hostnames, such as a trusted
+static IP, only `localhost`, or connections from behind a management LAN.
+
+By default, this setting is `localhost *`, which allows connections from `localhost` in addition to _all_ other
+connections via the `*` wildcard. You can change this setting using Netdata's [simple
+patterns](/libnetdata/simple_pattern/README.md).
+
+```conf
+[web]
+ # Allow only localhost connections
+ allow connections from = localhost
+
+ # Allow only from management LAN running on `10.X.X.X`
+ allow connections from = 10.*
+
+ # Allow connections only from a specific FQDN/hostname
+ allow connections from = example*
+```
+
+The `allow connections from` setting is global and restricts access to the dashboard, badges, streaming, API, and
+`netdata.conf`, but you can also set each of those access lists more granularly if you choose:
+
+```conf
+[web]
+ allow connections from = localhost *
+ allow dashboard from = localhost *
+ allow badges from = *
+ allow streaming from = *
+ allow netdata.conf from = localhost fd* 10.* 192.168.* 172.16.* 172.17.* 172.18.* 172.19.* 172.20.* 172.21.* 172.22.* 172.23.* 172.24.* 172.25.* 172.26.* 172.27.* 172.28.* 172.29.* 172.30.* 172.31.*
+ allow management from = localhost
+```
+
+See the [web server](/web/server/README.md#access-lists) docs for additional details about access lists. You can take
+access lists one step further by [enabling SSL](/web/server/README.md#enabling-tls-support) to encrypt data from the
+local dashboard in transit. The connection to Netdata Cloud is always secured with TLS.
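+
+For example, you can point the Agent at a key and certificate pair in `netdata.conf`. The paths below are illustrative
+only; see the [TLS documentation](/web/server/README.md#enabling-tls-support) for the full setup.
+
+```conf
+[web]
+    ssl key = /etc/netdata/ssl/key.pem
+    ssl certificate = /etc/netdata/ssl/cert.pem
+```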
+
+## Use a reverse proxy
+
+You can also put Netdata behind a reverse proxy for additional security while retaining the functionality of both the
+local dashboard and Netdata Cloud dashboards. You can use a reverse proxy to password-protect the local dashboard and
+enable HTTPS to encrypt metadata and metric values in transit.
+
+We recommend Nginx, as it's what we use for our [demo server](https://london.my-netdata.io/), and we have a guide
+dedicated to [running Netdata behind Nginx](/docs/Running-behind-nginx.md).
+
+We also have guides for [Apache](/docs/Running-behind-apache.md), [Lighttpd](/docs/Running-behind-lighttpd.md),
+[HAProxy](/docs/Running-behind-haproxy.md), and [Caddy](/docs/Running-behind-caddy.md).
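+
+To illustrate the idea, here is a minimal sketch of an Nginx server block that proxies the local dashboard. The
+hostname is hypothetical, and a production setup should add authentication and TLS as described in the guides above.
+
+```conf
+server {
+    listen 80;
+    server_name netdata.example.com;
+
+    location / {
+        # Forward requests to the local Netdata dashboard
+        proxy_pass http://127.0.0.1:19999;
+        proxy_set_header Host $host;
+    }
+}
+```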
+
+## What's next?
+
+Read about [Netdata's security design](/docs/netdata-security.md) and our [blog
+post](https://www.netdata.cloud/blog/netdata-agent-dashboard/) about why the local Agent dashboard is both open and
+secure by design.
+
+Next up, learn about [collectors](/docs/collect/how-collectors-work.md) to ensure you're gathering every essential
+metric about your node, its applications, and your infrastructure at large.
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2Fconfigure%2Fsecure-nodes&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/docs/configure/start-stop-restart.md b/docs/configure/start-stop-restart.md
new file mode 100644
index 0000000..4967fff
--- /dev/null
+++ b/docs/configure/start-stop-restart.md
@@ -0,0 +1,98 @@
+<!--
+title: "Start, stop, or restart the Netdata Agent"
+description: "Manage the Netdata Agent daemon, load configuration changes, and troubleshoot stuck processes on systemd and non-systemd nodes."
+custom_edit_url: https://github.com/netdata/netdata/edit/master/docs/configure/start-stop-restart.md
+-->
+
+# Start, stop, or restart the Netdata Agent
+
+When you install the Netdata Agent, the [daemon](/daemon/README.md) is configured to start at boot, and to stop and
+restart when the system shuts down or reboots.
+
+You will most often need to _restart_ the Agent to load new or edited configuration files. [Health
+configuration](#reload-health-configuration) files are the only exception, as they can be reloaded without restarting
+the entire Agent.
+
+Stopping or restarting the Netdata Agent will cause gaps in stored metrics until the `netdata` process restarts and
+reinitializes its collectors and the database engine.
+
+## Using `systemctl`, `service`, or `init.d`
+
+This is the recommended way to start, stop, or restart the Netdata daemon.
+
+- To **start** Netdata, run `sudo systemctl start netdata`.
+- To **stop** Netdata, run `sudo systemctl stop netdata`.
+- To **restart** Netdata, run `sudo systemctl restart netdata`.
+
+If the above commands fail, or you know that you're using a non-systemd system, try using the `service` command:
+
+- **service**: `sudo service netdata start`, `sudo service netdata stop`, `sudo service netdata restart`
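+
+On systems that use classic SysV-style init scripts, the equivalent commands typically look like the following. The
+script path may vary by distribution.
+
+```bash
+sudo /etc/init.d/netdata start
+sudo /etc/init.d/netdata stop
+sudo /etc/init.d/netdata restart
+```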
+
+## Using `netdata`
+
+Use the `netdata` command, typically located at `/usr/sbin/netdata`, to start the Netdata daemon.
+
+```bash
+sudo netdata
+```
+
+If you start the daemon this way, close it with `sudo killall netdata`.
+
+## Using `netdatacli`
+
+The Netdata Agent also comes with a [CLI tool](/cli/README.md) capable of performing shutdowns. After shutting the
+Agent down, start it back up using your preferred method listed above.
+
+```bash
+sudo netdatacli shutdown-agent
+```
+
+## Reload health configuration
+
+You do not need to restart the Netdata Agent between changes to health configuration files, such as specific health
+entities. Instead, use [`netdatacli`](#using-netdatacli) and the `reload-health` option to prevent gaps in metrics
+collection.
+
+```bash
+sudo netdatacli reload-health
+```
+
+If `netdatacli` doesn't work on your system, send a `SIGUSR2` signal to the daemon, which reloads health configuration
+without restarting the entire process.
+
+```bash
+killall -USR2 netdata
+```
+
+## Force stop stalled or unresponsive `netdata` processes
+
+In rare cases, the Netdata Agent may stall or not properly close sockets, preventing a new process from starting. In
+these cases, try the following three commands:
+
+```bash
+sudo systemctl stop netdata   # ask the service manager to stop the Agent
+sudo killall netdata          # force-kill any remaining netdata processes
+ps aux | grep netdata         # verify that no netdata processes remain
+```
+
+The output of `ps aux` should show no `netdata` or associated processes running. You can now start the Netdata Agent
+again with `service netdata start`, or the appropriate method for your system.
+
+## What's next?
+
+Learn more about [securing the Netdata Agent](/docs/configure/secure-nodes.md).
+
+You can also use the restart/reload methods described above to enable new features:
+
+- [Enable new collectors](/docs/collect/enable-configure.md) or tweak their behavior.
+- [Configure existing health alarms](/docs/monitor/configure-alarms.md) or create new ones.
+- [Enable notifications](/docs/monitor/enable-notifications.md) to receive updates about the health of your
+ infrastructure.
+- Change [the long-term metrics retention period](/docs/store/change-metrics-storage.md) using the database engine.
+
+### Related reference documentation
+
+- [Netdata Agent · Daemon](/daemon/README.md)
+- [Netdata Agent · Netdata CLI](/cli/README.md)
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2Fconfigure%2Fstart-stop-restart&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/docs/contributing/contributing-documentation.md b/docs/contributing/contributing-documentation.md
new file mode 100644
index 0000000..44be922
--- /dev/null
+++ b/docs/contributing/contributing-documentation.md
@@ -0,0 +1,109 @@
+<!--
+title: "Contributing to documentation"
+description: "Want to contribute to Netdata's documentation? This guide will set you up with the tools to help others learn about health and performance monitoring."
+custom_edit_url: https://github.com/netdata/netdata/edit/master/docs/contributing/contributing-documentation.md
+-->
+
+# Contributing to documentation
+
+We welcome contributions to Netdata's already extensive documentation.
+
+We store documentation related to the open-source Netdata Agent inside of the [`netdata/netdata`
+repository](https://github.com/netdata/netdata) on GitHub. Documentation related to Netdata Cloud is stored in a private
+repository and is not currently open to community contributions.
+
+The Netdata team aggregates and publishes all documentation at [learn.netdata.cloud](https://learn.netdata.cloud/) using
+[Docusaurus](https://v2.docusaurus.io/) in a private GitHub repository.
+
+## Before you get started
+
+Anyone interested in contributing to documentation should first read the [Netdata style
+guide](/docs/contributing/style-guide.md) and the [Netdata Community Code of Conduct](/CODE_OF_CONDUCT.md).
+
+Netdata's documentation uses Markdown syntax. If you're not familiar with Markdown, read the [Mastering
+Markdown](https://guides.github.com/features/mastering-markdown/) guide from GitHub for the basics on creating
+paragraphs, styled text, lists, tables, and more.
+
+### Netdata's documentation structure
+
+Netdata's documentation is separated into four sections.
+
+- **Netdata**: Documents based on the actions users want to take and the solutions to their problems, spanning both the
+  Netdata Agent and Netdata Cloud.
+ - Stored in various subfolders of the [`/docs` folder](https://github.com/netdata/netdata/tree/master/docs) within the
+ `netdata/netdata` repository: `/docs/collect`, `/docs/configure`, `/docs/export`, `/docs/get`, `/docs/monitor`,
+ `/docs/overview`, `/docs/quickstart`, `/docs/store`, and `/docs/visualize`.
+ - Published at [`https://learn.netdata.cloud/docs`](https://learn.netdata.cloud/docs).
+- **Netdata Agent reference**: Reference documentation for the open-source Netdata Agent.
+ - Stored in various `.md` files within the `netdata/netdata` repository alongside the code responsible for that
+ feature. For example, the database engine's reference documentation is at `/database/engine/README.md`.
+ - Published at [`https://learn.netdata.cloud/docs/agent`](https://learn.netdata.cloud/docs/agent).
+- **Netdata Cloud reference**: Reference documentation for the closed-source Netdata Cloud web application.
+ - Stored in a private GitHub repository and not editable by the community.
+ - Published at [`https://learn.netdata.cloud/docs/cloud`](https://learn.netdata.cloud/docs/cloud).
+- **Guides**: Solutions-based articles for users who want instructions on completing a specific complex task using the
+ Netdata Agent and/or Netdata Cloud.
+ - Stored in the [`/docs/guides` folder](https://github.com/netdata/netdata/tree/master/docs/guides) within the
+ `netdata/netdata` repository. Organized into subfolders that roughly correlate with the core Netdata documentation.
+ - Published at [`https://learn.netdata.cloud/guides`](https://learn.netdata.cloud/guides).
+
+Generally speaking, if you want to contribute to the reference documentation for a specific Netdata Agent feature, find
+the appropriate `.md` file co-located with that feature. If you want to contribute documentation that spans features or
+products, or has no direct correlation with the existing directory structure, place it in the `/docs` folder within
+`netdata/netdata`.
+
+## How to contribute
+
+The easiest way to contribute to Netdata's documentation is to edit a file directly on GitHub. This is perfect for small
+fixes to a single document, such as fixing a typo or clarifying a confusing sentence.
+
+Click on the **Edit this page** button on any published document on [Netdata Learn](https://learn.netdata.cloud). Each
+page has two of these buttons, one beneath the table of contents and another at the end of the document, both of which
+take you to GitHub's code editor. Make your suggested changes, keeping the [Netdata style
+guide](/docs/contributing/style-guide.md) in mind, and use the **Preview changes** button to ensure your Markdown
+syntax works as expected.
+
+Under the **Commit changes** header, write a descriptive title for your requested change. Click the **Commit changes**
+button to initiate your pull request (PR).
+
+Jump down to our instructions on [PRs](#making-a-pull-request) for your next steps.
+
+### Edit locally
+
+Editing documentation locally is the preferred method for complex changes that span multiple documents or change the
+documentation's style or structure.
+
+Create a fork of the Netdata Agent repository by visiting the [Netdata repository](https://github.com/netdata/netdata)
+and clicking on the **Fork** button.
+
+![Screenshot of forking the Netdata
+repository](https://user-images.githubusercontent.com/1153921/59873572-25f5a380-9351-11e9-92a4-a681fe4a2ed9.png)
+
+GitHub will ask you where you want to clone the repository. When finished, you end up at the index of your forked
+Netdata Agent repository. Clone your fork to your local machine:
+
+```bash
+git clone https://github.com/YOUR-GITHUB-USERNAME/netdata.git
+```
+
+Create a new branch using `git checkout -b BRANCH-NAME`. Use your favorite text editor to make your changes, keeping the
+[Netdata style guide](/docs/contributing/style-guide.md) in mind. Add, commit, and push changes to your fork. When
+you're finished, visit the [Netdata Agent Pull requests](https://github.com/netdata/netdata/pulls) to create a new pull
+request based on the changes you made in the new branch of your fork.
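+
+As a sketch, the full local workflow might look like this. The branch name and file are hypothetical; use names that
+describe your own change.
+
+```bash
+git checkout -b docs-fix-typo      # create and switch to a new branch
+# ...edit files with your preferred editor...
+git add docs/configure/nodes.md    # stage the changed file(s)
+git commit -m "Fix typo in node configuration doc"
+git push -u origin docs-fix-typo   # push the branch to your fork
+```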
+
+## Making a pull request
+
+Pull requests (PRs) should be concise and informative. See our [PR guidelines](/CONTRIBUTING.md#pr-guidelines) for
+specifics.
+
+- The title must follow the [imperative mood](https://en.wikipedia.org/wiki/Imperative_mood) and be no more than ~50
+ characters.
+- The description should explain what was changed and why. Verify that you tested any code or processes that you are
+ trying to change.
+
+The Netdata team will review your PR and assess it for correctness, conciseness, and overall quality. We may point to
+specific sections and ask for additional information or other fixes.
+
+After merging your PR, the Netdata team rebuilds the [documentation site](https://learn.netdata.cloud) to publish the
+changed documentation.
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2Fcontributing%2Fcontributing-documentation&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/docs/contributing/style-guide.md b/docs/contributing/style-guide.md
new file mode 100644
index 0000000..faa6fc6
--- /dev/null
+++ b/docs/contributing/style-guide.md
@@ -0,0 +1,492 @@
+<!--
+title: "Netdata style guide"
+description: "The Netdata style guide establishes editorial guidelines for all of Netdata's writing, including documentation, blog posts, in-product UX copy, and more."
+custom_edit_url: https://github.com/netdata/netdata/edit/master/docs/contributing/style-guide.md
+-->
+
+# Netdata style guide
+
+The _Netdata style guide_ establishes editorial guidelines for any writing produced by the Netdata team or the Netdata
+community, including documentation, articles, in-product UX copy, and more. Both internal Netdata teams and external
+contributors to any of Netdata's open-source projects should reference and adhere to this style guide as much as
+possible.
+
+Netdata's writing should **empower** and **educate**. You want to help people understand Netdata's value, encourage them
+to learn more, and ultimately use Netdata's products to democratize monitoring in their organizations. To achieve these
+goals, your writing should be:
+
+- **Clear**. Use simple words and sentences. Use strong, direct, and active language that encourages readers to action.
+- **Concise**. Provide solutions and answers as quickly as possible. Give users the information they need right now,
+ along with opportunities to learn more.
+- **Universal**. Think of yourself as a guide giving a tour of Netdata's products, features, and capabilities to a
+ diverse group of users. Write to reach the widest possible audience.
+
+You can achieve these goals by reading and adhering to the principles outlined below.
+
+## Voice and tone
+
+One way we write empowering, educational content is by using a consistent voice and an appropriate tone.
+
+_Voice_ is like your personality, which doesn't really change day to day.
+
+_Tone_ is how you express your personality. Your expression changes based on your attitude or mood, or based on who
+you're around. In writing, you reflect tone in your word choice, punctuation, sentence structure, or even the use of
+emoji.
+
+The same idea about voice and tone applies to organizations, too. Our voice shouldn't change much between two pieces of
+content, no matter who wrote each, but the tone might be quite different based on who we think is reading.
+
+For example, a [blog post](https://www.netdata.cloud/blog/) and a [press release](https://www.netdata.cloud/news/)
+should have a similar voice, despite most often being written by different people. However, blog posts are relaxed and
+witty, while press releases are focused and academic. You won't see any emoji in a press release.
+
+### Voice
+
+Netdata's voice is authentic, passionate, playful, and respectful.
+
+- **Authentic** writing is honest and fact-driven. Focus on Netdata's strength while accurately communicating what
+ Netdata can and cannot do, and emphasize technical accuracy over hard sells and marketing jargon.
+- **Passionate** writing is strong and direct. Be a champion for the product or feature you're writing about, and let
+ your unique personality and writing style shine.
+- **Playful** writing is friendly, thoughtful, and engaging. Don't take yourself too seriously, as long as it's not at
+ the expense of Netdata or any of its users.
+- **Respectful** writing treats people the way you want to be treated. Prioritize giving solutions and answers as
+ quickly as possible.
+
+### Tone
+
+Netdata's tone is fun and playful, but clarity and conciseness come first. We also tend to be informal, and aren't
+afraid of a playful joke or two.
+
+While we have general standards for voice and tone, we do want every individual's unique writing style to reflect in
+published content.
+
+## Universal communication
+
+Netdata is a global company in every sense, with employees, contributors, and users from around the world. We strive to
+communicate in a way that is clear and easily understood by everyone.
+
+Here are some guidelines, pointers, and questions to be aware of as you write to ensure your writing is universal. Some
+of these are expanded into individual sections in the [language, grammar, and
+mechanics](#language-grammar-and-mechanics) section below.
+
+- Would this language make sense to someone who doesn't work here?
+- Could someone quickly scan this document and understand the material?
+- Create an information hierarchy with key information presented first and clearly called out to improve scannability.
+- Avoid directional language like "sidebar on the right of the page" or "header at the top of the page" since
+ presentation elements may adapt for devices.
+- Use descriptive links rather than "click here" or "learn more".
+- Include alt text for images and image links.
+- Ensure any information contained within a graphic element is also available as plain text.
+- Avoid idioms that may not be familiar to the user or that may not make sense when translated.
+- Avoid local, cultural, or historical references that may be unfamiliar to users.
+- Prioritize active, direct language.
+- Avoid referring to someone's age unless it is directly relevant; likewise, avoid referring to people with age-related
+ descriptors like "young" or "elderly."
+- Avoid disability-related idioms like "lame" or "falling on deaf ears." Don't refer to a person's disability unless
+ it’s directly relevant to what you're writing.
+- Don't call groups of people "guys." Don't call women "girls."
+- Avoid gendered terms in favor of neutral alternatives, like "server" instead of "waitress" and "businessperson"
+ instead of "businessman."
+- When writing about a person, use their communicated pronouns. When in doubt, just ask or use their name. It's OK to
+ use "they" as a singular pronoun.
+
+> Some of these guidelines were adapted from MailChimp under the Creative Commons license.
+
+## Language, grammar, and mechanics
+
+To ensure Netdata's writing is clear, concise, and universal, we have established standards for language, grammar, and
+certain writing mechanics. However, if you're writing about Netdata for an external publication, such as a guest blog
+post, follow that publication's style guide or standards, while keeping the [preferred spelling of Netdata
+terms](#netdata-specific-terms) in mind.
+
+### Active voice
+
+Active voice is more concise and easier to understand compared to passive voice. When using active voice, the subject of
+the sentence performs the action. In passive voice, the subject is acted upon. A famous example of passive voice is the
+phrase "mistakes were made."
+
+| | |
+|-----------------|---------------------------------------------------------------------------------------------|
+| Not recommended | When an alarm is triggered by a metric, a notification is sent by Netdata. |
+| **Recommended** | When a metric triggers an alarm, Netdata sends a notification to your preferred endpoint. |
+
+### Second person
+
+Use the second person ("you") to give instructions or "talk" directly to users.
+
+In these situations, avoid "we," "I," "let's," and "us," particularly in documentation. The "you" pronoun can also be
+implied, depending on your sentence structure.
+
+One valid exception is when a member of the Netdata team or community wants to write about said team or community.
+
+| | |
+|--------------------------------|-------------------------------------------------------------------------------------------|
+| Not recommended | To install Netdata, we should try the one-line installer... |
+| **Recommended** | To install Netdata, you should try the one-line installer... |
+| **Recommended**, implied "you" | To install Netdata, try the one-line installer... |
+
+### "Easy" or "simple"
+
+Using words that imply the complexity of a task or feature goes against our policy of [universal
+communication](#universal-communication). If you claim that a task is easy and the reader struggles to complete it, you
+may inadvertently discourage them.
+
+However, if you give users two options and want to relay that one option is genuinely less complex than another, be
+specific about how and why.
+
+For example, don't write, "Netdata's one-line installer is the easiest way to install Netdata." Instead, you might want
+to say, "Netdata's one-line installer requires fewer steps than manually installing from source."
+
+### Slang, metaphors, and jargon
+
+A particular word, phrase, or metaphor you're familiar with might not translate well to the other cultures featured
+among Netdata's global community. We recommend you avoid slang or colloquialisms in your writing.
+
+In addition, don't use abbreviations that have not yet been defined in the content. See our section on
+[abbreviations](#abbreviations-acronyms-and-initialisms) for additional guidance.
+
+If you must use industry jargon, such as "mean time to resolution," define the term as clearly and concisely as you can.
+
+> Netdata helps you reduce your organization's mean time to resolution (MTTR), which is the average time the responsible
+> team requires to repair a system and resolve an ongoing incident.
+
+### Spelling
+
+While the Netdata team is mostly _not_ American, we still aspire to use American spelling whenever possible, as it is
+the standard for the monitoring industry.
+
+See the [word list](#word-list) for spellings of specific words.
+
+### Capitalization
+
+Follow the general [English standards](https://owl.purdue.edu/owl/general_writing/mechanics/help_with_capitals.html) for
+capitalization. In summary:
+
+- Capitalize the first word of every new sentence.
+- Don't use uppercase for emphasis. (Netdata is the BEST!)
+- Capitalize the names of brands, software, products, and companies according to their official guidelines. (Netdata,
+ Docker, Apache, NGINX)
+- Avoid camel case (NetData) or all caps (NETDATA).
+
+Whenever you refer to the company Netdata, Inc., or the open-source monitoring agent the company develops, capitalize
+**Netdata**.
+
+However, if you are referring to a process, user, or group on a Linux system, use lowercase and fence the word in an
+inline code block: `` `netdata` ``.
+
+| | |
+|-----------------|------------------------------------------------------------------------------------------------|
+| Not recommended | The netdata agent, which spawns the netdata process, is actively maintained by netdata, inc. |
+| **Recommended** | The Netdata Agent, which spawns the `netdata` process, is actively maintained by Netdata, Inc. |
+
+#### Capitalization of document titles and page headings
+
+Document titles and page headings should use sentence case. That means you should only capitalize the first word.
+
+If you need to use the name of a brand, software, product, or company, capitalize it according to its official
+guidelines.
+
+Also, don't put a period (`.`) or colon (`:`) at the end of a title or header.
+
+| | |
+|-----------------|-----------------------------------------------------------------------------------------------------|
+| Not recommended | Getting Started Guide <br />Service Discovery and Auto-Detection: <br />Install netdata with docker |
+| **Recommended** | Getting started guide <br />Service discovery and auto-detection <br />Install Netdata with Docker |
+
+### Abbreviations (acronyms and initialisms)
+
+Use abbreviations (including [acronyms and initialisms](https://www.dictionary.com/e/acronym-vs-abbreviation/)) in
+documentation when one exists, when it's widely accepted within the monitoring/sysadmin community, and when it improves
+the readability of a document.
+
+When introducing an abbreviation to a document for the first time, give the reader both the spelled-out version and the
+shortened version at the same time. For example:
+
+> Use Netdata to monitor Extended Berkeley Packet Filter (eBPF) metrics in real-time.
+
+After you define an abbreviation, don't switch back and forth. Use only the abbreviation for the rest of the document.
+
+You can also use abbreviations in a document's title to keep the title short and relevant. If you do this, you should
+still introduce the spelled-out name alongside the abbreviation as soon as possible.
+
+### Clause order
+
+When instructing users to take action, give them the context first. By placing the context in an initial clause at the
+beginning of the sentence, users can immediately know if they want to read more, follow a link, or skip ahead.
+
+| | |
+|-----------------|--------------------------------------------------------------------------------|
+| Not recommended | Read the reference guide if you'd like to learn more about custom dashboards. |
+| **Recommended** | If you'd like to learn more about custom dashboards, read the reference guide. |
+
+### Oxford comma
+
+The Oxford comma is the comma used after the second-to-last item in a list of three or more items. It appears just
+before "and" or "or."
+
+| | |
+|-----------------|------------------------------------------------------------------------------|
+| Not recommended | Netdata can monitor RAM, disk I/O, MySQL queries per second and lm-sensors. |
+| **Recommended** | Netdata can monitor RAM, disk I/O, MySQL queries per second, and lm-sensors. |
+
+### Future releases or features
+
+Do not mention future releases or upcoming features in writing unless they have been previously communicated via a
+public roadmap.
+
+In particular, documentation must describe, as accurately as possible, the Netdata Agent _as of the [latest
+commit](https://github.com/netdata/netdata/commits/master) in the GitHub repository_. For Netdata Cloud, documentation
+must reflect the _current state of [production](https://app.netdata.cloud)_.
+
+### Informational links
+
+Every link should clearly state its destination. Don't use words like "here" to describe where a link will take your
+reader.
+
+| | |
+|-----------------|-------------------------------------------------------------------------------------------|
+| Not recommended | To install Netdata, click [here](/packaging/installer/README.md). |
+| **Recommended** | To install Netdata, read the [installation instructions](/packaging/installer/README.md). |
+
+Use links as often as required to provide necessary context. Blog posts and guides require fewer hyperlinks than
+documentation. See the section on [linking between documentation](#linking-between-documentation) for guidance on the
+Markdown syntax and path structure of inter-documentation links.
+
+### Contractions
+
+Contractions like "you'll" or "they're" are acceptable in most Netdata writing. They're both authentic and playful, and
+reinforce the idea that you, as a writer, are guiding users through a particular idea, process, or feature.
+
+Contractions are generally not used in press releases or other media engagements.
+
+### Emoji
+
+Emoji can add fun and character to your writing, but should be used sparingly and only when they match the content's
+tone and desired audience.
+
+## Technical/Linux standards
+
+Configuration or maintenance of the Netdata Agent requires some system administration skills, such as navigating
+directories, editing files, or starting/stopping/restarting services. Document these processes consistently, using the
+standards below.
+
+### Switching Linux users
+
+Netdata documentation often suggests that users switch from their normal user to the `netdata` user to run specific
+commands. Use the following command to instruct users to make the switch:
+
+```bash
+sudo su -s /bin/bash netdata
+```
+
+### Hostname/IP address of a node
+
+Use `NODE` instead of an actual or example IP address/hostname when referencing the process of navigating to a dashboard
+or API endpoint in a browser.
+
+| | |
+|-----------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| Not recommended | Navigate to `http://example.com:19999` in your browser to see Netdata's dashboard. <br />Navigate to `http://203.0.113.0:19999` in your browser to see Netdata's dashboard. |
+| **Recommended** | Navigate to `http://NODE:19999` in your browser to see Netdata's dashboard. |
+
+If you worry that `NODE` doesn't provide enough context for the user, particularly in documentation or guides designed
+for beginners, you can provide an explanation:
+
+> With the Netdata Agent running, visit `http://NODE:19999/api/v1/info` in your browser, replacing `NODE` with the IP
+> address or hostname of your Agent.
+
+### Paths and running commands
+
+When instructing users to run a Netdata-specific command, don't assume the path to said command, because not every
+Netdata Agent installation will have commands under the same paths. When applicable, help them navigate to the correct
+path, providing a recommendation or instructions on how to view the running configuration, which includes the correct
+paths.
+
+For example, the [configuration](/docs/configure/nodes.md) doc first teaches users how to find the Netdata config
+directory and navigate to it, then runs commands from the `/etc/netdata` path so that the instructions are more
+universal.
+
+Don't include full paths, beginning from the system's root (`/`), as these might not work on certain systems.
+
+| | |
+|-----------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| Not recommended | Use `edit-config` to edit Netdata's configuration: `sudo /etc/netdata/edit-config netdata.conf`. |
+| **Recommended** | Use `edit-config` to edit Netdata's configuration by first navigating to your [Netdata config directory](/docs/configure/nodes.md#the-netdata-config-directory), which is typically at `/etc/netdata`, then running `sudo edit-config netdata.conf`. |
+
+### `sudo`
+
+Include `sudo` before a command if you believe most Netdata users will need to elevate privileges to run it. This makes
+our writing more universal, and users on `sudo`-less systems are generally already aware that they need to run commands
+differently.
+
+For example, most users need to use `sudo` with the `edit-config` script, because the Netdata config directory is owned
+by the `netdata` user. Same goes for restarting the Netdata Agent with `systemctl`.
+
+| | |
+|-----------------|----------------------------------------------------------------------------------------------------------------------------------------------|
+| Not recommended | Run `edit-config netdata.conf` to configure the Netdata Agent. <br />Run `systemctl restart netdata` to restart the Netdata Agent. |
+| **Recommended** | Run `sudo edit-config netdata.conf` to configure the Netdata Agent. <br />Run `sudo systemctl restart netdata` to restart the Netdata Agent. |
+
+## Markdown syntax
+
+Netdata's documentation uses Markdown syntax.
+
+If you're not familiar with Markdown, read the [Mastering
+Markdown](https://guides.github.com/features/mastering-markdown/) guide from GitHub for the basics on creating
+paragraphs, styled text, lists, tables, and more.
+
+The following sections describe situations in which a specific syntax is required.
+
+### Syntax standards (`remark-lint`)
+
+The Netdata team uses [`remark-lint`](https://github.com/remarkjs/remark-lint) for Markdown code styling.
+
+- Use a maximum of 120 characters per line.
+- Begin headings with hashes, such as `# H1 heading`, `## H2 heading`, and so on.
+- Use `_` for italics/emphasis.
+- Use `**` for bold.
+- Use dashes `-` to begin an unordered list, and put a single space after the dash.
+- Tables should be padded so that pipes line up vertically with added whitespace.
+
+If you want to see all the settings, open the
+[`.remarkrc.js`](https://github.com/netdata/netdata/blob/master/.remarkrc.js) file in the `netdata/netdata` repository.
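+
+For instance, a short snippet that follows these conventions might look like this:
+
+```markdown
+## An example H2 heading
+
+Restart the Agent to _apply_ the **new** settings.
+
+- First list item
+- Second list item
+```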
+
+### Frontmatter
+
+Every document must begin with frontmatter, followed by an H1 (`#`) heading.
+
+Unlike typical Markdown frontmatter, Netdata uses HTML comments (`<!--`, `-->`) to begin and end the frontmatter block.
+These HTML comments are later converted into typical frontmatter syntax when building [Netdata
+Learn](https://learn.netdata.cloud).
+
+Frontmatter _must_ contain the following variables:
+
+- A `title` that quickly and distinctly describes the document's content.
+- A `description` that elaborates on the purpose or goal of the document using no less than 100 characters and no more
+ than 155 characters.
+- A `custom_edit_url` that links directly to the GitHub URL where another user could suggest additional changes to the
+ published document.
+
+Some documents, like the Ansible guide and others in the `/docs/guides` folder, require an `image` variable as well. In
+this case, replace `/docs` with `/img/seo`, and then rebuild the remainder of the path to the document in question. End
+the path with `.png`. A member of the Netdata team will assist in creating the image when publishing the content.
+
+For example, here is the frontmatter for the guide about [deploying the Netdata Agent with
+Ansible](https://learn.netdata.cloud/guides/deploy/ansible).
+
+```markdown
+<!--
+title: Deploy Netdata with Ansible
+description: "Deploy an infrastructure monitoring solution in minutes with the Netdata Agent and Ansible. Use and customize a simple playbook for monitoring as code."
+image: /img/seo/guides/deploy/ansible.png
+custom_edit_url: https://github.com/netdata/netdata/edit/master/docs/guides/deploy/ansible.md
+-->
+
+# Deploy Netdata with Ansible
+
+...
+```
+
+Questions about frontmatter in documentation? [Ask on our community
+forum](https://community.netdata.cloud/c/blog-posts-and-articles/6).
+
+### Linking between documentation
+
+Documentation should link to relevant pages whenever it's relevant and provides valuable context to the reader.
+
+Links should always reference the full path to the document, beginning at the root of the Netdata Agent repository
+(`/`), and ending with the `.md` file extension. Avoid relative links or traversing up directories using `../`.
+
+For example, if you want to link to our node configuration document, link to `/docs/configure/nodes.md`. To reference
+the guide for deploying the Netdata Agent with Ansible, link to `/docs/guides/deploy/ansible.md`.
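+
+In Markdown, such a link looks like the following. The sentence is illustrative; the path is what matters.
+
+```markdown
+Learn how to [configure your nodes](/docs/configure/nodes.md).
+```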
+
+### References to UI elements
+
+When referencing a user interface (UI) element in Netdata, reference the label text of the link/button with Markdown's
+(`**bold text**`) tag.
+
+```markdown
+Click the **Sign in** button.
+```
+
+Avoid directional language whenever possible. Not every user can use instructions like "look at the top-left corner" to
+find their way around an interface, and interfaces often change between devices. If you must use directional language,
+try to supplement the text with an [image](#images).
+
+### Images
+
+Don't rely on images to convey features, ideas, or instructions. Accompany every image with descriptive alt text.
+
+In Markdown, use the standard image syntax, `![]()`, and place the alt text between the brackets `[]`. Here's an example
+using our logo:
+
+```markdown
+![The Netdata logo](../../web/gui/images/netdata-logomark.svg)
+```
+
+Reference in-product text, code samples, and terminal output with actual text content, not screen captures or other
+images. Place the text in an appropriate element, such as a blockquote or code block, so all users can parse the
+information.
+
+### Syntax highlighting
+
+Our documentation site at [learn.netdata.cloud](https://learn.netdata.cloud) uses
+[Prism](https://v2.docusaurus.io/docs/markdown-features#syntax-highlighting) for syntax highlighting. Netdata
+documentation mostly uses the following languages: `c`, `python`, `js`, `shell`, `markdown`, `bash`, `css`, `html`,
+and `go`. If no language is specified, Prism tries to guess the language based on its content.
+
+Include the language directly after the three backticks (```` ``` ````) that start the code block. For highlighting C
+code, for example:
+
+````c
+```c
+inline char *health_stock_config_dir(void) {
+ char buffer[FILENAME_MAX + 1];
+ snprintfz(buffer, FILENAME_MAX, "%s/health.d", netdata_configured_stock_config_dir);
+ return config_get(CONFIG_SECTION_HEALTH, "stock health configuration directory", buffer);
+}
+```
+````
+
+And the prettified result:
+
+```c
+inline char *health_stock_config_dir(void) {
+ char buffer[FILENAME_MAX + 1];
+ snprintfz(buffer, FILENAME_MAX, "%s/health.d", netdata_configured_stock_config_dir);
+ return config_get(CONFIG_SECTION_HEALTH, "stock health configuration directory", buffer);
+}
+```
+
+Prism also supports titles and line highlighting. See the [Docusaurus
+documentation](https://v2.docusaurus.io/docs/markdown-features#code-blocks) for more information.
+
+## Word list
+
+The following tables describe the standard spelling, capitalization, and usage of words found in Netdata's writing.
+
+### Netdata-specific terms
+
+| Term | Definition |
+|-----------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| **claimed node** | A node that you've proved ownership of by completing the [claiming process](/claim/README.md). The claimed node will then appear in your Space and any War Rooms you added it to. |
+| **Netdata** | The company behind the open-source Netdata Agent and the Netdata Cloud web application. Never use _netdata_ or _NetData_. <br /><br />In general, focus on the user's goals, actions, and solutions rather than what the company provides. For example, write _Learn more about enabling alarm notifications on your preferred platforms_ instead of _Netdata sends alarm notifications to your preferred platforms_. |
+| **Netdata Agent** | The free and open source [monitoring agent](https://github.com/netdata/netdata) that you can install on all of your distributed systems, whether they're physical, virtual, containerized, ephemeral, and more. The Agent monitors systems running Linux, Docker, Kubernetes, macOS, FreeBSD, and more, and collects metrics from hundreds of popular services and applications. |
+| **Netdata Cloud** | The web application hosted at [https://app.netdata.cloud](https://app.netdata.cloud) that helps you monitor an entire infrastructure of distributed systems in real time. <br /><br />Never use _Cloud_ without the preceding _Netdata_ to avoid ambiguity. |
+| **Netdata community** | Contributors to any of Netdata's [open-source projects](https://learn.netdata.cloud/contribute/projects), members of the [community forum](https://community.netdata.cloud/). |
+| **Netdata community forum** | The Discourse-powered forum for feature requests, Netdata Cloud technical support, and conversations about Netdata's monitoring and troubleshooting products. |
+| **node** | A system on which the Netdata Agent is installed. The system can be physical, virtual, in a Docker container, and more. Depending on your infrastructure, you may have one, dozens, or hundreds of nodes. Some nodes are _ephemeral_, in that they're created/destroyed automatically by an orchestrator service. |
+| **Space** | The highest level container within Netdata Cloud for a user to organize their team members and nodes within their infrastructure. A Space likely represents an entire organization or a large team. <br /><br />_Space_ is always capitalized. |
+| **unreachable node** | A claimed node with a disrupted [Agent-Cloud link](/aclk/README.md). Unreachable could mean the node no longer exists or is experiencing network connectivity issues with Cloud. |
+| **visited node** | A node which has had its Agent dashboard directly visited by a user. A list of these is maintained on a per-user basis. |
+| **War Room** | A smaller grouping of nodes where users can view key metrics in real-time and monitor the health of many nodes with their alarm status. War Rooms can be used to organize nodes in any way that makes sense for your infrastructure, such as by a service, purpose, physical location, and more. <br /><br />_War Room_ is always capitalized. |
+
+### Other technical terms
+
+| Term | Definition |
+|-----------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| **filesystem** | Use instead of _file system_. |
+| **preconfigured** | The concept that many of Netdata's features come with sane defaults that users don't need to configure to find [immediate value](/docs/overview/why-netdata.md#simple-to-deploy). |
+| **real time**/**real-time** | Use _real time_ as a noun phrase, most often with _in_: _Netdata collects metrics in real time_. Use _real-time_ as an adjective: _Netdata collects real-time metrics from hundreds of supported applications and services_. |
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2Fcontributing%2Fstyle-guide&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/docs/export/enable-connector.md b/docs/export/enable-connector.md
new file mode 100644
index 0000000..9789de2
--- /dev/null
+++ b/docs/export/enable-connector.md
@@ -0,0 +1,93 @@
+<!--
+title: "Enable an exporting connector"
+description: "Learn how to enable and configure any connector using examples to start exporting metrics to external time-series databases in minutes."
+custom_edit_url: https://github.com/netdata/netdata/edit/master/docs/export/enable-connector.md
+-->
+
+# Enable an exporting connector
+
+Now that you've found the right connector for your [external time-series
+database](/docs/export/external-databases.md#supported-databases), you can enable the exporting engine and the
+connector itself. We'll walk through the process of enabling the exporting engine, followed by two examples using the
+OpenTSDB and Graphite connectors.
+
+> When you enable the exporting engine and a connector, the Netdata Agent exports metrics _beginning from the time you
+> restart its process_, not the entire [database of long-term metrics](/docs/store/change-metrics-storage.md).
+
+Once you understand the process of enabling a connector, you can translate that knowledge to any other connector.
+
+## Enable the exporting engine
+
+Use `edit-config` from your [Netdata config directory](/docs/configure/nodes.md#the-netdata-config-directory) to open
+`exporting.conf`:
+
+```bash
+sudo ./edit-config exporting.conf
+```
+
+Enable the exporting engine itself by setting `enabled` to `yes`:
+
+```conf
+[exporting:global]
+ enabled = yes
+```
+
+Save the file but keep it open, as you will edit it again to enable specific connectors.
+
+## Example: Enable the OpenTSDB connector
+
+Use the following configuration as a starting point. Copy and paste it into `exporting.conf`.
+
+```conf
+[opentsdb:http:my_opentsdb_http_instance]
+ enabled = yes
+ destination = localhost:4242
+```
+
+Replace `my_opentsdb_http_instance` with an instance name of your choice, and change the `destination` setting to the IP
+address or hostname of your OpenTSDB database.
+
+Restart your Agent with `sudo systemctl restart netdata` to begin exporting to your OpenTSDB database. The Netdata Agent
+exports metrics _beginning from the time the process starts_, and because it exports as metrics are collected, you
+should start seeing data in your external database after only a few seconds.
+
+Any further configuration is optional, based on your needs and the configuration of your OpenTSDB database. See the
+[OpenTSDB connector doc](/exporting/opentsdb/README.md) and [exporting engine
+reference](/exporting/README.md#configuration) for details.
+
+## Example: Enable the Graphite connector
+
+Use the following configuration as a starting point. Copy and paste it into `exporting.conf`.
+
+```conf
+[graphite:my_graphite_instance]
+ enabled = yes
+ destination = 203.0.113.0:2003
+```
+
+Replace `my_graphite_instance` with an instance name of your choice, and change the `destination` setting to the IP
+address or hostname of your Graphite-supported database.
+
+Restart your Agent with `sudo systemctl restart netdata` to begin exporting to your Graphite-supported database. Because
+the Agent exports metrics as they're collected, you should start seeing data in your external database after only a few
+seconds.
+
+Any further configuration is optional, based on your needs and the configuration of your Graphite-supported database.
+See [exporting engine reference](/exporting/README.md#configuration) for details.
+
+## What's next?
+
+If you want to further configure your exporting connectors, see the [exporting engine
+reference](/exporting/README.md#configuration).
+
+For a comprehensive example of using the Graphite connector, read our guide: [_Export and visualize Netdata metrics in
+Graphite_](/docs/guides/export/export-netdata-metrics-graphite.md). Or, start [using host
+labels](/docs/guides/using-host-labels.md) on exported metrics.
+
+### Related reference documentation
+
+- [Exporting engine reference](/exporting/README.md)
+- [OpenTSDB connector](/exporting/opentsdb/README.md)
+- [Graphite connector](/exporting/graphite/README.md)
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2Fexporting%2Fenable-connector&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/docs/export/external-databases.md b/docs/export/external-databases.md
new file mode 100644
index 0000000..309b03a
--- /dev/null
+++ b/docs/export/external-databases.md
@@ -0,0 +1,90 @@
+<!--
+title: "Export metrics to external time-series databases"
+description: "Use the exporting engine to send Netdata metrics to popular external time series databases for long-term storage or further analysis."
+custom_edit_url: https://github.com/netdata/netdata/edit/master/docs/export/external-databases.md
+-->
+
+# Export metrics to external time-series databases
+
+Netdata allows you to export metrics to external time-series databases with the [exporting
+engine](/exporting/README.md). This system uses a number of **connectors** to initiate connections to [more than
+thirty](#supported-databases) supported databases, including InfluxDB, Prometheus, Graphite, ElasticSearch, and many
+more.
+
+The exporting engine resamples Netdata's thousands of per-second metrics at a user-configurable interval, and can export
+metrics to multiple time-series databases simultaneously.
+
+Based on your needs and the resources you've allocated to your external time-series database, you can configure the
+interval at which metrics are exported, or export only certain charts with filtering. You can also choose whether
+metrics are exported as-collected, as a normalized average, or as the sum/volume of values over the configured interval.
+
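+For example, a single connector instance section in `exporting.conf` can tune all three behaviors. A minimal sketch,
+using option names from the [exporting engine reference](/exporting/README.md#configuration) and a hypothetical
+Graphite destination:
+
+```conf
+[graphite:my_graphite_instance]
+    enabled = yes
+    destination = 203.0.113.0:2003
+    # export a data point every 10 seconds instead of every second
+    update every = 10
+    # send the average of the values collected during each interval
+    data source = average
+    # only export charts whose names match this pattern
+    send charts matching = system.*
+```
+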
+Exporting is an important part of Netdata's effort to be [interoperable](/docs/overview/netdata-monitoring-stack.md)
+with other monitoring software. You can use an external time-series database for long-term metrics retention, further
+analysis, or correlation with other tools, such as application tracing.
+
+## Supported databases
+
+Netdata supports exporting metrics to the following databases through several
+[connectors](/exporting/README.md#features). Once you find the connector that works for your database, open its
+documentation and the [enabling a connector](/docs/export/enable-connector.md) doc for details on enabling it.
+
+- **AppOptics**: [Prometheus remote write](/exporting/prometheus/remote_write/README.md)
+- **AWS Kinesis**: [AWS Kinesis Data Streams](/exporting/aws_kinesis/README.md)
+- **Azure Data Explorer**: [Prometheus remote write](/exporting/prometheus/remote_write/README.md)
+- **Azure Event Hubs**: [Prometheus remote write](/exporting/prometheus/remote_write/README.md)
+- **Blueflood**: [Graphite](/exporting/graphite/README.md)
+- **Chronix**: [Prometheus remote write](/exporting/prometheus/remote_write/README.md)
+- **Cortex**: [Prometheus remote write](/exporting/prometheus/remote_write/README.md)
+- **CrateDB**: [Prometheus remote write](/exporting/prometheus/remote_write/README.md)
+- **ElasticSearch**: [Graphite](/exporting/graphite/README.md), [Prometheus remote
+ write](/exporting/prometheus/remote_write/README.md)
+- **Gnocchi**: [Prometheus remote write](/exporting/prometheus/remote_write/README.md)
+- **Google BigQuery**: [Prometheus remote write](/exporting/prometheus/remote_write/README.md)
+- **Google Cloud Pub/Sub**: [Google Cloud Pub/Sub Service](/exporting/pubsub/README.md)
+- **Graphite**: [Graphite](/exporting/graphite/README.md), [Prometheus remote
+ write](/exporting/prometheus/remote_write/README.md)
+- **InfluxDB**: [Graphite](/exporting/graphite/README.md), [Prometheus remote
+ write](/exporting/prometheus/remote_write/README.md)
+- **IRONdb**: [Prometheus remote write](/exporting/prometheus/remote_write/README.md)
+- **JSON**: [JSON document databases](/exporting/json/README.md)
+- **Kafka**: [Prometheus remote write](/exporting/prometheus/remote_write/README.md)
+- **KairosDB**: [Graphite](/exporting/graphite/README.md), [OpenTSDB](/exporting/opentsdb/README.md)
+- **M3DB**: [Prometheus remote write](/exporting/prometheus/remote_write/README.md)
+- **MetricFire**: [Prometheus remote write](/exporting/prometheus/remote_write/README.md)
+- **MongoDB**: [MongoDB](/exporting/mongodb/)
+- **New Relic**: [Prometheus remote write](/exporting/prometheus/remote_write/README.md)
+- **OpenTSDB**: [OpenTSDB](/exporting/opentsdb/README.md), [Prometheus remote
+ write](/exporting/prometheus/remote_write/README.md)
+- **PostgreSQL**: [Prometheus remote write](/exporting/prometheus/remote_write/README.md)
+ via [PostgreSQL Prometheus Adapter](https://github.com/CrunchyData/postgresql-prometheus-adapter)
+- **Prometheus**: [Prometheus scraper](/exporting/prometheus/README.md)
+- **TimescaleDB**: [Prometheus remote write](/exporting/prometheus/remote_write/README.md),
+ [netdata-timescale-relay](/exporting/TIMESCALE.md)
+- **QuasarDB**: [Prometheus remote write](/exporting/prometheus/remote_write/README.md)
+- **SignalFx**: [Prometheus remote write](/exporting/prometheus/remote_write/README.md)
+- **Splunk**: [Prometheus remote write](/exporting/prometheus/remote_write/README.md)
+- **TiKV**: [Prometheus remote write](/exporting/prometheus/remote_write/README.md)
+- **Thanos**: [Prometheus remote write](/exporting/prometheus/remote_write/README.md)
+- **VictoriaMetrics**: [Prometheus remote write](/exporting/prometheus/remote_write/README.md)
+- **Wavefront**: [Prometheus remote write](/exporting/prometheus/remote_write/README.md)
+
+Can't find your preferred external time-series database? Ask our [community](https://community.netdata.cloud/) for
+solutions, or file an [issue on
+GitHub](https://github.com/netdata/netdata/issues/new?labels=bug%2C+needs+triage&template=bug_report.md).
+
+## What's next?
+
+We recommend you read our document on [enabling a connector](/docs/export/enable-connector.md) to learn about the
+process and discover important configuration options. If you would rather skip ahead, click any of the connector links
+above for reference documentation, which outlines that connector's prerequisites along with its connector-specific
+configuration options.
+
+Read about one possible use case for exporting metrics in our guide: [_Export and visualize Netdata metrics in
+Graphite_](/docs/guides/export/export-netdata-metrics-graphite.md).
+
+### Related reference documentation
+
+- [Exporting engine reference](/exporting/README.md)
+- [Backends reference (deprecated)](/backends/README.md)
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2Fexporting%2Fexternal-databases&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/docs/get/README.md b/docs/get/README.md
new file mode 100644
index 0000000..f89472d
--- /dev/null
+++ b/docs/get/README.md
@@ -0,0 +1,158 @@
+<!--
+title: "Get Netdata"
+description: "Time to get Netdata's monitoring and troubleshooting solution. Sign in to Cloud, download the Agent everywhere, and connect it all together."
+custom_edit_url: https://github.com/netdata/netdata/edit/master/docs/get/README.md
+-->
+
+# Get Netdata
+
+import { OneLineInstall } from '../src/components/OneLineInstall/'
+import { Install, InstallBox } from '../src/components/InstallBox/'
+
+Netdata uses the open-source Netdata Agent and Netdata Cloud web application
+[together](/docs/overview/what-is-netdata.md) to help you collect every metric, visualize the health of your nodes, and
+troubleshoot complex performance problems. Once you've signed in to Netdata Cloud and installed the Netdata Agent on all
+your nodes, you can claim your nodes and see their real-time metrics on a single interface.
+
+## Sign in to Netdata Cloud
+
+If you don't already have a free Netdata Cloud account, go ahead and [create one](https://app.netdata.cloud).
+
+Choose your preferred authentication method and follow the onboarding process to create your Space.
+
+## Install the Netdata Agent
+
+The Netdata Agent runs permanently on all your physical/virtual servers, containers, cloud deployments, and edge/IoT
+devices. It runs on Linux distributions (**Ubuntu**, **Debian**, **CentOS**, and more), container/microservice platforms
+(**Kubernetes** clusters, **Docker**), and many other operating systems (**FreeBSD**, **macOS**), with no `sudo`
+required.
+
+> ⚠️ Many distributions ship with third-party packages of Netdata, which we cannot maintain or keep up to date. For the
+> best experience, use one of the methods described or linked to below.
+
+The **recommended** way to install the Netdata Agent on a Linux system is our one-line [kickstart
+script](/packaging/installer/methods/kickstart.md). This script automatically installs dependencies and builds Netdata
+from its source code.
+
+<OneLineInstall />
+
+Copy the script, paste it into your node's terminal, and hit `Enter`.
+
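+If the interactive element above doesn't render, the one-liner looks like the following at the time of writing (see the
+[kickstart script doc](/packaging/installer/methods/kickstart.md) for the current form):
+
+```bash
+bash <(curl -Ss https://my-netdata.io/kickstart.sh)
+```
+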
+Open your favorite browser and navigate to `http://localhost:19999` or `http://REMOTE-HOST:19999` to open the dashboard.
+
+<details>
+<summary>Watch how the one-line installer works</summary>
+<iframe width="820" height="460" src="https://www.youtube.com/embed/tVIp7ycK60A" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>
+</details>
+
+### Other operating systems/methods
+
+Want to install Netdata on a Kubernetes cluster, with Docker, or using a different method? Not a Linux user? Choose your
+platform to see specific instructions.
+
+<Install>
+ <InstallBox
+ to="/docs/agent/packaging/installer/methods/kubernetes"
+ img="/img/index/methods/kubernetes.svg"
+ os="Kubernetes" />
+ <InstallBox
+ to="/docs/agent/packaging/docker"
+ img="/img/index/methods/docker.svg"
+ os="Docker" />
+ <InstallBox
+ to="/docs/agent/packaging/installer/methods/cloud-providers"
+ img="/img/index/methods/cloud.svg"
+ imgDark="/img/index/methods/cloud-dark.svg"
+ os="Cloud providers (GCP, AWS, Azure)" />
+ <InstallBox
+ to="/docs/agent/packaging/installer/methods/packages"
+ img="/img/index/methods/package.svg"
+ imgDark="/img/index/methods/package-dark.svg"
+ os="Linux with .deb/.rpm" />
+ <InstallBox
+ to="/docs/agent/packaging/installer/methods/kickstart-64"
+ img="/img/index/methods/static.svg"
+ imgDark="/img/index/methods/static-dark.svg"
+ os="Linux with static 64-bit binary" />
+ <InstallBox
+ to="/docs/agent/packaging/installer/methods/manual"
+ img="/img/index/methods/git.svg"
+ imgDark="/img/index/methods/git-dark.svg"
+ os="Linux from Git" />
+ <InstallBox
+ to="/docs/agent/packaging/installer/methods/freebsd"
+ img="/img/index/methods/freebsd.svg"
+ os="FreeBSD" />
+ <InstallBox
+ to="/docs/agent/packaging/installer/methods/macos"
+ img="/img/index/methods/macos.svg"
+ os="MacOS" />
+</Install>
+
+Even more options are available in our [packaging documentation](/packaging/installer/README.md#alternative-methods).
+
+## Claim your node on Netdata Cloud
+
+You need to [claim](/claim/README.md) your nodes to see them in Netdata Cloud. Claiming establishes a secure TLS
+connection to Netdata Cloud using the [Agent-Cloud link](/aclk/README.md), and proves you have write and administrative
+access to that node.
+
+When you view a node in Netdata Cloud, the Agent running on that node streams metrics, metadata, and alarm status to
+Netdata Cloud, which in turn streams those metrics to your web browser. Netdata Cloud [does not
+store](/docs/store/distributed-data-architecture.md#does-netdata-cloud-store-my-metrics) or log metrics values.
+
+To claim a node, you need to run the claiming script. In Netdata Cloud, click on your Space's name, then **Manage your
+Space** in the dropdown. Click **Nodes** in the panel that appears. Copy the script and run it in your node's terminal.
+The script looks like the following, with long strings instead of `TOKEN` and `ROOM1,ROOM2`:
+
+```bash
+sudo netdata-claim.sh -token=TOKEN -rooms=ROOM1,ROOM2 -url=https://app.netdata.cloud
+```
+
+The script returns `Agent was successfully claimed.` after creating a new RSA key pair and establishing the link to Netdata
+Cloud. If the script returns an error, try our [troubleshooting tips](/claim/README.md#troubleshooting).
+
+> 💡 Our claiming reference guide also contains instructions for claiming [Docker
+> containers](/claim/README.md#claim-an-agent-running-in-docker), [Kubernetes cluster parent
+> pods](/claim/README.md#claim-an-agent-running-in-docker), via a [proxy](/claim/README.md#claim-through-a-proxy), and
+> more.
+
+<details>
+<summary>Watch how claiming nodes works</summary>
+<iframe width="820" height="460" src="https://www.youtube.com/embed/UAzVvhMab8g" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>
+</details>
+
+For more information on the claiming process, why we implemented it, and how it works, see the [claim](/claim/README.md)
+and [Agent-Cloud link](/aclk/README.md) reference docs.
+
+## Troubleshooting
+
+If you experience issues with installing the Netdata Agent, see our
+[installation](/packaging/installer/README.md#troubleshooting-and-known-issues) reference. Our
+[reinstall](/packaging/installer/REINSTALL.md) doc can help clean up your installation and get you back to monitoring.
+
+For Netdata Cloud issues, see the [Netdata Cloud reference docs](https://learn.netdata.cloud/docs/cloud).
+
+## What's next?
+
+At this point, you have set up your free Netdata Cloud account, installed the Netdata Agent on your node(s), and claimed
+one or more nodes to your Space. You're ready to start monitoring, visualizing, and troubleshooting with Netdata. We
+have two quickstart guides based on the scope of what you need to monitor.
+
+Interested in monitoring a single node? Check out our [single-node monitoring
+quickstart](/docs/quickstart/single-node.md).
+
+If you're looking to monitor an entire infrastructure with Netdata, see the [infrastructure monitoring
+quickstart](/docs/quickstart/infrastructure.md).
+
+Or, skip ahead to [Agent configuration](/docs/configure/nodes.md).
+
+### Related reference documentation
+
+- [Netdata Agent · Packaging &amp; installer](/packaging/installer/README.md)
+- [Netdata Agent · Reinstall Netdata](/packaging/installer/REINSTALL.md)
+- [Netdata Agent · Update Netdata](/packaging/installer/UPDATE.md)
+- [Netdata Agent · Agent-Cloud link](/aclk/README.md)
+- [Netdata Agent · Agent claiming](/claim/README.md)
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2Fget&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/docs/getting-started.md b/docs/getting-started.md
new file mode 100644
index 0000000..1ccab42
--- /dev/null
+++ b/docs/getting-started.md
@@ -0,0 +1,240 @@
+<!--
+title: "Get started guide"
+date: 2020-05-04
+custom_edit_url: https://github.com/netdata/netdata/edit/master/docs/getting-started.md
+-->
+
+# Get started guide
+
+Thanks for trying the Netdata Agent! In this getting started guide, we'll quickly walk you through the first steps you
+should take after installing the Agent.
+
+The Agent can collect thousands of metrics in real time and use its database for long-term metrics storage without any
+configuration, but there are some valuable things to know to get the most out of Netdata based on your needs.
+
+We'll jump right into some technical details, so if you're brand-new to monitoring the health and performance of systems
+and applications, our [**step-by-step guide**](/docs/guides/step-by-step/step-00.md) might be a better fit.
+
+> If you haven't installed Netdata yet, visit the [installation instructions](/packaging/installer/README.md) for
+> details, including our one-liner script, which automatically installs Netdata on almost all Linux distributions.
+
+## Access the dashboard
+
+Open up your web browser of choice and navigate to `http://NODE:19999`, replacing `NODE` with the IP address or hostname
+of your Agent. Hit **Enter**. Welcome to Netdata!
+
+![Animated GIF of navigating to the
+dashboard](https://user-images.githubusercontent.com/1153921/80825153-abaec600-8b94-11ea-8b17-1b770a2abaa9.gif)
+
+**What's next?**:
+
+- Read more about the [standard Netdata dashboard](/web/gui/).
+- Learn all the specifics of [using charts](/web/README.md#using-charts) or the differences between [charts,
+  contexts, and families](/web/README.md#charts-contexts-families).
+
+## Configuration basics
+
+Netdata primarily uses the `netdata.conf` file for custom configurations.
+
+On most systems, you can find that file at `/etc/netdata/netdata.conf`.
+
+> Some operating systems will place your `netdata.conf` at `/opt/netdata/etc/netdata/netdata.conf`, so check there if
+> you find nothing at `/etc/netdata/netdata.conf`.
+
+The `netdata.conf` file is broken up into various sections, such as `[global]`, `[web]`, `[registry]`, and more. By
+default, most options are commented, so you'll have to uncomment them (remove the `#`) for Netdata to recognize your
+change.
+
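+For example, to change the port the dashboard listens on, you would uncomment and edit a line in the `[web]` section.
+A sketch, where the value shown is the default:
+
+```conf
+[web]
+    # commented out, so the default applies:
+    # default port = 19999
+    # uncommented, so Netdata reads your value on restart:
+    default port = 19999
+```
+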
+Once you save your changes, [restart Netdata](#start-stop-and-restart-netdata) to load your new configuration.
+
+**What's next?**:
+
+- [Change how long Netdata stores metrics](#change-how-long-netdata-stores-metrics) by changing the `page cache size`
+ and `dbengine disk space` settings in `netdata.conf`.
+- Move Netdata's dashboard to a [different port](/web/server/) or enable TLS/HTTPS
+ encryption.
+- See all the `netdata.conf` options in our [daemon configuration documentation](/daemon/config/).
+- Run your own [registry](/registry/README.md#run-your-own-registry).
+
+## Change how long Netdata stores metrics
+
+Netdata can store long-term, historical metrics out of the box. A custom database uses RAM to store recent metrics,
+ensuring dashboards and API queries are extremely responsive, while "spilling" historical metrics to disk. This
+configuration keeps RAM usage low while allowing for long-term, on-disk metrics storage.
+
+You can tweak this custom _database engine_ to store a much larger dataset than your system's available RAM,
+particularly if you allow Netdata to use slightly more RAM and disk space than the default configuration.
+
+Read our guide on [changing how long Netdata stores metrics](/docs/store/change-metrics-storage.md) to learn more and
+use the embedded calculator to figure out the exact settings you'll need to store historical metrics right in the
+Agent's database.
+
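+As a quick sketch of the relevant settings in the `[global]` section of `netdata.conf` (the values shown are the
+defaults):
+
+```conf
+[global]
+    memory mode = dbengine
+    # RAM dedicated to caching recent metric values, in MiB
+    page cache size = 32
+    # disk space used for long-term storage of compressed metrics, in MiB
+    dbengine multihost disk space = 256
+```
+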
+**What's next?**:
+
+- Learn more about the [memory requirements for the database
+ engine](/database/engine/README.md#memory-requirements) to understand how much RAM/disk space you should commit
+ to storing historical metrics.
+
+## Collect data from more sources
+
+When Netdata _starts_, it auto-detects dozens of **data sources**, such as database servers, web servers, and more. To
+auto-detect and collect metrics from a service or application you just installed, you need to [restart
+Netdata](#start-stop-and-restart-netdata).
+
+> There is one exception: when Netdata is running on the host itself (that is, not in a container), it will always
+> auto-detect containers and VMs.
+
+However, auto-detection only works if you installed the source using its standard installation procedure. If Netdata
+isn't collecting metrics after a restart, your source probably isn't configured correctly. Look at the [external plugin
+documentation](/collectors/plugins.d/) to find the appropriate module for your source. Those pages will contain
+more information about how to configure your source for auto-detection.
+
+Some modules, like `chrony`, are disabled by default and must be enabled manually for auto-detection to work.
+
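+For example, to enable the `chrony` module, you would flip its line in `python.d.conf` (a sketch, assuming `chrony` is
+handled by the python.d orchestrator in your version):
+
+```conf
+# in python.d.conf; uncomment the line and set it to yes
+chrony: yes
+```
+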
+Once Netdata detects a valid source of data, it will continue trying to collect data from it. For example, if
+Netdata is collecting data from an Nginx web server, and you shut Nginx down, Netdata will collect new data as soon as
+you start the web server back up&mdash;no restart necessary.
+
+### Configure plugins
+
+Even if Netdata auto-detects your service/application, you might want to configure what, or how often, Netdata is
+collecting data.
+
+Netdata uses **internal** and **external** plugins to collect data. Internal plugins run within the Netdata daemon, while
+external plugins are independent processes that send metrics to Netdata over pipes. There are also plugin
+**orchestrators**, which are external plugins with one or more data collection **modules**.
+
+You can configure both internal and external plugins, along with the individual modules. There are many ways to do so:
+
+- In `netdata.conf`, `[plugins]` section: Enable or disable internal or external plugins with `yes` or `no`.
+- In `netdata.conf`, `[plugin:XXX]` sections: Each plugin has a section for changing collection frequency or passing
+ options to the plugin.
+- In `.conf` files for each external plugin: For example, at `/etc/netdata/python.d.conf`.
+- In `.conf` files for each module: For example, at `/etc/netdata/python.d/nginx.conf`.
+
+It's complex, so let's walk through an example of the various `.conf` files responsible for collecting data from an
+Nginx web server using the `nginx` module and the `python.d` plugin orchestrator.
+
+First, you can enable or disable the `python.d` plugin entirely in `netdata.conf`.
+
+```conf
+[plugins]
+ # Enabled
+ python.d = yes
+ # Disabled
+ python.d = no
+```
+
+You can also configure the entire `python.d` external plugin via the `[plugin:python.d]` section in `netdata.conf`.
+Here, you can change how often Netdata uses `python.d` to collect metrics or pass other command options:
+
+```conf
+[plugin:python.d]
+ update every = 1
+ command options =
+```
+
+The `python.d` plugin has a separate configuration file at `/etc/netdata/python.d.conf` for enabling and disabling
+modules. You can use the `edit-config` script to edit the file, or open it with your text editor of choice:
+
+```bash
+sudo /etc/netdata/edit-config python.d.conf
+```
+
+Finally, the `nginx` module has a configuration file called `nginx.conf` in the `python.d` folder. Again, use
+`edit-config` or your editor of choice:
+
+```bash
+sudo /etc/netdata/edit-config python.d/nginx.conf
+```
+
+In the `nginx.conf` file, you'll find additional options. The default works in most situations, but you may need to make
+changes based on your particular Nginx setup.
+
+**What's next?**:
+
+- Look at the [full list of data collection modules](/collectors/COLLECTORS.md)
+ to configure your sources for auto-detection and monitoring.
+- Improve the [performance](/docs/guides/configure/performance.md) of Netdata on low-memory systems.
+- Configure `systemd` to expose [systemd services
+ utilization](/collectors/cgroups.plugin/README.md#monitoring-systemd-services) metrics automatically.
+- [Reconfigure individual charts](/daemon/config/README.md#per-chart-configuration) in `netdata.conf`.
+
+## Health monitoring and alarms
+
+Netdata comes with hundreds of health monitoring alarms for detecting anomalies on production servers. If you're running
+Netdata on a workstation, you might want to disable Netdata's alarms.
+
+Edit your `/etc/netdata/netdata.conf` file and set the following:
+
+```conf
+[health]
+ enabled = no
+```
+
+If you want to keep health monitoring enabled, but turn email notifications off, edit your `health_alarm_notify.conf`
+file with `edit-config`, or with the text editor of your choice:
+
+```bash
+sudo /etc/netdata/edit-config health_alarm_notify.conf
+```
+
+Find the `SEND_EMAIL="YES"` line and change it to `SEND_EMAIL="NO"`.
+
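+Or, as a non-interactive sketch of the same change (assuming your config file lives at `/etc/netdata/`):
+
+```bash
+# flip SEND_EMAIL from YES to NO in place
+sudo sed -i 's/^SEND_EMAIL="YES"/SEND_EMAIL="NO"/' /etc/netdata/health_alarm_notify.conf
+```
+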
+**What's next?**:
+
+- Follow the [health quickstart](/health/QUICKSTART.md) to locate and edit existing health entities, and then
+ create your own.
+- See all the alarm options via the [health configuration reference](/health/REFERENCE.md).
+- Add a new notification method, like [Slack](/health/notifications/slack/).
+
+## Monitor multiple systems with Netdata Cloud
+
+If you have the Agent installed on multiple nodes, you can use Netdata Cloud in two ways: Monitor the health and
+performance of an entire infrastructure via the Netdata Cloud web application, or use the Visited Nodes menu that's
+built into every dashboard.
+
+![The War Room
+Overview](https://user-images.githubusercontent.com/1153921/102651377-b1f4b100-4129-11eb-8e60-d2995d258c16.png)
+
+You can use these features together or separately&mdash;the decision is up to you and the needs of your infrastructure.
+
+**What's next?**:
+
+- Sign up for [Netdata Cloud](https://app.netdata.cloud).
+- Read the [infrastructure monitoring quickstart](/docs/quickstart/infrastructure.md).
+- Better understand how the Netdata Agent connects securely to Netdata Cloud with [claiming](/claim/README.md) and
+ [Agent-Cloud link](/aclk/README.md) documentation.
+
+## Start, stop, and restart Netdata
+
+When you install Netdata, it's configured to start at boot and stop at shutdown. You shouldn't need to start or stop
+Netdata manually, but you will probably need to restart it at some point.
+
+- To **start** Netdata, open a terminal and run `service netdata start`.
+- To **stop** Netdata, run `service netdata stop`.
+- To **restart** Netdata, run `service netdata restart`.
+
+The `service` command is a wrapper script that tries to use your system's preferred method of starting or stopping
+Netdata. But if any of those commands fails, try using the equivalent commands for `systemd` and `init.d`:
+
+- **systemd**: `systemctl start netdata`, `systemctl stop netdata`, `systemctl restart netdata`
+- **init.d**: `/etc/init.d/netdata start`, `/etc/init.d/netdata stop`, `/etc/init.d/netdata restart`
+
+## What's next?
+
+Even after you've configured `netdata.conf`, tweaked alarms, learned the basics of performance troubleshooting, and
+claimed all your systems in Netdata Cloud or added them to the Visited nodes menu, you've just gotten started with
+Netdata.
+
+Take a look at some more advanced features and configurations:
+
+- Centralize Netdata metrics from many systems with [streaming](/streaming/README.md)
+- Enable long-term archiving of Netdata metrics via [exporting engine](/exporting/README.md) to time-series databases.
+- Improve security by putting Netdata behind an [Nginx proxy with SSL](/docs/Running-behind-nginx.md).
+
+Or, learn more about how you can contribute to [Netdata core](/CONTRIBUTING.md) or our
+[documentation](/docs/contributing/contributing-documentation.md)!
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2Fgetting-started&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/docs/guides/collect-apache-nginx-web-logs.md b/docs/guides/collect-apache-nginx-web-logs.md
new file mode 100644
index 0000000..215ced3
--- /dev/null
+++ b/docs/guides/collect-apache-nginx-web-logs.md
@@ -0,0 +1,161 @@
+<!--
+title: "Monitor Nginx or Apache web server log files with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/docs/guides/collect-apache-nginx-web-logs.md
+-->
+
+# Monitor Nginx or Apache web server log files with Netdata
+
+Log files have been a critical resource for developers and system administrators who want to understand the health and
+performance of their web servers, and Netdata is taking important steps to make them even more valuable.
+
+By parsing web server log files with Netdata, and seeing the volume of redirects, requests, or server errors over time,
+you can better understand what's happening on your infrastructure. Too many bad requests? Maybe a recent deploy missed a
+few small SVG icons. Too many requests? Time to batten down the hatches—it's a DDoS.
+
+Netdata has been capable of monitoring web log files for quite some time, thanks to the [weblog python.d
+module](/collectors/python.d.plugin/web_log/README.md), but we recently refactored this module in Go, and that effort
+comes with a ton of improvements.
+
+You can now use the [LTSV log format](http://ltsv.org/), track TLS and cipher usage, and the whole parser is faster than
+ever. In one test on a system with SSD storage, the collector consistently parsed the logs for 200,000 requests in
+200ms, using ~30% of a single core. To learn more about these improvements, see our [v1.19 release post](https://blog.netdata.cloud/posts/release-1.19/).
+
+The [go.d plugin's web log module](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/weblog/) is
+currently compatible with [Nginx](https://nginx.org/en/) and [Apache](https://httpd.apache.org/).
+
+This guide will walk you through using the new Go-based web log collector to turn the logs these web servers
+constantly write into real-time insights into your infrastructure.
+
+## Set up your web servers
+
+As with all data sources, Netdata can auto-detect Nginx or Apache servers if you installed them using their standard
+installation procedures.
+
+Almost all web server installations will need _no_ configuration to start collecting metrics. As long as your web
+server has a readable access log file, you can configure the web log plugin to access and parse it.
+
+## Configure the web log collector
+
+To use the Go version of this plugin, you need to explicitly enable it, and disable the deprecated Python version.
+First, open `python.d.conf`:
+
+```bash
+cd /etc/netdata/ # Replace with your Netdata configuration directory, if not /etc/netdata/
+./edit-config python.d.conf
+```
+
+Find the `web_log` line, uncomment it, and set it to `web_log: no`. Next, open the `go.d.conf` file for editing.
+
+```bash
+./edit-config go.d.conf
+```
+
+Find the `web_log` line again, uncomment it, and set it to `web_log: yes`.
+
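+After both edits, the relevant lines should look something like the following sketch (surrounding content trimmed,
+and exact layout varies by version):
+
+```yaml
+# python.d.conf: the deprecated Python module stays off
+web_log: no
+```
+
+```yaml
+# go.d.conf: the Go module is enabled under the modules key
+modules:
+  web_log: yes
+```
+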
+Finally, restart Netdata with `service netdata restart`, or the appropriate method for your system. You should see
+metrics in your Netdata dashboard!
+
+![Example of real-time web server log metrics in Netdata's
+dashboard](https://user-images.githubusercontent.com/1153921/69448130-2980c280-0d15-11ea-9fa5-6dcff25a92c3.png)
+
+If you don't see web log charts, or **web log nginx**/**web log apache** menus on the right-hand side of your dashboard,
+continue reading for other configuration options.
+
+## Custom configuration of the web log collector
+
+The web log collector's default configuration comes with a few example jobs that should cover most Linux distributions
+and their default locations for log files:
+
+```yaml
+# [ JOBS ]
+jobs:
+# NGINX
+# debian, arch
+ - name: nginx
+ path: /var/log/nginx/access.log
+
+# gentoo
+ - name: nginx
+ path: /var/log/nginx/localhost.access_log
+
+# APACHE
+# debian
+ - name: apache
+ path: /var/log/apache2/access.log
+
+# gentoo
+ - name: apache
+ path: /var/log/apache2/access_log
+
+# arch
+ - name: apache
+ path: /var/log/httpd/access_log
+
+# debian
+ - name: apache_vhosts
+ path: /var/log/apache2/other_vhosts_access.log
+
+# GUNICORN
+ - name: gunicorn
+ path: /var/log/gunicorn/access.log
+
+ - name: gunicorn
+ path: /var/log/gunicorn/gunicorn-access.log
+```
+
+However, if your log files were not auto-detected, it might be because they are in a different location. In that case,
+open the collector's `web_log.conf` file with `edit-config`:
+
+```bash
+./edit-config go.d/web_log.conf
+```
+
+To create a new custom configuration, you need to set the `path` parameter to point to your web server's access log
+file. You can give it a `name` as well, and set the `log_type` to `auto`.
+
+```yaml
+jobs:
+ - name: example
+ path: /path/to/file.log
+ log_type: auto
+```
+
+Restart Netdata with `service netdata restart` or the appropriate method for your system. Netdata should pick up your
+web server's access log and begin showing real-time charts!
+
+### Custom log formats and fields
+
+The web log collector is capable of parsing custom Nginx and Apache log formats and presenting them as charts, but we'll
+leave that topic for a separate guide.
+
+We do have [extensive
+documentation](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/weblog/#custom-log-format) on how
+to build custom parsing for Nginx and Apache logs.
+
+## Tweak web log collector alarms
+
+Over time, we've created some default alarms for web log monitoring. These alarms are designed to work only when your
+web server is receiving more than 120 requests per minute. Otherwise, there's simply not enough data to draw
+conclusions about what is "too few" or "too many."
+
+- [web log alarms](https://raw.githubusercontent.com/netdata/netdata/master/health/health.d/web_log.conf).
+
+You can also edit this file directly with `edit-config`:
+
+```bash
+./edit-config health.d/web_log.conf
+```
+
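+Each alarm in that file is a health _entity_. As a heavily simplified sketch of the shape you'll find there (not a
+drop-in replacement for the shipped alarms, which carry more fields and tuned thresholds):
+
+```conf
+# simplified for illustration only
+template: 1m_successful_requests
+      on: web_log.response_statuses
+  lookup: sum -1m unaligned of success
+   every: 10s
+    warn: $this < 120
+    info: successful HTTP requests in the last minute
+```
+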
+For more information about editing the defaults or writing new alarm entities, see our [health monitoring
+documentation](/health/README.md).
+
+## What's next?
+
+Now that you have web log collection up and running, we recommend you take a look at the documentation for our
+[python.d web log module](/collectors/python.d.plugin/web_log/README.md) for ideas on how you can turn these rather
+"boring" logs into powerful real-time tools for keeping your servers happy.
+
+Don't forget to give GitHub user [Wing924](https://github.com/Wing924) a big 👍 for his hard work in starting up the Go
+refactoring effort.
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2Fguides%2Fcollect-apache-nginx-web-logs&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/docs/guides/collect-unbound-metrics.md b/docs/guides/collect-unbound-metrics.md
new file mode 100644
index 0000000..2994647
--- /dev/null
+++ b/docs/guides/collect-unbound-metrics.md
@@ -0,0 +1,138 @@
+<!--
+title: "Monitor Unbound DNS servers with Netdata"
+date: 2020-03-31
+custom_edit_url: https://github.com/netdata/netdata/edit/master/docs/guides/collect-unbound-metrics.md
+-->
+
+# Monitor Unbound DNS servers with Netdata
+
+[Unbound](https://nlnetlabs.nl/projects/unbound/about/) is a "validating, recursive, caching DNS resolver" from NLnet
+Labs. In v1.19 of Netdata, we released a completely refactored collector for gathering real-time metrics from Unbound
+servers and displaying them in Netdata dashboards.
+
+Unbound runs on FreeBSD, OpenBSD, NetBSD, macOS, Linux, and Windows, and supports DNS-over-TLS, which ensures that DNS
+queries and answers are all encrypted with TLS. In theory, that should reduce the risk of eavesdropping or
+man-in-the-middle attacks when communicating with DNS servers.
+
+This guide will show you how to collect dozens of essential metrics from your Unbound servers with minimal
+configuration.
+
+## Set up your Unbound installation
+
+As with all data sources, Netdata can auto-detect Unbound servers if you installed them using the standard installation
+procedure.
+
+Regardless of whether you're connecting to a local or remote Unbound server, you need to be able to access the server's
+`remote-control` interface via an IP address, FQDN, or Unix socket.
+
+To set up the `remote-control` interface, you can use `unbound-control`. First, run `unbound-control-setup` to generate
+the TLS key files that will encrypt connections to the remote interface. Then add the following to the end of your
+`unbound.conf` configuration file. See the [Unbound
+documentation](https://nlnetlabs.nl/documentation/unbound/howto-setup/#setup-remote-control) for more details on using
+`unbound-control`, such as how to handle situations when Unbound is run under a unique user.
+
+```conf
+# enable remote-control
+remote-control:
+ control-enable: yes
+```
+
+Next, make your `unbound.conf`, `unbound_control.key`, and `unbound_control.pem` files readable by Netdata using [access
+control lists](https://wiki.archlinux.org/index.php/Access_Control_Lists) (ACLs).
+
+```bash
+sudo setfacl -m user:netdata:r unbound.conf
+sudo setfacl -m user:netdata:r unbound_control.key
+sudo setfacl -m user:netdata:r unbound_control.pem
+```
+
+Finally, take note of whether you're using Unbound in _cumulative_ or _non-cumulative_ mode. This will become relevant
+when configuring the collector.
+
+## Configure the Unbound collector
+
+You may not need to do any more configuration to have Netdata collect your Unbound metrics.
+
+If you followed the steps above to enable `remote-control` and make your Unbound files readable by Netdata, that should
+be enough. Restart Netdata with `service netdata restart`, or the appropriate method for your system. You should see
+Unbound metrics in your Netdata dashboard!
+
+![Some charts showing Unbound metrics in real time](https://user-images.githubusercontent.com/1153921/69659974-93160f00-103c-11ea-88e6-27e9efcf8c0d.png)
+
+If that failed, you will need to manually configure the collector via `go.d/unbound.conf`. See the next section for
+details.
+
+### Manual setup for a local Unbound server
+
+To configure Netdata's Unbound collector module, navigate to your Netdata configuration directory (typically at
+`/etc/netdata/`) and use `edit-config` to initialize and edit your Unbound configuration file.
+
+```bash
+cd /etc/netdata/ # Replace with your Netdata configuration directory, if not /etc/netdata/
+sudo ./edit-config go.d/unbound.conf
+```
+
+The file contains all the global and job-related parameters. The `name` setting is required, and two Unbound servers
+can't have the same name.
+
+> It is important to know whether your Unbound server is running in cumulative or non-cumulative mode, as a mismatch
+> between modes will create incorrect charts.
+
+Here are two examples for local Unbound servers, which may work based on your unique setup:
+
+```yaml
+jobs:
+ - name: local
+ address: 127.0.0.1:8953
+ cumulative: no
+ use_tls: yes
+ tls_skip_verify: yes
+ tls_cert: /path/to/unbound_control.pem
+ tls_key: /path/to/unbound_control.key
+
+ - name: local
+ address: 127.0.0.1:8953
+ cumulative: yes
+ use_tls: no
+```
+
+Netdata will attempt to read `unbound.conf` to get the appropriate `address`, `cumulative`, `use_tls`, `tls_cert`, and
+`tls_key` parameters.
+
+Restart Netdata with `service netdata restart`, or the appropriate method for your system.
+
+### Manual setup for a remote Unbound server
+
+Collecting metrics from remote Unbound servers requires manual configuration. There are too many possibilities to cover
+all remote connections here, but the [default `unbound.conf`
+file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/unbound.conf) contains a few useful examples:
+
+```yaml
+jobs:
+ - name: remote
+ address: 203.0.113.10:8953
+ use_tls: no
+
+ - name: remote_cumulative
+ address: 203.0.113.11:8953
+ use_tls: no
+ cumulative: yes
+
+ - name: remote
+ address: 203.0.113.10:8953
+ cumulative: yes
+ use_tls: yes
+ tls_cert: /etc/unbound/unbound_control.pem
+ tls_key: /etc/unbound/unbound_control.key
+```
+
+To see all the available options, see the default [unbound.conf
+file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/unbound.conf).
+
+## What's next?
+
+Now that you're collecting metrics from your Unbound servers, let us know how it's working for you! There's always room
+for improvement or refinement based on real-world use cases. Feel free to [file an
+issue](https://github.com/netdata/netdata/issues/new?labels=bug%2C+needs+triage&template=bug_report.md) with your
+thoughts.
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2Fguides%2Funbound-metrics&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/docs/guides/configure/performance.md b/docs/guides/configure/performance.md
new file mode 100644
index 0000000..5f93a8c
--- /dev/null
+++ b/docs/guides/configure/performance.md
@@ -0,0 +1,235 @@
+<!--
+title: How to optimize the Netdata Agent's performance
+description: "While the Netdata Agent is designed to monitor a system with only 1% CPU, you can optimize its performance for low-resource systems."
+image: /img/seo/guides/configure/performance.png
+custom_edit_url: https://github.com/netdata/netdata/edit/master/docs/guides/configure/performance.md
+-->
+
+# How to optimize the Netdata Agent's performance
+
+We designed the Netdata Agent to be incredibly lightweight, even when it's collecting a few thousand dimensions every
+second and visualizing that data into hundreds of charts. The Agent itself should never use more than 1% of a single CPU
+core, roughly 100 MiB of RAM, and minimal disk I/O to collect, store, and visualize all this data.
+
+We take this scalability seriously. We have one user [running
+Netdata](https://github.com/netdata/netdata/issues/1323#issuecomment-266427841) on a system with 144 cores and 288
+threads. Despite collecting 100,000 metrics every second, the Agent still uses only 9% of a single CPU core.
+
+But not everyone has such powerful systems at their disposal. For example, you might run the Agent on a cloud VM with
+only 512 MiB of RAM, or an IoT device like a [Raspberry Pi](/docs/guides/monitor/pi-hole-raspberry-pi.md). In these
+cases, reducing Netdata's footprint beyond its already diminutive size can pay big dividends, giving your services more
+horsepower while still monitoring the health and performance of the node, OS, hardware, and applications.
+
+## Prerequisites
+
+- A node running the Netdata Agent.
+- Familiarity with configuring the Netdata Agent with `edit-config`.
+
+If you're not familiar with how to configure the Netdata Agent, read our [node configuration
+doc](/docs/configure/nodes.md) before continuing with this guide. This guide assumes familiarity with the Netdata config
+directory, using `edit-config`, and the process of uncommenting/editing various settings in `netdata.conf` and other
+configuration files.
+
+## What affects Netdata's performance?
+
+Netdata's performance is primarily affected by **data collection/retention** and **clients accessing data**.
+
+You can configure almost all aspects of data collection/retention, and certain aspects of clients accessing data. For
+example, you can't control how many users might be viewing a local Agent dashboard, [viewing an
+infrastructure](/docs/visualize/overview-infrastructure.md) in real time with Netdata Cloud, or running [Metric
+Correlations](https://learn.netdata.cloud/docs/cloud/insights/metric-correlations).
+
+The Netdata Agent runs with the lowest possible [process scheduling
+policy](/daemon/README.md#netdata-process-scheduling-policy), which is `nice 19`, and uses the `idle` process scheduler.
+Together, these settings ensure that the Agent only gets CPU resources when the node has CPU resources to spare. If the
+node reaches 100% CPU utilization, the Agent is stopped first to ensure your applications get any available resources.
+In addition, under heavy load, collectors that require disk I/O may stop and show gaps in charts.
+
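+One way to see these settings on a running node is to inspect the Agent's process attributes, where `NI` is the
+niceness and `CLS` is the scheduling class:
+
+```bash
+# -C selects processes by command name; IDL in the CLS column means the idle scheduler is active
+ps -C netdata -o pid,ni,cls,comm
+```
+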
+Let's walk through the best ways to improve the Netdata Agent's performance.
+
+## Reduce collection frequency
+
+The fastest way to improve the Agent's resource utilization is to reduce how often it collects metrics.
+
+### Global
+
+If you don't need per-second metrics, or if the Netdata Agent uses a lot of CPU even when no one is viewing that node's
+dashboard, configure the Agent to collect metrics less often.
+
+Open `netdata.conf` and edit the `update every` setting. The default is `1`, meaning that the Agent collects metrics
+every second.
+
+If you change this to `2`, Netdata enforces a minimum `update every` setting of 2 seconds, and collects metrics every
+other second, which will effectively halve CPU utilization. Set this to `5` or `10` to collect metrics every 5 or 10
+seconds, respectively.
+
+```conf
+[global]
+ update every = 5
+```
+
+### Specific plugin or collector
+
+Every collector and plugin has its own `update every` setting, which you can also change in the `go.d.conf`,
+`python.d.conf`, `node.d.conf`, or `charts.d.conf` files, or in individual collector configuration files. If the `update
+every` for an individual collector is less than the global, the Netdata Agent uses the global setting. See the [enable
+or configure a collector](/docs/collect/enable-configure.md) doc for details.
+
+To reduce the frequency of an [internal
+plugin/collector](/docs/collect/how-collectors-work.md#collector-architecture-and-terminology), open `netdata.conf` and
+find the appropriate section. For example, to reduce the frequency of the `apps` plugin, which collects and visualizes
+metrics on application resource utilization:
+
+```conf
+[plugin:apps]
+ update every = 5
+```
+
+To [configure an individual collector](/docs/collect/enable-configure.md), open its specific configuration file with
+`edit-config` and look for the `update_every` setting. For example, to reduce the frequency of the `nginx` collector,
+run `sudo ./edit-config go.d/nginx.conf`:
+
+```conf
+# [ GLOBAL ]
+update_every: 10
+```
+
+## Disable unneeded plugins or collectors
+
+If you know that you don't need an [entire plugin or a specific
+collector](/docs/collect/how-collectors-work.md#collector-architecture-and-terminology), you can disable any of them.
+Keep in mind that if a plugin/collector has nothing to do, it simply shuts down and does not consume system resources.
+You will only improve the Agent's performance by disabling plugins/collectors that are actively collecting metrics.
+
+Open `netdata.conf` and scroll down to the `[plugins]` section. To disable any plugin, uncomment it and set the value to
+`no`. For example, to explicitly keep the `proc` and `go.d` plugins enabled while disabling `python.d`, `charts.d`, and
+`node.d`:
+
+```conf
+[plugins]
+ proc = yes
+ python.d = no
+ charts.d = no
+ node.d = no
+ go.d = yes
+```
+
+Disable specific collectors by opening their respective plugin configuration files, uncommenting the line for the
+collector, and setting its value to `no`.
+
+```bash
+sudo ./edit-config go.d.conf
+sudo ./edit-config python.d.conf
+sudo ./edit-config node.d.conf
+sudo ./edit-config charts.d.conf
+```
+
+For example, to disable a few Python collectors:
+
+```conf
+apache: no
+dockerd: no
+fail2ban: no
+```
+
+## Lower memory usage for metrics retention
+
+Reduce the disk space that the [database engine](/database/engine/README.md) uses to retain metrics by editing
+the `dbengine multihost disk space` option in `netdata.conf`. The default value is `256`, but can be set to a minimum of
+`64`. By reducing the disk space allocation, Netdata also needs to store less metadata in the node's memory.
+
+The `page cache size` option also directly impacts Netdata's memory usage, but has a minimum value of `32`.
+
+Reducing the value of `dbengine multihost disk space` does slim down Netdata's resource usage, but it also reduces how
+long Netdata retains metrics. Find the right balance of performance and metrics retention by using the [dbengine
+calculator](/docs/store/change-metrics-storage.md#calculate-the-system-resources-ram-disk-space-needed-to-store-metrics).
+
+All the settings are found in the `[global]` section of `netdata.conf`:
+
+```conf
+[global]
+ memory mode = dbengine
+ page cache size = 32
+ dbengine multihost disk space = 256
+```
+
+## Run Netdata behind Nginx
+
+A dedicated web server like Nginx provides far more robustness than the Agent's internal [web server](/web/README.md).
+Nginx can handle more concurrent connections, reuse idle connections, and use fast gzip compression to reduce payloads.
+
+For details on installing Nginx as a proxy for the local Agent dashboard, see our [Nginx
+doc](/docs/Running-behind-nginx.md).
+
+After you complete Nginx setup according to the doc linked above, we recommend setting `keepalive` to `1024`, and using
+gzip compression with the following options in the `location /` block:
+
+```conf
+ location / {
+ ...
+ gzip on;
+ gzip_proxied any;
+ gzip_types *;
+ }
+```
+
+Finally, edit `netdata.conf` with the following settings:
+
+```conf
+[global]
+ bind socket to IP = 127.0.0.1
+ access log = none
+ disconnect idle web clients after seconds = 3600
+ enable web responses gzip compression = no
+```
+
+## Disable/lower gzip compression for the dashboard
+
+If you choose not to run the Agent behind Nginx, you can disable or lower the Agent's web server's gzip compression.
+While gzip compression does reduce the size of the HTML/CSS/JS payload, it does use additional CPU while a user is
+looking at the local Agent dashboard.
+
+To disable gzip compression, open `netdata.conf` and find the `[web]` section:
+
+```conf
+[web]
+ enable gzip compression = no
+```
+
+Or to lower the default compression level:
+
+```conf
+[web]
+ enable gzip compression = yes
+ gzip compression level = 1
+```
+
+## Disable logs
+
+If your installation is working correctly, and you're not actively auditing Netdata's logs, disable them in
+`netdata.conf`.
+
+```conf
+[global]
+ debug log = none
+ error log = none
+ access log = none
+```
+
+## What's next?
+
+We hope this guide helped you better understand how to optimize the performance of the Netdata Agent.
+
+Now that your Agent is running smoothly, we recommend you [secure your nodes](/docs/configure/nodes.md) if you haven't
+already.
+
+Next, dive into some of Netdata's more complex features, such as configuring its health watchdog or exporting metrics to
+an external time-series database.
+
+- [Interact with dashboards and charts](/docs/visualize/interact-dashboards-charts.md)
+- [Configure health alarms](/docs/monitor/configure-alarms.md)
+- [Export metrics to external time-series databases](/docs/export/external-databases.md)
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2Fguides%2Fconfigure%2Fperformance.md&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/docs/guides/deploy/ansible.md b/docs/guides/deploy/ansible.md
new file mode 100644
index 0000000..8298fd0
--- /dev/null
+++ b/docs/guides/deploy/ansible.md
@@ -0,0 +1,174 @@
+<!--
+title: Deploy Netdata with Ansible
+description: "Deploy an infrastructure monitoring solution in minutes with the Netdata Agent and Ansible. Use and customize a simple playbook for monitoring as code."
+image: /img/seo/guides/deploy/ansible.png
+custom_edit_url: https://github.com/netdata/netdata/edit/master/docs/guides/deploy/ansible.md
+-->
+
+# Deploy Netdata with Ansible
+
+Netdata's [one-line kickstart](https://learn.netdata.cloud/docs/get) is zero-configuration, highly adaptable, and
+compatible with tons of different operating systems and Linux distributions. You can use it on bare metal, VMs,
+containers, and everything in-between.
+
+But what if you're trying to bootstrap an infrastructure monitoring solution as quickly as possible? What if you need
+to deploy Netdata across an entire infrastructure with many nodes? What if you want to make this deployment reliable,
+repeatable, and idempotent? What if you want to write and deploy your infrastructure or cloud monitoring system like
+code?
+
+Enter [Ansible](https://ansible.com), a popular system provisioning, configuration management, and infrastructure as
+code (IaC) tool. Ansible uses **playbooks** to glue many standardized operations together with a simple syntax, then run
+those operations over standard and secure SSH connections. There's no agent to install on the remote system, so all you
+have to worry about is your application and your monitoring software.
+
+Ansible has some competition from the likes of [Puppet](https://puppet.com/) or [Chef](https://www.chef.io/), but the
+most valuable feature of Ansible is that every operation is **idempotent**. From the [Ansible
+glossary](https://docs.ansible.com/ansible/latest/reference_appendices/glossary.html):
+
+> An operation is idempotent if the result of performing it once is exactly the same as the result of performing it
+> repeatedly without any intervening actions.
+
+Idempotency means you can run an Ansible playbook against your nodes any number of times without affecting how they
+operate. When you deploy Netdata with Ansible, you're also deploying _monitoring as code_.
+
+In this guide, we'll walk through the process of using an [Ansible
+playbook](https://github.com/netdata/community/tree/main/netdata-agent-deployment/ansible-quickstart) to automatically
+deploy the Netdata Agent to any number of distributed nodes, manage the configuration of each node, and claim them to
+your Netdata Cloud account. You'll go from some unmonitored nodes to an infrastructure monitoring solution in a matter
+of minutes.
+
+## Prerequisites
+
+- A Netdata Cloud account. [Sign in and create one](https://app.netdata.cloud) if you don't have one already.
+- An administration system with [Ansible](https://www.ansible.com/) installed.
+- One or more nodes that your administration system can access via [SSH public
+ keys](https://git-scm.com/book/en/v2/Git-on-the-Server-Generating-Your-SSH-Public-Key) (preferably password-less).
+
+## Download and configure the playbook
+
+First, download the
+[playbook](https://github.com/netdata/community/tree/main/netdata-agent-deployment/ansible-quickstart), move it to the
+current directory, and remove the rest of the cloned repository, as it's not required for using the Ansible playbook.
+
+```bash
+git clone https://github.com/netdata/community.git
+mv community/netdata-agent-deployment/ansible-quickstart .
+rm -rf community
+```
+
+Next, `cd` into the Ansible directory.
+
+```bash
+cd ansible-quickstart
+```
+
+### Edit the `hosts` file
+
+The `hosts` file contains a list of IP addresses or hostnames that Ansible will try to run the playbook against. The
+`hosts` file that comes with the repository contains two example IP addresses, which you should replace with the IP
+addresses or hostnames of your own nodes.
+
+```conf
+203.0.113.0 hostname=node-01
+203.0.113.1 hostname=node-02
+```
+
+The `hostname` variable sets the name that appears on both the local Agent dashboard and in Netdata Cloud. You can
+omit the `hostname=` string entirely to use the system's default hostname.
+
+#### Set the login user (optional)
+
+If you SSH into your nodes as a user other than `root`, you need to configure `hosts` according to those user names. Use
+the `ansible_user` variable to set the login user. For example:
+
+```conf
+203.0.113.0 hostname=ansible-01 ansible_user=example
+```
+
+#### Set your SSH key (optional)
+
+If you use an SSH key other than `~/.ssh/id_rsa` for logging into your nodes, you can set that on a per-node basis in
+the `hosts` file with the `ansible_ssh_private_key_file` variable. For example, to log into two Lightsail instances
+using different SSH keys supplied by AWS:
+
+```conf
+203.0.113.0 hostname=ansible-01 ansible_ssh_private_key_file=~/.ssh/LightsailDefaultKey-us-west-2.pem
+203.0.113.1 hostname=ansible-02 ansible_ssh_private_key_file=~/.ssh/LightsailDefaultKey-us-east-1.pem
+```
+
+### Edit the `vars/main.yml` file
+
+In order to claim your node(s) to your Space in Netdata Cloud, and see all their metrics in real-time in [composite
+charts](/docs/visualize/overview-infrastructure.md) or perform [Metric
+Correlations](https://learn.netdata.cloud/docs/cloud/insights/metric-correlations), you need to set the `claim_token`
+and `claim_rooms` variables.
+
+To find these values, go to Netdata Cloud, then click on your Space's name in the top navigation,
+then click on **Manage your Space**. Click on the **Nodes** tab in the panel that appears, which displays a script with
+`token` and `room` strings.
+
+![Animated GIF of finding the claiming script and the token and room
+strings](https://user-images.githubusercontent.com/1153921/98740235-f4c3ac00-2367-11eb-8ffd-e9ab0f04c463.gif)
+
+Copy those strings into the `claim_token` and `claim_rooms` variables.
+
+```yml
+claim_token: XXXXX
+claim_rooms: XXXXX
+```
+
+Change the `dbengine_multihost_disk_space` setting if you want to change the metrics retention policy by allocating
+more or less disk space for storing metrics. The default is 2048 MiB, or 2 GiB.
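+
+For example, to give the database engine roughly twice the default space on every node, set the variable described
+above in `vars/main.yml` (the `4096` value here is just an illustration; the unit is MiB):
+
+```yml
+# Retain roughly twice as many metrics as the 2 GiB default
+dbengine_multihost_disk_space: 4096
+```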
+
+Because we're claiming this node to Netdata Cloud, and will view its dashboards there instead of via the IP address or
+hostname of the node, the playbook disables that local dashboard by setting `web_mode` to `none`. This gives a small
+security boost by not allowing any unwanted access to the local dashboard.
+
+You can read more about this decision, or other ways you might lock down the local dashboard, in our [node security
+doc](https://learn.netdata.cloud/docs/configure/secure-nodes).
+
+> Curious about why Netdata's dashboard is open by default? Read our [blog
+> post](https://www.netdata.cloud/blog/netdata-agent-dashboard/) on that zero-configuration design decision.
+
+## Run the playbook
+
+Time to run the playbook from your administration system:
+
+```bash
+ansible-playbook -i hosts tasks/main.yml
+```
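+
+If you'd rather test the playbook against a single node first, Ansible's standard `--limit` flag works here as well.
+Use the inventory name from your `hosts` file, such as one of the example IP addresses shown earlier:
+
+```bash
+# Run the playbook against only one of the nodes defined in the hosts file
+ansible-playbook -i hosts tasks/main.yml --limit 203.0.113.0
+```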
+
+Ansible first connects to your node(s) via SSH, then [collects
+facts](https://docs.ansible.com/ansible/latest/user_guide/playbooks_vars_facts.html#ansible-facts) about the system.
+This playbook doesn't use these facts, but you could expand it to provision specific types of systems based on the
+makeup of your infrastructure.
+
+Next, Ansible makes changes to each node according to the `tasks` defined in the playbook, and
+[returns](https://docs.ansible.com/ansible/latest/reference_appendices/common_return_values.html#changed) whether each
+task resulted in a change, failed, or was skipped entirely.
+
+The task to install Netdata will take a few minutes per node, so be patient! Once the playbook reaches the claiming
+task, your nodes start populating your Space in Netdata Cloud.
+
+## What's next?
+
+Go use Netdata!
+
+If you need a bit more guidance for how you can use Netdata for health monitoring and performance troubleshooting, see
+our [documentation](https://learn.netdata.cloud/docs). It's designed like a comprehensive guide, based on what you might
+want to do with Netdata, so use those categories to dive in.
+
+Some of the best places to start:
+
+- [Enable or configure a collector](/docs/collect/enable-configure.md)
+- [Supported collectors list](/collectors/COLLECTORS.md)
+- [See an overview of your infrastructure](/docs/visualize/overview-infrastructure.md)
+- [Interact with dashboards and charts](/docs/visualize/interact-dashboards-charts.md)
+- [Change how long Netdata stores metrics](/docs/store/change-metrics-storage.md)
+
+We're looking for more deployment and configuration management strategies, whether via Ansible or other
+provisioning/infrastructure as code software, such as Chef or Puppet, in our [community
+repo](https://github.com/netdata/community). Anyone can fork the repo and submit a PR, whether to improve this
+playbook, extend it, or create an entirely new experience for deploying Netdata across an entire infrastructure.
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2Fguides%2Fdeploy%2Fansible.md&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/docs/guides/export/export-netdata-metrics-graphite.md b/docs/guides/export/export-netdata-metrics-graphite.md
new file mode 100644
index 0000000..9a4a4f5
--- /dev/null
+++ b/docs/guides/export/export-netdata-metrics-graphite.md
@@ -0,0 +1,184 @@
+<!--
+title: Export and visualize Netdata metrics in Graphite
+description: "Use Netdata to collect and export thousands of metrics to Graphite for long-term storage or further analysis."
+image: /img/seo/guides/export/export-netdata-metrics-graphite.png
+-->
+
+# Export and visualize Netdata metrics in Graphite
+
+Collecting metrics is an essential part of monitoring any application, service, or infrastructure, but it's not the
+final step for any developer, sysadmin, SRE, or DevOps engineer who's keeping an eye on things. To take meaningful
+action on these metrics, you may need to develop a stack of monitoring tools that work in parallel to help you diagnose
+anomalies and discover root causes faster.
+
+We designed Netdata with interoperability in mind. The Agent collects thousands of metrics every second, and then what
+you do with them is up to you. You can [store metrics in the database engine](/docs/guides/longer-metrics-storage.md),
+or send them to another time series database for long-term storage or further analysis using Netdata's [exporting
+engine](/docs/export/external-databases.md).
+
+In this guide, we'll show you how to export Netdata metrics to [Graphite](https://graphiteapp.org/) for long-term
+storage and further analysis. Graphite is a free open-source software (FOSS) tool that collects and graphs numeric
+time-series data, such as all the metrics collected by the Netdata Agent itself. Using Netdata and Graphite together,
+you get more visibility into the health and performance of your entire infrastructure.
+
+![A custom dashboard in Grafana with Netdata
+metrics](https://user-images.githubusercontent.com/1153921/83903855-b8828480-a713-11ea-8edb-927ba521599b.png)
+
+Let's get started.
+
+## Install the Netdata Agent
+
+If you don't have the Netdata Agent installed already, visit the [installation guide](/packaging/installer/README.md)
+for the recommended instructions for your system. In most cases, you can use the one-line installation script:
+
+```bash
+bash <(curl -Ss https://my-netdata.io/kickstart.sh)
+```
+
+Once installation finishes, open your browser and navigate to `http://NODE:19999`, replacing `NODE` with the IP address
+or hostname of your system, to find the Agent dashboard.
+
+## Install Graphite via Docker
+
+For this guide, we'll install Graphite using Docker. See the [Docker documentation](https://docs.docker.com/get-docker/)
+for details if you don't yet have it installed on your system.
+
+> If you already have Graphite installed, skip this step. If you want to install via a different method, see the
+> [Graphite installation docs](https://graphite.readthedocs.io/en/latest/install.html), with the caveat that some
+> configuration settings may be different.
+
+Start up the Graphite image with `docker run`.
+
+```bash
+docker run -d \
+ --name graphite \
+ --restart=always \
+ -p 80:80 \
+ -p 2003-2004:2003-2004 \
+ -p 2023-2024:2023-2024 \
+ -p 8125:8125/udp \
+ -p 8126:8126 \
+ graphiteapp/graphite-statsd
+```
+
+Open your browser and navigate to `http://NODE` to see the Graphite interface. Nothing there yet, but we'll fix that soon
+enough.
+
+![An empty Graphite
+dashboard](https://user-images.githubusercontent.com/1153921/83798958-ea371500-a659-11ea-8403-d46f77a05b78.png)
+
+## Enable the Graphite exporting connector
+
+You're now ready to begin exporting Netdata metrics to Graphite.
+
+Begin by using `edit-config` to open the `exporting.conf` file.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory
+sudo ./edit-config exporting.conf
+```
+
+If you haven't already, enable the exporting engine by setting `enabled` to `yes` in the `[exporting:global]` section.
+
+```conf
+[exporting:global]
+ enabled = yes
+```
+
+Next, configure the connector. Find the `[graphite:my_graphite_instance]` example section, uncomment it, and replace
+`my_graphite_instance` with a name of your choice. Let's go with `[graphite:netdata]`. Set `enabled` to `yes`. Your
+configuration should now look like this:
+
+```conf
+[graphite:netdata]
+ enabled = yes
+ # destination = localhost
+ # data source = average
+ # prefix = netdata
+ # hostname = my_hostname
+ # update every = 10
+ # buffer on failures = 10
+ # timeout ms = 20000
+ # send names instead of ids = yes
+ # send charts matching = *
+ # send hosts matching = localhost *
+```
+
+Set the `destination` setting to `localhost:2003`. By default, the Docker image for Graphite listens on port `2003` for
+incoming metrics. If you installed Graphite a different way, or tweaked the `docker run` command, you may need to change
+the port accordingly.
+
+```conf
+[graphite:netdata]
+ enabled = yes
+ destination = localhost:2003
+ ...
+```
+
+We won't worry about the rest of the settings for now. Restart the Agent using `sudo service netdata restart`, or the
+appropriate method for your system, to spin up the exporting engine.
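+
+To verify that metrics are arriving, you can query Graphite's `metrics/find` API. A `netdata` entry, matching the
+default `prefix` shown above, should appear in the response within a minute or so:
+
+```bash
+# List the top-level metric trees Graphite has received; replace NODE as before
+curl 'http://NODE/metrics/find?query=*'
+```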
+
+## See and organize Netdata metrics in Graphite
+
+Head back to the Graphite interface again, then click on the **Dashboard** link to get started with Netdata's exported
+metrics. You can also navigate directly to `http://NODE/dashboard`.
+
+Let's switch the interface to help you understand which metrics Netdata is exporting to Graphite. Click on **Dashboard**
+and **Configure UI**, then choose the **Tree** option. Refresh your browser to change the UI.
+
+![Change the Graphite
+UI](https://user-images.githubusercontent.com/1153921/83798697-77c63500-a659-11ea-8ed5-5e274953c871.png)
+
+You should now see a tree of available contexts, including one that matches the hostname of the Agent exporting metrics.
+In this example, the Agent's hostname is `arcturus`.
+
+Let's add some system CPU charts so you can monitor the long-term health of your system. Click through the tree to find
+**hostname → system → cpu** metrics, then click on the **user** context. A chart with metrics from that context appears
+in the dashboard. Add a few other system CPU charts to flesh things out.
+
+Next, let's combine one or two of these charts. Click and drag one chart onto the other, and wait until the green **Drop
+to merge** dialog appears. Release to merge the charts.
+
+![Merging charts in
+Graphite](https://user-images.githubusercontent.com/1153921/83817628-1bbfd880-a67a-11ea-81bc-05efc639b6ce.png)
+
+Finally, save your dashboard. Click **Dashboard**, then **Save As**, then choose a name. Your dashboard is now saved.
+
+Of course, this is just the beginning of the customization you can do with Graphite. You can change the time range,
+share your dashboard with others, or use the composer to customize the size and appearance of specific charts. Learn
+more about adding, modifying, and combining graphs in the [Graphite
+docs](https://graphite.readthedocs.io/en/latest/dashboard.html).
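+
+Graphite's render API also lets you pull the exported data back out programmatically. Here's a quick sketch using the
+default `netdata` prefix and the example `arcturus` hostname from above; adjust both to your own setup:
+
+```bash
+# Fetch the last 10 minutes of user CPU utilization as JSON
+curl 'http://NODE/render?target=netdata.arcturus.system.cpu.user&format=json&from=-10min'
+```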
+
+## Monitor the exporting engine
+
+As soon as the exporting engine begins, Netdata starts reporting metrics about the exporting engine's own health and
+performance.
+
+![Graphs for monitoring the exporting
+engine](https://user-images.githubusercontent.com/1153921/83800787-e5c02b80-a65c-11ea-865a-c447d2ce4cbb.png)
+
+You can use these charts to verify that Netdata is properly exporting metrics to Graphite. You can even add these
+exporting charts to your Graphite dashboard!
+
+### Add exporting charts to Netdata Cloud
+
+You can also show these exporting engine metrics on Netdata Cloud. If you don't have an account already, go [sign
+in](https://app.netdata.cloud) and get started for free. If you need some help along the way, read the [get started with
+Cloud guide](https://learn.netdata.cloud/docs/cloud/get-started).
+
+Add more metrics to a War Room's Nodes view by clicking on the **Add metric** button, then typing `exporting` into the
+context field. Choose the exporting contexts you want to add, then click **Add**. You'll see these charts alongside any
+others you've customized in Netdata Cloud.
+
+![Exporting engine metrics in Netdata
+Cloud](https://user-images.githubusercontent.com/1153921/83902769-db139e00-a711-11ea-828e-aa7e32b04c75.png)
+
+## What's next?
+
+What you do with your exported metrics is entirely up to you, but as you might have seen in the Graphite connector
+configuration block, there are many other ways to tweak and customize which metrics you export to Graphite and how
+often.
+
+For full details about each configuration option and what it does, see the [exporting reference
+guide](/exporting/README.md).
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2Fguides%2Fexport%2Fexport-netdata-metrics-graphite.md&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/docs/guides/longer-metrics-storage.md b/docs/guides/longer-metrics-storage.md
new file mode 100644
index 0000000..85b397f
--- /dev/null
+++ b/docs/guides/longer-metrics-storage.md
@@ -0,0 +1,160 @@
+<!--
+title: "Change how long Netdata stores metrics"
+description: "With a single configuration change, the Netdata Agent can store days, weeks, or months of metrics at its famous per-second granularity."
+custom_edit_url: https://github.com/netdata/netdata/edit/master/docs/guides/longer-metrics-storage.md
+-->
+
+# Change how long Netdata stores metrics
+
+Netdata helps you collect thousands of system and application metrics every second, but what about storing them for the
+long term?
+
+Many people think Netdata can only store about an hour's worth of real-time metrics, but that's simply not true any
+more. With the right settings, Netdata is quite capable of efficiently storing hours or days' worth of historical,
+per-second metrics without having to rely on an [exporting engine](/docs/export/external-databases.md).
+
+This guide gives two options for configuring Netdata to store more metrics. **We recommend the default [database
+engine](#using-the-database-engine)**, but you can stick with or switch to the round-robin database if you prefer.
+
+Let's get started.
+
+## Using the database engine
+
+The database engine uses RAM to store recent metrics while also using a "spill to disk" feature that takes advantage of
+available disk space for long-term metrics storage. This feature of the database engine allows you to store a much
+larger dataset than your system's available RAM.
+
+The database engine is currently the default method of storing metrics, but if you're not sure which database you're
+using, check out your `netdata.conf` file and look for the `memory mode` setting:
+
+```conf
+[global]
+ memory mode = dbengine
+```
+
+If `memory mode` is set to anything but `dbengine`, change it and restart Netdata using the standard command for
+restarting services on your system. You're now using the database engine!
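+
+For example, using `edit-config` (the restart command varies by init system; `systemctl` is shown here as one common
+case):
+
+```bash
+cd /etc/netdata                    # Replace this path with your Netdata config directory
+sudo ./edit-config netdata.conf    # Set "memory mode = dbengine" under [global]
+sudo systemctl restart netdata
+```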
+
+What makes the database engine efficient? While it's structured like a traditional database, the database engine splits
+data between RAM and disk. The database engine caches and indexes data on RAM to keep memory usage low, and then
+compresses older metrics onto disk for long-term storage.
+
+When the Netdata dashboard queries for historical metrics, the database engine will use its cache, stored in RAM, to
+return relevant metrics for visualization in charts.
+
+Now, given that the database engine uses _both_ RAM and disk, there are two other settings to consider: `page cache
+size` and `dbengine multihost disk space`.
+
+```conf
+[global]
+ page cache size = 32
+ dbengine multihost disk space = 256
+```
+
+`page cache size` sets the maximum amount of RAM (in MiB) the database engine will use for caching and indexing.
+`dbengine multihost disk space` sets the maximum disk space (again, in MiB) the database engine will use for storing
+compressed metrics. The default settings retain about two days' worth of metrics on a system collecting 2,000 metrics
+every second.
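+
+As a rough back-of-envelope based on those defaults, the database engine stores well under a byte per sample on disk
+thanks to compression (this ignores indexing and metadata overhead, so treat it as an estimate, not a guarantee):
+
+```text
+2,000 metrics/s * 2 days * 86,400 s/day = 345,600,000 samples
+256 MiB = 268,435,456 bytes
+268,435,456 bytes / 345,600,000 samples ≈ 0.78 bytes per sample
+```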
+
+[**See our database engine
+calculator**](/docs/store/change-metrics-storage.md#calculate-the-system-resources-RAM-disk-space-needed-to-store-metrics)
+to help you correctly set `dbengine multihost disk space` based on your needs. The calculator gives an accurate estimate
+based on how many child nodes you have, how many metrics your Agent collects, and more.
+
+With the database engine active, you can back up your `/var/cache/netdata/dbengine/` folder to another location for
+redundancy.
+
+Now that you know how to switch to the database engine, let's cover the legacy round-robin database for those who
+aren't ready to make the move.
+
+## Using the round-robin database
+
+In previous versions, Netdata used a round-robin database to store 1 hour of per-second metrics.
+
+To see if you're still using this database, or if you would like to switch to it, open your `netdata.conf` file and see
+if `memory mode` option is set to `save`.
+
+```conf
+[global]
+ memory mode = save
+```
+
+If `memory mode` is set to `save`, then you're using the round-robin database. If so, the `history` option is set to
+`3600`, which is equivalent to 3,600 seconds, or one hour.
+
+To increase your historical metrics, you can increase `history` to the number of seconds you'd like to store:
+
+```conf
+[global]
+ # 2 hours = 2 * 60 * 60 = 7200 seconds
+ history = 7200
+    # 4 hours = 4 * 60 * 60 = 14400 seconds
+    history = 14400
+ # 24 hours = 24 * 60 * 60 = 86400 seconds
+ history = 86400
+```
+
+And so on.
+
+Next, check to see how many metrics Netdata collects on your system, and how much RAM that uses. Visit the Netdata
+dashboard and look at the bottom-right corner of the interface. You'll find a sentence similar to the following:
+
+> Every second, Netdata collects 1,938 metrics, presents them in 299 charts and monitors them with 81 alarms. Netdata is
+> using 25 MB of memory on **netdata-linux** for 1 hour, 6 minutes and 36 seconds of real-time history.
+
+On this desktop system, using a Ryzen 5 1600 and 16GB of RAM, the round-robin database uses 25 MB of RAM to store just
+over an hour's worth of data for nearly 2,000 metrics.
+
+To change the `history` option, edit your `netdata.conf` file. In most installations, you'll find it at
+`/etc/netdata/netdata.conf`, but some operating systems place it at `/opt/netdata/etc/netdata/netdata.conf`.
+
+Use `/etc/netdata/edit-config netdata.conf`, or your favorite text editor, to replace `3600` with the number of seconds
+you'd like to store.
+
+You should base this number on two things: How much history you need for your use case, and how much RAM you're willing
+to dedicate to Netdata.
+
+> Take care when you change the `history` option on production systems. Netdata is configured to stop its process if
+> your system starts running out of RAM, but you can never be too careful. Out of memory situations are very bad.
+
+How much RAM will a longer history use? Let's use a little math.
+
+The round-robin database needs 4 bytes for every value Netdata collects. If Netdata collects metrics every second,
+that's 4 bytes, per second, per metric.
+
+```text
+4 bytes * X seconds * Y metrics = RAM usage in bytes
+```
+
+Let's assume your system collects 1,000 metrics per second.
+
+```text
+4 bytes * 3600 seconds * 1,000 metrics = 14400000 bytes = 14.4 MB RAM
+```
+
+With that formula, you can calculate the RAM usage for much larger history settings.
+
+```text
+# 2 hours at 1,000 metrics per second
+4 bytes * 7200 seconds * 1,000 metrics = 28800000 bytes = 28.8 MB RAM
+# 2 hours at 2,000 metrics per second
+4 bytes * 7200 seconds * 2,000 metrics = 57600000 bytes = 57.6 MB RAM
+# 4 hours at 2,000 metrics per second
+4 bytes * 14400 seconds * 2,000 metrics = 115200000 bytes = 115.2 MB RAM
+# 24 hours at 1,000 metrics per second
+4 bytes * 86400 seconds * 1,000 metrics = 345600000 bytes = 345.6 MB RAM
+```
+
+## What's next?
+
+Now that you've configured either the database engine or the round-robin database to store more metrics, you'll
+probably want to see it in action!
+
+For more information about how to pan charts to view historical metrics, see our documentation on [using
+charts](/web/README.md#using-charts).
+
+And if you'd now like to reduce Netdata's resource usage, view our [performance
+guide](/docs/guides/configure/performance.md) for our best practices on optimization.
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2Fguides%2Flonger-metrics-storage&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/docs/guides/monitor-cockroachdb.md b/docs/guides/monitor-cockroachdb.md
new file mode 100644
index 0000000..fd0e7db
--- /dev/null
+++ b/docs/guides/monitor-cockroachdb.md
@@ -0,0 +1,136 @@
+<!--
+title: "Monitor CockroachDB metrics with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/docs/guides/monitor-cockroachdb.md
+-->
+
+# Monitor CockroachDB metrics with Netdata
+
+[CockroachDB](https://github.com/cockroachdb/cockroach) is an open-source project that brings SQL databases into
+scalable, disaster-resilient cloud deployments. Thanks to a [new CockroachDB
+collector](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/cockroachdb/) released in
+[v1.20](https://blog.netdata.cloud/posts/release-1.20/), you can now monitor any number of CockroachDB databases with
+maximum granularity using Netdata. Collect more than 50 unique metrics and put them on interactive visualizations
+designed for better visual anomaly detection.
+
+Netdata itself uses CockroachDB as part of its Netdata Cloud infrastructure, so we're happy to introduce this new
+collector and help others get started with it straightaway.
+
+Let's dive in and walk through the process of monitoring CockroachDB metrics with Netdata.
+
+## What's in this guide
+
+- [Configure the CockroachDB collector](#configure-the-cockroachdb-collector)
+ - [Manual setup for a local CockroachDB database](#manual-setup-for-a-local-cockroachdb-database)
+- [Tweak CockroachDB alarms](#tweak-cockroachdb-alarms)
+
+## Configure the CockroachDB collector
+
+Because _all_ of Netdata's collectors can auto-detect the services they monitor, you _shouldn't_ need to worry about
+configuring CockroachDB. Netdata only needs to regularly query the database's `_status/vars` page to gather metrics and
+display them on the dashboard.
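+
+You can quickly confirm that the endpoint is reachable before looking for charts (replace the host and port if your
+Admin UI listens elsewhere):
+
+```bash
+# Print the first lines of the Prometheus-style metrics page CockroachDB exposes
+curl http://localhost:8080/_status/vars | head
+```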
+
+If your CockroachDB instance is accessible through `http://localhost:8080/` or `http://127.0.0.1:8080`, your setup is
+complete. Restart Netdata with `service netdata restart`, or use the [appropriate
+method](../getting-started.md#start-stop-and-restart-netdata) for your system, and refresh your browser. You should see
+CockroachDB metrics in your Netdata dashboard!
+
+<figure>
+ <img src="https://user-images.githubusercontent.com/1153921/73564467-d7e36b00-441c-11ea-9ec9-b5d5ea7277d4.png" alt="CPU utilization charts from a CockroachDB database monitored by Netdata" />
+ <figcaption>CPU utilization charts from a CockroachDB database monitored by Netdata</figcaption>
+</figure>
+
+> Note: Netdata collects metrics from CockroachDB every 10 seconds, instead of our usual 1 second, because CockroachDB
+> only updates `_status/vars` every 10 seconds. You can't change this setting in CockroachDB.
+
+If you don't see CockroachDB charts, you may need to configure the collector manually.
+
+### Manual setup for a local CockroachDB database
+
+To configure Netdata's CockroachDB collector, navigate to your Netdata configuration directory (typically at
+`/etc/netdata/`) and use `edit-config` to initialize and edit your CockroachDB configuration file.
+
+```bash
+cd /etc/netdata/ # Replace with your Netdata configuration directory, if not /etc/netdata/
+./edit-config go.d/cockroachdb.conf
+```
+
+Scroll down to the `[JOBS]` section at the bottom of the file. You will see the two default jobs there, which you can
+edit, or you can create a new job using any of the parameters listed earlier in the file. Both the `name` and `url`
+values are required; everything else is optional.
+
+For a production cluster, you'll use either an IP address or the system's hostname. Be sure that your remote system
+allows TCP communication on port 8080, or whichever port you have configured CockroachDB's [Admin
+UI](https://www.cockroachlabs.com/docs/stable/monitoring-and-alerting.html#prometheus-endpoint) to listen on.
+
+```yaml
+# [ JOBS ]
+jobs:
+ - name: remote
+ url: http://203.0.113.0:8080/_status/vars
+
+ - name: remote_hostname
+ url: http://cockroachdb.example.com:8080/_status/vars
+```
+
+For a secure cluster, use `https` in the `url` field instead.
+
+```yaml
+# [ JOBS ]
+jobs:
+ - name: remote
+ url: https://203.0.113.0:8080/_status/vars
+ tls_skip_verify: yes # If your certificate is self-signed
+
+ - name: remote_hostname
+ url: https://cockroachdb.example.com:8080/_status/vars
+ tls_skip_verify: yes # If your certificate is self-signed
+```
+
+You can add as many jobs as you'd like based on how many CockroachDB databases you have—Netdata will create separate
+charts for each job. Once you've edited `cockroachdb.conf` according to the needs of your infrastructure, restart
+Netdata to see your new charts.
+
+<figure>
+ <img src="https://user-images.githubusercontent.com/1153921/73564469-d7e36b00-441c-11ea-8333-02ba0e1c294c.png" alt="Charts showing a node failure during a simulated test" />
+ <figcaption>Charts showing a node failure during a simulated test</figcaption>
+</figure>
+
+## Tweak CockroachDB alarms
+
+This release also includes eight pre-configured alarms covering node liveness, storage capacity, replication issues,
+and the number of SQL connections/statements. See [health.d/cockroachdb.conf on
+GitHub](https://raw.githubusercontent.com/netdata/netdata/master/health/health.d/cockroachdb.conf) for details.
+
+You can also edit these files directly with `edit-config`:
+
+```bash
+cd /etc/netdata/ # Replace with your Netdata configuration directory, if not /etc/netdata/
+./edit-config health.d/cockroachdb.conf # You may need to use `sudo` for write privileges
+```
+
+For more information about editing the defaults or writing new alarm entities, see our health monitoring [quickstart
+guide](/health/QUICKSTART.md).
+
+## What's next?
+
+Now that you're collecting metrics from your CockroachDB databases, let us know how it's working for you! There's always
+room for improvement or refinement based on real-world use cases. Feel free to [file an
+issue](https://github.com/netdata/netdata/issues/new?labels=bug%2C+needs+triage&template=bug_report.md) with your
+thoughts.
+
+Also, be sure to check out these useful resources:
+
+- [Netdata's CockroachDB
+ documentation](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/cockroachdb/)
+- [Netdata's CockroachDB
+ configuration](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/cockroachdb.conf)
+- [Netdata's CockroachDB
+ alarms](https://github.com/netdata/netdata/blob/29d9b5e51603792ee27ef5a21f1de0ba8e130158/health/health.d/cockroachdb.conf)
+- [CockroachDB homepage](https://www.cockroachlabs.com/product/)
+- [CockroachDB documentation](https://www.cockroachlabs.com/docs/stable/)
+- [`_status/vars` endpoint
+ docs](https://www.cockroachlabs.com/docs/stable/monitoring-and-alerting.html#prometheus-endpoint)
+- [Monitor CockroachDB with
+ Prometheus](https://www.cockroachlabs.com/docs/stable/monitor-cockroachdb-with-prometheus.html)
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2Fguides%2Fmonitor-cockroachdb&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/docs/guides/monitor-hadoop-cluster.md b/docs/guides/monitor-hadoop-cluster.md
new file mode 100644
index 0000000..1ca2c03
--- /dev/null
+++ b/docs/guides/monitor-hadoop-cluster.md
@@ -0,0 +1,204 @@
+<!--
+title: "Monitor a Hadoop cluster with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/docs/guides/monitor-hadoop-cluster.md
+-->
+
+# Monitor a Hadoop cluster with Netdata
+
+Hadoop is an [Apache project](https://hadoop.apache.org/) that provides a framework for processing large sets of data
+across a distributed cluster of systems.
+
+And while Hadoop is designed to be a highly-available and fault-tolerant service, those who operate a Hadoop cluster
+will want to monitor the health and performance of their [Hadoop Distributed File System
+(HDFS)](https://hadoop.apache.org/docs/r1.2.1/hdfs_design.html) and [Zookeeper](https://zookeeper.apache.org/)
+implementations.
+
+Netdata comes with built-in and pre-configured support for monitoring both HDFS and Zookeeper.
+
+This guide assumes you have a Hadoop cluster, with HDFS and Zookeeper, running already. If you don't, please follow
+the [official Hadoop
+instructions](http://hadoop.apache.org/docs/stable/hadoop-project-dist/hadoop-common/SingleCluster.html) or an
+alternative, like the guide available from
+[DigitalOcean](https://www.digitalocean.com/community/tutorials/how-to-install-hadoop-in-stand-alone-mode-on-ubuntu-18-04).
+
+For more specifics on the collection modules used in this guide, read the respective pages in our documentation:
+
+- [HDFS](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/hdfs)
+- [Zookeeper](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/zookeeper)
+
+## Set up your HDFS and Zookeeper installations
+
+As with all data sources, Netdata can auto-detect HDFS and Zookeeper nodes if you installed them using the standard
+installation procedure.
+
+For Netdata to collect HDFS metrics, it needs to be able to access the node's `/jmx` endpoint. You can test whether a
+JMX endpoint is accessible by using `curl HDFS-IP:PORT/jmx`. For a NameNode, you should see output similar to the
+following:
+
+```json
+{
+ "beans" : [ {
+ "name" : "Hadoop:service=NameNode,name=JvmMetrics",
+ "modelerType" : "JvmMetrics",
+ "MemNonHeapUsedM" : 65.67851,
+ "MemNonHeapCommittedM" : 67.3125,
+ "MemNonHeapMaxM" : -1.0,
+ "MemHeapUsedM" : 154.46341,
+ "MemHeapCommittedM" : 215.0,
+ "MemHeapMaxM" : 843.0,
+ "MemMaxM" : 843.0,
+ "GcCount" : 15,
+ "GcTimeMillis" : 305,
+ "GcNumWarnThresholdExceeded" : 0,
+ "GcNumInfoThresholdExceeded" : 0,
+ "GcTotalExtraSleepTime" : 92,
+ "ThreadsNew" : 0,
+ "ThreadsRunnable" : 6,
+ "ThreadsBlocked" : 0,
+ "ThreadsWaiting" : 7,
+ "ThreadsTimedWaiting" : 34,
+ "ThreadsTerminated" : 0,
+ "LogFatal" : 0,
+ "LogError" : 0,
+ "LogWarn" : 2,
+ "LogInfo" : 348
+ },
+ { ... }
+ ]
+}
+```
+
+The JSON result for a DataNode's `/jmx` endpoint is slightly different:
+
+```json
+{
+ "beans" : [ {
+    "name" : "Hadoop:service=DataNode,name=DataNodeActivity-dev-slave-01.dev.local-9866",
+ "modelerType" : "DataNodeActivity-dev-slave-01.dev.local-9866",
+ "tag.SessionId" : null,
+ "tag.Context" : "dfs",
+ "tag.Hostname" : "dev-slave-01.dev.local",
+ "BytesWritten" : 500960407,
+ "TotalWriteTime" : 463,
+ "BytesRead" : 80689178,
+ "TotalReadTime" : 41203,
+ "BlocksWritten" : 16,
+ "BlocksRead" : 16,
+ "BlocksReplicated" : 4,
+ ...
+ },
+ { ... }
+ ]
+}
+```
+
+If Netdata can't access the `/jmx` endpoint for either a NameNode or DataNode, it will not be able to auto-detect and
+collect metrics from your HDFS implementation.
+
+Zookeeper auto-detection relies on an accessible client port and an allow-listed `mntr` command. For more details on
+`mntr`, see Zookeeper's documentation on [cluster
+options](https://zookeeper.apache.org/doc/current/zookeeperAdmin.html#sc_clusterOptions) and [Zookeeper
+commands](https://zookeeper.apache.org/doc/current/zookeeperAdmin.html#sc_zkCommands).
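+
+A quick way to confirm that `mntr` is allow-listed is to send it over the client port with `netcat` (assuming Zookeeper
+listens on the default port `2181`); a working setup responds with a list of `zk_*` statistics:
+
+```bash
+# Send the mntr four-letter-word command to Zookeeper's client port
+echo mntr | nc localhost 2181
+```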
+
+## Configure the HDFS and Zookeeper modules
+
+To configure Netdata's HDFS module, navigate to your Netdata directory (typically at `/etc/netdata/`) and use
+`edit-config` to initialize and edit your HDFS configuration file.
+
+```bash
+cd /etc/netdata/
+sudo ./edit-config go.d/hdfs.conf
+```
+
+At the bottom of the file, you will see two example jobs, both of which are commented out:
+
+```yaml
+# [ JOBS ]
+#jobs:
+# - name: namenode
+# url: http://127.0.0.1:9870/jmx
+#
+# - name: datanode
+# url: http://127.0.0.1:9864/jmx
+```
+
+Uncomment these lines and edit the `url` value(s) according to your setup. Now's the time to add any other configuration
+details, which you can find inside the `hdfs.conf` file itself. Most production implementations will require TLS
+certificates.
+
+The result for a simple HDFS setup, running entirely on `localhost` and without certificate authentication, might look
+like this:
+
+```yaml
+# [ JOBS ]
+jobs:
+ - name: namenode
+ url: http://127.0.0.1:9870/jmx
+
+ - name: datanode
+ url: http://127.0.0.1:9864/jmx
+```
+
+At this point, Netdata should be configured to collect metrics from your HDFS servers. Let's move on to Zookeeper.
+
+Next, use `edit-config` again to initialize/edit your `zookeeper.conf` file.
+
+```bash
+cd /etc/netdata/
+sudo ./edit-config go.d/zookeeper.conf
+```
+
+As with the `hdfs.conf` file, head to the bottom, uncomment the example jobs, and tweak the `address` values according
+to your setup. Again, you may need to add additional configuration options, like TLS certificates.
+
+```yaml
+jobs:
+ - name : local
+ address : 127.0.0.1:2181
+
+ - name : remote
+ address : 203.0.113.10:2182
+```
+
+Finally, restart Netdata.
+
+```sh
+sudo service netdata restart
+```
+
+Upon restart, Netdata should recognize your HDFS/Zookeeper servers, enable the HDFS and Zookeeper modules, and begin
+showing real-time metrics for both in your Netdata dashboard. 🎉
+
+## Configuring HDFS and Zookeeper alarms
+
+The Netdata community helped us create sane defaults for alarms related to both HDFS and Zookeeper. You may want to
+investigate these to ensure they work well with your Hadoop implementation.
+
+- [HDFS alarms](https://raw.githubusercontent.com/netdata/netdata/master/health/health.d/hdfs.conf)
+- [Zookeeper alarms](https://raw.githubusercontent.com/netdata/netdata/master/health/health.d/zookeeper.conf)
+
+You can also access/edit these files directly with `edit-config`:
+
+```bash
+sudo /etc/netdata/edit-config health.d/hdfs.conf
+sudo /etc/netdata/edit-config health.d/zookeeper.conf
+```
+
+For more information about editing the defaults or writing new alarm entities, see our [health monitoring
+documentation](/health/README.md).
+
+## What's next?
+
+If you're having issues with Netdata auto-detecting your HDFS/Zookeeper servers, or want to help improve how Netdata
+collects or presents metrics from these services, feel free to [file an
+issue](https://github.com/netdata/netdata/issues/new?labels=bug%2C+needs+triage&template=bug_report.md).
+
+- Read up on the [HDFS configuration
+ file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/hdfs.conf) to understand how to configure
+ global options or per-job options, such as username/password, TLS certificates, timeouts, and more.
+- Read up on the [Zookeeper configuration
+ file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/zookeeper.conf) to understand how to configure
+ global options or per-job options, timeouts, TLS certificates, and more.
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2Fguides%2Fmonitor-hadoop-cluster&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/docs/guides/monitor/anomaly-detection.md b/docs/guides/monitor/anomaly-detection.md
new file mode 100644
index 0000000..bb9dbc8
--- /dev/null
+++ b/docs/guides/monitor/anomaly-detection.md
@@ -0,0 +1,191 @@
+<!--
+title: "Detect anomalies in systems and applications"
+description: "Detect anomalies in any system, container, or application in your infrastructure with machine learning and the open-source Netdata Agent."
+image: /img/seo/guides/monitor/anomaly-detection.png
+author: "Joel Hans"
+author_title: "Editorial Director, Technical & Educational Resources"
+author_img: "/img/authors/joel-hans.jpg"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/docs/guides/monitor/anomaly-detection.md
+-->
+
+# Detect anomalies in systems and applications
+
+Beginning with v1.27, the [open-source Netdata Agent](https://github.com/netdata/netdata) is capable of unsupervised
+[anomaly detection](https://en.wikipedia.org/wiki/Anomaly_detection) with machine learning (ML). As with all things
+Netdata, the anomalies collector comes with preconfigured alarms and instant visualizations that require no query
+languages or organizing metrics. You configure the collector to look at specific charts, and it handles the rest.
+
+Netdata's implementation uses a handful of functions in the [Python Outlier Detection (PyOD)
+library](https://github.com/yzhao062/pyod/tree/master), which periodically runs a `train` function that learns what
+"normal" looks like on your node and creates an ML model for each chart, then utilizes the
+[`predict_proba()`](https://pyod.readthedocs.io/en/latest/api_cc.html#pyod.models.base.BaseDetector.predict_proba) and
+[`predict()`](https://pyod.readthedocs.io/en/latest/api_cc.html#pyod.models.base.BaseDetector.predict) PyOD functions to
+quantify how anomalous certain charts are.
+
+All these metrics and alarms are available for centralized monitoring in [Netdata Cloud](https://app.netdata.cloud). If
+you choose to sign up for Netdata Cloud and [claim your nodes](/claim/README.md), you will have the ability to run
+tailored anomaly detection on every node in your infrastructure, regardless of its purpose or workload.
+
+In this guide, you'll learn how to set up the anomalies collector to instantly detect anomalies in an Nginx web server
+and/or the node that hosts it, which will give you the tools to configure parallel unsupervised monitors for any
+application in your infrastructure. Let's get started.
+
+![Example anomaly detection with an Nginx web
+server](https://user-images.githubusercontent.com/1153921/103586700-da5b0a00-4ea2-11eb-944e-46edd3f83e3a.png)
+
+## Prerequisites
+
+- A node running the Netdata Agent. If you don't yet have that, [get Netdata](/docs/get/README.md).
+- A Netdata Cloud account. [Sign up](https://app.netdata.cloud) if you don't have one already.
+- Familiarity with configuring the Netdata Agent with [`edit-config`](/docs/configure/nodes.md).
+- _Optional_: An Nginx web server running on the same node to follow the example configuration steps.
+
+## Install required Python packages
+
+The anomalies collector uses a few Python packages, available with `pip3`, to run ML training. It requires
+[`numba`](http://numba.pydata.org/), [`scikit-learn`](https://scikit-learn.org/stable/),
+[`pyod`](https://pyod.readthedocs.io/en/latest/), in addition to
+[`netdata-pandas`](https://github.com/netdata/netdata-pandas), which is a package built by the Netdata team to pull data
+from a Netdata Agent's API into a [Pandas](https://pandas.pydata.org/) dataframe. Read more about `netdata-pandas` on its [package
+repo](https://github.com/netdata/netdata-pandas) or in Netdata's [community
+repo](https://github.com/netdata/community/tree/main/netdata-agent-api/netdata-pandas).
+
+```bash
+# Become the netdata user
+sudo su -s /bin/bash netdata
+
+# Install required packages for the netdata user
+pip3 install --user netdata-pandas==0.0.32 numba==0.50.1 scikit-learn==0.23.2 pyod==0.8.3
+```
+
+> If the `pip3` command fails, you need to install it. For example, on an Ubuntu system, use `sudo apt install
+> python3-pip`.
+
+Use `exit` to become your normal user again.
+
+## Enable the anomalies collector
+
+Navigate to your [Netdata config directory](/docs/configure/nodes.md#the-netdata-config-directory) and use `edit-config`
+to open the `python.d.conf` file.
+
+```bash
+sudo ./edit-config python.d.conf
+```
+
+In `python.d.conf` file, search for the `anomalies` line. If the line exists, set the value to `yes`. Add the line
+yourself if it doesn't already exist. Either way, the final result should look like:
+
+```conf
+anomalies: yes
+```
+
+[Restart the Agent](/docs/configure/start-stop-restart.md) with `sudo systemctl restart netdata` to start up the
+anomalies collector. By default, the model training process runs every 30 minutes, and uses the previous 4 hours of
+metrics to establish a baseline for health and performance across the default included charts.
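+
+Those defaults correspond to settings like the following in `anomalies.conf` (the values shown are illustrative of the
+30-minute/4-hour behavior described above; check the comments in the file itself for the exact units and defaults):
+
+```conf
+    train_every_n: 1800
+    train_n_secs: 14400
+```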
+
+> 💡 The anomalies collector may need 30-60 seconds to finish its initial training and have enough data to start
+> generating anomaly scores. You may need to refresh your browser tab for the **Anomalies** section to appear in menus
+> on both the local Agent dashboard and Netdata Cloud.
+
+## Configure the anomalies collector
+
+Open `python.d/anomalies.conf` with `edit-config`.
+
+```bash
+sudo ./edit-config python.d/anomalies.conf
+```
+
+The file contains many user-configurable settings with sane defaults. Here are some important settings that don't
+involve tweaking the behavior of the ML training itself.
+
+- `charts_regex`: Which charts to train models for and run anomaly detection on, with each chart getting a separate
+ model.
+- `charts_to_exclude`: Specific charts, selected by the regex in `charts_regex`, to exclude.
+- `train_every_n`: How often to train the ML models.
+- `train_n_secs`: The number of historical observations to train each model on. The default is 4 hours, but if your node
+ doesn't have historical metrics going back that far, consider [changing the metrics retention
+ policy](/docs/store/change-metrics-storage.md) or reducing this window.
+- `custom_models`: A way to define custom models that you want anomaly probabilities for, including multi-node or
+ streaming setups. More on custom models in part 3 of this guide series.
+
+> ⚠️ Setting `charts_regex` with many charts or `train_n_secs` to a very large number will have an impact on the
+> resources and time required to train a model for every chart. The actual performance implications depend on the
+> resources available on your node. If you plan on changing these settings beyond the default, or what's mentioned in
+> this guide, make incremental changes to observe the performance impact. Consider setting `train_max_n` to cap the
+> number of observations actually used for training.
+
+### Run anomaly detection on Nginx and log file metrics
+
+As mentioned above, this guide uses an Nginx web server to demonstrate how the anomalies collector works. You must
+configure the collector to monitor charts from the
+[Nginx](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/nginx) and [web
+log](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/weblog) collectors.
+
+`charts_regex` allows for some basic regex, such as wildcards (`*`) to match all contexts with a certain pattern. For
+example, `system\..*` matches any chart with a context that begins with `system.` and ends in any number of other
+characters (`.*`). Note the escape character (`\`) around the first period to capture a period character exactly, and
+not any character.
+
+Change `charts_regex` in `anomalies.conf` to the following:
+
+```conf
+ charts_regex: 'system\..*|nginx_local\..*|web_log_nginx\..*|apps.cpu|apps.mem'
+```
+
+This value tells the anomalies collector to train against every `system.` chart, every `nginx_local` chart, every
+`web_log_nginx` chart, and specifically the `apps.cpu` and `apps.mem` charts.
+
+![The anomalies collector chart with many
+dimensions](https://user-images.githubusercontent.com/1153921/102813877-db5e4880-4386-11eb-8040-d7a1d7a476bb.png)
+
+### Remove some metrics from anomaly detection
+
+As you can see in the above screenshot, this node is now looking for anomalies in many places. The result is a single
+`anomalies_local.probability` chart with more than twenty dimensions, some of which the dashboard hides at the bottom of
+a scrollable area. In addition, training and running the anomalies collector on many charts might require more CPU
+utilization than you're willing to give.
+
+First, explicitly declare which `system.` charts to monitor rather than matching all of them with the regex (`system\..*`).
+
+```conf
+ charts_regex: 'system\.cpu|system\.load|system\.io|system\.net|system\.ram|nginx_local\..*|web_log_nginx\..*|apps.cpu|apps.mem'
+```
+
+Next, remove some charts with the `charts_to_exclude` setting. For this example, using an Nginx web server, focus on the
+volume of requests/responses, not, for example, which type of 4xx response a user might receive.
+
+```conf
+ charts_to_exclude: 'web_log_nginx.excluded_requests,web_log_nginx.responses_by_status_code_class,web_log_nginx.status_code_class_2xx_responses,web_log_nginx.status_code_class_4xx_responses,web_log_nginx.current_poll_uniq_clients,web_log_nginx.requests_by_http_method,web_log_nginx.requests_by_http_version,web_log_nginx.requests_by_ip_proto'
+```
+
+![The anomalies collector with fewer
+dimensions](https://user-images.githubusercontent.com/1153921/102820642-d69f9180-4392-11eb-91c5-d3d166d40105.png)
+
+Apply the ideas behind the collector's regex and exclude settings to any other
+[system](/docs/collect/system-metrics.md), [container](/docs/collect/container-metrics.md), or
+[application](/docs/collect/application-metrics.md) metrics you want to detect anomalies for.
+
+## What's next?
+
+Now that you know how to set up unsupervised anomaly detection in the Netdata Agent, using an Nginx web server as an
+example, it's time to apply that knowledge to other mission-critical parts of your infrastructure. If you're not sure
+what to monitor next, check out our list of [collectors](/collectors/COLLECTORS.md) to see what kind of metrics Netdata
+can collect from your systems, containers, and applications.
+
+For a more user-friendly anomaly detection experience, try out the [Metric
+Correlations](https://learn.netdata.cloud/docs/cloud/insights/metric-correlations) feature in Netdata Cloud. Metric
+Correlations runs only at your requests, removing unrelated charts from the dashboard to help you focus on root cause
+analysis.
+
+Stay tuned for the next two parts of this guide, which provide more real-world context for the anomalies collector.
+First, maximize the immediate value you get from anomaly detection by tracking preconfigured alarms, visualizing
+anomalies in charts, and building a new dashboard tailored to your applications. Then, learn about creating custom ML
+models, which help you holistically monitor an application or service by monitoring anomalies across a _cluster of
+charts_.
+
+### Related reference documentation
+
+- [Netdata Agent · Anomalies collector](/collectors/python.d.plugin/anomalies/README.md)
+- [Netdata Cloud · Metric Correlations](https://learn.netdata.cloud/docs/cloud/insights/metric-correlations)
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2Fguides%2Fmonitor%2Fanomaly-detectionl&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/docs/guides/monitor/dimension-templates.md b/docs/guides/monitor/dimension-templates.md
new file mode 100644
index 0000000..7cbe795
--- /dev/null
+++ b/docs/guides/monitor/dimension-templates.md
@@ -0,0 +1,176 @@
+<!--
+title: "Use dimension templates to create dynamic alarms"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/docs/guides/monitor/dimension-templates.md
+-->
+
+# Use dimension templates to create dynamic alarms
+
+Your ability to monitor the health of your systems and applications relies on your ability to create and maintain
+the best set of alarms for your particular needs.
+
+In v1.18 of Netdata, we introduced **dimension templates** for alarms, which simplifies the process of writing [alarm
+entities](/health/REFERENCE.md#health-entity-reference) for charts with many dimensions.
+
+Dimension templates can condense many individual entities into one—no more copy-pasting one entity and changing the
+`alarm`/`template` and `lookup` lines for each dimension you'd like to monitor.
+
+They are, however, an advanced health monitoring feature. For more basic instructions on creating your first alarm,
+check out our [health monitoring documentation](/health/README.md), which also includes
+[examples](/health/REFERENCE.md#example-alarms).
+
+## The fundamentals of `foreach`
+
+Dimension templates add a new `foreach` parameter to the existing [`lookup`
+line](/health/REFERENCE.md#alarm-line-lookup). This is where the magic happens.
+
+You use the `foreach` parameter to specify which dimensions you want to monitor with this single alarm. You can separate
+them with a comma (`,`) or a pipe (`|`). You can also use a [Netdata simple pattern](/libnetdata/simple_pattern/README.md)
+to create many alarms with a regex-like syntax.
+
+The `foreach` parameter _has_ to be the last parameter in your `lookup` line, and if you have both `of` and `foreach` in
+the same `lookup` line, Netdata will ignore the `of` parameter and use `foreach` instead.
+
+Let's get into some examples so you can see how the new parameter works.
+
+> ⚠️ The following entities are examples to showcase the functionality and syntax of dimension templates. They are not
+> meant to be run as-is on production systems.
+
+## Condensing entities with `foreach`
+
+Let's say you want to monitor the `system`, `user`, and `nice` dimensions in your system's overall CPU utilization.
+Before dimension templates, you would need the following three entities:
+
+```yaml
+ alarm: cpu_system
+ on: system.cpu
+lookup: average -10m percentage of system
+ every: 1m
+ warn: $this > 50
+ crit: $this > 80
+
+ alarm: cpu_user
+ on: system.cpu
+lookup: average -10m percentage of user
+ every: 1m
+ warn: $this > 50
+ crit: $this > 80
+
+ alarm: cpu_nice
+ on: system.cpu
+lookup: average -10m percentage of nice
+ every: 1m
+ warn: $this > 50
+ crit: $this > 80
+```
+
+With dimension templates, you can condense these into a single alarm. Take note of the `alarm` and `lookup` lines.
+
+```yaml
+ alarm: cpu_template
+ on: system.cpu
+lookup: average -10m percentage foreach system,user,nice
+ every: 1m
+ warn: $this > 50
+ crit: $this > 80
+```
+
+The `alarm` line specifies the naming scheme Netdata will use. You can use whatever naming scheme you'd like, with `.`
+and `_` being the only allowed symbols.
+
+The `lookup` line has changed from `of` to `foreach`, and we're now passing three dimensions.
+
+In this example, Netdata will create three alarms with the names `cpu_template_system`, `cpu_template_user`, and
+`cpu_template_nice`. Every minute, each alarm will use the same database query to calculate the average CPU usage for
+the `system`, `user`, and `nice` dimensions over the last 10 minutes and send out alarms if necessary.
+
+You can find these three alarms active by clicking on the **Alarms** button in the top navigation, and then clicking on
+the **All** tab and scrolling to the **system - cpu** collapsible section.
+
+![Three new alarms created from the dimension template](https://user-images.githubusercontent.com/1153921/66218994-29523800-e67f-11e9-9bcb-9bca23e2c554.png)
+
+Let's look at some other examples of how `foreach` works so you can best apply it in your configurations.
+
+### Using a Netdata simple pattern in `foreach`
+
+In the last example, we used `foreach system,user,nice` to create three distinct alarms using dimension templates. But
+what if you want to quickly create alarms for _all_ the dimensions of a given chart?
+
+Use a [simple pattern](/libnetdata/simple_pattern/README.md)! One example of a simple pattern is a single wildcard
+(`*`).
+
+Instead of monitoring system CPU usage, let's monitor per-application CPU usage using the `apps.cpu` chart. Passing a
+wildcard as the simple pattern tells Netdata to create a separate alarm for _every_ process on your system:
+
+```yaml
+ alarm: app_cpu
+ on: apps.cpu
+lookup: average -10m percentage foreach *
+ every: 1m
+ warn: $this > 50
+ crit: $this > 80
+```
+
+This entity will now create alarms for every dimension in the `apps.cpu` chart. Given that most `apps.cpu` charts have
+10 or more dimensions, using the wildcard ensures you catch every CPU-hogging process.
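+
+Simple patterns also support negation with `!`, so you can watch every dimension except ones you expect to be busy.
+Here's a sketch that excludes a hypothetical `netdata` process group from the wildcard, assuming negation works in
+`foreach` as it does in other simple-pattern fields:
+
+```yaml
+ alarm: app_cpu
+    on: apps.cpu
+lookup: average -10m percentage foreach !netdata *
+ every: 1m
+  warn: $this > 50
+  crit: $this > 80
+```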
+
+To learn more about how to use simple patterns with dimension templates, see our [simple patterns
+documentation](/libnetdata/simple_pattern/README.md).
+
+## Using `foreach` with alarm templates
+
+Dimension templates also work with [alarm templates](/health/REFERENCE.md#alarm-line-alarm-or-template). Alarm
+templates help you create alarms for all the charts with a given context—for example, all the cores of your system's
+CPU.
+
+By combining the two, you can create dozens of individual alarms with a single template entity. Here's how you would
+create alarms for the `system`, `user`, and `nice` dimensions for every chart in the `cpu.cpu` context—or, in other
+words, every CPU core.
+
+```yaml
+template: cpu_template
+ on: cpu.cpu
+ lookup: average -10m percentage foreach system,user,nice
+ every: 1m
+ warn: $this > 50
+ crit: $this > 80
+```
+
+On a system with a 6-core, 12-thread Ryzen 5 1600 CPU, this one entity creates alarms on the following charts and
+dimensions:
+
+- `cpu.cpu0`
+ - `cpu_template_user`
+ - `cpu_template_system`
+ - `cpu_template_nice`
+- `cpu.cpu1`
+ - `cpu_template_user`
+ - `cpu_template_system`
+ - `cpu_template_nice`
+- `cpu.cpu2`
+ - `cpu_template_user`
+ - `cpu_template_system`
+ - `cpu_template_nice`
+- ...
+- `cpu.cpu11`
+ - `cpu_template_user`
+ - `cpu_template_system`
+ - `cpu_template_nice`
+
+Here's how just a few of those template-generated alarms look in the Netdata dashboard.
+
+![A few of the created alarms in the Netdata dashboard](https://user-images.githubusercontent.com/1153921/66219669-708cf880-e680-11e9-8b3a-7bfe178fa28b.png)
+
+All in all, this single entity creates 36 individual alarms. Much easier than writing 36 separate entities in your
+health configuration files!
+
+## What's next?
+
+We hope you're excited about the possibilities of using dimension templates! Maybe they'll inspire you to build new
+alarms that will help you better monitor the health of your systems.
+
+Or, at the very least, simplify your configuration files.
+
+For information about other advanced features in Netdata's health monitoring toolkit, check out our [health
+documentation](/health/README.md). And if you have some cool alarms you built using dimension templates, we'd love to
+hear about them in our [community forum](https://community.netdata.cloud/).
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2Fguides%2Fmonitor%2dimension-templates&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/docs/guides/monitor/kubernetes-k8s-netdata.md b/docs/guides/monitor/kubernetes-k8s-netdata.md
new file mode 100644
index 0000000..40af0e9
--- /dev/null
+++ b/docs/guides/monitor/kubernetes-k8s-netdata.md
@@ -0,0 +1,278 @@
+<!--
+title: "Monitor a Kubernetes (k8s) cluster with Netdata"
+description: "Use Netdata's helmchart, service discovery plugin, and Kubelet/kube-proxy collectors for real-time visibility into your Kubernetes cluster."
+image: /img/seo/guides/monitor/kubernetes-k8s-netdata.png
+custom_edit_url: https://github.com/netdata/netdata/edit/master/docs/guides/monitor/kubernetes-k8s-netdata.md
+-->
+
+# Monitor a Kubernetes cluster with Netdata
+
+While Kubernetes (k8s) might simplify the way you deploy, scale, and load-balance your applications, not all clusters
+come with "batteries included" when it comes to monitoring. Doubly so for a monitoring stack that helps you actively
+troubleshoot issues with your cluster.
+
+Some k8s providers, like GKE (Google Kubernetes Engine), do deploy clusters bundled with monitoring capabilities, such
+as Google Stackdriver Monitoring. However, these pre-configured solutions might not offer the depth of metrics,
+customization, or integration with your preferred alerting methods.
+
+Without this visibility, it's like you built an entire house and _then_ smashed your way through the finished walls to
+add windows.
+
+At Netdata, we're working to build Kubernetes monitoring tools that add visibility without complexity while also helping
+you actively troubleshoot anomalies or outages. Better yet, this toolkit includes a few complementary tools and
+collectors for monitoring the many layers of a Kubernetes cluster, _entirely for free_. These methods work together to
+help you troubleshoot performance or availability issues across your k8s infrastructure.
+
+- A [Helm chart](https://github.com/netdata/helmchart), which bootstraps a Netdata Agent pod on every node in your
+ cluster, plus an additional parent pod for storing metrics and managing alarm notifications.
+- A [service discovery plugin](https://github.com/netdata/agent-service-discovery), which discovers and creates
+ configuration files for [compatible
+ applications](https://github.com/netdata/helmchart#service-discovery-and-supported-services) and any endpoints
+ covered by our [generic Prometheus
+ collector](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/prometheus). With these
+ configuration files, Netdata collects metrics from any compatible applications as they run _inside_ of a pod.
+ Service discovery happens without manual intervention as pods are created, destroyed, or moved between nodes.
+- A [Kubelet collector](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/k8s_kubelet), which runs
+ on each node in a k8s cluster to monitor the number of pods/containers, the volume of operations on each container,
+ and more.
+- A [kube-proxy collector](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/k8s_kubeproxy), which
+ also runs on each node and monitors latency and the volume of HTTP requests to the proxy.
+- A [cgroups collector](/collectors/cgroups.plugin/README.md), which collects CPU, memory, and bandwidth metrics for
+ each container running on your k8s cluster.
+
+By following this guide, you'll learn how to discover, explore, and take away insights from each of these layers in your
+Kubernetes cluster. Let's get started.
+
+## Prerequisites
+
+To follow this guide, you need:
+
+- A working cluster running Kubernetes v1.9 or newer.
+- The [kubectl](https://kubernetes.io/docs/reference/kubectl/overview/) command line tool, within [one minor version
+ difference](https://kubernetes.io/docs/tasks/tools/install-kubectl/#before-you-begin) of your cluster, on an
+ administrative system.
+- The [Helm package manager](https://helm.sh/) v3.0.0 or newer on the same administrative system.
+
+**You need to install the Netdata Helm chart on your cluster** before you proceed. See our [Kubernetes installation
+process](/packaging/installer/methods/kubernetes.md) for details.
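+
+If you haven't installed it yet, deploying the chart typically takes just two Helm commands. The following is a minimal
+sketch that assumes the release name `netdata` and the chart's default values; treat the linked installation guide as
+the authoritative reference:
+
+```bash
+# Add the Netdata Helm repository and deploy the chart with default values.
+helm repo add netdata https://netdata.github.io/helmchart/
+helm install netdata netdata/netdata
+```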
+
+This guide uses a 3-node cluster, running on Digital Ocean, as an example. This cluster runs CockroachDB, Redis, and
+Apache, which we'll use as examples of how to monitor a Kubernetes cluster with Netdata.
+
+```bash
+kubectl get nodes
+NAME STATUS ROLES AGE VERSION
+pool-0z7557lfb-3fnbf Ready <none> 51m v1.17.5
+pool-0z7557lfb-3fnbx Ready <none> 51m v1.17.5
+pool-0z7557lfb-3fnby Ready <none> 51m v1.17.5
+
+kubectl get pods
+NAME READY STATUS RESTARTS AGE
+cockroachdb-0 1/1 Running 0 44h
+cockroachdb-1 1/1 Running 0 44h
+cockroachdb-2 1/1 Running 1 44h
+cockroachdb-init-q7mp6 0/1 Completed 0 44h
+httpd-6f6cb96d77-4zlc9 1/1 Running 0 2m47s
+httpd-6f6cb96d77-d9gs6 1/1 Running 0 2m47s
+httpd-6f6cb96d77-xtpwn 1/1 Running 0 11m
+netdata-child-5p2m9 2/2 Running 0 42h
+netdata-child-92qvf 2/2 Running 0 42h
+netdata-child-djc6w 2/2 Running 0 42h
+netdata-parent-0 1/1 Running 0 42h
+redis-6bb94d4689-6nn6v 1/1 Running 0 73s
+redis-6bb94d4689-c2fk2 1/1 Running 0 73s
+redis-6bb94d4689-tjcz5 1/1 Running 0 88s
+```
+
+## Explore Netdata's Kubernetes charts
+
+The Helm chart installs and enables everything you need for visibility into your k8s cluster, including the service
+discovery plugin, Kubelet collector, kube-proxy collector, and cgroups collector.
+
+To get started, open your browser and navigate to your cluster's Netdata dashboard. See our [Kubernetes installation
+instructions](/packaging/installer/methods/kubernetes.md) for how to access the dashboard based on your cluster's
+configuration.
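+
+How you reach the dashboard depends on how the chart exposes the parent pod. One quick option, assuming the chart's
+default `netdata` service name in the current namespace, is to forward the service's port to your local machine:
+
+```bash
+# Forward the Netdata parent service to http://localhost:19999.
+kubectl port-forward service/netdata 19999:19999
+```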
+
+You'll see metrics from the parent pod as soon as you navigate to the dashboard:
+
+![The Netdata dashboard when monitoring a Kubernetes
+cluster](https://user-images.githubusercontent.com/1153921/85343043-c6206400-b4a0-11ea-8de6-cf2c6837c456.png)
+
+Remember that the parent pod is responsible for storing metrics from all the child pods and sending alarms.
+
+Take note of the **Replicated Nodes** menu, which shows not only the parent pod, but also the three child pods. This
+example cluster has three child pods, but the number of child pods depends entirely on the number of nodes in your
+cluster.
+
+You'll use the links in the **Replicated Nodes** menu to navigate between the various pods in your cluster. Let's do
+that now to explore the pod-level Kubernetes monitoring Netdata delivers.
+
+### Pods
+
+Click on any of the nodes under **netdata-parent-0**. Netdata redirects you to a separate instance of the Netdata
+dashboard, run by the Netdata child pod, which visualizes thousands of metrics from that node.
+
+![The Netdata dashboard monitoring a pod in a Kubernetes
+cluster](https://user-images.githubusercontent.com/1153921/85348461-85c8e200-b4b0-11ea-85fa-e88046e94719.png)
+
+From this dashboard, you can see all the familiar charts showing the health and performance of an individual node, just
+like you would if you installed Netdata on a single physical system. Explore CPU, memory, bandwidth, networking, and
+more.
+
+You can use the menus on the right-hand side of the dashboard to navigate between different sections of charts and
+metrics.
+
+For example, click on the **Applications** section to view per-application metrics, collected by
+[apps.plugin](/collectors/apps.plugin/README.md). The first chart you see is **Apps CPU Time (100% = 1 core)
+(apps.cpu)**, which shows the CPU utilization of various applications running on the node. You shouldn't be surprised to
+find Netdata processes (`netdata`, `sd-agent`, and more) alongside Kubernetes processes (`kubelet`, `kube-proxy`, and
+`containers`).
+
+![Per-application monitoring on a Kubernetes
+cluster](https://user-images.githubusercontent.com/1153921/85348852-ad6c7a00-b4b1-11ea-95b4-5952bd0e9d98.png)
+
+Beneath the **Applications** section, you'll begin to see sections for **k8s kubelet**, **k8s kubeproxy**, and long
+strings that start with **k8s**, which are sections for metrics collected by
+[`cgroups.plugin`](/collectors/cgroups.plugin/README.md). Let's skip over those for now and head further down to see
+Netdata's service discovery in action.
+
+### Service discovery (services running inside of pods)
+
+Thanks to Netdata's service discovery feature, you can monitor containerized applications running in k8s pods with zero
+configuration or manual intervention. Service discovery is like a watchdog for created or deleted pods, recognizing the
+service they run based on the image name and port and immediately attempting to apply a logical default configuration.
+
+Service discovery supports [popular
+applications](https://github.com/netdata/helmchart#service-discovery-and-supported-services), plus any endpoints covered
+by our [generic Prometheus collector](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/prometheus),
+which are automatically added or removed from Netdata as soon as the pods are created or destroyed.
+
+You can find these service discovery sections near the bottom of the menu. The names for these sections follow a
+pattern: the name of the detected service, followed by a string of the module name, pod TUID, service type, port
+protocol, and port number. See the graphic below to help you identify service discovery sections.
+
+![Showing the difference between cgroups and service discovery
+sections](https://user-images.githubusercontent.com/1153921/85443711-73998300-b546-11ea-9b3b-2dddfe00bdf8.png)
+
+For example, the first service discovery section shows metrics for an Apache web server listening on port 80 in a pod
+named `httpd-6f6cb96d77-xtpwn`.
+
+> If you don't see any service discovery sections, it's either because your services are not compatible with service
+> discovery or you changed their default configuration, such as the listening port. See the [list of supported
+> services](https://github.com/netdata/helmchart#service-discovery-and-supported-services) for details about whether
+> your installed services are compatible with service discovery, or read the [configuration
+> instructions](/packaging/installer/methods/kubernetes.md#configure-service-discovery) to change how it discovers the
+> supported services.
+
+Click on any of these service discovery sections to see metrics from that particular service. For example, clicking on
+the **Apache apache-default httpd-6f6cb96d77-xtpwn httpd tcp 80** section brings you to a series of charts populated by
+the [Apache collector](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/apache) itself.
+
+With service discovery, you can now see valuable metrics like requests, bandwidth, workers, and more for this pod.
+
+![Apache metrics collected via service
+discovery](https://user-images.githubusercontent.com/1153921/85443905-a5aae500-b546-11ea-99f0-be20ba796feb.png)
+
+The same goes for metrics coming from the CockroachDB pod running on this same node.
+
+![CockroachDB metrics collected via service
+discovery](https://user-images.githubusercontent.com/1153921/85444316-0e925d00-b547-11ea-83ba-b834275cb419.png)
+
+Service discovery helps you monitor the health of specific applications running on your Kubernetes cluster, which in
+turn gives you a complete resource when troubleshooting your infrastructure's health and performance.
+
+### Kubelet
+
+Let's head back up the menu to the **k8s kubelet** section. Kubelet is an agent that runs on every node in a cluster. It
+receives a set of PodSpecs from the Kubernetes Control Plane and ensures the pods described there are both running and
+healthy. Think of it as a manager for the various pods on that node.
+
+Monitoring each node's Kubelet can be invaluable when diagnosing issues with your Kubernetes cluster. For example, you
+can see when the volume of running containers/pods has dropped.
+
+![Charts showing pod and container removal during a scale
+down](https://user-images.githubusercontent.com/1153921/85598613-9ab48b00-b600-11ea-827e-d9ec7779e2d4.png)
+
+This drop might signal a fault or crash in a particular Kubernetes service or deployment (see `kubectl get services` or
+`kubectl get deployments` for more details). If the number of pods increases, it may be because of something more
+benign, like another member of your team scaling up a service with `kubectl scale`.
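+
+Such a benign increase might look like the following command, where the deployment name and replica count are purely
+illustrative:
+
+```bash
+# Scale a hypothetical "httpd" deployment up to 5 replicas.
+kubectl scale deployment httpd --replicas=5
+```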
+
+You can also view charts for the Kubelet API server, the volume of runtime/Docker operations by type,
+configuration-related errors, and the actual vs. desired numbers of volumes, plus a lot more.
+
+Kubelet metrics are collected and visualized thanks to the [kubelet
+collector](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/k8s_kubelet), which is enabled with
+zero configuration on most Kubernetes clusters with standard configurations.
+
+### kube-proxy
+
+Scroll down into the **k8s kubeproxy** section to see metrics about the network proxy that runs on each node in your
+Kubernetes cluster. kube-proxy allows for pods to communicate with each other and accept sessions from outside your
+cluster.
+
+With Netdata, you can monitor how often your k8s proxies are syncing proxy rules between nodes. Dramatic changes in
+these figures could indicate an anomaly in your cluster that's worthy of further investigation.
+
+kube-proxy metrics are collected and visualized thanks to the [kube-proxy
+collector](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/k8s_kubeproxy), which is enabled with
+zero configuration on most Kubernetes clusters with standard configurations.
+
+### Containers
+
+We can now talk about the final piece of Kubernetes monitoring: containers. Each Kubernetes pod is a set of one or
+more cooperating containers, sharing the same namespace, all of which are resourced and tracked by the cgroups feature
+of the Linux kernel. Netdata automatically detects and monitors each running container by interfacing with the cgroups
+feature itself.
+
+You can find these sections beneath **Users**, **k8s kubelet**, and **k8s kubeproxy**. Below them are a number of
+sections devoted to containers running services like CockroachDB, Apache, Redis, and more.
+
+![A number of sections devoted to
+containers](https://user-images.githubusercontent.com/1153921/85480217-74e1a480-b574-11ea-9da7-dd975e0fde0c.png)
+
+Let's look at the section devoted to the container that runs the Apache pod named `httpd-6f6cb96d77-xtpwn`, as described
+in the previous part on [service discovery](#service-discovery-services-running-inside-of-pods).
+
+![cgroups metrics for an Apache
+container/pod](https://user-images.githubusercontent.com/1153921/85480516-03562600-b575-11ea-92ae-dd605bf04106.png)
+
+At first glance, these sections might seem redundant. You might ask, "Why do I need both a service discovery section
+_and_ a container section? It's just one pod, after all!"
+
+The difference is that while the service discovery section shows _Apache_ metrics, the equivalent cgroups section shows
+that container's CPU, memory, and bandwidth usage. You can use the two sections in conjunction to monitor the health and
+performance of your pods and the services they run.
+
+For example, let's say you get an alarm notification from `netdata-parent-0` saying the
+`ea287694-0f22-4f39-80aa-2ca066caf45a` container (also known as the `httpd-6f6cb96d77-xtpwn` pod) is using 99% of its
+available RAM. You can then hop over to the **Apache apache-default httpd-6f6cb96d77-xtpwn httpd tcp 80** section to
+further investigate why Apache is using an unexpected amount of RAM.
+
+All container metrics, whether they're managed by Kubernetes or the Docker service directly, are collected by the
+[cgroups collector](/collectors/cgroups.plugin/README.md). Because this collector integrates with the cgroups Linux
+kernel feature itself, monitoring containers requires zero configuration on most Kubernetes clusters.
+
+## What's next?
+
+After following this guide, you should have a more comprehensive understanding of how to monitor your Kubernetes cluster
+with Netdata. With this setup, you can monitor the health and performance of all your nodes, pods, services, and k8s
+agents. Pre-configured alarms will tell you when something goes awry, and this setup gives you every per-second metric
+you need to make informed decisions about your cluster.
+
+The best part of monitoring a Kubernetes cluster with Netdata is that you don't have to worry about constantly running
+complex `kubectl` commands to see hundreds of highly granular metrics from your nodes. And forget about using `kubectl
+exec -it pod bash` to start up a shell on a pod to find and diagnose an issue with any given pod on your cluster.
+
+And with service discovery, all your compatible pods will automatically appear and disappear as they scale up, move, or
+scale down across your cluster.
+
+To monitor your Kubernetes cluster with Netdata, start by [installing the Helm
+chart](/packaging/installer/methods/kubernetes.md) if you haven't already. The Netdata Agent is open source and entirely
+free for every cluster and every organization, whether you have 10 or 10,000 pods. A few minutes and one `helm install`
+later and you'll have started on the path of building an effective platform for troubleshooting the next performance or
+availability issue on your Kubernetes cluster.
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2Fguides%2Fmonitor%2Fkubernetes-k8s-netdata.md&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/docs/guides/monitor/pi-hole-raspberry-pi.md b/docs/guides/monitor/pi-hole-raspberry-pi.md
new file mode 100644
index 0000000..a180466
--- /dev/null
+++ b/docs/guides/monitor/pi-hole-raspberry-pi.md
@@ -0,0 +1,163 @@
+<!--
+title: "Monitor Pi-hole (and a Raspberry Pi) with Netdata"
+description: "Monitor Pi-hole metrics, plus Raspberry Pi system metrics, in minutes and completely for free with Netdata's open-source monitoring agent."
+image: /img/seo/guides/monitor/netdata-pi-hole-raspberry-pi.png
+custom_edit_url: https://github.com/netdata/netdata/edit/master/docs/guides/monitor/pi-hole-raspberry-pi.md
+-->
+
+# Monitor Pi-hole (and a Raspberry Pi) with Netdata
+
+Between intrusive ads, invasive trackers, and vicious malware, many techies and homelab enthusiasts are advancing their
+networks' security and speed with a tiny computer and a powerful piece of software: [Pi-hole](https://pi-hole.net/).
+
+Pi-hole is a DNS sinkhole that prevents unwanted content from even reaching devices on your home network. It blocks ads
+and malware at the network level, instead of using extensions/add-ons for individual browsers, so you'll stop seeing
+ads in some of the most intrusive places, like your smart TV. Pi-hole can even [improve your network's speed and reduce
+bandwidth](https://discourse.pi-hole.net/t/will-pi-hole-slow-down-my-network/2048).
+
+Most Pi-hole users run it on a [Raspberry Pi](https://www.raspberrypi.org/products/raspberry-pi-4-model-b/) (hence the
+name), a credit card-sized, super-capable computer that costs about $35.
+
+And to keep tabs on how both Pi-hole and the Raspberry Pi are working to protect your network, you can use the
+open-source [Netdata monitoring agent](https://github.com/netdata/netdata).
+
+To get started, all you need is a [Raspberry Pi](https://www.raspberrypi.org/products/raspberry-pi-4-model-b/) with
+Raspbian installed. This guide uses a Raspberry Pi 4 Model B and Raspbian GNU/Linux 10 (buster). This guide assumes
+you're connecting to a Raspberry Pi remotely over SSH, but you could also complete all these steps on the system
+directly using a keyboard, mouse, and monitor.
+
+## Why monitor Pi-hole and a Raspberry Pi with Netdata?
+
+Netdata helps you monitor and troubleshoot all kinds of devices and the applications they run, including IoT devices
+like the Raspberry Pi and applications like Pi-hole.
+
+After a two-minute installation and with zero configuration, you'll be able to see all of Pi-hole's metrics, including
+the volume of queries, connected clients, DNS queries per type, top clients, top blocked domains, and more.
+
+With Netdata installed, you can also monitor system metrics and any other applications you might be running. By default,
+Netdata collects metrics on CPU usage, disk IO, bandwidth, per-application resource usage, and a ton more. With the
+Raspberry Pi used for this guide, Netdata automatically collects about 1,500 metrics every second!
+
+![Real-time Pi-hole monitoring with
+Netdata](https://user-images.githubusercontent.com/1153921/90447745-c8fe9600-e098-11ea-8a57-4f07339f002b.png)
+
+## Install Netdata
+
+Let's start by installing Netdata first so that it can begin collecting system metrics right away, giving you as much
+historical data as possible to work with.
+
+> ⚠️ Don't install Netdata using `apt` and the default package available in Raspbian. The Netdata team does not maintain
+> this package, and can't guarantee it works properly.
+
+On Raspberry Pis running Raspbian, the best way to install Netdata is our one-line kickstart script. This script asks
+you to install dependencies, then compiles Netdata from source via [GitHub](https://github.com/netdata/netdata).
+
+```bash
+bash <(curl -Ss https://my-netdata.io/kickstart.sh)
+```
+
+Once installed on a Raspberry Pi 4 with no accessories, Netdata starts collecting roughly 1,500 metrics every second and
+populates its dashboard with more than 250 charts.
+
+Open your browser of choice and navigate to `http://NODE:19999/`, replacing `NODE` with the IP address of your Raspberry
+Pi. Not sure what that IP is? Try running `hostname -I | awk '{print $1}'` from the Pi itself.
+
+You'll see Netdata's dashboard and a few hundred real-time,
+[interactive](https://learn.netdata.cloud/guides/step-by-step/step-02#interact-with-charts) charts. Feel free to
+explore, but let's turn our attention to installing Pi-hole.
+
+## Install Pi-hole
+
+Like Netdata, Pi-hole has a one-line script for simple installation. From your Raspberry Pi, run the following:
+
+```bash
+curl -sSL https://install.pi-hole.net | bash
+```
+
+The installer will help you set up Pi-hole based on the topology of your network. Once finished, you should set up your
+devices—or your router for system-wide sinkhole protection—to [use Pi-hole as their DNS
+service](https://discourse.pi-hole.net/t/how-do-i-configure-my-devices-to-use-pi-hole-as-their-dns-server/245). You've
+finished setting up Pi-hole at this point.
+
+As far as configuring Netdata to monitor Pi-hole metrics, there's nothing you actually need to do. Netdata's [Pi-hole
+collector](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/pihole) will autodetect the new service
+running on your Raspberry Pi and start collecting metrics every second.
+
+Restart Netdata with `sudo service netdata restart` so that it recognizes Pi-hole is running and starts a per-second
+collection job. When you refresh your Netdata dashboard or load it up again in a new tab, you'll see a new entry in the
+menu for **Pi-hole** metrics.
+
+## Use Netdata to explore and monitor your Raspberry Pi and Pi-hole
+
+By the time you've reached this point in the guide, Netdata has already collected a ton of valuable data about your
+Raspberry Pi, Pi-hole, and any other apps/services you might be running. Even a few minutes of collecting 1,500 metrics
+per second adds up quickly.
+
+You can now use Netdata's synchronized charts to zoom, highlight, scrub through time, and discern how an anomaly in one
+part of your system might affect another.
+
+![The Netdata dashboard in
+action](https://user-images.githubusercontent.com/1153921/80827388-b9fee100-8b98-11ea-8f60-0d7824667cd3.gif)
+
+If you're completely new to Netdata, look at our [step-by-step guide](/docs/guides/step-by-step/step-00.md) for a
+walkthrough of all its features. For a more expedited tour, see the [get started guide](/docs/getting-started.md).
+
+### Enable temperature sensor monitoring
+
+You need to manually enable Netdata's built-in [temperature sensor
+collector](https://learn.netdata.cloud/docs/agent/collectors/charts.d.plugin/sensors) to start collecting metrics.
+
+> Netdata uses a few plugins to manage its [collectors](/collectors/REFERENCE.md), each using a different language: Go,
+> Python, Node.js, and Bash. While our Go collectors are undergoing the most active development, we still support the
+> other languages. In this case, you need to enable a temperature sensor collector that's written in Bash.
+
+First, open the `charts.d.conf` file for editing. You should always use the `edit-config` script to edit Netdata's
+configuration files, as it ensures your settings persist across updates to the Netdata Agent.
+
+```bash
+cd /etc/netdata
+sudo ./edit-config charts.d.conf
+```
+
+Uncomment the `sensors=force` line and save the file. Restart Netdata with `sudo service netdata restart` to enable
+Raspberry Pi temperature sensor monitoring.
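+
+For reference, after uncommenting, the relevant line in `charts.d.conf` should read (shown here in isolation):
+
+```conf
+sensors=force
+```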
+
+### Storing historical metrics on your Raspberry Pi
+
+By default, Netdata allocates 256 MiB of disk space to store historical metrics inside the [database
+engine](/database/engine/README.md). On the Raspberry Pi used for this guide, Netdata collects 1,500 metrics every
+second, which equates to storing about 3.5 days' worth of historical metrics.
+
+You can increase this allocation by editing `netdata.conf` and increasing the `dbengine multihost disk space` setting to
+more than 256.
+
+```yaml
+[global]
+ dbengine multihost disk space = 512
+```
+
+Use our [database sizing
+calculator](/docs/store/change-metrics-storage.md#calculate-the-system-resources-RAM-disk-space-needed-to-store-metrics)
+and [guide on storing historical metrics](/docs/guides/longer-metrics-storage.md) to help you determine the right
+setting for your Raspberry Pi.
+
+## What's next?
+
+Now that you're monitoring Pi-hole and your Raspberry Pi with Netdata, you can extend its capabilities even further, or
+configure Netdata to meet more specific goals.
+
+Most importantly, you can always install additional services and instantly collect metrics from many of them with our
+[300+ integrations](/collectors/COLLECTORS.md).
+
+- [Optimize performance](/docs/guides/configure/performance.md) using tweaks developed for IoT devices.
+- [Stream Raspberry Pi metrics](/streaming/README.md) to a parent host for easy access or longer-term storage.
+- [Tweak alarms](/health/QUICKSTART.md) for either Pi-hole or the health of your Raspberry Pi.
+- [Export metrics to external databases](/exporting/README.md) with the exporting engine.
+
+Or, head over to [our guides](https://learn.netdata.cloud/guides/) for even more experiments and insights into
+troubleshooting the health of your systems and services.
+
+If you have any questions about using Netdata to monitor your Raspberry Pi, Pi-hole, or any other applications, head on
+over to our [community forum](https://community.netdata.cloud/).
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2Fguides%2Fmonitor%2Fpi-hole-raspberry-pi.md&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/docs/guides/monitor/process.md b/docs/guides/monitor/process.md
new file mode 100644
index 0000000..893e6b7
--- /dev/null
+++ b/docs/guides/monitor/process.md
@@ -0,0 +1,299 @@
+<!--
+title: Monitor any process in real-time with Netdata
+description: "Tap into Netdata's powerful collectors, with per-second utilization metrics for every process, to troubleshoot faster and make data-informed decisions."
+image: /img/seo/guides/monitor/process.png
+custom_edit_url: https://github.com/netdata/netdata/edit/master/docs/guides/monitor/process.md
+-->
+
+# Monitor any process in real-time with Netdata
+
+Netdata is more than a multitude of generic system-level metrics and visualizations. Instead of providing only a bird's
+eye view of your system, leaving you to wonder exactly _what_ is taking up 99% CPU, Netdata also gives you visibility
+into _every layer_ of your node. These additional layers give you context, and meaningful insights, into the true health
+and performance of your infrastructure.
+
+One of these layers is the _process_. Every time a Linux system runs a program, it creates an independent process that
+executes the program's instructions in parallel with anything else happening on the system. Linux systems track the
+state and resource utilization of processes using the [`/proc` filesystem](https://en.wikipedia.org/wiki/Procfs), and
+Netdata is designed to hook into those metrics to create meaningful visualizations out of the box.
+
+While there are a lot of existing command-line tools for tracking processes on Linux systems, such as `ps` or `top`,
+only Netdata provides dozens of real-time charts, at both per-second and event frequency, without you having to write
+SQL queries or know a bunch of arbitrary command-line flags.
+
+With Netdata's process monitoring, you can:
+
+- Benchmark/optimize performance of standard applications, like web servers or databases
+- Benchmark/optimize performance of custom applications
+- Troubleshoot CPU/memory/disk utilization issues (why is my system's CPU spiking right now?)
+- Perform granular capacity planning based on the specific needs of your infrastructure
+- Search for leaking file descriptors
+- Investigate zombie processes
+
+... and much more. Let's get started.
+
+## Prerequisites
+
+- One or more Linux nodes running the [Netdata Agent](/docs/get/README.md). If you need more time to understand
+ Netdata before following this guide, see the [infrastructure](/docs/quickstart/infrastructure.md) or
+ [single-node](/docs/quickstart/single-node.md) monitoring quickstarts.
+- A general understanding of how to [configure the Netdata Agent](/docs/configure/nodes.md) using `edit-config`.
+- A Netdata Cloud account. [Sign up](https://app.netdata.cloud) if you don't have one already.
+
+## How does Netdata do process monitoring?
+
+The Netdata Agent already knows to look for hundreds of [standard applications that we support via
+collectors](/collectors/COLLECTORS.md), and groups them based on their purpose. Let's say you want to monitor a MySQL
+database using its process. The Netdata Agent already knows to look for processes with the string `mysqld` in their
+name, along with a few others, and puts them into the `sql` group. This `sql` group then becomes a dimension in all
+process-specific charts.
+
+These process and group settings are used by two unique and powerful collectors.
+
+[**`apps.plugin`**](/collectors/apps.plugin/README.md) looks at the Linux process tree every second, much like `top` or
+`ps fax`, and collects resource utilization information on every running process. It then automatically adds a layer of
+meaningful visualization on top of these metrics, and creates per-process/application charts.
+
+[**`ebpf.plugin`**](/collectors/ebpf.plugin/README.md): Netdata's extended Berkeley Packet Filter (eBPF) collector
+monitors Linux kernel-level metrics for file descriptors, virtual filesystem IO, and process management, and then hands
+process-specific metrics over to `apps.plugin` for visualization. The eBPF collector also collects and visualizes
+metrics on an _event frequency_, which means it captures every kernel interaction, and not just the volume of
+interaction at every second in time. That's even more precise than Netdata's standard per-second granularity.
+
+### Per-process metrics and charts in Netdata
+
+With these collectors working in parallel, Netdata visualizes the following per-second metrics for _any_ process on your
+Linux systems:
+
+- CPU utilization (`apps.cpu`)
+ - Total CPU usage
+ - User/system CPU usage (`apps.cpu_user`/`apps.cpu_system`)
+- Disk I/O
+ - Physical reads/writes (`apps.preads`/`apps.pwrites`)
+ - Logical reads/writes (`apps.lreads`/`apps.lwrites`)
+ - Open unique files (if a file is found open multiple times, it is counted just once, `apps.files`)
+- Memory
+ - Real Memory Used (non-shared, `apps.mem`)
+ - Virtual Memory Allocated (`apps.vmem`)
+ - Minor page faults (i.e. memory activity, `apps.minor_faults`)
+- Processes
+ - Threads running (`apps.threads`)
+ - Processes running (`apps.processes`)
+ - Carried over uptime (since the last Netdata Agent restart, `apps.uptime`)
+ - Minimum uptime (`apps.uptime_min`)
+ - Average uptime (`apps.uptime_average`)
+ - Maximum uptime (`apps.uptime_max`)
+ - Pipes open (`apps.pipes`)
+- Swap memory
+ - Swap memory used (`apps.swap`)
+ - Major page faults (i.e. swap activity, `apps.major_faults`)
+- Network
+ - Sockets open (`apps.sockets`)
+- eBPF file
+ - Number of calls to open files. (`apps.file_open`)
+ - Number of files closed. (`apps.file_closed`)
+ - Number of calls to open files that returned errors.
+ - Number of calls to close files that returned errors.
+- eBPF syscall
+ - Number of calls to delete files. (`apps.file_deleted`)
+ - Number of calls to `vfs_write`. (`apps.vfs_write_call`)
+ - Number of calls to `vfs_read`. (`apps.vfs_read_call`)
+ - Number of bytes written with `vfs_write`. (`apps.vfs_write_bytes`)
+ - Number of bytes read with `vfs_read`. (`apps.vfs_read_bytes`)
+ - Number of calls to write a file that returned errors.
+ - Number of calls to read a file that returned errors.
+- eBPF process
+ - Number of processes created with `do_fork`. (`apps.process_create`)
+ - Number of threads created with `do_fork` or `__x86_64_sys_clone`, depending on your system's kernel version. (`apps.thread_create`)
+ - Number of times that a process called `do_exit`. (`apps.task_close`)
+- eBPF net
+ - Number of bytes sent. (`apps.bandwidth_sent`)
+ - Number of bytes received. (`apps.bandwidth_recv`)
+
+As an example, here's the per-process CPU utilization chart, including a `sql` group/dimension.
+
+![A per-process CPU utilization chart in Netdata
+Cloud](https://user-images.githubusercontent.com/1153921/101217226-3a5d5700-363e-11eb-8610-aa1640aefb5d.png)
+
+## Configure the Netdata Agent to recognize a specific process
+
+To monitor any process, you need to make sure the Netdata Agent is aware of it. As mentioned above, the Agent is already
+aware of hundreds of processes, and collects metrics from them automatically.
+
+But, if you want to change the grouping behavior, add an application that isn't yet supported in the Netdata Agent, or
+monitor a custom application, you need to edit the `apps_groups.conf` configuration file.
+
+Navigate to your [Netdata config directory](/docs/configure/nodes.md) and use `edit-config` to edit the file.
+
+```bash
+cd /etc/netdata # Replace this with your Netdata config directory if not at /etc/netdata.
+sudo ./edit-config apps_groups.conf
+```
+
+Inside the file are lists of process names, oftentimes using wildcards (`*`), that the Netdata Agent looks for and
+groups together. For example, the Netdata Agent looks for processes starting with `mysqld`, `mariad`, `postgres`, and
+others, and groups them into `sql`. That makes sense, since all these processes are for SQL databases.
+
+```conf
+sql: mysqld* mariad* postgres* postmaster* oracle_* ora_* sqlservr
+```
+
+These groups are then reflected as [dimensions](/web/README.md#dimensions) within Netdata's charts.
+
+![An example per-process CPU utilization chart in Netdata
+Cloud](https://user-images.githubusercontent.com/1153921/101369156-352e2100-3865-11eb-9f0d-b8fac162e034.png)
+
+See the following two sections for details based on your needs. If you don't need to configure `apps_groups.conf`, jump
+down to [visualizing process metrics](#visualize-process-metrics).
+
+### Standard applications (web servers, databases, containers, and more)
+
+As explained above, the Netdata Agent is already aware of most standard applications you run on Linux nodes, and you
+shouldn't need to configure it to discover them.
+
+However, if you're using multiple applications that the Netdata Agent groups together, you may want to separate them for
+more precise monitoring. If you're not running any other types of SQL databases on that node, you don't need to change
+the grouping, since you know that MySQL is the only process contributing to the `sql` group.
+
+Let's say you're using both MySQL and PostgreSQL databases on a single node, and want to monitor their processes
+independently. Open the `apps_groups.conf` file as explained in the [section
+above](#configure-the-netdata-agent-to-recognize-a-specific-process) and scroll down until you find the `database
+servers` section. Create new groups for MySQL and PostgreSQL, and move their process queries into the unique groups.
+
+```conf
+# -----------------------------------------------------------------------------
+# database servers
+
+mysql: mysqld*
+postgres: postgres*
+sql: mariad* postmaster* oracle_* ora_* sqlservr
+```
+
+Restart Netdata with `service netdata restart`, or the appropriate method for your system, to start collecting
+utilization metrics from your application. Time to [visualize your process metrics](#visualize-process-metrics).
+
+### Custom applications
+
+Let's assume you have an application that runs on the process `custom-app`. To monitor eBPF metrics for that application
+separate from any others, you need to create a new group in `apps_groups.conf` and associate that process name with it.
+
+Open the `apps_groups.conf` file as explained in the [section
+above](#configure-the-netdata-agent-to-recognize-a-specific-process). Scroll down to `# NETDATA processes accounting`.
+Above that, paste in the following text, which creates a new `custom-app` group with the `custom-app` process. Replace
+`custom-app` with the name of your application's Linux process. `apps_groups.conf` should now look like this:
+
+```conf
+...
+# -----------------------------------------------------------------------------
+# Custom applications to monitor with apps.plugin and ebpf.plugin
+
+custom-app: custom-app
+
+# -----------------------------------------------------------------------------
+# NETDATA processes accounting
+...
+```
+
+Restart Netdata with `service netdata restart`, or the appropriate method for your system, to start collecting
+utilization metrics from your application.
+
+## Visualize process metrics
+
+Now that you're collecting metrics for your process, you'll want to visualize them using Netdata's real-time,
+interactive charts. Find these visualizations in the same section regardless of whether you use [Netdata
+Cloud](https://app.netdata.cloud) for infrastructure monitoring, or single-node monitoring with the local Agent's
+dashboard at `http://localhost:19999`.
+
+If you need a refresher on all the available per-process charts, see the [above
+list](#per-process-metrics-and-charts-in-netdata).
+
+### Using Netdata's application collector (`apps.plugin`)
+
+`apps.plugin` puts all of its charts under the **Applications** section of any Netdata dashboard.
+
+![Screenshot of the Applications section on a Netdata
+dashboard](https://user-images.githubusercontent.com/1153921/101401172-2ceadb80-388f-11eb-9e9a-88443894c272.png)
+
+Let's continue with the MySQL example. We can create a [test
+database](https://www.digitalocean.com/community/tutorials/how-to-measure-mysql-query-performance-with-mysqlslap) in
+MySQL to generate load on the `mysql` process.
+
+`apps.plugin` immediately collects and visualizes this activity in the `apps.cpu` chart, which shows an increase in CPU
+utilization from the `sql` group. There is a parallel increase in `apps.pwrites`, which visualizes writes to disk.
+
+![Per-application CPU utilization
+metrics](https://user-images.githubusercontent.com/1153921/101409725-8527da80-389b-11eb-96e9-9f401535aafc.png)
+
+![Per-application disk writing
+metrics](https://user-images.githubusercontent.com/1153921/101409728-85c07100-389b-11eb-83fd-d79dd1545b5a.png)
+
+Next, the `mysqlslap` utility queries the database to provide some benchmarking load on the MySQL database. It won't
+look exactly like a production database executing lots of user queries, but it gives you an idea of what these
+visualizations can reveal.
+
+```bash
+sudo mysqlslap --user=sysadmin --password --host=localhost --concurrency=50 --iterations=10 --create-schema=employees --query="SELECT * FROM dept_emp;" --verbose
+```
+
+The following per-process disk utilization charts show spikes under the `sql` group at the same time `mysqlslap` was run
+numerous times, with slightly different concurrency and query options.
+
+![Per-application disk
+metrics](https://user-images.githubusercontent.com/1153921/101411810-d08fb800-389e-11eb-85b3-f3fa41f1f887.png)
+
+> 💡 Click on any dimension below a chart in Netdata Cloud (or to the right of a chart on a local Agent dashboard), to
+> visualize only that dimension. This can be particularly useful in process monitoring to separate one process'
+> utilization from the rest of the system.
+
+### Using Netdata's eBPF collector (`ebpf.plugin`)
+
+Netdata's eBPF collector puts its charts in two places. Of most importance to process monitoring are the **ebpf file**,
+**ebpf syscall**, **ebpf process**, and **ebpf net** sub-sections under **Applications**, shown in the above screenshot.
+
+For example, running the above workload shows the entire "story" of how MySQL interacts with the Linux kernel to open
+processes/threads to handle a large number of SQL queries, then subsequently close the tasks as each query returns the
+relevant data.
+
+![Per-process eBPF
+charts](https://user-images.githubusercontent.com/1153921/101412395-c8844800-389f-11eb-86d2-20c8a0f7b3c0.png)
+
+`ebpf.plugin` visualizes additional eBPF metrics, which are system-wide and not per-process, under the **eBPF** section.
+
+## What's next?
+
+Now that you have `apps_groups.conf` configured correctly, and know where to find per-process visualizations throughout
+Netdata's ecosystem, you can precisely monitor the health and performance of any process on your node using per-second
+metrics.
+
+For even more in-depth troubleshooting, see our guide on [monitoring and debugging applications with
+eBPF](/docs/guides/troubleshoot/monitor-debug-applications-ebpf.md).
+
+If the process you're monitoring also has a [supported collector](/collectors/COLLECTORS.md), now is a great time to set
+that up if it wasn't autodetected. With both process utilization and application-specific metrics, you should have every
+piece of data needed to discover the root cause of an incident. See our [collector
+setup](/docs/collect/enable-configure.md) doc for details.
+
+[Create new dashboards](/docs/visualize/create-dashboards.md) in Netdata Cloud using charts from `apps.plugin`,
+`ebpf.plugin`, and application-specific collectors to build targeted dashboards for monitoring key processes across your
+infrastructure.
+
+Try running [Metric Correlations](https://learn.netdata.cloud/docs/cloud/insights/metric-correlations) on a node that's
+running the process(es) you're monitoring. Even if nothing is going wrong at the moment, Netdata Cloud's embedded
+intelligence helps you better understand how a MySQL database, for example, might influence a system's volume of memory
+page faults. And when an incident is afoot, use Metric Correlations to reduce mean time to resolution (MTTR) and
+cognitive load.
+
+If you want more specific metrics from your custom application, check out Netdata's [statsd
+support](/collectors/statsd.plugin/README.md). With statsd, you can send detailed metrics from your application to
+Netdata and visualize them with per-second granularity. Netdata's statsd collector works with dozens of [statsd server
+implementations](https://github.com/etsy/statsd/wiki#client-implementations), which work with most application
+frameworks.
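+
+To get a sense of how simple the statsd protocol is, here's a hedged shell example. The metric name is made up, and it
+assumes Netdata's statsd listener is on its default UDP port 8125 on the same host:
+
+```bash
+# Send a single counter increment to Netdata's statsd listener over UDP.
+echo "custom_app.requests:1|c" | nc -u -w 1 localhost 8125
+```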
+
+### Related reference documentation
+
+- [Netdata Agent · `apps.plugin`](/collectors/apps.plugin/README.md)
+- [Netdata Agent · `ebpf.plugin`](/collectors/ebpf.plugin/README.md)
+- [Netdata Agent · Dashboards](/web/README.md#dimensions)
+- [Netdata Agent · MySQL collector](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/mysql)
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2Fguides%2Fmonitor%2Fprocess&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/docs/guides/monitor/stop-notifications-alarms.md b/docs/guides/monitor/stop-notifications-alarms.md
new file mode 100644
index 0000000..587880a
--- /dev/null
+++ b/docs/guides/monitor/stop-notifications-alarms.md
@@ -0,0 +1,92 @@
+<!--
+title: "Stop notifications for individual alarms"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/docs/guides/monitor/stop-notifications-alarms.md
+-->
+
+# Stop notifications for individual alarms
+
+In this short tutorial, you'll learn how to stop notifications for individual alarms in Netdata's health
+monitoring system. We also refer to this process as _silencing_ the alarm.
+
+Why silence alarms? We designed Netdata's pre-configured alarms for production systems, so they might not be
+relevant if you run Netdata on your laptop or a small virtual server. If they're not helpful, they can be a distraction
+to real issues with health and performance.
+
+Silencing individual alarms is an excellent solution for situations where you're not interested in seeing a specific
+alarm but don't want to disable a [notification system](/health/notifications/README.md) entirely.
+
+## Find the alarm configuration file
+
+To silence an alarm, you need to know where to find its configuration file.
+
+Let's use the `system.cpu` chart as an example. It's the first chart you'll see on most Netdata dashboards.
+
+To figure out which file you need to edit, open up Netdata's dashboard and click the **Alarms** button at the top
+of the dashboard, followed by clicking on the **All** tab.
+
+In this example, we're looking for the `system - cpu` entity, which, when opened, looks like this:
+
+![The system - cpu alarm
+entity](https://user-images.githubusercontent.com/1153921/67034648-ebb4cc80-f0cc-11e9-9d49-1023629924f5.png)
+
+In the `source` row, you see that this chart is getting its configuration from
+`4@/usr/lib/netdata/conf.d/health.d/cpu.conf`. The relevant part begins at `health.d`: `health.d/cpu.conf`. That's
+the file you need to edit if you want to silence this alarm.
+
+For more information about editing or referencing health configuration files on your system, see the [health
+quickstart](/health/QUICKSTART.md#edit-health-configuration-files).
+
+## Edit the file to enable silencing
+
+To edit `health.d/cpu.conf`, use `edit-config` from inside of your Netdata configuration directory.
+
+```bash
+cd /etc/netdata/ # Replace with your Netdata configuration directory, if not /etc/netdata/
+./edit-config health.d/cpu.conf
+```
+
+> You may need to use `sudo` or another method of elevating your privileges.
+
+The beginning of the file looks like this:
+
+```yaml
+template: 10min_cpu_usage
+      on: system.cpu
+      os: linux
+   hosts: *
+  lookup: average -10m unaligned of user,system,softirq,irq,guest
+   units: %
+   every: 1m
+    warn: $this > (($status >= $WARNING) ? (75) : (85))
+    crit: $this > (($status == $CRITICAL) ? (85) : (95))
+   delay: down 15m multiplier 1.5 max 1h
+    info: average cpu utilization for the last 10 minutes (excluding iowait, nice and steal)
+      to: sysadmin
+```
+
+To silence this alarm, change `sysadmin` to `silent`.
+
+```yaml
+      to: silent
+```
+
+Use one of the available [methods](/health/QUICKSTART.md#reload-health-configuration) to reload your health
+configuration and ensure you get no more notifications about that alarm.
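+
+One such method, assuming your Netdata installation includes the `netdatacli` tool, is:
+
+```bash
+# Reload health configuration without restarting the entire Agent.
+sudo netdatacli reload-health
+```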
+
+You can add `to: silent` to any alarm that you'd rather not receive notifications about.
+
+## What's next?
+
+You should now know the fundamentals behind silencing any individual alarm in Netdata.
+
+To learn about _all_ of Netdata's health configuration possibilities, visit the [health reference
+guide](/health/REFERENCE.md), or check out other [tutorials on health monitoring](/health/README.md#tutorials).
+
+Or, take better control over how you get notified about alarms via the [notification
+system](/health/notifications/README.md).
+
+You can also use Netdata's [Health Management API](/web/api/health/README.md#health-management-api) to control health
+checks and notifications while Netdata runs. With this API, you can disable health checks during a maintenance window or
+backup process, for example.
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2Fguides%2Fmonitor%2Fstop-notifications-alarms%2F&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/docs/guides/monitor/visualize-monitor-anomalies.md b/docs/guides/monitor/visualize-monitor-anomalies.md
new file mode 100644
index 0000000..f37dadc
--- /dev/null
+++ b/docs/guides/monitor/visualize-monitor-anomalies.md
@@ -0,0 +1,147 @@
+<!--
+title: "Monitor and visualize anomalies with Netdata (part 2)"
+description: "Using unsupervised anomaly detection and machine learning, get notified "
+image: /img/seo/guides/monitor/visualize-monitor-anomalies.png
+author: "Joel Hans"
+author_title: "Editorial Director, Technical & Educational Resources"
+author_img: "/img/authors/joel-hans.jpg"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/docs/guides/monitor/visualize-monitor-anomalies.md
+-->
+
+# Monitor and visualize anomalies with Netdata (part 2)
+
+Welcome to part 2 of our series of guides on using _unsupervised anomaly detection_ to detect issues with your systems,
+containers, and applications using the open-source Netdata Agent. For an introduction to detecting anomalies and
+monitoring associated metrics, see [part 1](/docs/guides/monitor/anomaly-detection.md), which covers prerequisites and
+configuration basics.
+
+With anomaly detection in the Netdata Agent set up, you will now want to visualize and monitor which charts have
+anomalous data, when, and where to look next.
+
+> 💡 In certain cases, the anomalies collector doesn't start immediately after restarting the Netdata Agent. If this
+> happens, you won't see the dashboard section or the relevant [charts](#visualize-anomalies-in-charts) right away. Wait
+> a minute or two, refresh, and look again. If the anomalies charts and alarms are still not present, investigate the
+> error log with `less /var/log/netdata/error.log | grep anomalies`.
+
+## Test anomaly detection
+
+Time to see the Netdata Agent's unsupervised anomaly detection in action. To trigger anomalies on the Nginx web server,
+use `ab`, otherwise known as [Apache Bench](https://httpd.apache.org/docs/2.4/programs/ab.html). Despite its name, it
+works just as well with Nginx web servers. Install it on Ubuntu/Debian systems with `sudo apt install apache2-utils`.
+
+> 💡 If you haven't followed the guide's example of using Nginx, an easy way to test anomaly detection on your node is
+> to use the `stress-ng` command, which is available on most Linux distributions. Run `stress-ng --cpu 0` to create CPU
+> stress or `stress-ng --vm 0` for RAM stress. Each test will cause some "collateral damage," in that you may see CPU
+> utilization rise when running the RAM test, and vice versa.
+
+The following test sends up to 10,000,000 requests for Nginx to handle, with a maximum of 10 concurrent requests, over
+a run time of 60 seconds. `ab` stops as soon as it reaches either limit, so on most systems the 60-second timer runs
+out first.
+
+```bash
+ab -k -c 10 -t 60 -n 10000000 http://127.0.0.1/
+```
+
+Let's see how Netdata detects this anomalous behavior and propagates information to you through preconfigured alarms and
+dashboards that automatically organize anomaly detection metrics into meaningful charts to help you begin root cause
+analysis (RCA).
+
+## Monitor anomalies with alarms
+
+The anomalies collector creates two "classes" of alarms for each chart captured by the `charts_regex` setting. All these
+alarms are preconfigured based on your [configuration in
+`anomalies.conf`](/docs/guides/monitor/anomaly-detection.md#configure-the-anomalies-collector). With the `charts_regex`
+and `charts_to_exclude` settings from [part 1](/docs/guides/monitor/anomaly-detection.md) of this guide series, the
+Netdata Agent creates 32 alarms driven by unsupervised anomaly detection.
+
+The first class triggers warning alarms when the average anomaly probability for a given chart has stayed above 50% for
+at least the last two minutes.
+
+![An example anomaly probability
+alarm](https://user-images.githubusercontent.com/1153921/104225767-0a0a9480-5404-11eb-9bfd-e29592397203.png)
+
+The second class triggers warning alarms when the number of anomalies in the last two minutes hits 10 or higher.
+
+![An example anomaly count
+alarm](https://user-images.githubusercontent.com/1153921/104225769-0aa32b00-5404-11eb-95f3-7309f9429fe1.png)
+
+If you see either of these alarms in Netdata Cloud, the local Agent dashboard, or on your preferred notification
+platform, it's a safe bet that the node's current metrics have deviated from normal. That doesn't necessarily mean
+there's a full-blown incident, depending on what application/service you're using anomaly detection on, but it's worth
+further investigation.
+
+As you use the anomalies collector, you may find that the default settings provide too many or too few genuine alarms.
+In this case, [configure the alarm](/docs/monitor/configure-alarms.md) with `sudo ./edit-config
+health.d/anomalies.conf`. Take a look at the `lookup` line syntax in the [health
+reference](/health/REFERENCE.md#alarm-line-lookup) to understand how the anomalies collector automatically creates
+alarms for any dimension on the `anomalies_local.probability` and `anomalies_local.anomaly` charts.
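+
+For a hedged sense of what such an entity can look like, here is an illustrative sketch rather than the shipped
+defaults; the template name and thresholds are made up:
+
+```yaml
+template: anomalies_probability_example
+      on: anomalies.probability
+  lookup: average -2m foreach *
+   every: 1m
+    warn: $this > 50
+```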
+
+## Visualize anomalies in charts
+
+In either [Netdata Cloud](https://app.netdata.cloud) or the local Agent dashboard at `http://NODE:19999`, click on the
+**Anomalies** [section](/web/gui/README.md#sections) to see the pair of anomaly detection charts, which are
+preconfigured to visualize per-second anomaly metrics based on your [configuration in
+`anomalies.conf`](/docs/guides/monitor/anomaly-detection.md#configure-the-anomalies-collector).
+
+These charts have the contexts `anomalies.probability` and `anomalies.anomaly`. Together, these charts not only help
+you immediately recognize that something is going wrong on your node, but also give context as to where to look next.
+
+The `anomalies_local.probability` chart shows the probability that the latest observed data is anomalous, based on the
+trained model. The `anomalies_local.anomaly` chart visualizes 0&rarr;1 predictions based on whether the latest observed
+data is anomalous based on the trained model. Both charts share the same dimensions, which you configured via
+`charts_regex` and `charts_to_exclude` in [part 1](/docs/guides/monitor/anomaly-detection.md).
+
+In other words, the `probability` chart shows the amplitude of the anomaly, whereas the `anomaly` chart provides quick
+yes/no context.
+
+![Two charts created by the anomalies
+collector](https://user-images.githubusercontent.com/1153921/104226380-ef84eb00-5404-11eb-9faf-9e64c43b95ff.png)
+
+Before `08:32:00`, both charts show little in the way of verified anomalies. Based on the metrics the anomalies
+collector has trained on, some baseline level of anomaly probability is normal, as seen in the
+`web_log_nginx_requests_prob` dimension and a few others. What you're looking for is large deviations from that "noise"
+in the `anomalies.probability` chart, or any increments to the `anomalies.anomaly` chart.
+
+Unsurprisingly, the stress test that began at `08:32:00` caused significant changes to these charts. The three
+dimensions that immediately shot to 100% anomaly probability, and remained there during the test, were
+`web_log_nginx.requests_prob`, `nginx_local.connections_accepted_handled_prob`, and `system.cpu_pressure_prob`.
+
+## Build an anomaly detection dashboard
+
+[Netdata Cloud](https://app.netdata.cloud) features a drag-and-drop [dashboard
+editor](/docs/visualize/create-dashboards.md) that helps you create entirely new dashboards with charts targeted for
+your specific applications.
+
+For example, here's a dashboard designed for visualizing anomalies present in an Nginx web server, including
+documentation about why the dashboard exists and where to look next based on what you're seeing:
+
+![An example anomaly detection
+dashboard](https://user-images.githubusercontent.com/1153921/104226915-c6188f00-5405-11eb-9bb4-559a18016fa7.png)
+
+Use the anomaly charts for instant visual identification of potential anomalies, then use the Nginx-specific charts in
+the right column to validate whether the probability and anomaly counters reflect a real incident. If they do, use
+[Metric Correlations](https://learn.netdata.cloud/docs/cloud/insights/metric-correlations) to narrow the dashboard down
+to only the charts relevant to what you're seeing from the anomalies collector.
+
+## What's next?
+
+Between this guide and [part 1](/docs/guides/monitor/anomaly-detection.md), which covered setup and configuration, you
+now have a fundamental understanding of how unsupervised anomaly detection in Netdata works, from root cause to alarms
+to preconfigured or custom dashboards.
+
+We'd love to hear your feedback on the anomalies collector. Hop over to the [community
+forum](https://community.netdata.cloud/t/anomalies-collector-feedback-megathread/767), and let us know if you're already getting value from
+unsupervised anomaly detection, or would like to see something added to it. You might even post a custom configuration
+that works well for monitoring some other popular application, like MySQL, PostgreSQL, Redis, or anything else we
+[support through collectors](/collectors/COLLECTORS.md).
+
+In part 3 of this series on unsupervised anomaly detection using Netdata, we'll create a custom model to apply
+unsupervised anomaly detection to an entire mission-critical application. Stay tuned!
+
+### Related reference documentation
+
+- [Netdata Agent · Anomalies collector](/collectors/python.d.plugin/anomalies/README.md)
+- [Netdata Cloud · Build new dashboards](https://learn.netdata.cloud/docs/cloud/visualize/dashboards)
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2Fguides%2Fmonitor%2Fanomaly-detectionl&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/docs/guides/step-by-step/step-00.md b/docs/guides/step-by-step/step-00.md
new file mode 100644
index 0000000..7943666
--- /dev/null
+++ b/docs/guides/step-by-step/step-00.md
@@ -0,0 +1,115 @@
+<!--
+title: "The step-by-step Netdata guide"
+date: 2020-03-31
+custom_edit_url: https://github.com/netdata/netdata/edit/master/docs/guides/step-by-step/step-00.md
+-->
+
+# The step-by-step Netdata guide
+
+Welcome to Netdata! We're glad you're interested in our health monitoring and performance troubleshooting system.
+
+Because Netdata is entirely open-source software, you can use it free of charge, whether you want to monitor one or ten
+thousand systems! All our code is hosted on [GitHub](https://github.com/netdata/netdata).
+
+This guide is designed to help you understand what Netdata is, what it's capable of, and how it'll help you make
+faster and more informed decisions about the health and performance of your systems and applications. If you're
+completely new to Netdata, or have never tried health monitoring/performance troubleshooting systems before, this
+guide is perfect for you.
+
+If you have monitoring experience, or would rather get straight into configuring Netdata to your needs, you can jump
+straight into code and configurations with our [getting started guide](/docs/getting-started.md).
+
+> This guide contains instructions for Netdata installed on a Linux system. Many of the instructions will work on
+> other supported operating systems, like FreeBSD and macOS, but we can't make any guarantees.
+
+## Where to go if you need help
+
+No matter where you are in this Netdata guide, if you need help, head over to our [GitHub
+repository](https://github.com/netdata/netdata/). That's where we collect questions from users, help fix their bugs, and
+point people toward documentation that explains what they're having trouble with.
+
+Click on the **issues** tab to see all the conversations we're having with Netdata users. Use the search bar to find
+previously written advice for your specific problem, and if you don't see any results, hit the **New issue** button to
+send us a question.
+
+Or, if that's too complicated, feel free to send this guide's author [an email](mailto:joel@netdata.cloud).
+
+## Before we get started
+
+Let's make sure you have Netdata installed on your system!
+
+> If you already installed Netdata, feel free to skip to [Step 1: Netdata's building blocks](step-01.md).
+
+The easiest way to install Netdata on a Linux system is our `kickstart.sh` one-line installer. Run this on your system
+and let it take care of the rest.
+
+This script installs Netdata from source, keeps it up to date with nightly releases, connects to the Netdata
+[registry](/registry/README.md), and sends [_anonymous statistics_](/docs/anonymous-statistics.md) about how you use
+Netdata. We use this information to better understand how we can improve the Netdata experience for all our users.
+
+```bash
+bash <(curl -Ss https://my-netdata.io/kickstart.sh)
+```
+
+Once finished, you'll have Netdata installed, and you'll be set up to receive _nightly updates_ with the latest
+features, improvements, and bugfixes.
+
+If this method doesn't work for you, or you want to use a different process, visit our [installation
+documentation](/packaging/installer/README.md) for details.
+
+## Netdata fundamentals
+
+[Step 1. Netdata's building blocks](step-01.md)
+
+In this introductory step, we'll talk about the fundamental ideas, philosophies, and UX decisions behind Netdata.
+
+[Step 2. Get to know Netdata's dashboard](step-02.md)
+
+Visit Netdata's dashboard to explore, manipulate charts, and check out alarms. Get your first taste of visual anomaly
+detection.
+
+[Step 3. Monitor more than one system with Netdata](step-03.md)
+
+While the dashboard lets you quickly move from one agent to another, Netdata Cloud is our SaaS solution for monitoring
+the health of many systems. We'll cover its features and the benefits of using Netdata Cloud on top of the dashboard.
+
+[Step 4. The basics of configuring Netdata](step-04.md)
+
+While Netdata can monitor thousands of metrics in real-time without any configuration, you may _want_ to tweak some
+settings based on your system's resources.
+
+## Intermediate steps
+
+[Step 5. Health monitoring alarms and notifications](step-05.md)
+
+Learn how to tune, silence, and write custom alarms. Then enable notifications so you never miss a change in health
+status or performance anomaly.
+
+[Step 6. Collect metrics from more services and apps](step-06.md)
+
+Learn how to enable/disable collection plugins and configure a collection plugin job to add more charts to your Netdata
+dashboard and begin monitoring more apps and services, like MySQL, Nginx, MongoDB, and hundreds more.
+
+[Step 7. Netdata's dashboard in depth](step-07.md)
+
+Now that you've configured your Netdata monitoring agent to your exact needs, you'll dive back into metrics snapshots,
+updates, and the dashboard's settings.
+
+## Advanced steps
+
+[Step 8. Building your first custom dashboard](step-08.md)
+
+Using simple HTML, CSS, and JavaScript, we'll build a custom dashboard that displays essential information in any format
+you choose. You can even monitor many systems from a single HTML file.
+
+[Step 9. Long-term metrics storage](step-09.md)
+
+By default, Netdata can store lots of real-time metrics, but you can also tweak our custom database engine to your
+heart's content. Want to take your Netdata metrics elsewhere? We're happy to help you archive data to Prometheus,
+MongoDB, TimescaleDB, and others.
+
+[Step 10. Set up a proxy](step-10.md)
+
+Run Netdata behind an Nginx proxy to improve performance, and enable TLS/HTTPS for better security.
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2Fguides%2Fstep-by-step%2Fstep-00&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/docs/guides/step-by-step/step-01.md b/docs/guides/step-by-step/step-01.md
new file mode 100644
index 0000000..cdcfcd7
--- /dev/null
+++ b/docs/guides/step-by-step/step-01.md
@@ -0,0 +1,156 @@
+<!--
+title: "Step 1. Netdata's building blocks"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/docs/guides/step-by-step/step-01.md
+-->
+
+# Step 1. Netdata's building blocks
+
+Netdata is a distributed and real-time _health monitoring and performance troubleshooting toolkit_ for monitoring your
+systems and applications.
+
+Because the monitoring agent is highly optimized, you can install it on all your physical systems, containers, IoT
+devices, and edge devices without disrupting their core function.
+
+By default, and without configuration, Netdata delivers real-time insights into everything happening on the system, from
+CPU utilization to packet loss on every network device. Netdata can also auto-detect metrics from hundreds of your
+favorite services and applications, like MySQL/MariaDB, Docker, Nginx, Apache, MongoDB, and more.
+
+All metrics are automatically updated, feeding interactive dashboards that allow you to dive in, discover anomalies,
+and get to the root cause of any issue.
+
+Best of all, Netdata is entirely free, open-source software! Solo developers and enterprises with thousands of systems
+can both use it free of charge. We're hosted on [GitHub](https://github.com/netdata/netdata).
+
+Want to learn about the history of Netdata, what inspired our CEO to build it in the first place, and where we're
+headed? Read Costa's comprehensive blog post: _[Redefining monitoring with Netdata (and how it came to
+be)](https://blog.netdata.cloud/posts/redefining-monitoring-netdata/)_.
+
+## What you'll learn in this step
+
+In the first step of the Netdata guide, you'll learn about:
+
+- [Netdata's core features](#netdatas-core-features)
+- [Why you should use Netdata](#why-you-should-use-netdata)
+- [How Netdata has complementary systems, not competitors](#how-netdata-has-complementary-systems-not-competitors)
+
+Let's get started!
+
+## Netdata's core features
+
+Netdata has only been around for a few years, but it's a complex piece of software. Here are just some of the features
+we'll cover throughout this guide.
+
+- A sophisticated **dashboard**, which we'll cover in [step 2](step-02.md). The real-time, highly-granular dashboard,
+ with hundreds of charts, is your main source of information about the health and performance of your systems/
+ applications. We designed the dashboard with anomaly detection and quick analysis in mind. We'll return to
+ dashboard-related topics in both [step 7](step-07.md) and [step 8](step-08.md).
+- **Long-term metrics storage** by default. With our new database engine, you can store days, weeks, or months of
+ per-second historical metrics. Or you can archive metrics to another database, like MongoDB or Prometheus. We'll
+ cover all these options in [step 9](step-09.md).
+- **No configuration necessary**. Without any configuration, you'll get thousands of real-time metrics and hundreds of
+ alarms designed by our community of sysadmin experts. But you _can_ configure Netdata in a lot of ways, some of
+ which we'll cover in [step 4](step-04.md).
+- **Distributed, per-system installation**. Instead of centralizing metrics in one location, you install Netdata on
+ _every_ system, and each system is responsible for its metrics. Having distributed agents reduces cost and lets
+ Netdata run on devices with little available resources, such as IoT and edge devices, without affecting their core
+ purpose.
+- **Sophisticated health monitoring** to ensure you always know when an anomaly hits. In [step 5](step-05.md), we dive
+ into how you can tune alarms, write your own alarm, and enable two types of notifications.
+- **High-speed, low-resource collectors** that allow you to collect thousands of metrics every second while using only
+ a fraction of your system's CPU resources and a few MiB of RAM.
+- **Netdata Cloud** is our SaaS toolkit that helps Netdata users monitor the health and performance of entire
+ infrastructures, whether they are two or two thousand (or more!) systems. We'll cover Netdata Cloud in [step
+ 3](step-03.md).
+
+## Why you should use Netdata
+
+Because you care about the health and performance of your systems and applications, and all of the awesome features we
+just mentioned. And it's free!
+
+All these may be valid reasons, but let's step back and talk about Netdata's _principles_ for health monitoring and
+performance troubleshooting. We have a lot of [complementary
+systems](#how-netdata-has-complementary-systems-not-competitors), and we think there's a good reason why Netdata should
+always be your first choice when troubleshooting an anomaly.
+
+We built Netdata on four principles.
+
+### Per-second data collection
+
+Our first principle is per-second data collection for all metrics.
+
+That matters because you can't monitor a 2-second service-level agreement (SLA) with 10-second metrics. You can't detect
+quick anomalies if your metrics don't show them.
+
+How do we solve this? By decentralizing monitoring. Each node is responsible for collecting metrics, triggering alarms,
+and building dashboards locally, and we work hard to ensure it does each step (and others) with remarkable efficiency.
+For example, Netdata can [collect 100,000 metrics](https://github.com/netdata/netdata/issues/1323) every second while
+using only 9% of a single server-grade CPU core!
+
+By decentralizing monitoring and emphasizing speed at every turn, Netdata helps you scale your health monitoring and
+performance troubleshooting to an infrastructure of every size. _And_ you get to keep per-second metrics in long-term
+storage thanks to the database engine.
+
+### Unlimited metrics
+
+We believe all metrics are fundamentally important, and all metrics should be available to the user.
+
+If you don't collect _all_ the metrics a system creates, you're only seeing part of the story. It's like saying you've
+read a book after skipping all but the last ten pages. You only know the ending, not everything that leads to it.
+
+Most monitoring solutions exist to poke you when there's a problem, and then tell you to use a dozen different console
+tools to find the root cause. Netdata prefers to give you every piece of information you might need to understand why an
+anomaly happened.
+
+### Meaningful presentation
+
+We want every piece of Netdata's dashboard not only to look good and update every second, but also to provide context
+as to what you're looking at and why it matters.
+
+The principle of meaningful presentation is fundamental to our dashboard's user experience (UX). We could have put
+charts in a grid or hidden some behind tabs or buttons. We instead chose to stack them vertically, on a single page, so
+you can visually see how, for example, a jump in disk usage can also increase system load.
+
+Here's an example of a system undergoing a disk stress test:
+
+![Screen Shot 2019-10-23 at 15 38
+32](https://user-images.githubusercontent.com/1153921/67439589-7f920700-f5ab-11e9-930d-fb0014900d90.png)
+
+> For the curious, here's the command: `stress-ng --fallocate 4 --fallocate-bytes 4g --timeout 1m --metrics --verify
+> --times`!
+
+### Immediate results
+
+Finally, Netdata should be usable from the moment you install it.
+
+As we've talked about, and as you'll learn in the following nine steps, Netdata comes installed with:
+
+- Auto-detected metrics
+- Human-readable units
+- Metrics that are structured into charts, families, and contexts
+- Automatically generated dashboards
+- Charts designed for visual anomaly detection
+- Hundreds of pre-configured alarms
+
+By standardizing your monitoring infrastructure, Netdata tries to make at least one part of your administrative tasks
+easy!
+
+## How Netdata has complementary systems, not competitors
+
+We'll cover this quickly, as you're probably eager to get on with using Netdata itself.
+
+We don't want to lock you into using Netdata by itself, forever. By supporting [archiving to
+external databases](/exporting/README.md) like Graphite, Prometheus, OpenTSDB, MongoDB, and others, you can use Netdata _in
+conjunction_ with software that might seem like our competitors.
+
+We don't want to "wage war" with another monitoring solution, whether it's commercial, open-source, or anything in
+between. We just want to give you all the metrics every second, and what you do with them next is your business, not
+ours. Our mission is helping people create more extraordinary infrastructures!
+
+## What's next?
+
+We think it's imperative you understand why we built Netdata the way we did. But now that we have that behind us, let's
+get right into that dashboard you've heard so much about.
+
+[Next: Get to know Netdata's dashboard &rarr;](step-02.md)
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2Fguides%2Fstep-by-step%2Fstep-01&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/docs/guides/step-by-step/step-02.md b/docs/guides/step-by-step/step-02.md
new file mode 100644
index 0000000..c87712c
--- /dev/null
+++ b/docs/guides/step-by-step/step-02.md
@@ -0,0 +1,208 @@
+<!--
+title: "Step 2. Get to know Netdata's dashboard"
+date: 2020-05-04
+custom_edit_url: https://github.com/netdata/netdata/edit/master/docs/guides/step-by-step/step-02.md
+-->
+
+# Step 2. Get to know Netdata's dashboard
+
+Welcome to Netdata proper! Now that you understand how Netdata works, how it's built, and why we built it, you can start
+working with the dashboard directly.
+
+This step-by-step guide assumes you've already installed Netdata on a system of yours. If you haven't yet, hop back over
+to ["step 0"](step-00.md#before-we-get-started) for information about our one-line installer script. Or, view the
+[installation docs](/packaging/installer/README.md) to learn more. Once you have Netdata installed, you can hop back
+over here and dig in.
+
+## What you'll learn in this step
+
+In this step of the Netdata guide, you'll learn how to:
+
+- [Visit and explore the dashboard](#visit-and-explore-the-dashboard)
+- [Explore available charts using menus](#explore-available-charts-using-menus)
+- [Read the descriptions accompanying charts](#read-the-descriptions-accompanying-charts)
+- [Interact with charts](#interact-with-charts)
+- [See raised alarms and the alarm log](#see-raised-alarms-and-the-alarm-log)
+
+Let's get started!
+
+## Visit and explore the dashboard
+
+Netdata's dashboard is where you interact with your system's metrics. Time to open it up and start exploring.
+
+Open up your web browser of choice and navigate to `http://NODE:19999`, replacing `NODE` with the IP address or hostname
+of your Agent. If you're unsure, try `http://localhost:19999` first. Hit **Enter**. Welcome to Netdata!
+
+![Animated GIF of navigating to the
+dashboard](https://user-images.githubusercontent.com/1153921/80825153-abaec600-8b94-11ea-8b17-1b770a2abaa9.gif)
+
+> From here on out in this guide, we'll refer to the address you use to view your dashboard as `NODE`. Be sure to
+> replace it with either `localhost`, the IP address, or the hostname of your system.
+
+## Explore available charts using menus
+
+**Menus** are located on the right-hand side of the Netdata dashboard. You can use these to navigate to the
+charts you're interested in.
+
+![Animated GIF of using the menus and
+submenus](https://user-images.githubusercontent.com/1153921/80832425-7c528600-8ba1-11ea-8140-d0a17a62009b.gif)
+
+Netdata shows all its charts on a single page, so you can also scroll up and down using the mouse wheel, your
+touchscreen/touchpad, or the scrollbar.
+
+Both menus and the items displayed beneath them, called **submenus**, are populated automatically by Netdata based on
+what it's collecting. If you run Netdata on many different systems using different OS types or versions, the
+menus and submenus may look a little different for each one.
+
+To learn more about menus, see our documentation about [navigating the standard
+dashboard](/web/gui/README.md#metrics-menus).
+
+> ❗ By default, Netdata only creates and displays charts if the metrics are _not zero_. So, you may be missing some
+> charts, menus, and submenus if those charts have zero metrics. You can change this by changing the **Which dimensions
+> to show?** setting to **All**. In addition, if you start Netdata and immediately load the dashboard, not all
+> charts/menus/submenus may be displayed, as some collectors can take a while to initialize.
+
+## Read the descriptions accompanying charts
+
+Many charts come with a short description of what dimensions the chart is displaying and why they matter.
+
+For example, here's the description that accompanies the **swap** chart.
+
+![Screenshot of the swap
+description](https://user-images.githubusercontent.com/1153921/63452078-477b1600-c3fa-11e9-836b-2fc90fba8b4b.png)
+
+If you're new to health monitoring and performance troubleshooting, we recommend you spend some time reading these
+descriptions and learning more at the pages linked above.
+
+## Understand charts, dimensions, families, and contexts
+
+A **chart** is an interactive visualization of one or more collected/calculated metrics. You can see the name (also
+known as its unique ID) of a chart by looking at the top-left corner of a chart and finding the parenthesized text. On a
+Linux system, one of the first charts on the dashboard will be the system CPU chart, with the name `system.cpu`:
+
+![Screenshot of the system CPU chart in the Netdata
+dashboard](https://user-images.githubusercontent.com/1153921/67443082-43b16e80-f5b8-11e9-8d33-d6ee052c6678.png)
+
+A **dimension** is any value that gets shown on a chart. The value can be raw data or calculated values, such as
+percentages, aggregates, and more. Most charts have more than one dimension, in which case the chart displays each in
+a different color. Here, a `system.cpu` chart is showing many dimensions, such as `user`, `system`, `softirq`, `irq`,
+and more.
+
+![Screenshot of the dimensions shown in the system CPU chart in the Netdata
+dashboard](https://user-images.githubusercontent.com/1153921/62721031-2bba4d80-b9c0-11e9-9dca-32403617ce72.png)
+
+A **family** is _one_ instance of a hardware or software resource that needs to be monitored and displayed separately
+from similar instances. For example, if your system has multiple disk partitions, Netdata creates different families
+for `/`, `/boot`, `/home`, and so on. The same goes for entire disks, network devices, and more.
+
+![A number of families created for disk partitions](https://user-images.githubusercontent.com/1153921/67896952-a788e980-fb1a-11e9-880b-2dfb3945c8d6.png)
+
+A **context** groups several charts based on the types of metrics being collected and displayed. For example, the
+**Disk** section often has many contexts: `disk.io`, `disk.ops`, `disk.backlog`, `disk.util`, and so on. Netdata uses
+this context to create individual charts and then groups them by family. You can always see the context of any chart by
+looking at its name or hovering over the chart's date.
+
+It's important to understand these differences, as Netdata uses charts, dimensions, families, and contexts to create
+health alarms and configure collectors. To read even more about the differences between all these elements of the
+dashboard, and how they affect other parts of Netdata, read our [dashboards
+documentation](/web/README.md#charts-contexts-families).
+
+## Interact with charts
+
+We built Netdata to be a big sandbox for learning more about your systems and applications. Time to play!
+
+Netdata's charts are fully interactive. You can pan through historical metrics, zoom in and out, select specific
+timeframes for further analysis, resize charts, and more.
+
+Best of all, whenever you use a chart in this way, Netdata synchronizes all the other charts to match it.
+
+![Animated GIF of the standard Netdata dashboard being manipulated and synchronizing
+charts](https://user-images.githubusercontent.com/1153921/81867875-3d6beb00-9526-11ea-94b8-388951e2e03d.gif)
+
+### Pan, zoom, highlight, and reset charts
+
+You can change how charts show their metrics in a few different ways, each of which has a few methods:
+
+| Change | Method #1 | Method #2 | Method #3 |
+| ------------------------------------------------- | ----------------------------------- | --------------------------------------------------------- | ---------------------------------------------------------- |
+| **Reset** charts to default auto-refreshing state | `double click` | `double tap` (touchpad/touchscreen) | |
+| **Select** a certain timeframe | `ALT` + `mouse selection` | `⌘` + `mouse selection` (macOS) | |
+| **Pan** forward or back in time | `click and drag` | `touch and drag` (touchpad/touchscreen) | |
+| **Zoom** to a specific timeframe | `SHIFT` + `mouse selection` | | |
+| **Zoom** in/out | `SHIFT`/`ALT` + `mouse scrollwheel` | `SHIFT`/`ALT` + `two-finger pinch` (touchpad/touchscreen) | `SHIFT`/`ALT` + `two-finger scroll` (touchpad/touchscreen) |
+
+These interactions can also be triggered using the icons on the bottom-right corner of every chart. They are,
+respectively, `Pan Left`, `Reset`, `Pan Right`, `Zoom In`, and `Zoom Out`.
+
+### Show and hide dimensions
+
+You can hide any dimension by clicking on its name in the chart's legend. Hiding dimensions simplifies the chart and
+can help you better discover exactly which aspect of your system is behaving strangely.
+
+### Resize charts
+
+Additionally, resize charts by clicking-and-dragging the icon on the bottom-right corner of any chart. To restore the
+chart to its original height, double-click the same icon.
+
+![Animated GIF of resizing a chart and resetting it to the default
+height](https://user-images.githubusercontent.com/1153921/80842459-7d41e280-8bb6-11ea-9488-1bc29f94d7f2.gif)
+
+To learn more about other options and chart interactivity, read our [dashboard documentation](/web/README.md).
+
+## See raised alarms and the alarm log
+
+Aside from performance troubleshooting, the Agent helps you monitor the health of your systems and applications. That's
+why every Netdata installation comes with dozens of pre-configured alarms that trigger alerts when your system starts
+acting strangely.
+
+Click the **Alarms** button in the top navigation to bring up a modal that shows currently raised alarms, all running
+alarms, and the alarm log.
+
+Here is an example of a raised `system.cpu` alarm, followed by the full list and alarm log:
+
+![Animated GIF of looking at raised alarms and the alarm
+log](https://user-images.githubusercontent.com/1153921/80842482-8c289500-8bb6-11ea-9791-600cfdbe82ce.gif)
+
+And a static screenshot of the raised CPU alarm:
+
+![Screenshot of a raised system CPU alarm](https://user-images.githubusercontent.com/1153921/80842330-2dfbb200-8bb6-11ea-8147-3cd366eb0f37.png)
+
+The alarm itself is named **system - cpu**, and its context is `system.cpu`. Beneath that is an auto-updating badge
+that shows the latest value of the chart that triggered the alarm.
+
+With the three icons beneath that and the **role** designation, you can:
+
+1. Scroll to the chart associated with this raised alarm.
+2. Copy a link to the badge to your clipboard.
+3. Copy the code to embed the badge onto another web page using an `<embed>` element.
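+
+For example, the embed code from the third icon looks roughly like this, since every badge is just an SVG served by
+the Agent's badge API (the chart and alarm names below are illustrative):
+
+```html
+<embed src="http://NODE:19999/api/v1/badge.svg?chart=system.cpu&alarm=10min_cpu_usage" type="image/svg+xml" />
+```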
+
+The table on the right-hand side displays information about the alarm's configuration. In the above example, Netdata
+triggers a warning alarm when CPU usage is between 75 and 85%, and a critical alarm when it's above 85%. It's a
+_little_ more complicated than that, but we'll get into more complex health entity configurations in a later step.
+
+The `calculation` field is the equation used to calculate those percentages, and the `check every` field specifies how
+often Netdata should be calculating these metrics to see if the alarm should remain triggered.
+
+The `execute` field tells Netdata how to notify you about this alarm, and the `source` field lets you know where you can
+find the configuration file, if you'd like to edit its configuration.
+
+We'll cover alarm configuration in more detail later in the guide, so don't worry about it too much for now! Right
+now, it's most important that you understand how to see alarms, and parse their details, if and when they appear on your
+system.
+
+## What's next?
+
+In this step of the Netdata guide, you learned how to:
+
+- Visit the dashboard
+- Explore available charts (using the right-side menu)
+- Read the descriptions accompanying charts
+- Interact with charts
+- See raised alarms and the alarm log
+
+Next, you'll learn how to monitor multiple nodes through the dashboard.
+
+[Next: Monitor more than one system with Netdata →](step-03.md)
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2Fguides%2Fstep-by-step%2Fstep-02&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/docs/guides/step-by-step/step-03.md b/docs/guides/step-by-step/step-03.md
new file mode 100644
index 0000000..2319adb
--- /dev/null
+++ b/docs/guides/step-by-step/step-03.md
@@ -0,0 +1,91 @@
+<!--
+title: "Step 3. Monitor more than one system with Netdata"
+date: 2020-05-01
+custom_edit_url: https://github.com/netdata/netdata/edit/master/docs/guides/step-by-step/step-03.md
+-->
+
+# Step 3. Monitor more than one system with Netdata
+
+The Netdata agent is _distributed_ by design. That means each agent operates independently from any other, collecting
+and creating charts only for the system you installed it on. We made this decision a long time ago to [improve security
+and performance](step-01.md).
+
+You might be thinking, "So, now I have to remember all these IP addresses, and type them into my browser
+manually, to move from one system to another? Maybe I should just make a bunch of bookmarks. What's a few more tabs
+on top of the hundred I have already?"
+
+We get it. That's why we built [Netdata Cloud](https://learn.netdata.cloud/docs/cloud/), which connects many distributed
+agents for a seamless experience when monitoring an entire infrastructure of Netdata-monitored nodes.
+
+![Animated GIF of Netdata
+Cloud](https://user-images.githubusercontent.com/1153921/80828986-1ebb3b00-8b9b-11ea-957f-2c8d0d009e44.gif)
+
+## What you'll learn in this step
+
+In this step of the Netdata guide, we'll talk about the following:
+
+- [Why you should use Netdata Cloud](#why-use-netdata-cloud)
+- [Get started with Netdata Cloud](#get-started-with-netdata-cloud)
+- [Navigate between dashboards with Visited Nodes](#navigate-between-dashboards-with-visited-nodes)
+
+## Why use Netdata Cloud?
+
+Our [Cloud documentation](https://learn.netdata.cloud/docs/cloud/) does a good job (we think!) of explaining why Cloud
+gives you a ton of value at no cost:
+
+> Netdata Cloud gives you real-time visibility for your entire infrastructure. With Netdata Cloud, you can run all your
+> distributed Agents in headless mode _and_ access the real-time metrics and insightful charts from their dashboards.
+> View key metrics and active alarms at-a-glance, and then seamlessly dive into any of your distributed dashboards
+> without leaving Cloud's centralized interface.
+
+You can add as many nodes and team members as you need, and as our free and open source Agent gains more features,
+new collectors for more applications, and an improved UI, so does Cloud.
+
+## Get started with Netdata Cloud
+
+Signing in, onboarding, and claiming your first nodes only takes a few minutes, and we have a [Get started with
+Cloud](https://learn.netdata.cloud/docs/cloud/get-started) guide to help you walk through every step.
+
+Or, if you're feeling confident, dive right in.
+
+<p><a href="https://app.netdata.cloud" className="button button--lg">Sign in to Cloud</a></p>
+
+When you finish that guide, circle back to this step in the guide to learn how to use the Visited Nodes feature on
+top of Cloud's centralized web interface.
+
+## Navigate between dashboards with Visited Nodes
+
+To add a node to your visited nodes, you first need to navigate to that node's dashboard, then click the **Sign in**
+button at the top of the dashboard. On the screen that appears, which states your node is requesting access to your
+Netdata Cloud account, sign in with your preferred method.
+
+Cloud redirects you back to your node's dashboard, which is now connected to your Netdata Cloud account. You can now see the menu populated by a single visited node.
+
+![An Agent's dashboard with the Visited nodes
+menu](https://user-images.githubusercontent.com/1153921/80830383-b6ba2400-8b9d-11ea-9eb2-379c7eccd22f.png)
+
+If you previously went through the Cloud onboarding process to create a Space and War Room, you will also see these
+alongside your visited nodes. You can click on your Space or any of your War Rooms to navigate to Netdata Cloud and
+continue monitoring your infrastructure from there.
+
+![An Agent's dashboard with the Visited nodes menu, plus Spaces and War
+Rooms](https://user-images.githubusercontent.com/1153921/80830382-b6218d80-8b9d-11ea-869c-1170b95eeb4a.png)
+
+To add other visited nodes, navigate to their dashboard and sign in to Cloud by clicking on the **Sign in** button. This
+process connects that node to your Cloud account and further populates the menu.
+
+Once you've added more than one node, you can use the menu to switch between various dashboards without remembering IP
+addresses or hostnames or saving bookmarks for every node you want to monitor.
+
+![Switching between dashboards with Visited
+nodes](https://user-images.githubusercontent.com/1153921/80831018-e158ac80-8b9e-11ea-882e-1d82cdc028cd.gif)
+
+## What's next?
+
+Now that you have a Netdata Cloud account with a claimed node (or a few!) and can navigate between your dashboards with
+Visited nodes, it's time to learn more about how you can configure Netdata to your liking. From there, you'll be able to
+customize your Netdata experience to your exact infrastructure and the information you need.
+
+[Next: The basics of configuring Netdata &rarr;](step-04.md)
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2Fguides%2Fstep-by-step%2Fstep-03&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/docs/guides/step-by-step/step-04.md b/docs/guides/step-by-step/step-04.md
new file mode 100644
index 0000000..0495145
--- /dev/null
+++ b/docs/guides/step-by-step/step-04.md
@@ -0,0 +1,144 @@
+<!--
+title: "Step 4. The basics of configuring Netdata"
+date: 2020-03-31
+custom_edit_url: https://github.com/netdata/netdata/edit/master/docs/guides/step-by-step/step-04.md
+-->
+
+# Step 4. The basics of configuring Netdata
+
+Welcome to the fourth step of the Netdata guide.
+
+Since the beginning, we've covered the building blocks of Netdata, dashboard basics, and how you can monitor many
+individual systems using many distributed Netdata agents.
+
+Next up: configuration.
+
+## What you'll learn in this step
+
+We'll talk about Netdata's default configuration, and then you'll learn how to do the following:
+
+- [Find your `netdata.conf` file](#find-your-netdataconf-file)
+- [Use edit-config to open `netdata.conf`](#use-edit-config-to-open-netdataconf)
+- [Navigate the structure of `netdata.conf`](#the-structure-of-netdataconf)
+- [Edit your `netdata.conf` file](#edit-your-netdataconf-file)
+
+## Find your `netdata.conf` file
+
+Netdata primarily uses the `netdata.conf` file to configure its core functionality. `netdata.conf` resides within your
+**Netdata config directory**.
+
+The location of that directory and `netdata.conf` depends on your operating system and the method you used to install
+Netdata.
+
+The most reliable method of finding your Netdata config directory is loading your `netdata.conf` in your browser. Open
+a new tab and navigate to `http://HOST:19999/netdata.conf`. Your browser will load a text document that looks like this:
+
+![A netdata.conf file opened in the
+browser](https://user-images.githubusercontent.com/1153921/68346763-344f1c80-00b2-11ea-9d1d-0ccac74d5558.png)
+
+Look for the line that begins with `# config directory = `. The text after that will be the path to your Netdata config
+directory.
+
+In the system represented by the screenshot, the line reads: `config directory = /etc/netdata`. That means
+`netdata.conf`, and all the other configuration files, can be found at `/etc/netdata`.
+
+> For more details on where your Netdata config directory is, take a look at our [installation
+> instructions](/packaging/installer/README.md).
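+
+If you prefer the command line, you can pull the same line out of the Agent's live configuration. A small sketch,
+assuming the Agent listens on `localhost:19999`:
+
+```bash
+# print the config directory line from the running Agent's configuration
+curl -s http://localhost:19999/netdata.conf | grep 'config directory'
+```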
+
+For the rest of this guide, we'll assume you're editing files or running scripts from _within_ your **Netdata
+configuration directory**.
+
+## Use edit-config to open `netdata.conf`
+
+Inside your Netdata config directory, there is a helper script called `edit-config`. This script will open existing
+Netdata configuration files using a text editor. Or, if the configuration file doesn't yet exist, the script will copy
+an example file to your Netdata config directory and then allow you to edit it before saving it.
+
+> `edit-config` will use the `EDITOR` environment variable on your system to edit the file. On many systems, it
+> defaults to `vim` or `nano`. We highly recommend `nano` for beginners. To change this variable for the current
+> session (it will revert to the default when you reboot), export a new value: `export EDITOR=nano`. Or, [make the
+> change permanent](https://stackoverflow.com/questions/13046624/how-to-permanently-export-a-variable-in-linux).
+
+Let's give it a shot. Navigate to your Netdata config directory. To use `edit-config` on `netdata.conf`, you need to
+have permissions to edit the file. On Linux/macOS systems, you can usually use `sudo` to elevate your permissions.
+
+```bash
+cd /etc/netdata # Replace with your Netdata config directory, if different (see the previous section)
+sudo ./edit-config netdata.conf
+```
+
+You should now see `netdata.conf` in your editor! Let's walk through how the file is structured.
+
+## The structure of `netdata.conf`
+
+There are two main parts of the file to note: **sections** and **options**.
+
+The `netdata.conf` file is broken up into various **sections**, such as `[global]`, `[web]`, and `[registry]`. Each
+section contains the configuration options for some core component of Netdata.
+
+Each section also contains many **options**. Options have a name and a value. With the option `config directory =
+/etc/netdata`, `config directory` is the name, and `/etc/netdata` is the value.
+
+Most lines are **commented**, in that they start with a hash symbol (`#`), and the value is set to a sane default. To
+tell Netdata that you'd like to change any option from its default value, you must **uncomment** it by removing that
+hash.
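+
+For instance, here's a minimal sketch using the `[global]` section's `update every` option:
+
+```conf
+[global]
+    # commented: this line is ignored and the default applies
+    # update every = 1
+
+    # uncommented: this value overrides the default
+    update every = 2
+```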
+
+### Edit your `netdata.conf` file
+
+Let's try editing the options in `netdata.conf` to see how the process works.
+
+First, add a fake option to show you how Netdata loads its configuration files. Add a `test` option under the `[global]`
+section and give it the value of `1`.
+
+```conf
+[global]
+ test = 1
+```
+
+Restart Netdata using the [appropriate method](/docs/getting-started.md#start-stop-and-restart-netdata) for your system.
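+
+On most systems, that's one of the following:
+
+```bash
+sudo systemctl restart netdata   # systemd-based systems
+sudo service netdata restart     # SysV-style init systems
+```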
+
+Now, open up your browser and navigate to `http://HOST:19999/netdata.conf`. You'll see that Netdata has recognized
+that our fake option isn't valid and added a notice that Netdata will ignore it.
+
+Here's the process in GIF form!
+
+![Animated GIF of creating a fake option in
+netdata.conf](https://user-images.githubusercontent.com/1153921/65470254-4422e200-de1f-11e9-9597-a97c89ee59b8.gif)
+
+Now, let's make a slightly more substantial edit to `netdata.conf`: change the Agent's name.
+
+If you edit the value of the `hostname` option, you can change the name of your Netdata Agent on the dashboard and a
+handful of other places, like the Visited nodes menu _and_ Netdata Cloud.
+
+Use `edit-config` to change the `hostname` option to a name like `hello-world`. Be sure to uncomment it!
+
+```conf
+[global]
+ hostname = hello-world
+```
+
+Once you're done, restart Netdata and refresh the dashboard. Say hello to your renamed agent!
+
+![Animated GIF of editing the hostname option in
+netdata.conf](https://user-images.githubusercontent.com/1153921/80994808-1c065300-8df2-11ea-81af-d28dc3ba27c8.gif)
+
+Netdata has dozens upon dozens of options you can change. To see them all, read our [daemon
+configuration](/daemon/config/README.md), or hop into our popular guide on [increasing long-term metrics
+storage](/docs/guides/longer-metrics-storage.md).
+
+## What's next?
+
+At this point, you should be comfortable with getting to your Netdata directory, opening and editing `netdata.conf`, and
+seeing your changes reflected in the dashboard.
+
+Netdata has many more configuration files that you might want to change, but we'll cover those in the following steps of
+this guide.
+
+In the next step, we're going to cover one of Netdata's core functions: monitoring the health of your systems via alarms
+and notifications. You'll learn how to disable alarms, create new ones, and push notifications to the system of your
+choosing.
+
+[Next: Health monitoring alarms and notifications &rarr;](step-05.md)
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2Fguides%2Fstep-by-step%2Fstep-04&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/docs/guides/step-by-step/step-05.md b/docs/guides/step-by-step/step-05.md
new file mode 100644
index 0000000..5e62763
--- /dev/null
+++ b/docs/guides/step-by-step/step-05.md
@@ -0,0 +1,343 @@
+<!--
+title: "Step 5. Health monitoring alarms and notifications"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/docs/guides/step-by-step/step-05.md
+-->
+
+# Step 5. Health monitoring alarms and notifications
+
+In the fifth step of the Netdata guide, we're introducing you to one of our core features: **health monitoring**.
+
+To accurately monitor the health of your systems and applications, you need to know _immediately_ when there's something
+strange going on. Netdata's alarm and notification systems are essential to keeping you informed.
+
+Netdata comes with hundreds of pre-configured alarms that don't require configuration. They were designed by our
+community of system administrators to cover the most important parts of production systems, so, in many cases, you won't
+need to edit them.
+
+Luckily, Netdata's alarm and notification systems are incredibly adaptable to your infrastructure's unique needs.
+
+## What you'll learn in this step
+
+We'll talk about Netdata's default alarms, and then you'll learn how to do the following:
+
+- [Tune Netdata's pre-configured alarms](#tune-netdatas-pre-configured-alarms)
+- [Write your first health entity](#write-your-first-health-entity)
+- [Enable Netdata's notification systems](#enable-netdatas-notification-systems)
+
+## Tune Netdata's pre-configured alarms
+
+First, let's tune an alarm that came pre-configured with your Netdata installation.
+
+The first chart you see on any Netdata dashboard is the `system.cpu` chart, which shows the system's CPU utilization
+across all cores. To figure out which file you need to edit to tune this alarm, click the **Alarms** button at the top
+of the dashboard, click on the **All** tab, and find the **system - cpu** alarm entity.
+
+![The system - cpu alarm
+entity](https://user-images.githubusercontent.com/1153921/67034648-ebb4cc80-f0cc-11e9-9d49-1023629924f5.png)
+
+Look at the `source` row in the table. This means the `system.cpu` chart sources its health alarms from
+`4@/usr/lib/netdata/conf.d/health.d/cpu.conf`. To tune these alarms, you'll need to edit the alarm file at
+`health.d/cpu.conf`. Go to your [Netdata config directory](step-04.md#find-your-netdataconf-file) and use the
+`edit-config` script.
+
+```bash
+sudo ./edit-config health.d/cpu.conf
+```
+
+The first **health entity** in that file looks like this:
+
+```yaml
+template: 10min_cpu_usage
+ on: system.cpu
+ os: linux
+ hosts: *
+ lookup: average -10m unaligned of user,system,softirq,irq,guest
+ units: %
+ every: 1m
+ warn: $this > (($status >= $WARNING) ? (75) : (85))
+ crit: $this > (($status == $CRITICAL) ? (85) : (95))
+ delay: down 15m multiplier 1.5 max 1h
+ info: average cpu utilization for the last 10 minutes (excluding iowait, nice and steal)
+ to: sysadmin
+```
+
+Let's say you want to tune this alarm to trigger warning and critical alarms at a lower CPU utilization. You can change
+the `warn` and `crit` lines to the values of your choosing. For example:
+
+```yaml
+ warn: $this > (($status >= $WARNING) ? (60) : (75))
+ crit: $this > (($status == $CRITICAL) ? (75) : (85))
+```
+
+You _can_ [restart Netdata](/docs/getting-started.md#start-stop-and-restart-netdata) to apply your changes, but you can
+also reload _only_ the health monitoring component using one of the available [methods](/health/QUICKSTART.md#reload-health-configuration).
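+
+For example, using the Netdata CLI:
+
+```bash
+# reload only the health configuration, without restarting the whole Agent
+sudo netdatacli reload-health
+```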
+
+You can also tune any other aspect of the default alarms. To better understand how each line in a health entity works,
+read our [health documentation](/health/README.md).
+
+### Silence an individual alarm
+
+Many Netdata users don't need all the default alarms enabled. Instead of disabling any given alarm, or even _all_
+alarms, you can silence individual alarms by changing one line in a given health entity. Let's look at that
+`health.d/cpu.conf` file again.
+
+```yaml
+template: 10min_cpu_usage
+ on: system.cpu
+ os: linux
+ hosts: *
+ lookup: average -10m unaligned of user,system,softirq,irq,guest
+ units: %
+ every: 1m
+ warn: $this > (($status >= $WARNING) ? (75) : (85))
+ crit: $this > (($status == $CRITICAL) ? (85) : (95))
+ delay: down 15m multiplier 1.5 max 1h
+ info: average cpu utilization for the last 10 minutes (excluding iowait, nice and steal)
+ to: sysadmin
+```
+
+To silence this alarm, change `sysadmin` to `silent`.
+
+```yaml
+ to: silent
+```
+
+Use `netdatacli reload-health` to reload your health configuration. You can add `to: silent` to any alarm you'd rather
+not receive notifications for.
+
+## Write your first health entity
+
+The best way to understand how health entities work is building your own and experimenting with the options. To start,
+let's build a health entity that triggers an alarm when system RAM usage goes above 80%.
+
+The first line in a health entity will be `alarm:`. This is how you name your entity. You can give it any name you
+choose, but the only symbols allowed are `.` and `_`. Let's call the alarm `ram_usage`.
+
+```yaml
+ alarm: ram_usage
+```
+
+> You'll see some funky indentation in the lines coming up. Don't worry about it too much! Indentation is not important
+> to how Netdata processes entities, and it will make sense when you're done.
+
+Next, you need to specify which chart this entity listens to via the `on:` line. You're declaring that you want this
+alarm to check metrics on the `system.ram` chart.
+
+```yaml
+ on: system.ram
+```
+
+Now comes the `lookup`. This line specifies what metrics the alarm is looking for, what duration of time it's looking
+at, and how to process the metrics into a more usable format.
+
+```yaml
+lookup: average -1m percentage of used
+```
+
+Let's take a moment to break this line down.
+
+- `average`: Calculate the average of all the metrics collected.
+- `-1m`: Use metrics from 1 minute ago until now to calculate that average.
+- `percentage`: Clarify that you want to calculate a percentage of RAM usage.
+- `of used`: Specify which dimension (`used`) on the `system.ram` chart you want to monitor with this entity.
+
+In other words, you're taking 1 minute's worth of metrics from the `used` dimension on the `system.ram` chart,
+calculating their average, and returning it as a percentage.
+
+You can move on to the `units` line, which lets Netdata know that we're working with a percentage and not an absolute
+unit.
+
+```yaml
+ units: %
+```
+
+Next, the `every` line tells Netdata how often to perform the calculation you specified in the `lookup` line. For
+certain alarms, you might want to use a shorter duration, which you can specify using values like `10s`.
+
+```yaml
+ every: 1m
+```
+
+We'll put the next two lines—`warn` and `crit`—together. In these lines, you declare at which percentage you want to
+trigger a warning or critical alarm. Notice the variable `$this`, which is the value calculated by the `lookup` line.
+These lines will trigger a warning if that average RAM usage goes above 80%, and a critical alert if it's above 90%.
+
+```yaml
+ warn: $this > 80
+ crit: $this > 90
+```
+
+> ❗ Most default Netdata alarms come with more complicated `warn` and `crit` lines. You may have noticed the line `warn:
+> $this > (($status >= $WARNING) ? (75) : (85))` in one of the health entity examples above, which is an example of
+> using the [conditional operator for hysteresis](/health/REFERENCE.md#special-use-of-the-conditional-operator).
+> Hysteresis is used to keep Netdata from triggering a ton of alerts if the metric being tracked quickly goes above and
+> then falls below the threshold. For this very simple example, we'll skip hysteresis, but recommend implementing it in
+> your future health entities.
+
+Finish off with the `info` line, which creates a description of the alarm that will then appear in any
+[notification](#enable-netdatas-notification-systems) you set up. This line is optional, but it has value—think of it as
+documentation for a health entity!
+
+```yaml
+ info: The percentage of RAM being used by the system.
+```
+
+Here's what the entity looks like in full. Now you can see why we indented the lines, too.
+
+```yaml
+ alarm: ram_usage
+ on: system.ram
+lookup: average -1m percentage of used
+ units: %
+ every: 1m
+ warn: $this > 80
+ crit: $this > 90
+ info: The percentage of RAM being used by the system.
+```
+
+What about what it looks like on the Netdata dashboard?
+
+![An active alert for the ram_usage alarm](https://user-images.githubusercontent.com/1153921/67056219-f89ee380-f0ff-11e9-8842-7dc210dd2908.png)
+
+If you'd like to try this alarm on your system, you can install a small program called
+[stress](http://manpages.ubuntu.com/manpages/disco/en/man1/stress.1.html) to create a synthetic load. Use the command
+below, and change the `8G` value to a number that's appropriate for the amount of RAM on your system.
+
+```bash
+stress -m 1 --vm-bytes 8G --vm-keep
+```
+
+Netdata is capable of understanding much more complicated entities. To better understand how they work, read the [health
+documentation](/health/README.md), look at some [examples](/health/REFERENCE.md#example-alarms), and open the files
+containing the default entities on your system.
+
+## Enable Netdata's notification systems
+
+Health alarms, while great on their own, are pretty useless without some way for you to know they've been triggered.
+That's why Netdata comes with a notification system that supports more than a dozen services, such as email, Slack,
+Discord, PagerDuty, Twilio, Amazon SNS, and much more.
+
+To see all the supported systems, visit our [notifications documentation](/health/notifications/).
+
+We'll cover email and Slack notifications here, but with this knowledge you should be able to enable any other type of
+notifications instead of or in addition to these.
+
+### Email notifications
+
+To use email notifications, you need `sendmail` or an equivalent installed on your system. Linux systems use `sendmail`
+or similar programs to, unsurprisingly, send emails to any inbox.
+
+> Learn more about `sendmail` via its [documentation](http://www.postfix.org/sendmail.1.html).
+
+Edit the `health_alarm_notify.conf` file, which resides in your Netdata config directory.
+
+```bash
+sudo ./edit-config health_alarm_notify.conf
+```
+
+Look for the following lines:
+
+```conf
+# if a role recipient is not configured, an email will be send to:
+DEFAULT_RECIPIENT_EMAIL="root"
+# to receive only critical alarms, set it to "root|critical"
+```
+
+Change the value of `DEFAULT_RECIPIENT_EMAIL` to the email address at which you'd like to receive notifications.
+
+```conf
+# if a role recipient is not configured, an email will be sent to:
+DEFAULT_RECIPIENT_EMAIL="me@example.com"
+# to receive only critical alarms, set it to "root|critical"
+```
+
+Test the email notification system by first becoming the Netdata user and then asking Netdata to send a test alarm:
+
+```bash
+sudo su -s /bin/bash netdata
+/usr/libexec/netdata/plugins.d/alarm-notify.sh test
+```
+
+You should see output similar to this:
+
+```bash
+# SENDING TEST WARNING ALARM TO ROLE: sysadmin
+2019-10-17 18:23:38: alarm-notify.sh: INFO: sent email notification for: hostname test.chart.test_alarm is WARNING to 'me@example.com'
+# OK
+
+# SENDING TEST CRITICAL ALARM TO ROLE: sysadmin
+2019-10-17 18:23:38: alarm-notify.sh: INFO: sent email notification for: hostname test.chart.test_alarm is CRITICAL to 'me@example.com'
+# OK
+
+# SENDING TEST CLEAR ALARM TO ROLE: sysadmin
+2019-10-17 18:23:39: alarm-notify.sh: INFO: sent email notification for: hostname test.chart.test_alarm is CLEAR to 'me@example.com'
+# OK
+```
+
+... and you should get three separate emails, one for each test alarm, in your inbox! (Be sure to check your spam
+folder.)
+
+### Slack notifications
+
+If you're one of the many who spend their workday getting pinged with GIFs by your colleagues, why not add Netdata
+notifications to the mix? It's a great way to immediately see, collaborate around, and respond to anomalies in your
+infrastructure.
+
+To get Slack notifications working, you first need to add an [incoming
+webhook](https://slack.com/apps/A0F7XDUAZ-incoming-webhooks) to the channel of your choice. Click the green **Add to
+Slack** button, choose the channel, and click the **Add Incoming WebHooks Integration** button.
+
+On the following page, you'll receive a **Webhook URL**. That's what you'll need to configure Netdata, so keep it handy.
+
+Time to dive back into your `health_alarm_notify.conf` file:
+
+```bash
+sudo ./edit-config health_alarm_notify.conf
+```
+
+Look for the `SLACK_WEBHOOK_URL=" "` line and add the incoming webhook URL you got from Slack:
+
+```conf
+SLACK_WEBHOOK_URL="https://hooks.slack.com/services/XXXXXXXXX/XXXXXXXXX/XXXXXXXXXXXX"
+```
+
+A few lines down, edit the `DEFAULT_RECIPIENT_SLACK` line to contain a single hash `#` character. This instructs Netdata
+to send a notification to the channel you configured with the incoming webhook.
+
+```conf
+DEFAULT_RECIPIENT_SLACK="#"
+```
+
+Time to test the notifications again!
+
+```bash
+sudo su -s /bin/bash netdata
+/usr/libexec/netdata/plugins.d/alarm-notify.sh test
+```
+
+You should receive three notifications in your Slack channel.
+
+Congratulations! You're set up with two awesome ways to get notified about any change in the health of your systems or
+applications.
+
+To further configure your email or Slack notification setup, or to enable other notification systems, check out the
+following documentation:
+
+- [Email notifications](/health/notifications/email/README.md)
+- [Slack notifications](/health/notifications/slack/README.md)
+- [Netdata's notification system](/health/notifications/README.md)
+
+## What's next?
+
+In this step, you learned the fundamentals of Netdata's health monitoring tools: alarms and notifications. You should be
+able to tune default alarms, silence them, and understand some of the basics of writing health entities. And, if you so
+chose, you'll now have both email and Slack notifications enabled.
+
+You're coming along quickly!
+
+Next up, we're going to cover how Netdata collects its metrics, and how you can get Netdata to collect real-time metrics
+from hundreds of services with almost no configuration on your part. Onward!
+
+[Next: Collect metrics from more services and apps &rarr;](step-06.md)
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2Fguides%2Fstep-by-step%2Fstep-05&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/docs/guides/step-by-step/step-06.md b/docs/guides/step-by-step/step-06.md
new file mode 100644
index 0000000..160b1b1
--- /dev/null
+++ b/docs/guides/step-by-step/step-06.md
@@ -0,0 +1,122 @@
+<!--
+title: "Step 6. Collect metrics from more services and apps"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/docs/guides/step-by-step/step-06.md
+-->
+
+# Step 6. Collect metrics from more services and apps
+
+When Netdata _starts_, it auto-detects dozens of **data sources**, such as database servers, web servers, and more.
+
+To auto-detect and collect metrics from a source you just installed, you need to [restart
+Netdata](/docs/getting-started.md#start-stop-and-restart-netdata).
+
+However, auto-detection only works if you installed the source using its standard installation
+procedure. If Netdata isn't collecting metrics after a restart, your source probably isn't configured
+correctly.
+
+Check out the [collectors that come pre-installed with Netdata](/collectors/COLLECTORS.md) to find the module for the
+source you want to monitor.
+
+## What you'll learn in this step
+
+We'll begin with an overview of Netdata's collector architecture, and then dive into the following:
+
+- [Netdata's collector architecture](#netdatas-collector-architecture)
+- [Enable and disable plugins](#enable-and-disable-plugins)
+- [Enable the Nginx collector as an example](#example-enable-the-nginx-collector)
+
+## Netdata's collector architecture
+
+Many Netdata users never have to configure collectors or worry about which plugin orchestrator they want to use.
+
+But, if you want to configure a collector or write one for your custom source, it's important to understand the
+underlying architecture.
+
+By default, Netdata collects a lot of metrics every second using any number of discrete collectors. Collectors, in turn,
+are organized and managed by plugins. **Internal** plugins collect system metrics, **external** plugins collect
+non-system metrics, and **orchestrator** plugins group individual collectors together based on the programming language
+they were built in.
+
+These modules are primarily written in [Go](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/) (`go.d`) and
+[Python](/collectors/python.d.plugin/README.md), although some use [Bash](/collectors/charts.d.plugin/README.md)
+(`charts.d`) or [Node.js](/collectors/node.d.plugin/README.md) (`node.d`).
+
+## Enable and disable plugins
+
+You don't need to explicitly enable plugins to auto-detect properly configured sources, but it's useful to know how to
+enable or disable them.
+
+One reason you might want to _disable_ plugins is to improve Netdata's performance on low-resource systems, like
+ephemeral nodes or edge devices. Disabling orchestrator plugins like `python.d` can save significant resources if you're
+not using any of its data collector modules.
+
+You can enable or disable plugins in the `[plugins]` section of `netdata.conf`. This section features a list of all the
+plugins with a boolean setting (`yes` or `no`) to enable or disable them. Be sure to uncomment the line by removing the
+hash (`#`)!
+
+Enabled:
+
+```conf
+[plugins]
+ # node.d = yes
+```
+
+Disabled:
+
+```conf
+[plugins]
+ node.d = no
+```
+
+When you explicitly disable a plugin this way, it won't auto-collect metrics using its collectors.
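+
+You can also disable a single collector module while leaving its orchestrator enabled. For `python.d` modules, that
+toggle lives in `python.d.conf`, opened with `sudo ./edit-config python.d.conf`. A sketch, with an illustrative module
+name:
+
+```conf
+# in python.d.conf: disable only this one module (module name is illustrative)
+fail2ban: no
+```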
+
+## Example: Enable the Nginx collector
+
+To help explain how the auto-detection process works, let's use an Nginx web server as an example.
+
+Even if you don't have Nginx installed on your system, we recommend you read through the following section so you can
+apply the process to other data sources, such as Apache, Redis, Memcached, and more.
+
+The Nginx collector, which helps Netdata collect metrics from a running Nginx web server, is part of the
+`python.d.plugin` external plugin _orchestrator_.
+
+In order for Netdata to auto-detect an Nginx web server, you need to enable `ngx_http_stub_status_module` and pass the
+`stub_status` directive in the `location` block of your Nginx configuration file.
+
+You can confirm whether the `stub_status` Nginx module is already enabled by using the following command:
+
+```sh
+nginx -V 2>&1 | grep -o with-http_stub_status_module
+```
+
+If this command returns nothing, you'll need to [enable this module](https://www.nginx.com/blog/monitoring-nginx/).
+
+Next, edit your `/etc/nginx/sites-enabled/default` file to include a `location` block with the following:
+
+```conf
+ location /stub_status {
+ stub_status;
+ }
+```
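+
+Before restarting Netdata, you can optionally verify the endpoint yourself. A quick sanity check, assuming Nginx is
+serving on port 80 locally:
+
+```sh
+sudo nginx -t && sudo systemctl reload nginx
+curl http://localhost/stub_status   # should print "Active connections: ..." plus request counters
+```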
+
+Restart Netdata using `service netdata restart` or the [correct
+alternative](/docs/getting-started.md#start-stop-and-restart-netdata) for your system, and Netdata will auto-detect
+metrics from your Nginx web server!
+
+While not necessary for most auto-detection and collection purposes, you can also configure the Nginx collector itself
+by editing its configuration file:
+
+```sh
+./edit-config python.d/nginx.conf
+```
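+
+For instance, if your `stub_status` endpoint lives at a non-default URL, a job definition in that file might look like
+this (the job name and URL are illustrative):
+
+```conf
+local:
+  name: 'local'
+  url: 'http://localhost/stub_status'
+```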
+
+After configuring any source, or changing the configuration files for their respective modules, always restart Netdata.
+
+## What's next?
+
+Now that you've learned the fundamentals behind configuring data sources for auto-detection, it's time to move back to
+the dashboard to learn more about some of its more advanced features.
+
+[Next: Netdata's dashboard in depth &rarr;](step-07.md)
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2Fguides%2Fstep-by-step%2Fstep-06&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/docs/guides/step-by-step/step-07.md b/docs/guides/step-by-step/step-07.md
new file mode 100644
index 0000000..f2f6655
--- /dev/null
+++ b/docs/guides/step-by-step/step-07.md
@@ -0,0 +1,114 @@
+<!--
+title: "Step 7. Netdata's dashboard in depth"
+date: 2020-05-04
+custom_edit_url: https://github.com/netdata/netdata/edit/master/docs/guides/step-by-step/step-07.md
+-->
+
+# Step 7. Netdata's dashboard in depth
+
+Welcome to the seventh step of the Netdata guide!
+
+This step of the guide aims to get you more familiar with the features of the dashboard not previously mentioned in
+[step 2](/docs/guides/step-by-step/step-02.md).
+
+## What you'll learn in this step
+
+In this step of the Netdata guide, you'll learn how to:
+
+- [Change the dashboard's settings](#change-the-dashboards-settings)
+- [Check if there's an update to Netdata](#check-if-theres-an-update-to-netdata)
+- [Export and import a snapshot](#export-and-import-a-snapshot)
+
+Let's get started!
+
+## Change the dashboard's settings
+
+The settings area at the top of your Netdata dashboard houses browser settings. These settings do not affect the
+operation of your Netdata server/daemon. They take effect immediately and are permanently saved to browser local storage
+(except the refresh on focus / always option).
+
+You can see the **Performance**, **Synchronization**, **Visual**, and **Locale** tabs on the dashboard settings modal.
+
+![Animated GIF of opening the settings
+modal](https://user-images.githubusercontent.com/1153921/80841197-c93f5800-8bb3-11ea-907d-85bfe23565e1.gif)
+
+To change any setting, click on the toggle button. We recommend you spend some time reading the descriptions for each setting to understand them before making changes.
+
+Pay particular attention to the following settings, as they have dramatic impacts on the performance and appearance of
+your Netdata dashboard:
+
+- When to refresh the charts?
+- How to handle hidden charts?
+- Which chart refresh policy to use?
+- Which theme to use?
+- Do you need help?
+
+Some settings are applied immediately, and others are only reflected after you refresh the page.
+
+## Check if there's an update to Netdata
+
+You can always check if there is an update available from the **Update** area of your Netdata dashboard.
+
+![Opening the Agent's Update modal](https://user-images.githubusercontent.com/1153921/80829493-1adbe880-8b9c-11ea-9770-cc3b23a89414.gif)
+
+If an update is available, you'll see a modal similar to the one above.
+
+If you installed Netdata with the [automatic one-line installer script](/packaging/installer/README.md), it will
+attempt to update itself every day. If you choose to update manually instead, there are [several well-documented
+methods](/packaging/installer/UPDATE.md) to achieve that. Either way, it's best practice to first go over the
+[changelog](/CHANGELOG.md).
+
+## Export and import a snapshot
+
+Netdata can export and import snapshots of the contents of your dashboard at a given time. Any Netdata agent can import
+a snapshot created by any other Netdata agent.
+
+Snapshot files include all the information of the dashboard, including the URL of the origin server, its unique ID, and
+chart data queries for the visible timeframe. While snapshots are not in real-time, and thus won't update with new
+metrics, you can still pan, zoom, and highlight charts as you see fit.
+
+Snapshots can be incredibly useful for diagnosing anomalies after they've already happened. Let's say Netdata triggered
+an alarm while you were sleeping. In the morning, you can look up the exact moment the alarm was raised, export a
+snapshot, and send it to a colleague for further analysis.
+
+> ❗ Know how you shouldn't go around downloading software from suspicious-looking websites? Same policy goes for loading
+> snapshots from untrusted or anonymous sources. Importing a snapshot loads quite a bit of data into your web browser,
+> and so you should always err on the side of protecting your system.
+
+To export a snapshot, click on the **export** icon.
+
+![Animated GIF of opening the export
+modal](https://user-images.githubusercontent.com/1153921/80993197-82d63d00-8def-11ea-88fa-98827814e930.gif)
+
+Edit the snapshot file name and select your desired compression method. Click on **Export**.
+
+When the export is complete, your browser will prompt you to save the `.snapshot` file to your machine. You can share
+this file with any other Netdata user via email or Slack, or even use it to help describe your Netdata experience when
+[filing an issue](https://github.com/netdata/netdata/issues/new/choose) on GitHub.
+
+To import a snapshot, click on the **import** icon.
+
+![Animated GIF of opening the import
+modal](https://user-images.githubusercontent.com/12263278/64901503-ee696f80-d691-11e9-9678-8d0e2a162402.gif)
+
+Select the Netdata snapshot file to import. Once the file is loaded, the dashboard will update with critical information
+about the snapshot and the system from which it was taken. Click **import** to render it.
+
+Your Netdata dashboard will load data contained in the snapshot into charts. Because the snapshot only covers a certain
+period, it won't update with new metrics.
+
+An imported snapshot is also temporary. If you reload your browser tab, Netdata will remove the snapshot data and
+restore your real-time dashboard for your machine.
+
+## What's next?
+
+In this step of the Netdata guide, you learned how to:
+
+- Change the dashboard's settings
+- Check if there's an update to Netdata
+- Export or import a snapshot
+
+Next, you'll learn how to build your first custom dashboard!
+
+[Next: Build your first custom dashboard &rarr;](step-08.md)
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2Fguides%2Fstep-by-step%2Fstep-07&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/docs/guides/step-by-step/step-08.md b/docs/guides/step-by-step/step-08.md
new file mode 100644
index 0000000..76a1b07
--- /dev/null
+++ b/docs/guides/step-by-step/step-08.md
@@ -0,0 +1,395 @@
+<!--
+title: "Step 8. Build your first custom dashboard"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/docs/guides/step-by-step/step-08.md
+-->
+
+# Step 8. Build your first custom dashboard
+
+In previous steps of the guide, you learned how several sections of the Netdata dashboard work.
+
+This step will show you how to set up a custom dashboard to fit your unique needs. If nothing else, Netdata is really,
+really flexible. 🤸
+
+## What you'll learn in this step
+
+In this step of the Netdata guide, you'll learn:
+
+- [Why you might want a custom dashboard](#why-should-i-create-a-custom-dashboard)
+- [How to create and prepare your `custom-dashboard.html` file](#create-and-prepare-your-custom-dashboardhtml-file)
+- [Where to add `dashboard.js` to your custom dashboard file](#add-dashboardjs-to-your-custom-dashboard-file)
+- [How to add basic styling](#add-some-basic-styling)
+- [How to add charts of different types, shapes, and sizes](#creating-your-dashboards-charts)
+
+Let's get on with it!
+
+## Why should I create a custom dashboard?
+
+Because it's cool!
+
+But there are way more reasons than that, most of which will prove more valuable to you.
+
+You could use custom dashboards to aggregate real-time data from multiple Netdata agents in one place. Or, you could
+gather all the charts showing metrics collected from your custom application via `statsd` and perform application
+performance monitoring from a single dashboard. You could even use a custom dashboard and a standalone web server to
+create an enriched public status page for your service, and give your users something fun to look at while they're
+waiting for the 503 errors to clear up!
+
+Netdata's custom dashboarding capability is meant to be as flexible as your ideas. We hope you can take these
+fundamental ideas and turn them into something amazing.
+
+## Create and prepare your `custom-dashboard.html` file
+
+By default, Netdata stores its web server files at `/usr/share/netdata/web`. As with finding the location of your
+`netdata.conf` file, you can double-check this location by loading up `http://HOST:19999/netdata.conf` in your browser
+and finding the value of the `web files directory` option.
+
+To create your custom dashboard, create a file at `/usr/share/netdata/web/custom-dashboard.html` and copy in the
+following:
+
+```html
+<!DOCTYPE html>
+<html lang="en">
+<head>
+ <title>My custom dashboard</title>
+
+ <meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
+ <meta charset="utf-8">
+ <meta http-equiv="X-UA-Compatible" content="IE=edge,chrome=1">
+ <meta name="viewport" content="width=device-width, initial-scale=1">
+ <meta name="apple-mobile-web-app-capable" content="yes">
+ <meta name="apple-mobile-web-app-status-bar-style" content="black-translucent">
+
+ <!-- Add dashboard.js here! -->
+
+</head>
+<body>
+
+ <main class="container">
+
+ <h1>My custom dashboard</h1>
+
+ <!-- Add charts here! -->
+
+ </main>
+
+</body>
+</html>
+```
+
+Try visiting `http://HOST:19999/custom-dashboard.html` in your browser.
+
+If you get a blank page with the text `Access to file is not permitted: /usr/share/netdata/web/custom-dashboard.html`,
+you can fix the error by changing the dashboard file's ownership to the `netdata` user.
+
+```bash
+sudo chown netdata:netdata /usr/share/netdata/web/custom-dashboard.html
+```
+
+Reload your browser, and you should see a blank page with the title: **My custom dashboard**!
+
+## Add `dashboard.js` to your custom dashboard file
+
+You need to include the `dashboard.js` file of a Netdata agent to add Netdata charts. Add the following to the `<head>`
+of your custom dashboard page and change `HOST` according to your setup.
+
+```html
+ <!-- Add dashboard.js here! -->
+ <script type="text/javascript" src="http://HOST:19999/dashboard.js"></script>
+```
+
+When you add `dashboard.js` to any web page, it loads several JavaScript and CSS files to create and style charts. It
+also scans the page for elements that define charts, builds them, and refreshes with new metrics.
+
+> If you enabled SSL on your Netdata dashboard already, you'll need to use `https://` to grab the `dashboard.js` file.
+
+## Add some basic styling
+
+While not necessary, let's add some basic styling to make our dashboard look a little nicer. We're putting some
+basic CSS into a `<style>` tag inside of the page's `<head>` element.
+
+```html
+ <!-- Add dashboard.js here! -->
+ <script type="text/javascript" src="http://HOST:19999/dashboard.js"></script>
+
+ <style>
+ .wrap {
+ max-width: 1280px;
+ margin: 0 auto;
+ }
+
+ h1 {
+ margin-bottom: 30px;
+ text-align: center;
+ }
+
+ .charts {
+ display: flex;
+ flex-flow: row wrap;
+ justify-content: space-around;
+ }
+ </style>
+
+</head>
+```
+
+## Creating your dashboard's charts
+
+Time to create a chart!
+
+You need to create a `<div>` for each new chart. Each `<div>` element accepts a few `data-` attributes, some of which
+are required and some of which are optional.
+
+Let's cover a few important ones. And while we do it, we'll create a custom dashboard that shows a few CPU-related
+charts on a single page.
+
+### The chart unique ID (required)
+
+You need to specify the unique ID of a chart to show it on your custom dashboard. If you forgot how to find the unique
+ID, head back over to [step 2](/docs/guides/step-by-step/step-02.md#understand-charts-dimensions-families-and-contexts)
+for a re-introduction.
+
+You can then put this unique ID into a `<div>` element with the `data-netdata` attribute. Put this in the `<body>` of
+your custom dashboard file beneath the helpful comment.
+
+```html
+<body>
+
+ <main class="wrap">
+
+ <h1>My custom dashboard</h1>
+
+ <div class="charts">
+
+ <!-- Add charts here! -->
+ <div data-netdata="system.cpu"></div>
+
+ </div>
+
+ </main>
+
+</body>
+```
+
+Reload the page, and you should see a real-time `system.cpu` chart!
+
+... and a whole lot of white space. Let's fix that by adding a few more charts.
+
+```html
+ <!-- Add charts here! -->
+ <div data-netdata="system.cpu"></div>
+ <div data-netdata="apps.cpu"></div>
+ <div data-netdata="groups.cpu"></div>
+ <div data-netdata="users.cpu"></div>
+```
+
+![Custom dashboard with four charts
+added](https://user-images.githubusercontent.com/1153921/67526566-e675f580-f669-11e9-8ff5-d1f21a84fb2b.png)
+
+### Set chart duration
+
+By default, these charts visualize 10 minutes of Netdata metrics. Let's get a little more granular on this dashboard. To
+do so, add a new `data-after=""` attribute to each chart.
+
+`data-after` takes a _relative_ number of seconds from _now_. So, by putting `-300` as the value, you're asking the
+custom dashboard to display the _last 5 minutes_ (`5m * 60s = 300s`) of data.
+
+```html
+ <!-- Add charts here! -->
+ <div data-netdata="system.cpu"
+ data-after="-300">
+ </div>
+ <div data-netdata="apps.cpu"
+ data-after="-300">
+ </div>
+ <div data-netdata="groups.cpu"
+ data-after="-300">
+ </div>
+ <div data-netdata="users.cpu"
+ data-after="-300">
+ </div>
+```
+
+### Set chart size
+
+You can set the size of any chart using the `data-height=""` and `data-width=""` attributes. These attributes can be
+anything CSS accepts for width and height (e.g. percentages, pixels, em/rem, calc, and so on).
+
+Let's make the charts a little taller and allow them to fit side-by-side for a more compact view. Add
+`data-height="200px"` and `data-width="50%"` to each chart.
+
+```html
+ <div data-netdata="system.cpu"
+ data-after="-300"
+ data-height="250px"
+ data-width="50%"></div>
+ <div data-netdata="apps.cpu"
+ data-after="-300"
+ data-height="250px"
+ data-width="50%"></div>
+ <div data-netdata="groups.cpu"
+ data-after="-300"
+ data-height="250px"
+ data-width="50%"></div>
+ <div data-netdata="users.cpu"
+ data-after="-300"
+ data-height="250px"
+ data-width="50%"></div>
+```
+
+Now we're getting somewhere!
+
+![A custom dashboard with four charts
+side-by-side](https://user-images.githubusercontent.com/1153921/67526620-ff7ea680-f669-11e9-92d3-575665fc3a8e.png)
+
+## Final touches
+
+While we already have a perfectly workable dashboard, let's add some final touches to make it a little more pleasant on
+the eyes.
+
+First, add some extra CSS to create some vertical whitespace between the top and bottom row of charts.
+
+```html
+ <style>
+ ...
+
+ .charts > div {
+ margin-bottom: 6rem;
+ }
+ </style>
+```
+
+To create horizontal whitespace, change the value of `data-width="50%"` to `data-width="calc(50% - 2rem)"`.
+
+```html
+ <div data-netdata="system.cpu"
+ data-after="-300"
+ data-height="250px"
+ data-width="calc(50% - 2rem)"></div>
+ <div data-netdata="apps.cpu"
+ data-after="-300"
+ data-height="250px"
+ data-width="calc(50% - 2rem)"></div>
+ <div data-netdata="groups.cpu"
+ data-after="-300"
+ data-height="250px"
+ data-width="calc(50% - 2rem)"></div>
+ <div data-netdata="users.cpu"
+ data-after="-300"
+ data-height="250px"
+ data-width="calc(50% - 2rem)"></div>
+```
+
+Told you the `data-width` and `data-height` attributes can take any CSS values!
+
+Prefer a dark theme? Add this to your `<head>` _above_ where you added `dashboard.js`:
+
+```html
+ <script>
+ var netdataTheme = 'slate';
+ </script>
+
+ <!-- Add dashboard.js here! -->
+ <script type="text/javascript" src="https://HOST/dashboard.js"></script>
+```
+
+Refresh the dashboard to give your eyes a break from all that blue light!
+
+![A finished custom
+dashboard](https://user-images.githubusercontent.com/1153921/67531221-a23d2200-f676-11e9-91fe-c2cf1c426bf9.png)
+
+## The final `custom-dashboard.html`
+
+In case you got lost along the way, here's the final version of the `custom-dashboard.html` file:
+
+```html
+<!DOCTYPE html>
+<html lang="en">
+<head>
+ <title>My custom dashboard</title>
+
+ <meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
+ <meta charset="utf-8">
+ <meta http-equiv="X-UA-Compatible" content="IE=edge,chrome=1">
+ <meta name="viewport" content="width=device-width, initial-scale=1">
+ <meta name="apple-mobile-web-app-capable" content="yes">
+ <meta name="apple-mobile-web-app-status-bar-style" content="black-translucent">
+
+ <script>
+ var netdataTheme = 'slate';
+ </script>
+
+ <!-- Add dashboard.js here! -->
+ <script type="text/javascript" src="http://localhost:19999/dashboard.js"></script>
+
+ <style>
+ .wrap {
+ max-width: 1280px;
+ margin: 0 auto;
+ }
+
+ h1 {
+ margin-bottom: 30px;
+ text-align: center;
+ }
+
+ .charts {
+ display: flex;
+ flex-flow: row wrap;
+ justify-content: space-around;
+ }
+
+ .charts > div {
+ margin-bottom: 6rem;
+ position: relative;
+ }
+ </style>
+
+</head>
+<body>
+
+ <main class="wrap">
+
+ <h1>My custom dashboard</h1>
+
+ <div class="charts">
+
+ <!-- Add charts here! -->
+ <div data-netdata="system.cpu"
+ data-after="-300"
+ data-height="250px"
+ data-width="calc(50% - 2rem)"></div>
+ <div data-netdata="apps.cpu"
+ data-after="-300"
+ data-height="250px"
+ data-width="calc(50% - 2rem)"></div>
+ <div data-netdata="groups.cpu"
+ data-after="-300"
+ data-height="250px"
+ data-width="calc(50% - 2rem)"></div>
+ <div data-netdata="users.cpu"
+ data-after="-300"
+ data-height="250px"
+ data-width="calc(50% - 2rem)"></div>
+
+ </div>
+
+ </main>
+
+</body>
+</html>
+```
+
+## What's next?
+
+In this guide, you learned the fundamentals of building a custom Netdata dashboard. You should now be able to add more
+charts to your `custom-dashboard.html`, change the charts that are already there, and size them according to your needs.
+
+Of course, the custom dashboarding features covered here are just the beginning. Be sure to read up on our [custom
+dashboard documentation](/web/gui/custom/README.md) for details on how you can use other chart libraries, pull metrics
+from multiple Netdata agents, and choose which dimensions a given chart shows.
+
+Next, you'll learn how to store long-term historical metrics in Netdata!
+
+[Next: Long-term metrics storage &rarr;](/docs/guides/step-by-step/step-09.md)
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2Fguides%2Fstep-by-step%2Fstep-08&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/docs/guides/step-by-step/step-09.md b/docs/guides/step-by-step/step-09.md
new file mode 100644
index 0000000..636ffea
--- /dev/null
+++ b/docs/guides/step-by-step/step-09.md
@@ -0,0 +1,164 @@
+<!--
+title: "Step 9. Long-term metrics storage"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/docs/guides/step-by-step/step-09.md
+-->
+
+# Step 9. Long-term metrics storage
+
+By default, Netdata stores metrics in a custom database we call the [database engine](/database/engine/README.md), which
+stores recent metrics in your system's RAM and "spills" historical metrics to disk. By using both RAM and disk, the
+database engine helps you store a much larger dataset than the amount of RAM your system has.
+
+On a system that's collecting 2,000 metrics every second, the database engine's default configuration will store about
+two days' worth of metrics in RAM and on disk.
+
+That's a lot of metrics. We're talking 345,600,000 individual data points (2,000 metrics/second × 86,400 seconds/day ×
+2 days). And the database engine does it with a tiny portion of the RAM available on most systems.
+
+To store _even more_ metrics, you have two options. First, you can tweak the database engine's options to expand the RAM
+or disk space it uses. Second, you can archive metrics to an external database. For that, we'll use MongoDB as an example.
+
+## What you'll learn in this step
+
+In this step of the Netdata guide, you'll learn how to:
+
+- [Tweak the database engine's settings](#tweak-the-database-engines-settings)
+- [Archive metrics to an external database](#archive-metrics-to-an-external-database)
+ - [Use the MongoDB database](#archive-metrics-via-the-mongodb-exporting-connector)
+
+Let's get started!
+
+## Tweak the database engine's settings
+
+If you're using Netdata v1.18.0 or higher, and you haven't changed your `memory mode` settings before following this
+guide, your Netdata agent is already using the database engine.
+
+Let's look at your `netdata.conf` file again. Under the `[global]` section, you'll find three connected options.
+
+```conf
+[global]
+ # memory mode = dbengine
+ # page cache size = 32
+ # dbengine disk space = 256
+```
+
+The `memory mode` option is set, by default, to `dbengine`. `page cache size` determines the amount of RAM, in MiB, that
+the database engine dedicates to caching the metrics it's collecting. `dbengine disk space` determines the amount of
+disk space, in MiB, that the database engine will use to store these metrics once they've been "spilled" to disk.
+
+You can uncomment and change either `page cache size` or `dbengine disk space` based on how much RAM and disk you want
+the database engine to use. The higher those values, the more metrics Netdata will store. If you change them to 64 and
+512, respectively, the database engine should store about four days' worth of data on a system collecting 2,000 metrics
+every second.
+
+[**See our database engine calculator**](/docs/store/change-metrics-storage.md) to help you correctly set `dbengine disk
+space` based on your needs. The calculator gives an accurate estimate based on how many child nodes you have, how many
+metrics your Agent collects, and more.
+
+```conf
+[global]
+ memory mode = dbengine
+ page cache size = 64
+ dbengine disk space = 512
+```
+
+After you've made your changes, [restart Netdata](/docs/getting-started.md#start-stop-and-restart-netdata).
+
+To confirm the database engine is working, go to your Netdata dashboard and click on the **Netdata Monitoring** menu on
+the right-hand side. You can find `dbengine` metrics after `queries`.
+
+![Image of the database engine reflected in the Netdata
+Dashboard](https://user-images.githubusercontent.com/12263278/64781383-9c71fe00-d55a-11e9-962b-efd5558efbae.png)
+
+## Archive metrics to an external database
+
+You can archive all the metrics collected by Netdata to **external databases**. The supported databases and services
+include Graphite, OpenTSDB, Prometheus, AWS Kinesis Data Streams, Google Cloud Pub/Sub, MongoDB, and the list is always
+growing.
+
+As we said in [step 1](/docs/guides/step-by-step/step-01.md), we have only complementary systems, not competitors! We're
+happy to support these archiving methods and are always working to improve them.
+
+A lot of Netdata users archive their metrics to one of these databases for long-term storage or further analysis. Since
+Netdata collects so many metrics every second, they can quickly overload small devices or even big servers that are
+aggregating metrics streaming in from other Netdata agents.
+
+We even support resampling metrics during archiving. With resampling enabled, Netdata will archive only the average or
+sum of every X seconds of metrics. This reduces the sheer amount of data, albeit with a little less accuracy.
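+
+As a sketch, resampling is controlled by two per-connector options in `exporting.conf` (the values shown are
+illustrative):
+
+```conf
+    # send data every 10 seconds, as the average of the values collected in that window
+    update every = 10
+    data source = average
+```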
+
+How you archive metrics, or if you archive metrics at all, is entirely up to you! But let's cover one easy archiving
+method, the MongoDB exporting connector, to get you started.
+
+### Archive metrics via the MongoDB exporting connector
+
+Begin by installing MongoDB and its dependencies via the correct package manager for your system.
+
+```bash
+sudo apt-get install mongodb # Debian/Ubuntu
+sudo dnf install mongodb # Fedora
+sudo yum install mongodb # CentOS
+```
+
+Next, install the one essential dependency: v1.7.0 or higher of
+[libmongoc](http://mongoc.org/libmongoc/current/installing.html).
+
+```bash
+sudo apt-get install libmongoc-1.0-0 libmongoc-dev # Debian/Ubuntu
+sudo dnf install mongo-c-driver mongo-c-driver-devel # Fedora
+sudo yum install mongo-c-driver mongo-c-driver-devel # CentOS
+```
+
+Next, create a new MongoDB database and collection to store all these archived metrics. Use the `mongo` command to start
+the MongoDB shell, and then execute the following command:
+
+```mongodb
+use netdata
+db.createCollection("netdata_metrics")
+```
+
+Next, Netdata needs to be [reinstalled](/packaging/installer/REINSTALL.md) so that it can detect the libraries
+required for this exporting connection. Since you most likely installed Netdata using the one-line installer script,
+all you have to do is run that script again. Don't worry: any configuration changes you made along the way will be
+retained!
+
+```bash
+bash <(curl -Ss https://my-netdata.io/kickstart.sh)
+```
+
+Now, from your Netdata config directory, initialize and edit an `exporting.conf` file to tell Netdata where to find the
+database you just created.
+
+```sh
+./edit-config exporting.conf
+```
+
+Add the following section to the file:
+
+```conf
+[mongodb:my_mongo_instance]
+ enabled = yes
+ destination = mongodb://localhost
+ database = netdata
+ collection = netdata_metrics
+```
+
+[Restart](/docs/getting-started.md#start-stop-and-restart-netdata) Netdata to enable the MongoDB exporting connector.
+Click on the **Netdata Monitoring** menu and check out the **exporting my mongo instance** sub-menu. You should start
+seeing these charts fill up with data about the exporting process!
+
+![image](https://user-images.githubusercontent.com/1153921/70443852-25171200-1a56-11ea-8be3-494544b1c295.png)
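+
+To double-check from the database side, you can count the archived documents using the legacy `mongo` shell (give the
+exporter a minute or two first):
+
+```sh
+mongo netdata --eval 'db.netdata_metrics.count()'
+```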
+
+If you'd like to try connecting Netdata to another database, such as Prometheus or OpenTSDB, read our [exporting
+documentation](/exporting/README.md).
+
+## What's next?
+
+You're getting close to the end! In this step, you learned how to make the most of the database engine, or archive
+metrics to MongoDB for long-term storage.
+
+In the last step of this step-by-step guide, we'll put our sysadmin hat on and use Nginx to proxy traffic to and from
+our Netdata dashboard.
+
+[Next: Set up a proxy &rarr;](/docs/guides/step-by-step/step-10.md)
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2Fguides%2Fstep-by-step%2Fstep-09&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/docs/guides/step-by-step/step-10.md b/docs/guides/step-by-step/step-10.md
new file mode 100644
index 0000000..28ab47c
--- /dev/null
+++ b/docs/guides/step-by-step/step-10.md
@@ -0,0 +1,230 @@
+<!--
+title: "Step 10. Set up a proxy"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/docs/guides/step-by-step/step-10.md
+-->
+
+# Step 10. Set up a proxy
+
+You're almost through! At this point, you should be pretty familiar with how Netdata works and how to configure it to
+your liking.
+
+In this step of the guide, we're going to add a proxy in front of Netdata. We're doing this for both improved
+performance and security, so we highly recommend following these steps. Doubly so if you installed Netdata on a
+publicly-accessible remote server.
+
+> ❗ If you installed Netdata on the machine you're currently using (e.g. on `localhost`), and have been accessing
+> Netdata at `http://localhost:19999`, you can skip this step of the guide. In most cases, there is no benefit to
+> setting up a proxy for a service running locally.
+
+> ❗❗ This guide requires more advanced administration skills than previous parts. If you're still working on your
+> Linux administration skills, and would rather get back to Netdata, you might want to [skip this
+> step](step-99.md) for now and return to it later.
+
+## What you'll learn in this step
+
+In this step of the Netdata guide, you'll learn:
+
+- [What a proxy is and the benefits of using one](#wait-whats-a-proxy)
+- [How to connect Netdata to Nginx](#connect-netdata-to-nginx)
+- [How to enable HTTPS in Nginx](#enable-https-in-nginx)
+- [How to secure your Netdata dashboard with a password](#secure-your-netdata-dashboard-with-a-password)
+
+Let's dive in!
+
+## Wait. What's a proxy?
+
+A proxy is a middleman between the internet and a service you're running on your system. Traffic from the internet at
+large enters your system through the proxy, which then routes it to the service.
+
+A proxy is often used to enable encrypted HTTPS connections with your browser, but proxies are also useful for load
+balancing, performance, and password protection.
+
+We'll use [Nginx](https://nginx.org/en/) for this step of the guide, but you can also use
+[Caddy](https://caddyserver.com/) as a simple proxy if you prefer.
+
+## Required before you start
+
+You need three things to run a proxy using Nginx:
+
+- Nginx and Certbot installed on your system
+- A fully qualified domain name
+- A subdomain for Netdata that points to your system
+
+### Nginx and Certbot
+
+This step of the guide assumes you can install Nginx on your system. Here are the easiest methods to do so on Debian,
+Ubuntu, Fedora, and CentOS systems.
+
+```bash
+sudo apt-get install nginx # Debian/Ubuntu
+sudo dnf install nginx # Fedora
+sudo yum install nginx # CentOS
+```
+
+Check out [Nginx's installation
+instructions](https://docs.nginx.com/nginx/admin-guide/installing-nginx/installing-nginx-open-source/) for details on
+other Linux distributions.
+
+Certbot is a tool to help you create and renew certificate+key pairs for your domain. Visit their
+[instructions](https://certbot.eff.org/instructions) to get a detailed installation process for your operating system.
+
+### Fully qualified domain name
+
+The only other true prerequisite of using a proxy is a **fully qualified domain name** (FQDN). In other words, a domain
+name like `example.com`, `netdata.cloud`, or `github.com`.
+
+If you don't have a domain name, you won't be able to use a proxy the way we'll describe here.
+
+Because we strongly recommend running Netdata behind a proxy, the cost of a domain name is worth the benefit. If you
+don't have a preferred domain registrar, try [Google Domains](https://domains.google/),
+[Cloudflare](https://www.cloudflare.com/products/registrar/), or [Namecheap](https://www.namecheap.com/).
+
+### Subdomain for Netdata
+
+Any of the three domain registrars mentioned above, and most registrars in general, will allow you to create new DNS
+entries for your domain.
+
+To create a subdomain for Netdata, use your registrar's DNS settings to create an A record for a `netdata` subdomain.
+Point the A record to the IP address of your system.
+
+Once finished with the steps below, you'll be able to access your dashboard at `http://netdata.example.com`.
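+
+You can confirm the record has propagated before moving on (the subdomain shown is illustrative):
+
+```bash
+dig +short netdata.example.com   # should print your server's public IP address
+```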
+
+## Connect Netdata to Nginx
+
+The first part of enabling the proxy is to create a new server block for Nginx.
+
+Use your favorite text editor to create a file at `/etc/nginx/sites-available/netdata`, copy in the following
+configuration, and change the `server_name` line to match your domain.
+
+```nginx
+upstream backend {
+ server 127.0.0.1:19999;
+ keepalive 64;
+}
+
+server {
+ listen 80;
+
+ # Change `example.com` to match your domain name.
+ server_name netdata.example.com;
+
+ location / {
+ proxy_set_header X-Forwarded-Host $host;
+ proxy_set_header X-Forwarded-Server $host;
+ proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+ proxy_pass http://backend;
+ proxy_http_version 1.1;
+ proxy_pass_request_headers on;
+ proxy_set_header Connection "keep-alive";
+ proxy_store off;
+ }
+}
+```
+
+Save and close the file.
+
+Test your configuration file by running `sudo nginx -t`.
+
+If that returns no errors, it's time to make your server available. Run the following command to create a symbolic
+link in the `sites-enabled` directory.
+
+```bash
+sudo ln -s /etc/nginx/sites-available/netdata /etc/nginx/sites-enabled/netdata
+```
+
+Finally, restart Nginx to make your changes live.
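+
+On most modern distributions that's the following (use `sudo service nginx restart` if your system doesn't use
+systemd):
+
+```bash
+sudo systemctl restart nginx
+```
+
+Open your browser and head to `http://netdata.example.com`. You should see your proxied Netdata dashboard!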
+
+## Enable HTTPS in Nginx
+
+All this proxying doesn't mean much if we can't take advantage of one of the biggest benefits: encrypted HTTPS
+connections! Let's fix that.
+
+Certbot will automatically get a certificate, edit your Nginx configuration, and get HTTPS running in a single step. Run
+the following:
+
+```bash
+sudo certbot --nginx
+```
+
+> See this error after running `sudo certbot --nginx`?
+>
+> ```
+> Saving debug log to /var/log/letsencrypt/letsencrypt.log
+> The requested nginx plugin does not appear to be installed
+> ```
+>
+> You must install `python-certbot-nginx`. On Ubuntu or Debian systems, you can run `sudo apt-get install
+> python-certbot-nginx` to download and install this package.
+
+You'll be prompted with a few questions. At the `Which names would you like to activate HTTPS for?` question, hit
+`Enter`. Next comes this question:
+
+```bash
+Please choose whether or not to redirect HTTP traffic to HTTPS, removing HTTP access.
+- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+1: No redirect - Make no further changes to the webserver configuration.
+2: Redirect - Make all requests redirect to secure HTTPS access. Choose this for
+new sites, or if you're confident your site works on HTTPS. You can undo this
+change by editing your web server's configuration.
+- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+```
+
+You _do_ want to force HTTPS, so hit `2` and then `Enter`. Nginx will now ensure all attempts to access
+`netdata.example.com` use HTTPS.
+
+Certbot will automatically renew your certificate whenever it's needed, so you're done configuring your proxy. Open your
+browser again and navigate to `https://netdata.example.com`, and you'll land on an encrypted, proxied Netdata dashboard!
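+
+If you want to confirm that automatic renewal will work, Certbot ships a dry-run mode that exercises the full renewal
+flow without replacing your certificate:
+
+```bash
+sudo certbot renew --dry-run
+```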
+
+## Secure your Netdata dashboard with a password
+
+Finally, let's take a moment to put your Netdata dashboard behind a password. This step is optional, but you might not
+want _anyone_ to access the metrics in your proxied dashboard.
+
+Run the command below after changing `user` to the username you want to use to log in to your dashboard.
+
+```bash
+sudo sh -c "echo -n 'user:' >> /etc/nginx/.htpasswd"
+```
+
+Then run this command to create a password:
+
+```bash
+sudo sh -c "openssl passwd -apr1 >> /etc/nginx/.htpasswd"
+```
+
+You'll be prompted to create a password. Next, open your Nginx configuration file at
+`/etc/nginx/sites-available/netdata` and add these two lines under `location / {`:
+
+```nginx
+ location / {
+ auth_basic "Restricted Content";
+ auth_basic_user_file /etc/nginx/.htpasswd;
+ ...
+```
+
+Save, exit, and restart Nginx. Then try visiting your dashboard one last time. You'll see a prompt for the username and
+password you just created.
+
+![Username/password
+prompt](https://user-images.githubusercontent.com/1153921/67431031-5320bf80-f598-11e9-9573-f9f9912f1ef6.png)
+
+Your Netdata dashboard is now a touch more secure.
+
+## What's next?
+
+You're a real sysadmin now!
+
+If you want to configure your Nginx proxy further, check out the following:
+
+- [Running Netdata behind Nginx](/docs/Running-behind-nginx.md)
+- [How to optimize Netdata's performance](/docs/guides/configure/performance.md)
+- [Enabling TLS on Netdata's dashboard](/web/server/README.md#enabling-tls-support)
+
+And... you're _almost_ done with the Netdata guide.
+
+For some celebratory emoji and a clap on the back, head on over to our final step.
+
+[Next: The end. &rarr;](step-99.md)
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2Fguides%2Fstep-by-step%2Fstep-10&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/docs/guides/step-by-step/step-99.md b/docs/guides/step-by-step/step-99.md
new file mode 100644
index 0000000..3b893d5
--- /dev/null
+++ b/docs/guides/step-by-step/step-99.md
@@ -0,0 +1,51 @@
+<!--
+title: "Step ∞. You're finished!"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/docs/guides/step-by-step/step-99.md
+-->
+
+# Step ∞. You're finished!
+
+Congratulations. 🎉
+
+You've completed the step-by-step Netdata guide. That means you're well on your way to becoming an expert in using
+our toolkit for health monitoring and performance troubleshooting.
+
+But, perhaps more importantly, also that much closer to being an expert in the _fundamental skills behind health
+monitoring and performance troubleshooting_, which you can take with you to any job or project.
+
+And that is the entire point of this guide, and Netdata's [documentation](https://learn.netdata.cloud) as a
+whole—give you every resource possible to help you build faster, more resilient systems, services, and applications.
+
+Along the way, you learned how to:
+
+- Navigate Netdata's dashboard and visually detect anomalies using its charts.
+- Monitor multiple systems by viewing their connected Netdata agents together in your browser via Netdata Cloud.
+- Edit your `netdata.conf` file to tweak Netdata to your liking.
+- Tune existing alarms and create entirely new ones, plus get notifications about alarms on your favorite services.
+- Take advantage of Netdata's auto-detection capabilities to ensure your applications/services are monitored with
+ little to no configuration.
+- Use advanced features within Netdata's dashboard.
+- Build a custom dashboard using `dashboard.js`.
+- Save more historical metrics with the database engine or archive metrics to MongoDB.
+- Put Netdata behind a proxy to enable HTTPS and improve performance.
+
+Seems like a lot, right? Well, we hope it felt manageable and, yes, even _fun_.
+
+## What's next?
+
+Now that you're at the end of our step-by-step Netdata guide, the next steps are entirely up to you. In fact, you're
+just at the beginning of your journey into health monitoring and performance troubleshooting.
+
+Our documentation exists to put every Netdata resource in front of you as easily and coherently as we possibly can.
+Click around, search, and find new mountains to climb.
+
+If that feels like too much possibility to you, why not try one of these options:
+
+- Share your experience with Netdata and this guide. Be sure to [@mention](https://twitter.com/linuxnetdata) us on
+ Twitter!
+- Contribute to what we do. Browse our [open issues](https://github.com/netdata/netdata/issues) and check out our
+  [contributing doc](/CONTRIBUTING.md) for ideas on how you can pitch in.
+
+We can't wait to see what you monitor next! Bon voyage! ⛵
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2Fguides%2Fstep-by-step%2Fstep-99&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/docs/guides/troubleshoot/monitor-debug-applications-ebpf.md b/docs/guides/troubleshoot/monitor-debug-applications-ebpf.md
new file mode 100644
index 0000000..342193c
--- /dev/null
+++ b/docs/guides/troubleshoot/monitor-debug-applications-ebpf.md
@@ -0,0 +1,268 @@
+<!--
+title: "Monitor, troubleshoot, and debug applications with eBPF metrics"
+description: "Use Netdata's built-in eBPF metrics collector to monitor, troubleshoot, and debug your custom application using low-level kernel feedback."
+image: /img/seo/guides/troubleshoot/monitor-debug-applications-ebpf.png
+custom_edit_url: https://github.com/netdata/netdata/edit/master/docs/guides/troubleshoot/monitor-debug-applications-ebpf.md
+-->
+
+# Monitor, troubleshoot, and debug applications with eBPF metrics
+
+When trying to troubleshoot or debug a finicky application, there's no such thing as too much information. At Netdata,
+we developed programs that connect to the [_extended Berkeley Packet Filter_ (eBPF) virtual
+machine](/collectors/ebpf.plugin/README.md) to help you see exactly how specific applications are interacting with the
+Linux kernel. With these charts, you can root out bugs, discover optimizations, diagnose memory leaks, and much more.
+
+This means you can see exactly how often, and in what volume, the application creates processes, opens files, writes
+to the filesystem using virtual filesystem (VFS) functions, and much more. Even better, the eBPF collector gathers
+metrics at an _event frequency_, which is even faster than Netdata's beloved 1-second granularity. When you
+troubleshoot and debug applications with eBPF, rest assured that you won't miss even the smallest meaningful event.
+
+Using this guide, you'll learn the fundamentals of setting up Netdata to give you kernel-level metrics from your
+application so that you can monitor, troubleshoot, and debug to your heart's content.
+
+## Configure `apps.plugin` to recognize your custom application
+
+To start troubleshooting an application with eBPF metrics, you need to ensure your Netdata dashboard collects and
+displays its metrics independently of any other process.
+
+You can use the `apps_groups.conf` file to configure which applications appear in charts generated by
+[`apps.plugin`](/collectors/apps.plugin/README.md). Once you edit this file and create a new group for the application
+you want to monitor, you can see how it's interacting with the Linux kernel via real-time eBPF metrics.
+
+Let's assume you have an application that runs on the process `custom-app`. To monitor eBPF metrics for that application
+separate from any others, you need to create a new group in `apps_groups.conf` and associate that process name with it.
+
+Open the `apps_groups.conf` file in your Netdata configuration directory.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory
+sudo ./edit-config apps_groups.conf
+```
+
+Scroll down past the explanatory comments and stop when you see `# NETDATA processes accounting`. Above that, paste in
+the following text, which creates a new `dev` group with the `custom-app` process. Replace `custom-app` with your
+application's process name.
+
+Your file should now look like this:
+
+```conf
+...
+# -----------------------------------------------------------------------------
+# Custom applications to monitor with apps.plugin and ebpf.plugin
+
+dev: custom-app
+
+# -----------------------------------------------------------------------------
+# NETDATA processes accounting
+...
+```
+
+Restart Netdata with `sudo service netdata restart` or the appropriate method for your system to begin seeing metrics
+for this particular group+process. You can also add additional processes to the same group.
+
+You can set up `apps_groups.conf` to show more precise eBPF metrics for any application or service running on your
+system, even if it's a standard package like Redis, Apache, or any other [application/service Netdata collects
+from](/collectors/COLLECTORS.md).
+
+```conf
+# -----------------------------------------------------------------------------
+# Custom applications to monitor with apps.plugin and ebpf.plugin
+
+dev: custom-app
+database: *redis*
+apache: *apache*
+
+# -----------------------------------------------------------------------------
+# NETDATA processes accounting
+...
+```
+
+Now that you have `apps_groups.conf` set up to monitor your application/service, you can also set up the eBPF collector
+to show other charts that will help you debug and troubleshoot how it interacts with the Linux kernel.
+
+## Configure the eBPF collector to monitor errors
+
+The eBPF collector has [two possible modes](/collectors/ebpf.plugin#ebpf-load-mode): `entry` and `return`. The default
+is `entry`, which only monitors calls to kernel functions, but the `return` mode also monitors and charts _whether
+these calls return in error_.
+
+Let's turn on the `return` mode for more granularity when debugging your application's behavior.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory
+sudo ./edit-config ebpf.conf
+```
+
+Replace `entry` with `return`:
+
+```conf
+[global]
+ ebpf load mode = return
+ disable apps = no
+
+[ebpf programs]
+ process = yes
+ network viewer = yes
+```
+
+Restart Netdata with `sudo service netdata restart` or the appropriate method for your system.
+
+## Get familiar with per-application eBPF metrics and charts
+
+Visit the Netdata dashboard at `http://NODE:19999`, replacing `NODE` with the hostname or IP of the system you're using
+to monitor this application. Scroll down to the **Applications** section. These charts now feature a `dev` dimension
+with metrics specific to the `custom-app` process you configured earlier.
+
+Pay particular attention to the charts in the **ebpf file**, **ebpf syscall**, **ebpf process**, and **ebpf net**
+sub-sections. These charts are populated by low-level Linux kernel metrics thanks to eBPF, and showcase the volume of
+calls to open/close files, call functions like `do_fork`, IO activity on the VFS, and much more.
+
+See the [eBPF collector documentation](/collectors/ebpf.plugin/README.md#integration-with-appsplugin) for the full list
+of per-application charts.
+
+Let's show some examples of how you can first identify normal eBPF patterns, then use that knowledge to identify
+anomalies in a few simulated scenarios.
+
+For example, the following screenshot shows the number of open files, failures to open files, and closed files on a
+Debian 10 system. The first spike is from configuring and compiling a small C program; the second is from running
+Apache's `ab` tool to benchmark an Apache web server.
+
+![An example of eBPF
+charts](https://user-images.githubusercontent.com/1153921/85311677-a8380c80-b46a-11ea-9735-babaedc22fdb.png)
+
+In these charts, you can see first a spike in syscalls to open and close files from the configure/build process,
+followed by a similar spike from the Apache benchmark.
+
+> 👋 Don't forget that you can view chart data directly via Netdata's API!
+>
+> For example, open your browser and navigate to `http://NODE:19999/api/v1/data?chart=apps.file_open`, replacing `NODE`
+> with the IP address or hostname of your Agent. The API returns JSON of that chart's dimensions and metrics, which you
+> can use in other operations.
+>
+> To see other charts, replace `apps.file_open` with the context of the chart you want to see data for.
+>
+> To see all the API options, visit our [Swagger
+> documentation](https://editor.swagger.io/?url=https://raw.githubusercontent.com/netdata/netdata/master/web/api/netdata-swagger.yaml)
+> and look under the **/data** section.
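+
+For example, a quick `curl` against that endpoint (replace `NODE`; the `after=-60` parameter requests the last 60
+seconds of data):
+
+```bash
+curl "http://NODE:19999/api/v1/data?chart=apps.file_open&after=-60"
+```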
+
+## Troubleshoot and debug applications with eBPF
+
+The actual method of troubleshooting and debugging any application with Netdata's eBPF metrics depends on the
+application, its place within your stack, and the type of issue you're trying to root cause. This guide can't explain
+how to troubleshoot _every_ application with eBPF metrics, but it should give you some ideas on how to start with your
+own systems.
+
+The value of using Netdata to collect and visualize eBPF metrics is that you don't have to rely on existing (complex)
+command line eBPF programs or, even worse, write your own eBPF program to get the information you need.
+
+Let's walk through some scenarios where you might find value in eBPF metrics.
+
+### Benchmark application performance
+
+You can use eBPF metrics to profile the performance of your applications, whether they're custom or a standard Linux
+service, like a web server or database.
+
+For example, look at the charts below. The first spike represents running a Redis benchmark _without_ pipelining
+(`redis-benchmark -n 1000000 -t set,get -q`). The second spike represents the same benchmark _with_ pipelining
+(`redis-benchmark -n 1000000 -t set,get -q -P 16`).
+
+![Screenshot of eBPF metrics during a Redis
+benchmark](https://user-images.githubusercontent.com/1153921/84916168-91607700-b072-11ea-8fec-b76df89315aa.png)
+
+The performance optimization is clear from the speed at which the benchmark finished (the horizontal length of the
+spike) and the reduced write/read syscalls and bytes written to disk.
+
+You can run similar performance benchmarks against any application, view the results on a Linux kernel level, and
+continuously improve the performance of your infrastructure.
+
+### Inspect for leaking file descriptors
+
+If your application runs fine and then only crashes after a few hours, leaking file descriptors may be to blame.
+
+Check the **Number of open files (apps.file_open)** and **Files closed (apps.file_closed)** for discrepancies. These
+metrics should be more or less equal. If they diverge, with more open files than closed, your application may not be
+closing file descriptors properly.
+
+See, for example, the volume of files opened and closed by `apps.plugin` itself. Because the eBPF collector is
+monitoring these syscalls at an event level, you can see that, at any given second, the open and closed numbers are
+equal.
+
+This isn't to say Netdata is _perfect_, but at least `apps.plugin` doesn't have a file descriptor problem.
+
+![Screenshot of open and closed file
+descriptors](https://user-images.githubusercontent.com/1153921/84816048-c57f5d80-afc8-11ea-9684-d2b923d5d2b2.png)
+
+### Pin down syscall failures
+
+If you enabled the eBPF collector's `return` mode as mentioned [in a previous
+step](#configure-the-ebpf-collector-to-monitor-errors), you can view charts related to how often a given application's
+syscalls return in failure.
+
+By understanding when and how often these failures happen, you might be able to diagnose a bug in your application.
+
+To diagnose potential issues with an application, look at the **Fails to open files (apps.file_open_error)**, **Fails to
+close files (apps.file_close_error)**, **Fails to write (apps.vfs_write_error)**, and **Fails to read
+(apps.vfs_read_error)** charts for failed syscalls coming from your application. If you see any, look to the surrounding
+charts for anomalies at the same time frame, or correlate with other activity in the application or on the system to get
+closer to the root cause.
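+
+As with the file descriptor charts, you can also pull these error counters from the API to quantify failures over a
+window. A sketch, again assuming a local Agent on the default port:
+
+```bash
+# Total failed read/write syscalls per application group over the last 10 minutes
+curl -s "http://localhost:19999/api/v1/data?chart=apps.vfs_read_error&after=-600&points=1&group=sum&format=json"
+curl -s "http://localhost:19999/api/v1/data?chart=apps.vfs_write_error&after=-600&points=1&group=sum&format=json"
+```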
+
+### Investigate zombie processes
+
+Look for the trio of **Process started (apps.process_create)**, **Threads started (apps.thread_create)**, and **Tasks
+closed (apps.task_close)** charts to investigate situations where an application inadvertently leaves [zombie
+processes](https://en.wikipedia.org/wiki/Zombie_process).
+
+These processes have terminated and no longer consume system resources, but each one still occupies an entry in the
+process table, which can cause issues if your system runs out of available PIDs to allocate.
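+
+To cross-check what the charts show, you can also list zombie processes directly with standard tools (this is plain
+`ps` usage, not a Netdata feature):
+
+```bash
+# List zombie (defunct) processes: state 'Z' in the STAT column
+ps axo stat,ppid,pid,comm | awk '$1 ~ /^Z/'
+```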
+
+For example, the chart below demonstrates a [zombie factory
+program](https://www.refining-linux.org/archives/7-Dr.-Frankenlinux-or-how-to-create-zombie-processes.html) in action.
+
+![Screenshot of eBPF showing evidence of a zombie
+process](https://user-images.githubusercontent.com/1153921/84831957-27e45800-afe1-11ea-9fe2-fdd910915366.png)
+
+Starting at 14:51:49, Netdata sees the `zombie` group creating one new process every second, but no closed tasks. This
+continues for roughly 30 seconds, at which point the factory program is killed with `SIGINT`, resulting in 31 closed
+tasks in the subsequent second.
+
+Zombie processes may not be catastrophic, but if you're developing an application on Linux, you should eliminate them.
+If a service in your stack creates them, you should consider filing a bug report.
+
+## View eBPF metrics in Netdata Cloud
+
+You can also show per-application eBPF metrics in Netdata Cloud. This could be particularly useful if you're running the
+same application on multiple systems and want to correlate how it performs on each target, or if you want to share your
+findings with someone else on your team.
+
+If you don't already have a Netdata Cloud account, go [sign in](https://app.netdata.cloud) and get started for free.
+Read the [get started with Cloud guide](https://learn.netdata.cloud/docs/cloud/get-started) for a walkthrough of node
+claiming and other fundamentals.
+
+Once you've added one or more nodes to a Space in Netdata Cloud, you can see aggregated eBPF metrics in the [Overview
+dashboard](/docs/visualize/overview-infrastructure.md) under the same **Applications** or **eBPF** sections that you
+find on the local Agent dashboard. Or, [create new dashboards](/docs/visualize/create-dashboards.md) using eBPF metrics
+from any number of distributed nodes to see how your application interacts with multiple Linux kernels on multiple Linux
+systems.
+
+Now that you can see eBPF metrics in Netdata Cloud, you can [invite your
+team](https://learn.netdata.cloud/docs/cloud/manage/invite-your-team) and share your findings with others.
+
+## What's next?
+
+Debugging and troubleshooting an application takes a special combination of practice, experience, and sheer luck. With
+Netdata's eBPF metrics to back you up, you can rest assured that you see every minute detail of how your application
+interacts with the Linux kernel.
+
+If you're still trying to wrap your head around what we offer, be sure to read up on our accompanying documentation and
+other resources on eBPF monitoring with Netdata:
+
+- [eBPF collector](/collectors/ebpf.plugin/README.md)
+- [eBPF's integration with `apps.plugin`](/collectors/apps.plugin/README.md#integration-with-ebpf)
+- [Linux eBPF monitoring with Netdata](https://www.netdata.cloud/blog/linux-ebpf-monitoring-with-netdata/)
+
+The scenarios described above are just the beginning when it comes to troubleshooting with eBPF metrics. We're excited
+to explore others and see what our community dreams up. If you have other use cases, whether simulated or real-world,
+we'd love to hear them: [info@netdata.cloud](mailto:info@netdata.cloud).
+
+Happy troubleshooting!
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2Fguides%2Ftroubleshoot%2Fmonitor-debug-applications-ebpf.md&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/docs/guides/using-host-labels.md b/docs/guides/using-host-labels.md
new file mode 100644
index 0000000..6d4af2e
--- /dev/null
+++ b/docs/guides/using-host-labels.md
@@ -0,0 +1,212 @@
+<!--
+title: "Use host labels to organize systems, metrics, and alarms"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/docs/guides/using-host-labels.md
+-->
+
+# Use host labels to organize systems, metrics, and alarms
+
+When you use Netdata to monitor and troubleshoot an entire infrastructure, whether that's dozens or hundreds of systems,
+you need sophisticated ways of keeping everything organized. You need alarms that adapt to the system's purpose, or to
+whether it's a parent or child in a streaming setup. You need properly-labeled metrics archiving so you can sort,
+correlate, and mash-up your data to your heart's content. You need to keep tabs on ephemeral Docker containers in a
+Kubernetes cluster.
+
+You need **host labels**: a powerful new way of organizing your Netdata-monitored systems. We introduced host labels in
+[v1.20 of Netdata](https://blog.netdata.cloud/posts/release-1.20/), and they come pre-configured out of the box.
+
+Let's take a peek into how to create host labels and apply them across a few of Netdata's features to give you more
+organization power over your infrastructure.
+
+## Create unique host labels
+
+Host labels are defined in `netdata.conf`. To create host labels, open that file using `edit-config`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config netdata.conf
+```
+
+Create a new `[host labels]` section defining a new host label and its value for the system in question. Make sure not
+to violate any of the [host label naming rules](/docs/configuration-guide.md#netdata-labels).
+
+```conf
+[host labels]
+ type = webserver
+ location = us-seattle
+ installed = 20200218
+```
+
+Once you've written a few host labels, you need to enable them. Instead of restarting the entire Netdata service, you
+can reload labels using the helpful `netdatacli` tool:
+
+```bash
+netdatacli reload-labels
+```
+
+Your host labels will now be enabled. You can double-check them by using `curl http://HOST-IP:19999/api/v1/info` to
+read the status of your agent. For example, from a VPS system running Debian 10:
+
+```json
+{
+ ...
+ "host_labels": {
+ "_is_k8s_node": "false",
+ "_is_parent": "false",
+ "_virt_detection": "systemd-detect-virt",
+ "_container_detection": "none",
+ "_container": "unknown",
+ "_virtualization": "kvm",
+ "_architecture": "x86_64",
+ "_kernel_version": "4.19.0-6-amd64",
+ "_os_version": "10 (buster)",
+ "_os_name": "Debian GNU/Linux",
+ "type": "webserver",
+    "location": "us-seattle",
+ "installed": "20200218"
+ },
+ ...
+}
+```
+
+You may have noticed a handful of labels that begin with an underscore (`_`). These are automatic labels.
+
+### Automatic labels
+
+When Netdata starts, it captures relevant information about the system and converts it into automatically-generated
+host labels. You can use these to logically organize your systems via health entities, exporting metrics,
+parent-child status, and more.
+
+They capture the following:
+
+- Kernel version
+- Operating system name and version
+- CPU architecture, system cores, CPU frequency, RAM, and disk space
+- Whether Netdata is running inside of a container, and if so, the OS and hardware details about the container's host
+- Whether Netdata is running inside a Kubernetes node
+- What virtualization layer the system runs on top of, if any
+- Whether the system is a streaming parent or child
+
+If you want to organize your systems without manually creating host labels, try the automatic labels in some of the
+features below.
+
+## Host labels in streaming
+
+You may have noticed the `_is_parent` and `_is_child` automatic labels from above. Host labels are also now
+streamed from a child to its parent node, which concentrates an entire infrastructure's OS, hardware, container,
+and virtualization information in one place: the parent.
+
+Now, if you'd like to remind yourself of how much RAM a certain child node has, you can access
+`http://localhost:19999/host/CHILD_HOSTNAME/api/v1/info` and reference the automatically-generated host labels from the
+child system. It's a vastly simplified way of accessing critical information about your infrastructure.
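+
+For example, to pull just the labels for a child node from its parent (a sketch assuming a hypothetical child named
+`child-web-01` and `jq` installed on the parent):
+
+```bash
+curl -s "http://localhost:19999/host/child-web-01/api/v1/info" | jq '.host_labels'
+```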
+
+> ⚠️ Because automatic labels for child nodes are accessible via API calls, and contain sensitive information like
+> kernel and operating system versions, you should secure streaming connections with SSL. See the [streaming
+> documentation](/streaming/README.md#securing-streaming-communications) for details. You may also want to use
+> [access lists](/web/server/README.md#access-lists) or [expose the API only to LAN/localhost
+> connections](/docs/netdata-security.md#expose-netdata-only-in-a-private-lan).
+
+You can also use `_is_parent`, `_is_child`, and any other host labels in both health entities and metrics
+exporting. Speaking of which...
+
+## Host labels in health entities
+
+You can use host labels to logically organize your systems by their type, purpose, or location, and then apply specific
+alarms to them.
+
+For example, let's use the configuration example from earlier:
+
+```conf
+[host labels]
+ type = webserver
+ location = us-seattle
+ installed = 20200218
+```
+
+You could now create a new health entity (checking if disk space will run out soon) that applies only to hosts
+labeled `webserver`:
+
+```yaml
+ template: disk_fill_rate
+ on: disk.space
+ lookup: max -1s at -30m unaligned of avail
+ calc: ($this - $avail) / (30 * 60)
+ every: 15s
+ host labels: type = webserver
+```
+
+Or, by using one of the automatic labels, for only webserver systems running a specific OS:
+
+```yaml
+ host labels: _os_name = Debian*
+```
+
+In a streaming configuration where a parent node is triggering alarms for its child nodes, you could create health
+entities that apply only to child nodes:
+
+```yaml
+ host labels: _is_child = true
+```
+
+Or when ephemeral Docker nodes are involved:
+
+```yaml
+ host labels: _container = docker
+```
+
+Of course, there are many more possibilities for intuitively organizing your systems with host labels. See the [health
+documentation](/health/REFERENCE.md#alarm-line-host-labels) for more details, and then get creative!
+
+## Host labels in metrics exporting
+
+If you have enabled any metrics exporting via our experimental [exporters](/exporting/README.md), any new host
+labels you created manually are sent to the destination database alongside metrics. You can change this behavior by
+editing `exporting.conf`, and you can even send automatically-generated labels along with exported metrics.
+
+```conf
+[exporting:global]
+    enabled = yes
+    send configured labels = yes
+    send automatic labels = no
+```
+
+You can also change this behavior per exporting connection:
+
+```conf
+[opentsdb:my_instance3]
+    enabled = yes
+    destination = localhost:4242
+    data source = sum
+    update every = 10
+    send charts matching = system.cpu
+    send configured labels = no
+    send automatic labels = yes
+```
+
+By applying labels to exported metrics, you can more easily query and correlate historical metrics using those labels. To learn
+more about exporting, read the [documentation](/exporting/README.md).
+
+## What's next?
+
+Host labels are a brand-new feature in Netdata, and yet they've already propagated deeply into some of its core
+functionality. We're just getting started with labels, and will keep the community apprised of additional functionality
+as it's made available. You can also track [issue #6503](https://github.com/netdata/netdata/issues/6503), which is where
+the Netdata team first kicked off this work.
+
+Note that while the Netdata dashboard does not expose either user-configured or automatic host labels, API queries
+_do_ expose this information. As always, we recommend you secure Netdata:
+
+- [Expose Netdata only in a private LAN](/docs/netdata-security.md#expose-netdata-only-in-a-private-lan)
+- [Enable TLS/SSL for web/API requests](/web/server/README.md#enabling-tls-support)
+- Put Netdata behind a proxy
+ - [Use an authenticating web server in proxy
+ mode](/docs/netdata-security.md#use-an-authenticating-web-server-in-proxy-mode)
+ - [Nginx proxy](/docs/Running-behind-nginx.md)
+ - [Apache proxy](/docs/Running-behind-apache.md)
+ - [Lighttpd](/docs/Running-behind-lighttpd.md)
+ - [Caddy](/docs/Running-behind-caddy.md)
+
+If you have issues or questions around using host labels, don't hesitate to [file an
+issue](https://github.com/netdata/netdata/issues/new?labels=bug%2C+needs+triage&template=bug_report.md) on GitHub. We're
+excited to make host labels even more valuable to our users, which we can only do with your input.
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2Fguides%2Fusing-host-labels&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/docs/monitor/configure-alarms.md b/docs/monitor/configure-alarms.md
new file mode 100644
index 0000000..2a97795
--- /dev/null
+++ b/docs/monitor/configure-alarms.md
@@ -0,0 +1,148 @@
+<!--
+title: "Configure health alarms"
+description: "Netdata's health monitoring watchdog is incredibly adaptable to your infrastructure's unique needs, with configurable health alarms."
+custom_edit_url: https://github.com/netdata/netdata/edit/master/docs/monitor/configure-alarms.md
+-->
+
+# Configure health alarms
+
+Netdata's health watchdog is highly configurable, with support for dynamic thresholds, hysteresis, alarm templates, and
+more. You can tweak any of the existing alarms based on your infrastructure's topology or specific monitoring needs, or
+create new entities.
+
+You can use health alarms in conjunction with any of Netdata's [collectors](/docs/collect/how-collectors-work.md) (see
+the [supported collector list](/collectors/COLLECTORS.md)) to monitor the health of your systems, containers, and
+applications in real time.
+
+While you can see active alarms both on the local dashboard and Netdata Cloud, all health alarms are configured _per
+node_ via individual Netdata Agents. If you want to deploy a new alarm across your
+[infrastructure](/docs/quickstart/infrastructure.md), you must configure each node with the same health configuration
+files.
+
+## Edit health configuration files
+
+All of Netdata's [health configuration files](/health/REFERENCE.md#health-configuration-files) are in Netdata's config
+directory, inside the `health.d/` directory. Navigate to your [Netdata config directory](/docs/configure/nodes.md) and
+use `edit-config` to make changes to any of these files.
+
+For example, to edit the `cpu.conf` health configuration file, run:
+
+```bash
+sudo ./edit-config health.d/cpu.conf
+```
+
+Each health configuration file contains one or more health _entities_, which always begin with `alarm:` or `template:`.
+For example, here is the first health entity in `health.d/cpu.conf`:
+
+```yaml
+template: 10min_cpu_usage
+ on: system.cpu
+ os: linux
+ hosts: *
+ lookup: average -10m unaligned of user,system,softirq,irq,guest
+ units: %
+ every: 1m
+ warn: $this > (($status >= $WARNING) ? (75) : (85))
+ crit: $this > (($status == $CRITICAL) ? (85) : (95))
+ delay: down 15m multiplier 1.5 max 1h
+ info: average cpu utilization for the last 10 minutes (excluding iowait, nice and steal)
+ to: sysadmin
+```
+
+To tune this alarm to trigger warning and critical alarms at a lower CPU utilization, change the `warn` and `crit` lines
+to the values of your choosing. For example:
+
+```yaml
+ warn: $this > (($status >= $WARNING) ? (60) : (75))
+ crit: $this > (($status == $CRITICAL) ? (75) : (85))
+```
+
+Save the file and [reload Netdata's health configuration](#reload-health-configuration) to make your changes live.
+
+### Silence an individual alarm
+
+Instead of disabling an alarm altogether, or even disabling _all_ alarms, you can silence individual alarms by changing
+one line in a given health entity. To silence any single alarm, change the `to:` line in its entity to `silent`.
+
+```yaml
+ to: silent
+```
+
+## Write a new health entity
+
+While tuning existing alarms may work in some cases, you may need to write entirely new health entities based on how
+your systems, containers, and applications work.
+
+Read Netdata's [health reference](/health/REFERENCE.md#health-entity-reference) for a full listing of the format,
+syntax, and functionality of health entities.
+
+To write a new health entity into a new file, navigate to your [Netdata config directory](/docs/configure/nodes.md),
+then use `touch` to create a new file in the `health.d/` directory. Use `edit-config` to start editing the file.
+
+As an example, let's create a `ram-usage.conf` file.
+
+```bash
+sudo touch health.d/ram-usage.conf
+sudo ./edit-config health.d/ram-usage.conf
+```
+
+For example, here is a health entity that triggers a warning alarm when a node's RAM usage rises above 80%, and a
+critical alarm above 90%:
+
+```yaml
+ alarm: ram_usage
+ on: system.ram
+lookup: average -1m percentage of used
+ units: %
+ every: 1m
+ warn: $this > 80
+ crit: $this > 90
+ info: The percentage of RAM being used by the system.
+```
+
+Let's look into each of the lines to see how they create a working health entity.
+
+- `alarm`: The name for your new entity. The name needs to follow these requirements:
+  - Any letter or number.
+  - The symbols `.` and `_`.
+  - It cannot duplicate a chart name, dimension name, family name, or chart variable name.
+- `on`: Which chart the entity listens to.
+- `lookup`: Which metrics the alarm monitors, the duration of time to monitor, and how to process the metrics into a
+ usable format.
+ - `average`: Calculate the average of all the metrics collected.
+ - `-1m`: Use metrics from 1 minute ago until now to calculate that average.
+ - `percentage`: Clarify that we're calculating a percentage of RAM usage.
+ - `of used`: Specify which dimension (`used`) on the `system.ram` chart you want to monitor with this entity.
+- `units`: Use percentages rather than absolute units.
+- `every`: How often to perform the `lookup` calculation to decide whether or not to trigger this alarm.
+- `warn`/`crit`: The value at which Netdata should trigger a warning or critical alarm. This example uses simple
+ syntax, but most pre-configured health entities use
+ [hysteresis](/health/REFERENCE.md#special-usage-of-the-conditional-operator) to avoid superfluous notifications.
+- `info`: A description of the alarm, which will appear in the dashboard and notifications.
+
+In human-readable format:
+
+> This health entity, named **ram_usage**, watches the **system.ram** chart. It looks up the last **1 minute** of
+> metrics from the **used** dimension and calculates the **average** of all those metrics in a **percentage** format,
+> using a **% unit**. The entity performs this lookup **every minute**.
+>
+> If the average RAM usage percentage over the last 1 minute is **more than 80%**, the entity triggers a warning alarm.
+> If the usage is **more than 90%**, the entity triggers a critical alarm.
+
+When you finish writing this new health entity, [reload Netdata's health configuration](#reload-health-configuration) to
+see it live on the local dashboard or Netdata Cloud.
+
+## Reload health configuration
+
+To make any changes to your health configuration live, you must reload Netdata's health monitoring system. To do that
+without restarting all of Netdata, run `netdatacli reload-health` or `killall -USR2 netdata`.
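+
+After reloading, you can confirm your new entity is active by listing the Agent's alarms via the API. A minimal
+sketch, assuming a local Agent and `jq`:
+
+```bash
+# List the names of all configured alarms, including the new ram_usage entity
+curl -s "http://localhost:19999/api/v1/alarms?all" | jq '.alarms | keys'
+```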
+
+## What's next?
+
+With your health entities configured properly, it's time to [enable
+notifications](/docs/monitor/enable-notifications.md) to get notified whenever a node reaches a warning or critical
+state.
+
+To build complex, dynamic alarms, read our guide on [dimension templates](/docs/guides/monitor/dimension-templates.md).
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2Fmonitor%2Fconfigure-alarms&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/docs/monitor/enable-notifications.md b/docs/monitor/enable-notifications.md
new file mode 100644
index 0000000..68beba5
--- /dev/null
+++ b/docs/monitor/enable-notifications.md
@@ -0,0 +1,144 @@
+<!--
+title: "Enable alarm notifications"
+description: "Send Netdata alarms from a centralized place with Netdata Cloud, or configure nodes individually, to enable incident response and faster resolution."
+custom_edit_url: https://github.com/netdata/netdata/edit/master/docs/monitor/enable-notifications.md
+-->
+
+# Enable alarm notifications
+
+Netdata offers two ways to receive alarm notifications on external platforms. These methods work independently _or_ in
+parallel, which means you can enable both at the same time to send alarm notifications to any number of endpoints.
+
+Both methods use a node's health alarms to generate the content of alarm notifications. Read the doc on [configuring
+alarms](/docs/monitor/configure-alarms.md) to change the preconfigured thresholds or to create tailored alarms for your
+infrastructure.
+
+Netdata Cloud offers [centralized alarm notifications](#netdata-cloud) via email, which leverages the health status
+information already streamed to Netdata Cloud from claimed nodes to send notifications to those who have enabled them.
+
+The Netdata Agent has a [notification system](#netdata-agent) that supports more than a dozen services, such as email,
+Slack, PagerDuty, Twilio, Amazon SNS, Discord, and much more.
+
+For example, use centralized alarm notifications in Netdata Cloud for immediate, zero-configuration alarm notifications
+for your team, then configure individual nodes to send notifications to a PagerDuty endpoint for an automated incident
+response process.
+
+## Netdata Cloud
+
+Netdata Cloud's [centralized alarm notifications](https://learn.netdata.cloud/docs/cloud/monitoring/notifications/) are a
+zero-configuration way to get notified when an anomaly or incident strikes any node or application in your
+infrastructure. The advantage of using centralized alarm notifications from Netdata Cloud is that you don't have to
+worry about configuring each node in your infrastructure.
+
+To enable centralized alarm notifications for a Space, click on **Manage Space** in the left-hand menu, then click on
+the **Notifications** tab. Click the toggle switch next to **E-mail** to enable this notification method.
+
+Next, enable notifications on a user level by clicking on your profile icon, then **Profile** in the dropdown. The
+**Notifications** tab reveals rich management settings, including the ability to enable/disable methods entirely or
+choose what types of notifications to receive from each War Room.
+
+![Enabling and configuring alarm notifications in Netdata
+Cloud](https://user-images.githubusercontent.com/1153921/101936280-93c50900-3b9d-11eb-9ba0-d6927fa872b7.gif)
+
+See the [centralized alarm notifications](https://learn.netdata.cloud/docs/cloud/monitoring/notifications/) reference
+doc for further details about what information is conveyed in an email notification, flood protection, and more.
+
+## Netdata Agent
+
+The Netdata Agent's [notification system](/health/notifications/README.md) runs on every node and dispatches
+notifications based on configured endpoints and roles. You can enable multiple endpoints on any one node _and_ use Agent
+notifications in parallel with centralized alarm notifications in Netdata Cloud.
+
+> ❗ If you want to enable notifications from multiple nodes in your infrastructure, each running the Netdata Agent, you
+> must configure each node individually.
+
+Below, we'll use [Slack notifications](#enable-slack-notifications) as an example of the process of enabling any
+notification platform.
+
+### Supported notification endpoints
+
+- [**alerta.io**](/health/notifications/alerta/README.md)
+- [**Amazon SNS**](/health/notifications/awssns/README.md)
+- [**Custom endpoint**](/health/notifications/custom/README.md)
+- [**Discord**](/health/notifications/discord/README.md)
+- [**Dynatrace**](/health/notifications/dynatrace/README.md)
+- [**Email**](/health/notifications/email/README.md)
+- [**Flock**](/health/notifications/flock/README.md)
+- [**Google Hangouts**](/health/notifications/hangouts/README.md)
+- [**IRC**](/health/notifications/irc/README.md)
+- [**Kavenegar**](/health/notifications/kavenegar/README.md)
+- [**Matrix**](/health/notifications/matrix/README.md)
+- [**Messagebird**](/health/notifications/messagebird/README.md)
+- [**Netdata Agent dashboard**](/health/notifications/web/README.md)
+- [**Opsgenie**](/health/notifications/opsgenie/README.md)
+- [**PagerDuty**](/health/notifications/pagerduty/README.md)
+- [**Prowl**](/health/notifications/prowl/README.md)
+- [**PushBullet**](/health/notifications/pushbullet/README.md)
+- [**PushOver**](/health/notifications/pushover/README.md)
+- [**Rocket.Chat**](/health/notifications/rocketchat/README.md)
+- [**Slack**](/health/notifications/slack/README.md)
+- [**SMS Server Tools 3**](/health/notifications/smstools3/README.md)
+- [**StackPulse**](/health/notifications/stackpulse/README.md)
+- [**Syslog**](/health/notifications/syslog/README.md)
+- [**Telegram**](/health/notifications/telegram/README.md)
+- [**Twilio**](/health/notifications/twilio/README.md)
+
+### Enable Slack notifications
+
+First, [add an incoming webhook](https://slack.com/apps/A0F7XDUAZ-incoming-webhooks) in Slack for the channel where you
+want to see alarm notifications from Netdata. Click the green **Add to Slack** button, choose the channel, and click the
+**Add Incoming WebHooks Integration** button.
+
+On the following page, you'll receive a **Webhook URL**. That's what you'll need to configure Netdata, so keep it handy.
+
+Navigate to your [Netdata config directory](/docs/configure/nodes.md#netdata-config-directory) and use `edit-config` to
+open the `health_alarm_notify.conf` file:
+
+```bash
+sudo ./edit-config health_alarm_notify.conf
+```
+
+Look for the `SLACK_WEBHOOK_URL=" "` line and add the incoming webhook URL you got from Slack:
+
+```conf
+SLACK_WEBHOOK_URL="https://hooks.slack.com/services/XXXXXXXXX/XXXXXXXXX/XXXXXXXXXXXX"
+```
+
+A few lines down, edit the `DEFAULT_RECIPIENT_SLACK` line to contain a single hash `#` character. This instructs Netdata
+to send a notification to the channel you configured with the incoming webhook.
+
+```conf
+DEFAULT_RECIPIENT_SLACK="#"
+```
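+
+If you want to route alarms for specific roles (such as `sysadmin`) to different channels instead, the same file has
+per-role settings further down. A sketch, with hypothetical channel names:
+
+```conf
+role_recipients_slack[sysadmin]="#alarms-sysadmin"
+role_recipients_slack[webmaster]="#alarms-web"
+```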
+
+To test Slack notifications, switch to the Netdata user.
+
+```bash
+sudo su -s /bin/bash netdata
+```
+
+Next, run the `alarm-notify` script using the `test` option.
+
+```bash
+/usr/libexec/netdata/plugins.d/alarm-notify.sh test
+```
+
+You should receive three notifications in your Slack channel for each health status change: `WARNING`, `CRITICAL`, and
+`CLEAR`.
+
+See the [Agent Slack notifications](/health/notifications/slack/README.md) doc for more options and information.
+
+## What's next?
+
+Now that you have health entities configured to your infrastructure's needs and notifications to inform you of anomalies
+or incidents, your health monitoring setup is complete.
+
+To make your dashboards most useful during root cause analysis, use Netdata's [distributed data
+architecture](/docs/store/distributed-data-architecture.md) for best-in-class performance and scalability.
+
+### Related reference documentation
+
+- [Netdata Cloud · Alarm notifications](https://learn.netdata.cloud/docs/cloud/monitoring/notifications/)
+- [Netdata Agent · Notifications](/health/notifications/README.md)
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2Fmonitor%2Fenable-notifications&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/docs/monitor/view-active-alarms.md b/docs/monitor/view-active-alarms.md
new file mode 100644
index 0000000..8837e48
--- /dev/null
+++ b/docs/monitor/view-active-alarms.md
@@ -0,0 +1,75 @@
+<!--
+title: "View active health alarms"
+description: "View active alarms and their rich data to discover and resolve anomalies and performance issues across your infrastructure."
+custom_edit_url: https://github.com/netdata/netdata/edit/master/docs/monitor/view-active-alarms.md
+-->
+
+# View active health alarms
+
+Every Netdata Agent comes with hundreds of pre-installed health alarms designed to notify you when an anomaly or
+performance issue affects your node or the applications it runs.
+
+As soon as you launch a Netdata Agent and [claim it](/docs/get/README.md#claim-your-node-on-netdata-cloud), you can view
+active alarms in both the local dashboard and Netdata Cloud.
+
+## View active alarms in Netdata Cloud
+
+You can see active alarms from any node in your infrastructure in two ways: click on the bell 🔔 icon in the top
+navigation, or click on the first column of any node's row in Nodes. This column's color changes based on the node's
+[health status](/health/REFERENCE.md#alarm-statuses): gray is `CLEAR`, yellow is `WARNING`, and red is `CRITICAL`.
+
+![The Alarms panel in Netdata
+Cloud](https://user-images.githubusercontent.com/1153921/93541137-70761f00-f90a-11ea-89ef-7948c6213200.png)
+
+The Alarms panel lists all active alarms for nodes within that War Room, and tells you which chart triggered the alarm,
+what that chart's current value is, the alarm that triggered it, and when the alarm status first began.
+
+Use the input field in the Alarms panel to filter active alarms. You can sort by the node's name, alarm, status, chart
+that triggered the alarm, or the operating system. Read more about the [filtering
+syntax](https://learn.netdata.cloud/docs/cloud/war-rooms#node-filter) to build valuable filters for your infrastructure.
+
+Click on the 3-dot icon (`⋮`) to view active alarm information or navigate directly to the offending chart in that
+node's Cloud dashboard with the **Go to chart** button.
+
+The active alarm information gives you details about the alarm that's been triggered. You can see the alarm's
+configuration, how it calculates warning or critical alarms, and which configuration file you could edit on that node if
+you want to tweak or disable the alarm to better suit your needs.
+
+![Active alarm details in Netdata
+Cloud](https://user-images.githubusercontent.com/1153921/93541139-710eb580-f90a-11ea-809d-25afe1270108.png)
+
+## View active alarms in the Netdata Agent
+
+Find the bell 🔔 icon in the top navigation to bring up a modal that shows currently raised alarms, all running alarms,
+and the alarms log. Here is an example of a raised `system.cpu` alarm, followed by the full list and alarm log:
+
+![Animated GIF of looking at raised alarms and the alarm
+log](https://user-images.githubusercontent.com/1153921/80842482-8c289500-8bb6-11ea-9791-600cfdbe82ce.gif)
+
+And a static screenshot of the raised CPU alarm:
+
+![Screenshot of a raised system CPU
+alarm](https://user-images.githubusercontent.com/1153921/80842330-2dfbb200-8bb6-11ea-8147-3cd366eb0f37.png)
+
+The alarm itself is named **system - cpu**, and its context is `system.cpu`. Beneath that is an auto-updating badge that
+shows the latest value of the chart that triggered the alarm.
+
+With the three icons beneath that and the **role** designation, you can:
+
+1. Scroll to the chart associated with this raised alarm.
+2. Copy a link to the badge to your clipboard.
+3. Copy the code to embed the badge onto another web page using an `<embed>` element.
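+
+For reference, an embedded badge is just an SVG image served by the Agent's badge API. A sketch of what the embed code
+looks like, assuming the default local address and the `10min_cpu_usage` alarm:
+
+```html
+<embed src="http://localhost:19999/api/v1/badge.svg?chart=system.cpu&alarm=10min_cpu_usage" type="image/svg+xml" height="20"/>
+```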
+
+The table on the right-hand side displays information about the health entity that triggered the alarm, which you can
+use as a reference to [configure alarms](/docs/monitor/configure-alarms.md).
+
+## What's next?
+
+With the information that appears on Netdata Cloud and the local dashboard about active alarms, you can [configure
+alarms](/docs/monitor/configure-alarms.md) to match your infrastructure's needs or your team's goals.
+
+If you're happy with the pre-configured alarms, skip ahead to [enable
+notifications](/docs/monitor/enable-notifications.md) to use Netdata Cloud's centralized alarm notifications and/or
+per-node notifications to endpoints like Slack, PagerDuty, Twilio, and more.
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2Fmonitor%2Fview-active-alarms&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/docs/netdata-for-IoT.md b/docs/netdata-for-IoT.md
new file mode 100644
index 0000000..77b9522
--- /dev/null
+++ b/docs/netdata-for-IoT.md
@@ -0,0 +1,74 @@
+<!--
+title: "Netdata for IoT"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/docs/netdata-for-IoT.md
+-->
+
+# Netdata for IoT
+
+![image1](https://cloud.githubusercontent.com/assets/2662304/14252446/11ae13c4-fa90-11e5-9d03-d93a3eb3317a.gif)
+
+> New to Netdata? Check out its demo: **<https://my-netdata.io/>**
+>
+>[![User
+>Base](https://registry.my-netdata.io/api/v1/badge.svg?chart=netdata.registry_entries&dimensions=persons&label=user%20base&units=null&value_color=blue&precision=0&v41)](https://registry.my-netdata.io/#netdata_registry)
+>[![Monitored
+>Servers](https://registry.my-netdata.io/api/v1/badge.svg?chart=netdata.registry_entries&dimensions=machines&label=servers%20monitored&units=null&value_color=orange&precision=0&v41)](https://registry.my-netdata.io/#netdata_registry)
+>[![Sessions
+>Served](https://registry.my-netdata.io/api/v1/badge.svg?chart=netdata.registry_sessions&label=sessions%20served&units=null&value_color=yellowgreen&precision=0&v41)](https://registry.my-netdata.io/#netdata_registry)
+>
+>[![New Users
+>Today](https://registry.my-netdata.io/api/v1/badge.svg?chart=netdata.registry_entries&dimensions=persons&after=-86400&options=unaligned&group=incremental-sum&label=new%20users%20today&units=null&value_color=blue&precision=0&v40)](https://registry.my-netdata.io/#netdata_registry)
+>[![New Machines
+>Today](https://registry.my-netdata.io/api/v1/badge.svg?chart=netdata.registry_entries&dimensions=machines&group=incremental-sum&after=-86400&options=unaligned&label=servers%20added%20today&units=null&value_color=orange&precision=0&v40)](https://registry.my-netdata.io/#netdata_registry)
+>[![Sessions
+>Today](https://registry.my-netdata.io/api/v1/badge.svg?chart=netdata.registry_sessions&after=-86400&group=incremental-sum&options=unaligned&label=sessions%20served%20today&units=null&value_color=yellowgreen&precision=0&v40)](https://registry.my-netdata.io/#netdata_registry)
+
+---
+
+Netdata is a **very efficient** server performance monitoring solution. When running on server hardware, it can collect
+thousands of system and application metrics **per second** with just 1% CPU utilization of a single core. Its web server
+responds to most data requests in about **half a millisecond**, making its web dashboards feel instantaneous and amazingly fast!
+
+Netdata can also be a very efficient real-time monitoring solution for **IoT devices** (RPIs, routers, media players,
+wifi access points, industrial controllers and sensors of all kinds). Netdata will generally run everywhere a Linux
+kernel runs (and it is glibc and [musl-libc](https://www.musl-libc.org/) friendly).
+
+You can use it as a data collection agent (pulling data using its API), embed its charts in other web pages or
+consoles, or access its dashboard directly with your browser.
+
+The Netdata web API already provides **reduce** functions allowing it to report the **average** and **max** for any
+timeframe. It can also respond in many formats, including JSON, JSONP, CSV, and HTML. Its API is also a **Google Charts**
+provider, so it can be used directly by Google Sheets, Google Charts, and Google widgets.
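+
+For example, the query below asks for ten minutes of CPU data reduced to 20 averaged points in CSV, the kind of
+lightweight request an IoT dashboard might poll (a sketch against the default local address):
+
+```bash
+curl "http://localhost:19999/api/v1/data?chart=system.cpu&after=-600&points=20&group=average&format=csv"
+```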
+
+![sensors](https://cloud.githubusercontent.com/assets/2662304/15339745/8be84540-1c8e-11e6-9e9a-106dea7539b6.gif)
+
+Although Netdata has been significantly optimized to lower the CPU and RAM resources it consumes, the plethora of data
+collection plugins may be too heavy for weak IoT devices. Follow the [Netdata Agent performance
+guide](/docs/guides/configure/performance.md) to reduce the Agent's footprint on such devices.
+
+## Monitoring RPi temperature
+
+The Python version of the sensors plugin uses `lm-sensors`. Unfortunately, the Raspberry Pi's temperature sensors are
+not supported by `lm-sensors`.
+
+Netdata also has a Bash version of the sensors plugin that can read Raspberry Pi temperatures. It is disabled by
+default to avoid conflicts with the Python version.
+
+To enable it, run:
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory
+sudo ./edit-config charts.d.conf
+```
+
+and uncomment this line:
+
+```sh
+sensors=force
+```
+
+Then restart Netdata. You will see a chart like this:
+
+![image](https://user-images.githubusercontent.com/2662304/29658868-23aa65ae-88c5-11e7-9dad-c159600db5cc.png)
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2Fnetdata-for-IoT&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/docs/netdata-security.md b/docs/netdata-security.md
new file mode 100644
index 0000000..50c6b05
--- /dev/null
+++ b/docs/netdata-security.md
@@ -0,0 +1,229 @@
+<!--
+title: "Security design"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/docs/netdata-security.md
+-->
+
+# Security design
+
+Netdata has been designed with security in mind. We give special attention to all aspects of its operation to ensure everything is as secure as possible.
+
+**Table of Contents**
+
+1. [Your data are safe with Netdata](#your-data-are-safe-with-netdata)
+2. [Your systems are safe with Netdata](#your-systems-are-safe-with-netdata)
+3. [Netdata is read-only](#netdata-is-read-only)
+4. [Netdata viewers authentication](#netdata-viewers-authentication)
+ - [Why Netdata should be protected](#why-netdata-should-be-protected)
+ - [Protect Netdata from the internet](#protect-netdata-from-the-internet)
+     - [Expose Netdata only in a private LAN](#expose-netdata-only-in-a-private-lan)
+     - [Use an authenticating web server in proxy mode](#use-an-authenticating-web-server-in-proxy-mode)
+     - [Other methods](#other-methods)
+5. [Registry or how to not send any information to a third party server](#registry-or-how-to-not-send-any-information-to-a-third-party-server)
+
+## Your data are safe with Netdata
+
+Netdata collects raw data from many sources. For each source, Netdata uses a plugin that connects to the source (or reads the relevant files produced by the source), receives raw data and processes them to calculate the metrics shown on Netdata dashboards.
+
+Even if Netdata plugins connect to your database server, or read your application log file to collect raw data, the product of this data collection process is always a number of **chart metadata and metric values** (summarized data for dashboard visualization). All Netdata plugins (internal to the Netdata daemon, and external ones written in any computer language) convert the raw data they collect into metrics, and only these metrics are stored in Netdata databases, sent to upstream Netdata servers, or archived to external time-series databases.
+
+> The **raw data** collected by Netdata never leave the host they are collected on. **The only data Netdata exposes are chart metadata and metric values.**
+
+This means that Netdata can safely be used in environments that require the highest level of data isolation (like PCI Level 1).
+
+## Your systems are safe with Netdata
+
+We are very proud that **the Netdata daemon runs as a normal system user, without any special privileges**. This is quite an achievement for a monitoring system that collects all kinds of system and application metrics.
+
+There are a few cases, however, where raw source data are only exposed to processes with escalated privileges. To support these cases, Netdata attempts to minimize and completely isolate the code that runs with escalated privileges.
+
+So, Netdata **plugins**, even those running with escalated capabilities or privileges, perform a **hard coded data collection job**. They do not accept commands from Netdata. The communication is strictly **unidirectional**: from the plugin towards the Netdata daemon. The original application data collected by each plugin never leave the process in which they are collected, are not saved, and are not transferred to the Netdata daemon. The communication from the plugins to the Netdata daemon includes only chart metadata and processed metric values.
+
+Child nodes use the same protocol when streaming metrics to their parent nodes. The raw data collected by the plugins of
+child Netdata servers **never leave the host they are collected on**. The only data appearing on the wire are chart
+metadata and metric values. This communication is also **unidirectional**: child nodes never accept commands from
+parent Netdata servers.
+
+## Netdata is read-only
+
+Netdata **dashboards are read-only**. Dashboard users can view and examine metrics collected by Netdata, but cannot instruct Netdata to do something other than present the already collected metrics.
+
+Netdata dashboards do not expose sensitive information. Business data of any kind, the kernel version, O/S version, application versions, host IPs, etc are not stored and are not exposed by Netdata on its dashboards.
+
+## Netdata viewers authentication
+
+Netdata is a monitoring system. It should be protected, the same way you protect all your admin apps. We assume Netdata will be installed privately, for your eyes only.
+
+### Why Netdata should be protected
+
+Viewers will be able to get some information about the system Netdata is running on. This information is everything the dashboard provides. The dashboard includes a list of the services each system runs (the legends of the charts under the `Systemd Services` section), the applications running (the legends of the charts under the `Applications` section), the disks of the system and their names, the user accounts of the system that are running processes (the `Users` and `User Groups` section of the dashboard), the network interfaces and their names (not the IPs) and detailed information about the performance of the system and its applications.
+
+This information is not sensitive (meaning that it is not your business data), but **it is important for possible attackers**. It gives them clues on what to check and what to try, and in the case of a DDoS against your applications, it tells them whether they are doing it right or not.
+
+Also, viewers could use Netdata itself to stress your servers. Although the Netdata daemon runs unprivileged, with the minimum process priority (scheduling priority `idle` - lower than nice 19) and adjusts its OutOfMemory (OOM) score to 1000 (so that it will be first to be killed by the kernel if the system starves for memory), some pressure can be applied on your systems if someone attempts a DDoS against Netdata.
+
+### Protect Netdata from the internet
+
+Netdata is a distributed application. Most likely you will have many installations of it. Since it is distributed and you are expected to jump from server to server, there is little benefit in adding local authentication to each Netdata instance.
+
+Until we add a distributed authentication method to Netdata, you have the following options:
+
+#### Expose Netdata only in a private LAN
+
+If your organisation has a private administration and management LAN, you can bind Netdata to this network interface on all your servers. This is done in `netdata.conf` with these settings:
+
+```
+[web]
+ bind to = 10.1.1.1:19999 localhost:19999
+```
+
+You can bind Netdata to multiple IPs and ports. If you use hostnames, Netdata will resolve them and use all the IPs (in the above example `localhost` usually resolves to both `127.0.0.1` and `::1`).
+
+**This is the best and the suggested way to protect Netdata**. Your systems **should** have a private administration and management LAN, so that all management tasks are performed without any possibility of them being exposed on the internet.
+
+For cloud based installations, if your cloud provider does not provide such a private LAN (or if you use multiple providers), you can create a virtual management and administration LAN with tools like `tincd` or `gvpe`. These tools create a mesh VPN allowing all servers to communicate securely and privately. Your administration stations join this mesh VPN to get access to management and administration tasks on all your cloud servers.
+
+For `gvpe` we have developed a [simple provisioning tool](https://github.com/netdata/netdata-demo-site/tree/master/gvpe) you may find handy (it includes statically compiled `gvpe` binaries for Linux and FreeBSD, and also a script to compile `gvpe` on your macOS system). We use this to create a management and administration LAN for all Netdata demo sites (spread all over the internet using multiple hosting providers).
+
+---
+
+In Netdata v1.9+ there is also access list support, like this:
+
+```
+[web]
+ bind to = *
+ allow connections from = localhost 10.* 192.168.*
+```
+
+#### Fine-grained access control
+
+The access list support allows filtering of all incoming connections, by specific IP addresses, ranges
+or validated DNS lookups. Only connections that match an entry on the list will be allowed:
+
+```
+[web]
+ allow connections from = localhost 192.168.* 1.2.3.4 homeip.net
+```
+
+Connections from the IP addresses are allowed if the connection IP matches one of the patterns given.
+The alias localhost is always checked against 127.0.0.1, any other symbolic names need to resolve in
+both directions using DNS. In the above example the IP address of `homeip.net` must reverse DNS resolve
+to the incoming IP address and a DNS lookup on `homeip.net` must return the incoming IP address as
+one of the resolved addresses.
+
+More specific control of what each incoming connection can do can be specified through the access control
+list settings:
+
+```
+[web]
+ allow connections from = 160.1.*
+ allow badges from = 160.1.1.2
+ allow streaming from = 160.1.2.*
+ allow management from = control.subnet.ip
+ allow netdata.conf from = updates.subnet.ip
+ allow dashboard from = frontend.subnet.ip
+```
+
+In this example, only connections from `160.1.x.x` are allowed; only the specific IP address `160.1.1.2`
+can access badges; and only IP addresses in the smaller range `160.1.2.x` can stream data. The three
+hostnames shown can access specific features. This assumes that DNS is set up to resolve these names
+to IP addresses within the `160.1.x.x` range, and that reverse DNS is set up for these hosts.
+
+#### Use an authenticating web server in proxy mode
+
+Use one web server to provide authentication in front of **all your Netdata servers**. So, you will be accessing all your Netdata with URLs like `http://{HOST}/netdata/{NETDATA_HOSTNAME}/` and authentication will be shared among all of them (you will sign-in once for all your servers). Instructions are provided on how to set the proxy configuration to have Netdata run behind [nginx](Running-behind-nginx.md), [Apache](Running-behind-apache.md), [lighttpd](Running-behind-lighttpd.md) and [Caddy](Running-behind-caddy.md).
+
+To use this method, you should firewall-protect all your Netdata servers, so that only the web server's IP will be allowed to directly access Netdata. To do this, run this on each of your servers (or use your firewall manager):
+
+```sh
+PROXY_IP="1.2.3.4"
+iptables -t filter -I INPUT -p tcp --dport 19999 \! -s ${PROXY_IP} -m conntrack --ctstate NEW -j DROP
+```
+
+*commands to allow direct access to Netdata only from the web server proxy*
+
+The above will prevent anyone except your web server from accessing a Netdata dashboard running on the host.
+
+For Netdata v1.9+ you can also use `netdata.conf`:
+
+```
+[web]
+ allow connections from = localhost 1.2.3.4
+```
+
+Of course you can add more IPs.
+
+For Netdata prior to v1.9, if you want to allow multiple IPs, use this:
+
+```sh
+# space separated list of IPs to allow access Netdata
+NETDATA_ALLOWED="1.2.3.4 5.6.7.8 9.10.11.12"
+NETDATA_PORT=19999
+
+# create a new filtering chain || or empty an existing one named netdata
+iptables -t filter -N netdata 2>/dev/null || iptables -t filter -F netdata
+for x in ${NETDATA_ALLOWED}
+do
+ # allow this IP
+ iptables -t filter -A netdata -s ${x} -j ACCEPT
+done
+
+# drop all other IPs
+iptables -t filter -A netdata -j DROP
+
+# delete the input chain hook (if it exists)
+iptables -t filter -D INPUT -p tcp --dport ${NETDATA_PORT} -m conntrack --ctstate NEW -j netdata 2>/dev/null
+
+# add the input chain hook (again)
+# to send all new Netdata connections to our filtering chain
+iptables -t filter -I INPUT -p tcp --dport ${NETDATA_PORT} -m conntrack --ctstate NEW -j netdata
+```
+
+_script to allow access to Netdata only from a number of hosts_
+
+You can run the above any number of times. Each time it runs it refreshes the list of allowed hosts.
+
+#### Other methods
+
+Of course, there are many more methods you could use to protect Netdata:
+
+- bind Netdata to localhost and use `ssh -L 19998:127.0.0.1:19999 remote.netdata.ip` to forward connections from local port 19998 to remote port 19999. This way you can ssh to a Netdata server and then use `http://127.0.0.1:19998/` on your computer to access the remote Netdata dashboard.
+
+- If you are always under a static IP, you can use the script given above to allow direct access to your Netdata servers without authentication, from all your static IPs.
+
+- install all your Netdata in **headless data collector** mode, forwarding all metrics in real-time to a parent
+ Netdata server, which will be protected with authentication using an nginx server running locally at the parent
+ Netdata server. This requires more resources (you will need a bigger parent Netdata server), but does not require
+ any firewall changes, since all the child Netdata servers will not be listening for incoming connections.
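+
+To illustrate the headless setup above, here is a minimal sketch of a child's `stream.conf` (the parent address and API
+key are hypothetical; see the [streaming documentation](/streaming/README.md) for the authoritative reference):
+
+```conf
+[stream]
+    enabled = yes
+    destination = 10.1.1.1:19999
+    api key = 11111111-2222-3333-4444-555555555555
+```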
+
+## Anonymous Statistics
+
+### Registry or how to not send any information to a third party server
+
+The default configuration uses a public registry under registry.my-netdata.io (more information about the registry [here](/registry/README.md)). Please be aware that if you use that public registry, you submit the following information to a third party server:
+
+- The URL where you open the web UI in the browser (via the HTTP request referrer)
+- The hostnames of the Netdata servers
+
+If sending this information to the central Netdata registry violates your security policies, you can configure Netdata to [run your own registry](/registry/README.md#run-your-own-registry).
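+
+A minimal sketch of what that looks like in `netdata.conf` on the node acting as the registry (the announce URL is a
+hypothetical example; see the [registry documentation](/registry/README.md#run-your-own-registry) for details):
+
+```conf
+[registry]
+    enabled = yes
+    registry to announce = http://netdata.registry.mycompany.lan:19999
+```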
+
+### Opt-out of anonymous statistics
+
+Starting with v1.12, Netdata collects anonymous usage information by default and sends it to Google Analytics. Read
+about the information collected, and learn how to opt out, on our [anonymous statistics](anonymous-statistics.md) page.
+
+The usage statistics are _vital_ for us, as we use them to discover bugs and prioritize new features. We thank you for
+_actively_ contributing to Netdata's future.
+
+## Netdata directories
+
+| path | owner | permissions | Netdata's access | comments |
+|:---|:----|:----------|:------|:-------|
+| `/etc/netdata`|user `root`<br/>group `netdata`|dirs `0755`<br/>files `0640`|reads|**Netdata config files**<br/>may contain sensitive information, so group `netdata` is allowed to read them.|
+| `/usr/libexec/netdata`|user `root`<br/>group `root`|executable by anyone<br/>dirs `0755`<br/>files `0644` or `0755`|executes|**Netdata plugins**<br/>permissions depend on the file - not all of them should have the executable flag.<br/>there are a few plugins that run with escalated privileges (Linux capabilities or `setuid`) - these plugins should be executable only by group `netdata`.|
+| `/usr/share/netdata`|user `root`<br/>group `netdata`|readable by anyone<br/>dirs `0755`<br/>files `0644`|reads and sends over the network|**Netdata web static files**<br/>these files are sent over the network to anyone that has access to the Netdata web server. Netdata checks the ownership of these files (using settings at the `[web]` section of `netdata.conf`) and refuses to serve them if they are not properly owned. Symbolic links are not supported. Netdata also refuses to serve URLs with `..` in their name.|
+| `/var/cache/netdata`|user `netdata`<br/>group `netdata`|dirs `0750`<br/>files `0660`|reads, writes, creates, deletes|**Netdata ephemeral database files**<br/>Netdata stores its ephemeral real-time database here.|
+| `/var/lib/netdata`|user `netdata`<br/>group `netdata`|dirs `0750`<br/>files `0660`|reads, writes, creates, deletes|**Netdata permanent database files**<br/>Netdata stores here the registry data, health alarm log db, etc.|
+| `/var/log/netdata`|user `netdata`<br/>group `root`|dirs `0755`<br/>files `0644`|writes, creates|**Netdata log files**<br/>all the Netdata applications, logs their errors or other informational messages to files in this directory. These files should be log rotated.|
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2Fnetdata-security&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/docs/overview/netdata-monitoring-stack.md b/docs/overview/netdata-monitoring-stack.md
new file mode 100644
index 0000000..1504d5f
--- /dev/null
+++ b/docs/overview/netdata-monitoring-stack.md
@@ -0,0 +1,62 @@
+<!--
+title: "Use Netdata standalone or as part of your monitoring stack"
+description: "Netdata can run independently or as part of a larger monitoring stack thanks to its flexibility, interoperable core, and exporting features."
+custom_edit_url: https://github.com/netdata/netdata/edit/master/docs/overview/netdata-monitoring-stack.md
+-->
+
+# Use Netdata standalone or as part of your monitoring stack
+
+Netdata is an extremely powerful monitoring, visualization, and troubleshooting platform. While you can use it as an
+effective standalone tool, we also designed it to be open and interoperable with other tools you might already be using.
+
+Netdata helps you collect everything and scales to infrastructure of any size, but it doesn't lock in your data or force
+you to use specific tools or methodologies. Each feature is extensible and interoperable, so it can work in parallel with
+other tools. For example, you can use Netdata to collect metrics, visualize metrics with a second open-source program,
+and centralize your metrics in a cloud-based time-series database solution for long-term storage or further analysis.
+
+You can build a new monitoring stack, including Netdata, or integrate Netdata's metrics with your existing monitoring
+stack. No matter which route you take, Netdata helps you monitor infrastructure of any size.
+
+Here are a few ways to enrich your existing monitoring and troubleshooting stack with Netdata:
+
+## Collect metrics from Prometheus endpoints
+
+Netdata automatically detects 600 popular endpoints and collects per-second metrics from them via the [generic
+Prometheus collector](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/prometheus). This even
+includes support for Windows 10 via [`windows_exporter`](https://github.com/prometheus-community/windows_exporter).
+
+This collector is installed and enabled on all Agent installations by default, so you don't need to waste time
+configuring Netdata. Netdata will detect these Prometheus metrics endpoints and collect even more granular metrics than
+your existing solutions. You can now use all of Netdata's meaningfully-visualized charts to diagnose issues and
+troubleshoot anomalies.
+
+## Export metrics to external time-series databases
+
+Netdata can send its per-second metrics to external time-series databases, such as InfluxDB, Prometheus, Graphite,
+TimescaleDB, ElasticSearch, AWS Kinesis Data Streams, Google Cloud Pub/Sub Service, and many others.
+
+To [export metrics to external time-series databases](/docs/export/external-databases.md), you configure an [exporting
+_connector_](/docs/export/enable-connector.md). These connectors support filtering and resampling for granular control
+over which metrics you export, and at what volume. You can export metrics as collected, as averages, or as sums of
+interpolated values, depending on your needs and the other monitoring tools you use.
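+
+As a hedged sketch, a Graphite connector section in `exporting.conf` might look like the following; the instance name,
+destination, and values are illustrative:
+
+```conf
+[graphite:my_graphite_instance]
+    enabled = yes
+    destination = localhost:2003
+    data source = average
+    update every = 10
+```
+
+Here `data source = average` exports resampled averages; `as collected` and `sum` are the other modes described above.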
+
+Once you have Netdata's metrics in a secondary time-series database, you can use them however you'd like, such as
+in additional visualization/dashboarding tools or for aggregating data from multiple sources.
+
+## Visualize metrics with Grafana
+
+One popular monitoring stack is Netdata, Graphite, and Grafana. Netdata acts as the stack's metrics collection
+powerhouse, Graphite the time-series database, and Grafana the visualization platform. With Netdata at the core, you can
+be confident that your monitoring stack is powered by all possible metrics, from all possible sources, from every node
+in your infrastructure.
+
+Of course, just because you export or visualize metrics elsewhere, it doesn't mean Netdata's equivalent features
+disappear. You can always build new dashboards in Netdata Cloud, drill down into per-second metrics using Netdata's
+charts, or use Netdata's health watchdog to send notifications whenever an anomaly strikes.
+
+## What's next?
+
+Whether you're using Netdata standalone or as part of a larger monitoring stack, the next step is the same: [**Get
+Netdata**](/docs/get/README.md).
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2Foverview%2Fnetdata-monitoring-stack&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/docs/overview/what-is-netdata.md b/docs/overview/what-is-netdata.md
new file mode 100644
index 0000000..8ee0db4
--- /dev/null
+++ b/docs/overview/what-is-netdata.md
@@ -0,0 +1,76 @@
+<!--
+title: "What is Netdata?"
+description: "Netdata is distributed, real-time performance and health monitoring for systems and applications on a single node or an entire infrastructure."
+custom_edit_url: https://github.com/netdata/netdata/edit/master/docs/overview/what-is-netdata.md
+-->
+
+# What is Netdata?
+
+Netdata helps sysadmins, SREs, DevOps engineers, and IT professionals collect all possible metrics from systems and
+applications, visualize these metrics in real-time, and troubleshoot complex performance problems.
+
+Netdata's solution uses two components, the Netdata Agent and Netdata Cloud, to deliver real-time performance and health
+monitoring for both single nodes and entire infrastructures.
+
+## Netdata Agent
+
+Netdata's distributed monitoring Agent collects thousands of metrics from systems, hardware, and applications with zero
+configuration. It runs permanently on all your physical/virtual servers, containers, cloud deployments, and edge/IoT
+devices.
+
+You can [install](/docs/get/README.md#install-the-netdata-agent) Netdata on most Linux distributions (Ubuntu, Debian,
+CentOS, and more), container/microservice platforms (Kubernetes clusters, Docker), and many other operating systems
+(FreeBSD, macOS), with no `sudo` required.
+
+![The Netdata
+Agent](https://user-images.githubusercontent.com/1153921/94492596-72a86b00-019f-11eb-91ab-224e6ac9ea21.png)
+
+## Netdata Cloud
+
+Netdata Cloud is a web application that gives you real-time visibility for your entire infrastructure. With Netdata
+Cloud, you can view key metrics, insightful charts, and active alarms from all your nodes in a single web interface.
+When an anomaly strikes, seamlessly navigate to any node to troubleshoot and discover the root cause with the familiar
+Netdata dashboard.
+
+**[Netdata Cloud is
+free](https://learn.netdata.cloud/docs/cloud/faq-glossary#how-much-does-netdata-cost-how-and-why-is-it-free)**! You can
+add an entire infrastructure of nodes, invite all your colleagues, and visualize any number of metrics, charts, and
+alarms entirely for free.
+
+While Netdata Cloud offers a centralized method of monitoring your Agents, your metrics data is not stored or
+centralized in any way. Metrics data remains with your nodes and is only streamed to your browser, through Cloud, when
+you're viewing the Netdata Cloud interface.
+
+![Netdata Cloud](https://user-images.githubusercontent.com/1153921/94492597-73410180-019f-11eb-9a9e-032420baa489.png)
+
+## What you can do with Netdata
+
+Netdata is designed to be both simple to use and flexible for every monitoring, visualization, and troubleshooting use
+case:
+
+- **Collect**: Netdata collects all available metrics from your system and applications with 300+ collectors,
+ Kubernetes service discovery, and in-depth container monitoring, all while using only 1% CPU and a few MB of RAM. It
+ even collects metrics from Windows machines.
+- **Visualize**: The dashboard meaningfully presents charts to help you understand the relationships between your
+ hardware, operating system, running apps/services, and the rest of your infrastructure. Add nodes to Netdata Cloud
+ for a complete view of your infrastructure from a single pane of glass.
+- **Monitor**: Netdata's health watchdog uses hundreds of preconfigured alarms to notify you via Slack, email,
+ PagerDuty, and more when an anomaly strikes. Customize with dynamic thresholds, hysteresis, alarm templates, and
+ role-based notifications.
+- **Troubleshoot**: 1s granularity helps you detect and analyze anomalies other monitoring platforms might have missed.
+ Interactive visualizations reduce your reliance on the console, and historical metrics help you trace issues back to
+ their root cause.
+- **Store**: Netdata's database engine efficiently stores per-second metrics for days, weeks, or even
+ months. Every distributed node stores metrics locally, simplifying deployment, slashing costs, and enriching
+ Netdata's interactive dashboards.
+- **Export**: Integrate per-second metrics with other time-series databases like Graphite, Prometheus, InfluxDB,
+ TimescaleDB, and more with Netdata's interoperable and extensible core.
+- **Stream**: Aggregate metrics from any number of distributed nodes in one place for in-depth analysis, including
+ ephemeral nodes in a Kubernetes cluster.
+
+## What's next?
+
+Learn more about [why you should use Netdata](/docs/overview/why-netdata.md), or [how Netdata works with your existing
+monitoring stack](/docs/overview/netdata-monitoring-stack.md).
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2Foverview%2Fwhat-is-netdata&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/docs/overview/why-netdata.md b/docs/overview/why-netdata.md
new file mode 100644
index 0000000..27a30a4
--- /dev/null
+++ b/docs/overview/why-netdata.md
@@ -0,0 +1,63 @@
+<!--
+title: "Why use Netdata?"
+description: "Netdata is simple to deploy, scalable, and optimized for troubleshooting. Cut the complexity and expense out of your monitoring stack."
+custom_edit_url: https://github.com/netdata/netdata/edit/master/docs/overview/why-netdata.md
+-->
+
+# Why use Netdata?
+
+Netdata takes a different approach to helping people build extraordinary infrastructure. It was built out of frustration
+with existing monitoring tools that are too complex, too expensive, and don't help their users actually troubleshoot
+complex performance and health issues.
+
+Netdata is:
+
+## Simple to deploy
+
+- **One-line deployment** for Linux distributions, plus support for Kubernetes/Docker infrastructures.
+- **Zero configuration and maintenance** required to collect thousands of metrics, every second, from the underlying
+ OS and running applications.
+- **Prebuilt charts and alarms** alert you to common anomalies and performance issues without manual configuration.
+- **Distributed storage** to simplify the cost and complexity of storing metrics data from any number of nodes.
+
+## Powerful and scalable
+
+- **1% CPU utilization, a few MB of RAM, and minimal disk I/O** to run the monitoring Agent on bare metal, virtual
+ machines, containers, and even IoT devices.
+- **Per-second granularity** for an unlimited number of metrics based on the hardware and applications you're running
+ on your nodes.
+- **Interoperable exporters** let you connect Netdata's per-second metrics with an existing monitoring stack and other
+ time-series databases.
+
+## Optimized for troubleshooting
+
+- **Visual anomaly detection** with a UI/UX that emphasizes the relationships between charts.
+- **Customizable dashboards** to pinpoint correlated metrics, respond to incidents, and help you streamline your
+ workflows.
+- **Distributed metrics in a centralized interface** to help users or teams trace complex issues across distributed
+ nodes.
+
+## Comparison with other monitoring solutions
+
+Netdata offers many benefits over existing monitoring solutions, whether they're expensive SaaS products or other
+open-source tools.
+
+| Netdata | Others (open-source and commercial) |
+| :-------------------------------------------------------------- | :--------------------------------------------------------------- |
+| **High resolution metrics** (1s granularity) | Low resolution metrics (10s granularity at best) |
+| Collects **thousands of metrics per node** | Collects just a few metrics |
+| Fast UI optimized for **anomaly detection** | UI suited only to an abstract view |
+| **Long-term, autonomous storage** at one-second granularity | Centralized metrics in an expensive data lake at 10s granularity |
+| **Meaningful presentation**, to help you understand the metrics | You have to know the metrics before you start |
+| Install and get results **immediately** | Long sales process and complex installation process |
+| Use it for **troubleshooting** performance problems | Only gathers _statistics of past performance_ |
+| **Kills the console** for tracing performance issues | The console is always required for troubleshooting |
+| Requires **zero dedicated resources** | Require large dedicated resources |
+
+## What's next?
+
+Whether you already have a monitoring stack you want to integrate Netdata into, or are building something from the
+ground-up, you should read more on how Netdata can work either [standalone or as an interoperable part of a monitoring
+stack](/docs/overview/netdata-monitoring-stack.md).
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2Foverview%2Fwhy-netdata&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/docs/privacy-policy.md b/docs/privacy-policy.md
new file mode 100644
index 0000000..0152b0e
--- /dev/null
+++ b/docs/privacy-policy.md
@@ -0,0 +1,134 @@
+<!--
+title: "Privacy Policy"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/docs/privacy-policy.md
+-->
+
+# Privacy Policy
+
+## 1. Preamble
+
+This Privacy Policy explains the collection, use, processing, transferring and disclosure of personal information by Netdata, Inc. (“ND” or “Netdata”), a Delaware Corporation.
+
+This Privacy Policy is incorporated into and made part of the Netdata Master Terms of Use (“Master Terms”) located [here](terms-of-use.md).
+
+Unless otherwise noted on a particular website or service hosted by Netdata, this Privacy Policy applies to your use of all websites that Netdata operates. These include <https://my-netdata.io> and <https://netdata.cloud>, together with all other subdomains thereof, (collectively, the “Websites”). This Privacy Policy also applies to all products, information, and services provided through the Websites, including without limitation the ND agent, the ND registry, the ND hub and the ND cloud website (together with the Websites, the “Services”).
+
+In addition, supplemental Privacy Policy terms (“Supplemental Privacy Policy Terms”) may apply to a particular Service. All such Supplemental Privacy Policy Terms will be accessible for you to read either within, or through your use of, that particular Service.
+
+By accessing or using any of the Services, you are accepting and agreeing to the practices described in this Privacy Policy.
+
+## 2. Our Principles
+
+Netdata has designed this policy to be consistent with the following principles:
+
+- Privacy policies should be human readable and easy to find.
+- Data collection, storage, and processing should be simplified as much as possible to enhance security, ensure consistency, and make the practices easy for users to understand.
+- Data practices should always meet the reasonable expectations of users.
+
+## 3. Personal Information ND Collects and How it is Used
+
+As used in this policy, “personal information” means information that would allow someone to identify you, including your name, email address, IP address, or other information from which someone could deduce your identity.
+
+ND collects and uses personal information in the following ways:
+
+Website and Analytics: When you visit our Websites and use our Services, ND collects some information about your activities through tools such as Google Analytics. The type of information that we collect focuses on general information such as country or city where you are located, pages visited, time spent on pages, heat-map of visitors’ activity on the site, information about the browser you are using, etc. ND collects and uses this information pursuant to our legitimate interest in enhancing the security and utility of our Services. The information we gather and process is used in the aggregate to spot trends without deliberately identifying individuals.
+
+Note that you can learn about Google’s practices in connection with its analytics services and how to opt out of it by downloading the Google Analytics opt-out browser add-on, available at <https://tools.google.com/dlpage/gaoptout>.
+
+Information from Cookies: We and our service providers (for example, Google Analytics as described above) may collect information using cookies or similar technologies for the purposes described above and below. Cookies are pieces of information that are stored by your browser on the hard drive or memory of your computer or other Internet access device. Cookies may enable us to personalize your experience on the Services, maintain a persistent session, passively collect demographic information about your computer, and monitor advertisements and other activities. The Websites may use different kinds of cookies and other types of local storage (such as browser-based or plugin-based local storage).
+
+ND Registry: The global registry, together with certain browser features, allows Netdata to provide unified cross-server dashboards, via the node menu.
+The menu lists the Netdata servers you have visited. For example, when you jump from server to server using the node menu, several session settings
+(like the currently viewed charts, the current zoom and pan operations on the charts, etc.) are propagated to the new server, so that the new dashboard will come with exactly the
+same view. The global registry keeps track of 4 entities:
+
+1. **machines**: i.e. the Netdata installations (a random GUID generated by each Netdata the first time it starts; we call this **machine_guid**)
+
+ For each Netdata installation (each `machine_guid`), the registry keeps track of the different URLs from which it is accessed.
+
+2. **persons**: i.e. the web browsers accessing the Netdata installations (a random GUID generated by the registry the first time it sees a new web browser; we call this **person_guid**)
+
+ For each person, the registry keeps track of the Netdata installations it has accessed and their URLs.
+
+3. **URLs** of Netdata installations (as seen by the web browsers)
+
+ For each URL, the registry keeps the URL and nothing more. Each URL is linked to _persons_ and _machines_. The only way to find a URL is to know a **machine_guid** or a **person_guid** it is linked to.
+
+4. **accounts**: i.e. the information used to sign in via one of the available sign-in methods. Depending on the method, this may include an email address, or an email address and a profile picture.
+
+For _persons/accounts_ and _machines_, the registry keeps links to _URLs_, each link with 2 timestamps (first time seen, last time seen) and a counter (number of times it has been seen).
+_machines_, _persons_, and timestamps are stored in the Netdata registry regardless of whether you sign in or not.
+
+If sending this information is against your policies, you can [run your own registry](/registry/README.md#run-your-own-registry).
+Note that ND versions with the 'Sign in' feature of the ND Cloud do not use the global registry.
+
+ND Cloud: When you sign up to obtain a user account via the 'Sign in' link on the ND agent user interface, ND is granted access to personal information in the user profile of the authentication provider you choose (e.g. GitHub or Google). ND collects and uses this personal information pursuant to its legitimate interest in establishing and maintaining your account and providing you with the features we provide to Registered Users. We may use your email address to contact you regarding changes to this policy or other applicable policies. The login name or email address of your profile may be used to attribute you in connection with any content you submit to any Service.
+
+Anonymous Usage Statistics: From Netdata v1.12 and above, anonymous usage information is collected by default on certain events of the ND daemon and sent to Google Analytics. Every time the daemon is started or stopped, and every time a fatal condition is encountered, Netdata collects system information and sends it to GA via an HTTP call. The information collected for all events is:
+
+- Netdata version
+- OS name, version, id, id_like
+- Kernel name, version, architecture
+- Virtualization technology
+- Containerization technology
+
+Furthermore, the FATAL event sends the Netdata process & thread info, along with the file, function, and line of the fatal error.
+
+The statistics calculated from this information are used for:
+
+1. **Quality assurance**, to help us understand if Netdata behaves as expected and help us identify repeating issues for certain distributions or environments.
+
+2. **Usage statistics**, to help us focus on the parts of Netdata that are used the most, or help us identify the extent to which our development decisions influence the community.
+
+To opt out of sending anonymous statistics, you can create a file called `.opt-out-from-anonymous-statistics` under the user configuration directory (usually `/etc/netdata`).
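+
+For example, assuming the default configuration directory:
+
+```bash
+sudo touch /etc/netdata/.opt-out-from-anonymous-statistics
+```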
+
+Emails and Newsletters: When you sign up to receive updates from Netdata or otherwise subscribe to one of our mailing lists, you will be asked to provide some personal information. ND collects and uses this personal information pursuant to its legitimate interest in providing news and updates to, and collaborating with, its supporters and volunteers.
+
+Email Analytics: When you receive communications from ND after signing up for the ND newsletter, campaign updates, or other ongoing email communications from ND, we may use analytics to track whether you open the mail, click on the links, and otherwise interact with what we send. You may opt out of this tracking by choosing to get plain-text emails from ND. ND collects and uses this personal information pursuant to its legitimate interest in understanding the interests of its community of supporters and volunteers in order to provide more relevant news and updates.
+
+Other Voluntarily Provided Information: When you provide feedback to Netdata, sign a petition distributed by ND, or otherwise submit personal information to Netdata, ND collects and uses this personal information pursuant to its legitimate interest in better understanding our community of supporters and volunteers and in furtherance of the particular program or activity to which you provided feedback or other input.
+
+## 4. Retention of Personal Information
+
+The majority of the personal information collected and used as explained in Section 3 above is aggregated and stored in a central database provided by a third party service provider. ND aggregates this data pursuant to its legitimate interest in having information stored in a single location to minimize complexity, increase consistency in internal practices, better understand its community of supporters and volunteers, and enhance the security of the data.
+
+## 5. Access to Your Personal Information
+
+You are generally entitled to access personal information that Netdata holds and to have inaccurate data corrected or removed to the extent ND still maintains it. In certain circumstances, you also may have the right to object for legitimate reasons to the processing or transfer of personal information. If you wish to exercise any of these rights, please write to legal@netdata.cloud explaining your request.
+
+## 6. Disclosure of Your Personal Information
+
+ND does not disclose personal information to third parties except as specified elsewhere in this policy and in the following instances:
+
+We may disclose your personal information to third parties in a good faith belief that such disclosure is reasonably necessary to (a) take action regarding suspected illegal activities; (b) enforce or apply our Master Terms and this Privacy Policy; (c) enforce our Charter, including the Code of Conduct and policies contained and incorporated therein, or (d) comply with legal process, such as a search warrant, subpoena, statute, or court order.
+
+## 7. Security of Your Personal Information
+
+Netdata has implemented reasonable physical, technical, and organizational security measures for personal information that Netdata processes against accidental or unlawful destruction, or accidental loss, alteration, unauthorized disclosure or access, in compliance with applicable law. However, no website can fully eliminate security risks. If any data breach occurs, we will post a reasonably prominent notice to the Websites and comply with all other applicable data privacy requirements including, when required, personal notice to you if you have provided and we have maintained an email address for you.
+
+The ND Cloud Services have security risks in addition to those described above. Among other things, they are vulnerable to DNS attacks, and using any ND Cloud Service may increase the risk of phishing.
+
+## 8. Children
+
+The Services are not directed at children under the age of 13. Consistent with the U.S. federal Children’s Online Privacy Protection Act of 1998 (COPPA), we will never knowingly request personal information from anyone under the age of 13 without requiring parental consent. Our Master Terms specifically prohibit anyone using our Services from submitting any personally identifiable information about persons under 13 years of age. Any person who provides their personal information to ND through the Services represents that they are 13 years of age or older.
+
+## 9. Third Party Service Providers
+
+Netdata uses many third party service providers in connection with the Services, including website hosting services, database management, credit card processing, and many more. Some of these service providers may place session cookies on your computer, and they may collect and store your personal information on our behalf in accordance with the data practices and purposes explained above in Section 3.
+
+## 10. Third Party Sites
+
+The Services may provide links to a wide variety of third party websites. You should consult the respective privacy policies of these third-party websites. This Privacy Policy does not apply to, and we cannot control the activities of, such other websites.
+
+## 11. Transferring Data to Other Countries
+
+If you are accessing or using the Services in regions with laws governing data collection, processing, transfer and use, please note that when we use and share your data as specified in this policy, we may transfer your information to recipients in countries other than the country in which the information was originally collected. Those countries may not have the same data protection laws as the country in which you initially provided the information.
+
+Data transferred from the European Union to the United States or outside the European Union will be made on the grounds of a certification to the E.U./U.S. Privacy Shield regime and/or a data transfer agreement based on the Standard Contractual Clauses approved of by the European Commission respectively, consistent with applicable data privacy requirements.
+
+## 12. Changes to this Privacy Policy
+
+We may occasionally update this Privacy Policy. When we do, we will provide you with notice of such update through (at a minimum) a reasonably prominent notice on the Websites and Services, and will revise the Effective Date below. We encourage you to periodically review this Privacy Policy to stay informed about how we are protecting, using, processing and transferring the personal information we collect.
+
+Effective Date: 8 January 2019.
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2Fprivacy-policy&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/docs/quickstart/infrastructure.md b/docs/quickstart/infrastructure.md
new file mode 100644
index 0000000..0e355f3
--- /dev/null
+++ b/docs/quickstart/infrastructure.md
@@ -0,0 +1,184 @@
+<!--
+title: "Infrastructure monitoring with Netdata"
+sidebar_label: "Infrastructure monitoring"
+description: "Build a robust, infinitely scalable infrastructure monitoring solution with Netdata. Any number of nodes and every available metric."
+custom_edit_url: https://github.com/netdata/netdata/edit/master/docs/quickstart/infrastructure.md
+-->
+
+# Infrastructure monitoring with Netdata
+
+[Netdata Cloud](https://app.netdata.cloud) provides scalable infrastructure monitoring for any number of distributed
+nodes running the Netdata Agent. A node is any system in your infrastructure that you want to monitor, whether it's a
+physical or virtual machine (VM), container, cloud deployment, or edge/IoT device.
+
+The Netdata Agent uses zero-configuration collectors to gather metrics from every application and container instantly,
+and uses Netdata's [distributed data architecture](/docs/store/distributed-data-architecture.md) to store metrics
+locally. Without a slow and troublesome centralized data lake for your infrastructure's metrics, you reduce the
+resources you need to invest in, and the complexity of, monitoring your infrastructure.
+
+Netdata Cloud unifies infrastructure monitoring by _centralizing the interface_ you use to query and visualize your
+nodes' metrics, not the data. By streaming metrics values to your browser, with Netdata Cloud acting as the secure proxy
+between them, you can monitor your infrastructure using customizable, interactive, and real-time visualizations from any
+number of distributed nodes.
+
+In this quickstart guide, you'll learn the basics of using Netdata Cloud to monitor an infrastructure with dashboards,
+composite charts, and alarm viewing. You'll then learn about the most critical ways to configure the Agent on each of
+your nodes to maximize the value you get from Netdata.
+
+This quickstart assumes you've installed the Netdata Agent on more than one node in your infrastructure, and claimed
+those nodes to your Space in Netdata Cloud. If you haven't yet, see the [_Get Netdata_ doc](/docs/get/README.md) for
+details on signing up for Netdata Cloud, installation, and claiming.
+
+> If you want to monitor a Kubernetes cluster with Netdata, see our [k8s installation
+> doc](/packaging/installer/methods/kubernetes.md) for setup details, and then read our guide, [_Monitor a Kubernetes
+> cluster with Netdata_](/docs/guides/monitor/kubernetes-k8s-netdata.md).
+
+## Set up your Netdata Cloud experience
+
+Start your infrastructure monitoring experience by setting up your Netdata Cloud account.
+
+### Organize Spaces and War Rooms
+
+Spaces are high-level containers to help you organize your team members and the nodes they can view in each War Room.
+You already have at least one Space in your Netdata Cloud account.
+
+A single Space puts all your metrics in one easily-accessible place, while multiple Spaces create logical divisions
+between different users and different pieces of a large infrastructure. For example, a large organization might have one
+SRE team for the user-facing SaaS application, and a second IT team for managing employees' hardware. Since these teams
+don't monitor the same nodes, they can work in separate Spaces and then further organize their nodes into War Rooms.
+
+Next, set up War Rooms. Netdata Cloud creates dashboards and visualizations based on the nodes added to a given War
+Room. You can [organize War Rooms](https://learn.netdata.cloud/docs/cloud/war-rooms#war-room-organization) in any way
+you want, such as by the application type, for end-to-end application monitoring, or as an incident response tool.
+
+Learn more about [Spaces](https://learn.netdata.cloud/docs/cloud/spaces) and [War
+Rooms](https://learn.netdata.cloud/docs/cloud/war-rooms), including how to manage each, in their respective reference
+documentation.
+
+### Invite your team
+
+Netdata Cloud makes an infrastructure's real-time metrics available and actionable to all organization members. By
+inviting others, you can better synchronize with your team or colleagues to understand your infrastructure's heartbeat.
+When something goes wrong, you'll be ready to collaboratively troubleshoot complex performance problems from a single
+pane of glass.
+
+To invite new users, click on **Invite Users** in the left-hand navigation panel beneath your Space's name. Choose which
+War Rooms to add this user to, then click **Send**.
+
+If your team members have trouble signing in, direct them to the [Netdata Cloud sign
+in](https://learn.netdata.cloud/docs/cloud/manage/sign-in) doc.
+
+### See an overview of your infrastructure
+
+The default way to visualize the health and performance of an infrastructure with Netdata Cloud is the
+[**Overview**](/docs/visualize/overview-infrastructure.md), which is the default interface of every War Room. The
+Overview features composite charts, which display aggregated metrics from every node in a given War Room. These metrics
+are streamed on-demand from individual nodes and composited onto a single, familiar dashboard.
+
+![The War Room
+Overview](https://user-images.githubusercontent.com/1153921/102651377-b1f4b100-4129-11eb-8e60-d2995d258c16.png)
+
+Read more about the Overview in the [infrastructure overview](/docs/visualize/overview-infrastructure.md) doc.
+
+Netdata Cloud also features the [**Nodes view**](https://learn.netdata.cloud/docs/cloud/visualize/nodes), which you can
+use to configure and view a few key metrics from every node in the War Room, see each node's health status, and more.
+
+### Drill down to specific nodes
+
+Both the Overview and Nodes view offer easy access to **single-node dashboards** for targeted analysis. You can use
+single-node dashboards in Netdata Cloud to drill down on specific issues, scrub backward in time to investigate
+historical data, and see like metrics presented meaningfully to help you troubleshoot performance problems.
+
+Read about the process in the [infrastructure
+overview](/docs/visualize/overview-infrastructure.md#single-node-dashboards) doc, then learn about [interacting with
+dashboards and charts](/docs/visualize/interact-dashboards-charts.md) to get the most from all of Netdata's real-time
+metrics.
+
+### Create new dashboards
+
+You can use Netdata Cloud to create new dashboards that match your infrastructure's topology or help you diagnose
+complex issues by aggregating correlated charts from any number of nodes. For example, you could monitor the system CPU
+from every node in your infrastructure on a single dashboard.
+
+![An example system CPU
+dashboard](https://user-images.githubusercontent.com/1153921/95915568-2db63400-0d5c-11eb-92cc-3c61cb6519dd.png)
+
+See [creating new dashboards](/docs/visualize/create-dashboards.md) for more details about the process, plus
+additional tips on leveraging the feature to help you troubleshoot complex performance problems.
+
+## Set up your nodes
+
+You get the most value out of Netdata Cloud's infrastructure monitoring capabilities if each node collects every
+possible metric. For example, if a node in your infrastructure is responsible for serving a MySQL database, you should
+ensure that the Netdata Agent on that node is properly collecting and streaming all MySQL-related metrics.
+
+In most cases, collectors autodetect their data source and require no configuration, but you may need to configure
+certain behaviors based on your infrastructure. Or, you may want to enable/configure advanced functionality, such as
+longer metrics retention or streaming.
+
+### Configure the Netdata Agent on your nodes
+
+You can configure any node in your infrastructure if you need to, although most users will find the default settings
+work extremely well for monitoring their infrastructures.
+
+Each node has a configuration file called `netdata.conf`, which is typically at `/etc/netdata/netdata.conf`. The best
+way to edit this file is using the `edit-config` script, which ensures updates to the Netdata Agent do not overwrite
+your changes. For example:
+
+```bash
+cd /etc/netdata
+sudo ./edit-config netdata.conf
+```
+
+Our [configuration basics doc](/docs/configure/nodes.md) contains more information about `netdata.conf` and
+`edit-config`, along with simple examples to get you familiar with editing your node's configuration.
+
+After you've learned the basics, you should [secure your infrastructure's nodes](/docs/configure/secure-nodes.md) using
+one of our recommended methods. These security best practices ensure no untrusted parties gain access to the metrics
+collected on any of your nodes.
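+
+As one example of those practices, you can restrict the local dashboard to localhost by editing `netdata.conf` (a
+minimal sketch; see the secure-nodes doc above for the full set of options):
+
+```conf
+[web]
+    bind to = 127.0.0.1
+```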
+
+### Collect metrics from systems and applications
+
+Netdata has [300+ pre-installed collectors](/collectors/COLLECTORS.md) that gather thousands of metrics with zero
+configuration. Collectors search default locations and ports on each of your nodes to find running applications and
+gather as many metrics as they can without you having to configure them individually.
+
+Most collectors work without configuration, but you should read up on [how collectors
+work](/docs/collect/how-collectors-work.md) and [how to enable/configure](/docs/collect/enable-configure.md) them so
+that you can see metrics from those applications in Netdata Cloud.
+
+In addition, find detailed information about which [system](/docs/collect/system-metrics.md),
+[container](/docs/collect/container-metrics.md), and [application](/docs/collect/application-metrics.md) metrics you can
+collect from across your infrastructure with Netdata.
+
+## What's next?
+
+Netdata has many features that help you monitor the health of your nodes and troubleshoot complex performance problems.
+Once you have a handle on configuration and are collecting all the right metrics, try out some of Netdata's other
+infrastructure-focused features:
+
+- [See an overview of your infrastructure](/docs/visualize/overview-infrastructure.md) using Netdata Cloud's composite
+ charts and real-time visualizations.
+- [Create new dashboards](/docs/visualize/create-dashboards.md) from any number of nodes and metrics in Netdata Cloud.
+
+To change how the Netdata Agent runs on each node, dig in to configuration files:
+
+- [Change how long nodes in your infrastructure retain metrics](/docs/store/change-metrics-storage.md) based on how
+ many metrics each node collects, your preferred retention period, and the resources you want to dedicate toward
+ long-term metrics retention.
+- [Create new alarms](/docs/monitor/configure-alarms.md), or tweak some of the pre-configured alarms, to stay on top
+ of anomalies.
+- [Enable notifications](/docs/monitor/enable-notifications.md) to Slack, PagerDuty, email, and 30+ other services.
+- [Export metrics](/docs/export/external-databases.md) to an external time-series database to use Netdata alongside
+ other monitoring and troubleshooting tools.
+
+### Related reference documentation
+
+- [Netdata Cloud · Spaces](https://learn.netdata.cloud/docs/cloud/spaces)
+- [Netdata Cloud · War Rooms](https://learn.netdata.cloud/docs/cloud/war-rooms)
+- [Netdata Cloud · Invite your team](https://learn.netdata.cloud/docs/cloud/manage/invite-your-team)
+- [Netdata Cloud · Sign in or sign up with email, Google, or
+ GitHub](https://learn.netdata.cloud/docs/cloud/manage/sign-in)
+- [Netdata Cloud · Nodes view](https://learn.netdata.cloud/docs/cloud/visualize/nodes)
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2Fquickstart%2Finfrastructure&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/docs/quickstart/single-node.md b/docs/quickstart/single-node.md
new file mode 100644
index 0000000..77656af
--- /dev/null
+++ b/docs/quickstart/single-node.md
@@ -0,0 +1,96 @@
+<!--
+title: "Single-node monitoring with Netdata"
+sidebar_label: "Single-node monitoring"
+description: "Learn dashboard basics, configuring your nodes, and collecting metrics from applications to create a powerful single-node monitoring tool."
+custom_edit_url: https://github.com/netdata/netdata/edit/master/docs/quickstart/single-node.md
+-->
+
+# Single-node monitoring with Netdata
+
+Because it's free, open-source, and requires only 1% CPU utilization to collect thousands of metrics every second,
+Netdata is a superb single-node monitoring tool.
+
+In this quickstart guide, you'll learn how to access your single node's metrics through dashboards, configure your node
+to your liking, and make sure the Netdata Agent is collecting metrics from the applications or containers you're running
+on your node.
+
+> This quickstart assumes you have installed the Netdata Agent on your node. If you haven't yet, see the [_Get Netdata_
+> doc](/docs/get/README.md) for details on installation. In addition, this quickstart mentions features available only
+> through Netdata Cloud, which requires you to [claim your node](/docs/get/README.md#claim-your-node-on-netdata-cloud).
+
+## See your node's metrics
+
+To see your node's real-time metrics, you need to access its dashboard. You can either view the local dashboard, which
+runs on the node itself, or see the dashboard through Netdata Cloud. Both methods feature real-time, interactive, and
+synchronized charts, with the same metrics, and use the same UI.
+
+The primary difference is that Netdata Cloud also has a few extra features, like creating new dashboards using a
+drag-and-drop editor, that enhance your monitoring and troubleshooting experience.
+
+To see your node's local dashboard, open up your web browser of choice and navigate to `http://NODE:19999`, replacing
+`NODE` with the IP address or hostname of your Agent. Hit `Enter`.
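+
+If the dashboard doesn't load, you can quickly check that the Agent is listening (again replacing `NODE`):
+
+```bash
+curl http://NODE:19999/api/v1/info
+```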
+
+![Animated GIF of navigating to the
+dashboard](https://user-images.githubusercontent.com/1153921/80825153-abaec600-8b94-11ea-8b17-1b770a2abaa9.gif)
+
+To see a node's dashboard in Netdata Cloud, [sign in](https://app.netdata.cloud). From the **Nodes** view in your
+**General** War Room, click on the hostname of your node to access its dashboard through Netdata Cloud.
+
+![Screenshot of an embedded node
+dashboard](https://user-images.githubusercontent.com/1153921/87457036-9b678e00-c5bc-11ea-977d-ad561a73beef.png)
+
+Once you've decided which dashboard you prefer, learn about [interacting with dashboards and
+charts](/docs/visualize/interact-dashboards-charts.md) to get the most from Netdata's real-time metrics.
+
+## Configure your node
+
+The Netdata Agent is highly configurable so that you can match its behavior to your node. You will find most
+configuration options in the `netdata.conf` file, which is typically at `/etc/netdata/netdata.conf`. The best way to
+edit this file is using the `edit-config` script, which ensures updates to the Netdata Agent do not overwrite your
+changes. For example:
+
+```bash
+cd /etc/netdata
+sudo ./edit-config netdata.conf
+```
+
+Our [configuration basics doc](/docs/configure/nodes.md) contains more information about `netdata.conf` and
+`edit-config`, along with simple examples to get you familiar with editing your node's configuration.
+
+After you've learned the basics, you should [secure your node](/docs/configure/secure-nodes.md) using one of our
+recommended methods. These security best practices ensure no untrusted parties gain access to your dashboard or its
+metrics.
+
+## Collect metrics from your system and applications
+
+Netdata has [300+ pre-installed collectors](/collectors/COLLECTORS.md) that gather thousands of metrics with zero
+configuration. Collectors search default locations and ports on your node to find running applications and gather as
+many metrics as possible without you having to configure them individually.
+
+These metrics enrich both the local and Netdata Cloud dashboards.
+
+Most collectors work without configuration, but you should read up on [how collectors
+work](/docs/collect/how-collectors-work.md) and [how to enable/configure](/docs/collect/enable-configure.md) them.
+
+In addition, find detailed information about which [system](/docs/collect/system-metrics.md),
+[container](/docs/collect/container-metrics.md), and [application](/docs/collect/application-metrics.md) metrics you can
+collect from across your infrastructure with Netdata.
+
+## What's next?
+
+Netdata has many features that help you monitor the health of your node and troubleshoot complex performance problems.
+Once you understand configuration, and are certain Netdata is collecting all the important metrics from your node, try
+out some of Netdata's other visualization and health monitoring features:
+
+- [Build new dashboards](/docs/visualize/create-dashboards.md) to put disparate but relevant metrics onto a single
+ interface.
+- [Create new alarms](/docs/monitor/configure-alarms.md), or tweak some of the pre-configured alarms, to stay on top
+ of anomalies.
+- [Enable notifications](/docs/monitor/enable-notifications.md) to Slack, PagerDuty, email, and 30+ other services.
+- [Change how long your node stores metrics](/docs/store/change-metrics-storage.md) based on how many metrics it
+ collects, your preferred retention period, and the resources you want to dedicate toward long-term metrics
+ retention.
+- [Export metrics](/docs/export/external-databases.md) to an external time-series database to use Netdata alongside
+ other monitoring and troubleshooting tools.
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2Fquickstart%2Fsingle-node&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/docs/store/change-metrics-storage.md b/docs/store/change-metrics-storage.md
new file mode 100644
index 0000000..0e2db13
--- /dev/null
+++ b/docs/store/change-metrics-storage.md
@@ -0,0 +1,72 @@
+<!--
+title: "Change how long Netdata stores metrics"
+description: "With a single configuration change, the Netdata Agent can store days, weeks, or months of metrics at its famous per-second granularity."
+custom_edit_url: https://github.com/netdata/netdata/edit/master/docs/store/change-metrics-storage.md
+-->
+
+# Change how long Netdata stores metrics
+
+import { Calculator } from '../../src/components/agent/dbCalc/'
+
+The [database engine](/database/engine/README.md) uses RAM to store recent metrics. When metrics reach a certain age,
+and based on how much system RAM you allocate toward storing metrics in memory, they are compressed and "spilled" to
+disk for long-term storage.
+
+The default settings retain about two days' worth of metrics on a system collecting 2,000 metrics every second, but the
+Netdata Agent is highly configurable if you want your nodes to store days, weeks, or months' worth of per-second data.
+
+The Netdata Agent uses two settings in `netdata.conf` to change the behavior of the database engine:
+
+```conf
+[global]
+ page cache size = 32
+ dbengine multihost disk space = 256
+```
+
+`page cache size` sets the maximum amount of RAM (in MiB) the database engine uses to cache and index recent metrics.
+`dbengine multihost disk space` sets the maximum disk space (again, in MiB) the database engine uses to store
+historical, compressed metrics. When the size of stored metrics exceeds the allocated disk space, the database engine
+removes the oldest metrics on a rolling basis.
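+
+As a rough back-of-the-envelope check derived from those defaults: retaining about two days of 2,000 metrics per second
+in 256 MiB works out to roughly 0.8 bytes per sample on disk (268,435,456 bytes ÷ 345,600,000 samples). At that ratio,
+storing 30 days at the same collection rate needs about 2,000 × 86,400 × 30 × 0.8 ≈ 4 GiB, or a `dbengine multihost
+disk space` of roughly 4000. Compression ratios vary in practice, so treat this as an estimate and prefer the
+calculator below.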
+
+## Calculate the system resources (RAM, disk space) needed to store metrics
+
+You can store more or fewer metrics using the database engine by changing the allocated disk space. Use the calculator
+below to find an appropriate value for `dbengine multihost disk space` based on how many metrics your node(s) collect,
+whether you are streaming metrics to a parent node, and more.
+
+You do not need to edit the `page cache size` setting to store more metrics using the database engine. However, if you
+want to store more metrics _specifically in memory_, you can increase the cache size.
+
+> ⚠️ This calculator provides an estimate of disk and RAM usage for **metrics storage**, along with its best
+> recommendation for the `dbengine multihost disk space` setting. Real-life usage may vary based on the accuracy of the
+> values you enter below, changes in the compression ratio, and the types of metrics stored.
+
+<Calculator />
+
+## Edit `netdata.conf` with recommended database engine settings
+
+Now that you have a recommended setting for `dbengine multihost disk space`, open `netdata.conf` with
+[`edit-config`](/docs/configure/nodes.md#use-edit-config-to-edit-configuration-files) and look for the `dbengine
+multihost disk space` setting. Change it to the value recommended above. For example:
+
+```conf
+[global]
+ dbengine multihost disk space = 1024
+```
+
+Save the file and restart the Agent with `service netdata restart` to change the database engine's size.
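+
+If your node uses systemd rather than SysV init, the restart command differs; for example:
+
+```bash
+sudo systemctl restart netdata
+```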
+
+## What's next?
+
+For more information about the database engine, see our [database reference doc](/database/engine/README.md).
+
+Storing metrics with the database engine is completely interoperable with [exporting to other time-series
+databases](/docs/export/external-databases.md). With exporting, you can use the node's resources to surface metrics
+when [viewing dashboards](/docs/visualize/interact-dashboards-charts.md), while also archiving metrics elsewhere for
+further analysis, visualization, or correlation with other tools.
+
+If you don't want to always store metrics on the node that collects them, or you run ephemeral nodes without dedicated
+storage, you can use [streaming](/streaming/README.md). Streaming allows you to centralize your data, run Agents as
+headless collectors, replicate data, and more.
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2Fstore%2Fchange-metrics-storage&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/docs/store/distributed-data-architecture.md b/docs/store/distributed-data-architecture.md
new file mode 100644
index 0000000..0ec5c14
--- /dev/null
+++ b/docs/store/distributed-data-architecture.md
@@ -0,0 +1,71 @@
+<!--
+title: "Distributed data architecture"
+description: "Netdata's distributed data architecture stores metrics on individual nodes for high performance and scalability using all your granular metrics."
+custom_edit_url: https://github.com/netdata/netdata/edit/master/docs/store/distributed-data-architecture.md
+-->
+
+# Distributed data architecture
+
+Netdata uses a distributed data architecture to help you collect and store per-second metrics from any number of nodes.
+Every node in your infrastructure, whether it's one or a thousand, stores the metrics it collects.
+
+Netdata Cloud bridges the gap between many distributed databases by _centralizing the interface_ you use to query and
+visualize your nodes' metrics. When you [look at charts in Netdata
+Cloud](/docs/visualize/interact-dashboards-charts.md), the metrics values are queried directly from that node's database
+and securely streamed to Netdata Cloud, which proxies them to your browser.
+
+Netdata's distributed data architecture has a number of benefits:
+
+- **Performance**: Every query to a node's database takes only a few milliseconds to complete, ensuring responsiveness
+  when viewing dashboards or using features like [Metric
+ Correlations](https://learn.netdata.cloud/docs/cloud/insights/metric-correlations).
+- **Scalability**: As your infrastructure scales, install the Netdata Agent on every new node to immediately add it to
+ your monitoring solution without adding cost or complexity.
+- **1-second granularity**: Without an expensive centralized data lake, you can store all of your nodes' per-second
+ metrics, for any period of time, while keeping costs down.
+- **No filtering or selecting of metrics**: Because Netdata's distributed data architecture allows you to store all
+ metrics, you don't have to configure which metrics you retain. Keep everything for full visibility during
+ troubleshooting and root cause analysis.
+- **Easy maintenance**: There is no centralized data lake to purchase, allocate, monitor, and update, removing
+ complexity from your monitoring infrastructure.
+
+## Does Netdata Cloud store my metrics?
+
+Netdata Cloud does not store metric values.
+
+To enable certain features, such as [viewing active alarms](/docs/monitor/view-active-alarms.md) or [filtering by
+hostname/service](https://learn.netdata.cloud/docs/cloud/war-rooms#node-filter), Netdata Cloud does store configured
+alarms, their status, and a list of active collectors.
+
+Netdata does not and never will sell your personal data or data about your deployment.
+
+## Long-term metrics storage with Netdata
+
+Any node running the Netdata Agent can store long-term metrics for any retention period, provided you allocate the
+appropriate amount of RAM and disk space.
+
+Read our document on changing [how long Netdata stores metrics](/docs/store/change-metrics-storage.md) on your nodes for
+details.
+
+## Other options for your metrics data
+
+While a distributed data architecture is the default when monitoring infrastructure with Netdata, you can also configure
+its behavior based on your needs or the type of infrastructure you manage.
+
+To archive metrics to an external time-series database, such as InfluxDB, Graphite, OpenTSDB, Elasticsearch,
+TimescaleDB, and many others, see details on [integrating Netdata via exporting](/docs/export/external-databases.md).
+
+You can also stream metrics between nodes using [streaming](/streaming/README.md), which allows you to replicate
+databases and create your own centralized data lake of metrics, if you choose to do so.
+
+When you use the database engine to store your metrics, you can always perform a quick backup of a node's
+`/var/cache/netdata/dbengine/` folder using the tool of your choice.
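+
+A minimal sketch using `tar` (the archive name is arbitrary; stopping the Agent first helps keep the on-disk files
+consistent, and you should adjust the stop/start commands to your init system):
+
+```bash
+sudo systemctl stop netdata
+sudo tar -czf netdata-dbengine-backup.tar.gz /var/cache/netdata/dbengine/
+sudo systemctl start netdata
+```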
+
+## What's next?
+
+You can configure the Netdata Agent to store days, weeks, or months' worth of distributed, per-second data by
+[configuring the database engine](/docs/store/change-metrics-storage.md). Use our calculator to determine the system
+resources required to retain your desired amount of metrics, and expand or contract the database by editing a single
+setting.
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2Fstore%2Fdistributed-data&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/docs/terms-of-use.md b/docs/terms-of-use.md
new file mode 100644
index 0000000..a3cbd00
--- /dev/null
+++ b/docs/terms-of-use.md
@@ -0,0 +1,166 @@
+<!--
+title: "Terms of Use"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/docs/terms-of-use.md
+-->
+
+# Terms of Use
+
+Netdata Master Terms of Use
+Effective as of 09 Aug 2019
+
+## 1. General Information Regarding These Terms of Use
+
+Master terms: Welcome, and thank you for your interest in Netdata (“Netdata, Inc.,” “ND,” “we,” “our,” or “us”). Unless otherwise noted on a particular site or service, these master terms of use (“Master Terms”) apply to your use of all of the websites that Netdata operates. These include <https://my-netdata.io> and <https://netdata.cloud>, together with all other subdomains thereof, (collectively, the “Websites”). The Master Terms also apply to all products, information, and services provided through the Websites, such as the NDID Login Service.
+
+Additional terms: In addition to the Master Terms, your use of any Services may also be subject to specific terms applicable to a particular Service (“Additional Terms”). If there is any conflict between the Additional Terms and the Master Terms, then the Additional Terms apply in relation to the relevant Service.
+
+Collectively, the Terms: The Master Terms, together with any Additional Terms, form a binding legal agreement between you and Netdata in relation to your use of the Services. Collectively, this legal agreement is referred to below as the “Terms.”
+
+Human-readable summary of Sec 1: These terms, together with any special terms for particular websites, create a contract between you and Netdata. The contract governs your use of all websites operated by Netdata, unless a particular website indicates otherwise. These human-readable summaries of each section are not part of the contract, but are intended to help you understand its terms.
+
+## 2. Your Agreement to the Terms
+
+BY ACCESSING OR USING ANY OF THE SERVICES, YOU ACKNOWLEDGE THAT YOU HAVE READ, UNDERSTOOD, AND AGREED TO BE BOUND BY THE TERMS. By accessing or using any Services you also represent that you have the legal authority to accept the Terms on behalf of yourself and any party you represent in connection with your use of any Services. If you do not agree to the Terms, you are not authorized to use any Services. If you are an individual who is entering into these Terms on behalf of an entity, you represent and warrant that you have the power to bind that entity, and you hereby agree on that entity’s behalf to be bound by these Terms, with the terms “you,” and “your” applying to you, that entity, and other users accessing the Services on behalf of that entity.
+
+Human-readable summary of Sec 2: Please read these terms and only use our sites and services if you agree to them.
+
+## 3. Changes to the Terms
+
+From time to time, Netdata may change, remove, or add to the Terms, and reserves the right to do so in its discretion. In that case, we will post updated Terms and indicate the date of revision. If we feel the modifications are material, we will make reasonable efforts to post a prominent notice on the relevant Website(s) and notify those of you with a current NDID Login Service account via email. All new and/or revised Terms take effect immediately and apply to your use of the Services from that date on, except that material changes will take effect 30 days after the change is made and identified as material. Your continued use of any Services after new and/or revised Terms are effective indicates that you have read, understood, and agreed to those Terms.
+
+Human-readable summary of Sec 3: These terms may change. When the changes are important, we will put a notice on the website. If you continue to use the sites after the changes are made, you agree to the changes.
+
+## 4. No Legal Advice
+
+Netdata is not a law firm, does not provide legal advice, and is not a substitute for a law firm. Sending us an email or using any of the Services, including the licenses, public domain tools, and choosers, does not constitute legal advice or create an attorney-client relationship.
+
+Human-readable summary of Sec 4: Some of us are lawyers, but we aren’t your lawyer. Please consult your own attorney if you need legal advice.
+
+## 5. Content Available through the Services
+
+Provided as-is: You acknowledge that Netdata does not make any representations or warranties about the material, data, and information, such as data files, text, computer software, code, music, audio files or other sounds, photographs, videos, or other images (collectively, the “Content”) which you may have access to as part of, or through your use of, the Services. Under no circumstances is Netdata liable in any way for any Content, including, but not limited to: any infringing Content, any errors or omissions in Content, or for any loss or damage of any kind incurred as a result of the use of any Content posted, transmitted, linked from, or otherwise accessible through or made available via the Services. You understand that by using the Services, you may be exposed to Content that is offensive, indecent, or objectionable.
+
+You agree that you are solely responsible for your reuse of Content made available through the Services, including providing proper attribution. You should review the terms of the applicable license before you use the Content so that you know what you can and cannot do.
+
+Licensing: ND-Owned Content: Other than the text of Netdata licenses and other legal tools, the text of the deeds for all legal tools, Netdata trademarks (subject to the Trademark Policy), and the software code, all Content on the Websites is licensed under the Creative Commons Attribution 4.0 license, unless otherwise marked. See the ND Policies page for more information.
+
+ND-Owned Code: All of ND’s software code is free software; please check our code repository for the specific license on software you want to reuse.
+
+Search Tools: On some of its Websites, Netdata provides website search tools, including ND Search, which return Content based on any information our search tools are able to locate and interpret. Those search tools may return Content that is not ND licensed, and you should independently verify the terms of the license attached to any Content you intend to use.
+
+Human-readable summary of Sec 5: We try our best to have useful information on our sites, but we cannot promise that everything is accurate or appropriate for your situation. Content on the site is licensed under CC BY 4.0 unless it says it is available under different terms. If you find content through a link on our websites, be sure to check the license terms before using it.
+
+## 6. Content Supplied by You
+
+Your responsibility: You represent, warrant, and agree that no Content posted or otherwise shared by you on or through any of the Services (“Your Content”), violates or infringes upon the rights of any third party, including copyright, trademark, privacy, publicity, or other personal or proprietary rights, breaches or conflicts with any obligation, such as a confidentiality obligation, or contains libelous, defamatory, or otherwise unlawful material.
+
+Licensing Your Content: You retain any copyright that you may have in Your Content. You hereby agree that Your Content: (a) is hereby licensed under the CC Attribution 4.0 License and may be used under the terms of that license or any later version of a CC Attribution License, or (b) is in the public domain (such as Content that is not copyrightable or Content you make available under CC0), or (c) if not owned by you, (i) is available under a CC Attribution 4.0 License or (ii) is a media file that is available under any CC license or that you are authorized by law to post or share through any of the Services, such as under the fair use doctrine, and that is prominently marked as being subject to third party copyright. All of Your Content must be appropriately marked with licensing (or other permission status such as fair use) and attribution information.
+
+Removal: Netdata may, but is not obligated to, review Your Content and may delete or remove Your Content (without notice) from any of the Services in its sole discretion. Removal of any of Your Content from the Services (by you or Netdata) does not impact any rights you granted in Your Content under the terms of a Netdata license.
+
+Human-readable summary of Sec 6: We do not take any ownership of your content when you post it on our sites. If you post content you own, you agree it can be used under the terms of CC BY 4.0 or any future version of that license. If you do not own the content, then you should not post it unless it is in the public domain or licensed CC BY 4.0, except that you may also post pictures and videos if you are authorized to use them under law (e.g. fair use) or if they are available under any CC license. You must include that licensing and attribution information with the file when you upload it. You are responsible for any content you upload to our sites.
+
+## 7. Prohibited Conduct
+
+You agree not to engage in any of the following activities:
+
+### 1. Violating laws and rights:
+
+You may not (a) use any Service for any illegal purpose or in violation of any local, state, national, or international laws, including without limitation U.S. export controls and economic sanctions regulations, or (b) violate or encourage others to violate any right of or obligation to a third party, including by infringing, misappropriating, or violating intellectual property, confidentiality, or privacy rights.
+
+### 2. Solicitation:
+
+You may not use the Services or any information provided through the Services for the transmission of advertising or promotional materials, including junk mail, spam, chain letters, pyramid schemes, or any other form of unsolicited or unwelcome solicitation.
+
+### 3. Disruption:
+
+You may not use the Services in any manner that could disable, overburden, damage, or impair the Services, or interfere with any other party’s use and enjoyment of the Services; including by (a) uploading or otherwise disseminating any virus, adware, spyware, worm or other malicious code, or (b) interfering with or disrupting any network, equipment, or server connected to or used to provide any of the Services, or violating any regulation, policy, or procedure of any network, equipment, or server.
+
+### 4. Harming others:
+
+You may not post or transmit Content on or through the Services that is harmful, offensive, obscene, abusive, invasive of privacy, defamatory, hateful or otherwise discriminatory, false or misleading, or incites an illegal act;
+You may not intimidate or harass another through the Services; and you may not post or transmit any personally identifiable information about persons under 13 years of age on or through the Services.
+
+### 5. Impersonation or unauthorized access:
+
+You may not impersonate another person or entity, or misrepresent your affiliation with a person or entity when using the Services;
+You may not use or attempt to use another’s account or personal information without authorization; and
+You may not attempt to gain unauthorized access to the Services, or the computer systems or networks connected to the Services, through hacking, password mining, or any other means.
+
+Human-readable summary of Sec 7: Play nice. Be yourself. Don’t break the law or be disruptive.
+
+## 8. DISCLAIMER OF WARRANTIES
+
+TO THE FULLEST EXTENT PERMITTED BY APPLICABLE LAW, NETDATA OFFERS THE SERVICES (INCLUDING ALL CONTENT AVAILABLE ON OR THROUGH THE SERVICES) AS-IS AND MAKES NO REPRESENTATIONS OR WARRANTIES OF ANY KIND CONCERNING THE SERVICES, EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, INCLUDING WITHOUT LIMITATION, WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT. NETDATA DOES NOT WARRANT THAT THE FUNCTIONS OF THE SERVICES WILL BE UNINTERRUPTED OR ERROR-FREE, THAT CONTENT MADE AVAILABLE ON OR THROUGH THE SERVICES WILL BE ERROR-FREE, THAT DEFECTS WILL BE CORRECTED, OR THAT ANY SERVERS USED BY ND ARE FREE OF VIRUSES OR OTHER HARMFUL COMPONENTS. NETDATA DOES NOT WARRANT OR MAKE ANY REPRESENTATION REGARDING USE OF THE CONTENT AVAILABLE THROUGH THE SERVICES IN TERMS OF ACCURACY, RELIABILITY, OR OTHERWISE.
+
+Human-readable summary of Sec 8: ND does not make any guarantees about the sites, services, or content available on the sites.
+
+## 9. LIMITATION OF LIABILITY
+
+TO THE FULLEST EXTENT PERMITTED BY APPLICABLE LAW, IN NO EVENT WILL NETDATA BE LIABLE TO YOU ON ANY LEGAL THEORY FOR ANY INCIDENTAL, DIRECT, INDIRECT, PUNITIVE, ACTUAL, CONSEQUENTIAL, SPECIAL, EXEMPLARY, OR OTHER DAMAGES, INCLUDING WITHOUT LIMITATION, LOSS OF REVENUE OR INCOME, LOST PROFITS, PAIN AND SUFFERING, EMOTIONAL DISTRESS, COST OF SUBSTITUTE GOODS OR SERVICES, OR SIMILAR DAMAGES SUFFERED OR INCURRED BY YOU OR ANY THIRD PARTY THAT ARISE IN CONNECTION WITH THE SERVICES (OR THE TERMINATION THEREOF FOR ANY REASON), EVEN IF NETDATA HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
+
+TO THE FULLEST EXTENT PERMITTED BY APPLICABLE LAW, NETDATA IS NOT RESPONSIBLE OR LIABLE WHATSOEVER IN ANY MANNER FOR ANY CONTENT POSTED ON OR AVAILABLE THROUGH THE SERVICES (INCLUDING CLAIMS OF INFRINGEMENT RELATING TO THAT CONTENT), FOR YOUR USE OF THE SERVICES, OR FOR THE CONDUCT OF THIRD PARTIES ON OR THROUGH THE SERVICES.
+
+Certain jurisdictions do not permit the exclusion of certain warranties or limitation of liability for incidental or consequential damages, which means that some of the above limitations may not apply to you. IN THESE JURISDICTIONS, THE FOREGOING EXCLUSIONS AND LIMITATIONS WILL BE ENFORCED TO THE GREATEST EXTENT PERMITTED BY APPLICABLE LAW.
+
+Human-readable summary of Sec 9: ND is not responsible for the content on the sites, your use of our services, or for the conduct of others on our sites.
+
+## 10. Indemnification
+
+To the extent authorized by law, you agree to indemnify and hold harmless Netdata, its employees, officers, directors, affiliates, and agents from and against any and all claims, losses, expenses, damages, and costs, including reasonable attorneys’ fees, resulting directly or indirectly from or arising out of (a) your violation of the Terms, (b) your use of any of the Services, and/or (c) the Content you make available on any of the Services.
+
+Human-readable summary of Sec 10: If something happens because you violate these terms, because of your use of the services, or because of the content you post on the sites, you agree to repay ND for the damage it causes.
+
+## 11. Privacy Policy
+
+Netdata is committed to responsibly handling the information and data we collect through our Services in compliance with our Privacy Policy, which is incorporated by reference into these Master Terms. Please review the Privacy Policy so you are aware of how we collect and use your personal information.
+
+Human-readable summary of Sec 11: Please read our Privacy Policy. It is part of these terms, too.
+
+## 12. Trademark Policy
+
+ND’s name, logos, icons, and other trademarks may only be used in accordance with our Trademark Policy, which is incorporated by reference into these Master Terms. Please review the Trademark Policy so you understand how ND’s trademarks may be used.
+
+Human-readable summary of Sec 12: Please read our Trademark Policy. It is part of these terms, too.
+
+## 13. Copyright Complaints
+
+Netdata respects copyright, and we prohibit users of the Services from submitting, uploading, posting, or otherwise transmitting any Content on the Services that violates another person’s proprietary rights.
+
+To report allegedly infringing Content hosted on a website owned or controlled by ND, send a Notice of Infringing Materials to info@netdata.cloud.
+
+Please note that Netdata does not host the Content made available through ND Search. You should contact the web site or service hosting the Content to have it removed.
+
+Human-readable summary of Sec 13: Please let us know if you find infringing content on our websites.
+
+## 14. Termination
+
+By Netdata: Netdata may modify, suspend, or terminate the operation of, or access to, all or any portion of the Services at any time for any reason. Additionally, your individual access to, and use of, the Services may be terminated by Netdata at any time and for any reason.
+
+By you: If you wish to terminate this agreement, you may immediately stop accessing or using the Services at any time.
+
+Automatic upon breach: Your right to access and use the Services (including use of your ND Login Service account) terminates automatically upon your breach of any of the Terms. For the avoidance of doubt, termination of the Terms does not require you to remove or delete any reference to previously-applied ND legal tools from your own Content.
+
+Survival: The disclaimer of warranties, the limitation of liability, and the jurisdiction and applicable law provisions will survive any termination. The license grants applicable to Your Content are not impacted by the termination of the Terms and shall continue in effect subject to the terms of the applicable license. Your warranties and indemnification obligations will survive for one year after termination.
+
+Human-readable summary of Sec 14: If you violate these terms, you may no longer use our sites.
+
+## 15. Miscellaneous Terms
+
+Choice of law: The Terms are governed by and construed in accordance with the laws of the State of Delaware in the United States, not including its choice of law rules.
+
+Dispute resolution: The parties agree that any disputes between Netdata and you concerning these Terms, and/or any of the Services, may only be brought in a federal or state court of competent jurisdiction sitting in the State of Delaware, and you hereby consent to the personal jurisdiction and venue of such court.
+
+If you are an authorized agent of a government or intergovernmental entity using the Services in your official capacity, including an authorized agent of the federal, state, or local government in the United States, and you are legally restricted from accepting the controlling law, jurisdiction, or venue clauses above, then those clauses do not apply to you. For any such U.S. federal government entities, these Terms and any action related thereto will be governed by the laws of the United States of America (without reference to conflict of laws) and, in the absence of federal law and to the extent permitted under federal law, the laws of the State of Delaware (excluding its choice of law rules).
+
+No waiver: Either party’s failure to insist on or enforce strict performance of any of the Terms will not be construed as a waiver of any provision or right.
+
+Severability: If any part of the Terms is held to be invalid or unenforceable by any law or regulation or final determination of a competent court or tribunal, that provision will be deemed severable and will not affect the validity and enforceability of the remaining provisions.
+
+No agency relationship: The parties agree that no joint venture, partnership, employment, or agency relationship exists between you and Netdata as a result of the Terms or from your use of any of the Services.
+
+Integration: These Master Terms and any applicable Additional Terms constitute the entire agreement between you and Netdata relating to this subject matter and supersede any and all prior communications and/or agreements between you and Netdata relating to access and use of the Services.
+
+Human-readable summary of Sec 15: If there is a lawsuit arising from these terms, it should be in Delaware and governed by Delaware law. We are glad you use our sites, but this agreement does not mean we are partners.
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2Fterms-of-use&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/docs/visualize/create-dashboards.md b/docs/visualize/create-dashboards.md
new file mode 100644
index 0000000..91a8dcc
--- /dev/null
+++ b/docs/visualize/create-dashboards.md
@@ -0,0 +1,64 @@
+<!--
+title: "Create new dashboards"
+description: "Create new dashboards in Netdata Cloud, with any number of metrics from any node on your infrastructure, for targeted troubleshooting."
+custom_edit_url: https://github.com/netdata/netdata/edit/master/docs/visualize/create-dashboards.md
+-->
+
+# Create new dashboards
+
+With Netdata Cloud, you can build new dashboards that put key metrics from any number of distributed systems in one
+place for a bird's eye view of your infrastructure. You can create more meaningful visualizations for troubleshooting or
+keep a watchful eye on your infrastructure's most important metrics without moving from node to node.
+
+In the War Room you want to monitor with this dashboard, click on your War Room's dropdown, then click on the green **+
+Add** button next to **Dashboards**. In the panel, give your new dashboard a name, and click **+ Add**.
+
+Click the **Add Chart** button to add your first chart card. From the dropdown, select the node you want to add the
+chart from, then the context. Netdata Cloud shows you a preview of the chart before you finish adding it.
+
+The **Add Text** button creates a new card with user-defined text, which you can use to describe or document a
+particular dashboard's meaning and purpose. Enrich the dashboards you create with documentation or procedures on how to
+respond.
+
+![A bird's eye dashboard for a single
+node](https://user-images.githubusercontent.com/1153921/102650776-a654ba80-4128-11eb-9a65-4f9801b03d4b.png)
+
+Charts in dashboards are [fully interactive](/docs/visualize/interact-dashboards-charts.md) and synchronized. You can
+pan through time, zoom, highlight specific timeframes, and more.
+
+Move any card by clicking on its top panel and dragging it to a new location. Other cards re-sort to the grid system
+automatically. You can also resize any card by grabbing the bottom-right corner and dragging it to its new size.
+
+Hit the **Save** button to finalize your dashboard. Any other member of the War Room can now access it and make changes.
+
+## Jump to single-node Cloud dashboards
+
+While dashboards help you associate essential charts from distributed nodes on a single pane of glass, you might need
+more detail when troubleshooting an issue. Quickly jump to any node's dashboard by clicking the 3-dot icon in the corner
+of any card to open a menu. Hit the **Go to Chart** item.
+
+Netdata Cloud takes you to the same chart on that node's dashboard. You can now navigate all that node's metrics and
+[interact with charts](/docs/visualize/interact-dashboards-charts.md) to further investigate anomalies or troubleshoot
+complex performance problems.
+
+When viewing a single-node Cloud dashboard, you can also click on the add to dashboard icon <img
+src="https://user-images.githubusercontent.com/1153921/87587846-827fdb00-c697-11ea-9f31-aed0b8c6afba.png" alt="Dashboard
+icon" class="image-inline" /> to quickly add that chart to a new or existing dashboard. You might find this useful when
+investigating an anomaly and want to quickly populate a dashboard with potentially correlated metrics.
+
+## Pin dashboards and navigate through Netdata Cloud
+
+Click on the **Pin** button in any dashboard to put those charts into a separate panel at the bottom of the screen. You
+can now navigate freely through Netdata Cloud: individual Cloud dashboards, the Nodes view, different War Rooms, or even
+different Spaces, and have those valuable metrics follow you.
+
+Pinning dashboards helps you correlate potentially related charts across your infrastructure and discover root causes
+faster.
+
+## What's next?
+
+While it's useful to see real-time metrics on flexible dashboards, you need ways to know precisely when an anomaly
+strikes. Every Netdata Agent comes with a health watchdog that uses [alarms](/docs/monitor/configure-alarms.md) and
+[notifications](/docs/monitor/enable-notifications.md) to notify you of issues seconds after they strike.
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2Fvisualize%2Fcreate-dashboards&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/docs/visualize/interact-dashboards-charts.md b/docs/visualize/interact-dashboards-charts.md
new file mode 100644
index 0000000..30503c2
--- /dev/null
+++ b/docs/visualize/interact-dashboards-charts.md
@@ -0,0 +1,127 @@
+<!--
+title: "Interact with dashboards and charts"
+description: "Zoom, highlight, and pan through time on hundreds of real-time, interactive charts to quickly discover the root cause of any anomaly."
+custom_edit_url: https://github.com/netdata/netdata/edit/master/docs/visualize/interact-dashboards-charts.md
+-->
+
+# Interact with dashboards and charts
+
+You can find Netdata's dashboards in two places: locally served at `http://NODE:19999` by the Netdata Agent, and in
+Netdata Cloud. While you access these dashboards differently, they have similar interfaces, identical charts and
+metrics, and you interact with both of them the same way.
+
+> If you're not sure which option is best for you, see our [single-node](/docs/quickstart/single-node.md) and
+> [infrastructure](/docs/quickstart/infrastructure.md) quickstart guides.
+
+Netdata dashboards are single, scrollable pages with many charts stacked on top of one another. As you scroll up or
+down, charts appearing in your browser's viewport automatically load and update every second.
+
+The dashboard is broken up into multiple **sections**, such as **System Overview**, **CPU**, and **Disk**, which are
+automatically generated based on which [collectors](/docs/collect/how-collectors-work.md) begin collecting metrics when
+Netdata starts up. Sections also appear in the right-hand **menu**, along with submenus based on the contexts and
+families Netdata creates for your node.
+
+## Choose timeframes to visualize
+
+Both the local Agent dashboard and Netdata Cloud feature time & date pickers to help you visualize specific points in
+time. In Netdata Cloud, the picker appears in the [Overview](/docs/visualize/overview-infrastructure.md), [Nodes
+view](https://learn.netdata.cloud/docs/cloud/visualize/nodes), [new
+dashboards](https://learn.netdata.cloud/docs/cloud/visualize/dashboards), and any single-node dashboards you visit.
+
+Local Agent dashboard:
+
+![Time & date picker on the local Netdata
+dashboard](https://user-images.githubusercontent.com/1153921/101512538-5875d080-3938-11eb-8daf-0fbd0948a04b.png)
+
+Netdata Cloud:
+
+![Time & date picker on Netdata
+Cloud](https://user-images.githubusercontent.com/1153921/101512689-86f3ab80-3938-11eb-8abc-12171a9b8a5e.png)
+
+Their behavior is identical. Use the Quick Selector to visualize generic timeframes, or use the calendar or inputs to
+select days, hours, minutes or seconds. Click **Apply** to re-render all visualizations with new metrics data, or
+**Clear** to restore the default timeframe.
+
+See reference documentation for the [local Agent dashboard](/web/gui/README.md#time--date-picker) and [Netdata
+Cloud](https://learn.netdata.cloud/docs/cloud/war-rooms#time--date-picker) for additional context about how the time &
+date picker behaves in each environment.
+
+## Charts, dimensions, families, and contexts
+
+A **chart** is an interactive visualization of one or more collected/calculated metrics. You can see the name (also
+known as its unique ID) of a chart by looking at the top-left corner of a chart and finding the parenthesized text. On a
+Linux system, one of the first charts on the dashboard will be the system CPU chart, with the name `system.cpu`.
+
+A **dimension** is any value that gets shown on a chart. The value can be raw data or calculated values, such as
+percentages, aggregates, and more. Most charts have more than one dimension, in which case the chart displays each in
+a different color. You can disable or enable showing these dimensions by clicking on them.
+
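+Charts and dimensions are also exposed through the Agent's REST API, which can be handy for scripting or spot checks.
+The following is a minimal sketch, assuming a local Agent listening on the default port `19999`:
+
+```sh
+# Fetch the last 10 seconds of the system.cpu chart as JSON from a local Agent.
+curl -s "http://localhost:19999/api/v1/data?chart=system.cpu&after=-10&format=json"
+```
+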
+A **family** is _one_ instance of a monitored hardware or software resource that needs to be displayed separately
+from similar instances. For example, if your node has multiple partitions, Netdata will create different
+families for `/`, `/boot`, `/home`, and so on. The same goes for entire disks, network devices, and more.
+
+A **context** groups several charts based on the types of metrics being collected and displayed. For example, the
+**Disk** section often has many contexts: `disk.io`, `disk.ops`, `disk.backlog`, `disk.util`, and so on. Netdata uses
+this context to create individual charts and then groups them by family. You can always see the context of any chart by
+looking at its name or hovering over the chart's date.
+
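+To see how these concepts map onto your own node, you can list every chart's ID, context, and family from the same
+API. A minimal sketch, assuming a local Agent on the default port and `jq` installed for readability:
+
+```sh
+# List the ID, context, and family of every chart a local Agent serves.
+curl -s "http://localhost:19999/api/v1/charts" | jq '.charts[] | {id, context, family}'
+```
+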
+See our [dashboard docs](/web/README.md#charts-contexts-families) for more information about the above distinctions
+and how they're used across Netdata to meaningfully organize and present metrics.
+
+## Interact with charts
+
+Netdata's charts are fully interactive to help you find meaningful information about complex problems. You can pan
+through historical metrics, zoom in and out, select specific timeframes for further analysis, resize charts, and more.
+Whenever you use a chart in this way, Netdata synchronizes all the other charts to match it.
+
+| Change | Method #1 | Method #2 | Method #3 |
+| ------------------------------------------------- | ----------------------------------- | --------------------------------------------------------- | ---------------------------------------------------------- |
+| **Stop** a chart from updating | `click` | | |
+| **Reset** charts to default auto-refreshing state | `double click` | `double tap` (touchpad/touchscreen) | |
+| **Select** a certain timeframe | `ALT` + `mouse selection` | `⌘` + `mouse selection` (macOS) | |
+| **Pan** forward or back in time | `click and drag` | `touch and drag` (touchpad/touchscreen) | |
+| **Zoom** to a specific timeframe | `SHIFT` + `mouse selection` | | |
+| **Zoom** in/out | `SHIFT`/`ALT` + `mouse scrollwheel` | `SHIFT`/`ALT` + `two-finger pinch` (touchpad/touchscreen) | `SHIFT`/`ALT` + `two-finger scroll` (touchpad/touchscreen) |
+
+![Animated GIF of interacting with Netdata
+charts](https://user-images.githubusercontent.com/1153921/102652236-051b3380-412b-11eb-8f7c-a2372ed92cd0.gif)
+
+These interactions can also be triggered using the icons on the bottom-right corner of every chart. They are,
+respectively, `Pan Left`, `Reset`, `Pan Right`, `Zoom In`, and `Zoom Out`.
+
+You can show and hide individual dimensions by clicking on their names. Use `SHIFT + click` to hide or show dimensions
+one at a time. Hiding dimensions simplifies the chart and can help you better discover exactly which aspect of your
+system is behaving strangely.
+
+You can resize any chart by clicking-and-dragging the icon on the bottom-right corner of any chart. To restore the chart
+to its original height, double-click the same icon.
+
+![Resizing a chart and resetting it to the default
+height](https://user-images.githubusercontent.com/1153921/102652691-24b25c00-412b-11eb-9e2c-95325fcedc67.gif)
+
+### Composite charts in Netdata Cloud
+
+Netdata Cloud now supports composite charts in the Overview interface. Composite charts come with a few additional UI
+elements and varied interactions, such as the location of dimensions and a utility bar for configuring the state of
+individual composite charts. All of these details are covered in the [Overview
+reference](https://learn.netdata.cloud/docs/cloud/visualize/overview) doc.
+
+## What's next?
+
+Netdata Cloud users can [build new dashboards](/docs/visualize/create-dashboards.md) in just a few clicks. By
+aggregating relevant metrics from any number of nodes onto a single interface, you can respond faster to anomalies,
+perform more targeted troubleshooting, or keep tabs on a bird's eye view of your infrastructure.
+
+If you're finished with dashboards for now, skip to Netdata's health watchdog for information on [creating or
+configuring](/docs/monitor/configure-alarms.md) alarms, and [sending notifications](/docs/monitor/enable-notifications.md)
+to get informed when something goes wrong in your infrastructure.
+
+### Related reference documentation
+
+- [Netdata Agent · Web dashboards overview](/web/README.md)
+- [Netdata Cloud · War Rooms](https://learn.netdata.cloud/docs/cloud/war-rooms)
+- [Netdata Cloud · Overview](https://learn.netdata.cloud/docs/cloud/visualize/overview)
+- [Netdata Cloud · Nodes](https://learn.netdata.cloud/docs/cloud/visualize/nodes)
+- [Netdata Cloud · Build new dashboards](https://learn.netdata.cloud/docs/cloud/visualize/dashboards)
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2Fvisualize%2Finteract-dashboards-charts&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/docs/visualize/overview-infrastructure.md b/docs/visualize/overview-infrastructure.md
new file mode 100644
index 0000000..675abd7
--- /dev/null
+++ b/docs/visualize/overview-infrastructure.md
@@ -0,0 +1,109 @@
+<!--
+title: "See an overview of your infrastructure"
+description: "With Netdata Cloud's War Rooms, you can see real-time metrics, from any number of nodes in your infrastructure, in composite charts."
+custom_edit_url: https://github.com/netdata/netdata/edit/master/docs/visualize/overview-infrastructure.md
+-->
+
+# See an overview of your infrastructure
+
+In Netdata Cloud, your nodes are organized into War Rooms. One of the two available views for a War Room is the
+**Overview**, which uses composite charts to display real-time, aggregated metrics from all the nodes (or a filtered
+selection) in a given War Room.
+
+With Overview's composite charts, you can see your infrastructure from a single pane of glass, discover trends or
+anomalies, then drill down with filtering or single-node dashboards to see more. In the screenshot below,
+each chart visualizes averaged or summed metric values from across 5 distributed nodes.
+
+![The War Room
+Overview](https://user-images.githubusercontent.com/1153921/102651377-b1f4b100-4129-11eb-8e60-d2995d258c16.png)
+
+## Using the Overview
+
+> ⚠️ In order for nodes to contribute to composite charts, and thus the Overview UI, they must run v1.26.0 or later of
+> the Netdata Agent. See our [update docs](/packaging/installer/UPDATE.md) for the preferred update method based on how
+> you installed the Agent.
+
+The Overview uses roughly the same interface as local Agent dashboards or single-node dashboards in Netdata Cloud. By
+showing all available metrics from all your nodes in a single interface, Netdata Cloud helps you visualize the overall
+health of your infrastructure. Best of all, you don't have to worry about creating your own dashboards just to get
+started with infrastructure monitoring.
+
+Let's walk through some examples of using the Overview to monitor and troubleshoot your infrastructure.
+
+### Filter nodes and pick relevant times
+
+While not exclusive to Overview, you can use two important features, [node
+filtering](https://learn.netdata.cloud/docs/cloud/war-rooms#node-filter) and the [time &amp; date
+picker](https://learn.netdata.cloud/docs/cloud/war-rooms#time--date-picker), to widen or narrow your infrastructure
+monitoring focus.
+
+By default, the Overview shows composite charts aggregated from every node in the War Room, but you can change that
+behavior on an ad-hoc basis. The node filter allows you to create complex queries against your infrastructure based on
+the name, OS, or services running on nodes. For example, use `(name contains aws AND os contains ubuntu) OR services ==
+apache` to show only nodes that have `aws` in the hostname and are Ubuntu-based, or any nodes that have an Apache
+webserver running on them.
+
+The time &amp; date picker helps you visualize both small and large timeframes depending on your goals, whether that's
+establishing a baseline of infrastructure performance or targeted root cause analysis of a specific anomaly.
+
+For example, use the **Quick Selector** options to pick the 12-hour option first thing in the morning to check your
+infrastructure for any odd behavior overnight. Use the 7-day option to observe trends between various days of the week.
+
+See the [War Rooms](https://learn.netdata.cloud/docs/cloud/war-rooms) docs for more details on both features.
+
+### Configure composite charts to identify problems
+
+Let's say you notice a sharp decrease in available RAM for applications, as seen in the example screenshot below. In
+this situation, you can see when the anomalous behavior began and that it affects the average available and committed
+RAM across your infrastructure. However, when _grouped by dimension_, composite charts cannot show whether an anomaly
+affects a single node, a subset of nodes, or an entire infrastructure.
+
+![Composite charts showing available and committed RAM across an
+infrastructure](https://user-images.githubusercontent.com/1153921/99314892-0bae4680-281f-11eb-823e-071a1da25dc7.png)
+
+Use [_group by node_](https://learn.netdata.cloud/docs/cloud/visualize/overview#group-by-dimension-or-node) to visualize
+a single metric across all contributing nodes. If the composite chart has 5 contributing nodes, there will be 5
+lines/areas, one for the most relevant dimension from each node.
+
+![Finding a problematic node with group by
+node](https://user-images.githubusercontent.com/1153921/99315558-0e5d6b80-2820-11eb-91e9-9c46bc4c7298.gif)
+
+After grouping by node, it's clear that the `Composite-Charts-01` node is experiencing anomalous behavior and should be
+investigated further by jumping to its [single-node dashboard](#drill-down-with-single-node-dashboards) in Netdata
+Cloud.
+
+### Drill down with single-node dashboards
+
+Click on **X Charts** of any composite chart's definition bar to display a dropdown of the contributing contexts and
+nodes. Click on the link icon <img class="img__inline img__inline--link"
+src="https://user-images.githubusercontent.com/1153921/95762109-1d219300-0c62-11eb-8daa-9ba509a8e71c.png" /> next to a
+given node to quickly _jump to the same chart in that node's single-node dashboard_ in Netdata Cloud.
+
+![Jumping to a single-node dashboard in Netdata
+Cloud](https://user-images.githubusercontent.com/1153921/99317327-1e2a7f00-2823-11eb-8fc3-76f260ced86a.gif)
+
+You can use single-node dashboards in Netdata Cloud to drill down on specific issues, scrub backward in time to
+investigate historical data, and see related metrics presented meaningfully to help you troubleshoot performance problems.
+All of the familiar [interactions](/docs/visualize/interact-dashboards-charts.md) are available, as is adding any chart
+to a [new dashboard](/docs/visualize/create-dashboards.md).
+
+## Nodes view
+
+You can also use the **Nodes view** to monitor the health status and user-configurable key metrics from multiple nodes
+in a War Room. Read the [Nodes view doc](https://learn.netdata.cloud/docs/cloud/visualize/nodes) for details.
+
+![The Nodes view](https://user-images.githubusercontent.com/1153921/95909704-cb593580-0d53-11eb-88fa-a3416ab09849.png)
+
+## What's next?
+
+To troubleshoot complex performance issues using Netdata, you need to understand how to interact with its meaningful
+visualizations. Learn more about [interaction](/docs/visualize/interact-dashboards-charts.md) to see historical metrics,
+highlight timeframes for targeted analysis, and more.
+
+### Related reference documentation
+
+- [Netdata Cloud · War Rooms](https://learn.netdata.cloud/docs/cloud/war-rooms)
+- [Netdata Cloud · Overview](https://learn.netdata.cloud/docs/cloud/visualize/overview)
+- [Netdata Cloud · Nodes view](https://learn.netdata.cloud/docs/cloud/visualize/nodes)
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2Fvisualize%2Foverview-infrastructure&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/docs/why-netdata/1s-granularity.md b/docs/why-netdata/1s-granularity.md
new file mode 100644
index 0000000..a2cc65b
--- /dev/null
+++ b/docs/why-netdata/1s-granularity.md
@@ -0,0 +1,59 @@
+<!--
+title: "1s granularity"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/docs/why-netdata/1s-granularity.md
+-->
+
+# 1s granularity
+
+High resolution metrics are required to effectively monitor and troubleshoot systems and applications.
+
+## Why?
+
+- The world is going real-time. Today, customer experience is significantly affected by response time, so SLAs are tighter than ever before. It is just not practical to monitor a 2-second SLA with 10-second metrics.
+
+- IT goes virtual. Unlike real hardware, virtual environments are neither linear nor predictable. You cannot expect resources to be available when your applications need them. They will eventually be, but not exactly at the time they are needed. The latency of virtual environments is affected by many factors, most of which are outside our control, such as the maintenance policy of the hosting provider, the workload of third-party virtual machines running on the same physical servers combined with the resource allocation and throttling policy among virtual machines, the provisioning system of the hosting provider, and so on.
+
+## What do others do?
+
+So, why don't most monitoring platforms and monitoring SaaS providers offer high resolution metrics?
+
+They want to, but they can't, at least not massively.
+
+The reasons lie in their design decisions:
+
+1. Time-series databases (Prometheus, Graphite, OpenTSDB, InfluxDB, etc.) centralize all the metrics. At scale, these databases can easily become the bottleneck of the whole infrastructure.
+
+2. SaaS providers base their business models on centralizing all the metrics. On top of the time-series database bottleneck, they also have increased bandwidth costs. So, massively supporting high resolution metrics destroys their business model.
+
+Of course, for a couple of decades now the world has known how to fix this kind of scaling problem: instead of scaling up, scale out, horizontally. That is, instead of investing in bigger and bigger central components, decentralize the application so that it can scale by adding more, smaller nodes to it.
+
+There have been many attempts to fix this problem for monitoring. But so far, all solutions have required centralization of metrics, which can only scale up. So, although the problem is somewhat managed, it remains the key problem of all monitoring platforms and one of the key reasons for increased monitoring costs.
+
+Another important factor is how resource-efficient data collection can be when running per second. Most solutions fail to do it properly: their data collection agents consume significant system resources when running "per second", influencing the monitored systems and applications to a great degree.
+
+Finally, per second data collection is a lot harder. Busy virtual environments have [a constant latency of about 100ms, spread randomly to all data sources](https://docs.google.com/presentation/d/18C8bCTbtgKDWqPa57GXIjB2PbjjpjsUNkLtZEz6YK8s/edit#slide=id.g422e696d87_0_57). If data collection is not implemented properly, this latency introduces a random error of +/- 10%, which is quite significant for a monitoring system.
+
+So, the monitoring industry fails to massively provide high resolution metrics, mainly for 3 reasons:
+
+1. Centralization of metrics makes monitoring cost-inefficient at that rate.
+2. Data collection needs optimization, otherwise it will significantly affect the monitored systems.
+3. Data collection is a lot harder, especially on busy virtual environments.
+
+## What does Netdata do differently?
+
+Netdata decentralizes monitoring completely. Each Netdata node is autonomous. It collects metrics locally, it stores them locally, it runs checks against them to trigger alarms locally, and it provides an API for the dashboards to visualize them. This allows Netdata to scale to infinity.
+
+Of course, Netdata can centralize metrics when needed. For example, it is not practical to keep metrics locally on ephemeral nodes. For these cases, Netdata streams the metrics in real-time, from the ephemeral nodes to one or more non-ephemeral nodes nearby. This centralization is again distributed. On a large infrastructure, there may be many centralization points.
+
+To eliminate the error introduced by data collection latencies on busy virtual environments, Netdata interpolates collected metrics. It does this using microsecond timings, per data source, offering measurements with an error rate of 0.0001%. When running [in debug mode, Netdata calculates this error rate](https://github.com/netdata/netdata/blob/36199f449852f8077ea915a3a14a33fa2aff6d85/database/rrdset.c#L1070-L1099) for every point collected, ensuring that the database works with acceptable accuracy.
+
+Finally, Netdata is really fast. Optimization is a core product feature. On modern hardware, Netdata can collect metrics at a rate above 1 million metrics per second per core (this includes everything: parsing data sources, interpolating data, storing data in the time-series database, and so on). So, for a few thousand metrics per second per node, Netdata needs negligible CPU resources (just 1-2% of a single core).
+
+Netdata has been designed to:
+
+- Solve the centralization problem of monitoring.
+- Replace the console for performance troubleshooting.
+
+So, for Netdata, 1s granularity is easy: the natural outcome of its design.
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2Fwhy-netdata%2F1s-granularity&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/docs/why-netdata/README.md b/docs/why-netdata/README.md
new file mode 100644
index 0000000..39cda51
--- /dev/null
+++ b/docs/why-netdata/README.md
@@ -0,0 +1,35 @@
+<!--
+title: "Why Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/docs/why-netdata/README.md
+-->
+
+# Why Netdata
+
+> Any performance monitoring solution that does not go down to per second
+> collection and visualization of the data, is useless.
+> It will make you happy to have it, but it will not help you more than that.
+
+Netdata is built around 4 principles:
+
+1. **[Per second data collection for all metrics.](/docs/why-netdata/1s-granularity.md)**
+
+   _It is impossible to monitor a 2-second SLA with 10-second metrics._
+
+2. **[Collect and visualize all the metrics from all possible sources.](/docs/why-netdata/unlimited-metrics.md)**
+
+   _To troubleshoot slowdowns, we need all the available metrics. The console should not provide more metrics than our monitoring tool does._
+
+3. **[Meaningful presentation, optimized for visual anomaly detection.](/docs/why-netdata/meaningful-presentation.md)**
+
+ _Metrics are a lot more than name-value pairs over time. The monitoring tool should know all the metrics. Users should not!_
+
+4. **[Immediate results, just install and use.](/docs/why-netdata/immediate-results.md)**
+
+   _Most of our infrastructure is standardized. There is no point in configuring everything metric by metric._
+
+Unlike other monitoring solutions that focus on metrics visualization,
+Netdata helps us troubleshoot slowdowns without touching the console.
+
+So, everything is a bit different.
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2FWhy-Netdata&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/docs/why-netdata/immediate-results.md b/docs/why-netdata/immediate-results.md
new file mode 100644
index 0000000..ba7c7d6
--- /dev/null
+++ b/docs/why-netdata/immediate-results.md
@@ -0,0 +1,46 @@
+<!--
+title: "Immediate results"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/docs/why-netdata/immediate-results.md
+-->
+
+# Immediate results
+
+Most of our infrastructure is based on standardized systems and applications.
+
+It is a tremendous waste of time and effort, on a global scale, to require all users to configure their infrastructure dashboards and alarms metric by metric.
+
+## Why?
+
+Most of the existing monitoring solutions focus on providing a platform "for building your monitoring". So, they provide the tools to collect metrics, store them, visualize them, check them, and query them. And we are all expected to go through this process.
+
+However, most of our infrastructure is standardized. We run well known Linux distributions, the same kernel, the same database, the same web server, etc.
+
+So, why can't we have a monitoring system that can be installed and instantly provide feature-rich dashboards and alarms about everything we use? Is there any reason you would like to monitor your web server differently than me?
+
+What a waste of time and money! Hundreds of thousands of people doing the same thing over and over again, trying to understand what the metrics are, how to visualize them, how to configure alarms for them and how to query them when issues arise.
+
+## What do others do?
+
+Open-source solutions rely almost entirely on configuration. So, you have to go through endless metric-by-metric configuration yourself. The result will reflect your skills, your experience, your understanding.
+
+Monitoring SaaS providers offer a very basic set of pre-configured metrics, dashboards, and alarms. They assume that you will configure whatever else you need. So, once more, the result will reflect your skills, your experience, your understanding.
+
+## What does Netdata do?
+
+1. Metrics are auto-detected, so for 99% of the cases data collection works out of the box.
+2. Metrics are converted to human-readable units, right after data collection, before storing them into the database.
+3. Metrics are structured, organized in charts, families and applications, so that they can be browsed.
+4. Dashboards are automatically generated, so all metrics are available for exploration immediately after installation.
+5. Dashboards do not just visualize metrics; they are a tool optimized for visual anomaly detection.
+6. Hundreds of pre-configured alarm templates are automatically attached to collected metrics.
+
+The result is that Netdata can be used immediately after installation!
+
+Netdata:
+
+- Helps engineers understand and learn what the metrics are.
+- Does not require any configuration. Of course there are thousands of options to tweak, but the defaults are pretty good for most systems.
+- Does not introduce any query languages or any other technology to be learned. Of course some familiarity with the tool is required, but nothing too complicated.
+- Includes all the community expertise and experience for monitoring systems and applications.
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2Fwhy-netdata%2Fimmediate-results&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/docs/why-netdata/meaningful-presentation.md b/docs/why-netdata/meaningful-presentation.md
new file mode 100644
index 0000000..64d83b4
--- /dev/null
+++ b/docs/why-netdata/meaningful-presentation.md
@@ -0,0 +1,68 @@
+<!--
+title: "Meaningful presentation"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/docs/why-netdata/meaningful-presentation.md
+-->
+
+# Meaningful presentation
+
+Metrics are a lot more than name-value pairs over time. It is just not practical to require all users to have a deep understanding of all metrics for monitoring their systems and applications.
+
+## Why?
+
+There is a plethora of metrics. And each of them has a context, a meaning, a way to be interpreted.
+
+Traditionally, monitoring solutions instruct engineers to collect only the metrics they understand. This is a good strategy as long as you have a clear understanding of what you need and you have the skills, the expertise and the experience to select them.
+
+For most people, this is an impossible task. It is just not practical to assume that any engineer will have a deep understanding of how the kernel works, how the networking stack works, how the system manages its memory, how it schedules processes, how web servers work, how databases work, etc.
+
+The result is that for most of the world, monitoring sucks. It is incomplete, inefficient, and in most cases only useful for providing an illusion that the infrastructure is being monitored. It is not! According to the [State of Monitoring 2017](http://start.bigpanda.io/state-of-monitoring-report-2017), only 11% of companies are satisfied with their existing monitoring infrastructure, and on average they use 6-7 monitoring tools.
+
+But even if all the metrics are collected, an even bigger challenge is revealed: What to do with them? How to use them?
+
+The existing monitoring solutions assume the engineers will:
+
+- Design dashboards
+- Configure alarms
+- Use a query language to investigate issues
+
+However, all these have to be configured metric by metric.
+
+The monitoring industry believes there is this "IT Operations Hero", a person combining these abilities:
+
+1. Has a deep understanding of IT architectures and is a skillful SysAdmin.
+2. Is a superb Network Administrator (can even read and understand the Linux kernel networking stack).
+3. Is an exceptional database administrator.
+4. Is fluent in software engineering, capable of understanding the internal workings of applications.
+5. Masters Data Science, statistical algorithms and is fluent in writing advanced mathematical queries to reveal the meaning of metrics.
+
+Of course this person does not exist!
+
+## What do others do?
+
+Most solutions are based on a time-series database: a database that tracks name-value pairs over time.
+
+Data collection blindly collects metrics and stores them in the database, while dashboard editors query the database to visualize the metrics. They may also provide a query editor that users can use to query the database by hand.
+
+Of course, it is just not practical to work that way when the database has 10,000 unique metrics. Most of them will be just noise, not because they are not useful, but because no one understands them!
+
+So, they collect very limited metrics. Basic dashboards can be created with these metrics, but for any issue that needs troubleshooting, the monitoring system is just not adequate. It cannot help. So, engineers use the console to access the rest of the metrics and find the root cause.
+
+## What does Netdata do?
+
+In Netdata, the meaning of metrics is incorporated into the database:
+
+1. All metrics are converted to human-friendly units before being stored. This is a data-collection process, not a visualization process. For example, CPU utilization in Netdata is stored as a percentage, not as kernel ticks.
+
+2. All metrics are organized into human-friendly charts, sharing the same context and units (similar to what other monitoring solutions call `cardinality`). So, when Netdata developers collect metrics, they configure the correlation of the metrics right in data collection, and this correlation is stored in the database too.
+
+3. All charts are then organized in families, and chart families are organized in applications. These structures are responsible for providing the menu at the right side of Netdata dashboards for exploring the whole database.
+
+The result is a system that can be browsed by humans, even if the database has 100,000 unique metrics. It is pretty natural for everyone to browse them, understand their meaning and their scope.
+
+Of course, this process makes data collection significantly more time consuming. Netdata developers need to normalize, correlate, and categorize every single metric Netdata collects.
+
+But it simplifies everything else. Data collection, the metrics database, and visualization are decoupled, thus the query engine is simpler, and the visualization is straightforward.
+
+Netdata goes a step further by enriching the dashboard with information that is useful to most people. To improve clarity and help users be more effective, Netdata includes the community's knowledge and expertise about the metrics right in the dashboard, so that Netdata users can focus on solving their infrastructure problems, not on the technicalities of data collection and visualization.
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2Fwhy-netdata%2Fmeaningful-presentation&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/docs/why-netdata/unlimited-metrics.md b/docs/why-netdata/unlimited-metrics.md
new file mode 100644
index 0000000..8e50062
--- /dev/null
+++ b/docs/why-netdata/unlimited-metrics.md
@@ -0,0 +1,49 @@
+<!--
+title: "Unlimited metrics"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/docs/why-netdata/unlimited-metrics.md
+-->
+
+# Unlimited metrics
+
+All metrics are important and all metrics should be available when you need them.
+
+## Why?
+
+Collecting all the metrics breaks the first rule of every monitoring textbook: "collect only the metrics you need", "collect only the metrics you understand".
+
+Unfortunately, this does not work! Filtering out most metrics is like reading a book by skipping most of its pages...
+
+For many people, monitoring is about:
+
+- Detecting outages
+- Capacity planning
+
+However, **slowdowns are 10 times more common** compared to outages (check slide 14 of [Online Performance is Business Performance](https://www.slideshare.net/KenGodskind/alertsitetrac) reported by Trac Research/AlertSite). Designing a monitoring system targeting only outages and capacity planning solves just a tiny part of the operational problems we face. Check also [Downtime vs. Slowtime: Which Hurts More?](https://dzone.com/articles/downtime-vs-slowtime-which-hurts-more).
+
+To troubleshoot a slowdown, a lot more metrics are needed. Actually, all the metrics are needed, since the real cause of a slowdown is most probably quite complex. If we knew the possible reasons, chances are we would have fixed them before they became a problem.
+
+## What do others do?
+
+Most monitoring solutions, when they are able to detect something, provide just a hint (e.g. "hey, there is a 20% drop in requests per second over the last minute") and they expect us to use the console for determining the root cause.
+
+Of course this introduces a lot more problems: how can you troubleshoot a slowdown using the console if the slowdown lasts just a few seconds, spread randomly throughout the day?
+
+You can't! You will spend your entire day on the console, waiting for the problem to happen again while you are logged in. A blame war starts: developers blame the systems, sysadmins blame the hosting provider, someone says it is a DNS problem, another one believes it is network related, etc. We have all experienced this, multiple times...
+
+So, why do monitoring solutions and SaaS providers filter out metrics?
+
+They can't do otherwise!
+
+1. Centralization of metrics depends on metrics filtering, to control monitoring costs. Time-series databases limit the number of metrics collected, because the number of metrics influences their performance significantly. They get congested at scale.
+2. It is a lot easier to provide an illusion of monitoring by using a few basic metrics.
+3. Troubleshooting slowdowns is the hardest IT problem to solve, so most solutions just avoid it.
+
+## What does Netdata do?
+
+Netdata collects, stores, and visualizes everything: every single metric exposed by systems and applications.
+
+Due to Netdata's distributed nature, the number of metrics collected does not have any noticeable effect on the performance or the cost of the monitoring infrastructure.
+
+Of course, since Netdata is also about [meaningful presentation](meaningful-presentation.md), the number of metrics makes Netdata development slower. We, the Netdata developers, need to have a good understanding of the metrics before adding them into Netdata. We need to organize the metrics, add information related to them, configure alarms for them, so that you, the Netdata users, will have the best out-of-the-box experience and all the information required to kill the console for troubleshooting slowdowns.
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2Fwhy-netdata%2Funlimited-metrics&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/exporting/Makefile.am b/exporting/Makefile.am
new file mode 100644
index 0000000..41fcac0
--- /dev/null
+++ b/exporting/Makefile.am
@@ -0,0 +1,29 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+SUBDIRS = \
+ tests \
+ graphite \
+ json \
+ opentsdb \
+ prometheus \
+ aws_kinesis \
+ pubsub \
+ mongodb \
+ $(NULL)
+
+dist_libconfig_DATA = \
+ exporting.conf \
+ $(NULL)
+
+dist_noinst_DATA = \
+ README.md \
+ TIMESCALE.md \
+ WALKTHROUGH.md \
+ $(NULL)
+
+dist_noinst_SCRIPTS = \
+ nc-exporting.sh \
+ $(NULL)
diff --git a/exporting/README.md b/exporting/README.md
new file mode 100644
index 0000000..933de0e
--- /dev/null
+++ b/exporting/README.md
@@ -0,0 +1,312 @@
+<!--
+title: "Exporting engine reference"
+description: "With the exporting engine, you can archive your Netdata metrics to multiple external databases for long-term storage or further analysis."
+sidebar_label: Reference guide
+custom_edit_url: https://github.com/netdata/netdata/edit/master/exporting/README.md
+-->
+
+# Exporting engine reference
+
+Welcome to the exporting engine reference guide. This guide contains comprehensive information about enabling,
+configuring, and monitoring Netdata's exporting engine, which allows you to send metrics to external time-series
+databases.
+
+For a quick introduction to the exporting engine's features, read our doc on [exporting metrics to time-series
+databases](/docs/export/external-databases.md), or jump in to [enabling a connector](/docs/export/enable-connector.md).
+
+The exporting engine has a modular structure and supports metric exporting via multiple exporting connector instances at
+the same time. You can have different update intervals and filters configured for every exporting connector instance.
+
+When you enable the exporting engine and a connector, the Netdata Agent exports metrics _beginning from the time you
+restart its process_, not the entire [database of long-term metrics](/docs/store/change-metrics-storage.md).
+
+The exporting engine has its own configuration file `exporting.conf`. The configuration is almost identical to that of
+the deprecated [backends](/backends/README.md#configuration) system. The most important difference is that the type of a
+connector is specified in the section name before a colon, and an instance name after the colon. Also, you can't use
+`host tags` anymore. Set your labels using the [`[host labels]`](/docs/guides/using-host-labels.md) section in
+`netdata.conf`.
+
+Since Netdata collects thousands of metrics per server per second, which would easily congest any database server when
+several Netdata servers are sending data to it, Netdata allows sending metrics at a lower frequency, by resampling them.
+
+So, although Netdata collects metrics every second, it can send averages or sums to the external database servers every
+X seconds (though it can send them per second if you need it to).
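+
+For example, adapting the options documented later in this reference, a minimal sketch of 10-second averaging (the
+instance name and destination are placeholders) might look like:
+
+```conf
+[graphite:my_graphite_instance]
+    enabled = yes
+    destination = localhost:2003
+    # resample: send one averaged value every 10 seconds instead of every collected value
+    data source = average
+    update every = 10
+```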
+
+## Features
+
+1. The exporting engine uses a number of connectors to send Netdata metrics to external time-series databases. See our
+ [list of supported databases](/docs/export/external-databases.md#supported-databases) for information on which
+ connector to enable and configure for your database of choice.
+
+ - [**AWS Kinesis Data Streams**](/exporting/aws_kinesis/README.md): Metrics are sent to the service in `JSON`
+ format.
+ - [**Google Cloud Pub/Sub Service**](/exporting/pubsub/README.md): Metrics are sent to the service in `JSON`
+ format.
+ - [**Graphite**](/exporting/graphite/README.md): A plaintext interface. Metrics are sent to the database server as
+ `prefix.hostname.chart.dimension`. `prefix` is configured below, `hostname` is the hostname of the machine (can
+ also be configured). Learn more in our guide to [export and visualize Netdata metrics in
+ Graphite](/docs/guides/export/export-netdata-metrics-graphite.md).
+ - [**JSON** document databases](/exporting/json/README.md)
+   - [**OpenTSDB**](/exporting/opentsdb/README.md): Use the plaintext or HTTP interface. Metrics are sent to
+ OpenTSDB as `prefix.chart.dimension` with tag `host=hostname`.
+ - [**MongoDB**](/exporting/mongodb/README.md): Metrics are sent to the database in `JSON` format.
+ - [**Prometheus**](/exporting/prometheus/README.md): Use an existing Prometheus installation to scrape metrics
+     from the node using the Netdata API.
+ - [**Prometheus remote write**](/exporting/prometheus/remote_write/README.md). A binary snappy-compressed protocol
+ buffer encoding over HTTP. Supports many [storage
+ providers](https://prometheus.io/docs/operating/integrations/#remote-endpoints-and-storage).
+ - [**TimescaleDB**](/exporting/TIMESCALE.md): Use a community-built connector that takes JSON streams from a
+ Netdata client and writes them to a TimescaleDB table.
+
+2. Netdata can filter metrics (at the chart level), to send only a subset of the collected metrics.
+
+3. Netdata supports three modes of operation for all exporting connectors:
+
+ - `as-collected` sends to external databases the metrics as they are collected, in the units they are collected.
+ So, counters are sent as counters and gauges are sent as gauges, much like all data collectors do. For example,
+ to calculate CPU utilization in this format, you need to know how to convert kernel ticks to percentage.
+
+ - `average` sends to external databases normalized metrics from the Netdata database. In this mode, all metrics
+ are sent as gauges, in the units Netdata uses. This abstracts data collection and simplifies visualization, but
+ you will not be able to copy and paste queries from other sources to convert units. For example, CPU utilization
+ percentage is calculated by Netdata, so Netdata will convert ticks to percentage and send the average percentage
+ to the external database.
+
+ - `sum` or `volume`: the sum of the interpolated values shown on the Netdata graphs is sent to the external
+ database. So, if Netdata is configured to send data to the database every 10 seconds, the sum of the 10 values
+ shown on the Netdata charts will be used.
+
+   Time-series databases suggest collecting the raw values (`as-collected`). If you plan to invest in building your
+   monitoring around a time-series database and you already know (or will invest in learning) how to convert units
+   and normalize the metrics in Grafana or other visualization tools, we suggest using `as-collected`.
+
+   If, on the other hand, you just need long-term archiving of Netdata metrics and you plan to mainly work with
+   Netdata, we suggest using `average`. It decouples visualization from data collection, so it will generally be a lot
+   simpler. Furthermore, if you use `average`, the charts shown in the external service will match exactly what you
+   see in Netdata, which is not necessarily true for the other modes of operation. A small worked sketch of the three
+   modes follows this list.
+
+4. The exporting engine is smart enough not to slow down Netdata, regardless of the speed of the external database
+   server. You should keep in mind, though, that many exporting connector instances can consume a lot of CPU resources
+   if they run their batches at the same time. You can set different update intervals for every exporting connector
+   instance, but even in that case they can occasionally synchronize their batches for a moment.
+
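+To make the three modes concrete, here is a hedged, illustrative sketch (the sample values are invented for the
+example):
+
+```conf
+# suppose Netdata collected these ten 1-second samples for one dimension: 1 2 3 4 5 6 7 8 9 10
+# with 'update every = 10', each mode sends one value per cycle:
+#   as-collected -> the raw value as read from the source (e.g. a kernel counter), units unchanged
+#   average      -> (1+2+...+10) / 10 = 5.5, in the units Netdata shows on its charts
+#   sum          -> 1+2+...+10       = 55,  in the units Netdata shows on its charts
+```
+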
+## Configuration
+
+Here are the configuration blocks for every supported connector. Your current `exporting.conf` file may look a little
+different.
+
+You can configure each connector individually using the available [options](#options). The
+`[graphite:my_graphite_instance]` block contains examples of some of these additional options in action.
+
+```conf
+[exporting:global]
+ enabled = yes
+ send configured labels = no
+ send automatic labels = no
+ update every = 10
+
+[prometheus:exporter]
+ send names instead of ids = yes
+ send configured labels = yes
+    send automatic labels = no
+ send charts matching = *
+ send hosts matching = localhost *
+ prefix = netdata
+
+[graphite:my_graphite_instance]
+ enabled = yes
+ destination = localhost:2003
+ data source = average
+ prefix = Netdata
+ hostname = my-name
+ update every = 10
+ buffer on failures = 10
+ timeout ms = 20000
+ send charts matching = *
+ send hosts matching = localhost *
+ send names instead of ids = yes
+ send configured labels = yes
+ send automatic labels = yes
+
+[prometheus_remote_write:my_prometheus_remote_write_instance]
+ enabled = yes
+ destination = localhost
+ remote write URL path = /receive
+
+[kinesis:my_kinesis_instance]
+ enabled = yes
+ destination = us-east-1
+ stream name = netdata
+ aws_access_key_id = my_access_key_id
+ aws_secret_access_key = my_aws_secret_access_key
+
+[pubsub:my_pubsub_instance]
+ enabled = yes
+ destination = pubsub.googleapis.com
+ credentials file = /etc/netdata/pubsub_credentials.json
+ project id = my_project
+ topic id = my_topic
+
+[mongodb:my_mongodb_instance]
+ enabled = yes
+ destination = localhost
+ database = my_database
+ collection = my_collection
+
+[json:my_json_instance]
+ enabled = yes
+ destination = localhost:5448
+
+[opentsdb:my_opentsdb_plaintext_instance]
+ enabled = yes
+ destination = localhost:4242
+
+[opentsdb:http:my_opentsdb_http_instance]
+ enabled = yes
+ destination = localhost:4242
+
+[opentsdb:https:my_opentsdb_https_instance]
+ enabled = yes
+ destination = localhost:8082
+```
+
+### Sections
+
+- `[exporting:global]` is a section where you can set your defaults for all exporting connectors.
+- `[prometheus:exporter]` defines settings for Prometheus exporter API queries (e.g.:
+ `http://NODE:19999/api/v1/allmetrics?format=prometheus&help=yes&source=as-collected`).
+- `[<type>:<name>]` keeps settings for a particular exporting connector instance, where:
+  - `type` selects the exporting connector type: graphite | opentsdb:telnet | opentsdb:http |
+    prometheus_remote_write | json | kinesis | pubsub | mongodb. For graphite, opentsdb,
+    json, and prometheus_remote_write connectors you can also use `:http` or `:https` modifiers
+    (e.g.: `opentsdb:https`; see the sketch after this list).
+  - `name` can be an arbitrary instance name you choose.
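+
+As a hedged illustration of the `[<type>:<name>]` naming (the instance name and destination are placeholders):
+
+```conf
+# a Graphite connector instance that connects over HTTPS
+[graphite:https:my_secure_graphite]
+    enabled = yes
+    destination = graphite.example.com:443
+```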
+
+### Options
+
+Configure individual connectors and override any global settings with the following options.
+
+- `enabled = yes | no`, enables or disables an exporting connector instance
+
+- `destination = host1 host2 host3 ...`, accepts **a space separated list** of hostnames, IPs (IPv4 and IPv6) and
+ ports to connect to. Netdata will use the **first available** to send the metrics.
+
+  The format of each item in this list is: `[PROTOCOL:]IP[:PORT]`.
+
+  `PROTOCOL` can be `udp` or `tcp`. `tcp` is the default and the only protocol supported by the current exporting engine.
+
+  `IP` can be `XX.XX.XX.XX` (IPv4), or `[XX:XX...XX:XX]` (IPv6). For IPv6 you need to enclose the IP in `[]` to
+ separate it from the port.
+
+  `PORT` can be a number or a service name. If omitted, the default port for the exporting connector will be used
+ (graphite = 2003, opentsdb = 4242).
+
+ Example IPv4:
+
+```conf
+ destination = 10.11.14.2:4242 10.11.14.3:4242 10.11.14.4:4242
+```
+
+ Example IPv6 and IPv4 together:
+
+```conf
+ destination = [ffff:...:0001]:2003 10.11.12.1:2003
+```
+
+ When multiple servers are defined, Netdata will try the next one when the previous one fails.
+
+ Netdata also ships `nc-exporting.sh`, a script that can be used as a fallback exporting connector to save the
+ metrics to disk and push them to the time-series database when it becomes available again. It can also be used to
+ monitor / trace / debug the metrics Netdata generates.
+
+ For the Kinesis exporting connector `destination` should be set to an AWS region (for example, `us-east-1`).
+
+ For the MongoDB exporting connector `destination` should be set to a
+ [MongoDB URI](https://docs.mongodb.com/manual/reference/connection-string/).
+
+ For the Pub/Sub exporting connector `destination` can be set to a specific service endpoint.
+
+- `data source = as collected`, or `data source = average`, or `data source = sum`, selects the kind of data that will
+ be sent to the external database.
+
+- `hostname = my-name`, is the hostname to be used for sending data to the external database server. By default this
+ is `[global].hostname`.
+
+- `prefix = Netdata`, is the prefix to add to all metrics.
+
+- `update every = 10`, is the number of seconds between sending data to the external database. Netdata will add some
+ randomness to this number, to prevent stressing the external server when many Netdata servers send data to the same
+  database. This randomness does not affect the quality of the data, only when they are sent.
+
+- `buffer on failures = 10`, is the number of iterations (each iteration is `update every` seconds) to buffer data
+  when the external database server is not available. If the server is still unreachable after that many
+  iterations, expect data loss on the connector instance (Netdata will also log it).
+
+- `timeout ms = 20000`, is the timeout in milliseconds to wait for the external database server to process the data.
+ By default this is `2 * update_every * 1000`.
+
+- `send hosts matching = localhost *` includes one or more space separated patterns, using `*` as wildcard (any number
+ of times within each pattern). The patterns are checked against the hostname (the localhost is always checked as
+ `localhost`), allowing us to filter which hosts will be sent to the external database when this Netdata is a central
+ Netdata aggregating multiple hosts. A pattern starting with `!` gives a negative match. So to match all hosts named
+ `*db*` except hosts containing `*child*`, use `!*child* *db*` (so, the order is important: the first
+ pattern matching the hostname will be used - positive or negative).
+
+- `send charts matching = *` includes one or more space separated patterns, using `*` as wildcard (any number of times
+ within each pattern). The patterns are checked against both chart id and chart name. A pattern starting with `!`
+ gives a negative match. So to match all charts named `apps.*` except charts ending in `*reads`, use `!*reads
+ apps.*` (so, the order is important: the first pattern matching the chart id or the chart name will be used -
+ positive or negative).
+
+- `send names instead of ids = yes | no` controls the metric names Netdata should send to the external database.
+ Netdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system
+  and names are human-friendly labels (also unique). Most charts and metrics have the same ID and name, but in several
+ cases they are different: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.
+
+- `send configured labels = yes | no` controls if labels defined in the `[host labels]` section in `netdata.conf`
+  should be sent to the external database.
+
+- `send automatic labels = yes | no` controls if automatically created labels, like `_os_name` or `_architecture`,
+  should be sent to the external database.
+
+> Starting from Netdata v1.20 the host tags (defined in the `[backend]` section of `netdata.conf`) are parsed in
+> accordance with a configured backend type and stored as host labels so that they can be reused in API responses and
+> exporting connectors. The parsing is supported for graphite, json, opentsdb, and prometheus (default) backend types.
+> You can check how the host tags were parsed using the `/api/v1/info` API call. But keep in mind that the backends
+> subsystem is deprecated and will be removed soon. Please move your existing tags to the `[host labels]` section.
+
+## HTTPS
+
+Netdata can send metrics to external databases using the TLS/SSL protocol. Unfortunately, some of
+them do not support encrypted connections, so you will have to configure a reverse proxy to enable
+HTTPS communication between Netdata and an external database. You can set up a reverse proxy with
+[Nginx](/docs/Running-behind-nginx.md).
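+
+As a rough, hedged sketch (the port, certificate paths, and backend address are placeholders), an Nginx `stream`
+block can terminate TLS from Netdata and forward plaintext to a local OpenTSDB listener:
+
+```conf
+stream {
+    server {
+        # Netdata connects here with TLS (e.g. destination = localhost:8082 in a [opentsdb:https:...] section)
+        listen 8082 ssl;
+        ssl_certificate     /etc/ssl/certs/example.crt;
+        ssl_certificate_key /etc/ssl/private/example.key;
+        # forward decrypted traffic to the plaintext OpenTSDB listener
+        proxy_pass 127.0.0.1:4242;
+    }
+}
+```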
+
+## Exporting engine monitoring
+
+Netdata creates four charts in the dashboard, under the **Netdata Monitoring** section, to help you monitor the health
+and performance of the exporting engine itself:
+
+1. **Buffered metrics**, the number of metrics Netdata added to the buffer for dispatching them to the
+ external database server.
+
+2. **Exporting data size**, the amount of data (in KB) Netdata added to the buffer.
+
+3. **Exporting operations**, the number of operations performed by Netdata.
+
+4. **Exporting thread CPU usage**, the CPU resources consumed by the Netdata thread that is responsible for sending
+ the metrics to the external database server.
+
+![image](https://cloud.githubusercontent.com/assets/2662304/20463536/eb196084-af3d-11e6-8ee5-ddbd3b4d8449.png)
+
+## Exporting engine alarms
+
+Netdata adds three alarms:
+
+1. `exporting_last_buffering`, number of seconds since the last successful buffering of exported data
+2. `exporting_metrics_sent`, percentage of metrics sent to the external database server
+3. `exporting_metrics_lost`, number of metrics lost due to repeating failures to contact the external database server
+
+![image](https://cloud.githubusercontent.com/assets/2662304/20463779/a46ed1c2-af43-11e6-91a5-07ca4533cac3.png)
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fexporting%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/exporting/TIMESCALE.md b/exporting/TIMESCALE.md
new file mode 100644
index 0000000..c98003e
--- /dev/null
+++ b/exporting/TIMESCALE.md
@@ -0,0 +1,69 @@
+<!--
+title: "Writing metrics to TimescaleDB"
+description: "Send Netdata metrics to TimescaleDB for long-term archiving and further analysis."
+custom_edit_url: https://github.com/netdata/netdata/edit/master/exporting/TIMESCALE.md
+sidebar_label: Writing metrics to TimescaleDB
+-->
+
+# Writing metrics to TimescaleDB
+
+Thanks to Netdata's community of developers and system administrators, and Mahlon Smith
+([GitHub](https://github.com/mahlonsmith)/[Website](http://www.martini.nu/)) in particular, Netdata now supports
+archiving metrics directly to TimescaleDB.
+
+What's TimescaleDB? Here's how their team defines the project on their [GitHub page](https://github.com/timescale/timescaledb):
+
+> TimescaleDB is an open-source database designed to make SQL scalable for time-series data. It is engineered up from
+> PostgreSQL, providing automatic partitioning across time and space (partitioning key), as well as full SQL support.
+
+## Quickstart
+
+To get started archiving metrics to TimescaleDB right away, check out Mahlon's [`netdata-timescale-relay`
+repository](https://github.com/mahlonsmith/netdata-timescale-relay) on GitHub. Please be aware that the backends
+subsystem is deprecated and your Netdata configuration should be moved to the new `exporting.conf` configuration file. Use
+```conf
+[json:my_instance]
+```
+in `exporting.conf` instead of
+```conf
+[backend]
+ type = json
+```
+in `netdata.conf`.
+
+This small program takes JSON streams from a Netdata client and writes them to a PostgreSQL (aka TimescaleDB) table.
+You'll run this program in parallel with Netdata, and after a short [configuration
+process](https://github.com/mahlonsmith/netdata-timescale-relay#configuration), your metrics should start populating
+TimescaleDB.
+
+Finally, another member of Netdata's community has built a project that quickly launches Netdata, TimescaleDB, and
+Grafana in easy-to-manage Docker containers. Rune Juhl Jacobsen's
+[project](https://github.com/runejuhl/grafana-timescaledb) uses a `Makefile` to create everything, which makes it
+perfect for testing and experimentation.
+
+## Netdata&#8596;TimescaleDB in action
+
+Aside from creating incredible contributions to Netdata, Mahlon works at [LAIKA](https://www.laika.com/), an
+Oregon-based animation studio that's helped create acclaimed films like _Coraline_ and _Kubo and the Two Strings_.
+
+As part of his work to maintain the company's infrastructure of render farms, workstations, and virtual machines, he's
+using Netdata, `netdata-timescale-relay`, and TimescaleDB to store Netdata metrics alongside other data from other
+sources.
+
+> LAIKA is a long-time PostgreSQL user and added TimescaleDB to their infrastructure in 2018 to help manage and store
+> their IT metrics and time-series data. So far, the tool has been in production at LAIKA for over a year and helps them
+> with their use case of time-based logging, where they record over 8 million metrics an hour for netdata content alone.
+
+By archiving Netdata metrics to a database like TimescaleDB, LAIKA can consolidate metrics data from distributed
+machines efficiently. Mahlon can then correlate Netdata metrics with other sources directly in TimescaleDB.
+
+And, because LAIKA will soon be storing years' worth of Netdata metrics data in TimescaleDB, they can analyze long-term
+metrics as their films move from concept to final cut.
+
+Read the full blog post from LAIKA at the [TimescaleDB
+blog](https://blog.timescale.com/blog/writing-it-metrics-from-netdata-to-timescaledb/amp/).
+
+Thank you to Mahlon, Rune, TimescaleDB, and the members of the Netdata community that requested and then built this
+exporting connection between Netdata and TimescaleDB!
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fexporting%2FTIMESCALE&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/exporting/WALKTHROUGH.md b/exporting/WALKTHROUGH.md
new file mode 100644
index 0000000..ac17129
--- /dev/null
+++ b/exporting/WALKTHROUGH.md
@@ -0,0 +1,259 @@
+<!--
+title: "Exporting to Netdata, Prometheus, Grafana stack"
+description: "Using Netdata in conjunction with Prometheus and Grafana."
+custom_edit_url: https://github.com/netdata/netdata/edit/master/exporting/WALKTHROUGH.md
+sidebar_label: Netdata, Prometheus, Grafana stack
+-->
+
+# Netdata, Prometheus, Grafana stack
+
+## Intro
+
+In this article I will walk you through the basics of getting Netdata, Prometheus and Grafana all working together and
+monitoring your application servers. This article will be using Docker on your local workstation. We will be working
+with Docker in an ad-hoc way, launching containers that run `/bin/bash` and attaching a TTY to them. I use Docker here
+in a purely academic fashion and do not condone running Netdata in a container. I picked this method so individuals
+without cloud accounts or access to VMs can try this out, and for its speed of deployment.
+
+## Why Netdata, Prometheus, and Grafana
+
+Some time ago I was introduced to Netdata by a coworker. We were attempting to troubleshoot Python code which seemed to
+be bottlenecked. I was instantly impressed by the number of metrics Netdata exposes to you. I quickly added Netdata to
+my set of go-to tools when troubleshooting systems performance.
+
+Some time later, I was introduced to Prometheus. Prometheus is a monitoring application which flips the normal
+architecture around and polls REST endpoints for its metrics. This architectural change greatly simplifies and decreases
+the time necessary to begin monitoring your applications. Compared to traditional monitoring solutions the time spent on
+designing the infrastructure is greatly reduced. Running a single Prometheus server per application becomes feasible
+with the help of Grafana.
+
+Grafana has been the go-to graphing tool for… some time now. It's awesome; anyone that has used it knows it's awesome.
+We can point Grafana at Prometheus and use Prometheus as a data source. This allows a pretty simple overall monitoring
+architecture: Install Netdata on your application servers, point Prometheus at Netdata, and then point Grafana at
+Prometheus.
+
+I'm omitting an important ingredient in this stack in order to keep this tutorial simple, and that is service discovery.
+My personal preference is to use Consul. Prometheus can plug into Consul and automatically begin to scrape new hosts
+that register a Netdata client with Consul.
+
+At the end of this tutorial you will understand how each technology fits together to create a modern monitoring stack.
+This stack will offer you visibility into your application and systems performance.
+
+## Getting Started - Netdata
+
+To begin, let's create the container which we will install Netdata on. We need to run a container, forward the necessary
+port that Netdata listens on, and attach a TTY so we can interact with the bash shell in the container. But before we do
+this, we want name resolution between the two containers to work. In order to accomplish this we will create a
+user-defined network and attach both containers to this network. The first command we should run is:
+
+```sh
+docker network create --driver bridge netdata-tutorial
+```
+
+With this user-defined network created, we can now launch the container we will install Netdata on and attach it to
+this network.
+
+```sh
+docker run -it --name netdata --hostname netdata --network=netdata-tutorial -p 19999:19999 centos:latest '/bin/bash'
+```
+
+This command creates an interactive tty session (`-it`), gives the container both a name in relation to the docker
+daemon and a hostname (this is so you know what container is which when working in the shells and docker maps hostname
+resolution to this container), forwards the local port 19999 to the container's port 19999 (`-p 19999:19999`), sets the
+command to run (`/bin/bash`), and then chooses the base container image (`centos:latest`). After running this you should
+be sitting inside the shell of the container.
+
+After we have entered the shell we can install Netdata. This process could not be easier. If you take a look at [this
+link](/packaging/installer/README.md), the Netdata devs give us several one-liners to install Netdata. I have not had
+any issues with these one-liners and their bootstrapping scripts so far (if you run into anything, do share). Run
+the following command in your container.
+
+```sh
+bash <(curl -Ss https://my-netdata.io/kickstart.sh) --dont-wait
+```
+
+After the install completes you should be able to hit the Netdata dashboard at <http://localhost:19999/> (replace
+localhost if you're doing this on a VM or have the docker container hosted on a machine not on your local system). If
+this is your first time using Netdata I suggest you take a look around. The amount of time I've spent digging through
+`/proc` and calculating my own metrics has been greatly reduced by this tool. Take it all in.
+
+Next I want to draw your attention to a particular endpoint. Navigate to
+<http://localhost:19999/api/v1/allmetrics?format=prometheus&help=yes> in your browser. This is the endpoint which
+publishes all the metrics in a format which Prometheus understands. Let's take a look at one of these metrics:
+`netdata_system_cpu_percentage_average{chart="system.cpu",family="cpu",dimension="system"} 0.0831255 1501271696000`. This
+metric represents several things, which I will explain in more detail in the section on Prometheus. For now, understand
+that this metric, `netdata_system_cpu_percentage_average`, has several labels (`chart`, `family`, `dimension`). This
+corresponds to the first CPU chart you see on the Netdata dashboard.
+
+![](https://github.com/ldelossa/NetdataTutorial/raw/master/Screen%20Shot%202017-07-28%20at%204.00.45%20PM.png)
+
+This CHART is called `system.cpu`, the FAMILY is `cpu`, and the DIMENSION we are observing is `system`. You can begin to
+draw links between the charts in Netdata to the Prometheus metrics format in this manner.
+
+## Prometheus
+
+We will be installing Prometheus in a container for the purpose of demonstration. While Prometheus does have an official
+container I would like to walk through the install process and setup on a fresh container. This will allow anyone
+reading to migrate this tutorial to a VM or Server of any sort.
+
+Let's start another container in the same fashion as we did the Netdata container.
+
+```sh
+docker run -it --name prometheus --hostname prometheus \
+  --network=netdata-tutorial -p 9090:9090 centos:latest '/bin/bash'
+```
+
+This should drop you into a shell once again. Once there, quickly install your favorite editor, as we will be editing
+files later in this tutorial.
+
+```sh
+yum install vim -y
+```
+
+Prometheus provides a tarball of their latest stable versions [here](https://prometheus.io/download/).
+
+Let's download the latest version and install it into your container.
+
+```sh
+cd /tmp && curl -s https://api.github.com/repos/prometheus/prometheus/releases/latest \
+| grep "browser_download_url.*linux-amd64.tar.gz" \
+| cut -d '"' -f 4 \
+| wget -qi -
+
+mkdir /opt/prometheus
+
+sudo tar -xvf /tmp/prometheus-*linux-amd64.tar.gz -C /opt/prometheus --strip=1
+```
+
+This should get Prometheus installed into the container. Let's test that we can run Prometheus and connect to its web
+interface.
+
+```sh
+/opt/prometheus/prometheus
+```
+
+Now attempt to go to <http://localhost:9090/>. You should be presented with the Prometheus homepage. This is a good
+point to talk about Prometheus's data model, which can be viewed here: <https://prometheus.io/docs/concepts/data_model/>.
+As explained there, we have two key elements in Prometheus metrics: the _metric_ and its _labels_. Labels allow for
+granularity between metrics. Let's use our previous example to further explain.
+
+```conf
+netdata_system_cpu_percentage_average{chart="system.cpu",family="cpu",dimension="system"} 0.0831255 1501271696000
+```
+
+Here our metric is `netdata_system_cpu_percentage_average` and our labels are `chart`, `family`, and `dimension`. The
+last two fields are the metric's value and its timestamp. We can begin graphing
+system metrics with this information, but first we need to hook up Prometheus to poll Netdata stats.
+
+Let's move our attention to Prometheus's configuration. Prometheus gets its config from the file located (in our example)
+at `/opt/prometheus/prometheus.yml`. I won't spend an extensive amount of time going over the configuration values
+documented here: <https://prometheus.io/docs/operating/configuration/>. We will be adding a new job under
+`scrape_configs`. Let's make the `scrape_configs` section look like this (we can use the DNS name `netdata` due to the
+custom user-defined network we created in Docker beforehand).
+
+```yaml
+scrape_configs:
+ # The job name is added as a label `job=<job_name>` to any timeseries scraped from this config.
+ - job_name: 'prometheus'
+
+ # metrics_path defaults to '/metrics'
+ # scheme defaults to 'http'.
+
+ static_configs:
+ - targets: ['localhost:9090']
+
+ - job_name: 'netdata'
+
+ metrics_path: /api/v1/allmetrics
+ params:
+ format: [ prometheus ]
+
+ static_configs:
+ - targets: ['netdata:19999']
+```
+
+Let's start Prometheus once again by running `/opt/prometheus/prometheus`. If we now navigate to Prometheus at
+<http://localhost:9090/targets> we should see our target being successfully scraped. If we now go back to the
+Prometheus's homepage and begin to type `netdata\_` Prometheus should auto complete metrics it is now scraping.
+
+![](https://github.com/ldelossa/NetdataTutorial/raw/master/Screen%20Shot%202017-07-28%20at%205.13.43%20PM.png)
+
+Let's now start exploring how we can graph some metrics. Back in our Netdata container, let's get the CPU spinning with
+a pointless busy loop. On the shell, do the following:
+
+```sh
+[root@netdata /]# while true; do echo "HOT HOT HOT CPU"; done
+```
+
+Our Netdata CPU graph should be showing some activity. Let's represent this in Prometheus. In order to do this, let's
+keep our metrics page open for reference: <http://localhost:19999/api/v1/allmetrics?format=prometheus&help=yes>. We are
+setting out to graph the data in the CPU chart, so let's search for `system.cpu` in the metrics page above. We come
+across a section of metrics with the first comments `# COMMENT homogeneous chart "system.cpu", context "system.cpu",
+family "cpu", units "percentage"` followed by the metrics. This is a good start. Now let us drill down to the specific
+metric we would like to graph.
+
+```conf
+# COMMENT
+netdata_system_cpu_percentage_average: dimension "system", value is percentage, gauge, dt 1501275951 to 1501275951 inclusive
+netdata_system_cpu_percentage_average{chart="system.cpu",family="cpu",dimension="system"} 0.0000000 1501275951000
+```
+
+Here we learn that the metric name we care about is `netdata_system_cpu_percentage_average`, so throw this into
+Prometheus and see what we get. We should see something similar to this (I shut off my busy loop):
+
+![](https://github.com/ldelossa/NetdataTutorial/raw/master/Screen%20Shot%202017-07-28%20at%205.47.53%20PM.png)
+
+This is a good step toward what we want. Also note that Prometheus will tag on an `instance` label for us, which
+corresponds to our statically defined job in the configuration file. This allows us to tailor our queries to specific
+instances. Now we need to isolate the dimension we want in our query. To do this, let us refine the query slightly by
+querying the dimension as well. Place this into our query text box:
+`netdata_system_cpu_percentage_average{dimension="system"}`. We now wind up with the following graph.
+
+![](https://github.com/ldelossa/NetdataTutorial/raw/master/Screen%20Shot%202017-07-28%20at%205.54.40%20PM.png)
+
+Awesome, this is exactly what we wanted. If you haven't caught on yet, we can emulate entire charts from Netdata by using
+the `chart` label. If you'd like, you can combine the `chart` and `instance` labels to create per-instance charts.
+Let's give this a try: `netdata_system_cpu_percentage_average{chart="system.cpu", instance="netdata:19999"}`
+
+This is the basics of using Prometheus to query Netdata. I'd advise everyone at this point to read [this
+page](/exporting/prometheus/#using-netdata-with-prometheus). The key point here is that Netdata can export metrics from
+its internal DB or can send metrics _as-collected_ by specifying the `source=as-collected` URL parameter, like so:
+<http://localhost:19999/api/v1/allmetrics?format=prometheus&help=yes&types=yes&source=as-collected>. If you choose to use
+this method you will need to use Prometheus's set of functions here: <https://prometheus.io/docs/querying/functions/> to
+obtain useful metrics, as you are now dealing with raw counters from the system. For example, you will have to use the
+`irate()` function over a counter to get that metric's rate per second, as sketched below. If your graphing needs are met
+by the metrics returned from Netdata's internal database (not specifying any `source=` URL parameter) then use that. If
+you find limitations, then consider re-writing your queries using the raw data and using Prometheus functions to get the
+desired chart.
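+
+As a hedged illustration (the exact as-collected metric name depends on your system; `netdata_system_cpu_total` is a
+hypothetical example here), a per-second rate over a raw counter might look like:
+
+```conf
+irate(netdata_system_cpu_total{chart="system.cpu",dimension="system"}[1m])
+```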
+
+## Grafana
+
+Finally we make it to Grafana. This is the easiest part, in my opinion. This time we will actually run the official
+Grafana Docker container, as all the configuration we need to do is done via the GUI. Let's run the following command:
+
+```sh
+docker run -i -p 3000:3000 --network=netdata-tutorial grafana/grafana
+```
+
+This will get Grafana running at <http://localhost:3000/>. Let's go there and
+log in using the default credentials admin:admin.
+
+The first thing we want to do is click "Add data source". Let's make it look like the following screenshot.
+
+![](https://github.com/ldelossa/NetdataTutorial/raw/master/Screen%20Shot%202017-07-28%20at%206.36.55%20PM.png)
+
+With this completed, let's graph! Create a new dashboard by clicking on the top left Grafana icon and create a new graph
+in that dashboard. Fill in the query like we did above and save.
+
+![](https://github.com/ldelossa/NetdataTutorial/raw/master/Screen%20Shot%202017-07-28%20at%206.39.38%20PM.png)
+
+## Conclusion
+
+There you have it, a complete systems monitoring stack which is very easy to deploy. From here I would begin to
+explore how Prometheus and a service discovery mechanism such as Consul can play together nicely. My current production
+deployments automatically register Netdata services into Consul and Prometheus automatically begins to scrape them. Once
+this is achieved you do not have to think about the monitoring system until Prometheus cannot keep up with your scale.
+When this happens, there are options presented in the Prometheus documentation for solving it. Hope this was helpful;
+happy monitoring.
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fexporting%2FWALKTHROUGH&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/exporting/aws_kinesis/Makefile.am b/exporting/aws_kinesis/Makefile.am
new file mode 100644
index 0000000..161784b
--- /dev/null
+++ b/exporting/aws_kinesis/Makefile.am
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/exporting/aws_kinesis/README.md b/exporting/aws_kinesis/README.md
new file mode 100644
index 0000000..299fec5
--- /dev/null
+++ b/exporting/aws_kinesis/README.md
@@ -0,0 +1,58 @@
+<!--
+title: "Export metrics to AWS Kinesis Data Streams"
+description: "Archive your Agent's metrics to AWS Kinesis Data Streams for long-term storage, further analysis, or correlation with data from other sources."
+custom_edit_url: https://github.com/netdata/netdata/edit/master/exporting/aws_kinesis/README.md
+sidebar_label: AWS Kinesis Data Streams
+-->
+
+# Export metrics to AWS Kinesis Data Streams
+
+## Prerequisites
+
+To use AWS Kinesis for metric collection and processing, you should first
+[install](https://docs.aws.amazon.com/en_us/sdk-for-cpp/v1/developer-guide/setup.html) the AWS SDK for C++.
+`libcrypto`, `libssl`, and `libcurl` are also required to compile Netdata with Kinesis support enabled. Next, Netdata
+should be re-installed from source. The installer will detect that the required libraries are now available.
+
+If the AWS SDK for C++ is being installed from source, it is useful to set `-DBUILD_ONLY=kinesis`. Otherwise, the
+build process could take a very long time. Note that the default installation path for the libraries is
+`/usr/local/lib64`. Many Linux distributions don't include this path as a default one for a library search, so it is
+advisable to use the following options to `cmake` while building the AWS SDK:
+
+```sh
+sudo cmake -DCMAKE_INSTALL_PREFIX=/usr -DBUILD_ONLY=kinesis <aws-sdk-cpp sources>
+```
+
+The `-DCMAKE_INSTALL_PREFIX=/usr` option also ensures that
+[third party dependencies](https://github.com/aws/aws-sdk-cpp#third-party-dependencies) are installed in your system
+during the SDK build process.
+
+## Configuration
+
+To enable data sending to the Kinesis service, run `./edit-config exporting.conf` in the Netdata configuration directory
+and set the following options:
+
+```conf
+[kinesis:my_instance]
+ enabled = yes
+ destination = us-east-1
+```
+
+Set the `destination` option to an AWS region.
+
+Set AWS credentials and stream name:
+
+```conf
+ # AWS credentials
+ aws_access_key_id = your_access_key_id
+ aws_secret_access_key = your_secret_access_key
+ # destination stream
+ stream name = your_stream_name
+```
+
+Alternatively, you can set AWS credentials for the `netdata` user using AWS SDK for C++ [standard methods](https://docs.aws.amazon.com/sdk-for-cpp/v1/developer-guide/credentials.html).
+
+Netdata automatically computes a partition key for every record in order to distribute records evenly across
+available shards.
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fexporting%2Faws_kinesis%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/exporting/aws_kinesis/aws_kinesis.c b/exporting/aws_kinesis/aws_kinesis.c
new file mode 100644
index 0000000..036afb4
--- /dev/null
+++ b/exporting/aws_kinesis/aws_kinesis.c
@@ -0,0 +1,217 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "aws_kinesis.h"
+
+/**
+ * Clean AWS Kinesis *
+ */
+void aws_kinesis_cleanup(struct instance *instance)
+{
+ info("EXPORTING: cleaning up instance %s ...", instance->config.name);
+ kinesis_shutdown(instance->connector_specific_data);
+
+ freez(instance->connector_specific_data);
+
+ struct aws_kinesis_specific_config *connector_specific_config = instance->config.connector_specific_config;
+ if (connector_specific_config) {
+ freez(connector_specific_config->auth_key_id);
+ freez(connector_specific_config->secure_key);
+ freez(connector_specific_config->stream_name);
+
+ freez(connector_specific_config);
+ }
+
+ info("EXPORTING: instance %s exited", instance->config.name);
+ instance->exited = 1;
+}
+
+/**
+ * Initialize AWS Kinesis connector instance
+ *
+ * @param instance an instance data structure.
+ * @return Returns 0 on success, 1 on failure.
+ */
+int init_aws_kinesis_instance(struct instance *instance)
+{
+ instance->worker = aws_kinesis_connector_worker;
+
+ instance->start_batch_formatting = NULL;
+ instance->start_host_formatting = format_host_labels_json_plaintext;
+ instance->start_chart_formatting = NULL;
+
+ if (EXPORTING_OPTIONS_DATA_SOURCE(instance->config.options) == EXPORTING_SOURCE_DATA_AS_COLLECTED)
+ instance->metric_formatting = format_dimension_collected_json_plaintext;
+ else
+ instance->metric_formatting = format_dimension_stored_json_plaintext;
+
+ instance->end_chart_formatting = NULL;
+ instance->end_host_formatting = flush_host_labels;
+ instance->end_batch_formatting = NULL;
+
+ instance->prepare_header = NULL;
+ instance->check_response = NULL;
+
+ instance->buffer = (void *)buffer_create(0);
+ if (!instance->buffer) {
+ error("EXPORTING: cannot create buffer for AWS Kinesis exporting connector instance %s", instance->config.name);
+ return 1;
+ }
+ if (uv_mutex_init(&instance->mutex))
+ return 1;
+ if (uv_cond_init(&instance->cond_var))
+ return 1;
+
+ if (!instance->engine->aws_sdk_initialized) {
+ aws_sdk_init();
+ instance->engine->aws_sdk_initialized = 1;
+ }
+
+ struct aws_kinesis_specific_config *connector_specific_config = instance->config.connector_specific_config;
+ struct aws_kinesis_specific_data *connector_specific_data = callocz(1, sizeof(struct aws_kinesis_specific_data));
+ instance->connector_specific_data = (void *)connector_specific_data;
+
+ if (!strcmp(connector_specific_config->stream_name, "")) {
+ error("stream name is a mandatory Kinesis parameter but it is not configured");
+ return 1;
+ }
+
+ kinesis_init(
+ (void *)connector_specific_data,
+ instance->config.destination,
+ connector_specific_config->auth_key_id,
+ connector_specific_config->secure_key,
+ instance->config.timeoutms);
+
+ return 0;
+}
+
+/**
+ * AWS Kinesis connector worker
+ *
+ * Runs in a separate thread for every instance.
+ *
+ * @param instance_p an instance data structure.
+ */
+void aws_kinesis_connector_worker(void *instance_p)
+{
+ struct instance *instance = (struct instance *)instance_p;
+ struct aws_kinesis_specific_config *connector_specific_config = instance->config.connector_specific_config;
+ struct aws_kinesis_specific_data *connector_specific_data = instance->connector_specific_data;
+
+ while (!instance->engine->exit) {
+ unsigned long long partition_key_seq = 0;
+ struct stats *stats = &instance->stats;
+
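+        // block until the main exporting thread signals that a new batch of formatted metrics is ready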
+ uv_mutex_lock(&instance->mutex);
+ while (!instance->data_is_ready)
+ uv_cond_wait(&instance->cond_var, &instance->mutex);
+ instance->data_is_ready = 0;
+
+ if (unlikely(instance->engine->exit)) {
+ uv_mutex_unlock(&instance->mutex);
+ break;
+ }
+
+ // reset the monitoring chart counters
+ stats->received_bytes =
+ stats->sent_bytes =
+ stats->sent_metrics =
+ stats->lost_metrics =
+ stats->receptions =
+ stats->transmission_successes =
+ stats->transmission_failures =
+ stats->data_lost_events =
+ stats->lost_bytes =
+ stats->reconnects = 0;
+
+ BUFFER *buffer = (BUFFER *)instance->buffer;
+ size_t buffer_len = buffer_strlen(buffer);
+
+ stats->buffered_bytes = buffer_len;
+
+ size_t sent = 0;
+
+ while (sent < buffer_len) {
+ char partition_key[KINESIS_PARTITION_KEY_MAX + 1];
+ snprintf(partition_key, KINESIS_PARTITION_KEY_MAX, "netdata_%llu", partition_key_seq++);
+ size_t partition_key_len = strnlen(partition_key, KINESIS_PARTITION_KEY_MAX);
+
+ const char *first_char = buffer_tostring(buffer) + sent;
+
+ size_t record_len = 0;
+
+ // split buffer into chunks of maximum allowed size
+ if (buffer_len - sent < KINESIS_RECORD_MAX - partition_key_len) {
+ record_len = buffer_len - sent;
+ } else {
+ record_len = KINESIS_RECORD_MAX - partition_key_len;
+ while (record_len && *(first_char + record_len - 1) != '\n')
+ record_len--;
+ }
+ char error_message[ERROR_LINE_MAX + 1] = "";
+
+ debug(
+ D_BACKEND,
+ "EXPORTING: kinesis_put_record(): dest = %s, id = %s, key = %s, stream = %s, partition_key = %s, \
+ buffer = %zu, record = %zu",
+ instance->config.destination,
+ connector_specific_config->auth_key_id,
+ connector_specific_config->secure_key,
+ connector_specific_config->stream_name,
+ partition_key,
+ buffer_len,
+ record_len);
+
+ kinesis_put_record(
+ connector_specific_data, connector_specific_config->stream_name, partition_key, first_char, record_len);
+
+ sent += record_len;
+ stats->transmission_successes++;
+
+ size_t sent_bytes = 0, lost_bytes = 0;
+
+ if (unlikely(kinesis_get_result(
+ connector_specific_data->request_outcomes, error_message, &sent_bytes, &lost_bytes))) {
+ // oops! we couldn't send (all or some of the) data
+ error("EXPORTING: %s", error_message);
+ error(
+ "EXPORTING: failed to write data to database backend '%s'. Willing to write %zu bytes, wrote %zu bytes.",
+ instance->config.destination, sent_bytes, sent_bytes - lost_bytes);
+
+ stats->transmission_failures++;
+ stats->data_lost_events++;
+ stats->lost_bytes += lost_bytes;
+
+ // estimate the number of lost metrics
+ stats->lost_metrics += (collected_number)(
+ stats->buffered_metrics *
+                (buffer_len && (lost_bytes < buffer_len) ? (double)lost_bytes / buffer_len : 1));
+
+ break;
+ } else {
+ stats->receptions++;
+ }
+
+ if (unlikely(instance->engine->exit))
+ break;
+ }
+
+ stats->sent_bytes += sent;
+ if (likely(sent == buffer_len))
+ stats->sent_metrics = stats->buffered_metrics;
+
+ buffer_flush(buffer);
+
+ send_internal_metrics(instance);
+
+ stats->buffered_metrics = 0;
+
+ uv_mutex_unlock(&instance->mutex);
+
+#ifdef UNIT_TESTING
+ return;
+#endif
+ }
+
+ aws_kinesis_cleanup(instance);
+}
diff --git a/exporting/aws_kinesis/aws_kinesis.h b/exporting/aws_kinesis/aws_kinesis.h
new file mode 100644
index 0000000..d88a458
--- /dev/null
+++ b/exporting/aws_kinesis/aws_kinesis.h
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_EXPORTING_KINESIS_H
+#define NETDATA_EXPORTING_KINESIS_H
+
+#include "exporting/exporting_engine.h"
+#include "exporting/json/json.h"
+#include "aws_kinesis_put_record.h"
+
+#define KINESIS_PARTITION_KEY_MAX 256
+#define KINESIS_RECORD_MAX 1024 * 1024
+
+int init_aws_kinesis_instance(struct instance *instance);
+void aws_kinesis_connector_worker(void *instance_p);
+
+#endif //NETDATA_EXPORTING_KINESIS_H
diff --git a/exporting/aws_kinesis/aws_kinesis_put_record.cc b/exporting/aws_kinesis/aws_kinesis_put_record.cc
new file mode 100644
index 0000000..b20ec13
--- /dev/null
+++ b/exporting/aws_kinesis/aws_kinesis_put_record.cc
@@ -0,0 +1,151 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include <aws/core/Aws.h>
+#include <aws/core/client/ClientConfiguration.h>
+#include <aws/core/auth/AWSCredentials.h>
+#include <aws/core/utils/Outcome.h>
+#include <aws/kinesis/KinesisClient.h>
+#include <aws/kinesis/model/PutRecordRequest.h>
+#include "aws_kinesis_put_record.h"
+
+using namespace Aws;
+
+static SDKOptions options;
+
+struct request_outcome {
+ Kinesis::Model::PutRecordOutcomeCallable future_outcome;
+ size_t data_len;
+};
+
+/**
+ * Initialize AWS SDK API
+ */
+void aws_sdk_init()
+{
+ InitAPI(options);
+}
+
+/**
+ * Shutdown AWS SDK API
+ */
+void aws_sdk_shutdown()
+{
+ ShutdownAPI(options);
+}
+
+/**
+ * Initialize a client and a data structure for request outcomes
+ *
+ * @param kinesis_specific_data_p a pointer to a structure with client and request outcome information.
+ * @param region AWS region.
+ * @param access_key_id AWS account access key ID.
+ * @param secret_key AWS account secret access key.
+ * @param timeout communication timeout.
+ */
+void kinesis_init(
+ void *kinesis_specific_data_p, const char *region, const char *access_key_id, const char *secret_key,
+ const long timeout)
+{
+ struct aws_kinesis_specific_data *kinesis_specific_data =
+ (struct aws_kinesis_specific_data *)kinesis_specific_data_p;
+
+ Client::ClientConfiguration config;
+
+ config.region = region;
+ config.requestTimeoutMs = timeout;
+ config.connectTimeoutMs = timeout;
+
+ Kinesis::KinesisClient *client;
+
+ if (access_key_id && *access_key_id && secret_key && *secret_key) {
+ client = New<Kinesis::KinesisClient>("client", Auth::AWSCredentials(access_key_id, secret_key), config);
+ } else {
+ client = New<Kinesis::KinesisClient>("client", config);
+ }
+ kinesis_specific_data->client = (void *)client;
+
+ Vector<request_outcome> *request_outcomes;
+
+ request_outcomes = new Vector<request_outcome>;
+ kinesis_specific_data->request_outcomes = (void *)request_outcomes;
+}
+
+/**
+ * Deallocate Kinesis specific data
+ *
+ * @param kinesis_specific_data_p a pointer to a structure with client and request outcome information.
+ */
+void kinesis_shutdown(void *kinesis_specific_data_p)
+{
+ struct aws_kinesis_specific_data *kinesis_specific_data =
+ (struct aws_kinesis_specific_data *)kinesis_specific_data_p;
+
+ Delete((Kinesis::KinesisClient *)kinesis_specific_data->client);
+ delete (Vector<request_outcome> *)kinesis_specific_data->request_outcomes;
+}
+
+/**
+ * Send data to the Kinesis service
+ *
+ * @param kinesis_specific_data_p a pointer to a structure with client and request outcome information.
+ * @param stream_name the name of a stream to send to.
+ * @param partition_key a partition key which automatically maps data to a specific stream.
+ * @param data a data buffer to send to the stream.
+ * @param data_len the length of the data buffer.
+ */
+void kinesis_put_record(
+ void *kinesis_specific_data_p, const char *stream_name, const char *partition_key, const char *data,
+ size_t data_len)
+{
+ struct aws_kinesis_specific_data *kinesis_specific_data =
+ (struct aws_kinesis_specific_data *)kinesis_specific_data_p;
+ Kinesis::Model::PutRecordRequest request;
+
+ request.SetStreamName(stream_name);
+ request.SetPartitionKey(partition_key);
+ request.SetData(Utils::ByteBuffer((unsigned char *)data, data_len));
+
+ ((Vector<request_outcome> *)(kinesis_specific_data->request_outcomes))->push_back(
+ { ((Kinesis::KinesisClient *)(kinesis_specific_data->client))->PutRecordCallable(request), data_len });
+}
+
+/**
+ * Get results from service responses
+ *
+ * @param request_outcomes_p request outcome information.
+ * @param error_message report error message to a caller.
+ * @param sent_bytes report to a caller how many bytes were successfully sent.
+ * @param lost_bytes report to a caller how many bytes were lost during transmission.
+ * @return Returns 0 if all data was sent successfully, 1 when data was lost on transmission
+ */
+int kinesis_get_result(void *request_outcomes_p, char *error_message, size_t *sent_bytes, size_t *lost_bytes)
+{
+ Vector<request_outcome> *request_outcomes = (Vector<request_outcome> *)request_outcomes_p;
+ Kinesis::Model::PutRecordOutcome outcome;
+ *sent_bytes = 0;
+ *lost_bytes = 0;
+
+ for (auto request_outcome = request_outcomes->begin(); request_outcome != request_outcomes->end();) {
+ std::future_status status = request_outcome->future_outcome.wait_for(std::chrono::microseconds(100));
+
+ if (status == std::future_status::ready || status == std::future_status::deferred) {
+ outcome = request_outcome->future_outcome.get();
+ *sent_bytes += request_outcome->data_len;
+
+ if (!outcome.IsSuccess()) {
+ *lost_bytes += request_outcome->data_len;
+ outcome.GetError().GetMessage().copy(error_message, ERROR_LINE_MAX);
+ }
+
+ request_outcomes->erase(request_outcome);
+ } else {
+ ++request_outcome;
+ }
+ }
+
+ if (*lost_bytes) {
+ return 1;
+ }
+
+ return 0;
+}
diff --git a/exporting/aws_kinesis/aws_kinesis_put_record.h b/exporting/aws_kinesis/aws_kinesis_put_record.h
new file mode 100644
index 0000000..321baf6
--- /dev/null
+++ b/exporting/aws_kinesis/aws_kinesis_put_record.h
@@ -0,0 +1,35 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_EXPORTING_KINESIS_PUT_RECORD_H
+#define NETDATA_EXPORTING_KINESIS_PUT_RECORD_H
+
+#define ERROR_LINE_MAX 1023
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct aws_kinesis_specific_data {
+ void *client;
+ void *request_outcomes;
+};
+
+void aws_sdk_init();
+void aws_sdk_shutdown();
+
+void kinesis_init(
+ void *kinesis_specific_data_p, const char *region, const char *access_key_id, const char *secret_key,
+ const long timeout);
+void kinesis_shutdown(void *client);
+
+void kinesis_put_record(
+ void *kinesis_specific_data_p, const char *stream_name, const char *partition_key, const char *data,
+ size_t data_len);
+
+int kinesis_get_result(void *request_outcomes_p, char *error_message, size_t *sent_bytes, size_t *lost_bytes);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif //NETDATA_EXPORTING_KINESIS_PUT_RECORD_H
diff --git a/exporting/check_filters.c b/exporting/check_filters.c
new file mode 100644
index 0000000..cfe0b4c
--- /dev/null
+++ b/exporting/check_filters.c
@@ -0,0 +1,78 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "exporting_engine.h"
+
+/**
+ * Check if the connector instance should export the host metrics
+ *
+ * @param instance an exporting connector instance.
+ * @param host a data collecting host.
+ * @return Returns 1 if the connector instance should export the host metrics
+ */
+int rrdhost_is_exportable(struct instance *instance, RRDHOST *host)
+{
+ if (host->exporting_flags == NULL)
+ host->exporting_flags = callocz(instance->engine->instance_num, sizeof(size_t));
+
+ RRDHOST_FLAGS *flags = &host->exporting_flags[instance->index];
+
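+    // first time we see this host for this instance: match it against the configured hosts pattern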
+ if (unlikely((*flags & (RRDHOST_FLAG_BACKEND_SEND | RRDHOST_FLAG_BACKEND_DONT_SEND)) == 0)) {
+ char *host_name = (host == localhost) ? "localhost" : host->hostname;
+
+ if (!instance->config.hosts_pattern || simple_pattern_matches(instance->config.hosts_pattern, host_name)) {
+ *flags |= RRDHOST_FLAG_BACKEND_SEND;
+ info("enabled exporting of host '%s' for instance '%s'", host_name, instance->config.name);
+ } else {
+ *flags |= RRDHOST_FLAG_BACKEND_DONT_SEND;
+ info("disabled exporting of host '%s' for instance '%s'", host_name, instance->config.name);
+ }
+ }
+
+ if (likely(*flags & RRDHOST_FLAG_BACKEND_SEND))
+ return 1;
+ else
+ return 0;
+}
+
+/**
+ * Check if the connector instance should export the chart
+ *
+ * @param instance an exporting connector instance.
+ * @param st a chart.
+ * @return Returns 1 if the connector instance should export the chart
+ */
+int rrdset_is_exportable(struct instance *instance, RRDSET *st)
+{
+ RRDHOST *host = st->rrdhost;
+
+ if (st->exporting_flags == NULL)
+ st->exporting_flags = callocz(instance->engine->instance_num, sizeof(size_t));
+
+ RRDSET_FLAGS *flags = &st->exporting_flags[instance->index];
+
+ if(unlikely(*flags & RRDSET_FLAG_BACKEND_IGNORE))
+ return 0;
+
+ if(unlikely(!(*flags & RRDSET_FLAG_BACKEND_SEND))) {
+ // we have not checked this chart
+ if(simple_pattern_matches(instance->config.charts_pattern, st->id) || simple_pattern_matches(instance->config.charts_pattern, st->name))
+ *flags |= RRDSET_FLAG_BACKEND_SEND;
+ else {
+ *flags |= RRDSET_FLAG_BACKEND_IGNORE;
+ debug(D_BACKEND, "BACKEND: not sending chart '%s' of host '%s', because it is disabled for backends.", st->id, host->hostname);
+ return 0;
+ }
+ }
+
+ if(unlikely(!rrdset_is_available_for_backends(st))) {
+ debug(D_BACKEND, "BACKEND: not sending chart '%s' of host '%s', because it is not available for backends.", st->id, host->hostname);
+ return 0;
+ }
+
+ if(unlikely(st->rrd_memory_mode == RRD_MEMORY_MODE_NONE && !(EXPORTING_OPTIONS_DATA_SOURCE(instance->config.options) == EXPORTING_SOURCE_DATA_AS_COLLECTED))) {
+ debug(D_BACKEND, "BACKEND: not sending chart '%s' of host '%s' because its memory mode is '%s' and the backend requires database access.", st->id, host->hostname, rrd_memory_mode_name(host->rrd_memory_mode));
+ return 0;
+ }
+
+ return 1;
+}
diff --git a/exporting/clean_connectors.c b/exporting/clean_connectors.c
new file mode 100644
index 0000000..890e8da
--- /dev/null
+++ b/exporting/clean_connectors.c
@@ -0,0 +1,81 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "exporting_engine.h"
+
+/**
+ * Clean the instance config.
+ *
+ * @param config an instance config structure.
+ */
+static void clean_instance_config(struct instance_config *config)
+{
+ if(!config)
+ return;
+
+ freez((void *)config->type_name);
+ freez((void *)config->name);
+ freez((void *)config->destination);
+ freez((void *)config->prefix);
+ freez((void *)config->hostname);
+
+ simple_pattern_free(config->charts_pattern);
+
+ simple_pattern_free(config->hosts_pattern);
+}
+
+/**
+ * Clean the allocated variables
+ *
+ * @param instance an instance data structure.
+ */
+void clean_instance(struct instance *instance)
+{
+ clean_instance_config(&instance->config);
+ buffer_free(instance->labels);
+
+ uv_cond_destroy(&instance->cond_var);
+ // uv_mutex_destroy(&instance->mutex);
+}
+
+/**
+ * Clean up a simple connector instance on Netdata exit
+ *
+ * @param instance an instance data structure.
+ */
+void simple_connector_cleanup(struct instance *instance)
+{
+ info("EXPORTING: cleaning up instance %s ...", instance->config.name);
+
+ struct simple_connector_data *simple_connector_data =
+ (struct simple_connector_data *)instance->connector_specific_data;
+
+ buffer_free(instance->buffer);
+ buffer_free(simple_connector_data->buffer);
+ buffer_free(simple_connector_data->header);
+
+ struct simple_connector_buffer *next_buffer = simple_connector_data->first_buffer;
+ for (int i = 0; i < instance->config.buffer_on_failures; i++) {
+ struct simple_connector_buffer *current_buffer = next_buffer;
+ next_buffer = next_buffer->next;
+
+ buffer_free(current_buffer->header);
+ buffer_free(current_buffer->buffer);
+ freez(current_buffer);
+ }
+
+#ifdef ENABLE_HTTPS
+ if (simple_connector_data->conn)
+ SSL_free(simple_connector_data->conn);
+#endif
+
+ freez(simple_connector_data);
+
+ struct simple_connector_config *simple_connector_config =
+ (struct simple_connector_config *)instance->config.connector_specific_config;
+ freez(simple_connector_config);
+
+ info("EXPORTING: instance %s exited", instance->config.name);
+ instance->exited = 1;
+
+ return;
+}
diff --git a/exporting/exporting.conf b/exporting/exporting.conf
new file mode 100644
index 0000000..c2e902c
--- /dev/null
+++ b/exporting/exporting.conf
@@ -0,0 +1,89 @@
+[exporting:global]
+ enabled = no
+ # send configured labels = yes
+ # send automatic labels = no
+ # update every = 10
+
+[prometheus:exporter]
+ # data source = average
+ # send names instead of ids = yes
+ # send configured labels = yes
+ # send automatic labels = no
+ # send charts matching = *
+ # send hosts matching = localhost *
+ # prefix = netdata
+
+# An example configuration for the graphite, json, and opentsdb exporting connectors
+# [graphite:my_graphite_instance]
+ # enabled = no
+ # destination = localhost
+ # data source = average
+ # prefix = netdata
+ # hostname = my_hostname
+ # update every = 10
+ # buffer on failures = 10
+ # timeout ms = 20000
+ # send names instead of ids = yes
+ # send charts matching = *
+ # send hosts matching = localhost *
+
+# [prometheus_remote_write:my_prometheus_remote_write_instance]
+ # enabled = no
+ # destination = localhost
+ # remote write URL path = /receive
+ # data source = average
+ # prefix = netdata
+ # hostname = my_hostname
+ # update every = 10
+ # buffer on failures = 10
+ # timeout ms = 20000
+ # send names instead of ids = yes
+ # send charts matching = *
+ # send hosts matching = localhost *
+
+# [kinesis:my_kinesis_instance]
+ # enabled = no
+ # destination = us-east-1
+ # stream name = netdata
+ # aws_access_key_id = my_access_key_id
+ # aws_secret_access_key = my_aws_secret_access_key
+ # data source = average
+ # prefix = netdata
+ # hostname = my_hostname
+ # update every = 10
+ # buffer on failures = 10
+ # timeout ms = 20000
+ # send names instead of ids = yes
+ # send charts matching = *
+ # send hosts matching = localhost *
+
+# [pubsub:my_pubsub_instance]
+ # enabled = no
+ # destination = pubsub.googleapis.com
+ # credentials file = /etc/netdata/pubsub_credentials.json
+ # project id = my_project
+ # topic id = my_topic
+ # data source = average
+ # prefix = netdata
+ # hostname = my_hostname
+ # update every = 10
+ # buffer on failures = 10
+ # timeout ms = 20000
+ # send names instead of ids = yes
+ # send charts matching = *
+ # send hosts matching = localhost *
+
+# [mongodb:my_mongodb_instance]
+ # enabled = no
+ # destination = localhost
+ # database = my_database
+ # collection = my_collection
+ # data source = average
+ # prefix = netdata
+ # hostname = my_hostname
+ # update every = 10
+ # buffer on failures = 10
+ # timeout ms = 20000
+ # send names instead of ids = yes
+ # send charts matching = *
+ # send hosts matching = localhost *
diff --git a/exporting/exporting_engine.c b/exporting/exporting_engine.c
new file mode 100644
index 0000000..6a1320c
--- /dev/null
+++ b/exporting/exporting_engine.c
@@ -0,0 +1,143 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "exporting_engine.h"
+
+static struct engine *engine = NULL;
+
+/**
+ * Clean the exporting engine
+ *
+ * Free all variables allocated inside the static engine structure.
+ */
+static void exporting_clean_engine()
+{
+ if (!engine)
+ return;
+
+#if HAVE_KINESIS
+ if (engine->aws_sdk_initialized)
+ aws_sdk_shutdown();
+#endif
+
+#if ENABLE_PROMETHEUS_REMOTE_WRITE
+ if (engine->protocol_buffers_initialized)
+ protocol_buffers_shutdown();
+#endif
+
+ // clean up the web API
+ prometheus_clean_server_root();
+
+ for (struct instance *instance = engine->instance_root; instance;) {
+ struct instance *current_instance = instance;
+ instance = instance->next;
+
+ clean_instance(current_instance);
+ }
+
+ freez((void *)engine->config.hostname);
+ freez(engine);
+}
+
+/**
+ * Clean up the main exporting thread and all connector workers on Netdata exit
+ *
+ * @param ptr thread data.
+ */
+static void exporting_main_cleanup(void *ptr)
+{
+ struct netdata_static_thread *static_thread = (struct netdata_static_thread *)ptr;
+ static_thread->enabled = NETDATA_MAIN_THREAD_EXITING;
+
+ info("cleaning up...");
+
+ if (!engine) {
+ static_thread->enabled = NETDATA_MAIN_THREAD_EXITED;
+ return;
+ }
+
+ engine->exit = 1;
+
+ int found = 0;
+ usec_t max = 2 * USEC_PER_SEC, step = 50000;
+
+ for (struct instance *instance = engine->instance_root; instance; instance = instance->next) {
+ if (!instance->exited) {
+ found++;
+ info("stopping worker for instance %s", instance->config.name);
+ uv_mutex_unlock(&instance->mutex);
+ instance->data_is_ready = 1;
+ uv_cond_signal(&instance->cond_var);
+ } else
+ info("found stopped worker for instance %s", instance->config.name);
+ }
+
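+ // poll for up to two seconds, in 50 ms steps, until every worker has exited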
+ while (found && max > 0) {
+ max -= step;
+ info("Waiting %d exporting connectors to finish...", found);
+ sleep_usec(step);
+ found = 0;
+
+ for (struct instance *instance = engine->instance_root; instance; instance = instance->next) {
+ if (!instance->exited)
+ found++;
+ }
+ }
+
+ exporting_clean_engine();
+ static_thread->enabled = NETDATA_MAIN_THREAD_EXITED;
+}
+
+/**
+ * Exporting engine main
+ *
+ * The main thread used to control the exporting engine.
+ *
+ * @param ptr a pointer to netdata_static_structure.
+ *
+ * @return It always returns NULL.
+ */
+void *exporting_main(void *ptr)
+{
+ netdata_thread_cleanup_push(exporting_main_cleanup, ptr);
+
+ engine = read_exporting_config();
+ if (!engine) {
+ info("EXPORTING: no exporting connectors configured");
+ goto cleanup;
+ }
+
+ if (init_connectors(engine) != 0) {
+ error("EXPORTING: cannot initialize exporting connectors");
+ send_statistics("EXPORTING_START", "FAIL", "-");
+ goto cleanup;
+ }
+
+ RRDSET *st_main_rusage = NULL;
+ RRDDIM *rd_main_user = NULL;
+ RRDDIM *rd_main_system = NULL;
+ create_main_rusage_chart(&st_main_rusage, &rd_main_user, &rd_main_system);
+
+ usec_t step_ut = localhost->rrd_update_every * USEC_PER_SEC;
+ heartbeat_t hb;
+ heartbeat_init(&hb);
+
+ while (!netdata_exit) {
+ heartbeat_next(&hb, step_ut);
+ engine->now = now_realtime_sec();
+
+ if (mark_scheduled_instances(engine))
+ prepare_buffers(engine);
+
+ send_main_rusage(st_main_rusage, rd_main_user, rd_main_system);
+
+#ifdef UNIT_TESTING
+ return NULL;
+#endif
+ }
+
+cleanup:
+ netdata_thread_cleanup_pop(1);
+ return NULL;
+}
diff --git a/exporting/exporting_engine.h b/exporting/exporting_engine.h
new file mode 100644
index 0000000..1d9feb7
--- /dev/null
+++ b/exporting/exporting_engine.h
@@ -0,0 +1,304 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_EXPORTING_ENGINE_H
+#define NETDATA_EXPORTING_ENGINE_H 1
+
+#include "daemon/common.h"
+
+#include <uv.h>
+
+#define exporter_get(section, name, value) expconfig_get(&exporting_config, section, name, value)
+#define exporter_get_number(section, name, value) expconfig_get_number(&exporting_config, section, name, value)
+#define exporter_get_boolean(section, name, value) expconfig_get_boolean(&exporting_config, section, name, value)
+
+extern struct config exporting_config;
+
+#define EXPORTING_UPDATE_EVERY_OPTION_NAME "update every"
+#define EXPORTING_UPDATE_EVERY_DEFAULT 10
+
+typedef enum exporting_options {
+ EXPORTING_OPTION_NON = 0,
+
+ EXPORTING_SOURCE_DATA_AS_COLLECTED = (1 << 0),
+ EXPORTING_SOURCE_DATA_AVERAGE = (1 << 1),
+ EXPORTING_SOURCE_DATA_SUM = (1 << 2),
+
+ EXPORTING_OPTION_SEND_CONFIGURED_LABELS = (1 << 3),
+ EXPORTING_OPTION_SEND_AUTOMATIC_LABELS = (1 << 4),
+ EXPORTING_OPTION_USE_TLS = (1 << 5),
+
+ EXPORTING_OPTION_SEND_NAMES = (1 << 16)
+} EXPORTING_OPTIONS;
+
+#define EXPORTING_OPTIONS_SOURCE_BITS \
+ (EXPORTING_SOURCE_DATA_AS_COLLECTED | EXPORTING_SOURCE_DATA_AVERAGE | EXPORTING_SOURCE_DATA_SUM)
+#define EXPORTING_OPTIONS_DATA_SOURCE(exporting_options) (exporting_options & EXPORTING_OPTIONS_SOURCE_BITS)
+
+#define sending_labels_configured(instance) \
+ (instance->config.options & (EXPORTING_OPTION_SEND_CONFIGURED_LABELS | EXPORTING_OPTION_SEND_AUTOMATIC_LABELS))
+
+#define should_send_label(instance, label) \
+ ((instance->config.options & EXPORTING_OPTION_SEND_CONFIGURED_LABELS && \
+ label->label_source == LABEL_SOURCE_NETDATA_CONF) || \
+ (instance->config.options & EXPORTING_OPTION_SEND_AUTOMATIC_LABELS && \
+ label->label_source != LABEL_SOURCE_NETDATA_CONF))
+
+typedef enum exporting_connector_types {
+ EXPORTING_CONNECTOR_TYPE_UNKNOWN, // Invalid type
+ EXPORTING_CONNECTOR_TYPE_GRAPHITE, // Send plain text to Graphite
+ EXPORTING_CONNECTOR_TYPE_GRAPHITE_HTTP, // Send data to Graphite using HTTP API
+ EXPORTING_CONNECTOR_TYPE_JSON, // Send data in JSON format
+ EXPORTING_CONNECTOR_TYPE_JSON_HTTP, // Send data in JSON format using HTTP API
+ EXPORTING_CONNECTOR_TYPE_OPENTSDB, // Send data to OpenTSDB using telnet API
+ EXPORTING_CONNECTOR_TYPE_OPENTSDB_HTTP, // Send data to OpenTSDB using HTTP API
+ EXPORTING_CONNECTOR_TYPE_PROMETHEUS_REMOTE_WRITE, // Send data using the Prometheus remote write protocol
+ EXPORTING_CONNECTOR_TYPE_KINESIS, // Send message to AWS Kinesis
+ EXPORTING_CONNECTOR_TYPE_PUBSUB, // Send message to Google Cloud Pub/Sub
+ EXPORTING_CONNECTOR_TYPE_MONGODB, // Send data to MongoDB collection
+ EXPORTING_CONNECTOR_TYPE_NUM // Number of connector types
+} EXPORTING_CONNECTOR_TYPE;
+
+struct engine;
+
+struct instance_config {
+ EXPORTING_CONNECTOR_TYPE type;
+ const char *type_name;
+
+ const char *name;
+ const char *destination;
+ const char *prefix;
+ const char *hostname;
+
+ int update_every;
+ int buffer_on_failures;
+ long timeoutms;
+
+ EXPORTING_OPTIONS options;
+ SIMPLE_PATTERN *charts_pattern;
+ SIMPLE_PATTERN *hosts_pattern;
+
+ void *connector_specific_config;
+};
+
+struct simple_connector_config {
+ int default_port;
+};
+
+struct simple_connector_buffer {
+ BUFFER *header;
+ BUFFER *buffer;
+
+ size_t buffered_metrics;
+ size_t buffered_bytes;
+
+ int used;
+
+ struct simple_connector_buffer *next;
+};
+
+struct simple_connector_data {
+ void *connector_specific_data;
+
+ size_t total_buffered_metrics;
+
+ BUFFER *header;
+ BUFFER *buffer;
+ size_t buffered_metrics;
+ size_t buffered_bytes;
+
+ struct simple_connector_buffer *previous_buffer;
+ struct simple_connector_buffer *first_buffer;
+ struct simple_connector_buffer *last_buffer;
+
+#ifdef ENABLE_HTTPS
+ SSL *conn; //SSL connection
+ int flags; //The flags for SSL connection
+#endif
+};
+
+struct prometheus_remote_write_specific_config {
+ char *remote_write_path;
+};
+
+struct aws_kinesis_specific_config {
+ char *stream_name;
+ char *auth_key_id;
+ char *secure_key;
+};
+
+struct pubsub_specific_config {
+ char *credentials_file;
+ char *project_id;
+ char *topic_id;
+};
+
+struct mongodb_specific_config {
+ char *database;
+ char *collection;
+};
+
+struct engine_config {
+ const char *hostname;
+ int update_every;
+};
+
+struct stats {
+ collected_number buffered_metrics;
+ collected_number lost_metrics;
+ collected_number sent_metrics;
+ collected_number buffered_bytes;
+ collected_number lost_bytes;
+ collected_number sent_bytes;
+ collected_number received_bytes;
+ collected_number transmission_successes;
+ collected_number data_lost_events;
+ collected_number reconnects;
+ collected_number transmission_failures;
+ collected_number receptions;
+
+ int initialized;
+
+ RRDSET *st_metrics;
+ RRDDIM *rd_buffered_metrics;
+ RRDDIM *rd_lost_metrics;
+ RRDDIM *rd_sent_metrics;
+
+ RRDSET *st_bytes;
+ RRDDIM *rd_buffered_bytes;
+ RRDDIM *rd_lost_bytes;
+ RRDDIM *rd_sent_bytes;
+ RRDDIM *rd_received_bytes;
+
+ RRDSET *st_ops;
+ RRDDIM *rd_transmission_successes;
+ RRDDIM *rd_data_lost_events;
+ RRDDIM *rd_reconnects;
+ RRDDIM *rd_transmission_failures;
+ RRDDIM *rd_receptions;
+
+ RRDSET *st_rusage;
+ RRDDIM *rd_user;
+ RRDDIM *rd_system;
+};
+
+struct instance {
+ struct instance_config config;
+ void *buffer;
+ void (*worker)(void *instance_p);
+ struct stats stats;
+
+ int scheduled;
+ int disabled;
+ int skip_host;
+ int skip_chart;
+
+ BUFFER *labels;
+
+ time_t after;
+ time_t before;
+
+ uv_thread_t thread;
+ uv_mutex_t mutex;
+ uv_cond_t cond_var;
+ int data_is_ready;
+
+ int (*start_batch_formatting)(struct instance *instance);
+ int (*start_host_formatting)(struct instance *instance, RRDHOST *host);
+ int (*start_chart_formatting)(struct instance *instance, RRDSET *st);
+ int (*metric_formatting)(struct instance *instance, RRDDIM *rd);
+ int (*end_chart_formatting)(struct instance *instance, RRDSET *st);
+ int (*end_host_formatting)(struct instance *instance, RRDHOST *host);
+ int (*end_batch_formatting)(struct instance *instance);
+
+ void (*prepare_header)(struct instance *instance);
+ int (*check_response)(BUFFER *buffer, struct instance *instance);
+
+ void *connector_specific_data;
+
+ size_t index;
+ struct instance *next;
+ struct engine *engine;
+
+ volatile sig_atomic_t exited;
+};
+
+struct engine {
+ struct engine_config config;
+
+ size_t instance_num;
+ time_t now;
+
+ int aws_sdk_initialized;
+ int protocol_buffers_initialized;
+ int mongoc_initialized;
+
+ struct instance *instance_root;
+
+ volatile sig_atomic_t exit;
+};
+
+extern struct instance *prometheus_exporter_instance;
+
+void *exporting_main(void *ptr);
+
+struct engine *read_exporting_config();
+EXPORTING_CONNECTOR_TYPE exporting_select_type(const char *type);
+
+int init_connectors(struct engine *engine);
+void simple_connector_init(struct instance *instance);
+
+int mark_scheduled_instances(struct engine *engine);
+void prepare_buffers(struct engine *engine);
+
+size_t exporting_name_copy(char *dst, const char *src, size_t max_len);
+
+int rrdhost_is_exportable(struct instance *instance, RRDHOST *host);
+int rrdset_is_exportable(struct instance *instance, RRDSET *st);
+
+calculated_number exporting_calculate_value_from_stored_data(
+ struct instance *instance,
+ RRDDIM *rd,
+ time_t *last_timestamp);
+
+void start_batch_formatting(struct engine *engine);
+void start_host_formatting(struct engine *engine, RRDHOST *host);
+void start_chart_formatting(struct engine *engine, RRDSET *st);
+void metric_formatting(struct engine *engine, RRDDIM *rd);
+void end_chart_formatting(struct engine *engine, RRDSET *st);
+void end_host_formatting(struct engine *engine, RRDHOST *host);
+void end_batch_formatting(struct engine *engine);
+int flush_host_labels(struct instance *instance, RRDHOST *host);
+int simple_connector_end_batch(struct instance *instance);
+
+int exporting_discard_response(BUFFER *buffer, struct instance *instance);
+void simple_connector_receive_response(int *sock, struct instance *instance);
+void simple_connector_send_buffer(
+ int *sock, int *failures, struct instance *instance, BUFFER *header, BUFFER *buffer, size_t buffered_metrics);
+void simple_connector_worker(void *instance_p);
+
+void create_main_rusage_chart(RRDSET **st_rusage, RRDDIM **rd_user, RRDDIM **rd_system);
+void send_main_rusage(RRDSET *st_rusage, RRDDIM *rd_user, RRDDIM *rd_system);
+void send_internal_metrics(struct instance *instance);
+
+extern void clean_instance(struct instance *ptr);
+void simple_connector_cleanup(struct instance *instance);
+
+static inline void disable_instance(struct instance *instance)
+{
+ instance->disabled = 1;
+ instance->scheduled = 0;
+ uv_mutex_unlock(&instance->mutex);
+ error("EXPORTING: Instance %s disabled", instance->config.name);
+}
+
+#include "exporting/prometheus/prometheus.h"
+#include "exporting/opentsdb/opentsdb.h"
+#if ENABLE_PROMETHEUS_REMOTE_WRITE
+#include "exporting/prometheus/remote_write/remote_write.h"
+#endif
+
+#if HAVE_KINESIS
+#include "exporting/aws_kinesis/aws_kinesis.h"
+#endif
+
+#endif /* NETDATA_EXPORTING_ENGINE_H */
diff --git a/exporting/graphite/Makefile.am b/exporting/graphite/Makefile.am
new file mode 100644
index 0000000..babdcf0
--- /dev/null
+++ b/exporting/graphite/Makefile.am
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
diff --git a/exporting/graphite/README.md b/exporting/graphite/README.md
new file mode 100644
index 0000000..a6a25ef
--- /dev/null
+++ b/exporting/graphite/README.md
@@ -0,0 +1,30 @@
+<!--
+title: "Export metrics to Graphite providers"
+description: "Archive your Agent's metrics to a any Graphite database provider for long-term storage, further analysis, or correlation with data from other sources."
+custom_edit_url: https://github.com/netdata/netdata/edit/master/exporting/graphite/README.md
+sidebar_label: Graphite
+-->
+
+# Export metrics to Graphite providers
+
+You can use the Graphite connector for the [exporting engine](/exporting/README.md) to archive your agent's metrics to
+Graphite providers for long-term storage, further analysis, or correlation with data from other sources.
+
+## Configuration
+
+To enable data exporting to a Graphite database, run `./edit-config exporting.conf` in the Netdata configuration
+directory and set the following options:
+
+```conf
+[graphite:my_graphite_instance]
+ enabled = yes
+ destination = localhost:2003
+```
+
+Add the `:http` or `:https` modifier to the connector type if you need a protocol other than plain text. For example:
+`graphite:http:my_graphite_instance` or `graphite:https:my_graphite_instance`.
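+
+For instance, a hypothetical TLS-enabled instance (the instance name and destination below are placeholders) could be
+configured as:
+
+```conf
+[graphite:https:my_graphite_instance]
+ enabled = yes
+ destination = localhost
+```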
+
+The Graphite connector is further configurable using additional settings. See the [exporting reference
+doc](/exporting/README.md#options) for details.
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fexporting%2Fgraphite%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/exporting/graphite/graphite.c b/exporting/graphite/graphite.c
new file mode 100644
index 0000000..9c09631
--- /dev/null
+++ b/exporting/graphite/graphite.c
@@ -0,0 +1,228 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "graphite.h"
+
+/**
+ * Initialize Graphite connector instance
+ *
+ * @param instance an instance data structure.
+ * @return Returns 0 on success, 1 on failure.
+ */
+int init_graphite_instance(struct instance *instance)
+{
+ instance->worker = simple_connector_worker;
+
+ struct simple_connector_config *connector_specific_config = callocz(1, sizeof(struct simple_connector_config));
+ instance->config.connector_specific_config = (void *)connector_specific_config;
+ connector_specific_config->default_port = 2003;
+
+ struct simple_connector_data *connector_specific_data = callocz(1, sizeof(struct simple_connector_data));
+ instance->connector_specific_data = connector_specific_data;
+
+#ifdef ENABLE_HTTPS
+ connector_specific_data->flags = NETDATA_SSL_START;
+ connector_specific_data->conn = NULL;
+ if (instance->config.options & EXPORTING_OPTION_USE_TLS) {
+ security_start_ssl(NETDATA_SSL_CONTEXT_EXPORTING);
+ }
+#endif
+
+ instance->start_batch_formatting = NULL;
+ instance->start_host_formatting = format_host_labels_graphite_plaintext;
+ instance->start_chart_formatting = NULL;
+
+ if (EXPORTING_OPTIONS_DATA_SOURCE(instance->config.options) == EXPORTING_SOURCE_DATA_AS_COLLECTED)
+ instance->metric_formatting = format_dimension_collected_graphite_plaintext;
+ else
+ instance->metric_formatting = format_dimension_stored_graphite_plaintext;
+
+ instance->end_chart_formatting = NULL;
+ instance->end_host_formatting = flush_host_labels;
+ instance->end_batch_formatting = simple_connector_end_batch;
+
+ if (instance->config.type == EXPORTING_CONNECTOR_TYPE_GRAPHITE_HTTP)
+ instance->prepare_header = graphite_http_prepare_header;
+ else
+ instance->prepare_header = NULL;
+
+ instance->check_response = exporting_discard_response;
+
+ instance->buffer = (void *)buffer_create(0);
+ if (!instance->buffer) {
+ error("EXPORTING: cannot create buffer for graphite exporting connector instance %s", instance->config.name);
+ return 1;
+ }
+
+ simple_connector_init(instance);
+
+ if (uv_mutex_init(&instance->mutex))
+ return 1;
+ if (uv_cond_init(&instance->cond_var))
+ return 1;
+
+ return 0;
+}
+
+/**
+ * Copy a label value, substituting underscores for characters that can't be used in Graphite output
+ *
+ * @param dst a destination string.
+ * @param src a source string.
+ * @param len the maximum number of characters copied.
+ */
+void sanitize_graphite_label_value(char *dst, char *src, size_t len)
+{
+ while (*src != '\0' && len) {
+ if (isspace(*src) || *src == ';' || *src == '~')
+ *dst++ = '_';
+ else
+ *dst++ = *src;
+ src++;
+ len--;
+ }
+ *dst = '\0';
+}
+
+/**
+ * Format host labels for the Graphite connector
+ *
+ * @param instance an instance data structure.
+ * @param host a data collecting host.
+ * @return Always returns 0.
+ */
+int format_host_labels_graphite_plaintext(struct instance *instance, RRDHOST *host)
+{
+ if (!instance->labels)
+ instance->labels = buffer_create(1024);
+
+ if (unlikely(!sending_labels_configured(instance)))
+ return 0;
+
+ rrdhost_check_rdlock(host);
+ netdata_rwlock_rdlock(&host->labels.labels_rwlock);
+ for (struct label *label = host->labels.head; label; label = label->next) {
+ if (!should_send_label(instance, label))
+ continue;
+
+ char value[CONFIG_MAX_VALUE + 1];
+ sanitize_graphite_label_value(value, label->value, CONFIG_MAX_VALUE);
+
+ if (*value) {
+ buffer_strcat(instance->labels, ";");
+ buffer_sprintf(instance->labels, "%s=%s", label->key, value);
+ }
+ }
+ netdata_rwlock_unlock(&host->labels.labels_rwlock);
+
+ return 0;
+}
+
+/**
+ * Format dimension using collected data for Graphite connector
+ *
+ * @param instance an instance data structure.
+ * @param rd a dimension.
+ * @return Always returns 0.
+ */
+int format_dimension_collected_graphite_plaintext(struct instance *instance, RRDDIM *rd)
+{
+ RRDSET *st = rd->rrdset;
+ RRDHOST *host = st->rrdhost;
+
+ char chart_name[RRD_ID_LENGTH_MAX + 1];
+ exporting_name_copy(
+ chart_name,
+ (instance->config.options & EXPORTING_OPTION_SEND_NAMES && st->name) ? st->name : st->id,
+ RRD_ID_LENGTH_MAX);
+
+ char dimension_name[RRD_ID_LENGTH_MAX + 1];
+ exporting_name_copy(
+ dimension_name,
+ (instance->config.options & EXPORTING_OPTION_SEND_NAMES && rd->name) ? rd->name : rd->id,
+ RRD_ID_LENGTH_MAX);
+
+ buffer_sprintf(
+ instance->buffer,
+ "%s.%s.%s.%s%s%s%s " COLLECTED_NUMBER_FORMAT " %llu\n",
+ instance->config.prefix,
+ (host == localhost) ? instance->config.hostname : host->hostname,
+ chart_name,
+ dimension_name,
+ (host->tags) ? ";" : "",
+ (host->tags) ? host->tags : "",
+ (instance->labels) ? buffer_tostring(instance->labels) : "",
+ rd->last_collected_value,
+ (unsigned long long)rd->last_collected_time.tv_sec);
+
+ return 0;
+}
+
+/**
+ * Format dimension using a calculated value from stored data for Graphite connector
+ *
+ * @param instance an instance data structure.
+ * @param rd a dimension.
+ * @return Always returns 0.
+ */
+int format_dimension_stored_graphite_plaintext(struct instance *instance, RRDDIM *rd)
+{
+ RRDSET *st = rd->rrdset;
+ RRDHOST *host = st->rrdhost;
+
+ char chart_name[RRD_ID_LENGTH_MAX + 1];
+ exporting_name_copy(
+ chart_name,
+ (instance->config.options & EXPORTING_OPTION_SEND_NAMES && st->name) ? st->name : st->id,
+ RRD_ID_LENGTH_MAX);
+
+ char dimension_name[RRD_ID_LENGTH_MAX + 1];
+ exporting_name_copy(
+ dimension_name,
+ (instance->config.options & EXPORTING_OPTION_SEND_NAMES && rd->name) ? rd->name : rd->id,
+ RRD_ID_LENGTH_MAX);
+
+ time_t last_t;
+ calculated_number value = exporting_calculate_value_from_stored_data(instance, rd, &last_t);
+
+ if(isnan(value))
+ return 0;
+
+ buffer_sprintf(
+ instance->buffer,
+ "%s.%s.%s.%s%s%s%s " CALCULATED_NUMBER_FORMAT " %llu\n",
+ instance->config.prefix,
+ (host == localhost) ? instance->config.hostname : host->hostname,
+ chart_name,
+ dimension_name,
+ (host->tags) ? ";" : "",
+ (host->tags) ? host->tags : "",
+ (instance->labels) ? buffer_tostring(instance->labels) : "",
+ value,
+ (unsigned long long)last_t);
+
+ return 0;
+}
+
+/**
+ * Prepare an HTTP header
+ *
+ * @param instance an instance data structure.
+ */
+void graphite_http_prepare_header(struct instance *instance)
+{
+ struct simple_connector_data *simple_connector_data = instance->connector_specific_data;
+
+ buffer_sprintf(
+ simple_connector_data->last_buffer->header,
+ "POST /api/put HTTP/1.1\r\n"
+ "Host: %s\r\n"
+ "Content-Type: application/graphite\r\n"
+ "Content-Length: %lu\r\n"
+ "\r\n",
+ instance->config.destination,
+ buffer_strlen(simple_connector_data->last_buffer->buffer));
+
+ return;
+}
diff --git a/exporting/graphite/graphite.h b/exporting/graphite/graphite.h
new file mode 100644
index 0000000..993c12e
--- /dev/null
+++ b/exporting/graphite/graphite.h
@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_EXPORTING_GRAPHITE_H
+#define NETDATA_EXPORTING_GRAPHITE_H
+
+#include "exporting/exporting_engine.h"
+
+int init_graphite_instance(struct instance *instance);
+
+void sanitize_graphite_label_value(char *dst, char *src, size_t len);
+int format_host_labels_graphite_plaintext(struct instance *instance, RRDHOST *host);
+
+int format_dimension_collected_graphite_plaintext(struct instance *instance, RRDDIM *rd);
+int format_dimension_stored_graphite_plaintext(struct instance *instance, RRDDIM *rd);
+
+void graphite_http_prepare_header(struct instance *instance);
+
+#endif //NETDATA_EXPORTING_GRAPHITE_H
diff --git a/exporting/init_connectors.c b/exporting/init_connectors.c
new file mode 100644
index 0000000..6aff263
--- /dev/null
+++ b/exporting/init_connectors.c
@@ -0,0 +1,145 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "exporting_engine.h"
+#include "graphite/graphite.h"
+#include "json/json.h"
+#include "opentsdb/opentsdb.h"
+
+#if ENABLE_PROMETHEUS_REMOTE_WRITE
+#include "prometheus/remote_write/remote_write.h"
+#endif
+
+#if HAVE_KINESIS
+#include "aws_kinesis/aws_kinesis.h"
+#endif
+
+#if ENABLE_EXPORTING_PUBSUB
+#include "pubsub/pubsub.h"
+#endif
+
+#if HAVE_MONGOC
+#include "mongodb/mongodb.h"
+#endif
+
+/**
+ * Initialize connectors
+ *
+ * @param engine an engine data structure.
+ * @return Returns 0 on success, 1 on failure.
+ */
+int init_connectors(struct engine *engine)
+{
+ engine->now = now_realtime_sec();
+
+ for (struct instance *instance = engine->instance_root; instance; instance = instance->next) {
+ instance->index = engine->instance_num++;
+ instance->after = engine->now;
+
+ switch (instance->config.type) {
+ case EXPORTING_CONNECTOR_TYPE_GRAPHITE:
+ if (init_graphite_instance(instance) != 0)
+ return 1;
+ break;
+ case EXPORTING_CONNECTOR_TYPE_GRAPHITE_HTTP:
+ if (init_graphite_instance(instance) != 0)
+ return 1;
+ break;
+ case EXPORTING_CONNECTOR_TYPE_JSON:
+ if (init_json_instance(instance) != 0)
+ return 1;
+ break;
+ case EXPORTING_CONNECTOR_TYPE_JSON_HTTP:
+ if (init_json_http_instance(instance) != 0)
+ return 1;
+ break;
+ case EXPORTING_CONNECTOR_TYPE_OPENTSDB:
+ if (init_opentsdb_telnet_instance(instance) != 0)
+ return 1;
+ break;
+ case EXPORTING_CONNECTOR_TYPE_OPENTSDB_HTTP:
+ if (init_opentsdb_http_instance(instance) != 0)
+ return 1;
+ break;
+ case EXPORTING_CONNECTOR_TYPE_PROMETHEUS_REMOTE_WRITE:
+#if ENABLE_PROMETHEUS_REMOTE_WRITE
+ if (init_prometheus_remote_write_instance(instance) != 0)
+ return 1;
+#endif
+ break;
+ case EXPORTING_CONNECTOR_TYPE_KINESIS:
+#if HAVE_KINESIS
+ if (init_aws_kinesis_instance(instance) != 0)
+ return 1;
+#endif
+ break;
+ case EXPORTING_CONNECTOR_TYPE_PUBSUB:
+#if ENABLE_EXPORTING_PUBSUB
+ if (init_pubsub_instance(instance) != 0)
+ return 1;
+#endif
+ break;
+ case EXPORTING_CONNECTOR_TYPE_MONGODB:
+#if HAVE_MONGOC
+ if (init_mongodb_instance(instance) != 0)
+ return 1;
+#endif
+ break;
+ default:
+ error("EXPORTING: unknown exporting connector type");
+ return 1;
+ }
+
+ // dispatch the instance worker thread
+ int error = uv_thread_create(&instance->thread, instance->worker, instance);
+ if (error) {
+ error("EXPORTING: cannot create tread worker. uv_thread_create(): %s", uv_strerror(error));
+ return 1;
+ }
+ char threadname[NETDATA_THREAD_NAME_MAX + 1];
+ snprintfz(threadname, NETDATA_THREAD_NAME_MAX, "EXPORTING-%zu", instance->index);
+ uv_thread_set_name_np(instance->thread, threadname);
+
+ send_statistics("EXPORTING_START", "OK", instance->config.type_name);
+ }
+
+ return 0;
+}
+
+/**
+ * Initialize a ring buffer for a simple connector
+ *
+ * @param instance an instance data structure.
+ */
+void simple_connector_init(struct instance *instance)
+{
+ struct simple_connector_data *connector_specific_data =
+ (struct simple_connector_data *)instance->connector_specific_data;
+
+ if (connector_specific_data->first_buffer)
+ return;
+
+ connector_specific_data->header = buffer_create(0);
+ connector_specific_data->buffer = buffer_create(0);
+
+ // create a ring buffer
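+ // each new buffer is prepended to the list; the first allocated node is
+ // remembered in first_buffer so that the list can be closed into a ring below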
+ struct simple_connector_buffer *first_buffer = NULL;
+
+ if (instance->config.buffer_on_failures < 1)
+ instance->config.buffer_on_failures = 1;
+
+ for (int i = 0; i < instance->config.buffer_on_failures; i++) {
+ struct simple_connector_buffer *current_buffer = callocz(1, sizeof(struct simple_connector_buffer));
+
+ if (!connector_specific_data->first_buffer)
+ first_buffer = current_buffer;
+ else
+ current_buffer->next = connector_specific_data->first_buffer;
+
+ connector_specific_data->first_buffer = current_buffer;
+ }
+
+ first_buffer->next = connector_specific_data->first_buffer;
+ connector_specific_data->last_buffer = connector_specific_data->first_buffer;
+
+ return;
+}
diff --git a/exporting/json/Makefile.am b/exporting/json/Makefile.am
new file mode 100644
index 0000000..babdcf0
--- /dev/null
+++ b/exporting/json/Makefile.am
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
diff --git a/exporting/json/README.md b/exporting/json/README.md
new file mode 100644
index 0000000..a0f8472
--- /dev/null
+++ b/exporting/json/README.md
@@ -0,0 +1,30 @@
+<!--
+title: "Export metrics to JSON document databases"
+description: "Archive your Agent's metrics to a JSON document database for long-term storage, further analysis, or correlation with data from other sources."
+custom_edit_url: https://github.com/netdata/netdata/edit/master/exporting/json/README.md
+sidebar_label: JSON Document Databases
+-->
+
+# Export metrics to JSON document databases
+
+You can use the JSON connector for the [exporting engine](/exporting/README.md) to archive your agent's metrics to JSON
+document databases for long-term storage, further analysis, or correlation with data from other sources.
+
+## Configuration
+
+To enable data exporting to a JSON document database, run `./edit-config exporting.conf` in the Netdata configuration
+directory and set the following options:
+
+```conf
+[json:my_json_instance]
+ enabled = yes
+ destination = localhost:5448
+```
+
+Add the `:http` or `:https` modifier to the connector type if you need a protocol other than plain text. For example:
+`json:http:my_json_instance` or `json:https:my_json_instance`.
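+
+A hypothetical HTTPS-enabled instance (the instance name and destination below are placeholders) could then look like:
+
+```conf
+[json:https:my_json_instance]
+ enabled = yes
+ destination = localhost
+```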
+
+The JSON connector is further configurable using additional settings. See the [exporting reference
+doc](/exporting/README.md#options) for details.
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fexporting%2Fjson%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/exporting/json/json.c b/exporting/json/json.c
new file mode 100644
index 0000000..f2396ba
--- /dev/null
+++ b/exporting/json/json.c
@@ -0,0 +1,362 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "json.h"
+
+/**
+ * Initialize JSON connector instance
+ *
+ * @param instance an instance data structure.
+ * @return Returns 0 on success, 1 on failure.
+ */
+int init_json_instance(struct instance *instance)
+{
+ instance->worker = simple_connector_worker;
+
+ struct simple_connector_config *connector_specific_config = callocz(1, sizeof(struct simple_connector_config));
+ instance->config.connector_specific_config = (void *)connector_specific_config;
+ connector_specific_config->default_port = 5448;
+
+ struct simple_connector_data *connector_specific_data = callocz(1, sizeof(struct simple_connector_data));
+ instance->connector_specific_data = connector_specific_data;
+
+ instance->start_batch_formatting = NULL;
+ instance->start_host_formatting = format_host_labels_json_plaintext;
+ instance->start_chart_formatting = NULL;
+
+ if (EXPORTING_OPTIONS_DATA_SOURCE(instance->config.options) == EXPORTING_SOURCE_DATA_AS_COLLECTED)
+ instance->metric_formatting = format_dimension_collected_json_plaintext;
+ else
+ instance->metric_formatting = format_dimension_stored_json_plaintext;
+
+ instance->end_chart_formatting = NULL;
+ instance->end_host_formatting = flush_host_labels;
+ instance->end_batch_formatting = simple_connector_end_batch;
+
+ instance->prepare_header = NULL;
+
+ instance->check_response = exporting_discard_response;
+
+ instance->buffer = (void *)buffer_create(0);
+ if (!instance->buffer) {
+ error("EXPORTING: cannot create buffer for json exporting connector instance %s", instance->config.name);
+ return 1;
+ }
+
+ simple_connector_init(instance);
+
+ if (uv_mutex_init(&instance->mutex))
+ return 1;
+ if (uv_cond_init(&instance->cond_var))
+ return 1;
+
+ return 0;
+}
+
+/**
+ * Initialize JSON connector instance for HTTP protocol
+ *
+ * @param instance an instance data structure.
+ * @return Returns 0 on success, 1 on failure.
+ */
+int init_json_http_instance(struct instance *instance)
+{
+ instance->worker = simple_connector_worker;
+
+ struct simple_connector_config *connector_specific_config = callocz(1, sizeof(struct simple_connector_config));
+ instance->config.connector_specific_config = (void *)connector_specific_config;
+ connector_specific_config->default_port = 5448;
+
+ struct simple_connector_data *connector_specific_data = callocz(1, sizeof(struct simple_connector_data));
+ instance->connector_specific_data = connector_specific_data;
+
+#ifdef ENABLE_HTTPS
+ connector_specific_data->flags = NETDATA_SSL_START;
+ connector_specific_data->conn = NULL;
+ if (instance->config.options & EXPORTING_OPTION_USE_TLS) {
+ security_start_ssl(NETDATA_SSL_CONTEXT_EXPORTING);
+ }
+#endif
+
+ instance->start_batch_formatting = open_batch_json_http;
+ instance->start_host_formatting = format_host_labels_json_plaintext;
+ instance->start_chart_formatting = NULL;
+
+ if (EXPORTING_OPTIONS_DATA_SOURCE(instance->config.options) == EXPORTING_SOURCE_DATA_AS_COLLECTED)
+ instance->metric_formatting = format_dimension_collected_json_plaintext;
+ else
+ instance->metric_formatting = format_dimension_stored_json_plaintext;
+
+ instance->end_chart_formatting = NULL;
+ instance->end_host_formatting = flush_host_labels;
+ instance->end_batch_formatting = close_batch_json_http;
+
+ instance->prepare_header = json_http_prepare_header;
+
+ instance->check_response = exporting_discard_response;
+
+ instance->buffer = (void *)buffer_create(0);
+
+ simple_connector_init(instance);
+
+ if (uv_mutex_init(&instance->mutex))
+ return 1;
+ if (uv_cond_init(&instance->cond_var))
+ return 1;
+
+ return 0;
+}
+
+/**
+ * Format host labels for JSON connector
+ *
+ * @param instance an instance data structure.
+ * @param host a data collecting host.
+ * @return Always returns 0.
+ */
+int format_host_labels_json_plaintext(struct instance *instance, RRDHOST *host)
+{
+ if (!instance->labels)
+ instance->labels = buffer_create(1024);
+
+ if (unlikely(!sending_labels_configured(instance)))
+ return 0;
+
+ buffer_strcat(instance->labels, "\"labels\":{");
+
+ int count = 0;
+ rrdhost_check_rdlock(host);
+ netdata_rwlock_rdlock(&host->labels.labels_rwlock);
+ for (struct label *label = host->labels.head; label; label = label->next) {
+ if (!should_send_label(instance, label))
+ continue;
+
+ char value[CONFIG_MAX_VALUE * 2 + 1];
+ sanitize_json_string(value, label->value, CONFIG_MAX_VALUE);
+ if (count > 0)
+ buffer_strcat(instance->labels, ",");
+ buffer_sprintf(instance->labels, "\"%s\":\"%s\"", label->key, value);
+
+ count++;
+ }
+ netdata_rwlock_unlock(&host->labels.labels_rwlock);
+
+ buffer_strcat(instance->labels, "},");
+
+ return 0;
+}
+
+/**
+ * Format dimension using collected data for JSON connector
+ *
+ * @param instance an instance data structure.
+ * @param rd a dimension.
+ * @return Always returns 0.
+ */
+int format_dimension_collected_json_plaintext(struct instance *instance, RRDDIM *rd)
+{
+ RRDSET *st = rd->rrdset;
+ RRDHOST *host = st->rrdhost;
+
+ const char *tags_pre = "", *tags_post = "", *tags = host->tags;
+ if (!tags)
+ tags = "";
+
+ if (*tags) {
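+ // host tags may already be valid JSON (an object, array, or quoted string);
+ // otherwise they are wrapped in quotes to form a JSON string value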
+ if (*tags == '{' || *tags == '[' || *tags == '"') {
+ tags_pre = "\"host_tags\":";
+ tags_post = ",";
+ } else {
+ tags_pre = "\"host_tags\":\"";
+ tags_post = "\",";
+ }
+ }
+
+ if (instance->config.type == EXPORTING_CONNECTOR_TYPE_JSON_HTTP) {
+ if (buffer_strlen((BUFFER *)instance->buffer) > 2)
+ buffer_strcat(instance->buffer, ",\n");
+ }
+
+ buffer_sprintf(
+ instance->buffer,
+
+ "{"
+ "\"prefix\":\"%s\","
+ "\"hostname\":\"%s\","
+ "%s%s%s"
+ "%s"
+
+ "\"chart_id\":\"%s\","
+ "\"chart_name\":\"%s\","
+ "\"chart_family\":\"%s\","
+ "\"chart_context\":\"%s\","
+ "\"chart_type\":\"%s\","
+ "\"units\":\"%s\","
+
+ "\"id\":\"%s\","
+ "\"name\":\"%s\","
+ "\"value\":" COLLECTED_NUMBER_FORMAT ","
+
+ "\"timestamp\":%llu}",
+
+ instance->config.prefix,
+ (host == localhost) ? instance->config.hostname : host->hostname,
+ tags_pre,
+ tags,
+ tags_post,
+ instance->labels ? buffer_tostring(instance->labels) : "",
+
+ st->id,
+ st->name,
+ st->family,
+ st->context,
+ st->type,
+ st->units,
+
+ rd->id,
+ rd->name,
+ rd->last_collected_value,
+
+ (unsigned long long)rd->last_collected_time.tv_sec);
+
+ if (instance->config.type != EXPORTING_CONNECTOR_TYPE_JSON_HTTP) {
+ buffer_strcat(instance->buffer, "\n");
+ }
+
+ return 0;
+}
+
+/**
+ * Format dimension using a calculated value from stored data for JSON connector
+ *
+ * @param instance an instance data structure.
+ * @param rd a dimension.
+ * @return Always returns 0.
+ */
+int format_dimension_stored_json_plaintext(struct instance *instance, RRDDIM *rd)
+{
+ RRDSET *st = rd->rrdset;
+ RRDHOST *host = st->rrdhost;
+
+ time_t last_t;
+ calculated_number value = exporting_calculate_value_from_stored_data(instance, rd, &last_t);
+
+ if(isnan(value))
+ return 0;
+
+ const char *tags_pre = "", *tags_post = "", *tags = host->tags;
+ if (!tags)
+ tags = "";
+
+ if (*tags) {
+ if (*tags == '{' || *tags == '[' || *tags == '"') {
+ tags_pre = "\"host_tags\":";
+ tags_post = ",";
+ } else {
+ tags_pre = "\"host_tags\":\"";
+ tags_post = "\",";
+ }
+ }
+
+ if (instance->config.type == EXPORTING_CONNECTOR_TYPE_JSON_HTTP) {
+ if (buffer_strlen((BUFFER *)instance->buffer) > 2)
+ buffer_strcat(instance->buffer, ",\n");
+ }
+
+ buffer_sprintf(
+ instance->buffer,
+ "{"
+ "\"prefix\":\"%s\","
+ "\"hostname\":\"%s\","
+ "%s%s%s"
+ "%s"
+
+ "\"chart_id\":\"%s\","
+ "\"chart_name\":\"%s\","
+ "\"chart_family\":\"%s\","
+ "\"chart_context\": \"%s\","
+ "\"chart_type\":\"%s\","
+ "\"units\": \"%s\","
+
+ "\"id\":\"%s\","
+ "\"name\":\"%s\","
+ "\"value\":" CALCULATED_NUMBER_FORMAT ","
+
+ "\"timestamp\": %llu}",
+
+ instance->config.prefix,
+ (host == localhost) ? instance->config.hostname : host->hostname,
+ tags_pre,
+ tags,
+ tags_post,
+ instance->labels ? buffer_tostring(instance->labels) : "",
+
+ st->id,
+ st->name,
+ st->family,
+ st->context,
+ st->type,
+ st->units,
+
+ rd->id,
+ rd->name,
+ value,
+
+ (unsigned long long)last_t);
+
+ if (instance->config.type != EXPORTING_CONNECTOR_TYPE_JSON_HTTP) {
+ buffer_strcat(instance->buffer, "\n");
+ }
+
+ return 0;
+}
+
+/**
+ * Open a JSON list for a batch
+ *
+ * @param instance an instance data structure.
+ * @return Always returns 0.
+ */
+int open_batch_json_http(struct instance *instance)
+{
+ buffer_strcat(instance->buffer, "[\n");
+
+ return 0;
+}
+
+/**
+ * Close a JSON list for a batch and update the buffered bytes counter
+ *
+ * @param instance an instance data structure.
+ * @return Always returns 0.
+ */
+int close_batch_json_http(struct instance *instance)
+{
+ buffer_strcat(instance->buffer, "\n]\n");
+
+ simple_connector_end_batch(instance);
+
+ return 0;
+}
+
+/**
+ * Prepare HTTP header
+ *
+ * @param instance an instance data structure.
+ */
+void json_http_prepare_header(struct instance *instance)
+{
+ struct simple_connector_data *simple_connector_data = instance->connector_specific_data;
+
+ buffer_sprintf(
+ simple_connector_data->last_buffer->header,
+ "POST /api/put HTTP/1.1\r\n"
+ "Host: %s\r\n"
+ "Content-Type: application/json\r\n"
+ "Content-Length: %lu\r\n"
+ "\r\n",
+ instance->config.destination,
+ buffer_strlen(simple_connector_data->last_buffer->buffer));
+
+ return;
+}
diff --git a/exporting/json/json.h b/exporting/json/json.h
new file mode 100644
index 0000000..d916263
--- /dev/null
+++ b/exporting/json/json.h
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_EXPORTING_JSON_H
+#define NETDATA_EXPORTING_JSON_H
+
+#include "exporting/exporting_engine.h"
+
+int init_json_instance(struct instance *instance);
+int init_json_http_instance(struct instance *instance);
+
+int format_host_labels_json_plaintext(struct instance *instance, RRDHOST *host);
+
+int format_dimension_collected_json_plaintext(struct instance *instance, RRDDIM *rd);
+int format_dimension_stored_json_plaintext(struct instance *instance, RRDDIM *rd);
+
+int open_batch_json_http(struct instance *instance);
+int close_batch_json_http(struct instance *instance);
+
+void json_http_prepare_header(struct instance *instance);
+
+#endif //NETDATA_EXPORTING_JSON_H
diff --git a/exporting/mongodb/Makefile.am b/exporting/mongodb/Makefile.am
new file mode 100644
index 0000000..161784b
--- /dev/null
+++ b/exporting/mongodb/Makefile.am
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/exporting/mongodb/README.md b/exporting/mongodb/README.md
new file mode 100644
index 0000000..2934f38
--- /dev/null
+++ b/exporting/mongodb/README.md
@@ -0,0 +1,38 @@
+<!--
+title: "Export metrics to MongoDB"
+description: "Archive your Agent's metrics to a MongoDB database for long-term storage, further analysis, or correlation with data from other sources."
+custom_edit_url: https://github.com/netdata/netdata/edit/master/exporting/mongodb/README.md
+sidebar_label: MongoDB
+-->
+
+# Export metrics to MongoDB
+
+You can use the MongoDB connector for the [exporting engine](/exporting/README.md) to archive your agent's metrics to a
+MongoDB database for long-term storage, further analysis, or correlation with data from other sources.
+
+## Prerequisites
+
+To use MongoDB as external storage for long-term archiving, first
+[install](http://mongoc.org/libmongoc/current/installing.html) `libmongoc` 1.7.0 or higher. Then reinstall Netdata
+from source, so that the installer detects the newly available library.
+
+## Configuration
+
+To enable data exporting to a MongoDB database, run `./edit-config exporting.conf` in the Netdata configuration
+directory and set the following options:
+
+```conf
+[mongodb:my_instance]
+ enabled = yes
+ destination = mongodb://<hostname>
+ database = your_database_name
+ collection = your_collection_name
+```
+
+You can find more information about the `destination` string URI format in the MongoDB
+[documentation](https://docs.mongodb.com/manual/reference/connection-string/).
+
+The default socket timeout depends on the exporting connector update interval. The timeout is 500 ms shorter than the
+interval (but not less than 1000 ms). You can alter the timeout using the `sockettimeoutms` MongoDB URI option.
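+
+For example, a hypothetical connection string that sets the timeout explicitly (the host, port, and value below are
+placeholders) could be:
+
+```conf
+ destination = mongodb://localhost:27017/?sockettimeoutms=5000
+```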
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fexporting%2Fmongodb%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/exporting/mongodb/mongodb.c b/exporting/mongodb/mongodb.c
new file mode 100644
index 0000000..44922a2
--- /dev/null
+++ b/exporting/mongodb/mongodb.c
@@ -0,0 +1,387 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#define EXPORTING_INTERNALS
+#include "mongodb.h"
+
+#define CONFIG_FILE_LINE_MAX ((CONFIG_MAX_NAME + CONFIG_MAX_VALUE + 1024) * 2)
+
+/**
+ * Initialize MongoDB connector specific data, including a ring buffer
+ *
+ * @param instance an instance data structure.
+ * @return Returns 0 on success, 1 on failure.
+ */
+int mongodb_init(struct instance *instance)
+{
+ struct mongodb_specific_config *connector_specific_config = instance->config.connector_specific_config;
+ mongoc_uri_t *uri;
+ bson_error_t bson_error;
+
+ if (unlikely(!connector_specific_config->collection || !*connector_specific_config->collection)) {
+ error("EXPORTING: collection name is a mandatory MongoDB parameter, but it is not configured");
+ return 1;
+ }
+
+ uri = mongoc_uri_new_with_error(instance->config.destination, &bson_error);
+ if (unlikely(!uri)) {
+ error(
+ "EXPORTING: failed to parse URI: %s. Error message: %s", instance->config.destination, bson_error.message);
+ return 1;
+ }
+
+ int32_t socket_timeout =
+ mongoc_uri_get_option_as_int32(uri, MONGOC_URI_SOCKETTIMEOUTMS, instance->config.timeoutms);
+ if (!mongoc_uri_set_option_as_int32(uri, MONGOC_URI_SOCKETTIMEOUTMS, socket_timeout)) {
+ error("EXPORTING: failed to set %s to the value %d", MONGOC_URI_SOCKETTIMEOUTMS, socket_timeout);
+ return 1;
+ };
+
+ struct mongodb_specific_data *connector_specific_data =
+ (struct mongodb_specific_data *)instance->connector_specific_data;
+
+ connector_specific_data->client = mongoc_client_new_from_uri(uri);
+ if (unlikely(!connector_specific_data->client)) {
+ error("EXPORTING: failed to create a new client");
+ return 1;
+ }
+
+ if (!mongoc_client_set_appname(connector_specific_data->client, "netdata")) {
+ error("EXPORTING: failed to set client appname");
+ };
+
+ connector_specific_data->collection = mongoc_client_get_collection(
+ connector_specific_data->client, connector_specific_config->database, connector_specific_config->collection);
+
+ mongoc_uri_destroy(uri);
+
+ // create a ring buffer
+ struct bson_buffer *first_buffer = NULL;
+
+ if (instance->config.buffer_on_failures < 2)
+ instance->config.buffer_on_failures = 1;
+ else
+ instance->config.buffer_on_failures -= 1;
+
+ for (int i = 0; i < instance->config.buffer_on_failures; i++) {
+ struct bson_buffer *current_buffer = callocz(1, sizeof(struct bson_buffer));
+
+ if (!connector_specific_data->first_buffer)
+ first_buffer = current_buffer;
+ else
+ current_buffer->next = connector_specific_data->first_buffer;
+
+ connector_specific_data->first_buffer = current_buffer;
+ }
+
+ first_buffer->next = connector_specific_data->first_buffer;
+ connector_specific_data->last_buffer = connector_specific_data->first_buffer;
+
+ return 0;
+}
+
+/**
+ * Initialize a MongoDB connector instance
+ *
+ * @param instance an instance data structure.
+ * @return Returns 0 on success, 1 on failure.
+ */
+int init_mongodb_instance(struct instance *instance)
+{
+ instance->worker = mongodb_connector_worker;
+
+ instance->start_batch_formatting = NULL;
+ instance->start_host_formatting = format_host_labels_json_plaintext;
+ instance->start_chart_formatting = NULL;
+
+ if (EXPORTING_OPTIONS_DATA_SOURCE(instance->config.options) == EXPORTING_SOURCE_DATA_AS_COLLECTED)
+ instance->metric_formatting = format_dimension_collected_json_plaintext;
+ else
+ instance->metric_formatting = format_dimension_stored_json_plaintext;
+
+ instance->end_chart_formatting = NULL;
+ instance->end_host_formatting = flush_host_labels;
+ instance->end_batch_formatting = format_batch_mongodb;
+
+ instance->prepare_header = NULL;
+ instance->check_response = NULL;
+
+ instance->buffer = (void *)buffer_create(0);
+ if (!instance->buffer) {
+ error("EXPORTING: cannot create buffer for MongoDB exporting connector instance %s", instance->config.name);
+ return 1;
+ }
+ if (uv_mutex_init(&instance->mutex))
+ return 1;
+ if (uv_cond_init(&instance->cond_var))
+ return 1;
+
+ struct mongodb_specific_data *connector_specific_data = callocz(1, sizeof(struct mongodb_specific_data));
+ instance->connector_specific_data = (void *)connector_specific_data;
+
+ instance->config.timeoutms =
+ (instance->config.update_every >= 2) ? (instance->engine->config.update_every * MSEC_PER_SEC - 500) : 1000;
+
+ if (!instance->engine->mongoc_initialized) {
+ mongoc_init();
+ instance->engine->mongoc_initialized = 1;
+ }
+
+ if (unlikely(mongodb_init(instance))) {
+ error("EXPORTING: cannot initialize MongoDB exporting connector");
+ return 1;
+ }
+
+ return 0;
+}
+
+/**
+ * Free an array of BSON structures
+ *
+ * @param insert an array of documents.
+ * @param documents_inserted the number of documents inserted.
+ */
+void free_bson(bson_t **insert, size_t documents_inserted)
+{
+ size_t i;
+
+ for (i = 0; i < documents_inserted; i++)
+ bson_destroy(insert[i]);
+
+ freez(insert);
+}
+
+/**
+ * Format a batch for the MongoDB connector
+ *
+ * @param instance an instance data structure.
+ * @return Returns 0 on success, 1 on failure.
+ */
+int format_batch_mongodb(struct instance *instance)
+{
+ struct mongodb_specific_data *connector_specific_data =
+ (struct mongodb_specific_data *)instance->connector_specific_data;
+ struct stats *stats = &instance->stats;
+
+ bson_t **insert = connector_specific_data->last_buffer->insert;
+ if (insert) {
+ // ring buffer is full, reuse the oldest element
+ connector_specific_data->first_buffer = connector_specific_data->first_buffer->next;
+ free_bson(insert, connector_specific_data->last_buffer->documents_inserted);
+ connector_specific_data->total_documents_inserted -= connector_specific_data->last_buffer->documents_inserted;
+ stats->buffered_bytes -= connector_specific_data->last_buffer->buffered_bytes;
+ }
+ insert = callocz((size_t)stats->buffered_metrics, sizeof(bson_t *));
+ connector_specific_data->last_buffer->insert = insert;
+
+ BUFFER *buffer = (BUFFER *)instance->buffer;
+ char *start = (char *)buffer_tostring(buffer);
+ char *end = start;
+
+ size_t documents_inserted = 0;
+
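+ // the instance buffer holds one JSON document per line; split it on
+ // newlines and convert each line into a BSON document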
+ while (*end && documents_inserted <= (size_t)stats->buffered_metrics) {
+ while (*end && *end != '\n')
+ end++;
+
+ if (likely(*end)) {
+ *end = '\0';
+ end++;
+ } else {
+ break;
+ }
+
+ bson_error_t bson_error;
+ insert[documents_inserted] = bson_new_from_json((const uint8_t *)start, -1, &bson_error);
+
+ if (unlikely(!insert[documents_inserted])) {
+ error(
+ "EXPORTING: Failed creating a BSON document from a JSON string \"%s\" : %s", start, bson_error.message);
+ free_bson(insert, documents_inserted);
+ return 1;
+ }
+
+ start = end;
+
+ documents_inserted++;
+ }
+
+ stats->buffered_bytes += connector_specific_data->last_buffer->buffered_bytes = buffer_strlen(buffer);
+
+ buffer_flush(buffer);
+
+ // stats->buffered_metrics holds the number of metrics added during the current
+ // batch formatting iteration, so clear it here. The worker reports statistics
+ // using connector_specific_data->total_documents_inserted instead.
+ stats->buffered_metrics = 0;
+ connector_specific_data->total_documents_inserted += documents_inserted;
+
+ connector_specific_data->last_buffer->documents_inserted = documents_inserted;
+ connector_specific_data->last_buffer = connector_specific_data->last_buffer->next;
+
+ return 0;
+}
+
+/**
+ * Clean up a MongoDB connector instance
+ *
+ * @param instance an instance data structure.
+ */
+void mongodb_cleanup(struct instance *instance)
+{
+ info("EXPORTING: cleaning up instance %s ...", instance->config.name);
+
+ struct mongodb_specific_data *connector_specific_data =
+ (struct mongodb_specific_data *)instance->connector_specific_data;
+
+ mongoc_collection_destroy(connector_specific_data->collection);
+ mongoc_client_destroy(connector_specific_data->client);
+ if (instance->engine->mongoc_initialized) {
+ mongoc_cleanup();
+ instance->engine->mongoc_initialized = 0;
+ }
+
+ buffer_free(instance->buffer);
+
+ struct bson_buffer *next_buffer = connector_specific_data->first_buffer;
+ for (int i = 0; i < instance->config.buffer_on_failures; i++) {
+ struct bson_buffer *current_buffer = next_buffer;
+ next_buffer = next_buffer->next;
+
+ if (current_buffer->insert)
+ free_bson(current_buffer->insert, current_buffer->documents_inserted);
+ freez(current_buffer);
+ }
+
+ freez(connector_specific_data);
+
+ struct mongodb_specific_config *connector_specific_config =
+ (struct mongodb_specific_config *)instance->config.connector_specific_config;
+ freez(connector_specific_config->database);
+ freez(connector_specific_config->collection);
+ freez(connector_specific_config);
+
+ info("EXPORTING: instance %s exited", instance->config.name);
+ instance->exited = 1;
+
+ return;
+}
+
+/**
+ * MongoDB connector worker
+ *
+ * Runs in a separate thread for every instance.
+ *
+ * @param instance_p an instance data structure.
+ */
+void mongodb_connector_worker(void *instance_p)
+{
+ struct instance *instance = (struct instance *)instance_p;
+ struct mongodb_specific_config *connector_specific_config = instance->config.connector_specific_config;
+ struct mongodb_specific_data *connector_specific_data =
+ (struct mongodb_specific_data *)instance->connector_specific_data;
+
+ while (!instance->engine->exit) {
+ struct stats *stats = &instance->stats;
+
+ uv_mutex_lock(&instance->mutex);
+ if (!connector_specific_data->first_buffer->insert ||
+ !connector_specific_data->first_buffer->documents_inserted) {
+ while (!instance->data_is_ready)
+ uv_cond_wait(&instance->cond_var, &instance->mutex);
+ instance->data_is_ready = 0;
+ }
+
+ if (unlikely(instance->engine->exit)) {
+ uv_mutex_unlock(&instance->mutex);
+ break;
+ }
+
+ // reset the monitoring chart counters
+ stats->received_bytes =
+ stats->sent_bytes =
+ stats->sent_metrics =
+ stats->lost_metrics =
+ stats->receptions =
+ stats->transmission_successes =
+ stats->transmission_failures =
+ stats->data_lost_events =
+ stats->lost_bytes =
+ stats->reconnects = 0;
+
+ bson_t **insert = connector_specific_data->first_buffer->insert;
+ size_t documents_inserted = connector_specific_data->first_buffer->documents_inserted;
+ size_t buffered_bytes = connector_specific_data->first_buffer->buffered_bytes;
+
+ connector_specific_data->first_buffer->insert = NULL;
+ connector_specific_data->first_buffer->documents_inserted = 0;
+ connector_specific_data->first_buffer->buffered_bytes = 0;
+ connector_specific_data->first_buffer = connector_specific_data->first_buffer->next;
+
+ uv_mutex_unlock(&instance->mutex);
+
+ size_t data_size = 0;
+ for (size_t i = 0; i < documents_inserted; i++) {
+ data_size += insert[i]->len;
+ }
+
+ debug(
+ D_BACKEND,
+ "EXPORTING: mongodb_insert(): destination = %s, database = %s, collection = %s, data size = %zu",
+ instance->config.destination,
+ connector_specific_config->database,
+ connector_specific_config->collection,
+ data_size);
+
+ if (likely(documents_inserted != 0)) {
+ bson_error_t bson_error;
+ if (likely(mongoc_collection_insert_many(
+ connector_specific_data->collection,
+ (const bson_t **)insert,
+ documents_inserted,
+ NULL,
+ NULL,
+ &bson_error))) {
+ stats->sent_metrics = documents_inserted;
+ stats->sent_bytes += data_size;
+ stats->transmission_successes++;
+ stats->receptions++;
+ } else {
+ // oops! we couldn't send (all or some of the) data
+ error("EXPORTING: %s", bson_error.message);
+ error(
+ "EXPORTING: failed to write data to the database '%s'. "
+ "Willing to write %zu bytes, wrote %zu bytes.",
+ instance->config.destination, data_size, 0UL);
+
+ stats->transmission_failures++;
+ stats->data_lost_events++;
+ stats->lost_bytes += buffered_bytes;
+ stats->lost_metrics += documents_inserted;
+ }
+ }
+
+ free_bson(insert, documents_inserted);
+
+ if (unlikely(instance->engine->exit))
+ break;
+
+ uv_mutex_lock(&instance->mutex);
+
+ stats->buffered_metrics = connector_specific_data->total_documents_inserted;
+
+ send_internal_metrics(instance);
+
+ connector_specific_data->total_documents_inserted -= documents_inserted;
+
+ stats->buffered_metrics = 0;
+ stats->buffered_bytes -= buffered_bytes;
+
+ uv_mutex_unlock(&instance->mutex);
+
+#ifdef UNIT_TESTING
+ return;
+#endif
+ }
+
+ mongodb_cleanup(instance);
+}
diff --git a/exporting/mongodb/mongodb.h b/exporting/mongodb/mongodb.h
new file mode 100644
index 0000000..f1867b2
--- /dev/null
+++ b/exporting/mongodb/mongodb.h
@@ -0,0 +1,35 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_EXPORTING_MONGODB_H
+#define NETDATA_EXPORTING_MONGODB_H
+
+#include "exporting/exporting_engine.h"
+#include "exporting/json/json.h"
+#include <mongoc.h>
+
+struct bson_buffer {
+ bson_t **insert;
+ size_t documents_inserted;
+ size_t buffered_bytes;
+
+ struct bson_buffer *next;
+};
+
+struct mongodb_specific_data {
+ mongoc_client_t *client;
+ mongoc_collection_t *collection;
+
+ size_t total_documents_inserted;
+
+ struct bson_buffer *first_buffer;
+ struct bson_buffer *last_buffer;
+};
+
+int mongodb_init(struct instance *instance);
+void mongodb_cleanup(struct instance *instance);
+
+int init_mongodb_instance(struct instance *instance);
+int format_batch_mongodb(struct instance *instance);
+void mongodb_connector_worker(void *instance_p);
+
+#endif //NETDATA_EXPORTING_MONGODB_H
diff --git a/exporting/nc-exporting.sh b/exporting/nc-exporting.sh
new file mode 100755
index 0000000..9c8b08e
--- /dev/null
+++ b/exporting/nc-exporting.sh
@@ -0,0 +1,158 @@
+#!/usr/bin/env bash
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# This is a simple backend database proxy, written in BASH, using the nc command.
+# Run the script without any parameters for help.
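+# Example (hypothetical backend host): ./nc-exporting.sh start 19000 opentsdb.example.com 4242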
+
+MODE="${1}"
+MY_PORT="${2}"
+BACKEND_HOST="${3}"
+BACKEND_PORT="${4}"
+FILE="${NETDATA_NC_BACKEND_DIR-/tmp}/netdata-nc-backend-${MY_PORT}"
+
+log() {
+ logger --stderr --id=$$ --tag "netdata-nc-backend" "${*}"
+}
+
+mync() {
+ local ret
+
+ log "Running: nc ${*}"
+ nc "${@}"
+ ret=$?
+
+ log "nc stopped with return code ${ret}."
+
+ return ${ret}
+}
+
+listen_save_replay_forever() {
+ local file="${1}" port="${2}" real_backend_host="${3}" real_backend_port="${4}" ret delay=1 started ended
+
+ while true
+ do
+ log "Starting nc to listen on port ${port} and save metrics to ${file}"
+
+ started=$(date +%s)
+ mync -l -p "${port}" | tee -a -p --output-error=exit "${file}"
+ ended=$(date +%s)
+
+ if [ -s "${file}" ]
+ then
+ if [ -n "${real_backend_host}" ] && [ -n "${real_backend_port}" ]
+ then
+ log "Attempting to send the metrics to the real backend at ${real_backend_host}:${real_backend_port}"
+
+ mync "${real_backend_host}" "${real_backend_port}" <"${file}"
+ ret=$?
+
+ if [ ${ret} -eq 0 ]
+ then
+                    log "Successfully sent the metrics to ${real_backend_host}:${real_backend_port}"
+ mv "${file}" "${file}.old"
+ touch "${file}"
+ else
+ log "Failed to send the metrics to ${real_backend_host}:${real_backend_port} (nc returned ${ret}) - appending more data to ${file}"
+ fi
+ else
+ log "No backend configured - appending more data to ${file}"
+ fi
+ fi
+
+ # prevent a CPU hungry infinite loop
+ # if nc cannot listen to port
+ if [ $((ended - started)) -lt 5 ]
+ then
+ log "nc has been stopped too fast."
+ delay=30
+ else
+ delay=1
+ fi
+
+ log "Waiting ${delay} seconds before listening again for data."
+ sleep ${delay}
+ done
+}
+
+if [ "${MODE}" = "start" ]
+ then
+
+ # start the listener, in exclusive mode
+ # only one can use the same file/port at a time
+ {
+ flock -n 9
+ # shellcheck disable=SC2181
+ if [ $? -ne 0 ]
+ then
+ log "Cannot get exclusive lock on file ${FILE}.lock - Am I running multiple times?"
+ exit 2
+ fi
+
+ # save our PID to the lock file
+ echo "$$" >"${FILE}.lock"
+
+ listen_save_replay_forever "${FILE}" "${MY_PORT}" "${BACKEND_HOST}" "${BACKEND_PORT}"
+ ret=$?
+
+ log "listener exited."
+ exit ${ret}
+
+ } 9>>"${FILE}.lock"
+
+ # we can only get here if ${FILE}.lock cannot be created
+ log "Cannot create file ${FILE}."
+ exit 3
+
+elif [ "${MODE}" = "stop" ]
+ then
+
+ {
+ flock -n 9
+ # shellcheck disable=SC2181
+ if [ $? -ne 0 ]
+ then
+ pid=$(<"${FILE}".lock)
+ log "Killing process ${pid}..."
+ kill -TERM "-${pid}"
+ exit 0
+ fi
+
+ log "File ${FILE}.lock has been locked by me but it shouldn't. Is a collector running?"
+ exit 4
+
+ } 9<"${FILE}.lock"
+
+ log "File ${FILE}.lock does not exist. Is a collector running?"
+ exit 5
+
+else
+
+ cat <<EOF
+Usage:
+
+ "${0}" start|stop PORT [BACKEND_HOST BACKEND_PORT]
+
+    PORT            The port this script will listen on
+ (configure netdata to use this as a second backend)
+
+ BACKEND_HOST The real backend host
+ BACKEND_PORT The real backend port
+
+    This script can act as a fallback backend for netdata.
+    It will receive metrics from netdata, save them to
+    ${FILE}
+    and once netdata reconnects to the real backend, this script
+    will push all collected metrics to the real backend too and
+    wait for the next failure to happen.
+
+    Only one netdata instance can connect to this script at a time.
+    If you need a fallback for multiple netdata instances, run this
+    script multiple times with different ports.
+
+ You can run me in the background with this:
+
+ screen -d -m "${0}" start PORT [BACKEND_HOST BACKEND_PORT]
+EOF
+ exit 1
+fi
diff --git a/exporting/opentsdb/Makefile.am b/exporting/opentsdb/Makefile.am
new file mode 100644
index 0000000..161784b
--- /dev/null
+++ b/exporting/opentsdb/Makefile.am
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/exporting/opentsdb/README.md b/exporting/opentsdb/README.md
new file mode 100644
index 0000000..3765ad2
--- /dev/null
+++ b/exporting/opentsdb/README.md
@@ -0,0 +1,30 @@
+<!--
+title: "Export metrics to OpenTSDB"
+description: "Archive your Agent's metrics to an OpenTSDB database for long-term storage and further analysis."
+custom_edit_url: https://github.com/netdata/netdata/edit/master/exporting/opentsdb/README.md
+sidebar_label: OpenTSDB
+-->
+
+# Export metrics to OpenTSDB
+
+You can use the OpenTSDB connector for the [exporting engine](/exporting/README.md) to archive your agent's metrics to OpenTSDB
+databases for long-term storage, further analysis, or correlation with data from other sources.
+
+## Configuration
+
+To enable data exporting to an OpenTSDB database, run `./edit-config exporting.conf` in the Netdata configuration
+directory and set the following options:
+
+```conf
+[opentsdb:my_opentsdb_instance]
+ enabled = yes
+ destination = localhost:4242
+```
+
+Add the `:http` or `:https` modifier to the connector type if you need to use a protocol other than plain text. For example: `opentsdb:http:my_opentsdb_instance`,
+`opentsdb:https:my_opentsdb_instance`.
+
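+For example, a minimal sketch of an HTTPS-enabled instance (the destination host is a placeholder) might look like:
+
+```conf
+[opentsdb:https:my_opentsdb_instance]
+    enabled = yes
+    destination = opentsdb.example.com:4242
+```
+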
+The OpenTSDB connector is further configurable using additional settings. See the [exporting reference
+doc](/exporting/README.md#options) for details.
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fexporting%2Fopentsdb%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/exporting/opentsdb/opentsdb.c b/exporting/opentsdb/opentsdb.c
new file mode 100644
index 0000000..d7b843d
--- /dev/null
+++ b/exporting/opentsdb/opentsdb.c
@@ -0,0 +1,422 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "opentsdb.h"
+#include "../json/json.h"
+
+/**
+ * Initialize OpenTSDB telnet connector instance
+ *
+ * @param instance an instance data structure.
+ * @return Returns 0 on success, 1 on failure.
+ */
+int init_opentsdb_telnet_instance(struct instance *instance)
+{
+ instance->worker = simple_connector_worker;
+
+ struct simple_connector_config *connector_specific_config = callocz(1, sizeof(struct simple_connector_config));
+ instance->config.connector_specific_config = (void *)connector_specific_config;
+ connector_specific_config->default_port = 4242;
+
+ struct simple_connector_data *connector_specific_data = callocz(1, sizeof(struct simple_connector_data));
+ instance->connector_specific_data = connector_specific_data;
+
+#ifdef ENABLE_HTTPS
+ connector_specific_data->flags = NETDATA_SSL_START;
+ connector_specific_data->conn = NULL;
+ if (instance->config.options & EXPORTING_OPTION_USE_TLS) {
+ security_start_ssl(NETDATA_SSL_CONTEXT_EXPORTING);
+ }
+#endif
+
+ instance->start_batch_formatting = NULL;
+ instance->start_host_formatting = format_host_labels_opentsdb_telnet;
+ instance->start_chart_formatting = NULL;
+
+ if (EXPORTING_OPTIONS_DATA_SOURCE(instance->config.options) == EXPORTING_SOURCE_DATA_AS_COLLECTED)
+ instance->metric_formatting = format_dimension_collected_opentsdb_telnet;
+ else
+ instance->metric_formatting = format_dimension_stored_opentsdb_telnet;
+
+ instance->end_chart_formatting = NULL;
+ instance->end_host_formatting = flush_host_labels;
+ instance->end_batch_formatting = simple_connector_end_batch;
+
+ instance->prepare_header = NULL;
+ instance->check_response = exporting_discard_response;
+
+ instance->buffer = (void *)buffer_create(0);
+ if (!instance->buffer) {
+ error("EXPORTING: cannot create buffer for opentsdb telnet exporting connector instance %s", instance->config.name);
+ return 1;
+ }
+
+ simple_connector_init(instance);
+
+ if (uv_mutex_init(&instance->mutex))
+ return 1;
+ if (uv_cond_init(&instance->cond_var))
+ return 1;
+
+ return 0;
+}
+
+/**
+ * Initialize OpenTSDB HTTP connector instance
+ *
+ * @param instance an instance data structure.
+ * @return Returns 0 on success, 1 on failure.
+ */
+int init_opentsdb_http_instance(struct instance *instance)
+{
+ instance->worker = simple_connector_worker;
+
+ struct simple_connector_config *connector_specific_config = callocz(1, sizeof(struct simple_connector_config));
+ instance->config.connector_specific_config = (void *)connector_specific_config;
+ connector_specific_config->default_port = 4242;
+
+ struct simple_connector_data *connector_specific_data = callocz(1, sizeof(struct simple_connector_data));
+#ifdef ENABLE_HTTPS
+ connector_specific_data->flags = NETDATA_SSL_START;
+ connector_specific_data->conn = NULL;
+ if (instance->config.options & EXPORTING_OPTION_USE_TLS) {
+ security_start_ssl(NETDATA_SSL_CONTEXT_EXPORTING);
+ }
+#endif
+ instance->connector_specific_data = connector_specific_data;
+
+ instance->start_batch_formatting = open_batch_json_http;
+ instance->start_host_formatting = format_host_labels_opentsdb_http;
+ instance->start_chart_formatting = NULL;
+
+ if (EXPORTING_OPTIONS_DATA_SOURCE(instance->config.options) == EXPORTING_SOURCE_DATA_AS_COLLECTED)
+ instance->metric_formatting = format_dimension_collected_opentsdb_http;
+ else
+ instance->metric_formatting = format_dimension_stored_opentsdb_http;
+
+ instance->end_chart_formatting = NULL;
+ instance->end_host_formatting = flush_host_labels;
+ instance->end_batch_formatting = close_batch_json_http;
+
+ instance->prepare_header = opentsdb_http_prepare_header;
+ instance->check_response = exporting_discard_response;
+
+ instance->buffer = (void *)buffer_create(0);
+ if (!instance->buffer) {
+ error("EXPORTING: cannot create buffer for opentsdb HTTP exporting connector instance %s", instance->config.name);
+ return 1;
+ }
+
+ simple_connector_init(instance);
+
+ if (uv_mutex_init(&instance->mutex))
+ return 1;
+ if (uv_cond_init(&instance->cond_var))
+ return 1;
+
+ return 0;
+}
+
+/**
+ * Copy a label value and substitute underscores in place of characters which can't be used in OpenTSDB output
+ *
+ * @param dst a destination string.
+ * @param src a source string.
+ * @param len the maximum number of characters copied.
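+ *
+ * For example, the value "us east!" becomes "us_east_".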
+ */
+
+void sanitize_opentsdb_label_value(char *dst, char *src, size_t len)
+{
+ while (*src != '\0' && len) {
+ if (isalpha(*src) || isdigit(*src) || *src == '-' || *src == '_' || *src == '.' || *src == '/' || IS_UTF8_BYTE(*src))
+ *dst++ = *src;
+ else
+ *dst++ = '_';
+ src++;
+ len--;
+ }
+ *dst = '\0';
+}
+
+/**
+ * Format host labels for OpenTSDB telnet connector
+ *
+ * @param instance an instance data structure.
+ * @param host a data collecting host.
+ * @return Always returns 0.
+ */
+int format_host_labels_opentsdb_telnet(struct instance *instance, RRDHOST *host)
+{
+ if (!instance->labels)
+ instance->labels = buffer_create(1024);
+
+ if (unlikely(!sending_labels_configured(instance)))
+ return 0;
+
+ rrdhost_check_rdlock(host);
+ netdata_rwlock_rdlock(&host->labels.labels_rwlock);
+ for (struct label *label = host->labels.head; label; label = label->next) {
+ if (!should_send_label(instance, label))
+ continue;
+
+ char value[CONFIG_MAX_VALUE + 1];
+ sanitize_opentsdb_label_value(value, label->value, CONFIG_MAX_VALUE);
+
+ if (*value)
+ buffer_sprintf(instance->labels, " %s=%s", label->key, value);
+ }
+ netdata_rwlock_unlock(&host->labels.labels_rwlock);
+
+ return 0;
+}
+
+/**
+ * Format dimension using collected data for OpenTSDB telnet connector
+ *
+ * @param instance an instance data structure.
+ * @param rd a dimension.
+ * @return Always returns 0.
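+ *
+ * Produces lines of the form:
+ *   put PREFIX.CHART.DIMENSION TIMESTAMP VALUE host=HOSTNAME[ TAGS][LABELS]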
+ */
+int format_dimension_collected_opentsdb_telnet(struct instance *instance, RRDDIM *rd)
+{
+ RRDSET *st = rd->rrdset;
+ RRDHOST *host = st->rrdhost;
+
+ char chart_name[RRD_ID_LENGTH_MAX + 1];
+ exporting_name_copy(
+ chart_name,
+ (instance->config.options & EXPORTING_OPTION_SEND_NAMES && st->name) ? st->name : st->id,
+ RRD_ID_LENGTH_MAX);
+
+ char dimension_name[RRD_ID_LENGTH_MAX + 1];
+ exporting_name_copy(
+ dimension_name,
+ (instance->config.options & EXPORTING_OPTION_SEND_NAMES && rd->name) ? rd->name : rd->id,
+ RRD_ID_LENGTH_MAX);
+
+ buffer_sprintf(
+ instance->buffer,
+ "put %s.%s.%s %llu " COLLECTED_NUMBER_FORMAT " host=%s%s%s%s\n",
+ instance->config.prefix,
+ chart_name,
+ dimension_name,
+ (unsigned long long)rd->last_collected_time.tv_sec,
+ rd->last_collected_value,
+ (host == localhost) ? instance->config.hostname : host->hostname,
+ (host->tags) ? " " : "",
+ (host->tags) ? host->tags : "",
+ (instance->labels) ? buffer_tostring(instance->labels) : "");
+
+ return 0;
+}
+
+/**
+ * Format dimension using a calculated value from stored data for OpenTSDB telnet connector
+ *
+ * @param instance an instance data structure.
+ * @param rd a dimension.
+ * @return Always returns 0.
+ */
+int format_dimension_stored_opentsdb_telnet(struct instance *instance, RRDDIM *rd)
+{
+ RRDSET *st = rd->rrdset;
+ RRDHOST *host = st->rrdhost;
+
+ char chart_name[RRD_ID_LENGTH_MAX + 1];
+ exporting_name_copy(
+ chart_name,
+ (instance->config.options & EXPORTING_OPTION_SEND_NAMES && st->name) ? st->name : st->id,
+ RRD_ID_LENGTH_MAX);
+
+ char dimension_name[RRD_ID_LENGTH_MAX + 1];
+ exporting_name_copy(
+ dimension_name,
+ (instance->config.options & EXPORTING_OPTION_SEND_NAMES && rd->name) ? rd->name : rd->id,
+ RRD_ID_LENGTH_MAX);
+
+ time_t last_t;
+ calculated_number value = exporting_calculate_value_from_stored_data(instance, rd, &last_t);
+
+ if(isnan(value))
+ return 0;
+
+ buffer_sprintf(
+ instance->buffer,
+ "put %s.%s.%s %llu " CALCULATED_NUMBER_FORMAT " host=%s%s%s%s\n",
+ instance->config.prefix,
+ chart_name,
+ dimension_name,
+ (unsigned long long)last_t,
+ value,
+ (host == localhost) ? instance->config.hostname : host->hostname,
+ (host->tags) ? " " : "",
+ (host->tags) ? host->tags : "",
+ (instance->labels) ? buffer_tostring(instance->labels) : "");
+
+ return 0;
+}
+
+/**
+ * Prepare an HTTP header
+ *
+ * @param instance an instance data structure.
+ */
+void opentsdb_http_prepare_header(struct instance *instance)
+{
+ struct simple_connector_data *simple_connector_data = instance->connector_specific_data;
+
+ buffer_sprintf(
+ simple_connector_data->last_buffer->header,
+ "POST /api/put HTTP/1.1\r\n"
+ "Host: %s\r\n"
+ "Content-Type: application/json\r\n"
+ "Content-Length: %lu\r\n"
+ "\r\n",
+ instance->config.destination,
+ buffer_strlen(simple_connector_data->last_buffer->buffer));
+
+ return;
+}
+
+/**
+ * Format host labels for OpenTSDB HTTP connector
+ *
+ * @param instance an instance data structure.
+ * @param host a data collecting host.
+ * @return Always returns 0.
+ */
+int format_host_labels_opentsdb_http(struct instance *instance, RRDHOST *host)
+{
+ if (!instance->labels)
+ instance->labels = buffer_create(1024);
+
+ if (unlikely(!sending_labels_configured(instance)))
+ return 0;
+
+ rrdhost_check_rdlock(host);
+ netdata_rwlock_rdlock(&host->labels.labels_rwlock);
+ for (struct label *label = host->labels.head; label; label = label->next) {
+ if (!should_send_label(instance, label))
+ continue;
+
+ char escaped_value[CONFIG_MAX_VALUE * 2 + 1];
+ sanitize_json_string(escaped_value, label->value, CONFIG_MAX_VALUE);
+
+ char value[CONFIG_MAX_VALUE + 1];
+ sanitize_opentsdb_label_value(value, escaped_value, CONFIG_MAX_VALUE);
+
+ if (*value) {
+ buffer_strcat(instance->labels, ",");
+ buffer_sprintf(instance->labels, "\"%s\":\"%s\"", label->key, value);
+ }
+ }
+ netdata_rwlock_unlock(&host->labels.labels_rwlock);
+
+ return 0;
+}
+
+/**
+ * Format dimension using collected data for OpenTSDB HTTP connector
+ *
+ * @param instance an instance data structure.
+ * @param rd a dimension.
+ * @return Always returns 0.
+ */
+int format_dimension_collected_opentsdb_http(struct instance *instance, RRDDIM *rd)
+{
+ RRDSET *st = rd->rrdset;
+ RRDHOST *host = st->rrdhost;
+
+ char chart_name[RRD_ID_LENGTH_MAX + 1];
+ exporting_name_copy(
+ chart_name,
+ (instance->config.options & EXPORTING_OPTION_SEND_NAMES && st->name) ? st->name : st->id,
+ RRD_ID_LENGTH_MAX);
+
+ char dimension_name[RRD_ID_LENGTH_MAX + 1];
+ exporting_name_copy(
+ dimension_name,
+ (instance->config.options & EXPORTING_OPTION_SEND_NAMES && rd->name) ? rd->name : rd->id,
+ RRD_ID_LENGTH_MAX);
+
+ if (buffer_strlen((BUFFER *)instance->buffer) > 2)
+ buffer_strcat(instance->buffer, ",\n");
+
+ buffer_sprintf(
+ instance->buffer,
+ "{"
+ "\"metric\":\"%s.%s.%s\","
+ "\"timestamp\":%llu,"
+ "\"value\":"COLLECTED_NUMBER_FORMAT","
+ "\"tags\":{"
+ "\"host\":\"%s%s%s\"%s"
+ "}"
+ "}",
+ instance->config.prefix,
+ chart_name,
+ dimension_name,
+ (unsigned long long)rd->last_collected_time.tv_sec,
+ rd->last_collected_value,
+ (host == localhost) ? instance->config.hostname : host->hostname,
+ (host->tags) ? " " : "",
+ (host->tags) ? host->tags : "",
+ instance->labels ? buffer_tostring(instance->labels) : "");
+
+ return 0;
+}
+
+/**
+ * Format dimension using a calculated value from stored data for OpenTSDB HTTP connector
+ *
+ * @param instance an instance data structure.
+ * @param rd a dimension.
+ * @return Always returns 0.
+ */
+int format_dimension_stored_opentsdb_http(struct instance *instance, RRDDIM *rd)
+{
+ RRDSET *st = rd->rrdset;
+ RRDHOST *host = st->rrdhost;
+
+ char chart_name[RRD_ID_LENGTH_MAX + 1];
+ exporting_name_copy(
+ chart_name,
+ (instance->config.options & EXPORTING_OPTION_SEND_NAMES && st->name) ? st->name : st->id,
+ RRD_ID_LENGTH_MAX);
+
+ char dimension_name[RRD_ID_LENGTH_MAX + 1];
+ exporting_name_copy(
+ dimension_name,
+ (instance->config.options & EXPORTING_OPTION_SEND_NAMES && rd->name) ? rd->name : rd->id,
+ RRD_ID_LENGTH_MAX);
+
+ time_t last_t;
+ calculated_number value = exporting_calculate_value_from_stored_data(instance, rd, &last_t);
+
+ if(isnan(value))
+ return 0;
+
+ if (buffer_strlen((BUFFER *)instance->buffer) > 2)
+ buffer_strcat(instance->buffer, ",\n");
+
+ buffer_sprintf(
+ instance->buffer,
+ "{"
+ "\"metric\":\"%s.%s.%s\","
+ "\"timestamp\":%llu,"
+ "\"value\":"CALCULATED_NUMBER_FORMAT","
+ "\"tags\":{"
+ "\"host\":\"%s%s%s\"%s"
+ "}"
+ "}",
+ instance->config.prefix,
+ chart_name,
+ dimension_name,
+ (unsigned long long)last_t,
+ value,
+ (host == localhost) ? instance->config.hostname : host->hostname,
+ (host->tags) ? " " : "",
+ (host->tags) ? host->tags : "",
+ instance->labels ? buffer_tostring(instance->labels) : "");
+
+ return 0;
+}
diff --git a/exporting/opentsdb/opentsdb.h b/exporting/opentsdb/opentsdb.h
new file mode 100644
index 0000000..d53a505
--- /dev/null
+++ b/exporting/opentsdb/opentsdb.h
@@ -0,0 +1,26 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_EXPORTING_OPENTSDB_H
+#define NETDATA_EXPORTING_OPENTSDB_H
+
+#include "exporting/exporting_engine.h"
+
+int init_opentsdb_telnet_instance(struct instance *instance);
+int init_opentsdb_http_instance(struct instance *instance);
+
+void sanitize_opentsdb_label_value(char *dst, char *src, size_t len);
+int format_host_labels_opentsdb_telnet(struct instance *instance, RRDHOST *host);
+int format_host_labels_opentsdb_http(struct instance *instance, RRDHOST *host);
+
+int format_dimension_collected_opentsdb_telnet(struct instance *instance, RRDDIM *rd);
+int format_dimension_stored_opentsdb_telnet(struct instance *instance, RRDDIM *rd);
+
+int format_dimension_collected_opentsdb_http(struct instance *instance, RRDDIM *rd);
+int format_dimension_stored_opentsdb_http(struct instance *instance, RRDDIM *rd);
+
+int open_batch_opentsdb_http(struct instance *instance);
+int close_batch_opentsdb_http(struct instance *instance);
+
+void opentsdb_http_prepare_header(struct instance *instance);
+
+#endif //NETDATA_EXPORTING_OPENTSDB_H
diff --git a/exporting/process_data.c b/exporting/process_data.c
new file mode 100644
index 0000000..5e11b39
--- /dev/null
+++ b/exporting/process_data.c
@@ -0,0 +1,426 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "exporting_engine.h"
+
+/**
+ * Normalize chart and dimension names
+ *
+ * Substitute '_' for any special character except '.'.
+ *
+ * @param dst where to copy name to.
+ * @param src where to copy name from.
+ * @param max_len the maximum size of copied name.
+ * @return Returns the size of the copied name.
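+ *
+ * For example, "nginx.requests per second" is copied as "nginx.requests_per_second".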
+ */
+size_t exporting_name_copy(char *dst, const char *src, size_t max_len)
+{
+ size_t n;
+
+ for (n = 0; *src && n < max_len; dst++, src++, n++) {
+ char c = *src;
+
+ if (c != '.' && !isalnum(c))
+ *dst = '_';
+ else
+ *dst = c;
+ }
+ *dst = '\0';
+
+ return n;
+}
+
+/**
+ * Mark scheduled instances
+ *
+ * Any instance can have its own update interval. On every exporting engine update only those instances are picked,
+ * which are scheduled for the update.
+ *
+ * @param engine an engine data structure.
+ * @return Returns 1 if there are instances to process
+ */
+int mark_scheduled_instances(struct engine *engine)
+{
+ int instances_were_scheduled = 0;
+
+ for (struct instance *instance = engine->instance_root; instance; instance = instance->next) {
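+        // an instance is due when the engine clock is in the last data collection step
+        // (localhost->rrd_update_every seconds) of the instance's update interval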
+ if (!instance->disabled && (engine->now % instance->config.update_every >=
+ instance->config.update_every - localhost->rrd_update_every)) {
+ instance->scheduled = 1;
+ instances_were_scheduled = 1;
+ instance->before = engine->now;
+ }
+ }
+
+ return instances_were_scheduled;
+}
+
+/**
+ * Calculate the SUM or AVERAGE of a dimension, for any timeframe
+ *
+ * May return NAN if the database does not have any value in the given timeframe.
+ *
+ * @param instance an instance data structure.
+ * @param rd a dimension(metric) in the Netdata database.
+ * @param last_timestamp the timestamp that should be reported to the exporting connector instance.
+ * @return Returns the value, calculated over the given period.
+ */
+calculated_number exporting_calculate_value_from_stored_data(
+ struct instance *instance,
+ RRDDIM *rd,
+ time_t *last_timestamp)
+{
+ RRDSET *st = rd->rrdset;
+ RRDHOST *host = st->rrdhost;
+ time_t after = instance->after;
+ time_t before = instance->before;
+
+ // find the edges of the rrd database for this chart
+ time_t first_t = rd->state->query_ops.oldest_time(rd);
+ time_t last_t = rd->state->query_ops.latest_time(rd);
+ time_t update_every = st->update_every;
+ struct rrddim_query_handle handle;
+ storage_number n;
+
+ // step back a little, to make sure we have complete data collection
+ // for all metrics
+ after -= update_every * 2;
+ before -= update_every * 2;
+
+ // align the time-frame
+ after = after - (after % update_every);
+ before = before - (before % update_every);
+
+    // for before, lose another iteration
+ // the latest point will be reported the next time
+ before -= update_every;
+
+ if (unlikely(after > before))
+ // this can happen when update_every > before - after
+ after = before;
+
+ if (unlikely(after < first_t))
+ after = first_t;
+
+ if (unlikely(before > last_t))
+ before = last_t;
+
+ if (unlikely(before < first_t || after > last_t)) {
+ // the chart has not been updated in the wanted timeframe
+ debug(
+ D_BACKEND,
+ "EXPORTING: %s.%s.%s: aligned timeframe %lu to %lu is outside the chart's database range %lu to %lu",
+ host->hostname,
+ st->id,
+ rd->id,
+ (unsigned long)after,
+ (unsigned long)before,
+ (unsigned long)first_t,
+ (unsigned long)last_t);
+ return NAN;
+ }
+
+ *last_timestamp = before;
+
+ size_t counter = 0;
+ calculated_number sum = 0;
+
+ for (rd->state->query_ops.init(rd, &handle, after, before); !rd->state->query_ops.is_finished(&handle);) {
+ time_t curr_t;
+ n = rd->state->query_ops.next_metric(&handle, &curr_t);
+
+ if (unlikely(!does_storage_number_exist(n))) {
+ // not collected
+ continue;
+ }
+
+ calculated_number value = unpack_storage_number(n);
+ sum += value;
+
+ counter++;
+ }
+ rd->state->query_ops.finalize(&handle);
+ if (unlikely(!counter)) {
+ debug(
+ D_BACKEND,
+ "EXPORTING: %s.%s.%s: no values stored in database for range %lu to %lu",
+ host->hostname,
+ st->id,
+ rd->id,
+ (unsigned long)after,
+ (unsigned long)before);
+ return NAN;
+ }
+
+ if (unlikely(EXPORTING_OPTIONS_DATA_SOURCE(instance->config.options) == EXPORTING_SOURCE_DATA_SUM))
+ return sum;
+
+ return sum / (calculated_number)counter;
+}
+
+/**
+ * Start batch formatting for every connector instance's buffer
+ *
+ * @param engine an engine data structure.
+ */
+void start_batch_formatting(struct engine *engine)
+{
+ for (struct instance *instance = engine->instance_root; instance; instance = instance->next) {
+ if (instance->scheduled) {
+ uv_mutex_lock(&instance->mutex);
+ if (instance->start_batch_formatting && instance->start_batch_formatting(instance) != 0) {
+ error("EXPORTING: cannot start batch formatting for %s", instance->config.name);
+ disable_instance(instance);
+ }
+ }
+ }
+}
+
+/**
+ * Start host formatting for every connector instance's buffer
+ *
+ * @param engine an engine data structure.
+ * @param host a data collecting host.
+ */
+void start_host_formatting(struct engine *engine, RRDHOST *host)
+{
+ for (struct instance *instance = engine->instance_root; instance; instance = instance->next) {
+ if (instance->scheduled) {
+ if (rrdhost_is_exportable(instance, host)) {
+ if (instance->start_host_formatting && instance->start_host_formatting(instance, host) != 0) {
+ error("EXPORTING: cannot start host formatting for %s", instance->config.name);
+ disable_instance(instance);
+ }
+ } else {
+ instance->skip_host = 1;
+ }
+ }
+ }
+}
+
+/**
+ * Start chart formatting for every connector instance's buffer
+ *
+ * @param engine an engine data structure.
+ * @param st a chart.
+ */
+void start_chart_formatting(struct engine *engine, RRDSET *st)
+{
+ for (struct instance *instance = engine->instance_root; instance; instance = instance->next) {
+ if (instance->scheduled && !instance->skip_host) {
+ if (rrdset_is_exportable(instance, st)) {
+ if (instance->start_chart_formatting && instance->start_chart_formatting(instance, st) != 0) {
+ error("EXPORTING: cannot start chart formatting for %s", instance->config.name);
+ disable_instance(instance);
+ }
+ } else {
+ instance->skip_chart = 1;
+ }
+ }
+ }
+}
+
+/**
+ * Format metric for every connector instance's buffer
+ *
+ * @param engine an engine data structure.
+ * @param rd a dimension(metric) in the Netdata database.
+ */
+void metric_formatting(struct engine *engine, RRDDIM *rd)
+{
+ for (struct instance *instance = engine->instance_root; instance; instance = instance->next) {
+ if (instance->scheduled && !instance->skip_host && !instance->skip_chart) {
+ if (instance->metric_formatting && instance->metric_formatting(instance, rd) != 0) {
+ error("EXPORTING: cannot format metric for %s", instance->config.name);
+ disable_instance(instance);
+ continue;
+ }
+ instance->stats.buffered_metrics++;
+ }
+ }
+}
+
+/**
+ * End chart formatting for every connector instance's buffer
+ *
+ * @param engine an engine data structure.
+ * @param st a chart.
+ */
+void end_chart_formatting(struct engine *engine, RRDSET *st)
+{
+ for (struct instance *instance = engine->instance_root; instance; instance = instance->next) {
+ if (instance->scheduled && !instance->skip_host && !instance->skip_chart) {
+ if (instance->end_chart_formatting && instance->end_chart_formatting(instance, st) != 0) {
+ error("EXPORTING: cannot end chart formatting for %s", instance->config.name);
+ disable_instance(instance);
+ continue;
+ }
+ }
+ instance->skip_chart = 0;
+ }
+}
+
+/**
+ * End host formatting for every connector instance's buffer
+ *
+ * @param engine an engine data structure.
+ * @param host a data collecting host.
+ */
+void end_host_formatting(struct engine *engine, RRDHOST *host)
+{
+ for (struct instance *instance = engine->instance_root; instance; instance = instance->next) {
+ if (instance->scheduled && !instance->skip_host) {
+ if (instance->end_host_formatting && instance->end_host_formatting(instance, host) != 0) {
+ error("EXPORTING: cannot end host formatting for %s", instance->config.name);
+ disable_instance(instance);
+ continue;
+ }
+ }
+ instance->skip_host = 0;
+ }
+}
+
+/**
+ * End batch formatting for every connector instance's buffer
+ *
+ * @param engine an engine data structure.
+ */
+void end_batch_formatting(struct engine *engine)
+{
+ for (struct instance *instance = engine->instance_root; instance; instance = instance->next) {
+ if (instance->scheduled) {
+ if (instance->end_batch_formatting && instance->end_batch_formatting(instance) != 0) {
+ error("EXPORTING: cannot end batch formatting for %s", instance->config.name);
+ disable_instance(instance);
+ continue;
+ }
+ uv_mutex_unlock(&instance->mutex);
+ instance->data_is_ready = 1;
+ uv_cond_signal(&instance->cond_var);
+
+ instance->scheduled = 0;
+ instance->after = instance->before;
+ }
+ }
+}
+
+/**
+ * Prepare buffers
+ *
+ * Walk through the Netdata database and fill buffers for every scheduled exporting connector instance according to
+ * configured rules.
+ *
+ * @param engine an engine data structure.
+ */
+void prepare_buffers(struct engine *engine)
+{
+ netdata_thread_disable_cancelability();
+ start_batch_formatting(engine);
+
+ rrd_rdlock();
+ RRDHOST *host;
+ rrdhost_foreach_read(host)
+ {
+ rrdhost_rdlock(host);
+ start_host_formatting(engine, host);
+ RRDSET *st;
+ rrdset_foreach_read(st, host)
+ {
+ rrdset_rdlock(st);
+ start_chart_formatting(engine, st);
+
+ RRDDIM *rd;
+ rrddim_foreach_read(rd, st)
+ metric_formatting(engine, rd);
+
+ end_chart_formatting(engine, st);
+ rrdset_unlock(st);
+ }
+
+ end_host_formatting(engine, host);
+ rrdhost_unlock(host);
+ }
+ rrd_unlock();
+ netdata_thread_enable_cancelability();
+
+ end_batch_formatting(engine);
+}
+
+/**
+ * Flush a buffer with host labels
+ *
+ * @param instance an instance data structure.
+ * @param host a data collecting host.
+ * @return Always returns 0.
+ */
+int flush_host_labels(struct instance *instance, RRDHOST *host)
+{
+ (void)host;
+
+ if (instance->labels)
+ buffer_flush(instance->labels);
+
+ return 0;
+}
+
+/**
+ * End a batch for a simple connector
+ *
+ * @param instance an instance data structure.
+ * @return Returns 0 on success, 1 on failure.
+ */
+int simple_connector_end_batch(struct instance *instance)
+{
+ struct simple_connector_data *simple_connector_data =
+ (struct simple_connector_data *)instance->connector_specific_data;
+ struct stats *stats = &instance->stats;
+
+ BUFFER *instance_buffer = (BUFFER *)instance->buffer;
+ struct simple_connector_buffer *last_buffer = simple_connector_data->last_buffer;
+
+ if (!last_buffer->buffer) {
+ last_buffer->buffer = buffer_create(0);
+ }
+
+ if (last_buffer->used) {
+ // ring buffer is full, reuse the oldest element
+ simple_connector_data->first_buffer = simple_connector_data->first_buffer->next;
+
+ stats->data_lost_events++;
+ stats->lost_metrics += last_buffer->buffered_metrics;
+ stats->lost_bytes += last_buffer->buffered_bytes;
+ }
+
+    // swap buffers: hand the filled instance buffer to the ring slot and reuse the slot's old buffer
+ BUFFER *tmp_buffer = last_buffer->buffer;
+ last_buffer->buffer = instance_buffer;
+ instance->buffer = instance_buffer = tmp_buffer;
+
+ buffer_flush(instance_buffer);
+
+ if (last_buffer->header)
+ buffer_flush(last_buffer->header);
+ else
+ last_buffer->header = buffer_create(0);
+
+ if (instance->prepare_header)
+ instance->prepare_header(instance);
+
+    // stats->buffered_metrics is used in the simple connector batch formatting as a counter for the number
+    // of metrics added in the current iteration, so we clear it here. The worker uses
+    // simple_connector_data->total_buffered_metrics to show the statistics.
+ size_t buffered_metrics = (size_t)stats->buffered_metrics;
+ stats->buffered_metrics = 0;
+
+ size_t buffered_bytes = buffer_strlen(last_buffer->buffer);
+
+ last_buffer->buffered_metrics = buffered_metrics;
+ last_buffer->buffered_bytes = buffered_bytes;
+ last_buffer->used++;
+
+ simple_connector_data->total_buffered_metrics += buffered_metrics;
+ stats->buffered_bytes += buffered_bytes;
+
+ simple_connector_data->last_buffer = simple_connector_data->last_buffer->next;
+
+ return 0;
+}
diff --git a/exporting/prometheus/Makefile.am b/exporting/prometheus/Makefile.am
new file mode 100644
index 0000000..334fca8
--- /dev/null
+++ b/exporting/prometheus/Makefile.am
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+SUBDIRS = \
+ remote_write \
+ $(NULL)
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/exporting/prometheus/README.md b/exporting/prometheus/README.md
new file mode 100644
index 0000000..d718a36
--- /dev/null
+++ b/exporting/prometheus/README.md
@@ -0,0 +1,461 @@
+<!--
+title: "Export metrics to Prometheus"
+description: "Export Netdata metrics to Prometheus for archiving and further analysis."
+custom_edit_url: https://github.com/netdata/netdata/edit/master/exporting/prometheus/README.md
+sidebar_label: Using Netdata with Prometheus
+-->
+
+# Using Netdata with Prometheus
+
+> IMPORTANT: the format in which Netdata sends metrics to Prometheus has changed since Netdata v1.7. The new Prometheus exporting
+> connector for Netdata supports many more features and is aligned with the development of the rest of the Netdata
+> exporting connectors.
+
+Prometheus is a distributed monitoring system which offers a very simple setup along with a robust data model. Recently
+Netdata added support for Prometheus. I'm going to quickly show you how to install both Netdata and Prometheus on the
+same server. We can then use Grafana pointed at Prometheus to obtain the long-term metrics Netdata offers. I'm assuming we
+are starting with a fresh Ubuntu shell (whether you'd like to follow along in a VM or a cloud instance is up to you).
+
+## Installing Netdata and Prometheus
+
+### Installing Netdata
+
+There are a number of ways to install Netdata according to [Installation](/packaging/installer/README.md). The suggested
+way is to install the latest Netdata and keep it upgraded automatically, using the one-line installation:
+
+```sh
+bash <(curl -Ss https://my-netdata.io/kickstart.sh)
+```
+
+At this point we should have Netdata listening on port 19999. Point your browser here:
+
+```sh
+http://your.netdata.ip:19999
+```
+
+_(replace `your.netdata.ip` with the IP or hostname of the server running Netdata)_
+
+### Installing Prometheus
+
+In order to install Prometheus we are going to introduce our own systemd startup script along with an example
+`prometheus.yml` configuration. Prometheus needs to be pointed to your server at a specific target URL for it to scrape
+Netdata's API. Prometheus always uses a pull model, meaning Netdata is the passive client within this architecture.
+Prometheus always initiates the connection with Netdata.
+
+#### Download Prometheus
+
+```sh
+cd /tmp && curl -s https://api.github.com/repos/prometheus/prometheus/releases/latest \
+| grep "browser_download_url.*linux-amd64.tar.gz" \
+| cut -d '"' -f 4 \
+| wget -qi -
+```
+
+#### Create prometheus system user
+
+```sh
+sudo useradd -r prometheus
+```
+
+#### Create prometheus directory
+
+```sh
+sudo mkdir /opt/prometheus
+sudo chown prometheus:prometheus /opt/prometheus
+```
+
+#### Untar prometheus directory
+
+```sh
+sudo tar -xvf /tmp/prometheus-*linux-amd64.tar.gz -C /opt/prometheus --strip=1
+```
+
+#### Install prometheus.yml
+
+We will use the following `prometheus.yml` file. Save it at `/opt/prometheus/prometheus.yml`.
+
+Make sure to replace `your.netdata.ip` with the IP or hostname of the host running Netdata.
+
+```yaml
+# my global config
+global:
+ scrape_interval: 5s # Set the scrape interval to every 5 seconds. Default is every 1 minute.
+ evaluation_interval: 5s # Evaluate rules every 5 seconds. The default is every 1 minute.
+ # scrape_timeout is set to the global default (10s).
+
+ # Attach these labels to any time series or alerts when communicating with
+ # external systems (federation, remote storage, Alertmanager).
+ external_labels:
+ monitor: 'codelab-monitor'
+
+# Load rules once and periodically evaluate them according to the global 'evaluation_interval'.
+rule_files:
+ # - "first.rules"
+ # - "second.rules"
+
+# A scrape configuration containing exactly one endpoint to scrape:
+# Here it's Prometheus itself.
+scrape_configs:
+ # The job name is added as a label `job=<job_name>` to any timeseries scraped from this config.
+ - job_name: 'prometheus'
+
+ # metrics_path defaults to '/metrics'
+ # scheme defaults to 'http'.
+
+ static_configs:
+ - targets: ['0.0.0.0:9090']
+
+ - job_name: 'netdata-scrape'
+
+ metrics_path: '/api/v1/allmetrics'
+ params:
+ # format: prometheus | prometheus_all_hosts
+ # You can use `prometheus_all_hosts` if you want Prometheus to set the `instance` to your hostname instead of IP
+ format: [prometheus]
+ #
+ # sources: as-collected | raw | average | sum | volume
+ # default is: average
+ #source: [as-collected]
+ #
+ # server name for this prometheus - the default is the client IP
+ # for Netdata to uniquely identify it
+ #server: ['prometheus1']
+ honor_labels: true
+
+ static_configs:
+ - targets: ['{your.netdata.ip}:19999']
+```
+
+#### Install nodes.yml
+
+The following is completely optional; it will enable Prometheus to generate alerts from some Netdata sources. Tweak the
+values to your own needs. We will use the `nodes.yml` file below. Save it at `/opt/prometheus/nodes.yml`, and
+add a _- "nodes.yml"_ entry under the _rule_files:_ section in the example prometheus.yml file above.
+
+```yaml
+groups:
+- name: nodes
+
+ rules:
+ - alert: node_high_cpu_usage_70
+ expr: avg(rate(netdata_cpu_cpu_percentage_average{dimension="idle"}[1m])) by (job) > 70
+ for: 1m
+ annotations:
+ description: '{{ $labels.job }} on ''{{ $labels.job }}'' CPU usage is at {{ humanize $value }}%.'
+ summary: CPU alert for container node '{{ $labels.job }}'
+
+ - alert: node_high_memory_usage_70
+ expr: 100 / sum(netdata_system_ram_MB_average) by (job)
+ * sum(netdata_system_ram_MB_average{dimension=~"free|cached"}) by (job) < 30
+ for: 1m
+ annotations:
+ description: '{{ $labels.job }} memory usage is {{ humanize $value}}%.'
+ summary: Memory alert for container node '{{ $labels.job }}'
+
+ - alert: node_low_root_filesystem_space_20
+ expr: 100 / sum(netdata_disk_space_GB_average{family="/"}) by (job)
+ * sum(netdata_disk_space_GB_average{family="/",dimension=~"avail|cached"}) by (job) < 20
+ for: 1m
+ annotations:
+ description: '{{ $labels.job }} root filesystem space is {{ humanize $value}}%.'
+ summary: Root filesystem alert for container node '{{ $labels.job }}'
+
+ - alert: node_root_filesystem_fill_rate_6h
+ expr: predict_linear(netdata_disk_space_GB_average{family="/",dimension=~"avail|cached"}[1h], 6 * 3600) < 0
+ for: 1h
+ labels:
+ severity: critical
+ annotations:
+ description: Container node {{ $labels.job }} root filesystem is going to fill up in 6h.
+ summary: Disk fill alert for Swarm node '{{ $labels.job }}'
+```
+
+#### Install prometheus.service
+
+Save this service file as `/etc/systemd/system/prometheus.service`:
+
+```sh
+[Unit]
+Description=Prometheus Server
+AssertPathExists=/opt/prometheus
+
+[Service]
+Type=simple
+WorkingDirectory=/opt/prometheus
+User=prometheus
+Group=prometheus
+ExecStart=/opt/prometheus/prometheus --config.file=/opt/prometheus/prometheus.yml --log.level=info
+ExecReload=/bin/kill -SIGHUP $MAINPID
+ExecStop=/bin/kill -SIGINT $MAINPID
+
+[Install]
+WantedBy=multi-user.target
+```
+
+##### Start Prometheus
+
+```sh
+sudo systemctl start prometheus
+sudo systemctl enable prometheus
+```
+
+Prometheus should now start and listen on port 9090. Point your browser there.
+
+If everything is working correctly, when you fetch `http://your.prometheus.ip:9090` you will see a 'Status' tab. Click
+it, then click on 'Targets'. You should see the Netdata host as a scraped target.
+
+---
+
+## Netdata support for Prometheus
+
+> IMPORTANT: the format in which Netdata sends metrics to Prometheus has changed since Netdata v1.6. The new format allows easier
+> queries for metrics and supports both `as collected` and normalized metrics.
+
+Before explaining the changes, we have to understand the key differences between Netdata and Prometheus.
+
+### Understanding Netdata metrics
+
+#### Charts
+
+Each chart in Netdata has several properties (common to all its metrics):
+
+- `chart_id` - uniquely identifies a chart.
+
+- `chart_name` - a more human-friendly name for `chart_id`, also unique.
+
+- `context` - this is the template of the chart. All disk I/O charts have the same context, all mysql requests charts
+ have the same context, etc. This is used for alarm templates to match all the charts they should be attached to.
+
+- `family` groups a set of charts together. It is used as the submenu of the dashboard.
+
+- `units` is the units for all the metrics attached to the chart.
+
+#### Dimensions
+
+Then each Netdata chart contains metrics called `dimensions`. All the dimensions of a chart have the same units of
+measurement, and are contextually in the same category (i.e. the metrics for disk bandwidth are `read` and `write`, and
+they are both in the same chart).
+
+### Netdata data source
+
+Netdata can send metrics to Prometheus from 3 data sources:
+
+- `as collected` or `raw` - this data source sends the metrics to Prometheus as they are collected. No conversion is
+  done by Netdata. The latest value for each metric is just given to Prometheus. This is the method preferred by
+  Prometheus, but it is also the hardest to work with. To work with this data source, you will need to understand how
+  to get meaningful values out of the raw counters.
+
+ The format of the metrics is: `CONTEXT{chart="CHART",family="FAMILY",dimension="DIMENSION"}`.
+
+  If the metric is a counter (`incremental` in Netdata lingo), `_total` is appended to the context.
+
+ Unlike Prometheus, Netdata allows each dimension of a chart to have a different algorithm and conversion constants
+  (`multiplier` and `divisor`). In the case that the dimensions of a chart are heterogeneous, Netdata will use this
+ format: `CONTEXT_DIMENSION{chart="CHART",family="FAMILY"}`
+
+- `average` - this data source uses the Netdata database to send the metrics to Prometheus as they are presented on
+ the Netdata dashboard. So, all the metrics are sent as gauges, at the units they are presented in the Netdata
+ dashboard charts. This is the easiest to work with.
+
+ The format of the metrics is: `CONTEXT_UNITS_average{chart="CHART",family="FAMILY",dimension="DIMENSION"}`.
+
+ When this source is used, Netdata keeps track of the last access time for each Prometheus server fetching the
+  metrics. This last access time is used on subsequent queries from the same Prometheus server to identify the
+  time frame over which the `average` will be calculated.
+
+ So, no matter how frequently Prometheus scrapes Netdata, it will get all the database data.
+ To identify each Prometheus server, Netdata uses by default the IP of the client fetching the metrics.
+
+ If there are multiple Prometheus servers fetching data from the same Netdata, using the same IP, each Prometheus
+ server can append `server=NAME` to the URL. Netdata will use this `NAME` to uniquely identify the Prometheus server.
+
+- `sum` or `volume` - like `average`, but instead of averaging the values, it sums them.
+
+ The format of the metrics is: `CONTEXT_UNITS_sum{chart="CHART",family="FAMILY",dimension="DIMENSION"}`. All the
+  other operations are the same as with `average`.
+
+ To change the data source to `sum` or `as-collected` you need to provide the `source` parameter in the request URL.
+ e.g.: `http://your.netdata.ip:19999/api/v1/allmetrics?format=prometheus&help=yes&source=as-collected`
+
+ Keep in mind that early versions of Netdata were sending the metrics as: `CHART_DIMENSION{}`.
+
+### Querying Metrics
+
+Fetch with your web browser this URL:
+
+`http://your.netdata.ip:19999/api/v1/allmetrics?format=prometheus&help=yes`
+
+_(replace `your.netdata.ip` with the IP or hostname of your Netdata server)_
+
+Netdata will respond with all the metrics it sends to Prometheus.
+
+If you search that page for `"system.cpu"` you will find all the metrics Netdata is exporting to Prometheus for this
+chart. `system.cpu` is the chart name on the Netdata dashboard (on the Netdata dashboard all charts have a text heading
+such as: `Total CPU utilization (system.cpu)`. What we are interested in here is the chart name: `system.cpu`).
+
+Searching for `"system.cpu"` reveals:
+
+```sh
+# COMMENT homogeneous chart "system.cpu", context "system.cpu", family "cpu", units "percentage"
+# COMMENT netdata_system_cpu_percentage_average: dimension "guest_nice", value is percentage, gauge, dt 1500066653 to 1500066662 inclusive
+netdata_system_cpu_percentage_average{chart="system.cpu",family="cpu",dimension="guest_nice"} 0.0000000 1500066662000
+# COMMENT netdata_system_cpu_percentage_average: dimension "guest", value is percentage, gauge, dt 1500066653 to 1500066662 inclusive
+netdata_system_cpu_percentage_average{chart="system.cpu",family="cpu",dimension="guest"} 1.7837326 1500066662000
+# COMMENT netdata_system_cpu_percentage_average: dimension "steal", value is percentage, gauge, dt 1500066653 to 1500066662 inclusive
+netdata_system_cpu_percentage_average{chart="system.cpu",family="cpu",dimension="steal"} 0.0000000 1500066662000
+# COMMENT netdata_system_cpu_percentage_average: dimension "softirq", value is percentage, gauge, dt 1500066653 to 1500066662 inclusive
+netdata_system_cpu_percentage_average{chart="system.cpu",family="cpu",dimension="softirq"} 0.5275442 1500066662000
+# COMMENT netdata_system_cpu_percentage_average: dimension "irq", value is percentage, gauge, dt 1500066653 to 1500066662 inclusive
+netdata_system_cpu_percentage_average{chart="system.cpu",family="cpu",dimension="irq"} 0.2260836 1500066662000
+# COMMENT netdata_system_cpu_percentage_average: dimension "user", value is percentage, gauge, dt 1500066653 to 1500066662 inclusive
+netdata_system_cpu_percentage_average{chart="system.cpu",family="cpu",dimension="user"} 2.3362762 1500066662000
+# COMMENT netdata_system_cpu_percentage_average: dimension "system", value is percentage, gauge, dt 1500066653 to 1500066662 inclusive
+netdata_system_cpu_percentage_average{chart="system.cpu",family="cpu",dimension="system"} 1.7961062 1500066662000
+# COMMENT netdata_system_cpu_percentage_average: dimension "nice", value is percentage, gauge, dt 1500066653 to 1500066662 inclusive
+netdata_system_cpu_percentage_average{chart="system.cpu",family="cpu",dimension="nice"} 0.0000000 1500066662000
+# COMMENT netdata_system_cpu_percentage_average: dimension "iowait", value is percentage, gauge, dt 1500066653 to 1500066662 inclusive
+netdata_system_cpu_percentage_average{chart="system.cpu",family="cpu",dimension="iowait"} 0.9671802 1500066662000
+# COMMENT netdata_system_cpu_percentage_average: dimension "idle", value is percentage, gauge, dt 1500066653 to 1500066662 inclusive
+netdata_system_cpu_percentage_average{chart="system.cpu",family="cpu",dimension="idle"} 92.3630770 1500066662000
+```
+
+_(Netdata response for `system.cpu` with source=`average`)_
+
+In `average` or `sum` data sources, all values are normalized and are reported to Prometheus as gauges. Now, use the
+'expression' text field in Prometheus. Begin to type the metric we are looking for: `netdata_system_cpu`. You should see
+that the text field begins to auto-fill as Prometheus knows about this metric.
+
+If the data source was `as collected`, the response would be:
+
+```sh
+# COMMENT homogeneous chart "system.cpu", context "system.cpu", family "cpu", units "percentage"
+# COMMENT netdata_system_cpu_total: chart "system.cpu", context "system.cpu", family "cpu", dimension "guest_nice", value * 1 / 1 delta gives percentage (counter)
+netdata_system_cpu_total{chart="system.cpu",family="cpu",dimension="guest_nice"} 0 1500066716438
+# COMMENT netdata_system_cpu_total: chart "system.cpu", context "system.cpu", family "cpu", dimension "guest", value * 1 / 1 delta gives percentage (counter)
+netdata_system_cpu_total{chart="system.cpu",family="cpu",dimension="guest"} 63945 1500066716438
+# COMMENT netdata_system_cpu_total: chart "system.cpu", context "system.cpu", family "cpu", dimension "steal", value * 1 / 1 delta gives percentage (counter)
+netdata_system_cpu_total{chart="system.cpu",family="cpu",dimension="steal"} 0 1500066716438
+# COMMENT netdata_system_cpu_total: chart "system.cpu", context "system.cpu", family "cpu", dimension "softirq", value * 1 / 1 delta gives percentage (counter)
+netdata_system_cpu_total{chart="system.cpu",family="cpu",dimension="softirq"} 8295 1500066716438
+# COMMENT netdata_system_cpu_total: chart "system.cpu", context "system.cpu", family "cpu", dimension "irq", value * 1 / 1 delta gives percentage (counter)
+netdata_system_cpu_total{chart="system.cpu",family="cpu",dimension="irq"} 4079 1500066716438
+# COMMENT netdata_system_cpu_total: chart "system.cpu", context "system.cpu", family "cpu", dimension "user", value * 1 / 1 delta gives percentage (counter)
+netdata_system_cpu_total{chart="system.cpu",family="cpu",dimension="user"} 116488 1500066716438
+# COMMENT netdata_system_cpu_total: chart "system.cpu", context "system.cpu", family "cpu", dimension "system", value * 1 / 1 delta gives percentage (counter)
+netdata_system_cpu_total{chart="system.cpu",family="cpu",dimension="system"} 35084 1500066716438
+# COMMENT netdata_system_cpu_total: chart "system.cpu", context "system.cpu", family "cpu", dimension "nice", value * 1 / 1 delta gives percentage (counter)
+netdata_system_cpu_total{chart="system.cpu",family="cpu",dimension="nice"} 505 1500066716438
+# COMMENT netdata_system_cpu_total: chart "system.cpu", context "system.cpu", family "cpu", dimension "iowait", value * 1 / 1 delta gives percentage (counter)
+netdata_system_cpu_total{chart="system.cpu",family="cpu",dimension="iowait"} 23314 1500066716438
+# COMMENT netdata_system_cpu_total: chart "system.cpu", context "system.cpu", family "cpu", dimension "idle", value * 1 / 1 delta gives percentage (counter)
+netdata_system_cpu_total{chart="system.cpu",family="cpu",dimension="idle"} 918470 1500066716438
+```
+
+_(Netdata response for `system.cpu` with source=`as-collected`)_
+
+For more information, check the Prometheus documentation.
+
+### Streaming data from upstream hosts
+
+The `format=prometheus` parameter only exports the host's Netdata metrics. If you are using the parent-child
+functionality of Netdata, this ignores any upstream hosts, so you should consider using the following in your
+**prometheus.yml**:
+
+```yaml
+ metrics_path: '/api/v1/allmetrics'
+ params:
+ format: [prometheus_all_hosts]
+ honor_labels: true
+```
+
+This will report all upstream host data, and `honor_labels` will make Prometheus take note of the instance names
+provided.
+
+### Timestamps
+
+To pass the metrics through the Prometheus Pushgateway, Netdata supports the option `&timestamps=no` to send the metrics
+without timestamps.
+
+## Netdata host variables
+
+Netdata collects various system configuration metrics, like the max number of TCP sockets supported, the max number of
+files allowed system-wide, various IPC sizes, etc. These metrics are not exposed to Prometheus by default.
+
+To expose them, append `variables=yes` to the Netdata URL.
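+For example: `http://your.netdata.ip:19999/api/v1/allmetrics?format=prometheus&variables=yes`.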
+
+### TYPE and HELP
+
+To save bandwidth, and because Prometheus does not use them anyway, `# TYPE` and `# HELP` lines are suppressed. If
+wanted, they can be re-enabled via `types=yes` and `help=yes`, e.g.
+`/api/v1/allmetrics?format=prometheus&types=yes&help=yes`
+
+Note that if enabled, the `# TYPE` and `# HELP` lines are repeated for every occurrence of a metric, which goes against the Prometheus documentation's [specification for these lines](https://github.com/prometheus/docs/blob/master/content/docs/instrumenting/exposition_formats.md#comments-help-text-and-type-information).
+
+### Names and IDs
+
+Netdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and
+names are human-friendly labels (also unique).
+
+Most charts and metrics have the same ID and name, but in several cases they are different: disks with device-mapper,
+interrupts, QoS classes, statsd synthetic charts, etc.
+
+The default is controlled in `exporting.conf`:
+
+```conf
+[prometheus:exporter]
+ send names instead of ids = yes | no
+```
+
+You can override it from Prometheus by appending to the URL:
+
+- `&names=no` to get IDs (the old behaviour)
+- `&names=yes` to get names
+
+### Filtering metrics sent to Prometheus
+
+Netdata can filter the metrics it sends to Prometheus with this setting:
+
+```conf
+[prometheus:exporter]
+ send charts matching = *
+```
+
+This setting accepts a space-separated list of [simple patterns](/libnetdata/simple_pattern/README.md) to match the
+**charts** to be sent to Prometheus. Each pattern can use `*` as a wildcard, any number of times (e.g. `*a*b*c*` is
+valid). Patterns starting with `!` give a negative match (e.g. `!*.bad users.* groups.*` will send all the `users.*`
+and `groups.*` charts except those matching `*.bad`). The order is important: the first match (positive or negative),
+left to right, is used.
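+
+As an illustrative sketch (the chart names are hypothetical), the following sends all `system.*` and `apps.*` charts
+except any chart matching `*uptime*`:
+
+```conf
+[prometheus:exporter]
+    send charts matching = !*uptime* system.* apps.*
+```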
+
+### Changing the prefix of Netdata metrics
+
+Netdata sends all metrics prefixed with `netdata_`. You can change this in `exporting.conf`, like this:
+
+```conf
+[prometheus:exporter]
+ prefix = netdata
+```
+
+It can also be changed from the URL, by appending `&prefix=netdata`.
+
+### Metric Units
+
+The default source `average` adds the unit of measurement to the name of each metric (e.g. `_KiB_persec`). To hide the
+units and get the same metric names as with the other sources, append `&hideunits=yes` to the URL.
+
+The units were standardized in v1.12, with the effect of changing the metric names. To get the metric names as they
+were before v1.12, append `&oldunits=yes` to the URL.
+
+### Accuracy of `average` and `sum` data sources
+
+When the data source is set to `average` or `sum`, Netdata remembers the last access time of each client fetching
+Prometheus metrics, and responds with the `average` or `sum` of all the entries in the database since that access.
+This means that Prometheus servers do not lose data when they access Netdata with data source = `average` or
+`sum`.
+
+To uniquely identify each Prometheus server, Netdata uses the IP of the client accessing the metrics. If, however, the
+IP is not enough to identify a single Prometheus server (e.g. when Prometheus servers access Netdata through a web
+proxy, or when multiple Prometheus servers are NATed to a single IP), each Prometheus server may append `&server=NAME`
+to the URL. Netdata uses this `NAME` to uniquely identify the Prometheus server and keep track of its last access
+time.
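+
+In **prometheus.yml** this can be set per scrape job. A minimal sketch, assuming a hypothetical server name
+`prometheus-a`:
+
+```yaml
+  params:
+    format: [prometheus]
+    server: ['prometheus-a']
+```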
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fexporting%2Fprometheus%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/exporting/prometheus/prometheus.c b/exporting/prometheus/prometheus.c
new file mode 100644
index 0000000..371f5a5
--- /dev/null
+++ b/exporting/prometheus/prometheus.c
@@ -0,0 +1,916 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#define EXPORTINGS_INTERNALS
+#include "prometheus.h"
+
+// ----------------------------------------------------------------------------
+// PROMETHEUS
+// /api/v1/allmetrics?format=prometheus and /api/v1/allmetrics?format=prometheus_all_hosts
+
+/**
+ * Check if a chart can be sent to Prometheus
+ *
+ * @param instance an instance data structure.
+ * @param st a chart.
+ * @return Returns 1 if the chart can be sent, 0 otherwise.
+ */
+inline int can_send_rrdset(struct instance *instance, RRDSET *st)
+{
+ RRDHOST *host = st->rrdhost;
+
+ if (unlikely(rrdset_flag_check(st, RRDSET_FLAG_BACKEND_IGNORE)))
+ return 0;
+
+ if (unlikely(!rrdset_flag_check(st, RRDSET_FLAG_BACKEND_SEND))) {
+ // we have not checked this chart
+ if (simple_pattern_matches(instance->config.charts_pattern, st->id) ||
+ simple_pattern_matches(instance->config.charts_pattern, st->name))
+ rrdset_flag_set(st, RRDSET_FLAG_BACKEND_SEND);
+ else {
+ rrdset_flag_set(st, RRDSET_FLAG_BACKEND_IGNORE);
+ debug(
+ D_BACKEND,
+ "EXPORTING: not sending chart '%s' of host '%s', because it is disabled for exporting.",
+ st->id,
+ host->hostname);
+ return 0;
+ }
+ }
+
+ if (unlikely(!rrdset_is_available_for_backends(st))) {
+ debug(
+ D_BACKEND,
+ "EXPORTING: not sending chart '%s' of host '%s', because it is not available for exporting.",
+ st->id,
+ host->hostname);
+ return 0;
+ }
+
+ if (unlikely(
+ st->rrd_memory_mode == RRD_MEMORY_MODE_NONE &&
+ !(EXPORTING_OPTIONS_DATA_SOURCE(instance->config.options) == EXPORTING_SOURCE_DATA_AS_COLLECTED))) {
+ debug(
+ D_BACKEND,
+ "EXPORTING: not sending chart '%s' of host '%s' because its memory mode is '%s' and the exporting connector requires database access.",
+ st->id,
+ host->hostname,
+            rrd_memory_mode_name(st->rrd_memory_mode));
+ return 0;
+ }
+
+ return 1;
+}
+
+static struct prometheus_server {
+ const char *server;
+ uint32_t hash;
+ RRDHOST *host;
+ time_t last_access;
+ struct prometheus_server *next;
+} *prometheus_server_root = NULL;
+
+static netdata_mutex_t prometheus_server_root_mutex = NETDATA_MUTEX_INITIALIZER;
+
+/**
+ * Clean server root local structure
+ */
+void prometheus_clean_server_root()
+{
+ if (prometheus_server_root) {
+ netdata_mutex_lock(&prometheus_server_root_mutex);
+
+ struct prometheus_server *ps;
+ for (ps = prometheus_server_root; ps; ) {
+ struct prometheus_server *current = ps;
+ ps = ps->next;
+ if(current->server)
+ freez((void *)current->server);
+
+ freez(current);
+ }
+ prometheus_server_root = NULL;
+ netdata_mutex_unlock(&prometheus_server_root_mutex);
+ }
+}
+
+/**
+ * Get the last time when a Prometheus server scraped the Netdata Prometheus exporter.
+ *
+ * @param server the name of the Prometheus server.
+ * @param host a data collecting host.
+ * @param now actual time.
+ * @return Returns the last time when the server accessed Netdata, or 0 if it is the first occurrence.
+ */
+static inline time_t prometheus_server_last_access(const char *server, RRDHOST *host, time_t now)
+{
+#ifdef UNIT_TESTING
+ return 0;
+#endif
+ uint32_t hash = simple_hash(server);
+
+ netdata_mutex_lock(&prometheus_server_root_mutex);
+
+ struct prometheus_server *ps;
+ for (ps = prometheus_server_root; ps; ps = ps->next) {
+ if (host == ps->host && hash == ps->hash && !strcmp(server, ps->server)) {
+ time_t last = ps->last_access;
+ ps->last_access = now;
+ netdata_mutex_unlock(&prometheus_server_root_mutex);
+ return last;
+ }
+ }
+
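+    // not found - allocate a new entry for this server and link it at the list head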
+ ps = callocz(1, sizeof(struct prometheus_server));
+ ps->server = strdupz(server);
+ ps->hash = hash;
+ ps->host = host;
+ ps->last_access = now;
+ ps->next = prometheus_server_root;
+ prometheus_server_root = ps;
+
+ netdata_mutex_unlock(&prometheus_server_root_mutex);
+ return 0;
+}
+
+/**
+ * Copy and sanitize name.
+ *
+ * @param d a destination string.
+ * @param s a source string.
+ * @param usable the number of characters to copy.
+ * @return Returns the length of the copied string.
+ */
+inline size_t prometheus_name_copy(char *d, const char *s, size_t usable)
+{
+ size_t n;
+
+ for (n = 0; *s && n < usable; d++, s++, n++) {
+ register char c = *s;
+
+ if (!isalnum(c))
+ *d = '_';
+ else
+ *d = c;
+ }
+ *d = '\0';
+
+ return n;
+}
+
+/**
+ * Copy and sanitize label.
+ *
+ * @param d a destination string.
+ * @param s a source string.
+ * @param usable the number of characters to copy.
+ * @return Returns the length of the copied string.
+ */
+inline size_t prometheus_label_copy(char *d, const char *s, size_t usable)
+{
+ size_t n;
+
+ // make sure we can escape one character without overflowing the buffer
+ usable--;
+
+ for (n = 0; *s && n < usable; d++, s++, n++) {
+ register char c = *s;
+
+ if (unlikely(c == '"' || c == '\\' || c == '\n')) {
+ *d++ = '\\';
+ n++;
+ }
+ *d = c;
+ }
+ *d = '\0';
+
+ return n;
+}
+
+/**
+ * Copy and sanitize units.
+ *
+ * @param d a destination string.
+ * @param s a source string.
+ * @param usable the number of characters to copy.
+ * @param showoldunits set this flag to 1 to show old (before v1.12) units.
+ * @return Returns the destination string.
+ */
+inline char *prometheus_units_copy(char *d, const char *s, size_t usable, int showoldunits)
+{
+ const char *sorig = s;
+ char *ret = d;
+ size_t n;
+
+ // Fix for issue 5227
+ if (unlikely(showoldunits)) {
+ static struct {
+ const char *newunit;
+ uint32_t hash;
+ const char *oldunit;
+ } units[] = { { "KiB/s", 0, "kilobytes/s" },
+ { "MiB/s", 0, "MB/s" },
+ { "GiB/s", 0, "GB/s" },
+ { "KiB", 0, "KB" },
+ { "MiB", 0, "MB" },
+ { "GiB", 0, "GB" },
+ { "inodes", 0, "Inodes" },
+ { "percentage", 0, "percent" },
+ { "faults/s", 0, "page faults/s" },
+ { "KiB/operation", 0, "kilobytes per operation" },
+ { "milliseconds/operation", 0, "ms per operation" },
+ { NULL, 0, NULL } };
+ static int initialized = 0;
+ int i;
+
+ if (unlikely(!initialized)) {
+ for (i = 0; units[i].newunit; i++)
+ units[i].hash = simple_hash(units[i].newunit);
+ initialized = 1;
+ }
+
+ uint32_t hash = simple_hash(s);
+ for (i = 0; units[i].newunit; i++) {
+ if (unlikely(hash == units[i].hash && !strcmp(s, units[i].newunit))) {
+ s = units[i].oldunit;
+ sorig = s;
+ break;
+ }
+ }
+ }
+ *d++ = '_';
+ for (n = 1; *s && n < usable; d++, s++, n++) {
+ register char c = *s;
+
+ if (!isalnum(c))
+ *d = '_';
+ else
+ *d = c;
+ }
+
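+    // special cases: a bare "%" unit becomes "_percent" and a trailing "/s" becomes "_persec"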
+ if (n == 2 && sorig[0] == '%') {
+ n = 0;
+ d = ret;
+ s = "_percent";
+ for (; *s && n < usable; n++)
+ *d++ = *s++;
+ } else if (n > 3 && sorig[n - 3] == '/' && sorig[n - 2] == 's') {
+ n = n - 2;
+ d -= 2;
+ s = "_persec";
+ for (; *s && n < usable; n++)
+ *d++ = *s++;
+ }
+
+ *d = '\0';
+
+ return ret;
+}
+
+/**
+ * Format host labels for the Prometheus exporter
+ *
+ * @param instance an instance data structure.
+ * @param host a data collecting host.
+ */
+void format_host_labels_prometheus(struct instance *instance, RRDHOST *host)
+{
+ if (unlikely(!sending_labels_configured(instance)))
+ return;
+
+ if (!instance->labels)
+ instance->labels = buffer_create(1024);
+
+ int count = 0;
+ rrdhost_check_rdlock(host);
+ netdata_rwlock_rdlock(&host->labels.labels_rwlock);
+ for (struct label *label = host->labels.head; label; label = label->next) {
+ if (!should_send_label(instance, label))
+ continue;
+
+ char key[PROMETHEUS_ELEMENT_MAX + 1];
+ char value[PROMETHEUS_ELEMENT_MAX + 1];
+
+ prometheus_name_copy(key, label->key, PROMETHEUS_ELEMENT_MAX);
+ prometheus_label_copy(value, label->value, PROMETHEUS_ELEMENT_MAX);
+
+ if (*key && *value) {
+ if (count > 0)
+ buffer_strcat(instance->labels, ",");
+ buffer_sprintf(instance->labels, "%s=\"%s\"", key, value);
+ count++;
+ }
+ }
+ netdata_rwlock_unlock(&host->labels.labels_rwlock);
+}
+
+struct host_variables_callback_options {
+ RRDHOST *host;
+ BUFFER *wb;
+ EXPORTING_OPTIONS exporting_options;
+ PROMETHEUS_OUTPUT_OPTIONS output_options;
+ const char *prefix;
+ const char *labels;
+ time_t now;
+ int host_header_printed;
+ char name[PROMETHEUS_VARIABLE_MAX + 1];
+};
+
+/**
+ * Print host variables.
+ *
+ * @param rv a variable.
+ * @param data callback options.
+ * @return Returns 1 if the chart can be sent, 0 otherwise.
+ */
+static int print_host_variables(RRDVAR *rv, void *data)
+{
+ struct host_variables_callback_options *opts = data;
+
+ if (rv->options & (RRDVAR_OPTION_CUSTOM_HOST_VAR | RRDVAR_OPTION_CUSTOM_CHART_VAR)) {
+ if (!opts->host_header_printed) {
+ opts->host_header_printed = 1;
+
+ if (opts->output_options & PROMETHEUS_OUTPUT_HELP) {
+ buffer_sprintf(opts->wb, "\n# COMMENT global host and chart variables\n");
+ }
+ }
+
+ calculated_number value = rrdvar2number(rv);
+ if (isnan(value) || isinf(value)) {
+ if (opts->output_options & PROMETHEUS_OUTPUT_HELP)
+ buffer_sprintf(
+ opts->wb, "# COMMENT variable \"%s\" is %s. Skipped.\n", rv->name, (isnan(value)) ? "NAN" : "INF");
+
+ return 0;
+ }
+
+ char *label_pre = "";
+ char *label_post = "";
+ if (opts->labels && *opts->labels) {
+ label_pre = "{";
+ label_post = "}";
+ }
+
+ prometheus_name_copy(opts->name, rv->name, sizeof(opts->name));
+
+ if (opts->output_options & PROMETHEUS_OUTPUT_TIMESTAMPS)
+ buffer_sprintf(
+ opts->wb,
+ "%s_%s%s%s%s " CALCULATED_NUMBER_FORMAT " %llu\n",
+ opts->prefix,
+ opts->name,
+ label_pre,
+ opts->labels,
+ label_post,
+ value,
+ opts->now * 1000ULL);
+ else
+ buffer_sprintf(
+ opts->wb,
+ "%s_%s%s%s%s " CALCULATED_NUMBER_FORMAT "\n",
+ opts->prefix,
+ opts->name,
+ label_pre,
+ opts->labels,
+ label_post,
+ value);
+
+ return 1;
+ }
+
+ return 0;
+}
+
+struct gen_parameters {
+ const char *prefix;
+ char *context;
+ char *suffix;
+
+ char *chart;
+ char *dimension;
+ char *family;
+ char *labels;
+
+ PROMETHEUS_OUTPUT_OPTIONS output_options;
+ RRDSET *st;
+ RRDDIM *rd;
+
+ const char *relation;
+ const char *type;
+};
+
+/**
+ * Write an as-collected help comment to a buffer.
+ *
+ * @param wb the buffer to write the comment to.
+ * @param p parameters for generating the comment string.
+ * @param homogeneous a flag for homogeneous charts.
+ * @param prometheus_collector a flag for metrics from prometheus collector.
+ */
+static void generate_as_collected_prom_help(BUFFER *wb, struct gen_parameters *p, int homogeneous, int prometheus_collector)
+{
+ buffer_sprintf(wb, "# COMMENT %s_%s", p->prefix, p->context);
+
+ if (!homogeneous)
+ buffer_sprintf(wb, "_%s", p->dimension);
+
+ buffer_sprintf(
+ wb,
+ "%s: chart \"%s\", context \"%s\", family \"%s\", dimension \"%s\", value * ",
+ p->suffix,
+ (p->output_options & PROMETHEUS_OUTPUT_NAMES && p->st->name) ? p->st->name : p->st->id,
+ p->st->context,
+ p->st->family,
+ (p->output_options & PROMETHEUS_OUTPUT_NAMES && p->rd->name) ? p->rd->name : p->rd->id);
+
+ if (prometheus_collector)
+ buffer_sprintf(wb, "1 / 1");
+ else
+ buffer_sprintf(wb, COLLECTED_NUMBER_FORMAT " / " COLLECTED_NUMBER_FORMAT, p->rd->multiplier, p->rd->divisor);
+
+ buffer_sprintf(wb, " %s %s (%s)\n", p->relation, p->st->units, p->type);
+}
+
+/**
+ * Write an as-collected metric to a buffer.
+ *
+ * @param wb the buffer to write the metric to.
+ * @param p parameters for generating the metric string.
+ * @param homogeneous a flag for homogeneous charts.
+ * @param prometheus_collector a flag for metrics from prometheus collector.
+ */
+static void generate_as_collected_prom_metric(BUFFER *wb, struct gen_parameters *p, int homogeneous, int prometheus_collector)
+{
+ buffer_sprintf(wb, "%s_%s", p->prefix, p->context);
+
+ if (!homogeneous)
+ buffer_sprintf(wb, "_%s", p->dimension);
+
+ buffer_sprintf(wb, "%s{chart=\"%s\",family=\"%s\"", p->suffix, p->chart, p->family);
+
+ if (homogeneous)
+ buffer_sprintf(wb, ",dimension=\"%s\"", p->dimension);
+
+ buffer_sprintf(wb, "%s} ", p->labels);
+
+ if (prometheus_collector)
+ buffer_sprintf(
+ wb,
+ CALCULATED_NUMBER_FORMAT,
+ (calculated_number)p->rd->last_collected_value * (calculated_number)p->rd->multiplier /
+ (calculated_number)p->rd->divisor);
+ else
+ buffer_sprintf(wb, COLLECTED_NUMBER_FORMAT, p->rd->last_collected_value);
+
+ if (p->output_options & PROMETHEUS_OUTPUT_TIMESTAMPS)
+ buffer_sprintf(wb, " %llu\n", timeval_msec(&p->rd->last_collected_time));
+ else
+ buffer_sprintf(wb, "\n");
+}
+
+/**
+ * Write metrics in Prometheus format to a buffer.
+ *
+ * @param instance an instance data structure.
+ * @param host a data collecting host.
+ * @param wb the buffer to fill with metrics.
+ * @param prefix a prefix for every metric.
+ * @param exporting_options options to configure what data is exported.
+ * @param allhosts set to 1 if host instance should be in the output for tags.
+ * @param output_options options to configure the format of the output.
+ */
+static void rrd_stats_api_v1_charts_allmetrics_prometheus(
+ struct instance *instance,
+ RRDHOST *host,
+ BUFFER *wb,
+ const char *prefix,
+ EXPORTING_OPTIONS exporting_options,
+ int allhosts,
+ PROMETHEUS_OUTPUT_OPTIONS output_options)
+{
+ rrdhost_rdlock(host);
+
+ char hostname[PROMETHEUS_ELEMENT_MAX + 1];
+ prometheus_label_copy(hostname, host->hostname, PROMETHEUS_ELEMENT_MAX);
+
+ format_host_labels_prometheus(instance, host);
+
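+    // emit the netdata_info metric, which identifies this host to Prometheus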
+ if (output_options & PROMETHEUS_OUTPUT_TIMESTAMPS)
+ buffer_sprintf(
+ wb,
+ "netdata_info{instance=\"%s\",application=\"%s\",version=\"%s\"} 1 %llu\n",
+ hostname,
+ host->program_name,
+ host->program_version,
+ now_realtime_usec() / USEC_PER_MS);
+ else
+ buffer_sprintf(
+ wb,
+ "netdata_info{instance=\"%s\",application=\"%s\",version=\"%s\"} 1\n",
+ hostname,
+ host->program_name,
+ host->program_version);
+
+ char labels[PROMETHEUS_LABELS_MAX + 1] = "";
+ if (allhosts) {
+ if (instance->labels && buffer_tostring(instance->labels)) {
+ if (output_options & PROMETHEUS_OUTPUT_TIMESTAMPS) {
+ buffer_sprintf(
+ wb,
+ "netdata_host_tags_info{instance=\"%s\",%s} 1 %llu\n",
+ hostname,
+ buffer_tostring(instance->labels),
+ now_realtime_usec() / USEC_PER_MS);
+
+ // deprecated, exists only for compatibility with older queries
+ buffer_sprintf(
+ wb,
+ "netdata_host_tags{instance=\"%s\",%s} 1 %llu\n",
+ hostname,
+ buffer_tostring(instance->labels),
+ now_realtime_usec() / USEC_PER_MS);
+ } else {
+ buffer_sprintf(
+ wb, "netdata_host_tags_info{instance=\"%s\",%s} 1\n", hostname, buffer_tostring(instance->labels));
+
+ // deprecated, exists only for compatibility with older queries
+ buffer_sprintf(
+ wb, "netdata_host_tags{instance=\"%s\",%s} 1\n", hostname, buffer_tostring(instance->labels));
+ }
+ }
+
+ snprintfz(labels, PROMETHEUS_LABELS_MAX, ",instance=\"%s\"", hostname);
+ } else {
+ if (instance->labels && buffer_tostring(instance->labels)) {
+ if (output_options & PROMETHEUS_OUTPUT_TIMESTAMPS) {
+ buffer_sprintf(
+ wb,
+ "netdata_host_tags_info{%s} 1 %llu\n",
+ buffer_tostring(instance->labels),
+ now_realtime_usec() / USEC_PER_MS);
+
+ // deprecated, exists only for compatibility with older queries
+ buffer_sprintf(
+ wb,
+ "netdata_host_tags{%s} 1 %llu\n",
+ buffer_tostring(instance->labels),
+ now_realtime_usec() / USEC_PER_MS);
+ } else {
+ buffer_sprintf(wb, "netdata_host_tags_info{%s} 1\n", buffer_tostring(instance->labels));
+
+ // deprecated, exists only for compatibility with older queries
+ buffer_sprintf(wb, "netdata_host_tags{%s} 1\n", buffer_tostring(instance->labels));
+ }
+ }
+ }
+
+ if (instance->labels)
+ buffer_flush(instance->labels);
+
+ // send custom variables set for the host
+ if (output_options & PROMETHEUS_OUTPUT_VARIABLES) {
+ struct host_variables_callback_options opts = { .host = host,
+ .wb = wb,
+ .labels = (labels[0] == ',') ? &labels[1] : labels,
+ .exporting_options = exporting_options,
+ .output_options = output_options,
+ .prefix = prefix,
+ .now = now_realtime_sec(),
+ .host_header_printed = 0 };
+ foreach_host_variable_callback(host, print_host_variables, &opts);
+ }
+
+ // for each chart
+ RRDSET *st;
+ rrdset_foreach_read(st, host)
+ {
+ if (likely(can_send_rrdset(instance, st))) {
+ rrdset_rdlock(st);
+
+ char chart[PROMETHEUS_ELEMENT_MAX + 1];
+ char context[PROMETHEUS_ELEMENT_MAX + 1];
+ char family[PROMETHEUS_ELEMENT_MAX + 1];
+ char units[PROMETHEUS_ELEMENT_MAX + 1] = "";
+
+ prometheus_label_copy(
+ chart, (output_options & PROMETHEUS_OUTPUT_NAMES && st->name) ? st->name : st->id, PROMETHEUS_ELEMENT_MAX);
+ prometheus_label_copy(family, st->family, PROMETHEUS_ELEMENT_MAX);
+ prometheus_name_copy(context, st->context, PROMETHEUS_ELEMENT_MAX);
+
+ int as_collected = (EXPORTING_OPTIONS_DATA_SOURCE(exporting_options) == EXPORTING_SOURCE_DATA_AS_COLLECTED);
+ int homogeneous = 1;
+ int prometheus_collector = 0;
+ if (as_collected) {
+ if (rrdset_flag_check(st, RRDSET_FLAG_HOMOGENEOUS_CHECK))
+ rrdset_update_heterogeneous_flag(st);
+
+ if (rrdset_flag_check(st, RRDSET_FLAG_HETEROGENEOUS))
+ homogeneous = 0;
+
+ if (st->module_name && !strcmp(st->module_name, "prometheus"))
+ prometheus_collector = 1;
+ } else {
+ if (EXPORTING_OPTIONS_DATA_SOURCE(exporting_options) == EXPORTING_SOURCE_DATA_AVERAGE &&
+ !(output_options & PROMETHEUS_OUTPUT_HIDEUNITS))
+ prometheus_units_copy(
+ units, st->units, PROMETHEUS_ELEMENT_MAX, output_options & PROMETHEUS_OUTPUT_OLDUNITS);
+ }
+
+ if (unlikely(output_options & PROMETHEUS_OUTPUT_HELP))
+ buffer_sprintf(
+ wb,
+ "\n# COMMENT %s chart \"%s\", context \"%s\", family \"%s\", units \"%s\"\n",
+ (homogeneous) ? "homogeneous" : "heterogeneous",
+ (output_options & PROMETHEUS_OUTPUT_NAMES && st->name) ? st->name : st->id,
+ st->context,
+ st->family,
+ st->units);
+
+ // for each dimension
+ RRDDIM *rd;
+ rrddim_foreach_read(rd, st)
+ {
+ if (rd->collections_counter && !rrddim_flag_check(rd, RRDDIM_FLAG_OBSOLETE)) {
+ char dimension[PROMETHEUS_ELEMENT_MAX + 1];
+ char *suffix = "";
+
+ if (as_collected) {
+ // we need as-collected / raw data
+
+ struct gen_parameters p;
+ p.prefix = prefix;
+ p.context = context;
+ p.suffix = suffix;
+ p.chart = chart;
+ p.dimension = dimension;
+ p.family = family;
+ p.labels = labels;
+ p.output_options = output_options;
+ p.st = st;
+ p.rd = rd;
+
+ if (unlikely(rd->last_collected_time.tv_sec < instance->after))
+ continue;
+
+ p.type = "gauge";
+ p.relation = "gives";
+ if (rd->algorithm == RRD_ALGORITHM_INCREMENTAL ||
+ rd->algorithm == RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL) {
+ p.type = "counter";
+ p.relation = "delta gives";
+ p.suffix = "_total";
+ }
+
+ if (homogeneous) {
+                            // all the dimensions of the chart have the same algorithm, multiplier and divisor
+ // we add all dimensions as labels
+
+ prometheus_label_copy(
+ dimension,
+ (output_options & PROMETHEUS_OUTPUT_NAMES && rd->name) ? rd->name : rd->id,
+ PROMETHEUS_ELEMENT_MAX);
+
+ if (unlikely(output_options & PROMETHEUS_OUTPUT_HELP))
+ generate_as_collected_prom_help(wb, &p, homogeneous, prometheus_collector);
+
+ if (unlikely(output_options & PROMETHEUS_OUTPUT_TYPES))
+ buffer_sprintf(wb, "# TYPE %s_%s%s %s\n", prefix, context, suffix, p.type);
+
+ generate_as_collected_prom_metric(wb, &p, homogeneous, prometheus_collector);
+ } else {
+                            // the dimensions of the chart do not have the same algorithm, multiplier or divisor
+ // we create a metric per dimension
+
+ prometheus_name_copy(
+ dimension,
+ (output_options & PROMETHEUS_OUTPUT_NAMES && rd->name) ? rd->name : rd->id,
+ PROMETHEUS_ELEMENT_MAX);
+
+ if (unlikely(output_options & PROMETHEUS_OUTPUT_HELP))
+ generate_as_collected_prom_help(wb, &p, homogeneous, prometheus_collector);
+
+ if (unlikely(output_options & PROMETHEUS_OUTPUT_TYPES))
+ buffer_sprintf(
+ wb, "# TYPE %s_%s_%s%s %s\n", prefix, context, dimension, suffix, p.type);
+
+ generate_as_collected_prom_metric(wb, &p, homogeneous, prometheus_collector);
+ }
+ } else {
+ // we need average or sum of the data
+
+ time_t first_time = instance->after;
+ time_t last_time = instance->before;
+ calculated_number value = exporting_calculate_value_from_stored_data(instance, rd, &last_time);
+
+ if (!isnan(value) && !isinf(value)) {
+ if (EXPORTING_OPTIONS_DATA_SOURCE(exporting_options) == EXPORTING_SOURCE_DATA_AVERAGE)
+ suffix = "_average";
+ else if (EXPORTING_OPTIONS_DATA_SOURCE(exporting_options) == EXPORTING_SOURCE_DATA_SUM)
+ suffix = "_sum";
+
+ prometheus_label_copy(
+ dimension,
+ (output_options & PROMETHEUS_OUTPUT_NAMES && rd->name) ? rd->name : rd->id,
+ PROMETHEUS_ELEMENT_MAX);
+
+ if (unlikely(output_options & PROMETHEUS_OUTPUT_HELP))
+ buffer_sprintf(
+ wb,
+ "# COMMENT %s_%s%s%s: dimension \"%s\", value is %s, gauge, dt %llu to %llu inclusive\n",
+ prefix,
+ context,
+ units,
+ suffix,
+ (output_options & PROMETHEUS_OUTPUT_NAMES && rd->name) ? rd->name : rd->id,
+ st->units,
+ (unsigned long long)first_time,
+ (unsigned long long)last_time);
+
+ if (unlikely(output_options & PROMETHEUS_OUTPUT_TYPES))
+ buffer_sprintf(wb, "# TYPE %s_%s%s%s gauge\n", prefix, context, units, suffix);
+
+ if (output_options & PROMETHEUS_OUTPUT_TIMESTAMPS)
+ buffer_sprintf(
+ wb,
+ "%s_%s%s%s{chart=\"%s\",family=\"%s\",dimension=\"%s\"%s} " CALCULATED_NUMBER_FORMAT
+ " %llu\n",
+ prefix,
+ context,
+ units,
+ suffix,
+ chart,
+ family,
+ dimension,
+ labels,
+ value,
+ last_time * MSEC_PER_SEC);
+ else
+ buffer_sprintf(
+ wb,
+ "%s_%s%s%s{chart=\"%s\",family=\"%s\",dimension=\"%s\"%s} " CALCULATED_NUMBER_FORMAT
+ "\n",
+ prefix,
+ context,
+ units,
+ suffix,
+ chart,
+ family,
+ dimension,
+ labels,
+ value);
+ }
+ }
+ }
+ }
+
+ rrdset_unlock(st);
+ }
+ }
+
+ rrdhost_unlock(host);
+}
+
+/**
+ * Get the last time a server accessed Netdata, and write information about the API request to a buffer.
+ *
+ * @param instance an instance data structure.
+ * @param host a data collecting host.
+ * @param wb the buffer to write to.
+ * @param exporting_options options to configure what data is exported.
+ * @param server the name of a Prometheus server.
+ * @param now actual time.
+ * @param output_options options to configure the format of the output.
+ * @return Returns the last time when the server accessed Netdata.
+ */
+static inline time_t prometheus_preparation(
+ struct instance *instance,
+ RRDHOST *host,
+ BUFFER *wb,
+ EXPORTING_OPTIONS exporting_options,
+ const char *server,
+ time_t now,
+ PROMETHEUS_OUTPUT_OPTIONS output_options)
+{
+ if (!server || !*server)
+ server = "default";
+
+ time_t after = prometheus_server_last_access(server, host, now);
+
+ int first_seen = 0;
+ if (!after) {
+ after = now - instance->config.update_every;
+ first_seen = 1;
+ }
+
+ if (after > now) {
+ // oops! this should never happen
+ after = now - instance->config.update_every;
+ }
+
+ if (output_options & PROMETHEUS_OUTPUT_HELP) {
+ char *mode;
+ if (EXPORTING_OPTIONS_DATA_SOURCE(exporting_options) == EXPORTING_SOURCE_DATA_AS_COLLECTED)
+ mode = "as collected";
+ else if (EXPORTING_OPTIONS_DATA_SOURCE(exporting_options) == EXPORTING_SOURCE_DATA_AVERAGE)
+ mode = "average";
+ else if (EXPORTING_OPTIONS_DATA_SOURCE(exporting_options) == EXPORTING_SOURCE_DATA_SUM)
+ mode = "sum";
+ else
+ mode = "unknown";
+
+ buffer_sprintf(
+ wb,
+ "# COMMENT netdata \"%s\" to %sprometheus \"%s\", source \"%s\", last seen %lu %s, time range %lu to %lu\n\n",
+ host->hostname,
+ (first_seen) ? "FIRST SEEN " : "",
+ server,
+ mode,
+ (unsigned long)((first_seen) ? 0 : (now - after)),
+ (first_seen) ? "never" : "seconds ago",
+ (unsigned long)after,
+ (unsigned long)now);
+ }
+
+ return after;
+}
+
+/**
+ * Write metrics and auxiliary information for one host to a buffer.
+ *
+ * @param host a data collecting host.
+ * @param wb the buffer to write to.
+ * @param server the name of a Prometheus server.
+ * @param prefix a prefix for every metric.
+ * @param exporting_options options to configure what data is exported.
+ * @param output_options options to configure the format of the output.
+ */
+void rrd_stats_api_v1_charts_allmetrics_prometheus_single_host(
+ RRDHOST *host,
+ BUFFER *wb,
+ const char *server,
+ const char *prefix,
+ EXPORTING_OPTIONS exporting_options,
+ PROMETHEUS_OUTPUT_OPTIONS output_options)
+{
+ if (unlikely(!prometheus_exporter_instance))
+ return;
+
+ prometheus_exporter_instance->before = now_realtime_sec();
+
+ // we start at the point we had stopped before
+ prometheus_exporter_instance->after = prometheus_preparation(
+ prometheus_exporter_instance,
+ host,
+ wb,
+ exporting_options,
+ server,
+ prometheus_exporter_instance->before,
+ output_options);
+
+ rrd_stats_api_v1_charts_allmetrics_prometheus(
+ prometheus_exporter_instance, host, wb, prefix, exporting_options, 0, output_options);
+}
+
+/**
+ * Write metrics and auxiliary information for all hosts to a buffer.
+ *
+ * @param host a data collecting host.
+ * @param wb the buffer to write to.
+ * @param server the name of a Prometheus server.
+ * @param prefix a prefix for every metric.
+ * @param exporting_options options to configure what data is exported.
+ * @param output_options options to configure the format of the output.
+ */
+void rrd_stats_api_v1_charts_allmetrics_prometheus_all_hosts(
+ RRDHOST *host,
+ BUFFER *wb,
+ const char *server,
+ const char *prefix,
+ EXPORTING_OPTIONS exporting_options,
+ PROMETHEUS_OUTPUT_OPTIONS output_options)
+{
+ if (unlikely(!prometheus_exporter_instance))
+ return;
+
+ prometheus_exporter_instance->before = now_realtime_sec();
+
+ // we start at the point we had stopped before
+ prometheus_exporter_instance->after = prometheus_preparation(
+ prometheus_exporter_instance,
+ host,
+ wb,
+ exporting_options,
+ server,
+ prometheus_exporter_instance->before,
+ output_options);
+
+ rrd_rdlock();
+ rrdhost_foreach_read(host)
+ {
+ rrd_stats_api_v1_charts_allmetrics_prometheus(
+ prometheus_exporter_instance, host, wb, prefix, exporting_options, 1, output_options);
+ }
+ rrd_unlock();
+}
diff --git a/exporting/prometheus/prometheus.h b/exporting/prometheus/prometheus.h
new file mode 100644
index 0000000..2f0845c
--- /dev/null
+++ b/exporting/prometheus/prometheus.h
@@ -0,0 +1,41 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_EXPORTING_PROMETHEUS_H
+#define NETDATA_EXPORTING_PROMETHEUS_H 1
+
+#include "exporting/exporting_engine.h"
+
+#define PROMETHEUS_ELEMENT_MAX 256
+#define PROMETHEUS_LABELS_MAX 1024
+#define PROMETHEUS_VARIABLE_MAX 256
+
+#define PROMETHEUS_LABELS_MAX_NUMBER 128
+
+typedef enum prometheus_output_flags {
+ PROMETHEUS_OUTPUT_NONE = 0,
+ PROMETHEUS_OUTPUT_HELP = (1 << 0),
+ PROMETHEUS_OUTPUT_TYPES = (1 << 1),
+ PROMETHEUS_OUTPUT_NAMES = (1 << 2),
+ PROMETHEUS_OUTPUT_TIMESTAMPS = (1 << 3),
+ PROMETHEUS_OUTPUT_VARIABLES = (1 << 4),
+ PROMETHEUS_OUTPUT_OLDUNITS = (1 << 5),
+ PROMETHEUS_OUTPUT_HIDEUNITS = (1 << 6)
+} PROMETHEUS_OUTPUT_OPTIONS;
+
+extern void rrd_stats_api_v1_charts_allmetrics_prometheus_single_host(
+ RRDHOST *host, BUFFER *wb, const char *server, const char *prefix,
+ EXPORTING_OPTIONS exporting_options, PROMETHEUS_OUTPUT_OPTIONS output_options);
+extern void rrd_stats_api_v1_charts_allmetrics_prometheus_all_hosts(
+ RRDHOST *host, BUFFER *wb, const char *server, const char *prefix,
+ EXPORTING_OPTIONS exporting_options, PROMETHEUS_OUTPUT_OPTIONS output_options);
+
+int can_send_rrdset(struct instance *instance, RRDSET *st);
+size_t prometheus_name_copy(char *d, const char *s, size_t usable);
+size_t prometheus_label_copy(char *d, const char *s, size_t usable);
+char *prometheus_units_copy(char *d, const char *s, size_t usable, int showoldunits);
+
+void format_host_labels_prometheus(struct instance *instance, RRDHOST *host);
+
+extern void prometheus_clean_server_root();
+
+#endif //NETDATA_EXPORTING_PROMETHEUS_H
diff --git a/exporting/prometheus/remote_write/Makefile.am b/exporting/prometheus/remote_write/Makefile.am
new file mode 100644
index 0000000..d049ef4
--- /dev/null
+++ b/exporting/prometheus/remote_write/Makefile.am
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+CLEANFILES = \
+ remote_write.pb.cc \
+ remote_write.pb.h \
+ $(NULL)
+
+dist_noinst_DATA = \
+ remote_write.proto \
+ README.md \
+ $(NULL)
diff --git a/exporting/prometheus/remote_write/README.md b/exporting/prometheus/remote_write/README.md
new file mode 100644
index 0000000..fe90102
--- /dev/null
+++ b/exporting/prometheus/remote_write/README.md
@@ -0,0 +1,51 @@
+<!--
+title: "Export metrics to Prometheus remote write providers"
+description: "Send Netdata metrics to your choice of more than 20 external storage providers for long-term archiving and further analysis."
+custom_edit_url: https://github.com/netdata/netdata/edit/master/exporting/prometheus/remote_write/README.md
+sidebar_label: Prometheus remote write
+-->
+
+# Prometheus remote write exporting connector
+
+The Prometheus remote write exporting connector uses the exporting engine to send Netdata metrics to your choice of more
+than 20 external storage providers for long-term archiving and further analysis.
+
+## Prerequisites
+
+To use the Prometheus remote write API with [storage
+providers](https://prometheus.io/docs/operating/integrations/#remote-endpoints-and-storage), install
+[protobuf](https://developers.google.com/protocol-buffers/) and [snappy](https://github.com/google/snappy) libraries.
+Next, [reinstall Netdata](/packaging/installer/REINSTALL.md), which detects that the required libraries and utilities
+are now available.
+
+## Configuration
+
+To enable data exporting to a storage provider using the Prometheus remote write API, run `./edit-config exporting.conf`
+in the Netdata configuration directory and set the following options:
+
+```conf
+[prometheus_remote_write:my_instance]
+ enabled = yes
+ destination = example.domain:example_port
+ remote write URL path = /receive
+```
+
+You can also add the `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example:
+`remote_write:https:my_instance`.
+
+`remote write URL path` is used to set an endpoint path for the remote write protocol. The default value is `/receive`.
+For example, if your endpoint is `http://example.domain:example_port/storage/read`:
+
+```conf
+ destination = example.domain:example_port
+ remote write URL path = /storage/read
+```
+
+The `buffered` and `lost` dimensions in the Netdata Exporting Connector Data Size operation monitoring chart estimate
+the uncompressed buffer size on failures.
+
+## Notes
+
+The remote write exporting connector does not support the `buffer on failures` option.
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fexporting%2Fprometheus%2Fremote_write%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/exporting/prometheus/remote_write/remote_write.c b/exporting/prometheus/remote_write/remote_write.c
new file mode 100644
index 0000000..30bd05a
--- /dev/null
+++ b/exporting/prometheus/remote_write/remote_write.c
@@ -0,0 +1,345 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "remote_write.h"
+
+static int as_collected;
+static int homogeneous;
+char context[PROMETHEUS_ELEMENT_MAX + 1];
+char chart[PROMETHEUS_ELEMENT_MAX + 1];
+char family[PROMETHEUS_ELEMENT_MAX + 1];
+char units[PROMETHEUS_ELEMENT_MAX + 1] = "";
+
+/**
+ * Prepare HTTP header
+ *
+ * @param instance an instance data structure.
+ */
+void prometheus_remote_write_prepare_header(struct instance *instance)
+{
+ struct prometheus_remote_write_specific_config *connector_specific_config =
+ instance->config.connector_specific_config;
+ struct simple_connector_data *simple_connector_data = instance->connector_specific_data;
+
+ buffer_sprintf(
+ simple_connector_data->last_buffer->header,
+ "POST %s HTTP/1.1\r\n"
+ "Host: %s\r\n"
+ "Accept: */*\r\n"
+ "Content-Encoding: snappy\r\n"
+ "Content-Type: application/x-protobuf\r\n"
+ "X-Prometheus-Remote-Write-Version: 0.1.0\r\n"
+ "Content-Length: %zu\r\n"
+ "\r\n",
+ connector_specific_config->remote_write_path,
+ instance->config.destination,
+ buffer_strlen(simple_connector_data->last_buffer->buffer));
+}
+
+/**
+ * Process a response received after the Prometheus remote write connector has sent data
+ *
+ * @param buffer a response from a remote service.
+ * @param instance an instance data structure.
+ * @return Returns 0 on success, 1 on failure.
+ */
+int process_prometheus_remote_write_response(BUFFER *buffer, struct instance *instance)
+{
+ if (unlikely(!buffer))
+ return 1;
+
+ const char *s = buffer_tostring(buffer);
+ int len = buffer_strlen(buffer);
+
+ // do nothing with HTTP responses 200 or 204
+
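+    // skip the leading protocol token (e.g. "HTTP/1.1") to reach the status code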
+ while (!isspace(*s) && len) {
+ s++;
+ len--;
+ }
+ s++;
+ len--;
+
+ if (likely(len > 4 && (!strncmp(s, "200 ", 4) || !strncmp(s, "204 ", 4))))
+ return 0;
+ else
+ return exporting_discard_response(buffer, instance);
+}
+
+/**
+ * Release specific data allocated.
+ *
+ * @param instance an instance data structure.
+ */
+void clean_prometheus_remote_write(struct instance *instance)
+{
+ struct simple_connector_data *simple_connector_data = instance->connector_specific_data;
+ freez(simple_connector_data->connector_specific_data);
+
+ struct prometheus_remote_write_specific_config *connector_specific_config =
+ instance->config.connector_specific_config;
+ freez(connector_specific_config->remote_write_path);
+}
+
+/**
+ * Initialize Prometheus Remote Write connector instance
+ *
+ * @param instance an instance data structure.
+ * @return Returns 0 on success, 1 on failure.
+ */
+int init_prometheus_remote_write_instance(struct instance *instance)
+{
+ instance->worker = simple_connector_worker;
+
+ instance->start_batch_formatting = NULL;
+ instance->start_host_formatting = format_host_prometheus_remote_write;
+ instance->start_chart_formatting = format_chart_prometheus_remote_write;
+ instance->metric_formatting = format_dimension_prometheus_remote_write;
+ instance->end_chart_formatting = NULL;
+ instance->end_host_formatting = NULL;
+ instance->end_batch_formatting = format_batch_prometheus_remote_write;
+
+ instance->prepare_header = prometheus_remote_write_prepare_header;
+ instance->check_response = process_prometheus_remote_write_response;
+
+ instance->buffer = (void *)buffer_create(0);
+
+ if (uv_mutex_init(&instance->mutex))
+ return 1;
+ if (uv_cond_init(&instance->cond_var))
+ return 1;
+
+ struct simple_connector_data *simple_connector_data = callocz(1, sizeof(struct simple_connector_data));
+ instance->connector_specific_data = simple_connector_data;
+
+#ifdef ENABLE_HTTPS
+ simple_connector_data->flags = NETDATA_SSL_START;
+ simple_connector_data->conn = NULL;
+ if (instance->config.options & EXPORTING_OPTION_USE_TLS) {
+ security_start_ssl(NETDATA_SSL_CONTEXT_EXPORTING);
+ }
+#endif
+
+ struct prometheus_remote_write_specific_data *connector_specific_data =
+ callocz(1, sizeof(struct prometheus_remote_write_specific_data));
+ simple_connector_data->connector_specific_data = (void *)connector_specific_data;
+
+ simple_connector_init(instance);
+
+ connector_specific_data->write_request = init_write_request();
+
+ instance->engine->protocol_buffers_initialized = 1;
+
+ return 0;
+}
+
+/**
+ * Format host data for Prometheus Remote Write connector
+ *
+ * @param instance an instance data structure.
+ * @param host a data collecting host.
+ * @return Always returns 0.
+ */
+int format_host_prometheus_remote_write(struct instance *instance, RRDHOST *host)
+{
+ struct simple_connector_data *simple_connector_data =
+ (struct simple_connector_data *)instance->connector_specific_data;
+ struct prometheus_remote_write_specific_data *connector_specific_data =
+ (struct prometheus_remote_write_specific_data *)simple_connector_data->connector_specific_data;
+
+ char hostname[PROMETHEUS_ELEMENT_MAX + 1];
+ prometheus_label_copy(
+ hostname,
+ (host == localhost) ? instance->config.hostname : host->hostname,
+ PROMETHEUS_ELEMENT_MAX);
+
+ add_host_info(
+ connector_specific_data->write_request,
+ "netdata_info", hostname, host->program_name, host->program_version, now_realtime_usec() / USEC_PER_MS);
+
+ if (unlikely(sending_labels_configured(instance))) {
+ rrdhost_check_rdlock(host);
+ netdata_rwlock_rdlock(&host->labels.labels_rwlock);
+ for (struct label *label = host->labels.head; label; label = label->next) {
+ if (!should_send_label(instance, label))
+ continue;
+
+ char key[PROMETHEUS_ELEMENT_MAX + 1];
+ prometheus_name_copy(key, label->key, PROMETHEUS_ELEMENT_MAX);
+
+ char value[PROMETHEUS_ELEMENT_MAX + 1];
+ prometheus_label_copy(value, label->value, PROMETHEUS_ELEMENT_MAX);
+
+ add_label(connector_specific_data->write_request, key, value);
+ }
+ netdata_rwlock_unlock(&host->labels.labels_rwlock);
+ }
+
+ return 0;
+}
+
+/**
+ * Format chart data for Prometheus Remote Write connector
+ *
+ * @param instance an instance data structure.
+ * @param st a chart.
+ * @return Always returns 0.
+ */
+int format_chart_prometheus_remote_write(struct instance *instance, RRDSET *st)
+{
+ prometheus_label_copy(
+ chart,
+ (instance->config.options & EXPORTING_OPTION_SEND_NAMES && st->name) ? st->name : st->id,
+ PROMETHEUS_ELEMENT_MAX);
+ prometheus_label_copy(family, st->family, PROMETHEUS_ELEMENT_MAX);
+ prometheus_name_copy(context, st->context, PROMETHEUS_ELEMENT_MAX);
+
+ as_collected = (EXPORTING_OPTIONS_DATA_SOURCE(instance->config.options) == EXPORTING_SOURCE_DATA_AS_COLLECTED);
+ homogeneous = 1;
+ if (as_collected) {
+ if (rrdset_flag_check(st, RRDSET_FLAG_HOMOGENEOUS_CHECK))
+ rrdset_update_heterogeneous_flag(st);
+
+ if (rrdset_flag_check(st, RRDSET_FLAG_HETEROGENEOUS))
+ homogeneous = 0;
+ } else {
+ if (EXPORTING_OPTIONS_DATA_SOURCE(instance->config.options) == EXPORTING_SOURCE_DATA_AVERAGE)
+ prometheus_units_copy(units, st->units, PROMETHEUS_ELEMENT_MAX, 0);
+ }
+
+ return 0;
+}
+
+/**
+ * Format dimension data for Prometheus Remote Write connector
+ *
+ * @param instance an instance data structure.
+ * @param rd a dimension.
+ * @return Always returns 0.
+ */
+int format_dimension_prometheus_remote_write(struct instance *instance, RRDDIM *rd)
+{
+ struct simple_connector_data *simple_connector_data =
+ (struct simple_connector_data *)instance->connector_specific_data;
+ struct prometheus_remote_write_specific_data *connector_specific_data =
+ (struct prometheus_remote_write_specific_data *)simple_connector_data->connector_specific_data;
+
+ if (rd->collections_counter && !rrddim_flag_check(rd, RRDDIM_FLAG_OBSOLETE)) {
+ char name[PROMETHEUS_LABELS_MAX + 1];
+ char dimension[PROMETHEUS_ELEMENT_MAX + 1];
+ char *suffix = "";
+ RRDHOST *host = rd->rrdset->rrdhost;
+
+ if (as_collected) {
+ // we need as-collected / raw data
+
+ if (unlikely(rd->last_collected_time.tv_sec < instance->after)) {
+ debug(
+ D_BACKEND,
+ "EXPORTING: not sending dimension '%s' of chart '%s' from host '%s', "
+ "its last data collection (%lu) is not within our timeframe (%lu to %lu)",
+ rd->id, rd->rrdset->id,
+ (host == localhost) ? instance->config.hostname : host->hostname,
+ (unsigned long)rd->last_collected_time.tv_sec,
+ (unsigned long)instance->after,
+ (unsigned long)instance->before);
+ return 0;
+ }
+
+ if (homogeneous) {
+                // all the dimensions of the chart have the same algorithm, multiplier and divisor
+ // we add all dimensions as labels
+
+ prometheus_label_copy(
+ dimension,
+ (instance->config.options & EXPORTING_OPTION_SEND_NAMES && rd->name) ? rd->name : rd->id,
+ PROMETHEUS_ELEMENT_MAX);
+ snprintf(name, PROMETHEUS_LABELS_MAX, "%s_%s%s", instance->config.prefix, context, suffix);
+
+ add_metric(
+ connector_specific_data->write_request,
+ name, chart, family, dimension,
+ (host == localhost) ? instance->config.hostname : host->hostname,
+ rd->last_collected_value, timeval_msec(&rd->last_collected_time));
+ } else {
+                // the dimensions of the chart do not have the same algorithm, multiplier or divisor
+ // we create a metric per dimension
+
+ prometheus_name_copy(
+ dimension,
+ (instance->config.options & EXPORTING_OPTION_SEND_NAMES && rd->name) ? rd->name : rd->id,
+ PROMETHEUS_ELEMENT_MAX);
+ snprintf(
+ name, PROMETHEUS_LABELS_MAX, "%s_%s_%s%s", instance->config.prefix, context, dimension,
+ suffix);
+
+ add_metric(
+ connector_specific_data->write_request,
+ name, chart, family, NULL,
+ (host == localhost) ? instance->config.hostname : host->hostname,
+ rd->last_collected_value, timeval_msec(&rd->last_collected_time));
+ }
+ } else {
+ // we need average or sum of the data
+
+ time_t last_t = instance->before;
+ calculated_number value = exporting_calculate_value_from_stored_data(instance, rd, &last_t);
+
+ if (!isnan(value) && !isinf(value)) {
+ if (EXPORTING_OPTIONS_DATA_SOURCE(instance->config.options) == EXPORTING_SOURCE_DATA_AVERAGE)
+ suffix = "_average";
+ else if (EXPORTING_OPTIONS_DATA_SOURCE(instance->config.options) == EXPORTING_SOURCE_DATA_SUM)
+ suffix = "_sum";
+
+ prometheus_label_copy(
+ dimension,
+ (instance->config.options & EXPORTING_OPTION_SEND_NAMES && rd->name) ? rd->name : rd->id,
+ PROMETHEUS_ELEMENT_MAX);
+ snprintf(
+ name, PROMETHEUS_LABELS_MAX, "%s_%s%s%s", instance->config.prefix, context, units, suffix);
+
+ add_metric(
+ connector_specific_data->write_request,
+ name, chart, family, dimension,
+ (host == localhost) ? instance->config.hostname : host->hostname,
+ value, last_t * MSEC_PER_SEC);
+ }
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * Format a batch for Prometheus Remote Write connector
+ *
+ * @param instance an instance data structure.
+ * @return Returns 0 on success, 1 on failure.
+ */
+int format_batch_prometheus_remote_write(struct instance *instance)
+{
+ struct simple_connector_data *simple_connector_data =
+ (struct simple_connector_data *)instance->connector_specific_data;
+ struct prometheus_remote_write_specific_data *connector_specific_data =
+ (struct prometheus_remote_write_specific_data *)simple_connector_data->connector_specific_data;
+
+ size_t data_size = get_write_request_size(connector_specific_data->write_request);
+
+ if (unlikely(!data_size)) {
+ error("EXPORTING: write request size is out of range");
+ return 1;
+ }
+
+ BUFFER *buffer = instance->buffer;
+
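+    // reserve enough space for the worst-case snappy-compressed size computed by get_write_request_size()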
+ buffer_need_bytes(buffer, data_size);
+ if (unlikely(pack_and_clear_write_request(connector_specific_data->write_request, buffer->buffer, &data_size))) {
+ error("EXPORTING: cannot pack write request");
+ return 1;
+ }
+ buffer->len = data_size;
+ instance->stats.buffered_bytes = (collected_number)buffer_strlen(buffer);
+
+ simple_connector_end_batch(instance);
+
+ return 0;
+}
diff --git a/exporting/prometheus/remote_write/remote_write.h b/exporting/prometheus/remote_write/remote_write.h
new file mode 100644
index 0000000..d738f51
--- /dev/null
+++ b/exporting/prometheus/remote_write/remote_write.h
@@ -0,0 +1,25 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_EXPORTING_PROMETHEUS_REMOTE_WRITE_H
+#define NETDATA_EXPORTING_PROMETHEUS_REMOTE_WRITE_H
+
+#include "exporting/exporting_engine.h"
+#include "exporting/prometheus/prometheus.h"
+#include "remote_write_request.h"
+
+struct prometheus_remote_write_specific_data {
+ void *write_request;
+};
+
+int init_prometheus_remote_write_instance(struct instance *instance);
+extern void clean_prometheus_remote_write(struct instance *instance);
+
+int format_host_prometheus_remote_write(struct instance *instance, RRDHOST *host);
+int format_chart_prometheus_remote_write(struct instance *instance, RRDSET *st);
+int format_dimension_prometheus_remote_write(struct instance *instance, RRDDIM *rd);
+int format_batch_prometheus_remote_write(struct instance *instance);
+
+void prometheus_remote_write_prepare_header(struct instance *instance);
+int process_prometheus_remote_write_response(BUFFER *buffer, struct instance *instance);
+
+#endif //NETDATA_EXPORTING_PROMETHEUS_REMOTE_WRITE_H
diff --git a/exporting/prometheus/remote_write/remote_write.proto b/exporting/prometheus/remote_write/remote_write.proto
new file mode 100644
index 0000000..dfde254
--- /dev/null
+++ b/exporting/prometheus/remote_write/remote_write.proto
@@ -0,0 +1,29 @@
+syntax = "proto3";
+package prometheus;
+
+option cc_enable_arenas = true;
+
+import "google/protobuf/descriptor.proto";
+
+message WriteRequest {
+ repeated TimeSeries timeseries = 1 [(nullable) = false];
+}
+
+message TimeSeries {
+ repeated Label labels = 1 [(nullable) = false];
+ repeated Sample samples = 2 [(nullable) = false];
+}
+
+message Label {
+ string name = 1;
+ string value = 2;
+}
+
+message Sample {
+ double value = 1;
+ int64 timestamp = 2;
+}
+
+extend google.protobuf.FieldOptions {
+ bool nullable = 65001;
+}
diff --git a/exporting/prometheus/remote_write/remote_write_request.cc b/exporting/prometheus/remote_write/remote_write_request.cc
new file mode 100644
index 0000000..48d19ef
--- /dev/null
+++ b/exporting/prometheus/remote_write/remote_write_request.cc
@@ -0,0 +1,186 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include <snappy.h>
+#include "remote_write.pb.h"
+#include "remote_write_request.h"
+
+using namespace prometheus;
+
+google::protobuf::Arena arena;
+
+/**
+ * Initialize a write request
+ *
+ * @return Returns a new write request
+ */
+void *init_write_request()
+{
+ GOOGLE_PROTOBUF_VERIFY_VERSION;
+ WriteRequest *write_request = google::protobuf::Arena::CreateMessage<WriteRequest>(&arena);
+ return (void *)write_request;
+}
+
+/**
+ * Adds information about a host to a write request
+ *
+ * @param write_request_p the write request
+ * @param name the name of a metric which is used for providing the host information
+ * @param instance the name of the host itself
+ * @param application the name of a program which sends the information
+ * @param version the version of the program
+ * @param timestamp the timestamp for the metric in milliseconds
+ */
+void add_host_info(
+ void *write_request_p,
+ const char *name, const char *instance, const char *application, const char *version, const int64_t timestamp)
+{
+ WriteRequest *write_request = (WriteRequest *)write_request_p;
+ TimeSeries *timeseries;
+ Sample *sample;
+ Label *label;
+
+ timeseries = write_request->add_timeseries();
+
+ label = timeseries->add_labels();
+ label->set_name("__name__");
+ label->set_value(name);
+
+ label = timeseries->add_labels();
+ label->set_name("instance");
+ label->set_value(instance);
+
+ if (application) {
+ label = timeseries->add_labels();
+ label->set_name("application");
+ label->set_value(application);
+ }
+
+ if (version) {
+ label = timeseries->add_labels();
+ label->set_name("version");
+ label->set_value(version);
+ }
+
+ sample = timeseries->add_samples();
+ sample->set_value(1);
+ sample->set_timestamp(timestamp);
+}
+
+/**
+ * Adds a label to the last created timeseries
+ *
+ * @param write_request_p the write request with the timeseries
+ * @param key the key of the label
+ * @param value the value of the label
+ */
+void add_label(void *write_request_p, char *key, char *value)
+{
+ WriteRequest *write_request = (WriteRequest *)write_request_p;
+ TimeSeries *timeseries;
+ Label *label;
+
+ timeseries = write_request->mutable_timeseries(write_request->timeseries_size() - 1);
+
+ label = timeseries->add_labels();
+ label->set_name(key);
+ label->set_value(value);
+}
+
+/**
+ * Adds a metric to a write request
+ *
+ * @param write_request_p the write request
+ * @param name the name of the metric
+ * @param chart the chart the metric belongs to
+ * @param family the family the metric belongs to
+ * @param dimension the dimension the metric belongs to
+ * @param instance the name of the host the metric belongs to
+ * @param value the value of the metric
+ * @param timestamp the timestamp for the metric in milliseconds
+ */
+void add_metric(
+ void *write_request_p,
+ const char *name, const char *chart, const char *family, const char *dimension, const char *instance,
+ const double value, const int64_t timestamp)
+{
+ WriteRequest *write_request = (WriteRequest *)write_request_p;
+ TimeSeries *timeseries;
+ Sample *sample;
+ Label *label;
+
+ timeseries = write_request->add_timeseries();
+
+ label = timeseries->add_labels();
+ label->set_name("__name__");
+ label->set_value(name);
+
+ label = timeseries->add_labels();
+ label->set_name("chart");
+ label->set_value(chart);
+
+ label = timeseries->add_labels();
+ label->set_name("family");
+ label->set_value(family);
+
+ if (dimension) {
+ label = timeseries->add_labels();
+ label->set_name("dimension");
+ label->set_value(dimension);
+ }
+
+ label = timeseries->add_labels();
+ label->set_name("instance");
+ label->set_value(instance);
+
+ sample = timeseries->add_samples();
+ sample->set_value(value);
+ sample->set_timestamp(timestamp);
+}
+
+/**
+ * Gets the size of a write request
+ *
+ * @param write_request_p the write request
+ * @return Returns the size of the write request
+ */
+size_t get_write_request_size(void *write_request_p)
+{
+ WriteRequest *write_request = (WriteRequest *)write_request_p;
+
+#if GOOGLE_PROTOBUF_VERSION < 3001000
+ size_t size = (size_t)snappy::MaxCompressedLength(write_request->ByteSize());
+#else
+ size_t size = (size_t)snappy::MaxCompressedLength(write_request->ByteSizeLong());
+#endif
+
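+    // treat a size that does not fit in an int as invalid and report it as 0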
+ return (size < INT_MAX) ? size : 0;
+}
+
+/**
+ * Packs a write request into a buffer and clears the request
+ *
+ * @param write_request_p the write request
+ * @param buffer a buffer, where compressed data is written
+ * @param size gets the size of the write request, returns the size of the compressed data
+ * @return Returns 0 on success, 1 on failure
+ */
+int pack_and_clear_write_request(void *write_request_p, char *buffer, size_t *size)
+{
+ WriteRequest *write_request = (WriteRequest *)write_request_p;
+ std::string uncompressed_write_request;
+
+ if (write_request->SerializeToString(&uncompressed_write_request) == false)
+ return 1;
+ write_request->clear_timeseries();
+ snappy::RawCompress(uncompressed_write_request.data(), uncompressed_write_request.size(), buffer, size);
+
+ return 0;
+}
+
+/**
+ * Shuts down the Protobuf library
+ */
+void protocol_buffers_shutdown()
+{
+ google::protobuf::ShutdownProtobufLibrary();
+}
diff --git a/exporting/prometheus/remote_write/remote_write_request.h b/exporting/prometheus/remote_write/remote_write_request.h
new file mode 100644
index 0000000..e1dfaca
--- /dev/null
+++ b/exporting/prometheus/remote_write/remote_write_request.h
@@ -0,0 +1,33 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_EXPORTING_PROMETHEUS_REMOTE_WRITE_REQUEST_H
+#define NETDATA_EXPORTING_PROMETHEUS_REMOTE_WRITE_REQUEST_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+void *init_write_request();
+
+void add_host_info(
+ void *write_request_p,
+ const char *name, const char *instance, const char *application, const char *version, const int64_t timestamp);
+
+void add_label(void *write_request_p, char *key, char *value);
+
+void add_metric(
+ void *write_request_p,
+ const char *name, const char *chart, const char *family, const char *dimension,
+ const char *instance, const double value, const int64_t timestamp);
+
+size_t get_write_request_size(void *write_request_p);
+
+int pack_and_clear_write_request(void *write_request_p, char *buffer, size_t *size);
+
+void protocol_buffers_shutdown();
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif //NETDATA_EXPORTING_PROMETHEUS_REMOTE_WRITE_REQUEST_H
diff --git a/exporting/pubsub/Makefile.am b/exporting/pubsub/Makefile.am
new file mode 100644
index 0000000..161784b
--- /dev/null
+++ b/exporting/pubsub/Makefile.am
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/exporting/pubsub/README.md b/exporting/pubsub/README.md
new file mode 100644
index 0000000..6da14c4
--- /dev/null
+++ b/exporting/pubsub/README.md
@@ -0,0 +1,45 @@
+<!--
+title: "Export metrics to Google Cloud Pub/Sub Service"
+description: "Export Netdata metrics to the Google Cloud Pub/Sub Service for long-term archiving or analytical processing."
+custom_edit_url: https://github.com/netdata/netdata/edit/master/exporting/pubsub/README.md
+sidebar_label: Google Cloud Pub/Sub Service
+-->
+
+# Export metrics to Google Cloud Pub/Sub Service
+
+## Prerequisites
+
+To use the Pub/Sub service for metric collection and processing, you should first
+[install](https://github.com/googleapis/cpp-cmakefiles) the Google Cloud Platform C++ Proto Libraries.
+Pub/Sub support also depends on the dependencies of those libraries, such as `protobuf`, `protoc`, and `grpc`. Next,
+reinstall Netdata from source. The installer will detect that the required libraries are now available.
+
+> You [cannot compile Netdata](https://github.com/netdata/netdata/issues/10193) with Pub/Sub support enabled using
+> `grpc` 1.32 or higher.
+>
+> Some distributions don't have `.cmake` files in packages. To build the C++ Proto Libraries on such distributions we
+> advise you to delete `protobuf`, `protoc`, and `grpc` related packages and
+> [install](https://github.com/grpc/grpc/blob/master/BUILDING.md) `grpc` with its dependencies from source.
+
+## Configuration
+
+To enable data sending to the Pub/Sub service, run `./edit-config exporting.conf` in the Netdata configuration directory
+and set the following options:
+
+```conf
+[pubsub:my_instance]
+ enabled = yes
+ destination = pubsub.googleapis.com
+ credentials file = /etc/netdata/google_cloud_credentials.json
+ project id = my_project
+ topic id = my_topic
+```
+
+Set the `destination` option to a Pub/Sub service endpoint. `pubsub.googleapis.com` is the default one.
+
+Next, create the credentials JSON file by following Google Cloud's [authentication guide](https://cloud.google.com/docs/authentication/getting-started#creating_a_service_account). The user running the Agent
+(typically `netdata`) needs read access to `google_cloud_credentials.json`, which you can set with
+`chmod 400 google_cloud_credentials.json; chown netdata google_cloud_credentials.json`. Set the `credentials file`
+option to the full path of the file.
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fexporting%2Fpubsub%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/exporting/pubsub/pubsub.c b/exporting/pubsub/pubsub.c
new file mode 100644
index 0000000..336a096
--- /dev/null
+++ b/exporting/pubsub/pubsub.c
@@ -0,0 +1,194 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "pubsub.h"
+
+/**
+ * Initialize Pub/Sub connector instance
+ *
+ * @param instance an instance data structure.
+ * @return Returns 0 on success, 1 on failure.
+ */
+int init_pubsub_instance(struct instance *instance)
+{
+ instance->worker = pubsub_connector_worker;
+
+ instance->start_batch_formatting = NULL;
+ instance->start_host_formatting = format_host_labels_json_plaintext;
+ instance->start_chart_formatting = NULL;
+
+ if (EXPORTING_OPTIONS_DATA_SOURCE(instance->config.options) == EXPORTING_SOURCE_DATA_AS_COLLECTED)
+ instance->metric_formatting = format_dimension_collected_json_plaintext;
+ else
+ instance->metric_formatting = format_dimension_stored_json_plaintext;
+
+ instance->end_chart_formatting = NULL;
+ instance->end_host_formatting = flush_host_labels;
+ instance->end_batch_formatting = NULL;
+
+ instance->prepare_header = NULL;
+ instance->check_response = NULL;
+
+ instance->buffer = (void *)buffer_create(0);
+ if (!instance->buffer) {
+ error("EXPORTING: cannot create buffer for Pub/Sub exporting connector instance %s", instance->config.name);
+ return 1;
+ }
+ uv_mutex_init(&instance->mutex);
+ uv_cond_init(&instance->cond_var);
+
+ struct pubsub_specific_data *connector_specific_data = callocz(1, sizeof(struct pubsub_specific_data));
+ instance->connector_specific_data = (void *)connector_specific_data;
+
+ struct pubsub_specific_config *connector_specific_config =
+ (struct pubsub_specific_config *)instance->config.connector_specific_config;
+ char error_message[ERROR_LINE_MAX + 1] = "";
+ if (pubsub_init(
+ (void *)connector_specific_data, error_message, instance->config.destination,
+ connector_specific_config->credentials_file, connector_specific_config->project_id,
+ connector_specific_config->topic_id)) {
+ error(
+ "EXPORTING: Cannot initialize a Pub/Sub publisher for instance %s: %s",
+ instance->config.name, error_message);
+ return 1;
+ }
+
+ return 0;
+}
+
+/**
+ * Clean up a Pub/Sub connector instance
+ *
+ * @param instance an instance data structure.
+ */
+void clean_pubsub_instance(struct instance *instance)
+{
+ info("EXPORTING: cleaning up instance %s ...", instance->config.name);
+
+ struct pubsub_specific_data *connector_specific_data =
+ (struct pubsub_specific_data *)instance->connector_specific_data;
+ pubsub_cleanup(connector_specific_data);
+ freez(connector_specific_data);
+
+ buffer_free(instance->buffer);
+
+ struct pubsub_specific_config *connector_specific_config =
+ (struct pubsub_specific_config *)instance->config.connector_specific_config;
+ freez(connector_specific_config->credentials_file);
+ freez(connector_specific_config->project_id);
+ freez(connector_specific_config->topic_id);
+ freez(connector_specific_config);
+
+ info("EXPORTING: instance %s exited", instance->config.name);
+ instance->exited = 1;
+
+ return;
+}
+
+/**
+ * Pub/Sub connector worker
+ *
+ * Runs in a separate thread for every instance.
+ *
+ * @param instance_p an instance data structure.
+ */
+void pubsub_connector_worker(void *instance_p)
+{
+ struct instance *instance = (struct instance *)instance_p;
+ struct pubsub_specific_config *connector_specific_config = instance->config.connector_specific_config;
+ struct pubsub_specific_data *connector_specific_data = instance->connector_specific_data;
+
+ while (!instance->engine->exit) {
+ struct stats *stats = &instance->stats;
+ char error_message[ERROR_LINE_MAX + 1] = "";
+
+ uv_mutex_lock(&instance->mutex);
+ while (!instance->data_is_ready)
+ uv_cond_wait(&instance->cond_var, &instance->mutex);
+ instance->data_is_ready = 0;
+
+ if (unlikely(instance->engine->exit)) {
+ uv_mutex_unlock(&instance->mutex);
+ break;
+ }
+
+ // reset the monitoring chart counters
+ stats->received_bytes =
+ stats->sent_bytes =
+ stats->sent_metrics =
+ stats->lost_metrics =
+ stats->receptions =
+ stats->transmission_successes =
+ stats->transmission_failures =
+ stats->data_lost_events =
+ stats->lost_bytes =
+ stats->reconnects = 0;
+
+ BUFFER *buffer = (BUFFER *)instance->buffer;
+ size_t buffer_len = buffer_strlen(buffer);
+
+ stats->buffered_bytes = buffer_len;
+
+ if (pubsub_add_message(instance->connector_specific_data, (char *)buffer_tostring(buffer))) {
+ error("EXPORTING: Instance %s: Cannot add data to a message", instance->config.name);
+
+ stats->data_lost_events++;
+ stats->lost_metrics += stats->buffered_metrics;
+ stats->lost_bytes += buffer_len;
+
+ goto cleanup;
+ }
+
+ debug(
+ D_BACKEND, "EXPORTING: pubsub_publish(): project = %s, topic = %s, buffer = %zu",
+ connector_specific_config->project_id, connector_specific_config->topic_id, buffer_len);
+
+ if (pubsub_publish((void *)connector_specific_data, error_message, stats->buffered_metrics, buffer_len)) {
+ error("EXPORTING: Instance: %s: Cannot publish a message: %s", instance->config.name, error_message);
+
+ stats->transmission_failures++;
+ stats->data_lost_events++;
+ stats->lost_metrics += stats->buffered_metrics;
+ stats->lost_bytes += buffer_len;
+
+ goto cleanup;
+ }
+
+ stats->sent_bytes = buffer_len;
+ stats->transmission_successes++;
+
+ size_t sent_metrics = 0, lost_metrics = 0, sent_bytes = 0, lost_bytes = 0;
+
+ if (unlikely(pubsub_get_result(
+ connector_specific_data, error_message, &sent_metrics, &sent_bytes, &lost_metrics, &lost_bytes))) {
+ // oops! we couldn't send (all or some of the) data
+ error("EXPORTING: %s", error_message);
+ error(
+ "EXPORTING: failed to write data to service '%s'. Willing to write %zu bytes, wrote %zu bytes.",
+ instance->config.destination, lost_bytes, sent_bytes);
+
+ stats->transmission_failures++;
+ stats->data_lost_events++;
+ stats->lost_metrics += lost_metrics;
+ stats->lost_bytes += lost_bytes;
+ } else {
+ stats->receptions++;
+ stats->sent_metrics = sent_metrics;
+ }
+
+ cleanup:
+ send_internal_metrics(instance);
+
+ buffer_flush(buffer);
+ stats->buffered_metrics = 0;
+
+ uv_mutex_unlock(&instance->mutex);
+
+#ifdef UNIT_TESTING
+ return;
+#endif
+ }
+
+ clean_pubsub_instance(instance);
+}
diff --git a/exporting/pubsub/pubsub.h b/exporting/pubsub/pubsub.h
new file mode 100644
index 0000000..0bcb76f
--- /dev/null
+++ b/exporting/pubsub/pubsub.h
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_EXPORTING_PUBSUB_H
+#define NETDATA_EXPORTING_PUBSUB_H
+
+#include "exporting/exporting_engine.h"
+#include "exporting/json/json.h"
+#include "pubsub_publish.h"
+
+int init_pubsub_instance(struct instance *instance);
+void clean_pubsub_instance(struct instance *instance);
+void pubsub_connector_worker(void *instance_p);
+
+#endif //NETDATA_EXPORTING_PUBSUB_H
diff --git a/exporting/pubsub/pubsub_publish.cc b/exporting/pubsub/pubsub_publish.cc
new file mode 100644
index 0000000..dc237cf
--- /dev/null
+++ b/exporting/pubsub/pubsub_publish.cc
@@ -0,0 +1,258 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include <google/pubsub/v1/pubsub.grpc.pb.h>
+#include <grpcpp/grpcpp.h>
+#include <stdexcept>
+#include "pubsub_publish.h"
+
+#define EVENT_CHECK_TIMEOUT 50
+
+struct response {
+ grpc::ClientContext *context;
+ google::pubsub::v1::PublishResponse *publish_response;
+ size_t tag;
+ grpc::Status *status;
+
+ size_t published_metrics;
+ size_t published_bytes;
+};
+
+static inline void copy_error_message(char *error_message_dst, const char *error_message_src)
+{
+ std::strncpy(error_message_dst, error_message_src, ERROR_LINE_MAX);
+ error_message_dst[ERROR_LINE_MAX] = '\0';
+}
+
+/**
+ * Initialize a Pub/Sub client and a data structure for responses.
+ *
+ * @param pubsub_specific_data_p a pointer to a structure with instance-wide data.
+ * @param error_message report error message to a caller.
+ * @param destination a Pub/Sub service endpoint.
+ * @param credentials_file the full path of a file with Google application credentials.
+ * @param project_id a project ID.
+ * @param topic_id a topic ID.
+ * @return Returns 0 on success, 1 on failure.
+ */
+int pubsub_init(
+ void *pubsub_specific_data_p, char *error_message, const char *destination, const char *credentials_file,
+ const char *project_id, const char *topic_id)
+{
+ struct pubsub_specific_data *connector_specific_data = (struct pubsub_specific_data *)pubsub_specific_data_p;
+
+ try {
+ setenv("GOOGLE_APPLICATION_CREDENTIALS", credentials_file, 0);
+
+ std::shared_ptr<grpc::ChannelCredentials> credentials = grpc::GoogleDefaultCredentials();
+ if (credentials == nullptr) {
+ copy_error_message(error_message, "Can't load credentials");
+ return 1;
+ }
+
+ std::shared_ptr<grpc::Channel> channel = grpc::CreateChannel(destination, credentials);
+
+ google::pubsub::v1::Publisher::Stub *stub = new google::pubsub::v1::Publisher::Stub(channel);
+ if (!stub) {
+ copy_error_message(error_message, "Can't create a publisher stub");
+ return 1;
+ }
+
+ connector_specific_data->stub = stub;
+
+ google::pubsub::v1::PublishRequest *request = new google::pubsub::v1::PublishRequest;
+ connector_specific_data->request = request;
+ ((google::pubsub::v1::PublishRequest *)(connector_specific_data->request))
+ ->set_topic(std::string("projects/") + project_id + "/topics/" + topic_id);
+
+ grpc::CompletionQueue *cq = new grpc::CompletionQueue;
+ connector_specific_data->completion_queue = cq;
+
+ connector_specific_data->responses = new std::list<struct response>;
+
+ return 0;
+ } catch (std::exception const &ex) {
+ std::string em(std::string("Standard exception raised: ") + ex.what());
+ copy_error_message(error_message, em.c_str());
+ return 1;
+ }
+
+ return 0;
+}
+
+/**
+ * Clean up the Pub/Sub connector instance-specific data
+ */
+void pubsub_cleanup(void *pubsub_specific_data_p)
+{
+ struct pubsub_specific_data *connector_specific_data = (struct pubsub_specific_data *)pubsub_specific_data_p;
+
+ std::list<struct response> *responses = (std::list<struct response> *)connector_specific_data->responses;
+ std::list<struct response>::iterator response;
+ for (response = responses->begin(); response != responses->end(); ++response) {
+        // TODO: If we delete the contexts here, a huge number of records may be lost. We need to find the
+        // right way to clean up contexts.
+ // delete response->context;
+ delete response->publish_response;
+ delete response->status;
+ }
+ delete responses;
+
+ ((grpc::CompletionQueue *)connector_specific_data->completion_queue)->Shutdown();
+ delete (grpc::CompletionQueue *)connector_specific_data->completion_queue;
+ delete (google::pubsub::v1::PublishRequest *)connector_specific_data->request;
+ delete (google::pubsub::v1::Publisher::Stub *)connector_specific_data->stub;
+
+    // TODO: Find out how to shut down gRPC gracefully. grpc_shutdown() doesn't seem to work.
+ // grpc_shutdown();
+
+ return;
+}
+
+/**
+ * Add data to a Pub/Sub request message.
+ *
+ * @param pubsub_specific_data_p a pointer to a structure with instance-wide data.
+ * @param data a text buffer with metrics.
+ * @return Returns 0 on success, 1 on failure.
+ */
+int pubsub_add_message(void *pubsub_specific_data_p, char *data)
+{
+ struct pubsub_specific_data *connector_specific_data = (struct pubsub_specific_data *)pubsub_specific_data_p;
+
+ try {
+ google::pubsub::v1::PubsubMessage *message =
+ ((google::pubsub::v1::PublishRequest *)(connector_specific_data->request))->add_messages();
+ if (!message)
+ return 1;
+
+ message->set_data(data);
+ } catch (std::exception const &ex) {
+ return 1;
+ }
+
+ return 0;
+}
+
+/**
+ * Send data to the Pub/Sub service
+ *
+ * @param pubsub_specific_data_p a pointer to a structure with client and request outcome information.
+ * @param error_message report error message to a caller.
+ * @param buffered_metrics the number of metrics we are going to send.
+ * @param buffered_bytes the number of bytes we are going to send.
+ * @return Returns 0 on success, 1 on failure.
+ */
+int pubsub_publish(void *pubsub_specific_data_p, char *error_message, size_t buffered_metrics, size_t buffered_bytes)
+{
+ struct pubsub_specific_data *connector_specific_data = (struct pubsub_specific_data *)pubsub_specific_data_p;
+
+ try {
+ grpc::ClientContext *context = new grpc::ClientContext;
+
+ std::unique_ptr<grpc::ClientAsyncResponseReader<google::pubsub::v1::PublishResponse> > rpc(
+ ((google::pubsub::v1::Publisher::Stub *)(connector_specific_data->stub))
+ ->AsyncPublish(
+ context, (*(google::pubsub::v1::PublishRequest *)(connector_specific_data->request)),
+ ((grpc::CompletionQueue *)(connector_specific_data->completion_queue))));
+
+ struct response response;
+ response.context = context;
+ response.publish_response = new google::pubsub::v1::PublishResponse;
+ response.tag = connector_specific_data->last_tag++;
+ response.status = new grpc::Status;
+ response.published_metrics = buffered_metrics;
+ response.published_bytes = buffered_bytes;
+
+ rpc->Finish(response.publish_response, response.status, (void *)response.tag);
+
+ ((google::pubsub::v1::PublishRequest *)(connector_specific_data->request))->clear_messages();
+
+ ((std::list<struct response> *)(connector_specific_data->responses))->push_back(response);
+ } catch (std::exception const &ex) {
+ std::string em(std::string("Standard exception raised: ") + ex.what());
+ copy_error_message(error_message, em.c_str());
+ return 1;
+ }
+
+ return 0;
+}
+
+/**
+ * Get results from service responses
+ *
+ * @param pubsub_specific_data_p a pointer to a structure with instance-wide data.
+ * @param error_message report error message to a caller.
+ * @param sent_metrics report to the caller how many metrics were successfully sent.
+ * @param sent_bytes report to the caller how many bytes were successfully sent.
+ * @param lost_metrics report to the caller how many metrics were lost during transmission.
+ * @param lost_bytes report to the caller how many bytes were lost during transmission.
+ * @return Returns 0 if all data was sent successfully, 1 when data was lost on transmission.
+ */
+int pubsub_get_result(
+ void *pubsub_specific_data_p, char *error_message,
+ size_t *sent_metrics, size_t *sent_bytes, size_t *lost_metrics, size_t *lost_bytes)
+{
+ struct pubsub_specific_data *connector_specific_data = (struct pubsub_specific_data *)pubsub_specific_data_p;
+ std::list<struct response> *responses = (std::list<struct response> *)connector_specific_data->responses;
+ grpc_impl::CompletionQueue::NextStatus next_status;
+
+ *sent_metrics = 0;
+ *sent_bytes = 0;
+ *lost_metrics = 0;
+ *lost_bytes = 0;
+
+ try {
+ do {
+ std::list<struct response>::iterator response;
+ void *got_tag;
+ bool ok = false;
+
+            auto deadline = std::chrono::system_clock::now() + std::chrono::milliseconds(EVENT_CHECK_TIMEOUT);
+ next_status = (*(grpc::CompletionQueue *)(connector_specific_data->completion_queue))
+ .AsyncNext(&got_tag, &ok, deadline);
+
+ if (next_status == grpc::CompletionQueue::GOT_EVENT) {
+ for (response = responses->begin(); response != responses->end(); ++response) {
+ if ((void *)response->tag == got_tag)
+ break;
+ }
+
+ if (response == responses->end()) {
+ copy_error_message(error_message, "Cannot get Pub/Sub response");
+ return 1;
+ }
+
+ if (ok && response->publish_response->message_ids_size()) {
+ *sent_metrics += response->published_metrics;
+ *sent_bytes += response->published_bytes;
+ } else {
+ *lost_metrics += response->published_metrics;
+ *lost_bytes += response->published_bytes;
+ response->status->error_message().copy(error_message, ERROR_LINE_MAX);
+ error_message[ERROR_LINE_MAX] = '\0';
+ }
+
+ delete response->context;
+ delete response->publish_response;
+ delete response->status;
+ responses->erase(response);
+ }
+
+ if (next_status == grpc::CompletionQueue::SHUTDOWN) {
+ copy_error_message(error_message, "Completion queue shutdown");
+ return 1;
+ }
+
+ } while (next_status == grpc::CompletionQueue::GOT_EVENT);
+ } catch (std::exception const &ex) {
+ std::string em(std::string("Standard exception raised: ") + ex.what());
+ copy_error_message(error_message, em.c_str());
+ return 1;
+ }
+
+ if (*lost_metrics) {
+ return 1;
+ }
+
+ return 0;
+}
diff --git a/exporting/pubsub/pubsub_publish.h b/exporting/pubsub/pubsub_publish.h
new file mode 100644
index 0000000..567a262
--- /dev/null
+++ b/exporting/pubsub/pubsub_publish.h
@@ -0,0 +1,37 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_EXPORTING_PUBSUB_PUBLISH_H
+#define NETDATA_EXPORTING_PUBSUB_PUBLISH_H
+
+#define ERROR_LINE_MAX 1023
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct pubsub_specific_data {
+ void *stub;
+ void *request;
+ void *completion_queue;
+
+ void *responses;
+ size_t last_tag;
+};
+
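+/*
+ * Call sequence, as used by the connector in pubsub.c: pubsub_init() once per
+ * instance; then pubsub_add_message(), pubsub_publish(), and pubsub_get_result()
+ * on every iteration of the worker loop; pubsub_cleanup() on shutdown.
+ */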
+int pubsub_init(
+ void *pubsub_specific_data_p, char *error_message, const char *destination, const char *credentials_file,
+ const char *project_id, const char *topic_id);
+void pubsub_cleanup(void *pubsub_specific_data_p);
+
+int pubsub_add_message(void *pubsub_specific_data_p, char *data);
+
+int pubsub_publish(void *pubsub_specific_data_p, char *error_message, size_t buffered_metrics, size_t buffered_bytes);
+int pubsub_get_result(
+ void *pubsub_specific_data_p, char *error_message,
+ size_t *sent_metrics, size_t *sent_bytes, size_t *lost_metrics, size_t *lost_bytes);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif //NETDATA_EXPORTING_PUBSUB_PUBLISH_H
diff --git a/exporting/read_config.c b/exporting/read_config.c
new file mode 100644
index 0000000..995ba57
--- /dev/null
+++ b/exporting/read_config.c
@@ -0,0 +1,503 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "exporting_engine.h"
+
+struct config exporting_config = { .first_section = NULL,
+ .last_section = NULL,
+ .mutex = NETDATA_MUTEX_INITIALIZER,
+ .index = { .avl_tree = { .root = NULL, .compar = appconfig_section_compare },
+ .rwlock = AVL_LOCK_INITIALIZER } };
+
+struct instance *prometheus_exporter_instance = NULL;
+
+static _CONNECTOR_INSTANCE *find_instance(const char *section)
+{
+ _CONNECTOR_INSTANCE *local_ci;
+
+ local_ci = add_connector_instance(NULL, NULL); // Get root section
+ if (unlikely(!local_ci))
+ return local_ci;
+
+ if (!section)
+ return local_ci;
+
+ while (local_ci) {
+ if (!strcmp(local_ci->instance_name, section))
+ break;
+ local_ci = local_ci->next;
+ }
+ return local_ci;
+}
+
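+/*
+ * Option lookups below fall through three levels: the instance section first,
+ * then the connector section, and finally the global [exporting] section.
+ */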
+char *expconfig_get(struct config *root, const char *section, const char *name, const char *default_value)
+{
+ _CONNECTOR_INSTANCE *local_ci;
+
+ if (!strcmp(section, CONFIG_SECTION_EXPORTING))
+ return appconfig_get(root, CONFIG_SECTION_EXPORTING, name, default_value);
+
+ local_ci = find_instance(section);
+
+ if (!local_ci)
+ return NULL; // TODO: Check if it is meaningful to return default_value
+
+ return appconfig_get(
+ root, local_ci->instance_name, name,
+ appconfig_get(
+ root, local_ci->connector_name, name, appconfig_get(root, CONFIG_SECTION_EXPORTING, name, default_value)));
+}
+
+int expconfig_get_boolean(struct config *root, const char *section, const char *name, int default_value)
+{
+ _CONNECTOR_INSTANCE *local_ci;
+
+ if (!strcmp(section, CONFIG_SECTION_EXPORTING))
+ return appconfig_get_boolean(root, CONFIG_SECTION_EXPORTING, name, default_value);
+
+ local_ci = find_instance(section);
+
+ if (!local_ci)
+ return 0; // TODO: Check if it is meaningful to return default_value
+
+ return appconfig_get_boolean(
+ root, local_ci->instance_name, name,
+ appconfig_get_boolean(
+ root, local_ci->connector_name, name,
+ appconfig_get_boolean(root, CONFIG_SECTION_EXPORTING, name, default_value)));
+}
+
+long long expconfig_get_number(struct config *root, const char *section, const char *name, long long default_value)
+{
+ _CONNECTOR_INSTANCE *local_ci;
+
+ if (!strcmp(section, CONFIG_SECTION_EXPORTING))
+ return appconfig_get_number(root, CONFIG_SECTION_EXPORTING, name, default_value);
+
+ local_ci = find_instance(section);
+
+ if (!local_ci)
+ return 0; // TODO: Check if it is meaningful to return default_value
+
+ return appconfig_get_number(
+ root, local_ci->instance_name, name,
+ appconfig_get_number(
+ root, local_ci->connector_name, name,
+ appconfig_get_number(root, CONFIG_SECTION_EXPORTING, name, default_value)));
+}
+
+/*
+ * Get the next connector instance that we need to activate.
+ *
+ * Pass NULL to reset the internal iterator to the head of the instance list.
+ *
+ * @param target_ci is filled with the instance name and connector name.
+ *
+ * @return 1 if there are more connector instances to fetch, 0 when done.
+ */
+
+int get_connector_instance(struct connector_instance *target_ci)
+{
+ static _CONNECTOR_INSTANCE *local_ci = NULL;
+ _CONNECTOR_INSTANCE *global_connector_instance;
+
+ global_connector_instance = find_instance(NULL); // Fetch head of instances
+
+ if (unlikely(!global_connector_instance))
+ return 0;
+
+ if (target_ci == NULL) {
+ local_ci = NULL;
+ return 1;
+ }
+ if (local_ci == NULL)
+ local_ci = global_connector_instance;
+ else {
+ local_ci = local_ci->next;
+ if (local_ci == NULL)
+ return 0;
+ }
+
+ strcpy(target_ci->instance_name, local_ci->instance_name);
+ strcpy(target_ci->connector_name, local_ci->connector_name);
+
+ return 1;
+}
+
+/**
+ * Select Type
+ *
+ * Select the connector type based on the user input.
+ *
+ * @param type the string that defines the connector type.
+ *
+ * @return The connector type id, or EXPORTING_CONNECTOR_TYPE_UNKNOWN if the string is not recognized.
+ */
+EXPORTING_CONNECTOR_TYPE exporting_select_type(const char *type)
+{
+ if (!strcmp(type, "graphite") || !strcmp(type, "graphite:plaintext")) {
+ return EXPORTING_CONNECTOR_TYPE_GRAPHITE;
+ } else if (!strcmp(type, "graphite:http") || !strcmp(type, "graphite:https")) {
+ return EXPORTING_CONNECTOR_TYPE_GRAPHITE_HTTP;
+ } else if (!strcmp(type, "json") || !strcmp(type, "json:plaintext")) {
+ return EXPORTING_CONNECTOR_TYPE_JSON;
+ } else if (!strcmp(type, "json:http") || !strcmp(type, "json:https")) {
+ return EXPORTING_CONNECTOR_TYPE_JSON_HTTP;
+ } else if (!strcmp(type, "opentsdb") || !strcmp(type, "opentsdb:telnet")) {
+ return EXPORTING_CONNECTOR_TYPE_OPENTSDB;
+ } else if (!strcmp(type, "opentsdb:http") || !strcmp(type, "opentsdb:https")) {
+ return EXPORTING_CONNECTOR_TYPE_OPENTSDB_HTTP;
+ } else if (
+ !strcmp(type, "prometheus_remote_write") ||
+ !strcmp(type, "prometheus_remote_write:http") ||
+ !strcmp(type, "prometheus_remote_write:https")) {
+ return EXPORTING_CONNECTOR_TYPE_PROMETHEUS_REMOTE_WRITE;
+ } else if (!strcmp(type, "kinesis") || !strcmp(type, "kinesis:plaintext")) {
+ return EXPORTING_CONNECTOR_TYPE_KINESIS;
+ } else if (!strcmp(type, "pubsub") || !strcmp(type, "pubsub:plaintext")) {
+ return EXPORTING_CONNECTOR_TYPE_PUBSUB;
+ } else if (!strcmp(type, "mongodb") || !strcmp(type, "mongodb:plaintext"))
+ return EXPORTING_CONNECTOR_TYPE_MONGODB;
+
+ return EXPORTING_CONNECTOR_TYPE_UNKNOWN;
+}
+
+EXPORTING_OPTIONS exporting_parse_data_source(const char *data_source, EXPORTING_OPTIONS exporting_options)
+{
+ if (!strcmp(data_source, "raw") || !strcmp(data_source, "as collected") || !strcmp(data_source, "as-collected") ||
+ !strcmp(data_source, "as_collected") || !strcmp(data_source, "ascollected")) {
+ exporting_options |= EXPORTING_SOURCE_DATA_AS_COLLECTED;
+ exporting_options &= ~(EXPORTING_OPTIONS_SOURCE_BITS ^ EXPORTING_SOURCE_DATA_AS_COLLECTED);
+ } else if (!strcmp(data_source, "average")) {
+ exporting_options |= EXPORTING_SOURCE_DATA_AVERAGE;
+ exporting_options &= ~(EXPORTING_OPTIONS_SOURCE_BITS ^ EXPORTING_SOURCE_DATA_AVERAGE);
+ } else if (!strcmp(data_source, "sum") || !strcmp(data_source, "volume")) {
+ exporting_options |= EXPORTING_SOURCE_DATA_SUM;
+ exporting_options &= ~(EXPORTING_OPTIONS_SOURCE_BITS ^ EXPORTING_SOURCE_DATA_SUM);
+ } else {
+ error("EXPORTING: invalid data data_source method '%s'.", data_source);
+ }
+
+ return exporting_options;
+}
+
+/**
+ * Read configuration
+ *
+ * Based on the configuration that was read, an engine data structure is filled with exporting connector instances.
+ *
+ * @return Returns a filled engine data structure or NULL if there are no connector instances configured.
+ */
+struct engine *read_exporting_config()
+{
+ int instances_to_activate = 0;
+ int exporting_config_exists = 0;
+
+ static struct engine *engine = NULL;
+ struct connector_instance_list {
+ struct connector_instance local_ci;
+ EXPORTING_CONNECTOR_TYPE backend_type;
+
+ struct connector_instance_list *next;
+ };
+ struct connector_instance local_ci;
+ struct connector_instance_list *tmp_ci_list = NULL, *tmp_ci_list1 = NULL, *tmp_ci_list_prev = NULL;
+
+ if (unlikely(engine))
+ return engine;
+
+ char *filename = strdupz_path_subpath(netdata_configured_user_config_dir, EXPORTING_CONF);
+
+ exporting_config_exists = appconfig_load(&exporting_config, filename, 0, NULL);
+ if (!exporting_config_exists) {
+ info("CONFIG: cannot load user exporting config '%s'. Will try the stock version.", filename);
+ freez(filename);
+
+ filename = strdupz_path_subpath(netdata_configured_stock_config_dir, EXPORTING_CONF);
+ exporting_config_exists = appconfig_load(&exporting_config, filename, 0, NULL);
+ if (!exporting_config_exists)
+ info("CONFIG: cannot load stock exporting config '%s'. Running with internal defaults.", filename);
+ }
+
+ freez(filename);
+
+#define prometheus_config_get(name, value) \
+ appconfig_get( \
+ &exporting_config, CONFIG_SECTION_PROMETHEUS, name, \
+ appconfig_get(&exporting_config, CONFIG_SECTION_EXPORTING, name, value))
+#define prometheus_config_get_number(name, value) \
+ appconfig_get_number( \
+ &exporting_config, CONFIG_SECTION_PROMETHEUS, name, \
+ appconfig_get_number(&exporting_config, CONFIG_SECTION_EXPORTING, name, value))
+#define prometheus_config_get_boolean(name, value) \
+ appconfig_get_boolean( \
+ &exporting_config, CONFIG_SECTION_PROMETHEUS, name, \
+ appconfig_get_boolean(&exporting_config, CONFIG_SECTION_EXPORTING, name, value))
+
+ if (!prometheus_exporter_instance) {
+ prometheus_exporter_instance = callocz(1, sizeof(struct instance));
+
+ prometheus_exporter_instance->config.update_every =
+ prometheus_config_get_number(EXPORTING_UPDATE_EVERY_OPTION_NAME, EXPORTING_UPDATE_EVERY_DEFAULT);
+
+ // wait for backend subsystem to be initialized
+ for (int retries = 0; !global_backend_source && retries < 1000; retries++)
+ sleep_usec(10000);
+
+ if (!global_backend_source)
+ global_backend_source = "average";
+
+ prometheus_exporter_instance->config.options |= global_backend_options & EXPORTING_OPTIONS_SOURCE_BITS;
+
+ char *data_source = prometheus_config_get("data source", global_backend_source);
+ prometheus_exporter_instance->config.options =
+ exporting_parse_data_source(data_source, prometheus_exporter_instance->config.options);
+
+ if (prometheus_config_get_boolean(
+ "send names instead of ids", global_backend_options & EXPORTING_OPTION_SEND_NAMES))
+ prometheus_exporter_instance->config.options |= EXPORTING_OPTION_SEND_NAMES;
+ else
+ prometheus_exporter_instance->config.options &= ~EXPORTING_OPTION_SEND_NAMES;
+
+ if (prometheus_config_get_boolean("send configured labels", CONFIG_BOOLEAN_YES))
+ prometheus_exporter_instance->config.options |= EXPORTING_OPTION_SEND_CONFIGURED_LABELS;
+ else
+ prometheus_exporter_instance->config.options &= ~EXPORTING_OPTION_SEND_CONFIGURED_LABELS;
+
+ if (prometheus_config_get_boolean("send automatic labels", CONFIG_BOOLEAN_NO))
+ prometheus_exporter_instance->config.options |= EXPORTING_OPTION_SEND_AUTOMATIC_LABELS;
+ else
+ prometheus_exporter_instance->config.options &= ~EXPORTING_OPTION_SEND_AUTOMATIC_LABELS;
+
+ prometheus_exporter_instance->config.charts_pattern =
+ simple_pattern_create(prometheus_config_get("send charts matching", "*"), NULL, SIMPLE_PATTERN_EXACT);
+ prometheus_exporter_instance->config.hosts_pattern = simple_pattern_create(
+ prometheus_config_get("send hosts matching", "localhost *"), NULL, SIMPLE_PATTERN_EXACT);
+
+ prometheus_exporter_instance->config.prefix = prometheus_config_get("prefix", global_backend_prefix);
+ }
+
+ // TODO: change BACKEND to EXPORTING
+ while (get_connector_instance(&local_ci)) {
+ info("Processing connector instance (%s)", local_ci.instance_name);
+
+ if (exporter_get_boolean(local_ci.instance_name, "enabled", 0)) {
+ info(
+ "Instance (%s) on connector (%s) is enabled and scheduled for activation",
+ local_ci.instance_name, local_ci.connector_name);
+
+ tmp_ci_list = (struct connector_instance_list *)callocz(1, sizeof(struct connector_instance_list));
+ memcpy(&tmp_ci_list->local_ci, &local_ci, sizeof(local_ci));
+ tmp_ci_list->backend_type = exporting_select_type(local_ci.connector_name);
+ tmp_ci_list->next = tmp_ci_list_prev;
+ tmp_ci_list_prev = tmp_ci_list;
+ instances_to_activate++;
+ } else
+ info("Instance (%s) on connector (%s) is not enabled", local_ci.instance_name, local_ci.connector_name);
+ }
+
+ if (unlikely(!instances_to_activate)) {
+ info("No connector instances to activate");
+ return NULL;
+ }
+
+ engine = (struct engine *)callocz(1, sizeof(struct engine));
+ // TODO: Check and fill engine fields if actually needed
+
+ if (exporting_config_exists) {
+ engine->config.hostname =
+ strdupz(exporter_get(CONFIG_SECTION_EXPORTING, "hostname", netdata_configured_hostname));
+ engine->config.update_every = exporter_get_number(
+ CONFIG_SECTION_EXPORTING, EXPORTING_UPDATE_EVERY_OPTION_NAME, EXPORTING_UPDATE_EVERY_DEFAULT);
+ }
+
+ while (tmp_ci_list) {
+ struct instance *tmp_instance;
+ char *instance_name;
+ char *default_destination = "localhost";
+
+ info("Instance %s on %s", tmp_ci_list->local_ci.instance_name, tmp_ci_list->local_ci.connector_name);
+
+ if (tmp_ci_list->backend_type == EXPORTING_CONNECTOR_TYPE_UNKNOWN) {
+ error("Unknown exporting connector type");
+ goto next_connector_instance;
+ }
+
+#ifndef ENABLE_PROMETHEUS_REMOTE_WRITE
+ if (tmp_ci_list->backend_type == EXPORTING_CONNECTOR_TYPE_PROMETHEUS_REMOTE_WRITE) {
+ error("Prometheus Remote Write support isn't compiled");
+ goto next_connector_instance;
+ }
+#endif
+
+#ifndef HAVE_KINESIS
+ if (tmp_ci_list->backend_type == EXPORTING_CONNECTOR_TYPE_KINESIS) {
+ error("AWS Kinesis support isn't compiled");
+ goto next_connector_instance;
+ }
+#endif
+
+#ifndef ENABLE_EXPORTING_PUBSUB
+ if (tmp_ci_list->backend_type == EXPORTING_CONNECTOR_TYPE_PUBSUB) {
+ error("Google Cloud Pub/Sub support isn't compiled");
+ goto next_connector_instance;
+ }
+#endif
+
+#ifndef HAVE_MONGOC
+ if (tmp_ci_list->backend_type == EXPORTING_CONNECTOR_TYPE_MONGODB) {
+ error("MongoDB support isn't compiled");
+ goto next_connector_instance;
+ }
+#endif
+
+ tmp_instance = (struct instance *)callocz(1, sizeof(struct instance));
+ tmp_instance->next = engine->instance_root;
+ engine->instance_root = tmp_instance;
+
+ tmp_instance->engine = engine;
+ tmp_instance->config.type = tmp_ci_list->backend_type;
+
+ instance_name = tmp_ci_list->local_ci.instance_name;
+
+ tmp_instance->config.type_name = strdupz(tmp_ci_list->local_ci.connector_name);
+ tmp_instance->config.name = strdupz(tmp_ci_list->local_ci.instance_name);
+
+ tmp_instance->config.update_every =
+ exporter_get_number(instance_name, EXPORTING_UPDATE_EVERY_OPTION_NAME, EXPORTING_UPDATE_EVERY_DEFAULT);
+
+ tmp_instance->config.buffer_on_failures = exporter_get_number(instance_name, "buffer on failures", 10);
+
+ tmp_instance->config.timeoutms = exporter_get_number(instance_name, "timeout ms", 10000);
+
+ tmp_instance->config.charts_pattern =
+ simple_pattern_create(exporter_get(instance_name, "send charts matching", "*"), NULL, SIMPLE_PATTERN_EXACT);
+
+ tmp_instance->config.hosts_pattern = simple_pattern_create(
+ exporter_get(instance_name, "send hosts matching", "localhost *"), NULL, SIMPLE_PATTERN_EXACT);
+
+ char *data_source = exporter_get(instance_name, "data source", "average");
+
+ tmp_instance->config.options = exporting_parse_data_source(data_source, tmp_instance->config.options);
+ if (EXPORTING_OPTIONS_DATA_SOURCE(tmp_instance->config.options) != EXPORTING_SOURCE_DATA_AS_COLLECTED &&
+ tmp_instance->config.update_every % localhost->rrd_update_every)
+ info(
+ "The update interval %d for instance %s is not a multiple of the database update interval %d. "
+ "Metric values will deviate at different points in time.",
+ tmp_instance->config.update_every, tmp_instance->config.name, localhost->rrd_update_every);
+
+ if (exporter_get_boolean(instance_name, "send configured labels", CONFIG_BOOLEAN_YES))
+ tmp_instance->config.options |= EXPORTING_OPTION_SEND_CONFIGURED_LABELS;
+ else
+ tmp_instance->config.options &= ~EXPORTING_OPTION_SEND_CONFIGURED_LABELS;
+
+ if (exporter_get_boolean(instance_name, "send automatic labels", CONFIG_BOOLEAN_NO))
+ tmp_instance->config.options |= EXPORTING_OPTION_SEND_AUTOMATIC_LABELS;
+ else
+ tmp_instance->config.options &= ~EXPORTING_OPTION_SEND_AUTOMATIC_LABELS;
+
+ if (exporter_get_boolean(instance_name, "send names instead of ids", CONFIG_BOOLEAN_YES))
+ tmp_instance->config.options |= EXPORTING_OPTION_SEND_NAMES;
+ else
+ tmp_instance->config.options &= ~EXPORTING_OPTION_SEND_NAMES;
+
+ if (tmp_instance->config.type == EXPORTING_CONNECTOR_TYPE_PROMETHEUS_REMOTE_WRITE) {
+ struct prometheus_remote_write_specific_config *connector_specific_config =
+ callocz(1, sizeof(struct prometheus_remote_write_specific_config));
+
+ tmp_instance->config.connector_specific_config = connector_specific_config;
+
+ connector_specific_config->remote_write_path =
+ strdupz(exporter_get(instance_name, "remote write URL path", "/receive"));
+ }
+
+ if (tmp_instance->config.type == EXPORTING_CONNECTOR_TYPE_KINESIS) {
+ struct aws_kinesis_specific_config *connector_specific_config =
+ callocz(1, sizeof(struct aws_kinesis_specific_config));
+
+ default_destination = "us-east-1";
+
+ tmp_instance->config.connector_specific_config = connector_specific_config;
+
+ connector_specific_config->stream_name = strdupz(exporter_get(instance_name, "stream name", ""));
+
+ connector_specific_config->auth_key_id = strdupz(exporter_get(instance_name, "aws_access_key_id", ""));
+ connector_specific_config->secure_key = strdupz(exporter_get(instance_name, "aws_secret_access_key", ""));
+ }
+
+ if (tmp_instance->config.type == EXPORTING_CONNECTOR_TYPE_PUBSUB) {
+ struct pubsub_specific_config *connector_specific_config =
+ callocz(1, sizeof(struct pubsub_specific_config));
+
+ default_destination = "pubsub.googleapis.com";
+
+ tmp_instance->config.connector_specific_config = connector_specific_config;
+
+ connector_specific_config->credentials_file = strdupz(exporter_get(instance_name, "credentials file", ""));
+ connector_specific_config->project_id = strdupz(exporter_get(instance_name, "project id", ""));
+ connector_specific_config->topic_id = strdupz(exporter_get(instance_name, "topic id", ""));
+ }
+
+ if (tmp_instance->config.type == EXPORTING_CONNECTOR_TYPE_MONGODB) {
+ struct mongodb_specific_config *connector_specific_config =
+ callocz(1, sizeof(struct mongodb_specific_config));
+
+ tmp_instance->config.connector_specific_config = connector_specific_config;
+
+ connector_specific_config->database = strdupz(exporter_get(
+ instance_name, "database", ""));
+
+ connector_specific_config->collection = strdupz(exporter_get(
+ instance_name, "collection", ""));
+ }
+
+ tmp_instance->config.destination = strdupz(exporter_get(instance_name, "destination", default_destination));
+
+ tmp_instance->config.prefix = strdupz(exporter_get(instance_name, "prefix", "netdata"));
+
+ tmp_instance->config.hostname = strdupz(exporter_get(instance_name, "hostname", engine->config.hostname));
+
+#ifdef ENABLE_HTTPS
+
+#define STR_GRAPHITE_HTTPS "graphite:https"
+#define STR_JSON_HTTPS "json:https"
+#define STR_OPENTSDB_HTTPS "opentsdb:https"
+#define STR_PROMETHEUS_REMOTE_WRITE_HTTPS "prometheus_remote_write:https"
+
+ if ((tmp_instance->config.type == EXPORTING_CONNECTOR_TYPE_GRAPHITE_HTTP &&
+ !strncmp(tmp_ci_list->local_ci.connector_name, STR_GRAPHITE_HTTPS, strlen(STR_GRAPHITE_HTTPS))) ||
+ (tmp_instance->config.type == EXPORTING_CONNECTOR_TYPE_JSON_HTTP &&
+ !strncmp(tmp_ci_list->local_ci.connector_name, STR_JSON_HTTPS, strlen(STR_JSON_HTTPS))) ||
+ (tmp_instance->config.type == EXPORTING_CONNECTOR_TYPE_OPENTSDB_HTTP &&
+ !strncmp(tmp_ci_list->local_ci.connector_name, STR_OPENTSDB_HTTPS, strlen(STR_OPENTSDB_HTTPS))) ||
+ (tmp_instance->config.type == EXPORTING_CONNECTOR_TYPE_PROMETHEUS_REMOTE_WRITE &&
+ !strncmp(
+ tmp_ci_list->local_ci.connector_name, STR_PROMETHEUS_REMOTE_WRITE_HTTPS,
+ strlen(STR_PROMETHEUS_REMOTE_WRITE_HTTPS)))) {
+ tmp_instance->config.options |= EXPORTING_OPTION_USE_TLS;
+ }
+#endif
+
+#ifdef NETDATA_INTERNAL_CHECKS
+ info(
+ " Dest=[%s], upd=[%d], buffer=[%d] timeout=[%ld] options=[%u]",
+ tmp_instance->config.destination,
+ tmp_instance->config.update_every,
+ tmp_instance->config.buffer_on_failures,
+ tmp_instance->config.timeoutms,
+ tmp_instance->config.options);
+#endif
+
+ if (unlikely(!exporting_config_exists) && !engine->config.hostname) {
+ engine->config.hostname = strdupz(config_get(instance_name, "hostname", netdata_configured_hostname));
+ engine->config.update_every =
+ config_get_number(instance_name, EXPORTING_UPDATE_EVERY_OPTION_NAME, EXPORTING_UPDATE_EVERY_DEFAULT);
+ }
+
+ next_connector_instance:
+ tmp_ci_list1 = tmp_ci_list->next;
+ freez(tmp_ci_list);
+ tmp_ci_list = tmp_ci_list1;
+ }
+
+ return engine;
+}
diff --git a/exporting/send_data.c b/exporting/send_data.c
new file mode 100644
index 0000000..1e932e9
--- /dev/null
+++ b/exporting/send_data.c
@@ -0,0 +1,440 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "exporting_engine.h"
+
+/**
+ * Check if TLS is enabled in the configuration
+ *
+ * @param type the connector type.
+ * @param options the instance options.
+ * @return Returns 1 if TLS should be enabled, 0 otherwise.
+ */
+static int exporting_tls_is_enabled(EXPORTING_CONNECTOR_TYPE type, EXPORTING_OPTIONS options)
+{
+ return (type == EXPORTING_CONNECTOR_TYPE_GRAPHITE_HTTP ||
+ type == EXPORTING_CONNECTOR_TYPE_JSON_HTTP ||
+ type == EXPORTING_CONNECTOR_TYPE_OPENTSDB_HTTP ||
+ type == EXPORTING_CONNECTOR_TYPE_PROMETHEUS_REMOTE_WRITE) &&
+ options & EXPORTING_OPTION_USE_TLS;
+}
+
+/**
+ * Discard response
+ *
+ * Discards a response received by an exporting connector instance after logging a sample of it to error.log
+ *
+ * @param buffer buffer with response data.
+ * @param instance an instance data structure.
+ * @return Always returns 0.
+ */
+int exporting_discard_response(BUFFER *buffer, struct instance *instance) {
+#if NETDATA_INTERNAL_CHECKS
+ char sample[1024];
+ const char *s = buffer_tostring(buffer);
+ char *d = sample, *e = &sample[sizeof(sample) - 1];
+
+ for(; *s && d < e ;s++) {
+ char c = *s;
+ if(unlikely(!isprint(c))) c = ' ';
+ *d++ = c;
+ }
+ *d = '\0';
+
+ debug(
+ D_BACKEND,
+ "EXPORTING: received %zu bytes from %s connector instance. Ignoring them. Sample: '%s'",
+ buffer_strlen(buffer),
+ instance->config.name,
+ sample);
+#else
+ UNUSED(instance);
+#endif /* NETDATA_INTERNAL_CHECKS */
+
+ buffer_flush(buffer);
+ return 0;
+}
+
+/**
+ * Receive response
+ *
+ * @param sock communication socket.
+ * @param instance an instance data structure.
+ */
+void simple_connector_receive_response(int *sock, struct instance *instance)
+{
+ static BUFFER *response = NULL;
+ if (!response)
+ response = buffer_create(4096);
+
+ struct stats *stats = &instance->stats;
+#ifdef ENABLE_HTTPS
+ uint32_t options = (uint32_t)instance->config.options;
+ struct simple_connector_data *connector_specific_data = instance->connector_specific_data;
+
+ if (options & EXPORTING_OPTION_USE_TLS)
+ ERR_clear_error();
+#endif
+
+ errno = 0;
+
+ // loop through to collect all data
+ while (*sock != -1 && errno != EWOULDBLOCK) {
+ ssize_t r;
+#ifdef ENABLE_HTTPS
+ if (exporting_tls_is_enabled(instance->config.type, options) &&
+ connector_specific_data->conn &&
+ connector_specific_data->flags == NETDATA_SSL_HANDSHAKE_COMPLETE) {
+ r = (ssize_t)SSL_read(connector_specific_data->conn,
+ &response->buffer[response->len],
+ (int) (response->size - response->len));
+
+ if (likely(r > 0)) {
+ // we received some data
+ response->len += r;
+ stats->received_bytes += r;
+ stats->receptions++;
+ continue;
+ } else {
+ int sslerrno = SSL_get_error(connector_specific_data->conn, (int) r);
+ u_long sslerr = ERR_get_error();
+ char buf[256];
+ switch (sslerrno) {
+ case SSL_ERROR_WANT_READ:
+ case SSL_ERROR_WANT_WRITE:
+ goto endloop;
+ default:
+ ERR_error_string_n(sslerr, buf, sizeof(buf));
+ error("SSL error (%s)",
+ ERR_error_string((long)SSL_get_error(connector_specific_data->conn, (int)r), NULL));
+ goto endloop;
+ }
+ }
+ } else {
+ r = recv(*sock, &response->buffer[response->len], response->size - response->len, MSG_DONTWAIT);
+ }
+#else
+ r = recv(*sock, &response->buffer[response->len], response->size - response->len, MSG_DONTWAIT);
+#endif
+ if (likely(r > 0)) {
+ // we received some data
+ response->len += r;
+ stats->received_bytes += r;
+ stats->receptions++;
+ } else if (r == 0) {
+ error("EXPORTING: '%s' closed the socket", instance->config.destination);
+ close(*sock);
+ *sock = -1;
+ } else {
+ // failed to receive data
+ if (errno != EAGAIN && errno != EWOULDBLOCK) {
+ error("EXPORTING: cannot receive data from '%s'.", instance->config.destination);
+ }
+ }
+
+#ifdef UNIT_TESTING
+ break;
+#endif
+ }
+#ifdef ENABLE_HTTPS
+endloop:
+#endif
+
+    // if we received data, process it
+ if (buffer_strlen(response))
+ instance->check_response(response, instance);
+}
+
+/**
+ * Send buffer to a server
+ *
+ * @param sock communication socket.
+ * @param failures the number of communication failures.
+ * @param instance an instance data structure.
+ * @param header a buffer with a request header (may be empty).
+ * @param buffer a buffer with metric data.
+ * @param buffered_metrics the number of metrics in the buffer.
+ */
+void simple_connector_send_buffer(
+ int *sock, int *failures, struct instance *instance, BUFFER *header, BUFFER *buffer, size_t buffered_metrics)
+{
+ int flags = 0;
+#ifdef MSG_NOSIGNAL
+ flags += MSG_NOSIGNAL;
+#endif
+
+#ifdef ENABLE_HTTPS
+ uint32_t options = (uint32_t)instance->config.options;
+ struct simple_connector_data *connector_specific_data = instance->connector_specific_data;
+
+ if (options & EXPORTING_OPTION_USE_TLS)
+ ERR_clear_error();
+#endif
+
+ struct stats *stats = &instance->stats;
+ ssize_t header_sent_bytes = 0;
+ ssize_t buffer_sent_bytes = 0;
+ size_t header_len = buffer_strlen(header);
+ size_t buffer_len = buffer_strlen(buffer);
+
+#ifdef ENABLE_HTTPS
+ if (exporting_tls_is_enabled(instance->config.type, options) &&
+ connector_specific_data->conn &&
+ connector_specific_data->flags == NETDATA_SSL_HANDSHAKE_COMPLETE) {
+ if (header_len)
+ header_sent_bytes = (ssize_t)SSL_write(connector_specific_data->conn, buffer_tostring(header), header_len);
+ if ((size_t)header_sent_bytes == header_len)
+ buffer_sent_bytes = (ssize_t)SSL_write(connector_specific_data->conn, buffer_tostring(buffer), buffer_len);
+ } else {
+ if (header_len)
+ header_sent_bytes = send(*sock, buffer_tostring(header), header_len, flags);
+ if ((size_t)header_sent_bytes == header_len)
+ buffer_sent_bytes = send(*sock, buffer_tostring(buffer), buffer_len, flags);
+ }
+#else
+ if (header_len)
+ header_sent_bytes = send(*sock, buffer_tostring(header), header_len, flags);
+ if ((size_t)header_sent_bytes == header_len)
+ buffer_sent_bytes = send(*sock, buffer_tostring(buffer), buffer_len, flags);
+#endif
+
+ if ((size_t)buffer_sent_bytes == buffer_len) {
+ // we sent the data successfully
+ stats->transmission_successes++;
+ stats->sent_metrics += buffered_metrics;
+ stats->sent_bytes += buffer_sent_bytes;
+
+ // reset the failures count
+ *failures = 0;
+
+ // empty the buffer
+ buffer_flush(buffer);
+ } else {
+ // oops! we couldn't send (all or some of the) data
+ error(
+ "EXPORTING: failed to write data to '%s'. Willing to write %zu bytes, wrote %zd bytes. Will re-connect.",
+ instance->config.destination,
+ buffer_len,
+ buffer_sent_bytes);
+ stats->transmission_failures++;
+
+ if(buffer_sent_bytes != -1)
+ stats->sent_bytes += buffer_sent_bytes;
+
+ // increment the counter we check for data loss
+ (*failures)++;
+
+ // close the socket - we will re-open it next time
+ close(*sock);
+ *sock = -1;
+ }
+}
+
+/**
+ * Simple connector worker
+ *
+ * Runs in a separate thread for every instance.
+ *
+ * @param instance_p an instance data structure.
+ */
+void simple_connector_worker(void *instance_p)
+{
+ struct instance *instance = (struct instance*)instance_p;
+ struct simple_connector_data *connector_specific_data = instance->connector_specific_data;
+
+#ifdef ENABLE_HTTPS
+ uint32_t options = (uint32_t)instance->config.options;
+
+ if (options & EXPORTING_OPTION_USE_TLS)
+ ERR_clear_error();
+#endif
+ struct simple_connector_config *connector_specific_config = instance->config.connector_specific_config;
+
+ int sock = -1;
+ struct timeval timeout = { .tv_sec = (instance->config.timeoutms * 1000) / 1000000,
+ .tv_usec = (instance->config.timeoutms * 1000) % 1000000 };
+ int failures = 0;
+
+ while (!instance->engine->exit) {
+ struct stats *stats = &instance->stats;
+ int send_stats = 0;
+
+ if (instance->data_is_ready)
+ send_stats = 1;
+
+ uv_mutex_lock(&instance->mutex);
+ if (!connector_specific_data->first_buffer->used || failures) {
+ while (!instance->data_is_ready)
+ uv_cond_wait(&instance->cond_var, &instance->mutex);
+ instance->data_is_ready = 0;
+ send_stats = 1;
+ }
+
+ if (unlikely(instance->engine->exit)) {
+ uv_mutex_unlock(&instance->mutex);
+ break;
+ }
+
+ // ------------------------------------------------------------------------
+ // detach buffer
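+        // Move this entry's header/buffer into the worker's local pair and hand
+        // the flushed local pair back to the ring, so the main thread can keep
+        // buffering new metrics while this thread sends the detached data.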
+
+ size_t buffered_metrics;
+
+ if (!connector_specific_data->previous_buffer ||
+ (connector_specific_data->previous_buffer == connector_specific_data->first_buffer &&
+ connector_specific_data->first_buffer->used == 1)) {
+ BUFFER *header, *buffer;
+
+ header = connector_specific_data->first_buffer->header;
+ buffer = connector_specific_data->first_buffer->buffer;
+ connector_specific_data->buffered_metrics = connector_specific_data->first_buffer->buffered_metrics;
+ connector_specific_data->buffered_bytes = connector_specific_data->first_buffer->buffered_bytes;
+
+ buffered_metrics = connector_specific_data->buffered_metrics;
+
+ buffer_flush(connector_specific_data->header);
+ connector_specific_data->first_buffer->header = connector_specific_data->header;
+ connector_specific_data->header = header;
+
+ buffer_flush(connector_specific_data->buffer);
+ connector_specific_data->first_buffer->buffer = connector_specific_data->buffer;
+ connector_specific_data->buffer = buffer;
+ } else {
+ buffered_metrics = connector_specific_data->buffered_metrics;
+ }
+
+ uv_mutex_unlock(&instance->mutex);
+
+ // ------------------------------------------------------------------------
+ // if we are connected, receive a response, without blocking
+
+ if (likely(sock != -1))
+ simple_connector_receive_response(&sock, instance);
+
+ // ------------------------------------------------------------------------
+ // if we are not connected, connect to a data collecting server
+
+ if (unlikely(sock == -1)) {
+ size_t reconnects = 0;
+
+ sock = connect_to_one_of(
+ instance->config.destination, connector_specific_config->default_port, &timeout, &reconnects, NULL, 0);
+#ifdef ENABLE_HTTPS
+ if (exporting_tls_is_enabled(instance->config.type, options) && sock != -1) {
+ if (netdata_exporting_ctx) {
+ if (sock_delnonblock(sock) < 0)
+ error("Exporting cannot remove the non-blocking flag from socket %d", sock);
+
+ if (connector_specific_data->conn == NULL) {
+ connector_specific_data->conn = SSL_new(netdata_exporting_ctx);
+ if (connector_specific_data->conn == NULL) {
+ error("Failed to allocate SSL structure to socket %d.", sock);
+ connector_specific_data->flags = NETDATA_SSL_NO_HANDSHAKE;
+ }
+ } else {
+ SSL_clear(connector_specific_data->conn);
+ }
+
+ if (connector_specific_data->conn) {
+ if (SSL_set_fd(connector_specific_data->conn, sock) != 1) {
+ error("Failed to set the socket to the SSL on socket fd %d.", sock);
+ connector_specific_data->flags = NETDATA_SSL_NO_HANDSHAKE;
+ } else {
+ connector_specific_data->flags = NETDATA_SSL_HANDSHAKE_COMPLETE;
+ SSL_set_connect_state(connector_specific_data->conn);
+ int err = SSL_connect(connector_specific_data->conn);
+ if (err != 1) {
+ err = SSL_get_error(connector_specific_data->conn, err);
+ error(
+ "SSL cannot connect with the server: %s ",
+ ERR_error_string((long)SSL_get_error(connector_specific_data->conn, err), NULL));
+ connector_specific_data->flags = NETDATA_SSL_NO_HANDSHAKE;
+ } else {
+ info("Exporting established a SSL connection.");
+
+ struct timeval tv;
+ tv.tv_sec = timeout.tv_sec / 4;
+ tv.tv_usec = 0;
+
+ if (!tv.tv_sec)
+ tv.tv_sec = 2;
+
+ if (setsockopt(sock, SOL_SOCKET, SO_RCVTIMEO, (const char *)&tv, sizeof(tv)))
+ error("Cannot set timeout to socket %d, this can block communication", sock);
+ }
+ }
+ }
+ }
+ }
+#endif
+
+ stats->reconnects += reconnects;
+ }
+
+ if (unlikely(instance->engine->exit))
+ break;
+
+ // ------------------------------------------------------------------------
+ // if we are connected, send our buffer to the data collecting server
+
+ failures = 0;
+
+ if (likely(sock != -1)) {
+ simple_connector_send_buffer(
+ &sock,
+ &failures,
+ instance,
+ connector_specific_data->header,
+ connector_specific_data->buffer,
+ buffered_metrics);
+ } else {
+ error("EXPORTING: failed to update '%s'", instance->config.destination);
+ stats->transmission_failures++;
+
+ // increment the counter we check for data loss
+ failures++;
+ }
+
+ if (!failures) {
+ connector_specific_data->first_buffer->buffered_metrics =
+ connector_specific_data->first_buffer->buffered_bytes = connector_specific_data->first_buffer->used = 0;
+ connector_specific_data->first_buffer = connector_specific_data->first_buffer->next;
+ }
+
+ if (unlikely(instance->engine->exit))
+ break;
+
+ if (send_stats) {
+ uv_mutex_lock(&instance->mutex);
+
+ stats->buffered_metrics = connector_specific_data->total_buffered_metrics;
+
+ send_internal_metrics(instance);
+
+ stats->buffered_metrics = 0;
+
+ // reset the internal monitoring chart counters
+ connector_specific_data->total_buffered_metrics =
+ stats->buffered_bytes =
+ stats->receptions =
+ stats->received_bytes =
+ stats->sent_metrics =
+ stats->sent_bytes =
+ stats->transmission_successes =
+ stats->transmission_failures =
+ stats->reconnects =
+ stats->data_lost_events =
+ stats->lost_metrics =
+ stats->lost_bytes = 0;
+
+ uv_mutex_unlock(&instance->mutex);
+ }
+
+#ifdef UNIT_TESTING
+ return;
+#endif
+ }
+
+#if ENABLE_PROMETHEUS_REMOTE_WRITE
+ if (instance->config.type == EXPORTING_CONNECTOR_TYPE_PROMETHEUS_REMOTE_WRITE)
+ clean_prometheus_remote_write(instance);
+#endif
+
+ simple_connector_cleanup(instance);
+}
diff --git a/exporting/send_internal_metrics.c b/exporting/send_internal_metrics.c
new file mode 100644
index 0000000..defb8d0
--- /dev/null
+++ b/exporting/send_internal_metrics.c
@@ -0,0 +1,172 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "exporting_engine.h"
+
+/**
+ * Create a chart for the main exporting thread CPU usage
+ *
+ * @param st_rusage the thread CPU usage chart
+ * @param rd_user a dimension for user CPU usage
+ * @param rd_system a dimension for system CPU usage
+ */
+void create_main_rusage_chart(RRDSET **st_rusage, RRDDIM **rd_user, RRDDIM **rd_system)
+{
+ if (*st_rusage && *rd_user && *rd_system)
+ return;
+
+ *st_rusage = rrdset_create_localhost(
+ "netdata", "exporting_main_thread_cpu", NULL, "exporting", "exporting_cpu_usage", "Netdata Main Exporting Thread CPU Usage",
+ "milliseconds/s", "exporting", NULL, 130600, localhost->rrd_update_every, RRDSET_TYPE_STACKED);
+
+ *rd_user = rrddim_add(*st_rusage, "user", NULL, 1, 1000, RRD_ALGORITHM_INCREMENTAL);
+ *rd_system = rrddim_add(*st_rusage, "system", NULL, 1, 1000, RRD_ALGORITHM_INCREMENTAL);
+}
+
+/**
+ * Send the main exporting thread CPU usage
+ *
+ * @param st_rusage a thread CPU usage chart
+ * @param rd_user a dimension for user CPU usage
+ * @param rd_system a dimension for system CPU usage
+ */
+void send_main_rusage(RRDSET *st_rusage, RRDDIM *rd_user, RRDDIM *rd_system)
+{
+ struct rusage thread;
+ getrusage(RUSAGE_THREAD, &thread);
+
+ if (likely(st_rusage->counter_done))
+ rrdset_next(st_rusage);
+
+ rrddim_set_by_pointer(st_rusage, rd_user, thread.ru_utime.tv_sec * 1000000ULL + thread.ru_utime.tv_usec);
+ rrddim_set_by_pointer(st_rusage, rd_system, thread.ru_stime.tv_sec * 1000000ULL + thread.ru_stime.tv_usec);
+
+ rrdset_done(st_rusage);
+}
+
+/**
+ * Send internal metrics for an instance
+ *
+ * Send performance metrics for the operation of the exporting engine itself to the Netdata database.
+ *
+ * @param instance an instance data structure.
+ */
+void send_internal_metrics(struct instance *instance)
+{
+ struct stats *stats = &instance->stats;
+
+ // ------------------------------------------------------------------------
+ // create charts for monitoring the exporting operations
+
+ if (!stats->initialized) {
+ char id[RRD_ID_LENGTH_MAX + 1];
+ BUFFER *family = buffer_create(0);
+
+ buffer_sprintf(family, "exporting_%s", instance->config.name);
+
+ snprintf(id, RRD_ID_LENGTH_MAX, "exporting_%s_metrics", instance->config.name);
+ netdata_fix_chart_id(id);
+
+ stats->st_metrics = rrdset_create_localhost(
+ "netdata", id, NULL, buffer_tostring(family), "exporting_buffer", "Netdata Buffered Metrics", "metrics", "exporting", NULL,
+ 130610, instance->config.update_every, RRDSET_TYPE_LINE);
+
+ stats->rd_buffered_metrics = rrddim_add(stats->st_metrics, "buffered", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ stats->rd_lost_metrics = rrddim_add(stats->st_metrics, "lost", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ stats->rd_sent_metrics = rrddim_add(stats->st_metrics, "sent", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+
+ // ------------------------------------------------------------------------
+
+ snprintf(id, RRD_ID_LENGTH_MAX, "exporting_%s_bytes", instance->config.name);
+ netdata_fix_chart_id(id);
+
+ stats->st_bytes = rrdset_create_localhost(
+ "netdata", id, NULL, buffer_tostring(family), "exporting_data_size", "Netdata Exporting Data Size", "KiB", "exporting", NULL,
+ 130620, instance->config.update_every, RRDSET_TYPE_AREA);
+
+ stats->rd_buffered_bytes = rrddim_add(stats->st_bytes, "buffered", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
+ stats->rd_lost_bytes = rrddim_add(stats->st_bytes, "lost", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
+ stats->rd_sent_bytes = rrddim_add(stats->st_bytes, "sent", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
+ stats->rd_received_bytes = rrddim_add(stats->st_bytes, "received", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
+
+ // ------------------------------------------------------------------------
+
+ snprintf(id, RRD_ID_LENGTH_MAX, "exporting_%s_ops", instance->config.name);
+ netdata_fix_chart_id(id);
+
+ stats->st_ops = rrdset_create_localhost(
+ "netdata", id, NULL, buffer_tostring(family), "exporting_operations", "Netdata Exporting Operations", "operations", "exporting",
+ NULL, 130630, instance->config.update_every, RRDSET_TYPE_LINE);
+
+ stats->rd_transmission_successes = rrddim_add(stats->st_ops, "write", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ stats->rd_data_lost_events = rrddim_add(stats->st_ops, "discard", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ stats->rd_reconnects = rrddim_add(stats->st_ops, "reconnect", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ stats->rd_transmission_failures = rrddim_add(stats->st_ops, "failure", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ stats->rd_receptions = rrddim_add(stats->st_ops, "read", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+
+ // ------------------------------------------------------------------------
+
+ snprintf(id, RRD_ID_LENGTH_MAX, "exporting_%s_thread_cpu", instance->config.name);
+ netdata_fix_chart_id(id);
+
+ stats->st_rusage = rrdset_create_localhost(
+ "netdata", id, NULL, buffer_tostring(family), "exporting_instance", "Netdata Exporting Instance Thread CPU Usage",
+ "milliseconds/s", "exporting", NULL, 130640, instance->config.update_every, RRDSET_TYPE_STACKED);
+
+ stats->rd_user = rrddim_add(stats->st_rusage, "user", NULL, 1, 1000, RRD_ALGORITHM_INCREMENTAL);
+ stats->rd_system = rrddim_add(stats->st_rusage, "system", NULL, 1, 1000, RRD_ALGORITHM_INCREMENTAL);
+
+ buffer_free(family);
+
+ stats->initialized = 1;
+ }
+
+ // ------------------------------------------------------------------------
+ // update the monitoring charts
+
+ if (likely(stats->st_metrics->counter_done))
+ rrdset_next(stats->st_metrics);
+
+ rrddim_set_by_pointer(stats->st_metrics, stats->rd_buffered_metrics, stats->buffered_metrics);
+ rrddim_set_by_pointer(stats->st_metrics, stats->rd_lost_metrics, stats->lost_metrics);
+ rrddim_set_by_pointer(stats->st_metrics, stats->rd_sent_metrics, stats->sent_metrics);
+
+ rrdset_done(stats->st_metrics);
+
+ // ------------------------------------------------------------------------
+
+ if (likely(stats->st_bytes->counter_done))
+ rrdset_next(stats->st_bytes);
+
+ rrddim_set_by_pointer(stats->st_bytes, stats->rd_buffered_bytes, stats->buffered_bytes);
+ rrddim_set_by_pointer(stats->st_bytes, stats->rd_lost_bytes, stats->lost_bytes);
+ rrddim_set_by_pointer(stats->st_bytes, stats->rd_sent_bytes, stats->sent_bytes);
+ rrddim_set_by_pointer(stats->st_bytes, stats->rd_received_bytes, stats->received_bytes);
+
+ rrdset_done(stats->st_bytes);
+
+ // ------------------------------------------------------------------------
+
+ if (likely(stats->st_ops->counter_done))
+ rrdset_next(stats->st_ops);
+
+ rrddim_set_by_pointer(stats->st_ops, stats->rd_transmission_successes, stats->transmission_successes);
+ rrddim_set_by_pointer(stats->st_ops, stats->rd_data_lost_events, stats->data_lost_events);
+ rrddim_set_by_pointer(stats->st_ops, stats->rd_reconnects, stats->reconnects);
+ rrddim_set_by_pointer(stats->st_ops, stats->rd_transmission_failures, stats->transmission_failures);
+ rrddim_set_by_pointer(stats->st_ops, stats->rd_receptions, stats->receptions);
+
+ rrdset_done(stats->st_ops);
+
+ // ------------------------------------------------------------------------
+
+ struct rusage thread;
+ getrusage(RUSAGE_THREAD, &thread);
+
+ if (likely(stats->st_rusage->counter_done))
+ rrdset_next(stats->st_rusage);
+
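+ // getrusage() reports CPU time as struct timeval (seconds plus microseconds);
+ // the divisor of 1000 on rd_user/rd_system, combined with
+ // RRD_ALGORITHM_INCREMENTAL, turns these raw counters into the
+ // milliseconds/s rate declared in the chart units.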
+ rrddim_set_by_pointer(stats->st_rusage, stats->rd_user, thread.ru_utime.tv_sec * 1000000ULL + thread.ru_utime.tv_usec);
+ rrddim_set_by_pointer(stats->st_rusage, stats->rd_system, thread.ru_stime.tv_sec * 1000000ULL + thread.ru_stime.tv_usec);
+
+ rrdset_done(stats->st_rusage);
+}
diff --git a/exporting/tests/Makefile.am b/exporting/tests/Makefile.am
new file mode 100644
index 0000000..babdcf0
--- /dev/null
+++ b/exporting/tests/Makefile.am
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
diff --git a/exporting/tests/exporting_doubles.c b/exporting/tests/exporting_doubles.c
new file mode 100644
index 0000000..3c73e03
--- /dev/null
+++ b/exporting/tests/exporting_doubles.c
@@ -0,0 +1,395 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "test_exporting_engine.h"
+
+struct engine *__real_read_exporting_config();
+struct engine *__wrap_read_exporting_config()
+{
+ function_called();
+ return mock_ptr_type(struct engine *);
+}
+
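+// A minimal sketch of how a test arms this wrapper (it assumes the test
+// binary is linked with GNU ld's -Wl,--wrap=read_exporting_config, so calls
+// to read_exporting_config() resolve here):
+//
+//     expect_function_call(__wrap_read_exporting_config);
+//     will_return(__wrap_read_exporting_config, engine);
+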
+struct engine *__mock_read_exporting_config()
+{
+ struct engine *engine = calloc(1, sizeof(struct engine));
+ engine->config.hostname = strdupz("test_engine_host");
+ engine->config.update_every = 3;
+
+ engine->instance_root = calloc(1, sizeof(struct instance));
+ struct instance *instance = engine->instance_root;
+ instance->engine = engine;
+ instance->config.type = EXPORTING_CONNECTOR_TYPE_GRAPHITE;
+ instance->config.name = strdupz("instance_name");
+ instance->config.destination = strdupz("localhost");
+ instance->config.prefix = strdupz("netdata");
+ instance->config.hostname = strdupz("test-host");
+ instance->config.update_every = 1;
+ instance->config.buffer_on_failures = 10;
+ instance->config.timeoutms = 10000;
+ instance->config.charts_pattern = simple_pattern_create("*", NULL, SIMPLE_PATTERN_EXACT);
+ instance->config.hosts_pattern = simple_pattern_create("*", NULL, SIMPLE_PATTERN_EXACT);
+ instance->config.options = EXPORTING_SOURCE_DATA_AS_COLLECTED | EXPORTING_OPTION_SEND_NAMES;
+
+ return engine;
+}
+
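+// Everything allocated here is released by teardown_configured_engine() in
+// exporting_fixtures.c; tests that build an engine through this mock should
+// pair it with that teardown to stay clean under CMocka's allocation guards.
+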
+int __real_init_connectors(struct engine *engine);
+int __wrap_init_connectors(struct engine *engine)
+{
+ function_called();
+ check_expected_ptr(engine);
+ return mock_type(int);
+}
+
+int __real_mark_scheduled_instances(struct engine *engine);
+int __wrap_mark_scheduled_instances(struct engine *engine)
+{
+ function_called();
+ check_expected_ptr(engine);
+ return mock_type(int);
+}
+
+calculated_number __real_exporting_calculate_value_from_stored_data(
+ struct instance *instance,
+ RRDDIM *rd,
+ time_t *last_timestamp);
+calculated_number __wrap_exporting_calculate_value_from_stored_data(
+ struct instance *instance,
+ RRDDIM *rd,
+ time_t *last_timestamp)
+{
+ (void)instance;
+ (void)rd;
+
+ *last_timestamp = 15052;
+
+ function_called();
+ return mock_type(calculated_number);
+}
+
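+// The fixed timestamp 15052 is one second after the dimension's
+// last_collected_time (15051) set up in exporting_fixtures.c; the
+// format_dimension_stored_* tests assert on that value verbatim.
+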
+int __real_prepare_buffers(struct engine *engine);
+int __wrap_prepare_buffers(struct engine *engine)
+{
+ function_called();
+ check_expected_ptr(engine);
+ return mock_type(int);
+}
+
+void __wrap_create_main_rusage_chart(RRDSET **st_rusage, RRDDIM **rd_user, RRDDIM **rd_system)
+{
+ function_called();
+ check_expected_ptr(st_rusage);
+ check_expected_ptr(rd_user);
+ check_expected_ptr(rd_system);
+}
+
+void __wrap_send_main_rusage(RRDSET *st_rusage, RRDDIM *rd_user, RRDDIM *rd_system)
+{
+ function_called();
+ check_expected_ptr(st_rusage);
+ check_expected_ptr(rd_user);
+ check_expected_ptr(rd_system);
+}
+
+int __wrap_send_internal_metrics(struct instance *instance)
+{
+ function_called();
+ check_expected_ptr(instance);
+ return mock_type(int);
+}
+
+int __wrap_rrdhost_is_exportable(struct instance *instance, RRDHOST *host)
+{
+ function_called();
+ check_expected_ptr(instance);
+ check_expected_ptr(host);
+ return mock_type(int);
+}
+
+int __wrap_rrdset_is_exportable(struct instance *instance, RRDSET *st)
+{
+ function_called();
+ check_expected_ptr(instance);
+ check_expected_ptr(st);
+ return mock_type(int);
+}
+
+int __mock_start_batch_formatting(struct instance *instance)
+{
+ function_called();
+ check_expected_ptr(instance);
+ return mock_type(int);
+}
+
+int __mock_start_host_formatting(struct instance *instance, RRDHOST *host)
+{
+ function_called();
+ check_expected_ptr(instance);
+ check_expected_ptr(host);
+ return mock_type(int);
+}
+
+int __mock_start_chart_formatting(struct instance *instance, RRDSET *st)
+{
+ function_called();
+ check_expected_ptr(instance);
+ check_expected_ptr(st);
+ return mock_type(int);
+}
+
+int __mock_metric_formatting(struct instance *instance, RRDDIM *rd)
+{
+ function_called();
+ check_expected_ptr(instance);
+ check_expected_ptr(rd);
+ return mock_type(int);
+}
+
+int __mock_end_chart_formatting(struct instance *instance, RRDSET *st)
+{
+ function_called();
+ check_expected_ptr(instance);
+ check_expected_ptr(st);
+ return mock_type(int);
+}
+
+int __mock_end_host_formatting(struct instance *instance, RRDHOST *host)
+{
+ function_called();
+ check_expected_ptr(instance);
+ check_expected_ptr(host);
+ return mock_type(int);
+}
+
+int __mock_end_batch_formatting(struct instance *instance)
+{
+ function_called();
+ check_expected_ptr(instance);
+ return mock_type(int);
+}
+
+int __wrap_simple_connector_end_batch(struct instance *instance)
+{
+ function_called();
+ check_expected_ptr(instance);
+ return mock_type(int);
+}
+
+#if ENABLE_PROMETHEUS_REMOTE_WRITE
+void *__wrap_init_write_request()
+{
+ function_called();
+ return mock_ptr_type(void *);
+}
+
+void __wrap_add_host_info(
+ void *write_request_p,
+ const char *name, const char *instance, const char *application, const char *version, const int64_t timestamp)
+{
+ function_called();
+ check_expected_ptr(write_request_p);
+ check_expected_ptr(name);
+ check_expected_ptr(instance);
+ check_expected_ptr(application);
+ check_expected_ptr(version);
+ check_expected(timestamp);
+}
+
+void __wrap_add_label(void *write_request_p, char *key, char *value)
+{
+ function_called();
+ check_expected_ptr(write_request_p);
+ check_expected_ptr(key);
+ check_expected_ptr(value);
+}
+
+void __wrap_add_metric(
+ void *write_request_p,
+ const char *name, const char *chart, const char *family, const char *dimension,
+ const char *instance, const double value, const int64_t timestamp)
+{
+ function_called();
+ check_expected_ptr(write_request_p);
+ check_expected_ptr(name);
+ check_expected_ptr(chart);
+ check_expected_ptr(family);
+ check_expected_ptr(dimension);
+ check_expected_ptr(instance);
+ check_expected(value);
+ check_expected(timestamp);
+}
+#endif // ENABLE_PROMETHEUS_REMOTE_WRITE
+
+#if HAVE_KINESIS
+void __wrap_aws_sdk_init()
+{
+ function_called();
+}
+
+void __wrap_kinesis_init(
+ void *kinesis_specific_data_p, const char *region, const char *access_key_id, const char *secret_key,
+ const long timeout)
+{
+ function_called();
+ check_expected_ptr(kinesis_specific_data_p);
+ check_expected_ptr(region);
+ check_expected_ptr(access_key_id);
+ check_expected_ptr(secret_key);
+ check_expected(timeout);
+}
+
+void __wrap_kinesis_put_record(
+ void *kinesis_specific_data_p, const char *stream_name, const char *partition_key, const char *data,
+ size_t data_len)
+{
+ function_called();
+ check_expected_ptr(kinesis_specific_data_p);
+ check_expected_ptr(stream_name);
+ check_expected_ptr(partition_key);
+ check_expected_ptr(data);
+ check_expected_ptr(data);
+ check_expected(data_len);
+}
+
+int __wrap_kinesis_get_result(void *request_outcomes_p, char *error_message, size_t *sent_bytes, size_t *lost_bytes)
+{
+ function_called();
+ check_expected_ptr(request_outcomes_p);
+ check_expected_ptr(error_message);
+ check_expected_ptr(sent_bytes);
+ check_expected_ptr(lost_bytes);
+ return mock_type(int);
+}
+#endif // HAVE_KINESIS
+
+#if ENABLE_EXPORTING_PUBSUB
+int __wrap_pubsub_init(
+ void *pubsub_specific_data_p, char *error_message, const char *destination, const char *credentials_file,
+ const char *project_id, const char *topic_id)
+{
+ function_called();
+ check_expected_ptr(pubsub_specific_data_p);
+ check_expected_ptr(error_message);
+ check_expected_ptr(destination);
+ check_expected_ptr(credentials_file);
+ check_expected_ptr(project_id);
+ check_expected_ptr(topic_id);
+ return mock_type(int);
+}
+
+int __wrap_pubsub_add_message(void *pubsub_specific_data_p, char *data)
+{
+ function_called();
+ check_expected_ptr(pubsub_specific_data_p);
+ check_expected_ptr(data);
+ return mock_type(int);
+}
+
+int __wrap_pubsub_publish(
+ void *pubsub_specific_data_p, char *error_message, size_t buffered_metrics, size_t buffered_bytes)
+{
+ function_called();
+ check_expected_ptr(pubsub_specific_data_p);
+ check_expected_ptr(error_message);
+ check_expected(buffered_metrics);
+ check_expected(buffered_bytes);
+ return mock_type(int);
+}
+
+int __wrap_pubsub_get_result(
+ void *pubsub_specific_data_p, char *error_message,
+ size_t *sent_metrics, size_t *sent_bytes, size_t *lost_metrics, size_t *lost_bytes)
+{
+ function_called();
+ check_expected_ptr(pubsub_specific_data_p);
+ check_expected_ptr(error_message);
+ check_expected_ptr(sent_metrics);
+ check_expected_ptr(sent_bytes);
+ check_expected_ptr(lost_metrics);
+ check_expected_ptr(lost_bytes);
+ return mock_type(int);
+}
+#endif // ENABLE_EXPORTING_PUBSUB
+
+#if HAVE_MONGOC
+void __wrap_mongoc_init()
+{
+ function_called();
+}
+
+mongoc_uri_t *__wrap_mongoc_uri_new_with_error(const char *uri_string, bson_error_t *error)
+{
+ function_called();
+ check_expected_ptr(uri_string);
+ check_expected_ptr(error);
+ return mock_ptr_type(mongoc_uri_t *);
+}
+
+int32_t __wrap_mongoc_uri_get_option_as_int32(const mongoc_uri_t *uri, const char *option, int32_t fallback)
+{
+ function_called();
+ check_expected_ptr(uri);
+ check_expected_ptr(option);
+ check_expected(fallback);
+ return mock_type(int32_t);
+}
+
+bool __wrap_mongoc_uri_set_option_as_int32(const mongoc_uri_t *uri, const char *option, int32_t value)
+{
+ function_called();
+ check_expected_ptr(uri);
+ check_expected_ptr(option);
+ check_expected(value);
+ return mock_type(bool);
+}
+
+mongoc_client_t *__wrap_mongoc_client_new_from_uri(const mongoc_uri_t *uri)
+{
+ function_called();
+ check_expected_ptr(uri);
+ return mock_ptr_type(mongoc_client_t *);
+}
+
+bool __wrap_mongoc_client_set_appname(mongoc_client_t *client, const char *appname)
+{
+ function_called();
+ check_expected_ptr(client);
+ check_expected_ptr(appname);
+ return mock_type(bool);
+}
+
+mongoc_collection_t *
+__wrap_mongoc_client_get_collection(mongoc_client_t *client, const char *db, const char *collection)
+{
+ function_called();
+ check_expected_ptr(client);
+ check_expected_ptr(db);
+ check_expected_ptr(collection);
+ return mock_ptr_type(mongoc_collection_t *);
+}
+
+void __wrap_mongoc_uri_destroy(mongoc_uri_t *uri)
+{
+ function_called();
+ check_expected_ptr(uri);
+}
+
+bool __wrap_mongoc_collection_insert_many(
+ mongoc_collection_t *collection,
+ const bson_t **documents,
+ size_t n_documents,
+ const bson_t *opts,
+ bson_t *reply,
+ bson_error_t *error)
+{
+ function_called();
+ check_expected_ptr(collection);
+ check_expected_ptr(documents);
+ check_expected(n_documents);
+ check_expected_ptr(opts);
+ check_expected_ptr(reply);
+ check_expected_ptr(error);
+ return mock_type(bool);
+}
+#endif // HAVE_MONGOC
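+
+// Each guarded block above is compiled only when the matching connector is
+// enabled, so the --wrap list handed to the linker has to track the build
+// configuration; a wrapper that is linked in but never armed with
+// expect_*()/will_return() fails the test on its first call.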
diff --git a/exporting/tests/exporting_fixtures.c b/exporting/tests/exporting_fixtures.c
new file mode 100644
index 0000000..00bb0ed
--- /dev/null
+++ b/exporting/tests/exporting_fixtures.c
@@ -0,0 +1,163 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "test_exporting_engine.h"
+
+int setup_configured_engine(void **state)
+{
+ struct engine *engine = __mock_read_exporting_config();
+ engine->instance_root->data_is_ready = 1;
+
+ *state = engine;
+
+ return 0;
+}
+
+int teardown_configured_engine(void **state)
+{
+ struct engine *engine = *state;
+
+ struct instance *instance = engine->instance_root;
+ free((void *)instance->config.destination);
+ free((void *)instance->config.name);
+ free((void *)instance->config.prefix);
+ free((void *)instance->config.hostname);
+ simple_pattern_free(instance->config.charts_pattern);
+ simple_pattern_free(instance->config.hosts_pattern);
+ free(instance);
+
+ free((void *)engine->config.hostname);
+ free(engine);
+
+ return 0;
+}
+
+int setup_rrdhost()
+{
+ localhost = calloc(1, sizeof(RRDHOST));
+
+ localhost->rrd_update_every = 1;
+
+ localhost->tags = strdupz("TAG1=VALUE1 TAG2=VALUE2");
+
+ struct label *label = calloc(1, sizeof(struct label));
+ label->key = strdupz("key1");
+ label->value = strdupz("value1");
+ label->label_source = LABEL_SOURCE_NETDATA_CONF;
+ localhost->labels.head = label;
+
+ label = calloc(1, sizeof(struct label));
+ label->key = strdupz("key2");
+ label->value = strdupz("value2");
+ label->label_source = LABEL_SOURCE_AUTO;
+ localhost->labels.head->next = label;
+
+ localhost->rrdset_root = calloc(1, sizeof(RRDSET));
+ RRDSET *st = localhost->rrdset_root;
+ st->rrdhost = localhost;
+ strcpy(st->id, "chart_id");
+ st->name = strdupz("chart_name");
+ st->flags |= RRDSET_FLAG_ENABLED;
+ st->rrd_memory_mode |= RRD_MEMORY_MODE_SAVE;
+ st->update_every = 1;
+
+ localhost->rrdset_root->dimensions = calloc(1, sizeof(RRDDIM));
+ RRDDIM *rd = localhost->rrdset_root->dimensions;
+ rd->rrdset = st;
+ rd->id = strdupz("dimension_id");
+ rd->name = strdupz("dimension_name");
+ rd->last_collected_value = 123000321;
+ rd->last_collected_time.tv_sec = 15051;
+ rd->collections_counter++;
+ rd->next = NULL;
+
+ rd->state = calloc(1, sizeof(*rd->state));
+ rd->state->query_ops.oldest_time = __mock_rrddim_query_oldest_time;
+ rd->state->query_ops.latest_time = __mock_rrddim_query_latest_time;
+ rd->state->query_ops.init = __mock_rrddim_query_init;
+ rd->state->query_ops.is_finished = __mock_rrddim_query_is_finished;
+ rd->state->query_ops.next_metric = __mock_rrddim_query_next_metric;
+ rd->state->query_ops.finalize = __mock_rrddim_query_finalize;
+
+ return 0;
+}
+
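+// The host/chart/dimension chain is assembled by hand instead of through the
+// real registry, so the only storage entry points the tests exercise are the
+// query_ops wired above to the __mock_rrddim_query_* doubles.
+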
+int teardown_rrdhost()
+{
+ RRDDIM *rd = localhost->rrdset_root->dimensions;
+ free((void *)rd->name);
+ free((void *)rd->id);
+ free(rd->state);
+ free(rd);
+
+ RRDSET *st = localhost->rrdset_root;
+ free((void *)st->name);
+ free(st);
+
+ free(localhost->labels.head->next->key);
+ free(localhost->labels.head->next->value);
+ free(localhost->labels.head->next);
+ free(localhost->labels.head->key);
+ free(localhost->labels.head->value);
+ free(localhost->labels.head);
+
+ free((void *)localhost->tags);
+ free(localhost);
+
+ return 0;
+}
+
+int setup_initialized_engine(void **state)
+{
+ setup_configured_engine(state);
+
+ struct engine *engine = *state;
+ init_connectors_in_tests(engine);
+
+ setup_rrdhost();
+
+ return 0;
+}
+
+int teardown_initialized_engine(void **state)
+{
+ struct engine *engine = *state;
+
+ teardown_rrdhost();
+ buffer_free(engine->instance_root->labels);
+ buffer_free(engine->instance_root->buffer);
+ teardown_configured_engine(state);
+
+ return 0;
+}
+
+int setup_prometheus(void **state)
+{
+ (void)state;
+
+ prometheus_exporter_instance = calloc(1, sizeof(struct instance));
+
+ setup_rrdhost();
+
+ prometheus_exporter_instance->config.update_every = 10;
+
+ prometheus_exporter_instance->config.options |=
+ EXPORTING_OPTION_SEND_NAMES | EXPORTING_OPTION_SEND_CONFIGURED_LABELS | EXPORTING_OPTION_SEND_AUTOMATIC_LABELS;
+
+ prometheus_exporter_instance->config.charts_pattern = simple_pattern_create("*", NULL, SIMPLE_PATTERN_EXACT);
+ prometheus_exporter_instance->config.hosts_pattern = simple_pattern_create("*", NULL, SIMPLE_PATTERN_EXACT);
+
+ return 0;
+}
+
+int teardown_prometheus(void **state)
+{
+ (void)state;
+
+ teardown_rrdhost();
+
+ simple_pattern_free(prometheus_exporter_instance->config.charts_pattern);
+ simple_pattern_free(prometheus_exporter_instance->config.hosts_pattern);
+ free(prometheus_exporter_instance);
+
+ return 0;
+}
diff --git a/exporting/tests/netdata_doubles.c b/exporting/tests/netdata_doubles.c
new file mode 100644
index 0000000..f4da776
--- /dev/null
+++ b/exporting/tests/netdata_doubles.c
@@ -0,0 +1,247 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "test_exporting_engine.h"
+
+// Use memory allocation functions guarded by CMocka in strdupz
+const char *__wrap_strdupz(const char *s)
+{
+ char *duplicate = malloc(sizeof(char) * (strlen(s) + 1));
+ strcpy(duplicate, s);
+
+ return duplicate;
+}
+
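+// When the tests are built with -DUNIT_TESTING, cmocka.h redefines malloc()
+// and free() to its leak-tracking test_malloc()/test_free(), which is what
+// makes the plain malloc() above safe in this strdupz() stand-in.
+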
+time_t __wrap_now_realtime_sec(void)
+{
+ function_called();
+ return mock_type(time_t);
+}
+
+void __wrap_uv_thread_set_name_np(uv_thread_t ut, const char* name)
+{
+ (void)ut;
+ (void)name;
+
+ function_called();
+}
+
+void __wrap_info_int(const char *file, const char *function, const unsigned long line, const char *fmt, ...)
+{
+ (void)file;
+ (void)function;
+ (void)line;
+
+ function_called();
+
+ va_list args;
+
+ va_start(args, fmt);
+ vsnprintf(log_line, MAX_LOG_LINE, fmt, args);
+ va_end(args);
+}
+
+int __wrap_connect_to_one_of(
+ const char *destination,
+ int default_port,
+ struct timeval *timeout,
+ size_t *reconnects_counter,
+ char *connected_to,
+ size_t connected_to_size)
+{
+ (void)timeout;
+
+ function_called();
+
+ check_expected(destination);
+ check_expected_ptr(default_port);
+ // TODO: check_expected_ptr(timeout);
+ check_expected(reconnects_counter);
+ check_expected(connected_to);
+ check_expected(connected_to_size);
+
+ return mock_type(int);
+}
+
+void __rrdhost_check_rdlock(RRDHOST *host, const char *file, const char *function, const unsigned long line)
+{
+ (void)host;
+ (void)file;
+ (void)function;
+ (void)line;
+}
+
+void __rrdset_check_rdlock(RRDSET *st, const char *file, const char *function, const unsigned long line)
+{
+ (void)st;
+ (void)file;
+ (void)function;
+ (void)line;
+}
+
+void __rrd_check_rdlock(const char *file, const char *function, const unsigned long line)
+{
+ (void)file;
+ (void)function;
+ (void)line;
+}
+
+RRDSET *rrdset_create_custom(
+ RRDHOST *host,
+ const char *type,
+ const char *id,
+ const char *name,
+ const char *family,
+ const char *context,
+ const char *title,
+ const char *units,
+ const char *plugin,
+ const char *module,
+ long priority,
+ int update_every,
+ RRDSET_TYPE chart_type,
+ RRD_MEMORY_MODE memory_mode,
+ long history_entries)
+{
+ check_expected_ptr(host);
+ check_expected_ptr(type);
+ check_expected_ptr(id);
+ check_expected_ptr(name);
+ check_expected_ptr(family);
+ check_expected_ptr(context);
+ UNUSED(title);
+ check_expected_ptr(units);
+ check_expected_ptr(plugin);
+ check_expected_ptr(module);
+ check_expected(priority);
+ check_expected(update_every);
+ check_expected(chart_type);
+ UNUSED(memory_mode);
+ UNUSED(history_entries);
+
+ function_called();
+
+ return mock_ptr_type(RRDSET *);
+}
+
+void rrdset_next_usec(RRDSET *st, usec_t microseconds)
+{
+ check_expected_ptr(st);
+ UNUSED(microseconds);
+
+ function_called();
+}
+
+void rrdset_done(RRDSET *st)
+{
+ check_expected_ptr(st);
+
+ function_called();
+}
+
+RRDDIM *rrddim_add_custom(
+ RRDSET *st,
+ const char *id,
+ const char *name,
+ collected_number multiplier,
+ collected_number divisor,
+ RRD_ALGORITHM algorithm,
+ RRD_MEMORY_MODE memory_mode)
+{
+ check_expected_ptr(st);
+ UNUSED(id);
+ check_expected_ptr(name);
+ check_expected(multiplier);
+ check_expected(divisor);
+ check_expected(algorithm);
+ UNUSED(memory_mode);
+
+ function_called();
+
+ return NULL;
+}
+
+collected_number rrddim_set_by_pointer(RRDSET *st, RRDDIM *rd, collected_number value)
+{
+ check_expected_ptr(st);
+ UNUSED(rd);
+ UNUSED(value);
+
+ function_called();
+
+ return 0;
+}
+
+const char *rrd_memory_mode_name(RRD_MEMORY_MODE id)
+{
+ (void)id;
+ return RRD_MEMORY_MODE_NONE_NAME;
+}
+
+calculated_number rrdvar2number(RRDVAR *rv)
+{
+ (void)rv;
+ return 0;
+}
+
+int foreach_host_variable_callback(RRDHOST *host, int (*callback)(RRDVAR *rv, void *data), void *data)
+{
+ (void)host;
+ (void)callback;
+ (void)data;
+ return 0;
+}
+
+void rrdset_update_heterogeneous_flag(RRDSET *st)
+{
+ (void)st;
+}
+
+time_t __mock_rrddim_query_oldest_time(RRDDIM *rd)
+{
+ (void)rd;
+
+ function_called();
+ return mock_type(time_t);
+}
+
+time_t __mock_rrddim_query_latest_time(RRDDIM *rd)
+{
+ (void)rd;
+
+ function_called();
+ return mock_type(time_t);
+}
+
+void __mock_rrddim_query_init(RRDDIM *rd, struct rrddim_query_handle *handle, time_t start_time, time_t end_time)
+{
+ (void)rd;
+ (void)handle;
+
+ function_called();
+ check_expected(start_time);
+ check_expected(end_time);
+}
+
+int __mock_rrddim_query_is_finished(struct rrddim_query_handle *handle)
+{
+ (void)handle;
+
+ function_called();
+ return mock_type(int);
+}
+
+storage_number __mock_rrddim_query_next_metric(struct rrddim_query_handle *handle, time_t *current_time)
+{
+ (void)handle;
+ (void)current_time;
+
+ function_called();
+ return mock_type(storage_number);
+}
+
+void __mock_rrddim_query_finalize(struct rrddim_query_handle *handle)
+{
+ (void)handle;
+
+ function_called();
+}
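+
+// test_exporting_calculate_value_from_stored_data() in test_exporting_engine.c
+// shows the call order these doubles expect: oldest_time, latest_time, init,
+// then is_finished/next_metric pairs until is_finished returns 1, and finally
+// finalize.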
diff --git a/exporting/tests/system_doubles.c b/exporting/tests/system_doubles.c
new file mode 100644
index 0000000..ca85800
--- /dev/null
+++ b/exporting/tests/system_doubles.c
@@ -0,0 +1,61 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "test_exporting_engine.h"
+
+void __wrap_uv_thread_create(uv_thread_t thread, void (*worker)(void *arg), void *arg)
+{
+ function_called();
+
+ check_expected_ptr(thread);
+ check_expected_ptr(worker);
+ check_expected_ptr(arg);
+}
+
+void __wrap_uv_mutex_lock(uv_mutex_t *mutex)
+{
+ (void)mutex;
+}
+
+void __wrap_uv_mutex_unlock(uv_mutex_t *mutex)
+{
+ (void)mutex;
+}
+
+void __wrap_uv_cond_signal(uv_cond_t *cond_var)
+{
+ (void)cond_var;
+}
+
+void __wrap_uv_cond_wait(uv_cond_t *cond_var, uv_mutex_t *mutex)
+{
+ (void)cond_var;
+ (void)mutex;
+}
+
+ssize_t __wrap_recv(int sockfd, void *buf, size_t len, int flags)
+{
+ function_called();
+
+ check_expected(sockfd);
+ check_expected_ptr(buf);
+ check_expected(len);
+ check_expected(flags);
+
+ const char *mock_string = "Test recv";
+ strcpy(buf, mock_string);
+
+ return strlen(mock_string);
+}
+
+ssize_t __wrap_send(int sockfd, const void *buf, size_t len, int flags)
+{
+ function_called();
+
+ check_expected(sockfd);
+ check_expected_ptr(buf);
+ check_expected_ptr(buf);
+ check_expected(len);
+ check_expected(flags);
+
+ return strlen(buf);
+}
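+
+// buf is deliberately checked twice: tests queue both an expect_value() (the
+// exact buffer pointer) and an expect_string() (the buffer contents) for the
+// same parameter, and each queued expectation is consumed by one
+// check_expected call. __wrap_kinesis_put_record() uses the same pattern.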
diff --git a/exporting/tests/test_exporting_engine.c b/exporting/tests/test_exporting_engine.c
new file mode 100644
index 0000000..774d1a2
--- /dev/null
+++ b/exporting/tests/test_exporting_engine.c
@@ -0,0 +1,1939 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "test_exporting_engine.h"
+#include "libnetdata/required_dummies.h"
+
+RRDHOST *localhost;
+netdata_rwlock_t rrd_rwlock;
+
+// global variables needed by read_exporting_config()
+struct config netdata_config;
+char *netdata_configured_user_config_dir = ".";
+char *netdata_configured_stock_config_dir = ".";
+char *netdata_configured_hostname = "test_global_host";
+
+char log_line[MAX_LOG_LINE + 1];
+
+BACKEND_OPTIONS global_backend_options = 0;
+const char *global_backend_source = "average";
+const char *global_backend_prefix = "netdata";
+
+void init_connectors_in_tests(struct engine *engine)
+{
+ expect_function_call(__wrap_now_realtime_sec);
+ will_return(__wrap_now_realtime_sec, 2);
+
+ expect_function_call(__wrap_uv_thread_create);
+
+ expect_value(__wrap_uv_thread_create, thread, &engine->instance_root->thread);
+ expect_value(__wrap_uv_thread_create, worker, simple_connector_worker);
+ expect_value(__wrap_uv_thread_create, arg, engine->instance_root);
+
+ expect_function_call(__wrap_uv_thread_set_name_np);
+
+ assert_int_equal(__real_init_connectors(engine), 0);
+
+ assert_int_equal(engine->now, 2);
+ assert_int_equal(engine->instance_root->after, 2);
+}
+
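+// Arms the uv thread doubles so the real init_connectors() can run inside a
+// test; engine->now == 2 comes from the mocked now_realtime_sec() above.
+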
+static void test_exporting_engine(void **state)
+{
+ struct engine *engine = *state;
+
+ expect_function_call(__wrap_read_exporting_config);
+ will_return(__wrap_read_exporting_config, engine);
+
+ expect_function_call(__wrap_init_connectors);
+ expect_memory(__wrap_init_connectors, engine, engine, sizeof(struct engine));
+ will_return(__wrap_init_connectors, 0);
+
+ expect_function_call(__wrap_create_main_rusage_chart);
+ expect_not_value(__wrap_create_main_rusage_chart, st_rusage, NULL);
+ expect_not_value(__wrap_create_main_rusage_chart, rd_user, NULL);
+ expect_not_value(__wrap_create_main_rusage_chart, rd_system, NULL);
+
+ expect_function_call(__wrap_now_realtime_sec);
+ will_return(__wrap_now_realtime_sec, 2);
+
+ expect_function_call(__wrap_mark_scheduled_instances);
+ expect_memory(__wrap_mark_scheduled_instances, engine, engine, sizeof(struct engine));
+ will_return(__wrap_mark_scheduled_instances, 1);
+
+ expect_function_call(__wrap_prepare_buffers);
+ expect_memory(__wrap_prepare_buffers, engine, engine, sizeof(struct engine));
+ will_return(__wrap_prepare_buffers, 0);
+
+ expect_function_call(__wrap_send_main_rusage);
+ expect_value(__wrap_send_main_rusage, st_rusage, NULL);
+ expect_value(__wrap_send_main_rusage, rd_user, NULL);
+ expect_value(__wrap_send_main_rusage, rd_system, NULL);
+
+ void *ptr = malloc(sizeof(struct netdata_static_thread));
+ assert_ptr_equal(exporting_main(ptr), NULL);
+ assert_int_equal(engine->now, 2);
+ free(ptr);
+}
+
+static void test_read_exporting_config(void **state)
+{
+ struct engine *engine = __mock_read_exporting_config(); // TODO: use real read_exporting_config() function
+ *state = engine;
+
+ assert_ptr_not_equal(engine, NULL);
+ assert_string_equal(engine->config.hostname, "test_engine_host");
+ assert_int_equal(engine->config.update_every, 3);
+ assert_int_equal(engine->instance_num, 0);
+
+ struct instance *instance = engine->instance_root;
+ assert_ptr_not_equal(instance, NULL);
+ assert_ptr_equal(instance->next, NULL);
+ assert_ptr_equal(instance->engine, engine);
+ assert_int_equal(instance->config.type, EXPORTING_CONNECTOR_TYPE_GRAPHITE);
+ assert_string_equal(instance->config.destination, "localhost");
+ assert_string_equal(instance->config.prefix, "netdata");
+ assert_int_equal(instance->config.update_every, 1);
+ assert_int_equal(instance->config.buffer_on_failures, 10);
+ assert_int_equal(instance->config.timeoutms, 10000);
+ assert_true(simple_pattern_matches(instance->config.charts_pattern, "any_chart"));
+ assert_true(simple_pattern_matches(instance->config.hosts_pattern, "any_host"));
+ assert_int_equal(instance->config.options, EXPORTING_SOURCE_DATA_AS_COLLECTED | EXPORTING_OPTION_SEND_NAMES);
+
+ teardown_configured_engine(state);
+}
+
+static void test_init_connectors(void **state)
+{
+ struct engine *engine = *state;
+
+ init_connectors_in_tests(engine);
+
+ assert_int_equal(engine->instance_num, 1);
+
+ struct instance *instance = engine->instance_root;
+
+ assert_ptr_equal(instance->next, NULL);
+ assert_int_equal(instance->index, 0);
+
+ struct simple_connector_config *connector_specific_config = instance->config.connector_specific_config;
+ assert_int_equal(connector_specific_config->default_port, 2003);
+
+ assert_ptr_equal(instance->worker, simple_connector_worker);
+ assert_ptr_equal(instance->start_batch_formatting, NULL);
+ assert_ptr_equal(instance->start_host_formatting, format_host_labels_graphite_plaintext);
+ assert_ptr_equal(instance->start_chart_formatting, NULL);
+ assert_ptr_equal(instance->metric_formatting, format_dimension_collected_graphite_plaintext);
+ assert_ptr_equal(instance->end_chart_formatting, NULL);
+ assert_ptr_equal(instance->end_host_formatting, flush_host_labels);
+
+ BUFFER *buffer = instance->buffer;
+ assert_ptr_not_equal(buffer, NULL);
+ buffer_sprintf(buffer, "%s", "graphite test");
+ assert_string_equal(buffer_tostring(buffer), "graphite test");
+}
+
+static void test_init_graphite_instance(void **state)
+{
+ struct engine *engine = *state;
+ struct instance *instance = engine->instance_root;
+
+ instance->config.options = EXPORTING_SOURCE_DATA_AS_COLLECTED | EXPORTING_OPTION_SEND_NAMES;
+ assert_int_equal(init_graphite_instance(instance), 0);
+ assert_int_equal(
+ ((struct simple_connector_config *)(instance->config.connector_specific_config))->default_port, 2003);
+ freez(instance->config.connector_specific_config);
+ assert_ptr_equal(instance->metric_formatting, format_dimension_collected_graphite_plaintext);
+ assert_ptr_not_equal(instance->buffer, NULL);
+ buffer_free(instance->buffer);
+
+ instance->config.options = EXPORTING_SOURCE_DATA_AVERAGE | EXPORTING_OPTION_SEND_NAMES;
+ assert_int_equal(init_graphite_instance(instance), 0);
+ assert_ptr_equal(instance->metric_formatting, format_dimension_stored_graphite_plaintext);
+}
+
+static void test_init_json_instance(void **state)
+{
+ struct engine *engine = *state;
+ struct instance *instance = engine->instance_root;
+
+ instance->config.options = EXPORTING_SOURCE_DATA_AS_COLLECTED | EXPORTING_OPTION_SEND_NAMES;
+ assert_int_equal(init_json_instance(instance), 0);
+ assert_int_equal(
+ ((struct simple_connector_config *)(instance->config.connector_specific_config))->default_port, 5448);
+ freez(instance->config.connector_specific_config);
+ assert_ptr_equal(instance->metric_formatting, format_dimension_collected_json_plaintext);
+ assert_ptr_not_equal(instance->buffer, NULL);
+ buffer_free(instance->buffer);
+
+ instance->config.options = EXPORTING_SOURCE_DATA_AVERAGE | EXPORTING_OPTION_SEND_NAMES;
+ assert_int_equal(init_json_instance(instance), 0);
+ assert_ptr_equal(instance->metric_formatting, format_dimension_stored_json_plaintext);
+}
+
+static void test_init_opentsdb_telnet_instance(void **state)
+{
+ struct engine *engine = *state;
+ struct instance *instance = engine->instance_root;
+
+ instance->config.options = EXPORTING_SOURCE_DATA_AS_COLLECTED | EXPORTING_OPTION_SEND_NAMES;
+ assert_int_equal(init_opentsdb_telnet_instance(instance), 0);
+ assert_int_equal(
+ ((struct simple_connector_config *)(instance->config.connector_specific_config))->default_port, 4242);
+ freez(instance->config.connector_specific_config);
+ assert_ptr_equal(instance->metric_formatting, format_dimension_collected_opentsdb_telnet);
+ assert_ptr_not_equal(instance->buffer, NULL);
+ buffer_free(instance->buffer);
+
+ instance->config.options = EXPORTING_SOURCE_DATA_AVERAGE | EXPORTING_OPTION_SEND_NAMES;
+ assert_int_equal(init_opentsdb_telnet_instance(instance), 0);
+ assert_ptr_equal(instance->metric_formatting, format_dimension_stored_opentsdb_telnet);
+}
+
+static void test_init_opentsdb_http_instance(void **state)
+{
+ struct engine *engine = *state;
+ struct instance *instance = engine->instance_root;
+
+ instance->config.options = EXPORTING_SOURCE_DATA_AS_COLLECTED | EXPORTING_OPTION_SEND_NAMES;
+ assert_int_equal(init_opentsdb_http_instance(instance), 0);
+ assert_int_equal(
+ ((struct simple_connector_config *)(instance->config.connector_specific_config))->default_port, 4242);
+ freez(instance->config.connector_specific_config);
+ assert_ptr_equal(instance->metric_formatting, format_dimension_collected_opentsdb_http);
+ assert_ptr_not_equal(instance->buffer, NULL);
+ buffer_free(instance->buffer);
+
+ instance->config.options = EXPORTING_SOURCE_DATA_AVERAGE | EXPORTING_OPTION_SEND_NAMES;
+ assert_int_equal(init_opentsdb_http_instance(instance), 0);
+ assert_ptr_equal(instance->metric_formatting, format_dimension_stored_opentsdb_http);
+}
+
+static void test_mark_scheduled_instances(void **state)
+{
+ struct engine *engine = *state;
+
+ assert_int_equal(__real_mark_scheduled_instances(engine), 1);
+
+ struct instance *instance = engine->instance_root;
+ assert_int_equal(instance->scheduled, 1);
+ assert_int_equal(instance->before, 2);
+}
+
+static void test_rrdhost_is_exportable(void **state)
+{
+ struct engine *engine = *state;
+ struct instance *instance = engine->instance_root;
+
+ expect_function_call(__wrap_info_int);
+
+ assert_ptr_equal(localhost->exporting_flags, NULL);
+
+ assert_int_equal(__real_rrdhost_is_exportable(instance, localhost), 1);
+
+ assert_string_equal(log_line, "enabled exporting of host 'localhost' for instance 'instance_name'");
+
+ assert_ptr_not_equal(localhost->exporting_flags, NULL);
+ assert_int_equal(localhost->exporting_flags[0], RRDHOST_FLAG_BACKEND_SEND);
+}
+
+static void test_false_rrdhost_is_exportable(void **state)
+{
+ struct engine *engine = *state;
+ struct instance *instance = engine->instance_root;
+
+ simple_pattern_free(instance->config.hosts_pattern);
+ instance->config.hosts_pattern = simple_pattern_create("!*", NULL, SIMPLE_PATTERN_EXACT);
+
+ expect_function_call(__wrap_info_int);
+
+ assert_ptr_equal(localhost->exporting_flags, NULL);
+
+ assert_int_equal(__real_rrdhost_is_exportable(instance, localhost), 0);
+
+ assert_string_equal(log_line, "disabled exporting of host 'localhost' for instance 'instance_name'");
+
+ assert_ptr_not_equal(localhost->exporting_flags, NULL);
+ assert_int_equal(localhost->exporting_flags[0], RRDHOST_FLAG_BACKEND_DONT_SEND);
+}
+
+static void test_rrdset_is_exportable(void **state)
+{
+ struct engine *engine = *state;
+ struct instance *instance = engine->instance_root;
+ RRDSET *st = localhost->rrdset_root;
+
+ assert_ptr_equal(st->exporting_flags, NULL);
+
+ assert_int_equal(__real_rrdset_is_exportable(instance, st), 1);
+
+ assert_ptr_not_equal(st->exporting_flags, NULL);
+ assert_int_equal(st->exporting_flags[0], RRDSET_FLAG_BACKEND_SEND);
+}
+
+static void test_false_rrdset_is_exportable(void **state)
+{
+ struct engine *engine = *state;
+ struct instance *instance = engine->instance_root;
+ RRDSET *st = localhost->rrdset_root;
+
+ simple_pattern_free(instance->config.charts_pattern);
+ instance->config.charts_pattern = simple_pattern_create("!*", NULL, SIMPLE_PATTERN_EXACT);
+
+ assert_ptr_equal(st->exporting_flags, NULL);
+
+ assert_int_equal(__real_rrdset_is_exportable(instance, st), 0);
+
+ assert_ptr_not_equal(st->exporting_flags, NULL);
+ assert_int_equal(st->exporting_flags[0], RRDSET_FLAG_BACKEND_IGNORE);
+}
+
+static void test_exporting_calculate_value_from_stored_data(void **state)
+{
+ struct engine *engine = *state;
+ struct instance *instance = engine->instance_root;
+ RRDDIM *rd = localhost->rrdset_root->dimensions;
+ time_t timestamp;
+
+ instance->after = 3;
+ instance->before = 10;
+
+ expect_function_call(__mock_rrddim_query_oldest_time);
+ will_return(__mock_rrddim_query_oldest_time, 1);
+
+ expect_function_call(__mock_rrddim_query_latest_time);
+ will_return(__mock_rrddim_query_latest_time, 2);
+
+ expect_function_call(__mock_rrddim_query_init);
+ expect_value(__mock_rrddim_query_init, start_time, 1);
+ expect_value(__mock_rrddim_query_init, end_time, 2);
+
+ expect_function_call(__mock_rrddim_query_is_finished);
+ will_return(__mock_rrddim_query_is_finished, 0);
+ expect_function_call(__mock_rrddim_query_next_metric);
+ will_return(__mock_rrddim_query_next_metric, pack_storage_number(27, SN_EXISTS));
+
+ expect_function_call(__mock_rrddim_query_is_finished);
+ will_return(__mock_rrddim_query_is_finished, 0);
+ expect_function_call(__mock_rrddim_query_next_metric);
+ will_return(__mock_rrddim_query_next_metric, pack_storage_number(45, SN_EXISTS));
+
+ expect_function_call(__mock_rrddim_query_is_finished);
+ will_return(__mock_rrddim_query_is_finished, 1);
+
+ expect_function_call(__mock_rrddim_query_finalize);
+
+ assert_int_equal(__real_exporting_calculate_value_from_stored_data(instance, rd, &timestamp), 36);
+}
+
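+// 36 is the average of the two unpacked samples, (27 + 45) / 2, which is the
+// "average" data source the stored-data code path is configured to use.
+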
+static void test_prepare_buffers(void **state)
+{
+ struct engine *engine = *state;
+ struct instance *instance = engine->instance_root;
+
+ instance->start_batch_formatting = __mock_start_batch_formatting;
+ instance->start_host_formatting = __mock_start_host_formatting;
+ instance->start_chart_formatting = __mock_start_chart_formatting;
+ instance->metric_formatting = __mock_metric_formatting;
+ instance->end_chart_formatting = __mock_end_chart_formatting;
+ instance->end_host_formatting = __mock_end_host_formatting;
+ instance->end_batch_formatting = __mock_end_batch_formatting;
+ __real_mark_scheduled_instances(engine);
+
+ expect_function_call(__mock_start_batch_formatting);
+ expect_value(__mock_start_batch_formatting, instance, instance);
+ will_return(__mock_start_batch_formatting, 0);
+
+ expect_function_call(__wrap_rrdhost_is_exportable);
+ expect_value(__wrap_rrdhost_is_exportable, instance, instance);
+ expect_value(__wrap_rrdhost_is_exportable, host, localhost);
+ will_return(__wrap_rrdhost_is_exportable, 1);
+
+ expect_function_call(__mock_start_host_formatting);
+ expect_value(__mock_start_host_formatting, instance, instance);
+ expect_value(__mock_start_host_formatting, host, localhost);
+ will_return(__mock_start_host_formatting, 0);
+
+ RRDSET *st = localhost->rrdset_root;
+ expect_function_call(__wrap_rrdset_is_exportable);
+ expect_value(__wrap_rrdset_is_exportable, instance, instance);
+ expect_value(__wrap_rrdset_is_exportable, st, st);
+ will_return(__wrap_rrdset_is_exportable, 1);
+
+ expect_function_call(__mock_start_chart_formatting);
+ expect_value(__mock_start_chart_formatting, instance, instance);
+ expect_value(__mock_start_chart_formatting, st, st);
+ will_return(__mock_start_chart_formatting, 0);
+
+ RRDDIM *rd = localhost->rrdset_root->dimensions;
+ expect_function_call(__mock_metric_formatting);
+ expect_value(__mock_metric_formatting, instance, instance);
+ expect_value(__mock_metric_formatting, rd, rd);
+ will_return(__mock_metric_formatting, 0);
+
+ expect_function_call(__mock_end_chart_formatting);
+ expect_value(__mock_end_chart_formatting, instance, instance);
+ expect_value(__mock_end_chart_formatting, st, st);
+ will_return(__mock_end_chart_formatting, 0);
+
+ expect_function_call(__mock_end_host_formatting);
+ expect_value(__mock_end_host_formatting, instance, instance);
+ expect_value(__mock_end_host_formatting, host, localhost);
+ will_return(__mock_end_host_formatting, 0);
+
+ expect_function_call(__mock_end_batch_formatting);
+ expect_value(__mock_end_batch_formatting, instance, instance);
+ will_return(__mock_end_batch_formatting, 0);
+
+ assert_int_equal(__real_prepare_buffers(engine), 0);
+
+ assert_int_equal(instance->stats.buffered_metrics, 1);
+
+ // check with NULL functions
+ instance->start_batch_formatting = NULL;
+ instance->start_host_formatting = NULL;
+ instance->start_chart_formatting = NULL;
+ instance->metric_formatting = NULL;
+ instance->end_chart_formatting = NULL;
+ instance->end_host_formatting = NULL;
+ instance->end_batch_formatting = NULL;
+ assert_int_equal(__real_prepare_buffers(engine), 0);
+
+ assert_int_equal(instance->scheduled, 0);
+ assert_int_equal(instance->after, 2);
+}
+
+static void test_exporting_name_copy(void **state)
+{
+ (void)state;
+
+ char *source_name = "test.name-with/special#characters_";
+ char destination_name[RRD_ID_LENGTH_MAX + 1];
+
+ assert_int_equal(exporting_name_copy(destination_name, source_name, RRD_ID_LENGTH_MAX), 34);
+ assert_string_equal(destination_name, "test.name_with_special_characters_");
+}
+
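+// In this input the '-', '/', and '#' are rewritten to '_', the dots survive,
+// and the return value (34) is the number of characters copied.
+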
+static void test_format_dimension_collected_graphite_plaintext(void **state)
+{
+ struct engine *engine = *state;
+
+ RRDDIM *rd = localhost->rrdset_root->dimensions;
+ assert_int_equal(format_dimension_collected_graphite_plaintext(engine->instance_root, rd), 0);
+ assert_string_equal(
+ buffer_tostring(engine->instance_root->buffer),
+ "netdata.test-host.chart_name.dimension_name;TAG1=VALUE1 TAG2=VALUE2 123000321 15051\n");
+}
+
+static void test_format_dimension_stored_graphite_plaintext(void **state)
+{
+ struct engine *engine = *state;
+
+ expect_function_call(__wrap_exporting_calculate_value_from_stored_data);
+ will_return(__wrap_exporting_calculate_value_from_stored_data, pack_storage_number(27, SN_EXISTS));
+
+ RRDDIM *rd = localhost->rrdset_root->dimensions;
+ assert_int_equal(format_dimension_stored_graphite_plaintext(engine->instance_root, rd), 0);
+ assert_string_equal(
+ buffer_tostring(engine->instance_root->buffer),
+ "netdata.test-host.chart_name.dimension_name;TAG1=VALUE1 TAG2=VALUE2 690565856.0000000 15052\n");
+}
+
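+// 690565856.0000000 is the raw pack_storage_number(27, SN_EXISTS) encoding:
+// the wrapped calculation returns the packed storage_number unconverted, so
+// the assertion captures exactly the value the formatter received.
+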
+static void test_format_dimension_collected_json_plaintext(void **state)
+{
+ struct engine *engine = *state;
+
+ RRDDIM *rd = localhost->rrdset_root->dimensions;
+ assert_int_equal(format_dimension_collected_json_plaintext(engine->instance_root, rd), 0);
+ assert_string_equal(
+ buffer_tostring(engine->instance_root->buffer),
+ "{\"prefix\":\"netdata\",\"hostname\":\"test-host\",\"host_tags\":\"TAG1=VALUE1 TAG2=VALUE2\","
+ "\"chart_id\":\"chart_id\",\"chart_name\":\"chart_name\",\"chart_family\":\"(null)\","
+ "\"chart_context\":\"(null)\",\"chart_type\":\"(null)\",\"units\":\"(null)\",\"id\":\"dimension_id\","
+ "\"name\":\"dimension_name\",\"value\":123000321,\"timestamp\":15051}\n");
+}
+
+static void test_format_dimension_stored_json_plaintext(void **state)
+{
+ struct engine *engine = *state;
+
+ expect_function_call(__wrap_exporting_calculate_value_from_stored_data);
+ will_return(__wrap_exporting_calculate_value_from_stored_data, pack_storage_number(27, SN_EXISTS));
+
+ RRDDIM *rd = localhost->rrdset_root->dimensions;
+ assert_int_equal(format_dimension_stored_json_plaintext(engine->instance_root, rd), 0);
+ assert_string_equal(
+ buffer_tostring(engine->instance_root->buffer),
+ "{\"prefix\":\"netdata\",\"hostname\":\"test-host\",\"host_tags\":\"TAG1=VALUE1 TAG2=VALUE2\","
+ "\"chart_id\":\"chart_id\",\"chart_name\":\"chart_name\",\"chart_family\":\"(null)\"," \
+ "\"chart_context\": \"(null)\",\"chart_type\":\"(null)\",\"units\": \"(null)\",\"id\":\"dimension_id\","
+ "\"name\":\"dimension_name\",\"value\":690565856.0000000,\"timestamp\": 15052}\n");
+}
+
+static void test_format_dimension_collected_opentsdb_telnet(void **state)
+{
+ struct engine *engine = *state;
+
+ RRDDIM *rd = localhost->rrdset_root->dimensions;
+ assert_int_equal(format_dimension_collected_opentsdb_telnet(engine->instance_root, rd), 0);
+ assert_string_equal(
+ buffer_tostring(engine->instance_root->buffer),
+ "put netdata.chart_name.dimension_name 15051 123000321 host=test-host TAG1=VALUE1 TAG2=VALUE2\n");
+}
+
+static void test_format_dimension_stored_opentsdb_telnet(void **state)
+{
+ struct engine *engine = *state;
+
+ expect_function_call(__wrap_exporting_calculate_value_from_stored_data);
+ will_return(__wrap_exporting_calculate_value_from_stored_data, pack_storage_number(27, SN_EXISTS));
+
+ RRDDIM *rd = localhost->rrdset_root->dimensions;
+ assert_int_equal(format_dimension_stored_opentsdb_telnet(engine->instance_root, rd), 0);
+ assert_string_equal(
+ buffer_tostring(engine->instance_root->buffer),
+ "put netdata.chart_name.dimension_name 15052 690565856.0000000 host=test-host TAG1=VALUE1 TAG2=VALUE2\n");
+}
+
+static void test_format_dimension_collected_opentsdb_http(void **state)
+{
+ struct engine *engine = *state;
+
+ RRDDIM *rd = localhost->rrdset_root->dimensions;
+ assert_int_equal(format_dimension_collected_opentsdb_http(engine->instance_root, rd), 0);
+ assert_string_equal(
+ buffer_tostring(engine->instance_root->buffer),
+ "{\"metric\":\"netdata.chart_name.dimension_name\","
+ "\"timestamp\":15051,"
+ "\"value\":123000321,"
+ "\"tags\":{\"host\":\"test-host TAG1=VALUE1 TAG2=VALUE2\"}}");
+}
+
+static void test_format_dimension_stored_opentsdb_http(void **state)
+{
+ struct engine *engine = *state;
+
+ expect_function_call(__wrap_exporting_calculate_value_from_stored_data);
+ will_return(__wrap_exporting_calculate_value_from_stored_data, pack_storage_number(27, SN_EXISTS));
+
+ RRDDIM *rd = localhost->rrdset_root->dimensions;
+ assert_int_equal(format_dimension_stored_opentsdb_http(engine->instance_root, rd), 0);
+ assert_string_equal(
+ buffer_tostring(engine->instance_root->buffer),
+ "{\"metric\":\"netdata.chart_name.dimension_name\","
+ "\"timestamp\":15052,"
+ "\"value\":690565856.0000000,"
+ "\"tags\":{\"host\":\"test-host TAG1=VALUE1 TAG2=VALUE2\"}}");
+}
+
+static void test_exporting_discard_response(void **state)
+{
+ struct engine *engine = *state;
+
+ BUFFER *response = buffer_create(0);
+ buffer_sprintf(response, "Test response");
+
+ assert_int_equal(exporting_discard_response(response, engine->instance_root), 0);
+ assert_int_equal(buffer_strlen(response), 0);
+
+ buffer_free(response);
+}
+
+static void test_simple_connector_receive_response(void **state)
+{
+ struct engine *engine = *state;
+ struct instance *instance = engine->instance_root;
+ struct stats *stats = &instance->stats;
+
+ int sock = 1;
+
+ expect_function_call(__wrap_recv);
+ expect_value(__wrap_recv, sockfd, 1);
+ expect_not_value(__wrap_recv, buf, 0);
+ expect_value(__wrap_recv, len, 4096);
+ expect_value(__wrap_recv, flags, MSG_DONTWAIT);
+
+ simple_connector_receive_response(&sock, instance);
+
+ assert_int_equal(stats->received_bytes, 9);
+ assert_int_equal(stats->receptions, 1);
+ assert_int_equal(sock, 1);
+}
+
+static void test_simple_connector_send_buffer(void **state)
+{
+ struct engine *engine = *state;
+ struct instance *instance = engine->instance_root;
+ struct stats *stats = &instance->stats;
+
+ int sock = 1;
+ int failures = 3;
+ size_t buffered_metrics = 1;
+ BUFFER *header = buffer_create(0);
+ BUFFER *buffer = buffer_create(0);
+ buffer_strcat(header, "test header\n");
+ buffer_strcat(buffer, "test buffer\n");
+
+ expect_function_call(__wrap_send);
+ expect_value(__wrap_send, sockfd, 1);
+ expect_value(__wrap_send, buf, buffer_tostring(header));
+ expect_string(__wrap_send, buf, "test header\n");
+ expect_value(__wrap_send, len, 12);
+ expect_value(__wrap_send, flags, MSG_NOSIGNAL);
+
+ expect_function_call(__wrap_send);
+ expect_value(__wrap_send, sockfd, 1);
+ expect_value(__wrap_send, buf, buffer_tostring(buffer));
+ expect_string(__wrap_send, buf, "test buffer\n");
+ expect_value(__wrap_send, len, 12);
+ expect_value(__wrap_send, flags, MSG_NOSIGNAL);
+
+ simple_connector_send_buffer(&sock, &failures, instance, header, buffer, buffered_metrics);
+
+ assert_int_equal(failures, 0);
+ assert_int_equal(stats->transmission_successes, 1);
+ assert_int_equal(stats->sent_bytes, 12);
+ assert_int_equal(stats->sent_metrics, 1);
+ assert_int_equal(stats->transmission_failures, 0);
+
+ assert_int_equal(buffer_strlen(buffer), 0);
+
+ assert_int_equal(sock, 1);
+}
+
+static void test_simple_connector_worker(void **state)
+{
+ struct engine *engine = *state;
+ struct instance *instance = engine->instance_root;
+ struct stats *stats = &instance->stats;
+
+ __real_mark_scheduled_instances(engine);
+
+ struct simple_connector_data *simple_connector_data = callocz(1, sizeof(struct simple_connector_data));
+ instance->connector_specific_data = simple_connector_data;
+ simple_connector_data->last_buffer = callocz(1, sizeof(struct simple_connector_buffer));
+ simple_connector_data->first_buffer = simple_connector_data->last_buffer;
+ simple_connector_data->header = buffer_create(0);
+ simple_connector_data->buffer = buffer_create(0);
+ simple_connector_data->last_buffer->header = buffer_create(0);
+ simple_connector_data->last_buffer->buffer = buffer_create(0);
+
+ buffer_sprintf(simple_connector_data->last_buffer->header, "test header");
+ buffer_sprintf(simple_connector_data->last_buffer->buffer, "test buffer");
+
+ expect_function_call(__wrap_connect_to_one_of);
+ expect_string(__wrap_connect_to_one_of, destination, "localhost");
+ expect_value(__wrap_connect_to_one_of, default_port, 2003);
+ expect_not_value(__wrap_connect_to_one_of, reconnects_counter, 0);
+ expect_value(__wrap_connect_to_one_of, connected_to, 0);
+ expect_value(__wrap_connect_to_one_of, connected_to_size, 0);
+ will_return(__wrap_connect_to_one_of, 2);
+
+ expect_function_call(__wrap_send);
+ expect_value(__wrap_send, sockfd, 2);
+ expect_not_value(__wrap_send, buf, buffer_tostring(simple_connector_data->last_buffer->buffer));
+ expect_string(__wrap_send, buf, "test header");
+ expect_value(__wrap_send, len, 11);
+ expect_value(__wrap_send, flags, MSG_NOSIGNAL);
+
+ expect_function_call(__wrap_send);
+ expect_value(__wrap_send, sockfd, 2);
+ expect_value(__wrap_send, buf, buffer_tostring(simple_connector_data->last_buffer->buffer));
+ expect_string(__wrap_send, buf, "test buffer");
+ expect_value(__wrap_send, len, 11);
+ expect_value(__wrap_send, flags, MSG_NOSIGNAL);
+
+ expect_function_call(__wrap_send_internal_metrics);
+ expect_value(__wrap_send_internal_metrics, instance, instance);
+ will_return(__wrap_send_internal_metrics, 0);
+
+ simple_connector_worker(instance);
+
+ assert_int_equal(stats->buffered_metrics, 0);
+ assert_int_equal(stats->buffered_bytes, 0);
+ assert_int_equal(stats->received_bytes, 0);
+ assert_int_equal(stats->sent_bytes, 0);
+ assert_int_equal(stats->sent_metrics, 0);
+ assert_int_equal(stats->lost_metrics, 0);
+ assert_int_equal(stats->receptions, 0);
+ assert_int_equal(stats->transmission_successes, 0);
+ assert_int_equal(stats->transmission_failures, 0);
+ assert_int_equal(stats->data_lost_events, 0);
+ assert_int_equal(stats->lost_bytes, 0);
+ assert_int_equal(stats->reconnects, 0);
+}
+
+static void test_sanitize_json_string(void **state)
+{
+ (void)state;
+
+ char *src = "check \t\\\" string";
+ char dst[19 + 1];
+
+ sanitize_json_string(dst, src, 19);
+
+ assert_string_equal(dst, "check _\\\\\\\" string");
+}
+
+static void test_sanitize_graphite_label_value(void **state)
+{
+ (void)state;
+
+ char *src = "check ;~ string";
+ char dst[15 + 1];
+
+ sanitize_graphite_label_value(dst, src, 15);
+
+ assert_string_equal(dst, "check____string");
+}
+
+static void test_sanitize_opentsdb_label_value(void **state)
+{
+ (void)state;
+
+ char *src = "check \t\\\" #&$? -_./ string";
+ char dst[26 + 1];
+
+ sanitize_opentsdb_label_value(dst, src, 26);
+
+ assert_string_equal(dst, "check__________-_./_string");
+}
+
+static void test_format_host_labels_json_plaintext(void **state)
+{
+ struct engine *engine = *state;
+ struct instance *instance = engine->instance_root;
+
+ instance->config.options |= EXPORTING_OPTION_SEND_CONFIGURED_LABELS;
+ instance->config.options |= EXPORTING_OPTION_SEND_AUTOMATIC_LABELS;
+
+ assert_int_equal(format_host_labels_json_plaintext(instance, localhost), 0);
+ assert_string_equal(buffer_tostring(instance->labels), "\"labels\":{\"key1\":\"value1\",\"key2\":\"value2\"},");
+}
+
+static void test_format_host_labels_graphite_plaintext(void **state)
+{
+ struct engine *engine = *state;
+ struct instance *instance = engine->instance_root;
+
+ instance->config.options |= EXPORTING_OPTION_SEND_CONFIGURED_LABELS;
+ instance->config.options |= EXPORTING_OPTION_SEND_AUTOMATIC_LABELS;
+
+ assert_int_equal(format_host_labels_graphite_plaintext(instance, localhost), 0);
+ assert_string_equal(buffer_tostring(instance->labels), ";key1=value1;key2=value2");
+}
+
+static void test_format_host_labels_opentsdb_telnet(void **state)
+{
+ struct engine *engine = *state;
+ struct instance *instance = engine->instance_root;
+
+ instance->config.options |= EXPORTING_OPTION_SEND_CONFIGURED_LABELS;
+ instance->config.options |= EXPORTING_OPTION_SEND_AUTOMATIC_LABELS;
+
+ assert_int_equal(format_host_labels_opentsdb_telnet(instance, localhost), 0);
+ assert_string_equal(buffer_tostring(instance->labels), " key1=value1 key2=value2");
+}
+
+static void test_format_host_labels_opentsdb_http(void **state)
+{
+ struct engine *engine = *state;
+ struct instance *instance = engine->instance_root;
+
+ instance->config.options |= EXPORTING_OPTION_SEND_CONFIGURED_LABELS;
+ instance->config.options |= EXPORTING_OPTION_SEND_AUTOMATIC_LABELS;
+
+ assert_int_equal(format_host_labels_opentsdb_http(instance, localhost), 0);
+ assert_string_equal(buffer_tostring(instance->labels), ",\"key1\":\"value1\",\"key2\":\"value2\"");
+}
+
+static void test_flush_host_labels(void **state)
+{
+ struct engine *engine = *state;
+ struct instance *instance = engine->instance_root;
+
+ instance->labels = buffer_create(12);
+ buffer_strcat(instance->labels, "check string");
+ assert_int_equal(buffer_strlen(instance->labels), 12);
+
+ assert_int_equal(flush_host_labels(instance, localhost), 0);
+ assert_int_equal(buffer_strlen(instance->labels), 0);
+}
+
+static void test_create_main_rusage_chart(void **state)
+{
+ UNUSED(state);
+
+ RRDSET *st_rusage = calloc(1, sizeof(RRDSET));
+ RRDDIM *rd_user = NULL;
+ RRDDIM *rd_system = NULL;
+
+ expect_function_call(rrdset_create_custom);
+ expect_value(rrdset_create_custom, host, localhost);
+ expect_string(rrdset_create_custom, type, "netdata");
+ expect_string(rrdset_create_custom, id, "exporting_main_thread_cpu");
+ expect_value(rrdset_create_custom, name, NULL);
+ expect_string(rrdset_create_custom, family, "exporting");
+ expect_string(rrdset_create_custom, context, "exporting_cpu_usage");
+ expect_string(rrdset_create_custom, units, "milliseconds/s");
+ expect_string(rrdset_create_custom, plugin, "exporting");
+ expect_value(rrdset_create_custom, module, NULL);
+ expect_value(rrdset_create_custom, priority, 130600);
+ expect_value(rrdset_create_custom, update_every, localhost->rrd_update_every);
+ expect_value(rrdset_create_custom, chart_type, RRDSET_TYPE_STACKED);
+ will_return(rrdset_create_custom, st_rusage);
+
+ expect_function_calls(rrddim_add_custom, 2);
+ expect_value_count(rrddim_add_custom, st, st_rusage, 2);
+ expect_value_count(rrddim_add_custom, name, NULL, 2);
+ expect_value_count(rrddim_add_custom, multiplier, 1, 2);
+ expect_value_count(rrddim_add_custom, divisor, 1000, 2);
+ expect_value_count(rrddim_add_custom, algorithm, RRD_ALGORITHM_INCREMENTAL, 2);
+
+ __real_create_main_rusage_chart(&st_rusage, &rd_user, &rd_system);
+
+ free(st_rusage);
+}
+
+static void test_send_main_rusage(void **state)
+{
+ UNUSED(state);
+
+ RRDSET *st_rusage = calloc(1, sizeof(RRDSET));
+ st_rusage->counter_done = 1;
+
+ expect_function_call(rrdset_next_usec);
+ expect_value(rrdset_next_usec, st, st_rusage);
+
+ expect_function_calls(rrddim_set_by_pointer, 2);
+ expect_value_count(rrddim_set_by_pointer, st, st_rusage, 2);
+
+ expect_function_call(rrdset_done);
+ expect_value(rrdset_done, st, st_rusage);
+
+ __real_send_main_rusage(st_rusage, NULL, NULL);
+
+ free(st_rusage);
+}
+
+static void test_send_internal_metrics(void **state)
+{
+ UNUSED(state);
+
+ struct instance *instance = calloc(1, sizeof(struct instance));
+ instance->config.name = (const char *)strdupz("test_instance");
+ instance->config.update_every = 2;
+
+ struct stats *stats = &instance->stats;
+
+ stats->st_metrics = calloc(1, sizeof(RRDSET));
+ stats->st_metrics->counter_done = 1;
+ stats->st_bytes = calloc(1, sizeof(RRDSET));
+ stats->st_bytes->counter_done = 1;
+ stats->st_ops = calloc(1, sizeof(RRDSET));
+ stats->st_ops->counter_done = 1;
+ stats->st_rusage = calloc(1, sizeof(RRDSET));
+ stats->st_rusage->counter_done = 1;
+
+ // ------------------------------------------------------------------------
+
+ expect_function_call(rrdset_create_custom);
+ expect_value(rrdset_create_custom, host, localhost);
+ expect_string(rrdset_create_custom, type, "netdata");
+ expect_string(rrdset_create_custom, id, "exporting_test_instance_metrics");
+ expect_value(rrdset_create_custom, name, NULL);
+ expect_string(rrdset_create_custom, family, "exporting_test_instance");
+ expect_string(rrdset_create_custom, context, "exporting_buffer");
+ expect_string(rrdset_create_custom, units, "metrics");
+ expect_string(rrdset_create_custom, plugin, "exporting");
+ expect_value(rrdset_create_custom, module, NULL);
+ expect_value(rrdset_create_custom, priority, 130610);
+ expect_value(rrdset_create_custom, update_every, 2);
+ expect_value(rrdset_create_custom, chart_type, RRDSET_TYPE_LINE);
+ will_return(rrdset_create_custom, stats->st_metrics);
+
+ expect_function_calls(rrddim_add_custom, 3);
+ expect_value_count(rrddim_add_custom, st, stats->st_metrics, 3);
+ expect_value_count(rrddim_add_custom, name, NULL, 3);
+ expect_value_count(rrddim_add_custom, multiplier, 1, 3);
+ expect_value_count(rrddim_add_custom, divisor, 1, 3);
+ expect_value_count(rrddim_add_custom, algorithm, RRD_ALGORITHM_ABSOLUTE, 3);
+
+ // ------------------------------------------------------------------------
+
+ expect_function_call(rrdset_create_custom);
+ expect_value(rrdset_create_custom, host, localhost);
+ expect_string(rrdset_create_custom, type, "netdata");
+ expect_string(rrdset_create_custom, id, "exporting_test_instance_bytes");
+ expect_value(rrdset_create_custom, name, NULL);
+ expect_string(rrdset_create_custom, family, "exporting_test_instance");
+ expect_string(rrdset_create_custom, context, "exporting_data_size");
+ expect_string(rrdset_create_custom, units, "KiB");
+ expect_string(rrdset_create_custom, plugin, "exporting");
+ expect_value(rrdset_create_custom, module, NULL);
+ expect_value(rrdset_create_custom, priority, 130620);
+ expect_value(rrdset_create_custom, update_every, 2);
+ expect_value(rrdset_create_custom, chart_type, RRDSET_TYPE_AREA);
+ will_return(rrdset_create_custom, stats->st_bytes);
+
+ expect_function_calls(rrddim_add_custom, 4);
+ expect_value_count(rrddim_add_custom, st, stats->st_bytes, 4);
+ expect_value_count(rrddim_add_custom, name, NULL, 4);
+ expect_value_count(rrddim_add_custom, multiplier, 1, 4);
+ expect_value_count(rrddim_add_custom, divisor, 1024, 4);
+ expect_value_count(rrddim_add_custom, algorithm, RRD_ALGORITHM_ABSOLUTE, 4);
+
+ // ------------------------------------------------------------------------
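+ // The operations chart ("exporting_test_instance_ops") is created with five absolute dimensions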
+
+ expect_function_call(rrdset_create_custom);
+ expect_value(rrdset_create_custom, host, localhost);
+ expect_string(rrdset_create_custom, type, "netdata");
+ expect_string(rrdset_create_custom, id, "exporting_test_instance_ops");
+ expect_value(rrdset_create_custom, name, NULL);
+ expect_string(rrdset_create_custom, family, "exporting_test_instance");
+ expect_string(rrdset_create_custom, context, "exporting_operations");
+ expect_string(rrdset_create_custom, units, "operations");
+ expect_string(rrdset_create_custom, plugin, "exporting");
+ expect_value(rrdset_create_custom, module, NULL);
+ expect_value(rrdset_create_custom, priority, 130630);
+ expect_value(rrdset_create_custom, update_every, 2);
+ expect_value(rrdset_create_custom, chart_type, RRDSET_TYPE_LINE);
+ will_return(rrdset_create_custom, stats->st_ops);
+
+ expect_function_calls(rrddim_add_custom, 5);
+ expect_value_count(rrddim_add_custom, st, stats->st_ops, 5);
+ expect_value_count(rrddim_add_custom, name, NULL, 5);
+ expect_value_count(rrddim_add_custom, multiplier, 1, 5);
+ expect_value_count(rrddim_add_custom, divisor, 1, 5);
+ expect_value_count(rrddim_add_custom, algorithm, RRD_ALGORITHM_ABSOLUTE, 5);
+
+ // ------------------------------------------------------------------------
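+ // The thread CPU chart ("exporting_test_instance_thread_cpu") is created with two incremental dimensions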
+
+ expect_function_call(rrdset_create_custom);
+ expect_value(rrdset_create_custom, host, localhost);
+ expect_string(rrdset_create_custom, type, "netdata");
+ expect_string(rrdset_create_custom, id, "exporting_test_instance_thread_cpu");
+ expect_value(rrdset_create_custom, name, NULL);
+ expect_string(rrdset_create_custom, family, "exporting_test_instance");
+ expect_string(rrdset_create_custom, context, "exporting_instance");
+ expect_string(rrdset_create_custom, units, "milliseconds/s");
+ expect_string(rrdset_create_custom, plugin, "exporting");
+ expect_value(rrdset_create_custom, module, NULL);
+ expect_value(rrdset_create_custom, priority, 130640);
+ expect_value(rrdset_create_custom, update_every, 2);
+ expect_value(rrdset_create_custom, chart_type, RRDSET_TYPE_STACKED);
+ will_return(rrdset_create_custom, stats->st_rusage);
+
+ expect_function_calls(rrddim_add_custom, 2);
+ expect_value_count(rrddim_add_custom, st, stats->st_rusage, 2);
+ expect_value_count(rrddim_add_custom, name, NULL, 2);
+ expect_value_count(rrddim_add_custom, multiplier, 1, 2);
+ expect_value_count(rrddim_add_custom, divisor, 1000, 2);
+ expect_value_count(rrddim_add_custom, algorithm, RRD_ALGORITHM_INCREMENTAL, 2);
+
+ // ------------------------------------------------------------------------
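+ // Each chart is then advanced exactly once: rrdset_next_usec, one rrddim_set_by_pointer per dimension, rrdset_done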
+
+ expect_function_call(rrdset_next_usec);
+ expect_value(rrdset_next_usec, st, stats->st_metrics);
+
+ expect_function_calls(rrddim_set_by_pointer, 3);
+ expect_value_count(rrddim_set_by_pointer, st, stats->st_metrics, 3);
+
+ expect_function_call(rrdset_done);
+ expect_value(rrdset_done, st, stats->st_metrics);
+
+ // ------------------------------------------------------------------------
+
+ expect_function_call(rrdset_next_usec);
+ expect_value(rrdset_next_usec, st, stats->st_bytes);
+
+ expect_function_calls(rrddim_set_by_pointer, 4);
+ expect_value_count(rrddim_set_by_pointer, st, stats->st_bytes, 4);
+
+ expect_function_call(rrdset_done);
+ expect_value(rrdset_done, st, stats->st_bytes);
+
+ // ------------------------------------------------------------------------
+
+ expect_function_call(rrdset_next_usec);
+ expect_value(rrdset_next_usec, st, stats->st_ops);
+
+ expect_function_calls(rrddim_set_by_pointer, 5);
+ expect_value_count(rrddim_set_by_pointer, st, stats->st_ops, 5);
+
+ expect_function_call(rrdset_done);
+ expect_value(rrdset_done, st, stats->st_ops);
+
+ // ------------------------------------------------------------------------
+
+ expect_function_call(rrdset_next_usec);
+ expect_value(rrdset_next_usec, st, stats->st_rusage);
+
+ expect_function_calls(rrddim_set_by_pointer, 2);
+ expect_value_count(rrddim_set_by_pointer, st, stats->st_rusage, 2);
+
+ expect_function_call(rrdset_done);
+ expect_value(rrdset_done, st, stats->st_rusage);
+
+ // ------------------------------------------------------------------------
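+ // Run the real function against the mocked RRD API, then release the fixtures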
+
+ __real_send_internal_metrics(instance);
+
+ free(stats->st_metrics);
+ free(stats->st_bytes);
+ free(stats->st_ops);
+ free(stats->st_rusage);
+ free((void *)instance->config.name);
+ free(instance);
+}
+
+static void test_can_send_rrdset(void **state)
+{
+ UNUSED(state);
+
+ assert_int_equal(can_send_rrdset(prometheus_exporter_instance, localhost->rrdset_root), 1);
+
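+ // A chart flagged with RRDSET_FLAG_BACKEND_IGNORE must be skipped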
+ rrdset_flag_set(localhost->rrdset_root, RRDSET_FLAG_BACKEND_IGNORE);
+ assert_int_equal(can_send_rrdset(prometheus_exporter_instance, localhost->rrdset_root), 0);
+ rrdset_flag_clear(localhost->rrdset_root, RRDSET_FLAG_BACKEND_IGNORE);
+
+ // TODO: test with a denying simple pattern
+
+ rrdset_flag_set(localhost->rrdset_root, RRDSET_FLAG_OBSOLETE);
+ assert_int_equal(can_send_rrdset(prometheus_exporter_instance, localhost->rrdset_root), 0);
+ rrdset_flag_clear(localhost->rrdset_root, RRDSET_FLAG_OBSOLETE);
+
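+ // Stored data cannot be exported when the chart has no database (memory mode NONE)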
+ localhost->rrdset_root->rrd_memory_mode = RRD_MEMORY_MODE_NONE;
+ prometheus_exporter_instance->config.options |= EXPORTING_SOURCE_DATA_AVERAGE;
+ assert_int_equal(can_send_rrdset(prometheus_exporter_instance, localhost->rrdset_root), 0);
+}
+
+static void test_prometheus_name_copy(void **state)
+{
+ UNUSED(state);
+
+ char destination_name[PROMETHEUS_ELEMENT_MAX + 1];
+ assert_int_equal(prometheus_name_copy(destination_name, "test-name", PROMETHEUS_ELEMENT_MAX), 9);
+
+ assert_string_equal(destination_name, "test_name");
+}
+
+static void test_prometheus_label_copy(void **state)
+{
+ UNUSED(state);
+
+ char destination_name[PROMETHEUS_ELEMENT_MAX + 1];
+ assert_int_equal(prometheus_label_copy(destination_name, "test\"\\\nlabel", PROMETHEUS_ELEMENT_MAX), 15);
+
+ assert_string_equal(destination_name, "test\\\"\\\\\\\nlabel");
+}
+
+static void test_prometheus_units_copy(void **state)
+{
+ UNUSED(state);
+
+ char destination_name[PROMETHEUS_ELEMENT_MAX + 1];
+ assert_string_equal(prometheus_units_copy(destination_name, "test-units", PROMETHEUS_ELEMENT_MAX, 0), "_test_units");
+ assert_string_equal(destination_name, "_test_units");
+
+ assert_string_equal(prometheus_units_copy(destination_name, "%", PROMETHEUS_ELEMENT_MAX, 0), "_percent");
+ assert_string_equal(prometheus_units_copy(destination_name, "test-units/s", PROMETHEUS_ELEMENT_MAX, 0), "_test_units_persec");
+
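+ // The final argument selects legacy unit names: "KiB" is rendered as "_KB"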
+ assert_string_equal(prometheus_units_copy(destination_name, "KiB", PROMETHEUS_ELEMENT_MAX, 1), "_KB");
+}
+
+static void test_format_host_labels_prometheus(void **state)
+{
+ struct engine *engine = *state;
+ struct instance *instance = engine->instance_root;
+
+ instance->config.options |= EXPORTING_OPTION_SEND_CONFIGURED_LABELS;
+ instance->config.options |= EXPORTING_OPTION_SEND_AUTOMATIC_LABELS;
+
+ format_host_labels_prometheus(instance, localhost);
+ assert_string_equal(buffer_tostring(instance->labels), "key1=\"netdata\",key2=\"value2\"");
+}
+
+static void rrd_stats_api_v1_charts_allmetrics_prometheus(void **state)
+{
+ (void)state;
+
+ BUFFER *buffer = buffer_create(0);
+
+ localhost->hostname = strdupz("test_hostname");
+ localhost->rrdset_root->family = strdupz("test_family");
+ localhost->rrdset_root->context = strdupz("test_context");
+
+ expect_function_call(__wrap_now_realtime_sec);
+ will_return(__wrap_now_realtime_sec, 2);
+
+ expect_function_call(__wrap_exporting_calculate_value_from_stored_data);
+ will_return(__wrap_exporting_calculate_value_from_stored_data, pack_storage_number(27, SN_EXISTS));
+
+ rrd_stats_api_v1_charts_allmetrics_prometheus_single_host(localhost, buffer, "test_server", "test_prefix", 0, 0);
+
+ assert_string_equal(
+ buffer_tostring(buffer),
+ "netdata_info{instance=\"test_hostname\",application=\"(null)\",version=\"(null)\"} 1\n"
+ "netdata_host_tags_info{key1=\"value1\",key2=\"value2\"} 1\n"
+ "netdata_host_tags{key1=\"value1\",key2=\"value2\"} 1\n"
+ "test_prefix_test_context{chart=\"chart_id\",family=\"test_family\",dimension=\"dimension_id\"} 690565856.0000000\n");
+
+ buffer_flush(buffer);
+
+ expect_function_call(__wrap_now_realtime_sec);
+ will_return(__wrap_now_realtime_sec, 2);
+
+ expect_function_call(__wrap_exporting_calculate_value_from_stored_data);
+ will_return(__wrap_exporting_calculate_value_from_stored_data, pack_storage_number(27, SN_EXISTS));
+
+ rrd_stats_api_v1_charts_allmetrics_prometheus_single_host(
+ localhost, buffer, "test_server", "test_prefix", 0, PROMETHEUS_OUTPUT_NAMES | PROMETHEUS_OUTPUT_TYPES);
+
+ assert_string_equal(
+ buffer_tostring(buffer),
+ "netdata_info{instance=\"test_hostname\",application=\"(null)\",version=\"(null)\"} 1\n"
+ "netdata_host_tags_info{key1=\"value1\",key2=\"value2\"} 1\n"
+ "netdata_host_tags{key1=\"value1\",key2=\"value2\"} 1\n"
+ "# TYPE test_prefix_test_context gauge\n"
+ "test_prefix_test_context{chart=\"chart_name\",family=\"test_family\",dimension=\"dimension_name\"} 690565856.0000000\n");
+
+ buffer_flush(buffer);
+
+ expect_function_call(__wrap_now_realtime_sec);
+ will_return(__wrap_now_realtime_sec, 2);
+
+ expect_function_call(__wrap_exporting_calculate_value_from_stored_data);
+ will_return(__wrap_exporting_calculate_value_from_stored_data, pack_storage_number(27, SN_EXISTS));
+
+ rrd_stats_api_v1_charts_allmetrics_prometheus_all_hosts(localhost, buffer, "test_server", "test_prefix", 0, 0);
+
+ assert_string_equal(
+ buffer_tostring(buffer),
+ "netdata_info{instance=\"test_hostname\",application=\"(null)\",version=\"(null)\"} 1\n"
+ "netdata_host_tags_info{instance=\"test_hostname\",key1=\"value1\",key2=\"value2\"} 1\n"
+ "netdata_host_tags{instance=\"test_hostname\",key1=\"value1\",key2=\"value2\"} 1\n"
+ "test_prefix_test_context{chart=\"chart_id\",family=\"test_family\",dimension=\"dimension_id\",instance=\"test_hostname\"} 690565856.0000000\n");
+
+ free(localhost->rrdset_root->context);
+ free(localhost->rrdset_root->family);
+ free(localhost->hostname);
+ buffer_free(buffer);
+}
+
+#if ENABLE_PROMETHEUS_REMOTE_WRITE
+static void test_init_prometheus_remote_write_instance(void **state)
+{
+ struct engine *engine = *state;
+ struct instance *instance = engine->instance_root;
+
+ expect_function_call(__wrap_init_write_request);
+ will_return(__wrap_init_write_request, 0xff);
+
+ assert_int_equal(init_prometheus_remote_write_instance(instance), 0);
+
+ assert_ptr_equal(instance->worker, simple_connector_worker);
+ assert_ptr_equal(instance->start_batch_formatting, NULL);
+ assert_ptr_equal(instance->start_host_formatting, format_host_prometheus_remote_write);
+ assert_ptr_equal(instance->start_chart_formatting, format_chart_prometheus_remote_write);
+ assert_ptr_equal(instance->metric_formatting, format_dimension_prometheus_remote_write);
+ assert_ptr_equal(instance->end_chart_formatting, NULL);
+ assert_ptr_equal(instance->end_host_formatting, NULL);
+ assert_ptr_equal(instance->end_batch_formatting, format_batch_prometheus_remote_write);
+ assert_ptr_equal(instance->prepare_header, prometheus_remote_write_prepare_header);
+ assert_ptr_equal(instance->check_response, process_prometheus_remote_write_response);
+
+ assert_ptr_not_equal(instance->buffer, NULL);
+ buffer_free(instance->buffer);
+
+ struct prometheus_remote_write_specific_data *connector_specific_data =
+ (struct prometheus_remote_write_specific_data *)instance->connector_specific_data;
+
+ assert_ptr_not_equal(instance->connector_specific_data, NULL);
+ assert_ptr_not_equal(connector_specific_data->write_request, NULL);
+ freez(instance->connector_specific_data);
+}
+
+static void test_prometheus_remote_write_prepare_header(void **state)
+{
+ struct engine *engine = *state;
+ struct instance *instance = engine->instance_root;
+
+ struct prometheus_remote_write_specific_config *connector_specific_config =
+ callocz(1, sizeof(struct prometheus_remote_write_specific_config));
+ instance->config.connector_specific_config = connector_specific_config;
+ connector_specific_config->remote_write_path = strdupz("/receive");
+
+ struct simple_connector_data *simple_connector_data = callocz(1, sizeof(struct simple_connector_data));
+ instance->connector_specific_data = simple_connector_data;
+ simple_connector_data->last_buffer = callocz(1, sizeof(struct simple_connector_buffer));
+ simple_connector_data->last_buffer->header = buffer_create(0);
+ simple_connector_data->last_buffer->buffer = buffer_create(0);
+
+ buffer_sprintf(simple_connector_data->last_buffer->buffer, "test buffer");
+
+ prometheus_remote_write_prepare_header(instance);
+
+ assert_string_equal(
+ buffer_tostring(simple_connector_data->last_buffer->header),
+ "POST /receive HTTP/1.1\r\n"
+ "Host: localhost\r\n"
+ "Accept: */*\r\n"
+ "Content-Encoding: snappy\r\n"
+ "Content-Type: application/x-protobuf\r\n"
+ "X-Prometheus-Remote-Write-Version: 0.1.0\r\n"
+ "Content-Length: 11\r\n"
+ "\r\n");
+
+ free(connector_specific_config->remote_write_path);
+
+ buffer_free(simple_connector_data->last_buffer->header);
+ buffer_free(simple_connector_data->last_buffer->buffer);
+}
+
+static void test_process_prometheus_remote_write_response(void **state)
+{
+ (void)state;
+ BUFFER *buffer = buffer_create(0);
+
+ buffer_sprintf(buffer, "HTTP/1.1 200 OK\r\n");
+ assert_int_equal(process_prometheus_remote_write_response(buffer, NULL), 0);
+
+ buffer_free(buffer);
+}
+
+static void test_format_host_prometheus_remote_write(void **state)
+{
+ struct engine *engine = *state;
+ struct instance *instance = engine->instance_root;
+
+ instance->config.options |= EXPORTING_OPTION_SEND_CONFIGURED_LABELS;
+ instance->config.options |= EXPORTING_OPTION_SEND_AUTOMATIC_LABELS;
+
+ struct simple_connector_data *simple_connector_data = mallocz(sizeof(struct simple_connector_data));
+ instance->connector_specific_data = simple_connector_data;
+ struct prometheus_remote_write_specific_data *connector_specific_data =
+ mallocz(sizeof(struct prometheus_remote_write_specific_data));
+ simple_connector_data->connector_specific_data = (void *)connector_specific_data;
+ connector_specific_data->write_request = (void *)0xff;
+
+ localhost->program_name = strdupz("test_program");
+ localhost->program_version = strdupz("test_version");
+
+ expect_function_call(__wrap_add_host_info);
+ expect_value(__wrap_add_host_info, write_request_p, 0xff);
+ expect_string(__wrap_add_host_info, name, "netdata_info");
+ expect_string(__wrap_add_host_info, instance, "test-host");
+ expect_string(__wrap_add_host_info, application, "test_program");
+ expect_string(__wrap_add_host_info, version, "test_version");
+ expect_in_range(
+ __wrap_add_host_info, timestamp, now_realtime_usec() / USEC_PER_MS - 1000, now_realtime_usec() / USEC_PER_MS);
+
+ expect_function_call(__wrap_add_label);
+ expect_value(__wrap_add_label, write_request_p, 0xff);
+ expect_string(__wrap_add_label, key, "key1");
+ expect_string(__wrap_add_label, value, "value1");
+
+ expect_function_call(__wrap_add_label);
+ expect_value(__wrap_add_label, write_request_p, 0xff);
+ expect_string(__wrap_add_label, key, "key2");
+ expect_string(__wrap_add_label, value, "value2");
+
+ assert_int_equal(format_host_prometheus_remote_write(instance, localhost), 0);
+
+ freez(connector_specific_data);
+ freez(simple_connector_data);
+ free(localhost->program_name);
+ free(localhost->program_version);
+}
+
+static void test_format_dimension_prometheus_remote_write(void **state)
+{
+ struct engine *engine = *state;
+ struct instance *instance = engine->instance_root;
+
+ struct simple_connector_data *simple_connector_data = mallocz(sizeof(struct simple_connector_data));
+ instance->connector_specific_data = simple_connector_data;
+ struct prometheus_remote_write_specific_data *connector_specific_data =
+ mallocz(sizeof(struct prometheus_remote_write_specific_data));
+ simple_connector_data->connector_specific_data = (void *)connector_specific_data;
+ connector_specific_data->write_request = (void *)0xff;
+
+ RRDDIM *rd = localhost->rrdset_root->dimensions;
+
+ expect_function_call(__wrap_exporting_calculate_value_from_stored_data);
+ will_return(__wrap_exporting_calculate_value_from_stored_data, pack_storage_number(27, SN_EXISTS));
+
+ expect_function_call(__wrap_add_metric);
+ expect_value(__wrap_add_metric, write_request_p, 0xff);
+ expect_string(__wrap_add_metric, name, "netdata_");
+ expect_string(__wrap_add_metric, chart, "");
+ expect_string(__wrap_add_metric, family, "");
+ expect_string(__wrap_add_metric, dimension, "dimension_name");
+ expect_string(__wrap_add_metric, instance, "test-host");
+ expect_value(__wrap_add_metric, value, 0x292932e0);
+ expect_value(__wrap_add_metric, timestamp, 15052 * MSEC_PER_SEC);
+
+ assert_int_equal(format_dimension_prometheus_remote_write(instance, rd), 0);
+}
+
+static void test_format_batch_prometheus_remote_write(void **state)
+{
+ struct engine *engine = *state;
+ struct instance *instance = engine->instance_root;
+
+ struct simple_connector_data *simple_connector_data = mallocz(sizeof(struct simple_connector_data));
+ instance->connector_specific_data = simple_connector_data;
+ struct prometheus_remote_write_specific_data *connector_specific_data =
+ mallocz(sizeof(struct prometheus_remote_write_specific_data));
+ simple_connector_data->connector_specific_data = (void *)connector_specific_data;
+ connector_specific_data->write_request = __real_init_write_request();
+
+ expect_function_call(__wrap_simple_connector_end_batch);
+ expect_value(__wrap_simple_connector_end_batch, instance, instance);
+ will_return(__wrap_simple_connector_end_batch, 0);
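+ // Fill a real write request with one host info record, one label, and one metric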
+ __real_add_host_info(
+ connector_specific_data->write_request,
+ "test_name", "test_instance", "test_application", "test_version", 15051);
+
+ __real_add_label(connector_specific_data->write_request, "test_key", "test_value");
+
+ __real_add_metric(
+ connector_specific_data->write_request,
+ "test_name", "test chart", "test_family", "test_dimension", "test_instance",
+ 123000321, 15052);
+
+ assert_int_equal(format_batch_prometheus_remote_write(instance), 0);
+
+ BUFFER *buffer = instance->buffer;
+ assert_int_equal(buffer_strlen(buffer), 192);
+
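+ // Render the binary payload as octal escapes so it can be compared as a string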
+ BUFFER *escaped_buffer = buffer_create(850);
+ size_t len = buffer_strlen(buffer);
+ char *ch = (char *)buffer_tostring(buffer);
+ for (; len > 0; ch++, len--)
+ buffer_sprintf(escaped_buffer, "\\%03o", (unsigned int)*ch);
+ assert_string_equal(
+ buffer_tostring(escaped_buffer),
+ "\\37777777641\\002\\120\\012\\37777777622\\001\\012\\025\\012\\010\\137\\137\\156\\141\\155\\145\\137\\137"
+ "\\022\\011\\164\\145\\163\\164\\005\\015\\064\\012\\031\\012\\010\\151\\156\\163\\164\\141\\156\\143\\145\\022"
+ "\\015\\005\\027\\021\\017\\100\\012\\037\\012\\013\\141\\160\\160\\154\\151\\143\\141\\164\\151\\157\\156\\022"
+ "\\020\\005\\036\\035\\022\\034\\012\\027\\012\\007\\166\\145\\162\\163\\001\\035\\000\\014\\005\\035\\015\\016"
+ "\\014\\012\\026\\012\\010\\005\\020\\020\\153\\145\\171\\022\\012\\005\\012\\040\\166\\141\\154\\165\\145\\022"
+ "\\014\\011\\000\\005\\001\\030\\37777777760\\077\\020\\37777777713\\165\\012\\37777777611\\142\\37777777625"
+ "\\000\\034\\023\\012\\005\\143\\150\\141\\162\\164\\011\\075\\000\\040\\005\\014\\054\\012\\025\\012\\006\\146"
+ "\\141\\155\\151\\154\\171\\022\\013\\005\\123\\011\\015\\040\\012\\033\\012\\011\\144\\151\\155\\145\\156\\005"
+ "\\37777777607\\000\\016\\005\\032\\025\\020\\000\\012\\146\\37777777736\\000\\064\\022\\014\\011\\000\\000\\000"
+ "\\004\\130\\123\\37777777635\\101\\020\\37777777714\\165");
+
+ buffer_free(escaped_buffer);
+ protocol_buffers_shutdown();
+}
+#endif // ENABLE_PROMETHEUS_REMOTE_WRITE
+
+#if HAVE_KINESIS
+static void test_init_aws_kinesis_instance(void **state)
+{
+ struct engine *engine = *state;
+ struct instance *instance = engine->instance_root;
+
+ instance->config.options = EXPORTING_SOURCE_DATA_AS_COLLECTED | EXPORTING_OPTION_SEND_NAMES;
+
+ struct aws_kinesis_specific_config *connector_specific_config =
+ callocz(1, sizeof(struct aws_kinesis_specific_config));
+ instance->config.connector_specific_config = connector_specific_config;
+ connector_specific_config->stream_name = strdupz("test_stream");
+ connector_specific_config->auth_key_id = strdupz("test_auth_key_id");
+ connector_specific_config->secure_key = strdupz("test_secure_key");
+
+ expect_function_call(__wrap_aws_sdk_init);
+ expect_function_call(__wrap_kinesis_init);
+ expect_not_value(__wrap_kinesis_init, kinesis_specific_data_p, NULL);
+ expect_string(__wrap_kinesis_init, region, "localhost");
+ expect_string(__wrap_kinesis_init, access_key_id, "test_auth_key_id");
+ expect_string(__wrap_kinesis_init, secret_key, "test_secure_key");
+ expect_value(__wrap_kinesis_init, timeout, 10000);
+
+ assert_int_equal(init_aws_kinesis_instance(instance), 0);
+
+ assert_ptr_equal(instance->worker, aws_kinesis_connector_worker);
+ assert_ptr_equal(instance->start_batch_formatting, NULL);
+ assert_ptr_equal(instance->start_host_formatting, format_host_labels_json_plaintext);
+ assert_ptr_equal(instance->start_chart_formatting, NULL);
+ assert_ptr_equal(instance->metric_formatting, format_dimension_collected_json_plaintext);
+ assert_ptr_equal(instance->end_chart_formatting, NULL);
+ assert_ptr_equal(instance->end_host_formatting, flush_host_labels);
+ assert_ptr_equal(instance->end_batch_formatting, NULL);
+ assert_ptr_not_equal(instance->buffer, NULL);
+ buffer_free(instance->buffer);
+ assert_ptr_not_equal(instance->connector_specific_data, NULL);
+ freez(instance->connector_specific_data);
+
+ instance->config.options = EXPORTING_SOURCE_DATA_AVERAGE | EXPORTING_OPTION_SEND_NAMES;
+
+ expect_function_call(__wrap_kinesis_init);
+ expect_not_value(__wrap_kinesis_init, kinesis_specific_data_p, NULL);
+ expect_string(__wrap_kinesis_init, region, "localhost");
+ expect_string(__wrap_kinesis_init, access_key_id, "test_auth_key_id");
+ expect_string(__wrap_kinesis_init, secret_key, "test_secure_key");
+ expect_value(__wrap_kinesis_init, timeout, 10000);
+
+ assert_int_equal(init_aws_kinesis_instance(instance), 0);
+ assert_ptr_equal(instance->metric_formatting, format_dimension_stored_json_plaintext);
+
+ free(connector_specific_config->stream_name);
+ free(connector_specific_config->auth_key_id);
+ free(connector_specific_config->secure_key);
+}
+
+static void test_aws_kinesis_connector_worker(void **state)
+{
+ struct engine *engine = *state;
+ struct instance *instance = engine->instance_root;
+ struct stats *stats = &instance->stats;
+ BUFFER *buffer = instance->buffer;
+
+ __real_mark_scheduled_instances(engine);
+
+ expect_function_call(__wrap_rrdhost_is_exportable);
+ expect_value(__wrap_rrdhost_is_exportable, instance, instance);
+ expect_value(__wrap_rrdhost_is_exportable, host, localhost);
+ will_return(__wrap_rrdhost_is_exportable, 1);
+
+ RRDSET *st = localhost->rrdset_root;
+ expect_function_call(__wrap_rrdset_is_exportable);
+ expect_value(__wrap_rrdset_is_exportable, instance, instance);
+ expect_value(__wrap_rrdset_is_exportable, st, st);
+ will_return(__wrap_rrdset_is_exportable, 1);
+
+ expect_function_call(__wrap_simple_connector_end_batch);
+ expect_value(__wrap_simple_connector_end_batch, instance, instance);
+ will_return(__wrap_simple_connector_end_batch, 0);
+ __real_prepare_buffers(engine);
+
+ struct aws_kinesis_specific_config *connector_specific_config =
+ callocz(1, sizeof(struct aws_kinesis_specific_config));
+ instance->config.connector_specific_config = connector_specific_config;
+ connector_specific_config->stream_name = strdupz("test_stream");
+ connector_specific_config->auth_key_id = strdupz("test_auth_key_id");
+ connector_specific_config->secure_key = strdupz("test_secure_key");
+
+ struct aws_kinesis_specific_data *connector_specific_data = callocz(1, sizeof(struct aws_kinesis_specific_data));
+ instance->connector_specific_data = (void *)connector_specific_data;
+
+ expect_function_call(__wrap_kinesis_put_record);
+ expect_not_value(__wrap_kinesis_put_record, kinesis_specific_data_p, NULL);
+ expect_string(__wrap_kinesis_put_record, stream_name, "test_stream");
+ expect_string(__wrap_kinesis_put_record, partition_key, "netdata_0");
+ expect_value(__wrap_kinesis_put_record, data, buffer_tostring(buffer));
+ // The buffer is prepared by the Graphite exporting connector
+ expect_string(
+ __wrap_kinesis_put_record, data,
+ "netdata.test-host.chart_name.dimension_name;TAG1=VALUE1 TAG2=VALUE2 123000321 15051\n");
+ expect_value(__wrap_kinesis_put_record, data_len, 84);
+
+ expect_function_call(__wrap_kinesis_get_result);
+ expect_value(__wrap_kinesis_get_result, request_outcomes_p, NULL);
+ expect_not_value(__wrap_kinesis_get_result, error_message, NULL);
+ expect_not_value(__wrap_kinesis_get_result, sent_bytes, NULL);
+ expect_not_value(__wrap_kinesis_get_result, lost_bytes, NULL);
+ will_return(__wrap_kinesis_get_result, 0);
+
+ expect_function_call(__wrap_send_internal_metrics);
+ expect_value(__wrap_send_internal_metrics, instance, instance);
+ will_return(__wrap_send_internal_metrics, 0);
+
+ aws_kinesis_connector_worker(instance);
+
+ assert_int_equal(stats->buffered_metrics, 0);
+ assert_int_equal(stats->buffered_bytes, 84);
+ assert_int_equal(stats->received_bytes, 0);
+ assert_int_equal(stats->sent_bytes, 84);
+ assert_int_equal(stats->sent_metrics, 1);
+ assert_int_equal(stats->lost_metrics, 0);
+ assert_int_equal(stats->receptions, 1);
+ assert_int_equal(stats->transmission_successes, 1);
+ assert_int_equal(stats->transmission_failures, 0);
+ assert_int_equal(stats->data_lost_events, 0);
+ assert_int_equal(stats->lost_bytes, 0);
+ assert_int_equal(stats->reconnects, 0);
+
+ free(connector_specific_config->stream_name);
+ free(connector_specific_config->auth_key_id);
+ free(connector_specific_config->secure_key);
+}
+#endif // HAVE_KINESIS
+
+#if ENABLE_EXPORTING_PUBSUB
+static void test_init_pubsub_instance(void **state)
+{
+ struct engine *engine = *state;
+ struct instance *instance = engine->instance_root;
+
+ instance->config.options = EXPORTING_SOURCE_DATA_AS_COLLECTED | EXPORTING_OPTION_SEND_NAMES;
+
+ struct pubsub_specific_config *connector_specific_config =
+ callocz(1, sizeof(struct pubsub_specific_config));
+ instance->config.connector_specific_config = connector_specific_config;
+ connector_specific_config->credentials_file = strdupz("/test/credentials/file");
+ connector_specific_config->project_id = strdupz("test_project_id");
+ connector_specific_config->topic_id = strdupz("test_topic_id");
+
+ expect_function_call(__wrap_pubsub_init);
+ expect_not_value(__wrap_pubsub_init, pubsub_specific_data_p, NULL);
+ expect_string(__wrap_pubsub_init, destination, "localhost");
+ expect_string(__wrap_pubsub_init, error_message, "");
+ expect_string(__wrap_pubsub_init, credentials_file, "/test/credentials/file");
+ expect_string(__wrap_pubsub_init, project_id, "test_project_id");
+ expect_string(__wrap_pubsub_init, topic_id, "test_topic_id");
+ will_return(__wrap_pubsub_init, 0);
+
+ assert_int_equal(init_pubsub_instance(instance), 0);
+
+ assert_ptr_equal(instance->worker, pubsub_connector_worker);
+ assert_ptr_equal(instance->start_batch_formatting, NULL);
+ assert_ptr_equal(instance->start_host_formatting, format_host_labels_json_plaintext);
+ assert_ptr_equal(instance->start_chart_formatting, NULL);
+ assert_ptr_equal(instance->metric_formatting, format_dimension_collected_json_plaintext);
+ assert_ptr_equal(instance->end_chart_formatting, NULL);
+ assert_ptr_equal(instance->end_host_formatting, flush_host_labels);
+ assert_ptr_equal(instance->end_batch_formatting, NULL);
+ assert_ptr_not_equal(instance->buffer, NULL);
+ buffer_free(instance->buffer);
+ assert_ptr_not_equal(instance->connector_specific_data, NULL);
+ freez(instance->connector_specific_data);
+
+ instance->config.options = EXPORTING_SOURCE_DATA_AVERAGE | EXPORTING_OPTION_SEND_NAMES;
+
+ expect_function_call(__wrap_pubsub_init);
+ expect_not_value(__wrap_pubsub_init, pubsub_specific_data_p, NULL);
+ expect_string(__wrap_pubsub_init, destination, "localhost");
+ expect_string(__wrap_pubsub_init, error_message, "");
+ expect_string(__wrap_pubsub_init, credentials_file, "/test/credentials/file");
+ expect_string(__wrap_pubsub_init, project_id, "test_project_id");
+ expect_string(__wrap_pubsub_init, topic_id, "test_topic_id");
+ will_return(__wrap_pubsub_init, 0);
+
+ assert_int_equal(init_pubsub_instance(instance), 0);
+ assert_ptr_equal(instance->metric_formatting, format_dimension_stored_json_plaintext);
+
+ free(connector_specific_config->credentials_file);
+ free(connector_specific_config->project_id);
+ free(connector_specific_config->topic_id);
+}
+
+static void test_pubsub_connector_worker(void **state)
+{
+ struct engine *engine = *state;
+ struct instance *instance = engine->instance_root;
+ struct stats *stats = &instance->stats;
+
+ __real_mark_scheduled_instances(engine);
+
+ expect_function_call(__wrap_rrdhost_is_exportable);
+ expect_value(__wrap_rrdhost_is_exportable, instance, instance);
+ expect_value(__wrap_rrdhost_is_exportable, host, localhost);
+ will_return(__wrap_rrdhost_is_exportable, 1);
+
+ RRDSET *st = localhost->rrdset_root;
+ expect_function_call(__wrap_rrdset_is_exportable);
+ expect_value(__wrap_rrdset_is_exportable, instance, instance);
+ expect_value(__wrap_rrdset_is_exportable, st, st);
+ will_return(__wrap_rrdset_is_exportable, 1);
+
+ expect_function_call(__wrap_simple_connector_end_batch);
+ expect_value(__wrap_simple_connector_end_batch, instance, instance);
+ will_return(__wrap_simple_connector_end_batch, 0);
+ __real_prepare_buffers(engine);
+
+ struct pubsub_specific_config *connector_specific_config =
+ callocz(1, sizeof(struct pubsub_specific_config));
+ instance->config.connector_specific_config = connector_specific_config;
+ connector_specific_config->credentials_file = strdupz("/test/credentials/file");
+ connector_specific_config->project_id = strdupz("test_project_id");
+ connector_specific_config->topic_id = strdupz("test_topic_id");
+
+ struct pubsub_specific_data *connector_specific_data = callocz(1, sizeof(struct pubsub_specific_data));
+ instance->connector_specific_data = (void *)connector_specific_data;
+
+ expect_function_call(__wrap_pubsub_add_message);
+ expect_not_value(__wrap_pubsub_add_message, pubsub_specific_data_p, NULL);
+ // The buffer is prepared by the Graphite exporting connector
+ expect_string(
+ __wrap_pubsub_add_message, data,
+ "netdata.test-host.chart_name.dimension_name;TAG1=VALUE1 TAG2=VALUE2 123000321 15051\n");
+ will_return(__wrap_pubsub_add_message, 0);
+
+ expect_function_call(__wrap_pubsub_publish);
+ expect_not_value(__wrap_pubsub_publish, pubsub_specific_data_p, NULL);
+ expect_string(__wrap_pubsub_publish, error_message, "");
+ expect_value(__wrap_pubsub_publish, buffered_metrics, 1);
+ expect_value(__wrap_pubsub_publish, buffered_bytes, 84);
+ will_return(__wrap_pubsub_publish, 0);
+
+ expect_function_call(__wrap_pubsub_get_result);
+ expect_not_value(__wrap_pubsub_get_result, pubsub_specific_data_p, NULL);
+ expect_not_value(__wrap_pubsub_get_result, error_message, NULL);
+ expect_not_value(__wrap_pubsub_get_result, sent_metrics, NULL);
+ expect_not_value(__wrap_pubsub_get_result, sent_bytes, NULL);
+ expect_not_value(__wrap_pubsub_get_result, lost_metrics, NULL);
+ expect_not_value(__wrap_pubsub_get_result, lost_bytes, NULL);
+ will_return(__wrap_pubsub_get_result, 0);
+
+ expect_function_call(__wrap_send_internal_metrics);
+ expect_value(__wrap_send_internal_metrics, instance, instance);
+ will_return(__wrap_send_internal_metrics, 0);
+
+ pubsub_connector_worker(instance);
+
+ assert_int_equal(stats->buffered_metrics, 0);
+ assert_int_equal(stats->buffered_bytes, 84);
+ assert_int_equal(stats->received_bytes, 0);
+ assert_int_equal(stats->sent_bytes, 84);
+ assert_int_equal(stats->sent_metrics, 0);
+ assert_int_equal(stats->lost_metrics, 0);
+ assert_int_equal(stats->receptions, 1);
+ assert_int_equal(stats->transmission_successes, 1);
+ assert_int_equal(stats->transmission_failures, 0);
+ assert_int_equal(stats->data_lost_events, 0);
+ assert_int_equal(stats->lost_bytes, 0);
+ assert_int_equal(stats->reconnects, 0);
+
+ free(connector_specific_config->credentials_file);
+ free(connector_specific_config->project_id);
+ free(connector_specific_config->topic_id);
+}
+#endif // ENABLE_EXPORTING_PUBSUB
+
+#if HAVE_MONGOC
+static void test_init_mongodb_instance(void **state)
+{
+ struct engine *engine = *state;
+ struct instance *instance = engine->instance_root;
+
+ instance->config.options = EXPORTING_SOURCE_DATA_AS_COLLECTED | EXPORTING_OPTION_SEND_NAMES;
+
+ struct mongodb_specific_config *connector_specific_config = callocz(1, sizeof(struct mongodb_specific_config));
+ instance->config.connector_specific_config = connector_specific_config;
+ connector_specific_config->database = strdupz("test_database");
+ connector_specific_config->collection = strdupz("test_collection");
+ instance->config.buffer_on_failures = 10;
+
+ expect_function_call(__wrap_mongoc_init);
+ expect_function_call(__wrap_mongoc_uri_new_with_error);
+ expect_string(__wrap_mongoc_uri_new_with_error, uri_string, "localhost");
+ expect_not_value(__wrap_mongoc_uri_new_with_error, error, NULL);
+ will_return(__wrap_mongoc_uri_new_with_error, 0xf1);
+
+ expect_function_call(__wrap_mongoc_uri_get_option_as_int32);
+ expect_value(__wrap_mongoc_uri_get_option_as_int32, uri, 0xf1);
+ expect_string(__wrap_mongoc_uri_get_option_as_int32, option, MONGOC_URI_SOCKETTIMEOUTMS);
+ expect_value(__wrap_mongoc_uri_get_option_as_int32, fallback, 1000);
+ will_return(__wrap_mongoc_uri_get_option_as_int32, 1000);
+
+ expect_function_call(__wrap_mongoc_uri_set_option_as_int32);
+ expect_value(__wrap_mongoc_uri_set_option_as_int32, uri, 0xf1);
+ expect_string(__wrap_mongoc_uri_set_option_as_int32, option, MONGOC_URI_SOCKETTIMEOUTMS);
+ expect_value(__wrap_mongoc_uri_set_option_as_int32, value, 1000);
+ will_return(__wrap_mongoc_uri_set_option_as_int32, true);
+
+ expect_function_call(__wrap_mongoc_client_new_from_uri);
+ expect_value(__wrap_mongoc_client_new_from_uri, uri, 0xf1);
+ will_return(__wrap_mongoc_client_new_from_uri, 0xf2);
+
+ expect_function_call(__wrap_mongoc_client_set_appname);
+ expect_value(__wrap_mongoc_client_set_appname, client, 0xf2);
+ expect_string(__wrap_mongoc_client_set_appname, appname, "netdata");
+ will_return(__wrap_mongoc_client_set_appname, true);
+
+ expect_function_call(__wrap_mongoc_client_get_collection);
+ expect_value(__wrap_mongoc_client_get_collection, client, 0xf2);
+ expect_string(__wrap_mongoc_client_get_collection, db, "test_database");
+ expect_string(__wrap_mongoc_client_get_collection, collection, "test_collection");
+ will_return(__wrap_mongoc_client_get_collection, 0xf3);
+
+ expect_function_call(__wrap_mongoc_uri_destroy);
+ expect_value(__wrap_mongoc_uri_destroy, uri, 0xf1);
+
+ assert_int_equal(init_mongodb_instance(instance), 0);
+
+ assert_ptr_equal(instance->worker, mongodb_connector_worker);
+ assert_ptr_equal(instance->start_batch_formatting, NULL);
+ assert_ptr_equal(instance->start_host_formatting, format_host_labels_json_plaintext);
+ assert_ptr_equal(instance->start_chart_formatting, NULL);
+ assert_ptr_equal(instance->metric_formatting, format_dimension_collected_json_plaintext);
+ assert_ptr_equal(instance->end_chart_formatting, NULL);
+ assert_ptr_equal(instance->end_host_formatting, flush_host_labels);
+ assert_ptr_equal(instance->end_batch_formatting, format_batch_mongodb);
+ assert_ptr_equal(instance->prepare_header, NULL);
+ assert_ptr_equal(instance->check_response, NULL);
+
+ assert_ptr_not_equal(instance->buffer, NULL);
+ buffer_free(instance->buffer);
+
+ assert_ptr_not_equal(instance->connector_specific_data, NULL);
+
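+ // Walk the circular list of failure buffers; bail out if it does not wrap within buffer_on_failures + 1 steps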
+ struct mongodb_specific_data *connector_specific_data =
+ (struct mongodb_specific_data *)instance->connector_specific_data;
+ size_t number_of_buffers = 1;
+ struct bson_buffer *current_buffer = connector_specific_data->first_buffer;
+ while (current_buffer->next != connector_specific_data->first_buffer) {
+ current_buffer = current_buffer->next;
+ number_of_buffers++;
+ if (number_of_buffers == (size_t)(instance->config.buffer_on_failures + 1)) {
+ number_of_buffers = 0;
+ break;
+ }
+ }
+ assert_int_equal(number_of_buffers, 9);
+
+ free(connector_specific_config->database);
+ free(connector_specific_config->collection);
+}
+
+static void test_format_batch_mongodb(void **state)
+{
+ struct engine *engine = *state;
+ struct instance *instance = engine->instance_root;
+ struct stats *stats = &instance->stats;
+
+ struct mongodb_specific_data *connector_specific_data = mallocz(sizeof(struct mongodb_specific_data));
+ instance->connector_specific_data = (void *)connector_specific_data;
+
+ struct bson_buffer *current_buffer = callocz(1, sizeof(struct bson_buffer));
+ connector_specific_data->first_buffer = current_buffer;
+ connector_specific_data->first_buffer->next = current_buffer;
+ connector_specific_data->last_buffer = current_buffer;
+
+ BUFFER *buffer = buffer_create(0);
+ buffer_sprintf(buffer, "{ \"metric\": \"test_metric\" }\n");
+ instance->buffer = buffer;
+ stats->buffered_metrics = 1;
+
+ assert_int_equal(format_batch_mongodb(instance), 0);
+
+ assert_int_equal(connector_specific_data->last_buffer->documents_inserted, 1);
+ assert_int_equal(buffer_strlen(buffer), 0);
+
+ size_t len;
+ char *str = bson_as_canonical_extended_json(connector_specific_data->last_buffer->insert[0], &len);
+ assert_string_equal(str, "{ \"metric\" : \"test_metric\" }");
+
+ freez(str);
+ buffer_free(buffer);
+}
+
+static void test_mongodb_connector_worker(void **state)
+{
+ struct engine *engine = *state;
+ struct instance *instance = engine->instance_root;
+
+ struct mongodb_specific_config *connector_specific_config = callocz(1, sizeof(struct mongodb_specific_config));
+ instance->config.connector_specific_config = connector_specific_config;
+ connector_specific_config->database = strdupz("test_database");
+
+ struct mongodb_specific_data *connector_specific_data = callocz(1, sizeof(struct mongodb_specific_data));
+ instance->connector_specific_data = (void *)connector_specific_data;
+ connector_specific_config->collection = strdupz("test_collection");
+
+ struct bson_buffer *buffer = callocz(1, sizeof(struct bson_buffer));
+ buffer->documents_inserted = 1;
+ connector_specific_data->first_buffer = buffer;
+ connector_specific_data->first_buffer->next = buffer;
+
+ connector_specific_data->first_buffer->insert = callocz(1, sizeof(bson_t *));
+ bson_error_t bson_error;
+ connector_specific_data->first_buffer->insert[0] =
+ bson_new_from_json((const uint8_t *)"{ \"test_key\" : \"test_value\" }", -1, &bson_error);
+
+ connector_specific_data->client = mongoc_client_new("mongodb://localhost");
+ connector_specific_data->collection =
+ __real_mongoc_client_get_collection(connector_specific_data->client, "test_database", "test_collection");
+
+ expect_function_call(__wrap_mongoc_collection_insert_many);
+ expect_value(__wrap_mongoc_collection_insert_many, collection, connector_specific_data->collection);
+ expect_value(__wrap_mongoc_collection_insert_many, documents, connector_specific_data->first_buffer->insert);
+ expect_value(__wrap_mongoc_collection_insert_many, n_documents, 1);
+ expect_value(__wrap_mongoc_collection_insert_many, opts, NULL);
+ expect_value(__wrap_mongoc_collection_insert_many, reply, NULL);
+ expect_not_value(__wrap_mongoc_collection_insert_many, error, NULL);
+ will_return(__wrap_mongoc_collection_insert_many, true);
+
+ expect_function_call(__wrap_send_internal_metrics);
+ expect_value(__wrap_send_internal_metrics, instance, instance);
+ will_return(__wrap_send_internal_metrics, 0);
+
+ mongodb_connector_worker(instance);
+
+ assert_ptr_equal(connector_specific_data->first_buffer->insert, NULL);
+ assert_int_equal(connector_specific_data->first_buffer->documents_inserted, 0);
+ assert_ptr_equal(connector_specific_data->first_buffer, connector_specific_data->first_buffer->next);
+
+ struct stats *stats = &instance->stats;
+ assert_int_equal(stats->buffered_metrics, 0);
+ assert_int_equal(stats->buffered_bytes, 0);
+ assert_int_equal(stats->received_bytes, 0);
+ assert_int_equal(stats->sent_bytes, 30);
+ assert_int_equal(stats->sent_metrics, 1);
+ assert_int_equal(stats->lost_metrics, 0);
+ assert_int_equal(stats->receptions, 1);
+ assert_int_equal(stats->transmission_successes, 1);
+ assert_int_equal(stats->transmission_failures, 0);
+ assert_int_equal(stats->data_lost_events, 0);
+ assert_int_equal(stats->lost_bytes, 0);
+ assert_int_equal(stats->reconnects, 0);
+
+ free(connector_specific_config->database);
+ free(connector_specific_config->collection);
+}
+#endif // HAVE_MONGOC
+
+int main(void)
+{
+ const struct CMUnitTest tests[] = {
+ cmocka_unit_test_setup_teardown(test_exporting_engine, setup_initialized_engine, teardown_initialized_engine),
+ cmocka_unit_test(test_read_exporting_config),
+ cmocka_unit_test_setup_teardown(test_init_connectors, setup_configured_engine, teardown_configured_engine),
+ cmocka_unit_test_setup_teardown(
+ test_init_graphite_instance, setup_configured_engine, teardown_configured_engine),
+ cmocka_unit_test_setup_teardown(
+ test_init_json_instance, setup_configured_engine, teardown_configured_engine),
+ cmocka_unit_test_setup_teardown(
+ test_init_opentsdb_telnet_instance, setup_configured_engine, teardown_configured_engine),
+ cmocka_unit_test_setup_teardown(
+ test_init_opentsdb_http_instance, setup_configured_engine, teardown_configured_engine),
+ cmocka_unit_test_setup_teardown(
+ test_mark_scheduled_instances, setup_initialized_engine, teardown_initialized_engine),
+ cmocka_unit_test_setup_teardown(
+ test_rrdhost_is_exportable, setup_initialized_engine, teardown_initialized_engine),
+ cmocka_unit_test_setup_teardown(
+ test_false_rrdhost_is_exportable, setup_initialized_engine, teardown_initialized_engine),
+ cmocka_unit_test_setup_teardown(
+ test_rrdset_is_exportable, setup_initialized_engine, teardown_initialized_engine),
+ cmocka_unit_test_setup_teardown(
+ test_false_rrdset_is_exportable, setup_initialized_engine, teardown_initialized_engine),
+ cmocka_unit_test_setup_teardown(
+ test_exporting_calculate_value_from_stored_data, setup_initialized_engine, teardown_initialized_engine),
+ cmocka_unit_test_setup_teardown(test_prepare_buffers, setup_initialized_engine, teardown_initialized_engine),
+ cmocka_unit_test(test_exporting_name_copy),
+ cmocka_unit_test_setup_teardown(
+ test_format_dimension_collected_graphite_plaintext, setup_initialized_engine, teardown_initialized_engine),
+ cmocka_unit_test_setup_teardown(
+ test_format_dimension_stored_graphite_plaintext, setup_initialized_engine, teardown_initialized_engine),
+ cmocka_unit_test_setup_teardown(
+ test_format_dimension_collected_json_plaintext, setup_initialized_engine, teardown_initialized_engine),
+ cmocka_unit_test_setup_teardown(
+ test_format_dimension_stored_json_plaintext, setup_initialized_engine, teardown_initialized_engine),
+ cmocka_unit_test_setup_teardown(
+ test_format_dimension_collected_opentsdb_telnet, setup_initialized_engine, teardown_initialized_engine),
+ cmocka_unit_test_setup_teardown(
+ test_format_dimension_stored_opentsdb_telnet, setup_initialized_engine, teardown_initialized_engine),
+ cmocka_unit_test_setup_teardown(
+ test_format_dimension_collected_opentsdb_http, setup_initialized_engine, teardown_initialized_engine),
+ cmocka_unit_test_setup_teardown(
+ test_format_dimension_stored_opentsdb_http, setup_initialized_engine, teardown_initialized_engine),
+ cmocka_unit_test_setup_teardown(
+ test_exporting_discard_response, setup_initialized_engine, teardown_initialized_engine),
+ cmocka_unit_test_setup_teardown(
+ test_simple_connector_receive_response, setup_initialized_engine, teardown_initialized_engine),
+ cmocka_unit_test_setup_teardown(
+ test_simple_connector_send_buffer, setup_initialized_engine, teardown_initialized_engine),
+ cmocka_unit_test_setup_teardown(
+ test_simple_connector_worker, setup_initialized_engine, teardown_initialized_engine),
+ };
+
+ const struct CMUnitTest label_tests[] = {
+ cmocka_unit_test(test_sanitize_json_string),
+ cmocka_unit_test(test_sanitize_graphite_label_value),
+ cmocka_unit_test(test_sanitize_opentsdb_label_value),
+ cmocka_unit_test_setup_teardown(
+ test_format_host_labels_json_plaintext, setup_initialized_engine, teardown_initialized_engine),
+ cmocka_unit_test_setup_teardown(
+ test_format_host_labels_graphite_plaintext, setup_initialized_engine, teardown_initialized_engine),
+ cmocka_unit_test_setup_teardown(
+ test_format_host_labels_opentsdb_telnet, setup_initialized_engine, teardown_initialized_engine),
+ cmocka_unit_test_setup_teardown(
+ test_format_host_labels_opentsdb_http, setup_initialized_engine, teardown_initialized_engine),
+ cmocka_unit_test_setup_teardown(test_flush_host_labels, setup_initialized_engine, teardown_initialized_engine),
+ };
+
+ int test_res = cmocka_run_group_tests_name("exporting_engine", tests, NULL, NULL) +
+ cmocka_run_group_tests_name("labels_in_exporting_engine", label_tests, NULL, NULL);
+
+ const struct CMUnitTest internal_metrics_tests[] = {
+ cmocka_unit_test_setup_teardown(test_create_main_rusage_chart, setup_rrdhost, teardown_rrdhost),
+ cmocka_unit_test(test_send_main_rusage),
+ cmocka_unit_test(test_send_internal_metrics),
+ };
+
+ test_res += cmocka_run_group_tests_name("internal_metrics", internal_metrics_tests, NULL, NULL);
+
+ const struct CMUnitTest prometheus_web_api_tests[] = {
+ cmocka_unit_test_setup_teardown(test_can_send_rrdset, setup_prometheus, teardown_prometheus),
+ cmocka_unit_test_setup_teardown(test_prometheus_name_copy, setup_prometheus, teardown_prometheus),
+ cmocka_unit_test_setup_teardown(test_prometheus_label_copy, setup_prometheus, teardown_prometheus),
+ cmocka_unit_test_setup_teardown(test_prometheus_units_copy, setup_prometheus, teardown_prometheus),
+ cmocka_unit_test_setup_teardown(
+ test_format_host_labels_prometheus, setup_configured_engine, teardown_configured_engine),
+ cmocka_unit_test_setup_teardown(
+ rrd_stats_api_v1_charts_allmetrics_prometheus, setup_prometheus, teardown_prometheus),
+ };
+
+ test_res += cmocka_run_group_tests_name("prometheus_web_api", prometheus_web_api_tests, NULL, NULL);
+
+#if ENABLE_PROMETHEUS_REMOTE_WRITE
+ const struct CMUnitTest prometheus_remote_write_tests[] = {
+ cmocka_unit_test_setup_teardown(
+ test_init_prometheus_remote_write_instance, setup_configured_engine, teardown_configured_engine),
+ cmocka_unit_test_setup_teardown(
+ test_prometheus_remote_write_prepare_header, setup_initialized_engine, teardown_initialized_engine),
+ cmocka_unit_test(test_process_prometheus_remote_write_response),
+ cmocka_unit_test_setup_teardown(
+ test_format_host_prometheus_remote_write, setup_initialized_engine, teardown_initialized_engine),
+ cmocka_unit_test_setup_teardown(
+ test_format_dimension_prometheus_remote_write, setup_initialized_engine, teardown_initialized_engine),
+ cmocka_unit_test_setup_teardown(
+ test_format_batch_prometheus_remote_write, setup_initialized_engine, teardown_initialized_engine),
+ };
+
+ test_res += cmocka_run_group_tests_name(
+ "prometheus_remote_write_exporting_connector", prometheus_remote_write_tests, NULL, NULL);
+#endif
+
+#if HAVE_KINESIS
+ const struct CMUnitTest kinesis_tests[] = {
+ cmocka_unit_test_setup_teardown(
+ test_init_aws_kinesis_instance, setup_configured_engine, teardown_configured_engine),
+ cmocka_unit_test_setup_teardown(
+ test_aws_kinesis_connector_worker, setup_initialized_engine, teardown_initialized_engine),
+ };
+
+ test_res += cmocka_run_group_tests_name("kinesis_exporting_connector", kinesis_tests, NULL, NULL);
+#endif
+
+#if ENABLE_EXPORTING_PUBSUB
+ const struct CMUnitTest pubsub_tests[] = {
+ cmocka_unit_test_setup_teardown(
+ test_init_pubsub_instance, setup_configured_engine, teardown_configured_engine),
+ cmocka_unit_test_setup_teardown(
+ test_pubsub_connector_worker, setup_initialized_engine, teardown_initialized_engine),
+ };
+
+ test_res += cmocka_run_group_tests_name("pubsub_exporting_connector", pubsub_tests, NULL, NULL);
+#endif
+
+#if HAVE_MONGOC
+ const struct CMUnitTest mongodb_tests[] = {
+ cmocka_unit_test_setup_teardown(
+ test_init_mongodb_instance, setup_configured_engine, teardown_configured_engine),
+ cmocka_unit_test_setup_teardown(
+ test_format_batch_mongodb, setup_configured_engine, teardown_configured_engine),
+ cmocka_unit_test_setup_teardown(
+ test_mongodb_connector_worker, setup_configured_engine, teardown_configured_engine),
+ };
+
+ test_res += cmocka_run_group_tests_name("mongodb_exporting_connector", mongodb_tests, NULL, NULL);
+#endif
+
+ return test_res;
+}
diff --git a/exporting/tests/test_exporting_engine.h b/exporting/tests/test_exporting_engine.h
new file mode 100644
index 0000000..800be1b
--- /dev/null
+++ b/exporting/tests/test_exporting_engine.h
@@ -0,0 +1,203 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef TEST_EXPORTING_ENGINE_H
+#define TEST_EXPORTING_ENGINE_H 1
+
+#include "libnetdata/libnetdata.h"
+
+#include "exporting/exporting_engine.h"
+#include "exporting/graphite/graphite.h"
+#include "exporting/json/json.h"
+#include "exporting/opentsdb/opentsdb.h"
+
+#if ENABLE_PROMETHEUS_REMOTE_WRITE
+#include "exporting/prometheus/remote_write/remote_write.h"
+#endif
+
+#if HAVE_KINESIS
+#include "exporting/aws_kinesis/aws_kinesis.h"
+#endif
+
+#if ENABLE_EXPORTING_PUBSUB
+#include "exporting/pubsub/pubsub.h"
+#endif
+
+#if HAVE_MONGOC
+#include "exporting/mongodb/mongodb.h"
+#endif
+
+#include <stdarg.h>
+#include <stddef.h>
+#include <setjmp.h>
+#include <stdint.h>
+#include <cmocka.h>
+
+#define MAX_LOG_LINE 1024
+extern char log_line[];
+
+// -----------------------------------------------------------------------
+// doubles for Netdata functions
+
+const char *__wrap_strdupz(const char *s);
+void __wrap_info_int(const char *file, const char *function, const unsigned long line, const char *fmt, ...);
+int __wrap_connect_to_one_of(
+ const char *destination,
+ int default_port,
+ struct timeval *timeout,
+ size_t *reconnects_counter,
+ char *connected_to,
+ size_t connected_to_size);
+void __rrdhost_check_rdlock(RRDHOST *host, const char *file, const char *function, const unsigned long line);
+void __rrdset_check_rdlock(RRDSET *st, const char *file, const char *function, const unsigned long line);
+void __rrd_check_rdlock(const char *file, const char *function, const unsigned long line);
+time_t __mock_rrddim_query_oldest_time(RRDDIM *rd);
+time_t __mock_rrddim_query_latest_time(RRDDIM *rd);
+void __mock_rrddim_query_init(RRDDIM *rd, struct rrddim_query_handle *handle, time_t start_time, time_t end_time);
+int __mock_rrddim_query_is_finished(struct rrddim_query_handle *handle);
+storage_number __mock_rrddim_query_next_metric(struct rrddim_query_handle *handle, time_t *current_time);
+void __mock_rrddim_query_finalize(struct rrddim_query_handle *handle);
+
+// -----------------------------------------------------------------------
+// wraps for system functions
+
+void __wrap_uv_thread_create(uv_thread_t thread, void (*worker)(void *arg), void *arg);
+void __wrap_uv_mutex_lock(uv_mutex_t *mutex);
+void __wrap_uv_mutex_unlock(uv_mutex_t *mutex);
+void __wrap_uv_cond_signal(uv_cond_t *cond_var);
+void __wrap_uv_cond_wait(uv_cond_t *cond_var, uv_mutex_t *mutex);
+ssize_t __wrap_recv(int sockfd, void *buf, size_t len, int flags);
+ssize_t __wrap_send(int sockfd, const void *buf, size_t len, int flags);
+
+// -----------------------------------------------------------------------
+// doubles and originals for exporting engine functions
+
+struct engine *__real_read_exporting_config();
+struct engine *__wrap_read_exporting_config();
+struct engine *__mock_read_exporting_config();
+
+int __real_init_connectors(struct engine *engine);
+int __wrap_init_connectors(struct engine *engine);
+
+int __real_mark_scheduled_instances(struct engine *engine);
+int __wrap_mark_scheduled_instances(struct engine *engine);
+
+calculated_number __real_exporting_calculate_value_from_stored_data(
+ struct instance *instance,
+ RRDDIM *rd,
+ time_t *last_timestamp);
+calculated_number __wrap_exporting_calculate_value_from_stored_data(
+ struct instance *instance,
+ RRDDIM *rd,
+ time_t *last_timestamp);
+
+int __real_prepare_buffers(struct engine *engine);
+int __wrap_prepare_buffers(struct engine *engine);
+
+void __real_create_main_rusage_chart(RRDSET **st_rusage, RRDDIM **rd_user, RRDDIM **rd_system);
+void __wrap_create_main_rusage_chart(RRDSET **st_rusage, RRDDIM **rd_user, RRDDIM **rd_system);
+
+void __real_send_main_rusage(RRDSET *st_rusage, RRDDIM *rd_user, RRDDIM *rd_system);
+void __wrap_send_main_rusage(RRDSET *st_rusage, RRDDIM *rd_user, RRDDIM *rd_system);
+
+int __real_send_internal_metrics(struct instance *instance);
+int __wrap_send_internal_metrics(struct instance *instance);
+
+int __real_rrdhost_is_exportable(struct instance *instance, RRDHOST *host);
+int __wrap_rrdhost_is_exportable(struct instance *instance, RRDHOST *host);
+
+int __real_rrdset_is_exportable(struct instance *instance, RRDSET *st);
+int __wrap_rrdset_is_exportable(struct instance *instance, RRDSET *st);
+
+int __mock_start_batch_formatting(struct instance *instance);
+int __mock_start_host_formatting(struct instance *instance, RRDHOST *host);
+int __mock_start_chart_formatting(struct instance *instance, RRDSET *st);
+int __mock_metric_formatting(struct instance *instance, RRDDIM *rd);
+int __mock_end_chart_formatting(struct instance *instance, RRDSET *st);
+int __mock_end_host_formatting(struct instance *instance, RRDHOST *host);
+int __mock_end_batch_formatting(struct instance *instance);
+
+int __wrap_simple_connector_end_batch(struct instance *instance);
+
+#if ENABLE_PROMETHEUS_REMOTE_WRITE
+void *__real_init_write_request();
+void *__wrap_init_write_request();
+
+void __real_add_host_info(
+ void *write_request_p,
+ const char *name, const char *instance, const char *application, const char *version, const int64_t timestamp);
+void __wrap_add_host_info(
+ void *write_request_p,
+ const char *name, const char *instance, const char *application, const char *version, const int64_t timestamp);
+
+void __real_add_label(void *write_request_p, char *key, char *value);
+void __wrap_add_label(void *write_request_p, char *key, char *value);
+
+void __real_add_metric(
+ void *write_request_p,
+ const char *name, const char *chart, const char *family, const char *dimension,
+ const char *instance, const double value, const int64_t timestamp);
+void __wrap_add_metric(
+ void *write_request_p,
+ const char *name, const char *chart, const char *family, const char *dimension,
+ const char *instance, const double value, const int64_t timestamp);
+#endif /* ENABLE_PROMETHEUS_REMOTE_WRITE */
+
+#if HAVE_KINESIS
+void __wrap_aws_sdk_init();
+void __wrap_kinesis_init(
+ void *kinesis_specific_data_p, const char *region, const char *access_key_id, const char *secret_key,
+ const long timeout);
+void __wrap_kinesis_put_record(
+ void *kinesis_specific_data_p, const char *stream_name, const char *partition_key, const char *data,
+ size_t data_len);
+int __wrap_kinesis_get_result(void *request_outcomes_p, char *error_message, size_t *sent_bytes, size_t *lost_bytes);
+#endif /* HAVE_KINESIS */
+
+#if ENABLE_EXPORTING_PUBSUB
+int __wrap_pubsub_init(
+ void *pubsub_specific_data_p, char *error_message, const char *destination, const char *credentials_file,
+ const char *project_id, const char *topic_id);
+int __wrap_pubsub_add_message(void *pubsub_specific_data_p, char *data);
+int __wrap_pubsub_publish(
+ void *pubsub_specific_data_p, char *error_message, size_t buffered_metrics, size_t buffered_bytes);
+int __wrap_pubsub_get_result(
+ void *pubsub_specific_data_p, char *error_message,
+ size_t *sent_metrics, size_t *sent_bytes, size_t *lost_metrics, size_t *lost_bytes);
+#endif /* ENABLE_EXPORTING_PUBSUB */
+
+#if HAVE_MONGOC
+void __wrap_mongoc_init();
+mongoc_uri_t *__wrap_mongoc_uri_new_with_error(const char *uri_string, bson_error_t *error);
+int32_t __wrap_mongoc_uri_get_option_as_int32(const mongoc_uri_t *uri, const char *option, int32_t fallback);
+bool __wrap_mongoc_uri_set_option_as_int32(const mongoc_uri_t *uri, const char *option, int32_t value);
+mongoc_client_t *__wrap_mongoc_client_new_from_uri(const mongoc_uri_t *uri);
+bool __wrap_mongoc_client_set_appname(mongoc_client_t *client, const char *appname);
+mongoc_collection_t *
+__wrap_mongoc_client_get_collection(mongoc_client_t *client, const char *db, const char *collection);
+mongoc_collection_t *
+__real_mongoc_client_get_collection(mongoc_client_t *client, const char *db, const char *collection);
+void __wrap_mongoc_uri_destroy(mongoc_uri_t *uri);
+bool __wrap_mongoc_collection_insert_many(
+ mongoc_collection_t *collection,
+ const bson_t **documents,
+ size_t n_documents,
+ const bson_t *opts,
+ bson_t *reply,
+ bson_error_t *error);
+#endif /* HAVE_MONGOC */
+
+// -----------------------------------------------------------------------
+// fixtures
+
+int setup_configured_engine(void **state);
+int teardown_configured_engine(void **state);
+int setup_rrdhost();
+int teardown_rrdhost();
+int setup_initialized_engine(void **state);
+int teardown_initialized_engine(void **state);
+int setup_prometheus(void **state);
+int teardown_prometheus(void **state);
+
+void init_connectors_in_tests(struct engine *engine);
+
+#endif /* TEST_EXPORTING_ENGINE_H */
diff --git a/health/Makefile.am b/health/Makefile.am
new file mode 100644
index 0000000..399d6df
--- /dev/null
+++ b/health/Makefile.am
@@ -0,0 +1,111 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+SUBDIRS = \
+ notifications \
+ $(NULL)
+
+CLEANFILES = \
+ $(NULL)
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
+
+userhealthconfigdir=$(configdir)/health.d
+dist_userhealthconfig_DATA = \
+ $(NULL)
+
+# Explicitly install directories to avoid permission issues due to umask
+install-exec-local:
+ $(INSTALL) -d $(DESTDIR)$(userhealthconfigdir)
+
+healthconfigdir=$(libconfigdir)/health.d
+dist_healthconfig_DATA = \
+ health.d/adaptec_raid.conf \
+ health.d/am2320.conf \
+ health.d/anomalies.conf \
+ health.d/apache.conf \
+ health.d/apcupsd.conf \
+ health.d/apps_plugin.conf \
+ health.d/backend.conf \
+ health.d/bcache.conf \
+ health.d/beanstalkd.conf \
+ health.d/bind_rndc.conf \
+ health.d/boinc.conf \
+ health.d/btrfs.conf \
+ health.d/ceph.conf \
+ health.d/cgroups.conf \
+ health.d/cpu.conf \
+ health.d/cockroachdb.conf \
+ health.d/couchdb.conf \
+ health.d/disks.conf \
+ health.d/dnsmasq_dhcp.conf \
+ health.d/dns_query.conf \
+ health.d/dockerd.conf \
+ health.d/elasticsearch.conf \
+ health.d/entropy.conf \
+ health.d/exporting.conf \
+ health.d/fping.conf \
+ health.d/ioping.conf \
+ health.d/fronius.conf \
+ health.d/gearman.conf \
+ health.d/haproxy.conf \
+ health.d/hdfs.conf \
+ health.d/httpcheck.conf \
+ health.d/ipc.conf \
+ health.d/ipfs.conf \
+ health.d/ipmi.conf \
+ health.d/isc_dhcpd.conf \
+ health.d/kubelet.conf \
+ health.d/lighttpd.conf \
+ health.d/linux_power_supply.conf \
+ health.d/load.conf \
+ health.d/mdstat.conf \
+ health.d/megacli.conf \
+ health.d/memcached.conf \
+ health.d/memory.conf \
+ health.d/mongodb.conf \
+ health.d/mysql.conf \
+ health.d/named.conf \
+ health.d/net.conf \
+ health.d/netfilter.conf \
+ health.d/nginx.conf \
+ health.d/nginx_plus.conf \
+ health.d/pihole.conf \
+ health.d/phpfpm.conf \
+ health.d/portcheck.conf \
+ health.d/postgres.conf \
+ health.d/processes.conf \
+ health.d/pulsar.conf \
+ health.d/qos.conf \
+ health.d/ram.conf \
+ health.d/redis.conf \
+ health.d/retroshare.conf \
+ health.d/riakkv.conf \
+ health.d/scaleio.conf \
+ health.d/softnet.conf \
+ health.d/squid.conf \
+ health.d/stiebeleltron.conf \
+ health.d/swap.conf \
+ health.d/tcp_conn.conf \
+ health.d/tcp_listen.conf \
+ health.d/tcp_mem.conf \
+ health.d/tcp_orphans.conf \
+ health.d/tcp_resets.conf \
+ health.d/udp_errors.conf \
+ health.d/unbound.conf \
+ health.d/varnish.conf \
+ health.d/vcsa.conf \
+ health.d/vernemq.conf \
+ health.d/vsphere.conf \
+ health.d/web_log.conf \
+ health.d/whoisquery.conf \
+ health.d/wmi.conf \
+ health.d/x509check.conf \
+ health.d/zfs.conf \
+ health.d/zookeeper.conf \
+ health.d/dbengine.conf \
+ $(NULL)
diff --git a/health/QUICKSTART.md b/health/QUICKSTART.md
new file mode 100644
index 0000000..bc91caf
--- /dev/null
+++ b/health/QUICKSTART.md
@@ -0,0 +1,143 @@
+<!--
+title: "Health quickstart"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/health/QUICKSTART.md
+-->
+
+# Health quickstart
+
+In this quickstart guide, you'll learn the basics of editing health configuration files. With this knowledge, you
+will be able to customize how and when Netdata triggers alarms based on the health and performance of your system or
+infrastructure.
+
+To learn about more advanced health configurations, visit the [health reference guide](/health/REFERENCE.md).
+
+## Edit health configuration files
+
+You should [use `edit-config`](/docs/configure/nodes.md) to edit Netdata's health configuration files. `edit-config`
+will open your system's default terminal editor for you to make your changes. Once you've saved and closed the editor,
+`edit-config` will copy your edited file into `/etc/netdata/health.d/`, which will override the stock file in
+`/usr/lib/netdata/conf.d/health.d/` and ensure your customizations are persistent between updates.
+
+For example, to edit the `cpu.conf` health configuration file, you would run:
+
+```bash
+cd /etc/netdata/ # Replace with your Netdata configuration directory, if not /etc/netdata/
+./edit-config health.d/cpu.conf
+```
+
+Each health configuration file contains one or more health entities, which always begin with an `alarm:` or `template:`
+line. You can edit these entities based on your needs. To make any changes live, be sure to [reload your health
+configuration](#reload-health-configuration).
+
+## Reference Netdata's stock health configuration files
+
+While you should always [use `edit-config`](#edit-health-configuration-files), you might also want to view the stock
+health configuration files Netdata ships with. Stock files can be useful as reference material, or to determine which
+file you should edit with `edit-config`.
+
+By default, Netdata will put health configuration files in `/usr/lib/netdata/conf.d/health.d`. However, you can
+double-check the location of these files by navigating to `http://NODE:19999/netdata.conf`, replacing `NODE` with the IP
+address or hostname of your Agent dashboard, and looking for the `stock health configuration directory` option. The
+value there will show the correct path for your installation.
+
+```conf
+[health]
+ ...
+ # stock health configuration directory = /usr/lib/netdata/conf.d/health.d
+```
+
+Navigate to the health configuration directory to see all the available files and open them for reading.
+
+```bash
+cd /usr/lib/netdata/conf.d/health.d/
+ls
+adaptec_raid.conf entropy.conf memory.conf squid.conf
+am2320.conf fping.conf mongodb.conf stiebeleltron.conf
+apache.conf fronius.conf mysql.conf swap.conf
+...
+```
+
+> ⚠️ If you edit configuration files in your stock health configuration directory, Netdata will overwrite them during
+> any updates. Please use `edit-config` as described in the [section above](#edit-health-configuration-files).
+
+## Write a new health entity
+
+While tuning existing alarms may work in some cases, you may need to write entirely new health entities based on how
+your systems and applications work.
+
+To write a new health entity, let's create a new file inside of the `health.d/` directory. We'll name our file
+`example.conf` for now.
+
+```bash
+./edit-config health.d/example.conf
+```
+
+As an example, let's build a health entity that triggers an alarm when your system's RAM usage goes above 80%. Copy
+and paste the following into the editor:
+
+```yaml
+ alarm: ram_usage
+ on: system.ram
+lookup: average -1m percentage of used
+ units: %
+ every: 1m
+ warn: $this > 80
+ crit: $this > 90
+ info: The percentage of RAM used by the system.
+```
+
+Let's look into each of the lines to see how they create a working health entity.
+
+- `alarm`: The name for your new entity. The name must follow these requirements:
+  - It can contain any letter or number.
+  - The only other allowed symbols are `.` (period) and `_` (underscore).
+  - It cannot be the same as a chart name, dimension name, family name, or chart variable name.
+- `on`: Which chart the entity listens to.
+- `lookup`: Which metrics the alarm monitors, the duration of time to monitor, and how to process the metrics into a
+ usable format.
+ - `average`: Calculate the average of all the metrics collected.
+ - `-1m`: Use metrics from 1 minute ago until now to calculate that average.
+ - `percentage`: Clarify that we're calculating a percentage of RAM usage.
+ - `of used`: Specify which dimension (`used`) on the `system.ram` chart you want to monitor with this entity.
+- `units`: Use percentages rather than absolute units.
+- `every`: How often to perform the `lookup` calculation to decide whether or not to trigger this alarm.
+- `warn`/`crit`: The value at which Netdata should trigger a warning or critical alarm.
+- `info`: A description of the alarm, which will appear in the dashboard and notifications.
+
+Let's put all these lines into a human-readable format.
+
+This health entity, named **ram_usage**, watches the **system.ram** chart. It looks up the last **1 minute** of
+metrics from the **used** dimension and calculates the **average** of all those metrics in a **percentage** format,
+using a **% unit**. The entity performs this lookup **every minute**. If the average RAM usage percentage over the last
+1 minute is **more than 80%**, the entity triggers a warning alarm. If the usage is **more than 90%**, the entity
+triggers a critical alarm.
+
+Now that you've written a new health entity, you need to reload it to see it live on the dashboard.
+
+## Reload health configuration
+
+To make any changes to your health configuration live, you must reload Netdata's health monitoring system. To do that
+without restarting all of Netdata, run the following:
+
+```bash
+netdatacli reload-health
+```
+
+If you receive an error like `command not found`, this means that `netdatacli` is not installed in your `$PATH`. In that
+case, you can reload only the health component by sending a `SIGUSR2` to Netdata:
+
+```bash
+killall -USR2 netdata
+```
+
+## What's next?
+
+To learn about all of Netdata's health configuration options, view the [reference guide](/health/REFERENCE.md) and
+[daemon configuration](/daemon/config/README.md#health-section-options) for additional options available in the
+`[health]` section of `netdata.conf`.
+
+Or, get guided insights into specific health configurations with our [health guides](/health/README.md#guides).
+
+Finally, move on to Netdata's [notification system](/health/notifications/README.md) to learn more about how Netdata can
+let you know when the health of your systems or apps goes awry.
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fhealth%2Fquickstart%2F&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/health/README.md b/health/README.md
new file mode 100644
index 0000000..37f09e8
--- /dev/null
+++ b/health/README.md
@@ -0,0 +1,38 @@
+<!--
+title: "Health monitoring"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/health/README.md
+-->
+
+# Health monitoring
+
+The Netdata Agent is a watchdog for the health and performance of your systems, services, and applications. We've
+worked closely with our community of DevOps engineers, SREs, and developers to define hundreds of production-ready
+alarms that work without any configuration.
+
+The Agent's health monitoring system is also dynamic and fully customizable. You can write entirely new alarms, tune the
+community-configured alarms for every app/service [the Agent collects metrics from](/collectors/COLLECTORS.md), or
+silence anything you're not interested in. You can even power complex lookups by running statistical algorithms against
+your metrics.
+
+Ready to take the next steps with health monitoring?
+
+[Quickstart](/health/QUICKSTART.md)
+
+[Configuration reference](/health/REFERENCE.md)
+
+## Guides
+
+Every infrastructure is different, so we're not interested in mandating how you should configure Netdata's health
+monitoring features. Instead, these guides should give you the details you need to tweak alarms to your heart's
+content.
+
+[Stopping notifications for individual alarms](/docs/guides/monitor/stop-notifications-alarms.md)
+
+[Use dimension templates to create dynamic alarms](/docs/guides/monitor/dimension-templates.md)
+
+## Related features
+
+**[Notifications](/health/notifications/README.md)**: Get notified about ongoing alarms from your Agents via your
+favorite platform(s), such as Slack, Discord, PagerDuty, email, and much more.
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fhealth%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/health/REFERENCE.md b/health/REFERENCE.md
new file mode 100644
index 0000000..bc5f40c
--- /dev/null
+++ b/health/REFERENCE.md
@@ -0,0 +1,797 @@
+<!--
+title: "Health configuration reference"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/health/REFERENCE.md
+-->
+
+# Health configuration reference
+
+Welcome to the health configuration reference.
+
+This guide contains information about editing health configuration files to tweak existing alarms or create new health
+entities that are customized to the needs of your infrastructure.
+
+To learn the basics of locating and editing health configuration files, see the [health
+quickstart](/health/QUICKSTART.md).
+
+## Health configuration files
+
+You can configure the Agent's health watchdog service by editing files in two locations:
+
+- The `[health]` section in `netdata.conf`. By editing the daemon's behavior, you can disable health monitoring
+ altogether, run health checks more or less often, and more. See [daemon
+ configuration](/daemon/config/README.md#health-section-options) for a table of all the available settings, their
+ default values, and what they control.
+- The individual `.conf` files in `health.d/`. These health entity files are organized by the type of metric they are
+ performing calculations on or their associated collector. You should edit these files using the `edit-config`
+ script. For example: `sudo ./edit-config health.d/cpu.conf`.
+
+## Health entity reference
+
+The following reference contains information about the syntax and options of _health entities_, which Netdata attaches
+to charts in order to trigger alarms.
+
+### Entity types
+
+There are two entity types: **alarms** and **templates**. They have the same format and feature set; the only
+difference is their label.
+
+**Alarms** are attached to specific charts and use the `alarm` label.
+
+**Templates** define rules that apply to all charts of a specific context, and use the `template` label. Templates help
+you apply one entity to all disks, all network interfaces, all MySQL databases, and so on.
+
+Alarms have higher precedence and will override templates. If an alarm and template entity have the same name and attach
+to the same chart, Netdata will use the alarm.
+
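+For example, a minimal sketch of this precedence (the entity name, chart ID, and thresholds are hypothetical):
+
+```yaml
+# a template that covers every chart with the 'disk.io' context
+template: example_io
+      on: disk.io
+  lookup: average -1m unaligned
+    warn: $this > 100
+
+# an alarm with the same name, attached to one specific chart,
+# which overrides the template above on that chart only
+   alarm: example_io
+      on: disk.sda
+  lookup: average -1m unaligned
+    warn: $this > 500
+```
+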
+### Entity format
+
+Netdata parses the following lines. Beneath the table is an in-depth explanation of each line's purpose and syntax.
+
+- The `on` and `lookup` lines are **always required**.
+- Each entity **must** have one of the following lines: `calc`, `warn`, or `crit`.
+- The `alarm` or `template` line must be the first line of any entity.
+- A few lines use space-separated lists to define how the entity behaves. You can use `*` as a wildcard or prefix with
+ `!` for a negative match. Order is important, too! See our [simple patterns docs](../libnetdata/simple_pattern/) for
+ more examples.
+
+| line | required | functionality |
+| --------------------------------------------------- | --------------- | ------------------------------------------------------------------------------------- |
+| [`alarm`/`template`](#alarm-line-alarm-or-template) | yes | Name of the alarm/template. |
+| [`on`](#alarm-line-on) | yes | The chart this alarm should attach to. |
+| [`os`](#alarm-line-os)                              | no              | Which operating systems to run this alarm on.                                           |
+| [`hosts`](#alarm-line-hosts) | no | Which hostnames will run this alarm. |
+| [`plugin`](#alarm-line-plugin) | no | Restrict an alarm or template to only a certain plugin. |
+| [`module`](#alarm-line-module) | no | Restrict an alarm or template to only a certain module. |
+| [`families`](#alarm-line-families) | no | Restrict a template to only certain families. |
+| [`lookup`](#alarm-line-lookup) | yes | The database lookup to find and process metrics for the chart specified through `on`. |
+| [`calc`](#alarm-line-calc) | yes (see above) | A calculation to apply to the value found via `lookup` or another variable. |
+| [`every`](#alarm-line-every) | no | The frequency of the alarm. |
+| [`green`/`red`](#alarm-lines-green-and-red) | no | Set the green and red thresholds of a chart. |
+| [`warn`/`crit`](#alarm-lines-warn-and-crit)         | yes (see above) | Expressions evaluating to true or false; when true, they trigger the alarm.            |
+| [`to`](#alarm-line-to) | no | A list of roles to send notifications to. |
+| [`exec`](#alarm-line-exec) | no | The script to execute when the alarm changes status. |
+| [`delay`](#alarm-line-delay) | no | Optional hysteresis settings to prevent floods of notifications. |
+| [`repeat`](#alarm-line-repeat) | no | The interval for sending notifications when an alarm is in WARNING or CRITICAL mode. |
+| [`option`](#alarm-line-option) | no | Add an option to not clear alarms. |
+| [`host labels`](#alarm-line-host-labels) | no | List of labels present on a host. |
+
+The `alarm` or `template` line must be the first line of any entity.
+
+#### Alarm line `alarm` or `template`
+
+This line starts an alarm or template based on the [entity type](#entity-types) you're interested in creating.
+
+**Alarm:**
+
+```yaml
+alarm: NAME
+```
+
+**Template:**
+
+```yaml
+template: NAME
+```
+
+`NAME` can contain letters and numbers, with `.` (period) and `_` (underscore) as the only allowed symbols, but it
+cannot be the same as a chart name, dimension name, family name, or chart variable name.
+
+#### Alarm line `on`
+
+This line defines the chart this alarm should attach to.
+
+**Alarms:**
+
+```yaml
+on: CHART
+```
+
+The value `CHART` should be the unique ID or name of the chart you're interested in, as shown on the dashboard. In the
+image below, the unique ID is `system.cpu`.
+
+![Finding the unique ID of a
+chart](https://user-images.githubusercontent.com/1153921/67443082-43b16e80-f5b8-11e9-8d33-d6ee052c6678.png)
+
+**Template:**
+
+```yaml
+on: CONTEXT
+```
+
+The value `CONTEXT` should be the context you want this template to attach to.
+
+Need to find the context? Hover over the date on any given chart and look at the tooltip. In the image below, which
+shows a disk I/O chart, the tooltip reads: `proc:/proc/diskstats, disk.io`.
+
+![Finding the context of a chart via the tooltip](https://user-images.githubusercontent.com/1153921/68882856-2b230880-06cd-11ea-923b-b28c4632d479.png)
+
+You're interested in what comes after the comma: `disk.io`. That's the name of the chart's context.
+
+If you create a template using the `disk.io` context, it will apply an alarm to every disk available on your system.
+
+#### Alarm line `os`
+
+The alarm or template will be used only if the operating system of the host matches an entry in the space-separated
+list specified in `os`.
+
+The following example enables the entity on Linux, FreeBSD, and macOS, but no other operating systems.
+
+```yaml
+os: linux freebsd macos
+```
+
+#### Alarm line `hosts`
+
+The alarm or template will be used only if the hostname of the host matches this space-separated list.
+
+The following example will load on systems with the hostnames `server1` and `server2`, and on any system with a
+hostname that begins with `database`. It _will not load_ on the host `redis3`, but will load on any _other_ system
+with a hostname that begins with `redis`.
+
+```yaml
+hosts: server1 server2 database* !redis3 redis*
+```
+
+#### Alarm line `plugin`
+
+The `plugin` line filters which plugin within the context this alarm should apply to. The value is a space-separated
+list of [simple patterns](/libnetdata/simple_pattern/README.md). For example,
+you can create a filter for an alarm that applies specifically to `python.d.plugin`:
+
+```yaml
+plugin: python.d.plugin
+```
+
+The `plugin` line is best used with other options like `module`. When used alone, the `plugin` line creates a very
+inclusive filter that is unlikely to be of much use in production. See [`module`](#alarm-line-module) for a
+comprehensive example using both.
+
+#### Alarm line `module`
+
+The `module` line filters which module within the context this alarm should apply to. The value is a space-separated
+list of [simple patterns](/libnetdata/simple_pattern/README.md). For
+example, you can create an alarm that applies only on the `isc_dhcpd` module started by `python.d.plugin`:
+
+```yaml
+plugin: python.d.plugin
+module: isc_dhcpd
+```
+
+#### Alarm line `families`
+
+The `families` line, used only alongside templates, filters which families within the context this alarm should apply
+to. The value is a space-separated list of simple patterns. See our [simple patterns
+docs](../libnetdata/simple_pattern/) for some examples.
+
+For example, you can create a template on the `disk.io` context, but filter it to only the `sda` and `sdb` families:
+
+```yaml
+families: sda sdb
+```
+
+#### Alarm line `lookup`
+
+This line makes a database lookup to find a value. The result of this lookup is available as `$this`.
+
+The format is:
+
+```yaml
+lookup: METHOD AFTER [at BEFORE] [every DURATION] [OPTIONS] [of DIMENSIONS] [foreach DIMENSIONS]
+```
+
+The syntax is the same as that of [badges](../web/api/badges/). In short:
+
+- `METHOD` is one of `average`, `min`, `max`, `sum`, `incremental-sum`.
+ This is required.
+
+- `AFTER` is a relative number of seconds, but it also accepts a single letter for changing
+ the units, like `-1s` = 1 second in the past, `-1m` = 1 minute in the past, `-1h` = 1 hour
+ in the past, `-1d` = 1 day in the past. You need a negative number (i.e. how far in the past
+ to look for the value). **This is required**.
+
+- `at BEFORE` is by default 0 and is not required. Use it to define the end of the
+  lookup; data will be evaluated between `AFTER` and `BEFORE`.
+
+- `every DURATION` sets the update frequency of the lookup (it supports single-letter units, as
+  above).
+
+- `OPTIONS` is a space separated list of `percentage`, `absolute`, `min2max`, `unaligned`,
+ `match-ids`, `match-names`. Check the badges documentation for more info.
+
+- `of DIMENSIONS` is optional and has to be the last parameter. Dimensions have to be separated
+ by `,` or `|`. The space characters found in dimensions will be kept as-is (a few dimensions
+ have spaces in their names). This accepts Netdata simple patterns _(with `words` separated by
+ `,` or `|` instead of spaces)_ and the `match-ids` and `match-names` options affect the searches
+ for dimensions.
+
+- `foreach DIMENSIONS` is optional, will always be the last parameter, and uses the same `,`/`|`
+ rules as the `of` parameter. Each dimension you specify in `foreach` will use the same rule
+ to trigger an alarm. If you set both `of` and `foreach`, Netdata will ignore the `of` parameter
+ and replace it with one of the dimensions you gave to `foreach`.
+
+The result of the lookup will be available as `$this` and `$NAME` in expressions.
+The timestamps of the timeframe evaluated by the database lookup is available as variables
+`$after` and `$before` (both are unix timestamps).
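+
+Putting these parameters together, a hypothetical lookup line for the `system.cpu` chart might read:
+
+```yaml
+lookup: average -10m unaligned of user,system
+```
+
+This stores in `$this` the average of the `user` and `system` dimensions over the last 10 minutes, without aligning
+the window to the chart's update frequency.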
+
+#### Alarm line `calc`
+
+A `calc` is designed to apply some calculation to the values or variables available to the entity. The result of the
+calculation will be made available in the `$this` variable, overwriting the value from your `lookup`, to use in warning
+and critical expressions.
+
+When paired with `lookup`, `calc` will perform the calculation just after `lookup` has retrieved a value from Netdata's
+database.
+
+You can use `calc` without `lookup` if you are using [other available variables](#variables).
+
+The `calc` line uses [expressions](#expressions) for its syntax.
+
+```yaml
+calc: EXPRESSION
+```
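+
+As a minimal sketch (the conversion factor is hypothetical and depends on the chart's units), a `calc` can rescale the
+value retrieved by `lookup` before the `warn` and `crit` expressions see it:
+
+```yaml
+calc: $this / (1024 * 1024)
+```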
+
+#### Alarm line `every`
+
+Sets the update frequency of this alarm. This is the same as the `every DURATION` given
+in the `lookup` lines.
+
+Format:
+
+```yaml
+every: DURATION
+```
+
+`DURATION` accepts `s` for seconds, `m` for minutes, `h` for hours, `d` for days.
+
+#### Alarm lines `green` and `red`
+
+Set the green and red thresholds of a chart. Both are available as `$green` and `$red` in expressions. If multiple
+alarms define different thresholds, the ones defined by the first alarm will be used. These will eventually be
+visualized on the dashboard, so only one set of them is allowed per chart. If you need multiple sets of them in
+different alarms, use absolute numbers instead of `$red` and `$green`.
+
+Format:
+
+```yaml
+green: NUMBER
+red: NUMBER
+```
+
+#### Alarm lines `warn` and `crit`
+
+Define the expression that triggers either a warning or critical alarm. These are optional, and should evaluate to
+either true or false (or zero/non-zero).
+
+The format uses Netdata's [expressions syntax](#expressions).
+
+```yaml
+warn: EXPRESSION
+crit: EXPRESSION
+```
+
+#### Alarm line `to`
+
+This will be the first parameter of the script to be executed when the alarm switches status. Its meaning is left up to
+the `exec` script.
+
+The default `exec` script, `alarm-notify.sh`, uses this field as a space separated list of roles, which are then
+consulted to find the exact recipients per notification method.
+
+Format:
+
+```yaml
+to: ROLE1 ROLE2 ROLE3 ...
+```
+
+#### Alarm line `exec`
+
+The script that will be executed when the alarm changes status.
+
+Format:
+
+```yaml
+exec: SCRIPT
+```
+
+The default `SCRIPT` is Netdata's `alarm-notify.sh`, which supports all the notification methods Netdata supports,
+including custom hooks.
+
+#### Alarm line `delay`
+
+This is used to provide optional hysteresis settings for the notifications, to defend against notification floods.
+These settings do not affect the actual alarm; they only affect the time at which the `exec` script is executed.
+
+Format:
+
+```yaml
+delay: [[[up U] [down D] multiplier M] max X]
+```
+
+- `up U` defines the delay to be applied to a notification for an alarm that raised its status
+  (i.e. CLEAR to WARNING, CLEAR to CRITICAL, WARNING to CRITICAL). For example, with `up 10s`, the
+  notification for this event will be sent 10 seconds after the actual event. This is used in the
+  hope that the alarm will get back to its previous state within the duration given. The default `U`
+  is zero.
+
+- `down D` defines the delay to be applied to a notification for an alarm that moves to a lower
+  state (i.e. CRITICAL to WARNING, CRITICAL to CLEAR, WARNING to CLEAR). For example, `down 1m`
+  will delay the notification by 1 minute. This is used to prevent notifications for flapping
+  alarms. The default `D` is zero.
+
+- `multiplier M` multiplies `U` and `D` when an alarm changes state, while a notification is
+ delayed. The default multiplier is `1.0`.
+
+- `max X` defines the maximum absolute notification delay an alarm may get. The default `X`
+ is `max(U * M, D * M)` (i.e. the max duration of `U` or `D` multiplied once with `M`).
+
+ Example:
+
+ `delay: up 10s down 15m multiplier 2 max 1h`
+
+ The time is `00:00:00` and the status of the alarm is CLEAR.
+
+ | time of event | new status | delay | notification will be sent | why |
+ | ------------- | ---------- | --- | ------------------------- | --- |
+ | 00:00:01 | WARNING | `up 10s` | 00:00:11 | first state switch |
+ | 00:00:05 | CLEAR | `down 15m x2` | 00:30:05 | the alarm changes state while a notification is delayed, so it was multiplied |
+ | 00:00:06 | WARNING | `up 10s x2 x2` | 00:00:26 | multiplied twice |
+ | 00:00:07 | CLEAR | `down 15m x2 x2 x2` | 00:45:07 | multiplied 3 times. |
+
+ So:
+
+ - `U` and `D` are multiplied by `M` every time the alarm changes state (any state, not just
+ their matching one) and a delay is in place.
+ - All are reset to their defaults when the alarm switches state without a delay in place.
+
+#### Alarm line `repeat`
+
+Defines the interval between repeating notifications for alarms in CRITICAL or WARNING mode. This will override the
+default interval settings inherited from the health settings in `netdata.conf`. The default settings for repeating
+notifications are `default repeat warning = DURATION` and `default repeat critical = DURATION`, which can be found in
+the stock health configuration. When one of these intervals is greater than 0, Netdata will activate repeating
+notifications for `CRITICAL`, `CLEAR`, and `WARNING` messages.
+
+Format:
+
+```yaml
+repeat: [off] [warning DURATION] [critical DURATION]
+```
+
+- `off`: Turns off the repeating feature for the current alarm. This is effective when the default repeat settings have
+  been enabled in the health configuration.
+- `warning DURATION`: Defines the interval when the alarm is in WARNING state. Use `0s` to turn off the repeating
+ notification for WARNING mode.
+- `critical DURATION`: Defines the interval when the alarm is in CRITICAL state. Use `0s` to turn off the repeating
+ notification for CRITICAL mode.
+
+#### Alarm line `option`
+
+The only possible value for the `option` line is
+
+```yaml
+option: no-clear-notification
+```
+
+For some alarms, we need to compare two time-frames to detect anomalies. For example, `health.d/httpcheck.conf` has an
+alarm template called `web_service_slow` that compares the average HTTP response time over the last 3 minutes to the
+average over the last hour. It triggers a warning alarm when the average of the last 3 minutes is twice the average of
+the last hour. In such cases, it is easy to trigger the alarm, but difficult to tell when the alarm is cleared. As time
+passes, the newest window moves into the older one, so the average response time of the last hour will keep increasing.
+Eventually, the comparison will find the averages in the two time-frames close enough to clear the alarm. However, the
+issue was not resolved; it's just a matter of the newer data "polluting" the old. For such alarms, it's a good idea to
+tell Netdata not to clear the notification, by using the `no-clear-notification` option.
+
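+A sketch of where the line sits inside an entity (the entity name, context, dimension, and threshold here are
+hypothetical):
+
+```yaml
+template: example_web_service_slow
+      on: httpcheck.responsetime
+  lookup: average -3m unaligned of time
+    warn: $this > 2000
+  option: no-clear-notification
+```
+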
+#### Alarm line `host labels`
+
+Defines the list of labels present on a host. See our [host labels guide](/docs/guides/using-host-labels.md) for
+an explanation of host labels and how to implement them.
+
+For example, let's suppose that `netdata.conf` is configured with the following labels:
+
+```yaml
+[host labels]
+ installed = 20191211
+ room = server
+```
+
+And more labels in `netdata.conf` for workstations:
+
+```yaml
+[host labels]
+ installed = 201705
+ room = workstation
+```
+
+By defining labels inside of `netdata.conf`, you can now apply labels to alarms. For example, you can add the following
+line to any alarms you'd like to apply to hosts that have the label `room = server`.
+
+```yaml
+host labels: room = server
+```
+
+The `host labels` line is a space-separated list that accepts simple patterns. For example, you can create an alarm
+that will be applied to all hosts installed in the last decade with the following line:
+
+```yaml
+host labels: installed = 201*
+```
+
+See our [simple patterns docs](../libnetdata/simple_pattern/) for more examples.
+
+## Expressions
+
+Netdata has an internal [infix expression parser](../libnetdata/eval). It parses expressions into an internal
+structure that allows their fast execution.
+
+The following operators are supported: `+`, `-`, `*`, `/`, `<`, `<=`, `<>`, `!=`, `>`, `>=`, `&&`, `||`, `!`, `AND`, `OR`, `NOT`.
+Boolean operators result in either `1` (true) or `0` (false).
+
+The conditional evaluation operator `?` is supported too. Using this operator, you can specify IF-THEN-ELSE
+conditional statements. The format is: `(condition) ? (true expression) : (false expression)`. So, Netdata will first
+evaluate the `condition` and, based on the result, will evaluate either `true expression` or `false expression`.
+
+Example: `($this > 0) ? ($avail * 2) : ($used / 2)`.
+
+Such expressions can also be nested (i.e. `true expression` and `false expression` can themselves contain conditional
+evaluations).
+
+Expressions also support the `abs()` function.
+
+Expressions can have variables. Variables start with `$`. Check below for more information.
+
+There are two special values you can use:
+
+- `nan`, for example `$this != nan` will check if the variable `this` is available. A variable can be `nan` if the
+  database lookup failed. All calculations (i.e. addition, multiplication, etc.) with a `nan` result in a `nan`.
+
+- `inf`, for example `$this != inf` will check if `this` is not infinite. A value or variable can be set to infinite
+  if divided by zero. All calculations (i.e. addition, multiplication, etc.) with an `inf` result in an `inf`.
+
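+For example, a hypothetical warning expression can guard against a failed lookup before comparing the value with a
+threshold:
+
+```yaml
+warn: ($this != nan) && ($this > 80)
+```
+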
+### Special use of the conditional operator
+
+A common (but not necessarily obvious) use of the conditional evaluation operator is to provide
+[hysteresis](https://en.wikipedia.org/wiki/Hysteresis) around the critical or warning thresholds. This usage helps to
+avoid bogus messages resulting from small variations in the value when it is varying regularly but staying close to the
+threshold value, without needing to delay sending messages at all.
+
+An example of such usage from the default CPU usage alarms bundled with Netdata is:
+
+```yaml
+warn: $this > (($status >= $WARNING) ? (75) : (85))
+crit: $this > (($status == $CRITICAL) ? (85) : (95))
+```
+
+The above says:
+
+- If the alarm is currently a warning, then the threshold for being considered a warning is 75, otherwise it's 85.
+
+- If the alarm is currently critical, then the threshold for being considered critical is 85, otherwise it's 95.
+
+Which in turn, results in the following behavior:
+
+- While the value is rising, it will trigger a warning when it exceeds 85, and a critical alert when it exceeds 95.
+
+- While the value is falling, it will return to a warning state when it goes below 85, and a normal state when it goes
+ below 75.
+
+- If the value is constantly varying between 80 and 90, then it will trigger a warning the first time it goes above
+  85, but will remain a warning until it goes below 75 (or goes above 95, at which point it becomes critical).
+
+- If the value is constantly varying between 90 and 100, then it will trigger a critical alert the first time it goes
+  above 95, but will remain a critical alert until it goes below 85 (at which point it will return to being a warning).
+
+## Variables
+
+You can find all the variables that can be used for a given chart, using
+`http://NODE:19999/api/v1/alarm_variables?chart=CHART_NAME`, replacing `NODE` with the IP address or hostname for your
+Agent dashboard. For example, [variables for the `system.cpu` chart of the
+registry](https://registry.my-netdata.io/api/v1/alarm_variables?chart=system.cpu).
+
+> If you don't know how to find the CHART_NAME, you can read about it [here](../web/README.md#charts).
+
+Netdata supports 3 internal indexes for variables that will be used in health monitoring.
+
+<details markdown="1"><summary>The variables below can be used in both chart alarms and context templates.</summary>
+
+Although the `alarm_variables` link shows you variables for a particular chart, the same variables can also be used in
+templates for charts belonging to a given [context](../web/README.md#contexts). The reason is that all charts of a given
+context are essentially identical, with the only difference being the [family](../web/README.md#families) that
+identifies a particular hardware or software instance. Charts and templates do not apply to specific families anyway,
+unless you explicitly limit an alarm with the [alarm line `families`](#alarm-line-families).
+
+</details>
+
+- **chart local variables**. All the dimensions of the chart are exposed as local variables. The value of `$this` for
+ the other configured alarms of the chart also appears, under the name of each configured alarm.
+
+ Charts also define a few special variables:
+
+ - `$last_collected_t` is the unix timestamp of the last data collection
+ - `$collected_total_raw` is the sum of all the dimensions (their last collected values)
+ - `$update_every` is the update frequency of the chart
+    - `$green` and `$red` are the thresholds defined in alarms (these are per chart; the chart
+      inherits them from the first alarm that defined them)
+
+  Chart dimensions define their last calculated (i.e. interpolated) value, exactly as
+  shown on the charts, but they also define a variable with their name and the suffix `_raw` that
+  resolves to the last collected value, as collected, and another with the suffix `_last_collected_t`
+  that resolves to the unix timestamp at which the dimension was last collected (there may be
+  dimensions that fail to be collected while others continue normally).
+
+- **family variables**. Families are used to group charts together. For example, all `eth0`
+  charts have `family = eth0`. This index includes all local variables, but if variables
+  overlap, only the first one is exposed.
+
+- **host variables**. All the dimensions of all charts, including all alarms, addressed by full name.
+  The full name is `CHART.VARIABLE`, where `CHART` is either the chart id or the chart name (both
+  are supported).
+
+- **special variables** are:
+
+ - `$this`, which is resolved to the value of the current alarm.
+
+  - `$status`, which is resolved to the current status of the alarm (the current = the last
+    status, i.e. before the current database lookup and the evaluation of the `calc` line).
+    This value can be compared with `$REMOVED`, `$UNINITIALIZED`, `$UNDEFINED`, `$CLEAR`,
+    `$WARNING`, `$CRITICAL`. These values are ordered, i.e. `$status > $CLEAR` works as
+    expected.
+
+  - `$now`, which is resolved to the current unix timestamp.
+
+## Alarm statuses
+
+Alarms can have the following statuses:
+
+- `REMOVED` - the alarm has been deleted (this happens when a SIGUSR2 is sent to Netdata
+ to reload health configuration)
+
+- `UNINITIALIZED` - the alarm is not initialized yet
+
+- `UNDEFINED` - the alarm failed to be calculated (i.e. the database lookup failed,
+ a division by zero occurred, etc)
+
+- `CLEAR` - the alarm is not armed / raised (i.e. is OK)
+
+- `WARNING` - the warning expression resulted in true or non-zero
+
+- `CRITICAL` - the critical expression resulted in true or non-zero
+
+The external script will be called for all status changes.
+
+## Example alarms
+
+Check the `health/health.d/` directory for all alarms shipped with Netdata.
+
+Here are a few examples:
+
+### Example 1
+
+A simple check if an apache server is alive:
+
+```yaml
+template: apache_last_collected_secs
+ on: apache.requests
+ calc: $now - $last_collected_t
+ every: 10s
+ warn: $this > ( 5 * $update_every)
+ crit: $this > (10 * $update_every)
+```
+
+The above checks that Netdata is able to collect data from apache. In detail:
+
+```yaml
+template: apache_last_collected_secs
+```
+
+The above defines a **template** named `apache_last_collected_secs`.
+The name is important, since `$apache_last_collected_secs` resolves to the value of the `calc` line.
+So, try to give it a descriptive name.
+
+```yaml
+ on: apache.requests
+```
+
+The above applies the **template** to all charts that have `context = apache.requests`
+(i.e. all your apache servers).
+
+```yaml
+ calc: $now - $last_collected_t
+```
+
+- `$now` is a standard variable that resolves to the current timestamp.
+
+- `$last_collected_t` is the last data collection timestamp of the chart.
+ So this calculation gives the number of seconds passed since the last data collection.
+
+```yaml
+ every: 10s
+```
+
+The alarm will be evaluated every 10 seconds.
+
+```yaml
+ warn: $this > ( 5 * $update_every)
+ crit: $this > (10 * $update_every)
+```
+
+If these result in non-zero or true, they trigger the alarm.
+
+- `$this` refers to the value of this alarm (i.e. the result of the `calc` line).
+  We could also use `$apache_last_collected_secs`.
+
+`$update_every` is the update frequency of the chart, in seconds.
+
+So, the warning condition checks if we have not collected data from apache for 5
+iterations and the critical condition checks for 10 iterations.
+
+### Example 2
+
+Check if any of the disks is critically low on disk space:
+
+```yaml
+template: disk_full_percent
+ on: disk.space
+ calc: $used * 100 / ($avail + $used)
+ every: 1m
+ warn: $this > 80
+ crit: $this > 95
+ repeat: warning 120s critical 10s
+```
+
+`$used` and `$avail` are the `used` and `avail` chart dimensions as shown on the dashboard.
+
+So, the `calc` line finds the percentage of used space. `$this` resolves to this percentage.
+
+This is a repeating alarm and if the alarm becomes CRITICAL it repeats the notifications every 10 seconds. It also
+repeats notifications every 2 minutes if the alarm goes into WARNING mode.
+
+### Example 3
+
+Predict if any disk will run out of space in the near future.
+
+We do this in 2 steps:
+
+Calculate the disk fill rate:
+
+```yaml
+ template: disk_fill_rate
+ on: disk.space
+ lookup: max -1s at -30m unaligned of avail
+ calc: ($this - $avail) / (30 * 60)
+ every: 15s
+```
+
+In the `calc` line: `$this` is the result of the `lookup` line (i.e. the free space 30 minutes
+ago) and `$avail` is the current disk free space. So the `calc` line will either have a positive
+number of GB/second if the disk is filling up, or a negative number of GB/second if the disk is
+freeing up space.
+
+There are no `warn` or `crit` lines here, so this template will just do the calculation and
+nothing more.
+
+Predict the hours after which the disk will run out of space:
+
+```yaml
+ template: disk_full_after_hours
+ on: disk.space
+ calc: $avail / $disk_fill_rate / 3600
+ every: 10s
+ warn: $this > 0 and $this < 48
+ crit: $this > 0 and $this < 24
+```
+
+The `calc` line estimates the time, in hours, until the disk runs out of space. Of course, only
+positive values are interesting for this check, so the warning and critical conditions check
+for positive values and that we have enough free space for 48 and 24 hours respectively.
+
+Once this alarm triggers we will receive an email like this:
+
+![image](https://cloud.githubusercontent.com/assets/2662304/17839993/87872b32-6802-11e6-8e08-b2e4afef93bb.png)
+
+### Example 4
+
+Check if any network interface is dropping packets:
+
+```yaml
+template: 30min_packet_drops
+ on: net.drops
+ lookup: sum -30m unaligned absolute
+ every: 10s
+ crit: $this > 0
+```
+
+The `lookup` line will calculate the sum of all dropped packets in the last 30 minutes.
+
+The `crit` line will issue a critical alarm if even a single packet has been dropped.
+
+Note that the drops chart does not exist if a network interface has never dropped a single packet.
+When Netdata detects a dropped packet, it will add the chart and automatically attach this
+alarm to it.
+
+### Example 5
+
+Check if the user or system dimension is using more than 50% of the CPU:
+
+```yaml
+ alarm: dim_template
+ on: system.cpu
+ os: linux
+lookup: average -3s percentage foreach system,user
+ units: %
+ every: 10s
+ warn: $this > 50
+ crit: $this > 80
+```
+
+The `lookup` line will calculate the average CPU usage of the system and user dimensions over the last 3 seconds.
+Because of the `foreach` in the `lookup` line, Netdata will create two independent alarms, called
+`dim_template_system` and `dim_template_user`, that share all the other parameters.
+
+### Example 6
+
+Check whether any dimension is using more than 50% of the CPU:
+
+```yaml
+ alarm: dim_template
+ on: system.cpu
+ os: linux
+lookup: average -3s percentage foreach *
+ units: %
+ every: 10s
+ warn: $this > 50
+ crit: $this > 80
+```
+
+The `lookup` line will calculate the average CPU usage of each dimension over the last 3 seconds. In this case,
+Netdata will create an alarm for every dimension of the chart.
+
+## Troubleshooting
+
+You can compile Netdata with [debugging](/daemon/README.md#debugging) and then set in `netdata.conf`:
+
+```yaml
+[global]
+ debug flags = 0x0000000000800000
+```
+
+Then check `/var/log/netdata/debug.log`. It will show you how the health engine evaluates your entities. Be aware
+that this will generate a lot of output in `debug.log`.
+
+You can find the context of charts by looking up the chart in either `http://NODE:19999/netdata.conf` or
+`http://NODE:19999/api/v1/charts`, replacing `NODE` with the IP address or hostname for your Agent dashboard.
+
+You can find how Netdata interpreted the expressions by examining the alarm at
+`http://NODE:19999/api/v1/alarms?all`. For each expression, Netdata will return the expression as given in its
+config file, and the same expression with additional parentheses added to indicate the evaluation flow of the
+expression.
+
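+For example, assuming the Agent listens on the default port `19999`, you can retrieve the parsed alarms with `curl`
+(piping through `jq` for readability is optional):
+
+```bash
+curl -s "http://NODE:19999/api/v1/alarms?all" | jq .
+```
+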
+## Disabling health checks or silencing notifications at runtime
+
+It's currently not possible to schedule notifications from within the alarm template. For scenarios where you need
+to temporarily disable notifications (for instance, when running backups triggers a disk alert), you can disable or
+silence notifications at runtime. The health checks can be controlled at runtime via the [health management
+api](/web/api/health/README.md).
+
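+As a sketch, silencing all alarm notifications at runtime could look like the following; the available commands and
+the authorization token are described in the health management API documentation:
+
+```bash
+curl "http://NODE:19999/api/v1/manage/health?cmd=SILENCE ALL" -H "X-Auth-Token: YOUR_TOKEN"
+```
+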
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fhealth%2Freference%2F&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/health/health.c b/health/health.c
new file mode 100644
index 0000000..b81361e
--- /dev/null
+++ b/health/health.c
@@ -0,0 +1,1047 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "health.h"
+
+unsigned int default_health_enabled = 1;
+char *silencers_filename;
+
+// the queue of executed alarm notifications that haven't been waited for yet
+static struct {
+ ALARM_ENTRY *head; // oldest
+ ALARM_ENTRY *tail; // latest
+} alarm_notifications_in_progress = {NULL, NULL};
+
+static inline void enqueue_alarm_notify_in_progress(ALARM_ENTRY *ae)
+{
+ ae->prev_in_progress = NULL;
+ ae->next_in_progress = NULL;
+
+ if (NULL != alarm_notifications_in_progress.tail) {
+ ae->prev_in_progress = alarm_notifications_in_progress.tail;
+ alarm_notifications_in_progress.tail->next_in_progress = ae;
+ }
+ if (NULL == alarm_notifications_in_progress.head) {
+ alarm_notifications_in_progress.head = ae;
+ }
+ alarm_notifications_in_progress.tail = ae;
+}
+
+static inline void unlink_alarm_notify_in_progress(ALARM_ENTRY *ae)
+{
+ struct alarm_entry *prev = ae->prev_in_progress;
+ struct alarm_entry *next = ae->next_in_progress;
+
+ if (NULL != prev) {
+ prev->next_in_progress = next;
+ }
+ if (NULL != next) {
+ next->prev_in_progress = prev;
+ }
+ if (ae == alarm_notifications_in_progress.head) {
+ alarm_notifications_in_progress.head = next;
+ }
+ if (ae == alarm_notifications_in_progress.tail) {
+ alarm_notifications_in_progress.tail = prev;
+ }
+}
+// ----------------------------------------------------------------------------
+// health initialization
+
+/**
+ * User Config directory
+ *
+ * Get the config directory for health and return it.
+ *
+ * @return a pointer to the user config directory
+ */
+inline char *health_user_config_dir(void) {
+ char buffer[FILENAME_MAX + 1];
+ snprintfz(buffer, FILENAME_MAX, "%s/health.d", netdata_configured_user_config_dir);
+ return config_get(CONFIG_SECTION_HEALTH, "health configuration directory", buffer);
+}
+
+/**
+ * Stock Config Directory
+ *
+ * Get the Stock config directory and return it.
+ *
+ * @return a pointer to the stock config directory.
+ */
+inline char *health_stock_config_dir(void) {
+ char buffer[FILENAME_MAX + 1];
+ snprintfz(buffer, FILENAME_MAX, "%s/health.d", netdata_configured_stock_config_dir);
+ return config_get(CONFIG_SECTION_HEALTH, "stock health configuration directory", buffer);
+}
+
+/**
+ * Silencers init
+ *
+ * Function used to initialize the silencer structure.
+ */
+static void health_silencers_init(void) {
+ FILE *fd = fopen(silencers_filename, "r");
+ if (fd) {
+ fseek(fd, 0 , SEEK_END);
+ off_t length = (off_t) ftell(fd);
+ fseek(fd, 0 , SEEK_SET);
+
+ if (length > 0 && length < HEALTH_SILENCERS_MAX_FILE_LEN) {
+ char *str = mallocz((length+1)* sizeof(char));
+ if(str) {
+ size_t copied;
+ copied = fread(str, sizeof(char), length, fd);
+ if (copied == (length* sizeof(char))) {
+ str[length] = 0x00;
+ json_parse(str, NULL, health_silencers_json_read_callback);
+ info("Parsed health silencers file %s", silencers_filename);
+ } else {
+ error("Cannot read the data from health silencers file %s", silencers_filename);
+ }
+ freez(str);
+ }
+ } else {
+ error("Health silencers file %s has the size %ld that is out of range[ 1 , %d ]. Aborting read.", silencers_filename, length, HEALTH_SILENCERS_MAX_FILE_LEN);
+ }
+ fclose(fd);
+ } else {
+ info("Cannot open the file %s, so Netdata will work with the default health configuration.",silencers_filename);
+ }
+}
+
+/**
+ * Health Init
+ *
+ * Initialize the health thread.
+ */
+void health_init(void) {
+ debug(D_HEALTH, "Health configuration initializing");
+
+ if(!(default_health_enabled = (unsigned int)config_get_boolean(CONFIG_SECTION_HEALTH, "enabled", default_health_enabled))) {
+ debug(D_HEALTH, "Health is disabled.");
+ return;
+ }
+
+ health_silencers_init();
+}
+
+// ----------------------------------------------------------------------------
+// re-load health configuration
+
+/**
+ * Reload host
+ *
+ * Reload configuration for a specific host.
+ *
+ * @param host the structure of the host that the function will reload the configuration.
+ */
+static void health_reload_host(RRDHOST *host) {
+ if(unlikely(!host->health_enabled))
+ return;
+
+ char *user_path = health_user_config_dir();
+ char *stock_path = health_stock_config_dir();
+
+ // free all running alarms
+ rrdhost_wrlock(host);
+
+ while(host->templates)
+ rrdcalctemplate_unlink_and_free(host, host->templates);
+
+ RRDCALCTEMPLATE *rt,*next;
+ for(rt = host->alarms_template_with_foreach; rt ; rt = next) {
+ next = rt->next;
+ rrdcalctemplate_free(rt);
+ }
+ host->alarms_template_with_foreach = NULL;
+
+ while(host->alarms)
+ rrdcalc_unlink_and_free(host, host->alarms);
+
+ RRDCALC *rc,*nc;
+ for(rc = host->alarms_with_foreach; rc ; rc = nc) {
+ nc = rc->next;
+ rrdcalc_free(rc);
+ }
+ host->alarms_with_foreach = NULL;
+
+ rrdhost_unlock(host);
+
+ // invalidate all previous entries in the alarm log
+ ALARM_ENTRY *t;
+ for(t = host->health_log.alarms ; t ; t = t->next) {
+ if(t->new_status != RRDCALC_STATUS_REMOVED)
+ t->flags |= HEALTH_ENTRY_FLAG_UPDATED;
+ }
+
+ rrdhost_rdlock(host);
+ // reset all thresholds to all charts
+ RRDSET *st;
+ rrdset_foreach_read(st, host) {
+ st->green = NAN;
+ st->red = NAN;
+ }
+ rrdhost_unlock(host);
+
+ // load the new alarms
+ rrdhost_wrlock(host);
+ health_readdir(host, user_path, stock_path, NULL);
+
+ //Discard alarms with labels that do not apply to host
+ rrdcalc_labels_unlink_alarm_from_host(host);
+
+ // link the loaded alarms to their charts
+ RRDDIM *rd;
+ rrdset_foreach_write(st, host) {
+ if (rrdset_flag_check(st, RRDSET_FLAG_ARCHIVED))
+ continue;
+ rrdsetcalc_link_matching(st);
+ rrdcalctemplate_link_matching(st);
+
+ //This loop must be the last, because ` rrdcalctemplate_link_matching` will create alarms related to it.
+ rrdset_rdlock(st);
+ rrddim_foreach_read(rd, st) {
+ rrdcalc_link_to_rrddim(rd, st, host);
+ }
+ rrdset_unlock(st);
+ }
+
+ rrdhost_unlock(host);
+}
+
+/**
+ * Reload
+ *
+ * Reload the host configuration for all hosts.
+ */
+void health_reload(void) {
+#ifdef ENABLE_ACLK
+ if (netdata_cloud_setting)
+ aclk_single_update_disable();
+#endif
+ rrd_rdlock();
+
+ RRDHOST *host;
+ rrdhost_foreach_read(host)
+ health_reload_host(host);
+
+ rrd_unlock();
+#ifdef ENABLE_ACLK
+ if (netdata_cloud_setting) {
+ aclk_single_update_enable();
+ aclk_alarm_reload();
+ }
+#endif
+}
+
+// ----------------------------------------------------------------------------
+// health main thread and friends
+
+static inline RRDCALC_STATUS rrdcalc_value2status(calculated_number n) {
+ if(isnan(n) || isinf(n)) return RRDCALC_STATUS_UNDEFINED;
+ if(n) return RRDCALC_STATUS_RAISED;
+ return RRDCALC_STATUS_CLEAR;
+}
+
+#define ALARM_EXEC_COMMAND_LENGTH 8192
+
+static inline void health_alarm_execute(RRDHOST *host, ALARM_ENTRY *ae) {
+ ae->flags |= HEALTH_ENTRY_FLAG_PROCESSED;
+
+ if(unlikely(ae->new_status < RRDCALC_STATUS_CLEAR)) {
+ // do not send notifications for internal statuses
+ debug(D_HEALTH, "Health not sending notification for alarm '%s.%s' status %s (internal statuses)", ae->chart, ae->name, rrdcalc_status2string(ae->new_status));
+ goto done;
+ }
+
+ if(unlikely(ae->new_status <= RRDCALC_STATUS_CLEAR && (ae->flags & HEALTH_ENTRY_FLAG_NO_CLEAR_NOTIFICATION))) {
+ // do not send notifications for disabled statuses
+ debug(D_HEALTH, "Health not sending notification for alarm '%s.%s' status %s (it has no-clear-notification enabled)", ae->chart, ae->name, rrdcalc_status2string(ae->new_status));
+ // mark it as run, so that we will send the same alarm if it happens again
+ goto done;
+ }
+
+ // find the previous notification for the same alarm
+ // which we have run the exec script
+ // exception: alarms with HEALTH_ENTRY_FLAG_NO_CLEAR_NOTIFICATION set
+ if(likely(!(ae->flags & HEALTH_ENTRY_FLAG_NO_CLEAR_NOTIFICATION))) {
+ uint32_t id = ae->alarm_id;
+ ALARM_ENTRY *t;
+ for(t = ae->next; t ; t = t->next) {
+ if(t->alarm_id == id && t->flags & HEALTH_ENTRY_FLAG_EXEC_RUN)
+ break;
+ }
+
+ if(likely(t)) {
+ // we have executed this alarm notification in the past
+ if(t && t->new_status == ae->new_status) {
+ // don't send the notification for the same status again
+ debug(D_HEALTH, "Health not sending again notification for alarm '%s.%s' status %s", ae->chart, ae->name
+ , rrdcalc_status2string(ae->new_status));
+ goto done;
+ }
+ }
+ else {
+ // we have not executed this alarm notification in the past
+ // so, don't send CLEAR notifications
+ if(unlikely(ae->new_status == RRDCALC_STATUS_CLEAR)) {
+ if((!(ae->flags & HEALTH_ENTRY_RUN_ONCE)) || (ae->flags & HEALTH_ENTRY_RUN_ONCE && ae->old_status < RRDCALC_STATUS_RAISED) ) {
+ debug(D_HEALTH, "Health not sending notification for first initialization of alarm '%s.%s' status %s"
+ , ae->chart, ae->name, rrdcalc_status2string(ae->new_status));
+ goto done;
+ }
+ }
+ }
+ }
+
+ // Check if alarm notifications are silenced
+ if (ae->flags & HEALTH_ENTRY_FLAG_SILENCED) {
+ info("Health not sending notification for alarm '%s.%s' status %s (command API has disabled notifications)", ae->chart, ae->name, rrdcalc_status2string(ae->new_status));
+ goto done;
+ }
+
+ static char command_to_run[ALARM_EXEC_COMMAND_LENGTH + 1];
+
+ const char *exec = (ae->exec) ? ae->exec : host->health_default_exec;
+ const char *recipient = (ae->recipient) ? ae->recipient : host->health_default_recipient;
+
+ int n_warn=0, n_crit=0;
+ RRDCALC *rc;
+ EVAL_EXPRESSION *expr=NULL;
+
+ for(rc = host->alarms; rc ; rc = rc->next) {
+ if(unlikely(!rc->rrdset || !rc->rrdset->last_collected_time.tv_sec))
+ continue;
+
+ if(unlikely(rc->status == RRDCALC_STATUS_WARNING)) {
+ n_warn++;
+ if (ae->alarm_id == rc->id)
+ expr=rc->warning;
+ } else if (unlikely(rc->status == RRDCALC_STATUS_CRITICAL)) {
+ n_crit++;
+ if (ae->alarm_id == rc->id)
+ expr=rc->critical;
+ } else if (unlikely(rc->status == RRDCALC_STATUS_CLEAR)) {
+ if (ae->alarm_id == rc->id)
+ expr=rc->warning;
+ }
+ }
+
+ snprintfz(command_to_run, ALARM_EXEC_COMMAND_LENGTH, "exec %s '%s' '%s' '%u' '%u' '%u' '%lu' '%s' '%s' '%s' '%s' '%s' '" CALCULATED_NUMBER_FORMAT_ZERO "' '" CALCULATED_NUMBER_FORMAT_ZERO "' '%s' '%u' '%u' '%s' '%s' '%s' '%s' '%s' '%s' '%d' '%d'",
+ exec,
+ recipient,
+ host->registry_hostname,
+ ae->unique_id,
+ ae->alarm_id,
+ ae->alarm_event_id,
+ (unsigned long)ae->when,
+ ae->name,
+ ae->chart?ae->chart:"NOCHART",
+ ae->family?ae->family:"NOFAMILY",
+ rrdcalc_status2string(ae->new_status),
+ rrdcalc_status2string(ae->old_status),
+ ae->new_value,
+ ae->old_value,
+ ae->source?ae->source:"UNKNOWN",
+ (uint32_t)ae->duration,
+ (uint32_t)ae->non_clear_duration,
+ ae->units?ae->units:"",
+ ae->info?ae->info:"",
+ ae->new_value_string,
+ ae->old_value_string,
+ (expr && expr->source)?expr->source:"NOSOURCE",
+ (expr && expr->error_msg)?buffer_tostring(expr->error_msg):"NOERRMSG",
+ n_warn,
+ n_crit
+ );
+
+ ae->flags |= HEALTH_ENTRY_FLAG_EXEC_RUN;
+ ae->exec_run_timestamp = now_realtime_sec(); /* will be updated with the real run time after spawning */
+
+ debug(D_HEALTH, "executing command '%s'", command_to_run);
+ ae->flags |= HEALTH_ENTRY_FLAG_EXEC_IN_PROGRESS;
+ ae->exec_spawn_serial = spawn_enq_cmd(command_to_run);
+ enqueue_alarm_notify_in_progress(ae);
+
+ return; // the exit status is collected later by health_alarm_wait_for_execution()
+done:
+ health_alarm_log_save(host, ae);
+}
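+
+// Illustrative expansion of the command assembled above (all values are
+// hypothetical; by default 'exec' is netdata's alarm-notify.sh script):
+//
+//   exec alarm-notify.sh 'sysadmin' 'myhost' '1587' '12' '3' '1617000000'
+//     'disk_space_usage' 'disk._' 'NOFAMILY' 'WARNING' 'CLEAR' '91.3' '79.8'
+//     '4@/etc/netdata/health.d/disks.conf' '0' '0' '%' 'current disk space usage'
+//     '91.3%' '79.8%' '$this > 80' 'NOERRMSG' '1' '0'
+//
+// spawn_enq_cmd() hands the command to the spawn server and returns a serial
+// number that health_alarm_wait_for_execution() later uses to reap the exit code.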
+
+static inline void health_alarm_wait_for_execution(ALARM_ENTRY *ae) {
+ if (!(ae->flags & HEALTH_ENTRY_FLAG_EXEC_IN_PROGRESS))
+ return;
+
+ spawn_wait_cmd(ae->exec_spawn_serial, &ae->exec_code, &ae->exec_run_timestamp);
+ debug(D_HEALTH, "done executing command - returned with code %d", ae->exec_code);
+ ae->flags &= ~HEALTH_ENTRY_FLAG_EXEC_IN_PROGRESS;
+
+ if(ae->exec_code != 0)
+ ae->flags |= HEALTH_ENTRY_FLAG_EXEC_FAILED;
+
+ unlink_alarm_notify_in_progress(ae);
+}
+
+static inline void health_process_notifications(RRDHOST *host, ALARM_ENTRY *ae) {
+ debug(D_HEALTH, "Health alarm '%s.%s' = " CALCULATED_NUMBER_FORMAT_AUTO " - changed status from %s to %s",
+ ae->chart?ae->chart:"NOCHART", ae->name,
+ ae->new_value,
+ rrdcalc_status2string(ae->old_status),
+ rrdcalc_status2string(ae->new_status)
+ );
+
+ health_alarm_execute(host, ae);
+}
+
+static inline void health_alarm_log_process(RRDHOST *host) {
+ uint32_t first_waiting = (host->health_log.alarms)?host->health_log.alarms->unique_id:0;
+ time_t now = now_realtime_sec();
+
+ netdata_rwlock_rdlock(&host->health_log.alarm_log_rwlock);
+
+ ALARM_ENTRY *ae;
+ for(ae = host->health_log.alarms; ae && ae->unique_id >= host->health_last_processed_id; ae = ae->next) {
+ if(likely(!alarm_entry_isrepeating(host, ae))) {
+ if(unlikely(
+ !(ae->flags & HEALTH_ENTRY_FLAG_PROCESSED) &&
+ !(ae->flags & HEALTH_ENTRY_FLAG_UPDATED)
+ )) {
+ if(unlikely(ae->unique_id < first_waiting))
+ first_waiting = ae->unique_id;
+
+ if(likely(now >= ae->delay_up_to_timestamp))
+ health_process_notifications(host, ae);
+ }
+ }
+ }
+
+ // remember this for the next iteration
+ host->health_last_processed_id = first_waiting;
+
+ netdata_rwlock_unlock(&host->health_log.alarm_log_rwlock);
+
+ if(host->health_log.count <= host->health_log.max)
+ return;
+
+ // cleanup excess entries in the log
+ netdata_rwlock_wrlock(&host->health_log.alarm_log_rwlock);
+
+ ALARM_ENTRY *last = NULL;
+ unsigned int count = host->health_log.max * 2 / 3;
+ for(ae = host->health_log.alarms; ae && count ; count--, last = ae, ae = ae->next) ;
+
+ if(ae && last && last->next == ae)
+ last->next = NULL;
+ else
+ ae = NULL;
+
+ while(ae) {
+ debug(D_HEALTH, "Health removing alarm log entry with id: %u", ae->unique_id);
+
+ ALARM_ENTRY *t = ae->next;
+
+ if(likely(!alarm_entry_isrepeating(host, ae))) {
+ health_alarm_wait_for_execution(ae);
+ health_alarm_log_free_one_nochecks_nounlink(ae);
+ host->health_log.count--;
+ }
+
+ ae = t;
+ }
+
+ netdata_rwlock_unlock(&host->health_log.alarm_log_rwlock);
+}
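+
+// Example of the trimming above, assuming a hypothetical health_log.max of
+// 1000: once the log grows past 1000 entries, the walk keeps roughly the
+// newest 666 (max * 2 / 3) and frees the tail, so trimming does not have
+// to run again on every new entry.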
+
+static inline int rrdcalc_isrunnable(RRDCALC *rc, time_t now, time_t *next_run) {
+ if(unlikely(!rc->rrdset)) {
+ debug(D_HEALTH, "Health not running alarm '%s.%s'. It is not linked to a chart.", rc->chart?rc->chart:"NOCHART", rc->name);
+ return 0;
+ }
+
+ if(unlikely(rc->next_update > now)) {
+ if (unlikely(*next_run > rc->next_update)) {
+ // update the next_run time of the main loop
+ // to run this alarm precisely the time required
+ *next_run = rc->next_update;
+ }
+
+ debug(D_HEALTH, "Health not examining alarm '%s.%s' yet (will do in %d secs).", rc->chart?rc->chart:"NOCHART", rc->name, (int) (rc->next_update - now));
+ return 0;
+ }
+
+ if(unlikely(!rc->update_every)) {
+ debug(D_HEALTH, "Health not running alarm '%s.%s'. It does not have an update frequency", rc->chart?rc->chart:"NOCHART", rc->name);
+ return 0;
+ }
+
+ if(unlikely(rrdset_flag_check(rc->rrdset, RRDSET_FLAG_OBSOLETE))) {
+ debug(D_HEALTH, "Health not running alarm '%s.%s'. The chart has been marked as obsolete", rc->chart?rc->chart:"NOCHART", rc->name);
+ return 0;
+ }
+
+ if(unlikely(!rrdset_flag_check(rc->rrdset, RRDSET_FLAG_ENABLED))) {
+ debug(D_HEALTH, "Health not running alarm '%s.%s'. The chart is not enabled", rc->chart?rc->chart:"NOCHART", rc->name);
+ return 0;
+ }
+
+ if(unlikely(rrdset_flag_check(rc->rrdset, RRDSET_FLAG_ARCHIVED))) {
+ debug(D_HEALTH, "Health not running alarm '%s.%s'. The chart has been marked as archived", rc->chart?rc->chart:"NOCHART", rc->name);
+ return 0;
+ }
+
+ if(unlikely(!rc->rrdset->last_collected_time.tv_sec || rc->rrdset->counter_done < 2)) {
+ debug(D_HEALTH, "Health not running alarm '%s.%s'. Chart is not fully collected yet.", rc->chart?rc->chart:"NOCHART", rc->name);
+ return 0;
+ }
+
+ int update_every = rc->rrdset->update_every;
+ rrdset_rdlock(rc->rrdset);
+ time_t first = rrdset_first_entry_t_nolock(rc->rrdset);
+ time_t last = rrdset_last_entry_t_nolock(rc->rrdset);
+ rrdset_unlock(rc->rrdset);
+
+ if(unlikely(now + update_every < first /* || now - update_every > last */)) {
+ debug(D_HEALTH
+ , "Health not examining alarm '%s.%s' yet (wanted time is out of bounds - we need %lu but got %lu - %lu)."
+ , rc->chart ? rc->chart : "NOCHART", rc->name, (unsigned long) now, (unsigned long) first
+ , (unsigned long) last);
+ return 0;
+ }
+
+ if(RRDCALC_HAS_DB_LOOKUP(rc)) {
+ time_t needed = now + rc->before + rc->after;
+
+ if(needed + update_every < first || needed - update_every > last) {
+ debug(D_HEALTH
+ , "Health not examining alarm '%s.%s' yet (not enough data yet - we need %lu but got %lu - %lu)."
+ , rc->chart ? rc->chart : "NOCHART", rc->name, (unsigned long) needed, (unsigned long) first
+ , (unsigned long) last);
+ return 0;
+ }
+ }
+
+ return 1;
+}
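+
+// Worked example for the bounds check above (hypothetical alarm): a
+// "lookup: ... -10m" database lookup has rc->after = -600 and rc->before = 0,
+// so needed = now - 600; the alarm is skipped until the chart's retention
+// actually covers that point in time, give or take one update_every of slack.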
+
+static inline int check_if_resumed_from_suspension(void) {
+ static usec_t last_realtime = 0, last_monotonic = 0;
+ usec_t realtime = now_realtime_usec(), monotonic = now_monotonic_usec();
+ int ret = 0;
+
+ // detect if the realtime clock advanced more than twice as much as the
+ // monotonic clock, in which case we assume the system was just woken
+ // from suspension or hibernation
+
+ if(last_realtime && last_monotonic && realtime - last_realtime > 2 * (monotonic - last_monotonic))
+ ret = 1;
+
+ last_realtime = realtime;
+ last_monotonic = monotonic;
+
+ return ret;
+}
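+
+// Worked example (hypothetical numbers, shown in seconds for readability):
+// if the previous iteration sampled realtime=1000 and monotonic=500, and
+// this one samples realtime=1070 and monotonic=510, realtime advanced 70s
+// while monotonic advanced only 10s (CLOCK_MONOTONIC does not tick during
+// suspend). Since 70 > 2 * 10, the function returns 1.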
+
+static void health_main_cleanup(void *ptr) {
+ struct netdata_static_thread *static_thread = (struct netdata_static_thread *)ptr;
+ static_thread->enabled = NETDATA_MAIN_THREAD_EXITING;
+
+ info("cleaning up...");
+
+ static_thread->enabled = NETDATA_MAIN_THREAD_EXITED;
+}
+
+static SILENCE_TYPE check_silenced(RRDCALC *rc, char* host, SILENCERS *silencers) {
+ SILENCER *s;
+ debug(D_HEALTH, "Checking if alarm was silenced via the command API. Alarm info name:%s context:%s chart:%s host:%s family:%s",
+ rc->name, (rc->rrdset)?rc->rrdset->context:"", rc->chart, host, (rc->rrdset)?rc->rrdset->family:"");
+
+ for (s = silencers->silencers; s!=NULL; s=s->next){
+ if (
+ (!s->alarms_pattern || (rc->name && s->alarms_pattern && simple_pattern_matches(s->alarms_pattern,rc->name))) &&
+ (!s->contexts_pattern || (rc->rrdset && rc->rrdset->context && s->contexts_pattern && simple_pattern_matches(s->contexts_pattern,rc->rrdset->context))) &&
+ (!s->hosts_pattern || (host && s->hosts_pattern && simple_pattern_matches(s->hosts_pattern,host))) &&
+ (!s->charts_pattern || (rc->chart && s->charts_pattern && simple_pattern_matches(s->charts_pattern,rc->chart))) &&
+ (!s->families_pattern || (rc->rrdset && rc->rrdset->family && s->families_pattern && simple_pattern_matches(s->families_pattern,rc->rrdset->family)))
+ ) {
+ debug(D_HEALTH, "Alarm matches command API silence entry %s:%s:%s:%s:%s", s->alarms,s->charts, s->contexts, s->hosts, s->families);
+ if (unlikely(silencers->stype == STYPE_NONE)) {
+ debug(D_HEALTH, "Alarm %s matched a silence entry, but no SILENCE or DISABLE command was issued via the command API. The match has no effect.", rc->name);
+ } else {
+ debug(D_HEALTH, "Alarm %s via the command API - name:%s context:%s chart:%s host:%s family:%s"
+ , (silencers->stype == STYPE_DISABLE_ALARMS)?"Disabled":"Silenced"
+ , rc->name
+ , (rc->rrdset)?rc->rrdset->context:""
+ , rc->chart
+ , host
+ , (rc->rrdset)?rc->rrdset->family:""
+ );
+ }
+ return silencers->stype;
+ }
+ }
+ return STYPE_NONE;
+}
+
+/**
+ * Update Disabled Silenced
+ *
+ * Update the rrdcalc_flags member of the RRDCALC structure according to the values in the host structure.
+ *
+ * @param host structure that contains information about the monitored host.
+ * @param rc structure with information about the alarm.
+ *
+ * @return 1 if RRDCALC_FLAG_DISABLED is set, 0 otherwise.
+ */
+static int update_disabled_silenced(RRDHOST *host, RRDCALC *rc) {
+ uint32_t rrdcalc_flags_old = rc->rrdcalc_flags;
+ // Clear the flags
+ rc->rrdcalc_flags &= ~(RRDCALC_FLAG_DISABLED | RRDCALC_FLAG_SILENCED);
+ if (unlikely(silencers->all_alarms)) {
+ if (silencers->stype == STYPE_DISABLE_ALARMS) rc->rrdcalc_flags |= RRDCALC_FLAG_DISABLED;
+ else if (silencers->stype == STYPE_SILENCE_NOTIFICATIONS) rc->rrdcalc_flags |= RRDCALC_FLAG_SILENCED;
+ } else {
+ SILENCE_TYPE st = check_silenced(rc, host->hostname, silencers);
+ if (st == STYPE_DISABLE_ALARMS) rc->rrdcalc_flags |= RRDCALC_FLAG_DISABLED;
+ else if (st == STYPE_SILENCE_NOTIFICATIONS) rc->rrdcalc_flags |= RRDCALC_FLAG_SILENCED;
+ }
+
+ if (rrdcalc_flags_old != rc->rrdcalc_flags) {
+ info("Alarm silencing changed for host '%s' alarm '%s': Disabled %s->%s Silenced %s->%s",
+ host->hostname,
+ rc->name,
+ (rrdcalc_flags_old & RRDCALC_FLAG_DISABLED)?"true":"false",
+ (rc->rrdcalc_flags & RRDCALC_FLAG_DISABLED)?"true":"false",
+ (rrdcalc_flags_old & RRDCALC_FLAG_SILENCED)?"true":"false",
+ (rc->rrdcalc_flags & RRDCALC_FLAG_SILENCED)?"true":"false"
+ );
+ }
+ if (rc->rrdcalc_flags & RRDCALC_FLAG_DISABLED)
+ return 1;
+ else
+ return 0;
+}
+
+/**
+ * Health Main
+ *
+ * The main thread of the health system. All alarms are processed in this function.
+ *
+ * @param ptr is a pointer to the netdata_static_thread structure.
+ *
+ * @return It always returns NULL.
+ */
+void *health_main(void *ptr) {
+ netdata_thread_cleanup_push(health_main_cleanup, ptr);
+
+ int min_run_every = (int)config_get_number(CONFIG_SECTION_HEALTH, "run at least every seconds", 10);
+ if(min_run_every < 1) min_run_every = 1;
+
+ time_t now = now_realtime_sec();
+ time_t hibernation_delay = config_get_number(CONFIG_SECTION_HEALTH, "postpone alarms during hibernation for seconds", 60);
+
+ rrdcalc_labels_unlink();
+
+ unsigned int loop = 0;
+ while(!netdata_exit) {
+ loop++;
+ debug(D_HEALTH, "Health monitoring iteration no %u started", loop);
+
+ int runnable = 0, apply_hibernation_delay = 0;
+ time_t next_run = now + min_run_every;
+ RRDCALC *rc;
+
+ if (unlikely(check_if_resumed_from_suspension())) {
+ apply_hibernation_delay = 1;
+
+ info("Postponing alarm checks for %ld seconds, because it seems that the system was just resumed from suspension.",
+ hibernation_delay
+ );
+ }
+
+ if (unlikely(silencers->all_alarms && silencers->stype == STYPE_DISABLE_ALARMS)) {
+ static int logged=0;
+ if (!logged) {
+ info("Skipping health checks, because all alarms are disabled via a %s command.",
+ HEALTH_CMDAPI_CMD_DISABLEALL);
+ logged = 1;
+ }
+ }
+
+ rrd_rdlock();
+
+ RRDHOST *host;
+ rrdhost_foreach_read(host) {
+ if (unlikely(!host->health_enabled))
+ continue;
+
+ if (unlikely(apply_hibernation_delay)) {
+
+ info("Postponing health checks for %ld seconds, on host '%s'.", hibernation_delay, host->hostname
+ );
+
+ host->health_delay_up_to = now + hibernation_delay;
+ }
+
+ if (unlikely(host->health_delay_up_to)) {
+ if (unlikely(now < host->health_delay_up_to))
+ continue;
+
+ info("Resuming health checks on host '%s'.", host->hostname);
+ host->health_delay_up_to = 0;
+ }
+
+ rrdhost_rdlock(host);
+
+ // the first loop is to lookup values from the db
+ for (rc = host->alarms; rc; rc = rc->next) {
+
+ if (update_disabled_silenced(host, rc))
+ continue;
+
+ if (unlikely(!rrdcalc_isrunnable(rc, now, &next_run))) {
+ if (unlikely(rc->rrdcalc_flags & RRDCALC_FLAG_RUNNABLE))
+ rc->rrdcalc_flags &= ~RRDCALC_FLAG_RUNNABLE;
+ continue;
+ }
+
+ runnable++;
+ rc->old_value = rc->value;
+ rc->rrdcalc_flags |= RRDCALC_FLAG_RUNNABLE;
+
+ // ------------------------------------------------------------
+ // if there is database lookup, do it
+
+ if (unlikely(RRDCALC_HAS_DB_LOOKUP(rc))) {
+ /* time_t old_db_timestamp = rc->db_before; */
+ int value_is_null = 0;
+
+ int ret = rrdset2value_api_v1(rc->rrdset, NULL, &rc->value, rc->dimensions, 1, rc->after,
+ rc->before, rc->group, 0, rc->options, &rc->db_after,
+ &rc->db_before, &value_is_null
+ );
+
+ if (unlikely(ret != 200)) {
+ // database lookup failed
+ rc->value = NAN;
+ rc->rrdcalc_flags |= RRDCALC_FLAG_DB_ERROR;
+
+ debug(D_HEALTH, "Health on host '%s', alarm '%s.%s': database lookup returned error %d",
+ host->hostname, rc->chart ? rc->chart : "NOCHART", rc->name, ret
+ );
+ } else
+ rc->rrdcalc_flags &= ~RRDCALC_FLAG_DB_ERROR;
+
+ /* - RRDCALC_FLAG_DB_STALE not currently used
+ if (unlikely(old_db_timestamp == rc->db_before)) {
+ // database is stale
+
+ debug(D_HEALTH, "Health on host '%s', alarm '%s.%s': database is stale", host->hostname, rc->chart?rc->chart:"NOCHART", rc->name);
+
+ if (unlikely(!(rc->rrdcalc_flags & RRDCALC_FLAG_DB_STALE))) {
+ rc->rrdcalc_flags |= RRDCALC_FLAG_DB_STALE;
+ error("Health on host '%s', alarm '%s.%s': database is stale", host->hostname, rc->chart?rc->chart:"NOCHART", rc->name);
+ }
+ }
+ else if (unlikely(rc->rrdcalc_flags & RRDCALC_FLAG_DB_STALE))
+ rc->rrdcalc_flags &= ~RRDCALC_FLAG_DB_STALE;
+ */
+
+ if (unlikely(value_is_null)) {
+ // collected value is null
+ rc->value = NAN;
+ rc->rrdcalc_flags |= RRDCALC_FLAG_DB_NAN;
+
+ debug(D_HEALTH,
+ "Health on host '%s', alarm '%s.%s': database lookup returned empty value (possibly value is not collected yet)",
+ host->hostname, rc->chart ? rc->chart : "NOCHART", rc->name
+ );
+ } else
+ rc->rrdcalc_flags &= ~RRDCALC_FLAG_DB_NAN;
+
+ debug(D_HEALTH, "Health on host '%s', alarm '%s.%s': database lookup gave value "
+ CALCULATED_NUMBER_FORMAT, host->hostname, rc->chart ? rc->chart : "NOCHART", rc->name,
+ rc->value
+ );
+ }
+
+ // ------------------------------------------------------------
+ // if there is calculation expression, run it
+
+ if (unlikely(rc->calculation)) {
+ if (unlikely(!expression_evaluate(rc->calculation))) {
+ // calculation failed
+ rc->value = NAN;
+ rc->rrdcalc_flags |= RRDCALC_FLAG_CALC_ERROR;
+
+ debug(D_HEALTH, "Health on host '%s', alarm '%s.%s': expression '%s' failed: %s",
+ host->hostname, rc->chart ? rc->chart : "NOCHART", rc->name,
+ rc->calculation->parsed_as, buffer_tostring(rc->calculation->error_msg)
+ );
+ } else {
+ rc->rrdcalc_flags &= ~RRDCALC_FLAG_CALC_ERROR;
+
+ debug(D_HEALTH, "Health on host '%s', alarm '%s.%s': expression '%s' gave value "
+ CALCULATED_NUMBER_FORMAT
+ ": %s (source: %s)", host->hostname, rc->chart ? rc->chart : "NOCHART", rc->name,
+ rc->calculation->parsed_as, rc->calculation->result,
+ buffer_tostring(rc->calculation->error_msg), rc->source
+ );
+
+ rc->value = rc->calculation->result;
+
+ if (rc->local) rc->local->last_updated = now;
+ if (rc->family) rc->family->last_updated = now;
+ if (rc->hostid) rc->hostid->last_updated = now;
+ if (rc->hostname) rc->hostname->last_updated = now;
+ }
+ }
+ }
+
+ rrdhost_unlock(host);
+
+ if (unlikely(runnable && !netdata_exit)) {
+ rrdhost_rdlock(host);
+
+ for (rc = host->alarms; rc; rc = rc->next) {
+ if (unlikely(!(rc->rrdcalc_flags & RRDCALC_FLAG_RUNNABLE)))
+ continue;
+
+ if (rc->rrdcalc_flags & RRDCALC_FLAG_DISABLED) {
+ continue;
+ }
+ RRDCALC_STATUS warning_status = RRDCALC_STATUS_UNDEFINED;
+ RRDCALC_STATUS critical_status = RRDCALC_STATUS_UNDEFINED;
+
+ // --------------------------------------------------------
+ // check the warning expression
+
+ if (likely(rc->warning)) {
+ if (unlikely(!expression_evaluate(rc->warning))) {
+ // calculation failed
+ rc->rrdcalc_flags |= RRDCALC_FLAG_WARN_ERROR;
+
+ debug(D_HEALTH,
+ "Health on host '%s', alarm '%s.%s': warning expression failed with error: %s",
+ host->hostname, rc->chart ? rc->chart : "NOCHART", rc->name,
+ buffer_tostring(rc->warning->error_msg)
+ );
+ } else {
+ rc->rrdcalc_flags &= ~RRDCALC_FLAG_WARN_ERROR;
+ debug(D_HEALTH, "Health on host '%s', alarm '%s.%s': warning expression gave value "
+ CALCULATED_NUMBER_FORMAT
+ ": %s (source: %s)", host->hostname, rc->chart ? rc->chart : "NOCHART",
+ rc->name, rc->warning->result, buffer_tostring(rc->warning->error_msg), rc->source
+ );
+ warning_status = rrdcalc_value2status(rc->warning->result);
+ }
+ }
+
+ // --------------------------------------------------------
+ // check the critical expression
+
+ if (likely(rc->critical)) {
+ if (unlikely(!expression_evaluate(rc->critical))) {
+ // calculation failed
+ rc->rrdcalc_flags |= RRDCALC_FLAG_CRIT_ERROR;
+
+ debug(D_HEALTH,
+ "Health on host '%s', alarm '%s.%s': critical expression failed with error: %s",
+ host->hostname, rc->chart ? rc->chart : "NOCHART", rc->name,
+ buffer_tostring(rc->critical->error_msg)
+ );
+ } else {
+ rc->rrdcalc_flags &= ~RRDCALC_FLAG_CRIT_ERROR;
+ debug(D_HEALTH, "Health on host '%s', alarm '%s.%s': critical expression gave value "
+ CALCULATED_NUMBER_FORMAT
+ ": %s (source: %s)", host->hostname, rc->chart ? rc->chart : "NOCHART",
+ rc->name, rc->critical->result, buffer_tostring(rc->critical->error_msg),
+ rc->source
+ );
+ critical_status = rrdcalc_value2status(rc->critical->result);
+ }
+ }
+
+ // --------------------------------------------------------
+ // decide the final alarm status
+
+ RRDCALC_STATUS status = RRDCALC_STATUS_UNDEFINED;
+
+ switch (warning_status) {
+ case RRDCALC_STATUS_CLEAR:
+ status = RRDCALC_STATUS_CLEAR;
+ break;
+
+ case RRDCALC_STATUS_RAISED:
+ status = RRDCALC_STATUS_WARNING;
+ break;
+
+ default:
+ break;
+ }
+
+ switch (critical_status) {
+ case RRDCALC_STATUS_CLEAR:
+ if (status == RRDCALC_STATUS_UNDEFINED)
+ status = RRDCALC_STATUS_CLEAR;
+ break;
+
+ case RRDCALC_STATUS_RAISED:
+ status = RRDCALC_STATUS_CRITICAL;
+ break;
+
+ default:
+ break;
+ }
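+
+ // The two switches above combine as follows: a RAISED critical
+ // always wins and yields CRITICAL; a RAISED warning yields WARNING
+ // unless overridden by critical; a CLEAR critical only downgrades
+ // the status to CLEAR when the warning left it UNDEFINED; and if
+ // both are UNDEFINED the final status stays UNDEFINED.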
+
+ // --------------------------------------------------------
+ // check if the new status and the old differ
+
+ if (status != rc->status) {
+ int delay = 0;
+
+ // apply trigger hysteresis
+
+ if (now > rc->delay_up_to_timestamp) {
+ rc->delay_up_current = rc->delay_up_duration;
+ rc->delay_down_current = rc->delay_down_duration;
+ rc->delay_last = 0;
+ rc->delay_up_to_timestamp = 0;
+ } else {
+ rc->delay_up_current = (int) (rc->delay_up_current * rc->delay_multiplier);
+ if (rc->delay_up_current > rc->delay_max_duration)
+ rc->delay_up_current = rc->delay_max_duration;
+
+ rc->delay_down_current = (int) (rc->delay_down_current * rc->delay_multiplier);
+ if (rc->delay_down_current > rc->delay_max_duration)
+ rc->delay_down_current = rc->delay_max_duration;
+ }
+
+ if (status > rc->status)
+ delay = rc->delay_up_current;
+ else
+ delay = rc->delay_down_current;
+
+ // COMMENTED OUT: because we do need to send notifications while alarms are still raising
+ // if(now + delay < rc->delay_up_to_timestamp)
+ // delay = (int)(rc->delay_up_to_timestamp - now);
+
+ rc->delay_last = delay;
+ rc->delay_up_to_timestamp = now + delay;
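+
+ // Worked example, assuming a hypothetical "delay: up 1m down 15m
+ // multiplier 1.5 max 1h": the first escalation is delayed 60s; if
+ // the status keeps changing before delay_up_to_timestamp expires,
+ // the delay grows 60s -> 90s -> 135s ... capped at 3600s, damping
+ // notifications for flapping alarms.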
+
+ if(likely(!rrdcalc_isrepeating(rc))) {
+ ALARM_ENTRY *ae = health_create_alarm_entry(
+ host, rc->id, rc->next_event_id++, now, rc->name, rc->rrdset->id,
+ rc->rrdset->family, rc->exec, rc->recipient, now - rc->last_status_change,
+ rc->old_value, rc->value, rc->status, status, rc->source, rc->units, rc->info,
+ rc->delay_last,
+ (
+ ((rc->options & RRDCALC_FLAG_NO_CLEAR_NOTIFICATION)? HEALTH_ENTRY_FLAG_NO_CLEAR_NOTIFICATION : 0) |
+ ((rc->rrdcalc_flags & RRDCALC_FLAG_SILENCED)? HEALTH_ENTRY_FLAG_SILENCED : 0)
+ )
+ );
+ health_alarm_log(host, ae);
+ }
+ rc->last_status_change = now;
+ rc->old_status = rc->status;
+ rc->status = status;
+ }
+
+ rc->last_updated = now;
+ rc->next_update = now + rc->update_every;
+
+ if (next_run > rc->next_update)
+ next_run = rc->next_update;
+ }
+
+ // process repeating alarms (rc is already declared at the top of the loop)
+ for(rc = host->alarms; rc ; rc = rc->next) {
+ int repeat_every = 0;
+ if(unlikely(rrdcalc_isrepeating(rc))) {
+ if(unlikely(rc->status == RRDCALC_STATUS_WARNING)) {
+ rc->rrdcalc_flags &= ~RRDCALC_FLAG_RUN_ONCE;
+ repeat_every = rc->warn_repeat_every;
+ } else if(unlikely(rc->status == RRDCALC_STATUS_CRITICAL)) {
+ rc->rrdcalc_flags &= ~RRDCALC_FLAG_RUN_ONCE;
+ repeat_every = rc->crit_repeat_every;
+ } else if(unlikely(rc->status == RRDCALC_STATUS_CLEAR)) {
+ if(!(rc->rrdcalc_flags & RRDCALC_FLAG_RUN_ONCE)) {
+ if(rc->old_status == RRDCALC_STATUS_CRITICAL) {
+ repeat_every = rc->crit_repeat_every;
+ } else if (rc->old_status == RRDCALC_STATUS_WARNING) {
+ repeat_every = rc->warn_repeat_every;
+ }
+ }
+ }
+ }
+
+ if(unlikely(repeat_every > 0 && (rc->last_repeat + repeat_every) <= now)) {
+ rc->last_repeat = now;
+ ALARM_ENTRY *ae = health_create_alarm_entry(
+ host, rc->id, rc->next_event_id++, now, rc->name, rc->rrdset->id,
+ rc->rrdset->family, rc->exec, rc->recipient, now - rc->last_status_change,
+ rc->old_value, rc->value, rc->old_status, rc->status, rc->source, rc->units, rc->info,
+ rc->delay_last,
+ (
+ ((rc->options & RRDCALC_FLAG_NO_CLEAR_NOTIFICATION)? HEALTH_ENTRY_FLAG_NO_CLEAR_NOTIFICATION : 0) |
+ ((rc->rrdcalc_flags & RRDCALC_FLAG_SILENCED)? HEALTH_ENTRY_FLAG_SILENCED : 0)
+ )
+ );
+ ae->last_repeat = rc->last_repeat;
+ if (!(rc->rrdcalc_flags & RRDCALC_FLAG_RUN_ONCE) && rc->status == RRDCALC_STATUS_CLEAR) {
+ ae->flags |= HEALTH_ENTRY_RUN_ONCE;
+ }
+ rc->rrdcalc_flags |= RRDCALC_FLAG_RUN_ONCE;
+ health_process_notifications(host, ae);
+ debug(D_HEALTH, "Notification sent for the repeating alarm %u.", ae->alarm_id);
+ health_alarm_wait_for_execution(ae);
+ health_alarm_log_free_one_nochecks_nounlink(ae);
+ }
+ }
+
+ rrdhost_unlock(host);
+ }
+
+ if (unlikely(netdata_exit))
+ break;
+
+ // execute notifications
+ // and cleanup
+ health_alarm_log_process(host);
+
+ if (unlikely(netdata_exit)) {
+ // wait for all notifications to finish before allowing health to be cleaned up
+ ALARM_ENTRY *ae;
+ while (NULL != (ae = alarm_notifications_in_progress.head)) {
+ health_alarm_wait_for_execution(ae);
+ }
+ break;
+ }
+
+ } /* rrdhost_foreach */
+
+ // wait for all notifications to finish before allowing health to be cleaned up
+ ALARM_ENTRY *ae;
+ while (NULL != (ae = alarm_notifications_in_progress.head)) {
+ health_alarm_wait_for_execution(ae);
+ }
+
+ rrd_unlock();
+
+
+ if(unlikely(netdata_exit))
+ break;
+
+ now = now_realtime_sec();
+ if(now < next_run) {
+ debug(D_HEALTH, "Health monitoring iteration no %u done. Next iteration in %d secs", loop, (int) (next_run - now));
+ sleep_usec(USEC_PER_SEC * (usec_t) (next_run - now));
+ now = now_realtime_sec();
+ }
+ else
+ debug(D_HEALTH, "Health monitoring iteration no %u done. Next iteration now", loop);
+
+ } // forever
+
+ netdata_thread_cleanup_pop(1);
+ return NULL;
+}
diff --git a/health/health.d/adaptec_raid.conf b/health/health.d/adaptec_raid.conf
new file mode 100644
index 0000000..a1301ce
--- /dev/null
+++ b/health/health.d/adaptec_raid.conf
@@ -0,0 +1,24 @@
+
+# logical device status check
+
+template: adapter_raid_ld_status
+ on: adapter_raid.ld_status
+ lookup: max -5s
+ units: bool
+ every: 10s
+ crit: $this > 0
+ delay: down 5m multiplier 1.5 max 1h
+ info: at least 1 logical device is in a failed or degraded state
+ to: sysadmin
+
+# physical device state check
+
+template: adapter_raid_pd_state
+ on: adapter_raid.pd_state
+ lookup: max -5s
+ units: bool
+ every: 10s
+ crit: $this > 0
+ delay: down 5m multiplier 1.5 max 1h
+ info: at least 1 physical device is not in an online state
+ to: sysadmin
diff --git a/health/health.d/am2320.conf b/health/health.d/am2320.conf
new file mode 100644
index 0000000..ddf8b70
--- /dev/null
+++ b/health/health.d/am2320.conf
@@ -0,0 +1,12 @@
+# make sure am2320 is sending stats
+
+template: am2320_last_collected_secs
+ on: am2320.temperature
+ calc: $now - $last_collected_t
+ units: seconds ago
+ every: 10s
+ warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every))
+ crit: $this > (($status == $CRITICAL) ? ($update_every) : (60 * $update_every))
+ delay: down 5m multiplier 1.5 max 1h
+ info: number of seconds since the last successful data collection
+ to: webmaster \ No newline at end of file
diff --git a/health/health.d/anomalies.conf b/health/health.d/anomalies.conf
new file mode 100644
index 0000000..a2d248e
--- /dev/null
+++ b/health/health.d/anomalies.conf
@@ -0,0 +1,17 @@
+# raise a warning alarm if an anomaly probability is consistently above 50%
+
+template: anomaly_probabilities
+ on: anomalies.probability
+ lookup: average -2m foreach *
+ every: 1m
+ warn: $this > 50
+ info: average anomaly probability > 50% for last 2 minutes
+
+# raise a warning alarm if an anomaly flag is consistently firing
+
+template: anomaly_flags
+ on: anomalies.anomaly
+ lookup: sum -2m foreach *
+ every: 1m
+ warn: $this > 10
+ info: count of anomalies > 10 for last 2 minutes
diff --git a/health/health.d/apache.conf b/health/health.d/apache.conf
new file mode 100644
index 0000000..0c98b87
--- /dev/null
+++ b/health/health.d/apache.conf
@@ -0,0 +1,14 @@
+
+# make sure apache is running
+
+template: apache_last_collected_secs
+ on: apache.requests
+ calc: $now - $last_collected_t
+ units: seconds ago
+ every: 10s
+ warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every))
+ crit: $this > (($status == $CRITICAL) ? ($update_every) : (60 * $update_every))
+ delay: down 5m multiplier 1.5 max 1h
+ info: number of seconds since the last successful data collection
+ to: webmaster
+
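+# A note on the warn/crit pattern above, which recurs throughout these files:
+# $status is the alarm's current status, so the threshold depends on the state
+# the alarm is already in. With a hypothetical update_every of 1s, warn fires
+# once collection lags more than 5s, but the alarm only returns to CLEAR when
+# the lag drops back to 1s or less - hysteresis that prevents flapping around
+# the threshold.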
diff --git a/health/health.d/apcupsd.conf b/health/health.d/apcupsd.conf
new file mode 100644
index 0000000..4f86037
--- /dev/null
+++ b/health/health.d/apcupsd.conf
@@ -0,0 +1,40 @@
+# you can disable an alarm notification by setting the 'to' line to: silent
+
+template: 10min_ups_load
+ on: apcupsd.load
+ os: *
+ hosts: *
+ lookup: average -10m unaligned of percentage
+ units: %
+ every: 1m
+ warn: $this > (($status >= $WARNING) ? (70) : (80))
+ crit: $this > (($status == $CRITICAL) ? (85) : (95))
+ delay: down 10m multiplier 1.5 max 1h
+ info: average UPS load for the last 10 minutes
+ to: sitemgr
+
+# Discussion in https://github.com/netdata/netdata/pull/3928:
+# Fire the alarm as soon as it's going on battery (99% charge) and clear only when full.
+template: ups_charge
+ on: apcupsd.charge
+ os: *
+ hosts: *
+ lookup: average -60s unaligned of charge
+ units: %
+ every: 60s
+ warn: $this < 100
+ crit: $this < (($status == $CRITICAL) ? (60) : (50))
+ delay: down 10m multiplier 1.5 max 1h
+ info: current UPS charge, averaged over the last 60 seconds to reduce measurement errors
+ to: sitemgr
+
+template: apcupsd_last_collected_secs
+ on: apcupsd.load
+ calc: $now - $last_collected_t
+ every: 10s
+ units: seconds ago
+ warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every))
+ crit: $this > (($status == $CRITICAL) ? ($update_every) : (60 * $update_every))
+ delay: down 5m multiplier 1.5 max 1h
+ info: number of seconds since the last successful data collection
+ to: sitemgr
diff --git a/health/health.d/apps_plugin.conf b/health/health.d/apps_plugin.conf
new file mode 100644
index 0000000..9a27bc6
--- /dev/null
+++ b/health/health.d/apps_plugin.conf
@@ -0,0 +1,15 @@
+# you can disable an alarm notification by setting the 'to' line to: silent
+
+# disabled due to https://github.com/netdata/netdata/issues/10327
+#
+# alarm: used_file_descriptors
+# on: apps.files
+# hosts: *
+# calc: $fdperc
+# units: %
+# every: 5s
+# warn: $this > (($status >= $WARNING) ? (75) : (80))
+# crit: $this > (($status == $CRITICAL) ? (85) : (90))
+# delay: down 5m multiplier 1.5 max 1h
+# info: Peak percentage of file descriptors used
+# to: sysadmin
diff --git a/health/health.d/backend.conf b/health/health.d/backend.conf
new file mode 100644
index 0000000..e51b8aa
--- /dev/null
+++ b/health/health.d/backend.conf
@@ -0,0 +1,56 @@
+# Alert that backends subsystem will be disabled soon
+ alarm: backend_metrics_eol
+ on: netdata.backend_metrics
+ units: boolean
+ calc: $now - $last_collected_t
+ every: 1m
+ warn: $this > 0
+ delay: down 5m multiplier 1.5 max 1h
+ info: The backends subsystem is deprecated and will be removed soon. Migrate your configuration to exporting.conf.
+ to: sysadmin
+
+# make sure we are sending data to backend
+
+ alarm: backend_last_buffering
+ on: netdata.backend_metrics
+ calc: $now - $last_collected_t
+ units: seconds ago
+ every: 10s
+ warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every))
+ crit: $this > (($status == $CRITICAL) ? ($update_every) : (60 * $update_every))
+ delay: down 5m multiplier 1.5 max 1h
+ info: number of seconds since the last successful buffering of backend data
+ to: dba
+
+ alarm: backend_metrics_sent
+ on: netdata.backend_metrics
+ units: %
+ calc: abs($sent) * 100 / abs($buffered)
+ every: 10s
+ warn: $this != 100
+ delay: down 5m multiplier 1.5 max 1h
+ info: percentage of metrics sent to the backend server
+ to: dba
+
+ alarm: backend_metrics_lost
+ on: netdata.backend_metrics
+ units: metrics
+ calc: abs($lost)
+ every: 10s
+ crit: ($this != 0) || ($status == $CRITICAL && abs($sent) == 0)
+ delay: down 5m multiplier 1.5 max 1h
+ info: number of metrics lost due to repeating failures to contact the backend server
+ to: dba
+
+
+# this chart has been removed from netdata
+# alarm: backend_slow
+# on: netdata.backend_latency
+# units: %
+# calc: $latency * 100 / ($update_every * 1000)
+# every: 10s
+# warn: $this > 50
+# crit: $this > 100
+# delay: down 5m multiplier 1.5 max 1h
+# info: the percentage of the time between iterations that the backend needs to process the data sent by netdata
+# to: dba
diff --git a/health/health.d/bcache.conf b/health/health.d/bcache.conf
new file mode 100644
index 0000000..f0da9ac
--- /dev/null
+++ b/health/health.d/bcache.conf
@@ -0,0 +1,22 @@
+
+template: bcache_cache_errors
+ on: disk.bcache_cache_read_races
+ lookup: sum -10m unaligned absolute
+ units: errors
+ every: 1m
+ warn: $this > 0
+ crit: $this > ( ($status >= $CRITICAL) ? (0) : (10) )
+ delay: down 1h multiplier 1.5 max 2h
+ info: the number of times bcache had issues using the cache, during the last 10 mins (this usually means your SSD cache is failing)
+ to: sysadmin
+
+template: bcache_cache_dirty
+ on: disk.bcache_cache_alloc
+ calc: $dirty + $metadata + $undefined
+ units: %
+ every: 1m
+ warn: $this > ( ($status >= $WARNING ) ? ( 70 ) : ( 90 ) )
+ crit: $this > ( ($status >= $CRITICAL) ? ( 90 ) : ( 95 ) )
+ delay: up 1m down 1h multiplier 1.5 max 2h
+ info: the percentage of cache space used for dirty and metadata (this usually means your SSD cache is too small)
+ to: sysadmin
diff --git a/health/health.d/beanstalkd.conf b/health/health.d/beanstalkd.conf
new file mode 100644
index 0000000..30dc273
--- /dev/null
+++ b/health/health.d/beanstalkd.conf
@@ -0,0 +1,36 @@
+# get the number of buried jobs in all queues
+
+template: server_buried_jobs
+ on: beanstalk.current_jobs
+ calc: $buried
+ units: jobs
+ every: 10s
+ warn: $this > 0
+ crit: $this > 10
+ delay: up 0 down 5m multiplier 1.2 max 1h
+ info: the number of buried jobs aggregated across all tubes
+ to: sysadmin
+
+# get the number of buried jobs per queue
+
+#template: tube_buried_jobs
+# on: beanstalk.jobs
+# calc: $buried
+# units: jobs
+# every: 10s
+# warn: $this > 0
+# crit: $this > 10
+# delay: up 0 down 5m multiplier 1.2 max 1h
+# info: the number of jobs buried per tube
+# to: sysadmin
+
+# get the current number of tubes
+
+#template: number_of_tubes
+# on: beanstalk.current_tubes
+# calc: $tubes
+# every: 10s
+# warn: $this < 5
+# delay: up 0 down 5m multiplier 1.2 max 1h
+# info: the current number of tubes on the server
+# to: sysadmin
diff --git a/health/health.d/bind_rndc.conf b/health/health.d/bind_rndc.conf
new file mode 100644
index 0000000..4145e77
--- /dev/null
+++ b/health/health.d/bind_rndc.conf
@@ -0,0 +1,9 @@
+ template: bind_rndc_stats_file_size
+ on: bind_rndc.stats_size
+ units: megabytes
+ every: 60s
+ calc: $stats_size
+ warn: $this > 512
+ crit: $this > 1024
+ info: BIND stats file is very large! Consider creating a logrotate config file for it!
+ to: sysadmin
diff --git a/health/health.d/boinc.conf b/health/health.d/boinc.conf
new file mode 100644
index 0000000..43c588d
--- /dev/null
+++ b/health/health.d/boinc.conf
@@ -0,0 +1,62 @@
+# Alarms for various BOINC issues.
+
+# Warn on any compute errors encountered.
+template: boinc_compute_errors
+ on: boinc.states
+ os: *
+ hosts: *
+families: *
+ lookup: average -10m unaligned of comperror
+ units: tasks
+ every: 1m
+ warn: $this > 0
+ crit: $this > 1
+ delay: up 1m down 5m multiplier 1.5 max 1h
+ info: the average number of compute errors over the past 10 minutes
+ to: sysadmin
+
+# Warn on lots of upload errors
+template: boinc_upload_errors
+ on: boinc.states
+ os: *
+ hosts: *
+families: *
+ lookup: average -10m unaligned of upload_failed
+ units: tasks
+ every: 1m
+ warn: $this > 0
+ crit: $this > 1
+ delay: up 1m down 5m multiplier 1.5 max 1h
+ info: the average number of failed uploads over the past 10 minutes
+ to: sysadmin
+
+# Warn on the task queue being empty
+template: boinc_total_tasks
+ on: boinc.tasks
+ os: *
+ hosts: *
+families: *
+ lookup: average -10m unaligned of total
+ units: tasks
+ every: 1m
+ warn: $this < 1
+ crit: $this < 0.1
+ delay: up 5m down 10m multiplier 1.5 max 1h
+ info: the average number of locally available tasks over the past 10 minutes
+ to: sysadmin
+
+# Warn on no active tasks with a non-empty queue
+template: boinc_active_tasks
+ on: boinc.tasks
+ os: *
+ hosts: *
+families: *
+ lookup: average -10m unaligned of active
+ calc: ($boinc_total_tasks >= 1) ? ($this) : (inf)
+ units: tasks
+ every: 1m
+ warn: $this < 1
+ crit: $this < 0.1
+ delay: up 5m down 10m multiplier 1.5 max 1h
+ info: the average number of active tasks over the past 10 minutes
+ to: sysadmin
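+
+# Note: boinc_active_tasks above reads $boinc_total_tasks - the current value
+# of another alarm on the same chart - by name. While the queue is empty the
+# calc evaluates to inf, which can never satisfy "$this < 1", so this alarm
+# only fires when tasks exist but none of them are active.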
diff --git a/health/health.d/btrfs.conf b/health/health.d/btrfs.conf
new file mode 100644
index 0000000..b27aa54
--- /dev/null
+++ b/health/health.d/btrfs.conf
@@ -0,0 +1,57 @@
+
+template: btrfs_allocated
+ on: btrfs.disk
+ os: *
+ hosts: *
+families: *
+ calc: 100 - ($unallocated * 100 / ($unallocated + $data_used + $data_free + $meta_used + $meta_free + $sys_used + $sys_free))
+ units: %
+ every: 10s
+ warn: $this > (($status >= $WARNING) ? (90) : (95))
+ crit: $this > (($status == $CRITICAL) ? (95) : (98))
+ delay: up 1m down 15m multiplier 1.5 max 1h
+ info: the percentage of allocated BTRFS physical disk space
+ to: sysadmin
+
+template: btrfs_data
+ on: btrfs.data
+ os: *
+ hosts: *
+families: *
+ calc: $used * 100 / ($used + $free)
+ units: %
+ every: 10s
+ warn: $this > (($status >= $WARNING) ? (90) : (95)) && $btrfs_allocated > 98
+ crit: $this > (($status == $CRITICAL) ? (95) : (98)) && $btrfs_allocated > 98
+ delay: up 1m down 15m multiplier 1.5 max 1h
+ info: the percentage of used BTRFS data space
+ to: sysadmin
+
+template: btrfs_metadata
+ on: btrfs.metadata
+ os: *
+ hosts: *
+families: *
+ calc: ($used + $reserved) * 100 / ($used + $free + $reserved)
+ units: %
+ every: 10s
+ warn: $this > (($status >= $WARNING) ? (90) : (95)) && $btrfs_allocated > 98
+ crit: $this > (($status == $CRITICAL) ? (95) : (98)) && $btrfs_allocated > 98
+ delay: up 1m down 15m multiplier 1.5 max 1h
+ info: the percentage of used BTRFS metadata space
+ to: sysadmin
+
+template: btrfs_system
+ on: btrfs.system
+ os: *
+ hosts: *
+families: *
+ calc: $used * 100 / ($used + $free)
+ units: %
+ every: 10s
+ warn: $this > (($status >= $WARNING) ? (90) : (95)) && $btrfs_allocated > 98
+ crit: $this > (($status == $CRITICAL) ? (95) : (98)) && $btrfs_allocated > 98
+ delay: up 1m down 15m multiplier 1.5 max 1h
+ info: the percentage of used BTRFS system space
+ to: sysadmin
+
diff --git a/health/health.d/ceph.conf b/health/health.d/ceph.conf
new file mode 100644
index 0000000..de16f7b
--- /dev/null
+++ b/health/health.d/ceph.conf
@@ -0,0 +1,13 @@
+# low ceph disk available
+
+template: cluster_space_usage
+ on: ceph.general_usage
+ calc: $avail * 100 / ($avail + $used)
+ units: %
+ every: 10s
+ warn: $this < 10
+ crit: $this < 1
+ delay: down 5m multiplier 1.2 max 1h
+ info: percentage of available disk space in the ceph cluster (low values mean the cluster is nearly full)
+ to: sysadmin
+
diff --git a/health/health.d/cgroups.conf b/health/health.d/cgroups.conf
new file mode 100644
index 0000000..79ece53
--- /dev/null
+++ b/health/health.d/cgroups.conf
@@ -0,0 +1,41 @@
+
+# you can disable an alarm notification by setting the 'to' line to: silent
+
+template: cgroup_10min_cpu_usage
+ on: cgroup.cpu_limit
+ os: linux
+ hosts: *
+ lookup: average -10m unaligned
+ units: %
+ every: 1m
+ warn: $this > (($status >= $WARNING) ? (75) : (85))
+ crit: $this > (($status == $CRITICAL) ? (85) : (95))
+ delay: down 15m multiplier 1.5 max 1h
+ info: cpu utilization for the last 10 minutes
+ to: sysadmin
+
+template: cgroup_ram_in_use
+ on: cgroup.mem_usage
+ os: linux
+ hosts: *
+ calc: ($ram) * 100 / $memory_limit
+ units: %
+ every: 10s
+ warn: $this > (($status >= $WARNING) ? (80) : (90))
+ crit: $this > (($status == $CRITICAL) ? (90) : (98))
+ delay: down 15m multiplier 1.5 max 1h
+ info: RAM used by cgroup
+ to: sysadmin
+
+template: cgroup_ram_and_swap_in_use
+ on: cgroup.mem_usage
+ os: linux
+ hosts: *
+ calc: ($ram + $swap) * 100 / $memory_and_swap_limit
+ units: %
+ every: 10s
+ warn: $this > (($status >= $WARNING) ? (80) : (90))
+ crit: $this > (($status == $CRITICAL) ? (90) : (98))
+ delay: down 15m multiplier 1.5 max 1h
+ info: RAM and Swap used by cgroup
+ to: sysadmin
diff --git a/health/health.d/cockroachdb.conf b/health/health.d/cockroachdb.conf
new file mode 100644
index 0000000..8ab2c9d
--- /dev/null
+++ b/health/health.d/cockroachdb.conf
@@ -0,0 +1,91 @@
+
+# Availability
+
+template: cockroachdb_last_collected_secs
+ on: cockroachdb.live_nodes
+ calc: $now - $last_collected_t
+ units: seconds ago
+ every: 10s
+ warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every))
+ crit: $this > (($status == $CRITICAL) ? ($update_every) : (60 * $update_every))
+ delay: down 5m multiplier 1.5 max 1h
+ info: number of seconds since the last successful data collection
+ to: dba
+
+# Capacity
+
+template: cockroachdb_used_storage_capacity
+ on: cockroachdb.storage_used_capacity_percentage
+ calc: $capacity_used_percent
+ units: %
+ every: 10s
+ warn: $this > (($status >= $WARNING) ? (80) : (85))
+ crit: $this > (($status == $CRITICAL) ? (85) : (95))
+ delay: down 15m multiplier 1.5 max 1h
+ info: entire disk usage percentage
+ to: dba
+
+template: cockroachdb_used_usable_storage_capacity
+ on: cockroachdb.storage_used_capacity_percentage
+ calc: $capacity_usable_used_percent
+ units: %
+ every: 10s
+ warn: $this > (($status >= $WARNING) ? (80) : (85))
+ crit: $this > (($status == $CRITICAL) ? (85) : (95))
+ delay: down 15m multiplier 1.5 max 1h
+ info: usable space usage percentage
+ to: dba
+
+# Replication
+
+template: cockroachdb_unavailable_ranges
+ on: cockroachdb.ranges_replication_problem
+ calc: $ranges_unavailable
+ units: num
+ every: 10s
+ warn: $this > 0
+ delay: down 15m multiplier 1.5 max 1h
+ info: number of ranges with fewer live replicas than the replication target
+ to: dba
+
+template: cockroachdb_replicas_leaders_not_leaseholders
+ on: cockroachdb.replicas_leaders
+ calc: $replicas_leaders_not_leaseholders
+ units: num
+ every: 10s
+ warn: $this > 0
+ delay: down 15m multiplier 1.5 max 1h
+ info: number of replicas that are Raft leaders whose range lease is held by another store
+ to: dba
+
+# FD
+
+template: cockroachdb_open_file_descriptors_limit
+ on: cockroachdb.process_file_descriptors
+ calc: $sys_fd_open/$sys_fd_softlimit * 100
+ units: %
+ every: 10s
+ warn: $this > 80
+ delay: down 15m multiplier 1.5 max 1h
+ info: open file descriptors usage percentage
+ to: dba
+
+# SQL
+
+template: cockroachdb_sql_active_connections
+ on: cockroachdb.sql_connections
+ calc: $sql_conns
+ units: active connections
+ every: 10s
+ info: number of active SQL connections
+ to: dba
+
+template: cockroachdb_sql_executed_statements_total_last_5m
+ on: cockroachdb.sql_statements_total
+ lookup: sum -5m absolute of sql_query_count
+ units: statements
+ every: 10s
+ warn: $this == 0 AND $cockroachdb_sql_active_connections != 0
+ delay: down 15m up 30s multiplier 1.5 max 1h
+ info: number of executed SQL statements in the last 5 minutes
+ to: dba
diff --git a/health/health.d/couchdb.conf b/health/health.d/couchdb.conf
new file mode 100644
index 0000000..4a28952
--- /dev/null
+++ b/health/health.d/couchdb.conf
@@ -0,0 +1,13 @@
+
+# make sure couchdb is running
+
+template: couchdb_last_collected_secs
+ on: couchdb.request_methods
+ calc: $now - $last_collected_t
+ units: seconds ago
+ every: 10s
+ warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every))
+ crit: $this > (($status == $CRITICAL) ? ($update_every) : (60 * $update_every))
+ delay: down 5m multiplier 1.5 max 1h
+ info: number of seconds since the last successful data collection
+ to: dba
diff --git a/health/health.d/cpu.conf b/health/health.d/cpu.conf
new file mode 100644
index 0000000..fa81898
--- /dev/null
+++ b/health/health.d/cpu.conf
@@ -0,0 +1,55 @@
+
+# you can disable an alarm notification by setting the 'to' line to: silent
+
+template: 10min_cpu_usage
+ on: system.cpu
+ os: linux
+ hosts: *
+ lookup: average -10m unaligned of user,system,softirq,irq,guest
+ units: %
+ every: 1m
+ warn: $this > (($status >= $WARNING) ? (75) : (85))
+ crit: $this > (($status == $CRITICAL) ? (85) : (95))
+ delay: down 15m multiplier 1.5 max 1h
+ info: average cpu utilization for the last 10 minutes (excluding iowait, nice and steal)
+ to: sysadmin
+
+template: 10min_cpu_iowait
+ on: system.cpu
+ os: linux
+ hosts: *
+ lookup: average -10m unaligned of iowait
+ units: %
+ every: 1m
+ warn: $this > (($status >= $WARNING) ? (20) : (40))
+ crit: $this > (($status == $CRITICAL) ? (40) : (50))
+ delay: down 15m multiplier 1.5 max 1h
+ info: average CPU wait I/O for the last 10 minutes
+ to: sysadmin
+
+template: 20min_steal_cpu
+ on: system.cpu
+ os: linux
+ hosts: *
+ lookup: average -20m unaligned of steal
+ units: %
+ every: 5m
+ warn: $this > (($status >= $WARNING) ? (5) : (10))
+ crit: $this > (($status == $CRITICAL) ? (20) : (30))
+ delay: down 1h multiplier 1.5 max 2h
+ info: average CPU steal time for the last 20 minutes
+ to: sysadmin
+
+## FreeBSD
+template: 10min_cpu_usage
+ on: system.cpu
+ os: freebsd
+ hosts: *
+ lookup: average -10m unaligned of user,system,interrupt
+ units: %
+ every: 1m
+ warn: $this > (($status >= $WARNING) ? (75) : (85))
+ crit: $this > (($status == $CRITICAL) ? (85) : (95))
+ delay: down 15m multiplier 1.5 max 1h
+ info: average cpu utilization for the last 10 minutes (excluding nice)
+ to: sysadmin
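+
+# Note: "of user,system,..." restricts the lookup to the named dimensions;
+# their values are combined per point before the 10-minute average is taken,
+# which is how iowait, nice and steal are excluded from the result.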
diff --git a/health/health.d/dbengine.conf b/health/health.d/dbengine.conf
new file mode 100644
index 0000000..274673e
--- /dev/null
+++ b/health/health.d/dbengine.conf
@@ -0,0 +1,50 @@
+
+# you can disable an alarm notification by setting the 'to' line to: silent
+
+ alarm: 10min_dbengine_global_fs_errors
+ on: netdata.dbengine_global_errors
+ os: linux freebsd macos
+ hosts: *
+lookup: sum -10m unaligned of fs_errors
+ units: errors
+ every: 10s
+ crit: $this > 0
+ delay: down 15m multiplier 1.5 max 1h
+ info: number of file-system errors dbengine came across in the last 10 minutes (too many open files, wrong permissions, etc)
+ to: sysadmin
+
+ alarm: 10min_dbengine_global_io_errors
+ on: netdata.dbengine_global_errors
+ os: linux freebsd macos
+ hosts: *
+lookup: sum -10m unaligned of io_errors
+ units: errors
+ every: 10s
+ crit: $this > 0
+ delay: down 1h multiplier 1.5 max 3h
+ info: number of IO errors dbengine came across in the last 10 minutes (CRC errors, out of space, bad disk, etc)
+ to: sysadmin
+
+ alarm: 10min_dbengine_global_flushing_warnings
+ on: netdata.dbengine_global_errors
+ os: linux freebsd macos
+ hosts: *
+lookup: sum -10m unaligned of pg_cache_over_half_dirty_events
+ units: errors
+ every: 10s
+ warn: $this > 0
+ delay: down 1h multiplier 1.5 max 3h
+ info: number of times in the last 10 minutes that dbengine dirty pages exceeded 50% of the instance's page cache; metric data were at risk of not being stored in the database - reduce disk load or use faster disks
+ to: sysadmin
+
+ alarm: 10min_dbengine_global_flushing_errors
+ on: netdata.dbengine_long_term_page_stats
+ os: linux freebsd macos
+ hosts: *
+lookup: sum -10m unaligned of flushing_pressure_deletions
+ units: pages
+ every: 10s
+ crit: $this != 0
+ delay: down 1h multiplier 1.5 max 3h
+ info: number of pages deleted in the last 10 minutes due to failure to flush data to disk; metric data were lost to unblock data collection - reduce disk load or use faster disks
+ to: sysadmin
diff --git a/health/health.d/disks.conf b/health/health.d/disks.conf
new file mode 100644
index 0000000..9c194ce
--- /dev/null
+++ b/health/health.d/disks.conf
@@ -0,0 +1,167 @@
+
+# you can disable an alarm notification by setting the 'to' line to: silent
+
+
+# -----------------------------------------------------------------------------
+# low disk space
+
+# checking the latest collected values
+# raise an alarm if the disk is low on
+# available disk space
+
+template: disk_space_usage
+ on: disk.space
+ os: linux freebsd
+ hosts: *
+families: !/dev !/dev/* !/run !/run/* *
+ calc: $used * 100 / ($avail + $used)
+ units: %
+ every: 1m
+ warn: $this > (($status >= $WARNING ) ? (80) : (90))
+ crit: $this > (($status == $CRITICAL) ? (90) : (98))
+ delay: up 1m down 15m multiplier 1.5 max 1h
+ info: current disk space usage
+ to: sysadmin
+
+template: disk_inode_usage
+ on: disk.inodes
+ os: linux freebsd
+ hosts: *
+families: !/dev !/dev/* !/run !/run/* *
+ calc: $used * 100 / ($avail + $used)
+ units: %
+ every: 1m
+ warn: $this > (($status >= $WARNING) ? (80) : (90))
+ crit: $this > (($status == $CRITICAL) ? (90) : (98))
+ delay: up 1m down 15m multiplier 1.5 max 1h
+ info: current disk inode usage
+ to: sysadmin
+
+
+# -----------------------------------------------------------------------------
+# disk fill rate
+
+# calculate the rate the disk fills
+# use as base, the available space change
+# during the last hour
+
+# this is just a calculation - it has no alarm
+# we will use it in the next template to find
+# the hours remaining
+
+template: disk_fill_rate
+ on: disk.space
+ os: linux freebsd
+ hosts: *
+families: *
+ lookup: min -10m at -50m unaligned of avail
+ calc: ($this - $avail) / (($now - $after) / 3600)
+ every: 1m
+ units: GB/hour
+ info: average rate the disk fills up (positive), or frees up (negative) space, for the last hour
+
+
+# calculate the hours remaining
+# if the disk continues to fill
+# in this rate
+
+template: out_of_disk_space_time
+ on: disk.space
+ os: linux freebsd
+ hosts: *
+families: *
+ calc: ($disk_fill_rate > 0) ? ($avail / $disk_fill_rate) : (inf)
+ units: hours
+ every: 10s
+ warn: $this > 0 and $this < (($status >= $WARNING) ? (48) : (8))
+ crit: $this > 0 and $this < (($status == $CRITICAL) ? (24) : (2))
+ delay: down 15m multiplier 1.2 max 1h
+ info: estimated time the disk will run out of space, if the system continues to add data at the rate of the last hour
+ to: sysadmin
+
+
+# -----------------------------------------------------------------------------
+# disk inode fill rate
+
+# calculate the rate the disk inodes are allocated
+# use as base, the available inodes change
+# during the last hour
+
+# this is just a calculation - it has no alarm
+# we will use it in the next template to find
+# the hours remaining
+
+template: disk_inode_rate
+ on: disk.inodes
+ os: linux freebsd
+ hosts: *
+families: *
+ lookup: min -10m at -50m unaligned of avail
+ calc: ($this - $avail) / (($now - $after) / 3600)
+ every: 1m
+ units: inodes/hour
+ info: average rate at which disk inodes are allocated (positive), or freed (negative), for the last hour
+
+# calculate the hours remaining
+# if the disk inodes are allocated
+# in this rate
+
+template: out_of_disk_inodes_time
+ on: disk.inodes
+ os: linux freebsd
+ hosts: *
+families: *
+ calc: ($disk_inode_rate > 0) ? ($avail / $disk_inode_rate) : (inf)
+ units: hours
+ every: 10s
+ warn: $this > 0 and $this < (($status >= $WARNING) ? (48) : (8))
+ crit: $this > 0 and $this < (($status == $CRITICAL) ? (24) : (2))
+ delay: down 15m multiplier 1.2 max 1h
+ info: estimated time the disk will run out of inodes, if the system continues to allocate inodes at the rate of the last hour
+ to: sysadmin
+
+
+# -----------------------------------------------------------------------------
+# disk congestion
+
+# raise an alarm if the disk is congested
+# by calculating the average disk utilization
+# for the last 10 minutes
+
+template: 10min_disk_utilization
+ on: disk.util
+ os: linux freebsd
+ hosts: *
+families: *
+ lookup: average -10m unaligned
+ units: %
+ every: 1m
+ green: 90
+ red: 98
+ warn: $this > $green * (($status >= $WARNING) ? (0.7) : (1))
+ crit: $this > $red * (($status == $CRITICAL) ? (0.7) : (1))
+ delay: down 15m multiplier 1.2 max 1h
+ info: the percentage of time the disk was busy, during the last 10 minutes
+ to: sysadmin
+
+
+# raise an alarm if the disk backlog
+# is above 1000ms (1s) per second
+# for 10 minutes
+# (i.e. the disk cannot catch up)
+
+template: 10min_disk_backlog
+ on: disk.backlog
+ os: linux
+ hosts: *
+families: *
+ lookup: average -10m unaligned
+ units: ms
+ every: 1m
+ green: 2000
+ red: 5000
+ warn: $this > $green * (($status >= $WARNING) ? (0.7) : (1))
+ crit: $this > $red * (($status == $CRITICAL) ? (0.7) : (1))
+ delay: down 15m multiplier 1.2 max 1h
+ info: average of the kernel estimated disk backlog, for the last 10 minutes
+ to: sysadmin
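+
+# Worked example for the fill-rate pair above (hypothetical numbers): if the
+# minimum available space roughly an hour ago was 120GB and $avail is 100GB
+# now, disk_fill_rate = (120 - 100) / 1h = 20 GB/hour, and
+# out_of_disk_space_time = 100 / 20 = 5 hours - inside the 8-hour warning
+# window above, so a WARNING would be raised.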
diff --git a/health/health.d/dns_query.conf b/health/health.d/dns_query.conf
new file mode 100644
index 0000000..113c950
--- /dev/null
+++ b/health/health.d/dns_query.conf
@@ -0,0 +1,12 @@
+
+# detect dns query failure
+
+template: dns_query_time_query_time
+ on: dns_query_time.query_time
+ lookup: average -10s unaligned foreach *
+ units: ms
+ every: 10s
+ warn: $this == nan
+ delay: up 20s down 5m multiplier 1.5 max 1h
+ info: query round trip time
+ to: sysadmin
diff --git a/health/health.d/dnsmasq_dhcp.conf b/health/health.d/dnsmasq_dhcp.conf
new file mode 100644
index 0000000..ecf3b84
--- /dev/null
+++ b/health/health.d/dnsmasq_dhcp.conf
@@ -0,0 +1,12 @@
+# dhcp-range utilization
+
+template: dnsmasq_dhcp_dhcp_range_utilization
+ on: dnsmasq_dhcp.dhcp_range_utilization
+ every: 10s
+ units: %
+ calc: $used
+ warn: $this > ( ($status >= $WARNING ) ? ( 80 ) : ( 90 ) )
+ crit: $this > ( ($status >= $CRITICAL) ? ( 90 ) : ( 95 ) )
+ delay: down 5m
+ info: dhcp-range utilization percentage
+ to: sysadmin
diff --git a/health/health.d/dockerd.conf b/health/health.d/dockerd.conf
new file mode 100644
index 0000000..729906c
--- /dev/null
+++ b/health/health.d/dockerd.conf
@@ -0,0 +1,8 @@
+template: docker_unhealthy_containers
+ on: docker.unhealthy_containers
+ units: unhealthy containers
+ every: 10s
+ lookup: average -10s
+ crit: $this > 0
+ info: number of unhealthy containers
+ to: sysadmin
diff --git a/health/health.d/elasticsearch.conf b/health/health.d/elasticsearch.conf
new file mode 100644
index 0000000..f442344
--- /dev/null
+++ b/health/health.d/elasticsearch.conf
@@ -0,0 +1,12 @@
+
+# make sure elasticsearch is running
+
+template: elasticsearch_last_collected
+ on: elasticsearch.cluster_health_status
+ calc: $now - $last_collected_t
+ units: seconds ago
+ every: 10s
+ warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every))
+ crit: $this > (($status == $CRITICAL) ? ($update_every) : (60 * $update_every))
+ info: number of seconds since the last successful data collection
+ to: sysadmin
diff --git a/health/health.d/entropy.conf b/health/health.d/entropy.conf
new file mode 100644
index 0000000..66d44ec
--- /dev/null
+++ b/health/health.d/entropy.conf
@@ -0,0 +1,16 @@
+
+# check if entropy is too low
+# the alarm is checked every 1 minute
+# and examines the last hour of data
+
+ alarm: lowest_entropy
+ on: system.entropy
+ os: linux
+ hosts: *
+ lookup: min -10m unaligned
+ units: entries
+ every: 5m
+ warn: $this < (($status >= $WARNING) ? (200) : (100))
+ delay: down 1h multiplier 1.5 max 2h
+ info: minimum entries in the random numbers pool in the last 10 minutes
+ to: silent
diff --git a/health/health.d/exporting.conf b/health/health.d/exporting.conf
new file mode 100644
index 0000000..506cb0c
--- /dev/null
+++ b/health/health.d/exporting.conf
@@ -0,0 +1,34 @@
+
+template: exporting_last_buffering
+families: *
+ on: exporting_data_size
+ calc: $now - $last_collected_t
+ units: seconds ago
+ every: 10s
+ warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every))
+ crit: $this > (($status == $CRITICAL) ? ($update_every) : (60 * $update_every))
+ delay: down 5m multiplier 1.5 max 1h
+ info: number of seconds since the last successful buffering of exporting data
+ to: dba
+
+template: exporting_metrics_sent
+families: *
+ on: exporting_data_size
+ units: %
+ calc: abs($sent) * 100 / abs($buffered)
+ every: 10s
+ warn: $this != 100
+ delay: down 5m multiplier 1.5 max 1h
+ info: percentage of metrics sent to the external database server
+ to: dba
+
+template: exporting_metrics_lost
+families: *
+ on: exporting_data_size
+ units: metrics
+ calc: abs($lost)
+ every: 10s
+ crit: ($this != 0) || ($status == $CRITICAL && abs($sent) == 0)
+ delay: down 5m multiplier 1.5 max 1h
+ info: number of metrics lost due to repeating failures to contact the external database server
+ to: dba
diff --git a/health/health.d/fping.conf b/health/health.d/fping.conf
new file mode 100644
index 0000000..43658fe
--- /dev/null
+++ b/health/health.d/fping.conf
@@ -0,0 +1,53 @@
+
+template: fping_last_collected_secs
+families: *
+ on: fping.latency
+ calc: $now - $last_collected_t
+ units: seconds ago
+ every: 10s
+ warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every))
+ crit: $this > (($status == $CRITICAL) ? ($update_every) : (60 * $update_every))
+ delay: down 5m multiplier 1.5 max 1h
+ info: number of seconds since the last successful data collection
+ to: sysadmin
+
+template: host_reachable
+families: *
+ on: fping.latency
+ calc: $average != nan
+ units: up/down
+ every: 10s
+ crit: $this == 0
+ info: whether the remote host is reachable
+ delay: down 30m multiplier 1.5 max 2h
+ to: sysadmin
+
+template: host_latency
+families: *
+ on: fping.latency
+ lookup: average -10s unaligned of average
+ units: ms
+ every: 10s
+ green: 500
+ red: 1000
+ warn: $this > $green OR $max > $red
+ crit: $this > $red
+ info: average round trip delay during the last 10 seconds
+ delay: down 30m multiplier 1.5 max 2h
+ to: sysadmin
+
+template: packet_loss
+families: *
+ on: fping.quality
+ lookup: average -10m unaligned of returned
+ calc: 100 - $this
+ green: 1
+ red: 10
+ units: %
+ every: 10s
+ warn: $this > $green
+ crit: $this > $red
+ info: packet loss percentage
+ delay: down 30m multiplier 1.5 max 2h
+ to: sysadmin
+
diff --git a/health/health.d/fronius.conf b/health/health.d/fronius.conf
new file mode 100644
index 0000000..cdf6c8f
--- /dev/null
+++ b/health/health.d/fronius.conf
@@ -0,0 +1,11 @@
+template: fronius_last_collected_secs
+families: *
+ on: fronius.power
+ calc: $now - $last_collected_t
+ every: 10s
+ units: seconds ago
+ warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every))
+ crit: $this > (($status == $CRITICAL) ? ($update_every) : (60 * $update_every))
+ delay: down 5m multiplier 1.5 max 1h
+ info: number of seconds since the last successful data collection
+ to: sitemgr
diff --git a/health/health.d/gearman.conf b/health/health.d/gearman.conf
new file mode 100644
index 0000000..e3863ae
--- /dev/null
+++ b/health/health.d/gearman.conf
@@ -0,0 +1,22 @@
+# make sure Gearman is running
+template: gearman_last_collected_secs
+ on: gearman.total_jobs
+ calc: $now - $last_collected_t
+ units: seconds ago
+ every: 10s
+ warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every))
+ crit: $this > (($status == $CRITICAL) ? ($update_every) : (60 * $update_every))
+ delay: down 5m multiplier 1.5 max 1h
+ info: number of seconds since the last successful data collection
+ to: sysadmin
+
+template: gearman_workers_queued
+ on: gearman.single_job
+ lookup: average -10m unaligned match-names of Queued
+ units: jobs
+ every: 10s
+ warn: $this > 30000
+ crit: $this > 100000
+ delay: down 5m multiplier 1.5 max 1h
+ info: number of queued jobs
+ to: sysadmin \ No newline at end of file
diff --git a/health/health.d/haproxy.conf b/health/health.d/haproxy.conf
new file mode 100644
index 0000000..e49c70d
--- /dev/null
+++ b/health/health.d/haproxy.conf
@@ -0,0 +1,27 @@
+template: haproxy_backend_server_status
+ on: haproxy_hs.down
+ units: failed servers
+ every: 10s
+ lookup: average -10s
+ crit: $this > 0
+ info: number of failed haproxy backend servers
+ to: sysadmin
+
+template: haproxy_backend_status
+ on: haproxy_hb.down
+ units: failed backend
+ every: 10s
+ lookup: average -10s
+ crit: $this > 0
+ info: number of failed haproxy backends
+ to: sysadmin
+
+template: haproxy_last_collected
+ on: haproxy_hb.down
+ calc: $now - $last_collected_t
+ units: seconds ago
+ every: 10s
+ warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every))
+ crit: $this > (($status == $CRITICAL) ? ($update_every) : (60 * $update_every))
+ info: number of seconds since the last successful data collection
+ to: sysadmin
diff --git a/health/health.d/hdfs.conf b/health/health.d/hdfs.conf
new file mode 100644
index 0000000..678faab
--- /dev/null
+++ b/health/health.d/hdfs.conf
@@ -0,0 +1,75 @@
+
+# make sure hdfs is running
+
+template: hdfs_last_collected_secs
+ on: hdfs.heap_memory
+ calc: $now - $last_collected_t
+ units: seconds ago
+ every: 10s
+ warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every))
+ crit: $this > (($status == $CRITICAL) ? ($update_every) : (60 * $update_every))
+ delay: down 5m multiplier 1.5 max 1h
+ info: number of seconds since the last successful data collection
+ to: webmaster
+
+
+# Common
+
+template: hdfs_capacity_usage
+ on: hdfs.capacity
+ calc: ($used) * 100 / ($used + $remaining)
+ units: %
+ every: 10s
+ warn: $this > (($status >= $WARNING) ? (70) : (80))
+ crit: $this > (($status == $CRITICAL) ? (80) : (98))
+ delay: down 15m multiplier 1.5 max 1h
+ info: used capacity
+ to: sysadmin
+
+
+# NameNode
+
+template: hdfs_missing_blocks
+ on: hdfs.blocks
+ calc: $missing
+ units: missing blocks
+ every: 10s
+ warn: $this > 0
+ delay: down 15m multiplier 1.5 max 1h
+ info: missing blocks
+ to: sysadmin
+
+
+template: hdfs_stale_nodes
+ on: hdfs.data_nodes
+ calc: $stale
+ units: stale nodes
+ every: 10s
+ warn: $this > 0
+ delay: down 15m multiplier 1.5 max 1h
+ info: stale data nodes
+ to: sysadmin
+
+
+template: hdfs_dead_nodes
+ on: hdfs.data_nodes
+ calc: $dead
+ units: dead nodes
+ every: 10s
+ crit: $this > 0
+ delay: down 15m multiplier 1.5 max 1h
+ info: dead data nodes
+ to: sysadmin
+
+
+# DataNode
+
+template: hdfs_num_failed_volumes
+ on: hdfs.num_failed_volumes
+ calc: $fsds_num_failed_volumes
+ units: failed volumes
+ every: 10s
+ warn: $this > 0
+ delay: down 15m multiplier 1.5 max 1h
+ info: failed volumes
+ to: sysadmin
diff --git a/health/health.d/httpcheck.conf b/health/health.d/httpcheck.conf
new file mode 100644
index 0000000..0ddf35e
--- /dev/null
+++ b/health/health.d/httpcheck.conf
@@ -0,0 +1,99 @@
+template: httpcheck_last_collected_secs
+families: *
+ on: httpcheck.status
+ calc: $now - $last_collected_t
+ every: 10s
+ units: seconds ago
+ warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every))
+ crit: $this > (($status == $CRITICAL) ? ($update_every) : (60 * $update_every))
+ delay: down 5m multiplier 1.5 max 1h
+ info: number of seconds since the last successful data collection
+ to: sysadmin
+
+# This is a fast-reacting no-notification alarm ideal for custom dashboards or badges
+template: web_service_up
+families: *
+ on: httpcheck.status
+ lookup: average -1m unaligned percentage of success
+ calc: ($this < 75) ? (0) : ($this)
+ every: 5s
+ units: up/down
+ info: at least 75% verified responses during the last 60 seconds, ideal for badges
+ to: silent
+
+template: web_service_bad_content
+families: *
+ on: httpcheck.status
+ lookup: average -5m unaligned percentage of bad_content
+ every: 10s
+ units: %
+ warn: $this >= 10 AND $this < 40
+ crit: $this >= 40
+ delay: down 5m multiplier 1.5 max 1h
+ info: average of unexpected http response content during the last 5 minutes
+ options: no-clear-notification
+ to: webmaster
+
+template: web_service_bad_status
+families: *
+ on: httpcheck.status
+ lookup: average -5m unaligned percentage of bad_status
+ every: 10s
+ units: %
+ warn: $this >= 10 AND $this < 40
+ crit: $this >= 40
+ delay: down 5m multiplier 1.5 max 1h
+ info: average of unexpected http status during the last 5 minutes
+ options: no-clear-notification
+ to: webmaster
+
+template: web_service_timeouts
+families: *
+ on: httpcheck.status
+ lookup: average -5m unaligned percentage of timeout
+ every: 10s
+ units: %
+ info: average of timeouts during the last 5 minutes
+
+template: no_web_service_connections
+families: *
+ on: httpcheck.status
+ lookup: average -5m unaligned percentage of no_connection
+ every: 10s
+ units: %
+ info: average of failed requests during the last 5 minutes
+
+# combined timeout & no connection alarm
+template: web_service_unreachable
+families: *
+ on: httpcheck.status
+ calc: ($no_web_service_connections >= $web_service_timeouts) ? ($no_web_service_connections) : ($web_service_timeouts)
+ units: %
+ every: 10s
+ warn: ($no_web_service_connections >= 10 OR $web_service_timeouts >= 10) AND ($no_web_service_connections < 40 OR $web_service_timeouts < 40)
+ crit: $no_web_service_connections >= 40 OR $web_service_timeouts >= 40
+ delay: down 5m multiplier 1.5 max 1h
+ info: average of failed requests either due to timeouts or no connection during the last 5 minutes
+ options: no-clear-notification
+ to: webmaster
+
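+# Illustrative worked example (not shipped): with $web_service_timeouts at
+# 15% and $no_web_service_connections at 30%, the calc keeps the larger
+# value (30%); the warn expression matches (one value >= 10, both < 40)
+# while crit does not, so the alarm goes to WARNING, not CRITICAL.
+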
+template: 1h_web_service_response_time
+families: *
+ on: httpcheck.responsetime
+ lookup: average -1h unaligned of time
+ every: 30s
+ units: ms
+ info: average response time over the last hour
+
+template: web_service_slow
+families: *
+ on: httpcheck.responsetime
+ lookup: average -3m unaligned of time
+ units: ms
+ every: 10s
+ warn: ($this > ($1h_web_service_response_time * 2) )
+ crit: ($this > ($1h_web_service_response_time * 3) )
+ info: average response time over the last 3 minutes, compared to the average over the last hour
+ delay: down 5m multiplier 1.5 max 1h
+ options: no-clear-notification
+ to: webmaster
diff --git a/health/health.d/ioping.conf b/health/health.d/ioping.conf
new file mode 100644
index 0000000..59a5c8e
--- /dev/null
+++ b/health/health.d/ioping.conf
@@ -0,0 +1,13 @@
+template: disk_latency
+families: *
+ on: ioping.latency
+ lookup: average -10s unaligned of average
+ units: ms
+ every: 10s
+ green: 500
+ red: 1000
+ warn: $this > $green OR $max > $red
+ crit: $this > $red
+ info: average round trip delay during the last 10 seconds
+ delay: down 30m multiplier 1.5 max 2h
+ to: sysadmin
diff --git a/health/health.d/ipc.conf b/health/health.d/ipc.conf
new file mode 100644
index 0000000..989d6e9
--- /dev/null
+++ b/health/health.d/ipc.conf
@@ -0,0 +1,28 @@
+
+# you can disable an alarm notification by setting the 'to' line to: silent
+
+ alarm: semaphores_used
+ on: system.ipc_semaphores
+ os: linux
+ hosts: *
+ calc: $semaphores * 100 / $ipc_semaphores_max
+ units: %
+ every: 10s
+ warn: $this > (($status >= $WARNING) ? (70) : (80))
+ crit: $this > (($status == $CRITICAL) ? (70) : (90))
+ delay: down 5m multiplier 1.5 max 1h
+ info: the percentage of IPC semaphores used
+ to: sysadmin
+
+ alarm: semaphore_arrays_used
+ on: system.ipc_semaphore_arrays
+ os: linux
+ hosts: *
+ calc: $arrays * 100 / $ipc_semaphores_arrays_max
+ units: %
+ every: 10s
+ warn: $this > (($status >= $WARNING) ? (70) : (80))
+ crit: $this > (($status == $CRITICAL) ? (70) : (90))
+ delay: down 5m multiplier 1.5 max 1h
+ info: the percentage of IPC semaphore arrays used
+ to: sysadmin
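+
+# For example (illustrative): to silence semaphores_used, copy this stanza
+# into your own health.d override (e.g. 'cd /etc/netdata && sudo ./edit-config
+# health.d/ipc.conf') and change its 'to:' line to: silent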
diff --git a/health/health.d/ipfs.conf b/health/health.d/ipfs.conf
new file mode 100644
index 0000000..3f77572
--- /dev/null
+++ b/health/health.d/ipfs.conf
@@ -0,0 +1,11 @@
+
+template: ipfs_datastore_usage
+ on: ipfs.repo_size
+ calc: $size * 100 / $avail
+ units: %
+ every: 10s
+ warn: $this > (($status >= $WARNING) ? (80) : (90))
+ crit: $this > (($status == $CRITICAL) ? (90) : (98))
+ delay: down 15m multiplier 1.5 max 1h
+ info: IPFS datastore is close to running out of space
+ to: sysadmin
diff --git a/health/health.d/ipmi.conf b/health/health.d/ipmi.conf
new file mode 100644
index 0000000..c255819
--- /dev/null
+++ b/health/health.d/ipmi.conf
@@ -0,0 +1,20 @@
+ alarm: ipmi_sensors_states
+ on: ipmi.sensors_states
+ calc: $warning + $critical
+ units: sensors
+ every: 10s
+ warn: $this > 0
+ crit: $critical > 0
+ delay: up 5m down 15m multiplier 1.5 max 1h
+ info: the number of IPMI sensors in a non-nominal state
+ to: sysadmin
+
+ alarm: ipmi_events
+ on: ipmi.events
+ calc: $events
+ units: events
+ every: 10s
+ warn: $this > 0
+ delay: up 5m down 15m multiplier 1.5 max 1h
+ info: the number of events in the IPMI System Event Log (SEL)
+ to: sysadmin
diff --git a/health/health.d/isc_dhcpd.conf b/health/health.d/isc_dhcpd.conf
new file mode 100644
index 0000000..8054656
--- /dev/null
+++ b/health/health.d/isc_dhcpd.conf
@@ -0,0 +1,10 @@
+ template: isc_dhcpd_leases_size
+ on: isc_dhcpd.leases_total
+ units: KB
+ every: 60
+ calc: $leases_size
+ warn: $this > 3072
+ crit: $this > 6144
+ delay: up 2m down 5m
+ info: the dhcpd.leases file is too big; the module can slow down your server
+ to: sysadmin
diff --git a/health/health.d/kubelet.conf b/health/health.d/kubelet.conf
new file mode 100644
index 0000000..d2ef24b
--- /dev/null
+++ b/health/health.d/kubelet.conf
@@ -0,0 +1,115 @@
+# you can disable an alarm notification by setting the 'to' line to: silent
+
+# -----------------------------------------------------------------------------
+
+# True (1) if the node is experiencing a configuration-related error, false (0) otherwise.
+
+ template: node_config_error
+ on: k8s_kubelet.kubelet_node_config_error
+ calc: $kubelet_node_config_error
+ units: bool
+ every: 10s
+ warn: $this == 1
+ delay: down 1m multiplier 1.5 max 2h
+ info: the node is experiencing a configuration-related error
+ to: sysadmin
+
+# Failed Token() requests to the alternate token source
+
+ template: token_requests
+ lookup: sum -10s of token_fail_count
+ on: k8s_kubelet.kubelet_token_requests
+ units: failed requests
+ every: 10s
+ warn: $this > 0
+ delay: down 1m multiplier 1.5 max 2h
+ info: failed token requests to alternate token source
+ to: sysadmin
+
+# Docker and runtime operation errors
+
+ template: kubelet_operations_error
+ lookup: sum -1m
+ on: k8s_kubelet.kubelet_operations_errors
+ units: errors
+ every: 10s
+ warn: $this > (($status >= $WARNING) ? (0) : (20))
+ delay: up 30s down 1m multiplier 1.5 max 2h
+ info: number of Docker or runtime operation errors in the last minute
+ to: sysadmin
+
+# -----------------------------------------------------------------------------
+
+# Pod Lifecycle Event Generator Relisting Latency
+
+# 1. calculate the pleg relisting latency for 1m (quantile 0.5, quantile 0.9, quantile 0.99)
+# 2. do the same for the last 10s
+# 3. raise an alarm if the latter is:
+# - 2x the first for quantile 0.5
+# - 4x the first for quantile 0.9
+# - 8x the first for quantile 0.99
+#
+# we assume the minimum latency is 1000 microseconds
+
+# quantile 0.5
+
+template: 1m_kubelet_pleg_relist_latency_quantile_05
+ on: k8s_kubelet.kubelet_pleg_relist_latency_microseconds
+ lookup: average -1m unaligned of kubelet_pleg_relist_latency_05
+ units: microseconds
+ every: 10s
+ info: the average value of pleg relisting latency during the last minute (quantile 0.5)
+
+template: 10s_kubelet_pleg_relist_latency_quantile_05
+ on: k8s_kubelet.kubelet_pleg_relist_latency_microseconds
+ lookup: average -10s unaligned of kubelet_pleg_relist_latency_05
+ calc: $this * 100 / (($1m_kubelet_pleg_relist_latency_quantile_05 < 1000)?(1000):($1m_kubelet_pleg_relist_latency_quantile_05))
+ every: 10s
+ units: %
+ warn: $this > (($status >= $WARNING)?(100):(200))
+ crit: $this > (($status >= $WARNING)?(200):(400))
+ delay: down 1m multiplier 1.5 max 2h
+ info: 10-second average of the pleg relisting latency, as a percentage of the one-minute average (quantile 0.5)
+ to: sysadmin
+
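+# Illustrative worked example (not shipped): if the one-minute average is
+# 5000 microseconds and the 10-second average is 12000, the calc yields
+# 12000 * 100 / 5000 = 240%, above the 200% CLEAR threshold, so the alarm
+# goes to WARNING; one-minute averages under 1000 microseconds are clamped
+# to that floor before dividing.
+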
+# quantile 0.9
+
+template: 1m_kubelet_pleg_relist_latency_quantile_09
+ on: k8s_kubelet.kubelet_pleg_relist_latency_microseconds
+ lookup: average -1m unaligned of kubelet_pleg_relist_latency_09
+ units: microseconds
+ every: 10s
+ info: the average value of pleg relisting latency during the last minute (quantile 0.9)
+
+template: 10s_kubelet_pleg_relist_latency_quantile_09
+ on: k8s_kubelet.kubelet_pleg_relist_latency_microseconds
+ lookup: average -10s unaligned of kubelet_pleg_relist_latency_09
+ calc: $this * 100 / (($1m_kubelet_pleg_relist_latency_quantile_09 < 1000)?(1000):($1m_kubelet_pleg_relist_latency_quantile_09))
+ every: 10s
+ units: %
+ warn: $this > (($status >= $WARNING)?(200):(400))
+ crit: $this > (($status >= $WARNING)?(400):(800))
+ delay: down 1m multiplier 1.5 max 2h
+ info: 10-second average of the pleg relisting latency, as a percentage of the one-minute average (quantile 0.9)
+ to: sysadmin
+
+# quantile 0.99
+
+template: 1m_kubelet_pleg_relist_latency_quantile_099
+ on: k8s_kubelet.kubelet_pleg_relist_latency_microseconds
+ lookup: average -1m unaligned of kubelet_pleg_relist_latency_099
+ units: microseconds
+ every: 10s
+ info: the average value of pleg relisting latency during the last minute (quantile 0.99)
+
+template: 10s_kubelet_pleg_relist_latency_quantile_099
+ on: k8s_kubelet.kubelet_pleg_relist_latency_microseconds
+ lookup: average -10s unaligned of kubelet_pleg_relist_latency_099
+ calc: $this * 100 / (($1m_kubelet_pleg_relist_latency_quantile_099 < 1000)?(1000):($1m_kubelet_pleg_relist_latency_quantile_099))
+ every: 10s
+ units: %
+ warn: $this > (($status >= $WARNING)?(400):(800))
+ crit: $this > (($status >= $WARNING)?(800):(1200))
+ delay: down 1m multiplier 1.5 max 2h
+ info: 10-second average of the pleg relisting latency, as a percentage of the one-minute average (quantile 0.99)
+ to: sysadmin
diff --git a/health/health.d/lighttpd.conf b/health/health.d/lighttpd.conf
new file mode 100644
index 0000000..915907a
--- /dev/null
+++ b/health/health.d/lighttpd.conf
@@ -0,0 +1,14 @@
+
+# make sure lighttpd is running
+
+template: lighttpd_last_collected_secs
+ on: lighttpd.requests
+ calc: $now - $last_collected_t
+ units: seconds ago
+ every: 10s
+ warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every))
+ crit: $this > (($status == $CRITICAL) ? ($update_every) : (60 * $update_every))
+ delay: down 5m multiplier 1.5 max 1h
+ info: number of seconds since the last successful data collection
+ to: webmaster
+
diff --git a/health/health.d/linux_power_supply.conf b/health/health.d/linux_power_supply.conf
new file mode 100644
index 0000000..38727be
--- /dev/null
+++ b/health/health.d/linux_power_supply.conf
@@ -0,0 +1,12 @@
+# Alert on low battery capacity.
+
+template: linux_power_supply_capacity
+ on: powersupply.capacity
+ calc: $capacity
+ units: %
+ every: 10s
+ warn: $this < 10
+ crit: $this < 5
+ delay: up 30s down 5m multiplier 1.2 max 1h
+ info: the percentage remaining capacity of the power supply
+ to: sysadmin
diff --git a/health/health.d/load.conf b/health/health.d/load.conf
new file mode 100644
index 0000000..ee0c54b
--- /dev/null
+++ b/health/health.d/load.conf
@@ -0,0 +1,56 @@
+
+# you can disable an alarm notification by setting the 'to' line to: silent
+
+# Calculate the base trigger point for the load average alarms.
+# This is the maximum number of CPUs in the system over the past 1
+# minute, with a special case for a single CPU of setting the trigger at 2.
+ alarm: load_trigger
+ on: system.load
+ os: linux
+ hosts: *
+ calc: ($active_processors == nan or $active_processors == inf or $active_processors < 2) ? ( 2 ) : ( $active_processors )
+ units: cpus
+ every: 1m
+ info: trigger point for load average alarms
+
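+# Illustrative worked example (not shipped): on an 8-CPU system load_trigger
+# is 8, so load_average_15 below warns above 16 (2 * 8) while CLEAR and,
+# once WARNING, clears again only when the value drops to 14 (1.75 * 8) or
+# less.
+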
+# Send alarms if the load average is unusually high.
+# These intentionally _do not_ calculate the average over the sampled
+# time period because the values being checked already are averages.
+ alarm: load_average_15
+ on: system.load
+ os: linux
+ hosts: *
+ lookup: max -1m unaligned of load15
+ units: load
+ every: 1m
+ warn: $this > (($status >= $WARNING) ? (1.75 * $load_trigger) : (2 * $load_trigger))
+ crit: $this > (($status == $CRITICAL) ? (3.5 * $load_trigger) : (4 * $load_trigger))
+ delay: down 15m multiplier 1.5 max 1h
+ info: fifteen-minute load average
+ to: sysadmin
+
+ alarm: load_average_5
+ on: system.load
+ os: linux
+ hosts: *
+ lookup: max -1m unaligned of load5
+ units: load
+ every: 1m
+ warn: $this > (($status >= $WARNING) ? (3.5 * $load_trigger) : (4 * $load_trigger))
+ crit: $this > (($status == $CRITICAL) ? (7 * $load_trigger) : (8 * $load_trigger))
+ delay: down 15m multiplier 1.5 max 1h
+ info: five-minute load average
+ to: sysadmin
+
+ alarm: load_average_1
+ on: system.load
+ os: linux
+ hosts: *
+ lookup: max -1m unaligned of load1
+ units: load
+ every: 1m
+ warn: $this > (($status >= $WARNING) ? (7 * $load_trigger) : (8 * $load_trigger))
+ crit: $this > (($status == $CRITICAL) ? (14 * $load_trigger) : (16 * $load_trigger))
+ delay: down 15m multiplier 1.5 max 1h
+ info: one-minute load average
+ to: sysadmin
diff --git a/health/health.d/mdstat.conf b/health/health.d/mdstat.conf
new file mode 100644
index 0000000..2f906e1
--- /dev/null
+++ b/health/health.d/mdstat.conf
@@ -0,0 +1,38 @@
+template: mdstat_last_collected
+ on: md.disks
+ calc: $now - $last_collected_t
+ units: seconds ago
+ every: 10s
+ warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every))
+ crit: $this > (($status == $CRITICAL) ? ($update_every) : (60 * $update_every))
+ info: number of seconds since the last successful data collection
+ to: sysadmin
+
+template: mdstat_disks
+ on: md.disks
+ units: failed devices
+ every: 10s
+ calc: $down
+ crit: $this > 0
+ info: number of failed devices (the array is degraded)
+ to: sysadmin
+
+template: mdstat_mismatch_cnt
+ on: md.mismatch_cnt
+ units: unsynchronized blocks
+ calc: $count
+ every: 60s
+ warn: $this > 1024
+ delay: up 30m
+ info: number of unsynchronized blocks in the array (mismatch count)
+ to: sysadmin
+
+template: mdstat_nonredundant_last_collected
+ on: md.nonredundant
+ calc: $now - $last_collected_t
+ units: seconds ago
+ every: 10s
+ warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every))
+ crit: $this > (($status == $CRITICAL) ? ($update_every) : (60 * $update_every))
+ info: number of seconds since the last successful data collection
+ to: sysadmin \ No newline at end of file
diff --git a/health/health.d/megacli.conf b/health/health.d/megacli.conf
new file mode 100644
index 0000000..6e81a2a
--- /dev/null
+++ b/health/health.d/megacli.conf
@@ -0,0 +1,48 @@
+template: adapter_state
+ on: megacli.adapter_degraded
+ units: is degraded
+ lookup: sum -10s
+ every: 10s
+ crit: $this > 0
+ info: adapter state
+ to: sysadmin
+
+template: bbu_relative_charge
+ on: megacli.bbu_relative_charge
+ units: percent
+ lookup: average -10s
+ every: 10s
+ warn: $this <= (($status >= $WARNING) ? (85) : (80))
+ crit: $this <= (($status == $CRITICAL) ? (50) : (40))
+ info: BBU relative state of charge
+ to: sysadmin
+
+template: bbu_cycle_count
+ on: megacli.bbu_cycle_count
+ units: cycle count
+ lookup: average -10s
+ every: 10s
+ warn: $this >= 100
+ crit: $this >= 500
+ info: BBU cycle count
+ to: sysadmin
+
+template: pd_media_errors
+ on: megacli.pd_media_error
+ units: media errors
+ lookup: sum -10s
+ every: 10s
+ warn: $this > 0
+ delay: down 1m multiplier 2 max 10m
+ info: physical drive media errors
+ to: sysadmin
+
+template: pd_predictive_failures
+ on: megacli.pd_predictive_failure
+ units: predictive failures
+ lookup: sum -10s
+ every: 10s
+ warn: $this > 0
+ delay: down 1m multiplier 2 max 10m
+ info: physical drive predictive failures
+ to: sysadmin
diff --git a/health/health.d/memcached.conf b/health/health.d/memcached.conf
new file mode 100644
index 0000000..d248ef5
--- /dev/null
+++ b/health/health.d/memcached.conf
@@ -0,0 +1,52 @@
+
+# make sure memcached is running
+
+template: memcached_last_collected_secs
+ on: memcached.cache
+ calc: $now - $last_collected_t
+ units: seconds ago
+ every: 10s
+ warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every))
+ crit: $this > (($status == $CRITICAL) ? ($update_every) : (60 * $update_every))
+ delay: down 5m multiplier 1.5 max 1h
+ info: number of seconds since the last successful data collection
+ to: dba
+
+
+# detect if memcached cache is full
+
+template: memcached_cache_memory_usage
+ on: memcached.cache
+ calc: $used * 100 / ($used + $available)
+ units: %
+ every: 10s
+ warn: $this > (($status >= $WARNING) ? (70) : (80))
+ crit: $this > (($status == $CRITICAL) ? (80) : (90))
+ delay: up 0 down 15m multiplier 1.5 max 1h
+ info: current cache memory usage
+ to: dba
+
+
+# find the rate memcached cache is filling
+
+template: cache_fill_rate
+ on: memcached.cache
+ lookup: min -10m at -50m unaligned of available
+ calc: ($this - $available) / (($now - $after) / 3600)
+ units: KB/hour
+ every: 1m
+ info: average rate the cache fills up (positive), or frees up (negative) space, for the last hour
+
+
+# find the hours remaining until memcached cache is full
+
+template: out_of_cache_space_time
+ on: memcached.cache
+ calc: ($cache_fill_rate > 0) ? ($available / $cache_fill_rate) : (inf)
+ units: hours
+ every: 10s
+ warn: $this > 0 and $this < (($status >= $WARNING) ? (48) : (8))
+ crit: $this > 0 and $this < (($status == $CRITICAL) ? (24) : (2))
+ delay: down 15m multiplier 1.5 max 1h
+ info: estimated time the cache will run out of space, if the system continues to add data with the rate of the last hour
+ to: dba
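+
+# Illustrative worked example (not shipped): the fill-rate lookup takes the
+# minimum of 'available' in the 10-minute window ending 50 minutes ago, so
+# $after is about an hour ago; if that minimum exceeds the current
+# $available by 1024 KB, cache_fill_rate is roughly 1024 KB/hour, and with
+# $available at 10240 KB out_of_cache_space_time is 10240 / 1024 = 10 hours,
+# above the 8-hour CLEAR warn threshold, so no alarm is raised.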
diff --git a/health/health.d/memory.conf b/health/health.d/memory.conf
new file mode 100644
index 0000000..4a0e6e5
--- /dev/null
+++ b/health/health.d/memory.conf
@@ -0,0 +1,38 @@
+
+# you can disable an alarm notification by setting the 'to' line to: silent
+
+ alarm: 1hour_ecc_memory_correctable
+ on: mem.ecc_ce
+ os: linux
+ hosts: *
+ lookup: sum -10m unaligned
+ units: errors
+ every: 1m
+ warn: $this > 0
+ delay: down 1h multiplier 1.5 max 1h
+ info: number of ECC correctable errors during the last hour
+ to: sysadmin
+
+ alarm: 1hour_ecc_memory_uncorrectable
+ on: mem.ecc_ue
+ os: linux
+ hosts: *
+ lookup: sum -10m unaligned
+ units: errors
+ every: 1m
+ crit: $this > 0
+ delay: down 1h multiplier 1.5 max 1h
+ info: number of ECC uncorrectable errors during the last hour
+ to: sysadmin
+
+ alarm: 1hour_memory_hw_corrupted
+ on: mem.hwcorrupt
+ os: linux
+ hosts: *
+ calc: $HardwareCorrupted
+ units: MB
+ every: 10s
+ warn: $this > 0
+ delay: down 1h multiplier 1.5 max 1h
+ info: amount of memory corrupted due to a hardware failure
+ to: sysadmin
diff --git a/health/health.d/mongodb.conf b/health/health.d/mongodb.conf
new file mode 100644
index 0000000..a80cb31
--- /dev/null
+++ b/health/health.d/mongodb.conf
@@ -0,0 +1,13 @@
+
+# make sure mongodb is running
+
+template: mongodb_last_collected_secs
+ on: mongodb.read_operations
+ calc: $now - $last_collected_t
+ units: seconds ago
+ every: 10s
+ warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every))
+ crit: $this > (($status == $CRITICAL) ? ($update_every) : (60 * $update_every))
+ delay: down 5m multiplier 1.5 max 1h
+ info: number of seconds since the last successful data collection
+ to: dba
diff --git a/health/health.d/mysql.conf b/health/health.d/mysql.conf
new file mode 100644
index 0000000..62cef5a
--- /dev/null
+++ b/health/health.d/mysql.conf
@@ -0,0 +1,146 @@
+
+# make sure mysql is running
+
+template: mysql_last_collected_secs
+ on: mysql.queries
+ calc: $now - $last_collected_t
+ units: seconds ago
+ every: 10s
+ warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every))
+ crit: $this > (($status == $CRITICAL) ? ($update_every) : (60 * $update_every))
+ delay: down 5m multiplier 1.5 max 1h
+ info: number of seconds since the last successful data collection
+ to: dba
+
+
+# -----------------------------------------------------------------------------
+# slow queries
+
+template: mysql_10s_slow_queries
+ on: mysql.queries
+ lookup: sum -10s of slow_queries
+ units: slow queries
+ every: 10s
+ warn: $this > (($status >= $WARNING) ? (5) : (10))
+ crit: $this > (($status == $CRITICAL) ? (10) : (20))
+ delay: down 5m multiplier 1.5 max 1h
+ info: number of mysql slow queries over the last 10 seconds
+ to: dba
+
+
+# -----------------------------------------------------------------------------
+# lock waits
+
+template: mysql_10s_table_locks_immediate
+ on: mysql.table_locks
+ lookup: sum -10s absolute of immediate
+ units: immediate locks
+ every: 10s
+ info: number of table immediate locks over the last 10 seconds
+ to: dba
+
+template: mysql_10s_table_locks_waited
+ on: mysql.table_locks
+ lookup: sum -10s absolute of waited
+ units: waited locks
+ every: 10s
+ info: number of table waited locks over the last 10 seconds
+ to: dba
+
+template: mysql_10s_waited_locks_ratio
+ on: mysql.table_locks
+ calc: ( ($mysql_10s_table_locks_waited + $mysql_10s_table_locks_immediate) > 0 ) ? (($mysql_10s_table_locks_waited * 100) / ($mysql_10s_table_locks_waited + $mysql_10s_table_locks_immediate)) : 0
+ units: %
+ every: 10s
+ warn: $this > (($status >= $WARNING) ? (10) : (25))
+ crit: $this > (($status == $CRITICAL) ? (25) : (50))
+ delay: down 30m multiplier 1.5 max 1h
+ info: the ratio of mysql waited table locks, for the last 10 seconds
+ to: dba
+
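+# Illustrative worked example (not shipped): with 60 waited and 140
+# immediate locks in the last 10 seconds the ratio is 60 * 100 / 200 = 30%,
+# above the 25% CLEAR threshold, so the alarm goes to WARNING; it clears
+# again only once the ratio drops to 10% or less.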
+
+# -----------------------------------------------------------------------------
+# connections
+
+template: mysql_connections
+ on: mysql.connections_active
+ calc: $active * 100 / $limit
+ units: %
+ every: 10s
+ warn: $this > (($status >= $WARNING) ? (60) : (70))
+ crit: $this > (($status == $CRITICAL) ? (80) : (90))
+ delay: down 15m multiplier 1.5 max 1h
+ info: the ratio of current active connections vs the maximum possible number of connections
+ to: dba
+
+
+# -----------------------------------------------------------------------------
+# replication
+
+template: mysql_replication
+ on: mysql.slave_status
+ calc: ($sql_running <= 0 OR $io_running <= 0)?0:1
+ units: ok/failed
+ every: 10s
+ crit: $this == 0
+ delay: down 5m multiplier 1.5 max 1h
+ info: checks if mysql replication has stopped
+ to: dba
+
+template: mysql_replication_lag
+ on: mysql.slave_behind
+ calc: $seconds
+ units: seconds
+ every: 10s
+ warn: $this > (($status >= $WARNING) ? (5) : (10))
+ crit: $this > (($status == $CRITICAL) ? (10) : (30))
+ delay: down 15m multiplier 1.5 max 1h
+ info: the number of seconds mysql replication is behind its master
+ to: dba
+
+
+# -----------------------------------------------------------------------------
+# galera cluster size
+
+template: mysql_galera_cluster_size_max_2m
+ on: mysql.galera_cluster_size
+ lookup: max -2m absolute
+ units: nodes
+ every: 10s
+ info: maximum galera cluster size in the last 2 minutes
+ to: dba
+
+template: mysql_galera_cluster_size
+ on: mysql.galera_cluster_size
+ calc: $nodes
+ units: nodes
+ every: 10s
+ warn: $this > $mysql_galera_cluster_size_max_2m
+ crit: $this < $mysql_galera_cluster_size_max_2m
+ delay: up 20s down 5m multiplier 1.5 max 1h
+ info: cluster size has changed
+ to: dba
+
+# galera node state
+
+template: mysql_galera_cluster_state
+ on: mysql.galera_cluster_state
+ calc: $state
+ every: 10s
+ warn: $this < 4
+ crit: $this < 2
+ delay: up 30s down 5m multiplier 1.5 max 1h
+ info: node state (0: undefined, 1: joining, 2: donor/desynced, 3: joined, 4: synced)
+ to: dba
+
+
+# galera node status
+
+template: mysql_galera_cluster_status
+ on: mysql.galera_cluster_status
+ calc: $wsrep_cluster_status
+ every: 10s
+ crit: $mysql_galera_cluster_state != nan AND $this != 0
+ delay: up 30s down 5m multiplier 1.5 max 1h
+ info: node and cluster status (-1: unknown, 0: primary/quorum present, 1: non-primary/quorum lost, 2: disconnected)
+ to: dba
diff --git a/health/health.d/named.conf b/health/health.d/named.conf
new file mode 100644
index 0000000..4fc65c8
--- /dev/null
+++ b/health/health.d/named.conf
@@ -0,0 +1,14 @@
+
+# make sure named is running
+
+template: named_last_collected_secs
+ on: named.global_queries
+ calc: $now - $last_collected_t
+ units: seconds ago
+ every: 10s
+ warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every))
+ crit: $this > (($status == $CRITICAL) ? ($update_every) : (60 * $update_every))
+ delay: down 5m multiplier 1.5 max 1h
+ info: number of seconds since the last successful data collection
+ to: domainadmin
+
diff --git a/health/health.d/net.conf b/health/health.d/net.conf
new file mode 100644
index 0000000..261290e
--- /dev/null
+++ b/health/health.d/net.conf
@@ -0,0 +1,195 @@
+
+# you can disable an alarm notification by setting the 'to' line to: silent
+
+# -----------------------------------------------------------------------------
+# net traffic overflow
+
+ template: interface_speed
+ on: net.net
+ os: *
+ hosts: *
+ families: *
+ calc: ( $nic_speed_max > 0 ) ? ( $nic_speed_max) : ( nan )
+ units: Mbit
+ every: 10s
+ info: the current speed of the physical network interface
+
+ template: 1m_received_traffic_overflow
+ on: net.net
+ os: linux
+ hosts: *
+ families: *
+ lookup: average -1m unaligned absolute of received
+ calc: ($interface_speed > 0) ? ($this * 100 / ($interface_speed * 1000)) : ( nan )
+ units: %
+ every: 10s
+ warn: $this > (($status >= $WARNING) ? (80) : (85))
+ crit: $this > (($status == $CRITICAL) ? (85) : (90))
+ delay: down 1m multiplier 1.5 max 1h
+ info: interface received bandwidth usage over net device speed max
+ to: sysadmin
+
+ template: 1m_sent_traffic_overflow
+ on: net.net
+ os: linux
+ hosts: *
+ families: *
+ lookup: average -1m unaligned absolute of sent
+ calc: ($interface_speed > 0) ? ($this * 100 / ($interface_speed * 1000)) : ( nan )
+ units: %
+ every: 10s
+ warn: $this > (($status >= $WARNING) ? (80) : (85))
+ crit: $this > (($status == $CRITICAL) ? (85) : (90))
+ delay: down 1m multiplier 1.5 max 1h
+ info: interface sent bandwidth usage over net device speed max
+ to: sysadmin
+
+# -----------------------------------------------------------------------------
+# dropped packets
+
+# check if an interface is dropping packets
+# the alarm is checked every 1 minute
+# and examines the last 10 minutes of data
+#
+# it is possible to have expected packet drops on an interface for some network configurations
+# look at the Monitoring Network Interfaces section in the proc.plugin documentation for more information
+
+template: inbound_packets_dropped
+ on: net.drops
+ os: linux
+ hosts: *
+families: *
+ lookup: sum -10m unaligned absolute of inbound
+ units: packets
+ every: 1m
+ warn: $this >= 5
+ delay: down 1h multiplier 1.5 max 2h
+ info: interface inbound dropped packets in the last 10 minutes
+ to: sysadmin
+
+template: outbound_packets_dropped
+ on: net.drops
+ os: linux
+ hosts: *
+families: *
+ lookup: sum -10m unaligned absolute of outbound
+ units: packets
+ every: 1m
+ warn: $this >= 5
+ delay: down 1h multiplier 1.5 max 2h
+ info: interface outbound dropped packets in the last 10 minutes
+ to: sysadmin
+
+template: inbound_packets_dropped_ratio
+ on: net.packets
+ os: linux
+ hosts: *
+families: *
+ lookup: sum -10m unaligned absolute of received
+ calc: (($inbound_packets_dropped != nan AND $this > 0) ? ($inbound_packets_dropped * 100 / $this) : (0))
+ units: %
+ every: 1m
+ warn: $this >= 0.1
+ crit: $this >= 2
+ delay: down 1h multiplier 1.5 max 2h
+ info: the ratio of inbound dropped packets vs the total number of received packets of the network interface, during the last 10 minutes
+ to: sysadmin
+
+template: outbound_packets_dropped_ratio
+ on: net.packets
+ os: linux
+ hosts: *
+families: *
+ lookup: sum -10m unaligned absolute of sent
+ calc: (($outbound_packets_dropped != nan AND $this > 0) ? ($outbound_packets_dropped * 100 / $this) : (0))
+ units: %
+ every: 1m
+ warn: $this >= 0.1
+ crit: $this >= 2
+ delay: down 1h multiplier 1.5 max 2h
+ info: the ratio of outbound dropped packets vs the total number of sent packets of the network interface, during the last 10 minutes
+ to: sysadmin
+
+# -----------------------------------------------------------------------------
+# interface errors
+
+template: interface_inbound_errors
+ on: net.errors
+ os: freebsd
+ hosts: *
+families: *
+ lookup: sum -10m unaligned absolute of inbound
+ units: errors
+ every: 1m
+ warn: $this >= 5
+ delay: down 1h multiplier 1.5 max 2h
+ info: interface inbound errors in the last 10 minutes
+ to: sysadmin
+
+template: interface_outbound_errors
+ on: net.errors
+ os: freebsd
+ hosts: *
+families: *
+ lookup: sum -10m unaligned absolute of outbound
+ units: errors
+ every: 1m
+ warn: $this >= 5
+ delay: down 1h multiplier 1.5 max 2h
+ info: interface outbound errors in the last 10 minutes
+ to: sysadmin
+
+# -----------------------------------------------------------------------------
+# FIFO errors
+
+# check if an interface is having FIFO
+# buffer errors
+# the alarm is checked every 1 minute
+# and examines the last 10 minutes of data
+
+template: 10min_fifo_errors
+ on: net.fifo
+ os: linux
+ hosts: *
+families: *
+ lookup: sum -10m unaligned absolute
+ units: errors
+ every: 1m
+ warn: $this > 0
+ delay: down 1h multiplier 1.5 max 2h
+ info: interface fifo errors in the last 10 minutes
+ to: sysadmin
+
+# -----------------------------------------------------------------------------
+# check for packet storms
+
+# 1. calculate the rate packets are received in 1m: 1m_received_packets_rate
+# 2. do the same for the last 10s
+# 3. raise an alarm if the latter greatly exceeds the first
+# we clamp the one-minute rate to a floor of
+# 1000 packets/s before comparing
+
+template: 1m_received_packets_rate
+ on: net.packets
+ os: linux freebsd
+ hosts: *
+families: *
+ lookup: average -1m unaligned of received
+ units: packets
+ every: 10s
+ info: the average number of packets received during the last minute
+
+template: 10s_received_packets_storm
+ on: net.packets
+ os: linux freebsd
+ hosts: *
+families: *
+ lookup: average -10s unaligned of received
+ calc: $this * 100 / (($1m_received_packets_rate < 1000)?(1000):($1m_received_packets_rate))
+ every: 10s
+ units: %
+ warn: $this > (($status >= $WARNING)?(200):(5000))
+ crit: $this > (($status >= $WARNING)?(5000):(6000))
+ options: no-clear-notification
+ info: the % of the rate of received packets in the last 10 seconds, compared to the rate of the last minute (clear notification for this alarm will not be sent)
+ to: sysadmin
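+
+# Illustrative worked example (not shipped): with a one-minute rate of 2000
+# packets/s and a 10-second average of 120000 packets/s, the calc yields
+# 120000 * 100 / 2000 = 6000%, above the 5000% CLEAR warn threshold; rates
+# below 1000 packets/s are clamped to that floor before dividing.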
diff --git a/health/health.d/netfilter.conf b/health/health.d/netfilter.conf
new file mode 100644
index 0000000..1d07752
--- /dev/null
+++ b/health/health.d/netfilter.conf
@@ -0,0 +1,29 @@
+
+# you can disable an alarm notification by setting the 'to' line to: silent
+
+ alarm: netfilter_last_collected_secs
+ on: netfilter.conntrack_sockets
+ os: linux
+ hosts: *
+ calc: $now - $last_collected_t
+ units: seconds ago
+ every: 10s
+ warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every))
+ crit: $this > (($status == $CRITICAL) ? ($update_every) : (60 * $update_every))
+ delay: down 5m multiplier 1.5 max 1h
+ info: number of seconds since the last successful data collection
+ to: sysadmin
+
+ alarm: netfilter_conntrack_full
+ on: netfilter.conntrack_sockets
+ os: linux
+ hosts: *
+ lookup: max -10s unaligned of connections
+ calc: $this * 100 / $netfilter_conntrack_max
+ units: %
+ every: 10s
+ warn: $this > (($status >= $WARNING) ? (70) : (80))
+ crit: $this > (($status == $CRITICAL) ? (80) : (90))
+ delay: down 5m multiplier 1.5 max 1h
+ info: the number of connections tracked by the netfilter connection tracker, as a percentage of the connection tracker table size
+ to: sysadmin
diff --git a/health/health.d/nginx.conf b/health/health.d/nginx.conf
new file mode 100644
index 0000000..a686c3d
--- /dev/null
+++ b/health/health.d/nginx.conf
@@ -0,0 +1,14 @@
+
+# make sure nginx is running
+
+template: nginx_last_collected_secs
+ on: nginx.requests
+ calc: $now - $last_collected_t
+ units: seconds ago
+ every: 10s
+ warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every))
+ crit: $this > (($status == $CRITICAL) ? ($update_every) : (60 * $update_every))
+ delay: down 5m multiplier 1.5 max 1h
+ info: number of seconds since the last successful data collection
+ to: webmaster
+
diff --git a/health/health.d/nginx_plus.conf b/health/health.d/nginx_plus.conf
new file mode 100644
index 0000000..5a171a7
--- /dev/null
+++ b/health/health.d/nginx_plus.conf
@@ -0,0 +1,14 @@
+
+# make sure nginx_plus is running
+
+template: nginx_plus_last_collected_secs
+ on: nginx_plus.requests_total
+ calc: $now - $last_collected_t
+ units: seconds ago
+ every: 10s
+ warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every))
+ crit: $this > (($status == $CRITICAL) ? ($update_every) : (60 * $update_every))
+ delay: down 5m multiplier 1.5 max 1h
+ info: number of seconds since the last successful data collection
+ to: webmaster
+
diff --git a/health/health.d/phpfpm.conf b/health/health.d/phpfpm.conf
new file mode 100644
index 0000000..ec7ae74
--- /dev/null
+++ b/health/health.d/phpfpm.conf
@@ -0,0 +1,14 @@
+
+# make sure phpfpm is running
+
+template: phpfpm_last_collected_secs
+ on: phpfpm.requests
+ calc: $now - $last_collected_t
+ units: seconds ago
+ every: 10s
+ warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every))
+ crit: $this > (($status == $CRITICAL) ? ($update_every) : (60 * $update_every))
+ delay: down 5m multiplier 1.5 max 1h
+ info: number of seconds since the last successful data collection
+ to: webmaster
+
diff --git a/health/health.d/pihole.conf b/health/health.d/pihole.conf
new file mode 100644
index 0000000..b255d35
--- /dev/null
+++ b/health/health.d/pihole.conf
@@ -0,0 +1,65 @@
+
+# Make sure Pi-hole is responding.
+
+template: pihole_last_collected_secs
+ on: pihole.dns_queries_total
+ calc: $now - $last_collected_t
+ units: seconds ago
+ every: 10s
+ warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every))
+ crit: $this > (($status == $CRITICAL) ? ($update_every) : (60 * $update_every))
+ delay: down 5m multiplier 1.5 max 1h
+ info: number of seconds since the last successful data collection
+ to: webmaster
+
+# Blocked DNS queries.
+
+template: pihole_blocked_queries
+ on: pihole.dns_queries_percentage
+ every: 10s
+ units: %
+ calc: $blocked
+ warn: $this > ( ($status >= $WARNING ) ? ( 45 ) : ( 55 ) )
+ crit: $this > ( ($status >= $CRITICAL) ? ( 55 ) : ( 75 ) )
+ delay: up 2m down 5m
+ info: percentage of blocked dns queries over the last 24 hours
+ to: sysadmin
+
+
+# Blocklist last update time.
+# Default update interval is a week.
+
+template: pihole_blocklist_last_update
+ on: pihole.blocklist_last_update
+ every: 10s
+ units: seconds
+ calc: $ago
+ warn: $this > 60 * 60 * 24 * 8
+ crit: $this > 60 * 60 * 24 * 8 * 2
+ info: blocklist last update time
+ to: sysadmin
+
+# Gravity file check (gravity.list).
+
+template: pihole_blocklist_gravity_file
+ on: pihole.blocklist_last_update
+ every: 10s
+ units: boolean
+ calc: $file_exists
+ crit: $this != 1
+ delay: up 2m down 5m
+ info: gravity file existence
+ to: sysadmin
+
+# Pi-hole's ability to block unwanted domains.
+# Should be enabled. The whole point of Pi-hole!
+
+template: pihole_status
+ on: pihole.unwanted_domains_blocking_status
+ every: 10s
+ units: boolean
+ calc: $enabled
+ warn: $this != 1
+ delay: up 2m down 5m
+ info: unwanted domains blocking status
+ to: sysadmin
diff --git a/health/health.d/portcheck.conf b/health/health.d/portcheck.conf
new file mode 100644
index 0000000..696333f
--- /dev/null
+++ b/health/health.d/portcheck.conf
@@ -0,0 +1,46 @@
+template: portcheck_last_collected_secs
+families: *
+ on: portcheck.status
+ calc: $now - $last_collected_t
+ every: 10s
+ units: seconds ago
+ warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every))
+ crit: $this > (($status == $CRITICAL) ? ($update_every) : (60 * $update_every))
+ delay: down 5m multiplier 1.5 max 1h
+ info: number of seconds since the last successful data collection
+ to: sysadmin
+
+# This is a fast-reacting no-notification alarm ideal for custom dashboards or badges
+template: service_reachable
+families: *
+ on: portcheck.status
+ lookup: average -1m unaligned percentage of success
+ calc: ($this < 75) ? (0) : ($this)
+ every: 5s
+ units: up/down
+ info: at least 75% successful connections during the last 60 seconds, ideal for badges
+ to: silent
+
+template: connection_timeouts
+families: *
+ on: portcheck.status
+ lookup: average -5m unaligned percentage of timeout
+ every: 10s
+ units: %
+ warn: $this >= 10 AND $this < 40
+ crit: $this >= 40
+ delay: down 5m multiplier 1.5 max 1h
+ info: average of timeouts during the last 5 minutes
+ to: sysadmin
+
+template: connection_fails
+families: *
+ on: portcheck.status
+ lookup: average -5m unaligned percentage of no_connection,failed
+ every: 10s
+ units: %
+ warn: $this >= 10 AND $this < 40
+ crit: $this >= 40
+ delay: down 5m multiplier 1.5 max 1h
+ info: average of failed connections during the last 5 minutes
+ to: sysadmin
diff --git a/health/health.d/postgres.conf b/health/health.d/postgres.conf
new file mode 100644
index 0000000..4e0583b
--- /dev/null
+++ b/health/health.d/postgres.conf
@@ -0,0 +1,13 @@
+
+# make sure postgres is running
+
+template: postgres_last_collected_secs
+ on: postgres.db_stat_transactions
+ calc: $now - $last_collected_t
+ units: seconds ago
+ every: 10s
+ warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every))
+ crit: $this > (($status == $CRITICAL) ? ($update_every) : (60 * $update_every))
+ delay: down 5m multiplier 1.5 max 1h
+ info: number of seconds since the last successful data collection
+ to: dba
diff --git a/health/health.d/processes.conf b/health/health.d/processes.conf
new file mode 100644
index 0000000..293f1aa
--- /dev/null
+++ b/health/health.d/processes.conf
@@ -0,0 +1,13 @@
+# you can disable an alarm notification by setting the 'to' line to: silent
+
+ alarm: active_processes
+ on: system.active_processes
+ hosts: *
+ calc: $active * 100 / $pidmax
+ units: %
+ every: 5s
+ warn: $this > (($status >= $WARNING) ? (75) : (80))
+ crit: $this > (($status == $CRITICAL) ? (85) : (90))
+ delay: down 5m multiplier 1.5 max 1h
+ info: the percentage of active processes
+ to: sysadmin
diff --git a/health/health.d/pulsar.conf b/health/health.d/pulsar.conf
new file mode 100644
index 0000000..0147894
--- /dev/null
+++ b/health/health.d/pulsar.conf
@@ -0,0 +1,13 @@
+
+# Availability
+
+template: pulsar_last_collected_secs
+ on: pulsar.broker_components
+ calc: $now - $last_collected_t
+ units: seconds ago
+ every: 10s
+ warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every))
+ crit: $this > (($status == $CRITICAL) ? ($update_every) : (60 * $update_every))
+ delay: down 5m multiplier 1.5 max 1h
+ info: number of seconds since the last successful data collection
+ to: sysadmin
diff --git a/health/health.d/qos.conf b/health/health.d/qos.conf
new file mode 100644
index 0000000..7290d15
--- /dev/null
+++ b/health/health.d/qos.conf
@@ -0,0 +1,18 @@
+
+# you can disable an alarm notification by setting the 'to' line to: silent
+
+# check if a QoS class is dropping packets
+# the alarm is checked every 30 seconds
+# and examines the last 10 minutes of data
+
+#template: 10min_qos_packet_drops
+# on: tc.qos_dropped
+# os: linux
+# hosts: *
+# lookup: sum -10m unaligned absolute
+# every: 30s
+# warn: $this > 0
+# delay: up 0 down 30m multiplier 1.5 max 1h
+# units: packets
+# info: dropped packets in the last 10 minutes
+# to: sysadmin
diff --git a/health/health.d/ram.conf b/health/health.d/ram.conf
new file mode 100644
index 0000000..0a71dac
--- /dev/null
+++ b/health/health.d/ram.conf
@@ -0,0 +1,64 @@
+
+# you can disable an alarm notification by setting the 'to' line to: silent
+
+ alarm: used_ram_to_ignore
+ on: system.ram
+ os: linux freebsd
+ hosts: *
+ calc: ($zfs.arc_size.arcsz == nan)?(0):($zfs.arc_size.arcsz - $zfs.arc_size.min)
+ every: 10s
+ info: the amount of memory reported as used, but actually capable of resizing itself based on the system's needs (e.g. the ZFS ARC)
+
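+# Illustrative worked example (not shipped), on a host using ZFS: if the ARC
+# currently holds 4096 MB and its configured minimum is 1024 MB, then
+# 3072 MB are treated as reclaimable and subtracted from $used by
+# ram_in_use below, so ARC growth alone does not trigger the RAM alarms.
+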
+ alarm: ram_in_use
+ on: system.ram
+ os: linux
+ hosts: *
+# calc: $used * 100 / ($used + $cached + $free)
+ calc: ($used - $used_ram_to_ignore) * 100 / ($used + $cached + $free)
+ units: %
+ every: 10s
+ warn: $this > (($status >= $WARNING) ? (80) : (90))
+ crit: $this > (($status == $CRITICAL) ? (90) : (98))
+ delay: down 15m multiplier 1.5 max 1h
+ info: system RAM used
+ to: sysadmin
+
+ alarm: ram_available
+ on: mem.available
+ os: linux
+ hosts: *
+ calc: ($avail + $system.ram.used_ram_to_ignore) * 100 / ($system.ram.used + $system.ram.cached + $system.ram.free + $system.ram.buffers)
+ units: %
+ every: 10s
+ warn: $this < (($status >= $WARNING) ? (15) : (10))
+ crit: $this < (($status == $CRITICAL) ? (10) : ( 5))
+ delay: down 15m multiplier 1.5 max 1h
+ info: estimated amount of RAM available for userspace processes, without causing swapping
+ to: sysadmin
+
+## FreeBSD
+ alarm: ram_in_use
+ on: system.ram
+ os: freebsd
+ hosts: *
+ calc: ($active + $wired + $laundry + $buffers - $used_ram_to_ignore) * 100 / ($active + $wired + $laundry + $buffers - $used_ram_to_ignore + $cache + $free + $inactive)
+ units: %
+ every: 10s
+ warn: $this > (($status >= $WARNING) ? (80) : (90))
+ crit: $this > (($status == $CRITICAL) ? (90) : (98))
+ delay: down 15m multiplier 1.5 max 1h
+ info: system RAM usage
+ to: sysadmin
+
+ alarm: ram_available
+ on: system.ram
+ os: freebsd
+ hosts: *
+ calc: ($free + $inactive + $used_ram_to_ignore) * 100 / ($free + $active + $inactive + $wired + $cache + $laundry + $buffers)
+ units: %
+ every: 10s
+ warn: $this < (($status >= $WARNING) ? (15) : (10))
+ crit: $this < (($status == $CRITICAL) ? (10) : ( 5))
+ delay: down 15m multiplier 1.5 max 1h
+ info: estimated amount of RAM available for userspace processes, without causing swapping
+ to: sysadmin
diff --git a/health/health.d/redis.conf b/health/health.d/redis.conf
new file mode 100644
index 0000000..c08a884
--- /dev/null
+++ b/health/health.d/redis.conf
@@ -0,0 +1,34 @@
+
+# make sure redis is running
+
+template: redis_last_collected_secs
+ on: redis.operations
+ calc: $now - $last_collected_t
+ units: seconds ago
+ every: 10s
+ warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every))
+ crit: $this > (($status == $CRITICAL) ? ($update_every) : (60 * $update_every))
+ delay: down 5m multiplier 1.5 max 1h
+ info: number of seconds since the last successful data collection
+ to: dba
+
+template: redis_bgsave_broken
+families: *
+ on: redis.bgsave_health
+ every: 10s
+ crit: $rdb_last_bgsave_status != 0
+ units: ok/failed
+ info: states if redis bgsave is working
+ delay: down 5m multiplier 1.5 max 1h
+ to: dba
+
+template: redis_bgsave_slow
+families: *
+ on: redis.bgsave_now
+ every: 10s
+ warn: $rdb_bgsave_in_progress > 600
+ crit: $rdb_bgsave_in_progress > 1200
+ units: seconds
+ info: the time redis needs to save its database
+ delay: down 5m multiplier 1.5 max 1h
+ to: dba
diff --git a/health/health.d/retroshare.conf b/health/health.d/retroshare.conf
new file mode 100644
index 0000000..2344b60
--- /dev/null
+++ b/health/health.d/retroshare.conf
@@ -0,0 +1,25 @@
+# make sure RetroShare is running
+
+template: retroshare_last_collected_secs
+ on: retroshare.peers
+ calc: $now - $last_collected_t
+ units: seconds ago
+ every: 10s
+ warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every))
+ crit: $this > (($status == $CRITICAL) ? ($update_every) : (60 * $update_every))
+ delay: down 5m multiplier 1.5 max 1h
+ info: number of seconds since the last successful data collection
+ to: sysadmin
+
+# make sure the DHT is fine when active
+
+template: retroshare_dht_working
+ on: retroshare.dht
+ calc: $dht_size_all
+ units: peers
+ every: 1m
+ warn: $this < (($status >= $WARNING) ? (120) : (100))
+ crit: $this < (($status == $CRITICAL) ? (10) : (1))
+ delay: up 0 down 15m multiplier 1.5 max 1h
+ info: checks whether the DHT has enough peers to operate
+ to: sysadmin
diff --git a/health/health.d/riakkv.conf b/health/health.d/riakkv.conf
new file mode 100644
index 0000000..7453027
--- /dev/null
+++ b/health/health.d/riakkv.conf
@@ -0,0 +1,80 @@
+# Ensure that Riak is running.
+template: riak_last_collected_secs
+ on: riak.kv.throughput
+ calc: $now - $last_collected_t
+ units: seconds ago
+ every: 10s
+ warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every))
+ crit: $this > (($status == $CRITICAL) ? ($update_every) : (60 * $update_every))
+ delay: down 5m multiplier 1.5 max 1h
+ info: number of seconds since the last successful data collection
+ to: dba
+
+# Warn if a list keys operation is running.
+template: riak_list_keys_active
+ on: riak.core.fsm_active
+ calc: $list_fsm_active
+ units: state machines
+ every: 10s
+ warn: $list_fsm_active > 0
+ info: number of currently running list keys finite state machines
+ to: dba
+
+
+## Timing healthchecks
+# KV GET
+template: 1h_kv_get_mean_latency
+ on: riak.kv.latency.get
+ calc: $node_get_fsm_time_mean
+ lookup: average -1h unaligned of time
+ every: 30s
+ units: ms
+ info: mean KV GET latency over the last hour
+
+template: riak_kv_get_slow
+ on: riak.kv.latency.get
+ calc: $mean
+ lookup: average -3m unaligned of time
+ units: ms
+ every: 10s
+ warn: ($this > ($1h_kv_get_mean_latency * 2) )
+ crit: ($this > ($1h_kv_get_mean_latency * 3) )
+ info: average KV GET time over the last 3 minutes, compared to the average over the last hour
+ delay: down 5m multiplier 1.5 max 1h
+ to: dba
+
+# KV PUT
+template: 1h_kv_put_mean_latency
+ on: riak.kv.latency.put
+ calc: $node_put_fsm_time_mean
+ lookup: average -1h unaligned of time
+ every: 30s
+ units: ms
+ info: mean KV PUT latency over the last hour
+
+template: riak_kv_put_slow
+ on: riak.kv.latency.put
+ calc: $mean
+ lookup: average -3m unaligned of time
+ units: ms
+ every: 10s
+ warn: ($this > ($1h_kv_put_mean_latency * 2) )
+ crit: ($this > ($1h_kv_put_mean_latency * 3) )
+ info: average KV PUT time over the last 3 minutes, compared to the average over the last hour
+ delay: down 5m multiplier 1.5 max 1h
+ to: dba
+
+
+## VM healthchecks
+
+# Default Erlang VM process limit: 262144
+# On systems observed, this is < 2000, but may grow depending on load.
+template: riak_vm_high_process_count
+ on: riak.vm
+ calc: $sys_process_count
+ units: processes
+ every: 10s
+ warn: $this > 10000
+ crit: $this > 100000
+ info: number of processes running in the Erlang VM (the default limit on ERTS 10.2.4 is 262144)
+ to: dba
diff --git a/health/health.d/scaleio.conf b/health/health.d/scaleio.conf
new file mode 100644
index 0000000..1a3088a
--- /dev/null
+++ b/health/health.d/scaleio.conf
@@ -0,0 +1,38 @@
+
+# make sure scaleio is running
+
+template: scaleio_last_collected_secs
+ on: scaleio.system_capacity_total
+ calc: $now - $last_collected_t
+ units: seconds ago
+ every: 10s
+ warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every))
+ crit: $this > (($status == $CRITICAL) ? ($update_every) : (60 * $update_every))
+ delay: down 5m multiplier 1.5 max 1h
+ info: number of seconds since the last successful data collection
+ to: sysadmin
+
+# make sure Storage Pool capacity utilization is under limit
+
+template: scaleio_storage_pool_capacity_utilization
+ on: scaleio.storage_pool_capacity_utilization
+ calc: $used
+ units: %
+ every: 10s
+ warn: $this > (($status >= $WARNING) ? (80) : (90))
+ crit: $this > (($status == $CRITICAL) ? (90) : (98))
+ delay: down 15m multiplier 1.5 max 1h
+ info: Storage Pool capacity utilization
+ to: sysadmin
+
+
+# make sure Sdc is connected to MDM
+
+template: scaleio_sdc_mdm_connection_state
+ on: scaleio.sdc_mdm_connection_state
+ calc: $connected
+ every: 10s
+ warn: $this != 1
+ delay: up 30s down 5m multiplier 1.5 max 1h
+ info: Sdc connection to MDM state
+ to: sysadmin
diff --git a/health/health.d/softnet.conf b/health/health.d/softnet.conf
new file mode 100644
index 0000000..f835f2a
--- /dev/null
+++ b/health/health.d/softnet.conf
@@ -0,0 +1,40 @@
+
+# you can disable an alarm notification by setting the 'to' line to: silent
+
+# check for common /proc/net/softnet_stat errors
+
+ alarm: 1min_netdev_backlog_exceeded
+ on: system.softnet_stat
+ os: linux
+ hosts: *
+ lookup: average -1m unaligned absolute of dropped
+ units: packets
+ every: 10s
+ warn: $this > (($status >= $WARNING) ? (0) : (10))
+ delay: down 1h multiplier 1.5 max 2h
+ info: average number of packets dropped in the last 1min, because sysctl net.core.netdev_max_backlog was exceeded (this can be a cause for dropped packets)
+ to: sysadmin
+
+ alarm: 1min_netdev_budget_ran_outs
+ on: system.softnet_stat
+ os: linux
+ hosts: *
+ lookup: average -1m unaligned absolute of squeezed
+ units: events
+ every: 10s
+ warn: $this > (($status >= $WARNING) ? (0) : (10))
+ delay: down 1h multiplier 1.5 max 2h
+ info: average number of times, during the last 1min, ksoftirq ran out of sysctl net.core.netdev_budget or net.core.netdev_budget_usecs, with work remaining (this can be a cause for dropped packets)
+ to: silent
+
+ alarm: 10min_netisr_backlog_exceeded
+ on: system.softnet_stat
+ os: freebsd
+ hosts: *
+ lookup: average -1m unaligned absolute of qdrops
+ units: packets
+ every: 10s
+ warn: $this > (($status >= $WARNING) ? (0) : (10))
+ delay: down 1h multiplier 1.5 max 2h
+ info: average number of drops in the last 1min, because sysctl net.route.netisr_maxqlen was exceeded (this can be a cause for dropped packets)
+ to: sysadmin
diff --git a/health/health.d/squid.conf b/health/health.d/squid.conf
new file mode 100644
index 0000000..06cc967
--- /dev/null
+++ b/health/health.d/squid.conf
@@ -0,0 +1,14 @@
+
+# make sure squid is running
+
+template: squid_last_collected_secs
+ on: squid.clients_requests
+ calc: $now - $last_collected_t
+ units: seconds ago
+ every: 10s
+ warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every))
+ crit: $this > (($status == $CRITICAL) ? ($update_every) : (60 * $update_every))
+ delay: down 5m multiplier 1.5 max 1h
+ info: number of seconds since the last successful data collection
+ to: proxyadmin
+
diff --git a/health/health.d/stiebeleltron.conf b/health/health.d/stiebeleltron.conf
new file mode 100644
index 0000000..e0361eb
--- /dev/null
+++ b/health/health.d/stiebeleltron.conf
@@ -0,0 +1,11 @@
+template: stiebeleltron_last_collected_secs
+families: *
+ on: stiebeleltron.heating.hc1
+ calc: $now - $last_collected_t
+ every: 10s
+ units: seconds ago
+ warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every))
+ crit: $this > (($status == $CRITICAL) ? ($update_every) : (60 * $update_every))
+ delay: down 5m multiplier 1.5 max 1h
+ info: number of seconds since the last successful data collection
+ to: sitemgr
diff --git a/health/health.d/swap.conf b/health/health.d/swap.conf
new file mode 100644
index 0000000..f920b08
--- /dev/null
+++ b/health/health.d/swap.conf
@@ -0,0 +1,43 @@
+
+# you can disable an alarm notification by setting the 'to' line to: silent
+
+ alarm: 30min_ram_swapped_out
+ on: system.swapio
+ os: linux freebsd
+ hosts: *
+ lookup: sum -30m unaligned absolute of out
+ # we have to convert KB to MB by dividing $this (i.e. the result of the lookup) by 1024
+ calc: $this / 1024 * 100 / ( $system.ram.used + $system.ram.cached + $system.ram.free )
+ units: % of RAM
+ every: 1m
+ warn: $this > (($status >= $WARNING) ? (10) : (20))
+ crit: $this > (($status == $CRITICAL) ? (20) : (30))
+ delay: up 0 down 15m multiplier 1.5 max 1h
+ info: the amount of memory swapped out in the last 30 minutes, as a percentage of the system RAM
+ to: sysadmin
+
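+# Illustrative worked example (not shipped): 524288 KB swapped out over 30
+# minutes is 512 MB; on a system with 8192 MB of RAM the calc gives
+# 512 * 100 / 8192 = 6.25% of RAM, well below the 20% CLEAR warn threshold,
+# so the alarm stays CLEAR.
+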
+ alarm: ram_in_swap
+ on: system.swap
+ os: linux
+ hosts: *
+ calc: $used * 100 / ( $system.ram.used + $system.ram.cached + $system.ram.free )
+ units: % of RAM
+ every: 10s
+ warn: $this > (($status >= $WARNING) ? (15) : (20))
+ crit: $this > (($status == $CRITICAL) ? (40) : (50))
+ delay: up 30s down 15m multiplier 1.5 max 1h
+ info: the swap memory used, as a percentage of the system RAM
+ to: sysadmin
+
+ alarm: used_swap
+ on: system.swap
+ os: linux freebsd
+ hosts: *
+ calc: $used * 100 / ( $used + $free )
+ units: %
+ every: 10s
+ warn: $this > (($status >= $WARNING) ? (80) : (90))
+ crit: $this > (($status == $CRITICAL) ? (90) : (98))
+ delay: up 30s down 15m multiplier 1.5 max 1h
+ info: the percentage of swap memory used
+ to: sysadmin
diff --git a/health/health.d/tcp_conn.conf b/health/health.d/tcp_conn.conf
new file mode 100644
index 0000000..7aa9a98
--- /dev/null
+++ b/health/health.d/tcp_conn.conf
@@ -0,0 +1,19 @@
+
+#
+# ${tcp_max_connections} may be nan or -1 if the system
+# supports a dynamic threshold for TCP connections.
+# In this case, the alarm value will always be zero.
+#
+
+ alarm: tcp_connections
+ on: ipv4.tcpsock
+ os: linux
+ hosts: *
+ calc: (${tcp_max_connections} > 0) ? ( ${connections} * 100 / ${tcp_max_connections} ) : 0
+ units: %
+ every: 10s
+ warn: $this > (($status >= $WARNING ) ? ( 60 ) : ( 80 ))
+ crit: $this > (($status >= $CRITICAL) ? ( 80 ) : ( 90 ))
+ delay: up 0 down 5m multiplier 1.5 max 1h
+ info: the percentage of IPv4 TCP connections over the max allowed
+ to: sysadmin
diff --git a/health/health.d/tcp_listen.conf b/health/health.d/tcp_listen.conf
new file mode 100644
index 0000000..3b30725
--- /dev/null
+++ b/health/health.d/tcp_listen.conf
@@ -0,0 +1,83 @@
+#
+# There are two queues involved when incoming TCP connections are handled
+# (both at the kernel):
+#
+# SYN queue
+# The SYN queue tracks TCP handshakes until connections are fully established.
+# It overflows when too many incoming TCP connection requests hang in the
+# half-open state and the server is not configured to fall back to SYN cookies.
+# Overflows are usually caused by SYN flood DoS attacks (i.e. someone sends
+# lots of SYN packets and never completes the handshakes).
+#
+# Accept queue
+# The accept queue holds fully established TCP connections waiting to be handled
+# by the listening application. It overflows when the server application fails
+# to accept new connections at the rate they are coming in.
+#
+#
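+# The accept queue counters used below (ListenOverflows, ListenDrops) come
+# from the TcpExt section of /proc/net/netstat; the queue depth is bounded
+# by the backlog the application passes to listen(2), capped by the
+# net.core.somaxconn sysctl.
+#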
+# -----------------------------------------------------------------------------
+# tcp accept queue (at the kernel)
+
+ alarm: 1m_tcp_accept_queue_overflows
+ on: ip.tcp_accept_queue
+ os: linux
+ hosts: *
+ lookup: average -60s unaligned absolute of ListenOverflows
+ units: overflows
+ every: 10s
+ warn: $this > 1
+ crit: $this > (($status == $CRITICAL) ? (1) : (5))
+ delay: up 0 down 5m multiplier 1.5 max 1h
+ info: the average number of times the TCP accept queue of the kernel overflowed, during the last minute
+ to: sysadmin
+
+# THIS IS TOO GENERIC
+# CHECK: https://github.com/netdata/netdata/issues/3234#issuecomment-423935842
+ alarm: 1m_tcp_accept_queue_drops
+ on: ip.tcp_accept_queue
+ os: linux
+ hosts: *
+ lookup: average -60s unaligned absolute of ListenDrops
+ units: drops
+ every: 10s
+ warn: $this > 1
+ crit: $this > (($status == $CRITICAL) ? (1) : (5))
+ delay: up 0 down 5m multiplier 1.5 max 1h
+ info: the average number of times the TCP accept queue of the kernel dropped packets, during the last minute (includes bogus packets received)
+ to: sysadmin
+
+
+# -----------------------------------------------------------------------------
+# tcp SYN queue (at the kernel)
+
+# When the SYN queue is full, either TcpExtTCPReqQFullDoCookies or
+# TcpExtTCPReqQFullDrop is incremented, depending on whether SYN cookies are
+# enabled or not. In both cases this probably indicates a SYN flood attack,
+# so a notification should be sent.
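+#
+# For example: under a SYN flood with SYN cookies disabled
+# (sysctl net.ipv4.tcp_syncookies=0) TCPReqQFullDrop grows and
+# 1m_tcp_syn_queue_drops below fires; with cookies enabled it is
+# TCPReqQFullDoCookies that grows, firing 1m_tcp_syn_queue_cookies instead.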
+
+ alarm: 1m_tcp_syn_queue_drops
+ on: ip.tcp_syn_queue
+ os: linux
+ hosts: *
+ lookup: average -60s unaligned absolute of TCPReqQFullDrop
+ units: drops
+ every: 10s
+ warn: $this > 1
+ crit: $this > (($status == $CRITICAL) ? (0) : (5))
+ delay: up 10 down 5m multiplier 1.5 max 1h
+ info: the number of times the TCP SYN queue of the kernel was full and dropped packets, during the last minute
+ to: sysadmin
+
+ alarm: 1m_tcp_syn_queue_cookies
+ on: ip.tcp_syn_queue
+ os: linux
+ hosts: *
+ lookup: average -60s unaligned absolute of TCPReqQFullDoCookies
+ units: cookies
+ every: 10s
+ warn: $this > 1
+ crit: $this > (($status == $CRITICAL) ? (0) : (5))
+ delay: up 10 down 5m multiplier 1.5 max 1h
+ info: the number of times the TCP SYN queue of the kernel was full and sent SYN cookies, during the last minute
+ to: sysadmin
+
diff --git a/health/health.d/tcp_mem.conf b/health/health.d/tcp_mem.conf
new file mode 100644
index 0000000..6927d57
--- /dev/null
+++ b/health/health.d/tcp_mem.conf
@@ -0,0 +1,20 @@
+#
+# check
+# http://blog.tsunanet.net/2011/03/out-of-socket-memory.html
+#
+# We give a warning when TCP is under memory pressure
+# and a critical when TCP is 90% of its upper memory limit
+#
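+# ${tcp_mem_pressure} and ${tcp_mem_high} correspond to the last two of the
+# three page counts in /proc/sys/net/ipv4/tcp_mem (min, pressure, high);
+# for example, with "tcp_mem = 46080 61440 92160" the critical threshold
+# sits near 92160 * 0.9 = 82944 pages.
+#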
+
+ alarm: tcp_memory
+ on: ipv4.sockstat_tcp_mem
+ os: linux
+ hosts: *
+ calc: ${mem} * 100 / ${tcp_mem_high}
+ units: %
+ every: 10s
+ warn: ${mem} > (($status >= $WARNING ) ? ( ${tcp_mem_pressure} * 0.8 ) : ( ${tcp_mem_pressure} ))
+ crit: ${mem} > (($status >= $CRITICAL ) ? ( ${tcp_mem_pressure} ) : ( ${tcp_mem_high} * 0.9 ))
+ delay: up 0 down 5m multiplier 1.5 max 1h
+ info: the amount of TCP memory as a percentage of its max memory limit
+ to: sysadmin
diff --git a/health/health.d/tcp_orphans.conf b/health/health.d/tcp_orphans.conf
new file mode 100644
index 0000000..280d659
--- /dev/null
+++ b/health/health.d/tcp_orphans.conf
@@ -0,0 +1,21 @@
+
+#
+# check
+# http://blog.tsunanet.net/2011/03/out-of-socket-memory.html
+#
+# The kernel may penalize orphans by 2x or even 4x
+# so we alarm warning at 25% and critical at 50%
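+# (worked out: a 2x penalty makes 50% usage effectively 100% of the limit,
+# and a 4x penalty does the same at 25%)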
+#
+
+ alarm: tcp_orphans
+ on: ipv4.sockstat_tcp_sockets
+ os: linux
+ hosts: *
+ calc: ${orphan} * 100 / ${tcp_max_orphans}
+ units: %
+ every: 10s
+ warn: $this > (($status >= $WARNING ) ? ( 20 ) : ( 25 ))
+ crit: $this > (($status >= $CRITICAL) ? ( 25 ) : ( 50 ))
+ delay: up 0 down 5m multiplier 1.5 max 1h
+ info: the percentage of orphan IPv4 TCP sockets over the max allowed (this may lead to too-many-orphans errors)
+ to: sysadmin
diff --git a/health/health.d/tcp_resets.conf b/health/health.d/tcp_resets.conf
new file mode 100644
index 0000000..36a550a
--- /dev/null
+++ b/health/health.d/tcp_resets.conf
@@ -0,0 +1,67 @@
+
+# you can disable an alarm notification by setting the 'to' line to: silent
+
+# -----------------------------------------------------------------------------
+
+ alarm: ipv4_tcphandshake_last_collected_secs
+ on: ipv4.tcphandshake
+ os: linux freebsd
+ hosts: *
+ calc: $now - $last_collected_t
+ units: seconds ago
+ every: 10s
+ warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every))
+ crit: $this > (($status == $CRITICAL) ? ($update_every) : (60 * $update_every))
+ delay: up 0 down 5m multiplier 1.5 max 1h
+ info: number of seconds since the last successful data collection
+ to: sysadmin
+
+# -----------------------------------------------------------------------------
+# tcp resets this host sends
+
+ alarm: 1m_ipv4_tcp_resets_sent
+ on: ipv4.tcphandshake
+ os: linux
+ hosts: *
+ lookup: average -1m at -10s unaligned absolute of OutRsts
+ units: tcp resets/s
+ every: 10s
+ info: average TCP RESETS this host is sending, over the last minute
+
+ alarm: 10s_ipv4_tcp_resets_sent
+ on: ipv4.tcphandshake
+ os: linux
+ hosts: *
+ lookup: average -10s unaligned absolute of OutRsts
+ units: tcp resets/s
+ every: 10s
+ warn: $this > ((($1m_ipv4_tcp_resets_sent < 5)?(5):($1m_ipv4_tcp_resets_sent)) * (($status >= $WARNING) ? (1) : (20)))
+ delay: up 20s down 60m multiplier 1.2 max 2h
+ options: no-clear-notification
+ info: average TCP RESETS this host is sending, over the last 10 seconds (this can be an indication that a port scan is in progress, or that a service running on this host has crashed; clear notification for this alarm will not be sent)
+ to: sysadmin
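+
+# worked example of the warn expression above: with a 1-minute baseline of
+# 2 resets/s the effective base is max(2, 5) = 5; while CLEAR the alarm
+# fires only above 5 * 20 = 100 resets/s, but once WARNING it stays raised
+# until the 10-second average drops below 5 * 1 = 5 resets/s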
+
+# -----------------------------------------------------------------------------
+# tcp resets this host receives
+
+ alarm: 1m_ipv4_tcp_resets_received
+ on: ipv4.tcphandshake
+ os: linux freebsd
+ hosts: *
+ lookup: average -1m at -10s unaligned absolute of AttemptFails
+ units: tcp resets/s
+ every: 10s
+ info: average TCP RESETS this host is receiving, over the last minute
+
+ alarm: 10s_ipv4_tcp_resets_received
+ on: ipv4.tcphandshake
+ os: linux freebsd
+ hosts: *
+ lookup: average -10s unaligned absolute of AttemptFails
+ units: tcp resets/s
+ every: 10s
+ warn: $this > ((($1m_ipv4_tcp_resets_received < 5)?(5):($1m_ipv4_tcp_resets_received)) * (($status >= $WARNING) ? (1) : (10)))
+ delay: up 20s down 60m multiplier 1.2 max 2h
+ options: no-clear-notification
+ info: average TCP RESETS this host is receiving, over the last 10 seconds (this can be an indication that a service this host depends on has crashed; clear notification for this alarm will not be sent)
+ to: sysadmin
diff --git a/health/health.d/udp_errors.conf b/health/health.d/udp_errors.conf
new file mode 100644
index 0000000..1e47b5c
--- /dev/null
+++ b/health/health.d/udp_errors.conf
@@ -0,0 +1,49 @@
+
+# you can disable an alarm notification by setting the 'to' line to: silent
+
+# -----------------------------------------------------------------------------
+
+ alarm: ipv4_udperrors_last_collected_secs
+ on: ipv4.udperrors
+ os: linux freebsd
+ hosts: *
+ calc: $now - $last_collected_t
+ units: seconds ago
+ every: 10s
+ warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every))
+ crit: $this > (($status == $CRITICAL) ? ($update_every) : (60 * $update_every))
+ delay: up 0 down 5m multiplier 1.5 max 1h
+ info: number of seconds since the last successful data collection
+ to: sysadmin
+
+# -----------------------------------------------------------------------------
+# UDP receive buffer errors
+
+ alarm: 1m_ipv4_udp_receive_buffer_errors
+ on: ipv4.udperrors
+ os: linux freebsd
+ hosts: *
+ lookup: average -1m unaligned absolute of RcvbufErrors
+ units: errors
+ every: 10s
+ warn: $this > 1
+ crit: $this > (($status == $CRITICAL) ? (0) : (10))
+ info: average number of UDP receive buffer errors during the last minute
+ delay: up 0 down 60m multiplier 1.2 max 2h
+ to: sysadmin
+
+# -----------------------------------------------------------------------------
+# UDP send buffer errors
+
+ alarm: 1m_ipv4_udp_send_buffer_errors
+ on: ipv4.udperrors
+ os: linux
+ hosts: *
+ lookup: average -1m unaligned absolute of SndbufErrors
+ units: errors
+ every: 10s
+ warn: $this > 1
+ crit: $this > (($status == $CRITICAL) ? (0) : (10))
+ info: average number of UDP send buffer errors during the last minute
+ delay: up 0 down 60m multiplier 1.2 max 2h
+ to: sysadmin
diff --git a/health/health.d/unbound.conf b/health/health.d/unbound.conf
new file mode 100644
index 0000000..bdedc11
--- /dev/null
+++ b/health/health.d/unbound.conf
@@ -0,0 +1,35 @@
+
+# make sure unbound is running
+
+template: unbound_last_collected_secs
+ on: unbound.queries
+ calc: $now - $last_collected_t
+ units: seconds ago
+ every: 10s
+ warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every))
+ crit: $this > (($status == $CRITICAL) ? ($update_every) : (60 * $update_every))
+ delay: down 5m multiplier 1.5 max 1h
+ info: number of seconds since the last successful data collection
+ to: sysadmin
+
+# make sure there are no overwritten/dropped queries in the request-list
+
+template: unbound_request_list_overwritten
+ on: unbound.request_list_jostle_list
+ lookup: average -60s unaligned absolute match-names of overwritten
+ units: queries
+ every: 10s
+ warn: $this > 5
+ delay: up 10 down 5m multiplier 1.5 max 1h
+ info: the number of overwritten queries in the request-list
+ to: sysadmin
+
+template: unbound_request_list_dropped
+ on: unbound.request_list_jostle_list
+ lookup: average -60s unaligned absolute match-names of dropped
+ units: queries
+ every: 10s
+ warn: $this > 0
+ delay: up 10 down 5m multiplier 1.5 max 1h
+ info: the number of dropped queries in the request-list
+ to: sysadmin
diff --git a/health/health.d/varnish.conf b/health/health.d/varnish.conf
new file mode 100644
index 0000000..cca7446
--- /dev/null
+++ b/health/health.d/varnish.conf
@@ -0,0 +1,9 @@
+ alarm: varnish_last_collected
+ on: varnish.uptime
+ calc: $now - $last_collected_t
+ units: seconds ago
+ every: 10s
+ warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every))
+ crit: $this > (($status == $CRITICAL) ? ($update_every) : (60 * $update_every))
+ info: number of seconds since the last successful data collection
+ to: sysadmin
diff --git a/health/health.d/vcsa.conf b/health/health.d/vcsa.conf
new file mode 100644
index 0000000..7bb98a9
--- /dev/null
+++ b/health/health.d/vcsa.conf
@@ -0,0 +1,122 @@
+
+# make sure vcsa is running and responding
+
+template: vcsa_last_collected_secs
+ on: vcsa.system_health
+ calc: $now - $last_collected_t
+ units: seconds ago
+ every: 10s
+ warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every))
+ crit: $this > (($status == $CRITICAL) ? ($update_every) : (60 * $update_every))
+ delay: down 5m multiplier 1.5 max 1h
+ info: number of seconds since the last successful data collection
+ to: sysadmin
+
+# Overall system health:
+# - 0: all components are healthy.
+# - 1: one or more components might become overloaded soon.
+# - 2: one or more components in the appliance might be degraded.
+# - 3: one or more components might be in an unusable status and the appliance might become unresponsive soon.
+# - 4: no health data is available.
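+#
+# Note: status 4 (no health data) matches neither the warn nor the crit
+# expression below, so it raises no alarm on its own.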
+
+template: vcsa_system_health
+ on: vcsa.system_health
+ lookup: max -10s unaligned of system
+ units: status
+ every: 10s
+ warn: ($this == 1) || ($this == 2)
+ crit: $this == 3
+ delay: down 1m multiplier 1.5 max 1h
+ info: overall system health status
+ to: sysadmin
+
+# Components health:
+# - 0: healthy.
+# - 1: healthy, but may have some problems.
+# - 2: degraded, and may have serious problems.
+# - 3: unavailable, or will stop functioning soon.
+# - 4: no health data is available.
+
+template: vcsa_swap_health
+ on: vcsa.components_health
+ lookup: max -10s unaligned of swap
+ units: status
+ every: 10s
+ warn: $this == 1
+ crit: ($this == 2) || ($this == 3)
+ delay: down 1m multiplier 1.5 max 1h
+ info: swap health status
+ to: sysadmin
+
+template: vcsa_storage_health
+ on: vcsa.components_health
+ lookup: max -10s unaligned of storage
+ units: status
+ every: 10s
+ warn: $this == 1
+ crit: ($this == 2) || ($this == 3)
+ delay: down 1m multiplier 1.5 max 1h
+ info: storage health status
+ to: sysadmin
+
+template: vcsa_mem_health
+ on: vcsa.components_health
+ lookup: max -10s unaligned of mem
+ units: status
+ every: 10s
+ warn: $this == 1
+ crit: ($this == 2) || ($this == 3)
+ delay: down 1m multiplier 1.5 max 1h
+ info: mem health status
+ to: sysadmin
+
+template: vcsa_load_health
+ on: vcsa.components_health
+ lookup: max -10s unaligned of load
+ units: status
+ every: 10s
+ warn: $this == 1
+ crit: ($this == 2) || ($this == 3)
+ delay: down 1m multiplier 1.5 max 1h
+ info: load health status
+ to: sysadmin
+
+template: vcsa_database_storage_health
+ on: vcsa.components_health
+ lookup: max -10s unaligned of database_storage
+ units: status
+ every: 10s
+ warn: $this == 1
+ crit: ($this == 2) || ($this == 3)
+ delay: down 1m multiplier 1.5 max 1h
+ info: database storage health status
+ to: sysadmin
+
+template: vcsa_applmgmt_health
+ on: vcsa.components_health
+ lookup: max -10s unaligned of applmgmt
+ units: status
+ every: 10s
+ warn: $this == 1
+ crit: ($this == 2) || ($this == 3)
+ delay: down 1m multiplier 1.5 max 1h
+ info: appl mgmt health status
+ to: sysadmin
+
+
+# Software updates health:
+# - 0: no updates available.
+# - 2: non-security updates are available.
+# - 3: security updates are available.
+# - 4: an error occurred while retrieving information on software updates.
+
+template: vcsa_software_updates_health
+ on: vcsa.software_updates_health
+ lookup: max -10s unaligned of software_packages
+ units: status
+ every: 10s
+ warn: $this == 4
+ crit: $this == 3
+ delay: down 1m multiplier 1.5 max 1h
+ info: software packages health status
+ to: sysadmin
diff --git a/health/health.d/vernemq.conf b/health/health.d/vernemq.conf
new file mode 100644
index 0000000..36bbaf8
--- /dev/null
+++ b/health/health.d/vernemq.conf
@@ -0,0 +1,399 @@
+
+# Availability
+
+template: vernemq_last_collected_secs
+ on: vernemq.node_uptime
+ calc: $now - $last_collected_t
+ units: seconds ago
+ every: 10s
+ warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every))
+ crit: $this > (($status == $CRITICAL) ? ($update_every) : (60 * $update_every))
+ delay: down 5m multiplier 1.5 max 1h
+ info: number of seconds since the last successful data collection
+ to: sysadmin
+
+# Socket errors
+
+template: vernemq_socket_errors
+ on: vernemq.socket_errors
+ lookup: sum -1m unaligned absolute of socket_error
+ units: errors
+ every: 10s
+ warn: $this > (($status == $WARNING) ? (0) : (5))
+ delay: down 5m multiplier 1.5 max 2h
+ info: socket errors in the last minute
+ to: sysadmin
+
+# Queues dropped/expired/unhandled PUBLISH messages
+
+template: vernemq_queue_message_drop
+ on: vernemq.queue_undelivered_messages
+ lookup: sum -1m unaligned absolute of queue_message_drop
+ units: dropped messages
+ every: 10s
+ warn: $this > (($status == $WARNING) ? (0) : (5))
+ delay: down 5m multiplier 1.5 max 2h
+ info: dropped messages due to full queues in the last minute
+ to: sysadmin
+
+template: vernemq_queue_message_expired
+ on: vernemq.queue_undelivered_messages
+ lookup: sum -1m unaligned absolute of queue_message_expired
+ units: expired messages
+ every: 10s
+ warn: $this > (($status == $WARNING) ? (0) : (15))
+ delay: down 5m multiplier 1.5 max 2h
+ info: messages which expired before delivery in the last minute
+ to: sysadmin
+
+template: vernemq_queue_message_unhandled
+ on: vernemq.queue_undelivered_messages
+ lookup: sum -1m unaligned absolute of queue_message_unhandled
+ units: unhandled messages
+ every: 10s
+ warn: $this > (($status == $WARNING) ? (0) : (5))
+ delay: down 5m multiplier 1.5 max 2h
+ info: unhandled messages (connections with clean session=true) in the last minute
+ to: sysadmin
+
+# Erlang VM
+
+template: vernemq_average_scheduler_utilization
+ on: vernemq.average_scheduler_utilization
+ lookup: average -10m unaligned
+ units: %
+ every: 1m
+ warn: $this > (($status >= $WARNING) ? (75) : (85))
+ crit: $this > (($status == $CRITICAL) ? (85) : (95))
+ delay: down 15m multiplier 1.5 max 1h
+ info: average scheduler utilization for the last 10 minutes
+ to: sysadmin
+
+# Cluster communication and netsplits
+
+template: vernemq_cluster_dropped
+ on: vernemq.cluster_dropped
+ lookup: average -1m unaligned
+ units: KiB/s
+ every: 10s
+ warn: $this > 0
+ delay: down 5m multiplier 1.5 max 1h
+ info: the amount of traffic dropped during communication with the cluster nodes in the last minute
+ to: sysadmin
+
+template: vernemq_netsplits
+ on: vernemq.netsplits
+ lookup: sum -1m unaligned absolute of netsplit_detected
+ units: netsplits
+ every: 10s
+ warn: $this > 0
+ delay: down 5m multiplier 1.5 max 2h
+ info: detected netsplits in the last minute
+ to: sysadmin
+
+# Unsuccessful CONNACK
+
+template: vernemq_mqtt_connack_sent_reason_success
+ on: vernemq.mqtt_connack_sent_reason
+ lookup: sum -1m unaligned absolute match-names of success
+ units: packets
+ every: 10s
+ info: successful v3/v5 CONNACK sent in the last minute
+ to: sysadmin
+
+template: vernemq_mqtt_connack_sent_reason_unsuccessful
+ on: vernemq.mqtt_connack_sent_reason
+ lookup: sum -1m unaligned absolute
+ calc: $this - $vernemq_mqtt_connack_sent_reason_success
+ units: packets
+ every: 10s
+ warn: $this > 0
+ delay: down 5m multiplier 1.5 max 2h
+ info: unsuccessful v3/v5 CONNACK sent in the last minute
+ to: sysadmin
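+
+# (the unsuccessful count above is the 1-minute total minus the success
+# reason, so every non-success reason code is covered without listing each
+# one; the same pattern is used for DISCONNECT, PUBACK, PUBREC, PUBREL and
+# PUBCOMP below)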
+
+# Not normal DISCONNECT
+
+template: vernemq_mqtt_disconnect_received_reason_normal_disconnect
+ on: vernemq.mqtt_disconnect_received_reason
+ lookup: sum -1m unaligned absolute match-names of normal_disconnect
+ units: packets
+ every: 10s
+ info: normal v5 DISCONNECT received in the last minute
+ to: sysadmin
+
+template: vernemq_mqtt_disconnect_sent_reason_normal_disconnect
+ on: vernemq.mqtt_disconnect_sent_reason
+ lookup: sum -1m unaligned absolute match-names of normal_disconnect
+ units: packets
+ every: 10s
+ info: normal v5 DISCONNECT sent in the last minute
+ to: sysadmin
+
+template: vernemq_mqtt_disconnect_received_reason_not_normal
+ on: vernemq.mqtt_disconnect_received_reason
+ lookup: sum -1m unaligned absolute
+ calc: $this - $vernemq_mqtt_disconnect_received_reason_normal_disconnect
+ units: packets
+ every: 10s
+ warn: $this > 0
+ delay: down 5m multiplier 1.5 max 2h
+ info: not normal v5 DISCONNECT received in the last minute
+ to: sysadmin
+
+template: vernemq_mqtt_disconnect_sent_reason_not_normal
+ on: vernemq.mqtt_disconnect_sent_reason
+ lookup: sum -1m unaligned absolute
+ calc: $this - $vernemq_mqtt_disconnect_sent_reason_normal_disconnect
+ units: packets
+ every: 10s
+ warn: $this > 0
+ delay: down 5m multiplier 1.5 max 2h
+ info: not normal v5 DISCONNECT sent in the last minute
+ to: sysadmin
+
+# SUBSCRIBE errors and unauthorized attempts
+
+template: vernemq_mqtt_subscribe_error
+ on: vernemq.mqtt_subscribe_error
+ lookup: sum -1m unaligned absolute
+ units: failed ops
+ every: 10s
+ warn: $this > 0
+ delay: down 5m multiplier 1.5 max 2h
+ info: failed v3/v5 SUBSCRIBE operations in the last minute
+ to: sysadmin
+
+template: vernemq_mqtt_subscribe_auth_error
+ on: vernemq.mqtt_subscribe_auth_error
+ lookup: sum -1m unaligned absolute
+ units: attempts
+ every: 10s
+ warn: $this > 0
+ delay: down 5m multiplier 1.5 max 2h
+ info: unauthorized v3/v5 SUBSCRIBE attempts in the last minute
+ to: sysadmin
+
+# UNSUBSCRIBE errors
+
+template: vernemq_mqtt_unsubscribe_error
+ on: vernemq.mqtt_unsubscribe_error
+ lookup: sum -1m unaligned absolute
+ units: failed ops
+ every: 10s
+ warn: $this > 0
+ delay: down 5m multiplier 1.5 max 2h
+ info: failed v3/v5 UNSUBSCRIBE operations in the last minute
+ to: sysadmin
+
+# PUBLISH errors and unauthorized attempts
+
+template: vernemq_mqtt_publish_errors
+ on: vernemq.mqtt_publish_errors
+ lookup: sum -1m unaligned absolute
+ units: failed ops
+ every: 10s
+ warn: $this > 0
+ delay: down 5m multiplier 1.5 max 2h
+ info: failed v3/v5 PUBLISH operations in the last minute
+ to: sysadmin
+
+template: vernemq_mqtt_publish_auth_errors
+ on: vernemq.mqtt_publish_auth_errors
+ lookup: sum -1m unaligned absolute
+ units: attempts
+ every: 10s
+ warn: $this > 0
+ delay: down 5m multiplier 1.5 max 2h
+ info: unauthorized v3/v5 PUBLISH attempts in the last minute
+ to: sysadmin
+
+# Unsuccessful and unexpected PUBACK
+
+template: vernemq_mqtt_puback_received_reason_success
+ on: vernemq.mqtt_puback_received_reason
+ lookup: sum -1m unaligned absolute match-names of success
+ units: packets
+ every: 10s
+ info: successful v5 PUBACK received in the last minute
+ to: sysadmin
+
+template: vernemq_mqtt_puback_sent_reason_success
+ on: vernemq.mqtt_puback_sent_reason
+ lookup: sum -1m unaligned absolute match-names of success
+ units: packets
+ every: 10s
+ info: successful v5 PUBACK sent in the last minute
+ to: sysadmin
+
+template: vernemq_mqtt_puback_received_reason_unsuccessful
+ on: vernemq.mqtt_puback_received_reason
+ lookup: sum -1m unaligned absolute
+ calc: $this - $vernemq_mqtt_puback_received_reason_success
+ units: packets
+ every: 10s
+ warn: $this > 0
+ delay: down 5m multiplier 1.5 max 2h
+ info: unsuccessful v5 PUBACK received in the last minute
+ to: sysadmin
+
+template: vernemq_mqtt_puback_sent_reason_unsuccessful
+ on: vernemq.mqtt_puback_sent_reason
+ lookup: sum -1m unaligned absolute
+ calc: $this - $vernemq_mqtt_puback_sent_reason_success
+ units: packets
+ every: 10s
+ warn: $this > 0
+ delay: down 5m multiplier 1.5 max 2h
+ info: unsuccessful v5 PUBACK sent in the last minute
+ to: sysadmin
+
+template: vernemq_mqtt_puback_unexpected
+ on: vernemq.mqtt_puback_invalid_error
+ lookup: sum -1m unaligned absolute
+ units: messages
+ every: 10s
+ warn: $this > 0
+ delay: down 5m multiplier 1.5 max 2h
+ info: unexpected v3/v5 PUBACK received in the last minute
+ to: sysadmin
+
+# Unsuccessful and unexpected PUBREC
+
+template: vernemq_mqtt_pubrec_received_reason_success
+ on: vernemq.mqtt_pubrec_received_reason
+ lookup: sum -1m unaligned absolute match-names of success
+ units: packets
+ every: 10s
+ info: successful v5 PUBREC received in the last minute
+ to: sysadmin
+
+template: vernemq_mqtt_pubrec_sent_reason_success
+ on: vernemq.mqtt_pubrec_sent_reason
+ lookup: sum -1m unaligned absolute match-names of success
+ units: packets
+ every: 10s
+ info: successful v5 PUBREC sent in the last minute
+ to: sysadmin
+
+template: vernemq_mqtt_pubrec_received_reason_unsuccessful
+ on: vernemq.mqtt_pubrec_received_reason
+ lookup: sum -1m unaligned absolute
+ calc: $this - $vernemq_mqtt_pubrec_received_reason_success
+ units: packets
+ every: 10s
+ warn: $this > 0
+ delay: down 5m multiplier 1.5 max 2h
+ info: unsuccessful v5 PUBREC received in the last minute
+ to: sysadmin
+
+template: vernemq_mqtt_pubrec_sent_reason_unsuccessful
+ on: vernemq.mqtt_pubrec_sent_reason
+ lookup: sum -1m unaligned absolute
+ calc: $this - $vernemq_mqtt_pubrec_sent_reason_success
+ units: packets
+ every: 10s
+ warn: $this > 0
+ delay: down 5m multiplier 1.5 max 2h
+ info: unsuccessful v5 PUBREC sent in the last minute
+ to: sysadmin
+
+template: vernemq_mqtt_pubrec_invalid_error
+ on: vernemq.mqtt_pubrec_invalid_error
+ lookup: sum -1m unaligned absolute
+ units: messages
+ every: 10s
+ warn: $this > 0
+ delay: down 5m multiplier 1.5 max 2h
+ info: unexpected v3 PUBREC received in the last minute
+ to: sysadmin
+
+# Unsuccessful PUBREL
+
+template: vernemq_mqtt_pubrel_received_reason_success
+ on: vernemq.mqtt_pubrel_received_reason
+ lookup: sum -1m unaligned absolute match-names of success
+ units: packets
+ every: 10s
+ info: successful v5 PUBREL received in the last minute
+ to: sysadmin
+
+template: vernemq_mqtt_pubrel_sent_reason_success
+ on: vernemq.mqtt_pubrel_sent_reason
+ lookup: sum -1m unaligned absolute match-names of success
+ units: packets
+ every: 10s
+ info: successful v5 PUBREL sent in the last minute
+ to: sysadmin
+
+template: vernemq_mqtt_pubrel_received_reason_unsuccessful
+ on: vernemq.mqtt_pubrel_received_reason
+ lookup: sum -1m unaligned absolute
+ calc: $this - $vernemq_mqtt_pubrel_received_reason_success
+ units: packets
+ every: 10s
+ warn: $this > 0
+ delay: down 5m multiplier 1.5 max 2h
+ info: unsuccessful v5 PUBREL received in the last minute
+ to: sysadmin
+
+template: vernemq_mqtt_pubrel_sent_reason_unsuccessful
+ on: vernemq.mqtt_pubrel_sent_reason
+ lookup: sum -1m unaligned absolute
+ calc: $this - $vernemq_mqtt_pubrel_sent_reason_success
+ units: packets
+ every: 10s
+ warn: $this > 0
+ delay: down 5m multiplier 1.5 max 2h
+ info: unsuccessful v5 PUBREL sent in the last minute
+ to: sysadmin
+
+# Unsuccessful and unexpected PUBCOMP
+
+template: vernemq_mqtt_pubcomp_received_reason_success
+ on: vernemq.mqtt_pubcomp_received_reason
+ lookup: sum -1m unaligned absolute match-names of success
+ units: packets
+ every: 10s
+ info: successful v5 PUBCOMP received in the last minute
+ to: sysadmin
+
+template: vernemq_mqtt_pubcomp_sent_reason_success
+ on: vernemq.mqtt_pubcomp_sent_reason
+ lookup: sum -1m unaligned absolute match-names of success
+ units: packets
+ every: 10s
+ info: successful v5 PUBCOMP sent in the last minute
+ to: sysadmin
+
+template: vernemq_mqtt_pubcomp_received_reason_unsuccessful
+ on: vernemq.mqtt_pubcomp_received_reason
+ lookup: sum -1m unaligned absolute
+ calc: $this - $vernemq_mqtt_pubcomp_received_reason_success
+ units: packets
+ every: 10s
+ warn: $this > 0
+ delay: down 5m multiplier 1.5 max 2h
+ info: unsuccessful v5 PUBCOMP received in the last minute
+ to: sysadmin
+
+template: vernemq_mqtt_pubcomp_sent_reason_unsuccessful
+ on: vernemq.mqtt_pubcomp_sent_reason
+ lookup: sum -1m unaligned absolute
+ calc: $this - $vernemq_mqtt_pubcomp_sent_reason_success
+ units: packets
+ every: 10s
+ warn: $this > 0
+ delay: down 5m multiplier 1.5 max 2h
+ info: unsuccessful v5 PUBCOMP sent in the last minute
+ to: sysadmin
+
+template: vernemq_mqtt_pubcomp_unexpected
+ on: vernemq.mqtt_pubcomp_invalid_error
+ lookup: sum -1m unaligned absolute
+ units: messages
+ every: 10s
+ warn: $this > 0
+ delay: down 5m multiplier 1.5 max 2h
+ info: unexpected v3/v5 PUBCOMP received in the last minute
+ to: sysadmin
diff --git a/health/health.d/vsphere.conf b/health/health.d/vsphere.conf
new file mode 100644
index 0000000..d8b2be1
--- /dev/null
+++ b/health/health.d/vsphere.conf
@@ -0,0 +1,157 @@
+
+# you can disable an alarm notification by setting the 'to' line to: silent
+
+# -----------------------------------------------VM Specific------------------------------------------------------------
+# Memory
+
+template: vsphere_vm_mem_usage
+ on: vsphere.vm_mem_usage_percentage
+ hosts: *
+ calc: $used
+ units: %
+ every: 20s
+ warn: $this > (($status >= $WARNING) ? (80) : (90))
+ crit: $this > (($status == $CRITICAL) ? (90) : (98))
+ delay: down 15m multiplier 1.5 max 1h
+ info: used RAM
+
+# -----------------------------------------------HOST Specific----------------------------------------------------------
+# Memory
+
+template: vsphere_host_mem_usage
+ on: vsphere.host_mem_usage_percentage
+ hosts: *
+ calc: $used
+ units: %
+ every: 20s
+ warn: $this > (($status >= $WARNING) ? (80) : (90))
+ crit: $this > (($status == $CRITICAL) ? (90) : (98))
+ delay: down 15m multiplier 1.5 max 1h
+ info: used RAM
+
+# Network errors
+
+template: vsphere_inbound_packets_errors
+ on: vsphere.net_errors_total
+ hosts: *
+families: *
+ lookup: sum -10m unaligned absolute match-names of rx
+ units: packets
+ every: 1m
+ warn: $this >= 5
+ delay: down 1h multiplier 1.5 max 2h
+ info: interface inbound errors in the last 10 minutes
+ to: sysadmin
+
+template: vsphere_outbound_packets_errors
+ on: vsphere.net_errors_total
+ hosts: *
+families: *
+ lookup: sum -10m unaligned absolute match-names of tx
+ units: packets
+ every: 1m
+ warn: $this >= 5
+ delay: down 1h multiplier 1.5 max 2h
+ info: interface outbound errors in the last 10 minutes
+ to: sysadmin
+
+# Network errors ratio
+
+template: vsphere_inbound_packets_errors_ratio
+ on: vsphere.net_packets_total
+ hosts: *
+families: *
+ lookup: sum -10m unaligned absolute match-names of rx
+ calc: (($vsphere_inbound_packets_errors != nan AND $this > 0) ? ($vsphere_inbound_packets_errors * 100 / $this) : (0))
+ units: %
+ every: 1m
+ warn: $this >= 0.1
+ crit: $this >= 2
+ delay: down 1h multiplier 1.5 max 2h
+ info: the ratio of inbound errors vs the total number of received packets of the network interface, during the last 10 minutes
+ to: sysadmin
+
+template: vsphere_outbound_packets_errors_ratio
+ on: vsphere.net_packets_total
+ hosts: *
+families: *
+ lookup: sum -10m unaligned absolute match-names of tx
+ calc: (($vsphere_outbound_packets_errors != nan AND $this > 0) ? ($vsphere_outbound_packets_errors * 100 / $this) : (0))
+ units: %
+ every: 1m
+ warn: $this >= 0.1
+ crit: $this >= 2
+ delay: down 1h multiplier 1.5 max 2h
+ info: the ratio of outbound errors vs the total number of sent packets of the network interface, during the last 10 minutes
+ to: sysadmin
+
+# -----------------------------------------------Common-------------------------------------------------------------------
+# CPU
+
+template: vsphere_cpu_usage
+ on: vsphere.cpu_usage_total
+ hosts: *
+ lookup: average -10m unaligned match-names of used
+ units: %
+ every: 20s
+ warn: $this > (($status >= $WARNING) ? (75) : (85))
+ crit: $this > (($status == $CRITICAL) ? (85) : (95))
+ delay: down 15m multiplier 1.5 max 1h
+ info: cpu utilization for the last 10 minutes
+ to: sysadmin
+
+# Network drops
+
+template: vsphere_inbound_packets_dropped
+ on: vsphere.net_drops_total
+ hosts: *
+families: *
+ lookup: sum -10m unaligned absolute match-names of rx
+ units: packets
+ every: 1m
+ warn: $this >= 5
+ delay: down 1h multiplier 1.5 max 2h
+ info: interface inbound dropped packets in the last 10 minutes
+ to: sysadmin
+
+template: vsphere_outbound_packets_dropped
+ on: vsphere.net_drops_total
+ hosts: *
+families: *
+ lookup: sum -10m unaligned absolute match-names of tx
+ units: packets
+ every: 1m
+ warn: $this >= 5
+ delay: down 1h multiplier 1.5 max 2h
+ info: interface outbound dropped packets in the last 10 minutes
+ to: sysadmin
+
+# Network drops ratio
+
+template: vsphere_inbound_packets_dropped_ratio
+ on: vsphere.net_packets_total
+ hosts: *
+families: *
+ lookup: sum -10m unaligned absolute match-names of rx
+ calc: (($vsphere_inbound_packets_dropped != nan AND $this > 0) ? ($vsphere_inbound_packets_dropped * 100 / $this) : (0))
+ units: %
+ every: 1m
+ warn: $this >= 0.1
+ crit: $this >= 2
+ delay: down 1h multiplier 1.5 max 2h
+ info: the ratio of inbound dropped packets vs the total number of received packets of the network interface, during the last 10 minutes
+ to: sysadmin
+
+template: vsphere_outbound_packets_dropped_ratio
+ on: vsphere.net_packets_total
+ hosts: *
+families: *
+ lookup: sum -10m unaligned absolute match-names of tx
+ calc: (($vsphere_outbound_packets_dropped != nan AND $this > 0) ? ($vsphere_outbound_packets_dropped * 100 / $this) : (0))
+ units: %
+ every: 1m
+ warn: $this >= 0.1
+ crit: $this >= 2
+ delay: down 1h multiplier 1.5 max 2h
+ info: the ratio of outbound dropped packets vs the total number of sent packets of the network interface, during the last 10 minutes
+ to: sysadmin
diff --git a/health/health.d/web_log.conf b/health/health.d/web_log.conf
new file mode 100644
index 0000000..44de38a
--- /dev/null
+++ b/health/health.d/web_log.conf
@@ -0,0 +1,385 @@
+
+# make sure we can collect web log data
+
+template: last_collected_secs
+ on: web_log.response_codes
+families: *
+ calc: $now - $last_collected_t
+ units: seconds ago
+ every: 10s
+ warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every))
+ crit: $this > (($status == $CRITICAL) ? ($update_every) : (60 * $update_every))
+ delay: down 5m multiplier 1.5 max 1h
+ info: number of seconds since the last successful data collection
+ to: webmaster
+
+
+# -----------------------------------------------------------------------------
+# high level response code alarms
+
+# the following alarms trigger only when there are enough data.
+# we assume there are enough data when:
+#
+# $1m_requests > 120
+#
+# i.e. when there are at least 120 requests during the last minute
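+#
+# (120 requests per minute is an average of just 2 requests/s; below that,
+# a handful of responses would swing these percentages wildly, so the
+# alarms stay silent)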
+
+template: 1m_requests
+ on: web_log.response_statuses
+families: *
+ lookup: sum -1m unaligned
+ calc: ($this == 0)?(1):($this)
+ units: requests
+ every: 10s
+ info: the sum of all HTTP requests over the last minute
+
+template: 1m_successful
+ on: web_log.response_statuses
+families: *
+ lookup: sum -1m unaligned of successful_requests
+ calc: $this * 100 / $1m_requests
+ units: %
+ every: 10s
+ warn: ($1m_requests > 120) ? ($this < (($status >= $WARNING ) ? ( 95 ) : ( 85 )) ) : ( 0 )
+ crit: ($1m_requests > 120) ? ($this < (($status == $CRITICAL) ? ( 85 ) : ( 75 )) ) : ( 0 )
+ delay: up 2m down 15m multiplier 1.5 max 1h
+ info: the ratio of successful HTTP responses (1xx, 2xx, 304, 401) over the last minute
+ to: webmaster
+
+template: 1m_redirects
+ on: web_log.response_statuses
+families: *
+ lookup: sum -1m unaligned of redirects
+ calc: $this * 100 / $1m_requests
+ units: %
+ every: 10s
+ warn: ($1m_requests > 120) ? ($this > (($status >= $WARNING ) ? ( 1 ) : ( 20 )) ) : ( 0 )
+ crit: ($1m_requests > 120) ? ($this > (($status == $CRITICAL) ? ( 20 ) : ( 30 )) ) : ( 0 )
+ delay: up 2m down 15m multiplier 1.5 max 1h
+ info: the ratio of HTTP redirects (3xx except 304) over the last minute
+ to: webmaster
+
+template: 1m_bad_requests
+ on: web_log.response_statuses
+families: *
+ lookup: sum -1m unaligned of bad_requests
+ calc: $this * 100 / $1m_requests
+ units: %
+ every: 10s
+ warn: ($1m_requests > 120) ? ($this > (($status >= $WARNING) ? ( 10 ) : ( 30 )) ) : ( 0 )
+ crit: ($1m_requests > 120) ? ($this > (($status == $CRITICAL) ? ( 30 ) : ( 50 )) ) : ( 0 )
+ delay: up 2m down 15m multiplier 1.5 max 1h
+ info: the ratio of HTTP bad requests (4xx except 401) over the last minute
+ to: webmaster
+
+template: 1m_internal_errors
+ on: web_log.response_statuses
+families: *
+ lookup: sum -1m unaligned of server_errors
+ calc: $this * 100 / $1m_requests
+ units: %
+ every: 10s
+ warn: ($1m_requests > 120) ? ($this > (($status >= $WARNING) ? ( 1 ) : ( 2 )) ) : ( 0 )
+ crit: ($1m_requests > 120) ? ($this > (($status == $CRITICAL) ? ( 2 ) : ( 5 )) ) : ( 0 )
+ delay: up 2m down 15m multiplier 1.5 max 1h
+ info: the ratio of HTTP internal server errors (5xx), over the last minute
+ to: webmaster
+
+# unmatched lines
+
+# the following alarms trigger only when there are enough data.
+# we assume there are enough data when:
+#
+# $1m_total_requests > 120
+#
+# i.e. when there are at least 120 requests during the last minute
+
+template: 1m_total_requests
+ on: web_log.response_codes
+families: *
+ lookup: sum -1m unaligned
+ calc: ($this == 0)?(1):($this)
+ units: requests
+ every: 10s
+ info: the sum of all HTTP requests over the last minute
+
+template: 1m_unmatched
+ on: web_log.response_codes
+families: *
+ lookup: sum -1m unaligned of unmatched
+ calc: $this * 100 / $1m_total_requests
+ units: %
+ every: 10s
+ warn: ($1m_total_requests > 120) ? ($this > 1) : ( 0 )
+ delay: up 1m down 5m multiplier 1.5 max 1h
+ info: the ratio of unmatched lines, over the last minute
+ to: webmaster
+
+# -----------------------------------------------------------------------------
+# web slow
+
+# the following alarms trigger only when there are enough data.
+# we assume there are enough data when:
+#
+# $1m_requests > 120
+#
+# i.e. when there are at least 120 requests during the last minute
+
+template: 10m_response_time
+ on: web_log.response_time
+families: *
+ lookup: average -10m unaligned of avg
+ units: ms
+ every: 30s
+ info: the average time to respond to HTTP requests, over the last 10 minutes
+
+template: web_slow
+ on: web_log.response_time
+families: *
+ lookup: average -1m unaligned of avg
+ units: ms
+ every: 10s
+ green: 500
+ red: 1000
+ warn: ($1m_requests > 120) ? ($this > $green && $this > ($10m_response_time * 2) ) : ( 0 )
+ crit: ($1m_requests > 120) ? ($this > $red && $this > ($10m_response_time * 4) ) : ( 0 )
+ delay: down 15m multiplier 1.5 max 1h
+ info: the average time to respond to HTTP requests, over the last 1 minute
+ options: no-clear-notification
+ to: webmaster
+
+# -----------------------------------------------------------------------------
+# web too many or too few requests
+
+# the following alarms trigger only when there are enough data.
+# we assume there are enough data when:
+#
+# $5m_successful_old > 120
+#
+# i.e. when there were at least 120 requests during the 5 minutes starting
+# at -10m and ending at -5m
+
+template: 5m_successful_old
+ on: web_log.response_statuses
+families: *
+ lookup: average -5m at -5m unaligned of successful_requests
+ units: requests/s
+ every: 30s
+ info: average rate of successful HTTP requests over the last 5 minutes
+
+template: 5m_successful
+ on: web_log.response_statuses
+families: *
+ lookup: average -5m unaligned of successful_requests
+ units: requests/s
+ every: 30s
+ info: average rate of successful HTTP requests over the last 5 minutes
+
+template: 5m_requests_ratio
+ on: web_log.response_codes
+families: *
+ calc: ($5m_successful_old > 0)?($5m_successful * 100 / $5m_successful_old):(100)
+ units: %
+ every: 30s
+ warn: ($5m_successful_old > 120) ? ($this > 200 OR $this < 50) : (0)
+ crit: ($5m_successful_old > 120) ? ($this > 400 OR $this < 25) : (0)
+ delay: down 15m multiplier 1.5 max 1h
+ options: no-clear-notification
+ info: the percentage of successful web requests over the last 5 minutes, \
+ compared with the previous 5 minutes \
+ (clear notification for this alarm will not be sent)
+ to: webmaster
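+
+# worked example of the calc above: 150 requests/s in the previous window
+# and 375 requests/s now gives 375 * 100 / 150 = 250%, outside the 50%-200%
+# band, so the warning fires; when the old window averaged 120 requests/s
+# or less, the expressions short-circuit to 0 and never alarm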
+
+
+
+# ---------------------------------------------------GO-VERSION---------------------------------------------------------
+
+# make sure we can collect web log data
+
+template: web_log_last_collected_secs
+ on: web_log.requests
+families: *
+ calc: $now - $last_collected_t
+ units: seconds ago
+ every: 10s
+ warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every))
+ crit: $this > (($status == $CRITICAL) ? ($update_every) : (60 * $update_every))
+ delay: down 5m multiplier 1.5 max 1h
+ info: number of seconds since the last successful data collection
+ to: webmaster
+
+# unmatched lines
+
+# the following alarms trigger only when there are enough data.
+# we assume there are enough data when:
+#
+# $1m_total_requests > 120
+#
+# i.e. when there are at least 120 requests during the last minute
+
+template: web_log_1m_total_requests
+ on: web_log.requests
+families: *
+ lookup: sum -1m unaligned
+ calc: ($this == 0)?(1):($this)
+ units: requests
+ every: 10s
+ info: the sum of all HTTP requests over the last minute
+
+template: web_log_1m_unmatched
+ on: web_log.excluded_requests
+families: *
+ lookup: sum -1m unaligned of unmatched
+ calc: $this * 100 / $web_log_1m_total_requests
+ units: %
+ every: 10s
+ warn: ($web_log_1m_total_requests > 120) ? ($this > 1) : ( 0 )
+ delay: up 1m down 5m multiplier 1.5 max 1h
+ info: the ratio of unmatched lines, over the last minute
+ to: webmaster
+
+# -----------------------------------------------------------------------------
+# high level response code alarms
+
+# the following alarms trigger only when there are enough data.
+# we assume there are enough data when:
+#
+# $1m_requests > 120
+#
+# i.e. when there are at least 120 requests during the last minute
+
+template: web_log_1m_requests
+ on: web_log.type_requests
+families: *
+ lookup: sum -1m unaligned
+ calc: ($this == 0)?(1):($this)
+ units: requests
+ every: 10s
+ info: the sum of all HTTP requests over the last minute
+
+template: web_log_1m_successful
+ on: web_log.type_requests
+families: *
+ lookup: sum -1m unaligned of success
+ calc: $this * 100 / $web_log_1m_requests
+ units: %
+ every: 10s
+ warn: ($web_log_1m_requests > 120) ? ($this < (($status >= $WARNING ) ? ( 95 ) : ( 85 )) ) : ( 0 )
+ crit: ($web_log_1m_requests > 120) ? ($this < (($status == $CRITICAL) ? ( 85 ) : ( 75 )) ) : ( 0 )
+ delay: up 2m down 15m multiplier 1.5 max 1h
+ info: the ratio of successful HTTP responses (1xx, 2xx, 304, 401) over the last minute
+ to: webmaster
+
+template: web_log_1m_redirects
+ on: web_log.type_requests
+families: *
+ lookup: sum -1m unaligned of redirect
+ calc: $this * 100 / $web_log_1m_requests
+ units: %
+ every: 10s
+ warn: ($web_log_1m_requests > 120) ? ($this > (($status >= $WARNING ) ? ( 1 ) : ( 20 )) ) : ( 0 )
+ crit: ($web_log_1m_requests > 120) ? ($this > (($status == $CRITICAL) ? ( 20 ) : ( 30 )) ) : ( 0 )
+ delay: up 2m down 15m multiplier 1.5 max 1h
+ info: the ratio of HTTP redirects (3xx except 304) over the last minute
+ to: webmaster
+
+template: web_log_1m_bad_requests
+ on: web_log.type_requests
+families: *
+ lookup: sum -1m unaligned of bad
+ calc: $this * 100 / $web_log_1m_requests
+ units: %
+ every: 10s
+ warn: ($web_log_1m_requests > 120) ? ($this > (($status >= $WARNING) ? ( 10 ) : ( 30 )) ) : ( 0 )
+ crit: ($web_log_1m_requests > 120) ? ($this > (($status == $CRITICAL) ? ( 30 ) : ( 50 )) ) : ( 0 )
+ delay: up 2m down 15m multiplier 1.5 max 1h
+ info: the ratio of HTTP bad requests (4xx except 401) over the last minute
+ to: webmaster
+
+template: web_log_1m_internal_errors
+ on: web_log.type_requests
+families: *
+ lookup: sum -1m unaligned of error
+ calc: $this * 100 / $web_log_1m_requests
+ units: %
+ every: 10s
+ warn: ($web_log_1m_requests > 120) ? ($this > (($status >= $WARNING) ? ( 1 ) : ( 2 )) ) : ( 0 )
+ crit: ($web_log_1m_requests > 120) ? ($this > (($status == $CRITICAL) ? ( 2 ) : ( 5 )) ) : ( 0 )
+ delay: up 2m down 15m multiplier 1.5 max 1h
+ info: the ratio of HTTP internal server errors (5xx), over the last minute
+ to: webmaster
+
+# -----------------------------------------------------------------------------
+# web slow
+
+# the following alarms trigger only when there are enough data.
+# we assume there are enough data when:
+#
+# $1m_requests > 120
+#
+# i.e. when there are at least 120 requests during the last minute
+
+template: web_log_10m_response_time
+ on: web_log.request_processing_time
+families: *
+ lookup: average -10m unaligned of avg
+ units: ms
+ every: 30s
+ info: the average time to respond to HTTP requests, over the last 10 minutes
+
+template: web_log_web_slow
+ on: web_log.request_processing_time
+families: *
+ lookup: average -1m unaligned of avg
+ units: ms
+ every: 10s
+ green: 500
+ red: 1000
+ warn: ($web_log_1m_requests > 120) ? ($this > $green && $this > ($web_log_10m_response_time * 2) ) : ( 0 )
+ crit: ($web_log_1m_requests > 120) ? ($this > $red && $this > ($web_log_10m_response_time * 4) ) : ( 0 )
+ delay: down 15m multiplier 1.5 max 1h
+ info: the average time to respond to HTTP requests, over the last 1 minute
+ options: no-clear-notification
+ to: webmaster
+
+# -----------------------------------------------------------------------------
+# web too many or too few requests
+
+# the following alarms trigger only when there are enough data.
+# we assume there are enough data when:
+#
+# $5m_successful_old > 120
+#
+# i.e. when there were at least 120 requests during the 5 minutes starting
+# at -10m and ending at -5m
+
+template: web_log_5m_successful_old
+ on: web_log.type_requests
+families: *
+ lookup: average -5m at -5m unaligned of success
+ units: requests/s
+ every: 30s
+ info: average rate of successful HTTP requests over the last 5 minutes
+
+template: web_log_5m_successful
+ on: web_log.type_requests
+families: *
+ lookup: average -5m unaligned of success
+ units: requests/s
+ every: 30s
+ info: average rate of successful HTTP requests over the last 5 minutes
+
+template: web_log_5m_requests_ratio
+ on: web_log.type_requests
+families: *
+ calc: ($web_log_5m_successful_old > 0)?($web_log_5m_successful * 100 / $web_log_5m_successful_old):(100)
+ units: %
+ every: 30s
+ warn: ($web_log_5m_successful_old > 120) ? ($this > 200 OR $this < 50) : (0)
+ crit: ($web_log_5m_successful_old > 120) ? ($this > 400 OR $this < 25) : (0)
+ delay: down 15m multiplier 1.5 max 1h
+ options: no-clear-notification
+ info: the percentage of successful web requests over the last 5 minutes, \
+ compared with the previous 5 minutes \
+ (clear notification for this alarm will not be sent)
+ to: webmaster
diff --git a/health/health.d/whoisquery.conf b/health/health.d/whoisquery.conf
new file mode 100644
index 0000000..275e11d
--- /dev/null
+++ b/health/health.d/whoisquery.conf
@@ -0,0 +1,24 @@
+
+# make sure whoisquery is running
+
+template: whoisquery_last_collected_secs
+ on: whoisquery.time_until_expiration
+ calc: $now - $last_collected_t
+ units: seconds ago
+ every: 60s
+ warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every))
+ crit: $this > (($status == $CRITICAL) ? ($update_every) : (60 * $update_every))
+ delay: down 5m multiplier 1.5 max 1h
+ info: number of seconds since the last successful data collection
+ to: webmaster
+
+
+template: whoisquery_days_until_expiration
+ on: whoisquery.time_until_expiration
+ calc: $expiry
+ units: seconds
+ every: 60s
+ warn: $this < $days_until_expiration_warning*24*60*60
+ crit: $this < $days_until_expiration_critical*24*60*60
+ info: domain time until expiration
+ to: webmaster
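+
+# for example, with a warning threshold of 14 days the comparison above is
+# $this < 14 * 24 * 60 * 60 = 1209600 seconds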
diff --git a/health/health.d/wmi.conf b/health/health.d/wmi.conf
new file mode 100644
index 0000000..0441fc1
--- /dev/null
+++ b/health/health.d/wmi.conf
@@ -0,0 +1,130 @@
+
+# you can disable an alarm notification by setting the 'to' line to: silent
+
+## Availability
+
+template: wmi_last_collected_secs
+ on: cpu.collector_duration
+ calc: $now - $last_collected_t
+ units: seconds ago
+ every: 10s
+ warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every))
+ crit: $this > (($status == $CRITICAL) ? ($update_every) : (60 * $update_every))
+ delay: down 5m multiplier 1.5 max 1h
+ info: number of seconds since the last successful data collection
+ to: sysadmin
+
+## CPU
+
+template: wmi_10min_cpu_usage
+ on: wmi.cpu_utilization_total
+ os: linux
+ hosts: *
+ lookup: average -10m unaligned match-names of dpc,user,privileged,interrupt
+ units: %
+ every: 1m
+ warn: $this > (($status >= $WARNING) ? (75) : (85))
+ crit: $this > (($status == $CRITICAL) ? (85) : (95))
+ delay: down 15m multiplier 1.5 max 1h
+ info: cpu utilization for the last 10 minutes
+ to: sysadmin
+
+
+## Memory
+
+template: wmi_ram_in_use
+ on: wmi.memory_utilization
+ os: linux
+ hosts: *
+ calc: ($used) * 100 / ($used + $available)
+ units: %
+ every: 10s
+ warn: $this > (($status >= $WARNING) ? (80) : (90))
+ crit: $this > (($status == $CRITICAL) ? (90) : (98))
+ delay: down 15m multiplier 1.5 max 1h
+ info: used RAM
+ to: sysadmin
+
+template: wmi_swap_in_use
+ on: wmi.memory_swap_utilization
+ os: linux
+ hosts: *
+ calc: ($used) * 100 / ($used + $available)
+ units: %
+ every: 10s
+ warn: $this > (($status >= $WARNING) ? (80) : (90))
+ crit: $this > (($status == $CRITICAL) ? (90) : (98))
+ delay: down 15m multiplier 1.5 max 1h
+ info: used Swap
+ to: sysadmin
+
+
+## Network
+
+template: inbound_packets_discarded
+ on: wmi.net_discarded
+ os: linux
+ hosts: *
+families: *
+ lookup: sum -10m unaligned absolute match-names of inbound
+ units: packets
+ every: 1m
+ warn: $this >= 5
+ delay: down 1h multiplier 1.5 max 2h
+ info: interface inbound discarded packets in the last 10 minutes
+ to: sysadmin
+
+template: outbound_packets_discarded
+ on: wmi.net_discarded
+ os: linux
+ hosts: *
+families: *
+ lookup: sum -10m unaligned absolute match-names of outbound
+ units: packets
+ every: 1m
+ warn: $this >= 5
+ delay: down 1h multiplier 1.5 max 2h
+ info: interface outbound discarded packets in the last 10 minutes
+ to: sysadmin
+
+template: inbound_packets_errors
+ on: wmi.net_errors
+ os: linux
+ hosts: *
+families: *
+ lookup: sum -10m unaligned absolute match-names of inbound
+ units: packets
+ every: 1m
+ warn: $this >= 5
+ delay: down 1h multiplier 1.5 max 2h
+ info: interface inbound errors in the last 10 minutes
+ to: sysadmin
+
+template: outbound_packets_errors
+ on: wmi.net_errors
+ os: linux
+ hosts: *
+families: *
+ lookup: sum -10m unaligned absolute match-names of outbound
+ units: packets
+ every: 1m
+ warn: $this >= 5
+ delay: down 1h multiplier 1.5 max 2h
+ info: interface outbound errors in the last 10 minutes
+ to: sysadmin
+
+
+## Disk
+
+template: wmi_disk_in_use
+ on: wmi.logical_disk_utilization
+ os: linux
+ hosts: *
+ calc: ($used) * 100 / ($used + $free)
+ units: %
+ every: 10s
+ warn: $this > (($status >= $WARNING) ? (80) : (90))
+ crit: $this > (($status == $CRITICAL) ? (90) : (98))
+ delay: down 15m multiplier 1.5 max 1h
+ info: used disk space
+ to: sysadmin
diff --git a/health/health.d/x509check.conf b/health/health.d/x509check.conf
new file mode 100644
index 0000000..dfca377
--- /dev/null
+++ b/health/health.d/x509check.conf
@@ -0,0 +1,32 @@
+
+# make sure x509check is running
+
+template: x509check_last_collected_secs
+ on: x509check.time_until_expiration
+ calc: $now - $last_collected_t
+ units: seconds ago
+ every: 60s
+ warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every))
+ crit: $this > (($status == $CRITICAL) ? ($update_every) : (60 * $update_every))
+ delay: down 5m multiplier 1.5 max 1h
+ info: number of seconds since the last successful data collection
+ to: webmaster
+
+
+template: x509check_days_until_expiration
+ on: x509check.time_until_expiration
+ calc: $expiry
+ units: seconds
+ every: 60s
+ warn: $this < $days_until_expiration_warning*24*60*60
+ crit: $this < $days_until_expiration_critical*24*60*60
+ info: certificate time until expiration
+ to: webmaster
+
+template: x509check_revocation_status
+ on: x509check.revocation_status
+ calc: $revoked
+ every: 60s
+ crit: $this != nan AND $this != 0
+ info: certificate revocation status
+ to: webmaster
diff --git a/health/health.d/zfs.conf b/health/health.d/zfs.conf
new file mode 100644
index 0000000..af73824
--- /dev/null
+++ b/health/health.d/zfs.conf
@@ -0,0 +1,10 @@
+
+ alarm: zfs_memory_throttle
+ on: zfs.memory_ops
+ lookup: sum -10m unaligned absolute of throttled
+ units: events
+ every: 1m
+ warn: $this > 0
+ delay: down 1h multiplier 1.5 max 2h
+ info: the number of times ZFS had to limit the ARC growth in the last 10 minutes
+ to: sysadmin
diff --git a/health/health.d/zookeeper.conf b/health/health.d/zookeeper.conf
new file mode 100644
index 0000000..ffbe31b
--- /dev/null
+++ b/health/health.d/zookeeper.conf
@@ -0,0 +1,14 @@
+
+# make sure zookeeper is running
+
+template: zookeeper_last_collected_secs
+ on: zookeeper.requests
+ calc: $now - $last_collected_t
+ units: seconds ago
+ every: 10s
+ warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every))
+ crit: $this > (($status == $CRITICAL) ? ($update_every) : (60 * $update_every))
+ delay: down 5m multiplier 1.5 max 1h
+ info: number of seconds since the last successful data collection
+ to: webmaster
+
diff --git a/health/health.h b/health/health.h
new file mode 100644
index 0000000..5281e16
--- /dev/null
+++ b/health/health.h
@@ -0,0 +1,112 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_HEALTH_H
+#define NETDATA_HEALTH_H 1
+
+#include "../daemon/common.h"
+
+#define NETDATA_PLUGIN_HOOK_HEALTH \
+ { \
+ .name = "HEALTH", \
+ .config_section = NULL, \
+ .config_name = NULL, \
+ .enabled = 1, \
+ .thread = NULL, \
+ .init_routine = NULL, \
+ .start_routine = health_main \
+ },
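+
+/*
+ * A sketch of intended use (struct and array names here are the daemon's,
+ * assumed for illustration, not defined in this header): the trailing comma
+ * lets the macro expand directly inside a static initializer list, e.g.
+ *
+ *     struct netdata_static_thread static_threads[] = {
+ *         NETDATA_PLUGIN_HOOK_HEALTH
+ *         // ... other plugin hooks ...
+ *     };
+ */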
+
+extern unsigned int default_health_enabled;
+
+#define HEALTH_ENTRY_FLAG_PROCESSED 0x00000001
+#define HEALTH_ENTRY_FLAG_UPDATED 0x00000002
+#define HEALTH_ENTRY_FLAG_EXEC_RUN 0x00000004
+#define HEALTH_ENTRY_FLAG_EXEC_FAILED 0x00000008
+#define HEALTH_ENTRY_FLAG_SILENCED 0x00000010
+#define HEALTH_ENTRY_RUN_ONCE 0x00000020
+#define HEALTH_ENTRY_FLAG_EXEC_IN_PROGRESS 0x00000040
+
+#define HEALTH_ENTRY_FLAG_SAVED 0x10000000
+#define HEALTH_ENTRY_FLAG_NO_CLEAR_NOTIFICATION 0x80000000
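+
+/*
+ * Illustrative usage (not part of this header; `flags` stands in for an
+ * alarm entry's flag word): the values above are independent bits, so they
+ * are combined with | and tested with &, e.g.
+ *
+ *     uint32_t flags = 0;
+ *     flags |= HEALTH_ENTRY_FLAG_PROCESSED | HEALTH_ENTRY_FLAG_SAVED;
+ *     if(flags & HEALTH_ENTRY_FLAG_NO_CLEAR_NOTIFICATION)
+ *         ; // suppress the recovery notification
+ */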
+
+#ifndef HEALTH_LISTEN_PORT
+#define HEALTH_LISTEN_PORT 19998
+#endif
+
+#ifndef HEALTH_LISTEN_BACKLOG
+#define HEALTH_LISTEN_BACKLOG 4096
+#endif
+
+#define HEALTH_ON_KEY "on"
+#define HEALTH_EVERY_KEY "every"
+#define HEALTH_GREEN_KEY "green"
+#define HEALTH_RED_KEY "red"
+#define HEALTH_WARN_KEY "warn"
+#define HEALTH_CRIT_KEY "crit"
+#define HEALTH_EXEC_KEY "exec"
+#define HEALTH_RECIPIENT_KEY "to"
+#define HEALTH_UNITS_KEY "units"
+#define HEALTH_INFO_KEY "info"
+#define HEALTH_DELAY_KEY "delay"
+#define HEALTH_OPTIONS_KEY "options"
+#define HEALTH_FOREACH_KEY "foreach"
+
+#define HEALTH_SILENCERS_MAX_FILE_LEN 10000
+
+extern char *silencers_filename;
+
+extern void health_init(void);
+extern void *health_main(void *ptr);
+
+extern void health_reload(void);
+
+extern int health_variable_lookup(const char *variable, uint32_t hash, RRDCALC *rc, calculated_number *result);
+extern void health_aggregate_alarms(RRDHOST *host, BUFFER *wb, BUFFER* context, RRDCALC_STATUS status);
+extern void health_alarms2json(RRDHOST *host, BUFFER *wb, int all);
+extern void health_alarms_values2json(RRDHOST *host, BUFFER *wb, int all);
+extern void health_alarm_log2json(RRDHOST *host, BUFFER *wb, uint32_t after);
+
+void health_api_v1_chart_variables2json(RRDSET *st, BUFFER *buf);
+void health_api_v1_chart_custom_variables2json(RRDSET *st, BUFFER *buf);
+
+extern int health_alarm_log_open(RRDHOST *host);
+extern void health_alarm_log_save(RRDHOST *host, ALARM_ENTRY *ae);
+extern void health_alarm_log_load(RRDHOST *host);
+
+extern ALARM_ENTRY* health_create_alarm_entry(
+ RRDHOST *host,
+ uint32_t alarm_id,
+ uint32_t alarm_event_id,
+ time_t when,
+ const char *name,
+ const char *chart,
+ const char *family,
+ const char *exec,
+ const char *recipient,
+ time_t duration,
+ calculated_number old_value,
+ calculated_number new_value,
+ RRDCALC_STATUS old_status,
+ RRDCALC_STATUS new_status,
+ const char *source,
+ const char *units,
+ const char *info,
+ int delay,
+ uint32_t flags);
+
+extern void health_alarm_log(RRDHOST *host, ALARM_ENTRY *ae);
+
+extern void health_readdir(RRDHOST *host, const char *user_path, const char *stock_path, const char *subpath);
+extern char *health_user_config_dir(void);
+extern char *health_stock_config_dir(void);
+extern void health_alarm_log_free(RRDHOST *host);
+
+extern void health_alarm_log_free_one_nochecks_nounlink(ALARM_ENTRY *ae);
+
+extern void *health_cmdapi_thread(void *ptr);
+
+extern void health_label_log_save(RRDHOST *host);
+
+extern SIMPLE_PATTERN *health_pattern_from_foreach(char *s);
+
+#endif //NETDATA_HEALTH_H
diff --git a/health/health_config.c b/health/health_config.c
new file mode 100644
index 0000000..1acf369
--- /dev/null
+++ b/health/health_config.c
@@ -0,0 +1,1035 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "health.h"
+
+#define HEALTH_CONF_MAX_LINE 4096
+
+#define HEALTH_ALARM_KEY "alarm"
+#define HEALTH_TEMPLATE_KEY "template"
+#define HEALTH_ON_KEY "on"
+#define HEALTH_HOST_KEY "hosts"
+#define HEALTH_OS_KEY "os"
+#define HEALTH_FAMILIES_KEY "families"
+#define HEALTH_PLUGIN_KEY "plugin"
+#define HEALTH_MODULE_KEY "module"
+#define HEALTH_LOOKUP_KEY "lookup"
+#define HEALTH_CALC_KEY "calc"
+#define HEALTH_EVERY_KEY "every"
+#define HEALTH_GREEN_KEY "green"
+#define HEALTH_RED_KEY "red"
+#define HEALTH_WARN_KEY "warn"
+#define HEALTH_CRIT_KEY "crit"
+#define HEALTH_EXEC_KEY "exec"
+#define HEALTH_RECIPIENT_KEY "to"
+#define HEALTH_UNITS_KEY "units"
+#define HEALTH_INFO_KEY "info"
+#define HEALTH_DELAY_KEY "delay"
+#define HEALTH_OPTIONS_KEY "options"
+#define HEALTH_REPEAT_KEY "repeat"
+#define HEALTH_HOST_LABEL_KEY "host labels"
+
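+// The keys above define the grammar of health configuration files. A minimal,
+// illustrative alarm stanza that health_readfile() below accepts (the names
+// and thresholds here are made up for the example):
+//
+//      alarm: example_cpu_usage
+//         on: system.cpu
+//     lookup: average -10s unaligned of user,system
+//      every: 10s
+//       warn: $this > 75
+//       crit: $this > 90
+//         to: sysadmin
+//
+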
+static inline int rrdcalc_add_alarm_from_config(RRDHOST *host, RRDCALC *rc) {
+ if(!rc->chart) {
+ error("Health configuration for alarm '%s' does not have a chart", rc->name);
+ return 0;
+ }
+
+ if(!rc->update_every) {
+ error("Health configuration for alarm '%s.%s' has no frequency (parameter 'every'). Ignoring it.", rc->chart?rc->chart:"NOCHART", rc->name);
+ return 0;
+ }
+
+ if(!RRDCALC_HAS_DB_LOOKUP(rc) && !rc->calculation && !rc->warning && !rc->critical) {
+ error("Health configuration for alarm '%s.%s' is useless (no db lookup, no calculation, no warning and no critical expressions)", rc->chart?rc->chart:"NOCHART", rc->name);
+ return 0;
+ }
+
+ if (rrdcalc_exists(host, rc->chart, rc->name, rc->hash_chart, rc->hash))
+ return 0;
+
+ rc->id = rrdcalc_get_unique_id(host, rc->chart, rc->name, &rc->next_event_id);
+
+ debug(D_HEALTH, "Health configuration adding alarm '%s.%s' (%u): exec '%s', recipient '%s', green " CALCULATED_NUMBER_FORMAT_AUTO ", red " CALCULATED_NUMBER_FORMAT_AUTO ", lookup: group %d, after %d, before %d, options %u, dimensions '%s', for each dimension '%s', update every %d, calculation '%s', warning '%s', critical '%s', source '%s', delay up %d, delay down %d, delay max %d, delay_multiplier %f, warn_repeat_every %u, crit_repeat_every %u",
+ rc->chart?rc->chart:"NOCHART",
+ rc->name,
+ rc->id,
+ (rc->exec)?rc->exec:"DEFAULT",
+ (rc->recipient)?rc->recipient:"DEFAULT",
+ rc->green,
+ rc->red,
+ (int)rc->group,
+ rc->after,
+ rc->before,
+ rc->options,
+ (rc->dimensions)?rc->dimensions:"NONE",
+ (rc->foreachdim)?rc->foreachdim:"NONE",
+ rc->update_every,
+ (rc->calculation)?rc->calculation->parsed_as:"NONE",
+ (rc->warning)?rc->warning->parsed_as:"NONE",
+ (rc->critical)?rc->critical->parsed_as:"NONE",
+ rc->source,
+ rc->delay_up_duration,
+ rc->delay_down_duration,
+ rc->delay_max_duration,
+ rc->delay_multiplier,
+ rc->warn_repeat_every,
+ rc->crit_repeat_every
+ );
+
+ rrdcalc_add_to_host(host, rc);
+
+ return 1;
+}
+
+static inline int rrdcalctemplate_add_template_from_config(RRDHOST *host, RRDCALCTEMPLATE *rt) {
+ if(unlikely(!rt->context)) {
+ error("Health configuration for template '%s' does not have a context", rt->name);
+ return 0;
+ }
+
+ if(unlikely(!rt->update_every)) {
+ error("Health configuration for template '%s' has no frequency (parameter 'every'). Ignoring it.", rt->name);
+ return 0;
+ }
+
+ if(unlikely(!RRDCALCTEMPLATE_HAS_DB_LOOKUP(rt) && !rt->calculation && !rt->warning && !rt->critical)) {
+ error("Health configuration for template '%s' is useless (no calculation, no warning and no critical evaluation)", rt->name);
+ return 0;
+ }
+
+ RRDCALCTEMPLATE *t, *last = NULL;
+ if(!rt->foreachdim) {
+ for (t = host->templates; t ; last = t, t = t->next) {
+ if(unlikely(t->hash_name == rt->hash_name
+ && !strcmp(t->name, rt->name)
+ && !strcmp(t->family_match?t->family_match:"*", rt->family_match?rt->family_match:"*")
+ )) {
+ error("Health configuration template '%s' already exists for host '%s'.", rt->name, host->hostname);
+ return 0;
+ }
+ }
+
+ if(likely(last)) {
+ last->next = rt;
+ }
+ else {
+ rt->next = host->templates;
+ host->templates = rt;
+ }
+ } else {
+ for (t = host->alarms_template_with_foreach; t ; last = t, t = t->next) {
+ if(unlikely(t->hash_name == rt->hash_name
+ && !strcmp(t->name, rt->name)
+ && !strcmp(t->family_match?t->family_match:"*", rt->family_match?rt->family_match:"*")
+ )) {
+ error("Health configuration template '%s' already exists for host '%s'.", rt->name, host->hostname);
+ return 0;
+ }
+ }
+
+ if(likely(last)) {
+ last->next = rt;
+ }
+ else {
+ rt->next = host->alarms_template_with_foreach;
+ host->alarms_template_with_foreach = rt;
+ }
+ }
+
+ debug(D_HEALTH, "Health configuration adding template '%s': context '%s', exec '%s', recipient '%s', green " CALCULATED_NUMBER_FORMAT_AUTO ", red " CALCULATED_NUMBER_FORMAT_AUTO ", lookup: group %d, after %d, before %d, options %u, dimensions '%s', for each dimension '%s', update every %d, calculation '%s', warning '%s', critical '%s', source '%s', delay up %d, delay down %d, delay max %d, delay_multiplier %f, warn_repeat_every %u, crit_repeat_every %u",
+ rt->name,
+ (rt->context)?rt->context:"NONE",
+ (rt->exec)?rt->exec:"DEFAULT",
+ (rt->recipient)?rt->recipient:"DEFAULT",
+ rt->green,
+ rt->red,
+ (int)rt->group,
+ rt->after,
+ rt->before,
+ rt->options,
+ (rt->dimensions)?rt->dimensions:"NONE",
+ (rt->foreachdim)?rt->foreachdim:"NONE",
+ rt->update_every,
+ (rt->calculation)?rt->calculation->parsed_as:"NONE",
+ (rt->warning)?rt->warning->parsed_as:"NONE",
+ (rt->critical)?rt->critical->parsed_as:"NONE",
+ rt->source,
+ rt->delay_up_duration,
+ rt->delay_down_duration,
+ rt->delay_max_duration,
+ rt->delay_multiplier,
+ rt->warn_repeat_every,
+ rt->crit_repeat_every
+ );
+
+ return 1;
+}
+
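+// Parses the 'delay' line of an alarm or template: a space separated list of
+// 'up', 'down' and 'max' (each followed by a duration) and 'multiplier'
+// (followed by a float), in any order, e.g.:
+//
+//     delay: up 30s down 15m multiplier 1.5 max 1h
+//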
+static inline int health_parse_delay(
+ size_t line, const char *filename, char *string,
+ int *delay_up_duration,
+ int *delay_down_duration,
+ int *delay_max_duration,
+ float *delay_multiplier) {
+
+ char given_up = 0;
+ char given_down = 0;
+ char given_max = 0;
+ char given_multiplier = 0;
+
+ char *s = string;
+ while(*s) {
+ char *key = s;
+
+ while(*s && !isspace(*s)) s++;
+ while(*s && isspace(*s)) *s++ = '\0';
+
+ if(!*key) break;
+
+ char *value = s;
+ while(*s && !isspace(*s)) s++;
+ while(*s && isspace(*s)) *s++ = '\0';
+
+ if(!strcasecmp(key, "up")) {
+ if (!config_parse_duration(value, delay_up_duration)) {
+ error("Health configuration at line %zu of file '%s': invalid value '%s' for '%s' keyword",
+ line, filename, value, key);
+ }
+ else given_up = 1;
+ }
+ else if(!strcasecmp(key, "down")) {
+ if (!config_parse_duration(value, delay_down_duration)) {
+ error("Health configuration at line %zu of file '%s': invalid value '%s' for '%s' keyword",
+ line, filename, value, key);
+ }
+ else given_down = 1;
+ }
+ else if(!strcasecmp(key, "multiplier")) {
+ *delay_multiplier = strtof(value, NULL);
+ if(isnan(*delay_multiplier) || isinf(*delay_multiplier) || islessequal(*delay_multiplier, 0)) {
+ error("Health configuration at line %zu of file '%s': invalid value '%s' for '%s' keyword",
+ line, filename, value, key);
+ }
+ else given_multiplier = 1;
+ }
+ else if(!strcasecmp(key, "max")) {
+ if (!config_parse_duration(value, delay_max_duration)) {
+ error("Health configuration at line %zu of file '%s': invalid value '%s' for '%s' keyword",
+ line, filename, value, key);
+ }
+ else given_max = 1;
+ }
+ else {
+ error("Health configuration at line %zu of file '%s': unknown keyword '%s'",
+ line, filename, key);
+ }
+ }
+
+ if(!given_up)
+ *delay_up_duration = 0;
+
+ if(!given_down)
+ *delay_down_duration = 0;
+
+ if(!given_multiplier)
+ *delay_multiplier = 1.0;
+
+ if(!given_max) {
+ if((*delay_max_duration) < (*delay_up_duration) * (*delay_multiplier))
+ *delay_max_duration = (int)((*delay_up_duration) * (*delay_multiplier));
+
+ if((*delay_max_duration) < (*delay_down_duration) * (*delay_multiplier))
+ *delay_max_duration = (int)((*delay_down_duration) * (*delay_multiplier));
+ }
+
+ return 1;
+}
+
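+// Parses the 'options' line. The only recognized option is
+// 'no-clear-notification' (alias 'no-clear'), mapped to
+// RRDCALC_FLAG_NO_CLEAR_NOTIFICATION, e.g.:
+//
+//     options: no-clear-notification
+//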
+static inline uint32_t health_parse_options(const char *s) {
+ uint32_t options = 0;
+ char buf[100+1] = "";
+
+ while(*s) {
+ buf[0] = '\0';
+
+ // skip spaces
+ while(*s && isspace(*s))
+ s++;
+
+ // find the next space
+ size_t count = 0;
+ while(*s && count < 100 && !isspace(*s))
+ buf[count++] = *s++;
+
+ if(buf[0]) {
+ buf[count] = '\0';
+
+ if(!strcasecmp(buf, "no-clear-notification") || !strcasecmp(buf, "no-clear"))
+ options |= RRDCALC_FLAG_NO_CLEAR_NOTIFICATION;
+ else
+ error("Ignoring unknown alarm option '%s'", buf);
+ }
+ }
+
+ return options;
+}
+
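+// Parses the 'repeat' line, which controls how often notifications are
+// re-sent while an alarm stays raised, e.g.:
+//
+//     repeat: warning 30m critical 10m
+//     repeat: off
+//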
+static inline int health_parse_repeat(
+ size_t line,
+ const char *file,
+ char *string,
+ uint32_t *warn_repeat_every,
+ uint32_t *crit_repeat_every
+) {
+
+ char *s = string;
+ while(*s) {
+ char *key = s;
+
+ while(*s && !isspace(*s)) s++;
+ while(*s && isspace(*s)) *s++ = '\0';
+
+ if(!*key) break;
+
+ char *value = s;
+ while(*s && !isspace(*s)) s++;
+ while(*s && isspace(*s)) *s++ = '\0';
+
+ if(!strcasecmp(key, "off")) {
+ *warn_repeat_every = 0;
+ *crit_repeat_every = 0;
+ return 1;
+ }
+ if(!strcasecmp(key, "warning")) {
+ if (!config_parse_duration(value, (int*)warn_repeat_every)) {
+ error("Health configuration at line %zu of file '%s': invalid value '%s' for '%s' keyword",
+ line, file, value, key);
+ }
+ }
+ else if(!strcasecmp(key, "critical")) {
+ if (!config_parse_duration(value, (int*)crit_repeat_every)) {
+ error("Health configuration at line %zu of file '%s': invalid value '%s' for '%s' keyword",
+ line, file, value, key);
+ }
+ }
+ }
+
+ return 1;
+}
+
+/**
+ * Health pattern from Foreach
+ *
+ * Create a new simple pattern from the user input, converting the
+ * pipe/comma separated dimension list into a format simple patterns accept.
+ *
+ * @param s the string that will be used to create the simple pattern.
+ *
+ * @return the new simple pattern, or NULL when one cannot be created.
+ */
+SIMPLE_PATTERN *health_pattern_from_foreach(char *s) {
+    char *convert = strdupz(s);
+ SIMPLE_PATTERN *val = NULL;
+ if(convert) {
+ dimension_remove_pipe_comma(convert);
+ val = simple_pattern_create(convert, NULL, SIMPLE_PATTERN_EXACT);
+
+ freez(convert);
+ }
+
+ return val;
+}
+
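+// Parses the 'lookup' line. The first token is the grouping method and the
+// second the 'after' duration; the remaining tokens are optional modifiers
+// ('at', 'every', 'absolute', 'min2max', 'null2zero', 'percentage',
+// 'unaligned', 'match-ids', 'match-names', 'of', 'foreach'), e.g.:
+//
+//     lookup: average -10s unaligned of user,system
+//     lookup: sum -1m foreach *
+//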
+static inline int health_parse_db_lookup(
+ size_t line, const char *filename, char *string,
+ RRDR_GROUPING *group_method, int *after, int *before, int *every,
+ uint32_t *options, char **dimensions, char **foreachdim
+) {
+ debug(D_HEALTH, "Health configuration parsing database lookup %zu@%s: %s", line, filename, string);
+
+ if(*dimensions) freez(*dimensions);
+ if(*foreachdim) freez(*foreachdim);
+ *dimensions = NULL;
+ *foreachdim = NULL;
+ *after = 0;
+ *before = 0;
+ *every = 0;
+ *options = 0;
+
+ char *s = string, *key;
+
+ // first is the group method
+ key = s;
+ while(*s && !isspace(*s)) s++;
+ while(*s && isspace(*s)) *s++ = '\0';
+ if(!*s) {
+ error("Health configuration invalid chart calculation at line %zu of file '%s': expected group method followed by the 'after' time, but got '%s'",
+ line, filename, key);
+ return 0;
+ }
+
+ if((*group_method = web_client_api_request_v1_data_group(key, RRDR_GROUPING_UNDEFINED)) == RRDR_GROUPING_UNDEFINED) {
+ error("Health configuration at line %zu of file '%s': invalid group method '%s'",
+ line, filename, key);
+ return 0;
+ }
+
+ // then is the 'after' time
+ key = s;
+ while(*s && !isspace(*s)) s++;
+ while(*s && isspace(*s)) *s++ = '\0';
+
+ if(!config_parse_duration(key, after)) {
+ error("Health configuration at line %zu of file '%s': invalid duration '%s' after group method",
+ line, filename, key);
+ return 0;
+ }
+
+ // sane defaults
+ *every = abs(*after);
+
+ // now we may have optional parameters
+ while(*s) {
+ key = s;
+ while(*s && !isspace(*s)) s++;
+ while(*s && isspace(*s)) *s++ = '\0';
+ if(!*key) break;
+
+ if(!strcasecmp(key, "at")) {
+ char *value = s;
+ while(*s && !isspace(*s)) s++;
+ while(*s && isspace(*s)) *s++ = '\0';
+
+ if (!config_parse_duration(value, before)) {
+ error("Health configuration at line %zu of file '%s': invalid duration '%s' for '%s' keyword",
+ line, filename, value, key);
+ }
+ }
+ else if(!strcasecmp(key, HEALTH_EVERY_KEY)) {
+ char *value = s;
+ while(*s && !isspace(*s)) s++;
+ while(*s && isspace(*s)) *s++ = '\0';
+
+ if (!config_parse_duration(value, every)) {
+ error("Health configuration at line %zu of file '%s': invalid duration '%s' for '%s' keyword",
+ line, filename, value, key);
+ }
+ }
+ else if(!strcasecmp(key, "absolute") || !strcasecmp(key, "abs") || !strcasecmp(key, "absolute_sum")) {
+ *options |= RRDR_OPTION_ABSOLUTE;
+ }
+ else if(!strcasecmp(key, "min2max")) {
+ *options |= RRDR_OPTION_MIN2MAX;
+ }
+ else if(!strcasecmp(key, "null2zero")) {
+ *options |= RRDR_OPTION_NULL2ZERO;
+ }
+ else if(!strcasecmp(key, "percentage")) {
+ *options |= RRDR_OPTION_PERCENTAGE;
+ }
+ else if(!strcasecmp(key, "unaligned")) {
+ *options |= RRDR_OPTION_NOT_ALIGNED;
+ }
+ else if(!strcasecmp(key, "match-ids") || !strcasecmp(key, "match_ids")) {
+ *options |= RRDR_OPTION_MATCH_IDS;
+ }
+ else if(!strcasecmp(key, "match-names") || !strcasecmp(key, "match_names")) {
+ *options |= RRDR_OPTION_MATCH_NAMES;
+ }
+ else if(!strcasecmp(key, "of")) {
+ char *find = NULL;
+ if(*s && strcasecmp(s, "all") != 0) {
+ find = strcasestr(s, " foreach");
+ if(find) {
+ *find = '\0';
+ }
+ *dimensions = strdupz(s);
+ }
+
+ if(!find) {
+ break;
+ }
+ s = ++find;
+ }
+        else if(!strcasecmp(key, HEALTH_FOREACH_KEY)) {
+ *foreachdim = strdupz(s);
+ break;
+ }
+ else {
+ error("Health configuration at line %zu of file '%s': unknown keyword '%s'",
+ line, filename, key);
+ }
+ }
+
+ return 1;
+}
+
+static inline char *health_source_file(size_t line, const char *file) {
+ char buffer[FILENAME_MAX + 1];
+ snprintfz(buffer, FILENAME_MAX, "%zu@%s", line, file);
+ return strdupz(buffer);
+}
+
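+// Despite the name, quotes are replaced with spaces, keeping the string length.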
+static inline void strip_quotes(char *s) {
+ while(*s) {
+ if(*s == '\'' || *s == '"') *s = ' ';
+ s++;
+ }
+}
+
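+// Reads one health configuration file. Each line is a 'key: value' pair, '#'
+// starts a comment, and a trailing backslash continues the definition on the
+// next line (up to HEALTH_CONF_MAX_LINE characters in total).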
+static int health_readfile(const char *filename, void *data) {
+ RRDHOST *host = (RRDHOST *)data;
+
+ debug(D_HEALTH, "Health configuration reading file '%s'", filename);
+
+ static uint32_t
+ hash_alarm = 0,
+ hash_template = 0,
+ hash_os = 0,
+ hash_on = 0,
+ hash_host = 0,
+ hash_families = 0,
+ hash_plugin = 0,
+ hash_module = 0,
+ hash_calc = 0,
+ hash_green = 0,
+ hash_red = 0,
+ hash_warn = 0,
+ hash_crit = 0,
+ hash_exec = 0,
+ hash_every = 0,
+ hash_lookup = 0,
+ hash_units = 0,
+ hash_info = 0,
+ hash_recipient = 0,
+ hash_delay = 0,
+ hash_options = 0,
+ hash_repeat = 0,
+ hash_host_label = 0;
+
+ char buffer[HEALTH_CONF_MAX_LINE + 1];
+
+ if(unlikely(!hash_alarm)) {
+ hash_alarm = simple_uhash(HEALTH_ALARM_KEY);
+ hash_template = simple_uhash(HEALTH_TEMPLATE_KEY);
+ hash_on = simple_uhash(HEALTH_ON_KEY);
+ hash_os = simple_uhash(HEALTH_OS_KEY);
+ hash_host = simple_uhash(HEALTH_HOST_KEY);
+ hash_families = simple_uhash(HEALTH_FAMILIES_KEY);
+ hash_plugin = simple_uhash(HEALTH_PLUGIN_KEY);
+ hash_module = simple_uhash(HEALTH_MODULE_KEY);
+ hash_calc = simple_uhash(HEALTH_CALC_KEY);
+ hash_lookup = simple_uhash(HEALTH_LOOKUP_KEY);
+ hash_green = simple_uhash(HEALTH_GREEN_KEY);
+ hash_red = simple_uhash(HEALTH_RED_KEY);
+ hash_warn = simple_uhash(HEALTH_WARN_KEY);
+ hash_crit = simple_uhash(HEALTH_CRIT_KEY);
+ hash_exec = simple_uhash(HEALTH_EXEC_KEY);
+ hash_every = simple_uhash(HEALTH_EVERY_KEY);
+        hash_units = simple_uhash(HEALTH_UNITS_KEY);
+        hash_info = simple_uhash(HEALTH_INFO_KEY);
+        hash_recipient = simple_uhash(HEALTH_RECIPIENT_KEY);
+ hash_delay = simple_uhash(HEALTH_DELAY_KEY);
+ hash_options = simple_uhash(HEALTH_OPTIONS_KEY);
+ hash_repeat = simple_uhash(HEALTH_REPEAT_KEY);
+ hash_host_label = simple_uhash(HEALTH_HOST_LABEL_KEY);
+ }
+
+ FILE *fp = fopen(filename, "r");
+ if(!fp) {
+ error("Health configuration cannot read file '%s'.", filename);
+ return 0;
+ }
+
+ RRDCALC *rc = NULL;
+ RRDCALCTEMPLATE *rt = NULL;
+
+ int ignore_this = 0;
+ size_t line = 0, append = 0;
+ char *s;
+ while((s = fgets(&buffer[append], (int)(HEALTH_CONF_MAX_LINE - append), fp)) || append) {
+ int stop_appending = !s;
+ line++;
+ s = trim(buffer);
+ if(!s || *s == '#') continue;
+
+ append = strlen(s);
+ if(!stop_appending && s[append - 1] == '\\') {
+ s[append - 1] = ' ';
+ append = &s[append] - buffer;
+ if(append < HEALTH_CONF_MAX_LINE)
+ continue;
+ else {
+ error("Health configuration has too long multi-line at line %zu of file '%s'.", line, filename);
+ }
+ }
+ append = 0;
+
+ char *key = s;
+ while(*s && *s != ':') s++;
+ if(!*s) {
+ error("Health configuration has invalid line %zu of file '%s'. It does not contain a ':'. Ignoring it.", line, filename);
+ continue;
+ }
+ *s = '\0';
+ s++;
+
+ char *value = s;
+ key = trim_all(key);
+ value = trim_all(value);
+
+ if(!key) {
+ error("Health configuration has invalid line %zu of file '%s'. Keyword is empty. Ignoring it.", line, filename);
+ continue;
+ }
+
+ if(!value) {
+ error("Health configuration has invalid line %zu of file '%s'. value is empty. Ignoring it.", line, filename);
+ continue;
+ }
+
+ uint32_t hash = simple_uhash(key);
+
+ if(hash == hash_alarm && !strcasecmp(key, HEALTH_ALARM_KEY)) {
+ if(rc) {
+ if(ignore_this || !rrdcalc_add_alarm_from_config(host, rc)) {
+ rrdcalc_free(rc);
+ }
+ // health_add_alarms_loop(host, rc, ignore_this) ;
+ }
+
+ if(rt) {
+ if (ignore_this || !rrdcalctemplate_add_template_from_config(host, rt))
+ rrdcalctemplate_free(rt);
+
+ rt = NULL;
+ }
+
+ rc = callocz(1, sizeof(RRDCALC));
+ rc->next_event_id = 1;
+ rc->name = strdupz(value);
+ rc->hash = simple_hash(rc->name);
+ rc->source = health_source_file(line, filename);
+ rc->green = NAN;
+ rc->red = NAN;
+ rc->value = NAN;
+ rc->old_value = NAN;
+ rc->delay_multiplier = 1.0;
+ rc->old_status = RRDCALC_STATUS_UNINITIALIZED;
+ rc->warn_repeat_every = host->health_default_warn_repeat_every;
+ rc->crit_repeat_every = host->health_default_crit_repeat_every;
+
+ if(rrdvar_fix_name(rc->name))
+ error("Health configuration renamed alarm '%s' to '%s'", value, rc->name);
+
+ ignore_this = 0;
+ }
+ else if(hash == hash_template && !strcasecmp(key, HEALTH_TEMPLATE_KEY)) {
+ if(rc) {
+// health_add_alarms_loop(host, rc, ignore_this) ;
+ if(ignore_this || !rrdcalc_add_alarm_from_config(host, rc)) {
+ rrdcalc_free(rc);
+ }
+
+ rc = NULL;
+ }
+
+ if(rt) {
+ if(ignore_this || !rrdcalctemplate_add_template_from_config(host, rt))
+ rrdcalctemplate_free(rt);
+ }
+
+ rt = callocz(1, sizeof(RRDCALCTEMPLATE));
+ rt->name = strdupz(value);
+ rt->hash_name = simple_hash(rt->name);
+ rt->source = health_source_file(line, filename);
+ rt->green = NAN;
+ rt->red = NAN;
+ rt->delay_multiplier = 1.0;
+ rt->warn_repeat_every = host->health_default_warn_repeat_every;
+ rt->crit_repeat_every = host->health_default_crit_repeat_every;
+
+ if(rrdvar_fix_name(rt->name))
+ error("Health configuration renamed template '%s' to '%s'", value, rt->name);
+
+ ignore_this = 0;
+ }
+ else if(hash == hash_os && !strcasecmp(key, HEALTH_OS_KEY)) {
+ char *os_match = value;
+ SIMPLE_PATTERN *os_pattern = simple_pattern_create(os_match, NULL, SIMPLE_PATTERN_EXACT);
+
+ if(!simple_pattern_matches(os_pattern, host->os)) {
+ if(rc)
+ debug(D_HEALTH, "HEALTH on '%s' ignoring alarm '%s' defined at %zu@%s: host O/S does not match '%s'", host->hostname, rc->name, line, filename, os_match);
+
+ if(rt)
+ debug(D_HEALTH, "HEALTH on '%s' ignoring template '%s' defined at %zu@%s: host O/S does not match '%s'", host->hostname, rt->name, line, filename, os_match);
+
+ ignore_this = 1;
+ }
+
+ simple_pattern_free(os_pattern);
+ }
+ else if(hash == hash_host && !strcasecmp(key, HEALTH_HOST_KEY)) {
+ char *host_match = value;
+ SIMPLE_PATTERN *host_pattern = simple_pattern_create(host_match, NULL, SIMPLE_PATTERN_EXACT);
+
+ if(!simple_pattern_matches(host_pattern, host->hostname)) {
+ if(rc)
+ debug(D_HEALTH, "HEALTH on '%s' ignoring alarm '%s' defined at %zu@%s: hostname does not match '%s'", host->hostname, rc->name, line, filename, host_match);
+
+ if(rt)
+ debug(D_HEALTH, "HEALTH on '%s' ignoring template '%s' defined at %zu@%s: hostname does not match '%s'", host->hostname, rt->name, line, filename, host_match);
+
+ ignore_this = 1;
+ }
+
+ simple_pattern_free(host_pattern);
+ }
+ else if(rc) {
+ if(hash == hash_on && !strcasecmp(key, HEALTH_ON_KEY)) {
+ if(rc->chart) {
+ if(strcmp(rc->chart, value) != 0)
+ error("Health configuration at line %zu of file '%s' for alarm '%s' has key '%s' twice, once with value '%s' and later with value '%s'. Using ('%s').",
+ line, filename, rc->name, key, rc->chart, value, value);
+
+ freez(rc->chart);
+ }
+ rc->chart = strdupz(value);
+ rc->hash_chart = simple_hash(rc->chart);
+ }
+ else if(hash == hash_lookup && !strcasecmp(key, HEALTH_LOOKUP_KEY)) {
+ health_parse_db_lookup(line, filename, value, &rc->group, &rc->after, &rc->before,
+ &rc->update_every, &rc->options, &rc->dimensions, &rc->foreachdim);
+ if(rc->foreachdim) {
+ rc->spdim = health_pattern_from_foreach(rc->foreachdim);
+ }
+ }
+ else if(hash == hash_every && !strcasecmp(key, HEALTH_EVERY_KEY)) {
+ if(!config_parse_duration(value, &rc->update_every))
+ error("Health configuration at line %zu of file '%s' for alarm '%s' at key '%s' cannot parse duration: '%s'.",
+ line, filename, rc->name, key, value);
+ }
+ else if(hash == hash_green && !strcasecmp(key, HEALTH_GREEN_KEY)) {
+ char *e;
+ rc->green = str2ld(value, &e);
+ if(e && *e) {
+ error("Health configuration at line %zu of file '%s' for alarm '%s' at key '%s' leaves this string unmatched: '%s'.",
+ line, filename, rc->name, key, e);
+ }
+ }
+ else if(hash == hash_red && !strcasecmp(key, HEALTH_RED_KEY)) {
+ char *e;
+ rc->red = str2ld(value, &e);
+ if(e && *e) {
+ error("Health configuration at line %zu of file '%s' for alarm '%s' at key '%s' leaves this string unmatched: '%s'.",
+ line, filename, rc->name, key, e);
+ }
+ }
+ else if(hash == hash_calc && !strcasecmp(key, HEALTH_CALC_KEY)) {
+ const char *failed_at = NULL;
+ int error = 0;
+ rc->calculation = expression_parse(value, &failed_at, &error);
+ if(!rc->calculation) {
+ error("Health configuration at line %zu of file '%s' for alarm '%s' at key '%s' has unparse-able expression '%s': %s at '%s'",
+ line, filename, rc->name, key, value, expression_strerror(error), failed_at);
+ }
+ }
+ else if(hash == hash_warn && !strcasecmp(key, HEALTH_WARN_KEY)) {
+ const char *failed_at = NULL;
+ int error = 0;
+ rc->warning = expression_parse(value, &failed_at, &error);
+ if(!rc->warning) {
+ error("Health configuration at line %zu of file '%s' for alarm '%s' at key '%s' has unparse-able expression '%s': %s at '%s'",
+ line, filename, rc->name, key, value, expression_strerror(error), failed_at);
+ }
+ }
+ else if(hash == hash_crit && !strcasecmp(key, HEALTH_CRIT_KEY)) {
+ const char *failed_at = NULL;
+ int error = 0;
+ rc->critical = expression_parse(value, &failed_at, &error);
+ if(!rc->critical) {
+ error("Health configuration at line %zu of file '%s' for alarm '%s' at key '%s' has unparse-able expression '%s': %s at '%s'",
+ line, filename, rc->name, key, value, expression_strerror(error), failed_at);
+ }
+ }
+ else if(hash == hash_exec && !strcasecmp(key, HEALTH_EXEC_KEY)) {
+ if(rc->exec) {
+ if(strcmp(rc->exec, value) != 0)
+ error("Health configuration at line %zu of file '%s' for alarm '%s' has key '%s' twice, once with value '%s' and later with value '%s'. Using ('%s').",
+ line, filename, rc->name, key, rc->exec, value, value);
+
+ freez(rc->exec);
+ }
+ rc->exec = strdupz(value);
+ }
+ else if(hash == hash_recipient && !strcasecmp(key, HEALTH_RECIPIENT_KEY)) {
+ if(rc->recipient) {
+ if(strcmp(rc->recipient, value) != 0)
+ error("Health configuration at line %zu of file '%s' for alarm '%s' has key '%s' twice, once with value '%s' and later with value '%s'. Using ('%s').",
+ line, filename, rc->name, key, rc->recipient, value, value);
+
+ freez(rc->recipient);
+ }
+ rc->recipient = strdupz(value);
+ }
+ else if(hash == hash_units && !strcasecmp(key, HEALTH_UNITS_KEY)) {
+ if(rc->units) {
+ if(strcmp(rc->units, value) != 0)
+ error("Health configuration at line %zu of file '%s' for alarm '%s' has key '%s' twice, once with value '%s' and later with value '%s'. Using ('%s').",
+ line, filename, rc->name, key, rc->units, value, value);
+
+ freez(rc->units);
+ }
+ rc->units = strdupz(value);
+ strip_quotes(rc->units);
+ }
+ else if(hash == hash_info && !strcasecmp(key, HEALTH_INFO_KEY)) {
+ if(rc->info) {
+ if(strcmp(rc->info, value) != 0)
+ error("Health configuration at line %zu of file '%s' for alarm '%s' has key '%s' twice, once with value '%s' and later with value '%s'. Using ('%s').",
+ line, filename, rc->name, key, rc->info, value, value);
+
+ freez(rc->info);
+ }
+ rc->info = strdupz(value);
+ strip_quotes(rc->info);
+ }
+ else if(hash == hash_delay && !strcasecmp(key, HEALTH_DELAY_KEY)) {
+ health_parse_delay(line, filename, value, &rc->delay_up_duration, &rc->delay_down_duration, &rc->delay_max_duration, &rc->delay_multiplier);
+ }
+ else if(hash == hash_options && !strcasecmp(key, HEALTH_OPTIONS_KEY)) {
+ rc->options |= health_parse_options(value);
+ }
+ else if(hash == hash_repeat && !strcasecmp(key, HEALTH_REPEAT_KEY)){
+ health_parse_repeat(line, filename, value,
+ &rc->warn_repeat_every,
+ &rc->crit_repeat_every);
+ }
+ else if(hash == hash_host_label && !strcasecmp(key, HEALTH_HOST_LABEL_KEY)) {
+ if(rc->labels) {
+ if(strcmp(rc->labels, value) != 0)
+ error("Health configuration at line %zu of file '%s' for alarm '%s' has key '%s' twice, once with value '%s' and later with value '%s'.",
+ line, filename, rc->name, key, value, value);
+
+ freez(rc->labels);
+ simple_pattern_free(rc->splabels);
+ }
+
+ rc->labels = simple_pattern_trim_around_equal(value);
+ rc->splabels = simple_pattern_create(rc->labels, NULL, SIMPLE_PATTERN_EXACT);
+ }
+ else if(hash == hash_plugin && !strcasecmp(key, HEALTH_PLUGIN_KEY)) {
+ freez(rc->plugin_match);
+ simple_pattern_free(rc->plugin_pattern);
+
+ rc->plugin_match = strdupz(value);
+ rc->plugin_pattern = simple_pattern_create(rc->plugin_match, NULL, SIMPLE_PATTERN_EXACT);
+ }
+ else if(hash == hash_module && !strcasecmp(key, HEALTH_MODULE_KEY)) {
+ freez(rc->module_match);
+ simple_pattern_free(rc->module_pattern);
+
+ rc->module_match = strdupz(value);
+ rc->module_pattern = simple_pattern_create(rc->module_match, NULL, SIMPLE_PATTERN_EXACT);
+ }
+ else {
+ error("Health configuration at line %zu of file '%s' for alarm '%s' has unknown key '%s'.",
+ line, filename, rc->name, key);
+ }
+ }
+ else if(rt) {
+ if(hash == hash_on && !strcasecmp(key, HEALTH_ON_KEY)) {
+ if(rt->context) {
+ if(strcmp(rt->context, value) != 0)
+ error("Health configuration at line %zu of file '%s' for template '%s' has key '%s' twice, once with value '%s' and later with value '%s'. Using ('%s').",
+ line, filename, rt->name, key, rt->context, value, value);
+
+ freez(rt->context);
+ }
+ rt->context = strdupz(value);
+ rt->hash_context = simple_hash(rt->context);
+ }
+ else if(hash == hash_families && !strcasecmp(key, HEALTH_FAMILIES_KEY)) {
+ freez(rt->family_match);
+ simple_pattern_free(rt->family_pattern);
+
+ rt->family_match = strdupz(value);
+ rt->family_pattern = simple_pattern_create(rt->family_match, NULL, SIMPLE_PATTERN_EXACT);
+ }
+ else if(hash == hash_plugin && !strcasecmp(key, HEALTH_PLUGIN_KEY)) {
+ freez(rt->plugin_match);
+ simple_pattern_free(rt->plugin_pattern);
+
+ rt->plugin_match = strdupz(value);
+ rt->plugin_pattern = simple_pattern_create(rt->plugin_match, NULL, SIMPLE_PATTERN_EXACT);
+ }
+ else if(hash == hash_module && !strcasecmp(key, HEALTH_MODULE_KEY)) {
+ freez(rt->module_match);
+ simple_pattern_free(rt->module_pattern);
+
+ rt->module_match = strdupz(value);
+ rt->module_pattern = simple_pattern_create(rt->module_match, NULL, SIMPLE_PATTERN_EXACT);
+ }
+ else if(hash == hash_lookup && !strcasecmp(key, HEALTH_LOOKUP_KEY)) {
+ health_parse_db_lookup(line, filename, value, &rt->group, &rt->after, &rt->before,
+ &rt->update_every, &rt->options, &rt->dimensions, &rt->foreachdim);
+ if(rt->foreachdim) {
+ rt->spdim = health_pattern_from_foreach(rt->foreachdim);
+ }
+ }
+ else if(hash == hash_every && !strcasecmp(key, HEALTH_EVERY_KEY)) {
+ if(!config_parse_duration(value, &rt->update_every))
+ error("Health configuration at line %zu of file '%s' for template '%s' at key '%s' cannot parse duration: '%s'.",
+ line, filename, rt->name, key, value);
+ }
+ else if(hash == hash_green && !strcasecmp(key, HEALTH_GREEN_KEY)) {
+ char *e;
+ rt->green = str2ld(value, &e);
+ if(e && *e) {
+ error("Health configuration at line %zu of file '%s' for template '%s' at key '%s' leaves this string unmatched: '%s'.",
+ line, filename, rt->name, key, e);
+ }
+ }
+ else if(hash == hash_red && !strcasecmp(key, HEALTH_RED_KEY)) {
+ char *e;
+ rt->red = str2ld(value, &e);
+ if(e && *e) {
+ error("Health configuration at line %zu of file '%s' for template '%s' at key '%s' leaves this string unmatched: '%s'.",
+ line, filename, rt->name, key, e);
+ }
+ }
+ else if(hash == hash_calc && !strcasecmp(key, HEALTH_CALC_KEY)) {
+ const char *failed_at = NULL;
+ int error = 0;
+ rt->calculation = expression_parse(value, &failed_at, &error);
+ if(!rt->calculation) {
+ error("Health configuration at line %zu of file '%s' for template '%s' at key '%s' has unparse-able expression '%s': %s at '%s'",
+ line, filename, rt->name, key, value, expression_strerror(error), failed_at);
+ }
+ }
+ else if(hash == hash_warn && !strcasecmp(key, HEALTH_WARN_KEY)) {
+ const char *failed_at = NULL;
+ int error = 0;
+ rt->warning = expression_parse(value, &failed_at, &error);
+ if(!rt->warning) {
+ error("Health configuration at line %zu of file '%s' for template '%s' at key '%s' has unparse-able expression '%s': %s at '%s'",
+ line, filename, rt->name, key, value, expression_strerror(error), failed_at);
+ }
+ }
+ else if(hash == hash_crit && !strcasecmp(key, HEALTH_CRIT_KEY)) {
+ const char *failed_at = NULL;
+ int error = 0;
+ rt->critical = expression_parse(value, &failed_at, &error);
+ if(!rt->critical) {
+ error("Health configuration at line %zu of file '%s' for template '%s' at key '%s' has unparse-able expression '%s': %s at '%s'",
+ line, filename, rt->name, key, value, expression_strerror(error), failed_at);
+ }
+ }
+ else if(hash == hash_exec && !strcasecmp(key, HEALTH_EXEC_KEY)) {
+ if(rt->exec) {
+ if(strcmp(rt->exec, value) != 0)
+ error("Health configuration at line %zu of file '%s' for template '%s' has key '%s' twice, once with value '%s' and later with value '%s'. Using ('%s').",
+ line, filename, rt->name, key, rt->exec, value, value);
+
+ freez(rt->exec);
+ }
+ rt->exec = strdupz(value);
+ }
+ else if(hash == hash_recipient && !strcasecmp(key, HEALTH_RECIPIENT_KEY)) {
+ if(rt->recipient) {
+ if(strcmp(rt->recipient, value) != 0)
+ error("Health configuration at line %zu of file '%s' for template '%s' has key '%s' twice, once with value '%s' and later with value '%s'. Using ('%s').",
+ line, filename, rt->name, key, rt->recipient, value, value);
+
+ freez(rt->recipient);
+ }
+ rt->recipient = strdupz(value);
+ }
+ else if(hash == hash_units && !strcasecmp(key, HEALTH_UNITS_KEY)) {
+ if(rt->units) {
+ if(strcmp(rt->units, value) != 0)
+ error("Health configuration at line %zu of file '%s' for template '%s' has key '%s' twice, once with value '%s' and later with value '%s'. Using ('%s').",
+ line, filename, rt->name, key, rt->units, value, value);
+
+ freez(rt->units);
+ }
+ rt->units = strdupz(value);
+ strip_quotes(rt->units);
+ }
+ else if(hash == hash_info && !strcasecmp(key, HEALTH_INFO_KEY)) {
+ if(rt->info) {
+ if(strcmp(rt->info, value) != 0)
+ error("Health configuration at line %zu of file '%s' for template '%s' has key '%s' twice, once with value '%s' and later with value '%s'. Using ('%s').",
+ line, filename, rt->name, key, rt->info, value, value);
+
+ freez(rt->info);
+ }
+ rt->info = strdupz(value);
+ strip_quotes(rt->info);
+ }
+ else if(hash == hash_delay && !strcasecmp(key, HEALTH_DELAY_KEY)) {
+ health_parse_delay(line, filename, value, &rt->delay_up_duration, &rt->delay_down_duration, &rt->delay_max_duration, &rt->delay_multiplier);
+ }
+ else if(hash == hash_options && !strcasecmp(key, HEALTH_OPTIONS_KEY)) {
+ rt->options |= health_parse_options(value);
+ }
+ else if(hash == hash_repeat && !strcasecmp(key, HEALTH_REPEAT_KEY)){
+ health_parse_repeat(line, filename, value,
+ &rt->warn_repeat_every,
+ &rt->crit_repeat_every);
+ }
+ else if(hash == hash_host_label && !strcasecmp(key, HEALTH_HOST_LABEL_KEY)) {
+ if(rt->labels) {
+ if(strcmp(rt->labels, value) != 0)
+ error("Health configuration at line %zu of file '%s' for template '%s' has key '%s' twice, once with value '%s' and later with value '%s'. Using ('%s').",
+ line, filename, rt->name, key, rt->labels, value, value);
+
+ freez(rt->labels);
+ simple_pattern_free(rt->splabels);
+ }
+
+ rt->labels = simple_pattern_trim_around_equal(value);
+ rt->splabels = simple_pattern_create(rt->labels, NULL, SIMPLE_PATTERN_EXACT);
+ }
+ else {
+ error("Health configuration at line %zu of file '%s' for template '%s' has unknown key '%s'.",
+ line, filename, rt->name, key);
+ }
+ }
+ else {
+ error("Health configuration at line %zu of file '%s' has unknown key '%s'. Expected either '" HEALTH_ALARM_KEY "' or '" HEALTH_TEMPLATE_KEY "'.",
+ line, filename, key);
+ }
+ }
+
+ if(rc) {
+ //health_add_alarms_loop(host, rc, ignore_this) ;
+ if(ignore_this || !rrdcalc_add_alarm_from_config(host, rc)) {
+ rrdcalc_free(rc);
+ }
+ }
+
+ if(rt) {
+ if(ignore_this || !rrdcalctemplate_add_template_from_config(host, rt))
+ rrdcalctemplate_free(rt);
+ }
+
+ fclose(fp);
+ return 1;
+}
+
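+// Loads every health configuration file for 'host', with user files taking
+// precedence over same-named stock files. When stock alarms are disabled, the
+// stock path is aliased to the user path so stock files are never loaded.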
+void health_readdir(RRDHOST *host, const char *user_path, const char *stock_path, const char *subpath) {
+ if(unlikely(!host->health_enabled)) {
+ debug(D_HEALTH, "CONFIG health is not enabled for host '%s'", host->hostname);
+ return;
+ }
+
+ int stock_enabled = (int)config_get_boolean(CONFIG_SECTION_HEALTH, "enable stock health configuration",
+ CONFIG_BOOLEAN_YES);
+
+ if (!stock_enabled) {
+ info("Netdata will not load stock alarms.");
+ stock_path = user_path;
+ }
+
+ recursive_config_double_dir_load(user_path, stock_path, subpath, health_readfile, (void *) host, 0);
+}
diff --git a/health/health_json.c b/health/health_json.c
new file mode 100644
index 0000000..7b5a1e3
--- /dev/null
+++ b/health/health_json.c
@@ -0,0 +1,368 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "health.h"
+
+static inline void health_string2json(BUFFER *wb, const char *prefix, const char *label, const char *value, const char *suffix) {
+ if(value && *value) {
+ buffer_sprintf(wb, "%s\"%s\":\"", prefix, label);
+ buffer_strcat_htmlescape(wb, value);
+ buffer_strcat(wb, "\"");
+ buffer_strcat(wb, suffix);
+ }
+ else
+ buffer_sprintf(wb, "%s\"%s\":null%s", prefix, label, suffix);
+}
+
+inline void health_alarm_entry2json_nolock(BUFFER *wb, ALARM_ENTRY *ae, RRDHOST *host) {
+ buffer_sprintf(wb,
+ "\n\t{\n"
+ "\t\t\"hostname\": \"%s\",\n"
+ "\t\t\"unique_id\": %u,\n"
+ "\t\t\"alarm_id\": %u,\n"
+ "\t\t\"alarm_event_id\": %u,\n"
+ "\t\t\"name\": \"%s\",\n"
+ "\t\t\"chart\": \"%s\",\n"
+ "\t\t\"family\": \"%s\",\n"
+ "\t\t\"processed\": %s,\n"
+ "\t\t\"updated\": %s,\n"
+ "\t\t\"exec_run\": %lu,\n"
+ "\t\t\"exec_failed\": %s,\n"
+ "\t\t\"exec\": \"%s\",\n"
+ "\t\t\"recipient\": \"%s\",\n"
+ "\t\t\"exec_code\": %d,\n"
+ "\t\t\"source\": \"%s\",\n"
+ "\t\t\"units\": \"%s\",\n"
+ "\t\t\"when\": %lu,\n"
+ "\t\t\"duration\": %lu,\n"
+ "\t\t\"non_clear_duration\": %lu,\n"
+ "\t\t\"status\": \"%s\",\n"
+ "\t\t\"old_status\": \"%s\",\n"
+ "\t\t\"delay\": %d,\n"
+ "\t\t\"delay_up_to_timestamp\": %lu,\n"
+ "\t\t\"updated_by_id\": %u,\n"
+ "\t\t\"updates_id\": %u,\n"
+ "\t\t\"value_string\": \"%s\",\n"
+ "\t\t\"old_value_string\": \"%s\",\n"
+ "\t\t\"last_repeat\": \"%lu\",\n"
+ "\t\t\"silenced\": \"%s\",\n"
+ , host->hostname
+ , ae->unique_id
+ , ae->alarm_id
+ , ae->alarm_event_id
+ , ae->name
+ , ae->chart
+ , ae->family
+ , (ae->flags & HEALTH_ENTRY_FLAG_PROCESSED)?"true":"false"
+ , (ae->flags & HEALTH_ENTRY_FLAG_UPDATED)?"true":"false"
+ , (unsigned long)ae->exec_run_timestamp
+ , (ae->flags & HEALTH_ENTRY_FLAG_EXEC_FAILED)?"true":"false"
+ , ae->exec?ae->exec:host->health_default_exec
+ , ae->recipient?ae->recipient:host->health_default_recipient
+ , ae->exec_code
+ , ae->source
+ , ae->units?ae->units:""
+ , (unsigned long)ae->when
+ , (unsigned long)ae->duration
+ , (unsigned long)ae->non_clear_duration
+ , rrdcalc_status2string(ae->new_status)
+ , rrdcalc_status2string(ae->old_status)
+ , ae->delay
+ , (unsigned long)ae->delay_up_to_timestamp
+ , ae->updated_by_id
+ , ae->updates_id
+ , ae->new_value_string
+ , ae->old_value_string
+ , (unsigned long)ae->last_repeat
+ , (ae->flags & HEALTH_ENTRY_FLAG_SILENCED)?"true":"false"
+ );
+
+ health_string2json(wb, "\t\t", "info", ae->info?ae->info:"", ",\n");
+
+ if(unlikely(ae->flags & HEALTH_ENTRY_FLAG_NO_CLEAR_NOTIFICATION)) {
+ buffer_strcat(wb, "\t\t\"no_clear_notification\": true,\n");
+ }
+
+ buffer_strcat(wb, "\t\t\"value\":");
+ buffer_rrd_value(wb, ae->new_value);
+ buffer_strcat(wb, ",\n");
+
+ buffer_strcat(wb, "\t\t\"old_value\":");
+ buffer_rrd_value(wb, ae->old_value);
+ buffer_strcat(wb, "\n");
+
+ buffer_strcat(wb, "\t}");
+}
+
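+// Renders the alarm log of 'host' as a JSON array into 'wb', including only
+// entries with a unique id greater than 'after'.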
+void health_alarm_log2json(RRDHOST *host, BUFFER *wb, uint32_t after) {
+ netdata_rwlock_rdlock(&host->health_log.alarm_log_rwlock);
+
+ buffer_strcat(wb, "[");
+
+ unsigned int max = host->health_log.max;
+ unsigned int count = 0;
+ ALARM_ENTRY *ae;
+ for(ae = host->health_log.alarms; ae && count < max ; count++, ae = ae->next) {
+ if(ae->unique_id > after) {
+ if(likely(count)) buffer_strcat(wb, ",");
+ health_alarm_entry2json_nolock(wb, ae, host);
+ }
+ }
+
+ buffer_strcat(wb, "\n]\n");
+
+ netdata_rwlock_unlock(&host->health_log.alarm_log_rwlock);
+}
+
+static inline void health_rrdcalc_values2json_nolock(RRDHOST *host, BUFFER *wb, RRDCALC *rc) {
+ (void)host;
+ buffer_sprintf(wb,
+ "\t\t\"%s.%s\": {\n"
+ "\t\t\t\"id\": %lu,\n"
+ , rc->chart, rc->name
+ , (unsigned long)rc->id);
+
+ buffer_strcat(wb, "\t\t\t\"value\":");
+ buffer_rrd_value(wb, rc->value);
+ buffer_strcat(wb, ",\n");
+
+ buffer_sprintf(wb,
+ "\t\t\t\"status\": \"%s\"\n"
+ , rrdcalc_status2string(rc->status));
+
+ buffer_strcat(wb, "\t\t}");
+}
+
+static inline void health_rrdcalc2json_nolock(RRDHOST *host, BUFFER *wb, RRDCALC *rc) {
+ char value_string[100 + 1];
+ format_value_and_unit(value_string, 100, rc->value, rc->units, -1);
+
+ buffer_sprintf(wb,
+ "\t\t\"%s.%s\": {\n"
+ "\t\t\t\"id\": %lu,\n"
+ "\t\t\t\"name\": \"%s\",\n"
+ "\t\t\t\"chart\": \"%s\",\n"
+ "\t\t\t\"family\": \"%s\",\n"
+ "\t\t\t\"active\": %s,\n"
+ "\t\t\t\"disabled\": %s,\n"
+ "\t\t\t\"silenced\": %s,\n"
+ "\t\t\t\"exec\": \"%s\",\n"
+ "\t\t\t\"recipient\": \"%s\",\n"
+ "\t\t\t\"source\": \"%s\",\n"
+ "\t\t\t\"units\": \"%s\",\n"
+ "\t\t\t\"info\": \"%s\",\n"
+ "\t\t\t\"status\": \"%s\",\n"
+ "\t\t\t\"last_status_change\": %lu,\n"
+ "\t\t\t\"last_updated\": %lu,\n"
+ "\t\t\t\"next_update\": %lu,\n"
+ "\t\t\t\"update_every\": %d,\n"
+ "\t\t\t\"delay_up_duration\": %d,\n"
+ "\t\t\t\"delay_down_duration\": %d,\n"
+ "\t\t\t\"delay_max_duration\": %d,\n"
+ "\t\t\t\"delay_multiplier\": %f,\n"
+ "\t\t\t\"delay\": %d,\n"
+ "\t\t\t\"delay_up_to_timestamp\": %lu,\n"
+ "\t\t\t\"warn_repeat_every\": \"%u\",\n"
+ "\t\t\t\"crit_repeat_every\": \"%u\",\n"
+ "\t\t\t\"value_string\": \"%s\",\n"
+ "\t\t\t\"last_repeat\": \"%lu\",\n"
+ , rc->chart, rc->name
+ , (unsigned long)rc->id
+ , rc->name
+ , rc->chart
+ , (rc->rrdset && rc->rrdset->family)?rc->rrdset->family:""
+ , (rc->rrdset)?"true":"false"
+ , (rc->rrdcalc_flags & RRDCALC_FLAG_DISABLED)?"true":"false"
+ , (rc->rrdcalc_flags & RRDCALC_FLAG_SILENCED)?"true":"false"
+ , rc->exec?rc->exec:host->health_default_exec
+ , rc->recipient?rc->recipient:host->health_default_recipient
+ , rc->source
+ , rc->units?rc->units:""
+ , rc->info?rc->info:""
+ , rrdcalc_status2string(rc->status)
+ , (unsigned long)rc->last_status_change
+ , (unsigned long)rc->last_updated
+ , (unsigned long)rc->next_update
+ , rc->update_every
+ , rc->delay_up_duration
+ , rc->delay_down_duration
+ , rc->delay_max_duration
+ , rc->delay_multiplier
+ , rc->delay_last
+ , (unsigned long)rc->delay_up_to_timestamp
+ , rc->warn_repeat_every
+ , rc->crit_repeat_every
+ , value_string
+ , (unsigned long)rc->last_repeat
+ );
+
+ if(unlikely(rc->options & RRDCALC_FLAG_NO_CLEAR_NOTIFICATION)) {
+ buffer_strcat(wb, "\t\t\t\"no_clear_notification\": true,\n");
+ }
+
+ if(RRDCALC_HAS_DB_LOOKUP(rc)) {
+ if(rc->dimensions && *rc->dimensions)
+ health_string2json(wb, "\t\t\t", "lookup_dimensions", rc->dimensions, ",\n");
+
+ buffer_sprintf(wb,
+ "\t\t\t\"db_after\": %lu,\n"
+ "\t\t\t\"db_before\": %lu,\n"
+ "\t\t\t\"lookup_method\": \"%s\",\n"
+ "\t\t\t\"lookup_after\": %d,\n"
+ "\t\t\t\"lookup_before\": %d,\n"
+ "\t\t\t\"lookup_options\": \"",
+ (unsigned long) rc->db_after,
+ (unsigned long) rc->db_before,
+ group_method2string(rc->group),
+ rc->after,
+ rc->before
+ );
+ buffer_data_options2string(wb, rc->options);
+ buffer_strcat(wb, "\",\n");
+ }
+
+ if(rc->calculation) {
+ health_string2json(wb, "\t\t\t", "calc", rc->calculation->source, ",\n");
+ health_string2json(wb, "\t\t\t", "calc_parsed", rc->calculation->parsed_as, ",\n");
+ }
+
+ if(rc->warning) {
+ health_string2json(wb, "\t\t\t", "warn", rc->warning->source, ",\n");
+ health_string2json(wb, "\t\t\t", "warn_parsed", rc->warning->parsed_as, ",\n");
+ }
+
+ if(rc->critical) {
+ health_string2json(wb, "\t\t\t", "crit", rc->critical->source, ",\n");
+ health_string2json(wb, "\t\t\t", "crit_parsed", rc->critical->parsed_as, ",\n");
+ }
+
+ buffer_strcat(wb, "\t\t\t\"green\":");
+ buffer_rrd_value(wb, rc->green);
+ buffer_strcat(wb, ",\n");
+
+ buffer_strcat(wb, "\t\t\t\"red\":");
+ buffer_rrd_value(wb, rc->red);
+ buffer_strcat(wb, ",\n");
+
+ buffer_strcat(wb, "\t\t\t\"value\":");
+ buffer_rrd_value(wb, rc->value);
+ buffer_strcat(wb, "\n");
+
+ buffer_strcat(wb, "\t\t}");
+}
+
+//void health_rrdcalctemplate2json_nolock(BUFFER *wb, RRDCALCTEMPLATE *rt) {
+//
+//}
+
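+// Prints into 'wb' the number of alarms of 'host' that are in the given
+// status. When 'contexts' is given, it is parsed as a list of chart contexts
+// separated by spaces, commas or pipes (e.g. "system.cpu|system.load") and
+// only alarms attached to those contexts are counted; RRDCALC_STATUS_RAISED
+// matches anything at or above warning.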
+void health_aggregate_alarms(RRDHOST *host, BUFFER *wb, BUFFER* contexts, RRDCALC_STATUS status) {
+ RRDCALC *rc;
+    int number_of_alarms = 0;
+ char *tok = NULL;
+ char *p = NULL;
+
+ rrdhost_rdlock(host);
+
+ if (contexts) {
+ p = (char*)buffer_tostring(contexts);
+ while(p && *p && (tok = mystrsep(&p, ", |"))) {
+ if(!*tok) continue;
+
+ for(rc = host->alarms; rc ; rc = rc->next) {
+ if(unlikely(!rc->rrdset || !rc->rrdset->last_collected_time.tv_sec))
+ continue;
+ if(unlikely(rc->rrdset && rc->rrdset->hash_context == simple_hash(tok)
+ && !strcmp(rc->rrdset->context, tok)
+ && ((status==RRDCALC_STATUS_RAISED)?(rc->status >= RRDCALC_STATUS_WARNING):rc->status == status)))
+                    number_of_alarms++;
+ }
+ }
+ }
+ else {
+ for(rc = host->alarms; rc ; rc = rc->next) {
+ if(unlikely(!rc->rrdset || !rc->rrdset->last_collected_time.tv_sec))
+ continue;
+
+ if(unlikely((status==RRDCALC_STATUS_RAISED)?(rc->status >= RRDCALC_STATUS_WARNING):rc->status == status))
+                number_of_alarms++;
+ }
+ }
+
+ buffer_sprintf(wb, "%d", numberOfAlarms);
+ rrdhost_unlock(host);
+}
+
+static void health_alarms2json_fill_alarms(RRDHOST *host, BUFFER *wb, int all, void (*fp)(RRDHOST *, BUFFER *, RRDCALC *)) {
+ RRDCALC *rc;
+ int i;
+ for(i = 0, rc = host->alarms; rc ; rc = rc->next) {
+ if(unlikely(!rc->rrdset || !rc->rrdset->last_collected_time.tv_sec))
+ continue;
+
+ if(likely(!all && !(rc->status == RRDCALC_STATUS_WARNING || rc->status == RRDCALC_STATUS_CRITICAL)))
+ continue;
+
+ if(likely(i)) buffer_strcat(wb, ",\n");
+ fp(host, wb, rc);
+ i++;
+ }
+}
+
+void health_alarms2json(RRDHOST *host, BUFFER *wb, int all) {
+ rrdhost_rdlock(host);
+ buffer_sprintf(wb, "{\n\t\"hostname\": \"%s\","
+ "\n\t\"latest_alarm_log_unique_id\": %u,"
+ "\n\t\"status\": %s,"
+ "\n\t\"now\": %lu,"
+ "\n\t\"alarms\": {\n",
+ host->hostname,
+ (host->health_log.next_log_id > 0)?(host->health_log.next_log_id - 1):0,
+ host->health_enabled?"true":"false",
+ (unsigned long)now_realtime_sec());
+
+ health_alarms2json_fill_alarms(host, wb, all, health_rrdcalc2json_nolock);
+
+// buffer_strcat(wb, "\n\t},\n\t\"templates\": {");
+// RRDCALCTEMPLATE *rt;
+// for(rt = host->templates; rt ; rt = rt->next)
+// health_rrdcalctemplate2json_nolock(wb, rt);
+
+ buffer_strcat(wb, "\n\t}\n}\n");
+ rrdhost_unlock(host);
+}
+
+void health_alarms_values2json(RRDHOST *host, BUFFER *wb, int all) {
+ rrdhost_rdlock(host);
+ buffer_sprintf(wb, "{\n\t\"hostname\": \"%s\","
+ "\n\t\"alarms\": {\n",
+ host->hostname);
+
+ health_alarms2json_fill_alarms(host, wb, all, health_rrdcalc_values2json_nolock);
+
+ buffer_strcat(wb, "\n\t}\n}\n");
+ rrdhost_unlock(host);
+}
+
+void health_active_log_alarms_2json(RRDHOST *host, BUFFER *wb) {
+ netdata_rwlock_rdlock(&host->health_log.alarm_log_rwlock);
+
+ buffer_sprintf(wb, "[\n");
+
+ unsigned int max = host->health_log.max;
+ unsigned int count = 0;
+ ALARM_ENTRY *ae;
+ for(ae = host->health_log.alarms; ae && count < max ; ae = ae->next) {
+ if (!ae->updated_by_id &&
+ ((ae->new_status == RRDCALC_STATUS_WARNING || ae->new_status == RRDCALC_STATUS_CRITICAL) ||
+ ((ae->old_status == RRDCALC_STATUS_WARNING || ae->old_status == RRDCALC_STATUS_CRITICAL) &&
+ ae->new_status == RRDCALC_STATUS_REMOVED))) {
+ if (likely(count))
+ buffer_strcat(wb, ",");
+ health_alarm_entry2json_nolock(wb, ae, host);
+ count++;
+ }
+ }
+ buffer_strcat(wb, "]");
+
+ netdata_rwlock_unlock(&host->health_log.alarm_log_rwlock);
+}
diff --git a/health/health_log.c b/health/health_log.c
new file mode 100644
index 0000000..8c0bc5c
--- /dev/null
+++ b/health/health_log.c
@@ -0,0 +1,573 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "health.h"
+
+// ----------------------------------------------------------------------------
+// health alarm log load/save
+// no need for locking - only one thread is reading / writing the alarms log
+
+inline int health_alarm_log_open(RRDHOST *host) {
+ if(host->health_log_fp)
+ fclose(host->health_log_fp);
+
+ host->health_log_fp = fopen(host->health_log_filename, "a");
+
+ if(host->health_log_fp) {
+ if (setvbuf(host->health_log_fp, NULL, _IOLBF, 0) != 0)
+ error("HEALTH [%s]: cannot set line buffering on health log file '%s'.", host->hostname, host->health_log_filename);
+ return 0;
+ }
+
+ error("HEALTH [%s]: cannot open health log file '%s'. Health data will be lost in case of netdata or server crash.", host->hostname, host->health_log_filename);
+ return -1;
+}
+
+static inline void health_alarm_log_close(RRDHOST *host) {
+ if(host->health_log_fp) {
+ fclose(host->health_log_fp);
+ host->health_log_fp = NULL;
+ }
+}
+
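+// Rotates the health log once more than 'rotate log every lines' entries
+// (default 2000, hard minimum 100) have been written: the current file is
+// linked to '<filename>.old', truncated and reopened.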
+static inline void health_log_rotate(RRDHOST *host) {
+ static size_t rotate_every = 0;
+
+ if(unlikely(rotate_every == 0)) {
+ rotate_every = (size_t)config_get_number(CONFIG_SECTION_HEALTH, "rotate log every lines", 2000);
+ if(rotate_every < 100) rotate_every = 100;
+ }
+
+ if(unlikely(host->health_log_entries_written > rotate_every)) {
+ health_alarm_log_close(host);
+
+ char old_filename[FILENAME_MAX + 1];
+ snprintfz(old_filename, FILENAME_MAX, "%s.old", host->health_log_filename);
+
+ if(unlink(old_filename) == -1 && errno != ENOENT)
+ error("HEALTH [%s]: cannot remove old alarms log file '%s'", host->hostname, old_filename);
+
+ if(link(host->health_log_filename, old_filename) == -1 && errno != ENOENT)
+ error("HEALTH [%s]: cannot move file '%s' to '%s'.", host->hostname, host->health_log_filename, old_filename);
+
+ if(unlink(host->health_log_filename) == -1 && errno != ENOENT)
+ error("HEALTH [%s]: cannot remove old alarms log file '%s'", host->hostname, host->health_log_filename);
+
+ // open it with truncate
+ host->health_log_fp = fopen(host->health_log_filename, "w");
+
+ if(host->health_log_fp)
+ fclose(host->health_log_fp);
+ else
+ error("HEALTH [%s]: cannot truncate health log '%s'", host->hostname, host->health_log_filename);
+
+ host->health_log_fp = NULL;
+
+ host->health_log_entries_written = 0;
+ health_alarm_log_open(host);
+ }
+}
+
+inline void health_label_log_save(RRDHOST *host) {
+ health_log_rotate(host);
+
+ if(likely(host->health_log_fp)) {
+ BUFFER *wb = buffer_create(1024);
+ rrdhost_check_rdlock(host);
+ netdata_rwlock_rdlock(&host->labels.labels_rwlock);
+ struct label *l=localhost->labels.head;
+ while (l != NULL) {
+ buffer_sprintf(wb,"%s=%s\t ", l->key, l->value);
+ l = l->next;
+ }
+ netdata_rwlock_unlock(&host->labels.labels_rwlock);
+
+        char *write = (char *) buffer_tostring(wb);
+
+        // every label appended "key=value\t "; turn the trailing "\t " into "\n\0"
+        write[wb->len - 2] = '\n';
+        write[wb->len - 1] = '\0';
+
+ if (unlikely(fprintf(host->health_log_fp, "L\t%s"
+ , write
+ ) < 0))
+ error("HEALTH [%s]: failed to save alarm log entry to '%s'. Health data may be lost in case of abnormal restart.",
+ host->hostname, host->health_log_filename);
+ else {
+ host->health_log_entries_written++;
+ }
+
+ buffer_free(wb);
+ }
+}
+
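+// Appends one alarm entry to the health log. Every log line starts with a one
+// character record type -- 'A' for a new entry, 'U' for an update to an
+// existing entry, 'L' for a host labels snapshot -- followed by tab separated
+// fields, with numeric fields written as fixed width hexadecimal.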
+inline void health_alarm_log_save(RRDHOST *host, ALARM_ENTRY *ae) {
+ health_log_rotate(host);
+ if(likely(host->health_log_fp)) {
+ if(unlikely(fprintf(host->health_log_fp
+ , "%c\t%s"
+ "\t%08x\t%08x\t%08x\t%08x\t%08x"
+ "\t%08x\t%08x\t%08x"
+ "\t%08x\t%08x\t%08x"
+ "\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s"
+ "\t%d\t%d\t%d\t%d"
+ "\t" CALCULATED_NUMBER_FORMAT_AUTO "\t" CALCULATED_NUMBER_FORMAT_AUTO
+ "\t%016lx"
+ "\n"
+ , (ae->flags & HEALTH_ENTRY_FLAG_SAVED)?'U':'A'
+ , host->hostname
+
+ , ae->unique_id
+ , ae->alarm_id
+ , ae->alarm_event_id
+ , ae->updated_by_id
+ , ae->updates_id
+
+ , (uint32_t)ae->when
+ , (uint32_t)ae->duration
+ , (uint32_t)ae->non_clear_duration
+ , (uint32_t)ae->flags
+ , (uint32_t)ae->exec_run_timestamp
+ , (uint32_t)ae->delay_up_to_timestamp
+
+ , (ae->name)?ae->name:""
+ , (ae->chart)?ae->chart:""
+ , (ae->family)?ae->family:""
+ , (ae->exec)?ae->exec:""
+ , (ae->recipient)?ae->recipient:""
+ , (ae->source)?ae->source:""
+ , (ae->units)?ae->units:""
+ , (ae->info)?ae->info:""
+
+ , ae->exec_code
+ , ae->new_status
+ , ae->old_status
+ , ae->delay
+
+ , ae->new_value
+ , ae->old_value
+ , (uint64_t)ae->last_repeat
+ ) < 0))
+ error("HEALTH [%s]: failed to save alarm log entry to '%s'. Health data may be lost in case of abnormal restart.", host->hostname, host->health_log_filename);
+ else {
+ ae->flags |= HEALTH_ENTRY_FLAG_SAVED;
+ host->health_log_entries_written++;
+ }
+ }
+#ifdef ENABLE_ACLK
+ if (netdata_cloud_setting) {
+ if ((ae->new_status == RRDCALC_STATUS_WARNING || ae->new_status == RRDCALC_STATUS_CRITICAL) ||
+ ((ae->old_status == RRDCALC_STATUS_WARNING || ae->old_status == RRDCALC_STATUS_CRITICAL))) {
+ aclk_update_alarm(host, ae);
+ }
+ }
+#endif
+}
+
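+// Returns 1 when 'alarm_id' can be kept for this chart/name pair, 0 when the
+// log already uses the same id for a different alarm (the caller then
+// allocates a fresh id).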
+static uint32_t is_valid_alarm_id(RRDHOST *host, const char *chart, const char *name, uint32_t alarm_id)
+{
+ uint32_t hash_chart = simple_hash(chart);
+ uint32_t hash_name = simple_hash(name);
+
+ ALARM_ENTRY *ae;
+ for(ae = host->health_log.alarms; ae ;ae = ae->next) {
+ if (unlikely(
+ ae->alarm_id == alarm_id && (!(ae->hash_name == hash_name && ae->hash_chart == hash_chart &&
+ !strcmp(name, ae->name) && !strcmp(chart, ae->chart))))) {
+ return 0;
+ }
+ }
+ return 1;
+}
+
+static inline ssize_t health_alarm_log_read(RRDHOST *host, FILE *fp, const char *filename) {
+ errno = 0;
+
+ char *s, *buf = mallocz(65536 + 1);
+ size_t line = 0, len = 0;
+ ssize_t loaded = 0, updated = 0, errored = 0, duplicate = 0;
+
+ netdata_rwlock_rdlock(&host->health_log.alarm_log_rwlock);
+
+ while((s = fgets_trim_len(buf, 65536, fp, &len))) {
+ host->health_log_entries_written++;
+ line++;
+
+ int max_entries = 30, entries = 0;
+ char *pointers[max_entries];
+
+ pointers[entries++] = s++;
+ while(*s) {
+ if(unlikely(*s == '\t')) {
+ *s = '\0';
+ pointers[entries++] = ++s;
+ if(entries >= max_entries) {
+ error("HEALTH [%s]: line %zu of file '%s' has more than %d entries. Ignoring excessive entries.", host->hostname, line, filename, max_entries);
+ break;
+ }
+ }
+ else s++;
+ }
+
+ if(likely(*pointers[0] == 'L'))
+ continue;
+
+ if(likely(*pointers[0] == 'U' || *pointers[0] == 'A')) {
+ ALARM_ENTRY *ae = NULL;
+
+ if(entries < 26) {
+ error("HEALTH [%s]: line %zu of file '%s' should have at least 26 entries, but it has %d. Ignoring it.", host->hostname, line, filename, entries);
+ errored++;
+ continue;
+ }
+
+ // check that we have valid ids
+ uint32_t unique_id = (uint32_t)strtoul(pointers[2], NULL, 16);
+ if(!unique_id) {
+ error("HEALTH [%s]: line %zu of file '%s' states alarm entry with invalid unique id %u (%s). Ignoring it.", host->hostname, line, filename, unique_id, pointers[2]);
+ errored++;
+ continue;
+ }
+
+ uint32_t alarm_id = (uint32_t)strtoul(pointers[3], NULL, 16);
+ if(!alarm_id) {
+ error("HEALTH [%s]: line %zu of file '%s' states alarm entry for invalid alarm id %u (%s). Ignoring it.", host->hostname, line, filename, alarm_id, pointers[3]);
+ errored++;
+ continue;
+ }
+
+ // Check if we got last_repeat field
+ time_t last_repeat = 0;
+ if(entries > 27) {
+ char* alarm_name = pointers[13];
+ last_repeat = (time_t)strtoul(pointers[27], NULL, 16);
+
+ RRDCALC *rc = alarm_max_last_repeat(host, alarm_name,simple_hash(alarm_name));
+ if (!rc) {
+ for(rc = host->alarms; rc ; rc = rc->next) {
+ RRDCALC *rdcmp = (RRDCALC *) avl_insert_lock(&(host)->alarms_idx_name, (avl *)rc);
+ if(rdcmp != rc) {
+ error("Cannot insert the alarm index ID using log %s", rc->name);
+ }
+ }
+
+ rc = alarm_max_last_repeat(host, alarm_name,simple_hash(alarm_name));
+ }
+
+ if(unlikely(rc)) {
+ if (rrdcalc_isrepeating(rc)) {
+ rc->last_repeat = last_repeat;
+ // We iterate through repeating alarm entries only to
+ // find the latest last_repeat timestamp. Otherwise,
+ // there is no need to keep them in memory.
+ continue;
+ }
+ }
+ }
+
+ if(unlikely(*pointers[0] == 'A')) {
+ // make sure it is properly numbered
+ if(unlikely(host->health_log.alarms && unique_id < host->health_log.alarms->unique_id)) {
+ error( "HEALTH [%s]: line %zu of file '%s' has alarm log entry %u in wrong order. Ignoring it."
+ , host->hostname, line, filename, unique_id);
+ errored++;
+ continue;
+ }
+
+ ae = callocz(1, sizeof(ALARM_ENTRY));
+ }
+ else if(unlikely(*pointers[0] == 'U')) {
+ // find the original
+ for(ae = host->health_log.alarms; ae ; ae = ae->next) {
+ if(unlikely(unique_id == ae->unique_id)) {
+ if(unlikely(*pointers[0] == 'A')) {
+ error("HEALTH [%s]: line %zu of file '%s' adds duplicate alarm log entry %u. Using the later."
+ , host->hostname, line, filename, unique_id);
+ *pointers[0] = 'U';
+ duplicate++;
+ }
+ break;
+ }
+ else if(unlikely(unique_id > ae->unique_id)) {
+ // no need to continue
+ // the linked list is sorted
+ ae = NULL;
+ break;
+ }
+ }
+ }
+
+ // if not found, skip this line
+ if(unlikely(!ae)) {
+ // error("HEALTH [%s]: line %zu of file '%s' updates alarm log entry with unique id %u, but it is not found.", host->hostname, line, filename, unique_id);
+ continue;
+ }
+
+            // check for a possible host mismatch
+ //if(strcmp(pointers[1], host->hostname))
+ // error("HEALTH [%s]: line %zu of file '%s' provides an alarm for host '%s' but this is named '%s'.", host->hostname, line, filename, pointers[1], host->hostname);
+
+ ae->unique_id = unique_id;
+ if (!is_valid_alarm_id(host, pointers[14], pointers[13], alarm_id))
+ alarm_id = rrdcalc_get_unique_id(host, pointers[14], pointers[13], NULL);
+ ae->alarm_id = alarm_id;
+ ae->alarm_event_id = (uint32_t)strtoul(pointers[4], NULL, 16);
+ ae->updated_by_id = (uint32_t)strtoul(pointers[5], NULL, 16);
+ ae->updates_id = (uint32_t)strtoul(pointers[6], NULL, 16);
+
+ ae->when = (uint32_t)strtoul(pointers[7], NULL, 16);
+ ae->duration = (uint32_t)strtoul(pointers[8], NULL, 16);
+ ae->non_clear_duration = (uint32_t)strtoul(pointers[9], NULL, 16);
+
+ ae->flags = (uint32_t)strtoul(pointers[10], NULL, 16);
+ ae->flags |= HEALTH_ENTRY_FLAG_SAVED;
+
+ ae->exec_run_timestamp = (uint32_t)strtoul(pointers[11], NULL, 16);
+ ae->delay_up_to_timestamp = (uint32_t)strtoul(pointers[12], NULL, 16);
+
+ freez(ae->name);
+ ae->name = strdupz(pointers[13]);
+ ae->hash_name = simple_hash(ae->name);
+
+ freez(ae->chart);
+ ae->chart = strdupz(pointers[14]);
+ ae->hash_chart = simple_hash(ae->chart);
+
+ freez(ae->family);
+ ae->family = strdupz(pointers[15]);
+
+ freez(ae->exec);
+ ae->exec = strdupz(pointers[16]);
+ if(!*ae->exec) { freez(ae->exec); ae->exec = NULL; }
+
+ freez(ae->recipient);
+ ae->recipient = strdupz(pointers[17]);
+ if(!*ae->recipient) { freez(ae->recipient); ae->recipient = NULL; }
+
+ freez(ae->source);
+ ae->source = strdupz(pointers[18]);
+ if(!*ae->source) { freez(ae->source); ae->source = NULL; }
+
+ freez(ae->units);
+ ae->units = strdupz(pointers[19]);
+ if(!*ae->units) { freez(ae->units); ae->units = NULL; }
+
+ freez(ae->info);
+ ae->info = strdupz(pointers[20]);
+ if(!*ae->info) { freez(ae->info); ae->info = NULL; }
+
+ ae->exec_code = str2i(pointers[21]);
+ ae->new_status = str2i(pointers[22]);
+ ae->old_status = str2i(pointers[23]);
+ ae->delay = str2i(pointers[24]);
+
+ ae->new_value = str2l(pointers[25]);
+ ae->old_value = str2l(pointers[26]);
+
+ ae->last_repeat = last_repeat;
+
+ char value_string[100 + 1];
+ freez(ae->old_value_string);
+ freez(ae->new_value_string);
+ ae->old_value_string = strdupz(format_value_and_unit(value_string, 100, ae->old_value, ae->units, -1));
+ ae->new_value_string = strdupz(format_value_and_unit(value_string, 100, ae->new_value, ae->units, -1));
+
+ // add it to host if not already there
+ if(unlikely(*pointers[0] == 'A')) {
+ ae->next = host->health_log.alarms;
+ host->health_log.alarms = ae;
+ loaded++;
+ }
+ else updated++;
+
+ if(unlikely(ae->unique_id > host->health_max_unique_id))
+ host->health_max_unique_id = ae->unique_id;
+
+ if(unlikely(ae->alarm_id >= host->health_max_alarm_id))
+ host->health_max_alarm_id = ae->alarm_id;
+ }
+ else {
+ error("HEALTH [%s]: line %zu of file '%s' is invalid (unrecognized entry type '%s').", host->hostname, line, filename, pointers[0]);
+ errored++;
+ }
+ }
+
+ netdata_rwlock_unlock(&host->health_log.alarm_log_rwlock);
+
+ freez(buf);
+
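+    // if nothing was loaded, seed the max ids from the current unix time,
+    // instead of leaving them at zero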
+ if(!host->health_max_unique_id) host->health_max_unique_id = (uint32_t)now_realtime_sec();
+ if(!host->health_max_alarm_id) host->health_max_alarm_id = (uint32_t)now_realtime_sec();
+
+ host->health_log.next_log_id = host->health_max_unique_id + 1;
+ if (unlikely(!host->health_log.next_alarm_id || host->health_log.next_alarm_id <= host->health_max_alarm_id))
+ host->health_log.next_alarm_id = host->health_max_alarm_id + 1;
+
+    debug(D_HEALTH, "HEALTH [%s]: loaded file '%s': %zd new alarm entries, %zd updated, %zd errored, %zd duplicates", host->hostname, filename, loaded, updated, errored, duplicate);
+ return loaded;
+}
+
+inline void health_alarm_log_load(RRDHOST *host) {
+ health_alarm_log_close(host);
+
+ char filename[FILENAME_MAX + 1];
+ snprintfz(filename, FILENAME_MAX, "%s.old", host->health_log_filename);
+ FILE *fp = fopen(filename, "r");
+ if(!fp)
+ error("HEALTH [%s]: cannot open health file: %s", host->hostname, filename);
+ else {
+ health_alarm_log_read(host, fp, filename);
+ fclose(fp);
+ }
+
+ host->health_log_entries_written = 0;
+ fp = fopen(host->health_log_filename, "r");
+ if(!fp)
+ error("HEALTH [%s]: cannot open health file: %s", host->hostname, host->health_log_filename);
+ else {
+ health_alarm_log_read(host, fp, host->health_log_filename);
+ fclose(fp);
+ }
+
+ health_alarm_log_open(host);
+}
+
+
+// ----------------------------------------------------------------------------
+// health alarm log management
+
+inline ALARM_ENTRY* health_create_alarm_entry(
+ RRDHOST *host,
+ uint32_t alarm_id,
+ uint32_t alarm_event_id,
+ time_t when,
+ const char *name,
+ const char *chart,
+ const char *family,
+ const char *exec,
+ const char *recipient,
+ time_t duration,
+ calculated_number old_value,
+ calculated_number new_value,
+ RRDCALC_STATUS old_status,
+ RRDCALC_STATUS new_status,
+ const char *source,
+ const char *units,
+ const char *info,
+ int delay,
+ uint32_t flags
+) {
+ debug(D_HEALTH, "Health adding alarm log entry with id: %u", host->health_log.next_log_id);
+
+ ALARM_ENTRY *ae = callocz(1, sizeof(ALARM_ENTRY));
+ ae->name = strdupz(name);
+ ae->hash_name = simple_hash(ae->name);
+
+ if(chart) {
+ ae->chart = strdupz(chart);
+ ae->hash_chart = simple_hash(ae->chart);
+ }
+
+ if(family)
+ ae->family = strdupz(family);
+
+ if(exec) ae->exec = strdupz(exec);
+ if(recipient) ae->recipient = strdupz(recipient);
+ if(source) ae->source = strdupz(source);
+ if(units) ae->units = strdupz(units);
+ if(info) ae->info = strdupz(info);
+
+ ae->unique_id = host->health_log.next_log_id++;
+ ae->alarm_id = alarm_id;
+ ae->alarm_event_id = alarm_event_id;
+ ae->when = when;
+ ae->old_value = old_value;
+ ae->new_value = new_value;
+
+ char value_string[100 + 1];
+ ae->old_value_string = strdupz(format_value_and_unit(value_string, 100, ae->old_value, ae->units, -1));
+ ae->new_value_string = strdupz(format_value_and_unit(value_string, 100, ae->new_value, ae->units, -1));
+
+ ae->old_status = old_status;
+ ae->new_status = new_status;
+ ae->duration = duration;
+ ae->delay = delay;
+ ae->delay_up_to_timestamp = when + delay;
+ ae->flags |= flags;
+
+ ae->last_repeat = 0;
+
+ if(ae->old_status == RRDCALC_STATUS_WARNING || ae->old_status == RRDCALC_STATUS_CRITICAL)
+ ae->non_clear_duration += ae->duration;
+
+ return ae;
+}
+
+inline void health_alarm_log(
+ RRDHOST *host,
+ ALARM_ENTRY *ae
+) {
+ debug(D_HEALTH, "Health adding alarm log entry with id: %u", ae->unique_id);
+
+ if(unlikely(alarm_entry_isrepeating(host, ae))) {
+        error("Repeating alarms cannot be added to the host's alarm log entries; the API appears to be misused somewhere in the logic. Alarm id: %u", ae->alarm_id);
+ return;
+ }
+ // link it
+ netdata_rwlock_wrlock(&host->health_log.alarm_log_rwlock);
+ ae->next = host->health_log.alarms;
+ host->health_log.alarms = ae;
+ host->health_log.count++;
+ netdata_rwlock_unlock(&host->health_log.alarm_log_rwlock);
+
+ // match previous alarms
+ netdata_rwlock_rdlock(&host->health_log.alarm_log_rwlock);
+ ALARM_ENTRY *t;
+ for(t = host->health_log.alarms ; t ; t = t->next) {
+ if(t != ae && t->alarm_id == ae->alarm_id) {
+ if(!(t->flags & HEALTH_ENTRY_FLAG_UPDATED) && !t->updated_by_id) {
+ t->flags |= HEALTH_ENTRY_FLAG_UPDATED;
+ t->updated_by_id = ae->unique_id;
+ ae->updates_id = t->unique_id;
+
+ if((t->new_status == RRDCALC_STATUS_WARNING || t->new_status == RRDCALC_STATUS_CRITICAL) &&
+ (t->old_status == RRDCALC_STATUS_WARNING || t->old_status == RRDCALC_STATUS_CRITICAL))
+ ae->non_clear_duration += t->non_clear_duration;
+
+ health_alarm_log_save(host, t);
+ }
+
+ // no need to continue
+ break;
+ }
+ }
+ netdata_rwlock_unlock(&host->health_log.alarm_log_rwlock);
+
+ health_alarm_log_save(host, ae);
+}
+
+inline void health_alarm_log_free_one_nochecks_nounlink(ALARM_ENTRY *ae) {
+ freez(ae->name);
+ freez(ae->chart);
+ freez(ae->family);
+ freez(ae->exec);
+ freez(ae->recipient);
+ freez(ae->source);
+ freez(ae->units);
+ freez(ae->info);
+ freez(ae->old_value_string);
+ freez(ae->new_value_string);
+ freez(ae);
+}
+
+inline void health_alarm_log_free(RRDHOST *host) {
+ rrdhost_check_wrlock(host);
+
+ netdata_rwlock_wrlock(&host->health_log.alarm_log_rwlock);
+
+ ALARM_ENTRY *ae;
+ while((ae = host->health_log.alarms)) {
+ host->health_log.alarms = ae->next;
+ health_alarm_log_free_one_nochecks_nounlink(ae);
+ }
+
+ netdata_rwlock_unlock(&host->health_log.alarm_log_rwlock);
+}
diff --git a/health/notifications/Makefile.am b/health/notifications/Makefile.am
new file mode 100644
index 0000000..e6b4213
--- /dev/null
+++ b/health/notifications/Makefile.am
@@ -0,0 +1,51 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+CLEANFILES = \
+ alarm-notify.sh \
+ $(NULL)
+
+include $(top_srcdir)/build/subst.inc
+SUFFIXES = .in
+
+dist_libconfig_DATA = \
+ health_alarm_notify.conf \
+ health_email_recipients.conf \
+ $(NULL)
+
+dist_plugins_SCRIPTS = \
+ alarm-notify.sh \
+ alarm-email.sh \
+ alarm-test.sh \
+ $(NULL)
+
+dist_noinst_DATA = \
+ alarm-notify.sh.in \
+ README.md \
+ $(NULL)
+
+include alerta/Makefile.inc
+include awssns/Makefile.inc
+include discord/Makefile.inc
+include email/Makefile.inc
+include flock/Makefile.inc
+include hangouts/Makefile.inc
+include irc/Makefile.inc
+include kavenegar/Makefile.inc
+include messagebird/Makefile.inc
+include opsgenie/Makefile.inc
+include pagerduty/Makefile.inc
+include pushbullet/Makefile.inc
+include pushover/Makefile.inc
+include rocketchat/Makefile.inc
+include slack/Makefile.inc
+include smstools3/Makefile.inc
+include stackpulse/Makefile.inc
+include syslog/Makefile.inc
+include telegram/Makefile.inc
+include twilio/Makefile.inc
+include web/Makefile.inc
+include matrix/Makefile.inc
+include custom/Makefile.inc
diff --git a/health/notifications/README.md b/health/notifications/README.md
new file mode 100644
index 0000000..5354198
--- /dev/null
+++ b/health/notifications/README.md
@@ -0,0 +1,86 @@
+<!--
+title: "Alarm notifications"
+description: "Reference documentation for Netdata's alarm notification feature, which supports dozens of endpoints, user roles, and more."
+custom_edit_url: https://github.com/netdata/netdata/edit/master/health/notifications/README.md
+-->
+
+# Alarm notifications
+
+The `exec` line in an alarm's health configuration defines an external script that will be called when
+the alarm is triggered. The default script is `alarm-notify.sh`.
+
+You can change the default script globally by editing `/etc/netdata/netdata.conf`.
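+
+For example, the relevant setting typically lives in the `[health]` section of `netdata.conf` (the path below is illustrative and varies per install):
+
+```conf
+[health]
+    script to execute on alarm = /usr/libexec/netdata/plugins.d/alarm-notify.sh
+```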
+
+`alarm-notify.sh` is capable of sending notifications:
+
+- to multiple recipients
+- using multiple notification methods
+- filtering severity per recipient
+
+It uses **roles**, such as `sysadmin`, `webmaster`, and `dba`.
+
+Each alarm is assigned to one or more roles, using the `to` line of the alarm configuration. `alarm-notify.sh` then uses
+its own configuration file, `/etc/netdata/health_alarm_notify.conf`. To edit it on your system, run
+`/etc/netdata/edit-config health_alarm_notify.conf` and set the destination addresses for each notification
+method.
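+
+For example, a hypothetical alarm in `health.d/` could be routed to the `sysadmin` role through its `to` line (all values here are illustrative):
+
+```conf
+ alarm: example_ram_usage
+    on: system.ram
+  warn: $this > 80
+    to: sysadmin
+```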
+
+Each role may have one or more destinations.
+
+So, for example, the `sysadmin` role may send:
+
+1. emails to admin1@example.com and admin2@example.com
+2. pushover.net notifications to USERTOKENS `A`, `B` and `C`
+3. pushbullet.com push notifications to admin1@example.com and admin2@example.com
+4. messages to the slack.com channels `#alarms` and `#systems`
+5. messages to the Discord channels `#alarms` and `#systems`
+
+## Configuration
+
+Edit `/etc/netdata/health_alarm_notify.conf` by running `/etc/netdata/edit-config health_alarm_notify.conf`:
+
+- settings per notification method:
+
+  All notification methods except email require some configuration
+  (e.g. API keys, tokens, destination rooms, channels, etc.).
+
+- **recipients** per **role** per **notification method**
+
+```sh
+grep sysadmin /etc/netdata/health_alarm_notify.conf
+
+role_recipients_email[sysadmin]="${DEFAULT_RECIPIENT_EMAIL}"
+role_recipients_pushover[sysadmin]="${DEFAULT_RECIPIENT_PUSHOVER}"
+role_recipients_pushbullet[sysadmin]="${DEFAULT_RECIPIENT_PUSHBULLET}"
+role_recipients_telegram[sysadmin]="${DEFAULT_RECIPIENT_TELEGRAM}"
+role_recipients_slack[sysadmin]="${DEFAULT_RECIPIENT_SLACK}"
+...
+```
+
+## Testing Notifications
+
+You can run the following commands by hand to test your alarm configuration:
+
+```sh
+# become user netdata
+su -s /bin/bash netdata
+
+# enable debugging info on the console
+export NETDATA_ALARM_NOTIFY_DEBUG=1
+
+# send test alarms to sysadmin
+/usr/libexec/netdata/plugins.d/alarm-notify.sh test
+
+# send test alarms to any role
+/usr/libexec/netdata/plugins.d/alarm-notify.sh test "ROLE"
+```
+
+Note that in versions before 1.16, the plugins.d directory may be installed in a different location on certain operating systems (e.g. under `/usr/lib/netdata`). You can always find the location of the `alarm-notify.sh` script in `netdata.conf`.
+
+If you need to dig even deeper, you can trace the execution with `bash -x`. Note that in test mode, `alarm-notify.sh` calls itself with many more arguments. So, first run:
+
+```sh
+bash -x /usr/libexec/netdata/plugins.d/alarm-notify.sh test
+```
+
+Then look in the output for the `alarm-notify.sh` calls, and run the one you want to trace with `bash -x`.
diff --git a/health/notifications/alarm-email.sh b/health/notifications/alarm-email.sh
new file mode 100755
index 0000000..69c4c3f
--- /dev/null
+++ b/health/notifications/alarm-email.sh
@@ -0,0 +1,7 @@
+#!/usr/bin/env bash
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# OBSOLETE - REPLACED WITH
+# alarm-notify.sh
+
+${0/alarm-email.sh/alarm-notify.sh} "${@}"
diff --git a/health/notifications/alarm-notify.sh.in b/health/notifications/alarm-notify.sh.in
new file mode 100755
index 0000000..3bf8db5
--- /dev/null
+++ b/health/notifications/alarm-notify.sh.in
@@ -0,0 +1,2759 @@
+#!/usr/bin/env bash
+#shellcheck source=/dev/null disable=SC2086,SC2154
+
+# netdata
+# real-time performance and health monitoring, done right!
+# (C) 2017 Costa Tsaousis <costa@tsaousis.gr>
+# SPDX-License-Identifier: GPL-3.0-or-later
+#
+# Script to send alarm notifications for netdata
+#
+# Features:
+# - multiple notification methods
+# - multiple roles per alarm
+# - multiple recipients per role
+# - severity filtering per recipient
+#
+# Supported notification methods:
+# - emails by @ktsaou
+# - slack.com notifications by @ktsaou
+# - alerta.io notifications by @kattunga
+# - discordapp.com notifications by @lowfive
+# - pushover.net notifications by @ktsaou
+# - pushbullet.com push notifications by Tiago Peralta @tperalta82 #1070
+# - telegram.org notifications by @hashworks #1002
+# - twilio.com notifications by Levi Blaney @shadycuz #1211
+# - kafka notifications by @ktsaou #1342
+# - pagerduty.com notifications by Jim Cooley @jimcooley #1373
+# - messagebird.com notifications by @tech_no_logical #1453
+# - hipchat notifications by @ktsaou #1561
+# - fleep notifications by @Ferroin
+# - prowlapp.com notifications by @Ferroin
+# - irc notifications by @manosf
+# - custom notifications by @ktsaou
+# - syslog messages by @Ferroin
+#  - Microsoft Teams notification by @tioumen
+# - RocketChat notifications by @Hermsi1337 #3777
+# - Google Hangouts Chat notifications by @EnzoAkira and @hendrikhofstadt
+# - Dynatrace Event by @illumine
+# - Stackpulse Event by @thiagoftsm
+#  - Opsgenie by @thiagoftsm #9858
+
+# -----------------------------------------------------------------------------
+# testing notifications
+
+if { [ "${1}" = "test" ] || [ "${2}" = "test" ]; } && [ "${#}" -le 2 ]; then
+ if [ "${2}" = "test" ]; then
+ recipient="${1}"
+ else
+ recipient="${2}"
+ fi
+
+ [ -z "${recipient}" ] && recipient="sysadmin"
+
+ id=1
+ last="CLEAR"
+ test_res=0
+ for x in "WARNING" "CRITICAL" "CLEAR"; do
+ echo >&2
+ echo >&2 "# SENDING TEST ${x} ALARM TO ROLE: ${recipient}"
+
+ "${0}" "${recipient}" "$(hostname)" 1 1 "${id}" "$(date +%s)" "test_alarm" "test.chart" "test.family" "${x}" "${last}" 100 90 "${0}" 1 $((0 + id)) "units" "this is a test alarm to verify notifications work" "new value" "old value" "evaluated expression" "expression variable values" 0 0
+ #shellcheck disable=SC2181
+ if [ $? -ne 0 ]; then
+ echo >&2 "# FAILED"
+ test_res=1
+ else
+ echo >&2 "# OK"
+ fi
+
+ last="${x}"
+ id=$((id + 1))
+ done
+
+ exit $test_res
+fi
+
+export PATH="${PATH}:/sbin:/usr/sbin:/usr/local/sbin"
+export LC_ALL=C
+
+# -----------------------------------------------------------------------------
+
+PROGRAM_NAME="$(basename "${0}")"
+
+logdate() {
+ date "+%Y-%m-%d %H:%M:%S"
+}
+
+log() {
+ local status="${1}"
+ shift
+
+ echo >&2 "$(logdate): ${PROGRAM_NAME}: ${status}: ${*}"
+
+}
+
+warning() {
+ log WARNING "${@}"
+}
+
+error() {
+ log ERROR "${@}"
+}
+
+info() {
+ log INFO "${@}"
+}
+
+fatal() {
+ log FATAL "${@}"
+ exit 1
+}
+
+debug=${NETDATA_ALARM_NOTIFY_DEBUG-0}
+debug() {
+ [ "${debug}" = "1" ] && log DEBUG "${@}"
+}
+
+docurl() {
+ if [ -z "${curl}" ]; then
+    error "curl is unset."
+ return 1
+ fi
+
+ if [ "${debug}" = "1" ]; then
+ echo >&2 "--- BEGIN curl command ---"
+ printf >&2 "%q " ${curl} "${@}"
+ echo >&2
+ echo >&2 "--- END curl command ---"
+
+ local out code ret
+ out=$(mktemp /tmp/netdata-health-alarm-notify-XXXXXXXX)
+ code=$(${curl} ${curl_options} --write-out "%{http_code}" --output "${out}" --silent --show-error "${@}")
+ ret=$?
+ echo >&2 "--- BEGIN received response ---"
+ cat >&2 "${out}"
+ echo >&2
+ echo >&2 "--- END received response ---"
+ echo >&2 "RECEIVED HTTP RESPONSE CODE: ${code}"
+ rm "${out}"
+ echo "${code}"
+ return ${ret}
+ fi
+
+ ${curl} ${curl_options} --write-out "%{http_code}" --output /dev/null --silent --show-error "${@}"
+ return $?
+}
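+
+# Typical use (illustrative): httpcode=$(docurl -X POST --data "${payload}" "${url}")
+# docurl prints only the HTTP status code on stdout; the response body is
+# discarded (or dumped to stderr when debug mode is enabled).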
+
+# -----------------------------------------------------------------------------
+# List of all the notification mechanisms we support.
+# Used in a couple of places to write more compact code.
+
+method_names="
+email
+pushover
+pushbullet
+telegram
+slack
+alerta
+flock
+discord
+hipchat
+twilio
+messagebird
+pd
+fleep
+syslog
+custom
+msteam
+kavenegar
+prowl
+irc
+awssns
+rocketchat
+sms
+hangouts
+dynatrace
+matrix
+"
+
+# -----------------------------------------------------------------------------
+# this is to be overwritten by the config file
+
+custom_sender() {
+ info "not sending custom notification for ${status} of '${host}.${chart}.${name}'"
+}
+
+# -----------------------------------------------------------------------------
+
+# check for BASH v4+ (required for associative arrays)
+if [ ${BASH_VERSINFO[0]} -lt 4 ]; then
+ fatal "BASH version 4 or later is required (this is ${BASH_VERSION})."
+fi
+
+# -----------------------------------------------------------------------------
+# defaults to allow running this script by hand
+
+[ -z "${NETDATA_USER_CONFIG_DIR}" ] && NETDATA_USER_CONFIG_DIR="@configdir_POST@"
+[ -z "${NETDATA_STOCK_CONFIG_DIR}" ] && NETDATA_STOCK_CONFIG_DIR="@libconfigdir_POST@"
+[ -z "${NETDATA_CACHE_DIR}" ] && NETDATA_CACHE_DIR="@cachedir_POST@"
+[ -z "${NETDATA_REGISTRY_URL}" ] && NETDATA_REGISTRY_URL="https://registry.my-netdata.io"
+[ -z "${NETDATA_REGISTRY_CLOUD_BASE_URL}" ] && NETDATA_REGISTRY_CLOUD_BASE_URL="https://app.netdata.cloud"
+
+# -----------------------------------------------------------------------------
+# parse command line parameters
+
+if [[ ${1} = "unittest" ]]; then
+ unittest=1 # enable unit testing mode
+ roles="${2}" # the role that should be used for unit testing
+ cfgfile="${3}" # the location of the config file to use for unit testing
+ status="${4}" # the current status : REMOVED, UNINITIALIZED, UNDEFINED, CLEAR, WARNING, CRITICAL
+ old_status="${5}" # the previous status: REMOVED, UNINITIALIZED, UNDEFINED, CLEAR, WARNING, CRITICAL
+else
+ roles="${1}" # the roles that should be notified for this event
+  args_host="${2}"       # the host that generated this event
+ unique_id="${3}" # the unique id of this event
+ alarm_id="${4}" # the unique id of the alarm that generated this event
+ event_id="${5}" # the incremental id of the event, for this alarm id
+ when="${6}" # the timestamp this event occurred
+ name="${7}" # the name of the alarm, as given in netdata health.d entries
+ chart="${8}" # the name of the chart (type.id)
+ family="${9}" # the family of the chart
+ status="${10}" # the current status : REMOVED, UNINITIALIZED, UNDEFINED, CLEAR, WARNING, CRITICAL
+ old_status="${11}" # the previous status: REMOVED, UNINITIALIZED, UNDEFINED, CLEAR, WARNING, CRITICAL
+ value="${12}" # the current value of the alarm
+ old_value="${13}" # the previous value of the alarm
+  src="${14}"            # the line number and file where the alarm has been configured
+ duration="${15}" # the duration in seconds of the previous alarm state
+ non_clear_duration="${16}" # the total duration in seconds this is/was non-clear
+ units="${17}" # the units of the value
+ info="${18}" # a short description of the alarm
+ value_string="${19}" # friendly value (with units)
+ # shellcheck disable=SC2034
+ # variable is unused, but https://github.com/netdata/netdata/pull/5164#discussion_r255572947
+ old_value_string="${20}" # friendly old value (with units), previously named "old_value_string"
+ calc_expression="${21}" # contains the expression that was evaluated to trigger the alarm
+ calc_param_values="${22}" # the values of the parameters in the expression, at the time of the evaluation
+ total_warnings="${23}" # Total number of alarms in WARNING state
+ total_critical="${24}" # Total number of alarms in CRITICAL state
+fi
+
+# -----------------------------------------------------------------------------
+# find a suitable hostname to use, if netdata did not supply a hostname
+
+if [ -z "${args_host}" ]; then
+ this_host=$(hostname -s 2>/dev/null)
+ host="${this_host}"
+ args_host="${this_host}"
+else
+ host="${args_host}"
+fi
+
+# -----------------------------------------------------------------------------
+# screen out statuses for which we don't need to send a notification
+
+# don't do anything if this is not WARNING, CRITICAL or CLEAR
+if [ "${status}" != "WARNING" ] && [ "${status}" != "CRITICAL" ] && [ "${status}" != "CLEAR" ]; then
+ info "not sending notification for ${status} of '${host}.${chart}.${name}'"
+ exit 1
+fi
+
+# don't do anything if this is CLEAR, but it was not WARNING or CRITICAL
+if [ "${clear_alarm_always}" != "YES" ] && [ "${old_status}" != "WARNING" ] && [ "${old_status}" != "CRITICAL" ] && [ "${status}" = "CLEAR" ]; then
+ info "not sending notification for ${status} of '${host}.${chart}.${name}' (last status was ${old_status})"
+ exit 1
+fi
+
+# -----------------------------------------------------------------------------
+# load configuration
+
+# By default, fetch images from the global public registry.
+# This is the default because all notification methods need to download
+# images via the Internet, and private registries might not be reachable.
+# This can be overridden in the configuration file.
+images_base_url="https://registry.my-netdata.io"
+
+# curl options to use
+curl_options=""
+
+# hostname handling
+use_fqdn="NO"
+
+# needed commands
+# if empty they will be searched in the system path
+curl=
+sendmail=
+
+# enable / disable features
+for method_name in ${method_names^^}; do
+ declare SEND_${method_name}="YES"
+ declare DEFAULT_RECIPIENT_${method_name}
+done
+
+for method_name in ${method_names}; do
+ declare -A role_recipients_${method_name}
+done
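+
+# e.g. for the method name "slack", the two loops above declare SEND_SLACK="YES",
+# an (empty) DEFAULT_RECIPIENT_SLACK, and an associative array role_recipients_slack.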
+
+# slack configs
+SLACK_WEBHOOK_URL=
+
+# Microsoft Team configs
+MSTEAM_WEBHOOK_URL=
+
+# rocketchat configs
+ROCKETCHAT_WEBHOOK_URL=
+
+# alerta configs
+ALERTA_WEBHOOK_URL=
+ALERTA_API_KEY=
+
+# flock configs
+FLOCK_WEBHOOK_URL=
+
+# discord configs
+DISCORD_WEBHOOK_URL=
+
+# pushover configs
+PUSHOVER_APP_TOKEN=
+
+# pushbullet configs
+PUSHBULLET_ACCESS_TOKEN=
+PUSHBULLET_SOURCE_DEVICE=
+
+# twilio configs
+TWILIO_ACCOUNT_SID=
+TWILIO_ACCOUNT_TOKEN=
+TWILIO_NUMBER=
+
+# hipchat configs
+HIPCHAT_SERVER=
+HIPCHAT_AUTH_TOKEN=
+
+# messagebird configs
+MESSAGEBIRD_ACCESS_KEY=
+MESSAGEBIRD_NUMBER=
+
+# kavenegar configs
+KAVENEGAR_API_KEY=
+KAVENEGAR_SENDER=
+
+# telegram configs
+TELEGRAM_BOT_TOKEN=
+
+# kafka configs
+SEND_KAFKA="YES"
+KAFKA_URL=
+KAFKA_SENDER_IP=
+
+# pagerduty.com configs
+PD_SERVICE_KEY=
+USE_PD_VERSION=
+
+# fleep.io configs
+FLEEP_SENDER="${host}"
+
+# Amazon SNS configs
+AWSSNS_MESSAGE_FORMAT=
+
+# Matrix configs
+MATRIX_HOMESERVER=
+MATRIX_ACCESSTOKEN=
+
+# syslog configs
+SYSLOG_FACILITY=
+
+# email configs
+EMAIL_SENDER=
+EMAIL_CHARSET=$(locale charmap 2>/dev/null)
+EMAIL_THREADING=
+EMAIL_PLAINTEXT_ONLY=
+
+# irc configs
+IRC_NICKNAME=
+IRC_REALNAME=
+IRC_NETWORK=
+
+# hangouts configs
+declare -A HANGOUTS_WEBHOOK_URI
+declare -A HANGOUTS_WEBHOOK_THREAD
+
+# dynatrace configs
+DYNATRACE_SPACE=
+DYNATRACE_SERVER=
+DYNATRACE_TOKEN=
+DYNATRACE_TAG_VALUE=
+DYNATRACE_ANNOTATION_TYPE=
+DYNATRACE_EVENT=
+SEND_DYNATRACE=
+
+# stackpulse configs
+STACKPULSE_WEBHOOK=
+
+# opsgenie configs
+OPSGENIE_API_KEY=
+
+# load the stock and user configuration files
+# these will overwrite the variables above
+
+if [ ${unittest} ]; then
+  if ! source "${cfgfile}"; then
+ error "Failed to load requested config file."
+ exit 1
+ fi
+else
+ for CONFIG in "${NETDATA_STOCK_CONFIG_DIR}/health_alarm_notify.conf" "${NETDATA_USER_CONFIG_DIR}/health_alarm_notify.conf"; do
+ if [ -f "${CONFIG}" ]; then
+ debug "Loading config file '${CONFIG}'..."
+ source "${CONFIG}" || error "Failed to load config file '${CONFIG}'."
+ else
+ warning "Cannot find file '${CONFIG}'."
+ fi
+ done
+fi
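+
+# e.g. if both the stock and the user file set SLACK_WEBHOOK_URL, the value from
+# the user file wins, because it is sourced last.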
+
+OPSGENIE_API_URL=${OPSGENIE_API_URL:-"https://api.opsgenie.com"}
+
+# If we didn't autodetect the character set for e-mail and it wasn't
+# set by the user, we need to set it to a reasonable default. UTF-8
+# should be correct for almost all modern UNIX systems.
+if [ -z "${EMAIL_CHARSET}" ]; then
+ EMAIL_CHARSET="UTF-8"
+fi
+
+# If we've been asked to use FQDNs for the URLs in the alarm, do so,
+# unless we're sending an alarm for a child system whose FQDN we can't
+# get easily.
+if [ "${use_fqdn}" = "YES" ] && [ "${host}" = "$(hostname -s 2>/dev/null)" ]; then
+ host="$(hostname -f 2>/dev/null)"
+fi
+
+# -----------------------------------------------------------------------------
+# filter a recipient based on alarm event severity
+
+filter_recipient_by_criticality() {
+ local method="${1}" x="${2}" r s
+ shift
+
+ r="${x/|*/}" # the recipient
+ s="${x/*|/}" # the severity required for notifying this recipient
+
+ # no severity filtering for this person
+ [ "${r}" = "${s}" ] && return 0
+
+ # the severity is invalid
+ s="${s^^}"
+ if [ "${s}" != "CRITICAL" ]; then
+ error "SEVERITY FILTERING for ${x} VIA ${method}: invalid severity '${s,,}', only 'critical' is supported."
+ return 0
+ fi
+
+ # create the status tracking directory for this user
+ [ ! -d "${NETDATA_CACHE_DIR}/alarm-notify/${method}/${r}" ] &&
+ mkdir -p "${NETDATA_CACHE_DIR}/alarm-notify/${method}/${r}"
+
+ case "${status}" in
+ CRITICAL)
+    # make sure the recipient will get future notifications for this alarm too
+ touch "${NETDATA_CACHE_DIR}/alarm-notify/${method}/${r}/${alarm_id}"
+ debug "SEVERITY FILTERING for ${x} VIA ${method}: ALLOW: the alarm is CRITICAL (will now receive next status change)"
+ return 0
+ ;;
+
+ WARNING)
+ if [ -f "${NETDATA_CACHE_DIR}/alarm-notify/${method}/${r}/${alarm_id}" ]; then
+      # we do not remove the file, so that the recipient will get future notifications of this alarm
+ debug "SEVERITY FILTERING for ${x} VIA ${method}: ALLOW: recipient has been notified for this alarm in the past (will still receive next status change)"
+ return 0
+ fi
+ ;;
+
+ *)
+ if [ -f "${NETDATA_CACHE_DIR}/alarm-notify/${method}/${r}/${alarm_id}" ]; then
+      # remove the file, so that the recipient will only receive notifications for CRITICAL states for this alarm
+ rm "${NETDATA_CACHE_DIR}/alarm-notify/${method}/${r}/${alarm_id}"
+ debug "SEVERITY FILTERING for ${x} VIA ${method}: ALLOW: recipient has been notified for this alarm (will only receive CRITICAL notifications from now on)"
+ return 0
+ fi
+ ;;
+ esac
+
+ debug "SEVERITY FILTERING for ${x} VIA ${method}: BLOCK: recipient should not receive this notification"
+ return 1
+}
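+
+# Example (illustrative): with the recipient entry "ops@example.com|critical",
+# ops@example.com is notified when the alarm turns CRITICAL and - thanks to the
+# tracking file created above - also for the subsequent WARNING and the final
+# CLEAR of that same alarm.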
+
+# -----------------------------------------------------------------------------
+# verify the delivery methods supported
+
+# check slack
+[ -z "${SLACK_WEBHOOK_URL}" ] && SEND_SLACK="NO"
+
+# check rocketchat
+[ -z "${ROCKETCHAT_WEBHOOK_URL}" ] && SEND_ROCKETCHAT="NO"
+
+# check alerta
+[ -z "${ALERTA_WEBHOOK_URL}" ] && SEND_ALERTA="NO"
+
+# check flock
+[ -z "${FLOCK_WEBHOOK_URL}" ] && SEND_FLOCK="NO"
+
+# check discord
+[ -z "${DISCORD_WEBHOOK_URL}" ] && SEND_DISCORD="NO"
+
+# check pushover
+[ -z "${PUSHOVER_APP_TOKEN}" ] && SEND_PUSHOVER="NO"
+
+# check pushbullet
+[ -z "${PUSHBULLET_ACCESS_TOKEN}" ] && SEND_PUSHBULLET="NO"
+
+# check twilio
+{ [ -z "${TWILIO_ACCOUNT_TOKEN}" ] || [ -z "${TWILIO_ACCOUNT_SID}" ] || [ -z "${TWILIO_NUMBER}" ]; } && SEND_TWILIO="NO"
+
+# check hipchat
+[ -z "${HIPCHAT_AUTH_TOKEN}" ] && SEND_HIPCHAT="NO"
+
+# check messagebird
+{ [ -z "${MESSAGEBIRD_ACCESS_KEY}" ] || [ -z "${MESSAGEBIRD_NUMBER}" ]; } && SEND_MESSAGEBIRD="NO"
+
+# check kavenegar
+{ [ -z "${KAVENEGAR_API_KEY}" ] || [ -z "${KAVENEGAR_SENDER}" ]; } && SEND_KAVENEGAR="NO"
+
+# check telegram
+[ -z "${TELEGRAM_BOT_TOKEN}" ] && SEND_TELEGRAM="NO"
+
+# check kafka
+{ [ -z "${KAFKA_URL}" ] || [ -z "${KAFKA_SENDER_IP}" ]; } && SEND_KAFKA="NO"
+
+# check irc
+[ -z "${IRC_NETWORK}" ] && SEND_IRC="NO"
+
+# check hangouts
+[ ${#HANGOUTS_WEBHOOK_URI[@]} -eq 0 ] && SEND_HANGOUTS="NO"
+
+# check fleep
+#shellcheck disable=SC2153
+{ [ -z "${FLEEP_SERVER}" ] || [ -z "${FLEEP_SENDER}" ]; } && SEND_FLEEP="NO"
+
+# check dynatrace
+{ [ -z "${DYNATRACE_SPACE}" ] ||
+ [ -z "${DYNATRACE_SERVER}" ] ||
+ [ -z "${DYNATRACE_TOKEN}" ] ||
+ [ -z "${DYNATRACE_TAG_VALUE}" ] ||
+ [ -z "${DYNATRACE_EVENT}" ]; } && SEND_DYNATRACE="NO"
+
+# check opsgenie
+[ -z "${OPSGENIE_API_KEY}" ] && SEND_OPSGENIE="NO"
+
+# check matrix
+{ [ -z "${MATRIX_HOMESERVER}" ] || [ -z "${MATRIX_ACCESSTOKEN}" ]; } && SEND_MATRIX="NO"
+
+# check stackpulse
+[ -z "${STACKPULSE_WEBHOOK}" ] && SEND_STACKPULSE="NO"
+
+if [ "${SEND_PUSHOVER}" = "YES" ] ||
+ [ "${SEND_SLACK}" = "YES" ] ||
+ [ "${SEND_ROCKETCHAT}" = "YES" ] ||
+ [ "${SEND_ALERTA}" = "YES" ] ||
+ [ "${SEND_PD}" = "YES" ] ||
+ [ "${SEND_FLOCK}" = "YES" ] ||
+ [ "${SEND_DISCORD}" = "YES" ] ||
+ [ "${SEND_HIPCHAT}" = "YES" ] ||
+ [ "${SEND_TWILIO}" = "YES" ] ||
+ [ "${SEND_MESSAGEBIRD}" = "YES" ] ||
+ [ "${SEND_KAVENEGAR}" = "YES" ] ||
+ [ "${SEND_TELEGRAM}" = "YES" ] ||
+ [ "${SEND_PUSHBULLET}" = "YES" ] ||
+ [ "${SEND_KAFKA}" = "YES" ] ||
+ [ "${SEND_FLEEP}" = "YES" ] ||
+ [ "${SEND_PROWL}" = "YES" ] ||
+ [ "${SEND_HANGOUTS}" = "YES" ] ||
+ [ "${SEND_MATRIX}" = "YES" ] ||
+ [ "${SEND_CUSTOM}" = "YES" ] ||
+ [ "${SEND_MSTEAM}" = "YES" ] ||
+ [ "${SEND_DYNATRACE}" = "YES" ] ||
+ [ "${SEND_STACKPULSE}" = "YES" ] ||
+ [ "${SEND_OPSGENIE}" = "YES" ]; then
+ # if we need curl, check for the curl command
+ if [ -z "${curl}" ]; then
+ curl="$(command -v curl 2>/dev/null)"
+ fi
+ if [ -z "${curl}" ]; then
+ error "Cannot find curl command in the system path. Disabling all curl based notifications."
+ SEND_PUSHOVER="NO"
+ SEND_PUSHBULLET="NO"
+ SEND_TELEGRAM="NO"
+ SEND_SLACK="NO"
+ SEND_MSTEAM="NO"
+ SEND_ROCKETCHAT="NO"
+ SEND_ALERTA="NO"
+ SEND_PD="NO"
+ SEND_FLOCK="NO"
+ SEND_DISCORD="NO"
+ SEND_TWILIO="NO"
+ SEND_HIPCHAT="NO"
+ SEND_MESSAGEBIRD="NO"
+ SEND_KAVENEGAR="NO"
+ SEND_KAFKA="NO"
+ SEND_FLEEP="NO"
+ SEND_PROWL="NO"
+ SEND_HANGOUTS="NO"
+ SEND_MATRIX="NO"
+ SEND_CUSTOM="NO"
+ SEND_DYNATRACE="NO"
+ SEND_STACKPULSE="NO"
+ SEND_OPSGENIE="NO"
+ fi
+fi
+
+if [ "${SEND_SMS}" = "YES" ]; then
+ if [ -z "${sendsms}" ]; then
+ sendsms="$(command -v sendsms 2>/dev/null)"
+ fi
+ if [ -z "${sendsms}" ]; then
+ SEND_SMS="NO"
+ fi
+fi
+# if we need sendmail, check for the sendmail command
+if [ "${SEND_EMAIL}" = "YES" ] && [ -z "${sendmail}" ]; then
+ sendmail="$(command -v sendmail 2>/dev/null)"
+ if [ -z "${sendmail}" ]; then
+ debug "Cannot find sendmail command in the system path. Disabling email notifications."
+ SEND_EMAIL="NO"
+ fi
+fi
+
+# if we need logger, check for the logger command
+if [ "${SEND_SYSLOG}" = "YES" ] && [ -z "${logger}" ]; then
+ logger="$(command -v logger 2>/dev/null)"
+ if [ -z "${logger}" ]; then
+ debug "Cannot find logger command in the system path. Disabling syslog notifications."
+ SEND_SYSLOG="NO"
+ fi
+fi
+
+# if we need aws, check for the aws command
+if [ "${SEND_AWSSNS}" = "YES" ] && [ -z "${aws}" ]; then
+ aws="$(command -v aws 2>/dev/null)"
+ if [ -z "${aws}" ]; then
+ debug "Cannot find aws command in the system path. Disabling Amazon SNS notifications."
+ SEND_AWSSNS="NO"
+ fi
+fi
+
+# -----------------------------------------------------------------------------
+# find the recipients' addresses per method
+
+# netdata may call us with multiple roles, and roles may have multiple,
+# possibly overlapping, recipients - so, here we find the unique recipients.
+for method_name in ${method_names}; do
+ send_var="SEND_${method_name^^}"
+ if [ "${!send_var}" = "NO" ]; then
+ continue
+ fi
+
+ declare -A arr_var=()
+
+ for x in ${roles//,/ }; do
+ # the roles 'silent' and 'disabled' mean:
+ # don't send a notification for this role
+ if [ "${x}" = "silent" ] || [ "${x}" = "disabled" ]; then
+ continue
+ fi
+
+ role_recipients="role_recipients_${method_name}[$x]"
+ default_recipient_var="DEFAULT_RECIPIENT_${method_name^^}"
+
+ a="${!role_recipients}"
+ [ -z "${a}" ] && a="${!default_recipient_var}"
+ for r in ${a//,/ }; do
+ [ "${r}" != "disabled" ] && filter_recipient_by_criticality ${method_name} "${r}" && arr_var[${r/|*/}]="1"
+ done
+ done
+
+ # build the list of recipients
+ to_var="to_${method_name}"
+ declare to_${method_name}="${!arr_var[*]}"
+
+ [ -z "${!to_var}" ] && declare ${send_var}="NO"
+done
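+
+# Example (illustrative): for roles="sysadmin,dba" where both roles resolve to
+# "admin@example.com" for email, the associative array above deduplicates the
+# address, so to_email ends up containing it only once.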
+
+# -----------------------------------------------------------------------------
+# handle fixup of the email recipient list.
+
+fix_to_email() {
+ to_email=
+ while [ -n "${1}" ]; do
+ [ -n "${to_email}" ] && to_email="${to_email}, "
+ to_email="${to_email}${1}"
+ shift 1
+ done
+}
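+
+# e.g. fix_to_email a@example.com b@example.com
+# sets: to_email="a@example.com, b@example.com"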
+
+# ${to_email} is intentionally unquoted here, so it word-splits into separate arguments
+fix_to_email ${to_email}
+
+# -----------------------------------------------------------------------------
+# handle output if we're running in unit test mode
+if [ ${unittest} ]; then
+ for method_name in ${method_names}; do
+ to_var="to_${method_name}"
+ echo "results: ${method_name}: ${!to_var}"
+ done
+ exit 0
+fi
+
+# -----------------------------------------------------------------------------
+# check that we have at least a method enabled
+proceed=0
+for method in "${SEND_EMAIL}" \
+ "${SEND_PUSHOVER}" \
+ "${SEND_TELEGRAM}" \
+ "${SEND_SLACK}" \
+ "${SEND_ROCKETCHAT}" \
+ "${SEND_ALERTA}" \
+ "${SEND_FLOCK}" \
+ "${SEND_DISCORD}" \
+ "${SEND_TWILIO}" \
+ "${SEND_HIPCHAT}" \
+ "${SEND_MESSAGEBIRD}" \
+ "${SEND_KAVENEGAR}" \
+ "${SEND_PUSHBULLET}" \
+ "${SEND_KAFKA}" \
+ "${SEND_PD}" \
+ "${SEND_FLEEP}" \
+ "${SEND_PROWL}" \
+ "${SEND_MATRIX}" \
+ "${SEND_CUSTOM}" \
+ "${SEND_IRC}" \
+ "${SEND_HANGOUTS}" \
+ "${SEND_AWSSNS}" \
+ "${SEND_SYSLOG}" \
+ "${SEND_SMS}" \
+ "${SEND_MSTEAM}" \
+ "${SEND_DYNATRACE}" \
+ "${SEND_STACKPULSE}" \
+ "${SEND_OPSGENIE}" ; do
+
+ if [ "${method}" == "YES" ]; then
+ proceed=1
+ break
+ fi
+done
+if [ "$proceed" -eq 0 ]; then
+ fatal "All notification methods are disabled. Not sending notification for host '${host}', chart '${chart}' to '${roles}' for '${name}' = '${value}' for status '${status}'."
+fi
+
+# -----------------------------------------------------------------------------
+# get the date the alarm happened
+
+date=$(date --date=@${when} "${date_format}" 2>/dev/null)
+[ -z "${date}" ] && date=$(date "${date_format}" 2>/dev/null)
+[ -z "${date}" ] && date=$(date --date=@${when} 2>/dev/null)
+[ -z "${date}" ] && date=$(date 2>/dev/null)
+
+# ----------------------------------------------------------------------------
+# prepare some extra headers if we've been asked to thread e-mails
+if [ "${SEND_EMAIL}" == "YES" ] && [ "${EMAIL_THREADING}" != "NO" ]; then
+ email_thread_headers="In-Reply-To: <${chart}-${name}@${host}>\\r\\nReferences: <${chart}-${name}@${host}>"
+else
+ email_thread_headers=
+fi
+
+# -----------------------------------------------------------------------------
+# function to URL encode a string
+
+urlencode() {
+ local string="${1}" strlen encoded pos c o
+
+ strlen=${#string}
+ for ((pos = 0; pos < strlen; pos++)); do
+ c=${string:pos:1}
+ case "${c}" in
+ [-_.~a-zA-Z0-9])
+ o="${c}"
+ ;;
+
+ *)
+ printf -v o '%%%02x' "'${c}"
+ ;;
+ esac
+ encoded+="${o}"
+ done
+
+ REPLY="${encoded}"
+ echo "${REPLY}"
+}
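+
+# e.g. urlencode "a b&c" prints "a%20b%26c"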
+
+# -----------------------------------------------------------------------------
+# function to convert a duration in seconds to a human readable duration,
+# using DAYS, HOURS, MINUTES, SECONDS
+
+duration4human() {
+ local s="${1}" d=0 h=0 m=0 ds="day" hs="hour" ms="minute" ss="second" ret
+ d=$((s / 86400))
+ s=$((s - (d * 86400)))
+ h=$((s / 3600))
+ s=$((s - (h * 3600)))
+ m=$((s / 60))
+ s=$((s - (m * 60)))
+
+ if [ ${d} -gt 0 ]; then
+ [ ${m} -ge 30 ] && h=$((h + 1))
+ [ ${d} -gt 1 ] && ds="days"
+ [ ${h} -gt 1 ] && hs="hours"
+ if [ ${h} -gt 0 ]; then
+ ret="${d} ${ds} and ${h} ${hs}"
+ else
+ ret="${d} ${ds}"
+ fi
+ elif [ ${h} -gt 0 ]; then
+ [ ${s} -ge 30 ] && m=$((m + 1))
+ [ ${h} -gt 1 ] && hs="hours"
+ [ ${m} -gt 1 ] && ms="minutes"
+ if [ ${m} -gt 0 ]; then
+ ret="${h} ${hs} and ${m} ${ms}"
+ else
+ ret="${h} ${hs}"
+ fi
+ elif [ ${m} -gt 0 ]; then
+ [ ${m} -gt 1 ] && ms="minutes"
+ [ ${s} -gt 1 ] && ss="seconds"
+ if [ ${s} -gt 0 ]; then
+ ret="${m} ${ms} and ${s} ${ss}"
+ else
+ ret="${m} ${ms}"
+ fi
+ else
+ [ ${s} -gt 1 ] && ss="seconds"
+ ret="${s} ${ss}"
+ fi
+
+ REPLY="${ret}"
+ echo "${REPLY}"
+}
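+
+# e.g. duration4human 3661 prints "1 hour and 1 minute"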
+
+# -----------------------------------------------------------------------------
+# email sender
+
+send_email() {
+ local ret opts=() sender_email="${EMAIL_SENDER}" sender_name=
+ if [ "${SEND_EMAIL}" = "YES" ]; then
+
+ if [ -n "${EMAIL_SENDER}" ]; then
+ if [[ ${EMAIL_SENDER} =~ ^\".*\"\ \<.*\>$ ]]; then
+ # the name includes double quotes
+ sender_email="$(echo "${EMAIL_SENDER}" | cut -d '<' -f 2 | cut -d '>' -f 1)"
+ sender_name="$(echo "${EMAIL_SENDER}" | cut -d '"' -f 2)"
+ elif [[ ${EMAIL_SENDER} =~ ^\'.*\'\ \<.*\>$ ]]; then
+ # the name includes single quotes
+ sender_email="$(echo "${EMAIL_SENDER}" | cut -d '<' -f 2 | cut -d '>' -f 1)"
+ sender_name="$(echo "${EMAIL_SENDER}" | cut -d "'" -f 2)"
+ elif [[ ${EMAIL_SENDER} =~ ^.*\ \<.*\>$ ]]; then
+ # the name does not have any quotes
+ sender_email="$(echo "${EMAIL_SENDER}" | cut -d '<' -f 2 | cut -d '>' -f 1)"
+ sender_name="$(echo "${EMAIL_SENDER}" | cut -d '<' -f 1)"
+ fi
+ fi
+
+ [ -n "${sender_email}" ] && opts+=(-f "${sender_email}")
+ [ -n "${sender_name}" ] && sendmail --help 2>&1 | grep -q "\-F " && opts+=(-F "${sender_name}")
+
+ if [ "${debug}" = "1" ]; then
+ echo >&2 "--- BEGIN sendmail command ---"
+ printf >&2 "%q " "${sendmail}" -t "${opts[@]}"
+ echo >&2
+ echo >&2 "--- END sendmail command ---"
+ fi
+
+ "${sendmail}" -t "${opts[@]}"
+ ret=$?
+
+ if [ ${ret} -eq 0 ]; then
+ info "sent email notification for: ${host} ${chart}.${name} is ${status} to '${to_email}'"
+ return 0
+ else
+ error "failed to send email notification for: ${host} ${chart}.${name} is ${status} to '${to_email}' with error code ${ret}."
+ return 1
+ fi
+ fi
+
+ return 1
+}
+
+# -----------------------------------------------------------------------------
+# pushover sender
+
+send_pushover() {
+ local apptoken="${1}" usertokens="${2}" when="${3}" url="${4}" status="${5}" title="${6}" message="${7}" httpcode sent=0 user priority
+
+ if [ "${SEND_PUSHOVER}" = "YES" ] && [ -n "${apptoken}" ] && [ -n "${usertokens}" ] && [ -n "${title}" ] && [ -n "${message}" ]; then
+
+ # https://pushover.net/api
+ priority=-2
+ case "${status}" in
+ CLEAR) priority=-1 ;; # low priority: no sound or vibration
+ WARNING) priority=0 ;; # normal priority: respect quiet hours
+ CRITICAL) priority=1 ;; # high priority: bypass quiet hours
+ *) priority=-2 ;; # lowest priority: no notification at all
+ esac
+
+ for user in ${usertokens}; do
+ httpcode=$(docurl \
+ --form-string "token=${apptoken}" \
+ --form-string "user=${user}" \
+ --form-string "html=1" \
+ --form-string "title=${title}" \
+ --form-string "message=${message}" \
+ --form-string "timestamp=${when}" \
+ --form-string "url=${url}" \
+ --form-string "url_title=Open netdata dashboard to view the alarm" \
+ --form-string "priority=${priority}" \
+ https://api.pushover.net/1/messages.json)
+
+ if [ "${httpcode}" = "200" ]; then
+ info "sent pushover notification for: ${host} ${chart}.${name} is ${status} to '${user}'"
+ sent=$((sent + 1))
+ else
+ error "failed to send pushover notification for: ${host} ${chart}.${name} is ${status} to '${user}' with HTTP response status code ${httpcode}."
+ fi
+ done
+
+ [ ${sent} -gt 0 ] && return 0
+ fi
+
+ return 1
+}
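+
+# Illustrative call, matching the signature above (these variables are set
+# elsewhere in this script):
+# send_pushover "${PUSHOVER_APP_TOKEN}" "${to_pushover}" "${when}" "${goto_url}" "${status}" "${title}" "${message}"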
+
+# -----------------------------------------------------------------------------
+# pushbullet sender
+
+send_pushbullet() {
+ local userapikey="${1}" source_device="${2}" recipients="${3}" url="${4}" title="${5}" message="${6}" httpcode sent=0 user
+ if [ "${SEND_PUSHBULLET}" = "YES" ] && [ -n "${userapikey}" ] && [ -n "${recipients}" ] && [ -n "${message}" ] && [ -n "${title}" ]; then
+ #https://docs.pushbullet.com/#create-push
+ for user in ${recipients}; do
+ httpcode=$(docurl \
+      --header "Access-Token: ${userapikey}" \
+ --header 'Content-Type: application/json' \
+ --data-binary @<(
+ cat <<EOF
+ {"title": "${title}",
+ "type": "link",
+ "email": "${user}",
+ "body": "$(echo -n ${message})",
+ "url": "${url}",
+ "source_device_iden": "${source_device}"}
+EOF
+ ) "https://api.pushbullet.com/v2/pushes" -X POST)
+
+ if [ "${httpcode}" = "200" ]; then
+ info "sent pushbullet notification for: ${host} ${chart}.${name} is ${status} to '${user}'"
+ sent=$((sent + 1))
+ else
+ error "failed to send pushbullet notification for: ${host} ${chart}.${name} is ${status} to '${user}' with HTTP response status code ${httpcode}."
+ fi
+ done
+
+ [ ${sent} -gt 0 ] && return 0
+ fi
+
+ return 1
+}
+
+# -----------------------------------------------------------------------------
+# kafka sender
+
+send_kafka() {
+ local httpcode sent=0
+ if [ "${SEND_KAFKA}" = "YES" ]; then
+ httpcode=$(docurl -X POST \
+ --data "{host_ip:\"${KAFKA_SENDER_IP}\",when:${when},name:\"${name}\",chart:\"${chart}\",family:\"${family}\",status:\"${status}\",old_status:\"${old_status}\",value:${value},old_value:${old_value},duration:${duration},non_clear_duration:${non_clear_duration},units:\"${units}\",info:\"${info}\"}" \
+ "${KAFKA_URL}")
+
+ if [ "${httpcode}" = "204" ]; then
+ info "sent kafka data for: ${host} ${chart}.${name} is ${status} and ip '${KAFKA_SENDER_IP}'"
+ sent=$((sent + 1))
+ else
+ error "failed to send kafka data for: ${host} ${chart}.${name} is ${status} and ip '${KAFKA_SENDER_IP}' with HTTP response status code ${httpcode}."
+ fi
+
+ [ ${sent} -gt 0 ] && return 0
+ fi
+
+ return 1
+}
+
+# -----------------------------------------------------------------------------
+# pagerduty.com sender
+
+send_pd() {
+ local recipients="${1}" sent=0 severity current_time payload url response_code
+ unset t
+ case ${status} in
+ CLEAR) t='resolve' ; severity='info' ;;
+ WARNING) t='trigger' ; severity='warning' ;;
+ CRITICAL) t='trigger' ; severity='critical' ;;
+ esac
+
+  if [ "${SEND_PD}" = "YES" ] && [ -n "${t}" ]; then
+ if [ "$(uname)" == "Linux" ]; then
+ current_time=$(date -d @${when} +'%Y-%m-%dT%H:%M:%S.000')
+ else
+ current_time=$(date -r ${when} +'%Y-%m-%dT%H:%M:%S.000')
+ fi
+ for PD_SERVICE_KEY in ${recipients}; do
+ d="${status} ${name} = ${value_string} - ${host}, ${family}"
+      if [ "${USE_PD_VERSION}" = "2" ]; then
+ payload="$(
+ cat <<EOF
+ {
+ "payload" : {
+ "summary": "${info:0:1024}",
+ "source" : "${args_host}",
+ "severity" : "${severity}",
+ "timestamp" : "${current_time}",
+ "group" : "${family}",
+ "class" : "${chart}",
+ "custom_details": {
+ "value_w_units": "${value_string}",
+ "when": "${when}",
+ "duration" : "${duration}",
+ "roles": "${roles}",
+ "alarm_id" : "${alarm_id}",
+ "name" : "${name}",
+ "chart" : "${chart}",
+ "family" : "${family}",
+ "status" : "${status}",
+ "old_status" : "${old_status}",
+ "value" : "${value}",
+ "old_value" : "${old_value}",
+ "src" : "${src}",
+ "non_clear_duration" : "${non_clear_duration}",
+ "units" : "${units}",
+ "info" : "${info}"
+ }
+ },
+ "routing_key": "${PD_SERVICE_KEY}",
+ "event_action": "${t}",
+ "dedup_key": "${unique_id}"
+ }
+EOF
+ )"
+ url="https://events.pagerduty.com/v2/enqueue"
+ response_code="202"
+ else
+ payload="$(
+ cat <<EOF
+ {
+ "service_key": "${PD_SERVICE_KEY}",
+ "event_type": "${t}",
+ "incident_key" : "${alarm_id}",
+ "description": "${d}",
+ "details": {
+ "value_w_units": "${value_string}",
+ "when": "${when}",
+ "duration" : "${duration}",
+ "roles": "${roles}",
+ "alarm_id" : "${alarm_id}",
+ "name" : "${name}",
+ "chart" : "${chart}",
+ "family" : "${family}",
+ "status" : "${status}",
+ "old_status" : "${old_status}",
+ "value" : "${value}",
+ "old_value" : "${old_value}",
+ "src" : "${src}",
+ "non_clear_duration" : "${non_clear_duration}",
+ "units" : "${units}",
+ "info" : "${info}"
+ }
+ }
+EOF
+ )"
+ url="https://events.pagerduty.com/generic/2010-04-15/create_event.json"
+ response_code="200"
+ fi
+ httpcode=$(docurl -X POST --data "${payload}" ${url})
+ if [ "${httpcode}" = "${response_code}" ]; then
+        info "sent pagerduty notification for: ${host} ${chart}.${name} is ${status}"
+ sent=$((sent + 1))
+ else
+ error "failed to send pagerduty notification for: ${host} ${chart}.${name} is ${status}, with HTTP response status code ${httpcode}."
+ fi
+ done
+
+ [ ${sent} -gt 0 ] && return 0
+ fi
+
+ return 1
+}
+
+# -----------------------------------------------------------------------------
+# twilio sender
+
+send_twilio() {
+ local accountsid="${1}" accounttoken="${2}" twilionumber="${3}" recipients="${4}" title="${5}" message="${6}" httpcode sent=0 user
+ if [ "${SEND_TWILIO}" = "YES" ] && [ -n "${accountsid}" ] && [ -n "${accounttoken}" ] && [ -n "${twilionumber}" ] && [ -n "${recipients}" ] && [ -n "${message}" ] && [ -n "${title}" ]; then
+ #https://www.twilio.com/packages/labs/code/bash/twilio-sms
+ for user in ${recipients}; do
+ httpcode=$(docurl -X POST \
+ --data-urlencode "From=${twilionumber}" \
+ --data-urlencode "To=${user}" \
+ --data-urlencode "Body=${title} ${message}" \
+ -u "${accountsid}:${accounttoken}" \
+ "https://api.twilio.com/2010-04-01/Accounts/${accountsid}/Messages.json")
+
+ if [ "${httpcode}" = "201" ]; then
+ info "sent Twilio SMS for: ${host} ${chart}.${name} is ${status} to '${user}'"
+ sent=$((sent + 1))
+ else
+ error "failed to send Twilio SMS for: ${host} ${chart}.${name} is ${status} to '${user}' with HTTP response status code ${httpcode}."
+ fi
+ done
+
+ [ ${sent} -gt 0 ] && return 0
+ fi
+
+ return 1
+}
+
+# -----------------------------------------------------------------------------
+# hipchat sender
+
+send_hipchat() {
+ local authtoken="${1}" recipients="${2}" message="${3}" httpcode sent=0 room color msg_format notify
+
+ # remove <small></small> from the message
+ message="${message//<small>/}"
+ message="${message//<\/small>/}"
+
+ if [ "${SEND_HIPCHAT}" = "YES" ] && [ -n "${HIPCHAT_SERVER}" ] && [ -n "${authtoken}" ] && [ -n "${recipients}" ] && [ -n "${message}" ]; then
+ # Valid values: html, text.
+ # Defaults to 'html'.
+ msg_format="html"
+
+ # Background color for message. Valid values: yellow, green, red, purple, gray, random. Defaults to 'yellow'.
+ case "${status}" in
+ WARNING) color="yellow" ;;
+ CRITICAL) color="red" ;;
+ CLEAR) color="green" ;;
+ *) color="gray" ;;
+ esac
+
+ # Whether this message should trigger a user notification (change the tab color, play a sound, notify mobile phones, etc).
+ # Each recipient's notification preferences are taken into account.
+ # Defaults to false.
+ notify="true"
+
+ for room in ${recipients}; do
+ httpcode=$(docurl -X POST \
+ -H "Content-type: application/json" \
+ -H "Authorization: Bearer ${authtoken}" \
+ -d "{\"color\": \"${color}\", \"from\": \"${host}\", \"message_format\": \"${msg_format}\", \"message\": \"${message}\", \"notify\": \"${notify}\"}" \
+ "https://${HIPCHAT_SERVER}/v2/room/${room}/notification")
+
+ if [ "${httpcode}" = "204" ]; then
+ info "sent HipChat notification for: ${host} ${chart}.${name} is ${status} to '${room}'"
+ sent=$((sent + 1))
+ else
+ error "failed to send HipChat notification for: ${host} ${chart}.${name} is ${status} to '${room}' with HTTP response status code ${httpcode}."
+ fi
+ done
+
+ [ ${sent} -gt 0 ] && return 0
+ fi
+
+ return 1
+}
+
+# -----------------------------------------------------------------------------
+# messagebird sender
+
+send_messagebird() {
+ local accesskey="${1}" messagebirdnumber="${2}" recipients="${3}" title="${4}" message="${5}" httpcode sent=0 user
+ if [ "${SEND_MESSAGEBIRD}" = "YES" ] && [ -n "${accesskey}" ] && [ -n "${messagebirdnumber}" ] && [ -n "${recipients}" ] && [ -n "${message}" ] && [ -n "${title}" ]; then
+ #https://developers.messagebird.com/docs/messaging
+ for user in ${recipients}; do
+ httpcode=$(docurl -X POST \
+ --data-urlencode "originator=${messagebirdnumber}" \
+ --data-urlencode "recipients=${user}" \
+ --data-urlencode "body=${title} ${message}" \
+ --data-urlencode "datacoding=auto" \
+ -H "Authorization: AccessKey ${accesskey}" \
+ "https://rest.messagebird.com/messages")
+
+ if [ "${httpcode}" = "201" ]; then
+ info "sent Messagebird SMS for: ${host} ${chart}.${name} is ${status} to '${user}'"
+ sent=$((sent + 1))
+ else
+ error "failed to send Messagebird SMS for: ${host} ${chart}.${name} is ${status} to '${user}' with HTTP response status code ${httpcode}."
+ fi
+ done
+
+ [ ${sent} -gt 0 ] && return 0
+ fi
+
+ return 1
+}
+
+# -----------------------------------------------------------------------------
+# kavenegar sender
+
+send_kavenegar() {
+ local API_KEY="${1}" kavenegarsender="${2}" recipients="${3}" title="${4}" message="${5}" httpcode sent=0 user
+ if [ "${SEND_KAVENEGAR}" = "YES" ] && [ -n "${API_KEY}" ] && [ -n "${kavenegarsender}" ] && [ -n "${recipients}" ] && [ -n "${message}" ] && [ -n "${title}" ]; then
+ # http://api.kavenegar.com/v1/{API-KEY}/sms/send.json
+ for user in ${recipients}; do
+ httpcode=$(docurl -X POST http://api.kavenegar.com/v1/${API_KEY}/sms/send.json \
+ --data-urlencode "sender=${kavenegarsender}" \
+ --data-urlencode "receptor=${user}" \
+ --data-urlencode "message=${title} ${message}")
+
+ if [ "${httpcode}" = "200" ]; then
+ info "sent Kavenegar SMS for: ${host} ${chart}.${name} is ${status} to '${user}'"
+ sent=$((sent + 1))
+ else
+ error "failed to send Kavenegar SMS for: ${host} ${chart}.${name} is ${status} to '${user}' with HTTP response status code ${httpcode}."
+ fi
+ done
+
+ [ ${sent} -gt 0 ] && return 0
+ fi
+
+ return 1
+}
+
+# -----------------------------------------------------------------------------
+# telegram sender
+
+send_telegram() {
+ local bottoken="${1}" chatids="${2}" message="${3}" httpcode sent=0 chatid emoji disableNotification=""
+
+ if [ "${status}" = "CLEAR" ]; then disableNotification="--data-urlencode disable_notification=true"; fi
+
+ case "${status}" in
+ WARNING) emoji="⚠️" ;;
+ CRITICAL) emoji="🔴" ;;
+ CLEAR) emoji="✅" ;;
+ *) emoji="⚪️" ;;
+ esac
+
+ if [ "${SEND_TELEGRAM}" = "YES" ] && [ -n "${bottoken}" ] && [ -n "${chatids}" ] && [ -n "${message}" ]; then
+ for chatid in ${chatids}; do
+ # https://core.telegram.org/bots/api#sendmessage
+ httpcode=$(docurl ${disableNotification} \
+ --data-urlencode "parse_mode=HTML" \
+ --data-urlencode "disable_web_page_preview=true" \
+ --data-urlencode "text=${emoji} ${message}" \
+ "https://api.telegram.org/bot${bottoken}/sendMessage?chat_id=${chatid}")
+
+ if [ "${httpcode}" = "200" ]; then
+ info "sent telegram notification for: ${host} ${chart}.${name} is ${status} to '${chatid}'"
+ sent=$((sent + 1))
+ elif [ "${httpcode}" = "401" ]; then
+ error "failed to send telegram notification for: ${host} ${chart}.${name} is ${status} to '${chatid}': Wrong bot token."
+ else
+ error "failed to send telegram notification for: ${host} ${chart}.${name} is ${status} to '${chatid}' with HTTP response status code ${httpcode}."
+ fi
+ done
+
+ [ ${sent} -gt 0 ] && return 0
+ fi
+
+ return 1
+}
+
+# -----------------------------------------------------------------------------
+# Microsoft Team sender
+
+send_msteam() {
+
+  local webhook="${1}" channels="${2}" httpcode sent=0 channel color payload icon webhook_url
+
+ [ "${SEND_MSTEAM}" != "YES" ] && return 1
+
+ case "${status}" in
+ WARNING) icon="${MSTEAM_ICON_WARNING}" && color="${MSTEAM_COLOR_WARNING}" ;;
+ CRITICAL) icon="${MSTEAM_ICON_CRITICAL}" && color="${MSTEAM_COLOR_CRITICAL}" ;;
+ CLEAR) icon="${MSTEAM_ICON_CLEAR}" && color="${MSTEAM_COLOR_CLEAR}" ;;
+ *) icon="${MSTEAM_ICON_DEFAULT}" && color="${MSTEAM_COLOR_DEFAULT}" ;;
+ esac
+
+ for channel in ${channels}; do
+    ## More details on the payload syntax are available here: https://docs.microsoft.com/en-us/outlook/actionable-messages/message-card-reference
+    ## Online designer: https://acdesignerbeta.azurewebsites.net/
+ payload="$(
+ cat <<EOF
+ {
+ "@context": "http://schema.org/extensions",
+ "@type": "MessageCard",
+ "themeColor": "${color}",
+ "title": "$icon Alert ${status} from netdata for ${host}",
+ "text": "${host} ${status_message}, ${chart} (_${family}_), *${alarm}*",
+ "potentialAction": [
+ {
+ "@type": "OpenUri",
+ "name": "Netdata",
+ "targets": [
+ { "os": "default", "uri": "${goto_url}" }
+ ]
+ }
+ ]
+ }
+EOF
+ )"
+
+    # Replace the CHANNEL placeholder in the webhook URL with the MS Teams
+    # channel name from the config file. A separate variable keeps the
+    # template intact for the next channel in the loop.
+    webhook_url="${webhook//CHANNEL/${channel}}"
+
+    httpcode=$(docurl -H "Content-Type: application/json" -d "${payload}" "${webhook_url}")
+
+    if [ "${httpcode}" = "200" ]; then
+      info "sent Microsoft Teams notification for: ${host} ${chart}.${name} is ${status} to '${webhook_url}'"
+      sent=$((sent + 1))
+    else
+      error "failed to send Microsoft Teams notification for: ${host} ${chart}.${name} is ${status} to '${webhook_url}', with HTTP response status code ${httpcode}."
+    fi
+ done
+
+ [ ${sent} -gt 0 ] && return 0
+
+ return 1
+}
+
+# -----------------------------------------------------------------------------
+# slack sender
+
+send_slack() {
+ local webhook="${1}" channels="${2}" httpcode sent=0 channel color payload
+
+ [ "${SEND_SLACK}" != "YES" ] && return 1
+
+ case "${status}" in
+ WARNING) color="warning" ;;
+ CRITICAL) color="danger" ;;
+ CLEAR) color="good" ;;
+ *) color="#777777" ;;
+ esac
+
+ for channel in ${channels}; do
+ # Default entry in the recipient is without a hash in front (backwards-compatible). Accept specification of channel or user.
+ if [ "${channel::1}" != "#" ] && [ "${channel::1}" != "@" ]; then channel="#$channel"; fi
+
+ # If channel is equal to "#" then do not send the channel attribute at all. Slack also defines channels and users in webhooks.
+ if [ "${channel}" = "#" ]; then
+ ch=""
+ chstr="without specifying a channel"
+ else
+ ch="\"channel\": \"${channel}\","
+ chstr="to '${channel}'"
+ fi
+
+ payload="$(
+ cat <<EOF
+ {
+ $ch
+ "username": "netdata on ${host}",
+ "icon_url": "${images_base_url}/images/banner-icon-144x144.png",
+ "text": "${host} ${status_message}, \`${chart}\` (_${family}_), *${alarm}*",
+ "attachments": [
+ {
+ "fallback": "${alarm} - ${chart} (${family}) - ${info}",
+ "color": "${color}",
+ "title": "${alarm}",
+ "title_link": "${goto_url}",
+ "text": "${info}",
+ "fields": [
+ {
+ "title": "${chart}",
+ "short": true
+ },
+ {
+ "title": "${family}",
+ "short": true
+ }
+ ],
+ "thumb_url": "${image}",
+ "footer": "by ${host}",
+ "ts": ${when}
+ }
+ ]
+ }
+EOF
+ )"
+
+ httpcode=$(docurl -X POST --data-urlencode "payload=${payload}" "${webhook}")
+ if [ "${httpcode}" = "200" ]; then
+ info "sent slack notification for: ${host} ${chart}.${name} is ${status} ${chstr}"
+ sent=$((sent + 1))
+ else
+ error "failed to send slack notification for: ${host} ${chart}.${name} is ${status} ${chstr}, with HTTP response status code ${httpcode}."
+ fi
+ done
+
+ [ ${sent} -gt 0 ] && return 0
+
+ return 1
+}
+
+# -----------------------------------------------------------------------------
+# rocketchat sender
+
+send_rocketchat() {
+ local webhook="${1}" channels="${2}" httpcode sent=0 channel color payload
+
+ [ "${SEND_ROCKETCHAT}" != "YES" ] && return 1
+
+ case "${status}" in
+ WARNING) color="warning" ;;
+ CRITICAL) color="danger" ;;
+ CLEAR) color="good" ;;
+ *) color="#777777" ;;
+ esac
+
+ for channel in ${channels}; do
+ payload="$(
+ cat <<EOF
+ {
+ "channel": "#${channel}",
+ "alias": "netdata on ${host}",
+ "avatar": "${images_base_url}/images/banner-icon-144x144.png",
+ "text": "${host} ${status_message}, \`${chart}\` (_${family}_), *${alarm}*",
+ "attachments": [
+ {
+ "color": "${color}",
+ "title": "${alarm}",
+ "title_link": "${goto_url}",
+ "text": "${info}",
+ "fields": [
+ {
+ "title": "${chart}",
+ "short": true,
+ "value": "chart"
+ },
+ {
+ "title": "${family}",
+ "short": true,
+ "value": "family"
+ }
+ ],
+ "thumb_url": "${image}",
+ "ts": "${when}"
+ }
+ ]
+ }
+EOF
+ )"
+
+ httpcode=$(docurl -X POST --data-urlencode "payload=${payload}" "${webhook}")
+ if [ "${httpcode}" = "200" ]; then
+ info "sent rocketchat notification for: ${host} ${chart}.${name} is ${status} to '${channel}'"
+ sent=$((sent + 1))
+ else
+ error "failed to send rocketchat notification for: ${host} ${chart}.${name} is ${status} to '${channel}', with HTTP response status code ${httpcode}."
+ fi
+ done
+
+ [ ${sent} -gt 0 ] && return 0
+
+ return 1
+}
+
+# -----------------------------------------------------------------------------
+# alerta sender
+
+send_alerta() {
+ local webhook="${1}" channels="${2}" httpcode sent=0 channel severity resource event payload auth
+
+ [ "${SEND_ALERTA}" != "YES" ] && return 1
+
+ case "${status}" in
+ CRITICAL) severity="critical" ;;
+ WARNING) severity="warning" ;;
+ CLEAR) severity="cleared" ;;
+ *) severity="indeterminate" ;;
+ esac
+
+ if [[ ${chart} == httpcheck* ]]; then
+ resource=$chart
+ event=$name
+ else
+ resource="${host}:${family}"
+ event="${chart}.${name}"
+ fi
+
+ for channel in ${channels}; do
+ payload="$(
+ cat <<EOF
+ {
+ "resource": "${resource}",
+ "event": "${event}",
+ "environment": "${channel}",
+ "severity": "${severity}",
+ "service": ["Netdata"],
+ "group": "Performance",
+ "value": "${value_string}",
+ "text": "${info}",
+ "tags": ["alarm_id:${alarm_id}"],
+ "attributes": {
+ "roles": "${roles}",
+ "name": "${name}",
+ "chart": "${chart}",
+ "family": "${family}",
+ "source": "${src}",
+ "moreInfo": "<a href=\"${goto_url}\">View Netdata</a>"
+ },
+ "origin": "netdata/${host}",
+ "type": "netdataAlarm",
+ "rawData": "${BASH_ARGV[@]}"
+ }
+EOF
+ )"
+
+ if [ -n "${ALERTA_API_KEY}" ]; then
+ auth="Key ${ALERTA_API_KEY}"
+ fi
+
+ httpcode=$(docurl -X POST "${webhook}/alert" -H "Content-Type: application/json" -H "Authorization: $auth" --data "${payload}")
+
+ if [ "${httpcode}" = "200" ] || [ "${httpcode}" = "201" ]; then
+ info "sent alerta notification for: ${host} ${chart}.${name} is ${status} to '${channel}'"
+ sent=$((sent + 1))
+ elif [ "${httpcode}" = "202" ]; then
+ info "suppressed alerta notification for: ${host} ${chart}.${name} is ${status} to '${channel}'"
+ else
+ error "failed to send alerta notification for: ${host} ${chart}.${name} is ${status} to '${channel}', with HTTP response status code ${httpcode}."
+ fi
+ done
+
+ [ ${sent} -gt 0 ] && return 0
+
+ return 1
+}
+
+# -----------------------------------------------------------------------------
+# flock sender
+
+send_flock() {
+ local webhook="${1}" channels="${2}" httpcode sent=0 channel color payload
+
+ [ "${SEND_FLOCK}" != "YES" ] && return 1
+
+ case "${status}" in
+ WARNING) color="warning" ;;
+ CRITICAL) color="danger" ;;
+ CLEAR) color="good" ;;
+ *) color="#777777" ;;
+ esac
+
+ for channel in ${channels}; do
+ httpcode=$(docurl -X POST "${webhook}" -H "Content-Type: application/json" -d "{
+ \"sendAs\": {
+ \"name\" : \"netdata on ${host}\",
+ \"profileImage\" : \"${images_base_url}/images/banner-icon-144x144.png\"
+ },
+ \"text\": \"${host} *${status_message}*\",
+ \"timestamp\": \"${when}\",
+ \"attachments\": [
+ {
+ \"description\": \"${chart} (${family}) - ${info}\",
+ \"color\": \"${color}\",
+ \"title\": \"${alarm}\",
+ \"url\": \"${goto_url}\",
+ \"text\": \"${info}\",
+ \"views\": {
+ \"image\": {
+ \"original\": { \"src\": \"${image}\", \"width\": 400, \"height\": 400 },
+ \"thumbnail\": { \"src\": \"${image}\", \"width\": 50, \"height\": 50 },
+ \"filename\": \"${image}\"
+ }
+ }
+ }
+ ]
+ }")
+ if [ "${httpcode}" = "200" ]; then
+ info "sent flock notification for: ${host} ${chart}.${name} is ${status} to '${channel}'"
+ sent=$((sent + 1))
+ else
+ error "failed to send flock notification for: ${host} ${chart}.${name} is ${status} to '${channel}', with HTTP response status code ${httpcode}."
+ fi
+ done
+
+ [ ${sent} -gt 0 ] && return 0
+
+ return 1
+}
+
+# -----------------------------------------------------------------------------
+# discord sender
+
+send_discord() {
+ local webhook="${1}/slack" channels="${2}" httpcode sent=0 channel color payload username
+
+ [ "${SEND_DISCORD}" != "YES" ] && return 1
+
+ case "${status}" in
+ WARNING) color="warning" ;;
+ CRITICAL) color="danger" ;;
+ CLEAR) color="good" ;;
+ *) color="#777777" ;;
+ esac
+
+ for channel in ${channels}; do
+ username="netdata on ${host}"
+ [ ${#username} -gt 32 ] && username="${username:0:29}..."
+
+ payload="$(
+ cat <<EOF
+ {
+ "channel": "#${channel}",
+ "username": "${username}",
+ "text": "${host} ${status_message}, \`${chart}\` (_${family}_), *${alarm}*",
+ "icon_url": "${images_base_url}/images/banner-icon-144x144.png",
+ "attachments": [
+ {
+ "color": "${color}",
+ "title": "${alarm}",
+ "title_link": "${goto_url}",
+ "text": "${info}",
+ "fields": [
+ {
+ "title": "${chart}",
+ "value": "${family}"
+ }
+ ],
+ "thumb_url": "${image}",
+ "footer_icon": "${images_base_url}/images/banner-icon-144x144.png",
+ "footer": "${host}",
+ "ts": ${when}
+ }
+ ]
+ }
+EOF
+ )"
+
+ httpcode=$(docurl -X POST --data-urlencode "payload=${payload}" "${webhook}")
+ if [ "${httpcode}" = "200" ]; then
+ info "sent discord notification for: ${host} ${chart}.${name} is ${status} to '${channel}'"
+ sent=$((sent + 1))
+ else
+ error "failed to send discord notification for: ${host} ${chart}.${name} is ${status} to '${channel}', with HTTP response status code ${httpcode}."
+ fi
+ done
+
+ [ ${sent} -gt 0 ] && return 0
+
+ return 1
+}
+
+# -----------------------------------------------------------------------------
+# fleep sender
+
+send_fleep() {
+ local httpcode sent=0 webhooks="${1}" data message
+ if [ "${SEND_FLEEP}" = "YES" ]; then
+ message="${host} ${status_message}, \`${chart}\` (${family}), *${alarm}*\\n${info}"
+
+ for hook in ${webhooks}; do
+ data="{ "
+ data="${data} 'message': '${message}', "
+ data="${data} 'user': '${FLEEP_SENDER}' "
+ data="${data} }"
+
+ httpcode=$(docurl -X POST --data "${data}" "https://fleep.io/hook/${hook}")
+
+ if [ "${httpcode}" = "200" ]; then
+ info "sent fleep data for: ${host} ${chart}.${name} is ${status} and user '${FLEEP_SENDER}'"
+ sent=$((sent + 1))
+ else
+ error "failed to send fleep data for: ${host} ${chart}.${name} is ${status} and user '${FLEEP_SENDER}' with HTTP response status code ${httpcode}."
+ fi
+ done
+
+ [ ${sent} -gt 0 ] && return 0
+ fi
+
+ return 1
+}
+
+# -----------------------------------------------------------------------------
+# Prowl sender
+
+send_prowl() {
+ local httpcode sent=0 data message keys prio=0 alarm_url event
+ if [ "${SEND_PROWL}" = "YES" ]; then
+ message="$(urlencode "${host} ${status_message}, \`${chart}\` (${family}), *${alarm}*\\n${info}")"
+ message="description=${message}"
+ keys="$(urlencode "$(echo "${1}" | tr ' ' ,)")"
+ keys="apikey=${keys}"
+ app="application=Netdata"
+
+ case "${status}" in
+ CRITICAL)
+ prio=2
+ ;;
+ WARNING)
+ prio=1
+ ;;
+ esac
+ prio="priority=${prio}"
+
+ alarm_url="$(urlencode ${goto_url})"
+ alarm_url="url=${alarm_url}"
+ event="$(urlencode "${host} ${status_message}")"
+ event="event=${event}"
+
+ data="${keys}&${prio}&${alarm_url}&${app}&${event}&${message}"
+
+ httpcode=$(docurl -X POST --data "${data}" "https://api.prowlapp.com/publicapi/add")
+
+ if [ "${httpcode}" = "200" ]; then
+ info "sent prowl data for: ${host} ${chart}.${name} is ${status}"
+ sent=1
+ else
+ error "failed to send prowl data for: ${host} ${chart}.${name} is ${status} with with error code ${httpcode}."
+ fi
+
+ [ ${sent} -gt 0 ] && return 0
+ fi
+
+ return 1
+}
+
+# -----------------------------------------------------------------------------
+# irc sender
+
+send_irc() {
+ local NICKNAME="${1}" REALNAME="${2}" CHANNELS="${3}" NETWORK="${4}" SERVERNAME="${5}" MESSAGE="${6}" sent=0 channel color send_alarm reply_codes error
+
+ if [ "${SEND_IRC}" = "YES" ] && [ -n "${NICKNAME}" ] && [ -n "${REALNAME}" ] && [ -n "${CHANNELS}" ] && [ -n "${NETWORK}" ] && [ -n "${SERVERNAME}" ]; then
+ case "${status}" in
+ WARNING) color="warning" ;;
+ CRITICAL) color="danger" ;;
+ CLEAR) color="good" ;;
+ *) color="#777777" ;;
+ esac
+
+ SNDMESSAGE="${MESSAGE//$'\n'/", "}"
+ for CHANNEL in ${CHANNELS}; do
+ error=0
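+ # Speak the raw IRC protocol over nc: register a user, join the
+ # channel, send the message as a PRIVMSG and quit; numeric replies
+ # in the 400-599 range indicate errors.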
+ send_alarm=$(echo -e "USER ${NICKNAME} guest ${REALNAME} ${SERVERNAME}\\nNICK ${NICKNAME}\\nJOIN ${CHANNEL}\\nPRIVMSG ${CHANNEL} :${SNDMESSAGE}\\nQUIT\\n" \ | nc "${NETWORK}" 6667)
+ reply_codes=$(echo "${send_alarm}" | cut -d ' ' -f 2 | grep -o '[0-9]*')
+ for code in ${reply_codes}; do
+ if [ "${code}" -ge 400 ] && [ "${code}" -le 599 ]; then
+ error=1
+ break
+ fi
+ done
+
+ if [ "${error}" -eq 0 ]; then
+ info "sent irc notification for: ${host} ${chart}.${name} is ${status} to '${CHANNEL}'"
+ sent=$((sent + 1))
+ else
+ error "failed to send irc notification for: ${host} ${chart}.${name} is ${status} to '${CHANNEL}', with error code ${code}."
+ fi
+ done
+ fi
+
+ [ ${sent} -gt 0 ] && return 0
+
+ return 1
+}
+
+# -----------------------------------------------------------------------------
+# Amazon SNS sender
+
+send_awssns() {
+ local targets="${1}" message='' sent=0 region=''
+ local default_format="${status} on ${host} at ${date}: ${chart} ${value_string}"
+
+ [ "${SEND_AWSSNS}" = "YES" ] || return 1
+
+ message=${AWSSNS_MESSAGE_FORMAT:-${default_format}}
+
+ for target in ${targets}; do
+ # Extract the region from the target ARN. We need to explicitly specify the region so that it matches up correctly.
+ region="$(echo ${target} | cut -f 4 -d ':')"
+ if ${aws} sns publish --region "${region}" --subject "${host} ${status_message} - ${name//_/ } - ${chart}" --message "${message}" --target-arn ${target} &>/dev/null; then
+ info "sent Amazon SNS notification for: ${host} ${chart}.${name} is ${status} to '${target}'"
+ sent=$((sent + 1))
+ else
+ error "failed to send Amazon SNS notification for: ${host} ${chart}.${name} is ${status} to '${target}'"
+ fi
+ done
+
+ [ ${sent} -gt 0 ] && return 0
+
+ return 1
+}
+
+# -----------------------------------------------------------------------------
+# Matrix sender
+
+send_matrix() {
+ local homeserver="${1}" webhook accesstoken rooms="${2}" httpcode sent=0 payload
+
+ [ "${SEND_MATRIX}" != "YES" ] && return 1
+ [ -z "${MATRIX_ACCESSTOKEN}" ] && return 1
+
+ accesstoken="${MATRIX_ACCESSTOKEN}"
+
+ case "${status}" in
+ WARNING) emoji="⚠️" ;;
+ CRITICAL) emoji="🔴" ;;
+ CLEAR) emoji="✅" ;;
+ *) emoji="⚪️" ;;
+ esac
+
+ for room in ${rooms}; do
+ webhook="$homeserver/_matrix/client/r0/rooms/$(urlencode $room)/send/m.room.message?access_token=$accesstoken"
+ payload="$(
+ cat <<EOF
+ {
+ "msgtype": "m.notice",
+ "format": "org.matrix.custom.html",
+ "formatted_body": "${emoji} ${host} ${status_message} - <b>${name//_/ }</b><br>${chart} (${family})<br><a href=\"${goto_url}\">${alarm}</a><br><i>${info}</i>",
+ "body": "${emoji} ${host} ${status_message} - ${name//_/ } ${chart} (${family}) ${goto_url} ${alarm} ${info}"
+ }
+EOF
+ )"
+
+ httpcode=$(docurl -X POST --data "${payload}" "${webhook}")
+ if [ "${httpcode}" == "200" ]; then
+ info "sent Matrix notification for: ${host} ${chart}.${name} is ${status} to '${room}'"
+ sent=$((sent + 1))
+ else
+ error "failed to send Matrix notification for: ${host} ${chart}.${name} is ${status} to '${room}', with HTTP response status code ${httpcode}."
+ fi
+ done
+
+ [ ${sent} -gt 0 ] && return 0
+
+ return 1
+}
+
+# -----------------------------------------------------------------------------
+# syslog sender
+
+send_syslog() {
+ local facility=${SYSLOG_FACILITY:-"local6"} level='info' targets="${1}"
+ local priority='' message='' server='' port='' prefix=''
+ local temp1='' temp2=''
+
+ [ "${SEND_SYSLOG}" = "YES" ] || return 1
+
+ if [ "${status}" = "CRITICAL" ]; then
+ level='crit'
+ elif [ "${status}" = "WARNING" ]; then
+ level='warning'
+ fi
+
+ for target in ${targets}; do
+ priority="${facility}.${level}"
+ message=''
+ server=''
+ port=''
+ prefix=''
+ temp1=''
+ temp2=''
+
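+ # Each target has the form [[facility.level@]server[:port]/]prefix:
+ # split off the prefix first, then (if present) the priority and the
+ # server address, taking care of bracketed IPv6 literals.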
+ prefix=$(echo "${target}" | cut -d '/' -f 2)
+ temp1=$(echo "${target}" | cut -d '/' -f 1)
+
+ if [ "${prefix}" != "${temp1}" ]; then
+ if (echo "${temp1}" | grep -q '@'); then
+ temp2=$(echo "${temp1}" | cut -d '@' -f 1)
+ server=$(echo "${temp1}" | cut -d '@' -f 2)
+
+ if [ "${temp2}" != "${server}" ]; then
+ priority=${temp2}
+ fi
+
+ port=$(echo "${server}" | rev | cut -d ':' -f 1 | rev)
+
+ if (echo "${server}" | grep -E -q '\[.*\]'); then
+ if (echo "${port}" | grep -q ']'); then
+ port=''
+ else
+ server=$(echo "${server}" | rev | cut -d ':' -f 2- | rev)
+ fi
+ else
+ if [ "${port}" = "${server}" ]; then
+ port=''
+ else
+ server=$(echo "${server}" | cut -d ':' -f 1)
+ fi
+ fi
+ else
+ priority=${temp1}
+ fi
+ fi
+
+ message="${prefix} ${status} on ${host} at ${date}: ${chart} ${value_string}"
+
+ if [ -n "${server}" ]; then
+ logger_options="${logger_options} -n ${server}"
+ if [ -n "${port}" ]; then
+ logger_options="${logger_options} -P ${port}"
+ fi
+ fi
+
+ ${logger} -p "${priority}" ${logger_options} "${message}"
+ done
+
+ return $?
+}
+
+# -----------------------------------------------------------------------------
+# SMS sender
+
+send_sms() {
+ local recipients="${1}" errcode errmessage sent=0
+
+ # Human readable SMS
+ local msg="${host} ${status_message}: ${chart} (${family}), ${alarm}"
+
+ # limit it to 160 characters
+ msg="${msg:0:160}"
+
+ if [ "${SEND_SMS}" = "YES" ] && [ -n "${sendsms}" ] && [ -n "${recipients}" ] && [ -n "${msg}" ]; then
+ # http://api.kavenegar.com/v1/{API-KEY}/sms/send.json
+ for phone in ${recipients}; do
+ errmessage=$(${sendsms} "${phone}" "${msg}" 2>&1)
+ errcode=$?
+ if [ ${errcode} -eq 0 ]; then
+ info "sent smstools3 SMS for: ${host} ${chart}.${name} is ${status} to '${phone}'"
+ sent=$((sent + 1))
+ else
+ error "failed to send smstools3 SMS for: ${host} ${chart}.${name} is ${status} to '${phone}' with error code ${errcode}: ${errmessage}."
+ fi
+ done
+
+ [ ${sent} -gt 0 ] && return 0
+ fi
+
+ return 1
+}
+
+# -----------------------------------------------------------------------------
+# hangouts sender
+
+send_hangouts() {
+ local rooms="${1}" httpcode sent=0 room color payload webhook thread
+
+ [ "${SEND_HANGOUTS}" != "YES" ] && return 1
+
+ case "${status}" in
+ WARNING) color="#ffa700" ;;
+ CRITICAL) color="#d62d20" ;;
+ CLEAR) color="#008744" ;;
+ *) color="#777777" ;;
+ esac
+
+ for room in ${rooms}; do
+ if [ -z "${HANGOUTS_WEBHOOK_URI[$room]}" ] ; then
+ info "Can't send Hangouts notification for: ${host} ${chart}.${name} to room ${room}. HANGOUTS_WEBHOOK_URI[$room] not defined"
+ else
+ if [ -n "${HANGOUTS_WEBHOOK_THREAD[$room]}" ]; then
+ thread="\"name\" : \"${HANGOUTS_WEBHOOK_THREAD[$room]}\""
+ fi
+ webhook="${HANGOUTS_WEBHOOK_URI[$room]}"
+ payload="$(
+ cat <<EOF
+ {
+ "cards": [
+ {
+ "header": {
+ "title": "Netdata on ${host}",
+ "imageUrl": "${images_base_url}/images/banner-icon-144x144.png",
+ "imageStyle": "IMAGE"
+ },
+ "sections": [
+ {
+ "header": "<b>${host}</b>",
+ "widgets": [
+ {
+ "keyValue": {
+ "topLabel": "Status Message",
+ "content": "<b>${status_message}</b>",
+ "contentMultiline": "true",
+ "iconUrl": "${image}",
+ "onClick": {
+ "openLink": {
+ "url": "${goto_url}"
+ }
+ }
+ }
+ },
+ {
+ "keyValue": {
+ "topLabel": "${chart} | ${family}",
+ "content": "<font color=${color}>${alarm}</font>",
+ "contentMultiline": "true"
+ }
+ }
+ ]
+ },
+ {
+ "widgets": [
+ {
+ "textParagraph": {
+ "text": "<font color=\"#0057e7\">@ ${date}\n<b>${info}</b></font>"
+ }
+ }
+ ]
+ },
+ {
+ "widgets": [
+ {
+ "buttons": [
+ {
+ "textButton": {
+ "text": "Go to ${host}",
+ "onClick": {
+ "openLink": {
+ "url": "${goto_url}"
+ }
+ }
+ }
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ],
+ "thread": {
+ $thread
+ }
+ }
+EOF
+ )"
+
+ httpcode=$(docurl -H "Content-Type: application/json" -X POST -d "${payload}" "${webhook}")
+
+ if [ "${httpcode}" = "200" ]; then
+ info "sent hangouts notification for: ${host} ${chart}.${name} is ${status} to '${room}'"
+ sent=$((sent + 1))
+ else
+ error "failed to send hangouts notification for: ${host} ${chart}.${name} is ${status} to '${room}', with HTTP response status code ${httpcode}."
+ fi
+ fi
+ done
+
+ [ ${sent} -gt 0 ] && return 0
+
+ return 1
+}
+
+# -----------------------------------------------------------------------------
+# Dynatrace sender
+
+send_dynatrace() {
+ [ "${SEND_DYNATRACE}" != "YES" ] && return 1
+
+ local dynatrace_url="${DYNATRACE_SERVER}/e/${DYNATRACE_SPACE}/api/v1/events"
+ local description="NetData Notification for: ${host} ${chart}.${name} is ${status}"
+ local payload=""
+
+ payload=$(cat <<EOF
+{
+ "title": "NetData Alarm from ${host}",
+ "source" : "${DYNATRACE_ANNOTATION_TYPE}",
+ "description" : "${description}",
+ "eventType": "${DYNATRACE_EVENT}",
+ "attachRules":{
+ "tagRule":[{
+ "meTypes":["HOST"],
+ "tags":["${DYNATRACE_TAG_VALUE}"]
+ }]
+ },
+ "customProperties":{
+ "description": "${description}"
+ }
+}
+EOF
+)
+
+ # echo ${payload}
+
+ httpcode=$(docurl -X POST -H "Authorization: Api-token ${DYNATRACE_TOKEN}" -H "Content-Type: application/json" -d "${payload}" ${dynatrace_url})
+ ret=$?
+
+
+ if [ ${ret} -eq 0 ]; then
+ if [ "${httpcode}" = "200" ]; then
+ info "sent ${DYNATRACE_EVENT} to ${DYNATRACE_SERVER}"
+ return 0
+ else
+ warning "Dynatrace ${DYNATRACE_SERVER} responded ${httpcode} notification for: ${host} ${chart}.${name} is ${status} was not sent!"
+ return 1
+ fi
+ else
+ error "failed to sent ${DYNATRACE_EVENT} notification for: ${host} ${chart}.${name} is ${status} to ${DYNATRACE_SERVER} with error code ${ret}."
+ return 1
+ fi
+}
+
+
+# -----------------------------------------------------------------------------
+# Stackpulse sender
+
+send_stackpulse() {
+ local payload httpcode oldv currv
+ [ "${SEND_STACKPULSE}" != "YES" ] && return 1
+
+ # We are sending null when values are nan to avoid errors while JSON message is parsed
+ [ "${old_value}" != "nan" ] && oldv="${old_value}" || oldv="null"
+ [ "${value}" != "nan" ] && currv="${value}" || currv="null"
+
+ payload=$(cat <<EOF
+ {
+ "Node" : "${host}",
+ "Chart" : "${chart}",
+ "OldValue" : ${oldv},
+ "Value" : ${currv},
+ "Units" : "${units}",
+ "OldStatus" : "${old_status}",
+ "Status" : "${status}",
+ "Alarm" : "${name}",
+ "Date": ${when},
+ "Duration": ${duration},
+ "NonClearDuration": ${non_clear_duration},
+ "Description" : "${status_message}, ${info}",
+ "CalcExpression" : "${calc_expression}",
+ "CalcParamValues" : "${calc_param_values}",
+ "TotalWarnings" : "${total_warnings}",
+ "TotalCritical" : "${total_critical}",
+ "ID" : ${alarm_id}
+ }
+EOF
+)
+
+ httpcode=$(docurl -X POST -H "Content-Type: application/json" -d "${payload}" ${STACKPULSE_WEBHOOK})
+ if [ "${httpcode}" = "200" ]; then
+ info "sent stackpulse notification for: ${host} ${chart}.${name} is ${status}"
+ else
+ error "failed to send stackpulse notification for: ${host} ${chart}.${name} is ${status}, with HTTP response status code ${httpcode}."
+ return 1
+ fi
+
+ return 0
+}
+
+# -----------------------------------------------------------------------------
+# Opsgenie sender
+
+send_opsgenie() {
+ local payload httpcode oldv currv
+ [ "${SEND_OPSGENIE}" != "YES" ] && return 1
+
+ if [ -z "${OPSGENIE_API_KEY}" ] ; then
+ info "Can't send Opsgenie notification, because OPSGENIE_API_KEY is not defined"
+ return 1
+ fi
+
+ # We are sending null when values are nan to avoid errors while JSON message is parsed
+ [ "${old_value}" != "nan" ] && oldv="${old_value}" || oldv="null"
+ [ "${value}" != "nan" ] && currv="${value}" || currv="null"
+
+ payload=$(cat <<EOF
+ {
+ "host" : "${host}",
+ "unique_id" : "${unique_id}",
+ "alarmId" : ${alarm_id},
+ "eventId" : ${event_id},
+ "chart" : "${chart}",
+ "when": ${when},
+ "name" : "${name}",
+ "family" : "${family}",
+ "status" : "${status}",
+ "old_status" : "${old_status}",
+ "value" : ${currv},
+ "old_value" : ${oldv},
+ "duration": ${duration},
+ "non_clear_duration": ${non_clear_duration},
+ "units" : "${units}",
+ "info" : "${status_message}, ${info}",
+ "calc_expression" : "${calc_expression}",
+ "total_warnings" : "${total_warnings}",
+ "total_critical" : "${total_critical}",
+ "src" : "${src}"
+ }
+EOF
+)
+
+ httpcode=$(docurl -X POST -H "Content-Type: application/json" -d "${payload}" "${OPSGENIE_API_URL}/v1/json/integrations/webhooks/netdata?apiKey=${OPSGENIE_API_KEY}")
+ # https://docs.opsgenie.com/docs/alert-api#create-alert
+ if [ "${httpcode}" = "200" ]; then
+ info "sent opsgenie notification for: ${host} ${chart}.${name} is ${status}"
+ else
+ error "failed to send opsgenie notification for: ${host} ${chart}.${name} is ${status}, with HTTP error code ${httpcode}."
+ return 1
+ fi
+
+ return 0
+}
+
+# -----------------------------------------------------------------------------
+# prepare the content of the notification
+
+# the url to send the user on click
+urlencode "${args_host}" >/dev/null
+url_host="${REPLY}"
+urlencode "${chart}" >/dev/null
+url_chart="${REPLY}"
+urlencode "${family}" >/dev/null
+url_family="${REPLY}"
+urlencode "${name}" >/dev/null
+url_name="${REPLY}"
+
+redirect_params="host=${url_host}&chart=${url_chart}&family=${url_family}&alarm=${url_name}&alarm_unique_id=${unique_id}&alarm_id=${alarm_id}&alarm_event_id=${event_id}&alarm_when=${when}"
+GOTOCLOUD=0
+
+if [ "${NETDATA_REGISTRY_URL}" == "https://registry.my-netdata.io" ]; then
+ if [ -z "${NETDATA_REGISTRY_UNIQUE_ID}" ]; then
+ if [ -f "@registrydir_POST@/netdata.public.unique.id" ]; then
+ NETDATA_REGISTRY_UNIQUE_ID="$(cat "@registrydir_POST@/netdata.public.unique.id")"
+ fi
+ fi
+ if [ -n "${NETDATA_REGISTRY_UNIQUE_ID}" ]; then
+ GOTOCLOUD=1
+ fi
+fi
+
+if [ ${GOTOCLOUD} -eq 0 ]; then
+ goto_url="${NETDATA_REGISTRY_URL}/goto-host-from-alarm.html?${redirect_params}"
+else
+ # Temporarily disable alarm redirection, as the cloud endpoint no longer exists. This functionality will be restored after discussion on #9487. For now, just link to netdata.cloud.
+ #goto_url="${NETDATA_REGISTRY_CLOUD_BASE_URL}/alarms/redirect?agentID=${NETDATA_REGISTRY_UNIQUE_ID}&${redirect_params}"
+ goto_url="${NETDATA_REGISTRY_CLOUD_BASE_URL}"
+fi
+
+# the severity of the alarm
+severity="${status}"
+
+# the time the alarm was raised
+duration4human ${duration} >/dev/null
+duration_txt="${REPLY}"
+duration4human ${non_clear_duration} >/dev/null
+non_clear_duration_txt="${REPLY}"
+raised_for="(was ${old_status,,} for ${duration_txt})"
+
+# the key status message
+status_message="status unknown"
+
+# the color of the alarm
+color="grey"
+
+# the alarm value
+alarm="${name//_/ } = ${value_string}"
+
+# the image of the alarm
+image="${images_base_url}/images/banner-icon-144x144.png"
+
+# prepare the title based on status
+case "${status}" in
+CRITICAL)
+ image="${images_base_url}/images/alert-128-red.png"
+ status_message="is critical"
+ color="#ca414b"
+ ;;
+
+WARNING)
+ image="${images_base_url}/images/alert-128-orange.png"
+ status_message="needs attention"
+ color="#ffc107"
+ ;;
+
+CLEAR)
+ image="${images_base_url}/images/check-mark-2-128-green.png"
+ status_message="recovered"
+ color="#77ca6d"
+ ;;
+esac
+
+if [ "${status}" = "CLEAR" ]; then
+ severity="Recovered from ${old_status}"
+ if [ ${non_clear_duration} -gt ${duration} ]; then
+ raised_for="(alarm was raised for ${non_clear_duration_txt})"
+ fi
+
+ # don't show the value when the status is CLEAR
+ # for certain alarms, this value might not have any meaning
+ alarm="${name//_/ } ${raised_for}"
+
+elif { [ "${old_status}" = "WARNING" ] && [ "${status}" = "CRITICAL" ]; }; then
+ severity="Escalated to ${status}"
+ if [ ${non_clear_duration} -gt ${duration} ]; then
+ raised_for="(alarm is raised for ${non_clear_duration_txt})"
+ fi
+
+elif { [ "${old_status}" = "CRITICAL" ] && [ "${status}" = "WARNING" ]; }; then
+ severity="Demoted to ${status}"
+ if [ ${non_clear_duration} -gt ${duration} ]; then
+ raised_for="(alarm is raised for ${non_clear_duration_txt})"
+ fi
+
+else
+ raised_for=
+fi
+
+# prepare HTML versions of elements
+info_html=
+[ -n "${info}" ] && info_html=" <small><br/>${info}</small>"
+
+raised_for_html=
+[ -n "${raised_for}" ] && raised_for_html="<br/><small>${raised_for}</small>"
+
+# -----------------------------------------------------------------------------
+# send the slack notification
+
+# slack aggregates posts from the same username
+# so we use "${host} ${status}" as the bot username, to make them diff
+
+send_slack "${SLACK_WEBHOOK_URL}" "${to_slack}"
+SENT_SLACK=$?
+
+# -----------------------------------------------------------------------------
+# send the hangouts notification
+
+# hangouts aggregates posts from the same room
+# so we use "${host} ${status}" as the room, to make them diff
+
+send_hangouts "${to_hangouts}"
+SENT_HANGOUTS=$?
+
+# -----------------------------------------------------------------------------
+# send the Microsoft notification
+
+# Microsoft team aggregates posts from the same username
+# so we use "${host} ${status}" as the bot username, to make them diff
+
+send_msteam "${MSTEAM_WEBHOOK_URL}" "${to_msteam}"
+SENT_MSTEAM=$?
+
+# -----------------------------------------------------------------------------
+# send the rocketchat notification
+
+# rocketchat aggregates posts from the same username
+# so we use "${host} ${status}" as the bot username, to make them diff
+
+send_rocketchat "${ROCKETCHAT_WEBHOOK_URL}" "${to_rocketchat}"
+SENT_ROCKETCHAT=$?
+
+# -----------------------------------------------------------------------------
+# send the alerta notification
+
+# alerta aggregates posts from the same username
+# so we use "${host} ${status}" as the bot username, to make them diff
+
+send_alerta "${ALERTA_WEBHOOK_URL}" "${to_alerta}"
+SENT_ALERTA=$?
+
+# -----------------------------------------------------------------------------
+# send the flock notification
+
+# flock aggregates posts from the same username
+# so we use "${host} ${status}" as the bot username, to make them diff
+
+send_flock "${FLOCK_WEBHOOK_URL}" "${to_flock}"
+SENT_FLOCK=$?
+
+# -----------------------------------------------------------------------------
+# send the discord notification
+
+# discord aggregates posts from the same username
+# so we use "${host} ${status}" as the bot username, to make them diff
+
+send_discord "${DISCORD_WEBHOOK_URL}" "${to_discord}"
+SENT_DISCORD=$?
+
+# -----------------------------------------------------------------------------
+# send the pushover notification
+
+send_pushover "${PUSHOVER_APP_TOKEN}" "${to_pushover}" "${when}" "${goto_url}" "${status}" "${host} ${status_message} - ${name//_/ } - ${chart}" "
+<font color=\"${color}\"><b>${alarm}</b></font>${info_html}<br/>&nbsp;
+<small><b>${chart}</b><br/>Chart<br/>&nbsp;</small>
+<small><b>${family}</b><br/>Family<br/>&nbsp;</small>
+<small><b>${severity}</b><br/>Severity<br/>&nbsp;</small>
+<small><b>${date}${raised_for_html}</b><br/>Time<br/>&nbsp;</small>
+<a href=\"${goto_url}\">View Netdata</a><br/>&nbsp;
+<small><small>The source of this alarm is line ${src}</small></small>
+"
+
+SENT_PUSHOVER=$?
+
+# -----------------------------------------------------------------------------
+# send the pushbullet notification
+
+send_pushbullet "${PUSHBULLET_ACCESS_TOKEN}" "${PUSHBULLET_SOURCE_DEVICE}" "${to_pushbullet}" "${goto_url}" "${host} ${status_message} - ${name//_/ } - ${chart}" "${alarm}\\n
+Severity: ${severity}\\n
+Chart: ${chart}\\n
+Family: ${family}\\n
+${date}\\n
+The source of this alarm is line ${src}"
+
+SENT_PUSHBULLET=$?
+
+# -----------------------------------------------------------------------------
+# send the twilio SMS
+
+send_twilio "${TWILIO_ACCOUNT_SID}" "${TWILIO_ACCOUNT_TOKEN}" "${TWILIO_NUMBER}" "${to_twilio}" "${host} ${status_message} - ${name//_/ } - ${chart}" "${alarm}
+Severity: ${severity}
+Chart: ${chart}
+Family: ${family}
+${info}"
+
+SENT_TWILIO=$?
+
+# -----------------------------------------------------------------------------
+# send the messagebird SMS
+
+send_messagebird "${MESSAGEBIRD_ACCESS_KEY}" "${MESSAGEBIRD_NUMBER}" "${to_messagebird}" "${host} ${status_message} - ${name//_/ } - ${chart}" "${alarm}
+Severity: ${severity}
+Chart: ${chart}
+Family: ${family}
+${info}"
+
+SENT_MESSAGEBIRD=$?
+
+# -----------------------------------------------------------------------------
+# send the kavenegar SMS
+
+send_kavenegar "${KAVENEGAR_API_KEY}" "${KAVENEGAR_SENDER}" "${to_kavenegar}" "${host} ${status_message} - ${name//_/ } - ${chart}" "${alarm}
+Severity: ${severity}
+Chart: ${chart}
+Family: ${family}
+${info}"
+
+SENT_KAVENEGAR=$?
+
+# -----------------------------------------------------------------------------
+# send the telegram.org message
+
+# https://core.telegram.org/bots/api#formatting-options
+send_telegram "${TELEGRAM_BOT_TOKEN}" "${to_telegram}" "${host} ${status_message} - <b>${name//_/ }</b>
+${chart} (${family})
+<a href=\"${goto_url}\">${alarm}</a>
+<i>${info}</i>"
+
+SENT_TELEGRAM=$?
+
+# -----------------------------------------------------------------------------
+# send the kafka message
+
+send_kafka
+SENT_KAFKA=$?
+
+# -----------------------------------------------------------------------------
+# send the pagerduty.com message
+
+send_pd "${to_pd}"
+SENT_PD=$?
+
+# -----------------------------------------------------------------------------
+# send the fleep message
+
+send_fleep "${to_fleep}"
+SENT_FLEEP=$?
+
+# -----------------------------------------------------------------------------
+# send the Prowl message
+
+send_prowl "${to_prowl}"
+SENT_PROWL=$?
+
+# -----------------------------------------------------------------------------
+# send the irc message
+
+send_irc "${IRC_NICKNAME}" "${IRC_REALNAME}" "${to_irc}" "${IRC_NETWORK}" "${host}" "${host} ${status_message} - ${name//_/ } - ${chart} ----- ${alarm}
+Severity: ${severity}
+Chart: ${chart}
+Family: ${family}
+${info}"
+
+SENT_IRC=$?
+
+# -----------------------------------------------------------------------------
+# send the SMS message with smstools3
+
+send_sms "${to_sms}"
+
+SENT_SMS=$?
+
+# -----------------------------------------------------------------------------
+# send the custom message
+
+send_custom() {
+ # is it enabled?
+ [ "${SEND_CUSTOM}" != "YES" ] && return 1
+
+ # do we have any sender?
+ [ -z "${1}" ] && return 1
+
+ # call the custom_sender function
+ custom_sender "${@}"
+}
+
+send_custom "${to_custom}"
+SENT_CUSTOM=$?
+
+# -----------------------------------------------------------------------------
+# send hipchat message
+
+send_hipchat "${HIPCHAT_AUTH_TOKEN}" "${to_hipchat}" " \
+${host} ${status_message}<br/> \
+<b>${alarm}</b> ${info_html}<br/> \
+<b>${chart}</b> (family <b>${family}</b>)<br/> \
+<b>${date}${raised_for_html}</b><br/> \
+<a href=\\\"${goto_url}\\\">View netdata dashboard</a> \
+(source of alarm ${src}) \
+"
+
+SENT_HIPCHAT=$?
+
+# -----------------------------------------------------------------------------
+# send the Amazon SNS message
+
+send_awssns "${to_awssns}"
+
+SENT_AWSSNS=$?
+
+# -----------------------------------------------------------------------------
+# send the Matrix message
+send_matrix "${MATRIX_HOMESERVER}" "${to_matrix}"
+
+SENT_MATRIX=$?
+
+
+# -----------------------------------------------------------------------------
+# send the syslog message
+
+send_syslog "${to_syslog}"
+
+SENT_SYSLOG=$?
+
+# -----------------------------------------------------------------------------
+# send the email
+
+IFS='' read -r -d '' email_plaintext_part <<EOF
+Content-Type: text/plain; charset=${EMAIL_CHARSET}
+Content-Disposition: inline
+Content-Transfer-Encoding: 8bit
+
+${host} ${status_message}
+
+${alarm} ${info}
+${raised_for}
+
+Chart : ${chart}
+Family : ${family}
+Severity: ${severity}
+URL : ${goto_url}
+Source : ${src}
+Date : ${date}
+Notification generated on ${host}
+
+Evaluated Expression : ${calc_expression}
+Expression Variables : ${calc_param_values}
+
+The host has ${total_warnings} WARNING and ${total_critical} CRITICAL alarm(s) raised.
+EOF
+
+if [[ "${EMAIL_PLAINTEXT_ONLY}" == "YES" ]]; then
+
+send_email <<EOF
+To: ${to_email}
+Subject: ${host} ${status_message} - ${name//_/ } - ${chart}
+MIME-Version: 1.0
+Content-Type: multipart/alternative; boundary="multipart-boundary"
+${email_thread_headers}
+
+This is a MIME-encoded multipart message
+
+--multipart-boundary
+${email_plaintext_part}
+--multipart-boundary--
+EOF
+
+else
+
+IFS='' read -r -d '' email_html_part <<EOF
+Content-Type: text/html; charset=${EMAIL_CHARSET}
+Content-Disposition: inline
+Content-Transfer-Encoding: 8bit
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml" style="font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; box-sizing: border-box; font-size: 14px; margin: 0; padding: 0;">
+<body style="font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; font-size: 14px; width: 100% !important; min-height: 100%; line-height: 1.6; background: #f6f6f6; margin:0; padding: 0;">
+<table>
+ <tbody>
+ <tr>
+ <td style="vertical-align: top;" valign="top"></td>
+ <td width="700" style="vertical-align: top; display: block !important; max-width: 700px !important; clear: both !important; margin: 0 auto; padding: 0;" valign="top">
+ <div style="max-width: 700px; display: block; margin: 0 auto; padding: 20px;">
+ <table width="100%" cellpadding="0" cellspacing="0" style="background: #fff; border: 1px solid #e9e9e9;">
+ <tbody>
+ <tr>
+ <td bgcolor="#eee" style="padding: 5px 20px 5px 20px; background-color: #eee;">
+ <div style="font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; font-size: 20px; color: #777; font-weight: bold;">netdata notification</div>
+ </td>
+ </tr>
+ <tr>
+ <td bgcolor="${color}" style="font-size: 16px; vertical-align: top; font-weight: 400; text-align: center; margin: 0; padding: 10px; color: #ffffff; background: ${color} !important; border: 1px solid ${color}; border-top-color: ${color};" align="center" valign="top">
+ <h1 style="font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; font-weight: 400; margin: 0;">${host} ${status_message}</h1>
+ </td>
+ </tr>
+ <tr>
+ <td style="vertical-align: top;" valign="top">
+ <div style="margin: 0; padding: 20px; max-width: 700px;">
+ <table width="100%" cellpadding="0" cellspacing="0" style="max-width:700px">
+ <tbody>
+ <tr>
+ <td style="font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; font-size: 18px; vertical-align: top; margin: 0; padding:0 0 20px;" align="left" valign="top">
+ <span>${chart}</span>
+ <span style="display: block; color: #666666; font-size: 12px; font-weight: 300; line-height: 1; text-transform: uppercase;">Chart</span>
+ </td>
+ </tr>
+ <tr style="margin: 0; padding: 0;">
+ <td style="font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; font-size: 18px; vertical-align: top; margin: 0; padding: 0 0 20px;" align="left" valign="top">
+ <span><b>${alarm}</b>${info_html}</span>
+ <span style="display: block; color: #666666; font-size: 12px; font-weight: 300; line-height: 1; text-transform: uppercase;">Alarm</span>
+ </td>
+ </tr>
+ <tr>
+ <td style="font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; font-size: 18px; vertical-align: top; margin: 0; padding: 0 0 20px;" align="left" valign="top">
+ <span>${family}</span>
+ <span style="display: block; color: #666666; font-size: 12px; font-weight: 300; line-height: 1; text-transform: uppercase;">Family</span>
+ </td>
+ </tr>
+ <tr style="margin: 0; padding: 0;">
+ <td style="font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; font-size: 18px; vertical-align: top; margin: 0; padding: 0 0 20px;" align="left" valign="top">
+ <span>${severity}</span>
+ <span style="display: block; color: #666666; font-size: 12px; font-weight: 300; line-height: 1; text-transform: uppercase;">Severity</span>
+ </td>
+ </tr>
+ <tr style="margin: 0; padding: 0;">
+ <td style="font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; font-size: 18px; vertical-align: top; margin: 0; padding: 0 0 20px;" align="left" valign="top"><span>${date}</span>
+ <span>${raised_for_html}</span> <span style="display: block; color: #666666; font-size: 12px; font-weight: 300; line-height: 1; text-transform: uppercase;">Time</span>
+ </td>
+ </tr>
+ <tr style="margin: 0; padding: 0;">
+ <td style="font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; font-size: 18px; vertical-align: top; margin: 0; padding: 0 0 20px;" align="left" valign="top">
+ <span>${calc_expression}</span>
+ <span style="display: block; color: #666666; font-size: 12px; font-weight: 300; line-height: 1; text-transform: uppercase;">Evaluated Expression</span>
+ </td>
+ </tr>
+ <tr style="margin: 0; padding: 0;">
+ <td style="font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; font-size: 18px; vertical-align: top; margin: 0; padding: 0 0 20px;" align="left" valign="top">
+ <span>${calc_param_values}</span>
+ <span style="display: block; color: #666666; font-size: 12px; font-weight: 300; line-height: 1; text-transform: uppercase;">Expression Variables</span>
+ </td>
+ </tr>
+ <tr style="margin: 0; padding: 0;">
+ <td style="font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; font-size: 18px; vertical-align: top; margin: 0; padding: 0 0 20px;" align="left" valign="top">
+ The host has ${total_warnings} WARNING and ${total_critical} CRITICAL alarm(s) raised.
+ </td>
+ </tr>
+
+ <tr style="margin: 0; padding: 0;">
+ <td style="font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; font-size: 18px; vertical-align: top; margin: 0; padding: 0 0 20px;">
+ <a href="${goto_url}" style="font-size: 14px; color: #ffffff; text-decoration: none; line-height: 1.5; font-weight: bold; text-align: center; display: inline-block; text-transform: capitalize; background: #35568d; border-width: 1px; border-style: solid; border-color: #2b4c86; margin: 0; padding: 10px 15px;" target="_blank">View Netdata</a>
+ </td>
+ </tr>
+ <tr style="text-align: center; margin: 0; padding: 0;">
+ <td style="font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; font-size: 11px; vertical-align: top; margin: 0; padding: 10px 0 0 0; color: #666666;" align="center" valign="bottom">The source of this alarm is line <code>${src}</code><br/>(alarms are configurable, edit this file to adapt the alarm to your needs)
+ </td>
+ </tr>
+ <tr style="text-align: center; margin: 0; padding: 0;">
+ <td style="font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; font-size: 12px; vertical-align: top; margin:0; padding: 20px 0 0 0; color: #666666; border-top: 1px solid #f0f0f0;" align="center" valign="bottom">Sent by
+ <a href="https://mynetdata.io/" target="_blank">netdata</a>, the real-time performance and health monitoring, on <code>${host}</code>.
+ </td>
+ </tr>
+ </tbody>
+ </table>
+ </div>
+ </td>
+ </tr>
+ </tbody>
+ </table>
+ </div>
+ </td>
+ </tr>
+ </tbody>
+</table>
+</body>
+</html>
+EOF
+
+send_email <<EOF
+To: ${to_email}
+Subject: ${host} ${status_message} - ${name//_/ } - ${chart}
+MIME-Version: 1.0
+Content-Type: multipart/alternative; boundary="multipart-boundary"
+${email_thread_headers}
+
+This is a MIME-encoded multipart message
+
+--multipart-boundary
+${email_plaintext_part}
+--multipart-boundary
+${email_html_part}
+--multipart-boundary--
+EOF
+
+fi
+
+SENT_EMAIL=$?
+
+# -----------------------------------------------------------------------------
+# send the EVENT to Dynatrace
+send_dynatrace "${host}" "${chart}" "${name}" "${status}"
+SENT_DYNATRACE=$?
+
+# -----------------------------------------------------------------------------
+# send the EVENT to Stackpulse
+send_stackpulse
+SENT_STACKPULSE=$?
+
+# -----------------------------------------------------------------------------
+# send messages to Opsgenie
+send_opsgenie
+SENT_OPSGENIE=$?
+
+# -----------------------------------------------------------------------------
+# let netdata know
+for state in "${SENT_EMAIL}" \
+ "${SENT_PUSHOVER}" \
+ "${SENT_TELEGRAM}" \
+ "${SENT_SLACK}" \
+ "${SENT_HANGOUTS}" \
+ "${SENT_ROCKETCHAT}" \
+ "${SENT_ALERTA}" \
+ "${SENT_FLOCK}" \
+ "${SENT_DISCORD}" \
+ "${SENT_TWILIO}" \
+ "${SENT_HIPCHAT}" \
+ "${SENT_MESSAGEBIRD}" \
+ "${SENT_KAVENEGAR}" \
+ "${SENT_PUSHBULLET}" \
+ "${SENT_KAFKA}" \
+ "${SENT_PD}" \
+ "${SENT_FLEEP}" \
+ "${SENT_PROWL}" \
+ "${SENT_CUSTOM}" \
+ "${SENT_IRC}" \
+ "${SENT_AWSSNS}" \
+ "${SENT_MATRIX}" \
+ "${SENT_SYSLOG}" \
+ "${SENT_SMS}" \
+ "${SENT_MSTEAM}" \
+ "${SENT_DYNATRACE}" \
+ "${SENT_STACKPULSE}" \
+ "${SENT_OPSGENIE}"; do
+ if [ "${state}" -eq 0 ]; then
+ # we sent something
+ exit 0
+ fi
+done
+# we did not send anything
+exit 1
diff --git a/health/notifications/alarm-test.sh b/health/notifications/alarm-test.sh
new file mode 100755
index 0000000..828aa75
--- /dev/null
+++ b/health/notifications/alarm-test.sh
@@ -0,0 +1,12 @@
+#!/usr/bin/env bash
+
+# netdata
+# real-time performance and health monitoring, done right!
+# (C) 2017 Costa Tsaousis <costa@tsaousis.gr>
+# SPDX-License-Identifier: GPL-3.0-or-later
+#
+# Script to test alarm notifications for netdata
+
+dir="$(dirname "${0}")"
+"${dir}/alarm-notify.sh" test "${1}"
+exit $?
diff --git a/health/notifications/alerta/Makefile.inc b/health/notifications/alerta/Makefile.inc
new file mode 100644
index 0000000..10f26b0
--- /dev/null
+++ b/health/notifications/alerta/Makefile.inc
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_noinst_DATA += \
+ alerta/README.md \
+ alerta/Makefile.inc \
+ $(NULL)
+
diff --git a/health/notifications/alerta/README.md b/health/notifications/alerta/README.md
new file mode 100644
index 0000000..8f1679a
--- /dev/null
+++ b/health/notifications/alerta/README.md
@@ -0,0 +1,94 @@
+<!--
+title: "alerta.io"
+description: "Send alarm notifications to Alerta to see the latest health status updates from multiple nodes in a single interface."
+custom_edit_url: https://github.com/netdata/netdata/edit/master/health/notifications/alerta/README.md
+-->
+
+# alerta.io
+
+The [Alerta](https://alerta.io) monitoring system is a tool used to
+consolidate and de-duplicate alerts from multiple sources for quick
+‘at-a-glance’ visualisation. With just one system you can monitor
+alerts from many other monitoring tools on a single screen.
+
+![](https://docs.alerta.io/en/latest/_images/alerta-screen-shot-3.png)
+
+Netdata alarms can be sent to Alerta so you can see in one place
+alerts coming from many Netdata hosts or from a multi-host
+Netdata configuration. The big advantage over other notification
+systems is that there is a main view of all active alarms with
+the most recent state, and it is also possible to view alarm history.
+
+## Deploying Alerta
+
+It is recommended to run Alerta on a separate server, VM or
+container. If you already have an Nginx or Apache server in your
+organization, it is recommended to proxy to this new server.
+
+The easiest way to install Alerta is to use the Docker image available
+on [Docker hub][1]. Alternatively, follow the ["getting started"][2]
+tutorial to deploy Alerta to an Ubuntu server. More advanced
+configurations are out of scope of this tutorial, but information
+about different deployment scenarios can be found in the [docs][3].
+
+[1]: https://hub.docker.com/r/alerta/alerta-web/
+
+[2]: http://alerta.readthedocs.io/en/latest/gettingstarted/tutorial-1-deploy-alerta.html
+
+[3]: http://docs.alerta.io/en/latest/deployment.html
+
+## Send alarms to Alerta
+
+Step 1. Create an API key (if authentication is enabled)
+
+You will need an API key to send messages from any source if
+Alerta is configured to use authentication (recommended). To
+create an API key, go to "Configuration -> API Keys" and create
+a new API key called "netdata" with `write:alerts` permission.
+
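+You can verify the key before wiring up Netdata by POSTing a test
+alert directly to the Alerta API. The following is a minimal sketch;
+adjust the URL, key and environment to your deployment:
+
+```sh
+curl -X POST "http://yourserver/alerta/api/alert" \
+  -H "Authorization: Key INSERT_YOUR_API_KEY_HERE" \
+  -H "Content-Type: application/json" \
+  --data '{
+    "resource": "test-host",
+    "event": "netdata.test",
+    "environment": "Production",
+    "severity": "warning",
+    "service": ["Netdata"],
+    "text": "test alert sent manually"
+  }'
+```
+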
+Step 2. Configure Netdata to send alarms to Alerta
+
+On your system run:
+
+```sh
+/etc/netdata/edit-config health_alarm_notify.conf
+```
+
+and modify the file as below:
+
+```
+# enable/disable sending alerta notifications
+SEND_ALERTA="YES"
+
+# here set your Alerta server API url
+# this is the API url you defined when you installed the Alerta server,
+# it is the same for all users. Do not include a trailing slash.
+ALERTA_WEBHOOK_URL="http://yourserver/alerta/api"
+
+# Log in to your Alerta server with an administrative user and create an API KEY
+# with write permissions.
+ALERTA_API_KEY="INSERT_YOUR_API_KEY_HERE"
+
+# you can define environments in /etc/alertad.conf option ALLOWED_ENVIRONMENTS
+# standard environments are Production and Development
+# if a role's recipients are not configured, a notification will be sent to
+# this Environment (empty = do not send a notification for unconfigured roles):
+DEFAULT_RECIPIENT_ALERTA="Production"
+```
+
+## Test alarms
+
+We can test alarms using the standard approach:
+
+```sh
+/opt/netdata/netdata-plugins/plugins.d/alarm-notify.sh test
+```
+
+Note: Netdata will send 3 test alarms, and because the last alarm is
+"CLEAR" you will not see them on the main Alerta page; select "closed"
+in the top-right lookup to see them. A small change to `alarm-notify.sh`
+that would let us test each state one by one would be useful.
+
+For more information see <https://docs.alerta.io>
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fhealth%2Fnotifications%2Falerta%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/health/notifications/awssns/Makefile.inc b/health/notifications/awssns/Makefile.inc
new file mode 100644
index 0000000..ee86f4b
--- /dev/null
+++ b/health/notifications/awssns/Makefile.inc
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_noinst_DATA += \
+ awssns/README.md \
+ awssns/Makefile.inc \
+ $(NULL)
+
diff --git a/health/notifications/awssns/README.md b/health/notifications/awssns/README.md
new file mode 100644
index 0000000..c682513
--- /dev/null
+++ b/health/notifications/awssns/README.md
@@ -0,0 +1,43 @@
+<!--
+title: "Amazon SNS"
+description: "hello"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/health/notifications/awssns/README.md
+-->
+
+# Amazon SNS
+
+As part of its AWS suite, Amazon provides a notification broker service called 'Simple Notification Service', or SNS. Amazon SNS works similarly to Netdata's own notification system, allowing a single notification to be dispatched to multiple subscribers of different types. Among other things, SNS supports sending notifications to:
+
+- Email addresses.
+- Mobile Phones via SMS.
+- HTTP or HTTPS web hooks.
+- AWS Lambda functions.
+- AWS SQS queues.
+- Mobile applications via push notifications.
+
+To get this working, you will need:
+
+- The Amazon Web Services CLI tools. Most distributions provide these with the package name `awscli`.
+- An actual home directory for the user you run Netdata as, instead of just using `/` as a home directory. Setup of this is distribution specific. `/var/lib/netdata` is the recommended directory (because the permissions will already be correct) if you are using a dedicated user (which is how most distributions work).
+- An Amazon SNS topic to send notifications to with one or more subscribers. The [Getting
+ Started](https://docs.aws.amazon.com/sns/latest/dg/sns-getting-started.html) section of the Amazon SNS documentation
+ covers the basics of how to set this up. Make note of the Topic ARN when you create the topic.
+- While not mandatory, it is highly recommended to create a dedicated IAM user on your account for Netdata to send notifications. This user needs to have programmatic access, and should only allow access to SNS. If you're really paranoid, you can create one for each system or group of systems.
+
+Once you have all the above, run the following command as the user Netdata runs under:
+
+```
+aws configure
+```
+
+This will prompt you for the access key and secret key for accessing Amazon SNS (as well as the default region and output format, but you can leave those blank because we don't use them).
+
+Once that's done, you're ready to go and can specify the desired topic ARN as a recipient.
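+
+For example, in `health_alarm_notify.conf` (a sketch: the topic ARN below is a placeholder, and the recipient variable is assumed to follow the same `DEFAULT_RECIPIENT_*` pattern used by the other senders):
+
+```
+SEND_AWSSNS="YES"
+DEFAULT_RECIPIENT_AWSSNS="arn:aws:sns:us-east-1:123456789012:netdata-alarms"
+
+# optional: override the notification text (this is the sender's default format)
+AWSSNS_MESSAGE_FORMAT="${status} on ${host} at ${date}: ${chart} ${value_string}"
+```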
+
+Notes:
+
+- Netdata's native email notification support is far better in almost all respects than its support through Amazon SNS. If you want email notifications, use the native support, not SNS.
+- If you need to change the notification format for SNS notifications, you can do so by specifying the format in `AWSSNS_MESSAGE_FORMAT` in the configuration. This variable supports all the same variables you can use in custom notifications.
+- While Amazon SNS supports sending differently formatted messages for different delivery methods, Netdata does not currently support this functionality.
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fhealth%2Fnotifications%2Fawssns%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/health/notifications/custom/Makefile.inc b/health/notifications/custom/Makefile.inc
new file mode 100644
index 0000000..c64ebda
--- /dev/null
+++ b/health/notifications/custom/Makefile.inc
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_noinst_DATA += \
+ custom/README.md \
+ custom/Makefile.inc \
+ $(NULL)
+
diff --git a/health/notifications/custom/README.md b/health/notifications/custom/README.md
new file mode 100644
index 0000000..04376d5
--- /dev/null
+++ b/health/notifications/custom/README.md
@@ -0,0 +1,88 @@
+<!--
+title: "Custom"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/health/notifications/custom/README.md
+-->
+
+# Custom
+
+Netdata allows you to send custom notifications to any endpoint you choose.
+
+To configure custom notifications, you will need to customize `health_alarm_notify.conf`. You can look at the other senders in `/usr/libexec/netdata/plugins.d/alarm-notify.sh` for examples of how to modify the `custom_sender()` function in `health_alarm_notify.conf`. Ensure you follow the instructions for changing any configuration file, so that you [persist your configuration](/docs/configuration-guide.md#persist-my-configuration).
+
+As with other notifications, you will also need to define the recipient list in `DEFAULT_RECIPIENT_CUSTOM` and/or the `role_recipients_custom` array.
+
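+A minimal recipient configuration might look like the following sketch (the recipient names are hypothetical; they are simply passed to `custom_sender()` as its first argument):
+
+```
+SEND_CUSTOM="YES"
+DEFAULT_RECIPIENT_CUSTOM="sysadmin-pager"
+role_recipients_custom[dba]="dba-pager"
+```
+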
+The following is a sample `custom_sender` function in `health_alarm_notify.conf`, to send an SMS via an imaginary HTTPS endpoint to the SMS gateway:
+
+```
+ custom_sender() {
+ # example human readable SMS
+ local msg="${host} ${status_message}: ${alarm} ${raised_for}"
+
+ # limit it to 160 characters and encode it for use in a URL
+ urlencode "${msg:0:160}" >/dev/null; msg="${REPLY}"
+
+ # a space separated list of the recipients to send alarms to
+ to="${1}"
+
+ for phone in ${to}; do
+ httpcode=$(docurl -X POST \
+ --data-urlencode "From=XXX" \
+ --data-urlencode "To=${phone}" \
+ --data-urlencode "Body=${msg}" \
+ -u "${accountsid}:${accounttoken}" \
+ https://domain.website.com/)
+
+ if [ "${httpcode}" = "200" ]; then
+ info "sent custom notification ${msg} to ${phone}"
+ sent=$((sent + 1))
+ else
+ error "failed to send custom notification ${msg} to ${phone} with HTTP error code ${httpcode}."
+ fi
+ done
+}
+```
+
+Variables available to the custom_sender:
+
+- `${to_custom}` the list of recipients for the alarm
+- `${host}` the host that generated this event
+- `${url_host}` same as `${host}` but URL encoded
+- `${unique_id}` the unique id of this event
+- `${alarm_id}` the unique id of the alarm that generated this event
+- `${event_id}` the incremental id of the event, for this alarm id
+- `${when}` the timestamp at which this event occurred
+- `${name}` the name of the alarm, as given in Netdata health.d entries
+- `${url_name}` same as `${name}` but URL encoded
+- `${chart}` the name of the chart (type.id)
+- `${url_chart}` same as `${chart}` but URL encoded
+- `${family}` the family of the chart
+- `${url_family}` same as `${family}` but URL encoded
+- `${status}` the current status: REMOVED, UNINITIALIZED, UNDEFINED, CLEAR, WARNING, CRITICAL
+- `${old_status}` the previous status: REMOVED, UNINITIALIZED, UNDEFINED, CLEAR, WARNING, CRITICAL
+- `${value}` the current value of the alarm
+- `${old_value}` the previous value of the alarm
+- `${src}` the line number and file where the alarm has been configured
+- `${duration}` the duration in seconds of the previous alarm state
+- `${duration_txt}` same as `${duration}` for humans
+- `${non_clear_duration}` the total duration in seconds this is/was non-clear
+- `${non_clear_duration_txt}` same as `${non_clear_duration}` for humans
+- `${units}` the units of the value
+- `${info}` a short description of the alarm
+- `${value_string}` friendly value (with units)
+- `${old_value_string}` friendly old value (with units)
+- `${image}` the URL of an image to represent the status of the alarm
+- `${color}` a color in #AABBCC format for the alarm
+- `${goto_url}` the URL the user can click to see the Netdata dashboard
+- `${calc_expression}` the expression evaluated to provide the value for the alarm
+- `${calc_param_values}` the value of the variables in the evaluated expression
+- `${total_warnings}` the total number of alarms in WARNING state on the host
+- `${total_critical}` the total number of alarms in CRITICAL state on the host
+
+The following are more human friendly:
+
+- `${alarm}` like "name = value units"
+- `${status_message}` like "needs attention", "recovered", "is critical"
+- `${severity}` like "Escalated to CRITICAL", "Recovered from WARNING"
+- `${raised_for}` like "(alarm was raised for 10 minutes)"
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fhealth%2Fnotifications%2Fcustom%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/health/notifications/discord/Makefile.inc b/health/notifications/discord/Makefile.inc
new file mode 100644
index 0000000..78de723
--- /dev/null
+++ b/health/notifications/discord/Makefile.inc
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_noinst_DATA += \
+ discord/README.md \
+ discord/Makefile.inc \
+ $(NULL)
+
diff --git a/health/notifications/discord/README.md b/health/notifications/discord/README.md
new file mode 100644
index 0000000..1650d9c
--- /dev/null
+++ b/health/notifications/discord/README.md
@@ -0,0 +1,50 @@
+<!--
+title: "Discordapp.com"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/health/notifications/discord/README.md
+-->
+
+# Discordapp.com
+
+This is what you will get:
+
+![image](https://cloud.githubusercontent.com/assets/7321975/22215935/b49ede7e-e162-11e6-98d0-ae8541e6b92e.png)
+
+You need:
+
+1. The **incoming webhook URL** as given by Discord. Create a webhook by following the official [Discord documentation](https://support.discordapp.com/hc/en-us/articles/228383668-Intro-to-Webhooks). You can use the same on all your Netdata servers (or you can have multiple if you like - your decision).
+2. One or more Discord channels to post the messages to.
+
+Set them in `/etc/netdata/health_alarm_notify.conf` (to edit it on your system run `/etc/netdata/edit-config health_alarm_notify.conf`), like this:
+
+```
+###############################################################################
+# sending discord notifications
+
+# note: multiple recipients can be given like this:
+# "CHANNEL1 CHANNEL2 ..."
+
+# enable/disable sending discord notifications
+SEND_DISCORD="YES"
+
+# Create a webhook by following the official documentation -
+# https://support.discordapp.com/hc/en-us/articles/228383668-Intro-to-Webhooks
+DISCORD_WEBHOOK_URL="https://discordapp.com/api/webhooks/XXXXXXXXXXXXX/XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
+
+# if a role's recipients are not configured, a notification will be sent to
+# this discord channel (empty = do not send a notification for unconfigured
+# roles):
+DEFAULT_RECIPIENT_DISCORD="alarms"
+```
+
+You can define multiple channels like this: `alarms systems`.
+You can give different channels per **role** using these (in the same file):
+
+```
+role_recipients_discord[sysadmin]="systems"
+role_recipients_discord[dba]="databases systems"
+role_recipients_discord[webmaster]="marketing development"
+```
+
+The keywords `systems`, `databases`, `marketing`, `development` are discordapp.com channels (they should already exist within your Discord server).
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fhealth%2Fnotifications%2Fdiscord%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/health/notifications/dynatrace/Makefile.inc b/health/notifications/dynatrace/Makefile.inc
new file mode 100644
index 0000000..a2ae623
--- /dev/null
+++ b/health/notifications/dynatrace/Makefile.inc
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_noinst_DATA += \
+ dynatrace/README.md \
+ dynatrace/Makefile.inc \
+ $(NULL)
+
diff --git a/health/notifications/dynatrace/README.md b/health/notifications/dynatrace/README.md
new file mode 100644
index 0000000..cc82ee7
--- /dev/null
+++ b/health/notifications/dynatrace/README.md
@@ -0,0 +1,36 @@
+<!--
+title: "Dynatrace"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/health/notifications/dynatrace/README.md
+-->
+
+# Dynatrace
+
+Dynatrace allows you to receive notifications using their Events REST API.
+
+See [the Dynatrace documentation](https://www.dynatrace.com/support/help/extend-dynatrace/dynatrace-api/environment-api/events/post-event/) about POSTing an event in the Events API for more details.
+
+You need:
+
+1. A Dynatrace server. You can use the same server for all your Netdata hosts, but make sure it is reachable over the network from them.
+The Dynatrace server must be given with its protocol prefix (`http://` or `https://`). For example: `https://monitor.example.com`
+This is a required parameter.
+2. An API token. Generate a secure access API token that enables access to your Dynatrace monitoring data via the REST-based API.
+On your Dynatrace server, go to **Settings** --> **Integration** --> **Dynatrace API** --> **Generate token**.
+See [Dynatrace API - Authentication](https://www.dynatrace.com/support/help/extend-dynatrace/dynatrace-api/basics/dynatrace-api-authentication/) for more details.
+This is a required parameter.
+3. An API space. This is the URL part of the page from which you generated the API token. For example, the URL
+   for a generated API token might look like:
+   `https://monitor.illumineit.com/e/2a93fe0e-4cd5-469a-9d0d-1a064235cfce/#settings/integration/apikeys;gf=all` In that
+   case, the space is _2a93fe0e-4cd5-469a-9d0d-1a064235cfce_. This is a required parameter.
+4. A server tag. On your Dynatrace server, go to **Settings** --> **Tags** --> **Manually applied tags** and create the tag.
+The Netdata alarm is sent as a Dynatrace event and correlated with all hosts tagged with the tag you created.
+This is a required parameter.
+5. Specify the Dynatrace event. This can be one of `CUSTOM_INFO`, `CUSTOM_ANNOTATION`, `CUSTOM_CONFIGURATION`, and `CUSTOM_DEPLOYMENT`.
+The default value is `CUSTOM_INFO`.
+This is a required parameter.
+6. Specify the annotation type. This is the source of the Dynatrace event. Use whatever fits you; for example
+_Netdata Alarm_, which is also the default value. A configuration sketch putting these parameters together follows below.
+
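+A minimal configuration sketch for `/etc/netdata/health_alarm_notify.conf`, with placeholder values; the variable
+names match the shipped configuration file:
+
+```
+SEND_DYNATRACE="YES"
+DYNATRACE_SERVER="https://monitor.example.com"
+DYNATRACE_TOKEN="XXXXXXXXXXXXXXXXXXXX"
+DYNATRACE_SPACE="2a93fe0e-4cd5-469a-9d0d-1a064235cfce"
+DYNATRACE_TAG_VALUE="netdata"
+DYNATRACE_ANNOTATION_TYPE="Netdata Alarm"
+DYNATRACE_EVENT="CUSTOM_INFO"
+```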
diff --git a/health/notifications/email/Makefile.inc b/health/notifications/email/Makefile.inc
new file mode 100644
index 0000000..95dc7cf
--- /dev/null
+++ b/health/notifications/email/Makefile.inc
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_noinst_DATA += \
+ email/README.md \
+ email/Makefile.inc \
+ $(NULL)
+
diff --git a/health/notifications/email/README.md b/health/notifications/email/README.md
new file mode 100644
index 0000000..827a9c0
--- /dev/null
+++ b/health/notifications/email/README.md
@@ -0,0 +1,63 @@
+<!--
+title: "Email"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/health/notifications/email/README.md
+-->
+
+# Email
+
+You need a working `sendmail` command for email alerts to work. Almost all MTAs provide a `sendmail` interface.
+
+Netdata sends all emails as user `netdata`, so make sure your `sendmail` works for local users.
+
+Email notifications look like this:
+
+![image](https://cloud.githubusercontent.com/assets/2662304/18407294/e9218c68-7714-11e6-8739-e4dd8a498252.png)
+
+## Configuration
+
+To edit `health_alarm_notify.conf` on your system run `/etc/netdata/edit-config health_alarm_notify.conf`.
+
+You can configure recipients in [`/etc/netdata/health_alarm_notify.conf`](https://github.com/netdata/netdata/blob/99d44b7d0c4e006b11318a28ba4a7e7d3f9b3bae/conf.d/health_alarm_notify.conf#L101).
+
+You can also configure per role recipients [in the same file, a few lines below](https://github.com/netdata/netdata/blob/99d44b7d0c4e006b11318a28ba4a7e7d3f9b3bae/conf.d/health_alarm_notify.conf#L313).
+
+Changes to this file do not require a Netdata restart.
+
+You can test your configuration by issuing the commands:
+
+```sh
+# become user netdata
+sudo su -s /bin/bash netdata
+
+# send a test alarm
+/usr/libexec/netdata/plugins.d/alarm-notify.sh test [ROLE]
+```
+
+Where `[ROLE]` is the role you want to test. The default (if you don't give a `[ROLE]`) is `sysadmin`.
+
+Note that in versions before 1.16, the `plugins.d` directory may be installed in a different location on certain operating systems (e.g. under `/usr/lib/netdata`).
+You can always find the location of the `alarm-notify.sh` script in `netdata.conf`.
+
+## Simple SMTP transport configuration
+
+If you want an alternative to `sendmail` with a simple MTA configuration that can authenticate against an existing SMTP server, you can do the following:
+
+- Install `msmtp`.
+- Modify the `sendmail` path in `health_alarm_notify.conf` to point to the location of `msmtp`:
+```
+# The full path to the sendmail command.
+# If empty, the system $PATH will be searched for it.
+# If not found, email notifications will be disabled (silently).
+sendmail="/usr/bin/msmtp"
+```
+- Log in as the `netdata` user:
+```sh
+sudo su -s /bin/bash netdata
+```
+- Configure `~/.msmtprc` as shown [in the documentation](https://marlam.de/msmtp/documentation/); a minimal sketch follows below.
+- Finally, set the appropriate permissions on the `.msmtprc` file:
+```sh
+chmod 600 ~/.msmtprc
+```
+
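+A minimal `~/.msmtprc` sketch, assuming an SMTP server at `smtp.example.com` with STARTTLS on port 587 (all values
+are placeholders; see the msmtp documentation for the full set of options):
+
+```
+defaults
+auth           on
+tls            on
+logfile        ~/.msmtp.log
+
+account        default
+host           smtp.example.com
+port           587
+from           netdata@example.com
+user           netdata@example.com
+password       my-secret-password
+```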
diff --git a/health/notifications/flock/Makefile.inc b/health/notifications/flock/Makefile.inc
new file mode 100644
index 0000000..5bde161
--- /dev/null
+++ b/health/notifications/flock/Makefile.inc
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_noinst_DATA += \
+ flock/README.md \
+ flock/Makefile.inc \
+ $(NULL)
+
diff --git a/health/notifications/flock/README.md b/health/notifications/flock/README.md
new file mode 100644
index 0000000..b24ecdb
--- /dev/null
+++ b/health/notifications/flock/README.md
@@ -0,0 +1,37 @@
+<!--
+title: "Flock"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/health/notifications/flock/README.md
+-->
+
+# Flock
+
+This is what you will get:
+
+![Flock](https://i.imgur.com/ok9bRzw.png)
+
+You need:
+
+The **incoming webhook URL** as given by flock.com.
+You can use the same webhook on all your Netdata servers, or create multiple webhooks if you prefer.
+
+Get them here: <https://admin.flock.com/webhooks>
+
+Set them in `/etc/netdata/health_alarm_notify.conf` (to edit it on your system run `/etc/netdata/edit-config health_alarm_notify.conf`), like this:
+
+```
+###############################################################################
+# sending flock notifications
+
+# enable/disable sending flock notifications
+SEND_FLOCK="YES"
+
+# Login to flock.com and create an incoming webhook.
+# You need only one for all your Netdata servers.
+# Without it, Netdata cannot send flock notifications.
+FLOCK_WEBHOOK_URL="https://api.flock.com/hooks/sendMessage/XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
+
+# if a role recipient is not configured, no notification will be sent
+DEFAULT_RECIPIENT_FLOCK="alarms"
+```
+
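+You can also give different channels per **role**, following the same pattern as the other notification methods (an
+illustrative sketch; the channel names are placeholders):
+
+```
+role_recipients_flock[sysadmin]="systems"
+role_recipients_flock[dba]="databases systems"
+role_recipients_flock[webmaster]="marketing development"
+```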
diff --git a/health/notifications/hangouts/Makefile.inc b/health/notifications/hangouts/Makefile.inc
new file mode 100644
index 0000000..6ff1dff
--- /dev/null
+++ b/health/notifications/hangouts/Makefile.inc
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_noinst_DATA += \
+ hangouts/README.md \
+ hangouts/Makefile.inc \
+ $(NULL)
+
diff --git a/health/notifications/hangouts/README.md b/health/notifications/hangouts/README.md
new file mode 100644
index 0000000..886abfc
--- /dev/null
+++ b/health/notifications/hangouts/README.md
@@ -0,0 +1,55 @@
+<!--
+title: "Send notifications to Google Hangouts"
+description: "Send alerts to Google Hangouts any time an anomaly or performance issue strikes a node in your infrastructure."
+sidebar_label: "Google Hangouts"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/health/notifications/hangouts/README.md
+-->
+
+# Send notifications to Google Hangouts
+
+[Google Hangouts](https://hangouts.google.com/) is a cross-platform messaging app developed by Google. You can configure
+Netdata to send alarm notifications to a Hangouts room in order to stay aware of possible health or performance issues
+on your nodes. Here's an example of the notification in action:
+
+![Netdata on Hangouts](https://user-images.githubusercontent.com/1153921/66427166-47de6900-e9c8-11e9-8322-b4b03f084dc1.png)
+
+To receive notifications in Google Hangouts, you need the following in your Hangouts setup:
+
+1. One or more rooms.
+2. An **incoming webhook** for each room.
+
+Follow [Google's documentation](https://developers.google.com/hangouts/chat/how-tos/webhooks) to create an incoming
+webhook for each room you want to send Netdata notifications to.
+
+Set the webhook URIs and room names in `health_alarm_notify.conf`. To edit it on your system, run
+`/etc/netdata/edit-config health_alarm_notify.conf`. See the example configuration below.
+
+## Threads (optional)
+
+Instead of receiving alarms in separate threads, Netdata allows you to concentrate them in a single thread by
+setting the variable `HANGOUTS_WEBHOOK_THREAD[NAME]`.
+
+```
+#------------------------------------------------------------------------------
+# hangouts (google hangouts chat) global notification options
+# enable/disable sending hangouts notifications
+SEND_HANGOUTS="YES"
+# On Hangouts, in the room you choose, create an incoming webhook,
+# copy the link and paste it below and also identify the room name.
+# Without it, netdata cannot send hangouts notifications to that room.
+# HANGOUTS_WEBHOOK_URI[ROOM_NAME]="URLforroom1"
+HANGOUTS_WEBHOOK_URI[systems]="https://chat.googleapis.com/v1/spaces/AAAAXXXXXXX/..."
+HANGOUTS_WEBHOOK_URI[development]="https://chat.googleapis.com/v1/spaces/AAAAYYYYY/..."
+# On Hangouts, copy a thread link and change the values for space and thread
+# HANGOUTS_WEBHOOK_THREAD[systems]="spaces/AAAAXXXXXXX/threads/XXXXXXXXXXX"
+# if DEFAULT_RECIPIENT_HANGOUTS is not configured,
+# notifications won't be sent to hangouts rooms.
+# DEFAULT_RECIPIENT_HANGOUTS="systems development|critical"
+DEFAULT_RECIPIENT_HANGOUTS="sysadmin devops alarms|critical"
+```
+
+You can define multiple rooms like this: `sysadmin devops alarms|critical`.
+
+The keywords `sysadmin`, `devops`, and `alarms` are Hangouts rooms.
+
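+You can also assign different rooms per **role**, following the same pattern as the other notification methods (an
+illustrative sketch; the room names are placeholders):
+
+```
+role_recipients_hangouts[sysadmin]="sysadmin"
+role_recipients_hangouts[dba]="alarms"
+role_recipients_hangouts[webmaster]="development"
+```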
diff --git a/health/notifications/health_alarm_notify.conf b/health/notifications/health_alarm_notify.conf
new file mode 100755
index 0000000..be669e1
--- /dev/null
+++ b/health/notifications/health_alarm_notify.conf
@@ -0,0 +1,1251 @@
+# Configuration for alarm notifications
+#
+# This configuration is used by: alarm-notify.sh
+# changes take effect immediately (the next alarm will use them).
+#
+# alarm-notify.sh can send:
+# - e-mails (using the sendmail command),
+# - push notifications to your mobile phone (pushover.net),
+# - messages to your slack team (slack.com),
+# - messages to your alerta server (alerta.io),
+# - messages to your flock team (flock.com),
+# - messages to your discord guild (discordapp.com),
+# - messages to your telegram chat / group chat (telegram.org)
+# - sms messages to your cell phone or any sms enabled device (twilio.com)
+# - sms messages to your cell phone or any sms enabled device (messagebird.com)
+# - sms messages to your cell phone or any sms enabled device (smstools3)
+# - notifications to users on pagerduty.com
+# - push notifications to iOS devices (via prowlapp.com)
+# - notifications to Amazon SNS topics (aws.amazon.com)
+# - messages to your irc channel on your selected network
+# - messages to a local or remote syslog daemon
+# - messages to Microsoft Teams (through webhook)
+# - messages to Rocket.Chat (through webhook)
+# - messages to Google Hangouts Chat (through webhook)
+#
+# The 'to' line given in netdata alarms defines a *role*, so that many
+# people can be notified for each role.
+#
+# This file is a BASH script itself.
+#
+#
+#------------------------------------------------------------------------------
+# proxy configuration
+#
+# If you need to send curl based notifications (pushover, pushbullet, slack, alerta,
+# flock, discord, telegram) via a proxy, set these to your proxy address:
+#export http_proxy="http://10.0.0.1:3128/"
+#export https_proxy="http://10.0.0.1:3128/"
+
+
+#------------------------------------------------------------------------------
+# notifications images
+#
+# Images in notifications need to be downloaded from an Internet-facing site.
+# To allow notification providers to fetch the icons/images, by default we set
+# the URL of the global public netdata registry.
+# If you have an Internet facing netdata (or you have copied the images/ folder
+# of netdata to your web server), set its URL here, to fetch the notification
+# images from it.
+#images_base_url="http://my.public.netdata.server:19999"
+
+
+#------------------------------------------------------------------------------
+# date handling
+#
+# You can configure netdata alerts to send dates in any format you want.
+# This uses standard `date` command format strings. See `man date` for
+# more info on what you can put in here. Note that this has to start with a '+', otherwise it won't work.
+#
+# For ISO 8601 dates, use '+%FT%T%z'
+# For RFC 5322 dates, use '+%a, %d %b %Y %H:%M:%S %z'
+# For RFC 3339 dates, use '+%F %T%:z'
+# For RFC 1123 dates, use '+%a, %d %b %Y %H:%M:%S %Z'
+# For RFC 1036 dates, use '+%A, %d-%b-%y %H:%M:%S %Z'
+# For a reasonably local date and time (in that order), use '+%x %X'
+# For the old default behavior (compatible with ANSI C's asctime() function), leave this empty.
+date_format=''
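+# For example, for ISO 8601 timestamps you could set (an illustrative value,
+# not the shipped default):
+#date_format='+%FT%T%z'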
+
+
+#------------------------------------------------------------------------------
+# hostname handling
+#
+# By default, Netdata will use the simple hostname for the system (the
+# hostname with everything after the first `.` removed) when displaying
+# the hostname in alert notifications. If you prefer, you can uncomment
+# the line below to have Netdata instead use the host's fully qualified
+# domain name.
+#
+# This does not report correct FQDNs for child systems for which this
+# system is a parent.
+#
+# Additionally, if the system host name is overridden in /etc/netdata.conf
+# with the `hostname` option, that name will be used unconditionally
+# instead of this.
+#use_fqdn='YES'
+
+
+#------------------------------------------------------------------------------
+# external commands
+
+# The full path to the sendmail command.
+# If empty, the system $PATH will be searched for it.
+# If not found, email notifications will be disabled (silently).
+sendmail=""
+
+# The full path of the curl command.
+# If empty, the system $PATH will be searched for it.
+# If not found, most notifications will be silently disabled.
+curl=""
+
+# The full path of the nc command.
+# If empty, the system $PATH will be searched for it.
+# If not found, irc notifications will be silently disabled.
+nc=""
+
+# The full path of the logger command.
+# If empty, the system $PATH will be searched for it.
+# If not found, syslog notifications will be silently disabled.
+logger=""
+
+# The full path of the aws command.
+# If empty, the system $PATH will be searched for it.
+# If not found, Amazon SNS notifications will be silently disabled.
+aws=""
+
+# The full path of the sendsms command (smstools3).
+# If empty, the system $PATH will be searched for it.
+# If not found, SMS notifications will be silently disabled.
+sendsms=""
+
+#------------------------------------------------------------------------------
+# extra options for external commands
+#
+# In some cases, you may need to change what options get passed to an
+# external command. Such cases are covered here.
+
+# Extra options to pass to curl. In most cases, you shouldn't need to add anything
+# to this. If you're having issues with HTTPS connections, you might try adding
+# '--insecure' here, but be warned that it will make it much easier for
+# third-parties to block notification delivery, and may allow disclosure
+# of potentially sensitive information.
+#curl_options="--insecure"
+
+# Extra options to pass to logger. You shouldn't have to specify anything
+# here in most cases.
+#logger_options=""
+
+#------------------------------------------------------------------------------
+# extra options
+
+# By default, nothing is sent when an alarm becomes CLEAR if it was never WARNING or CRITICAL.
+# You can send it always if your system deduplicates alarms.
+#clear_alarm_always='YES'
+
+#
+#------------------------------------------------------------------------------
+# NOTE ABOUT RECIPIENTS
+#
+# When you define recipients (all types):
+#
+# - email addresses
+# - pushover user tokens
+# - telegram chat ids
+# - slack channels
+# - alerta environment
+# - flock rooms
+# - discord channels
+# - hipchat rooms
+# - sms phone numbers
+# - pagerduty.com (pd) services
+# - irc channels
+#
+# You can append |critical to limit the notifications to be sent.
+#
+# In these examples, the first recipient receives all the alarms
+# while the second one receives only notifications for alarms that
+# have at some point become critical. The second user may still receive
+# warning and clear notifications, but only for the event that previously
+# caused a critical alarm.
+#
+# email : "user1@example.com user2@example.com|critical"
+# pushover : "2987343...9437837 8756278...2362736|critical"
+# telegram : "111827421 112746832|critical"
+# slack : "alarms disasters|critical"
+# alerta : "alarms disasters|critical"
+# flock : "alarms disasters|critical"
+# discord : "alarms disasters|critical"
+# twilio : "+15555555555 +17777777777|critical"
+# messagebird: "+15555555555 +17777777777|critical"
+# kavenegar : "09155555555 09177777777|critical"
+# pd : "<pd_service_key_1> <pd_service_key_2>|critical"
+# irc : "<irc_channel_1> <irc_channel_2>|critical"
+# hangouts : "alarms disasters|critical"
+#
+# If a recipient is set to empty string, the default recipient of the given
+# notification method (email, pushover, telegram, slack, alerta, etc) will be used.
+# To disable a notification, use the recipient called: disabled
+# This works for all notification methods (including the default recipients).
+
+
+#------------------------------------------------------------------------------
+# email global notification options
+
+# multiple recipients can be given like this:
+# "admin1@example.com admin2@example.com ..."
+
+# the email address sending email notifications
+# the default is the system user netdata runs as (usually: netdata)
+# The following formats are supported:
+# EMAIL_SENDER="user@domain"
+# EMAIL_SENDER="User Name <user@domain>"
+# EMAIL_SENDER="'User Name' <user@domain>"
+# EMAIL_SENDER="\"User Name\" <user@domain>"
+EMAIL_SENDER=""
+
+# enable/disable sending emails
+SEND_EMAIL="YES"
+
+# if a role recipient is not configured, an email will be sent to:
+DEFAULT_RECIPIENT_EMAIL="root"
+# to receive only critical alarms, set it to "root|critical"
+
+# Optionally specify the encoding to list in the Content-Type header.
+# This doesn't change what encoding the e-mail is sent with, just what
+# the headers say it was encoded as.
+# This shouldn't need to be changed as it will almost always be
+# autodetected from the environment.
+#EMAIL_CHARSET="UTF-8"
+
+# You can also have netdata add headers to the message that will
+# cause most e-mail clients to treat all notifications for a given
+# chart+alarm+host combination as a single thread. This can help
+# simplify tracking of alarms, as it provides an easy way for scripts
+# to correlate messages and also will cause most clients to group all the
+# messages together. This is enabled by default, uncomment the line
+# below if you want to disable it.
+#EMAIL_THREADING="NO"
+
+# By default, netdata sends both HTML and plain text emails; some clients,
+# such as command line clients, do not parse HTML emails.
+# To make emails readable in these clients, you can configure netdata
+# to not send HTML but Plain Text only emails.
+#EMAIL_PLAINTEXT_ONLY="YES"
+
+#------------------------------------------------------------------------------
+# Dynatrace global notification options
+#------------------------------------------------------------------------------
+# enable/disable sending Dynatrace notifications
+SEND_DYNATRACE="YES"
+
+# The Dynatrace server with protocol prefix (http:// or https://), example https://monitor.illumineit.com
+# Required
+DYNATRACE_SERVER=""
+
+# Generate a Dynatrace API authentication token
+# Read https://www.dynatrace.com/support/help/extend-dynatrace/dynatrace-api/basics/dynatrace-api-authentication/
+# On Dynatrace server goto Settings --> Integration --> Dynatrace API --> Generate token
+# Required
+DYNATRACE_TOKEN=""
+
+# Beware: the space is taken from the Dynatrace URL in your browser when you create the TOKEN
+# Required
+DYNATRACE_SPACE=""
+
+# Generate a Server Tag. On the Dynatrace Server go to Settings --> Tags --> Manually applied tags and create the Tag
+# The NetData alarm will be sent as a Dynatrace Event to be correlated with all those hosts tagged with this Tag
+# you created.
+# Required
+DYNATRACE_TAG_VALUE=""
+
+# Change this to what you want
+DYNATRACE_ANNOTATION_TYPE="NetData Alarm"
+
+# This can be CUSTOM_INFO, CUSTOM_ANNOTATION, CUSTOM_CONFIGURATION, CUSTOM_DEPLOYMENT
+# The default value below is applied
+# Required
+DYNATRACE_EVENT="CUSTOM_INFO"
+
+
+DEFAULT_RECIPIENT_DYNATRACE=""
+
+#------------------------------------------------------------------------------
+# Stackpulse global notification options
+SEND_STACKPULSE="YES"
+
+# Webhook
+STACKPULSE_WEBHOOK=""
+
+DEFAULT_RECIPIENT_STACKPULSE=""
+
+#------------------------------------------------------------------------------
+# opsgenie global notification options
+SEND_OPSGENIE="YES"
+
+# API key
+OPSGENIE_API_KEY=""
+OPSGENIE_API_URL=""
+
+DEFAULT_RECIPIENT_OPSGENIE=""
+
+#------------------------------------------------------------------------------
+# hangouts (google hangouts chat) global notification options
+
+# enable/disable sending hangouts notifications
+SEND_HANGOUTS="YES"
+
+# On Hangouts, in the room you choose, create an incoming webhook,
+# copy the link and paste it below and also give it a room name.
+# Without it, netdata cannot send hangouts notifications to that room.
+# You will then use the same room name in your recipients list. For each URI, you need
+# HANGOUTS_WEBHOOK_URI[room_name]="WEBHOOK_URI"
+# e.g. to define systems and development rooms/recipients:
+# HANGOUTS_WEBHOOK_URI[systems]="URLforroom1"
+# HANGOUTS_WEBHOOK_URI[development]="URLforroom2"
+
+# if DEFAULT_RECIPIENT_HANGOUTS is not configured,
+# notifications won't be sent to hangouts rooms. For the example above,
+# a valid recipients list is the following
+# DEFAULT_RECIPIENT_HANGOUTS="systems development|critical"
+DEFAULT_RECIPIENT_HANGOUTS=""
+
+#------------------------------------------------------------------------------
+# pushover (pushover.net) global notification options
+
+# multiple recipients can be given like this:
+# "USERTOKEN1 USERTOKEN2 ..."
+
+# enable/disable sending pushover notifications
+SEND_PUSHOVER="YES"
+
+# Login to pushover.net to get your pushover app token.
+# You need only one for all your netdata servers (or you can have one for
+# each of your netdata - your call).
+# Without an app token, netdata cannot send pushover notifications.
+PUSHOVER_APP_TOKEN=""
+
+# if a role's recipients are not configured, a notification will be sent to
+# this pushover user token (empty = do not send a notification for unconfigured
+# roles):
+DEFAULT_RECIPIENT_PUSHOVER=""
+
+
+#------------------------------------------------------------------------------
+# pushbullet (pushbullet.com) push notification options
+
+# multiple recipients can be given like this:
+# "user1@email.com user2@mail.com"
+
+# enable/disable sending pushbullet notifications
+SEND_PUSHBULLET="YES"
+
+# Sign up and log in to pushbullet.com
+# To get your Access Token, go to https://www.pushbullet.com/#settings/account
+# Create a new access token and paste it below.
+# Then just set the recipients' emails.
+# Please note that if the email in DEFAULT_RECIPIENT_PUSHBULLET does
+# not have a pushbullet account, the pushbullet service will send an email
+# to that address instead.
+
+# Without an access token, netdata cannot send pushbullet notifications.
+PUSHBULLET_ACCESS_TOKEN=""
+DEFAULT_RECIPIENT_PUSHBULLET=""
+
+# Device iden of the sending device. Optional.
+PUSHBULLET_SOURCE_DEVICE=""
+
+
+#------------------------------------------------------------------------------
+# Twilio (twilio.com) SMS options
+
+# multiple recipients can be given like this:
+# "+15555555555 +17777777777"
+
+# enable/disable sending twilio SMS
+SEND_TWILIO="YES"
+
+# Sign up for a free trial and select an SMS-capable Twilio number
+# To get your Account SID and Token, go to https://www.twilio.com/console
+# Place your sid, token and number below.
+# Then just set the recipients' phone numbers.
+# The trial account is only allowed to use the number specified when set up.
+
+# Without an account sid and token, netdata cannot send Twilio text messages.
+TWILIO_ACCOUNT_SID=""
+TWILIO_ACCOUNT_TOKEN=""
+TWILIO_NUMBER=""
+DEFAULT_RECIPIENT_TWILIO=""
+
+
+#------------------------------------------------------------------------------
+# Messagebird (messagebird.com) SMS options
+
+# multiple recipients can be given like this:
+# "+15555555555 +17777777777"
+
+# enable/disable sending messagebird SMS
+SEND_MESSAGEBIRD="YES"
+
+# to get an access key, create a free account at https://www.messagebird.com
+# verify and activate the account (no CC info needed)
+# login to your account and enter your phone number to get some free credits
+# to get the API key, click on 'API' in the sidebar, then 'API Access (REST)'
+# click 'Add access key' and fill in data (you want a live key to send SMS)
+
+# Without an access key, netdata cannot send Messagebird text messages.
+MESSAGEBIRD_ACCESS_KEY=""
+MESSAGEBIRD_NUMBER=""
+DEFAULT_RECIPIENT_MESSAGEBIRD=""
+
+
+#------------------------------------------------------------------------------
+# Kavenegar (Kavenegar.com) SMS options
+
+# multiple recipients can be given like this:
+# "09155555555 09177777777"
+
+# enable/disable sending kavenegar SMS
+SEND_KAVENEGAR="YES"
+
+# to get an access key, after selecting and purchasing your desired service
+# at http://kavenegar.com/pricing.html
+# log in to your account, go to your dashboard and account settings at
+# https://panel.kavenegar.com/Client/setting/account and copy your API key
+# from the API Key section. You can also generate a new API key.
+# You can find and select a Kavenegar sender number there as well.
+
+# Without an API key, netdata cannot send KAVENEGAR text messages.
+KAVENEGAR_API_KEY=""
+KAVENEGAR_SENDER=""
+DEFAULT_RECIPIENT_KAVENEGAR=""
+
+
+#------------------------------------------------------------------------------
+# telegram (telegram.org) global notification options
+
+# multiple recipients can be given like this:
+# "CHAT_ID_1 CHAT_ID_2 ..."
+
+# enable/disable sending telegram messages
+SEND_TELEGRAM="YES"
+
+# Contact the bot @BotFather to create a new bot and receive a bot token.
+# Without it, netdata cannot send telegram messages.
+TELEGRAM_BOT_TOKEN=""
+
+# To get your chat ID send the command /getid to telegram bot @myidbot
+# (https://t.me/myidbot). Each user also needs to open a conversation with the
+# bot that will be sending notifications.
+# If a role's recipients are not configured, a message will be sent to
+# this chat id (empty = do not send a notification for unconfigured roles):
+DEFAULT_RECIPIENT_TELEGRAM=""
+
+
+#------------------------------------------------------------------------------
+# slack (slack.com) global notification options
+
+# multiple recipients can be given like this:
+# "RECIPIENT1 RECIPIENT2 ..."
+
+# enable/disable sending slack notifications
+SEND_SLACK="YES"
+
+# Login to your slack.com workspace and create an incoming webhook, using the "Incoming Webhooks" App: https://slack.com/apps/A0F7XDUAZ-incoming-webhooks
+# Do not use the instructions in https://api.slack.com/incoming-webhooks#enable_webhooks, as those webhooks work only for a single channel.
+# You need only one for all your netdata servers (or you can have one for each of your netdata).
+# Without the app and a webhook, netdata cannot send slack notifications.
+SLACK_WEBHOOK_URL=""
+
+# if a role's recipients are not configured, a notification will be sent to:
+# - A slack channel (syntax: '#channel' or 'channel')
+# - A slack user (syntax: '@user')
+# - The channel or user defined in slack for the webhook (syntax: '#')
+# empty = do not send a notification for unconfigured roles
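+# e.g. (an illustrative value): DEFAULT_RECIPIENT_SLACK="#alarms @admin|critical"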
+DEFAULT_RECIPIENT_SLACK=""
+
+#------------------------------------------------------------------------------
+# Microsoft Team (office.com) global notification options
+# More details about the payload syntax options are available here: https://docs.microsoft.com/en-us/outlook/actionable-messages/message-card-reference
+# Online designer: https://acdesignerbeta.azurewebsites.net/
+# multiple recipients can be given like this:
+# "CHANNEL1 CHANNEL2 ..."
+
+# enable/disable sending Microsoft Teams notifications
+SEND_MSTEAM="YES"
+
+# if a role's recipients are not configured, a notification will be sent to
+# this Microsoft Teams channel (empty = do not send a notification for
+# unconfigured roles):
+# For Teams, the channel name is encoded in the URI after ....IncomingWebhook/___/.....
+# This value is replaced in the webhook URL to publish to several channels in the same team.
+# To get it working properly, replace the value between the brackets in ....IncomingWebhook/[___]/..... with the string "CHANNEL".
+DEFAULT_RECIPIENT_MSTEAM=""
+# Based on the way MS Teams works, put the different channels here, like: "CHANNEL1 CHANNEL2 ..."
+# AT LEAST ONE CHANNEL IS MANDATORY
+MSTEAM_WEBHOOK_URL=""
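+# Illustrative sketch (placeholder IDs): if Teams gives you a webhook like
+#   https://outlook.office.com/webhook/XXX/IncomingWebhook/abc123/YYY
+# set:
+#   MSTEAM_WEBHOOK_URL="https://outlook.office.com/webhook/XXX/IncomingWebhook/CHANNEL/YYY"
+#   DEFAULT_RECIPIENT_MSTEAM="abc123"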
+
+# Define the default color scheme for alerts to MS Teams - icon and color
+# Icons - go to https://emojipedia.org/bomb/
+MSTEAM_ICON_DEFAULT="♡"
+MSTEAM_ICON_CLEAR="💚"
+MSTEAM_ICON_WARNING="⚠️"
+MSTEAM_ICON_CRITICAL="🔥"
+
+# Colors
+MSTEAM_COLOR_DEFAULT="0076D7"
+MSTEAM_COLOR_CLEAR="65A677"
+MSTEAM_COLOR_WARNING="FFA500"
+MSTEAM_COLOR_CRITICAL="D93F3C"
+
+
+#------------------------------------------------------------------------------
+# rocketchat (rocket.chat) global notification options
+
+# multiple recipients can be given like this:
+# "CHANNEL1 CHANNEL2 ..."
+
+# enable/disable sending rocketchat notifications
+SEND_ROCKETCHAT="YES"
+
+# Login to rocket.chat and create an incoming webhook. You need only one for all
+# your netdata servers (or you can have one for each of your netdata).
+# Without it, netdata cannot send rocketchat notifications.
+ROCKETCHAT_WEBHOOK_URL=""
+
+# if a role's recipients are not configured, a notification will be sent to
+# this rocketchat channel (empty = do not send a notification for unconfigured
+# roles):
+DEFAULT_RECIPIENT_ROCKETCHAT=""
+
+
+#------------------------------------------------------------------------------
+# alerta (alerta.io) global notification options
+
+# multiple recipients (Environments) can be given like this:
+# "Production Development ..."
+
+# enable/disable sending alerta notifications
+SEND_ALERTA="YES"
+
+# here set your alerta server API url
+# this is the API URL you defined when you installed the Alerta server;
+# it is the same for all users. Do not include a trailing slash.
+# ALERTA_WEBHOOK_URL="https://<server>/alerta/api"
+ALERTA_WEBHOOK_URL=""
+
+# Login with an administrative user to your Alerta server and create an API KEY
+# with write permissions.
+ALERTA_API_KEY=""
+
+# you can define environments in /etc/alertad.conf option ALLOWED_ENVIRONMENTS
+# standard environments are Production and Development
+# if a role's recipients are not configured, a notification will be sent to
+# this Environment (empty = do not send a notification for unconfigured roles):
+DEFAULT_RECIPIENT_ALERTA=""
+
+
+#------------------------------------------------------------------------------
+# flock (flock.com) global notification options
+
+# enable/disable sending flock notifications
+SEND_FLOCK="YES"
+
+# Login to flock.com and create an incoming webhook. You need only one for all
+# your netdata servers (or you can have one for each of your netdata).
+# Without it, netdata cannot send flock notifications.
+FLOCK_WEBHOOK_URL=""
+
+# if a role recipient is not configured, no notification will be sent
+DEFAULT_RECIPIENT_FLOCK=""
+
+
+#------------------------------------------------------------------------------
+# discord (discordapp.com) global notification options
+
+# multiple recipients can be given like this:
+# "CHANNEL1 CHANNEL2 ..."
+
+# enable/disable sending discord notifications
+SEND_DISCORD="YES"
+
+# Create a webhook by following the official documentation -
+# https://support.discordapp.com/hc/en-us/articles/228383668-Intro-to-Webhooks
+DISCORD_WEBHOOK_URL=""
+
+# if a role's recipients are not configured, a notification will be sent to
+# this discord channel (empty = do not send a notification for unconfigured
+# roles):
+DEFAULT_RECIPIENT_DISCORD=""
+
+
+#------------------------------------------------------------------------------
+# hipchat global notification options
+
+# multiple recipients can be given like this:
+# "ROOM1 ROOM2 ..."
+
+# enable/disable sending hipchat notifications
+SEND_HIPCHAT="YES"
+
+# define hipchat server
+HIPCHAT_SERVER="api.hipchat.com"
+
+# api.hipchat.com authorization token
+# Without this, netdata cannot send hipchat notifications.
+HIPCHAT_AUTH_TOKEN=""
+
+# if a role's recipients are not configured, a notification will be sent to
+# this hipchat room (empty = do not send a notification for unconfigured
+# roles):
+DEFAULT_RECIPIENT_HIPCHAT=""
+
+
+#------------------------------------------------------------------------------
+# kafka notification options
+
+# enable/disable sending kafka notifications
+SEND_KAFKA="YES"
+
+# The URL to POST kafka alarm data to. It should be the full URL.
+KAFKA_URL=""
+
+# The IP to be used in the kafka message as the sender.
+KAFKA_SENDER_IP=""
+
+
+#------------------------------------------------------------------------------
+# pagerduty.com notification options
+#
+# pagerduty.com notifications require a "Generic API" (Events v1)
+# pagerduty service.
+# https://support.pagerduty.com/docs/services-and-integrations
+
+# multiple recipients can be given like this:
+# "<pd_service_key_1> <pd_service_key_2> ..."
+
+# enable/disable sending pagerduty notifications
+SEND_PD="YES"
+
+# if a role's recipients are not configured, a notification will be sent to
+# the "General API" pagerduty.com service that uses this service key.
+# (empty = do not send a notification for unconfigured roles):
+DEFAULT_RECIPIENT_PD=""
+
+# Which PD API version are we going to use? For version 2 or newer, it is necessary to make a request to PagerDuty
+# before setting the version (https://developer.pagerduty.com/docs/events-api-v2/overview/).
+USE_PD_VERSION="1"
+
+#------------------------------------------------------------------------------
+# fleep notification options
+#
+# To send fleep.io notifications, you will need a webhook for the
+# conversation you want to send to.
+
+# Fleep recipients are specified as the last part of the webhook URL.
+# So, for a webhook URL of: https://fleep.io/hook/IJONmBuuSlWlkb_ttqyXJg, the
+# recipient name would be: 'IJONmBuuSlWlkb_ttqyXJg'.
+
+# enable/disable sending fleep notifications
+SEND_FLEEP="YES"
+
+# if a role's recipients are not configured, a notification will not be sent.
+# (empty = do not send a notification for unconfigured roles):
+DEFAULT_RECIPIENT_FLEEP=""
+
+# The user name to label the messages with. If this is unset,
+# the hostname of the system the notification is for will be used.
+FLEEP_SENDER=""
+
+
+#------------------------------------------------------------------------------
+# irc notification options
+#
+# irc notifications require only the nc utility to be installed.
+
+# multiple recipients can be given like this:
+# "<irc_channel_1> <irc_channel_2> ..."
+
+# enable/disable sending irc notifications
+SEND_IRC="YES"
+
+# if a role's recipients are not configured, a notification will not be sent.
+# (empty = do not send a notification for unconfigured roles):
+DEFAULT_RECIPIENT_IRC=""
+
+# The irc network to which the recipients belong. It must be the full network hostname,
+# e.g. "irc.freenode.net"
+IRC_NETWORK=""
+
+# The irc nickname required to send the notification. It must not be
+# an already-registered name, as the connection's MODE is set to 'guest'.
+IRC_NICKNAME=""
+
+# The irc realname which is required in order to make the connection and is an
+# extra identifier.
+IRC_REALNAME=""
+
+
+#------------------------------------------------------------------------------
+# syslog notifications
+#
+# syslog notifications only need you to have a working logger command, which
+# should be the case on pretty much any Linux system.
+
+# enable/disable sending syslog notifications
+# NOTE: make sure you have everything else configured the way you want
+# it _before_ turning this on.
+SEND_SYSLOG="NO"
+
+# A note on log levels and facilities:
+#
+# The traditional UNIX syslog mechanism has the concept of both log
+# levels and facilities. A log level indicates the relative severity of
+# the message, while a facility specifies a generic source for the message
+# (for example, the `mail` facility is where sendmail and postfix log
+# their messages). All major syslog daemons have the ability to filter
+# messages based on both log level and facility, and can often also make
+# routing decisions for messages based on both factors.
+#
+# On Linux, the eight log levels in decreasing order of severity are:
+# emerg, alert, crit, err, warning, notice, info, debug
+#
+# By default, warnings will be logged at the warning level, critical
+# alerts at the crit level, and clear notifications at the info level.
+#
+# And the 19 facilities you can log to are:
+# auth, authpriv, cron, daemon, ftp, lpr, mail, news, syslog, user,
+# uucp, local0, local1, local2, local3, local4, local5, local6, and local7
+#
+# By default, netdata alerts will be logged to the local6 facility.
+#
+# Depending on your distribution, this means that either all your
+# netdata alerts will by default end up in the main system log (usually
+# /var/log/messages), or they won't be logged to a file at all.
+# Neither of these are likely to be what you actually want, but any
+# configuration to change that needs to happen in the syslog daemon
+# configuration, not here.
+
+# This controls which facility is used by default for logging. Defaults
+# to local6.
+SYSLOG_FACILITY=''
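+# e.g. to use the daemon facility instead (an illustrative value):
+#SYSLOG_FACILITY='daemon'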
+
+# If a role's recipients are not configured, use the following.
+# (empty = do not send a notification for unconfigured roles)
+#
+# The recipient format for syslog uses the following format:
+# [[facility.level][@host[:port]]/]prefix
+#
+# `prefix` gets prepended to all log messages generated for
+# that recipient. The prefix is mandatory.
+# 'host' and 'port' can be used to specify a remote syslog server to
+# send messages to. Leave these out if you want messages to be delivered
+# locally. 'host' can be either a hostname or an IP address.
+# IPv6 addresses must have square brackets around them.
+# 'facility' and 'level' are used to override the default logging facility
+# set above and the log level. If one is specified, both must be present.
+#
+# For example, to send messages with a 'netdata' prefix to a syslog
+# daemon listening on port 514 on 'loghost' using the daemon facility and
+# notice log level:
+# DEFAULT_RECIPIENT_SYSLOG='daemon.notice@loghost:514/netdata'
+#
+DEFAULT_RECIPIENT_SYSLOG="netdata"
+
+#------------------------------------------------------------------------------
+# iOS Push Notifications
+
+# enable/disable sending iOS push notifications
+SEND_PROWL="YES"
+
+# If a role's recipients are not configured, use the following.
+# (empty = do not send a notification for unconfigured roles)
+#
+# Recipients for iOS push notifications are Prowl API keys.
+#
+# A recipient may also consist of multiple Prowl API keys separated by
+# commas, in which case notifications will be simultaneously sent for all
+# of those API keys.
+DEFAULT_RECIPIENT_PROWL=""
+
+#------------------------------------------------------------------------------
+# Amazon SNS notifications
+#
+# This method requires potentially complex manual configuration. See the
+# netdata wiki for information on what is needed.
+
+# enable/disable sending Amazon SNS notifications
+SEND_AWSSNS="YES"
+
+# Specify a template for the Amazon SNS notifications. This supports
+# the same set of variables that are usable in the `custom_sender()`
+# function in the custom notification configuration below.
+#
+AWSSNS_MESSAGE_FORMAT="${status} on ${host} at ${date}: ${chart} ${value_string}"
+
+# If a role's recipients are not configured, use the following.
+# (empty = do not send a notification for unconfigured roles)
+#
+# Recipients for AWS SNS notifications are specified as topic ARNs.
+#
+DEFAULT_RECIPIENT_AWSSNS=""
+
+#------------------------------------------------------------------------------
+# SMS Server Tools 3 (smstools3) global notification options
+
+# enable/disable sending SMS Server Tools 3 SMS notifications
+SEND_SMS="YES"
+
+# if a role's recipients are not configured, a notification will be sent to
+# this SMS channel (empty = do not send a notification for unconfigured
+# roles). Multiple recipients can be given like this: "PHONE1 PHONE2 ..."
+
+DEFAULT_RECIPIENT_SMS=""
+
+#------------------------------------------------------------------------------
+# Matrix notifications
+#
+
+# enable/disable Matrix notifications
+SEND_MATRIX="YES"
+
+# The URL of the Matrix homeserver
+# e.g. https://matrix.org:8448
+MATRIX_HOMESERVER=
+
+# An access token from a valid Matrix account. Tokens usually don't expire
+# and can be controlled from a Matrix client.
+# See https://matrix.org/docs/guides/client-server.html
+MATRIX_ACCESSTOKEN=
+
+# Specify the default rooms to receive the notification if no rooms are provided
+# in a role's recipients.
+# The format is !roomid:homeservername
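+# e.g. (an illustrative placeholder): DEFAULT_RECIPIENT_MATRIX="!aBcDeF0123456789:matrix.org"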
+DEFAULT_RECIPIENT_MATRIX=""
+
+#------------------------------------------------------------------------------
+# custom notifications
+#
+
+# enable/disable sending custom notifications
+SEND_CUSTOM="YES"
+
+# if a role's recipients are not configured, use the following.
+# (empty = do not send a notification for unconfigured roles)
+DEFAULT_RECIPIENT_CUSTOM=""
+
+# The custom_sender() is a custom function to do whatever you need to do
+custom_sender() {
+ # variables you can use:
+ # ${host} the host generated this event
+ # ${url_host} same as ${host} but URL encoded
+ # ${unique_id} the unique id of this event
+ # ${alarm_id} the unique id of the alarm that generated this event
+ # ${event_id} the incremental id of the event, for this alarm id
+ # ${when} the timestamp this event occurred
+ # ${name} the name of the alarm, as given in netdata health.d entries
+ # ${url_name} same as ${name} but URL encoded
+ # ${chart} the name of the chart (type.id)
+ # ${url_chart} same as ${chart} but URL encoded
+ # ${family} the family of the chart
+ # ${url_family} same as ${family} but URL encoded
+ # ${status} the current status : REMOVED, UNINITIALIZED, UNDEFINED, CLEAR, WARNING, CRITICAL
+ # ${old_status} the previous status: REMOVED, UNINITIALIZED, UNDEFINED, CLEAR, WARNING, CRITICAL
+ # ${value} the current value of the alarm
+ # ${old_value} the previous value of the alarm
+ # ${src} the line number and file where the alarm has been configured
+ # ${duration} the duration in seconds of the previous alarm state
+ # ${duration_txt} same as ${duration} for humans
+ # ${non_clear_duration} the total duration in seconds this is/was non-clear
+ # ${non_clear_duration_txt} same as ${non_clear_duration} for humans
+ # ${units} the units of the value
+ # ${info} a short description of the alarm
+ # ${value_string} friendly value (with units)
+ # ${old_value_string} friendly old value (with units)
+ # ${image} the URL of an image to represent the status of the alarm
+ # ${color} a color in #AABBCC format for the alarm
+ # ${goto_url} the URL the user can click to see the netdata dashboard
+ # ${calc_expression} the expression evaluated to provide the value for the alarm
+ # ${calc_param_values} the value of the variables in the evaluated expression
+ # ${total_warnings} the total number of alarms in WARNING state on the host
+ # ${total_critical} the total number of alarms in CRITICAL state on the host
+
+ # these are more human friendly:
+ # ${alarm} like "name = value units"
+ # ${status_message} like "needs attention", "recovered", "is critical"
+ # ${severity} like "Escalated to CRITICAL", "Recovered from WARNING"
+ # ${raised_for} like "(alarm was raised for 10 minutes)"
+
+ # example human readable SMS
+ local msg="${host} ${status_message}: ${alarm} ${raised_for}"
+
+ # limit it to 160 characters and encode it for use in a URL
+ urlencode "${msg:0:160}" >/dev/null; msg="${REPLY}"
+
+ # a space separated list of the recipients to send alarms to
+ to="${1}"
+
+ # Sample send SMS to an imaginary SMS gateway accessible via HTTPS
+ #for phone in ${to}; do
+ # httpcode=$(docurl -X POST \
+ # --data-urlencode "From=XXX" \
+ # --data-urlencode "To=${phone}" \
+ # --data-urlencode "Body=${msg}" \
+ # -u "${accountsid}:${accounttoken}" \
+ # https://domain.website.com/)
+ #
+ # if [ "${httpcode}" = "200" ]; then
+ # info "sent custom notification ${msg} to ${phone}"
+ # sent=$((sent + 1))
+ # else
+ # error "failed to send custom notification ${msg} to ${phone} with HTTP error code ${httpcode}."
+ # fi
+ #done
+
+ info "not sending custom notification to ${to}, for ${status} of '${host}.${chart}.${name}' - custom_sender() is not configured."
+}
+
+
+###############################################################################
+# RECIPIENTS PER ROLE
+
+# -----------------------------------------------------------------------------
+# generic system alarms
+# CPU, disks, network interfaces, entropy, etc
+
+role_recipients_email[sysadmin]="${DEFAULT_RECIPIENT_EMAIL}"
+
+role_recipients_hangouts[sysadmin]="${DEFAULT_RECIPIENT_HANGOUTS}"
+
+role_recipients_pushover[sysadmin]="${DEFAULT_RECIPIENT_PUSHOVER}"
+
+role_recipients_pushbullet[sysadmin]="${DEFAULT_RECIPIENT_PUSHBULLET}"
+
+role_recipients_telegram[sysadmin]="${DEFAULT_RECIPIENT_TELEGRAM}"
+
+role_recipients_slack[sysadmin]="${DEFAULT_RECIPIENT_SLACK}"
+
+role_recipients_alerta[sysadmin]="${DEFAULT_RECIPIENT_ALERTA}"
+
+role_recipients_flock[sysadmin]="${DEFAULT_RECIPIENT_FLOCK}"
+
+role_recipients_discord[sysadmin]="${DEFAULT_RECIPIENT_DISCORD}"
+
+role_recipients_hipchat[sysadmin]="${DEFAULT_RECIPIENT_HIPCHAT}"
+
+role_recipients_twilio[sysadmin]="${DEFAULT_RECIPIENT_TWILIO}"
+
+role_recipients_messagebird[sysadmin]="${DEFAULT_RECIPIENT_MESSAGEBIRD}"
+
+role_recipients_kavenegar[sysadmin]="${DEFAULT_RECIPIENT_KAVENEGAR}"
+
+role_recipients_pd[sysadmin]="${DEFAULT_RECIPIENT_PD}"
+
+role_recipients_fleep[sysadmin]="${DEFAULT_RECIPIENT_FLEEP}"
+
+role_recipients_irc[sysadmin]="${DEFAULT_RECIPIENT_IRC}"
+
+role_recipients_syslog[sysadmin]="${DEFAULT_RECIPIENT_SYSLOG}"
+
+role_recipients_prowl[sysadmin]="${DEFAULT_RECIPIENT_PROWL}"
+
+role_recipients_awssns[sysadmin]="${DEFAULT_RECIPIENT_AWSSNS}"
+
+role_recipients_custom[sysadmin]="${DEFAULT_RECIPIENT_CUSTOM}"
+
+role_recipients_msteam[sysadmin]="${DEFAULT_RECIPIENT_MSTEAM}"
+
+role_recipients_rocketchat[sysadmin]="${DEFAULT_RECIPIENT_ROCKETCHAT}"
+
+role_recipients_dynatrace[sysadmin]="${DEFAULT_RECIPIENT_DYNATRACE}"
+
+role_recipients_opsgenie[sysadmin]="${DEFAULT_RECIPIENT_OPSGENIE}"
+
+role_recipients_matrix[sysadmin]="${DEFAULT_RECIPIENT_MATRIX}"
+
+role_recipients_stackpulse[sysadmin]="${DEFAULT_RECIPIENT_STACKPULSE}"
+
+# -----------------------------------------------------------------------------
+# DNS related alarms
+
+role_recipients_email[domainadmin]="${DEFAULT_RECIPIENT_EMAIL}"
+
+role_recipients_hangouts[domainadmin]="${DEFAULT_RECIPIENT_HANGOUTS}"
+
+role_recipients_pushover[domainadmin]="${DEFAULT_RECIPIENT_PUSHOVER}"
+
+role_recipients_pushbullet[domainadmin]="${DEFAULT_RECIPIENT_PUSHBULLET}"
+
+role_recipients_telegram[domainadmin]="${DEFAULT_RECIPIENT_TELEGRAM}"
+
+role_recipients_slack[domainadmin]="${DEFAULT_RECIPIENT_SLACK}"
+
+role_recipients_alerta[domainadmin]="${DEFAULT_RECIPIENT_ALERTA}"
+
+role_recipients_flock[domainadmin]="${DEFAULT_RECIPIENT_FLOCK}"
+
+role_recipients_discord[domainadmin]="${DEFAULT_RECIPIENT_DISCORD}"
+
+role_recipients_hipchat[domainadmin]="${DEFAULT_RECIPIENT_HIPCHAT}"
+
+role_recipients_twilio[domainadmin]="${DEFAULT_RECIPIENT_TWILIO}"
+
+role_recipients_messagebird[domainadmin]="${DEFAULT_RECIPIENT_MESSAGEBIRD}"
+
+role_recipients_kavenegar[domainadmin]="${DEFAULT_RECIPIENT_KAVENEGAR}"
+
+role_recipients_pd[domainadmin]="${DEFAULT_RECIPIENT_PD}"
+
+role_recipients_fleep[domainadmin]="${DEFAULT_RECIPIENT_FLEEP}"
+
+role_recipients_irc[domainadmin]="${DEFAULT_RECIPIENT_IRC}"
+
+role_recipients_syslog[domainadmin]="${DEFAULT_RECIPIENT_SYSLOG}"
+
+role_recipients_prowl[domainadmin]="${DEFAULT_RECIPIENT_PROWL}"
+
+role_recipients_awssns[domainadmin]="${DEFAULT_RECIPIENT_AWSSNS}"
+
+role_recipients_custom[domainadmin]="${DEFAULT_RECIPIENT_CUSTOM}"
+
+role_recipients_msteam[domainadmin]="${DEFAULT_RECIPIENT_MSTEAM}"
+
+role_recipients_rocketchat[domainadmin]="${DEFAULT_RECIPIENT_ROCKETCHAT}"
+
+role_recipients_sms[domainadmin]="${DEFAULT_RECIPIENT_SMS}"
+
+role_recipients_dynatrace[domainadmin]="${DEFAULT_RECIPIENT_DYNATRACE}"
+
+role_recipients_opsgenie[domainadmin]="${DEFAULT_RECIPIENT_OPSGENIE}"
+
+role_recipients_matrix[domainadmin]="${DEFAULT_RECIPIENT_MATRIX}"
+
+role_recipients_stackpulse[domainadmin]="${DEFAULT_RECIPIENT_STACKPULSE}"
+
+# -----------------------------------------------------------------------------
+# database servers alarms
+# mysql, redis, memcached, postgres, etc
+
+role_recipients_email[dba]="${DEFAULT_RECIPIENT_EMAIL}"
+
+role_recipients_hangouts[dba]="${DEFAULT_RECIPIENT_HANGOUTS}"
+
+role_recipients_pushover[dba]="${DEFAULT_RECIPIENT_PUSHOVER}"
+
+role_recipients_pushbullet[dba]="${DEFAULT_RECIPIENT_PUSHBULLET}"
+
+role_recipients_telegram[dba]="${DEFAULT_RECIPIENT_TELEGRAM}"
+
+role_recipients_slack[dba]="${DEFAULT_RECIPIENT_SLACK}"
+
+role_recipients_alerta[dba]="${DEFAULT_RECIPIENT_ALERTA}"
+
+role_recipients_flock[dba]="${DEFAULT_RECIPIENT_FLOCK}"
+
+role_recipients_discord[dba]="${DEFAULT_RECIPIENT_DISCORD}"
+
+role_recipients_hipchat[dba]="${DEFAULT_RECIPIENT_HIPCHAT}"
+
+role_recipients_twilio[dba]="${DEFAULT_RECIPIENT_TWILIO}"
+
+role_recipients_messagebird[dba]="${DEFAULT_RECIPIENT_MESSAGEBIRD}"
+
+role_recipients_kavenegar[dba]="${DEFAULT_RECIPIENT_KAVENEGAR}"
+
+role_recipients_pd[dba]="${DEFAULT_RECIPIENT_PD}"
+
+role_recipients_fleep[dba]="${DEFAULT_RECIPIENT_FLEEP}"
+
+role_recipients_irc[dba]="${DEFAULT_RECIPIENT_IRC}"
+
+role_recipients_syslog[dba]="${DEFAULT_RECIPIENT_SYSLOG}"
+
+role_recipients_prowl[dba]="${DEFAULT_RECIPIENT_PROWL}"
+
+role_recipients_awssns[dba]="${DEFAULT_RECIPIENT_AWSSNS}"
+
+role_recipients_custom[dba]="${DEFAULT_RECIPIENT_CUSTOM}"
+
+role_recipients_msteam[dba]="${DEFAULT_RECIPIENT_MSTEAM}"
+
+role_recipients_rocketchat[dba]="${DEFAULT_RECIPIENT_ROCKETCHAT}"
+
+role_recipients_sms[dba]="${DEFAULT_RECIPIENT_SMS}"
+
+role_recipients_dynatrace[dba]="${DEFAULT_RECIPIENT_DYNATRACE}"
+
+role_recipients_opsgenie[dba]="${DEFAULT_RECIPIENT_OPSGENIE}"
+
+role_recipients_matrix[dba]="${DEFAULT_RECIPIENT_MATRIX}"
+
+role_recipients_stackpulse[dba]="${DEFAULT_RECIPIENT_STACKPULSE}"
+
+# -----------------------------------------------------------------------------
+# web servers alarms
+# apache, nginx, lighttpd, etc
+
+role_recipients_email[webmaster]="${DEFAULT_RECIPIENT_EMAIL}"
+
+role_recipients_hangouts[webmaster]="${DEFAULT_RECIPIENT_HANGOUTS}"
+
+role_recipients_pushover[webmaster]="${DEFAULT_RECIPIENT_PUSHOVER}"
+
+role_recipients_pushbullet[webmaster]="${DEFAULT_RECIPIENT_PUSHBULLET}"
+
+role_recipients_telegram[webmaster]="${DEFAULT_RECIPIENT_TELEGRAM}"
+
+role_recipients_slack[webmaster]="${DEFAULT_RECIPIENT_SLACK}"
+
+role_recipients_alerta[webmaster]="${DEFAULT_RECIPIENT_ALERTA}"
+
+role_recipients_flock[webmaster]="${DEFAULT_RECIPIENT_FLOCK}"
+
+role_recipients_discord[webmaster]="${DEFAULT_RECIPIENT_DISCORD}"
+
+role_recipients_hipchat[webmaster]="${DEFAULT_RECIPIENT_HIPCHAT}"
+
+role_recipients_twilio[webmaster]="${DEFAULT_RECIPIENT_TWILIO}"
+
+role_recipients_messagebird[webmaster]="${DEFAULT_RECIPIENT_MESSAGEBIRD}"
+
+role_recipients_kavenegar[webmaster]="${DEFAULT_RECIPIENT_KAVENEGAR}"
+
+role_recipients_pd[webmaster]="${DEFAULT_RECIPIENT_PD}"
+
+role_recipients_fleep[webmaster]="${DEFAULT_RECIPIENT_FLEEP}"
+
+role_recipients_irc[webmaster]="${DEFAULT_RECIPIENT_IRC}"
+
+role_recipients_syslog[webmaster]="${DEFAULT_RECIPIENT_SYSLOG}"
+
+role_recipients_prowl[webmaster]="${DEFAULT_RECIPIENT_PROWL}"
+
+role_recipients_awssns[webmaster]="${DEFAULT_RECIPIENT_AWSSNS}"
+
+role_recipients_custom[webmaster]="${DEFAULT_RECIPIENT_CUSTOM}"
+
+role_recipients_msteam[webmaster]="${DEFAULT_RECIPIENT_MSTEAM}"
+
+role_recipients_rocketchat[webmaster]="${DEFAULT_RECIPIENT_ROCKETCHAT}"
+
+role_recipients_sms[webmaster]="${DEFAULT_RECIPIENT_SMS}"
+
+role_recipients_dynatrace[webmaster]="${DEFAULT_RECIPIENT_DYNATRACE}"
+
+role_recipients_opsgenie[webmaster]="${DEFAULT_RECIPIENT_OPSGENIE}"
+
+role_recipients_matrix[webmaster]="${DEFAULT_RECIPIENT_MATRIX}"
+
+role_recipients_stackpulse[webmaster]="${DEFAULT_RECIPIENT_STACKPULSE}"
+
+# -----------------------------------------------------------------------------
+# proxy servers alarms
+# squid, etc
+
+role_recipients_email[proxyadmin]="${DEFAULT_RECIPIENT_EMAIL}"
+
+role_recipients_hangouts[proxyadmin]="${DEFAULT_RECIPIENT_HANGOUTS}"
+
+role_recipients_pushover[proxyadmin]="${DEFAULT_RECIPIENT_PUSHOVER}"
+
+role_recipients_pushbullet[proxyadmin]="${DEFAULT_RECIPIENT_PUSHBULLET}"
+
+role_recipients_telegram[proxyadmin]="${DEFAULT_RECIPIENT_TELEGRAM}"
+
+role_recipients_slack[proxyadmin]="${DEFAULT_RECIPIENT_SLACK}"
+
+role_recipients_alerta[proxyadmin]="${DEFAULT_RECIPIENT_ALERTA}"
+
+role_recipients_flock[proxyadmin]="${DEFAULT_RECIPIENT_FLOCK}"
+
+role_recipients_discord[proxyadmin]="${DEFAULT_RECIPIENT_DISCORD}"
+
+role_recipients_hipchat[proxyadmin]="${DEFAULT_RECIPIENT_HIPCHAT}"
+
+role_recipients_twilio[proxyadmin]="${DEFAULT_RECIPIENT_TWILIO}"
+
+role_recipients_messagebird[proxyadmin]="${DEFAULT_RECIPIENT_MESSAGEBIRD}"
+
+role_recipients_kavenegar[proxyadmin]="${DEFAULT_RECIPIENT_KAVENEGAR}"
+
+role_recipients_pd[proxyadmin]="${DEFAULT_RECIPIENT_PD}"
+
+role_recipients_fleep[proxyadmin]="${DEFAULT_RECIPIENT_FLEEP}"
+
+role_recipients_irc[proxyadmin]="${DEFAULT_RECIPIENT_IRC}"
+
+role_recipients_syslog[proxyadmin]="${DEFAULT_RECIPIENT_SYSLOG}"
+
+role_recipients_prowl[proxyadmin]="${DEFAULT_RECIPIENT_PROWL}"
+
+role_recipients_awssns[proxyadmin]="${DEFAULT_RECIPIENT_AWSSNS}"
+
+role_recipients_custom[proxyadmin]="${DEFAULT_RECIPIENT_CUSTOM}"
+
+role_recipients_msteam[proxyadmin]="${DEFAULT_RECIPIENT_MSTEAM}"
+
+role_recipients_rocketchat[proxyadmin]="${DEFAULT_RECIPIENT_ROCKETCHAT}"
+
+role_recipients_sms[proxyadmin]="${DEFAULT_RECIPIENT_SMS}"
+
+role_recipients_dynatrace[proxyadmin]="${DEFAULT_RECIPIENT_DYNATRACE}"
+
+role_recipients_opsgenie[proxyadmin]="${DEFAULT_RECIPIENT_OPSGENIE}"
+
+role_recipients_matrix[proxyadmin]="${DEFAULT_RECIPIENT_MATRIX}"
+
+role_recipients_stackpulse[proxyadmin]="${DEFAULT_RECIPIENT_STACKPULSE}"
+
+# -----------------------------------------------------------------------------
+# peripheral devices
+# UPS, photovoltaics, etc
+
+role_recipients_email[sitemgr]="${DEFAULT_RECIPIENT_EMAIL}"
+
+role_recipients_hangouts[sitemgr]="${DEFAULT_RECIPIENT_HANGOUTS}"
+
+role_recipients_pushover[sitemgr]="${DEFAULT_RECIPIENT_PUSHOVER}"
+
+role_recipients_pushbullet[sitemgr]="${DEFAULT_RECIPIENT_PUSHBULLET}"
+
+role_recipients_telegram[sitemgr]="${DEFAULT_RECIPIENT_TELEGRAM}"
+
+role_recipients_slack[sitemgr]="${DEFAULT_RECIPIENT_SLACK}"
+
+role_recipients_alerta[sitemgr]="${DEFAULT_RECIPIENT_ALERTA}"
+
+role_recipients_flock[sitemgr]="${DEFAULT_RECIPIENT_FLOCK}"
+
+role_recipients_discord[sitemgr]="${DEFAULT_RECIPIENT_DISCORD}"
+
+role_recipients_hipchat[sitemgr]="${DEFAULT_RECIPIENT_HIPCHAT}"
+
+role_recipients_twilio[sitemgr]="${DEFAULT_RECIPIENT_TWILIO}"
+
+role_recipients_messagebird[sitemgr]="${DEFAULT_RECIPIENT_MESSAGEBIRD}"
+
+role_recipients_kavenegar[sitemgr]="${DEFAULT_RECIPIENT_KAVENEGAR}"
+
+role_recipients_pd[sitemgr]="${DEFAULT_RECIPIENT_PD}"
+
+role_recipients_fleep[sitemgr]="${DEFAULT_RECIPIENT_FLEEP}"
+
+role_recipients_irc[sitemgr]="${DEFAULT_RECIPIENT_IRC}"
+
+role_recipients_syslog[sitemgr]="${DEFAULT_RECIPIENT_SYSLOG}"
+
+role_recipients_prowl[sitemgr]="${DEFAULT_RECIPIENT_PROWL}"
+
+role_recipients_awssns[sitemgr]="${DEFAULT_RECIPIENT_AWSSNS}"
+
+role_recipients_custom[sitemgr]="${DEFAULT_RECIPIENT_CUSTOM}"
+
+role_recipients_msteam[sitemgr]="${DEFAULT_RECIPIENT_MSTEAM}"
+
+role_recipients_rocketchat[sitemgr]="${DEFAULT_RECIPIENT_ROCKETCHAT}"
+
+role_recipients_sms[sitemgr]="${DEFAULT_RECIPIENT_SMS}"
+
+role_recipients_dynatrace[sitemgr]="${DEFAULT_RECIPIENT_DYNATRACE}"
+
+role_recipients_opsgenie[sitemgr]="${DEFAULT_RECIPIENT_OPSGENIE}"
+
+role_recipients_matrix[sitemgr]="${DEFAULT_RECIPIENT_MATRIX}"
+
+role_recipients_stackpulse[sitemgr]="${DEFAULT_RECIPIENT_STACKPULSE}"
diff --git a/health/notifications/health_email_recipients.conf b/health/notifications/health_email_recipients.conf
new file mode 100644
index 0000000..f56c6c6
--- /dev/null
+++ b/health/notifications/health_email_recipients.conf
@@ -0,0 +1,2 @@
+# OBSOLETE FILE
+# REPLACED WITH health_alarm_notify.conf
diff --git a/health/notifications/irc/Makefile.inc b/health/notifications/irc/Makefile.inc
new file mode 100644
index 0000000..1a68f65
--- /dev/null
+++ b/health/notifications/irc/Makefile.inc
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_noinst_DATA += \
+ irc/README.md \
+ irc/Makefile.inc \
+ $(NULL)
+
diff --git a/health/notifications/irc/README.md b/health/notifications/irc/README.md
new file mode 100644
index 0000000..e7f22e1
--- /dev/null
+++ b/health/notifications/irc/README.md
@@ -0,0 +1,78 @@
+<!--
+title: "IRC"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/health/notifications/irc/README.md
+-->
+
+# IRC
+
+This is what you will get:
+
+IRCCloud web client:\
+![image](https://user-images.githubusercontent.com/31221999/36793487-3735673e-1ca6-11e8-8880-d1d8b6cd3bc0.png)
+
+Irssi terminal client:
+![image](https://user-images.githubusercontent.com/31221999/36793486-3713ada6-1ca6-11e8-8c12-70d956ad801e.png)
+
+You need:
+
+1. The `nc` utility. If you do not set the path, Netdata will search for it in your system `$PATH`.
+
+Set the path for `nc` in `/etc/netdata/health_alarm_notify.conf` (to edit it on your system run `/etc/netdata/edit-config health_alarm_notify.conf`), like this:
+
+```
+#------------------------------------------------------------------------------
+# external commands
+#
+# The full path of the nc command.
+# If empty, the system $PATH will be searched for it.
+# If not found, irc notifications will be silently disabled.
+nc="/usr/bin/nc"
+```
+
+2. An `IRC_NETWORK` to which your preferred channels belong.
+3. One or more channels (`DEFAULT_RECIPIENT_IRC`) to post the messages to.
+4. An `IRC_NICKNAME` and an `IRC_REALNAME` to identify yourself on IRC.
+
+Set them in `/etc/netdata/health_alarm_notify.conf` (to edit it on your system run `/etc/netdata/edit-config health_alarm_notify.conf`), like this:
+
+```
+#------------------------------------------------------------------------------
+# irc notification options
+#
+# irc notifications require only the nc utility to be installed.
+
+# multiple recipients can be given like this:
+# "<irc_channel_1> <irc_channel_2> ..."
+
+# enable/disable sending irc notifications
+SEND_IRC="YES"
+
+# if a role's recipients are not configured, a notification will not be sent.
+# (empty = do not send a notification for unconfigured roles):
+DEFAULT_RECIPIENT_IRC="#system-alarms"
+
+# The irc network to which the recipients belong. It must be the full network.
+IRC_NETWORK="irc.freenode.net"
+
+# The irc nickname which is required to send the notification. It must not be
+# an already registered name as the connection's MODE is defined as a 'guest'.
+IRC_NICKNAME="netdata-alarm-user"
+
+# The irc realname which is required in order to make the connection and is an
+# extra identifier.
+IRC_REALNAME="netdata-user"
+```
+
+You can define multiple channels like this: `#system-alarms #networking-alarms`.\
+You can also filter the notifications like this: `#system-alarms|critical`.\
+You can give different channels per **role** using these (in the same file):
+
+```
+role_recipients_irc[sysadmin]="#user-alarms #networking-alarms #system-alarms"
+role_recipients_irc[dba]="#databases-alarms"
+role_recipients_irc[webmaster]="#networking-alarms"
+```
+
+The keywords `#user-alarms`, `#networking-alarms`, `#system-alarms`, `#databases-alarms` are irc channels which belong to the specified IRC network.
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fhealth%2Fnotifications%2Firc%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/health/notifications/kavenegar/Makefile.inc b/health/notifications/kavenegar/Makefile.inc
new file mode 100644
index 0000000..b98e794
--- /dev/null
+++ b/health/notifications/kavenegar/Makefile.inc
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_noinst_DATA += \
+ kavenegar/README.md \
+ kavenegar/Makefile.inc \
+ $(NULL)
+
diff --git a/health/notifications/kavenegar/README.md b/health/notifications/kavenegar/README.md
new file mode 100644
index 0000000..b59799f
--- /dev/null
+++ b/health/notifications/kavenegar/README.md
@@ -0,0 +1,46 @@
+<!--
+title: "Kavenegar"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/health/notifications/kavenegar/README.md
+-->
+
+# Kavenegar
+
+[Kavenegar](https://kavenegar.com/), a service for software developers based in Iran, provides APIs for sending and receiving SMS and for making voice calls.
+
+Notifications will look like this on your Android device:
+
+![image](https://cloud.githubusercontent.com/assets/17090999/20034652/620b6100-a39b-11e6-96af-4f83b8e830e2.png)
+
+You will need:
+
+1. Sign up and log in to kavenegar.com.
+2. Get your API key and sender number from `http://panel.kavenegar.com/client/setting/account`.
+3. Fill in `KAVENEGAR_API_KEY=""` and `KAVENEGAR_SENDER=""`.
+4. Add the recipient phone numbers to `DEFAULT_RECIPIENT_KAVENEGAR=""`.
+
+Set them in `/etc/netdata/health_alarm_notify.conf` (to edit it on your system run `/etc/netdata/edit-config health_alarm_notify.conf`), like this:
+
+```
+###############################################################################
+# Kavenegar (kavenegar.com) SMS options
+
+# multiple recipients can be given like this:
+# "09155555555 09177777777"
+
+# enable/disable sending kavenegar SMS
+SEND_KAVENEGAR="YES"
+
+# to get an access key, first select and purchase your desired service
+# at http://kavenegar.com/pricing.html
+# then log in to your account, go to
+# https://panel.kavenegar.com/Client/setting/account and copy your API key
+# (you can also generate a new API key there).
+# You can find and select your Kavenegar sender number in the same place.
+
+# Without an API key, Netdata cannot send KAVENEGAR text messages.
+KAVENEGAR_API_KEY=""
+KAVENEGAR_SENDER=""
+DEFAULT_RECIPIENT_KAVENEGAR=""
+```
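+
+You can also give different recipients per **role** in the same file (the phone numbers below are placeholders):
+
+```
+role_recipients_kavenegar[sysadmin]="09155555555"
+role_recipients_kavenegar[dba]="09177777777"
+```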
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fhealth%2Fnotifications%2Fkavenegar%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/health/notifications/matrix/Makefile.inc b/health/notifications/matrix/Makefile.inc
new file mode 100644
index 0000000..9937d80
--- /dev/null
+++ b/health/notifications/matrix/Makefile.inc
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_noinst_DATA += \
+ matrix/README.md \
+ matrix/Makefile.inc \
+ $(NULL)
+
diff --git a/health/notifications/matrix/README.md b/health/notifications/matrix/README.md
new file mode 100644
index 0000000..ea22b4a
--- /dev/null
+++ b/health/notifications/matrix/README.md
@@ -0,0 +1,58 @@
+<!--
+title: "Send Netdata notifications to Matrix network rooms"
+description: "Stay aware of warning or critical anomalies by sending health alarms to Matrix network rooms with Netdata's health monitoring watchdog."
+sidebar_label: "Matrix"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/health/notifications/matrix/README.md
+-->
+
+# Matrix
+
+Send notifications to [Matrix](https://matrix.org/) network rooms.
+
+The requirements for this notification method are:
+
+1. The url of the homeserver (`https://homeserver:port`).
+2. Credentials for connecting to the homeserver, in the form of a valid access token for your account (or for a
+ dedicated notification account). These tokens usually don't expire.
+3. The room ids that you want to send the notifications to.
+
+To obtain the access token, you can use the following `curl` command:
+
+```bash
+curl -XPOST -d '{"type":"m.login.password", "user":"example", "password":"wordpass"}' "https://homeserver:8448/_matrix/client/r0/login"
+```
+
+The room ids are unique identifiers and can be obtained from the room settings in a Matrix client (e.g. Riot). Their
+format is `!uniqueid:homeserver`.
+
+Multiple room ids can be defined by separating them with a space character.
+
+Detailed information about the Matrix client API is available at the [official
+site](https://matrix.org/docs/guides/client-server.html).
+
+Your `health_alarm_notify.conf` should look like this:
+
+```conf
+###############################################################################
+# Matrix notifications
+#
+
+# enable/disable Matrix notifications
+SEND_MATRIX="YES"
+
+# The url of the Matrix homeserver
+# e.g. https://matrix.org:8448
+MATRIX_HOMESERVER="https://matrix.org:8448"
+
+# An access token from a valid Matrix account. Tokens usually don't expire
+# and can be managed from a Matrix client.
+# See https://matrix.org/docs/guides/client-server.html
+MATRIX_ACCESSTOKEN="XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
+
+# Specify the default rooms to receive the notification if no rooms are provided
+# in a role's recipients.
+# The format is !roomid:homeservername
+DEFAULT_RECIPIENT_MATRIX="!XXXXXXXXXXXX:matrix.org"
+```
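+
+You can also give different rooms per **role** in the same file (the room ids below are placeholders):
+
+```
+role_recipients_matrix[sysadmin]="!XXXXXXXXXXXX:matrix.org"
+role_recipients_matrix[dba]="!YYYYYYYYYYYY:matrix.org"
+```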
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fhealth%2Fnotifications%2Fmatrix%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/health/notifications/messagebird/Makefile.inc b/health/notifications/messagebird/Makefile.inc
new file mode 100644
index 0000000..f8d2332
--- /dev/null
+++ b/health/notifications/messagebird/Makefile.inc
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_noinst_DATA += \
+ messagebird/README.md \
+ messagebird/Makefile.inc \
+ $(NULL)
+
diff --git a/health/notifications/messagebird/README.md b/health/notifications/messagebird/README.md
new file mode 100644
index 0000000..8e3d1a5
--- /dev/null
+++ b/health/notifications/messagebird/README.md
@@ -0,0 +1,45 @@
+<!--
+title: "Messagebird"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/health/notifications/messagebird/README.md
+-->
+
+# Messagebird
+
+MessageBird notifications will look like this on your Android device:
+
+![image](https://cloud.githubusercontent.com/assets/17090999/20034652/620b6100-a39b-11e6-96af-4f83b8e830e2.png)
+
+You will need:
+
+1. Sign up and log in to messagebird.com.
+2. Pick an SMS-capable number after sign-up to get some free credits.
+3. Go to <https://www.messagebird.com/app/settings/developers/access>.
+4. Create a new access key under 'API ACCESS (REST)' (you will want a live key).
+5. Fill in `MESSAGEBIRD_ACCESS_KEY="XXXXXXXX"` and `MESSAGEBIRD_NUMBER="+XXXXXXXXXXX"`.
+6. Add the recipient phone numbers to `DEFAULT_RECIPIENT_MESSAGEBIRD="+XXXXXXXXXXX"`.
+
+Set them in `/etc/netdata/health_alarm_notify.conf` (to edit it on your system run `/etc/netdata/edit-config health_alarm_notify.conf`), like this:
+
+```
+#------------------------------------------------------------------------------
+# Messagebird (messagebird.com) SMS options
+
+# multiple recipients can be given like this:
+# "+15555555555 +17777777777"
+
+# enable/disable sending messagebird SMS
+SEND_MESSAGEBIRD="YES"
+
+# to get an access key, create a free account at https://www.messagebird.com
+# verify and activate the account (no CC info needed)
+# log in to your account and enter your phone number to get some free credits
+# to get the API key, click on 'API' in the sidebar, then 'API Access (REST)'
+# click 'Add access key' and fill in data (you want a live key to send SMS)
+
+# Without an access key, Netdata cannot send Messagebird text messages.
+MESSAGEBIRD_ACCESS_KEY="XXXXXXXX"
+MESSAGEBIRD_NUMBER="XXXXXXX"
+DEFAULT_RECIPIENT_MESSAGEBIRD="XXXXXXX"
+```
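+
+You can also give different recipients per **role** in the same file (the numbers below are placeholders):
+
+```
+role_recipients_messagebird[sysadmin]="+15555555555"
+role_recipients_messagebird[dba]="+17777777777"
+```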
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fhealth%2Fnotifications%2Fmessagebird%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/health/notifications/opsgenie/Makefile.inc b/health/notifications/opsgenie/Makefile.inc
new file mode 100644
index 0000000..c85bb7c
--- /dev/null
+++ b/health/notifications/opsgenie/Makefile.inc
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_noinst_DATA += \
+ opsgenie/README.md \
+ opsgenie/Makefile.inc \
+ $(NULL)
+
diff --git a/health/notifications/opsgenie/README.md b/health/notifications/opsgenie/README.md
new file mode 100644
index 0000000..10b9f52
--- /dev/null
+++ b/health/notifications/opsgenie/README.md
@@ -0,0 +1,62 @@
+<!--
+title: "Send notifications to Opsgenie"
+description: "Send alerts to your Opsgenie incident response account any time an anomaly or performance issue strikes a node in your infrastructure."
+sidebar_label: "Opsgenie"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/health/notifications/opsgenie/README.md
+-->
+
+# Send notifications to Opsgenie
+
+[Opsgenie](https://www.atlassian.com/software/opsgenie) is an alerting and incident response tool. It is designed to
+group and filter alarms, build custom routing rules for on-call teams, and correlate deployments and commits to
+incidents.
+
+The first step is to create a [Netdata integration](https://docs.opsgenie.com/docs/api-integration) in the
+[Opsgenie](https://www.atlassian.com/software/opsgenie) dashboard. After this, you need to edit
+`health_alarm_notify.conf` on your system, by running the following from your [config
+directory](/docs/configure/nodes.md):
+
+```bash
+./edit-config health_alarm_notify.conf
+```
+
+Set the `OPSGENIE_API_KEY` variable to the API key you got from Opsgenie. `OPSGENIE_API_URL` defaults to
+`https://api.opsgenie.com`; however, there are region-specific API URLs such as `https://eu.api.opsgenie.com`, so set
+this if required.
+
+```conf
+SEND_OPSGENIE="YES"
+
+# Api key
+# Default Opsgenie API
+OPSGENIE_API_KEY="11111111-2222-3333-4444-555555555555"
+OPSGENIE_API_URL=""
+```
+
+Changes to `health_alarm_notify.conf` do not require a Netdata restart. You can test your Opsgenie notification
+configuration by issuing the following commands, replacing `ROLE` with your preferred role:
+
+```sh
+# become user netdata
+sudo su -s /bin/bash netdata
+
+# send a test alarm
+/usr/libexec/netdata/plugins.d/alarm-notify.sh test ROLE
+```
+
+If everything works, you'll see alarms in your Opsgenie platform:
+
+![Example alarm notifications in
+Opsgenie](https://user-images.githubusercontent.com/49162938/92184518-f725f900-ee40-11ea-9afa-e7c639c72206.png)
+
+If sending the test notifications fails, you can look in `/var/log/netdata/error.log` to find the relevant error
+message:
+
+```log
+2020-09-03 23:07:00: alarm-notify.sh: ERROR: failed to send opsgenie notification for: hades test.chart.test_alarm is CRITICAL, with HTTP error code 401.
+```
+
+You can find more details about the Opsgenie error codes in their [response
+docs](https://docs.opsgenie.com/docs/response).
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fhealth%2Fnotifications%2Fopsgenie%2FREADME%2FDonations-netdata-has-received&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/health/notifications/pagerduty/Makefile.inc b/health/notifications/pagerduty/Makefile.inc
new file mode 100644
index 0000000..ee9b091
--- /dev/null
+++ b/health/notifications/pagerduty/Makefile.inc
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_noinst_DATA += \
+ pagerduty/README.md \
+ pagerduty/Makefile.inc \
+ $(NULL)
+
diff --git a/health/notifications/pagerduty/README.md b/health/notifications/pagerduty/README.md
new file mode 100644
index 0000000..b1f60d4
--- /dev/null
+++ b/health/notifications/pagerduty/README.md
@@ -0,0 +1,46 @@
+<!--
+title: "PagerDuty"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/health/notifications/pagerduty/README.md
+-->
+
+# PagerDuty
+
+[PagerDuty](https://www.pagerduty.com/company/) is the enterprise incident resolution service that integrates with ITOps and DevOps monitoring stacks to improve operational reliability and agility. From enriching and aggregating events to correlating them into incidents, PagerDuty streamlines the incident management process by reducing alert noise and resolution times.
+
+Here is an example of a PagerDuty dashboard with Netdata notifications:
+
+![PagerDuty dashboard with Netdata notifications](https://cloud.githubusercontent.com/assets/19278582/21233877/b466a08a-c2a5-11e6-8d66-ee6eed43818f.png)
+
+To have Netdata send notifications to PagerDuty, you'll first need to set up a PagerDuty `Generic API` service and install the PagerDuty agent on the host running Netdata. See the following guide for details:
+
+<https://www.pagerduty.com/docs/guides/agent-install-guide/>
+
+During the setup of the `Generic API` PagerDuty service, you'll obtain a `pagerduty service key`. Keep this **service key** handy.
+
+Once the PagerDuty agent is installed on your host and can send notifications from your host to your `Generic API` service on PagerDuty, add the **service key** to `DEFAULT_RECIPIENT_PD` in `health_alarm_notify.conf`:
+
+```
+#------------------------------------------------------------------------------
+# pagerduty.com notification options
+#
+# pagerduty.com notifications require the pagerduty agent to be installed and
+# a "Generic API" pagerduty service.
+# https://www.pagerduty.com/docs/guides/agent-install-guide/
+
+# multiple recipients can be given like this:
+# "<pd_service_key_1> <pd_service_key_2> ..."
+
+# enable/disable sending pagerduty notifications
+SEND_PD="YES"
+
+# if a role's recipients are not configured, a notification will be sent to
+# the "General API" pagerduty.com service that uses this service key.
+# (empty = do not send a notification for unconfigured roles):
+DEFAULT_RECIPIENT_PD="<service key>"
+
+# Which PagerDuty API version to use. For version 2 or newer, you need to
+# create the integration on PagerDuty first, before setting the version here
+# (https://developer.pagerduty.com/docs/events-api-v2/overview/).
+USE_PD_VERSION="1"
+```
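+
+You can also give different service keys per **role** in the same file (the keys below are placeholders):
+
+```
+role_recipients_pd[sysadmin]="<systems_service_key>"
+role_recipients_pd[dba]="<databases_service_key>"
+```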
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fhealth%2Fnotifications%2Fpagerduty%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/health/notifications/prowl/Makefile.inc b/health/notifications/prowl/Makefile.inc
new file mode 100644
index 0000000..64a1deb
--- /dev/null
+++ b/health/notifications/prowl/Makefile.inc
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_noinst_DATA += \
+ prowl/README.md \
+ prowl/Makefile.inc \
+ $(NULL)
+
diff --git a/health/notifications/prowl/README.md b/health/notifications/prowl/README.md
new file mode 100644
index 0000000..7c60de2
--- /dev/null
+++ b/health/notifications/prowl/README.md
@@ -0,0 +1,29 @@
+<!--
+title: "Prowl"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/health/notifications/prowl/README.md
+-->
+
+# Prowl
+
+[Prowl](https://www.prowlapp.com/) is a push notification service for iOS devices. Netdata
+supports delivering notifications to iOS devices through Prowl.
+
+Because of how Netdata integrates with Prowl, there is a hard limit of
+at most 1000 notifications per hour (starting from the first notification
+sent). Any alerts beyond the first thousand in an hour will be dropped.
+
+Warning messages will be sent with the 'High' priority, critical messages
+will be sent with the 'Emergency' priority, and all other messages will
+be sent with the normal priority. Opening the notification's associated
+URL will take you to the Netdata dashboard of the system that issued
+the alert, directly to the chart that it triggered on.
+
+## Configuration
+
+To use this, you will need a Prowl API key, which can be requested through
+the Prowl website after registering.
+
+Once you have an API key, simply specify that as a recipient for Prowl
+notifications.
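+
+A minimal sketch of the relevant `health_alarm_notify.conf` settings (the API key below is a placeholder):
+
+```
+# enable/disable sending prowl notifications
+SEND_PROWL="YES"
+
+# space-separated list of Prowl API keys to notify
+DEFAULT_RECIPIENT_PROWL="xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
+```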
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fhealth%2Fnotifications%2Fprowl%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/health/notifications/pushbullet/Makefile.inc b/health/notifications/pushbullet/Makefile.inc
new file mode 100644
index 0000000..d3a9459
--- /dev/null
+++ b/health/notifications/pushbullet/Makefile.inc
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_noinst_DATA += \
+ pushbullet/README.md \
+ pushbullet/Makefile.inc \
+ $(NULL)
+
diff --git a/health/notifications/pushbullet/README.md b/health/notifications/pushbullet/README.md
new file mode 100644
index 0000000..7a098d6
--- /dev/null
+++ b/health/notifications/pushbullet/README.md
@@ -0,0 +1,48 @@
+<!--
+title: "PushBullet"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/health/notifications/pushbullet/README.md
+-->
+
+# PushBullet
+
+Notifications will look like this in your browser:
+![image](https://cloud.githubusercontent.com/assets/4300670/19109636/278b1c0c-8aee-11e6-8a09-7fc94fdbfec8.png)
+
+And like this on your Android device:
+
+![image](https://cloud.githubusercontent.com/assets/4300670/19109635/278a1dde-8aee-11e6-9984-0bc87a13312d.png)
+
+You will need:
+
+1. Sign up and log in to pushbullet.com.
+2. Get your Access Token: go to <https://www.pushbullet.com/#settings/account> and create a new one.
+3. Fill in `PUSHBULLET_ACCESS_TOKEN` with that value.
+4. Add the recipient emails to `DEFAULT_RECIPIENT_PUSHBULLET`.
+   **Note**: if a recipient does not have a Pushbullet account, the Pushbullet service will send an email instead.
+
+Set them in `/etc/netdata/health_alarm_notify.conf` (to edit it on your system run `/etc/netdata/edit-config health_alarm_notify.conf`), like this:
+
+```
+###############################################################################
+# pushbullet (pushbullet.com) push notification options
+
+# multiple recipients can be given like this:
+# "user1@email.com user2@mail.com"
+
+# enable/disable sending pushbullet notifications
+SEND_PUSHBULLET="YES"
+
+# Signup and Login to pushbullet.com
+# To get your Access Token, go to https://www.pushbullet.com/#settings/account
+# And create a new access token
+# Then just set the recipients emails
+# Please note that if the email in the DEFAULT_RECIPIENT_PUSHBULLET does
+# not have a pushbullet account, the pushbullet service will send an email
+# to that address instead
+
+# Without an access token, Netdata cannot send pushbullet notifications.
+PUSHBULLET_ACCESS_TOKEN="o.Sometokenhere"
+DEFAULT_RECIPIENT_PUSHBULLET="admin1@example.com admin3@somemail.com"
+```
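+
+You can also give different recipients per **role** in the same file (the emails below are placeholders):
+
+```
+role_recipients_pushbullet[sysadmin]="admin1@example.com"
+role_recipients_pushbullet[dba]="admin3@somemail.com"
+```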
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fhealth%2Fnotifications%2Fpushbullet%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/health/notifications/pushover/Makefile.inc b/health/notifications/pushover/Makefile.inc
new file mode 100644
index 0000000..9b703a1
--- /dev/null
+++ b/health/notifications/pushover/Makefile.inc
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_noinst_DATA += \
+ pushover/README.md \
+ pushover/Makefile.inc \
+ $(NULL)
+
diff --git a/health/notifications/pushover/README.md b/health/notifications/pushover/README.md
new file mode 100644
index 0000000..3ba97fb
--- /dev/null
+++ b/health/notifications/pushover/README.md
@@ -0,0 +1,23 @@
+<!--
+title: "PushOver"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/health/notifications/pushover/README.md
+-->
+
+# PushOver
+
+pushover.net allows you to receive push notifications on your mobile phone. The service appears to be free for up to 7,500 messages per month.
+
+Netdata will send warning messages with priority `0` and critical messages with priority `1`. pushover.net allows you to select do-not-disturb hours; critical notifications will still ring and vibrate your phone during those hours, while all other notifications are delivered silently.
+
+You need:
+
+1. APP TOKEN. You can use the same on all your Netdata servers.
+2. USER TOKEN for each user you are going to send notifications to. This is the actual recipient of the notification.
+
+The configuration follows the same pattern as the other notification methods described above (for example, Slack); a minimal sketch follows.
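+
+The variable names below are assumptions following that common pattern - check `health_alarm_notify.conf` itself for the exact names and defaults:
+
+```
+SEND_PUSHOVER="YES"
+PUSHOVER_APP_TOKEN="XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
+DEFAULT_RECIPIENT_PUSHOVER="USERTOKEN1 USERTOKEN2"
+```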
+
+pushover.net notifications look like this:
+
+![image](https://cloud.githubusercontent.com/assets/2662304/18407319/839c10c4-7715-11e6-92c0-12f8215128d3.png)
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fhealth%2Fnotifications%2Fpushover%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/health/notifications/rocketchat/Makefile.inc b/health/notifications/rocketchat/Makefile.inc
new file mode 100644
index 0000000..58f210b
--- /dev/null
+++ b/health/notifications/rocketchat/Makefile.inc
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_noinst_DATA += \
+ rocketchat/README.md \
+ rocketchat/Makefile.inc \
+ $(NULL)
+
diff --git a/health/notifications/rocketchat/README.md b/health/notifications/rocketchat/README.md
new file mode 100644
index 0000000..a54f582
--- /dev/null
+++ b/health/notifications/rocketchat/README.md
@@ -0,0 +1,52 @@
+<!--
+title: "Rocket.Chat"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/health/notifications/rocketchat/README.md
+-->
+
+# Rocket.Chat
+
+This is what you will get:
+![Netdata on RocketChat](https://i.imgur.com/Zu4t3j3.png)
+You need:
+
+1. The **incoming webhook URL** as given by RocketChat. You can use the same on all your Netdata servers (or you can have multiple if you like - your decision).
+2. One or more channels to post the messages to.
+
+Get them here: <https://rocket.chat/docs/administrator-guides/integrations/index.html#how-to-create-a-new-incoming-webhook>
+
+Set them in `/etc/netdata/health_alarm_notify.conf` (to edit it on your system run `/etc/netdata/edit-config health_alarm_notify.conf`), like this:
+
+```
+#------------------------------------------------------------------------------
+# rocketchat (rocket.chat) global notification options
+
+# multiple recipients can be given like this:
+# "CHANNEL1 CHANNEL2 ..."
+
+# enable/disable sending rocketchat notifications
+SEND_ROCKETCHAT="YES"
+
+# Login to rocket.chat and create an incoming webhook. You need only one for all
+# your Netdata servers (or you can have one for each of your Netdata).
+# Without it, Netdata cannot send rocketchat notifications.
+ROCKETCHAT_WEBHOOK_URL="<your_incoming_webhook_url>"
+
+# if a role's recipients are not configured, a notification will be sent to
+# this rocketchat channel (empty = do not send a notification for unconfigured
+# roles).
+DEFAULT_RECIPIENT_ROCKETCHAT="monitoring_alarms"
+```
+
+You can define multiple channels like this: `alarms systems`.
+You can give different channels per **role** using these (in the same file):
+
+```
+role_recipients_rocketchat[sysadmin]="systems"
+role_recipients_rocketchat[dba]="databases systems"
+role_recipients_rocketchat[webmaster]="marketing development"
+```
+
+The keywords `systems`, `databases`, `marketing`, `development` are RocketChat channels (they should already exist).
+Both public and private channels can be used, even if they differ from the channel configured in your RocketChat incoming webhook.
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fhealth%2Fnotifications%2Frocketchat%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/health/notifications/slack/Makefile.inc b/health/notifications/slack/Makefile.inc
new file mode 100644
index 0000000..043bfaf
--- /dev/null
+++ b/health/notifications/slack/Makefile.inc
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_noinst_DATA += \
+ slack/README.md \
+ slack/Makefile.inc \
+ $(NULL)
+
diff --git a/health/notifications/slack/README.md b/health/notifications/slack/README.md
new file mode 100644
index 0000000..e338e9a
--- /dev/null
+++ b/health/notifications/slack/README.md
@@ -0,0 +1,50 @@
+<!--
+title: "Slack"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/health/notifications/slack/README.md
+-->
+
+# Slack
+
+This is what you will get:
+![image](https://cloud.githubusercontent.com/assets/2662304/18407116/bbd0fee6-7710-11e6-81cf-58c0defaee2b.png)
+
+You need:
+
+1. The **incoming webhook URL** as given by slack.com. You can use the same on all your Netdata servers (or you can have multiple if you like - your decision).
+2. One or more channels to post the messages to.
+
+To get a webhook that works on multiple channels, you will need to log in to your slack.com workspace and create an incoming webhook using the [Incoming Webhooks App](https://slack.com/apps/A0F7XDUAZ-incoming-webhooks).
+Do NOT use the instructions in <https://api.slack.com/incoming-webhooks#enable_webhooks>, as the particular webhooks work only for a single channel.
+
+Set the webhook and the recipients in `/etc/netdata/health_alarm_notify.conf` (to edit it on your system run `/etc/netdata/edit-config health_alarm_notify.conf`), like this:
+
+```
+SEND_SLACK="YES"
+
+SLACK_WEBHOOK_URL="https://hooks.slack.com/services/XXXXXXXX/XXXXXXXX/XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
+
+# if a role's recipients are not configured, a notification will be sent to:
+# - A slack channel (syntax: '#channel' or 'channel')
+# - A slack user (syntax: '@user')
+# - The channel or user defined in slack for the webhook (syntax: '#')
+# empty = do not send a notification for unconfigured roles
+DEFAULT_RECIPIENT_SLACK="alarms"
+```
+
+You can define multiple recipients like this: `# #alarms systems @myuser`.
+This example will send the alarm to:
+
+- The recipient defined in slack for the webhook (not known to Netdata)
+- The channel 'alarms'
+- The channel 'systems'
+- The user @myuser
+
+You can give different recipients per **role** using these (in the same file):
+
+```
+role_recipients_slack[sysadmin]="systems"
+role_recipients_slack[dba]="databases systems"
+role_recipients_slack[webmaster]="marketing development"
+```
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fhealth%2Fnotifications%2Fslack%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/health/notifications/smstools3/Makefile.inc b/health/notifications/smstools3/Makefile.inc
new file mode 100644
index 0000000..4764b9e
--- /dev/null
+++ b/health/notifications/smstools3/Makefile.inc
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_noinst_DATA += \
+ smstools3/README.md \
+ smstools3/Makefile.inc \
+ $(NULL)
+
diff --git a/health/notifications/smstools3/README.md b/health/notifications/smstools3/README.md
new file mode 100644
index 0000000..6d90e70
--- /dev/null
+++ b/health/notifications/smstools3/README.md
@@ -0,0 +1,44 @@
+<!--
+title: "SMS Server Tools 3"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/health/notifications/smstools3/README.md
+-->
+
+# SMS Server Tools 3
+
+[SMS Server Tools 3](http://smstools3.kekekasvi.com/) is SMS gateway software that can send and receive short messages through GSM modems and mobile phones.
+
+To have Netdata send notifications via SMS Server Tools 3, you'll first need to [install](http://smstools3.kekekasvi.com/index.php?p=compiling) and [configure](http://smstools3.kekekasvi.com/index.php?p=configure) smsd.
+
+Ensure that the user `netdata` can execute `sendsms`. Any user executing `sendsms` needs to:
+
+- Have write permissions to `/tmp` and `/var/spool/sms/outgoing`
+- Be a member of group `smsd`
+
+To ensure that the steps above are successful, just `su netdata` and execute `sendsms phone message`.
+
+You then just need to configure the recipient phone numbers in `health_alarm_notify.conf`:
+
+```sh
+#------------------------------------------------------------------------------
+# SMS Server Tools 3 (smstools3) global notification options
+
+# enable/disable sending SMS Server Tools 3 SMS notifications
+SEND_SMS="YES"
+
+# if a role's recipients are not configured, a notification will be sent to
+# this SMS channel (empty = do not send a notification for unconfigured
+# roles). Multiple recipients can be given like this: "PHONE1 PHONE2 ..."
+
+DEFAULT_RECIPIENT_SMS=""
+```
+
+Netdata uses the script `sendsms` that is installed by `smstools3` and just passes a phone number and a message to it. If `sendsms` is not in `$PATH`, you can pass its location in `health_alarm_notify.conf`:
+
+```sh
+# The full path of the sendsms command (smstools3).
+# If empty, the system $PATH will be searched for it.
+# If not found, SMS notifications will be silently disabled.
+sendsms=""
+```
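+
+You can also give different phone numbers per **role** in the same file (the numbers below are placeholders):
+
+```
+role_recipients_sms[sysadmin]="PHONE1"
+role_recipients_sms[dba]="PHONE2"
+```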
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fhealth%2Fnotifications%2Fsmstools3%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/health/notifications/stackpulse/Makefile.inc b/health/notifications/stackpulse/Makefile.inc
new file mode 100644
index 0000000..eabcb4b
--- /dev/null
+++ b/health/notifications/stackpulse/Makefile.inc
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_noinst_DATA += \
+ stackpulse/README.md \
+ stackpulse/Makefile.inc \
+ $(NULL)
+
diff --git a/health/notifications/stackpulse/README.md b/health/notifications/stackpulse/README.md
new file mode 100644
index 0000000..13d2f72
--- /dev/null
+++ b/health/notifications/stackpulse/README.md
@@ -0,0 +1,80 @@
+<!--
+title: "Send notifications to StackPulse"
+description: "Send alerts to your StackPulse Netdata integration any time an anomaly or performance issue strikes a node in your infrastructure."
+sidebar_label: "StackPulse"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/health/notifications/stackpulse/README.md
+-->
+
+# Send notifications to StackPulse
+
+[StackPulse](https://stackpulse.com/) is a software-as-a-service platform for site reliability engineering.
+It helps SREs, DevOps Engineers and Software Developers reduce toil and alert fatigue while improving reliability of
+software services by managing, analyzing and automating incident response activities.
+
+Sending Netdata alarm notifications to StackPulse allows you to create smart automated response workflows
+(StackPulse playbooks) that will help you drive down your MTTD and MTTR by performing any of the following:
+
+- Enriching the incident with data from multiple sources
+- Performing triage actions and analyzing their results
+- Orchestrating incident management and notification flows
+- Performing automatic and semi-automatic remediation actions
+- Analyzing incident data and remediation patterns to improve reliability of your services
+
+To send notifications to StackPulse, you need to:
+
+1. Create a Netdata integration in the `StackPulse Administration Portal`, and copy the `Endpoint` URL.
+
+![Creating a Netdata integration in StackPulse](https://user-images.githubusercontent.com/49162938/93023348-d9455a80-f5dd-11ea-8e05-67d07dce93e4.png)
+
+2. On your node, navigate to `/etc/netdata/` and run the following command:
+
+```sh
+$ ./edit-config health_alarm_notify.conf
+```
+
+3. Set the `STACKPULSE_WEBHOOK` variable to the `Endpoint` URL you copied earlier:
+
+```
+SEND_STACKPULSE="YES"
+STACKPULSE_WEBHOOK="https://hooks.stackpulse.io/v1/webhooks/YOUR_UNIQUE_ID"
+```
+
+4. Now [restart Netdata](/docs/getting-started.md#start-stop-and-restart-netdata). When your node creates an alarm, you
+   can see the associated notification on your StackPulse Administration Portal.
+
+## React to alarms with playbooks
+
+StackPulse allows users to create `Playbooks` that give additional information about events that happen in specific
+scenarios. For example, you could create a Playbook that responds to a "low disk space" alarm by compressing and
+cleaning up storage partitions with dynamic data.
+
+![image](https://user-images.githubusercontent.com/49162938/93207961-4c201400-f74b-11ea-94d1-42a29d007b62.png)
+
+![The StackPulse Administration Portal with a Netdata
+alarm](https://user-images.githubusercontent.com/49162938/93208199-bfc22100-f74b-11ea-83c4-728be23dcf4d.png)
+
+### Create Playbooks for Netdata alarms
+
+To create a Playbook, you need to access the StackPulse Administration Portal. After the initial setup, you need to
+access the **TRIGGER** tab to define the scenarios used to trigger the event. The following variables are available:
+
+- `Hostname`: The host that generated the event.
+- `Chart`: The name of the chart.
+- `OldValue`: The previous value of the alarm.
+- `Value`: The current value of the alarm.
+- `Units`: The units of the value.
+- `OldStatus`: The previous status: REMOVED, UNINITIALIZED, UNDEFINED, CLEAR, WARNING, CRITICAL.
+- `State`: The current alarm status; the acceptable values are the same as for `OldStatus`.
+- `Alarm`: The name of the alarm, as given in Netdata's health.d entries.
+- `Date`: The timestamp this event occurred.
+- `Duration`: The duration in seconds of the previous alarm state.
+- `NonClearDuration`: The total duration in seconds this alarm is/was in a non-clear state.
+- `Description`: A short description of the alarm, copied from the alarm definition.
+- `CalcExpression`: The expression that was evaluated to trigger the alarm.
+- `CalcParamValues`: The values of the parameters in the expression at the time of evaluation.
+- `TotalWarnings`: The total number of alarms in WARNING state.
+- `TotalCritical`: The total number of alarms in CRITICAL state.
+- `ID`: The unique id of the alarm that generated this event.
+
+For more details on how to create a scenario, take a look at the [StackPulse documentation](https://docs.stackpulse.io).
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fhealth%2Fnotifications%2Fopsgenie%2FREADME%2FDonations-netdata-has-received&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/health/notifications/syslog/Makefile.inc b/health/notifications/syslog/Makefile.inc
new file mode 100644
index 0000000..94a8acc
--- /dev/null
+++ b/health/notifications/syslog/Makefile.inc
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_noinst_DATA += \
+ syslog/README.md \
+ syslog/Makefile.inc \
+ $(NULL)
+
diff --git a/health/notifications/syslog/README.md b/health/notifications/syslog/README.md
new file mode 100644
index 0000000..456394d
--- /dev/null
+++ b/health/notifications/syslog/README.md
@@ -0,0 +1,34 @@
+<!--
+title: "Syslog"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/health/notifications/syslog/README.md
+-->
+
+# Syslog
+
+You need a working `logger` command for this to work. This is the case on pretty much every Linux system in existence, and most BSD systems.
+
+Logged messages will look like this:
+
+```
+netdata WARNING on hostname at Tue Apr 3 09:00:00 EDT 2018: disk_space._ out of disk space time = 5h
+```
+
+## Configuration
+
+System log targets are configured as recipients in [`/etc/netdata/health_alarm_notify.conf`](https://github.com/netdata/netdata/blob/36bedc044584dea791fd29455bdcd287c3306cb2/conf.d/health_alarm_notify.conf#L534) (to edit it on your system run `/etc/netdata/edit-config health_alarm_notify.conf`).
+
+You can also configure per-role targets in the same file, a bit further down.
+
+Targets are defined as follows:
+
+```
+[[facility.level][@host[:port]]/]prefix
+```
+
+`prefix` defines what the log messages are prefixed with. By default, all lines are prefixed with 'netdata'.
+
+The `facility` and `level` are the standard syslog facility and level options, for more info on them see your local `logger` and `syslog` documentation. By default, Netdata will log to the `local6` facility, with a log level dependent on the type of message (`crit` for CRITICAL, `warning` for WARNING, and `info` for everything else).
+
+You can configure sending directly to remote log servers by specifying a host (and optionally a port). However, this has a somewhat high overhead, so it is much preferred to use your local syslog daemon to handle the forwarding of messages to remote systems (pretty much all of them allow at least simple forwarding, and most of the really popular ones support complex queueing and routing of messages to remote log servers).
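+
+For example (the host `loghost` below is a placeholder):
+
+```
+# default local target: prefix 'netdata', default facility and levels
+DEFAULT_RECIPIENT_SYSLOG="netdata"
+
+# log to the daemon facility at level notice, on a remote host
+DEFAULT_RECIPIENT_SYSLOG="daemon.notice@loghost:514/netdata"
+```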
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fhealth%2Fnotifications%2Fsyslog%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/health/notifications/telegram/Makefile.inc b/health/notifications/telegram/Makefile.inc
new file mode 100644
index 0000000..ffca071
--- /dev/null
+++ b/health/notifications/telegram/Makefile.inc
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_noinst_DATA += \
+ telegram/README.md \
+ telegram/Makefile.inc \
+ $(NULL)
+
diff --git a/health/notifications/telegram/README.md b/health/notifications/telegram/README.md
new file mode 100644
index 0000000..c1c6f2a
--- /dev/null
+++ b/health/notifications/telegram/README.md
@@ -0,0 +1,45 @@
+<!--
+title: "Telegram"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/health/notifications/telegram/README.md
+-->
+
+# Telegram
+
+[Telegram](https://telegram.org/) is a messaging app with a focus on speed and security; it’s super-fast, simple and free. You can use Telegram on all your devices at the same time — your messages sync seamlessly across any number of your phones, tablets or computers.
+
+With Telegram, you can send messages, photos, videos and files of any type (doc, zip, mp3, etc), as well as create groups for up to 100,000 people or channels for broadcasting to unlimited audiences. You can write to your phone contacts and find people by their usernames. As a result, Telegram is like SMS and email combined — and can take care of all your personal or business messaging needs.
+
+Netdata will send warning messages without vibration.
+
+You need to:
+
+1. Get a bot token. To get one, contact the [@BotFather](https://t.me/BotFather) bot and send the command `/newbot`. Follow the instructions.
+2. Start a conversation with your bot or invite it into a group where you want it to send messages.
+3. Find the chat ID for every chat you want to send messages to. Contact the [@myidbot](https://t.me/myidbot) bot and send the `/getid` command to get your personal chat ID or invite it into a group and use the `/getgroupid` command to get the group chat ID. Group IDs start with a hyphen, supergroup IDs start with `-100`.
+   Alternatively, you can get the chat ID directly from the bot API. Send *your* bot a command in the chat you want to use, then check `https://api.telegram.org/bot{YourBotToken}/getUpdates`, e.g. `https://api.telegram.org/bot111122223:7OpFlFFRzRBbrUUmIjj5HF9Ox2pYJZy5/getUpdates`
+4. Set the bot token and the chat ID of the recipient in `/etc/netdata/health_alarm_notify.conf` (to edit it on your system run `/etc/netdata/edit-config health_alarm_notify.conf`), like this:
+
+```
+SEND_TELEGRAM="YES"
+TELEGRAM_BOT_TOKEN="111122223:7OpFlFFRzRBbrUUmIjj5HF9Ox2pYJZy5"
+DEFAULT_RECIPIENT_TELEGRAM="-100233335555"
+```
+
+You can define multiple recipients like this: `"-100311112222 212341234|critical"`.
+This example will send:
+
+- All alerts to the group with ID -100311112222
+- Critical alerts to the user with ID 212341234
+
+You can give different recipients per **role** using these (in the same file):
+
+```
+role_recipients_telegram[sysadmin]="212341234"
+role_recipients_telegram[dba]="-1004444333321"
+role_recipients_telegram[webmaster]="49999333322 -1009999222255"
+```
+
+Telegram messages look like this:
+
+![Netdata notifications via Telegram](https://user-images.githubusercontent.com/1153921/66612223-f07dfb80-eb75-11e9-976f-5734ffd93ecd.png)
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fhealth%2Fnotifications%2Ftelegram%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/health/notifications/twilio/Makefile.inc b/health/notifications/twilio/Makefile.inc
new file mode 100644
index 0000000..0f2d8d8
--- /dev/null
+++ b/health/notifications/twilio/Makefile.inc
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_noinst_DATA += \
+ twilio/README.md \
+ twilio/Makefile.inc \
+ $(NULL)
+
diff --git a/health/notifications/twilio/README.md b/health/notifications/twilio/README.md
new file mode 100644
index 0000000..b36d40b
--- /dev/null
+++ b/health/notifications/twilio/README.md
@@ -0,0 +1,47 @@
+<!--
+title: "Twilio"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/health/notifications/twilio/README.md
+-->
+
+# Twilio
+
+Notifications will look like this on your Android device:
+
+![image](https://cloud.githubusercontent.com/assets/17090999/20034652/620b6100-a39b-11e6-96af-4f83b8e830e2.png)
+
+You will need:
+
+1. Sign up and log in to twilio.com.
+2. Pick an SMS-capable number during sign-up.
+3. Get your SID and Token from <https://www.twilio.com/console>.
+4. Fill in `TWILIO_ACCOUNT_SID="XXXXXXXX"`, `TWILIO_ACCOUNT_TOKEN="XXXXXXXXX"` and `TWILIO_NUMBER="+XXXXXXXXXXX"`.
+5. Add the recipient phone numbers to `DEFAULT_RECIPIENT_TWILIO="+XXXXXXXXXXX"`.
+
+**Note**: with a trial account you will only be able to send notifications to the number you signed up with.
+
+Set them in `/etc/netdata/health_alarm_notify.conf` (to edit it on your system run `/etc/netdata/edit-config health_alarm_notify.conf`), like this:
+
+```
+###############################################################################
+# Twilio (twilio.com) SMS options
+
+# multiple recipients can be given like this:
+# "+15555555555 +17777777777"
+
+# enable/disable sending twilio SMS
+SEND_TWILIO="YES"
+
+# Signup for free trial and select a SMS capable Twilio Number
+# To get your Account SID and Token, go to https://www.twilio.com/console
+# Place your sid, token and number below.
+# Then just set the recipients' phone numbers.
+# The trial account is only allowed to use the number specified when set up.
+
+# Without an account sid and token, Netdata cannot send Twilio text messages.
+TWILIO_ACCOUNT_SID="xxxxxxxxx"
+TWILIO_ACCOUNT_TOKEN="xxxxxxxxxx"
+TWILIO_NUMBER="xxxxxxxxxxx"
+DEFAULT_RECIPIENT_TWILIO="+15555555555"
+```
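+
+You can also give different recipients per **role** in the same file (the numbers below are placeholders):
+
+```
+role_recipients_twilio[sysadmin]="+15555555555"
+role_recipients_twilio[dba]="+17777777777"
+```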
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fhealth%2Fnotifications%2Ftwilio%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/health/notifications/web/Makefile.inc b/health/notifications/web/Makefile.inc
new file mode 100644
index 0000000..b564d83
--- /dev/null
+++ b/health/notifications/web/Makefile.inc
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_noinst_DATA += \
+ web/README.md \
+ web/Makefile.inc \
+ $(NULL)
+
diff --git a/health/notifications/web/README.md b/health/notifications/web/README.md
new file mode 100644
index 0000000..9e49186
--- /dev/null
+++ b/health/notifications/web/README.md
@@ -0,0 +1,13 @@
+<!--
+title: "Dashboard"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/health/notifications/web/README.md
+-->
+
+# Dashboard
+
+The Netdata dashboard shows HTML notifications while it is open.
+
+Such web notifications look like this:
+![image](https://cloud.githubusercontent.com/assets/2662304/18407279/82bac6a6-7714-11e6-847e-c2e84eeacbfb.png)
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fhealth%2Fnotifications%2Fweb%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/libnetdata/Makefile.am b/libnetdata/Makefile.am
new file mode 100644
index 0000000..598b72f
--- /dev/null
+++ b/libnetdata/Makefile.am
@@ -0,0 +1,32 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+SUBDIRS = \
+ adaptive_resortable_list \
+ avl \
+ buffer \
+ clocks \
+ config \
+ dictionary \
+ ebpf \
+ eval \
+ json \
+ health \
+ locks \
+ log \
+ popen \
+ procfile \
+ simple_pattern \
+ socket \
+ statistical \
+ storage_number \
+ threads \
+ url \
+ tests \
+ $(NULL)
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/libnetdata/README.md b/libnetdata/README.md
new file mode 100644
index 0000000..cdb199f
--- /dev/null
+++ b/libnetdata/README.md
@@ -0,0 +1,10 @@
+<!--
+title: "libnetdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/libnetdata/README.md
+-->
+
+# libnetdata
+
+`libnetdata` is a collection of library code that is used by all Netdata `C` programs.
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Flibnetdata%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/libnetdata/adaptive_resortable_list/Makefile.am b/libnetdata/adaptive_resortable_list/Makefile.am
new file mode 100644
index 0000000..161784b
--- /dev/null
+++ b/libnetdata/adaptive_resortable_list/Makefile.am
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/libnetdata/adaptive_resortable_list/README.md b/libnetdata/adaptive_resortable_list/README.md
new file mode 100644
index 0000000..74e379a
--- /dev/null
+++ b/libnetdata/adaptive_resortable_list/README.md
@@ -0,0 +1,99 @@
+<!--
+title: "Adaptive Re-sortable List (ARL)"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/libnetdata/adaptive_resortable_list/README.md
+-->
+
+# Adaptive Re-sortable List (ARL)
+
+This library allows Netdata to read a series of `name - value` pairs
+in the **fastest possible way**.
+
+ARLs are used all over Netdata, as they are the most
+CPU-efficient way to process `/proc` files. They are used to
+process both vertical (CSV-like) and horizontal (one pair per line) `name - value` pairs.
+
+## How ARL works
+
+ARL maintains a linked list of all the `NAME` keywords, sorted in the
+order they are found in the data source. The linked list is kept
+sorted at all times: if the data source changes at any time, the
+linked list adapts at the next iteration.
+
+### Initialization
+
+During initialization (just once), the caller:
+
+- calls `arl_create()` to create the ARL
+
+- calls `arl_expect()` multiple times to register the expected keywords
+
+The library will call the `processor()` function (given to
+`arl_create()`) for each expected keyword found.
+The default `processor()` expects `dst` to be an `unsigned long long *`.
+
+Each `name` keyword may have a different `processor()` (by calling
+`arl_expect_custom()` instead of `arl_expect()`).
+
+### Data collection iterations
+
+For each iteration through the data source, the caller:
+
+- calls `arl_begin()` to initiate a data collection iteration.
+ This is to be called just ONCE every time the source is re-evaluated.
+
+- calls `arl_check()` for each entry read from the file.
+
+### Cleanup
+
+When the caller exits:
+
+- calls `arl_free()` to destroy the ARL and free all its memory (see the usage sketch below).
+
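+Putting it all together, a minimal usage sketch looks like this (the
+`parse_next_line()` tokenizer is hypothetical - it stands in for whatever
+reads and tokenizes the real data source):
+
+```c
+unsigned long long mem_total = 0, mem_free = 0;
+
+// once, at startup: default processor, re-check every 60 iterations
+ARL_BASE *arl = arl_create("meminfo", NULL, 60);
+arl_expect(arl, "MemTotal", &mem_total);
+arl_expect(arl, "MemFree", &mem_free);
+
+// on every data collection iteration
+arl_begin(arl);
+const char *name, *value;
+while(parse_next_line(&name, &value)) {
+    if(arl_check(arl, name, value))
+        break; // all expected values found - stop parsing early
+}
+
+// once, at exit
+arl_free(arl);
+```
+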
+### Performance
+
+ARL maintains a list of `name` keywords found in the data source (even the ones
+that are not useful for data collection).
+
+If the data source maintains the same order of the `name-value` pairs, each
+call to `arl_check()` executes only a single `strcmp()` to verify that the
+expected order has not changed, increments a counter and updates a pointer.
+So, if the data source has 100 `name-value` pairs and their order remains constant
+over time, only 100 successful `strcmp()` calls are executed per iteration.
+
+In the unlikely event that an iteration sees the data source with a different order,
+a full search of the remaining keywords is made for each out-of-order keyword. But
+this search uses 32-bit hashes, not string comparisons, so it should still be fast.
+
+When all expectations are satisfied (even in the middle of an iteration),
+the call to `arl_check()` will return 1 to signal the caller to stop the loop,
+saving valuable CPU resources for the rest of the data source.
+
+In the following test we used alternative methods to process a data source
+like `/proc/meminfo` (already tokenized, in memory) **1M times**,
+extracting the same number of expected metrics each time:
+
+|test|code|string comparison|number parsing|duration|
+|:--:|:--:|:---------------:|:------------:|:------:|
+|1|if-else-if-else-if|`strcmp()`|`strtoull()`|4630.337 ms|
+|2|nested loops|inline `simple_hash()` and `strcmp()`|`strtoull()`|1597.481 ms|
+|3|nested loops|inline `simple_hash()` and `strcmp()`|`str2ull()`|923.523 ms|
+|4|if-else-if-else-if|inline `simple_hash()` and `strcmp()`|`strtoull()`|854.574 ms|
+|5|if-else-if-else-if|statement expression `simple_hash()` and `strcmp()`|`strtoull()`|912.013 ms|
+|6|if-continue|inline `simple_hash()` and `strcmp()`|`strtoull()`|842.279 ms|
+|7|if-else-if-else-if|inline `simple_hash()` and `strcmp()`|`str2ull()`|602.837 ms|
+|8|ARL|ARL|`strtoull()`|350.360 ms|
+|9|ARL|ARL|`str2ull()`|157.237 ms|
+
+Compared to the unoptimized code (test No 1: 4.6 sec):
+
+- before ARL, Netdata was using test No **7** (hashing and a custom `str2ull()`) to achieve 602ms.
+- the current ARL implementation is test No **9**, which needs only 157ms (29 times faster than the unoptimized code, about 4 times faster than the previously optimized code).
+
+[Check the source code of this test](https://raw.githubusercontent.com/netdata/netdata/master/tests/profile/benchmark-value-pairs.c).
+
+## Limitations
+
+Do not use ARL if a name/keyword may appear more than once in the
+source data.
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Flibnetdata%2Fadaptive_resortable_list%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/libnetdata/adaptive_resortable_list/adaptive_resortable_list.c b/libnetdata/adaptive_resortable_list/adaptive_resortable_list.c
new file mode 100644
index 0000000..7f4c6c5
--- /dev/null
+++ b/libnetdata/adaptive_resortable_list/adaptive_resortable_list.c
@@ -0,0 +1,280 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "../libnetdata.h"
+
+// the default processor() of the ARL
+// can be overwritten at arl_create()
+inline void arl_callback_str2ull(const char *name, uint32_t hash, const char *value, void *dst) {
+ (void)name;
+ (void)hash;
+
+ register unsigned long long *d = dst;
+ *d = str2ull(value);
+ // fprintf(stderr, "name '%s' with hash %u and value '%s' is %llu\n", name, hash, value, *d);
+}
+
+inline void arl_callback_str2kernel_uint_t(const char *name, uint32_t hash, const char *value, void *dst) {
+ (void)name;
+ (void)hash;
+
+ register kernel_uint_t *d = dst;
+ *d = str2kernel_uint_t(value);
+ // fprintf(stderr, "name '%s' with hash %u and value '%s' is %llu\n", name, hash, value, (unsigned long long)*d);
+}
+
+inline void arl_callback_ssize_t(const char *name, uint32_t hash, const char *value, void *dst) {
+ (void)name;
+ (void)hash;
+
+ register ssize_t *d = dst;
+ *d = (ssize_t)str2ll(value, NULL);
+ // fprintf(stderr, "name '%s' with hash %u and value '%s' is %zd\n", name, hash, value, *d);
+}
+
+// create a new ARL
+ARL_BASE *arl_create(const char *name, void (*processor)(const char *, uint32_t, const char *, void *), size_t rechecks) {
+ ARL_BASE *base = callocz(1, sizeof(ARL_BASE));
+
+ base->name = strdupz(name);
+
+ if(!processor)
+ base->processor = arl_callback_str2ull;
+ else
+ base->processor = processor;
+
+ base->rechecks = rechecks;
+
+ return base;
+}
+
+void arl_free(ARL_BASE *arl_base) {
+ if(unlikely(!arl_base))
+ return;
+
+ while(arl_base->head) {
+ ARL_ENTRY *e = arl_base->head;
+ arl_base->head = e->next;
+
+ freez(e->name);
+#ifdef NETDATA_INTERNAL_CHECKS
+ memset(e, 0, sizeof(ARL_ENTRY));
+#endif
+ freez(e);
+ }
+
+ freez(arl_base->name);
+
+#ifdef NETDATA_INTERNAL_CHECKS
+ memset(arl_base, 0, sizeof(ARL_BASE));
+#endif
+
+ freez(arl_base);
+}
+
+void arl_begin(ARL_BASE *base) {
+
+#ifdef NETDATA_INTERNAL_CHECKS
+ if(likely(base->iteration > 10)) {
+ // do these checks after the ARL has been sorted
+
+ if(unlikely(base->relinkings > (base->expected + base->allocated)))
+ info("ARL '%s' has %zu relinkings with %zu expected and %zu allocated entries. Is the source changing so fast?"
+ , base->name, base->relinkings, base->expected, base->allocated);
+
+ if(unlikely(base->slow > base->fast))
+ info("ARL '%s' has %zu fast searches and %zu slow searches. Is the source really changing so fast?"
+ , base->name, base->fast, base->slow);
+
+ /*
+ if(unlikely(base->iteration % 60 == 0)) {
+ info("ARL '%s' statistics: iteration %zu, expected %zu, wanted %zu, allocated %zu, fred %zu, relinkings %zu, found %zu, added %zu, fast %zu, slow %zu"
+ , base->name
+ , base->iteration
+ , base->expected
+ , base->wanted
+ , base->allocated
+ , base->fred
+ , base->relinkings
+ , base->found
+ , base->added
+ , base->fast
+ , base->slow
+ );
+ // for(e = base->head; e; e = e->next) fprintf(stderr, "%s ", e->name);
+ // fprintf(stderr, "\n");
+ }
+ */
+ }
+#endif
+
+ if(unlikely(base->iteration > 0 && (base->added || (base->iteration % base->rechecks) == 0))) {
+ int wanted_equals_expected = ((base->iteration % base->rechecks) == 0);
+
+ // fprintf(stderr, "\n\narl_begin() rechecking, added %zu, iteration %zu, rechecks %zu, wanted_equals_expected %d\n\n\n", base->added, base->iteration, base->rechecks, wanted_equals_expected);
+
+ base->added = 0;
+ base->wanted = (wanted_equals_expected)?base->expected:0;
+
+ ARL_ENTRY *e = base->head;
+ while(e) {
+ if(e->flags & ARL_ENTRY_FLAG_FOUND) {
+
+ // remove the found flag
+ e->flags &= ~ARL_ENTRY_FLAG_FOUND;
+
+ // count it in wanted
+ if(!wanted_equals_expected && e->flags & ARL_ENTRY_FLAG_EXPECTED)
+ base->wanted++;
+
+ }
+ else if(e->flags & ARL_ENTRY_FLAG_DYNAMIC && !(base->head == e && !e->next)) { // not last entry
+ // we can remove this entry
+ // it is not found, and it was created because
+ // it was found in the source file
+
+ // remember the next one
+ ARL_ENTRY *t = e->next;
+
+ // remove it from the list
+ if(e->next) e->next->prev = e->prev;
+ if(e->prev) e->prev->next = e->next;
+ if(base->head == e) base->head = e->next;
+
+ // free it
+ freez(e->name);
+ freez(e);
+
+ // count it
+ base->fred++;
+
+ // continue
+ e = t;
+ continue;
+ }
+
+ e = e->next;
+ }
+ }
+
+ if(unlikely(!base->head)) {
+ // hm... no nodes at all in the list #1700
+ // add a fake one to prevent a crash
+ // this is better than checking for the existence of nodes all the time
+ arl_expect(base, "a-really-not-existing-source-keyword", NULL);
+ }
+
+ base->iteration++;
+ base->next_keyword = base->head;
+ base->found = 0;
+
+}
+
+// register an expected keyword to the ARL
+// together with its destination (i.e. the output of the processor())
+ARL_ENTRY *arl_expect_custom(ARL_BASE *base, const char *keyword, void (*processor)(const char *name, uint32_t hash, const char *value, void *dst), void *dst) {
+ ARL_ENTRY *e = callocz(1, sizeof(ARL_ENTRY));
+ e->name = strdupz(keyword);
+ e->hash = simple_hash(e->name);
+ e->processor = (processor)?processor:base->processor;
+ e->dst = dst;
+ e->flags = ARL_ENTRY_FLAG_EXPECTED;
+ e->prev = NULL;
+ e->next = base->head;
+
+ if(base->head) base->head->prev = e;
+ else base->next_keyword = e;
+
+ base->head = e;
+ base->expected++;
+ base->allocated++;
+
+ base->wanted = base->expected;
+
+ return e;
+}
+
+int arl_find_or_create_and_relink(ARL_BASE *base, const char *s, const char *value) {
+ ARL_ENTRY *e;
+
+ uint32_t hash = simple_hash(s);
+
+ // find if it already exists in the data
+ for(e = base->head; e ; e = e->next)
+ if(e->hash == hash && !strcmp(e->name, s))
+ break;
+
+#ifdef NETDATA_INTERNAL_CHECKS
+ if(unlikely(base->next_keyword && e == base->next_keyword))
+ fatal("Internal Error: e == base->last");
+#endif
+
+ if(e) {
+ // found it in the keywords
+
+ base->relinkings++;
+
+ // run the processor for it
+ if(unlikely(e->dst)) {
+ e->processor(e->name, hash, value, e->dst);
+ base->found++;
+ }
+
+ // unlink it - we will relink it below
+ if(e->next) e->next->prev = e->prev;
+ if(e->prev) e->prev->next = e->next;
+
+ // make sure the head is properly linked
+ if(base->head == e)
+ base->head = e->next;
+ }
+ else {
+ // not found
+
+ // create it
+ e = callocz(1, sizeof(ARL_ENTRY));
+ e->name = strdupz(s);
+ e->hash = hash;
+ e->flags = ARL_ENTRY_FLAG_DYNAMIC;
+
+ base->allocated++;
+ base->added++;
+ }
+
+#ifdef NETDATA_INTERNAL_CHECKS
+ if(unlikely(base->iteration % 60 == 0 && e->flags & ARL_ENTRY_FLAG_FOUND))
+ info("ARL '%s': entry '%s' is already found. Did you forget to call arl_begin()?", base->name, s);
+#endif
+
+ e->flags |= ARL_ENTRY_FLAG_FOUND;
+
+ // link it here
+ e->next = base->next_keyword;
+ if(base->next_keyword) {
+ e->prev = base->next_keyword->prev;
+ base->next_keyword->prev = e;
+
+ if(e->prev)
+ e->prev->next = e;
+
+ if(base->head == base->next_keyword)
+ base->head = e;
+ }
+ else {
+ e->prev = NULL;
+
+ if(!base->head)
+ base->head = e;
+ }
+
+ // prepare the next iteration
+ base->next_keyword = e->next;
+ if(unlikely(!base->next_keyword))
+ base->next_keyword = base->head;
+
+ if(unlikely(base->found == base->wanted)) {
+ // fprintf(stderr, "FOUND ALL WANTED 1: found = %zu, wanted = %zu, expected %zu\n", base->found, base->wanted, base->expected);
+ return 1;
+ }
+
+ return 0;
+}
diff --git a/libnetdata/adaptive_resortable_list/adaptive_resortable_list.h b/libnetdata/adaptive_resortable_list/adaptive_resortable_list.h
new file mode 100644
index 0000000..011ee73
--- /dev/null
+++ b/libnetdata/adaptive_resortable_list/adaptive_resortable_list.h
@@ -0,0 +1,138 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "../libnetdata.h"
+
+#ifndef NETDATA_ADAPTIVE_RESORTABLE_LIST_H
+#define NETDATA_ADAPTIVE_RESORTABLE_LIST_H 1
+
+#define ARL_ENTRY_FLAG_FOUND 0x01 // the entry has been found in the source data
+#define ARL_ENTRY_FLAG_EXPECTED 0x02 // the entry is expected by the program
+#define ARL_ENTRY_FLAG_DYNAMIC 0x04 // the entry was dynamically allocated, from source data
+
+typedef struct arl_entry {
+ char *name; // the keywords
+ uint32_t hash; // the hash of the keyword
+
+ void *dst; // the dst to pass to the processor
+
+ uint8_t flags; // ARL_ENTRY_FLAG_*
+
+ // the processor to do the job
+ void (*processor)(const char *name, uint32_t hash, const char *value, void *dst);
+
+ // double linked list for fast re-linkings
+ struct arl_entry *prev, *next;
+} ARL_ENTRY;
+
+typedef struct arl_base {
+ char *name;
+
+ size_t iteration; // incremented on each iteration (arl_begin())
+ size_t found; // the number of expected keywords found in this iteration
+ size_t expected; // the number of expected keywords
+ size_t wanted; // the number of wanted keywords
+ // i.e. the number of keywords found and expected
+
+ size_t relinkings; // the number of relinkings we have made so far
+
+ size_t allocated; // the number of keywords allocated
+ size_t fred; // the number of keywords cleaned up
+
+ size_t rechecks; // the number of iterations between re-checks of the
+ // wanted number of keywords
+                                // this is only needed in cases where the source
+                                // has fewer lines over time.
+
+ size_t added; // it is non-zero if new keywords have been added
+                                // this is only needed to detect that new lines
+                                // have been added to the file over time.
+
+#ifdef NETDATA_INTERNAL_CHECKS
+ size_t fast; // the number of times we have taken the fast path
+ size_t slow; // the number of times we have taken the slow path
+#endif
+
+ // the processor to do the job
+ void (*processor)(const char *name, uint32_t hash, const char *value, void *dst);
+
+ // the linked list of the keywords
+ ARL_ENTRY *head;
+
+ // since we keep the list of keywords sorted (as found in the source data)
+    // this is the next keyword that we expect to find in the source data.
+ ARL_ENTRY *next_keyword;
+} ARL_BASE;
+
+// create a new ARL
+extern ARL_BASE *arl_create(const char *name, void (*processor)(const char *, uint32_t, const char *, void *), size_t rechecks);
+
+// free an ARL
+extern void arl_free(ARL_BASE *arl_base);
+
+// register an expected keyword to the ARL
+// together with its destination (i.e. the output of the processor())
+extern ARL_ENTRY *arl_expect_custom(ARL_BASE *base, const char *keyword, void (*processor)(const char *name, uint32_t hash, const char *value, void *dst), void *dst);
+#define arl_expect(base, keyword, dst) arl_expect_custom(base, keyword, NULL, dst)
+
+// an internal call to complete the check() call
+extern int arl_find_or_create_and_relink(ARL_BASE *base, const char *s, const char *value);
+
+// begin an ARL iteration
+extern void arl_begin(ARL_BASE *base);
+
+extern void arl_callback_str2ull(const char *name, uint32_t hash, const char *value, void *dst);
+extern void arl_callback_str2kernel_uint_t(const char *name, uint32_t hash, const char *value, void *dst);
+extern void arl_callback_ssize_t(const char *name, uint32_t hash, const char *value, void *dst);
+
+// check a keyword against the ARL
+// this is to be called for each keyword read from the source data
+// keyword = the keyword, as collected
+// value = the value to be passed to the processor
+// it is defined in the header file in order to be inlined
+static inline int arl_check(ARL_BASE *base, const char *keyword, const char *value) {
+ ARL_ENTRY *e = base->next_keyword;
+
+#ifdef NETDATA_INTERNAL_CHECKS
+ if(unlikely((base->fast + base->slow) % (base->expected + base->allocated) == 0 && (base->fast + base->slow) > (base->expected + base->allocated) * base->iteration))
+ info("ARL '%s': Did you forget to call arl_begin()?", base->name);
+#endif
+
+ // it should be the first entry (pointed by base->next_keyword)
+ if(likely(!strcmp(keyword, e->name))) {
+ // it is
+
+#ifdef NETDATA_INTERNAL_CHECKS
+ base->fast++;
+#endif
+
+ e->flags |= ARL_ENTRY_FLAG_FOUND;
+
+ // execute the processor
+ if(unlikely(e->dst)) {
+ e->processor(e->name, e->hash, value, e->dst);
+ base->found++;
+ }
+
+ // be prepared for the next iteration
+ base->next_keyword = e->next;
+ if(unlikely(!base->next_keyword))
+ base->next_keyword = base->head;
+
+ // stop if we collected all the values for this iteration
+ if(unlikely(base->found == base->wanted)) {
+ // fprintf(stderr, "FOUND ALL WANTED 2: found = %zu, wanted = %zu, expected %zu\n", base->found, base->wanted, base->expected);
+ return 1;
+ }
+
+ return 0;
+ }
+
+#ifdef NETDATA_INTERNAL_CHECKS
+ base->slow++;
+#endif
+
+    // we read a not-expected keyword from the source
+ return arl_find_or_create_and_relink(base, keyword, value);
+}
+
+#endif //NETDATA_ADAPTIVE_RESORTABLE_LIST_H
diff --git a/libnetdata/avl/Makefile.am b/libnetdata/avl/Makefile.am
new file mode 100644
index 0000000..161784b
--- /dev/null
+++ b/libnetdata/avl/Makefile.am
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/libnetdata/avl/README.md b/libnetdata/avl/README.md
new file mode 100644
index 0000000..2097be5
--- /dev/null
+++ b/libnetdata/avl/README.md
@@ -0,0 +1,17 @@
+<!--
+title: "AVL"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/libnetdata/avl/README.md
+-->
+
+# AVL
+
+AVL is a library for indexing objects in balanced binary (AVL) trees.
+
+`avl_insert()`, `avl_remove()` and `avl_search()` are adaptations
+of the AVL algorithm found in `libavl` v2.0.3, so that they do not
+use any memory allocations and their memory footprint is optimized
+(by eliminating unnecessary data members).
+
+In addition to the above, this version of AVL provides lock-protected variants
+of these operations, as well as traversal functions.
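+
+As a sketch of typical usage (the `my_item` type and its comparator are
+illustrative, not part of the library): embed the `avl` node as the **first**
+member of the indexed structure, so pointers to the structure can be cast to
+pointers to the node:
+
+```c
+typedef struct my_item {
+    avl avl_node;        // MUST be the first member
+    const char *name;
+} my_item;
+
+static int my_item_compar(void *a, void *b) {
+    return strcmp(((my_item *)a)->name, ((my_item *)b)->name);
+}
+
+avl_tree_lock idx;
+avl_init_lock(&idx, my_item_compar);
+
+my_item item = { .name = "cpu" };
+if(avl_insert_lock(&idx, (avl *)&item) != (avl *)&item)
+    ; // an equal item was already indexed - this one was NOT inserted
+
+my_item lookup = { .name = "cpu" };
+my_item *found = (my_item *)avl_search_lock(&idx, (avl *)&lookup);
+```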
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Flibnetdata%2Favl%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/libnetdata/avl/avl.c b/libnetdata/avl/avl.c
new file mode 100644
index 0000000..5219851
--- /dev/null
+++ b/libnetdata/avl/avl.c
@@ -0,0 +1,420 @@
+// SPDX-License-Identifier: LGPL-3.0-or-later
+
+#include "../libnetdata.h"
+
+/* ------------------------------------------------------------------------- */
+/*
+ * avl_insert(), avl_remove() and avl_search()
+ * are adaptations (by Costa Tsaousis) of the AVL algorithm found in libavl
+ * v2.0.3, so that they do not use any memory allocations and their memory
+ * footprint is optimized (by eliminating non-necessary data members).
+ *
+ * libavl - library for manipulation of binary trees.
+ * Copyright (C) 1998, 1999, 2000, 2001, 2002, 2004 Free Software
+ * Foundation, Inc.
+*/
+
+
+/* Search |tree| for an item matching |item|, and return it if found.
+ Otherwise return |NULL|. */
+avl *avl_search(avl_tree_type *tree, avl *item) {
+ avl *p;
+
+ // assert (tree != NULL && item != NULL);
+
+ for (p = tree->root; p != NULL; ) {
+ int cmp = tree->compar(item, p);
+
+ if (cmp < 0)
+ p = p->avl_link[0];
+ else if (cmp > 0)
+ p = p->avl_link[1];
+ else /* |cmp == 0| */
+ return p;
+ }
+
+ return NULL;
+}
+
+/* Inserts |item| into |tree| and returns a pointer to |item|.
+ If a duplicate item is found in the tree,
+ returns a pointer to the duplicate without inserting |item|.
+ */
+avl *avl_insert(avl_tree_type *tree, avl *item) {
+ avl *y, *z; /* Top node to update balance factor, and parent. */
+ avl *p, *q; /* Iterator, and parent. */
+ avl *n; /* Newly inserted node. */
+ avl *w; /* New root of rebalanced subtree. */
+ unsigned char dir; /* Direction to descend. */
+
+ unsigned char da[AVL_MAX_HEIGHT]; /* Cached comparison results. */
+ int k = 0; /* Number of cached results. */
+
+ // assert(tree != NULL && item != NULL);
+
+ z = (avl *) &tree->root;
+ y = tree->root;
+ dir = 0;
+ for (q = z, p = y; p != NULL; q = p, p = p->avl_link[dir]) {
+ int cmp = tree->compar(item, p);
+ if (cmp == 0)
+ return p;
+
+ if (p->avl_balance != 0)
+ z = q, y = p, k = 0;
+ da[k++] = dir = (unsigned char)(cmp > 0);
+ }
+
+ n = q->avl_link[dir] = item;
+
+ // tree->avl_count++;
+ n->avl_link[0] = n->avl_link[1] = NULL;
+ n->avl_balance = 0;
+ if (y == NULL) return n;
+
+ for (p = y, k = 0; p != n; p = p->avl_link[da[k]], k++)
+ if (da[k] == 0)
+ p->avl_balance--;
+ else
+ p->avl_balance++;
+
+ if (y->avl_balance == -2) {
+ avl *x = y->avl_link[0];
+ if (x->avl_balance == -1) {
+ w = x;
+ y->avl_link[0] = x->avl_link[1];
+ x->avl_link[1] = y;
+ x->avl_balance = y->avl_balance = 0;
+ }
+ else {
+ // assert (x->avl_balance == +1);
+ w = x->avl_link[1];
+ x->avl_link[1] = w->avl_link[0];
+ w->avl_link[0] = x;
+ y->avl_link[0] = w->avl_link[1];
+ w->avl_link[1] = y;
+ if (w->avl_balance == -1)
+ x->avl_balance = 0, y->avl_balance = +1;
+ else if (w->avl_balance == 0)
+ x->avl_balance = y->avl_balance = 0;
+ else /* |w->avl_balance == +1| */
+ x->avl_balance = -1, y->avl_balance = 0;
+ w->avl_balance = 0;
+ }
+ }
+ else if (y->avl_balance == +2) {
+ avl *x = y->avl_link[1];
+ if (x->avl_balance == +1) {
+ w = x;
+ y->avl_link[1] = x->avl_link[0];
+ x->avl_link[0] = y;
+ x->avl_balance = y->avl_balance = 0;
+ }
+ else {
+ // assert (x->avl_balance == -1);
+ w = x->avl_link[0];
+ x->avl_link[0] = w->avl_link[1];
+ w->avl_link[1] = x;
+ y->avl_link[1] = w->avl_link[0];
+ w->avl_link[0] = y;
+ if (w->avl_balance == +1)
+ x->avl_balance = 0, y->avl_balance = -1;
+ else if (w->avl_balance == 0)
+ x->avl_balance = y->avl_balance = 0;
+ else /* |w->avl_balance == -1| */
+ x->avl_balance = +1, y->avl_balance = 0;
+ w->avl_balance = 0;
+ }
+ }
+ else return n;
+
+ z->avl_link[y != z->avl_link[0]] = w;
+
+ // tree->avl_generation++;
+ return n;
+}
+
+/* Deletes from |tree| and returns an item matching |item|.
+   Returns a null pointer if no matching item is found. */
+avl *avl_remove(avl_tree_type *tree, avl *item) {
+ /* Stack of nodes. */
+ avl *pa[AVL_MAX_HEIGHT]; /* Nodes. */
+ unsigned char da[AVL_MAX_HEIGHT]; /* |avl_link[]| indexes. */
+ int k; /* Stack pointer. */
+
+ avl *p; /* Traverses tree to find node to delete. */
+ int cmp; /* Result of comparison between |item| and |p|. */
+
+ // assert (tree != NULL && item != NULL);
+
+ k = 0;
+ p = (avl *) &tree->root;
+ for(cmp = -1; cmp != 0; cmp = tree->compar(item, p)) {
+ unsigned char dir = (unsigned char)(cmp > 0);
+
+ pa[k] = p;
+ da[k++] = dir;
+
+ p = p->avl_link[dir];
+ if(p == NULL) return NULL;
+ }
+
+ item = p;
+
+ if (p->avl_link[1] == NULL)
+ pa[k - 1]->avl_link[da[k - 1]] = p->avl_link[0];
+ else {
+ avl *r = p->avl_link[1];
+ if (r->avl_link[0] == NULL) {
+ r->avl_link[0] = p->avl_link[0];
+ r->avl_balance = p->avl_balance;
+ pa[k - 1]->avl_link[da[k - 1]] = r;
+ da[k] = 1;
+ pa[k++] = r;
+ }
+ else {
+ avl *s;
+ int j = k++;
+
+ for (;;) {
+ da[k] = 0;
+ pa[k++] = r;
+ s = r->avl_link[0];
+ if (s->avl_link[0] == NULL) break;
+
+ r = s;
+ }
+
+ s->avl_link[0] = p->avl_link[0];
+ r->avl_link[0] = s->avl_link[1];
+ s->avl_link[1] = p->avl_link[1];
+ s->avl_balance = p->avl_balance;
+
+ pa[j - 1]->avl_link[da[j - 1]] = s;
+ da[j] = 1;
+ pa[j] = s;
+ }
+ }
+
+ // assert (k > 0);
+ while (--k > 0) {
+ avl *y = pa[k];
+
+ if (da[k] == 0) {
+ y->avl_balance++;
+ if (y->avl_balance == +1) break;
+ else if (y->avl_balance == +2) {
+ avl *x = y->avl_link[1];
+ if (x->avl_balance == -1) {
+ avl *w;
+ // assert (x->avl_balance == -1);
+ w = x->avl_link[0];
+ x->avl_link[0] = w->avl_link[1];
+ w->avl_link[1] = x;
+ y->avl_link[1] = w->avl_link[0];
+ w->avl_link[0] = y;
+ if (w->avl_balance == +1)
+ x->avl_balance = 0, y->avl_balance = -1;
+ else if (w->avl_balance == 0)
+ x->avl_balance = y->avl_balance = 0;
+ else /* |w->avl_balance == -1| */
+ x->avl_balance = +1, y->avl_balance = 0;
+ w->avl_balance = 0;
+ pa[k - 1]->avl_link[da[k - 1]] = w;
+ }
+ else {
+ y->avl_link[1] = x->avl_link[0];
+ x->avl_link[0] = y;
+ pa[k - 1]->avl_link[da[k - 1]] = x;
+ if (x->avl_balance == 0) {
+ x->avl_balance = -1;
+ y->avl_balance = +1;
+ break;
+ }
+ else x->avl_balance = y->avl_balance = 0;
+ }
+ }
+ }
+ else
+ {
+ y->avl_balance--;
+ if (y->avl_balance == -1) break;
+ else if (y->avl_balance == -2) {
+ avl *x = y->avl_link[0];
+ if (x->avl_balance == +1) {
+ avl *w;
+ // assert (x->avl_balance == +1);
+ w = x->avl_link[1];
+ x->avl_link[1] = w->avl_link[0];
+ w->avl_link[0] = x;
+ y->avl_link[0] = w->avl_link[1];
+ w->avl_link[1] = y;
+ if (w->avl_balance == -1)
+ x->avl_balance = 0, y->avl_balance = +1;
+ else if (w->avl_balance == 0)
+ x->avl_balance = y->avl_balance = 0;
+ else /* |w->avl_balance == +1| */
+ x->avl_balance = -1, y->avl_balance = 0;
+ w->avl_balance = 0;
+ pa[k - 1]->avl_link[da[k - 1]] = w;
+ }
+ else {
+ y->avl_link[0] = x->avl_link[1];
+ x->avl_link[1] = y;
+ pa[k - 1]->avl_link[da[k - 1]] = x;
+ if (x->avl_balance == 0) {
+ x->avl_balance = +1;
+ y->avl_balance = -1;
+ break;
+ }
+ else x->avl_balance = y->avl_balance = 0;
+ }
+ }
+ }
+ }
+
+ // tree->avl_count--;
+ // tree->avl_generation++;
+ return item;
+}
+
+/* ------------------------------------------------------------------------- */
+// below are functions by (C) Costa Tsaousis
+
+// ---------------------------
+// traversing
+
+int avl_walker(avl *node, int (*callback)(void * /*entry*/, void * /*data*/), void *data) {
+ int total = 0, ret = 0;
+
+ if(node->avl_link[0]) {
+ ret = avl_walker(node->avl_link[0], callback, data);
+ if(ret < 0) return ret;
+ total += ret;
+ }
+
+ ret = callback(node, data);
+ if(ret < 0) return ret;
+ total += ret;
+
+ if(node->avl_link[1]) {
+ ret = avl_walker(node->avl_link[1], callback, data);
+ if (ret < 0) return ret;
+ total += ret;
+ }
+
+ return total;
+}
+
+int avl_traverse(avl_tree_type *tree, int (*callback)(void * /*entry*/, void * /*data*/), void *data) {
+ if(tree->root)
+ return avl_walker(tree->root, callback, data);
+ else
+ return 0;
+}
+
+// ---------------------------
+// locks
+
+void avl_read_lock(avl_tree_lock *t) {
+#ifndef AVL_WITHOUT_PTHREADS
+#ifdef AVL_LOCK_WITH_MUTEX
+ netdata_mutex_lock(&t->mutex);
+#else
+ netdata_rwlock_rdlock(&t->rwlock);
+#endif
+#endif /* AVL_WITHOUT_PTHREADS */
+}
+
+void avl_write_lock(avl_tree_lock *t) {
+#ifndef AVL_WITHOUT_PTHREADS
+#ifdef AVL_LOCK_WITH_MUTEX
+ netdata_mutex_lock(&t->mutex);
+#else
+ netdata_rwlock_wrlock(&t->rwlock);
+#endif
+#endif /* AVL_WITHOUT_PTHREADS */
+}
+
+void avl_unlock(avl_tree_lock *t) {
+#ifndef AVL_WITHOUT_PTHREADS
+#ifdef AVL_LOCK_WITH_MUTEX
+ netdata_mutex_unlock(&t->mutex);
+#else
+ netdata_rwlock_unlock(&t->rwlock);
+#endif
+#endif /* AVL_WITHOUT_PTHREADS */
+}
+
+// ---------------------------
+// operations with locking
+
+void avl_init_lock(avl_tree_lock *tree, int (*compar)(void * /*a*/, void * /*b*/)) {
+ avl_init(&tree->avl_tree, compar);
+
+#ifndef AVL_WITHOUT_PTHREADS
+ int lock;
+
+#ifdef AVL_LOCK_WITH_MUTEX
+ lock = netdata_mutex_init(&tree->mutex, NULL);
+#else
+ lock = netdata_rwlock_init(&tree->rwlock);
+#endif
+
+ if(lock != 0)
+ fatal("Failed to initialize AVL mutex/rwlock, error: %d", lock);
+
+#endif /* AVL_WITHOUT_PTHREADS */
+}
+
+void avl_destroy_lock(avl_tree_lock *tree) {
+#ifndef AVL_WITHOUT_PTHREADS
+ int lock;
+
+#ifdef AVL_LOCK_WITH_MUTEX
+ lock = pthread_mutex_destroy(&tree->mutex);
+#else
+ lock = pthread_rwlock_destroy(&tree->rwlock);
+#endif
+
+ if(lock != 0)
+ fatal("Failed to destroy AVL mutex/rwlock, error: %d", lock);
+
+#endif /* AVL_WITHOUT_PTHREADS */
+}
+
+avl *avl_search_lock(avl_tree_lock *tree, avl *item) {
+ avl_read_lock(tree);
+ avl *ret = avl_search(&tree->avl_tree, item);
+ avl_unlock(tree);
+ return ret;
+}
+
+avl * avl_remove_lock(avl_tree_lock *tree, avl *item) {
+ avl_write_lock(tree);
+ avl *ret = avl_remove(&tree->avl_tree, item);
+ avl_unlock(tree);
+ return ret;
+}
+
+avl *avl_insert_lock(avl_tree_lock *tree, avl *item) {
+ avl_write_lock(tree);
+ avl * ret = avl_insert(&tree->avl_tree, item);
+ avl_unlock(tree);
+ return ret;
+}
+
+int avl_traverse_lock(avl_tree_lock *tree, int (*callback)(void * /*entry*/, void * /*data*/), void *data) {
+ int ret;
+ avl_read_lock(tree);
+ ret = avl_traverse(&tree->avl_tree, callback, data);
+ avl_unlock(tree);
+ return ret;
+}
+
+void avl_init(avl_tree_type *tree, int (*compar)(void * /*a*/, void * /*b*/)) {
+ tree->root = NULL;
+ tree->compar = compar;
+}
+
+// ------------------
diff --git a/libnetdata/avl/avl.h b/libnetdata/avl/avl.h
new file mode 100644
index 0000000..32e3f27
--- /dev/null
+++ b/libnetdata/avl/avl.h
@@ -0,0 +1,92 @@
+// SPDX-License-Identifier: LGPL-3.0-or-later
+
+#ifndef _AVL_H
+#define _AVL_H 1
+
+#include "../libnetdata.h"
+
+/* Maximum AVL tree height. */
+#ifndef AVL_MAX_HEIGHT
+#define AVL_MAX_HEIGHT 92
+#endif
+
+#ifndef AVL_WITHOUT_PTHREADS
+#include <pthread.h>
+
+// #define AVL_LOCK_WITH_MUTEX 1
+
+#ifdef AVL_LOCK_WITH_MUTEX
+#define AVL_LOCK_INITIALIZER NETDATA_MUTEX_INITIALIZER
+#else /* AVL_LOCK_WITH_MUTEX */
+#define AVL_LOCK_INITIALIZER NETDATA_RWLOCK_INITIALIZER
+#endif /* AVL_LOCK_WITH_MUTEX */
+
+#else /* AVL_WITHOUT_PTHREADS */
+#define AVL_LOCK_INITIALIZER
+#endif /* AVL_WITHOUT_PTHREADS */
+
+/* Data structures */
+
+/* One element of the AVL tree */
+typedef struct avl {
+ struct avl *avl_link[2]; /* Subtrees. */
+ signed char avl_balance; /* Balance factor. */
+} avl;
+
+/* An AVL tree */
+typedef struct avl_tree_type {
+ avl *root;
+ int (*compar)(void *a, void *b);
+} avl_tree_type;
+
+typedef struct avl_tree_lock {
+ avl_tree_type avl_tree;
+
+#ifndef AVL_WITHOUT_PTHREADS
+#ifdef AVL_LOCK_WITH_MUTEX
+ netdata_mutex_t mutex;
+#else /* AVL_LOCK_WITH_MUTEX */
+ netdata_rwlock_t rwlock;
+#endif /* AVL_LOCK_WITH_MUTEX */
+#endif /* AVL_WITHOUT_PTHREADS */
+} avl_tree_lock;
+
+/* Public methods */
+
+/* Insert element a into the AVL tree t
+ * returns the added element a, or a pointer to the
+ * element that is equal to a (as returned by t->compar())
+ * a is linked directly to the tree, so it has to
+ * be properly allocated by the caller.
+ */
+avl *avl_insert_lock(avl_tree_lock *tree, avl *item) NEVERNULL WARNUNUSED;
+avl *avl_insert(avl_tree_type *tree, avl *item) NEVERNULL WARNUNUSED;
+
+/* Remove an element a from the AVL tree t
+ * returns a pointer to the removed element
+ * or NULL if an element equal to a is not found
+ * (equal as returned by t->compar())
+ */
+avl *avl_remove_lock(avl_tree_lock *tree, avl *item) WARNUNUSED;
+avl *avl_remove(avl_tree_type *tree, avl *item) WARNUNUSED;
+
+/* Find the element in the tree that is equal to a
+ * (equal as returned by t->compar())
+ * returns NULL if no element is equal to a
+ */
+avl *avl_search_lock(avl_tree_lock *tree, avl *item);
+avl *avl_search(avl_tree_type *tree, avl *item);
+
+/* Initialize the avl_tree_lock
+ */
+void avl_init_lock(avl_tree_lock *tree, int (*compar)(void *a, void *b));
+void avl_init(avl_tree_type *tree, int (*compar)(void *a, void *b));
+
+/* Destroy the avl_tree_lock locks
+ */
+void avl_destroy_lock(avl_tree_lock *tree);
+
+int avl_traverse_lock(avl_tree_lock *tree, int (*callback)(void *entry, void *data), void *data);
+int avl_traverse(avl_tree_type *tree, int (*callback)(void *entry, void *data), void *data);
+
+#endif /* avl.h */
diff --git a/libnetdata/buffer/Makefile.am b/libnetdata/buffer/Makefile.am
new file mode 100644
index 0000000..161784b
--- /dev/null
+++ b/libnetdata/buffer/Makefile.am
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/libnetdata/buffer/README.md b/libnetdata/buffer/README.md
new file mode 100644
index 0000000..5a9d7b0
--- /dev/null
+++ b/libnetdata/buffer/README.md
@@ -0,0 +1,17 @@
+<!--
+title: "BUFFER"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/libnetdata/buffer/README.md
+-->
+
+# BUFFER
+
+`BUFFER` is a convenience library for working with strings in `C`.
+Mainly, `BUFFER`s eliminate the need to track the string length manually, thus providing
+a safe alternative for string operations.
+
+They are also very fast at printing and appending data to the string, and `buffer_strlen()`
+is just a lookup (it does not traverse the string).
+
+Netdata uses `BUFFER`s for preparing web responses and buffering data to be sent upstream or
+to backend databases.
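+
+A minimal sketch of the API (the sizes and text are illustrative):
+
+```c
+BUFFER *wb = buffer_create(100);   // initial size - it grows automatically as needed
+
+buffer_strcat(wb, "hello");
+buffer_sprintf(wb, " - %d + %d = %d", 1, 2, 1 + 2);
+
+// buffer_strlen() is a simple lookup; buffer_tostring() null-terminates the data
+printf("%s (%zu bytes)\n", buffer_tostring(wb), buffer_strlen(wb));
+
+buffer_free(wb);
+```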
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Flibnetdata%2Fbuffer%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/libnetdata/buffer/buffer.c b/libnetdata/buffer/buffer.c
new file mode 100644
index 0000000..c37da21
--- /dev/null
+++ b/libnetdata/buffer/buffer.c
@@ -0,0 +1,426 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "../libnetdata.h"
+
+#define BUFFER_OVERFLOW_EOF "EOF"
+
+static inline void buffer_overflow_init(BUFFER *b)
+{
+ b->buffer[b->size] = '\0';
+ strcpy(&b->buffer[b->size + 1], BUFFER_OVERFLOW_EOF);
+}
+
+#ifdef NETDATA_INTERNAL_CHECKS
+#define buffer_overflow_check(b) _buffer_overflow_check(b, __FILE__, __FUNCTION__, __LINE__)
+#else
+#define buffer_overflow_check(b)
+#endif
+
+static inline void _buffer_overflow_check(BUFFER *b, const char *file, const char *function, const unsigned long line)
+{
+ if(b->len > b->size) {
+ error("BUFFER: length %zu is above size %zu, at line %lu, at function %s() of file '%s'.", b->len, b->size, line, function, file);
+ b->len = b->size;
+ }
+
+ if(b->buffer[b->size] != '\0' || strcmp(&b->buffer[b->size + 1], BUFFER_OVERFLOW_EOF) != 0) {
+ error("BUFFER: detected overflow at line %lu, at function %s() of file '%s'.", line, function, file);
+ buffer_overflow_init(b);
+ }
+}
+
+
+void buffer_reset(BUFFER *wb)
+{
+ buffer_flush(wb);
+
+ wb->contenttype = CT_TEXT_PLAIN;
+ wb->options = 0;
+ wb->date = 0;
+ wb->expires = 0;
+
+ buffer_overflow_check(wb);
+}
+
+const char *buffer_tostring(BUFFER *wb)
+{
+ buffer_need_bytes(wb, 1);
+ wb->buffer[wb->len] = '\0';
+
+ buffer_overflow_check(wb);
+
+ return(wb->buffer);
+}
+
+void buffer_char_replace(BUFFER *wb, char from, char to)
+{
+ char *s = wb->buffer, *end = &wb->buffer[wb->len];
+
+ while(s != end) {
+ if(*s == from) *s = to;
+ s++;
+ }
+
+ buffer_overflow_check(wb);
+}
+
+// This trick seems to give an 80% speed increase on 32-bit systems.
+// print_number_llu_r() will just print the digits up to the
+// point the remaining value fits in 32 bits, and then calls
+// print_number_lu_r() to print the rest with 32-bit arithmetic.
+
+inline char *print_number_lu_r(char *str, unsigned long uvalue) {
+ char *wstr = str;
+
+ // print each digit
+ do *wstr++ = (char)('0' + (uvalue % 10)); while(uvalue /= 10);
+ return wstr;
+}
+
+inline char *print_number_llu_r(char *str, unsigned long long uvalue) {
+ char *wstr = str;
+
+ // print each digit
+ do *wstr++ = (char)('0' + (uvalue % 10)); while((uvalue /= 10) && uvalue > (unsigned long long)0xffffffff);
+ if(uvalue) return print_number_lu_r(wstr, uvalue);
+ return wstr;
+}
+
+inline char *print_number_llu_r_smart(char *str, unsigned long long uvalue) {
+#ifdef ENVIRONMENT32
+ if(uvalue > (unsigned long long)0xffffffff)
+ str = print_number_llu_r(str, uvalue);
+ else
+ str = print_number_lu_r(str, uvalue);
+#else
+ do *str++ = (char)('0' + (uvalue % 10)); while(uvalue /= 10);
+#endif
+
+ return str;
+}
+
+void buffer_print_llu(BUFFER *wb, unsigned long long uvalue)
+{
+ buffer_need_bytes(wb, 50);
+
+ char *str = &wb->buffer[wb->len];
+ char *wstr = str;
+
+#ifdef ENVIRONMENT32
+ if(uvalue > (unsigned long long)0xffffffff)
+ wstr = print_number_llu_r(wstr, uvalue);
+ else
+ wstr = print_number_lu_r(wstr, uvalue);
+#else
+ do *wstr++ = (char)('0' + (uvalue % 10)); while(uvalue /= 10);
+#endif
+
+ // terminate it
+ *wstr = '\0';
+
+ // reverse it
+ char *begin = str, *end = wstr - 1, aux;
+ while (end > begin) aux = *end, *end-- = *begin, *begin++ = aux;
+
+ // return the buffer length
+ wb->len += wstr - str;
+}
+
+void buffer_strcat(BUFFER *wb, const char *txt)
+{
+ // buffer_sprintf(wb, "%s", txt);
+
+ if(unlikely(!txt || !*txt)) return;
+
+ buffer_need_bytes(wb, 1);
+
+ char *s = &wb->buffer[wb->len], *start, *end = &wb->buffer[wb->size];
+ size_t len = wb->len;
+
+ start = s;
+ while(*txt && s != end)
+ *s++ = *txt++;
+
+ len += s - start;
+
+ wb->len = len;
+ buffer_overflow_check(wb);
+
+ if(*txt) {
+ debug(D_WEB_BUFFER, "strcat(): increasing web_buffer at position %zu, size = %zu\n", wb->len, wb->size);
+ len = strlen(txt);
+ buffer_increase(wb, len);
+ buffer_strcat(wb, txt);
+ }
+ else {
+ // terminate the string
+ // without increasing the length
+ buffer_need_bytes(wb, (size_t)1);
+ wb->buffer[wb->len] = '\0';
+ }
+}
+
+void buffer_strcat_jsonescape(BUFFER *wb, const char *txt)
+{
+ while(*txt) {
+ switch(*txt) {
+ case '\\':
+ buffer_need_bytes(wb, 2);
+ wb->buffer[wb->len++] = '\\';
+ wb->buffer[wb->len++] = '\\';
+ break;
+ case '"':
+ buffer_need_bytes(wb, 2);
+ wb->buffer[wb->len++] = '\\';
+ wb->buffer[wb->len++] = '"';
+ break;
+ default: {
+ buffer_need_bytes(wb, 1);
+ wb->buffer[wb->len++] = *txt;
+ }
+ }
+ txt++;
+ }
+
+ buffer_overflow_check(wb);
+}
+
+void buffer_strcat_htmlescape(BUFFER *wb, const char *txt)
+{
+ while(*txt) {
+ switch(*txt) {
+ case '&': buffer_strcat(wb, "&amp;"); break;
+ case '<': buffer_strcat(wb, "&lt;"); break;
+ case '>': buffer_strcat(wb, "&gt;"); break;
+ case '"': buffer_strcat(wb, "&quot;"); break;
+ case '/': buffer_strcat(wb, "&#x2F;"); break;
+ case '\'': buffer_strcat(wb, "&#x27;"); break;
+ default: {
+ buffer_need_bytes(wb, 1);
+ wb->buffer[wb->len++] = *txt;
+ }
+ }
+ txt++;
+ }
+
+ buffer_overflow_check(wb);
+}
+
+void buffer_snprintf(BUFFER *wb, size_t len, const char *fmt, ...)
+{
+ if(unlikely(!fmt || !*fmt)) return;
+
+ buffer_need_bytes(wb, len + 1);
+
+ va_list args;
+ va_start(args, fmt);
+ wb->len += vsnprintfz(&wb->buffer[wb->len], len, fmt, args);
+ va_end(args);
+
+ buffer_overflow_check(wb);
+
+ // the buffer is \0 terminated by vsnprintfz
+}
+
+void buffer_vsprintf(BUFFER *wb, const char *fmt, va_list args)
+{
+ if(unlikely(!fmt || !*fmt)) return;
+
+ buffer_need_bytes(wb, 2);
+
+ size_t len = wb->size - wb->len - 1;
+
+ wb->len += vsnprintfz(&wb->buffer[wb->len], len, fmt, args);
+
+ buffer_overflow_check(wb);
+
+ // the buffer is \0 terminated by vsnprintfz
+}
+
+void buffer_sprintf(BUFFER *wb, const char *fmt, ...)
+{
+ if(unlikely(!fmt || !*fmt)) return;
+
+ va_list args;
+ size_t wrote = 0, need = 2, multiplier = 0, len;
+
+ do {
+ need += wrote + multiplier * WEB_DATA_LENGTH_INCREASE_STEP;
+ multiplier++;
+
+ debug(D_WEB_BUFFER, "web_buffer_sprintf(): increasing web_buffer at position %zu, size = %zu, by %zu bytes (wrote = %zu)\n", wb->len, wb->size, need, wrote);
+ buffer_need_bytes(wb, need);
+
+ len = wb->size - wb->len - 1;
+
+ va_start(args, fmt);
+ wrote = (size_t) vsnprintfz(&wb->buffer[wb->len], len, fmt, args);
+ va_end(args);
+
+ } while(wrote >= len);
+
+ wb->len += wrote;
+
+ // the buffer is \0 terminated by vsnprintf
+}
+
+
+void buffer_rrd_value(BUFFER *wb, calculated_number value)
+{
+ buffer_need_bytes(wb, 50);
+
+ if(isnan(value) || isinf(value)) {
+ buffer_strcat(wb, "null");
+ return;
+ }
+ else
+ wb->len += print_calculated_number(&wb->buffer[wb->len], value);
+
+ // terminate it
+ buffer_need_bytes(wb, 1);
+ wb->buffer[wb->len] = '\0';
+
+ buffer_overflow_check(wb);
+}
+
+// generate a JavaScript date, the fastest possible way...
+void buffer_jsdate(BUFFER *wb, int year, int month, int day, int hours, int minutes, int seconds)
+{
+ // 10 20 30 = 35
+ // 01234567890123456789012345678901234
+ // Date(2014,04,01,03,28,20)
+
+ buffer_need_bytes(wb, 30);
+
+ char *b = &wb->buffer[wb->len], *p;
+ unsigned int *q = (unsigned int *)b;
+
+ #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ *q++ = 0x65746144; // "Date" backwards.
+ #else
+ *q++ = 0x44617465; // "Date"
+ #endif
+ p = (char *)q;
+
+ *p++ = '(';
+ *p++ = '0' + year / 1000; year %= 1000;
+ *p++ = '0' + year / 100; year %= 100;
+ *p++ = '0' + year / 10;
+ *p++ = '0' + year % 10;
+ *p++ = ',';
+ *p = '0' + month / 10; if (*p != '0') p++;
+ *p++ = '0' + month % 10;
+ *p++ = ',';
+ *p = '0' + day / 10; if (*p != '0') p++;
+ *p++ = '0' + day % 10;
+ *p++ = ',';
+ *p = '0' + hours / 10; if (*p != '0') p++;
+ *p++ = '0' + hours % 10;
+ *p++ = ',';
+ *p = '0' + minutes / 10; if (*p != '0') p++;
+ *p++ = '0' + minutes % 10;
+ *p++ = ',';
+ *p = '0' + seconds / 10; if (*p != '0') p++;
+ *p++ = '0' + seconds % 10;
+
+ unsigned short *r = (unsigned short *)p;
+
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ *r++ = 0x0029; // ")\0" backwards.
+ #else
+ *r++ = 0x2900; // ")\0"
+ #endif
+
+ wb->len += (size_t)((char *)r - b - 1);
+
+ // terminate it
+ wb->buffer[wb->len] = '\0';
+ buffer_overflow_check(wb);
+}
+
+// generate a date, the fastest possible way...
+void buffer_date(BUFFER *wb, int year, int month, int day, int hours, int minutes, int seconds)
+{
+ // 10 20 30 = 35
+ // 01234567890123456789012345678901234
+ // 2014-04-01 03:28:20
+
+ buffer_need_bytes(wb, 36);
+
+ char *b = &wb->buffer[wb->len];
+ char *p = b;
+
+ *p++ = '0' + year / 1000; year %= 1000;
+ *p++ = '0' + year / 100; year %= 100;
+ *p++ = '0' + year / 10;
+ *p++ = '0' + year % 10;
+ *p++ = '-';
+ *p++ = '0' + month / 10;
+ *p++ = '0' + month % 10;
+ *p++ = '-';
+ *p++ = '0' + day / 10;
+ *p++ = '0' + day % 10;
+ *p++ = ' ';
+ *p++ = '0' + hours / 10;
+ *p++ = '0' + hours % 10;
+ *p++ = ':';
+ *p++ = '0' + minutes / 10;
+ *p++ = '0' + minutes % 10;
+ *p++ = ':';
+ *p++ = '0' + seconds / 10;
+ *p++ = '0' + seconds % 10;
+ *p = '\0';
+
+ wb->len += (size_t)(p - b);
+
+ // terminate it
+ wb->buffer[wb->len] = '\0';
+ buffer_overflow_check(wb);
+}
+
+BUFFER *buffer_create(size_t size)
+{
+ BUFFER *b;
+
+ debug(D_WEB_BUFFER, "Creating new web buffer of size %zu.", size);
+
+ b = callocz(1, sizeof(BUFFER));
+ b->buffer = mallocz(size + sizeof(BUFFER_OVERFLOW_EOF) + 2);
+ b->buffer[0] = '\0';
+ b->size = size;
+ b->contenttype = CT_TEXT_PLAIN;
+ buffer_overflow_init(b);
+ buffer_overflow_check(b);
+
+ return(b);
+}
+
+void buffer_free(BUFFER *b) {
+ if(unlikely(!b)) return;
+
+ buffer_overflow_check(b);
+
+ debug(D_WEB_BUFFER, "Freeing web buffer of size %zu.", b->size);
+
+ freez(b->buffer);
+ freez(b);
+}
+
+void buffer_increase(BUFFER *b, size_t free_size_required) {
+ buffer_overflow_check(b);
+
+ size_t left = b->size - b->len;
+
+ if(left >= free_size_required) return;
+
+ size_t increase = free_size_required - left;
+ if(increase < WEB_DATA_LENGTH_INCREASE_STEP) increase = WEB_DATA_LENGTH_INCREASE_STEP;
+
+ debug(D_WEB_BUFFER, "Increasing data buffer from size %zu to %zu.", b->size, b->size + increase);
+
+ b->buffer = reallocz(b->buffer, b->size + increase + sizeof(BUFFER_OVERFLOW_EOF) + 2);
+ b->size += increase;
+
+ buffer_overflow_init(b);
+ buffer_overflow_check(b);
+}
diff --git a/libnetdata/buffer/buffer.h b/libnetdata/buffer/buffer.h
new file mode 100644
index 0000000..d50910c
--- /dev/null
+++ b/libnetdata/buffer/buffer.h
@@ -0,0 +1,86 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_WEB_BUFFER_H
+#define NETDATA_WEB_BUFFER_H 1
+
+#include "../libnetdata.h"
+
+#define WEB_DATA_LENGTH_INCREASE_STEP 1024
+
+typedef struct web_buffer {
+ size_t size; // allocation size of buffer, in bytes
+ size_t len; // current data length in buffer, in bytes
+ char *buffer; // the buffer itself
+ uint8_t contenttype; // the content type of the data in the buffer
+ uint8_t options; // options related to the content
+ time_t date; // the timestamp this content has been generated
+ time_t expires; // the timestamp this content expires
+} BUFFER;
+
+// options
+#define WB_CONTENT_CACHEABLE 1
+#define WB_CONTENT_NO_CACHEABLE 2
+
+// content-types
+#define CT_APPLICATION_JSON 1
+#define CT_TEXT_PLAIN 2
+#define CT_TEXT_HTML 3
+#define CT_APPLICATION_X_JAVASCRIPT 4
+#define CT_TEXT_CSS 5
+#define CT_TEXT_XML 6
+#define CT_APPLICATION_XML 7
+#define CT_TEXT_XSL 8
+#define CT_APPLICATION_OCTET_STREAM 9
+#define CT_APPLICATION_X_FONT_TRUETYPE 10
+#define CT_APPLICATION_X_FONT_OPENTYPE 11
+#define CT_APPLICATION_FONT_WOFF 12
+#define CT_APPLICATION_FONT_WOFF2 13
+#define CT_APPLICATION_VND_MS_FONTOBJ 14
+#define CT_IMAGE_SVG_XML 15
+#define CT_IMAGE_PNG 16
+#define CT_IMAGE_JPG 17
+#define CT_IMAGE_GIF 18
+#define CT_IMAGE_XICON 19
+#define CT_IMAGE_ICNS 20
+#define CT_IMAGE_BMP 21
+#define CT_PROMETHEUS 22
+
+#define buffer_cacheable(wb) do { (wb)->options |= WB_CONTENT_CACHEABLE; if((wb)->options & WB_CONTENT_NO_CACHEABLE) (wb)->options &= ~WB_CONTENT_NO_CACHEABLE; } while(0)
+#define buffer_no_cacheable(wb) do { (wb)->options |= WB_CONTENT_NO_CACHEABLE; if((wb)->options & WB_CONTENT_CACHEABLE) (wb)->options &= ~WB_CONTENT_CACHEABLE; (wb)->expires = 0; } while(0)
+
+#define buffer_strlen(wb) ((wb)->len)
+extern const char *buffer_tostring(BUFFER *wb);
+
+#define buffer_flush(wb) wb->buffer[(wb)->len = 0] = '\0'
+extern void buffer_reset(BUFFER *wb);
+
+extern void buffer_strcat(BUFFER *wb, const char *txt);
+extern void buffer_rrd_value(BUFFER *wb, calculated_number value);
+
+extern void buffer_date(BUFFER *wb, int year, int month, int day, int hours, int minutes, int seconds);
+extern void buffer_jsdate(BUFFER *wb, int year, int month, int day, int hours, int minutes, int seconds);
+
+extern BUFFER *buffer_create(size_t size);
+extern void buffer_free(BUFFER *b);
+extern void buffer_increase(BUFFER *b, size_t free_size_required);
+
+extern void buffer_snprintf(BUFFER *wb, size_t len, const char *fmt, ...) PRINTFLIKE(3, 4);
+extern void buffer_vsprintf(BUFFER *wb, const char *fmt, va_list args);
+extern void buffer_sprintf(BUFFER *wb, const char *fmt, ...) PRINTFLIKE(2,3);
+extern void buffer_strcat_jsonescape(BUFFER *wb, const char *txt);
+extern void buffer_strcat_htmlescape(BUFFER *wb, const char *txt);
+
+extern void buffer_char_replace(BUFFER *wb, char from, char to);
+
+extern char *print_number_lu_r(char *str, unsigned long uvalue);
+extern char *print_number_llu_r(char *str, unsigned long long uvalue);
+extern char *print_number_llu_r_smart(char *str, unsigned long long uvalue);
+
+extern void buffer_print_llu(BUFFER *wb, unsigned long long uvalue);
+
+static inline void buffer_need_bytes(BUFFER *buffer, size_t needed_free_size) {
+ if(unlikely(buffer->size - buffer->len < needed_free_size))
+ buffer_increase(buffer, needed_free_size);
+}
+
+#endif /* NETDATA_WEB_BUFFER_H */
diff --git a/libnetdata/circular_buffer/Makefile.am b/libnetdata/circular_buffer/Makefile.am
new file mode 100644
index 0000000..161784b
--- /dev/null
+++ b/libnetdata/circular_buffer/Makefile.am
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/libnetdata/circular_buffer/README.md b/libnetdata/circular_buffer/README.md
new file mode 100644
index 0000000..d36c05f
--- /dev/null
+++ b/libnetdata/circular_buffer/README.md
@@ -0,0 +1,12 @@
+<!--
+title: "circular_buffer"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/libnetdata/circular_buffer/README.md
+-->
+
+# Circular Buffer
+
+`struct circular_buffer` is an adaptive circular buffer. It will start at an initial size
+and grow up to a maximum size as it fills. Two indices within the structure track the current
+`read` and `write` positions for data.
+
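+A minimal usage sketch (the sizes are illustrative; the `_unsafe` suffix means
+the caller is responsible for any locking needed):
+
+```c
+struct circular_buffer *cb = cbuffer_new(16, 1024); // start at 16 bytes, grow up to 1 KiB
+
+if(cbuffer_add_unsafe(cb, "hello", 5))
+    ; // fails only when the buffer cannot grow beyond its maximum size
+
+char *start;
+size_t len = cbuffer_next_unsafe(cb, &start); // longest contiguous readable chunk
+// ... consume start[0 .. len) ...
+cbuffer_remove_unsafe(cb, len);               // mark the consumed bytes as read
+
+cbuffer_free(cb);
+```
+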
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Flibnetdata%2Fcircular_buffer%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/libnetdata/circular_buffer/circular_buffer.c b/libnetdata/circular_buffer/circular_buffer.c
new file mode 100644
index 0000000..998008d
--- /dev/null
+++ b/libnetdata/circular_buffer/circular_buffer.c
@@ -0,0 +1,85 @@
+#include "../libnetdata.h"
+
+struct circular_buffer *cbuffer_new(size_t initial, size_t max) {
+ struct circular_buffer *result = mallocz(sizeof(*result));
+ result->size = initial;
+ result->data = mallocz(initial);
+ result->write = 0;
+ result->read = 0;
+ result->max_size = max;
+ return result;
+}
+
+void cbuffer_free(struct circular_buffer *buf) {
+ freez(buf->data);
+ freez(buf);
+}
+
+static int cbuffer_realloc_unsafe(struct circular_buffer *buf) {
+ // Check that we can grow
+ if (buf->size >= buf->max_size)
+ return 1;
+ size_t new_size = buf->size * 2;
+ if (new_size > buf->max_size)
+ new_size = buf->max_size;
+
+ // We know that: size < new_size <= max_size
+ // For simplicity align the current data at the bottom of the new buffer
+ char *new_data = mallocz(new_size);
+ if (buf->read == buf->write)
+ buf->write = 0; // buffer is empty
+ else if (buf->read < buf->write) {
+ memcpy(new_data, buf->data + buf->read, buf->write - buf->read);
+ buf->write -= buf->read;
+ } else {
+ size_t top_part = buf->size - buf->read;
+ memcpy(new_data, buf->data + buf->read, top_part);
+ memcpy(new_data + top_part, buf->data, buf->write);
+ buf->write = top_part + buf->write;
+ }
+ buf->read = 0;
+
+ // Switch buffers
+ freez(buf->data);
+ buf->data = new_data;
+ buf->size = new_size;
+ return 0;
+}
+
+int cbuffer_add_unsafe(struct circular_buffer *buf, const char *d, size_t d_len) {
+ size_t len = (buf->write >= buf->read) ? (buf->write - buf->read) : (buf->size - buf->read + buf->write);
+ while (d_len + len >= buf->size) {
+ if (cbuffer_realloc_unsafe(buf)) {
+ return 1;
+ }
+ }
+ // Guarantee: write + d_len cannot hit read
+ if (buf->write + d_len < buf->size) {
+ memcpy(buf->data + buf->write, d, d_len);
+ buf->write += d_len;
+ }
+ else {
+ size_t top_part = buf->size - buf->write;
+ memcpy(buf->data + buf->write, d, top_part);
+ memcpy(buf->data, d + top_part, d_len - top_part);
+ buf->write = d_len - top_part;
+ }
+ return 0;
+}
+
+// Assume the caller does not remove too many bytes (i.e. read must not jump over write)
+void cbuffer_remove_unsafe(struct circular_buffer *buf, size_t num) {
+ buf->read += num;
+ // Assume num < size (i.e. caller cannot remove more bytes than are in the buffer)
+ if (buf->read >= buf->size)
+ buf->read -= buf->size;
+}
+
+size_t cbuffer_next_unsafe(struct circular_buffer *buf, char **start) {
+ if (start != NULL)
+ *start = buf->data + buf->read;
+ if (buf->read <= buf->write) {
+ return buf->write - buf->read; // Includes empty case
+ }
+ return buf->size - buf->read;
+}
diff --git a/libnetdata/circular_buffer/circular_buffer.h b/libnetdata/circular_buffer/circular_buffer.h
new file mode 100644
index 0000000..ba37e0e
--- /dev/null
+++ b/libnetdata/circular_buffer/circular_buffer.h
@@ -0,0 +1,16 @@
+#ifndef CIRCULAR_BUFFER_H
+#define CIRCULAR_BUFFER_H 1
+
+#include <string.h>
+
+struct circular_buffer {
+ size_t size, write, read, max_size;
+ char *data;
+};
+
+extern struct circular_buffer *cbuffer_new(size_t initial, size_t max);
+extern void cbuffer_free(struct circular_buffer *buf);
+extern int cbuffer_add_unsafe(struct circular_buffer *buf, const char *d, size_t d_len);
+extern void cbuffer_remove_unsafe(struct circular_buffer *buf, size_t num);
+extern size_t cbuffer_next_unsafe(struct circular_buffer *buf, char **start);
+#endif
diff --git a/libnetdata/clocks/Makefile.am b/libnetdata/clocks/Makefile.am
new file mode 100644
index 0000000..161784b
--- /dev/null
+++ b/libnetdata/clocks/Makefile.am
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/libnetdata/clocks/README.md b/libnetdata/clocks/README.md
new file mode 100644
index 0000000..e53b86a
--- /dev/null
+++ b/libnetdata/clocks/README.md
@@ -0,0 +1,5 @@
+<!--
+custom_edit_url: https://github.com/netdata/netdata/edit/master/libnetdata/clocks/README.md
+-->
+
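+# Clocks
+
+`clocks` provides helpers for reading the realtime, monotonic and boottime
+clocks, plus a `heartbeat` facility that lets data collection loops tick at
+fixed intervals. A minimal sketch of the heartbeat API (the exit flag and the
+collection function are hypothetical):
+
+```c
+heartbeat_t hb;
+heartbeat_init(&hb);
+
+while(!exit_requested) { // hypothetical termination flag
+    // sleeps (using the monotonic clock) until the next multiple of the tick,
+    // then returns the realtime microseconds elapsed since the previous call
+    usec_t dt = heartbeat_next(&hb, USEC_PER_SEC);
+    collect_metrics(dt); // hypothetical collector
+}
+```
+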
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Flibnetdata%2Fclocks%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/libnetdata/clocks/clocks.c b/libnetdata/clocks/clocks.c
new file mode 100644
index 0000000..4ec5fa9
--- /dev/null
+++ b/libnetdata/clocks/clocks.c
@@ -0,0 +1,300 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "../libnetdata.h"
+
+static int clock_boottime_valid = 1;
+static int clock_monotonic_coarse_valid = 1;
+
+#ifndef HAVE_CLOCK_GETTIME
+inline int clock_gettime(clockid_t clk_id, struct timespec *ts) {
+ struct timeval tv;
+ if(unlikely(gettimeofday(&tv, NULL) == -1)) {
+ error("gettimeofday() failed.");
+ return -1;
+ }
+ ts->tv_sec = tv.tv_sec;
+ ts->tv_nsec = (tv.tv_usec % USEC_PER_SEC) * NSEC_PER_USEC;
+ return 0;
+}
+#endif
+
+void test_clock_boottime(void) {
+ struct timespec ts;
+ if(clock_gettime(CLOCK_BOOTTIME, &ts) == -1 && errno == EINVAL)
+ clock_boottime_valid = 0;
+}
+
+void test_clock_monotonic_coarse(void) {
+ struct timespec ts;
+ if(clock_gettime(CLOCK_MONOTONIC_COARSE, &ts) == -1 && errno == EINVAL)
+ clock_monotonic_coarse_valid = 0;
+}
+
+static inline time_t now_sec(clockid_t clk_id) {
+ struct timespec ts;
+ if(unlikely(clock_gettime(clk_id, &ts) == -1)) {
+ error("clock_gettime(%d, &timespec) failed.", clk_id);
+ return 0;
+ }
+ return ts.tv_sec;
+}
+
+static inline usec_t now_usec(clockid_t clk_id) {
+ struct timespec ts;
+ if(unlikely(clock_gettime(clk_id, &ts) == -1)) {
+ error("clock_gettime(%d, &timespec) failed.", clk_id);
+ return 0;
+ }
+ return (usec_t)ts.tv_sec * USEC_PER_SEC + (ts.tv_nsec % NSEC_PER_SEC) / NSEC_PER_USEC;
+}
+
+static inline int now_timeval(clockid_t clk_id, struct timeval *tv) {
+ struct timespec ts;
+
+ if(unlikely(clock_gettime(clk_id, &ts) == -1)) {
+ error("clock_gettime(%d, &timespec) failed.", clk_id);
+ tv->tv_sec = 0;
+ tv->tv_usec = 0;
+ return -1;
+ }
+
+ tv->tv_sec = ts.tv_sec;
+ tv->tv_usec = (suseconds_t)((ts.tv_nsec % NSEC_PER_SEC) / NSEC_PER_USEC);
+ return 0;
+}
+
+inline time_t now_realtime_sec(void) {
+ return now_sec(CLOCK_REALTIME);
+}
+
+inline usec_t now_realtime_usec(void) {
+ return now_usec(CLOCK_REALTIME);
+}
+
+inline int now_realtime_timeval(struct timeval *tv) {
+ return now_timeval(CLOCK_REALTIME, tv);
+}
+
+inline time_t now_monotonic_sec(void) {
+ return now_sec(likely(clock_monotonic_coarse_valid) ? CLOCK_MONOTONIC_COARSE : CLOCK_MONOTONIC);
+}
+
+inline usec_t now_monotonic_usec(void) {
+ return now_usec(likely(clock_monotonic_coarse_valid) ? CLOCK_MONOTONIC_COARSE : CLOCK_MONOTONIC);
+}
+
+inline int now_monotonic_timeval(struct timeval *tv) {
+ return now_timeval(likely(clock_monotonic_coarse_valid) ? CLOCK_MONOTONIC_COARSE : CLOCK_MONOTONIC, tv);
+}
+
+inline time_t now_monotonic_high_precision_sec(void) {
+ return now_sec(CLOCK_MONOTONIC);
+}
+
+inline usec_t now_monotonic_high_precision_usec(void) {
+ return now_usec(CLOCK_MONOTONIC);
+}
+
+inline int now_monotonic_high_precision_timeval(struct timeval *tv) {
+ return now_timeval(CLOCK_MONOTONIC, tv);
+}
+
+inline time_t now_boottime_sec(void) {
+ return now_sec(likely(clock_boottime_valid) ? CLOCK_BOOTTIME :
+ likely(clock_monotonic_coarse_valid) ? CLOCK_MONOTONIC_COARSE : CLOCK_MONOTONIC);
+}
+
+inline usec_t now_boottime_usec(void) {
+ return now_usec(likely(clock_boottime_valid) ? CLOCK_BOOTTIME :
+ likely(clock_monotonic_coarse_valid) ? CLOCK_MONOTONIC_COARSE : CLOCK_MONOTONIC);
+}
+
+inline int now_boottime_timeval(struct timeval *tv) {
+ return now_timeval(likely(clock_boottime_valid) ? CLOCK_BOOTTIME :
+ likely(clock_monotonic_coarse_valid) ? CLOCK_MONOTONIC_COARSE : CLOCK_MONOTONIC,
+ tv);
+}
+
+inline usec_t timeval_usec(struct timeval *tv) {
+ return (usec_t)tv->tv_sec * USEC_PER_SEC + (tv->tv_usec % USEC_PER_SEC);
+}
+
+inline msec_t timeval_msec(struct timeval *tv) {
+ return (msec_t)tv->tv_sec * MSEC_PER_SEC + ((tv->tv_usec % USEC_PER_SEC) / MSEC_PER_SEC);
+}
+
+inline susec_t dt_usec_signed(struct timeval *now, struct timeval *old) {
+ usec_t ts1 = timeval_usec(now);
+ usec_t ts2 = timeval_usec(old);
+
+ if(likely(ts1 >= ts2)) return (susec_t)(ts1 - ts2);
+ return -((susec_t)(ts2 - ts1));
+}
+
+inline usec_t dt_usec(struct timeval *now, struct timeval *old) {
+ usec_t ts1 = timeval_usec(now);
+ usec_t ts2 = timeval_usec(old);
+ return (ts1 > ts2) ? (ts1 - ts2) : (ts2 - ts1);
+}
+
+inline void heartbeat_init(heartbeat_t *hb)
+{
+ hb->monotonic = hb->realtime = 0ULL;
+}
+
+// waits for the next heartbeat
+// it waits using the monotonic clock
+// it returns the dt using the realtime clock
+
+usec_t heartbeat_next(heartbeat_t *hb, usec_t tick) {
+ heartbeat_t now;
+ now.monotonic = now_monotonic_usec();
+ now.realtime = now_realtime_usec();
+
+ usec_t next_monotonic = now.monotonic - (now.monotonic % tick) + tick;
+
+ while(now.monotonic < next_monotonic) {
+ sleep_usec(next_monotonic - now.monotonic);
+ now.monotonic = now_monotonic_usec();
+ now.realtime = now_realtime_usec();
+ }
+
+ if(likely(hb->realtime != 0ULL)) {
+ usec_t dt_monotonic = now.monotonic - hb->monotonic;
+ usec_t dt_realtime = now.realtime - hb->realtime;
+
+ hb->monotonic = now.monotonic;
+ hb->realtime = now.realtime;
+
+ if(unlikely(dt_monotonic >= tick + tick / 2)) {
+ errno = 0;
+ error("heartbeat missed %llu monotonic microseconds", dt_monotonic - tick);
+ }
+
+ return dt_realtime;
+ }
+ else {
+ hb->monotonic = now.monotonic;
+ hb->realtime = now.realtime;
+ return 0ULL;
+ }
+}
+
+// returns the elapsed time since the last heartbeat,
+// measured with the monotonic clock
+
+inline usec_t heartbeat_monotonic_dt_to_now_usec(heartbeat_t *hb) {
+ if(!hb || !hb->monotonic) return 0ULL;
+ return now_monotonic_usec() - hb->monotonic;
+}
+
+int sleep_usec(usec_t usec) {
+
+#ifndef NETDATA_WITH_USLEEP
+ // we expect microseconds (1,000,000 per second)
+ // but timespec is nanoseconds (1,000,000,000 per second)
+ struct timespec rem, req = {
+ .tv_sec = (time_t) (usec / 1000000),
+ .tv_nsec = (long) ((usec % 1000000) * 1000)
+ };
+
+ while (nanosleep(&req, &rem) == -1) {
+ if (likely(errno == EINTR)) {
+ debug(D_SYSTEM, "nanosleep() interrupted (while sleeping for %llu microseconds).", usec);
+ req.tv_sec = rem.tv_sec;
+ req.tv_nsec = rem.tv_nsec;
+ } else {
+ error("Cannot nanosleep() for %llu microseconds.", usec);
+ break;
+ }
+ }
+
+ return 0;
+#else
+ int ret = usleep(usec);
+ if(unlikely(ret == -1 && errno == EINVAL)) {
+ // on some systems, usleep() cannot sleep for more than 999999 microseconds
+ if(usec > 999999) {
+ int counter = usec / 999999;
+ while(counter--)
+ usleep(999999);
+
+ usleep(usec % 999999);
+ }
+ else {
+ error("Cannot usleep() for %llu microseconds.", usec);
+ return ret;
+ }
+ }
+
+ if(ret != 0)
+ error("usleep() failed for %llu microseconds.", usec);
+
+ return ret;
+#endif
+}
+
+static inline collected_number uptime_from_boottime(void) {
+#ifdef CLOCK_BOOTTIME_IS_AVAILABLE
+ return now_boottime_usec() / 1000;
+#else
+ error("uptime cannot be read from CLOCK_BOOTTIME on this system.");
+ return 0;
+#endif
+}
+
+static procfile *read_proc_uptime_ff = NULL;
+static inline collected_number read_proc_uptime(char *filename) {
+ if(unlikely(!read_proc_uptime_ff)) {
+ read_proc_uptime_ff = procfile_open(filename, " \t", PROCFILE_FLAG_DEFAULT);
+ if(unlikely(!read_proc_uptime_ff)) return 0;
+ }
+
+ read_proc_uptime_ff = procfile_readall(read_proc_uptime_ff);
+ if(unlikely(!read_proc_uptime_ff)) return 0;
+
+ if(unlikely(procfile_lines(read_proc_uptime_ff) < 1)) {
+ error("/proc/uptime has no lines.");
+ return 0;
+ }
+ if(unlikely(procfile_linewords(read_proc_uptime_ff, 0) < 1)) {
+ error("/proc/uptime has less than 1 word in it.");
+ return 0;
+ }
+
+ return (collected_number)(strtold(procfile_lineword(read_proc_uptime_ff, 0, 0), NULL) * 1000.0);
+}
+
+inline collected_number uptime_msec(char *filename){
+ static int use_boottime = -1;
+
+ if(unlikely(use_boottime == -1)) {
+ collected_number uptime_boottime = uptime_from_boottime();
+ collected_number uptime_proc = read_proc_uptime(filename);
+
+ long long delta = (long long)uptime_boottime - (long long)uptime_proc;
+ if(delta < 0) delta = -delta;
+
+ if(delta <= 1000 && uptime_boottime != 0) {
+ procfile_close(read_proc_uptime_ff);
+ info("Using now_boottime_usec() for uptime (dt is %lld ms)", delta);
+ use_boottime = 1;
+ }
+ else if(uptime_proc != 0) {
+ info("Using /proc/uptime for uptime (dt is %lld ms)", delta);
+ use_boottime = 0;
+ }
+ else {
+ error("Cannot find any way to read uptime on this system.");
+ return 1;
+ }
+ }
+
+ collected_number uptime;
+ if(use_boottime)
+ uptime = uptime_from_boottime();
+ else
+ uptime = read_proc_uptime(filename);
+
+ return uptime;
+}
diff --git a/libnetdata/clocks/clocks.h b/libnetdata/clocks/clocks.h
new file mode 100644
index 0000000..cfe99f5
--- /dev/null
+++ b/libnetdata/clocks/clocks.h
@@ -0,0 +1,169 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_CLOCKS_H
+#define NETDATA_CLOCKS_H 1
+
+#include "../libnetdata.h"
+
+#ifndef HAVE_STRUCT_TIMESPEC
+struct timespec {
+ time_t tv_sec; /* seconds */
+ long tv_nsec; /* nanoseconds */
+};
+#endif
+
+#ifndef HAVE_CLOCKID_T
+typedef int clockid_t;
+#endif
+
+typedef unsigned long long nsec_t;
+typedef unsigned long long msec_t;
+typedef unsigned long long usec_t;
+typedef long long susec_t;
+
+typedef struct heartbeat {
+ usec_t monotonic;
+ usec_t realtime;
+} heartbeat_t;
+
+/* Linux value is as good as any other */
+#ifndef CLOCK_REALTIME
+#define CLOCK_REALTIME 0
+#endif
+
+#ifndef CLOCK_MONOTONIC
+/* fallback to CLOCK_REALTIME if not available */
+#define CLOCK_MONOTONIC CLOCK_REALTIME
+#endif
+
+/* Prefer CLOCK_MONOTONIC_COARSE where available to reduce overhead. It has the same semantics as CLOCK_MONOTONIC */
+#ifndef CLOCK_MONOTONIC_COARSE
+/* fallback to CLOCK_MONOTONIC if not available */
+#define CLOCK_MONOTONIC_COARSE CLOCK_MONOTONIC
+#endif
+
+#ifndef CLOCK_BOOTTIME
+
+#ifdef CLOCK_UPTIME
+/* CLOCK_BOOTTIME falls back to CLOCK_UPTIME on FreeBSD */
+#define CLOCK_BOOTTIME CLOCK_UPTIME
+#else // CLOCK_UPTIME
+/* CLOCK_BOOTTIME falls back to CLOCK_MONOTONIC */
+#define CLOCK_BOOTTIME CLOCK_MONOTONIC_COARSE
+#endif // CLOCK_UPTIME
+
+#else // CLOCK_BOOTTIME
+
+#ifdef HAVE_CLOCK_GETTIME
+#define CLOCK_BOOTTIME_IS_AVAILABLE 1 // required for /proc/uptime
+#endif // HAVE_CLOCK_GETTIME
+
+#endif // CLOCK_BOOTTIME
+
+#ifndef NSEC_PER_MSEC
+#define NSEC_PER_MSEC 1000000ULL
+#endif
+
+#ifndef NSEC_PER_SEC
+#define NSEC_PER_SEC 1000000000ULL
+#endif
+#ifndef NSEC_PER_USEC
+#define NSEC_PER_USEC 1000ULL
+#endif
+
+#ifndef USEC_PER_SEC
+#define USEC_PER_SEC 1000000ULL
+#endif
+#ifndef MSEC_PER_SEC
+#define MSEC_PER_SEC 1000ULL
+#endif
+
+#define USEC_PER_MS 1000ULL
+
+#ifndef HAVE_CLOCK_GETTIME
+/* Fallback function for POSIX.1-2001 clock_gettime() function.
+ *
+ * We use the realtime clock from gettimeofday(); this makes
+ * systems without clock_gettime() support sensitive to time
+ * jumps and hibernation/suspend side effects.
+ */
+extern int clock_gettime(clockid_t clk_id, struct timespec *ts);
+#endif
+
+/*
+ * Three clocks are available (cf. man 3 clock_gettime):
+ *
+ * REALTIME clock (i.e. wall-clock):
+ * This clock is affected by discontinuous jumps in the system time
+ * (e.g., if the system administrator manually changes the clock), and by the incremental adjustments performed by adjtime(3) and NTP.
+ *
+ * MONOTONIC clock
+ * Clock that cannot be set and represents monotonic time since some unspecified starting point.
+ * This clock is not affected by discontinuous jumps in the system time
+ * (e.g., if the system administrator manually changes the clock), but is affected by the incremental adjustments performed by adjtime(3) and NTP.
+ * If not available on the system, this clock falls back to REALTIME clock.
+ *
+ * BOOTTIME clock
+ * Identical to CLOCK_MONOTONIC, except it also includes any time that the system is suspended.
+ * This allows applications to get a suspend-aware monotonic clock without having to deal with the complications of CLOCK_REALTIME,
+ * which may have discontinuities if the time is changed using settimeofday(2).
+ * If not available on the system, this clock falls back to MONOTONIC clock.
+ *
+ * All now_*_timeval() functions fill the `struct timeval` with the time from the appropriate clock.
+ * Those functions return 0 on success, -1 else with errno set appropriately.
+ *
+ * All now_*_sec() functions return the time in seconds from the appropriate clock, or 0 on error.
+ * All now_*_usec() functions return the time in microseconds from the appropriate clock, or 0 on error.
+ *
+ * Most functions will attempt to use CLOCK_MONOTONIC_COARSE if available to reduce contention overhead and improve
+ * performance scaling. If high precision is required please use one of the available now_*_high_precision_* functions.
+ */
+extern int now_realtime_timeval(struct timeval *tv);
+extern time_t now_realtime_sec(void);
+extern usec_t now_realtime_usec(void);
+
+extern int now_monotonic_timeval(struct timeval *tv);
+extern time_t now_monotonic_sec(void);
+extern usec_t now_monotonic_usec(void);
+extern int now_monotonic_high_precision_timeval(struct timeval *tv);
+extern time_t now_monotonic_high_precision_sec(void);
+extern usec_t now_monotonic_high_precision_usec(void);
+
+extern int now_boottime_timeval(struct timeval *tv);
+extern time_t now_boottime_sec(void);
+extern usec_t now_boottime_usec(void);
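+
+/*
+ * A minimal usage sketch (illustrative only; do_work() is a hypothetical
+ * function, not part of this API):
+ *
+ *     usec_t t0 = now_monotonic_usec();            // immune to wall-clock jumps
+ *     do_work();
+ *     usec_t duration = now_monotonic_usec() - t0; // elapsed microseconds
+ *     time_t wall = now_realtime_sec();            // wall-clock timestamp for reporting
+ */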
+
+
+extern usec_t timeval_usec(struct timeval *tv);
+extern msec_t timeval_msec(struct timeval *tv);
+
+extern usec_t dt_usec(struct timeval *now, struct timeval *old);
+extern susec_t dt_usec_signed(struct timeval *now, struct timeval *old);
+
+extern void heartbeat_init(heartbeat_t *hb);
+
+/* Sleeps until next multiple of tick using monotonic clock.
+ * Returns elapsed time in microseconds since previous heartbeat
+ */
+extern usec_t heartbeat_next(heartbeat_t *hb, usec_t tick);
+
+/* Returns elapsed time in microseconds since last heartbeat */
+extern usec_t heartbeat_monotonic_dt_to_now_usec(heartbeat_t *hb);
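+
+/*
+ * Typical collection-loop sketch (illustrative only; collect_once() is a
+ * hypothetical callback):
+ *
+ *     heartbeat_t hb;
+ *     heartbeat_init(&hb);
+ *     for(;;) {
+ *         usec_t dt = heartbeat_next(&hb, USEC_PER_SEC); // wake on 1-second boundaries
+ *         collect_once(dt); // dt = realtime microseconds since the previous beat
+ *     }
+ */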
+
+extern int sleep_usec(usec_t usec);
+
+/*
+ * When running a binary with CLOCK_BOOTTIME defined on a system with a Linux kernel older than 2.6.39,
+ * the clock_gettime(2) system call fails with EINVAL. In that case we must fall back to CLOCK_MONOTONIC.
+ */
+void test_clock_boottime(void);
+
+/*
+ * When running a binary with CLOCK_MONOTONIC_COARSE defined on a system with a Linux kernel older than 2.6.32,
+ * the clock_gettime(2) system call fails with EINVAL. In that case we must fall back to CLOCK_MONOTONIC.
+ */
+void test_clock_monotonic_coarse(void);
+
+extern collected_number uptime_msec(char *filename);
+
+#endif /* NETDATA_CLOCKS_H */
diff --git a/libnetdata/config/Makefile.am b/libnetdata/config/Makefile.am
new file mode 100644
index 0000000..161784b
--- /dev/null
+++ b/libnetdata/config/Makefile.am
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/libnetdata/config/README.md b/libnetdata/config/README.md
new file mode 100644
index 0000000..a71f1ee
--- /dev/null
+++ b/libnetdata/config/README.md
@@ -0,0 +1,54 @@
+<!--
+title: "Netdata ini config files"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/libnetdata/config/README.md
+-->
+
+# Netdata ini config files
+
+Configuration files `netdata.conf` and `stream.conf` are Netdata ini files.
+
+## Motivation
+
+The whole idea came up when we were evaluating the documentation involved
+in maintaining a complex configuration system. Our intention was to give
+configuration options for everything imaginable. But then, documenting all
+these options would require a tremendous amount of time, and users would
+have to search through endless pages for the option they need.
+
+We concluded then that **configuring software like that is a waste of time
+and effort**. Of course there must be plenty of configuration options, but
+the implementation itself should require a lot less effort for both the
+developers and the users.
+
+So, we did this:
+
+1. No configuration is required to run Netdata
+2. There are plenty of options to tweak
+3. There is minimal documentation (or none at all)
+
+## Why this works
+
+The configuration file is a `name = value` dictionary with `[sections]`.
+Write whatever you like there, as long as it follows this simple format.
+
+Netdata loads this dictionary and, whenever the code needs a value, it
+just looks up the `name` in the dictionary under the proper `section`.
+Everywhere in the code, both the `names` and their `default values` are
+supplied, so if something is not found in the configuration file, the
+default is used. Lookups use hashes and AVL trees (string comparisons
+happen only on hash matches), so they are super fast. Also, the `names`
+of the settings can be as descriptive as
+`my super duper setting that once set to yes, will turn the world upside down = no`
+- so goodbye to most of the documentation involved.
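+
+For example, a short (hypothetical) fragment that follows this format:
+
+```
+[global]
+    memory mode = ram
+    # update every = 1
+```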
+
+Next, Netdata can generate a valid configuration for the user to edit.
+No need to remember anything or copy and paste settings. Just get the
+configuration from the server (`/netdata.conf` on your Netdata server),
+edit it and save it.
+
+Last, what about options you believe you have set, but misspelled?
+When you get the configuration file from the server, there will be a
+comment above every `name = value` pair the server does not use, so you
+know that whatever you wrote there is not used.
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Flibnetdata%2Fconfig%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/libnetdata/config/appconfig.c b/libnetdata/config/appconfig.c
new file mode 100644
index 0000000..d9dcde7
--- /dev/null
+++ b/libnetdata/config/appconfig.c
@@ -0,0 +1,883 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "../libnetdata.h"
+
+/*
+ * @Input:
+ * Connector / instance to add to an internal structure.
+ * When connector is NULL: if instance is also NULL the current head is
+ * returned unchanged; otherwise the internal list is freed and NULL is returned.
+ * @Return:
+ * The current head of the linked list of connector_instance
+ */
+
+_CONNECTOR_INSTANCE *add_connector_instance(struct section *connector, struct section *instance)
+{
+ static struct _connector_instance *global_connector_instance = NULL;
+ struct _connector_instance *local_ci, *local_ci_tmp;
+
+ if (unlikely(!connector)) {
+ if (unlikely(!instance))
+ return global_connector_instance;
+
+ local_ci = global_connector_instance;
+ while (local_ci) {
+ local_ci_tmp = local_ci->next;
+ freez(local_ci);
+ local_ci = local_ci_tmp;
+ }
+ global_connector_instance = NULL;
+ return NULL;
+ }
+
+ local_ci = callocz(1, sizeof(struct _connector_instance));
+ local_ci->instance = instance;
+ local_ci->connector = connector;
+ strncpy(local_ci->instance_name, instance->name, CONFIG_MAX_NAME);
+ strncpy(local_ci->connector_name, connector->name, CONFIG_MAX_NAME);
+ local_ci->next = global_connector_instance;
+ global_connector_instance = local_ci;
+
+ return global_connector_instance;
+}
+
+int is_valid_connector(char *type, int check_reserved)
+{
+ int rc = 1;
+
+ if (unlikely(!type))
+ return 0;
+
+ if (!check_reserved) {
+ if (unlikely(is_valid_connector(type,1))) {
+ return 0;
+ }
+ //if (unlikely(*type == ':')
+ // return 0;
+ char *separator = strrchr(type, ':');
+ if (likely(separator)) {
+ *separator = '\0';
+ rc = separator - type;
+ } else
+ return 0;
+ }
+// else {
+// if (unlikely(is_valid_connector(type,1))) {
+// error("Section %s invalid -- reserved name", type);
+// return 0;
+// }
+// }
+
+ if (!strcmp(type, "graphite") || !strcmp(type, "graphite:plaintext")) {
+ return rc;
+ } else if (!strcmp(type, "graphite:http") || !strcmp(type, "graphite:https")) {
+ return rc;
+ } else if (!strcmp(type, "json") || !strcmp(type, "json:plaintext")) {
+ return rc;
+ } else if (!strcmp(type, "json:http") || !strcmp(type, "json:https")) {
+ return rc;
+ } else if (!strcmp(type, "opentsdb") || !strcmp(type, "opentsdb:telnet")) {
+ return rc;
+ } else if (!strcmp(type, "opentsdb:http") || !strcmp(type, "opentsdb:https")) {
+ return rc;
+ } else if (!strcmp(type, "prometheus_remote_write")) {
+ return rc;
+ } else if (!strcmp(type, "prometheus_remote_write:http") || !strcmp(type, "prometheus_remote_write:https")) {
+ return rc;
+ } else if (!strcmp(type, "kinesis") || !strcmp(type, "kinesis:plaintext")) {
+ return rc;
+ } else if (!strcmp(type, "pubsub") || !strcmp(type, "pubsub:plaintext")) {
+ return rc;
+ } else if (!strcmp(type, "mongodb") || !strcmp(type, "mongodb:plaintext")) {
+ return rc;
+ }
+
+ return 0;
+}
+
+// ----------------------------------------------------------------------------
+// locking
+
+inline void appconfig_wrlock(struct config *root) {
+ netdata_mutex_lock(&root->mutex);
+}
+
+inline void appconfig_unlock(struct config *root) {
+ netdata_mutex_unlock(&root->mutex);
+}
+
+inline void config_section_wrlock(struct section *co) {
+ netdata_mutex_lock(&co->mutex);
+}
+
+inline void config_section_unlock(struct section *co) {
+ netdata_mutex_unlock(&co->mutex);
+}
+
+
+// ----------------------------------------------------------------------------
+// config name-value index
+
+static int appconfig_option_compare(void *a, void *b) {
+ if(((struct config_option *)a)->hash < ((struct config_option *)b)->hash) return -1;
+ else if(((struct config_option *)a)->hash > ((struct config_option *)b)->hash) return 1;
+ else return strcmp(((struct config_option *)a)->name, ((struct config_option *)b)->name);
+}
+
+#define appconfig_option_index_add(co, cv) (struct config_option *)avl_insert_lock(&((co)->values_index), (avl *)(cv))
+#define appconfig_option_index_del(co, cv) (struct config_option *)avl_remove_lock(&((co)->values_index), (avl *)(cv))
+
+static struct config_option *appconfig_option_index_find(struct section *co, const char *name, uint32_t hash) {
+ struct config_option tmp;
+ tmp.hash = (hash)?hash:simple_hash(name);
+ tmp.name = (char *)name;
+
+ return (struct config_option *)avl_search_lock(&(co->values_index), (avl *) &tmp);
+}
+
+
+// ----------------------------------------------------------------------------
+// config sections index
+
+int appconfig_section_compare(void *a, void *b) {
+ if(((struct section *)a)->hash < ((struct section *)b)->hash) return -1;
+ else if(((struct section *)a)->hash > ((struct section *)b)->hash) return 1;
+ else return strcmp(((struct section *)a)->name, ((struct section *)b)->name);
+}
+
+#define appconfig_index_add(root, cfg) (struct section *)avl_insert_lock(&(root)->index, (avl *)(cfg))
+#define appconfig_index_del(root, cfg) (struct section *)avl_remove_lock(&(root)->index, (avl *)(cfg))
+
+static struct section *appconfig_index_find(struct config *root, const char *name, uint32_t hash) {
+ struct section tmp;
+ tmp.hash = (hash)?hash:simple_hash(name);
+ tmp.name = (char *)name;
+
+ return (struct section *)avl_search_lock(&root->index, (avl *) &tmp);
+}
+
+
+// ----------------------------------------------------------------------------
+// config section methods
+
+static inline struct section *appconfig_section_find(struct config *root, const char *section) {
+ return appconfig_index_find(root, section, 0);
+}
+
+static inline struct section *appconfig_section_create(struct config *root, const char *section) {
+ debug(D_CONFIG, "Creating section '%s'.", section);
+
+ struct section *co = callocz(1, sizeof(struct section));
+ co->name = strdupz(section);
+ co->hash = simple_hash(co->name);
+ netdata_mutex_init(&co->mutex);
+
+ avl_init_lock(&co->values_index, appconfig_option_compare);
+
+ if(unlikely(appconfig_index_add(root, co) != co))
+ error("INTERNAL ERROR: indexing of section '%s', already exists.", co->name);
+
+ appconfig_wrlock(root);
+ struct section *co2 = root->last_section;
+ if(co2) {
+ co2->next = co;
+ } else {
+ root->first_section = co;
+ }
+ root->last_section = co;
+ appconfig_unlock(root);
+
+ return co;
+}
+
+void appconfig_section_destroy_non_loaded(struct config *root, const char *section)
+{
+ struct section *co;
+ struct config_option *cv, *cv_next;
+
+ debug(D_CONFIG, "Destroying section '%s'.", section);
+
+ co = appconfig_section_find(root, section);
+ if(!co) {
+ error("Could not destroy section '%s'. Not found.", section);
+ return;
+ }
+
+ config_section_wrlock(co);
+ for(cv = co->values; cv ; cv = cv->next) {
+ if (cv->flags & CONFIG_VALUE_LOADED) {
+ /* Do not destroy values that were loaded from the configuration files. */
+ config_section_unlock(co);
+ return;
+ }
+ }
+ for(cv = co->values ; cv ; cv = cv_next) {
+ cv_next = cv->next;
+ if(unlikely(!appconfig_option_index_del(co, cv)))
+ error("Cannot remove config option '%s' from section '%s'.", cv->name, co->name);
+ freez(cv->value);
+ freez(cv->name);
+ freez(cv);
+ }
+ co->values = NULL;
+ config_section_unlock(co);
+
+ if (unlikely(!appconfig_index_del(root, co))) {
+ error("Cannot remove section '%s' from config.", section);
+ return;
+ }
+
+ avl_destroy_lock(&co->values_index);
+ freez(co->name);
+ pthread_mutex_destroy(&co->mutex);
+ freez(co);
+}
+
+
+// ----------------------------------------------------------------------------
+// config name-value methods
+
+static inline struct config_option *appconfig_value_create(struct section *co, const char *name, const char *value) {
+ debug(D_CONFIG, "Creating config entry for name '%s', value '%s', in section '%s'.", name, value, co->name);
+
+ struct config_option *cv = callocz(1, sizeof(struct config_option));
+ cv->name = strdupz(name);
+ cv->hash = simple_hash(cv->name);
+ cv->value = strdupz(value);
+
+ struct config_option *found = appconfig_option_index_add(co, cv);
+ if(found != cv) {
+ error("indexing of config '%s' in section '%s': already exists - using the existing one.", cv->name, co->name);
+ freez(cv->value);
+ freez(cv->name);
+ freez(cv);
+ return found;
+ }
+
+ config_section_wrlock(co);
+ struct config_option *cv2 = co->values;
+ if(cv2) {
+ while (cv2->next) cv2 = cv2->next;
+ cv2->next = cv;
+ }
+ else co->values = cv;
+ config_section_unlock(co);
+
+ return cv;
+}
+
+int appconfig_exists(struct config *root, const char *section, const char *name) {
+ struct config_option *cv;
+
+ debug(D_CONFIG, "request to get config in section '%s', name '%s'", section, name);
+
+ struct section *co = appconfig_section_find(root, section);
+ if(!co) return 0;
+
+ cv = appconfig_option_index_find(co, name, 0);
+ if(!cv) return 0;
+
+ return 1;
+}
+
+int appconfig_move(struct config *root, const char *section_old, const char *name_old, const char *section_new, const char *name_new) {
+ struct config_option *cv_old, *cv_new;
+ int ret = -1;
+
+ debug(D_CONFIG, "request to rename config in section '%s', old name '%s', to section '%s', new name '%s'", section_old, name_old, section_new, name_new);
+
+ struct section *co_old = appconfig_section_find(root, section_old);
+ if(!co_old) return ret;
+
+ struct section *co_new = appconfig_section_find(root, section_new);
+ if(!co_new) co_new = appconfig_section_create(root, section_new);
+
+ config_section_wrlock(co_old);
+ if(co_old != co_new)
+ config_section_wrlock(co_new);
+
+ cv_old = appconfig_option_index_find(co_old, name_old, 0);
+ if(!cv_old) goto cleanup;
+
+ cv_new = appconfig_option_index_find(co_new, name_new, 0);
+ if(cv_new) goto cleanup;
+
+ if(unlikely(appconfig_option_index_del(co_old, cv_old) != cv_old))
+ error("INTERNAL ERROR: deletion of config '%s' from section '%s', deleted tge wrong config entry.", cv_old->name, co_old->name);
+
+ if(co_old->values == cv_old) {
+ co_old->values = cv_old->next;
+ }
+ else {
+ struct config_option *t;
+ for(t = co_old->values; t && t->next != cv_old ;t = t->next) ;
+ if(!t || t->next != cv_old)
+ error("INTERNAL ERROR: cannot find variable '%s' in section '%s' of the config - but it should be there.", cv_old->name, co_old->name);
+ else
+ t->next = cv_old->next;
+ }
+
+ freez(cv_old->name);
+ cv_old->name = strdupz(name_new);
+ cv_old->hash = simple_hash(cv_old->name);
+
+ cv_new = cv_old;
+ cv_new->next = co_new->values;
+ co_new->values = cv_new;
+
+ if(unlikely(appconfig_option_index_add(co_new, cv_old) != cv_old))
+ error("INTERNAL ERROR: re-indexing of config '%s' in section '%s', already exists.", cv_old->name, co_new->name);
+
+ ret = 0;
+
+cleanup:
+ if(co_old != co_new)
+ config_section_unlock(co_new);
+ config_section_unlock(co_old);
+ return ret;
+}
+
+char *appconfig_get_by_section(struct section *co, const char *name, const char *default_value)
+{
+ struct config_option *cv;
+
+ // Only calls internal to this file check for a NULL result, and they do not supply a NULL default.
+ // External callers should treat NULL as an error case.
+ cv = appconfig_option_index_find(co, name, 0);
+ if (!cv) {
+ if (!default_value) return NULL;
+ cv = appconfig_value_create(co, name, default_value);
+ if (!cv) return NULL;
+ }
+ cv->flags |= CONFIG_VALUE_USED;
+
+ if((cv->flags & CONFIG_VALUE_LOADED) || (cv->flags & CONFIG_VALUE_CHANGED)) {
+ // this is a loaded value from the config file
+ // if it is different than the default, mark it
+ if(!(cv->flags & CONFIG_VALUE_CHECKED)) {
+ if(default_value && strcmp(cv->value, default_value) != 0) cv->flags |= CONFIG_VALUE_CHANGED;
+ cv->flags |= CONFIG_VALUE_CHECKED;
+ }
+ }
+
+ return(cv->value);
+}
+
+
+char *appconfig_get(struct config *root, const char *section, const char *name, const char *default_value)
+{
+ if (default_value == NULL)
+ debug(D_CONFIG, "request to get config in section '%s', name '%s' or fail", section, name);
+ else
+ debug(D_CONFIG, "request to get config in section '%s', name '%s', default_value '%s'", section, name, default_value);
+
+ struct section *co = appconfig_section_find(root, section);
+ if (!co && !default_value)
+ return NULL;
+ if(!co) co = appconfig_section_create(root, section);
+
+ return appconfig_get_by_section(co, name, default_value);
+}
+
+long long appconfig_get_number(struct config *root, const char *section, const char *name, long long value)
+{
+ char buffer[100], *s;
+ sprintf(buffer, "%lld", value);
+
+ s = appconfig_get(root, section, name, buffer);
+ if(!s) return value;
+
+ return strtoll(s, NULL, 0);
+}
+
+LONG_DOUBLE appconfig_get_float(struct config *root, const char *section, const char *name, LONG_DOUBLE value)
+{
+ char buffer[100], *s;
+ sprintf(buffer, "%0.5" LONG_DOUBLE_MODIFIER, value);
+
+ s = appconfig_get(root, section, name, buffer);
+ if(!s) return value;
+
+ return str2ld(s, NULL);
+}
+
+static inline int appconfig_test_boolean_value(char *s) {
+ if(!strcasecmp(s, "yes") || !strcasecmp(s, "true") || !strcasecmp(s, "on")
+ || !strcasecmp(s, "auto") || !strcasecmp(s, "on demand"))
+ return 1;
+
+ return 0;
+}
+
+int appconfig_get_boolean_by_section(struct section *co, const char *name, int value) {
+ char *s;
+
+ s = appconfig_get_by_section(co, name, (!value)?"no":"yes");
+ if(!s) return value;
+
+ return appconfig_test_boolean_value(s);
+}
+
+int appconfig_get_boolean(struct config *root, const char *section, const char *name, int value)
+{
+ char *s;
+ if(value) s = "yes";
+ else s = "no";
+
+ s = appconfig_get(root, section, name, s);
+ if(!s) return value;
+
+ return appconfig_test_boolean_value(s);
+}
+
+int appconfig_get_boolean_ondemand(struct config *root, const char *section, const char *name, int value)
+{
+ char *s;
+
+ if(value == CONFIG_BOOLEAN_AUTO)
+ s = "auto";
+
+ else if(value == CONFIG_BOOLEAN_NO)
+ s = "no";
+
+ else
+ s = "yes";
+
+ s = appconfig_get(root, section, name, s);
+ if(!s) return value;
+
+ if(!strcmp(s, "yes"))
+ return CONFIG_BOOLEAN_YES;
+ else if(!strcmp(s, "no"))
+ return CONFIG_BOOLEAN_NO;
+ else if(!strcmp(s, "auto") || !strcmp(s, "on demand"))
+ return CONFIG_BOOLEAN_AUTO;
+
+ return value;
+}
+
+const char *appconfig_set_default(struct config *root, const char *section, const char *name, const char *value)
+{
+ struct config_option *cv;
+
+ debug(D_CONFIG, "request to set default config in section '%s', name '%s', value '%s'", section, name, value);
+
+ struct section *co = appconfig_section_find(root, section);
+ if(!co) return appconfig_set(root, section, name, value);
+
+ cv = appconfig_option_index_find(co, name, 0);
+ if(!cv) return appconfig_set(root, section, name, value);
+
+ cv->flags |= CONFIG_VALUE_USED;
+
+ if(cv->flags & CONFIG_VALUE_LOADED)
+ return cv->value;
+
+ if(strcmp(cv->value, value) != 0) {
+ cv->flags |= CONFIG_VALUE_CHANGED;
+
+ freez(cv->value);
+ cv->value = strdupz(value);
+ }
+
+ return cv->value;
+}
+
+const char *appconfig_set(struct config *root, const char *section, const char *name, const char *value)
+{
+ struct config_option *cv;
+
+ debug(D_CONFIG, "request to set config in section '%s', name '%s', value '%s'", section, name, value);
+
+ struct section *co = appconfig_section_find(root, section);
+ if(!co) co = appconfig_section_create(root, section);
+
+ cv = appconfig_option_index_find(co, name, 0);
+ if(!cv) cv = appconfig_value_create(co, name, value);
+ cv->flags |= CONFIG_VALUE_USED;
+
+ if(strcmp(cv->value, value) != 0) {
+ cv->flags |= CONFIG_VALUE_CHANGED;
+
+ freez(cv->value);
+ cv->value = strdupz(value);
+ }
+
+ return value;
+}
+
+long long appconfig_set_number(struct config *root, const char *section, const char *name, long long value)
+{
+ char buffer[100];
+ sprintf(buffer, "%lld", value);
+
+ appconfig_set(root, section, name, buffer);
+
+ return value;
+}
+
+LONG_DOUBLE appconfig_set_float(struct config *root, const char *section, const char *name, LONG_DOUBLE value)
+{
+ char buffer[100];
+ sprintf(buffer, "%0.5" LONG_DOUBLE_MODIFIER, value);
+
+ appconfig_set(root, section, name, buffer);
+
+ return value;
+}
+
+int appconfig_set_boolean(struct config *root, const char *section, const char *name, int value)
+{
+ char *s;
+ if(value) s = "yes";
+ else s = "no";
+
+ appconfig_set(root, section, name, s);
+
+ return value;
+}
+
+int appconfig_get_duration(struct config *root, const char *section, const char *name, const char *value)
+{
+ int result = 0;
+ const char *s;
+
+ s = appconfig_get(root, section, name, value);
+ if(!s) goto fallback;
+
+ if(!config_parse_duration(s, &result)) {
+ error("config option '[%s].%s = %s' is configured with an valid duration", section, name, s);
+ goto fallback;
+ }
+
+ return result;
+
+ fallback:
+ if(!config_parse_duration(value, &result))
+ error("INTERNAL ERROR: default duration supplied for option '[%s].%s = %s' is not a valid duration", section, name, value);
+
+ return result;
+}
+
+// ----------------------------------------------------------------------------
+// config load/save
+
+int appconfig_load(struct config *root, char *filename, int overwrite_used, const char *section_name)
+{
+ int line = 0;
+ struct section *co = NULL;
+ int is_exporter_config = 0;
+ int _backends = 0; // number of backend sections we have
+ char working_instance[CONFIG_MAX_NAME + 1];
+ char working_connector[CONFIG_MAX_NAME + 1];
+ struct section *working_connector_section = NULL;
+ int global_exporting_section = 0;
+
+ char buffer[CONFIG_FILE_LINE_MAX + 1], *s;
+
+ if(!filename) filename = CONFIG_DIR "/" CONFIG_FILENAME;
+
+ debug(D_CONFIG, "CONFIG: opening config file '%s'", filename);
+
+ FILE *fp = fopen(filename, "r");
+ if(!fp) {
+ // info("CONFIG: cannot open file '%s'. Using internal defaults.", filename);
+ return 0;
+ }
+
+ uint32_t section_hash = 0;
+ if(section_name) {
+ section_hash = simple_hash(section_name);
+ }
+ is_exporter_config = (strstr(filename, EXPORTING_CONF) != NULL);
+
+ while(fgets(buffer, CONFIG_FILE_LINE_MAX, fp) != NULL) {
+ buffer[CONFIG_FILE_LINE_MAX] = '\0';
+ line++;
+
+ s = trim(buffer);
+ if(!s || *s == '#') {
+ debug(D_CONFIG, "CONFIG: ignoring line %d of file '%s', it is empty.", line, filename);
+ continue;
+ }
+
+ int len = (int) strlen(s);
+ if(*s == '[' && s[len - 1] == ']') {
+ // new section
+ s[len - 1] = '\0';
+ s++;
+
+ if (is_exporter_config) {
+ global_exporting_section =
+ !(strcmp(s, CONFIG_SECTION_EXPORTING)) || !(strcmp(s, CONFIG_SECTION_PROMETHEUS));
+ if (unlikely(!global_exporting_section)) {
+ int rc;
+ rc = is_valid_connector(s, 0);
+ if (likely(rc)) {
+ strncpy(working_connector, s, CONFIG_MAX_NAME);
+ s = s + rc + 1;
+ if (unlikely(!(*s))) {
+ _backends++;
+ sprintf(buffer, "instance_%d", _backends);
+ s = buffer;
+ }
+ strncpy(working_instance, s, CONFIG_MAX_NAME);
+ working_connector_section = NULL;
+ if (unlikely(appconfig_section_find(root, working_instance))) {
+ error("Instance (%s) already exists", working_instance);
+ co = NULL;
+ continue;
+ }
+ } else {
+ co = NULL;
+ error("Section (%s) does not specify a valid connector", s);
+ continue;
+ }
+ }
+ }
+
+ co = appconfig_section_find(root, s);
+ if(!co) co = appconfig_section_create(root, s);
+
+ if(co && section_name && overwrite_used && section_hash == co->hash && !strcmp(section_name, co->name)) {
+ config_section_wrlock(co);
+ struct config_option *cv2 = co->values;
+ while (cv2) {
+ struct config_option *save = cv2->next;
+ struct config_option *found = appconfig_option_index_del(co, cv2);
+ if(found != cv2)
+ error("INTERNAL ERROR: Cannot remove '%s' from section '%s', it was not inserted before.",
+ cv2->name, co->name);
+
+ freez(cv2->name);
+ freez(cv2->value);
+ freez(cv2);
+ cv2 = save;
+ }
+ co->values = NULL;
+ config_section_unlock(co);
+ }
+
+ continue;
+ }
+
+ if(!co) {
+ // line outside a section
+ error("CONFIG: ignoring line %d ('%s') of file '%s', it is outside all sections.", line, s, filename);
+ continue;
+ }
+
+ if(section_name && overwrite_used && section_hash != co->hash && strcmp(section_name, co->name)) {
+ continue;
+ }
+
+ char *name = s;
+ char *value = strchr(s, '=');
+ if(!value) {
+ error("CONFIG: ignoring line %d ('%s') of file '%s', there is no = in it.", line, s, filename);
+ continue;
+ }
+ *value = '\0';
+ value++;
+
+ name = trim(name);
+ value = trim(value);
+
+ if(!name || *name == '#') {
+ error("CONFIG: ignoring line %d of file '%s', name is empty.", line, filename);
+ continue;
+ }
+
+ if(!value) value = "";
+
+ struct config_option *cv = appconfig_option_index_find(co, name, 0);
+
+ if (!cv) {
+ cv = appconfig_value_create(co, name, value);
+ if (likely(is_exporter_config) && unlikely(!global_exporting_section)) {
+ if (unlikely(!working_connector_section)) {
+ working_connector_section = appconfig_section_find(root, working_connector);
+ if (!working_connector_section)
+ working_connector_section = appconfig_section_create(root, working_connector);
+ if (likely(working_connector_section)) {
+ add_connector_instance(working_connector_section, co);
+ }
+ }
+ }
+ } else {
+ if (((cv->flags & CONFIG_VALUE_USED) && overwrite_used) || !(cv->flags & CONFIG_VALUE_USED)) {
+ debug(
+ D_CONFIG, "CONFIG: line %d of file '%s', overwriting '%s/%s'.", line, filename, co->name, cv->name);
+ freez(cv->value);
+ cv->value = strdupz(value);
+ } else
+ debug(
+ D_CONFIG,
+ "CONFIG: ignoring line %d of file '%s', '%s/%s' is already present and used.",
+ line,
+ filename,
+ co->name,
+ cv->name);
+ }
+ cv->flags |= CONFIG_VALUE_LOADED;
+ }
+
+ fclose(fp);
+
+ return 1;
+}
+
+void appconfig_generate(struct config *root, BUFFER *wb, int only_changed)
+{
+ int i, pri;
+ struct section *co;
+ struct config_option *cv;
+
+ for(i = 0; i < 3 ;i++) {
+ switch(i) {
+ case 0:
+ buffer_strcat(wb,
+ "# netdata configuration\n"
+ "#\n"
+ "# You can download the latest version of this file, using:\n"
+ "#\n"
+ "# wget -O /etc/netdata/netdata.conf http://localhost:19999/netdata.conf\n"
+ "# or\n"
+ "# curl -o /etc/netdata/netdata.conf http://localhost:19999/netdata.conf\n"
+ "#\n"
+ "# You can uncomment and change any of the options below.\n"
+ "# The value shown in the commented settings, is the default value.\n"
+ "#\n"
+ "\n# global netdata configuration\n");
+ break;
+
+ case 1:
+ buffer_strcat(wb, "\n\n# per plugin configuration\n");
+ break;
+
+ case 2:
+ buffer_strcat(wb, "\n\n# per chart configuration\n");
+ break;
+ }
+
+ appconfig_wrlock(root);
+ for(co = root->first_section; co ; co = co->next) {
+ if(!strcmp(co->name, CONFIG_SECTION_GLOBAL)
+ || !strcmp(co->name, CONFIG_SECTION_WEB)
+ || !strcmp(co->name, CONFIG_SECTION_STATSD)
+ || !strcmp(co->name, CONFIG_SECTION_PLUGINS)
+ || !strcmp(co->name, CONFIG_SECTION_CLOUD)
+ || !strcmp(co->name, CONFIG_SECTION_REGISTRY)
+ || !strcmp(co->name, CONFIG_SECTION_HEALTH)
+ || !strcmp(co->name, CONFIG_SECTION_BACKEND)
+ || !strcmp(co->name, CONFIG_SECTION_STREAM)
+ || !strcmp(co->name, CONFIG_SECTION_HOST_LABEL)
+ )
+ pri = 0;
+ else if(!strncmp(co->name, "plugin:", 7)) pri = 1;
+ else pri = 2;
+
+ if(i == pri) {
+ int loaded = 0;
+ int used = 0;
+ int changed = 0;
+ int count = 0;
+
+ config_section_wrlock(co);
+ for(cv = co->values; cv ; cv = cv->next) {
+ used += (cv->flags & CONFIG_VALUE_USED)?1:0;
+ loaded += (cv->flags & CONFIG_VALUE_LOADED)?1:0;
+ changed += (cv->flags & CONFIG_VALUE_CHANGED)?1:0;
+ count++;
+ }
+ config_section_unlock(co);
+
+ if(!count) continue;
+ if(only_changed && !changed && !loaded) continue;
+
+ if(!used) {
+ buffer_sprintf(wb, "\n# section '%s' is not used.", co->name);
+ }
+
+ buffer_sprintf(wb, "\n[%s]\n", co->name);
+
+ config_section_wrlock(co);
+ for(cv = co->values; cv ; cv = cv->next) {
+
+ if(used && !(cv->flags & CONFIG_VALUE_USED)) {
+ buffer_sprintf(wb, "\n\t# option '%s' is not used.\n", cv->name);
+ }
+ buffer_sprintf(wb, "\t%s%s = %s\n", ((!(cv->flags & CONFIG_VALUE_LOADED)) && (!(cv->flags & CONFIG_VALUE_CHANGED)) && (cv->flags & CONFIG_VALUE_USED))?"# ":"", cv->name, cv->value);
+ }
+ config_section_unlock(co);
+ }
+ }
+ appconfig_unlock(root);
+ }
+}
+
+/**
+ * Parse Duration
+ *
+ * Parse the string, setting the result.
+ *
+ * @param string the duration string
+ * @param result the output variable
+ *
+ * @return It returns 1 on success and 0 otherwise
+ */
+int config_parse_duration(const char* string, int* result) {
+ while(*string && isspace(*string)) string++;
+
+ if(unlikely(!*string)) goto fallback;
+
+ if(*string == 'n' && !strcmp(string, "never")) {
+ // this is a valid option
+ *result = 0;
+ return 1;
+ }
+
+ // make sure it is a number
+ if(!(isdigit(*string) || *string == '+' || *string == '-')) goto fallback;
+
+ char *e = NULL;
+ calculated_number n = str2ld(string, &e);
+ if(e && *e) {
+ switch (*e) {
+ case 'Y':
+ *result = (int) (n * 31536000);
+ break;
+ case 'M':
+ *result = (int) (n * 2592000);
+ break;
+ case 'w':
+ *result = (int) (n * 604800);
+ break;
+ case 'd':
+ *result = (int) (n * 86400);
+ break;
+ case 'h':
+ *result = (int) (n * 3600);
+ break;
+ case 'm':
+ *result = (int) (n * 60);
+ break;
+ case 's':
+ default:
+ *result = (int) (n);
+ break;
+ }
+ }
+ else
+ *result = (int)(n);
+
+ return 1;
+
+ fallback:
+ *result = 0;
+ return 0;
+}
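+
+// Worked examples, derived from the suffix handling above (r is an int):
+//   config_parse_duration("15s", &r)   sets r = 15
+//   config_parse_duration("2m", &r)    sets r = 120
+//   config_parse_duration("1h", &r)    sets r = 3600
+//   config_parse_duration("never", &r) sets r = 0 (a valid 'disabled' value)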
+
+struct section *appconfig_get_section(struct config *root, const char *name)
+{
+ return appconfig_section_find(root, name);
+}
diff --git a/libnetdata/config/appconfig.h b/libnetdata/config/appconfig.h
new file mode 100644
index 0000000..9d02e4a
--- /dev/null
+++ b/libnetdata/config/appconfig.h
@@ -0,0 +1,209 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+/*
+ * This section manages ini config files, like netdata.conf and stream.conf
+ *
+ * It is organized like this:
+ *
+ * struct config (i.e. netdata.conf or stream.conf)
+ * .first_section / .last_section = a linked list of struct section
+ * .mutex = a mutex to protect the above linked list due to multi-threading
+ * .index = an AVL tree of struct section
+ *
+ * struct section (i.e. [global] or [health] of netdata.conf)
+ * .values = a linked list of struct config_option
+ * .mutex = a mutex to protect the above linked list due to multi-threading
+ * .values_index = an AVL tree of struct config_option
+ *
+ * struct config_option (i.e. a name-value pair for each ini file option)
+ *
+ * The following operations on name-value options are supported:
+ * SET to set the value of an option
+ * SET DEFAULT to set the value and the default value of an option
+ * GET to get the value of an option
+ * EXISTS to check if an option exists
+ * MOVE to move an option from a section to another section, and/or rename it
+ *
+ * GET and SET operations are provided for the following data types:
+ * STRING
+ * NUMBER (long long)
+ * FLOAT (long double)
+ * BOOLEAN (false, true)
+ * BOOLEAN ONDEMAND (false, true, auto)
+ *
+ * GET and SET operations create struct config_option, if it is not already present.
+ * This allows netdata to run even without netdata.conf and stream.conf. The internal
+ * defaults are used to create the structure that should exist in the ini file and the config
+ * file can be downloaded from the server.
+ *
+ * Also 2 operations are supported for the whole config file:
+ *
+ * LOAD To load the ini file from disk
+ * GENERATE To generate the ini file (this is used to download the ini file from the server)
+ *
+ * For each option (name-value pair), the system maintains 4 flags:
+ * LOADED to indicate that the value has been loaded from the file
+ * USED to indicate that netdata used the value
+ * CHANGED to indicate that the value has been changed from the loaded value or the internal default value
+ * CHECKED is used internally for optimization (to avoid a strcmp() every time GET is called).
+ *
+ * TODO:
+ * 1. The linked lists and the mutexes can be removed and the AVL trees can become a DICTIONARY.
+ * This part of the code was written before we added traversal to AVL.
+ *
+ * 2. High level data types could be supported, to simplify the rest of the code:
+ * MULTIPLE CHOICE to let the user select one of the supported keywords
+ * this would allow users to see the available options in comments
+ *
+ * SIMPLE PATTERN to let the user define netdata SIMPLE PATTERNS
+ *
+ * 3. Sorting of options should be supported.
+ * Today, when the ini file is downloaded from the server, the options are shown in the order
+ * they appear in the linked list (the order they were added, listing changed options first).
+ * If we remove the linked list, the order they appear in the AVL tree will be used (which is
+ * random due to simple_hash()).
+ * Ideally, we support sorting of options when generating the ini file.
+ *
+ * 4. There is no free() operation. So, memory is freed on netdata exit.
+ *
+ * 5. Avoid memory fragmentation
+ * Since entries are created from multiple threads and a lot of allocations are required
+ * for each config_option, fragmentation can be a problem for IoT.
+ *
+ * 6. Although this way of managing options is quite flexible and dynamic, it wastes memory
+ * for the names of the options. Since most of the option names are static, we could provide
+ * a method to allocate only the dynamic option names.
+ */
+
+#ifndef NETDATA_CONFIG_H
+#define NETDATA_CONFIG_H 1
+
+#include "../libnetdata.h"
+
+#define CONFIG_FILENAME "netdata.conf"
+
+#define CONFIG_SECTION_GLOBAL "global"
+#define CONFIG_SECTION_WEB "web"
+#define CONFIG_SECTION_STATSD "statsd"
+#define CONFIG_SECTION_PLUGINS "plugins"
+#define CONFIG_SECTION_CLOUD "cloud"
+#define CONFIG_SECTION_REGISTRY "registry"
+#define CONFIG_SECTION_HEALTH "health"
+#define CONFIG_SECTION_BACKEND "backend"
+#define CONFIG_SECTION_STREAM "stream"
+#define CONFIG_SECTION_EXPORTING "exporting:global"
+#define CONFIG_SECTION_PROMETHEUS "prometheus:exporter"
+#define CONFIG_SECTION_HOST_LABEL "host labels"
+#define EXPORTING_CONF "exporting.conf"
+
+// these are used to limit the configuration names and values lengths
+// they are not enforced by config.c functions (they will strdup() all strings, regardless of their length)
+#define CONFIG_MAX_NAME 1024
+#define CONFIG_MAX_VALUE 2048
+
+// ----------------------------------------------------------------------------
+// Config definitions
+#define CONFIG_FILE_LINE_MAX ((CONFIG_MAX_NAME + CONFIG_MAX_VALUE + 1024) * 2)
+
+#define CONFIG_VALUE_LOADED 0x01 // has been loaded from the config
+#define CONFIG_VALUE_USED 0x02 // has been accessed from the program
+#define CONFIG_VALUE_CHANGED 0x04 // has been changed from the loaded value or the internal default value
+#define CONFIG_VALUE_CHECKED 0x08 // has been checked if the value is different from the default
+
+struct config_option {
+ avl avl_node; // the index entry of this entry - this has to be first!
+
+ uint8_t flags;
+ uint32_t hash; // a simple hash to speed up searching
+ // we first compare hashes, and only if the hashes are equal we do string comparisons
+
+ char *name;
+ char *value;
+
+ struct config_option *next; // config->mutex protects just this
+};
+
+struct section {
+ avl avl_node; // the index entry of this section - this has to be first!
+
+ uint32_t hash; // a simple hash to speed up searching
+ // we first compare hashes, and only if the hashes are equal we do string comparisons
+
+ char *name;
+
+ struct section *next; // global config_mutex protects just this
+
+ struct config_option *values;
+ avl_tree_lock values_index;
+
+ netdata_mutex_t mutex; // this locks only the writers, to ensure atomic updates
+ // readers are protected using the rwlock in avl_tree_lock
+};
+
+struct config {
+ struct section *first_section;
+ struct section *last_section; // optimize inserting at the end
+ netdata_mutex_t mutex;
+ avl_tree_lock index;
+};
+
+#define CONFIG_BOOLEAN_INVALID 100 // a sentinel (invalid) value, used as default initialization when validity needs to be checked
+
+#define CONFIG_BOOLEAN_NO 0 // disabled
+#define CONFIG_BOOLEAN_YES 1 // enabled
+
+#ifndef CONFIG_BOOLEAN_AUTO
+#define CONFIG_BOOLEAN_AUTO 2 // enabled if it has useful info when enabled
+#endif
+
+extern int appconfig_load(struct config *root, char *filename, int overwrite_used, const char *section_name);
+extern void config_section_wrlock(struct section *co);
+extern void config_section_unlock(struct section *co);
+
+extern char *appconfig_get_by_section(struct section *co, const char *name, const char *default_value);
+extern char *appconfig_get(struct config *root, const char *section, const char *name, const char *default_value);
+extern long long appconfig_get_number(struct config *root, const char *section, const char *name, long long value);
+extern LONG_DOUBLE appconfig_get_float(struct config *root, const char *section, const char *name, LONG_DOUBLE value);
+extern int appconfig_get_boolean_by_section(struct section *co, const char *name, int value);
+extern int appconfig_get_boolean(struct config *root, const char *section, const char *name, int value);
+extern int appconfig_get_boolean_ondemand(struct config *root, const char *section, const char *name, int value);
+extern int appconfig_get_duration(struct config *root, const char *section, const char *name, const char *value);
+
+extern const char *appconfig_set(struct config *root, const char *section, const char *name, const char *value);
+extern const char *appconfig_set_default(struct config *root, const char *section, const char *name, const char *value);
+extern long long appconfig_set_number(struct config *root, const char *section, const char *name, long long value);
+extern LONG_DOUBLE appconfig_set_float(struct config *root, const char *section, const char *name, LONG_DOUBLE value);
+extern int appconfig_set_boolean(struct config *root, const char *section, const char *name, int value);
+
+extern int appconfig_exists(struct config *root, const char *section, const char *name);
+extern int appconfig_move(struct config *root, const char *section_old, const char *name_old, const char *section_new, const char *name_new);
+
+extern void appconfig_generate(struct config *root, BUFFER *wb, int only_changed);
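+
+/*
+ * Usage sketch (illustrative only; 'netdata_config' and the option names
+ * below are hypothetical):
+ *
+ *     char *dir = appconfig_get(&netdata_config, "global", "cache directory", "/var/cache/netdata");
+ *     long long history = appconfig_get_number(&netdata_config, "global", "history", 3600);
+ *     appconfig_set_default(&netdata_config, "global", "hostname", "localhost");
+ */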
+
+extern int appconfig_section_compare(void *a, void *b);
+
+extern void appconfig_section_destroy_non_loaded(struct config *root, const char *section);
+
+extern int config_parse_duration(const char* string, int* result);
+
+extern struct section *appconfig_get_section(struct config *root, const char *name);
+
+extern void appconfig_wrlock(struct config *root);
+extern void appconfig_unlock(struct config *root);
+
+struct connector_instance {
+ char instance_name[CONFIG_MAX_NAME + 1];
+ char connector_name[CONFIG_MAX_NAME + 1];
+};
+
+typedef struct _connector_instance {
+ struct section *connector; // actual connector
+ struct section *instance; // This instance
+ char instance_name[CONFIG_MAX_NAME + 1];
+ char connector_name[CONFIG_MAX_NAME + 1];
+ struct _connector_instance *next; // Next instance
+} _CONNECTOR_INSTANCE;
+
+extern _CONNECTOR_INSTANCE *add_connector_instance(struct section *connector, struct section *instance);
+
+#endif /* NETDATA_CONFIG_H */
diff --git a/libnetdata/dictionary/Makefile.am b/libnetdata/dictionary/Makefile.am
new file mode 100644
index 0000000..161784b
--- /dev/null
+++ b/libnetdata/dictionary/Makefile.am
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/libnetdata/dictionary/README.md b/libnetdata/dictionary/README.md
new file mode 100644
index 0000000..234ff18
--- /dev/null
+++ b/libnetdata/dictionary/README.md
@@ -0,0 +1,5 @@
+<!--
+custom_edit_url: https://github.com/netdata/netdata/edit/master/libnetdata/dictionary/README.md
+-->
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Flibnetdata%2Fdictionary%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/libnetdata/dictionary/dictionary.c b/libnetdata/dictionary/dictionary.c
new file mode 100644
index 0000000..cfcf1fb
--- /dev/null
+++ b/libnetdata/dictionary/dictionary.c
@@ -0,0 +1,329 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "../libnetdata.h"
+
+// ----------------------------------------------------------------------------
+// dictionary statistics
+
+static inline void NETDATA_DICTIONARY_STATS_INSERTS_PLUS1(DICTIONARY *dict) {
+ if(likely(dict->stats))
+ dict->stats->inserts++;
+}
+static inline void NETDATA_DICTIONARY_STATS_DELETES_PLUS1(DICTIONARY *dict) {
+ if(likely(dict->stats))
+ dict->stats->deletes++;
+}
+static inline void NETDATA_DICTIONARY_STATS_SEARCHES_PLUS1(DICTIONARY *dict) {
+ if(likely(dict->stats))
+ dict->stats->searches++;
+}
+static inline void NETDATA_DICTIONARY_STATS_ENTRIES_PLUS1(DICTIONARY *dict) {
+ if(likely(dict->stats))
+ dict->stats->entries++;
+}
+static inline void NETDATA_DICTIONARY_STATS_ENTRIES_MINUS1(DICTIONARY *dict) {
+ if(likely(dict->stats))
+ dict->stats->entries--;
+}
+
+
+// ----------------------------------------------------------------------------
+// dictionary locks
+
+static inline void dictionary_read_lock(DICTIONARY *dict) {
+ if(likely(dict->rwlock)) {
+ // debug(D_DICTIONARY, "Dictionary READ lock");
+ netdata_rwlock_rdlock(dict->rwlock);
+ }
+}
+
+static inline void dictionary_write_lock(DICTIONARY *dict) {
+ if(likely(dict->rwlock)) {
+ // debug(D_DICTIONARY, "Dictionary WRITE lock");
+ netdata_rwlock_wrlock(dict->rwlock);
+ }
+}
+
+static inline void dictionary_unlock(DICTIONARY *dict) {
+ if(likely(dict->rwlock)) {
+ // debug(D_DICTIONARY, "Dictionary UNLOCK lock");
+ netdata_rwlock_unlock(dict->rwlock);
+ }
+}
+
+
+// ----------------------------------------------------------------------------
+// avl index
+
+static int name_value_compare(void* a, void* b) {
+ if(((NAME_VALUE *)a)->hash < ((NAME_VALUE *)b)->hash) return -1;
+ else if(((NAME_VALUE *)a)->hash > ((NAME_VALUE *)b)->hash) return 1;
+ else return strcmp(((NAME_VALUE *)a)->name, ((NAME_VALUE *)b)->name);
+}
+
+static inline NAME_VALUE *dictionary_name_value_index_find_nolock(DICTIONARY *dict, const char *name, uint32_t hash) {
+ NAME_VALUE tmp;
+ tmp.hash = (hash)?hash:simple_hash(name);
+ tmp.name = (char *)name;
+
+ NETDATA_DICTIONARY_STATS_SEARCHES_PLUS1(dict);
+ return (NAME_VALUE *)avl_search(&(dict->values_index), (avl *) &tmp);
+}
+
+// ----------------------------------------------------------------------------
+// internal methods
+
+static NAME_VALUE *dictionary_name_value_create_nolock(DICTIONARY *dict, const char *name, void *value, size_t value_len, uint32_t hash) {
+ debug(D_DICTIONARY, "Creating name value entry for name '%s'.", name);
+
+ NAME_VALUE *nv = callocz(1, sizeof(NAME_VALUE));
+
+ if(dict->flags & DICTIONARY_FLAG_NAME_LINK_DONT_CLONE)
+ nv->name = (char *)name;
+ else {
+ nv->name = strdupz(name);
+ }
+
+ nv->hash = (hash)?hash:simple_hash(nv->name);
+
+ if(dict->flags & DICTIONARY_FLAG_VALUE_LINK_DONT_CLONE)
+ nv->value = value;
+ else {
+ nv->value = mallocz(value_len);
+ memcpy(nv->value, value, value_len);
+ }
+
+ // index it
+ NETDATA_DICTIONARY_STATS_INSERTS_PLUS1(dict);
+ if(unlikely(avl_insert(&((dict)->values_index), (avl *)(nv)) != (avl *)nv))
+ error("dictionary: INTERNAL ERROR: duplicate insertion to dictionary.");
+
+ NETDATA_DICTIONARY_STATS_ENTRIES_PLUS1(dict);
+
+ return nv;
+}
+
+static void dictionary_name_value_destroy_nolock(DICTIONARY *dict, NAME_VALUE *nv) {
+ debug(D_DICTIONARY, "Destroying name value entry for name '%s'.", nv->name);
+
+ NETDATA_DICTIONARY_STATS_DELETES_PLUS1(dict);
+ if(unlikely(avl_remove(&(dict->values_index), (avl *)(nv)) != (avl *)nv))
+ error("dictionary: INTERNAL ERROR: dictionary invalid removal of node.");
+
+ NETDATA_DICTIONARY_STATS_ENTRIES_MINUS1(dict);
+
+ if(!(dict->flags & DICTIONARY_FLAG_VALUE_LINK_DONT_CLONE)) {
+ debug(D_REGISTRY, "Dictionary freeing value of '%s'", nv->name);
+ freez(nv->value);
+ }
+
+ if(!(dict->flags & DICTIONARY_FLAG_NAME_LINK_DONT_CLONE)) {
+ debug(D_REGISTRY, "Dictionary freeing name '%s'", nv->name);
+ freez(nv->name);
+ }
+
+ freez(nv);
+}
+
+// ----------------------------------------------------------------------------
+// API - basic methods
+
+DICTIONARY *dictionary_create(uint8_t flags) {
+ debug(D_DICTIONARY, "Creating dictionary.");
+
+ DICTIONARY *dict = callocz(1, sizeof(DICTIONARY));
+
+ if(flags & DICTIONARY_FLAG_WITH_STATISTICS)
+ dict->stats = callocz(1, sizeof(struct dictionary_stats));
+
+ if(!(flags & DICTIONARY_FLAG_SINGLE_THREADED)) {
+ dict->rwlock = callocz(1, sizeof(netdata_rwlock_t));
+ netdata_rwlock_init(dict->rwlock);
+ }
+
+ avl_init(&dict->values_index, name_value_compare);
+ dict->flags = flags;
+
+ return dict;
+}
+
+void dictionary_destroy(DICTIONARY *dict) {
+ debug(D_DICTIONARY, "Destroying dictionary.");
+
+ dictionary_write_lock(dict);
+
+ while(dict->values_index.root)
+ dictionary_name_value_destroy_nolock(dict, (NAME_VALUE *)dict->values_index.root);
+
+ dictionary_unlock(dict);
+
+ if(dict->stats)
+ freez(dict->stats);
+
+ if(dict->rwlock) {
+ netdata_rwlock_destroy(dict->rwlock);
+ freez(dict->rwlock);
+ }
+
+ freez(dict);
+}
+
+// ----------------------------------------------------------------------------
+
+void *dictionary_set(DICTIONARY *dict, const char *name, void *value, size_t value_len) {
+ debug(D_DICTIONARY, "SET dictionary entry with name '%s'.", name);
+
+ uint32_t hash = simple_hash(name);
+
+ dictionary_write_lock(dict);
+
+ NAME_VALUE *nv = dictionary_name_value_index_find_nolock(dict, name, hash);
+ if(unlikely(!nv)) {
+ debug(D_DICTIONARY, "Dictionary entry with name '%s' not found. Creating a new one.", name);
+
+ nv = dictionary_name_value_create_nolock(dict, name, value, value_len, hash);
+ if(unlikely(!nv))
+ fatal("Cannot create name_value.");
+ }
+ else {
+ debug(D_DICTIONARY, "Dictionary entry with name '%s' found. Changing its value.", name);
+
+ if(dict->flags & DICTIONARY_FLAG_VALUE_LINK_DONT_CLONE) {
+ debug(D_REGISTRY, "Dictionary: linking value to '%s'", name);
+ nv->value = value;
+ }
+ else {
+ debug(D_REGISTRY, "Dictionary: cloning value to '%s'", name);
+
+ // copy the new value without breaking
+ // any other thread accessing the same entry
+ void *new = mallocz(value_len),
+ *old = nv->value;
+
+ memcpy(new, value, value_len);
+ nv->value = new;
+
+ debug(D_REGISTRY, "Dictionary: freeing old value of '%s'", name);
+ freez(old);
+ }
+ }
+
+ dictionary_unlock(dict);
+
+ return nv->value;
+}
+
+void *dictionary_get(DICTIONARY *dict, const char *name) {
+ debug(D_DICTIONARY, "GET dictionary entry with name '%s'.", name);
+
+ dictionary_read_lock(dict);
+ NAME_VALUE *nv = dictionary_name_value_index_find_nolock(dict, name, 0);
+ dictionary_unlock(dict);
+
+ if(unlikely(!nv)) {
+ debug(D_DICTIONARY, "Dictionary entry with name '%s' not found.", name);
+ return NULL;
+ }
+
+ debug(D_DICTIONARY, "Found dictionary entry with name '%s'.", name);
+ return nv->value;
+}
+
+int dictionary_del(DICTIONARY *dict, const char *name) {
+ int ret;
+
+ debug(D_DICTIONARY, "DEL dictionary entry with name '%s'.", name);
+
+ dictionary_write_lock(dict);
+
+ NAME_VALUE *nv = dictionary_name_value_index_find_nolock(dict, name, 0);
+ if(unlikely(!nv)) {
+ debug(D_DICTIONARY, "Dictionary entry with name '%s' not found.", name);
+ ret = -1;
+ }
+ else {
+ debug(D_DICTIONARY, "Found dictionary entry with name '%s'.", name);
+ dictionary_name_value_destroy_nolock(dict, nv);
+ ret = 0;
+ }
+
+ dictionary_unlock(dict);
+
+ return ret;
+}
+
+
+// ----------------------------------------------------------------------------
+// API - walk through the dictionary
+// the dictionary is locked for reading while this happens
+// do not use other dictionary calls while walking the dictionary - deadlock!
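+//
+// illustrative sketch (not part of the build): a callback suitable for
+// dictionary_get_all(), assuming the stored values are C strings:
+//
+//   static int print_entry(void *entry, void *data) {
+//       (void)data;
+//       printf("%s\n", (const char *)entry);
+//       return 0; // a negative return aborts the walk; non-negative returns are summed
+//   }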
+
+static int dictionary_walker(avl *a, int (*callback)(void *entry, void *data), void *data) {
+ int total = 0, ret = 0;
+
+ if(a->avl_link[0]) {
+ ret = dictionary_walker(a->avl_link[0], callback, data);
+ if(ret < 0) return ret;
+ total += ret;
+ }
+
+ ret = callback(((NAME_VALUE *)a)->value, data);
+ if(ret < 0) return ret;
+ total += ret;
+
+ if(a->avl_link[1]) {
+ ret = dictionary_walker(a->avl_link[1], callback, data);
+ if (ret < 0) return ret;
+ total += ret;
+ }
+
+ return total;
+}
+
+int dictionary_get_all(DICTIONARY *dict, int (*callback)(void *entry, void *data), void *data) {
+ int ret = 0;
+
+ dictionary_read_lock(dict);
+
+ if(likely(dict->values_index.root))
+ ret = dictionary_walker(dict->values_index.root, callback, data);
+
+ dictionary_unlock(dict);
+
+ return ret;
+}
+
+static int dictionary_walker_name_value(avl *a, int (*callback)(char *name, void *entry, void *data), void *data) {
+ int total = 0, ret = 0;
+
+ if(a->avl_link[0]) {
+ ret = dictionary_walker_name_value(a->avl_link[0], callback, data);
+ if(ret < 0) return ret;
+ total += ret;
+ }
+
+ ret = callback(((NAME_VALUE *)a)->name, ((NAME_VALUE *)a)->value, data);
+ if(ret < 0) return ret;
+ total += ret;
+
+ if(a->avl_link[1]) {
+ ret = dictionary_walker_name_value(a->avl_link[1], callback, data);
+ if (ret < 0) return ret;
+ total += ret;
+ }
+
+ return total;
+}
+
+int dictionary_get_all_name_value(DICTIONARY *dict, int (*callback)(char *name, void *entry, void *data), void *data) {
+ int ret = 0;
+
+ dictionary_read_lock(dict);
+
+ if(likely(dict->values_index.root))
+ ret = dictionary_walker_name_value(dict->values_index.root, callback, data);
+
+ dictionary_unlock(dict);
+
+ return ret;
+}
diff --git a/libnetdata/dictionary/dictionary.h b/libnetdata/dictionary/dictionary.h
new file mode 100644
index 0000000..fc24ec2
--- /dev/null
+++ b/libnetdata/dictionary/dictionary.h
@@ -0,0 +1,49 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_DICTIONARY_H
+#define NETDATA_DICTIONARY_H 1
+
+#include "../libnetdata.h"
+
+struct dictionary_stats {
+ unsigned long long inserts;
+ unsigned long long deletes;
+ unsigned long long searches;
+ unsigned long long entries;
+};
+
+typedef struct name_value {
+ avl avl_node; // the index - this has to be first!
+
+ uint32_t hash; // a simple hash to speed up searching
+ // we first compare hashes, and only if the hashes are equal we do string comparisons
+
+ char *name;
+ void *value;
+} NAME_VALUE;
+
+typedef struct dictionary {
+ avl_tree_type values_index;
+
+ uint8_t flags;
+
+ struct dictionary_stats *stats;
+ netdata_rwlock_t *rwlock;
+} DICTIONARY;
+
+#define DICTIONARY_FLAG_DEFAULT 0x00000000
+#define DICTIONARY_FLAG_SINGLE_THREADED 0x00000001
+#define DICTIONARY_FLAG_VALUE_LINK_DONT_CLONE 0x00000002
+#define DICTIONARY_FLAG_NAME_LINK_DONT_CLONE 0x00000004
+#define DICTIONARY_FLAG_WITH_STATISTICS 0x00000008
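+
+// flag semantics, as honored by dictionary.c:
+//   SINGLE_THREADED       - do not allocate or take the rwlock; safe only when
+//                           a single thread accesses the dictionary
+//   VALUE_LINK_DONT_CLONE - link the caller's value pointer instead of keeping
+//                           a private copy; the caller retains ownership
+//   NAME_LINK_DONT_CLONE  - the same, for the name string
+//   WITH_STATISTICS       - allocate and maintain struct dictionary_stats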
+
+extern DICTIONARY *dictionary_create(uint8_t flags);
+extern void dictionary_destroy(DICTIONARY *dict);
+extern void *dictionary_set(DICTIONARY *dict, const char *name, void *value, size_t value_len) NEVERNULL;
+extern void *dictionary_get(DICTIONARY *dict, const char *name);
+extern int dictionary_del(DICTIONARY *dict, const char *name);
+
+extern int dictionary_get_all(DICTIONARY *dict, int (*callback)(void *entry, void *d), void *data);
+extern int dictionary_get_all_name_value(DICTIONARY *dict, int (*callback)(char *name, void *entry, void *d), void *data);
+
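+// usage sketch (illustrative only, not part of the build): values are cloned
+// by default, so the caller's buffer may be freed after dictionary_set():
+//
+//   DICTIONARY *dict = dictionary_create(DICTIONARY_FLAG_DEFAULT);
+//   dictionary_set(dict, "answer", "42", 3);          // clones name and value
+//   char *v = (char *)dictionary_get(dict, "answer"); // NULL if not found
+//   dictionary_del(dict, "answer");                   // 0 on success, -1 if missing
+//   dictionary_destroy(dict);
+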
+#endif /* NETDATA_DICTIONARY_H */
diff --git a/libnetdata/ebpf/Makefile.am b/libnetdata/ebpf/Makefile.am
new file mode 100644
index 0000000..161784b
--- /dev/null
+++ b/libnetdata/ebpf/Makefile.am
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/libnetdata/ebpf/README.md b/libnetdata/ebpf/README.md
new file mode 100644
index 0000000..09c6607
--- /dev/null
+++ b/libnetdata/ebpf/README.md
@@ -0,0 +1,5 @@
+<!--
+custom_edit_url: https://github.com/netdata/netdata/edit/master/libnetdata/ebpf/README.md
+-->
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Flibnetdata%2Febpf%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/libnetdata/ebpf/ebpf.c b/libnetdata/ebpf/ebpf.c
new file mode 100644
index 0000000..a9ff21f
--- /dev/null
+++ b/libnetdata/ebpf/ebpf.c
@@ -0,0 +1,324 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <dlfcn.h>
+#include <sys/utsname.h>
+
+#include "../libnetdata.h"
+
+/*
+static int clean_kprobe_event(FILE *out, char *filename, char *father_pid, netdata_ebpf_events_t *ptr)
+{
+ int fd = open(filename, O_WRONLY | O_APPEND, 0);
+ if (fd < 0) {
+ if (out) {
+ fprintf(out, "Cannot open %s : %s\n", filename, strerror(errno));
+ }
+ return 1;
+ }
+
+ char cmd[1024];
+ int length = snprintf(cmd, 1023, "-:kprobes/%c_netdata_%s_%s", ptr->type, ptr->name, father_pid);
+ int ret = 0;
+ if (length > 0) {
+ ssize_t written = write(fd, cmd, strlen(cmd));
+ if (written < 0) {
+ if (out) {
+ fprintf(
+ out, "Cannot remove the event (%d, %d) '%s' from %s : %s\n", getppid(), getpid(), cmd, filename,
+ strerror((int)errno));
+ }
+ ret = 1;
+ }
+ }
+
+ close(fd);
+
+ return ret;
+}
+
+int clean_kprobe_events(FILE *out, int pid, netdata_ebpf_events_t *ptr)
+{
+ debug(D_EXIT, "Cleaning parent process events.");
+ char filename[FILENAME_MAX + 1];
+ snprintf(filename, FILENAME_MAX, "%s%s", NETDATA_DEBUGFS, "kprobe_events");
+
+ char removeme[16];
+ snprintf(removeme, 15, "%d", pid);
+
+ int i;
+ for (i = 0; ptr[i].name; i++) {
+ if (clean_kprobe_event(out, filename, removeme, &ptr[i])) {
+ break;
+ }
+ }
+
+ return 0;
+}
+*/
+
+//----------------------------------------------------------------------------------------------------------------------
+
+int get_kernel_version(char *out, int size)
+{
+ char major[16], minor[16], patch[16];
+ char ver[VERSION_STRING_LEN];
+ char *version = ver;
+
+ out[0] = '\0';
+ int fd = open("/proc/sys/kernel/osrelease", O_RDONLY);
+ if (fd < 0)
+ return -1;
+
+    ssize_t len = read(fd, ver, sizeof(ver) - 1);
+    if (len < 0) {
+        close(fd);
+        return -1;
+    }
+    ver[len] = '\0'; // read() does not NUL-terminate the buffer
+
+    close(fd);
+
+ char *move = major;
+ while (*version && *version != '.')
+ *move++ = *version++;
+ *move = '\0';
+
+ version++;
+ move = minor;
+ while (*version && *version != '.')
+ *move++ = *version++;
+ *move = '\0';
+
+ if (*version)
+ version++;
+ else
+ return -1;
+
+ move = patch;
+ while (*version && *version != '\n')
+ *move++ = *version++;
+ *move = '\0';
+
+ fd = snprintf(out, (size_t)size, "%s.%s.%s", major, minor, patch);
+    if (fd >= size)
+        error("The buffer to store the kernel version is smaller than necessary.");
+
+ return ((int)(str2l(major) * 65536) + (int)(str2l(minor) * 256) + (int)str2l(patch));
+}
+
+int get_redhat_release()
+{
+ char buffer[VERSION_STRING_LEN + 1];
+ int major, minor;
+ FILE *fp = fopen("/etc/redhat-release", "r");
+
+ if (fp) {
+ major = 0;
+ minor = -1;
+ size_t length = fread(buffer, sizeof(char), VERSION_STRING_LEN, fp);
+ if (length > 4) {
+ buffer[length] = '\0';
+ char *end = strchr(buffer, '.');
+ char *start;
+ if (end) {
+ *end = 0x0;
+
+ if (end > buffer) {
+ start = end - 1;
+
+ major = strtol(start, NULL, 10);
+                start = ++end;
+
+                // the minor number follows the dot, if there is anything there
+                if (*start)
+                    minor = strtol(start, NULL, 10);
+                else
+                    minor = -1;
+ }
+ }
+ }
+
+ fclose(fp);
+ return ((major * 256) + minor);
+ } else {
+ return -1;
+ }
+}
+
+/**
+ * Check if the kernel is in a list of rejected ones
+ *
+ * @return Returns 1 if the kernel is rejected, 0 otherwise.
+ */
+static int kernel_is_rejected()
+{
+ // Get kernel version from system
+ char version_string[VERSION_STRING_LEN + 1];
+ int version_string_len = 0;
+
+ if (read_file("/proc/version_signature", version_string, VERSION_STRING_LEN)) {
+ if (read_file("/proc/version", version_string, VERSION_STRING_LEN)) {
+ struct utsname uname_buf;
+            if (uname(&uname_buf)) { // uname() returns non-zero on failure
+ info("Cannot check kernel version");
+ return 0;
+ }
+ version_string_len =
+ snprintfz(version_string, VERSION_STRING_LEN, "%s %s", uname_buf.release, uname_buf.version);
+ }
+ }
+
+ if (!version_string_len)
+ version_string_len = strlen(version_string);
+
+ // Open a file with a list of rejected kernels
+ char *config_dir = getenv("NETDATA_USER_CONFIG_DIR");
+ if (config_dir == NULL) {
+ config_dir = CONFIG_DIR;
+ }
+
+ char filename[FILENAME_MAX + 1];
+ snprintfz(filename, FILENAME_MAX, "%s/%s", config_dir, EBPF_KERNEL_REJECT_LIST_FILE);
+ FILE *kernel_reject_list = fopen(filename, "r");
+
+ if (!kernel_reject_list) {
+ config_dir = getenv("NETDATA_STOCK_CONFIG_DIR");
+ if (config_dir == NULL) {
+ config_dir = LIBCONFIG_DIR;
+ }
+
+ snprintfz(filename, FILENAME_MAX, "%s/%s", config_dir, EBPF_KERNEL_REJECT_LIST_FILE);
+ kernel_reject_list = fopen(filename, "r");
+
+ if (!kernel_reject_list)
+ return 0;
+ }
+
+ // Find if the kernel is in the reject list
+ char *reject_string = NULL;
+ size_t buf_len = 0;
+ ssize_t reject_string_len;
+ while ((reject_string_len = getline(&reject_string, &buf_len, kernel_reject_list) - 1) > 0) {
+ if (version_string_len >= reject_string_len) {
+ if (!strncmp(version_string, reject_string, reject_string_len)) {
+ info("A buggy kernel is detected");
+ fclose(kernel_reject_list);
+ freez(reject_string);
+ return 1;
+ }
+ }
+ }
+
+ fclose(kernel_reject_list);
+ freez(reject_string);
+
+ return 0;
+}
+
+static int has_ebpf_kernel_version(int version)
+{
+ if (kernel_is_rejected())
+ return 0;
+
+    // Kernel 4.11.0 or newer, or RH >= 7.5
+ return (version >= NETDATA_MINIMUM_EBPF_KERNEL || get_redhat_release() >= NETDATA_MINIMUM_RH_VERSION);
+}
+
+int has_condition_to_run(int version)
+{
+ if (!has_ebpf_kernel_version(version))
+ return 0;
+
+ return 1;
+}
+
+//----------------------------------------------------------------------------------------------------------------------
+
+char *ebpf_kernel_suffix(int version, int isrh)
+{
+ if (isrh) {
+ if (version >= NETDATA_EBPF_KERNEL_4_11)
+ return "4.18";
+ else
+ return "3.10";
+ } else {
+ if (version >= NETDATA_EBPF_KERNEL_5_10)
+ return "5.10";
+ else if (version >= NETDATA_EBPF_KERNEL_4_17)
+ return "5.4";
+ else if (version >= NETDATA_EBPF_KERNEL_4_15)
+ return "4.16";
+ else if (version >= NETDATA_EBPF_KERNEL_4_11)
+ return "4.14";
+ }
+
+ return NULL;
+}
+
+//----------------------------------------------------------------------------------------------------------------------
+
+int ebpf_update_kernel(ebpf_data_t *ed)
+{
+    char *kernel = ebpf_kernel_suffix(ed->running_on_kernel, (ed->isrh < 0) ? 0 : 1);
+    if (!kernel) // no eBPF object file matches this kernel
+        return -1;
+
+    size_t length = strlen(kernel);
+    strncpyz(ed->kernel_string, kernel, length);
+    ed->kernel_string[length] = '\0';
+
+ return 0;
+}
+
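+// Build the name of the eBPF object file to load: the first letter encodes the
+// run mode (0 -> 'r'eturn, 1 -> 'd'ev, 2 -> 'p'robe entry, matching
+// netdata_run_mode_t), e.g. "pnetdata_ebpf_process.4.14.o".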
+static int select_file(char *name, const char *program, size_t length, int mode, char *kernel_string)
+{
+ int ret = -1;
+ if (!mode)
+ ret = snprintf(name, length, "rnetdata_ebpf_%s.%s.o", program, kernel_string);
+ else if (mode == 1)
+ ret = snprintf(name, length, "dnetdata_ebpf_%s.%s.o", program, kernel_string);
+ else if (mode == 2)
+ ret = snprintf(name, length, "pnetdata_ebpf_%s.%s.o", program, kernel_string);
+
+ return ret;
+}
+
+struct bpf_link **ebpf_load_program(char *plugins_dir, ebpf_module_t *em, char *kernel_string, struct bpf_object **obj, int *map_fd)
+{
+ char lpath[4096];
+ char lname[128];
+ int prog_fd;
+
+ int test = select_file(lname, em->thread_name, (size_t)127, em->mode, kernel_string);
+ if (test < 0 || test > 127)
+ return NULL;
+
+ snprintf(lpath, 4096, "%s/%s", plugins_dir, lname);
+ if (bpf_prog_load(lpath, BPF_PROG_TYPE_KPROBE, obj, &prog_fd)) {
+ em->enabled = CONFIG_BOOLEAN_NO;
+ info("Cannot load program: %s", lpath);
+ return NULL;
+ } else {
+ info("The eBPF program %s was loaded with success.", em->thread_name);
+ }
+
+ struct bpf_map *map;
+ size_t i = 0;
+ bpf_map__for_each(map, *obj)
+ {
+ map_fd[i] = bpf_map__fd(map);
+ i++;
+ }
+
+ struct bpf_program *prog;
+    struct bpf_link **links = callocz(NETDATA_MAX_PROBES, sizeof(struct bpf_link *));
+ i = 0;
+ bpf_object__for_each_program(prog, *obj)
+ {
+ links[i] = bpf_program__attach(prog);
+ i++;
+ }
+
+ return links;
+}
diff --git a/libnetdata/ebpf/ebpf.h b/libnetdata/ebpf/ebpf.h
new file mode 100644
index 0000000..d4faccf
--- /dev/null
+++ b/libnetdata/ebpf/ebpf.h
@@ -0,0 +1,105 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_EBPF_H
+#define NETDATA_EBPF_H 1
+
+#include <bpf/bpf.h>
+#include <bpf/libbpf.h>
+
+#define NETDATA_DEBUGFS "/sys/kernel/debug/tracing/"
+
+/**
+ * The next magic number is obtained from the following math:
+ * 264960 = 4*65536 + 11*256 + 0
+ *
+ * For more details, please, read /usr/include/linux/version.h
+ */
+#define NETDATA_MINIMUM_EBPF_KERNEL 264960
+
+/**
+ * The Red Hat magic number is obtained from:
+ *
+ * 1797 = 7*256 + 5
+ *
+ * For more details, please, read /usr/include/linux/version.h
+ * in any Red Hat installation.
+ */
+#define NETDATA_MINIMUM_RH_VERSION 1797
+
+/**
+ * 2048 = 8*256 + 0
+ */
+#define NETDATA_RH_8 2048
+
+/**
+ * Kernel 5.10
+ *
+ * 330240 = 5*65536 + 10*256
+ */
+#define NETDATA_EBPF_KERNEL_5_10 330240
+
+/**
+ * Kernel 4.17
+ *
+ * 266496 = 4*65536 + 17*256
+ */
+#define NETDATA_EBPF_KERNEL_4_17 266496
+
+/**
+ * Kernel 4.15
+ *
+ * 265984 = 4*65536 + 15*256
+ */
+#define NETDATA_EBPF_KERNEL_4_15 265984
+
+/**
+ * Kernel 4.11
+ *
+ * 264960 = 4*65536 + 11*256
+ */
+#define NETDATA_EBPF_KERNEL_4_11 264960
+
+#define VERSION_STRING_LEN 256
+#define EBPF_KERNEL_REJECT_LIST_FILE "ebpf_kernel_reject_list.txt"
+
+typedef struct ebpf_data {
+ int *map_fd;
+
+ char *kernel_string;
+ uint32_t running_on_kernel;
+ int isrh;
+} ebpf_data_t;
+
+typedef enum {
+    MODE_RETURN = 0, // attach a kretprobe, i.e. trigger when the function returns
+    MODE_DEVMODE,    // developer mode: also log detailed descriptions of errors raised
+    MODE_ENTRY       // attach a kprobe, i.e. trigger when the function is called
+} netdata_run_mode_t;
+
+typedef struct ebpf_module {
+ const char *thread_name;
+ const char *config_name;
+ int enabled;
+ void *(*start_routine)(void *);
+ int update_time;
+ int global_charts;
+ int apps_charts;
+ netdata_run_mode_t mode;
+ uint32_t thread_id;
+ int optional;
+} ebpf_module_t;
+
+#define NETDATA_MAX_PROBES 64
+
+extern int get_kernel_version(char *out, int size);
+extern int get_redhat_release();
+extern int has_condition_to_run(int version);
+extern char *ebpf_kernel_suffix(int version, int isrh);
+extern int ebpf_update_kernel(ebpf_data_t *ef);
+extern struct bpf_link **ebpf_load_program(char *plugins_dir,
+ ebpf_module_t *em,
+ char *kernel_string,
+ struct bpf_object **obj,
+ int *map_fd);
+
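+// Usage sketch (illustrative only; error handling omitted). The version
+// returned by get_kernel_version() packs major.minor.patch as
+// major*65536 + minor*256 + patch, e.g. "5.4.0" -> 5*65536 + 4*256 + 0 = 328704:
+//
+//   char buf[VERSION_STRING_LEN];
+//   int version = get_kernel_version(buf, VERSION_STRING_LEN);
+//   if (has_condition_to_run(version)) {
+//       ebpf_data_t ed = { .running_on_kernel = version,
+//                          .isrh = get_redhat_release(),
+//                          .kernel_string = buf };
+//       ebpf_update_kernel(&ed); // picks "4.14", "5.4", ... for the .o file name
+//   }
+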
+#endif /* NETDATA_EBPF_H */
diff --git a/libnetdata/eval/Makefile.am b/libnetdata/eval/Makefile.am
new file mode 100644
index 0000000..161784b
--- /dev/null
+++ b/libnetdata/eval/Makefile.am
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/libnetdata/eval/README.md b/libnetdata/eval/README.md
new file mode 100644
index 0000000..72b4089
--- /dev/null
+++ b/libnetdata/eval/README.md
@@ -0,0 +1 @@
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Flibnetdata%2Feval%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/libnetdata/eval/eval.c b/libnetdata/eval/eval.c
new file mode 100644
index 0000000..b53b070
--- /dev/null
+++ b/libnetdata/eval/eval.c
@@ -0,0 +1,1190 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "../libnetdata.h"
+
+// ----------------------------------------------------------------------------
+// data structures for storing the parsed expression in memory
+
+typedef struct eval_value {
+ int type;
+
+ union {
+ calculated_number number;
+ EVAL_VARIABLE *variable;
+ struct eval_node *expression;
+ };
+} EVAL_VALUE;
+
+typedef struct eval_node {
+ int id;
+ unsigned char operator;
+ int precedence;
+
+ int count;
+ EVAL_VALUE ops[];
+} EVAL_NODE;
+
+// these are used for EVAL_NODE.operator
+// they are used as internal IDs to identify an operator
+// THEY ARE NOT USED FOR PARSING OPERATORS LIKE THAT
+#define EVAL_OPERATOR_NOP '\0'
+#define EVAL_OPERATOR_EXPRESSION_OPEN '('
+#define EVAL_OPERATOR_EXPRESSION_CLOSE ')'
+#define EVAL_OPERATOR_NOT '!'
+#define EVAL_OPERATOR_PLUS '+'
+#define EVAL_OPERATOR_MINUS '-'
+#define EVAL_OPERATOR_AND '&'
+#define EVAL_OPERATOR_OR '|'
+#define EVAL_OPERATOR_GREATER_THAN_OR_EQUAL 'G'
+#define EVAL_OPERATOR_LESS_THAN_OR_EQUAL 'L'
+#define EVAL_OPERATOR_NOT_EQUAL '~'
+#define EVAL_OPERATOR_EQUAL '='
+#define EVAL_OPERATOR_LESS '<'
+#define EVAL_OPERATOR_GREATER '>'
+#define EVAL_OPERATOR_MULTIPLY '*'
+#define EVAL_OPERATOR_DIVIDE '/'
+#define EVAL_OPERATOR_SIGN_PLUS 'P'
+#define EVAL_OPERATOR_SIGN_MINUS 'M'
+#define EVAL_OPERATOR_ABS 'A'
+#define EVAL_OPERATOR_IF_THEN_ELSE '?'
+
+// ----------------------------------------------------------------------------
+// forward declarations
+
+static inline void eval_node_free(EVAL_NODE *op);
+static inline EVAL_NODE *parse_full_expression(const char **string, int *error);
+static inline EVAL_NODE *parse_one_full_operand(const char **string, int *error);
+static inline calculated_number eval_node(EVAL_EXPRESSION *exp, EVAL_NODE *op, int *error);
+static inline void print_parsed_as_node(BUFFER *out, EVAL_NODE *op, int *error);
+static inline void print_parsed_as_constant(BUFFER *out, calculated_number n);
+
+// ----------------------------------------------------------------------------
+// evaluation of expressions
+
+static inline calculated_number eval_variable(EVAL_EXPRESSION *exp, EVAL_VARIABLE *v, int *error) {
+ static uint32_t this_hash = 0, now_hash = 0, after_hash = 0, before_hash = 0, status_hash = 0, removed_hash = 0, uninitialized_hash = 0, undefined_hash = 0, clear_hash = 0, warning_hash = 0, critical_hash = 0;
+ calculated_number n;
+
+ if(unlikely(this_hash == 0)) {
+ this_hash = simple_hash("this");
+ now_hash = simple_hash("now");
+ after_hash = simple_hash("after");
+ before_hash = simple_hash("before");
+ status_hash = simple_hash("status");
+ removed_hash = simple_hash("REMOVED");
+ uninitialized_hash = simple_hash("UNINITIALIZED");
+ undefined_hash = simple_hash("UNDEFINED");
+ clear_hash = simple_hash("CLEAR");
+ warning_hash = simple_hash("WARNING");
+ critical_hash = simple_hash("CRITICAL");
+ }
+
+ if(unlikely(v->hash == this_hash && !strcmp(v->name, "this"))) {
+ n = (exp->myself)?*exp->myself:NAN;
+ buffer_strcat(exp->error_msg, "[ $this = ");
+ print_parsed_as_constant(exp->error_msg, n);
+ buffer_strcat(exp->error_msg, " ] ");
+ return n;
+ }
+
+ if(unlikely(v->hash == after_hash && !strcmp(v->name, "after"))) {
+ n = (exp->after && *exp->after)?*exp->after:NAN;
+ buffer_strcat(exp->error_msg, "[ $after = ");
+ print_parsed_as_constant(exp->error_msg, n);
+ buffer_strcat(exp->error_msg, " ] ");
+ return n;
+ }
+
+ if(unlikely(v->hash == before_hash && !strcmp(v->name, "before"))) {
+ n = (exp->before && *exp->before)?*exp->before:NAN;
+ buffer_strcat(exp->error_msg, "[ $before = ");
+ print_parsed_as_constant(exp->error_msg, n);
+ buffer_strcat(exp->error_msg, " ] ");
+ return n;
+ }
+
+ if(unlikely(v->hash == now_hash && !strcmp(v->name, "now"))) {
+ n = now_realtime_sec();
+ buffer_strcat(exp->error_msg, "[ $now = ");
+ print_parsed_as_constant(exp->error_msg, n);
+ buffer_strcat(exp->error_msg, " ] ");
+ return n;
+ }
+
+ if(unlikely(v->hash == status_hash && !strcmp(v->name, "status"))) {
+ n = (exp->status)?*exp->status:RRDCALC_STATUS_UNINITIALIZED;
+ buffer_strcat(exp->error_msg, "[ $status = ");
+ print_parsed_as_constant(exp->error_msg, n);
+ buffer_strcat(exp->error_msg, " ] ");
+ return n;
+ }
+
+ if(unlikely(v->hash == removed_hash && !strcmp(v->name, "REMOVED"))) {
+ n = RRDCALC_STATUS_REMOVED;
+ buffer_strcat(exp->error_msg, "[ $REMOVED = ");
+ print_parsed_as_constant(exp->error_msg, n);
+ buffer_strcat(exp->error_msg, " ] ");
+ return n;
+ }
+
+ if(unlikely(v->hash == uninitialized_hash && !strcmp(v->name, "UNINITIALIZED"))) {
+ n = RRDCALC_STATUS_UNINITIALIZED;
+ buffer_strcat(exp->error_msg, "[ $UNINITIALIZED = ");
+ print_parsed_as_constant(exp->error_msg, n);
+ buffer_strcat(exp->error_msg, " ] ");
+ return n;
+ }
+
+ if(unlikely(v->hash == undefined_hash && !strcmp(v->name, "UNDEFINED"))) {
+ n = RRDCALC_STATUS_UNDEFINED;
+ buffer_strcat(exp->error_msg, "[ $UNDEFINED = ");
+ print_parsed_as_constant(exp->error_msg, n);
+ buffer_strcat(exp->error_msg, " ] ");
+ return n;
+ }
+
+ if(unlikely(v->hash == clear_hash && !strcmp(v->name, "CLEAR"))) {
+ n = RRDCALC_STATUS_CLEAR;
+ buffer_strcat(exp->error_msg, "[ $CLEAR = ");
+ print_parsed_as_constant(exp->error_msg, n);
+ buffer_strcat(exp->error_msg, " ] ");
+ return n;
+ }
+
+ if(unlikely(v->hash == warning_hash && !strcmp(v->name, "WARNING"))) {
+ n = RRDCALC_STATUS_WARNING;
+ buffer_strcat(exp->error_msg, "[ $WARNING = ");
+ print_parsed_as_constant(exp->error_msg, n);
+ buffer_strcat(exp->error_msg, " ] ");
+ return n;
+ }
+
+ if(unlikely(v->hash == critical_hash && !strcmp(v->name, "CRITICAL"))) {
+ n = RRDCALC_STATUS_CRITICAL;
+ buffer_strcat(exp->error_msg, "[ $CRITICAL = ");
+ print_parsed_as_constant(exp->error_msg, n);
+ buffer_strcat(exp->error_msg, " ] ");
+ return n;
+ }
+
+ if(exp->rrdcalc && health_variable_lookup(v->name, v->hash, exp->rrdcalc, &n)) {
+ buffer_sprintf(exp->error_msg, "[ ${%s} = ", v->name);
+ print_parsed_as_constant(exp->error_msg, n);
+ buffer_strcat(exp->error_msg, " ] ");
+ return n;
+ }
+
+ *error = EVAL_ERROR_UNKNOWN_VARIABLE;
+ buffer_sprintf(exp->error_msg, "[ undefined variable '%s' ] ", v->name);
+ return NAN;
+}
+
+static inline calculated_number eval_value(EVAL_EXPRESSION *exp, EVAL_VALUE *v, int *error) {
+ calculated_number n;
+
+ switch(v->type) {
+ case EVAL_VALUE_EXPRESSION:
+ n = eval_node(exp, v->expression, error);
+ break;
+
+ case EVAL_VALUE_NUMBER:
+ n = v->number;
+ break;
+
+ case EVAL_VALUE_VARIABLE:
+ n = eval_variable(exp, v->variable, error);
+ break;
+
+ default:
+ *error = EVAL_ERROR_INVALID_VALUE;
+ n = 0;
+ break;
+ }
+
+ return n;
+}
+
+static inline int is_true(calculated_number n) {
+ if(isnan(n)) return 0;
+ if(isinf(n)) return 1;
+ if(n == 0) return 0;
+ return 1;
+}
+
+calculated_number eval_and(EVAL_EXPRESSION *exp, EVAL_NODE *op, int *error) {
+ return is_true(eval_value(exp, &op->ops[0], error)) && is_true(eval_value(exp, &op->ops[1], error));
+}
+calculated_number eval_or(EVAL_EXPRESSION *exp, EVAL_NODE *op, int *error) {
+ return is_true(eval_value(exp, &op->ops[0], error)) || is_true(eval_value(exp, &op->ops[1], error));
+}
+calculated_number eval_greater_than_or_equal(EVAL_EXPRESSION *exp, EVAL_NODE *op, int *error) {
+ calculated_number n1 = eval_value(exp, &op->ops[0], error);
+ calculated_number n2 = eval_value(exp, &op->ops[1], error);
+ return isgreaterequal(n1, n2);
+}
+calculated_number eval_less_than_or_equal(EVAL_EXPRESSION *exp, EVAL_NODE *op, int *error) {
+ calculated_number n1 = eval_value(exp, &op->ops[0], error);
+ calculated_number n2 = eval_value(exp, &op->ops[1], error);
+ return islessequal(n1, n2);
+}
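+// note: unlike IEEE comparison semantics, two NaNs compare equal here, and
+// so do two infinities regardless of sign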
+calculated_number eval_equal(EVAL_EXPRESSION *exp, EVAL_NODE *op, int *error) {
+ calculated_number n1 = eval_value(exp, &op->ops[0], error);
+ calculated_number n2 = eval_value(exp, &op->ops[1], error);
+ if(isnan(n1) && isnan(n2)) return 1;
+ if(isinf(n1) && isinf(n2)) return 1;
+ if(isnan(n1) || isnan(n2)) return 0;
+ if(isinf(n1) || isinf(n2)) return 0;
+ return calculated_number_equal(n1, n2);
+}
+calculated_number eval_not_equal(EVAL_EXPRESSION *exp, EVAL_NODE *op, int *error) {
+ return !eval_equal(exp, op, error);
+}
+calculated_number eval_less(EVAL_EXPRESSION *exp, EVAL_NODE *op, int *error) {
+ calculated_number n1 = eval_value(exp, &op->ops[0], error);
+ calculated_number n2 = eval_value(exp, &op->ops[1], error);
+ return isless(n1, n2);
+}
+calculated_number eval_greater(EVAL_EXPRESSION *exp, EVAL_NODE *op, int *error) {
+ calculated_number n1 = eval_value(exp, &op->ops[0], error);
+ calculated_number n2 = eval_value(exp, &op->ops[1], error);
+ return isgreater(n1, n2);
+}
+calculated_number eval_plus(EVAL_EXPRESSION *exp, EVAL_NODE *op, int *error) {
+ calculated_number n1 = eval_value(exp, &op->ops[0], error);
+ calculated_number n2 = eval_value(exp, &op->ops[1], error);
+ if(isnan(n1) || isnan(n2)) return NAN;
+ if(isinf(n1) || isinf(n2)) return INFINITY;
+ return n1 + n2;
+}
+calculated_number eval_minus(EVAL_EXPRESSION *exp, EVAL_NODE *op, int *error) {
+ calculated_number n1 = eval_value(exp, &op->ops[0], error);
+ calculated_number n2 = eval_value(exp, &op->ops[1], error);
+ if(isnan(n1) || isnan(n2)) return NAN;
+ if(isinf(n1) || isinf(n2)) return INFINITY;
+ return n1 - n2;
+}
+calculated_number eval_multiply(EVAL_EXPRESSION *exp, EVAL_NODE *op, int *error) {
+ calculated_number n1 = eval_value(exp, &op->ops[0], error);
+ calculated_number n2 = eval_value(exp, &op->ops[1], error);
+ if(isnan(n1) || isnan(n2)) return NAN;
+ if(isinf(n1) || isinf(n2)) return INFINITY;
+ return n1 * n2;
+}
+calculated_number eval_divide(EVAL_EXPRESSION *exp, EVAL_NODE *op, int *error) {
+ calculated_number n1 = eval_value(exp, &op->ops[0], error);
+ calculated_number n2 = eval_value(exp, &op->ops[1], error);
+ if(isnan(n1) || isnan(n2)) return NAN;
+ if(isinf(n1) || isinf(n2)) return INFINITY;
+ return n1 / n2;
+}
+calculated_number eval_nop(EVAL_EXPRESSION *exp, EVAL_NODE *op, int *error) {
+ return eval_value(exp, &op->ops[0], error);
+}
+calculated_number eval_not(EVAL_EXPRESSION *exp, EVAL_NODE *op, int *error) {
+ return !is_true(eval_value(exp, &op->ops[0], error));
+}
+calculated_number eval_sign_plus(EVAL_EXPRESSION *exp, EVAL_NODE *op, int *error) {
+ return eval_value(exp, &op->ops[0], error);
+}
+calculated_number eval_sign_minus(EVAL_EXPRESSION *exp, EVAL_NODE *op, int *error) {
+ calculated_number n1 = eval_value(exp, &op->ops[0], error);
+ if(isnan(n1)) return NAN;
+ if(isinf(n1)) return INFINITY;
+ return -n1;
+}
+calculated_number eval_abs(EVAL_EXPRESSION *exp, EVAL_NODE *op, int *error) {
+ calculated_number n1 = eval_value(exp, &op->ops[0], error);
+ if(isnan(n1)) return NAN;
+ if(isinf(n1)) return INFINITY;
+    return fabsl(n1); // abs() takes an int and would truncate the value
+}
+calculated_number eval_if_then_else(EVAL_EXPRESSION *exp, EVAL_NODE *op, int *error) {
+ if(is_true(eval_value(exp, &op->ops[0], error)))
+ return eval_value(exp, &op->ops[1], error);
+ else
+ return eval_value(exp, &op->ops[2], error);
+}
+
+static struct operator {
+ const char *print_as;
+ char precedence;
+ char parameters;
+ char isfunction;
+ calculated_number (*eval)(EVAL_EXPRESSION *exp, EVAL_NODE *op, int *error);
+} operators[256] = {
+ // this is a random access array
+ // we always access it with a known EVAL_OPERATOR_X
+
+ [EVAL_OPERATOR_AND] = { "&&", 2, 2, 0, eval_and },
+ [EVAL_OPERATOR_OR] = { "||", 2, 2, 0, eval_or },
+ [EVAL_OPERATOR_GREATER_THAN_OR_EQUAL] = { ">=", 3, 2, 0, eval_greater_than_or_equal },
+ [EVAL_OPERATOR_LESS_THAN_OR_EQUAL] = { "<=", 3, 2, 0, eval_less_than_or_equal },
+ [EVAL_OPERATOR_NOT_EQUAL] = { "!=", 3, 2, 0, eval_not_equal },
+ [EVAL_OPERATOR_EQUAL] = { "==", 3, 2, 0, eval_equal },
+ [EVAL_OPERATOR_LESS] = { "<", 3, 2, 0, eval_less },
+ [EVAL_OPERATOR_GREATER] = { ">", 3, 2, 0, eval_greater },
+ [EVAL_OPERATOR_PLUS] = { "+", 4, 2, 0, eval_plus },
+ [EVAL_OPERATOR_MINUS] = { "-", 4, 2, 0, eval_minus },
+ [EVAL_OPERATOR_MULTIPLY] = { "*", 5, 2, 0, eval_multiply },
+ [EVAL_OPERATOR_DIVIDE] = { "/", 5, 2, 0, eval_divide },
+ [EVAL_OPERATOR_NOT] = { "!", 6, 1, 0, eval_not },
+ [EVAL_OPERATOR_SIGN_PLUS] = { "+", 6, 1, 0, eval_sign_plus },
+ [EVAL_OPERATOR_SIGN_MINUS] = { "-", 6, 1, 0, eval_sign_minus },
+    [EVAL_OPERATOR_ABS]                   = { "abs(", 6, 1, 1, eval_abs },
+ [EVAL_OPERATOR_IF_THEN_ELSE] = { "?", 7, 3, 0, eval_if_then_else },
+ [EVAL_OPERATOR_NOP] = { NULL, 8, 1, 0, eval_nop },
+ [EVAL_OPERATOR_EXPRESSION_OPEN] = { NULL, 8, 1, 0, eval_nop },
+
+ // this should exist in our evaluation list
+ [EVAL_OPERATOR_EXPRESSION_CLOSE] = { NULL, 99, 1, 0, eval_nop }
+};
+
+#define eval_precedence(operator) (operators[(unsigned char)(operator)].precedence)
+
+static inline calculated_number eval_node(EVAL_EXPRESSION *exp, EVAL_NODE *op, int *error) {
+ if(unlikely(op->count != operators[op->operator].parameters)) {
+ *error = EVAL_ERROR_INVALID_NUMBER_OF_OPERANDS;
+ return 0;
+ }
+
+ calculated_number n = operators[op->operator].eval(exp, op, error);
+
+ return n;
+}
+
+// ----------------------------------------------------------------------------
+// parsed-as generation
+
+static inline void print_parsed_as_variable(BUFFER *out, EVAL_VARIABLE *v, int *error) {
+ (void)error;
+ buffer_sprintf(out, "${%s}", v->name);
+}
+
+static inline void print_parsed_as_constant(BUFFER *out, calculated_number n) {
+ if(unlikely(isnan(n))) {
+ buffer_strcat(out, "nan");
+ return;
+ }
+
+ if(unlikely(isinf(n))) {
+ buffer_strcat(out, "inf");
+ return;
+ }
+
+ char b[100+1], *s;
+ snprintfz(b, 100, CALCULATED_NUMBER_FORMAT, n);
+
+ s = &b[strlen(b) - 1];
+ while(s > b && *s == '0') {
+ *s ='\0';
+ s--;
+ }
+
+ if(s > b && *s == '.')
+ *s = '\0';
+
+ buffer_strcat(out, b);
+}
+
+static inline void print_parsed_as_value(BUFFER *out, EVAL_VALUE *v, int *error) {
+ switch(v->type) {
+ case EVAL_VALUE_EXPRESSION:
+ print_parsed_as_node(out, v->expression, error);
+ break;
+
+ case EVAL_VALUE_NUMBER:
+ print_parsed_as_constant(out, v->number);
+ break;
+
+ case EVAL_VALUE_VARIABLE:
+ print_parsed_as_variable(out, v->variable, error);
+ break;
+
+ default:
+ *error = EVAL_ERROR_INVALID_VALUE;
+ break;
+ }
+}
+
+static inline void print_parsed_as_node(BUFFER *out, EVAL_NODE *op, int *error) {
+ if(unlikely(op->count != operators[op->operator].parameters)) {
+ *error = EVAL_ERROR_INVALID_NUMBER_OF_OPERANDS;
+ return;
+ }
+
+ if(operators[op->operator].parameters == 1) {
+
+ if(operators[op->operator].print_as)
+ buffer_sprintf(out, "%s", operators[op->operator].print_as);
+
+ //if(op->operator == EVAL_OPERATOR_EXPRESSION_OPEN)
+ // buffer_strcat(out, "(");
+
+ print_parsed_as_value(out, &op->ops[0], error);
+
+ //if(op->operator == EVAL_OPERATOR_EXPRESSION_OPEN)
+ // buffer_strcat(out, ")");
+ }
+
+ else if(operators[op->operator].parameters == 2) {
+ buffer_strcat(out, "(");
+ print_parsed_as_value(out, &op->ops[0], error);
+
+ if(operators[op->operator].print_as)
+ buffer_sprintf(out, " %s ", operators[op->operator].print_as);
+
+ print_parsed_as_value(out, &op->ops[1], error);
+ buffer_strcat(out, ")");
+ }
+ else if(op->operator == EVAL_OPERATOR_IF_THEN_ELSE && operators[op->operator].parameters == 3) {
+ buffer_strcat(out, "(");
+ print_parsed_as_value(out, &op->ops[0], error);
+
+ if(operators[op->operator].print_as)
+ buffer_sprintf(out, " %s ", operators[op->operator].print_as);
+
+ print_parsed_as_value(out, &op->ops[1], error);
+ buffer_strcat(out, " : ");
+ print_parsed_as_value(out, &op->ops[2], error);
+ buffer_strcat(out, ")");
+ }
+
+ if(operators[op->operator].isfunction)
+ buffer_strcat(out, ")");
+}
+
+// ----------------------------------------------------------------------------
+// parsing expressions
+
+// skip spaces
+static inline void skip_spaces(const char **string) {
+ const char *s = *string;
+ while(isspace(*s)) s++;
+ *string = s;
+}
+
+// what character can appear just after an operator keyword
+// like NOT AND OR ?
+static inline int isoperatorterm_word(const char s) {
+ if(isspace(s) || s == '(' || s == '$' || s == '!' || s == '-' || s == '+' || isdigit(s) || !s)
+ return 1;
+
+ return 0;
+}
+
+// what character can appear just after an operator symbol?
+static inline int isoperatorterm_symbol(const char s) {
+ if(isoperatorterm_word(s) || isalpha(s))
+ return 1;
+
+ return 0;
+}
+
+// return 1 if the character should never appear in a variable
+static inline int isvariableterm(const char s) {
+ if(isalnum(s) || s == '.' || s == '_')
+ return 0;
+
+ return 1;
+}
+
+// ----------------------------------------------------------------------------
+// parse operators
+
+static inline int parse_and(const char **string) {
+ const char *s = *string;
+
+ // AND
+ if((s[0] == 'A' || s[0] == 'a') && (s[1] == 'N' || s[1] == 'n') && (s[2] == 'D' || s[2] == 'd') && isoperatorterm_word(s[3])) {
+        *string = &s[3]; // leave the terminator for the next parser
+ return 1;
+ }
+
+ // &&
+ if(s[0] == '&' && s[1] == '&' && isoperatorterm_symbol(s[2])) {
+ *string = &s[2];
+ return 1;
+ }
+
+ return 0;
+}
+
+static inline int parse_or(const char **string) {
+ const char *s = *string;
+
+ // OR
+ if((s[0] == 'O' || s[0] == 'o') && (s[1] == 'R' || s[1] == 'r') && isoperatorterm_word(s[2])) {
+        *string = &s[2]; // leave the terminator for the next parser
+ return 1;
+ }
+
+ // ||
+ if(s[0] == '|' && s[1] == '|' && isoperatorterm_symbol(s[2])) {
+ *string = &s[2];
+ return 1;
+ }
+
+ return 0;
+}
+
+static inline int parse_greater_than_or_equal(const char **string) {
+ const char *s = *string;
+
+ // >=
+ if(s[0] == '>' && s[1] == '=' && isoperatorterm_symbol(s[2])) {
+ *string = &s[2];
+ return 1;
+ }
+
+ return 0;
+}
+
+static inline int parse_less_than_or_equal(const char **string) {
+ const char *s = *string;
+
+ // <=
+ if (s[0] == '<' && s[1] == '=' && isoperatorterm_symbol(s[2])) {
+ *string = &s[2];
+ return 1;
+ }
+
+ return 0;
+}
+
+static inline int parse_greater(const char **string) {
+ const char *s = *string;
+
+ // >
+ if(s[0] == '>' && isoperatorterm_symbol(s[1])) {
+ *string = &s[1];
+ return 1;
+ }
+
+ return 0;
+}
+
+static inline int parse_less(const char **string) {
+ const char *s = *string;
+
+ // <
+ if(s[0] == '<' && isoperatorterm_symbol(s[1])) {
+ *string = &s[1];
+ return 1;
+ }
+
+ return 0;
+}
+
+static inline int parse_equal(const char **string) {
+ const char *s = *string;
+
+ // ==
+ if(s[0] == '=' && s[1] == '=' && isoperatorterm_symbol(s[2])) {
+ *string = &s[2];
+ return 1;
+ }
+
+ // =
+ if(s[0] == '=' && isoperatorterm_symbol(s[1])) {
+ *string = &s[1];
+ return 1;
+ }
+
+ return 0;
+}
+
+static inline int parse_not_equal(const char **string) {
+ const char *s = *string;
+
+ // !=
+ if(s[0] == '!' && s[1] == '=' && isoperatorterm_symbol(s[2])) {
+ *string = &s[2];
+ return 1;
+ }
+
+ // <>
+ if(s[0] == '<' && s[1] == '>' && isoperatorterm_symbol(s[2])) {
+        *string = &s[2];
+        return 1;
+    }
+
+ return 0;
+}
+
+static inline int parse_not(const char **string) {
+ const char *s = *string;
+
+ // NOT
+ if((s[0] == 'N' || s[0] == 'n') && (s[1] == 'O' || s[1] == 'o') && (s[2] == 'T' || s[2] == 't') && isoperatorterm_word(s[3])) {
+ *string = &s[3];
+ return 1;
+ }
+
+ if(s[0] == '!') {
+ *string = &s[1];
+ return 1;
+ }
+
+ return 0;
+}
+
+static inline int parse_multiply(const char **string) {
+ const char *s = *string;
+
+ // *
+ if(s[0] == '*' && isoperatorterm_symbol(s[1])) {
+ *string = &s[1];
+ return 1;
+ }
+
+ return 0;
+}
+
+static inline int parse_divide(const char **string) {
+ const char *s = *string;
+
+ // /
+ if(s[0] == '/' && isoperatorterm_symbol(s[1])) {
+ *string = &s[1];
+ return 1;
+ }
+
+ return 0;
+}
+
+static inline int parse_minus(const char **string) {
+ const char *s = *string;
+
+ // -
+ if(s[0] == '-' && isoperatorterm_symbol(s[1])) {
+ *string = &s[1];
+ return 1;
+ }
+
+ return 0;
+}
+
+static inline int parse_plus(const char **string) {
+ const char *s = *string;
+
+ // +
+ if(s[0] == '+' && isoperatorterm_symbol(s[1])) {
+ *string = &s[1];
+ return 1;
+ }
+
+ return 0;
+}
+
+static inline int parse_open_subexpression(const char **string) {
+ const char *s = *string;
+
+ // (
+ if(s[0] == '(') {
+ *string = &s[1];
+ return 1;
+ }
+
+ return 0;
+}
+
+#define parse_close_function(x) parse_close_subexpression(x)
+
+static inline int parse_close_subexpression(const char **string) {
+ const char *s = *string;
+
+ // )
+ if(s[0] == ')') {
+ *string = &s[1];
+ return 1;
+ }
+
+ return 0;
+}
+
+static inline int parse_variable(const char **string, char *buffer, size_t len) {
+ const char *s = *string;
+
+ // $
+ if(*s == '$') {
+ size_t i = 0;
+ s++;
+
+ if(*s == '{') {
+ // ${variable_name}
+
+ s++;
+ while (*s && *s != '}' && i < len)
+ buffer[i++] = *s++;
+
+ if(*s == '}')
+ s++;
+ }
+ else {
+ // $variable_name
+
+ while (*s && !isvariableterm(*s) && i < len)
+ buffer[i++] = *s++;
+ }
+
+ buffer[i] = '\0';
+
+ if (buffer[0]) {
+ *string = s;
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+static inline int parse_constant(const char **string, calculated_number *number) {
+ char *end = NULL;
+ calculated_number n = str2ld(*string, &end);
+ if(unlikely(!end || *string == end)) {
+ *number = 0;
+ return 0;
+ }
+ *number = n;
+ *string = end;
+ return 1;
+}
+
+static inline int parse_abs(const char **string) {
+ const char *s = *string;
+
+ // ABS
+ if((s[0] == 'A' || s[0] == 'a') && (s[1] == 'B' || s[1] == 'b') && (s[2] == 'S' || s[2] == 's') && s[3] == '(') {
+ *string = &s[3];
+ return 1;
+ }
+
+ return 0;
+}
+
+static inline int parse_if_then_else(const char **string) {
+ const char *s = *string;
+
+ // ?
+ if(s[0] == '?') {
+ *string = &s[1];
+ return 1;
+ }
+
+ return 0;
+}
+
+static struct operator_parser {
+ unsigned char id;
+ int (*parse)(const char **);
+} operator_parsers[] = {
+ // the order in this list is important!
+ // the first matching will be used
+ // so place the longer of overlapping ones
+ // at the top
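+    // (e.g. ">=" must be tried before ">", and "<=" before "<")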
+
+ { EVAL_OPERATOR_AND, parse_and },
+ { EVAL_OPERATOR_OR, parse_or },
+ { EVAL_OPERATOR_GREATER_THAN_OR_EQUAL, parse_greater_than_or_equal },
+ { EVAL_OPERATOR_LESS_THAN_OR_EQUAL, parse_less_than_or_equal },
+ { EVAL_OPERATOR_NOT_EQUAL, parse_not_equal },
+ { EVAL_OPERATOR_EQUAL, parse_equal },
+ { EVAL_OPERATOR_LESS, parse_less },
+ { EVAL_OPERATOR_GREATER, parse_greater },
+ { EVAL_OPERATOR_PLUS, parse_plus },
+ { EVAL_OPERATOR_MINUS, parse_minus },
+ { EVAL_OPERATOR_MULTIPLY, parse_multiply },
+ { EVAL_OPERATOR_DIVIDE, parse_divide },
+ { EVAL_OPERATOR_IF_THEN_ELSE, parse_if_then_else },
+
+ /* we should not put in this list the following:
+ *
+ * - NOT
+ * - (
+ * - )
+ *
+ * these are handled in code
+ */
+
+ // termination
+ { EVAL_OPERATOR_NOP, NULL }
+};
+
+static inline unsigned char parse_operator(const char **string, int *precedence) {
+ skip_spaces(string);
+
+ int i;
+ for(i = 0 ; operator_parsers[i].parse != NULL ; i++)
+ if(operator_parsers[i].parse(string)) {
+ if(precedence) *precedence = eval_precedence(operator_parsers[i].id);
+ return operator_parsers[i].id;
+ }
+
+ return EVAL_OPERATOR_NOP;
+}
+
+// ----------------------------------------------------------------------------
+// memory management
+
+static inline EVAL_NODE *eval_node_alloc(int count) {
+ static int id = 1;
+
+ EVAL_NODE *op = callocz(1, sizeof(EVAL_NODE) + (sizeof(EVAL_VALUE) * count));
+
+ op->id = id++;
+ op->operator = EVAL_OPERATOR_NOP;
+ op->precedence = eval_precedence(EVAL_OPERATOR_NOP);
+ op->count = count;
+ return op;
+}
+
+static inline void eval_node_set_value_to_node(EVAL_NODE *op, int pos, EVAL_NODE *value) {
+ if(pos >= op->count)
+ fatal("Invalid request to set position %d of OPERAND that has only %d values", pos + 1, op->count + 1);
+
+ op->ops[pos].type = EVAL_VALUE_EXPRESSION;
+ op->ops[pos].expression = value;
+}
+
+static inline void eval_node_set_value_to_constant(EVAL_NODE *op, int pos, calculated_number value) {
+ if(pos >= op->count)
+ fatal("Invalid request to set position %d of OPERAND that has only %d values", pos + 1, op->count + 1);
+
+ op->ops[pos].type = EVAL_VALUE_NUMBER;
+ op->ops[pos].number = value;
+}
+
+static inline void eval_node_set_value_to_variable(EVAL_NODE *op, int pos, const char *variable) {
+ if(pos >= op->count)
+ fatal("Invalid request to set position %d of OPERAND that has only %d values", pos + 1, op->count + 1);
+
+ op->ops[pos].type = EVAL_VALUE_VARIABLE;
+ op->ops[pos].variable = callocz(1, sizeof(EVAL_VARIABLE));
+ op->ops[pos].variable->name = strdupz(variable);
+ op->ops[pos].variable->hash = simple_hash(op->ops[pos].variable->name);
+}
+
+static inline void eval_variable_free(EVAL_VARIABLE *v) {
+ freez(v->name);
+ freez(v);
+}
+
+static inline void eval_value_free(EVAL_VALUE *v) {
+ switch(v->type) {
+ case EVAL_VALUE_EXPRESSION:
+ eval_node_free(v->expression);
+ break;
+
+ case EVAL_VALUE_VARIABLE:
+ eval_variable_free(v->variable);
+ break;
+
+ default:
+ break;
+ }
+}
+
+static inline void eval_node_free(EVAL_NODE *op) {
+ if(op->count) {
+ int i;
+ for(i = op->count - 1; i >= 0 ;i--)
+ eval_value_free(&op->ops[i]);
+ }
+
+ freez(op);
+}
+
+// ----------------------------------------------------------------------------
+// the parsing logic
+
+// helper function to avoid allocations all over the place
+static inline EVAL_NODE *parse_next_operand_given_its_operator(const char **string, unsigned char operator_type, int *error) {
+ EVAL_NODE *sub = parse_one_full_operand(string, error);
+ if(!sub) return NULL;
+
+ EVAL_NODE *op = eval_node_alloc(1);
+ op->operator = operator_type;
+ eval_node_set_value_to_node(op, 0, sub);
+ return op;
+}
+
+// parse a full operand, including its sign or other associative operator (e.g. NOT)
+static inline EVAL_NODE *parse_one_full_operand(const char **string, int *error) {
+ char variable_buffer[EVAL_MAX_VARIABLE_NAME_LENGTH + 1];
+ EVAL_NODE *op1 = NULL;
+ calculated_number number;
+
+ *error = EVAL_ERROR_OK;
+
+ skip_spaces(string);
+ if(!(**string)) {
+ *error = EVAL_ERROR_MISSING_OPERAND;
+ return NULL;
+ }
+
+    if(parse_not(string)) {
+        op1 = parse_next_operand_given_its_operator(string, EVAL_OPERATOR_NOT, error);
+        if(op1) op1->precedence = eval_precedence(EVAL_OPERATOR_NOT); // NULL when the operand failed to parse
+    }
+    else if(parse_plus(string)) {
+        op1 = parse_next_operand_given_its_operator(string, EVAL_OPERATOR_SIGN_PLUS, error);
+        if(op1) op1->precedence = eval_precedence(EVAL_OPERATOR_SIGN_PLUS);
+    }
+    else if(parse_minus(string)) {
+        op1 = parse_next_operand_given_its_operator(string, EVAL_OPERATOR_SIGN_MINUS, error);
+        if(op1) op1->precedence = eval_precedence(EVAL_OPERATOR_SIGN_MINUS);
+    }
+    else if(parse_abs(string)) {
+        op1 = parse_next_operand_given_its_operator(string, EVAL_OPERATOR_ABS, error);
+        if(op1) op1->precedence = eval_precedence(EVAL_OPERATOR_ABS);
+    }
+ else if(parse_open_subexpression(string)) {
+ EVAL_NODE *sub = parse_full_expression(string, error);
+ if(sub) {
+ op1 = eval_node_alloc(1);
+ op1->operator = EVAL_OPERATOR_EXPRESSION_OPEN;
+ op1->precedence = eval_precedence(EVAL_OPERATOR_EXPRESSION_OPEN);
+ eval_node_set_value_to_node(op1, 0, sub);
+ if(!parse_close_subexpression(string)) {
+ *error = EVAL_ERROR_MISSING_CLOSE_SUBEXPRESSION;
+ eval_node_free(op1);
+ return NULL;
+ }
+ }
+ }
+ else if(parse_variable(string, variable_buffer, EVAL_MAX_VARIABLE_NAME_LENGTH)) {
+ op1 = eval_node_alloc(1);
+ op1->operator = EVAL_OPERATOR_NOP;
+ eval_node_set_value_to_variable(op1, 0, variable_buffer);
+ }
+ else if(parse_constant(string, &number)) {
+ op1 = eval_node_alloc(1);
+ op1->operator = EVAL_OPERATOR_NOP;
+ eval_node_set_value_to_constant(op1, 0, number);
+ }
+ else if(**string)
+ *error = EVAL_ERROR_UNKNOWN_OPERAND;
+ else
+ *error = EVAL_ERROR_MISSING_OPERAND;
+
+ return op1;
+}
+
+// parse an operator and the rest of the expression
+// precedence processing is handled here
+static inline EVAL_NODE *parse_rest_of_expression(const char **string, int *error, EVAL_NODE *op1) {
+ EVAL_NODE *op2 = NULL;
+ unsigned char operator;
+ int precedence;
+
+ operator = parse_operator(string, &precedence);
+ skip_spaces(string);
+
+ if(operator != EVAL_OPERATOR_NOP) {
+ op2 = parse_one_full_operand(string, error);
+ if(!op2) {
+ // error is already reported
+ eval_node_free(op1);
+ return NULL;
+ }
+
+ EVAL_NODE *op = eval_node_alloc(operators[operator].parameters);
+ op->operator = operator;
+ op->precedence = precedence;
+
+ if(operator == EVAL_OPERATOR_IF_THEN_ELSE && op->count == 3) {
+ skip_spaces(string);
+
+ if(**string != ':') {
+ eval_node_free(op);
+ eval_node_free(op1);
+ eval_node_free(op2);
+ *error = EVAL_ERROR_IF_THEN_ELSE_MISSING_ELSE;
+ return NULL;
+ }
+ (*string)++;
+
+ skip_spaces(string);
+
+ EVAL_NODE *op3 = parse_one_full_operand(string, error);
+ if(!op3) {
+ eval_node_free(op);
+ eval_node_free(op1);
+ eval_node_free(op2);
+ // error is already reported
+ return NULL;
+ }
+
+ eval_node_set_value_to_node(op, 2, op3);
+ }
+
+ eval_node_set_value_to_node(op, 1, op2);
+
+ // precedence processing
+ // if this operator has a higher precedence compared to its next
+ // put the next operator on top of us (top = evaluated later)
+ // function recursion does the rest...
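+        // e.g. for "1 + 2 * 3": op1 arrives here as (1 + 2) and the new '*'
+        // node steals its right operand, yielding (1 + (2 * 3))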
+        if(op->precedence > op1->precedence && op1->count == 2 && op1->operator != EVAL_OPERATOR_EXPRESSION_OPEN && op1->ops[1].type == EVAL_VALUE_EXPRESSION) {
+ eval_node_set_value_to_node(op, 0, op1->ops[1].expression);
+ op1->ops[1].expression = op;
+ op = op1;
+ }
+ else
+ eval_node_set_value_to_node(op, 0, op1);
+
+ return parse_rest_of_expression(string, error, op);
+ }
+ else if(**string == ')') {
+ ;
+ }
+ else if(**string) {
+ eval_node_free(op1);
+ op1 = NULL;
+ *error = EVAL_ERROR_MISSING_OPERATOR;
+ }
+
+ return op1;
+}
+
+// high level function to parse an expression or a sub-expression
+static inline EVAL_NODE *parse_full_expression(const char **string, int *error) {
+ EVAL_NODE *op1 = parse_one_full_operand(string, error);
+ if(!op1) {
+ *error = EVAL_ERROR_MISSING_OPERAND;
+ return NULL;
+ }
+
+ return parse_rest_of_expression(string, error, op1);
+}
+
+// ----------------------------------------------------------------------------
+// public API
+
+int expression_evaluate(EVAL_EXPRESSION *expression) {
+ expression->error = EVAL_ERROR_OK;
+
+ buffer_reset(expression->error_msg);
+ expression->result = eval_node(expression, (EVAL_NODE *)expression->nodes, &expression->error);
+
+ if(unlikely(isnan(expression->result))) {
+ if(expression->error == EVAL_ERROR_OK)
+ expression->error = EVAL_ERROR_VALUE_IS_NAN;
+ }
+ else if(unlikely(isinf(expression->result))) {
+ if(expression->error == EVAL_ERROR_OK)
+ expression->error = EVAL_ERROR_VALUE_IS_INFINITE;
+ }
+ else if(unlikely(expression->error == EVAL_ERROR_UNKNOWN_VARIABLE)) {
+ // although there is an unknown variable
+ // the expression was evaluated successfully
+ expression->error = EVAL_ERROR_OK;
+ }
+
+ if(expression->error != EVAL_ERROR_OK) {
+ expression->result = NAN;
+
+ if(buffer_strlen(expression->error_msg))
+ buffer_strcat(expression->error_msg, "; ");
+
+ buffer_sprintf(expression->error_msg, "failed to evaluate expression with error %d (%s)", expression->error, expression_strerror(expression->error));
+ return 0;
+ }
+
+ return 1;
+}
+
+EVAL_EXPRESSION *expression_parse(const char *string, const char **failed_at, int *error) {
+ const char *s = string;
+ int err = EVAL_ERROR_OK;
+
+ EVAL_NODE *op = parse_full_expression(&s, &err);
+
+ if(*s) {
+ if(op) {
+ eval_node_free(op);
+ op = NULL;
+ }
+ err = EVAL_ERROR_REMAINING_GARBAGE;
+ }
+
+ if (failed_at) *failed_at = s;
+ if (error) *error = err;
+
+ if(!op) {
+ unsigned long pos = s - string + 1;
+ error("failed to parse expression '%s': %s at character %lu (i.e.: '%s').", string, expression_strerror(err), pos, s);
+ return NULL;
+ }
+
+ BUFFER *out = buffer_create(1024);
+ print_parsed_as_node(out, op, &err);
+ if(err != EVAL_ERROR_OK) {
+ error("failed to re-generate expression '%s' with reason: %s", string, expression_strerror(err));
+ eval_node_free(op);
+ buffer_free(out);
+ return NULL;
+ }
+
+ EVAL_EXPRESSION *exp = callocz(1, sizeof(EVAL_EXPRESSION));
+
+ exp->source = strdupz(string);
+ exp->parsed_as = strdupz(buffer_tostring(out));
+ buffer_free(out);
+
+ exp->error_msg = buffer_create(100);
+ exp->nodes = (void *)op;
+
+ return exp;
+}
+
+void expression_free(EVAL_EXPRESSION *expression) {
+ if(!expression) return;
+
+ if(expression->nodes) eval_node_free((EVAL_NODE *)expression->nodes);
+ freez((void *)expression->source);
+ freez((void *)expression->parsed_as);
+ buffer_free(expression->error_msg);
+ freez(expression);
+}
+
+const char *expression_strerror(int error) {
+ switch(error) {
+ case EVAL_ERROR_OK:
+ return "success";
+
+ case EVAL_ERROR_MISSING_CLOSE_SUBEXPRESSION:
+ return "missing closing parenthesis";
+
+ case EVAL_ERROR_UNKNOWN_OPERAND:
+ return "unknown operand";
+
+ case EVAL_ERROR_MISSING_OPERAND:
+ return "expected operand";
+
+ case EVAL_ERROR_MISSING_OPERATOR:
+ return "expected operator";
+
+ case EVAL_ERROR_REMAINING_GARBAGE:
+ return "remaining characters after expression";
+
+ case EVAL_ERROR_INVALID_VALUE:
+ return "invalid value structure - internal error";
+
+ case EVAL_ERROR_INVALID_NUMBER_OF_OPERANDS:
+ return "wrong number of operands for operation - internal error";
+
+ case EVAL_ERROR_VALUE_IS_NAN:
+ return "value is unset";
+
+ case EVAL_ERROR_VALUE_IS_INFINITE:
+ return "computed value is infinite";
+
+ case EVAL_ERROR_UNKNOWN_VARIABLE:
+ return "undefined variable";
+
+ case EVAL_ERROR_IF_THEN_ELSE_MISSING_ELSE:
+ return "missing second sub-expression of inline conditional";
+
+ default:
+ return "unknown error";
+ }
+}
diff --git a/libnetdata/eval/eval.h b/libnetdata/eval/eval.h
new file mode 100644
index 0000000..fc03d7c
--- /dev/null
+++ b/libnetdata/eval/eval.h
@@ -0,0 +1,88 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_EVAL_H
+#define NETDATA_EVAL_H 1
+
+#include "../libnetdata.h"
+
+#define EVAL_MAX_VARIABLE_NAME_LENGTH 300
+
+typedef enum rrdcalc_status {
+ RRDCALC_STATUS_REMOVED = -2,
+ RRDCALC_STATUS_UNDEFINED = -1,
+ RRDCALC_STATUS_UNINITIALIZED = 0,
+ RRDCALC_STATUS_CLEAR = 1,
+ RRDCALC_STATUS_RAISED = 2,
+ RRDCALC_STATUS_WARNING = 3,
+ RRDCALC_STATUS_CRITICAL = 4
+} RRDCALC_STATUS;
+
+typedef struct eval_variable {
+ char *name;
+ uint32_t hash;
+ struct eval_variable *next;
+} EVAL_VARIABLE;
+
+typedef struct eval_expression {
+ const char *source;
+ const char *parsed_as;
+
+ RRDCALC_STATUS *status;
+ calculated_number *myself;
+ time_t *after;
+ time_t *before;
+
+ calculated_number result;
+
+ int error;
+ BUFFER *error_msg;
+
+ // hidden EVAL_NODE *
+ void *nodes;
+
+ // custom data to be used for looking up variables
+ struct rrdcalc *rrdcalc;
+} EVAL_EXPRESSION;
+
+#define EVAL_VALUE_INVALID 0
+#define EVAL_VALUE_NUMBER 1
+#define EVAL_VALUE_VARIABLE 2
+#define EVAL_VALUE_EXPRESSION 3
+
+// parsing and evaluation
+#define EVAL_ERROR_OK 0
+
+// parsing errors
+#define EVAL_ERROR_MISSING_CLOSE_SUBEXPRESSION 1
+#define EVAL_ERROR_UNKNOWN_OPERAND 2
+#define EVAL_ERROR_MISSING_OPERAND 3
+#define EVAL_ERROR_MISSING_OPERATOR 4
+#define EVAL_ERROR_REMAINING_GARBAGE 5
+#define EVAL_ERROR_IF_THEN_ELSE_MISSING_ELSE 6
+
+// evaluation errors
+#define EVAL_ERROR_INVALID_VALUE 101
+#define EVAL_ERROR_INVALID_NUMBER_OF_OPERANDS 102
+#define EVAL_ERROR_VALUE_IS_NAN 103
+#define EVAL_ERROR_VALUE_IS_INFINITE 104
+#define EVAL_ERROR_UNKNOWN_VARIABLE 105
+
+// parse the given string as an expression and return:
+// a pointer to an expression if it parsed OK
+// NULL in which case the pointer to error has the error code
+extern EVAL_EXPRESSION *expression_parse(const char *string, const char **failed_at, int *error);
+
+// free all resources allocated for an expression
+extern void expression_free(EVAL_EXPRESSION *expression);
+
+// convert an error code to a message
+extern const char *expression_strerror(int error);
+
+// evaluate an expression and return
+// 1 = OK, the result is in: expression->result
+// 0 = FAILED, the error message is in: buffer_tostring(expression->error_msg)
+extern int expression_evaluate(EVAL_EXPRESSION *expression);
+
+extern int health_variable_lookup(const char *variable, uint32_t hash, struct rrdcalc *rc, calculated_number *result);
+
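+// usage sketch (illustrative only, not part of the build):
+//
+//   int error = 0;
+//   const char *failed_at = NULL;
+//   EVAL_EXPRESSION *exp = expression_parse("$this > 10 ? 1 : 0", &failed_at, &error);
+//   if (exp) {
+//       calculated_number value = 25;
+//       exp->myself = &value;                 // binds $this
+//       if (expression_evaluate(exp))
+//           printf("result: %f\n", (double)exp->result);
+//       expression_free(exp);
+//   }
+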
+#endif //NETDATA_EVAL_H
diff --git a/libnetdata/health/Makefile.am b/libnetdata/health/Makefile.am
new file mode 100644
index 0000000..643458b
--- /dev/null
+++ b/libnetdata/health/Makefile.am
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+dist_noinst_DATA = \
+ $(NULL)
diff --git a/libnetdata/health/health.c b/libnetdata/health/health.c
new file mode 100644
index 0000000..74f7a02
--- /dev/null
+++ b/libnetdata/health/health.c
@@ -0,0 +1,173 @@
+#include "health.h"
+
+SILENCERS *silencers;
+
+/**
+ * Create Silencer
+ *
+ * Allocate a new silencer to Netdata.
+ *
+ * @return It returns the address of the silencer on success and NULL otherwise
+ */
+SILENCER *create_silencer(void) {
+ SILENCER *t = callocz(1, sizeof(SILENCER));
+ debug(D_HEALTH, "HEALTH command API: Created empty silencer");
+
+ return t;
+}
+
+/**
+ * Health Silencers add
+ *
+ * Add one more silencer to the list of silencers.
+ *
+ * @param silencer
+ */
+void health_silencers_add(SILENCER *silencer) {
+ // Add the created instance to the linked list in silencers
+ silencer->next = silencers->silencers;
+ silencers->silencers = silencer;
+ debug(D_HEALTH, "HEALTH command API: Added silencer %s:%s:%s:%s:%s", silencer->alarms,
+ silencer->charts, silencer->contexts, silencer->hosts, silencer->families
+ );
+}
+
+/**
+ * Silencers Add Parameter
+ *
+ * Create a new silencer and adjust the variables
+ *
+ * @param silencer a pointer to the silencer that will be adjusted
+ * @param key the key value sent by client
+ * @param value the value sent to the key
+ *
+ * @return It returns the silencer configured on success and NULL otherwise
+ */
+SILENCER *health_silencers_addparam(SILENCER *silencer, char *key, char *value) {
+ static uint32_t
+ hash_alarm = 0,
+ hash_template = 0,
+ hash_chart = 0,
+ hash_context = 0,
+ hash_host = 0,
+ hash_families = 0;
+
+ if (unlikely(!hash_alarm)) {
+ hash_alarm = simple_uhash(HEALTH_ALARM_KEY);
+ hash_template = simple_uhash(HEALTH_TEMPLATE_KEY);
+ hash_chart = simple_uhash(HEALTH_CHART_KEY);
+ hash_context = simple_uhash(HEALTH_CONTEXT_KEY);
+ hash_host = simple_uhash(HEALTH_HOST_KEY);
+ hash_families = simple_uhash(HEALTH_FAMILIES_KEY);
+ }
+
+ uint32_t hash = simple_uhash(key);
+ if (unlikely(silencer == NULL)) {
+ if (
+ (hash == hash_alarm && !strcasecmp(key, HEALTH_ALARM_KEY)) ||
+ (hash == hash_template && !strcasecmp(key, HEALTH_TEMPLATE_KEY)) ||
+ (hash == hash_chart && !strcasecmp(key, HEALTH_CHART_KEY)) ||
+ (hash == hash_context && !strcasecmp(key, HEALTH_CONTEXT_KEY)) ||
+ (hash == hash_host && !strcasecmp(key, HEALTH_HOST_KEY)) ||
+ (hash == hash_families && !strcasecmp(key, HEALTH_FAMILIES_KEY))
+ ) {
+ silencer = create_silencer();
+ if(!silencer) {
+ error("Cannot add a new silencer to Netdata");
+ return NULL;
+ }
+ }
+ }
+
+ if (hash == hash_alarm && !strcasecmp(key, HEALTH_ALARM_KEY)) {
+ silencer->alarms = strdupz(value);
+ silencer->alarms_pattern = simple_pattern_create(silencer->alarms, NULL, SIMPLE_PATTERN_EXACT);
+ } else if (hash == hash_chart && !strcasecmp(key, HEALTH_CHART_KEY)) {
+ silencer->charts = strdupz(value);
+ silencer->charts_pattern = simple_pattern_create(silencer->charts, NULL, SIMPLE_PATTERN_EXACT);
+ } else if (hash == hash_context && !strcasecmp(key, HEALTH_CONTEXT_KEY)) {
+ silencer->contexts = strdupz(value);
+ silencer->contexts_pattern = simple_pattern_create(silencer->contexts, NULL, SIMPLE_PATTERN_EXACT);
+ } else if (hash == hash_host && !strcasecmp(key, HEALTH_HOST_KEY)) {
+ silencer->hosts = strdupz(value);
+ silencer->hosts_pattern = simple_pattern_create(silencer->hosts, NULL, SIMPLE_PATTERN_EXACT);
+ } else if (hash == hash_families && !strcasecmp(key, HEALTH_FAMILIES_KEY)) {
+ silencer->families = strdupz(value);
+ silencer->families_pattern = simple_pattern_create(silencer->families, NULL, SIMPLE_PATTERN_EXACT);
+ }
+
+ return silencer;
+}
+
+/**
+ * JSON Read Callback
+ *
+ * Callback called by netdata to create the silencer.
+ *
+ * @param e the main json structure
+ *
+ * @return It always return 0.
+ */
+int health_silencers_json_read_callback(JSON_ENTRY *e)
+{
+ switch(e->type) {
+ case JSON_OBJECT:
+#ifndef ENABLE_JSONC
+ e->callback_function = health_silencers_json_read_callback;
+ if(strcmp(e->name,"")) {
+ // init silencer
+ debug(D_HEALTH, "JSON: Got object with a name, initializing new silencer for %s",e->name);
+#endif
+ e->callback_data = create_silencer();
+ if(e->callback_data) {
+ health_silencers_add(e->callback_data);
+ }
+#ifndef ENABLE_JSONC
+ }
+#endif
+ break;
+
+ case JSON_ARRAY:
+ e->callback_function = health_silencers_json_read_callback;
+ break;
+
+ case JSON_STRING:
+ if(!strcmp(e->name,"type")) {
+ debug(D_HEALTH, "JSON: Processing type=%s",e->data.string);
+ if (!strcmp(e->data.string,"SILENCE")) silencers->stype = STYPE_SILENCE_NOTIFICATIONS;
+ else if (!strcmp(e->data.string,"DISABLE")) silencers->stype = STYPE_DISABLE_ALARMS;
+ } else {
+ debug(D_HEALTH, "JSON: Adding %s=%s", e->name, e->data.string);
+ if (e->callback_data)
+ (void)health_silencers_addparam(e->callback_data, e->name, e->data.string);
+ }
+ break;
+
+ case JSON_BOOLEAN:
+ debug(D_HEALTH, "JSON: Processing all_alarms");
+ silencers->all_alarms=e->data.boolean?1:0;
+ break;
+
+ case JSON_NUMBER:
+ case JSON_NULL:
+ break;
+ }
+
+ return 0;
+}
+
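
For reference, a payload this callback can walk might look like the sketch below. The `type`, `alarm` and `chart` keys come straight from the code above; the `all` and `silencers` field names are assumptions inferred from the callback behavior, not a documented schema:

```c
/* a sketch, assuming it is compiled inside the netdata tree so that
 * json_parse(), health_initialize_global_silencers() and error() are
 * available; the "all"/"silencers" field names are assumptions. */
static void load_example_silencers(void) {
    char payload[] =
        "{ \"all\": false, \"type\": \"SILENCE\","
        "  \"silencers\": [ { \"alarm\": \"ram_usage\", \"chart\": \"system.ram\" } ] }";

    health_initialize_global_silencers();           // allocate the global SILENCERS
    if (json_parse(payload, NULL, health_silencers_json_read_callback) != JSON_OK)
        error("could not parse the example silencers payload");
}
```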
+/**
+ * Initialize Global Silencers
+ *
+ * Initialize the global silencers structure for the whole netdata system.
+ *
+ * @return It always returns 0 (mallocz() aborts instead of returning on failure)
+ */
+int health_initialize_global_silencers() {
+ silencers = mallocz(sizeof(SILENCERS));
+ silencers->all_alarms=0;
+ silencers->stype=STYPE_NONE;
+ silencers->silencers=NULL;
+
+ return 0;
+} \ No newline at end of file
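
Putting the pieces of this file together, the programmatic path (no JSON involved) looks like the following sketch; the alarm and chart names are examples only:

```c
/* a minimal sketch of the programmatic path, assuming libnetdata is linked */
static void silence_ram_alarm(void) {
    health_initialize_global_silencers();

    /* passing NULL makes health_silencers_addparam() allocate the silencer */
    SILENCER *s = health_silencers_addparam(NULL, "alarm", "ram_usage");
    if (s) {
        health_silencers_addparam(s, "chart", "system.ram");
        health_silencers_add(s);                    // push onto the global list
        silencers->stype = STYPE_SILENCE_NOTIFICATIONS;
    }
}
```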
diff --git a/libnetdata/health/health.h b/libnetdata/health/health.h
new file mode 100644
index 0000000..f7580ed
--- /dev/null
+++ b/libnetdata/health/health.h
@@ -0,0 +1,55 @@
+#ifndef NETDATA_HEALTH_LIB
+# define NETDATA_HEALTH_LIB 1
+
+# include "../libnetdata.h"
+
+#define HEALTH_ALARM_KEY "alarm"
+#define HEALTH_TEMPLATE_KEY "template"
+#define HEALTH_CONTEXT_KEY "context"
+#define HEALTH_CHART_KEY "chart"
+#define HEALTH_HOST_KEY "hosts"
+#define HEALTH_OS_KEY "os"
+#define HEALTH_FAMILIES_KEY "families"
+#define HEALTH_LOOKUP_KEY "lookup"
+#define HEALTH_CALC_KEY "calc"
+
+typedef struct silencer {
+ char *alarms;
+ SIMPLE_PATTERN *alarms_pattern;
+
+ char *hosts;
+ SIMPLE_PATTERN *hosts_pattern;
+
+ char *contexts;
+ SIMPLE_PATTERN *contexts_pattern;
+
+ char *charts;
+ SIMPLE_PATTERN *charts_pattern;
+
+ char *families;
+ SIMPLE_PATTERN *families_pattern;
+
+ struct silencer *next;
+} SILENCER;
+
+typedef enum silence_type {
+ STYPE_NONE,
+ STYPE_DISABLE_ALARMS,
+ STYPE_SILENCE_NOTIFICATIONS
+} SILENCE_TYPE;
+
+typedef struct silencers {
+ int all_alarms;
+ SILENCE_TYPE stype;
+ SILENCER *silencers;
+} SILENCERS;
+
+extern SILENCERS *silencers;
+
+extern SILENCER *create_silencer(void);
+extern int health_silencers_json_read_callback(JSON_ENTRY *e);
+extern void health_silencers_add(SILENCER *silencer);
+extern SILENCER * health_silencers_addparam(SILENCER *silencer, char *key, char *value);
+extern int health_initialize_global_silencers();
+
+#endif
diff --git a/libnetdata/inlined.h b/libnetdata/inlined.h
new file mode 100644
index 0000000..6f236b8
--- /dev/null
+++ b/libnetdata/inlined.h
@@ -0,0 +1,318 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_INLINED_H
+#define NETDATA_INLINED_H 1
+
+#include "libnetdata.h"
+
+#ifdef KERNEL_32BIT
+typedef uint32_t kernel_uint_t;
+#define str2kernel_uint_t(string) str2uint32_t(string)
+#define KERNEL_UINT_FORMAT "%u"
+#else
+typedef uint64_t kernel_uint_t;
+#define str2kernel_uint_t(string) str2uint64_t(string)
+#define KERNEL_UINT_FORMAT "%" PRIu64
+#endif
+
+#define str2pid_t(string) str2uint32_t(string)
+
+
+// for faster execution, allow the compiler to inline
+// these functions that are called thousands of times per second
+
+static inline uint32_t simple_hash(const char *name) {
+ unsigned char *s = (unsigned char *) name;
+ uint32_t hval = 0x811c9dc5;
+ while (*s) {
+ hval *= 16777619;
+ hval ^= (uint32_t) *s++;
+ }
+ return hval;
+}
+
+static inline uint32_t simple_uhash(const char *name) {
+ unsigned char *s = (unsigned char *) name;
+ uint32_t hval = 0x811c9dc5, c;
+ while ((c = *s++)) {
+ if (unlikely(c >= 'A' && c <= 'Z')) c += 'a' - 'A';
+ hval *= 16777619;
+ hval ^= c;
+ }
+ return hval;
+}
+
+static inline int simple_hash_strcmp(const char *name, const char *b, uint32_t *hash) {
+ unsigned char *s = (unsigned char *) name;
+ uint32_t hval = 0x811c9dc5;
+ int ret = 0;
+ while (*s) {
+ if(!ret) ret = *s - *b++;
+ hval *= 16777619;
+ hval ^= (uint32_t) *s++;
+ }
+ *hash = hval;
+ return ret;
+}
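
`simple_hash_strcmp()` folds the FNV-1a hash and a byte comparison into a single pass over the string; note it compares only as many characters as the first argument has, so it is meant for keys already known to be candidates. A usage sketch, assuming this header is included as usual through `libnetdata.h`:

```c
static void demo_hash_strcmp(void) {
    uint32_t hash;
    int cmp = simple_hash_strcmp("system.cpu", "system.cpu", &hash);
    // cmp == 0 here, and hash can be cached for later lookups
    fprintf(stderr, "cmp=%d hash=%u\n", cmp, hash);
}
```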
+
+static inline int str2i(const char *s) {
+ int n = 0;
+ char c, negative = (*s == '-');
+
+ for(c = (negative)?*(++s):*s; c >= '0' && c <= '9' ; c = *(++s)) {
+ n *= 10;
+ n += c - '0';
+ }
+
+ if(unlikely(negative))
+ return -n;
+
+ return n;
+}
+
+static inline long str2l(const char *s) {
+ long n = 0;
+ char c, negative = (*s == '-');
+
+ for(c = (negative)?*(++s):*s; c >= '0' && c <= '9' ; c = *(++s)) {
+ n *= 10;
+ n += c - '0';
+ }
+
+ if(unlikely(negative))
+ return -n;
+
+ return n;
+}
+
+static inline uint32_t str2uint32_t(const char *s) {
+ uint32_t n = 0;
+ char c;
+ for(c = *s; c >= '0' && c <= '9' ; c = *(++s)) {
+ n *= 10;
+ n += c - '0';
+ }
+ return n;
+}
+
+static inline uint64_t str2uint64_t(const char *s) {
+ uint64_t n = 0;
+ char c;
+ for(c = *s; c >= '0' && c <= '9' ; c = *(++s)) {
+ n *= 10;
+ n += c - '0';
+ }
+ return n;
+}
+
+static inline unsigned long str2ul(const char *s) {
+ unsigned long n = 0;
+ char c;
+ for(c = *s; c >= '0' && c <= '9' ; c = *(++s)) {
+ n *= 10;
+ n += c - '0';
+ }
+ return n;
+}
+
+static inline unsigned long long str2ull(const char *s) {
+ unsigned long long n = 0;
+ char c;
+ for(c = *s; c >= '0' && c <= '9' ; c = *(++s)) {
+ n *= 10;
+ n += c - '0';
+ }
+ return n;
+}
+
+static inline long long str2ll(const char *s, char **endptr) {
+ int negative = 0;
+
+ if(unlikely(*s == '-')) {
+ s++;
+ negative = 1;
+ }
+ else if(unlikely(*s == '+'))
+ s++;
+
+ long long n = 0;
+ char c;
+ for(c = *s; c >= '0' && c <= '9' ; c = *(++s)) {
+ n *= 10;
+ n += c - '0';
+ }
+
+ if(unlikely(endptr))
+ *endptr = (char *)s;
+
+ if(unlikely(negative))
+ return -n;
+ else
+ return n;
+}
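
All of the str2* parsers above trade error handling for speed: no base detection, no overflow checks, and parsing simply stops at the first non-digit. A usage sketch:

```c
static void demo_str2ll(void) {
    char *end;
    long long v = str2ll("-12345 rest", &end);
    // v == -12345 and end points at " rest"; no overflow detection is done
    fprintf(stderr, "value=%lld remainder='%s'\n", v, end);
}
```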
+
+static inline long double str2ld(const char *s, char **endptr) {
+ int negative = 0;
+ const char *start = s;
+ unsigned long long integer_part = 0;
+ unsigned long decimal_part = 0;
+ size_t decimal_digits = 0;
+
+ switch(*s) {
+ case '-':
+ s++;
+ negative = 1;
+ break;
+
+ case '+':
+ s++;
+ break;
+
+ case 'n':
+ if(s[1] == 'a' && s[2] == 'n') {
+ if(endptr) *endptr = (char *)&s[3];
+ return NAN;
+ }
+ break;
+
+ case 'i':
+ if(s[1] == 'n' && s[2] == 'f') {
+ if(endptr) *endptr = (char *)&s[3];
+ return INFINITY;
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ while (*s >= '0' && *s <= '9') {
+ integer_part = (integer_part * 10) + (*s - '0');
+ s++;
+ }
+
+ if(unlikely(*s == '.')) {
+ decimal_part = 0;
+ s++;
+
+ while (*s >= '0' && *s <= '9') {
+ decimal_part = (decimal_part * 10) + (*s - '0');
+ s++;
+ decimal_digits++;
+ }
+ }
+
+ if(unlikely(*s == 'e' || *s == 'E'))
+ return strtold(start, endptr);
+
+ if(unlikely(endptr))
+ *endptr = (char *)s;
+
+ if(unlikely(negative)) {
+ if(unlikely(decimal_digits))
+ return -((long double)integer_part + (long double)decimal_part / powl(10.0, decimal_digits));
+ else
+ return -((long double)integer_part);
+ }
+ else {
+ if(unlikely(decimal_digits))
+ return (long double)integer_part + (long double)decimal_part / powl(10.0, decimal_digits);
+ else
+ return (long double)integer_part;
+ }
+}
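
`str2ld()` handles sign, `nan`, `inf` and a fixed-point fraction itself, and only falls back to the slower libc `strtold()` when an exponent appears. For example:

```c
static void demo_str2ld(void) {
    char *end;
    long double a = str2ld("3.14 more", &end);   // fast path: no exponent
    long double b = str2ld("1.5e3", NULL);       // delegates to strtold()
    fprintf(stderr, "%Lf %Lf rest='%s'\n", a, b, end);
}
```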
+
+#ifdef NETDATA_STRCMP_OVERRIDE
+#ifdef strcmp
+#undef strcmp
+#endif
+#define strcmp(a, b) strsame(a, b)
+#endif // NETDATA_STRCMP_OVERRIDE
+
+static inline int strsame(const char *a, const char *b) {
+ if(unlikely(a == b)) return 0;
+ while(*a && *a == *b) { a++; b++; }
+ return *a - *b;
+}
+
+static inline char *strncpyz(char *dst, const char *src, size_t n) {
+ char *p = dst;
+
+ while (*src && n--)
+ *dst++ = *src++;
+
+ *dst = '\0';
+
+ return p;
+}
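
Unlike `strncpy()`, `strncpyz()` never pads and always NUL-terminates; since it copies up to n characters plus the terminator, the destination must hold n+1 bytes:

```c
static void demo_strncpyz(void) {
    char buf[8];
    strncpyz(buf, "longer than buf", sizeof(buf) - 1);  // leave room for the NUL
    // buf now holds "longer " and is guaranteed to be terminated
}
```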
+
+static inline void sanitize_json_string(char *dst, char *src, size_t len) {
+ while (*src != '\0' && len > 1) {
+ if (*src == '\\' || *src == '\"' || *src < 0x1F) {
+ if (*src < 0x1F) {
+ *dst++ = '_';
+ src++;
+ len--;
+ } else {
+ *dst++ = '\\';
+ *dst++ = *src++;
+ len -= 2;
+ }
+ } else {
+ *dst++ = *src++;
+ len--;
+ }
+ }
+ *dst = '\0';
+}
+
+static inline int read_file(const char *filename, char *buffer, size_t size) {
+ if(unlikely(!size)) return 3;
+
+ int fd = open(filename, O_RDONLY, 0666);
+ if(unlikely(fd == -1)) {
+ buffer[0] = '\0';
+ return 1;
+ }
+
+ ssize_t r = read(fd, buffer, size);
+ if(unlikely(r == -1)) {
+ buffer[0] = '\0';
+ close(fd);
+ return 2;
+ }
+ buffer[r] = '\0';
+
+ close(fd);
+ return 0;
+}
+
+static inline int read_single_number_file(const char *filename, unsigned long long *result) {
+ char buffer[30 + 1];
+
+ int ret = read_file(filename, buffer, 30);
+ if(unlikely(ret)) {
+ *result = 0;
+ return ret;
+ }
+
+ buffer[30] = '\0';
+ *result = str2ull(buffer);
+ return 0;
+}
+
+static inline int read_single_signed_number_file(const char *filename, long long *result) {
+ char buffer[30 + 1];
+
+ int ret = read_file(filename, buffer, 30);
+ if(unlikely(ret)) {
+ *result = 0;
+ return ret;
+ }
+
+ buffer[30] = '\0';
+ *result = atoll(buffer);
+ return 0;
+}
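
Together, these helpers make one-line reads of kernel files cheap; since `str2ull()` stops at the first non-digit, only the first number of a multi-value file is returned. A sketch (the path is just an example):

```c
static void demo_proc_read(void) {
    unsigned long long first = 0;
    // the path is illustrative; any file starting with a number works
    if (read_single_number_file("/proc/sys/fs/file-nr", &first) == 0)
        fprintf(stderr, "first number: %llu\n", first);
}
```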
+
+#endif //NETDATA_INLINED_H
diff --git a/libnetdata/json/Makefile.am b/libnetdata/json/Makefile.am
new file mode 100644
index 0000000..161784b
--- /dev/null
+++ b/libnetdata/json/Makefile.am
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/libnetdata/json/README.md b/libnetdata/json/README.md
new file mode 100644
index 0000000..79a48e2
--- /dev/null
+++ b/libnetdata/json/README.md
@@ -0,0 +1,10 @@
+<!--
+title: "json"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/libnetdata/json/README.md
+-->
+
+# json
+
+`json` contains a parser for JSON strings, based on `jsmn` (<https://github.com/zserge/jsmn>). If the JSON-C library is installed, the installation script will prefer it; you can also force its use with `--enable-jsonc` at compile time.
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Flibnetdata%2Fjson%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
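
A minimal usage sketch, assuming a build where `libnetdata.h` is available: `json_parse()` drives a user callback for every entry, and `json_callback_print()` is a ready-made callback that logs each one. The same call works with both backends (jsmn, or JSON-C when built with `--enable-jsonc`):

```c
#include "libnetdata/libnetdata.h"

int main(void) {
    char js[] = "{\"name\":\"netdata\",\"ok\":true}";  // parsed in place, so not const
    return json_parse(js, NULL, json_callback_print) == JSON_OK ? 0 : 1;
}
```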
diff --git a/libnetdata/json/jsmn.c b/libnetdata/json/jsmn.c
new file mode 100644
index 0000000..9525358
--- /dev/null
+++ b/libnetdata/json/jsmn.c
@@ -0,0 +1,328 @@
+#include <stdlib.h>
+
+#include "jsmn.h"
+
+/**
+ * Alloc token
+ *
+ * Allocates a fresh unused token from the token pool.
+ *
+ * @param parser the parser state
+ * @param tokens the token array being filled
+ * @param num_tokens the total number of tokens in the array.
+ *
+ * @return it returns the next available token, or NULL when the pool is exhausted.
+ */
+static jsmntok_t *jsmn_alloc_token(jsmn_parser *parser,
+ jsmntok_t *tokens, size_t num_tokens) {
+ jsmntok_t *tok;
+ if (parser->toknext >= num_tokens) {
+ return NULL;
+ }
+ tok = &tokens[parser->toknext++];
+ tok->start = tok->end = -1;
+ tok->size = 0;
+#ifdef JSMN_PARENT_LINKS
+ tok->parent = -1;
+#endif
+ return tok;
+}
+
+/**
+ * Fill Token
+ *
+ * Fills token type and boundaries.
+ *
+ * @param token the structure to set the values
+ * @param type is the token type
+ * @param start is the first position of the value
+ * @param end is the end of the value
+ */
+static void jsmn_fill_token(jsmntok_t *token, jsmntype_t type,
+ int start, int end) {
+ token->type = type;
+ token->start = start;
+ token->end = end;
+ token->size = 0;
+}
+
+/**
+ * Parse primitive
+ *
+ * Fills the next available token with a JSON primitive.
+ *
+ * @param parser is the control structure
+ * @param js is the json string
+ * @param len is the js length
+ * @param tokens is the structure with the tokens mapped
+ * @param num_tokens is the total number of tokens
+ *
+ * @return It returns 0 on success and a negative jsmnerr_t otherwise
+ */
+static jsmnerr_t jsmn_parse_primitive(jsmn_parser *parser, const char *js,
+ size_t len, jsmntok_t *tokens, size_t num_tokens) {
+ jsmntok_t *token;
+ int start;
+
+ start = parser->pos;
+
+ for (; parser->pos < len && js[parser->pos] != '\0'; parser->pos++) {
+ switch (js[parser->pos]) {
+#ifndef JSMN_STRICT
+ /* In strict mode primitive must be followed by "," or "}" or "]" */
+ case ':':
+#endif
+ case '\t' : case '\r' : case '\n' : case ' ' :
+ case ',' : case ']' : case '}' :
+ goto found;
+ }
+ if (js[parser->pos] < 32 || js[parser->pos] >= 127) {
+ parser->pos = start;
+ return JSMN_ERROR_INVAL;
+ }
+ }
+#ifdef JSMN_STRICT
+ /* In strict mode primitive must be followed by a comma/object/array */
+ parser->pos = start;
+ return JSMN_ERROR_PART;
+#endif
+
+ found:
+ if (tokens == NULL) {
+ parser->pos--;
+ return 0;
+ }
+ token = jsmn_alloc_token(parser, tokens, num_tokens);
+ if (token == NULL) {
+ parser->pos = start;
+ return JSMN_ERROR_NOMEM;
+ }
+ jsmn_fill_token(token, JSMN_PRIMITIVE, start, parser->pos);
+#ifdef JSMN_PARENT_LINKS
+ token->parent = parser->toksuper;
+#endif
+ parser->pos--;
+ return 0;
+}
+
+/**
+ * Parse string
+ *
+ * Fills next token with JSON string.
+ *
+ * @param parser is the control structure
+ * @param js is the json string
+ * @param len is the js length
+ * @param tokens is structure with the tokens mapped.
+ * @param num_tokens is the total number of tokens
+ *
+ * @return It returns 0 on success and a negative jsmnerr_t otherwise
+ */
+static jsmnerr_t jsmn_parse_string(jsmn_parser *parser, const char *js,
+ size_t len, jsmntok_t *tokens, size_t num_tokens) {
+ jsmntok_t *token;
+
+ int start = parser->pos;
+
+ parser->pos++;
+
+ /* Skip starting quote */
+ for (; parser->pos < len && js[parser->pos] != '\0'; parser->pos++) {
+ char c = js[parser->pos];
+
+ /* Quote: end of string */
+ if (c == '\"') {
+ if (tokens == NULL) {
+ return 0;
+ }
+ token = jsmn_alloc_token(parser, tokens, num_tokens);
+ if (token == NULL) {
+ parser->pos = start;
+ return JSMN_ERROR_NOMEM;
+ }
+ jsmn_fill_token(token, JSMN_STRING, start+1, parser->pos);
+#ifdef JSMN_PARENT_LINKS
+ token->parent = parser->toksuper;
+#endif
+ return 0;
+ }
+
+ /* Backslash: Quoted symbol expected */
+ if (c == '\\') {
+ parser->pos++;
+ switch (js[parser->pos]) {
+ /* Allowed escaped symbols */
+ case '\"': case '/' : case '\\' : case 'b' :
+ case 'f' : case 'r' : case 'n' : case 't' :
+ break;
+ /* Allows escaped symbol \uXXXX */
+ case 'u':
+ parser->pos++;
+ int i = 0;
+ for(; i < 4 && js[parser->pos] != '\0'; i++) {
+ /* If it isn't a hex character we have an error */
+ if(!((js[parser->pos] >= 48 && js[parser->pos] <= 57) || /* 0-9 */
+ (js[parser->pos] >= 65 && js[parser->pos] <= 70) || /* A-F */
+ (js[parser->pos] >= 97 && js[parser->pos] <= 102))) { /* a-f */
+ parser->pos = start;
+ return JSMN_ERROR_INVAL;
+ }
+ parser->pos++;
+ }
+ parser->pos--;
+ break;
+ /* Unexpected symbol */
+ default:
+ parser->pos = start;
+ return JSMN_ERROR_INVAL;
+ }
+ }
+ }
+ parser->pos = start;
+ return JSMN_ERROR_PART;
+}
+
+/**
+ * JSMN Parse
+ *
+ * Parse JSON string and fill tokens.
+ *
+ * @param parser the parser state used while parsing
+ * @param js the string to parse
+ * @param len the string length
+ * @param tokens the place to map the tokens
+ * @param num_tokens the number of tokens present in the tokens structure.
+ *
+ * @return It returns the number of tokens present in the string on success or a negative number otherwise
+ */
+jsmnerr_t jsmn_parse(jsmn_parser *parser, const char *js, size_t len,
+ jsmntok_t *tokens, unsigned int num_tokens) {
+ jsmnerr_t r;
+ int i;
+ jsmntok_t *token;
+ int count = 0;
+
+ for (; parser->pos < len && js[parser->pos] != '\0'; parser->pos++) {
+ char c;
+ jsmntype_t type;
+
+ c = js[parser->pos];
+ switch (c) {
+ case '{': case '[':
+ count++;
+ if (tokens == NULL) {
+ break;
+ }
+ token = jsmn_alloc_token(parser, tokens, num_tokens);
+ if (token == NULL)
+ return JSMN_ERROR_NOMEM;
+ if (parser->toksuper != -1) {
+ tokens[parser->toksuper].size++;
+#ifdef JSMN_PARENT_LINKS
+ token->parent = parser->toksuper;
+#endif
+ }
+ token->type = (c == '{' ? JSMN_OBJECT : JSMN_ARRAY);
+ token->start = parser->pos;
+ parser->toksuper = parser->toknext - 1;
+ break;
+ case '}': case ']':
+ if (tokens == NULL)
+ break;
+ type = (c == '}' ? JSMN_OBJECT : JSMN_ARRAY);
+#ifdef JSMN_PARENT_LINKS
+ if (parser->toknext < 1) {
+ return JSMN_ERROR_INVAL;
+ }
+ token = &tokens[parser->toknext - 1];
+ for (;;) {
+ if (token->start != -1 && token->end == -1) {
+ if (token->type != type) {
+ return JSMN_ERROR_INVAL;
+ }
+ token->end = parser->pos + 1;
+ parser->toksuper = token->parent;
+ break;
+ }
+ if (token->parent == -1) {
+ break;
+ }
+ token = &tokens[token->parent];
+ }
+#else
+ for (i = parser->toknext - 1; i >= 0; i--) {
+ token = &tokens[i];
+ if (token->start != -1 && token->end == -1) {
+ if (token->type != type) {
+ return JSMN_ERROR_INVAL;
+ }
+ parser->toksuper = -1;
+ token->end = parser->pos + 1;
+ break;
+ }
+ }
+ /* Error if unmatched closing bracket */
+ if (i == -1) return JSMN_ERROR_INVAL;
+ for (; i >= 0; i--) {
+ token = &tokens[i];
+ if (token->start != -1 && token->end == -1) {
+ parser->toksuper = i;
+ break;
+ }
+ }
+#endif
+ break;
+ case '\"':
+ r = jsmn_parse_string(parser, js, len, tokens, num_tokens);
+ if (r < 0) return r;
+ count++;
+ if (parser->toksuper != -1 && tokens != NULL)
+ tokens[parser->toksuper].size++;
+ break;
+ case '\t' : case '\r' : case '\n' : case ':' : case ',': case ' ':
+ break;
+#ifdef JSMN_STRICT
+ /* In strict mode primitives are: numbers and booleans */
+ case '-': case '0': case '1' : case '2': case '3' : case '4':
+ case '5': case '6': case '7' : case '8': case '9':
+ case 't': case 'f': case 'n' :
+#else
+ /* In non-strict mode every unquoted value is a primitive */
+ default:
+#endif
+ r = jsmn_parse_primitive(parser, js, len, tokens, num_tokens);
+ if (r < 0) return r;
+ count++;
+ if (parser->toksuper != -1 && tokens != NULL)
+ tokens[parser->toksuper].size++;
+ break;
+
+#ifdef JSMN_STRICT
+ /* Unexpected char in strict mode */
+ default:
+ return JSMN_ERROR_INVAL;
+#endif
+ }
+ }
+
+ if (tokens) {
+ for (i = parser->toknext - 1; i >= 0; i--) {
+ /* Unmatched opened object or array */
+ if (tokens[i].start != -1 && tokens[i].end == -1) {
+ return JSMN_ERROR_PART;
+ }
+ }
+ }
+
+ return count;
+}
+
+/**
+ * JSMN Init
+ *
+ * Initializes the parser state so it can run over a given buffer, filling an
+ * array of available tokens.
+ *
+ * @param parser is the structure with values to reset
+ */
+void jsmn_init(jsmn_parser *parser) {
+ parser->pos = 0;
+ parser->toknext = 0;
+ parser->toksuper = -1;
+} \ No newline at end of file
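
Used standalone, the parser is driven exactly as `json_tokenise()` does later in this patch: init, parse, and retry with a larger token array when `JSMN_ERROR_NOMEM` comes back. A condensed sketch with a fixed pool:

```c
#include <stdio.h>
#include <string.h>
#include "jsmn.h"

int main(void) {
    const char *js = "{\"a\": 1, \"b\": [true, null]}";
    jsmntok_t tokens[32];          // fixed pool; a real caller grows and retries
    jsmn_parser parser;

    jsmn_init(&parser);
    jsmnerr_t r = jsmn_parse(&parser, js, strlen(js), tokens, 32);
    if (r < 0) {
        fprintf(stderr, "jsmn error %d\n", (int)r);
        return 1;
    }
    printf("parsed %d tokens\n", (int)r);   // root object, keys and values
    return 0;
}
```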
diff --git a/libnetdata/json/jsmn.h b/libnetdata/json/jsmn.h
new file mode 100644
index 0000000..beff586
--- /dev/null
+++ b/libnetdata/json/jsmn.h
@@ -0,0 +1,75 @@
+#ifndef __JSMN_H_
+#define __JSMN_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stddef.h>
+/**
+ * JSON type identifier. Basic types are:
+ * o Object
+ * o Array
+ * o String
+ * o Other primitive: number, boolean (true/false) or null
+ */
+typedef enum {
+ JSMN_PRIMITIVE = 0,
+ JSMN_OBJECT = 1,
+ JSMN_ARRAY = 2,
+ JSMN_STRING = 3
+} jsmntype_t;
+
+typedef enum {
+ /* Not enough tokens were provided */
+ JSMN_ERROR_NOMEM = -1,
+ /* Invalid character inside JSON string */
+ JSMN_ERROR_INVAL = -2,
+ /* The string is not a full JSON packet, more bytes expected */
+ JSMN_ERROR_PART = -3,
+} jsmnerr_t;
+
+/**
+ * JSON token description.
+ *
+ * @param type type (object, array, string etc.)
+ * @param start start position in JSON data string
+ * @param end end position in JSON data string
+ */
+typedef struct {
+ jsmntype_t type;
+ int start;
+ int end;
+ int size;
+#ifdef JSMN_PARENT_LINKS
+ int parent;
+#endif
+} jsmntok_t;
+
+/**
+ * JSON parser. Contains an array of token blocks available. Also stores
+ * the string being parsed now and current position in that string
+ */
+typedef struct {
+ unsigned int pos; /* offset in the JSON string */
+ unsigned int toknext; /* next token to allocate */
+ int toksuper; /* superior token node, e.g. parent object or array */
+} jsmn_parser;
+
+/**
+ * Create JSON parser over an array of tokens
+ */
+void jsmn_init(jsmn_parser *parser);
+
+/**
+ * Run JSON parser. It parses a JSON data string into an array of tokens, each describing
+ * a single JSON object.
+ */
+jsmnerr_t jsmn_parse(jsmn_parser *parser, const char *js, size_t len,
+ jsmntok_t *tokens, unsigned int num_tokens);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __JSMN_H_ */ \ No newline at end of file
diff --git a/libnetdata/json/json.c b/libnetdata/json/json.c
new file mode 100644
index 0000000..bd164ae
--- /dev/null
+++ b/libnetdata/json/json.c
@@ -0,0 +1,556 @@
+#include "jsmn.h"
+#include "../libnetdata.h"
+#include "json.h"
+#include "libnetdata/libnetdata.h"
+#include "../../health/health.h"
+
+#define JSON_TOKENS 1024
+
+int json_tokens = JSON_TOKENS;
+
+/**
+ * Json Tokenise
+ *
+ * Map the given string into tokens.
+ *
+ * @param js is the string used to create the tokens
+ * @param len is the string length (jsmn variant only)
+ * @param count the number of tokens found (jsmn variant only)
+ *
+ * @return it returns the string parsed into tokens
+ */
+#ifdef ENABLE_JSONC
+json_object *json_tokenise(char *js) {
+ if(!js) {
+ error("JSON: json string is empty.");
+ return NULL;
+ }
+
+ json_object *token = json_tokener_parse(js);
+ if(!token) {
+ error("JSON: Invalid json string.");
+ return NULL;
+ }
+
+ return token;
+}
+#else
+jsmntok_t *json_tokenise(char *js, size_t len, size_t *count)
+{
+ int n = json_tokens;
+ if(!js || !len) {
+ error("JSON: json string is empty.");
+ return NULL;
+ }
+
+ jsmn_parser parser;
+ jsmn_init(&parser);
+
+ jsmntok_t *tokens = mallocz(sizeof(jsmntok_t) * n);
+ if(!tokens) return NULL;
+
+ int ret = jsmn_parse(&parser, js, len, tokens, n);
+ while (ret == JSMN_ERROR_NOMEM) {
+ n *= 2;
+ jsmntok_t *new = reallocz(tokens, sizeof(jsmntok_t) * n);
+ if(!new) {
+ freez(tokens);
+ return NULL;
+ }
+ tokens = new;
+ ret = jsmn_parse(&parser, js, len, tokens, n);
+ }
+
+ if (ret == JSMN_ERROR_INVAL) {
+ error("JSON: Invalid json string.");
+ freez(tokens);
+ return NULL;
+ }
+ else if (ret == JSMN_ERROR_PART) {
+ error("JSON: Truncated JSON string.");
+ freez(tokens);
+ return NULL;
+ }
+
+ if(count) *count = (size_t)ret;
+
+ if(json_tokens < n) json_tokens = n;
+ return tokens;
+}
+#endif
+
+/**
+ * Callback Print
+ *
+ * Set itself as the print callback where necessary and write the entry
+ * information into a buffer that is then written to the log.
+ *
+ * @param e a pointer to a structure that has the complete information about the json entry.
+ *
+ * @return It always returns 0
+ */
+int json_callback_print(JSON_ENTRY *e)
+{
+ BUFFER *wb=buffer_create(300);
+
+ buffer_sprintf(wb,"%s = ", e->name);
+ char txt[50];
+ switch(e->type) {
+ case JSON_OBJECT:
+ e->callback_function = json_callback_print;
+ buffer_strcat(wb,"OBJECT");
+ break;
+
+ case JSON_ARRAY:
+ e->callback_function = json_callback_print;
+ sprintf(txt,"ARRAY[%lu]", e->data.items);
+ buffer_strcat(wb, txt);
+ break;
+
+ case JSON_STRING:
+ buffer_strcat(wb, e->data.string);
+ break;
+
+ case JSON_NUMBER:
+ sprintf(txt,"%Lf", e->data.number);
+ buffer_strcat(wb,txt);
+
+ break;
+
+ case JSON_BOOLEAN:
+ buffer_strcat(wb, e->data.boolean?"TRUE":"FALSE");
+ break;
+
+ case JSON_NULL:
+ buffer_strcat(wb,"NULL");
+ break;
+ }
+ info("JSON: %s", buffer_tostring(wb));
+ buffer_free(wb);
+ return 0;
+}
+
+/**
+ * JSONC Set String
+ *
+ * Set the string value of the structure JSON_ENTRY.
+ *
+ * @param e the output structure
+ * @param key the name to copy into the entry
+ * @param value the string value to set
+ */
+static inline void json_jsonc_set_string(JSON_ENTRY *e,char *key,const char *value) {
+ size_t len = strlen(key);
+ if(len > JSON_NAME_LEN)
+ len = JSON_NAME_LEN;
+ e->type = JSON_STRING;
+ memcpy(e->name,key,len);
+ e->name[len] = 0x00;
+ e->data.string = (char *) value;
+}
+
+
+#ifdef ENABLE_JSONC
+/**
+ * JSONC set Boolean
+ *
+ * Set the boolean value of the structure JSON_ENTRY
+ *
+ * @param e the output structure
+ * @param value the input value
+ */
+static inline void json_jsonc_set_boolean(JSON_ENTRY *e,int value) {
+ e->type = JSON_BOOLEAN;
+ e->data.boolean = value;
+}
+
+static inline void json_jsonc_set_integer(JSON_ENTRY *e, char *key, int64_t value) {
+ size_t len = strlen(key);
+ if(len > JSON_NAME_LEN)
+ len = JSON_NAME_LEN;
+ e->type = JSON_NUMBER;
+ memcpy(e->name, key, len);
+ e->name[len] = 0;
+ e->data.number = value;
+}
+
+/**
+ * Parse Array
+ *
+ * Parse the array object.
+ *
+ * @param ptr the pointer for the object that we will parse.
+ * @param callback_data additional data to be used together with the callback function
+ * @param callback_function the function called for every entry (e.g. to create a silencer).
+ */
+static inline void json_jsonc_parse_array(json_object *ptr, void *callback_data,int (*callback_function)(struct json_entry *)) {
+ int end = json_object_array_length(ptr);
+ JSON_ENTRY e;
+
+ if(end) {
+ int i;
+ i = 0;
+
+ enum json_type type;
+ do {
+ json_object *jvalue = json_object_array_get_idx(ptr, i);
+ if(jvalue) {
+ e.callback_data = callback_data;
+ e.type = JSON_OBJECT;
+ callback_function(&e);
+ json_object_object_foreach(jvalue, key, val) {
+ type = json_object_get_type(val);
+ if (type == json_type_array) {
+ e.type = JSON_ARRAY;
+ json_jsonc_parse_array(val, callback_data, callback_function);
+ } else if (type == json_type_object) {
+ json_walk(val,callback_data,callback_function);
+ } else if (type == json_type_string) {
+ json_jsonc_set_string(&e,key,json_object_get_string(val));
+ callback_function(&e);
+ } else if (type == json_type_boolean) {
+ json_jsonc_set_boolean(&e,json_object_get_boolean(val));
+ callback_function(&e);
+ }
+ }
+ }
+
+ } while (++i < end);
+ }
+}
+#else
+
+/**
+ * Walk string
+ *
+ * Set JSON_ENTRY to string and map the values from jsmntok_t.
+ *
+ * @param js the original string
+ * @param t the tokens
+ * @param start the first position
+ * @param e the output structure.
+ *
+ * @return It always returns 1
+ */
+size_t json_walk_string(char *js, jsmntok_t *t, size_t start, JSON_ENTRY *e)
+{
+ char old = js[t[start].end];
+ js[t[start].end] = '\0';
+ e->original_string = &js[t[start].start];
+
+ e->type = JSON_STRING;
+ e->data.string = e->original_string;
+ if(e->callback_function) e->callback_function(e);
+ js[t[start].end] = old;
+ return 1;
+}
+
+/**
+ * Walk Primitive
+ *
+ * Determine the data type of the primitive and set it on the output structure
+ *
+ * @param js the original string
+ * @param t the tokens
+ * @param start the first position
+ * @param e the output structure.
+ *
+ * @return It always returns 1
+ */
+size_t json_walk_primitive(char *js, jsmntok_t *t, size_t start, JSON_ENTRY *e)
+{
+ char old = js[t[start].end];
+ js[t[start].end] = '\0';
+ e->original_string = &js[t[start].start];
+
+ switch(e->original_string[0]) {
+ case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7':
+ case '8': case '9': case '-': case '.':
+ e->type = JSON_NUMBER;
+ e->data.number = strtold(e->original_string, NULL);
+ break;
+
+ case 't': case 'T':
+ e->type = JSON_BOOLEAN;
+ e->data.boolean = 1;
+ break;
+
+ case 'f': case 'F':
+ e->type = JSON_BOOLEAN;
+ e->data.boolean = 0;
+ break;
+
+ case 'n': case 'N':
+ default:
+ e->type = JSON_NULL;
+ break;
+ }
+ if(e->callback_function) e->callback_function(e);
+ js[t[start].end] = old;
+ return 1;
+}
+
+/**
+ * Walk Array
+ *
+ * Walk an array, calling the callback for each element
+ *
+ * @param js the original string
+ * @param t the tokens
+ * @param nest the nesting level
+ * @param start the first position
+ * @param e the structure with values and callback to be used inside the function.
+ *
+ * @return It returns the number of tokens consumed by the array
+ */
+size_t json_walk_array(char *js, jsmntok_t *t, size_t nest, size_t start, JSON_ENTRY *e)
+{
+ JSON_ENTRY ne;
+
+ char old = js[t[start].end];
+ js[t[start].end] = '\0';
+ ne.original_string = &js[t[start].start];
+
+ memcpy(&ne, e, sizeof(JSON_ENTRY));
+ ne.type = JSON_ARRAY;
+ ne.data.items = t[start].size;
+ ne.callback_function = e->callback_function;
+ ne.name[0]='\0';
+ ne.fullname[0]='\0';
+ if(e->callback_function) e->callback_function(&ne);
+ js[t[start].end] = old;
+
+ size_t i, init = start, size = t[start].size;
+
+ start++;
+ for(i = 0; i < size ; i++) {
+ ne.pos = i;
+ if (strlen(e->name) > JSON_NAME_LEN - 24 || strlen(e->fullname) > JSON_FULLNAME_LEN -24) {
+ info("JSON: JSON walk_array ignoring element with name:%s fullname:%s",e->name, e->fullname);
+ continue;
+ }
+ snprintfz(ne.name, JSON_NAME_LEN, "%s[%lu]", e->name, i);
+ snprintfz(ne.fullname, JSON_FULLNAME_LEN, "%s[%lu]", e->fullname, i);
+
+ switch(t[start].type) {
+ case JSMN_PRIMITIVE:
+ start += json_walk_primitive(js, t, start, &ne);
+ break;
+
+ case JSMN_OBJECT:
+ start += json_walk_object(js, t, nest + 1, start, &ne);
+ break;
+
+ case JSMN_ARRAY:
+ start += json_walk_array(js, t, nest + 1, start, &ne);
+ break;
+
+ case JSMN_STRING:
+ start += json_walk_string(js, t, start, &ne);
+ break;
+ }
+ }
+ return start - init;
+}
+
+/**
+ * Walk Object
+ *
+ * Walk an object, calling the callback for each key and value
+ *
+ * @param js the original string
+ * @param t the tokens
+ * @param nest the nesting level
+ * @param start the first position
+ * @param e the output structure.
+ *
+ * @return It returns the number of tokens consumed by the object
+ */
+size_t json_walk_object(char *js, jsmntok_t *t, size_t nest, size_t start, JSON_ENTRY *e)
+{
+ JSON_ENTRY ne = {
+ .name = "",
+ .fullname = "",
+ .callback_data = NULL,
+ .callback_function = NULL
+ };
+
+ char old = js[t[start].end];
+ js[t[start].end] = '\0';
+ ne.original_string = &js[t[start].start];
+ memcpy(&ne, e, sizeof(JSON_ENTRY));
+ ne.type = JSON_OBJECT;
+ ne.callback_function = e->callback_function;
+ if(e->callback_function) e->callback_function(&ne);
+ js[t[start].end] = old;
+
+ int key = 1;
+ size_t i, init = start, size = t[start].size;
+
+ start++;
+ for(i = 0; i < size ; i++) {
+ switch(t[start].type) {
+ case JSMN_PRIMITIVE:
+ start += json_walk_primitive(js, t, start, &ne);
+ key = 1;
+ break;
+
+ case JSMN_OBJECT:
+ start += json_walk_object(js, t, nest + 1, start, &ne);
+ key = 1;
+ break;
+
+ case JSMN_ARRAY:
+ start += json_walk_array(js, t, nest + 1, start, &ne);
+ key = 1;
+ break;
+
+ case JSMN_STRING:
+ default:
+ if(key) {
+ int len = t[start].end - t[start].start;
+ if (unlikely(len>JSON_NAME_LEN)) len=JSON_NAME_LEN;
+ strncpy(ne.name, &js[t[start].start], len);
+ ne.name[len] = '\0';
+ len=strlen(e->fullname) + strlen(e->fullname[0]?".":"") + strlen(ne.name);
+ char *c = mallocz((len+1)*sizeof(char));
+ sprintf(c,"%s%s%s", e->fullname, e->fullname[0]?".":"", ne.name);
+ if (unlikely(len>JSON_FULLNAME_LEN)) len=JSON_FULLNAME_LEN;
+ strncpy(ne.fullname, c, len);
+ freez(c);
+ start++;
+ key = 0;
+ }
+ else {
+ start += json_walk_string(js, t, start, &ne);
+ key = 1;
+ }
+ break;
+ }
+ }
+ return start - init;
+}
+#endif
+
+/**
+ * Tree
+ *
+ * Call the correct walk function according to the type of each entry.
+ *
+ * @param t the json object to walk
+ * @param callback_data additional data to be used together with the callback function
+ * @param callback_function the function called for every entry (e.g. to create a silencer).
+ *
+ * @return It always returns 1
+ */
+#ifdef ENABLE_JSONC
+size_t json_walk(json_object *t, void *callback_data, int (*callback_function)(struct json_entry *)) {
+ JSON_ENTRY e;
+
+ e.callback_data = callback_data;
+ enum json_type type;
+ json_object_object_foreach(t, key, val) {
+ type = json_object_get_type(val);
+ if (type == json_type_array) {
+ e.type = JSON_ARRAY;
+ json_jsonc_parse_array(val,NULL,health_silencers_json_read_callback);
+ } else if (type == json_type_object) {
+ e.type = JSON_OBJECT;
+ } else if (type == json_type_string) {
+ json_jsonc_set_string(&e,key,json_object_get_string(val));
+ callback_function(&e);
+ } else if (type == json_type_boolean) {
+ json_jsonc_set_boolean(&e,json_object_get_boolean(val));
+ callback_function(&e);
+ } else if (type == json_type_int) {
+ json_jsonc_set_integer(&e,key,json_object_get_int64(val));
+ callback_function(&e);
+ }
+ }
+
+ return 1;
+}
+#else
+/**
+ * Tree
+ *
+ * Call the correct walk function according to the type of the root token.
+ *
+ * @param js the original string
+ * @param t the tokens
+ * @param callback_data additional data to be used together with the callback function
+ * @param callback_function the function called for every entry (e.g. to create a silencer).
+ *
+ * @return It always returns 1
+ */
+size_t json_walk_tree(char *js, jsmntok_t *t, void *callback_data, int (*callback_function)(struct json_entry *))
+{
+ JSON_ENTRY e = {
+ .name = "",
+ .fullname = "",
+ .callback_data = callback_data,
+ .callback_function = callback_function
+ };
+
+ switch (t[0].type) {
+ case JSMN_OBJECT:
+ e.type = JSON_OBJECT;
+ json_walk_object(js, t, 0, 0, &e);
+ break;
+
+ case JSMN_ARRAY:
+ e.type = JSON_ARRAY;
+ json_walk_array(js, t, 0, 0, &e);
+ break;
+
+ case JSMN_PRIMITIVE:
+ case JSMN_STRING:
+ break;
+ }
+
+ return 1;
+}
+#endif
+
+/**
+ * JSON Parse
+ *
+ * Parse the json message with the callback function
+ *
+ * @param js the string that the callback function will parse
+ * @param callback_data additional data to be used together with the callback function
+ * @param callback_function the function called for every entry (e.g. to create a silencer).
+ *
+ * @return JSON_OK when everything happened as expected, JSON_CANNOT_PARSE when there were
+ * errors in the parsing process and JSON_CANNOT_DOWNLOAD when the given string (js) is NULL.
+ */
+int json_parse(char *js, void *callback_data, int (*callback_function)(JSON_ENTRY *))
+{
+ if(js) {
+#ifdef ENABLE_JSONC
+ json_object *tokens = json_tokenise(js);
+#else
+ size_t count;
+ jsmntok_t *tokens = json_tokenise(js, strlen(js), &count);
+#endif
+
+ if(tokens) {
+#ifdef ENABLE_JSONC
+ json_walk(tokens, callback_data, callback_function);
+ json_object_put(tokens);
+#else
+ json_walk_tree(js, tokens, callback_data, callback_function);
+ freez(tokens);
+#endif
+ return JSON_OK;
+ }
+
+ return JSON_CANNOT_PARSE;
+ }
+
+ return JSON_CANNOT_DOWNLOAD;
+}
+
+/*
+int json_test(char *str)
+{
+ return json_parse(str, NULL, json_callback_print);
+}
+ */ \ No newline at end of file
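
The commented-out `json_test()` above hints at the intended smoke test; wired up, it looks like the sketch below (assuming the usual netdata logging machinery, since `json_callback_print()` logs through `info()`):

```c
static void json_smoke_test(void) {
    char js[] = "[{\"type\":\"SILENCE\"},42]";
    if (json_parse(js, NULL, json_callback_print) != JSON_OK)
        error("json smoke test failed");
}
```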
diff --git a/libnetdata/json/json.h b/libnetdata/json/json.h
new file mode 100644
index 0000000..79b58b1
--- /dev/null
+++ b/libnetdata/json/json.h
@@ -0,0 +1,72 @@
+#ifndef CHECKIN_JSON_H
+#define CHECKIN_JSON_H 1
+
+
+#if ENABLE_JSONC
+# include <json-c/json.h>
+#endif
+
+#include "jsmn.h"
+
+//https://www.ibm.com/support/knowledgecenter/en/SS9H2Y_7.6.0/com.ibm.dp.doc/json_parserlimits.html
+#define JSON_NAME_LEN 256
+#define JSON_FULLNAME_LEN 1024
+
+typedef enum {
+ JSON_OBJECT = 0,
+ JSON_ARRAY = 1,
+ JSON_STRING = 2,
+ JSON_NUMBER = 3,
+ JSON_BOOLEAN = 4,
+ JSON_NULL = 5,
+} JSON_ENTRY_TYPE;
+
+typedef struct json_entry {
+ JSON_ENTRY_TYPE type;
+ char name[JSON_NAME_LEN + 1];
+ char fullname[JSON_FULLNAME_LEN + 1];
+ union {
+ char *string; // type == JSON_STRING
+ long double number; // type == JSON_NUMBER
+ int boolean; // type == JSON_BOOLEAN
+ size_t items; // type == JSON_ARRAY
+ } data;
+ size_t pos; // the position of this item in its parent
+
+ char *original_string;
+
+ void *callback_data;
+ int (*callback_function)(struct json_entry *);
+} JSON_ENTRY;
+
+// ----------------------------------------------------------------------------
+// public functions
+
+#define JSON_OK 0
+#define JSON_CANNOT_DOWNLOAD 1
+#define JSON_CANNOT_PARSE 2
+
+int json_parse(char *js, void *callback_data, int (*callback_function)(JSON_ENTRY *));
+
+
+// ----------------------------------------------------------------------------
+// private functions
+
+#ifdef ENABLE_JSONC
+json_object *json_tokenise(char *js);
+size_t json_walk(json_object *t, void *callback_data, int (*callback_function)(struct json_entry *));
+#else
+jsmntok_t *json_tokenise(char *js, size_t len, size_t *count);
+size_t json_walk_tree(char *js, jsmntok_t *t, void *callback_data, int (*callback_function)(struct json_entry *));
+#endif
+
+size_t json_walk_object(char *js, jsmntok_t *t, size_t nest, size_t start, JSON_ENTRY *e);
+size_t json_walk_array(char *js, jsmntok_t *t, size_t nest, size_t start, JSON_ENTRY *e);
+size_t json_walk_string(char *js, jsmntok_t *t, size_t start, JSON_ENTRY *e);
+size_t json_walk_primitive(char *js, jsmntok_t *t, size_t start, JSON_ENTRY *e);
+
+int json_callback_print(JSON_ENTRY *e);
+
+
+
+#endif \ No newline at end of file
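
Writing a custom callback means switching on the entry type and, for containers, re-registering the callback through `callback_function` so the walker keeps descending. A sketch that captures one string field (the `"name"` key is only an example):

```c
/* assumes libnetdata.h is included, for strncpyz() */
static char found_name[JSON_NAME_LEN + 1];

static int find_name_cb(JSON_ENTRY *e) {
    switch (e->type) {
        case JSON_OBJECT:
        case JSON_ARRAY:
            e->callback_function = find_name_cb;    // keep descending
            break;

        case JSON_STRING:
            if (!strcmp(e->name, "name"))           // example key
                strncpyz(found_name, e->data.string, JSON_NAME_LEN);
            break;

        default:
            break;
    }
    return 0;
}
/* usage: json_parse(js, NULL, find_name_cb); */
```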
diff --git a/libnetdata/libnetdata.c b/libnetdata/libnetdata.c
new file mode 100644
index 0000000..325df3f
--- /dev/null
+++ b/libnetdata/libnetdata.c
@@ -0,0 +1,1493 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "libnetdata.h"
+
+#ifdef __APPLE__
+#define INHERIT_NONE 0
+#endif /* __APPLE__ */
+#if defined(__FreeBSD__) || defined(__APPLE__)
+# define O_NOATIME 0
+# define MADV_DONTFORK INHERIT_NONE
+#endif /* __FreeBSD__ || __APPLE__*/
+
+struct rlimit rlimit_nofile = { .rlim_cur = 1024, .rlim_max = 1024 };
+int enable_ksm = 1;
+
+volatile sig_atomic_t netdata_exit = 0;
+const char *os_type = NETDATA_OS_TYPE;
+const char *program_version = VERSION;
+
+// ----------------------------------------------------------------------------
+// memory allocation functions that handle failures
+
+// although netdata does not allocate memory very often (netdata tries to
+// keep its memory footprint stable during runtime, i.e. all buffers are
+// allocated during initialization and are adapted to current use throughout
+// its lifetime), these can be used to override the default system allocation
+// routines.
+
+#ifdef NETDATA_LOG_ALLOCATIONS
+static __thread struct memory_statistics {
+ volatile ssize_t malloc_calls_made;
+ volatile ssize_t calloc_calls_made;
+ volatile ssize_t realloc_calls_made;
+ volatile ssize_t strdup_calls_made;
+ volatile ssize_t free_calls_made;
+ volatile ssize_t memory_calls_made;
+ volatile ssize_t allocated_memory;
+ volatile ssize_t mmapped_memory;
+} memory_statistics = { 0, 0, 0, 0, 0, 0, 0, 0 };
+
+__thread size_t log_thread_memory_allocations = 0;
+
+static inline void print_allocations(const char *file, const char *function, const unsigned long line, const char *type, size_t size) {
+ static __thread struct memory_statistics old = { 0, 0, 0, 0, 0, 0, 0, 0 };
+
+ fprintf(stderr, "%s iteration %zu MEMORY TRACE: %lu@%s : %s : %s : %zu\n",
+ netdata_thread_tag(),
+ log_thread_memory_allocations,
+ line, file, function,
+ type, size
+ );
+
+ fprintf(stderr, "%s iteration %zu MEMORY ALLOCATIONS: (%04lu@%-40.40s:%-40.40s): Allocated %zd KiB (%+zd B), mmapped %zd KiB (%+zd B): %s : malloc %zd (%+zd), calloc %zd (%+zd), realloc %zd (%+zd), strdup %zd (%+zd), free %zd (%+zd)\n",
+ netdata_thread_tag(),
+ log_thread_memory_allocations,
+ line, file, function,
+ (memory_statistics.allocated_memory + 512) / 1024, memory_statistics.allocated_memory - old.allocated_memory,
+ (memory_statistics.mmapped_memory + 512) / 1024, memory_statistics.mmapped_memory - old.mmapped_memory,
+ type,
+ memory_statistics.malloc_calls_made, memory_statistics.malloc_calls_made - old.malloc_calls_made,
+ memory_statistics.calloc_calls_made, memory_statistics.calloc_calls_made - old.calloc_calls_made,
+ memory_statistics.realloc_calls_made, memory_statistics.realloc_calls_made - old.realloc_calls_made,
+ memory_statistics.strdup_calls_made, memory_statistics.strdup_calls_made - old.strdup_calls_made,
+ memory_statistics.free_calls_made, memory_statistics.free_calls_made - old.free_calls_made
+ );
+
+ memcpy(&old, &memory_statistics, sizeof(struct memory_statistics));
+}
+
+static inline void mmap_accounting(size_t size) {
+ if(log_thread_memory_allocations) {
+ memory_statistics.memory_calls_made++;
+ memory_statistics.mmapped_memory += size;
+ }
+}
+
+void *mallocz_int(const char *file, const char *function, const unsigned long line, size_t size) {
+ if(log_thread_memory_allocations) {
+ memory_statistics.memory_calls_made++;
+ memory_statistics.malloc_calls_made++;
+ memory_statistics.allocated_memory += size;
+ print_allocations(file, function, line, "malloc()", size);
+ }
+
+ size_t *n = (size_t *)malloc(sizeof(size_t) + size);
+ if (unlikely(!n)) fatal("mallocz() cannot allocate %zu bytes of memory.", size);
+ *n = size;
+ return (void *)&n[1];
+}
+
+void *callocz_int(const char *file, const char *function, const unsigned long line, size_t nmemb, size_t size) {
+ size = nmemb * size;
+
+ if(log_thread_memory_allocations) {
+ memory_statistics.memory_calls_made++;
+ memory_statistics.calloc_calls_made++;
+ memory_statistics.allocated_memory += size;
+ print_allocations(file, function, line, "calloc()", size);
+ }
+
+ size_t *n = (size_t *)calloc(1, sizeof(size_t) + size);
+ if (unlikely(!n)) fatal("callocz() cannot allocate %zu bytes of memory.", size);
+ *n = size;
+ return (void *)&n[1];
+}
+
+void *reallocz_int(const char *file, const char *function, const unsigned long line, void *ptr, size_t size) {
+ if(!ptr) return mallocz_int(file, function, line, size);
+
+ size_t *n = (size_t *)ptr;
+ n--;
+ size_t old_size = *n;
+
+ n = realloc(n, sizeof(size_t) + size);
+ if (unlikely(!n)) fatal("reallocz() cannot allocate %zu bytes of memory (from %zu bytes).", size, old_size);
+
+ if(log_thread_memory_allocations) {
+ memory_statistics.memory_calls_made++;
+ memory_statistics.realloc_calls_made++;
+ memory_statistics.allocated_memory += (size - old_size);
+ print_allocations(file, function, line, "realloc()", size - old_size);
+ }
+
+ *n = size;
+ return (void *)&n[1];
+}
+
+char *strdupz_int(const char *file, const char *function, const unsigned long line, const char *s) {
+ size_t size = strlen(s) + 1;
+
+ if(log_thread_memory_allocations) {
+ memory_statistics.memory_calls_made++;
+ memory_statistics.strdup_calls_made++;
+ memory_statistics.allocated_memory += size;
+ print_allocations(file, function, line, "strdup()", size);
+ }
+
+ size_t *n = (size_t *)malloc(sizeof(size_t) + size);
+ if (unlikely(!n)) fatal("strdupz() cannot allocate %zu bytes of memory.", size);
+
+ *n = size;
+ char *t = (char *)&n[1];
+ strcpy(t, s);
+ return t;
+}
+
+void freez_int(const char *file, const char *function, const unsigned long line, void *ptr) {
+ if(unlikely(!ptr)) return;
+
+ size_t *n = (size_t *)ptr;
+ n--;
+ size_t size = *n;
+
+ if(log_thread_memory_allocations) {
+ memory_statistics.memory_calls_made++;
+ memory_statistics.free_calls_made++;
+ memory_statistics.allocated_memory -= size;
+ print_allocations(file, function, line, "free()", size);
+ }
+
+ free(n);
+}
+#else
+
+char *strdupz(const char *s) {
+ char *t = strdup(s);
+ if (unlikely(!t)) fatal("Cannot strdup() string '%s'", s);
+ return t;
+}
+
+// If ptr is NULL, no operation is performed.
+void freez(void *ptr) {
+ free(ptr);
+}
+
+void *mallocz(size_t size) {
+ void *p = malloc(size);
+ if (unlikely(!p)) fatal("Cannot allocate %zu bytes of memory.", size);
+ return p;
+}
+
+void *callocz(size_t nmemb, size_t size) {
+ void *p = calloc(nmemb, size);
+ if (unlikely(!p)) fatal("Cannot allocate %zu bytes of memory.", nmemb * size);
+ return p;
+}
+
+void *reallocz(void *ptr, size_t size) {
+ void *p = realloc(ptr, size);
+ if (unlikely(!p)) fatal("Cannot re-allocate memory to %zu bytes.", size);
+ return p;
+}
+
+#endif
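
Because the z-suffixed wrappers call `fatal()` instead of returning NULL, call sites can skip NULL checks entirely; a typical usage sketch:

```c
/* a sketch; string.h/stdio.h come in through libnetdata.h */
static char *join_example(const char *a, const char *b) {
    char *buf = mallocz(strlen(a) + strlen(b) + 1);  // aborts rather than return NULL
    sprintf(buf, "%s%s", a, b);
    return buf;                                      // caller releases with freez()
}
```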
+
+// --------------------------------------------------------------------------------------------------------------------
+
+void json_escape_string(char *dst, const char *src, size_t size) {
+ const char *t;
+ char *d = dst, *e = &dst[size - 1];
+
+ for(t = src; *t && d < e ;t++) {
+ if(unlikely(*t == '\\' || *t == '"')) {
+ if(unlikely(d + 1 >= e)) break;
+ *d++ = '\\';
+ }
+ *d++ = *t;
+ }
+
+ *d = '\0';
+}
+
+void json_fix_string(char *s) {
+ unsigned char c;
+ while((c = (unsigned char)*s)) {
+ if(unlikely(c == '\\'))
+ *s++ = '/';
+ else if(unlikely(c == '"'))
+ *s++ = '\'';
+ else if(unlikely(isspace(c) || iscntrl(c)))
+ *s++ = ' ';
+ else if(unlikely(!isprint(c) || c > 127))
+ *s++ = '_';
+ else
+ s++;
+ }
+}
+
+unsigned char netdata_map_chart_names[256] = {
+ [0] = '\0', //
+ [1] = '_', //
+ [2] = '_', //
+ [3] = '_', //
+ [4] = '_', //
+ [5] = '_', //
+ [6] = '_', //
+ [7] = '_', //
+ [8] = '_', //
+ [9] = '_', //
+ [10] = '_', //
+ [11] = '_', //
+ [12] = '_', //
+ [13] = '_', //
+ [14] = '_', //
+ [15] = '_', //
+ [16] = '_', //
+ [17] = '_', //
+ [18] = '_', //
+ [19] = '_', //
+ [20] = '_', //
+ [21] = '_', //
+ [22] = '_', //
+ [23] = '_', //
+ [24] = '_', //
+ [25] = '_', //
+ [26] = '_', //
+ [27] = '_', //
+ [28] = '_', //
+ [29] = '_', //
+ [30] = '_', //
+ [31] = '_', //
+ [32] = '_', //
+ [33] = '_', // !
+ [34] = '_', // "
+ [35] = '_', // #
+ [36] = '_', // $
+ [37] = '_', // %
+ [38] = '_', // &
+ [39] = '_', // '
+ [40] = '_', // (
+ [41] = '_', // )
+ [42] = '_', // *
+ [43] = '_', // +
+ [44] = '.', // ,
+ [45] = '-', // -
+ [46] = '.', // .
+ [47] = '/', // /
+ [48] = '0', // 0
+ [49] = '1', // 1
+ [50] = '2', // 2
+ [51] = '3', // 3
+ [52] = '4', // 4
+ [53] = '5', // 5
+ [54] = '6', // 6
+ [55] = '7', // 7
+ [56] = '8', // 8
+ [57] = '9', // 9
+ [58] = '_', // :
+ [59] = '_', // ;
+ [60] = '_', // <
+ [61] = '_', // =
+ [62] = '_', // >
+ [63] = '_', // ?
+ [64] = '_', // @
+ [65] = 'a', // A
+ [66] = 'b', // B
+ [67] = 'c', // C
+ [68] = 'd', // D
+ [69] = 'e', // E
+ [70] = 'f', // F
+ [71] = 'g', // G
+ [72] = 'h', // H
+ [73] = 'i', // I
+ [74] = 'j', // J
+ [75] = 'k', // K
+ [76] = 'l', // L
+ [77] = 'm', // M
+ [78] = 'n', // N
+ [79] = 'o', // O
+ [80] = 'p', // P
+ [81] = 'q', // Q
+ [82] = 'r', // R
+ [83] = 's', // S
+ [84] = 't', // T
+ [85] = 'u', // U
+ [86] = 'v', // V
+ [87] = 'w', // W
+ [88] = 'x', // X
+ [89] = 'y', // Y
+ [90] = 'z', // Z
+ [91] = '_', // [
+ [92] = '/', // backslash
+ [93] = '_', // ]
+ [94] = '_', // ^
+ [95] = '_', // _
+ [96] = '_', // `
+ [97] = 'a', // a
+ [98] = 'b', // b
+ [99] = 'c', // c
+ [100] = 'd', // d
+ [101] = 'e', // e
+ [102] = 'f', // f
+ [103] = 'g', // g
+ [104] = 'h', // h
+ [105] = 'i', // i
+ [106] = 'j', // j
+ [107] = 'k', // k
+ [108] = 'l', // l
+ [109] = 'm', // m
+ [110] = 'n', // n
+ [111] = 'o', // o
+ [112] = 'p', // p
+ [113] = 'q', // q
+ [114] = 'r', // r
+ [115] = 's', // s
+ [116] = 't', // t
+ [117] = 'u', // u
+ [118] = 'v', // v
+ [119] = 'w', // w
+ [120] = 'x', // x
+ [121] = 'y', // y
+ [122] = 'z', // z
+ [123] = '_', // {
+ [124] = '_', // |
+ [125] = '_', // }
+ [126] = '_', // ~
+ [127] = '_', //
+ [128] = '_', //
+ [129] = '_', //
+ [130] = '_', //
+ [131] = '_', //
+ [132] = '_', //
+ [133] = '_', //
+ [134] = '_', //
+ [135] = '_', //
+ [136] = '_', //
+ [137] = '_', //
+ [138] = '_', //
+ [139] = '_', //
+ [140] = '_', //
+ [141] = '_', //
+ [142] = '_', //
+ [143] = '_', //
+ [144] = '_', //
+ [145] = '_', //
+ [146] = '_', //
+ [147] = '_', //
+ [148] = '_', //
+ [149] = '_', //
+ [150] = '_', //
+ [151] = '_', //
+ [152] = '_', //
+ [153] = '_', //
+ [154] = '_', //
+ [155] = '_', //
+ [156] = '_', //
+ [157] = '_', //
+ [158] = '_', //
+ [159] = '_', //
+ [160] = '_', //
+ [161] = '_', //
+ [162] = '_', //
+ [163] = '_', //
+ [164] = '_', //
+ [165] = '_', //
+ [166] = '_', //
+ [167] = '_', //
+ [168] = '_', //
+ [169] = '_', //
+ [170] = '_', //
+ [171] = '_', //
+ [172] = '_', //
+ [173] = '_', //
+ [174] = '_', //
+ [175] = '_', //
+ [176] = '_', //
+ [177] = '_', //
+ [178] = '_', //
+ [179] = '_', //
+ [180] = '_', //
+ [181] = '_', //
+ [182] = '_', //
+ [183] = '_', //
+ [184] = '_', //
+ [185] = '_', //
+ [186] = '_', //
+ [187] = '_', //
+ [188] = '_', //
+ [189] = '_', //
+ [190] = '_', //
+ [191] = '_', //
+ [192] = '_', //
+ [193] = '_', //
+ [194] = '_', //
+ [195] = '_', //
+ [196] = '_', //
+ [197] = '_', //
+ [198] = '_', //
+ [199] = '_', //
+ [200] = '_', //
+ [201] = '_', //
+ [202] = '_', //
+ [203] = '_', //
+ [204] = '_', //
+ [205] = '_', //
+ [206] = '_', //
+ [207] = '_', //
+ [208] = '_', //
+ [209] = '_', //
+ [210] = '_', //
+ [211] = '_', //
+ [212] = '_', //
+ [213] = '_', //
+ [214] = '_', //
+ [215] = '_', //
+ [216] = '_', //
+ [217] = '_', //
+ [218] = '_', //
+ [219] = '_', //
+ [220] = '_', //
+ [221] = '_', //
+ [222] = '_', //
+ [223] = '_', //
+ [224] = '_', //
+ [225] = '_', //
+ [226] = '_', //
+ [227] = '_', //
+ [228] = '_', //
+ [229] = '_', //
+ [230] = '_', //
+ [231] = '_', //
+ [232] = '_', //
+ [233] = '_', //
+ [234] = '_', //
+ [235] = '_', //
+ [236] = '_', //
+ [237] = '_', //
+ [238] = '_', //
+ [239] = '_', //
+ [240] = '_', //
+ [241] = '_', //
+ [242] = '_', //
+ [243] = '_', //
+ [244] = '_', //
+ [245] = '_', //
+ [246] = '_', //
+ [247] = '_', //
+ [248] = '_', //
+ [249] = '_', //
+ [250] = '_', //
+ [251] = '_', //
+ [252] = '_', //
+ [253] = '_', //
+ [254] = '_', //
+ [255] = '_' //
+};
+
+// make sure the supplied string
+// is good for a netdata chart/dimension ID/NAME
+void netdata_fix_chart_name(char *s) {
+ while ((*s = netdata_map_chart_names[(unsigned char) *s])) s++;
+}
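
The table lower-cases letters, keeps digits, `.`, `-` and (for names) `/`, and maps everything else to `_`, so sanitizing is a single in-place pass:

```c
static void demo_fix_name(void) {
    char name[] = "Disk I/O (sda)";
    netdata_fix_chart_name(name);
    // name is now "disk_i/o__sda_": lowercased, symbols mapped per the table
}
```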
+
+unsigned char netdata_map_chart_ids[256] = {
+ [0] = '\0', //
+ [1] = '_', //
+ [2] = '_', //
+ [3] = '_', //
+ [4] = '_', //
+ [5] = '_', //
+ [6] = '_', //
+ [7] = '_', //
+ [8] = '_', //
+ [9] = '_', //
+ [10] = '_', //
+ [11] = '_', //
+ [12] = '_', //
+ [13] = '_', //
+ [14] = '_', //
+ [15] = '_', //
+ [16] = '_', //
+ [17] = '_', //
+ [18] = '_', //
+ [19] = '_', //
+ [20] = '_', //
+ [21] = '_', //
+ [22] = '_', //
+ [23] = '_', //
+ [24] = '_', //
+ [25] = '_', //
+ [26] = '_', //
+ [27] = '_', //
+ [28] = '_', //
+ [29] = '_', //
+ [30] = '_', //
+ [31] = '_', //
+ [32] = '_', //
+ [33] = '_', // !
+ [34] = '_', // "
+ [35] = '_', // #
+ [36] = '_', // $
+ [37] = '_', // %
+ [38] = '_', // &
+ [39] = '_', // '
+ [40] = '_', // (
+ [41] = '_', // )
+ [42] = '_', // *
+ [43] = '_', // +
+ [44] = '.', // ,
+ [45] = '-', // -
+ [46] = '.', // .
+ [47] = '_', // /
+ [48] = '0', // 0
+ [49] = '1', // 1
+ [50] = '2', // 2
+ [51] = '3', // 3
+ [52] = '4', // 4
+ [53] = '5', // 5
+ [54] = '6', // 6
+ [55] = '7', // 7
+ [56] = '8', // 8
+ [57] = '9', // 9
+ [58] = '_', // :
+ [59] = '_', // ;
+ [60] = '_', // <
+ [61] = '_', // =
+ [62] = '_', // >
+ [63] = '_', // ?
+ [64] = '_', // @
+ [65] = 'a', // A
+ [66] = 'b', // B
+ [67] = 'c', // C
+ [68] = 'd', // D
+ [69] = 'e', // E
+ [70] = 'f', // F
+ [71] = 'g', // G
+ [72] = 'h', // H
+ [73] = 'i', // I
+ [74] = 'j', // J
+ [75] = 'k', // K
+ [76] = 'l', // L
+ [77] = 'm', // M
+ [78] = 'n', // N
+ [79] = 'o', // O
+ [80] = 'p', // P
+ [81] = 'q', // Q
+ [82] = 'r', // R
+ [83] = 's', // S
+ [84] = 't', // T
+ [85] = 'u', // U
+ [86] = 'v', // V
+ [87] = 'w', // W
+ [88] = 'x', // X
+ [89] = 'y', // Y
+ [90] = 'z', // Z
+ [91] = '_', // [
+ [92] = '/', // backslash
+ [93] = '_', // ]
+ [94] = '_', // ^
+ [95] = '_', // _
+ [96] = '_', // `
+ [97] = 'a', // a
+ [98] = 'b', // b
+ [99] = 'c', // c
+ [100] = 'd', // d
+ [101] = 'e', // e
+ [102] = 'f', // f
+ [103] = 'g', // g
+ [104] = 'h', // h
+ [105] = 'i', // i
+ [106] = 'j', // j
+ [107] = 'k', // k
+ [108] = 'l', // l
+ [109] = 'm', // m
+ [110] = 'n', // n
+ [111] = 'o', // o
+ [112] = 'p', // p
+ [113] = 'q', // q
+ [114] = 'r', // r
+ [115] = 's', // s
+ [116] = 't', // t
+ [117] = 'u', // u
+ [118] = 'v', // v
+ [119] = 'w', // w
+ [120] = 'x', // x
+ [121] = 'y', // y
+ [122] = 'z', // z
+ [123] = '_', // {
+ [124] = '_', // |
+ [125] = '_', // }
+ [126] = '_', // ~
+ [127] = '_', //
+ [128] = '_', //
+ [129] = '_', //
+ [130] = '_', //
+ [131] = '_', //
+ [132] = '_', //
+ [133] = '_', //
+ [134] = '_', //
+ [135] = '_', //
+ [136] = '_', //
+ [137] = '_', //
+ [138] = '_', //
+ [139] = '_', //
+ [140] = '_', //
+ [141] = '_', //
+ [142] = '_', //
+ [143] = '_', //
+ [144] = '_', //
+ [145] = '_', //
+ [146] = '_', //
+ [147] = '_', //
+ [148] = '_', //
+ [149] = '_', //
+ [150] = '_', //
+ [151] = '_', //
+ [152] = '_', //
+ [153] = '_', //
+ [154] = '_', //
+ [155] = '_', //
+ [156] = '_', //
+ [157] = '_', //
+ [158] = '_', //
+ [159] = '_', //
+ [160] = '_', //
+ [161] = '_', //
+ [162] = '_', //
+ [163] = '_', //
+ [164] = '_', //
+ [165] = '_', //
+ [166] = '_', //
+ [167] = '_', //
+ [168] = '_', //
+ [169] = '_', //
+ [170] = '_', //
+ [171] = '_', //
+ [172] = '_', //
+ [173] = '_', //
+ [174] = '_', //
+ [175] = '_', //
+ [176] = '_', //
+ [177] = '_', //
+ [178] = '_', //
+ [179] = '_', //
+ [180] = '_', //
+ [181] = '_', //
+ [182] = '_', //
+ [183] = '_', //
+ [184] = '_', //
+ [185] = '_', //
+ [186] = '_', //
+ [187] = '_', //
+ [188] = '_', //
+ [189] = '_', //
+ [190] = '_', //
+ [191] = '_', //
+ [192] = '_', //
+ [193] = '_', //
+ [194] = '_', //
+ [195] = '_', //
+ [196] = '_', //
+ [197] = '_', //
+ [198] = '_', //
+ [199] = '_', //
+ [200] = '_', //
+ [201] = '_', //
+ [202] = '_', //
+ [203] = '_', //
+ [204] = '_', //
+ [205] = '_', //
+ [206] = '_', //
+ [207] = '_', //
+ [208] = '_', //
+ [209] = '_', //
+ [210] = '_', //
+ [211] = '_', //
+ [212] = '_', //
+ [213] = '_', //
+ [214] = '_', //
+ [215] = '_', //
+ [216] = '_', //
+ [217] = '_', //
+ [218] = '_', //
+ [219] = '_', //
+ [220] = '_', //
+ [221] = '_', //
+ [222] = '_', //
+ [223] = '_', //
+ [224] = '_', //
+ [225] = '_', //
+ [226] = '_', //
+ [227] = '_', //
+ [228] = '_', //
+ [229] = '_', //
+ [230] = '_', //
+ [231] = '_', //
+ [232] = '_', //
+ [233] = '_', //
+ [234] = '_', //
+ [235] = '_', //
+ [236] = '_', //
+ [237] = '_', //
+ [238] = '_', //
+ [239] = '_', //
+ [240] = '_', //
+ [241] = '_', //
+ [242] = '_', //
+ [243] = '_', //
+ [244] = '_', //
+ [245] = '_', //
+ [246] = '_', //
+ [247] = '_', //
+ [248] = '_', //
+ [249] = '_', //
+ [250] = '_', //
+ [251] = '_', //
+ [252] = '_', //
+ [253] = '_', //
+ [254] = '_', //
+ [255] = '_' //
+};
+
+// make sure the supplied string
+// is good for a netdata chart/dimension ID/NAME
+void netdata_fix_chart_id(char *s) {
+ while ((*s = netdata_map_chart_ids[(unsigned char) *s])) s++;
+}
+
+/*
+// http://stackoverflow.com/questions/7666509/hash-function-for-string
+uint32_t simple_hash(const char *name)
+{
+ const char *s = name;
+ uint32_t hash = 5381;
+ int i;
+
+ while((i = *s++)) hash = ((hash << 5) + hash) + i;
+
+ // fprintf(stderr, "HASH: %lu %s\n", hash, name);
+
+ return hash;
+}
+*/
+
+/*
+// http://isthe.com/chongo/tech/comp/fnv/#FNV-1a
+uint32_t simple_hash(const char *name) {
+ unsigned char *s = (unsigned char *) name;
+ uint32_t hval = 0x811c9dc5;
+
+ // FNV-1a algorithm
+ while (*s) {
+ // multiply by the 32 bit FNV magic prime mod 2^32
+ // NOTE: No need to optimize with left shifts.
+ // GCC will use imul instruction anyway.
+ // Tested with 'gcc -O3 -S'
+ //hval += (hval<<1) + (hval<<4) + (hval<<7) + (hval<<8) + (hval<<24);
+ hval *= 16777619;
+
+ // xor the bottom with the current octet
+ hval ^= (uint32_t) *s++;
+ }
+
+ // fprintf(stderr, "HASH: %u = %s\n", hval, name);
+ return hval;
+}
+
+uint32_t simple_uhash(const char *name) {
+ unsigned char *s = (unsigned char *) name;
+ uint32_t hval = 0x811c9dc5, c;
+
+ // FNV-1a algorithm
+ while ((c = *s++)) {
+ if (unlikely(c >= 'A' && c <= 'Z')) c += 'a' - 'A';
+ hval *= 16777619;
+ hval ^= c;
+ }
+ return hval;
+}
+*/
+
+/*
+// http://eternallyconfuzzled.com/tuts/algorithms/jsw_tut_hashing.aspx
+// one at a time hash
+uint32_t simple_hash(const char *name) {
+ unsigned char *s = (unsigned char *)name;
+ uint32_t h = 0;
+
+ while(*s) {
+ h += *s++;
+ h += (h << 10);
+ h ^= (h >> 6);
+ }
+
+ h += (h << 3);
+ h ^= (h >> 11);
+ h += (h << 15);
+
+ // fprintf(stderr, "HASH: %u = %s\n", h, name);
+
+ return h;
+}
+*/
+
+void strreverse(char *begin, char *end) {
+ while (end > begin) {
+ // swap the two ends using a temporary
+ char aux = *end;
+ *end-- = *begin;
+ *begin++ = aux;
+ }
+}
+
+char *strsep_on_1char(char **ptr, char c) {
+ if(unlikely(!ptr || !*ptr))
+ return NULL;
+
+ // remember the position we started
+ char *s = *ptr;
+
+ // skip separators in front
+ while(*s == c) s++;
+ char *ret = s;
+
+ // find the next separator
+ while(*s++) {
+ if(unlikely(*s == c)) {
+ *s++ = '\0';
+ *ptr = s;
+ return ret;
+ }
+ }
+
+ *ptr = NULL;
+ return ret;
+}
+
+char *mystrsep(char **ptr, char *s) {
+ char *p = "";
+ while (p && !p[0] && *ptr) p = strsep(ptr, s);
+ return (p);
+}
+
+char *trim(char *s) {
+ // skip leading spaces
+ while (*s && isspace(*s)) s++;
+ if (!*s) return NULL;
+
+ // skip trailing spaces
+ // this way is much faster - it writes only one NUL char.
+ ssize_t l = strlen(s);
+ if (--l >= 0) {
+ char *p = s + l;
+ while (p > s && isspace(*p)) p--;
+ *++p = '\0';
+ }
+
+ if (!*s) return NULL;
+
+ return s;
+}
+
+inline char *trim_all(char *buffer) {
+ char *d = buffer, *s = buffer;
+
+ // skip spaces
+ while(isspace(*s)) s++;
+
+ while(*s) {
+ // copy the non-space part
+ while(*s && !isspace(*s)) *d++ = *s++;
+
+ // add a space if we have to
+ if(*s && isspace(*s)) {
+ *d++ = ' ';
+ s++;
+ }
+
+ // skip spaces
+ while(isspace(*s)) s++;
+ }
+
+ *d = '\0';
+
+ if(d > buffer) {
+ d--;
+ if(isspace(*d)) *d = '\0';
+ }
+
+ if(!buffer[0]) return NULL;
+ return buffer;
+}
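
`trim()` strips only the edges and returns NULL for all-whitespace input, while `trim_all()` also squeezes interior whitespace runs down to single spaces; both work in place:

```c
static void demo_trim(void) {
    char a[] = "  hello world  ";
    char b[] = "  hello \t world  ";
    char *ta = trim(a);        // -> "hello world"
    char *tb = trim_all(b);    // -> "hello world" (inner run squeezed)
    (void)ta; (void)tb;
}
```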
+
+static int memory_file_open(const char *filename, size_t size) {
+ // info("memory_file_open('%s', %zu", filename, size);
+
+ int fd = open(filename, O_RDWR | O_CREAT | O_NOATIME, 0664);
+ if (fd != -1) {
+ if (lseek(fd, size, SEEK_SET) == (off_t) size) {
+ if (write(fd, "", 1) == 1) {
+ if (ftruncate(fd, size))
+ error("Cannot truncate file '%s' to size %zu. Will use the larger file.", filename, size);
+ }
+ else error("Cannot write to file '%s' at position %zu.", filename, size);
+ }
+ else error("Cannot seek file '%s' to size %zu.", filename, size);
+ }
+ else error("Cannot create/open file '%s'.", filename);
+
+ return fd;
+}
+
+// mmap_shared is used for memory mode = map
+static void *memory_file_mmap(const char *filename, size_t size, int flags) {
+ // info("memory_file_mmap('%s', %zu", filename, size);
+ static int log_madvise = 1;
+
+ int fd = -1;
+ if(filename) {
+ fd = memory_file_open(filename, size);
+ if(fd == -1) return MAP_FAILED;
+ }
+
+ void *mem = mmap(NULL, size, PROT_READ | PROT_WRITE, flags, fd, 0);
+ if (mem != MAP_FAILED) {
+#ifdef NETDATA_LOG_ALLOCATIONS
+ mmap_accounting(size);
+#endif
+ int advise = MADV_SEQUENTIAL | MADV_DONTFORK;
+ if (flags & MAP_SHARED) advise |= MADV_WILLNEED;
+
+ if (madvise(mem, size, advise) != 0 && log_madvise) {
+ error("Cannot advise the kernel about shared memory usage.");
+ log_madvise--;
+ }
+ }
+
+ if(fd != -1)
+ close(fd);
+
+ return mem;
+}
+
+#ifdef MADV_MERGEABLE
+static void *memory_file_mmap_ksm(const char *filename, size_t size, int flags) {
+ // info("memory_file_mmap_ksm('%s', %zu", filename, size);
+ static int log_madvise_2 = 1, log_madvise_3 = 1;
+
+ int fd = -1;
+ if(filename) {
+ fd = memory_file_open(filename, size);
+ if(fd == -1) return MAP_FAILED;
+ }
+
+ void *mem = mmap(NULL, size, PROT_READ | PROT_WRITE, flags | MAP_ANONYMOUS, -1, 0);
+ if (mem != MAP_FAILED) {
+#ifdef NETDATA_LOG_ALLOCATIONS
+ mmap_accounting(size);
+#endif
+ if(fd != -1) {
+ if (lseek(fd, 0, SEEK_SET) == 0) {
+ if (read(fd, mem, size) != (ssize_t) size)
+ error("Cannot read from file '%s'", filename);
+ }
+ else error("Cannot seek to beginning of file '%s'.", filename);
+ }
+
+ // don't use MADV_SEQUENTIAL|MADV_DONTFORK, they disable MADV_MERGEABLE
+ if (madvise(mem, size, MADV_SEQUENTIAL | MADV_DONTFORK) != 0 && log_madvise_2) {
+ error("Cannot advise the kernel about the memory usage (MADV_SEQUENTIAL|MADV_DONTFORK) of file '%s'.", filename);
+ log_madvise_2--;
+ }
+
+ if (madvise(mem, size, MADV_MERGEABLE) != 0 && log_madvise_3) {
+ error("Cannot advise the kernel about the memory usage (MADV_MERGEABLE) of file '%s'.", filename);
+ log_madvise_3--;
+ }
+ }
+
+ if(fd != -1)
+ close(fd);
+
+ return mem;
+}
+#else
+static void *memory_file_mmap_ksm(const char *filename, size_t size, int flags) {
+ // info("memory_file_mmap_ksm FALLBACK ('%s', %zu", filename, size);
+
+ if(filename)
+ return memory_file_mmap(filename, size, flags);
+
+ // when KSM is not available and no filename is given (memory mode = ram),
+ // we just report failure
+ return MAP_FAILED;
+}
+#endif
+
+void *mymmap(const char *filename, size_t size, int flags, int ksm) {
+ void *mem = NULL;
+
+ if (filename && (flags & MAP_SHARED || !enable_ksm || !ksm))
+ // memory mode = map | save
+ // when KSM is not enabled
+ // MAP_SHARED is used for memory mode = map (no KSM possible)
+ mem = memory_file_mmap(filename, size, flags);
+
+ else
+ // memory mode = save | ram
+ // when KSM is enabled
+ // for memory mode = ram, the filename is NULL
+ mem = memory_file_mmap_ksm(filename, size, flags);
+
+ if(mem == MAP_FAILED) return NULL;
+
+ errno = 0;
+ return mem;
+}
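+
+/*
+// A minimal usage sketch (hypothetical path and size; for illustration only).
+// With MAP_SHARED the mapping is backed by the file (memory mode = map),
+// so stores become visible in the file without an explicit save.
+//
+// size_t size = 4096;
+// void *mem = mymmap("/tmp/netdata-example.db", size, MAP_SHARED, 0);
+// if(mem) {
+//     // ... use 'mem' as ordinary memory ...
+//     munmap(mem, size);
+// }
+*/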
+
+int memory_file_save(const char *filename, void *mem, size_t size) {
+ char tmpfilename[FILENAME_MAX + 1];
+
+ snprintfz(tmpfilename, FILENAME_MAX, "%s.%ld.tmp", filename, (long) getpid());
+
+ int fd = open(tmpfilename, O_RDWR | O_CREAT | O_NOATIME, 0664);
+ if (fd < 0) {
+ error("Cannot create/open file '%s'.", filename);
+ return -1;
+ }
+
+ if (write(fd, mem, size) != (ssize_t) size) {
+ error("Cannot write to file '%s' %ld bytes.", filename, (long) size);
+ close(fd);
+ return -1;
+ }
+
+ close(fd);
+
+ if (rename(tmpfilename, filename)) {
+ error("Cannot rename '%s' to '%s'", tmpfilename, filename);
+ return -1;
+ }
+
+ return 0;
+}
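+
+/*
+// A minimal usage sketch (hypothetical path): the buffer is first written
+// to a '<name>.<pid>.tmp' file and then rename()'d over the target, so a
+// crash mid-write never leaves a truncated destination file behind.
+//
+// if(memory_file_save("/var/cache/netdata/example.db", mem, size) != 0)
+//     error("could not save the file");
+*/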
+
+int fd_is_valid(int fd) {
+ return fcntl(fd, F_GETFD) != -1 || errno != EBADF;
+}
+
+char *fgets_trim_len(char *buf, size_t buf_size, FILE *fp, size_t *len) {
+ char *s = fgets(buf, (int)buf_size, fp);
+ if (!s) return NULL;
+
+ char *t = s;
+ if (*t != '\0') {
+ // find the string end
+ while (*++t != '\0');
+
+ // trim trailing newlines
+ while (--t > s && *t == '\n')
+ *t = '\0';
+ }
+
+ if (len)
+ *len = t - s + 1;
+
+ return s;
+}
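+
+/*
+// A minimal usage sketch (hypothetical stream 'fp'):
+//
+// char line[1024];
+// size_t len;
+// while(fgets_trim_len(line, sizeof(line), fp, &len)) {
+//     // 'line' has its trailing newlines removed; 'len' is its length
+// }
+*/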
+
+int vsnprintfz(char *dst, size_t n, const char *fmt, va_list args) {
+ int size = vsnprintf(dst, n, fmt, args);
+
+ if (unlikely((size_t) size > n)) {
+ // truncated
+ size = (int)n;
+ }
+
+ dst[size] = '\0';
+ return size;
+}
+
+int snprintfz(char *dst, size_t n, const char *fmt, ...) {
+ va_list args;
+
+ va_start(args, fmt);
+ int ret = vsnprintfz(dst, n, fmt, args);
+ va_end(args);
+
+ return ret;
+}
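+
+/*
+// A minimal usage sketch: unlike plain snprintf(), these always write a
+// terminating NUL at or before dst[n], so the destination buffer must
+// have room for n + 1 bytes - this is why buffers in this file are
+// declared as FILENAME_MAX + 1 while FILENAME_MAX is passed as 'n'.
+//
+// char buf[10 + 1];
+// snprintfz(buf, 10, "%s %d", "value", 42);
+*/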
+
+/*
+// poor man cycle counting
+static unsigned long tsc;
+void begin_tsc(void) {
+ unsigned long a, d;
+ asm volatile ("cpuid\nrdtsc" : "=a" (a), "=d" (d) : "0" (0) : "ebx", "ecx");
+ tsc = ((unsigned long)d << 32) | (unsigned long)a;
+}
+unsigned long end_tsc(void) {
+ unsigned long a, d;
+ asm volatile ("rdtscp" : "=a" (a), "=d" (d) : : "ecx");
+ return (((unsigned long)d << 32) | (unsigned long)a) - tsc;
+}
+*/
+
+int recursively_delete_dir(const char *path, const char *reason) {
+ DIR *dir = opendir(path);
+ if(!dir) {
+ error("Cannot read %s directory to be deleted '%s'", reason?reason:"", path);
+ return -1;
+ }
+
+ int ret = 0;
+ struct dirent *de = NULL;
+ while((de = readdir(dir))) {
+ if(de->d_type == DT_DIR
+ && (
+ (de->d_name[0] == '.' && de->d_name[1] == '\0')
+ || (de->d_name[0] == '.' && de->d_name[1] == '.' && de->d_name[2] == '\0')
+ ))
+ continue;
+
+ char fullpath[FILENAME_MAX + 1];
+ snprintfz(fullpath, FILENAME_MAX, "%s/%s", path, de->d_name);
+
+ if(de->d_type == DT_DIR) {
+ int r = recursively_delete_dir(fullpath, reason);
+ if(r > 0) ret += r;
+ continue;
+ }
+
+ info("Deleting %s file '%s'", reason?reason:"", fullpath);
+ if(unlikely(unlink(fullpath) == -1))
+ error("Cannot delete %s file '%s'", reason?reason:"", fullpath);
+ else
+ ret++;
+ }
+
+ info("Deleting empty directory '%s'", path);
+ if(unlikely(rmdir(path) == -1))
+ error("Cannot delete empty directory '%s'", path);
+ else
+ ret++;
+
+ closedir(dir);
+
+ return ret;
+}
+
+static int is_virtual_filesystem(const char *path, char **reason) {
+
+#if defined(__APPLE__) || defined(__FreeBSD__)
+ (void)path;
+ (void)reason;
+#else
+ struct statfs stat;
+ // f_fsid is the file system id;
+ // virtual file systems like /proc and /sys
+ // report a zero f_fsid
+
+ if (statfs(path, &stat) == -1) {
+ if(reason) *reason = "failed to statfs()";
+ return -1;
+ }
+
+ if(stat.f_fsid.__val[0] != 0 || stat.f_fsid.__val[1] != 0) {
+ errno = EINVAL;
+ if(reason) *reason = "is not a virtual file system";
+ return -1;
+ }
+#endif
+
+ return 0;
+}
+
+int verify_netdata_host_prefix() {
+ if(!netdata_configured_host_prefix)
+ netdata_configured_host_prefix = "";
+
+ if(!*netdata_configured_host_prefix)
+ return 0;
+
+ char buffer[FILENAME_MAX + 1];
+ char *path = netdata_configured_host_prefix;
+ char *reason = "unknown reason";
+ errno = 0;
+
+ struct stat sb;
+ if (stat(path, &sb) == -1) {
+ reason = "failed to stat()";
+ goto failed;
+ }
+
+ if((sb.st_mode & S_IFMT) != S_IFDIR) {
+ errno = EINVAL;
+ reason = "is not a directory";
+ goto failed;
+ }
+
+ path = buffer;
+ snprintfz(path, FILENAME_MAX, "%s/proc", netdata_configured_host_prefix);
+ if(is_virtual_filesystem(path, &reason) == -1)
+ goto failed;
+
+ snprintfz(path, FILENAME_MAX, "%s/sys", netdata_configured_host_prefix);
+ if(is_virtual_filesystem(path, &reason) == -1)
+ goto failed;
+
+ if(netdata_configured_host_prefix && *netdata_configured_host_prefix)
+ info("Using host prefix directory '%s'", netdata_configured_host_prefix);
+
+ return 0;
+
+failed:
+ error("Ignoring host prefix '%s': path '%s' %s", netdata_configured_host_prefix, path, reason);
+ netdata_configured_host_prefix = "";
+ return -1;
+}
+
+char *strdupz_path_subpath(const char *path, const char *subpath) {
+ if(unlikely(!path || !*path)) path = ".";
+ if(unlikely(!subpath)) subpath = "";
+
+ // skip trailing slashes in path
+ size_t len = strlen(path);
+ while(len > 0 && path[len - 1] == '/') len--;
+
+ // skip leading slashes in subpath
+ while(subpath[0] == '/') subpath++;
+
+ // if path originally ended with '/' and there is a subpath (or path is now empty),
+ // keep that slash from path and do not add another one
+ char *slash = "/";
+ if(path[len] == '/' && (*subpath || len == 0)) {
+ slash = "";
+ len++;
+ }
+ else if(!*subpath) {
+ // there is no subpath
+ // no need for trailing slash
+ slash = "";
+ }
+
+ char buffer[FILENAME_MAX + 1];
+ snprintfz(buffer, FILENAME_MAX, "%.*s%s%s", (int)len, path, slash, subpath);
+ return strdupz(buffer);
+}
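+
+/*
+// A minimal sketch of the joining behavior (hypothetical inputs);
+// the result is allocated with strdupz() and must be freez()'d:
+//
+// strdupz_path_subpath("/etc/netdata", "health.d")   // -> "/etc/netdata/health.d"
+// strdupz_path_subpath("/etc/netdata/", "/health.d") // -> "/etc/netdata/health.d"
+// strdupz_path_subpath(NULL, "health.d")             // -> "./health.d"
+*/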
+
+int path_is_dir(const char *path, const char *subpath) {
+ char *s = strdupz_path_subpath(path, subpath);
+
+ size_t max_links = 100;
+
+ int is_dir = 0;
+ struct stat statbuf;
+ while(max_links-- && stat(s, &statbuf) == 0) {
+ if((statbuf.st_mode & S_IFMT) == S_IFDIR) {
+ is_dir = 1;
+ break;
+ }
+ else if((statbuf.st_mode & S_IFMT) == S_IFLNK) {
+ char buffer[FILENAME_MAX + 1];
+ ssize_t l = readlink(s, buffer, FILENAME_MAX);
+ if(l > 0) {
+ buffer[l] = '\0';
+ freez(s);
+ s = strdupz(buffer);
+ continue;
+ }
+ else {
+ is_dir = 0;
+ break;
+ }
+ }
+ else {
+ is_dir = 0;
+ break;
+ }
+ }
+
+ freez(s);
+ return is_dir;
+}
+
+int path_is_file(const char *path, const char *subpath) {
+ char *s = strdupz_path_subpath(path, subpath);
+
+ size_t max_links = 100;
+
+ int is_file = 0;
+ struct stat statbuf;
+ while(max_links-- && stat(s, &statbuf) == 0) {
+ if((statbuf.st_mode & S_IFMT) == S_IFREG) {
+ is_file = 1;
+ break;
+ }
+ else if((statbuf.st_mode & S_IFMT) == S_IFLNK) {
+ char buffer[FILENAME_MAX + 1];
+ ssize_t l = readlink(s, buffer, FILENAME_MAX);
+ if(l > 0) {
+ buffer[l] = '\0';
+ freez(s);
+ s = strdupz(buffer);
+ continue;
+ }
+ else {
+ is_file = 0;
+ break;
+ }
+ }
+ else {
+ is_file = 0;
+ break;
+ }
+ }
+
+ freez(s);
+ return is_file;
+}
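+
+/*
+// A minimal usage sketch (hypothetical paths): both helpers join their
+// arguments with strdupz_path_subpath() and follow up to 100 levels of
+// symbolic links before giving up.
+//
+// if(path_is_dir("/etc/netdata", "health.d")) { ... }
+// if(path_is_file("/etc/netdata", "netdata.conf")) { ... }
+*/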
+
+void recursive_config_double_dir_load(const char *user_path, const char *stock_path, const char *subpath, int (*callback)(const char *filename, void *data), void *data, size_t depth) {
+ if(depth > 3) {
+ error("CONFIG: Max directory depth reached while reading user path '%s', stock path '%s', subpath '%s'", user_path, stock_path, subpath);
+ return;
+ }
+
+ char *udir = strdupz_path_subpath(user_path, subpath);
+ char *sdir = strdupz_path_subpath(stock_path, subpath);
+
+ debug(D_HEALTH, "CONFIG traversing user-config directory '%s', stock config directory '%s'", udir, sdir);
+
+ DIR *dir = opendir(udir);
+ if (!dir) {
+ error("CONFIG cannot open user-config directory '%s'.", udir);
+ }
+ else {
+ struct dirent *de = NULL;
+ while((de = readdir(dir))) {
+ if(de->d_type == DT_DIR || de->d_type == DT_LNK) {
+ if( !de->d_name[0] ||
+ (de->d_name[0] == '.' && de->d_name[1] == '\0') ||
+ (de->d_name[0] == '.' && de->d_name[1] == '.' && de->d_name[2] == '\0')
+ ) {
+ debug(D_HEALTH, "CONFIG ignoring user-config directory '%s/%s'", udir, de->d_name);
+ continue;
+ }
+
+ if(path_is_dir(udir, de->d_name)) {
+ recursive_config_double_dir_load(udir, sdir, de->d_name, callback, data, depth + 1);
+ continue;
+ }
+ }
+
+ if(de->d_type == DT_UNKNOWN || de->d_type == DT_REG || de->d_type == DT_LNK) {
+ size_t len = strlen(de->d_name);
+ if(path_is_file(udir, de->d_name) &&
+ len > 5 && !strcmp(&de->d_name[len - 5], ".conf")) {
+ char *filename = strdupz_path_subpath(udir, de->d_name);
+ debug(D_HEALTH, "CONFIG calling callback for user file '%s'", filename);
+ callback(filename, data);
+ freez(filename);
+ continue;
+ }
+ }
+
+ debug(D_HEALTH, "CONFIG ignoring user-config file '%s/%s' of type %d", udir, de->d_name, (int)de->d_type);
+ }
+
+ closedir(dir);
+ }
+
+ debug(D_HEALTH, "CONFIG traversing stock config directory '%s', user config directory '%s'", sdir, udir);
+
+ dir = opendir(sdir);
+ if (!dir) {
+ error("CONFIG cannot open stock config directory '%s'.", sdir);
+ }
+ else if (strcmp(udir, sdir)) {
+ struct dirent *de = NULL;
+ while((de = readdir(dir))) {
+ if(de->d_type == DT_DIR || de->d_type == DT_LNK) {
+ if( !de->d_name[0] ||
+ (de->d_name[0] == '.' && de->d_name[1] == '\0') ||
+ (de->d_name[0] == '.' && de->d_name[1] == '.' && de->d_name[2] == '\0')
+ ) {
+ debug(D_HEALTH, "CONFIG ignoring stock config directory '%s/%s'", sdir, de->d_name);
+ continue;
+ }
+
+ if(path_is_dir(sdir, de->d_name)) {
+ // we recurse in stock subdirectory, only when there is no corresponding
+ // user subdirectory - to avoid reading the files twice
+
+ if(!path_is_dir(udir, de->d_name))
+ recursive_config_double_dir_load(udir, sdir, de->d_name, callback, data, depth + 1);
+
+ continue;
+ }
+ }
+
+ if(de->d_type == DT_UNKNOWN || de->d_type == DT_REG || de->d_type == DT_LNK) {
+ size_t len = strlen(de->d_name);
+ if(path_is_file(sdir, de->d_name) && !path_is_file(udir, de->d_name) &&
+ len > 5 && !strcmp(&de->d_name[len - 5], ".conf")) {
+ char *filename = strdupz_path_subpath(sdir, de->d_name);
+ debug(D_HEALTH, "CONFIG calling callback for stock file '%s'", filename);
+ callback(filename, data);
+ freez(filename);
+ continue;
+ }
+
+ }
+
+ debug(D_HEALTH, "CONFIG ignoring stock-config file '%s/%s' of type %d", udir, de->d_name, (int)de->d_type);
+ }
+
+ closedir(dir);
+ }
+
+ debug(D_HEALTH, "CONFIG done traversing user-config directory '%s', stock config directory '%s'", udir, sdir);
+
+ freez(udir);
+ freez(sdir);
+}
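+
+/*
+// A minimal sketch of a loader callback (hypothetical function and paths):
+// the traversal calls it once for every matching *.conf file, preferring
+// user files over stock files of the same name.
+//
+// static int load_one_conf(const char *filename, void *data) {
+//     (void)data;
+//     info("CONFIG loading '%s'", filename);
+//     return 0;
+// }
+//
+// recursive_config_double_dir_load("/etc/netdata/health.d",
+//     "/usr/lib/netdata/conf.d/health.d", NULL, load_one_conf, NULL, 0);
+*/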
+
+// Reads the whole file into a newly allocated, NUL-terminated buffer and
+// returns it (or NULL on error). If file_size is not NULL, it is set to
+// the number of bytes read (the terminating NUL is not counted).
+char *read_by_filename(char *filename, long *file_size)
+{
+ FILE *f = fopen(filename, "r");
+ if (!f)
+ return NULL;
+ if (fseek(f, 0, SEEK_END) < 0) {
+ fclose(f);
+ return NULL;
+ }
+ long size = ftell(f);
+ if (size <= 0) {
+ fclose(f);
+ return NULL;
+ }
+ char *contents = callocz(size + 1, 1);
+ if (!contents) {
+ fclose(f);
+ return NULL;
+ }
+ if (fseek(f, 0, SEEK_SET) < 0) {
+ fclose(f);
+ freez(contents);
+ return NULL;
+ }
+ size_t res = fread(contents, 1, size, f);
+ if (res != (size_t)size) {
+ freez(contents);
+ fclose(f);
+ return NULL;
+ }
+ fclose(f);
+ if (file_size)
+ *file_size = size;
+ return contents;
+}
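+
+/*
+// A minimal usage sketch (hypothetical path):
+//
+// long size = 0;
+// char *contents = read_by_filename("/etc/netdata/netdata.conf", &size);
+// if(contents) {
+//     // 'contents' is NUL-terminated; 'size' holds the byte count
+//     freez(contents);
+// }
+*/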
diff --git a/libnetdata/libnetdata.h b/libnetdata/libnetdata.h
new file mode 100644
index 0000000..50568b5
--- /dev/null
+++ b/libnetdata/libnetdata.h
@@ -0,0 +1,340 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_LIB_H
+#define NETDATA_LIB_H 1
+
+# ifdef __cplusplus
+extern "C" {
+# endif
+
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#define OS_LINUX 1
+#define OS_FREEBSD 2
+#define OS_MACOS 3
+
+
+// ----------------------------------------------------------------------------
+// system include files for all netdata C programs
+
+/* select the memory allocator, based on autoconf findings */
+#if defined(ENABLE_JEMALLOC)
+
+#if defined(HAVE_JEMALLOC_JEMALLOC_H)
+#include <jemalloc/jemalloc.h>
+#else // !defined(HAVE_JEMALLOC_JEMALLOC_H)
+#include <malloc.h>
+#endif // !defined(HAVE_JEMALLOC_JEMALLOC_H)
+
+#elif defined(ENABLE_TCMALLOC)
+
+#include <google/tcmalloc.h>
+
+#else /* !defined(ENABLE_JEMALLOC) && !defined(ENABLE_TCMALLOC) */
+
+#if !(defined(__FreeBSD__) || defined(__APPLE__))
+#include <malloc.h>
+#endif /* __FreeBSD__ || __APPLE__ */
+
+#endif /* !defined(ENABLE_JEMALLOC) && !defined(ENABLE_TCMALLOC) */
+
+// ----------------------------------------------------------------------------
+
+#if defined(__FreeBSD__)
+#include <pthread_np.h>
+#define NETDATA_OS_TYPE "freebsd"
+#elif defined(__APPLE__)
+#define NETDATA_OS_TYPE "macos"
+#else
+#define NETDATA_OS_TYPE "linux"
+#endif /* __FreeBSD__, __APPLE__*/
+
+#include <pthread.h>
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdarg.h>
+#include <stddef.h>
+#include <ctype.h>
+#include <string.h>
+#include <strings.h>
+#include <arpa/inet.h>
+#include <netinet/tcp.h>
+#include <sys/ioctl.h>
+#include <libgen.h>
+#include <dirent.h>
+#include <fcntl.h>
+#include <getopt.h>
+#include <grp.h>
+#include <pwd.h>
+#include <limits.h>
+#include <locale.h>
+#include <net/if.h>
+#include <poll.h>
+#include <signal.h>
+#include <syslog.h>
+#include <sys/mman.h>
+#include <sys/resource.h>
+#include <sys/socket.h>
+#include <sys/syscall.h>
+#include <sys/time.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <sys/un.h>
+#include <time.h>
+#include <unistd.h>
+#include <uuid/uuid.h>
+#include <spawn.h>
+#include <uv.h>
+#include <assert.h>
+
+#ifdef HAVE_NETINET_IN_H
+#include <netinet/in.h>
+#endif
+
+#ifdef HAVE_RESOLV_H
+#include <resolv.h>
+#endif
+
+#ifdef HAVE_NETDB_H
+#include <netdb.h>
+#endif
+
+#ifdef HAVE_SYS_PRCTL_H
+#include <sys/prctl.h>
+#endif
+
+#ifdef HAVE_SYS_STAT_H
+#include <sys/stat.h>
+#endif
+
+#ifdef HAVE_SYS_VFS_H
+#include <sys/vfs.h>
+#endif
+
+#ifdef HAVE_SYS_STATFS_H
+#include <sys/statfs.h>
+#endif
+
+#ifdef HAVE_SYS_MOUNT_H
+#include <sys/mount.h>
+#endif
+
+#ifdef HAVE_SYS_STATVFS_H
+#include <sys/statvfs.h>
+#endif
+
+// #1408
+#ifdef MAJOR_IN_MKDEV
+#include <sys/mkdev.h>
+#endif
+#ifdef MAJOR_IN_SYSMACROS
+#include <sys/sysmacros.h>
+#endif
+
+#ifdef STORAGE_WITH_MATH
+#include <math.h>
+#include <float.h>
+#endif
+
+#if defined(HAVE_INTTYPES_H)
+#include <inttypes.h>
+#elif defined(HAVE_STDINT_H)
+#include <stdint.h>
+#endif
+
+#ifdef NETDATA_WITH_ZLIB
+#include <zlib.h>
+#endif
+
+#ifdef HAVE_CAPABILITY
+#include <sys/capability.h>
+#endif
+
+
+// ----------------------------------------------------------------------------
+// netdata common definitions
+
+#if (SIZEOF_VOID_P == 8)
+#define ENVIRONMENT64
+#elif (SIZEOF_VOID_P == 4)
+#define ENVIRONMENT32
+#else
+#error "Cannot detect if this is a 32 or 64 bit CPU"
+#endif
+
+#ifdef __GNUC__
+#define GCC_VERSION (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__)
+#endif // __GNUC__
+
+#ifdef HAVE_FUNC_ATTRIBUTE_RETURNS_NONNULL
+#define NEVERNULL __attribute__((returns_nonnull))
+#else
+#define NEVERNULL
+#endif
+
+#ifdef HAVE_FUNC_ATTRIBUTE_NOINLINE
+#define NOINLINE __attribute__((noinline))
+#else
+#define NOINLINE
+#endif
+
+#ifdef HAVE_FUNC_ATTRIBUTE_MALLOC
+#define MALLOCLIKE __attribute__((malloc))
+#else
+#define MALLOCLIKE
+#endif
+
+#ifdef HAVE_FUNC_ATTRIBUTE_FORMAT
+#define PRINTFLIKE(f, a) __attribute__ ((format(__printf__, f, a)))
+#else
+#define PRINTFLIKE(f, a)
+#endif
+
+#ifdef HAVE_FUNC_ATTRIBUTE_NORETURN
+#define NORETURN __attribute__ ((noreturn))
+#else
+#define NORETURN
+#endif
+
+#ifdef HAVE_FUNC_ATTRIBUTE_WARN_UNUSED_RESULT
+#define WARNUNUSED __attribute__ ((warn_unused_result))
+#else
+#define WARNUNUSED
+#endif
+
+#ifdef abs
+#undef abs
+#endif
+#define abs(x) (((x) < 0)? (-(x)) : (x))
+
+#define MIN(a,b) (((a)<(b))?(a):(b))
+#define MAX(a,b) (((a)>(b))?(a):(b))
+
+#define GUID_LEN 36
+
+extern void netdata_fix_chart_id(char *s);
+extern void netdata_fix_chart_name(char *s);
+
+extern void strreverse(char* begin, char* end);
+extern char *mystrsep(char **ptr, char *s);
+extern char *trim(char *s); // remove leading and trailing spaces; may return NULL
+extern char *trim_all(char *buffer); // like trim(), but also remove duplicate spaces inside the string; may return NULL
+
+extern int vsnprintfz(char *dst, size_t n, const char *fmt, va_list args);
+extern int snprintfz(char *dst, size_t n, const char *fmt, ...) PRINTFLIKE(3, 4);
+
+// memory allocation functions that handle failures
+#ifdef NETDATA_LOG_ALLOCATIONS
+extern __thread size_t log_thread_memory_allocations;
+#define strdupz(s) strdupz_int(__FILE__, __FUNCTION__, __LINE__, s)
+#define callocz(nmemb, size) callocz_int(__FILE__, __FUNCTION__, __LINE__, nmemb, size)
+#define mallocz(size) mallocz_int(__FILE__, __FUNCTION__, __LINE__, size)
+#define reallocz(ptr, size) reallocz_int(__FILE__, __FUNCTION__, __LINE__, ptr, size)
+#define freez(ptr) freez_int(__FILE__, __FUNCTION__, __LINE__, ptr)
+
+extern char *strdupz_int(const char *file, const char *function, const unsigned long line, const char *s);
+extern void *callocz_int(const char *file, const char *function, const unsigned long line, size_t nmemb, size_t size);
+extern void *mallocz_int(const char *file, const char *function, const unsigned long line, size_t size);
+extern void *reallocz_int(const char *file, const char *function, const unsigned long line, void *ptr, size_t size);
+extern void freez_int(const char *file, const char *function, const unsigned long line, void *ptr);
+#else // NETDATA_LOG_ALLOCATIONS
+extern char *strdupz(const char *s) MALLOCLIKE NEVERNULL;
+extern void *callocz(size_t nmemb, size_t size) MALLOCLIKE NEVERNULL;
+extern void *mallocz(size_t size) MALLOCLIKE NEVERNULL;
+extern void *reallocz(void *ptr, size_t size) MALLOCLIKE NEVERNULL;
+extern void freez(void *ptr);
+#endif // NETDATA_LOG_ALLOCATIONS
+
+extern void json_escape_string(char *dst, const char *src, size_t size);
+extern void json_fix_string(char *s);
+
+extern void *mymmap(const char *filename, size_t size, int flags, int ksm);
+extern int memory_file_save(const char *filename, void *mem, size_t size);
+
+extern int fd_is_valid(int fd);
+
+extern struct rlimit rlimit_nofile;
+
+extern int enable_ksm;
+
+extern char *fgets_trim_len(char *buf, size_t buf_size, FILE *fp, size_t *len);
+
+extern int verify_netdata_host_prefix();
+
+extern int recursively_delete_dir(const char *path, const char *reason);
+
+extern volatile sig_atomic_t netdata_exit;
+extern const char *os_type;
+
+extern const char *program_version;
+
+extern char *strdupz_path_subpath(const char *path, const char *subpath);
+extern int path_is_dir(const char *path, const char *subpath);
+extern int path_is_file(const char *path, const char *subpath);
+extern void recursive_config_double_dir_load(
+ const char *user_path
+ , const char *stock_path
+ , const char *subpath
+ , int (*callback)(const char *filename, void *data)
+ , void *data
+ , size_t depth
+);
+extern char *read_by_filename(char *filename, long *file_size);
+
+/* fix for alpine linux */
+#ifndef RUSAGE_THREAD
+#ifdef RUSAGE_CHILDREN
+#define RUSAGE_THREAD RUSAGE_CHILDREN
+#endif
+#endif
+
+#define BITS_IN_A_KILOBIT 1000
+
+/* misc. */
+#define UNUSED(x) (void)(x)
+#define error_report(x, args...) do { errno = 0; error(x, ##args); } while(0)
+
+extern void netdata_cleanup_and_exit(int ret) NORETURN;
+extern void send_statistics(const char *action, const char *action_result, const char *action_data);
+extern char *netdata_configured_host_prefix;
+#include "os.h"
+#include "storage_number/storage_number.h"
+#include "threads/threads.h"
+#include "buffer/buffer.h"
+#include "locks/locks.h"
+#include "circular_buffer/circular_buffer.h"
+#include "avl/avl.h"
+#include "inlined.h"
+#include "clocks/clocks.h"
+#include "popen/popen.h"
+#include "simple_pattern/simple_pattern.h"
+#ifdef ENABLE_HTTPS
+# include "socket/security.h"
+#endif
+#include "socket/socket.h"
+#include "config/appconfig.h"
+#include "log/log.h"
+#include "procfile/procfile.h"
+#include "dictionary/dictionary.h"
+#ifdef HAVE_LIBBPF
+#include "ebpf/ebpf.h"
+#endif
+#include "eval/eval.h"
+#include "statistical/statistical.h"
+#include "adaptive_resortable_list/adaptive_resortable_list.h"
+#include "url/url.h"
+#include "json/json.h"
+#include "health/health.h"
+#include "string/utf8.h"
+
+// BEWARE: Outside of the C code this also exists in alarm-notify.sh
+#define DEFAULT_CLOUD_BASE_URL "https://app.netdata.cloud"
+
+# ifdef __cplusplus
+}
+# endif
+
+#endif // NETDATA_LIB_H
diff --git a/libnetdata/locks/Makefile.am b/libnetdata/locks/Makefile.am
new file mode 100644
index 0000000..161784b
--- /dev/null
+++ b/libnetdata/locks/Makefile.am
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/libnetdata/locks/README.md b/libnetdata/locks/README.md
new file mode 100644
index 0000000..b525c97
--- /dev/null
+++ b/libnetdata/locks/README.md
@@ -0,0 +1,5 @@
+<!--
+custom_edit_url: https://github.com/netdata/netdata/edit/master/libnetdata/locks/README.md
+-->
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Flibnetdata%2Flocks%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/libnetdata/locks/locks.c b/libnetdata/locks/locks.c
new file mode 100644
index 0000000..ca9a5ae
--- /dev/null
+++ b/libnetdata/locks/locks.c
@@ -0,0 +1,343 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "../libnetdata.h"
+
+// ----------------------------------------------------------------------------
+// automatic thread cancelability management, based on locks
+
+static __thread int netdata_thread_first_cancelability = 0;
+static __thread int netdata_thread_lock_cancelability = 0;
+
+inline void netdata_thread_disable_cancelability(void) {
+ int old;
+ int ret = pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &old);
+ if(ret != 0)
+ error("THREAD_CANCELABILITY: pthread_setcancelstate() on thread %s returned error %d", netdata_thread_tag(), ret);
+ else {
+ if(!netdata_thread_lock_cancelability)
+ netdata_thread_first_cancelability = old;
+
+ netdata_thread_lock_cancelability++;
+ }
+}
+
+inline void netdata_thread_enable_cancelability(void) {
+ if(netdata_thread_lock_cancelability < 1) {
+ error("THREAD_CANCELABILITY: netdata_thread_enable_cancelability(): invalid thread cancelability count %d on thread %s - results will be undefined - please report this!", netdata_thread_lock_cancelability, netdata_thread_tag());
+ }
+ else if(netdata_thread_lock_cancelability == 1) {
+ int old = 1;
+ int ret = pthread_setcancelstate(netdata_thread_first_cancelability, &old);
+ if(ret != 0)
+ error("THREAD_CANCELABILITY: pthread_setcancelstate() on thread %s returned error %d", netdata_thread_tag(), ret);
+ else {
+ if(old != PTHREAD_CANCEL_DISABLE)
+ error("THREAD_CANCELABILITY: netdata_thread_enable_cancelability(): old thread cancelability on thread %s was changed, expected DISABLED (%d), found %s (%d) - please report this!", netdata_thread_tag(), PTHREAD_CANCEL_DISABLE, (old == PTHREAD_CANCEL_ENABLE)?"ENABLED":"UNKNOWN", old);
+ }
+
+ netdata_thread_lock_cancelability = 0;
+ }
+ else
+ netdata_thread_lock_cancelability--;
+}
+
+// ----------------------------------------------------------------------------
+// mutex
+
+int __netdata_mutex_init(netdata_mutex_t *mutex) {
+ int ret = pthread_mutex_init(mutex, NULL);
+ if(unlikely(ret != 0))
+ error("MUTEX_LOCK: failed to initialize (code %d).", ret);
+ return ret;
+}
+
+int __netdata_mutex_lock(netdata_mutex_t *mutex) {
+ netdata_thread_disable_cancelability();
+
+ int ret = pthread_mutex_lock(mutex);
+ if(unlikely(ret != 0)) {
+ netdata_thread_enable_cancelability();
+ error("MUTEX_LOCK: failed to get lock (code %d)", ret);
+ }
+ return ret;
+}
+
+int __netdata_mutex_trylock(netdata_mutex_t *mutex) {
+ netdata_thread_disable_cancelability();
+
+ int ret = pthread_mutex_trylock(mutex);
+ if(ret != 0)
+ netdata_thread_enable_cancelability();
+
+ return ret;
+}
+
+int __netdata_mutex_unlock(netdata_mutex_t *mutex) {
+ int ret = pthread_mutex_unlock(mutex);
+ if(unlikely(ret != 0))
+ error("MUTEX_LOCK: failed to unlock (code %d).", ret);
+ else
+ netdata_thread_enable_cancelability();
+
+ return ret;
+}
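+
+/*
+// A minimal usage sketch: every successful lock disables thread
+// cancelability and the matching unlock restores it, so lock and unlock
+// must always be paired on the same thread.
+//
+// static netdata_mutex_t mu = NETDATA_MUTEX_INITIALIZER;
+// netdata_mutex_lock(&mu);
+// // ... critical section ...
+// netdata_mutex_unlock(&mu);
+*/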
+
+int netdata_mutex_init_debug(const char *file __maybe_unused, const char *function __maybe_unused,
+ const unsigned long line __maybe_unused, netdata_mutex_t *mutex) {
+ usec_t start = 0;
+ (void)start;
+
+ if(unlikely(debug_flags & D_LOCKS)) {
+ start = now_boottime_usec();
+ debug(D_LOCKS, "MUTEX_LOCK: netdata_mutex_init(0x%p) from %lu@%s, %s()", mutex, line, file, function);
+ }
+
+ int ret = __netdata_mutex_init(mutex);
+
+ debug(D_LOCKS, "MUTEX_LOCK: netdata_mutex_init(0x%p) = %d in %llu usec, from %lu@%s, %s()", mutex, ret, now_boottime_usec() - start, line, file, function);
+
+ return ret;
+}
+
+int netdata_mutex_lock_debug(const char *file __maybe_unused, const char *function __maybe_unused,
+ const unsigned long line __maybe_unused, netdata_mutex_t *mutex) {
+ usec_t start = 0;
+ (void)start;
+
+ if(unlikely(debug_flags & D_LOCKS)) {
+ start = now_boottime_usec();
+ debug(D_LOCKS, "MUTEX_LOCK: netdata_mutex_lock(0x%p) from %lu@%s, %s()", mutex, line, file, function);
+ }
+
+ int ret = __netdata_mutex_lock(mutex);
+
+ debug(D_LOCKS, "MUTEX_LOCK: netdata_mutex_lock(0x%p) = %d in %llu usec, from %lu@%s, %s()", mutex, ret, now_boottime_usec() - start, line, file, function);
+
+ return ret;
+}
+
+int netdata_mutex_trylock_debug(const char *file __maybe_unused, const char *function __maybe_unused,
+ const unsigned long line __maybe_unused, netdata_mutex_t *mutex) {
+ usec_t start = 0;
+ (void)start;
+
+ if(unlikely(debug_flags & D_LOCKS)) {
+ start = now_boottime_usec();
+ debug(D_LOCKS, "MUTEX_LOCK: netdata_mutex_trylock(0x%p) from %lu@%s, %s()", mutex, line, file, function);
+ }
+
+ int ret = __netdata_mutex_trylock(mutex);
+
+ debug(D_LOCKS, "MUTEX_LOCK: netdata_mutex_trylock(0x%p) = %d in %llu usec, from %lu@%s, %s()", mutex, ret, now_boottime_usec() - start, line, file, function);
+
+ return ret;
+}
+
+int netdata_mutex_unlock_debug(const char *file __maybe_unused, const char *function __maybe_unused,
+ const unsigned long line __maybe_unused, netdata_mutex_t *mutex) {
+ usec_t start = 0;
+ (void)start;
+
+ if(unlikely(debug_flags & D_LOCKS)) {
+ start = now_boottime_usec();
+ debug(D_LOCKS, "MUTEX_LOCK: netdata_mutex_unlock(0x%p) from %lu@%s, %s()", mutex, line, file, function);
+ }
+
+ int ret = __netdata_mutex_unlock(mutex);
+
+ debug(D_LOCKS, "MUTEX_LOCK: netdata_mutex_unlock(0x%p) = %d in %llu usec, from %lu@%s, %s()", mutex, ret, now_boottime_usec() - start, line, file, function);
+
+ return ret;
+}
+
+
+// ----------------------------------------------------------------------------
+// r/w lock
+
+int __netdata_rwlock_destroy(netdata_rwlock_t *rwlock) {
+ int ret = pthread_rwlock_destroy(rwlock);
+ if(unlikely(ret != 0))
+ error("RW_LOCK: failed to destroy lock (code %d)", ret);
+ return ret;
+}
+
+int __netdata_rwlock_init(netdata_rwlock_t *rwlock) {
+ int ret = pthread_rwlock_init(rwlock, NULL);
+ if(unlikely(ret != 0))
+ error("RW_LOCK: failed to initialize lock (code %d)", ret);
+ return ret;
+}
+
+int __netdata_rwlock_rdlock(netdata_rwlock_t *rwlock) {
+ netdata_thread_disable_cancelability();
+
+ int ret = pthread_rwlock_rdlock(rwlock);
+ if(unlikely(ret != 0)) {
+ netdata_thread_enable_cancelability();
+ error("RW_LOCK: failed to obtain read lock (code %d)", ret);
+ }
+
+ return ret;
+}
+
+int __netdata_rwlock_wrlock(netdata_rwlock_t *rwlock) {
+ netdata_thread_disable_cancelability();
+
+ int ret = pthread_rwlock_wrlock(rwlock);
+ if(unlikely(ret != 0)) {
+ error("RW_LOCK: failed to obtain write lock (code %d)", ret);
+ netdata_thread_enable_cancelability();
+ }
+
+ return ret;
+}
+
+int __netdata_rwlock_unlock(netdata_rwlock_t *rwlock) {
+ int ret = pthread_rwlock_unlock(rwlock);
+ if(unlikely(ret != 0))
+ error("RW_LOCK: failed to release lock (code %d)", ret);
+ else
+ netdata_thread_enable_cancelability();
+
+ return ret;
+}
+
+int __netdata_rwlock_tryrdlock(netdata_rwlock_t *rwlock) {
+ netdata_thread_disable_cancelability();
+
+ int ret = pthread_rwlock_tryrdlock(rwlock);
+ if(ret != 0)
+ netdata_thread_enable_cancelability();
+
+ return ret;
+}
+
+int __netdata_rwlock_trywrlock(netdata_rwlock_t *rwlock) {
+ netdata_thread_disable_cancelability();
+
+ int ret = pthread_rwlock_trywrlock(rwlock);
+ if(ret != 0)
+ netdata_thread_enable_cancelability();
+
+ return ret;
+}
+
+
+int netdata_rwlock_destroy_debug(const char *file __maybe_unused, const char *function __maybe_unused,
+ const unsigned long line __maybe_unused, netdata_rwlock_t *rwlock) {
+ usec_t start = 0;
+ (void)start;
+
+ if(unlikely(debug_flags & D_LOCKS)) {
+ start = now_boottime_usec();
+ debug(D_LOCKS, "RW_LOCK: netdata_rwlock_destroy(0x%p) from %lu@%s, %s()", rwlock, line, file, function);
+ }
+
+ int ret = __netdata_rwlock_destroy(rwlock);
+
+ debug(D_LOCKS, "RW_LOCK: netdata_rwlock_destroy(0x%p) = %d in %llu usec, from %lu@%s, %s()", rwlock, ret, now_boottime_usec() - start, line, file, function);
+
+ return ret;
+}
+
+int netdata_rwlock_init_debug(const char *file __maybe_unused, const char *function __maybe_unused,
+ const unsigned long line __maybe_unused, netdata_rwlock_t *rwlock) {
+ usec_t start = 0;
+ (void)start;
+
+ if(unlikely(debug_flags & D_LOCKS)) {
+ start = now_boottime_usec();
+ debug(D_LOCKS, "RW_LOCK: netdata_rwlock_init(0x%p) from %lu@%s, %s()", rwlock, line, file, function);
+ }
+
+ int ret = __netdata_rwlock_init(rwlock);
+
+ debug(D_LOCKS, "RW_LOCK: netdata_rwlock_init(0x%p) = %d in %llu usec, from %lu@%s, %s()", rwlock, ret, now_boottime_usec() - start, line, file, function);
+
+ return ret;
+}
+
+int netdata_rwlock_rdlock_debug(const char *file __maybe_unused, const char *function __maybe_unused,
+ const unsigned long line __maybe_unused, netdata_rwlock_t *rwlock) {
+ usec_t start = 0;
+ (void)start;
+
+ if(unlikely(debug_flags & D_LOCKS)) {
+ start = now_boottime_usec();
+ debug(D_LOCKS, "RW_LOCK: netdata_rwlock_rdlock(0x%p) from %lu@%s, %s()", rwlock, line, file, function);
+ }
+
+ int ret = __netdata_rwlock_rdlock(rwlock);
+
+ debug(D_LOCKS, "RW_LOCK: netdata_rwlock_rdlock(0x%p) = %d in %llu usec, from %lu@%s, %s()", rwlock, ret, now_boottime_usec() - start, line, file, function);
+
+ return ret;
+}
+
+int netdata_rwlock_wrlock_debug(const char *file __maybe_unused, const char *function __maybe_unused,
+ const unsigned long line __maybe_unused, netdata_rwlock_t *rwlock) {
+ usec_t start = 0;
+ (void)start;
+
+ if(unlikely(debug_flags & D_LOCKS)) {
+ start = now_boottime_usec();
+ debug(D_LOCKS, "RW_LOCK: netdata_rwlock_wrlock(0x%p) from %lu@%s, %s()", rwlock, line, file, function);
+ }
+
+ int ret = __netdata_rwlock_wrlock(rwlock);
+
+ debug(D_LOCKS, "RW_LOCK: netdata_rwlock_wrlock(0x%p) = %d in %llu usec, from %lu@%s, %s()", rwlock, ret, now_boottime_usec() - start, line, file, function);
+
+ return ret;
+}
+
+int netdata_rwlock_unlock_debug(const char *file __maybe_unused, const char *function __maybe_unused,
+ const unsigned long line __maybe_unused, netdata_rwlock_t *rwlock) {
+ usec_t start = 0;
+ (void)start;
+
+ if(unlikely(debug_flags & D_LOCKS)) {
+ start = now_boottime_usec();
+ debug(D_LOCKS, "RW_LOCK: netdata_rwlock_unlock(0x%p) from %lu@%s, %s()", rwlock, line, file, function);
+ }
+
+ int ret = __netdata_rwlock_unlock(rwlock);
+
+ debug(D_LOCKS, "RW_LOCK: netdata_rwlock_unlock(0x%p) = %d in %llu usec, from %lu@%s, %s()", rwlock, ret, now_boottime_usec() - start, line, file, function);
+
+ return ret;
+}
+
+int netdata_rwlock_tryrdlock_debug(const char *file __maybe_unused, const char *function __maybe_unused,
+ const unsigned long line __maybe_unused, netdata_rwlock_t *rwlock) {
+ usec_t start = 0;
+ (void)start;
+
+ if(unlikely(debug_flags & D_LOCKS)) {
+ start = now_boottime_usec();
+ debug(D_LOCKS, "RW_LOCK: netdata_rwlock_tryrdlock(0x%p) from %lu@%s, %s()", rwlock, line, file, function);
+ }
+
+ int ret = __netdata_rwlock_tryrdlock(rwlock);
+
+ debug(D_LOCKS, "RW_LOCK: netdata_rwlock_tryrdlock(0x%p) = %d in %llu usec, from %lu@%s, %s()", rwlock, ret, now_boottime_usec() - start, line, file, function);
+
+ return ret;
+}
+
+int netdata_rwlock_trywrlock_debug(const char *file __maybe_unused, const char *function __maybe_unused,
+ const unsigned long line __maybe_unused, netdata_rwlock_t *rwlock) {
+ usec_t start = 0;
+ (void)start;
+
+ if(unlikely(debug_flags & D_LOCKS)) {
+ start = now_boottime_usec();
+ debug(D_LOCKS, "RW_LOCK: netdata_rwlock_trywrlock(0x%p) from %lu@%s, %s()", rwlock, line, file, function);
+ }
+
+ int ret = __netdata_rwlock_trywrlock(rwlock);
+
+ debug(D_LOCKS, "RW_LOCK: netdata_rwlock_trywrlock(0x%p) = %d in %llu usec, from %lu@%s, %s()", rwlock, ret, now_boottime_usec() - start, line, file, function);
+
+ return ret;
+}
diff --git a/libnetdata/locks/locks.h b/libnetdata/locks/locks.h
new file mode 100644
index 0000000..850dd7e
--- /dev/null
+++ b/libnetdata/locks/locks.h
@@ -0,0 +1,75 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_LOCKS_H
+#define NETDATA_LOCKS_H 1
+
+#include "../libnetdata.h"
+
+typedef pthread_mutex_t netdata_mutex_t;
+#define NETDATA_MUTEX_INITIALIZER PTHREAD_MUTEX_INITIALIZER
+
+typedef pthread_rwlock_t netdata_rwlock_t;
+#define NETDATA_RWLOCK_INITIALIZER PTHREAD_RWLOCK_INITIALIZER
+
+extern int __netdata_mutex_init(netdata_mutex_t *mutex);
+extern int __netdata_mutex_lock(netdata_mutex_t *mutex);
+extern int __netdata_mutex_trylock(netdata_mutex_t *mutex);
+extern int __netdata_mutex_unlock(netdata_mutex_t *mutex);
+
+extern int __netdata_rwlock_destroy(netdata_rwlock_t *rwlock);
+extern int __netdata_rwlock_init(netdata_rwlock_t *rwlock);
+extern int __netdata_rwlock_rdlock(netdata_rwlock_t *rwlock);
+extern int __netdata_rwlock_wrlock(netdata_rwlock_t *rwlock);
+extern int __netdata_rwlock_unlock(netdata_rwlock_t *rwlock);
+extern int __netdata_rwlock_tryrdlock(netdata_rwlock_t *rwlock);
+extern int __netdata_rwlock_trywrlock(netdata_rwlock_t *rwlock);
+
+extern int netdata_mutex_init_debug( const char *file, const char *function, const unsigned long line, netdata_mutex_t *mutex);
+extern int netdata_mutex_lock_debug( const char *file, const char *function, const unsigned long line, netdata_mutex_t *mutex);
+extern int netdata_mutex_trylock_debug( const char *file, const char *function, const unsigned long line, netdata_mutex_t *mutex);
+extern int netdata_mutex_unlock_debug( const char *file, const char *function, const unsigned long line, netdata_mutex_t *mutex);
+
+extern int netdata_rwlock_destroy_debug( const char *file, const char *function, const unsigned long line, netdata_rwlock_t *rwlock);
+extern int netdata_rwlock_init_debug( const char *file, const char *function, const unsigned long line, netdata_rwlock_t *rwlock);
+extern int netdata_rwlock_rdlock_debug( const char *file, const char *function, const unsigned long line, netdata_rwlock_t *rwlock);
+extern int netdata_rwlock_wrlock_debug( const char *file, const char *function, const unsigned long line, netdata_rwlock_t *rwlock);
+extern int netdata_rwlock_unlock_debug( const char *file, const char *function, const unsigned long line, netdata_rwlock_t *rwlock);
+extern int netdata_rwlock_tryrdlock_debug( const char *file, const char *function, const unsigned long line, netdata_rwlock_t *rwlock);
+extern int netdata_rwlock_trywrlock_debug( const char *file, const char *function, const unsigned long line, netdata_rwlock_t *rwlock);
+
+extern void netdata_thread_disable_cancelability(void);
+extern void netdata_thread_enable_cancelability(void);
+
+#ifdef NETDATA_INTERNAL_CHECKS
+
+#define netdata_mutex_init(mutex) netdata_mutex_init_debug(__FILE__, __FUNCTION__, __LINE__, mutex)
+#define netdata_mutex_lock(mutex) netdata_mutex_lock_debug(__FILE__, __FUNCTION__, __LINE__, mutex)
+#define netdata_mutex_trylock(mutex) netdata_mutex_trylock_debug(__FILE__, __FUNCTION__, __LINE__, mutex)
+#define netdata_mutex_unlock(mutex) netdata_mutex_unlock_debug(__FILE__, __FUNCTION__, __LINE__, mutex)
+
+#define netdata_rwlock_destroy(rwlock) netdata_rwlock_destroy_debug(__FILE__, __FUNCTION__, __LINE__, rwlock)
+#define netdata_rwlock_init(rwlock) netdata_rwlock_init_debug(__FILE__, __FUNCTION__, __LINE__, rwlock)
+#define netdata_rwlock_rdlock(rwlock) netdata_rwlock_rdlock_debug(__FILE__, __FUNCTION__, __LINE__, rwlock)
+#define netdata_rwlock_wrlock(rwlock) netdata_rwlock_wrlock_debug(__FILE__, __FUNCTION__, __LINE__, rwlock)
+#define netdata_rwlock_unlock(rwlock) netdata_rwlock_unlock_debug(__FILE__, __FUNCTION__, __LINE__, rwlock)
+#define netdata_rwlock_tryrdlock(rwlock) netdata_rwlock_tryrdlock_debug(__FILE__, __FUNCTION__, __LINE__, rwlock)
+#define netdata_rwlock_trywrlock(rwlock) netdata_rwlock_trywrlock_debug(__FILE__, __FUNCTION__, __LINE__, rwlock)
+
+#else // !NETDATA_INTERNAL_CHECKS
+
+#define netdata_mutex_init(mutex) __netdata_mutex_init(mutex)
+#define netdata_mutex_lock(mutex) __netdata_mutex_lock(mutex)
+#define netdata_mutex_trylock(mutex) __netdata_mutex_trylock(mutex)
+#define netdata_mutex_unlock(mutex) __netdata_mutex_unlock(mutex)
+
+#define netdata_rwlock_destroy(rwlock) __netdata_rwlock_destroy(rwlock)
+#define netdata_rwlock_init(rwlock) __netdata_rwlock_init(rwlock)
+#define netdata_rwlock_rdlock(rwlock) __netdata_rwlock_rdlock(rwlock)
+#define netdata_rwlock_wrlock(rwlock) __netdata_rwlock_wrlock(rwlock)
+#define netdata_rwlock_unlock(rwlock) __netdata_rwlock_unlock(rwlock)
+#define netdata_rwlock_tryrdlock(rwlock) __netdata_rwlock_tryrdlock(rwlock)
+#define netdata_rwlock_trywrlock(rwlock) __netdata_rwlock_trywrlock(rwlock)
+
+#endif // NETDATA_INTERNAL_CHECKS
+
+#endif //NETDATA_LOCKS_H
diff --git a/libnetdata/log/Makefile.am b/libnetdata/log/Makefile.am
new file mode 100644
index 0000000..161784b
--- /dev/null
+++ b/libnetdata/log/Makefile.am
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/libnetdata/log/README.md b/libnetdata/log/README.md
new file mode 100644
index 0000000..c0d7a34
--- /dev/null
+++ b/libnetdata/log/README.md
@@ -0,0 +1,5 @@
+<!--
+custom_edit_url: https://github.com/netdata/netdata/edit/master/libnetdata/log/README.md
+-->
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Flibnetdata%2Flog%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/libnetdata/log/log.c b/libnetdata/log/log.c
new file mode 100644
index 0000000..95d3d98
--- /dev/null
+++ b/libnetdata/log/log.c
@@ -0,0 +1,872 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include <daemon/main.h>
+#include "../libnetdata.h"
+
+int web_server_is_multithreaded = 1;
+
+const char *program_name = "";
+uint64_t debug_flags = DEBUG;
+
+int access_log_syslog = 1;
+int error_log_syslog = 1;
+int output_log_syslog = 1; // debug log
+
+int stdaccess_fd = -1;
+FILE *stdaccess = NULL;
+
+const char *stdaccess_filename = NULL;
+const char *stderr_filename = NULL;
+const char *stdout_filename = NULL;
+const char *facility_log = NULL;
+
+// ----------------------------------------------------------------------------
+// Log facility (https://tools.ietf.org/html/rfc5424)
+//
+// The facilities accepted by Netdata follow the header files of the
+// respective operating systems:
+// sys/syslog.h (Linux)
+// sys/sys/syslog.h (FreeBSD)
+// bsd/sys/syslog.h (darwin-xnu)
+
+#define LOG_AUTH_KEY "auth"
+#define LOG_AUTHPRIV_KEY "authpriv"
+#ifdef __FreeBSD__
+# define LOG_CONSOLE_KEY "console"
+#endif
+#define LOG_CRON_KEY "cron"
+#define LOG_DAEMON_KEY "daemon"
+#define LOG_FTP_KEY "ftp"
+#ifdef __APPLE__
+# define LOG_INSTALL_KEY "install"
+#endif
+#define LOG_KERN_KEY "kern"
+#define LOG_LPR_KEY "lpr"
+#define LOG_MAIL_KEY "mail"
+//#define LOG_INTERNAL_MARK_KEY "mark"
+#ifdef __APPLE__
+# define LOG_NETINFO_KEY "netinfo"
+# define LOG_RAS_KEY "ras"
+# define LOG_REMOTEAUTH_KEY "remoteauth"
+#endif
+#define LOG_NEWS_KEY "news"
+#ifdef __FreeBSD__
+# define LOG_NTP_KEY "ntp"
+#endif
+#define LOG_SECURITY_KEY "security"
+#define LOG_SYSLOG_KEY "syslog"
+#define LOG_USER_KEY "user"
+#define LOG_UUCP_KEY "uucp"
+#ifdef __APPLE__
+# define LOG_LAUNCHD_KEY "launchd"
+#endif
+#define LOG_LOCAL0_KEY "local0"
+#define LOG_LOCAL1_KEY "local1"
+#define LOG_LOCAL2_KEY "local2"
+#define LOG_LOCAL3_KEY "local3"
+#define LOG_LOCAL4_KEY "local4"
+#define LOG_LOCAL5_KEY "local5"
+#define LOG_LOCAL6_KEY "local6"
+#define LOG_LOCAL7_KEY "local7"
+
+static int log_facility_id(const char *facility_name)
+{
+ static int
+ hash_auth = 0,
+ hash_authpriv = 0,
+#ifdef __FreeBSD__
+ hash_console = 0,
+#endif
+ hash_cron = 0,
+ hash_daemon = 0,
+ hash_ftp = 0,
+#ifdef __APPLE__
+ hash_install = 0,
+#endif
+ hash_kern = 0,
+ hash_lpr = 0,
+ hash_mail = 0,
+// hash_mark = 0,
+#ifdef __APPLE__
+ hash_netinfo = 0,
+ hash_ras = 0,
+ hash_remoteauth = 0,
+#endif
+ hash_news = 0,
+#ifdef __FreeBSD__
+ hash_ntp = 0,
+#endif
+ hash_security = 0,
+ hash_syslog = 0,
+ hash_user = 0,
+ hash_uucp = 0,
+#ifdef __APPLE__
+ hash_launchd = 0,
+#endif
+ hash_local0 = 0,
+ hash_local1 = 0,
+ hash_local2 = 0,
+ hash_local3 = 0,
+ hash_local4 = 0,
+ hash_local5 = 0,
+ hash_local6 = 0,
+ hash_local7 = 0;
+
+ if(unlikely(!hash_auth))
+ {
+ hash_auth = simple_hash(LOG_AUTH_KEY);
+ hash_authpriv = simple_hash(LOG_AUTHPRIV_KEY);
+#ifdef __FreeBSD__
+ hash_console = simple_hash(LOG_CONSOLE_KEY);
+#endif
+ hash_cron = simple_hash(LOG_CRON_KEY);
+ hash_daemon = simple_hash(LOG_DAEMON_KEY);
+ hash_ftp = simple_hash(LOG_FTP_KEY);
+#ifdef __APPLE__
+ hash_install = simple_hash(LOG_INSTALL_KEY);
+#endif
+ hash_kern = simple_hash(LOG_KERN_KEY);
+ hash_lpr = simple_hash(LOG_LPR_KEY);
+ hash_mail = simple_hash(LOG_MAIL_KEY);
+// hash_mark = simple_uhash();
+#ifdef __APPLE__
+ hash_netinfo = simple_hash(LOG_NETINFO_KEY);
+ hash_ras = simple_hash(LOG_RAS_KEY);
+ hash_remoteauth = simple_hash(LOG_REMOTEAUTH_KEY);
+#endif
+ hash_news = simple_hash(LOG_NEWS_KEY);
+#ifdef __FreeBSD__
+ hash_ntp = simple_hash(LOG_NTP_KEY);
+#endif
+ hash_security = simple_hash(LOG_SECURITY_KEY);
+ hash_syslog = simple_hash(LOG_SYSLOG_KEY);
+ hash_user = simple_hash(LOG_USER_KEY);
+ hash_uucp = simple_hash(LOG_UUCP_KEY);
+#ifdef __APPLE__
+ hash_launchd = simple_hash(LOG_LAUNCHD_KEY);
+#endif
+ hash_local0 = simple_hash(LOG_LOCAL0_KEY);
+ hash_local1 = simple_hash(LOG_LOCAL1_KEY);
+ hash_local2 = simple_hash(LOG_LOCAL2_KEY);
+ hash_local3 = simple_hash(LOG_LOCAL3_KEY);
+ hash_local4 = simple_hash(LOG_LOCAL4_KEY);
+ hash_local5 = simple_hash(LOG_LOCAL5_KEY);
+ hash_local6 = simple_hash(LOG_LOCAL6_KEY);
+ hash_local7 = simple_hash(LOG_LOCAL7_KEY);
+ }
+
+ int hash = simple_hash(facility_name);
+ if ( hash == hash_auth )
+ {
+ return LOG_AUTH;
+ }
+ else if ( hash == hash_authpriv )
+ {
+ return LOG_AUTHPRIV;
+ }
+#ifdef __FreeBSD__
+ else if ( hash == hash_console )
+ {
+ return LOG_CONSOLE;
+ }
+#endif
+ else if ( hash == hash_cron )
+ {
+ return LOG_CRON;
+ }
+ else if ( hash == hash_daemon )
+ {
+ return LOG_DAEMON;
+ }
+ else if ( hash == hash_ftp )
+ {
+ return LOG_FTP;
+ }
+#ifdef __APPLE__
+ else if ( hash == hash_install )
+ {
+ return LOG_INSTALL;
+ }
+#endif
+ else if ( hash == hash_kern )
+ {
+ return LOG_KERN;
+ }
+ else if ( hash == hash_lpr )
+ {
+ return LOG_LPR;
+ }
+ else if ( hash == hash_mail )
+ {
+ return LOG_MAIL;
+ }
+ /*
+ else if ( hash == hash_mark )
+ {
+ //this is internal for all OS
+ return INTERNAL_MARK;
+ }
+ */
+#ifdef __APPLE__
+ else if ( hash == hash_netinfo )
+ {
+ return LOG_NETINFO;
+ }
+ else if ( hash == hash_ras )
+ {
+ return LOG_RAS;
+ }
+ else if ( hash == hash_remoteauth )
+ {
+ return LOG_REMOTEAUTH;
+ }
+#endif
+ else if ( hash == hash_news )
+ {
+ return LOG_NEWS;
+ }
+#ifdef __FreeBSD__
+ else if ( hash == hash_ntp )
+ {
+ return LOG_NTP;
+ }
+#endif
+ else if ( hash == hash_security )
+ {
+ // FreeBSD is the only OS that does not consider
+ // this facility deprecated; we keep it for the
+ // other OSes for as long as their headers define it.
+#ifdef __FreeBSD__
+ return LOG_SECURITY;
+#else
+ return LOG_AUTH;
+#endif
+ }
+ else if ( hash == hash_syslog )
+ {
+ return LOG_SYSLOG;
+ }
+ else if ( hash == hash_user )
+ {
+ return LOG_USER;
+ }
+ else if ( hash == hash_uucp )
+ {
+ return LOG_UUCP;
+ }
+ else if ( hash == hash_local0 )
+ {
+ return LOG_LOCAL0;
+ }
+ else if ( hash == hash_local1 )
+ {
+ return LOG_LOCAL1;
+ }
+ else if ( hash == hash_local2 )
+ {
+ return LOG_LOCAL2;
+ }
+ else if ( hash == hash_local3 )
+ {
+ return LOG_LOCAL3;
+ }
+ else if ( hash == hash_local4 )
+ {
+ return LOG_LOCAL4;
+ }
+ else if ( hash == hash_local5 )
+ {
+ return LOG_LOCAL5;
+ }
+ else if ( hash == hash_local6 )
+ {
+ return LOG_LOCAL6;
+ }
+ else if ( hash == hash_local7 )
+ {
+ return LOG_LOCAL7;
+ }
+#ifdef __APPLE__
+ else if ( hash == hash_launchd )
+ {
+ return LOG_LAUNCHD;
+ }
+#endif
+
+ return LOG_DAEMON;
+}
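+
+/*
+// A minimal usage sketch: unknown or unsupported facility names fall
+// back to LOG_DAEMON.
+//
+// int facility = log_facility_id("local5"); // -> LOG_LOCAL5
+// int fallback = log_facility_id("bogus");  // -> LOG_DAEMON
+*/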
+
+// This function is not used yet; it is kept here so it can be
+// used when it becomes necessary.
+/*
+char *log_facility_name(int code)
+{
+ char *defvalue = { "daemon" };
+ switch(code)
+ {
+ case LOG_AUTH:
+ {
+ return "auth";
+ }
+ case LOG_AUTHPRIV:
+ {
+ return "authpriv";
+ }
+#ifdef __FreeBSD__
+ case LOG_CONSOLE:
+ {
+ return "console";
+ }
+#endif
+ case LOG_CRON:
+ {
+ return "cron";
+ }
+ case LOG_DAEMON:
+ {
+ return defvalue;
+ }
+ case LOG_FTP:
+ {
+ return "ftp";
+ }
+#ifdef __APPLE__
+ case LOG_INSTALL:
+ {
+ return "install";
+ }
+#endif
+ case LOG_KERN:
+ {
+ return "kern";
+ }
+ case LOG_LPR:
+ {
+ return "lpr";
+ }
+ case LOG_MAIL:
+ {
+ return "mail";
+ }
+#ifdef __APPLE__
+ case LOG_NETINFO:
+ {
+ return "netinfo" ;
+ }
+ case LOG_RAS:
+ {
+ return "ras";
+ }
+ case LOG_REMOTEAUTH:
+ {
+ return "remoteauth";
+ }
+#endif
+ case LOG_NEWS:
+ {
+ return "news";
+ }
+#ifdef __FreeBSD__
+ case LOG_NTP:
+ {
+ return "ntp" ;
+ }
+ case LOG_SECURITY:
+ {
+ return "security";
+ }
+#endif
+ case LOG_SYSLOG:
+ {
+ return "syslog";
+ }
+ case LOG_USER:
+ {
+ return "user";
+ }
+ case LOG_UUCP:
+ {
+ return "uucp";
+ }
+ case LOG_LOCAL0:
+ {
+ return "local0";
+ }
+ case LOG_LOCAL1:
+ {
+ return "local1";
+ }
+ case LOG_LOCAL2:
+ {
+ return "local2";
+ }
+ case LOG_LOCAL3:
+ {
+ return "local3";
+ }
+ case LOG_LOCAL4:
+ {
+ return "local4" ;
+ }
+ case LOG_LOCAL5:
+ {
+ return "local5";
+ }
+ case LOG_LOCAL6:
+ {
+ return "local6";
+ }
+ case LOG_LOCAL7:
+ {
+ return "local7" ;
+ }
+#ifdef __APPLE__
+ case LOG_LAUNCHD:
+ {
+ return "launchd";
+ }
+#endif
+ }
+
+ return defvalue;
+}
+*/
+
+// ----------------------------------------------------------------------------
+
+void syslog_init() {
+ static int i = 0;
+
+ if(!i) {
+ openlog(program_name, LOG_PID,log_facility_id(facility_log));
+ i = 1;
+ }
+}
+
+#define LOG_DATE_LENGTH 26
+
+static inline void log_date(char *buffer, size_t len) {
+ if(unlikely(!buffer || !len))
+ return;
+
+ time_t t;
+ struct tm *tmp, tmbuf;
+
+ t = now_realtime_sec();
+ tmp = localtime_r(&t, &tmbuf);
+
+ if (tmp == NULL) {
+ buffer[0] = '\0';
+ return;
+ }
+
+ if (unlikely(strftime(buffer, len, "%Y-%m-%d %H:%M:%S", tmp) == 0))
+ buffer[0] = '\0';
+
+ buffer[len - 1] = '\0';
+}
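+
+/*
+// A minimal usage sketch: formats the current local time as
+// "%Y-%m-%d %H:%M:%S"; on failure the buffer is set to "".
+//
+// char date[LOG_DATE_LENGTH];
+// log_date(date, LOG_DATE_LENGTH);
+*/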
+
+static netdata_mutex_t log_mutex = NETDATA_MUTEX_INITIALIZER;
+static inline void log_lock() {
+ netdata_mutex_lock(&log_mutex);
+}
+static inline void log_unlock() {
+ netdata_mutex_unlock(&log_mutex);
+}
+
+static FILE *open_log_file(int fd, FILE *fp, const char *filename, int *enabled_syslog, int is_stdaccess, int *fd_ptr) {
+ int f, devnull = 0;
+
+ if(!filename || !*filename || !strcmp(filename, "none") || !strcmp(filename, "/dev/null")) {
+ filename = "/dev/null";
+ devnull = 1;
+ }
+
+ if(!strcmp(filename, "syslog")) {
+ filename = "/dev/null";
+ devnull = 1;
+
+ syslog_init();
+ if(enabled_syslog) *enabled_syslog = 1;
+ }
+ else if(enabled_syslog) *enabled_syslog = 0;
+
+ // don't do anything if the user wants
+ // to keep the standard one
+ if(!strcmp(filename, "system")) {
+ if(fd != -1 && !is_stdaccess) {
+ if(fd_ptr) *fd_ptr = fd;
+ return fp;
+ }
+
+ filename = "stderr";
+ }
+
+ if(!strcmp(filename, "stdout"))
+ f = STDOUT_FILENO;
+
+ else if(!strcmp(filename, "stderr"))
+ f = STDERR_FILENO;
+
+ else {
+ f = open(filename, O_WRONLY | O_APPEND | O_CREAT, 0664);
+ if(f == -1) {
+ error("Cannot open file '%s'. Leaving %d to its default.", filename, fd);
+ if(fd_ptr) *fd_ptr = fd;
+ return fp;
+ }
+ }
+
+ // if there is a level-2 file pointer
+ // flush it before switching the level-1 fds
+ if(fp)
+ fflush(fp);
+
+ if(devnull && is_stdaccess) {
+ fd = -1;
+ fp = NULL;
+ }
+
+ if(fd != f && fd != -1) {
+ // it automatically closes
+ int t = dup2(f, fd);
+ if (t == -1) {
+ error("Cannot dup2() new fd %d to old fd %d for '%s'", f, fd, filename);
+ close(f);
+ if(fd_ptr) *fd_ptr = fd;
+ return fp;
+ }
+ // info("dup2() new fd %d to old fd %d for '%s'", f, fd, filename);
+ close(f);
+ }
+ else fd = f;
+
+ if(!fp) {
+ fp = fdopen(fd, "a");
+ if (!fp)
+ error("Cannot fdopen() fd %d ('%s')", fd, filename);
+ else {
+ if (setvbuf(fp, NULL, _IOLBF, 0) != 0)
+ error("Cannot set line buffering on fd %d ('%s')", fd, filename);
+ }
+ }
+
+ if(fd_ptr) *fd_ptr = fd;
+ return fp;
+}
+
+void reopen_all_log_files() {
+ if(stdout_filename)
+ open_log_file(STDOUT_FILENO, stdout, stdout_filename, &output_log_syslog, 0, NULL);
+
+ if(stderr_filename)
+ open_log_file(STDERR_FILENO, stderr, stderr_filename, &error_log_syslog, 0, NULL);
+
+ if(stdaccess_filename)
+ stdaccess = open_log_file(stdaccess_fd, stdaccess, stdaccess_filename, &access_log_syslog, 1, &stdaccess_fd);
+}
+
+void open_all_log_files() {
+ // disable stdin
+ open_log_file(STDIN_FILENO, stdin, "/dev/null", NULL, 0, NULL);
+
+ open_log_file(STDOUT_FILENO, stdout, stdout_filename, &output_log_syslog, 0, NULL);
+ open_log_file(STDERR_FILENO, stderr, stderr_filename, &error_log_syslog, 0, NULL);
+ stdaccess = open_log_file(stdaccess_fd, stdaccess, stdaccess_filename, &access_log_syslog, 1, &stdaccess_fd);
+}
+
+// ----------------------------------------------------------------------------
+// error log throttling
+
+time_t error_log_throttle_period = 1200;
+unsigned long error_log_errors_per_period = 200;
+unsigned long error_log_errors_per_period_backup = 0;
+
+int error_log_limit(int reset) {
+ static time_t start = 0;
+ static unsigned long counter = 0, prevented = 0;
+
+ // fprintf(stderr, "FLOOD: counter=%lu, allowed=%lu, backup=%lu, period=%llu\n", counter, error_log_errors_per_period, error_log_errors_per_period_backup, (unsigned long long)error_log_throttle_period);
+
+ // do not throttle if the period is 0
+ if(error_log_throttle_period == 0)
+ return 0;
+
+ // prevent all logs if the errors per period is 0
+ if(error_log_errors_per_period == 0)
+#ifdef NETDATA_INTERNAL_CHECKS
+ return 0;
+#else
+ return 1;
+#endif
+
+ time_t now = now_monotonic_sec();
+ if(!start) start = now;
+
+ if(reset) {
+ if(prevented) {
+ char date[LOG_DATE_LENGTH];
+ log_date(date, LOG_DATE_LENGTH);
+ fprintf(stderr, "%s: %s LOG FLOOD PROTECTION reset for process '%s' (prevented %lu logs in the last %ld seconds).\n"
+ , date
+ , program_name
+ , program_name
+ , prevented
+ , now - start
+ );
+ }
+
+ start = now;
+ counter = 0;
+ prevented = 0;
+ }
+
+ // detect if we log too much
+ counter++;
+
+ if(now - start > error_log_throttle_period) {
+ if(prevented) {
+ char date[LOG_DATE_LENGTH];
+ log_date(date, LOG_DATE_LENGTH);
+ fprintf(stderr, "%s: %s LOG FLOOD PROTECTION resuming logging from process '%s' (prevented %lu logs in the last %ld seconds).\n"
+ , date
+ , program_name
+ , program_name
+ , prevented
+ , error_log_throttle_period
+ );
+ }
+
+ // restart the period accounting
+ start = now;
+ counter = 1;
+ prevented = 0;
+
+ // log this error
+ return 0;
+ }
+
+ if(counter > error_log_errors_per_period) {
+ if(!prevented) {
+ char date[LOG_DATE_LENGTH];
+ log_date(date, LOG_DATE_LENGTH);
+ fprintf(stderr, "%s: %s LOG FLOOD PROTECTION too many logs (%lu logs in %ld seconds, threshold is set to %lu logs in %ld seconds). Preventing more logs from process '%s' for %ld seconds.\n"
+ , date
+ , program_name
+ , counter
+ , now - start
+ , error_log_errors_per_period
+ , error_log_throttle_period
+ , program_name
+ , start + error_log_throttle_period - now);
+ }
+
+ prevented++;
+
+ // prevent logging this error
+#ifdef NETDATA_INTERNAL_CHECKS
+ return 0;
+#else
+ return 1;
+#endif
+ }
+
+ return 0;
+}
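+
+/*
+// A minimal usage sketch: the info()/error() implementations below call
+// this before writing and drop the message when the flood threshold has
+// been crossed.
+//
+// if(error_log_limit(0)) return; // throttled - do not log this one
+*/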
+
+// ----------------------------------------------------------------------------
+// debug log
+
+void debug_int( const char *file, const char *function, const unsigned long line, const char *fmt, ... ) {
+ va_list args;
+
+ char date[LOG_DATE_LENGTH];
+ log_date(date, LOG_DATE_LENGTH);
+
+ va_start( args, fmt );
+ printf("%s: %s DEBUG : %s : (%04lu@%-10.10s:%-15.15s): ", date, program_name, netdata_thread_tag(), line, file, function);
+ vprintf(fmt, args);
+ va_end( args );
+ putchar('\n');
+
+ if(output_log_syslog) {
+ va_start( args, fmt );
+ vsyslog(LOG_ERR, fmt, args );
+ va_end( args );
+ }
+
+ fflush(stdout);
+}
+
+// ----------------------------------------------------------------------------
+// info log
+
+void info_int( const char *file, const char *function, const unsigned long line, const char *fmt, ... )
+{
+ va_list args;
+
+ // prevent logging too much
+ if(error_log_limit(0)) return;
+
+ if(error_log_syslog) {
+ va_start( args, fmt );
+ vsyslog(LOG_INFO, fmt, args );
+ va_end( args );
+ }
+
+ char date[LOG_DATE_LENGTH];
+ log_date(date, LOG_DATE_LENGTH);
+
+ log_lock();
+
+ va_start( args, fmt );
+ if(debug_flags) fprintf(stderr, "%s: %s INFO : %s : (%04lu@%-10.10s:%-15.15s): ", date, program_name, netdata_thread_tag(), line, file, function);
+ else fprintf(stderr, "%s: %s INFO : %s : ", date, program_name, netdata_thread_tag());
+ vfprintf( stderr, fmt, args );
+ va_end( args );
+
+ fputc('\n', stderr);
+
+ log_unlock();
+}
+
+// ----------------------------------------------------------------------------
+// error log
+
+#if defined(STRERROR_R_CHAR_P)
+// GLIBC version of strerror_r
+static const char *strerror_result(const char *a, const char *b) { (void)b; return a; }
+#elif defined(HAVE_STRERROR_R)
+// POSIX version of strerror_r
+static const char *strerror_result(int a, const char *b) { (void)a; return b; }
+#elif defined(HAVE_C__GENERIC)
+
+// what a trick!
+// http://stackoverflow.com/questions/479207/function-overloading-in-c
+static const char *strerror_result_int(int a, const char *b) { (void)a; return b; }
+static const char *strerror_result_string(const char *a, const char *b) { (void)b; return a; }
+
+#define strerror_result(a, b) _Generic((a), \
+ int: strerror_result_int, \
+ char *: strerror_result_string \
+ )(a, b)
+
+#else
+#error "cannot detect the format of function strerror_r()"
+#endif
+
+void error_int( const char *prefix, const char *file, const char *function, const unsigned long line, const char *fmt, ... ) {
+ // save a copy of errno - just in case this function generates a new error
+ int __errno = errno;
+
+ va_list args;
+
+ // prevent logging too much
+ if(error_log_limit(0)) return;
+
+ if(error_log_syslog) {
+ va_start( args, fmt );
+ vsyslog(LOG_ERR, fmt, args );
+ va_end( args );
+ }
+
+ char date[LOG_DATE_LENGTH];
+ log_date(date, LOG_DATE_LENGTH);
+
+ log_lock();
+
+ va_start( args, fmt );
+ if(debug_flags) fprintf(stderr, "%s: %s %-5.5s : %s : (%04lu@%-10.10s:%-15.15s): ", date, program_name, prefix, netdata_thread_tag(), line, file, function);
+ else fprintf(stderr, "%s: %s %-5.5s : %s : ", date, program_name, prefix, netdata_thread_tag());
+ vfprintf( stderr, fmt, args );
+ va_end( args );
+
+ if(__errno) {
+ char buf[1024];
+ fprintf(stderr, " (errno %d, %s)\n", __errno, strerror_result(strerror_r(__errno, buf, 1023), buf));
+ errno = 0;
+ }
+ else
+ fputc('\n', stderr);
+
+ log_unlock();
+}
+
+void fatal_int( const char *file, const char *function, const unsigned long line, const char *fmt, ... ) {
+ // save a copy of errno - just in case this function generates a new error
+ int __errno = errno;
+ va_list args;
+ const char *thread_tag;
+ char os_threadname[NETDATA_THREAD_NAME_MAX + 1];
+
+ if(error_log_syslog) {
+ va_start( args, fmt );
+ vsyslog(LOG_CRIT, fmt, args );
+ va_end( args );
+ }
+
+ thread_tag = netdata_thread_tag();
+ if (!netdata_thread_tag_exists()) {
+ os_thread_get_current_name_np(os_threadname);
+ if ('\0' != os_threadname[0]) { /* if it is not an empty string, replace the "MAIN" thread_tag */
+ thread_tag = os_threadname;
+ }
+ }
+
+ char date[LOG_DATE_LENGTH];
+ log_date(date, LOG_DATE_LENGTH);
+
+ log_lock();
+
+ va_start( args, fmt );
+ if(debug_flags) fprintf(stderr, "%s: %s FATAL : %s : (%04lu@%-10.10s:%-15.15s): ", date, program_name, thread_tag, line, file, function);
+ else fprintf(stderr, "%s: %s FATAL : %s : ", date, program_name, thread_tag);
+ vfprintf( stderr, fmt, args );
+ va_end( args );
+
+ perror(" # ");
+ fputc('\n', stderr);
+
+ log_unlock();
+
+ char action_data[70+1];
+ snprintfz(action_data, 70, "%04lu@%-10.10s:%-15.15s/%d", line, file, function, __errno);
+ char action_result[60+1];
+
+ snprintfz(action_result, 60, "%s:%s", program_name, strncmp(thread_tag, "STREAM_RECEIVER", strlen("STREAM_RECEIVER")) ? thread_tag : "[x]");
+ send_statistics("FATAL", action_result, action_data);
+
+ netdata_cleanup_and_exit(1);
+}
+
+// ----------------------------------------------------------------------------
+// access log
+
+void log_access( const char *fmt, ... ) {
+ va_list args;
+
+ if(access_log_syslog) {
+ va_start( args, fmt );
+ vsyslog(LOG_INFO, fmt, args );
+ va_end( args );
+ }
+
+ if(stdaccess) {
+ static netdata_mutex_t access_mutex = NETDATA_MUTEX_INITIALIZER;
+
+ if(web_server_is_multithreaded)
+ netdata_mutex_lock(&access_mutex);
+
+ char date[LOG_DATE_LENGTH];
+ log_date(date, LOG_DATE_LENGTH);
+ fprintf(stdaccess, "%s: ", date);
+
+ va_start( args, fmt );
+ vfprintf( stdaccess, fmt, args );
+ va_end( args );
+ fputc('\n', stdaccess);
+
+ if(web_server_is_multithreaded)
+ netdata_mutex_unlock(&access_mutex);
+ }
+}
diff --git a/libnetdata/log/log.h b/libnetdata/log/log.h
new file mode 100644
index 0000000..c99c151
--- /dev/null
+++ b/libnetdata/log/log.h
@@ -0,0 +1,109 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_LOG_H
+#define NETDATA_LOG_H 1
+
+# ifdef __cplusplus
+extern "C" {
+# endif
+
+#include "../libnetdata.h"
+
+#define D_WEB_BUFFER 0x0000000000000001
+#define D_WEB_CLIENT 0x0000000000000002
+#define D_LISTENER 0x0000000000000004
+#define D_WEB_DATA 0x0000000000000008
+#define D_OPTIONS 0x0000000000000010
+#define D_PROCNETDEV_LOOP 0x0000000000000020
+#define D_RRD_STATS 0x0000000000000040
+#define D_WEB_CLIENT_ACCESS 0x0000000000000080
+#define D_TC_LOOP 0x0000000000000100
+#define D_DEFLATE 0x0000000000000200
+#define D_CONFIG 0x0000000000000400
+#define D_PLUGINSD 0x0000000000000800
+#define D_CHILDS 0x0000000000001000
+#define D_EXIT 0x0000000000002000
+#define D_CHECKS 0x0000000000004000
+#define D_NFACCT_LOOP 0x0000000000008000
+#define D_PROCFILE 0x0000000000010000
+#define D_RRD_CALLS 0x0000000000020000
+#define D_DICTIONARY 0x0000000000040000
+#define D_MEMORY 0x0000000000080000
+#define D_CGROUP 0x0000000000100000
+#define D_REGISTRY 0x0000000000200000
+#define D_VARIABLES 0x0000000000400000
+#define D_HEALTH 0x0000000000800000
+#define D_CONNECT_TO 0x0000000001000000
+#define D_RRDHOST 0x0000000002000000
+#define D_LOCKS 0x0000000004000000
+#define D_BACKEND 0x0000000008000000
+#define D_STATSD 0x0000000010000000
+#define D_POLLFD 0x0000000020000000
+#define D_STREAM 0x0000000040000000
+#define D_RRDENGINE 0x0000000100000000
+#define D_ACLK 0x0000000200000000
+#define D_METADATALOG 0x0000000400000000
+#define D_GUIDLOG 0x0000000800000000
+#define D_SYSTEM 0x8000000000000000
+
+//#define DEBUG (D_WEB_CLIENT_ACCESS|D_LISTENER|D_RRD_STATS)
+//#define DEBUG 0xffffffff
+#define DEBUG (0)
+
+extern int web_server_is_multithreaded;
+
+extern uint64_t debug_flags;
+
+extern const char *program_name;
+
+extern int stdaccess_fd;
+extern FILE *stdaccess;
+
+extern const char *stdaccess_filename;
+extern const char *stderr_filename;
+extern const char *stdout_filename;
+extern const char *facility_log;
+
+extern int access_log_syslog;
+extern int error_log_syslog;
+extern int output_log_syslog;
+
+extern time_t error_log_throttle_period;
+extern unsigned long error_log_errors_per_period, error_log_errors_per_period_backup;
+extern int error_log_limit(int reset);
+
+extern void open_all_log_files(void);
+extern void reopen_all_log_files(void);
+
+static inline void debug_dummy(void) {}
+
+#define error_log_limit_reset() do { error_log_errors_per_period = error_log_errors_per_period_backup; error_log_limit(1); } while(0)
+#define error_log_limit_unlimited() do { \
+ error_log_limit_reset(); \
+ error_log_errors_per_period = ((error_log_errors_per_period_backup * 10) < 10000) ? 10000 : (error_log_errors_per_period_backup * 10); \
+ } while(0)
+
+#ifdef NETDATA_INTERNAL_CHECKS
+#define debug(type, args...) do { if(unlikely(debug_flags & type)) debug_int(__FILE__, __FUNCTION__, __LINE__, ##args); } while(0)
+#else
+#define debug(type, args...) debug_dummy()
+#endif
+
+#define info(args...) info_int(__FILE__, __FUNCTION__, __LINE__, ##args)
+#define infoerr(args...) error_int("INFO", __FILE__, __FUNCTION__, __LINE__, ##args)
+#define error(args...) error_int("ERROR", __FILE__, __FUNCTION__, __LINE__, ##args)
+#define fatal(args...) fatal_int(__FILE__, __FUNCTION__, __LINE__, ##args)
+#define fatal_assert(expr) ((expr) ? (void)(0) : fatal_int(__FILE__, __FUNCTION__, __LINE__, "Assertion `%s' failed", #expr))
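+
+// usage sketch (names below are illustrative):
+//   debug(D_SYSTEM, "processed %d items", count);
+//   error("cannot open file '%s'", filename);
+//   fatal_assert(buffer != NULL);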
+
+extern void send_statistics(const char *action, const char *action_result, const char *action_data);
+extern void debug_int( const char *file, const char *function, const unsigned long line, const char *fmt, ... ) PRINTFLIKE(4, 5);
+extern void info_int( const char *file, const char *function, const unsigned long line, const char *fmt, ... ) PRINTFLIKE(4, 5);
+extern void error_int( const char *prefix, const char *file, const char *function, const unsigned long line, const char *fmt, ... ) PRINTFLIKE(5, 6);
+extern void fatal_int( const char *file, const char *function, const unsigned long line, const char *fmt, ... ) NORETURN PRINTFLIKE(4, 5);
+extern void log_access( const char *fmt, ... ) PRINTFLIKE(1, 2);
+
+# ifdef __cplusplus
+}
+# endif
+
+#endif /* NETDATA_LOG_H */
diff --git a/libnetdata/os.c b/libnetdata/os.c
new file mode 100644
index 0000000..4271a91
--- /dev/null
+++ b/libnetdata/os.c
@@ -0,0 +1,217 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "os.h"
+
+// ----------------------------------------------------------------------------
+// system functions
+// to retrieve settings of the system
+
+int processors = 1;
+long get_system_cpus(void) {
+ processors = 1;
+
+#ifdef __APPLE__
+ int32_t tmp_processors;
+
+ if (unlikely(GETSYSCTL_BY_NAME("hw.logicalcpu", tmp_processors))) {
+ error("Assuming system has %d processors.", processors);
+ } else {
+ processors = tmp_processors;
+ }
+
+ return processors;
+#elif __FreeBSD__
+ int32_t tmp_processors;
+
+ if (unlikely(GETSYSCTL_BY_NAME("hw.ncpu", tmp_processors))) {
+ error("Assuming system has %d processors.", processors);
+ } else {
+ processors = tmp_processors;
+ }
+
+ return processors;
+#else
+
+ char filename[FILENAME_MAX + 1];
+ snprintfz(filename, FILENAME_MAX, "%s/proc/stat", netdata_configured_host_prefix);
+
+ procfile *ff = procfile_open(filename, NULL, PROCFILE_FLAG_DEFAULT);
+ if(!ff) {
+ error("Cannot open file '%s'. Assuming system has %d processors.", filename, processors);
+ return processors;
+ }
+
+ ff = procfile_readall(ff);
+ if(!ff) {
+ error("Cannot open file '%s'. Assuming system has %d processors.", filename, processors);
+ return processors;
+ }
+
+ processors = 0;
+ unsigned int i;
+ for(i = 0; i < procfile_lines(ff); i++) {
+ if(!procfile_linewords(ff, i)) continue;
+
+ if(strncmp(procfile_lineword(ff, i, 0), "cpu", 3) == 0) processors++;
+ }
+ processors--;
+ if(processors < 1) processors = 1;
+
+ procfile_close(ff);
+
+ debug(D_SYSTEM, "System has %d processors.", processors);
+ return processors;
+
+#endif /* __APPLE__, __FreeBSD__ */
+}
+
+pid_t pid_max = 32768;
+pid_t get_system_pid_max(void) {
+#ifdef __APPLE__
+ // As we currently do not know a way to query pid_max from the OS,
+ // we use the number defined in bsd/sys/proc_internal.h in the XNU sources.
+ pid_max = 99999;
+ return pid_max;
+#elif __FreeBSD__
+ int32_t tmp_pid_max;
+
+ if (unlikely(GETSYSCTL_BY_NAME("kern.pid_max", tmp_pid_max))) {
+ pid_max = 99999;
+ error("Assuming system's maximum pid is %d.", pid_max);
+ } else {
+ pid_max = tmp_pid_max;
+ }
+
+ return pid_max;
+#else
+
+ static char read = 0;
+ if(unlikely(read)) return pid_max;
+ read = 1;
+
+ char filename[FILENAME_MAX + 1];
+ snprintfz(filename, FILENAME_MAX, "%s/proc/sys/kernel/pid_max", netdata_configured_host_prefix);
+
+ unsigned long long max = 0;
+ if(read_single_number_file(filename, &max) != 0) {
+ error("Cannot open file '%s'. Assuming system supports %d pids.", filename, pid_max);
+ return pid_max;
+ }
+
+ if(!max) {
+ error("Cannot parse file '%s'. Assuming system supports %d pids.", filename, pid_max);
+ return pid_max;
+ }
+
+ pid_max = (pid_t) max;
+ return pid_max;
+
+#endif /* __APPLE__, __FreeBSD__ */
+}
+
+unsigned int system_hz;
+void get_system_HZ(void) {
+ long ticks;
+
+ if ((ticks = sysconf(_SC_CLK_TCK)) == -1) {
+ ticks = 100; // fall back to the most common value
+ error("Cannot get system clock ticks. Assuming %ld.", ticks);
+ }
+
+ system_hz = (unsigned int) ticks;
+}
+
+// =====================================================================================================================
+// FreeBSD
+
+#if (TARGET_OS == OS_FREEBSD)
+
+int getsysctl_by_name(const char *name, void *ptr, size_t len) {
+ size_t nlen = len;
+
+ if (unlikely(sysctlbyname(name, ptr, &nlen, NULL, 0) == -1)) {
+ error("FREEBSD: sysctl(%s...) failed: %s", name, strerror(errno));
+ return 1;
+ }
+ if (unlikely(nlen != len)) {
+ error("FREEBSD: sysctl(%s...) expected %lu, got %lu", name, (unsigned long)len, (unsigned long)nlen);
+ return 1;
+ }
+ return 0;
+}
+
+int getsysctl_simple(const char *name, int *mib, size_t miblen, void *ptr, size_t len) {
+ size_t nlen = len;
+
+ if (unlikely(!mib[0]))
+ if (unlikely(getsysctl_mib(name, mib, miblen)))
+ return 1;
+
+ if (unlikely(sysctl(mib, miblen, ptr, &nlen, NULL, 0) == -1)) {
+ error("FREEBSD: sysctl(%s...) failed: %s", name, strerror(errno));
+ return 1;
+ }
+ if (unlikely(nlen != len)) {
+ error("FREEBSD: sysctl(%s...) expected %lu, got %lu", name, (unsigned long)len, (unsigned long)nlen);
+ return 1;
+ }
+
+ return 0;
+}
+
+int getsysctl(const char *name, int *mib, size_t miblen, void *ptr, size_t *len) {
+ size_t nlen = *len;
+
+ if (unlikely(!mib[0]))
+ if (unlikely(getsysctl_mib(name, mib, miblen)))
+ return 1;
+
+ if (unlikely(sysctl(mib, miblen, ptr, len, NULL, 0) == -1)) {
+ error("FREEBSD: sysctl(%s...) failed: %s", name, strerror(errno));
+ return 1;
+ }
+ if (unlikely(ptr != NULL && nlen != *len)) {
+ error("FREEBSD: sysctl(%s...) expected %lu, got %lu", name, (unsigned long)*len, (unsigned long)nlen);
+ return 1;
+ }
+
+ return 0;
+}
+
+int getsysctl_mib(const char *name, int *mib, size_t len) {
+ size_t nlen = len;
+
+ if (unlikely(sysctlnametomib(name, mib, &nlen) == -1)) {
+ error("FREEBSD: sysctl(%s...) failed: %s", name, strerror(errno));
+ return 1;
+ }
+ if (unlikely(nlen != len)) {
+ error("FREEBSD: sysctl(%s...) expected %lu, got %lu", name, (unsigned long)len, (unsigned long)nlen);
+ return 1;
+ }
+ return 0;
+}
+
+
+#endif
+
+
+// =====================================================================================================================
+// MacOS
+
+#if (TARGET_OS == OS_MACOS)
+
+int getsysctl_by_name(const char *name, void *ptr, size_t len) {
+ size_t nlen = len;
+
+ if (unlikely(sysctlbyname(name, ptr, &nlen, NULL, 0) == -1)) {
+ error("MACOS: sysctl(%s...) failed: %s", name, strerror(errno));
+ return 1;
+ }
+ if (unlikely(nlen != len)) {
+ error("MACOS: sysctl(%s...) expected %lu, got %lu", name, (unsigned long)len, (unsigned long)nlen);
+ return 1;
+ }
+ return 0;
+}
+
+#endif // (TARGET_OS == OS_MACOS)
diff --git a/libnetdata/os.h b/libnetdata/os.h
new file mode 100644
index 0000000..2494174
--- /dev/null
+++ b/libnetdata/os.h
@@ -0,0 +1,70 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_OS_H
+#define NETDATA_OS_H
+
+#include "libnetdata.h"
+
+// =====================================================================================================================
+// Linux
+
+#if (TARGET_OS == OS_LINUX)
+
+
+// =====================================================================================================================
+// FreeBSD
+
+#elif (TARGET_OS == OS_FREEBSD)
+
+#include <sys/sysctl.h>
+
+#define GETSYSCTL_BY_NAME(name, var) getsysctl_by_name(name, &(var), sizeof(var))
+extern int getsysctl_by_name(const char *name, void *ptr, size_t len);
+
+#define GETSYSCTL_MIB(name, mib) getsysctl_mib(name, mib, sizeof(mib)/sizeof(int))
+
+extern int getsysctl_mib(const char *name, int *mib, size_t len);
+
+#define GETSYSCTL_SIMPLE(name, mib, var) getsysctl_simple(name, mib, sizeof(mib)/sizeof(int), &(var), sizeof(var))
+#define GETSYSCTL_WSIZE(name, mib, var, size) getsysctl_simple(name, mib, sizeof(mib)/sizeof(int), var, size)
+
+extern int getsysctl_simple(const char *name, int *mib, size_t miblen, void *ptr, size_t len);
+
+#define GETSYSCTL_SIZE(name, mib, size) getsysctl(name, mib, sizeof(mib)/sizeof(int), NULL, &(size))
+#define GETSYSCTL(name, mib, var, size) getsysctl(name, mib, sizeof(mib)/sizeof(int), &(var), &(size))
+
+extern int getsysctl(const char *name, int *mib, size_t miblen, void *ptr, size_t *len);
+
+
+// =====================================================================================================================
+// MacOS
+
+#elif (TARGET_OS == OS_MACOS)
+
+#include <sys/sysctl.h>
+
+#define GETSYSCTL_BY_NAME(name, var) getsysctl_by_name(name, &(var), sizeof(var))
+extern int getsysctl_by_name(const char *name, void *ptr, size_t len);
+
+
+// =====================================================================================================================
+// unknown O/S
+
+#else
+#error unsupported operating system
+#endif
+
+
+// =====================================================================================================================
+// common for all O/S
+
+extern int processors;
+extern long get_system_cpus(void);
+
+extern pid_t pid_max;
+extern pid_t get_system_pid_max(void);
+
+extern unsigned int system_hz;
+extern void get_system_HZ(void);
+
+#endif //NETDATA_OS_H
diff --git a/libnetdata/popen/Makefile.am b/libnetdata/popen/Makefile.am
new file mode 100644
index 0000000..161784b
--- /dev/null
+++ b/libnetdata/popen/Makefile.am
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/libnetdata/popen/README.md b/libnetdata/popen/README.md
new file mode 100644
index 0000000..e51e205
--- /dev/null
+++ b/libnetdata/popen/README.md
@@ -0,0 +1,5 @@
+<!--
+custom_edit_url: https://github.com/netdata/netdata/edit/master/libnetdata/popen/README.md
+-->
+
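+# popen
+
+This library replaces the libc `popen()` / `pclose()` pair with `mypopen()` /
+`mypclose()`, which spawn the command via `posix_spawn()` and return the child's
+pid, so the caller can reap exactly the process it started.
+
+A minimal usage sketch (the command below is illustrative):
+
+```c
+pid_t pid;
+FILE *fp = mypopen("ls -l /proc", &pid);
+if (fp) {
+    char line[1024];
+    while (fgets(line, sizeof(line), fp))
+        fputs(line, stdout);
+
+    // mypclose() closes the pipe and waits for the child; it returns the exit status
+    int code = mypclose(fp, pid);
+    (void)code;
+}
+```
+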
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Flibnetdata%2Fpopen%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/libnetdata/popen/popen.c b/libnetdata/popen/popen.c
new file mode 100644
index 0000000..c0135cf
--- /dev/null
+++ b/libnetdata/popen/popen.c
@@ -0,0 +1,337 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "../libnetdata.h"
+
+static pthread_mutex_t myp_lock;
+static int myp_tracking = 0;
+
+struct mypopen {
+ pid_t pid;
+ struct mypopen *next;
+ struct mypopen *prev;
+};
+
+static struct mypopen *mypopen_root = NULL;
+
+// myp_add_lock takes the lock if we're tracking.
+static void myp_add_lock(void) {
+ if (myp_tracking == 0)
+ return;
+
+ netdata_mutex_lock(&myp_lock);
+}
+
+// myp_add_unlock releases the lock if we're tracking.
+static void myp_add_unlock(void) {
+ if (myp_tracking == 0)
+ return;
+
+ netdata_mutex_unlock(&myp_lock);
+}
+
+// myp_add_locked adds pid if we're tracking and releases the lock.
+// myp_add_lock must have been called previously.
+static void myp_add_locked(pid_t pid) {
+ struct mypopen *mp;
+
+ if (myp_tracking == 0)
+ return;
+
+ mp = mallocz(sizeof(struct mypopen));
+ mp->pid = pid;
+
+ mp->next = mypopen_root;
+ mp->prev = NULL;
+ if (mypopen_root != NULL)
+ mypopen_root->prev = mp;
+ mypopen_root = mp;
+ netdata_mutex_unlock(&myp_lock);
+}
+
+// myp_del deletes pid if we're tracking.
+static void myp_del(pid_t pid) {
+ struct mypopen *mp;
+
+ if (myp_tracking == 0)
+ return;
+
+ netdata_mutex_lock(&myp_lock);
+ for (mp = mypopen_root; mp != NULL; mp = mp->next) {
+ if (mp->pid == pid) {
+ if (mp->next != NULL)
+ mp->next->prev = mp->prev;
+ if (mp->prev != NULL)
+ mp->prev->next = mp->next;
+ if (mypopen_root == mp)
+ mypopen_root = mp->next;
+ freez(mp);
+ break;
+ }
+ }
+
+ if (mp == NULL)
+ error("Cannot find pid %d.", pid);
+
+ netdata_mutex_unlock(&myp_lock);
+}
+
+#define PIPE_READ 0
+#define PIPE_WRITE 1
+
+/* custom_popene flag definitions */
+#define FLAG_CREATE_PIPE 1 // Create a pipe like popen() when set, otherwise set stdout to /dev/null
+#define FLAG_CLOSE_FD 2 // Close all file descriptors other than STDIN_FILENO, STDOUT_FILENO, STDERR_FILENO
+
+/*
+ * Returns -1 on failure, 0 on success. When FLAG_CREATE_PIPE is set, *fpp is set on success.
+ */
+static inline int custom_popene(const char *command, volatile pid_t *pidptr, char **env, uint8_t flags, FILE **fpp) {
+ FILE *fp = NULL;
+ int ret = 0; // success by default
+ int pipefd[2], error;
+ pid_t pid;
+ char *const spawn_argv[] = {
+ "sh",
+ "-c",
+ (char *)command,
+ NULL
+ };
+ posix_spawnattr_t attr;
+ posix_spawn_file_actions_t fa;
+
+ if (flags & FLAG_CREATE_PIPE) {
+ if (pipe(pipefd) == -1)
+ return -1;
+ if ((fp = fdopen(pipefd[PIPE_READ], "r")) == NULL) {
+ goto error_after_pipe;
+ }
+ }
+
+ if (flags & FLAG_CLOSE_FD) {
+ // Mark all files to be closed by the exec() stage of posix_spawn()
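+ // STDOUT_FILENO is not excluded here: the file actions below re-create it,
+ // and descriptors created by dup2()/open() do not carry FD_CLOEXEC.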
+ int i;
+ for (i = (int) (sysconf(_SC_OPEN_MAX) - 1); i >= 0; i--) {
+ if (i != STDIN_FILENO && i != STDERR_FILENO)
+ (void) fcntl(i, F_SETFD, FD_CLOEXEC);
+ }
+ }
+
+ if (!posix_spawn_file_actions_init(&fa)) {
+ if (flags & FLAG_CREATE_PIPE) {
+ // move the pipe to stdout in the child
+ if (posix_spawn_file_actions_adddup2(&fa, pipefd[PIPE_WRITE], STDOUT_FILENO)) {
+ error("posix_spawn_file_actions_adddup2() failed");
+ goto error_after_posix_spawn_file_actions_init;
+ }
+ } else {
+ // set stdout to /dev/null
+ if (posix_spawn_file_actions_addopen(&fa, STDOUT_FILENO, "/dev/null", O_WRONLY, 0)) {
+ error("posix_spawn_file_actions_addopen() failed");
+ // this is not a fatal error
+ }
+ }
+ } else {
+ error("posix_spawn_file_actions_init() failed.");
+ goto error_after_pipe;
+ }
+ if (!(error = posix_spawnattr_init(&attr))) {
+ // reset all signals in the child
+ sigset_t mask;
+
+ if (posix_spawnattr_setflags(&attr, POSIX_SPAWN_SETSIGMASK | POSIX_SPAWN_SETSIGDEF))
+ error("posix_spawnattr_setflags() failed.");
+ sigemptyset(&mask);
+ if (posix_spawnattr_setsigmask(&attr, &mask))
+ error("posix_spawnattr_setsigmask() failed.");
+ } else {
+ error("posix_spawnattr_init() failed.");
+ }
+
+ // Take the lock while we spawn to ensure we don't race with SIGCHLD
+ // delivery on a process which exits quickly.
+ myp_add_lock();
+ if (!posix_spawn(&pid, "/bin/sh", &fa, &attr, spawn_argv, env)) {
+ *pidptr = pid;
+ myp_add_locked(pid);
+ debug(D_CHILDS, "Spawned command: '%s' on pid %d from parent pid %d.", command, pid, getpid());
+ } else {
+ myp_add_unlock();
+ error("Failed to spawn command: '%s' from parent pid %d.", command, getpid());
+ if (flags & FLAG_CREATE_PIPE) {
+ fclose(fp);
+ }
+ ret = -1;
+ }
+ if (flags & FLAG_CREATE_PIPE) {
+ close(pipefd[PIPE_WRITE]);
+ if (0 == ret) // on success set FILE * pointer
+ *fpp = fp;
+ }
+
+ if (!error) {
+ // posix_spawnattr_init() succeeded
+ if (posix_spawnattr_destroy(&attr))
+ error("posix_spawnattr_destroy");
+ }
+ if (posix_spawn_file_actions_destroy(&fa))
+ error("posix_spawn_file_actions_destroy");
+
+ return ret;
+
+error_after_posix_spawn_file_actions_init:
+ if (posix_spawn_file_actions_destroy(&fa))
+ error("posix_spawn_file_actions_destroy");
+error_after_pipe:
+ if (flags & FLAG_CREATE_PIPE) {
+ if (fp)
+ fclose(fp);
+ else
+ close(pipefd[PIPE_READ]);
+
+ close(pipefd[PIPE_WRITE]);
+ }
+ return -1;
+}
+
+// See man environ
+extern char **environ;
+
+// myp_init should be called by apps which act as init
+// (pid 1) so that processes created by mypopen and mypopene
+// are tracked. This enables the reaper to ignore processes
+// which will be handled internally, by calling myp_reap, to
+// avoid issues with already reaped processes during wait calls.
+//
+// Callers should call myp_free() to clean up resources.
+void myp_init(void) {
+ info("process tracking enabled.");
+ myp_tracking = 1;
+
+ if (netdata_mutex_init(&myp_lock) != 0) {
+ fatal("myp_init() mutex init failed.");
+ }
+}
+
+// myp_free cleans up any resources allocated for process
+// tracking.
+void myp_free(void) {
+ struct mypopen *mp, *next;
+
+ if (myp_tracking == 0)
+ return;
+
+ netdata_mutex_lock(&myp_lock);
+ for (mp = mypopen_root; mp != NULL; mp = next) {
+ next = mp->next;
+ freez(mp);
+ }
+
+ mypopen_root = NULL;
+ myp_tracking = 0;
+ netdata_mutex_unlock(&myp_lock);
+}
+
+// myp_reap returns 1 if pid should be reaped, 0 otherwise.
+int myp_reap(pid_t pid) {
+ struct mypopen *mp;
+
+ if (myp_tracking == 0)
+ return 0;
+
+ netdata_mutex_lock(&myp_lock);
+ for (mp = mypopen_root; mp != NULL; mp = mp->next) {
+ if (mp->pid == pid) {
+ netdata_mutex_unlock(&myp_lock);
+ return 0;
+ }
+ }
+ netdata_mutex_unlock(&myp_lock);
+
+ return 1;
+}
+
+FILE *mypopen(const char *command, volatile pid_t *pidptr) {
+ FILE *fp = NULL;
+ (void)custom_popene(command, pidptr, environ, FLAG_CREATE_PIPE | FLAG_CLOSE_FD, &fp);
+ return fp;
+}
+
+FILE *mypopene(const char *command, volatile pid_t *pidptr, char **env) {
+ FILE *fp = NULL;
+ (void)custom_popene(command, pidptr, env, FLAG_CREATE_PIPE | FLAG_CLOSE_FD, &fp);
+ return fp;
+}
+
+// returns 0 on success, -1 on failure
+int netdata_spawn(const char *command, volatile pid_t *pidptr) {
+ return custom_popene(command, pidptr, environ, 0, NULL);
+}
+
+int custom_pclose(FILE *fp, pid_t pid) {
+ int ret;
+ siginfo_t info;
+
+ debug(D_EXIT, "Request to mypclose() on pid %d", pid);
+
+ if (fp) {
+ // close the pipe fd
+ // this is required in musl
+ // without it the children do not exit
+ close(fileno(fp));
+
+ // close the pipe file pointer
+ fclose(fp);
+ }
+
+ errno = 0;
+
+ ret = waitid(P_PID, (id_t) pid, &info, WEXITED);
+ myp_del(pid);
+
+ if (ret != -1) {
+ switch (info.si_code) {
+ case CLD_EXITED:
+ if(info.si_status)
+ error("child pid %d exited with code %d.", info.si_pid, info.si_status);
+ return(info.si_status);
+
+ case CLD_KILLED:
+ error("child pid %d killed by signal %d.", info.si_pid, info.si_status);
+ return(-1);
+
+ case CLD_DUMPED:
+ error("child pid %d core dumped by signal %d.", info.si_pid, info.si_status);
+ return(-2);
+
+ case CLD_STOPPED:
+ error("child pid %d stopped by signal %d.", info.si_pid, info.si_status);
+ return(0);
+
+ case CLD_TRAPPED:
+ error("child pid %d trapped by signal %d.", info.si_pid, info.si_status);
+ return(-4);
+
+ case CLD_CONTINUED:
+ error("child pid %d continued by signal %d.", info.si_pid, info.si_status);
+ return(0);
+
+ default:
+ error("child pid %d gave us a SIGCHLD with code %d and status %d.", info.si_pid, info.si_code, info.si_status);
+ return(-5);
+ }
+ }
+ else
+ error("Cannot waitid() for pid %d", pid);
+
+ return 0;
+}
+
+int mypclose(FILE *fp, pid_t pid)
+{
+ return custom_pclose(fp, pid);
+}
+
+int netdata_spawn_waitpid(pid_t pid)
+{
+ return custom_pclose(NULL, pid);
+}
\ No newline at end of file
diff --git a/libnetdata/popen/popen.h b/libnetdata/popen/popen.h
new file mode 100644
index 0000000..f387cff
--- /dev/null
+++ b/libnetdata/popen/popen.h
@@ -0,0 +1,23 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_POPEN_H
+#define NETDATA_POPEN_H 1
+
+#include "../libnetdata.h"
+
+#define PIPE_READ 0
+#define PIPE_WRITE 1
+
+extern FILE *mypopen(const char *command, volatile pid_t *pidptr);
+extern FILE *mypopene(const char *command, volatile pid_t *pidptr, char **env);
+extern int mypclose(FILE *fp, pid_t pid);
+extern int netdata_spawn(const char *command, volatile pid_t *pidptr);
+extern int netdata_spawn_waitpid(pid_t pid);
+extern void myp_init(void);
+extern void myp_free(void);
+extern int myp_reap(pid_t pid);
+
+extern void signals_unblock(void);
+extern void signals_reset(void);
+
+#endif /* NETDATA_POPEN_H */
diff --git a/libnetdata/procfile/Makefile.am b/libnetdata/procfile/Makefile.am
new file mode 100644
index 0000000..161784b
--- /dev/null
+++ b/libnetdata/procfile/Makefile.am
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/libnetdata/procfile/README.md b/libnetdata/procfile/README.md
new file mode 100644
index 0000000..3eeb1e2
--- /dev/null
+++ b/libnetdata/procfile/README.md
@@ -0,0 +1,67 @@
+<!--
+title: "PROCFILE"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/libnetdata/procfile/README.md
+-->
+
+# PROCFILE
+
+procfile is a library for reading text data files (e.g. `/proc` files) in the fastest possible way.
+
+## How it works
+
+The library automatically adapts its memory (across iterations) so that each file
+is read with a single `read()` call.
+
+Then the library splits the file into words, using the supplied separators.
+The library also supports quoted words (i.e. strings within which the separators are ignored).
+
+### Initialization
+
+Initially the caller:
+
+- calls `procfile_open()` to open the file and allocate the structures needed.
+
+### Iterations
+
+For each iteration, the caller:
+
+- calls `procfile_readall()` to read updated contents.
+ This call also rewinds the file (`lseek()` to 0) so the next iteration reads from the start.
+
+ For every file, a [BUFFER](/libnetdata/buffer/README.md) is used that is automatically adjusted to fit the entire
+ contents of the file. So the file is read with a single `read()` call (providing atomicity / consistency when
+ the data are read from the kernel).
+
+ Once the data are read, 2 arrays of pointers are updated:
+
+ - a `words` array, pointing to each word in the data read
+ - a `lines` array, pointing to the first word for each line
+
+ This is highly optimized. Both arrays are automatically adjusted to
+ fit all contents and are updated in a single pass on the data.
+
+ The library provides a number of macros:
+
+ - `procfile_lines()` returns the # of lines read
+ - `procfile_linewords()` returns the # of words in the given line
+ - `procfile_word()` returns a pointer to the given word #
+ - `procfile_line()` returns a pointer to the first word of the given line #
+ - `procfile_lineword()` returns a pointer to the given word # of the given line #
+
+### Cleanup
+
+When the caller exits:
+
+- calls `procfile_close()` to close the file and free all memory used.
+
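+Putting the above together, a minimal reader (the file name below is illustrative)
+could look like this:
+
+```c
+procfile *ff = procfile_open("/proc/loadavg", NULL, PROCFILE_FLAG_DEFAULT);
+if (ff) {
+    ff = procfile_readall(ff); // returns NULL (and closes ff) on failure
+    if (ff) {
+        size_t l, lines = procfile_lines(ff);
+        for (l = 0; l < lines; l++) {
+            size_t w, words = procfile_linewords(ff, l);
+            for (w = 0; w < words; w++)
+                printf("line %zu word %zu: '%s'\n", l, w, procfile_lineword(ff, l, w));
+        }
+        procfile_close(ff);
+    }
+}
+```
+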
+### Performance
+
+- A **Raspberry Pi 1** (the oldest single-core model) can process 5,000+ `/proc` files per second.
+- A **J1900 Celeron** processor can process 23,000+ `/proc` files per second per core.
+
+To achieve this kind of performance, the library tries to work in batches so that the code
+and the data are inside the processor's caches.
+
+This library is extensively used in Netdata and its plugins.
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Flibnetdata%2Fprocfile%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/libnetdata/procfile/procfile.c b/libnetdata/procfile/procfile.c
new file mode 100644
index 0000000..4a812ba
--- /dev/null
+++ b/libnetdata/procfile/procfile.c
@@ -0,0 +1,472 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "../libnetdata.h"
+
+#define PF_PREFIX "PROCFILE"
+
+#define PFWORDS_INCREASE_STEP 200
+#define PFLINES_INCREASE_STEP 10
+#define PROCFILE_INCREMENT_BUFFER 512
+
+int procfile_open_flags = O_RDONLY;
+
+int procfile_adaptive_initial_allocation = 0;
+
+// if adaptive allocation is set, these store the
+// max values we have seen so far
+size_t procfile_max_lines = PFLINES_INCREASE_STEP;
+size_t procfile_max_words = PFWORDS_INCREASE_STEP;
+size_t procfile_max_allocation = PROCFILE_INCREMENT_BUFFER;
+
+
+// ----------------------------------------------------------------------------
+
+char *procfile_filename(procfile *ff) {
+ if(ff->filename[0]) return ff->filename;
+
+ char buffer[FILENAME_MAX + 1];
+ snprintfz(buffer, FILENAME_MAX, "/proc/self/fd/%d", ff->fd);
+
+ ssize_t l = readlink(buffer, ff->filename, FILENAME_MAX);
+ if(unlikely(l == -1))
+ snprintfz(ff->filename, FILENAME_MAX, "unknown filename for fd %d", ff->fd);
+ else
+ ff->filename[l] = '\0';
+
+ // on non-linux systems, something like this will be needed
+ // fcntl(ff->fd, F_GETPATH, ff->filename)
+
+ return ff->filename;
+}
+
+// ----------------------------------------------------------------------------
+// An array of words
+
+static inline void pfwords_add(procfile *ff, char *str) {
+ // debug(D_PROCFILE, PF_PREFIX ": adding word No %d: '%s'", fw->len, str);
+
+ pfwords *fw = ff->words;
+ if(unlikely(fw->len == fw->size)) {
+ // debug(D_PROCFILE, PF_PREFIX ": expanding words");
+
+ ff->words = fw = reallocz(fw, sizeof(pfwords) + (fw->size + PFWORDS_INCREASE_STEP) * sizeof(char *));
+ fw->size += PFWORDS_INCREASE_STEP;
+ }
+
+ fw->words[fw->len++] = str;
+}
+
+NEVERNULL
+static inline pfwords *pfwords_new(void) {
+ // debug(D_PROCFILE, PF_PREFIX ": initializing words");
+
+ size_t size = (procfile_adaptive_initial_allocation) ? procfile_max_words : PFWORDS_INCREASE_STEP;
+
+ pfwords *new = mallocz(sizeof(pfwords) + size * sizeof(char *));
+ new->len = 0;
+ new->size = size;
+ return new;
+}
+
+static inline void pfwords_reset(pfwords *fw) {
+ // debug(D_PROCFILE, PF_PREFIX ": resetting words");
+ fw->len = 0;
+}
+
+static inline void pfwords_free(pfwords *fw) {
+ // debug(D_PROCFILE, PF_PREFIX ": freeing words");
+
+ freez(fw);
+}
+
+
+// ----------------------------------------------------------------------------
+// An array of lines
+
+NEVERNULL
+static inline size_t *pflines_add(procfile *ff) {
+ // debug(D_PROCFILE, PF_PREFIX ": adding line %d at word %d", fl->len, first_word);
+
+ pflines *fl = ff->lines;
+ if(unlikely(fl->len == fl->size)) {
+ // debug(D_PROCFILE, PF_PREFIX ": expanding lines");
+
+ ff->lines = fl = reallocz(fl, sizeof(pflines) + (fl->size + PFLINES_INCREASE_STEP) * sizeof(ffline));
+ fl->size += PFLINES_INCREASE_STEP;
+ }
+
+ ffline *ffl = &fl->lines[fl->len++];
+ ffl->words = 0;
+ ffl->first = ff->words->len;
+
+ return &ffl->words;
+}
+
+NEVERNULL
+static inline pflines *pflines_new(void) {
+ // debug(D_PROCFILE, PF_PREFIX ": initializing lines");
+
+ size_t size = (unlikely(procfile_adaptive_initial_allocation)) ? procfile_max_lines : PFLINES_INCREASE_STEP;
+
+ pflines *new = mallocz(sizeof(pflines) + size * sizeof(ffline));
+ new->len = 0;
+ new->size = size;
+ return new;
+}
+
+static inline void pflines_reset(pflines *fl) {
+ // debug(D_PROCFILE, PF_PREFIX ": resetting lines");
+
+ fl->len = 0;
+}
+
+static inline void pflines_free(pflines *fl) {
+ // debug(D_PROCFILE, PF_PREFIX ": freeing lines");
+
+ freez(fl);
+}
+
+
+// ----------------------------------------------------------------------------
+// The procfile
+
+void procfile_close(procfile *ff) {
+ if(unlikely(!ff)) return;
+
+ debug(D_PROCFILE, PF_PREFIX ": Closing file '%s'", procfile_filename(ff));
+
+ if(likely(ff->lines)) pflines_free(ff->lines);
+ if(likely(ff->words)) pfwords_free(ff->words);
+
+ if(likely(ff->fd != -1)) close(ff->fd);
+ freez(ff);
+}
+
+NOINLINE
+static void procfile_parser(procfile *ff) {
+ // debug(D_PROCFILE, PF_PREFIX ": Parsing file '%s'", ff->filename);
+
+ char *s = ff->data // our current position
+ , *e = &ff->data[ff->len] // the terminating null
+ , *t = ff->data; // the first character of a word (or quoted / parenthesized string)
+
+ // the look up array to find our type of character
+ PF_CHAR_TYPE *separators = ff->separators;
+
+ char quote = 0; // the quote character - only when in quoted string
+ size_t opened = 0; // counts the number of open parenthesis
+
+ size_t *line_words = pflines_add(ff);
+
+ while(s < e) {
+ PF_CHAR_TYPE ct = separators[(unsigned char)(*s)];
+
+ // this is faster than a switch()
+ // read more here: http://lazarenko.me/switch/
+ if(likely(ct == PF_CHAR_IS_WORD)) {
+ s++;
+ }
+ else if(likely(ct == PF_CHAR_IS_SEPARATOR)) {
+ if(!quote && !opened) {
+ if (s != t) {
+ // separator, but we have word before it
+ *s = '\0';
+ pfwords_add(ff, t);
+ (*line_words)++;
+ t = ++s;
+ }
+ else {
+ // separator at the beginning
+ // skip it
+ t = ++s;
+ }
+ }
+ else {
+ // we are inside a quote or parenthesized string
+ s++;
+ }
+ }
+ else if(likely(ct == PF_CHAR_IS_NEWLINE)) {
+ // end of line
+
+ *s = '\0';
+ pfwords_add(ff, t);
+ (*line_words)++;
+ t = ++s;
+
+ // debug(D_PROCFILE, PF_PREFIX ": ended line %d with %d words", l, ff->lines->lines[l].words);
+
+ line_words = pflines_add(ff);
+ }
+ else if(likely(ct == PF_CHAR_IS_QUOTE)) {
+ if(unlikely(!quote && s == t)) {
+ // quote opened at the beginning
+ quote = *s;
+ t = ++s;
+ }
+ else if(unlikely(quote && quote == *s)) {
+ // quote closed
+ quote = 0;
+
+ *s = '\0';
+ pfwords_add(ff, t);
+ (*line_words)++;
+ t = ++s;
+ }
+ else
+ s++;
+ }
+ else if(likely(ct == PF_CHAR_IS_OPEN)) {
+ if(s == t) {
+ opened++;
+ t = ++s;
+ }
+ else if(opened) {
+ opened++;
+ s++;
+ }
+ else
+ s++;
+ }
+ else if(likely(ct == PF_CHAR_IS_CLOSE)) {
+ if(opened) {
+ opened--;
+
+ if(!opened) {
+ *s = '\0';
+ pfwords_add(ff, t);
+ (*line_words)++;
+ t = ++s;
+ }
+ else
+ s++;
+ }
+ else
+ s++;
+ }
+ else
+ fatal("Internal Error: procfile_readall() does not handle all the cases.");
+ }
+
+ if(likely(s > t && t < e)) {
+ // the last word
+ if(unlikely(ff->len >= ff->size)) {
+ // we are going to lose the last byte
+ s = &ff->data[ff->size - 1];
+ }
+
+ *s = '\0';
+ pfwords_add(ff, t);
+ (*line_words)++;
+ // t = ++s;
+ }
+}
+
+procfile *procfile_readall(procfile *ff) {
+ // debug(D_PROCFILE, PF_PREFIX ": Reading file '%s'.", ff->filename);
+
+ ff->len = 0; // zero the used size
+ ssize_t r = 1; // read at least once
+ while(r > 0) {
+ ssize_t s = ff->len;
+ ssize_t x = ff->size - s;
+
+ if(unlikely(!x)) {
+ debug(D_PROCFILE, PF_PREFIX ": Expanding data buffer for file '%s'.", procfile_filename(ff));
+ ff = reallocz(ff, sizeof(procfile) + ff->size + PROCFILE_INCREMENT_BUFFER);
+ ff->size += PROCFILE_INCREMENT_BUFFER;
+ }
+
+ debug(D_PROCFILE, "Reading file '%s', from position %zd with length %zd", procfile_filename(ff), s, (ssize_t)(ff->size - s));
+ r = read(ff->fd, &ff->data[s], ff->size - s);
+ if(unlikely(r == -1)) {
+ if(unlikely(!(ff->flags & PROCFILE_FLAG_NO_ERROR_ON_FILE_IO))) error(PF_PREFIX ": Cannot read from file '%s' on fd %d", procfile_filename(ff), ff->fd);
+ procfile_close(ff);
+ return NULL;
+ }
+
+ ff->len += r;
+ }
+
+ // debug(D_PROCFILE, "Rewinding file '%s'", ff->filename);
+ if(unlikely(lseek(ff->fd, 0, SEEK_SET) == -1)) {
+ if(unlikely(!(ff->flags & PROCFILE_FLAG_NO_ERROR_ON_FILE_IO))) error(PF_PREFIX ": Cannot rewind on file '%s'.", procfile_filename(ff));
+ procfile_close(ff);
+ return NULL;
+ }
+
+ pflines_reset(ff->lines);
+ pfwords_reset(ff->words);
+ procfile_parser(ff);
+
+ if(unlikely(procfile_adaptive_initial_allocation)) {
+ if(unlikely(ff->len > procfile_max_allocation)) procfile_max_allocation = ff->len;
+ if(unlikely(ff->lines->len > procfile_max_lines)) procfile_max_lines = ff->lines->len;
+ if(unlikely(ff->words->len > procfile_max_words)) procfile_max_words = ff->words->len;
+ }
+
+ // debug(D_PROCFILE, "File '%s' updated.", ff->filename);
+ return ff;
+}
+
+NOINLINE
+static void procfile_set_separators(procfile *ff, const char *separators) {
+ static PF_CHAR_TYPE def[256];
+ static char initialized = 0;
+
+ if(unlikely(!initialized)) {
+ // this is thread safe:
+ // if initialized is zero, multiple threads may execute
+ // this code at the same time, but they all set the exact same values in def[]
+ int i = 256;
+ while(i--) {
+ if(unlikely(i == '\n' || i == '\r'))
+ def[i] = PF_CHAR_IS_NEWLINE;
+
+ else if(unlikely(isspace(i) || !isprint(i)))
+ def[i] = PF_CHAR_IS_SEPARATOR;
+
+ else
+ def[i] = PF_CHAR_IS_WORD;
+ }
+
+ initialized = 1;
+ }
+
+ // copy the default
+ PF_CHAR_TYPE *ffs = ff->separators, *ffd = def, *ffe = &def[256];
+ while(ffd != ffe)
+ *ffs++ = *ffd++;
+
+ // set the separators
+ if(unlikely(!separators))
+ separators = " \t=|";
+
+ ffs = ff->separators;
+ const char *s = separators;
+ while(*s)
+ ffs[(int)*s++] = PF_CHAR_IS_SEPARATOR;
+}
+
+void procfile_set_quotes(procfile *ff, const char *quotes) {
+ PF_CHAR_TYPE *ffs = ff->separators;
+
+ // remove all quotes
+ int i = 256;
+ while(i--)
+ if(unlikely(ffs[i] == PF_CHAR_IS_QUOTE))
+ ffs[i] = PF_CHAR_IS_WORD;
+
+ // if nothing given, return
+ if(unlikely(!quotes || !*quotes))
+ return;
+
+ // set the quotes
+ const char *s = quotes;
+ while(*s)
+ ffs[(int)*s++] = PF_CHAR_IS_QUOTE;
+}
+
+void procfile_set_open_close(procfile *ff, const char *open, const char *close) {
+ PF_CHAR_TYPE *ffs = ff->separators;
+
+ // remove all open/close
+ int i = 256;
+ while(i--)
+ if(unlikely(ffs[i] == PF_CHAR_IS_OPEN || ffs[i] == PF_CHAR_IS_CLOSE))
+ ffs[i] = PF_CHAR_IS_WORD;
+
+ // if nothing given, return
+ if(unlikely(!open || !*open || !close || !*close))
+ return;
+
+ // set the openings
+ const char *s = open;
+ while(*s)
+ ffs[(int)*s++] = PF_CHAR_IS_OPEN;
+
+ // set the closings
+ s = close;
+ while(*s)
+ ffs[(int)*s++] = PF_CHAR_IS_CLOSE;
+}
+
+procfile *procfile_open(const char *filename, const char *separators, uint32_t flags) {
+ debug(D_PROCFILE, PF_PREFIX ": Opening file '%s'", filename);
+
+ int fd = open(filename, procfile_open_flags, 0666);
+ if(unlikely(fd == -1)) {
+ if(unlikely(!(flags & PROCFILE_FLAG_NO_ERROR_ON_FILE_IO))) error(PF_PREFIX ": Cannot open file '%s'", filename);
+ return NULL;
+ }
+
+ // info("PROCFILE: opened '%s' on fd %d", filename, fd);
+
+ size_t size = (unlikely(procfile_adaptive_initial_allocation)) ? procfile_max_allocation : PROCFILE_INCREMENT_BUFFER;
+ procfile *ff = mallocz(sizeof(procfile) + size);
+
+ //strncpyz(ff->filename, filename, FILENAME_MAX);
+ ff->filename[0] = '\0';
+
+ ff->fd = fd;
+ ff->size = size;
+ ff->len = 0;
+ ff->flags = flags;
+
+ ff->lines = pflines_new();
+ ff->words = pfwords_new();
+
+ procfile_set_separators(ff, separators);
+
+ debug(D_PROCFILE, "File '%s' opened.", filename);
+ return ff;
+}
+
+procfile *procfile_reopen(procfile *ff, const char *filename, const char *separators, uint32_t flags) {
+ if(unlikely(!ff)) return procfile_open(filename, separators, flags);
+
+ if(likely(ff->fd != -1)) {
+ // info("PROCFILE: closing fd %d", ff->fd);
+ close(ff->fd);
+ }
+
+ ff->fd = open(filename, procfile_open_flags, 0666);
+ if(unlikely(ff->fd == -1)) {
+ procfile_close(ff);
+ return NULL;
+ }
+
+ // info("PROCFILE: opened '%s' on fd %d", filename, ff->fd);
+
+ //strncpyz(ff->filename, filename, FILENAME_MAX);
+ ff->filename[0] = '\0';
+ ff->flags = flags;
+
+ // do not do the separators again if NULL is given
+ if(likely(separators)) procfile_set_separators(ff, separators);
+
+ return ff;
+}
+
+// ----------------------------------------------------------------------------
+// example parsing of procfile data
+
+void procfile_print(procfile *ff) {
+ size_t lines = procfile_lines(ff), l;
+ char *s;
+ (void)s;
+
+ debug(D_PROCFILE, "File '%s' with %zu lines and %zu words", procfile_filename(ff), ff->lines->len, ff->words->len);
+
+ for(l = 0; likely(l < lines) ;l++) {
+ size_t words = procfile_linewords(ff, l);
+
+ debug(D_PROCFILE, " line %zu starts at word %zu and has %zu words", l, ff->lines->lines[l].first, ff->lines->lines[l].words);
+
+ size_t w;
+ for(w = 0; likely(w < words) ;w++) {
+ s = procfile_lineword(ff, l, w);
+ debug(D_PROCFILE, " [%zu.%zu] '%s'", l, w, s);
+ }
+ }
+}
diff --git a/libnetdata/procfile/procfile.h b/libnetdata/procfile/procfile.h
new file mode 100644
index 0000000..b107358
--- /dev/null
+++ b/libnetdata/procfile/procfile.h
@@ -0,0 +1,106 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_PROCFILE_H
+#define NETDATA_PROCFILE_H 1
+
+#include "../libnetdata.h"
+
+// ----------------------------------------------------------------------------
+// An array of words
+
+typedef struct {
+ size_t len; // used entries
+ size_t size; // capacity
+ char *words[]; // array of pointers
+} pfwords;
+
+
+// ----------------------------------------------------------------------------
+// An array of lines
+
+typedef struct {
+ size_t words; // how many words this line has
+ size_t first; // the id of the first word of this line
+ // in the words array
+} ffline;
+
+typedef struct {
+ size_t len; // used entries
+ size_t size; // capacity
+ ffline lines[]; // array of lines
+} pflines;
+
+
+// ----------------------------------------------------------------------------
+// The procfile
+
+#define PROCFILE_FLAG_DEFAULT 0x00000000
+#define PROCFILE_FLAG_NO_ERROR_ON_FILE_IO 0x00000001
+
+typedef enum procfile_separator {
+ PF_CHAR_IS_SEPARATOR,
+ PF_CHAR_IS_NEWLINE,
+ PF_CHAR_IS_WORD,
+ PF_CHAR_IS_QUOTE,
+ PF_CHAR_IS_OPEN,
+ PF_CHAR_IS_CLOSE
+} PF_CHAR_TYPE;
+
+typedef struct {
+ char filename[FILENAME_MAX + 1]; // not populated until procfile_filename() is called
+
+ uint32_t flags;
+ int fd; // the file descriptor
+ size_t len; // the bytes we have placed into data
+ size_t size; // the bytes we have allocated for data
+ pflines *lines;
+ pfwords *words;
+ PF_CHAR_TYPE separators[256];
+ char data[]; // allocated buffer to keep file contents
+} procfile;
+
+// close the proc file and free all related memory
+extern void procfile_close(procfile *ff);
+
+// (re)read and parse the proc file
+extern procfile *procfile_readall(procfile *ff);
+
+// open a /proc or /sys file
+extern procfile *procfile_open(const char *filename, const char *separators, uint32_t flags);
+
+// re-open a file
+// if separators == NULL, the last separators are used
+extern procfile *procfile_reopen(procfile *ff, const char *filename, const char *separators, uint32_t flags);
+
+// example walk-through a procfile parsed file
+extern void procfile_print(procfile *ff);
+
+extern void procfile_set_quotes(procfile *ff, const char *quotes);
+extern void procfile_set_open_close(procfile *ff, const char *open, const char *close);
+
+extern char *procfile_filename(procfile *ff);
+
+// ----------------------------------------------------------------------------
+
+// set to the O_XXXX flags, to have procfile_open and procfile_reopen use them when opening proc files
+extern int procfile_open_flags;
+
+// set this to 1, to have procfile adapt its initial buffer allocation to the max allocation used so far
+extern int procfile_adaptive_initial_allocation;
+
+// return the number of lines present
+#define procfile_lines(ff) ((ff)->lines->len)
+
+// return the number of words of the Nth line
+#define procfile_linewords(ff, line) (((line) < procfile_lines(ff)) ? (ff)->lines->lines[(line)].words : 0)
+
+// return the Nth word of the file, or empty string
+#define procfile_word(ff, word) (((word) < (ff)->words->len) ? (ff)->words->words[(word)] : "")
+
+// return the first word of the Nth line, or empty string
+#define procfile_line(ff, line) (((line) < procfile_lines(ff)) ? procfile_word((ff), (ff)->lines->lines[(line)].first) : "")
+
+// return the Nth word of the given line, or empty string
+#define procfile_lineword(ff, line, word) (((line) < procfile_lines(ff) && (word) < procfile_linewords((ff), (line))) ? procfile_word((ff), (ff)->lines->lines[(line)].first + (word)) : "")
+
+#endif /* NETDATA_PROCFILE_H */
diff --git a/libnetdata/required_dummies.h b/libnetdata/required_dummies.h
new file mode 100644
index 0000000..aa87e39
--- /dev/null
+++ b/libnetdata/required_dummies.h
@@ -0,0 +1,38 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_LIB_DUMMIES_H
+#define NETDATA_LIB_DUMMIES_H 1
+
+// callback required by fatal()
+void netdata_cleanup_and_exit(int ret)
+{
+ exit(ret);
+}
+
+void send_statistics(const char *action, const char *action_result, const char *action_data)
+{
+ (void)action;
+ (void)action_result;
+ (void)action_data;
+ return;
+}
+
+// callbacks required by popen()
+void signals_block(void) {}
+void signals_unblock(void) {}
+void signals_reset(void) {}
+
+// callback required by eval()
+int health_variable_lookup(const char *variable, uint32_t hash, struct rrdcalc *rc, calculated_number *result)
+{
+ (void)variable;
+ (void)hash;
+ (void)rc;
+ (void)result;
+ return 0;
+}
+
+// required by get_system_cpus()
+char *netdata_configured_host_prefix = "";
+
+#endif // NETDATA_LIB_DUMMIES_H
diff --git a/libnetdata/simple_pattern/Makefile.am b/libnetdata/simple_pattern/Makefile.am
new file mode 100644
index 0000000..161784b
--- /dev/null
+++ b/libnetdata/simple_pattern/Makefile.am
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/libnetdata/simple_pattern/README.md b/libnetdata/simple_pattern/README.md
new file mode 100644
index 0000000..a90759c
--- /dev/null
+++ b/libnetdata/simple_pattern/README.md
@@ -0,0 +1,43 @@
+<!--
+title: "Netdata simple patterns"
+description: "Netdata supports simple patterns, which are less cryptic versions of regular expressions. Use familiar notation for powerful results."
+custom_edit_url: https://github.com/netdata/netdata/edit/master/libnetdata/simple_pattern/README.md
+-->
+
+# Netdata simple patterns
+
+Unix prefers regular expressions. But they are just too hard, too cryptic
+to use, write and understand.
+
+So, Netdata supports **simple patterns**.
+
+Simple patterns are a space-separated list of words that can use `*`
+as a wildcard. Each word may use any number of `*`. Simple patterns
+allow **negative** matches by prefixing a word with `!`.
+
+So, `pattern = !*bad* *` will match anything, except all those that
+contain the word `bad`.
+
+Simple patterns are quite powerful: `pattern = *foobar* !foo* !*bar *`
+matches everything containing `foobar`, except strings that start
+with `foo` or end with `bar`.
+
+You can use the Netdata command line to check simple patterns,
+like this:
+
+```sh
+# netdata -W simple-pattern '*foobar* !foo* !*bar *' 'hello world'
+RESULT: MATCHED - pattern '*foobar* !foo* !*bar *' matches 'hello world'
+
+# netdata -W simple-pattern '*foobar* !foo* !*bar *' 'hello world bar'
+RESULT: NOT MATCHED - pattern '*foobar* !foo* !*bar *' does not match 'hello world bar'
+
+# netdata -W simple-pattern '*foobar* !foo* !*bar *' 'hello world foobar'
+RESULT: MATCHED - pattern '*foobar* !foo* !*bar *' matches 'hello world foobar'
+```
+
+Netdata stops processing at the first positive or negative match
+(left to right). If a string matches neither a positive nor a negative
+pattern, it is denied.
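+
+The same check can be done from C with the API in this directory (the pattern and
+test string below are illustrative):
+
+```c
+SIMPLE_PATTERN *p = simple_pattern_create("*foobar* !foo* !*bar *", NULL, SIMPLE_PATTERN_EXACT);
+
+// returns 1 on a positive match, 0 otherwise; NULL/0 skip wildcard extraction
+int matched = simple_pattern_matches_extract(p, "hello world foobar", NULL, 0);
+
+simple_pattern_free(p);
+```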
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Flibnetdata%2Fsimple_pattern%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/libnetdata/simple_pattern/simple_pattern.c b/libnetdata/simple_pattern/simple_pattern.c
new file mode 100644
index 0000000..70b06a2
--- /dev/null
+++ b/libnetdata/simple_pattern/simple_pattern.c
@@ -0,0 +1,365 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "../libnetdata.h"
+
+struct simple_pattern {
+ const char *match;
+ size_t len;
+
+ SIMPLE_PREFIX_MODE mode;
+ char negative;
+
+ struct simple_pattern *child;
+
+ struct simple_pattern *next;
+};
+
+static inline struct simple_pattern *parse_pattern(char *str, SIMPLE_PREFIX_MODE default_mode) {
+ // fprintf(stderr, "PARSING PATTERN: '%s'\n", str);
+
+ SIMPLE_PREFIX_MODE mode;
+ struct simple_pattern *child = NULL;
+
+ char *s = str, *c = str;
+
+ // skip asterisks in front
+ while(*c == '*') c++;
+
+ // find the next asterisk
+ while(*c && *c != '*') c++;
+
+ // do we have an asterisk in the middle?
+ if(*c == '*' && c[1] != '\0') {
+ // yes, we have
+ child = parse_pattern(c, default_mode);
+ c[1] = '\0';
+ }
+
+ // check what this one matches
+
+ size_t len = strlen(s);
+ if(len >= 2 && *s == '*' && s[len - 1] == '*') {
+ s[len - 1] = '\0';
+ s++;
+ mode = SIMPLE_PATTERN_SUBSTRING;
+ }
+ else if(len >= 1 && *s == '*') {
+ s++;
+ mode = SIMPLE_PATTERN_SUFFIX;
+ }
+ else if(len >= 1 && s[len - 1] == '*') {
+ s[len - 1] = '\0';
+ mode = SIMPLE_PATTERN_PREFIX;
+ }
+ else
+ mode = default_mode;
+
+ // allocate the structure
+ struct simple_pattern *m = callocz(1, sizeof(struct simple_pattern));
+ if(*s) {
+ m->match = strdupz(s);
+ m->len = strlen(m->match);
+ m->mode = mode;
+ }
+ else {
+ m->mode = SIMPLE_PATTERN_SUBSTRING;
+ }
+
+ m->child = child;
+
+ return m;
+}
+
+SIMPLE_PATTERN *simple_pattern_create(const char *list, const char *separators, SIMPLE_PREFIX_MODE default_mode) {
+ struct simple_pattern *root = NULL, *last = NULL;
+
+ if(unlikely(!list || !*list)) return root;
+
+ int isseparator[256] = {
+ [' '] = 1 // space
+ , ['\t'] = 1 // tab
+ , ['\r'] = 1 // carriage return
+ , ['\n'] = 1 // new line
+ , ['\f'] = 1 // form feed
+ , ['\v'] = 1 // vertical tab
+ };
+
+ if (unlikely(separators && *separators)) {
+ memset(&isseparator[0], 0, sizeof(isseparator));
+ while(*separators) isseparator[(unsigned char)*separators++] = 1;
+ }
+
+ char *buf = mallocz(strlen(list) + 1);
+ const char *s = list;
+
+ while(s && *s) {
+ buf[0] = '\0';
+ char *c = buf;
+
+ char negative = 0;
+
+ // skip all spaces
+ while(isseparator[(unsigned char)*s])
+ s++;
+
+ if(*s == '!') {
+ negative = 1;
+ s++;
+ }
+
+ // empty string
+ if(unlikely(!*s))
+ break;
+
+ // find the next space
+ char escape = 0;
+ while(*s) {
+ if(*s == '\\' && !escape) {
+ escape = 1;
+ s++;
+ }
+ else {
+ if (isseparator[(unsigned char)*s] && !escape) {
+ s++;
+ break;
+ }
+
+ *c++ = *s++;
+ escape = 0;
+ }
+ }
+
+ // terminate our string
+ *c = '\0';
+
+ // if we matched the empty string, skip it
+ if(unlikely(!*buf))
+ continue;
+
+ // fprintf(stderr, "FOUND PATTERN: '%s'\n", buf);
+ struct simple_pattern *m = parse_pattern(buf, default_mode);
+ m->negative = negative;
+
+ // link it at the end
+ if(unlikely(!root))
+ root = last = m;
+ else {
+ last->next = m;
+ last = m;
+ }
+ }
+
+ freez(buf);
+ return (SIMPLE_PATTERN *)root;
+}
+
+static inline char *add_wildcarded(const char *matched, size_t matched_size, char *wildcarded, size_t *wildcarded_size) {
+ //if(matched_size) {
+ // char buf[matched_size + 1];
+ // strncpyz(buf, matched, matched_size);
+ // fprintf(stderr, "ADD WILDCARDED '%s' of length %zu\n", buf, matched_size);
+ //}
+
+ if(unlikely(wildcarded && *wildcarded_size && matched && *matched && matched_size)) {
+ size_t wss = *wildcarded_size - 1;
+ size_t len = (matched_size < wss)?matched_size:wss;
+ if(likely(len)) {
+ strncpyz(wildcarded, matched, len);
+
+ *wildcarded_size -= len;
+ return &wildcarded[len];
+ }
+ }
+
+ return wildcarded;
+}
+
+static inline int match_pattern(struct simple_pattern *m, const char *str, size_t len, char *wildcarded, size_t *wildcarded_size) {
+ char *s;
+
+ if(m->len <= len) {
+ switch(m->mode) {
+ case SIMPLE_PATTERN_SUBSTRING:
+ if(!m->len) return 1;
+ if((s = strstr(str, m->match))) {
+ wildcarded = add_wildcarded(str, s - str, wildcarded, wildcarded_size);
+ if(!m->child) {
+ wildcarded = add_wildcarded(&s[m->len], len - (&s[m->len] - str), wildcarded, wildcarded_size);
+ return 1;
+ }
+ return match_pattern(m->child, &s[m->len], len - (s - str) - m->len, wildcarded, wildcarded_size);
+ }
+ break;
+
+ case SIMPLE_PATTERN_PREFIX:
+ if(unlikely(strncmp(str, m->match, m->len) == 0)) {
+ if(!m->child) {
+ wildcarded = add_wildcarded(&str[m->len], len - m->len, wildcarded, wildcarded_size);
+ return 1;
+ }
+ return match_pattern(m->child, &str[m->len], len - m->len, wildcarded, wildcarded_size);
+ }
+ break;
+
+ case SIMPLE_PATTERN_SUFFIX:
+ if(unlikely(strcmp(&str[len - m->len], m->match) == 0)) {
+ wildcarded = add_wildcarded(str, len - m->len, wildcarded, wildcarded_size);
+ if(!m->child) return 1;
+ return 0;
+ }
+ break;
+
+ case SIMPLE_PATTERN_EXACT:
+ default:
+ if(unlikely(strcmp(str, m->match) == 0)) {
+ if(!m->child) return 1;
+ return 0;
+ }
+ break;
+ }
+ }
+
+ return 0;
+}
+
+int simple_pattern_matches_extract(SIMPLE_PATTERN *list, const char *str, char *wildcarded, size_t wildcarded_size) {
+ struct simple_pattern *m, *root = (struct simple_pattern *)list;
+
+ if(unlikely(!root || !str || !*str)) return 0;
+
+ size_t len = strlen(str);
+ for(m = root; m ; m = m->next) {
+ char *ws = wildcarded;
+ size_t wss = wildcarded_size;
+ if(unlikely(ws)) *ws = '\0';
+
+ if (match_pattern(m, str, len, ws, &wss)) {
+
+ //if(ws && wss)
+ // fprintf(stderr, "FINAL WILDCARDED '%s' of length %zu\n", ws, strlen(ws));
+
+ if (m->negative) return 0;
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+static inline void free_pattern(struct simple_pattern *m) {
+ if(!m) return;
+
+ free_pattern(m->child);
+ free_pattern(m->next);
+ freez((void *)m->match);
+ freez(m);
+}
+
+void simple_pattern_free(SIMPLE_PATTERN *list) {
+ if(!list) return;
+
+ free_pattern(((struct simple_pattern *)list));
+}
+
+/* Debugging patterns
+
+ This code should be dead - it is useful for debugging but should not be called by production code.
+ Feel free to comment it out, but please leave it in the file.
+*/
+extern void simple_pattern_dump(uint64_t debug_type, SIMPLE_PATTERN *p)
+{
+ struct simple_pattern *root = (struct simple_pattern *)p;
+ if(root==NULL) {
+ debug(debug_type,"dump_pattern(NULL)");
+ return;
+ }
+ debug(debug_type,"dump_pattern(%p) child=%p next=%p mode=%u match=%s", root, root->child, root->next, root->mode,
+ root->match);
+ if(root->child!=NULL)
+ simple_pattern_dump(debug_type, (SIMPLE_PATTERN*)root->child);
+ if(root->next!=NULL)
+ simple_pattern_dump(debug_type, (SIMPLE_PATTERN*)root->next);
+}
+
+/* Heuristic: decide if the pattern could match a DNS name.
+
+ Although this functionality is used directly by socket.c:connection_allowed() it must be in this file
+ because of the SIMPLE_PATTERN/simple_pattern structure hiding.
+ Based on RFC952 / RFC1123. We need to decide if the pattern may match a DNS name, or not. For the negative
+ cases we need to be sure that it can only match an ipv4 or ipv6 address:
+ * IPv6 addresses contain ':', which are illegal characters in DNS.
+ * IPv4 addresses cannot contain alphabetic characters.
+ * DNS TLDs must be alphanumeric to distinguish from IPv4.
+ Some patterns (e.g. "*a*") could match multiple cases (i.e. DNS or IPv6).
+ Some patterns will be awkward (e.g. "192.168.*") as they look like they are intended to match IPv4-only
+ but could match DNS (i.e. "192.168.com" is a valid name).
+*/
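+// Illustrative examples of the heuristic: "*.example.com" contains alphabetic
+// characters and no ':' so it is a potential DNS name; "fe80::1" contains ':'
+// so it can only be an IPv6 address; "192.168.*" contains wildcards and no
+// ':', so it is treated as a potential name too (the awkward case noted above).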
+static void scan_is_potential_name(struct simple_pattern *p, int *alpha, int *colon, int *wildcards)
+{
+ while (p) {
+ if (p->match) {
+ if(p->mode == SIMPLE_PATTERN_EXACT && !strcmp("localhost", p->match)) {
+ p = p->child;
+ continue;
+ }
+ char const *scan = p->match;
+ while (*scan != 0) {
+ if ((*scan >= 'a' && *scan <= 'z') || (*scan >= 'A' && *scan <= 'Z'))
+ *alpha = 1;
+ if (*scan == ':')
+ *colon = 1;
+ scan++;
+ }
+ if (p->mode != SIMPLE_PATTERN_EXACT)
+ *wildcards = 1;
+ }
+ // always advance to the child, so that a node without a match string
+ // cannot make this loop spin forever
+ p = p->child;
+ }
+}
+
+extern int simple_pattern_is_potential_name(SIMPLE_PATTERN *p)
+{
+ int alpha=0, colon=0, wildcards=0;
+ struct simple_pattern *root = (struct simple_pattern*)p;
+ while (root != NULL) {
+ if (root->match != NULL) {
+ scan_is_potential_name(root, &alpha, &colon, &wildcards);
+ }
+ if (root->mode != SIMPLE_PATTERN_EXACT)
+ wildcards = 1;
+ root = root->next;
+ }
+ return (alpha || wildcards) && !colon;
+}
+
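+// Remove a single space before and after each '=' sign, so that "key = value"
+// becomes "key=value". The caller owns (and must freez()) the returned copy.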
+char *simple_pattern_trim_around_equal(char *src) {
+ char *store = mallocz(strlen(src) +1);
+ if(!store)
+ return NULL;
+
+ char *dst = store;
+ while (*src) {
+ if (*src == '=') {
+ // drop a single space before the '=' (guard against reading before 'store')
+ if (dst > store && *(dst - 1) == ' ')
+ dst--;
+
+ *dst++ = *src++;
+
+ // drop a single space after the '='
+ if (*src == ' ')
+ src++;
+
+ // restart the loop, so a trailing '=' cannot copy past the terminator
+ continue;
+ }
+
+ *dst++ = *src++;
+ }
+ *dst = 0x00;
+
+ return store;
+}
+
+char *simple_pattern_iterate(SIMPLE_PATTERN **p)
+{
+ struct simple_pattern *root = (struct simple_pattern *) *p;
+ struct simple_pattern **Proot = (struct simple_pattern **)p;
+
+ (*Proot) = (*Proot)->next;
+ return (char *) root->match;
+}
diff --git a/libnetdata/simple_pattern/simple_pattern.h b/libnetdata/simple_pattern/simple_pattern.h
new file mode 100644
index 0000000..36fbbde
--- /dev/null
+++ b/libnetdata/simple_pattern/simple_pattern.h
@@ -0,0 +1,40 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_SIMPLE_PATTERN_H
+#define NETDATA_SIMPLE_PATTERN_H
+
+#include "../libnetdata.h"
+
+
+typedef enum {
+ SIMPLE_PATTERN_EXACT,
+ SIMPLE_PATTERN_PREFIX,
+ SIMPLE_PATTERN_SUFFIX,
+ SIMPLE_PATTERN_SUBSTRING
+} SIMPLE_PREFIX_MODE;
+
+typedef void SIMPLE_PATTERN;
+
+// create a simple_pattern from the string given
+// default_mode is used in cases where EXACT matches, without an asterisk,
+// should be considered PREFIX matches.
+extern SIMPLE_PATTERN *simple_pattern_create(const char *list, const char *separators, SIMPLE_PREFIX_MODE default_mode);
+
+// test if string str is matched from the pattern and fill 'wildcarded' with the parts matched by '*'
+extern int simple_pattern_matches_extract(SIMPLE_PATTERN *list, const char *str, char *wildcarded, size_t wildcarded_size);
+
+// test if string str is matched from the pattern
+#define simple_pattern_matches(list, str) simple_pattern_matches_extract(list, str, NULL, 0)
+
+// free a simple_pattern that was created with simple_pattern_create()
+// list can be NULL, in which case, this does nothing.
+extern void simple_pattern_free(SIMPLE_PATTERN *list);
+
+extern void simple_pattern_dump(uint64_t debug_type, SIMPLE_PATTERN *p);
+extern int simple_pattern_is_potential_name(SIMPLE_PATTERN *p);
+extern char *simple_pattern_iterate(SIMPLE_PATTERN **p);
+
+// Auxiliary function used to normalize a pattern string (trims spaces around '=')
+char *simple_pattern_trim_around_equal(char *src);
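+// Illustrative usage sketch (not part of this header); do_something() is a
+// hypothetical callback, and NULL separators are assumed to mean the default
+// (whitespace) separators:
+//
+//     SIMPLE_PATTERN *sp = simple_pattern_create("!*.tmp *", NULL, SIMPLE_PATTERN_EXACT);
+//     if(simple_pattern_matches(sp, "data.log"))   // 1: "*.tmp" does not match, "*" does
+//         do_something();
+//     simple_pattern_free(sp);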
+
+#endif //NETDATA_SIMPLE_PATTERN_H
diff --git a/libnetdata/socket/Makefile.am b/libnetdata/socket/Makefile.am
new file mode 100644
index 0000000..161784b
--- /dev/null
+++ b/libnetdata/socket/Makefile.am
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/libnetdata/socket/README.md b/libnetdata/socket/README.md
new file mode 100644
index 0000000..6736f13
--- /dev/null
+++ b/libnetdata/socket/README.md
@@ -0,0 +1,5 @@
+<!--
+custom_edit_url: https://github.com/netdata/netdata/edit/master/libnetdata/socket/README.md
+-->
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Flibnetdata%2Fsocket%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/libnetdata/socket/security.c b/libnetdata/socket/security.c
new file mode 100644
index 0000000..53366c4
--- /dev/null
+++ b/libnetdata/socket/security.c
@@ -0,0 +1,385 @@
+#include "../libnetdata.h"
+
+#ifdef ENABLE_HTTPS
+
+SSL_CTX *netdata_exporting_ctx=NULL;
+SSL_CTX *netdata_client_ctx=NULL;
+SSL_CTX *netdata_srv_ctx=NULL;
+const char *security_key=NULL;
+const char *security_cert=NULL;
+const char *tls_version=NULL;
+const char *tls_ciphers=NULL;
+int netdata_validate_server = NETDATA_SSL_VALID_CERTIFICATE;
+
+/**
+ * Info Callback
+ *
+ * Function used as callback for the OpenSSL Library
+ *
+ * @param ssl a pointer to the SSL structure of the client
+ * @param where the variable with the flags set.
+ * @param ret the return of the caller
+ */
+static void security_info_callback(const SSL *ssl, int where, int ret __maybe_unused) {
+ (void)ssl;
+ if (where & SSL_CB_ALERT) {
+ debug(D_WEB_CLIENT,"SSL INFO CALLBACK %s %s", SSL_alert_type_string(ret), SSL_alert_desc_string_long(ret));
+ }
+}
+
+/**
+ * OpenSSL Library
+ *
+ * Starts the openssl library for the Netdata.
+ */
+void security_openssl_library()
+{
+#if OPENSSL_VERSION_NUMBER < OPENSSL_VERSION_110
+# if (SSLEAY_VERSION_NUMBER >= OPENSSL_VERSION_097)
+ OPENSSL_config(NULL);
+# endif
+
+ SSL_load_error_strings();
+
+ SSL_library_init();
+#else
+ if (OPENSSL_init_ssl(OPENSSL_INIT_LOAD_CONFIG, NULL) != 1) {
+ error("SSL library cannot be initialized.");
+ }
+#endif
+}
+
+#if OPENSSL_VERSION_NUMBER >= OPENSSL_VERSION_110
+/**
+ * TLS version
+ *
+ * Returns the TLS version depending on the user input.
+ *
+ * @param lversion is the user input.
+ *
+ * @return it returns the version number.
+ */
+int tls_select_version(const char *lversion) {
+ if (!strcmp(lversion, "1") || !strcmp(lversion, "1.0"))
+ return TLS1_VERSION;
+ else if (!strcmp(lversion, "1.1"))
+ return TLS1_1_VERSION;
+ else if (!strcmp(lversion, "1.2"))
+ return TLS1_2_VERSION;
+#if defined(TLS1_3_VERSION)
+ else if (!strcmp(lversion, "1.3"))
+ return TLS1_3_VERSION;
+#endif
+
+#if defined(TLS_MAX_VERSION)
+ return TLS_MAX_VERSION;
+#else
+ return TLS1_2_VERSION;
+#endif
+}
+#endif
+
+/**
+ * OpenSSL common options
+ *
+ * Clients and servers share common options; this function sets them in the context.
+ *
+ * @param ctx the initialized SSL context.
+ * @param side 0 means server, and 1 client.
+ */
+void security_openssl_common_options(SSL_CTX *ctx, int side) {
+#if OPENSSL_VERSION_NUMBER >= OPENSSL_VERSION_110
+ if (!side) {
+ int version = tls_select_version(tls_version);
+#endif
+#if OPENSSL_VERSION_NUMBER < OPENSSL_VERSION_110
+ SSL_CTX_set_options (ctx,SSL_OP_NO_SSLv2|SSL_OP_NO_SSLv3|SSL_OP_NO_COMPRESSION);
+#else
+ SSL_CTX_set_min_proto_version(ctx, TLS1_VERSION);
+ SSL_CTX_set_max_proto_version(ctx, version);
+
+ if(tls_ciphers && strcmp(tls_ciphers, "none") != 0) {
+ if (!SSL_CTX_set_cipher_list(ctx, tls_ciphers)) {
+ error("SSL error. cannot set the cipher list");
+ }
+ }
+ }
+#endif
+
+ SSL_CTX_set_mode(ctx, SSL_MODE_ACCEPT_MOVING_WRITE_BUFFER);
+}
+
+/**
+ * Initialize Openssl Client
+ *
+ * Starts the client context, accepting TLS 1.0 up to the newest version the library supports.
+ *
+ * @return It returns the context on success or NULL otherwise
+ */
+SSL_CTX * security_initialize_openssl_client() {
+ SSL_CTX *ctx;
+#if OPENSSL_VERSION_NUMBER < OPENSSL_VERSION_110
+ ctx = SSL_CTX_new(SSLv23_client_method());
+#else
+ ctx = SSL_CTX_new(TLS_client_method());
+#endif
+ if(ctx) {
+#if OPENSSL_VERSION_NUMBER < OPENSSL_VERSION_110
+ SSL_CTX_set_options (ctx,SSL_OP_NO_SSLv2|SSL_OP_NO_SSLv3|SSL_OP_NO_COMPRESSION);
+#else
+ SSL_CTX_set_min_proto_version(ctx, TLS1_VERSION);
+# if defined(TLS_MAX_VERSION)
+ SSL_CTX_set_max_proto_version(ctx, TLS_MAX_VERSION);
+# elif defined(TLS1_3_VERSION)
+ SSL_CTX_set_max_proto_version(ctx, TLS1_3_VERSION);
+# elif defined(TLS1_2_VERSION)
+ SSL_CTX_set_max_proto_version(ctx, TLS1_2_VERSION);
+# endif
+#endif
+ }
+
+ return ctx;
+}
+
+/**
+ * Initialize OpenSSL server
+ *
+ * Starts the server context and loads the certificate.
+ *
+ * @return It returns the context on success or NULL otherwise
+ */
+static SSL_CTX * security_initialize_openssl_server() {
+ SSL_CTX *ctx;
+ char lerror[512];
+ static int netdata_id_context = 1;
+
+ //TODO: confirm whether the return values of other OpenSSL functions need to be checked
+#if OPENSSL_VERSION_NUMBER < OPENSSL_VERSION_110
+ ctx = SSL_CTX_new(SSLv23_server_method());
+ if (!ctx) {
+ error("Cannot create a new SSL context, netdata won't encrypt communication");
+ return NULL;
+ }
+
+ SSL_CTX_use_certificate_file(ctx, security_cert, SSL_FILETYPE_PEM);
+#else
+ ctx = SSL_CTX_new(TLS_server_method());
+ if (!ctx) {
+ error("Cannot create a new SSL context, netdata won't encrypt communication");
+ return NULL;
+ }
+
+ SSL_CTX_use_certificate_chain_file(ctx, security_cert);
+#endif
+ security_openssl_common_options(ctx, 0);
+
+ SSL_CTX_use_PrivateKey_file(ctx,security_key,SSL_FILETYPE_PEM);
+
+ if (!SSL_CTX_check_private_key(ctx)) {
+ ERR_error_string_n(ERR_get_error(),lerror,sizeof(lerror));
+ error("SSL cannot check the private key: %s",lerror);
+ SSL_CTX_free(ctx);
+ return NULL;
+ }
+
+ SSL_CTX_set_session_id_context(ctx,(void*)&netdata_id_context,(unsigned int)sizeof(netdata_id_context));
+ SSL_CTX_set_info_callback(ctx,security_info_callback);
+
+#if (OPENSSL_VERSION_NUMBER < OPENSSL_VERSION_095)
+ SSL_CTX_set_verify_depth(ctx,1);
+#endif
+ debug(D_WEB_CLIENT,"SSL GLOBAL CONTEXT STARTED\n");
+
+ return ctx;
+}
+
+/**
+ * Start SSL
+ *
+ * Call the correct function to start the SSL context.
+ *
+ * @param selector selects the context to initialize; the valid values are:
+ * NETDATA_SSL_CONTEXT_SERVER - starts the server context.
+ * NETDATA_SSL_CONTEXT_STREAMING - starts the streaming context.
+ * NETDATA_SSL_CONTEXT_EXPORTING - starts the exporting (e.g. OpenTSDB) context.
+ */
+void security_start_ssl(int selector) {
+ switch (selector) {
+ case NETDATA_SSL_CONTEXT_SERVER: {
+ struct stat statbuf;
+ if (stat(security_key, &statbuf) || stat(security_cert, &statbuf)) {
+ info("To use encryption it is necessary to set \"ssl certificate\" and \"ssl key\" in [web] !\n");
+ return;
+ }
+
+ netdata_srv_ctx = security_initialize_openssl_server();
+ break;
+ }
+ case NETDATA_SSL_CONTEXT_STREAMING: {
+ netdata_client_ctx = security_initialize_openssl_client();
+ //This is necessary for streaming, because it sometimes works with a non-blocking socket.
+ //It returns the bitmask after the change; the documentation does not describe any errors.
+ SSL_CTX_set_mode(netdata_client_ctx, SSL_MODE_ENABLE_PARTIAL_WRITE |SSL_MODE_ACCEPT_MOVING_WRITE_BUFFER |SSL_MODE_AUTO_RETRY);
+ break;
+ }
+ case NETDATA_SSL_CONTEXT_EXPORTING: {
+ netdata_exporting_ctx = security_initialize_openssl_client();
+ break;
+ }
+ }
+}
+
+/**
+ * Clean Open SSL
+ *
+ * Clean all the allocated contexts from netdata.
+ */
+void security_clean_openssl()
+{
+ if (netdata_srv_ctx) {
+ SSL_CTX_free(netdata_srv_ctx);
+ }
+
+ if (netdata_client_ctx) {
+ SSL_CTX_free(netdata_client_ctx);
+ }
+
+ if (netdata_exporting_ctx) {
+ SSL_CTX_free(netdata_exporting_ctx);
+ }
+
+#if OPENSSL_VERSION_NUMBER < OPENSSL_VERSION_110
+ ERR_free_strings();
+#endif
+}
+
+/**
+ * Process accept
+ *
+ * Process the SSL handshake with the client when it is necessary.
+ *
+ * @param ssl is a pointer for the SSL structure
+ * @param msg is a copy of the first 8 bytes of the initial message received
+ *
+ * @return it returns 0 (NETDATA_SSL_HANDSHAKE_COMPLETE) when the handshake succeeds,
+ * 8 (NETDATA_SSL_NO_HANDSHAKE) for a plain connection, and another power of 2 otherwise.
+ */
+int security_process_accept(SSL *ssl,int msg) {
+ int sock = SSL_get_fd(ssl);
+ int test;
+ if (msg > 0x17)
+ {
+ return NETDATA_SSL_NO_HANDSHAKE;
+ }
+
+ ERR_clear_error();
+ if ((test = SSL_accept(ssl)) <= 0) {
+ int sslerrno = SSL_get_error(ssl, test);
+ switch(sslerrno) {
+ case SSL_ERROR_WANT_READ:
+ {
+ error("SSL handshake did not finish and it wanna read on socket %d!", sock);
+ return NETDATA_SSL_WANT_READ;
+ }
+ case SSL_ERROR_WANT_WRITE:
+ {
+ error("SSL handshake did not finish and it wanna read on socket %d!", sock);
+ return NETDATA_SSL_WANT_WRITE;
+ }
+ case SSL_ERROR_NONE:
+ case SSL_ERROR_SSL:
+ case SSL_ERROR_SYSCALL:
+ default:
+ {
+ u_long err;
+ char buf[256];
+ int counter = 0;
+ while ((err = ERR_get_error()) != 0) {
+ ERR_error_string_n(err, buf, sizeof(buf));
+ info("%d SSL Handshake error (%s) on socket %d ", counter++, ERR_error_string((long)SSL_get_error(ssl, test), NULL), sock);
+ }
+ return NETDATA_SSL_NO_HANDSHAKE;
+ }
+ }
+ }
+
+ if (SSL_is_init_finished(ssl))
+ {
+ debug(D_WEB_CLIENT_ACCESS,"SSL Handshake finished %s errno %d on socket fd %d", ERR_error_string((long)SSL_get_error(ssl, test), NULL), errno, sock);
+ }
+
+ return NETDATA_SSL_HANDSHAKE_COMPLETE;
+}
+
+/**
+ * Test Certificate
+ *
+ * Check the certificate of the Netdata parent.
+ *
+ * @param ssl is the connection structure
+ *
+ * @return It returns 0 on success and -1 otherwise
+ */
+int security_test_certificate(SSL *ssl) {
+ X509* cert = SSL_get_peer_certificate(ssl);
+ int ret;
+ long status;
+ if (!cert) {
+ return -1;
+ }
+
+ status = SSL_get_verify_result(ssl);
+ if (X509_V_OK != status)
+ {
+ char error[512];
+ ERR_error_string_n(ERR_get_error(), error, sizeof(error));
+ error("SSL RFC4158 check: we have an invalid certificate; verification returned %ld with message %s", status, error);
+ ret = -1;
+ } else {
+ ret = 0;
+ }
+
+ return ret;
+}
+
+/**
+ * Location for context
+ *
+ * When the user gives us a directory with the available certificates and
+ * the Netdata parent certificate, we use this function to validate the certificate.
+ *
+ * @param ctx the context where the path will be set.
+ * @param file the file with Netdata parent certificate.
+ * @param path the directory where the certificates are stored.
+ *
+ * @return It returns 0 on success and -1 otherwise.
+ */
+int security_location_for_context(SSL_CTX *ctx, char *file, char *path) {
+ struct stat statbuf;
+ if (stat(file, &statbuf)) {
+ info("Netdata does not have the parent's SSL certificate, so it will use the default OpenSSL configuration to validate certificates!");
+ return 0;
+ }
+
+ ERR_clear_error();
+ u_long err;
+ char buf[256];
+ if(!SSL_CTX_load_verify_locations(ctx, file, path)) {
+ goto slfc;
+ }
+
+ if(!SSL_CTX_set_default_verify_paths(ctx)) {
+ goto slfc;
+ }
+
+ return 0;
+
+slfc:
+ while ((err = ERR_get_error()) != 0) {
+ ERR_error_string_n(err, buf, sizeof(buf));
+ error("Cannot set the directory for the certificates and the parent SSL certificate: %s",buf);
+ }
+ return -1;
+}
+
+#endif
diff --git a/libnetdata/socket/security.h b/libnetdata/socket/security.h
new file mode 100644
index 0000000..17ecc6d
--- /dev/null
+++ b/libnetdata/socket/security.h
@@ -0,0 +1,55 @@
+#ifndef NETDATA_SECURITY_H
+# define NETDATA_SECURITY_H
+
+# define NETDATA_SSL_HANDSHAKE_COMPLETE 0 //All the steps were successful
+# define NETDATA_SSL_START 1 //Starting the handshake; the conn variable is NULL
+# define NETDATA_SSL_WANT_READ 2 //The connection wants to read from the socket
+# define NETDATA_SSL_WANT_WRITE 4 //The connection wants to write to the socket
+# define NETDATA_SSL_NO_HANDSHAKE 8 //Continue without an encrypted connection
+# define NETDATA_SSL_OPTIONAL 16 //Accept both HTTP and HTTPS requests
+# define NETDATA_SSL_FORCE 32 //Accept only HTTPS requests
+# define NETDATA_SSL_INVALID_CERTIFICATE 64 //Accept invalid certificates
+# define NETDATA_SSL_VALID_CERTIFICATE 128 //Accept only valid certificates
+# define NETDATA_SSL_PROXY_HTTPS 256 //The proxy is using HTTPS
+
+#define NETDATA_SSL_CONTEXT_SERVER 0
+#define NETDATA_SSL_CONTEXT_STREAMING 1
+#define NETDATA_SSL_CONTEXT_EXPORTING 2
+
+# ifdef ENABLE_HTTPS
+
+#define OPENSSL_VERSION_095 0x00905100L
+#define OPENSSL_VERSION_097 0x0907000L
+#define OPENSSL_VERSION_110 0x10100000L
+#define OPENSSL_VERSION_111 0x10101000L
+
+# include <openssl/ssl.h>
+# include <openssl/err.h>
+# if (SSLEAY_VERSION_NUMBER >= OPENSSL_VERSION_097) && (OPENSSL_VERSION_NUMBER < OPENSSL_VERSION_110)
+# include <openssl/conf.h>
+# endif
+
+struct netdata_ssl{
+ SSL *conn; //SSL connection
+ uint32_t flags; //The flags for SSL connection
+};
+
+extern SSL_CTX *netdata_exporting_ctx;
+extern SSL_CTX *netdata_client_ctx;
+extern SSL_CTX *netdata_srv_ctx;
+extern const char *security_key;
+extern const char *security_cert;
+extern const char *tls_version;
+extern const char *tls_ciphers;
+extern int netdata_validate_server;
+extern int security_location_for_context(SSL_CTX *ctx,char *file,char *path);
+
+void security_openssl_library();
+void security_clean_openssl();
+void security_start_ssl(int selector);
+int security_process_accept(SSL *ssl,int msg);
+int security_test_certificate(SSL *ssl);
+SSL_CTX * security_initialize_openssl_client();
+
+# endif //ENABLE_HTTPS
+#endif //NETDATA_SECURITY_H
diff --git a/libnetdata/socket/socket.c b/libnetdata/socket/socket.c
new file mode 100644
index 0000000..73eb8e6
--- /dev/null
+++ b/libnetdata/socket/socket.c
@@ -0,0 +1,1733 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "../libnetdata.h"
+
+// --------------------------------------------------------------------------------------------------------------------
+// various library calls
+
+#ifdef __gnu_linux__
+#define LARGE_SOCK_SIZE 33554431 // don't ask why - I found it at brubeck source - I guess it is just a large number
+#else
+#define LARGE_SOCK_SIZE 4096
+#endif
+
+int sock_setnonblock(int fd) {
+ int flags;
+
+ flags = fcntl(fd, F_GETFL);
+ flags |= O_NONBLOCK;
+
+ int ret = fcntl(fd, F_SETFL, flags);
+ if(ret < 0)
+ error("Failed to set O_NONBLOCK on socket %d", fd);
+
+ return ret;
+}
+
+int sock_delnonblock(int fd) {
+ int flags;
+
+ flags = fcntl(fd, F_GETFL);
+ flags &= ~O_NONBLOCK;
+
+ int ret = fcntl(fd, F_SETFL, flags);
+ if(ret < 0)
+ error("Failed to remove O_NONBLOCK on socket %d", fd);
+
+ return ret;
+}
+
+int sock_setreuse(int fd, int reuse) {
+ int ret = setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &reuse, sizeof(reuse));
+
+ if(ret == -1)
+ error("Failed to set SO_REUSEADDR on socket %d", fd);
+
+ return ret;
+}
+
+int sock_setreuse_port(int fd, int reuse) {
+ int ret;
+
+#ifdef SO_REUSEPORT
+ ret = setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &reuse, sizeof(reuse));
+ if(ret == -1 && errno != ENOPROTOOPT)
+ error("failed to set SO_REUSEPORT on socket %d", fd);
+#else
+ ret = -1;
+#endif
+
+ return ret;
+}
+
+int sock_enlarge_in(int fd) {
+ int ret, bs = LARGE_SOCK_SIZE;
+
+ ret = setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &bs, sizeof(bs));
+
+ if(ret == -1)
+ error("Failed to set SO_RCVBUF on socket %d", fd);
+
+ return ret;
+}
+
+int sock_enlarge_out(int fd) {
+ int ret, bs = LARGE_SOCK_SIZE;
+ ret = setsockopt(fd, SOL_SOCKET, SO_SNDBUF, &bs, sizeof(bs));
+
+ if(ret == -1)
+ error("Failed to set SO_SNDBUF on socket %d", fd);
+
+ return ret;
+}
+
+
+// --------------------------------------------------------------------------------------------------------------------
+
+char *strdup_client_description(int family, const char *protocol, const char *ip, uint16_t port) {
+ char buffer[100 + 1];
+
+ switch(family) {
+ case AF_INET:
+ snprintfz(buffer, 100, "%s:%s:%d", protocol, ip, port);
+ break;
+
+ case AF_INET6:
+ default:
+ snprintfz(buffer, 100, "%s:[%s]:%d", protocol, ip, port);
+ break;
+
+ case AF_UNIX:
+ snprintfz(buffer, 100, "%s:%s", protocol, ip);
+ break;
+ }
+
+ return strdupz(buffer);
+}
+
+// --------------------------------------------------------------------------------------------------------------------
+// listening sockets
+
+int create_listen_socket_unix(const char *path, int listen_backlog) {
+ int sock;
+
+ debug(D_LISTENER, "LISTENER: UNIX creating new listening socket on path '%s'", path);
+
+ sock = socket(AF_UNIX, SOCK_STREAM, 0);
+ if(sock < 0) {
+ error("LISTENER: UNIX socket() on path '%s' failed.", path);
+ return -1;
+ }
+
+ sock_setnonblock(sock);
+ sock_enlarge_in(sock);
+
+ struct sockaddr_un name;
+ memset(&name, 0, sizeof(struct sockaddr_un));
+ name.sun_family = AF_UNIX;
+ strncpy(name.sun_path, path, sizeof(name.sun_path)-1);
+
+ errno = 0;
+ if (unlink(path) == -1 && errno != ENOENT)
+ error("LISTENER: failed to remove existing (probably obsolete or left-over) file on UNIX socket path '%s'.", path);
+
+ if(bind (sock, (struct sockaddr *) &name, sizeof (name)) < 0) {
+ close(sock);
+ error("LISTENER: UNIX bind() on path '%s' failed.", path);
+ return -1;
+ }
+
+ // we have to chmod this to 0777 so that the client will be able
+ // to read from and write to this socket.
+ if(chmod(path, 0777) == -1)
+ error("LISTENER: failed to chmod() socket file '%s'.", path);
+
+ if(listen(sock, listen_backlog) < 0) {
+ close(sock);
+ error("LISTENER: UNIX listen() on path '%s' failed.", path);
+ return -1;
+ }
+
+ debug(D_LISTENER, "LISTENER: Listening on UNIX path '%s'", path);
+ return sock;
+}
+
+int create_listen_socket4(int socktype, const char *ip, uint16_t port, int listen_backlog) {
+ int sock;
+
+ debug(D_LISTENER, "LISTENER: IPv4 creating new listening socket on ip '%s' port %d, socktype %d", ip, port, socktype);
+
+ sock = socket(AF_INET, socktype, 0);
+ if(sock < 0) {
+ error("LISTENER: IPv4 socket() on ip '%s' port %d, socktype %d failed.", ip, port, socktype);
+ return -1;
+ }
+
+ sock_setreuse(sock, 1);
+ sock_setreuse_port(sock, 1);
+ sock_setnonblock(sock);
+ sock_enlarge_in(sock);
+
+ struct sockaddr_in name;
+ memset(&name, 0, sizeof(struct sockaddr_in));
+ name.sin_family = AF_INET;
+ name.sin_port = htons (port);
+
+ int ret = inet_pton(AF_INET, ip, (void *)&name.sin_addr.s_addr);
+ if(ret != 1) {
+ error("LISTENER: Failed to convert IP '%s' to a valid IPv4 address.", ip);
+ close(sock);
+ return -1;
+ }
+
+ if(bind (sock, (struct sockaddr *) &name, sizeof (name)) < 0) {
+ close(sock);
+ error("LISTENER: IPv4 bind() on ip '%s' port %d, socktype %d failed.", ip, port, socktype);
+ return -1;
+ }
+
+ if(socktype == SOCK_STREAM && listen(sock, listen_backlog) < 0) {
+ close(sock);
+ error("LISTENER: IPv4 listen() on ip '%s' port %d, socktype %d failed.", ip, port, socktype);
+ return -1;
+ }
+
+ debug(D_LISTENER, "LISTENER: Listening on IPv4 ip '%s' port %d, socktype %d", ip, port, socktype);
+ return sock;
+}
+
+int create_listen_socket6(int socktype, uint32_t scope_id, const char *ip, int port, int listen_backlog) {
+ int sock;
+ int ipv6only = 1;
+
+ debug(D_LISTENER, "LISTENER: IPv6 creating new listening socket on ip '%s' port %d, socktype %d", ip, port, socktype);
+
+ sock = socket(AF_INET6, socktype, 0);
+ if (sock < 0) {
+ error("LISTENER: IPv6 socket() on ip '%s' port %d, socktype %d, failed.", ip, port, socktype);
+ return -1;
+ }
+
+ sock_setreuse(sock, 1);
+ sock_setreuse_port(sock, 1);
+ sock_setnonblock(sock);
+ sock_enlarge_in(sock);
+
+ /* IPv6 only */
+ if(setsockopt(sock, IPPROTO_IPV6, IPV6_V6ONLY, (void*)&ipv6only, sizeof(ipv6only)) != 0)
+ error("LISTENER: Cannot set IPV6_V6ONLY on ip '%s' port %d, socktype %d.", ip, port, socktype);
+
+ struct sockaddr_in6 name;
+ memset(&name, 0, sizeof(struct sockaddr_in6));
+ name.sin6_family = AF_INET6;
+ name.sin6_port = htons ((uint16_t) port);
+ name.sin6_scope_id = scope_id;
+
+ int ret = inet_pton(AF_INET6, ip, (void *)&name.sin6_addr.s6_addr);
+ if(ret != 1) {
+ error("LISTENER: Failed to convert IP '%s' to a valid IPv6 address.", ip);
+ close(sock);
+ return -1;
+ }
+
+ name.sin6_scope_id = scope_id;
+
+ if (bind (sock, (struct sockaddr *) &name, sizeof (name)) < 0) {
+ close(sock);
+ error("LISTENER: IPv6 bind() on ip '%s' port %d, socktype %d failed.", ip, port, socktype);
+ return -1;
+ }
+
+ if (socktype == SOCK_STREAM && listen(sock, listen_backlog) < 0) {
+ close(sock);
+ error("LISTENER: IPv6 listen() on ip '%s' port %d, socktype %d failed.", ip, port, socktype);
+ return -1;
+ }
+
+ debug(D_LISTENER, "LISTENER: Listening on IPv6 ip '%s' port %d, socktype %d", ip, port, socktype);
+ return sock;
+}
+
+static inline int listen_sockets_add(LISTEN_SOCKETS *sockets, int fd, int family, int socktype, const char *protocol, const char *ip, uint16_t port, int acl_flags) {
+ if(sockets->opened >= MAX_LISTEN_FDS) {
+ error("LISTENER: Too many listening sockets. Failed to add listening %s socket at ip '%s' port %d, protocol %s, socktype %d", protocol, ip, port, protocol, socktype);
+ close(fd);
+ return -1;
+ }
+
+ sockets->fds[sockets->opened] = fd;
+ sockets->fds_types[sockets->opened] = socktype;
+ sockets->fds_families[sockets->opened] = family;
+ sockets->fds_names[sockets->opened] = strdup_client_description(family, protocol, ip, port);
+ sockets->fds_acl_flags[sockets->opened] = acl_flags;
+
+ sockets->opened++;
+ return 0;
+}
+
+int listen_sockets_check_is_member(LISTEN_SOCKETS *sockets, int fd) {
+ size_t i;
+ for(i = 0; i < sockets->opened ;i++)
+ if(sockets->fds[i] == fd) return 1;
+
+ return 0;
+}
+
+static inline void listen_sockets_init(LISTEN_SOCKETS *sockets) {
+ size_t i;
+ for(i = 0; i < MAX_LISTEN_FDS ;i++) {
+ sockets->fds[i] = -1;
+ sockets->fds_names[i] = NULL;
+ sockets->fds_types[i] = -1;
+ }
+
+ sockets->opened = 0;
+ sockets->failed = 0;
+}
+
+void listen_sockets_close(LISTEN_SOCKETS *sockets) {
+ size_t i;
+ for(i = 0; i < sockets->opened ;i++) {
+ close(sockets->fds[i]);
+ sockets->fds[i] = -1;
+
+ freez(sockets->fds_names[i]);
+ sockets->fds_names[i] = NULL;
+
+ sockets->fds_types[i] = -1;
+ }
+
+ sockets->opened = 0;
+ sockets->failed = 0;
+}
+
+/*
+ * SSL ACL
+ *
+ * Search the ACL for an SSL directive and apply it if it is set.
+ *
+ * @param acl is the acl given by the user.
+ */
+WEB_CLIENT_ACL socket_ssl_acl(char *acl) {
+ char *ssl = strchr(acl,'^');
+ if(ssl) {
+ //Due to the format of the SSL directive it is always the last one,
+ //so we terminate the string here to avoid problems with the ACLs
+ *ssl = '\0';
+#ifdef ENABLE_HTTPS
+ ssl++;
+ if (!strncmp("SSL=",ssl,4)) {
+ ssl += 4;
+ if (!strcmp(ssl,"optional")) {
+ return WEB_CLIENT_ACL_SSL_OPTIONAL;
+ }
+ else if (!strcmp(ssl,"force")) {
+ return WEB_CLIENT_ACL_SSL_FORCE;
+ }
+ }
+#endif
+ }
+
+ return WEB_CLIENT_ACL_NONE;
+}
+
+WEB_CLIENT_ACL read_acl(char *st) {
+ WEB_CLIENT_ACL ret = socket_ssl_acl(st);
+
+ if (!strcmp(st,"dashboard")) ret |= WEB_CLIENT_ACL_DASHBOARD;
+ if (!strcmp(st,"registry")) ret |= WEB_CLIENT_ACL_REGISTRY;
+ if (!strcmp(st,"badges")) ret |= WEB_CLIENT_ACL_BADGE;
+ if (!strcmp(st,"management")) ret |= WEB_CLIENT_ACL_MGMT;
+ if (!strcmp(st,"streaming")) ret |= WEB_CLIENT_ACL_STREAMING;
+ if (!strcmp(st,"netdata.conf")) ret |= WEB_CLIENT_ACL_NETDATACONF;
+
+ return ret;
+}
+
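+// bind_to_this() accepts one definition of the form (illustrative summary,
+// derived from the parser below; the ACL names are the ones read_acl() above
+// understands):
+//
+//     [PROTOCOL:]IP[%INTERFACE][:PORT][=ACL[|ACL...]][^SSL=force|optional]
+//
+// e.g. "tcp:127.0.0.1:19999=dashboard|registry^SSL=optional" or
+// "unix:/run/netdata/netdata.sock".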
+static inline int bind_to_this(LISTEN_SOCKETS *sockets, const char *definition, uint16_t default_port, int listen_backlog) {
+ int added = 0;
+ WEB_CLIENT_ACL acl_flags = WEB_CLIENT_ACL_NONE;
+
+ struct addrinfo hints;
+ struct addrinfo *result = NULL, *rp = NULL;
+
+ char buffer[strlen(definition) + 1];
+ strcpy(buffer, definition);
+
+ char buffer2[10 + 1];
+ snprintfz(buffer2, 10, "%d", default_port);
+
+ char *ip = buffer, *port = buffer2, *interface = "", *portconfig;
+
+ int protocol = IPPROTO_TCP, socktype = SOCK_STREAM;
+ const char *protocol_str = "tcp";
+
+ if(strncmp(ip, "tcp:", 4) == 0) {
+ ip += 4;
+ protocol = IPPROTO_TCP;
+ socktype = SOCK_STREAM;
+ protocol_str = "tcp";
+ }
+ else if(strncmp(ip, "udp:", 4) == 0) {
+ ip += 4;
+ protocol = IPPROTO_UDP;
+ socktype = SOCK_DGRAM;
+ protocol_str = "udp";
+ }
+ else if(strncmp(ip, "unix:", 5) == 0) {
+ char *path = ip + 5;
+ socktype = SOCK_STREAM;
+ protocol_str = "unix";
+ int fd = create_listen_socket_unix(path, listen_backlog);
+ if (fd == -1) {
+ error("LISTENER: Cannot create unix socket '%s'", path);
+ sockets->failed++;
+ } else {
+ acl_flags = WEB_CLIENT_ACL_DASHBOARD | WEB_CLIENT_ACL_REGISTRY | WEB_CLIENT_ACL_BADGE | WEB_CLIENT_ACL_MGMT | WEB_CLIENT_ACL_NETDATACONF | WEB_CLIENT_ACL_STREAMING | WEB_CLIENT_ACL_SSL_DEFAULT;
+ listen_sockets_add(sockets, fd, AF_UNIX, socktype, protocol_str, path, 0, acl_flags);
+ added++;
+ }
+ return added;
+ }
+
+ char *e = ip;
+ if(*e == '[') {
+ e = ++ip;
+ while(*e && *e != ']') e++;
+ if(*e == ']') {
+ *e = '\0';
+ e++;
+ }
+ }
+ else {
+ while(*e && *e != ':' && *e != '%' && *e != '=') e++;
+ }
+
+ if(*e == '%') {
+ *e = '\0';
+ e++;
+ interface = e;
+ while(*e && *e != ':' && *e != '=') e++;
+ }
+
+ if(*e == ':') {
+ port = e + 1;
+ *e = '\0';
+ e++;
+ while(*e && *e != '=') e++;
+ }
+
+ if(*e == '=') {
+ *e='\0';
+ e++;
+ portconfig = e;
+ while (*e != '\0') {
+ if (*e == '|') {
+ *e = '\0';
+ acl_flags |= read_acl(portconfig);
+ e++;
+ portconfig = e;
+ continue;
+ }
+ e++;
+ }
+ acl_flags |= read_acl(portconfig);
+ } else {
+ acl_flags = WEB_CLIENT_ACL_DASHBOARD | WEB_CLIENT_ACL_REGISTRY | WEB_CLIENT_ACL_BADGE | WEB_CLIENT_ACL_MGMT | WEB_CLIENT_ACL_NETDATACONF | WEB_CLIENT_ACL_STREAMING | WEB_CLIENT_ACL_SSL_DEFAULT;
+ }
+
+ //If the user does not set an SSL option in "bind to" but does have the
+ //certificates, we must redirect, so assume the default option here
+ if(!(acl_flags & WEB_CLIENT_ACL_SSL_OPTIONAL) && !(acl_flags & WEB_CLIENT_ACL_SSL_FORCE)) {
+ acl_flags |= WEB_CLIENT_ACL_SSL_DEFAULT;
+ }
+
+ uint32_t scope_id = 0;
+ if(*interface) {
+ scope_id = if_nametoindex(interface);
+ if(!scope_id)
+ error("LISTENER: Cannot find a network interface named '%s'. Continuing with limiting the network interface", interface);
+ }
+
+ if(!*ip || *ip == '*' || !strcmp(ip, "any") || !strcmp(ip, "all"))
+ ip = NULL;
+
+ if(!*port)
+ port = buffer2;
+
+ memset(&hints, 0, sizeof(hints));
+ hints.ai_family = AF_UNSPEC; /* Allow IPv4 or IPv6 */
+ hints.ai_socktype = socktype;
+ hints.ai_flags = AI_PASSIVE; /* For wildcard IP address */
+ hints.ai_protocol = protocol;
+ hints.ai_canonname = NULL;
+ hints.ai_addr = NULL;
+ hints.ai_next = NULL;
+
+ int r = getaddrinfo(ip, port, &hints, &result);
+ if (r != 0) {
+ error("LISTENER: getaddrinfo('%s', '%s'): %s\n", ip, port, gai_strerror(r));
+ return -1;
+ }
+
+ for (rp = result; rp != NULL; rp = rp->ai_next) {
+ int fd = -1;
+ int family;
+
+ char rip[INET_ADDRSTRLEN + INET6_ADDRSTRLEN] = "INVALID";
+ uint16_t rport = default_port;
+
+ family = rp->ai_addr->sa_family;
+ switch (family) {
+ case AF_INET: {
+ struct sockaddr_in *sin = (struct sockaddr_in *) rp->ai_addr;
+ inet_ntop(AF_INET, &sin->sin_addr, rip, INET_ADDRSTRLEN);
+ rport = ntohs(sin->sin_port);
+ // info("Attempting to listen on IPv4 '%s' ('%s'), port %d ('%s'), socktype %d", rip, ip, rport, port, socktype);
+ fd = create_listen_socket4(socktype, rip, rport, listen_backlog);
+ break;
+ }
+
+ case AF_INET6: {
+ struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) rp->ai_addr;
+ inet_ntop(AF_INET6, &sin6->sin6_addr, rip, INET6_ADDRSTRLEN);
+ rport = ntohs(sin6->sin6_port);
+ // info("Attempting to listen on IPv6 '%s' ('%s'), port %d ('%s'), socktype %d", rip, ip, rport, port, socktype);
+ fd = create_listen_socket6(socktype, scope_id, rip, rport, listen_backlog);
+ break;
+ }
+
+ default:
+ debug(D_LISTENER, "LISTENER: Unknown socket family %d", family);
+ break;
+ }
+
+ if (fd == -1) {
+ error("LISTENER: Cannot bind to ip '%s', port %d", rip, rport);
+ sockets->failed++;
+ }
+ else {
+ listen_sockets_add(sockets, fd, family, socktype, protocol_str, rip, rport, acl_flags);
+ added++;
+ }
+ }
+
+ freeaddrinfo(result);
+
+ return added;
+}
+
+int listen_sockets_setup(LISTEN_SOCKETS *sockets) {
+ listen_sockets_init(sockets);
+
+ sockets->backlog = (int) appconfig_get_number(sockets->config, sockets->config_section, "listen backlog", sockets->backlog);
+
+ long long int old_port = sockets->default_port;
+ long long int new_port = appconfig_get_number(sockets->config, sockets->config_section, "default port", sockets->default_port);
+ if(new_port < 1 || new_port > 65535) {
+ error("LISTENER: Invalid listen port %lld given. Defaulting to %lld.", new_port, old_port);
+ sockets->default_port = (uint16_t) appconfig_set_number(sockets->config, sockets->config_section, "default port", old_port);
+ }
+ else sockets->default_port = (uint16_t)new_port;
+
+ debug(D_OPTIONS, "LISTENER: Default listen port set to %d.", sockets->default_port);
+
+ char *s = appconfig_get(sockets->config, sockets->config_section, "bind to", sockets->default_bind_to);
+ while(*s) {
+ char *e = s;
+
+ // skip separators, moving both s(tart) and e(nd)
+ while(isspace(*e) || *e == ',') s = ++e;
+
+ // move e(nd) to the first separator
+ while(*e && !isspace(*e) && *e != ',') e++;
+
+ // is there anything?
+ if(!*s || s == e) break;
+
+ char buf[e - s + 1];
+ strncpyz(buf, s, e - s);
+ bind_to_this(sockets, buf, sockets->default_port, sockets->backlog);
+
+ s = e;
+ }
+
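+ // if some sockets failed to open, list the ones that did open, to make the failures easier to spot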
+ if(sockets->failed) {
+ size_t i;
+ for(i = 0; i < sockets->opened ;i++)
+ info("LISTENER: Listen socket %s opened successfully.", sockets->fds_names[i]);
+ }
+
+ return (int)sockets->opened;
+}
+
+
+// --------------------------------------------------------------------------------------------------------------------
+// connect to another host/port
+
+// connect_to_this_unix()
+// path the path of the unix socket
+// timeout the timeout for establishing a connection
+
+static inline int connect_to_unix(const char *path, struct timeval *timeout) {
+ int fd = socket(AF_UNIX, SOCK_STREAM, 0);
+ if(fd == -1) {
+ error("Failed to create UNIX socket() for '%s'", path);
+ return -1;
+ }
+
+ if(timeout) {
+ if(setsockopt(fd, SOL_SOCKET, SO_SNDTIMEO, (char *) timeout, sizeof(struct timeval)) < 0)
+ error("Failed to set timeout on UNIX socket '%s'", path);
+ }
+
+ struct sockaddr_un addr;
+ memset(&addr, 0, sizeof(addr));
+ addr.sun_family = AF_UNIX;
+ strncpy(addr.sun_path, path, sizeof(addr.sun_path)-1);
+
+ if (connect(fd, (struct sockaddr*)&addr, sizeof(addr)) == -1) {
+ error("Cannot connect to UNIX socket on path '%s'.", path);
+ close(fd);
+ return -1;
+ }
+
+ debug(D_CONNECT_TO, "Connected to UNIX socket on path '%s'.", path);
+
+ return fd;
+}
+
+// connect_to_this_ip46()
+// protocol IPPROTO_TCP, IPPROTO_UDP
+// socktype SOCK_STREAM, SOCK_DGRAM
+// host the destination hostname or IP address (IPv4 or IPv6) to connect to
+// if it resolves to many IPs, all are tried (IPv4 and IPv6)
+// scope_id the if_index id of the interface to use for connecting (0 = any)
+// (used only under IPv6)
+// service the service name or port to connect to
+// timeout the timeout for establishing a connection
+
+int connect_to_this_ip46(int protocol, int socktype, const char *host, uint32_t scope_id, const char *service, struct timeval *timeout) {
+ struct addrinfo hints;
+ struct addrinfo *ai_head = NULL, *ai = NULL;
+
+ memset(&hints, 0, sizeof(hints));
+ hints.ai_family = PF_UNSPEC; /* Allow IPv4 or IPv6 */
+ hints.ai_socktype = socktype;
+ hints.ai_protocol = protocol;
+
+ int ai_err = getaddrinfo(host, service, &hints, &ai_head);
+ if (ai_err != 0) {
+ error("Cannot resolve host '%s', port '%s': %s", host, service, gai_strerror(ai_err));
+ return -1;
+ }
+
+ int fd = -1;
+ for (ai = ai_head; ai != NULL && fd == -1; ai = ai->ai_next) {
+
+ if (ai->ai_family == PF_INET6) {
+ struct sockaddr_in6 *pSadrIn6 = (struct sockaddr_in6 *) ai->ai_addr;
+ if(pSadrIn6->sin6_scope_id == 0) {
+ pSadrIn6->sin6_scope_id = scope_id;
+ }
+ }
+
+ char hostBfr[NI_MAXHOST + 1];
+ char servBfr[NI_MAXSERV + 1];
+
+ getnameinfo(ai->ai_addr,
+ ai->ai_addrlen,
+ hostBfr,
+ sizeof(hostBfr),
+ servBfr,
+ sizeof(servBfr),
+ NI_NUMERICHOST | NI_NUMERICSERV);
+
+ debug(D_CONNECT_TO, "Address info: host = '%s', service = '%s', ai_flags = 0x%02X, ai_family = %d (PF_INET = %d, PF_INET6 = %d), ai_socktype = %d (SOCK_STREAM = %d, SOCK_DGRAM = %d), ai_protocol = %d (IPPROTO_TCP = %d, IPPROTO_UDP = %d), ai_addrlen = %lu (sockaddr_in = %lu, sockaddr_in6 = %lu)",
+ hostBfr,
+ servBfr,
+ (unsigned int)ai->ai_flags,
+ ai->ai_family,
+ PF_INET,
+ PF_INET6,
+ ai->ai_socktype,
+ SOCK_STREAM,
+ SOCK_DGRAM,
+ ai->ai_protocol,
+ IPPROTO_TCP,
+ IPPROTO_UDP,
+ (unsigned long)ai->ai_addrlen,
+ (unsigned long)sizeof(struct sockaddr_in),
+ (unsigned long)sizeof(struct sockaddr_in6));
+
+ switch (ai->ai_addr->sa_family) {
+ case PF_INET: {
+ struct sockaddr_in *pSadrIn = (struct sockaddr_in *)ai->ai_addr;
+ (void)pSadrIn;
+
+ debug(D_CONNECT_TO, "ai_addr = sin_family: %d (AF_INET = %d, AF_INET6 = %d), sin_addr: '%s', sin_port: '%s'",
+ pSadrIn->sin_family,
+ AF_INET,
+ AF_INET6,
+ hostBfr,
+ servBfr);
+ break;
+ }
+
+ case PF_INET6: {
+ struct sockaddr_in6 *pSadrIn6 = (struct sockaddr_in6 *) ai->ai_addr;
+ (void)pSadrIn6;
+
+ debug(D_CONNECT_TO,"ai_addr = sin6_family: %d (AF_INET = %d, AF_INET6 = %d), sin6_addr: '%s', sin6_port: '%s', sin6_flowinfo: %u, sin6_scope_id: %u",
+ pSadrIn6->sin6_family,
+ AF_INET,
+ AF_INET6,
+ hostBfr,
+ servBfr,
+ pSadrIn6->sin6_flowinfo,
+ pSadrIn6->sin6_scope_id);
+ break;
+ }
+
+ default: {
+ debug(D_CONNECT_TO, "Unknown protocol family %d.", ai->ai_family);
+ continue;
+ }
+ }
+
+ fd = socket(ai->ai_family, ai->ai_socktype, ai->ai_protocol);
+ if(fd != -1) {
+ if(timeout) {
+ if(setsockopt(fd, SOL_SOCKET, SO_SNDTIMEO, (char *) timeout, sizeof(struct timeval)) < 0)
+ error("Failed to set timeout on the socket to ip '%s' port '%s'", hostBfr, servBfr);
+ }
+
+ errno = 0;
+ if(connect(fd, ai->ai_addr, ai->ai_addrlen) < 0) {
+ if(errno == EALREADY || errno == EINPROGRESS) {
+ info("Waiting for connection to ip %s port %s to be established", hostBfr, servBfr);
+
+ fd_set fds;
+ FD_ZERO(&fds);
+ FD_SET(0, &fds);
+ int rc = select (1, NULL, &fds, NULL, timeout);
+
+ if(rc > 0 && FD_ISSET(fd, &fds)) {
+ info("connect() to ip %s port %s completed successfully", hostBfr, servBfr);
+ }
+ else if(rc == -1) {
+ error("Failed to connect to '%s', port '%s'. select() returned %d", hostBfr, servBfr, rc);
+ close(fd);
+ fd = -1;
+ }
+ else {
+ error("Timed out while connecting to '%s', port '%s'. select() returned %d", hostBfr, servBfr, rc);
+ close(fd);
+ fd = -1;
+ }
+ }
+ else {
+ error("Failed to connect to '%s', port '%s'", hostBfr, servBfr);
+ close(fd);
+ fd = -1;
+ }
+ }
+
+ if(fd != -1)
+ debug(D_CONNECT_TO, "Connected to '%s' on port '%s'.", hostBfr, servBfr);
+ }
+ }
+
+ freeaddrinfo(ai_head);
+
+ return fd;
+}
+
+// connect_to_this()
+//
+// definition format:
+//
+// [PROTOCOL:]IP[%INTERFACE][:PORT]
+//
+// PROTOCOL = tcp or udp
+// IP = IPv4 or IPv6 IP or hostname, optionally enclosed in [] (required for IPv6)
+// INTERFACE = for IPv6 only, the network interface to use
+// PORT = port number or service name
+
+int connect_to_this(const char *definition, int default_port, struct timeval *timeout) {
+ char buffer[strlen(definition) + 1];
+ strcpy(buffer, definition);
+
+ char default_service[10 + 1];
+ snprintfz(default_service, 10, "%d", default_port);
+
+ char *host = buffer, *service = default_service, *interface = "";
+ int protocol = IPPROTO_TCP, socktype = SOCK_STREAM;
+ uint32_t scope_id = 0;
+
+ if(strncmp(host, "tcp:", 4) == 0) {
+ host += 4;
+ protocol = IPPROTO_TCP;
+ socktype = SOCK_STREAM;
+ }
+ else if(strncmp(host, "udp:", 4) == 0) {
+ host += 4;
+ protocol = IPPROTO_UDP;
+ socktype = SOCK_DGRAM;
+ }
+ else if(strncmp(host, "unix:", 5) == 0) {
+ char *path = host + 5;
+ return connect_to_unix(path, timeout);
+ }
+
+ char *e = host;
+ if(*e == '[') {
+ e = ++host;
+ while(*e && *e != ']') e++;
+ if(*e == ']') {
+ *e = '\0';
+ e++;
+ }
+ }
+ else {
+ while(*e && *e != ':' && *e != '%') e++;
+ }
+
+ if(*e == '%') {
+ *e = '\0';
+ e++;
+ interface = e;
+ while(*e && *e != ':') e++;
+ }
+
+ if(*e == ':') {
+ *e = '\0';
+ e++;
+ service = e;
+ }
+
+ debug(D_CONNECT_TO, "Attempting connection to host = '%s', service = '%s', interface = '%s', protocol = %d (tcp = %d, udp = %d)", host, service, interface, protocol, IPPROTO_TCP, IPPROTO_UDP);
+
+ if(!*host) {
+ error("Definition '%s' does not specify a host.", definition);
+ return -1;
+ }
+
+ if(*interface) {
+ scope_id = if_nametoindex(interface);
+ if(!scope_id)
+ error("Cannot find a network interface named '%s'. Continuing with limiting the network interface", interface);
+ }
+
+ if(!*service)
+ service = default_service;
+
+
+ return connect_to_this_ip46(protocol, socktype, host, scope_id, service, timeout);
+}
+
+int connect_to_one_of(const char *destination, int default_port, struct timeval *timeout, size_t *reconnects_counter, char *connected_to, size_t connected_to_size) {
+ int sock = -1;
+
+ const char *s = destination;
+ while(*s) {
+ const char *e = s;
+
+ // skip path, moving both s(tart) and e(nd)
+ if(*e == '/')
+ while(*e && !isspace(*e) && *e != ',') s = ++e; // also stop at the terminator
+
+ // skip separators, moving both s(tart) and e(nd)
+ while(isspace(*e) || *e == ',') s = ++e;
+
+ // move e(nd) to the first separator
+ while(*e && !isspace(*e) && *e != ',' && *e != '/') e++;
+
+ // is there anything?
+ if(!*s || s == e) break;
+
+ char buf[e - s + 1];
+ strncpyz(buf, s, e - s);
+ if(reconnects_counter) *reconnects_counter += 1;
+ sock = connect_to_this(buf, default_port, timeout);
+ if(sock != -1) {
+ if(connected_to && connected_to_size) {
+ strncpy(connected_to, buf, connected_to_size);
+ connected_to[connected_to_size - 1] = '\0';
+ }
+ break;
+ }
+ s = e;
+ }
+
+ return sock;
+}
+
+
+// --------------------------------------------------------------------------------------------------------------------
+// helpers to send/receive data in one call, in blocking mode, with a timeout
+
+#ifdef ENABLE_HTTPS
+ssize_t recv_timeout(struct netdata_ssl *ssl,int sockfd, void *buf, size_t len, int flags, int timeout) {
+#else
+ssize_t recv_timeout(int sockfd, void *buf, size_t len, int flags, int timeout) {
+#endif
+
+ for(;;) {
+ struct pollfd fd = {
+ .fd = sockfd,
+ .events = POLLIN,
+ .revents = 0
+ };
+
+ errno = 0;
+ int retval = poll(&fd, 1, timeout * 1000);
+
+ if(retval == -1) {
+ // failed
+
+ if(errno == EINTR || errno == EAGAIN)
+ continue;
+
+ return -1;
+ }
+
+ if(!retval) {
+ // timeout
+ return 0;
+ }
+
+ if(fd.events & POLLIN) break;
+ }
+
+#ifdef ENABLE_HTTPS
+ if (ssl->conn) {
+ if (!ssl->flags) {
+ return SSL_read(ssl->conn,buf,len);
+ }
+ }
+#endif
+ return recv(sockfd, buf, len, flags);
+}
+
+#ifdef ENABLE_HTTPS
+ssize_t send_timeout(struct netdata_ssl *ssl,int sockfd, void *buf, size_t len, int flags, int timeout) {
+#else
+ssize_t send_timeout(int sockfd, void *buf, size_t len, int flags, int timeout) {
+#endif
+
+ for(;;) {
+ struct pollfd fd = {
+ .fd = sockfd,
+ .events = POLLOUT,
+ .revents = 0
+ };
+
+ errno = 0;
+ int retval = poll(&fd, 1, timeout * 1000);
+
+ if(retval == -1) {
+ // failed
+
+ if(errno == EINTR || errno == EAGAIN)
+ continue;
+
+ return -1;
+ }
+
+ if(!retval) {
+ // timeout
+ return 0;
+ }
+
+ if(fd.events & POLLOUT) break;
+ }
+
+#ifdef ENABLE_HTTPS
+ if(ssl->conn) {
+ if (!ssl->flags) {
+ return SSL_write(ssl->conn, buf, len);
+ }
+ }
+#endif
+ return send(sockfd, buf, len, flags);
+}
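+// Illustrative usage sketch (assuming ENABLE_HTTPS and an initialized
+// struct netdata_ssl in 'ssl'): wait at most 5 seconds for data.
+//
+//     char buf[1024];
+//     ssize_t rc = recv_timeout(&ssl, sockfd, buf, sizeof(buf), 0, 5);
+//     // rc > 0: bytes received, rc == 0: timeout (or the peer closed), rc < 0: error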
+
+
+// --------------------------------------------------------------------------------------------------------------------
+// accept4() replacement for systems that do not have one
+
+#ifndef HAVE_ACCEPT4
+int accept4(int sock, struct sockaddr *addr, socklen_t *addrlen, int flags) {
+ int fd = accept(sock, addr, addrlen);
+ int newflags = 0;
+
+ if (fd < 0) return fd;
+
+ if (flags & SOCK_NONBLOCK) {
+ newflags |= O_NONBLOCK;
+ flags &= ~SOCK_NONBLOCK;
+ }
+
+#ifdef SOCK_CLOEXEC
+#ifdef O_CLOEXEC
+ if (flags & SOCK_CLOEXEC) {
+ newflags |= O_CLOEXEC;
+ flags &= ~SOCK_CLOEXEC;
+ }
+#endif
+#endif
+
+ if (flags) {
+ close(fd);
+ errno = EINVAL;
+ return -1;
+ }
+
+ if (fcntl(fd, F_SETFL, newflags) < 0) {
+ int saved_errno = errno;
+ close(fd);
+ errno = saved_errno;
+ return -1;
+ }
+
+ return fd;
+}
+#endif
+
+/*
+ * ---------------------------------------------------------------------------------------------------------------------
+ * connection_allowed() - if there is an access list then check the connection matches a pattern.
+ * Numeric patterns are checked against the IP address first, only if they
+ * do not match is the hostname resolved (reverse-DNS) and checked. If the
+ * hostname matches then we perform forward DNS resolution to check the IP
+ * is really associated with the DNS record. This call is repeatable: the
+ * web server may check more refined matches against the connection. Will
+ * update the client_host if uninitialized - ensure the hostsize is the number
+ * of *writable* bytes (i.e. be aware of the strdup used to compact the pollinfo).
+ */
+extern int connection_allowed(int fd, char *client_ip, char *client_host, size_t hostsize, SIMPLE_PATTERN *access_list,
+ const char *patname, int allow_dns)
+{
+ debug(D_LISTENER,"checking %s... (allow_dns=%d)", patname, allow_dns);
+ if (!access_list)
+ return 1;
+ if (simple_pattern_matches(access_list, client_ip))
+ return 1;
+ // If the hostname is unresolved (and needed) then attempt the DNS lookups.
+ //if (client_host[0]==0 && simple_pattern_is_potential_name(access_list))
+ if (client_host[0]==0 && allow_dns)
+ {
+ struct sockaddr_storage sadr;
+ socklen_t addrlen = sizeof(sadr);
+ int err = getpeername(fd, (struct sockaddr*)&sadr, &addrlen);
+ if (err != 0 ||
+ (err = getnameinfo((struct sockaddr *)&sadr, addrlen, client_host, (socklen_t)hostsize,
+ NULL, 0, NI_NAMEREQD)) != 0) {
+ error("Incoming %s on '%s' does not match a numeric pattern, and host could not be resolved (err=%s)",
+ patname, client_ip, gai_strerror(err));
+ if (hostsize >= 8)
+ strcpy(client_host,"UNKNOWN");
+ return 0;
+ }
+ struct addrinfo *addr_infos = NULL;
+ if (getaddrinfo(client_host, NULL, NULL, &addr_infos) !=0 ) {
+ error("LISTENER: cannot validate hostname '%s' from '%s' by resolving it",
+ client_host, client_ip);
+ if (hostsize >= 8)
+ strcpy(client_host,"UNKNOWN");
+ return 0;
+ }
+ struct addrinfo *scan = addr_infos;
+ int validated = 0;
+ while (scan) {
+ char address[INET6_ADDRSTRLEN];
+ address[0] = 0;
+ switch (scan->ai_addr->sa_family) {
+ case AF_INET:
+ inet_ntop(AF_INET, &((struct sockaddr_in*)(scan->ai_addr))->sin_addr, address, INET6_ADDRSTRLEN);
+ break;
+ case AF_INET6:
+ inet_ntop(AF_INET6, &((struct sockaddr_in6*)(scan->ai_addr))->sin6_addr, address, INET6_ADDRSTRLEN);
+ break;
+ }
+ debug(D_LISTENER, "Incoming ip %s rev-resolved onto %s, validating against forward-resolution %s",
+ client_ip, client_host, address);
+ if (!strcmp(client_ip, address)) {
+ validated = 1;
+ break;
+ }
+ scan = scan->ai_next;
+ }
+ if (!validated) {
+ error("LISTENER: Cannot validate '%s' as ip of '%s', not listed in DNS", client_ip, client_host);
+ if (hostsize >= 8)
+ strcpy(client_host,"UNKNOWN");
+ }
+ if (addr_infos!=NULL)
+ freeaddrinfo(addr_infos);
+ }
+ if (!simple_pattern_matches(access_list, client_host)) {
+ debug(D_LISTENER, "Incoming connection on '%s' (%s) does not match allowed pattern for %s",
+ client_ip, client_host, patname);
+ return 0;
+ }
+ return 1;
+}
+
+// --------------------------------------------------------------------------------------------------------------------
+// accept_socket() - accept a socket and store client IP and port
+int accept_socket(int fd, int flags, char *client_ip, size_t ipsize, char *client_port, size_t portsize,
+ char *client_host, size_t hostsize, SIMPLE_PATTERN *access_list, int allow_dns) {
+ struct sockaddr_storage sadr;
+ socklen_t addrlen = sizeof(sadr);
+
+ int nfd = accept4(fd, (struct sockaddr *)&sadr, &addrlen, flags);
+ if (likely(nfd >= 0)) {
+ if (getnameinfo((struct sockaddr *)&sadr, addrlen, client_ip, (socklen_t)ipsize,
+ client_port, (socklen_t)portsize, NI_NUMERICHOST | NI_NUMERICSERV) != 0) {
+ error("LISTENER: cannot getnameinfo() on received client connection.");
+ strncpyz(client_ip, "UNKNOWN", ipsize - 1);
+ strncpyz(client_port, "UNKNOWN", portsize - 1);
+ }
+ if (!strcmp(client_ip, "127.0.0.1") || !strcmp(client_ip, "::1")) {
+ strncpy(client_ip, "localhost", ipsize);
+ client_ip[ipsize - 1] = '\0';
+ }
+
+#ifdef __FreeBSD__
+ if(((struct sockaddr *)&sadr)->sa_family == AF_LOCAL)
+ strncpyz(client_ip, "localhost", ipsize);
+#endif
+
+ client_ip[ipsize - 1] = '\0';
+ client_port[portsize - 1] = '\0';
+
+ switch (((struct sockaddr *)&sadr)->sa_family) {
+ case AF_UNIX:
+ debug(D_LISTENER, "New UNIX domain web client from %s on socket %d.", client_ip, fd);
+ // set the port - certain versions of libc return garbage on unix sockets
+ strncpy(client_port, "UNIX", portsize);
+ client_port[portsize - 1] = '\0';
+ break;
+
+ case AF_INET:
+ debug(D_LISTENER, "New IPv4 web client from %s port %s on socket %d.", client_ip, client_port, fd);
+ break;
+
+ case AF_INET6:
+ if (strncmp(client_ip, "::ffff:", 7) == 0) {
+ memmove(client_ip, &client_ip[7], strlen(&client_ip[7]) + 1);
+ debug(D_LISTENER, "New IPv4 web client from %s port %s on socket %d.", client_ip, client_port, fd);
+ }
+ else
+ debug(D_LISTENER, "New IPv6 web client from %s port %s on socket %d.", client_ip, client_port, fd);
+ break;
+
+ default:
+ debug(D_LISTENER, "New UNKNOWN web client from %s port %s on socket %d.", client_ip, client_port, fd);
+ break;
+ }
+ if (!connection_allowed(nfd, client_ip, client_host, hostsize, access_list, "connection", allow_dns)) {
+ errno = 0;
+ error("Permission denied for client '%s', port '%s'", client_ip, client_port);
+ close(nfd);
+ nfd = -1;
+ errno = EPERM;
+ }
+ }
+#ifdef HAVE_ACCEPT4
+ else if (errno == ENOSYS)
+ error("netdata has been compiled with the assumption that the system has the accept4() call, but it is not here. Recompile netdata like this: ./configure --disable-accept4 ...");
+#endif
+
+ return nfd;
+}
+
+
+// --------------------------------------------------------------------------------------------------------------------
+// poll() based listener
+// this should be the fastest possible listener for up to 100 sockets
+// above 100, an epoll() interface is needed on Linux
+
+#define POLL_FDS_INCREASE_STEP 10
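+
+// The poll job keeps two parallel arrays: p->fds[] (the struct pollfd array
+// handed to poll()) and p->inf[] (per-slot bookkeeping). Free slots are chained
+// through POLLINFO->next starting at p->first_free, and both arrays grow in
+// steps of POLL_FDS_INCREASE_STEP when the chain runs out.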
+
+inline POLLINFO *poll_add_fd(POLLJOB *p
+ , int fd
+ , int socktype
+ , WEB_CLIENT_ACL port_acl
+ , uint32_t flags
+ , const char *client_ip
+ , const char *client_port
+ , const char *client_host
+ , void *(*add_callback)(POLLINFO * /*pi*/, short int * /*events*/, void * /*data*/)
+ , void (*del_callback)(POLLINFO * /*pi*/)
+ , int (*rcv_callback)(POLLINFO * /*pi*/, short int * /*events*/)
+ , int (*snd_callback)(POLLINFO * /*pi*/, short int * /*events*/)
+ , void *data
+) {
+ debug(D_POLLFD, "POLLFD: ADD: request to add fd %d, slots = %zu, used = %zu, min = %zu, max = %zu, next free = %zd", fd, p->slots, p->used, p->min, p->max, p->first_free?(ssize_t)p->first_free->slot:(ssize_t)-1);
+
+ if(unlikely(fd < 0)) return NULL;
+
+ //if(p->limit && p->used >= p->limit) {
+ // info("Max sockets limit reached (%zu sockets), dropping connection", p->used);
+ // close(fd);
+ // return NULL;
+ //}
+
+ if(unlikely(!p->first_free)) {
+ size_t new_slots = p->slots + POLL_FDS_INCREASE_STEP;
+ debug(D_POLLFD, "POLLFD: ADD: increasing size (current = %zu, new = %zu, used = %zu, min = %zu, max = %zu)", p->slots, new_slots, p->used, p->min, p->max);
+
+ p->fds = reallocz(p->fds, sizeof(struct pollfd) * new_slots);
+ p->inf = reallocz(p->inf, sizeof(POLLINFO) * new_slots);
+
+ // reset all the newly added slots
+ ssize_t i;
+ for(i = new_slots - 1; i >= (ssize_t)p->slots ; i--) {
+ debug(D_POLLFD, "POLLFD: ADD: resetting new slot %zd", i);
+ p->fds[i].fd = -1;
+ p->fds[i].events = 0;
+ p->fds[i].revents = 0;
+
+ p->inf[i].p = p;
+ p->inf[i].slot = (size_t)i;
+ p->inf[i].flags = 0;
+ p->inf[i].socktype = -1;
+ p->inf[i].port_acl = -1;
+
+ p->inf[i].client_ip = NULL;
+ p->inf[i].client_port = NULL;
+ p->inf[i].client_host = NULL;
+ p->inf[i].del_callback = p->del_callback;
+ p->inf[i].rcv_callback = p->rcv_callback;
+ p->inf[i].snd_callback = p->snd_callback;
+ p->inf[i].data = NULL;
+
+ // link them so that the first free will be earlier in the array
+ // (we loop decrementing i)
+ p->inf[i].next = p->first_free;
+ p->first_free = &p->inf[i];
+ }
+
+ p->slots = new_slots;
+ }
+
+ POLLINFO *pi = p->first_free;
+ p->first_free = p->first_free->next;
+
+ debug(D_POLLFD, "POLLFD: ADD: selected slot %zu, next free is %zd", pi->slot, p->first_free?(ssize_t)p->first_free->slot:(ssize_t)-1);
+
+ struct pollfd *pf = &p->fds[pi->slot];
+ pf->fd = fd;
+ pf->events = POLLIN;
+ pf->revents = 0;
+
+ pi->fd = fd;
+ pi->p = p;
+ pi->socktype = socktype;
+ pi->port_acl = port_acl;
+ pi->flags = flags;
+ pi->next = NULL;
+ pi->client_ip = strdupz(client_ip);
+ pi->client_port = strdupz(client_port);
+ pi->client_host = strdupz(client_host);
+
+ pi->del_callback = del_callback;
+ pi->rcv_callback = rcv_callback;
+ pi->snd_callback = snd_callback;
+
+ pi->connected_t = now_boottime_sec();
+ pi->last_received_t = 0;
+ pi->last_sent_t = 0;
+ pi->last_sent_t = 0;
+ pi->recv_count = 0;
+ pi->send_count = 0;
+
+ netdata_thread_disable_cancelability();
+ p->used++;
+ if(unlikely(pi->slot > p->max))
+ p->max = pi->slot;
+
+ if(pi->flags & POLLINFO_FLAG_CLIENT_SOCKET) {
+ pi->data = add_callback(pi, &pf->events, data);
+ }
+
+ if(pi->flags & POLLINFO_FLAG_SERVER_SOCKET) {
+ p->min = pi->slot;
+ }
+ netdata_thread_enable_cancelability();
+
+ debug(D_POLLFD, "POLLFD: ADD: completed, slots = %zu, used = %zu, min = %zu, max = %zu, next free = %zd", p->slots, p->used, p->min, p->max, p->first_free?(ssize_t)p->first_free->slot:(ssize_t)-1);
+
+ return pi;
+}
+
+inline void poll_close_fd(POLLINFO *pi) {
+ POLLJOB *p = pi->p;
+
+ struct pollfd *pf = &p->fds[pi->slot];
+ debug(D_POLLFD, "POLLFD: DEL: request to clear slot %zu (fd %d), old next free was %zd", pi->slot, pf->fd, p->first_free?(ssize_t)p->first_free->slot:(ssize_t)-1);
+
+ if(unlikely(pf->fd == -1)) return;
+
+ netdata_thread_disable_cancelability();
+
+ if(pi->flags & POLLINFO_FLAG_CLIENT_SOCKET) {
+ pi->del_callback(pi);
+
+ if(likely(!(pi->flags & POLLINFO_FLAG_DONT_CLOSE))) {
+ if(close(pf->fd) == -1)
+ error("Failed to close() poll_events() socket %d", pf->fd);
+ }
+ }
+
+ pf->fd = -1;
+ pf->events = 0;
+ pf->revents = 0;
+
+ pi->fd = -1;
+ pi->socktype = -1;
+ pi->flags = 0;
+ pi->data = NULL;
+
+ pi->del_callback = NULL;
+ pi->rcv_callback = NULL;
+ pi->snd_callback = NULL;
+
+ freez(pi->client_ip);
+ pi->client_ip = NULL;
+
+ freez(pi->client_port);
+ pi->client_port = NULL;
+
+ freez(pi->client_host);
+ pi->client_host = NULL;
+
+ pi->next = p->first_free;
+ p->first_free = pi;
+
+ p->used--;
+ if(unlikely(p->max == pi->slot)) {
+ p->max = p->min;
+ ssize_t i;
+ for(i = (ssize_t)pi->slot; i > (ssize_t)p->min ;i--) {
+ if (unlikely(p->fds[i].fd != -1)) {
+ p->max = (size_t)i;
+ break;
+ }
+ }
+ }
+ netdata_thread_enable_cancelability();
+
+ debug(D_POLLFD, "POLLFD: DEL: completed, slots = %zu, used = %zu, min = %zu, max = %zu, next free = %zd", p->slots, p->used, p->min, p->max, p->first_free?(ssize_t)p->first_free->slot:(ssize_t)-1);
+}
+
+void *poll_default_add_callback(POLLINFO *pi, short int *events, void *data) {
+ (void)pi;
+ (void)events;
+ (void)data;
+
+ // error("POLLFD: internal error: poll_default_add_callback() called");
+
+ return NULL;
+}
+
+void poll_default_del_callback(POLLINFO *pi) {
+ if(pi->data)
+ error("POLLFD: internal error: del_callback_default() called with data pointer - possible memory leak");
+}
+
+int poll_default_rcv_callback(POLLINFO *pi, short int *events) {
+ *events |= POLLIN;
+
+ char buffer[1024 + 1];
+
+ ssize_t rc;
+ do {
+ rc = recv(pi->fd, buffer, 1024, MSG_DONTWAIT);
+ if (rc < 0) {
+ // read failed
+ if (errno != EWOULDBLOCK && errno != EAGAIN) {
+ error("POLLFD: poll_default_rcv_callback(): recv() failed with %zd.", rc);
+ return -1;
+ }
+        } else if (rc) {
+            // data received - discard it
+            info("POLLFD: internal error: poll_default_rcv_callback() is discarding %zd bytes received on socket %d", rc, pi->fd);
+        } else {
+            // rc == 0 - the remote end closed the connection; report it so the socket gets closed
+            return -1;
+        }
+    } while (rc != -1);
+
+ return 0;
+}
+
+int poll_default_snd_callback(POLLINFO *pi, short int *events) {
+ *events &= ~POLLOUT;
+
+ info("POLLFD: internal error: poll_default_snd_callback(): nothing to send on socket %d", pi->fd);
+ return 0;
+}
+
+void poll_default_tmr_callback(void *timer_data) {
+ (void)timer_data;
+}
+
+static void poll_events_cleanup(void *data) {
+ POLLJOB *p = (POLLJOB *)data;
+
+ size_t i;
+ for(i = 0 ; i <= p->max ; i++) {
+ POLLINFO *pi = &p->inf[i];
+ poll_close_fd(pi);
+ }
+
+ freez(p->fds);
+ freez(p->inf);
+}
+
+static void poll_events_process(POLLJOB *p, POLLINFO *pi, struct pollfd *pf, short int revents, time_t now) {
+ short int events = pf->events;
+ int fd = pf->fd;
+ pf->revents = 0;
+ size_t i = pi->slot;
+
+ if(unlikely(fd == -1)) {
+ debug(D_POLLFD, "POLLFD: LISTENER: ignoring slot %zu, it does not have an fd", i);
+ return;
+ }
+
+ debug(D_POLLFD, "POLLFD: LISTENER: processing events for slot %zu (events = %d, revents = %d)", i, events, revents);
+
+ if(revents & POLLIN || revents & POLLPRI) {
+ // receiving data
+
+ pi->last_received_t = now;
+ pi->recv_count++;
+
+ if(likely(pi->flags & POLLINFO_FLAG_CLIENT_SOCKET)) {
+ // read data from client TCP socket
+ debug(D_POLLFD, "POLLFD: LISTENER: reading data from TCP client slot %zu (fd %d)", i, fd);
+
+ pf->events = 0;
+ if (pi->rcv_callback(pi, &pf->events) == -1) {
+ poll_close_fd(&p->inf[i]);
+ return;
+ }
+ pf = &p->fds[i];
+ pi = &p->inf[i];
+
+#ifdef NETDATA_INTERNAL_CHECKS
+ // this is common - it is used for web server file copies
+ if(unlikely(!(pf->events & (POLLIN|POLLOUT)))) {
+ error("POLLFD: LISTENER: after reading, client slot %zu (fd %d) from %s port %s was left without expecting input or output. ", i, fd, pi->client_ip?pi->client_ip:"<undefined-ip>", pi->client_port?pi->client_port:"<undefined-port>");
+ //poll_close_fd(pi);
+ //return;
+ }
+#endif
+ }
+ else if(likely(pi->flags & POLLINFO_FLAG_SERVER_SOCKET)) {
+ // new connection
+ // debug(D_POLLFD, "POLLFD: LISTENER: accepting connections from slot %zu (fd %d)", i, fd);
+
+ switch(pi->socktype) {
+ case SOCK_STREAM: {
+ // a TCP socket
+ // we accept the connection
+
+ int nfd;
+ do {
+ char client_ip[INET6_ADDRSTRLEN];
+ char client_port[NI_MAXSERV];
+ char client_host[NI_MAXHOST];
+ client_host[0] = 0;
+ client_ip[0] = 0;
+ client_port[0] = 0;
+
+ debug(D_POLLFD, "POLLFD: LISTENER: calling accept4() slot %zu (fd %d)", i, fd);
+ nfd = accept_socket(fd, SOCK_NONBLOCK, client_ip, INET6_ADDRSTRLEN, client_port, NI_MAXSERV,
+ client_host, NI_MAXHOST, p->access_list, p->allow_dns);
+ if (unlikely(nfd < 0)) {
+ // accept failed
+
+ debug(D_POLLFD, "POLLFD: LISTENER: accept4() slot %zu (fd %d) failed.", i, fd);
+
+ if(unlikely(errno == EMFILE)) {
+ error("POLLFD: LISTENER: too many open files - sleeping for 1ms - used by this thread %zu, max for this thread %zu", p->used, p->limit);
+                            usleep(1000); // 1ms
+ }
+ else if(unlikely(errno != EWOULDBLOCK && errno != EAGAIN))
+ error("POLLFD: LISTENER: accept() failed.");
+
+ break;
+ }
+ else {
+ // accept ok
+ // info("POLLFD: LISTENER: client '[%s]:%s' connected to '%s' on fd %d", client_ip, client_port, sockets->fds_names[i], nfd);
+ poll_add_fd(p
+ , nfd
+ , SOCK_STREAM
+ , pi->port_acl
+ , POLLINFO_FLAG_CLIENT_SOCKET
+ , client_ip
+ , client_port
+ , client_host
+ , p->add_callback
+ , p->del_callback
+ , p->rcv_callback
+ , p->snd_callback
+ , NULL
+ );
+
+ // it may have reallocated them, so refresh our pointers
+ pf = &p->fds[i];
+ pi = &p->inf[i];
+ }
+ } while (nfd >= 0 && (!p->limit || p->used < p->limit));
+ break;
+ }
+
+ case SOCK_DGRAM: {
+ // a UDP socket
+ // we read data from the server socket
+
+ debug(D_POLLFD, "POLLFD: LISTENER: reading data from UDP slot %zu (fd %d)", i, fd);
+
+ // TODO: access_list is not applied to UDP
+ // but checking the access list on every UDP packet will destroy
+ // performance, especially for statsd.
+
+ pf->events = 0;
+ pi->rcv_callback(pi, &pf->events);
+ break;
+ }
+
+ default: {
+ error("POLLFD: LISTENER: Unknown socktype %d on slot %zu", pi->socktype, pi->slot);
+ break;
+ }
+ }
+ }
+ }
+
+ if(unlikely(revents & POLLOUT)) {
+ // sending data
+ debug(D_POLLFD, "POLLFD: LISTENER: sending data to socket on slot %zu (fd %d)", i, fd);
+
+ pi->last_sent_t = now;
+ pi->send_count++;
+
+ pf->events = 0;
+ if (pi->snd_callback(pi, &pf->events) == -1) {
+ poll_close_fd(&p->inf[i]);
+ return;
+ }
+ pf = &p->fds[i];
+ pi = &p->inf[i];
+
+#ifdef NETDATA_INTERNAL_CHECKS
+ // this is common - it is used for streaming
+ if(unlikely(pi->flags & POLLINFO_FLAG_CLIENT_SOCKET && !(pf->events & (POLLIN|POLLOUT)))) {
+ error("POLLFD: LISTENER: after sending, client slot %zu (fd %d) from %s port %s was left without expecting input or output. ", i, fd, pi->client_ip?pi->client_ip:"<undefined-ip>", pi->client_port?pi->client_port:"<undefined-port>");
+ //poll_close_fd(pi);
+ //return;
+ }
+#endif
+ }
+
+ if(unlikely(revents & POLLERR)) {
+ error("POLLFD: LISTENER: processing POLLERR events for slot %zu fd %d (events = %d, revents = %d)", i, events, revents, fd);
+ pf->events = 0;
+ poll_close_fd(pi);
+ return;
+ }
+
+ if(unlikely(revents & POLLHUP)) {
+ error("POLLFD: LISTENER: processing POLLHUP events for slot %zu fd %d (events = %d, revents = %d)", i, events, revents, fd);
+ pf->events = 0;
+ poll_close_fd(pi);
+ return;
+ }
+
+ if(unlikely(revents & POLLNVAL)) {
+ error("POLLFD: LISTENER: processing POLLNVAL events for slot %zu fd %d (events = %d, revents = %d)", i, events, revents, fd);
+ pf->events = 0;
+ poll_close_fd(pi);
+ return;
+ }
+}
+
+void poll_events(LISTEN_SOCKETS *sockets
+ , void *(*add_callback)(POLLINFO * /*pi*/, short int * /*events*/, void * /*data*/)
+ , void (*del_callback)(POLLINFO * /*pi*/)
+ , int (*rcv_callback)(POLLINFO * /*pi*/, short int * /*events*/)
+ , int (*snd_callback)(POLLINFO * /*pi*/, short int * /*events*/)
+ , void (*tmr_callback)(void * /*timer_data*/)
+ , SIMPLE_PATTERN *access_list
+ , int allow_dns
+ , void *data
+ , time_t tcp_request_timeout_seconds
+ , time_t tcp_idle_timeout_seconds
+ , time_t timer_milliseconds
+ , void *timer_data
+ , size_t max_tcp_sockets
+) {
+ if(!sockets || !sockets->opened) {
+ error("POLLFD: internal error: no listening sockets are opened");
+ return;
+ }
+
+ if(timer_milliseconds <= 0) timer_milliseconds = 0;
+
+ int retval;
+
+ POLLJOB p = {
+ .slots = 0,
+ .used = 0,
+ .max = 0,
+ .limit = max_tcp_sockets,
+ .fds = NULL,
+ .inf = NULL,
+ .first_free = NULL,
+
+ .complete_request_timeout = tcp_request_timeout_seconds,
+ .idle_timeout = tcp_idle_timeout_seconds,
+ .checks_every = (tcp_idle_timeout_seconds / 3) + 1,
+
+ .access_list = access_list,
+ .allow_dns = allow_dns,
+
+ .timer_milliseconds = timer_milliseconds,
+ .timer_data = timer_data,
+
+ .add_callback = add_callback?add_callback:poll_default_add_callback,
+ .del_callback = del_callback?del_callback:poll_default_del_callback,
+ .rcv_callback = rcv_callback?rcv_callback:poll_default_rcv_callback,
+ .snd_callback = snd_callback?snd_callback:poll_default_snd_callback,
+ .tmr_callback = tmr_callback?tmr_callback:poll_default_tmr_callback
+ };
+
+ size_t i;
+ for(i = 0; i < sockets->opened ;i++) {
+
+ POLLINFO *pi = poll_add_fd(&p
+ , sockets->fds[i]
+ , sockets->fds_types[i]
+ , sockets->fds_acl_flags[i]
+ , POLLINFO_FLAG_SERVER_SOCKET
+ , (sockets->fds_names[i])?sockets->fds_names[i]:"UNKNOWN"
+ , ""
+ , ""
+ , p.add_callback
+ , p.del_callback
+ , p.rcv_callback
+ , p.snd_callback
+ , NULL
+ );
+
+ pi->data = data;
+ info("POLLFD: LISTENER: listening on '%s'", (sockets->fds_names[i])?sockets->fds_names[i]:"UNKNOWN");
+ }
+
+ int listen_sockets_active = 1;
+
+ int timeout_ms = 1000; // in milliseconds
+ time_t last_check = now_boottime_sec();
+
+ usec_t timer_usec = timer_milliseconds * USEC_PER_MS;
+ usec_t now_usec = 0, next_timer_usec = 0, last_timer_usec = 0;
+ (void)last_timer_usec;
+
+ if(unlikely(timer_usec)) {
+ now_usec = now_boottime_usec();
+ next_timer_usec = now_usec - (now_usec % timer_usec) + timer_usec;
+ }
+
+ netdata_thread_cleanup_push(poll_events_cleanup, &p);
+
+ while(!netdata_exit) {
+ if(unlikely(timer_usec)) {
+ now_usec = now_boottime_usec();
+
+ if(unlikely(timer_usec && now_usec >= next_timer_usec)) {
+ debug(D_POLLFD, "Calling timer callback after %zu usec", (size_t)(now_usec - last_timer_usec));
+ last_timer_usec = now_usec;
+ p.tmr_callback(p.timer_data);
+ now_usec = now_boottime_usec();
+ next_timer_usec = now_usec - (now_usec % timer_usec) + timer_usec;
+ }
+
+            usec_t dt_usec = next_timer_usec - now_usec;
+            if(dt_usec < 1000 * USEC_PER_MS)
+                timeout_ms = (int)(dt_usec / USEC_PER_MS); // wake up in time for the next timer
+            else
+                timeout_ms = 1000;
+ }
+
+ // enable or disable the TCP listening sockets, based on the current number of sockets used and the limit set
+ if((listen_sockets_active && (p.limit && p.used >= p.limit)) || (!listen_sockets_active && (!p.limit || p.used < p.limit))) {
+ listen_sockets_active = !listen_sockets_active;
+ info("%s listening sockets (used TCP sockets %zu, max allowed for this worker %zu)", (listen_sockets_active)?"ENABLING":"DISABLING", p.used, p.limit);
+ for (i = 0; i <= p.max; i++) {
+ if(p.inf[i].flags & POLLINFO_FLAG_SERVER_SOCKET && p.inf[i].socktype == SOCK_STREAM) {
+ p.fds[i].events = (short int) ((listen_sockets_active) ? POLLIN : 0);
+ }
+ }
+ }
+
+ debug(D_POLLFD, "POLLFD: LISTENER: Waiting on %zu sockets for %zu ms...", p.max + 1, (size_t)timeout_ms);
+ retval = poll(p.fds, p.max + 1, timeout_ms);
+ time_t now = now_boottime_sec();
+
+ if(unlikely(retval == -1)) {
+ error("POLLFD: LISTENER: poll() failed while waiting on %zu sockets.", p.max + 1);
+ break;
+ }
+ else if(unlikely(!retval)) {
+ debug(D_POLLFD, "POLLFD: LISTENER: poll() timeout.");
+ }
+ else {
+ for (i = 0; i <= p.max; i++) {
+ struct pollfd *pf = &p.fds[i];
+ short int revents = pf->revents;
+ if (unlikely(revents))
+ poll_events_process(&p, &p.inf[i], pf, revents, now);
+ }
+ }
+
+ if(unlikely(p.checks_every > 0 && now - last_check > p.checks_every)) {
+ last_check = now;
+
+ // security checks
+ for(i = 0; i <= p.max; i++) {
+ POLLINFO *pi = &p.inf[i];
+
+ if(likely(pi->flags & POLLINFO_FLAG_CLIENT_SOCKET)) {
+ if (unlikely(pi->send_count == 0 && p.complete_request_timeout > 0 && (now - pi->connected_t) >= p.complete_request_timeout)) {
+ info("POLLFD: LISTENER: client slot %zu (fd %d) from %s port %s has not sent a complete request in %zu seconds - closing it. "
+ , i
+ , pi->fd
+ , pi->client_ip ? pi->client_ip : "<undefined-ip>"
+ , pi->client_port ? pi->client_port : "<undefined-port>"
+ , (size_t) p.complete_request_timeout
+ );
+ poll_close_fd(pi);
+ }
+ else if(unlikely(pi->recv_count && p.idle_timeout > 0 && now - ((pi->last_received_t > pi->last_sent_t) ? pi->last_received_t : pi->last_sent_t) >= p.idle_timeout )) {
+ info("POLLFD: LISTENER: client slot %zu (fd %d) from %s port %s is idle for more than %zu seconds - closing it. "
+ , i
+ , pi->fd
+ , pi->client_ip ? pi->client_ip : "<undefined-ip>"
+ , pi->client_port ? pi->client_port : "<undefined-port>"
+ , (size_t) p.idle_timeout
+ );
+ poll_close_fd(pi);
+ }
+ }
+ }
+ }
+ }
+
+ netdata_thread_cleanup_pop(1);
+ debug(D_POLLFD, "POLLFD: LISTENER: cleanup completed");
+}
diff --git a/libnetdata/socket/socket.h b/libnetdata/socket/socket.h
new file mode 100644
index 0000000..a40d801
--- /dev/null
+++ b/libnetdata/socket/socket.h
@@ -0,0 +1,207 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_SOCKET_H
+#define NETDATA_SOCKET_H
+
+#include "../libnetdata.h"
+
+#ifndef MAX_LISTEN_FDS
+#define MAX_LISTEN_FDS 50
+#endif
+
+typedef enum web_client_acl {
+ WEB_CLIENT_ACL_NONE = 0,
+ WEB_CLIENT_ACL_NOCHECK = 0,
+ WEB_CLIENT_ACL_DASHBOARD = 1 << 0,
+ WEB_CLIENT_ACL_REGISTRY = 1 << 1,
+ WEB_CLIENT_ACL_BADGE = 1 << 2,
+ WEB_CLIENT_ACL_MGMT = 1 << 3,
+ WEB_CLIENT_ACL_STREAMING = 1 << 4,
+ WEB_CLIENT_ACL_NETDATACONF = 1 << 5,
+ WEB_CLIENT_ACL_SSL_OPTIONAL = 1 << 6,
+ WEB_CLIENT_ACL_SSL_FORCE = 1 << 7,
+ WEB_CLIENT_ACL_SSL_DEFAULT = 1 << 8
+} WEB_CLIENT_ACL;
+
+#define web_client_can_access_dashboard(w) ((w)->acl & WEB_CLIENT_ACL_DASHBOARD)
+#define web_client_can_access_registry(w) ((w)->acl & WEB_CLIENT_ACL_REGISTRY)
+#define web_client_can_access_badges(w) ((w)->acl & WEB_CLIENT_ACL_BADGE)
+#define web_client_can_access_mgmt(w) ((w)->acl & WEB_CLIENT_ACL_MGMT)
+#define web_client_can_access_stream(w) ((w)->acl & WEB_CLIENT_ACL_STREAMING)
+#define web_client_can_access_netdataconf(w) ((w)->acl & WEB_CLIENT_ACL_NETDATACONF)
+#define web_client_is_using_ssl_optional(w) ((w)->port_acl & WEB_CLIENT_ACL_SSL_OPTIONAL)
+#define web_client_is_using_ssl_force(w) ((w)->port_acl & WEB_CLIENT_ACL_SSL_FORCE)
+#define web_client_is_using_ssl_default(w) ((w)->port_acl & WEB_CLIENT_ACL_SSL_DEFAULT)
+
+typedef struct listen_sockets {
+ struct config *config; // the config file to use
+ const char *config_section; // the netdata configuration section to read settings from
+ const char *default_bind_to; // the default bind to configuration string
+ uint16_t default_port; // the default port to use
+ int backlog; // the default listen backlog to use
+
+ size_t opened; // the number of sockets opened
+ size_t failed; // the number of sockets attempted to open, but failed
+ int fds[MAX_LISTEN_FDS]; // the open sockets
+ char *fds_names[MAX_LISTEN_FDS]; // descriptions for the open sockets
+ int fds_types[MAX_LISTEN_FDS]; // the socktype for the open sockets (SOCK_STREAM, SOCK_DGRAM)
+ int fds_families[MAX_LISTEN_FDS]; // the family of the open sockets (AF_UNIX, AF_INET, AF_INET6)
+ WEB_CLIENT_ACL fds_acl_flags[MAX_LISTEN_FDS]; // the acl to apply to the open sockets (dashboard, badges, streaming, netdata.conf, management)
+} LISTEN_SOCKETS;
+
+extern char *strdup_client_description(int family, const char *protocol, const char *ip, uint16_t port);
+
+extern int listen_sockets_setup(LISTEN_SOCKETS *sockets);
+extern void listen_sockets_close(LISTEN_SOCKETS *sockets);
+
+extern int connect_to_this(const char *definition, int default_port, struct timeval *timeout);
+extern int connect_to_one_of(const char *destination, int default_port, struct timeval *timeout, size_t *reconnects_counter, char *connected_to, size_t connected_to_size);
+int connect_to_this_ip46(int protocol, int socktype, const char *host, uint32_t scope_id, const char *service, struct timeval *timeout);
+
+#ifdef ENABLE_HTTPS
+extern ssize_t recv_timeout(struct netdata_ssl *ssl,int sockfd, void *buf, size_t len, int flags, int timeout);
+extern ssize_t send_timeout(struct netdata_ssl *ssl,int sockfd, void *buf, size_t len, int flags, int timeout);
+#else
+extern ssize_t recv_timeout(int sockfd, void *buf, size_t len, int flags, int timeout);
+extern ssize_t send_timeout(int sockfd, void *buf, size_t len, int flags, int timeout);
+#endif
+
+extern int sock_setnonblock(int fd);
+extern int sock_delnonblock(int fd);
+extern int sock_setreuse(int fd, int reuse);
+extern int sock_setreuse_port(int fd, int reuse);
+extern int sock_enlarge_in(int fd);
+extern int sock_enlarge_out(int fd);
+
+extern int connection_allowed(int fd, char *client_ip, char *client_host, size_t hostsize,
+ SIMPLE_PATTERN *access_list, const char *patname, int allow_dns);
+extern int accept_socket(int fd, int flags, char *client_ip, size_t ipsize, char *client_port, size_t portsize,
+ char *client_host, size_t hostsize, SIMPLE_PATTERN *access_list, int allow_dns);
+
+#ifndef HAVE_ACCEPT4
+extern int accept4(int sock, struct sockaddr *addr, socklen_t *addrlen, int flags);
+
+#ifndef SOCK_NONBLOCK
+#define SOCK_NONBLOCK 00004000
+#endif /* #ifndef SOCK_NONBLOCK */
+
+#ifndef SOCK_CLOEXEC
+#define SOCK_CLOEXEC 02000000
+#endif /* #ifndef SOCK_CLOEXEC */
+
+#endif /* #ifndef HAVE_ACCEPT4 */
+
+
+// ----------------------------------------------------------------------------
+// poll() based listener
+
+#define POLLINFO_FLAG_SERVER_SOCKET 0x00000001
+#define POLLINFO_FLAG_CLIENT_SOCKET 0x00000002
+#define POLLINFO_FLAG_DONT_CLOSE 0x00000004
+
+typedef struct poll POLLJOB;
+
+typedef struct pollinfo {
+ POLLJOB *p; // the parent
+ size_t slot; // the slot id
+
+ int fd; // the file descriptor
+ int socktype; // the client socket type
+ WEB_CLIENT_ACL port_acl; // the access lists permitted on this web server port (it's -1 for client sockets)
+ char *client_ip; // Max INET6_ADDRSTRLEN bytes
+ char *client_port; // Max NI_MAXSERV bytes
+ char *client_host; // Max NI_MAXHOST bytes
+
+ time_t connected_t; // the time the socket connected
+ time_t last_received_t; // the time the socket last received data
+ time_t last_sent_t; // the time the socket last sent data
+
+ size_t recv_count; // the number of times the socket was ready for inbound traffic
+ size_t send_count; // the number of times the socket was ready for outbound traffic
+
+ uint32_t flags; // internal flags
+
+ // callbacks for this socket
+ void (*del_callback)(struct pollinfo *pi);
+ int (*rcv_callback)(struct pollinfo *pi, short int *events);
+ int (*snd_callback)(struct pollinfo *pi, short int *events);
+
+ // the user data
+ void *data;
+
+ // linking of free pollinfo structures
+ // for quickly finding the next available
+ // this is like a stack, it grows and shrinks
+ // (with gaps - lower empty slots are preferred)
+ struct pollinfo *next;
+} POLLINFO;
+
+struct poll {
+ size_t slots;
+ size_t used;
+ size_t min;
+ size_t max;
+
+ size_t limit;
+
+ time_t complete_request_timeout;
+ time_t idle_timeout;
+ time_t checks_every;
+
+ time_t timer_milliseconds;
+ void *timer_data;
+
+ struct pollfd *fds;
+ struct pollinfo *inf;
+ struct pollinfo *first_free;
+
+ SIMPLE_PATTERN *access_list;
+ int allow_dns;
+
+ void *(*add_callback)(POLLINFO *pi, short int *events, void *data);
+ void (*del_callback)(POLLINFO *pi);
+ int (*rcv_callback)(POLLINFO *pi, short int *events);
+ int (*snd_callback)(POLLINFO *pi, short int *events);
+ void (*tmr_callback)(void *timer_data);
+};
+
+#define pollinfo_from_slot(p, slot) (&((p)->inf[(slot)]))
+
+extern int poll_default_snd_callback(POLLINFO *pi, short int *events);
+extern int poll_default_rcv_callback(POLLINFO *pi, short int *events);
+extern void poll_default_del_callback(POLLINFO *pi);
+extern void *poll_default_add_callback(POLLINFO *pi, short int *events, void *data);
+
+extern POLLINFO *poll_add_fd(POLLJOB *p
+ , int fd
+ , int socktype
+ , WEB_CLIENT_ACL port_acl
+ , uint32_t flags
+ , const char *client_ip
+ , const char *client_port
+ , const char *client_host
+ , void *(*add_callback)(POLLINFO *pi, short int *events, void *data)
+ , void (*del_callback)(POLLINFO *pi)
+ , int (*rcv_callback)(POLLINFO *pi, short int *events)
+ , int (*snd_callback)(POLLINFO *pi, short int *events)
+ , void *data
+);
+extern void poll_close_fd(POLLINFO *pi);
+
+extern void poll_events(LISTEN_SOCKETS *sockets
+ , void *(*add_callback)(POLLINFO *pi, short int *events, void *data)
+ , void (*del_callback)(POLLINFO *pi)
+ , int (*rcv_callback)(POLLINFO *pi, short int *events)
+ , int (*snd_callback)(POLLINFO *pi, short int *events)
+ , void (*tmr_callback)(void *timer_data)
+ , SIMPLE_PATTERN *access_list
+ , int allow_dns
+ , void *data
+ , time_t tcp_request_timeout_seconds
+ , time_t tcp_idle_timeout_seconds
+ , time_t timer_milliseconds
+ , void *timer_data
+ , size_t max_tcp_sockets
+);
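+
+// A minimal usage sketch (illustrative; my_rcv_cb and my_snd_cb are
+// hypothetical caller-supplied callbacks - any callback may be NULL to
+// fall back to the poll_default_* implementations above):
+//
+//    LISTEN_SOCKETS sockets = { .config = &netdata_config, /* ... */ };
+//    listen_sockets_setup(&sockets);
+//    poll_events(&sockets, NULL, NULL, my_rcv_cb, my_snd_cb, NULL,
+//                NULL, 0, NULL, 60, 60, 0, NULL, 0);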
+
+#endif //NETDATA_SOCKET_H
diff --git a/libnetdata/statistical/Makefile.am b/libnetdata/statistical/Makefile.am
new file mode 100644
index 0000000..161784b
--- /dev/null
+++ b/libnetdata/statistical/Makefile.am
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/libnetdata/statistical/README.md b/libnetdata/statistical/README.md
new file mode 100644
index 0000000..ee1faba
--- /dev/null
+++ b/libnetdata/statistical/README.md
@@ -0,0 +1,5 @@
+<!--
+custom_edit_url: https://github.com/netdata/netdata/edit/master/libnetdata/statistical/README.md
+-->
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Flibnetdata%2Fstatistical%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/libnetdata/statistical/statistical.c b/libnetdata/statistical/statistical.c
new file mode 100644
index 0000000..ba8f0c4
--- /dev/null
+++ b/libnetdata/statistical/statistical.c
@@ -0,0 +1,452 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "../libnetdata.h"
+
+LONG_DOUBLE default_single_exponential_smoothing_alpha = 0.1;
+
+void log_series_to_stderr(LONG_DOUBLE *series, size_t entries, calculated_number result, const char *msg) {
+ const LONG_DOUBLE *value, *end = &series[entries];
+
+ fprintf(stderr, "%s of %zu entries [ ", msg, entries);
+ for(value = series; value < end ;value++) {
+ if(value != series) fprintf(stderr, ", ");
+ fprintf(stderr, "%" LONG_DOUBLE_MODIFIER, *value);
+ }
+ fprintf(stderr, " ] results in " CALCULATED_NUMBER_FORMAT "\n", result);
+}
+
+// --------------------------------------------------------------------------------------------------------------------
+
+inline LONG_DOUBLE sum_and_count(const LONG_DOUBLE *series, size_t entries, size_t *count) {
+ const LONG_DOUBLE *value, *end = &series[entries];
+ LONG_DOUBLE sum = 0;
+ size_t c = 0;
+
+ for(value = series; value < end ; value++) {
+ if(calculated_number_isnumber(*value)) {
+ sum += *value;
+ c++;
+ }
+ }
+
+ if(unlikely(!c)) sum = NAN;
+ if(likely(count)) *count = c;
+
+ return sum;
+}
+
+inline LONG_DOUBLE sum(const LONG_DOUBLE *series, size_t entries) {
+ return sum_and_count(series, entries, NULL);
+}
+
+inline LONG_DOUBLE average(const LONG_DOUBLE *series, size_t entries) {
+ size_t count = 0;
+ LONG_DOUBLE sum = sum_and_count(series, entries, &count);
+
+ if(unlikely(!count)) return NAN;
+ return sum / (LONG_DOUBLE)count;
+}
+
+// --------------------------------------------------------------------------------------------------------------------
+
+LONG_DOUBLE moving_average(const LONG_DOUBLE *series, size_t entries, size_t period) {
+    if(unlikely(!period)) // period is unsigned; only zero is invalid
+ return 0.0;
+
+ size_t i, count;
+ LONG_DOUBLE sum = 0, avg = 0;
+ LONG_DOUBLE p[period];
+
+ for(count = 0; count < period ; count++)
+ p[count] = 0.0;
+
+ for(i = 0, count = 0; i < entries; i++) {
+ LONG_DOUBLE value = series[i];
+ if(unlikely(!calculated_number_isnumber(value))) continue;
+
+ if(unlikely(count < period)) {
+ sum += value;
+ avg = (count == period - 1) ? sum / (LONG_DOUBLE)period : 0;
+ }
+ else {
+ sum = sum - p[count % period] + value;
+ avg = sum / (LONG_DOUBLE)period;
+ }
+
+ p[count % period] = value;
+ count++;
+ }
+
+ return avg;
+}
+
+// --------------------------------------------------------------------------------------------------------------------
+
+static int qsort_compare(const void *a, const void *b) {
+ LONG_DOUBLE *p1 = (LONG_DOUBLE *)a, *p2 = (LONG_DOUBLE *)b;
+ LONG_DOUBLE n1 = *p1, n2 = *p2;
+
+ if(unlikely(isnan(n1) || isnan(n2))) {
+ if(isnan(n1) && !isnan(n2)) return -1;
+ if(!isnan(n1) && isnan(n2)) return 1;
+ return 0;
+ }
+ if(unlikely(isinf(n1) || isinf(n2))) {
+ if(!isinf(n1) && isinf(n2)) return -1;
+ if(isinf(n1) && !isinf(n2)) return 1;
+ return 0;
+ }
+
+ if(unlikely(n1 < n2)) return -1;
+ if(unlikely(n1 > n2)) return 1;
+ return 0;
+}
+
+inline void sort_series(LONG_DOUBLE *series, size_t entries) {
+ qsort(series, entries, sizeof(LONG_DOUBLE), qsort_compare);
+}
+
+inline LONG_DOUBLE *copy_series(const LONG_DOUBLE *series, size_t entries) {
+ LONG_DOUBLE *copy = mallocz(sizeof(LONG_DOUBLE) * entries);
+ memcpy(copy, series, sizeof(LONG_DOUBLE) * entries);
+ return copy;
+}
+
+LONG_DOUBLE median_on_sorted_series(const LONG_DOUBLE *series, size_t entries) {
+ if(unlikely(entries == 0)) return NAN;
+ if(unlikely(entries == 1)) return series[0];
+ if(unlikely(entries == 2)) return (series[0] + series[1]) / 2;
+
+ LONG_DOUBLE average;
+ if(entries % 2 == 0) {
+ size_t m = entries / 2;
+ average = (series[m] + series[m + 1]) / 2;
+ }
+ else {
+ average = series[entries / 2];
+ }
+
+ return average;
+}
+
+LONG_DOUBLE median(const LONG_DOUBLE *series, size_t entries) {
+ if(unlikely(entries == 0)) return NAN;
+ if(unlikely(entries == 1)) return series[0];
+
+ if(unlikely(entries == 2))
+ return (series[0] + series[1]) / 2;
+
+ LONG_DOUBLE *copy = copy_series(series, entries);
+ sort_series(copy, entries);
+
+ LONG_DOUBLE avg = median_on_sorted_series(copy, entries);
+
+ freez(copy);
+ return avg;
+}
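+
+// e.g. for the series {4, 1, 3, 2}, median() sorts a copy to {1, 2, 3, 4}
+// and returns (2 + 3) / 2 = 2.5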
+
+// --------------------------------------------------------------------------------------------------------------------
+
+LONG_DOUBLE moving_median(const LONG_DOUBLE *series, size_t entries, size_t period) {
+ if(entries <= period)
+ return median(series, entries);
+
+ LONG_DOUBLE *data = copy_series(series, entries);
+
+ size_t i;
+ for(i = period; i < entries; i++) {
+ data[i - period] = median(&series[i - period], period);
+ }
+
+ LONG_DOUBLE avg = median(data, entries - period);
+ freez(data);
+ return avg;
+}
+
+// --------------------------------------------------------------------------------------------------------------------
+
+// http://stackoverflow.com/a/15150143/4525767
+LONG_DOUBLE running_median_estimate(const LONG_DOUBLE *series, size_t entries) {
+ LONG_DOUBLE median = 0.0f;
+ LONG_DOUBLE average = 0.0f;
+ size_t i;
+
+ for(i = 0; i < entries ; i++) {
+ LONG_DOUBLE value = series[i];
+ if(unlikely(!calculated_number_isnumber(value))) continue;
+
+ average += ( value - average ) * 0.1f; // rough running average.
+ median += copysignl( average * 0.01, value - median );
+ }
+
+ return median;
+}
+
+// --------------------------------------------------------------------------------------------------------------------
+
+LONG_DOUBLE standard_deviation(const LONG_DOUBLE *series, size_t entries) {
+ if(unlikely(entries == 0)) return NAN;
+ if(unlikely(entries == 1)) return series[0];
+
+ const LONG_DOUBLE *value, *end = &series[entries];
+ size_t count;
+ LONG_DOUBLE sum;
+
+ for(count = 0, sum = 0, value = series ; value < end ;value++) {
+ if(likely(calculated_number_isnumber(*value))) {
+ count++;
+ sum += *value;
+ }
+ }
+
+ if(unlikely(count == 0)) return NAN;
+ if(unlikely(count == 1)) return sum;
+
+ LONG_DOUBLE average = sum / (LONG_DOUBLE)count;
+
+ for(count = 0, sum = 0, value = series ; value < end ;value++) {
+ if(calculated_number_isnumber(*value)) {
+ count++;
+ sum += powl(*value - average, 2);
+ }
+ }
+
+ if(unlikely(count == 0)) return NAN;
+ if(unlikely(count == 1)) return average;
+
+ LONG_DOUBLE variance = sum / (LONG_DOUBLE)(count); // remove -1 from count to have a population stddev
+ LONG_DOUBLE stddev = sqrtl(variance);
+ return stddev;
+}
+
+// --------------------------------------------------------------------------------------------------------------------
+
+LONG_DOUBLE single_exponential_smoothing(const LONG_DOUBLE *series, size_t entries, LONG_DOUBLE alpha) {
+ if(unlikely(entries == 0))
+ return NAN;
+
+ if(unlikely(isnan(alpha)))
+ alpha = default_single_exponential_smoothing_alpha;
+
+ const LONG_DOUBLE *value = series, *end = &series[entries];
+ LONG_DOUBLE level = (1.0 - alpha) * (*value);
+
+ for(value++ ; value < end; value++) {
+ if(likely(calculated_number_isnumber(*value)))
+ level = alpha * (*value) + (1.0 - alpha) * level;
+ }
+
+ return level;
+}
+
+LONG_DOUBLE single_exponential_smoothing_reverse(const LONG_DOUBLE *series, size_t entries, LONG_DOUBLE alpha) {
+ if(unlikely(entries == 0))
+ return NAN;
+
+ if(unlikely(isnan(alpha)))
+ alpha = default_single_exponential_smoothing_alpha;
+
+    const LONG_DOUBLE *value = &series[entries - 1];
+    LONG_DOUBLE level = (1.0 - alpha) * (*value);
+
+    for(value-- ; value >= series; value--) {
+ if(likely(calculated_number_isnumber(*value)))
+ level = alpha * (*value) + (1.0 - alpha) * level;
+ }
+
+ return level;
+}
+
+// --------------------------------------------------------------------------------------------------------------------
+
+// http://grisha.org/blog/2016/02/16/triple-exponential-smoothing-forecasting-part-ii/
+LONG_DOUBLE double_exponential_smoothing(const LONG_DOUBLE *series, size_t entries, LONG_DOUBLE alpha, LONG_DOUBLE beta, LONG_DOUBLE *forecast) {
+ if(unlikely(entries == 0))
+ return NAN;
+
+ LONG_DOUBLE level, trend;
+
+ if(unlikely(isnan(alpha)))
+ alpha = 0.3;
+
+ if(unlikely(isnan(beta)))
+ beta = 0.05;
+
+ level = series[0];
+
+ if(likely(entries > 1))
+ trend = series[1] - series[0];
+ else
+ trend = 0;
+
+    const LONG_DOUBLE *value = series, *end = &series[entries];
+    for(value++ ; value < end; value++) {
+ if(likely(calculated_number_isnumber(*value))) {
+
+ LONG_DOUBLE last_level = level;
+ level = alpha * *value + (1.0 - alpha) * (level + trend);
+ trend = beta * (level - last_level) + (1.0 - beta) * trend;
+
+ }
+ }
+
+ if(forecast)
+ *forecast = level + trend;
+
+ return level;
+}
+
+// --------------------------------------------------------------------------------------------------------------------
+
+/*
+ * Based on the R implementation
+ *
+ * a: level component
+ * b: trend component
+ * s: seasonal component
+ *
+ * Additive:
+ *
+ * Yhat[t+h] = a[t] + h * b[t] + s[t + 1 + (h - 1) mod p],
+ * a[t] = α (Y[t] - s[t-p]) + (1-α) (a[t-1] + b[t-1])
+ * b[t] = β (a[t] - a[t-1]) + (1-β) b[t-1]
+ * s[t] = γ (Y[t] - a[t]) + (1-γ) s[t-p]
+ *
+ * Multiplicative:
+ *
+ * Yhat[t+h] = (a[t] + h * b[t]) * s[t + 1 + (h - 1) mod p],
+ * a[t] = α (Y[t] / s[t-p]) + (1-α) (a[t-1] + b[t-1])
+ * b[t] = β (a[t] - a[t-1]) + (1-β) b[t-1]
+ * s[t] = γ (Y[t] / a[t]) + (1-γ) s[t-p]
+ */
+static int __HoltWinters(
+ const LONG_DOUBLE *series,
+ int entries, // start_time + h
+
+ LONG_DOUBLE alpha, // alpha parameter of Holt-Winters Filter.
+ LONG_DOUBLE beta, // beta parameter of Holt-Winters Filter. If set to 0, the function will do exponential smoothing.
+    LONG_DOUBLE gamma,  // gamma parameter used for the seasonal component. If set to 0, a non-seasonal model is fitted.
+
+ const int *seasonal,
+ const int *period,
+ const LONG_DOUBLE *a, // Start value for level (a[0]).
+ const LONG_DOUBLE *b, // Start value for trend (b[0]).
+ LONG_DOUBLE *s, // Vector of start values for the seasonal component (s_1[0] ... s_p[0])
+
+ /* return values */
+ LONG_DOUBLE *SSE, // The final sum of squared errors achieved in optimizing
+ LONG_DOUBLE *level, // Estimated values for the level component (size entries - t + 2)
+ LONG_DOUBLE *trend, // Estimated values for the trend component (size entries - t + 2)
+ LONG_DOUBLE *season // Estimated values for the seasonal component (size entries - t + 2)
+)
+{
+ if(unlikely(entries < 4))
+ return 0;
+
+ int start_time = 2;
+
+ LONG_DOUBLE res = 0, xhat = 0, stmp = 0;
+ int i, i0, s0;
+
+ /* copy start values to the beginning of the vectors */
+ level[0] = *a;
+ if(beta > 0) trend[0] = *b;
+ if(gamma > 0) memcpy(season, s, *period * sizeof(LONG_DOUBLE));
+
+ for(i = start_time - 1; i < entries; i++) {
+ /* indices for period i */
+ i0 = i - start_time + 2;
+ s0 = i0 + *period - 1;
+
+ /* forecast *for* period i */
+ xhat = level[i0 - 1] + (beta > 0 ? trend[i0 - 1] : 0);
+ stmp = gamma > 0 ? season[s0 - *period] : (*seasonal != 1);
+ if (*seasonal == 1)
+ xhat += stmp;
+ else
+ xhat *= stmp;
+
+ /* Sum of Squared Errors */
+ res = series[i] - xhat;
+ *SSE += res * res;
+
+ /* estimate of level *in* period i */
+ if (*seasonal == 1)
+ level[i0] = alpha * (series[i] - stmp)
+ + (1 - alpha) * (level[i0 - 1] + trend[i0 - 1]);
+ else
+ level[i0] = alpha * (series[i] / stmp)
+ + (1 - alpha) * (level[i0 - 1] + trend[i0 - 1]);
+
+ /* estimate of trend *in* period i */
+ if (beta > 0)
+ trend[i0] = beta * (level[i0] - level[i0 - 1])
+ + (1 - beta) * trend[i0 - 1];
+
+ /* estimate of seasonal component *in* period i */
+ if (gamma > 0) {
+ if (*seasonal == 1)
+ season[s0] = gamma * (series[i] - level[i0])
+ + (1 - gamma) * stmp;
+ else
+ season[s0] = gamma * (series[i] / level[i0])
+ + (1 - gamma) * stmp;
+ }
+ }
+
+ return 1;
+}
+
+LONG_DOUBLE holtwinters(const LONG_DOUBLE *series, size_t entries, LONG_DOUBLE alpha, LONG_DOUBLE beta, LONG_DOUBLE gamma, LONG_DOUBLE *forecast) {
+ if(unlikely(isnan(alpha)))
+ alpha = 0.3;
+
+ if(unlikely(isnan(beta)))
+ beta = 0.05;
+
+ if(unlikely(isnan(gamma)))
+ gamma = 0;
+
+ int seasonal = 0;
+ int period = 0;
+ LONG_DOUBLE a0 = series[0];
+ LONG_DOUBLE b0 = 0;
+    LONG_DOUBLE s[1] = { 0 }; // seasonal start values; unused here because period is 0
+
+ LONG_DOUBLE errors = 0.0;
+ size_t nb_computations = entries;
+ LONG_DOUBLE *estimated_level = callocz(nb_computations, sizeof(LONG_DOUBLE));
+ LONG_DOUBLE *estimated_trend = callocz(nb_computations, sizeof(LONG_DOUBLE));
+ LONG_DOUBLE *estimated_season = callocz(nb_computations, sizeof(LONG_DOUBLE));
+
+ int ret = __HoltWinters(
+ series,
+ (int)entries,
+ alpha,
+ beta,
+ gamma,
+ &seasonal,
+ &period,
+ &a0,
+ &b0,
+ s,
+ &errors,
+ estimated_level,
+ estimated_trend,
+ estimated_season
+ );
+
+ LONG_DOUBLE value = estimated_level[nb_computations - 1];
+
+ if(forecast)
+ *forecast = 0.0;
+
+ freez(estimated_level);
+ freez(estimated_trend);
+ freez(estimated_season);
+
+ if(!ret)
+ return 0.0;
+
+ return value;
+}
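+
+// Usage sketch (illustrative): passing NAN for the parameters selects the
+// defaults above (alpha 0.3, beta 0.05, gamma 0):
+//
+//    LONG_DOUBLE forecast;
+//    LONG_DOUBLE level = holtwinters(series, entries, NAN, NAN, NAN, &forecast);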
diff --git a/libnetdata/statistical/statistical.h b/libnetdata/statistical/statistical.h
new file mode 100644
index 0000000..bbf241f
--- /dev/null
+++ b/libnetdata/statistical/statistical.h
@@ -0,0 +1,26 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_STATISTICAL_H
+#define NETDATA_STATISTICAL_H 1
+
+#include "../libnetdata.h"
+
+extern void log_series_to_stderr(LONG_DOUBLE *series, size_t entries, calculated_number result, const char *msg);
+
+extern LONG_DOUBLE average(const LONG_DOUBLE *series, size_t entries);
+extern LONG_DOUBLE moving_average(const LONG_DOUBLE *series, size_t entries, size_t period);
+extern LONG_DOUBLE median(const LONG_DOUBLE *series, size_t entries);
+extern LONG_DOUBLE moving_median(const LONG_DOUBLE *series, size_t entries, size_t period);
+extern LONG_DOUBLE running_median_estimate(const LONG_DOUBLE *series, size_t entries);
+extern LONG_DOUBLE standard_deviation(const LONG_DOUBLE *series, size_t entries);
+extern LONG_DOUBLE single_exponential_smoothing(const LONG_DOUBLE *series, size_t entries, LONG_DOUBLE alpha);
+extern LONG_DOUBLE single_exponential_smoothing_reverse(const LONG_DOUBLE *series, size_t entries, LONG_DOUBLE alpha);
+extern LONG_DOUBLE double_exponential_smoothing(const LONG_DOUBLE *series, size_t entries, LONG_DOUBLE alpha, LONG_DOUBLE beta, LONG_DOUBLE *forecast);
+extern LONG_DOUBLE holtwinters(const LONG_DOUBLE *series, size_t entries, LONG_DOUBLE alpha, LONG_DOUBLE beta, LONG_DOUBLE gamma, LONG_DOUBLE *forecast);
+extern LONG_DOUBLE sum_and_count(const LONG_DOUBLE *series, size_t entries, size_t *count);
+extern LONG_DOUBLE sum(const LONG_DOUBLE *series, size_t entries);
+extern LONG_DOUBLE median_on_sorted_series(const LONG_DOUBLE *series, size_t entries);
+extern LONG_DOUBLE *copy_series(const LONG_DOUBLE *series, size_t entries);
+extern void sort_series(LONG_DOUBLE *series, size_t entries);
+
+#endif //NETDATA_STATISTICAL_H
diff --git a/libnetdata/storage_number/Makefile.am b/libnetdata/storage_number/Makefile.am
new file mode 100644
index 0000000..c5f8450
--- /dev/null
+++ b/libnetdata/storage_number/Makefile.am
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+SUBDIRS = \
+ tests \
+ $(NULL)
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/libnetdata/storage_number/README.md b/libnetdata/storage_number/README.md
new file mode 100644
index 0000000..295b3d6
--- /dev/null
+++ b/libnetdata/storage_number/README.md
@@ -0,0 +1,17 @@
+<!--
+title: "Netdata storage number"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/libnetdata/storage_number/README.md
+-->
+
+# Netdata storage number
+
+Although `netdata` does all its calculations using `long double`, it stores all values using
+a **custom-made 32-bit number**.
+
+This custom-made number uses 29 bits to store values from `-167772150000000.0` to `167772150000000.0`
+with a precision of 0.00001 (it is a floating point number, so higher integer values
+have less decimal precision), and 3 bits for flags.
+
+This provides an extremely compact memory footprint, with a maximum accuracy loss of just 0.0001%.
+
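+For illustration, a minimal sketch of the round trip (assuming the `libnetdata`
+headers are included):
+
+```c
+storage_number sn = pack_storage_number(123.4567, SN_EXISTS);
+calculated_number back = unpack_storage_number(sn); // ~123.4567, within the accepted loss
+```
+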
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Flibnetdata%2Fstorage_number%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/libnetdata/storage_number/storage_number.c b/libnetdata/storage_number/storage_number.c
new file mode 100644
index 0000000..8ef1353
--- /dev/null
+++ b/libnetdata/storage_number/storage_number.c
@@ -0,0 +1,252 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "../libnetdata.h"
+
+storage_number pack_storage_number(calculated_number value, uint32_t flags) {
+ // bit 32 = sign 0:positive, 1:negative
+ // bit 31 = 0:divide, 1:multiply
+ // bit 30, 29, 28 = (multiplier or divider) 0-7 (8 total)
+ // bit 27 SN_EXISTS_100
+ // bit 26 SN_EXISTS_RESET
+ // bit 25 SN_EXISTS
+ // bit 24 to bit 1 = the value
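+    //
+    // Worked example (illustrative): packing 1234567.891 multiplies the
+    // value by 10 five... no - once by 10 so it uses more of the 24-bit
+    // field (m = 1, the multiply bit stays 0 so unpacking divides by 10),
+    // stores 12345679 after rounding, and unpacks back to 1234567.9 --
+    // well within the accepted 0.0001% accuracy loss.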
+
+ storage_number r = get_storage_number_flags(flags);
+ if(!value) return r;
+
+ int m = 0;
+ calculated_number n = value, factor = 10;
+
+ // if the value is negative
+ // add the sign bit and make it positive
+ if(n < 0) {
+ r += (1 << 31); // the sign bit 32
+ n = -n;
+ }
+
+ if(n / 10000000.0 > 0x00ffffff) {
+ factor = 100;
+ r |= SN_EXISTS_100;
+ }
+
+ // make its integer part fit in 0x00ffffff
+ // by dividing it by 10 up to 7 times
+ // and increasing the multiplier
+ while(m < 7 && n > (calculated_number)0x00ffffff) {
+ n /= factor;
+ m++;
+ }
+
+ if(m) {
+ // the value was too big and we divided it
+ // so we add a multiplier to unpack it
+ r += (1 << 30) + (m << 27); // the multiplier m
+
+ if(n > (calculated_number)0x00ffffff) {
+ #ifdef NETDATA_INTERNAL_CHECKS
+ error("Number " CALCULATED_NUMBER_FORMAT " is too big.", value);
+ #endif
+ r += 0x00ffffff;
+ return r;
+ }
+ }
+ else {
+ // 0x0019999e is the number that can be multiplied
+ // by 10 to give 0x00ffffff
+ // while the value is below 0x0019999e we can
+ // multiply it by 10, up to 7 times, increasing
+ // the multiplier
+ while(m < 7 && n < (calculated_number)0x0019999e) {
+ n *= 10;
+ m++;
+ }
+
+ if (unlikely(n > (calculated_number) (0x00ffffff))) {
+ n /= 10;
+ m--;
+ }
+ // the value was small enough and we multiplied it
+ // so we add a divider to unpack it
+ r += (0 << 30) + (m << 27); // the divider m
+ }
+
+#ifdef STORAGE_WITH_MATH
+ // without this there are rounding problems
+ // example: 0.9 becomes 0.89
+ r += lrint((double) n);
+#else
+ r += (storage_number)n;
+#endif
+
+ return r;
+}
+
+calculated_number unpack_storage_number(storage_number value) {
+ if(!value) return 0;
+
+ int sign = 0, exp = 0;
+ int factor = 10;
+
+ // bit 32 = 0:positive, 1:negative
+ if(unlikely(value & (1 << 31)))
+ sign = 1;
+
+ // bit 31 = 0:divide, 1:multiply
+ if(unlikely(value & (1 << 30)))
+ exp = 1;
+
+ // bit 27 SN_EXISTS_100
+ if(unlikely(value & (1 << 26)))
+ factor = 100;
+
+ // bit 26 SN_EXISTS_RESET
+ // bit 25 SN_EXISTS
+
+ // bit 30, 29, 28 = (multiplier or divider) 0-7 (8 total)
+ int mul = (value & ((1<<29)|(1<<28)|(1<<27))) >> 27;
+
+ // bit 24 to bit 1 = the value, so remove all other bits
+ value ^= value & ((1<<31)|(1<<30)|(1<<29)|(1<<28)|(1<<27)|(1<<26)|(1<<25)|(1<<24));
+
+ calculated_number n = value;
+
+ // fprintf(stderr, "UNPACK: %08X, sign = %d, exp = %d, mul = %d, factor = %d, n = " CALCULATED_NUMBER_FORMAT "\n", value, sign, exp, mul, factor, n);
+
+ if(exp) {
+ for(; mul; mul--)
+ n *= factor;
+ }
+ else {
+ for( ; mul ; mul--)
+ n /= 10;
+ }
+
+ if(sign) n = -n;
+ return n;
+}
+
+/*
+int print_calculated_number(char *str, calculated_number value)
+{
+ char *wstr = str;
+
+ int sign = (value < 0) ? 1 : 0;
+ if(sign) value = -value;
+
+#ifdef STORAGE_WITH_MATH
+ // without llrintl() there are rounding problems
+ // for example 0.9 becomes 0.89
+ unsigned long long uvalue = (unsigned long long int) llrintl(value * (calculated_number)100000);
+#else
+ unsigned long long uvalue = value * (calculated_number)100000;
+#endif
+
+ wstr = print_number_llu_r_smart(str, uvalue);
+
+ // make sure we have 6 bytes at least
+ while((wstr - str) < 6) *wstr++ = '0';
+
+ // put the sign back
+ if(sign) *wstr++ = '-';
+
+ // reverse it
+ char *begin = str, *end = --wstr, aux;
+ while (end > begin) aux = *end, *end-- = *begin, *begin++ = aux;
+ // wstr--;
+ // strreverse(str, wstr);
+
+ // remove trailing zeros
+ int decimal = 5;
+ while(decimal > 0 && *wstr == '0') {
+ *wstr-- = '\0';
+ decimal--;
+ }
+
+ // terminate it, one position to the right
+ // to let space for a dot
+ wstr[2] = '\0';
+
+ // make space for the dot
+ int i;
+ for(i = 0; i < decimal ;i++) {
+ wstr[1] = wstr[0];
+ wstr--;
+ }
+
+ // put the dot
+ if(wstr[2] == '\0') { wstr[1] = '\0'; decimal--; }
+ else wstr[1] = '.';
+
+ // return the buffer length
+ return (int) ((wstr - str) + 2 + decimal );
+}
+*/
+
+int print_calculated_number(char *str, calculated_number value) {
+ // info("printing number " CALCULATED_NUMBER_FORMAT, value);
+ char integral_str[50], fractional_str[50];
+
+ char *wstr = str;
+
+ if(unlikely(value < 0)) {
+ *wstr++ = '-';
+ value = -value;
+ }
+
+ calculated_number integral, fractional;
+
+#ifdef STORAGE_WITH_MATH
+    fractional = calculated_number_modf(value, &integral) * 10000000.0;
+#else
+    integral = (calculated_number)(unsigned long long)value; // truncate to the integral part
+    fractional = ((unsigned long long)(value * 10000000ULL) % 10000000ULL);
+#endif
+
+ unsigned long long integral_int = (unsigned long long)integral;
+ unsigned long long fractional_int = (unsigned long long)calculated_number_llrint(fractional);
+ if(unlikely(fractional_int >= 10000000)) {
+ integral_int += 1;
+ fractional_int -= 10000000;
+ }
+
+ // info("integral " CALCULATED_NUMBER_FORMAT " (%llu), fractional " CALCULATED_NUMBER_FORMAT " (%llu)", integral, integral_int, fractional, fractional_int);
+
+ char *istre;
+ if(unlikely(integral_int == 0)) {
+ integral_str[0] = '0';
+ istre = &integral_str[1];
+ }
+ else
+ // convert the integral part to string (reversed)
+ istre = print_number_llu_r_smart(integral_str, integral_int);
+
+ // copy reversed the integral string
+ istre--;
+ while( istre >= integral_str ) *wstr++ = *istre--;
+
+ if(likely(fractional_int != 0)) {
+ // add a dot
+ *wstr++ = '.';
+
+ // convert the fractional part to string (reversed)
+ char *fstre = print_number_llu_r_smart(fractional_str, fractional_int);
+
+ // prepend zeros to reach 7 digits length
+ int decimal = 7;
+ int len = (int)(fstre - fractional_str);
+ while(len < decimal) {
+ *wstr++ = '0';
+ len++;
+ }
+
+ char *begin = fractional_str;
+ while(begin < fstre && *begin == '0') begin++;
+
+ // copy reversed the fractional string
+ fstre--;
+ while( fstre >= begin ) *wstr++ = *fstre--;
+ }
+
+ *wstr = '\0';
+ // info("printed number '%s'", str);
+ return (int)(wstr - str);
+}
diff --git a/libnetdata/storage_number/storage_number.h b/libnetdata/storage_number/storage_number.h
new file mode 100644
index 0000000..28b7f26
--- /dev/null
+++ b/libnetdata/storage_number/storage_number.h
@@ -0,0 +1,94 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_STORAGE_NUMBER_H
+#define NETDATA_STORAGE_NUMBER_H 1
+
+#include "../libnetdata.h"
+
+#ifdef NETDATA_WITHOUT_LONG_DOUBLE
+
+#define powl pow
+#define modfl modf
+#define llrintl llrint
+#define roundl round
+#define sqrtl sqrt
+#define copysignl copysign
+#define strtold strtod
+
+typedef double calculated_number;
+#define CALCULATED_NUMBER_FORMAT "%0.7f"
+#define CALCULATED_NUMBER_FORMAT_ZERO "%0.0f"
+#define CALCULATED_NUMBER_FORMAT_AUTO "%f"
+
+#define LONG_DOUBLE_MODIFIER "f"
+typedef double LONG_DOUBLE;
+
+#else // NETDATA_WITHOUT_LONG_DOUBLE
+
+typedef long double calculated_number;
+#define CALCULATED_NUMBER_FORMAT "%0.7Lf"
+#define CALCULATED_NUMBER_FORMAT_ZERO "%0.0Lf"
+#define CALCULATED_NUMBER_FORMAT_AUTO "%Lf"
+
+#define LONG_DOUBLE_MODIFIER "Lf"
+typedef long double LONG_DOUBLE;
+
+#endif // NETDATA_WITHOUT_LONG_DOUBLE
+
+//typedef long long calculated_number;
+//#define CALCULATED_NUMBER_FORMAT "%lld"
+
+typedef long long collected_number;
+#define COLLECTED_NUMBER_FORMAT "%lld"
+
+/*
+typedef long double collected_number;
+#define COLLECTED_NUMBER_FORMAT "%0.7Lf"
+*/
+
+#define calculated_number_modf(x, y) modfl(x, y)
+#define calculated_number_llrint(x) llrintl(x)
+#define calculated_number_round(x) roundl(x)
+#define calculated_number_fabs(x) fabsl(x)
+#define calculated_number_pow(x, y) powl(x, y)
+#define calculated_number_epsilon (calculated_number)0.0000001
+
+#define calculated_number_equal(a, b) (calculated_number_fabs((a) - (b)) < calculated_number_epsilon)
+
+#define calculated_number_isnumber(a) (!(fpclassify(a) & (FP_NAN|FP_INFINITE)))
+
+typedef uint32_t storage_number;
+#define STORAGE_NUMBER_FORMAT "%u"
+
+#define SN_EXISTS (1 << 24) // the value exists
+#define SN_EXISTS_RESET (1 << 25) // the value has been overflown
+#define SN_EXISTS_100       (1 << 26) // very large value (multiplier is 100 instead of 10)
+
+// extract the flags
+#define get_storage_number_flags(value) ((((storage_number)(value)) & (1 << 24)) | (((storage_number)(value)) & (1 << 25)) | (((storage_number)(value)) & (1 << 26)))
+#define SN_EMPTY_SLOT 0x00000000
+
+// checks
+#define does_storage_number_exist(value) ((get_storage_number_flags(value) != 0)?1:0)
+#define did_storage_number_reset(value) ((get_storage_number_flags(value) == SN_EXISTS_RESET)?1:0)
+
+storage_number pack_storage_number(calculated_number value, uint32_t flags);
+calculated_number unpack_storage_number(storage_number value);
+
+int print_calculated_number(char *str, calculated_number value);
+
+// sign div/mul <--- multiplier / divider ---> 10/100 RESET EXISTS VALUE
+#define STORAGE_NUMBER_POSITIVE_MAX_RAW (storage_number)( (0 << 31) | (1 << 30) | (1 << 29) | (1 << 28) | (1<<27) | (1 << 26) | (0 << 25) | (1 << 24) | 0x00ffffff )
+#define STORAGE_NUMBER_POSITIVE_MIN_RAW (storage_number)( (0 << 31) | (0 << 30) | (1 << 29) | (1 << 28) | (1<<27) | (0 << 26) | (0 << 25) | (1 << 24) | 0x00000001 )
+#define STORAGE_NUMBER_NEGATIVE_MAX_RAW (storage_number)( (1 << 31) | (0 << 30) | (1 << 29) | (1 << 28) | (1<<27) | (0 << 26) | (0 << 25) | (1 << 24) | 0x00000001 )
+#define STORAGE_NUMBER_NEGATIVE_MIN_RAW (storage_number)( (1 << 31) | (1 << 30) | (1 << 29) | (1 << 28) | (1<<27) | (1 << 26) | (0 << 25) | (1 << 24) | 0x00ffffff )
+
+// accepted accuracy loss
+#define ACCURACY_LOSS_ACCEPTED_PERCENT 0.0001
+#define accuracy_loss(t1, t2) (((t1) == (t2) || (t1) == 0.0 || (t2) == 0.0) ? 0.0 : (100.0 - (((t1) > (t2)) ? ((t2) * 100.0 / (t1) ) : ((t1) * 100.0 / (t2)))))
+
+// Maximum acceptable rate of increase for counters. With a rate of 10%, netdata can safely detect
+// counter overflows even when up to 10 consecutive samples are missed.
+#define MAX_INCREMENTAL_PERCENT_RATE 10
+
+#endif /* NETDATA_STORAGE_NUMBER_H */
diff --git a/libnetdata/storage_number/tests/Makefile.am b/libnetdata/storage_number/tests/Makefile.am
new file mode 100644
index 0000000..babdcf0
--- /dev/null
+++ b/libnetdata/storage_number/tests/Makefile.am
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
diff --git a/libnetdata/storage_number/tests/test_storage_number.c b/libnetdata/storage_number/tests/test_storage_number.c
new file mode 100644
index 0000000..0e13208
--- /dev/null
+++ b/libnetdata/storage_number/tests/test_storage_number.c
@@ -0,0 +1,52 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "../../libnetdata.h"
+#include "../../required_dummies.h"
+#include <setjmp.h>
+#include <cmocka.h>
+
+static void test_number_printing(void **state)
+{
+ (void)state;
+
+ char value[50];
+
+ print_calculated_number(value, 0);
+ assert_string_equal(value, "0");
+
+ print_calculated_number(value, 0.0000001);
+ assert_string_equal(value, "0.0000001");
+
+ print_calculated_number(value, 0.00000009);
+ assert_string_equal(value, "0.0000001");
+
+ print_calculated_number(value, 0.000000001);
+ assert_string_equal(value, "0");
+
+ print_calculated_number(value, 99.99999999999999999);
+ assert_string_equal(value, "100");
+
+ print_calculated_number(value, -99.99999999999999999);
+ assert_string_equal(value, "-100");
+
+ print_calculated_number(value, 123.4567890123456789);
+ assert_string_equal(value, "123.456789");
+
+ print_calculated_number(value, 9999.9999999);
+ assert_string_equal(value, "9999.9999999");
+
+ print_calculated_number(value, -9999.9999999);
+ assert_string_equal(value, "-9999.9999999");
+
+ print_calculated_number(value, unpack_storage_number(pack_storage_number(16.777218L, SN_EXISTS)));
+ assert_string_equal(value, "16.77722");
+}
+
+int main(void)
+{
+ const struct CMUnitTest tests[] = {
+        cmocka_unit_test(test_number_printing)
+ };
+
+ return cmocka_run_group_tests_name("storage_number", tests, NULL, NULL);
+}
diff --git a/libnetdata/string/utf8.h b/libnetdata/string/utf8.h
new file mode 100644
index 0000000..133ec71
--- /dev/null
+++ b/libnetdata/string/utf8.h
@@ -0,0 +1,9 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_STRING_UTF8_H
+#define NETDATA_STRING_UTF8_H 1
+
+#define IS_UTF8_BYTE(x) ((x) & 0x80)
+#define IS_UTF8_STARTBYTE(x) (IS_UTF8_BYTE(x) && ((x) & 0x40))
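+
+// e.g. IS_UTF8_STARTBYTE(0xC3) is true (the first byte of a two-byte
+// UTF-8 sequence), while IS_UTF8_BYTE('a') is false.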
+
+#endif /* NETDATA_STRING_UTF8_H */
diff --git a/libnetdata/tests/Makefile.am b/libnetdata/tests/Makefile.am
new file mode 100644
index 0000000..babdcf0
--- /dev/null
+++ b/libnetdata/tests/Makefile.am
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
diff --git a/libnetdata/tests/test_str2ld.c b/libnetdata/tests/test_str2ld.c
new file mode 100644
index 0000000..9d59f6c
--- /dev/null
+++ b/libnetdata/tests/test_str2ld.c
@@ -0,0 +1,48 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "../libnetdata.h"
+#include "../required_dummies.h"
+#include <setjmp.h>
+#include <cmocka.h>
+
+static void test_str2ld(void **state)
+{
+ (void)state;
+ char *values[] = {
+ "1.2345678",
+ "-35.6",
+ "0.00123",
+ "23842384234234.2",
+ ".1",
+ "1.2e-10",
+ "hello",
+ "1wrong",
+ "nan",
+ "inf",
+ NULL
+ };
+
+ for (int i = 0; values[i]; i++) {
+ char *e_mine = "hello", *e_sys = "world";
+ LONG_DOUBLE mine = str2ld(values[i], &e_mine);
+ LONG_DOUBLE sys = strtold(values[i], &e_sys);
+
+ if (isnan(mine))
+ assert_true(isnan(sys));
+ else if (isinf(mine))
+ assert_true(isinf(sys));
+ else if (mine != sys)
+            assert_false(fabsl(mine - sys) > 0.000001);
+
+ assert_ptr_equal(e_mine, e_sys);
+ }
+}
+
+int main(void)
+{
+ const struct CMUnitTest tests[] = {
+ cmocka_unit_test(test_str2ld)
+ };
+
+ return cmocka_run_group_tests_name("str2ld", tests, NULL, NULL);
+}
diff --git a/libnetdata/threads/Makefile.am b/libnetdata/threads/Makefile.am
new file mode 100644
index 0000000..161784b
--- /dev/null
+++ b/libnetdata/threads/Makefile.am
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/libnetdata/threads/README.md b/libnetdata/threads/README.md
new file mode 100644
index 0000000..377da5e
--- /dev/null
+++ b/libnetdata/threads/README.md
@@ -0,0 +1,5 @@
+<!--
+custom_edit_url: https://github.com/netdata/netdata/edit/master/libnetdata/threads/README.md
+-->
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Flibnetdata%2Fthreads%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/libnetdata/threads/threads.c b/libnetdata/threads/threads.c
new file mode 100644
index 0000000..d763694
--- /dev/null
+++ b/libnetdata/threads/threads.c
@@ -0,0 +1,241 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "../libnetdata.h"
+
+static size_t default_stacksize = 0, wanted_stacksize = 0;
+static pthread_attr_t *attr = NULL;
+
+// ----------------------------------------------------------------------------
+// per thread data
+
+typedef struct {
+ void *arg;
+ pthread_t *thread;
+ const char *tag;
+ void *(*start_routine) (void *);
+ NETDATA_THREAD_OPTIONS options;
+} NETDATA_THREAD;
+
+static __thread NETDATA_THREAD *netdata_thread = NULL;
+
+inline int netdata_thread_tag_exists(void) {
+ return (netdata_thread && netdata_thread->tag && *netdata_thread->tag);
+}
+
+const char *netdata_thread_tag(void) {
+ return (netdata_thread_tag_exists() ? netdata_thread->tag : "MAIN");
+}
+
+// ----------------------------------------------------------------------------
+// compatibility library functions
+
+pid_t gettid(void) {
+#ifdef __FreeBSD__
+
+ return (pid_t)pthread_getthreadid_np();
+
+#elif defined(__APPLE__)
+
+ #if (defined __MAC_OS_X_VERSION_MIN_REQUIRED && __MAC_OS_X_VERSION_MIN_REQUIRED >= 1060)
+ uint64_t curthreadid;
+ pthread_threadid_np(NULL, &curthreadid);
+ return (pid_t)curthreadid;
+ #else /* __MAC_OS_X_VERSION_MIN_REQUIRED */
+        return (pid_t)pthread_self();
+ #endif /* __MAC_OS_X_VERSION_MIN_REQUIRED */
+
+#else /* __APPLE__*/
+
+ return (pid_t)syscall(SYS_gettid);
+
+#endif /* __FreeBSD__, __APPLE__*/
+}
+
+// ----------------------------------------------------------------------------
+// early initialization
+
+size_t netdata_threads_init(void) {
+ int i;
+
+ // --------------------------------------------------------------------
+ // get the required stack size of the threads of netdata
+
+ attr = callocz(1, sizeof(pthread_attr_t));
+ i = pthread_attr_init(attr);
+ if(i != 0)
+ fatal("pthread_attr_init() failed with code %d.", i);
+
+ i = pthread_attr_getstacksize(attr, &default_stacksize);
+ if(i != 0)
+ fatal("pthread_attr_getstacksize() failed with code %d.", i);
+ else
+ debug(D_OPTIONS, "initial pthread stack size is %zu bytes", default_stacksize);
+
+ return default_stacksize;
+}
+
+// ----------------------------------------------------------------------------
+// late initialization
+
+void netdata_threads_init_after_fork(size_t stacksize) {
+ wanted_stacksize = stacksize;
+ int i;
+
+ // ------------------------------------------------------------------------
+ // set default pthread stack size
+
+ if(attr && default_stacksize < wanted_stacksize && wanted_stacksize > 0) {
+ i = pthread_attr_setstacksize(attr, wanted_stacksize);
+ if(i != 0)
+ fatal("pthread_attr_setstacksize() to %zu bytes, failed with code %d.", wanted_stacksize, i);
+ else
+ debug(D_SYSTEM, "Successfully set pthread stacksize to %zu bytes", wanted_stacksize);
+ }
+}
+
+
+// ----------------------------------------------------------------------------
+// netdata_thread_create
+
+static void thread_cleanup(void *ptr) {
+ if(netdata_thread != ptr) {
+ NETDATA_THREAD *info = (NETDATA_THREAD *)ptr;
+ error("THREADS: internal error - thread local variable does not match the one passed to this function. Expected thread '%s', passed thread '%s'", netdata_thread->tag, info->tag);
+ }
+
+ if(!(netdata_thread->options & NETDATA_THREAD_OPTION_DONT_LOG_CLEANUP))
+ info("thread with task id %d finished", gettid());
+
+ freez((void *)netdata_thread->tag);
+ netdata_thread->tag = NULL;
+
+ freez(netdata_thread);
+ netdata_thread = NULL;
+}
+
+static void thread_set_name_np(NETDATA_THREAD *nt) {
+
+ if (nt->tag) {
+ int ret = 0;
+
+ char threadname[NETDATA_THREAD_NAME_MAX+1];
+ strncpyz(threadname, nt->tag, NETDATA_THREAD_NAME_MAX);
+
+#if defined(__FreeBSD__)
+ pthread_set_name_np(pthread_self(), threadname);
+#elif defined(__APPLE__)
+ ret = pthread_setname_np(threadname);
+#else
+ ret = pthread_setname_np(pthread_self(), threadname);
+#endif
+
+ if (ret != 0)
+ error("cannot set pthread name of %d to %s. ErrCode: %d", gettid(), threadname, ret);
+ else
+ info("set name of thread %d to %s", gettid(), threadname);
+
+ }
+}
+
+void uv_thread_set_name_np(uv_thread_t ut, const char* name) {
+ int ret = 0;
+
+ char threadname[NETDATA_THREAD_NAME_MAX+1];
+ strncpyz(threadname, name, NETDATA_THREAD_NAME_MAX);
+
+#if defined(__FreeBSD__)
+ pthread_set_name_np(ut, threadname);
+#elif defined(__APPLE__)
+ // Apple can only set its own name
+#else
+ ret = pthread_setname_np(ut, threadname);
+#endif
+
+ if (ret)
+ info("cannot set libuv thread name to %s. Err: %d", threadname, ret);
+}
+
+void os_thread_get_current_name_np(char threadname[NETDATA_THREAD_NAME_MAX + 1])
+{
+ threadname[0] = '\0';
+#if defined(__FreeBSD__)
+ pthread_get_name_np(pthread_self(), threadname, NETDATA_THREAD_NAME_MAX + 1);
+#elif defined(HAVE_PTHREAD_GETNAME_NP) /* Linux & macOS */
+ (void)pthread_getname_np(pthread_self(), threadname, NETDATA_THREAD_NAME_MAX + 1);
+#endif
+}
+
+static void *thread_start(void *ptr) {
+ netdata_thread = (NETDATA_THREAD *)ptr;
+
+ if(!(netdata_thread->options & NETDATA_THREAD_OPTION_DONT_LOG_STARTUP))
+ info("thread created with task id %d", gettid());
+
+ if(pthread_setcanceltype(PTHREAD_CANCEL_DEFERRED, NULL) != 0)
+ error("cannot set pthread cancel type to DEFERRED.");
+
+ if(pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL) != 0)
+ error("cannot set pthread cancel state to ENABLE.");
+
+ thread_set_name_np(ptr);
+
+ void *ret = NULL;
+ pthread_cleanup_push(thread_cleanup, ptr);
+ ret = netdata_thread->start_routine(netdata_thread->arg);
+ pthread_cleanup_pop(1);
+
+ return ret;
+}
+
+int netdata_thread_create(netdata_thread_t *thread, const char *tag, NETDATA_THREAD_OPTIONS options, void *(*start_routine) (void *), void *arg) {
+ NETDATA_THREAD *info = mallocz(sizeof(NETDATA_THREAD));
+ info->arg = arg;
+ info->thread = thread;
+ info->tag = strdupz(tag);
+ info->start_routine = start_routine;
+ info->options = options;
+
+ int ret = pthread_create(thread, attr, thread_start, info);
+ if(ret != 0)
+ error("failed to create new thread for %s. pthread_create() failed with code %d", tag, ret);
+
+ else {
+ if (!(options & NETDATA_THREAD_OPTION_JOINABLE)) {
+ int ret2 = pthread_detach(*thread);
+ if (ret2 != 0)
+ error("cannot request detach of newly created %s thread. pthread_detach() failed with code %d", tag, ret2);
+ }
+ }
+
+ return ret;
+}
+
+// ----------------------------------------------------------------------------
+// netdata_thread_cancel
+
+int netdata_thread_cancel(netdata_thread_t thread) {
+ int ret = pthread_cancel(thread);
+ if(ret != 0)
+ error("cannot cancel thread. pthread_cancel() failed with code %d.", ret);
+
+ return ret;
+}
+
+// ----------------------------------------------------------------------------
+// netdata_thread_join
+
+int netdata_thread_join(netdata_thread_t thread, void **retval) {
+ int ret = pthread_join(thread, retval);
+ if(ret != 0)
+ error("cannot join thread. pthread_join() failed with code %d.", ret);
+
+ return ret;
+}
+
+int netdata_thread_detach(pthread_t thread) {
+ int ret = pthread_detach(thread);
+ if(ret != 0)
+ error("cannot detach thread. pthread_detach() failed with code %d.", ret);
+
+ return ret;
+}
diff --git a/libnetdata/threads/threads.h b/libnetdata/threads/threads.h
new file mode 100644
index 0000000..e7d79d3
--- /dev/null
+++ b/libnetdata/threads/threads.h
@@ -0,0 +1,42 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_THREADS_H
+#define NETDATA_THREADS_H 1
+
+#include "../libnetdata.h"
+
+extern pid_t gettid(void);
+
+typedef enum {
+ NETDATA_THREAD_OPTION_DEFAULT = 0 << 0,
+ NETDATA_THREAD_OPTION_JOINABLE = 1 << 0,
+ NETDATA_THREAD_OPTION_DONT_LOG_STARTUP = 1 << 1,
+ NETDATA_THREAD_OPTION_DONT_LOG_CLEANUP = 1 << 2,
+ NETDATA_THREAD_OPTION_DONT_LOG = NETDATA_THREAD_OPTION_DONT_LOG_STARTUP|NETDATA_THREAD_OPTION_DONT_LOG_CLEANUP,
+} NETDATA_THREAD_OPTIONS;
+
+#define netdata_thread_cleanup_push(func, arg) pthread_cleanup_push(func, arg)
+#define netdata_thread_cleanup_pop(execute) pthread_cleanup_pop(execute)
+
+typedef pthread_t netdata_thread_t;
+
+#define NETDATA_THREAD_TAG_MAX 100
+extern const char *netdata_thread_tag(void);
+extern int netdata_thread_tag_exists(void);
+
+extern size_t netdata_threads_init(void);
+extern void netdata_threads_init_after_fork(size_t stacksize);
+
+extern int netdata_thread_create(netdata_thread_t *thread, const char *tag, NETDATA_THREAD_OPTIONS options, void *(*start_routine) (void *), void *arg);
+extern int netdata_thread_cancel(netdata_thread_t thread);
+extern int netdata_thread_join(netdata_thread_t thread, void **retval);
+extern int netdata_thread_detach(pthread_t thread);
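+
+/*
+ * Usage sketch (illustrative only, not part of the API contract):
+ *
+ *     static void *worker(void *arg) {
+ *         (void)arg;
+ *         // do the work, calling netdata_thread_testcancel() at safe points
+ *         return NULL;
+ *     }
+ *
+ *     netdata_thread_t thread;
+ *     if(netdata_thread_create(&thread, "WORKER", NETDATA_THREAD_OPTION_JOINABLE, worker, NULL) == 0) {
+ *         // ... later, at shutdown ...
+ *         netdata_thread_cancel(thread);
+ *         netdata_thread_join(thread, NULL);
+ *     }
+ *
+ * The tag "WORKER" is an arbitrary example; threads created without
+ * NETDATA_THREAD_OPTION_JOINABLE are detached automatically.
+ */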
+
+#define NETDATA_THREAD_NAME_MAX 15
+extern void uv_thread_set_name_np(uv_thread_t ut, const char* name);
+extern void os_thread_get_current_name_np(char threadname[NETDATA_THREAD_NAME_MAX + 1]);
+
+#define netdata_thread_self pthread_self
+#define netdata_thread_testcancel pthread_testcancel
+
+#endif //NETDATA_THREADS_H
diff --git a/libnetdata/url/Makefile.am b/libnetdata/url/Makefile.am
new file mode 100644
index 0000000..161784b
--- /dev/null
+++ b/libnetdata/url/Makefile.am
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/libnetdata/url/README.md b/libnetdata/url/README.md
new file mode 100644
index 0000000..4a95547
--- /dev/null
+++ b/libnetdata/url/README.md
@@ -0,0 +1,5 @@
+<!--
+custom_edit_url: https://github.com/netdata/netdata/edit/master/libnetdata/url/README.md
+-->
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Flibnetdata%2Furl%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/libnetdata/url/url.c b/libnetdata/url/url.c
new file mode 100644
index 0000000..3de94fd
--- /dev/null
+++ b/libnetdata/url/url.c
@@ -0,0 +1,391 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "../libnetdata.h"
+
+// ----------------------------------------------------------------------------
+// URL encode / decode
+// code from: http://www.geekhideout.com/urlcode.shtml
+
+/* Converts a hex character to its integer value */
+char from_hex(char ch) {
+ return (char)(isdigit(ch) ? ch - '0' : tolower(ch) - 'a' + 10);
+}
+
+/* Converts an integer value to its hex character*/
+char to_hex(char code) {
+ static char hex[] = "0123456789abcdef";
+ return hex[code & 15];
+}
+
+/* Returns a url-encoded version of str */
+/* IMPORTANT: be sure to free() the returned string after use */
+char *url_encode(char *str) {
+ char *buf, *pbuf;
+
+ pbuf = buf = mallocz(strlen(str) * 3 + 1);
+
+ while (*str) {
+ if (isalnum(*str) || *str == '-' || *str == '_' || *str == '.' || *str == '~')
+ *pbuf++ = *str;
+
+ else if (*str == ' ')
+ *pbuf++ = '+';
+
+ else{
+ *pbuf++ = '%';
+ *pbuf++ = to_hex(*str >> 4);
+ *pbuf++ = to_hex(*str & 15);
+ }
+
+ str++;
+ }
+ *pbuf = '\0';
+
+ pbuf = strdupz(buf);
+ freez(buf);
+ return pbuf;
+}
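+
+/*
+ * Example (illustrative): url_encode("a b&c") returns a newly allocated
+ * string containing "a+b%26c"; release it with freez() when done.
+ */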
+
+/**
+ * Percent escape decode
+ *
+ * Decode a %XX escape sequence, or return 0 if it cannot be decoded.
+ *
+ * @param s the string to decode, pointing at the '%'
+ *
+ * @return The decoded character on success and 0 otherwise
+ */
+char url_percent_escape_decode(char *s) {
+ if(likely(s[1] && s[2]))
+ return from_hex(s[1]) << 4 | from_hex(s[2]);
+ return 0;
+}
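+
+/*
+ * Example (illustrative): with s pointing at the escape "%2F",
+ * url_percent_escape_decode(s) returns '/'; only s[1] and s[2] are read.
+ */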
+
+/**
+ * Get byte length
+ *
+ * This UTF-8 helper should be moved to a separate file in the future.
+ *
+ * @param c the leading byte of the utf8 character
+ *
+ * @return The byte length of the character (1 for plain ASCII), or -1 when the leading byte is invalid.
+ */
+char url_utf8_get_byte_length(char c) {
+ if(!IS_UTF8_BYTE(c))
+ return 1;
+
+ char length = 0;
+ while(likely(c & 0x80)) {
+ length++;
+ c <<= 1;
+ }
+ //4 byte is max size for UTF-8 char
+ //10XX XXXX is not valid character -> check length == 1
+ if(length > 4 || length == 1)
+ return -1;
+
+ return length;
+}
+
+/**
+ * Decode multibyte UTF-8
+ *
+ * Decode the %XX escapes that make up one UTF-8 character and copy the
+ * decoded bytes to *d.
+ *
+ * @param s the source, pointing at the first '%'
+ * @param d the destination buffer
+ * @param d_end the end of the destination buffer
+ *
+ * @return count of bytes written to *d, or 0 on failure
+ */
+char url_decode_multibyte_utf8(char *s, char *d, char *d_end) {
+ char first_byte = url_percent_escape_decode(s);
+
+ if(unlikely(!first_byte || !IS_UTF8_STARTBYTE(first_byte)))
+ return 0;
+
+ char byte_length = url_utf8_get_byte_length(first_byte);
+
+ if(unlikely(byte_length <= 0 || d+byte_length >= d_end))
+ return 0;
+
+ char to_read = byte_length;
+ while(to_read > 0) {
+ char c = url_percent_escape_decode(s);
+
+ if(unlikely( !IS_UTF8_BYTE(c) ))
+ return 0;
+ if((to_read != byte_length) && IS_UTF8_STARTBYTE(c))
+ return 0;
+
+ *d++ = c;
+ s+=3;
+ to_read--;
+ }
+
+ return byte_length;
+}
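+
+/*
+ * Example (illustrative): for s = "%CE%B1" (UTF-8 for the greek letter
+ * alpha), the bytes 0xCE 0xB1 are written to *d and 2 is returned; the
+ * caller advances s by three input bytes for every byte written.
+ */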
+
+/*
+ * The utf8_check() function scans the '\0'-terminated string starting
+ * at s. It returns a pointer to the first byte of the first malformed
+ * or overlong UTF-8 sequence found, or NULL if the string contains
+ * only correct UTF-8. It also spots UTF-8 sequences that could cause
+ * trouble if converted to UTF-16, namely surrogate characters
+ * (U+D800..U+DFFF) and non-Unicode positions (U+FFFE..U+FFFF). This
+ * routine is very likely to find a malformed sequence if the input
+ * uses any other encoding than UTF-8. It therefore can be used as a
+ * very effective heuristic for distinguishing between UTF-8 and other
+ * encodings.
+ *
+ * Markus Kuhn <http://www.cl.cam.ac.uk/~mgk25/> -- 2005-03-30
+ * License: http://www.cl.cam.ac.uk/~mgk25/short-license.html
+ */
+unsigned char *utf8_check(unsigned char *s)
+{
+ while (*s)
+ {
+ if (*s < 0x80)
+ /* 0xxxxxxx */
+ s++;
+ else if ((s[0] & 0xe0) == 0xc0)
+ {
+ /* 110XXXXx 10xxxxxx */
+ if ((s[1] & 0xc0) != 0x80 ||
+ (s[0] & 0xfe) == 0xc0) /* overlong? */
+ return s;
+ else
+ s += 2;
+ }
+ else if ((s[0] & 0xf0) == 0xe0)
+ {
+ /* 1110XXXX 10Xxxxxx 10xxxxxx */
+ if ((s[1] & 0xc0) != 0x80 ||
+ (s[2] & 0xc0) != 0x80 ||
+ (s[0] == 0xe0 && (s[1] & 0xe0) == 0x80) || /* overlong? */
+ (s[0] == 0xed && (s[1] & 0xe0) == 0xa0) || /* surrogate? */
+ (s[0] == 0xef && s[1] == 0xbf &&
+ (s[2] & 0xfe) == 0xbe)) /* U+FFFE or U+FFFF? */
+ return s;
+ else
+ s += 3;
+ }
+ else if ((s[0] & 0xf8) == 0xf0)
+ {
+ /* 11110XXX 10XXxxxx 10xxxxxx 10xxxxxx */
+ if ((s[1] & 0xc0) != 0x80 ||
+ (s[2] & 0xc0) != 0x80 ||
+ (s[3] & 0xc0) != 0x80 ||
+ (s[0] == 0xf0 && (s[1] & 0xf0) == 0x80) || /* overlong? */
+ (s[0] == 0xf4 && s[1] > 0x8f) || s[0] > 0xf4) /* > U+10FFFF? */
+ return s;
+ else
+ s += 4;
+ }
+ else
+ return s;
+ }
+
+ return NULL;
+}
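+
+/*
+ * Example (illustrative): utf8_check() returns NULL for plain ASCII such
+ * as "abc", while for the overlong sequence "\xC0\xAF" it returns a
+ * pointer to the offending 0xC0 byte.
+ */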
+
+char *url_decode_r(char *to, char *url, size_t size) {
+ char *s = url, // source
+ *d = to, // destination
+ *e = &to[size - 1]; // destination end
+
+ while(*s && d < e) {
+ if(unlikely(*s == '%')) {
+ char t = url_percent_escape_decode(s);
+ if(IS_UTF8_BYTE(t)) {
+ char bytes_written = url_decode_multibyte_utf8(s, d, e);
+ if(likely(bytes_written)){
+ d += bytes_written;
+ s += (bytes_written * 3)-1;
+ }
+ else {
+ goto fail_cleanup;
+ }
+ }
+ else if(likely(t) && isprint(t)) {
+ // avoid HTTP header injection
+ *d++ = t;
+ s += 2;
+ }
+ else
+ goto fail_cleanup;
+ }
+ else if(unlikely(*s == '+'))
+ *d++ = ' ';
+
+ else
+ *d++ = *s;
+
+ s++;
+ }
+
+ *d = '\0';
+
+    if(unlikely( utf8_check((unsigned char *)to) )) // NULL means success here
+ return NULL;
+
+ return to;
+
+fail_cleanup:
+ *d = '\0';
+ return NULL;
+}
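+
+/*
+ * Example (illustrative):
+ *
+ *     char buf[64];
+ *     if(url_decode_r(buf, "q=hello+world%21", sizeof(buf)))
+ *         ; // buf now holds "q=hello world!"
+ *
+ * NULL is returned on malformed escapes or invalid UTF-8 in the result.
+ */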
+
+/**
+ * Is request complete?
+ *
+ * Check whether the request is complete.
+ * This function cannot handle every request METHOD; for example, it will
+ * fail when you are working with POST.
+ *
+ * @param begin is the first character of the sequence to analyse.
+ * @param end is the last character of the sequence.
+ * @param length is the total number of bytes read; it is not the difference between end and begin.
+ *
+ * @return It returns 1 when the request is complete and 0 otherwise.
+ */
+inline int url_is_request_complete(char *begin, char *end, size_t length) {
+
+ if ( begin == end) {
+ //Message cannot be complete when first and last address are the same
+ return 0;
+ }
+
+    // check only the last bytes read for the terminator, since we are discarding the POST case
+ if (length > 4) {
+ begin = end - 4;
+ }
+
+ return (strstr(begin, "\r\n\r\n"))?1:0;
+}
+
+/**
+ * Find protocol
+ *
+ * Search for the string ' HTTP/' in the message given.
+ *
+ * @param s is the start of the user request.
+ * @return a pointer to the " HTTP/" token inside s, or to the string's terminating null byte when it is not found.
+ */
+inline char *url_find_protocol(char *s) {
+ while(*s) {
+ // find the next space
+ while (*s && *s != ' ') s++;
+
+ // is it SPACE + "HTTP/" ?
+ if(*s && !strncmp(s, " HTTP/", 6)) break;
+ else s++;
+ }
+
+ return s;
+}
+
+/**
+ * Map query string
+ *
+ * Map the query string fields that will be decoded.
+ * This function must be called only after checking for the presence of a
+ * query string; it assumes that test has already been done.
+ *
+ * @param out the pointer to pointers that will be used to map
+ * @param url the input url that we are decoding.
+ *
+ * @return It returns the number of total variables in the query string.
+ */
+int url_map_query_string(char **out, char *url) {
+ int count = 0;
+
+    // first we try to parse assuming the fields were not URL encoded
+ char *moveme = url;
+ char *ptr;
+
+    // we always have at least one field here, so map it unconditionally
+ out[count++] = moveme;
+ while(moveme) {
+ ptr = strchr((moveme+1), '&');
+ if(ptr) {
+ out[count++] = ptr;
+ }
+
+ moveme = ptr;
+ }
+
+    // we could not find any plain '&', so assume the separators are encoded (e.g. '%26')
+ if (count == 1) {
+ moveme = url;
+ while(moveme) {
+ ptr = strchr((moveme+1), '%');
+ if(ptr) {
+ char *test = (ptr+1);
+ if (!strncmp(test, "3f", 2) || !strncmp(test, "3F", 2)) {
+ out[count++] = ptr;
+ }
+ }
+ moveme = ptr;
+ }
+ }
+
+ return count;
+}
+
+/**
+ * Parse query string
+ *
+ * Parse the query string mapped and store it inside output.
+ *
+ * @param output is the buffer where the decoded string is stored.
+ * @param max is the maximum length of the output.
+ * @param map is the map built by url_map_query_string.
+ * @param total is the total number of variables inside map.
+ *
+ * @return It returns 0 on success and -1 otherwise
+ */
+int url_parse_query_string(char *output, size_t max, char **map, int total) {
+ if(!total) {
+ return 0;
+ }
+
+ int counter, next;
+ size_t length;
+ char *end;
+ char *begin = map[0];
+ char save;
+ size_t copied = 0;
+ for(counter = 0, next=1 ; next <= total ; ++counter, ++next) {
+ if (next != total) {
+ end = map[next];
+ length = (size_t) (end - begin);
+ save = *end;
+ *end = 0x00;
+ } else {
+ length = strlen(begin);
+ end = NULL;
+ }
+ length++;
+
+ if (length > (max - copied)) {
+ error("Parsing query string: we cannot parse a query string so big");
+ break;
+ }
+
+ if(!url_decode_r(output, begin, length)) {
+ return -1;
+ }
+ length = strlen(output);
+ copied += length;
+ output += length;
+
+ begin = end;
+ if (begin) {
+ *begin = save;
+ }
+ }
+
+ return 0;
+}
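+
+/*
+ * Example (illustrative) of the map/parse pair on "a=1&b=two%20words":
+ *
+ *     char *map[WEB_FIELDS_MAX];
+ *     char out[256];
+ *     int fields = url_map_query_string(map, query);    // fields == 2
+ *     if(url_parse_query_string(out, sizeof(out), map, fields) == 0)
+ *         ; // out now holds "a=1" immediately followed by "&b=two words"
+ */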
diff --git a/libnetdata/url/url.h b/libnetdata/url/url.h
new file mode 100644
index 0000000..10f3fe1
--- /dev/null
+++ b/libnetdata/url/url.h
@@ -0,0 +1,35 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_URL_H
+#define NETDATA_URL_H 1
+
+#include "../libnetdata.h"
+
+// ----------------------------------------------------------------------------
+// URL encode / decode
+// code from: http://www.geekhideout.com/urlcode.shtml
+
+/* Converts a hex character to its integer value */
+extern char from_hex(char ch);
+
+/* Converts an integer value to its hex character*/
+extern char to_hex(char code);
+
+/* Returns a url-encoded version of str */
+/* IMPORTANT: be sure to free() the returned string after use */
+extern char *url_encode(char *str);
+
+/* Returns a url-decoded version of str */
+/* IMPORTANT: be sure to free() the returned string after use */
+extern char *url_decode(char *str);
+
+extern char *url_decode_r(char *to, char *url, size_t size);
+
+#define WEB_FIELDS_MAX 400
+extern int url_map_query_string(char **out, char *url);
+extern int url_parse_query_string(char *output, size_t max, char **map, int total);
+
+extern int url_is_request_complete(char *begin,char *end,size_t length);
+extern char *url_find_protocol(char *s);
+
+#endif /* NETDATA_URL_H */
diff --git a/netdata-installer.sh b/netdata-installer.sh
new file mode 100755
index 0000000..c11e1a7
--- /dev/null
+++ b/netdata-installer.sh
@@ -0,0 +1,1909 @@
+#!/usr/bin/env bash
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+# shellcheck disable=SC2046,SC2086,SC2166
+
+export PATH="${PATH}:/sbin:/usr/sbin:/usr/local/bin:/usr/local/sbin"
+uniquepath() {
+ local path=""
+ while read -r; do
+ if [[ ! ${path} =~ (^|:)"${REPLY}"(:|$) ]]; then
+ [ -n "${path}" ] && path="${path}:"
+ path="${path}${REPLY}"
+ fi
+ done < <(echo "${PATH}" | tr ":" "\n")
+
+ [ -n "${path}" ] && [[ ${PATH} =~ /bin ]] && [[ ${PATH} =~ /sbin ]] && export PATH="${path}"
+}
+uniquepath
+
+PROGRAM="$0"
+NETDATA_SOURCE_DIR="$(pwd)"
+INSTALLER_DIR="$(dirname "${PROGRAM}")"
+
+if [ "${NETDATA_SOURCE_DIR}" != "${INSTALLER_DIR}" ] && [ "${INSTALLER_DIR}" != "." ]; then
+ echo >&2 "Warning: you are currently in '${NETDATA_SOURCE_DIR}' but the installer is in '${INSTALLER_DIR}'."
+fi
+
+# -----------------------------------------------------------------------------
+# Pull in OpenSSL properly if on macOS
+if [ "$(uname -s)" = 'Darwin' ] && [ -d /usr/local/opt/openssl/include ]; then
+ export C_INCLUDE_PATH="/usr/local/opt/openssl/include"
+ export LDFLAGS="-L/usr/local/opt/openssl@1.1/lib"
+fi
+
+# -----------------------------------------------------------------------------
+# reload the user profile
+
+# shellcheck source=/dev/null
+[ -f /etc/profile ] && . /etc/profile
+
+# make sure /etc/profile does not change our current directory
+cd "${NETDATA_SOURCE_DIR}" || exit 1
+
+# -----------------------------------------------------------------------------
+# figure out an appropriate temporary directory
+_cannot_use_tmpdir() {
+ local testfile ret
+ testfile="$(TMPDIR="${1}" mktemp -q -t netdata-test.XXXXXXXXXX)"
+ ret=0
+
+ if [ -z "${testfile}" ]; then
+ return "${ret}"
+ fi
+
+ if printf '#!/bin/sh\necho SUCCESS\n' > "${testfile}"; then
+ if chmod +x "${testfile}"; then
+ if [ "$("${testfile}")" = "SUCCESS" ]; then
+ ret=1
+ fi
+ fi
+ fi
+
+ rm -f "${testfile}"
+ return "${ret}"
+}
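+# Note: the function above returns shell-true (0) when the directory is NOT
+# usable, so it reads naturally in conditions such as:
+#   if _cannot_use_tmpdir /tmp; then ... fi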
+
+if [ -z "${TMPDIR}" ] || _cannot_use_tmpdir "${TMPDIR}"; then
+ if _cannot_use_tmpdir /tmp; then
+ if _cannot_use_tmpdir "${PWD}"; then
+ echo >&2
+      echo >&2 "Unable to find a usable temporary directory. Please set \$TMPDIR to a path that is both writable and allows execution of files and try again."
+ exit 1
+ else
+ TMPDIR="${PWD}"
+ fi
+ else
+ TMPDIR="/tmp"
+ fi
+fi
+
+# -----------------------------------------------------------------------------
+# set up handling for deferred error messages
+NETDATA_DEFERRED_ERRORS=""
+
+defer_error() {
+ NETDATA_DEFERRED_ERRORS="${NETDATA_DEFERRED_ERRORS}\n* ${1}"
+}
+
+defer_error_highlighted() {
+ NETDATA_DEFERRED_ERRORS="${TPUT_YELLOW}${TPUT_BOLD}${NETDATA_DEFERRED_ERRORS}\n* ${1}${TPUT_RESET}"
+}
+
+print_deferred_errors() {
+ if [ -n "${NETDATA_DEFERRED_ERRORS}" ]; then
+ echo >&2
+ echo >&2 "The following non-fatal errors were encountered during the installation process:"
+ # shellcheck disable=SC2059
+ printf >&2 "${NETDATA_DEFERRED_ERRORS}"
+ echo >&2
+ fi
+}
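+
+# Usage sketch (illustrative): non-fatal problems are queued while the
+# installer keeps going, then reported once at the very end:
+#   defer_error "feature X was skipped"
+#   ...
+#   print_deferred_errors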
+
+# -----------------------------------------------------------------------------
+# load the required functions
+
+if [ -f "${INSTALLER_DIR}/packaging/installer/functions.sh" ]; then
+ # shellcheck source=packaging/installer/functions.sh
+ source "${INSTALLER_DIR}/packaging/installer/functions.sh" || exit 1
+else
+ # shellcheck source=packaging/installer/functions.sh
+ source "${NETDATA_SOURCE_DIR}/packaging/installer/functions.sh" || exit 1
+fi
+
+download_go() {
+ download_file "${1}" "${2}" "go.d plugin" "go"
+}
+
+# make sure we save all commands we run
+run_logfile="netdata-installer.log"
+
+# -----------------------------------------------------------------------------
+# fix PKG_CHECK_MODULES error
+
+if [ -d /usr/share/aclocal ]; then
+ ACLOCAL_PATH=${ACLOCAL_PATH-/usr/share/aclocal}
+ export ACLOCAL_PATH
+fi
+
+export LC_ALL=C
+umask 002
+
+# Be nice on production environments
+renice 19 $$ > /dev/null 2> /dev/null
+
+# you can set CFLAGS before running installer
+LDFLAGS="${LDFLAGS}"
+CFLAGS="${CFLAGS--O2}"
+[ "z${CFLAGS}" = "z-O3" ] && CFLAGS="-O2"
+ACLK="${ACLK}"
+
+# keep a log of this command
+# shellcheck disable=SC2129
+printf "\n# " >> netdata-installer.log
+date >> netdata-installer.log
+printf 'CFLAGS="%s" ' "${CFLAGS}" >> netdata-installer.log
+printf 'LDFLAGS="%s" ' "${LDFLAGS}" >> netdata-installer.log
+printf "%q " "${PROGRAM}" "${@}" >> netdata-installer.log
+printf "\n" >> netdata-installer.log
+
+REINSTALL_OPTIONS="$(
+ printf "%s" "${*}"
+ printf "\n"
+)"
+# remove options that should not be inherited by netdata-updater.sh
+REINSTALL_OPTIONS="$(echo "${REINSTALL_OPTIONS}" | sed 's/--dont-wait//g' | sed 's/--dont-start-it//g')"
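+# e.g. running the installer with "--dont-wait --stable-channel" leaves
+# REINSTALL_OPTIONS as just "--stable-channel" for future updates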
+
+banner_nonroot_install() {
+ cat << NONROOTNOPREFIX
+
+ ${TPUT_RED}${TPUT_BOLD}Sorry! This will fail!${TPUT_RESET}
+
+ You are attempting to install netdata as non-root, but you plan
+ to install it in system paths.
+
+ Please set an installation prefix, like this:
+
+ $PROGRAM ${@} --install /tmp
+
+ or, run the installer as root:
+
+ sudo $PROGRAM ${@}
+
+  We suggest installing it as root, otherwise certain data collectors
+  will not be able to work. Netdata drops root privileges when running.
+ So, if you plan to keep it, install it as root to get the full
+ functionality.
+
+NONROOTNOPREFIX
+}
+
+banner_root_notify() {
+ cat << NONROOT
+
+ ${TPUT_RED}${TPUT_BOLD}IMPORTANT${TPUT_RESET}:
+ You are about to install netdata as a non-root user.
+ Netdata will work, but a few data collection modules that
+ require root access will fail.
+
+  If you are installing netdata permanently on your system, run
+ the installer like this:
+
+ ${TPUT_YELLOW}${TPUT_BOLD}sudo $PROGRAM ${@}${TPUT_RESET}
+
+NONROOT
+}
+
+usage() {
+ netdata_banner "installer command line options"
+ cat << HEREDOC
+
+USAGE: ${PROGRAM} [options]
+ where options include:
+
+ --install <path> Install netdata in <path>. Ex. --install /opt will put netdata in /opt/netdata
+ --dont-start-it Do not (re)start netdata after installation
+ --dont-wait Run installation in non-interactive mode
+ --auto-update or -u Install netdata-updater in cron to update netdata automatically once per day
+ --auto-update-type Override the auto-update scheduling mechanism detection, currently supported types
+ are: systemd, interval, crontab
+ --stable-channel Use packages from GitHub release pages instead of GCS (nightly updates).
+ This results in less frequent updates.
+  --nightly-channel          Use most recent nightly updates instead of GitHub releases.
+ This results in more frequent updates.
+ --disable-go Disable installation of go.d.plugin.
+ --disable-ebpf Disable eBPF Kernel plugin (Default: enabled)
+ --disable-cloud Disable all Netdata Cloud functionality.
+ --require-cloud Fail the install if it can't build Netdata Cloud support.
+ --enable-plugin-freeipmi Enable the FreeIPMI plugin. Default: enable it when libipmimonitoring is available.
+ --disable-plugin-freeipmi
+ --disable-https Explicitly disable TLS support
+ --disable-dbengine Explicitly disable DB engine support
+ --enable-plugin-nfacct Enable nfacct plugin. Default: enable it when libmnl and libnetfilter_acct are available.
+ --disable-plugin-nfacct
+ --enable-plugin-xenstat Enable the xenstat plugin. Default: enable it when libxenstat and libyajl are available
+ --disable-plugin-xenstat Disable the xenstat plugin.
+ --enable-backend-kinesis Enable AWS Kinesis backend. Default: enable it when libaws_cpp_sdk_kinesis and libraries
+ it depends on are available.
+ --disable-backend-kinesis
+ --enable-backend-prometheus-remote-write Enable Prometheus remote write backend. Default: enable it when libprotobuf and
+ libsnappy are available.
+ --disable-backend-prometheus-remote-write
+ --enable-backend-mongodb Enable MongoDB backend. Default: enable it when libmongoc is available.
+ --disable-backend-mongodb
+ --enable-lto Enable Link-Time-Optimization. Default: enabled
+ --disable-lto
+ --disable-x86-sse Disable SSE instructions. By default SSE optimizations are enabled.
+ --use-system-lws Use a system copy of libwebsockets instead of bundling our own (default is to use the bundled copy).
+ --zlib-is-really-here or
+ --libs-are-really-here If you get errors about missing zlib or libuuid but you know it is available, you might
+ have a broken pkg-config. Use this option to proceed without checking pkg-config.
+  --disable-telemetry        Use this flag to opt-out from our anonymous telemetry program. (DO_NOT_TRACK=1)
+
+Netdata will by default be compiled with gcc optimization -O2
+If you need to pass different CFLAGS, use something like this:
+
+ CFLAGS="<gcc options>" ${PROGRAM} [options]
+
+If you also need to provide different LDFLAGS, use something like this:
+
+ LDFLAGS="<extra ldflag options>" ${PROGRAM} [options]
+
+or use the following if both LDFLAGS and CFLAGS need to be overridden:
+
+ CFLAGS="<gcc options>" LDFLAGS="<extra ld options>" ${PROGRAM} [options]
+
+For the installer to complete successfully, you will need these packages installed:
+
+ gcc make autoconf automake pkg-config zlib1g-dev (or zlib-devel) uuid-dev (or libuuid-devel)
+
+For the plugins, you will at least need:
+
+ curl, bash v4+, python v2 or v3, node.js
+
+HEREDOC
+}
+
+DONOTSTART=0
+DONOTWAIT=0
+AUTOUPDATE=0
+NETDATA_PREFIX=
+LIBS_ARE_HERE=0
+NETDATA_CONFIGURE_OPTIONS="${NETDATA_CONFIGURE_OPTIONS-}"
+RELEASE_CHANNEL="nightly" # check .travis/create_artifacts.sh before modifying
+IS_NETDATA_STATIC_BINARY="${IS_NETDATA_STATIC_BINARY:-"no"}"
+while [ -n "${1}" ]; do
+ case "${1}" in
+ "--zlib-is-really-here") LIBS_ARE_HERE=1 ;;
+ "--libs-are-really-here") LIBS_ARE_HERE=1 ;;
+ "--use-system-lws") USE_SYSTEM_LWS=1 ;;
+ "--dont-scrub-cflags-even-though-it-may-break-things") DONT_SCRUB_CFLAGS_EVEN_THOUGH_IT_MAY_BREAK_THINGS=1 ;;
+ "--dont-start-it") DONOTSTART=1 ;;
+ "--dont-wait") DONOTWAIT=1 ;;
+ "--auto-update" | "-u") AUTOUPDATE=1 ;;
+ "--auto-update-type")
+ AUTO_UPDATE_TYPE="$(echo "${2}" | tr '[:upper:]' '[:lower:]')"
+ case "${AUTO_UPDATE_TYPE}" in
+ systemd|interval|crontab)
+ shift 1
+ ;;
+ *)
+ echo "Unrecognized value for --auto-update-type. Valid values are: systemd, interval, crontab"
+ exit 1
+ ;;
+ esac
+ ;;
+ "--stable-channel") RELEASE_CHANNEL="stable" ;;
+ "--nightly-channel") RELEASE_CHANNEL="nightly" ;;
+ "--enable-plugin-freeipmi") NETDATA_CONFIGURE_OPTIONS="${NETDATA_CONFIGURE_OPTIONS//--enable-plugin-freeipmi/} --enable-plugin-freeipmi" ;;
+ "--disable-plugin-freeipmi") NETDATA_CONFIGURE_OPTIONS="${NETDATA_CONFIGURE_OPTIONS//--disable-plugin-freeipmi/} --disable-plugin-freeipmi" ;;
+ "--disable-https") NETDATA_CONFIGURE_OPTIONS="${NETDATA_CONFIGURE_OPTIONS//--disable-https/} --disable-https" ;;
+ "--disable-dbengine")
+ NETDATA_CONFIGURE_OPTIONS="${NETDATA_CONFIGURE_OPTIONS//--disable-dbengine/} --disable-dbengine"
+ NETDATA_DISABLE_DBENGINE=1
+ ;;
+ "--enable-plugin-nfacct") NETDATA_CONFIGURE_OPTIONS="${NETDATA_CONFIGURE_OPTIONS//--enable-plugin-nfacct/} --enable-plugin-nfacct" ;;
+ "--disable-plugin-nfacct") NETDATA_CONFIGURE_OPTIONS="${NETDATA_CONFIGURE_OPTIONS//--disable-plugin-nfacct/} --disable-plugin-nfacct" ;;
+ "--enable-plugin-xenstat") NETDATA_CONFIGURE_OPTIONS="${NETDATA_CONFIGURE_OPTIONS//--enable-plugin-xenstat/} --enable-plugin-xenstat" ;;
+ "--disable-plugin-xenstat") NETDATA_CONFIGURE_OPTIONS="${NETDATA_CONFIGURE_OPTIONS//--disable-plugin-xenstat/} --disable-plugin-xenstat" ;;
+ "--enable-backend-kinesis") NETDATA_CONFIGURE_OPTIONS="${NETDATA_CONFIGURE_OPTIONS//--enable-backend-kinesis/} --enable-backend-kinesis" ;;
+ "--disable-backend-kinesis") NETDATA_CONFIGURE_OPTIONS="${NETDATA_CONFIGURE_OPTIONS//--disable-backend-kinesis/} --disable-backend-kinesis" ;;
+ "--enable-backend-prometheus-remote-write") NETDATA_CONFIGURE_OPTIONS="${NETDATA_CONFIGURE_OPTIONS//--enable-backend-prometheus-remote-write/} --enable-backend-prometheus-remote-write" ;;
+ "--disable-backend-prometheus-remote-write") NETDATA_CONFIGURE_OPTIONS="${NETDATA_CONFIGURE_OPTIONS//--disable-backend-prometheus-remote-write/} --disable-backend-prometheus-remote-write" ;;
+ "--enable-backend-mongodb") NETDATA_CONFIGURE_OPTIONS="${NETDATA_CONFIGURE_OPTIONS//--enable-backend-mongodb/} --enable-backend-mongodb" ;;
+ "--disable-backend-mongodb") NETDATA_CONFIGURE_OPTIONS="${NETDATA_CONFIGURE_OPTIONS//--disable-backend-mongodb/} --disable-backend-mongodb" ;;
+ "--enable-lto") NETDATA_CONFIGURE_OPTIONS="${NETDATA_CONFIGURE_OPTIONS//--enable-lto/} --enable-lto" ;;
+ "--disable-lto") NETDATA_CONFIGURE_OPTIONS="${NETDATA_CONFIGURE_OPTIONS//--disable-lto/} --disable-lto" ;;
+ "--disable-x86-sse") NETDATA_CONFIGURE_OPTIONS="${NETDATA_CONFIGURE_OPTIONS//--disable-x86-sse/} --disable-x86-sse" ;;
+ "--disable-telemetry") NETDATA_DISABLE_TELEMETRY=1 ;;
+ "--disable-go") NETDATA_DISABLE_GO=1 ;;
+ "--enable-ebpf") NETDATA_DISABLE_EBPF=0 ;;
+ "--disable-ebpf") NETDATA_DISABLE_EBPF=1 NETDATA_CONFIGURE_OPTIONS="${NETDATA_CONFIGURE_OPTIONS//--disable-ebpf/} --disable-ebpf" ;;
+ "--disable-cloud")
+ if [ -n "${NETDATA_REQUIRE_CLOUD}" ]; then
+ echo "Cloud explicitly enabled, ignoring --disable-cloud."
+ else
+ NETDATA_DISABLE_CLOUD=1
+ NETDATA_CONFIGURE_OPTIONS="${NETDATA_CONFIGURE_OPTIONS//--disable-cloud/} --disable-cloud"
+ fi
+ ;;
+ "--require-cloud")
+ if [ -n "${NETDATA_DISABLE_CLOUD}" ]; then
+ echo "Cloud explicitly disabled, ignoring --require-cloud."
+ else
+ NETDATA_REQUIRE_CLOUD=1
+ NETDATA_CONFIGURE_OPTIONS="${NETDATA_CONFIGURE_OPTIONS//--enable-cloud/} --enable-cloud"
+ fi
+ ;;
+ "--build-json-c")
+ NETDATA_BUILD_JSON_C=1
+ ;;
+ "--build-judy")
+ NETDATA_BUILD_JUDY=1
+ ;;
+ "--install")
+ NETDATA_PREFIX="${2}/netdata"
+ shift 1
+ ;;
+ "--help" | "-h")
+ usage
+ exit 1
+ ;;
+ *)
+ run_failed "I cannot understand option '$1'."
+ usage
+ exit 1
+ ;;
+ esac
+ shift 1
+done
+
+make="make"
+# See: https://github.com/netdata/netdata/issues/9163
+if [ "$(uname -s)" = "FreeBSD" ]; then
+ make="gmake"
+ NETDATA_CONFIGURE_OPTIONS="$NETDATA_CONFIGURE_OPTIONS --disable-dependency-tracking"
+fi
+
+# replace multiple spaces with a single space
+NETDATA_CONFIGURE_OPTIONS="${NETDATA_CONFIGURE_OPTIONS// / }"
+
+if [ "${UID}" -ne 0 ]; then
+ if [ -z "${NETDATA_PREFIX}" ]; then
+ netdata_banner "wrong command line options!"
+ banner_nonroot_install "${@}"
+ exit 1
+ else
+ banner_root_notify "${@}"
+ fi
+fi
+
+netdata_banner "real-time performance monitoring, done right!"
+cat << BANNER1
+
+ You are about to build and install netdata to your system.
+
+ The build process will use ${TPUT_CYAN}${TMPDIR}${TPUT_RESET} for
+ any temporary files. You can override this by setting \$TMPDIR to a
+ writable directory where you can execute files.
+
+ It will be installed at these locations:
+
+ - the daemon at ${TPUT_CYAN}${NETDATA_PREFIX}/usr/sbin/netdata${TPUT_RESET}
+ - config files in ${TPUT_CYAN}${NETDATA_PREFIX}/etc/netdata${TPUT_RESET}
+ - web files in ${TPUT_CYAN}${NETDATA_PREFIX}/usr/share/netdata${TPUT_RESET}
+ - plugins in ${TPUT_CYAN}${NETDATA_PREFIX}/usr/libexec/netdata${TPUT_RESET}
+ - cache files in ${TPUT_CYAN}${NETDATA_PREFIX}/var/cache/netdata${TPUT_RESET}
+ - db files in ${TPUT_CYAN}${NETDATA_PREFIX}/var/lib/netdata${TPUT_RESET}
+ - log files in ${TPUT_CYAN}${NETDATA_PREFIX}/var/log/netdata${TPUT_RESET}
+BANNER1
+
+[ "${UID}" -eq 0 ] && cat << BANNER2
+ - pid file at ${TPUT_CYAN}${NETDATA_PREFIX}/var/run/netdata.pid${TPUT_RESET}
+ - logrotate file at ${TPUT_CYAN}/etc/logrotate.d/netdata${TPUT_RESET}
+BANNER2
+
+cat << BANNER3
+
+ This installer allows you to change the installation path.
+ Press Control-C and run the same command with --help for help.
+
+BANNER3
+
+if [ -z "$NETDATA_DISABLE_TELEMETRY" ]; then
+ cat << BANNER4
+
+ ${TPUT_YELLOW}${TPUT_BOLD}NOTE${TPUT_RESET}:
+ Anonymous usage stats will be collected and sent to Google Analytics.
+ To opt-out, pass --disable-telemetry option to the installer or export
+ the environment variable DO_NOT_TRACK to a non-zero or non-empty value
+ (e.g: export DO_NOT_TRACK=1).
+
+BANNER4
+fi
+
+have_autotools=
+if [ "$(type autoreconf 2> /dev/null)" ]; then
+ autoconf_maj_min() {
+ local maj min IFS=.-
+
+ maj=$1
+ min=$2
+
+ set -- $(autoreconf -V | sed -ne '1s/.* \([^ ]*\)$/\1/p')
+ eval $maj=\$1 $min=\$2
+ }
+ autoconf_maj_min AMAJ AMIN
+
+ if [ "$AMAJ" -gt 2 ]; then
+ have_autotools=Y
+ elif [ "$AMAJ" -eq 2 -a "$AMIN" -ge 60 ]; then
+ have_autotools=Y
+ else
+ echo "Found autotools $AMAJ.$AMIN"
+ fi
+else
+ echo "No autotools found"
+fi
+
+if [ ! "$have_autotools" ]; then
+ if [ -f configure ]; then
+ echo "Will skip autoreconf step"
+ else
+ netdata_banner "autotools v2.60 required"
+ cat << "EOF"
+
+-------------------------------------------------------------------------------
+autotools 2.60 or later is required
+
+Sorry, you do not seem to have autotools 2.60 or later, which is
+required to build from the git sources of netdata.
+
+EOF
+ exit 1
+ fi
+fi
+
+if [ ${DONOTWAIT} -eq 0 ]; then
+ if [ -n "${NETDATA_PREFIX}" ]; then
+ echo -n "${TPUT_BOLD}${TPUT_GREEN}Press ENTER to build and install netdata to '${TPUT_CYAN}${NETDATA_PREFIX}${TPUT_YELLOW}'${TPUT_RESET} > "
+ else
+ echo -n "${TPUT_BOLD}${TPUT_GREEN}Press ENTER to build and install netdata to your system${TPUT_RESET} > "
+ fi
+ read -ern1
+ if [ "$REPLY" != '' ]; then
+ exit 1
+ fi
+
+fi
+
+build_error() {
+ netdata_banner "sorry, it failed to build..."
+ cat << EOF
+
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Sorry! netdata failed to build...
+
+You may need to check these:
+
+1. The package uuid-dev (or libuuid-devel) has to be installed.
+
+ If your system cannot find libuuid, although it is installed
+ run me with the option: --libs-are-really-here
+
+2. The package zlib1g-dev (or zlib-devel) has to be installed.
+
+ If your system cannot find zlib, although it is installed
+ run me with the option: --libs-are-really-here
+
+3. The package json-c-dev (or json-c-devel) has to be installed.
+
+ If your system cannot find json-c, although it is installed
+ run me with the option: --libs-are-really-here
+
+4. You need basic build tools installed, like:
+
+ gcc make autoconf automake pkg-config
+
+ Autoconf version 2.60 or higher is required.
+
+If you still cannot get it to build, ask for help at github:
+
+ https://github.com/netdata/netdata/issues
+
+
+EOF
+ trap - EXIT
+ exit 1
+}
+
+if [ ${LIBS_ARE_HERE} -eq 1 ]; then
+ shift
+ echo >&2 "ok, assuming libs are really installed."
+ export ZLIB_CFLAGS=" "
+ export ZLIB_LIBS="-lz"
+ export UUID_CFLAGS=" "
+ export UUID_LIBS="-luuid"
+fi
+
+trap build_error EXIT
+
+# -----------------------------------------------------------------------------
+
+build_libmosquitto() {
+ local env_cmd=''
+
+ if [ -z "${DONT_SCRUB_CFLAGS_EVEN_THOUGH_IT_MAY_BREAK_THINGS}" ]; then
+ env_cmd="env CFLAGS=-fPIC CXXFLAGS= LDFLAGS="
+ fi
+
+ if [ "$(uname -s)" = Linux ]; then
+ run ${env_cmd} make -C "${1}/lib"
+ else
+ pushd ${1} > /dev/null || return 1
+ if [ "$(uname)" = "Darwin" ] && [ -d /usr/local/opt/openssl ]; then
+ run ${env_cmd} cmake \
+ -D OPENSSL_ROOT_DIR=/usr/local/opt/openssl \
+ -D OPENSSL_LIBRARIES=/usr/local/opt/openssl/lib \
+ -D WITH_STATIC_LIBRARIES:boolean=YES \
+ .
+ else
+ run ${env_cmd} cmake -D WITH_STATIC_LIBRARIES:boolean=YES .
+ fi
+ run ${env_cmd} make -C lib
+ run mv lib/libmosquitto_static.a lib/libmosquitto.a
+ popd || return 1
+ fi
+}
+
+copy_libmosquitto() {
+ target_dir="${PWD}/externaldeps/mosquitto"
+
+ run mkdir -p "${target_dir}"
+
+ run cp "${1}/lib/libmosquitto.a" "${target_dir}"
+ run cp "${1}/lib/mosquitto.h" "${target_dir}"
+}
+
+bundle_libmosquitto() {
+ if [ -n "${NETDATA_DISABLE_CLOUD}" ]; then
+ echo "Skipping cloud"
+ return 0
+ fi
+
+ progress "Prepare custom libmosquitto version"
+
+ MOSQUITTO_PACKAGE_VERSION="$(cat packaging/mosquitto.version)"
+
+ tmp="$(mktemp -d -t netdata-mosquitto-XXXXXX)"
+ MOSQUITTO_PACKAGE_BASENAME="${MOSQUITTO_PACKAGE_VERSION}.tar.gz"
+
+ if fetch_and_verify "mosquitto" \
+ "https://github.com/netdata/mosquitto/archive/${MOSQUITTO_PACKAGE_BASENAME}" \
+ "${MOSQUITTO_PACKAGE_BASENAME}" \
+ "${tmp}" \
+ "${NETDATA_LOCAL_TARBALL_OVERRIDE_MOSQUITTO}"; then
+ if run tar -xf "${tmp}/${MOSQUITTO_PACKAGE_BASENAME}" -C "${tmp}" &&
+ build_libmosquitto "${tmp}/mosquitto-${MOSQUITTO_PACKAGE_VERSION}" &&
+ copy_libmosquitto "${tmp}/mosquitto-${MOSQUITTO_PACKAGE_VERSION}" &&
+ rm -rf "${tmp}"; then
+ run_ok "libmosquitto built and prepared."
+ else
+ run_failed "Failed to build libmosquitto."
+ if [ -n "${NETDATA_REQUIRE_CLOUD}" ]; then
+ exit 1
+ else
+ defer_error_highlighted "Unable to fetch sources for libmosquitto. You will not be able to connect this node to Netdata Cloud."
+ fi
+ fi
+ else
+ run_failed "Unable to fetch sources for libmosquitto."
+ if [ -n "${NETDATA_REQUIRE_CLOUD}" ]; then
+ exit 1
+ else
+ defer_error_highlighted "Unable to fetch sources for libmosquitto. You will not be able to connect this node to Netdata Cloud."
+ fi
+ fi
+}
+
+bundle_libmosquitto
+
+# -----------------------------------------------------------------------------
+
+build_libwebsockets() {
+ local env_cmd=''
+
+ if [ -z "${DONT_SCRUB_CFLAGS_EVEN_THOUGH_IT_MAY_BREAK_THINGS}" ]; then
+ env_cmd="env CFLAGS=-fPIC CXXFLAGS= LDFLAGS="
+ fi
+
+ pushd "${1}" > /dev/null || exit 1
+
+ if [ "$(uname)" = "Darwin" ]; then
+ run patch -p1 << "EOF"
+--- a/lib/plat/unix/private.h
++++ b/lib/plat/unix/private.h
+@@ -164,6 +164,8 @@ delete_from_fd(const struct lws_context *context, int fd);
+ * but happily have something equivalent in the SO_NOSIGPIPE flag.
+ */
+ #ifdef __APPLE__
++/* iOS SDK 12+ seems to define it, undef it for compatibility both ways */
++#undef MSG_NOSIGNAL
+ #define MSG_NOSIGNAL SO_NOSIGPIPE
+ #endif
+EOF
+
+ # shellcheck disable=SC2181
+ if [ $? -ne 0 ]; then
+ return 1
+ fi
+ fi
+
+ if [ "$(uname)" = "Darwin" ] && [ -d /usr/local/opt/openssl ]; then
+ run ${env_cmd} cmake \
+ -D OPENSSL_ROOT_DIR=/usr/local/opt/openssl \
+ -D OPENSSL_LIBRARIES=/usr/local/opt/openssl/lib \
+ -D LWS_WITH_SOCKS5:bool=ON \
+ $CMAKE_FLAGS \
+ .
+ else
+ run ${env_cmd} cmake -D LWS_WITH_SOCKS5:bool=ON $CMAKE_FLAGS .
+ fi
+ run ${env_cmd} make
+ popd > /dev/null || exit 1
+}
+
+copy_libwebsockets() {
+ target_dir="${PWD}/externaldeps/libwebsockets"
+
+ run mkdir -p "${target_dir}" || return 1
+
+ run cp "${1}/lib/libwebsockets.a" "${target_dir}/libwebsockets.a" || return 1
+ run cp -r "${1}/include" "${target_dir}" || return 1
+}
+
+bundle_libwebsockets() {
+ if [ -n "${NETDATA_DISABLE_CLOUD}" ] || [ -n "${USE_SYSTEM_LWS}" ]; then
+ return 0
+ fi
+
+ if [ -z "$(command -v cmake)" ]; then
+ run_failed "Could not find cmake, which is required to build libwebsockets. The install process will continue, but you may not be able to connect this node to Netdata Cloud."
+ defer_error_highlighted "Could not find cmake, which is required to build libwebsockets. The install process will continue, but you may not be able to connect this node to Netdata Cloud."
+ return 0
+ fi
+
+ progress "Prepare libwebsockets"
+
+ LIBWEBSOCKETS_PACKAGE_VERSION="$(cat packaging/libwebsockets.version)"
+
+ tmp="$(mktemp -d -t netdata-libwebsockets-XXXXXX)"
+ LIBWEBSOCKETS_PACKAGE_BASENAME="v${LIBWEBSOCKETS_PACKAGE_VERSION}.tar.gz"
+
+ if fetch_and_verify "libwebsockets" \
+ "https://github.com/warmcat/libwebsockets/archive/${LIBWEBSOCKETS_PACKAGE_BASENAME}" \
+ "${LIBWEBSOCKETS_PACKAGE_BASENAME}" \
+ "${tmp}" \
+ "${NETDATA_LOCAL_TARBALL_OVERRIDE_LIBWEBSOCKETS}"; then
+ if run tar -xf "${tmp}/${LIBWEBSOCKETS_PACKAGE_BASENAME}" -C "${tmp}" &&
+ build_libwebsockets "${tmp}/libwebsockets-${LIBWEBSOCKETS_PACKAGE_VERSION}" &&
+ copy_libwebsockets "${tmp}/libwebsockets-${LIBWEBSOCKETS_PACKAGE_VERSION}" &&
+ rm -rf "${tmp}"; then
+ run_ok "libwebsockets built and prepared."
+ NETDATA_CONFIGURE_OPTIONS="${NETDATA_CONFIGURE_OPTIONS} --with-bundled-lws=externaldeps/libwebsockets"
+ else
+ run_failed "Failed to build libwebsockets."
+ if [ -n "${NETDATA_REQUIRE_CLOUD}" ]; then
+ exit 1
+ else
+ defer_error_highlighted "Failed to build libwebsockets. You may not be able to connect this node to Netdata Cloud."
+ fi
+ fi
+ else
+ run_failed "Unable to fetch sources for libwebsockets."
+ if [ -n "${NETDATA_REQUIRE_CLOUD}" ]; then
+ exit 1
+ else
+ defer_error_highlighted "Unable to fetch sources for libwebsockets. You may not be able to connect this node to Netdata Cloud."
+ fi
+ fi
+}
+
+bundle_libwebsockets
+
+# -----------------------------------------------------------------------------
+
+build_judy() {
+ local env_cmd=''
+ local libtoolize="libtoolize"
+
+ if [ -z "${DONT_SCRUB_CFLAGS_EVEN_THOUGH_IT_MAY_BREAK_THINGS}" ]; then
+ env_cmd="env CFLAGS=-fPIC CXXFLAGS= LDFLAGS="
+ fi
+
+ if [ "$(uname)" = "Darwin" ]; then
+ libtoolize="glibtoolize"
+ fi
+
+ pushd "${1}" > /dev/null || return 1
+ if run ${env_cmd} ${libtoolize} --force --copy &&
+ run ${env_cmd} aclocal &&
+ run ${env_cmd} autoheader &&
+ run ${env_cmd} automake --add-missing --force --copy --include-deps &&
+ run ${env_cmd} autoconf &&
+ run ${env_cmd} ./configure &&
+ run ${env_cmd} make -C src &&
+ run ${env_cmd} ar -r src/libJudy.a src/Judy*/*.o; then
+ popd > /dev/null || return 1
+ else
+ popd > /dev/null || return 1
+ return 1
+ fi
+}
+
+copy_judy() {
+ target_dir="${PWD}/externaldeps/libJudy"
+
+ run mkdir -p "${target_dir}" || return 1
+
+ run cp "${1}/src/libJudy.a" "${target_dir}/libJudy.a" || return 1
+ run cp "${1}/src/Judy.h" "${target_dir}/Judy.h" || return 1
+}
+
+bundle_judy() {
+  # If the --build-judy flag was given, or there is no Judy on the system and we are building the dbengine, bundle our own libJudy.
+ # shellcheck disable=SC2235
+ if [ -n "${NETDATA_DISABLE_DBENGINE}" ] || ([ -z "${NETDATA_BUILD_JUDY}" ] && [ -e /usr/include/Judy.h ]); then
+ return 0
+ elif [ -n "${NETDATA_BUILD_JUDY}" ]; then
+ progress "User requested bundling of libJudy, building it now"
+ elif [ ! -e /usr/include/Judy.h ]; then
+ progress "/usr/include/Judy.h does not exist, but we need libJudy, building our own copy"
+ fi
+
+ progress "Prepare libJudy"
+
+ JUDY_PACKAGE_VERSION="$(cat packaging/judy.version)"
+
+ tmp="$(mktemp -d -t netdata-judy-XXXXXX)"
+ JUDY_PACKAGE_BASENAME="v${JUDY_PACKAGE_VERSION}.tar.gz"
+
+ if fetch_and_verify "judy" \
+ "https://github.com/netdata/libjudy/archive/${JUDY_PACKAGE_BASENAME}" \
+ "${JUDY_PACKAGE_BASENAME}" \
+ "${tmp}" \
+ "${NETDATA_LOCAL_TARBALL_OVERRIDE_JUDY}"; then
+ if run tar -xf "${tmp}/${JUDY_PACKAGE_BASENAME}" -C "${tmp}" &&
+ build_judy "${tmp}/libjudy-${JUDY_PACKAGE_VERSION}" &&
+ copy_judy "${tmp}/libjudy-${JUDY_PACKAGE_VERSION}" &&
+ rm -rf "${tmp}"; then
+ run_ok "libJudy built and prepared."
+ NETDATA_CONFIGURE_OPTIONS="${NETDATA_CONFIGURE_OPTIONS} --with-libJudy=externaldeps/libJudy"
+ else
+ run_failed "Failed to build libJudy."
+ if [ -n "${NETDATA_BUILD_JUDY}" ]; then
+ exit 1
+ else
+ defer_error_highlighted "Failed to build libJudy. dbengine support will be disabled."
+ fi
+ fi
+ else
+ run_failed "Unable to fetch sources for libJudy."
+ if [ -n "${NETDATA_BUILD_JUDY}" ]; then
+ exit 1
+ else
+ defer_error_highlighted "Unable to fetch sources for libJudy. dbengine support will be disabled."
+ fi
+ fi
+}
+
+bundle_judy
+
+# -----------------------------------------------------------------------------
+
+build_jsonc() {
+ local env_cmd=''
+
+ if [ -z "${DONT_SCRUB_CFLAGS_EVEN_THOUGH_IT_MAY_BREAK_THINGS}" ]; then
+ env_cmd="env CFLAGS=-fPIC CXXFLAGS= LDFLAGS="
+ fi
+
+ pushd "${1}" > /dev/null || exit 1
+ run ${env_cmd} cmake -DBUILD_SHARED_LIBS=OFF .
+ run ${env_cmd} make
+ popd > /dev/null || exit 1
+}
+
+copy_jsonc() {
+ target_dir="${PWD}/externaldeps/jsonc"
+
+ run mkdir -p "${target_dir}" "${target_dir}/json-c" || return 1
+
+ run cp "${1}/libjson-c.a" "${target_dir}/libjson-c.a" || return 1
+ run cp ${1}/*.h "${target_dir}/json-c" || return 1
+}
+
+bundle_jsonc() {
+  # If the --build-json-c flag was given, or json-c is not present on the system, bundle our own json-c.
+ if [ -z "${NETDATA_BUILD_JSON_C}" ] && pkg-config json-c; then
+ return 0
+ fi
+
+ if [ -z "$(command -v cmake)" ]; then
+ run_failed "Could not find cmake, which is required to build JSON-C. The install process will continue, but Netdata Cloud support will be disabled."
+ defer_error_highlighted "Could not find cmake, which is required to build JSON-C. The install process will continue, but Netdata Cloud support will be disabled."
+ return 0
+ fi
+
+ progress "Prepare JSON-C"
+
+ JSONC_PACKAGE_VERSION="$(cat packaging/jsonc.version)"
+
+ tmp="$(mktemp -d -t netdata-jsonc-XXXXXX)"
+ JSONC_PACKAGE_BASENAME="json-c-${JSONC_PACKAGE_VERSION}.tar.gz"
+
+ if fetch_and_verify "jsonc" \
+ "https://github.com/json-c/json-c/archive/${JSONC_PACKAGE_BASENAME}" \
+ "${JSONC_PACKAGE_BASENAME}" \
+ "${tmp}" \
+ "${NETDATA_LOCAL_TARBALL_OVERRIDE_JSONC}"; then
+ if run tar -xf "${tmp}/${JSONC_PACKAGE_BASENAME}" -C "${tmp}" &&
+ build_jsonc "${tmp}/json-c-json-c-${JSONC_PACKAGE_VERSION}" &&
+ copy_jsonc "${tmp}/json-c-json-c-${JSONC_PACKAGE_VERSION}" &&
+ rm -rf "${tmp}"; then
+ run_ok "JSON-C built and prepared."
+ else
+ run_failed "Failed to build JSON-C."
+ if [ -n "${NETDATA_REQUIRE_CLOUD}" ]; then
+ exit 1
+ else
+ defer_error_highlighted "Failed to build JSON-C. Netdata Cloud support will be disabled."
+ fi
+ fi
+ else
+ run_failed "Unable to fetch sources for JSON-C."
+ if [ -n "${NETDATA_REQUIRE_CLOUD}" ]; then
+ exit 1
+ else
+ defer_error_highlighted "Unable to fetch sources for JSON-C. Netdata Cloud support will be disabled."
+ fi
+ fi
+}
+
+bundle_jsonc
+
+# -----------------------------------------------------------------------------
+
+build_libbpf() {
+ pushd "${1}/src" > /dev/null || exit 1
+ run env CFLAGS=-fPIC CXXFLAGS= LDFLAGS= BUILD_STATIC_ONLY=y OBJDIR=build DESTDIR=.. make install
+ popd > /dev/null || exit 1
+}
+
+copy_libbpf() {
+ target_dir="${PWD}/externaldeps/libbpf"
+
+ if [ "$(uname -m)" = x86_64 ]; then
+ lib_subdir="lib64"
+ else
+ lib_subdir="lib"
+ fi
+
+ run mkdir -p "${target_dir}" || return 1
+
+ run cp "${1}/usr/${lib_subdir}/libbpf.a" "${target_dir}/libbpf.a" || return 1
+ run cp -r "${1}/usr/include" "${target_dir}" || return 1
+}
+
+bundle_libbpf() {
+ if { [ -n "${NETDATA_DISABLE_EBPF}" ] && [ ${NETDATA_DISABLE_EBPF} = 1 ]; } || [ "$(uname -s)" != Linux ]; then
+ return 0
+ fi
+
+ progress "Prepare libbpf"
+
+ LIBBPF_PACKAGE_VERSION="$(cat packaging/libbpf.version)"
+
+ tmp="$(mktemp -d -t netdata-libbpf-XXXXXX)"
+ LIBBPF_PACKAGE_BASENAME="v${LIBBPF_PACKAGE_VERSION}.tar.gz"
+
+ if fetch_and_verify "libbpf" \
+ "https://github.com/netdata/libbpf/archive/${LIBBPF_PACKAGE_BASENAME}" \
+ "${LIBBPF_PACKAGE_BASENAME}" \
+ "${tmp}" \
+ "${NETDATA_LOCAL_TARBALL_OVERRIDE_LIBBPF}"; then
+ if run tar -xf "${tmp}/${LIBBPF_PACKAGE_BASENAME}" -C "${tmp}" &&
+ build_libbpf "${tmp}/libbpf-${LIBBPF_PACKAGE_VERSION}" &&
+ copy_libbpf "${tmp}/libbpf-${LIBBPF_PACKAGE_VERSION}" &&
+ rm -rf "${tmp}"; then
+ run_ok "libbpf built and prepared."
+ else
+ run_failed "Failed to build libbpf."
+ if [ -n "${NETDATA_DISABLE_EBPF}" ] && [ ${NETDATA_DISABLE_EBPF} = 0 ]; then
+ exit 1
+ else
+ defer_error_highlighted "Failed to build libbpf. You may not be able to use eBPF plugin."
+ fi
+ fi
+ else
+ run_failed "Unable to fetch sources for libbpf."
+ if [ -n "${NETDATA_DISABLE_EBPF}" ] && [ ${NETDATA_DISABLE_EBPF} = 0 ]; then
+ exit 1
+ else
+ defer_error_highlighted "Unable to fetch sources for libbpf. You may not be able to use eBPF plugin."
+ fi
+ fi
+}
+
+bundle_libbpf
+
+# -----------------------------------------------------------------------------
+# If we have the dashboard switching logic, make sure we're on the classic
+# dashboard during the install (updates don't work correctly otherwise).
+if [ -x "${NETDATA_PREFIX}/usr/libexec/netdata-switch-dashboard.sh" ]; then
+ "${NETDATA_PREFIX}/usr/libexec/netdata-switch-dashboard.sh" classic
+fi
+
+# -----------------------------------------------------------------------------
+# By default, `git` does not update local tags based on remotes. Because
+# we use the most recent tag as part of our version determination in
+# our build, this can lead to strange versions that look ancient but are
+# actually really recent. To avoid this, try and fetch tags if we're
+# working in a git checkout.
+if [ -d ./.git ] ; then
+ echo >&2
+ progress "Updating tags in git to ensure a consistent version number"
+  # assuming the conventional remote name "origin"
+  run git fetch origin 'refs/tags/*:refs/tags/*' || true
+fi
+
+# -----------------------------------------------------------------------------
+echo >&2
+progress "Run autotools to configure the build environment"
+
+if [ "$have_autotools" ]; then
+ run autoreconf -ivf || exit 1
+fi
+
+run ./configure \
+ --prefix="${NETDATA_PREFIX}/usr" \
+ --sysconfdir="${NETDATA_PREFIX}/etc" \
+ --localstatedir="${NETDATA_PREFIX}/var" \
+ --libexecdir="${NETDATA_PREFIX}/usr/libexec" \
+ --libdir="${NETDATA_PREFIX}/usr/lib" \
+ --with-zlib \
+ --with-math \
+ --with-user=netdata \
+ ${NETDATA_CONFIGURE_OPTIONS} \
+ CFLAGS="${CFLAGS}" LDFLAGS="${LDFLAGS}" || exit 1
+
+# remove the build_error hook
+trap - EXIT
+
+# -----------------------------------------------------------------------------
+progress "Cleanup compilation directory"
+
+run $make clean
+
+# -----------------------------------------------------------------------------
+progress "Compile netdata"
+
+run $make -j$(find_processors) || exit 1
+
+# -----------------------------------------------------------------------------
+progress "Migrate configuration files for node.d.plugin and charts.d.plugin"
+
+# migrate existing configuration files
+# for node.d and charts.d
+if [ -d "${NETDATA_PREFIX}/etc/netdata" ]; then
+ # the configuration directory exists
+
+ if [ ! -d "${NETDATA_PREFIX}/etc/netdata/charts.d" ]; then
+ run mkdir "${NETDATA_PREFIX}/etc/netdata/charts.d"
+ fi
+
+ # move the charts.d config files
+ for x in apache ap cpu_apps cpufreq example exim hddtemp load_average mem_apps mysql nginx nut opensips phpfpm postfix sensors squid tomcat; do
+ for y in "" ".old" ".orig"; do
+ if [ -f "${NETDATA_PREFIX}/etc/netdata/${x}.conf${y}" -a ! -f "${NETDATA_PREFIX}/etc/netdata/charts.d/${x}.conf${y}" ]; then
+ run mv -f "${NETDATA_PREFIX}/etc/netdata/${x}.conf${y}" "${NETDATA_PREFIX}/etc/netdata/charts.d/${x}.conf${y}"
+ fi
+ done
+ done
+
+ if [ ! -d "${NETDATA_PREFIX}/etc/netdata/node.d" ]; then
+ run mkdir "${NETDATA_PREFIX}/etc/netdata/node.d"
+ fi
+
+ # move the node.d config files
+ for x in named sma_webbox snmp; do
+ for y in "" ".old" ".orig"; do
+ if [ -f "${NETDATA_PREFIX}/etc/netdata/${x}.conf${y}" -a ! -f "${NETDATA_PREFIX}/etc/netdata/node.d/${x}.conf${y}" ]; then
+ run mv -f "${NETDATA_PREFIX}/etc/netdata/${x}.conf${y}" "${NETDATA_PREFIX}/etc/netdata/node.d/${x}.conf${y}"
+ fi
+ done
+ done
+fi
+
+# -----------------------------------------------------------------------------
+
+# shellcheck disable=SC2230
+md5sum="$(command -v md5sum 2> /dev/null || command -v md5 2> /dev/null)"
+
+deleted_stock_configs=0
+if [ ! -f "${NETDATA_PREFIX}/etc/netdata/.installer-cleanup-of-stock-configs-done" ]; then
+
+ progress "Backup existing netdata configuration before installing it"
+
+ if [ "${BASH_VERSINFO[0]}" -ge "4" ]; then
+ declare -A configs_signatures=()
+ if [ -f "configs.signatures" ]; then
+ source "configs.signatures" || echo >&2 "ERROR: Failed to load configs.signatures !"
+ fi
+ fi
+
+ config_signature_matches() {
+ local md5="${1}" file="${2}"
+
+ if [ "${BASH_VERSINFO[0]}" -ge "4" ]; then
+ [ "${configs_signatures[${md5}]}" = "${file}" ] && return 0
+ return 1
+ fi
+
+ if [ -f "configs.signatures" ]; then
+ grep "\['${md5}'\]='${file}'" "configs.signatures" > /dev/null
+ return $?
+ fi
+
+ return 1
+ }
+
+ # clean up stock config files from the user configuration directory
+ while IFS= read -r -d '' x; do
+ if [ -f "${x}" ]; then
+      # find its relative filename
+ f="${x/${NETDATA_PREFIX}\/etc\/netdata\//}"
+
+ # find the stock filename
+ t="${f/.conf.installer_backup.*/.conf}"
+ t="${t/.conf.old/.conf}"
+ t="${t/.conf.orig/.conf}"
+ t="${t/orig\//}"
+
+ if [ -z "${md5sum}" -o ! -x "${md5sum}" ]; then
+ # we don't have md5sum - keep it
+ echo >&2 "File '${TPUT_CYAN}${x}${TPUT_RESET}' ${TPUT_RED}is not known to distribution${TPUT_RESET}. Keeping it."
+ else
+ # find its checksum
+ md5="$(${md5sum} < "${x}" | cut -d ' ' -f 1)"
+
+ if config_signature_matches "${md5}" "${t}"; then
+ # it is a stock version - remove it
+ echo >&2 "File '${TPUT_CYAN}${x}${TPUT_RESET}' is stock version of '${t}'."
+ run rm -f "${x}"
+ deleted_stock_configs=$((deleted_stock_configs + 1))
+ else
+ # edited by user - keep it
+ echo >&2 "File '${TPUT_CYAN}${x}${TPUT_RESET}' ${TPUT_RED} does not match stock of${TPUT_RESET} ${TPUT_CYAN}'${t}'${TPUT_RESET}. Keeping it."
+ fi
+ fi
+ fi
+ done < <(find -L "${NETDATA_PREFIX}/etc/netdata" -type f -not -path '*/\.*' -not -path "${NETDATA_PREFIX}/etc/netdata/orig/*" \( -name '*.conf.old' -o -name '*.conf' -o -name '*.conf.orig' -o -name '*.conf.installer_backup.*' \))
+fi
+touch "${NETDATA_PREFIX}/etc/netdata/.installer-cleanup-of-stock-configs-done"
+
+# -----------------------------------------------------------------------------
+progress "Install netdata"
+
+run $make install || exit 1
+
+# -----------------------------------------------------------------------------
+progress "Fix generated files permissions"
+
+run find ./system/ -type f -a \! -name \*.in -a \! -name Makefile\* -a \! -name \*.conf -a \! -name \*.service -a \! -name \*.timer -a \! -name \*.logrotate -exec chmod 755 {} \;
+
+# -----------------------------------------------------------------------------
+progress "Creating standard user and groups for netdata"
+
+NETDATA_WANTED_GROUPS="docker nginx varnish haproxy adm nsd proxy squid ceph nobody"
+NETDATA_ADDED_TO_GROUPS=""
+if [ "${UID}" -eq 0 ]; then
+ progress "Adding group 'netdata'"
+ portable_add_group netdata || :
+
+ progress "Adding user 'netdata'"
+ portable_add_user netdata "${NETDATA_PREFIX}/var/lib/netdata" || :
+
+ progress "Assign user 'netdata' to required groups"
+ for g in ${NETDATA_WANTED_GROUPS}; do
+ # shellcheck disable=SC2086
+ portable_add_user_to_group ${g} netdata && NETDATA_ADDED_TO_GROUPS="${NETDATA_ADDED_TO_GROUPS} ${g}"
+ done
+else
+ run_failed "The installer does not run as root. Nothing to do for user and groups"
+fi
+
+# -----------------------------------------------------------------------------
+progress "Install logrotate configuration for netdata"
+
+install_netdata_logrotate
+
+# -----------------------------------------------------------------------------
+progress "Read installation options from netdata.conf"
+
+# create an empty config if it does not exist
+[ ! -f "${NETDATA_PREFIX}/etc/netdata/netdata.conf" ] &&
+ touch "${NETDATA_PREFIX}/etc/netdata/netdata.conf"
+
+# function to extract values from the config file
+config_option() {
+ local section="${1}" key="${2}" value="${3}"
+
+ if [ -s "${NETDATA_PREFIX}/etc/netdata/netdata.conf" ]; then
+ "${NETDATA_PREFIX}/usr/sbin/netdata" \
+ -c "${NETDATA_PREFIX}/etc/netdata/netdata.conf" \
+ -W get "${section}" "${key}" "${value}" ||
+ echo "${value}"
+ else
+ echo "${value}"
+ fi
+}
+
+# the user netdata will run as
+if [ "${UID}" = "0" ]; then
+ NETDATA_USER="$(config_option "global" "run as user" "netdata")"
+ ROOT_USER="root"
+else
+ NETDATA_USER="${USER}"
+ ROOT_USER="${USER}"
+fi
+NETDATA_GROUP="$(id -g -n "${NETDATA_USER}")"
+[ -z "${NETDATA_GROUP}" ] && NETDATA_GROUP="${NETDATA_USER}"
+echo >&2 "Netdata user and group are finally set to: ${NETDATA_USER}/${NETDATA_GROUP}"
+
+# the owners of the web files
+NETDATA_WEB_USER="$(config_option "web" "web files owner" "${NETDATA_USER}")"
+NETDATA_WEB_GROUP="${NETDATA_GROUP}"
+if [ "${UID}" = "0" ] && [ "${NETDATA_USER}" != "${NETDATA_WEB_USER}" ]; then
+ NETDATA_WEB_GROUP="$(id -g -n "${NETDATA_WEB_USER}")"
+ [ -z "${NETDATA_WEB_GROUP}" ] && NETDATA_WEB_GROUP="${NETDATA_WEB_USER}"
+fi
+NETDATA_WEB_GROUP="$(config_option "web" "web files group" "${NETDATA_WEB_GROUP}")"
+
+# port
+defport=19999
+NETDATA_PORT="$(config_option "web" "default port" ${defport})"
+
+# directories
+NETDATA_LIB_DIR="$(config_option "global" "lib directory" "${NETDATA_PREFIX}/var/lib/netdata")"
+NETDATA_CACHE_DIR="$(config_option "global" "cache directory" "${NETDATA_PREFIX}/var/cache/netdata")"
+NETDATA_WEB_DIR="$(config_option "global" "web files directory" "${NETDATA_PREFIX}/usr/share/netdata/web")"
+NETDATA_LOG_DIR="$(config_option "global" "log directory" "${NETDATA_PREFIX}/var/log/netdata")"
+NETDATA_USER_CONFIG_DIR="$(config_option "global" "config directory" "${NETDATA_PREFIX}/etc/netdata")"
+NETDATA_STOCK_CONFIG_DIR="$(config_option "global" "stock config directory" "${NETDATA_PREFIX}/usr/lib/netdata/conf.d")"
+NETDATA_RUN_DIR="${NETDATA_PREFIX}/var/run"
+NETDATA_CLAIMING_DIR="${NETDATA_LIB_DIR}/cloud.d"
+
+cat << OPTIONSEOF
+
+ Permissions
+ - netdata user : ${NETDATA_USER}
+ - netdata group : ${NETDATA_GROUP}
+ - web files user : ${NETDATA_WEB_USER}
+ - web files group : ${NETDATA_WEB_GROUP}
+ - root user : ${ROOT_USER}
+
+ Directories
+ - netdata user config dir : ${NETDATA_USER_CONFIG_DIR}
+ - netdata stock config dir : ${NETDATA_STOCK_CONFIG_DIR}
+ - netdata log dir : ${NETDATA_LOG_DIR}
+ - netdata run dir : ${NETDATA_RUN_DIR}
+ - netdata lib dir : ${NETDATA_LIB_DIR}
+ - netdata web dir : ${NETDATA_WEB_DIR}
+ - netdata cache dir : ${NETDATA_CACHE_DIR}
+
+ Other
+ - netdata port : ${NETDATA_PORT}
+
+OPTIONSEOF
+
+# -----------------------------------------------------------------------------
+progress "Fix permissions of netdata directories (using user '${NETDATA_USER}')"
+
+if [ ! -d "${NETDATA_RUN_DIR}" ]; then
+ # this is needed if NETDATA_PREFIX is not empty
+ run mkdir -p "${NETDATA_RUN_DIR}" || exit 1
+fi
+
+# --- stock conf dir ----
+
+[ ! -d "${NETDATA_STOCK_CONFIG_DIR}" ] && mkdir -p "${NETDATA_STOCK_CONFIG_DIR}"
+
+helplink="000.-.USE.THE.orig.LINK.TO.COPY.AND.EDIT.STOCK.CONFIG.FILES"
+[ ${deleted_stock_configs} -eq 0 ] && helplink=""
+for link in "orig" "${helplink}"; do
+ if [ -n "${link}" ]; then
+ [ -L "${NETDATA_USER_CONFIG_DIR}/${link}" ] && run rm -f "${NETDATA_USER_CONFIG_DIR}/${link}"
+ run ln -s "${NETDATA_STOCK_CONFIG_DIR}" "${NETDATA_USER_CONFIG_DIR}/${link}"
+ fi
+done
+
+# --- web dir ----
+
+if [ ! -d "${NETDATA_WEB_DIR}" ]; then
+ echo >&2 "Creating directory '${NETDATA_WEB_DIR}'"
+ run mkdir -p "${NETDATA_WEB_DIR}" || exit 1
+fi
+run chown -R "${NETDATA_WEB_USER}:${NETDATA_WEB_GROUP}" "${NETDATA_WEB_DIR}"
+run find "${NETDATA_WEB_DIR}" -type f -exec chmod 0664 {} \;
+run find "${NETDATA_WEB_DIR}" -type d -exec chmod 0775 {} \;
+
+# --- data dirs ----
+
+for x in "${NETDATA_LIB_DIR}" "${NETDATA_CACHE_DIR}" "${NETDATA_LOG_DIR}"; do
+ if [ ! -d "${x}" ]; then
+ echo >&2 "Creating directory '${x}'"
+ run mkdir -p "${x}" || exit 1
+ fi
+
+ run chown -R "${NETDATA_USER}:${NETDATA_GROUP}" "${x}"
+ #run find "${x}" -type f -exec chmod 0660 {} \;
+ #run find "${x}" -type d -exec chmod 0770 {} \;
+done
+
+run chmod 755 "${NETDATA_LOG_DIR}"
+
+# --- claiming dir ----
+
+if [ ! -d "${NETDATA_CLAIMING_DIR}" ]; then
+ echo >&2 "Creating directory '${NETDATA_CLAIMING_DIR}'"
+ run mkdir -p "${NETDATA_CLAIMING_DIR}" || exit 1
+fi
+run chown -R "${NETDATA_USER}:${NETDATA_GROUP}" "${NETDATA_CLAIMING_DIR}"
+run chmod 770 "${NETDATA_CLAIMING_DIR}"
+
+# --- plugins ----
+
+if [ "${UID}" -eq 0 ]; then
+ # find the admin group
+ admin_group=
+ test -z "${admin_group}" && getent group root > /dev/null 2>&1 && admin_group="root"
+ test -z "${admin_group}" && getent group daemon > /dev/null 2>&1 && admin_group="daemon"
+ test -z "${admin_group}" && admin_group="${NETDATA_GROUP}"
+
+ run chown "${NETDATA_USER}:${admin_group}" "${NETDATA_LOG_DIR}"
+ run chown -R "root:${admin_group}" "${NETDATA_PREFIX}/usr/libexec/netdata"
+ run find "${NETDATA_PREFIX}/usr/libexec/netdata" -type d -exec chmod 0755 {} \;
+ run find "${NETDATA_PREFIX}/usr/libexec/netdata" -type f -exec chmod 0644 {} \;
+ run find "${NETDATA_PREFIX}/usr/libexec/netdata" -type f -a -name \*.plugin -exec chown :${NETDATA_GROUP} {} \;
+ run find "${NETDATA_PREFIX}/usr/libexec/netdata" -type f -a -name \*.plugin -exec chmod 0750 {} \;
+ run find "${NETDATA_PREFIX}/usr/libexec/netdata" -type f -a -name \*.sh -exec chmod 0755 {} \;
+
+ if [ -f "${NETDATA_PREFIX}/usr/libexec/netdata/plugins.d/apps.plugin" ]; then
+ run chown "root:${NETDATA_GROUP}" "${NETDATA_PREFIX}/usr/libexec/netdata/plugins.d/apps.plugin"
+ capabilities=0
+ if ! iscontainer && command -v setcap 1> /dev/null 2>&1; then
+ run chmod 0750 "${NETDATA_PREFIX}/usr/libexec/netdata/plugins.d/apps.plugin"
+ if run setcap cap_dac_read_search,cap_sys_ptrace+ep "${NETDATA_PREFIX}/usr/libexec/netdata/plugins.d/apps.plugin"; then
+      # if setcap succeeded but apps.plugin still fails to run with capabilities, fall back to setuid root
+ "${NETDATA_PREFIX}/usr/libexec/netdata/plugins.d/apps.plugin" -t > /dev/null 2>&1 && capabilities=1 || capabilities=0
+ fi
+ fi
+
+ if [ $capabilities -eq 0 ]; then
+ # fix apps.plugin to be setuid to root
+ run chmod 4750 "${NETDATA_PREFIX}/usr/libexec/netdata/plugins.d/apps.plugin"
+ fi
+ fi
+
+ if [ -f "${NETDATA_PREFIX}/usr/libexec/netdata/plugins.d/freeipmi.plugin" ]; then
+ run chown "root:${NETDATA_GROUP}" "${NETDATA_PREFIX}/usr/libexec/netdata/plugins.d/freeipmi.plugin"
+ run chmod 4750 "${NETDATA_PREFIX}/usr/libexec/netdata/plugins.d/freeipmi.plugin"
+ fi
+
+ if [ -f "${NETDATA_PREFIX}/usr/libexec/netdata/plugins.d/nfacct.plugin" ]; then
+ run chown "root:${NETDATA_GROUP}" "${NETDATA_PREFIX}/usr/libexec/netdata/plugins.d/nfacct.plugin"
+ run chmod 4750 "${NETDATA_PREFIX}/usr/libexec/netdata/plugins.d/nfacct.plugin"
+ fi
+
+ if [ -f "${NETDATA_PREFIX}/usr/libexec/netdata/plugins.d/xenstat.plugin" ]; then
+    run chown "root:${NETDATA_GROUP}" "${NETDATA_PREFIX}/usr/libexec/netdata/plugins.d/xenstat.plugin"
+ run chmod 4750 "${NETDATA_PREFIX}/usr/libexec/netdata/plugins.d/xenstat.plugin"
+ fi
+
+ if [ -f "${NETDATA_PREFIX}/usr/libexec/netdata/plugins.d/perf.plugin" ]; then
+    run chown "root:${NETDATA_GROUP}" "${NETDATA_PREFIX}/usr/libexec/netdata/plugins.d/perf.plugin"
+ run chmod 4750 "${NETDATA_PREFIX}/usr/libexec/netdata/plugins.d/perf.plugin"
+ fi
+
+ if [ -f "${NETDATA_PREFIX}/usr/libexec/netdata/plugins.d/slabinfo.plugin" ]; then
+    run chown "root:${NETDATA_GROUP}" "${NETDATA_PREFIX}/usr/libexec/netdata/plugins.d/slabinfo.plugin"
+ run chmod 4750 "${NETDATA_PREFIX}/usr/libexec/netdata/plugins.d/slabinfo.plugin"
+ fi
+
+ if [ -f "${NETDATA_PREFIX}/usr/libexec/netdata/plugins.d/ioping" ]; then
+    run chown "root:${NETDATA_GROUP}" "${NETDATA_PREFIX}/usr/libexec/netdata/plugins.d/ioping"
+ run chmod 4750 "${NETDATA_PREFIX}/usr/libexec/netdata/plugins.d/ioping"
+ fi
+
+ if [ -f "${NETDATA_PREFIX}/usr/libexec/netdata/plugins.d/ebpf.plugin" ]; then
+    run chown "root:${NETDATA_GROUP}" "${NETDATA_PREFIX}/usr/libexec/netdata/plugins.d/ebpf.plugin"
+ run chmod 4750 "${NETDATA_PREFIX}/usr/libexec/netdata/plugins.d/ebpf.plugin"
+ fi
+
+ if [ -f "${NETDATA_PREFIX}/usr/libexec/netdata/plugins.d/cgroup-network" ]; then
+ run chown "root:${NETDATA_GROUP}" "${NETDATA_PREFIX}/usr/libexec/netdata/plugins.d/cgroup-network"
+ run chmod 4750 "${NETDATA_PREFIX}/usr/libexec/netdata/plugins.d/cgroup-network"
+ fi
+
+ if [ -f "${NETDATA_PREFIX}/usr/libexec/netdata/plugins.d/cgroup-network-helper.sh" ]; then
+ run chown "root:${NETDATA_GROUP}" "${NETDATA_PREFIX}/usr/libexec/netdata/plugins.d/cgroup-network-helper.sh"
+ run chmod 0750 "${NETDATA_PREFIX}/usr/libexec/netdata/plugins.d/cgroup-network-helper.sh"
+ fi
+else
+ # non-privileged user installation
+ run chown "${NETDATA_USER}:${NETDATA_GROUP}" "${NETDATA_LOG_DIR}"
+ run chown -R "${NETDATA_USER}:${NETDATA_GROUP}" "${NETDATA_PREFIX}/usr/libexec/netdata"
+ run find "${NETDATA_PREFIX}/usr/libexec/netdata" -type f -exec chmod 0755 {} \;
+ run find "${NETDATA_PREFIX}/usr/libexec/netdata" -type d -exec chmod 0755 {} \;
+fi
+
+# -----------------------------------------------------------------------------
+
+copy_react_dashboard() {
+  run cp -a $(find "${1}" -mindepth 1 -maxdepth 1) "${NETDATA_WEB_DIR}"
+ run chown -R "${NETDATA_WEB_USER}:${NETDATA_WEB_GROUP}" "${NETDATA_WEB_DIR}"
+}
+
+install_react_dashboard() {
+ progress "Fetching and installing dashboard"
+
+ DASHBOARD_PACKAGE_VERSION="$(cat packaging/dashboard.version)"
+
+ tmp="$(mktemp -d -t netdata-dashboard-XXXXXX)"
+ DASHBOARD_PACKAGE_BASENAME="dashboard.tar.gz"
+
+ if fetch_and_verify "dashboard" \
+ "https://github.com/netdata/dashboard/releases/download/${DASHBOARD_PACKAGE_VERSION}/${DASHBOARD_PACKAGE_BASENAME}" \
+ "${DASHBOARD_PACKAGE_BASENAME}" \
+ "${tmp}" \
+ "${NETDATA_LOCAL_TARBALL_OVERRIDE_DASHBOARD}"; then
+ if run tar -xf "${tmp}/${DASHBOARD_PACKAGE_BASENAME}" -C "${tmp}" &&
+ copy_react_dashboard "${tmp}/build" &&
+ rm -rf "${tmp}"; then
+ run_ok "React dashboard installed."
+ else
+ run_failed "Failed to install React dashboard. The install process will continue, but you will not be able to use the new dashboard."
+ fi
+ else
+ run_failed "Unable to fetch React dashboard. The install process will continue, but you will not be able to use the new dashboard."
+ fi
+}
+
+install_react_dashboard
+
+# -----------------------------------------------------------------------------
+
+# govercomp compares go.d.plugin versions. Exit codes:
+# 0 - version1 == version2
+# 1 - version1 > version2
+# 2 - version2 > version1
+# 3 - error
+govercomp() {
+ # version in file:
+ # - v0.14.0
+ #
+ # 'go.d.plugin -v' output variants:
+ # - go.d.plugin, version: unknown
+ # - go.d.plugin, version: v0.14.1
+ # - go.d.plugin, version: v0.14.1-dirty
+ # - go.d.plugin, version: v0.14.1-1-g4c5f98c
+ # - go.d.plugin, version: v0.14.1-1-g4c5f98c-dirty
+
+ # we need to compare only MAJOR.MINOR.PATCH part
+ local ver1 ver2
+ ver1=$(echo "$1" | grep -E -o "[0-9]+\.[0-9]+\.[0-9]+")
+ ver2=$(echo "$2" | grep -E -o "[0-9]+\.[0-9]+\.[0-9]+")
+
+ local IFS=.
+ read -ra ver1 <<< "$ver1"
+ read -ra ver2 <<< "$ver2"
+
+ if [ ${#ver1[@]} -eq 0 ] || [ ${#ver2[@]} -eq 0 ]; then
+ return 3
+ fi
+
+ local i
+ for ((i = 0; i < ${#ver1[@]}; i++)); do
+ if [ "${ver1[i]}" -gt "${ver2[i]}" ]; then
+ return 1
+ elif [ "${ver2[i]}" -gt "${ver1[i]}" ]; then
+ return 2
+ fi
+ done
+
+ return 0
+}
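+# A quick sanity check of the exit codes above (hypothetical versions):
+#   govercomp "v0.14.1" "v0.14.0"; echo $?   # 1 - first version is newer
+#   govercomp "v0.14.0" "v0.14.1"; echo $?   # 2 - second version is newer
+#   govercomp "v0.14.0" "v0.14.0"; echo $?   # 0 - versions are equal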
+
+should_install_go() {
+ if [ -n "${NETDATA_DISABLE_GO+x}" ]; then
+ return 1
+ fi
+
+ local version_in_file
+ local binary_version
+
+ version_in_file="$(cat packaging/go.d.version 2> /dev/null)"
+ binary_version=$("${NETDATA_PREFIX}"/usr/libexec/netdata/plugins.d/go.d.plugin -v 2> /dev/null)
+
+ govercomp "$version_in_file" "$binary_version"
+ case $? in
+ 0) return 1 ;; # =
+ 2) return 1 ;; # <
+ *) return 0 ;; # >, error
+ esac
+}
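+# In short, should_install_go succeeds only when the packaged version is newer
+# than the installed binary, or when the comparison fails (in which case we
+# err on the side of installing), e.g.:
+#   should_install_go && echo "go.d.plugin will be (re)installed"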
+
+install_go() {
+ if ! should_install_go; then
+ return 0
+ fi
+
+ # When updating this value, ensure correct checksums in packaging/go.d.checksums
+ GO_PACKAGE_VERSION="$(cat packaging/go.d.version)"
+ ARCH_MAP=(
+ 'i386::386'
+ 'i686::386'
+ 'x86_64::amd64'
+ 'aarch64::arm64'
+ 'armv64::arm64'
+ 'armv6l::arm'
+ 'armv7l::arm'
+ 'armv5tel::arm'
+ )
+
+ progress "Install go.d.plugin"
+ ARCH=$(uname -m)
+ OS=$(uname -s | tr '[:upper:]' '[:lower:]')
+
+ for index in "${ARCH_MAP[@]}"; do
+ KEY="${index%%::*}"
+ VALUE="${index##*::}"
+ if [ "$KEY" = "$ARCH" ]; then
+ ARCH="${VALUE}"
+ break
+ fi
+ done
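+  # Each 'KEY::VALUE' entry is split with parameter expansion: for the entry
+  # 'x86_64::amd64', "${index%%::*}" strips the longest '::*' suffix giving
+  # KEY=x86_64, and "${index##*::}" strips the longest '*::' prefix giving
+  # VALUE=amd64 (the Go toolchain's name for the architecture).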
+ tmp="$(mktemp -d -t netdata-go-XXXXXX)"
+ GO_PACKAGE_BASENAME="go.d.plugin-${GO_PACKAGE_VERSION}.${OS}-${ARCH}.tar.gz"
+
+ if [ -z "${NETDATA_LOCAL_TARBALL_OVERRIDE_GO_PLUGIN}" ]; then
+ download_go "https://github.com/netdata/go.d.plugin/releases/download/${GO_PACKAGE_VERSION}/${GO_PACKAGE_BASENAME}" "${tmp}/${GO_PACKAGE_BASENAME}"
+ else
+ progress "Using provided go.d tarball ${NETDATA_LOCAL_TARBALL_OVERRIDE_GO_PLUGIN}"
+ run cp "${NETDATA_LOCAL_TARBALL_OVERRIDE_GO_PLUGIN}" "${tmp}/${GO_PACKAGE_BASENAME}"
+ fi
+
+ if [ -z "${NETDATA_LOCAL_TARBALL_OVERRIDE_GO_PLUGIN_CONFIG}" ]; then
+ download_go "https://github.com/netdata/go.d.plugin/releases/download/${GO_PACKAGE_VERSION}/config.tar.gz" "${tmp}/config.tar.gz"
+ else
+ progress "Using provided config file for go.d ${NETDATA_LOCAL_TARBALL_OVERRIDE_GO_PLUGIN_CONFIG}"
+ run cp "${NETDATA_LOCAL_TARBALL_OVERRIDE_GO_PLUGIN_CONFIG}" "${tmp}/config.tar.gz"
+ fi
+
+ if [ ! -f "${tmp}/${GO_PACKAGE_BASENAME}" ] || [ ! -f "${tmp}/config.tar.gz" ] || [ ! -s "${tmp}/config.tar.gz" ] || [ ! -s "${tmp}/${GO_PACKAGE_BASENAME}" ]; then
+ run_failed "go.d plugin download failed, go.d plugin will not be available"
+ defer_error "go.d plugin download failed, go.d plugin will not be available"
+ echo >&2 "Either check the error or consider disabling it by issuing '--disable-go' in the installer"
+ echo >&2
+ return 0
+ fi
+
+ grep "${GO_PACKAGE_BASENAME}\$" "${INSTALLER_DIR}/packaging/go.d.checksums" > "${tmp}/sha256sums.txt" 2> /dev/null
+ grep "config.tar.gz" "${INSTALLER_DIR}/packaging/go.d.checksums" >> "${tmp}/sha256sums.txt" 2> /dev/null
+
+ # Checksum validation
+ if ! (cd "${tmp}" && safe_sha256sum -c "sha256sums.txt"); then
+
+ echo >&2 "go.d plugin checksum validation failure."
+ echo >&2 "Either check the error or consider disabling it by issuing '--disable-go' in the installer"
+ echo >&2
+
+ run_failed "go.d.plugin package files checksum validation failed."
+ defer_error "go.d.plugin package files checksum validation failed, go.d.plugin will not be available"
+ return 0
+ fi
+
+ # Install new files
+ run rm -rf "${NETDATA_STOCK_CONFIG_DIR}/go.d"
+ run rm -rf "${NETDATA_STOCK_CONFIG_DIR}/go.d.conf"
+ run tar -xf "${tmp}/config.tar.gz" -C "${NETDATA_STOCK_CONFIG_DIR}/"
+ run chown -R "${ROOT_USER}:${ROOT_GROUP}" "${NETDATA_STOCK_CONFIG_DIR}"
+
+ run tar xf "${tmp}/${GO_PACKAGE_BASENAME}"
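+  # "${GO_PACKAGE_BASENAME/\.tar\.gz/}" strips the first '.tar.gz' from the
+  # name: the tarball is expected to unpack to a binary named after itself
+  # minus the extension, which we move into place as go.d.plugin.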
+ run mv "${GO_PACKAGE_BASENAME/\.tar\.gz/}" "${NETDATA_PREFIX}/usr/libexec/netdata/plugins.d/go.d.plugin"
+ if [ "${UID}" -eq 0 ]; then
+ run chown "root:${NETDATA_GROUP}" "${NETDATA_PREFIX}/usr/libexec/netdata/plugins.d/go.d.plugin"
+ fi
+ run chmod 0750 "${NETDATA_PREFIX}/usr/libexec/netdata/plugins.d/go.d.plugin"
+ rm -rf "${tmp}"
+ return 0
+}
+
+install_go
+
+function get_kernel_version() {
+ r="$(uname -r | cut -f 1 -d '-')"
+
+ read -r -a p <<< "$(echo "${r}" | tr '.' ' ')"
+
+ printf "%03d%03d%03d" "${p[0]}" "${p[1]}" "${p[2]}"
+}
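+# For a hypothetical kernel release "5.4.0-90-generic", the function above
+# yields "005004000": the "-90-generic" suffix is cut away and each of the
+# three numeric components is zero-padded to three digits, so versions can be
+# compared as plain strings or integers.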
+
+function get_rh_version() {
+ if [ ! -f /etc/redhat-release ]; then
+ printf "000000000"
+ return
+ fi
+
+ r="$(cut -f 4 -d ' ' < /etc/redhat-release)"
+
+ read -r -a p <<< "$(echo "${r}" | tr '.' ' ')"
+
+ printf "%03d%03d%03d" "${p[0]}" "${p[1]}" "${p[2]}"
+}
+
+detect_libc() {
+ libc=
+ if ldd --version 2>&1 | grep -q -i glibc; then
+ echo >&2 " Detected GLIBC"
+ libc="glibc"
+ elif ldd --version 2>&1 | grep -q -i 'gnu libc'; then
+ echo >&2 " Detected GLIBC"
+ libc="glibc"
+ elif ldd --version 2>&1 | grep -q -i musl; then
+ echo >&2 " Detected musl"
+ libc="musl"
+ else
+ echo >&2 " ERROR: Cannot detect a supported libc on your system!"
+ return 1
+ fi
+ echo "${libc}"
+ return 0
+}
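+# Typical usage captures the name from stdout while the diagnostics go to
+# stderr, e.g. (the run_failed fallback here is illustrative only):
+#   libc="$(detect_libc)" || run_failed "unsupported libc"
+# install_ebpf() below instead honors an EBPF_LIBC override and simply lets
+# the download fail if the detected libc name comes back empty.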
+
+should_install_ebpf() {
+ if [ "${NETDATA_DISABLE_EBPF:=0}" -eq 1 ]; then
+ run_failed "eBPF explicitly disabled."
+ defer_error "eBPF explicitly disabled."
+ return 1
+ fi
+
+ if [ "$(uname -s)" != "Linux" ]; then
+ run_failed "Currently eBPF is only supported on Linux."
+ defer_error "Currently eBPF is only supported on Linux."
+ return 1
+ fi
+
+ # Check Kernel Config
+ if ! run "${INSTALLER_DIR}"/packaging/check-kernel-config.sh; then
+ echo >&2 "Warning: Kernel unsupported or missing required config (eBPF may not work on your system)"
+ fi
+
+ return 0
+}
+
+remove_old_ebpf() {
+ if [ -f "${NETDATA_PREFIX}/usr/libexec/netdata/plugins.d/ebpf_process.plugin" ]; then
+ echo >&2 "Removing alpha eBPF collector."
+ rm -f "${NETDATA_PREFIX}/usr/libexec/netdata/plugins.d/ebpf_process.plugin"
+ fi
+
+ if [ -f "${NETDATA_PREFIX}/usr/lib/netdata/conf.d/ebpf_process.conf" ]; then
+ echo >&2 "Removing alpha eBPF stock file"
+ rm -f "${NETDATA_PREFIX}/usr/lib/netdata/conf.d/ebpf_process.conf"
+ fi
+
+ if [ -f "${NETDATA_PREFIX}/etc/netdata/ebpf_process.conf" ]; then
+ echo >&2 "Renaming eBPF configuration file."
+ mv "${NETDATA_PREFIX}/etc/netdata/ebpf_process.conf" "${NETDATA_PREFIX}/etc/netdata/ebpf.conf"
+ fi
+
+ # Added to remove eBPF programs with name pattern: NAME_VERSION.SUBVERSION.PATCH
+ if [ -f "${NETDATA_PREFIX}/usr/libexec/netdata/plugins.d/pnetdata_ebpf_process.3.10.0.o" ]; then
+ echo >&2 "Removing old eBPF programs"
+ rm -f "${NETDATA_PREFIX}/usr/libexec/netdata/plugins.d/rnetdata_ebpf"*.?.*.*.o
+ rm -f "${NETDATA_PREFIX}/usr/libexec/netdata/plugins.d/pnetdata_ebpf"*.?.*.*.o
+ fi
+}
+
+install_ebpf() {
+ if ! should_install_ebpf; then
+ return 0
+ fi
+
+ remove_old_ebpf
+
+ progress "Installing eBPF plugin"
+
+ # Detect libc
+ libc="${EBPF_LIBC:-"$(detect_libc)"}"
+
+ EBPF_VERSION="$(cat packaging/ebpf.version)"
+ EBPF_TARBALL="netdata-kernel-collector-${libc}-${EBPF_VERSION}.tar.xz"
+
+ tmp="$(mktemp -d -t netdata-ebpf-XXXXXX)"
+
+ if ! fetch_and_verify "ebpf" \
+ "https://github.com/netdata/kernel-collector/releases/download/${EBPF_VERSION}/${EBPF_TARBALL}" \
+ "${EBPF_TARBALL}" \
+ "${tmp}" \
+ "${NETDATA_LOCAL_TARBALL_OVERRIDE_EBPF}"; then
+ run_failed "Failed to download eBPF collector package"
+ echo 2>&" Removing temporary directory ${tmp} ..."
+ rm -rf "${tmp}"
+ return 1
+ fi
+
+ echo >&2 " Extracting ${EBPF_TARBALL} ..."
+ tar -xf "${tmp}/${EBPF_TARBALL}" -C "${tmp}"
+
+ # chown everything to root:netdata before we start copying out of our package
+ run chown -R root:netdata "${tmp}"
+
+ run cp -a -v "${tmp}"/*netdata_ebpf_*.o "${NETDATA_PREFIX}"/usr/libexec/netdata/plugins.d
+
+ rm -rf "${tmp}"
+
+ return 0
+}
+
+progress "eBPF Kernel Collector"
+install_ebpf
+
+# -----------------------------------------------------------------------------
+progress "Telemetry configuration"
+
+if [ ! "${DO_NOT_TRACK:-0}" -eq 0 ] || [ -n "$DO_NOT_TRACK" ]; then
+ NETDATA_DISABLE_TELEMETRY=1
+fi
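+# e.g. (assuming the usual ./netdata-installer.sh entry point) running
+#   DO_NOT_TRACK=1 ./netdata-installer.sh
+# has the same effect as passing --disable-telemetry, since any non-empty
+# value of DO_NOT_TRACK disables telemetry.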
+
+# Opt-out from telemetry program
+if [ -n "${NETDATA_DISABLE_TELEMETRY+x}" ]; then
+ run touch "${NETDATA_USER_CONFIG_DIR}/.opt-out-from-anonymous-statistics"
+else
+ printf "You can opt out from anonymous statistics via the --disable-telemetry option, or by creating an empty file %s \n\n" "${NETDATA_USER_CONFIG_DIR}/.opt-out-from-anonymous-statistics"
+fi
+
+# -----------------------------------------------------------------------------
+progress "Install netdata at system init"
+
+# By default we assume the Netdata Agent starts and stops without any system
+# supervisor/init (such as systemd or SysV init), so we default to the most
+# basic startup/shutdown commands...
+NETDATA_STOP_CMD="${NETDATA_PREFIX}/usr/sbin/netdatacli shutdown-agent"
+NETDATA_START_CMD="${NETDATA_PREFIX}/usr/sbin/netdata"
+
+if grep -q docker /proc/1/cgroup > /dev/null 2>&1; then
+ # If docker runs systemd for some weird reason, let the install proceed
+ is_systemd_running="NO"
+ if command -v pidof > /dev/null 2>&1; then
+ is_systemd_running="$(pidof /usr/sbin/init || pidof systemd || echo "NO")"
+ else
+ is_systemd_running="$( (pgrep -q -f systemd && echo "1") || echo "NO")"
+ fi
+
+ if [ "${is_systemd_running}" == "1" ]; then
+ echo >&2 "Found systemd within the docker container, running install_netdata_service() method"
+ install_netdata_service || run_failed "Cannot install netdata init service."
+ else
+ echo >&2 "We are running within a docker container, will not be installing netdata service"
+ fi
+ echo >&2
+else
+ install_netdata_service || run_failed "Cannot install netdata init service."
+fi
+
+# -----------------------------------------------------------------------------
+# check if we can re-start netdata
+
+# TODO(paulfantom): Creation of configuration file should be handled by a build system. Additionally we shouldn't touch configuration files in /etc/netdata/...
+started=0
+if [ ${DONOTSTART} -eq 1 ]; then
+ create_netdata_conf "${NETDATA_PREFIX}/etc/netdata/netdata.conf"
+else
+ if ! restart_netdata "${NETDATA_PREFIX}/usr/sbin/netdata" "${@}"; then
+ fatal "Cannot start netdata!"
+ fi
+
+ started=1
+ run_ok "netdata started!"
+ create_netdata_conf "${NETDATA_PREFIX}/etc/netdata/netdata.conf" "http://localhost:${NETDATA_PORT}/netdata.conf"
+fi
+run chmod 0644 "${NETDATA_PREFIX}/etc/netdata/netdata.conf"
+
+if [ "$(uname)" = "Linux" ]; then
+ # -------------------------------------------------------------------------
+ progress "Check KSM (kernel memory deduper)"
+
+ ksm_is_available_but_disabled() {
+ cat << KSM1
+
+${TPUT_BOLD}Memory de-duplication instructions${TPUT_RESET}
+
+You have kernel memory de-duper (called Kernel Same-page Merging,
+or KSM) available, but it is not currently enabled.
+
+To enable it run:
+
+ ${TPUT_YELLOW}${TPUT_BOLD}echo 1 >/sys/kernel/mm/ksm/run${TPUT_RESET}
+ ${TPUT_YELLOW}${TPUT_BOLD}echo 1000 >/sys/kernel/mm/ksm/sleep_millisecs${TPUT_RESET}
+
+If you enable it, you will save 40-60% of netdata's memory.
+
+KSM1
+ }
+
+ ksm_is_not_available() {
+ cat << KSM2
+
+${TPUT_BOLD}Memory de-duplication not present in your kernel${TPUT_RESET}
+
+It seems you do not have kernel memory de-duper (called Kernel Same-page
+Merging, or KSM) available.
+
+To enable it, you need a kernel built with CONFIG_KSM=y
+
+If you can enable it, you will save 40-60% of netdata's memory.
+
+KSM2
+ }
+
+ if [ -f "/sys/kernel/mm/ksm/run" ]; then
+ if [ "$(cat "/sys/kernel/mm/ksm/run")" != "1" ]; then
+ ksm_is_available_but_disabled
+ fi
+ else
+ ksm_is_not_available
+ fi
+fi
+
+# -----------------------------------------------------------------------------
+progress "Check version.txt"
+
+if [ ! -s web/gui/version.txt ]; then
+ cat << VERMSG
+
+${TPUT_BOLD}Version update check warning${TPUT_RESET}
+
+We cannot determine the version of netdata from the way it was downloaded.
+This means the update check on the dashboard will not work.
+
+If you want the version update check to work, please re-install netdata
+following the procedure in:
+
+https://docs.netdata.cloud/packaging/installer/
+
+VERMSG
+fi
+
+if [ -f "${NETDATA_PREFIX}/usr/libexec/netdata/plugins.d/apps.plugin" ]; then
+ # -----------------------------------------------------------------------------
+ progress "Check apps.plugin"
+
+ if [ "${UID}" -ne 0 ]; then
+ cat << SETUID_WARNING
+
+${TPUT_BOLD}apps.plugin needs privileges${TPUT_RESET}
+
+Since you have installed netdata as a normal user, to have apps.plugin collect
+all the needed data, you have to give it the access rights it needs, by running
+either of the following sets of commands:
+
+To run apps.plugin with escalated capabilities:
+
+ ${TPUT_YELLOW}${TPUT_BOLD}sudo chown root:${NETDATA_GROUP} "${NETDATA_PREFIX}/usr/libexec/netdata/plugins.d/apps.plugin"${TPUT_RESET}
+ ${TPUT_YELLOW}${TPUT_BOLD}sudo chmod 0750 "${NETDATA_PREFIX}/usr/libexec/netdata/plugins.d/apps.plugin"${TPUT_RESET}
+ ${TPUT_YELLOW}${TPUT_BOLD}sudo setcap cap_dac_read_search,cap_sys_ptrace+ep "${NETDATA_PREFIX}/usr/libexec/netdata/plugins.d/apps.plugin"${TPUT_RESET}
+
+or, to run apps.plugin as root:
+
+ ${TPUT_YELLOW}${TPUT_BOLD}sudo chown root:${NETDATA_GROUP} "${NETDATA_PREFIX}/usr/libexec/netdata/plugins.d/apps.plugin"${TPUT_RESET}
+ ${TPUT_YELLOW}${TPUT_BOLD}sudo chmod 4750 "${NETDATA_PREFIX}/usr/libexec/netdata/plugins.d/apps.plugin"${TPUT_RESET}
+
+apps.plugin performs a hard-coded data collection function for all running
+processes. It cannot be instructed by the netdata daemon to perform any other
+task, so granting it these permissions is reasonably safe.
+
+SETUID_WARNING
+ fi
+fi
+
+# -----------------------------------------------------------------------------
+progress "Copy uninstaller"
+if [ -f "${NETDATA_PREFIX}"/usr/libexec/netdata-uninstaller.sh ]; then
+ echo >&2 "Removing uninstaller from old location"
+ rm -f "${NETDATA_PREFIX}"/usr/libexec/netdata-uninstaller.sh
+fi
+
+sed "s|ENVIRONMENT_FILE=\"/etc/netdata/.environment\"|ENVIRONMENT_FILE=\"${NETDATA_PREFIX}/etc/netdata/.environment\"|" packaging/installer/netdata-uninstaller.sh > ${NETDATA_PREFIX}/usr/libexec/netdata/netdata-uninstaller.sh
+chmod 750 ${NETDATA_PREFIX}/usr/libexec/netdata/netdata-uninstaller.sh
+
+# -----------------------------------------------------------------------------
+progress "Basic netdata instructions"
+
+cat << END
+
+netdata by default listens on all IPs on port ${NETDATA_PORT},
+so you can access it with:
+
+ ${TPUT_CYAN}${TPUT_BOLD}http://this.machine.ip:${NETDATA_PORT}/${TPUT_RESET}
+
+To stop netdata run:
+
+ ${TPUT_YELLOW}${TPUT_BOLD}${NETDATA_STOP_CMD}${TPUT_RESET}
+
+To start netdata run:
+
+ ${TPUT_YELLOW}${TPUT_BOLD}${NETDATA_START_CMD}${TPUT_RESET}
+
+END
+echo >&2 "Uninstall script copied to: ${TPUT_RED}${TPUT_BOLD}${NETDATA_PREFIX}/usr/libexec/netdata/netdata-uninstaller.sh${TPUT_RESET}"
+echo >&2
+
+# -----------------------------------------------------------------------------
+progress "Installing (but not enabling) the netdata updater tool"
+cleanup_old_netdata_updater || run_failed "Cannot cleanup old netdata updater tool."
+install_netdata_updater || run_failed "Cannot install netdata updater tool."
+
+progress "Check if we must enable/disable the netdata updater tool"
+if [ "${AUTOUPDATE}" = "1" ]; then
+ enable_netdata_updater ${AUTO_UPDATE_TYPE} || run_failed "Cannot enable netdata updater tool"
+else
+ disable_netdata_updater || run_failed "Cannot disable netdata updater tool"
+fi
+
+# -----------------------------------------------------------------------------
+progress "Wrap up environment set up"
+
+# Save environment variables
+echo >&2 "Preparing .environment file"
+cat << EOF > "${NETDATA_USER_CONFIG_DIR}/.environment"
+# Created by installer
+PATH="${PATH}"
+CFLAGS="${CFLAGS}"
+LDFLAGS="${LDFLAGS}"
+NETDATA_TMPDIR="${TMPDIR}"
+NETDATA_PREFIX="${NETDATA_PREFIX}"
+NETDATA_CONFIGURE_OPTIONS="${NETDATA_CONFIGURE_OPTIONS}"
+NETDATA_ADDED_TO_GROUPS="${NETDATA_ADDED_TO_GROUPS}"
+INSTALL_UID="${UID}"
+NETDATA_GROUP="${NETDATA_GROUP}"
+REINSTALL_OPTIONS="${REINSTALL_OPTIONS}"
+RELEASE_CHANNEL="${RELEASE_CHANNEL}"
+IS_NETDATA_STATIC_BINARY="${IS_NETDATA_STATIC_BINARY}"
+NETDATA_LIB_DIR="${NETDATA_LIB_DIR}"
+EOF
+run chmod 0644 "${NETDATA_USER_CONFIG_DIR}/.environment"
+
+echo >&2 "Setting netdata.tarball.checksum to 'new_installation'"
+cat << EOF > "${NETDATA_LIB_DIR}/netdata.tarball.checksum"
+new_installation
+EOF
+
+print_deferred_errors
+
+# -----------------------------------------------------------------------------
+echo >&2
+progress "We are done!"
+
+if [ ${started} -eq 1 ]; then
+ netdata_banner "is installed and running now!"
+else
+ netdata_banner "is installed now!"
+fi
+
+echo >&2 " enjoy real-time performance and health monitoring..."
+echo >&2
+exit 0
diff --git a/netdata.cppcheck b/netdata.cppcheck
new file mode 100644
index 0000000..245c7a0
--- /dev/null
+++ b/netdata.cppcheck
@@ -0,0 +1,17 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project version="1">
+ <builddir>cppcheck-build</builddir>
+ <includedir>
+ <dir name=".."/>
+ </includedir>
+ <libraries>
+ <library>cppcheck-lib</library>
+ <library>gnu</library>
+ <library>posix</library>
+ </libraries>
+ <suppressions>
+ <suppression>nullPointerRedundantCheck</suppression>
+ <suppression>unusedFunction</suppression>
+ <suppression>readdirCalled</suppression>
+ </suppressions>
+</project>
diff --git a/netdata.spec.in b/netdata.spec.in
new file mode 100644
index 0000000..f55dcfe
--- /dev/null
+++ b/netdata.spec.in
@@ -0,0 +1,577 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+%global contentdir %{_datadir}/netdata
+%global version @PACKAGE_VERSION@
+
+#TODO: Temporary fix for the build-id error during go.d plugin set up
+%global _missing_build_ids_terminate_build 0
+
+# XXX: We are using automatic `Requires:` generation for libraries
+# whenever possible, DO NOT LIST LIBRARY DEPENDENCIES UNLESS THE RESULTANT
+# PACKAGE IS BROKEN WITHOUT THEM.
+AutoReqProv: yes
+
+%if "@HAVE_LIBBPF@" == "1"
+%global have_bpf 1
+%else
+%global have_bpf 0
+%endif
+
+# This is temporary and should eventually be resolved. This bypasses
+# the default rhel __os_install_post which throws a python compile
+# error.
+%global __os_install_post %{nil}
+
+# Mitigate the cross-distro mayhem by strictly defining the libexec destination
+%define _prefix /usr
+%define _sysconfdir /etc
+%define _localstatedir /var
+%define _libexecdir /usr/libexec
+%define _libdir /usr/lib
+
+# Redefine centos_ver to standardize on a single macro
+%{?rhel:%global centos_ver %rhel}
+
+#
+# Conditional build:
+%bcond_without systemd # systemd
+%bcond_with netns # build with netns support (cgroup-network)
+
+%if 0%{?fedora} || 0%{?rhel} >= 7 || 0%{?suse_version} >= 1140
+%else
+%undefine with_systemd
+%undefine with_netns
+%endif
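+# The bcond defaults above mean systemd support is on and netns support is off
+# unless toggled at build time (and both are force-disabled on older distros
+# by the undefines just above); for example (assuming a plain rpmbuild
+# invocation):
+#   rpmbuild -ba netdata.spec --without systemd --with netns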
+
+%if %{with systemd}
+%if 0%{?suse_version}
+%global netdata_initd_buildrequires \
+BuildRequires: systemd-rpm-macros \
+%{nil}
+%global netdata_initd_requires \
+%{?systemd_requires} \
+%{nil}
+%global netdata_init_post %service_add_post netdata.service \
+/sbin/service netdata restart > /dev/null 2>&1 \
+%{nil}
+%global netdata_init_preun %service_del_preun netdata.service \
+/sbin/service netdata stop > /dev/null 2>&1 \
+%{nil}
+%global netdata_init_postun %service_del_postun netdata.service
+%else
+%global netdata_initd_buildrequires \
+BuildRequires: systemd
+%global netdata_initd_requires \
+Requires(preun): systemd-units \
+Requires(postun): systemd-units \
+Requires(post): systemd-units \
+%{nil}
+%global netdata_init_post %systemd_post netdata.service \
+/usr/bin/systemctl enable netdata.service \
+/usr/bin/systemctl daemon-reload \
+/usr/bin/systemctl restart netdata.service \
+%{nil}
+%global netdata_init_preun %systemd_preun netdata.service
+%global netdata_init_postun %systemd_postun_with_restart netdata.service
+%endif
+%else
+%global netdata_initd_buildrequires %{nil}
+%global netdata_initd_requires \
+Requires(post): chkconfig \
+%{nil}
+%global netdata_init_post \
+/sbin/chkconfig --add netdata \
+/sbin/service netdata restart > /dev/null 2>&1 \
+%{nil}
+%global netdata_init_preun %{nil} \
+if [ $1 = 0 ]; then \
+ /sbin/service netdata stop > /dev/null 2>&1 \
+ /sbin/chkconfig --del netdata \
+fi \
+%{nil}
+%global netdata_init_postun %{nil} \
+if [ $1 != 0 ]; then \
+ /sbin/service netdata condrestart 2>&1 > /dev/null \
+fi \
+%{nil}
+%endif
+
+Summary: Real-time performance monitoring, done right!
+Name: netdata
+Version: %{version}
+Release: 1%{?dist}
+License: GPLv3+
+Group: Applications/System
+Source0: https://github.com/netdata/%{name}/releases/download/%{version}/%{name}-%{version}.tar.gz
+URL: http://my-netdata.io
+
+# Remove conflicting EPEL packages
+Obsoletes: %{name}-conf
+Obsoletes: %{name}-data
+
+# #####################################################################
+# Core build/install/runtime dependencies
+# #####################################################################
+
+# Build dependencies
+#
+BuildRequires: gcc
+BuildRequires: gcc-c++
+BuildRequires: make
+BuildRequires: git-core
+BuildRequires: autoconf
+%if 0%{?fedora} || 0%{?rhel} >= 7 || 0%{?suse_version} >= 1140
+BuildRequires: autoconf-archive
+BuildRequires: autogen
+%endif
+BuildRequires: automake
+BuildRequires: cmake
+BuildRequires: pkgconfig
+BuildRequires: curl
+BuildRequires: findutils
+BuildRequires: zlib-devel
+BuildRequires: libuuid-devel
+BuildRequires: libuv-devel >= 1
+BuildRequires: openssl-devel
+%if 0%{?centos_ver} >= 8 || 0%{?fedora}
+BuildRequires: libwebsockets-devel >= 3.2
+%endif
+%if 0%{?suse_version}
+BuildRequires: judy-devel
+BuildRequires: liblz4-devel
+BuildRequires: libjson-c-devel
+%else
+%if 0%{?fedora}
+BuildRequires: Judy-devel
+BuildRequires: lz4-devel
+BuildRequires: json-c-devel
+%else
+BuildRequires: lz4-devel
+BuildRequires: json-c-devel
+%endif
+%endif
+
+# Core build requirements for service install
+%{netdata_initd_buildrequires}
+
+# Runtime dependencies
+#
+%if 0%{?centos_ver} == 7 || 0%{?centos_ver} == 6
+Requires: python
+%else
+%if 0%{?centos_ver} == 8
+Requires: python38
+%else
+Requires: python3
+%endif
+%endif
+
+# Core requirements for the install to succeed
+Requires(pre): /usr/sbin/groupadd
+Requires(pre): /usr/sbin/useradd
+
+%{netdata_initd_requires}
+
+# #####################################################################
+# Functionality-dependent package dependencies
+# #####################################################################
+# Note: Some or all of these packages may be found in the EPEL repo
+# rather than the standard ones
+
+# nfacct plugin dependencies
+BuildRequires: libmnl-devel
+%if 0%{?fedora} || 0%{?suse_version} >= 1140
+BuildRequires: libnetfilter_acct-devel
+%endif
+
+# end nfacct plugin dependencies
+
+# freeipmi plugin dependencies
+BuildRequires: freeipmi-devel
+# end - freeipmi plugin dependencies
+
+# CUPS plugin dependencies
+%if 0%{?centos_ver} != 6 && 0%{?centos_ver} != 7
+BuildRequires: cups-devel >= 1.7
+%endif
+# end - cups plugin dependencies
+
+# Prometheus remote write dependencies
+BuildRequires: snappy-devel
+BuildRequires: protobuf-devel
+%if 0%{?suse_version}
+BuildRequires: libprotobuf-c-devel
+%else
+BuildRequires: protobuf-c-devel
+%endif
+# end - prometheus remote write dependencies
+
+# #####################################################################
+# End of dependency management configuration
+# #####################################################################
+
+%description
+ netdata is the fastest way to visualize metrics. It is a resource-efficient,
+highly optimized system for collecting and visualizing any type of real-time
+time-series data, from CPU usage, disk activity, SQL queries, API calls,
+website visitors, etc.
+ netdata tries to visualize the truth of now, in its greatest detail, so
+that you can get insights into what is happening now and what just
+happened, on your systems and applications.
+
+%prep
+%setup -q -n %{name}-%{version}
+export CFLAGS="${CFLAGS} -fPIC" && ${RPM_BUILD_DIR}/%{name}-%{version}/packaging/bundle-mosquitto.sh ${RPM_BUILD_DIR}/%{name}-%{version}
+%if 0%{?centos_ver} < 8 || 0%{!?fedora:1}
+export CFLAGS="${CFLAGS} -fPIC" && ${RPM_BUILD_DIR}/%{name}-%{version}/packaging/bundle-lws.sh ${RPM_BUILD_DIR}/%{name}-%{version}
+%endif
+# Only bundle libJudy if this isn't Fedora or SUSE
+%if 0%{!?fedora:1} && 0%{!?suse_version:1}
+export CFLAGS="${CFLAGS} -fPIC" && ${RPM_BUILD_DIR}/%{name}-%{version}/packaging/bundle-judy.sh ${RPM_BUILD_DIR}/%{name}-%{version}
+%endif
+%if 0%{?have_bpf}
+export CFLAGS="${CFLAGS} -fPIC" && ${RPM_BUILD_DIR}/%{name}-%{version}/packaging/bundle-libbpf.sh ${RPM_BUILD_DIR}/%{name}-%{version}
+%endif
+
+%build
+# Conf step
+autoreconf -ivf
+%configure \
+ %if 0%{!?fedora:1} && 0%{!?suse_version:1}
+ --with-libJudy=externaldeps/libJudy \
+ %endif
+ %if 0%{?centos_ver} < 8 || 0%{!?fedora:1}
+ --with-bundled-lws=externaldeps/libwebsockets \
+ %endif
+ --prefix="%{_prefix}" \
+ --sysconfdir="%{_sysconfdir}" \
+ --localstatedir="%{_localstatedir}" \
+ --libexecdir="%{_libexecdir}" \
+ --libdir="%{_libdir}" \
+ --with-zlib \
+ --with-math \
+ --with-user=netdata
+
+# Build step
+%{__make} %{?_smp_mflags}
+
+%install
+
+# ###########################################################
+# Clear the directory, if already exists and install
+rm -rf "${RPM_BUILD_ROOT}"
+%{__make} %{?_smp_mflags} DESTDIR="${RPM_BUILD_ROOT}" install
+
+install -m 644 -p system/netdata.conf "${RPM_BUILD_ROOT}%{_sysconfdir}/%{name}"
+
+# ###########################################################
+# logrotate settings
+install -m 755 -d "${RPM_BUILD_ROOT}%{_sysconfdir}/logrotate.d"
+install -m 644 -p system/netdata.logrotate "${RPM_BUILD_ROOT}%{_sysconfdir}/logrotate.d/%{name}"
+
+# ###########################################################
+# Install freeipmi
+install -m 4750 -p freeipmi.plugin "${RPM_BUILD_ROOT}%{_libexecdir}/%{name}/plugins.d/freeipmi.plugin"
+
+# ###########################################################
+# Install apps.plugin
+install -m 4750 -p apps.plugin "${RPM_BUILD_ROOT}%{_libexecdir}/%{name}/plugins.d/apps.plugin"
+
+# ###########################################################
+# Install perf.plugin
+install -m 4750 -p perf.plugin "${RPM_BUILD_ROOT}%{_libexecdir}/%{name}/plugins.d/perf.plugin"
+
+# ###########################################################
+# Install ebpf.plugin
+%if 0%{?have_bpf}
+install -m 4750 -p ebpf.plugin "${RPM_BUILD_ROOT}%{_libexecdir}/%{name}/plugins.d/ebpf.plugin"
+%endif
+
+# ###########################################################
+# Install cups.plugin
+%if 0%{?centos_ver} != 6 && 0%{?centos_ver} != 7
+install -m 0750 -p cups.plugin "${RPM_BUILD_ROOT}%{_libexecdir}/%{name}/plugins.d/cups.plugin"
+%endif
+
+# ###########################################################
+# Install slabinfo.plugin
+install -m 4750 -p slabinfo.plugin "${RPM_BUILD_ROOT}%{_libexecdir}/%{name}/plugins.d/slabinfo.plugin"
+
+# ###########################################################
+# Install cache and log directories
+install -m 755 -d "${RPM_BUILD_ROOT}%{_localstatedir}/cache/%{name}"
+install -m 755 -d "${RPM_BUILD_ROOT}%{_localstatedir}/log/%{name}"
+
+# ###########################################################
+# Install registry directory
+install -m 755 -d "${RPM_BUILD_ROOT}%{_localstatedir}/lib/%{name}/registry"
+
+# ###########################################################
+# Install netdata service
+%if %{with systemd}
+install -m 755 -d "${RPM_BUILD_ROOT}%{_unitdir}"
+install -m 644 -p system/netdata.service "${RPM_BUILD_ROOT}%{_unitdir}/netdata.service"
+%else
+# install SYSV init stuff
+install -d "${RPM_BUILD_ROOT}/etc/rc.d/init.d"
+install -m 755 system/netdata-init-d \
+ "${RPM_BUILD_ROOT}/etc/rc.d/init.d/netdata"
+%endif
+
+# ############################################################
+# Package Go within netdata (TBD: Package it separately)
+safe_sha256sum() {
+  # Within the context of the installer, we only use the -c option that is common between the two commands.
+  # We will have to reconsider if we start using non-common options.
+ if command -v sha256sum >/dev/null 2>&1; then
+ sha256sum $@
+ elif command -v shasum >/dev/null 2>&1; then
+ shasum -a 256 $@
+ else
+ fatal "I could not find a suitable checksum binary to use"
+ fi
+}
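+# Only the '-c' (check) mode is used here, e.g.:
+#   (cd "${tmp}" && safe_sha256sum -c "sha256sums.txt")
+# as install_go() below does; both sha256sum and 'shasum -a 256' accept it.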
+
+download_go() {
+ url="${1}"
+ dest="${2}"
+
+ if command -v curl >/dev/null 2>&1; then
+ curl -sSL --connect-timeout 10 --retry 3 "${url}" > "${dest}"
+ elif command -v wget >/dev/null 2>&1; then
+ wget -T 15 -O - "${url}" > "${dest}"
+ else
+ echo >&2
+ echo >&2 "Downloading go.d plugin from '${url}' failed because of missing mandatory packages."
+ echo >&2 "Either add packages or disable it by issuing '--disable-go' in the installer"
+ echo >&2
+ exit 1
+ fi
+}
+
+install_go() {
+ # When updating this value, ensure correct checksums in packaging/go.d.checksums
+ GO_PACKAGE_VERSION="$(cat packaging/go.d.version)"
+ ARCH_MAP=(
+ 'i386::386'
+ 'i686::386'
+ 'x86_64::amd64'
+ 'aarch64::arm64'
+ 'armv64::arm64'
+ 'armv6l::arm'
+ 'armv7l::arm'
+ 'armv5tel::arm'
+ )
+
+ if [ -z "${NETDATA_DISABLE_GO+x}" ]; then
+ echo >&2 "Install go.d.plugin"
+ ARCH=$(uname -m)
+ OS=$(uname -s | tr '[:upper:]' '[:lower:]')
+
+ for index in "${ARCH_MAP[@]}" ; do
+ KEY="${index%%::*}"
+ VALUE="${index##*::}"
+ if [ "$KEY" = "$ARCH" ]; then
+ ARCH="${VALUE}"
+ break
+ fi
+ done
+ tmp=$(mktemp -d /tmp/netdata-go-XXXXXX)
+ GO_PACKAGE_BASENAME="go.d.plugin-${GO_PACKAGE_VERSION}.${OS}-${ARCH}.tar.gz"
+ download_go "https://github.com/netdata/go.d.plugin/releases/download/${GO_PACKAGE_VERSION}/${GO_PACKAGE_BASENAME}" "${tmp}/${GO_PACKAGE_BASENAME}"
+ download_go "https://github.com/netdata/go.d.plugin/releases/download/${GO_PACKAGE_VERSION}/config.tar.gz" "${tmp}/config.tar.gz"
+
+ if [ ! -f "${tmp}/${GO_PACKAGE_BASENAME}" ] || [ ! -f "${tmp}/config.tar.gz" ] || [ ! -s "${tmp}/config.tar.gz" ] || [ ! -s "${tmp}/${GO_PACKAGE_BASENAME}" ]; then
+ echo >&2 "Either check the error or consider disabling it by issuing '--disable-go' in the installer"
+ echo >&2
+ return 1
+ fi
+
+ grep "${GO_PACKAGE_BASENAME}\$" "packaging/go.d.checksums" > "${tmp}/sha256sums.txt" 2>/dev/null
+ grep "config.tar.gz" "packaging/go.d.checksums" >> "${tmp}/sha256sums.txt" 2>/dev/null
+
+ # Checksum validation
+ if ! (cd "${tmp}" && safe_sha256sum -c "sha256sums.txt"); then
+
+ echo >&2 "go.d plugin checksum validation failure."
+ echo >&2 "Either check the error or consider disabling it by issuing '--disable-go' in the installer"
+ echo >&2
+
+ echo "go.d.plugin package files checksum validation failed."
+ exit 1
+ fi
+
+ # Install files
+ tar -xf "${tmp}/config.tar.gz" -C "${RPM_BUILD_ROOT}%{_libdir}/%{name}/conf.d/"
+ tar xf "${tmp}/${GO_PACKAGE_BASENAME}"
+ mv "${GO_PACKAGE_BASENAME/\.tar\.gz/}" "go.d.plugin"
+ rm -rf "${tmp}"
+ fi
+ return 0
+}
+install_go
+install -m 0640 -p go.d.plugin "${RPM_BUILD_ROOT}%{_libexecdir}/%{name}/plugins.d/go.d.plugin"
+
+${RPM_BUILD_DIR}/%{name}-%{version}/packaging/bundle-dashboard.sh ${RPM_BUILD_DIR}/%{name}-%{version} ${RPM_BUILD_ROOT}%{_datadir}/%{name}/web
+%if 0%{?have_bpf}
+${RPM_BUILD_DIR}/%{name}-%{version}/packaging/bundle-ebpf.sh ${RPM_BUILD_DIR}/%{name}-%{version} ${RPM_BUILD_ROOT}%{_libexecdir}/%{name}/plugins.d
+%endif
+
+%pre
+
+# User/Group creations, as needed
+getent group netdata >/dev/null || groupadd -r netdata
+getent group docker >/dev/null || groupadd -r docker
+getent passwd netdata >/dev/null || \
+ useradd -r -g netdata -G docker -s /sbin/nologin \
+ -d %{contentdir} -c "netdata" netdata
+
+%post
+%{netdata_init_post}
+
+%preun
+%{netdata_init_preun}
+
+%postun
+%{netdata_init_postun}
+
+%clean
+rm -rf "${RPM_BUILD_ROOT}"
+
+%files
+%doc README.md
+%{_sysconfdir}/%{name}
+%config(noreplace) %{_sysconfdir}/%{name}/netdata.conf
+%config(noreplace) %{_sysconfdir}/logrotate.d/%{name}
+%dir %{_libdir}/%{name}
+%dir %{_datadir}/%{name}
+%{_libdir}/%{name}
+%{_libdir}/%{name}/conf.d/
+%{_libexecdir}/%{name}
+%{_sbindir}/%{name}
+%{_sbindir}/netdatacli
+%{_sbindir}/netdata-claim.sh
+
+%if %{with systemd}
+%{_unitdir}/netdata.service
+%else
+%{_sysconfdir}/rc.d/init.d/netdata
+%endif
+
+%defattr(0750,root,netdata,0750)
+
+%dir %{_libexecdir}/%{name}/python.d
+%dir %{_libexecdir}/%{name}/charts.d
+%dir %{_libexecdir}/%{name}/plugins.d
+%dir %{_libexecdir}/%{name}/node.d
+
+%{_libexecdir}/%{name}/python.d
+%{_libexecdir}/%{name}/plugins.d
+%{_libexecdir}/%{name}/node.d
+
+%caps(cap_dac_read_search,cap_sys_ptrace=ep) %attr(0750,root,netdata) %{_libexecdir}/%{name}/plugins.d/apps.plugin
+
+%if %{with netns}
+# cgroup-network detects the network interfaces of CGROUPs
+# it must be able to use setns() and run cgroup-network-helper.sh as root
+# the helper script reads /proc/PID/fdinfo/* files, runs virsh, etc.
+%caps(cap_setuid=ep) %attr(4750,root,netdata) %{_libexecdir}/%{name}/plugins.d/cgroup-network
+%attr(0750,root,netdata) %{_libexecdir}/%{name}/plugins.d/cgroup-network-helper.sh
+%endif
+
+# perf plugin
+%caps(cap_setuid=ep) %attr(4750,root,netdata) %{_libexecdir}/%{name}/plugins.d/perf.plugin
+
+# slabinfo plugin
+%caps(cap_setuid=ep) %attr(4750,root,netdata) %{_libexecdir}/%{name}/plugins.d/slabinfo.plugin
+
+# freeipmi files
+%caps(cap_setuid=ep) %attr(4750,root,netdata) %{_libexecdir}/%{name}/plugins.d/freeipmi.plugin
+
+# Enforce 0644 for files and 0755 for directories
+# for the netdata web directory
+%defattr(0644,root,netdata,0755)
+%{_datadir}/%{name}/web
+
+# Enforce 0660 for files and 0770 for directories
+# for the netdata lib, cache and log dirs
+%defattr(0660,root,netdata,0770)
+%attr(0770,netdata,netdata) %dir %{_localstatedir}/cache/%{name}
+%attr(0755,netdata,root) %dir %{_localstatedir}/log/%{name}
+%attr(0770,netdata,netdata) %dir %{_localstatedir}/lib/%{name}
+%attr(0770,netdata,netdata) %dir %{_localstatedir}/lib/%{name}/registry
+
+# Free IPMI belongs to a different sub-package
+%exclude %{_libexecdir}/%{name}/plugins.d/freeipmi.plugin
+
+# CUPS belongs to a different sub package
+%if 0%{?centos_ver} != 6 && 0%{?centos_ver} != 7
+%exclude %{_libexecdir}/%{name}/plugins.d/cups.plugin
+
+%package plugin-cups
+Summary: The Common Unix Printing System plugin for netdata
+Group: Applications/System
+Requires: cups >= 1.7
+Requires: netdata = %{version}
+
+%description plugin-cups
+ This is the Common Unix Printing System plugin for the netdata daemon.
+Use this plugin to enable metrics collection from cupsd, the daemon that runs when CUPS is enabled on the system.
+
+%files plugin-cups
+%attr(0750,root,netdata) %{_libexecdir}/%{name}/plugins.d/cups.plugin
+%endif
+
+%package plugin-freeipmi
+Summary: FreeIPMI - The Intelligent Platform Management System
+Group: Applications/System
+Requires: freeipmi
+Requires: netdata = %{version}
+
+%description plugin-freeipmi
+ The IPMI specification defines a set of interfaces for platform management.
+It is implemented by a number of vendors for system management. The features of IPMI that most users will be interested in
+are sensor monitoring, system event monitoring, power control, and serial-over-LAN (SOL).
+
+%files plugin-freeipmi
+%attr(4750,root,netdata) %{_libexecdir}/%{name}/plugins.d/freeipmi.plugin
+
+%changelog
+* Wed Sep 16 2020 Austin Hemmelgarn <austin@netdata.cloud> 0.0.0-14
+- Convert to using 'AutoReq: yes' for library dependencies.
+* Thu Feb 13 2020 Austin Hemmelgarn <austin@netdata.cloud> 0.0.0-13
+- Add handling for custom libmosquitto fork
+* Wed Jan 01 2020 Austin Hemmelgarn <austin@netdata.cloud> 0.0.0-12
+- Add explicit installation of log and cache directories
+- Clean up build dependencies.
+* Thu Dec 19 2019 Austin Hemmelgarn <austin@netdata.cloud> 0.0.0-11
+- Fix remaining ownership and permissions issues.
+* Mon Nov 04 2019 Konstantinos Natsakis <konstantinos.natsakis@gmail.com> 0.0.0-10
+- Fix /etc/netdata permissions
+* Mon Sep 23 2019 Konstantinos Natsakis <konstantinos.natsakis@gmail.com> 0.0.0-9
+- Do not build CUPS plugin subpackage on CentOS 6 and CentOS 7
+* Tue Aug 20 2019 Pavlos Emm. Katsoulakis <paul@netdata.cloud> - 0.0.0-8
+- Split CUPS functionality on separate package
+* Fri Jun 28 2019 Pavlos Emm. Katsoulakis <paul@netdata.cloud> - 0.0.0-7
+- Raise the path overrides to the spec file level, not just the configure.
+- Adjust tighter permissions on some folders, based on what we did on our installer
+- Introduce go.d plugin download and install, to include it on the package (Temporarily, to become separate package on next iteration)
+* Tue Jun 25 2019 Pavlos Emm. Katsoulakis <paul@netdata.cloud> - 0.0.0-6
+- Adjust dependency list: Some packages are missing on some distros, adopt to build successfully
+* Mon Jun 24 2019 Pavlos Emm. Katsoulakis <paul@netdata.cloud> - 0.0.0-5
+Another pass on cleaning up pre/post installation steps
+- Sync permission and ownership on files and directories
+* Sun Jun 16 2019 Pavlos Emm. Katsoulakis <paul@netdata.cloud> - 0.0.0-4
+First draft refactor on package dependencies section
+- Remove freeipmi/nfacct plugin flags. We auto-detect all plugins by decision
+- Start refactor of package dependencies
+- Add missing dependencies, with respect to distro peculiarities
+- Adjust existing dependencies, so that distro-specific package names is applied
+* Wed Jan 02 2019 Pawel Krupa <pkrupa@redhat.com> - 0.0.0-3
+- Temporary set version statically
+- Fix changelog ordering
+- Comment-out node.d configuration directory
+* Wed Jan 02 2019 Pawel Krupa <pkrupa@redhat.com> - 0.0.0-2
+- Fix permissions for log files
+* Sun Nov 15 2015 Alon Bar-Lev <alonbl@redhat.com> - 0.0.0-1
+- Initial add.
+
diff --git a/package-lock.json b/package-lock.json
new file mode 100644
index 0000000..11d1fb0
--- /dev/null
+++ b/package-lock.json
@@ -0,0 +1,6320 @@
+{
+ "requires": true,
+ "lockfileVersion": 1,
+ "dependencies": {
+ "@babel/code-frame": {
+ "version": "7.10.4",
+ "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.10.4.tgz",
+ "integrity": "sha512-vG6SvB6oYEhvgisZNFRmRCUkLz11c7rp+tbNTynGqc6mS1d5ATd/sGyV6W0KZZnXRKMTzZDRgQT3Ou9jhpAfUg==",
+ "dev": true,
+ "requires": {
+ "@babel/highlight": "^7.10.4"
+ }
+ },
+ "@babel/core": {
+ "version": "7.11.0",
+ "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.11.0.tgz",
+ "integrity": "sha512-mkLq8nwaXmDtFmRkQ8ED/eA2CnVw4zr7dCztKalZXBvdK5EeNUAesrrwUqjQEzFgomJssayzB0aqlOsP1vGLqg==",
+ "dev": true,
+ "requires": {
+ "@babel/code-frame": "^7.10.4",
+ "@babel/generator": "^7.11.0",
+ "@babel/helper-module-transforms": "^7.11.0",
+ "@babel/helpers": "^7.10.4",
+ "@babel/parser": "^7.11.0",
+ "@babel/template": "^7.10.4",
+ "@babel/traverse": "^7.11.0",
+ "@babel/types": "^7.11.0",
+ "convert-source-map": "^1.7.0",
+ "debug": "^4.1.0",
+ "gensync": "^1.0.0-beta.1",
+ "json5": "^2.1.2",
+ "lodash": "^4.17.19",
+ "resolve": "^1.3.2",
+ "semver": "^5.4.1",
+ "source-map": "^0.5.0"
+ },
+ "dependencies": {
+ "debug": {
+ "version": "4.1.1",
+ "resolved": "https://registry.npmjs.org/debug/-/debug-4.1.1.tgz",
+ "integrity": "sha512-pYAIzeRo8J6KPEaJ0VWOh5Pzkbw/RetuzehGM7QRRX5he4fPHx2rdKMB256ehJCkX+XRQm16eZLqLNS8RSZXZw==",
+ "dev": true,
+ "requires": {
+ "ms": "^2.1.1"
+ }
+ },
+ "ms": {
+ "version": "2.1.2",
+ "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz",
+ "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==",
+ "dev": true
+ },
+ "semver": {
+ "version": "5.7.1",
+ "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.1.tgz",
+ "integrity": "sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ==",
+ "dev": true
+ }
+ }
+ },
+ "@babel/generator": {
+ "version": "7.11.0",
+ "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.11.0.tgz",
+ "integrity": "sha512-fEm3Uzw7Mc9Xi//qU20cBKatTfs2aOtKqmvy/Vm7RkJEGFQ4xc9myCfbXxqK//ZS8MR/ciOHw6meGASJuKmDfQ==",
+ "dev": true,
+ "requires": {
+ "@babel/types": "^7.11.0",
+ "jsesc": "^2.5.1",
+ "source-map": "^0.5.0"
+ }
+ },
+ "@babel/helper-function-name": {
+ "version": "7.10.4",
+ "resolved": "https://registry.npmjs.org/@babel/helper-function-name/-/helper-function-name-7.10.4.tgz",
+ "integrity": "sha512-YdaSyz1n8gY44EmN7x44zBn9zQ1Ry2Y+3GTA+3vH6Mizke1Vw0aWDM66FOYEPw8//qKkmqOckrGgTYa+6sceqQ==",
+ "dev": true,
+ "requires": {
+ "@babel/helper-get-function-arity": "^7.10.4",
+ "@babel/template": "^7.10.4",
+ "@babel/types": "^7.10.4"
+ }
+ },
+ "@babel/helper-get-function-arity": {
+ "version": "7.10.4",
+ "resolved": "https://registry.npmjs.org/@babel/helper-get-function-arity/-/helper-get-function-arity-7.10.4.tgz",
+ "integrity": "sha512-EkN3YDB+SRDgiIUnNgcmiD361ti+AVbL3f3Henf6dqqUyr5dMsorno0lJWJuLhDhkI5sYEpgj6y9kB8AOU1I2A==",
+ "dev": true,
+ "requires": {
+ "@babel/types": "^7.10.4"
+ }
+ },
+ "@babel/helper-member-expression-to-functions": {
+ "version": "7.11.0",
+ "resolved": "https://registry.npmjs.org/@babel/helper-member-expression-to-functions/-/helper-member-expression-to-functions-7.11.0.tgz",
+ "integrity": "sha512-JbFlKHFntRV5qKw3YC0CvQnDZ4XMwgzzBbld7Ly4Mj4cbFy3KywcR8NtNctRToMWJOVvLINJv525Gd6wwVEx/Q==",
+ "dev": true,
+ "requires": {
+ "@babel/types": "^7.11.0"
+ }
+ },
+ "@babel/helper-module-imports": {
+ "version": "7.10.4",
+ "resolved": "https://registry.npmjs.org/@babel/helper-module-imports/-/helper-module-imports-7.10.4.tgz",
+ "integrity": "sha512-nEQJHqYavI217oD9+s5MUBzk6x1IlvoS9WTPfgG43CbMEeStE0v+r+TucWdx8KFGowPGvyOkDT9+7DHedIDnVw==",
+ "dev": true,
+ "requires": {
+ "@babel/types": "^7.10.4"
+ }
+ },
+ "@babel/helper-module-transforms": {
+ "version": "7.11.0",
+ "resolved": "https://registry.npmjs.org/@babel/helper-module-transforms/-/helper-module-transforms-7.11.0.tgz",
+ "integrity": "sha512-02EVu8COMuTRO1TAzdMtpBPbe6aQ1w/8fePD2YgQmxZU4gpNWaL9gK3Jp7dxlkUlUCJOTaSeA+Hrm1BRQwqIhg==",
+ "dev": true,
+ "requires": {
+ "@babel/helper-module-imports": "^7.10.4",
+ "@babel/helper-replace-supers": "^7.10.4",
+ "@babel/helper-simple-access": "^7.10.4",
+ "@babel/helper-split-export-declaration": "^7.11.0",
+ "@babel/template": "^7.10.4",
+ "@babel/types": "^7.11.0",
+ "lodash": "^4.17.19"
+ }
+ },
+ "@babel/helper-optimise-call-expression": {
+ "version": "7.10.4",
+ "resolved": "https://registry.npmjs.org/@babel/helper-optimise-call-expression/-/helper-optimise-call-expression-7.10.4.tgz",
+ "integrity": "sha512-n3UGKY4VXwXThEiKrgRAoVPBMqeoPgHVqiHZOanAJCG9nQUL2pLRQirUzl0ioKclHGpGqRgIOkgcIJaIWLpygg==",
+ "dev": true,
+ "requires": {
+ "@babel/types": "^7.10.4"
+ }
+ },
+ "@babel/helper-replace-supers": {
+ "version": "7.10.4",
+ "resolved": "https://registry.npmjs.org/@babel/helper-replace-supers/-/helper-replace-supers-7.10.4.tgz",
+ "integrity": "sha512-sPxZfFXocEymYTdVK1UNmFPBN+Hv5mJkLPsYWwGBxZAxaWfFu+xqp7b6qWD0yjNuNL2VKc6L5M18tOXUP7NU0A==",
+ "dev": true,
+ "requires": {
+ "@babel/helper-member-expression-to-functions": "^7.10.4",
+ "@babel/helper-optimise-call-expression": "^7.10.4",
+ "@babel/traverse": "^7.10.4",
+ "@babel/types": "^7.10.4"
+ }
+ },
+ "@babel/helper-simple-access": {
+ "version": "7.10.4",
+ "resolved": "https://registry.npmjs.org/@babel/helper-simple-access/-/helper-simple-access-7.10.4.tgz",
+ "integrity": "sha512-0fMy72ej/VEvF8ULmX6yb5MtHG4uH4Dbd6I/aHDb/JVg0bbivwt9Wg+h3uMvX+QSFtwr5MeItvazbrc4jtRAXw==",
+ "dev": true,
+ "requires": {
+ "@babel/template": "^7.10.4",
+ "@babel/types": "^7.10.4"
+ }
+ },
+ "@babel/helper-split-export-declaration": {
+ "version": "7.11.0",
+ "resolved": "https://registry.npmjs.org/@babel/helper-split-export-declaration/-/helper-split-export-declaration-7.11.0.tgz",
+ "integrity": "sha512-74Vejvp6mHkGE+m+k5vHY93FX2cAtrw1zXrZXRlG4l410Nm9PxfEiVTn1PjDPV5SnmieiueY4AFg2xqhNFuuZg==",
+ "dev": true,
+ "requires": {
+ "@babel/types": "^7.11.0"
+ }
+ },
+ "@babel/helper-validator-identifier": {
+ "version": "7.10.4",
+ "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.10.4.tgz",
+ "integrity": "sha512-3U9y+43hz7ZM+rzG24Qe2mufW5KhvFg/NhnNph+i9mgCtdTCtMJuI1TMkrIUiK7Ix4PYlRF9I5dhqaLYA/ADXw==",
+ "dev": true
+ },
+ "@babel/helpers": {
+ "version": "7.10.4",
+ "resolved": "https://registry.npmjs.org/@babel/helpers/-/helpers-7.10.4.tgz",
+ "integrity": "sha512-L2gX/XeUONeEbI78dXSrJzGdz4GQ+ZTA/aazfUsFaWjSe95kiCuOZ5HsXvkiw3iwF+mFHSRUfJU8t6YavocdXA==",
+ "dev": true,
+ "requires": {
+ "@babel/template": "^7.10.4",
+ "@babel/traverse": "^7.10.4",
+ "@babel/types": "^7.10.4"
+ }
+ },
+ "@babel/highlight": {
+ "version": "7.10.4",
+ "resolved": "https://registry.npmjs.org/@babel/highlight/-/highlight-7.10.4.tgz",
+ "integrity": "sha512-i6rgnR/YgPEQzZZnbTHHuZdlE8qyoBNalD6F+q4vAFlcMEcqmkoG+mPqJYJCo63qPf74+Y1UZsl3l6f7/RIkmA==",
+ "dev": true,
+ "requires": {
+ "@babel/helper-validator-identifier": "^7.10.4",
+ "chalk": "^2.0.0",
+ "js-tokens": "^4.0.0"
+ }
+ },
+ "@babel/parser": {
+ "version": "7.11.0",
+ "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.11.0.tgz",
+ "integrity": "sha512-qvRvi4oI8xii8NllyEc4MDJjuZiNaRzyb7Y7lup1NqJV8TZHF4O27CcP+72WPn/k1zkgJ6WJfnIbk4jTsVAZHw==",
+ "dev": true
+ },
+ "@babel/template": {
+ "version": "7.10.4",
+ "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.10.4.tgz",
+ "integrity": "sha512-ZCjD27cGJFUB6nmCB1Enki3r+L5kJveX9pq1SvAUKoICy6CZ9yD8xO086YXdYhvNjBdnekm4ZnaP5yC8Cs/1tA==",
+ "dev": true,
+ "requires": {
+ "@babel/code-frame": "^7.10.4",
+ "@babel/parser": "^7.10.4",
+ "@babel/types": "^7.10.4"
+ }
+ },
+ "@babel/traverse": {
+ "version": "7.11.0",
+ "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.11.0.tgz",
+ "integrity": "sha512-ZB2V+LskoWKNpMq6E5UUCrjtDUh5IOTAyIl0dTjIEoXum/iKWkoIEKIRDnUucO6f+2FzNkE0oD4RLKoPIufDtg==",
+ "dev": true,
+ "requires": {
+ "@babel/code-frame": "^7.10.4",
+ "@babel/generator": "^7.11.0",
+ "@babel/helper-function-name": "^7.10.4",
+ "@babel/helper-split-export-declaration": "^7.11.0",
+ "@babel/parser": "^7.11.0",
+ "@babel/types": "^7.11.0",
+ "debug": "^4.1.0",
+ "globals": "^11.1.0",
+ "lodash": "^4.17.19"
+ },
+ "dependencies": {
+ "debug": {
+ "version": "4.1.1",
+ "resolved": "https://registry.npmjs.org/debug/-/debug-4.1.1.tgz",
+ "integrity": "sha512-pYAIzeRo8J6KPEaJ0VWOh5Pzkbw/RetuzehGM7QRRX5he4fPHx2rdKMB256ehJCkX+XRQm16eZLqLNS8RSZXZw==",
+ "dev": true,
+ "requires": {
+ "ms": "^2.1.1"
+ }
+ },
+ "ms": {
+ "version": "2.1.2",
+ "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz",
+ "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==",
+ "dev": true
+ }
+ }
+ },
+ "@babel/types": {
+ "version": "7.11.0",
+ "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.11.0.tgz",
+ "integrity": "sha512-O53yME4ZZI0jO1EVGtF1ePGl0LHirG4P1ibcD80XyzZcKhcMFeCXmh4Xb1ifGBIV233Qg12x4rBfQgA+tmOukA==",
+ "dev": true,
+ "requires": {
+ "@babel/helper-validator-identifier": "^7.10.4",
+ "lodash": "^4.17.19",
+ "to-fast-properties": "^2.0.0"
+ }
+ },
+ "@istanbuljs/schema": {
+ "version": "0.1.2",
+ "resolved": "https://registry.npmjs.org/@istanbuljs/schema/-/schema-0.1.2.tgz",
+ "integrity": "sha512-tsAQNx32a8CoFhjhijUIhI4kccIAgmGhy8LZMZgGfmXcpMbPRUqn5LWmgRttILi6yeGmBJd2xsPkFMs0PzgPCw==",
+ "dev": true
+ },
+ "@mrmlnc/readdir-enhanced": {
+ "version": "2.2.1",
+ "resolved": "https://registry.npmjs.org/@mrmlnc/readdir-enhanced/-/readdir-enhanced-2.2.1.tgz",
+ "integrity": "sha512-bPHp6Ji8b41szTOcaP63VlnbbO5Ny6dwAATtY6JTjh5N2OLrb5Qk/Th5cRkRQhkWCt+EJsYrNB0MiL+Gpn6e3g==",
+ "dev": true,
+ "requires": {
+ "call-me-maybe": "^1.0.1",
+ "glob-to-regexp": "^0.3.0"
+ }
+ },
+ "@nodelib/fs.stat": {
+ "version": "1.1.3",
+ "resolved": "https://registry.npmjs.org/@nodelib/fs.stat/-/fs.stat-1.1.3.tgz",
+ "integrity": "sha512-shAmDyaQC4H92APFoIaVDHCx5bStIocgvbwQyxPRrbUY20V1EYTbSDchWbuwlMG3V17cprZhA6+78JfB+3DTPw==",
+ "dev": true
+ },
+ "@sindresorhus/is": {
+ "version": "0.14.0",
+ "resolved": "https://registry.npmjs.org/@sindresorhus/is/-/is-0.14.0.tgz",
+ "integrity": "sha512-9NET910DNaIPngYnLLPeg+Ogzqsi9uM4mSboU5y6p8S5DzMTVEsJZrawi+BoDNUVBa2DhJqQYUFvMDfgU062LQ==",
+ "dev": true
+ },
+ "@szmarczak/http-timer": {
+ "version": "1.1.2",
+ "resolved": "https://registry.npmjs.org/@szmarczak/http-timer/-/http-timer-1.1.2.tgz",
+ "integrity": "sha512-XIB2XbzHTN6ieIjfIMV9hlVcfPU26s2vafYWQcZHWXHOxiaRZYEDKEwdl129Zyg50+foYV2jCgtrqSA6qNuNSA==",
+ "dev": true,
+ "requires": {
+ "defer-to-connect": "^1.0.1"
+ }
+ },
+ "@types/component-emitter": {
+ "version": "1.2.10",
+ "resolved": "https://registry.npmjs.org/@types/component-emitter/-/component-emitter-1.2.10.tgz",
+ "integrity": "sha512-bsjleuRKWmGqajMerkzox19aGbscQX5rmmvvXl3wlIp5gMG1HgkiwPxsN5p070fBDKTNSPgojVbuY1+HWMbFhg==",
+ "dev": true
+ },
+ "@types/cookie": {
+ "version": "0.4.0",
+ "resolved": "https://registry.npmjs.org/@types/cookie/-/cookie-0.4.0.tgz",
+ "integrity": "sha512-y7mImlc/rNkvCRmg8gC3/lj87S7pTUIJ6QGjwHR9WQJcFs+ZMTOaoPrkdFA/YdbuqVEmEbb5RdhVxMkAcgOnpg==",
+ "dev": true
+ },
+ "@types/cors": {
+ "version": "2.8.9",
+ "resolved": "https://registry.npmjs.org/@types/cors/-/cors-2.8.9.tgz",
+ "integrity": "sha512-zurD1ibz21BRlAOIKP8yhrxlqKx6L9VCwkB5kMiP6nZAhoF5MvC7qS1qPA7nRcr1GJolfkQC7/EAL4hdYejLtg==",
+ "dev": true
+ },
+ "@types/node": {
+ "version": "14.14.22",
+ "resolved": "https://registry.npmjs.org/@types/node/-/node-14.14.22.tgz",
+ "integrity": "sha512-g+f/qj/cNcqKkc3tFqlXOYjrmZA+jNBiDzbP3kH+B+otKFqAdPgVTGP1IeKRdMml/aE69as5S4FqtxAbl+LaMw==",
+ "dev": true
+ },
+ "@types/unist": {
+ "version": "2.0.3",
+ "resolved": "https://registry.npmjs.org/@types/unist/-/unist-2.0.3.tgz",
+ "integrity": "sha512-FvUupuM3rlRsRtCN+fDudtmytGO6iHJuuRKS1Ss0pG5z8oX0diNEw94UEL7hgDbpN94rgaK5R7sWm6RrSkZuAQ==",
+ "dev": true
+ },
+ "abbrev": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/abbrev/-/abbrev-1.1.1.tgz",
+ "integrity": "sha512-nne9/IiQ/hzIhY6pdDnbBtz7DjPTKrY00P/zvPSm5pOFkl6xuGrGnXn/VtTNNfNtAfZ9/1RtehkszU9qcTii0Q==",
+ "dev": true
+ },
+ "accepts": {
+ "version": "1.3.7",
+ "resolved": "https://registry.npmjs.org/accepts/-/accepts-1.3.7.tgz",
+ "integrity": "sha512-Il80Qs2WjYlJIBNzNkK6KYqlVMTbZLXgHx2oT0pU/fjRHyEp+PEfEPY0R3WCwAGVOtauxh1hOxNgIf5bv7dQpA==",
+ "dev": true,
+ "requires": {
+ "mime-types": "~2.1.24",
+ "negotiator": "0.6.2"
+ }
+ },
+ "adverb-where": {
+ "version": "0.2.1",
+ "resolved": "https://registry.npmjs.org/adverb-where/-/adverb-where-0.2.1.tgz",
+ "integrity": "sha512-IAveFBziMRMNPKFdWRcdIKaJvJG1cAfU9/tf9MzqQ84Dh4QjD9eqwnt4hNSt9cbrcEJD74BMIOaRVgVDEU7MwQ==",
+ "dev": true
+ },
+ "aggregate-error": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/aggregate-error/-/aggregate-error-3.0.1.tgz",
+ "integrity": "sha512-quoaXsZ9/BLNae5yiNoUz+Nhkwz83GhWwtYFglcjEQB2NDHCIpApbqXxIFnm4Pq/Nvhrsq5sYJFyohrrxnTGAA==",
+ "dev": true,
+ "requires": {
+ "clean-stack": "^2.0.0",
+ "indent-string": "^4.0.0"
+ }
+ },
+ "ansi-regex": {
+ "version": "5.0.0",
+ "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.0.tgz",
+ "integrity": "sha512-bY6fj56OUQ0hU1KjFNDQuJFezqKdrAyFdIevADiqrWHwSlbmBNMHp5ak2f40Pm8JTFyM2mqxkG6ngkHO11f/lg==",
+ "dev": true
+ },
+ "ansi-styles": {
+ "version": "3.2.1",
+ "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz",
+ "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==",
+ "dev": true,
+ "requires": {
+ "color-convert": "^1.9.0"
+ }
+ },
+ "anymatch": {
+ "version": "3.1.1",
+ "resolved": "https://registry.npmjs.org/anymatch/-/anymatch-3.1.1.tgz",
+ "integrity": "sha512-mM8522psRCqzV+6LhomX5wgp25YVibjh8Wj23I5RPkPppSVSjyKD2A2mBJmWGa+KN7f2D6LNh9jkBCeyLktzjg==",
+ "dev": true,
+ "requires": {
+ "normalize-path": "^3.0.0",
+ "picomatch": "^2.0.4"
+ }
+ },
+ "argparse": {
+ "version": "1.0.10",
+ "resolved": "https://registry.npmjs.org/argparse/-/argparse-1.0.10.tgz",
+ "integrity": "sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==",
+ "dev": true,
+ "requires": {
+ "sprintf-js": "~1.0.2"
+ },
+ "dependencies": {
+ "sprintf-js": {
+ "version": "1.0.3",
+ "resolved": "https://registry.npmjs.org/sprintf-js/-/sprintf-js-1.0.3.tgz",
+ "integrity": "sha1-BOaSb2YolTVPPdAVIDYzuFcpfiw=",
+ "dev": true
+ }
+ }
+ },
+ "arr-diff": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/arr-diff/-/arr-diff-4.0.0.tgz",
+ "integrity": "sha1-1kYQdP6/7HHn4VI1dhoyml3HxSA=",
+ "dev": true
+ },
+ "arr-flatten": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/arr-flatten/-/arr-flatten-1.1.0.tgz",
+ "integrity": "sha512-L3hKV5R/p5o81R7O02IGnwpDmkp6E982XhtbuwSe3O4qOtMMMtodicASA1Cny2U+aCXcNpml+m4dPsvsJ3jatg==",
+ "dev": true
+ },
+ "arr-union": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/arr-union/-/arr-union-3.1.0.tgz",
+ "integrity": "sha1-45sJrqne+Gao8gbiiK9jkZuuOcQ=",
+ "dev": true
+ },
+ "array-differ": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/array-differ/-/array-differ-1.0.0.tgz",
+ "integrity": "sha1-7/UuN1gknTO+QCuLuOVkuytdQDE=",
+ "dev": true
+ },
+ "array-each": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/array-each/-/array-each-1.0.1.tgz",
+ "integrity": "sha1-p5SvDAWrF1KEbudTofIRoFugxE8=",
+ "dev": true
+ },
+ "array-iterate": {
+ "version": "1.1.4",
+ "resolved": "https://registry.npmjs.org/array-iterate/-/array-iterate-1.1.4.tgz",
+ "integrity": "sha512-sNRaPGh9nnmdC8Zf+pT3UqP8rnWj5Hf9wiFGsX3wUQ2yVSIhO2ShFwCoceIPpB41QF6i2OEmrHmCo36xronCVA==",
+ "dev": true
+ },
+ "array-slice": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/array-slice/-/array-slice-1.1.0.tgz",
+ "integrity": "sha512-B1qMD3RBP7O8o0H2KbrXDyB0IccejMF15+87Lvlor12ONPRHP6gTjXMNkt/d3ZuOGbAe66hFmaCfECI24Ufp6w==",
+ "dev": true
+ },
+ "array-unique": {
+ "version": "0.3.2",
+ "resolved": "https://registry.npmjs.org/array-unique/-/array-unique-0.3.2.tgz",
+ "integrity": "sha1-qJS3XUvE9s1nnvMkSp/Y9Gri1Cg=",
+ "dev": true
+ },
+ "assign-symbols": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/assign-symbols/-/assign-symbols-1.0.0.tgz",
+ "integrity": "sha1-WWZ/QfrdTyDMvCu5a41Pf3jsA2c=",
+ "dev": true
+ },
+ "async": {
+ "version": "1.5.2",
+ "resolved": "https://registry.npmjs.org/async/-/async-1.5.2.tgz",
+ "integrity": "sha1-7GphrlZIDAw8skHJVhjiCJL5Zyo=",
+ "dev": true
+ },
+ "atob": {
+ "version": "2.1.2",
+ "resolved": "https://registry.npmjs.org/atob/-/atob-2.1.2.tgz",
+ "integrity": "sha512-Wm6ukoaOGJi/73p/cl2GvLjTI5JM1k/O14isD73YML8StrH/7/lRFgmg8nICZgD3bZZvjwCGxtMOD3wWNAu8cg==",
+ "dev": true
+ },
+ "automated-readability": {
+ "version": "1.0.5",
+ "resolved": "https://registry.npmjs.org/automated-readability/-/automated-readability-1.0.5.tgz",
+ "integrity": "sha512-N6mr0nUS0TB+SLHCrDYzLIdJQ1wklXNhsiKYh6tcrjDMlhjfz6BFGlDvngcpcBvZpko10jVjvF5XziJOxyA9Sg==",
+ "dev": true
+ },
+ "bail": {
+ "version": "1.0.5",
+ "resolved": "https://registry.npmjs.org/bail/-/bail-1.0.5.tgz",
+ "integrity": "sha512-xFbRxM1tahm08yHBP16MMjVUAvDaBMD38zsM9EMAUN61omwLmKlOpB/Zku5QkjZ8TZ4vn53pj+t518cH0S03RQ==",
+ "dev": true
+ },
+ "balanced-match": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.0.tgz",
+ "integrity": "sha1-ibTRmasr7kneFk6gK4nORi1xt2c=",
+ "dev": true
+ },
+ "base": {
+ "version": "0.11.2",
+ "resolved": "https://registry.npmjs.org/base/-/base-0.11.2.tgz",
+ "integrity": "sha512-5T6P4xPgpp0YDFvSWwEZ4NoE3aM4QBQXDzmVbraCkFj8zHM+mba8SyqB5DbZWyR7mYHo6Y7BdQo3MoA4m0TeQg==",
+ "dev": true,
+ "requires": {
+ "cache-base": "^1.0.1",
+ "class-utils": "^0.3.5",
+ "component-emitter": "^1.2.1",
+ "define-property": "^1.0.0",
+ "isobject": "^3.0.1",
+ "mixin-deep": "^1.2.0",
+ "pascalcase": "^0.1.1"
+ },
+ "dependencies": {
+ "define-property": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/define-property/-/define-property-1.0.0.tgz",
+ "integrity": "sha1-dp66rz9KY6rTr56NMEybvnm/sOY=",
+ "dev": true,
+ "requires": {
+ "is-descriptor": "^1.0.0"
+ }
+ },
+ "is-accessor-descriptor": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/is-accessor-descriptor/-/is-accessor-descriptor-1.0.0.tgz",
+ "integrity": "sha512-m5hnHTkcVsPfqx3AKlyttIPb7J+XykHvJP2B9bZDjlhLIoEq4XoK64Vg7boZlVWYK6LUY94dYPEE7Lh0ZkZKcQ==",
+ "dev": true,
+ "requires": {
+ "kind-of": "^6.0.0"
+ }
+ },
+ "is-data-descriptor": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/is-data-descriptor/-/is-data-descriptor-1.0.0.tgz",
+ "integrity": "sha512-jbRXy1FmtAoCjQkVmIVYwuuqDFUbaOeDjmed1tOGPrsMhtJA4rD9tkgA0F1qJ3gRFRXcHYVkdeaP50Q5rE/jLQ==",
+ "dev": true,
+ "requires": {
+ "kind-of": "^6.0.0"
+ }
+ },
+ "is-descriptor": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/is-descriptor/-/is-descriptor-1.0.2.tgz",
+ "integrity": "sha512-2eis5WqQGV7peooDyLmNEPUrps9+SXX5c9pL3xEB+4e9HnGuDa7mB7kHxHw4CbqS9k1T2hOH3miL8n8WtiYVtg==",
+ "dev": true,
+ "requires": {
+ "is-accessor-descriptor": "^1.0.0",
+ "is-data-descriptor": "^1.0.0",
+ "kind-of": "^6.0.2"
+ }
+ }
+ }
+ },
+ "base64-arraybuffer": {
+ "version": "0.1.4",
+ "resolved": "https://registry.npmjs.org/base64-arraybuffer/-/base64-arraybuffer-0.1.4.tgz",
+ "integrity": "sha1-mBjHngWbE1X5fgQooBfIOOkLqBI=",
+ "dev": true
+ },
+ "base64id": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/base64id/-/base64id-2.0.0.tgz",
+ "integrity": "sha512-lGe34o6EHj9y3Kts9R4ZYs/Gr+6N7MCaMlIFA3F1R2O5/m7K06AxfSeO5530PEERE6/WyEg3lsuyw4GHlPZHog==",
+ "dev": true
+ },
+ "binary-extensions": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/binary-extensions/-/binary-extensions-2.1.0.tgz",
+ "integrity": "sha512-1Yj8h9Q+QDF5FzhMs/c9+6UntbD5MkRfRwac8DoEm9ZfUBZ7tZ55YcGVAzEe4bXsdQHEk+s9S5wsOKVdZrw0tQ==",
+ "dev": true
+ },
+ "body-parser": {
+ "version": "1.19.0",
+ "resolved": "https://registry.npmjs.org/body-parser/-/body-parser-1.19.0.tgz",
+ "integrity": "sha512-dhEPs72UPbDnAQJ9ZKMNTP6ptJaionhP5cBb541nXPlW60Jepo9RV/a4fX4XWW9CuFNK22krhrj1+rgzifNCsw==",
+ "dev": true,
+ "requires": {
+ "bytes": "3.1.0",
+ "content-type": "~1.0.4",
+ "debug": "2.6.9",
+ "depd": "~1.1.2",
+ "http-errors": "1.7.2",
+ "iconv-lite": "0.4.24",
+ "on-finished": "~2.3.0",
+ "qs": "6.7.0",
+ "raw-body": "2.4.0",
+ "type-is": "~1.6.17"
+ }
+ },
+ "brace-expansion": {
+ "version": "1.1.11",
+ "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz",
+ "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==",
+ "dev": true,
+ "requires": {
+ "balanced-match": "^1.0.0",
+ "concat-map": "0.0.1"
+ }
+ },
+ "braces": {
+ "version": "2.3.2",
+ "resolved": "https://registry.npmjs.org/braces/-/braces-2.3.2.tgz",
+ "integrity": "sha512-aNdbnj9P8PjdXU4ybaWLK2IF3jc/EoDYbC7AazW6to3TRsfXxscC9UXOB5iDiEQrkyIbWp2SLQda4+QAa7nc3w==",
+ "dev": true,
+ "requires": {
+ "arr-flatten": "^1.1.0",
+ "array-unique": "^0.3.2",
+ "extend-shallow": "^2.0.1",
+ "fill-range": "^4.0.0",
+ "isobject": "^3.0.1",
+ "repeat-element": "^1.1.2",
+ "snapdragon": "^0.8.1",
+ "snapdragon-node": "^2.0.1",
+ "split-string": "^3.0.2",
+ "to-regex": "^3.0.1"
+ },
+ "dependencies": {
+ "extend-shallow": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-2.0.1.tgz",
+ "integrity": "sha1-Ua99YUrZqfYQ6huvu5idaxxWiQ8=",
+ "dev": true,
+ "requires": {
+ "is-extendable": "^0.1.0"
+ }
+ }
+ }
+ },
+ "buffer-from": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/buffer-from/-/buffer-from-1.1.1.tgz",
+ "integrity": "sha512-MQcXEUbCKtEo7bhqEs6560Hyd4XaovZlO/k9V3hjVUF/zwW7KBVdSK4gIt/bzwS9MbR5qob+F5jusZsb0YQK2A==",
+ "dev": true
+ },
+ "bytes": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.1.0.tgz",
+ "integrity": "sha512-zauLjrfCG+xvoyaqLoV8bLVXXNGC4JqlxFCutSDWA6fJrTo2ZuvLYTqZ7aHBLZSMOopbzwv8f+wZcVzfVTI2Dg==",
+ "dev": true
+ },
+ "cache-base": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/cache-base/-/cache-base-1.0.1.tgz",
+ "integrity": "sha512-AKcdTnFSWATd5/GCPRxr2ChwIJ85CeyrEyjRHlKxQ56d4XJMGym0uAiKn0xbLOGOl3+yRpOTi484dVCEc5AUzQ==",
+ "dev": true,
+ "requires": {
+ "collection-visit": "^1.0.0",
+ "component-emitter": "^1.2.1",
+ "get-value": "^2.0.6",
+ "has-value": "^1.0.0",
+ "isobject": "^3.0.1",
+ "set-value": "^2.0.0",
+ "to-object-path": "^0.3.0",
+ "union-value": "^1.0.0",
+ "unset-value": "^1.0.0"
+ }
+ },
+ "cacheable-request": {
+ "version": "6.1.0",
+ "resolved": "https://registry.npmjs.org/cacheable-request/-/cacheable-request-6.1.0.tgz",
+ "integrity": "sha512-Oj3cAGPCqOZX7Rz64Uny2GYAZNliQSqfbePrgAQ1wKAihYmCUnraBtJtKcGR4xz7wF+LoJC+ssFZvv5BgF9Igg==",
+ "dev": true,
+ "requires": {
+ "clone-response": "^1.0.2",
+ "get-stream": "^5.1.0",
+ "http-cache-semantics": "^4.0.0",
+ "keyv": "^3.0.0",
+ "lowercase-keys": "^2.0.0",
+ "normalize-url": "^4.1.0",
+ "responselike": "^1.0.2"
+ },
+ "dependencies": {
+ "get-stream": {
+ "version": "5.1.0",
+ "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-5.1.0.tgz",
+ "integrity": "sha512-EXr1FOzrzTfGeL0gQdeFEvOMm2mzMOglyiOXSTpPC+iAjAKftbr3jpCMWynogwYnM+eSj9sHGc6wjIcDvYiygw==",
+ "dev": true,
+ "requires": {
+ "pump": "^3.0.0"
+ }
+ },
+ "lowercase-keys": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/lowercase-keys/-/lowercase-keys-2.0.0.tgz",
+ "integrity": "sha512-tqNXrS78oMOE73NMxK4EMLQsQowWf8jKooH9g7xPavRT706R6bkQJ6DY2Te7QukaZsulxa30wQ7bk0pm4XiHmA==",
+ "dev": true
+ }
+ }
+ },
+ "call-me-maybe": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/call-me-maybe/-/call-me-maybe-1.0.1.tgz",
+ "integrity": "sha1-JtII6onje1y95gJQoV8DHBak1ms=",
+ "dev": true
+ },
+ "camelcase": {
+ "version": "5.3.1",
+ "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-5.3.1.tgz",
+ "integrity": "sha512-L28STB170nwWS63UjtlEOE3dldQApaJXZkOI1uMFfzf3rRuPegHaHesyee+YxQ+W6SvRDQV6UrdOdRiR153wJg==",
+ "dev": true
+ },
+ "ccount": {
+ "version": "1.0.5",
+ "resolved": "https://registry.npmjs.org/ccount/-/ccount-1.0.5.tgz",
+ "integrity": "sha512-MOli1W+nfbPLlKEhInaxhRdp7KVLFxLN5ykwzHgLsLI3H3gs5jjFAK4Eoj3OzzcxCtumDaI8onoVDeQyWaNTkw==",
+ "dev": true
+ },
+ "chalk": {
+ "version": "2.4.2",
+ "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz",
+ "integrity": "sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==",
+ "dev": true,
+ "requires": {
+ "ansi-styles": "^3.2.1",
+ "escape-string-regexp": "^1.0.5",
+ "supports-color": "^5.3.0"
+ }
+ },
+ "character-entities": {
+ "version": "1.2.4",
+ "resolved": "https://registry.npmjs.org/character-entities/-/character-entities-1.2.4.tgz",
+ "integrity": "sha512-iBMyeEHxfVnIakwOuDXpVkc54HijNgCyQB2w0VfGQThle6NXn50zU6V/u+LDhxHcDUPojn6Kpga3PTAD8W1bQw==",
+ "dev": true
+ },
+ "character-entities-html4": {
+ "version": "1.1.4",
+ "resolved": "https://registry.npmjs.org/character-entities-html4/-/character-entities-html4-1.1.4.tgz",
+ "integrity": "sha512-HRcDxZuZqMx3/a+qrzxdBKBPUpxWEq9xw2OPZ3a/174ihfrQKVsFhqtthBInFy1zZ9GgZyFXOatNujm8M+El3g==",
+ "dev": true
+ },
+ "character-entities-legacy": {
+ "version": "1.1.4",
+ "resolved": "https://registry.npmjs.org/character-entities-legacy/-/character-entities-legacy-1.1.4.tgz",
+ "integrity": "sha512-3Xnr+7ZFS1uxeiUDvV02wQ+QDbc55o97tIV5zHScSPJpcLm/r0DFPcoY3tYRp+VZukxuMeKgXYmsXQHO05zQeA==",
+ "dev": true
+ },
+ "character-reference-invalid": {
+ "version": "1.1.4",
+ "resolved": "https://registry.npmjs.org/character-reference-invalid/-/character-reference-invalid-1.1.4.tgz",
+ "integrity": "sha512-mKKUkUbhPpQlCOfIuZkvSEgktjPFIsZKRRbC6KWVEMvlzblj3i3asQv5ODsrwt0N3pHAEvjP8KTQPHkp0+6jOg==",
+ "dev": true
+ },
+ "check-links": {
+ "version": "1.1.8",
+ "resolved": "https://registry.npmjs.org/check-links/-/check-links-1.1.8.tgz",
+ "integrity": "sha512-lxt1EeQ1CVkmiZzPfbPufperYK0t7MvhdLs3zlRH9areA6NVT1tcGymAdJONolNWQBdCFU/sek59RpeLmVHCnw==",
+ "dev": true,
+ "requires": {
+ "got": "^9.6.0",
+ "is-relative-url": "^2.0.0",
+ "p-map": "^2.0.0",
+ "p-memoize": "^2.1.0"
+ }
+ },
+ "chokidar": {
+ "version": "3.4.1",
+ "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-3.4.1.tgz",
+ "integrity": "sha512-TQTJyr2stihpC4Sya9hs2Xh+O2wf+igjL36Y75xx2WdHuiICcn/XJza46Jwt0eT5hVpQOzo3FpY3cj3RVYLX0g==",
+ "dev": true,
+ "requires": {
+ "anymatch": "~3.1.1",
+ "braces": "~3.0.2",
+ "fsevents": "~2.1.2",
+ "glob-parent": "~5.1.0",
+ "is-binary-path": "~2.1.0",
+ "is-glob": "~4.0.1",
+ "normalize-path": "~3.0.0",
+ "readdirp": "~3.4.0"
+ },
+ "dependencies": {
+ "braces": {
+ "version": "3.0.2",
+ "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.2.tgz",
+ "integrity": "sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A==",
+ "dev": true,
+ "requires": {
+ "fill-range": "^7.0.1"
+ }
+ },
+ "fill-range": {
+ "version": "7.0.1",
+ "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.0.1.tgz",
+ "integrity": "sha512-qOo9F+dMUmC2Lcb4BbVvnKJxTPjCm+RRpe4gDuGrzkL7mEVl/djYSu2OdQ2Pa302N4oqkSg9ir6jaLWJ2USVpQ==",
+ "dev": true,
+ "requires": {
+ "to-regex-range": "^5.0.1"
+ }
+ },
+ "glob-parent": {
+ "version": "5.1.1",
+ "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.1.tgz",
+ "integrity": "sha512-FnI+VGOpnlGHWZxthPGR+QhR78fuiK0sNLkHQv+bL9fQi57lNNdquIbna/WrfROrolq8GK5Ek6BiMwqL/voRYQ==",
+ "dev": true,
+ "requires": {
+ "is-glob": "^4.0.1"
+ }
+ },
+ "is-glob": {
+ "version": "4.0.1",
+ "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.1.tgz",
+ "integrity": "sha512-5G0tKtBTFImOqDnLB2hG6Bp2qcKEFduo4tZu9MT/H6NQv/ghhy30o55ufafxJ/LdH79LLs2Kfrn85TLKyA7BUg==",
+ "dev": true,
+ "requires": {
+ "is-extglob": "^2.1.1"
+ }
+ },
+ "is-number": {
+ "version": "7.0.0",
+ "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz",
+ "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==",
+ "dev": true
+ },
+ "to-regex-range": {
+ "version": "5.0.1",
+ "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz",
+ "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==",
+ "dev": true,
+ "requires": {
+ "is-number": "^7.0.0"
+ }
+ }
+ }
+ },
+ "class-utils": {
+ "version": "0.3.6",
+ "resolved": "https://registry.npmjs.org/class-utils/-/class-utils-0.3.6.tgz",
+ "integrity": "sha512-qOhPa/Fj7s6TY8H8esGu5QNpMMQxz79h+urzrNYN6mn+9BnxlDGf5QZ+XeCDsxSjPqsSR56XOZOJmpeurnLMeg==",
+ "dev": true,
+ "requires": {
+ "arr-union": "^3.1.0",
+ "define-property": "^0.2.5",
+ "isobject": "^3.0.0",
+ "static-extend": "^0.1.1"
+ },
+ "dependencies": {
+ "define-property": {
+ "version": "0.2.5",
+ "resolved": "https://registry.npmjs.org/define-property/-/define-property-0.2.5.tgz",
+ "integrity": "sha1-w1se+RjsPJkPmlvFe+BKrOxcgRY=",
+ "dev": true,
+ "requires": {
+ "is-descriptor": "^0.1.0"
+ }
+ }
+ }
+ },
+ "clean-stack": {
+ "version": "2.2.0",
+ "resolved": "https://registry.npmjs.org/clean-stack/-/clean-stack-2.2.0.tgz",
+ "integrity": "sha512-4diC9HaTE+KRAMWhDhrGOECgWZxoevMc5TlkObMqNSsVU62PYzXZ/SMTjzyGAFF1YusgxGcSWTEXBhp0CPwQ1A==",
+ "dev": true
+ },
+ "cliui": {
+ "version": "7.0.4",
+ "resolved": "https://registry.npmjs.org/cliui/-/cliui-7.0.4.tgz",
+ "integrity": "sha512-OcRE68cOsVMXp1Yvonl/fzkQOyjLSu/8bhPDfQt0e0/Eb283TKP20Fs2MqoPsr9SwA595rRCA+QMzYc9nBP+JQ==",
+ "dev": true,
+ "requires": {
+ "string-width": "^4.2.0",
+ "strip-ansi": "^6.0.0",
+ "wrap-ansi": "^7.0.0"
+ }
+ },
+ "clone-response": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/clone-response/-/clone-response-1.0.2.tgz",
+ "integrity": "sha1-0dyXOSAxTfZ/vrlCI7TuNQI56Ws=",
+ "dev": true,
+ "requires": {
+ "mimic-response": "^1.0.0"
+ }
+ },
+ "co": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/co/-/co-3.1.0.tgz",
+ "integrity": "sha1-TqVOpaCJOBUxheFSEMaNkJK8G3g=",
+ "dev": true
+ },
+ "coffee-script": {
+ "version": "1.7.1",
+ "resolved": "https://registry.npmjs.org/coffee-script/-/coffee-script-1.7.1.tgz",
+ "integrity": "sha1-YplqhheAx15tUGnROCJyO3NAS/w=",
+ "dev": true,
+ "requires": {
+ "mkdirp": "~0.3.5"
+ },
+ "dependencies": {
+ "mkdirp": {
+ "version": "0.3.5",
+ "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-0.3.5.tgz",
+ "integrity": "sha1-3j5fiWHIjHh+4TaN+EmsRBPsqNc=",
+ "dev": true
+ }
+ }
+ },
+ "coffeescript": {
+ "version": "2.5.1",
+ "resolved": "https://registry.npmjs.org/coffeescript/-/coffeescript-2.5.1.tgz",
+ "integrity": "sha512-J2jRPX0eeFh5VKyVnoLrfVFgLZtnnmp96WQSLAS8OrLm2wtQLcnikYKe1gViJKDH7vucjuhHvBKKBP3rKcD1tQ==",
+ "dev": true
+ },
+ "coleman-liau": {
+ "version": "1.0.5",
+ "resolved": "https://registry.npmjs.org/coleman-liau/-/coleman-liau-1.0.5.tgz",
+ "integrity": "sha512-g4N/LvbIoGP7cq9E8QGrUWkHnhm9DXP0g1+axHIQnmQq/MwwPbBx5dGxQ0GbY5+ojibsIo1Rg0XuYkF+JPr7sw==",
+ "dev": true
+ },
+ "collapse-white-space": {
+ "version": "1.0.6",
+ "resolved": "https://registry.npmjs.org/collapse-white-space/-/collapse-white-space-1.0.6.tgz",
+ "integrity": "sha512-jEovNnrhMuqyCcjfEJA56v0Xq8SkIoPKDyaHahwo3POf4qcSXqMYuwNcOTzp74vTsR9Tn08z4MxWqAhcekogkQ==",
+ "dev": true
+ },
+ "collection-visit": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/collection-visit/-/collection-visit-1.0.0.tgz",
+ "integrity": "sha1-S8A3PBZLwykbTTaMgpzxqApZ3KA=",
+ "dev": true,
+ "requires": {
+ "map-visit": "^1.0.0",
+ "object-visit": "^1.0.0"
+ }
+ },
+ "color-convert": {
+ "version": "1.9.3",
+ "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz",
+ "integrity": "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==",
+ "dev": true,
+ "requires": {
+ "color-name": "1.1.3"
+ }
+ },
+ "color-name": {
+ "version": "1.1.3",
+ "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz",
+ "integrity": "sha1-p9BVi9icQveV3UIyj3QIMcpTvCU=",
+ "dev": true
+ },
+ "colors": {
+ "version": "1.1.2",
+ "resolved": "https://registry.npmjs.org/colors/-/colors-1.1.2.tgz",
+ "integrity": "sha1-FopHAXVran9RoSzgyXv6KMCE7WM=",
+ "dev": true
+ },
+ "commander": {
+ "version": "2.20.3",
+ "resolved": "https://registry.npmjs.org/commander/-/commander-2.20.3.tgz",
+ "integrity": "sha512-GpVkmM8vF2vQUkj2LvZmD35JxeJOLCwJ9cUkugyk2nuhbv3+mJvpLYYt+0+USMxE+oj+ey/lJEnhZw75x/OMcQ==",
+ "dev": true
+ },
+ "component-emitter": {
+ "version": "1.3.0",
+ "resolved": "https://registry.npmjs.org/component-emitter/-/component-emitter-1.3.0.tgz",
+ "integrity": "sha512-Rd3se6QB+sO1TwqZjscQrurpEPIfO0/yYnSin6Q/rD3mOutHvUrCAhJub3r90uNb+SESBuE0QYoB90YdfatsRg==",
+ "dev": true
+ },
+ "concat-map": {
+ "version": "0.0.1",
+ "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz",
+ "integrity": "sha1-2Klr13/Wjfd5OnMDajug1UBdR3s=",
+ "dev": true
+ },
+ "concat-stream": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/concat-stream/-/concat-stream-2.0.0.tgz",
+ "integrity": "sha512-MWufYdFw53ccGjCA+Ol7XJYpAlW6/prSMzuPOTRnJGcGzuhLn4Scrz7qf6o8bROZ514ltazcIFJZevcfbo0x7A==",
+ "dev": true,
+ "requires": {
+ "buffer-from": "^1.0.0",
+ "inherits": "^2.0.3",
+ "readable-stream": "^3.0.2",
+ "typedarray": "^0.0.6"
+ }
+ },
+ "connect": {
+ "version": "3.7.0",
+ "resolved": "https://registry.npmjs.org/connect/-/connect-3.7.0.tgz",
+ "integrity": "sha512-ZqRXc+tZukToSNmh5C2iWMSoV3X1YUcPbqEM4DkEG5tNQXrQUZCNVGGv3IuicnkMtPfGf3Xtp8WCXs295iQ1pQ==",
+ "dev": true,
+ "requires": {
+ "debug": "2.6.9",
+ "finalhandler": "1.1.2",
+ "parseurl": "~1.3.3",
+ "utils-merge": "1.0.1"
+ }
+ },
+ "content-type": {
+ "version": "1.0.4",
+ "resolved": "https://registry.npmjs.org/content-type/-/content-type-1.0.4.tgz",
+ "integrity": "sha512-hIP3EEPs8tB9AT1L+NUqtwOAps4mk2Zob89MWXMHjHWg9milF/j4osnnQLXBCBFBk/tvIG/tUc9mOUJiPBhPXA==",
+ "dev": true
+ },
+ "convert-source-map": {
+ "version": "1.7.0",
+ "resolved": "https://registry.npmjs.org/convert-source-map/-/convert-source-map-1.7.0.tgz",
+ "integrity": "sha512-4FJkXzKXEDB1snCFZlLP4gpC3JILicCpGbzG9f9G7tGqGCzETQ2hWPrcinA9oU4wtf2biUaEH5065UnMeR33oA==",
+ "dev": true,
+ "requires": {
+ "safe-buffer": "~5.1.1"
+ },
+ "dependencies": {
+ "safe-buffer": {
+ "version": "5.1.2",
+ "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz",
+ "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==",
+ "dev": true
+ }
+ }
+ },
+ "cookie": {
+ "version": "0.4.1",
+ "resolved": "https://registry.npmjs.org/cookie/-/cookie-0.4.1.tgz",
+ "integrity": "sha512-ZwrFkGJxUR3EIoXtO+yVE69Eb7KlixbaeAWfBQB9vVsNn/o+Yw69gBWSSDK825hQNdN+wF8zELf3dFNl/kxkUA==",
+ "dev": true
+ },
+ "copy-descriptor": {
+ "version": "0.1.1",
+ "resolved": "https://registry.npmjs.org/copy-descriptor/-/copy-descriptor-0.1.1.tgz",
+ "integrity": "sha1-Z29us8OZl8LuGsOpJP1hJHSPV40=",
+ "dev": true
+ },
+ "cors": {
+ "version": "2.8.5",
+ "resolved": "https://registry.npmjs.org/cors/-/cors-2.8.5.tgz",
+ "integrity": "sha512-KIHbLJqu73RGr/hnbrO9uBeixNGuvSQjul/jdFvS/KFSIH1hWVd1ng7zOHx+YrEfInLG7q4n6GHQ9cDtxv/P6g==",
+ "dev": true,
+ "requires": {
+ "object-assign": "^4",
+ "vary": "^1"
+ }
+ },
+ "cuss": {
+ "version": "1.20.0",
+ "resolved": "https://registry.npmjs.org/cuss/-/cuss-1.20.0.tgz",
+ "integrity": "sha512-ca6Z5roeWhHgXeDLn0g3SLrG68Cb9922MvHme7Q/dz4XfwuxcBLalW4RqFUyZOiczzAqKc2XVtR2Kof+sIfinQ==",
+ "dev": true
+ },
+ "custom-event": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/custom-event/-/custom-event-1.0.1.tgz",
+ "integrity": "sha1-XQKkaFCt8bSjF5RqOSj8y1v9BCU=",
+ "dev": true
+ },
+ "dale-chall": {
+ "version": "1.0.4",
+ "resolved": "https://registry.npmjs.org/dale-chall/-/dale-chall-1.0.4.tgz",
+ "integrity": "sha512-3Wp5GrsQVxw+KVXfZK9iA6W0jGnkq5uXXQ51Scofb92cbipkRvUZTaeQBr/6rlxdOezkjUGglV1euWIBOGCTRA==",
+ "dev": true
+ },
+ "dale-chall-formula": {
+ "version": "1.0.5",
+ "resolved": "https://registry.npmjs.org/dale-chall-formula/-/dale-chall-formula-1.0.5.tgz",
+ "integrity": "sha512-p7f37Bc6HkIUWDj/b/9r76qrXU7/yyR0lnp0Vmj0QjAG54KrFJbE8kVuPHdcKaCbwAFT8yNvCALMaajid/Mkfw==",
+ "dev": true
+ },
+ "date-format": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/date-format/-/date-format-3.0.0.tgz",
+ "integrity": "sha512-eyTcpKOcamdhWJXj56DpQMo1ylSQpcGtGKXcU0Tb97+K56/CF5amAqqqNj0+KvA0iw2ynxtHWFsPDSClCxe48w==",
+ "dev": true
+ },
+ "dateformat": {
+ "version": "3.0.3",
+ "resolved": "https://registry.npmjs.org/dateformat/-/dateformat-3.0.3.tgz",
+ "integrity": "sha512-jyCETtSl3VMZMWeRo7iY1FL19ges1t55hMo5yaam4Jrsm5EPL89UQkoQRyiI+Yf4k8r2ZpdngkV8hr1lIdjb3Q==",
+ "dev": true
+ },
+ "debug": {
+ "version": "2.6.9",
+ "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz",
+ "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==",
+ "dev": true,
+ "requires": {
+ "ms": "2.0.0"
+ }
+ },
+ "decode-uri-component": {
+ "version": "0.2.0",
+ "resolved": "https://registry.npmjs.org/decode-uri-component/-/decode-uri-component-0.2.0.tgz",
+ "integrity": "sha1-6zkTMzRYd1y4TNGh+uBiEGu4dUU=",
+ "dev": true
+ },
+ "decompress-response": {
+ "version": "3.3.0",
+ "resolved": "https://registry.npmjs.org/decompress-response/-/decompress-response-3.3.0.tgz",
+ "integrity": "sha1-gKTdMjdIOEv6JICDYirt7Jgq3/M=",
+ "dev": true,
+ "requires": {
+ "mimic-response": "^1.0.0"
+ }
+ },
+ "deep-extend": {
+ "version": "0.6.0",
+ "resolved": "https://registry.npmjs.org/deep-extend/-/deep-extend-0.6.0.tgz",
+ "integrity": "sha512-LOHxIOaPYdHlJRtCQfDIVZtfw/ufM8+rVj649RIHzcm/vGwQRXFt6OPqIFWsm2XEMrNIEtWR64sY1LEKD2vAOA==",
+ "dev": true
+ },
+ "defer-to-connect": {
+ "version": "1.1.3",
+ "resolved": "https://registry.npmjs.org/defer-to-connect/-/defer-to-connect-1.1.3.tgz",
+ "integrity": "sha512-0ISdNousHvZT2EiFlZeZAHBUvSxmKswVCEf8hW7KWgG4a8MVEu/3Vb6uWYozkjylyCxe0JBIiRB1jV45S70WVQ==",
+ "dev": true
+ },
+ "define-property": {
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/define-property/-/define-property-2.0.2.tgz",
+ "integrity": "sha512-jwK2UV4cnPpbcG7+VRARKTZPUWowwXA8bzH5NP6ud0oeAxyYPuGZUAC7hMugpCdz4BeSZl2Dl9k66CHJ/46ZYQ==",
+ "dev": true,
+ "requires": {
+ "is-descriptor": "^1.0.2",
+ "isobject": "^3.0.1"
+ },
+ "dependencies": {
+ "is-accessor-descriptor": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/is-accessor-descriptor/-/is-accessor-descriptor-1.0.0.tgz",
+ "integrity": "sha512-m5hnHTkcVsPfqx3AKlyttIPb7J+XykHvJP2B9bZDjlhLIoEq4XoK64Vg7boZlVWYK6LUY94dYPEE7Lh0ZkZKcQ==",
+ "dev": true,
+ "requires": {
+ "kind-of": "^6.0.0"
+ }
+ },
+ "is-data-descriptor": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/is-data-descriptor/-/is-data-descriptor-1.0.0.tgz",
+ "integrity": "sha512-jbRXy1FmtAoCjQkVmIVYwuuqDFUbaOeDjmed1tOGPrsMhtJA4rD9tkgA0F1qJ3gRFRXcHYVkdeaP50Q5rE/jLQ==",
+ "dev": true,
+ "requires": {
+ "kind-of": "^6.0.0"
+ }
+ },
+ "is-descriptor": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/is-descriptor/-/is-descriptor-1.0.2.tgz",
+ "integrity": "sha512-2eis5WqQGV7peooDyLmNEPUrps9+SXX5c9pL3xEB+4e9HnGuDa7mB7kHxHw4CbqS9k1T2hOH3miL8n8WtiYVtg==",
+ "dev": true,
+ "requires": {
+ "is-accessor-descriptor": "^1.0.0",
+ "is-data-descriptor": "^1.0.0",
+ "kind-of": "^6.0.2"
+ }
+ }
+ }
+ },
+ "depd": {
+ "version": "1.1.2",
+ "resolved": "https://registry.npmjs.org/depd/-/depd-1.1.2.tgz",
+ "integrity": "sha1-m81S4UwJd2PnSbJ0xDRu0uVgtak=",
+ "dev": true
+ },
+ "detect-file": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/detect-file/-/detect-file-1.0.0.tgz",
+ "integrity": "sha1-8NZtA2cqglyxtzvbP+YjEMjlUrc=",
+ "dev": true
+ },
+ "di": {
+ "version": "0.0.1",
+ "resolved": "https://registry.npmjs.org/di/-/di-0.0.1.tgz",
+ "integrity": "sha1-gGZJMmzqp8qjMG112YXqJ0i6kTw=",
+ "dev": true
+ },
+ "dictionary-en-us": {
+ "version": "2.2.1",
+ "resolved": "https://registry.npmjs.org/dictionary-en-us/-/dictionary-en-us-2.2.1.tgz",
+ "integrity": "sha512-Z8mycV0ywTfjbUTi0JZfQHqBZhu4CYFtpf7KluSGpt3xHpFlal2S/hiK50tlPynOtR5K3pG5wCradnz1yxwOHA==",
+ "dev": true
+ },
+ "dns-packet": {
+ "version": "5.2.1",
+ "resolved": "https://registry.npmjs.org/dns-packet/-/dns-packet-5.2.1.tgz",
+ "integrity": "sha512-JHj2yJeKOqlxzeuYpN1d56GfhzivAxavNwHj9co3qptECel27B1rLY5PifJAvubsInX5pGLDjAHuCfCUc2Zv/w==",
+ "dev": true,
+ "requires": {
+ "ip": "^1.1.5"
+ }
+ },
+ "dns-socket": {
+ "version": "4.2.1",
+ "resolved": "https://registry.npmjs.org/dns-socket/-/dns-socket-4.2.1.tgz",
+ "integrity": "sha512-fNvDq86lS522+zMbh31X8cQzYQd6xumCNlxsuZF5TKxQThF/e+rJbVM6K8mmlsdcSm6yNjKJQq3Sf38viAJj8g==",
+ "dev": true,
+ "requires": {
+ "dns-packet": "^5.1.2"
+ }
+ },
+ "dom-serialize": {
+ "version": "2.2.1",
+ "resolved": "https://registry.npmjs.org/dom-serialize/-/dom-serialize-2.2.1.tgz",
+ "integrity": "sha1-ViromZ9Evl6jB29UGdzVnrQ6yVs=",
+ "dev": true,
+ "requires": {
+ "custom-event": "~1.0.0",
+ "ent": "~2.2.0",
+ "extend": "^3.0.0",
+ "void-elements": "^2.0.0"
+ }
+ },
+ "duplexer3": {
+ "version": "0.1.4",
+ "resolved": "https://registry.npmjs.org/duplexer3/-/duplexer3-0.1.4.tgz",
+ "integrity": "sha1-7gHdHKwO08vH/b6jfcCo8c4ALOI=",
+ "dev": true
+ },
+ "e-prime": {
+ "version": "0.10.4",
+ "resolved": "https://registry.npmjs.org/e-prime/-/e-prime-0.10.4.tgz",
+ "integrity": "sha512-tzBmM2mFSnAq5BuxPSyin6qXb3yMe1wufJN7L7ZPcEWS5S+jI2dhKQEoqHVEcSMMXo/j5lcWpX5jzA6wLSmX6w==",
+ "dev": true
+ },
+ "ee-first": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/ee-first/-/ee-first-1.1.1.tgz",
+ "integrity": "sha1-WQxhFWsK4vTwJVcyoViyZrxWsh0=",
+ "dev": true
+ },
+ "emoji-regex": {
+ "version": "8.0.0",
+ "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz",
+ "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==",
+ "dev": true
+ },
+ "encodeurl": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/encodeurl/-/encodeurl-1.0.2.tgz",
+ "integrity": "sha1-rT/0yG7C0CkyL1oCw6mmBslbP1k=",
+ "dev": true
+ },
+ "end-of-stream": {
+ "version": "1.4.4",
+ "resolved": "https://registry.npmjs.org/end-of-stream/-/end-of-stream-1.4.4.tgz",
+ "integrity": "sha512-+uw1inIHVPQoaVuHzRyXd21icM+cnt4CzD5rW+NC1wjOUSTOs+Te7FOv7AhN7vS9x/oIyhLP5PR1H+phQAHu5Q==",
+ "dev": true,
+ "requires": {
+ "once": "^1.4.0"
+ }
+ },
+ "engine.io": {
+ "version": "4.1.0",
+ "resolved": "https://registry.npmjs.org/engine.io/-/engine.io-4.1.0.tgz",
+ "integrity": "sha512-vW7EAtn0HDQ4MtT5QbmCHF17TaYLONv2/JwdYsq9USPRZVM4zG7WB3k0Nc321z8EuSOlhGokrYlYx4176QhD0A==",
+ "dev": true,
+ "requires": {
+ "accepts": "~1.3.4",
+ "base64id": "2.0.0",
+ "cookie": "~0.4.1",
+ "cors": "~2.8.5",
+ "debug": "~4.3.1",
+ "engine.io-parser": "~4.0.0",
+ "ws": "~7.4.2"
+ },
+ "dependencies": {
+ "debug": {
+ "version": "4.3.1",
+ "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.1.tgz",
+ "integrity": "sha512-doEwdvm4PCeK4K3RQN2ZC2BYUBaxwLARCqZmMjtF8a51J2Rb0xpVloFRnCODwqjpwnAoao4pelN8l3RJdv3gRQ==",
+ "dev": true,
+ "requires": {
+ "ms": "2.1.2"
+ }
+ },
+ "ms": {
+ "version": "2.1.2",
+ "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz",
+ "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==",
+ "dev": true
+ }
+ }
+ },
+ "engine.io-parser": {
+ "version": "4.0.2",
+ "resolved": "https://registry.npmjs.org/engine.io-parser/-/engine.io-parser-4.0.2.tgz",
+ "integrity": "sha512-sHfEQv6nmtJrq6TKuIz5kyEKH/qSdK56H/A+7DnAuUPWosnIZAS2NHNcPLmyjtY3cGS/MqJdZbUjW97JU72iYg==",
+ "dev": true,
+ "requires": {
+ "base64-arraybuffer": "0.1.4"
+ }
+ },
+ "ent": {
+ "version": "2.2.0",
+ "resolved": "https://registry.npmjs.org/ent/-/ent-2.2.0.tgz",
+ "integrity": "sha1-6WQhkyWiHQX0RGai9obtbOX13R0=",
+ "dev": true
+ },
+ "error-ex": {
+ "version": "1.3.2",
+ "resolved": "https://registry.npmjs.org/error-ex/-/error-ex-1.3.2.tgz",
+ "integrity": "sha512-7dFHNmqeFSEt2ZBsCriorKnn3Z2pj+fd9kmI6QoWw4//DL+icEBfc0U7qJCisqrTsKTjw4fNFy2pW9OqStD84g==",
+ "dev": true,
+ "requires": {
+ "is-arrayish": "^0.2.1"
+ }
+ },
+ "escalade": {
+ "version": "3.1.1",
+ "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.1.1.tgz",
+ "integrity": "sha512-k0er2gUkLf8O0zKJiAhmkTnJlTvINGv7ygDNPbeIsX/TJjGJZHuh9B2UxbsaEkmlEo9MfhrSzmhIlhRlI2GXnw==",
+ "dev": true
+ },
+ "escape-html": {
+ "version": "1.0.3",
+ "resolved": "https://registry.npmjs.org/escape-html/-/escape-html-1.0.3.tgz",
+ "integrity": "sha1-Aljq5NPQwJdN4cFpGI7wBR0dGYg=",
+ "dev": true
+ },
+ "escape-string-regexp": {
+ "version": "1.0.5",
+ "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz",
+ "integrity": "sha1-G2HAViGQqN/2rjuyzwIAyhMLhtQ=",
+ "dev": true
+ },
+ "esprima": {
+ "version": "4.0.1",
+ "resolved": "https://registry.npmjs.org/esprima/-/esprima-4.0.1.tgz",
+ "integrity": "sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A==",
+ "dev": true
+ },
+ "eventemitter2": {
+ "version": "0.4.14",
+ "resolved": "https://registry.npmjs.org/eventemitter2/-/eventemitter2-0.4.14.tgz",
+ "integrity": "sha1-j2G3XN4BKy6esoTUVFWDtWQ7Yas=",
+ "dev": true
+ },
+ "eventemitter3": {
+ "version": "4.0.7",
+ "resolved": "https://registry.npmjs.org/eventemitter3/-/eventemitter3-4.0.7.tgz",
+ "integrity": "sha512-8guHBZCwKnFhYdHr2ysuRWErTwhoN2X8XELRlrRwpmfeY2jjuUN4taQMsULKUVo1K4DvZl+0pgfyoysHxvmvEw==",
+ "dev": true
+ },
+ "exit": {
+ "version": "0.1.2",
+ "resolved": "https://registry.npmjs.org/exit/-/exit-0.1.2.tgz",
+ "integrity": "sha1-BjJjj42HfMghB9MKD/8aF8uhzQw=",
+ "dev": true
+ },
+ "expand-brackets": {
+ "version": "2.1.4",
+ "resolved": "https://registry.npmjs.org/expand-brackets/-/expand-brackets-2.1.4.tgz",
+ "integrity": "sha1-t3c14xXOMPa27/D4OwQVGiJEliI=",
+ "dev": true,
+ "requires": {
+ "debug": "^2.3.3",
+ "define-property": "^0.2.5",
+ "extend-shallow": "^2.0.1",
+ "posix-character-classes": "^0.1.0",
+ "regex-not": "^1.0.0",
+ "snapdragon": "^0.8.1",
+ "to-regex": "^3.0.1"
+ },
+ "dependencies": {
+ "define-property": {
+ "version": "0.2.5",
+ "resolved": "https://registry.npmjs.org/define-property/-/define-property-0.2.5.tgz",
+ "integrity": "sha1-w1se+RjsPJkPmlvFe+BKrOxcgRY=",
+ "dev": true,
+ "requires": {
+ "is-descriptor": "^0.1.0"
+ }
+ },
+ "extend-shallow": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-2.0.1.tgz",
+ "integrity": "sha1-Ua99YUrZqfYQ6huvu5idaxxWiQ8=",
+ "dev": true,
+ "requires": {
+ "is-extendable": "^0.1.0"
+ }
+ }
+ }
+ },
+ "expand-tilde": {
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/expand-tilde/-/expand-tilde-2.0.2.tgz",
+ "integrity": "sha1-l+gBqgUt8CRU3kawK/YhZCzchQI=",
+ "dev": true,
+ "requires": {
+ "homedir-polyfill": "^1.0.1"
+ }
+ },
+ "extend": {
+ "version": "3.0.2",
+ "resolved": "https://registry.npmjs.org/extend/-/extend-3.0.2.tgz",
+ "integrity": "sha512-fjquC59cD7CyW6urNXK0FBufkZcoiGG80wTuPujX590cB5Ttln20E2UB4S/WARVqhXffZl2LNgS+gQdPIIim/g==",
+ "dev": true
+ },
+ "extend-shallow": {
+ "version": "3.0.2",
+ "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-3.0.2.tgz",
+ "integrity": "sha1-Jqcarwc7OfshJxcnRhMcJwQCjbg=",
+ "dev": true,
+ "requires": {
+ "assign-symbols": "^1.0.0",
+ "is-extendable": "^1.0.1"
+ },
+ "dependencies": {
+ "is-extendable": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/is-extendable/-/is-extendable-1.0.1.tgz",
+ "integrity": "sha512-arnXMxT1hhoKo9k1LZdmlNyJdDDfy2v0fXjFlmok4+i8ul/6WlbVge9bhM74OpNPQPMGUToDtz+KXa1PneJxOA==",
+ "dev": true,
+ "requires": {
+ "is-plain-object": "^2.0.4"
+ }
+ }
+ }
+ },
+ "extglob": {
+ "version": "2.0.4",
+ "resolved": "https://registry.npmjs.org/extglob/-/extglob-2.0.4.tgz",
+ "integrity": "sha512-Nmb6QXkELsuBr24CJSkilo6UHHgbekK5UiZgfE6UHD3Eb27YC6oD+bhcT+tJ6cl8dmsgdQxnWlcry8ksBIBLpw==",
+ "dev": true,
+ "requires": {
+ "array-unique": "^0.3.2",
+ "define-property": "^1.0.0",
+ "expand-brackets": "^2.1.4",
+ "extend-shallow": "^2.0.1",
+ "fragment-cache": "^0.2.1",
+ "regex-not": "^1.0.0",
+ "snapdragon": "^0.8.1",
+ "to-regex": "^3.0.1"
+ },
+ "dependencies": {
+ "define-property": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/define-property/-/define-property-1.0.0.tgz",
+ "integrity": "sha1-dp66rz9KY6rTr56NMEybvnm/sOY=",
+ "dev": true,
+ "requires": {
+ "is-descriptor": "^1.0.0"
+ }
+ },
+ "extend-shallow": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-2.0.1.tgz",
+ "integrity": "sha1-Ua99YUrZqfYQ6huvu5idaxxWiQ8=",
+ "dev": true,
+ "requires": {
+ "is-extendable": "^0.1.0"
+ }
+ },
+ "is-accessor-descriptor": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/is-accessor-descriptor/-/is-accessor-descriptor-1.0.0.tgz",
+ "integrity": "sha512-m5hnHTkcVsPfqx3AKlyttIPb7J+XykHvJP2B9bZDjlhLIoEq4XoK64Vg7boZlVWYK6LUY94dYPEE7Lh0ZkZKcQ==",
+ "dev": true,
+ "requires": {
+ "kind-of": "^6.0.0"
+ }
+ },
+ "is-data-descriptor": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/is-data-descriptor/-/is-data-descriptor-1.0.0.tgz",
+ "integrity": "sha512-jbRXy1FmtAoCjQkVmIVYwuuqDFUbaOeDjmed1tOGPrsMhtJA4rD9tkgA0F1qJ3gRFRXcHYVkdeaP50Q5rE/jLQ==",
+ "dev": true,
+ "requires": {
+ "kind-of": "^6.0.0"
+ }
+ },
+ "is-descriptor": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/is-descriptor/-/is-descriptor-1.0.2.tgz",
+ "integrity": "sha512-2eis5WqQGV7peooDyLmNEPUrps9+SXX5c9pL3xEB+4e9HnGuDa7mB7kHxHw4CbqS9k1T2hOH3miL8n8WtiYVtg==",
+ "dev": true,
+ "requires": {
+ "is-accessor-descriptor": "^1.0.0",
+ "is-data-descriptor": "^1.0.0",
+ "kind-of": "^6.0.2"
+ }
+ }
+ }
+ },
+ "fast-glob": {
+ "version": "2.2.7",
+ "resolved": "https://registry.npmjs.org/fast-glob/-/fast-glob-2.2.7.tgz",
+ "integrity": "sha512-g1KuQwHOZAmOZMuBtHdxDtju+T2RT8jgCC9aANsbpdiDDTSnjgfuVsIBNKbUeJI3oKMRExcfNDtJl4OhbffMsw==",
+ "dev": true,
+ "requires": {
+ "@mrmlnc/readdir-enhanced": "^2.2.1",
+ "@nodelib/fs.stat": "^1.1.2",
+ "glob-parent": "^3.1.0",
+ "is-glob": "^4.0.0",
+ "merge2": "^1.2.3",
+ "micromatch": "^3.1.10"
+ },
+ "dependencies": {
+ "is-glob": {
+ "version": "4.0.1",
+ "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.1.tgz",
+ "integrity": "sha512-5G0tKtBTFImOqDnLB2hG6Bp2qcKEFduo4tZu9MT/H6NQv/ghhy30o55ufafxJ/LdH79LLs2Kfrn85TLKyA7BUg==",
+ "dev": true,
+ "requires": {
+ "is-extglob": "^2.1.1"
+ }
+ }
+ }
+ },
+ "fault": {
+ "version": "1.0.4",
+ "resolved": "https://registry.npmjs.org/fault/-/fault-1.0.4.tgz",
+ "integrity": "sha512-CJ0HCB5tL5fYTEA7ToAq5+kTwd++Borf1/bifxd9iT70QcXr4MRrO3Llf8Ifs70q+SJcGHFtnIE/Nw6giCtECA==",
+ "dev": true,
+ "requires": {
+ "format": "^0.2.0"
+ }
+ },
+ "figures": {
+ "version": "3.2.0",
+ "resolved": "https://registry.npmjs.org/figures/-/figures-3.2.0.tgz",
+ "integrity": "sha512-yaduQFRKLXYOGgEn6AZau90j3ggSOyiqXU0F9JZfeXYhNa+Jk4X+s45A2zg5jns87GAFa34BBm2kXw4XpNcbdg==",
+ "dev": true,
+ "requires": {
+ "escape-string-regexp": "^1.0.5"
+ }
+ },
+ "fill-range": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-4.0.0.tgz",
+ "integrity": "sha1-1USBHUKPmOsGpj3EAtJAPDKMOPc=",
+ "dev": true,
+ "requires": {
+ "extend-shallow": "^2.0.1",
+ "is-number": "^3.0.0",
+ "repeat-string": "^1.6.1",
+ "to-regex-range": "^2.1.0"
+ },
+ "dependencies": {
+ "extend-shallow": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-2.0.1.tgz",
+ "integrity": "sha1-Ua99YUrZqfYQ6huvu5idaxxWiQ8=",
+ "dev": true,
+ "requires": {
+ "is-extendable": "^0.1.0"
+ }
+ }
+ }
+ },
+ "finalhandler": {
+ "version": "1.1.2",
+ "resolved": "https://registry.npmjs.org/finalhandler/-/finalhandler-1.1.2.tgz",
+ "integrity": "sha512-aAWcW57uxVNrQZqFXjITpW3sIUQmHGG3qSb9mUah9MgMC4NeWhNOlNjXEYq3HjRAvL6arUviZGGJsBg6z0zsWA==",
+ "dev": true,
+ "requires": {
+ "debug": "2.6.9",
+ "encodeurl": "~1.0.2",
+ "escape-html": "~1.0.3",
+ "on-finished": "~2.3.0",
+ "parseurl": "~1.3.3",
+ "statuses": "~1.5.0",
+ "unpipe": "~1.0.0"
+ }
+ },
+ "findup-sync": {
+ "version": "0.3.0",
+ "resolved": "https://registry.npmjs.org/findup-sync/-/findup-sync-0.3.0.tgz",
+ "integrity": "sha1-N5MKpdgWt3fANEXhlmzGeQpMCxY=",
+ "dev": true,
+ "requires": {
+ "glob": "~5.0.0"
+ },
+ "dependencies": {
+ "glob": {
+ "version": "5.0.15",
+ "resolved": "https://registry.npmjs.org/glob/-/glob-5.0.15.tgz",
+ "integrity": "sha1-G8k2ueAvSmA/zCIuz3Yz0wuLk7E=",
+ "dev": true,
+ "requires": {
+ "inflight": "^1.0.4",
+ "inherits": "2",
+ "minimatch": "2 || 3",
+ "once": "^1.3.0",
+ "path-is-absolute": "^1.0.0"
+ }
+ }
+ }
+ },
+ "fined": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/fined/-/fined-1.2.0.tgz",
+ "integrity": "sha512-ZYDqPLGxDkDhDZBjZBb+oD1+j0rA4E0pXY50eplAAOPg2N/gUBSSk5IM1/QhPfyVo19lJ+CvXpqfvk+b2p/8Ng==",
+ "dev": true,
+ "requires": {
+ "expand-tilde": "^2.0.2",
+ "is-plain-object": "^2.0.3",
+ "object.defaults": "^1.1.0",
+ "object.pick": "^1.2.0",
+ "parse-filepath": "^1.0.1"
+ }
+ },
+ "flagged-respawn": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/flagged-respawn/-/flagged-respawn-1.0.1.tgz",
+ "integrity": "sha512-lNaHNVymajmk0OJMBn8fVUAU1BtDeKIqKoVhk4xAALB57aALg6b4W0MfJ/cUE0g9YBXy5XhSlPIpYIJ7HaY/3Q==",
+ "dev": true
+ },
+ "flatted": {
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/flatted/-/flatted-2.0.2.tgz",
+ "integrity": "sha512-r5wGx7YeOwNWNlCA0wQ86zKyDLMQr+/RB8xy74M4hTphfmjlijTSSXGuH8rnvKZnfT9i+75zmd8jcKdMR4O6jA==",
+ "dev": true
+ },
+ "flesch": {
+ "version": "1.0.5",
+ "resolved": "https://registry.npmjs.org/flesch/-/flesch-1.0.5.tgz",
+ "integrity": "sha512-SE5X7jm4tp7sbKagLB0V9i0SrjWsFovus7db3E1nCyquy5249+Fyh+bBIK2crUuzX4maXn3Tu5bcMw8nF5oU8Q==",
+ "dev": true
+ },
+ "fn-name": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/fn-name/-/fn-name-2.0.1.tgz",
+ "integrity": "sha1-UhTXU3pNBqSjAcDMJi/rhBiAAuc=",
+ "dev": true
+ },
+ "follow-redirects": {
+ "version": "1.13.2",
+ "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.13.2.tgz",
+ "integrity": "sha512-6mPTgLxYm3r6Bkkg0vNM0HTjfGrOEtsfbhagQvbxDEsEkpNhw582upBaoRZylzen6krEmxXJgt9Ju6HiI4O7BA==",
+ "dev": true
+ },
+ "for-in": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/for-in/-/for-in-1.0.2.tgz",
+ "integrity": "sha1-gQaNKVqBQuwKxybG4iAMMPttXoA=",
+ "dev": true
+ },
+ "for-own": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/for-own/-/for-own-1.0.0.tgz",
+ "integrity": "sha1-xjMy9BXO3EsE2/5wz4NklMU8tEs=",
+ "dev": true,
+ "requires": {
+ "for-in": "^1.0.1"
+ }
+ },
+ "format": {
+ "version": "0.2.2",
+ "resolved": "https://registry.npmjs.org/format/-/format-0.2.2.tgz",
+ "integrity": "sha1-1hcBB+nv3E7TDJ3DkBbflCtctYs=",
+ "dev": true
+ },
+ "fragment-cache": {
+ "version": "0.2.1",
+ "resolved": "https://registry.npmjs.org/fragment-cache/-/fragment-cache-0.2.1.tgz",
+ "integrity": "sha1-QpD60n8T6Jvn8zeZxrxaCr//DRk=",
+ "dev": true,
+ "requires": {
+ "map-cache": "^0.2.2"
+ }
+ },
+ "fs-extra": {
+ "version": "8.1.0",
+ "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-8.1.0.tgz",
+ "integrity": "sha512-yhlQgA6mnOJUKOsRUFsgJdQCvkKhcz8tlZG5HBQfReYZy46OwLcY+Zia0mtdHsOo9y/hP+CxMN0TU9QxoOtG4g==",
+ "dev": true,
+ "requires": {
+ "graceful-fs": "^4.2.0",
+ "jsonfile": "^4.0.0",
+ "universalify": "^0.1.0"
+ },
+ "dependencies": {
+ "graceful-fs": {
+ "version": "4.2.4",
+ "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.4.tgz",
+ "integrity": "sha512-WjKPNJF79dtJAVniUlGGWHYGz2jWxT6VhN/4m1NdkbZ2nOsEF+cI1Edgql5zCRhs/VsQYRvrXctxktVXZUkixw==",
+ "dev": true
+ }
+ }
+ },
+ "fs.realpath": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz",
+ "integrity": "sha1-FQStJSMVjKpA20onh8sBQRmU6k8=",
+ "dev": true
+ },
+ "fsevents": {
+ "version": "2.1.3",
+ "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.1.3.tgz",
+ "integrity": "sha512-Auw9a4AxqWpa9GUfj370BMPzzyncfBABW8Mab7BGWBYDj4Isgq+cDKtx0i6u9jcX9pQDnswsaaOTgTmA5pEjuQ==",
+ "dev": true,
+ "optional": true
+ },
+ "gaze": {
+ "version": "1.1.3",
+ "resolved": "https://registry.npmjs.org/gaze/-/gaze-1.1.3.tgz",
+ "integrity": "sha512-BRdNm8hbWzFzWHERTrejLqwHDfS4GibPoq5wjTPIoJHoBtKGPg3xAFfxmM+9ztbXelxcf2hwQcaz1PtmFeue8g==",
+ "dev": true,
+ "requires": {
+ "globule": "^1.0.0"
+ }
+ },
+ "gensync": {
+ "version": "1.0.0-beta.1",
+ "resolved": "https://registry.npmjs.org/gensync/-/gensync-1.0.0-beta.1.tgz",
+ "integrity": "sha512-r8EC6NO1sngH/zdD9fiRDLdcgnbayXah+mLgManTaIZJqEC1MZstmnox8KpnI2/fxQwrp5OpCOYWLp4rBl4Jcg==",
+ "dev": true
+ },
+ "get-caller-file": {
+ "version": "2.0.5",
+ "resolved": "https://registry.npmjs.org/get-caller-file/-/get-caller-file-2.0.5.tgz",
+ "integrity": "sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==",
+ "dev": true
+ },
+ "get-stream": {
+ "version": "4.1.0",
+ "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-4.1.0.tgz",
+ "integrity": "sha512-GMat4EJ5161kIy2HevLlr4luNjBgvmj413KaQA7jt4V8B4RDsfpHk7WQ9GVqfYyyx8OS/L66Kox+rJRNklLK7w==",
+ "dev": true,
+ "requires": {
+ "pump": "^3.0.0"
+ }
+ },
+ "get-value": {
+ "version": "2.0.6",
+ "resolved": "https://registry.npmjs.org/get-value/-/get-value-2.0.6.tgz",
+ "integrity": "sha1-3BXKHGcjh8p2vTesCjlbogQqLCg=",
+ "dev": true
+ },
+ "getobject": {
+ "version": "0.1.0",
+ "resolved": "https://registry.npmjs.org/getobject/-/getobject-0.1.0.tgz",
+ "integrity": "sha1-BHpEl4n6Fg0Bj1SG7ZEyC27HiFw=",
+ "dev": true
+ },
+ "github-slugger": {
+ "version": "1.3.0",
+ "resolved": "https://registry.npmjs.org/github-slugger/-/github-slugger-1.3.0.tgz",
+ "integrity": "sha512-gwJScWVNhFYSRDvURk/8yhcFBee6aFjye2a7Lhb2bUyRulpIoek9p0I9Kt7PT67d/nUlZbFu8L9RLiA0woQN8Q==",
+ "dev": true,
+ "requires": {
+ "emoji-regex": ">=6.0.0 <=6.1.1"
+ },
+ "dependencies": {
+ "emoji-regex": {
+ "version": "6.1.1",
+ "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-6.1.1.tgz",
+ "integrity": "sha1-xs0OwbBkLio8Z6ETfvxeeW2k+I4=",
+ "dev": true
+ }
+ }
+ },
+ "glob": {
+ "version": "7.1.6",
+ "resolved": "https://registry.npmjs.org/glob/-/glob-7.1.6.tgz",
+ "integrity": "sha512-LwaxwyZ72Lk7vZINtNNrywX0ZuLyStrdDtabefZKAY5ZGJhVtgdznluResxNmPitE0SAO+O26sWTHeKSI2wMBA==",
+ "dev": true,
+ "requires": {
+ "fs.realpath": "^1.0.0",
+ "inflight": "^1.0.4",
+ "inherits": "2",
+ "minimatch": "^3.0.4",
+ "once": "^1.3.0",
+ "path-is-absolute": "^1.0.0"
+ }
+ },
+ "glob-parent": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-3.1.0.tgz",
+ "integrity": "sha1-nmr2KZ2NO9K9QEMIMr0RPfkGxa4=",
+ "dev": true,
+ "requires": {
+ "is-glob": "^3.1.0",
+ "path-dirname": "^1.0.0"
+ }
+ },
+ "glob-to-regexp": {
+ "version": "0.3.0",
+ "resolved": "https://registry.npmjs.org/glob-to-regexp/-/glob-to-regexp-0.3.0.tgz",
+ "integrity": "sha1-jFoUlNIGbFcMw7/kSWF1rMTVAqs=",
+ "dev": true
+ },
+ "global-modules": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/global-modules/-/global-modules-1.0.0.tgz",
+ "integrity": "sha512-sKzpEkf11GpOFuw0Zzjzmt4B4UZwjOcG757PPvrfhxcLFbq0wpsgpOqxpxtxFiCG4DtG93M6XRVbF2oGdev7bg==",
+ "dev": true,
+ "requires": {
+ "global-prefix": "^1.0.1",
+ "is-windows": "^1.0.1",
+ "resolve-dir": "^1.0.0"
+ }
+ },
+ "global-prefix": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/global-prefix/-/global-prefix-1.0.2.tgz",
+ "integrity": "sha1-2/dDxsFJklk8ZVVoy2btMsASLr4=",
+ "dev": true,
+ "requires": {
+ "expand-tilde": "^2.0.2",
+ "homedir-polyfill": "^1.0.1",
+ "ini": "^1.3.4",
+ "is-windows": "^1.0.1",
+ "which": "^1.2.14"
+ }
+ },
+ "globals": {
+ "version": "11.12.0",
+ "resolved": "https://registry.npmjs.org/globals/-/globals-11.12.0.tgz",
+ "integrity": "sha512-WOBp/EEGUiIsJSp7wcv/y6MO+lV9UoncWqxuFfm8eBwzWNgyfBd6Gz+IeKQ9jCmyhoH99g15M3T+QaVHFjizVA==",
+ "dev": true
+ },
+ "globule": {
+ "version": "1.3.2",
+ "resolved": "https://registry.npmjs.org/globule/-/globule-1.3.2.tgz",
+ "integrity": "sha512-7IDTQTIu2xzXkT+6mlluidnWo+BypnbSoEVVQCGfzqnl5Ik8d3e1d4wycb8Rj9tWW+Z39uPWsdlquqiqPCd/pA==",
+ "dev": true,
+ "requires": {
+ "glob": "~7.1.1",
+ "lodash": "~4.17.10",
+ "minimatch": "~3.0.2"
+ }
+ },
+ "got": {
+ "version": "9.6.0",
+ "resolved": "https://registry.npmjs.org/got/-/got-9.6.0.tgz",
+ "integrity": "sha512-R7eWptXuGYxwijs0eV+v3o6+XH1IqVK8dJOEecQfTmkncw9AV4dcw/Dhxi8MdlqPthxxpZyizMzyg8RTmEsG+Q==",
+ "dev": true,
+ "requires": {
+ "@sindresorhus/is": "^0.14.0",
+ "@szmarczak/http-timer": "^1.1.2",
+ "cacheable-request": "^6.0.0",
+ "decompress-response": "^3.3.0",
+ "duplexer3": "^0.1.4",
+ "get-stream": "^4.1.0",
+ "lowercase-keys": "^1.0.1",
+ "mimic-response": "^1.0.1",
+ "p-cancelable": "^1.0.0",
+ "to-readable-stream": "^1.0.0",
+ "url-parse-lax": "^3.0.0"
+ }
+ },
+ "graceful-fs": {
+ "version": "1.2.3",
+ "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-1.2.3.tgz",
+ "integrity": "sha1-FaSAaldUfLLS2/J/QuiajDRRs2Q=",
+ "dev": true
+ },
+ "growl": {
+ "version": "1.10.5",
+ "resolved": "https://registry.npmjs.org/growl/-/growl-1.10.5.tgz",
+ "integrity": "sha512-qBr4OuELkhPenW6goKVXiv47US3clb3/IbuWF9KNKEijAy9oeHxU9IgzjvJhHkUzhaj7rOUD7+YGWqUjLp5oSA==",
+ "dev": true
+ },
+ "grunt": {
+ "version": "1.2.1",
+ "resolved": "https://registry.npmjs.org/grunt/-/grunt-1.2.1.tgz",
+ "integrity": "sha512-zgJjn9N56tScvRt/y0+1QA+zDBnKTrkpyeSBqQPLcZvbqTD/oyGMrdZQXmm6I3828s+FmPvxc3Xv+lgKFtudOw==",
+ "dev": true,
+ "requires": {
+ "dateformat": "~3.0.3",
+ "eventemitter2": "~0.4.13",
+ "exit": "~0.1.2",
+ "findup-sync": "~0.3.0",
+ "glob": "~7.1.6",
+ "grunt-cli": "~1.3.2",
+ "grunt-known-options": "~1.1.0",
+ "grunt-legacy-log": "~2.0.0",
+ "grunt-legacy-util": "~1.1.1",
+ "iconv-lite": "~0.4.13",
+ "js-yaml": "~3.14.0",
+ "minimatch": "~3.0.4",
+ "mkdirp": "~1.0.4",
+ "nopt": "~3.0.6",
+ "rimraf": "~3.0.2"
+ },
+ "dependencies": {
+ "grunt-cli": {
+ "version": "1.3.2",
+ "resolved": "https://registry.npmjs.org/grunt-cli/-/grunt-cli-1.3.2.tgz",
+ "integrity": "sha512-8OHDiZZkcptxVXtMfDxJvmN7MVJNE8L/yIcPb4HB7TlyFD1kDvjHrb62uhySsU14wJx9ORMnTuhRMQ40lH/orQ==",
+ "dev": true,
+ "requires": {
+ "grunt-known-options": "~1.1.0",
+ "interpret": "~1.1.0",
+ "liftoff": "~2.5.0",
+ "nopt": "~4.0.1",
+ "v8flags": "~3.1.1"
+ },
+ "dependencies": {
+ "nopt": {
+ "version": "4.0.3",
+ "resolved": "https://registry.npmjs.org/nopt/-/nopt-4.0.3.tgz",
+ "integrity": "sha512-CvaGwVMztSMJLOeXPrez7fyfObdZqNUK1cPAEzLHrTybIua9pMdmmPR5YwtfNftIOMv3DPUhFaxsZMNTQO20Kg==",
+ "dev": true,
+ "requires": {
+ "abbrev": "1",
+ "osenv": "^0.1.4"
+ }
+ }
+ }
+ },
+ "mkdirp": {
+ "version": "1.0.4",
+ "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-1.0.4.tgz",
+ "integrity": "sha512-vVqVZQyf3WLx2Shd0qJ9xuvqgAyKPLAiqITEtqW0oIUjzo3PePDd6fW9iFz30ef7Ysp/oiWqbhszeGWW2T6Gzw==",
+ "dev": true
+ }
+ }
+ },
+ "grunt-exec": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/grunt-exec/-/grunt-exec-3.0.0.tgz",
+ "integrity": "sha512-cgAlreXf3muSYS5LzW0Cc4xHK03BjFOYk0MqCQ/MZ3k1Xz2GU7D+IAJg4UKicxpO+XdONJdx/NJ6kpy2wI+uHg==",
+ "dev": true
+ },
+ "grunt-known-options": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/grunt-known-options/-/grunt-known-options-1.1.1.tgz",
+ "integrity": "sha512-cHwsLqoighpu7TuYj5RonnEuxGVFnztcUqTqp5rXFGYL4OuPFofwC4Ycg7n9fYwvK6F5WbYgeVOwph9Crs2fsQ==",
+ "dev": true
+ },
+ "grunt-legacy-log": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/grunt-legacy-log/-/grunt-legacy-log-2.0.0.tgz",
+ "integrity": "sha512-1m3+5QvDYfR1ltr8hjiaiNjddxGdQWcH0rw1iKKiQnF0+xtgTazirSTGu68RchPyh1OBng1bBUjLmX8q9NpoCw==",
+ "dev": true,
+ "requires": {
+ "colors": "~1.1.2",
+ "grunt-legacy-log-utils": "~2.0.0",
+ "hooker": "~0.2.3",
+ "lodash": "~4.17.5"
+ }
+ },
+ "grunt-legacy-log-utils": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/grunt-legacy-log-utils/-/grunt-legacy-log-utils-2.0.1.tgz",
+ "integrity": "sha512-o7uHyO/J+i2tXG8r2bZNlVk20vlIFJ9IEYyHMCQGfWYru8Jv3wTqKZzvV30YW9rWEjq0eP3cflQ1qWojIe9VFA==",
+ "dev": true,
+ "requires": {
+ "chalk": "~2.4.1",
+ "lodash": "~4.17.10"
+ }
+ },
+ "grunt-legacy-util": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/grunt-legacy-util/-/grunt-legacy-util-1.1.1.tgz",
+ "integrity": "sha512-9zyA29w/fBe6BIfjGENndwoe1Uy31BIXxTH3s8mga0Z5Bz2Sp4UCjkeyv2tI449ymkx3x26B+46FV4fXEddl5A==",
+ "dev": true,
+ "requires": {
+ "async": "~1.5.2",
+ "exit": "~0.1.1",
+ "getobject": "~0.1.0",
+ "hooker": "~0.2.3",
+ "lodash": "~4.17.10",
+ "underscore.string": "~3.3.4",
+ "which": "~1.3.0"
+ }
+ },
+ "gunning-fog": {
+ "version": "1.0.6",
+ "resolved": "https://registry.npmjs.org/gunning-fog/-/gunning-fog-1.0.6.tgz",
+ "integrity": "sha512-yXkDi7mbWTPiITTztwhwXFMhXKnMviX/7kprz92BroMJbB/AgDATrHCRCtc87Ox024pQy2kMCihsm7tPonvV6A==",
+ "dev": true
+ },
+ "has-flag": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz",
+ "integrity": "sha1-tdRU3CGZriJWmfNGfloH87lVuv0=",
+ "dev": true
+ },
+ "has-value": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/has-value/-/has-value-1.0.0.tgz",
+ "integrity": "sha1-GLKB2lhbHFxR3vJMkw7SmgvmsXc=",
+ "dev": true,
+ "requires": {
+ "get-value": "^2.0.6",
+ "has-values": "^1.0.0",
+ "isobject": "^3.0.0"
+ }
+ },
+ "has-values": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/has-values/-/has-values-1.0.0.tgz",
+ "integrity": "sha1-lbC2P+whRmGab+V/51Yo1aOe/k8=",
+ "dev": true,
+ "requires": {
+ "is-number": "^3.0.0",
+ "kind-of": "^4.0.0"
+ },
+ "dependencies": {
+ "kind-of": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-4.0.0.tgz",
+ "integrity": "sha1-IIE989cSkosgc3hpGkUGb65y3Vc=",
+ "dev": true,
+ "requires": {
+ "is-buffer": "^1.1.5"
+ }
+ }
+ }
+ },
+ "homedir-polyfill": {
+ "version": "1.0.3",
+ "resolved": "https://registry.npmjs.org/homedir-polyfill/-/homedir-polyfill-1.0.3.tgz",
+ "integrity": "sha512-eSmmWE5bZTK2Nou4g0AI3zZ9rswp7GRKoKXS1BLUkvPviOqs4YTN1djQIqrXy9k5gEtdLPy86JjRwsNM9tnDcA==",
+ "dev": true,
+ "requires": {
+ "parse-passwd": "^1.0.0"
+ }
+ },
+ "hooker": {
+ "version": "0.2.3",
+ "resolved": "https://registry.npmjs.org/hooker/-/hooker-0.2.3.tgz",
+ "integrity": "sha1-uDT3I8xKJCqmWWNFnfbZhMXT2Vk=",
+ "dev": true
+ },
+ "hosted-git-info": {
+ "version": "2.8.8",
+ "resolved": "https://registry.npmjs.org/hosted-git-info/-/hosted-git-info-2.8.8.tgz",
+ "integrity": "sha512-f/wzC2QaWBs7t9IYqB4T3sR1xviIViXJRJTWBlx2Gf3g0Xi5vI7Yy4koXQ1c9OYDGHN9sBy1DQ2AB8fqZBWhUg==",
+ "dev": true
+ },
+ "html-escaper": {
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/html-escaper/-/html-escaper-2.0.2.tgz",
+ "integrity": "sha512-H2iMtd0I4Mt5eYiapRdIDjp+XzelXQ0tFE4JS7YFwFevXXMmOp9myNrUvCg0D6ws8iqkRPBfKHgbwig1SmlLfg==",
+ "dev": true
+ },
+ "http-cache-semantics": {
+ "version": "4.1.0",
+ "resolved": "https://registry.npmjs.org/http-cache-semantics/-/http-cache-semantics-4.1.0.tgz",
+ "integrity": "sha512-carPklcUh7ROWRK7Cv27RPtdhYhUsela/ue5/jKzjegVvXDqM2ILE9Q2BGn9JZJh1g87cp56su/FgQSzcWS8cQ==",
+ "dev": true
+ },
+ "http-errors": {
+ "version": "1.7.2",
+ "resolved": "https://registry.npmjs.org/http-errors/-/http-errors-1.7.2.tgz",
+ "integrity": "sha512-uUQBt3H/cSIVfch6i1EuPNy/YsRSOUBXTVfZ+yR7Zjez3qjBz6i9+i4zjNaoqcoFVI4lQJ5plg63TvGfRSDCRg==",
+ "dev": true,
+ "requires": {
+ "depd": "~1.1.2",
+ "inherits": "2.0.3",
+ "setprototypeof": "1.1.1",
+ "statuses": ">= 1.5.0 < 2",
+ "toidentifier": "1.0.0"
+ },
+ "dependencies": {
+ "inherits": {
+ "version": "2.0.3",
+ "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.3.tgz",
+ "integrity": "sha1-Yzwsg+PaQqUC9SRmAiSA9CCCYd4=",
+ "dev": true
+ }
+ }
+ },
+ "http-proxy": {
+ "version": "1.18.1",
+ "resolved": "https://registry.npmjs.org/http-proxy/-/http-proxy-1.18.1.tgz",
+ "integrity": "sha512-7mz/721AbnJwIVbnaSv1Cz3Am0ZLT/UBwkC92VlxhXv/k/BBQfM2fXElQNC27BVGr0uwUpplYPQM9LnaBMR5NQ==",
+ "dev": true,
+ "requires": {
+ "eventemitter3": "^4.0.0",
+ "follow-redirects": "^1.0.0",
+ "requires-port": "^1.0.0"
+ }
+ },
+ "iconv-lite": {
+ "version": "0.4.24",
+ "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.24.tgz",
+ "integrity": "sha512-v3MXnZAcvnywkTUEZomIActle7RXXeedOR31wwl7VlyoXO4Qi9arvSenNQWne1TcRwhCL1HwLI21bEqdpj8/rA==",
+ "dev": true,
+ "requires": {
+ "safer-buffer": ">= 2.1.2 < 3"
+ }
+ },
+ "ignore": {
+ "version": "5.1.8",
+ "resolved": "https://registry.npmjs.org/ignore/-/ignore-5.1.8.tgz",
+ "integrity": "sha512-BMpfD7PpiETpBl/A6S498BaIJ6Y/ABT93ETbby2fP00v4EbvPBXWEoaR1UBPKs3iR53pJY7EtZk5KACI57i1Uw==",
+ "dev": true
+ },
+ "indent-string": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/indent-string/-/indent-string-4.0.0.tgz",
+ "integrity": "sha512-EdDDZu4A2OyIK7Lr/2zG+w5jmbuk1DVBnEwREQvBzspBJkCEbRa8GxU1lghYcaGJCnRWibjDXlq779X1/y5xwg==",
+ "dev": true
+ },
+ "inflight": {
+ "version": "1.0.6",
+ "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz",
+ "integrity": "sha1-Sb1jMdfQLQwJvJEKEHW6gWW1bfk=",
+ "dev": true,
+ "requires": {
+ "once": "^1.3.0",
+ "wrappy": "1"
+ }
+ },
+ "inherits": {
+ "version": "2.0.4",
+ "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz",
+ "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==",
+ "dev": true
+ },
+ "ini": {
+ "version": "1.3.8",
+ "resolved": "https://registry.npmjs.org/ini/-/ini-1.3.8.tgz",
+ "integrity": "sha512-JV/yugV2uzW5iMRSiZAyDtQd+nxtUnjeLt0acNdw98kKLrvuRVyB80tsREOE7yvGVgalhZ6RNXCmEHkUKBKxew==",
+ "dev": true
+ },
+ "interpret": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/interpret/-/interpret-1.1.0.tgz",
+ "integrity": "sha1-ftGxQQxqDg94z5XTuEQMY/eLhhQ=",
+ "dev": true
+ },
+ "ip": {
+ "version": "1.1.5",
+ "resolved": "https://registry.npmjs.org/ip/-/ip-1.1.5.tgz",
+ "integrity": "sha1-vd7XARQpCCjAoDnnLvJfWq7ENUo=",
+ "dev": true
+ },
+ "ip-regex": {
+ "version": "4.1.0",
+ "resolved": "https://registry.npmjs.org/ip-regex/-/ip-regex-4.1.0.tgz",
+ "integrity": "sha512-pKnZpbgCTfH/1NLIlOduP/V+WRXzC2MOz3Qo8xmxk8C5GudJLgK5QyLVXOSWy3ParAH7Eemurl3xjv/WXYFvMA==",
+ "dev": true
+ },
+ "irregular-plurals": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/irregular-plurals/-/irregular-plurals-2.0.0.tgz",
+ "integrity": "sha512-Y75zBYLkh0lJ9qxeHlMjQ7bSbyiSqNW/UOPWDmzC7cXskL1hekSITh1Oc6JV0XCWWZ9DE8VYSB71xocLk3gmGw==",
+ "dev": true
+ },
+ "is-absolute": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/is-absolute/-/is-absolute-1.0.0.tgz",
+ "integrity": "sha512-dOWoqflvcydARa360Gvv18DZ/gRuHKi2NU/wU5X1ZFzdYfH29nkiNZsF3mp4OJ3H4yo9Mx8A/uAGNzpzPN3yBA==",
+ "dev": true,
+ "requires": {
+ "is-relative": "^1.0.0",
+ "is-windows": "^1.0.1"
+ }
+ },
+ "is-absolute-url": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/is-absolute-url/-/is-absolute-url-2.1.0.tgz",
+ "integrity": "sha1-UFMN+4T8yap9vnhS6Do3uTufKqY=",
+ "dev": true
+ },
+ "is-accessor-descriptor": {
+ "version": "0.1.6",
+ "resolved": "https://registry.npmjs.org/is-accessor-descriptor/-/is-accessor-descriptor-0.1.6.tgz",
+ "integrity": "sha1-qeEss66Nh2cn7u84Q/igiXtcmNY=",
+ "dev": true,
+ "requires": {
+ "kind-of": "^3.0.2"
+ },
+ "dependencies": {
+ "kind-of": {
+ "version": "3.2.2",
+ "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz",
+ "integrity": "sha1-MeohpzS6ubuw8yRm2JOupR5KPGQ=",
+ "dev": true,
+ "requires": {
+ "is-buffer": "^1.1.5"
+ }
+ }
+ }
+ },
+ "is-alphabetical": {
+ "version": "1.0.4",
+ "resolved": "https://registry.npmjs.org/is-alphabetical/-/is-alphabetical-1.0.4.tgz",
+ "integrity": "sha512-DwzsA04LQ10FHTZuL0/grVDk4rFoVH1pjAToYwBrHSxcrBIGQuXrQMtD5U1b0U2XVgKZCTLLP8u2Qxqhy3l2Vg==",
+ "dev": true
+ },
+ "is-alphanumeric": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/is-alphanumeric/-/is-alphanumeric-1.0.0.tgz",
+ "integrity": "sha1-Spzvcdr0wAHB2B1j0UDPU/1oifQ=",
+ "dev": true
+ },
+ "is-alphanumerical": {
+ "version": "1.0.4",
+ "resolved": "https://registry.npmjs.org/is-alphanumerical/-/is-alphanumerical-1.0.4.tgz",
+ "integrity": "sha512-UzoZUr+XfVz3t3v4KyGEniVL9BDRoQtY7tOyrRybkVNjDFWyo1yhXNGrrBTQxp3ib9BLAWs7k2YKBQsFRkZG9A==",
+ "dev": true,
+ "requires": {
+ "is-alphabetical": "^1.0.0",
+ "is-decimal": "^1.0.0"
+ }
+ },
+ "is-arrayish": {
+ "version": "0.2.1",
+ "resolved": "https://registry.npmjs.org/is-arrayish/-/is-arrayish-0.2.1.tgz",
+ "integrity": "sha1-d8mYQFJ6qOyxqLppe4BkWnqSap0=",
+ "dev": true
+ },
+ "is-binary-path": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/is-binary-path/-/is-binary-path-2.1.0.tgz",
+ "integrity": "sha512-ZMERYes6pDydyuGidse7OsHxtbI7WVeUEozgR/g7rd0xUimYNlvZRE/K2MgZTjWy725IfelLeVcEM97mmtRGXw==",
+ "dev": true,
+ "requires": {
+ "binary-extensions": "^2.0.0"
+ }
+ },
+ "is-buffer": {
+ "version": "1.1.6",
+ "resolved": "https://registry.npmjs.org/is-buffer/-/is-buffer-1.1.6.tgz",
+ "integrity": "sha512-NcdALwpXkTm5Zvvbk7owOUSvVvBKDgKP5/ewfXEznmQFfs4ZRmanOeKBTjRVjka3QFoN6XJ+9F3USqfHqTaU5w==",
+ "dev": true
+ },
+ "is-data-descriptor": {
+ "version": "0.1.4",
+ "resolved": "https://registry.npmjs.org/is-data-descriptor/-/is-data-descriptor-0.1.4.tgz",
+ "integrity": "sha1-C17mSDiOLIYCgueT8YVv7D8wG1Y=",
+ "dev": true,
+ "requires": {
+ "kind-of": "^3.0.2"
+ },
+ "dependencies": {
+ "kind-of": {
+ "version": "3.2.2",
+ "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz",
+ "integrity": "sha1-MeohpzS6ubuw8yRm2JOupR5KPGQ=",
+ "dev": true,
+ "requires": {
+ "is-buffer": "^1.1.5"
+ }
+ }
+ }
+ },
+ "is-decimal": {
+ "version": "1.0.4",
+ "resolved": "https://registry.npmjs.org/is-decimal/-/is-decimal-1.0.4.tgz",
+ "integrity": "sha512-RGdriMmQQvZ2aqaQq3awNA6dCGtKpiDFcOzrTWrDAT2MiWrKQVPmxLGHl7Y2nNu6led0kEyoX0enY0qXYsv9zw==",
+ "dev": true
+ },
+ "is-descriptor": {
+ "version": "0.1.6",
+ "resolved": "https://registry.npmjs.org/is-descriptor/-/is-descriptor-0.1.6.tgz",
+ "integrity": "sha512-avDYr0SB3DwO9zsMov0gKCESFYqCnE4hq/4z3TdUlukEy5t9C0YRq7HLrsN52NAcqXKaepeCD0n+B0arnVG3Hg==",
+ "dev": true,
+ "requires": {
+ "is-accessor-descriptor": "^0.1.6",
+ "is-data-descriptor": "^0.1.4",
+ "kind-of": "^5.0.0"
+ },
+ "dependencies": {
+ "kind-of": {
+ "version": "5.1.0",
+ "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-5.1.0.tgz",
+ "integrity": "sha512-NGEErnH6F2vUuXDh+OlbcKW7/wOcfdRHaZ7VWtqCztfHri/++YKmP51OdWeGPuqCOba6kk2OTe5d02VmTB80Pw==",
+ "dev": true
+ }
+ }
+ },
+ "is-docker": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/is-docker/-/is-docker-2.0.0.tgz",
+ "integrity": "sha512-pJEdRugimx4fBMra5z2/5iRdZ63OhYV0vr0Dwm5+xtW4D1FvRkB8hamMIhnWfyJeDdyr/aa7BDyNbtG38VxgoQ==",
+ "dev": true
+ },
+ "is-empty": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/is-empty/-/is-empty-1.2.0.tgz",
+ "integrity": "sha1-3pu1snhzigWgsJpX4ftNSjQan2s=",
+ "dev": true
+ },
+ "is-extendable": {
+ "version": "0.1.1",
+ "resolved": "https://registry.npmjs.org/is-extendable/-/is-extendable-0.1.1.tgz",
+ "integrity": "sha1-YrEQ4omkcUGOPsNqYX1HLjAd/Ik=",
+ "dev": true
+ },
+ "is-extglob": {
+ "version": "2.1.1",
+ "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz",
+ "integrity": "sha1-qIwCU1eR8C7TfHahueqXc8gz+MI=",
+ "dev": true
+ },
+ "is-fullwidth-code-point": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz",
+ "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==",
+ "dev": true
+ },
+ "is-glob": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-3.1.0.tgz",
+ "integrity": "sha1-e6WuJCF4BKxwcHuWkiVnSGzD6Eo=",
+ "dev": true,
+ "requires": {
+ "is-extglob": "^2.1.0"
+ }
+ },
+ "is-hexadecimal": {
+ "version": "1.0.4",
+ "resolved": "https://registry.npmjs.org/is-hexadecimal/-/is-hexadecimal-1.0.4.tgz",
+ "integrity": "sha512-gyPJuv83bHMpocVYoqof5VDiZveEoGoFL8m3BXNb2VW8Xs+rz9kqO8LOQ5DH6EsuvilT1ApazU0pyl+ytbPtlw==",
+ "dev": true
+ },
+ "is-hidden": {
+ "version": "1.1.3",
+ "resolved": "https://registry.npmjs.org/is-hidden/-/is-hidden-1.1.3.tgz",
+ "integrity": "sha512-FFzhGKA9h59OFxeaJl0W5ILTYetI8WsdqdofKr69uLKZdV6hbDKxj8vkpG3L9uS/6Q/XYh1tkXm6xwRGFweETA==",
+ "dev": true
+ },
+ "is-ip": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/is-ip/-/is-ip-3.1.0.tgz",
+ "integrity": "sha512-35vd5necO7IitFPjd/YBeqwWnyDWbuLH9ZXQdMfDA8TEo7pv5X8yfrvVO3xbJbLUlERCMvf6X0hTUamQxCYJ9Q==",
+ "dev": true,
+ "requires": {
+ "ip-regex": "^4.0.0"
+ }
+ },
+ "is-number": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/is-number/-/is-number-3.0.0.tgz",
+ "integrity": "sha1-JP1iAaR4LPUFYcgQJ2r8fRLXEZU=",
+ "dev": true,
+ "requires": {
+ "kind-of": "^3.0.2"
+ },
+ "dependencies": {
+ "kind-of": {
+ "version": "3.2.2",
+ "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz",
+ "integrity": "sha1-MeohpzS6ubuw8yRm2JOupR5KPGQ=",
+ "dev": true,
+ "requires": {
+ "is-buffer": "^1.1.5"
+ }
+ }
+ }
+ },
+ "is-object": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/is-object/-/is-object-1.0.1.tgz",
+ "integrity": "sha1-iVJojF7C/9awPsyF52ngKQMINHA=",
+ "dev": true
+ },
+ "is-online": {
+ "version": "8.4.0",
+ "resolved": "https://registry.npmjs.org/is-online/-/is-online-8.4.0.tgz",
+ "integrity": "sha512-i0qGRbtUaQEU5Z7O3LmOnH3yorhG1lnygqY2cv3InlQKKm3nx6XiGXZk49lATR3N7hyxoiuHMR0pKwRuB+s5lg==",
+ "dev": true,
+ "requires": {
+ "got": "^9.6.0",
+ "p-any": "^2.0.0",
+ "p-timeout": "^3.0.0",
+ "public-ip": "^4.0.1"
+ }
+ },
+ "is-plain-obj": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-2.1.0.tgz",
+ "integrity": "sha512-YWnfyRwxL/+SsrWYfOpUtz5b3YD+nyfkHvjbcanzk8zgyO4ASD67uVMRt8k5bM4lLMDnXfriRhOpemw+NfT1eA==",
+ "dev": true
+ },
+ "is-plain-object": {
+ "version": "2.0.4",
+ "resolved": "https://registry.npmjs.org/is-plain-object/-/is-plain-object-2.0.4.tgz",
+ "integrity": "sha512-h5PpgXkWitc38BBMYawTYMWJHFZJVnBquFE57xFpjB8pJFiF6gZ+bU+WyI/yqXiFR5mdLsgYNaPe8uao6Uv9Og==",
+ "dev": true,
+ "requires": {
+ "isobject": "^3.0.1"
+ }
+ },
+ "is-relative": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/is-relative/-/is-relative-1.0.0.tgz",
+ "integrity": "sha512-Kw/ReK0iqwKeu0MITLFuj0jbPAmEiOsIwyIXvvbfa6QfmN9pkD1M+8pdk7Rl/dTKbH34/XBFMbgD4iMJhLQbGA==",
+ "dev": true,
+ "requires": {
+ "is-unc-path": "^1.0.0"
+ }
+ },
+ "is-relative-url": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/is-relative-url/-/is-relative-url-2.0.0.tgz",
+ "integrity": "sha1-cpAtf+BLPUeS59sV+duEtyBMnO8=",
+ "dev": true,
+ "requires": {
+ "is-absolute-url": "^2.0.0"
+ }
+ },
+ "is-unc-path": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/is-unc-path/-/is-unc-path-1.0.0.tgz",
+ "integrity": "sha512-mrGpVd0fs7WWLfVsStvgF6iEJnbjDFZh9/emhRDcGWTduTfNHd9CHeUwH3gYIjdbwo4On6hunkztwOaAw0yllQ==",
+ "dev": true,
+ "requires": {
+ "unc-path-regex": "^0.1.2"
+ }
+ },
+ "is-whitespace-character": {
+ "version": "1.0.4",
+ "resolved": "https://registry.npmjs.org/is-whitespace-character/-/is-whitespace-character-1.0.4.tgz",
+ "integrity": "sha512-SDweEzfIZM0SJV0EUga669UTKlmL0Pq8Lno0QDQsPnvECB3IM2aP0gdx5TrU0A01MAPfViaZiI2V1QMZLaKK5w==",
+ "dev": true
+ },
+ "is-windows": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/is-windows/-/is-windows-1.0.2.tgz",
+ "integrity": "sha512-eXK1UInq2bPmjyX6e3VHIzMLobc4J94i4AWn+Hpq3OU5KkrRC96OAcR3PRJ/pGu6m8TRnBHP9dkXQVsT/COVIA==",
+ "dev": true
+ },
+ "is-word-character": {
+ "version": "1.0.4",
+ "resolved": "https://registry.npmjs.org/is-word-character/-/is-word-character-1.0.4.tgz",
+ "integrity": "sha512-5SMO8RVennx3nZrqtKwCGyyetPE9VDba5ugvKLaD4KopPG5kR4mQ7tNt/r7feL5yt5h3lpuBbIUmCOG2eSzXHA==",
+ "dev": true
+ },
+ "is-wsl": {
+ "version": "2.2.0",
+ "resolved": "https://registry.npmjs.org/is-wsl/-/is-wsl-2.2.0.tgz",
+ "integrity": "sha512-fKzAra0rGJUUBwGBgNkHZuToZcn+TtXHpeCgmkMJMMYx1sQDYaCSyjJBSCa2nH1DGm7s3n1oBnohoVTBaN7Lww==",
+ "dev": true,
+ "requires": {
+ "is-docker": "^2.0.0"
+ }
+ },
+ "isarray": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/isarray/-/isarray-1.0.0.tgz",
+ "integrity": "sha1-u5NdSFgsuhaMBoNJV6VKPgcSTxE=",
+ "dev": true
+ },
+ "isbinaryfile": {
+ "version": "4.0.6",
+ "resolved": "https://registry.npmjs.org/isbinaryfile/-/isbinaryfile-4.0.6.tgz",
+ "integrity": "sha512-ORrEy+SNVqUhrCaal4hA4fBzhggQQ+BaLntyPOdoEiwlKZW9BZiJXjg3RMiruE4tPEI3pyVPpySHQF/dKWperg==",
+ "dev": true
+ },
+ "isexe": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz",
+ "integrity": "sha1-6PvzdNxVb/iUehDcsFctYz8s+hA=",
+ "dev": true
+ },
+ "isobject": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/isobject/-/isobject-3.0.1.tgz",
+ "integrity": "sha1-TkMekrEalzFjaqH5yNHMvP2reN8=",
+ "dev": true
+ },
+ "istanbul-lib-coverage": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/istanbul-lib-coverage/-/istanbul-lib-coverage-3.0.0.tgz",
+ "integrity": "sha512-UiUIqxMgRDET6eR+o5HbfRYP1l0hqkWOs7vNxC/mggutCMUIhWMm8gAHb8tHlyfD3/l6rlgNA5cKdDzEAf6hEg==",
+ "dev": true
+ },
+ "istanbul-lib-instrument": {
+ "version": "4.0.3",
+ "resolved": "https://registry.npmjs.org/istanbul-lib-instrument/-/istanbul-lib-instrument-4.0.3.tgz",
+ "integrity": "sha512-BXgQl9kf4WTCPCCpmFGoJkz/+uhvm7h7PFKUYxh7qarQd3ER33vHG//qaE8eN25l07YqZPpHXU9I09l/RD5aGQ==",
+ "dev": true,
+ "requires": {
+ "@babel/core": "^7.7.5",
+ "@istanbuljs/schema": "^0.1.2",
+ "istanbul-lib-coverage": "^3.0.0",
+ "semver": "^6.3.0"
+ }
+ },
+ "istanbul-lib-report": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/istanbul-lib-report/-/istanbul-lib-report-3.0.0.tgz",
+ "integrity": "sha512-wcdi+uAKzfiGT2abPpKZ0hSU1rGQjUQnLvtY5MpQ7QCTahD3VODhcu4wcfY1YtkGaDD5yuydOLINXsfbus9ROw==",
+ "dev": true,
+ "requires": {
+ "istanbul-lib-coverage": "^3.0.0",
+ "make-dir": "^3.0.0",
+ "supports-color": "^7.1.0"
+ },
+ "dependencies": {
+ "has-flag": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz",
+ "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==",
+ "dev": true
+ },
+ "supports-color": {
+ "version": "7.1.0",
+ "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.1.0.tgz",
+ "integrity": "sha512-oRSIpR8pxT1Wr2FquTNnGet79b3BWljqOuoW/h4oBhxJ/HUbX5nX6JSruTkvXDCFMwDPvsaTTbvMLKZWSy0R5g==",
+ "dev": true,
+ "requires": {
+ "has-flag": "^4.0.0"
+ }
+ }
+ }
+ },
+ "istanbul-lib-source-maps": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/istanbul-lib-source-maps/-/istanbul-lib-source-maps-4.0.0.tgz",
+ "integrity": "sha512-c16LpFRkR8vQXyHZ5nLpY35JZtzj1PQY1iZmesUbf1FZHbIupcWfjgOXBY9YHkLEQ6puz1u4Dgj6qmU/DisrZg==",
+ "dev": true,
+ "requires": {
+ "debug": "^4.1.1",
+ "istanbul-lib-coverage": "^3.0.0",
+ "source-map": "^0.6.1"
+ },
+ "dependencies": {
+ "debug": {
+ "version": "4.1.1",
+ "resolved": "https://registry.npmjs.org/debug/-/debug-4.1.1.tgz",
+ "integrity": "sha512-pYAIzeRo8J6KPEaJ0VWOh5Pzkbw/RetuzehGM7QRRX5he4fPHx2rdKMB256ehJCkX+XRQm16eZLqLNS8RSZXZw==",
+ "dev": true,
+ "requires": {
+ "ms": "^2.1.1"
+ }
+ },
+ "ms": {
+ "version": "2.1.2",
+ "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz",
+ "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==",
+ "dev": true
+ },
+ "source-map": {
+ "version": "0.6.1",
+ "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz",
+ "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==",
+ "dev": true
+ }
+ }
+ },
+ "istanbul-reports": {
+ "version": "3.0.2",
+ "resolved": "https://registry.npmjs.org/istanbul-reports/-/istanbul-reports-3.0.2.tgz",
+ "integrity": "sha512-9tZvz7AiR3PEDNGiV9vIouQ/EAcqMXFmkcA1CDFTwOB98OZVDL0PH9glHotf5Ugp6GCOTypfzGWI/OqjWNCRUw==",
+ "dev": true,
+ "requires": {
+ "html-escaper": "^2.0.0",
+ "istanbul-lib-report": "^3.0.0"
+ }
+ },
+ "jasmine": {
+ "version": "3.6.1",
+ "resolved": "https://registry.npmjs.org/jasmine/-/jasmine-3.6.1.tgz",
+ "integrity": "sha512-Jqp8P6ZWkTVFGmJwBK46p+kJNrZCdqkQ4GL+PGuBXZwK1fM4ST9BizkYgIwCFqYYqnTizAy6+XG2Ej5dFrej9Q==",
+ "dev": true,
+ "requires": {
+ "fast-glob": "^2.2.6",
+ "jasmine-core": "~3.6.0"
+ }
+ },
+ "jasmine-core": {
+ "version": "3.6.0",
+ "resolved": "https://registry.npmjs.org/jasmine-core/-/jasmine-core-3.6.0.tgz",
+ "integrity": "sha512-8uQYa7zJN8hq9z+g8z1bqCfdC8eoDAeVnM5sfqs7KHv9/ifoJ500m018fpFc7RDaO6SWCLCXwo/wPSNcdYTgcw==",
+ "dev": true
+ },
+ "jasmine-growl-reporter": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/jasmine-growl-reporter/-/jasmine-growl-reporter-2.0.0.tgz",
+ "integrity": "sha512-RYwVfPaGgxQQSHDOt6jQ99/KAkFQ/Fiwg/AzBS+uO9A4UhGhxb7hwXaUUSU/Zs0MxBoFNqmIRC+7P4/+5O3lXg==",
+ "dev": true,
+ "requires": {
+ "growl": "^1.10.5"
+ }
+ },
+ "jasmine-node": {
+ "version": "github:BrainDoctor/jasmine-node#eac79306acaf016256cdd5824003d99db053f722",
+ "from": "github:BrainDoctor/jasmine-node",
+ "dev": true,
+ "requires": {
+ "coffee-script": "~1.7.1",
+ "gaze": "~0.5.1",
+ "grunt-exec": "~0.4.5",
+ "jasmine-growl-reporter": "~0.2.0",
+ "jasmine-reporters": "~2.0.6",
+ "minimist": "~0.0.8",
+ "mkdirp": "~0.3.5",
+ "underscore": "~1.6.0",
+ "walkdir": "~0.0.7",
+ "xml2js": "~0.4.1"
+ },
+ "dependencies": {
+ "gaze": {
+ "version": "0.5.2",
+ "resolved": "https://registry.npmjs.org/gaze/-/gaze-0.5.2.tgz",
+ "integrity": "sha1-QLcJU30k0dRXZ9takIaJ3+aaxE8=",
+ "dev": true,
+ "requires": {
+ "globule": "~0.1.0"
+ }
+ },
+ "glob": {
+ "version": "3.1.21",
+ "resolved": "https://registry.npmjs.org/glob/-/glob-3.1.21.tgz",
+ "integrity": "sha1-0p4KBV3qUTj00H7UDomC6DwgZs0=",
+ "dev": true,
+ "requires": {
+ "graceful-fs": "~1.2.0",
+ "inherits": "1",
+ "minimatch": "~0.2.11"
+ }
+ },
+ "globule": {
+ "version": "0.1.0",
+ "resolved": "https://registry.npmjs.org/globule/-/globule-0.1.0.tgz",
+ "integrity": "sha1-2cjt3h2nnRJaFRt5UzuXhnY0auU=",
+ "dev": true,
+ "requires": {
+ "glob": "~3.1.21",
+ "lodash": "~1.0.1",
+ "minimatch": "~0.2.11"
+ }
+ },
+ "growl": {
+ "version": "1.7.0",
+ "resolved": "https://registry.npmjs.org/growl/-/growl-1.7.0.tgz",
+ "integrity": "sha1-3i1mE20ALhErpw8/EMMc98NQsto=",
+ "dev": true
+ },
+ "grunt-exec": {
+ "version": "0.4.7",
+ "resolved": "https://registry.npmjs.org/grunt-exec/-/grunt-exec-0.4.7.tgz",
+ "integrity": "sha1-QAUf+k6wyWV+BTuV6I1ENSocLCU=",
+ "dev": true
+ },
+ "inherits": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/inherits/-/inherits-1.0.2.tgz",
+ "integrity": "sha1-ykMJ2t7mtUzAuNJH6NfHoJdb3Js=",
+ "dev": true
+ },
+ "jasmine-growl-reporter": {
+ "version": "0.2.1",
+ "resolved": "https://registry.npmjs.org/jasmine-growl-reporter/-/jasmine-growl-reporter-0.2.1.tgz",
+ "integrity": "sha1-1fCje5L2qD/VxkgrgJSVyQqLVf4=",
+ "dev": true,
+ "requires": {
+ "growl": "~1.7.0"
+ }
+ },
+ "jasmine-reporters": {
+ "version": "2.0.8",
+ "resolved": "https://registry.npmjs.org/jasmine-reporters/-/jasmine-reporters-2.0.8.tgz",
+ "integrity": "sha1-XLMn1yRFAqGHzuxml+WmJvgMNbo=",
+ "dev": true,
+ "requires": {
+ "mkdirp": "~0.3.5"
+ }
+ },
+ "lodash": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/lodash/-/lodash-1.0.2.tgz",
+ "integrity": "sha1-j1dWDIO1n8JwvT1WG2kAQ0MOJVE=",
+ "dev": true
+ },
+ "minimatch": {
+ "version": "0.2.14",
+ "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-0.2.14.tgz",
+ "integrity": "sha1-x054BXT2PG+aCQ6Q775u9TpqdWo=",
+ "dev": true,
+ "requires": {
+ "lru-cache": "2",
+ "sigmund": "~1.0.0"
+ }
+ },
+ "minimist": {
+ "version": "0.0.10",
+ "resolved": "https://registry.npmjs.org/minimist/-/minimist-0.0.10.tgz",
+ "integrity": "sha1-3j+YVD2/lggr5IrRoMfNqDYwHc8=",
+ "dev": true
+ },
+ "mkdirp": {
+ "version": "0.3.5",
+ "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-0.3.5.tgz",
+ "integrity": "sha1-3j5fiWHIjHh+4TaN+EmsRBPsqNc=",
+ "dev": true
+ },
+ "underscore": {
+ "version": "1.6.0",
+ "resolved": "https://registry.npmjs.org/underscore/-/underscore-1.6.0.tgz",
+ "integrity": "sha1-izixDKze9jM3uLJOT/htRa6lKag=",
+ "dev": true
+ },
+ "walkdir": {
+ "version": "0.0.12",
+ "resolved": "https://registry.npmjs.org/walkdir/-/walkdir-0.0.12.tgz",
+ "integrity": "sha512-HFhaD4mMWPzFSqhpyDG48KDdrjfn409YQuVW7ckZYhW4sE87mYtWifdB/+73RA7+p4s4K18n5Jfx1kHthE1gBw==",
+ "dev": true
+ }
+ }
+ },
+ "jasmine-reporters": {
+ "version": "2.3.2",
+ "resolved": "https://registry.npmjs.org/jasmine-reporters/-/jasmine-reporters-2.3.2.tgz",
+ "integrity": "sha512-u/7AT9SkuZsUfFBLLzbErohTGNsEUCKaQbsVYnLFW1gEuL2DzmBL4n8v90uZsqIqlWvWUgian8J6yOt5Fyk/+A==",
+ "dev": true,
+ "requires": {
+ "mkdirp": "^0.5.1",
+ "xmldom": "^0.1.22"
+ }
+ },
+ "js-tokens": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz",
+ "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==",
+ "dev": true
+ },
+ "js-yaml": {
+ "version": "3.14.0",
+ "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.0.tgz",
+ "integrity": "sha512-/4IbIeHcD9VMHFqDR/gQ7EdZdLimOvW2DdcxFjdyyZ9NsbS+ccrXqVWDtab/lRl5AlUqmpBx8EhPaWR+OtY17A==",
+ "dev": true,
+ "requires": {
+ "argparse": "^1.0.7",
+ "esprima": "^4.0.0"
+ }
+ },
+ "jsesc": {
+ "version": "2.5.2",
+ "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-2.5.2.tgz",
+ "integrity": "sha512-OYu7XEzjkCQ3C5Ps3QIZsQfNpqoJyZZA99wd9aWd05NCtC5pWOkShK2mkL6HXQR6/Cy2lbNdPlZBpuQHXE63gA==",
+ "dev": true
+ },
+ "json-buffer": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/json-buffer/-/json-buffer-3.0.0.tgz",
+ "integrity": "sha1-Wx85evx11ne96Lz8Dkfh+aPZqJg=",
+ "dev": true
+ },
+ "json-parse-better-errors": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/json-parse-better-errors/-/json-parse-better-errors-1.0.2.tgz",
+ "integrity": "sha512-mrqyZKfX5EhL7hvqcV6WG1yYjnjeuYDzDhhcAAUrq8Po85NBQBJP+ZDUT75qZQ98IkUoBqdkExkukOU7Ts2wrw==",
+ "dev": true
+ },
+ "json5": {
+ "version": "2.1.3",
+ "resolved": "https://registry.npmjs.org/json5/-/json5-2.1.3.tgz",
+ "integrity": "sha512-KXPvOm8K9IJKFM0bmdn8QXh7udDh1g/giieX0NLCaMnb4hEiVFqnop2ImTXCc5e0/oHz3LTqmHGtExn5hfMkOA==",
+ "dev": true,
+ "requires": {
+ "minimist": "^1.2.5"
+ }
+ },
+ "jsonfile": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-4.0.0.tgz",
+ "integrity": "sha1-h3Gq4HmbZAdrdmQPygWPnBDjPss=",
+ "dev": true,
+ "requires": {
+ "graceful-fs": "^4.1.6"
+ },
+ "dependencies": {
+ "graceful-fs": {
+ "version": "4.2.4",
+ "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.4.tgz",
+ "integrity": "sha512-WjKPNJF79dtJAVniUlGGWHYGz2jWxT6VhN/4m1NdkbZ2nOsEF+cI1Edgql5zCRhs/VsQYRvrXctxktVXZUkixw==",
+ "dev": true,
+ "optional": true
+ }
+ }
+ },
+ "karma": {
+ "version": "6.0.2",
+ "resolved": "https://registry.npmjs.org/karma/-/karma-6.0.2.tgz",
+ "integrity": "sha512-GBm8vPkQvjJL1ye//iH6yk9ROHOW+1TaFzRvbc6TbbCOFszbEG66dyDMoFjMMTAHy0ERNqRrMHap/LEHWeijNA==",
+ "dev": true,
+ "requires": {
+ "body-parser": "^1.19.0",
+ "braces": "^3.0.2",
+ "chokidar": "^3.4.2",
+ "colors": "^1.4.0",
+ "connect": "^3.7.0",
+ "di": "^0.0.1",
+ "dom-serialize": "^2.2.1",
+ "glob": "^7.1.6",
+ "graceful-fs": "^4.2.4",
+ "http-proxy": "^1.18.1",
+ "isbinaryfile": "^4.0.6",
+ "lodash": "^4.17.19",
+ "log4js": "^6.2.1",
+ "mime": "^2.4.5",
+ "minimatch": "^3.0.4",
+ "qjobs": "^1.2.0",
+ "range-parser": "^1.2.1",
+ "rimraf": "^3.0.2",
+ "socket.io": "^3.0.4",
+ "source-map": "^0.6.1",
+ "tmp": "0.2.1",
+ "ua-parser-js": "^0.7.23",
+ "yargs": "^16.1.1"
+ },
+ "dependencies": {
+ "braces": {
+ "version": "3.0.2",
+ "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.2.tgz",
+ "integrity": "sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A==",
+ "dev": true,
+ "requires": {
+ "fill-range": "^7.0.1"
+ }
+ },
+ "chokidar": {
+ "version": "3.5.1",
+ "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-3.5.1.tgz",
+ "integrity": "sha512-9+s+Od+W0VJJzawDma/gvBNQqkTiqYTWLuZoyAsivsI4AaWTCzHG06/TMjsf1cYe9Cb97UCEhjz7HvnPk2p/tw==",
+ "dev": true,
+ "requires": {
+ "anymatch": "~3.1.1",
+ "braces": "~3.0.2",
+ "fsevents": "~2.3.1",
+ "glob-parent": "~5.1.0",
+ "is-binary-path": "~2.1.0",
+ "is-glob": "~4.0.1",
+ "normalize-path": "~3.0.0",
+ "readdirp": "~3.5.0"
+ }
+ },
+ "colors": {
+ "version": "1.4.0",
+ "resolved": "https://registry.npmjs.org/colors/-/colors-1.4.0.tgz",
+ "integrity": "sha512-a+UqTh4kgZg/SlGvfbzDHpgRu7AAQOmmqRHJnxhRZICKFUT91brVhNNt58CMWU9PsBbv3PDCZUHbVxuDiH2mtA==",
+ "dev": true
+ },
+ "fill-range": {
+ "version": "7.0.1",
+ "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.0.1.tgz",
+ "integrity": "sha512-qOo9F+dMUmC2Lcb4BbVvnKJxTPjCm+RRpe4gDuGrzkL7mEVl/djYSu2OdQ2Pa302N4oqkSg9ir6jaLWJ2USVpQ==",
+ "dev": true,
+ "requires": {
+ "to-regex-range": "^5.0.1"
+ }
+ },
+ "fsevents": {
+ "version": "2.3.1",
+ "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.1.tgz",
+ "integrity": "sha512-YR47Eg4hChJGAB1O3yEAOkGO+rlzutoICGqGo9EZ4lKWokzZRSyIW1QmTzqjtw8MJdj9srP869CuWw/hyzSiBw==",
+ "dev": true,
+ "optional": true
+ },
+ "glob-parent": {
+ "version": "5.1.1",
+ "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.1.tgz",
+ "integrity": "sha512-FnI+VGOpnlGHWZxthPGR+QhR78fuiK0sNLkHQv+bL9fQi57lNNdquIbna/WrfROrolq8GK5Ek6BiMwqL/voRYQ==",
+ "dev": true,
+ "requires": {
+ "is-glob": "^4.0.1"
+ }
+ },
+ "graceful-fs": {
+ "version": "4.2.4",
+ "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.4.tgz",
+ "integrity": "sha512-WjKPNJF79dtJAVniUlGGWHYGz2jWxT6VhN/4m1NdkbZ2nOsEF+cI1Edgql5zCRhs/VsQYRvrXctxktVXZUkixw==",
+ "dev": true
+ },
+ "is-glob": {
+ "version": "4.0.1",
+ "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.1.tgz",
+ "integrity": "sha512-5G0tKtBTFImOqDnLB2hG6Bp2qcKEFduo4tZu9MT/H6NQv/ghhy30o55ufafxJ/LdH79LLs2Kfrn85TLKyA7BUg==",
+ "dev": true,
+ "requires": {
+ "is-extglob": "^2.1.1"
+ }
+ },
+ "is-number": {
+ "version": "7.0.0",
+ "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz",
+ "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==",
+ "dev": true
+ },
+ "readdirp": {
+ "version": "3.5.0",
+ "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-3.5.0.tgz",
+ "integrity": "sha512-cMhu7c/8rdhkHXWsY+osBhfSy0JikwpHK/5+imo+LpeasTF8ouErHrlYkwT0++njiyuDvc7OFY5T3ukvZ8qmFQ==",
+ "dev": true,
+ "requires": {
+ "picomatch": "^2.2.1"
+ }
+ },
+ "source-map": {
+ "version": "0.6.1",
+ "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz",
+ "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==",
+ "dev": true
+ },
+ "to-regex-range": {
+ "version": "5.0.1",
+ "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz",
+ "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==",
+ "dev": true,
+ "requires": {
+ "is-number": "^7.0.0"
+ }
+ }
+ }
+ },
+ "karma-chrome-launcher": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/karma-chrome-launcher/-/karma-chrome-launcher-3.1.0.tgz",
+ "integrity": "sha512-3dPs/n7vgz1rxxtynpzZTvb9y/GIaW8xjAwcIGttLbycqoFtI7yo1NGnQi6oFTherRE+GIhCAHZC4vEqWGhNvg==",
+ "dev": true,
+ "requires": {
+ "which": "^1.2.1"
+ }
+ },
+ "karma-coverage": {
+ "version": "2.0.3",
+ "resolved": "https://registry.npmjs.org/karma-coverage/-/karma-coverage-2.0.3.tgz",
+ "integrity": "sha512-atDvLQqvPcLxhED0cmXYdsPMCQuh6Asa9FMZW1bhNqlVEhJoB9qyZ2BY1gu7D/rr5GLGb5QzYO4siQskxaWP/g==",
+ "dev": true,
+ "requires": {
+ "istanbul-lib-coverage": "^3.0.0",
+ "istanbul-lib-instrument": "^4.0.1",
+ "istanbul-lib-report": "^3.0.0",
+ "istanbul-lib-source-maps": "^4.0.0",
+ "istanbul-reports": "^3.0.0",
+ "minimatch": "^3.0.4"
+ }
+ },
+ "karma-firefox-launcher": {
+ "version": "1.3.0",
+ "resolved": "https://registry.npmjs.org/karma-firefox-launcher/-/karma-firefox-launcher-1.3.0.tgz",
+ "integrity": "sha512-Fi7xPhwrRgr+94BnHX0F5dCl1miIW4RHnzjIGxF8GaIEp7rNqX7LSi7ok63VXs3PS/5MQaQMhGxw+bvD+pibBQ==",
+ "dev": true,
+ "requires": {
+ "is-wsl": "^2.1.0"
+ }
+ },
+ "karma-jasmine": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/karma-jasmine/-/karma-jasmine-2.0.1.tgz",
+ "integrity": "sha512-iuC0hmr9b+SNn1DaUD2QEYtUxkS1J+bSJSn7ejdEexs7P8EYvA1CWkEdrDQ+8jVH3AgWlCNwjYsT1chjcNW9lA==",
+ "dev": true,
+ "requires": {
+ "jasmine-core": "^3.3"
+ }
+ },
+ "keyv": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/keyv/-/keyv-3.1.0.tgz",
+ "integrity": "sha512-9ykJ/46SN/9KPM/sichzQ7OvXyGDYKGTaDlKMGCAlg2UK8KRy4jb0d8sFc+0Tt0YYnThq8X2RZgCg74RPxgcVA==",
+ "dev": true,
+ "requires": {
+ "json-buffer": "3.0.0"
+ }
+ },
+ "kind-of": {
+ "version": "6.0.3",
+ "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-6.0.3.tgz",
+ "integrity": "sha512-dcS1ul+9tmeD95T+x28/ehLgd9mENa3LsvDTtzm3vyBEO7RPptvAD+t44WVXaUjTBRcrpFeFlC8WCruUR456hw==",
+ "dev": true
+ },
+ "levenshtein-edit-distance": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/levenshtein-edit-distance/-/levenshtein-edit-distance-1.0.0.tgz",
+ "integrity": "sha1-iVuvR4zOi1waDSfkXXwdl4pmHkk=",
+ "dev": true
+ },
+ "liftoff": {
+ "version": "2.5.0",
+ "resolved": "https://registry.npmjs.org/liftoff/-/liftoff-2.5.0.tgz",
+ "integrity": "sha1-IAkpG7Mc6oYbvxCnwVooyvdcMew=",
+ "dev": true,
+ "requires": {
+ "extend": "^3.0.0",
+ "findup-sync": "^2.0.0",
+ "fined": "^1.0.1",
+ "flagged-respawn": "^1.0.0",
+ "is-plain-object": "^2.0.4",
+ "object.map": "^1.0.0",
+ "rechoir": "^0.6.2",
+ "resolve": "^1.1.7"
+ },
+ "dependencies": {
+ "findup-sync": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/findup-sync/-/findup-sync-2.0.0.tgz",
+ "integrity": "sha1-kyaxSIwi0aYIhlCoaQGy2akKLLw=",
+ "dev": true,
+ "requires": {
+ "detect-file": "^1.0.0",
+ "is-glob": "^3.1.0",
+ "micromatch": "^3.0.4",
+ "resolve-dir": "^1.0.1"
+ }
+ }
+ }
+ },
+ "load-plugin": {
+ "version": "2.3.1",
+ "resolved": "https://registry.npmjs.org/load-plugin/-/load-plugin-2.3.1.tgz",
+ "integrity": "sha512-dYB1lbwqHgPTrruy9glukCu8Ya9vzj6TMfouCtj2H/GuJ+8syioisgKTBPxnCi6m8K8jINKfTOxOHngFkUYqHw==",
+ "dev": true,
+ "requires": {
+ "npm-prefix": "^1.2.0",
+ "resolve-from": "^5.0.0"
+ }
+ },
+ "lodash": {
+ "version": "4.17.19",
+ "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.19.tgz",
+ "integrity": "sha512-JNvd8XER9GQX0v2qJgsaN/mzFCNA5BRe/j8JN9d+tWyGLSodKQHKFicdwNYzWwI3wjRnaKPsGj1XkBjx/F96DQ==",
+ "dev": true
+ },
+ "lodash.difference": {
+ "version": "4.5.0",
+ "resolved": "https://registry.npmjs.org/lodash.difference/-/lodash.difference-4.5.0.tgz",
+ "integrity": "sha1-nMtOUF1Ia5FlE0V3KIWi3yf9AXw=",
+ "dev": true
+ },
+ "lodash.includes": {
+ "version": "4.3.0",
+ "resolved": "https://registry.npmjs.org/lodash.includes/-/lodash.includes-4.3.0.tgz",
+ "integrity": "sha1-YLuYqHy5I8aMoeUTJUgzFISfVT8=",
+ "dev": true
+ },
+ "lodash.intersection": {
+ "version": "4.4.0",
+ "resolved": "https://registry.npmjs.org/lodash.intersection/-/lodash.intersection-4.4.0.tgz",
+ "integrity": "sha1-ChG6Yx0OlcI8fy9Mu5ppLtF45wU=",
+ "dev": true
+ },
+ "log4js": {
+ "version": "6.3.0",
+ "resolved": "https://registry.npmjs.org/log4js/-/log4js-6.3.0.tgz",
+ "integrity": "sha512-Mc8jNuSFImQUIateBFwdOQcmC6Q5maU0VVvdC2R6XMb66/VnT+7WS4D/0EeNMZu1YODmJe5NIn2XftCzEocUgw==",
+ "dev": true,
+ "requires": {
+ "date-format": "^3.0.0",
+ "debug": "^4.1.1",
+ "flatted": "^2.0.1",
+ "rfdc": "^1.1.4",
+ "streamroller": "^2.2.4"
+ },
+ "dependencies": {
+ "debug": {
+ "version": "4.3.1",
+ "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.1.tgz",
+ "integrity": "sha512-doEwdvm4PCeK4K3RQN2ZC2BYUBaxwLARCqZmMjtF8a51J2Rb0xpVloFRnCODwqjpwnAoao4pelN8l3RJdv3gRQ==",
+ "dev": true,
+ "requires": {
+ "ms": "2.1.2"
+ }
+ },
+ "ms": {
+ "version": "2.1.2",
+ "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz",
+ "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==",
+ "dev": true
+ }
+ }
+ },
+ "longest-streak": {
+ "version": "2.0.4",
+ "resolved": "https://registry.npmjs.org/longest-streak/-/longest-streak-2.0.4.tgz",
+ "integrity": "sha512-vM6rUVCVUJJt33bnmHiZEvr7wPT78ztX7rojL+LW51bHtLh6HTjx84LA5W4+oa6aKEJA7jJu5LR6vQRBpA5DVg==",
+ "dev": true
+ },
+ "lowercase-keys": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/lowercase-keys/-/lowercase-keys-1.0.1.tgz",
+ "integrity": "sha512-G2Lj61tXDnVFFOi8VZds+SoQjtQC3dgokKdDG2mTm1tx4m50NUHBOZSBwQQHyy0V12A0JTG4icfZQH+xPyh8VA==",
+ "dev": true
+ },
+ "lru-cache": {
+ "version": "2.7.3",
+ "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-2.7.3.tgz",
+ "integrity": "sha1-bUUk6LlV+V1PW1iFHOId1y+06VI=",
+ "dev": true
+ },
+ "make-dir": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/make-dir/-/make-dir-3.1.0.tgz",
+ "integrity": "sha512-g3FeP20LNwhALb/6Cz6Dd4F2ngze0jz7tbzrD2wAV+o9FeNHe4rL+yK2md0J/fiSf1sa1ADhXqi5+oVwOM/eGw==",
+ "dev": true,
+ "requires": {
+ "semver": "^6.0.0"
+ }
+ },
+ "make-iterator": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/make-iterator/-/make-iterator-1.0.1.tgz",
+ "integrity": "sha512-pxiuXh0iVEq7VM7KMIhs5gxsfxCux2URptUQaXo4iZZJxBAzTPOLE2BumO5dbfVYq/hBJFBR/a1mFDmOx5AGmw==",
+ "dev": true,
+ "requires": {
+ "kind-of": "^6.0.2"
+ }
+ },
+ "map-age-cleaner": {
+ "version": "0.1.3",
+ "resolved": "https://registry.npmjs.org/map-age-cleaner/-/map-age-cleaner-0.1.3.tgz",
+ "integrity": "sha512-bJzx6nMoP6PDLPBFmg7+xRKeFZvFboMrGlxmNj9ClvX53KrmvM5bXFXEWjbz4cz1AFn+jWJ9z/DJSz7hrs0w3w==",
+ "dev": true,
+ "requires": {
+ "p-defer": "^1.0.0"
+ }
+ },
+ "map-cache": {
+ "version": "0.2.2",
+ "resolved": "https://registry.npmjs.org/map-cache/-/map-cache-0.2.2.tgz",
+ "integrity": "sha1-wyq9C9ZSXZsFFkW7TyasXcmKDb8=",
+ "dev": true
+ },
+ "map-visit": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/map-visit/-/map-visit-1.0.0.tgz",
+ "integrity": "sha1-7Nyo8TFE5mDxtb1B8S80edmN+48=",
+ "dev": true,
+ "requires": {
+ "object-visit": "^1.0.0"
+ }
+ },
+ "markdown-escapes": {
+ "version": "1.0.4",
+ "resolved": "https://registry.npmjs.org/markdown-escapes/-/markdown-escapes-1.0.4.tgz",
+ "integrity": "sha512-8z4efJYk43E0upd0NbVXwgSTQs6cT3T06etieCMEg7dRbzCbxUCK/GHlX8mhHRDcp+OLlHkPKsvqQTCvsRl2cg==",
+ "dev": true
+ },
+ "markdown-extensions": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/markdown-extensions/-/markdown-extensions-1.1.1.tgz",
+ "integrity": "sha512-WWC0ZuMzCyDHYCasEGs4IPvLyTGftYwh6wIEOULOF0HXcqZlhwRzrK0w2VUlxWA98xnvb/jszw4ZSkJ6ADpM6Q==",
+ "dev": true
+ },
+ "markdown-table": {
+ "version": "1.1.3",
+ "resolved": "https://registry.npmjs.org/markdown-table/-/markdown-table-1.1.3.tgz",
+ "integrity": "sha512-1RUZVgQlpJSPWYbFSpmudq5nHY1doEIv89gBtF0s4gW1GF2XorxcA/70M5vq7rLv0a6mhOUccRsqkwhwLCIQ2Q==",
+ "dev": true
+ },
+ "match-casing": {
+ "version": "1.0.3",
+ "resolved": "https://registry.npmjs.org/match-casing/-/match-casing-1.0.3.tgz",
+ "integrity": "sha512-oMyC3vUVCFbGu+M2Zxl212LPJThcaw7QxB5lFuJPQCgV/dsGBP0yZeCoLmX6CiBkoBcVbAKDJZrBpJVu0XcLMw==",
+ "dev": true
+ },
+ "mdast-comment-marker": {
+ "version": "1.1.2",
+ "resolved": "https://registry.npmjs.org/mdast-comment-marker/-/mdast-comment-marker-1.1.2.tgz",
+ "integrity": "sha512-vTFXtmbbF3rgnTh3Zl3irso4LtvwUq/jaDvT2D1JqTGAwaipcS7RpTxzi6KjoRqI9n2yuAhzLDAC8xVTF3XYVQ==",
+ "dev": true
+ },
+ "mdast-util-compact": {
+ "version": "1.0.4",
+ "resolved": "https://registry.npmjs.org/mdast-util-compact/-/mdast-util-compact-1.0.4.tgz",
+ "integrity": "sha512-3YDMQHI5vRiS2uygEFYaqckibpJtKq5Sj2c8JioeOQBU6INpKbdWzfyLqFFnDwEcEnRFIdMsguzs5pC1Jp4Isg==",
+ "dev": true,
+ "requires": {
+ "unist-util-visit": "^1.1.0"
+ }
+ },
+ "mdast-util-heading-style": {
+ "version": "1.0.6",
+ "resolved": "https://registry.npmjs.org/mdast-util-heading-style/-/mdast-util-heading-style-1.0.6.tgz",
+ "integrity": "sha512-8ZuuegRqS0KESgjAGW8zTx4tJ3VNIiIaGFNEzFpRSAQBavVc7AvOo9I4g3crcZBfYisHs4seYh0rAVimO6HyOw==",
+ "dev": true
+ },
+ "mdast-util-to-nlcst": {
+ "version": "3.2.3",
+ "resolved": "https://registry.npmjs.org/mdast-util-to-nlcst/-/mdast-util-to-nlcst-3.2.3.tgz",
+ "integrity": "sha512-hPIsgEg7zCvdU6/qvjcR6lCmJeRuIEpZGY5xBV+pqzuMOvQajyyF8b6f24f8k3Rw8u40GwkI3aAxUXr3bB2xag==",
+ "dev": true,
+ "requires": {
+ "nlcst-to-string": "^2.0.0",
+ "repeat-string": "^1.5.2",
+ "unist-util-position": "^3.0.0",
+ "vfile-location": "^2.0.0"
+ }
+ },
+ "mdast-util-to-string": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/mdast-util-to-string/-/mdast-util-to-string-1.1.0.tgz",
+ "integrity": "sha512-jVU0Nr2B9X3MU4tSK7JP1CMkSvOj7X5l/GboG1tKRw52lLF1x2Ju92Ms9tNetCcbfX3hzlM73zYo2NKkWSfF/A==",
+ "dev": true
+ },
+ "media-typer": {
+ "version": "0.3.0",
+ "resolved": "https://registry.npmjs.org/media-typer/-/media-typer-0.3.0.tgz",
+ "integrity": "sha1-hxDXrwqmJvj/+hzgAWhUUmMlV0g=",
+ "dev": true
+ },
+ "mem": {
+ "version": "4.3.0",
+ "resolved": "https://registry.npmjs.org/mem/-/mem-4.3.0.tgz",
+ "integrity": "sha512-qX2bG48pTqYRVmDB37rn/6PT7LcR8T7oAX3bf99u1Tt1nzxYfxkgqDwUwolPlXweM0XzBOBFzSx4kfp7KP1s/w==",
+ "dev": true,
+ "requires": {
+ "map-age-cleaner": "^0.1.1",
+ "mimic-fn": "^2.0.0",
+ "p-is-promise": "^2.0.0"
+ },
+ "dependencies": {
+ "mimic-fn": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/mimic-fn/-/mimic-fn-2.1.0.tgz",
+ "integrity": "sha512-OqbOk5oEQeAZ8WXWydlu9HJjz9WVdEIvamMCcXmuqUYjTknH/sqsWvhQ3vgwKFRR1HpjvNBKQ37nbJgYzGqGcg==",
+ "dev": true
+ }
+ }
+ },
+ "merge2": {
+ "version": "1.4.1",
+ "resolved": "https://registry.npmjs.org/merge2/-/merge2-1.4.1.tgz",
+ "integrity": "sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg==",
+ "dev": true
+ },
+ "micromatch": {
+ "version": "3.1.10",
+ "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-3.1.10.tgz",
+ "integrity": "sha512-MWikgl9n9M3w+bpsY3He8L+w9eF9338xRl8IAO5viDizwSzziFEyUzo2xrrloB64ADbTf8uA8vRqqttDTOmccg==",
+ "dev": true,
+ "requires": {
+ "arr-diff": "^4.0.0",
+ "array-unique": "^0.3.2",
+ "braces": "^2.3.1",
+ "define-property": "^2.0.2",
+ "extend-shallow": "^3.0.2",
+ "extglob": "^2.0.4",
+ "fragment-cache": "^0.2.1",
+ "kind-of": "^6.0.2",
+ "nanomatch": "^1.2.9",
+ "object.pick": "^1.3.0",
+ "regex-not": "^1.0.0",
+ "snapdragon": "^0.8.1",
+ "to-regex": "^3.0.2"
+ }
+ },
+ "mime": {
+ "version": "2.5.0",
+ "resolved": "https://registry.npmjs.org/mime/-/mime-2.5.0.tgz",
+ "integrity": "sha512-ft3WayFSFUVBuJj7BMLKAQcSlItKtfjsKDDsii3rqFDAZ7t11zRe8ASw/GlmivGwVUYtwkQrxiGGpL6gFvB0ag==",
+ "dev": true
+ },
+ "mime-db": {
+ "version": "1.45.0",
+ "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.45.0.tgz",
+ "integrity": "sha512-CkqLUxUk15hofLoLyljJSrukZi8mAtgd+yE5uO4tqRZsdsAJKv0O+rFMhVDRJgozy+yG6md5KwuXhD4ocIoP+w==",
+ "dev": true
+ },
+ "mime-types": {
+ "version": "2.1.28",
+ "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.28.tgz",
+ "integrity": "sha512-0TO2yJ5YHYr7M2zzT7gDU1tbwHxEUWBCLt0lscSNpcdAfFyJOVEpRYNS7EXVcTLNj/25QO8gulHC5JtTzSE2UQ==",
+ "dev": true,
+ "requires": {
+ "mime-db": "1.45.0"
+ }
+ },
+ "mimic-fn": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/mimic-fn/-/mimic-fn-1.2.0.tgz",
+ "integrity": "sha512-jf84uxzwiuiIVKiOLpfYk7N46TSy8ubTonmneY9vrpHNAnp0QBt2BxWV9dO3/j+BoVAb+a5G6YDPW3M5HOdMWQ==",
+ "dev": true
+ },
+ "mimic-response": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/mimic-response/-/mimic-response-1.0.1.tgz",
+ "integrity": "sha512-j5EctnkH7amfV/q5Hgmoal1g2QHFJRraOtmx0JpIqkxhBhI/lJSl1nMpQ45hVarwNETOoWEimndZ4QK0RHxuxQ==",
+ "dev": true
+ },
+ "minimatch": {
+ "version": "3.0.4",
+ "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.0.4.tgz",
+ "integrity": "sha512-yJHVQEhyqPLUTgt9B83PXu6W3rx4MvvHvSUvToogpwoGDOUQ+yDrR0HRot+yOCdCO7u4hX3pWft6kWBBcqh0UA==",
+ "dev": true,
+ "requires": {
+ "brace-expansion": "^1.1.7"
+ }
+ },
+ "minimist": {
+ "version": "1.2.5",
+ "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.5.tgz",
+ "integrity": "sha512-FM9nNUYrRBAELZQT3xeZQ7fmMOBg6nWNmJKTcgsJeaLstP/UODVpGsr5OhXhhXg6f+qtJ8uiZ+PUxkDWcgIXLw==",
+ "dev": true
+ },
+ "mixin-deep": {
+ "version": "1.3.2",
+ "resolved": "https://registry.npmjs.org/mixin-deep/-/mixin-deep-1.3.2.tgz",
+ "integrity": "sha512-WRoDn//mXBiJ1H40rqa3vH0toePwSsGb45iInWlTySa+Uu4k3tYUSxa2v1KqAiLtvlrSzaExqS1gtk96A9zvEA==",
+ "dev": true,
+ "requires": {
+ "for-in": "^1.0.2",
+ "is-extendable": "^1.0.1"
+ },
+ "dependencies": {
+ "is-extendable": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/is-extendable/-/is-extendable-1.0.1.tgz",
+ "integrity": "sha512-arnXMxT1hhoKo9k1LZdmlNyJdDDfy2v0fXjFlmok4+i8ul/6WlbVge9bhM74OpNPQPMGUToDtz+KXa1PneJxOA==",
+ "dev": true,
+ "requires": {
+ "is-plain-object": "^2.0.4"
+ }
+ }
+ }
+ },
+ "mkdirp": {
+ "version": "0.5.5",
+ "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-0.5.5.tgz",
+ "integrity": "sha512-NKmAlESf6jMGym1++R0Ra7wvhV+wFW63FaSOFPwRahvea0gMUcGUhVeAg/0BC0wiv9ih5NYPB1Wn1UEI1/L+xQ==",
+ "dev": true,
+ "requires": {
+ "minimist": "^1.2.5"
+ }
+ },
+ "ms": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz",
+ "integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g=",
+ "dev": true
+ },
+ "nanomatch": {
+ "version": "1.2.13",
+ "resolved": "https://registry.npmjs.org/nanomatch/-/nanomatch-1.2.13.tgz",
+ "integrity": "sha512-fpoe2T0RbHwBTBUOftAfBPaDEi06ufaUai0mE6Yn1kacc3SnTErfb/h+X94VXzI64rKFHYImXSvdwGGCmwOqCA==",
+ "dev": true,
+ "requires": {
+ "arr-diff": "^4.0.0",
+ "array-unique": "^0.3.2",
+ "define-property": "^2.0.2",
+ "extend-shallow": "^3.0.2",
+ "fragment-cache": "^0.2.1",
+ "is-windows": "^1.0.2",
+ "kind-of": "^6.0.2",
+ "object.pick": "^1.3.0",
+ "regex-not": "^1.0.0",
+ "snapdragon": "^0.8.1",
+ "to-regex": "^3.0.1"
+ }
+ },
+ "negotiator": {
+ "version": "0.6.2",
+ "resolved": "https://registry.npmjs.org/negotiator/-/negotiator-0.6.2.tgz",
+ "integrity": "sha512-hZXc7K2e+PgeI1eDBe/10Ard4ekbfrrqG8Ep+8Jmf4JID2bNg7NvCPOZN+kfF574pFQI7mum2AUqDidoKqcTOw==",
+ "dev": true
+ },
+ "nlcst-is-literal": {
+ "version": "1.2.1",
+ "resolved": "https://registry.npmjs.org/nlcst-is-literal/-/nlcst-is-literal-1.2.1.tgz",
+ "integrity": "sha512-abNv1XY7TUoyLn5kSSorMIYHfRvVfXbgftNFNvEMiQQkyKteLdjrGuDqEMMyK70sMbn7uPA6oUbRvykM6pg+pg==",
+ "dev": true,
+ "requires": {
+ "nlcst-to-string": "^2.0.0"
+ }
+ },
+ "nlcst-normalize": {
+ "version": "2.1.4",
+ "resolved": "https://registry.npmjs.org/nlcst-normalize/-/nlcst-normalize-2.1.4.tgz",
+ "integrity": "sha512-dWJ3XUoAoWoau24xOM59Y1FPozv7DyYWy+rdUaXj9Ow0hBCVuwqDQbXzTF7H+HskyTVpTkRPXYPu4YsMEScmRw==",
+ "dev": true,
+ "requires": {
+ "nlcst-to-string": "^2.0.0"
+ }
+ },
+ "nlcst-search": {
+ "version": "1.5.1",
+ "resolved": "https://registry.npmjs.org/nlcst-search/-/nlcst-search-1.5.1.tgz",
+ "integrity": "sha512-G3ws0fgNlVsUvHvA2G1PTjyxzGOJ0caI0+WOvlZzev5iSUTX+R1q4lnlL4Y7E+he4ZMUW/0FMn9rYwdYon/13g==",
+ "dev": true,
+ "requires": {
+ "nlcst-is-literal": "^1.1.0",
+ "nlcst-normalize": "^2.1.0",
+ "unist-util-visit": "^1.0.0"
+ }
+ },
+ "nlcst-to-string": {
+ "version": "2.0.4",
+ "resolved": "https://registry.npmjs.org/nlcst-to-string/-/nlcst-to-string-2.0.4.tgz",
+ "integrity": "sha512-3x3jwTd6UPG7vi5k4GEzvxJ5rDA7hVUIRNHPblKuMVP9Z3xmlsd9cgLcpAMkc5uPOBna82EeshROFhsPkbnTZg==",
+ "dev": true
+ },
+ "no-cliches": {
+ "version": "0.2.2",
+ "resolved": "https://registry.npmjs.org/no-cliches/-/no-cliches-0.2.2.tgz",
+ "integrity": "sha512-iEOqDAOFl6uN5jZGRj39Jdo8qALzf2HPXtpFso8+BMaDylDrUMYMwhFbfYGgxdnMlsRnxYTwv68kaXEpsHIapg==",
+ "dev": true
+ },
+ "nopt": {
+ "version": "3.0.6",
+ "resolved": "https://registry.npmjs.org/nopt/-/nopt-3.0.6.tgz",
+ "integrity": "sha1-xkZdvwirzU2zWTF/eaxopkayj/k=",
+ "dev": true,
+ "requires": {
+ "abbrev": "1"
+ }
+ },
+ "normalize-path": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/normalize-path/-/normalize-path-3.0.0.tgz",
+ "integrity": "sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==",
+ "dev": true
+ },
+ "normalize-strings": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/normalize-strings/-/normalize-strings-1.1.0.tgz",
+ "integrity": "sha1-skp17UgXY5RAT3Rkm1y4BA6JmnU=",
+ "dev": true
+ },
+ "normalize-url": {
+ "version": "4.5.0",
+ "resolved": "https://registry.npmjs.org/normalize-url/-/normalize-url-4.5.0.tgz",
+ "integrity": "sha512-2s47yzUxdexf1OhyRi4Em83iQk0aPvwTddtFz4hnSSw9dCEsLEGf6SwIO8ss/19S9iBb5sJaOuTvTGDeZI00BQ==",
+ "dev": true
+ },
+ "npm-prefix": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/npm-prefix/-/npm-prefix-1.2.0.tgz",
+ "integrity": "sha1-5hlFX3B0ulTMZtbQ033Z8b5ry8A=",
+ "dev": true,
+ "requires": {
+ "rc": "^1.1.0",
+ "shellsubstitute": "^1.1.0",
+ "untildify": "^2.1.0"
+ }
+ },
+ "nspell": {
+ "version": "2.1.2",
+ "resolved": "https://registry.npmjs.org/nspell/-/nspell-2.1.2.tgz",
+ "integrity": "sha512-j79L4A5aJSiswMUJ/6BW8+7ytgBUVce5BJX6xq8LtvKQpU96CMB340/yMeREH9U+gB/8dk4ctTn62DLiPMAXsA==",
+ "dev": true,
+ "requires": {
+ "is-buffer": "^2.0.0"
+ },
+ "dependencies": {
+ "is-buffer": {
+ "version": "2.0.4",
+ "resolved": "https://registry.npmjs.org/is-buffer/-/is-buffer-2.0.4.tgz",
+ "integrity": "sha512-Kq1rokWXOPXWuaMAqZiJW4XxsmD9zGx9q4aePabbn3qCRGedtH7Cm+zV8WETitMfu1wdh+Rvd6w5egwSngUX2A==",
+ "dev": true
+ }
+ }
+ },
+ "number-to-words": {
+ "version": "1.2.4",
+ "resolved": "https://registry.npmjs.org/number-to-words/-/number-to-words-1.2.4.tgz",
+ "integrity": "sha512-/fYevVkXRcyBiZDg6yzZbm0RuaD6i0qRfn8yr+6D0KgBMOndFPxuW10qCHpzs50nN8qKuv78k8MuotZhcVX6Pw==",
+ "dev": true
+ },
+ "object-assign": {
+ "version": "4.1.1",
+ "resolved": "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz",
+ "integrity": "sha1-IQmtx5ZYh8/AXLvUQsrIv7s2CGM=",
+ "dev": true
+ },
+ "object-copy": {
+ "version": "0.1.0",
+ "resolved": "https://registry.npmjs.org/object-copy/-/object-copy-0.1.0.tgz",
+ "integrity": "sha1-fn2Fi3gb18mRpBupde04EnVOmYw=",
+ "dev": true,
+ "requires": {
+ "copy-descriptor": "^0.1.0",
+ "define-property": "^0.2.5",
+ "kind-of": "^3.0.3"
+ },
+ "dependencies": {
+ "define-property": {
+ "version": "0.2.5",
+ "resolved": "https://registry.npmjs.org/define-property/-/define-property-0.2.5.tgz",
+ "integrity": "sha1-w1se+RjsPJkPmlvFe+BKrOxcgRY=",
+ "dev": true,
+ "requires": {
+ "is-descriptor": "^0.1.0"
+ }
+ },
+ "kind-of": {
+ "version": "3.2.2",
+ "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz",
+ "integrity": "sha1-MeohpzS6ubuw8yRm2JOupR5KPGQ=",
+ "dev": true,
+ "requires": {
+ "is-buffer": "^1.1.5"
+ }
+ }
+ }
+ },
+ "object-keys": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/object-keys/-/object-keys-1.1.1.tgz",
+ "integrity": "sha512-NuAESUOUMrlIXOfHKzD6bpPu3tYt3xvjNdRIQ+FeT0lNb4K8WR70CaDxhuNguS2XG+GjkyMwOzsN5ZktImfhLA==",
+ "dev": true
+ },
+ "object-visit": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/object-visit/-/object-visit-1.0.1.tgz",
+ "integrity": "sha1-95xEk68MU3e1n+OdOV5BBC3QRbs=",
+ "dev": true,
+ "requires": {
+ "isobject": "^3.0.0"
+ }
+ },
+ "object.defaults": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/object.defaults/-/object.defaults-1.1.0.tgz",
+ "integrity": "sha1-On+GgzS0B96gbaFtiNXNKeQ1/s8=",
+ "dev": true,
+ "requires": {
+ "array-each": "^1.0.1",
+ "array-slice": "^1.0.0",
+ "for-own": "^1.0.0",
+ "isobject": "^3.0.0"
+ }
+ },
+ "object.map": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/object.map/-/object.map-1.0.1.tgz",
+ "integrity": "sha1-z4Plncj8wK1fQlDh94s7gb2AHTc=",
+ "dev": true,
+ "requires": {
+ "for-own": "^1.0.0",
+ "make-iterator": "^1.0.0"
+ }
+ },
+ "object.pick": {
+ "version": "1.3.0",
+ "resolved": "https://registry.npmjs.org/object.pick/-/object.pick-1.3.0.tgz",
+ "integrity": "sha1-h6EKxMFpS9Lhy/U1kaZhQftd10c=",
+ "dev": true,
+ "requires": {
+ "isobject": "^3.0.1"
+ }
+ },
+ "on-finished": {
+ "version": "2.3.0",
+ "resolved": "https://registry.npmjs.org/on-finished/-/on-finished-2.3.0.tgz",
+ "integrity": "sha1-IPEzZIGwg811M3mSoWlxqi2QaUc=",
+ "dev": true,
+ "requires": {
+ "ee-first": "1.1.1"
+ }
+ },
+ "once": {
+ "version": "1.4.0",
+ "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz",
+ "integrity": "sha1-WDsap3WWHUsROsF9nFC6753Xa9E=",
+ "dev": true,
+ "requires": {
+ "wrappy": "1"
+ }
+ },
+ "os-homedir": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/os-homedir/-/os-homedir-1.0.2.tgz",
+ "integrity": "sha1-/7xJiDNuDoM94MFox+8VISGqf7M=",
+ "dev": true
+ },
+ "os-tmpdir": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/os-tmpdir/-/os-tmpdir-1.0.2.tgz",
+ "integrity": "sha1-u+Z0BseaqFxc/sdm/lc0VV36EnQ=",
+ "dev": true
+ },
+ "osenv": {
+ "version": "0.1.5",
+ "resolved": "https://registry.npmjs.org/osenv/-/osenv-0.1.5.tgz",
+ "integrity": "sha512-0CWcCECdMVc2Rw3U5w9ZjqX6ga6ubk1xDVKxtBQPK7wis/0F2r9T6k4ydGYhecl7YUBxBVxhL5oisPsNxAPe2g==",
+ "dev": true,
+ "requires": {
+ "os-homedir": "^1.0.0",
+ "os-tmpdir": "^1.0.0"
+ }
+ },
+ "p-any": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/p-any/-/p-any-2.1.0.tgz",
+ "integrity": "sha512-JAERcaMBLYKMq+voYw36+x5Dgh47+/o7yuv2oQYuSSUml4YeqJEFznBrY2UeEkoSHqBua6hz518n/PsowTYLLg==",
+ "dev": true,
+ "requires": {
+ "p-cancelable": "^2.0.0",
+ "p-some": "^4.0.0",
+ "type-fest": "^0.3.0"
+ },
+ "dependencies": {
+ "p-cancelable": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/p-cancelable/-/p-cancelable-2.0.0.tgz",
+ "integrity": "sha512-wvPXDmbMmu2ksjkB4Z3nZWTSkJEb9lqVdMaCKpZUGJG9TMiNp9XcbG3fn9fPKjem04fJMJnXoyFPk2FmgiaiNg==",
+ "dev": true
+ }
+ }
+ },
+ "p-cancelable": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/p-cancelable/-/p-cancelable-1.1.0.tgz",
+ "integrity": "sha512-s73XxOZ4zpt1edZYZzvhqFa6uvQc1vwUa0K0BdtIZgQMAJj9IbebH+JkgKZc9h+B05PKHLOTl4ajG1BmNrVZlw==",
+ "dev": true
+ },
+ "p-defer": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/p-defer/-/p-defer-1.0.0.tgz",
+ "integrity": "sha1-n26xgvbJqozXQwBKfU+WsZaw+ww=",
+ "dev": true
+ },
+ "p-finally": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/p-finally/-/p-finally-1.0.0.tgz",
+ "integrity": "sha1-P7z7FbiZpEEjs0ttzBi3JDNqLK4=",
+ "dev": true
+ },
+ "p-is-promise": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/p-is-promise/-/p-is-promise-2.1.0.tgz",
+ "integrity": "sha512-Y3W0wlRPK8ZMRbNq97l4M5otioeA5lm1z7bkNkxCka8HSPjR0xRWmpCmc9utiaLP9Jb1eD8BgeIxTW4AIF45Pg==",
+ "dev": true
+ },
+ "p-map": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/p-map/-/p-map-2.1.0.tgz",
+ "integrity": "sha512-y3b8Kpd8OAN444hxfBbFfj1FY/RjtTd8tzYwhUqNYXx0fXx2iX4maP4Qr6qhIKbQXI02wTLAda4fYUbDagTUFw==",
+ "dev": true
+ },
+ "p-memoize": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/p-memoize/-/p-memoize-2.1.0.tgz",
+ "integrity": "sha512-c6+a2iV4JyX0r4+i2IBJYO0r6LZAT2fg/tcB6GQbv1uzZsfsmKT7Ej5DRT1G6Wi7XUJSV2ZiP9+YEtluvhCmkg==",
+ "dev": true,
+ "requires": {
+ "mem": "^4.0.0",
+ "mimic-fn": "^1.0.0"
+ }
+ },
+ "p-some": {
+ "version": "4.1.0",
+ "resolved": "https://registry.npmjs.org/p-some/-/p-some-4.1.0.tgz",
+ "integrity": "sha512-MF/HIbq6GeBqTrTIl5OJubzkGU+qfFhAFi0gnTAK6rgEIJIknEiABHOTtQu4e6JiXjIwuMPMUFQzyHh5QjCl1g==",
+ "dev": true,
+ "requires": {
+ "aggregate-error": "^3.0.0",
+ "p-cancelable": "^2.0.0"
+ },
+ "dependencies": {
+ "p-cancelable": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/p-cancelable/-/p-cancelable-2.0.0.tgz",
+ "integrity": "sha512-wvPXDmbMmu2ksjkB4Z3nZWTSkJEb9lqVdMaCKpZUGJG9TMiNp9XcbG3fn9fPKjem04fJMJnXoyFPk2FmgiaiNg==",
+ "dev": true
+ }
+ }
+ },
+ "p-timeout": {
+ "version": "3.2.0",
+ "resolved": "https://registry.npmjs.org/p-timeout/-/p-timeout-3.2.0.tgz",
+ "integrity": "sha512-rhIwUycgwwKcP9yTOOFK/AKsAopjjCakVqLHePO3CC6Mir1Z99xT+R63jZxAT5lFZLa2inS5h+ZS2GvR99/FBg==",
+ "dev": true,
+ "requires": {
+ "p-finally": "^1.0.0"
+ }
+ },
+ "parse-english": {
+ "version": "4.1.3",
+ "resolved": "https://registry.npmjs.org/parse-english/-/parse-english-4.1.3.tgz",
+ "integrity": "sha512-IQl1v/ik9gw437T8083coohMihae0rozpc7JYC/9h6hi9xKBSxFwh5HWRpzVC2ZhEs2nUlze2aAktpNBJXdJKA==",
+ "dev": true,
+ "requires": {
+ "nlcst-to-string": "^2.0.0",
+ "parse-latin": "^4.0.0",
+ "unist-util-modify-children": "^1.0.0",
+ "unist-util-visit-children": "^1.0.0"
+ }
+ },
+ "parse-entities": {
+ "version": "1.2.2",
+ "resolved": "https://registry.npmjs.org/parse-entities/-/parse-entities-1.2.2.tgz",
+ "integrity": "sha512-NzfpbxW/NPrzZ/yYSoQxyqUZMZXIdCfE0OIN4ESsnptHJECoUk3FZktxNuzQf4tjt5UEopnxpYJbvYuxIFDdsg==",
+ "dev": true,
+ "requires": {
+ "character-entities": "^1.0.0",
+ "character-entities-legacy": "^1.0.0",
+ "character-reference-invalid": "^1.0.0",
+ "is-alphanumerical": "^1.0.0",
+ "is-decimal": "^1.0.0",
+ "is-hexadecimal": "^1.0.0"
+ }
+ },
+ "parse-filepath": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/parse-filepath/-/parse-filepath-1.0.2.tgz",
+ "integrity": "sha1-pjISf1Oq89FYdvWHLz/6x2PWyJE=",
+ "dev": true,
+ "requires": {
+ "is-absolute": "^1.0.0",
+ "map-cache": "^0.2.0",
+ "path-root": "^0.1.1"
+ }
+ },
+ "parse-json": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/parse-json/-/parse-json-4.0.0.tgz",
+ "integrity": "sha1-vjX1Qlvh9/bHRxhPmKeIy5lHfuA=",
+ "dev": true,
+ "requires": {
+ "error-ex": "^1.3.1",
+ "json-parse-better-errors": "^1.0.1"
+ }
+ },
+ "parse-latin": {
+ "version": "4.2.1",
+ "resolved": "https://registry.npmjs.org/parse-latin/-/parse-latin-4.2.1.tgz",
+ "integrity": "sha512-7T9g6mIsFFpLlo0Zzb2jLWdCt+H9Qtf/hRmMYFi/Mq6Ovi+YKo+AyDFX3OhFfu0vXX5Nid9FKJGKSSzNcTkWiA==",
+ "dev": true,
+ "requires": {
+ "nlcst-to-string": "^2.0.0",
+ "unist-util-modify-children": "^1.0.0",
+ "unist-util-visit-children": "^1.0.0"
+ }
+ },
+ "parse-passwd": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/parse-passwd/-/parse-passwd-1.0.0.tgz",
+ "integrity": "sha1-bVuTSkVpk7I9N/QKOC1vFmao5cY=",
+ "dev": true
+ },
+ "parseurl": {
+ "version": "1.3.3",
+ "resolved": "https://registry.npmjs.org/parseurl/-/parseurl-1.3.3.tgz",
+ "integrity": "sha512-CiyeOxFT/JZyN5m0z9PfXw4SCBJ6Sygz1Dpl0wqjlhDEGGBP1GnsUVEL0p63hoG1fcj3fHynXi9NYO4nWOL+qQ==",
+ "dev": true
+ },
+ "pascalcase": {
+ "version": "0.1.1",
+ "resolved": "https://registry.npmjs.org/pascalcase/-/pascalcase-0.1.1.tgz",
+ "integrity": "sha1-s2PlXoAGym/iF4TS2yK9FdeRfxQ=",
+ "dev": true
+ },
+ "passive-voice": {
+ "version": "0.1.0",
+ "resolved": "https://registry.npmjs.org/passive-voice/-/passive-voice-0.1.0.tgz",
+ "integrity": "sha1-Fv+RrkC6DpLEPmcXY/3IQqcCcLE=",
+ "dev": true
+ },
+ "path-dirname": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/path-dirname/-/path-dirname-1.0.2.tgz",
+ "integrity": "sha1-zDPSTVJeCZpTiMAzbG4yuRYGCeA=",
+ "dev": true
+ },
+ "path-is-absolute": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz",
+ "integrity": "sha1-F0uSaHNVNP+8es5r9TpanhtcX18=",
+ "dev": true
+ },
+ "path-parse": {
+ "version": "1.0.6",
+ "resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.6.tgz",
+ "integrity": "sha512-GSmOT2EbHrINBf9SR7CDELwlJ8AENk3Qn7OikK4nFYAu3Ote2+JYNVvkpAEQm3/TLNEJFD/xZJjzyxg3KBWOzw==",
+ "dev": true
+ },
+ "path-root": {
+ "version": "0.1.1",
+ "resolved": "https://registry.npmjs.org/path-root/-/path-root-0.1.1.tgz",
+ "integrity": "sha1-mkpoFMrBwM1zNgqV8yCDyOpHRbc=",
+ "dev": true,
+ "requires": {
+ "path-root-regex": "^0.1.0"
+ }
+ },
+ "path-root-regex": {
+ "version": "0.1.2",
+ "resolved": "https://registry.npmjs.org/path-root-regex/-/path-root-regex-0.1.2.tgz",
+ "integrity": "sha1-v8zcjfWxLcUsi0PsONGNcsBLqW0=",
+ "dev": true
+ },
+ "picomatch": {
+ "version": "2.2.2",
+ "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.2.2.tgz",
+ "integrity": "sha512-q0M/9eZHzmr0AulXyPwNfZjtwZ/RBZlbN3K3CErVrk50T2ASYI7Bye0EvekFY3IP1Nt2DHu0re+V2ZHIpMkuWg==",
+ "dev": true
+ },
+ "plur": {
+ "version": "3.1.1",
+ "resolved": "https://registry.npmjs.org/plur/-/plur-3.1.1.tgz",
+ "integrity": "sha512-t1Ax8KUvV3FFII8ltczPn2tJdjqbd1sIzu6t4JL7nQ3EyeL/lTrj5PWKb06ic5/6XYDr65rQ4uzQEGN70/6X5w==",
+ "dev": true,
+ "requires": {
+ "irregular-plurals": "^2.0.0"
+ }
+ },
+ "pluralize": {
+ "version": "8.0.0",
+ "resolved": "https://registry.npmjs.org/pluralize/-/pluralize-8.0.0.tgz",
+ "integrity": "sha512-Nc3IT5yHzflTfbjgqWcCPpo7DaKy4FnpB0l/zCAW0Tc7jxAiuqSxHasntB3D7887LSrA93kDJ9IXovxJYxyLCA==",
+ "dev": true
+ },
+ "posix-character-classes": {
+ "version": "0.1.1",
+ "resolved": "https://registry.npmjs.org/posix-character-classes/-/posix-character-classes-0.1.1.tgz",
+ "integrity": "sha1-AerA/jta9xoqbAL+q7jB/vfgDqs=",
+ "dev": true
+ },
+ "prepend-http": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/prepend-http/-/prepend-http-2.0.0.tgz",
+ "integrity": "sha1-6SQ0v6XqjBn0HN/UAddBo8gZ2Jc=",
+ "dev": true
+ },
+ "propose": {
+ "version": "0.0.5",
+ "resolved": "https://registry.npmjs.org/propose/-/propose-0.0.5.tgz",
+ "integrity": "sha1-SKBl2ex9TIZn9AULFcSi2F28pWs=",
+ "dev": true,
+ "requires": {
+ "levenshtein-edit-distance": "^1.0.0"
+ }
+ },
+ "public-ip": {
+ "version": "4.0.2",
+ "resolved": "https://registry.npmjs.org/public-ip/-/public-ip-4.0.2.tgz",
+ "integrity": "sha512-ZHqUjaYT/+FuSiy5/o2gBxvj0PF7M3MXGnaLJBsJNMCyXI4jzuXXHJKrk0gDxx1apiF/jYsBwjTQOM9V8G6oCQ==",
+ "dev": true,
+ "requires": {
+ "dns-socket": "^4.2.1",
+ "got": "^9.6.0",
+ "is-ip": "^3.1.0"
+ }
+ },
+ "pump": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/pump/-/pump-3.0.0.tgz",
+ "integrity": "sha512-LwZy+p3SFs1Pytd/jYct4wpv49HiYCqd9Rlc5ZVdk0V+8Yzv6jR5Blk3TRmPL1ft69TxP0IMZGJ+WPFU2BFhww==",
+ "dev": true,
+ "requires": {
+ "end-of-stream": "^1.1.0",
+ "once": "^1.3.1"
+ }
+ },
+ "qjobs": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/qjobs/-/qjobs-1.2.0.tgz",
+ "integrity": "sha512-8YOJEHtxpySA3fFDyCRxA+UUV+fA+rTWnuWvylOK/NCjhY+b4ocCtmu8TtsWb+mYeU+GCHf/S66KZF/AsteKHg==",
+ "dev": true
+ },
+ "qs": {
+ "version": "6.7.0",
+ "resolved": "https://registry.npmjs.org/qs/-/qs-6.7.0.tgz",
+ "integrity": "sha512-VCdBRNFTX1fyE7Nb6FYoURo/SPe62QCaAyzJvUjwRaIsc+NePBEniHlvxFmmX56+HZphIGtV0XeCirBtpDrTyQ==",
+ "dev": true
+ },
+ "quotation": {
+ "version": "1.1.3",
+ "resolved": "https://registry.npmjs.org/quotation/-/quotation-1.1.3.tgz",
+ "integrity": "sha512-45gUgmX/RtQOQV1kwM06boP49OYXcKCPrYwdmAvs5YqkpiobhNKKwo524JM6Ma0ko3oN9tXNcWs9+ABq3Ry7YA==",
+ "dev": true
+ },
+ "range-parser": {
+ "version": "1.2.1",
+ "resolved": "https://registry.npmjs.org/range-parser/-/range-parser-1.2.1.tgz",
+ "integrity": "sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg==",
+ "dev": true
+ },
+ "raw-body": {
+ "version": "2.4.0",
+ "resolved": "https://registry.npmjs.org/raw-body/-/raw-body-2.4.0.tgz",
+ "integrity": "sha512-4Oz8DUIwdvoa5qMJelxipzi/iJIi40O5cGV1wNYp5hvZP8ZN0T+jiNkL0QepXs+EsQ9XJ8ipEDoiH70ySUJP3Q==",
+ "dev": true,
+ "requires": {
+ "bytes": "3.1.0",
+ "http-errors": "1.7.2",
+ "iconv-lite": "0.4.24",
+ "unpipe": "1.0.0"
+ }
+ },
+ "rc": {
+ "version": "1.2.8",
+ "resolved": "https://registry.npmjs.org/rc/-/rc-1.2.8.tgz",
+ "integrity": "sha512-y3bGgqKj3QBdxLbLkomlohkvsA8gdAiUQlSBJnBhfn+BPxg4bc62d8TcBW15wavDfgexCgccckhcZvywyQYPOw==",
+ "dev": true,
+ "requires": {
+ "deep-extend": "^0.6.0",
+ "ini": "~1.3.0",
+ "minimist": "^1.2.0",
+ "strip-json-comments": "~2.0.1"
+ }
+ },
+ "readable-stream": {
+ "version": "3.6.0",
+ "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-3.6.0.tgz",
+ "integrity": "sha512-BViHy7LKeTz4oNnkcLJ+lVSL6vpiFeX6/d3oSH8zCW7UxP2onchk+vTGB143xuFjHS3deTgkKoXXymXqymiIdA==",
+ "dev": true,
+ "requires": {
+ "inherits": "^2.0.3",
+ "string_decoder": "^1.1.1",
+ "util-deprecate": "^1.0.1"
+ }
+ },
+ "readdirp": {
+ "version": "3.4.0",
+ "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-3.4.0.tgz",
+ "integrity": "sha512-0xe001vZBnJEK+uKcj8qOhyAKPzIT+gStxWr3LCB0DwcXR5NZJ3IaC+yGnHCYzB/S7ov3m3EEbZI2zeNvX+hGQ==",
+ "dev": true,
+ "requires": {
+ "picomatch": "^2.2.1"
+ }
+ },
+ "rechoir": {
+ "version": "0.6.2",
+ "resolved": "https://registry.npmjs.org/rechoir/-/rechoir-0.6.2.tgz",
+ "integrity": "sha1-hSBLVNuoLVdC4oyWdW70OvUOM4Q=",
+ "dev": true,
+ "requires": {
+ "resolve": "^1.1.6"
+ }
+ },
+ "regex-not": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/regex-not/-/regex-not-1.0.2.tgz",
+ "integrity": "sha512-J6SDjUgDxQj5NusnOtdFxDwN/+HWykR8GELwctJ7mdqhcyy1xEc4SRFHUXvxTp661YaVKAjfRLZ9cCqS6tn32A==",
+ "dev": true,
+ "requires": {
+ "extend-shallow": "^3.0.2",
+ "safe-regex": "^1.1.0"
+ }
+ },
+ "remark": {
+ "version": "11.0.2",
+ "resolved": "https://registry.npmjs.org/remark/-/remark-11.0.2.tgz",
+ "integrity": "sha512-bh+eJgn8wgmbHmIBOuwJFdTVRVpl3fcVP6HxmpPWO0ULGP9Qkh6INJh0N5Uy7GqlV7DQYGoqaKiEIpM5LLvJ8w==",
+ "dev": true,
+ "requires": {
+ "remark-parse": "^7.0.0",
+ "remark-stringify": "^7.0.0",
+ "unified": "^8.2.0"
+ }
+ },
+ "remark-cli": {
+ "version": "7.0.1",
+ "resolved": "https://registry.npmjs.org/remark-cli/-/remark-cli-7.0.1.tgz",
+ "integrity": "sha512-CUjBLLSbEay0mNwOO+pptnLIoS8UB6cHlhZVpTRKbtbIcw6YEzEfD7jGjW1HCA8lZK87IfY3/DuWE6DlXu+hfg==",
+ "dev": true,
+ "requires": {
+ "markdown-extensions": "^1.1.0",
+ "remark": "^11.0.0",
+ "unified-args": "^7.0.0"
+ }
+ },
+ "remark-frontmatter": {
+ "version": "1.3.3",
+ "resolved": "https://registry.npmjs.org/remark-frontmatter/-/remark-frontmatter-1.3.3.tgz",
+ "integrity": "sha512-fM5eZPBvu2pVNoq3ZPW22q+5Ativ1oLozq2qYt9I2oNyxiUd/tDl0iLLntEVAegpZIslPWg1brhcP1VsaSVUag==",
+ "dev": true,
+ "requires": {
+ "fault": "^1.0.1",
+ "xtend": "^4.0.1"
+ }
+ },
+ "remark-lint": {
+ "version": "6.0.6",
+ "resolved": "https://registry.npmjs.org/remark-lint/-/remark-lint-6.0.6.tgz",
+ "integrity": "sha512-JBY6zz5fYQFN724Vq6VeiHwhyjVIlrww/dE1+hWGcDyUuz7YNCqwZKwBdQGDvslICkzHw/wEExNEb8D4PNiLlA==",
+ "dev": true,
+ "requires": {
+ "remark-message-control": "^4.0.0"
+ }
+ },
+ "remark-lint-blockquote-indentation": {
+ "version": "1.0.4",
+ "resolved": "https://registry.npmjs.org/remark-lint-blockquote-indentation/-/remark-lint-blockquote-indentation-1.0.4.tgz",
+ "integrity": "sha512-ExcDP7lufshEBNkVddSHa+Bz/97PtFstIniQ8ZF2TahHPmpx92z3mkI/nXL2Qt5d3B09eTVvh4Pvhgp6x2470g==",
+ "dev": true,
+ "requires": {
+ "mdast-util-to-string": "^1.0.2",
+ "plur": "^3.0.0",
+ "unified-lint-rule": "^1.0.0",
+ "unist-util-generated": "^1.1.0",
+ "unist-util-position": "^3.0.0",
+ "unist-util-visit": "^1.1.1"
+ }
+ },
+ "remark-lint-checkbox-character-style": {
+ "version": "1.0.4",
+ "resolved": "https://registry.npmjs.org/remark-lint-checkbox-character-style/-/remark-lint-checkbox-character-style-1.0.4.tgz",
+ "integrity": "sha512-tpuOtk7URmE6EIBgqklB4iCl00GabkemqEUFWxt69IIzG8bFjebiXrYh5BYIewIVQkOwiSDMsCbHdzYnXxgz0g==",
+ "dev": true,
+ "requires": {
+ "unified-lint-rule": "^1.0.0",
+ "unist-util-generated": "^1.1.0",
+ "unist-util-position": "^3.0.0",
+ "unist-util-visit": "^1.1.1",
+ "vfile-location": "^2.0.1"
+ }
+ },
+ "remark-lint-code-block-style": {
+ "version": "1.0.4",
+ "resolved": "https://registry.npmjs.org/remark-lint-code-block-style/-/remark-lint-code-block-style-1.0.4.tgz",
+ "integrity": "sha512-Wq5F94nkaWTx8W/9C/ydG+DhVWLirBrWb0xnoMQ0cHnizAd3BWw8g0x5L7yglMYSUSVLWY0jfMHgOe9UW3JfTw==",
+ "dev": true,
+ "requires": {
+ "unified-lint-rule": "^1.0.0",
+ "unist-util-generated": "^1.1.0",
+ "unist-util-position": "^3.0.0",
+ "unist-util-visit": "^1.1.1"
+ }
+ },
+ "remark-lint-definition-case": {
+ "version": "1.0.5",
+ "resolved": "https://registry.npmjs.org/remark-lint-definition-case/-/remark-lint-definition-case-1.0.5.tgz",
+ "integrity": "sha512-iirq74fKhJZsFw7x4FJuLVRkXclntutG1YKajfLaE3Gm14YlJWBEoabNTk+ENR4QXoB9rTdEqn3Cc3ImO8qciQ==",
+ "dev": true,
+ "requires": {
+ "unified-lint-rule": "^1.0.0",
+ "unist-util-generated": "^1.1.0",
+ "unist-util-position": "^3.0.0",
+ "unist-util-visit": "^1.4.0"
+ }
+ },
+ "remark-lint-definition-spacing": {
+ "version": "1.0.5",
+ "resolved": "https://registry.npmjs.org/remark-lint-definition-spacing/-/remark-lint-definition-spacing-1.0.5.tgz",
+ "integrity": "sha512-ss8OQmK4c/1amEAJpDjkFiByLyXpsYFNzmk6rEZQkxZZd+DVHI0oF+CzSeMVHu48rF2qHOkKhVghqrCM0vleAA==",
+ "dev": true,
+ "requires": {
+ "unified-lint-rule": "^1.0.0",
+ "unist-util-generated": "^1.1.0",
+ "unist-util-position": "^3.0.0",
+ "unist-util-visit": "^1.4.0"
+ }
+ },
+ "remark-lint-emphasis-marker": {
+ "version": "1.0.4",
+ "resolved": "https://registry.npmjs.org/remark-lint-emphasis-marker/-/remark-lint-emphasis-marker-1.0.4.tgz",
+ "integrity": "sha512-TdYISSw7Ib6EJDApDj9zcZNDCJEaEoQIrYS3+QH2TQxoDx96B0t1bbErRM5L/hx1UWPBpeFLKpgIWL163eMmYA==",
+ "dev": true,
+ "requires": {
+ "unified-lint-rule": "^1.0.0",
+ "unist-util-generated": "^1.1.0",
+ "unist-util-position": "^3.0.0",
+ "unist-util-visit": "^1.1.1"
+ }
+ },
+ "remark-lint-fenced-code-flag": {
+ "version": "1.0.4",
+ "resolved": "https://registry.npmjs.org/remark-lint-fenced-code-flag/-/remark-lint-fenced-code-flag-1.0.4.tgz",
+ "integrity": "sha512-bkQvlEYco6ZzdzvGPrY7DBsqSq/2mZEmdhpn0KdMEZ9kcKJP4unQdVQys04SKnf9QISqQ446VnQj5Q4E3HMSkQ==",
+ "dev": true,
+ "requires": {
+ "unified-lint-rule": "^1.0.0",
+ "unist-util-generated": "^1.1.0",
+ "unist-util-position": "^3.0.0",
+ "unist-util-visit": "^1.1.1"
+ }
+ },
+ "remark-lint-fenced-code-marker": {
+ "version": "1.0.4",
+ "resolved": "https://registry.npmjs.org/remark-lint-fenced-code-marker/-/remark-lint-fenced-code-marker-1.0.4.tgz",
+ "integrity": "sha512-aJF4ISIEvK3NX+C2rN93QoS/32SSiytQKRSeGa+HwsAn3sTwqmy2IoAwbFeZIZA2vqKcVB4h1b9yKamSlfX30Q==",
+ "dev": true,
+ "requires": {
+ "unified-lint-rule": "^1.0.0",
+ "unist-util-generated": "^1.1.0",
+ "unist-util-position": "^3.0.0",
+ "unist-util-visit": "^1.1.1"
+ }
+ },
+ "remark-lint-file-extension": {
+ "version": "1.0.5",
+ "resolved": "https://registry.npmjs.org/remark-lint-file-extension/-/remark-lint-file-extension-1.0.5.tgz",
+ "integrity": "sha512-oVQdf5vEomwHkfQ7R/mgmsWW2H/t9kSvnrxtVoNOHr+qnOEafKKDn+AFhioN2kqtjCZBAjSSrePs6xGKmXKDTw==",
+ "dev": true,
+ "requires": {
+ "unified-lint-rule": "^1.0.0"
+ }
+ },
+ "remark-lint-final-definition": {
+ "version": "1.0.4",
+ "resolved": "https://registry.npmjs.org/remark-lint-final-definition/-/remark-lint-final-definition-1.0.4.tgz",
+ "integrity": "sha512-y9aDZPhqWcI7AtrJtL69HE6MoWMqDqLQUyWMadzAYUYb9/m4ciLdygJ4cWVpEN3n4mkBepHIsWzASaKHHBDJOQ==",
+ "dev": true,
+ "requires": {
+ "unified-lint-rule": "^1.0.0",
+ "unist-util-generated": "^1.1.0",
+ "unist-util-position": "^3.0.0",
+ "unist-util-visit": "^1.1.1"
+ }
+ },
+ "remark-lint-final-newline": {
+ "version": "1.0.5",
+ "resolved": "https://registry.npmjs.org/remark-lint-final-newline/-/remark-lint-final-newline-1.0.5.tgz",
+ "integrity": "sha512-rfLlW8+Fz2dqnaEgU4JwLA55CQF1T4mfSs/GwkkeUCGPenvEYwSkCN2KO2Gr1dy8qPoOdTFE1rSufLjmeTW5HA==",
+ "dev": true,
+ "requires": {
+ "unified-lint-rule": "^1.0.0"
+ }
+ },
+ "remark-lint-hard-break-spaces": {
+ "version": "1.0.5",
+ "resolved": "https://registry.npmjs.org/remark-lint-hard-break-spaces/-/remark-lint-hard-break-spaces-1.0.5.tgz",
+ "integrity": "sha512-Rss7ujNtxipO/hasWYc0QdiO8D5VyliSwj3zAZ8GeDn0ix2KH+pY4/AJC7i9IGcVVbUGvvXLpJB3Pp1VeY7oKw==",
+ "dev": true,
+ "requires": {
+ "unified-lint-rule": "^1.0.0",
+ "unist-util-generated": "^1.1.0",
+ "unist-util-position": "^3.0.0",
+ "unist-util-visit": "^1.1.1"
+ }
+ },
+ "remark-lint-heading-increment": {
+ "version": "1.0.4",
+ "resolved": "https://registry.npmjs.org/remark-lint-heading-increment/-/remark-lint-heading-increment-1.0.4.tgz",
+ "integrity": "sha512-3PJj32G7W1OUyRVSZiZbSOxyFAsw/mNssIosS9G8+6Lq2yeTSMDoCJy0+LC+s00nORFcbzeMedMK5U/eLbDe4w==",
+ "dev": true,
+ "requires": {
+ "unified-lint-rule": "^1.0.0",
+ "unist-util-generated": "^1.1.0",
+ "unist-util-visit": "^1.1.1"
+ }
+ },
+ "remark-lint-heading-style": {
+ "version": "1.0.4",
+ "resolved": "https://registry.npmjs.org/remark-lint-heading-style/-/remark-lint-heading-style-1.0.4.tgz",
+ "integrity": "sha512-ASssbw0vj9urTMxDJGpOn4K7d9MgPQPJGaCD+7v7je42krofvqC4CxpYvO/fOAkRZcttE91VfFHxkaPjiBtQLw==",
+ "dev": true,
+ "requires": {
+ "mdast-util-heading-style": "^1.0.2",
+ "unified-lint-rule": "^1.0.0",
+ "unist-util-generated": "^1.1.0",
+ "unist-util-visit": "^1.1.1"
+ }
+ },
+ "remark-lint-heading-whitespace": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/remark-lint-heading-whitespace/-/remark-lint-heading-whitespace-1.0.0.tgz",
+ "integrity": "sha512-DykoBIXNbkihg64D+mztSOv3l82RTH4tIZW/HUB4QM4NpIEB+pVIPQpCYD0K4pTgvKiwoqsj4NY8qJ1EhNHAmQ==",
+ "dev": true,
+ "requires": {
+ "mdast-util-to-string": "^1.0.4",
+ "unified-lint-rule": "^1.0.2",
+ "unist-util-visit": "^1.3.0"
+ }
+ },
+ "remark-lint-link-title-style": {
+ "version": "1.0.5",
+ "resolved": "https://registry.npmjs.org/remark-lint-link-title-style/-/remark-lint-link-title-style-1.0.5.tgz",
+ "integrity": "sha512-Nu0cKj220q/PmUzELhYRUR2uxXabWuFJq9sApkgsc59uh+NKDtCEdpxkx7Zwvn6kUEwpuQVimeRfdesiKxX52g==",
+ "dev": true,
+ "requires": {
+ "unified-lint-rule": "^1.0.0",
+ "unist-util-generated": "^1.1.0",
+ "unist-util-position": "^3.0.0",
+ "unist-util-visit": "^1.1.1",
+ "vfile-location": "^2.0.1"
+ }
+ },
+ "remark-lint-list-item-bullet-indent": {
+ "version": "1.0.4",
+ "resolved": "https://registry.npmjs.org/remark-lint-list-item-bullet-indent/-/remark-lint-list-item-bullet-indent-1.0.4.tgz",
+ "integrity": "sha512-SqhAmVFkeFQYP5I1qztCi2rfhLWwfSvia3DZ56sz9+h1pMqWIj3FmWRueY36xrnQo3LxsLljM5atI7AJBWxQEw==",
+ "dev": true,
+ "requires": {
+ "plur": "^3.0.0",
+ "unified-lint-rule": "^1.0.0",
+ "unist-util-generated": "^1.1.0",
+ "unist-util-position": "^3.0.0",
+ "unist-util-visit": "^1.1.1"
+ }
+ },
+ "remark-lint-list-item-content-indent": {
+ "version": "1.0.4",
+ "resolved": "https://registry.npmjs.org/remark-lint-list-item-content-indent/-/remark-lint-list-item-content-indent-1.0.4.tgz",
+ "integrity": "sha512-zfEeAayZjEKkPr07fnhkGLENxOhnm0WZJTj6UBIXhtGu7rX23WNKSZaiou8iUoHxcO6ySCvIUJAEmq/XN1FxkQ==",
+ "dev": true,
+ "requires": {
+ "plur": "^3.0.0",
+ "unified-lint-rule": "^1.0.0",
+ "unist-util-generated": "^1.1.0",
+ "unist-util-position": "^3.0.0",
+ "unist-util-visit": "^1.1.1"
+ }
+ },
+ "remark-lint-list-item-indent": {
+ "version": "1.0.5",
+ "resolved": "https://registry.npmjs.org/remark-lint-list-item-indent/-/remark-lint-list-item-indent-1.0.5.tgz",
+ "integrity": "sha512-DjRgxjqaVMrnlQFJypizTPtLa9gSM5ad0LVIFDSstV2UVXSgpBi2+bSsFJEXb4Fkjo/d2JAgt27UhzhcoF2lnw==",
+ "dev": true,
+ "requires": {
+ "plur": "^3.0.0",
+ "unified-lint-rule": "^1.0.0",
+ "unist-util-generated": "^1.1.0",
+ "unist-util-position": "^3.0.0",
+ "unist-util-visit": "^1.1.1"
+ }
+ },
+ "remark-lint-list-item-spacing": {
+ "version": "1.1.4",
+ "resolved": "https://registry.npmjs.org/remark-lint-list-item-spacing/-/remark-lint-list-item-spacing-1.1.4.tgz",
+ "integrity": "sha512-zZELzTPYCoOCnOWh/nYTfQWnGXWg4/I5KpwrjBqe7WYwoMtvLVU9mqjRj2jHEbmirEXas54NZnYnkCoIBMS4bw==",
+ "dev": true,
+ "requires": {
+ "unified-lint-rule": "^1.0.0",
+ "unist-util-generated": "^1.1.0",
+ "unist-util-position": "^3.0.0",
+ "unist-util-visit": "^1.1.1"
+ }
+ },
+ "remark-lint-maximum-heading-length": {
+ "version": "1.0.4",
+ "resolved": "https://registry.npmjs.org/remark-lint-maximum-heading-length/-/remark-lint-maximum-heading-length-1.0.4.tgz",
+ "integrity": "sha512-dhDBnUFXMuHoW8LSV/VICJAJO+wWumnvuu3ND7MJquCYrsjX2vcRmJXL5cusJSY4yqPosKlOowIkzdV5B6/SDQ==",
+ "dev": true,
+ "requires": {
+ "mdast-util-to-string": "^1.0.2",
+ "unified-lint-rule": "^1.0.0",
+ "unist-util-generated": "^1.1.0",
+ "unist-util-visit": "^1.1.1"
+ }
+ },
+ "remark-lint-maximum-line-length": {
+ "version": "1.2.2",
+ "resolved": "https://registry.npmjs.org/remark-lint-maximum-line-length/-/remark-lint-maximum-line-length-1.2.2.tgz",
+ "integrity": "sha512-ItAdjK+tUhqggqFvtAJ8iJ0MbBgShLl0HDgpG3In0QSYp/dmofO77DjvRjCJQo1pQYS8/LwlBii9cqg/3MwFfA==",
+ "dev": true,
+ "requires": {
+ "unified-lint-rule": "^1.0.0",
+ "unist-util-generated": "^1.1.0",
+ "unist-util-position": "^3.0.0",
+ "unist-util-visit": "^1.4.0"
+ }
+ },
+ "remark-lint-no-auto-link-without-protocol": {
+ "version": "1.0.4",
+ "resolved": "https://registry.npmjs.org/remark-lint-no-auto-link-without-protocol/-/remark-lint-no-auto-link-without-protocol-1.0.4.tgz",
+ "integrity": "sha512-dhDHQLeaI79p7SRoKfxJ9c8J5otQsGua7ILeNbs2Onzn46/tp9ir6zjq3Lfh4VJJr4OVign2e8u+MzXsS7Uu/A==",
+ "dev": true,
+ "requires": {
+ "mdast-util-to-string": "^1.0.2",
+ "unified-lint-rule": "^1.0.0",
+ "unist-util-generated": "^1.1.0",
+ "unist-util-position": "^3.0.0",
+ "unist-util-visit": "^1.1.1"
+ }
+ },
+ "remark-lint-no-blockquote-without-marker": {
+ "version": "2.0.4",
+ "resolved": "https://registry.npmjs.org/remark-lint-no-blockquote-without-marker/-/remark-lint-no-blockquote-without-marker-2.0.4.tgz",
+ "integrity": "sha512-a5LFGj7It2z7aBRGaAcztk4D2pax2b7dK9iOarIWv/JBus/PSjZJxzZCma2aAAOQhv3wbNTwqQwuQC0UJHMbPg==",
+ "dev": true,
+ "requires": {
+ "unified-lint-rule": "^1.0.0",
+ "unist-util-generated": "^1.1.0",
+ "unist-util-position": "^3.0.0",
+ "unist-util-visit": "^1.1.1",
+ "vfile-location": "^2.0.1"
+ }
+ },
+ "remark-lint-no-consecutive-blank-lines": {
+ "version": "1.0.4",
+ "resolved": "https://registry.npmjs.org/remark-lint-no-consecutive-blank-lines/-/remark-lint-no-consecutive-blank-lines-1.0.4.tgz",
+ "integrity": "sha512-33rYrp+3OQ2UjG2/xhctruCvkP2iKLuHJhoUOAUV3BGwqJjAB+xNOl+0DdvDo0fxh5dyZuNesBuos3xr2yVR+w==",
+ "dev": true,
+ "requires": {
+ "plur": "^3.0.0",
+ "unified-lint-rule": "^1.0.0",
+ "unist-util-generated": "^1.1.0",
+ "unist-util-position": "^3.0.0",
+ "unist-util-visit": "^1.1.1"
+ }
+ },
+ "remark-lint-no-dead-urls": {
+ "version": "0.5.0",
+ "resolved": "https://registry.npmjs.org/remark-lint-no-dead-urls/-/remark-lint-no-dead-urls-0.5.0.tgz",
+ "integrity": "sha512-wMr1+7Sh5rRGF9hmF9iyajRNQfA2BnADc9myYjpRxsYozRDj6p2ddVwBh76loUKwCZ64sLRKamyhMDwQ0NIdYA==",
+ "dev": true,
+ "requires": {
+ "check-links": "^1.1.7",
+ "is-online": "^8.2.0",
+ "unified-lint-rule": "^1.0.4",
+ "unist-util-visit": "^2.0.0"
+ },
+ "dependencies": {
+ "unist-util-is": {
+ "version": "4.0.2",
+ "resolved": "https://registry.npmjs.org/unist-util-is/-/unist-util-is-4.0.2.tgz",
+ "integrity": "sha512-Ofx8uf6haexJwI1gxWMGg6I/dLnF2yE+KibhD3/diOqY2TinLcqHXCV6OI5gFVn3xQqDH+u0M625pfKwIwgBKQ==",
+ "dev": true
+ },
+ "unist-util-visit": {
+ "version": "2.0.3",
+ "resolved": "https://registry.npmjs.org/unist-util-visit/-/unist-util-visit-2.0.3.tgz",
+ "integrity": "sha512-iJ4/RczbJMkD0712mGktuGpm/U4By4FfDonL7N/9tATGIF4imikjOuagyMY53tnZq3NP6BcmlrHhEKAfGWjh7Q==",
+ "dev": true,
+ "requires": {
+ "@types/unist": "^2.0.0",
+ "unist-util-is": "^4.0.0",
+ "unist-util-visit-parents": "^3.0.0"
+ }
+ },
+ "unist-util-visit-parents": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/unist-util-visit-parents/-/unist-util-visit-parents-3.1.0.tgz",
+ "integrity": "sha512-0g4wbluTF93npyPrp/ymd3tCDTMnP0yo2akFD2FIBAYXq/Sga3lwaU1D8OYKbtpioaI6CkDcQ6fsMnmtzt7htw==",
+ "dev": true,
+ "requires": {
+ "@types/unist": "^2.0.0",
+ "unist-util-is": "^4.0.0"
+ }
+ }
+ }
+ },
+ "remark-lint-no-duplicate-definitions": {
+ "version": "1.0.6",
+ "resolved": "https://registry.npmjs.org/remark-lint-no-duplicate-definitions/-/remark-lint-no-duplicate-definitions-1.0.6.tgz",
+ "integrity": "sha512-0biPtjVtGLRTN+ie7TzJpvKBau6sqkuQsQtuD07M0NSOmSvSU4vXUeOW73O5Q5xM3i6sYVESe+opaPefD3zEoA==",
+ "dev": true,
+ "requires": {
+ "unified-lint-rule": "^1.0.0",
+ "unist-util-generated": "^1.1.0",
+ "unist-util-position": "^3.0.0",
+ "unist-util-stringify-position": "^2.0.0",
+ "unist-util-visit": "^1.4.0"
+ }
+ },
+ "remark-lint-no-duplicate-headings": {
+ "version": "1.0.5",
+ "resolved": "https://registry.npmjs.org/remark-lint-no-duplicate-headings/-/remark-lint-no-duplicate-headings-1.0.5.tgz",
+ "integrity": "sha512-4GKPxhKpN797V/6Jg1K4Zwqq+PhsjC633+wQMrZcTvOJfY+Rq1i7sNJ9lJVZnsDAlZJI56VqZCKnPJmS2br87g==",
+ "dev": true,
+ "requires": {
+ "mdast-util-to-string": "^1.0.2",
+ "unified-lint-rule": "^1.0.0",
+ "unist-util-generated": "^1.1.0",
+ "unist-util-position": "^3.0.0",
+ "unist-util-stringify-position": "^2.0.0",
+ "unist-util-visit": "^1.1.1"
+ }
+ },
+ "remark-lint-no-emphasis-as-heading": {
+ "version": "1.0.4",
+ "resolved": "https://registry.npmjs.org/remark-lint-no-emphasis-as-heading/-/remark-lint-no-emphasis-as-heading-1.0.4.tgz",
+ "integrity": "sha512-gnsInLxTkc59eVD3/qelFagD/NcrMPKXT1sy7i4e8D2jqQyrIHHl0p3TfiyNNt8qIjKMKhlIii4k4kVk/3Mczg==",
+ "dev": true,
+ "requires": {
+ "unified-lint-rule": "^1.0.0",
+ "unist-util-generated": "^1.1.0",
+ "unist-util-visit": "^1.1.1"
+ }
+ },
+ "remark-lint-no-file-name-articles": {
+ "version": "1.0.5",
+ "resolved": "https://registry.npmjs.org/remark-lint-no-file-name-articles/-/remark-lint-no-file-name-articles-1.0.5.tgz",
+ "integrity": "sha512-AQk5eTb3s3TAPPjiglZgqlQj4ycao+gPs8/XkdN1VCPUtewW0GgwoQe7YEuBKayJ6ioN8dGP37Kg/P/PlKaRQA==",
+ "dev": true,
+ "requires": {
+ "unified-lint-rule": "^1.0.0"
+ }
+ },
+ "remark-lint-no-file-name-consecutive-dashes": {
+ "version": "1.0.5",
+ "resolved": "https://registry.npmjs.org/remark-lint-no-file-name-consecutive-dashes/-/remark-lint-no-file-name-consecutive-dashes-1.0.5.tgz",
+ "integrity": "sha512-Mg2IDsi790/dSdAzwnBnsMYdZm3qC2QgGwqOWcr0TPABJhhjC3p8r5fX4MNMTXI5It7B7bW9+ImmCeLOZiXkLg==",
+ "dev": true,
+ "requires": {
+ "unified-lint-rule": "^1.0.0"
+ }
+ },
+ "remark-lint-no-file-name-irregular-characters": {
+ "version": "1.0.5",
+ "resolved": "https://registry.npmjs.org/remark-lint-no-file-name-irregular-characters/-/remark-lint-no-file-name-irregular-characters-1.0.5.tgz",
+ "integrity": "sha512-Oe5i99qNUKc2bxmiH421o5B/kqlf1dfjAxpHNLhi2X2dXE91zRGavrlRM/4f4oR0N9Bqb3qB9JZPyMPWrzu9XA==",
+ "dev": true,
+ "requires": {
+ "unified-lint-rule": "^1.0.0"
+ }
+ },
+ "remark-lint-no-file-name-mixed-case": {
+ "version": "1.0.5",
+ "resolved": "https://registry.npmjs.org/remark-lint-no-file-name-mixed-case/-/remark-lint-no-file-name-mixed-case-1.0.5.tgz",
+ "integrity": "sha512-ilrUCbHZin/ENwr8c3SC2chgkFsizXjBQIB/oZ7gnm1IkCkZPiMyXZAHdpwC/DjbrpGxfMYh9JmIHao4giS5+A==",
+ "dev": true,
+ "requires": {
+ "unified-lint-rule": "^1.0.0"
+ }
+ },
+ "remark-lint-no-file-name-outer-dashes": {
+ "version": "1.0.6",
+ "resolved": "https://registry.npmjs.org/remark-lint-no-file-name-outer-dashes/-/remark-lint-no-file-name-outer-dashes-1.0.6.tgz",
+ "integrity": "sha512-rT8CmcIlenegS0Yst4maYXdZfqIjBOiRUY8j/KJkORF5tKH+3O1/S07025qPGmcRihzK3w4yO0K8rgkKQw0b9w==",
+ "dev": true,
+ "requires": {
+ "unified-lint-rule": "^1.0.0"
+ }
+ },
+ "remark-lint-no-heading-content-indent": {
+ "version": "1.0.4",
+ "resolved": "https://registry.npmjs.org/remark-lint-no-heading-content-indent/-/remark-lint-no-heading-content-indent-1.0.4.tgz",
+ "integrity": "sha512-z+hcAsGbGiy28ERAZuOT1pYf6lkkoR2YlFUt4po4azfXMz5lmidYTotkezsWvA3Bh8N0mIi7hs8syPt8RRIGqg==",
+ "dev": true,
+ "requires": {
+ "mdast-util-heading-style": "^1.0.2",
+ "plur": "^3.0.0",
+ "unified-lint-rule": "^1.0.0",
+ "unist-util-generated": "^1.1.0",
+ "unist-util-position": "^3.0.0",
+ "unist-util-visit": "^1.1.1"
+ }
+ },
+ "remark-lint-no-heading-punctuation": {
+ "version": "1.0.4",
+ "resolved": "https://registry.npmjs.org/remark-lint-no-heading-punctuation/-/remark-lint-no-heading-punctuation-1.0.4.tgz",
+ "integrity": "sha512-++/HXg/qtVssJjzq2ZgEreoxaazw9KkYrAbTDImKV7Fypo+7bZFELUvFicq0/i9qwAwt1tvhkKtLYt1W/sr1JQ==",
+ "dev": true,
+ "requires": {
+ "mdast-util-to-string": "^1.0.2",
+ "unified-lint-rule": "^1.0.0",
+ "unist-util-generated": "^1.1.0",
+ "unist-util-visit": "^1.1.1"
+ }
+ },
+ "remark-lint-no-inline-padding": {
+ "version": "1.0.5",
+ "resolved": "https://registry.npmjs.org/remark-lint-no-inline-padding/-/remark-lint-no-inline-padding-1.0.5.tgz",
+ "integrity": "sha512-AjS34hBRasYiIAKZJ7/9U42LouRHok2WVTRdQPcVtRBswStNOuot59S+FRsatqlk1wvMmjytqxUKQfVTSeu9ag==",
+ "dev": true,
+ "requires": {
+ "mdast-util-to-string": "^1.0.2",
+ "unified-lint-rule": "^1.0.0",
+ "unist-util-generated": "^1.1.0",
+ "unist-util-visit": "^1.4.0"
+ }
+ },
+ "remark-lint-no-literal-urls": {
+ "version": "1.0.4",
+ "resolved": "https://registry.npmjs.org/remark-lint-no-literal-urls/-/remark-lint-no-literal-urls-1.0.4.tgz",
+ "integrity": "sha512-sHjbzaSG4z6jMu1L0Qx1b7VvIQHy0bR4xZ6t9auJ5AoB5ua8hb/970s77irH1+46TF1ezhE7i+QDjmhcQi09xg==",
+ "dev": true,
+ "requires": {
+ "mdast-util-to-string": "^1.0.2",
+ "unified-lint-rule": "^1.0.0",
+ "unist-util-generated": "^1.1.0",
+ "unist-util-position": "^3.0.0",
+ "unist-util-visit": "^1.1.1"
+ }
+ },
+ "remark-lint-no-multiple-toplevel-headings": {
+ "version": "1.0.5",
+ "resolved": "https://registry.npmjs.org/remark-lint-no-multiple-toplevel-headings/-/remark-lint-no-multiple-toplevel-headings-1.0.5.tgz",
+ "integrity": "sha512-RZ1YPxRO7Bo8mT+A36cZ7nx2QHFAKk+oE6j87YrZYpAKr2oF6snKS8nIGhVku4PSI/9cW1G12MZz1cAA5rcjFw==",
+ "dev": true,
+ "requires": {
+ "unified-lint-rule": "^1.0.0",
+ "unist-util-generated": "^1.1.0",
+ "unist-util-position": "^3.0.0",
+ "unist-util-stringify-position": "^2.0.0",
+ "unist-util-visit": "^1.1.1"
+ }
+ },
+ "remark-lint-no-shell-dollars": {
+ "version": "1.0.4",
+ "resolved": "https://registry.npmjs.org/remark-lint-no-shell-dollars/-/remark-lint-no-shell-dollars-1.0.4.tgz",
+ "integrity": "sha512-YXFj8FUVTKkVvoAbFY3zv1Ol7Kj1i+qdze3pXSgRG61y1LpfL8/HpnvFrseMbBmNw6o4WpjTo7GoArngJ1sCeg==",
+ "dev": true,
+ "requires": {
+ "unified-lint-rule": "^1.0.0",
+ "unist-util-generated": "^1.1.0",
+ "unist-util-visit": "^1.1.1"
+ }
+ },
+ "remark-lint-no-shortcut-reference-image": {
+ "version": "1.0.4",
+ "resolved": "https://registry.npmjs.org/remark-lint-no-shortcut-reference-image/-/remark-lint-no-shortcut-reference-image-1.0.4.tgz",
+ "integrity": "sha512-5/9QoesnOHIDwMHU9x+AGPBiFoMe9ZBKIR8nC17C6ZdksgwUIpjBJ3VX5POFlt5E6OhAZaeXqUCq9G2USccEdA==",
+ "dev": true,
+ "requires": {
+ "unified-lint-rule": "^1.0.0",
+ "unist-util-generated": "^1.1.0",
+ "unist-util-visit": "^1.1.1"
+ }
+ },
+ "remark-lint-no-shortcut-reference-link": {
+ "version": "1.0.5",
+ "resolved": "https://registry.npmjs.org/remark-lint-no-shortcut-reference-link/-/remark-lint-no-shortcut-reference-link-1.0.5.tgz",
+ "integrity": "sha512-qDVL7/0ptOTd/nyd9u/4MYFWQtYQU8povdUB45UgTXy5Rrf1WsC+4DfzAEZkX3tOSTExdAIf1WOKqdC5xRcfvA==",
+ "dev": true,
+ "requires": {
+ "unified-lint-rule": "^1.0.0",
+ "unist-util-generated": "^1.1.0",
+ "unist-util-visit": "^1.1.1"
+ }
+ },
+ "remark-lint-no-table-indentation": {
+ "version": "1.0.5",
+ "resolved": "https://registry.npmjs.org/remark-lint-no-table-indentation/-/remark-lint-no-table-indentation-1.0.5.tgz",
+ "integrity": "sha512-eE1GL+IzU3vtHdYCKHCZEIhCwiwCM7UH+pMDIMpGfH2LB3cB/Nrfbiz9xadGkARKqxxDMsJSBZDw4A/01IU+kA==",
+ "dev": true,
+ "requires": {
+ "unified-lint-rule": "^1.0.0",
+ "unist-util-generated": "^1.1.0",
+ "unist-util-position": "^3.0.0",
+ "unist-util-visit": "^1.4.0"
+ }
+ },
+ "remark-lint-no-undefined-references": {
+ "version": "1.1.2",
+ "resolved": "https://registry.npmjs.org/remark-lint-no-undefined-references/-/remark-lint-no-undefined-references-1.1.2.tgz",
+ "integrity": "sha512-/MEXcusNFHx+BYUf4wuil+GVKTkofvT+VodjyGw5X0OuPZZJ9c/kL0QjHHzuLuGH+oJUsgOOfnJC/eyO0tV8bw==",
+ "dev": true,
+ "requires": {
+ "collapse-white-space": "^1.0.4",
+ "unified-lint-rule": "^1.0.0",
+ "unist-util-generated": "^1.1.0",
+ "unist-util-visit": "^1.4.0"
+ }
+ },
+ "remark-lint-no-unused-definitions": {
+ "version": "1.0.6",
+ "resolved": "https://registry.npmjs.org/remark-lint-no-unused-definitions/-/remark-lint-no-unused-definitions-1.0.6.tgz",
+ "integrity": "sha512-hYHMjbg3wBGT30R9PN74Bieejg2qRkXH9Rc2YakP1unuJoF+X+i5RMsW71spW11/r/+gW2mdzuwBuBoQrWpGTA==",
+ "dev": true,
+ "requires": {
+ "unified-lint-rule": "^1.0.0",
+ "unist-util-generated": "^1.1.0",
+ "unist-util-visit": "^1.4.0"
+ }
+ },
+ "remark-lint-ordered-list-marker-style": {
+ "version": "1.0.4",
+ "resolved": "https://registry.npmjs.org/remark-lint-ordered-list-marker-style/-/remark-lint-ordered-list-marker-style-1.0.4.tgz",
+ "integrity": "sha512-c6AIqeePzm3nfkPCbTdwBS3/AQICgwE76+ryOc7tsSq4ulyK/Nt8Syvi/oiHYuonBddZoGtFTNCn0jqen9qscA==",
+ "dev": true,
+ "requires": {
+ "unified-lint-rule": "^1.0.0",
+ "unist-util-generated": "^1.1.0",
+ "unist-util-position": "^3.0.0",
+ "unist-util-visit": "^1.1.1"
+ }
+ },
+ "remark-lint-ordered-list-marker-value": {
+ "version": "1.0.5",
+ "resolved": "https://registry.npmjs.org/remark-lint-ordered-list-marker-value/-/remark-lint-ordered-list-marker-value-1.0.5.tgz",
+ "integrity": "sha512-eKepbNNfu9rEuG8WvV0sc7B+KiPMgq5Nc9baAxL9Hi6mhpj347YFWXxJUNttSINS13YTpnHxPvXmF9SzhjFKNQ==",
+ "dev": true,
+ "requires": {
+ "unified-lint-rule": "^1.0.0",
+ "unist-util-generated": "^1.1.0",
+ "unist-util-position": "^3.0.0",
+ "unist-util-visit": "^1.1.1"
+ }
+ },
+ "remark-lint-rule-style": {
+ "version": "1.0.4",
+ "resolved": "https://registry.npmjs.org/remark-lint-rule-style/-/remark-lint-rule-style-1.0.4.tgz",
+ "integrity": "sha512-omr5P6CCvo2zixCzK9uiGZpwzOE+4rc+95kWH95k2iA6Rp8Qohp8RK4unSRKLtFYGUhSbiQPgWaQXHDxMkWczg==",
+ "dev": true,
+ "requires": {
+ "unified-lint-rule": "^1.0.0",
+ "unist-util-generated": "^1.1.0",
+ "unist-util-position": "^3.0.0",
+ "unist-util-visit": "^1.1.1"
+ }
+ },
+ "remark-lint-strong-marker": {
+ "version": "1.0.4",
+ "resolved": "https://registry.npmjs.org/remark-lint-strong-marker/-/remark-lint-strong-marker-1.0.4.tgz",
+ "integrity": "sha512-X9f6yhZ85cdP0cmCgkqlbxllpeQ60pS9Qqk9Jb9SZo6f95esaHptQ5bExb1ZVXzhSHz2Xz86tUhXtzG3zGFD4g==",
+ "dev": true,
+ "requires": {
+ "unified-lint-rule": "^1.0.0",
+ "unist-util-generated": "^1.1.0",
+ "unist-util-position": "^3.0.0",
+ "unist-util-visit": "^1.1.1"
+ }
+ },
+ "remark-lint-table-cell-padding": {
+ "version": "1.0.5",
+ "resolved": "https://registry.npmjs.org/remark-lint-table-cell-padding/-/remark-lint-table-cell-padding-1.0.5.tgz",
+ "integrity": "sha512-N/WpcymrGBSPbLiv2OQTvdzNn6H9ctdyEA+P/odn4G9FqyrLmeTMkGJuGtinU569hLG/RtHqZIDeFVDiYi8Wzw==",
+ "dev": true,
+ "requires": {
+ "unified-lint-rule": "^1.0.0",
+ "unist-util-generated": "^1.1.0",
+ "unist-util-position": "^3.0.0",
+ "unist-util-visit": "^1.4.0"
+ }
+ },
+ "remark-lint-table-pipe-alignment": {
+ "version": "1.0.4",
+ "resolved": "https://registry.npmjs.org/remark-lint-table-pipe-alignment/-/remark-lint-table-pipe-alignment-1.0.4.tgz",
+ "integrity": "sha512-pmELEOXeUjMQedyVvOtZcTCnTu6FxZ4gfBskMx6iJhOFEEKTFOmviqlKLpndPBxFNZB86AiE0C00/NvAaut8dw==",
+ "dev": true,
+ "requires": {
+ "unified-lint-rule": "^1.0.0",
+ "unist-util-generated": "^1.1.0",
+ "unist-util-position": "^3.0.0",
+ "unist-util-visit": "^1.1.1"
+ }
+ },
+ "remark-lint-table-pipes": {
+ "version": "1.0.4",
+ "resolved": "https://registry.npmjs.org/remark-lint-table-pipes/-/remark-lint-table-pipes-1.0.4.tgz",
+ "integrity": "sha512-0fdnoiiSLIPd/76gNvQY4pg27d8HkMmmv5gCGfD+Z/Si9DdpbJdq93U0kX+Botb3+/4VEDIlcU7Cp5HXppMTWA==",
+ "dev": true,
+ "requires": {
+ "unified-lint-rule": "^1.0.0",
+ "unist-util-generated": "^1.1.0",
+ "unist-util-position": "^3.0.0",
+ "unist-util-visit": "^1.1.1"
+ }
+ },
+ "remark-lint-unordered-list-marker-style": {
+ "version": "1.0.4",
+ "resolved": "https://registry.npmjs.org/remark-lint-unordered-list-marker-style/-/remark-lint-unordered-list-marker-style-1.0.4.tgz",
+ "integrity": "sha512-lcuG1J74VGTT4gl8oH33HpkHrqorxjxMlJnBupLFrVowqvJ2hAq8yPJdGZ7P46uZOYw+Xz+Qv08bF8A73PNWxQ==",
+ "dev": true,
+ "requires": {
+ "unified-lint-rule": "^1.0.0",
+ "unist-util-generated": "^1.1.0",
+ "unist-util-position": "^3.0.0",
+ "unist-util-visit": "^1.1.1"
+ }
+ },
+ "remark-lint-write-good": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/remark-lint-write-good/-/remark-lint-write-good-1.2.0.tgz",
+ "integrity": "sha512-HYiwM16RRBm979yDb/IVwPe1eFhzA1HATe1WucRiYWS10jcPRgJe9FihH7W5uzQFDqX5aRbTtu/yPdq+hPnYkw==",
+ "dev": true,
+ "requires": {
+ "nlcst-to-string": "^2.0.0",
+ "unified-lint-rule": "^1.0.1",
+ "unist-util-visit": "^1.1.1",
+ "write-good": "^1.0.2"
+ }
+ },
+ "remark-message-control": {
+ "version": "4.2.0",
+ "resolved": "https://registry.npmjs.org/remark-message-control/-/remark-message-control-4.2.0.tgz",
+ "integrity": "sha512-WXH2t5ljTyhsXlK1zPBLF3iPHbXl58R94phPMreS1xcHWBZJt6Oiu8RtNjy1poZFb3PqKnbYLJeR/CWcZ1bTFw==",
+ "dev": true,
+ "requires": {
+ "mdast-comment-marker": "^1.0.0",
+ "unified-message-control": "^1.0.0",
+ "xtend": "^4.0.1"
+ }
+ },
+ "remark-parse": {
+ "version": "7.0.2",
+ "resolved": "https://registry.npmjs.org/remark-parse/-/remark-parse-7.0.2.tgz",
+ "integrity": "sha512-9+my0lQS80IQkYXsMA8Sg6m9QfXYJBnXjWYN5U+kFc5/n69t+XZVXU/ZBYr3cYH8FheEGf1v87rkFDhJ8bVgMA==",
+ "dev": true,
+ "requires": {
+ "collapse-white-space": "^1.0.2",
+ "is-alphabetical": "^1.0.0",
+ "is-decimal": "^1.0.0",
+ "is-whitespace-character": "^1.0.0",
+ "is-word-character": "^1.0.0",
+ "markdown-escapes": "^1.0.0",
+ "parse-entities": "^1.1.0",
+ "repeat-string": "^1.5.4",
+ "state-toggle": "^1.0.0",
+ "trim": "0.0.1",
+ "trim-trailing-lines": "^1.0.0",
+ "unherit": "^1.0.4",
+ "unist-util-remove-position": "^1.0.0",
+ "vfile-location": "^2.0.0",
+ "xtend": "^4.0.1"
+ }
+ },
+ "remark-preset-lint-consistent": {
+ "version": "2.0.4",
+ "resolved": "https://registry.npmjs.org/remark-preset-lint-consistent/-/remark-preset-lint-consistent-2.0.4.tgz",
+ "integrity": "sha512-cGFSS0ONYEnEYG/USN4f+01BFog2DfBq+jftI1ajSJDPBni6gYffPj6qkT+DZ1F3xkesxLnWAQQb6ZZfa/zGog==",
+ "dev": true,
+ "requires": {
+ "remark-lint": "^6.0.0",
+ "remark-lint-blockquote-indentation": "^1.0.0",
+ "remark-lint-checkbox-character-style": "^1.0.0",
+ "remark-lint-code-block-style": "^1.0.0",
+ "remark-lint-emphasis-marker": "^1.0.0",
+ "remark-lint-fenced-code-marker": "^1.0.0",
+ "remark-lint-heading-style": "^1.0.0",
+ "remark-lint-link-title-style": "^1.0.0",
+ "remark-lint-list-item-content-indent": "^1.0.0",
+ "remark-lint-ordered-list-marker-style": "^1.0.0",
+ "remark-lint-rule-style": "^1.0.0",
+ "remark-lint-strong-marker": "^1.0.0",
+ "remark-lint-table-cell-padding": "^1.0.0"
+ }
+ },
+ "remark-preset-lint-markdown-style-guide": {
+ "version": "2.1.4",
+ "resolved": "https://registry.npmjs.org/remark-preset-lint-markdown-style-guide/-/remark-preset-lint-markdown-style-guide-2.1.4.tgz",
+ "integrity": "sha512-CGEN3DRtJEp+BvfgZ+VKxuq0Ij8Uw2DXfrbhK2xn4/XxatcHRPN8tnagXbMe1LHaQJGN8Gl1+UyLjsfIk6hyGQ==",
+ "dev": true,
+ "requires": {
+ "remark-lint": "^6.0.0",
+ "remark-lint-blockquote-indentation": "^1.0.0",
+ "remark-lint-code-block-style": "^1.0.0",
+ "remark-lint-definition-case": "^1.0.0",
+ "remark-lint-definition-spacing": "^1.0.0",
+ "remark-lint-emphasis-marker": "^1.0.0",
+ "remark-lint-fenced-code-flag": "^1.0.0",
+ "remark-lint-fenced-code-marker": "^1.0.0",
+ "remark-lint-file-extension": "^1.0.0",
+ "remark-lint-final-definition": "^1.0.0",
+ "remark-lint-hard-break-spaces": "^1.0.0",
+ "remark-lint-heading-increment": "^1.0.0",
+ "remark-lint-heading-style": "^1.0.0",
+ "remark-lint-link-title-style": "^1.0.0",
+ "remark-lint-list-item-content-indent": "^1.0.0",
+ "remark-lint-list-item-indent": "^1.0.0",
+ "remark-lint-list-item-spacing": "^1.0.0",
+ "remark-lint-maximum-heading-length": "^1.0.0",
+ "remark-lint-maximum-line-length": "^1.0.0",
+ "remark-lint-no-auto-link-without-protocol": "^1.0.0",
+ "remark-lint-no-blockquote-without-marker": "^2.0.0",
+ "remark-lint-no-consecutive-blank-lines": "^1.0.0",
+ "remark-lint-no-duplicate-headings": "^1.0.0",
+ "remark-lint-no-emphasis-as-heading": "^1.0.0",
+ "remark-lint-no-file-name-articles": "^1.0.0",
+ "remark-lint-no-file-name-consecutive-dashes": "^1.0.0",
+ "remark-lint-no-file-name-irregular-characters": "^1.0.0",
+ "remark-lint-no-file-name-mixed-case": "^1.0.0",
+ "remark-lint-no-file-name-outer-dashes": "^1.0.0",
+ "remark-lint-no-heading-punctuation": "^1.0.0",
+ "remark-lint-no-inline-padding": "^1.0.0",
+ "remark-lint-no-literal-urls": "^1.0.0",
+ "remark-lint-no-multiple-toplevel-headings": "^1.0.0",
+ "remark-lint-no-shell-dollars": "^1.0.0",
+ "remark-lint-no-shortcut-reference-image": "^1.0.0",
+ "remark-lint-no-shortcut-reference-link": "^1.0.0",
+ "remark-lint-no-table-indentation": "^1.0.0",
+ "remark-lint-ordered-list-marker-style": "^1.0.0",
+ "remark-lint-ordered-list-marker-value": "^1.0.0",
+ "remark-lint-rule-style": "^1.0.0",
+ "remark-lint-strong-marker": "^1.0.0",
+ "remark-lint-table-cell-padding": "^1.0.0",
+ "remark-lint-table-pipe-alignment": "^1.0.0",
+ "remark-lint-table-pipes": "^1.0.0",
+ "remark-lint-unordered-list-marker-style": "^1.0.0"
+ }
+ },
+ "remark-preset-lint-recommended": {
+ "version": "3.0.4",
+ "resolved": "https://registry.npmjs.org/remark-preset-lint-recommended/-/remark-preset-lint-recommended-3.0.4.tgz",
+ "integrity": "sha512-i4GoobwWKt5LDJxIZjhHnnYiQISacNE8Oxj1ViwSHJnTUCtd7vh3KwtsfV8DrLsFUqRogX49iJEuZWXipY3PJA==",
+ "dev": true,
+ "requires": {
+ "remark-lint": "^6.0.0",
+ "remark-lint-final-newline": "^1.0.0",
+ "remark-lint-hard-break-spaces": "^1.0.0",
+ "remark-lint-list-item-bullet-indent": "^1.0.0",
+ "remark-lint-list-item-indent": "^1.0.0",
+ "remark-lint-no-auto-link-without-protocol": "^1.0.0",
+ "remark-lint-no-blockquote-without-marker": "^2.0.0",
+ "remark-lint-no-duplicate-definitions": "^1.0.0",
+ "remark-lint-no-heading-content-indent": "^1.0.0",
+ "remark-lint-no-inline-padding": "^1.0.0",
+ "remark-lint-no-literal-urls": "^1.0.0",
+ "remark-lint-no-shortcut-reference-image": "^1.0.0",
+ "remark-lint-no-shortcut-reference-link": "^1.0.0",
+ "remark-lint-no-undefined-references": "^1.0.0",
+ "remark-lint-no-unused-definitions": "^1.0.0",
+ "remark-lint-ordered-list-marker-style": "^1.0.0"
+ }
+ },
+ "remark-retext": {
+ "version": "3.1.3",
+ "resolved": "https://registry.npmjs.org/remark-retext/-/remark-retext-3.1.3.tgz",
+ "integrity": "sha512-UujXAm28u4lnUvtOZQFYfRIhxX+auKI9PuA2QpQVTT7gYk1OgX6o0OUrSo1KOa6GNrFX+OODOtS5PWIHPxM7qw==",
+ "dev": true,
+ "requires": {
+ "mdast-util-to-nlcst": "^3.2.0"
+ }
+ },
+ "remark-stringify": {
+ "version": "7.0.4",
+ "resolved": "https://registry.npmjs.org/remark-stringify/-/remark-stringify-7.0.4.tgz",
+ "integrity": "sha512-qck+8NeA1D0utk1ttKcWAoHRrJxERYQzkHDyn+pF5Z4whX1ug98uCNPPSeFgLSaNERRxnD6oxIug6DzZQth6Pg==",
+ "dev": true,
+ "requires": {
+ "ccount": "^1.0.0",
+ "is-alphanumeric": "^1.0.0",
+ "is-decimal": "^1.0.0",
+ "is-whitespace-character": "^1.0.0",
+ "longest-streak": "^2.0.1",
+ "markdown-escapes": "^1.0.0",
+ "markdown-table": "^1.1.0",
+ "mdast-util-compact": "^1.0.0",
+ "parse-entities": "^1.0.2",
+ "repeat-string": "^1.5.4",
+ "state-toggle": "^1.0.0",
+ "stringify-entities": "^2.0.0",
+ "unherit": "^1.0.4",
+ "xtend": "^4.0.1"
+ }
+ },
+ "remark-validate-links": {
+ "version": "9.2.0",
+ "resolved": "https://registry.npmjs.org/remark-validate-links/-/remark-validate-links-9.2.0.tgz",
+ "integrity": "sha512-hRFU6+Xu+OSOowGoLhc73Et7HgEoXg+SGjQuUUANv3Hf9seuFHQCzO76w150tOYpM8APbdoTioxLFwAY2XEAZg==",
+ "dev": true,
+ "requires": {
+ "github-slugger": "^1.2.0",
+ "hosted-git-info": "^2.5.0",
+ "mdast-util-to-string": "^1.0.4",
+ "propose": "0.0.5",
+ "to-vfile": "^6.0.0",
+ "trough": "^1.0.0",
+ "unist-util-visit": "^1.0.0",
+ "xtend": "^4.0.0"
+ }
+ },
+ "repeat-element": {
+ "version": "1.1.3",
+ "resolved": "https://registry.npmjs.org/repeat-element/-/repeat-element-1.1.3.tgz",
+ "integrity": "sha512-ahGq0ZnV5m5XtZLMb+vP76kcAM5nkLqk0lpqAuojSKGgQtn4eRi4ZZGm2olo2zKFH+sMsWaqOCW1dqAnOru72g==",
+ "dev": true
+ },
+ "repeat-string": {
+ "version": "1.6.1",
+ "resolved": "https://registry.npmjs.org/repeat-string/-/repeat-string-1.6.1.tgz",
+ "integrity": "sha1-jcrkcOHIirwtYA//Sndihtp15jc=",
+ "dev": true
+ },
+ "replace-ext": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/replace-ext/-/replace-ext-1.0.0.tgz",
+ "integrity": "sha1-3mMSg3P8v3w8z6TeWkgMRaZ5WOs=",
+ "dev": true
+ },
+ "require-directory": {
+ "version": "2.1.1",
+ "resolved": "https://registry.npmjs.org/require-directory/-/require-directory-2.1.1.tgz",
+ "integrity": "sha1-jGStX9MNqxyXbiNE/+f3kqam30I=",
+ "dev": true
+ },
+ "requires-port": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/requires-port/-/requires-port-1.0.0.tgz",
+ "integrity": "sha1-kl0mAdOaxIXgkc8NpcbmlNw9yv8=",
+ "dev": true
+ },
+ "resolve": {
+ "version": "1.17.0",
+ "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.17.0.tgz",
+ "integrity": "sha512-ic+7JYiV8Vi2yzQGFWOkiZD5Z9z7O2Zhm9XMaTxdJExKasieFCr+yXZ/WmXsckHiKl12ar0y6XiXDx3m4RHn1w==",
+ "dev": true,
+ "requires": {
+ "path-parse": "^1.0.6"
+ }
+ },
+ "resolve-dir": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/resolve-dir/-/resolve-dir-1.0.1.tgz",
+ "integrity": "sha1-eaQGRMNivoLybv/nOcm7U4IEb0M=",
+ "dev": true,
+ "requires": {
+ "expand-tilde": "^2.0.0",
+ "global-modules": "^1.0.0"
+ }
+ },
+ "resolve-from": {
+ "version": "5.0.0",
+ "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-5.0.0.tgz",
+ "integrity": "sha512-qYg9KP24dD5qka9J47d0aVky0N+b4fTU89LN9iDnjB5waksiC49rvMB0PrUJQGoTmH50XPiqOvAjDfaijGxYZw==",
+ "dev": true
+ },
+ "resolve-url": {
+ "version": "0.2.1",
+ "resolved": "https://registry.npmjs.org/resolve-url/-/resolve-url-0.2.1.tgz",
+ "integrity": "sha1-LGN/53yJOv0qZj/iGqkIAGjiBSo=",
+ "dev": true
+ },
+ "responselike": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/responselike/-/responselike-1.0.2.tgz",
+ "integrity": "sha1-kYcg7ztjHFZCvgaPFa3lpG9Loec=",
+ "dev": true,
+ "requires": {
+ "lowercase-keys": "^1.0.0"
+ }
+ },
+ "ret": {
+ "version": "0.1.15",
+ "resolved": "https://registry.npmjs.org/ret/-/ret-0.1.15.tgz",
+ "integrity": "sha512-TTlYpa+OL+vMMNG24xSlQGEJ3B/RzEfUlLct7b5G/ytav+wPrplCpVMFuwzXbkecJrb6IYo1iFb0S9v37754mg==",
+ "dev": true
+ },
+ "retext-contractions": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/retext-contractions/-/retext-contractions-3.0.0.tgz",
+ "integrity": "sha512-Wn95agseXHTsoXvhavuTcnwUEb9TbL7QtnYkHeFMh8L53jdQbbooLX68cbpKyaaPbxYiomrgJpu7eB3arcr2rw==",
+ "dev": true,
+ "requires": {
+ "nlcst-is-literal": "^1.0.0",
+ "nlcst-to-string": "^2.0.0",
+ "unist-util-visit": "^1.1.0"
+ }
+ },
+ "retext-diacritics": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/retext-diacritics/-/retext-diacritics-2.0.0.tgz",
+ "integrity": "sha512-wr3mqJ9whlIG/q6vP601qR+/C7AMgc7O60kebs9kvN/HtTX7CpvFsKF68+yw86drMOMVCBJP8JB1Qlnj8CkY5A==",
+ "dev": true,
+ "requires": {
+ "match-casing": "^1.0.0",
+ "nlcst-search": "^1.0.0",
+ "nlcst-to-string": "^2.0.0",
+ "quotation": "^1.0.1",
+ "unist-util-position": "^3.0.0"
+ }
+ },
+ "retext-english": {
+ "version": "3.0.4",
+ "resolved": "https://registry.npmjs.org/retext-english/-/retext-english-3.0.4.tgz",
+ "integrity": "sha512-yr1PgaBDde+25aJXrnt3p1jvT8FVLVat2Bx8XeAWX13KXo8OT+3nWGU3HWxM4YFJvmfqvJYJZG2d7xxaO774gw==",
+ "dev": true,
+ "requires": {
+ "parse-english": "^4.0.0",
+ "unherit": "^1.0.4"
+ }
+ },
+ "retext-equality": {
+ "version": "4.5.0",
+ "resolved": "https://registry.npmjs.org/retext-equality/-/retext-equality-4.5.0.tgz",
+ "integrity": "sha512-k+fxJpR/mXpm8PvpMo3DPlGVtdTcxYLFhsV+CGd9XmC+22X26y2Xi6fC/rWYwWkxUJSMZiB9HveTN0Cy8VNYuw==",
+ "dev": true,
+ "requires": {
+ "lodash.difference": "^4.5.0",
+ "lodash.intersection": "^4.4.0",
+ "nlcst-normalize": "^2.0.0",
+ "nlcst-search": "^1.1.1",
+ "nlcst-to-string": "^2.0.0",
+ "object-keys": "^1.0.7",
+ "quotation": "^1.0.1",
+ "unist-util-is": "^4.0.0",
+ "unist-util-visit": "^2.0.0"
+ },
+ "dependencies": {
+ "unist-util-is": {
+ "version": "4.0.2",
+ "resolved": "https://registry.npmjs.org/unist-util-is/-/unist-util-is-4.0.2.tgz",
+ "integrity": "sha512-Ofx8uf6haexJwI1gxWMGg6I/dLnF2yE+KibhD3/diOqY2TinLcqHXCV6OI5gFVn3xQqDH+u0M625pfKwIwgBKQ==",
+ "dev": true
+ },
+ "unist-util-visit": {
+ "version": "2.0.3",
+ "resolved": "https://registry.npmjs.org/unist-util-visit/-/unist-util-visit-2.0.3.tgz",
+ "integrity": "sha512-iJ4/RczbJMkD0712mGktuGpm/U4By4FfDonL7N/9tATGIF4imikjOuagyMY53tnZq3NP6BcmlrHhEKAfGWjh7Q==",
+ "dev": true,
+ "requires": {
+ "@types/unist": "^2.0.0",
+ "unist-util-is": "^4.0.0",
+ "unist-util-visit-parents": "^3.0.0"
+ }
+ },
+ "unist-util-visit-parents": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/unist-util-visit-parents/-/unist-util-visit-parents-3.1.0.tgz",
+ "integrity": "sha512-0g4wbluTF93npyPrp/ymd3tCDTMnP0yo2akFD2FIBAYXq/Sga3lwaU1D8OYKbtpioaI6CkDcQ6fsMnmtzt7htw==",
+ "dev": true,
+ "requires": {
+ "@types/unist": "^2.0.0",
+ "unist-util-is": "^4.0.0"
+ }
+ }
+ }
+ },
+ "retext-indefinite-article": {
+ "version": "1.1.7",
+ "resolved": "https://registry.npmjs.org/retext-indefinite-article/-/retext-indefinite-article-1.1.7.tgz",
+ "integrity": "sha512-pqvEfEHL8uoeonbEjk8+d/hmyA3ozIeNTl4t3uurMcBpoIqN3+nbuMCFQrfDy2wjaKZ40KsLmEi+Zjv7m1ejLQ==",
+ "dev": true,
+ "requires": {
+ "format": "^0.2.2",
+ "nlcst-to-string": "^2.0.0",
+ "number-to-words": "^1.2.3",
+ "unist-util-is": "^3.0.0",
+ "unist-util-visit": "^1.1.0"
+ }
+ },
+ "retext-overuse": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/retext-overuse/-/retext-overuse-1.1.1.tgz",
+ "integrity": "sha1-Q+YoGzg8Byl56rqPKsCARmqPa/o=",
+ "dev": true,
+ "requires": {
+ "array-differ": "^1.0.0",
+ "nlcst-search": "^1.1.1",
+ "nlcst-to-string": "^1.0.0",
+ "object-keys": "^1.0.9",
+ "quotation": "^1.0.1",
+ "stopwords": "0.0.5",
+ "thesaurus": "0.0.0"
+ },
+ "dependencies": {
+ "nlcst-to-string": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/nlcst-to-string/-/nlcst-to-string-1.1.0.tgz",
+ "integrity": "sha1-ErLllHXsKffWGhRqebNZba/LNE0=",
+ "dev": true
+ }
+ }
+ },
+ "retext-passive": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/retext-passive/-/retext-passive-2.0.0.tgz",
+ "integrity": "sha512-IIOEH6p3Eu/sTYWEgDfdOBnEsDqxa6ZnHJjPNo6u4gfGct5rH4oaEaApTGpEIkYigUpylIHK0U6iWieqbBja0Q==",
+ "dev": true,
+ "requires": {
+ "lodash.difference": "^4.5.0",
+ "nlcst-search": "^1.0.0",
+ "nlcst-to-string": "^2.0.0",
+ "unist-util-find-before": "^2.0.0",
+ "unist-util-position": "^3.0.0"
+ }
+ },
+ "retext-profanities": {
+ "version": "5.0.0",
+ "resolved": "https://registry.npmjs.org/retext-profanities/-/retext-profanities-5.0.0.tgz",
+ "integrity": "sha512-zaPCKtrMDLs0U/2yN3V3rpYn1VZ3hW+AsEZdovWIRlEYZPCaUmkChOSob45LOQYJUnM+YZIahXuU9zHPm+aTKQ==",
+ "dev": true,
+ "requires": {
+ "cuss": "^1.15.0",
+ "lodash.difference": "^4.5.0",
+ "lodash.intersection": "^4.4.0",
+ "nlcst-search": "^1.0.0",
+ "nlcst-to-string": "^2.0.0",
+ "object-keys": "^1.0.9",
+ "pluralize": "^8.0.0",
+ "quotation": "^1.0.0"
+ }
+ },
+ "retext-quotes": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/retext-quotes/-/retext-quotes-3.0.0.tgz",
+ "integrity": "sha512-2htggeOFsrorhXrQvb4BI1O/r7CGVQZx/TysAT7wlVMsRUTZooiIGkBcLrcySa4sbqjsVgg1RjMy8K5O+gCQlg==",
+ "dev": true,
+ "requires": {
+ "nlcst-to-string": "^2.0.0",
+ "unist-util-is": "^3.0.0",
+ "unist-util-visit": "^1.1.0"
+ }
+ },
+ "retext-readability": {
+ "version": "5.0.0",
+ "resolved": "https://registry.npmjs.org/retext-readability/-/retext-readability-5.0.0.tgz",
+ "integrity": "sha512-g3AzwyfPKUH0KLP57+kpeRJFI3uxu7x+UP4NH5srtmAhyt7b45JJfzMNwt73cYraq2AG+aMlQ541cjeefjsQlg==",
+ "dev": true,
+ "requires": {
+ "automated-readability": "^1.0.0",
+ "coleman-liau": "^1.0.0",
+ "dale-chall": "^1.0.0",
+ "dale-chall-formula": "^1.0.0",
+ "flesch": "^1.0.0",
+ "gunning-fog": "^1.0.0",
+ "nlcst-to-string": "^2.0.0",
+ "smog-formula": "^1.0.0",
+ "spache": "^1.1.0",
+ "spache-formula": "^1.0.0",
+ "syllable": "^3.2.0",
+ "unist-util-visit": "^1.0.0"
+ }
+ },
+ "retext-redundant-acronyms": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/retext-redundant-acronyms/-/retext-redundant-acronyms-2.0.0.tgz",
+ "integrity": "sha512-9E62yXkDKYItNZO+lT4Pp0rAP8e/H7ppjYP681L0fk74xyItoE6okl0BMNVOa0W4XidLoGX2uWdlwWpCUeqQiw==",
+ "dev": true,
+ "requires": {
+ "nlcst-normalize": "^2.0.0",
+ "nlcst-search": "^1.0.0",
+ "nlcst-to-string": "^2.0.0",
+ "pluralize": "^8.0.0",
+ "quotation": "^1.0.0",
+ "unist-util-find-after": "^2.0.0",
+ "unist-util-position": "^3.0.0"
+ }
+ },
+ "retext-repeated-words": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/retext-repeated-words/-/retext-repeated-words-2.0.0.tgz",
+ "integrity": "sha512-CGqM88ViAKtSgdWGObTU974AdmfxyiyB19MvuQTBBW3crWYIS7p21m0sJH/pCnp11gVa0YOU+vqdNaSvRiLTFA==",
+ "dev": true,
+ "requires": {
+ "nlcst-to-string": "^2.0.0",
+ "unist-util-is": "^3.0.0",
+ "unist-util-visit": "^1.1.0"
+ }
+ },
+ "retext-sentence-spacing": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/retext-sentence-spacing/-/retext-sentence-spacing-3.0.0.tgz",
+ "integrity": "sha512-UWltTXZNh6kBwJJc0js3nOmbqze3LqhJg68jaRErNIPZQHtZ5hMn7h7f8kVY5OrbEL9XiY88m5DRPbFz16eTkQ==",
+ "dev": true,
+ "requires": {
+ "nlcst-to-string": "^2.0.0",
+ "unist-util-is": "^3.0.0",
+ "unist-util-visit": "^1.1.0"
+ }
+ },
+ "retext-spell": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/retext-spell/-/retext-spell-3.0.0.tgz",
+ "integrity": "sha512-PvJaRhEuHOsPXX/5h3EfqEpoWZWvDBnLoopn8TGfSfPIonjwpJNvYx8qBCRp1CQlUrPPO9Vi/Hl5CvNMp7GiSA==",
+ "dev": true,
+ "requires": {
+ "lodash.includes": "^4.2.0",
+ "nlcst-is-literal": "^1.0.0",
+ "nlcst-to-string": "^2.0.0",
+ "nspell": "^2.0.0",
+ "quotation": "^1.1.0",
+ "unist-util-visit": "^1.0.0"
+ }
+ },
+ "retext-syntax-urls": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/retext-syntax-urls/-/retext-syntax-urls-1.0.2.tgz",
+ "integrity": "sha512-Ud7i50IEP33OK9g5xCBIKh/DjoEdZi6sRJqnWDW3Wxt4B48llgfSbGLgiLeXZvJD923mOf7EAoLt/WjundXmwg==",
+ "dev": true,
+ "requires": {
+ "nlcst-to-string": "^2.0.0",
+ "unist-util-is": "^3.0.0",
+ "unist-util-modify-children": "^1.1.1",
+ "unist-util-position": "^3.0.0"
+ }
+ },
+ "retext-usage": {
+ "version": "0.5.0",
+ "resolved": "https://registry.npmjs.org/retext-usage/-/retext-usage-0.5.0.tgz",
+ "integrity": "sha1-qX3Jjn6Uf1z02Q5N1kJ1TWS6aJg=",
+ "dev": true,
+ "requires": {
+ "array-differ": "^1.0.0",
+ "nlcst-search": "^1.0.0",
+ "nlcst-to-string": "^1.1.0",
+ "object-keys": "^1.0.9",
+ "quotation": "^1.0.0"
+ },
+ "dependencies": {
+ "nlcst-to-string": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/nlcst-to-string/-/nlcst-to-string-1.1.0.tgz",
+ "integrity": "sha1-ErLllHXsKffWGhRqebNZba/LNE0=",
+ "dev": true
+ }
+ }
+ },
+ "rfdc": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/rfdc/-/rfdc-1.2.0.tgz",
+ "integrity": "sha512-ijLyszTMmUrXvjSooucVQwimGUk84eRcmCuLV8Xghe3UO85mjUtRAHRyoMM6XtyqbECaXuBWx18La3523sXINA==",
+ "dev": true
+ },
+ "rimraf": {
+ "version": "3.0.2",
+ "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-3.0.2.tgz",
+ "integrity": "sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA==",
+ "dev": true,
+ "requires": {
+ "glob": "^7.1.3"
+ }
+ },
+ "safe-buffer": {
+ "version": "5.2.1",
+ "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz",
+ "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==",
+ "dev": true
+ },
+ "safe-regex": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/safe-regex/-/safe-regex-1.1.0.tgz",
+ "integrity": "sha1-QKNmnzsHfR6UPURinhV91IAjvy4=",
+ "dev": true,
+ "requires": {
+ "ret": "~0.1.10"
+ }
+ },
+ "safer-buffer": {
+ "version": "2.1.2",
+ "resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz",
+ "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==",
+ "dev": true
+ },
+ "sax": {
+ "version": "1.2.4",
+ "resolved": "https://registry.npmjs.org/sax/-/sax-1.2.4.tgz",
+ "integrity": "sha512-NqVDv9TpANUjFm0N8uM5GxL36UgKi9/atZw+x7YFnQ8ckwFGKrl4xX4yWtrey3UJm5nP1kUbnYgLopqWNSRhWw==",
+ "dev": true
+ },
+ "semver": {
+ "version": "6.3.0",
+ "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz",
+ "integrity": "sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==",
+ "dev": true
+ },
+ "set-value": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/set-value/-/set-value-2.0.1.tgz",
+ "integrity": "sha512-JxHc1weCN68wRY0fhCoXpyK55m/XPHafOmK4UWD7m2CI14GMcFypt4w/0+NV5f/ZMby2F6S2wwA7fgynh9gWSw==",
+ "dev": true,
+ "requires": {
+ "extend-shallow": "^2.0.1",
+ "is-extendable": "^0.1.1",
+ "is-plain-object": "^2.0.3",
+ "split-string": "^3.0.1"
+ },
+ "dependencies": {
+ "extend-shallow": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-2.0.1.tgz",
+ "integrity": "sha1-Ua99YUrZqfYQ6huvu5idaxxWiQ8=",
+ "dev": true,
+ "requires": {
+ "is-extendable": "^0.1.0"
+ }
+ }
+ }
+ },
+ "setprototypeof": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/setprototypeof/-/setprototypeof-1.1.1.tgz",
+ "integrity": "sha512-JvdAWfbXeIGaZ9cILp38HntZSFSo3mWg6xGcJJsd+d4aRMOqauag1C63dJfDw7OaMYwEbHMOxEZ1lqVRYP2OAw==",
+ "dev": true
+ },
+ "shellsubstitute": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/shellsubstitute/-/shellsubstitute-1.2.0.tgz",
+ "integrity": "sha1-5PcCpQxRiw9v6YRRiQ1wWvKba3A=",
+ "dev": true
+ },
+ "sigmund": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/sigmund/-/sigmund-1.0.1.tgz",
+ "integrity": "sha1-P/IfGYytIXX587eBhT/ZTQ0ZtZA=",
+ "dev": true
+ },
+ "sliced": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/sliced/-/sliced-1.0.1.tgz",
+ "integrity": "sha1-CzpmK10Ewxd7GSa+qCsD+Dei70E=",
+ "dev": true
+ },
+ "smog-formula": {
+ "version": "1.0.5",
+ "resolved": "https://registry.npmjs.org/smog-formula/-/smog-formula-1.0.5.tgz",
+ "integrity": "sha512-ogE7LgeO/UeaEP1f1FPrQHwBWURWzb+VIQfw/1K3xR+uzI7o5uS9B5K6rw1+Eq+GtseAg3KGz2m49YylwnfCoQ==",
+ "dev": true
+ },
+ "snapdragon": {
+ "version": "0.8.2",
+ "resolved": "https://registry.npmjs.org/snapdragon/-/snapdragon-0.8.2.tgz",
+ "integrity": "sha512-FtyOnWN/wCHTVXOMwvSv26d+ko5vWlIDD6zoUJ7LW8vh+ZBC8QdljveRP+crNrtBwioEUWy/4dMtbBjA4ioNlg==",
+ "dev": true,
+ "requires": {
+ "base": "^0.11.1",
+ "debug": "^2.2.0",
+ "define-property": "^0.2.5",
+ "extend-shallow": "^2.0.1",
+ "map-cache": "^0.2.2",
+ "source-map": "^0.5.6",
+ "source-map-resolve": "^0.5.0",
+ "use": "^3.1.0"
+ },
+ "dependencies": {
+ "define-property": {
+ "version": "0.2.5",
+ "resolved": "https://registry.npmjs.org/define-property/-/define-property-0.2.5.tgz",
+ "integrity": "sha1-w1se+RjsPJkPmlvFe+BKrOxcgRY=",
+ "dev": true,
+ "requires": {
+ "is-descriptor": "^0.1.0"
+ }
+ },
+ "extend-shallow": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-2.0.1.tgz",
+ "integrity": "sha1-Ua99YUrZqfYQ6huvu5idaxxWiQ8=",
+ "dev": true,
+ "requires": {
+ "is-extendable": "^0.1.0"
+ }
+ }
+ }
+ },
+ "snapdragon-node": {
+ "version": "2.1.1",
+ "resolved": "https://registry.npmjs.org/snapdragon-node/-/snapdragon-node-2.1.1.tgz",
+ "integrity": "sha512-O27l4xaMYt/RSQ5TR3vpWCAB5Kb/czIcqUFOM/C4fYcLnbZUc1PkjTAMjof2pBWaSTwOUd6qUHcFGVGj7aIwnw==",
+ "dev": true,
+ "requires": {
+ "define-property": "^1.0.0",
+ "isobject": "^3.0.0",
+ "snapdragon-util": "^3.0.1"
+ },
+ "dependencies": {
+ "define-property": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/define-property/-/define-property-1.0.0.tgz",
+ "integrity": "sha1-dp66rz9KY6rTr56NMEybvnm/sOY=",
+ "dev": true,
+ "requires": {
+ "is-descriptor": "^1.0.0"
+ }
+ },
+ "is-accessor-descriptor": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/is-accessor-descriptor/-/is-accessor-descriptor-1.0.0.tgz",
+ "integrity": "sha512-m5hnHTkcVsPfqx3AKlyttIPb7J+XykHvJP2B9bZDjlhLIoEq4XoK64Vg7boZlVWYK6LUY94dYPEE7Lh0ZkZKcQ==",
+ "dev": true,
+ "requires": {
+ "kind-of": "^6.0.0"
+ }
+ },
+ "is-data-descriptor": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/is-data-descriptor/-/is-data-descriptor-1.0.0.tgz",
+ "integrity": "sha512-jbRXy1FmtAoCjQkVmIVYwuuqDFUbaOeDjmed1tOGPrsMhtJA4rD9tkgA0F1qJ3gRFRXcHYVkdeaP50Q5rE/jLQ==",
+ "dev": true,
+ "requires": {
+ "kind-of": "^6.0.0"
+ }
+ },
+ "is-descriptor": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/is-descriptor/-/is-descriptor-1.0.2.tgz",
+ "integrity": "sha512-2eis5WqQGV7peooDyLmNEPUrps9+SXX5c9pL3xEB+4e9HnGuDa7mB7kHxHw4CbqS9k1T2hOH3miL8n8WtiYVtg==",
+ "dev": true,
+ "requires": {
+ "is-accessor-descriptor": "^1.0.0",
+ "is-data-descriptor": "^1.0.0",
+ "kind-of": "^6.0.2"
+ }
+ }
+ }
+ },
+ "snapdragon-util": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/snapdragon-util/-/snapdragon-util-3.0.1.tgz",
+ "integrity": "sha512-mbKkMdQKsjX4BAL4bRYTj21edOf8cN7XHdYUJEe+Zn99hVEYcMvKPct1IqNe7+AZPirn8BCDOQBHQZknqmKlZQ==",
+ "dev": true,
+ "requires": {
+ "kind-of": "^3.2.0"
+ },
+ "dependencies": {
+ "kind-of": {
+ "version": "3.2.2",
+ "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz",
+ "integrity": "sha1-MeohpzS6ubuw8yRm2JOupR5KPGQ=",
+ "dev": true,
+ "requires": {
+ "is-buffer": "^1.1.5"
+ }
+ }
+ }
+ },
+ "socket.io": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/socket.io/-/socket.io-3.1.0.tgz",
+ "integrity": "sha512-Aqg2dlRh6xSJvRYK31ksG65q4kmBOqU4g+1ukhPcoT6wNGYoIwSYPlCPuRwOO9pgLUajojGFztl6+V2opmKcww==",
+ "dev": true,
+ "requires": {
+ "@types/cookie": "^0.4.0",
+ "@types/cors": "^2.8.8",
+ "@types/node": "^14.14.10",
+ "accepts": "~1.3.4",
+ "base64id": "~2.0.0",
+ "debug": "~4.3.1",
+ "engine.io": "~4.1.0",
+ "socket.io-adapter": "~2.1.0",
+ "socket.io-parser": "~4.0.3"
+ },
+ "dependencies": {
+ "debug": {
+ "version": "4.3.1",
+ "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.1.tgz",
+ "integrity": "sha512-doEwdvm4PCeK4K3RQN2ZC2BYUBaxwLARCqZmMjtF8a51J2Rb0xpVloFRnCODwqjpwnAoao4pelN8l3RJdv3gRQ==",
+ "dev": true,
+ "requires": {
+ "ms": "2.1.2"
+ }
+ },
+ "ms": {
+ "version": "2.1.2",
+ "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz",
+ "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==",
+ "dev": true
+ }
+ }
+ },
+ "socket.io-adapter": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/socket.io-adapter/-/socket.io-adapter-2.1.0.tgz",
+ "integrity": "sha512-+vDov/aTsLjViYTwS9fPy5pEtTkrbEKsw2M+oVSoFGw6OD1IpvlV1VPhUzNbofCQ8oyMbdYJqDtGdmHQK6TdPg==",
+ "dev": true
+ },
+ "socket.io-parser": {
+ "version": "4.0.4",
+ "resolved": "https://registry.npmjs.org/socket.io-parser/-/socket.io-parser-4.0.4.tgz",
+ "integrity": "sha512-t+b0SS+IxG7Rxzda2EVvyBZbvFPBCjJoyHuE0P//7OAsN23GItzDRdWa6ALxZI/8R5ygK7jAR6t028/z+7295g==",
+ "dev": true,
+ "requires": {
+ "@types/component-emitter": "^1.2.10",
+ "component-emitter": "~1.3.0",
+ "debug": "~4.3.1"
+ },
+ "dependencies": {
+ "debug": {
+ "version": "4.3.1",
+ "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.1.tgz",
+ "integrity": "sha512-doEwdvm4PCeK4K3RQN2ZC2BYUBaxwLARCqZmMjtF8a51J2Rb0xpVloFRnCODwqjpwnAoao4pelN8l3RJdv3gRQ==",
+ "dev": true,
+ "requires": {
+ "ms": "2.1.2"
+ }
+ },
+ "ms": {
+ "version": "2.1.2",
+ "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz",
+ "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==",
+ "dev": true
+ }
+ }
+ },
+ "source-map": {
+ "version": "0.5.7",
+ "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.5.7.tgz",
+ "integrity": "sha1-igOdLRAh0i0eoUyA2OpGi6LvP8w=",
+ "dev": true
+ },
+ "source-map-resolve": {
+ "version": "0.5.3",
+ "resolved": "https://registry.npmjs.org/source-map-resolve/-/source-map-resolve-0.5.3.tgz",
+ "integrity": "sha512-Htz+RnsXWk5+P2slx5Jh3Q66vhQj1Cllm0zvnaY98+NFx+Dv2CF/f5O/t8x+KaNdrdIAsruNzoh/KpialbqAnw==",
+ "dev": true,
+ "requires": {
+ "atob": "^2.1.2",
+ "decode-uri-component": "^0.2.0",
+ "resolve-url": "^0.2.1",
+ "source-map-url": "^0.4.0",
+ "urix": "^0.1.0"
+ }
+ },
+ "source-map-url": {
+ "version": "0.4.0",
+ "resolved": "https://registry.npmjs.org/source-map-url/-/source-map-url-0.4.0.tgz",
+ "integrity": "sha1-PpNdfd1zYxuXZZlW1VEo6HtQhKM=",
+ "dev": true
+ },
+ "spache": {
+ "version": "1.1.5",
+ "resolved": "https://registry.npmjs.org/spache/-/spache-1.1.5.tgz",
+ "integrity": "sha512-fblcef79rv7R/pRogLcnt8pYEWc5oLJ8RS7hArT6kCHp5gaVPG8EVzy2FjLbpCclfu77E6GpuNdnP6muTe4YxQ==",
+ "dev": true
+ },
+ "spache-formula": {
+ "version": "1.0.5",
+ "resolved": "https://registry.npmjs.org/spache-formula/-/spache-formula-1.0.5.tgz",
+ "integrity": "sha512-eYX6hYxeFRZbkcpunMrxhKcGAydeT0hZ958Q20J/UA119EybMrU+KuiSSKNbCwR4D55Yk8J6APg16yuWGtjcDw==",
+ "dev": true
+ },
+ "split-string": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/split-string/-/split-string-3.1.0.tgz",
+ "integrity": "sha512-NzNVhJDYpwceVVii8/Hu6DKfD2G+NrQHlS/V/qgv763EYudVwEcMQNxd2lh+0VrUByXN/oJkl5grOhYWvQUYiw==",
+ "dev": true,
+ "requires": {
+ "extend-shallow": "^3.0.0"
+ }
+ },
+ "sprintf-js": {
+ "version": "1.1.2",
+ "resolved": "https://registry.npmjs.org/sprintf-js/-/sprintf-js-1.1.2.tgz",
+ "integrity": "sha512-VE0SOVEHCk7Qc8ulkWw3ntAzXuqf7S2lvwQaDLRnUeIEaKNQJzV6BwmLKhOqT61aGhfUMrXeaBk+oDGCzvhcug==",
+ "dev": true
+ },
+ "state-toggle": {
+ "version": "1.0.3",
+ "resolved": "https://registry.npmjs.org/state-toggle/-/state-toggle-1.0.3.tgz",
+ "integrity": "sha512-d/5Z4/2iiCnHw6Xzghyhb+GcmF89bxwgXG60wjIiZaxnymbyOmI8Hk4VqHXiVVp6u2ysaskFfXg3ekCj4WNftQ==",
+ "dev": true
+ },
+ "static-extend": {
+ "version": "0.1.2",
+ "resolved": "https://registry.npmjs.org/static-extend/-/static-extend-0.1.2.tgz",
+ "integrity": "sha1-YICcOcv/VTNyJv1eC1IPNB8ftcY=",
+ "dev": true,
+ "requires": {
+ "define-property": "^0.2.5",
+ "object-copy": "^0.1.0"
+ },
+ "dependencies": {
+ "define-property": {
+ "version": "0.2.5",
+ "resolved": "https://registry.npmjs.org/define-property/-/define-property-0.2.5.tgz",
+ "integrity": "sha1-w1se+RjsPJkPmlvFe+BKrOxcgRY=",
+ "dev": true,
+ "requires": {
+ "is-descriptor": "^0.1.0"
+ }
+ }
+ }
+ },
+ "statuses": {
+ "version": "1.5.0",
+ "resolved": "https://registry.npmjs.org/statuses/-/statuses-1.5.0.tgz",
+ "integrity": "sha1-Fhx9rBd2Wf2YEfQ3cfqZOBR4Yow=",
+ "dev": true
+ },
+ "stopwords": {
+ "version": "0.0.5",
+ "resolved": "https://registry.npmjs.org/stopwords/-/stopwords-0.0.5.tgz",
+ "integrity": "sha1-Wa2dzrluoPonRZ4c7+DuDm3f1RQ=",
+ "dev": true
+ },
+ "streamroller": {
+ "version": "2.2.4",
+ "resolved": "https://registry.npmjs.org/streamroller/-/streamroller-2.2.4.tgz",
+ "integrity": "sha512-OG79qm3AujAM9ImoqgWEY1xG4HX+Lw+yY6qZj9R1K2mhF5bEmQ849wvrb+4vt4jLMLzwXttJlQbOdPOQVRv7DQ==",
+ "dev": true,
+ "requires": {
+ "date-format": "^2.1.0",
+ "debug": "^4.1.1",
+ "fs-extra": "^8.1.0"
+ },
+ "dependencies": {
+ "date-format": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/date-format/-/date-format-2.1.0.tgz",
+ "integrity": "sha512-bYQuGLeFxhkxNOF3rcMtiZxvCBAquGzZm6oWA1oZ0g2THUzivaRhv8uOhdr19LmoobSOLoIAxeUK2RdbM8IFTA==",
+ "dev": true
+ },
+ "debug": {
+ "version": "4.3.1",
+ "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.1.tgz",
+ "integrity": "sha512-doEwdvm4PCeK4K3RQN2ZC2BYUBaxwLARCqZmMjtF8a51J2Rb0xpVloFRnCODwqjpwnAoao4pelN8l3RJdv3gRQ==",
+ "dev": true,
+ "requires": {
+ "ms": "2.1.2"
+ }
+ },
+ "ms": {
+ "version": "2.1.2",
+ "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz",
+ "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==",
+ "dev": true
+ }
+ }
+ },
+ "string-width": {
+ "version": "4.2.0",
+ "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.0.tgz",
+ "integrity": "sha512-zUz5JD+tgqtuDjMhwIg5uFVV3dtqZ9yQJlZVfq4I01/K5Paj5UHj7VyrQOJvzawSVlKpObApbfD0Ed6yJc+1eg==",
+ "dev": true,
+ "requires": {
+ "emoji-regex": "^8.0.0",
+ "is-fullwidth-code-point": "^3.0.0",
+ "strip-ansi": "^6.0.0"
+ }
+ },
+ "string_decoder": {
+ "version": "1.3.0",
+ "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.3.0.tgz",
+ "integrity": "sha512-hkRX8U1WjJFd8LsDJ2yQ/wWWxaopEsABU1XfkM8A+j0+85JAGppt16cr1Whg6KIbb4okU6Mql6BOj+uup/wKeA==",
+ "dev": true,
+ "requires": {
+ "safe-buffer": "~5.2.0"
+ }
+ },
+ "stringify-entities": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/stringify-entities/-/stringify-entities-2.0.0.tgz",
+ "integrity": "sha512-fqqhZzXyAM6pGD9lky/GOPq6V4X0SeTAFBl0iXb/BzOegl40gpf/bV3QQP7zULNYvjr6+Dx8SCaDULjVoOru0A==",
+ "dev": true,
+ "requires": {
+ "character-entities-html4": "^1.0.0",
+ "character-entities-legacy": "^1.0.0",
+ "is-alphanumerical": "^1.0.0",
+ "is-decimal": "^1.0.2",
+ "is-hexadecimal": "^1.0.0"
+ }
+ },
+ "strip-ansi": {
+ "version": "6.0.0",
+ "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.0.tgz",
+ "integrity": "sha512-AuvKTrTfQNYNIctbR1K/YGTR1756GycPsg7b9bdV9Duqur4gv6aKqHXah67Z8ImS7WEz5QVcOtlfW2rZEugt6w==",
+ "dev": true,
+ "requires": {
+ "ansi-regex": "^5.0.0"
+ }
+ },
+ "strip-json-comments": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-2.0.1.tgz",
+ "integrity": "sha1-PFMZQukIwml8DsNEhYwobHygpgo=",
+ "dev": true
+ },
+ "supports-color": {
+ "version": "5.5.0",
+ "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.5.0.tgz",
+ "integrity": "sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==",
+ "dev": true,
+ "requires": {
+ "has-flag": "^3.0.0"
+ }
+ },
+ "syllable": {
+ "version": "3.6.1",
+ "resolved": "https://registry.npmjs.org/syllable/-/syllable-3.6.1.tgz",
+ "integrity": "sha512-L2yAT4DRi9rwHpHMResxA/VKSM9T7X+eRAOEKoAFvHJxaxjtxT2Rt8qfB6pbrxeOZWsHu+ON0kTc9Oo0lU2PHA==",
+ "dev": true,
+ "requires": {
+ "normalize-strings": "^1.1.0",
+ "pluralize": "^7.0.0",
+ "trim": "0.0.1"
+ },
+ "dependencies": {
+ "pluralize": {
+ "version": "7.0.0",
+ "resolved": "https://registry.npmjs.org/pluralize/-/pluralize-7.0.0.tgz",
+ "integrity": "sha512-ARhBOdzS3e41FbkW/XWrTEtukqqLoK5+Z/4UeDaLuSW+39JPeFgs4gCGqsrJHVZX0fUrx//4OF0K1CUGwlIFow==",
+ "dev": true
+ }
+ }
+ },
+ "text-table": {
+ "version": "0.2.0",
+ "resolved": "https://registry.npmjs.org/text-table/-/text-table-0.2.0.tgz",
+ "integrity": "sha1-f17oI66AUgfACvLfSoTsP8+lcLQ=",
+ "dev": true
+ },
+ "thesaurus": {
+ "version": "0.0.0",
+ "resolved": "https://registry.npmjs.org/thesaurus/-/thesaurus-0.0.0.tgz",
+ "integrity": "sha1-CjszVvwdgYxsFsI/Vrp/PFolyQU=",
+ "dev": true
+ },
+ "tmp": {
+ "version": "0.2.1",
+ "resolved": "https://registry.npmjs.org/tmp/-/tmp-0.2.1.tgz",
+ "integrity": "sha512-76SUhtfqR2Ijn+xllcI5P1oyannHNHByD80W1q447gU3mp9G9PSpGdWmjUOHRDPiHYacIk66W7ubDTuPF3BEtQ==",
+ "dev": true,
+ "requires": {
+ "rimraf": "^3.0.0"
+ }
+ },
+ "to-fast-properties": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/to-fast-properties/-/to-fast-properties-2.0.0.tgz",
+ "integrity": "sha1-3F5pjL0HkmW8c+A3doGk5Og/YW4=",
+ "dev": true
+ },
+ "to-object-path": {
+ "version": "0.3.0",
+ "resolved": "https://registry.npmjs.org/to-object-path/-/to-object-path-0.3.0.tgz",
+ "integrity": "sha1-KXWIt7Dn4KwI4E5nL4XB9JmeF68=",
+ "dev": true,
+ "requires": {
+ "kind-of": "^3.0.2"
+ },
+ "dependencies": {
+ "kind-of": {
+ "version": "3.2.2",
+ "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz",
+ "integrity": "sha1-MeohpzS6ubuw8yRm2JOupR5KPGQ=",
+ "dev": true,
+ "requires": {
+ "is-buffer": "^1.1.5"
+ }
+ }
+ }
+ },
+ "to-readable-stream": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/to-readable-stream/-/to-readable-stream-1.0.0.tgz",
+ "integrity": "sha512-Iq25XBt6zD5npPhlLVXGFN3/gyR2/qODcKNNyTMd4vbm39HUaOiAM4PMq0eMVC/Tkxz+Zjdsc55g9yyz+Yq00Q==",
+ "dev": true
+ },
+ "to-regex": {
+ "version": "3.0.2",
+ "resolved": "https://registry.npmjs.org/to-regex/-/to-regex-3.0.2.tgz",
+ "integrity": "sha512-FWtleNAtZ/Ki2qtqej2CXTOayOH9bHDQF+Q48VpWyDXjbYxA4Yz8iDB31zXOBUlOHHKidDbqGVrTUvQMPmBGBw==",
+ "dev": true,
+ "requires": {
+ "define-property": "^2.0.2",
+ "extend-shallow": "^3.0.2",
+ "regex-not": "^1.0.2",
+ "safe-regex": "^1.1.0"
+ }
+ },
+ "to-regex-range": {
+ "version": "2.1.1",
+ "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-2.1.1.tgz",
+ "integrity": "sha1-fIDBe53+vlmeJzZ+DU3VWQFB2zg=",
+ "dev": true,
+ "requires": {
+ "is-number": "^3.0.0",
+ "repeat-string": "^1.6.1"
+ }
+ },
+ "to-vfile": {
+ "version": "6.1.0",
+ "resolved": "https://registry.npmjs.org/to-vfile/-/to-vfile-6.1.0.tgz",
+ "integrity": "sha512-BxX8EkCxOAZe+D/ToHdDsJcVI4HqQfmw0tCkp31zf3dNP/XWIAjU4CmeuSwsSoOzOTqHPOL0KUzyZqJplkD0Qw==",
+ "dev": true,
+ "requires": {
+ "is-buffer": "^2.0.0",
+ "vfile": "^4.0.0"
+ },
+ "dependencies": {
+ "is-buffer": {
+ "version": "2.0.4",
+ "resolved": "https://registry.npmjs.org/is-buffer/-/is-buffer-2.0.4.tgz",
+ "integrity": "sha512-Kq1rokWXOPXWuaMAqZiJW4XxsmD9zGx9q4aePabbn3qCRGedtH7Cm+zV8WETitMfu1wdh+Rvd6w5egwSngUX2A==",
+ "dev": true
+ }
+ }
+ },
+ "toidentifier": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/toidentifier/-/toidentifier-1.0.0.tgz",
+ "integrity": "sha512-yaOH/Pk/VEhBWWTlhI+qXxDFXlejDGcQipMlyxda9nthulaxLZUNcUqFxokp0vcYnvteJln5FNQDRrxj3YcbVw==",
+ "dev": true
+ },
+ "too-wordy": {
+ "version": "0.2.2",
+ "resolved": "https://registry.npmjs.org/too-wordy/-/too-wordy-0.2.2.tgz",
+ "integrity": "sha512-ePZfjs1ajL4b8jT4MeVId+9Ci5hJCzAtNIEXIHyFYmKmQuX+eHC/RNv6tuLMUhrGrhJ+sYWW/lBF/LKILHGZEA==",
+ "dev": true
+ },
+ "trim": {
+ "version": "0.0.1",
+ "resolved": "https://registry.npmjs.org/trim/-/trim-0.0.1.tgz",
+ "integrity": "sha1-WFhUf2spB1fulczMZm+1AITEYN0=",
+ "dev": true
+ },
+ "trim-trailing-lines": {
+ "version": "1.1.3",
+ "resolved": "https://registry.npmjs.org/trim-trailing-lines/-/trim-trailing-lines-1.1.3.tgz",
+ "integrity": "sha512-4ku0mmjXifQcTVfYDfR5lpgV7zVqPg6zV9rdZmwOPqq0+Zq19xDqEgagqVbc4pOOShbncuAOIs59R3+3gcF3ZA==",
+ "dev": true
+ },
+ "trough": {
+ "version": "1.0.5",
+ "resolved": "https://registry.npmjs.org/trough/-/trough-1.0.5.tgz",
+ "integrity": "sha512-rvuRbTarPXmMb79SmzEp8aqXNKcK+y0XaB298IXueQ8I2PsrATcPBCSPyK/dDNa2iWOhKlfNnOjdAOTBU/nkFA==",
+ "dev": true
+ },
+ "type-fest": {
+ "version": "0.3.1",
+ "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.3.1.tgz",
+ "integrity": "sha512-cUGJnCdr4STbePCgqNFbpVNCepa+kAVohJs1sLhxzdH+gnEoOd8VhbYa7pD3zZYGiURWM2xzEII3fQcRizDkYQ==",
+ "dev": true
+ },
+ "type-is": {
+ "version": "1.6.18",
+ "resolved": "https://registry.npmjs.org/type-is/-/type-is-1.6.18.tgz",
+ "integrity": "sha512-TkRKr9sUTxEH8MdfuCSP7VizJyzRNMjj2J2do2Jr3Kym598JVdEksuzPQCnlFPW4ky9Q+iA+ma9BGm06XQBy8g==",
+ "dev": true,
+ "requires": {
+ "media-typer": "0.3.0",
+ "mime-types": "~2.1.24"
+ }
+ },
+ "typedarray": {
+ "version": "0.0.6",
+ "resolved": "https://registry.npmjs.org/typedarray/-/typedarray-0.0.6.tgz",
+ "integrity": "sha1-hnrHTjhkGHsdPUfZlqeOxciDB3c=",
+ "dev": true
+ },
+ "ua-parser-js": {
+ "version": "0.7.23",
+ "resolved": "https://registry.npmjs.org/ua-parser-js/-/ua-parser-js-0.7.23.tgz",
+ "integrity": "sha512-m4hvMLxgGHXG3O3fQVAyyAQpZzDOvwnhOTjYz5Xmr7r/+LpkNy3vJXdVRWgd1TkAb7NGROZuSy96CrlNVjA7KA==",
+ "dev": true
+ },
+ "unc-path-regex": {
+ "version": "0.1.2",
+ "resolved": "https://registry.npmjs.org/unc-path-regex/-/unc-path-regex-0.1.2.tgz",
+ "integrity": "sha1-5z3T17DXxe2G+6xrCufYxqadUPo=",
+ "dev": true
+ },
+ "underscore": {
+ "version": "1.10.2",
+ "resolved": "https://registry.npmjs.org/underscore/-/underscore-1.10.2.tgz",
+ "integrity": "sha512-N4P+Q/BuyuEKFJ43B9gYuOj4TQUHXX+j2FqguVOpjkssLUUrnJofCcBccJSCoeturDoZU6GorDTHSvUDlSQbTg==",
+ "dev": true
+ },
+ "underscore.string": {
+ "version": "3.3.5",
+ "resolved": "https://registry.npmjs.org/underscore.string/-/underscore.string-3.3.5.tgz",
+ "integrity": "sha512-g+dpmgn+XBneLmXXo+sGlW5xQEt4ErkS3mgeN2GFbremYeMBSJKr9Wf2KJplQVaiPY/f7FN6atosWYNm9ovrYg==",
+ "dev": true,
+ "requires": {
+ "sprintf-js": "^1.0.3",
+ "util-deprecate": "^1.0.2"
+ }
+ },
+ "unherit": {
+ "version": "1.1.3",
+ "resolved": "https://registry.npmjs.org/unherit/-/unherit-1.1.3.tgz",
+ "integrity": "sha512-Ft16BJcnapDKp0+J/rqFC3Rrk6Y/Ng4nzsC028k2jdDII/rdZ7Wd3pPT/6+vIIxRagwRc9K0IUX0Ra4fKvw+WQ==",
+ "dev": true,
+ "requires": {
+ "inherits": "^2.0.0",
+ "xtend": "^4.0.0"
+ }
+ },
+ "unified": {
+ "version": "8.4.2",
+ "resolved": "https://registry.npmjs.org/unified/-/unified-8.4.2.tgz",
+ "integrity": "sha512-JCrmN13jI4+h9UAyKEoGcDZV+i1E7BLFuG7OsaDvTXI5P0qhHX+vZO/kOhz9jn8HGENDKbwSeB0nVOg4gVStGA==",
+ "dev": true,
+ "requires": {
+ "bail": "^1.0.0",
+ "extend": "^3.0.0",
+ "is-plain-obj": "^2.0.0",
+ "trough": "^1.0.0",
+ "vfile": "^4.0.0"
+ }
+ },
+ "unified-args": {
+ "version": "7.1.0",
+ "resolved": "https://registry.npmjs.org/unified-args/-/unified-args-7.1.0.tgz",
+ "integrity": "sha512-soi9Rn7l5c1g0RfElSCHMwaxeiclSI0EsS3uZmMPUOfwMeeeZjLpNmHAowV9iSlQh59iiZhSMyQu9lB8WnIz5g==",
+ "dev": true,
+ "requires": {
+ "camelcase": "^5.0.0",
+ "chalk": "^2.0.0",
+ "chokidar": "^3.0.0",
+ "fault": "^1.0.2",
+ "json5": "^2.0.0",
+ "minimist": "^1.2.0",
+ "text-table": "^0.2.0",
+ "unified-engine": "^7.0.0"
+ }
+ },
+ "unified-engine": {
+ "version": "7.0.0",
+ "resolved": "https://registry.npmjs.org/unified-engine/-/unified-engine-7.0.0.tgz",
+ "integrity": "sha512-zH/MvcISpWg3JZtCoY/GYBw1WnVHkhnPoMBWpmuvAifCPSS9mzT9EbtimesJp6t2nnr/ojI0mg3TmkO1CjIwVA==",
+ "dev": true,
+ "requires": {
+ "concat-stream": "^2.0.0",
+ "debug": "^4.0.0",
+ "fault": "^1.0.0",
+ "figures": "^3.0.0",
+ "fn-name": "^2.0.1",
+ "glob": "^7.0.3",
+ "ignore": "^5.0.0",
+ "is-empty": "^1.0.0",
+ "is-hidden": "^1.0.1",
+ "is-object": "^1.0.1",
+ "js-yaml": "^3.6.1",
+ "load-plugin": "^2.0.0",
+ "parse-json": "^4.0.0",
+ "to-vfile": "^6.0.0",
+ "trough": "^1.0.0",
+ "unist-util-inspect": "^4.1.2",
+ "vfile-reporter": "^6.0.0",
+ "vfile-statistics": "^1.1.0",
+ "x-is-string": "^0.1.0",
+ "xtend": "^4.0.1"
+ },
+ "dependencies": {
+ "debug": {
+ "version": "4.1.1",
+ "resolved": "https://registry.npmjs.org/debug/-/debug-4.1.1.tgz",
+ "integrity": "sha512-pYAIzeRo8J6KPEaJ0VWOh5Pzkbw/RetuzehGM7QRRX5he4fPHx2rdKMB256ehJCkX+XRQm16eZLqLNS8RSZXZw==",
+ "dev": true,
+ "requires": {
+ "ms": "^2.1.1"
+ }
+ },
+ "ms": {
+ "version": "2.1.2",
+ "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz",
+ "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==",
+ "dev": true
+ }
+ }
+ },
+ "unified-lint-rule": {
+ "version": "1.0.6",
+ "resolved": "https://registry.npmjs.org/unified-lint-rule/-/unified-lint-rule-1.0.6.tgz",
+ "integrity": "sha512-YPK15YBFwnsVorDFG/u0cVVQN5G2a3V8zv5/N6KN3TCG+ajKtaALcy7u14DCSrJI+gZeyYquFL9cioJXOGXSvg==",
+ "dev": true,
+ "requires": {
+ "wrapped": "^1.0.1"
+ }
+ },
+ "unified-message-control": {
+ "version": "1.0.4",
+ "resolved": "https://registry.npmjs.org/unified-message-control/-/unified-message-control-1.0.4.tgz",
+ "integrity": "sha512-e1dEtN4Z/TvLn/qHm+xeZpzqhJTtfZusFErk336kkZVpqrJYiV9ptxq+SbRPFMlN0OkjDYHmVJ929KYjsMTo3g==",
+ "dev": true,
+ "requires": {
+ "trim": "0.0.1",
+ "unist-util-visit": "^1.0.0",
+ "vfile-location": "^2.0.0"
+ }
+ },
+ "union-value": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/union-value/-/union-value-1.0.1.tgz",
+ "integrity": "sha512-tJfXmxMeWYnczCVs7XAEvIV7ieppALdyepWMkHkwciRpZraG/xwT+s2JN8+pr1+8jCRf80FFzvr+MpQeeoF4Xg==",
+ "dev": true,
+ "requires": {
+ "arr-union": "^3.1.0",
+ "get-value": "^2.0.6",
+ "is-extendable": "^0.1.1",
+ "set-value": "^2.0.1"
+ }
+ },
+ "unist-util-find-after": {
+ "version": "2.0.4",
+ "resolved": "https://registry.npmjs.org/unist-util-find-after/-/unist-util-find-after-2.0.4.tgz",
+ "integrity": "sha512-zo0ShIr+E/aU9xSK7JC9Kb+WP9seTFCuqVYdo5+HJSjN009XMfhiA1FIExEKzdDP1UsgvKGleGlB/pSdTSqZww==",
+ "dev": true,
+ "requires": {
+ "unist-util-is": "^3.0.0"
+ }
+ },
+ "unist-util-find-before": {
+ "version": "2.0.5",
+ "resolved": "https://registry.npmjs.org/unist-util-find-before/-/unist-util-find-before-2.0.5.tgz",
+ "integrity": "sha512-6WeMwHCC+CLB6D78xdqLq9DVgRope3rZVAUp4bUTevLKq63adgZTHqQ9Xagd5d69vUqTKZA1kA+DQbdaVgd65g==",
+ "dev": true,
+ "requires": {
+ "unist-util-is": "^4.0.0"
+ },
+ "dependencies": {
+ "unist-util-is": {
+ "version": "4.0.2",
+ "resolved": "https://registry.npmjs.org/unist-util-is/-/unist-util-is-4.0.2.tgz",
+ "integrity": "sha512-Ofx8uf6haexJwI1gxWMGg6I/dLnF2yE+KibhD3/diOqY2TinLcqHXCV6OI5gFVn3xQqDH+u0M625pfKwIwgBKQ==",
+ "dev": true
+ }
+ }
+ },
+ "unist-util-generated": {
+ "version": "1.1.5",
+ "resolved": "https://registry.npmjs.org/unist-util-generated/-/unist-util-generated-1.1.5.tgz",
+ "integrity": "sha512-1TC+NxQa4N9pNdayCYA1EGUOCAO0Le3fVp7Jzns6lnua/mYgwHo0tz5WUAfrdpNch1RZLHc61VZ1SDgrtNXLSw==",
+ "dev": true
+ },
+ "unist-util-inspect": {
+ "version": "4.1.4",
+ "resolved": "https://registry.npmjs.org/unist-util-inspect/-/unist-util-inspect-4.1.4.tgz",
+ "integrity": "sha512-7xxyvKiZ1SC9vL5qrMqKub1T31gRHfau4242F69CcaOrXt//5PmRVOmDZ36UAEgiT+tZWzmQmbNZn+mVtnR9HQ==",
+ "dev": true,
+ "requires": {
+ "is-empty": "^1.0.0"
+ }
+ },
+ "unist-util-is": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/unist-util-is/-/unist-util-is-3.0.0.tgz",
+ "integrity": "sha512-sVZZX3+kspVNmLWBPAB6r+7D9ZgAFPNWm66f7YNb420RlQSbn+n8rG8dGZSkrER7ZIXGQYNm5pqC3v3HopH24A==",
+ "dev": true
+ },
+ "unist-util-modify-children": {
+ "version": "1.1.6",
+ "resolved": "https://registry.npmjs.org/unist-util-modify-children/-/unist-util-modify-children-1.1.6.tgz",
+ "integrity": "sha512-TOA6W9QLil+BrHqIZNR4o6IA5QwGOveMbnQxnWYq+7EFORx9vz/CHrtzF36zWrW61E2UKw7sM1KPtIgeceVwXw==",
+ "dev": true,
+ "requires": {
+ "array-iterate": "^1.0.0"
+ }
+ },
+ "unist-util-position": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/unist-util-position/-/unist-util-position-3.1.0.tgz",
+ "integrity": "sha512-w+PkwCbYSFw8vpgWD0v7zRCl1FpY3fjDSQ3/N/wNd9Ffa4gPi8+4keqt99N3XW6F99t/mUzp2xAhNmfKWp95QA==",
+ "dev": true
+ },
+ "unist-util-remove-position": {
+ "version": "1.1.4",
+ "resolved": "https://registry.npmjs.org/unist-util-remove-position/-/unist-util-remove-position-1.1.4.tgz",
+ "integrity": "sha512-tLqd653ArxJIPnKII6LMZwH+mb5q+n/GtXQZo6S6csPRs5zB0u79Yw8ouR3wTw8wxvdJFhpP6Y7jorWdCgLO0A==",
+ "dev": true,
+ "requires": {
+ "unist-util-visit": "^1.1.0"
+ }
+ },
+ "unist-util-stringify-position": {
+ "version": "2.0.3",
+ "resolved": "https://registry.npmjs.org/unist-util-stringify-position/-/unist-util-stringify-position-2.0.3.tgz",
+ "integrity": "sha512-3faScn5I+hy9VleOq/qNbAd6pAx7iH5jYBMS9I1HgQVijz/4mv5Bvw5iw1sC/90CODiKo81G/ps8AJrISn687g==",
+ "dev": true,
+ "requires": {
+ "@types/unist": "^2.0.2"
+ }
+ },
+ "unist-util-visit": {
+ "version": "1.4.1",
+ "resolved": "https://registry.npmjs.org/unist-util-visit/-/unist-util-visit-1.4.1.tgz",
+ "integrity": "sha512-AvGNk7Bb//EmJZyhtRUnNMEpId/AZ5Ph/KUpTI09WHQuDZHKovQ1oEv3mfmKpWKtoMzyMC4GLBm1Zy5k12fjIw==",
+ "dev": true,
+ "requires": {
+ "unist-util-visit-parents": "^2.0.0"
+ }
+ },
+ "unist-util-visit-children": {
+ "version": "1.1.4",
+ "resolved": "https://registry.npmjs.org/unist-util-visit-children/-/unist-util-visit-children-1.1.4.tgz",
+ "integrity": "sha512-sA/nXwYRCQVRwZU2/tQWUqJ9JSFM1X3x7JIOsIgSzrFHcfVt6NkzDtKzyxg2cZWkCwGF9CO8x4QNZRJRMK8FeQ==",
+ "dev": true
+ },
+ "unist-util-visit-parents": {
+ "version": "2.1.2",
+ "resolved": "https://registry.npmjs.org/unist-util-visit-parents/-/unist-util-visit-parents-2.1.2.tgz",
+ "integrity": "sha512-DyN5vD4NE3aSeB+PXYNKxzGsfocxp6asDc2XXE3b0ekO2BaRUpBicbbUygfSvYfUz1IkmjFR1YF7dPklraMZ2g==",
+ "dev": true,
+ "requires": {
+ "unist-util-is": "^3.0.0"
+ }
+ },
+ "universalify": {
+ "version": "0.1.2",
+ "resolved": "https://registry.npmjs.org/universalify/-/universalify-0.1.2.tgz",
+ "integrity": "sha512-rBJeI5CXAlmy1pV+617WB9J63U6XcazHHF2f2dbJix4XzpUF0RS3Zbj0FGIOCAva5P/d/GBOYaACQ1w+0azUkg==",
+ "dev": true
+ },
+ "unpipe": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/unpipe/-/unpipe-1.0.0.tgz",
+ "integrity": "sha1-sr9O6FFKrmFltIF4KdIbLvSZBOw=",
+ "dev": true
+ },
+ "unset-value": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/unset-value/-/unset-value-1.0.0.tgz",
+ "integrity": "sha1-g3aHP30jNRef+x5vw6jtDfyKtVk=",
+ "dev": true,
+ "requires": {
+ "has-value": "^0.3.1",
+ "isobject": "^3.0.0"
+ },
+ "dependencies": {
+ "has-value": {
+ "version": "0.3.1",
+ "resolved": "https://registry.npmjs.org/has-value/-/has-value-0.3.1.tgz",
+ "integrity": "sha1-ex9YutpiyoJ+wKIHgCVlSEWZXh8=",
+ "dev": true,
+ "requires": {
+ "get-value": "^2.0.3",
+ "has-values": "^0.1.4",
+ "isobject": "^2.0.0"
+ },
+ "dependencies": {
+ "isobject": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/isobject/-/isobject-2.1.0.tgz",
+ "integrity": "sha1-8GVWEJaj8dou9GJy+BXIQNh+DIk=",
+ "dev": true,
+ "requires": {
+ "isarray": "1.0.0"
+ }
+ }
+ }
+ },
+ "has-values": {
+ "version": "0.1.4",
+ "resolved": "https://registry.npmjs.org/has-values/-/has-values-0.1.4.tgz",
+ "integrity": "sha1-bWHeldkd/Km5oCCJrThL/49it3E=",
+ "dev": true
+ }
+ }
+ },
+ "untildify": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/untildify/-/untildify-2.1.0.tgz",
+ "integrity": "sha1-F+soB5h/dpUunASF/DEdBqgmouA=",
+ "dev": true,
+ "requires": {
+ "os-homedir": "^1.0.0"
+ }
+ },
+ "urix": {
+ "version": "0.1.0",
+ "resolved": "https://registry.npmjs.org/urix/-/urix-0.1.0.tgz",
+ "integrity": "sha1-2pN/emLiH+wf0Y1Js1wpNQZ6bHI=",
+ "dev": true
+ },
+ "url-parse-lax": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/url-parse-lax/-/url-parse-lax-3.0.0.tgz",
+ "integrity": "sha1-FrXK/Afb42dsGxmZF3gj1lA6yww=",
+ "dev": true,
+ "requires": {
+ "prepend-http": "^2.0.0"
+ }
+ },
+ "use": {
+ "version": "3.1.1",
+ "resolved": "https://registry.npmjs.org/use/-/use-3.1.1.tgz",
+ "integrity": "sha512-cwESVXlO3url9YWlFW/TA9cshCEhtu7IKJ/p5soJ/gGpj7vbvFrAY/eIioQ6Dw23KjZhYgiIo8HOs1nQ2vr/oQ==",
+ "dev": true
+ },
+ "util-deprecate": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz",
+ "integrity": "sha1-RQ1Nyfpw3nMnYvvS1KKJgUGaDM8=",
+ "dev": true
+ },
+ "utils-merge": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/utils-merge/-/utils-merge-1.0.1.tgz",
+ "integrity": "sha1-n5VxD1CiZ5R7LMwSR0HBAoQn5xM=",
+ "dev": true
+ },
+ "v8flags": {
+ "version": "3.1.3",
+ "resolved": "https://registry.npmjs.org/v8flags/-/v8flags-3.1.3.tgz",
+ "integrity": "sha512-amh9CCg3ZxkzQ48Mhcb8iX7xpAfYJgePHxWMQCBWECpOSqJUXgY26ncA61UTV0BkPqfhcy6mzwCIoP4ygxpW8w==",
+ "dev": true,
+ "requires": {
+ "homedir-polyfill": "^1.0.1"
+ }
+ },
+ "vary": {
+ "version": "1.1.2",
+ "resolved": "https://registry.npmjs.org/vary/-/vary-1.1.2.tgz",
+ "integrity": "sha1-IpnwLG3tMNSllhsLn3RSShj2NPw=",
+ "dev": true
+ },
+ "vfile": {
+ "version": "4.2.0",
+ "resolved": "https://registry.npmjs.org/vfile/-/vfile-4.2.0.tgz",
+ "integrity": "sha512-a/alcwCvtuc8OX92rqqo7PflxiCgXRFjdyoGVuYV+qbgCb0GgZJRvIgCD4+U/Kl1yhaRsaTwksF88xbPyGsgpw==",
+ "dev": true,
+ "requires": {
+ "@types/unist": "^2.0.0",
+ "is-buffer": "^2.0.0",
+ "replace-ext": "1.0.0",
+ "unist-util-stringify-position": "^2.0.0",
+ "vfile-message": "^2.0.0"
+ },
+ "dependencies": {
+ "is-buffer": {
+ "version": "2.0.4",
+ "resolved": "https://registry.npmjs.org/is-buffer/-/is-buffer-2.0.4.tgz",
+ "integrity": "sha512-Kq1rokWXOPXWuaMAqZiJW4XxsmD9zGx9q4aePabbn3qCRGedtH7Cm+zV8WETitMfu1wdh+Rvd6w5egwSngUX2A==",
+ "dev": true
+ }
+ }
+ },
+ "vfile-location": {
+ "version": "2.0.6",
+ "resolved": "https://registry.npmjs.org/vfile-location/-/vfile-location-2.0.6.tgz",
+ "integrity": "sha512-sSFdyCP3G6Ka0CEmN83A2YCMKIieHx0EDaj5IDP4g1pa5ZJ4FJDvpO0WODLxo4LUX4oe52gmSCK7Jw4SBghqxA==",
+ "dev": true
+ },
+ "vfile-message": {
+ "version": "2.0.4",
+ "resolved": "https://registry.npmjs.org/vfile-message/-/vfile-message-2.0.4.tgz",
+ "integrity": "sha512-DjssxRGkMvifUOJre00juHoP9DPWuzjxKuMDrhNbk2TdaYYBNMStsNhEOt3idrtI12VQYM/1+iM0KOzXi4pxwQ==",
+ "dev": true,
+ "requires": {
+ "@types/unist": "^2.0.0",
+ "unist-util-stringify-position": "^2.0.0"
+ }
+ },
+ "vfile-reporter": {
+ "version": "6.0.1",
+ "resolved": "https://registry.npmjs.org/vfile-reporter/-/vfile-reporter-6.0.1.tgz",
+ "integrity": "sha512-0OppK9mo8G2XUpv+hIKLVSDsoxJrXnOy73+vIm0jQUOUFYRduqpFHX+QqAQfvRHyX9B0UFiRuNJnBOjQCIsw1g==",
+ "dev": true,
+ "requires": {
+ "repeat-string": "^1.5.0",
+ "string-width": "^4.0.0",
+ "supports-color": "^6.0.0",
+ "unist-util-stringify-position": "^2.0.0",
+ "vfile-sort": "^2.1.2",
+ "vfile-statistics": "^1.1.0"
+ },
+ "dependencies": {
+ "supports-color": {
+ "version": "6.1.0",
+ "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-6.1.0.tgz",
+ "integrity": "sha512-qe1jfm1Mg7Nq/NSh6XE24gPXROEVsWHxC1LIx//XNlD9iw7YZQGjZNjYN7xGaEG6iKdA8EtNFW6R0gjnVXp+wQ==",
+ "dev": true,
+ "requires": {
+ "has-flag": "^3.0.0"
+ }
+ }
+ }
+ },
+ "vfile-sort": {
+ "version": "2.2.2",
+ "resolved": "https://registry.npmjs.org/vfile-sort/-/vfile-sort-2.2.2.tgz",
+ "integrity": "sha512-tAyUqD2R1l/7Rn7ixdGkhXLD3zsg+XLAeUDUhXearjfIcpL1Hcsj5hHpCoy/gvfK/Ws61+e972fm0F7up7hfYA==",
+ "dev": true
+ },
+ "vfile-statistics": {
+ "version": "1.1.4",
+ "resolved": "https://registry.npmjs.org/vfile-statistics/-/vfile-statistics-1.1.4.tgz",
+ "integrity": "sha512-lXhElVO0Rq3frgPvFBwahmed3X03vjPF8OcjKMy8+F1xU/3Q3QU3tKEDp743SFtb74PdF0UWpxPvtOP0GCLheA==",
+ "dev": true
+ },
+ "void-elements": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/void-elements/-/void-elements-2.0.1.tgz",
+ "integrity": "sha1-wGavtYK7HLQSjWDqkjkulNXp2+w=",
+ "dev": true
+ },
+ "walkdir": {
+ "version": "0.4.1",
+ "resolved": "https://registry.npmjs.org/walkdir/-/walkdir-0.4.1.tgz",
+ "integrity": "sha512-3eBwRyEln6E1MSzcxcVpQIhRG8Q1jLvEqRmCZqS3dsfXEDR/AhOF4d+jHg1qvDCpYaVRZjENPQyrVxAkQqxPgQ==",
+ "dev": true
+ },
+ "weasel-words": {
+ "version": "0.1.1",
+ "resolved": "https://registry.npmjs.org/weasel-words/-/weasel-words-0.1.1.tgz",
+ "integrity": "sha1-cTeUZYXHP+RIggE4U70ADF1oek4=",
+ "dev": true
+ },
+ "which": {
+ "version": "1.3.1",
+ "resolved": "https://registry.npmjs.org/which/-/which-1.3.1.tgz",
+ "integrity": "sha512-HxJdYWq1MTIQbJ3nw0cqssHoTNU267KlrDuGZ1WYlxDStUtKUhOaJmh112/TZmHxxUfuJqPXSOm7tDyas0OSIQ==",
+ "dev": true,
+ "requires": {
+ "isexe": "^2.0.0"
+ }
+ },
+ "wrap-ansi": {
+ "version": "7.0.0",
+ "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz",
+ "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==",
+ "dev": true,
+ "requires": {
+ "ansi-styles": "^4.0.0",
+ "string-width": "^4.1.0",
+ "strip-ansi": "^6.0.0"
+ },
+ "dependencies": {
+ "ansi-styles": {
+ "version": "4.3.0",
+ "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz",
+ "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==",
+ "dev": true,
+ "requires": {
+ "color-convert": "^2.0.1"
+ }
+ },
+ "color-convert": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz",
+ "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==",
+ "dev": true,
+ "requires": {
+ "color-name": "~1.1.4"
+ }
+ },
+ "color-name": {
+ "version": "1.1.4",
+ "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz",
+ "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==",
+ "dev": true
+ }
+ }
+ },
+ "wrapped": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/wrapped/-/wrapped-1.0.1.tgz",
+ "integrity": "sha1-x4PZ2Aeyc+mwHoUWgKk4yHyQckI=",
+ "dev": true,
+ "requires": {
+ "co": "3.1.0",
+ "sliced": "^1.0.1"
+ }
+ },
+ "wrappy": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz",
+ "integrity": "sha1-tSQ9jz7BqjXxNkYFvA0QNuMKtp8=",
+ "dev": true
+ },
+ "write-good": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/write-good/-/write-good-1.0.2.tgz",
+ "integrity": "sha512-1gm9Ouz7mBROF7aC8vvSm/3JtPfTiZ+fegPGCKdxsYhf6VYeStHfVFx2Hnj2kJviHPx5zZkiQ8DytzZMP0Zqwg==",
+ "dev": true,
+ "requires": {
+ "adverb-where": "^0.2.1",
+ "commander": "^2.19.0",
+ "e-prime": "^0.10.2",
+ "no-cliches": "^0.2.2",
+ "passive-voice": "^0.1.0",
+ "too-wordy": "^0.2.2",
+ "weasel-words": "^0.1.1"
+ }
+ },
+ "ws": {
+ "version": "7.4.2",
+ "resolved": "https://registry.npmjs.org/ws/-/ws-7.4.2.tgz",
+ "integrity": "sha512-T4tewALS3+qsrpGI/8dqNMLIVdq/g/85U98HPMa6F0m6xTbvhXU6RCQLqPH3+SlomNV/LdY6RXEbBpMH6EOJnA==",
+ "dev": true
+ },
+ "x-is-string": {
+ "version": "0.1.0",
+ "resolved": "https://registry.npmjs.org/x-is-string/-/x-is-string-0.1.0.tgz",
+ "integrity": "sha1-R0tQhlrzpJqcRlfwWs0UVFj3fYI=",
+ "dev": true
+ },
+ "xml2js": {
+ "version": "0.4.23",
+ "resolved": "https://registry.npmjs.org/xml2js/-/xml2js-0.4.23.tgz",
+ "integrity": "sha512-ySPiMjM0+pLDftHgXY4By0uswI3SPKLDw/i3UXbnO8M/p28zqexCUoPmQFrYD+/1BzhGJSs2i1ERWKJAtiLrug==",
+ "dev": true,
+ "requires": {
+ "sax": ">=0.6.0",
+ "xmlbuilder": "~11.0.0"
+ }
+ },
+ "xmlbuilder": {
+ "version": "11.0.1",
+ "resolved": "https://registry.npmjs.org/xmlbuilder/-/xmlbuilder-11.0.1.tgz",
+ "integrity": "sha512-fDlsI/kFEx7gLvbecc0/ohLG50fugQp8ryHzMTuW9vSa1GJ0XYWKnhsUx7oie3G98+r56aTQIUB4kht42R3JvA==",
+ "dev": true
+ },
+ "xmldom": {
+ "version": "0.1.31",
+ "resolved": "https://registry.npmjs.org/xmldom/-/xmldom-0.1.31.tgz",
+ "integrity": "sha512-yS2uJflVQs6n+CyjHoaBmVSqIDevTAWrzMmjG1Gc7h1qQ7uVozNhEPJAwZXWyGQ/Gafo3fCwrcaokezLPupVyQ==",
+ "dev": true
+ },
+ "xtend": {
+ "version": "4.0.2",
+ "resolved": "https://registry.npmjs.org/xtend/-/xtend-4.0.2.tgz",
+ "integrity": "sha512-LKYU1iAXJXUgAXn9URjiu+MWhyUXHsvfp7mcuYm9dSUKK0/CjtrUwFAxD82/mCWbtLsGjFIad0wIsod4zrTAEQ==",
+ "dev": true
+ },
+ "y18n": {
+ "version": "5.0.5",
+ "resolved": "https://registry.npmjs.org/y18n/-/y18n-5.0.5.tgz",
+ "integrity": "sha512-hsRUr4FFrvhhRH12wOdfs38Gy7k2FFzB9qgN9v3aLykRq0dRcdcpz5C9FxdS2NuhOrI/628b/KSTJ3rwHysYSg==",
+ "dev": true
+ },
+ "yargs": {
+ "version": "16.2.0",
+ "resolved": "https://registry.npmjs.org/yargs/-/yargs-16.2.0.tgz",
+ "integrity": "sha512-D1mvvtDG0L5ft/jGWkLpG1+m0eQxOfaBvTNELraWj22wSVUMWxZUvYgJYcKh6jGGIkJFhH4IZPQhR4TKpc8mBw==",
+ "dev": true,
+ "requires": {
+ "cliui": "^7.0.2",
+ "escalade": "^3.1.1",
+ "get-caller-file": "^2.0.5",
+ "require-directory": "^2.1.1",
+ "string-width": "^4.2.0",
+ "y18n": "^5.0.5",
+ "yargs-parser": "^20.2.2"
+ }
+ },
+ "yargs-parser": {
+ "version": "20.2.4",
+ "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-20.2.4.tgz",
+ "integrity": "sha512-WOkpgNhPTlE73h4VFAFsOnomJVaovO8VqLDzy5saChRBFQFBoMYirowyW+Q9HB4HFF4Z7VZTiG3iSzJJA29yRA==",
+ "dev": true
+ }
+ }
+}
diff --git a/package.json b/package.json
new file mode 100644
index 0000000..02b6332
--- /dev/null
+++ b/package.json
@@ -0,0 +1,57 @@
+{
+ "devDependencies": {
+ "coffeescript": "^2.4.1",
+ "dictionary-en-us": "^2.1.1",
+ "gaze": "^1.1.3",
+ "grunt": "^1.1.0",
+ "grunt-exec": "^3.0.0",
+ "jasmine": "^3.4.0",
+ "jasmine-core": "^3.4.0",
+ "jasmine-growl-reporter": "^2.0.0",
+ "jasmine-node": "BrainDoctor/jasmine-node",
+ "jasmine-reporters": "^2.3.2",
+ "karma": "^6.0.2",
+ "karma-chrome-launcher": "^3.1.0",
+ "karma-coverage": "^2.0.1",
+ "karma-firefox-launcher": "^1.2.0",
+ "karma-jasmine": "^2.0.1",
+ "minimist": "^1.2.5",
+ "mkdirp": "^0.5.1",
+ "remark-cli": "^7.0.0",
+ "remark-frontmatter": "^1.3.2",
+ "remark-lint-heading-whitespace": "^1.0.0",
+ "remark-lint-no-dead-urls": "^0.5.0",
+ "remark-lint-unordered-list-marker-style": "^1.0.3",
+ "remark-lint-write-good": "^1.1.0",
+ "remark-preset-lint-consistent": "^2.0.3",
+ "remark-preset-lint-markdown-style-guide": "^2.1.3",
+ "remark-preset-lint-recommended": "^3.0.3",
+ "remark-retext": "^3.1.3",
+ "remark-stringify": "^7.0.3",
+ "remark-validate-links": "^9.0.1",
+ "retext-contractions": "^3.0.0",
+ "retext-diacritics": "^2.0.0",
+ "retext-english": "^3.0.3",
+ "retext-equality": "^4.0.0",
+ "retext-indefinite-article": "^1.1.7",
+ "retext-overuse": "^1.1.1",
+ "retext-passive": "^2.0.0",
+ "retext-profanities": "^5.0.0",
+ "retext-quotes": "^3.0.0",
+ "retext-readability": "^5.0.0",
+ "retext-redundant-acronyms": "^2.0.0",
+ "retext-repeated-words": "^2.0.0",
+ "retext-sentence-spacing": "^3.0.0",
+ "retext-spell": "^3.0.0",
+ "retext-syntax-urls": "^1.0.2",
+ "retext-usage": "^0.5.0",
+ "underscore": "^1.9.1",
+ "walkdir": "^0.4.1",
+ "xml2js": "^0.4.22"
+ },
+ "scripts": {
+ "lint-md": "remark .",
+ "lint-md-path": "remark",
+ "fix-md": "remark . --output"
+ }
+}
diff --git a/packaging/DISTRIBUTIONS.md b/packaging/DISTRIBUTIONS.md
new file mode 100644
index 0000000..ca8ab1c
--- /dev/null
+++ b/packaging/DISTRIBUTIONS.md
@@ -0,0 +1,256 @@
+<!--
+title: "Netdata distribution support matrix"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/packaging/DISTRIBUTIONS.md
+-->
+
+# Netdata distribution support matrix
+
+![](https://raw.githubusercontent.com/netdata/netdata/master/web/gui/images/packaging-beta-tag.svg?sanitize=true)
+
+In the following table we've listed Netdata's officially supported operating systems. We detail the distributions, flavors, and the level of support Netdata is currently able to provide.
+
+The following table is a work in progress. We have settled on the list of distributions
+that we currently support and we are documenting our current state so that our users
+have complete visibility into the range of support.
+
+**Legend**:
+
+- **Version**: Operating system version supported
+- **Family**: The family that the OS belongs to
+- **CI: Smoke Testing**: Smoke testing has been implemented in our CI to prevent broken code from reaching our users
+- **CI: Testing**: Testing has been implemented to prevent broken or problematic code from reaching our users
+- **CD**: Continuous deployment support has been fully enabled for this operating system
+- **.DEB**: We provide a `.DEB` package for that particular operating system
+- **.RPM**: We provide a `.RPM` package for that particular operating system
+- **Installer**: Running Netdata from source, using our installer, works on this operating system
+- **Kickstart**: Kickstart installation works on this operating system
+- **Kickstart64**: Kickstart static64 installation works on this operating system
+- **Community**: This operating system receives community support, such as packaging maintainers, contributors, and so on
+
+## AMD64 Architecture
+
+| Version | Family | CI: Smoke testing | CI: Testing | CD | .DEB | .RPM | Installer | Kickstart | Kickstart64 | Community
+:------------------: | :------------------: | :----------------: | :----------------: | :----------------: | :----------------: | :----------------: | :----------------: | :----------------: | :----------------: | :----------------:
+| 14.04.6 LTS (Trusty Tahr) | Ubuntu | &#10007; | &#63; | &#10007; | &#10007; | N/A | &#10004; | &#10004; | &#10004; | &#63;
+| 16.04.6 LTS (Xenial Xerus) | Ubuntu | &#10004; | &#63; | &#10004; | &#10004; | N/A | &#10004; | &#10004; | &#10004; | &#63;
+| 18.04.2 LTS (Bionic Beaver) | Ubuntu | &#10004; | &#63; | &#10004; | &#10004; | N/A | &#10004; | &#10004; | &#10004; | &#63;
+| 19.10 (Eoan Ermine) Latest | Ubuntu | &#10004; | &#63; | &#10004; | &#10004; | N/A | &#10004; | &#10004; | &#10004; | &#63;
+| Debian 8 (Jessie) | Debian | &#10004; | &#63; | &#10004; | &#10004; | N/A | &#10004; | &#10004; | &#10004; | &#63;
+| Debian 9 (Stretch) | Debian | &#10004; | &#63; | &#10004; | &#10004; | N/A | &#10004; | &#10004; | &#10004; | &#63;
+| Debian 10 (Buster) | Debian | &#10004; | &#63; | &#10004; | &#10004; | N/A | &#10004; | &#10004; | &#10004; | &#63;
+| Versions 6.x | RHEL | &#10007; | &#63; | &#10007; | N/A | &#10004; | &#10004; | &#10004; | &#10004; | &#63;
+| Versions 7.x | RHEL | &#10004; | &#63; | &#10004; | N/A | &#10004; | &#10004; | &#10004; | &#10004; | &#63;
+| Versions 8.x | RHEL | &#63; | &#63; | &#63; | N/A | &#10007; | &#63; | &#63; | &#10004; | &#63;
+| Fedora 30 | Fedora | &#10004; | &#63; | &#10004; | N/A | &#10004; | &#10004; | &#10004; | &#10004; | &#63;
+| Fedora 31 | Fedora | &#10004; | &#63; | &#10004; | N/A | &#10004; | &#10004; | &#10004; | &#10004; | &#63;
+| CentOS 6.x | CentOS | &#10007; | &#63; | &#10007; | N/A | &#10007; | &#10004; | &#10004; | &#10004; | &#63;
+| CentOS 7.x | CentOS | &#10004; | &#63; | &#10004; | N/A | &#10004; | &#10004; | &#10004; | &#10004; | &#63;
+| CentOS 8.x | CentOS | &#63; | &#63; | &#63; | N/A | &#10007; | &#63; | &#63; | &#10004; | &#63;
+| openSUSE Leap 15.1 | openSUSE | &#10004; | &#63; | &#10004; | N/A | &#10004; | &#10004; | &#10004; | &#10004; | &#63;
+| openSUSE Tumbleweed | openSUSE | &#10004; | &#63; | &#63; | N/A | &#10007; | &#10004; | &#63; | &#10004; | &#63;
+| SLES 11 | SLES | &#63; | &#63; | &#63; | N/A | &#10007; | &#63; | &#63; | &#10004; | &#63;
+| SLES 12 | SLES | &#63; | &#63; | &#63; | N/A | &#10007; | &#63; | &#63; | &#10004; | &#63;
+| SLES 15 | SLES | &#63; | &#63; | &#63; | N/A | &#10007; | &#63; | &#63; | &#10004; | &#63;
+| Alpine | Alpine | &#10004; | &#63; | &#10007; | N/A | N/A | &#10004; | &#10004; | &#10004; | &#63;
+| Arch Linux (latest) | Arch | &#10004; | &#63; | &#10007; | N/A | &#10007; | &#10004; | &#10004; | &#10004; | &#63;
+| All other Linux | Other | &#10007; | &#63; | &#63; | &#10007; | &#10007; | &#63; | &#63; | &#10004; | &#63;
+
+## x86 Architecture
+
+| Version | Family | CI: Smoke testing | CI: Testing | CD | .DEB | .RPM | Installer | Kickstart | Kickstart64 | Community
+:------------------: | :------------------: | :----------------: | :----------------: | :----------------: | :----------------: | :----------------: | :----------------: | :----------------: | :----------------: | :----------------:
+| 14.04.6 LTS (Trusty Tahr) | Ubuntu | &#10007; | &#63; | &#10007; | &#10007; | N/A | &#10004; | &#10004; | &#10007; | &#63;
+| 16.04.6 LTS (Xenial Xerus) | Ubuntu | &#10007; | &#63; | &#10004; | &#10004; | N/A | &#10004; | &#10004; | &#10007; | &#63;
+| 18.04.2 LTS (Bionic Beaver) | Ubuntu | &#10007; | &#63; | &#10004; | &#10004; | N/A | &#10004; | &#10004; | &#10007; | &#63;
+| 19.10 (Eoan Ermine) Latest | Ubuntu | &#10007; | &#63; | &#10004; | &#10004; | N/A | &#10004; | &#10007; | &#10004; | &#63;
+| Debian 8 (Jessie) | Debian | &#10007; | &#63; | &#10004; | &#10004; | N/A | &#10004; | &#10004; | &#10007; | &#63;
+| Debian 9 (Stretch) | Debian | &#10007; | &#63; | &#10004; | &#10004; | N/A | &#10004; | &#10004; | &#10007; | &#63;
+| Debian 10 (Buster) | Debian | &#10007; | &#63; | &#10004; | &#10004; | N/A | &#10004; | &#10004; | &#10007; | &#63;
+| Versions 6.x | RHEL | &#10007; | &#63; | &#10007; | N/A | &#10004; | &#10004; | &#10004; | &#10007; | &#63;
+| Versions 7.x | RHEL | &#10007; | &#63; | &#10004; | N/A | &#10007; | &#10004; | &#10004; | &#10007; | &#63;
+| Versions 8.x | RHEL | &#63; | &#63; | &#63; | N/A | &#10007; | &#63; | &#63; | &#10004; | &#63;
+| Fedora 30 | Fedora | &#10007; | &#63; | &#10004; | N/A | &#10007; | &#10004; | &#10004; | &#10007; | &#63;
+| Fedora 31 | Fedora | &#10007; | &#63; | &#10007; | N/A | &#10007; | &#10007; | &#10007; | &#10007; | &#63;
+| CentOS 6.x | CentOS | &#10007; | &#63; | &#10007; | N/A | &#10007; | &#10004; | &#10004; | &#10007; | &#63;
+| CentOS 7.x | CentOS | &#10007; | &#63; | &#10004; | N/A | &#10007; | &#10004; | &#10004; | &#10007; | &#63;
+| CentOS 8.x | CentOS | &#63; | &#63; | &#63; | N/A | &#10007; | &#63; | &#63; | &#10004; | &#63;
+| openSUSE Leap 15.1 | openSUSE | &#10007; | &#63; | &#10004; | N/A | &#10007; | &#10004; | &#10004; | &#10007; | &#63;
+| openSUSE Tumbleweed | openSUSE | &#10007; | &#63; | &#63; | N/A | &#10007; | &#10004; | &#63; | &#10007; | &#63;
+| SLES 11 | SLES | &#63; | &#63; | &#63; | N/A | &#10007; | &#63; | &#63; | &#10007; | &#63;
+| SLES 12 | SLES | &#63; | &#63; | &#63; | N/A | &#10007; | &#63; | &#63; | &#10007; | &#63;
+| SLES 15 | SLES | &#63; | &#63; | &#63; | N/A | &#10007; | &#63; | &#63; | &#10007; | &#63;
+| Alpine | Alpine | &#10007; | &#63; | &#10007; | N/A | N/A | &#10004; | &#10004; | &#10007; | &#63;
+| Arch Linux (latest) | Arch | &#10007; | &#63; | &#10007; | N/A | &#10007; | &#10004; | &#10004; | &#10007; | &#63;
+| All other Linux | Other | &#10007; | &#63; | &#63; | &#10007; | &#10007; | &#63; | &#63; | &#10007; | &#63;
+
+## Supported functionalities across different distribution channels
+
+In the following section we describe which functionalities are available across the different distribution channels.
+There are various limitations and problems we try to address as we evolve and grow. Through this report we want to provide some clarity as to what is available and in what way. We strive to deliver our full solution through all channels, but that may not be feasible yet in some cases.
+
+**Legend**:
+
+- **Auto-detect**: Depends on the program's package dependencies. If the required dependencies are covered at compile time, the capability is enabled
+- **YES**: This flag implies that the functionality is available for that distribution channel
+- **NO**: Not currently available for that distribution channel, though it may be a work in progress by the Netdata team
+- **At Runtime**: The given module or functionality is available and only requires configuration after install to enable it
+
+### Core functionality
+
+#### Core
+
+This is the base Netdata capability, which includes basic monitoring, the embedded web server, and so on.
+
+|make/make install|netdata-installer.sh|kickstart.sh|kickstart-static64.sh|Docker image|RPM packaging|DEB packaging|
+|:---------------:|:------------------:|:----------:|:-------------------:|:----------:|:-----------:|:-----------:|
+|YES|YES|YES|YES|YES|YES|YES|
+
+- **Flags/instructions to enable**: None
+- **Flags to disable from source**: None
+- **What packages required for auto-detect?**: `install-required-packages.sh netdata` (see the sketch below)
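+
+A minimal sketch of preparing a system for a source build, assuming the helper script lives at `packaging/installer/install-required-packages.sh` in the source tree:
+
+```sh
+# Detect the distro's package manager and install the packages the core
+# build needs (script path is an assumption; run from the source tree root).
+./packaging/installer/install-required-packages.sh netdata
+```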
+
+#### DB Engine
+
+This is the brand new database engine capability of Netdata. While it is a core facility, its special needs and dependencies mean it remains optional at build time, so that users can enjoy Netdata even when they cannot cover the dependencies or the hardware requirements.
+
+|make/make install|netdata-installer.sh|kickstart.sh|kickstart-static64.sh|Docker image|RPM packaging|DEB packaging|
+|:---------------:|:------------------:|:----------:|:-------------------:|:----------:|:-----------:|:-----------:|
+|Auto-detect|Auto-detect|YES|YES|YES|YES|YES|
+
+- **Flags/instructions to enable**: None
+- **Flags to disable from source**: `--disable-dbengine` (see the sketch below)
+- **What packages required for auto-detect?**: `openssl`, `libuv1`, `lz4`, `Judy`
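+
+For illustration, a from-source build with the DB engine compiled out could look like this (a sketch of the standard autotools flow; adjust privileges and prefixes to your system):
+
+```sh
+# Generate the configure script, switch off the optional DB engine, then build and install.
+./autogen.sh
+./configure --disable-dbengine
+make && sudo make install
+```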
+
+#### Encryption Support (HTTPS)
+
+This is Netdata's TLS capability, which adds encryption to the web server and the APIs between parent and child
+nodes. It is also a core facility of Netdata, but remains optional at build time for users who are constrained or
+not interested in tight security.
+
+|make/make install|netdata-installer.sh|kickstart.sh|kickstart-static64.sh|Docker image|RPM packaging|DEB packaging|
+|:---------------:|:------------------:|:----------:|:-------------------:|:----------:|:-----------:|:-----------:|
+|Auto-detect|Auto-detect|YES|YES|YES|YES|YES|
+
+- **Flags/instructions to enable**: None
+- **Flags to disable from source**: `--disable-https`
+- **What packages required for auto-detect?**: `openssl`
+
+### Libraries/optimizations
+
+#### JSON-C Support
+
+|make/make install|netdata-installer.sh|kickstart.sh|kickstart-static64.sh|Docker image|RPM packaging|DEB packaging|
+|:---------------:|:------------------:|:----------:|:-------------------:|:----------:|:-----------:|:-----------:|
+|Auto-detect|Auto-detect|Auto-detect|Auto-detect|NO|YES|YES|
+
+- **Flags/instructions to enable**: None
+- **Flags to disable from source**: `--disable-jsonc`
+- **What packages required for auto-detect?**: `json-c`
+
+#### Link time optimizations
+
+|make/make install|netdata-installer.sh|kickstart.sh|kickstart-static64.sh|Docker image|RPM packaging|DEB packaging|
+|:---------------:|:------------------:|:----------:|:-------------------:|:----------:|:-----------:|:-----------:|
+|Auto-detect|Auto-detect|Auto-detect|Auto-detect|Auto-detect|Auto-detect|Auto-detect|
+
+- **Flags/instructions to enable**: None
+- **Flags to disable from source**: `--disable-lto`
+- **What packages required for auto-detect?**: No package dependency; depends on the GCC version
+
+### External plugins, built with netdata build tools
+
+#### FREEIPMI
+
+|make/make install|netdata-installer.sh|kickstart.sh|kickstart-static64.sh|Docker image|RPM packaging|DEB packaging|
+|:---------------:|:------------------:|:----------:|:-------------------:|:----------:|:-----------:|:-----------:|
+|Auto-detect|Auto-detect|Auto-detect|Auto-detect|YES|YES|YES|
+
+- **Flags/instructions to enable**: None
+- **Flags to disable from source**: `--disable-plugin-freeipmi`
+- **What packages required for auto-detect?**: `freeipmi-dev` (or `freeipmi-devel`)
+
+#### NFACCT
+
+|make/make install|netdata-installer.sh|kickstart.sh|kickstart-static64.sh|Docker image|RPM packaging|DEB packaging|
+|:---------------:|:------------------:|:----------:|:-------------------:|:----------:|:-----------:|:-----------:|
+|Auto-detect|Auto-detect|Auto-detect|Auto-detect|NO|YES|YES|
+
+- **Flags/instructions to enable**: None
+- **Flags to disable from source**: `--disable-plugin-nfacct`
+- **What packages required for auto-detect?**: `libmnl-dev`, `libnetfilter_acct-dev`
+
+#### Xenstat
+
+|make/make install|netdata-installer.sh|kickstart.sh|kickstart-static64.sh|Docker image|RPM packaging|DEB packaging|
+|:---------------:|:------------------:|:----------:|:-------------------:|:----------:|:-----------:|:-----------:|
+|Auto-detect|Auto-detect|Auto-detect|Auto-detect|NO|NO|NO|
+
+- **Flags/instructions to enable**: None
+- **Flags to disable from source**: `--disable-plugin-xenstat`
+- **What packages required for auto-detect?**: `xen-dom0-libs-devel` or `xen-devel`, and `yajl-dev` or `yajl-devel`.
+  Note: on CentOS-based systems you will need the `centos-release-xen` repository to get `xen-devel`
+
+#### CUPS
+
+|make/make install|netdata-installer.sh|kickstart.sh|kickstart-static64.sh|Docker image|RPM packaging|DEB packaging|
+|:---------------:|:------------------:|:----------:|:-------------------:|:----------:|:-----------:|:-----------:|
+|Auto-detect|Auto-detect|Auto-detect|Auto-detect|NO|YES|YES|
+
+- **Flags/instructions to enable**: None
+- **Flags to disable from source**: `--disable-plugin-cups`
+- **What packages required for auto-detect?**: `cups-devel >= 1.7`
+
+#### FPING
+
+|make/make install|netdata-installer.sh|kickstart.sh|kickstart-static64.sh|Docker image|RPM packaging|DEB packaging|
+|:---------------:|:------------------:|:----------:|:-------------------:|:----------:|:-----------:|:-----------:|
+|At Runtime|At Runtime|At Runtime|At Runtime|At Runtime|At Runtime|At Runtime|
+
+- **Flags/instructions to enable**: Run `${INSTALL_PATH}/netdata/plugins.d/fping.plugin install` (see the sketch below)
+- **Flags to disable from source**: None; simply don't install it
+- **What packages required for auto-detect?**: None; only `fping` needs to be installed for the plugin to start
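+
+A minimal sketch of enabling the plugin, assuming a typical plugin directory of `/usr/libexec/netdata/plugins.d` (the actual `${INSTALL_PATH}` depends on how Netdata was installed); `ioping.plugin` in the next section is enabled the same way:
+
+```sh
+# Let the plugin set up its fping helper with the required capabilities
+# (the plugin directory shown here is an assumption).
+sudo /usr/libexec/netdata/plugins.d/fping.plugin install
+```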
+
+#### IOPING
+
+|make/make install|netdata-installer.sh|kickstart.sh|kickstart-static64.sh|Docker image|RPM packaging|DEB packaging|
+|:---------------:|:------------------:|:----------:|:-------------------:|:----------:|:-----------:|:-----------:|
+|At Runtime|At Runtime|At Runtime|At Runtime|At Runtime|At Runtime|At Runtime|
+
+- **Flags/instructions to enable**: Run `${INSTALL_PATH}/netdata/plugins.d/ioping.plugin install`
+- **Flags to disable from source**: None; simply don't install it
+- **What packages required for auto-detect?**: None; only `ioping` needs to be installed for the plugin to start
+
+#### PERF
+
+|make/make install|netdata-installer.sh|kickstart.sh|kickstart-static64.sh|Docker image|RPM packaging|DEB packaging|
+|:---------------:|:------------------:|:----------:|:-------------------:|:----------:|:-----------:|:-----------:|
+|At Runtime|At Runtime|At Runtime|At Runtime|At Runtime|At Runtime|At Runtime|
+
+- **Flags/instructions to enable**: In `netdata.conf`, under the `[plugins]` section, set `perf = yes` (see the snippet below)
+- **Flags to disable from source**: `--disable-perf`
+- **What packages required for auto-detect?**: None
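+
+For illustration, the relevant `netdata.conf` excerpt would be:
+
+```conf
+[plugins]
+    perf = yes
+```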
+
+### Backends
+
+#### Prometheus remote write
+
+|make/make install|netdata-installer.sh|kickstart.sh|kickstart-static64.sh|Docker image|RPM packaging|DEB packaging|
+|:---------------:|:------------------:|:----------:|:-------------------:|:----------:|:-----------:|:-----------:|
+|Auto-detect|Auto-detect|Auto-detect|Auto-detect|NO|YES|YES|
+
+- **Flags/instructions to enable**: None
+- **Flags to disable from source**: `--disable-backend-prometheus-remote-write`
+- **What packages required for auto-detect?**: `snappy-devel`, `protobuf`, `protobuf-compiler`
+
+#### AWS Kinesis
+
+|make/make install|netdata-installer.sh|kickstart.sh|kickstart-static64.sh|Docker image|RPM packaging|DEB packaging|
+|:---------------:|:------------------:|:----------:|:-------------------:|:----------:|:-----------:|:-----------:|
+|Auto-detect|Auto-detect|Auto-detect|Auto-detect|NO|NO|NO|
+
+- **Flags/instructions to enable**: [Instructions for AWS
+ Kinesis](https://learn.netdata.cloud/docs/agent/backends/aws_kinesis)
+- **Flags to disable from source**: `--disable-backend-kinesis`
+- **What packages required for auto-detect?**: `AWS SDK for C++`, `libcurl`, `libssl`, `libcrypto`
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2Fpackaging%2FDISTRIBUTIONS&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/packaging/Dockerfile.packager b/packaging/Dockerfile.packager
new file mode 100644
index 0000000..14f711c
--- /dev/null
+++ b/packaging/Dockerfile.packager
@@ -0,0 +1,42 @@
+ARG ARCH=amd64
+ARG DISTRO=debian
+ARG DISTRO_VERSION=10
+ARG VERSION=0.1
+
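+# Stage 1: build native packages for the requested distro inside the matching
+# netdata/package-builders image.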
+FROM netdata/package-builders:${DISTRO}${DISTRO_VERSION} AS build
+
+ARG ARCH
+ARG DISTRO
+ARG DISTRO_VERSION
+ARG VERSION
+
+ENV ARCH=$ARCH
+ENV DISTRO=$DISTRO
+ENV DISTRO_VERSION=$DISTRO_VERSION
+ENV VERSION=$VERSION
+
+WORKDIR /netdata
+COPY . .
+
+RUN /build.sh
+
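+# Stage 2: install the freshly built packages into a clean base image of the
+# same distro, then run the smoke test as the container command.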
+FROM ${DISTRO}:${DISTRO_VERSION} AS runtime
+
+ARG ARCH
+ARG DISTRO
+ARG DISTRO_VERSION
+ARG VERSION
+
+ENV ARCH=$ARCH
+ENV DISTRO=$DISTRO
+ENV DISTRO_VERSION=$DISTRO_VERSION
+ENV VERSION=$VERSION
+
+COPY ./packaging/scripts/install.sh /install.sh
+COPY ./packaging/scripts/test.sh /test.sh
+
+COPY --from=build /netdata/artifacts /artifacts
+
+RUN /install.sh
+
+CMD ["/test.sh"]
diff --git a/packaging/build_package_install_test.sh b/packaging/build_package_install_test.sh
new file mode 100755
index 0000000..e3b3362
--- /dev/null
+++ b/packaging/build_package_install_test.sh
@@ -0,0 +1,37 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+set -e
+
+# Fail unless we are at the top level directory of the netdata git repo
+TOP_LEVEL=$(basename "$(git rev-parse --show-toplevel)")
+CWD=$(git rev-parse --show-cdup || echo "")
+if [ -n "${CWD}" ] || [ ! "${TOP_LEVEL}" = "netdata" ]; then
+ echo "Run as ./packaging/$(basename "$0") from top level directory of netdata git repository"
+ exit 1
+fi
+
+if [ $# -lt 2 ] || [ $# -gt 3 ]; then
+ echo "Usage: ./packaging/$(basename "$0") <distro> <distro_version> [<netdata_version>]"
+ exit 1
+fi
+
+if ! command -v docker > /dev/null; then
+ echo "Docker CLI not found. You need Docker to run this!"
+ exit 2
+fi
+
+DISTRO="$1"
+DISTRO_VERSION="$2"
+# TODO: Auto compute this?
+VERSION="${3:-1.19.0}"
+
+TAG="netdata/netdata:${DISTRO}_${DISTRO_VERSION}"
+
+docker build \
+ -f ./packaging/Dockerfile.packager \
+ --build-arg DISTRO="$DISTRO" \
+ --build-arg DISTRO_VERSION="$DISTRO_VERSION" \
+ --build-arg VERSION="$VERSION" \
+ -t "$TAG" . |
+ tee build.log
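+
+# A usage sketch (arguments are illustrative): build and package-test against
+# Debian 10, then run the test stage of the resulting image:
+#   ./packaging/build_package_install_test.sh debian 10
+#   docker run --rm "netdata/netdata:debian_10"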
diff --git a/packaging/bundle-dashboard.sh b/packaging/bundle-dashboard.sh
new file mode 100755
index 0000000..9cab084
--- /dev/null
+++ b/packaging/bundle-dashboard.sh
@@ -0,0 +1,14 @@
+#!/bin/sh
+
+SRCDIR="${1}"
+WEBDIR="${2}"
+
+DASHBOARD_TARBALL="dashboard.tar.gz"
+DASHBOARD_VERSION="$(cat "${SRCDIR}/packaging/dashboard.version")"
+
+mkdir -p "${SRCDIR}/tmp/dashboard"
+curl -sSL --connect-timeout 10 --retry 3 "https://github.com/netdata/dashboard/releases/download/${DASHBOARD_VERSION}/${DASHBOARD_TARBALL}" > "${DASHBOARD_TARBALL}" || exit 1
+sha256sum -c "${SRCDIR}/packaging/dashboard.checksums" || exit 1
+tar -xzf "${DASHBOARD_TARBALL}" -C "${SRCDIR}/tmp/dashboard" || exit 1
+# shellcheck disable=SC2046
+cp -a $(find "${SRCDIR}/tmp/dashboard/build" -mindepth 1 -maxdepth 1) "${WEBDIR}"
diff --git a/packaging/bundle-ebpf.sh b/packaging/bundle-ebpf.sh
new file mode 100755
index 0000000..ecc365e
--- /dev/null
+++ b/packaging/bundle-ebpf.sh
@@ -0,0 +1,14 @@
+#!/bin/sh
+
+SRCDIR="${1}"
+PLUGINDIR="${2}"
+
+EBPF_VERSION="$(cat "${SRCDIR}/packaging/ebpf.version")"
+EBPF_TARBALL="netdata-kernel-collector-glibc-${EBPF_VERSION}.tar.xz"
+
+mkdir -p "${SRCDIR}/tmp/ebpf"
+curl -sSL --connect-timeout 10 --retry 3 "https://github.com/netdata/kernel-collector/releases/download/${EBPF_VERSION}/${EBPF_TARBALL}" > "${EBPF_TARBALL}" || exit 1
+grep "${EBPF_TARBALL}" "${SRCDIR}/packaging/ebpf.checksums" | sha256sum -c - || exit 1
+tar -xaf "${EBPF_TARBALL}" -C "${SRCDIR}/tmp/ebpf" || exit 1
+# shellcheck disable=SC2046
+cp -a $(find "${SRCDIR}/tmp/ebpf" -mindepth 1 -maxdepth 1) "${PLUGINDIR}"
diff --git a/packaging/bundle-judy.sh b/packaging/bundle-judy.sh
new file mode 100755
index 0000000..f4e5839
--- /dev/null
+++ b/packaging/bundle-judy.sh
@@ -0,0 +1,23 @@
+#!/bin/sh
+
+JUDY_TARBALL="v$(cat "${1}/packaging/judy.version").tar.gz"
+JUDY_BUILD_PATH="${1}/externaldeps/libJudy/libjudy-$(cat "${1}/packaging/judy.version")"
+
+mkdir -p "${1}/externaldeps/libJudy" || exit 1
+curl -sSL --connect-timeout 10 --retry 3 "https://github.com/netdata/libjudy/archive/${JUDY_TARBALL}" > "${JUDY_TARBALL}" || exit 1
+sha256sum -c "${1}/packaging/judy.checksums" || exit 1
+tar -xzf "${JUDY_TARBALL}" -C "${1}/externaldeps/libJudy" || exit 1
+OLDPWD="${PWD}"
+cd "${JUDY_BUILD_PATH}" || exit 1
+libtoolize --force --copy || exit 1
+aclocal || exit 1
+autoheader || exit 1
+automake --add-missing --force --copy --include-deps || exit 1
+autoconf || exit 1
+./configure || exit 1
+make -C src || exit 1
+ar -r src/libJudy.a src/Judy*/*.o || exit 1
+cd "${OLDPWD}" || exit 1
+
+cp -a "${JUDY_BUILD_PATH}/src/libJudy.a" "${1}/externaldeps/libJudy" || exit 1
+cp -a "${JUDY_BUILD_PATH}/src/Judy.h" "${1}/externaldeps/libJudy" || exit 1
diff --git a/packaging/bundle-libbpf.sh b/packaging/bundle-libbpf.sh
new file mode 100755
index 0000000..4c16dd1
--- /dev/null
+++ b/packaging/bundle-libbpf.sh
@@ -0,0 +1,18 @@
+#!/bin/sh
+
+LIBBPF_TARBALL="v$(cat "${1}/packaging/libbpf.version").tar.gz"
+LIBBPF_BUILD_PATH="${1}/externaldeps/libbpf/libbpf-$(cat "${1}/packaging/libbpf.version")"
+
+if [ "$(uname -m)" = x86_64 ]; then
+ lib_subdir="lib64"
+else
+ lib_subdir="lib"
+fi
+
+mkdir -p "${1}/externaldeps/libbpf" || exit 1
+curl -sSL --connect-timeout 10 --retry 3 "https://github.com/netdata/libbpf/archive/${LIBBPF_TARBALL}" > "${LIBBPF_TARBALL}" || exit 1
+sha256sum -c "${1}/packaging/libbpf.checksums" || exit 1
+tar -xzf "${LIBBPF_TARBALL}" -C "${1}/externaldeps/libbpf" || exit 1
+make -C "${LIBBPF_BUILD_PATH}/src" BUILD_STATIC_ONLY=1 OBJDIR=build/ DESTDIR=../ install || exit 1
+cp -a "${LIBBPF_BUILD_PATH}/usr/${lib_subdir}/libbpf.a" "${1}/externaldeps/libbpf" || exit 1
+cp -a "${LIBBPF_BUILD_PATH}/usr/include" "${1}/externaldeps/libbpf" || exit 1
diff --git a/packaging/bundle-lws.sh b/packaging/bundle-lws.sh
new file mode 100755
index 0000000..4ecf0ca
--- /dev/null
+++ b/packaging/bundle-lws.sh
@@ -0,0 +1,17 @@
+#!/bin/sh
+
+startdir="${PWD}"
+
+LWS_TARBALL="v$(cat "${1}/packaging/libwebsockets.version").tar.gz"
+LWS_BUILD_PATH="${1}/externaldeps/libwebsockets/libwebsockets-$(cat "${1}/packaging/libwebsockets.version")"
+
+mkdir -p "${1}/externaldeps/libwebsockets" || exit 1
+curl -sSL --connect-timeout 10 --retry 3 "https://github.com/warmcat/libwebsockets/archive/${LWS_TARBALL}" > "${LWS_TARBALL}" || exit 1
+sha256sum -c "${1}/packaging/libwebsockets.checksums" || exit 1
+tar -xzf "${LWS_TARBALL}" -C "${1}/externaldeps/libwebsockets" || exit 1
+cd "${LWS_BUILD_PATH}" || exit 1
+cmake -D LWS_WITH_SOCKS5:boolean=YES . || exit 1
+make || exit 1
+cd "${startdir}" || exit 1
+cp -a "${LWS_BUILD_PATH}/lib/libwebsockets.a" "${1}/externaldeps/libwebsockets" || exit 1
+cp -a "${LWS_BUILD_PATH}/include" "${1}/externaldeps/libwebsockets" || exit 1
diff --git a/packaging/bundle-mosquitto.sh b/packaging/bundle-mosquitto.sh
new file mode 100755
index 0000000..c5a8dc8
--- /dev/null
+++ b/packaging/bundle-mosquitto.sh
@@ -0,0 +1,12 @@
+#!/bin/sh
+
+MOSQUITTO_TARBALL="$(cat "${1}/packaging/mosquitto.version").tar.gz"
+MOSQUITTO_BUILD_PATH="${1}/externaldeps/mosquitto/mosquitto-$(cat "${1}/packaging/mosquitto.version")/lib"
+
+mkdir -p "${1}/externaldeps/mosquitto" || exit 1
+curl -sSL --connect-timeout 10 --retry 3 "https://github.com/netdata/mosquitto/archive/${MOSQUITTO_TARBALL}" > "${MOSQUITTO_TARBALL}" || exit 1
+sha256sum -c "${1}/packaging/mosquitto.checksums" || exit 1
+tar -xzf "${MOSQUITTO_TARBALL}" -C "${1}/externaldeps/mosquitto" || exit 1
+make -C "${MOSQUITTO_BUILD_PATH}" || exit 1
+cp -a "${MOSQUITTO_BUILD_PATH}/libmosquitto.a" "${1}/externaldeps/mosquitto" || exit 1
+cp -a "${MOSQUITTO_BUILD_PATH}/mosquitto.h" "${1}/externaldeps/mosquitto" || exit 1
diff --git a/packaging/check-kernel-config.sh b/packaging/check-kernel-config.sh
new file mode 100755
index 0000000..ded3225
--- /dev/null
+++ b/packaging/check-kernel-config.sh
@@ -0,0 +1,73 @@
+#!/usr/bin/env bash
+
+get_kernel_version() {
+ r="$(uname -r | cut -f 1 -d '-')"
+
+ read -r -a p <<< "$(echo "${r}" | tr '.' ' ')"
+
+ printf "%03d%03d%03d" "${p[0]}" "${p[1]}" "${p[2]}"
+}
+
+get_rh_version() {
+ if [ ! -f /etc/redhat-release ]; then
+ printf "000000000"
+ return
+ fi
+
+ r="$(cut -f 4 -d ' ' < /etc/redhat-release)"
+
+ read -r -a p <<< "$(echo "${r}" | tr '.' ' ')"
+
+ printf "%03d%03d%03d" "${p[0]}" "${p[1]}" "${p[2]}"
+}
+
+if [ "$(uname -s)" != "Linux" ]; then
+ echo >&2 "This does not appear to be a Linux system."
+ exit 1
+fi
+
+KERNEL_VERSION="$(uname -r)"
+
+if [ "$(get_kernel_version)" -lt 004014000 ] && [ "$(get_rh_version)" -lt 0070061810 ]; then
+ echo >&2 "WARNING: Your kernel appears to be older than 4.11 or you are using RH version older than 7.6.1810. This may still work in some cases, but probably won't."
+fi
+
+CONFIG_PATH=""
+MODULE_LOADED=""
+
+if modprobe configs 2> /dev/null; then
+ MODULE_LOADED=1
+fi
+
+if [ -r /proc/config.gz ]; then
+ CONFIG_PATH="/proc/config.gz"
+elif [ -r "/lib/modules/${KERNEL_VERSION}/source/.config" ]; then
+ CONFIG_PATH="/lib/modules/${KERNEL_VERSION}/source/.config"
+elif [ -r "/lib/modules/${KERNEL_VERSION}.x86_64/source/.config" ]; then
+ CONFIG_PATH="/lib/modules/${KERNEL_VERSION}.x86_64/source/.config"
+elif [ -n "$(find /boot -name "config-${KERNEL_VERSION}*")" ]; then
+ CONFIG_PATH="$(find /boot -name "config-${KERNEL_VERSION}*" | head -n 1)"
+fi
+
+if [ -n "${CONFIG_PATH}" ]; then
+ GREP='grep'
+
+ if echo "${CONFIG_PATH}" | grep -q '.gz'; then
+ GREP='zgrep'
+ fi
+
+ REQUIRED_CONFIG="KPROBES KPROBES_ON_FTRACE HAVE_KPROBES BPF BPF_SYSCALL BPF_JIT"
+
+ for required_config in ${REQUIRED_CONFIG}; do
+ if ! "${GREP}" -q "CONFIG_${required_config}=y" "${CONFIG_PATH}"; then
+ echo >&2 " Missing Kernel Config: ${required_config}"
+ exit 1
+ fi
+ done
+fi
+
+if [ -n "${MODULE_LOADED}" ]; then
+ modprobe -r configs 2> /dev/null || true # Ignore failures from CONFIGS being builtin
+fi
+
+exit 0
diff --git a/packaging/dashboard.checksums b/packaging/dashboard.checksums
new file mode 100644
index 0000000..f343ef5
--- /dev/null
+++ b/packaging/dashboard.checksums
@@ -0,0 +1 @@
+804e4610477ab64726f62cf6093613197be3ccf0140959364426065871075309 dashboard.tar.gz
diff --git a/packaging/dashboard.version b/packaging/dashboard.version
new file mode 100644
index 0000000..3ef97df
--- /dev/null
+++ b/packaging/dashboard.version
@@ -0,0 +1 @@
+v2.13.6
diff --git a/packaging/docker/Dockerfile b/packaging/docker/Dockerfile
new file mode 100644
index 0000000..41e8870
--- /dev/null
+++ b/packaging/docker/Dockerfile
@@ -0,0 +1,114 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+# author : paulfantom
+
+# This image contains preinstalled dependencies
+# hadolint ignore=DL3007
+FROM netdata/builder:latest as builder
+
+# One of 'nightly' or 'stable'
+ARG RELEASE_CHANNEL=nightly
+
+ENV JUDY_VER 1.0.5
+
+ARG CFLAGS
+
+ENV CFLAGS=$CFLAGS
+
+ARG EXTRA_INSTALL_OPTS
+
+ENV EXTRA_INSTALL_OPTS=$EXTRA_INSTALL_OPTS
+
+# Copy source
+COPY . /opt/netdata.git
+WORKDIR /opt/netdata.git
+
+# Install from source
+RUN chmod +x netdata-installer.sh && \
+ cp -rp /deps/* /usr/local/ && \
+ ./netdata-installer.sh --dont-wait --dont-start-it ${EXTRA_INSTALL_OPTS} \
+ "$([ "$RELEASE_CHANNEL" = stable ] && echo --stable-channel)"
+
+# Gather all installed files into one directory tree
+RUN mkdir -p /app/usr/sbin/ \
+ /app/usr/share \
+ /app/usr/libexec \
+ /app/usr/local \
+ /app/usr/lib \
+ /app/var/cache \
+ /app/var/lib \
+ /app/etc && \
+ mv /usr/share/netdata /app/usr/share/ && \
+ mv /usr/libexec/netdata /app/usr/libexec/ && \
+ mv /usr/lib/netdata /app/usr/lib/ && \
+ mv /var/cache/netdata /app/var/cache/ && \
+ mv /var/lib/netdata /app/var/lib/ && \
+ mv /etc/netdata /app/etc/ && \
+ mv /usr/sbin/netdata /app/usr/sbin/ && \
+ mv /usr/sbin/netdata-claim.sh /app/usr/sbin/ && \
+ mv /usr/sbin/netdatacli /app/usr/sbin/ && \
+ mv packaging/docker/run.sh /app/usr/sbin/ && \
+ mv packaging/docker/health.sh /app/usr/sbin/ && \
+ cp -rp /deps/* /app/usr/local/ && \
+ chmod +x /app/usr/sbin/run.sh
+
+#####################################################################
+# This image contains preinstalled dependencies
+# hadolint ignore=DL3007
+FROM netdata/base:latest as base
+
+# Configure system
+ARG NETDATA_UID=201
+ARG NETDATA_GID=201
+ENV DOCKER_GRP netdata
+ENV DOCKER_USR netdata
+# If DO_NOT_TRACK is set, it will disable anonymous stats collection and reporting
+#ENV DO_NOT_TRACK=1
+
+# Copy files over
+RUN mkdir -p /opt/src /var/log/netdata && \
+ # Link log files to stdout
+ ln -sf /dev/stdout /var/log/netdata/access.log && \
+ ln -sf /dev/stdout /var/log/netdata/debug.log && \
+ ln -sf /dev/stderr /var/log/netdata/error.log && \
+    # fping from the Alpine apk lives in a different location, so link it
+ ln -snf /usr/sbin/fping /usr/local/bin/fping && \
+ chmod 4755 /usr/local/bin/fping && \
+ # Add netdata user
+ addgroup -g ${NETDATA_GID} -S "${DOCKER_GRP}" && \
+ adduser -S -H -s /usr/sbin/nologin -u ${NETDATA_GID} -h /etc/netdata -G "${DOCKER_GRP}" "${DOCKER_USR}"
+
+# Long-term this should leverage BuildKit’s mount option.
+COPY --from=builder /wheels /wheels
+COPY --from=builder /app /
+
+# Apply the permissions as described in
+# https://docs.netdata.cloud/docs/netdata-security/#netdata-directories, but own everything by root group due to https://github.com/netdata/netdata/pull/6543
+# hadolint ignore=DL3013
+RUN chown -R root:root \
+ /etc/netdata \
+ /usr/share/netdata \
+ /usr/libexec/netdata && \
+ chown -R netdata:root \
+ /usr/lib/netdata \
+ /var/cache/netdata \
+ /var/lib/netdata \
+ /var/log/netdata && \
+ chown -R netdata:netdata /var/lib/netdata/cloud.d && \
+ chmod 0700 /var/lib/netdata/cloud.d && \
+ chmod 0755 /usr/libexec/netdata/plugins.d/*.plugin && \
+ chmod 4755 \
+ /usr/libexec/netdata/plugins.d/cgroup-network \
+ /usr/libexec/netdata/plugins.d/apps.plugin \
+ /usr/libexec/netdata/plugins.d/freeipmi.plugin && \
+ # Group write permissions due to: https://github.com/netdata/netdata/pull/6543
+ find /var/lib/netdata /var/cache/netdata -type d -exec chmod 0770 {} \; && \
+ find /var/lib/netdata /var/cache/netdata -type f -exec chmod 0660 {} \; && \
+ pip --no-cache-dir install /wheels/* && \
+ rm -rf /wheels
+
+ENV NETDATA_LISTENER_PORT 19999
+EXPOSE $NETDATA_LISTENER_PORT
+
+ENTRYPOINT ["/usr/sbin/run.sh"]
+
+HEALTHCHECK --interval=60s --timeout=10s --retries=3 CMD /usr/sbin/health.sh
diff --git a/packaging/docker/README.md b/packaging/docker/README.md
new file mode 100644
index 0000000..e97cb0c
--- /dev/null
+++ b/packaging/docker/README.md
@@ -0,0 +1,461 @@
+<!--
+title: "Install Netdata with Docker"
+date: 2020-04-23
+custom_edit_url: https://github.com/netdata/netdata/edit/master/packaging/docker/README.md
+-->
+
+# Install the Netdata Agent with Docker
+
+Running the Netdata Agent in a container works best for an internal network or to quickly analyze a host. Docker helps
+you get set up quickly, and doesn't install anything permanent on the system, which makes uninstalling the Agent easy.
+
+See our full list of Docker images at [Docker Hub](https://hub.docker.com/r/netdata/netdata).
+
+Starting with v1.12, Netdata collects anonymous usage information by default and sends it to Google Analytics. Read
+about the information collected, and learn how to opt out, on our [anonymous statistics](/docs/anonymous-statistics.md)
+page.
+
+The usage statistics are _vital_ for us, as we use them to discover bugs and prioritize new features. We thank you for
+_actively_ contributing to Netdata's future.
+
+## Limitations running the Agent in Docker
+
+For monitoring the whole host, running the Agent in a container can limit its capabilities. Some data, like the host OS
+performance or status, is not accessible or not as detailed in a container as when running the Agent directly on the
+host.
+
+A way around this is to provide special mounts to the Docker container so that the Agent can get visibility on host OS
+information like `/sys` and `/proc` folders or even `/etc/group` and shadow files.
+
+Also, we now ship Docker images using an [ENTRYPOINT](https://docs.docker.com/engine/reference/builder/#entrypoint)
+directive, not a `CMD` directive. Please adapt your execution scripts accordingly. You can find more information about
+ENTRYPOINT vs COMMAND in the [Docker
+documentation](https://docs.docker.com/engine/reference/builder/#understand-how-cmd-and-entrypoint-interact).
+
+## Create a new Netdata Agent container
+
+You can create a new Agent container using either `docker run` or Docker Compose. After using either method, you can
+visit the Agent dashboard at `http://NODE:19999`.
+
+Both methods create a [named volume](https://docs.docker.com/storage/volumes/) for Netdata's configuration files
+_within the container_ at `/etc/netdata`. See the [configuration section](#configure-agent-containers) for details. If
+you want to access the configuration files from your _host_ machine, see [host-editable
+configuration](#host-editable-configuration).
+
+**`docker run`**: Use the `docker run` command, along with the following options, to start a new container.
+
+```bash
+docker run -d --name=netdata \
+ -p 19999:19999 \
+ -v netdataconfig:/etc/netdata \
+ -v netdatalib:/var/lib/netdata \
+ -v netdatacache:/var/cache/netdata \
+ -v /etc/passwd:/host/etc/passwd:ro \
+ -v /etc/group:/host/etc/group:ro \
+ -v /proc:/host/proc:ro \
+ -v /sys:/host/sys:ro \
+ -v /etc/os-release:/host/etc/os-release:ro \
+ --restart unless-stopped \
+ --cap-add SYS_PTRACE \
+ --security-opt apparmor=unconfined \
+ netdata/netdata
+```
+
+**Docker Compose**: Copy the following code and paste into a new file called `docker-compose.yml`, then run
+`docker-compose up -d` in the same directory as the `docker-compose.yml` file to start the container.
+
+```yaml
+version: '3'
+services:
+ netdata:
+ image: netdata/netdata
+ container_name: netdata
+ hostname: example.com # set to fqdn of host
+ ports:
+ - 19999:19999
+ restart: unless-stopped
+ cap_add:
+ - SYS_PTRACE
+ security_opt:
+ - apparmor:unconfined
+ volumes:
+ - netdataconfig:/etc/netdata
+ - netdatalib:/var/lib/netdata
+ - netdatacache:/var/cache/netdata
+ - /etc/passwd:/host/etc/passwd:ro
+ - /etc/group:/host/etc/group:ro
+ - /proc:/host/proc:ro
+ - /sys:/host/sys:ro
+ - /etc/os-release:/host/etc/os-release:ro
+
+volumes:
+ netdataconfig:
+ netdatalib:
+ netdatacache:
+```
+
+## Health Checks
+
+Our Docker image provides integrated support for health checks through the standard Docker interfaces.
+
+You can control how the health checks run by using the environment variable `NETDATA_HEALTHCHECK_TARGET` as follows:
+
+- If left unset, the health check will attempt to access the
+ `/api/v1/info` endpoint of the agent.
+- If set to the exact value 'cli', the health check
+ script will use `netdatacli ping` to determine if the agent is running
+ correctly or not. This is sufficient to ensure that Netdata did not
+ hang during startup, but does not provide a rigorous verification
+ that the daemon is collecting data or is otherwise usable.
+- If set to anything else, the health check will treat the value as a
+  URL and check it for a 200 status code. In most cases, this should
+ start with `http://localhost:19999/` to check the agent running in
+ the container.
+
+In most cases, the default behavior of checking the `/api/v1/info`
+endpoint will be sufficient. If you are using a configuration which
+disables the web server or restricts access to certain APIs, you will
+need to use a non-default configuration for health checks to work.
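+
+For example, a minimal sketch of switching the health check to `netdatacli ping` when starting the container (other
+options omitted for brevity):
+
+```bash
+docker run -d --name=netdata \
+  -p 19999:19999 \
+  -e NETDATA_HEALTHCHECK_TARGET=cli \
+  netdata/netdata
+```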
+
+## Configure Agent containers
+
+If you started an Agent container using one of the [recommended methods](#create-a-new-netdata-agent-container) and you
+want to edit Netdata's configuration, you must first use `docker exec` to attach to the container. Replace `netdata`
+with the name of your container.
+
+```bash
+docker exec -it netdata bash
+cd /etc/netdata
+./edit-config netdata.conf
+```
+
+You need to restart the Agent to apply changes. Exit the container if you haven't already, then use the `docker` command
+to restart the container: `docker restart netdata`.
+
+### Host-editable configuration
+
+If you want to make your container's configuration directory accessible from the host system, you need to use a
+[bind mount](https://docs.docker.com/storage/bind-mounts/) rather than a named volume. The following commands create a
+temporary `netdata_tmp` container, which is used to populate a `netdataconfig` directory on the host, which is then
+bind-mounted inside the container at `/etc/netdata`.
+
+```bash
+mkdir netdataconfig
+docker run -d --name netdata_tmp netdata/netdata
+docker cp netdata_tmp:/etc/netdata netdataconfig/
+docker rm -f netdata_tmp
+```
+
+**`docker run`**: Use the `docker run` command, along with the following options, to start a new container. Note the
+changed `-v $(pwd)/netdataconfig/netdata:/etc/netdata:ro \` line from the recommended example above.
+
+```bash
+docker run -d --name=netdata \
+ -p 19999:19999 \
+ -v $(pwd)/netdataconfig/netdata:/etc/netdata:ro \
+ -v netdatalib:/var/lib/netdata \
+ -v netdatacache:/var/cache/netdata \
+ -v /etc/passwd:/host/etc/passwd:ro \
+ -v /etc/group:/host/etc/group:ro \
+ -v /proc:/host/proc:ro \
+ -v /sys:/host/sys:ro \
+ -v /etc/os-release:/host/etc/os-release:ro \
+ --restart unless-stopped \
+ --cap-add SYS_PTRACE \
+ --security-opt apparmor=unconfined \
+ netdata/netdata
+```
+
+**Docker Compose**: Copy the following code and paste into a new file called `docker-compose.yml`, then run
+`docker-compose up -d` in the same directory as the `docker-compose.yml` file to start the container. Note the changed
+`./netdataconfig/netdata:/etc/netdata:ro` line from the recommended example above.
+
+```yaml
+version: '3'
+services:
+ netdata:
+ image: netdata/netdata
+ container_name: netdata
+ hostname: example.com # set to fqdn of host
+ ports:
+ - 19999:19999
+ restart: unless-stopped
+ cap_add:
+ - SYS_PTRACE
+ security_opt:
+ - apparmor:unconfined
+ volumes:
+ - ./netdataconfig/netdata:/etc/netdata:ro
+ - netdatalib:/var/lib/netdata
+ - netdatacache:/var/cache/netdata
+ - /etc/passwd:/host/etc/passwd:ro
+ - /etc/group:/host/etc/group:ro
+ - /proc:/host/proc:ro
+ - /sys:/host/sys:ro
+ - /etc/os-release:/host/etc/os-release:ro
+
+volumes:
+ netdatalib:
+ netdatacache:
+```
+
+### Change the default hostname
+
+You can change the hostname of a Docker container, and thus the name that appears in the local dashboard and in Netdata
+Cloud, when creating a new container. If you want to change the hostname of a Netdata container _after_ you started it,
+you can safely stop and remove it. Your configuration and metrics data reside in persistent volumes and are reattached to
+the recreated container.
+
+If you use `docker run`, add the `--hostname` option to the command.
+
+```bash
+docker run -d --name=netdata \
+  --hostname=my_docker_netdata \
+  netdata/netdata
+```
+
+If you use `docker-compose`, add a `hostname:` key/value pair into your `docker-compose.yml` file, then create the
+container again using `docker-compose up -d`.
+
+```yaml
+version: '3'
+services:
+ netdata:
+ image: netdata/netdata
+ container_name: netdata
+ hostname: my_docker_compose_netdata
+ ...
+```
+
+If you don't want to destroy and recreate your container, you can edit the Agent's `netdata.conf` file directly. See the
+above section on [configuring Agent containers](#configure-agent-containers) to find the appropriate method based on
+how you created the container.
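+
+In that case, the relevant setting is the `hostname` option in the `[global]` section of `netdata.conf` (a sketch;
+uncomment the line if needed, then restart the container):
+
+```conf
+[global]
+    hostname = my_docker_netdata
+```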
+
+### Add or remove other volumes
+
+Some of the volumes are optional depending on how you use Netdata:
+
+- If you don't want to use the apps.plugin functionality, you can remove the mounts of `/etc/passwd` and `/etc/group`
+ (they are used to get proper user and group names for the monitored host) to get slightly better security.
+- Most modern Linux distros supply `/etc/os-release`, although some older distros only supply `/etc/lsb-release`. If
+  this is the case, you can change the line above that mounts the file inside the container to
+ `-v /etc/lsb-release:/host/etc/lsb-release:ro`.
+- If your host is virtualized then Netdata cannot detect it from inside the container and will output the wrong
+  metadata (e.g. on `/api/v1/info` queries). You can fix this by setting a variable that overrides the detection,
+  e.g. `--env VIRTUALIZATION=$(systemd-detect-virt -v)`. If you are using a `docker-compose.yml`, then add:
+
+```yaml
+ environment:
+ - VIRTUALIZATION=${VIRTUALIZATION}
+```
+
+This allows the information to be passed into `docker-compose` using:
+
+```bash
+VIRTUALIZATION=$(systemd-detect-virt -v) docker-compose up
+```
+
+#### Files inside systemd volumes
+
+If a volume is used by a systemd service, some files can be removed during
+[reinitialization](https://github.com/netdata/netdata/issues/9916). To avoid this, you need to add
+`RuntimeDirectoryPreserve=yes` to the service file.
+
+### Docker container names resolution
+
+There are a few options for resolving container names within Netdata. Some methods of doing so will allow root access to
+your machine from within the container. Please read the following carefully.
+
+#### Docker socket proxy (safest option)
+
+Deploy a Docker socket proxy that accepts and filters out requests using something like
+[HAProxy](/docs/Running-behind-haproxy.md) so that it restricts connections to read-only access to the CONTAINERS
+endpoint.
+
+The reason it's safer to expose the socket to the proxy is that Netdata has a TCP port exposed outside the Docker
+network, while access to the proxy container is limited to within that network.
+
+Below is [an example repository (and image)](https://github.com/Tecnativa/docker-socket-proxy) that provides a proxy to
+the socket.
+
+You run the Docker socket proxy in its own Docker Compose file and keep it on a private network that you can attach
+to other services that require access to it.
+
+```yaml
+version: '3'
+services:
+ netdata:
+ image: netdata/netdata
+ # ... rest of your config ...
+ ports:
+ - 19999:19999
+ environment:
+ - DOCKER_HOST=proxy:2375
+ proxy:
+ image: tecnativa/docker-socket-proxy
+ volumes:
+ - /var/run/docker.sock:/var/run/docker.sock:ro
+ environment:
+ - CONTAINERS=1
+```
+
+**Note:** Replace `2375` with the port of your proxy.
+
+#### Giving group access to the Docker socket (less safe)
+
+**Important Note**: You should seriously consider the necessity of activating this option, as it grants the `netdata`
+user access to the privileged socket connection of the Docker service, and therefore to your whole machine.
+
+If you want to have your container names resolved by Netdata, make the `netdata` user part of the group that owns the
+socket.
+
+To achieve that, add the environment variable `PGID=[GROUP NUMBER]` to the Netdata container, where `[GROUP NUMBER]`
+is the group ID of the group that owns the Docker socket on your host.
+
+You can find this group number by running the following (assuming the socket is owned by the `docker` group):
+
+```bash
+grep docker /etc/group | cut -d ':' -f 3
+```
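+
+You can then pass that group ID to the container. A sketch, assuming the group ID turned out to be `999` and the
+socket is mounted read-only:
+
+```bash
+docker run -d --name=netdata \
+  -p 19999:19999 \
+  -e PGID=999 \
+  -v /var/run/docker.sock:/var/run/docker.sock:ro \
+  netdata/netdata
+```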
+
+#### Running as root (unsafe)
+
+> You should seriously consider the necessity of activating this option, as it grants to the `netdata` user access to
+> the privileged socket connection of docker service, and therefore your whole machine.
+
+```yaml
+version: '3'
+services:
+ netdata:
+ image: netdata/netdata
+ # ... rest of your config ...
+ volumes:
+ # ... other volumes ...
+ - /var/run/docker.sock:/var/run/docker.sock:ro
+ environment:
+ - DOCKER_USR=root
+```
+
+### Pass command line options to Netdata
+
+Since we use an [ENTRYPOINT](https://docs.docker.com/engine/reference/builder/#entrypoint) directive, you can provide
+[Netdata daemon command line options](https://learn.netdata.cloud/docs/agent/daemon/#command-line-options) such as the
+IP address Netdata will be running on, using the [command
+instruction](https://docs.docker.com/engine/reference/builder/#cmd).
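+
+For example, anything appended after the image name is passed through to the `netdata` daemon. A sketch that sets the
+listen IP address (assuming `-i` as the daemon's listen-address option):
+
+```bash
+docker run -d --name=netdata -p 19999:19999 netdata/netdata -i 0.0.0.0
+```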
+
+## Install the Agent using Docker Compose with SSL/TLS enabled HTTP Proxy
+
+For a permanent installation on a public server, you should [secure the Netdata
+instance](/docs/netdata-security.md). This section contains an example of how to install Netdata with an SSL
+reverse proxy and basic authentication.
+
+You can use the following `docker-compose.yml` and Caddyfile files to run Netdata with Docker. Replace the domains and
+email address for [Let's Encrypt](https://letsencrypt.org/) before starting.
+
+### Caddyfile
+
+Place this file in `/opt` with the name `Caddyfile`. Customize the domain, and provide your email address so that a
+Let's Encrypt certificate can be obtained. Certificate renewal happens automatically and is handled internally by the
+Caddy server.
+
+```caddyfile
+netdata.example.org {
+ proxy / netdata:19999
+ tls admin@example.org
+}
+```
+
+### docker-compose.yml
+
+After setting up the Caddyfile, run `docker-compose up -d` to get a fully functioning Netdata setup behind an HTTPS
+reverse proxy.
+
+```yaml
+version: '3'
+
+services:
+ caddy:
+ image: abiosoft/caddy
+ ports:
+ - 80:80
+ - 443:443
+ volumes:
+ - /opt/Caddyfile:/etc/Caddyfile
+ - $HOME/.caddy:/root/.caddy
+ environment:
+ ACME_AGREE: 'true'
+ netdata:
+ restart: always
+ hostname: netdata.example.org
+ image: netdata/netdata
+ cap_add:
+ - SYS_PTRACE
+ security_opt:
+ - apparmor:unconfined
+ volumes:
+ - netdatalib:/var/lib/netdata
+ - netdatacache:/var/cache/netdata
+ - /etc/passwd:/host/etc/passwd:ro
+ - /etc/group:/host/etc/group:ro
+ - /proc:/host/proc:ro
+ - /sys:/host/sys:ro
+ - /var/run/docker.sock:/var/run/docker.sock:ro
+
+volumes:
+ netdatalib:
+ netdatacache:
+```
+
+### Restrict access with basic auth
+
+You can restrict access by following the [official Caddy guide](https://caddyserver.com/docs/caddyfile/directives/basicauth#basicauth)
+and adding the appropriate directives to your Caddyfile, as sketched below.
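+
+A minimal sketch, using the Caddy v1 syntax that matches the `abiosoft/caddy` image above (replace the placeholder
+credentials with your own):
+
+```caddyfile
+netdata.example.org {
+    proxy / netdata:19999
+    tls admin@example.org
+    basicauth / myuser mypassword
+}
+```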
+
+## Publish a test image to your own repository
+
+At Netdata, we provide multiple ways of testing your Docker images using your own repositories.
+You may either use the command line tools available or take advantage of our Travis CI infrastructure.
+
+### Inside Netdata organization, using Travis CI
+
+To enable Travis CI integration on your own repositories (Docker and GitHub), you need to be part of the Netdata
+organization.
+
+Once you have contacted the Netdata owners to set you up on GitHub and Travis, execute the following steps:
+
+- Preparation
+ - Have Netdata forked on your personal GitHub account
+ - Get a GitHub token: Go to **GitHub settings** -> **Developer Settings** -> **Personal access tokens**, and
+ generate a new token with full access to `repo_hook`, read-only access to `admin:org`, `public_repo`,
+ `repo_deployment`, `repo:status`, and `user:email` settings enabled. This will be your `GITHUB_TOKEN` that is
+ described later in the instructions, so keep it somewhere safe.
+  - Contact the Netdata team and ask for permissions on `https://scan.coverity.com` should you require Travis to be
+    able to push your forked code to Coverity for analysis and reporting. Once you are set up, you should have the
+    email you used with Coverity and a token from them. These will be your `COVERITY_SCAN_SUBMIT_EMAIL` and
+    `COVERITY_SCAN_TOKEN` that we will refer to later.
+  - Have a valid Docker Hub account; the credentials from this account will be your `DOCKER_USERNAME` and
+    `DOCKER_PWD` mentioned later.
+
+- Setting up Travis CI for your own fork (Detailed instructions provided by Travis team [here](https://docs.travis-ci.com/user/tutorial/))
+  - Log in to Travis with your own GitHub credentials (OAuth access is available)
+  - Go to your profile settings, under the [repositories](https://travis-ci.com/account/repositories) section, and
+    set up your Netdata fork to be built by Travis CI.
+  - Once the repository has been set up, go to repository settings within Travis CI (usually under
+ `https://travis-ci.com/NETDATA_DEVELOPER/netdata/settings`, where `NETDATA_DEVELOPER` is your GitHub handle),
+ and select your desired settings.
+
+- While in Travis settings, under Netdata repository settings in the Environment Variables section, you need to add
+ the following:
+ - `DOCKER_USERNAME` and `DOCKER_PWD` variables so that Travis can login to your Docker Hub account and publish
+ Docker images there.
+ - `REPOSITORY` variable to `NETDATA_DEVELOPER/netdata`, where `NETDATA_DEVELOPER` is your GitHub handle again.
+ - `GITHUB_TOKEN` variable with the token generated on the preparation step, for Travis workflows to function
+ properly.
+ - `COVERITY_SCAN_SUBMIT_EMAIL` and `COVERITY_SCAN_TOKEN` variables to enable Travis to submit your code for
+ analysis to Coverity.
+
+Having followed these instructions, your forked repository should be all set up for integration with Travis CI. Happy
+testing!
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fpackaging%2Fdocker%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/packaging/docker/health.sh b/packaging/docker/health.sh
new file mode 100755
index 0000000..088a6c0
--- /dev/null
+++ b/packaging/docker/health.sh
@@ -0,0 +1,17 @@
+#!/bin/sh
+#
+# This is the script that gets run for our Docker image health checks.
+
+if [ -z "${NETDATA_HEALTHCHECK_TARGET}" ] ; then
+ # If users didn't request something else, query `/api/v1/info`.
+ NETDATA_HEALTHCHECK_TARGET="http://localhost:19999/api/v1/info"
+fi
+
+case "${NETDATA_HEALTHCHECK_TARGET}" in
+ cli)
+ netdatacli ping || exit 1
+ ;;
+ *)
+ curl -sSL "${NETDATA_HEALTHCHECK_TARGET}" || exit 1
+ ;;
+esac
diff --git a/packaging/docker/run.sh b/packaging/docker/run.sh
new file mode 100755
index 0000000..432d199
--- /dev/null
+++ b/packaging/docker/run.sh
@@ -0,0 +1,30 @@
+#!/usr/bin/env bash
+#
+# Entry point script for netdata
+#
+# Copyright: 2018 and later Netdata Inc.
+# SPDX-License-Identifier: GPL-3.0-or-later
+#
+# Author : Pavlos Emm. Katsoulakis <paul@netdata.cloud>
+# Author : Austin S. Hemmelgarn <austin@netdata.cloud>
+set -e
+
+if [ ! "${DO_NOT_TRACK:-0}" -eq 0 ] || [ -n "$DO_NOT_TRACK" ]; then
+ touch /etc/netdata/.opt-out-from-anonymous-statistics
+fi
+
+if [ -n "${PGID}" ]; then
+ echo "Creating docker group ${PGID}"
+  addgroup -g "${PGID}" "docker" || echo >&2 "Could not add group docker with ID ${PGID}; it probably already exists"
+  echo "Assigning netdata user to docker group ${PGID}"
+ usermod -a -G "${PGID}" "${DOCKER_USR}" || echo >&2 "Could not add netdata user to group docker with ID ${PGID}"
+fi
+
+if [ -n "${NETDATA_CLAIM_URL}" ] && [ -n "${NETDATA_CLAIM_TOKEN}" ] && [ ! -f /var/lib/netdata/claim.d/claimed_id ]; then
+ /usr/sbin/netdata-claim.sh -token "${NETDATA_CLAIM_TOKEN}" \
+ -url "${NETDATA_CLAIM_URL}" \
+ ${NETDATA_CLAIM_ROOMS:+-rooms "${NETDATA_CLAIM_ROOMS}"} \
+ ${NETDATA_CLAIM_PROXY:+-proxy "${NETDATA_CLAIM_PROXY}"}
+fi
+
+exec /usr/sbin/netdata -u "${DOCKER_USR}" -D -s /host -p "${NETDATA_LISTENER_PORT}" -W set web "web files group" root -W set web "web files owner" root "$@"
diff --git a/packaging/ebpf.checksums b/packaging/ebpf.checksums
new file mode 100644
index 0000000..e35fa1f
--- /dev/null
+++ b/packaging/ebpf.checksums
@@ -0,0 +1,3 @@
+d9c1c81fe3a8b9af7fc1174a28c16ddb24e2f3ff79e6beb1b2eb184bf0d2e8c0 netdata-kernel-collector-glibc-v0.5.5.tar.xz
+0e1dd5e12a58dda53576b2dab963cd26fa26fe2084d84c51becb9238d1055fc1 netdata-kernel-collector-musl-v0.5.5.tar.xz
+d6d65e5f40a83880aa7dd740829a7ffe6a0805637e1616805aebdff088a3fcb0 netdata-kernel-collector-static-v0.5.5.tar.xz
diff --git a/packaging/ebpf.version b/packaging/ebpf.version
new file mode 100644
index 0000000..12aa8c5
--- /dev/null
+++ b/packaging/ebpf.version
@@ -0,0 +1 @@
+v0.5.5
diff --git a/packaging/go.d.checksums b/packaging/go.d.checksums
new file mode 100644
index 0000000..19a2af8
--- /dev/null
+++ b/packaging/go.d.checksums
@@ -0,0 +1,16 @@
+aac2f7e67d1231c68cf5a5500801617f7c8bc9543a9e7d80c81448da50225ab9 *config.tar.gz
+0248ced58c9eaba9815ff4c4cd5498436b3a2db93f50a71b8d602cf6688d66b0 *go.d.plugin-v0.27.0.darwin-amd64.tar.gz
+79cbb731e2f57ef4af1f8c8c204a488008cd212853a0c9bb552504e261e17e5b *go.d.plugin-v0.27.0.freebsd-386.tar.gz
+b0f4151ec40bc92ef6624e8e148074de5a3b3d47c4df55bb858d04e68d557cc1 *go.d.plugin-v0.27.0.freebsd-amd64.tar.gz
+73c0e6b994ef1ade6323163bca7636455d823db34d28e349f6ea9346e1320602 *go.d.plugin-v0.27.0.freebsd-arm.tar.gz
+283e2b3d86221c9208f26c979a86257e6a99d7812cdb6ef1c7989d847ec3cfda *go.d.plugin-v0.27.0.freebsd-arm64.tar.gz
+528cd446a6cfdc5f62e66806e71d6ea1fab4e48fd1e61ab0425cb5df45676430 *go.d.plugin-v0.27.0.linux-386.tar.gz
+b68d0ed812658c175ed6f74cdb29944d006fe92a8958b06aa281d435f728a412 *go.d.plugin-v0.27.0.linux-amd64.tar.gz
+3a70a07081056414ed5c76370e3ec61c0bac7bc852a15266489fe27f5ba9601e *go.d.plugin-v0.27.0.linux-arm.tar.gz
+0cc30f7ea11c7ffce5ac0831845adcea63a76cd56016c066fde9362201f0e9b3 *go.d.plugin-v0.27.0.linux-arm64.tar.gz
+54fab477fb5b80a3e0fc28a1983f7bdf21717a3e7b0c64c1f6bd82b572076f15 *go.d.plugin-v0.27.0.linux-mips.tar.gz
+831e5fb6e38683d3da58219af6c0fbd1495766fafde6c00550c97b086d1bee0f *go.d.plugin-v0.27.0.linux-mips64.tar.gz
+45626fe15fbde8ff18b1f1d167957be664674ebb55d120edf4e193698dcd07f9 *go.d.plugin-v0.27.0.linux-mips64le.tar.gz
+3468a8e95313926ca7712bdaa4838d38279fa4dffde66e1576af44afdc9f0562 *go.d.plugin-v0.27.0.linux-mipsle.tar.gz
+1b23454071500d58dd935c977e9c742744dd201ac654f2862b2d545e1ab212da *go.d.plugin-v0.27.0.linux-ppc64.tar.gz
+8d3feb67d036a9fdc337dea1118074591b85624fc17d235cf0979e3b93dd2009 *go.d.plugin-v0.27.0.linux-ppc64le.tar.gz
diff --git a/packaging/go.d.version b/packaging/go.d.version
new file mode 100644
index 0000000..0a8bf80
--- /dev/null
+++ b/packaging/go.d.version
@@ -0,0 +1 @@
+v0.27.0
diff --git a/packaging/installer/README.md b/packaging/installer/README.md
new file mode 100644
index 0000000..317ac63
--- /dev/null
+++ b/packaging/installer/README.md
@@ -0,0 +1,253 @@
+<!--
+title: "Installation guide"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/packaging/installer/README.md
+-->
+
+# Installation guide
+
+Netdata is a monitoring agent designed to run on all your systems: physical and virtual servers, containers, even
+IoT/edge devices. Netdata runs on Linux, FreeBSD, macOS, Kubernetes, Docker, and all their derivatives.
+
+The best way to install Netdata is with our [**automatic one-line installation
+script**](#automatic-one-line-installation-script), which works with all Linux distributions, or our [**.deb/rpm
+packages**](/packaging/installer/methods/packages.md), which seamlessly install with your distribution's package
+manager.
+
+If you want to install Netdata with Docker, on a Kubernetes cluster, or a different operating system, see [Have a
+different operating system, or want to try another
+method?](#have-a-different-operating-system-or-want-to-try-another-method)
+
+Some third parties, such as the packaging teams at various Linux distributions, distribute old, broken, or altered
+packages. We recommend you install Netdata using one of the methods listed below to guarantee you get the latest
+checksum-verified packages.
+
+Starting with v1.12, Netdata collects anonymous usage information by default and sends it to Google Analytics. Read
+about the information collected, and learn how to opt out, on our [anonymous statistics](/docs/anonymous-statistics.md)
+page.
+
+The usage statistics are _vital_ for us, as we use them to discover bugs and prioritize new features. We thank you for
+_actively_ contributing to Netdata's future.
+
+## Automatic one-line installation script
+
+![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.requests_per_url&options=unaligned&dimensions=kickstart&group=sum&after=-3600&label=last+hour&units=installations&value_color=orange&precision=0) ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.requests_per_url&options=unaligned&dimensions=kickstart&group=sum&after=-86400&label=today&units=installations&precision=0)
+
+This method is fully automatic on all Linux distributions, including Ubuntu, Debian, Fedora, CentOS, and others.
+
+To install Netdata from source, including all dependencies required to connect to Netdata Cloud, and get _automatic
+nightly updates_, run the following as your normal user:
+
+```bash
+bash <(curl -Ss https://my-netdata.io/kickstart.sh)
+```
+
+To see more information about this installation script, including how to disable automatic updates, get nightly vs.
+stable releases, or disable anonymous statistics, see the [`kickstart.sh` method
+page](/packaging/installer/methods/kickstart.md).
+
+Scroll down for details about [automatic updates](#automatic-updates) or [nightly vs. stable
+releases](#nightly-vs-stable-releases).
+
+### Post-installation
+
+When you're finished with installation, check out our [single-node](/docs/quickstart/single-node.md) or
+[infrastructure](/docs/quickstart/infrastructure.md) monitoring quickstart guides based on your use case.
+
+Or, skip straight to [configuring the Netdata Agent](/docs/configure/nodes.md).
+
+Read through Netdata's [documentation](https://learn.netdata.cloud/docs), which is structured based on actions and
+solutions, to enable features like health monitoring, alarm notifications, long-term metrics storage, exporting to
+external databases, and more.
+
+## Have a different operating system, or want to try another method?
+
+Netdata works on many different operating systems, each with a few possible installation methods. To see the full list
+of approved methods for each operating system/version we support, see our [distribution
+matrix](/packaging/DISTRIBUTIONS.md).
+
+Below, you can find a few additional installation methods, followed by separate instructions for a variety of unique
+operating systems.
+
+### Alternative methods
+
+<div class="installer-grid" markdown="1">
+
+[![Install with .deb or .rpm
+packages](https://user-images.githubusercontent.com/1153921/76029431-aebd6b00-5ef1-11ea-92b4-06704dabb93e.png) Install
+with .deb or .rpm packages](/packaging/installer/methods/packages.md)
+
+[![Install with a pre-built static binary for 64-bit
+systems](https://user-images.githubusercontent.com/1153921/73030303-94727680-3df6-11ea-963e-6f2cb0ce762c.png) Install
+with a pre-built static binary for 64-bit systems](/packaging/installer/methods/kickstart-64.md)
+
+[![Install Netdata on
+Docker](https://user-images.githubusercontent.com/1153921/76029355-85044400-5ef1-11ea-96f4-79edc58f9b5c.png) Install
+Netdata on Docker](/packaging/docker/README.md)
+
+[![Install Netdata on
+Kubernetes](https://user-images.githubusercontent.com/1153921/76029478-cc8ad000-5ef1-11ea-8981-dd04744b00da.png) Install
+Netdata on a Kubernetes cluster](/packaging/installer/methods/kubernetes.md)
+
+[![Install Netdata on cloud providers
+(GCP/AWS/Azure)](https://user-images.githubusercontent.com/1153921/76029431-aebd6b00-5ef1-11ea-92b4-06704dabb93e.png)
+Install Netdata on cloud providers (GCP/AWS/Azure)](/packaging/installer/methods/cloud-providers.md)
+
+[![Install Netdata on
+macOS](https://user-images.githubusercontent.com/1153921/76029616-1673b600-5ef2-11ea-888a-4a1375a42246.png) Install
+Netdata on macOS](/packaging/installer/methods/macos.md)
+
+[![Install Netdata on
+FreeBSD](https://user-images.githubusercontent.com/1153921/76029787-5fc40580-5ef2-11ea-9461-23e9049aa8f8.png) Install
+Netdata on FreeBSD](/packaging/installer/methods/freebsd.md)
+
+[![Install from a Git
+checkout](https://user-images.githubusercontent.com/1153921/73032280-f1246000-3dfb-11ea-870d-7fbddd9a6f76.png) Install
+from a Git checkout](/packaging/installer/methods/manual.md)
+
+[![Install on offline/air-gapped
+systems](https://user-images.githubusercontent.com/1153921/73032239-c89c6600-3dfb-11ea-8224-c8a9f7a50c53.png) Install on
+offline/air-gapped systems](/packaging/installer/methods/offline.md)
+
+[![Installation on
+PFSense](https://user-images.githubusercontent.com/1153921/76030071-cb0dd780-5ef2-11ea-87cd-607d943dc521.png)
+Installation on PFSense](/packaging/installer/methods/pfsense.md)
+
+[![Install Netdata on
+Synology](https://user-images.githubusercontent.com/1153921/76029789-5fc40580-5ef2-11ea-9d35-c022f682da77.png) Install
+Netdata on Synology](/packaging/installer/methods/synology.md)
+
+[![Manual installation on
+FreeNAS](https://user-images.githubusercontent.com/1153921/76030537-1c1dcb80-5ef3-11ea-9cf9-f130e7d41712.png) Manual
+installation on FreeNAS](/packaging/installer/methods/freenas.md)
+
+[![Manual installation on
+Alpine](https://user-images.githubusercontent.com/1153921/76029682-37d4a200-5ef2-11ea-9a2c-a8ffeb1d13c3.png) Manual
+installation on Alpine](/packaging/installer/methods/alpine.md)
+
+[![Build manually from
+source](https://user-images.githubusercontent.com/1153921/73032280-f1246000-3dfb-11ea-870d-7fbddd9a6f76.png)
+Build manually from source](/packaging/installer/methods/source.md)
+
+</div>
+
+## Automatic updates
+
+By default, Netdata's installation scripts enable automatic updates for both nightly and stable release channels.
+
+If you would prefer to update your Netdata agent manually, you can disable automatic updates by using the `--no-updates`
+option when you install or update Netdata using the [automatic one-line installation
+script](#automatic-one-line-installation-script).
+
+```bash
+bash <(curl -Ss https://my-netdata.io/kickstart.sh) --no-updates
+```
+
+With automatic updates disabled, you can choose exactly when and how you [update
+Netdata](/packaging/installer/UPDATE.md).
+
+### Network usage of Netdata’s automatic updater
+
+The auto-update functionality set up by the installation scripts requires working internet access to function
+correctly. In particular, it currently requires access to GitHub (to check if a newer version of the updater script
+is available or not, as well as potentially fetching build-time dependencies that are bundled as part of the install),
+and Google Cloud Storage (to check for newer versions of Netdata and download the sources if there is a newer version).
+
+Note that the auto-update functionality will check for updates to itself independently of updates to Netdata,
+and will try to use the latest version of the updater script whenever possible. This is intended to reduce the
+amount of effort required by users to get updates working again in the event of a bug in the updater code.
+
+## Nightly vs. stable releases
+
+The Netdata team maintains two releases of the Netdata agent: **nightly** and **stable**. By default, Netdata's
+installation scripts will give you **automatic, nightly** updates, as that is our recommended configuration.
+
+**Nightly**: We create nightly builds every 24 hours. They contain fully-tested code that fixes bugs or security flaws,
+or introduces new features to Netdata. Every nightly release is a candidate for then becoming a stable release—when
+we're ready, we simply change the release tags on GitHub. That means nightly releases are stable and proven to function
+correctly in the vast majority of Netdata use cases. That's why nightly is the _best choice for most Netdata users_.
+
+**Stable**: We create stable releases whenever we believe the code has reached a major milestone. Most often, stable
+releases correlate with the introduction of new, significant features. Stable releases might be a better choice for
+those who run Netdata in _mission-critical production systems_, as updates will come more infrequently, and only after
+the community helps fix any bugs that might have been introduced in previous releases.
+
+**Pros of using nightly releases:**
+
+- Get the latest features and bug fixes as soon as they're available
+- Receive security-related fixes immediately
+- Use stable, fully-tested code that's always improving
+- Leverage the same Netdata experience our community is using
+
+**Pros of using stable releases:**
+
+- Protect yourself from the rare instance when major bugs slip through our testing and negatively affect a Netdata
+ installation
+- Retain more control over the Netdata version you use
+
+## Troubleshooting and known issues
+
+We are tracking a few issues related to installation and packaging.
+
+### Older distributions (Ubuntu 14.04, Debian 8, CentOS 6) and OpenSSL
+
+If you're running an older Linux distribution or one that has reached EOL, such as Ubuntu 14.04 LTS, Debian 8, or CentOS
+6, your Agent may not be able to securely connect to Netdata Cloud due to an outdated version of OpenSSL. These old
+versions of OpenSSL cannot perform [hostname validation](https://wiki.openssl.org/index.php/Hostname_validation), which
+helps securely encrypt SSL connections.
+
+We recommend you reinstall Netdata with a [static build](/packaging/installer/methods/kickstart-64.md), which uses an
+up-to-date version of OpenSSL with hostname validation enabled.
+
+If you choose to continue using the outdated version of OpenSSL, your node will still connect to Netdata Cloud, albeit
+with hostname verification disabled. Without verification, your Netdata Cloud connection could be vulnerable to
+man-in-the-middle attacks.
+
+### CentOS 6 and CentOS 8
+
+To install the Agent on certain CentOS and RHEL systems, you must enable non-default repositories, such as EPEL or
+PowerTools, to gather hard dependencies. See the [CentOS 6](/packaging/installer/methods/manual.md#centos-rehel-6-x) and
+[CentOS 8](/packaging/installer/methods/manual.md#centos-rehel-8-x) sections for more information.
+
+### Access to file is not permitted
+
+If you see an error similar to `Access to file is not permitted: /usr/share/netdata/web//index.html` when you try to
+visit the Agent dashboard at `http://NODE:19999`, you need to update Netdata's permissions to match those of your
+system.
+
+Run `ls -la /usr/share/netdata/web/index.html` to find the file's permissions. You may need to change this path based on
+the error you're seeing in your browser. In the below example, the file is owned by the user `netdata` and the group
+`netdata`.
+
+```bash
+ls -la /usr/share/netdata/web/index.html
+-rw-r--r--. 1 netdata netdata 89377 May 5 06:30 /usr/share/netdata/web/index.html
+```
+
+Open your `netdata.conf` file and find the `[web]` section, plus the `web files owner`/`web files group` settings. Edit
+the lines to match the output from `ls -la` above and uncomment them if necessary.
+
+```conf
+[web]
+ web files owner = netdata
+ web files group = netdata
+```
+
+Save the file, [restart the Netdata Agent](/docs/getting-started.md#start-stop-and-restart-netdata), and try accessing
+the dashboard again.
+
+### Multiple versions of OpenSSL
+
+We've received reports from the community about issues with running the `kickstart.sh` script on systems that have both
+a distribution-installed version of OpenSSL and a manually-installed local version. The Agent's installer cannot handle
+both.
+
+We recommend you install Netdata with the [static binary](/packaging/installer/methods/kickstart-64.md) to avoid the
+issue altogether. Or, you can manually remove one version of OpenSSL to remove the conflict.
+
+### Clang compiler on Linux
+
+Our current build process has some issues when using certain configurations of the `clang` C compiler on Linux. See [the
+section on `nonrepresentable section on output`
+errors](/packaging/installer/methods/manual.md#nonrepresentable-section-on-output-errors) for a workaround.
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fpackaging%2Finstaller%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/packaging/installer/REINSTALL.md b/packaging/installer/REINSTALL.md
new file mode 100644
index 0000000..50a15d7
--- /dev/null
+++ b/packaging/installer/REINSTALL.md
@@ -0,0 +1,68 @@
+<!--
+title: "Reinstall the Netdata Agent"
+description: "Troubleshooting installation issues or force an update of the Netdata Agent by reinstalling it using the same method you used during installation."
+custom_edit_url: https://github.com/netdata/netdata/edit/master/packaging/installer/REINSTALL.md
+-->
+
+# Reinstall the Netdata Agent
+
+In certain situations, such as needing to enable a feature or troubleshoot an issue, you may need to reinstall the
+Netdata Agent on your node.
+
+Before you try reinstalling Netdata, figure out which [installation method you
+used](/packaging/installer/UPDATE.md#determine-which-installation-method-you-used) if you do not already know. This will
+determine the reinstallation method.
+
+## One-line installer script (`kickstart.sh`)
+
+Run the one-line installer script with the `--reinstall` parameter to reinstall the Netdata Agent. This will preserve
+any [user configuration](/docs/configure/nodes.md) in `netdata.conf` or other files.
+
+If you used any [optional
+parameters](/packaging/installer/methods/kickstart.md#optional-parameters-to-alter-your-installation) during initial
+installation, you need to pass them to the script again during reinstallation. If you cannot remember which options you
+used, read the contents of the `.environment` file and look for a `REINSTALL_OPTIONS` line. This line contains a list of
+optional parameters.
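+
+For example, a quick way to check (a sketch, assuming the default configuration directory):
+
+```bash
+grep REINSTALL_OPTIONS /etc/netdata/.environment
+```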
+
+```bash
+bash <(curl -Ss https://my-netdata.io/kickstart.sh) --reinstall
+```
+
+## `.deb` or `.rpm` packages
+
+If you installed Netdata with [`.deb` or `.rpm` packages](/packaging/installer/methods/packages.md), use your
+distribution's package manager to reinstall Netdata. Any custom settings present in your Netdata configuration directory
+(typically at `/etc/netdata`) persist during this process.
+
+```bash
+apt-get install --reinstall netdata # Ubuntu/Debian
+dnf reinstall netdata # Fedora/RHEL
+yum reinstall netdata # CentOS
+zypper in -f netdata # openSUSE
+```
+
+## Pre-built static binary for 64-bit systems (`kickstart-static64.sh`)
+
+Run the one-line installer script with the `--reinstall` parameter to reinstall the Netdata Agent. This will preserve
+any [user configuration](/docs/configure/nodes.md) in `netdata.conf` or other files.
+
+If you used any [optional
+parameters](/packaging/installer/methods/kickstart-64.md#optional-parameters-to-alter-your-installation) during
+initial installation, you need to pass them to the script again during reinstallation. If you cannot remember which
+options you used, read the contents of the `.environment` file and look for a `REINSTALL_OPTIONS` line. This line
+contains a list of optional parameters.
+
+```bash
+bash <(curl -Ss https://my-netdata.io/kickstart-static64.sh) --reinstall
+```
+
+## Troubleshooting
+
+If you still experience problems with your Netdata Agent installation after following one of these processes, the next
+best route is to [uninstall](/packaging/installer/UNINSTALL.md) and then try a fresh installation using the [one-line
+installer](/packaging/installer/methods/kickstart.md).
+
+You can also post to our [community forums](https://community.netdata.cloud/c/support/13) or create a new [bug
+report](https://github.com/netdata/netdata/issues/new?labels=bug%2C+needs+triage&template=bug_report.md).
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Finstaller%2FREINSTALL&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/packaging/installer/UNINSTALL.md b/packaging/installer/UNINSTALL.md
new file mode 100644
index 0000000..c18a1bf
--- /dev/null
+++ b/packaging/installer/UNINSTALL.md
@@ -0,0 +1,54 @@
+<!--
+title: "Uninstall Netdata"
+description: "If you are no longer interested in using the Netdata Agent, use the self-contained uninstaller to remove all traces of binaries and configuration files."
+custom_edit_url: https://github.com/netdata/netdata/edit/master/packaging/installer/UNINSTALL.md
+-->
+
+# Uninstall Netdata
+
+> ⚠️ If you're having trouble updating Netdata, moving from one installation method to another, or generally having
+> issues with your Netdata Agent installation, consider our [**reinstall Netdata**
+> doc](/packaging/installer/REINSTALL.md) instead of removing the Netdata Agent entirely.
+
+Our self-contained uninstaller can remove Netdata installations created with the shell installer. It doesn't need any
+other Netdata repository files to run. All it needs is an `.environment` file, which is created during installation
+(with the shell installer) and placed in `${NETDATA_USER_CONFIG_DIR}/.environment` (by default
+`/etc/netdata/.environment`). That file contains some parameters which are passed to our installer and which are
+needed during the uninstallation process. Two parameters in particular are needed:
+
+```sh
+NETDATA_PREFIX
+NETDATA_ADDED_TO_GROUPS
+```
+
+A workflow for uninstallation looks like this:
+
+1. Find your `.environment` file, which is usually `/etc/netdata/.environment` in a default installation.
+2. If you cannot find that file and would like to uninstall Netdata, then create a new file with the following content:
+
+```sh
+NETDATA_PREFIX="<installation prefix>" # Put whatever you passed to the shell installer's `--install` flag; otherwise leave it empty
+NETDATA_ADDED_TO_GROUPS="<additional groups>" # Additional groups for the user running the Netdata process
+```
+
+3. Run `netdata-uninstaller.sh` as follows:
+
+```sh
+${NETDATA_PREFIX}/usr/libexec/netdata/netdata-uninstaller.sh --yes --env <environment_file>
+```
+
+Note: Existing installations may still need to download the uninstaller script if it's not present. In that case, run
+the following commands:
+
+```sh
+wget https://raw.githubusercontent.com/netdata/netdata/master/packaging/installer/netdata-uninstaller.sh
+chmod +x ./netdata-uninstaller.sh
+./netdata-uninstaller.sh --yes --env <environment_file>
+```
+
+The default `environment_file` is `/etc/netdata/.environment`.
+
+> Note: This uninstallation method assumes a previous installation with `netdata-installer.sh` or the kickstart script.
+> Using it when Netdata was installed by a package manager may work, but can also cause unexpected results.
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Finstaller%2FUNINSTALL&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/packaging/installer/UPDATE.md b/packaging/installer/UPDATE.md
new file mode 100644
index 0000000..ba57c8c
--- /dev/null
+++ b/packaging/installer/UPDATE.md
@@ -0,0 +1,153 @@
+<!--
+title: "Update the Netdata Agent"
+description: "If you opted out of automatic updates, you need to update your Netdata Agent to the latest nightly or stable version."
+custom_edit_url: https://github.com/netdata/netdata/edit/master/packaging/installer/UPDATE.md
+-->
+
+# Update the Netdata Agent
+
+By default, the Netdata Agent automatically updates with the latest nightly version. If you opted out of automatic
+updates, you need to update your Netdata Agent to the latest nightly or stable version.
+
+> 💡 Looking to reinstall the Netdata Agent to enable a feature, update an Agent that cannot update automatically, or
+> troubleshoot an error during the installation process? See our [reinstallation doc](/packaging/installer/REINSTALL.md)
+> for reinstallation steps.
+
+Before you update the Netdata Agent, check to see if your Netdata Agent is already up-to-date by clicking on the update
+icon in the local Agent dashboard's top navigation. This modal informs you whether your Agent needs an update or not.
+
+![Opening the Agent's Update modal](https://user-images.githubusercontent.com/1153921/99738428-add06780-2a87-11eb-8268-0e17b689eb3f.gif)
+
+## Determine which installation method you used
+
+If you are not sure where your Netdata config directory is, see the [configuration doc](/docs/configure/nodes.md). In
+most installations, this is `/etc/netdata`.
+
+Use `cd` to navigate to the Netdata config directory, then use `ls -a` to look for a file called `.environment`.
+
+- If the `.environment` file _does not_ exist, reinstall with your [package manager](#deb-or-rpm-packages).
+- If the `.environment` file _does_ exist, check its contents with `less .environment`.
+ - If `IS_NETDATA_STATIC_BINARY` is `"yes"`, update using the [pre-built static
+ binary](#pre-built-static-binary-for-64-bit-systems-kickstart-static64sh).
+ - In all other cases, update using the [one-line installer script](#one-line-installer-script-kickstartsh).
+
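+For a quick check of the `.environment` marker described above, assuming the default config directory of
+`/etc/netdata`:
+
+```bash
+grep IS_NETDATA_STATIC_BINARY /etc/netdata/.environment 2>/dev/null
+```
+
+If this prints `IS_NETDATA_STATIC_BINARY="yes"`, use the static binary method; no output at all suggests the file is
+missing, in which case use your package manager.
+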
+Next, use the appropriate method to update the Netdata Agent:
+
+- [One-line installer script (`kickstart.sh`)](#one-line-installer-script-kickstartsh)
+- [`.deb` or `.rpm` packages](#deb-or-rpm-packages)
+- [Pre-built static binary for 64-bit systems (`kickstart-static64.sh`)](#pre-built-static-binary-for-64-bit-systems-kickstart-static64sh)
+- [Docker](#docker)
+- [macOS](#macos)
+- [Manual installation from Git](#manual-installation-from-git)
+
+## One-line installer script (`kickstart.sh`)
+
+If you installed Netdata using our one-line automatic installation script, run it again to update Netdata. Any custom
+settings present in your Netdata configuration directory (typically at `/etc/netdata`) persist during this process.
+
+This script will automatically run the update script that was installed as part of the initial install (even if
+you disabled automatic updates) and preserve the existing install options you specified.
+
+If you installed Netdata using an installation prefix, you will need to add an `--install` option specifying
+that prefix to this command to make sure it finds Netdata.
+
+```bash
+bash <(curl -Ss https://my-netdata.io/kickstart.sh)
+```
+
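+For example, assuming you originally installed with `--install /opt` (a hypothetical prefix), pass the same option
+again:
+
+```bash
+bash <(curl -Ss https://my-netdata.io/kickstart.sh) --install /opt
+```
+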
+> ❗ If the above command fails, you can [reinstall
+> Netdata](/packaging/installer/REINSTALL.md#one-line-installer-script-kickstartsh) to get the latest version. This also
+> preserves your [configuration](/docs/configure/nodes.md) in `netdata.conf` or other files.
+
+## `.deb` or `.rpm` packages
+
+If you installed Netdata with [`.deb` or `.rpm` packages](/packaging/installer/methods/packages.md), use your
+distribution's package manager to update Netdata. Any custom settings present in your Netdata configuration directory
+(typically at `/etc/netdata`) persist during this process.
+
+Your package manager grabs a new package from our hosted repository, updates the Netdata Agent, and restarts it.
+
+```bash
+apt-get install netdata # Ubuntu/Debian
+dnf install netdata # Fedora/RHEL
+yum install netdata # CentOS
+zypper in netdata # openSUSE
+```
+
+> You may need to escalate privileges using `sudo`.
+
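+On Debian and Ubuntu systems, refresh the package index first so your package manager can see the newest version:
+
+```bash
+apt-get update && apt-get install netdata
+```
+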
+## Pre-built static binary for 64-bit systems (`kickstart-static64.sh`)
+
+If you installed Netdata using the pre-built static binary, run the `kickstart-static64.sh` script again to update
+Netdata. Any custom settings present in your Netdata configuration directory (typically at `/etc/netdata`) persist
+during this process.
+
+This script will automatically run the update script that was installed as part of the initial install (even if
+you disabled automatic updates) and preserve the existing install options you specified.
+
+```bash
+bash <(curl -Ss https://my-netdata.io/kickstart-static64.sh)
+```
+
+> ❗ If the above command fails, you can [reinstall
+> Netdata](/packaging/installer/REINSTALL.md#pre-built-static-binary-for-64-bit-systems-kickstart-static64sh) to get the
+> latest version. This also preserves your [configuration](/docs/configure/nodes.md) in `netdata.conf` or other files.
+
+## Docker
+
+Docker-based installations do not update automatically. To update a Netdata Agent running in a Docker container, you
+must pull the [latest image from Docker Hub](https://hub.docker.com/r/netdata/netdata), stop and remove the container,
+and re-create it using the latest image.
+
+First, pull the latest version of the image.
+
+```bash
+docker pull netdata/netdata:latest
+```
+
+Next, stop and remove any containers using the `netdata/netdata` image. Replace `netdata` with your container's name if
+you changed it from the default.
+
+```bash
+docker stop netdata
+docker rm netdata
+```
+
+You can now re-create your Netdata container using the `docker` command or a `docker-compose.yml` file. See our [Docker
+installation instructions](/packaging/docker/README.md#create-a-new-netdata-agent-container) for details.
+
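+A minimal sketch of re-creating the container, assuming the default name and port (your original `docker run` command
+or Compose file is authoritative, and the linked instructions list the full recommended set of mounts and options):
+
+```bash
+docker run -d --name=netdata \
+  -p 19999:19999 \
+  --cap-add SYS_PTRACE \
+  --security-opt apparmor=unconfined \
+  -v /proc:/host/proc:ro \
+  -v /sys:/host/sys:ro \
+  netdata/netdata:latest
+```
+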
+## macOS
+
+If you installed Netdata on your macOS system using Homebrew, you can explicitly request an update:
+
+```bash
+brew upgrade netdata
+```
+
+Homebrew downloads the latest Netdata via the
+[formula](https://github.com/Homebrew/homebrew-core/blob/master/Formula/netdata.rb), ensures all dependencies are met,
+and updates Netdata via reinstallation.
+
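+If Homebrew reports that netdata is already up to date, your local formulae may be stale; refresh them first:
+
+```bash
+brew update && brew upgrade netdata
+```
+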
+## Manual installation from Git
+
+If you installed [Netdata manually from Git](/packaging/installer/methods/manual.md), you can run that installer again
+to update your agent. First, run our automatic requirements installer, which works on many Linux distributions, to
+ensure your system has the dependencies necessary for new features.
+
+```bash
+bash <(curl -sSL https://raw.githubusercontent.com/netdata/netdata/master/packaging/installer/install-required-packages.sh)
+```
+
+Navigate to the directory where you first cloned the Netdata repository, pull the latest source code, and run
+`netdata-installer.sh` again. This process compiles Netdata with the latest source code and updates it via reinstallation.
+
+```bash
+cd /path/to/netdata/git
+git pull origin master
+sudo ./netdata-installer.sh
+```
+
+> ⚠️ If you installed Netdata with any optional parameters, such as `--no-updates` to disable automatic updates, and
+> want to retain those settings, you need to set them again during this process.
+
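+For example, if you originally disabled automatic updates, repeat that flag:
+
+```bash
+sudo ./netdata-installer.sh --no-updates
+```
+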
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Finstaller%2FUPDATE&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/packaging/installer/functions.sh b/packaging/installer/functions.sh
new file mode 100644
index 0000000..03ceb2a
--- /dev/null
+++ b/packaging/installer/functions.sh
@@ -0,0 +1,1054 @@
+#!/bin/bash
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# make sure we have a UID
+[ -z "${UID}" ] && UID="$(id -u)"
+
+# -----------------------------------------------------------------------------
+
+setup_terminal() {
+ TPUT_RESET=""
+ TPUT_BLACK=""
+ TPUT_RED=""
+ TPUT_GREEN=""
+ TPUT_YELLOW=""
+ TPUT_BLUE=""
+ TPUT_PURPLE=""
+ TPUT_CYAN=""
+ TPUT_WHITE=""
+ TPUT_BGBLACK=""
+ TPUT_BGRED=""
+ TPUT_BGGREEN=""
+ TPUT_BGYELLOW=""
+ TPUT_BGBLUE=""
+ TPUT_BGPURPLE=""
+ TPUT_BGCYAN=""
+ TPUT_BGWHITE=""
+ TPUT_BOLD=""
+ TPUT_DIM=""
+ TPUT_UNDERLINED=""
+ TPUT_BLINK=""
+ TPUT_INVERTED=""
+ TPUT_STANDOUT=""
+ TPUT_BELL=""
+ TPUT_CLEAR=""
+
+ # Is stderr on the terminal? If not, then fail
+ test -t 2 || return 1
+
+ if command -v tput 1> /dev/null 2>&1; then
+ if [ $(($(tput colors 2> /dev/null))) -ge 8 ]; then
+ # Enable colors
+ TPUT_RESET="$(tput sgr 0)"
+ # shellcheck disable=SC2034
+ TPUT_BLACK="$(tput setaf 0)"
+ TPUT_RED="$(tput setaf 1)"
+ TPUT_GREEN="$(tput setaf 2)"
+ # shellcheck disable=SC2034
+ TPUT_YELLOW="$(tput setaf 3)"
+ # shellcheck disable=SC2034
+ TPUT_BLUE="$(tput setaf 4)"
+ # shellcheck disable=SC2034
+ TPUT_PURPLE="$(tput setaf 5)"
+ TPUT_CYAN="$(tput setaf 6)"
+ TPUT_WHITE="$(tput setaf 7)"
+ # shellcheck disable=SC2034
+ TPUT_BGBLACK="$(tput setab 0)"
+ TPUT_BGRED="$(tput setab 1)"
+ TPUT_BGGREEN="$(tput setab 2)"
+ # shellcheck disable=SC2034
+ TPUT_BGYELLOW="$(tput setab 3)"
+ # shellcheck disable=SC2034
+ TPUT_BGBLUE="$(tput setab 4)"
+ # shellcheck disable=SC2034
+ TPUT_BGPURPLE="$(tput setab 5)"
+ # shellcheck disable=SC2034
+ TPUT_BGCYAN="$(tput setab 6)"
+ # shellcheck disable=SC2034
+ TPUT_BGWHITE="$(tput setab 7)"
+ TPUT_BOLD="$(tput bold)"
+ TPUT_DIM="$(tput dim)"
+ # shellcheck disable=SC2034
+ TPUT_UNDERLINED="$(tput smul)"
+ # shellcheck disable=SC2034
+ TPUT_BLINK="$(tput blink)"
+ # shellcheck disable=SC2034
+ TPUT_INVERTED="$(tput rev)"
+ # shellcheck disable=SC2034
+ TPUT_STANDOUT="$(tput smso)"
+ # shellcheck disable=SC2034
+ TPUT_BELL="$(tput bel)"
+ # shellcheck disable=SC2034
+ TPUT_CLEAR="$(tput clear)"
+ fi
+ fi
+
+ return 0
+}
+setup_terminal || echo > /dev/null
+
+progress() {
+ echo >&2 " --- ${TPUT_DIM}${TPUT_BOLD}${*}${TPUT_RESET} --- "
+}
+
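+# Fetch a URL and print it to stdout, preferring curl and falling back to wget.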
+get() {
+ url="${1}"
+ if command -v curl > /dev/null 2>&1; then
+ curl -q -o - -sSL --connect-timeout 10 --retry 3 "${url}"
+ elif command -v wget > /dev/null 2>&1; then
+ wget -T 15 -O - "${url}"
+ else
+ fatal "I need curl or wget to proceed, but neither is available on this system."
+ fi
+}
+
+download_file() {
+ url="${1}"
+ dest="${2}"
+ name="${3}"
+ opt="${4}"
+
+ if command -v curl > /dev/null 2>&1; then
+ run curl -q -sSL --connect-timeout 10 --retry 3 --output "${dest}" "${url}"
+ elif command -v wget > /dev/null 2>&1; then
+ run wget -T 15 -O "${dest}" "${url}"
+ else
+ echo >&2
+ echo >&2 "Downloading ${name} from '${url}' failed because of missing mandatory packages."
+ if [ -n "$opt" ]; then
+ echo >&2 "Either add packages or disable it by issuing '--disable-${opt}' in the installer"
+ fi
+ echo >&2
+
+ run_failed "I need curl or wget to proceed, but neither is available on this system."
+ fi
+}
+
+# -----------------------------------------------------------------------------
+# external component handling
+
+fetch_and_verify() {
+ local component=${1}
+ local url=${2}
+ local base_name=${3}
+ local tmp=${4}
+ local override=${5}
+
+ if [ -z "${override}" ]; then
+ download_file "${url}" "${tmp}/${base_name}" "${component}"
+ else
+ progress "Using provided ${component} archive ${override}"
+ run cp "${override}" "${tmp}/${base_name}"
+ fi
+
+ if [ ! -f "${tmp}/${base_name}" ] || [ ! -s "${tmp}/${base_name}" ]; then
+ run_failed "Unable to find usable archive for ${component}"
+ return 1
+ fi
+
+ grep "${base_name}\$" "${INSTALLER_DIR}/packaging/${component}.checksums" > "${tmp}/sha256sums.txt" 2> /dev/null
+
+ # Checksum validation
+ if ! (cd "${tmp}" && safe_sha256sum -c "sha256sums.txt"); then
+ run_failed "${component} files checksum validation failed."
+ return 1
+ fi
+}
+
+# -----------------------------------------------------------------------------
+
+netdata_banner() {
+ local l1=" ^" \
+ l2=" |.-. .-. .-. .-. .-. .-. .-. .-. .-. .-. .-. .-. .-" \
+ l3=" | '-' '-' '-' '-' '-' '-' '-' '-' '-' '-' '-' '-' " \
+ l4=" +----+-----+-----+-----+-----+-----+-----+-----+-----+-----+-----+-----+--->" \
+ sp=" " \
+ netdata="netdata" start end msg="${*}" chartcolor="${TPUT_DIM}"
+
+ [ ${#msg} -lt ${#netdata} ] && msg="${msg}${sp:0:$((${#netdata} - ${#msg}))}"
+ [ ${#msg} -gt $((${#l2} - 20)) ] && msg="${msg:0:$((${#l2} - 23))}..."
+
+ start="$((${#l2} / 2 - 4))"
+ [ $((start + ${#msg} + 4)) -gt ${#l2} ] && start=$((${#l2} - ${#msg} - 4))
+ end=$((start + ${#msg} + 4))
+
+ echo >&2
+ echo >&2 "${chartcolor}${l1}${TPUT_RESET}"
+ echo >&2 "${chartcolor}${l2:0:start}${sp:0:2}${TPUT_RESET}${TPUT_BOLD}${TPUT_GREEN}${netdata}${TPUT_RESET}${chartcolor}${sp:0:$((end - start - 2 - ${#netdata}))}${l2:end:$((${#l2} - end))}${TPUT_RESET}"
+ echo >&2 "${chartcolor}${l3:0:start}${sp:0:2}${TPUT_RESET}${TPUT_BOLD}${TPUT_CYAN}${msg}${TPUT_RESET}${chartcolor}${sp:0:2}${l3:end:$((${#l2} - end))}${TPUT_RESET}"
+ echo >&2 "${chartcolor}${l4}${TPUT_RESET}"
+ echo >&2
+}
+
+# -----------------------------------------------------------------------------
+# portable service command
+
+service_cmd="$(command -v service 2> /dev/null)"
+rcservice_cmd="$(command -v rc-service 2> /dev/null)"
+systemctl_cmd="$(command -v systemctl 2> /dev/null)"
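+# Start or stop a service using whichever service manager is available: systemd, SysV `service`, or OpenRC `rc-service`.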
+service() {
+
+ local cmd="${1}" action="${2}"
+
+ if [ -n "${systemctl_cmd}" ]; then
+ run "${systemctl_cmd}" "${action}" "${cmd}"
+ return $?
+ elif [ -n "${service_cmd}" ]; then
+ run "${service_cmd}" "${cmd}" "${action}"
+ return $?
+ elif [ -n "${rcservice_cmd}" ]; then
+ run "${rcservice_cmd}" "${cmd}" "${action}"
+ return $?
+ fi
+ return 1
+}
+
+# -----------------------------------------------------------------------------
+# portable pidof
+
+safe_pidof() {
+ local pidof_cmd
+ pidof_cmd="$(command -v pidof 2> /dev/null)"
+ if [ -n "${pidof_cmd}" ]; then
+ ${pidof_cmd} "${@}"
+ return $?
+ else
+ ps -acxo pid,comm |
+ sed "s/^ *//g" |
+ grep netdata |
+ cut -d ' ' -f 1
+ return $?
+ fi
+}
+
+# -----------------------------------------------------------------------------
+find_processors() {
+ # Most UNIX systems have `nproc` as part of their userland (including macOS, Linux and BSD)
+ if command -v nproc > /dev/null; then
+ nproc && return
+ fi
+
+ local cpus
+ if [ -f "/proc/cpuinfo" ]; then
+ # linux
+ cpus=$(grep -c ^processor /proc/cpuinfo)
+ else
+ # freebsd
+ cpus=$(sysctl hw.ncpu 2> /dev/null | grep ^hw.ncpu | cut -d ' ' -f 2)
+ fi
+ if [ -z "${cpus}" ] || [ $((cpus)) -lt 1 ]; then
+ echo 1
+ else
+ echo "${cpus}"
+ fi
+}
+
+# -----------------------------------------------------------------------------
+fatal() {
+ printf >&2 "%s ABORTED %s %s \n\n" "${TPUT_BGRED}${TPUT_WHITE}${TPUT_BOLD}" "${TPUT_RESET}" "${*}"
+ exit 1
+}
+
+run_ok() {
+ printf >&2 "%s OK %s %s \n\n" "${TPUT_BGGREEN}${TPUT_WHITE}${TPUT_BOLD}" "${TPUT_RESET}" "${*}"
+}
+
+run_failed() {
+ printf >&2 "%s FAILED %s %s \n\n" "${TPUT_BGRED}${TPUT_WHITE}${TPUT_BOLD}" "${TPUT_RESET}" "${*}"
+}
+
+ESCAPED_PRINT_METHOD=
+if printf "%q " test > /dev/null 2>&1; then
+ ESCAPED_PRINT_METHOD="printfq"
+fi
+escaped_print() {
+ if [ "${ESCAPED_PRINT_METHOD}" = "printfq" ]; then
+ printf "%q " "${@}"
+ else
+ printf "%s" "${*}"
+ fi
+ return 0
+}
+
+run_logfile="/dev/null"
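+# Print the command to the console and the log file, execute it, and report OK/FAILED based on its exit code.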
+run() {
+ local user="${USER--}" dir="${PWD}" info info_console
+
+ if [ "${UID}" = "0" ]; then
+ info="[root ${dir}]# "
+ info_console="[${TPUT_DIM}${dir}${TPUT_RESET}]# "
+ else
+ info="[${user} ${dir}]$ "
+ info_console="[${TPUT_DIM}${dir}${TPUT_RESET}]$ "
+ fi
+
+ {
+ printf "%s" "${info}"
+ escaped_print "${@}"
+ printf "%s" " ... "
+ } >> "${run_logfile}"
+
+ printf >&2 "%s" "${info_console}${TPUT_BOLD}${TPUT_YELLOW}"
+ escaped_print >&2 "${@}"
+ printf >&2 "%s\n" "${TPUT_RESET}"
+
+ "${@}"
+
+ local ret=$?
+ if [ ${ret} -ne 0 ]; then
+ run_failed
+ printf >> "${run_logfile}" "FAILED with exit code %s\n" "${ret}"
+ else
+ run_ok
+ printf >> "${run_logfile}" "OK\n"
+ fi
+
+ return ${ret}
+}
+
+iscontainer() {
+ # man systemd-detect-virt
+ local cmd
+ cmd=$(command -v systemd-detect-virt 2> /dev/null)
+ if [ -n "${cmd}" ] && [ -x "${cmd}" ]; then
+ "${cmd}" --container > /dev/null 2>&1 && return 0
+ fi
+
+  # /proc/1/sched exposes the host-side PID of our init process!
+ # http://stackoverflow.com/a/37016302
+ local pid
+ pid=$(head -n 1 /proc/1/sched 2> /dev/null | {
+ # shellcheck disable=SC2034
+ IFS='(),#:' read -r name pid th threads
+ echo "$pid"
+ })
+ if [ -n "${pid}" ]; then
+ pid=$((pid + 0))
+ [ ${pid} -gt 1 ] && return 0
+ fi
+
+ # lxc sets environment variable 'container'
+ # shellcheck disable=SC2154
+ [ -n "${container}" ] && return 0
+
+ # docker creates /.dockerenv
+ # http://stackoverflow.com/a/25518345
+ [ -f "/.dockerenv" ] && return 0
+
+ # ubuntu and debian supply /bin/running-in-container
+ # https://www.apt-browse.org/browse/ubuntu/trusty/main/i386/upstart/1.12.1-0ubuntu4/file/bin/running-in-container
+ if [ -x "/bin/running-in-container" ]; then
+ "/bin/running-in-container" > /dev/null 2>&1 && return 0
+ fi
+
+ return 1
+}
+
+get_os_key() {
+ if [ -f /etc/os-release ]; then
+ # shellcheck disable=SC1091
+ source /etc/os-release || return 1
+ echo "${ID}-${VERSION_ID}"
+
+ elif [ -f /etc/redhat-release ]; then
+ echo "$(< /etc/redhat-release)"
+ else
+ echo "unknown"
+ fi
+}
+
+issystemd() {
+ local pids p myns ns systemctl
+
+  # if neither /lib/systemd/system nor /usr/lib/systemd/system (SLES 12.x) exists, it is not systemd
+ if [ ! -d /lib/systemd/system ] && [ ! -d /usr/lib/systemd/system ]; then
+ return 1
+ fi
+
+ # if there is no systemctl command, it is not systemd
+ systemctl=$(command -v systemctl 2> /dev/null)
+ if [ -z "${systemctl}" ] || [ ! -x "${systemctl}" ]; then
+ return 1
+ fi
+
+ # if pid 1 is systemd, it is systemd
+ [ "$(basename "$(readlink /proc/1/exe)" 2> /dev/null)" = "systemd" ] && return 0
+
+ # if systemd is not running, it is not systemd
+ pids=$(safe_pidof systemd 2> /dev/null)
+ [ -z "${pids}" ] && return 1
+
+ # check if the running systemd processes are not in our namespace
+ myns="$(readlink /proc/self/ns/pid 2> /dev/null)"
+ for p in ${pids}; do
+ ns="$(readlink "/proc/${p}/ns/pid" 2> /dev/null)"
+
+ # if pid of systemd is in our namespace, it is systemd
+ [ -n "${myns}" ] && [ "${myns}" = "${ns}" ] && return 0
+ done
+
+ # else, it is not systemd
+ return 1
+}
+
+get_systemd_service_dir() {
+ local SYSTEMD_DIRECTORY=""
+ local key
+ key="$(get_os_key)"
+
+ if [ -w "/lib/systemd/system" ]; then
+ SYSTEMD_DIRECTORY="/lib/systemd/system"
+ elif [ -w "/usr/lib/systemd/system" ]; then
+ SYSTEMD_DIRECTORY="/usr/lib/systemd/system"
+ elif [ -w "/etc/systemd/system" ]; then
+ SYSTEMD_DIRECTORY="/etc/systemd/system"
+ fi
+
+ if [[ ${key} =~ ^devuan* ]] || [ "${key}" = "debian-7" ] || [ "${key}" = "ubuntu-12.04" ] || [ "${key}" = "ubuntu-14.04" ]; then
+ SYSTEMD_DIRECTORY="/etc/systemd/system"
+ fi
+
+ echo "${SYSTEMD_DIRECTORY}"
+}
+
+install_non_systemd_init() {
+ [ "${UID}" != 0 ] && return 1
+
+ local key
+ key="$(get_os_key)"
+
+ if [ -d /etc/init.d ] && [ ! -f /etc/init.d/netdata ]; then
+ if [[ ${key} =~ ^(gentoo|alpine).* ]]; then
+ echo >&2 "Installing OpenRC init file..."
+ run cp system/netdata-openrc /etc/init.d/netdata &&
+ run chmod 755 /etc/init.d/netdata &&
+ run rc-update add netdata default &&
+ return 0
+
+ elif [[ ${key} =~ ^devuan* ]] || [ "${key}" = "debian-7" ] || [ "${key}" = "ubuntu-12.04" ] || [ "${key}" = "ubuntu-14.04" ]; then
+ echo >&2 "Installing LSB init file..."
+ run cp system/netdata-lsb /etc/init.d/netdata &&
+ run chmod 755 /etc/init.d/netdata &&
+ run update-rc.d netdata defaults &&
+ run update-rc.d netdata enable &&
+ return 0
+ elif [[ ${key} =~ ^(amzn-201[5678]|ol|CentOS release 6|Red Hat Enterprise Linux Server release 6|Scientific Linux CERN SLC release 6|CloudLinux Server release 6).* ]]; then
+ echo >&2 "Installing init.d file..."
+ run cp system/netdata-init-d /etc/init.d/netdata &&
+ run chmod 755 /etc/init.d/netdata &&
+ run chkconfig netdata on &&
+ return 0
+ else
+ echo >&2 "I don't know what init file to install on system '${key}'. Open a github issue to help us fix it."
+ return 1
+ fi
+ elif [ -f /etc/init.d/netdata ]; then
+ echo >&2 "file '/etc/init.d/netdata' already exists."
+ return 0
+ else
+ echo >&2 "I don't know what init file to install on system '${key}'. Open a github issue to help us fix it."
+ fi
+
+ return 1
+}
+
+# This is used by netdata-installer.sh
+# shellcheck disable=SC2034
+NETDATA_STOP_CMD="netdatacli shutdown-agent"
+
+NETDATA_START_CMD="netdata"
+NETDATA_INSTALLER_START_CMD=""
+
+install_netdata_service() {
+ local uname
+ uname="$(uname 2> /dev/null)"
+
+ if [ "${UID}" -eq 0 ]; then
+ if [ "${uname}" = "Darwin" ]; then
+
+ if [ -f "/Library/LaunchDaemons/com.github.netdata.plist" ]; then
+ echo >&2 "file '/Library/LaunchDaemons/com.github.netdata.plist' already exists."
+ return 0
+ else
+ echo >&2 "Installing MacOS X plist file..."
+ # This is used by netdata-installer.sh
+ # shellcheck disable=SC2034
+ run cp system/netdata.plist /Library/LaunchDaemons/com.github.netdata.plist &&
+ run launchctl load /Library/LaunchDaemons/com.github.netdata.plist &&
+ NETDATA_START_CMD="launchctl start com.github.netdata" &&
+ NETDATA_STOP_CMD="launchctl stop com.github.netdata"
+ return 0
+ fi
+
+ elif [ "${uname}" = "FreeBSD" ]; then
+ # This is used by netdata-installer.sh
+ # shellcheck disable=SC2034
+ run cp system/netdata-freebsd /etc/rc.d/netdata && NETDATA_START_CMD="service netdata start" &&
+ NETDATA_STOP_CMD="service netdata stop" &&
+ NETDATA_INSTALLER_START_CMD="service netdata onestart" &&
+ myret=$?
+
+ echo >&2 "Note: To explicitly enable netdata automatic start, set 'netdata_enable' to 'YES' in /etc/rc.conf"
+ echo >&2 ""
+
+ return ${myret}
+
+ elif issystemd; then
+ # systemd is running on this system
+ NETDATA_START_CMD="systemctl start netdata"
+ # This is used by netdata-installer.sh
+ # shellcheck disable=SC2034
+ NETDATA_STOP_CMD="systemctl stop netdata"
+ NETDATA_INSTALLER_START_CMD="${NETDATA_START_CMD}"
+
+ SYSTEMD_DIRECTORY="$(get_systemd_service_dir)"
+
+ if [ "${SYSTEMD_DIRECTORY}x" != "x" ]; then
+ ENABLE_NETDATA_IF_PREVIOUSLY_ENABLED="run systemctl enable netdata"
+ IS_NETDATA_ENABLED="$(systemctl is-enabled netdata 2> /dev/null || echo "Netdata not there")"
+ if [ "${IS_NETDATA_ENABLED}" == "disabled" ]; then
+ echo >&2 "Netdata was there and disabled, make sure we don't re-enable it ourselves"
+ ENABLE_NETDATA_IF_PREVIOUSLY_ENABLED="true"
+ fi
+
+ echo >&2 "Installing systemd service..."
+ run cp system/netdata.service "${SYSTEMD_DIRECTORY}/netdata.service" &&
+ run systemctl daemon-reload &&
+ ${ENABLE_NETDATA_IF_PREVIOUSLY_ENABLED} &&
+ return 0
+ else
+ echo >&2 "no systemd directory; cannot install netdata.service"
+ fi
+ else
+ install_non_systemd_init
+ local ret=$?
+
+ if [ ${ret} -eq 0 ]; then
+ if [ -n "${service_cmd}" ]; then
+ NETDATA_START_CMD="service netdata start"
+ # This is used by netdata-installer.sh
+ # shellcheck disable=SC2034
+ NETDATA_STOP_CMD="service netdata stop"
+ elif [ -n "${rcservice_cmd}" ]; then
+ NETDATA_START_CMD="rc-service netdata start"
+ # This is used by netdata-installer.sh
+ # shellcheck disable=SC2034
+ NETDATA_STOP_CMD="rc-service netdata stop"
+ fi
+ NETDATA_INSTALLER_START_CMD="${NETDATA_START_CMD}"
+ fi
+
+ return ${ret}
+ fi
+ fi
+
+ return 1
+}
+
+# -----------------------------------------------------------------------------
+# stop netdata
+
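+# Check whether the given PID is a netdata process by inspecting /proc/<pid>/stat (assumes yes on systems without /proc).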
+pidisnetdata() {
+ if [ -d /proc/self ]; then
+ if [ -z "$1" ] || [ ! -f "/proc/$1/stat" ]; then
+ return 1
+ fi
+ [ "$(cut -d '(' -f 2 "/proc/$1/stat" | cut -d ')' -f 1)" = "netdata" ] && return 0
+ return 1
+ fi
+ return 0
+}
+
+stop_netdata_on_pid() {
+ local pid="${1}" ret=0 count=0
+
+ pidisnetdata "${pid}" || return 0
+
+ printf >&2 "Stopping netdata on pid %s ..." "${pid}"
+ while [ -n "$pid" ] && [ ${ret} -eq 0 ]; do
+ if [ ${count} -gt 24 ]; then
+ echo >&2 "Cannot stop the running netdata on pid ${pid}."
+ return 1
+ fi
+
+ count=$((count + 1))
+
+ pidisnetdata "${pid}" || ret=1
+ if [ ${ret} -eq 1 ]; then
+ break
+ fi
+
+ if [ ${count} -lt 12 ]; then
+ run kill "${pid}" 2> /dev/null
+ ret=$?
+ else
+ run kill -9 "${pid}" 2> /dev/null
+ ret=$?
+ fi
+
+ test ${ret} -eq 0 && printf >&2 "." && sleep 5
+
+ done
+
+ echo >&2
+ if [ ${ret} -eq 0 ]; then
+ echo >&2 "SORRY! CANNOT STOP netdata ON PID ${pid} !"
+ return 1
+ fi
+
+ echo >&2 "netdata on pid ${pid} stopped."
+ return 0
+}
+
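+# Collect candidate netdata PIDs from pid files and pidof, keeping only those in our own PID namespace.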
+netdata_pids() {
+ local p myns ns
+
+ myns="$(readlink /proc/self/ns/pid 2> /dev/null)"
+
+ for p in \
+ $(cat /var/run/netdata.pid 2> /dev/null) \
+ $(cat /var/run/netdata/netdata.pid 2> /dev/null) \
+ $(safe_pidof netdata 2> /dev/null); do
+ ns="$(readlink "/proc/${p}/ns/pid" 2> /dev/null)"
+
+ if [ -z "${myns}" ] || [ -z "${ns}" ] || [ "${myns}" = "${ns}" ]; then
+ pidisnetdata "${p}" && echo "${p}"
+ fi
+ done
+}
+
+stop_all_netdata() {
+ local p uname
+
+ if [ "${UID}" -eq 0 ]; then
+ uname="$(uname 2> /dev/null)"
+
+ # Any of these may fail, but we need to not bail if they do.
+ if issystemd; then
+ if systemctl stop netdata; then
+ sleep 5
+ fi
+ elif [ "${uname}" = "Darwin" ]; then
+ if launchctl stop netdata; then
+ sleep 5
+ fi
+ elif [ "${uname}" = "FreeBSD" ]; then
+ if /etc/rc.d/netdata stop; then
+ sleep 5
+ fi
+ else
+ if service netdata stop; then
+ sleep 5
+ fi
+ fi
+ fi
+
+ if [ -n "$(netdata_pids)" ] && [ -n "$(builtin type -P netdatacli)" ]; then
+ netdatacli shutdown-agent
+ sleep 20
+ fi
+
+ for p in $(netdata_pids); do
+ # shellcheck disable=SC2086
+ stop_netdata_on_pid ${p}
+ done
+}
+
+# -----------------------------------------------------------------------------
+# restart netdata
+
+restart_netdata() {
+ local netdata="${1}"
+ shift
+
+ local started=0
+
+ progress "Restarting netdata instance"
+
+ if [ -z "${NETDATA_INSTALLER_START_CMD}" ]; then
+ NETDATA_INSTALLER_START_CMD="${netdata}"
+ fi
+
+ if [ "${UID}" -eq 0 ]; then
+ echo >&2
+ echo >&2 "Stopping all netdata threads"
+ run stop_all_netdata
+
+ echo >&2 "Starting netdata using command '${NETDATA_INSTALLER_START_CMD}'"
+ # shellcheck disable=SC2086
+ run ${NETDATA_INSTALLER_START_CMD} && started=1
+
+ if [ ${started} -eq 1 ] && [ -z "$(netdata_pids)" ]; then
+ echo >&2 "Ooops! it seems netdata is not started."
+ started=0
+ fi
+
+ if [ ${started} -eq 0 ]; then
+ echo >&2 "Attempting another netdata start using command '${NETDATA_INSTALLER_START_CMD}'"
+ # shellcheck disable=SC2086
+ run ${NETDATA_INSTALLER_START_CMD} && started=1
+ fi
+ fi
+
+ if [ ${started} -eq 1 ] && [ -z "$(netdata_pids)" ]; then
+ echo >&2 "Hm... it seems netdata is still not started."
+ started=0
+ fi
+
+ if [ ${started} -eq 0 ]; then
+ # still not started... another forced attempt, just run the binary
+ echo >&2 "Netdata service still not started, attempting another forced restart by running '${netdata} ${*}'"
+ run stop_all_netdata
+ run "${netdata}" "${@}"
+ return $?
+ fi
+
+ return 0
+}
+
+# -----------------------------------------------------------------------------
+# install netdata logrotate
+
+install_netdata_logrotate() {
+ if [ "${UID}" -eq 0 ]; then
+ if [ -d /etc/logrotate.d ]; then
+ if [ ! -f /etc/logrotate.d/netdata ]; then
+ run cp system/netdata.logrotate /etc/logrotate.d/netdata
+ fi
+
+ if [ -f /etc/logrotate.d/netdata ]; then
+ run chmod 644 /etc/logrotate.d/netdata
+ fi
+
+ return 0
+ fi
+ fi
+
+ return 1
+}
+
+# -----------------------------------------------------------------------------
+# create netdata.conf
+
+create_netdata_conf() {
+ local path="${1}" url="${2}"
+
+ if [ -s "${path}" ]; then
+ return 0
+ fi
+
+ if [ -n "$url" ]; then
+ echo >&2 "Downloading default configuration from netdata..."
+ sleep 5
+
+ # remove a possibly obsolete configuration file
+ [ -f "${path}.new" ] && rm "${path}.new"
+
+ # disable a proxy to get data from the local netdata
+ export http_proxy=
+ export https_proxy=
+
+ if command -v curl 1> /dev/null 2>&1; then
+ run curl -sSL --connect-timeout 10 --retry 3 "${url}" > "${path}.new"
+ elif command -v wget 1> /dev/null 2>&1; then
+ run wget -T 15 -O - "${url}" > "${path}.new"
+ fi
+
+ if [ -s "${path}.new" ]; then
+ run mv "${path}.new" "${path}"
+ run_ok "New configuration saved for you to edit at ${path}"
+ else
+ [ -f "${path}.new" ] && rm "${path}.new"
+ run_failed "Cannnot download configuration from netdata daemon using url '${url}'"
+ url=''
+ fi
+ fi
+
+ if [ -z "$url" ]; then
+ echo "# netdata can generate its own config which is available at 'http://<netdata_ip>/netdata.conf'" > "${path}"
+ echo "# You can download it with command like: 'wget -O ${path} http://localhost:19999/netdata.conf'" >> "${path}"
+ fi
+
+}
+
+portable_add_user() {
+ local username="${1}" homedir="${2}"
+
+ [ -z "${homedir}" ] && homedir="/tmp"
+
+ # Check if user exists
+ if cut -d ':' -f 1 < /etc/passwd | grep "^${username}$" 1> /dev/null 2>&1; then
+ echo >&2 "User '${username}' already exists."
+ return 0
+ fi
+
+ echo >&2 "Adding ${username} user account with home ${homedir} ..."
+
+ local nologin
+ nologin="$(command -v nologin || echo '/bin/false')"
+
+ # Linux
+ if command -v useradd 1> /dev/null 2>&1; then
+ run useradd -r -g "${username}" -c "${username}" -s "${nologin}" --no-create-home -d "${homedir}" "${username}" && return 0
+ fi
+
+ # FreeBSD
+ if command -v pw 1> /dev/null 2>&1; then
+ run pw useradd "${username}" -d "${homedir}" -g "${username}" -s "${nologin}" && return 0
+ fi
+
+ # BusyBox
+ if command -v adduser 1> /dev/null 2>&1; then
+ run adduser -h "${homedir}" -s "${nologin}" -D -G "${username}" "${username}" && return 0
+ fi
+
+ # mac OS
+ if command -v sysadminctl 1> /dev/null 2>&1; then
+ run sysadminctl -addUser "${username}" && return 0
+ fi
+
+ echo >&2 "Failed to add ${username} user account !"
+
+ return 1
+}
+
+portable_add_group() {
+ local groupname="${1}"
+
+  # Check if the group exists
+ if cut -d ':' -f 1 < /etc/group | grep "^${groupname}$" 1> /dev/null 2>&1; then
+ echo >&2 "Group '${groupname}' already exists."
+ return 0
+ fi
+
+ echo >&2 "Adding ${groupname} user group ..."
+
+ # Linux
+ if command -v groupadd 1> /dev/null 2>&1; then
+ run groupadd -r "${groupname}" && return 0
+ fi
+
+ # FreeBSD
+ if command -v pw 1> /dev/null 2>&1; then
+ run pw groupadd "${groupname}" && return 0
+ fi
+
+ # BusyBox
+ if command -v addgroup 1> /dev/null 2>&1; then
+ run addgroup "${groupname}" && return 0
+ fi
+
+ # mac OS
+ if command -v dseditgroup 1> /dev/null 2>&1; then
+ dseditgroup -o create "${groupname}" && return 0
+ fi
+
+ echo >&2 "Failed to add ${groupname} user group !"
+ return 1
+}
+
+portable_add_user_to_group() {
+ local groupname="${1}" username="${2}"
+
+  # Check if the group exists
+ if ! cut -d ':' -f 1 < /etc/group | grep "^${groupname}$" > /dev/null 2>&1; then
+ echo >&2 "Group '${groupname}' does not exist."
+ return 1
+ fi
+
+ # Check if user is in group
+ if [[ ",$(grep "^${groupname}:" < /etc/group | cut -d ':' -f 4)," =~ ,${username}, ]]; then
+ # username is already there
+ echo >&2 "User '${username}' is already in group '${groupname}'."
+ return 0
+ else
+ # username is not in group
+ echo >&2 "Adding ${username} user to the ${groupname} group ..."
+
+ # Linux
+ if command -v usermod 1> /dev/null 2>&1; then
+ run usermod -a -G "${groupname}" "${username}" && return 0
+ fi
+
+ # FreeBSD
+ if command -v pw 1> /dev/null 2>&1; then
+ run pw groupmod "${groupname}" -m "${username}" && return 0
+ fi
+
+ # BusyBox
+ if command -v addgroup 1> /dev/null 2>&1; then
+ run addgroup "${username}" "${groupname}" && return 0
+ fi
+
+ # mac OS
+ if command -v dseditgroup 1> /dev/null 2>&1; then
+ dseditgroup -u "${username}" "${groupname}" && return 0
+ fi
+ echo >&2 "Failed to add user ${username} to group ${groupname} !"
+ return 1
+ fi
+}
+
+safe_sha256sum() {
+  # Within the context of the installer, we only use the -c option that is common between the two commands
+  # We will have to reconsider if we start using non-common options
+ if command -v sha256sum > /dev/null 2>&1; then
+ sha256sum "$@"
+ elif command -v shasum > /dev/null 2>&1; then
+ shasum -a 256 "$@"
+ else
+ fatal "I could not find a suitable checksum binary to use"
+ fi
+}
+
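+# Decide how auto-updates should be scheduled: a daily cron directory, a systemd timer, /etc/cron.d, or none.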
+_get_scheduler_type() {
+ if _get_intervaldir > /dev/null ; then
+ echo 'interval'
+ elif issystemd ; then
+ echo 'systemd'
+ elif [ -d /etc/cron.d ] ; then
+ echo 'crontab'
+ else
+ echo 'none'
+ fi
+}
+
+_get_intervaldir() {
+ if [ -d /etc/cron.daily ]; then
+ echo /etc/cron.daily
+ elif [ -d /etc/periodic/daily ]; then
+ echo /etc/periodic/daily
+ else
+ return 1
+ fi
+
+ return 0
+}
+
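+# Install the updater script (and systemd timer units, when applicable) and point it at the installed .environment file.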
+install_netdata_updater() {
+ if [ "${INSTALLER_DIR}" ] && [ -f "${INSTALLER_DIR}/packaging/installer/netdata-updater.sh" ]; then
+ cat "${INSTALLER_DIR}/packaging/installer/netdata-updater.sh" > "${NETDATA_PREFIX}/usr/libexec/netdata/netdata-updater.sh" || return 1
+ fi
+
+ if [ "${NETDATA_SOURCE_DIR}" ] && [ -f "${NETDATA_SOURCE_DIR}/packaging/installer/netdata-updater.sh" ]; then
+ cat "${NETDATA_SOURCE_DIR}/packaging/installer/netdata-updater.sh" > "${NETDATA_PREFIX}/usr/libexec/netdata/netdata-updater.sh" || return 1
+ fi
+
+ if issystemd && [ -n "$(get_systemd_service_dir)" ]; then
+ cat "${NETDATA_SOURCE_DIR}/system/netdata-updater.timer" > "$(get_systemd_service_dir)/netdata-updater.timer"
+ cat "${NETDATA_SOURCE_DIR}/system/netdata-updater.service" > "$(get_systemd_service_dir)/netdata-updater.service"
+ fi
+
+ sed -i -e "s|THIS_SHOULD_BE_REPLACED_BY_INSTALLER_SCRIPT|${NETDATA_USER_CONFIG_DIR}/.environment|" "${NETDATA_PREFIX}/usr/libexec/netdata/netdata-updater.sh" || return 1
+
+ chmod 0755 "${NETDATA_PREFIX}/usr/libexec/netdata/netdata-updater.sh"
+ echo >&2 "Update script is located at ${TPUT_GREEN}${TPUT_BOLD}${NETDATA_PREFIX}/usr/libexec/netdata/netdata-updater.sh${TPUT_RESET}"
+ echo >&2
+
+ return 0
+}
+
+cleanup_old_netdata_updater() {
+ if [ -f "${NETDATA_PREFIX}"/usr/libexec/netdata-updater.sh ]; then
+ echo >&2 "Removing updater from deprecated location"
+ rm -f "${NETDATA_PREFIX}"/usr/libexec/netdata-updater.sh
+ fi
+
+ if issystemd && [ -n "$(get_systemd_service_dir)" ] ; then
+ systemctl disable netdata-updater.timer
+ rm -f "$(get_systemd_service_dir)/netdata-updater.timer"
+ rm -f "$(get_systemd_service_dir)/netdata-updater.service"
+ fi
+
+ if [ -d /etc/cron.daily ]; then
+ rm -f /etc/cron.daily/netdata-updater.sh
+ rm -f /etc/cron.daily/netdata-updater
+ fi
+
+ if [ -d /etc/periodic/daily ]; then
+ rm -f /etc/periodic/daily/netdata-updater.sh
+ rm -f /etc/periodic/daily/netdata-updater
+ fi
+
+ if [ -d /etc/cron.d ]; then
+ rm -f /etc/cron.d/netdata-updater
+ fi
+
+ return 0
+}
+
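+# Enable scheduled auto-updates using the requested scheduler type, or the auto-detected one.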
+enable_netdata_updater() {
+ local updater_type
+
+ if [ -n "${1}" ] ; then
+ updater_type="${1}"
+ else
+ updater_type="$(_get_scheduler_type)"
+ fi
+
+ case "${updater_type}" in
+ "systemd")
+ systemctl enable netdata-updater.timer
+
+ echo >&2 "Auto-updating has been enabled using a systemd timer unit."
+ echo >&2
+ echo >&2 "If the update process fails, the failure will be logged to the systemd journal just like a regular service failure."
+ echo >&2 "Successful updates should produce empty logs."
+ echo >&2
+ ;;
+ "interval")
+ ln -sf "${NETDATA_PREFIX}/usr/libexec/netdata/netdata-updater.sh" "$(_get_intervaldir)/netdata-updater"
+
+ echo >&2 "Auto-updating has been enabled through cron, updater script linked to ${TPUT_RED}${TPUT_BOLD}$(_get_intervaldir)/netdata-updater${TPUT_RESET}"
+ echo >&2
+ echo >&2 "If the update process fails and you have email notifications set up correctly for cron on this system, you should receive an email notification of the failure."
+ echo >&2 "Successful updates will not send an email."
+ echo >&2
+ ;;
+ "crontab")
+ cat "${NETDATA_SOURCE_DIR}/system/netdata.crontab" > "/etc/cron.d/netdata-updater"
+
+ echo >&2 "Auto-updating has been enabled through cron, using a crontab at ${TPUT_RED}${TPUT_BOLD}/etc/cron.d/netdata-updater${TPUT_RESET}"
+ echo >&2
+ echo >&2 "If the update process fails and you have email notifications set up correctly for cron on this system, you should receive an email notification of the failure."
+ echo >&2 "Successful updates will not send an email."
+ echo >&2
+ ;;
+ *)
+ echo >&2 "Unable to determine what type of auto-update scheduling to use, not enabling auto-updates."
+ echo >&2
+ return 1
+ esac
+
+ return 0
+}
+
+disable_netdata_updater() {
+ echo >&2 "You chose *NOT* to enable auto-update, removing any links to the updater from cron (it may have happened if you are reinstalling)"
+ echo >&2
+
+ if issystemd && [ -n "$(get_systemd_service_dir)" ] ; then
+ systemctl disable netdata-updater.timer
+ fi
+
+ if [ -d /etc/cron.daily ]; then
+ rm -f /etc/cron.daily/netdata-updater.sh
+ rm -f /etc/cron.daily/netdata-updater
+ fi
+
+ if [ -d /etc/periodic/daily ]; then
+ rm -f /etc/periodic/daily/netdata-updater.sh
+ rm -f /etc/periodic/daily/netdata-updater
+ fi
+
+ if [ -d /etc/cron.d ]; then
+ rm -f /etc/cron.d/netdata-updater
+ fi
+
+ return 0
+}
+
+set_netdata_updater_channel() {
+ sed -i -e "s/^RELEASE_CHANNEL=.*/RELEASE_CHANNEL=\"${RELEASE_CHANNEL}\"/" "${NETDATA_USER_CONFIG_DIR}/.environment"
+}
diff --git a/packaging/installer/install-required-packages.sh b/packaging/installer/install-required-packages.sh
new file mode 100755
index 0000000..23cbe12
--- /dev/null
+++ b/packaging/installer/install-required-packages.sh
@@ -0,0 +1,2175 @@
+#!/usr/bin/env bash
+# shellcheck disable=SC2034
+# We use lots of computed variable names in here, so we need to disable shellcheck 2034
+
+export PATH="${PATH}:/bin:/sbin:/usr/bin:/usr/sbin:/usr/local/bin:/usr/local/sbin"
+export LC_ALL=C
+
+# Be nice on production environments
+renice 19 $$ > /dev/null 2> /dev/null
+
+ME="${0}"
+
+if [ "${BASH_VERSINFO[0]}" -lt "4" ]; then
+ echo >&2 "Sorry! This script needs BASH version 4+, but you have BASH version ${BASH_VERSION}"
+ exit 1
+fi
+
+# These options control which packages we are going to install
+# They can be pre-set, but also can be controlled with command line options
+PACKAGES_NETDATA=${PACKAGES_NETDATA-0}
+PACKAGES_NETDATA_NODEJS=${PACKAGES_NETDATA_NODEJS-0}
+PACKAGES_NETDATA_PYTHON=${PACKAGES_NETDATA_PYTHON-0}
+PACKAGES_NETDATA_PYTHON3=${PACKAGES_NETDATA_PYTHON3-0}
+PACKAGES_NETDATA_PYTHON_MYSQL=${PACKAGES_NETDATA_PYTHON_MYSQL-0}
+PACKAGES_NETDATA_PYTHON_POSTGRES=${PACKAGES_NETDATA_PYTHON_POSTGRES-0}
+PACKAGES_NETDATA_PYTHON_MONGO=${PACKAGES_NETDATA_PYTHON_MONGO-0}
+PACKAGES_DEBUG=${PACKAGES_DEBUG-0}
+PACKAGES_IPRANGE=${PACKAGES_IPRANGE-0}
+PACKAGES_FIREHOL=${PACKAGES_FIREHOL-0}
+PACKAGES_FIREQOS=${PACKAGES_FIREQOS-0}
+PACKAGES_UPDATE_IPSETS=${PACKAGES_UPDATE_IPSETS-0}
+PACKAGES_NETDATA_DEMO_SITE=${PACKAGES_NETDATA_DEMO_SITE-0}
+PACKAGES_NETDATA_SENSORS=${PACKAGES_NETDATA_SENSORS-0}
+PACKAGES_NETDATA_DATABASE=${PACKAGES_NETDATA_DATABASE-0}
+PACKAGES_NETDATA_EBPF=${PACKAGES_NETDATA_EBPF-0}
+
+# needed commands
+lsb_release=$(command -v lsb_release 2> /dev/null)
+
+# Check which package managers are available
+apk=$(command -v apk 2> /dev/null)
+apt_get=$(command -v apt-get 2> /dev/null)
+brew=$(command -v brew 2> /dev/null)
+pkg=$(command -v pkg 2> /dev/null)
+dnf=$(command -v dnf 2> /dev/null)
+emerge=$(command -v emerge 2> /dev/null)
+equo=$(command -v equo 2> /dev/null)
+pacman=$(command -v pacman 2> /dev/null)
+swupd=$(command -v swupd 2> /dev/null)
+yum=$(command -v yum 2> /dev/null)
+zypper=$(command -v zypper 2> /dev/null)
+
+distribution=
+release=
+version=
+codename=
+package_installer=
+tree=
+detection=
+NAME=
+ID=
+ID_LIKE=
+VERSION=
+VERSION_ID=
+
+usage() {
+ cat << EOF
+OPTIONS:
+
+${ME} [--dont-wait] [--non-interactive] \\
+ [distribution DD [version VV] [codename CN]] [installer IN] [packages]
+
+Supported distributions (DD):
+
+ - arch (all Arch Linux derivatives)
+ - centos (all CentOS derivatives)
+ - gentoo (all Gentoo Linux derivatives)
+ - sabayon (all Sabayon Linux derivatives)
+ - debian, ubuntu (all Debian and Ubuntu derivatives)
+ - redhat, fedora (all Red Hat and Fedora derivatives)
+ - suse, opensuse (all SUSE and openSUSE derivatives)
+ - clearlinux (all Clear Linux derivatives)
+ - macos (Apple's macOS)
+
+Supported installers (IN):
+
+ - apt-get all Debian / Ubuntu Linux derivatives
+ - dnf newer Red Hat / Fedora Linux
+ - emerge all Gentoo Linux derivatives
+ - equo all Sabayon Linux derivatives
+ - pacman all Arch Linux derivatives
+ - yum all Red Hat / Fedora / CentOS Linux derivatives
+ - zypper all SUSE Linux derivatives
+ - apk all Alpine derivatives
+ - swupd all Clear Linux derivatives
+ - brew macOS Homebrew
+ - pkg FreeBSD Ports
+
+Supported packages (you can append many of them):
+
+ - netdata-all all packages required to install netdata
+ including mysql client, postgres client,
+ node.js, python, sensors, etc
+
+ - netdata minimum packages required to install netdata
+ (no mysql client, no nodejs, includes python)
+
+ - nodejs install nodejs
+ (required for monitoring named and SNMP)
+
+ - python install python
+
+ - python3 install python3
+
+ - python-mysql install MySQLdb
+ (for monitoring mysql, will install python3 version
+ if python3 is enabled or detected)
+
+ - python-postgres install psycopg2
+ (for monitoring postgres, will install python3 version
+ if python3 is enabled or detected)
+
+ - python-pymongo install python-pymongo (or python3-pymongo for python3)
+
+ - sensors install lm_sensors for monitoring h/w sensors
+
+ - firehol-all packages required for FireHOL, FireQoS, update-ipsets
+ - firehol packages required for FireHOL
+ - fireqos packages required for FireQoS
+ - update-ipsets packages required for update-ipsets
+
+ - demo packages required for running a netdata demo site
+ (includes nginx and various debugging tools)
+
+
+If you don't supply the --dont-wait option, the program
+will ask you before touching your system.
+
+EOF
+}
+
+release2lsb_release() {
+ # loads the given /etc/x-release file
+  # this file is normally a single line containing something like
+ #
+ # X Linux release 1.2.3 (release-name)
+ #
+ # It attempts to parse it
+ # If it succeeds, it returns 0
+ # otherwise it returns 1
+
+ local file="${1}" x DISTRIB_ID="" DISTRIB_RELEASE="" DISTRIB_CODENAME=""
+ echo >&2 "Loading ${file} ..."
+
+ x="$(grep -v "^$" "${file}" | head -n 1)"
+
+ if [[ "${x}" =~ ^.*[[:space:]]+Linux[[:space:]]+release[[:space:]]+.*[[:space:]]+(.*)[[:space:]]*$ ]]; then
+ eval "$(echo "${x}" | sed "s|^\(.*\)[[:space:]]\+Linux[[:space:]]\+release[[:space:]]\+\(.*\)[[:space:]]\+(\(.*\))[[:space:]]*$|DISTRIB_ID=\"\1\"\nDISTRIB_RELEASE=\"\2\"\nDISTRIB_CODENAME=\"\3\"|g" | grep "^DISTRIB")"
+ elif [[ "${x}" =~ ^.*[[:space:]]+Linux[[:space:]]+release[[:space:]]+.*[[:space:]]+$ ]]; then
+ eval "$(echo "${x}" | sed "s|^\(.*\)[[:space:]]\+Linux[[:space:]]\+release[[:space:]]\+\(.*\)[[:space:]]*$|DISTRIB_ID=\"\1\"\nDISTRIB_RELEASE=\"\2\"|g" | grep "^DISTRIB")"
+ elif [[ "${x}" =~ ^.*[[:space:]]+release[[:space:]]+.*[[:space:]]+(.*)[[:space:]]*$ ]]; then
+ eval "$(echo "${x}" | sed "s|^\(.*\)[[:space:]]\+release[[:space:]]\+\(.*\)[[:space:]]\+(\(.*\))[[:space:]]*$|DISTRIB_ID=\"\1\"\nDISTRIB_RELEASE=\"\2\"\nDISTRIB_CODENAME=\"\3\"|g" | grep "^DISTRIB")"
+ elif [[ "${x}" =~ ^.*[[:space:]]+release[[:space:]]+.*[[:space:]]+$ ]]; then
+ eval "$(echo "${x}" | sed "s|^\(.*\)[[:space:]]\+release[[:space:]]\+\(.*\)[[:space:]]*$|DISTRIB_ID=\"\1\"\nDISTRIB_RELEASE=\"\2\"|g" | grep "^DISTRIB")"
+ fi
+
+ distribution="${DISTRIB_ID}"
+ version="${DISTRIB_RELEASE}"
+ codename="${DISTRIB_CODENAME}"
+
+ [ -z "${distribution}" ] && echo >&2 "Cannot parse this lsb-release: ${x}" && return 1
+ detection="${file}"
+ return 0
+}
+
+get_os_release() {
+ # Loads the /etc/os-release or /usr/lib/os-release file(s)
+ # Only the required fields are loaded
+ #
+ # If it manages to load a valid os-release, it returns 0
+ # otherwise it returns 1
+ #
+ # It searches the ID_LIKE field for a compatible distribution
+
+ os_release_file=
+ if [ -s "/etc/os-release" ]; then
+ os_release_file="/etc/os-release"
+ elif [ -s "/usr/lib/os-release" ]; then
+ os_release_file="/usr/lib/os-release"
+ else
+ echo >&2 "Cannot find an os-release file ..."
+ return 1
+ fi
+
+ local x
+ echo >&2 "Loading ${os_release_file} ..."
+
+ eval "$(grep -E "^(NAME|ID|ID_LIKE|VERSION|VERSION_ID)=" "${os_release_file}")"
+ for x in "${ID}" ${ID_LIKE}; do
+ case "${x,,}" in
+ alpine | arch | centos | clear-linux-os | debian | fedora | gentoo | manjaro | opensuse-leap | rhel | sabayon | sles | suse | ubuntu)
+ distribution="${x}"
+ version="${VERSION_ID}"
+ codename="${VERSION}"
+ detection="${os_release_file}"
+ break
+ ;;
+ *)
+ echo >&2 "Unknown distribution ID: ${x}"
+ ;;
+ esac
+ done
+ [ -z "${distribution}" ] && echo >&2 "Cannot find valid distribution in: ${ID} ${ID_LIKE}" && return 1
+
+ [ -z "${distribution}" ] && return 1
+ return 0
+}
+
+get_lsb_release() {
+ # Loads the /etc/lsb-release file
+ # If it fails, it attempts to run the command: lsb_release -a
+ # and parse its output
+ #
+ # If it manages to find the lsb-release, it returns 0
+ # otherwise it returns 1
+
+ if [ -f "/etc/lsb-release" ]; then
+ echo >&2 "Loading /etc/lsb-release ..."
+ local DISTRIB_ID="" DISTRIB_RELEASE="" DISTRIB_CODENAME=""
+ eval "$(grep -E "^(DISTRIB_ID|DISTRIB_RELEASE|DISTRIB_CODENAME)=" /etc/lsb-release)"
+ distribution="${DISTRIB_ID}"
+ version="${DISTRIB_RELEASE}"
+ codename="${DISTRIB_CODENAME}"
+ detection="/etc/lsb-release"
+ fi
+
+ if [ -z "${distribution}" ] && [ -n "${lsb_release}" ]; then
+ echo >&2 "Cannot find distribution with /etc/lsb-release"
+ echo >&2 "Running command: lsb_release ..."
+ eval "declare -A release=( $(lsb_release -a 2> /dev/null | sed -e "s|^\(.*\):[[:space:]]*\(.*\)$|[\1]=\"\2\"|g") )"
+ distribution="${release["Distributor ID"]}"
+ version="${release[Release]}"
+ codename="${release[Codename]}"
+ detection="lsb_release"
+ fi
+
+ [ -z "${distribution}" ] && echo >&2 "Cannot find valid distribution with lsb-release" && return 1
+ return 0
+}
+
+find_etc_any_release() {
+ # Check for any of the known /etc/x-release files
+ # If it finds one, it loads it and returns 0
+ # otherwise it returns 1
+
+ if [ -f "/etc/arch-release" ]; then
+ release2lsb_release "/etc/arch-release" && return 0
+ fi
+
+ if [ -f "/etc/centos-release" ]; then
+ release2lsb_release "/etc/centos-release" && return 0
+ fi
+
+ if [ -f "/etc/redhat-release" ]; then
+ release2lsb_release "/etc/redhat-release" && return 0
+ fi
+
+ if [ -f "/etc/SuSe-release" ]; then
+ release2lsb_release "/etc/SuSe-release" && return 0
+ fi
+
+ return 1
+}
+
+autodetect_distribution() {
+ # autodetection of distribution/OS
+ case "$(uname -s)" in
+ "Linux")
+ get_os_release || get_lsb_release || find_etc_any_release
+ ;;
+ "FreeBSD")
+ distribution="freebsd"
+ version="$(uname -r)"
+ detection="uname"
+ ;;
+ "Darwin")
+ distribution="macos"
+ version="$(uname -r)"
+ detection="uname"
+
+ if [ ${EUID} -eq 0 ]; then
+ echo >&2 "This script does not support running as EUID 0 on macOS. Please run it as a regular user."
+ exit 1
+ fi
+ ;;
+ *)
+ return 1
+ ;;
+ esac
+}
+
+user_picks_distribution() {
+ # let the user pick a distribution
+
+ echo >&2
+ echo >&2 "I NEED YOUR HELP"
+ echo >&2 "It seems I cannot detect your system automatically."
+
+ if [ "${NON_INTERACTIVE}" -eq 1 ]; then
+ echo >&2 "Running in non-interactive mode"
+ echo >&2 " > Bailing out..."
+ exit 1
+ fi
+
+ if [ -z "${equo}" ] && [ -z "${emerge}" ] && [ -z "${apt_get}" ] && [ -z "${yum}" ] && [ -z "${dnf}" ] && [ -z "${pacman}" ] && [ -z "${apk}" ] && [ -z "${swupd}" ]; then
+ echo >&2 "And it seems I cannot find a known package manager in this system."
+ echo >&2 "Please open a github issue to help us support your system too."
+ exit 1
+ fi
+
+ local opts=
+ echo >&2 "I found though that the following installers are available:"
+ echo >&2
+ [ -n "${apt_get}" ] && echo >&2 " - Debian/Ubuntu based (installer is: apt-get)" && opts="apt-get ${opts}"
+ [ -n "${yum}" ] && echo >&2 " - Redhat/Fedora/Centos based (installer is: yum)" && opts="yum ${opts}"
+ [ -n "${dnf}" ] && echo >&2 " - Redhat/Fedora/Centos based (installer is: dnf)" && opts="dnf ${opts}"
+ [ -n "${zypper}" ] && echo >&2 " - SuSe based (installer is: zypper)" && opts="zypper ${opts}"
+ [ -n "${pacman}" ] && echo >&2 " - Arch Linux based (installer is: pacman)" && opts="pacman ${opts}"
+ [ -n "${emerge}" ] && echo >&2 " - Gentoo based (installer is: emerge)" && opts="emerge ${opts}"
+ [ -n "${equo}" ] && echo >&2 " - Sabayon based (installer is: equo)" && opts="equo ${opts}"
+ [ -n "${apk}" ] && echo >&2 " - Alpine Linux based (installer is: apk)" && opts="apk ${opts}"
+ [ -n "${swupd}" ] && echo >&2 " - Clear Linux based (installer is: swupd)" && opts="swupd ${opts}"
+ [ -n "${brew}" ] && echo >&2 " - macOS based (installer is: brew)" && opts="brew ${opts}"
+ # XXX: This is being removed in another PR.
+ echo >&2
+
+ REPLY=
+ while [ -z "${REPLY}" ]; do
+ echo "To proceed please write one of these:"
+ echo "${opts// /, }"
+ if ! read -r -p ">" REPLY; then
+ continue
+ fi
+
+ if [ "${REPLY}" = "yum" ] && [ -z "${distribution}" ]; then
+ REPLY=
+ while [ -z "${REPLY}" ]; do
+ if ! read -r -p "yum in centos, rhel or fedora? > "; then
+ continue
+ fi
+
+ case "${REPLY,,}" in
+ fedora | rhel)
+ distribution="rhel"
+ ;;
+ centos)
+ distribution="centos"
+ ;;
+ *)
+ echo >&2 "Please enter 'centos', 'fedora' or 'rhel'."
+ REPLY=
+ ;;
+ esac
+ done
+ REPLY="yum"
+ fi
+ check_package_manager "${REPLY}" || REPLY=
+ done
+}
+
+detect_package_manager_from_distribution() {
+ case "${1,,}" in
+ arch* | manjaro*)
+ package_installer="install_pacman"
+ tree="arch"
+ if [ "${IGNORE_INSTALLED}" -eq 0 ] && [ -z "${pacman}" ]; then
+ echo >&2 "command 'pacman' is required to install packages on a '${distribution} ${version}' system."
+ exit 1
+ fi
+ ;;
+
+ sabayon*)
+ package_installer="install_equo"
+ tree="sabayon"
+ if [ "${IGNORE_INSTALLED}" -eq 0 ] && [ -z "${equo}" ]; then
+ echo >&2 "command 'equo' is required to install packages on a '${distribution} ${version}' system."
+ # Maybe offer to fall back on emerge? Both installers exist in Sabayon...
+ exit 1
+ fi
+ ;;
+
+ alpine*)
+ package_installer="install_apk"
+ tree="alpine"
+ if [ "${IGNORE_INSTALLED}" -eq 0 ] && [ -z "${apk}" ]; then
+ echo >&2 "command 'apk' is required to install packages on a '${distribution} ${version}' system."
+ exit 1
+ fi
+ ;;
+
+ gentoo*)
+ package_installer="install_emerge"
+ tree="gentoo"
+ if [ "${IGNORE_INSTALLED}" -eq 0 ] && [ -z "${emerge}" ]; then
+ echo >&2 "command 'emerge' is required to install packages on a '${distribution} ${version}' system."
+ exit 1
+ fi
+ ;;
+
+ debian* | ubuntu*)
+ package_installer="install_apt_get"
+ tree="debian"
+ if [ "${IGNORE_INSTALLED}" -eq 0 ] && [ -z "${apt_get}" ]; then
+ echo >&2 "command 'apt-get' is required to install packages on a '${distribution} ${version}' system."
+ exit 1
+ fi
+ ;;
+
+ centos* | clearos*)
+ echo >&2 "You should have EPEL enabled to install all the prerequisites."
+ echo >&2 "Check: http://www.tecmint.com/how-to-enable-epel-repository-for-rhel-centos-6-5/"
+ package_installer="install_yum"
+ tree="centos"
+ if [ "${IGNORE_INSTALLED}" -eq 0 ] && [ -z "${yum}" ]; then
+ echo >&2 "command 'yum' is required to install packages on a '${distribution} ${version}' system."
+ exit 1
+ fi
+ ;;
+
+ fedora* | redhat* | red\ hat* | rhel*)
+ package_installer=
+ tree="rhel"
+ [ -n "${yum}" ] && package_installer="install_yum"
+ [ -n "${dnf}" ] && package_installer="install_dnf"
+ if [ "${IGNORE_INSTALLED}" -eq 0 ] && [ -z "${package_installer}" ]; then
+ echo >&2 "command 'yum' or 'dnf' is required to install packages on a '${distribution} ${version}' system."
+ exit 1
+ fi
+ ;;
+
+ suse* | opensuse* | sles*)
+ package_installer="install_zypper"
+ tree="suse"
+ if [ "${IGNORE_INSTALLED}" -eq 0 ] && [ -z "${zypper}" ]; then
+ echo >&2 "command 'zypper' is required to install packages on a '${distribution} ${version}' system."
+ exit 1
+ fi
+ ;;
+
+ clear-linux* | clearlinux*)
+ package_installer="install_swupd"
+ tree="clearlinux"
+ if [ "${IGNORE_INSTALLED}" -eq 0 ] && [ -z "${swupd}" ]; then
+ echo >&2 "command 'swupd' is required to install packages on a '${distribution} ${version}' system."
+ exit 1
+ fi
+ ;;
+
+ freebsd)
+ package_installer="install_pkg"
+ tree="freebsd"
+ if [ "${IGNORE_INSTALLED}" -eq 0 ] && [ -z "${pkg}" ]; then
+ echo >&2 "command 'pkg' is required to install packages on a '${distribution} ${version}' system."
+ exit 1
+ fi
+ ;;
+ macos)
+ package_installer="install_brew"
+ tree="macos"
+ if [ "${IGNORE_INSTALLED}" -eq 0 ] && [ -z "${brew}" ]; then
+ echo >&2 "command 'brew' is required to install packages on a '${distribution} ${version}' system."
+ exit 1
+ fi
+ ;;
+
+ *)
+ # oops! unknown system
+ user_picks_distribution
+ ;;
+ esac
+}
+
+# XXX: This is being removed in another PR.
+check_package_manager() {
+ # This is called only when the user is selecting a package manager
+ # It is used to verify the user selection is right
+
+ echo >&2 "Checking package manager: ${1}"
+
+ case "${1}" in
+ apt-get)
+ [ "${IGNORE_INSTALLED}" -eq 0 ] && [ -z "${apt_get}" ] && echo >&2 "${1} is not available." && return 1
+ package_installer="install_apt_get"
+ tree="debian"
+ detection="user-input"
+ return 0
+ ;;
+
+ dnf)
+ [ "${IGNORE_INSTALLED}" -eq 0 ] && [ -z "${dnf}" ] && echo >&2 "${1} is not available." && return 1
+ package_installer="install_dnf"
+ tree="rhel"
+ detection="user-input"
+ return 0
+ ;;
+
+ apk)
+ [ "${IGNORE_INSTALLED}" -eq 0 ] && [ -z "${apk}" ] && echo >&2 "${1} is not available." && return 1
+ package_installer="install_apk"
+ tree="alpine"
+ detection="user-input"
+ return 0
+ ;;
+
+ equo)
+ [ "${IGNORE_INSTALLED}" -eq 0 ] && [ -z "${equo}" ] && echo >&2 "${1} is not available." && return 1
+ package_installer="install_equo"
+ tree="sabayon"
+ detection="user-input"
+ return 0
+ ;;
+
+ emerge)
+ [ "${IGNORE_INSTALLED}" -eq 0 ] && [ -z "${emerge}" ] && echo >&2 "${1} is not available." && return 1
+ package_installer="install_emerge"
+ tree="gentoo"
+ detection="user-input"
+ return 0
+ ;;
+
+ pacman)
+ [ "${IGNORE_INSTALLED}" -eq 0 ] && [ -z "${pacman}" ] && echo >&2 "${1} is not available." && return 1
+ package_installer="install_pacman"
+ tree="arch"
+ detection="user-input"
+
+ return 0
+ ;;
+
+ zypper)
+ [ "${IGNORE_INSTALLED}" -eq 0 ] && [ -z "${zypper}" ] && echo >&2 "${1} is not available." && return 1
+ package_installer="install_zypper"
+ tree="suse"
+ detection="user-input"
+ return 0
+ ;;
+
+ yum)
+ [ "${IGNORE_INSTALLED}" -eq 0 ] && [ -z "${yum}" ] && echo >&2 "${1} is not available." && return 1
+ package_installer="install_yum"
+ if [ "${distribution}" = "centos" ]; then
+ tree="centos"
+ else
+ tree="rhel"
+ fi
+ detection="user-input"
+ return 0
+ ;;
+
+ swupd)
+ [ "${IGNORE_INSTALLED}" -eq 0 ] && [ -z "${swupd}" ] && echo >&2 "${1} is not available." && return 1
+ package_installer="install_swupd"
+ tree="clear-linux"
+ detection="user-input"
+ return 0
+ ;;
+
+ brew)
+ [ "${IGNORE_INSTALLED}" -eq 0 ] && [ -z "${brew}" ] && echo >&2 "${1} is not available." && return 1
+ package_installer="install_brew"
+ tree="macos"
+ detection="user-input"
+
+ return 0
+ ;;
+
+ *)
+ echo >&2 "Invalid package manager: '${1}'."
+ return 1
+ ;;
+ esac
+}
+
+require_cmd() {
+ # check if any of the commands given as argument
+ # are present on this system
+ # If any of them is available, it returns 0
+ # otherwise 1
+
+ [ "${IGNORE_INSTALLED}" -eq 1 ] && return 1
+
+ local wanted found
+ for wanted in "${@}"; do
+ if command -v "${wanted}" > /dev/null 2>&1; then
+ found="$(command -v "$wanted" 2> /dev/null)"
+ fi
+ [ -n "${found}" ] && [ -x "${found}" ] && return 0
+ done
+ return 1
+}
+
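+# Per-package maps: keys are distribution trees (plus "distro-version" keys for exceptions), values are the package
+# names to install. "NOTREQUIRED" skips the package on that platform, values starting with "WARNING|" print a warning
+# instead of installing, and the 'default' key is the fallback for unlisted platforms.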
+declare -A pkg_find=(
+ ['gentoo']="sys-apps/findutils"
+ ['fedora']="findutils"
+ ['clearlinux']="findutils"
+ ['macos']="NOTREQUIRED"
+ ['freebsd']="NOTREQUIRED"
+ ['default']="WARNING|"
+)
+
+declare -A pkg_distro_sdk=(
+ ['alpine']="alpine-sdk"
+ ['default']="NOTREQUIRED"
+)
+
+declare -A pkg_autoconf=(
+ ['gentoo']="sys-devel/autoconf"
+ ['clearlinux']="c-basic"
+ ['default']="autoconf"
+)
+
+# required to compile netdata with --enable-sse
+# https://github.com/firehol/netdata/pull/450
+declare -A pkg_autoconf_archive=(
+ ['gentoo']="sys-devel/autoconf-archive"
+ ['clearlinux']="c-basic"
+ ['alpine']="WARNING|"
+ ['default']="autoconf-archive"
+
+ # exceptions
+ ['centos-6']="WARNING|"
+ ['rhel-6']="WARNING|"
+ ['rhel-7']="WARNING|"
+)
+
+declare -A pkg_autogen=(
+ ['gentoo']="sys-devel/autogen"
+ ['clearlinux']="c-basic"
+ ['alpine']="WARNING|"
+ ['default']="autogen"
+
+ # exceptions
+ ['centos-6']="WARNING|"
+ ['rhel-6']="WARNING|"
+)
+
+declare -A pkg_automake=(
+ ['gentoo']="sys-devel/automake"
+ ['clearlinux']="c-basic"
+ ['default']="automake"
+)
+
+# required to bundle libJudy
+declare -A pkg_libtool=(
+ ['gentoo']="sys-devel/libtool"
+ ['clearlinux']="c-basic"
+ ['default']="libtool"
+)
+
+# Required to build libwebsockets and libmosquitto on some systems.
+declare -A pkg_cmake=(
+ ['gentoo']="dev-util/cmake"
+ ['clearlinux']="c-basic"
+ ['default']="cmake"
+)
+
+declare -A pkg_json_c_dev=(
+ ['alpine']="json-c-dev"
+ ['arch']="json-c"
+ ['clearlinux']="devpkg-json-c"
+ ['debian']="libjson-c-dev"
+ ['gentoo']="dev-libs/json-c"
+ ['sabayon']="dev-libs/json-c"
+ ['suse']="libjson-c-devel"
+ ['freebsd']="json-c"
+ ['default']="json-c-devel"
+)
+
+declare -A pkg_bridge_utils=(
+ ['gentoo']="net-misc/bridge-utils"
+ ['clearlinux']="network-basic"
+ ['macos']="WARNING|"
+ ['default']="bridge-utils"
+)
+
+declare -A pkg_chrony=(
+ ['gentoo']="net-misc/chrony"
+ ['clearlinux']="time-server-basic"
+ ['macos']="WARNING|"
+ ['default']="chrony"
+)
+
+declare -A pkg_curl=(
+ ['gentoo']="net-misc/curl"
+ ['sabayon']="net-misc/curl"
+ ['default']="curl"
+)
+
+declare -A pkg_gzip=(
+ ['gentoo']="app-arch/gzip"
+ ['macos']="NOTREQUIRED"
+ ['default']="gzip"
+)
+
+declare -A pkg_tar=(
+ ['gentoo']="app-arch/tar"
+ ['clearlinux']="os-core-update"
+ ['macos']="NOTREQUIRED"
+ ['default']="tar"
+)
+
+declare -A pkg_git=(
+ ['gentoo']="dev-vcs/git"
+ ['default']="git"
+)
+
+declare -A pkg_gcc=(
+ ['gentoo']="sys-devel/gcc"
+ ['clearlinux']="c-basic"
+ ['macos']="NOTREQUIRED"
+ ['default']="gcc"
+)
+
+declare -A pkg_gdb=(
+ ['gentoo']="sys-devel/gdb"
+ ['macos']="NOTREQUIRED"
+ ['default']="gdb"
+)
+
+declare -A pkg_iotop=(
+ ['gentoo']="sys-process/iotop"
+ ['macos']="WARNING|"
+ ['default']="iotop"
+)
+
+declare -A pkg_iproute2=(
+ ['alpine']="iproute2"
+ ['debian']="iproute2"
+ ['gentoo']="sys-apps/iproute2"
+ ['sabayon']="sys-apps/iproute2"
+ ['clearlinux']="iproute2"
+ ['macos']="WARNING|"
+ ['default']="iproute"
+
+ # exceptions
+ ['ubuntu-12.04']="iproute"
+)
+
+declare -A pkg_ipset=(
+ ['gentoo']="net-firewall/ipset"
+ ['clearlinux']="network-basic"
+ ['macos']="WARNING|"
+ ['default']="ipset"
+)
+
+declare -A pkg_jq=(
+ ['gentoo']="app-misc/jq"
+ ['default']="jq"
+)
+
+declare -A pkg_iptables=(
+ ['gentoo']="net-firewall/iptables"
+ ['macos']="WARNING|"
+ ['default']="iptables"
+)
+
+declare -A pkg_libz_dev=(
+ ['alpine']="zlib-dev"
+ ['arch']="zlib"
+ ['centos']="zlib-devel"
+ ['debian']="zlib1g-dev"
+ ['gentoo']="sys-libs/zlib"
+ ['sabayon']="sys-libs/zlib"
+ ['rhel']="zlib-devel"
+ ['suse']="zlib-devel"
+ ['clearlinux']="devpkg-zlib"
+ ['macos']="NOTREQUIRED"
+ ['freebsd']="lzlib"
+ ['default']=""
+)
+
+declare -A pkg_libuuid_dev=(
+ ['alpine']="util-linux-dev"
+ ['arch']="util-linux"
+ ['centos']="libuuid-devel"
+ ['clearlinux']="devpkg-util-linux"
+ ['debian']="uuid-dev"
+ ['gentoo']="sys-apps/util-linux"
+ ['sabayon']="sys-apps/util-linux"
+ ['rhel']="libuuid-devel"
+ ['suse']="libuuid-devel"
+ ['macos']="NOTREQUIRED"
+ ['freebsd']="e2fsprogs-libuuid"
+ ['default']=""
+)
+
+declare -A pkg_libmnl_dev=(
+ ['alpine']="libmnl-dev"
+ ['arch']="libmnl"
+ ['centos']="libmnl-devel"
+ ['debian']="libmnl-dev"
+ ['gentoo']="net-libs/libmnl"
+ ['sabayon']="net-libs/libmnl"
+ ['rhel']="libmnl-devel"
+ ['suse']="libmnl-devel"
+ ['clearlinux']="devpkg-libmnl"
+ ['macos']="NOTREQUIRED"
+ ['default']=""
+)
+
+declare -A pkg_lm_sensors=(
+ ['alpine']="lm_sensors"
+ ['arch']="lm_sensors"
+ ['centos']="lm_sensors"
+ ['debian']="lm-sensors"
+ ['gentoo']="sys-apps/lm-sensors"
+ ['sabayon']="sys-apps/lm_sensors"
+ ['rhel']="lm_sensors"
+ ['suse']="sensors"
+ ['clearlinux']="lm-sensors"
+ ['macos']="WARNING|"
+ ['freebsd']="NOTREQUIRED"
+ ['default']="lm_sensors"
+)
+
+declare -A pkg_logwatch=(
+ ['gentoo']="sys-apps/logwatch"
+ ['clearlinux']="WARNING|"
+ ['macos']="WARNING|"
+ ['default']="logwatch"
+)
+
+declare -A pkg_lxc=(
+ ['gentoo']="app-emulation/lxc"
+ ['clearlinux']="WARNING|"
+ ['macos']="WARNING|"
+ ['default']="lxc"
+)
+
+declare -A pkg_mailutils=(
+ ['gentoo']="net-mail/mailutils"
+ ['clearlinux']="WARNING|"
+ ['macos']="WARNING|"
+ ['default']="mailutils"
+)
+
+declare -A pkg_make=(
+ ['gentoo']="sys-devel/make"
+ ['macos']="NOTREQUIRED"
+ ['freebsd']="gmake"
+ ['default']="make"
+)
+
+declare -A pkg_netcat=(
+ ['alpine']="netcat-openbsd"
+ ['centos']="nmap-ncat"
+ ['debian']="netcat"
+ ['gentoo']="net-analyzer/netcat"
+ ['sabayon']="net-analyzer/gnu-netcat"
+ ['rhel']="nmap-ncat"
+ ['suse']="netcat-openbsd"
+ ['clearlinux']="sysadmin-basic"
+ ['arch']="gnu-netcat"
+ ['macos']="NOTREQUIRED"
+ ['default']="netcat"
+
+ # exceptions
+ ['centos-6']="nc"
+ ['rhel-6']="nc"
+)
+
+declare -A pkg_nginx=(
+ ['gentoo']="www-servers/nginx"
+ ['default']="nginx"
+)
+
+declare -A pkg_nodejs=(
+ ['gentoo']="net-libs/nodejs"
+ ['clearlinux']="nodejs-basic"
+ ['freebsd']="node"
+ ['default']="nodejs"
+
+ # exceptions
+ ['rhel-6']="WARNING|To install nodejs check: https://nodejs.org/en/download/package-manager/"
+ ['rhel-7']="WARNING|To install nodejs check: https://nodejs.org/en/download/package-manager/"
+ ['centos-6']="WARNING|To install nodejs check: https://nodejs.org/en/download/package-manager/"
+ ['debian-6']="WARNING|To install nodejs check: https://nodejs.org/en/download/package-manager/"
+ ['debian-7']="WARNING|To install nodejs check: https://nodejs.org/en/download/package-manager/"
+)
+
+declare -A pkg_postfix=(
+ ['gentoo']="mail-mta/postfix"
+ ['macos']="WARNING|"
+ ['default']="postfix"
+)
+
+declare -A pkg_pkg_config=(
+ ['alpine']="pkgconfig"
+ ['arch']="pkgconfig"
+ ['centos']="pkgconfig"
+ ['debian']="pkg-config"
+ ['gentoo']="virtual/pkgconfig"
+ ['sabayon']="virtual/pkgconfig"
+ ['rhel']="pkgconfig"
+ ['suse']="pkg-config"
+ ['freebsd']="pkgconf"
+ ['clearlinux']="c-basic"
+ ['default']="pkg-config"
+)
+
+declare -A pkg_python=(
+ ['gentoo']="dev-lang/python"
+ ['sabayon']="dev-lang/python:2.7"
+ ['clearlinux']="python-basic"
+ ['default']="python"
+
+ # Exceptions
+ ['macos']="WARNING|"
+ ['centos-8']="python2"
+)
+
+declare -A pkg_python_mysqldb=(
+ ['alpine']="py-mysqldb"
+ ['arch']="mysql-python"
+ ['centos']="MySQL-python"
+ ['debian']="python-mysqldb"
+ ['gentoo']="dev-python/mysqlclient"
+ ['sabayon']="dev-python/mysqlclient"
+ ['rhel']="MySQL-python"
+ ['suse']="python-PyMySQL"
+ ['clearlinux']="WARNING|"
+ ['default']="python-mysql"
+
+ # exceptions
+ ['fedora-24']="python2-mysql"
+)
+
+declare -A pkg_python3_mysqldb=(
+ ['alpine']="WARNING|"
+ ['arch']="WARNING|"
+ ['centos']="WARNING|"
+ ['debian']="python3-mysqldb"
+ ['gentoo']="dev-python/mysqlclient"
+ ['sabayon']="dev-python/mysqlclient"
+ ['rhel']="WARNING|"
+ ['suse']="WARNING|"
+ ['clearlinux']="WARNING|"
+ ['macos']="WARNING|"
+ ['default']="WARNING|"
+
+ # exceptions
+ ['debian-6']="WARNING|"
+ ['debian-7']="WARNING|"
+ ['debian-8']="WARNING|"
+ ['ubuntu-12.04']="WARNING|"
+ ['ubuntu-12.10']="WARNING|"
+ ['ubuntu-13.04']="WARNING|"
+ ['ubuntu-13.10']="WARNING|"
+ ['ubuntu-14.04']="WARNING|"
+ ['ubuntu-14.10']="WARNING|"
+ ['ubuntu-15.04']="WARNING|"
+ ['ubuntu-15.10']="WARNING|"
+ ['centos-7']="python36-mysql"
+ ['centos-8']="python38-mysql"
+ ['rhel-7']="python36-mysql"
+ ['rhel-8']="python38-mysql"
+)
+
+declare -A pkg_python_psycopg2=(
+ ['alpine']="py-psycopg2"
+ ['arch']="python2-psycopg2"
+ ['centos']="python-psycopg2"
+ ['debian']="python-psycopg2"
+ ['gentoo']="dev-python/psycopg"
+ ['sabayon']="dev-python/psycopg:2"
+ ['rhel']="python-psycopg2"
+ ['suse']="python-psycopg2"
+ ['clearlinux']="WARNING|"
+ ['macos']="WARNING|"
+ ['default']="python-psycopg2"
+)
+
+declare -A pkg_python3_psycopg2=(
+ ['alpine']="py3-psycopg2"
+ ['arch']="python-psycopg2"
+ ['centos']="WARNING|"
+ ['debian']="WARNING|"
+ ['gentoo']="dev-python/psycopg"
+ ['sabayon']="dev-python/psycopg:2"
+ ['rhel']="WARNING|"
+ ['suse']="WARNING|"
+ ['clearlinux']="WARNING|"
+ ['macos']="WARNING|"
+ ['default']="WARNING|"
+
+ ['centos-7']="python3-psycopg2"
+ ['centos-8']="python38-psycopg2"
+ ['rhel-7']="python3-psycopg2"
+ ['rhel-8']="python38-psycopg2"
+)
+
+declare -A pkg_python_pip=(
+ ['alpine']="py-pip"
+ ['gentoo']="dev-python/pip"
+ ['sabayon']="dev-python/pip"
+ ['clearlinux']="python-basic"
+ ['macos']="WARNING|"
+ ['default']="python-pip"
+)
+
+declare -A pkg_python3_pip=(
+ ['alpine']="py3-pip"
+ ['arch']="python-pip"
+ ['gentoo']="dev-python/pip"
+ ['sabayon']="dev-python/pip"
+ ['clearlinux']="python3-basic"
+ ['macos']="NOTREQUIRED"
+ ['default']="python3-pip"
+)
+
+declare -A pkg_python_pymongo=(
+ ['alpine']="WARNING|"
+ ['arch']="python2-pymongo"
+ ['centos']="WARNING|"
+ ['debian']="python-pymongo"
+ ['gentoo']="dev-python/pymongo"
+ ['suse']="python-pymongo"
+ ['clearlinux']="WARNING|"
+ ['rhel']="WARNING|"
+ ['macos']="WARNING|"
+ ['default']="python-pymongo"
+)
+
+declare -A pkg_python3_pymongo=(
+ ['alpine']="WARNING|"
+ ['arch']="python-pymongo"
+ ['centos']="WARNING|"
+ ['debian']="python3-pymongo"
+ ['gentoo']="dev-python/pymongo"
+ ['suse']="python3-pymongo"
+ ['clearlinux']="WARNING|"
+ ['rhel']="WARNING|"
+ ['freebsd']="py37-pymongo"
+ ['macos']="WARNING|"
+ ['default']="python3-pymongo"
+
+ ['centos-7']="python36-pymongo"
+ ['centos-8']="python3-pymongo"
+ ['rhel-7']="python36-pymongo"
+ ['rhel-8']="python3-pymongo"
+)
+
+declare -A pkg_python_requests=(
+ ['alpine']="py-requests"
+ ['arch']="python2-requests"
+ ['centos']="python-requests"
+ ['debian']="python-requests"
+ ['gentoo']="dev-python/requests"
+ ['sabayon']="dev-python/requests"
+ ['rhel']="python-requests"
+ ['suse']="python-requests"
+ ['clearlinux']="python-extras"
+ ['macos']="WARNING|"
+ ['default']="python-requests"
+ ['alpine-3.1.4']="WARNING|"
+ ['alpine-3.2.3']="WARNING|"
+)
+
+declare -A pkg_python3_requests=(
+ ['alpine']="py3-requests"
+ ['arch']="python-requests"
+ ['centos']="WARNING|"
+ ['debian']="WARNING|"
+ ['gentoo']="dev-python/requests"
+ ['sabayon']="dev-python/requests"
+ ['rhel']="WARNING|"
+ ['suse']="WARNING|"
+ ['clearlinux']="python-extras"
+ ['macos']="WARNING|"
+ ['default']="WARNING|"
+
+ ['centos-7']="python36-requests"
+ ['centos-8']="python3-requests"
+ ['rhel-7']="python36-requests"
+ ['rhel-8']="python3-requests"
+)
+
+declare -A pkg_lz4=(
+ ['alpine']="lz4-dev"
+ ['debian']="liblz4-dev"
+ ['ubuntu']="liblz4-dev"
+ ['suse']="liblz4-devel"
+ ['gentoo']="app-arch/lz4"
+ ['clearlinux']="devpkg-lz4"
+ ['arch']="lz4"
+ ['macos']="lz4"
+ ['freebsd']="liblz4"
+ ['default']="lz4-devel"
+)
+
+declare -A pkg_libuv=(
+ ['alpine']="libuv-dev"
+ ['debian']="libuv1-dev"
+ ['ubuntu']="libuv1-dev"
+ ['gentoo']="dev-libs/libuv"
+ ['arch']="libuv"
+ ['clearlinux']="devpkg-libuv"
+ ['macos']="libuv"
+ ['freebsd']="libuv"
+ ['default']="libuv-devel"
+)
+
+declare -A pkg_openssl=(
+ ['alpine']="openssl-dev"
+ ['debian']="libssl-dev"
+ ['ubuntu']="libssl-dev"
+ ['suse']="libopenssl-devel"
+ ['clearlinux']="devpkg-openssl"
+ ['gentoo']="dev-libs/openssl"
+ ['arch']="openssl"
+ ['freebsd']="openssl"
+ ['macos']="openssl@1.1"
+ ['default']="openssl-devel"
+)
+
+declare -A pkg_judy=(
+ ['debian']="libjudy-dev"
+ ['ubuntu']="libjudy-dev"
+ ['suse']="judy-devel"
+ ['gentoo']="dev-libs/judy"
+ ['arch']="judy"
+ ['freebsd']="Judy"
+ ['fedora']="Judy-devel"
+ ['default']="NOTREQUIRED"
+)
+
+declare -A pkg_python3=(
+ ['gentoo']="dev-lang/python"
+ ['sabayon']="dev-lang/python:3.4"
+ ['clearlinux']="python3-basic"
+ ['macos']="python"
+ ['default']="python3"
+
+ # exceptions
+ ['centos-6']="WARNING|"
+)
+
+declare -A pkg_screen=(
+ ['gentoo']="app-misc/screen"
+ ['sabayon']="app-misc/screen"
+ ['clearlinux']="sysadmin-basic"
+ ['default']="screen"
+)
+
+declare -A pkg_sudo=(
+ ['gentoo']="app-admin/sudo"
+ ['macos']="NOTREQUIRED"
+ ['default']="sudo"
+)
+
+declare -A pkg_sysstat=(
+ ['gentoo']="app-admin/sysstat"
+ ['macos']="WARNING|"
+ ['default']="sysstat"
+)
+
+declare -A pkg_tcpdump=(
+ ['gentoo']="net-analyzer/tcpdump"
+ ['clearlinux']="network-basic"
+ ['default']="tcpdump"
+)
+
+declare -A pkg_traceroute=(
+ ['alpine']=" "
+ ['gentoo']="net-analyzer/traceroute"
+ ['clearlinux']="network-basic"
+ ['macos']="NOTREQUIRED"
+ ['default']="traceroute"
+)
+
+declare -A pkg_valgrind=(
+ ['gentoo']="dev-util/valgrind"
+ ['default']="valgrind"
+)
+
+declare -A pkg_ulogd=(
+ ['centos']="WARNING|"
+ ['rhel']="WARNING|"
+ ['clearlinux']="WARNING|"
+ ['gentoo']="app-admin/ulogd"
+ ['arch']="ulogd"
+ ['macos']="WARNING|"
+ ['default']="ulogd2"
+)
+
+declare -A pkg_unzip=(
+ ['gentoo']="app-arch/unzip"
+ ['macos']="NOTREQUIRED"
+ ['default']="unzip"
+)
+
+declare -A pkg_zip=(
+ ['gentoo']="app-arch/zip"
+ ['macos']="NOTREQUIRED"
+ ['default']="zip"
+)
+
+declare -A pkg_libelf=(
+ ['alpine']="elfutils-dev"
+ ['arch']="libelf"
+ ['gentoo']="virtual/libelf"
+ ['sabayon']="virtual/libelf"
+ ['debian']="libelf-dev"
+ ['ubuntu']="libelf-dev"
+ ['fedora']="elfutils-libelf-devel"
+ ['centos']="elfutils-libelf-devel"
+ ['rhel']="elfutils-libelf-devel"
+ ['clearlinux']="devpkg-elfutils"
+ ['suse']="libelf-devel"
+ ['macos']="NOTREQUIRED"
+ ['freebsd']="NOTREQUIRED"
+ ['default']="libelf-devel"
+
+ # exceptions
+ ['alpine-3.5']="libelf-dev"
+ ['alpine-3.4']="libelf-dev"
+ ['alpine-3.3']="libelf-dev"
+)
+
+validate_package_trees() {
+ if type -t validate_tree_${tree} > /dev/null; then
+ validate_tree_${tree}
+ fi
+}
+
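+# Convention: each validate_install_<installer> helper prints the package name
+# on stdout only when the package is NOT already installed; packages() output
+# is later collected to build the list of packages that still need installing.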
+validate_installed_package() {
+ validate_${package_installer} "${p}"
+}
+
+suitable_package() {
+ local package="${1//-/_}" p="" v="${version//.*/}"
+
+ echo >&2 "Searching for ${package} ..."
+
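+  # Resolve the package name, most specific key first. For example, with
+  # distribution=ubuntu, version=20.04 and tree=debian (illustrative values),
+  # the keys tried are: ubuntu-20.04, ubuntu-20, ubuntu, debian-20.04,
+  # debian-20, debian, default.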
+ eval "p=\${pkg_${package}['${distribution,,}-${version,,}']}"
+ [ -z "${p}" ] && eval "p=\${pkg_${package}['${distribution,,}-${v,,}']}"
+ [ -z "${p}" ] && eval "p=\${pkg_${package}['${distribution,,}']}"
+ [ -z "${p}" ] && eval "p=\${pkg_${package}['${tree}-${version}']}"
+ [ -z "${p}" ] && eval "p=\${pkg_${package}['${tree}-${v}']}"
+ [ -z "${p}" ] && eval "p=\${pkg_${package}['${tree}']}"
+ [ -z "${p}" ] && eval "p=\${pkg_${package}['default']}"
+
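+  # "${p/|*/}" keeps the text before the first '|' (the severity keyword),
+  # while "${p/*|/}" keeps the text after the last '|' (the optional hint).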
+ if [[ "${p/|*/}" =~ ^(ERROR|WARNING|INFO)$ ]]; then
+ echo >&2 "${p/|*/}"
+ echo >&2 "package ${1} is not available in this system."
+ if [ -z "${p/*|/}" ]; then
+ echo >&2 "You may try to install without it."
+ else
+ echo >&2 "${p/*|/}"
+ fi
+ echo >&2
+ return 1
+ elif [ "${p}" = "NOTREQUIRED" ]; then
+ return 0
+ elif [ -z "${p}" ]; then
+ echo >&2 "WARNING"
+    echo >&2 "package ${1} is not available in this system."
+ echo >&2
+ return 1
+ else
+ if [ "${IGNORE_INSTALLED}" -eq 0 ]; then
+ validate_installed_package "${p}"
+ else
+ echo "${p}"
+ fi
+ return 0
+ fi
+}
+
+packages() {
+ # detect the packages we need to install on this system
+
+ # -------------------------------------------------------------------------
+ # basic build environment
+
+ suitable_package distro-sdk
+
+ require_cmd git || suitable_package git
+ require_cmd find || suitable_package find
+
+ require_cmd gcc ||
+ require_cmd gcc-multilib || suitable_package gcc
+
+ require_cmd make || suitable_package make
+ require_cmd autoconf || suitable_package autoconf
+ suitable_package autoconf-archive
+ require_cmd autogen || suitable_package autogen
+ require_cmd automake || suitable_package automake
+ require_cmd libtoolize || suitable_package libtool
+ require_cmd pkg-config || suitable_package pkg-config
+ require_cmd cmake || suitable_package cmake
+
+ # -------------------------------------------------------------------------
+ # debugging tools for development
+
+ if [ "${PACKAGES_DEBUG}" -ne 0 ]; then
+ require_cmd traceroute || suitable_package traceroute
+ require_cmd tcpdump || suitable_package tcpdump
+ require_cmd screen || suitable_package screen
+
+ if [ "${PACKAGES_NETDATA}" -ne 0 ]; then
+ require_cmd gdb || suitable_package gdb
+ require_cmd valgrind || suitable_package valgrind
+ fi
+ fi
+
+ # -------------------------------------------------------------------------
+ # common command line tools
+
+ if [ "${PACKAGES_NETDATA}" -ne 0 ]; then
+ require_cmd tar || suitable_package tar
+ require_cmd curl || suitable_package curl
+ require_cmd gzip || suitable_package gzip
+ require_cmd nc || suitable_package netcat
+ fi
+
+ # -------------------------------------------------------------------------
+ # firehol/fireqos/update-ipsets command line tools
+
+ if [ "${PACKAGES_FIREQOS}" -ne 0 ]; then
+ require_cmd ip || suitable_package iproute2
+ fi
+
+ if [ "${PACKAGES_FIREHOL}" -ne 0 ]; then
+ require_cmd iptables || suitable_package iptables
+ require_cmd ipset || suitable_package ipset
+ require_cmd ulogd ulogd2 || suitable_package ulogd
+ require_cmd traceroute || suitable_package traceroute
+ require_cmd bridge || suitable_package bridge-utils
+ fi
+
+ if [ "${PACKAGES_UPDATE_IPSETS}" -ne 0 ]; then
+ require_cmd ipset || suitable_package ipset
+ require_cmd zip || suitable_package zip
+ require_cmd funzip || suitable_package unzip
+ fi
+
+ # -------------------------------------------------------------------------
+ # netdata libraries
+
+ if [ "${PACKAGES_NETDATA}" -ne 0 ]; then
+ suitable_package libz-dev
+ suitable_package libuuid-dev
+ suitable_package libmnl-dev
+ suitable_package json-c-dev
+ fi
+
+ # -------------------------------------------------------------------------
+ # sensors
+
+ if [ "${PACKAGES_NETDATA_SENSORS}" -ne 0 ]; then
+ require_cmd sensors || suitable_package lm_sensors
+ fi
+
+ # -------------------------------------------------------------------------
+ # netdata database
+ if [ "${PACKAGES_NETDATA_DATABASE}" -ne 0 ]; then
+ suitable_package libuv
+ suitable_package lz4
+ suitable_package openssl
+ suitable_package judy
+ fi
+
+ # -------------------------------------------------------------------------
+ # ebpf plugin
+ if [ "${PACKAGES_NETDATA_EBPF}" -ne 0 ]; then
+ suitable_package libelf
+ fi
+
+ # -------------------------------------------------------------------------
+ # scripting interpreters for netdata plugins
+
+ if [ "${PACKAGES_NETDATA_NODEJS}" -ne 0 ]; then
+ require_cmd nodejs node js || suitable_package nodejs
+ fi
+
+ # -------------------------------------------------------------------------
+ # python2
+
+ if [ "${PACKAGES_NETDATA_PYTHON}" -ne 0 ]; then
+ require_cmd python || suitable_package python
+
+ [ "${PACKAGES_NETDATA_PYTHON_MONGO}" -ne 0 ] && suitable_package python-pymongo
+ # suitable_package python-requests
+ # suitable_package python-pip
+
+ [ "${PACKAGES_NETDATA_PYTHON_MYSQL}" -ne 0 ] && suitable_package python-mysqldb
+ [ "${PACKAGES_NETDATA_PYTHON_POSTGRES}" -ne 0 ] && suitable_package python-psycopg2
+ fi
+
+ # -------------------------------------------------------------------------
+ # python3
+
+ if [ "${PACKAGES_NETDATA_PYTHON3}" -ne 0 ]; then
+ require_cmd python3 || suitable_package python3
+
+ [ "${PACKAGES_NETDATA_PYTHON_MONGO}" -ne 0 ] && suitable_package python3-pymongo
+ # suitable_package python3-requests
+ # suitable_package python3-pip
+
+ [ "${PACKAGES_NETDATA_PYTHON_MYSQL}" -ne 0 ] && suitable_package python3-mysqldb
+ [ "${PACKAGES_NETDATA_PYTHON_POSTGRES}" -ne 0 ] && suitable_package python3-psycopg2
+ fi
+
+ # -------------------------------------------------------------------------
+ # applications needed for the netdata demo sites
+
+ if [ "${PACKAGES_NETDATA_DEMO_SITE}" -ne 0 ]; then
+ require_cmd sudo || suitable_package sudo
+ require_cmd jq || suitable_package jq
+ require_cmd nginx || suitable_package nginx
+ require_cmd postconf || suitable_package postfix
+ require_cmd lxc-create || suitable_package lxc
+ require_cmd logwatch || suitable_package logwatch
+ require_cmd mail || suitable_package mailutils
+ require_cmd iostat || suitable_package sysstat
+ require_cmd iotop || suitable_package iotop
+ fi
+}
+
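+# run() prints every command (shell-quoted) to stderr before executing it;
+# with DRYRUN=1 it only prints, which is how the final installation command
+# is previewed to the user before actually being executed.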
+DRYRUN=0
+run() {
+
+ printf >&2 "%q " "${@}"
+ printf >&2 "\n"
+
+ if [ ! "${DRYRUN}" -eq 1 ]; then
+ "${@}"
+ return $?
+ fi
+ return 0
+}
+
+sudo=
+if [ ${UID} -ne 0 ]; then
+ sudo="sudo"
+fi
+
+# -----------------------------------------------------------------------------
+# debian / ubuntu
+
+validate_install_apt_get() {
+ echo >&2 " > Checking if package '${*}' is installed..."
+ [ "$(dpkg-query -W --showformat='${Status}\n' "${*}")" = "install ok installed" ] || echo "${*}"
+}
+
+install_apt_get() {
+ local opts=""
+ if [ "${NON_INTERACTIVE}" -eq 1 ]; then
+ echo >&2 "Running in non-interactive mode"
+ # http://serverfault.com/questions/227190/how-do-i-ask-apt-get-to-skip-any-interactive-post-install-configuration-steps
+ export DEBIAN_FRONTEND="noninteractive"
+ opts="${opts} -yq"
+ fi
+
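+  # split the accumulated option string into an array, so each option is
+  # passed to apt-get as a separate word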
+ read -r -a apt_opts <<< "$opts"
+
+ # update apt repository caches
+
+ echo >&2 "NOTE: Running apt-get update and updating your APT caches ..."
+ if [ "${version}" = 8 ]; then
+    echo >&2 "WARNING: You seem to be on Debian 8 (jessie), which is old enough that we have to disable Check-Valid-Until checks"
+ if ! cat /etc/apt/sources.list /etc/apt/sources.list.d/* 2> /dev/null | grep -q jessie-backports; then
+ echo >&2 "We also have to enable the jessie-backports repository"
+ if prompt "Is this okay?"; then
+ ${sudo} /bin/sh -c 'echo "deb http://archive.debian.org/debian/ jessie-backports main contrib non-free" >> /etc/apt/sources.list.d/99-archived.list'
+ fi
+ fi
+ run ${sudo} apt-get "${apt_opts[@]}" -o Acquire::Check-Valid-Until=false update
+ else
+ run ${sudo} apt-get "${apt_opts[@]}" update
+ fi
+
+ # install the required packages
+ run ${sudo} apt-get "${apt_opts[@]}" install "${@}"
+}
+
+# -----------------------------------------------------------------------------
+# centos / rhel
+
+prompt() {
+ if [ "${NON_INTERACTIVE}" -eq 1 ]; then
+ echo >&2 "Running in non-interactive mode, assuming yes (y)"
+    echo >&2 " > Would have prompted for ${1} ..."
+ return 0
+ fi
+
+ while true; do
+ read -r -p "${1} [y/n] " yn
+ case $yn in
+ [Yy]*) return 0 ;;
+ [Nn]*) return 1 ;;
+ *) echo >&2 "Please answer with yes (y) or no (n)." ;;
+ esac
+ done
+}
+
+validate_tree_freebsd() {
+ local opts=
+ if [ "${NON_INTERACTIVE}" -eq 1 ]; then
+ echo >&2 "Running in non-interactive mode"
+ opts="-y"
+ fi
+
+ echo >&2 " > FreeBSD Version: ${version} ..."
+
+ make="make"
+ echo >&2 " > Checking for gmake ..."
+ if ! pkg query %n-%v | grep -q gmake; then
+ if prompt "gmake is required to build on FreeBSD and is not installed. Shall I install it?"; then
+ run ${sudo} pkg install ${opts} gmake
+ fi
+ fi
+}
+
+validate_tree_centos() {
+ local opts=
+ if [ "${NON_INTERACTIVE}" -eq 1 ]; then
+ echo >&2 "Running in non-interactive mode"
+ opts="-y"
+ fi
+
+ echo >&2 " > CentOS Version: ${version} ..."
+
+ echo >&2 " > Checking for epel ..."
+ if ! rpm -qa | grep epel > /dev/null; then
+ if prompt "epel not found, shall I install it?"; then
+ run ${sudo} yum ${opts} install epel-release
+ fi
+ fi
+
+ if [[ "${version}" =~ ^8(\..*)?$ ]]; then
+ echo >&2 " > Checking for config-manager ..."
+    if ! run ${sudo} yum config-manager; then
+ if prompt "config-manager not found, shall I install it?"; then
+ run ${sudo} yum ${opts} install 'dnf-command(config-manager)'
+ fi
+ fi
+
+ echo >&2 " > Checking for PowerTools ..."
+    if ! run ${sudo} yum repolist | grep PowerTools; then
+ if prompt "PowerTools not found, shall I install it?"; then
+ run ${sudo} yum ${opts} config-manager --set-enabled powertools
+ fi
+ fi
+
+ echo >&2 " > Checking for Okay ..."
+ if ! rpm -qa | grep okay > /dev/null; then
+ if prompt "okay not found, shall I install it?"; then
+ run ${sudo} yum ${opts} install http://repo.okay.com.mx/centos/8/x86_64/release/okay-release-1-3.el8.noarch.rpm
+ fi
+ fi
+
+ echo >&2 " > Installing Judy-devel directly ..."
+ run ${sudo} yum ${opts} install http://mirror.centos.org/centos/8/PowerTools/x86_64/os/Packages/Judy-devel-1.0.5-18.module_el8.1.0+217+4d875839.x86_64.rpm
+
+ elif [[ "${version}" =~ ^6\..*$ ]]; then
+ echo >&2 " > Detected CentOS 6.x ..."
+ echo >&2 " > Checking for Okay ..."
+ if ! rpm -qa | grep okay > /dev/null; then
+ if prompt "okay not found, shall I install it?"; then
+ run ${sudo} yum ${opts} install http://repo.okay.com.mx/centos/6/x86_64/release/okay-release-1-3.el6.noarch.rpm
+ fi
+ fi
+
+ fi
+}
+
+validate_install_yum() {
+ echo >&2 " > Checking if package '${*}' is installed..."
+ yum list installed "${*}" > /dev/null 2>&1 || echo "${*}"
+}
+
+install_yum() {
+ # download the latest package info
+ if [ "${DRYRUN}" -eq 1 ]; then
+ echo >&2 " >> IMPORTANT << "
+ echo >&2 " Please make sure your system is up to date"
+ echo >&2 " by running: ${sudo} yum update "
+ echo >&2
+ fi
+
+ local opts=
+ if [ "${NON_INTERACTIVE}" -eq 1 ]; then
+ echo >&2 "Running in non-interactive mode"
+ # http://unix.stackexchange.com/questions/87822/does-yum-have-an-equivalent-to-apt-aptitudes-debian-frontend-noninteractive
+ opts="-y"
+ fi
+
+ read -r -a yum_opts <<< "${opts}"
+
+ # install the required packages
+ run ${sudo} yum "${yum_opts[@]}" install "${@}" # --enablerepo=epel-testing
+}
+
+# -----------------------------------------------------------------------------
+# fedora
+
+validate_install_dnf() {
+ echo >&2 " > Checking if package '${*}' is installed..."
+ dnf list installed "${*}" > /dev/null 2>&1 || echo "${*}"
+}
+
+install_dnf() {
+ # download the latest package info
+ if [ "${DRYRUN}" -eq 1 ]; then
+ echo >&2 " >> IMPORTANT << "
+ echo >&2 " Please make sure your system is up to date"
+ echo >&2 " by running: ${sudo} dnf update "
+ echo >&2
+ fi
+
+ local opts=
+ if [ "${NON_INTERACTIVE}" -eq 1 ]; then
+ echo >&2 "Running in non-interactive mode"
+ # man dnf
+ opts="-y"
+ fi
+
+ # install the required packages
+ # --setopt=strict=0 allows dnf to proceed
+ # installing whatever is available
+ # even if a package is not found
+ opts="$opts --setopt=strict=0"
+ read -r -a dnf_opts <<< "$opts"
+ run ${sudo} dnf "${dnf_opts[@]}" install "${@}"
+}
+
+# -----------------------------------------------------------------------------
+# gentoo
+
+validate_install_emerge() {
+ echo "${*}"
+}
+
+install_emerge() {
+ # download the latest package info
+ # we don't do this for emerge - it is very slow
+ # and most users are expected to do this daily
+ # emerge --sync
+ if [ "${DRYRUN}" -eq 1 ]; then
+ echo >&2 " >> IMPORTANT << "
+ echo >&2 " Please make sure your system is up to date"
+ echo >&2 " by running: ${sudo} emerge --sync or ${sudo} eix-sync "
+ echo >&2
+ fi
+
+ local opts="--ask"
+ if [ "${NON_INTERACTIVE}" -eq 1 ]; then
+ echo >&2 "Running in non-interactive mode"
+ opts=""
+ fi
+
+ read -r -a emerge_opts <<< "$opts"
+
+ # install the required packages
+ run ${sudo} emerge "${emerge_opts[@]}" -v --noreplace "${@}"
+}
+
+# -----------------------------------------------------------------------------
+# alpine
+
+validate_install_apk() {
+ echo "${*}"
+}
+
+install_apk() {
+ # download the latest package info
+ if [ "${DRYRUN}" -eq 1 ]; then
+ echo >&2 " >> IMPORTANT << "
+ echo >&2 " Please make sure your system is up to date"
+ echo >&2 " by running: ${sudo} apk update "
+ echo >&2
+ fi
+
+ local opts="--force-broken-world"
+ if [ "${NON_INTERACTIVE}" -eq 1 ]; then
+ echo >&2 "Running in non-interactive mode"
+ else
+ opts="${opts} -i"
+ fi
+
+ read -r -a apk_opts <<< "$opts"
+
+ # install the required packages
+ run ${sudo} apk add "${apk_opts[@]}" "${@}"
+}
+
+# -----------------------------------------------------------------------------
+# sabayon
+
+validate_install_equo() {
+ echo >&2 " > Checking if package '${*}' is installed..."
+ equo s --installed "${*}" > /dev/null 2>&1 || echo "${*}"
+}
+
+install_equo() {
+ # download the latest package info
+ if [ "${DRYRUN}" -eq 1 ]; then
+ echo >&2 " >> IMPORTANT << "
+ echo >&2 " Please make sure your system is up to date"
+ echo >&2 " by running: ${sudo} equo up "
+ echo >&2
+ fi
+
+ local opts="-av"
+ if [ "${NON_INTERACTIVE}" -eq 1 ]; then
+ echo >&2 "Running in non-interactive mode"
+ opts="-v"
+ fi
+
+ read -r -a equo_opts <<< "$opts"
+
+ # install the required packages
+ run ${sudo} equo i "${equo_opts[@]}" "${@}"
+}
+
+# -----------------------------------------------------------------------------
+# arch
+
+PACMAN_DB_SYNCED=0
+validate_install_pacman() {
+
+ if [ ${PACMAN_DB_SYNCED} -eq 0 ]; then
+ echo >&2 " > Running pacman -Sy to sync the database"
+ local x
+ x=$(pacman -Sy)
+ [ -z "${x}" ] && echo "${*}"
+ PACMAN_DB_SYNCED=1
+ fi
+ echo >&2 " > Checking if package '${*}' is installed..."
+
+  # In pacman, alternative flags can match package names exactly, but we most
+  # likely need pattern matching here as well, so we keep -Qs and handle the
+  # exceptional cases explicitly:
+ local x=""
+ case "${package}" in
+ "gcc")
+ # Temporary workaround: In archlinux, default installation includes runtime libs under package "gcc"
+ # These are not sufficient for netdata install, so we need to make sure that the appropriate libraries are there
+ # by ensuring devel libs are available
+ x=$(pacman -Qs "${*}" | grep "base-devel")
+ ;;
+ "tar")
+ x=$(pacman -Qs "${*}" | grep "local/tar")
+ ;;
+ "make")
+ x=$(pacman -Qs "${*}" | grep "local/make ")
+ ;;
+ *)
+ x=$(pacman -Qs "${*}")
+ ;;
+ esac
+
+ [ -z "${x}" ] && echo "${*}"
+}
+
+install_pacman() {
+ # download the latest package info
+ if [ "${DRYRUN}" -eq 1 ]; then
+ echo >&2 " >> IMPORTANT << "
+ echo >&2 " Please make sure your system is up to date"
+ echo >&2 " by running: ${sudo} pacman -Syu "
+ echo >&2
+ fi
+
+ # install the required packages
+ if [ "${NON_INTERACTIVE}" -eq 1 ]; then
+ echo >&2 "Running in non-interactive mode"
+ # http://unix.stackexchange.com/questions/52277/pacman-option-to-assume-yes-to-every-question/52278
+ # Try the noconfirm option, if that fails, go with the legacy way for non-interactive
+ run ${sudo} pacman --noconfirm --needed -S "${@}" || yes | run ${sudo} pacman --needed -S "${@}"
+ else
+ run ${sudo} pacman --needed -S "${@}"
+ fi
+}
+
+# -----------------------------------------------------------------------------
+# suse / opensuse
+
+validate_install_zypper() {
+ rpm -q "${*}" > /dev/null 2>&1 || echo "${*}"
+}
+
+install_zypper() {
+ # download the latest package info
+ if [ "${DRYRUN}" -eq 1 ]; then
+ echo >&2 " >> IMPORTANT << "
+ echo >&2 " Please make sure your system is up to date"
+ echo >&2 " by running: ${sudo} zypper update "
+ echo >&2
+ fi
+
+ local opts="--ignore-unknown"
+ if [ "${NON_INTERACTIVE}" -eq 1 ]; then
+ echo >&2 "Running in non-interactive mode"
+ # http://unix.stackexchange.com/questions/82016/how-to-use-zypper-in-bash-scripts-for-someone-coming-from-apt-get
+ opts="${opts} --non-interactive"
+ fi
+
+ read -r -a zypper_opts <<< "$opts"
+
+ # install the required packages
+ run ${sudo} zypper "${zypper_opts[@]}" install "${@}"
+}
+
+# -----------------------------------------------------------------------------
+# clearlinux
+
+validate_install_swupd() {
+ swupd bundle-list | grep -q "${*}" || echo "${*}"
+}
+
+install_swupd() {
+ # download the latest package info
+ if [ "${DRYRUN}" -eq 1 ]; then
+ echo >&2 " >> IMPORTANT << "
+ echo >&2 " Please make sure your system is up to date"
+ echo >&2 " by running: ${sudo} swupd update "
+ echo >&2
+ fi
+
+ run ${sudo} swupd bundle-add "${@}"
+}
+
+# -----------------------------------------------------------------------------
+# freebsd / macOS
+
+validate_install_pkg() {
+ pkg query %n-%v | grep -q "${*}" || echo "${*}"
+}
+
+validate_install_brew() {
+ brew list | grep -q "${*}" || echo "${*}"
+}
+
+install_pkg() {
+ # download the latest package info
+ if [ "${DRYRUN}" -eq 1 ]; then
+ echo >&2 " >> IMPORTANT << "
+ echo >&2 " Please make sure your system is up to date"
+ echo >&2 " by running: pkg update "
+ echo >&2
+ fi
+
+ local opts=
+ if [ "${NON_INTERACTIVE}" -eq 1 ]; then
+ echo >&2 "Running in non-interactive mode"
+ opts="-y"
+ fi
+
+ read -r -a pkg_opts <<< "${opts}"
+
+ run ${sudo} pkg install "${pkg_opts[@]}" "${@}"
+}
+
+install_brew() {
+ # download the latest package info
+ if [ "${DRYRUN}" -eq 1 ]; then
+ echo >&2 " >> IMPORTANT << "
+ echo >&2 " Please make sure your system is up to date"
+ echo >&2 " by running: brew upgrade "
+ echo >&2
+ fi
+
+ run brew install "${@}"
+}
+
+# -----------------------------------------------------------------------------
+
+install_failed() {
+ local ret="${1}"
+ cat << EOF
+
+
+
+We are very sorry!
+
+Installation of required packages failed.
+
+What to do now:
+
+ 1. Make sure your system is updated.
+    Most of the time, updating your system resolves the issue.
+
+ 2. If the error message is about a specific package, try removing
+    that package from the command and running it again.
+ Depending on the broken package, you may be able to continue.
+
+ 3. Let us know. We may be able to help.
+    Open a GitHub issue with the above log, at:
+
+ https://github.com/netdata/netdata/issues
+
+
+EOF
+ remote_log "FAILED" "${ret}"
+ exit 1
+}
+
+remote_log() {
+ # log success or failure on our system
+ # to help us solve installation issues
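+  # best-effort only: short timeout, all output discarded, so a failure
+  # here never blocks or delays the installation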
+ curl > /dev/null 2>&1 -Ss --max-time 3 "https://registry.my-netdata.io/log/installer?status=${1}&error=${2}&distribution=${distribution}&version=${version}&installer=${package_installer}&tree=${tree}&detection=${detection}&netdata=${PACKAGES_NETDATA}&nodejs=${PACKAGES_NETDATA_NODEJS}&python=${PACKAGES_NETDATA_PYTHON}&python3=${PACKAGES_NETDATA_PYTHON3}&mysql=${PACKAGES_NETDATA_PYTHON_MYSQL}&postgres=${PACKAGES_NETDATA_PYTHON_POSTGRES}&pymongo=${PACKAGES_NETDATA_PYTHON_MONGO}&sensors=${PACKAGES_NETDATA_SENSORS}&database=${PACKAGES_NETDATA_DATABASE}&ebpf=${PACKAGES_NETDATA_EBPF}&firehol=${PACKAGES_FIREHOL}&fireqos=${PACKAGES_FIREQOS}&iprange=${PACKAGES_IPRANGE}&update_ipsets=${PACKAGES_UPDATE_IPSETS}&demo=${PACKAGES_NETDATA_DEMO_SITE}"
+}
+
+if [ -z "${1}" ]; then
+ usage
+ exit 1
+fi
+
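+# Detect the default python major version; it determines whether the python2
+# or python3 variants of the plugin dependencies are selected below.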
+pv=$(python --version 2>&1)
+if [ "${tree}" = macos ]; then
+ pv=3
+elif [[ "${pv}" =~ ^Python\ 2.* ]]; then
+ pv=2
+elif [[ "${pv}" =~ ^Python\ 3.* ]]; then
+ pv=3
+elif [[ "${tree}" == "centos" ]] && [ "${version}" -lt 8 ]; then
+ pv=2
+else
+ pv=3
+fi
+
+# parse command line arguments
+DONT_WAIT=0
+NON_INTERACTIVE=0
+IGNORE_INSTALLED=0
+while [ -n "${1}" ]; do
+ case "${1}" in
+ distribution)
+ distribution="${2}"
+ shift
+ ;;
+
+ version)
+ version="${2}"
+ shift
+ ;;
+
+ codename)
+ codename="${2}"
+ shift
+ ;;
+
+ installer)
+ check_package_manager "${2}" || exit 1
+ shift
+ ;;
+
+ dont-wait | --dont-wait | -n)
+ DONT_WAIT=1
+ ;;
+
+ non-interactive | --non-interactive | -y)
+ NON_INTERACTIVE=1
+ ;;
+
+ ignore-installed | --ignore-installed | -i)
+ IGNORE_INSTALLED=1
+ ;;
+
+ netdata-all)
+ PACKAGES_NETDATA=1
+ PACKAGES_NETDATA_NODEJS=1
+ if [ "${pv}" -eq 2 ]; then
+ PACKAGES_NETDATA_PYTHON=1
+ PACKAGES_NETDATA_PYTHON_MYSQL=1
+ PACKAGES_NETDATA_PYTHON_POSTGRES=1
+ PACKAGES_NETDATA_PYTHON_MONGO=1
+ else
+ PACKAGES_NETDATA_PYTHON3=1
+ PACKAGES_NETDATA_PYTHON3_MYSQL=1
+ PACKAGES_NETDATA_PYTHON3_POSTGRES=1
+ PACKAGES_NETDATA_PYTHON3_MONGO=1
+ fi
+ PACKAGES_NETDATA_SENSORS=1
+ PACKAGES_NETDATA_DATABASE=1
+ PACKAGES_NETDATA_EBPF=1
+ ;;
+
+ netdata)
+ PACKAGES_NETDATA=1
+ PACKAGES_NETDATA_PYTHON3=1
+ PACKAGES_NETDATA_DATABASE=1
+ PACKAGES_NETDATA_EBPF=1
+ ;;
+
+ python | netdata-python)
+ PACKAGES_NETDATA_PYTHON=1
+ ;;
+
+ python3 | netdata-python3)
+ PACKAGES_NETDATA_PYTHON3=1
+ ;;
+
+ python-mysql | mysql-python | mysqldb | netdata-mysql)
+ if [ "${pv}" -eq 2 ]; then
+ PACKAGES_NETDATA_PYTHON=1
+ PACKAGES_NETDATA_PYTHON_MYSQL=1
+ else
+ PACKAGES_NETDATA_PYTHON3=1
+ PACKAGES_NETDATA_PYTHON3_MYSQL=1
+ fi
+ ;;
+
+ python-postgres | postgres-python | psycopg2 | netdata-postgres)
+ if [ "${pv}" -eq 2 ]; then
+ PACKAGES_NETDATA_PYTHON=1
+ PACKAGES_NETDATA_PYTHON_POSTGRES=1
+ else
+ PACKAGES_NETDATA_PYTHON3=1
+ PACKAGES_NETDATA_PYTHON3_POSTGRES=1
+ fi
+ ;;
+
+ python-pymongo)
+ if [ "${pv}" -eq 2 ]; then
+ PACKAGES_NETDATA_PYTHON=1
+ PACKAGES_NETDATA_PYTHON_MONGO=1
+ else
+ PACKAGES_NETDATA_PYTHON3=1
+ PACKAGES_NETDATA_PYTHON3_MONGO=1
+ fi
+ ;;
+
+ nodejs | netdata-nodejs)
+ PACKAGES_NETDATA=1
+ PACKAGES_NETDATA_NODEJS=1
+ PACKAGES_NETDATA_DATABASE=1
+ ;;
+
+ sensors | netdata-sensors)
+ PACKAGES_NETDATA=1
+ PACKAGES_NETDATA_PYTHON3=1
+ PACKAGES_NETDATA_SENSORS=1
+ PACKAGES_NETDATA_DATABASE=1
+ ;;
+
+ firehol | update-ipsets | firehol-all | fireqos)
+ PACKAGES_IPRANGE=1
+ PACKAGES_FIREHOL=1
+ PACKAGES_FIREQOS=1
+ PACKAGES_UPDATE_IPSETS=1
+ ;;
+
+ demo | all)
+ PACKAGES_NETDATA=1
+ PACKAGES_NETDATA_NODEJS=1
+ if [ "${pv}" -eq 2 ]; then
+ PACKAGES_NETDATA_PYTHON=1
+ PACKAGES_NETDATA_PYTHON_MYSQL=1
+ PACKAGES_NETDATA_PYTHON_POSTGRES=1
+ PACKAGES_NETDATA_PYTHON_MONGO=1
+ else
+ PACKAGES_NETDATA_PYTHON3=1
+ PACKAGES_NETDATA_PYTHON3_MYSQL=1
+ PACKAGES_NETDATA_PYTHON3_POSTGRES=1
+ PACKAGES_NETDATA_PYTHON3_MONGO=1
+ fi
+ PACKAGES_DEBUG=1
+ PACKAGES_IPRANGE=1
+ PACKAGES_FIREHOL=1
+ PACKAGES_FIREQOS=1
+ PACKAGES_UPDATE_IPSETS=1
+ PACKAGES_NETDATA_DEMO_SITE=1
+ PACKAGES_NETDATA_DATABASE=1
+ PACKAGES_NETDATA_EBPF=1
+ ;;
+
+ help | -h | --help)
+ usage
+ exit 1
+ ;;
+
+ *)
+ echo >&2 "ERROR: Cannot understand option '${1}'"
+ echo >&2
+ usage
+ exit 1
+ ;;
+ esac
+ shift
+done
+
+# Check for missing core commands like grep, warn the user to install it and bail out cleanly
+if ! command -v grep > /dev/null 2>&1; then
+ echo >&2
+ echo >&2 "ERROR: 'grep' is required for the install to run correctly and was not found on the system."
+ echo >&2 "Please install grep and run the installer again."
+ echo >&2
+ exit 1
+fi
+
+if [ -z "${package_installer}" ] || [ -z "${tree}" ]; then
+ if [ -z "${distribution}" ]; then
+    # we don't know the distribution
+ autodetect_distribution || user_picks_distribution
+ fi
+
+ # When no package installer is detected, try again from distro info if any
+ if [ -z "${package_installer}" ]; then
+ detect_package_manager_from_distribution "${distribution}"
+ fi
+
+ # Validate package manager trees
+ validate_package_trees
+fi
+
+[ "${detection}" = "/etc/os-release" ] && cat << EOF
+
+/etc/os-release information:
+NAME : ${NAME}
+VERSION : ${VERSION}
+ID : ${ID}
+ID_LIKE : ${ID_LIKE}
+VERSION_ID : ${VERSION_ID}
+EOF
+
+cat << EOF
+
+We detected these:
+Distribution : ${distribution}
+Version : ${version}
+Codename : ${codename}
+Package Manager : ${package_installer}
+Packages Tree : ${tree}
+Detection Method: ${detection}
+Default Python v: ${pv} $([ ${pv} -eq 2 ] && [ "${PACKAGES_NETDATA_PYTHON3}" -eq 1 ] && echo "(will install python3 too)")
+
+EOF
+
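+# packages() prints one package name per line; de-duplicate and collect them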
+mapfile -t PACKAGES_TO_INSTALL < <(packages | sort -u)
+
+if [ ${#PACKAGES_TO_INSTALL[@]} -gt 0 ]; then
+ echo >&2
+ echo >&2 "The following command will be run:"
+ echo >&2
+ DRYRUN=1
+ "${package_installer}" "${PACKAGES_TO_INSTALL[@]}"
+ DRYRUN=0
+ echo >&2
+ echo >&2
+
+ if [ "${DONT_WAIT}" -eq 0 ] && [ "${NON_INTERACTIVE}" -eq 0 ]; then
+ read -r -p "Press ENTER to run it > " || exit 1
+ fi
+
+ "${package_installer}" "${PACKAGES_TO_INSTALL[@]}" || install_failed $?
+
+ echo >&2
+ echo >&2 "All Done! - Now proceed to the next step."
+ echo >&2
+
+else
+ echo >&2
+ echo >&2 "All required packages are already installed. Now proceed to the next step."
+ echo >&2
+fi
+
+remote_log "OK"
+
+exit 0
diff --git a/packaging/installer/kickstart-static64.sh b/packaging/installer/kickstart-static64.sh
new file mode 100755
index 0000000..3c45d9e
--- /dev/null
+++ b/packaging/installer/kickstart-static64.sh
@@ -0,0 +1,368 @@
+#!/usr/bin/env sh
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+# shellcheck disable=SC1117,SC2039,SC2059,SC2086
+#
+# Options to run
+# --dont-wait do not wait for input
+# --non-interactive do not wait for input
+# --dont-start-it do not start netdata after install
+# --stable-channel Use the stable release channel, rather than the nightly to fetch sources
+# --disable-telemetry Opt-out of anonymous telemetry program (DO_NOT_TRACK=1)
+# --local-files Use a manually provided tarball for the installation
+# --allow-duplicate-install do not bail if we detect a duplicate install
+# --reinstall if an existing install would be updated, reinstall instead
+#
+# Environment options:
+#
+# NETDATA_TARBALL_BASEURL set the base url for downloading the dist tarball
+#
+# ----------------------------------------------------------------------------
+# library functions copied from packaging/installer/functions.sh
+
+setup_terminal() {
+ TPUT_RESET=""
+ TPUT_YELLOW=""
+ TPUT_WHITE=""
+ TPUT_BGRED=""
+ TPUT_BGGREEN=""
+ TPUT_BOLD=""
+ TPUT_DIM=""
+
+ # Is stderr on the terminal? If not, then fail
+ test -t 2 || return 1
+
+ if command -v tput > /dev/null 2>&1; then
+ if [ $(($(tput colors 2> /dev/null))) -ge 8 ]; then
+ # Enable colors
+ TPUT_RESET="$(tput sgr 0)"
+ TPUT_YELLOW="$(tput setaf 3)"
+ TPUT_WHITE="$(tput setaf 7)"
+ TPUT_BGRED="$(tput setab 1)"
+ TPUT_BGGREEN="$(tput setab 2)"
+ TPUT_BOLD="$(tput bold)"
+ TPUT_DIM="$(tput dim)"
+ fi
+ fi
+
+ return 0
+}
+setup_terminal || echo > /dev/null
+
+# ----------------------------------------------------------------------------
+fatal() {
+ printf >&2 "${TPUT_BGRED}${TPUT_WHITE}${TPUT_BOLD} ABORTED ${TPUT_RESET} ${*} \n\n"
+ exit 1
+}
+
+run_ok() {
+ printf >&2 "${TPUT_BGGREEN}${TPUT_WHITE}${TPUT_BOLD} OK ${TPUT_RESET} \n\n"
+}
+
+run_failed() {
+ printf >&2 "${TPUT_BGRED}${TPUT_WHITE}${TPUT_BOLD} FAILED ${TPUT_RESET} \n\n"
+}
+
+ESCAPED_PRINT_METHOD=
+if printf "%q " test > /dev/null 2>&1; then
+ ESCAPED_PRINT_METHOD="printfq"
+fi
+escaped_print() {
+ if [ "${ESCAPED_PRINT_METHOD}" = "printfq" ]; then
+ printf "%q " "${@}"
+ else
+ printf "%s" "${*}"
+ fi
+ return 0
+}
+
+progress() {
+ echo >&2 " --- ${TPUT_DIM}${TPUT_BOLD}${*}${TPUT_RESET} --- "
+}
+
+run_logfile="/dev/null"
+run() {
+ local user="${USER--}" dir="${PWD}" info info_console
+
+ if [ "${UID}" = "0" ]; then
+ info="[root ${dir}]# "
+ info_console="[${TPUT_DIM}${dir}${TPUT_RESET}]# "
+ else
+ info="[${user} ${dir}]$ "
+ info_console="[${TPUT_DIM}${dir}${TPUT_RESET}]$ "
+ fi
+
+ {
+ printf "${info}"
+ escaped_print "${@}"
+ printf " ... "
+ } >> "${run_logfile}"
+
+ printf >&2 "${info_console}${TPUT_BOLD}${TPUT_YELLOW}"
+ escaped_print >&2 "${@}"
+ printf >&2 "${TPUT_RESET}\n"
+
+ "${@}"
+
+ local ret=$?
+ if [ ${ret} -ne 0 ]; then
+ run_failed
+ printf >> "${run_logfile}" "FAILED with exit code ${ret}\n"
+ else
+ run_ok
+ printf >> "${run_logfile}" "OK\n"
+ fi
+
+ return ${ret}
+}
+
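+# Note the inverted exit status: returns 0 (shell "true") when the candidate
+# directory can NOT be used, i.e. when we failed to create and execute a
+# test file inside it.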
+_cannot_use_tmpdir() {
+ local testfile ret
+ testfile="$(TMPDIR="${1}" mktemp -q -t netdata-test.XXXXXXXXXX)"
+ ret=0
+
+ if [ -z "${testfile}" ] ; then
+ return "${ret}"
+ fi
+
+ if printf '#!/bin/sh\necho SUCCESS\n' > "${testfile}" ; then
+ if chmod +x "${testfile}" ; then
+ if [ "$("${testfile}")" = "SUCCESS" ] ; then
+ ret=1
+ fi
+ fi
+ fi
+
+ rm -f "${testfile}"
+ return "${ret}"
+}
+
+create_tmp_directory() {
+ if [ -z "${TMPDIR}" ] || _cannot_use_tmpdir "${TMPDIR}" ; then
+ if _cannot_use_tmpdir /tmp ; then
+ if _cannot_use_tmpdir "${PWD}" ; then
+ echo >&2
+        echo >&2 "Unable to find a usable temporary directory. Please set \$TMPDIR to a path that is both writable and allows execution of files and try again."
+ exit 1
+ else
+ TMPDIR="${PWD}"
+ fi
+ else
+ TMPDIR="/tmp"
+ fi
+ fi
+
+ mktemp -d -t netdata-kickstart-XXXXXXXXXX
+}
+
+download() {
+ url="${1}"
+ dest="${2}"
+ if command -v curl > /dev/null 2>&1; then
+ run curl -q -sSL --connect-timeout 10 --retry 3 --output "${dest}" "${url}"
+ elif command -v wget > /dev/null 2>&1; then
+ run wget -T 15 -O "${dest}" "${url}" || fatal "Cannot download ${url}"
+ else
+ fatal "I need curl or wget to proceed, but neither is available on this system."
+ fi
+}
+
+set_tarball_urls() {
+ if [ -n "${NETDATA_LOCAL_TARBALL_OVERRIDE}" ]; then
+ progress "Not fetching remote tarballs, local override was given"
+ return
+ fi
+
+ if [ "$1" = "stable" ]; then
+ local latest
+ # Simple version
+ latest="$(download "https://api.github.com/repos/netdata/netdata/releases/latest" /dev/stdout | grep tag_name | cut -d'"' -f4)"
+ export NETDATA_TARBALL_URL="https://github.com/netdata/netdata/releases/download/$latest/netdata-$latest.gz.run"
+ export NETDATA_TARBALL_CHECKSUM_URL="https://github.com/netdata/netdata/releases/download/$latest/sha256sums.txt"
+ else
+ export NETDATA_TARBALL_URL="$NETDATA_TARBALL_BASEURL/netdata-latest.gz.run"
+ export NETDATA_TARBALL_CHECKSUM_URL="$NETDATA_TARBALL_BASEURL/sha256sums.txt"
+ fi
+}
+
+safe_sha256sum() {
+  # Within the context of the installer, we only use the -c option that is common between the two commands
+  # We will have to reconsider if we start using non-common options
+ if command -v sha256sum > /dev/null 2>&1; then
+ sha256sum "$@"
+ elif command -v shasum > /dev/null 2>&1; then
+ shasum -a 256 "$@"
+ else
+ fatal "I could not find a suitable checksum binary to use"
+ fi
+}
+
+# ----------------------------------------------------------------------------
+umask 022
+
+sudo=""
+[ -z "${UID}" ] && UID="$(id -u)"
+[ "${UID}" -ne "0" ] && sudo="sudo"
+
+# ----------------------------------------------------------------------------
+if [ "$(uname -m)" != "x86_64" ]; then
+ fatal "Static binary versions of netdata are available only for 64bit Intel/AMD CPUs (x86_64), but yours is: $(uname -m)."
+fi
+
+if [ "$(uname -s)" != "Linux" ]; then
+ fatal "Static binary versions of netdata are available only for Linux, but this system is $(uname -s)"
+fi
+
+# ----------------------------------------------------------------------------
+opts=
+NETDATA_INSTALLER_OPTIONS=""
+NETDATA_UPDATES="--auto-update"
+RELEASE_CHANNEL="nightly"
+while [ -n "${1}" ]; do
+ if [ "${1}" = "--dont-wait" ] || [ "${1}" = "--non-interactive" ] || [ "${1}" = "--accept" ]; then
+ opts="${opts} --accept"
+ shift 1
+ elif [ "${1}" = "--dont-start-it" ]; then
+ NETDATA_INSTALLER_OPTIONS="${NETDATA_INSTALLER_OPTIONS:+${NETDATA_INSTALLER_OPTIONS} }${1}"
+ shift 1
+ elif [ "${1}" = "--no-updates" ]; then
+ NETDATA_UPDATES=""
+ shift 1
+ elif [ "${1}" = "--auto-update" ]; then
+ true # This is the default behaviour, so ignore it.
+ shift 1
+ elif [ "${1}" = "--stable-channel" ]; then
+ RELEASE_CHANNEL="stable"
+ NETDATA_INSTALLER_OPTIONS="${NETDATA_INSTALLER_OPTIONS:+${NETDATA_INSTALLER_OPTIONS} }${1}"
+ shift 1
+ elif [ "${1}" = "--disable-telemetry" ]; then
+ NETDATA_INSTALLER_OPTIONS="${NETDATA_INSTALLER_OPTIONS:+${NETDATA_INSTALLER_OPTIONS} }${1}"
+ shift 1
+ elif [ "${1}" = "--local-files" ]; then
+ NETDATA_UPDATES="" # Disable autoupdates if using pre-downloaded files.
+ shift 1
+ if [ -z "${1}" ]; then
+      fatal "Option --local-files requires extra information. The full filename of the desired tarball is needed"
+ fi
+
+ NETDATA_LOCAL_TARBALL_OVERRIDE="${1}"
+ shift 1
+ if [ -z "${1}" ]; then
+      fatal "Option --local-files requires both the tarball source and the checksum file"
+ fi
+
+ NETDATA_LOCAL_TARBALL_OVERRIDE_CHECKSUM="${1}"
+ shift 1
+ elif [ "${1}" = "--allow-duplicate-install" ]; then
+ NETDATA_ALLOW_DUPLICATE_INSTALL=1
+ shift 1
+ elif [ "${1}" = "--reinstall" ]; then
+ NETDATA_REINSTALL=1
+ shift 1
+ else
+ echo >&2 "Unknown option '${1}' or invalid number of arguments. Please check the README for the available arguments of ${0} and try again"
+ exit 1
+ fi
+done
+
+if [ ! "${DO_NOT_TRACK:-0}" -eq 0 ] || [ -n "$DO_NOT_TRACK" ]; then
+  NETDATA_INSTALLER_OPTIONS="${NETDATA_INSTALLER_OPTIONS:+${NETDATA_INSTALLER_OPTIONS} }--disable-telemetry"
+fi
+
+# Netdata Tarball Base URL (defaults to our Google Storage Bucket)
+[ -z "$NETDATA_TARBALL_BASEURL" ] && NETDATA_TARBALL_BASEURL=https://storage.googleapis.com/netdata-nightlies
+
+# ---------------------------------------------------------------------------------------------------------------------
+# look for an existing install and try to update that instead if it exists
+
+ndpath="$(command -v netdata 2>/dev/null)"
+if [ -z "$ndpath" ] && [ -x /opt/netdata/bin/netdata ] ; then
+ ndpath="/opt/netdata/bin/netdata"
+fi
+
+if [ -n "$ndpath" ] ; then
+ ndprefix="$(dirname "$(dirname "${ndpath}")")"
+
+ if [ "${ndprefix}" = /usr ] ; then
+ ndprefix="/"
+ fi
+
+ progress "Found existing install of Netdata under: ${ndprefix}"
+
+ if [ -r "${ndprefix}/etc/netdata/.environment" ] ; then
+ ndstatic="$(grep IS_NETDATA_STATIC_BINARY "${ndprefix}/etc/netdata/.environment" | cut -d "=" -f 2 | tr -d \")"
+ if [ -z "${NETDATA_REINSTALL}" ] && [ -z "${NETDATA_LOCAL_TARBALL_OVERRIDE}" ] ; then
+ if [ -x "${ndprefix}/usr/libexec/netdata/netdata-updater.sh" ] ; then
+ progress "Attempting to update existing install instead of creating a new one"
+ if run ${sudo} "${ndprefix}/usr/libexec/netdata/netdata-updater.sh" --not-running-from-cron ; then
+ progress "Updated existing install at ${ndpath}"
+ exit 0
+ else
+ fatal "Failed to update existing Netdata install"
+ exit 1
+ fi
+ else
+ if [ -z "${NETDATA_ALLOW_DUPLICATE_INSTALL}" ] || [ "${ndstatic}" = "no" ] ; then
+ fatal "Existing installation detected which cannot be safely updated by this script, refusing to continue."
+ exit 1
+ else
+ progress "User explicitly requested duplicate install, proceeding."
+ fi
+ fi
+ else
+ if [ "${ndstatic}" = "yes" ] ; then
+ progress "User requested reinstall instead of update, proceeding."
+ else
+ fatal "Existing install is not a static install, please use kickstart.sh instead."
+ exit 1
+ fi
+ fi
+ else
+ progress "Existing install appears to be handled manually or through the system package manager."
+ if [ -z "${NETDATA_ALLOW_DUPLICATE_INSTALL}" ] ; then
+ fatal "Existing installation detected which cannot be safely updated by this script, refusing to continue."
+ exit 1
+ else
+ progress "User explicitly requested duplicate install, proceeding."
+ fi
+ fi
+fi
+
+# ----------------------------------------------------------------------------
+TMPDIR=$(create_tmp_directory)
+cd "${TMPDIR}" || exit 1
+
+if [ -z "${NETDATA_LOCAL_TARBALL_OVERRIDE}" ]; then
+ set_tarball_urls "${RELEASE_CHANNEL}"
+ progress "Downloading static netdata binary: ${NETDATA_TARBALL_URL}"
+
+ download "${NETDATA_TARBALL_CHECKSUM_URL}" "${TMPDIR}/sha256sum.txt"
+ download "${NETDATA_TARBALL_URL}" "${TMPDIR}/netdata-latest.gz.run"
+else
+ progress "Installation sources were given as input, running installation using \"${NETDATA_LOCAL_TARBALL_OVERRIDE}\""
+ run cp "${NETDATA_LOCAL_TARBALL_OVERRIDE}" "${TMPDIR}/netdata-latest.gz.run"
+ run cp "${NETDATA_LOCAL_TARBALL_OVERRIDE_CHECKSUM}" "${TMPDIR}/sha256sum.txt"
+fi
+
+if ! grep netdata-latest.gz.run "${TMPDIR}/sha256sum.txt" | safe_sha256sum -c - > /dev/null 2>&1; then
+ fatal "Static binary checksum validation failed. Stopping Netdata Agent installation and leaving binary in ${TMPDIR}\nUsually this is a result of an older copy of the file being cached somewhere upstream and can be resolved by retrying in an hour."
+fi
+
+# ----------------------------------------------------------------------------
+progress "Installing netdata"
+run ${sudo} sh "${TMPDIR}/netdata-latest.gz.run" ${opts} -- ${NETDATA_UPDATES} ${NETDATA_INSTALLER_OPTIONS}
+
+#shellcheck disable=SC2181
+if [ $? -eq 0 ]; then
+ run ${sudo} rm "${TMPDIR}/netdata-latest.gz.run"
+ if [ ! "${TMPDIR}" = "/" ] && [ -d "${TMPDIR}" ]; then
+ run ${sudo} rm -rf "${TMPDIR}"
+ fi
+else
+ echo >&2 "NOTE: did not remove: ${TMPDIR}/netdata-latest.gz.run"
+fi
diff --git a/packaging/installer/kickstart.sh b/packaging/installer/kickstart.sh
new file mode 100755
index 0000000..c97587c
--- /dev/null
+++ b/packaging/installer/kickstart.sh
@@ -0,0 +1,491 @@
+#!/usr/bin/env sh
+# SPDX-License-Identifier: GPL-3.0-or-later
+#
+# Run me with:
+#
+# bash <(curl -Ss https://my-netdata.io/kickstart.sh)
+#
+# or (to install all netdata dependencies):
+#
+# bash <(curl -Ss https://my-netdata.io/kickstart.sh) all
+#
+# Other options:
+# --dont-wait do not prompt for user input
+# --non-interactive do not prompt for user input
+# --no-updates do not install script for daily updates
+# --local-files set the full path of the desired tarball to run install with
+# --allow-duplicate-install do not bail if we detect a duplicate install
+# --reinstall if an existing install would be updated, reinstall instead
+#
+# Environment options:
+#
+# TMPDIR specify where to save temporary files
+# NETDATA_TARBALL_BASEURL set the base url for downloading the dist tarball
+#
+# This script will:
+#
+# 1. install all netdata compilation dependencies
+# using the package manager of the system
+#
+# 2. download netdata nightly package to temporary directory
+#
+# 3. install netdata
+
+# shellcheck disable=SC2039,SC2059,SC2086
+
+# External files
+PACKAGES_SCRIPT="https://raw.githubusercontent.com/netdata/netdata/master/packaging/installer/install-required-packages.sh"
+
+# Netdata Tarball Base URL (defaults to our Google Storage Bucket)
+[ -z "$NETDATA_TARBALL_BASEURL" ] && NETDATA_TARBALL_BASEURL=https://storage.googleapis.com/netdata-nightlies
+
+# ---------------------------------------------------------------------------------------------------------------------
+# library functions copied from packaging/installer/functions.sh
+
+setup_terminal() {
+ TPUT_RESET=""
+ TPUT_YELLOW=""
+ TPUT_WHITE=""
+ TPUT_BGRED=""
+ TPUT_BGGREEN=""
+ TPUT_BOLD=""
+ TPUT_DIM=""
+
+ # Is stderr on the terminal? If not, then fail
+ test -t 2 || return 1
+
+ if command -v tput > /dev/null 2>&1; then
+ if [ $(($(tput colors 2> /dev/null))) -ge 8 ]; then
+ # Enable colors
+ TPUT_RESET="$(tput sgr 0)"
+ TPUT_YELLOW="$(tput setaf 3)"
+ TPUT_WHITE="$(tput setaf 7)"
+ TPUT_BGRED="$(tput setab 1)"
+ TPUT_BGGREEN="$(tput setab 2)"
+ TPUT_BOLD="$(tput bold)"
+ TPUT_DIM="$(tput dim)"
+ fi
+ fi
+
+ return 0
+}
+setup_terminal || echo > /dev/null
+
+# -----------------------------------------------------------------------------
+fatal() {
+ printf >&2 "${TPUT_BGRED}${TPUT_WHITE}${TPUT_BOLD} ABORTED ${TPUT_RESET} ${*} \n\n"
+ exit 1
+}
+
+run_ok() {
+ printf >&2 "${TPUT_BGGREEN}${TPUT_WHITE}${TPUT_BOLD} OK ${TPUT_RESET} \n\n"
+}
+
+run_failed() {
+ printf >&2 "${TPUT_BGRED}${TPUT_WHITE}${TPUT_BOLD} FAILED ${TPUT_RESET} \n\n"
+}
+
+ESCAPED_PRINT_METHOD=
+if printf "%q " test > /dev/null 2>&1; then
+ ESCAPED_PRINT_METHOD="printfq"
+fi
+escaped_print() {
+ if [ "${ESCAPED_PRINT_METHOD}" = "printfq" ]; then
+ printf "%q " "${@}"
+ else
+ printf "%s" "${*}"
+ fi
+ return 0
+}
+
+progress() {
+ echo >&2 " --- ${TPUT_DIM}${TPUT_BOLD}${*}${TPUT_RESET} --- "
+}
+
+run_logfile="/dev/null"
+run() {
+ local user="${USER--}" dir="${PWD}" info info_console
+
+ if [ "${UID}" = "0" ]; then
+ info="[root ${dir}]# "
+ info_console="[${TPUT_DIM}${dir}${TPUT_RESET}]# "
+ else
+ info="[${user} ${dir}]$ "
+ info_console="[${TPUT_DIM}${dir}${TPUT_RESET}]$ "
+ fi
+
+ {
+ printf "${info}"
+ escaped_print "${@}"
+ printf " ... "
+ } >> "${run_logfile}"
+
+ printf >&2 "${info_console}${TPUT_BOLD}${TPUT_YELLOW}"
+ escaped_print >&2 "${@}"
+  printf >&2 "${TPUT_RESET}\n"
+
+ "${@}"
+
+ local ret=$?
+ if [ ${ret} -ne 0 ]; then
+ run_failed
+ printf >> "${run_logfile}" "FAILED with exit code ${ret}\n"
+ else
+ run_ok
+ printf >> "${run_logfile}" "OK\n"
+ fi
+
+ return ${ret}
+}
+
+warning() {
+ printf >&2 "${TPUT_BGRED}${TPUT_WHITE}${TPUT_BOLD} WARNING ${TPUT_RESET} ${*} \n\n"
+ if [ "${INTERACTIVE}" = "0" ]; then
+    fatal "Stopping due to non-interactive mode. Fix the issue or retry the installation in interactive mode."
+ else
+ read -r -p "Press ENTER to attempt netdata installation > "
+ progress "OK, let's give it a try..."
+ fi
+}
+
+_cannot_use_tmpdir() {
+ local testfile ret
+ testfile="$(TMPDIR="${1}" mktemp -q -t netdata-test.XXXXXXXXXX)"
+ ret=0
+
+ if [ -z "${testfile}" ] ; then
+ return "${ret}"
+ fi
+
+ if printf '#!/bin/sh\necho SUCCESS\n' > "${testfile}" ; then
+ if chmod +x "${testfile}" ; then
+ if [ "$("${testfile}")" = "SUCCESS" ] ; then
+ ret=1
+ fi
+ fi
+ fi
+
+ rm -f "${testfile}"
+ return "${ret}"
+}
+
+create_tmp_directory() {
+ if [ -z "${TMPDIR}" ] || _cannot_use_tmpdir "${TMPDIR}" ; then
+ if _cannot_use_tmpdir /tmp ; then
+ if _cannot_use_tmpdir "${PWD}" ; then
+ echo >&2
+        echo >&2 "Unable to find a usable temporary directory. Please set \$TMPDIR to a path that is both writable and allows execution of files and try again."
+ exit 1
+ else
+ TMPDIR="${PWD}"
+ fi
+ else
+ TMPDIR="/tmp"
+ fi
+ fi
+
+ mktemp -d -t netdata-kickstart-XXXXXXXXXX
+}
+
+download() {
+ url="${1}"
+ dest="${2}"
+ if command -v curl > /dev/null 2>&1; then
+ run curl -q -sSL --connect-timeout 10 --retry 3 --output "${dest}" "${url}"
+ elif command -v wget > /dev/null 2>&1; then
+ run wget -T 15 -O "${dest}" "${url}" || fatal "Cannot download ${url}"
+ else
+ fatal "I need curl or wget to proceed, but neither is available on this system."
+ fi
+}
+
+set_tarball_urls() {
+ if [ -n "${NETDATA_LOCAL_TARBALL_OVERRIDE}" ]; then
+ progress "Not fetching remote tarballs, local override was given"
+ return
+ fi
+
+ if [ "$1" = "stable" ]; then
+ local latest
+ # Simple version
+ latest="$(download "https://api.github.com/repos/netdata/netdata/releases/latest" /dev/stdout | grep tag_name | cut -d'"' -f4)"
+ export NETDATA_TARBALL_URL="https://github.com/netdata/netdata/releases/download/$latest/netdata-$latest.tar.gz"
+ export NETDATA_TARBALL_CHECKSUM_URL="https://github.com/netdata/netdata/releases/download/$latest/sha256sums.txt"
+ else
+ export NETDATA_TARBALL_URL="$NETDATA_TARBALL_BASEURL/netdata-latest.tar.gz"
+ export NETDATA_TARBALL_CHECKSUM_URL="$NETDATA_TARBALL_BASEURL/sha256sums.txt"
+ fi
+}
+
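+# install-required-packages.sh relies on bash associative arrays (declare -A),
+# which need bash v4 or newer, so probe for a suitable bash first.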
+detect_bash4() {
+ bash="${1}"
+ if [ -z "${BASH_VERSION}" ]; then
+ # we don't run under bash
+ if [ -n "${bash}" ] && [ -x "${bash}" ]; then
+ # shellcheck disable=SC2016
+ BASH_MAJOR_VERSION=$(${bash} -c 'echo "${BASH_VERSINFO[0]}"')
+ fi
+ else
+ # we run under bash
+ BASH_MAJOR_VERSION="${BASH_VERSINFO[0]}"
+ fi
+
+ if [ -z "${BASH_MAJOR_VERSION}" ]; then
+ echo >&2 "No BASH is available on this system"
+ return 1
+ elif [ $((BASH_MAJOR_VERSION)) -lt 4 ]; then
+    echo >&2 "No BASH v4+ is available on this system (installed bash is v${BASH_MAJOR_VERSION})"
+ return 1
+ fi
+ return 0
+}
+
+dependencies() {
+ SYSTEM="$(uname -s 2> /dev/null || uname -v)"
+ OS="$(uname -o 2> /dev/null || uname -rs)"
+ MACHINE="$(uname -m 2> /dev/null)"
+
+ echo "System : ${SYSTEM}"
+ echo "Operating System : ${OS}"
+ echo "Machine : ${MACHINE}"
+ echo "BASH major version: ${BASH_MAJOR_VERSION}"
+
+ if [ "${OS}" != "GNU/Linux" ] && [ "${SYSTEM}" != "Linux" ]; then
+ warning "Cannot detect the packages to be installed on a ${SYSTEM} - ${OS} system."
+ else
+ bash="$(command -v bash 2> /dev/null)"
+ if ! detect_bash4 "${bash}"; then
+ warning "Cannot detect the packages to be installed on this system without BASH v4+."
+ else
+ progress "Fetching script to detect required packages..."
+ if [ -n "${NETDATA_LOCAL_TARBALL_OVERRIDE_DEPS_SCRIPT}" ]; then
+ if [ -f "${NETDATA_LOCAL_TARBALL_OVERRIDE_DEPS_SCRIPT}" ]; then
+ run cp "${NETDATA_LOCAL_TARBALL_OVERRIDE_DEPS_SCRIPT}" "${ndtmpdir}/install-required-packages.sh"
+ else
+ fatal "Invalid dependency file given. Please check your --local-files parameter options and try again."
+ fi
+ else
+ download "${PACKAGES_SCRIPT}" "${ndtmpdir}/install-required-packages.sh"
+ fi
+
+ if [ ! -s "${ndtmpdir}/install-required-packages.sh" ]; then
+ warning "Downloaded dependency installation script is empty."
+ else
+ progress "Running downloaded script to detect required packages..."
+ run ${sudo} "${bash}" "${ndtmpdir}/install-required-packages.sh" ${PACKAGES_INSTALLER_OPTIONS}
+ # shellcheck disable=SC2181
+ if [ $? -ne 0 ]; then
+ warning "Failed to install all required packages, but installation might still be possible."
+ fi
+ fi
+
+ fi
+ fi
+}
+
+safe_sha256sum() {
+ # Within the context of the installer, we only use the -c option, which is common between the two commands.
+ # We will have to reconsider if we start using non-common options.
+ if command -v sha256sum > /dev/null 2>&1; then
+ sha256sum "$@"
+ elif command -v shasum > /dev/null 2>&1; then
+ shasum -a 256 "$@"
+ else
+ fatal "I could not find a suitable checksum binary to use"
+ fi
+}
+
+# ---------------------------------------------------------------------------------------------------------------------
+umask 022
+
+sudo=""
+[ -z "${UID}" ] && UID="$(id -u)"
+[ "${UID}" -ne "0" ] && sudo="sudo"
+export PATH="${PATH}:/usr/local/bin:/usr/local/sbin"
+
+
+# ---------------------------------------------------------------------------------------------------------------------
+
+INTERACTIVE=1
+PACKAGES_INSTALLER_OPTIONS="netdata"
+NETDATA_INSTALLER_OPTIONS=""
+NETDATA_UPDATES="--auto-update"
+RELEASE_CHANNEL="nightly"
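+# Parse the command line. Any option not recognized here is passed through
+# to netdata-installer.sh verbatim.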
+while [ -n "${1}" ]; do
+ if [ "${1}" = "all" ]; then
+ PACKAGES_INSTALLER_OPTIONS="netdata-all"
+ shift 1
+ elif [ "${1}" = "--dont-wait" ] || [ "${1}" = "--non-interactive" ]; then
+ INTERACTIVE=0
+ shift 1
+ elif [ "${1}" = "--no-updates" ]; then
+ # echo >&2 "netdata will not auto-update"
+ NETDATA_UPDATES=
+ shift 1
+ elif [ "${1}" = "--stable-channel" ]; then
+ RELEASE_CHANNEL="stable"
+ NETDATA_INSTALLER_OPTIONS="$NETDATA_INSTALLER_OPTIONS --stable-channel"
+ shift 1
+ elif [ "${1}" = "--allow-duplicate-install" ]; then
+ NETDATA_ALLOW_DUPLICATE_INSTALL=1
+ shift 1
+ elif [ "${1}" = "--reinstall" ]; then
+ NETDATA_REINSTALL=1
+ shift 1
+ elif [ "${1}" = "--local-files" ]; then
+ shift 1
+ if [ -z "${1}" ]; then
+ fatal "Missing netdata tarball: Option --local-files requires extra information. The desired tarball for netdata, the checksum file, the go.d plugin tarball, the go.d plugin config tarball, and the dependency management script, in this particular order"
+ fi
+
+ export NETDATA_LOCAL_TARBALL_OVERRIDE="${1}"
+ shift 1
+
+ if [ -z "${1}" ]; then
+ fatal "Missing checksum file: Option --local-files requires extra information. The desired tarball for netdata, the checksum file, the go.d plugin tarball, the go.d plugin config tarball, and the dependency management script, in this particular order"
+ fi
+
+ export NETDATA_LOCAL_TARBALL_OVERRIDE_CHECKSUM="${1}"
+ shift 1
+
+ if [ -z "${1}" ]; then
+ fatal "Missing go.d tarball: Option --local-files requires extra information. The desired tarball for netdata, the checksum file, the go.d plugin tarball, the go.d plugin config tarball, and the dependency management script, in this particular order"
+ fi
+
+ export NETDATA_LOCAL_TARBALL_OVERRIDE_GO_PLUGIN="${1}"
+ shift 1
+
+ if [ -z "${1}" ]; then
+ fatal "Missing go.d config tarball: Option --local-files requires extra information. The desired tarball for netdata, the checksum file, the go.d plugin tarball, the go.d plugin config tarball, and the dependency management script, in this particular order"
+ fi
+
+ export NETDATA_LOCAL_TARBALL_OVERRIDE_GO_PLUGIN_CONFIG="${1}"
+ shift 1
+
+ if [ -z "${1}" ]; then
+ fatal "Missing dependency management script: Option --local-files requires extra information. The desired tarball for netdata, the checksum file, the go.d plugin tarball, the go.d plugin config tarball, and the dependency management script, in this particular order"
+ fi
+
+ export NETDATA_LOCAL_TARBALL_OVERRIDE_DEPS_SCRIPT="${1}"
+ shift 1
+ else
+ NETDATA_INSTALLER_OPTIONS="$NETDATA_INSTALLER_OPTIONS ${1}"
+ shift 1
+ fi
+done
+
+if [ "${INTERACTIVE}" = "0" ]; then
+ PACKAGES_INSTALLER_OPTIONS="--dont-wait --non-interactive ${PACKAGES_INSTALLER_OPTIONS}"
+ NETDATA_INSTALLER_OPTIONS="$NETDATA_INSTALLER_OPTIONS --dont-wait"
+fi
+
+# ---------------------------------------------------------------------------------------------------------------------
+# look for an existing install and try to update that instead if it exists
+
+ndpath="$(command -v netdata 2>/dev/null)"
+if [ -z "$ndpath" ] && [ -x /opt/netdata/bin/netdata ] ; then
+ ndpath="/opt/netdata/bin/netdata"
+fi
+
+if [ -n "$ndpath" ] ; then
+ ndprefix="$(dirname "$(dirname "${ndpath}")")"
+
+ if [ "${ndprefix}" = /usr ] ; then
+ ndprefix="/"
+ fi
+
+ progress "Found existing install of Netdata under: ${ndprefix}"
+
+ if [ -r "${ndprefix}/etc/netdata/.environment" ] ; then
+ ndstatic="$(grep IS_NETDATA_STATIC_BINARY "${ndprefix}/etc/netdata/.environment" | cut -d "=" -f 2 | tr -d \")"
+ if [ -z "${NETDATA_REINSTALL}" ] && [ -z "${NETDATA_LOCAL_TARBALL_OVERRIDE}" ] ; then
+ if [ -x "${ndprefix}/usr/libexec/netdata/netdata-updater.sh" ] ; then
+ progress "Attempting to update existing install instead of creating a new one"
+ if run ${sudo} "${ndprefix}/usr/libexec/netdata/netdata-updater.sh" --not-running-from-cron ; then
+ progress "Updated existing install at ${ndpath}"
+ exit 0
+ else
+ fatal "Failed to update existing Netdata install"
+ exit 1
+ fi
+ else
+ if [ -z "${NETDATA_ALLOW_DUPLICATE_INSTALL}" ] || [ "${ndstatic}" = "yes" ] ; then
+ fatal "Existing installation detected which cannot be safely updated by this script, refusing to continue."
+ exit 1
+ else
+ progress "User explicitly requested duplicate install, proceeding."
+ fi
+ fi
+ else
+ if [ "${ndstatic}" = "no" ] ; then
+ progress "User requested reinstall instead of update, proceeding."
+ else
+ fatal "Existing install is a static install, please use kickstart-static64.sh instead."
+ exit 1
+ fi
+ fi
+ else
+ progress "Existing install appears to be handled manually or through the system package manager."
+ if [ -z "${NETDATA_ALLOW_DUPLICATE_INSTALL}" ] ; then
+ fatal "Existing installation detected which cannot be safely updated by this script, refusing to continue."
+ exit 1
+ else
+ progress "User explicitly requested duplicate install, proceeding."
+ fi
+ fi
+fi
+
+# ---------------------------------------------------------------------------------------------------------------------
+# install required system packages
+
+ndtmpdir=$(create_tmp_directory)
+cd "${ndtmpdir}" || exit 1
+
+dependencies
+
+# ---------------------------------------------------------------------------------------------------------------------
+# download netdata package
+
+if [ -z "${NETDATA_LOCAL_TARBALL_OVERRIDE}" ]; then
+ set_tarball_urls "${RELEASE_CHANNEL}"
+
+ download "${NETDATA_TARBALL_CHECKSUM_URL}" "${ndtmpdir}/sha256sum.txt"
+ download "${NETDATA_TARBALL_URL}" "${ndtmpdir}/netdata-latest.tar.gz"
+else
+ progress "Installation sources were given as input, running installation using \"${NETDATA_LOCAL_TARBALL_OVERRIDE}\""
+ run cp "${NETDATA_LOCAL_TARBALL_OVERRIDE_CHECKSUM}" "${ndtmpdir}/sha256sum.txt"
+ run cp "${NETDATA_LOCAL_TARBALL_OVERRIDE}" "${ndtmpdir}/netdata-latest.tar.gz"
+fi
+
+if ! grep netdata-latest.tar.gz "${ndtmpdir}/sha256sum.txt" | safe_sha256sum -c - > /dev/null 2>&1; then
+ fatal "Tarball checksum validation failed. Stopping Netdata Agent installation and leaving tarball in ${ndtmpdir}.\nUsually this is a result of an older copy of the file being cached somewhere upstream and can be resolved by retrying in an hour."
+fi
+run tar -xf netdata-latest.tar.gz
+rm -rf netdata-latest.tar.gz > /dev/null 2>&1
+cd netdata-* || fatal "Cannot cd to netdata source tree"
+
+# ---------------------------------------------------------------------------------------------------------------------
+# install netdata from source
+
+install() {
+ progress "Installing netdata..."
+ run ${sudo} ./netdata-installer.sh ${NETDATA_UPDATES} ${NETDATA_INSTALLER_OPTIONS} || fatal "netdata-installer.sh exited with error"
+ if [ -d "${ndtmpdir}" ] && [ ! "${ndtmpdir}" = "/" ]; then
+ run ${sudo} rm -rf "${ndtmpdir}" > /dev/null 2>&1
+ fi
+}
+
+if [ -x netdata-installer.sh ]; then
+ install "$@"
+else
+ if [ "$(find . -mindepth 1 -maxdepth 1 -type d | wc -l)" -eq 1 ] && [ -x "$(find . -mindepth 1 -maxdepth 1 -type d)/netdata-installer.sh" ]; then
+ cd "$(find . -mindepth 1 -maxdepth 1 -type d)" && install "$@"
+ else
+ fatal "Cannot install netdata from source (the source directory does not include netdata-installer.sh). Leaving all files in ${ndtmpdir}"
+ fi
+fi
diff --git a/packaging/installer/methods/alpine.md b/packaging/installer/methods/alpine.md
new file mode 100644
index 0000000..fb44895
--- /dev/null
+++ b/packaging/installer/methods/alpine.md
@@ -0,0 +1,36 @@
+<!--
+title: "Install Netdata on Alpine 3.x"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/packaging/installer/methods/alpine.md
+-->
+
+# Install Netdata on Alpine 3.x
+
+Execute these commands to install Netdata in Alpine Linux 3.x:
+
+```sh
+# install required packages
+apk add alpine-sdk bash curl libuv-dev zlib-dev util-linux-dev libmnl-dev gcc make git autoconf automake pkgconfig python logrotate
+
+# if you plan to run node.js Netdata plugins
+apk add nodejs
+
+# download Netdata - the directory 'netdata' will be created
+git clone https://github.com/netdata/netdata.git --depth=100
+cd netdata
+
+# build it, install it, start it
+./netdata-installer.sh
+
+# make Netdata start at boot
+echo -e "#!/usr/bin/env bash\n/usr/sbin/netdata" >/etc/local.d/netdata.start
+chmod 755 /etc/local.d/netdata.start
+
+# make Netdata stop at shutdown
+echo -e "#!/usr/bin/env bash\nkillall netdata" >/etc/local.d/netdata.stop
+chmod 755 /etc/local.d/netdata.stop
+
+# enable the local service to start automatically
+rc-update add local
+```
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fpackaging%2Finstaller%2Fmethods%2Falpine&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/packaging/installer/methods/cloud-providers.md b/packaging/installer/methods/cloud-providers.md
new file mode 100644
index 0000000..943a649
--- /dev/null
+++ b/packaging/installer/methods/cloud-providers.md
@@ -0,0 +1,130 @@
+<!--
+title: "Install Netdata on cloud providers"
+description: "The Netdata Agent runs on all popular cloud providers, but often requires additional steps and configuration for full functionality."
+custom_edit_url: https://github.com/netdata/netdata/edit/master/packaging/installer/methods/cloud-providers.md
+-->
+
+# Install Netdata on cloud providers
+
+Netdata is fully compatible with popular cloud providers like Google Cloud Platform (GCP), Amazon Web Services (AWS),
+Azure, and others. You can install Netdata on cloud instances to monitor the apps/services running there, or use
+multiple instances in a [parent-child streaming](/streaming/README.md) configuration.
+
+In some cases, using Netdata on these cloud providers requires unique installation or configuration steps. This page
+aims to document some of those steps for popular cloud providers.
+
+> This document is a work-in-progress! If you find new issues specific to a cloud provider, or would like to help
+> clarify the correct workaround, please [create an
+> issue](https://github.com/netdata/netdata/issues/new?labels=feature+request,+needs+triage&template=feature_request.md)
+> with your process and instructions on using the provider's interface to complete the workaround.
+
+- [Recommended installation methods for cloud providers](#recommended-installation-methods-for-cloud-providers)
+- [Post-installation configuration](#post-installation-configuration)
+ - [Add a firewall rule to access Netdata's dashboard](#add-a-firewall-rule-to-access-netdatas-dashboard)
+
+## Recommended installation methods for cloud providers
+
+The best installation method depends on the instance's operating system, distribution, and version. For Linux instances,
+we recommend either the [`kickstart.sh` automatic installation script](kickstart.md) or [.deb/.rpm
+packages](packages.md).
+
+To see the full list of approved methods for each operating system/version we support, see our [distribution
+matrix](../../DISTRIBUTIONS.md). That table will guide you to the various supported methods for your cloud instance.
+
+If you have issues with Netdata after installation, look to the sections below to find the issue you're experiencing,
+followed by the solution for your provider.
+
+## Post-installation configuration
+
+Some cloud providers require you to take additional steps to properly configure your instance or its networking to access
+all of Netdata's features.
+
+### Add a firewall rule to access Netdata's dashboard
+
+If you cannot access Netdata's dashboard on your cloud instance via `http://HOST:19999`, and instead get an error page
+from your browser that says, "This site can't be reached" (Chrome) or "Unable to connect" (Firefox), you may need to
+configure your cloud provider's firewall.
+
+Cloud providers often create network-level firewalls that run separately from the instance itself. Both AWS and Google
+Cloud Platform call them Virtual Private Cloud (VPC) networks. These firewalls can apply even if you've disabled
+firewalls on the instance itself. Because you can modify these firewalls only via the cloud provider's web interface,
+it's easy to overlook them when trying to configure and access Netdata's dashboard.
+
+You can often confirm a firewall issue by querying the dashboard while connected to the instance via SSH: `curl
+http://localhost:19999/api/v1/info`. If you see JSON output, Netdata is running properly. If you try the same `curl`
+command from a remote system, and it fails, it's likely that a firewall is blocking your requests.
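+
+A minimal sketch of that two-step check, assuming `HOST` stands in for your instance's public IP address or hostname:
+
+```bash
+# on the instance itself (over SSH); JSON output means Netdata is running
+curl http://localhost:19999/api/v1/info
+
+# from a remote system; a timeout or refused connection here, with the check
+# above succeeding, usually points at the provider's network-level firewall
+curl --max-time 10 http://HOST:19999/api/v1/info
+```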
+
+Another option is to put Netdata behind a web server, which will proxy requests through standard HTTP/HTTPS ports
+(80/443), which are likely already open on your instance. We have a number of guides available:
+
+- [Apache](/docs/Running-behind-apache.md)
+- [Nginx](/docs/Running-behind-nginx.md)
+- [Caddy](/docs/Running-behind-caddy.md)
+- [HAProxy](/docs/Running-behind-haproxy.md)
+- [lighttpd](/docs/Running-behind-lighttpd.md)
+
+The next few sections outline how to add firewall rules to GCP, AWS, and Azure instances.
+
+#### Google Cloud Platform (GCP)
+
+To add a firewall rule, go to the [Firewall rules page](https://console.cloud.google.com/networking/firewalls/list) and
+click **Create firewall rule**.
+
+The following configuration has previously worked for Netdata running on GCP instances
+([see #7786](https://github.com/netdata/netdata/issues/7786)):
+
+```conf
+Name: <name>
+Type: Ingress
+Targets: <name-tag>
+Filters: 0.0.0.0/0
+Protocols/ports: 19999
+Action: allow
+Priority: 1000
+```
+
+Read GCP's [firewall documentation](https://cloud.google.com/vpc/docs/using-firewalls) for specific instructions on how
+to create a new firewall rule.
+
+#### Amazon Web Services (AWS) / EC2
+
+Sign in to the [AWS console](https://console.aws.amazon.com/) and navigate to the EC2 dashboard. Click on the **Security
+Groups** link in the navigation, beneath the **Network & Security** heading. Find the Security Group your instance
+belongs to, and either right-click on it or click the **Actions** button above to see a dropdown menu with **Edit
+inbound rules**.
+
+Add a new rule with the following options:
+
+```conf
+Type: Custom TCP
+Protocol: TCP
+Port Range: 19999
+Source: Anywhere
+Description: Netdata
+```
+
+You can also choose **My IP** as the source if you prefer.
+
+Click **Save** to apply your new inbound firewall rule.
+
+#### Azure
+
+Sign in to the [Azure portal](https://portal.azure.com) and open the virtual machine running Netdata. Click on the
+**Networking** link beneath the **Settings** header, then click on the **Add inbound security rule** button.
+
+Add a new rule with the following options:
+
+```conf
+Source: Any
+Source port ranges: *
+Destination: Any
+Destination port ranges: 19999
+Protocol: TCP
+Action: Allow
+Priority: 310
+Name: Netdata
+```
+
+Click **Add** to apply your new inbound security rule.
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fpackaging%2Finstaller%2Fmethods%2Fcloud-providers&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/packaging/installer/methods/freebsd.md b/packaging/installer/methods/freebsd.md
new file mode 100644
index 0000000..e2af417
--- /dev/null
+++ b/packaging/installer/methods/freebsd.md
@@ -0,0 +1,108 @@
+<!--
+title: "Install Netdata on FreeBSD"
+description: "Install Netdata on FreeBSD to monitor the health and performance of bare metal or VMs with thousands of real-time, per-second metrics."
+custom_edit_url: https://github.com/netdata/netdata/edit/master/packaging/installer/methods/freebsd.md
+-->
+
+# Install Netdata on FreeBSD
+
+> 💡 This document is maintained by Netdata's community, and may not be completely up-to-date. Please double-check the
+> details of the installation process, such as version numbers for downloadable packages, before proceeding.
+>
+> You can help improve this document by [submitting a
+> PR](https://github.com/netdata/netdata/edit/master/packaging/installer/methods/freebsd.md) with your recommended
+> improvements or changes. Thank you!
+
+## Install latest version
+
+This is how to install the latest Netdata version on FreeBSD:
+
+Install required packages (**need root permission**):
+
+```sh
+pkg install bash e2fsprogs-libuuid git curl autoconf automake pkgconf pidof Judy liblz4 libuv json-c cmake gmake
+```
+
+Download Netdata:
+
+```sh
+fetch https://github.com/netdata/netdata/releases/download/v1.26.0/netdata-v1.26.0.tar.gz
+```
+
+> ⚠️ Verify the latest version by either navigating to [Netdata's latest
+> release](https://github.com/netdata/netdata/releases/latest) or using `curl`:
+>
+> ```bash
+> basename $(curl -Ls -o /dev/null -w %{url_effective} https://github.com/netdata/netdata/releases/latest)
+> ```
+
+Unzip the downloaded file:
+
+```sh
+gunzip netdata*.tar.gz && tar xf netdata*.tar && rm -rf netdata*.tar
+```
+
+Install Netdata in `/opt/netdata`. If you want to enable automatic updates, add `--auto-update` or `-u` to install `netdata-updater` in `cron` (**need root permission**):
+
+```sh
+cd netdata-v* && ./netdata-installer.sh --install /opt && cp /opt/netdata/usr/sbin/netdata-claim.sh /usr/sbin/
+```
+
+You also need to enable the `netdata` service in `/etc/rc.conf`:
+
+```sh
+sysrc netdata_enable="YES"
+```
+
+Finally, and very importantly, update Netdata using the script provided by the Netdata team (**need root permission**):
+
+```sh
+cd /opt/netdata/usr/libexec/netdata/ && ./netdata-updater.sh
+```
+
+You can now access the Netdata dashboard by navigating to `http://NODE:19999`, replacing `NODE` with the IP address or hostname of your system.
+
+![image](https://user-images.githubusercontent.com/2662304/48304090-fd384080-e51b-11e8-80ae-eecb03118dda.png)
+
+From Netdata v1.12 and above, anonymous usage information is collected by default and sent to Google Analytics. To read
+more about the information collected and how to opt-out, check the [anonymous statistics
+page](/docs/anonymous-statistics.md).
+
+## Updating the Agent on FreeBSD
+
+If you did not pass the `--auto-update` or `-u` parameter to the installer to enable automatic updates, repeat the last step to update Netdata whenever a new version becomes available.
+The `netdata-updater.sh` script will update your Agent.
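+
+In other words, a manual update is the same command used above (a sketch, assuming the `/opt` install prefix; **need root permission**):
+
+```sh
+cd /opt/netdata/usr/libexec/netdata/ && ./netdata-updater.sh
+```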
+
+## Optional parameters to alter your installation
+| parameters | Description |
+|:-----:|-----------|
+|`--install <path>`| Install netdata in `<path>`. Ex: `--install /opt` will put netdata in `/opt/netdata`|
+| `--dont-start-it` | Do not (re)start netdata after installation|
+| `--dont-wait` | Run installation in non-interactive mode|
+| `--auto-update` or `-u` | Install netdata-updater in cron to update netdata automatically once per day|
+| `--stable-channel` | Use packages from GitHub release pages instead of GCS (nightly updates). This results in less frequent updates|
+| `--nightly-channel` | Use most recent nightly updates instead of GitHub releases. This results in more frequent updates|
+| `--disable-go` | Disable installation of go.d.plugin|
+| `--disable-ebpf` | Disable eBPF Kernel plugin (Default: enabled)|
+| `--disable-cloud` | Disable all Netdata Cloud functionality|
+| `--require-cloud` | Fail the install if it can't build Netdata Cloud support|
+| `--enable-plugin-freeipmi` | Enable the FreeIPMI plugin. Default: enable it when libipmimonitoring is available|
+| `--disable-plugin-freeipmi` | Disable the FreeIPMI plugin|
+| `--disable-https` | Explicitly disable TLS support|
+| `--disable-dbengine` | Explicitly disable DB engine support|
+| `--enable-plugin-nfacct` | Enable nfacct plugin. Default: enable it when libmnl and libnetfilter_acct are available|
+| `--disable-plugin-nfacct` | Disable nfacct plugin. Default: enable it when libmnl and libnetfilter_acct are available|
+| `--enable-plugin-xenstat` | Enable the xenstat plugin. Default: enable it when libxenstat and libyajl are available|
+| `--disable-plugin-xenstat` | Disable the xenstat plugin|
+| `--enable-backend-kinesis` | Enable AWS Kinesis backend. Default: enable it when libaws_cpp_sdk_kinesis and the libraries it depends on are available|
+| `--disable-backend-kinesis` | Disable AWS Kinesis backend. Default: enable it when libaws_cpp_sdk_kinesis and the libraries it depends on are available|
+| `--enable-backend-prometheus-remote-write` | Enable Prometheus remote write backend. Default: enable it when libprotobuf and libsnappy are available|
+| `--disable-backend-prometheus-remote-write` | Disable Prometheus remote write backend. Default: enable it when libprotobuf and libsnappy are available|
+| `--enable-backend-mongodb` | Enable MongoDB backend. Default: enable it when libmongoc is available|
+| `--disable-backend-mongodb` | Disable MongoDB backend|
+| `--enable-lto` | Enable Link-Time-Optimization. Default: enabled|
+| `--disable-lto` | Disable Link-Time-Optimization. Default: enabled|
+| `--disable-x86-sse` | Disable SSE instructions. By default SSE optimizations are enabled|
+| `--zlib-is-really-here` or `--libs-are-really-here` | If you get errors about missing zlib or libuuid but you know it is available, you might have a broken pkg-config. Use this option to proceed without checking pkg-config|
+|`--disable-telemetry` | Use this flag to opt-out from our anonymous telemetry program. (DO_NOT_TRACK=1)|
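+
+As an example, a plausible combination of these parameters for the `/opt` install used above, following stable releases and opting out of telemetry, might be:
+
+```sh
+./netdata-installer.sh --install /opt --stable-channel --disable-telemetry
+```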
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fpackaging%2Finstaller%2Fmethods%2Ffreebsd&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/packaging/installer/methods/freenas.md b/packaging/installer/methods/freenas.md
new file mode 100644
index 0000000..a0dafdf
--- /dev/null
+++ b/packaging/installer/methods/freenas.md
@@ -0,0 +1,24 @@
+<!--
+title: "Install Netdata on FreeNAS"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/packaging/installer/methods/freenas.md
+-->
+
+# Install Netdata on FreeNAS
+
+On FreeNAS-Corral-RELEASE (>=10.0.3 and <11.3), Netdata is pre-installed.
+
+To use Netdata, the service will need to be enabled and started from the FreeNAS [CLI](https://github.com/freenas/cli).
+
+To enable the Netdata service:
+
+```sh
+service netdata config set enable=true
+```
+
+To start the Netdata service:
+
+```sh
+service netdata start
+```
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fpackaging%2Finstaller%2Fmethods%2Ffreenas&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/packaging/installer/methods/kickstart-64.md b/packaging/installer/methods/kickstart-64.md
new file mode 100644
index 0000000..120cc9e
--- /dev/null
+++ b/packaging/installer/methods/kickstart-64.md
@@ -0,0 +1,96 @@
+<!--
+title: "Install Netdata with kickstart-static64.sh"
+description: "The kickstart-static64.sh script installs a pre-compiled static binary, including all dependencies required to connect to Netdata Cloud, with one command."
+custom_edit_url: https://github.com/netdata/netdata/edit/master/packaging/installer/methods/kickstart-64.md
+-->
+
+# Install Netdata with kickstart-static64.sh
+
+![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.requests_per_url&options=unaligned&dimensions=kickstart64&group=sum&after=-3600&label=last+hour&units=installations&value_color=orange&precision=0) ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.requests_per_url&options=unaligned&dimensions=kickstart64&group=sum&after=-86400&label=today&units=installations&precision=0)
+
+This page covers detailed instructions on using and configuring the installation script named `kickstart-static64.sh`.
+
+This method uses a pre-compiled static binary to install Netdata on any Intel/AMD 64bit Linux system and on any Linux
+distribution, even those with a broken or unsupported package manager.
+
+To install Netdata from a static binary package, including all dependencies required to connect to Netdata Cloud, and
+get _automatic nightly updates_, run the following as your normal user:
+
+```bash
+bash <(curl -Ss https://my-netdata.io/kickstart-static64.sh)
+```
+
+> This script installs Netdata at `/opt/netdata`.
+
+> See our [installation guide](/packaging/installer/README.md) for details about [automatic
+> updates](/packaging/installer/README.md#automatic-updates) or [nightly vs. stable
+> releases](/packaging/installer/README.md#nightly-vs-stable-releases).
+
+## What does `kickstart-static64.sh` do?
+
+The `kickstart-static64.sh` script does the following after being downloaded and run:
+
+- Checks to see if there is an existing installation and, if there is, updates it in preference to reinstalling.
+- Downloads the latest Netdata binary from the [binary-packages](https://github.com/netdata/binary-packages)
+ repository. You can also run any of these `.run` files with [makeself](https://github.com/megastep/makeself).
+- Installs Netdata by running `./netdata-installer.sh` from the source tree, including any options you might have
+ added.
+- Installs `netdata-updater.sh` to `cron.daily` to enable automatic updates, unless you added the `--no-updates`
+ option.
+- Prints a message about whether the installation succeeded or failed, for QA purposes.
+
+If your shell fails to handle the above one-liner, you can download and run the `kickstart-static64.sh` script manually.
+
+```sh
+# download the script with curl
+curl https://my-netdata.io/kickstart-static64.sh >/tmp/kickstart-static64.sh
+
+# or, download the script with wget
+wget -O /tmp/kickstart-static64.sh https://my-netdata.io/kickstart-static64.sh
+
+# run the downloaded script (any sh is fine, no need for bash)
+sh /tmp/kickstart-static64.sh
+```
+
+## Optional parameters to alter your installation
+
+The `kickstart-static64.sh` script passes all its parameters to `netdata-installer.sh`, which you can use to customize
+your installation. Here are a few important parameters:
+
+- `--dont-wait`: Enable automated installs by not prompting for permission to install any required packages.
+- `--dont-start-it`: Prevent the installer from starting Netdata automatically.
+- `--stable-channel`: Automatically update only on the release of new major versions.
+- `--nightly-channel`: Automatically update on every new nightly build.
+- `--disable-telemetry`: Opt-out of [anonymous statistics](/docs/anonymous-statistics.md) we use to make
+ Netdata better.
+- `--no-updates`: Prevent automatic updates of any kind.
+- `--reinstall`: If an existing installation is detected, reinstall instead of attempting to update it. Note
+ that this cannot be used to switch between installation types.
+- `--local-files`: Used for [offline installations](/packaging/installer/methods/offline.md). Pass four file paths:
+ the Netdata tarball, the checksum file, the go.d plugin tarball, and the go.d plugin config tarball, to force
+  kickstart to run the process using those files. This option conflicts with the `--stable-channel` option. If you set
+ this _and_ `--stable-channel`, Netdata will use the local files.
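+
+For example, a fully automated install that follows the stable channel and opts out of telemetry might combine the parameters above like this (a sketch; use whichever flags fit your setup):
+
+```bash
+bash <(curl -Ss https://my-netdata.io/kickstart-static64.sh) --dont-wait --stable-channel --disable-telemetry
+```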
+
+## Verify script integrity
+
+To use `md5sum` to verify the integrity of the `kickstart-static64.sh` script you will download using the one-line
+command above, run the following:
+
+```bash
+[ "047c86a7c8905955bee39b6980a28e30" = "$(curl -Ss https://my-netdata.io/kickstart-static64.sh | md5sum | cut -d ' ' -f 1)" ] && echo "OK, VALID" || echo "FAILED, INVALID"
+```
+
+If the script is valid, this command will return `OK, VALID`.
+
+## What's next?
+
+When you're finished with installation, check out our [single-node](/docs/quickstart/single-node.md) or
+[infrastructure](/docs/quickstart/infrastructure.md) monitoring quickstart guides based on your use case.
+
+Or, skip straight to [configuring the Netdata Agent](/docs/configure/nodes.md).
+
+Read through Netdata's [documentation](https://learn.netdata.cloud/docs), which is structured based on actions and
+solutions, to enable features like health monitoring, alarm notifications, long-term metrics storage, exporting to
+external databases, and more.
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fpackaging%2Finstaller%2Fmethods%2Fkickstart-64&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/packaging/installer/methods/kickstart.md b/packaging/installer/methods/kickstart.md
new file mode 100644
index 0000000..f825f80
--- /dev/null
+++ b/packaging/installer/methods/kickstart.md
@@ -0,0 +1,79 @@
+<!--
+title: "Install Netdata with kickstart.sh"
+description: "The kickstart.sh script installs Netdata from source, including all dependencies required to connect to Netdata Cloud, with a single command."
+custom_edit_url: https://github.com/netdata/netdata/edit/master/packaging/installer/methods/kickstart.md
+-->
+
+# Install Netdata with kickstart.sh
+
+![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.requests_per_url&options=unaligned&dimensions=kickstart&group=sum&after=-3600&label=last+hour&units=installations&value_color=orange&precision=0) ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.requests_per_url&options=unaligned&dimensions=kickstart&group=sum&after=-86400&label=today&units=installations&precision=0)
+
+This page covers detailed instructions on using and configuring the automatic one-line installation script named
+`kickstart.sh`.
+
+This method is fully automatic on all Linux distributions. To install Netdata from source, including all dependencies
+required to connect to Netdata Cloud, and get _automatic nightly updates_, run the following as your normal user:
+
+```bash
+bash <(curl -Ss https://my-netdata.io/kickstart.sh)
+```
+
+> See our [installation guide](../README.md) for details about [automatic updates](../README.md#automatic-updates) or
+> [nightly vs. stable releases](../README.md#nightly-vs-stable-releases).
+
+## What does `kickstart.sh` do?
+
+The `kickstart.sh` script does the following after being downloaded and run using `bash`:
+
+- Detects the Linux distribution and **installs the required system packages** for building Netdata. Unless you added
+ the `--dont-wait` option, it will ask for your permission first.
+- Checks for an existing installation and, if one is found, updates it instead of creating a new install.
+- Downloads the latest Netdata source tree to `/usr/src/netdata.git`.
+- Installs Netdata by running `./netdata-installer.sh` from the source tree, using any [optional
+ parameters](#optional-parameters-to-alter-your-installation) you have specified.
+- Installs `netdata-updater.sh` to `cron.daily`, so your Netdata installation will be updated with new nightly
+ versions, unless you override that with an [optional parameter](#optional-parameters-to-alter-your-installation).
+- Prints a message about whether the installation succeeded or failed, for QA purposes.
+
+## Optional parameters to alter your installation
+
+The `kickstart.sh` script passes all its parameters to `netdata-installer.sh`, which you can use to customize your
+installation. Here are a few important parameters:
+
+- `--dont-wait`: Enable automated installs by not prompting for permission to install any required packages.
+- `--dont-start-it`: Prevent the installer from starting Netdata automatically.
+- `--stable-channel`: Automatically update only on the release of new major versions.
+- `--nightly-channel`: Automatically update on every new nightly build.
+- `--disable-telemetry`: Opt-out of [anonymous statistics](/docs/anonymous-statistics.md) we use to make
+ Netdata better.
+- `--no-updates`: Prevent automatic updates of any kind.
+- `--reinstall`: If an existing install is detected, reinstall instead of trying to update it. Note that this
+ cannot be used to change installation types.
+- `--local-files`: Used for [offline installations](offline.md). Pass five file paths: the Netdata
+  tarball, the checksum file, the go.d plugin tarball, the go.d plugin config tarball, and the dependency
+  management script, to force kickstart to run the process using those files. This option conflicts with the
+  `--stable-channel` option. If you set this _and_ `--stable-channel`, Netdata will use the local files.
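+
+For example, an unattended nightly install that opts out of telemetry might combine the parameters above like this (a sketch; adjust the flags to your needs):
+
+```bash
+bash <(curl -Ss https://my-netdata.io/kickstart.sh) --dont-wait --disable-telemetry
+```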
+
+## Verify script integrity
+
+To use `md5sum` to verify the integrity of the `kickstart.sh` script you will download using the one-line command above,
+run the following:
+
+```bash
+[ "8df7a45b2abb336c84507b7c107bcba3" = "$(curl -Ss https://my-netdata.io/kickstart.sh | md5sum | cut -d ' ' -f 1)" ] && echo "OK, VALID" || echo "FAILED, INVALID"
+```
+
+If the script is valid, this command will return `OK, VALID`.
+
+## What's next?
+
+When you're finished with installation, check out our [single-node](/docs/quickstart/single-node.md) or
+[infrastructure](/docs/quickstart/infrastructure.md) monitoring quickstart guides based on your use case.
+
+Or, skip straight to [configuring the Netdata Agent](/docs/configure/nodes.md).
+
+Read through Netdata's [documentation](https://learn.netdata.cloud/docs), which is structured based on actions and
+solutions, to enable features like health monitoring, alarm notifications, long-term metrics storage, exporting to
+external databases, and more.
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fpackaging%2Finstaller%2Fmethods%2Fkickstart&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/packaging/installer/methods/kubernetes.md b/packaging/installer/methods/kubernetes.md
new file mode 100644
index 0000000..3e85928
--- /dev/null
+++ b/packaging/installer/methods/kubernetes.md
@@ -0,0 +1,209 @@
+<!--
+title: "Install Netdata on a Kubernetes cluster"
+description: "Use Netdata's Helm chart to bootstrap a Netdata monitoring and troubleshooting toolkit on your Kubernetes (k8s) cluster."
+custom_edit_url: https://github.com/netdata/netdata/edit/master/packaging/installer/methods/kubernetes.md
+-->
+
+# Install Netdata on a Kubernetes cluster
+
+This document details how to install Netdata on an existing Kubernetes (k8s) cluster. By following these directions, you
+will use Netdata's [Helm chart](https://github.com/netdata/helmchart) to bootstrap a Netdata deployment on your cluster.
+The Helm chart installs one parent pod for storing metrics and managing alarm notifications plus an additional child pod
+for every node in the cluster.
+
+Each child pod will collect metrics from the node it runs on, in addition to [compatible
+applications](https://github.com/netdata/helmchart#service-discovery-and-supported-services), plus any endpoints covered
+by our [generic Prometheus collector](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/prometheus),
+via [service discovery](https://github.com/netdata/agent-service-discovery/). Each child pod will also collect
+[cgroups](/collectors/cgroups.plugin/README.md),
+[Kubelet](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/k8s_kubelet), and
+[kube-proxy](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/k8s_kubeproxy) metrics from its node.
+
+To install Netdata on a Kubernetes cluster, you need:
+
+- A working cluster running Kubernetes v1.9 or newer.
+- The [kubectl](https://kubernetes.io/docs/reference/kubectl/overview/) command line tool, within [one minor version
+ difference](https://kubernetes.io/docs/tasks/tools/install-kubectl/#before-you-begin) of your cluster, on an
+ administrative system.
+- The [Helm package manager](https://helm.sh/) v3.0.0 or newer on the same administrative system.
+
+The default configuration creates one `parent` pod, installed on one of your cluster's nodes, and a DaemonSet for
+additional `child` pods. This DaemonSet ensures that every node in your k8s cluster also runs a `child` pod, including
+the node that also runs `parent`. The `child` pods collect metrics and stream the information to the `parent` pod, which
+uses two persistent volumes to store metrics and alarms. The `parent` pod also handles alarm notifications and enables
+the Netdata dashboard using an ingress controller.
+
+## Install the Netdata Helm chart
+
+We recommend you install the Helm chart using our Helm repository. In the `helm install` command, replace `netdata` with
+the release name of your choice.
+
+```bash
+helm repo add netdata https://netdata.github.io/helmchart/
+helm install netdata netdata/netdata
+```
+
+> You can also install the Netdata Helm chart by cloning the
+> [repository](https://artifacthub.io/packages/helm/netdata/netdata#install-by-cloning-the-repository) and manually
+> running Helm against the included chart.
+
+### Post-installation
+
+Run `kubectl get services` and `kubectl get pods` to confirm that your cluster now runs a `netdata` service, one
+`parent` pod, and three `child` pods.
+
+You've now installed Netdata on your Kubernetes cluster. See how to [access the Netdata
+dashboard](#access-the-netdata-dashboard) to confirm it's working as expected, or see the next section to [configure the
+Helm chart](#configure-the-netdata-helm-chart) to suit your cluster's particular setup.
+
+## Configure the Netdata Helm chart
+
+Read up on the various configuration options in the [Helm chart
+documentation](https://github.com/netdata/helmchart#configuration) to see if you need to change any of the options based
+on your cluster's setup.
+
+To change a setting, use the `--set` or `--values` arguments with `helm install` for the initial deployment, or with `helm upgrade` to upgrade an existing deployment.
+
+```bash
+helm install --set a.b.c=xyz netdata netdata/netdata
+helm upgrade --set a.b.c=xyz netdata netdata/netdata
+```
+
+For example, to change the size of the persistent metrics volume on the parent node:
+
+```bash
+helm install --set parent.database.volumesize=4Gi netdata netdata/netdata
+helm upgrade --set parent.database.volumesize=4Gi netdata netdata/netdata
+```
+
+### Configure service discovery
+
+As mentioned in the introduction, Netdata has a [service discovery
+plugin](https://github.com/netdata/agent-service-discovery/#service-discovery) to identify compatible pods and collect
+metrics from the service they run. The Netdata Helm chart installs this service discovery plugin into your k8s cluster.
+
+Service discovery scans your cluster for pods exposed on certain ports and with certain image names. By default, it
+looks for its supported services on the ports they most commonly listen on, and using default image names. Service
+discovery currently supports [popular
+applications](https://github.com/netdata/helmchart#service-discovery-and-supported-services), plus any endpoints covered
+by our [generic Prometheus collector](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/prometheus).
+
+If you haven't changed listening ports, image names, or other defaults, service discovery should find your pods, create
+the proper configurations based on the service that pod runs, and begin monitoring them immediately after deployment.
+
+However, if you have changed some of these defaults, you need to copy a file from the Netdata Helm chart repository,
+make your edits, and pass the changed file to `helm install`/`helm upgrade`.
+
+First, copy the file to your administrative system.
+
+```bash
+curl https://raw.githubusercontent.com/netdata/helmchart/master/charts/netdata/sdconfig/child.yml -o child.yml
+```
+
+Edit the new `child.yml` file according to your needs. See the [Helm chart
+configuration](https://github.com/netdata/helmchart#configuration) and the file itself for details.
+
+You can then run `helm install`/`helm upgrade` with the `--set-file` argument to use your configured `child.yml` file
+instead of the default, changing the path if you copied it elsewhere.
+
+```bash
+helm install --set-file sd.child.configmap.from.value=./child.yml netdata netdata/netdata
+helm upgrade --set-file sd.child.configmap.from.value=./child.yml netdata netdata/netdata
+```
+
+Your configured service discovery is now pushed to your cluster.
+
+## Access the Netdata dashboard
+
+Accessing the Netdata dashboard itself depends on how you set up your k8s cluster and the Netdata Helm chart. If you
+installed the Helm chart with the default `service.type=ClusterIP`, you will need to forward a port to the parent pod.
+
+```bash
+kubectl port-forward netdata-parent-0 19999:19999
+```
+
+You can now access the dashboard at `http://CLUSTER:19999`, replacing `CLUSTER` with the IP address or hostname of your
+k8s cluster.
+
+If you set up the Netdata Helm chart with `service.type=LoadBalancer`, you can find the external IP for the load
+balancer with `kubectl get services`, under the `EXTERNAL-IP` column.
+
+```bash
+kubectl get services
+NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
+cockroachdb ClusterIP None <none> 26257/TCP,8080/TCP 46h
+cockroachdb-public ClusterIP 10.245.148.233 <none> 26257/TCP,8080/TCP 46h
+kubernetes ClusterIP 10.245.0.1 <none> 443/TCP 47h
+netdata LoadBalancer 10.245.160.131 203.0.113.0 19999:32231/TCP 74m
+```
+
+In the above example, access the dashboard by navigating to `http://203.0.113.0:19999`.
+
+## Claim a Kubernetes cluster's parent pod
+
+You can [claim](/claim/README.md) a cluster's parent Netdata pod to see its real-time metrics alongside any other nodes
+you monitor using [Netdata Cloud](https://app.netdata.cloud).
+
+> Netdata Cloud does not currently support claiming child nodes because the Helm chart does not allocate a persistent
+> volume for them.
+
+Ensure persistence is enabled on the parent pod by running the following `helm upgrade` command.
+
+```bash
+helm upgrade \
+ --set parent.database.persistence=true \
+ --set parent.alarms.persistence=true \
+ netdata netdata/netdata
+```
+
+Next, find your claiming script in Netdata Cloud by clicking on your Space's dropdown, then **Manage your Space**. Click
+the **Nodes** tab. Netdata Cloud shows a script similar to the following:
+
+```bash
+sudo netdata-claim.sh -token=TOKEN -rooms=ROOM1,ROOM2 -url=https://app.netdata.cloud
+```
+
+You will need the values of `TOKEN` and `ROOM1,ROOM2` for the `helm upgrade` command below, which sets
+`parent.claiming.enabled`, `parent.claiming.token`, and `parent.claiming.rooms` to complete the parent pod claiming process.
+
+Run the following `helm upgrade` command after replacing `TOKEN` and `ROOM1,ROOM2` with the values found in the claiming
+script from Netdata Cloud. The quotation marks are required.
+
+```bash
+helm upgrade \
+ --set parent.claiming.enabled=true \
+ --set parent.claiming.token="TOKEN" \
+ --set parent.claiming.rooms="ROOM1,ROOM2" \
+ netdata netdata/netdata
+```
+
+The cluster terminates the old parent pod and creates a new one with the proper claiming configuration. You can see your
+parent pod in Netdata Cloud after a few moments. You can now [build new
+dashboards](https://learn.netdata.cloud/docs/cloud/visualize/dashboards) using the parent pod's metrics or run [Metric
+Correlations](https://learn.netdata.cloud/docs/cloud/insights/metric-correlations) to troubleshoot anomalies.
+
+![A parent Netdata pod in Netdata
+Cloud](https://user-images.githubusercontent.com/1153921/94497340-c1f49880-01ab-11eb-97b2-6044537565af.png)
+
+## Update/reinstall the Netdata Helm chart
+
+If you update the Helm chart's configuration, run `helm upgrade` to redeploy your Netdata service, replacing `netdata`
+with the name of the release, if you changed it upon installation:
+
+```bash
+helm upgrade netdata netdata/netdata
+```
+
+## What's next?
+
+Read the [monitoring a Kubernetes cluster guide](/docs/guides/monitor/kubernetes-k8s-netdata.md) for details on the
+various metrics and charts created by the Helm chart and some best practices on real-time troubleshooting using Netdata.
+
+Check out our [infrastructure quickstart](/docs/quickstart/infrastructure.md) for details about additional k8s monitoring features,
+and learn more about [configuring the Netdata Agent](/docs/configure/nodes.md) to better understand the settings you
+might be interested in changing.
+
+To further configure Netdata for your cluster, see our [Helm chart repository](https://github.com/netdata/helmchart) and
+the [service discovery repository](https://github.com/netdata/agent-service-discovery/).
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fpackaging%2Finstaller%2Fmethods%2Fkubernetes&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/packaging/installer/methods/macos.md b/packaging/installer/methods/macos.md
new file mode 100644
index 0000000..05883a7
--- /dev/null
+++ b/packaging/installer/methods/macos.md
@@ -0,0 +1,90 @@
+<!--
+title: "Install Netdata on macOS"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/packaging/installer/methods/macos.md
+-->
+
+# Install Netdata on macOS
+
+Netdata works on macOS, albeit with some limitations. The number of charts displaying system metrics is limited, but you
+can use any of Netdata's [external plugins](../../../collectors/plugins.d/README.md) to monitor any services you might
+have installed on your macOS system. You could also use a macOS system as the parent node in a [streaming
+configuration](/streaming/README.md).
+
+We recommend installing Netdata with the community-created and -maintained [**Homebrew
+package**](#install-netdata-with-the-homebrew-package).
+
+- [Install Netdata via the Homebrew package](#install-netdata-with-the-homebrew-package)
+- [Install Netdata from source](#install-netdata-from-source)
+
+## Install Netdata with the Homebrew package
+
+If you don't have [Homebrew](https://brew.sh/) installed already, begin with their installation script:
+
+```bash
+/bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install.sh)"
+```
+
+Next, you can use Homebrew's package, which installs Netdata and all its dependencies in a single step:
+
+```sh
+brew install netdata
+```
+
+> Homebrew will place your Netdata configuration directory at `/usr/local/etc/netdata/`. Use the `edit-config` script
+> and the files in this directory to configure Netdata. For reference, you can find stock configuration files at
+> `/usr/local/Cellar/netdata/{NETDATA_VERSION}/lib/netdata/conf.d/`.
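+
+A minimal sketch of that workflow, assuming the Homebrew paths from the note above:
+
+```sh
+cd /usr/local/etc/netdata
+sudo ./edit-config netdata.conf
+```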
+
+Skip on ahead to the [What's next?](#whats-next) section to find links to helpful post-installation guides.
+
+## Install Netdata from source
+
+We don't recommend installing Netdata from source on macOS, as it can be difficult to configure and install dependencies
+manually.
+
+First open your terminal of choice and install the Xcode development packages.
+
+```bash
+xcode-select --install
+```
+
+Click **Install** on the Software Update popup window that appears. Then, use the same terminal session to use Homebrew
+to install some of Netdata's prerequisites. You can omit `cmake` in case you do not want to use
+[Netdata Cloud](https://learn.netdata.cloud/docs/cloud/).
+
+```bash
+brew install ossp-uuid autoconf automake pkg-config libuv lz4 json-c openssl@1.1 libtool cmake
+```
+
+If you want to use the [database engine](/database/engine/README.md) to store your metrics, you need to download
+and install the [Judy library](https://sourceforge.net/projects/judy/) before proceeding to compile Netdata.
+
+Next, download Netdata from our GitHub repository:
+
+```bash
+git clone https://github.com/netdata/netdata.git
+```
+
+Finally, `cd` into the newly-created directory and then start the installer script:
+
+```bash
+cd netdata/
+sudo ./netdata-installer.sh --install /usr/local
+```
+
+> Your Netdata configuration directory will be at `/usr/local/netdata/`, and your stock configuration directory will
+> be at **`/usr/local/lib/netdata/conf.d/`.**
+>
+> The installer will also install a startup plist to start Netdata when your macOS system boots.
+
+## What's next?
+
+When you're finished with installation, check out our [single-node](/docs/quickstart/single-node.md) or
+[infrastructure](/docs/quickstart/infrastructure.md) monitoring quickstart guides based on your use case.
+
+Or, skip straight to [configuring the Netdata Agent](/docs/configure/nodes.md).
+
+Read through Netdata's [documentation](https://learn.netdata.cloud/docs), which is structured based on actions and
+solutions, to enable features like health monitoring, alarm notifications, long-term metrics storage, exporting to
+external databases, and more.
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fpackaging%2Finstaller%2Fmethods%2Fmacos&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/packaging/installer/methods/manual.md b/packaging/installer/methods/manual.md
new file mode 100644
index 0000000..6ece952
--- /dev/null
+++ b/packaging/installer/methods/manual.md
@@ -0,0 +1,229 @@
+<!--
+title: "Install Netdata on Linux from a Git checkout"
+description: "Use the Netdata Agent source code from GitHub, plus helper scripts to set up your system, to install Netdata without packages or binaries."
+custom_edit_url: https://github.com/netdata/netdata/edit/master/packaging/installer/methods/manual.md
+-->
+
+# Install Netdata on Linux from a Git checkout
+
+To install the latest git version of Netdata, please follow these two steps:
+
+1. [Prepare your system](#prepare-your-system)
+
+ Install the required packages on your system.
+
+2. [Install Netdata](#install-netdata)
+
+ Download and install Netdata. You can also update it the same way.
+
+## Prepare your system
+
+Use our automatic requirements installer (_no need to be `root`_), which attempts to find the packages that
+should be installed on your system to build and run Netdata. It supports a large variety of major Linux distributions
+and other operating systems and is regularly tested. You can find this tool [here](https://raw.githubusercontent.com/netdata/netdata/master/packaging/installer/install-required-packages.sh) or run it directly with `bash <(curl -sSL https://raw.githubusercontent.com/netdata/netdata/master/packaging/installer/install-required-packages.sh)`. Otherwise, read on for how to install the required packages manually:
+
+- **Alpine** Linux and its derivatives
+ - You have to install `bash` yourself, before using the installer.
+
+- **Arch** Linux and its derivatives
+ - You need arch/aur for package Judy.
+
+- **Gentoo** Linux and its derivatives
+
+- **Debian** Linux and its derivatives (including **Ubuntu**, **Mint**)
+
+- **Red Hat Enterprise Linux** and its derivatives (including **Fedora**, **CentOS**, **Amazon Machine Image**)
+ - Please note that for RHEL/CentOS you need
+ [EPEL](http://www.tecmint.com/how-to-enable-epel-repository-for-rhel-centos-6-5/).
+    In addition, RHEL/CentOS version 6 also needs
+ [OKay](https://okay.com.mx/blog-news/rpm-repositories-for-centos-6-and-7.html) for package libuv version 1.
+ - CentOS 8 / RHEL 8 requires a bit of extra work. See the dedicated section below.
+
+- **SUSE** Linux and its derivatives (including **openSUSE**)
+
+- **SLE12** Must have your system registered with SUSE Customer Center or have the DVD. See
+ [#1162](https://github.com/netdata/netdata/issues/1162)
+
+Install the packages for having a **basic Netdata installation** (system monitoring and many applications, without `mysql` / `mariadb`, `postgres`, `named`, hardware sensors and `SNMP`):
+
+```sh
+curl -Ss 'https://raw.githubusercontent.com/netdata/netdata/master/packaging/installer/install-required-packages.sh' >/tmp/install-required-packages.sh && bash /tmp/install-required-packages.sh -i netdata
+```
+
+Install all the required packages for **monitoring everything Netdata can monitor**:
+
+```sh
+curl -Ss 'https://raw.githubusercontent.com/netdata/netdata/master/packaging/installer/install-required-packages.sh' >/tmp/install-required-packages.sh && bash /tmp/install-required-packages.sh -i netdata-all
+```
+
+If the above do not work for you, please [open a github
+issue](https://github.com/netdata/netdata/issues/new?title=packages%20installer%20failed&labels=installation%20help&body=The%20experimental%20packages%20installer%20failed.%0A%0AThis%20is%20what%20it%20says:%0A%0A%60%60%60txt%0A%0Aplease%20paste%20your%20screen%20here%0A%0A%60%60%60)
+with a copy of the message you get on screen. We are trying to make it work everywhere (this is also why the script
+[reports back](https://github.com/netdata/netdata/issues/2054) success or failure for all its runs).
+
+---
+
+This is how to do it by hand:
+
+```sh
+# Debian / Ubuntu
+apt-get install zlib1g-dev uuid-dev libuv1-dev liblz4-dev libjudy-dev libssl-dev libelf-dev libmnl-dev gcc make git autoconf autoconf-archive autogen automake pkg-config curl python cmake
+
+# Fedora
+dnf install zlib-devel libuuid-devel libuv-devel lz4-devel Judy-devel openssl-devel elfutils-libelf-devel libmnl-devel gcc make git autoconf autoconf-archive autogen automake pkgconfig curl findutils python cmake
+
+# CentOS / Red Hat Enterprise Linux
+yum install autoconf automake curl gcc git libmnl-devel libuuid-devel openssl-devel libuv-devel lz4-devel Judy-devel elfutils-libelf-devel make nc pkgconfig python zlib-devel cmake
+
+# openSUSE
+zypper install zlib-devel libuuid-devel libuv-devel liblz4-devel judy-devel libopenssl-devel libelf-devel libmnl-devel gcc make git autoconf autoconf-archive autogen automake pkgconfig curl findutils python cmake
+```
+
+Once Netdata is compiled, the following packages are required to run it (they are already installed by the commands above):
+
+| package | description|
+|:-----:|-----------|
+| `libuuid` | part of `util-linux` for GUIDs management|
+| `zlib` | gzip compression for the internal Netdata web server|
+| `libuv` | Multi-platform support library with a focus on asynchronous I/O, version 1 or greater|
+
+*Netdata will fail to start without the above.*
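+
+To double-check that the build will find these libraries, `pkg-config` offers a quick sanity check (a sketch; the module names may differ slightly between distributions):
+
+```sh
+pkg-config --exists 'libuv >= 1.0' && echo "libuv OK"
+pkg-config --exists zlib && echo "zlib OK"
+pkg-config --exists uuid && echo "libuuid OK"
+```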
+
+Various Netdata plugins and features can be enabled, or work better, when the following packages are installed (they are optional):
+
+| package |description|
+|:-----:|-----------|
+| `bash`|for shell plugins and **alarm notifications**|
+| `curl`|for shell plugins and **alarm notifications**|
+| `iproute` or `iproute2`|for monitoring **Linux traffic QoS**<br/>use `iproute2` if `iproute` reports as not available or obsolete|
+| `python`|for most of the external plugins|
+| `python-yaml`|used for monitoring **beanstalkd**|
+| `python-beanstalkc`|used for monitoring **beanstalkd**|
+| `python-dnspython`|used for monitoring DNS query time|
+| `python-ipaddress`|used for monitoring **DHCPd**<br/>this package is required only if the system has python v2. python v3 has this functionality embedded|
+| `python-mysqldb`<br/>or<br/>`python-pymysql`|used for monitoring **mysql** or **mariadb** databases<br/>`python-mysqldb` is a lot faster and thus preferred|
+| `python-psycopg2`|used for monitoring **postgresql** databases|
+| `python-pymongo`|used for monitoring **mongodb** databases|
+| `nodejs`|used for `node.js` plugins for monitoring **named** and **SNMP** devices|
+| `lm-sensors`|for monitoring **hardware sensors**|
+| `libelf`|for monitoring kernel-level metrics using eBPF|
+| `libmnl`|for collecting netfilter metrics|
+| `netcat`|for shell plugins to collect metrics from remote systems|
+
+*Netdata will greatly benefit if you have the above packages installed, but it will still work without them.*
+
+Netdata DB engine can be enabled when these are installed (they are optional):
+
+| package | description|
+|:-----:|-----------|
+| `liblz4` | Extremely fast compression algorithm, version r129 or greater|
+| `Judy` | General purpose dynamic array|
+| `openssl`| Cryptography and SSL/TLS toolkit|
+
+*Netdata will greatly benefit if you have the above packages installed, but it will still work without them.*
+
+Netdata Cloud support may require the following packages to be installed:
+
+| package | description|
+|:--------:|-----------------------|
+| `cmake` | Needed at build time if you aren't using your distribution's version of libwebsockets or are building on a platform other than Linux|
+| `openssl` | Needed to secure communications with the Netdata Cloud|
+
+*Netdata will greatly benefit if you have the above packages installed, but it will still work without them.*
+
+### CentOS / RHEL 6.x
+
+On CentOS / RHEL 6.x, many of the dependencies for Netdata are only
+available with versions older than what we need, so special setup is
+required if manually installing packages.
+
+CentOS 6.x:
+
+- Enable the EPEL repo
+- Enable the additional repo from [okay.network](https://okay.network/blog-news/rpm-repositories-for-centos-6-and-7.html)
+
+And install the minimum required dependencies.
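+
+A consolidated sketch (the dependency list mirrors the RHEL `yum` line shown earlier; enable the OKay repository first using the RPM from the page linked above):
+
+```sh
+# Enable EPEL
+yum install -y epel-release
+
+# Install the minimum required dependencies
+yum install autoconf automake curl gcc git libmnl-devel libuuid-devel openssl-devel libuv-devel lz4-devel Judy-devel make nc pkgconfig python zlib-devel cmake
+```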
+
+### CentOS / RHEL 8.x
+
+For CentOS / RHEL 8.x a lot of development packages have moved out into their
+own separate repositories. Some other dependencies are either missing completely
+or have to be sourced by 3rd-parties.
+
+CentOS 8.x:
+
+- Enable the PowerTools repo
+- Enable the EPEL repo
+- Enable the Extra repo from [OKAY](https://okay.network/blog-news/rpm-repositories-for-centos-6-and-7.html)
+
+And install the minimum required dependencies:
+
+```sh
+# Enable config-manager
+yum install -y 'dnf-command(config-manager)'
+
+# Enable PowerTools
+yum config-manager --set-enabled powertools
+
+# Enable EPEL
+yum install -y epel-release
+
+# Install the repository that provides libuv-devel
+yum install -y http://repo.okay.com.mx/centos/8/x86_64/release/okay-release-1-3.el8.noarch.rpm
+
+# Install Devel Packages
+yum install autoconf automake curl gcc git cmake libuuid-devel openssl-devel libuv-devel lz4-devel make nc pkgconfig python3 zlib-devel
+
+# Install Judy-Devel directly
+yum install -y http://mirror.centos.org/centos/8/PowerTools/x86_64/os/Packages/Judy-devel-1.0.5-18.module_el8.1.0+217+4d875839.x86_64.rpm
+```
+
+---
+
+### Install Netdata
+
+Do this to install and run Netdata:
+
+```sh
+# download it - the directory 'netdata' will be created
+git clone https://github.com/netdata/netdata.git --depth=100
+cd netdata
+
+# run script with root privileges to build, install, start Netdata
+./netdata-installer.sh
+```
+
+- If you don't want to run it straight-away, add `--dont-start-it` option.
+
+- You can also append `--stable-channel` to fetch and install only the official releases from GitHub, instead of the nightly builds.
+
+- If you don't want to install it in the default directories, you can run the installer like this: `./netdata-installer.sh --install /opt`. This will install Netdata in `/opt/netdata`.
+
+- If your server does not have access to the internet and you have manually copied the installation directory to it, you will need to pass the option `--disable-go` to the installer. This prevents the installer from attempting to download and install `go.d.plugin`.
+
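+These options can be combined. For example, a sketch that installs under `/opt/netdata`, tracks only stable releases, and skips the automatic start:
+
+```sh
+./netdata-installer.sh --install /opt --stable-channel --dont-start-it
+```
+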
+Once the installer completes, the file `/etc/netdata/netdata.conf` will be created (if you changed the installation directory, the configuration will appear in that directory too).
+
+You can edit this file to set options. One common option to tweak is `history`, which controls the size of the in-memory database Netdata uses. By default it is `3600` seconds (an hour of data on the charts), which makes Netdata use about 10-15 MB of RAM (depending on the number of charts detected on your system). Check **\[[Memory Requirements]]**.
+
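+For example, a minimal sketch that doubles the retention to two hours (the `[global]` section name is an assumption; check the generated `netdata.conf` on your system):
+
+```conf
+# /etc/netdata/netdata.conf
+[global]
+    history = 7200
+```
+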
+To apply the changes you made, you have to restart Netdata.
+
+### 'nonrepresentable section on output' errors
+
+Our current build process unfortunately has some issues when using certain configurations of the `clang` C compiler on Linux.
+
+If the installation fails with errors like `/bin/ld: externaldeps/libwebsockets/libwebsockets.a(context.c.o): relocation R_X86_64_32 against '.rodata.str1.1' can not be used when making a PIE object; recompile with -fPIC`, and you are trying to build with `clang` on Linux, you will need to build Netdata using GCC to get a fully functional install.
+
+In most cases, you can do this by running `CC=gcc ./netdata-installer.sh`.
+
+## What's next?
+
+When you're finished with installation, check out our [single-node](/docs/quickstart/single-node.md) or
+[infrastructure](/docs/quickstart/infrastructure.md) monitoring quickstart guides based on your use case.
+
+Or, skip straight to [configuring the Netdata Agent](/docs/configure/nodes.md).
+
+Read through Netdata's [documentation](https://learn.netdata.cloud/docs), which is structured based on actions and
+solutions, to enable features like health monitoring, alarm notifications, long-term metrics storage, exporting to
+external databases, and more.
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fpackaging%2Finstaller%2Fmethods%2Fmanual&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/packaging/installer/methods/offline.md b/packaging/installer/methods/offline.md
new file mode 100644
index 0000000..c978dd6
--- /dev/null
+++ b/packaging/installer/methods/offline.md
@@ -0,0 +1,90 @@
+<!--
+title: "Install Netdata on offline systems"
+description: "Install the Netdata Agent on offline/air gapped systems to benefit from real-time, per-second monitoring without connecting to the internet."
+custom_edit_url: https://github.com/netdata/netdata/edit/master/packaging/installer/methods/offline.md
+-->
+
+# Install Netdata on offline systems
+
+The Netdata Agent installs on offline or air gapped systems with a few additional steps.
+
+By default, the `kickstart.sh` and `kickstart-static64.sh` scripts download Netdata assets, like the precompiled binary and a
+few dependencies, using the system's internet connection, but the Agent installer can also use equivalent files already
+present on the local filesystem.
+
+First, download the required files. If you're using `kickstart.sh`, you need the Netdata tarball, the checksums, the
+go.d plugin binary, and the go.d plugin configuration. If you're using `kickstart-static64.sh`, you need only the
+Netdata tarball and checksums.
+
+Download the files you need to a system of yours that's connected to the internet. Use the commands below, or visit the
+[latest Netdata release page](https://github.com/netdata/netdata/releases/latest) and [latest go.d plugin release
+page](https://github.com/netdata/go.d.plugin/releases) to download the required files manually.
+
+**If you're using `kickstart.sh`**, use the following commands:
+
+```bash
+cd /tmp
+
+curl -s https://my-netdata.io/kickstart.sh > kickstart.sh
+
+# Netdata tarball
+curl -s https://api.github.com/repos/netdata/netdata/releases/latest | grep "browser_download_url.*tar.gz" | cut -d '"' -f 4 | wget -qi -
+
+# Netdata checksums
+curl -s https://api.github.com/repos/netdata/netdata/releases/latest | grep "browser_download_url.*txt" | cut -d '"' -f 4 | wget -qi -
+
+# Netdata dependency handling script
+wget -q https://raw.githubusercontent.com/netdata/netdata/master/packaging/installer/install-required-packages.sh
+
+# go.d plugin
+# For binaries for OS types and architectures not listed on [go.d releases](https://github.com/netdata/go.d.plugin/releases/latest), kindly open a github issue and we will do our best to serve your request
+export OS=$(uname -s | tr '[:upper:]' '[:lower:]') ARCH=$(uname -m | sed -e 's/i386/386/g' -e 's/i686/386/g' -e 's/x86_64/amd64/g' -e 's/aarch64/arm64/g' -e 's/armv64/arm64/g' -e 's/armv6l/arm/g' -e 's/armv7l/arm/g' -e 's/armv5tel/arm/g') && curl -s https://api.github.com/repos/netdata/go.d.plugin/releases/latest | grep "browser_download_url.*${OS}-${ARCH}.tar.gz" | cut -d '"' -f 4 | wget -qi -
+
+# go.d configuration
+curl -s https://api.github.com/repos/netdata/go.d.plugin/releases/latest | grep "browser_download_url.*config.tar.gz" | cut -d '"' -f 4 | wget -qi -
+```
+
+**If you're using `kickstart-static64.sh`**, use the following commands:
+
+```bash
+cd /tmp
+
+curl -s https://my-netdata.io/kickstart-static64.sh > kickstart-static64.sh
+
+# Netdata static64 tarball
+curl -s https://api.github.com/repos/netdata/netdata/releases/latest | grep "browser_download_url.*gz.run" | cut -d '"' -f 4 | wget -qi -
+
+# Netdata checksums
+curl -s https://api.github.com/repos/netdata/netdata/releases/latest | grep "browser_download_url.*txt" | cut -d '"' -f 4 | wget -qi -
+```
+
+Move the downloaded files to the `/tmp` directory on the offline system in whichever way your security policy allows.
+
+Now you can run either the `kickstart.sh` or `kickstart-static64.sh` scripts using the `--local-files` option. This
+option requires you to specify the location and names of the files you just downloaded.
+
+> When using `--local-files`, the `kickstart.sh` or `kickstart-static64.sh` scripts won't download any Netdata assets
+> from the internet. But, you may still need a connection to install dependencies using your system's package manager.
+> The scripts will warn you if your system doesn't have all the dependencies.
+
+```bash
+# kickstart.sh
+bash kickstart.sh --local-files /tmp/netdata-(version-number-here).tar.gz /tmp/sha256sums.txt /tmp/go.d.plugin-(version-number-here).(OS)-(architecture).tar.gz /tmp/config.tar.gz /tmp/install-required-packages.sh
+
+# kickstart-static64.sh
+bash kickstart-static64.sh --local-files /tmp/netdata-(version-number-here).gz.run /tmp/sha256sums.txt
+```
+
+## What's next?
+
+When you're finished with installation, check out our [single-node](/docs/quickstart/single-node.md) or
+[infrastructure](/docs/quickstart/infrastructure.md) monitoring quickstart guides based on your use case.
+
+Or, skip straight to [configuring the Netdata Agent](/docs/configure/nodes.md).
+
+Read through Netdata's [documentation](https://learn.netdata.cloud/docs), which is structured based on actions and
+solutions, to enable features like health monitoring, alarm notifications, long-term metrics storage, exporting to
+external databases, and more.
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fpackages%2Finstaller%2Fmethods%2Foffline&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/packaging/installer/methods/packages.md b/packaging/installer/methods/packages.md
new file mode 100644
index 0000000..cf1e335
--- /dev/null
+++ b/packaging/installer/methods/packages.md
@@ -0,0 +1,48 @@
+<!--
+title: "Install Netdata with .deb/.rpm packages"
+description: "Install the Netdata Agent with Linux packages that support Ubuntu, Debian, Fedora, RHEL, CentOS, openSUSE, and more."
+custom_edit_url: https://github.com/netdata/netdata/edit/master/packaging/installer/methods/packages.md
+-->
+
+# Install Netdata with .deb/.rpm packages
+
+![](https://raw.githubusercontent.com/netdata/netdata/master/web/gui/images/packaging-beta-tag.svg?sanitize=true)
+
+Netdata provides our own flavour of binary packages for the most common operating systems that use the `.deb` and
+`.rpm` packaging formats.
+
+We provide two separate repositories, one for our stable releases and one for our nightly releases. Visit the repository
+pages and follow the quick set-up instructions to get started.
+
+1. Stable releases: Our stable production releases are hosted in the
+ [netdata/netdata](https://packagecloud.io/netdata/netdata) repository on packagecloud
+2. Nightly releases: Our latest releases are hosted in the
+ [netdata/netdata-edge](https://packagecloud.io/netdata/netdata-edge) repository on packagecloud
+
+## Using caching proxies with packagecloud repositories
+
+packagecloud only provides HTTPS access to repositories they host, which in turn means that Netdata's package
+repositories are only accessible via HTTPS. This is known to cause issues with some setups that use a caching proxy for
+package downloads.
+
+If you are using such a setup, there are a couple of ways to work around this:
+
+- Configure your proxy to automatically pass through HTTPS connections without caching them. This is the simplest
+  solution, but means that downloads of Netdata packages will not be cached (see the sketch after this list).
+- Mirror the repository locally on your proxy system, and use that mirror when installing on other systems. This
+ requires more setup and more disk space on the caching host, but it lets you cache the packages locally.
+- Some specific caching proxies may have alternative configuration options to deal with these issues. Find
+ such options in their documentation.
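+
+As an illustration of the first approach, a minimal sketch for Squid (using Squid here is an assumption; adapt the ACL to your own proxy):
+
+```conf
+# Let HTTPS CONNECT tunnels to packagecloud pass through uncached
+acl packagecloud dstdomain .packagecloud.io
+http_access allow packagecloud
+```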
+
+## What's next?
+
+When you're finished with installation, check out our [single-node](/docs/quickstart/single-node.md) or
+[infrastructure](/docs/quickstart/infrastructure.md) monitoring quickstart guides based on your use case.
+
+Or, skip straight to [configuring the Netdata Agent](/docs/configure/nodes.md).
+
+Read through Netdata's [documentation](https://learn.netdata.cloud/docs), which is structured based on actions and
+solutions, to enable features like health monitoring, alarm notifications, long-term metrics storage, exporting to
+external databases, and more.
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fpackages%2Finstaller%2Fmethods%2Fpackages&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/packaging/installer/methods/pfsense.md b/packaging/installer/methods/pfsense.md
new file mode 100644
index 0000000..ee1a453
--- /dev/null
+++ b/packaging/installer/methods/pfsense.md
@@ -0,0 +1,84 @@
+<!--
+title: "Install Netdata on pfSense"
+description: "Install Netdata on pfSense to monitor the health and performance of firewalls with thousands of real-time, per-second metrics."
+custom_edit_url: https://github.com/netdata/netdata/edit/master/packaging/installer/methods/pfsense.md
+-->
+
+# Install Netdata on pfSense
+
+> 💡 This document is maintained by Netdata's community, and may not be completely up-to-date. Please double-check the
+> details of the installation process, such as version numbers for downloadable packages, before proceeding.
+>
+> You can help improve this document by [submitting a
+> PR](https://github.com/netdata/netdata/edit/master/packaging/installer/methods/pfsense.md) with your recommended
+> improvements or changes. Thank you!
+
+## Install prerequisites/dependencies
+
+To install Netdata on pfSense, first run the following command (within a shell or under the **Diagnostics/Command**
+prompt within the pfSense web interface).
+
+```bash
+pkg install -y pkgconf bash e2fsprogs-libuuid libuv nano
+```
+
+Then run the following commands to download various dependencies from the FreeBSD repository.
+
+```sh
+pkg add http://pkg.freebsd.org/FreeBSD:11:amd64/latest/All/Judy-1.0.5_2.txz
+pkg add http://pkg.freebsd.org/FreeBSD:11:amd64/latest/All/json-c-0.15_1.txz
+pkg add http://pkg.freebsd.org/FreeBSD:11:amd64/latest/All/py37-certifi-2020.6.20.txz
+pkg add http://pkg.freebsd.org/FreeBSD:11:amd64/latest/All/py37-asn1crypto-1.3.0.txz
+pkg add http://pkg.freebsd.org/FreeBSD:11:amd64/latest/All/py37-pycparser-2.20.txz
+pkg add http://pkg.freebsd.org/FreeBSD:11:amd64/latest/All/py37-cffi-1.14.3.txz
+pkg add http://pkg.freebsd.org/FreeBSD:11:amd64/latest/All/py37-six-1.15.0.txz
+pkg add http://pkg.freebsd.org/FreeBSD:11:amd64/latest/All/py37-cryptography-2.6.1.txz
+pkg add http://pkg.freebsd.org/FreeBSD:11:amd64/latest/All/py37-idna-2.10.txz
+pkg add http://pkg.freebsd.org/FreeBSD:11:amd64/latest/All/py37-openssl-19.0.0.txz
+pkg add http://pkg.freebsd.org/FreeBSD:11:amd64/latest/All/py37-pysocks-1.7.1.txz
+pkg add http://pkg.freebsd.org/FreeBSD:11:amd64/latest/All/py37-urllib3-1.25.11,1.txz
+pkg add http://pkg.freebsd.org/FreeBSD:11:amd64/latest/All/py37-yaml-5.3.1.txz
+```
+
+> ⚠️ If any of the above commands return a `Not Found` error, you need to manually search for the latest package in the
+> [FreeBSD repository](https://www.freebsd.org/ports/). Search for the package's name, such as `py37-cffi`, find the
+> latest version number, and update the command accordingly.
+
+> ⚠️ On pfSense 2.4.5, Python version 3.7 may be installed by the system, in which case you should not install
+> Python from the FreeBSD repository as instructed above.
+
+> ⚠️ If you are using the `apcupsd` collector, you need to make sure that apcupsd is up before starting Netdata.
+> Otherwise an infinitely running `cat` process triggered by the default-activated apcupsd charts plugin will eat up CPU
+> and RAM (`/tmp/.netdata-charts.d-*/run-*`). This also applies to `OPNsense`.
+
+## Install Netdata
+
+You can now install Netdata from the FreeBSD repository.
+
+```bash
+pkg add http://pkg.freebsd.org/FreeBSD:11:amd64/latest/All/netdata-1.28.0.txz
+```
+
+> ⚠️ If the above command returns a `Not Found` error, you need to manually search for the latest version of Netdata in
+> the [FreeBSD repository](https://www.freebsd.org/ports/). Search for `netdata`, find the latest version number, and
+> update the command accordingly.
+
+You must edit `/usr/local/etc/netdata/netdata.conf` and change `bind to = 127.0.0.1` to `bind to = 0.0.0.0`.
+
+To start Netdata manually, run `service netdata onestart`.
+
+Visit the Netdata dashboard to confirm it's working: `http://<pfsenseIP>:19999`
+
+To start Netdata automatically every boot, add `service netdata onestart` as a Shellcmd entry within the pfSense web
+interface under **Services/Shellcmd**. You'll need to install the Shellcmd package beforehand under **System/Package
+Manager/Available Packages**. The Shellcmd Type should be set to `Shellcmd`.
+![](https://i.imgur.com/wcKiPe1.png) Alternatively, see
+<https://doc.pfsense.org/index.php/Installing_FreeBSD_Packages> for achieving the same via the command line and
+scripts.
+
+If you experience an issue with `/usr/bin/install` being absent in pfSense 2.3 or earlier, update pfSense or use a
+workaround from <https://redmine.pfsense.org/issues/6643>
+
+**Note:** In pfSense, the Netdata configuration files are located under `/usr/local/etc/netdata`.
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fpackaging%2Finstaller%2Fmethods%2Fpfsense&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/packaging/installer/methods/source.md b/packaging/installer/methods/source.md
new file mode 100644
index 0000000..e0827fc
--- /dev/null
+++ b/packaging/installer/methods/source.md
@@ -0,0 +1,291 @@
+<!--
+title: "Manually build Netdata from source"
+description: "Package maintainers and power users may be interested in manually building Netdata from source without using any of our installation scripts."
+custom_edit_url: https://github.com/netdata/netdata/edit/master/packaging/installer/methods/source.md
+-->
+
+# Manually build Netdata from source
+
+These instructions are for advanced users and distribution package
+maintainers. Unless this describes you, you almost certainly want
+to follow [our guide for manually installing Netdata from a git
+checkout](/packaging/installer/methods/manual.md) instead.
+
+## Required dependencies
+
+At a bare minimum, Netdata requires the following libraries and tools
+to build and run successfully:
+
+- libuuid
+- libuv version 1.0 or newer
+- zlib
+- GNU autoconf
+- GNU automake
+- GCC or Xcode (Clang is known to have issues in certain configurations, see [Using Clang](#using-clang))
+- A version of `make` compatible with GNU automake
+- Git (we use git in the build system to generate version info; you don't need a full install, just a working `git show` command)
+
+Additionally, the following build time features require additional dependencies:
+
+- TLS support for the web GUI:
+ - OpenSSL 1.0.2 or newer _or_ LibreSSL 3.0.0 or newer.
+- dbengine metric storage:
+ - liblz4 r129 or newer
+  - OpenSSL 1.0 or newer (LibreSSL _may_ work, but is largely untested).
+ - [libJudy](http://judy.sourceforge.net/)
+- Netdata Cloud support:
+ - A working internet connection
+ - A recent version of CMake
+ - OpenSSL 1.0.2 or newer _or_ LibreSSL 3.0.0 or newer.
+ - JSON-C (may be provided by the user as shown below, or by the system)
+
+## Preparing the source tree
+
+Certain features in Netdata require custom versions of specific libraries,
+which the build system will link statically into Netdata. These
+libraries and their header files must be copied into specific locations
+in the source tree to be used.
+
+### Netdata cloud
+
+Netdata Cloud functionality requires custom builds of libmosquitto and
+libwebsockets.
+
+#### libmosquitto
+
+Netdata maintains a custom fork of libmosquitto at
+https://github.com/netdata/mosquitto with patches to allow for proper
+integration with libwebsockets, which is needed for correct operation of
+Netdata Cloud functionality. To prepare this library for the build system:
+
+1. Verify the tag that Netdata expects to be used by checking the contents
+ of `packaging/mosquitto.version` in your Netdata sources.
+2. Obtain the sources for that version by either:
+ - Navigating to https://github.com/netdata/mosquitto/releases and
+ downloading and unpacking the source code archive for that release.
+ - Cloning the repository with `git` and checking out the required tag.
+3. If building on a platform other than Linux, prepare the mosquitto
+ sources by running `cmake -D WITH_STATIC_LIBRARIES:boolean=YES .` in
+ the mosquitto source directory.
+4. Build mosquitto by running `make -C lib` in the mosquitto source directory.
+5. In the Netdata source directory, create a directory called `externaldeps/mosquitto`.
+6. Copy `lib/mosquitto.h` from the mosquitto source directory to
+ `externaldeps/mosquitto/mosquitto.h` in the Netdata source tree.
+7. Copy `lib/libmosquitto.a` from the mosquitto source directory to
+ `externaldeps/mosquitto/libmosquitto.a` in the Netdata source tree. If
+ building on a platform other than Linux, the file that needs to be
+ copied will instead be named `lib/libmosquitto_static.a`, but it
+ still needs to be copied to `externaldeps/mosquitto/libmosquitto.a`.
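+
+Put together, the steps above might look like this on Linux (a sketch; the `/tmp/mosquitto` clone location is an assumption):
+
+```sh
+MOSQUITTO_TAG="$(cat packaging/mosquitto.version)"
+git clone --branch "${MOSQUITTO_TAG}" https://github.com/netdata/mosquitto.git /tmp/mosquitto
+make -C /tmp/mosquitto/lib
+mkdir -p externaldeps/mosquitto
+cp /tmp/mosquitto/lib/mosquitto.h externaldeps/mosquitto/mosquitto.h
+cp /tmp/mosquitto/lib/libmosquitto.a externaldeps/mosquitto/libmosquitto.a
+```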
+
+#### libwebsockets
+
+Netdata uses the standard upstream version of libwebsockets located at
+https://github.com/warmcat/libwebsockets, but requires a build with SOCKS5
+support, which is not enabled by most pre-built versions. Currently,
+we do not support using a system copy of libwebsockets. To prepare this
+library for the build system:
+
+1. Verify the tag that Netdata expects to be used by checking the contents
+ of `packaging/libwebsockets.version` in your Netdata sources.
+2. Obtain the sources for that version by either:
+ - Navigating to https://github.com/warmcat/libwebsockets/releases and
+ downloading and unpacking the source code archive for that release.
+ - Cloning the repository with `git` and checking out the required tag.
+3. Prepare the libwebsockets sources by running `cmake -D
+ LWS_WITH_SOCKS5:bool=ON .` in the libwebsockets source directory.
+4. Build libwebsockets by running `make` in the libwebsockets source
+ directory.
+5. In the Netdata source directory, create a directory called
+ `externaldeps/libwebsockets`.
+6. Copy `lib/libwebsockets.a` from the libwebsockets source directory to
+ `externaldeps/libwebsockets/libwebsockets.a` in the Netdata source tree.
+7. Copy the entire contents of `lib/include` from the libwebsockets source
+ directory to `externaldeps/libwebsockets/include` in the Netdata source tree.
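+
+The equivalent sketch for libwebsockets (again assuming a `/tmp` clone location):
+
+```sh
+LWS_TAG="$(cat packaging/libwebsockets.version)"
+git clone --branch "${LWS_TAG}" https://github.com/warmcat/libwebsockets.git /tmp/libwebsockets
+(cd /tmp/libwebsockets && cmake -D LWS_WITH_SOCKS5:bool=ON . && make)
+mkdir -p externaldeps/libwebsockets
+cp /tmp/libwebsockets/lib/libwebsockets.a externaldeps/libwebsockets/libwebsockets.a
+cp -r /tmp/libwebsockets/lib/include externaldeps/libwebsockets/include
+```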
+
+#### JSON-C
+
+Netdata requires the use of JSON-C for JSON parsing when using Netdata
+Cloud. Netdata is able to use a system-provided copy of JSON-C, but
+some systems may not provide it. If your system does not provide JSON-C,
+you can do the following to prepare a copy for the build system:
+
+1. Verify the tag that Netdata expects to be used by checking the contents
+ of `packaging/jsonc.version` in your Netdata sources.
+2. Obtain the sources for that version by either:
+ - Navigating to https://github.com/json-c/json-c and downloading
+ and unpacking the source code archive for that release.
+ - Cloning the repository with `git` and checking out the required tag.
+3. Prepare the JSON-C sources by running `cmake -DBUILD_SHARED_LIBS=OFF .`
+ in the JSON-C source directory.
+4. Build JSON-C by running `make` in the JSON-C source directory.
+5. In the Netdata source directory, create a directory called
+ `externaldeps/jsonc`.
+6. Copy `libjson-c.a` from the JSON-C source directory to
+ `externaldeps/jsonc/libjson-c.a` in the Netdata source tree.
+7. Copy all of the header files (`*.h`) from the JSON-C source directory
+ to `externaldeps/jsonc/json-c` in the Netdata source tree.
+
+## Building Netdata
+
+Once the source tree has been prepared, Netdata is ready to be configured
+and built. Netdata currently uses GNU autotools as its primary build
+system. To build Netdata this way:
+
+1. Run `autoreconf -ivf` in the Netdata source tree.
+2. Run `./configure` in the Netdata source tree.
+3. Run `make` in the Netdata source tree.
+
+### Configure options
+
+Netdata provides a number of build time configure options. This section
+lists some of the ones you are most likely to need:
+
+- `--prefix`: Specify the prefix under which Netdata will be installed.
+- `--with-webdir`: Specify a path relative to the prefix in which to
+ install the web UI files.
+- `--disable-cloud`: Disables all Netdata Cloud functionality for
+ this build.
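+
+For example, a sketch that builds Netdata under `/opt/netdata` without Netdata Cloud support:
+
+```sh
+autoreconf -ivf
+./configure --prefix=/opt/netdata --disable-cloud
+make
+```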
+
+### Using Clang
+
+Netdata is primarily developed using GCC, but in most cases we also
+build just fine using Clang. Under some build configurations of Clang
+itself, you may see build failures with the linker reporting errors
+about `nonrepresentable section on output`. We currently do not have a
+conclusive fix for this issue (the obvious fix leads to other issues which
+we haven't been able to fix yet), and unfortunately the only workaround
+is to use a different build of Clang or to use GCC.
+
+### Linking errors relating to OpenSSL
+
+Netdata's build system currently does not reliably support building
+on systems which have multiple ABI incompatible versions of OpenSSL
+installed. In such situations, you may encounter linking errors due to
+Netdata trying to build against headers for one version but link to a
+different version.
+
+## Additional components
+
+A full featured install of Netdata requires some additional components
+which must be built and installed separately from the main Netdata
+agent. All of these should be handled _after_ installing Netdata itself.
+
+### React dashboard
+
+The above build steps include a deprecated web UI for Netdata that lacks
+support for Netdata Cloud. To get a fully featured dashboard, you must
+install our new React dashboard.
+
+#### Installing the pre-built React dashboard
+
+We provide pre-built archives of the React dashboard for each release
+(these are also used during our normal install process). To use one
+of these:
+
+1. Verify the release version that Netdata expects to be used by checking
+ the contents of `packaging/dashboard.version` in your Netdata sources.
+2. Go to https://github.com/netdata/dashboard/releases and download the
+ `dashboard.tar.gz` file for the required release.
+3. Unpack the downloaded archive to a temporary directory.
+4. Copy the contents of the `build` directory from the extracted
+ archive to `/usr/share/netdata/web` or the equivalent location for
+ your build of Netdata. This _will_ overwrite some files in the target
+ location.
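+
+A sketch of the steps above (the release asset URL pattern and the target path are assumptions; check the releases page and your install layout):
+
+```sh
+DASHBOARD_VER="$(cat packaging/dashboard.version)"
+curl -sSL -o /tmp/dashboard.tar.gz "https://github.com/netdata/dashboard/releases/download/${DASHBOARD_VER}/dashboard.tar.gz"
+mkdir -p /tmp/dashboard && tar -xzf /tmp/dashboard.tar.gz -C /tmp/dashboard
+cp -a /tmp/dashboard/build/* /usr/share/netdata/web/
+```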
+
+#### Building the React dashboard locally
+
+Alternatively, you may wish to build the React dashboard locally. Doing
+so requires a recent version of Node.JS with a working install of
+NPM. Once you have the required tools, do the following:
+
+1. Verify the release version that Netdata expects to be used by checking
+ the contents of `packaging/dashboard.version` in your Netdata sources.
+2. Obtain the sources for that version by either:
+ - Navigating to https://github.com/netdata/dashboard and downloading
+ and unpacking the source code archive for that release.
+ - Cloning the repository with `git` and checking out the required tag.
+3. Run `npm install` in the dashboard source tree.
+4. Run `npm run build` in the dashboard source tree.
+5. Copy the contents of the `build` directory just like step 4 of
+ installing the pre-built React dashboard.
+
+### Go collectors
+
+A number of the collectors for Netdata are written in Go instead of C,
+and are developed in a separate repository from the main Netdata code.
+An installation without these collectors is still usable, but will be
+unable to collect metrics for a number of network services the system
+may be providing. You can either install a pre-built copy of these
+collectors, or build them locally.
+
+#### Installing the pre-built Go collectors
+
+We provide pre-built binaries of the Go collectors for all the platforms
+we officially support. To use one of these:
+
+1. Verify the release version that Netdata expects to be used by checking
+ the contents of `packaging/go.d.version` in your Netdata sources.
+2. Go to https://github.com/netdata/go.d.plugin/releases, select the
+ required release, and download the `go.d.plugin-*.tar.gz` file
+   for your system type and CPU architecture and the `config.tar.gz`
+ configuration file archive.
+3. Extract the `go.d.plugin-*.tar.gz` archive into a temporary
+ location, and then copy the single file in the archive to
+ `/usr/libexec/netdata/plugins.d` or the equivalent location for your
+ build of Netdata and rename it to `go.d.plugin`.
+4. Extract the `config.tar.gz` archive to a temporary location and then
+ copy the contents of the archive to `/etc/netdata` or the equivalent
+ location for your build of Netdata.
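+
+A sketch of the steps above for a 64-bit x86 Linux system (asset names and paths are assumptions; check the releases page and your install layout):
+
+```sh
+GO_D_VER="$(cat packaging/go.d.version)"
+curl -sSL "https://github.com/netdata/go.d.plugin/releases/download/${GO_D_VER}/go.d.plugin-${GO_D_VER}.linux-amd64.tar.gz" | tar -xz -C /tmp
+# the archive contains a single binary; install it as go.d.plugin
+install -m 0750 /tmp/go.d.plugin* /usr/libexec/netdata/plugins.d/go.d.plugin
+curl -sSL "https://github.com/netdata/go.d.plugin/releases/download/${GO_D_VER}/config.tar.gz" | tar -xz -C /etc/netdata
+```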
+
+#### Building the Go collectors locally
+
+Alternatively, you may wish to build the Go collectors locally
+yourself. Doing so requires a working installation of Golang 1.13 or
+newer. Once you have the required tools, do the following:
+
+1. Verify the release version that Netdata expects to be used by checking
+ the contents of `packaging/go.d.version` in your Netdata sources.
+2. Obtain the sources for that version by either:
+ - Navigating to https://github.com/netdata/go.d.plugin and downloading
+ and unpacking the source code archive for that release.
+ - Cloning the repository with `git` and checking out the required tag.
+3. Run `make` in the go.d.plugin source tree.
+4. Copy `bin/godplugin` to `/usr/libexec/netdata/plugins.d` or the
+ equivalent location for your build of Netdata and rename it to
+ `go.d.plugin`.
+5. Copy the contents of the `config` directory to `/etc/netdata` or the
+ equivalent location for your build of Netdata.
+
+### eBPF collector
+
+On Linux systems, Netdata has support for using the kernel's eBPF
+interface to monitor performance-related VFS, network, and process events,
+allowing for insights into process lifetimes and file access
+patterns. Using this functionality requires additional code managed in
+a separate repository from the core Netdata agent. You can either install
+a pre-built copy of the required code, or build it locally.
+
+#### Installing the pre-built eBPF code
+
+We provide pre-built copies of the eBPF code for 64-bit x86 systems
+using glibc or musl. To use one of these:
+
+1. Verify the release version that Netdata expects to be used by checking
+ the contents of `packaging/ebpf.version` in your Netdata sources.
+2. Go to https://github.com/netdata/kernel-collector/releases, select the
+ required release, and download the `netdata-kernel-collector-*.tar.xz`
+   file for the libc variant your system uses (either musl or glibc).
+3. Extract the contents of the archive to a temporary location, and then
+ copy all of the `.o` and `.so.*` files and the contents of the `library/`
+ directory to `/usr/libexec/netdata/plugins.d` or the equivalent location
+ for your build of Netdata.
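+
+A sketch of the steps above (the archive name pattern is an assumption; check the release you downloaded):
+
+```sh
+mkdir -p /tmp/ebpf
+tar -xJf netdata-kernel-collector-glibc-*.tar.xz -C /tmp/ebpf
+cp -a /tmp/ebpf/*.o /tmp/ebpf/*.so.* /tmp/ebpf/library/* /usr/libexec/netdata/plugins.d/
+```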
+
+#### Building the eBPF code locally
+
+Alternatively, you may wish to build the eBPF code locally yourself. For
+instructions, please consult [the README file for our kernel-collector
+repository](https://github.com/netdata/kernel-collector/blob/master/README.md),
+which outlines both the required dependencies, as well as multiple
+options for building the code.
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fpackaging%2Finstaller%2Fmethods%2Fsource&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/packaging/installer/methods/synology.md b/packaging/installer/methods/synology.md
new file mode 100644
index 0000000..4a0ae35
--- /dev/null
+++ b/packaging/installer/methods/synology.md
@@ -0,0 +1,52 @@
+<!--
+title: "Install Netdata on Synology"
+description: "The Netdata Agent can be installed on AMD64-compatible NAS systems using the 64-bit pre-compiled static binary."
+custom_edit_url: https://github.com/netdata/netdata/edit/master/packaging/installer/methods/synology.md
+-->
+
+# Install Netdata on Synology
+
+The documentation previously recommended installing the Debian Chroot package from the Synology community package
+sources and then running Netdata from within the chroot. This does not work, as the chroot environment does not have
+access to `/proc`, and therefore exposes very few metrics to Netdata. Additionally, [this
+issue](https://github.com/SynoCommunity/spksrc/issues/2758), still open as of 2018/06/24, indicates that the Debian
+Chroot package is not suitable for DSM versions greater than version 5 and may corrupt system libraries and render the
+NAS unable to boot.
+
+The good news is that the [64-bit static installer](kickstart-64.md) works fine if your NAS uses the amd64
+architecture. It installs everything into `/opt/netdata`, making future removal safe and simple.
+
+## Run as netdata user
+
+When Netdata is first installed, it will run as _root_. This may or may not be acceptable for you, and since other
+installations run it as the `netdata` user, you might wish to do the same. This requires some extra work:
+
+1. Create a group `netdata` via the Synology group interface. Give it no access to anything.
+2. Create a user `netdata` via the Synology user interface. Give it no access to anything and a random password. Assign
+ the user to the `netdata` group. Netdata will chuid to this user when running.
+3. Change ownership of the following directories, as defined in [Netdata
+ Security](/docs/netdata-security.md#security-design):
+
+```sh
+chown -R root:netdata /opt/netdata/usr/share/netdata
+chown -R netdata:netdata /opt/netdata/var/lib/netdata /opt/netdata/var/cache/netdata
+chown -R netdata:root /opt/netdata/var/log/netdata
+```
+
+## Create startup script
+
+Additionally, as of 2018/06/24, the Netdata installer doesn't recognize DSM as an operating system, so no init script is
+installed. You'll have to do this manually:
+
+1. Add [this file](https://gist.github.com/oskapt/055d474d7bfef32c49469c1b53e8225f) as `/etc/rc.netdata`. Make it
+ executable with `chmod 0755 /etc/rc.netdata`.
+2. Add or edit `/etc/rc.local` and add a line calling `/etc/rc.netdata` to have it start on boot:
+
+```conf
+# Netdata startup
+[ -x /etc/rc.netdata ] && /etc/rc.netdata start
+```
+
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fpackaging%2Finstaller%2Fmethods%2Fsynology&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/packaging/installer/netdata-uninstaller.sh b/packaging/installer/netdata-uninstaller.sh
new file mode 100755
index 0000000..f62934f
--- /dev/null
+++ b/packaging/installer/netdata-uninstaller.sh
@@ -0,0 +1,499 @@
+#!/usr/bin/env bash
+#
+# This is the netdata uninstaller script
+#
+# Variables needed by script and taken from '.environment' file:
+# - NETDATA_PREFIX
+# - NETDATA_ADDED_TO_GROUPS
+#
+# Copyright: SPDX-License-Identifier: GPL-3.0-or-later
+#
+# Author: Paweł Krupa <paulfantom@gmail.com>
+# Author: Pavlos Emm. Katsoulakis <paul@netdata.cloud>
+
+usage="$(basename "$0") [-h] [-f ] -- program to calculate the answer to life, the universe and everything
+
+where:
+ -e, --env path to environment file (defauls to '/etc/netdata/.environment'
+ -f, --force force uninstallation and do not ask any questions
+ -h show this help text
+ -y, --yes flag needs to be set to proceed with uninstallation"
+
+FILE_REMOVAL_STATUS=0
+ENVIRONMENT_FILE="/etc/netdata/.environment"
+INTERACTIVITY="-i"
+YES=0
+while :; do
+ case "$1" in
+ -h | --help)
+ echo "$usage" >&2
+ exit 1
+ ;;
+ -f | --force)
+ INTERACTIVITY="-f"
+ shift
+ ;;
+ -y | --yes)
+ YES=1
+ shift
+ ;;
+ -e | --env)
+ ENVIRONMENT_FILE="$2"
+ shift 2
+ ;;
+ -*)
+ echo "$usage" >&2
+ exit 1
+ ;;
+ *) break ;;
+ esac
+done
+
+if [ "$YES" != "1" ]; then
+ echo >&2 "This script will REMOVE netdata from your system."
+ echo >&2 "Run it again with --yes to do it."
+ exit 1
+fi
+
+if [[ $EUID -ne 0 ]]; then
+ echo >&2 "This script SHOULD be run as root or otherwise it won't delete all installed components."
+ key="n"
+ read -r -s -n 1 -p "Do you want to continue as non-root user [y/n] ? " key
+ if [ "$key" != "y" ] && [ "$key" != "Y" ]; then
+ exit 1
+ fi
+fi
+
+# -----------------------------------------------------------------------------
+# portable service command
+
+service_cmd="$(command -v service 2> /dev/null)"
+rcservice_cmd="$(command -v rc-service 2> /dev/null)"
+systemctl_cmd="$(command -v systemctl 2> /dev/null)"
+service() {
+
+ local cmd="${1}" action="${2}"
+
+ if [ -n "${systemctl_cmd}" ]; then
+ run "${systemctl_cmd}" "${action}" "${cmd}"
+ return $?
+ elif [ -n "${service_cmd}" ]; then
+ run "${service_cmd}" "${cmd}" "${action}"
+ return $?
+ elif [ -n "${rcservice_cmd}" ]; then
+ run "${rcservice_cmd}" "${cmd}" "${action}"
+ return $?
+ fi
+ return 1
+}
+
+# -----------------------------------------------------------------------------
+
+setup_terminal() {
+ TPUT_RESET=""
+ TPUT_YELLOW=""
+ TPUT_WHITE=""
+ TPUT_BGRED=""
+ TPUT_BGGREEN=""
+ TPUT_BOLD=""
+ TPUT_DIM=""
+
+ # Is stderr on the terminal? If not, then fail
+ test -t 2 || return 1
+
+ if command -v tput 1> /dev/null 2>&1; then
+ if [ $(($(tput colors 2> /dev/null))) -ge 8 ]; then
+ # Enable colors
+ TPUT_RESET="$(tput sgr 0)"
+ TPUT_YELLOW="$(tput setaf 3)"
+ TPUT_WHITE="$(tput setaf 7)"
+ TPUT_BGRED="$(tput setab 1)"
+ TPUT_BGGREEN="$(tput setab 2)"
+ TPUT_BOLD="$(tput bold)"
+ TPUT_DIM="$(tput dim)"
+ fi
+ fi
+
+ return 0
+}
+setup_terminal || echo > /dev/null
+
+run_ok() {
+ printf >&2 "%s OK %s\n\n" "${TPUT_BGGREEN}${TPUT_WHITE}${TPUT_BOLD}" "${TPUT_RESET}"
+}
+
+run_failed() {
+ printf >&2 "%s FAILED %s\n\n" "${TPUT_BGRED}${TPUT_WHITE}${TPUT_BOLD}" "${TPUT_RESET}"
+}
+
+ESCAPED_PRINT_METHOD=
+if printf "%q " test > /dev/null 2>&1; then
+ ESCAPED_PRINT_METHOD="printfq"
+fi
+escaped_print() {
+ if [ "${ESCAPED_PRINT_METHOD}" = "printfq" ]; then
+ printf "%q " "${@}"
+ else
+ printf "%s" "${*}"
+ fi
+ return 0
+}
+
+run_logfile="/dev/null"
+run() {
+ local user="${USER--}" dir="${PWD}" info info_console
+
+ if [ "${UID}" = "0" ]; then
+ info="[root ${dir}]# "
+ info_console="[${TPUT_DIM}${dir}${TPUT_RESET}]# "
+ else
+ info="[${user} ${dir}]$ "
+ info_console="[${TPUT_DIM}${dir}${TPUT_RESET}]$ "
+ fi
+
+ {
+ printf "%s" "${info}"
+ escaped_print "${@}"
+ printf "%s" " ... "
+ } >> "${run_logfile}"
+
+ printf "%s" "${info_console}${TPUT_BOLD}${TPUT_YELLOW}" >&2
+ escaped_print >&2 "${@}"
+ printf "%s\n" "${TPUT_RESET}" >&2
+
+ "${@}"
+
+ local ret=$?
+ if [ ${ret} -ne 0 ]; then
+ run_failed
+ printf >> "${run_logfile}" "FAILED with exit code %s\n" "${ret}"
+ else
+ run_ok
+ printf >> "${run_logfile}" "OK\n"
+ fi
+
+ return ${ret}
+}
+
+portable_del_group() {
+ local groupname="${1}"
+
+  # Check if the group exists
+ echo >&2 "Removing ${groupname} user group ..."
+
+ # Linux
+ if command -v groupdel 1> /dev/null 2>&1; then
+ if grep -q "${groupname}" /etc/group; then
+ run groupdel "${groupname}" && return 0
+ else
+ echo >&2 "Group ${groupname} already removed in a previous step."
+ return 0
+ fi
+ fi
+
+ # mac OS
+ if command -v dseditgroup 1> /dev/null 2>&1; then
+ if dseditgroup -o read netdata 1> /dev/null 2>&1; then
+ run dseditgroup -o delete "${groupname}" && return 0
+ else
+ echo >&2 "Could not find group ${groupname}, nothing to do"
+ return 0
+ fi
+ fi
+
+ echo >&2 "Group ${groupname} was not automatically removed, you might have to remove it manually"
+ return 1
+}
+
+issystemd() {
+ local pids p myns ns systemctl
+
+  # if the directory /lib/systemd/system OR /usr/lib/systemd/system (SLES 12.x) does not exist, it is not systemd
+ if [ ! -d /lib/systemd/system ] && [ ! -d /usr/lib/systemd/system ]; then
+ return 1
+ fi
+
+ # if there is no systemctl command, it is not systemd
+ systemctl=$(command -v systemctl 2> /dev/null)
+ if [ -z "${systemctl}" ] || [ ! -x "${systemctl}" ]; then
+ return 1
+ fi
+
+ # if pid 1 is systemd, it is systemd
+ [ "$(basename "$(readlink /proc/1/exe)" 2> /dev/null)" = "systemd" ] && return 0
+
+ # if systemd is not running, it is not systemd
+ pids=$(safe_pidof systemd 2> /dev/null)
+ [ -z "${pids}" ] && return 1
+
+ # check if the running systemd processes are not in our namespace
+ myns="$(readlink /proc/self/ns/pid 2> /dev/null)"
+ for p in ${pids}; do
+ ns="$(readlink "/proc/${p}/ns/pid" 2> /dev/null)"
+
+ # if pid of systemd is in our namespace, it is systemd
+ [ -n "${myns}" ] && [ "${myns}" = "${ns}" ] && return 0
+ done
+
+ # else, it is not systemd
+ return 1
+}
+
+portable_del_user() {
+ local username="${1}"
+ echo >&2 "Deleting ${username} user account ..."
+
+ # Linux
+ if command -v userdel 1> /dev/null 2>&1; then
+ run userdel -f "${username}" && return 0
+ fi
+
+ # mac OS
+ if command -v sysadminctl 1> /dev/null 2>&1; then
+ run sysadminctl -deleteUser "${username}" && return 0
+ fi
+
+ echo >&2 "User ${username} could not be deleted from system, you might have to remove it manually"
+ return 1
+}
+
+portable_del_user_from_group() {
+ local groupname="${1}" username="${2}"
+
+  # remove the user from the group
+ echo >&2 "Deleting ${username} user from ${groupname} group ..."
+
+ # Linux
+ if command -v gpasswd 1> /dev/null 2>&1; then
+    run gpasswd -d "${username}" "${groupname}" && return 0
+ fi
+
+ # FreeBSD
+ if command -v pw 1> /dev/null 2>&1; then
+ run pw groupmod "${groupname}" -d "${username}" && return 0
+ fi
+
+ # BusyBox
+ if command -v delgroup 1> /dev/null 2>&1; then
+ run delgroup "${username}" "${groupname}" && return 0
+ fi
+
+ # mac OS
+ if command -v dseditgroup 1> /dev/null 2>&1; then
+ run dseditgroup -o delete -u "${username}" "${groupname}" && return 0
+ fi
+
+ echo >&2 "Failed to delete user ${username} from group ${groupname} !"
+ return 1
+}
+
+quit_msg() {
+ echo
+ if [ "$FILE_REMOVAL_STATUS" -eq 0 ]; then
+ echo >&2 "Something went wrong :("
+ else
+ echo >&2 "Netdata files were successfully removed from your system"
+ fi
+}
+
+user_input() {
+ TEXT="$1"
+ if [ "${INTERACTIVITY}" = "-i" ]; then
+ read -r -p "$TEXT" >&2
+ fi
+}
+
+rm_file() {
+ FILE="$1"
+ if [ -f "${FILE}" ]; then
+ run rm -v ${INTERACTIVITY} "${FILE}"
+ fi
+}
+
+rm_dir() {
+ DIR="$1"
+ if [ -n "$DIR" ] && [ -d "$DIR" ]; then
+ user_input "Press ENTER to recursively delete directory '$DIR' > "
+ run rm -v -f -R "${DIR}"
+ fi
+}
+
+safe_pidof() {
+ local pidof_cmd
+ pidof_cmd="$(command -v pidof 2> /dev/null)"
+ if [ -n "${pidof_cmd}" ]; then
+ ${pidof_cmd} "${@}"
+ return $?
+ else
+ ps -acxo pid,comm |
+ sed "s/^ *//g" |
+ grep netdata |
+ cut -d ' ' -f 1
+ return $?
+ fi
+}
+
+pidisnetdata() {
+ if [ -d /proc/self ]; then
+ if [ -z "$1" ] || [ ! -f "/proc/$1/stat" ]; then
+ return 1
+ fi
+ [ "$(cut -d '(' -f 2 "/proc/$1/stat" | cut -d ')' -f 1)" = "netdata" ] && return 0
+ return 1
+ fi
+ return 0
+}
+
+stop_netdata_on_pid() {
+ local pid="${1}" ret=0 count=0
+
+ pidisnetdata "${pid}" || return 0
+
+ printf >&2 "Stopping netdata on pid %s ..." "${pid}"
+ while [ -n "$pid" ] && [ ${ret} -eq 0 ]; do
+ if [ ${count} -gt 24 ]; then
+ echo >&2 "Cannot stop the running netdata on pid ${pid}."
+ return 1
+ fi
+
+ count=$((count + 1))
+
+ pidisnetdata "${pid}" || ret=1
+ if [ ${ret} -eq 1 ]; then
+ break
+ fi
+
+ if [ ${count} -lt 12 ]; then
+ run kill "${pid}" 2> /dev/null
+ ret=$?
+ else
+ run kill -9 "${pid}" 2> /dev/null
+ ret=$?
+ fi
+
+ test ${ret} -eq 0 && printf >&2 "." && sleep 5
+
+ done
+
+ echo >&2
+ if [ ${ret} -eq 0 ]; then
+ echo >&2 "SORRY! CANNOT STOP netdata ON PID ${pid} !"
+ return 1
+ fi
+
+ echo >&2 "netdata on pid ${pid} stopped."
+ return 0
+}
+
+netdata_pids() {
+ local p myns ns
+
+ myns="$(readlink /proc/self/ns/pid 2> /dev/null)"
+
+ for p in \
+ $(cat /var/run/netdata.pid 2> /dev/null) \
+ $(cat /var/run/netdata/netdata.pid 2> /dev/null) \
+ $(safe_pidof netdata 2> /dev/null); do
+ ns="$(readlink "/proc/${p}/ns/pid" 2> /dev/null)"
+
+ if [ -z "${myns}" ] || [ -z "${ns}" ] || [ "${myns}" = "${ns}" ]; then
+ pidisnetdata "${p}" && echo "${p}"
+ fi
+ done
+}
+
+stop_all_netdata() {
+ local p
+
+ if [ "${UID}" -eq 0 ]; then
+ uname="$(uname 2> /dev/null)"
+
+ # Any of these may fail, but we need to not bail if they do.
+ if issystemd; then
+ if systemctl stop netdata; then
+ sleep 5
+ fi
+ elif [ "${uname}" = "Darwin" ]; then
+ if launchctl stop netdata; then
+ sleep 5
+ fi
+ elif [ "${uname}" = "FreeBSD" ]; then
+ if /etc/rc.d/netdata stop; then
+ sleep 5
+ fi
+ else
+ if service netdata stop; then
+ sleep 5
+ fi
+ fi
+ fi
+
+ if [ -n "$(netdata_pids)" ] && [ -n "$(builtin type -P netdatacli)" ]; then
+ netdatacli shutdown-agent
+ sleep 20
+ fi
+
+ for p in $(netdata_pids); do
+ # shellcheck disable=SC2086
+ stop_netdata_on_pid ${p}
+ done
+}
+
+trap quit_msg EXIT
+
+#shellcheck source=/dev/null
+source "${ENVIRONMENT_FILE}" || exit 1
+
+#### STOP NETDATA
+echo >&2 "Stopping a possibly running netdata..."
+stop_all_netdata
+
+#### REMOVE NETDATA FILES
+rm_file /etc/logrotate.d/netdata
+rm_file /etc/systemd/system/netdata.service
+rm_file /lib/systemd/system/netdata.service
+rm_file /usr/lib/systemd/system/netdata.service
+rm_file /etc/systemd/system/netdata-updater.service
+rm_file /lib/systemd/system/netdata-updater.service
+rm_file /usr/lib/systemd/system/netdata-updater.service
+rm_file /etc/systemd/system/netdata-updater.timer
+rm_file /lib/systemd/system/netdata-updater.timer
+rm_file /usr/lib/systemd/system/netdata-updater.timer
+rm_file /etc/init.d/netdata
+rm_file /etc/periodic/daily/netdata-updater
+rm_file /etc/cron.daily/netdata-updater
+rm_file /etc/cron.d/netdata-updater
+
+
+if [ -n "${NETDATA_PREFIX}" ] && [ -d "${NETDATA_PREFIX}" ]; then
+ rm_dir "${NETDATA_PREFIX}"
+else
+ rm_file "/usr/sbin/netdata"
+ rm_file "/usr/sbin/netdatacli"
+ rm_file "/tmp/netdata-ipc"
+ rm_file "/usr/sbin/netdata-claim.sh"
+ rm_dir "/usr/share/netdata"
+ rm_dir "/usr/libexec/netdata"
+ rm_dir "/var/lib/netdata"
+ rm_dir "/var/cache/netdata"
+ rm_dir "/var/log/netdata"
+ rm_dir "/etc/netdata"
+fi
+
+FILE_REMOVAL_STATUS=1
+
+#### REMOVE NETDATA USER FROM ADDED GROUPS
+if [ -n "$NETDATA_ADDED_TO_GROUPS" ]; then
+ user_input "Press ENTER to delete 'netdata' from following groups: '$NETDATA_ADDED_TO_GROUPS' > "
+ for group in $NETDATA_ADDED_TO_GROUPS; do
+ portable_del_user_from_group "${group}" "netdata"
+ done
+fi
+
+#### REMOVE USER
+user_input "Press ENTER to delete 'netdata' system user > "
+portable_del_user "netdata" || :
+
+### REMOVE GROUP
+user_input "Press ENTER to delete 'netdata' system group > "
+portable_del_group "netdata" || :
diff --git a/packaging/installer/netdata-updater.sh b/packaging/installer/netdata-updater.sh
new file mode 100755
index 0000000..10835cd
--- /dev/null
+++ b/packaging/installer/netdata-updater.sh
@@ -0,0 +1,428 @@
+#!/usr/bin/env bash
+
+# Netdata updater utility
+#
+# Variables needed by script:
+# - PATH
+# - CFLAGS
+# - LDFLAGS
+# - IS_NETDATA_STATIC_BINARY
+# - NETDATA_CONFIGURE_OPTIONS
+# - REINSTALL_OPTIONS
+# - NETDATA_TARBALL_URL
+# - NETDATA_TARBALL_CHECKSUM_URL
+# - NETDATA_TARBALL_CHECKSUM
+# - NETDATA_PREFIX / NETDATA_LIB_DIR (After 1.16.1 we will only depend on lib dir)
+#
+# Optional environment options:
+#
+# - TMPDIR (set to a usable temporary directory)
+# - NETDATA_NIGHTLIES_BASEURL (set the base url for downloading the dist tarball)
+#
+# Copyright: 2018-2020 Netdata Inc.
+# SPDX-License-Identifier: GPL-3.0-or-later
+#
+# Author: Paweł Krupa <paulfantom@gmail.com>
+# Author: Pavlos Emm. Katsoulakis <paul@netdata.cloud>
+# Author: Austin S. Hemmelgarn <austin@netdata.cloud>
+
+set -e
+
+script_dir="$(CDPATH='' cd -- "$(dirname -- "$0")" && pwd -P)"
+
+if [ -x "${script_dir}/netdata-updater" ]; then
+ script_source="${script_dir}/netdata-updater"
+else
+ script_source="${script_dir}/netdata-updater.sh"
+fi
+
+info() {
+ echo >&3 "$(date) : INFO: " "${@}"
+}
+
+error() {
+ echo >&3 "$(date) : ERROR: " "${@}"
+}
+
+: "${ENVIRONMENT_FILE:=THIS_SHOULD_BE_REPLACED_BY_INSTALLER_SCRIPT}"
+
+if [ "${ENVIRONMENT_FILE}" == "THIS_SHOULD_BE_REPLACED_BY_INSTALLER_SCRIPT" ]; then
+ if [ -r "${script_dir}/../../../etc/netdata/.environment" ]; then
+ ENVIRONMENT_FILE="${script_dir}/../../../etc/netdata/.environment"
+ elif [ -r "/etc/netdata/.environment" ]; then
+ ENVIRONMENT_FILE="/etc/netdata/.environment"
+ elif [ -r "/opt/netdata/etc/netdata/.environment" ]; then
+ ENVIRONMENT_FILE="/opt/netdata/etc/netdata/.environment"
+ else
+ envpath="$(find / -type d \( -path /sys -o -path /proc -o -path /dev \) -prune -false -o -path '*netdata/.environment' -type f 2> /dev/null | head -n 1)"
+ if [ -r "${envpath}" ]; then
+ ENVIRONMENT_FILE="${envpath}"
+ else
+ error "Cannot find environment file, unable to update."
+ exit 1
+ fi
+ fi
+fi
+
+safe_sha256sum() {
+  # Within the context of the installer, we only use the -c option, which is common to both commands.
+  # We will have to reconsider if we start using non-common options.
+ if command -v sha256sum > /dev/null 2>&1; then
+ sha256sum "$@"
+ elif command -v shasum > /dev/null 2>&1; then
+ shasum -a 256 "$@"
+ else
+ fatal "I could not find a suitable checksum binary to use"
+ fi
+}
+
+# this is what we will do if it fails (headless only)
+fatal() {
+ error "FAILED TO UPDATE NETDATA : ${1}"
+ exit 1
+}
+
+cleanup() {
+ if [ -n "${logfile}" ]; then
+ cat >&2 "${logfile}"
+ rm "${logfile}"
+ fi
+
+ if [ -n "$ndtmpdir" ] && [ -d "$ndtmpdir" ]; then
+ rm -rf "$ndtmpdir"
+ fi
+}
+
+_cannot_use_tmpdir() {
+ local testfile ret
+ testfile="$(TMPDIR="${1}" mktemp -q -t netdata-test.XXXXXXXXXX)"
+ ret=0
+
+ if [ -z "${testfile}" ] ; then
+ return "${ret}"
+ fi
+
+ if printf '#!/bin/sh\necho SUCCESS\n' > "${testfile}" ; then
+ if chmod +x "${testfile}" ; then
+ if [ "$("${testfile}")" = "SUCCESS" ] ; then
+ ret=1
+ fi
+ fi
+ fi
+
+ rm -f "${testfile}"
+ return "${ret}"
+}
+
+create_tmp_directory() {
+ if [ -n "${NETDATA_TMPDIR_PATH}" ]; then
+ TMPDIR="${NETDATA_TMPDIR_PATH}"
+ else
+ if [ -z "${NETDATA_TMPDIR}" ] || _cannot_use_tmpdir "${NETDATA_TMPDIR}" ; then
+ if [ -z "${TMPDIR}" ] || _cannot_use_tmpdir "${TMPDIR}" ; then
+ if _cannot_use_tmpdir /tmp ; then
+ if _cannot_use_tmpdir "${PWD}" ; then
+ echo >&2
+ echo >&2 "Unable to find a usable temprorary directory. Please set \$TMPDIR to a path that is both writable and allows execution of files and try again."
+ exit 1
+ else
+ TMPDIR="${PWD}"
+ fi
+ else
+ TMPDIR="/tmp"
+ fi
+ fi
+ else
+ TMPDIR="${NETDATA_TMPDIR}"
+ fi
+
+ mktemp -d -t netdata-updater-XXXXXXXXXX
+ fi
+}
+
+_safe_download() {
+ url="${1}"
+ dest="${2}"
+ if command -v curl > /dev/null 2>&1; then
+ curl -sSL --connect-timeout 10 --retry 3 "${url}" > "${dest}"
+ return $?
+ elif command -v wget > /dev/null 2>&1; then
+ wget -T 15 -O - "${url}" > "${dest}"
+ return $?
+ else
+ return 255
+ fi
+}
+
+download() {
+ url="${1}"
+ dest="${2}"
+
+ _safe_download "${url}" "${dest}"
+ ret=$?
+
+ if [ ${ret} -eq 0 ]; then
+ return 0
+ elif [ ${ret} -eq 255 ]; then
+ fatal "I need curl or wget to proceed, but neither is available on this system."
+ else
+ fatal "Cannot download ${url}"
+ fi
+}
+
+newer_commit_date() {
+ echo >&3 "Checking if a newer version of the updater script is available."
+
+ if command -v jq > /dev/null 2>&1; then
+ commit_date="$(_safe_download "https://api.github.com/repos/netdata/netdata/commits?path=packaging%2Finstaller%2Fnetdata-updater.sh&page=1&per_page=1" /dev/stdout | jq '.[0].commit.committer.date' | tr -d '"')"
+ elif command -v python > /dev/null 2>&1;then
+ commit_date="$(_safe_download "https://api.github.com/repos/netdata/netdata/commits?path=packaging%2Finstaller%2Fnetdata-updater.sh&page=1&per_page=1" /dev/stdout | python -c 'from __future__ import print_function;import sys,json;print(json.load(sys.stdin)[0]["commit"]["committer"]["date"])')"
+ elif command -v python3 > /dev/null 2>&1;then
+ commit_date="$(_safe_download "https://api.github.com/repos/netdata/netdata/commits?path=packaging%2Finstaller%2Fnetdata-updater.sh&page=1&per_page=1" /dev/stdout | python3 -c 'from __future__ import print_function;import sys,json;print(json.load(sys.stdin)[0]["commit"]["committer"]["date"])')"
+ fi
+
+ if [ -z "${commit_date}" ] ; then
+ commit_date="9999-12-31T23:59:59Z"
+ fi
+
+ if [ -e "${script_source}" ]; then
+ script_date="$(date -r "${script_source}" +%s)"
+ else
+ script_date="$(date +%s)"
+ fi
+
+ [ "$(date -d "${commit_date}" +%s)" -ge "${script_date}" ]
+}
+
+self_update() {
+ if [ -z "${NETDATA_NO_UPDATER_SELF_UPDATE}" ] && newer_commit_date; then
+ echo >&3 "Downloading newest version of updater script."
+
+ ndtmpdir=$(create_tmp_directory)
+ cd "$ndtmpdir" || exit 1
+
+ if _safe_download "https://raw.githubusercontent.com/netdata/netdata/master/packaging/installer/netdata-updater.sh" ./netdata-updater.sh; then
+ chmod +x ./netdata-updater.sh || exit 1
+ export ENVIRONMENT_FILE="${ENVIRONMENT_FILE}"
+ exec ./netdata-updater.sh --not-running-from-cron --no-self-update --tmpdir-path "$(pwd)"
+ else
+ echo >&3 "Failed to download newest version of updater script, continuing with current version."
+ fi
+ fi
+}
+
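+# parse_version turns a release string such as "v1.29.3-15-gdeadbeef" into a
+# zero-padded number (here: 00102900300015) so versions compare with -ge / -gt.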
+parse_version() {
+ r="${1}"
+ if echo "${r}" | grep -q '^v.*'; then
+ # shellcheck disable=SC2001
+    # XXX: Need a regex group substitution here.
+ r="$(echo "${r}" | sed -e 's/^v\(.*\)/\1/')"
+ fi
+
+ read -r -a p <<< "$(echo "${r}" | tr '-' ' ')"
+
+ v="${p[0]}"
+ b="${p[1]}"
+ _="${p[2]}" # ignore the SHA
+
+ if [[ ! "${b}" =~ ^[0-9]+$ ]]; then
+ b="0"
+ fi
+
+ read -r -a pp <<< "$(echo "${v}" | tr '.' ' ')"
+ printf "%03d%03d%03d%05d" "${pp[0]}" "${pp[1]}" "${pp[2]}" "${b}"
+}
+
+get_latest_version() {
+ local latest
+ if [ "${RELEASE_CHANNEL}" == "stable" ]; then
+ latest="$(download "https://api.github.com/repos/netdata/netdata/releases/latest" /dev/stdout | grep tag_name | cut -d'"' -f4)"
+ else
+ latest="$(download "$NETDATA_NIGHTLIES_BASEURL/latest-version.txt" /dev/stdout)"
+ fi
+ parse_version "$latest"
+}
+
+set_tarball_urls() {
+ local extension="tar.gz"
+
+ if [ "$2" == "yes" ]; then
+ extension="gz.run"
+ fi
+
+ if [ "$1" = "stable" ]; then
+ local latest
+ # Simple version
+ latest="$(download "https://api.github.com/repos/netdata/netdata/releases/latest" /dev/stdout | grep tag_name | cut -d'"' -f4)"
+ export NETDATA_TARBALL_URL="https://github.com/netdata/netdata/releases/download/$latest/netdata-$latest.${extension}"
+ export NETDATA_TARBALL_CHECKSUM_URL="https://github.com/netdata/netdata/releases/download/$latest/sha256sums.txt"
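+ # Example (illustrative): for tag v1.29.3 the tarball URL expands to
+ # https://github.com/netdata/netdata/releases/download/v1.29.3/netdata-v1.29.3.tar.gz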
+ else
+ export NETDATA_TARBALL_URL="$NETDATA_NIGHTLIES_BASEURL/netdata-latest.${extension}"
+ export NETDATA_TARBALL_CHECKSUM_URL="$NETDATA_NIGHTLIES_BASEURL/sha256sums.txt"
+ fi
+}
+
+update() {
+ [ -z "${logfile}" ] && info "Running on a terminal - (this script also supports running headless from crontab)"
+
+ RUN_INSTALLER=0
+ ndtmpdir=$(create_tmp_directory)
+ cd "$ndtmpdir" || exit 1
+
+ download "${NETDATA_TARBALL_CHECKSUM_URL}" "${ndtmpdir}/sha256sum.txt" >&3 2>&3
+
+ current_version="$(command -v netdata > /dev/null && parse_version "$(netdata -v | cut -f 2 -d ' ')")"
+ latest_version="$(get_latest_version)"
+
+ # If we can't get the current version for some reason assume `0`
+ current_version="${current_version:-0}"
+
+ # If we can't get the latest version for some reason assume `0`
+ latest_version="${latest_version:-0}"
+
+ info "Current Version: ${current_version}"
+ info "Latest Version: ${latest_version}"
+
+ if [ "${latest_version}" -gt 0 ] && [ "${current_version}" -gt 0 ] && [ "${current_version}" -ge "${latest_version}" ]; then
+ info "Newest version (current=${current_version} >= latest=${latest_version}) is already installed"
+ elif [ -n "${NETDATA_TARBALL_CHECKSUM}" ] && grep "${NETDATA_TARBALL_CHECKSUM}" sha256sum.txt >&3 2>&3; then
+ info "Newest version is already installed"
+ else
+ download "${NETDATA_TARBALL_URL}" "${ndtmpdir}/netdata-latest.tar.gz"
+ if ! grep netdata-latest.tar.gz sha256sum.txt | safe_sha256sum -c - >&3 2>&3; then
+ fatal "Tarball checksum validation failed. Stopping netdata upgrade and leaving tarball in ${ndtmpdir}\nUsually this is a result of an older copy of the tarball or checksum file being cached somewhere upstream and can be resolved by retrying in an hour."
+ fi
+ NEW_CHECKSUM="$(safe_sha256sum netdata-latest.tar.gz 2> /dev/null | cut -d' ' -f1)"
+ tar -xf netdata-latest.tar.gz >&3 2>&3
+ rm netdata-latest.tar.gz >&3 2>&3
+ cd netdata-* || exit 1
+ RUN_INSTALLER=1
+ cd "${NETDATA_LOCAL_TARBAL_OVERRIDE}" || exit 1
+ fi
+
+ # We got the sources, run the update now
+ if [ ${RUN_INSTALLER} -eq 1 ]; then
+ # signal netdata to start saving its database
+ # this is handy if your database is big
+ possible_pids=$(pidof netdata)
+ do_not_start=
+ if [ -n "${possible_pids}" ]; then
+ read -r -a pids_to_kill <<< "${possible_pids}"
+ kill -USR1 "${pids_to_kill[@]}"
+ else
+ # netdata is currently not running, so do not start it after updating
+ do_not_start="--dont-start-it"
+ fi
+
+ if [ -n "${NETDATA_SELECTED_DASHBOARD}" ]; then
+ env="NETDATA_SELECTED_DASHBOARD=${NETDATA_SELECTED_DASHBOARD}"
+ fi
+
+ if [ ! -x ./netdata-installer.sh ]; then
+ if [ "$(find . -mindepth 1 -maxdepth 1 -type d | wc -l)" -eq 1 ] && [ -x "$(find . -mindepth 1 -maxdepth 1 -type d)/netdata-installer.sh" ]; then
+ cd "$(find . -mindepth 1 -maxdepth 1 -type d)" || exit 1
+ fi
+ fi
+
+ info "Re-installing netdata..."
+ eval "${env} ./netdata-installer.sh ${REINSTALL_OPTIONS} --dont-wait ${do_not_start}" >&3 2>&3 || fatal "FAILED TO COMPILE/INSTALL NETDATA"
+
+ # We no longer store checksum info here, but leave this so that we clean up all environment files upon the next update.
+ sed -i '/NETDATA_TARBALL/d' "${ENVIRONMENT_FILE}"
+
+ info "Updating tarball checksum info"
+ echo "${NEW_CHECKSUM}" > "${NETDATA_LIB_DIR}/netdata.tarball.checksum"
+ fi
+
+ rm -rf "${ndtmpdir}" >&3 2>&3
+ [ -n "${logfile}" ] && rm "${logfile}" && logfile=
+
+ return 0
+}
+
+logfile=
+ndtmpdir=
+
+trap cleanup EXIT
+
+while [ -n "${1}" ]; do
+ if [ "${1}" = "--not-running-from-cron" ]; then
+ NETDATA_NOT_RUNNING_FROM_CRON=1
+ shift 1
+ elif [ "${1}" = "--no-updater-self-update" ]; then
+ NETDATA_NO_UPDATER_SELF_UPDATE=1
+ shift 1
+ elif [ "${1}" = "--tmpdir-path" ]; then
+ NETDATA_TMPDIR_PATH="${2}"
+ shift 2
+ else
+ break
+ fi
+done
+
+# Random sleep to alleviate the stampede effect of Agents upgrading
+# and disconnecting/reconnecting at the same time (or near to).
+# But only if we're not attached to a controlling terminal (tty).
+# Randomly sleep between 1s and 60m.
+if [ ! -t 1 ] && [ -z "${NETDATA_NOT_RUNNING_FROM_CRON}" ]; then
+ sleep $(((RANDOM % 3600) + 1))
+fi
+
+# shellcheck source=/dev/null
+source "${ENVIRONMENT_FILE}" || exit 1
+
+# We don't expect to find the lib dir variable on older installations, so use this path if none is found
+export NETDATA_LIB_DIR="${NETDATA_LIB_DIR:-${NETDATA_PREFIX}/var/lib/netdata}"
+
+# Source the tarball checksum, if not already available from the environment (for existing installations with the old logic)
+[[ -z "${NETDATA_TARBALL_CHECKSUM}" ]] && [[ -f ${NETDATA_LIB_DIR}/netdata.tarball.checksum ]] && NETDATA_TARBALL_CHECKSUM="$(cat "${NETDATA_LIB_DIR}/netdata.tarball.checksum")"
+
+# Grab the nightlies baseurl (defaulting to our Google Storage bucket)
+export NETDATA_NIGHTLIES_BASEURL="${NETDATA_NIGHTLIES_BASEURL:-https://storage.googleapis.com/netdata-nightlies}"
+
+if [ "${INSTALL_UID}" != "$(id -u)" ]; then
+ fatal "You are running this script as user with uid $(id -u). We recommend to run this script as root (user with uid 0)"
+fi
+
+if [ -t 2 ]; then
+ # we are running on a terminal
+ # open fd 3 and send it to stderr
+ exec 3>&2
+else
+ # we are headless
+ # create a temporary file for the log
+ logfile="$(mktemp "${logfile}"/netdata-updater.log.XXXXXX)"
+ # open fd 3 and send it to logfile
+ exec 3> "${logfile}"
+fi
+
+self_update
+
+set_tarball_urls "${RELEASE_CHANNEL}" "${IS_NETDATA_STATIC_BINARY}"
+
+if [ "${IS_NETDATA_STATIC_BINARY}" == "yes" ]; then
+ ndtmpdir="$(create_tmp_directory)"
+ PREVDIR="$(pwd)"
+
+ echo >&2 "Entering ${ndtmpdir}"
+ cd "${ndtmpdir}" || exit 1
+
+ download "${NETDATA_TARBALL_CHECKSUM_URL}" "${ndtmpdir}/sha256sum.txt"
+ download "${NETDATA_TARBALL_URL}" "${ndtmpdir}/netdata-latest.gz.run"
+ if ! grep netdata-latest.gz.run "${ndtmpdir}/sha256sum.txt" | safe_sha256sum -c - > /dev/null 2>&1; then
+ fatal "Static binary checksum validation failed. Stopping netdata installation and leaving binary in ${ndtmpdir}\nUsually this is a result of an older copy of the file being cached somehere and can be resolved by simply retrying in an hour."
+ fi
+
+ # For now, do not pass any options other than --accept
+ # shellcheck disable=SC2086
+ if sh "${ndtmpdir}/netdata-latest.gz.run" --accept -- ${REINSTALL_OPTIONS}; then
+ rm -r "${ndtmpdir}"
+ else
+ echo >&2 "NOTE: did not remove: ${ndtmpdir}"
+ fi
+ echo >&2 "Switching back to ${PREVDIR}"
+ cd "${PREVDIR}" || exit 1
+else
+ # the installer updates this script, so we run and exit in a single line
+ update && exit 0
+fi
diff --git a/packaging/jsonc.checksums b/packaging/jsonc.checksums
new file mode 100644
index 0000000..6005f45
--- /dev/null
+++ b/packaging/jsonc.checksums
@@ -0,0 +1 @@
+ec4eb70e0f6c0d707b9b1ec646cf7c860f4abb3562a90ea6e4d78d177fd95303 json-c-0.14-20200419.tar.gz
diff --git a/packaging/jsonc.version b/packaging/jsonc.version
new file mode 100644
index 0000000..29b5619
--- /dev/null
+++ b/packaging/jsonc.version
@@ -0,0 +1 @@
+0.14-20200419
diff --git a/packaging/judy.checksums b/packaging/judy.checksums
new file mode 100644
index 0000000..7d4665b
--- /dev/null
+++ b/packaging/judy.checksums
@@ -0,0 +1 @@
+e623a06bf091758b43f5cd97c3609f8109442d1a0441bc44819b60f2861c61b1 v1.0.5-netdata2.tar.gz
diff --git a/packaging/judy.version b/packaging/judy.version
new file mode 100644
index 0000000..934246c
--- /dev/null
+++ b/packaging/judy.version
@@ -0,0 +1 @@
+1.0.5-netdata2
diff --git a/packaging/libbpf.checksums b/packaging/libbpf.checksums
new file mode 100644
index 0000000..d4ff87a
--- /dev/null
+++ b/packaging/libbpf.checksums
@@ -0,0 +1 @@
+fc33402ba33c8f8c5aa18afbb86a9932965886f2906c50e8f2110a1a2126e3ee v0.0.9_netdata-1.tar.gz
diff --git a/packaging/libbpf.version b/packaging/libbpf.version
new file mode 100644
index 0000000..d236290
--- /dev/null
+++ b/packaging/libbpf.version
@@ -0,0 +1 @@
+0.0.9_netdata-1
diff --git a/packaging/libwebsockets.checksums b/packaging/libwebsockets.checksums
new file mode 100644
index 0000000..fc9167c
--- /dev/null
+++ b/packaging/libwebsockets.checksums
@@ -0,0 +1 @@
+166d6e17cab64bfc10c2a71799c298284540a1fa63f6ea3de5caccb34502243c v3.2.2.tar.gz
diff --git a/packaging/libwebsockets.version b/packaging/libwebsockets.version
new file mode 100644
index 0000000..be94e6f
--- /dev/null
+++ b/packaging/libwebsockets.version
@@ -0,0 +1 @@
+3.2.2
diff --git a/packaging/maintainers/README.md b/packaging/maintainers/README.md
new file mode 100644
index 0000000..903c37d
--- /dev/null
+++ b/packaging/maintainers/README.md
@@ -0,0 +1,86 @@
+<!--
+title: "Package Maintainers"
+date: 2020-03-31
+custom_edit_url: https://github.com/netdata/netdata/edit/master/packaging/maintainers/README.md
+-->
+
+# Package Maintainers
+
+This page tracks the package maintainers for Netdata, for various operating systems and versions.
+
+> Feel free to update it, so that it reflects the current status.
+
+---
+
+## Official Linux Distributions
+
+| Linux Distribution | Netdata Version | Maintainer | Related URL |
+| :-: | :-: | :-: | :-- |
+| Arch Linux | Release | @svenstaro | [netdata @ Arch Linux](https://www.archlinux.org/packages/community/x86_64/netdata/) |
+| Arch Linux AUR | Git | @sanskritfritz | [netdata @ AUR](https://aur.archlinux.org/packages/netdata-git/) |
+| Gentoo Linux | Release + Git | @candrews | [netdata @ gentoo](https://github.com/gentoo/gentoo/tree/master/net-analyzer/netdata) |
+| Debian | Release | @lhw @FedericoCeratto | [netdata @ debian](http://salsa.debian.org/debian/netdata) |
+| Slackware | Release | @willysr | [netdata @ slackbuilds](https://slackbuilds.org/repository/14.2/system/netdata/) |
+| Ubuntu | | | |
+| Red Hat / Fedora / CentOS | | | |
+| SUSE SLE / openSUSE Tumbleweed & Leap | | | [netdata @ SUSE OpenBuildService](https://software.opensuse.org/package/netdata) |
+
+---
+
+## FreeBSD
+
+| System | Initial PR | Core Developer | Package Maintainer |
+|:-:|:-:|:-:|:-:|
+| FreeBSD | #1321 | @vlvkobal | @mmokhi |
+
+---
+
+## macOS
+
+| System | URL | Core Developer | Package Maintainer |
+|:-:|:-:|:-:|:-:|
+| macOS Homebrew Formula | [link](https://github.com/Homebrew/homebrew-core/blob/master/Formula/netdata.rb) | @vlvkobal | @rickard-von-essen |
+
+---
+
+## Unofficial Linux Packages
+
+| Linux Distribution | Netdata Version | Maintainer | Related URL |
+| :-: | :-: | :-: | :-- |
+| Ubuntu | Release | @gslin | [netdata @ gslin ppa](https://launchpad.net/~gslin/+archive/ubuntu/netdata) https://github.com/netdata/netdata/issues/69#issuecomment-217458543 |
+
+---
+
+## Embedded Linux
+
+| Embedded Linux | Netdata Version | Maintainer | Related URL |
+| :-: | :-: | :-: | :-- |
+| ASUSTOR NAS | ? | William Lin | https://www.asustor.com/apps/app_detail?id=532 |
+| OpenWRT | Release | @nitroshift | [openwrt package](https://github.com/openwrt/packages/tree/master/admin/netdata) |
+| ReadyNAS | Release | @NAStools | https://github.com/nastools/netdata |
+| QNAP | Release | QNAP_Stephane | https://forum.qnap.com/viewtopic.php?t=121518 |
+| DietPi | Release | @Fourdee | https://github.com/Fourdee/DietPi |
+
+---
+
+## Linux Containers
+
+| Containers | Netdata Version | Maintainer | Related URL |
+| :-: | :-: | :-: | :-- |
+| Docker | Git | @titpetric | https://github.com/titpetric/netdata |
+
+---
+
+## Automation Systems
+
+| Automation Systems | Netdata Version | Maintainer | Related URL |
+| :-: | :-: | :-: | :-- |
+| Ansible | git | @jffz | https://galaxy.ansible.com/jffz/netdata/ |
+| Chef | ? | @sergiopena | https://github.com/sergiopena/netdata-cookbook |
+
+---
+
+## Packages summary from repology.org
+
+[![Packaging status](https://repology.org/badge/vertical-allrepos/netdata.svg)](https://repology.org/metapackage/netdata/versions)
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fpackaging%2Fmaintainers%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/packaging/makeself/README.md b/packaging/makeself/README.md
new file mode 100644
index 0000000..c898a94
--- /dev/null
+++ b/packaging/makeself/README.md
@@ -0,0 +1,54 @@
+<!--
+title: "Netdata static binary build"
+description: "Users can build the static 64-bit binary package that we ship with every release of the open-source Netdata Agent for debugging or specialize purposes."
+custom_edit_url: https://github.com/netdata/netdata/edit/master/packaging/makeself/README.md
+-->
+
+# Netdata static binary build
+
+To build the 64-bit static binary distribution package, run:
+
+```bash
+cd /path/to/netdata.git
+./packaging/makeself/build-x86_64-static.sh
+```
+
+The program will:
+
+1. set up a new Docker container with Alpine Linux
+2. install the required Alpine packages (the build environment, needed libraries, etc.)
+3. download and compile third-party apps that are packaged with Netdata (`bash`, `curl`, etc.)
+4. compile Netdata
+
+Once finished, a file named `netdata-vX.X.X-gGITHASH-x86_64-DATE-TIME.run` will be created in the current directory. This is the Netdata binary package that can be run to install Netdata on any other computer.
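+
+For example, to install that package on another machine (the file name below is illustrative; use the one your build produced), run it with the bundled license-acceptance flag:
+
+```bash
+sudo sh ./netdata-vX.X.X-gGITHASH-x86_64-DATE-TIME.run --accept
+```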
+
+---
+
+## building binaries with debug info
+
+To build Netdata binaries with debugging / tracing information in them, use:
+
+```bash
+cd /path/to/netdata.git
+./packaging/makeself/build-x86_64-static.sh debug
+```
+
+These binaries are not optimized (they are a bit slower), they have certain features disabled (like log flood protection), other features enabled (like `debug flags`), and are not stripped (the binary files are bigger, since they include source code tracing information).
+
+### debugging Netdata binaries
+
+Once you have installed a binary package with debugging info, you will need to install `valgrind` and run this command to start Netdata:
+
+```bash
+PATH="/opt/netdata/bin:${PATH}" valgrind --undef-value-errors=no /opt/netdata/bin/srv/netdata -D
+```
+
+The above command runs Netdata under `valgrind`. While Netdata runs under `valgrind`, it will be 10x slower and use a lot more memory.
+
+If Netdata crashes, `valgrind` will print a stack trace of the issue. Open a GitHub issue to let us know.
+
+To stop Netdata while it runs under `valgrind`, press Control-C on the console.
+
+> If you omit the `--undef-value-errors=no` parameter when running valgrind, you will get hundreds of errors about conditional jumps that depend on uninitialized values. This is normal. Valgrind has heuristics to prevent it from printing such errors for system libraries, but the static Netdata binary has all the required libraries built in, so valgrind cannot apply its heuristics and prints them.
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fmakeself%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/packaging/makeself/build-x86_64-static.sh b/packaging/makeself/build-x86_64-static.sh
new file mode 100755
index 0000000..83fa0db
--- /dev/null
+++ b/packaging/makeself/build-x86_64-static.sh
@@ -0,0 +1,48 @@
+#!/usr/bin/env bash
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# shellcheck source=./packaging/installer/functions.sh
+. "$(dirname "$0")"/../installer/functions.sh || exit 1
+
+set -e
+
+DOCKER_CONTAINER_NAME="netdata-package-x86_64-static-alpine312"
+
+if ! docker inspect "${DOCKER_CONTAINER_NAME}" > /dev/null 2>&1; then
+ # To run interactively:
+ # docker run -it netdata-package-x86_64-static /bin/sh
+ # (add -v host-dir:guest-dir:rw arguments to mount volumes)
+ #
+ # To remove images in order to re-create:
+ # docker rm -v $(sudo docker ps -a -q -f status=exited)
+ # docker rmi netdata-package-x86_64-static
+ #
+ # This command maps the current directory to
+ # /usr/src/netdata.git
+ # inside the container and runs the script install-alpine-packages.sh
+ # (also inside the container)
+ #
+ run docker run -v "$(pwd)":/usr/src/netdata.git:rw alpine:3.12 \
+ /bin/sh /usr/src/netdata.git/packaging/makeself/install-alpine-packages.sh
+
+ # save the changes made permanently
+ id=$(docker ps -l -q)
+ run docker commit "${id}" "${DOCKER_CONTAINER_NAME}"
+fi
+
+# Run the build script inside the container
+if [ -t 1 ]; then
+ run docker run -a stdin -a stdout -a stderr -i -t -v \
+ "$(pwd)":/usr/src/netdata.git:rw \
+ "${DOCKER_CONTAINER_NAME}" \
+ /bin/sh /usr/src/netdata.git/packaging/makeself/build.sh "${@}"
+else
+ run docker run -v "$(pwd)":/usr/src/netdata.git:rw \
+ "${DOCKER_CONTAINER_NAME}" \
+ /bin/sh /usr/src/netdata.git/packaging/makeself/build.sh "${@}"
+fi
+
+if [ "${USER}" ]; then
+ sudo chown -R "${USER}" .
+fi
diff --git a/packaging/makeself/build.sh b/packaging/makeself/build.sh
new file mode 100755
index 0000000..85cb8fc
--- /dev/null
+++ b/packaging/makeself/build.sh
@@ -0,0 +1,57 @@
+#!/usr/bin/env sh
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# -----------------------------------------------------------------------------
+# parse command line arguments
+
+set -e
+
+export NETDATA_BUILD_WITH_DEBUG=0
+
+while [ -n "${1}" ]; do
+ case "${1}" in
+ debug)
+ export NETDATA_BUILD_WITH_DEBUG=1
+ ;;
+
+ *) ;;
+
+ esac
+
+ shift
+done
+
+# -----------------------------------------------------------------------------
+
+# First run install-alpine-packages.sh under alpine linux to install
+# the required packages. build-x86_64-static.sh will do this for you
+# using docker.
+
+cd "$(dirname "$0")" || exit 1
+
+# if we don't run inside the netdata repo
+# download it and run from it
+if [ ! -f ../../netdata-installer.sh ]; then
+ git clone https://github.com/netdata/netdata.git netdata.git || exit 1
+ cd netdata.git/makeself || exit 1
+ ./build.sh "$@"
+ exit $?
+fi
+
+cat >&2 << EOF
+This program will create a self-extracting shell package containing
+a statically linked netdata, able to run on any 64-bit Linux system,
+without any dependencies from the target system.
+
+It can be used to have netdata running in no time, or in cases where
+the target Linux system cannot compile netdata.
+EOF
+
+if [ ! -d tmp ]; then
+ mkdir tmp || exit 1
+fi
+
+if ! ./run-all-jobs.sh "$@"; then
+ printf >&2 "Build failed."
+ exit 1
+fi
diff --git a/packaging/makeself/functions.sh b/packaging/makeself/functions.sh
new file mode 100755
index 0000000..15818d3
--- /dev/null
+++ b/packaging/makeself/functions.sh
@@ -0,0 +1,62 @@
+#!/usr/bin/env bash
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# -----------------------------------------------------------------------------
+
+# allow running the jobs by hand
+[ -z "${NETDATA_BUILD_WITH_DEBUG}" ] && export NETDATA_BUILD_WITH_DEBUG=0
+[ -z "${NETDATA_INSTALL_PATH}" ] && export NETDATA_INSTALL_PATH="${1-/opt/netdata}"
+[ -z "${NETDATA_MAKESELF_PATH}" ] && NETDATA_MAKESELF_PATH="$(dirname "${0}")/../.."
+[ "${NETDATA_MAKESELF_PATH:0:1}" != "/" ] && NETDATA_MAKESELF_PATH="$(pwd)/${NETDATA_MAKESELF_PATH}"
+[ -z "${NETDATA_SOURCE_PATH}" ] && export NETDATA_SOURCE_PATH="${NETDATA_MAKESELF_PATH}/../.."
+export NETDATA_MAKESELF_PATH
+export NULL=
+
+# make sure the path does not end with /
+if [ "${NETDATA_INSTALL_PATH:$((${#NETDATA_INSTALL_PATH} - 1)):1}" = "/" ]; then
+ export NETDATA_INSTALL_PATH="${NETDATA_INSTALL_PATH:0:$((${#NETDATA_INSTALL_PATH} - 1))}"
+fi
+
+# find the parent directory
+NETDATA_INSTALL_PARENT="$(dirname "${NETDATA_INSTALL_PATH}")"
+export NETDATA_INSTALL_PARENT
+
+# -----------------------------------------------------------------------------
+
+# bash strict mode
+set -euo pipefail
+
+# -----------------------------------------------------------------------------
+
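+# Download ${url} into tmp/${dir}.tar.gz (unless already cached), unpack it,
+# and cd into the extracted directory. For example:
+#   fetch "bash-5.0" "http://ftp.gnu.org/gnu/bash/bash-5.0.tar.gz"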
+fetch() {
+ local dir="${1}" url="${2}"
+ local tar="${dir}.tar.gz"
+
+ if [ ! -f "${NETDATA_MAKESELF_PATH}/tmp/${tar}" ]; then
+ run wget -O "${NETDATA_MAKESELF_PATH}/tmp/${tar}" "${url}"
+ fi
+
+ if [ ! -d "${NETDATA_MAKESELF_PATH}/tmp/${dir}" ]; then
+ cd "${NETDATA_MAKESELF_PATH}/tmp"
+ run tar -zxpf "${tar}"
+ cd -
+ fi
+
+ run cd "${NETDATA_MAKESELF_PATH}/tmp/${dir}"
+}
+
+# -----------------------------------------------------------------------------
+
+# load the functions of the netdata-installer.sh
+# shellcheck source=packaging/installer/functions.sh
+. "${NETDATA_SOURCE_PATH}/packaging/installer/functions.sh"
+
+# -----------------------------------------------------------------------------
+
+# debug
+echo "ME=${0}"
+echo "NETDATA_INSTALL_PARENT=${NETDATA_INSTALL_PARENT}"
+echo "NETDATA_INSTALL_PATH=${NETDATA_INSTALL_PATH}"
+echo "NETDATA_MAKESELF_PATH=${NETDATA_MAKESELF_PATH}"
+echo "NETDATA_SOURCE_PATH=${NETDATA_SOURCE_PATH}"
+echo "PROCESSORS=$(nproc)"
diff --git a/packaging/makeself/install-alpine-packages.sh b/packaging/makeself/install-alpine-packages.sh
new file mode 100755
index 0000000..b4e516b
--- /dev/null
+++ b/packaging/makeself/install-alpine-packages.sh
@@ -0,0 +1,51 @@
+#!/usr/bin/env sh
+#
+# Installation script for the alpine host
+# to prepare the static binary
+#
+# Copyright: SPDX-License-Identifier: GPL-3.0-or-later
+#
+# Author: Paul Emm. Katsoulakis <paul@netdata.cloud>
+
+# Add required APK packages
+apk add --no-cache -U \
+ bash \
+ wget \
+ curl \
+ ncurses \
+ git \
+ netcat-openbsd \
+ alpine-sdk \
+ autoconf \
+ automake \
+ gcc \
+ make \
+ cmake \
+ libtool \
+ pkgconfig \
+ util-linux-dev \
+ gnutls-dev \
+ zlib-dev \
+ zlib-static \
+ libmnl-dev \
+ libnetfilter_acct-dev \
+ libuv-dev \
+ libuv-static \
+ lz4-dev \
+ lz4-static \
+ snappy-dev \
+ protobuf-dev \
+ binutils \
+ gzip \
+ xz || exit 1
+
+# snappy doesn't have a static version in Alpine, so let's compile it
+export SNAPPY_VER="1.1.7"
+wget -O /snappy.tar.gz https://github.com/google/snappy/archive/${SNAPPY_VER}.tar.gz
+tar -C / -xf /snappy.tar.gz
+rm /snappy.tar.gz
+cd /snappy-${SNAPPY_VER} || exit 1
+mkdir build
+cd build || exit 1
+cmake -DCMAKE_BUILD_SHARED_LIBS=true -DCMAKE_INSTALL_PREFIX:PATH=/usr -DCMAKE_INSTALL_LIBDIR=lib ../
+make && make install
diff --git a/packaging/makeself/install-or-update.sh b/packaging/makeself/install-or-update.sh
new file mode 100755
index 0000000..bbf631b
--- /dev/null
+++ b/packaging/makeself/install-or-update.sh
@@ -0,0 +1,269 @@
+#!/usr/bin/env bash
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# shellcheck source=./packaging/makeself/functions.sh
+. "$(dirname "${0}")"/functions.sh
+
+export LC_ALL=C
+umask 002
+
+# Be nice on production environments
+renice 19 $$ > /dev/null 2> /dev/null
+
+NETDATA_PREFIX="/opt/netdata"
+NETDATA_USER_CONFIG_DIR="${NETDATA_PREFIX}/etc/netdata"
+
+# -----------------------------------------------------------------------------
+if [ -d /opt/netdata/etc/netdata.old ]; then
+ progress "Found old etc/netdata directory, reinstating this"
+ [ -d /opt/netdata/etc/netdata.new ] && rm -rf /opt/netdata/etc/netdata.new
+ mv -f /opt/netdata/etc/netdata /opt/netdata/etc/netdata.new
+ mv -f /opt/netdata/etc/netdata.old /opt/netdata/etc/netdata
+
+ progress "Trigger stock config clean up"
+ rm -f /opt/netdata/etc/netdata/.installer-cleanup-of-stock-configs-done
+fi
+
+STARTIT=1
+AUTOUPDATE=0
+REINSTALL_OPTIONS=""
+RELEASE_CHANNEL="nightly" # check .travis/create_artifacts.sh before modifying
+
+while [ "${1}" ]; do
+ case "${1}" in
+ "--dont-start-it")
+ STARTIT=0
+ REINSTALL_OPTIONS="${REINSTALL_OPTIONS} ${1}"
+ ;;
+ "--auto-update" | "-u")
+ AUTOUPDATE=1
+ REINSTALL_OPTIONS="${REINSTALL_OPTIONS} ${1}"
+ ;;
+ "--stable-channel")
+ RELEASE_CHANNEL="stable"
+ REINSTALL_OPTIONS="${REINSTALL_OPTIONS} ${1}"
+ ;;
+ "--nightly-channel")
+ RELEASE_CHANNEL="nightly"
+ REINSTALL_OPTIONS="${REINSTALL_OPTIONS} ${1}"
+ ;;
+ "--disable-telemetry")
+ REINSTALL_OPTIONS="${REINSTALL_OPTIONS} ${1}"
+ ;;
+
+ *) echo >&2 "Unknown option '${1}'. Ignoring it." ;;
+ esac
+ shift 1
+done
+
+if [ ! "${DO_NOT_TRACK:-0}" -eq 0 ] || [ -n "$DO_NOT_TRACK" ]; then
+ REINSTALL_OPTIONS="${REINSTALL_OPTIONS} --disable-telemtry"
+fi
+
+deleted_stock_configs=0
+if [ ! -f "etc/netdata/.installer-cleanup-of-stock-configs-done" ]; then
+
+ # -----------------------------------------------------------------------------
+ progress "Deleting stock configuration files from user configuration directory"
+
+ declare -A configs_signatures=()
+ source "system/configs.signatures"
+
+ if [ ! -d etc/netdata ]; then
+ run mkdir -p etc/netdata
+ fi
+
+ md5sum="$(command -v md5sum 2> /dev/null || command -v md5 2> /dev/null)"
+ while IFS= read -r -d '' x; do
+ # find its relative filename
+ f="${x/etc\/netdata\//}"
+
+ # find the stock filename
+ t="${f/.conf.old/.conf}"
+ t="${t/.conf.orig/.conf}"
+
+ if [ -n "${md5sum}" ]; then
+ # find the checksum of the existing file
+ md5="$(${md5sum} < "${x}" | cut -d ' ' -f 1)"
+ #echo >&2 "md5: ${md5}"
+
+ # check if it matches
+ if [ "${configs_signatures[${md5}]}" = "${t}" ]; then
+ # it matches the default
+ run rm -f "${x}"
+ deleted_stock_configs=$((deleted_stock_configs + 1))
+ fi
+ fi
+ done < <(find etc -type f)
+
+ touch "etc/netdata/.installer-cleanup-of-stock-configs-done"
+fi
+
+# -----------------------------------------------------------------------------
+progress "Attempt to create user/group netdata/netadata"
+
+NETDATA_WANTED_GROUPS="docker nginx varnish haproxy adm nsd proxy squid ceph nobody I2C"
+NETDATA_ADDED_TO_GROUPS=""
+# Default user/group
+NETDATA_USER="root"
+NETDATA_GROUP="root"
+
+if portable_add_group netdata; then
+ if portable_add_user netdata "/opt/netdata"; then
+ progress "Add user netdata to required user groups"
+ for g in ${NETDATA_WANTED_GROUPS}; do
+ # shellcheck disable=SC2086
+ if portable_add_user_to_group ${g} netdata; then
+ NETDATA_ADDED_TO_GROUPS="${NETDATA_ADDED_TO_GROUPS} ${g}"
+ else
+ run_failed "Failed to add netdata user to secondary groups"
+ fi
+ done
+ NETDATA_USER="netdata"
+ NETDATA_GROUP="netdata"
+ else
+ run_failed "I could not add user netdata, will be using root"
+ fi
+else
+ run_failed "I could not add group netdata, so no user netdata will be created as well. Netdata run as root:root"
+fi
+
+# -----------------------------------------------------------------------------
+progress "Install logrotate configuration for netdata"
+
+install_netdata_logrotate || run_failed "Cannot install logrotate file for netdata."
+
+# -----------------------------------------------------------------------------
+progress "Telemetry configuration"
+
+# Opt-out from telemetry program
+if [ -n "${NETDATA_DISABLE_TELEMETRY+x}" ]; then
+ run touch "${NETDATA_USER_CONFIG_DIR}/.opt-out-from-anonymous-statistics"
+else
+ printf "You can opt out from anonymous statistics via the --disable-telemetry option, or by creating an empty file %s \n\n" "${NETDATA_USER_CONFIG_DIR}/.opt-out-from-anonymous-statistics"
+fi
+
+# -----------------------------------------------------------------------------
+progress "Install netdata at system init"
+
+install_netdata_service || run_failed "Cannot install netdata init service."
+
+set_netdata_updater_channel || run_failed "Cannot set netdata updater tool release channel to '${RELEASE_CHANNEL}'"
+
+# -----------------------------------------------------------------------------
+progress "Install (but not enable) netdata updater tool"
+cleanup_old_netdata_updater || run_failed "Cannot cleanup old netdata updater tool."
+install_netdata_updater || run_failed "Cannot install netdata updater tool."
+
+progress "Check if we must enable/disable the netdata updater tool"
+if [ "${AUTOUPDATE}" = "1" ]; then
+ enable_netdata_updater || run_failed "Cannot enable netdata updater tool"
+else
+ disable_netdata_updater || run_failed "Cannot disable netdata updater tool"
+fi
+
+# -----------------------------------------------------------------------------
+progress "creating quick links"
+
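+# dir_should_be_link <parent> <target> <link-name>: inside <parent>, replace
+# <link-name> with a symlink to <target>, backing up any real directory that
+# already exists there as <link-name>.old.$$.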
+dir_should_be_link() {
+ local p="${1}" t="${2}" d="${3}" old
+
+ old="${PWD}"
+ cd "${p}" || return 0
+
+ if [ -e "${d}" ]; then
+ if [ -h "${d}" ]; then
+ run rm "${d}"
+ else
+ run mv -f "${d}" "${d}.old.$$"
+ fi
+ fi
+
+ run ln -s "${t}" "${d}"
+ cd "${old}"
+}
+
+dir_should_be_link . bin sbin
+dir_should_be_link usr ../bin bin
+dir_should_be_link usr ../bin sbin
+dir_should_be_link usr . local
+
+dir_should_be_link . etc/netdata netdata-configs
+dir_should_be_link . usr/share/netdata/web netdata-web-files
+dir_should_be_link . usr/libexec/netdata netdata-plugins
+dir_should_be_link . var/lib/netdata netdata-dbs
+dir_should_be_link . var/cache/netdata netdata-metrics
+dir_should_be_link . var/log/netdata netdata-logs
+
+dir_should_be_link etc/netdata ../../usr/lib/netdata/conf.d orig
+
+if [ ${deleted_stock_configs} -gt 0 ]; then
+ dir_should_be_link etc/netdata ../../usr/lib/netdata/conf.d "000.-.USE.THE.orig.LINK.TO.COPY.AND.EDIT.STOCK.CONFIG.FILES"
+fi
+
+# -----------------------------------------------------------------------------
+progress "fix permissions"
+
+run chmod g+rx,o+rx /opt
+run chown -R ${NETDATA_USER}:${NETDATA_GROUP} /opt/netdata
+
+# -----------------------------------------------------------------------------
+
+progress "fix plugin permissions"
+
+for x in apps.plugin freeipmi.plugin ioping cgroup-network; do
+ f="usr/libexec/netdata/plugins.d/${x}"
+
+ if [ -f "${f}" ]; then
+ run chown root:${NETDATA_GROUP} "${f}"
+ run chmod 4750 "${f}"
+ fi
+done
+
+# fix the fping binary
+if [ -f bin/fping ]; then
+ run chown root:${NETDATA_GROUP} bin/fping
+ run chmod 4750 bin/fping
+fi
+
+# -----------------------------------------------------------------------------
+
+echo "Configure TLS certificate paths"
+if [ ! -L /opt/netdata/etc/ssl ] && [ -d /opt/netdata/etc/ssl ] ; then
+ echo "Preserving existing user configuration for TLS"
+else
+ if [ -d /etc/pki/tls ] ; then
+ echo "Using /etc/pki/tls for TLS configuration and certificates"
+ ln -sf /etc/pki/tls /opt/netdata/etc/ssl
+ elif [ -d /etc/ssl ] ; then
+ echo "Using /etc/ssl for TLS configuration and certificates"
+ ln -sf /etc/ssl /opt/netdata/etc/ssl
+ else
+ echo "Using bundled TLS configuration and certificates"
+ ln -sf /opt/netdata/share/ssl /opt/netdata/etc/ssl
+ fi
+fi
+
+# -----------------------------------------------------------------------------
+
+echo "Save install options"
+grep -qv 'IS_NETDATA_STATIC_BINARY="yes"' "${NETDATA_PREFIX}/etc/netdata/.environment" || echo IS_NETDATA_STATIC_BINARY=\"yes\" >> "${NETDATA_PREFIX}/etc/netdata/.environment"
+sed -i "s/REINSTALL_OPTIONS=\".*\"/REINSTALL_OPTIONS=\"${REINSTALL_OPTIONS}\"/" "${NETDATA_PREFIX}/etc/netdata/.environment"
+
+# -----------------------------------------------------------------------------
+if [ ${STARTIT} -eq 0 ]; then
+ create_netdata_conf "${NETDATA_PREFIX}/etc/netdata/netdata.conf"
+ netdata_banner "is installed now!"
+else
+ progress "starting netdata"
+
+ if ! restart_netdata "${NETDATA_PREFIX}/bin/netdata"; then
+ create_netdata_conf "${NETDATA_PREFIX}/etc/netdata/netdata.conf"
+ netdata_banner "is installed and running now!"
+ else
+ create_netdata_conf "${NETDATA_PREFIX}/etc/netdata/netdata.conf" "http://localhost:19999/netdata.conf"
+ netdata_banner "is installed now!"
+ fi
+fi
+run chmod 0644 "${NETDATA_PREFIX}/etc/netdata/netdata.conf"
diff --git a/packaging/makeself/jobs/10-prepare-destination.install.sh b/packaging/makeself/jobs/10-prepare-destination.install.sh
new file mode 100755
index 0000000..8cce2d4
--- /dev/null
+++ b/packaging/makeself/jobs/10-prepare-destination.install.sh
@@ -0,0 +1,17 @@
+#!/usr/bin/env bash
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# shellcheck source=packaging/makeself/functions.sh
+. "$(dirname "${0}")/../functions.sh" "${@}" || exit 1
+
+[ -d "${NETDATA_INSTALL_PATH}.old" ] && run rm -rf "${NETDATA_INSTALL_PATH}.old"
+[ -d "${NETDATA_INSTALL_PATH}" ] && run mv -f "${NETDATA_INSTALL_PATH}" "${NETDATA_INSTALL_PATH}.old"
+
+run mkdir -p "${NETDATA_INSTALL_PATH}/bin"
+run mkdir -p "${NETDATA_INSTALL_PATH}/usr"
+run cd "${NETDATA_INSTALL_PATH}"
+run ln -s bin sbin
+run cd "${NETDATA_INSTALL_PATH}/usr"
+run ln -s ../bin bin
+run ln -s ../sbin sbin
+run ln -s . local
diff --git a/packaging/makeself/jobs/20-openssl.install.sh b/packaging/makeself/jobs/20-openssl.install.sh
new file mode 100755
index 0000000..10863f9
--- /dev/null
+++ b/packaging/makeself/jobs/20-openssl.install.sh
@@ -0,0 +1,20 @@
+#!/usr/bin/env bash
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# shellcheck source=packaging/makeself/functions.sh
+. "$(dirname "${0}")/../functions.sh" "${@}" || exit 1
+
+version="$(cat "$(dirname "${0}")/../openssl.version")"
+
+export LDFLAGS='-static'
+export PKG_CONFIG="pkg-config --static"
+
+# Might be bind-mounted
+if [ ! -d "${NETDATA_MAKESELF_PATH}/tmp/openssl" ]; then
+ run git clone --branch "${version}" --single-branch git://git.openssl.org/openssl.git "${NETDATA_MAKESELF_PATH}/tmp/openssl"
+fi
+cd "${NETDATA_MAKESELF_PATH}/tmp/openssl" || exit 1
+
+run ./config no-shared no-tests --prefix=/openssl-static --openssldir=/opt/netdata/etc/ssl
+run make -j "$(nproc)"
+run make -j "$(nproc)" install_sw
diff --git a/packaging/makeself/jobs/50-bash-5.0.install.sh b/packaging/makeself/jobs/50-bash-5.0.install.sh
new file mode 100755
index 0000000..a204c15
--- /dev/null
+++ b/packaging/makeself/jobs/50-bash-5.0.install.sh
@@ -0,0 +1,33 @@
+#!/usr/bin/env bash
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# shellcheck source=packaging/makeself/functions.sh
+. "$(dirname "${0}")/../functions.sh" "${@}" || exit 1
+
+fetch "bash-5.0" "http://ftp.gnu.org/gnu/bash/bash-5.0.tar.gz"
+
+export PKG_CONFIG_PATH="/openssl-static/lib/pkgconfig"
+
+run ./configure \
+ --prefix="${NETDATA_INSTALL_PATH}" \
+ --without-bash-malloc \
+ --enable-static-link \
+ --enable-net-redirections \
+ --enable-array-variables \
+ --disable-profiling \
+ --disable-nls
+
+run make clean
+run make -j "$(nproc)"
+
+cat > examples/loadables/Makefile << EOF
+all:
+clean:
+install:
+EOF
+
+run make install
+
+if [ ${NETDATA_BUILD_WITH_DEBUG} -eq 0 ]; then
+ run strip "${NETDATA_INSTALL_PATH}"/bin/bash
+fi
diff --git a/packaging/makeself/jobs/50-curl-7.73.0.install.sh b/packaging/makeself/jobs/50-curl-7.73.0.install.sh
new file mode 100755
index 0000000..4a9505e
--- /dev/null
+++ b/packaging/makeself/jobs/50-curl-7.73.0.install.sh
@@ -0,0 +1,36 @@
+#!/usr/bin/env bash
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# shellcheck source=packaging/makeself/functions.sh
+. "$(dirname "${0}")/../functions.sh" "${@}" || exit 1
+
+fetch "curl-7.73.0" "https://curl.haxx.se/download/curl-7.73.0.tar.gz"
+
+export CFLAGS="-I/openssl-static/include"
+export LDFLAGS="-static -L/openssl-static/lib"
+export PKG_CONFIG="pkg-config --static"
+export PKG_CONFIG_PATH="/openssl-static/lib/pkgconfig"
+
+run autoreconf -fi
+
+run ./configure \
+ --prefix="${NETDATA_INSTALL_PATH}" \
+ --enable-optimize \
+ --disable-shared \
+ --enable-static \
+ --enable-http \
+ --enable-proxy \
+ --enable-ipv6 \
+ --enable-cookies \
+ --with-ca-fallback
+
+# Curl autoconf does not honour the curl_LDFLAGS environment variable
+run sed -i -e "s/curl_LDFLAGS =/curl_LDFLAGS = -all-static/" src/Makefile
+
+run make clean
+run make -j "$(nproc)"
+run make install
+
+if [ ${NETDATA_BUILD_WITH_DEBUG} -eq 0 ]; then
+ run strip "${NETDATA_INSTALL_PATH}"/bin/curl
+fi
diff --git a/packaging/makeself/jobs/50-fping-5.0.install.sh b/packaging/makeself/jobs/50-fping-5.0.install.sh
new file mode 100755
index 0000000..e62c47e
--- /dev/null
+++ b/packaging/makeself/jobs/50-fping-5.0.install.sh
@@ -0,0 +1,30 @@
+#!/usr/bin/env bash
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# shellcheck source=packaging/makeself/functions.sh
+. "$(dirname "${0}")/../functions.sh" "${@}" || exit 1
+
+fetch "fping-5.0" "https://fping.org/dist/fping-5.0.tar.gz"
+
+export CFLAGS="-static -I/openssl-static/include"
+export LDFLAGS="-static -L/openssl-static/lib"
+export PKG_CONFIG_PATH="/openssl-static/lib/pkgconfig"
+
+run ./configure \
+ --prefix="${NETDATA_INSTALL_PATH}" \
+ --enable-ipv4 \
+ --enable-ipv6
+
+cat > doc/Makefile << EOF
+all:
+clean:
+install:
+EOF
+
+run make clean
+run make -j "$(nproc)"
+run make install
+
+if [ ${NETDATA_BUILD_WITH_DEBUG} -eq 0 ]; then
+ run strip "${NETDATA_INSTALL_PATH}"/bin/fping
+fi
diff --git a/packaging/makeself/jobs/50-ioping-1.2.install.sh b/packaging/makeself/jobs/50-ioping-1.2.install.sh
new file mode 100755
index 0000000..22f2691
--- /dev/null
+++ b/packaging/makeself/jobs/50-ioping-1.2.install.sh
@@ -0,0 +1,18 @@
+#!/usr/bin/env bash
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# shellcheck source=packaging/makeself/functions.sh
+. "$(dirname "${0}")/../functions.sh" "${@}" || exit 1
+
+fetch "ioping-1.2" "https://github.com/koct9i/ioping/archive/v1.2.tar.gz"
+
+export CFLAGS="-static"
+
+run make clean
+run make -j "$(nproc)"
+run mkdir -p "${NETDATA_INSTALL_PATH}"/usr/libexec/netdata/plugins.d/
+run install -o root -g root -m 4750 ioping "${NETDATA_INSTALL_PATH}"/usr/libexec/netdata/plugins.d/
+
+if [ ${NETDATA_BUILD_WITH_DEBUG} -eq 0 ]; then
+ run strip "${NETDATA_INSTALL_PATH}"/usr/libexec/netdata/plugins.d/ioping
+fi
diff --git a/packaging/makeself/jobs/70-netdata-git.install.sh b/packaging/makeself/jobs/70-netdata-git.install.sh
new file mode 100755
index 0000000..21d4fd0
--- /dev/null
+++ b/packaging/makeself/jobs/70-netdata-git.install.sh
@@ -0,0 +1,50 @@
+#!/usr/bin/env bash
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# shellcheck source=./packaging/makeself/functions.sh
+. "${NETDATA_MAKESELF_PATH}"/functions.sh "${@}" || exit 1
+
+cd "${NETDATA_SOURCE_PATH}" || exit 1
+
+if [ ${NETDATA_BUILD_WITH_DEBUG} -eq 0 ]; then
+ export CFLAGS="-static -O3 -I/openssl-static/include"
+else
+ export CFLAGS="-static -O1 -ggdb -Wall -Wextra -Wformat-signedness -fstack-protector-all -D_FORTIFY_SOURCE=2 -DNETDATA_INTERNAL_CHECKS=1 -I/openssl-static/include"
+fi
+
+export LDFLAGS="-static -L/openssl-static/lib"
+
+# We export this as 'yes'; the installer writes it to .environment.
+# The updater consumes it, so that it can tell whether it should update a static install or a non-static one.
+export IS_NETDATA_STATIC_BINARY="yes"
+
+# Set eBPF LIBC to "static" to bundle the `-static` variant of the kernel-collector
+export EBPF_LIBC="static"
+export PKG_CONFIG_PATH="/openssl-static/lib/pkgconfig"
+
+# Set correct CMake flags for building against non-System OpenSSL
+# See: https://github.com/warmcat/libwebsockets/blob/master/READMEs/README.build.md
+export CMAKE_FLAGS="-DOPENSSL_ROOT_DIR=/openssl-static -DOPENSSL_LIBRARIES=/openssl-static/lib -DCMAKE_INCLUDE_DIRECTORIES_PROJECT_BEFORE=/openssl-static -DLWS_OPENSSL_INCLUDE_DIRS=/openssl-static/include -DLWS_OPENSSL_LIBRARIES=/openssl-static/lib/libssl.a;/openssl-static/lib/libcrypto.a"
+
+run ./netdata-installer.sh \
+ --install "${NETDATA_INSTALL_PARENT}" \
+ --dont-wait \
+ --dont-start-it \
+ --require-cloud \
+ --dont-scrub-cflags-even-though-it-may-break-things
+
+# Remove the netdata.conf file from the tree. It has sensible, hard-coded defaults built in.
+run rm -f "${NETDATA_INSTALL_PATH}/etc/netdata/netdata.conf"
+
+# Ensure the netdata binary is in fact statically linked
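+# (a PT_INTERP program header names the dynamic loader, so its presence means
+# the binary is dynamically linked)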
+if run readelf -l "${NETDATA_INSTALL_PATH}"/bin/netdata | grep 'INTERP'; then
+ printf >&2 "Ooops. %s is not a statically linked binary!\n" "${NETDATA_INSTALL_PATH}"/bin/netdata
+ ldd "${NETDATA_INSTALL_PATH}"/bin/netdata
+ exit 1
+fi
+
+if [ ${NETDATA_BUILD_WITH_DEBUG} -eq 0 ]; then
+ run strip "${NETDATA_INSTALL_PATH}"/bin/netdata
+ run strip "${NETDATA_INSTALL_PATH}"/usr/libexec/netdata/plugins.d/apps.plugin
+ run strip "${NETDATA_INSTALL_PATH}"/usr/libexec/netdata/plugins.d/cgroup-network
+fi
diff --git a/packaging/makeself/jobs/99-makeself.install.sh b/packaging/makeself/jobs/99-makeself.install.sh
new file mode 100755
index 0000000..de64102
--- /dev/null
+++ b/packaging/makeself/jobs/99-makeself.install.sh
@@ -0,0 +1,101 @@
+#!/usr/bin/env bash
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# shellcheck source=packaging/makeself/functions.sh
+. "$(dirname "${0}")/../functions.sh" "${@}" || exit 1
+
+run cd "${NETDATA_SOURCE_PATH}" || exit 1
+
+# -----------------------------------------------------------------------------
+# find the netdata version
+
+VERSION="$(git describe 2> /dev/null)"
+if [ -z "${VERSION}" ]; then
+ VERSION=$(cat packaging/version)
+fi
+
+if [ "${VERSION}" == "" ]; then
+ echo >&2 "Cannot find version number. Create makeself executable from source code with git tree structure."
+ exit 1
+fi
+
+# -----------------------------------------------------------------------------
+# copy the files needed by makeself installation
+
+run mkdir -p "${NETDATA_INSTALL_PATH}/system"
+
+run cp \
+ packaging/makeself/post-installer.sh \
+ packaging/makeself/install-or-update.sh \
+ packaging/installer/functions.sh \
+ configs.signatures \
+ system/netdata-init-d \
+ system/netdata-lsb \
+ system/netdata-openrc \
+ system/netdata.logrotate \
+ system/netdata.service \
+ "${NETDATA_INSTALL_PATH}/system/"
+
+# -----------------------------------------------------------------------------
+# create a wrapper to start our netdata with a modified path
+
+run mkdir -p "${NETDATA_INSTALL_PATH}/bin/srv"
+
+run mv "${NETDATA_INSTALL_PATH}/bin/netdata" \
+ "${NETDATA_INSTALL_PATH}/bin/srv/netdata" || exit 1
+
+cat > "${NETDATA_INSTALL_PATH}/bin/netdata" << EOF
+#!${NETDATA_INSTALL_PATH}/bin/bash
+export NETDATA_BASH_LOADABLES="DISABLE"
+export PATH="${NETDATA_INSTALL_PATH}/bin:\${PATH}"
+exec "${NETDATA_INSTALL_PATH}/bin/srv/netdata" "\${@}"
+EOF
+run chmod 755 "${NETDATA_INSTALL_PATH}/bin/netdata"
+
+# -----------------------------------------------------------------------------
+# copy the SSL/TLS configuration and certificates from the build system
+
+run cp -a /etc/ssl "${NETDATA_INSTALL_PATH}/share/ssl"
+
+# -----------------------------------------------------------------------------
+# remove the links to allow untaring the archive
+
+run rm "${NETDATA_INSTALL_PATH}/sbin" \
+ "${NETDATA_INSTALL_PATH}/usr/bin" \
+ "${NETDATA_INSTALL_PATH}/usr/sbin" \
+ "${NETDATA_INSTALL_PATH}/usr/local"
+
+# -----------------------------------------------------------------------------
+# create the makeself archive
+
+run sed "s|NETDATA_VERSION|${VERSION}|g" < "${NETDATA_MAKESELF_PATH}/makeself.lsm" > "${NETDATA_MAKESELF_PATH}/makeself.lsm.tmp"
+
+run "${NETDATA_MAKESELF_PATH}/makeself.sh" \
+ --gzip \
+ --complevel 9 \
+ --notemp \
+ --needroot \
+ --target "${NETDATA_INSTALL_PATH}" \
+ --header "${NETDATA_MAKESELF_PATH}/makeself-header.sh" \
+ --lsm "${NETDATA_MAKESELF_PATH}/makeself.lsm.tmp" \
+ --license "${NETDATA_MAKESELF_PATH}/makeself-license.txt" \
+ --help-header "${NETDATA_MAKESELF_PATH}/makeself-help-header.txt" \
+ "${NETDATA_INSTALL_PATH}" \
+ "${NETDATA_INSTALL_PATH}.gz.run" \
+ "netdata, the real-time performance and health monitoring system" \
+ ./system/post-installer.sh
+
+run rm "${NETDATA_MAKESELF_PATH}/makeself.lsm.tmp"
+
+# -----------------------------------------------------------------------------
+# copy it to the netdata build dir
+
+FILE="netdata-${VERSION}.gz.run"
+
+run mkdir -p artifacts
+run mv "${NETDATA_INSTALL_PATH}.gz.run" "artifacts/${FILE}"
+
+[ -f netdata-latest.gz.run ] && rm netdata-latest.gz.run
+run ln -s "artifacts/${FILE}" netdata-latest.gz.run
+
+echo >&2 "Self-extracting installer moved to 'artifacts/${FILE}'"
diff --git a/packaging/makeself/makeself-header.sh b/packaging/makeself/makeself-header.sh
new file mode 100755
index 0000000..0af3219
--- /dev/null
+++ b/packaging/makeself/makeself-header.sh
@@ -0,0 +1,556 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+# shellcheck shell=sh
+# shellcheck disable=SC2154,SC2039
+cat << EOF > "$archname"
+#!/bin/sh
+# This script was generated using Makeself $MS_VERSION
+
+ORIG_UMASK=\`umask\`
+if test "$KEEP_UMASK" = n; then
+ umask 077
+fi
+
+CRCsum="$CRCsum"
+MD5="$MD5sum"
+TMPROOT=\${TMPDIR:=/tmp}
+USER_PWD="\$PWD"; export USER_PWD
+
+label="$LABEL"
+script="$SCRIPT"
+scriptargs="$SCRIPTARGS"
+licensetxt="$LICENSE"
+helpheader='$HELPHEADER'
+targetdir="$archdirname"
+filesizes="$filesizes"
+keep="$KEEP"
+nooverwrite="$NOOVERWRITE"
+quiet="n"
+accept="n"
+nodiskspace="n"
+export_conf="$EXPORT_CONF"
+
+print_cmd_arg=""
+if type printf > /dev/null; then
+ print_cmd="printf"
+elif test -x /usr/ucb/echo; then
+ print_cmd="/usr/ucb/echo"
+else
+ print_cmd="echo"
+fi
+
+if test -d /usr/xpg4/bin; then
+ PATH=/usr/xpg4/bin:\$PATH
+ export PATH
+fi
+
+unset CDPATH
+
+MS_Printf()
+{
+ \$print_cmd \$print_cmd_arg "\$1"
+}
+
+MS_PrintLicense()
+{
+ if test x"\$licensetxt" != x; then
+ echo "\$licensetxt"
+ if test x"\$accept" != xy; then
+ while true
+ do
+ MS_Printf "Please type y to accept, n otherwise: "
+ read yn
+ if test x"\$yn" = xn; then
+ keep=n
+ eval \$finish; exit 1
+ break;
+ elif test x"\$yn" = xy; then
+ break;
+ fi
+ done
+ fi
+ fi
+}
+
+MS_diskspace()
+{
+ (
+ df -kP "\$1" | tail -1 | awk '{ if (\$4 ~ /%/) {print \$3} else {print \$4} }'
+ )
+}
+
+MS_dd()
+{
+ blocks=\`expr \$3 / 1024\`
+ bytes=\`expr \$3 % 1024\`
+ dd if="\$1" ibs=\$2 skip=1 obs=1024 conv=sync 2> /dev/null | \\
+ { test \$blocks -gt 0 && dd ibs=1024 obs=1024 count=\$blocks ; \\
+ test \$bytes -gt 0 && dd ibs=1 obs=1024 count=\$bytes ; } 2> /dev/null
+}
+
+MS_dd_Progress()
+{
+ if test x"\$noprogress" = xy; then
+ MS_dd \$@
+ return \$?
+ fi
+ file="\$1"
+ offset=\$2
+ length=\$3
+ pos=0
+ bsize=4194304
+ while test \$bsize -gt \$length; do
+ bsize=\`expr \$bsize / 4\`
+ done
+ blocks=\`expr \$length / \$bsize\`
+ bytes=\`expr \$length % \$bsize\`
+ (
+ dd ibs=\$offset skip=1 2>/dev/null
+ pos=\`expr \$pos \+ \$bsize\`
+ MS_Printf " 0%% " 1>&2
+ if test \$blocks -gt 0; then
+ while test \$pos -le \$length; do
+ dd bs=\$bsize count=1 2>/dev/null
+ pcent=\`expr \$length / 100\`
+ pcent=\`expr \$pos / \$pcent\`
+ if test \$pcent -lt 100; then
+ MS_Printf "\b\b\b\b\b\b\b" 1>&2
+ if test \$pcent -lt 10; then
+ MS_Printf " \$pcent%% " 1>&2
+ else
+ MS_Printf " \$pcent%% " 1>&2
+ fi
+ fi
+ pos=\`expr \$pos \+ \$bsize\`
+ done
+ fi
+ if test \$bytes -gt 0; then
+ dd bs=\$bytes count=1 2>/dev/null
+ fi
+ MS_Printf "\b\b\b\b\b\b\b" 1>&2
+ MS_Printf " 100%% " 1>&2
+ ) < "\$file"
+}
+
+MS_Help()
+{
+ cat << EOH >&2
+\${helpheader}Makeself version $MS_VERSION
+ 1) Getting help or info about \$0 :
+ \$0 --help Print this message
+ \$0 --info Print embedded info : title, default target directory, embedded script ...
+ \$0 --lsm Print embedded lsm entry (or no LSM)
+ \$0 --list Print the list of files in the archive
+ \$0 --check Checks integrity of the archive
+
+ 2) Running \$0 :
+ \$0 [options] [--] [additional arguments to embedded script]
+ with following options (in that order)
+ --confirm Ask before running embedded script
+ --quiet Do not print anything except error messages
+ --accept Accept the license
+ --noexec Do not run embedded script
+ --keep Do not erase target directory after running
+ the embedded script
+ --noprogress Do not show the progress during the decompression
+ --nox11 Do not spawn an xterm
+ --nochown Do not give the extracted files to the current user
+ --nodiskspace Do not check for available disk space
+ --target dir Extract directly to a target directory
+ directory path can be either absolute or relative
+ --tar arg1 [arg2 ...] Access the contents of the archive through the tar command
+ -- Following arguments will be passed to the embedded script
+EOH
+}
+
+MS_Check()
+{
+ OLD_PATH="\$PATH"
+ PATH=\${GUESS_MD5_PATH:-"\$OLD_PATH:/bin:/usr/bin:/sbin:/usr/local/ssl/bin:/usr/local/bin:/opt/openssl/bin"}
+ MD5_ARG=""
+ MD5_PATH=\`exec <&- 2>&-; which md5sum || command -v md5sum || type md5sum\`
+ test -x "\$MD5_PATH" || MD5_PATH=\`exec <&- 2>&-; which md5 || command -v md5 || type md5\`
+ test -x "\$MD5_PATH" || MD5_PATH=\`exec <&- 2>&-; which digest || command -v digest || type digest\`
+ PATH="\$OLD_PATH"
+
+ if test x"\$quiet" = xn; then
+ MS_Printf "Verifying archive integrity..."
+ fi
+ offset=\`head -n $SKIP "\$1" | wc -c | tr -d " "\`
+ verb=\$2
+ i=1
+ for s in \$filesizes
+ do
+ crc=\`echo \$CRCsum | cut -d" " -f\$i\`
+ if test -x "\$MD5_PATH"; then
+ if test x"\`basename \$MD5_PATH\`" = xdigest; then
+ MD5_ARG="-a md5"
+ fi
+ md5=\`echo \$MD5 | cut -d" " -f\$i\`
+ if test x"\$md5" = x00000000000000000000000000000000; then
+ test x"\$verb" = xy && echo " \$1 does not contain an embedded MD5 checksum." >&2
+ else
+ md5sum=\`MS_dd_Progress "\$1" \$offset \$s | eval "\$MD5_PATH \$MD5_ARG" | cut -b-32\`;
+ if test x"\$md5sum" != x"\$md5"; then
+ echo "Error in MD5 checksums: \$md5sum is different from \$md5" >&2
+ exit 2
+ else
+ test x"\$verb" = xy && MS_Printf " MD5 checksums are OK." >&2
+ fi
+ crc="0000000000"; verb=n
+ fi
+ fi
+ if test x"\$crc" = x0000000000; then
+ test x"\$verb" = xy && echo " \$1 does not contain a CRC checksum." >&2
+ else
+ sum1=\`MS_dd_Progress "\$1" \$offset \$s | CMD_ENV=xpg4 cksum | awk '{print \$1}'\`
+ if test x"\$sum1" = x"\$crc"; then
+ test x"\$verb" = xy && MS_Printf " CRC checksums are OK." >&2
+ else
+ echo "Error in checksums: \$sum1 is different from \$crc" >&2
+ exit 2;
+ fi
+ fi
+ i=\`expr \$i + 1\`
+ offset=\`expr \$offset + \$s\`
+ done
+ if test x"\$quiet" = xn; then
+ echo " All good."
+ fi
+}
+
+UnTAR()
+{
+ if test x"\$quiet" = xn; then
+ tar \$1vf - $UNTAR_EXTRA 2>&1 || { echo " ... Extraction failed." > /dev/tty; kill -15 \$$; }
+ else
+ tar \$1f - $UNTAR_EXTRA 2>&1 || { echo Extraction failed. > /dev/tty; kill -15 \$$; }
+ fi
+}
+
+finish=true
+xterm_loop=
+noprogress=$NOPROGRESS
+nox11=$NOX11
+copy=$COPY
+ownership=y
+verbose=n
+
+initargs="\$@"
+
+while true
+do
+ case "\$1" in
+ -h | --help)
+ MS_Help
+ exit 0
+ ;;
+ -q | --quiet)
+ quiet=y
+ noprogress=y
+ shift
+ ;;
+ --accept)
+ accept=y
+ shift
+ ;;
+ --info)
+ echo Identification: "\$label"
+ echo Target directory: "\$targetdir"
+ echo Uncompressed size: $USIZE KB
+ echo Compression: $COMPRESS
+ echo Date of packaging: $DATE
+ echo Built with Makeself version $MS_VERSION on $OSTYPE
+ echo Build command was: "$MS_COMMAND"
+ if test x"\$script" != x; then
+ echo Script run after extraction:
+ echo " " \$script \$scriptargs
+ fi
+ if test x"$copy" = xcopy; then
+ echo "Archive will copy itself to a temporary location"
+ fi
+ if test x"$NEED_ROOT" = xy; then
+ echo "Root permissions required for extraction"
+ fi
+ if test x"$KEEP" = xy; then
+ echo "directory \$targetdir is permanent"
+ else
+ echo "\$targetdir will be removed after extraction"
+ fi
+ exit 0
+ ;;
+ --dumpconf)
+ echo LABEL=\"\$label\"
+ echo SCRIPT=\"\$script\"
+ echo SCRIPTARGS=\"\$scriptargs\"
+ echo archdirname=\"$archdirname\"
+ echo KEEP=$KEEP
+ echo NOOVERWRITE=$NOOVERWRITE
+ echo COMPRESS=$COMPRESS
+ echo filesizes=\"\$filesizes\"
+ echo CRCsum=\"\$CRCsum\"
+ echo MD5sum=\"\$MD5\"
+ echo OLDUSIZE=$USIZE
+ echo OLDSKIP=$((SKIP + 1))
+ exit 0
+ ;;
+ --lsm)
+cat << EOLSM
+EOF
+eval "$LSM_CMD"
+cat << EOF >> "$archname"
+EOLSM
+ exit 0
+ ;;
+ --list)
+ echo Target directory: \$targetdir
+ offset=\`head -n $SKIP "\$0" | wc -c | tr -d " "\`
+ for s in \$filesizes
+ do
+ MS_dd "\$0" \$offset \$s | eval "$GUNZIP_CMD" | UnTAR t
+ offset=\`expr \$offset + \$s\`
+ done
+ exit 0
+ ;;
+ --tar)
+ offset=\`head -n $SKIP "\$0" | wc -c | tr -d " "\`
+ arg1="\$2"
+ if ! shift 2; then MS_Help; exit 1; fi
+ for s in \$filesizes
+ do
+ MS_dd "\$0" \$offset \$s | eval "$GUNZIP_CMD" | tar "\$arg1" - "\$@"
+ offset=\`expr \$offset + \$s\`
+ done
+ exit 0
+ ;;
+ --check)
+ MS_Check "\$0" y
+ exit 0
+ ;;
+ --confirm)
+ verbose=y
+ shift
+ ;;
+ --noexec)
+ script=""
+ shift
+ ;;
+ --keep)
+ keep=y
+ shift
+ ;;
+ --target)
+ keep=y
+ targetdir=\${2:-.}
+ if ! shift 2; then MS_Help; exit 1; fi
+ ;;
+ --noprogress)
+ noprogress=y
+ shift
+ ;;
+ --nox11)
+ nox11=y
+ shift
+ ;;
+ --nochown)
+ ownership=n
+ shift
+ ;;
+ --nodiskspace)
+ nodiskspace=y
+ shift
+ ;;
+ --xwin)
+ if test "$NOWAIT" = n; then
+ finish="echo Press Return to close this window...; read junk"
+ fi
+ xterm_loop=1
+ shift
+ ;;
+ --phase2)
+ copy=phase2
+ shift
+ ;;
+ --)
+ shift
+ break ;;
+ -*)
+ echo Unrecognized flag : "\$1" >&2
+ MS_Help
+ exit 1
+ ;;
+ *)
+ break ;;
+ esac
+done
+
+if test x"\$quiet" = xy -a x"\$verbose" = xy; then
+ echo Cannot be verbose and quiet at the same time. >&2
+ exit 1
+fi
+
+if test x"$NEED_ROOT" = xy -a \`id -u\` -ne 0; then
+ echo "Administrative privileges required for this archive (use su or sudo)" >&2
+ exit 1
+fi
+
+if test x"\$copy" \!= xphase2; then
+ MS_PrintLicense
+fi
+
+case "\$copy" in
+copy)
+ tmpdir=\$TMPROOT/makeself.\$RANDOM.\`date +"%y%m%d%H%M%S"\`.\$\$
+ mkdir "\$tmpdir" || {
+ echo "Could not create temporary directory \$tmpdir" >&2
+ exit 1
+ }
+ SCRIPT_COPY="\$tmpdir/makeself"
+ echo "Copying to a temporary location..." >&2
+ cp "\$0" "\$SCRIPT_COPY"
+ chmod +x "\$SCRIPT_COPY"
+ cd "\$TMPROOT"
+ exec "\$SCRIPT_COPY" --phase2 -- \$initargs
+ ;;
+phase2)
+ finish="\$finish ; rm -rf \`dirname \$0\`"
+ ;;
+esac
+
+if test x"\$nox11" = xn; then
+ if tty -s; then # Do we have a terminal?
+ :
+ else
+ if test x"\$DISPLAY" != x -a x"\$xterm_loop" = x; then # No, but do we have X?
+ if xset q > /dev/null 2>&1; then # Check for valid DISPLAY variable
+ GUESS_XTERMS="xterm gnome-terminal rxvt dtterm eterm Eterm xfce4-terminal lxterminal kvt konsole aterm terminology"
+ for a in \$GUESS_XTERMS; do
+ if type \$a >/dev/null 2>&1; then
+ XTERM=\$a
+ break
+ fi
+ done
+ chmod a+x \$0 || echo Please add execution rights on \$0
+ if test \`echo "\$0" | cut -c1\` = "/"; then # Spawn a terminal!
+ exec \$XTERM -title "\$label" -e "\$0" --xwin "\$initargs"
+ else
+ exec \$XTERM -title "\$label" -e "./\$0" --xwin "\$initargs"
+ fi
+ fi
+ fi
+ fi
+fi
+
+if test x"\$targetdir" = x.; then
+ tmpdir="."
+else
+ if test x"\$keep" = xy; then
+ if test x"\$nooverwrite" = xy && test -d "\$targetdir"; then
+ echo "Target directory \$targetdir already exists, aborting." >&2
+ exit 1
+ fi
+ if test x"\$quiet" = xn; then
+ echo "Creating directory \$targetdir" >&2
+ fi
+ tmpdir="\$targetdir"
+ dashp="-p"
+ else
+ tmpdir="\$TMPROOT/selfgz\$\$\$RANDOM"
+ dashp=""
+ fi
+ mkdir \$dashp \$tmpdir || {
+ echo 'Cannot create target directory' \$tmpdir >&2
+ echo 'You should try option --target dir' >&2
+ eval \$finish
+ exit 1
+ }
+fi
+
+location="\`pwd\`"
+if test x"\$SETUP_NOCHECK" != x1; then
+ MS_Check "\$0"
+fi
+offset=\`head -n $SKIP "\$0" | wc -c | tr -d " "\`
+
+if test x"\$verbose" = xy; then
+ MS_Printf "About to extract $USIZE KB in \$tmpdir ... Proceed ? [Y/n] "
+ read yn
+ if test x"\$yn" = xn; then
+ eval \$finish; exit 1
+ fi
+fi
+
+if test x"\$quiet" = xn; then
+ MS_Printf "Uncompressing \$label"
+fi
+res=3
+if test x"\$keep" = xn; then
+ trap 'echo Signal caught, cleaning up >&2; cd \$TMPROOT; /bin/rm -rf \$tmpdir; eval \$finish; exit 15' 1 2 3 15
+fi
+
+if test x"\$nodiskspace" = xn; then
+ leftspace=\`MS_diskspace \$tmpdir\`
+ if test -n "\$leftspace"; then
+ if test "\$leftspace" -lt $USIZE; then
+ echo
+ echo "Not enough space left in "\`dirname \$tmpdir\`" (\$leftspace KB) to decompress \$0 ($USIZE KB)" >&2
+ echo "Use --nodiskspace option to skip this check and proceed anyway" >&2
+ if test x"\$keep" = xn; then
+ echo "Consider setting TMPDIR to a directory with more free space."
+ fi
+ eval \$finish; exit 1
+ fi
+ fi
+fi
+
+for s in \$filesizes
+do
+ if MS_dd_Progress "\$0" \$offset \$s | eval "$GUNZIP_CMD" | ( cd "\$tmpdir"; umask \$ORIG_UMASK ; UnTAR xp ) 1>/dev/null; then
+ if test x"\$ownership" = xy; then
+ (cd "\$tmpdir"; chown -R \`id -u\` .; chgrp -R \`id -g\` .)
+ fi
+ else
+ echo >&2
+ echo "Unable to decompress \$0" >&2
+ eval \$finish; exit 1
+ fi
+ offset=\`expr \$offset + \$s\`
+done
+if test x"\$quiet" = xn; then
+ echo
+fi
+
+cd "\$tmpdir"
+res=0
+if test x"\$script" != x; then
+ if test x"\$export_conf" = x"y"; then
+ MS_BUNDLE="\$0"
+ MS_LABEL="\$label"
+ MS_SCRIPT="\$script"
+ MS_SCRIPTARGS="\$scriptargs"
+ MS_ARCHDIRNAME="\$archdirname"
+ MS_KEEP="\$KEEP"
+ MS_NOOVERWRITE="\$NOOVERWRITE"
+ MS_COMPRESS="\$COMPRESS"
+ export MS_BUNDLE MS_LABEL MS_SCRIPT MS_SCRIPTARGS
+ export MS_ARCHDIRNAME MS_KEEP MS_NOOVERWRITE MS_COMPRESS
+ fi
+
+ if test x"\$verbose" = x"y"; then
+ MS_Printf "OK to execute: \$script \$scriptargs \$* ? [Y/n] "
+ read yn
+ if test x"\$yn" = x -o x"\$yn" = xy -o x"\$yn" = xY; then
+ eval "\"\$script\" \$scriptargs \"\\\$@\""; res=\$?;
+ fi
+ else
+ eval "\"\$script\" \$scriptargs \"\\\$@\""; res=\$?
+ fi
+ if test "\$res" -ne 0; then
+ test x"\$verbose" = xy && echo "The program '\$script' returned an error code (\$res)" >&2
+ fi
+fi
+if test x"\$keep" = xn; then
+ cd \$TMPROOT
+ /bin/rm -rf \$tmpdir
+fi
+eval \$finish; exit \$res
+EOF
diff --git a/packaging/makeself/makeself-help-header.txt b/packaging/makeself/makeself-help-header.txt
new file mode 100644
index 0000000..9a8a3ec
--- /dev/null
+++ b/packaging/makeself/makeself-help-header.txt
@@ -0,0 +1,49 @@
+
+ ^
+ |.-. .-. .-. .-. . netdata
+ | '-' '-' '-' '-' real-time performance monitoring, done right!
+ +----+-----+-----+-----+-----+-----+-----+-----+-----+-----+-----+-----+--->
+
+ (C) Copyright 2017, Costa Tsaousis
+ All rights reserved
+ Released under GPL v3+
+
+ You are about to install netdata to this system.
+ netdata will be installed at:
+
+ /opt/netdata
+
+ The following changes will be made to your system:
+
+ # USERS / GROUPS
+ User 'netdata' and group 'netdata' will be added, if not present.
+
+ # LOGROTATE
+ This file will be installed if logrotate is present.
+
+ - /etc/logrotate.d/netdata
+
+ # SYSTEM INIT
+ This file will be installed if this system runs with systemd:
+
+ - /lib/systemd/system/netdata.service
+
+   or, for older CentOS, Debian/Ubuntu or OpenRC Gentoo:
+
+ - /etc/init.d/netdata will be created
+
+
+ This package can also update a netdata installation that has been
+ created with another version of it.
+
+ Your netdata configuration will be retained.
+ After installation, netdata will be (re-)started.
+
+ netdata re-distributes a lot of open source software components.
+ Check its full license at:
+ https://github.com/netdata/netdata/blob/master/LICENSE.md
+
+ Anonymous stat collection and reporting to Google Analytics is enabled
+ by default. To disable, pass the --disable-telemetry option to the installer
+ or export the environment variable DO_NOT_TRACK to a non-zero or non-empty
+ value (e.g. export DO_NOT_TRACK=1).
diff --git a/packaging/makeself/makeself-license.txt b/packaging/makeself/makeself-license.txt
new file mode 100644
index 0000000..bf482c4
--- /dev/null
+++ b/packaging/makeself/makeself-license.txt
@@ -0,0 +1,44 @@
+
+ ^
+ |.-. .-. .-. .-. . netdata
+ | '-' '-' '-' '-' real-time performance monitoring, done right!
+ +----+-----+-----+-----+-----+-----+-----+-----+-----+-----+-----+-----+--->
+
+ (C) Copyright 2017, Costa Tsaousis
+ All rights reserved
+ Released under GPL v3+
+
+ You are about to install netdata to this system.
+ netdata will be installed at:
+
+ /opt/netdata
+
+ The following changes will be made to your system:
+
+ # USERS / GROUPS
+ User 'netdata' and group 'netdata' will be added, if not present.
+
+ # LOGROTATE
+ This file will be installed if logrotate is present.
+
+ - /etc/logrotate.d/netdata
+
+ # SYSTEM INIT
+ This file will be installed if this system runs with systemd:
+
+ - /lib/systemd/system/netdata.service
+
+   or, for older CentOS, Debian/Ubuntu or OpenRC Gentoo:
+
+ - /etc/init.d/netdata will be created
+
+
+ This package can also update a netdata installation that has been
+ created with another version of it.
+
+ Your netdata configuration will be retained.
+ After installation, netdata will be (re-)started.
+
+ netdata re-distributes a lot of open source software components.
+ Check its full license at:
+ https://github.com/netdata/netdata/blob/master/LICENSE.md
diff --git a/packaging/makeself/makeself.lsm b/packaging/makeself/makeself.lsm
new file mode 100644
index 0000000..6bd4703
--- /dev/null
+++ b/packaging/makeself/makeself.lsm
@@ -0,0 +1,16 @@
+Begin3
+Title: netdata
+Version: NETDATA_VERSION
+Description: netdata is a system for distributed real-time performance and health monitoring.
+ It provides unparalleled insights, in real-time, of everything happening on the
+ system it runs (including applications such as web and database servers), using
+ modern interactive web dashboards. netdata is fast and efficient, designed to
+ permanently run on all systems (physical & virtual servers, containers, IoT
+ devices), without disrupting their core function.
+Keywords: real-time performance and health monitoring
+Author: Costa Tsaousis (costa@tsaousis.gr)
+Maintained-by: Costa Tsaousis (costa@tsaousis.gr)
+Original-site: https://my-netdata.io/
+Platform: Unix
+Copying-policy: GPL
+End
diff --git a/packaging/makeself/makeself.sh b/packaging/makeself/makeself.sh
new file mode 100755
index 0000000..f3cb699
--- /dev/null
+++ b/packaging/makeself/makeself.sh
@@ -0,0 +1,621 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-3.0-or-later
+#
+# Makeself version 2.3.x
+# by Stephane Peter <megastep@megastep.org>
+#
+# Utility to create self-extracting tar.gz archives.
+# The resulting archive is a file holding the tar.gz archive with
+# a small Shell script stub that uncompresses the archive to a temporary
+# directory and then executes a given script from within that directory.
+#
+# Makeself home page: http://makeself.io/
+#
+# Version 2.0 is a rewrite of version 1.0 to make the code easier to read and maintain.
+#
+# Version history :
+# - 1.0 : Initial public release
+# - 1.1 : The archive can be passed parameters that will be passed on to
+# the embedded script, thanks to John C. Quillan
+# - 1.2 : Package distribution, bzip2 compression, more command line options,
+# support for non-temporary archives. Ideas thanks to Francois Petitjean
+# - 1.3 : More patches from Bjarni R. Einarsson and Francois Petitjean:
+# Support for no compression (--nocomp), script is no longer mandatory,
+# automatic launch in an xterm, optional verbose output, and -target
+# archive option to indicate where to extract the files.
+# - 1.4 : Improved UNIX compatibility (Francois Petitjean)
+# Automatic integrity checking, support of LSM files (Francois Petitjean)
+# - 1.5 : Many bugfixes. Optionally disable xterm spawning.
+# - 1.5.1 : More bugfixes, added archive options -list and -check.
+# - 1.5.2 : Cosmetic changes to inform the user of what's going on with big
+# archives (Quake III demo)
+# - 1.5.3 : Check for validity of the DISPLAY variable before launching an xterm.
+# More verbosity in xterms and check for embedded command's return value.
+# Bugfix for Debian 2.0 systems that have a different "print" command.
+# - 1.5.4 : Many bugfixes. Print out a message if the extraction failed.
+# - 1.5.5 : More bugfixes. Added support for SETUP_NOCHECK environment variable to
+# bypass checksum verification of archives.
+# - 1.6.0 : Compute MD5 checksums with the md5sum command (patch from Ryan Gordon)
+# - 2.0 : Brand new rewrite, cleaner architecture, separated header and UNIX ports.
+# - 2.0.1 : Added --copy
+# - 2.1.0 : Allow multiple tarballs to be stored in one archive, and incremental updates.
+# Added --nochown for archives
+# Stopped doing redundant checksums when not necessary
+# - 2.1.1 : Work around insane behavior from certain Linux distros with no 'uncompress' command
+# Cleaned up the code to handle error codes from compress. Simplified the extraction code.
+# - 2.1.2 : Some bug fixes. Use head -n to avoid problems.
+# - 2.1.3 : Bug fixes with command line when spawning terminals.
+# Added --tar for archives, allowing to give arbitrary arguments to tar on the contents of the archive.
+# Added --noexec to prevent execution of embedded scripts.
+# Added --nomd5 and --nocrc to avoid creating checksums in archives.
+# Added command used to create the archive in --info output.
+# Run the embedded script through eval.
+# - 2.1.4 : Fixed --info output.
+# Generate random directory name when extracting files to . to avoid problems. (Jason Trent)
+# Better handling of errors with wrong permissions for the directory containing the files. (Jason Trent)
+# Avoid some race conditions (Ludwig Nussel)
+# Unset the $CDPATH variable to avoid problems if it is set. (Debian)
+# Better handling of dot files in the archive directory.
+# - 2.1.5 : Made the md5sum detection consistent with the header code.
+# Check for the presence of the archive directory
+# Added --encrypt for symmetric encryption through gpg (Eric Windisch)
+# Added support for the digest command on Solaris 10 for MD5 checksums
+# Check for available disk space before extracting to the target directory (Andreas Schweitzer)
+# Allow extraction to run asynchronously (patch by Peter Hatch)
+# Use file descriptors internally to avoid error messages (patch by Kay Tiong Khoo)
+# - 2.1.6 : Replaced one dot per file progress with a realtime progress percentage and a spinning cursor (Guy Baconniere)
+# Added --noprogress to prevent showing the progress during the decompression (Guy Baconniere)
+# Added --target dir to allow extracting directly to a target directory (Guy Baconniere)
+# - 2.2.0 : Many bugfixes, updates and contributions from users. Check out the project page on Github for the details.
+# - 2.3.0 : Option to specify packaging date to enable byte-for-byte reproducibility. (Marc Pawlowsky)
+#
+# (C) 1998-2017 by Stephane Peter <megastep@megastep.org>
+#
+# This software is released under the terms of the GNU GPL version 2 and above
+# Please read the license at http://www.gnu.org/copyleft/gpl.html
+#
+
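+# A hypothetical invocation, for illustration only (the archive directory,
+# file names and label below are not part of this script):
+#
+#   ./makeself.sh --gzip /tmp/netdata-build netdata.gz.run \
+#       "netdata static installer" ./install-or-update.sh
+#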
+MS_VERSION=2.3.1
+MS_COMMAND="$0"
+unset CDPATH
+
+for f in "${1+"$@"}"; do
+ MS_COMMAND="$MS_COMMAND \\\\
+ \\\"$f\\\""
+done
+
+# For Solaris systems
+if test -d /usr/xpg4/bin; then
+ PATH=/usr/xpg4/bin:$PATH
+ export PATH
+fi
+
+# Procedures
+
+MS_Usage()
+{
+ echo "Usage: $0 [params] archive_dir file_name label startup_script [args]"
+ echo "params can be one or more of the following :"
+ echo " --version | -v : Print out Makeself version number and exit"
+ echo " --help | -h : Print out this help message"
+ echo " --tar-quietly : Suppress verbose output from the tar command"
+ echo " --quiet | -q : Do not print any messages other than errors."
+ echo " --gzip : Compress using gzip (default if detected)"
+ echo " --pigz : Compress with pigz"
+ echo " --bzip2 : Compress using bzip2 instead of gzip"
+ echo " --pbzip2 : Compress using pbzip2 instead of gzip"
+ echo " --xz : Compress using xz instead of gzip"
+ echo " --lzo : Compress using lzop instead of gzip"
+ echo " --lz4 : Compress using lz4 instead of gzip"
+ echo " --compress : Compress using the UNIX 'compress' command"
+ echo " --complevel lvl : Compression level for gzip pigz xz lzo lz4 bzip2 and pbzip2 (default 9)"
+ echo " --base64 : Instead of compressing, encode the data using base64"
+ echo " --gpg-encrypt : Instead of compressing, encrypt the data using GPG"
+ echo " --gpg-asymmetric-encrypt-sign"
+ echo " : Instead of compressing, asymmetrically encrypt and sign the data using GPG"
+ echo " --gpg-extra opt : Append more options to the gpg command line"
+ echo " --ssl-encrypt : Instead of compressing, encrypt the data using OpenSSL"
+ echo " --nocomp : Do not compress the data"
+ echo " --notemp : The archive will create archive_dir in the"
+ echo " current directory and uncompress in ./archive_dir"
+ echo " --needroot : Check that the root user is extracting the archive before proceeding"
+ echo " --copy : Upon extraction, the archive will first copy itself to"
+ echo " a temporary directory"
+ echo " --append : Append more files to an existing Makeself archive"
+ echo " The label and startup scripts will then be ignored"
+ echo " --target dir : Extract directly to a target directory"
+ echo " directory path can be either absolute or relative"
+ echo " --nooverwrite : Do not extract the archive if the specified target directory exists"
+ echo " --current : Files will be extracted to the current directory"
+ echo " Both --current and --target imply --notemp"
+ echo " --tar-extra opt : Append more options to the tar command line"
+ echo " --untar-extra opt : Append more options to tar during the extraction of the tar archive"
+ echo " --nomd5 : Don't calculate an MD5 for archive"
+ echo " --nocrc : Don't calculate a CRC for archive"
+ echo " --header file : Specify location of the header script"
+ echo " --follow : Follow the symlinks in the archive"
+ echo " --noprogress : Do not show the progress during the decompression"
+ echo " --nox11 : Disable automatic spawn of an xterm"
+ echo " --nowait : Do not wait for user input after executing embedded"
+ echo " program from an xterm"
+ echo " --lsm file : LSM file describing the package"
+ echo " --license file : Append a license file"
+ echo " --help-header file : Add a header to the archive's --help output"
+ echo " --packaging-date date"
+ echo " : Use provided string as the packaging date"
+ echo " instead of the current date."
+ echo
+ echo " --keep-umask : Keep the umask set to shell default, rather than overriding when executing self-extracting archive."
+ echo " --export-conf : Export configuration variables to startup_script"
+ echo
+ echo "Do not forget to give a fully qualified startup script name"
+ echo "(i.e. with a ./ prefix if inside the archive)."
+ exit 1
+}
+
+# Default settings
+if type gzip > /dev/null 2>&1; then
+ COMPRESS=gzip
+else
+ COMPRESS=Unix
+fi
+COMPRESS_LEVEL=9
+KEEP=n
+CURRENT=n
+NOX11=n
+NOWAIT=n
+APPEND=n
+TAR_QUIETLY=n
+KEEP_UMASK=n
+QUIET=n
+NOPROGRESS=n
+COPY=none
+NEED_ROOT=n
+TAR_ARGS=cvf
+TAR_EXTRA=""
+GPG_EXTRA=""
+DU_ARGS=-ks
+HEADER=`dirname "$0"`/makeself-header.sh
+TARGETDIR=""
+NOOVERWRITE=n
+DATE=`LC_ALL=C date`
+EXPORT_CONF=n
+
+# LSM file stuff
+LSM_CMD="echo No LSM. >> \"\$archname\""
+
+while true
+do
+ case "$1" in
+ --version | -v)
+ echo Makeself version $MS_VERSION
+ exit 0
+ ;;
+ --pbzip2)
+ COMPRESS=pbzip2
+ shift
+ ;;
+ --bzip2)
+ COMPRESS=bzip2
+ shift
+ ;;
+ --gzip)
+ COMPRESS=gzip
+ shift
+ ;;
+ --pigz)
+ COMPRESS=pigz
+ shift
+ ;;
+ --xz)
+ COMPRESS=xz
+ shift
+ ;;
+ --lzo)
+ COMPRESS=lzo
+ shift
+ ;;
+ --lz4)
+ COMPRESS=lz4
+ shift
+ ;;
+ --compress)
+ COMPRESS=Unix
+ shift
+ ;;
+ --base64)
+ COMPRESS=base64
+ shift
+ ;;
+ --gpg-encrypt)
+ COMPRESS=gpg
+ shift
+ ;;
+ --gpg-asymmetric-encrypt-sign)
+ COMPRESS=gpg-asymmetric
+ shift
+ ;;
+ --gpg-extra)
+ GPG_EXTRA="$2"
+ if ! shift 2; then MS_Usage; exit 1; fi
+ ;;
+ --ssl-encrypt)
+ COMPRESS=openssl
+ shift
+ ;;
+ --nocomp)
+ COMPRESS=none
+ shift
+ ;;
+ --complevel)
+ COMPRESS_LEVEL="$2"
+ if ! shift 2; then MS_Usage; exit 1; fi
+ ;;
+ --notemp)
+ KEEP=y
+ shift
+ ;;
+ --copy)
+ COPY=copy
+ shift
+ ;;
+ --current)
+ CURRENT=y
+ KEEP=y
+ shift
+ ;;
+ --tar-extra)
+ TAR_EXTRA="$2"
+ if ! shift 2; then MS_Usage; exit 1; fi
+ ;;
+ --untar-extra)
+ UNTAR_EXTRA="$2"
+ if ! shift 2; then MS_Usage; exit 1; fi
+ ;;
+ --target)
+ TARGETDIR="$2"
+ KEEP=y
+ if ! shift 2; then MS_Usage; exit 1; fi
+ ;;
+ --nooverwrite)
+ NOOVERWRITE=y
+ shift
+ ;;
+ --needroot)
+ NEED_ROOT=y
+ shift
+ ;;
+ --header)
+ HEADER="$2"
+ if ! shift 2; then MS_Usage; exit 1; fi
+ ;;
+ --license)
+ LICENSE=`cat "$2"`
+ if ! shift 2; then MS_Usage; exit 1; fi
+ ;;
+ --follow)
+ TAR_ARGS=cvhf
+ DU_ARGS=-ksL
+ shift
+ ;;
+ --noprogress)
+ NOPROGRESS=y
+ shift
+ ;;
+ --nox11)
+ NOX11=y
+ shift
+ ;;
+ --nowait)
+ NOWAIT=y
+ shift
+ ;;
+ --nomd5)
+ NOMD5=y
+ shift
+ ;;
+ --nocrc)
+ NOCRC=y
+ shift
+ ;;
+ --append)
+ APPEND=y
+ shift
+ ;;
+ --lsm)
+ LSM_CMD="cat \"$2\" >> \"\$archname\""
+ if ! shift 2; then MS_Usage; exit 1; fi
+ ;;
+ --packaging-date)
+ DATE="$2"
+ if ! shift 2; then MS_Usage; exit 1; fi
+ ;;
+ --help-header)
+ HELPHEADER=`sed -e "s/'/'\\\\\''/g" "$2"`
+ if ! shift 2; then MS_Usage; exit 1; fi
+ [ -n "$HELPHEADER" ] && HELPHEADER="$HELPHEADER
+"
+ ;;
+ --tar-quietly)
+ TAR_QUIETLY=y
+ shift
+ ;;
+ --keep-umask)
+ KEEP_UMASK=y
+ shift
+ ;;
+ --export-conf)
+ EXPORT_CONF=y
+ shift
+ ;;
+ -q | --quiet)
+ QUIET=y
+ shift
+ ;;
+ -h | --help)
+ MS_Usage
+ ;;
+ -*)
+ echo Unrecognized flag : "$1"
+ MS_Usage
+ ;;
+ *)
+ break
+ ;;
+ esac
+done
+
+if test $# -lt 1; then
+ MS_Usage
+else
+ if test -d "$1"; then
+ archdir="$1"
+ else
+ echo "Directory $1 does not exist." >&2
+ exit 1
+ fi
+fi
+archname="$2"
+
+if test "$QUIET" = "y" || test "$TAR_QUIETLY" = "y"; then
+ if test "$TAR_ARGS" = "cvf"; then
+ TAR_ARGS="cf"
+ elif test "$TAR_ARGS" = "cvhf"; then
+ TAR_ARGS="chf"
+ fi
+fi
+
+if test "$APPEND" = y; then
+ if test $# -lt 2; then
+ MS_Usage
+ fi
+
+ # Gather the info from the original archive
+ OLDENV=`sh "$archname" --dumpconf`
+ if test $? -ne 0; then
+ echo "Unable to update archive: $archname" >&2
+ exit 1
+ else
+ eval "$OLDENV"
+ fi
+else
+ if test "$KEEP" = n -a $# = 3; then
+ echo "ERROR: Making a temporary archive with no embedded command does not make sense!" >&2
+ echo >&2
+ MS_Usage
+ fi
+ # We don't want to create an absolute directory unless a target directory is defined
+ if test "$CURRENT" = y; then
+ archdirname="."
+ elif test x$TARGETDIR != x; then
+ archdirname="$TARGETDIR"
+ else
+ archdirname=`basename "$1"`
+ fi
+
+ if test $# -lt 3; then
+ MS_Usage
+ fi
+
+ LABEL="$3"
+ SCRIPT="$4"
+ test "x$SCRIPT" = x || shift 1
+ shift 3
+ SCRIPTARGS="$*"
+fi
+
+if test "$KEEP" = n -a "$CURRENT" = y; then
+ echo "ERROR: It is A VERY DANGEROUS IDEA to try to combine --notemp and --current." >&2
+ exit 1
+fi
+
+case $COMPRESS in
+gzip)
+ GZIP_CMD="gzip -c$COMPRESS_LEVEL"
+ GUNZIP_CMD="gzip -cd"
+ ;;
+pigz)
+ GZIP_CMD="pigz -$COMPRESS_LEVEL"
+ GUNZIP_CMD="gzip -cd"
+ ;;
+pbzip2)
+ GZIP_CMD="pbzip2 -c$COMPRESS_LEVEL"
+ GUNZIP_CMD="bzip2 -d"
+ ;;
+bzip2)
+ GZIP_CMD="bzip2 -$COMPRESS_LEVEL"
+ GUNZIP_CMD="bzip2 -d"
+ ;;
+xz)
+ GZIP_CMD="xz -c$COMPRESS_LEVEL"
+ GUNZIP_CMD="xz -d"
+ ;;
+lzo)
+ GZIP_CMD="lzop -c$COMPRESS_LEVEL"
+ GUNZIP_CMD="lzop -d"
+ ;;
+lz4)
+ GZIP_CMD="lz4 -c$COMPRESS_LEVEL"
+ GUNZIP_CMD="lz4 -d"
+ ;;
+base64)
+ GZIP_CMD="base64"
+ GUNZIP_CMD="base64 -d -i"
+ ;;
+gpg)
+ GZIP_CMD="gpg $GPG_EXTRA -ac -z$COMPRESS_LEVEL"
+ GUNZIP_CMD="gpg -d"
+ ;;
+gpg-asymmetric)
+ GZIP_CMD="gpg $GPG_EXTRA -z$COMPRESS_LEVEL -es"
+ GUNZIP_CMD="gpg --yes -d"
+ ;;
+openssl)
+ GZIP_CMD="openssl aes-256-cbc -a -salt -md sha256"
+ GUNZIP_CMD="openssl aes-256-cbc -d -a -md sha256"
+ ;;
+Unix)
+ GZIP_CMD="compress -cf"
+ GUNZIP_CMD="exec 2>&-; uncompress -c || test \\\$? -eq 2 || gzip -cd"
+ ;;
+none)
+ GZIP_CMD="cat"
+ GUNZIP_CMD="cat"
+ ;;
+esac
+
+tmpfile="${TMPDIR:=/tmp}/mkself$$"
+
+if test -f "$HEADER"; then
+ oldarchname="$archname"
+ archname="$tmpfile"
+ # Generate a fake header to count its lines
+ SKIP=0
+ . "$HEADER"
+ SKIP=`cat "$tmpfile" |wc -l`
+ # Get rid of any spaces
+ SKIP=`expr $SKIP`
+ rm -f "$tmpfile"
+ if test "$QUIET" = "n";then
+ echo Header is $SKIP lines long >&2
+ fi
+
+ archname="$oldarchname"
+else
+ echo "Unable to open header file: $HEADER" >&2
+ exit 1
+fi
+
+if test "$QUIET" = "n";then
+ echo
+fi
+
+if test "$APPEND" = n; then
+ if test -f "$archname"; then
+ echo "WARNING: Overwriting existing file: $archname" >&2
+ fi
+fi
+
+USIZE=`du $DU_ARGS "$archdir" | awk '{print $1}'`
+
+if test "." = "$archdirname"; then
+ if test "$KEEP" = n; then
+ archdirname="makeself-$$-`date +%Y%m%d%H%M%S`"
+ fi
+fi
+
+test -d "$archdir" || { echo "Error: $archdir does not exist."; rm -f "$tmpfile"; exit 1; }
+if test "$QUIET" = "n";then
+ echo About to compress $USIZE KB of data...
+ echo Adding files to archive named \"$archname\"...
+fi
+exec 3<> "$tmpfile"
+( cd "$archdir" && ( tar $TAR_EXTRA -$TAR_ARGS - . | eval "$GZIP_CMD" >&3 ) ) || \
+ { echo Aborting: archive directory not found or temporary file: "$tmpfile" could not be created.; exec 3>&-; rm -f "$tmpfile"; exit 1; }
+exec 3>&- # try to close the archive
+
+fsize=`cat "$tmpfile" | wc -c | tr -d " "`
+
+# Compute the checksums
+
+md5sum=00000000000000000000000000000000
+crcsum=0000000000
+
+if test "$NOCRC" = y; then
+ if test "$QUIET" = "n";then
+ echo "skipping crc at user request"
+ fi
+else
+ crcsum=`cat "$tmpfile" | CMD_ENV=xpg4 cksum | sed -e 's/ /Z/' -e 's/ /Z/' | cut -dZ -f1`
+ if test "$QUIET" = "n";then
+ echo "CRC: $crcsum"
+ fi
+fi
+
+if test "$NOMD5" = y; then
+ if test "$QUIET" = "n";then
+ echo "skipping md5sum at user request"
+ fi
+else
+ # Try to locate a MD5 binary
+ OLD_PATH=$PATH
+ PATH=${GUESS_MD5_PATH:-"$OLD_PATH:/bin:/usr/bin:/sbin:/usr/local/ssl/bin:/usr/local/bin:/opt/openssl/bin"}
+ MD5_ARG=""
+ MD5_PATH=`exec <&- 2>&-; which md5sum || command -v md5sum || type md5sum`
+ test -x "$MD5_PATH" || MD5_PATH=`exec <&- 2>&-; which md5 || command -v md5 || type md5`
+ test -x "$MD5_PATH" || MD5_PATH=`exec <&- 2>&-; which digest || command -v digest || type digest`
+ PATH=$OLD_PATH
+ if test -x "$MD5_PATH"; then
+ if test `basename ${MD5_PATH}`x = digestx; then
+ MD5_ARG="-a md5"
+ fi
+ md5sum=`cat "$tmpfile" | eval "$MD5_PATH $MD5_ARG" | cut -b-32`;
+ if test "$QUIET" = "n";then
+ echo "MD5: $md5sum"
+ fi
+ else
+ if test "$QUIET" = "n";then
+ echo "MD5: none, MD5 command not found"
+ fi
+ fi
+fi
+
+if test "$APPEND" = y; then
+ mv "$archname" "$archname".bak || exit
+
+ # Prepare entry for new archive
+ filesizes="$filesizes $fsize"
+ CRCsum="$CRCsum $crcsum"
+ MD5sum="$MD5sum $md5sum"
+ USIZE=`expr $USIZE + $OLDUSIZE`
+ # Generate the header
+ . "$HEADER"
+ # Append the original data
+ tail -n +$OLDSKIP "$archname".bak >> "$archname"
+ # Append the new data
+ cat "$tmpfile" >> "$archname"
+
+ chmod +x "$archname"
+ rm -f "$archname".bak
+ if test "$QUIET" = "n";then
+ echo Self-extractable archive \"$archname\" successfully updated.
+ fi
+else
+ filesizes="$fsize"
+ CRCsum="$crcsum"
+ MD5sum="$md5sum"
+
+ # Generate the header
+ . "$HEADER"
+
+ # Append the compressed tar data after the stub
+ if test "$QUIET" = "n";then
+ echo
+ fi
+ cat "$tmpfile" >> "$archname"
+ chmod +x "$archname"
+ if test "$QUIET" = "n";then
+ echo Self-extractable archive \"$archname\" successfully created.
+ fi
+fi
+rm -f "$tmpfile"
diff --git a/packaging/makeself/openssl.version b/packaging/makeself/openssl.version
new file mode 100644
index 0000000..adc000b
--- /dev/null
+++ b/packaging/makeself/openssl.version
@@ -0,0 +1 @@
+OpenSSL_1_1_1h
diff --git a/packaging/makeself/post-installer.sh b/packaging/makeself/post-installer.sh
new file mode 100755
index 0000000..38cc41e
--- /dev/null
+++ b/packaging/makeself/post-installer.sh
@@ -0,0 +1,11 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# This script is started using the shell of the system
+# and executes our 'install-or-update.sh' script
+# using the netdata-supplied, statically linked BASH,
+#
+# so, in 'install-or-update.sh' we are always sure
+# we run under BASH v4.
+
+./bin/bash system/install-or-update.sh "${@}"
diff --git a/packaging/makeself/run-all-jobs.sh b/packaging/makeself/run-all-jobs.sh
new file mode 100755
index 0000000..dd123c2
--- /dev/null
+++ b/packaging/makeself/run-all-jobs.sh
@@ -0,0 +1,42 @@
+#!/usr/bin/env bash
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+set -e
+
+LC_ALL=C
+umask 002
+
+# -----------------------------------------------------------------------------
+# prepare the environment for the jobs
+
+# installation directory
+export NETDATA_INSTALL_PATH="${1-/opt/netdata}"
+
+# our source directory
+NETDATA_MAKESELF_PATH="$(dirname "${0}")"
+export NETDATA_MAKESELF_PATH
+if [ "${NETDATA_MAKESELF_PATH:0:1}" != "/" ]; then
+ NETDATA_MAKESELF_PATH="$(pwd)/${NETDATA_MAKESELF_PATH}"
+ export NETDATA_MAKESELF_PATH
+fi
+
+# netdata source directory
+export NETDATA_SOURCE_PATH="${NETDATA_MAKESELF_PATH}/../.."
+
+# make sure ${NULL} is empty
+export NULL=
+
+# -----------------------------------------------------------------------------
+
+cd "${NETDATA_MAKESELF_PATH}" || exit 1
+
+# shellcheck source=packaging/makeself/functions.sh
+. ./functions.sh "${@}" || exit 1
+
+for x in jobs/*.install.sh; do
+ progress "running ${x}"
+ "${x}" "${NETDATA_INSTALL_PATH}"
+done
+
+echo >&2 "All jobs for static packaging done successfully."
+exit 0
diff --git a/packaging/mosquitto.checksums b/packaging/mosquitto.checksums
new file mode 100644
index 0000000..5e2db8a
--- /dev/null
+++ b/packaging/mosquitto.checksums
@@ -0,0 +1 @@
+532665f163348019909bd47f3e51a8ca8c9c26a01c08c09c51b903688f78cc11 v.1.6.8_Netdata-5.tar.gz
diff --git a/packaging/mosquitto.version b/packaging/mosquitto.version
new file mode 100644
index 0000000..1ef5ad5
--- /dev/null
+++ b/packaging/mosquitto.version
@@ -0,0 +1 @@
+v.1.6.8_Netdata-5
diff --git a/packaging/scripts/install.sh b/packaging/scripts/install.sh
new file mode 100755
index 0000000..db8d4a6
--- /dev/null
+++ b/packaging/scripts/install.sh
@@ -0,0 +1,58 @@
+#!/bin/sh
+
+install_debian_like() {
+ # This is needed to ensure package installs don't prompt for any user input.
+ export DEBIAN_FRONTEND=noninteractive
+
+ apt-get update
+
+ # Install Netdata
+ apt-get install -y "/artifacts/netdata_${VERSION}_${ARCH}.deb"
+
+ # Install testing tools
+ apt-get install -y --no-install-recommends \
+ curl netcat jq
+}
+
+install_fedora_like() {
+ # Using a glob pattern here because I can't reliably determine what the
+ # resulting package name will be (TODO: There must be a better way!)
+
+ PKGMGR="$( (command -v dnf > /dev/null && echo "dnf") || echo "yum")"
+
+ # Install Netdata
+ "$PKGMGR" install -y /artifacts/netdata-"${VERSION}"-*.rpm
+
+ # Install testing tools
+ "$PKGMGR" install -y curl nc jq
+}
+
+install_suse_like() {
+ # Using a glob pattern here because I can't reliably determine what the
+ # resulting package name will be (TODO: There must be a better way!)
+
+ # Install Netdata
+ # FIXME: Allow unsigned packages (for now) #7773
+ zypper install -y --allow-unsigned-rpm \
+ /artifacts/netdata-"${VERSION}"-*.rpm
+
+ # Install testing tools
+ zypper install -y --no-recommends \
+ curl netcat jq
+}
+
+case "${DISTRO}" in
+ debian | ubuntu)
+ install_debian_like
+ ;;
+ fedora | centos)
+ install_fedora_like
+ ;;
+ opensuse)
+ install_suse_like
+ ;;
+ *)
+ printf "ERROR: unsupported distro: %s_%s\n" "${DISTRO}" "${DISTRO_VERSION}"
+ exit 1
+ ;;
+esac
diff --git a/packaging/scripts/test.sh b/packaging/scripts/test.sh
new file mode 100755
index 0000000..24ba296
--- /dev/null
+++ b/packaging/scripts/test.sh
@@ -0,0 +1,27 @@
+#!/bin/sh
+
+wait_for() {
+ host="${1}"
+ port="${2}"
+ name="${3}"
+ timeout="${4:-30}"
+
+ printf "Waiting for %s on %s:%s ... " "${name}" "${host}" "${port}"
+
+ i=0
+ while ! nc -z "${host}" "${port}"; do
+ sleep 1
+ if [ "$i" -gt "$timeout" ]; then
+ printf "Timed out!\n"
+ return 1
+ fi
+ i="$((i + 1))"
+ done
+ printf "OK\n"
+}
+
+netdata -D > netdata.log 2>&1 &
+
+wait_for localhost 19999 netdata
+
+curl -sS http://127.0.0.1:19999/api/v1/info | jq '.version'
diff --git a/packaging/version b/packaging/version
new file mode 100644
index 0000000..9bfaa78
--- /dev/null
+++ b/packaging/version
@@ -0,0 +1 @@
+v1.29.3
diff --git a/parser/Makefile.am b/parser/Makefile.am
new file mode 100644
index 0000000..02fe3a3
--- /dev/null
+++ b/parser/Makefile.am
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
+
diff --git a/parser/README.md b/parser/README.md
new file mode 100644
index 0000000..50d55e3
--- /dev/null
+++ b/parser/README.md
@@ -0,0 +1,149 @@
+#### Introduction
+
+The parser is used to process streaming and plugin input, as well as metadata.
+
+Usage
+
+1. Define a structure that will be used to share user state across calls
+2. Initialize the parser using `parser_init`
+3. Register keywords and associated callback functions using `parser_add_keyword`
+4. Register actions on the keywords
+5. Start a loop until EOF
+   1. Fetch the next line using `parser_next`
+   2. Process the line using `parser_action`
+      1. The registered callbacks are executed to parse the input
+      2. The registered action for the callback is called for processing
+6. Release the parser using `parser_destroy`
+7. Release the user structure
+
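+A minimal sketch of this loop, assuming a stdio `FILE *` as the input source; the `MYKEYWORD` keyword, the
+`my_keyword_cb` callback and the `my_user_state` structure are illustrative, not part of the API:
+
+```c
+#include "parser.h"    // PARSER, PARSER_RC, PLUGINSD_ACTION, ...
+
+typedef struct {
+    int lines_seen;    // example user state shared across callbacks
+} my_user_state;
+
+// hypothetical callback for a hypothetical MYKEYWORD keyword
+static PARSER_RC my_keyword_cb(char **words, void *user, PLUGINSD_ACTION *plugins_action)
+{
+    (void)words; (void)plugins_action;
+    ((my_user_state *)user)->lines_seen++;
+    return PARSER_RC_OK;                    // continue with any remaining callbacks
+}
+
+void parse_stream(RRDHOST *host, FILE *input)
+{
+    my_user_state state = { 0 };
+
+    PARSER *parser = parser_init(host, &state, input, PARSER_INPUT_SPLIT);
+    parser_add_keyword(parser, "MYKEYWORD", my_keyword_cb);
+
+    while (parser_next(parser) == 0) {      // 0 == a line was fetched
+        if (parser_action(parser, NULL))    // non-zero == processing failed
+            break;
+    }
+
+    parser_destroy(parser);
+}
+```
+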
+#### Functions
+
+----
+##### parser_init(RRDHOST *host, void *user, void *input, PARSER_INPUT_TYPE flags)
+
+Initialize an internal parser with the specified user defined data structure that will be shared across calls.
+
+Input
+- Host
+ - The host this parser will be dealing with; needed, for example, when streaming with SSL is enabled for this host
+- user
+ - User defined structure that is passed in all the calls
+- input
+ - Where the parser will get the input from
+- flags
+ - flags to define processing on the input
+
+Output
+- A parser structure
+
+
+
+----
+##### parser_push(PARSER *parser, char *line)
+
+Push a new line for processing
+
+Input
+
+- parser
+ - The parser object as returned by the `parser_init`
+- line
+ - The new line to process
+
+
+Output
+- The line will be injected into the stream and will be the next one to be processed
+
+Returns
+- 0 line added
+- 1 error detected
+
+----
+##### parser_add_keyword(PARSER *parser, char *keyword, keyword_function callback_function)
+
+The function will add callbacks for keywords. The callback function is defined as
+
+`typedef PARSER_RC (*keyword_function)(char **words, void *user, PLUGINSD_ACTION *plugins_action);`
+
+Input
+
+- parser
+ - The parser object as returned by the `parser_init`
+- keyword
+ - The keyword to register
+- keyword_function
+ - The callback that will handle the keyword processing
+ * The callback function should return one of the following
+ * PARSER_RC_OK -- Callback was successful (continue with other callbacks)
+ * PARSER_RC_STOP -- Stop processing callbacks (return OK)
+ * PARSER_RC_ERROR -- Callback failed, exit
+
+Output
+- The corresponding keyword and callback will be registered
+
+Returns
+- 0 maximum callbacks already registered for this keyword
+- > 0 which is the number of callbacks associated with this keyword.
+
+
+----
+##### parser_next(PARSER *parser)
+Fetch the next line to parse and store it internally
+
+Input
+- parser
+ - The parser object as returned by the `parser_init`
+
+Output
+- The parser will store internally the next item to parse
+
+Returns
+- 0 Next item fetched successfully
+- 1 No more items to parse
+
+----
+##### parser_action(PARSER *parser, char *input)
+Process the current line (or the given `input`) by executing the callbacks registered for its keyword
+
+Input
+- parser
+ - The parser object as returned by the `parser_init`
+- input
+ - Process the input specified instead of using the internal buffer
+
+Output
+- The current keyword will be processed by calling all the registered callbacks
+
+Returns
+- 0 Callbacks called successfully
+- 1 Failed
+
+----
+##### parser_destroy(PARSER *parser)
+Cleanup a previously allocated parser
+
+Input
+- parser
+ - The parser object as returned by the `parser_init`
+
+Output
+- The parser is deallocated
+
+Returns
+- none
+
+----
+##### parser_recover_input(PARSER *parser)
+Restore the input buffer, undoing the in-place modifications made while splitting the last line into words
+
+Input
+- parser
+ - The parser object as returned by the `parser_init`
+
+Output
+- The input buffer is restored to its original contents
+
+Returns
+- 0 Input restored
+- 1 Failed
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fparser%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/parser/parser.c b/parser/parser.c
new file mode 100644
index 0000000..21d7fb3
--- /dev/null
+++ b/parser/parser.c
@@ -0,0 +1,320 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "parser.h"
+
+static inline int find_keyword(char *str, char *keyword, int max_size, int (*custom_isspace)(char))
+{
+ char *s = str, *keyword_start;
+
+ while (unlikely(custom_isspace(*s))) s++;
+ keyword_start = s;
+
+ while (likely(*s && !custom_isspace(*s)) && max_size > 0) {
+ *keyword++ = *s++;
+ max_size--;
+ }
+ *keyword = '\0';
+ return max_size == 0 ? 0 : (int) (s - keyword_start);
+}
+
+/*
+ * Initialize a parser
+ * host : the host this parser belongs to
+ * user : as defined by the caller, will be shared across calls
+ * input : main input stream (auto detect stream -- file, socket, pipe)
+ * flags : flags to define processing on the input
+ */
+
+PARSER *parser_init(RRDHOST *host, void *user, void *input, PARSER_INPUT_TYPE flags)
+{
+ PARSER *parser;
+
+ parser = callocz(1, sizeof(*parser));
+
+ if (unlikely(!parser))
+ return NULL;
+
+ parser->plugins_action = callocz(1, sizeof(PLUGINSD_ACTION));
+ if (unlikely(!parser->plugins_action)) {
+ freez(parser);
+ return NULL;
+ }
+
+ parser->user = user;
+ parser->input = input;
+ parser->flags = flags;
+ parser->host = host;
+#ifdef ENABLE_HTTPS
+ parser->bytesleft = 0;
+ parser->readfrom = NULL;
+#endif
+
+ if (unlikely(!(flags & PARSER_NO_PARSE_INIT))) {
+ int rc = parser_add_keyword(parser, PLUGINSD_KEYWORD_FLUSH, pluginsd_flush);
+ rc += parser_add_keyword(parser, PLUGINSD_KEYWORD_CHART, pluginsd_chart);
+ rc += parser_add_keyword(parser, PLUGINSD_KEYWORD_DIMENSION, pluginsd_dimension);
+ rc += parser_add_keyword(parser, PLUGINSD_KEYWORD_DISABLE, pluginsd_disable);
+ rc += parser_add_keyword(parser, PLUGINSD_KEYWORD_VARIABLE, pluginsd_variable);
+ rc += parser_add_keyword(parser, PLUGINSD_KEYWORD_LABEL, pluginsd_label);
+ rc += parser_add_keyword(parser, PLUGINSD_KEYWORD_OVERWRITE, pluginsd_overwrite);
+ rc += parser_add_keyword(parser, PLUGINSD_KEYWORD_END, pluginsd_end);
+ rc += parser_add_keyword(parser, PLUGINSD_KEYWORD_BEGIN, pluginsd_begin);
+ rc += parser_add_keyword(parser, "SET", pluginsd_set);
+ }
+
+ return parser;
+}
+
+
+/*
+ * Push a new line into the parsing stream
+ *
+ * This line will be the next one to process ie the next fetch will get this one
+ *
+ */
+
+int parser_push(PARSER *parser, char *line)
+{
+ PARSER_DATA *tmp_parser_data;
+
+ if (unlikely(!parser))
+ return 1;
+
+ if (unlikely(!line))
+ return 0;
+
+ tmp_parser_data = callocz(1, sizeof(*tmp_parser_data));
+ tmp_parser_data->line = strdupz(line);
+ tmp_parser_data->next = parser->data;
+ parser->data = tmp_parser_data;
+
+ return 0;
+}
+
+/*
+ * Add a keyword and the corresponding function that will be called
+ * Multiple functions may be added
+ * Input : keyword
+ * : callback function
+ * : flags
+ * Output: > 0 registered function number
+ * : 0 Error
+ */
+
+int parser_add_keyword(PARSER *parser, char *keyword, keyword_function func)
+{
+ PARSER_KEYWORD *tmp_keyword;
+
+ if (strcmp(keyword, "_read") == 0) {
+ parser->read_function = (void *) func;
+ return 0;
+ }
+
+ if (strcmp(keyword, "_eof") == 0) {
+ parser->eof_function = (void *) func;
+ return 0;
+ }
+
+ if (strcmp(keyword, "_unknown") == 0) {
+ parser->unknown_function = (void *) func;
+ return 0;
+ }
+
+ uint32_t keyword_hash = simple_hash(keyword);
+
+ tmp_keyword = parser->keyword;
+
+ while (tmp_keyword) {
+ if (tmp_keyword->keyword_hash == keyword_hash && (!strcmp(tmp_keyword->keyword, keyword))) {
+ if (tmp_keyword->func_no == PARSER_MAX_CALLBACKS)
+ return 0;
+ tmp_keyword->func[tmp_keyword->func_no++] = (void *) func;
+ return tmp_keyword->func_no;
+ }
+ tmp_keyword = tmp_keyword->next;
+ }
+
+ tmp_keyword = callocz(1, sizeof(*tmp_keyword));
+
+ tmp_keyword->keyword = strdupz(keyword);
+ tmp_keyword->keyword_hash = keyword_hash;
+ tmp_keyword->func[tmp_keyword->func_no++] = (void *) func;
+
+ tmp_keyword->next = parser->keyword;
+ parser->keyword = tmp_keyword;
+ return tmp_keyword->func_no;
+}
+
+/*
+ * Cleanup a previously allocated parser
+ */
+
+void parser_destroy(PARSER *parser)
+{
+ if (unlikely(!parser))
+ return;
+
+ PARSER_KEYWORD *tmp_keyword, *tmp_keyword_next;
+ PARSER_DATA *tmp_parser_data, *tmp_parser_data_next;
+
+ // Remove keywords
+ tmp_keyword = parser->keyword;
+ while (tmp_keyword) {
+ tmp_keyword_next = tmp_keyword->next;
+ freez(tmp_keyword->keyword);
+ freez(tmp_keyword);
+ tmp_keyword = tmp_keyword_next;
+ }
+
+ // Remove pushed data if any
+ tmp_parser_data = parser->data;
+ while (tmp_parser_data) {
+ tmp_parser_data_next = tmp_parser_data->next;
+ freez(tmp_parser_data->line);
+ freez(tmp_parser_data);
+ tmp_parser_data = tmp_parser_data_next;
+ }
+
+ freez(parser->plugins_action);
+
+ freez(parser);
+ return;
+}
+
+
+/*
+ * Fetch the next line to process
+ *
+ */
+
+int parser_next(PARSER *parser)
+{
+ char *tmp = NULL;
+
+ if (unlikely(!parser))
+ return 1;
+
+ parser->flags &= ~(PARSER_INPUT_PROCESSED);
+
+ PARSER_DATA *tmp_parser_data = parser->data;
+
+ if (unlikely(tmp_parser_data)) {
+ strncpyz(parser->buffer, tmp_parser_data->line, PLUGINSD_LINE_MAX);
+ parser->data = tmp_parser_data->next;
+ freez(tmp_parser_data->line);
+ freez(tmp_parser_data);
+ return 0;
+ }
+
+ if (unlikely(parser->read_function))
+ tmp = parser->read_function(parser->buffer, PLUGINSD_LINE_MAX, parser->input);
+ else
+ tmp = fgets(parser->buffer, PLUGINSD_LINE_MAX, (FILE *)parser->input);
+
+ if (unlikely(!tmp)) {
+ if (unlikely(parser->eof_function)) {
+ int rc = parser->eof_function(parser->input);
+ error("read failed: user defined function returned %d", rc);
+ }
+ else {
+ if (feof((FILE *)parser->input))
+ error("read failed: end of file");
+ else if (ferror((FILE *)parser->input))
+ error("read failed: input error");
+ else
+ error("read failed: unknown error");
+ }
+ return 1;
+ }
+ return 0;
+}
+
+
+/*
+* Takes an initialized parser object that has an unprocessed entry (by calling parser_next)
+* and if it contains a valid keyword, it will execute all the callbacks
+*
+*/
+
+inline int parser_action(PARSER *parser, char *input)
+{
+ PARSER_RC rc = PARSER_RC_OK;
+ char *words[PLUGINSD_MAX_WORDS] = { NULL };
+ char command[PLUGINSD_LINE_MAX];
+ keyword_function action_function;
+ keyword_function *action_function_list = NULL;
+
+ if (unlikely(!parser))
+ return 1;
+ parser->recover_location[0] = 0x0;
+
+ // if not direct input check if we have reprocessed this
+ if (unlikely(!input && parser->flags & PARSER_INPUT_PROCESSED))
+ return 0;
+
+ PARSER_KEYWORD *tmp_keyword = parser->keyword;
+ if (unlikely(!tmp_keyword)) {
+ return 1;
+ }
+
+ if (unlikely(!input))
+ input = parser->buffer;
+
+ if (unlikely(!find_keyword(input, command, PLUGINSD_LINE_MAX, pluginsd_space)))
+ return 0;
+
+ if ((parser->flags & PARSER_INPUT_ORIGINAL) == PARSER_INPUT_ORIGINAL)
+ pluginsd_split_words(input, words, PLUGINSD_MAX_WORDS, parser->recover_input, parser->recover_location, PARSER_MAX_RECOVER_KEYWORDS);
+ else
+ pluginsd_split_words(input, words, PLUGINSD_MAX_WORDS, NULL, NULL, 0);
+
+ uint32_t command_hash = simple_hash(command);
+
+ while(tmp_keyword) {
+ if (command_hash == tmp_keyword->keyword_hash &&
+ (!strcmp(command, tmp_keyword->keyword))) {
+ action_function_list = &tmp_keyword->func[0];
+ break;
+ }
+ tmp_keyword = tmp_keyword->next;
+ }
+
+ if (unlikely(!action_function_list)) {
+ if (unlikely(parser->unknown_function))
+ rc = parser->unknown_function(words, parser->user, NULL);
+ else
+ rc = PARSER_RC_ERROR;
+#ifdef NETDATA_INTERNAL_CHECKS
+ error("Unknown keyword [%s]", input);
+#endif
+ }
+ else {
+ while ((action_function = *action_function_list) != NULL) {
+ rc = action_function(words, parser->user, parser->plugins_action);
+ if (unlikely(rc == PARSER_RC_ERROR || rc == PARSER_RC_STOP))
+ break;
+ action_function_list++;
+ }
+ }
+
+ if (likely(input == parser->buffer))
+ parser->flags |= PARSER_INPUT_PROCESSED;
+
+ return (rc == PARSER_RC_ERROR);
+}
+
+inline int parser_recover_input(PARSER *parser)
+{
+ if (unlikely(!parser))
+ return 1;
+
+ for(int i=0; i < PARSER_MAX_RECOVER_KEYWORDS && parser->recover_location[i]; i++)
+ *(parser->recover_location[i]) = parser->recover_input[i];
+
+ parser->recover_location[0] = 0x0;
+
+ return 0;
+}
diff --git a/parser/parser.h b/parser/parser.h
new file mode 100644
index 0000000..86a837e
--- /dev/null
+++ b/parser/parser.h
@@ -0,0 +1,114 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_INCREMENTAL_PARSER_H
+#define NETDATA_INCREMENTAL_PARSER_H 1
+
+#include "../daemon/common.h"
+
+#define PARSER_MAX_CALLBACKS 20
+#define PARSER_MAX_RECOVER_KEYWORDS 128
+
+// PARSER return codes
+typedef enum parser_rc {
+ PARSER_RC_OK, // Callback was successful, go on
+ PARSER_RC_STOP, // Callback says STOP
+ PARSER_RC_ERROR // Callback failed (abort rest of callbacks)
+} PARSER_RC;
+
+typedef struct pluginsd_action {
+ PARSER_RC (*set_action)(void *user, RRDSET *st, RRDDIM *rd, long long int value);
+ PARSER_RC (*begin_action)(void *user, RRDSET *st, usec_t microseconds, int trust_durations);
+ PARSER_RC (*end_action)(void *user, RRDSET *st);
+ PARSER_RC (*chart_action)
+ (void *user, char *type, char *id, char *name, char *family, char *context, char *title, char *units, char *plugin,
+ char *module, int priority, int update_every, RRDSET_TYPE chart_type, char *options);
+ PARSER_RC (*dimension_action)
+ (void *user, RRDSET *st, char *id, char *name, char *algorithm, long multiplier, long divisor, char *options,
+ RRD_ALGORITHM algorithm_type);
+
+ PARSER_RC (*flush_action)(void *user, RRDSET *st);
+ PARSER_RC (*disable_action)(void *user);
+ PARSER_RC (*variable_action)(void *user, RRDHOST *host, RRDSET *st, char *name, int global, calculated_number value);
+ PARSER_RC (*label_action)(void *user, char *key, char *value, LABEL_SOURCE source);
+ PARSER_RC (*overwrite_action)(void *user, RRDHOST *host, struct label *new_labels);
+
+ PARSER_RC (*guid_action)(void *user, uuid_t *uuid);
+ PARSER_RC (*context_action)(void *user, uuid_t *uuid);
+ PARSER_RC (*tombstone_action)(void *user, uuid_t *uuid);
+ PARSER_RC (*host_action)(void *user, char *machine_guid, char *hostname, char *registry_hostname, int update_every, char *os,
+ char *timezone, char *tags);
+} PLUGINSD_ACTION;
+
+typedef enum parser_input_type {
+ PARSER_INPUT_SPLIT = 1 << 1,
+ PARSER_INPUT_ORIGINAL = 1 << 2,
+ PARSER_INPUT_PROCESSED = 1 << 3,
+ PARSER_NO_PARSE_INIT = 1 << 4,
+ PARSER_NO_ACTION_INIT = 1 << 5,
+} PARSER_INPUT_TYPE;
+
+#define PARSER_INPUT_FULL (PARSER_INPUT_SPLIT|PARSER_INPUT_ORIGINAL)
+
+typedef PARSER_RC (*keyword_function)(char **, void *, PLUGINSD_ACTION *plugins_action);
+
+typedef struct parser_keyword {
+ char *keyword;
+ uint32_t keyword_hash;
+ int func_no;
+ keyword_function func[PARSER_MAX_CALLBACKS+1];
+ struct parser_keyword *next;
+} PARSER_KEYWORD;
+
+typedef struct parser_data {
+ char *line;
+ struct parser_data *next;
+} PARSER_DATA;
+
+typedef struct parser {
+ uint8_t version; // Parser version
+ RRDHOST *host;
+ void *input; // Input source e.g. stream
+ PARSER_DATA *data; // extra input
+ PARSER_KEYWORD *keyword; // List of parse keywords and functions
+ PLUGINSD_ACTION *plugins_action;
+ void *user; // User defined structure to hold extra state between calls
+ uint32_t flags;
+
+ char *(*read_function)(char *buffer, long unsigned int, void *input);
+ int (*eof_function)(void *input);
+ keyword_function unknown_function;
+ char buffer[PLUGINSD_LINE_MAX];
+ char *recover_location[PARSER_MAX_RECOVER_KEYWORDS+1];
+ char recover_input[PARSER_MAX_RECOVER_KEYWORDS];
+#ifdef ENABLE_HTTPS
+ int bytesleft;
+ char tmpbuffer[PLUGINSD_LINE_MAX];
+ char *readfrom;
+#endif
+} PARSER;
+
+PARSER *parser_init(RRDHOST *host, void *user, void *input, PARSER_INPUT_TYPE flags);
+int parser_add_keyword(PARSER *working_parser, char *keyword, keyword_function func);
+int parser_next(PARSER *working_parser);
+int parser_action(PARSER *working_parser, char *input);
+int parser_push(PARSER *working_parser, char *line);
+void parser_destroy(PARSER *working_parser);
+int parser_recover_input(PARSER *working_parser);
+
+extern size_t pluginsd_process(RRDHOST *host, struct plugind *cd, FILE *fp, int trust_durations);
+
+extern PARSER_RC pluginsd_set(char **words, void *user, PLUGINSD_ACTION *plugins_action);
+extern PARSER_RC pluginsd_begin(char **words, void *user, PLUGINSD_ACTION *plugins_action);
+extern PARSER_RC pluginsd_end(char **words, void *user, PLUGINSD_ACTION *plugins_action);
+extern PARSER_RC pluginsd_chart(char **words, void *user, PLUGINSD_ACTION *plugins_action);
+extern PARSER_RC pluginsd_dimension(char **words, void *user, PLUGINSD_ACTION *plugins_action);
+extern PARSER_RC pluginsd_variable(char **words, void *user, PLUGINSD_ACTION *plugins_action);
+extern PARSER_RC pluginsd_flush(char **words, void *user, PLUGINSD_ACTION *plugins_action);
+extern PARSER_RC pluginsd_disable(char **words, void *user, PLUGINSD_ACTION *plugins_action);
+extern PARSER_RC pluginsd_label(char **words, void *user, PLUGINSD_ACTION *plugins_action);
+extern PARSER_RC pluginsd_overwrite(char **words, void *user, PLUGINSD_ACTION *plugins_action);
+extern PARSER_RC pluginsd_guid(char **words, void *user, PLUGINSD_ACTION *plugins_action);
+extern PARSER_RC pluginsd_context(char **words, void *user, PLUGINSD_ACTION *plugins_action);
+extern PARSER_RC pluginsd_tombstone(char **words, void *user, PLUGINSD_ACTION *plugins_action);
+
+#endif
diff --git a/registry/Makefile.am b/registry/Makefile.am
new file mode 100644
index 0000000..161784b
--- /dev/null
+++ b/registry/Makefile.am
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/registry/README.md b/registry/README.md
new file mode 100644
index 0000000..968292c
--- /dev/null
+++ b/registry/README.md
@@ -0,0 +1,203 @@
+<!--
+title: "Registry"
+description: "Netdata utilizes a central registry of machines/person GUIDs, URLs, and opt-in account information to provide unified cross-server dashboards."
+custom_edit_url: https://github.com/netdata/netdata/edit/master/registry/README.md
+-->
+
+# Registry
+
+Netdata provides distributed monitoring.
+
+Traditional monitoring solutions centralize all the data to provide unified dashboards across all servers. Before
+Netdata, this was the standard practice. However, it has a few issues:
+
+1. due to the resources required, the number of metrics collected is limited.
+2. for the same reason, the data collection frequency is not that high, at best it will be once every 10 or 15 seconds,
+ at worst every 5 or 10 mins.
+3. the central monitoring solution needs dedicated resources, thus becoming "another bottleneck" in the whole
+ ecosystem. It also requires maintenance, administration, etc.
+4. most centralized monitoring solutions are usually only good for presenting _statistics of past performance_ (i.e.
+ cannot be used for real-time performance troubleshooting).
+
+Netdata follows a different approach:
+
+1. data collection happens per second
+2. thousands of metrics per server are collected
+3. data do not leave the server where they are collected
+4. Netdata servers do not talk to each other
+5. your browser connects all the Netdata servers
+
+Using Netdata, your monitoring infrastructure is embedded on each server, significantly limiting the need for
+additional resources. Netdata is blazingly fast, very resource efficient and utilizes server resources that already
+exist and are spare (on each server). This allows **scaling out** the monitoring infrastructure.
+
+However, the Netdata approach introduces a few new issues that need to be addressed, one being **the list of Netdata
+servers we have installed**, i.e. the URLs at which our Netdata servers are listening.
+
+To solve this, Netdata utilizes a **central registry**. This registry, together with certain browser features, allows
+Netdata to provide unified cross-server dashboards. For example, when you jump from server to server using the node
+menu, several session settings (like the currently viewed charts, the current zoom and pan operations on the charts,
+etc.) are propagated to the new server, so that the new dashboard will come with exactly the same view.
+
+## What data does the registry store?
+
+The registry keeps track of 4 entities:
+
+1. **machines**: i.e. the Netdata installations (a random GUID generated by each Netdata the first time it starts; we
+ call this **machine_guid**)
+
+ For each Netdata installation (each `machine_guid`) the registry keeps track of the different URLs it has accessed.
+
+2. **persons**: i.e. the web browsers accessing the Netdata installations (a random GUID generated by the registry the
+ first time it sees a new web browser; we call this **person_guid**)
+
+ For each person, the registry keeps track of the Netdata installations it has accessed and their URLs.
+
+3. **URLs** of Netdata installations (as seen by the web browsers)
+
+ For each URL, the registry keeps the URL and nothing more. Each URL is linked to _persons_ and _machines_. The only
+ way to find a URL is to know its **machine_guid** or have a **person_guid** it is linked to.
+
+4. **accounts**: i.e. the information used to sign-in via one of the available sign-in methods. Depending on the
+ method, this may include an email, or an email and a profile picture or avatar.
+
+For _persons_/_accounts_ and _machines_, the registry keeps links to _URLs_, each link with 2 timestamps (first time
+seen, last time seen) and a counter (number of times it has been seen). _machines_, _persons_ and timestamps are stored
+in the Netdata registry regardless of whether you sign in or not.
+
+## Who talks to the registry?
+
+Your web browser **only**! If sending this information is against your policies, you can [run your own
+registry](#run-your-own-registry).
+
+Your Netdata servers do not talk to the registry. This is a UML diagram of its operation:
+
+![registry](https://cloud.githubusercontent.com/assets/2662304/19448565/11a70632-94ab-11e6-9d80-f410b4acb797.png)
+
+## Which is the default registry?
+
+`https://registry.my-netdata.io`, which is currently served by `https://london.my-netdata.io`. This registry listens to
+both HTTP and HTTPS requests but the default is HTTPS.
+
+### Can this registry handle the global load of Netdata installations?
+
+Yes! The registry can handle 50.000 - 100.000 requests **per second per core**, depending on the type of CPU, the
+computer's memory bandwidth, etc. The 50.000 figure was measured on an Intel Celeron J1900 (2GHz).
+
+We believe it can handle the load...
+
+## Run your own registry
+
+**Every Netdata can be a registry**. Just pick one and configure it.
+
+**To turn any Netdata into a registry**, edit `/etc/netdata/netdata.conf` and set:
+
+```conf
+[registry]
+ enabled = yes
+ registry to announce = http://your.registry:19999
+```
+
+Restart your Netdata to activate it.
+
+Then, you need to tell **all your other Netdata servers to advertise your registry**, instead of the default. To do
+this, on each of your Netdata servers, edit `/etc/netdata/netdata.conf` and set:
+
+```conf
+[registry]
+ enabled = no
+ registry to announce = http://your.registry:19999
+```
+
+Note that we have not enabled the registry on the other servers. Only one Netdata (the registry) needs
+`[registry].enabled = yes`.
+
+This is it. You have your registry now.
+
+You may also want to give your servers different names under the node menu (i.e. to have them sorted / grouped). You
+can change a server's registry name by setting, on each Netdata server:
+
+```conf
+[registry]
+ registry hostname = Group1 - Master DB
+```
+
+So this server will appear in the node menu as `Group1 - Master DB`. The max name length is 50 characters.
+
+### Limiting access to the registry
+
+Netdata v1.9+ support limiting access to the registry from given IPs, like this:
+
+```conf
+[registry]
+ allow from = *
+```
+
+`allow from` settings are [Netdata simple patterns](/libnetdata/simple_pattern/README.md): string matches that use `*`
+as wildcard (any number of times) and a `!` prefix for a negative match. So: `allow from = !10.1.2.3 10.*` will allow
+all IPs in `10.*` except `10.1.2.3`. The order is important: left to right, the first positive or negative match is
+used.
+
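+As a configuration snippet, the example above would be (a sketch only, using the sample pattern from this section):
+
+```conf
+[registry]
+    allow from = !10.1.2.3 10.*
+```
+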
+Keep in mind that connections to Netdata API ports are filtered by `[web].allow connections from`. So, IPs allowed by
+`[registry].allow from` should also be allowed by `[web].allow connections from`.
+
+The patterns can be matched against the IP address or the FQDN of the host. In order to check the FQDN of the
+connection without opening the Netdata Agent to DNS spoofing, a reverse-DNS record must be set up for the connecting
+host. At connection time the reverse-DNS of the peer IP address is resolved, and a forward DNS resolution is made to
+validate the IP address against the name pattern.
+
+Please note that this process can be expensive on a machine that is serving many connections. The behaviour of the
+pattern matching can be controlled with the following setting:
+
+```conf
+[registry]
+ allow by dns = heuristic
+```
+
+The settings are:
+- `yes` allows the pattern to match DNS names.
+- `no` disables DNS matching for the patterns (they only match IP addresses).
+- `heuristic` will estimate if the patterns should match FQDNs by the presence or absence of `:`s or alphabetic characters.
+
+### Where is the registry database stored?
+
+`/var/lib/netdata/registry/*.db`
+
+There can be up to 2 files:
+
+- `registry-log.db`, the transaction log
+
+ all incoming requests that affect the registry are saved in this file in real-time.
+
+- `registry.db`, the database
+
+ every `[registry].registry save db every new entries` entries in `registry-log.db`, Netdata will save its database
+ to `registry.db` and empty `registry-log.db`.
+
+Both files are machine readable text files.
+
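+For example, to control how often the transaction log is folded into the database (a sketch; the value shown is
+illustrative, not necessarily the default):
+
+```conf
+[registry]
+    registry save db every new entries = 1000000
+```
+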
+## The future
+
+The registry opens a whole world of new possibilities for Netdata. Check here what we think:
+<https://github.com/netdata/netdata/issues/416>
+
+## Troubleshooting the registry
+
+The registry URL should be set to the URL of a Netdata dashboard. This server has to have `[registry].enabled = yes`.
+So, accessing the registry URL directly with your web browser should present the dashboard of the Netdata Agent
+operating the registry.
+
+To use the registry, your web browser needs to support **third party cookies**, since the cookies are set by the
+registry while you are browsing the dashboard of another Netdata server. The first time the registry sees a new web
+browser, it tries to determine whether the browser has cookies enabled; it does this by setting a cookie and
+redirecting the browser back to itself, hoping to receive the cookie back. If it does not receive the cookie, the
+registry will keep redirecting your web browser back to itself, which after a few redirects will fail with an error
+like this:
+
+```conf
+ERROR 409: Cannot ACCESS netdata registry: https://registry.my-netdata.io responded with: {"status":"redirect","registry":"https://registry.my-netdata.io"}
+```
+
+This error is printed on your web browser console (press F12 on your browser to see it).
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fregistry%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/registry/registry.c b/registry/registry.c
new file mode 100644
index 0000000..b14f4ee
--- /dev/null
+++ b/registry/registry.c
@@ -0,0 +1,435 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "../daemon/common.h"
+#include "registry_internals.h"
+
+#define REGISTRY_STATUS_OK "ok"
+#define REGISTRY_STATUS_FAILED "failed"
+#define REGISTRY_STATUS_DISABLED "disabled"
+
+// ----------------------------------------------------------------------------
+// REGISTRY concurrency locking
+
+static inline void registry_lock(void) {
+ netdata_mutex_lock(&registry.lock);
+}
+
+static inline void registry_unlock(void) {
+ netdata_mutex_unlock(&registry.lock);
+}
+
+
+// ----------------------------------------------------------------------------
+// COOKIES
+
+static void registry_set_cookie(struct web_client *w, const char *guid) {
+ char edate[100];
+ time_t et = now_realtime_sec() + registry.persons_expiration;
+ struct tm etmbuf, *etm = gmtime_r(&et, &etmbuf);
+ strftime(edate, sizeof(edate), "%a, %d %b %Y %H:%M:%S %Z", etm);
+
+ snprintfz(w->cookie1, NETDATA_WEB_REQUEST_COOKIE_SIZE, NETDATA_REGISTRY_COOKIE_NAME "=%s; Expires=%s", guid, edate);
+
+ if(registry.registry_domain && registry.registry_domain[0])
+ snprintfz(w->cookie2, NETDATA_WEB_REQUEST_COOKIE_SIZE, NETDATA_REGISTRY_COOKIE_NAME "=%s; Domain=%s; Expires=%s", guid, registry.registry_domain, edate);
+}
+
+static inline void registry_set_person_cookie(struct web_client *w, REGISTRY_PERSON *p) {
+ registry_set_cookie(w, p->guid);
+}
+
+
+// ----------------------------------------------------------------------------
+// JSON GENERATION
+
+static inline void registry_json_header(RRDHOST *host, struct web_client *w, const char *action, const char *status) {
+ buffer_flush(w->response.data);
+ w->response.data->contenttype = CT_APPLICATION_JSON;
+ buffer_sprintf(w->response.data, "{\n\t\"action\": \"%s\",\n\t\"status\": \"%s\",\n\t\"hostname\": \"%s\",\n\t\"machine_guid\": \"%s\"",
+ action, status, host->registry_hostname, host->machine_guid);
+}
+
+static inline void registry_json_footer(struct web_client *w) {
+ buffer_strcat(w->response.data, "\n}\n");
+}
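+
+// Together, the header and footer above produce a JSON skeleton like this
+// (illustrative values; real output is tab-indented exactly as the format
+// strings dictate):
+//
+//   {
+//       "action": "access",
+//       "status": "ok",
+//       "hostname": "some-host",
+//       "machine_guid": "11111111-2222-3333-4444-555555555555"
+//   }
+//
+// each request handler inserts its own fields between the two calls.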
+
+static inline int registry_json_disabled(RRDHOST *host, struct web_client *w, const char *action) {
+ registry_json_header(host, w, action, REGISTRY_STATUS_DISABLED);
+
+ buffer_sprintf(w->response.data, ",\n\t\"registry\": \"%s\"",
+ registry.registry_to_announce);
+
+ registry_json_footer(w);
+ return 200;
+}
+
+
+// ----------------------------------------------------------------------------
+// CALLBACKS FOR WALKING THROUGH REGISTRY OBJECTS
+
+// structure used by the callbacks below
+struct registry_json_walk_person_urls_callback {
+ REGISTRY_PERSON *p;
+ REGISTRY_MACHINE *m;
+ struct web_client *w;
+ int count;
+};
+
+// callback for rendering PERSON_URLs
+static int registry_json_person_url_callback(void *entry, void *data) {
+ REGISTRY_PERSON_URL *pu = (REGISTRY_PERSON_URL *)entry;
+ struct registry_json_walk_person_urls_callback *c = (struct registry_json_walk_person_urls_callback *)data;
+ struct web_client *w = c->w;
+
+    // skip the special "***" placeholder URL
+    if (!strcmp(pu->url->url,"***")) return 0;
+
+    // comma-separate all entries after the first
+    if(unlikely(c->count++))
+        buffer_strcat(w->response.data, ",");
+
+    // %u000: last_t is in seconds; the literal "000" renders it as milliseconds
+    buffer_sprintf(w->response.data, "\n\t\t[ \"%s\", \"%s\", %u000, %u, \"%s\" ]",
+                   pu->machine->guid, pu->url->url, pu->last_t, pu->usages, pu->machine_name);
+
+ return 0;
+}
+
+// callback for rendering MACHINE_URLs
+static int registry_json_machine_url_callback(void *entry, void *data) {
+ REGISTRY_MACHINE_URL *mu = (REGISTRY_MACHINE_URL *)entry;
+ struct registry_json_walk_person_urls_callback *c = (struct registry_json_walk_person_urls_callback *)data;
+ struct web_client *w = c->w;
+ REGISTRY_MACHINE *m = c->m;
+
+    // skip the special "***" placeholder URL
+    if (!strcmp(mu->url->url,"***")) return 1;
+
+    // comma-separate all entries after the first
+    if(unlikely(c->count++))
+        buffer_strcat(w->response.data, ",");
+
+    // %u000: last_t is in seconds; the literal "000" renders it as milliseconds
+    buffer_sprintf(w->response.data, "\n\t\t[ \"%s\", \"%s\", %u000, %u ]",
+                   m->guid, mu->url->url, mu->last_t, mu->usages);
+
+ return 1;
+}
+
+// ----------------------------------------------------------------------------
+
+// structure used by the callbacks below
+struct registry_person_url_callback_verify_machine_exists_data {
+ REGISTRY_MACHINE *m;
+ int count;
+};
+
+static inline int registry_person_url_callback_verify_machine_exists(void *entry, void *data) {
+ struct registry_person_url_callback_verify_machine_exists_data *d = (struct registry_person_url_callback_verify_machine_exists_data *)data;
+ REGISTRY_PERSON_URL *pu = (REGISTRY_PERSON_URL *)entry;
+ REGISTRY_MACHINE *m = d->m;
+
+ if(pu->machine == m)
+ d->count++;
+
+ return 0;
+}
+
+// ----------------------------------------------------------------------------
+// dynamic update of the configuration
+// The registry does not seem to be designed to support this and I cannot see any concurrency protection
+// that could make this safe, so try to be as atomic as possible.
+
+void registry_update_cloud_base_url(void)
+{
+ // This is guaranteed to be set early in main via post_conf_load()
+ registry.cloud_base_url = appconfig_get(&cloud_config, CONFIG_SECTION_GLOBAL, "cloud base url", NULL);
+ if (registry.cloud_base_url == NULL)
+ fatal("Do not move the cloud base url out of post_conf_load!!");
+
+ setenv("NETDATA_REGISTRY_CLOUD_BASE_URL", registry.cloud_base_url, 1);
+}
+// ----------------------------------------------------------------------------
+// public HELLO request
+
+int registry_request_hello_json(RRDHOST *host, struct web_client *w) {
+ registry_json_header(host, w, "hello", REGISTRY_STATUS_OK);
+
+ buffer_sprintf(w->response.data,
+ ",\n\t\"registry\": \"%s\",\n\t\"cloud_base_url\": \"%s\",\n\t\"anonymous_statistics\": %s",
+ registry.registry_to_announce,
+ registry.cloud_base_url, netdata_anonymous_statistics_enabled?"true":"false");
+
+ registry_json_footer(w);
+ return 200;
+}
+
+// ----------------------------------------------------------------------------
+// public ACCESS request
+
+#define REGISTRY_VERIFY_COOKIES_GUID "give-me-back-this-cookie-now--please"
+
+// the main method for registering an access
+int registry_request_access_json(RRDHOST *host, struct web_client *w, char *person_guid, char *machine_guid, char *url, char *name, time_t when) {
+ if(unlikely(!registry.enabled))
+ return registry_json_disabled(host, w, "access");
+
+ // ------------------------------------------------------------------------
+ // verify the browser supports cookies
+
+ if(registry.verify_cookies_redirects > 0 && !person_guid[0]) {
+ buffer_flush(w->response.data);
+ registry_set_cookie(w, REGISTRY_VERIFY_COOKIES_GUID);
+ w->response.data->contenttype = CT_APPLICATION_JSON;
+ buffer_sprintf(w->response.data, "{ \"status\": \"redirect\", \"registry\": \"%s\" }", registry.registry_to_announce);
+ return 200;
+ }
+
+ if(unlikely(person_guid[0] && !strcmp(person_guid, REGISTRY_VERIFY_COOKIES_GUID)))
+ person_guid[0] = '\0';
+
+ // ------------------------------------------------------------------------
+
+ registry_lock();
+
+ REGISTRY_PERSON *p = registry_request_access(person_guid, machine_guid, url, name, when);
+ if(!p) {
+ registry_json_header(host, w, "access", REGISTRY_STATUS_FAILED);
+ registry_json_footer(w);
+ registry_unlock();
+ return 412;
+ }
+
+ // set the cookie
+ registry_set_person_cookie(w, p);
+
+ // generate the response
+ registry_json_header(host, w, "access", REGISTRY_STATUS_OK);
+
+ buffer_sprintf(w->response.data, ",\n\t\"person_guid\": \"%s\",\n\t\"urls\": [", p->guid);
+ struct registry_json_walk_person_urls_callback c = { p, NULL, w, 0 };
+ avl_traverse(&p->person_urls, registry_json_person_url_callback, &c);
+ buffer_strcat(w->response.data, "\n\t]\n");
+
+ registry_json_footer(w);
+ registry_unlock();
+ return 200;
+}
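+
+// A successful ACCESS response, as assembled above, looks roughly like this
+// (illustrative values; the inner arrays come from
+// registry_json_person_url_callback()):
+//
+//   {
+//       "action": "access",
+//       "status": "ok",
+//       "hostname": "some-host",
+//       "machine_guid": "11111111-2222-3333-4444-555555555555",
+//       "person_guid": "22222222-3333-4444-5555-666666666666",
+//       "urls": [
+//           [ "11111111-2222-3333-4444-555555555555", "http://localhost:19999/", 1609459200000, 5, "some-host" ]
+//       ]
+//   }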
+
+// ----------------------------------------------------------------------------
+// public DELETE request
+
+// the main method for deleting a URL from a person
+int registry_request_delete_json(RRDHOST *host, struct web_client *w, char *person_guid, char *machine_guid, char *url, char *delete_url, time_t when) {
+ if(!registry.enabled)
+ return registry_json_disabled(host, w, "delete");
+
+ registry_lock();
+
+ REGISTRY_PERSON *p = registry_request_delete(person_guid, machine_guid, url, delete_url, when);
+ if(!p) {
+ registry_json_header(host, w, "delete", REGISTRY_STATUS_FAILED);
+ registry_json_footer(w);
+ registry_unlock();
+ return 412;
+ }
+
+ // generate the response
+ registry_json_header(host, w, "delete", REGISTRY_STATUS_OK);
+ registry_json_footer(w);
+ registry_unlock();
+ return 200;
+}
+
+// ----------------------------------------------------------------------------
+// public SEARCH request
+
+// the main method for searching the URLs of a Netdata machine
+int registry_request_search_json(RRDHOST *host, struct web_client *w, char *person_guid, char *machine_guid, char *url, char *request_machine, time_t when) {
+ if(!registry.enabled)
+ return registry_json_disabled(host, w, "search");
+
+ registry_lock();
+
+ REGISTRY_MACHINE *m = registry_request_machine(person_guid, machine_guid, url, request_machine, when);
+ if(!m) {
+ registry_json_header(host, w, "search", REGISTRY_STATUS_FAILED);
+ registry_json_footer(w);
+ registry_unlock();
+ return 404;
+ }
+
+ registry_json_header(host, w, "search", REGISTRY_STATUS_OK);
+
+ buffer_strcat(w->response.data, ",\n\t\"urls\": [");
+ struct registry_json_walk_person_urls_callback c = { NULL, m, w, 0 };
+ dictionary_get_all(m->machine_urls, registry_json_machine_url_callback, &c);
+ buffer_strcat(w->response.data, "\n\t]\n");
+
+ registry_json_footer(w);
+ registry_unlock();
+ return 200;
+}
+
+// ----------------------------------------------------------------------------
+// SWITCH REQUEST
+
+// the main method for switching user identity
+int registry_request_switch_json(RRDHOST *host, struct web_client *w, char *person_guid, char *machine_guid, char *url, char *new_person_guid, time_t when) {
+ if(!registry.enabled)
+ return registry_json_disabled(host, w, "switch");
+
+ (void)url;
+ (void)when;
+
+ registry_lock();
+
+ REGISTRY_PERSON *op = registry_person_find(person_guid);
+ if(!op) {
+ registry_json_header(host, w, "switch", REGISTRY_STATUS_FAILED);
+ registry_json_footer(w);
+ registry_unlock();
+ return 430;
+ }
+
+ REGISTRY_PERSON *np = registry_person_find(new_person_guid);
+ if(!np) {
+ registry_json_header(host, w, "switch", REGISTRY_STATUS_FAILED);
+ registry_json_footer(w);
+ registry_unlock();
+ return 431;
+ }
+
+ REGISTRY_MACHINE *m = registry_machine_find(machine_guid);
+ if(!m) {
+ registry_json_header(host, w, "switch", REGISTRY_STATUS_FAILED);
+ registry_json_footer(w);
+ registry_unlock();
+ return 432;
+ }
+
+ struct registry_person_url_callback_verify_machine_exists_data data = { m, 0 };
+
+ // verify the old person has access to this machine
+ avl_traverse(&op->person_urls, registry_person_url_callback_verify_machine_exists, &data);
+ if(!data.count) {
+ registry_json_header(host, w, "switch", REGISTRY_STATUS_FAILED);
+ registry_json_footer(w);
+ registry_unlock();
+ return 433;
+ }
+
+ // verify the new person has access to this machine
+ data.count = 0;
+ avl_traverse(&np->person_urls, registry_person_url_callback_verify_machine_exists, &data);
+ if(!data.count) {
+ registry_json_header(host, w, "switch", REGISTRY_STATUS_FAILED);
+ registry_json_footer(w);
+ registry_unlock();
+ return 434;
+ }
+
+ // set the cookie of the new person
+ // the user just switched identity
+ registry_set_person_cookie(w, np);
+
+ // generate the response
+ registry_json_header(host, w, "switch", REGISTRY_STATUS_OK);
+ buffer_sprintf(w->response.data, ",\n\t\"person_guid\": \"%s\"", np->guid);
+ registry_json_footer(w);
+
+ registry_unlock();
+ return 200;
+}
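+
+// Note: the failure paths above return non-standard HTTP-like status codes,
+// so the exact failure can be told apart by the caller:
+//   430 - the old person was not found
+//   431 - the new person was not found
+//   432 - the machine was not found
+//   433 - the old person has no URL on that machine
+//   434 - the new person has no URL on that machine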
+
+// ----------------------------------------------------------------------------
+// STATISTICS
+
+void registry_statistics(void) {
+ if(!registry.enabled) return;
+
+ static RRDSET *sts = NULL, *stc = NULL, *stm = NULL;
+
+ if(unlikely(!sts)) {
+ sts = rrdset_create_localhost(
+ "netdata"
+ , "registry_sessions"
+ , NULL
+ , "registry"
+ , NULL
+ , "NetData Registry Sessions"
+ , "sessions"
+ , "registry"
+ , "stats"
+ , 131000
+ , localhost->rrd_update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rrddim_add(sts, "sessions", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else rrdset_next(sts);
+
+ rrddim_set(sts, "sessions", registry.usages_count);
+ rrdset_done(sts);
+
+ // ------------------------------------------------------------------------
+
+ if(unlikely(!stc)) {
+ stc = rrdset_create_localhost(
+ "netdata"
+ , "registry_entries"
+ , NULL
+ , "registry"
+ , NULL
+ , "NetData Registry Entries"
+ , "entries"
+ , "registry"
+ , "stats"
+ , 131100
+ , localhost->rrd_update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rrddim_add(stc, "persons", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ rrddim_add(stc, "machines", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ rrddim_add(stc, "urls", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ rrddim_add(stc, "persons_urls", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ rrddim_add(stc, "machines_urls", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else rrdset_next(stc);
+
+ rrddim_set(stc, "persons", registry.persons_count);
+ rrddim_set(stc, "machines", registry.machines_count);
+ rrddim_set(stc, "urls", registry.urls_count);
+ rrddim_set(stc, "persons_urls", registry.persons_urls_count);
+ rrddim_set(stc, "machines_urls", registry.machines_urls_count);
+ rrdset_done(stc);
+
+ // ------------------------------------------------------------------------
+
+ if(unlikely(!stm)) {
+ stm = rrdset_create_localhost(
+ "netdata"
+ , "registry_mem"
+ , NULL
+ , "registry"
+ , NULL
+ , "NetData Registry Memory"
+ , "KiB"
+ , "registry"
+ , "stats"
+ , 131300
+ , localhost->rrd_update_every
+ , RRDSET_TYPE_STACKED
+ );
+
+ rrddim_add(stm, "persons", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
+ rrddim_add(stm, "machines", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
+ rrddim_add(stm, "urls", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
+ rrddim_add(stm, "persons_urls", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
+ rrddim_add(stm, "machines_urls", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else rrdset_next(stm);
+
+ rrddim_set(stm, "persons", registry.persons_memory + registry.persons_count * sizeof(NAME_VALUE) + sizeof(DICTIONARY));
+ rrddim_set(stm, "machines", registry.machines_memory + registry.machines_count * sizeof(NAME_VALUE) + sizeof(DICTIONARY));
+ rrddim_set(stm, "urls", registry.urls_memory);
+ rrddim_set(stm, "persons_urls", registry.persons_urls_memory);
+ rrddim_set(stm, "machines_urls", registry.machines_urls_memory + registry.machines_count * sizeof(DICTIONARY) + registry.machines_urls_count * sizeof(NAME_VALUE));
+ rrdset_done(stm);
+}
diff --git a/registry/registry.h b/registry/registry.h
new file mode 100644
index 0000000..44095c2
--- /dev/null
+++ b/registry/registry.h
@@ -0,0 +1,83 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+/*
+ * netdata registry
+ *
+ * this header file describes the public interface
+ * to the netdata registry
+ *
+ * only these high-level functions are exposed
+ *
+ */
+
+// ----------------------------------------------------------------------------
+// TODO
+//
+// 1. the default tracking cookie expires in 1 year, but the persons are not
+// removed from the db - this means the database only grows - ideally the
+// database should be cleaned in registry_db_save() for both on-disk and
+// on-memory entries.
+//
+// Cleanup:
+// i. Find all the PERSONs that have expired cookie
+// ii. For each of their PERSON_URLs:
+// - decrement the linked MACHINE links
+// - if the linked MACHINE has no other links, remove the linked MACHINE too
+// - remove the PERSON_URL
+//
+// 2. add protection to prevent abusing the registry by flooding it with
+// requests to fill the memory and crash it.
+//
+// Possible protections:
+// - limit the number of URLs per person
+// - limit the number of URLs per machine
+// - limit the number of persons
+// - limit the number of machines
+// - [DONE] limit the size of URLs
+// - [DONE] limit the size of PERSON_URL names
+// - limit the number of requests that add data to the registry,
+// per client IP per hour
+//
+// 3. lower memory requirements
+//
+// - embed avl structures directly into registry objects, instead of DICTIONARY
+// [DONE for PERSON_URLs, PENDING for MACHINE_URLs]
+// - store GUIDs in memory as UUID instead of char *
+// - do not track persons using the demo machines only
+// (i.e. start tracking them only when they access a non-demo machine)
+// - [DONE] do not track custom dashboards by default
+
+#ifndef NETDATA_REGISTRY_H
+#define NETDATA_REGISTRY_H 1
+
+#include "../daemon/common.h"
+
+#define NETDATA_REGISTRY_COOKIE_NAME "netdata_registry_id"
+
+// initialize the registry
+// should only happen when netdata starts
+extern int registry_init(void);
+
+// free all data held by the registry
+// should only happen when netdata exits
+extern void registry_free(void);
+
+// HTTP requests handled by the registry
+extern int registry_request_access_json(RRDHOST *host, struct web_client *w, char *person_guid, char *machine_guid, char *url, char *name, time_t when);
+extern int registry_request_delete_json(RRDHOST *host, struct web_client *w, char *person_guid, char *machine_guid, char *url, char *delete_url, time_t when);
+extern int registry_request_search_json(RRDHOST *host, struct web_client *w, char *person_guid, char *machine_guid, char *url, char *request_machine, time_t when);
+extern int registry_request_switch_json(RRDHOST *host, struct web_client *w, char *person_guid, char *machine_guid, char *url, char *new_person_guid, time_t when);
+extern int registry_request_hello_json(RRDHOST *host, struct web_client *w);
+
+// update the registry config
+extern void registry_update_cloud_base_url(void);
+
+// update the registry monitoring charts
+extern void registry_statistics(void);
+
+extern char *registry_get_this_machine_guid(void);
+extern char *registry_get_mgmt_api_key(void);
+extern char *registry_get_this_machine_hostname(void);
+
+extern int regenerate_guid(const char *guid, char *result);
+
+#endif /* NETDATA_REGISTRY_H */
diff --git a/registry/registry_db.c b/registry/registry_db.c
new file mode 100644
index 0000000..d8e2bbd
--- /dev/null
+++ b/registry/registry_db.c
@@ -0,0 +1,346 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "../daemon/common.h"
+#include "registry_internals.h"
+
+int registry_db_should_be_saved(void) {
+ debug(D_REGISTRY, "log entries %llu, max %llu", registry.log_count, registry.save_registry_every_entries);
+ return registry.log_count > registry.save_registry_every_entries;
+}
+
+// ----------------------------------------------------------------------------
+// INTERNAL FUNCTIONS FOR SAVING REGISTRY OBJECTS
+
+static int registry_machine_save_url(void *entry, void *file) {
+ REGISTRY_MACHINE_URL *mu = entry;
+ FILE *fp = file;
+
+ debug(D_REGISTRY, "Registry: registry_machine_save_url('%s')", mu->url->url);
+
+ int ret = fprintf(fp, "V\t%08x\t%08x\t%08x\t%02x\t%s\n",
+ mu->first_t,
+ mu->last_t,
+ mu->usages,
+ mu->flags,
+ mu->url->url
+ );
+
+ // error handling is done at registry_db_save()
+
+ return ret;
+}
+
+static int registry_machine_save(void *entry, void *file) {
+ REGISTRY_MACHINE *m = entry;
+ FILE *fp = file;
+
+ debug(D_REGISTRY, "Registry: registry_machine_save('%s')", m->guid);
+
+ int ret = fprintf(fp, "M\t%08x\t%08x\t%08x\t%s\n",
+ m->first_t,
+ m->last_t,
+ m->usages,
+ m->guid
+ );
+
+ if(ret >= 0) {
+ int ret2 = dictionary_get_all(m->machine_urls, registry_machine_save_url, fp);
+ if(ret2 < 0) return ret2;
+ ret += ret2;
+ }
+
+ // error handling is done at registry_db_save()
+
+ return ret;
+}
+
+static inline int registry_person_save_url(void *entry, void *file) {
+ REGISTRY_PERSON_URL *pu = entry;
+ FILE *fp = file;
+
+ debug(D_REGISTRY, "Registry: registry_person_save_url('%s')", pu->url->url);
+
+ int ret = fprintf(fp, "U\t%08x\t%08x\t%08x\t%02x\t%s\t%s\t%s\n",
+ pu->first_t,
+ pu->last_t,
+ pu->usages,
+ pu->flags,
+ pu->machine->guid,
+ pu->machine_name,
+ pu->url->url
+ );
+
+ // error handling is done at registry_db_save()
+
+ return ret;
+}
+
+static inline int registry_person_save(void *entry, void *file) {
+ REGISTRY_PERSON *p = entry;
+ FILE *fp = file;
+
+ debug(D_REGISTRY, "Registry: registry_person_save('%s')", p->guid);
+
+ int ret = fprintf(fp, "P\t%08x\t%08x\t%08x\t%s\n",
+ p->first_t,
+ p->last_t,
+ p->usages,
+ p->guid
+ );
+
+ if(ret >= 0) {
+ //int ret2 = dictionary_get_all(p->person_urls, registry_person_save_url, fp);
+ int ret2 = avl_traverse(&p->person_urls, registry_person_save_url, fp);
+ if (ret2 < 0) return ret2;
+ ret += ret2;
+ }
+
+ // error handling is done at registry_db_save()
+
+ return ret;
+}
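+
+// For reference, the record layouts written above (fields are tab-separated;
+// timestamps and counters are fixed-width hex):
+//
+//   M  first_t(8)  last_t(8)  usages(8)  machine_guid
+//   V  first_t(8)  last_t(8)  usages(8)  flags(2)  url
+//   P  first_t(8)  last_t(8)  usages(8)  person_guid
+//   U  first_t(8)  last_t(8)  usages(8)  flags(2)  machine_guid  name  url
+//
+// e.g. a person record (made-up values):
+//
+//   P  5fee6600  60b5a4c0  0000002a  22222222-3333-4444-5555-666666666666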
+
+// ----------------------------------------------------------------------------
+// SAVE THE REGISTRY DATABASE
+
+int registry_db_save(void) {
+ if(unlikely(!registry.enabled))
+ return -1;
+
+ if(unlikely(!registry_db_should_be_saved()))
+ return -2;
+
+ error_log_limit_unlimited();
+
+ char tmp_filename[FILENAME_MAX + 1];
+ char old_filename[FILENAME_MAX + 1];
+
+ snprintfz(old_filename, FILENAME_MAX, "%s.old", registry.db_filename);
+ snprintfz(tmp_filename, FILENAME_MAX, "%s.tmp", registry.db_filename);
+
+ debug(D_REGISTRY, "Registry: Creating file '%s'", tmp_filename);
+ FILE *fp = fopen(tmp_filename, "w");
+ if(!fp) {
+ error("Registry: Cannot create file: %s", tmp_filename);
+ error_log_limit_reset();
+ return -1;
+ }
+
+ // dictionary_get_all() has its own locking, so this is safe to do
+
+ debug(D_REGISTRY, "Saving all machines");
+ int bytes1 = dictionary_get_all(registry.machines, registry_machine_save, fp);
+ if(bytes1 < 0) {
+ error("Registry: Cannot save registry machines - return value %d", bytes1);
+ fclose(fp);
+ error_log_limit_reset();
+ return bytes1;
+ }
+ debug(D_REGISTRY, "Registry: saving machines took %d bytes", bytes1);
+
+ debug(D_REGISTRY, "Saving all persons");
+ int bytes2 = dictionary_get_all(registry.persons, registry_person_save, fp);
+ if(bytes2 < 0) {
+ error("Registry: Cannot save registry persons - return value %d", bytes2);
+ fclose(fp);
+ error_log_limit_reset();
+ return bytes2;
+ }
+ debug(D_REGISTRY, "Registry: saving persons took %d bytes", bytes2);
+
+ // save the totals
+ fprintf(fp, "T\t%016llx\t%016llx\t%016llx\t%016llx\t%016llx\t%016llx\n",
+ registry.persons_count,
+ registry.machines_count,
+ registry.usages_count + 1, // this is required - it is lost on db rotation
+ registry.urls_count,
+ registry.persons_urls_count,
+ registry.machines_urls_count
+ );
+
+ fclose(fp);
+
+ errno = 0;
+
+ // remove the .old db
+ debug(D_REGISTRY, "Registry: Removing old db '%s'", old_filename);
+ if(unlink(old_filename) == -1 && errno != ENOENT)
+ error("Registry: cannot remove old registry file '%s'", old_filename);
+
+ // rename the db to .old
+ debug(D_REGISTRY, "Registry: Link current db '%s' to .old: '%s'", registry.db_filename, old_filename);
+ if(link(registry.db_filename, old_filename) == -1 && errno != ENOENT)
+ error("Registry: cannot move file '%s' to '%s'. Saving registry DB failed!", registry.db_filename, old_filename);
+
+ else {
+ // remove the database (it is saved in .old)
+ debug(D_REGISTRY, "Registry: removing db '%s'", registry.db_filename);
+ if (unlink(registry.db_filename) == -1 && errno != ENOENT)
+ error("Registry: cannot remove old registry file '%s'", registry.db_filename);
+
+ // move the .tmp to make it active
+ debug(D_REGISTRY, "Registry: linking tmp db '%s' to active db '%s'", tmp_filename, registry.db_filename);
+ if (link(tmp_filename, registry.db_filename) == -1) {
+ error("Registry: cannot move file '%s' to '%s'. Saving registry DB failed!", tmp_filename,
+ registry.db_filename);
+
+ // move the .old back
+ debug(D_REGISTRY, "Registry: linking old db '%s' to active db '%s'", old_filename, registry.db_filename);
+ if(link(old_filename, registry.db_filename) == -1)
+ error("Registry: cannot move file '%s' to '%s'. Recovering the old registry DB failed!", old_filename, registry.db_filename);
+ }
+ else {
+ debug(D_REGISTRY, "Registry: removing tmp db '%s'", tmp_filename);
+ if(unlink(tmp_filename) == -1)
+ error("Registry: cannot remove tmp registry file '%s'", tmp_filename);
+
+ // it has been moved successfully
+ // discard the current registry log
+ registry_log_recreate();
+ registry.log_count = 0;
+ }
+ }
+
+    // continue operations
+    error_log_limit_reset();
+
+    // note: -1 is returned even on success; all current callers ignore the return value
+    return -1;
+}
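+
+// The rotation above walks through these states (a summary, not extra logic):
+//
+//   1. write everything to registry.db.tmp
+//   2. unlink registry.db.old, then hard-link registry.db to registry.db.old
+//   3. unlink registry.db, then hard-link registry.db.tmp to registry.db
+//   4. if step 3 fails, hard-link registry.db.old back to registry.db
+//   5. on success, unlink registry.db.tmp and truncate the registry log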
+
+// ----------------------------------------------------------------------------
+// LOAD THE REGISTRY DATABASE
+
+size_t registry_db_load(void) {
+ char *s, buf[4096 + 1];
+ REGISTRY_PERSON *p = NULL;
+ REGISTRY_MACHINE *m = NULL;
+ REGISTRY_URL *u = NULL;
+ size_t line = 0;
+
+ debug(D_REGISTRY, "Registry: loading active db from: '%s'", registry.db_filename);
+ FILE *fp = fopen(registry.db_filename, "r");
+ if(!fp) {
+ error("Registry: cannot open registry file: '%s'", registry.db_filename);
+ return 0;
+ }
+
+ size_t len = 0;
+ buf[4096] = '\0';
+ while((s = fgets_trim_len(buf, 4096, fp, &len))) {
+ line++;
+
+ debug(D_REGISTRY, "Registry: read line %zu to length %zu: %s", line, len, s);
+ switch(*s) {
+ case 'T': // totals
+ if(unlikely(len != 103 || s[1] != '\t' || s[18] != '\t' || s[35] != '\t' || s[52] != '\t' || s[69] != '\t' || s[86] != '\t' || s[103] != '\0')) {
+ error("Registry totals line %zu is wrong (len = %zu).", line, len);
+ continue;
+ }
+ registry.persons_count = strtoull(&s[2], NULL, 16);
+ registry.machines_count = strtoull(&s[19], NULL, 16);
+ registry.usages_count = strtoull(&s[36], NULL, 16);
+ registry.urls_count = strtoull(&s[53], NULL, 16);
+ registry.persons_urls_count = strtoull(&s[70], NULL, 16);
+ registry.machines_urls_count = strtoull(&s[87], NULL, 16);
+ break;
+
+ case 'P': // person
+ m = NULL;
+ // verify it is valid
+ if(unlikely(len != 65 || s[1] != '\t' || s[10] != '\t' || s[19] != '\t' || s[28] != '\t' || s[65] != '\0')) {
+ error("Registry person line %zu is wrong (len = %zu).", line, len);
+ continue;
+ }
+
+ s[1] = s[10] = s[19] = s[28] = '\0';
+ p = registry_person_allocate(&s[29], strtoul(&s[2], NULL, 16));
+ p->last_t = (uint32_t)strtoul(&s[11], NULL, 16);
+ p->usages = (uint32_t)strtoul(&s[20], NULL, 16);
+ debug(D_REGISTRY, "Registry loaded person '%s', first: %u, last: %u, usages: %u", p->guid, p->first_t, p->last_t, p->usages);
+ break;
+
+ case 'M': // machine
+ p = NULL;
+ // verify it is valid
+ if(unlikely(len != 65 || s[1] != '\t' || s[10] != '\t' || s[19] != '\t' || s[28] != '\t' || s[65] != '\0')) {
+ error("Registry person line %zu is wrong (len = %zu).", line, len);
+ continue;
+ }
+
+ s[1] = s[10] = s[19] = s[28] = '\0';
+ m = registry_machine_allocate(&s[29], strtoul(&s[2], NULL, 16));
+ m->last_t = (uint32_t)strtoul(&s[11], NULL, 16);
+ m->usages = (uint32_t)strtoul(&s[20], NULL, 16);
+ debug(D_REGISTRY, "Registry loaded machine '%s', first: %u, last: %u, usages: %u", m->guid, m->first_t, m->last_t, m->usages);
+ break;
+
+ case 'U': // person URL
+ if(unlikely(!p)) {
+ error("Registry: ignoring line %zu, no person loaded: %s", line, s);
+ continue;
+ }
+
+ // verify it is valid
+ if(len < 69 || s[1] != '\t' || s[10] != '\t' || s[19] != '\t' || s[28] != '\t' || s[31] != '\t' || s[68] != '\t') {
+ error("Registry person URL line %zu is wrong (len = %zu).", line, len);
+ continue;
+ }
+
+ s[1] = s[10] = s[19] = s[28] = s[31] = s[68] = '\0';
+
+ // skip the name to find the url
+ char *url = &s[69];
+ while(*url && *url != '\t') url++;
+ if(!*url) {
+ error("Registry person URL line %zu does not have a url.", line);
+ continue;
+ }
+ *url++ = '\0';
+
+ // u = registry_url_allocate_nolock(url, strlen(url));
+ u = registry_url_get(url, strlen(url));
+
+ time_t first_t = strtoul(&s[2], NULL, 16);
+
+ m = registry_machine_find(&s[32]);
+ if(!m) m = registry_machine_allocate(&s[32], first_t);
+
+ REGISTRY_PERSON_URL *pu = registry_person_url_allocate(p, m, u, &s[69], strlen(&s[69]), first_t);
+ pu->last_t = (uint32_t)strtoul(&s[11], NULL, 16);
+ pu->usages = (uint32_t)strtoul(&s[20], NULL, 16);
+ pu->flags = (uint8_t)strtoul(&s[29], NULL, 16);
+ debug(D_REGISTRY, "Registry loaded person URL '%s' with name '%s' of machine '%s', first: %u, last: %u, usages: %u, flags: %02x", u->url, pu->machine_name, m->guid, pu->first_t, pu->last_t, pu->usages, pu->flags);
+ break;
+
+ case 'V': // machine URL
+ if(unlikely(!m)) {
+ error("Registry: ignoring line %zu, no machine loaded: %s", line, s);
+ continue;
+ }
+
+ // verify it is valid
+ if(len < 32 || s[1] != '\t' || s[10] != '\t' || s[19] != '\t' || s[28] != '\t' || s[31] != '\t') {
+ error("Registry person URL line %zu is wrong (len = %zu).", line, len);
+ continue;
+ }
+
+ s[1] = s[10] = s[19] = s[28] = s[31] = '\0';
+ // u = registry_url_allocate_nolock(&s[32], strlen(&s[32]));
+ u = registry_url_get(&s[32], strlen(&s[32]));
+
+ REGISTRY_MACHINE_URL *mu = registry_machine_url_allocate(m, u, strtoul(&s[2], NULL, 16));
+ mu->last_t = (uint32_t)strtoul(&s[11], NULL, 16);
+ mu->usages = (uint32_t)strtoul(&s[20], NULL, 16);
+ mu->flags = (uint8_t)strtoul(&s[29], NULL, 16);
+ debug(D_REGISTRY, "Registry loaded machine URL '%s', machine '%s', first: %u, last: %u, usages: %u, flags: %02x", u->url, m->guid, mu->first_t, mu->last_t, mu->usages, mu->flags);
+ break;
+
+ default:
+ error("Registry: ignoring line %zu of filename '%s': %s.", line, registry.db_filename, s);
+ break;
+ }
+ }
+ fclose(fp);
+
+ return line;
+}
diff --git a/registry/registry_init.c b/registry/registry_init.c
new file mode 100644
index 0000000..ffdb83f
--- /dev/null
+++ b/registry/registry_init.c
@@ -0,0 +1,147 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "../daemon/common.h"
+#include "registry_internals.h"
+
+int registry_init(void) {
+ char filename[FILENAME_MAX + 1];
+
+ // registry enabled?
+ if(web_server_mode != WEB_SERVER_MODE_NONE) {
+ registry.enabled = config_get_boolean(CONFIG_SECTION_REGISTRY, "enabled", 0);
+ }
+ else {
+ info("Registry is disabled - use the central netdata");
+ config_set_boolean(CONFIG_SECTION_REGISTRY, "enabled", 0);
+ registry.enabled = 0;
+ }
+
+ // pathnames
+ snprintfz(filename, FILENAME_MAX, "%s/registry", netdata_configured_varlib_dir);
+ registry.pathname = config_get(CONFIG_SECTION_REGISTRY, "registry db directory", filename);
+ if(mkdir(registry.pathname, 0770) == -1 && errno != EEXIST)
+ fatal("Cannot create directory '%s'.", registry.pathname);
+
+ // filenames
+ snprintfz(filename, FILENAME_MAX, "%s/netdata.public.unique.id", registry.pathname);
+ registry.machine_guid_filename = config_get(CONFIG_SECTION_REGISTRY, "netdata unique id file", filename);
+
+ snprintfz(filename, FILENAME_MAX, "%s/registry.db", registry.pathname);
+ registry.db_filename = config_get(CONFIG_SECTION_REGISTRY, "registry db file", filename);
+
+ snprintfz(filename, FILENAME_MAX, "%s/registry-log.db", registry.pathname);
+ registry.log_filename = config_get(CONFIG_SECTION_REGISTRY, "registry log file", filename);
+
+ // configuration options
+ registry.save_registry_every_entries = (unsigned long long)config_get_number(CONFIG_SECTION_REGISTRY, "registry save db every new entries", 1000000);
+ registry.persons_expiration = config_get_number(CONFIG_SECTION_REGISTRY, "registry expire idle persons days", 365) * 86400;
+ registry.registry_domain = config_get(CONFIG_SECTION_REGISTRY, "registry domain", "");
+ registry.registry_to_announce = config_get(CONFIG_SECTION_REGISTRY, "registry to announce", "https://registry.my-netdata.io");
+ registry.hostname = config_get(CONFIG_SECTION_REGISTRY, "registry hostname", netdata_configured_hostname);
+ registry.verify_cookies_redirects = config_get_boolean(CONFIG_SECTION_REGISTRY, "verify browser cookies support", 1);
+
+ registry_update_cloud_base_url();
+ setenv("NETDATA_REGISTRY_HOSTNAME", registry.hostname, 1);
+ setenv("NETDATA_REGISTRY_URL", registry.registry_to_announce, 1);
+
+ registry.max_url_length = (size_t)config_get_number(CONFIG_SECTION_REGISTRY, "max URL length", 1024);
+ if(registry.max_url_length < 10) {
+ registry.max_url_length = 10;
+ config_set_number(CONFIG_SECTION_REGISTRY, "max URL length", (long long)registry.max_url_length);
+ }
+
+ registry.max_name_length = (size_t)config_get_number(CONFIG_SECTION_REGISTRY, "max URL name length", 50);
+ if(registry.max_name_length < 10) {
+ registry.max_name_length = 10;
+ config_set_number(CONFIG_SECTION_REGISTRY, "max URL name length", (long long)registry.max_name_length);
+ }
+
+ // initialize entries counters
+ registry.persons_count = 0;
+ registry.machines_count = 0;
+ registry.usages_count = 0;
+ registry.urls_count = 0;
+ registry.persons_urls_count = 0;
+ registry.machines_urls_count = 0;
+
+ // initialize memory counters
+ registry.persons_memory = 0;
+ registry.machines_memory = 0;
+ registry.urls_memory = 0;
+ registry.persons_urls_memory = 0;
+ registry.machines_urls_memory = 0;
+
+ // initialize locks
+ netdata_mutex_init(&registry.lock);
+
+ // create dictionaries
+ registry.persons = dictionary_create(DICTIONARY_FLAGS);
+ registry.machines = dictionary_create(DICTIONARY_FLAGS);
+ avl_init(&registry.registry_urls_root_index, registry_url_compare);
+
+ // load the registry database
+ if(registry.enabled) {
+ registry_log_open();
+ registry_db_load();
+ registry_log_load();
+
+ if(unlikely(registry_db_should_be_saved()))
+ registry_db_save();
+ }
+
+ return 0;
+}
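+
+// For reference, the [registry] section keys read above with their built-in
+// defaults ("<dir>" stands for the "<varlib>/registry" directory computed above):
+//
+//   [registry]
+//       enabled = no
+//       registry db directory = <dir>
+//       netdata unique id file = <dir>/netdata.public.unique.id
+//       registry db file = <dir>/registry.db
+//       registry log file = <dir>/registry-log.db
+//       registry save db every new entries = 1000000
+//       registry expire idle persons days = 365
+//       registry domain =
+//       registry to announce = https://registry.my-netdata.io
+//       registry hostname = <the configured netdata hostname>
+//       verify browser cookies support = yes
+//       max URL length = 1024
+//       max URL name length = 50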
+
+void registry_free(void) {
+ if(!registry.enabled) return;
+
+ // we need to destroy the dictionaries ourselves
+ // since the dictionaries use memory we allocated
+
+ while(registry.persons->values_index.root) {
+ REGISTRY_PERSON *p = ((NAME_VALUE *)registry.persons->values_index.root)->value;
+ registry_person_del(p);
+ }
+
+ while(registry.machines->values_index.root) {
+ REGISTRY_MACHINE *m = ((NAME_VALUE *)registry.machines->values_index.root)->value;
+
+ // fprintf(stderr, "\nMACHINE: '%s', first: %u, last: %u, usages: %u\n", m->guid, m->first_t, m->last_t, m->usages);
+
+ while(m->machine_urls->values_index.root) {
+ REGISTRY_MACHINE_URL *mu = ((NAME_VALUE *)m->machine_urls->values_index.root)->value;
+
+ // fprintf(stderr, "\tURL: '%s', first: %u, last: %u, usages: %u, flags: 0x%02x\n", mu->url->url, mu->first_t, mu->last_t, mu->usages, mu->flags);
+
+ //debug(D_REGISTRY, "Registry: destroying persons dictionary from url '%s'", mu->url->url);
+ //dictionary_destroy(mu->persons);
+
+ debug(D_REGISTRY, "Registry: deleting url '%s' from person '%s'", mu->url->url, m->guid);
+ dictionary_del(m->machine_urls, mu->url->url);
+
+ debug(D_REGISTRY, "Registry: unlinking url '%s' from machine", mu->url->url);
+ registry_url_unlink(mu->url);
+
+ debug(D_REGISTRY, "Registry: freeing machine url");
+ freez(mu);
+ }
+
+ debug(D_REGISTRY, "Registry: deleting machine '%s' from machines registry", m->guid);
+ dictionary_del(registry.machines, m->guid);
+
+ debug(D_REGISTRY, "Registry: destroying URL dictionary of machine '%s'", m->guid);
+ dictionary_destroy(m->machine_urls);
+
+ debug(D_REGISTRY, "Registry: freeing machine '%s'", m->guid);
+ freez(m);
+ }
+
+ // and free the memory of remaining dictionary structures
+
+ debug(D_REGISTRY, "Registry: destroying persons dictionary");
+ dictionary_destroy(registry.persons);
+
+ debug(D_REGISTRY, "Registry: destroying machines dictionary");
+ dictionary_destroy(registry.machines);
+}
+
diff --git a/registry/registry_internals.c b/registry/registry_internals.c
new file mode 100644
index 0000000..3de6dd1
--- /dev/null
+++ b/registry/registry_internals.c
@@ -0,0 +1,332 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "../daemon/common.h"
+#include "registry_internals.h"
+
+struct registry registry;
+
+// ----------------------------------------------------------------------------
+// common functions
+
+// parse a GUID and re-generate it so it is always lower case
+// this is used as a protection against case variations of GUIDs
+int regenerate_guid(const char *guid, char *result) {
+ uuid_t uuid;
+ if(unlikely(uuid_parse(guid, uuid) == -1)) {
+ info("Registry: GUID '%s' is not a valid GUID.", guid);
+ return -1;
+ }
+ else {
+ uuid_unparse_lower(uuid, result);
+
+#ifdef NETDATA_INTERNAL_CHECKS
+ if(strcmp(guid, result) != 0)
+ info("GUID '%s' and re-generated GUID '%s' differ!", guid, result);
+#endif /* NETDATA_INTERNAL_CHECKS */
+ }
+
+ return 0;
+}
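+
+// Illustrative: a GUID given as "ABCDEF01-2345-6789-ABCD-EF0123456789" is
+// accepted and rewritten to "abcdef01-2345-6789-abcd-ef0123456789" (return 0);
+// anything uuid_parse() rejects, e.g. "not-a-guid", makes it return -1.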
+
+// make sure the names of the machines / URLs do not contain any tabs
+// (which are used as our separator in the database files)
+// and are properly trimmed (before and after)
+static inline char *registry_fix_machine_name(char *name, size_t *len) {
+ char *s = name?name:"";
+
+ // skip leading spaces
+ while(*s && isspace(*s)) s++;
+
+ // make sure all spaces are a SPACE
+ char *t = s;
+ while(*t) {
+ if(unlikely(isspace(*t)))
+ *t = ' ';
+
+ t++;
+ }
+
+ // remove trailing spaces
+ while(--t >= s) {
+ if(*t == ' ')
+ *t = '\0';
+ else
+ break;
+ }
+ t++;
+
+ if(likely(len))
+ *len = (t - s);
+
+ return s;
+}
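+
+// Illustrative: "  my\tserver " comes back as "my server" - the leading blanks
+// are skipped, the embedded tab becomes a space (tabs are the db field
+// separator), and the trailing blank is trimmed. Runs of spaces are not
+// collapsed.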
+
+static inline char *registry_fix_url(char *url, size_t *len) {
+ size_t l = 0;
+ char *s = registry_fix_machine_name(url, &l);
+
+ // protection from too big URLs
+ if(l > registry.max_url_length) {
+ l = registry.max_url_length;
+ s[l] = '\0';
+ }
+
+ if(len) *len = l;
+ return s;
+}
+
+
+// ----------------------------------------------------------------------------
+// HELPERS
+
+// verify the person, the machine and the URL exist in our DB
+REGISTRY_PERSON_URL *registry_verify_request(char *person_guid, char *machine_guid, char *url, REGISTRY_PERSON **pp, REGISTRY_MACHINE **mm) {
+ char pbuf[GUID_LEN + 1], mbuf[GUID_LEN + 1];
+
+ if(!person_guid || !*person_guid || !machine_guid || !*machine_guid || !url || !*url) {
+ info("Registry Request Verification: invalid request! person: '%s', machine '%s', url '%s'", person_guid?person_guid:"UNSET", machine_guid?machine_guid:"UNSET", url?url:"UNSET");
+ return NULL;
+ }
+
+ // normalize the url
+ url = registry_fix_url(url, NULL);
+
+ // make sure the person GUID is valid
+ if(regenerate_guid(person_guid, pbuf) == -1) {
+ info("Registry Request Verification: invalid person GUID, person: '%s', machine '%s', url '%s'", person_guid, machine_guid, url);
+ return NULL;
+ }
+ person_guid = pbuf;
+
+ // make sure the machine GUID is valid
+ if(regenerate_guid(machine_guid, mbuf) == -1) {
+ info("Registry Request Verification: invalid machine GUID, person: '%s', machine '%s', url '%s'", person_guid, machine_guid, url);
+ return NULL;
+ }
+ machine_guid = mbuf;
+
+ // make sure the machine exists
+ REGISTRY_MACHINE *m = registry_machine_find(machine_guid);
+ if(!m) {
+ info("Registry Request Verification: machine not found, person: '%s', machine '%s', url '%s'", person_guid, machine_guid, url);
+ return NULL;
+ }
+ if(mm) *mm = m;
+
+    // make sure the person exists
+ REGISTRY_PERSON *p = registry_person_find(person_guid);
+ if(!p) {
+ info("Registry Request Verification: person not found, person: '%s', machine '%s', url '%s'", person_guid, machine_guid, url);
+ return NULL;
+ }
+ if(pp) *pp = p;
+
+ REGISTRY_PERSON_URL *pu = registry_person_url_index_find(p, url);
+ if(!pu) {
+ info("Registry Request Verification: URL not found for person, person: '%s', machine '%s', url '%s'", person_guid, machine_guid, url);
+ return NULL;
+ }
+ //else if (pu->machine != m) {
+ // info("Registry Request Verification: Machine mismatch: person: '%s', machine requested='%s' <> loaded='%s', url '%s'", person_guid, machine_guid, pu->machine->guid, url);
+ // return NULL;
+ //}
+
+ return pu;
+}
+
+
+// ----------------------------------------------------------------------------
+// REGISTRY REQUESTS
+
+REGISTRY_PERSON *registry_request_access(char *person_guid, char *machine_guid, char *url, char *name, time_t when) {
+ debug(D_REGISTRY, "registry_request_access('%s', '%s', '%s'): NEW REQUEST", (person_guid)?person_guid:"", machine_guid, url);
+
+ REGISTRY_MACHINE *m = registry_machine_get(machine_guid, when);
+ if(!m) return NULL;
+
+ // make sure the name is valid
+ size_t namelen;
+ name = registry_fix_machine_name(name, &namelen);
+
+ size_t urllen;
+ url = registry_fix_url(url, &urllen);
+
+ REGISTRY_PERSON *p = registry_person_get(person_guid, when);
+
+ REGISTRY_URL *u = registry_url_get(url, urllen);
+ registry_person_link_to_url(p, m, u, name, namelen, when);
+ registry_machine_link_to_url(m, u, when);
+
+ registry_log('A', p, m, u, name);
+
+ registry.usages_count++;
+
+ return p;
+}
+
+REGISTRY_PERSON *registry_request_delete(char *person_guid, char *machine_guid, char *url, char *delete_url, time_t when) {
+ (void) when;
+
+ REGISTRY_PERSON *p = NULL;
+ REGISTRY_MACHINE *m = NULL;
+ REGISTRY_PERSON_URL *pu = registry_verify_request(person_guid, machine_guid, url, &p, &m);
+ if(!pu || !p || !m) return NULL;
+
+ // normalize the url
+ delete_url = registry_fix_url(delete_url, NULL);
+
+ // make sure the user is not deleting the url it uses
+ /*
+ if(!strcmp(delete_url, pu->url->url)) {
+ info("Registry Delete Request: delete URL is the one currently accessed, person: '%s', machine '%s', url '%s', delete url '%s'"
+ , p->guid, m->guid, pu->url->url, delete_url);
+ return NULL;
+ }
+ */
+
+ REGISTRY_PERSON_URL *dpu = registry_person_url_index_find(p, delete_url);
+ if(!dpu) {
+ info("Registry Delete Request: URL not found for person: '%s', machine '%s', url '%s', delete url '%s'", p->guid
+ , m->guid, pu->url->url, delete_url);
+ return NULL;
+ }
+
+ registry_log('D', p, m, pu->url, dpu->url->url);
+ registry_person_unlink_from_url(p, dpu);
+
+ return p;
+}
+
+
+// a structure to pass to the avl_traverse() callback handler
+struct machine_request_callback_data {
+ REGISTRY_MACHINE *find_this_machine;
+ REGISTRY_PERSON_URL *result;
+};
+
+// the callback function
+// this will be run for every PERSON_URL of this PERSON
+static int machine_request_callback(void *entry, void *data) {
+ REGISTRY_PERSON_URL *mypu = (REGISTRY_PERSON_URL *)entry;
+ struct machine_request_callback_data *myrdata = (struct machine_request_callback_data *)data;
+
+ if(mypu->machine == myrdata->find_this_machine) {
+ myrdata->result = mypu;
+ return -1; // this will also stop the walk through
+ }
+
+ return 0; // continue
+}
+
+REGISTRY_MACHINE *registry_request_machine(char *person_guid, char *machine_guid, char *url, char *request_machine, time_t when) {
+ (void)when;
+
+ char mbuf[GUID_LEN + 1];
+
+ REGISTRY_PERSON *p = NULL;
+ REGISTRY_MACHINE *m = NULL;
+ REGISTRY_PERSON_URL *pu = registry_verify_request(person_guid, machine_guid, url, &p, &m);
+ if(!pu || !p || !m) return NULL;
+
+ // make sure the machine GUID is valid
+ if(regenerate_guid(request_machine, mbuf) == -1) {
+ info("Registry Machine URLs request: invalid machine GUID, person: '%s', machine '%s', url '%s', request machine '%s'", p->guid, m->guid, pu->url->url, request_machine);
+ return NULL;
+ }
+ request_machine = mbuf;
+
+ // make sure the machine exists
+ m = registry_machine_find(request_machine);
+ if(!m) {
+ info("Registry Machine URLs request: machine not found, person: '%s', machine '%s', url '%s', request machine '%s'", p->guid, machine_guid, pu->url->url, request_machine);
+ return NULL;
+ }
+
+    // Verify the user has accessed this machine in the past.
+    // We walk through the person's PERSON_URLs to find one
+    // linking to the requested machine.
+
+    // a structure to pass to the avl_traverse() callback handler
+    struct machine_request_callback_data rdata = { m, NULL };
+
+    // request a walk through on the AVL tree of person URLs
+    avl_traverse(&p->person_urls, machine_request_callback, &rdata);
+
+ if(rdata.result)
+ return m;
+
+ return NULL;
+}
+
+
+// ----------------------------------------------------------------------------
+// REGISTRY THIS MACHINE UNIQUE ID
+
+static inline int is_machine_guid_blacklisted(const char *guid) {
+ // these are machine GUIDs that have been included in distribution packages.
+ // we blacklist them here, so that the next version of netdata will generate
+ // new ones.
+
+ if(!strcmp(guid, "8a795b0c-2311-11e6-8563-000c295076a6")
+ || !strcmp(guid, "4aed1458-1c3e-11e6-a53f-000c290fc8f5")
+ ) {
+ error("Blacklisted machine GUID '%s' found.", guid);
+ return 1;
+ }
+
+ return 0;
+}
+
+char *registry_get_this_machine_hostname(void) {
+ return registry.hostname;
+}
+
+char *registry_get_this_machine_guid(void) {
+ static char guid[GUID_LEN + 1] = "";
+
+ if(likely(guid[0]))
+ return guid;
+
+ // read it from disk
+ int fd = open(registry.machine_guid_filename, O_RDONLY);
+ if(fd != -1) {
+ char buf[GUID_LEN + 1];
+ if(read(fd, buf, GUID_LEN) != GUID_LEN)
+ error("Failed to read machine GUID from '%s'", registry.machine_guid_filename);
+ else {
+ buf[GUID_LEN] = '\0';
+ if(regenerate_guid(buf, guid) == -1) {
+ error("Failed to validate machine GUID '%s' from '%s'. Ignoring it - this might mean this netdata will appear as duplicate in the registry.",
+ buf, registry.machine_guid_filename);
+
+ guid[0] = '\0';
+ }
+ else if(is_machine_guid_blacklisted(guid))
+ guid[0] = '\0';
+ }
+ close(fd);
+ }
+
+ // generate a new one?
+ if(!guid[0]) {
+ uuid_t uuid;
+
+ uuid_generate_time(uuid);
+ uuid_unparse_lower(uuid, guid);
+ guid[GUID_LEN] = '\0';
+
+        // save it
+        // note: mode is 0444 (octal) - the original literal 444 was decimal,
+        // which silently meant mode 0674
+        fd = open(registry.machine_guid_filename, O_WRONLY|O_CREAT|O_TRUNC, 0444);
+ if(fd == -1)
+ fatal("Cannot create unique machine id file '%s'. Please fix this.", registry.machine_guid_filename);
+
+ if(write(fd, guid, GUID_LEN) != GUID_LEN)
+ fatal("Cannot write the unique machine id file '%s'. Please fix this.", registry.machine_guid_filename);
+
+ close(fd);
+ }
+
+ setenv("NETDATA_REGISTRY_UNIQUE_ID", guid, 1);
+
+ return guid;
+}
diff --git a/registry/registry_internals.h b/registry/registry_internals.h
new file mode 100644
index 0000000..0eb83a4
--- /dev/null
+++ b/registry/registry_internals.h
@@ -0,0 +1,89 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_REGISTRY_INTERNALS_H
+#define NETDATA_REGISTRY_INTERNALS_H 1
+
+#include "registry.h"
+
+#define REGISTRY_URL_FLAGS_DEFAULT 0x00
+#define REGISTRY_URL_FLAGS_EXPIRED 0x01
+
+#define DICTIONARY_FLAGS (DICTIONARY_FLAG_VALUE_LINK_DONT_CLONE | DICTIONARY_FLAG_NAME_LINK_DONT_CLONE | DICTIONARY_FLAG_SINGLE_THREADED)
+
+// ----------------------------------------------------------------------------
+// COMMON structures
+
+struct registry {
+ int enabled;
+
+ // entries counters / statistics
+ unsigned long long persons_count;
+ unsigned long long machines_count;
+ unsigned long long usages_count;
+ unsigned long long urls_count;
+ unsigned long long persons_urls_count;
+ unsigned long long machines_urls_count;
+ unsigned long long log_count;
+
+ // memory counters / statistics
+ unsigned long long persons_memory;
+ unsigned long long machines_memory;
+ unsigned long long urls_memory;
+ unsigned long long persons_urls_memory;
+ unsigned long long machines_urls_memory;
+
+ // configuration
+ unsigned long long save_registry_every_entries;
+ char *registry_domain;
+ char *hostname;
+ char *registry_to_announce;
+ char *cloud_base_url;
+ time_t persons_expiration; // seconds to expire idle persons
+ int verify_cookies_redirects;
+
+ size_t max_url_length;
+ size_t max_name_length;
+
+ // file/path names
+ char *pathname;
+ char *db_filename;
+ char *log_filename;
+ char *machine_guid_filename;
+
+ // open files
+ FILE *log_fp;
+
+ // the database
+ DICTIONARY *persons; // dictionary of REGISTRY_PERSON *, with key the REGISTRY_PERSON.guid
+ DICTIONARY *machines; // dictionary of REGISTRY_MACHINE *, with key the REGISTRY_MACHINE.guid
+
+ avl_tree_type registry_urls_root_index;
+
+ netdata_mutex_t lock;
+};
+
+#include "registry_url.h"
+#include "registry_machine.h"
+#include "registry_person.h"
+#include "registry.h"
+
+extern struct registry registry;
+
+// REGISTRY LOW-LEVEL REQUESTS (in registry_internals.c)
+extern REGISTRY_PERSON *registry_request_access(char *person_guid, char *machine_guid, char *url, char *name, time_t when);
+extern REGISTRY_PERSON *registry_request_delete(char *person_guid, char *machine_guid, char *url, char *delete_url, time_t when);
+extern REGISTRY_MACHINE *registry_request_machine(char *person_guid, char *machine_guid, char *url, char *request_machine, time_t when);
+
+// REGISTRY LOG (in registry_log.c)
+extern void registry_log(char action, REGISTRY_PERSON *p, REGISTRY_MACHINE *m, REGISTRY_URL *u, char *name);
+extern int registry_log_open(void);
+extern void registry_log_close(void);
+extern void registry_log_recreate(void);
+extern ssize_t registry_log_load(void);
+
+// REGISTRY DB (in registry_db.c)
+extern int registry_db_save(void);
+extern size_t registry_db_load(void);
+extern int registry_db_should_be_saved(void);
+
+#endif //NETDATA_REGISTRY_INTERNALS_H
diff --git a/registry/registry_log.c b/registry/registry_log.c
new file mode 100644
index 0000000..e0e58ed
--- /dev/null
+++ b/registry/registry_log.c
@@ -0,0 +1,136 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "../daemon/common.h"
+#include "registry_internals.h"
+
+void registry_log(char action, REGISTRY_PERSON *p, REGISTRY_MACHINE *m, REGISTRY_URL *u, char *name) {
+ if(likely(registry.log_fp)) {
+ if(unlikely(fprintf(registry.log_fp, "%c\t%08x\t%s\t%s\t%s\t%s\n",
+ action,
+ p->last_t,
+ p->guid,
+ m->guid,
+ name,
+ u->url) < 0))
+ error("Registry: failed to save log. Registry data may be lost in case of abnormal restart.");
+
+ // we increase the counter even on failures
+ // so that the registry will be saved periodically
+ registry.log_count++;
+
+ // this must be outside the log_lock(), or a deadlock will happen.
+ // registry_db_save() checks the same inside the log_lock, so only
+ // one thread will save the db
+ if(unlikely(registry_db_should_be_saved()))
+ registry_db_save();
+ }
+}
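+
+// Illustrative log record (tab-separated; GUIDs made up):
+//
+//   A  60b5a4c0  <person guid>  <machine guid>  <name>  <url>
+//
+// 'A' logs an access, 'D' a delete; the hex field is the person's last_t.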
+
+int registry_log_open(void) {
+ if(registry.log_fp)
+ fclose(registry.log_fp);
+
+ registry.log_fp = fopen(registry.log_filename, "a");
+ if(registry.log_fp) {
+ if (setvbuf(registry.log_fp, NULL, _IOLBF, 0) != 0)
+ error("Cannot set line buffering on registry log file.");
+ return 0;
+ }
+
+ error("Cannot open registry log file '%s'. Registry data will be lost in case of netdata or server crash.", registry.log_filename);
+ return -1;
+}
+
+void registry_log_close(void) {
+ if(registry.log_fp) {
+ fclose(registry.log_fp);
+ registry.log_fp = NULL;
+ }
+}
+
+void registry_log_recreate(void) {
+ if(registry.log_fp != NULL) {
+ registry_log_close();
+
+ // open it with truncate
+ registry.log_fp = fopen(registry.log_filename, "w");
+ if(registry.log_fp) fclose(registry.log_fp);
+ else error("Cannot truncate registry log '%s'", registry.log_filename);
+
+ registry.log_fp = NULL;
+ registry_log_open();
+ }
+}
+
+ssize_t registry_log_load(void) {
+ ssize_t line = -1;
+
+ // closing the log is required here
+ // otherwise we will append to it the values we read
+ registry_log_close();
+
+ debug(D_REGISTRY, "Registry: loading active db from: %s", registry.log_filename);
+ FILE *fp = fopen(registry.log_filename, "r");
+ if(!fp)
+ error("Registry: cannot open registry file: %s", registry.log_filename);
+ else {
+ char *s, buf[4096 + 1];
+ line = 0;
+ size_t len = 0;
+
+ while ((s = fgets_trim_len(buf, 4096, fp, &len))) {
+ line++;
+
+ switch (s[0]) {
+ case 'A': // accesses
+ case 'D': // deletes
+
+ // verify it is valid
+ if (unlikely(len < 85 || s[1] != '\t' || s[10] != '\t' || s[47] != '\t' || s[84] != '\t')) {
+ error("Registry: log line %zd is wrong (len = %zu).", line, len);
+ continue;
+ }
+ s[1] = s[10] = s[47] = s[84] = '\0';
+
+ // get the variables
+ time_t when = strtoul(&s[2], NULL, 16);
+ char *person_guid = &s[11];
+ char *machine_guid = &s[48];
+ char *name = &s[85];
+
+ // skip the name to find the url
+ char *url = name;
+ while(*url && *url != '\t') url++;
+ if(!*url) {
+ error("Registry: log line %zd does not have a url.", line);
+ continue;
+ }
+ *url++ = '\0';
+
+ // make sure the person exists
+ // without this, a new person guid will be created
+ REGISTRY_PERSON *p = registry_person_find(person_guid);
+ if(!p) p = registry_person_allocate(person_guid, when);
+
+ if(s[0] == 'A')
+ registry_request_access(p->guid, machine_guid, url, name, when);
+ else
+ registry_request_delete(p->guid, machine_guid, url, name, when);
+
+ registry.log_count++;
+ break;
+
+ default:
+ error("Registry: ignoring line %zd of filename '%s': %s.", line, registry.log_filename, s);
+ break;
+ }
+ }
+
+ fclose(fp);
+ }
+
+ // open the log again
+ registry_log_open();
+
+ return line;
+}
diff --git a/registry/registry_machine.c b/registry/registry_machine.c
new file mode 100644
index 0000000..8dbeb8e
--- /dev/null
+++ b/registry/registry_machine.c
@@ -0,0 +1,104 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "../daemon/common.h"
+#include "registry_internals.h"
+
+// ----------------------------------------------------------------------------
+// MACHINE
+
+REGISTRY_MACHINE *registry_machine_find(const char *machine_guid) {
+ debug(D_REGISTRY, "Registry: registry_machine_find('%s')", machine_guid);
+ return dictionary_get(registry.machines, machine_guid);
+}
+
+REGISTRY_MACHINE_URL *registry_machine_url_allocate(REGISTRY_MACHINE *m, REGISTRY_URL *u, time_t when) {
+ debug(D_REGISTRY, "registry_machine_url_allocate('%s', '%s'): allocating %zu bytes", m->guid, u->url, sizeof(REGISTRY_MACHINE_URL));
+
+ REGISTRY_MACHINE_URL *mu = mallocz(sizeof(REGISTRY_MACHINE_URL));
+
+ mu->first_t = mu->last_t = (uint32_t)when;
+ mu->usages = 1;
+ mu->url = u;
+ mu->flags = REGISTRY_URL_FLAGS_DEFAULT;
+
+ registry.machines_urls_memory += sizeof(REGISTRY_MACHINE_URL);
+
+ debug(D_REGISTRY, "registry_machine_url_allocate('%s', '%s'): indexing URL in machine", m->guid, u->url);
+ dictionary_set(m->machine_urls, u->url, mu, sizeof(REGISTRY_MACHINE_URL));
+
+ registry_url_link(u);
+
+ return mu;
+}
+
+REGISTRY_MACHINE *registry_machine_allocate(const char *machine_guid, time_t when) {
+ debug(D_REGISTRY, "Registry: registry_machine_allocate('%s'): creating new machine, sizeof(MACHINE)=%zu", machine_guid, sizeof(REGISTRY_MACHINE));
+
+ REGISTRY_MACHINE *m = mallocz(sizeof(REGISTRY_MACHINE));
+
+ strncpyz(m->guid, machine_guid, GUID_LEN);
+
+ debug(D_REGISTRY, "Registry: registry_machine_allocate('%s'): creating dictionary of urls", machine_guid);
+ m->machine_urls = dictionary_create(DICTIONARY_FLAGS);
+
+ m->first_t = m->last_t = (uint32_t)when;
+ m->usages = 0;
+
+ registry.machines_memory += sizeof(REGISTRY_MACHINE);
+
+ registry.machines_count++;
+ dictionary_set(registry.machines, m->guid, m, sizeof(REGISTRY_MACHINE));
+
+ return m;
+}
+
+// 1. validate machine GUID
+// 2. if it is valid, find it or create it and return it
+// 3. if it is not valid, return NULL
+REGISTRY_MACHINE *registry_machine_get(const char *machine_guid, time_t when) {
+ REGISTRY_MACHINE *m = NULL;
+
+ if(likely(machine_guid && *machine_guid)) {
+ // validate it is a GUID
+ char buf[GUID_LEN + 1];
+ if(unlikely(regenerate_guid(machine_guid, buf) == -1))
+ info("Registry: machine guid '%s' is not a valid guid. Ignoring it.", machine_guid);
+ else {
+ machine_guid = buf;
+ m = registry_machine_find(machine_guid);
+ if(!m) m = registry_machine_allocate(machine_guid, when);
+ }
+ }
+
+ return m;
+}
+
+
+// ----------------------------------------------------------------------------
+// LINKING OF OBJECTS
+
+REGISTRY_MACHINE_URL *registry_machine_link_to_url(REGISTRY_MACHINE *m, REGISTRY_URL *u, time_t when) {
+ debug(D_REGISTRY, "registry_machine_link_to_url('%s', '%s'): searching for URL in machine", m->guid, u->url);
+
+ REGISTRY_MACHINE_URL *mu = dictionary_get(m->machine_urls, u->url);
+ if(!mu) {
+ debug(D_REGISTRY, "registry_machine_link_to_url('%s', '%s'): not found", m->guid, u->url);
+ mu = registry_machine_url_allocate(m, u, when);
+ registry.machines_urls_count++;
+ }
+ else {
+ debug(D_REGISTRY, "registry_machine_link_to_url('%s', '%s'): found", m->guid, u->url);
+ mu->usages++;
+ if(likely(mu->last_t < (uint32_t)when)) mu->last_t = (uint32_t)when;
+ }
+
+ m->usages++;
+ if(likely(m->last_t < (uint32_t)when)) m->last_t = (uint32_t)when;
+
+ if(mu->flags & REGISTRY_URL_FLAGS_EXPIRED) {
+ debug(D_REGISTRY, "registry_machine_link_to_url('%s', '%s'): accessing an expired URL.", m->guid, u->url);
+ mu->flags &= ~REGISTRY_URL_FLAGS_EXPIRED;
+ }
+
+ return mu;
+}
diff --git a/registry/registry_machine.h b/registry/registry_machine.h
new file mode 100644
index 0000000..77ab5aa
--- /dev/null
+++ b/registry/registry_machine.h
@@ -0,0 +1,43 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_REGISTRY_MACHINE_H
+#define NETDATA_REGISTRY_MACHINE_H 1
+
+#include "registry_internals.h"
+
+// ----------------------------------------------------------------------------
+// MACHINE structures
+
+// For each MACHINE-URL pair we keep this
+struct registry_machine_url {
+ REGISTRY_URL *url; // de-duplicated URL
+
+ uint8_t flags;
+
+ uint32_t first_t; // the first time we saw this
+ uint32_t last_t; // the last time we saw this
+ uint32_t usages; // how many times this has been accessed
+};
+typedef struct registry_machine_url REGISTRY_MACHINE_URL;
+
+// A machine
+struct registry_machine {
+ char guid[GUID_LEN + 1]; // the GUID
+
+ uint32_t links; // the number of REGISTRY_PERSON_URL linked to this machine
+
+ DICTIONARY *machine_urls; // MACHINE_URL *
+
+ uint32_t first_t; // the first time we saw this
+ uint32_t last_t; // the last time we saw this
+ uint32_t usages; // how many times this has been accessed
+};
+typedef struct registry_machine REGISTRY_MACHINE;
+
+extern REGISTRY_MACHINE *registry_machine_find(const char *machine_guid);
+extern REGISTRY_MACHINE_URL *registry_machine_url_allocate(REGISTRY_MACHINE *m, REGISTRY_URL *u, time_t when);
+extern REGISTRY_MACHINE *registry_machine_allocate(const char *machine_guid, time_t when);
+extern REGISTRY_MACHINE *registry_machine_get(const char *machine_guid, time_t when);
+extern REGISTRY_MACHINE_URL *registry_machine_link_to_url(REGISTRY_MACHINE *m, REGISTRY_URL *u, time_t when);
+
+#endif //NETDATA_REGISTRY_MACHINE_H
diff --git a/registry/registry_person.c b/registry/registry_person.c
new file mode 100644
index 0000000..268b0bd
--- /dev/null
+++ b/registry/registry_person.c
@@ -0,0 +1,267 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "../daemon/common.h"
+#include "registry_internals.h"
+
+// ----------------------------------------------------------------------------
+// PERSON_URL INDEX
+
+int person_url_compare(void *a, void *b) {
+ register uint32_t hash1 = ((REGISTRY_PERSON_URL *)a)->url->hash;
+ register uint32_t hash2 = ((REGISTRY_PERSON_URL *)b)->url->hash;
+
+ if(hash1 < hash2) return -1;
+ else if(hash1 > hash2) return 1;
+ else return strcmp(((REGISTRY_PERSON_URL *)a)->url->url, ((REGISTRY_PERSON_URL *)b)->url->url);
+}
+
+inline REGISTRY_PERSON_URL *registry_person_url_index_find(REGISTRY_PERSON *p, const char *url) {
+ debug(D_REGISTRY, "Registry: registry_person_url_index_find('%s', '%s')", p->guid, url);
+
+ char buf[sizeof(REGISTRY_URL) + strlen(url)];
+
+ REGISTRY_URL *u = (REGISTRY_URL *)&buf;
+ strcpy(u->url, url);
+ u->hash = simple_hash(u->url);
+
+ REGISTRY_PERSON_URL tpu = { .url = u };
+
+ REGISTRY_PERSON_URL *pu = (REGISTRY_PERSON_URL *)avl_search(&p->person_urls, (void *)&tpu);
+ return pu;
+}
+
+inline REGISTRY_PERSON_URL *registry_person_url_index_add(REGISTRY_PERSON *p, REGISTRY_PERSON_URL *pu) {
+ debug(D_REGISTRY, "Registry: registry_person_url_index_add('%s', '%s')", p->guid, pu->url->url);
+ REGISTRY_PERSON_URL *tpu = (REGISTRY_PERSON_URL *)avl_insert(&(p->person_urls), (avl *)(pu));
+ if(tpu != pu)
+ error("Registry: registry_person_url_index_add('%s', '%s') already exists as '%s'", p->guid, pu->url->url, tpu->url->url);
+
+ return tpu;
+}
+
+inline REGISTRY_PERSON_URL *registry_person_url_index_del(REGISTRY_PERSON *p, REGISTRY_PERSON_URL *pu) {
+ debug(D_REGISTRY, "Registry: registry_person_url_index_del('%s', '%s')", p->guid, pu->url->url);
+ REGISTRY_PERSON_URL *tpu = (REGISTRY_PERSON_URL *)avl_remove(&(p->person_urls), (avl *)(pu));
+ if(!tpu)
+ error("Registry: registry_person_url_index_del('%s', '%s') deleted nothing", p->guid, pu->url->url);
+ else if(tpu != pu)
+ error("Registry: registry_person_url_index_del('%s', '%s') deleted wrong URL '%s'", p->guid, pu->url->url, tpu->url->url);
+
+ return tpu;
+}
+
+// ----------------------------------------------------------------------------
+// PERSON_URL
+
+REGISTRY_PERSON_URL *registry_person_url_allocate(REGISTRY_PERSON *p, REGISTRY_MACHINE *m, REGISTRY_URL *u, char *name, size_t namelen, time_t when) {
+ debug(D_REGISTRY, "registry_person_url_allocate('%s', '%s', '%s'): allocating %zu bytes", p->guid, m->guid, u->url, sizeof(REGISTRY_PERSON_URL) + namelen);
+
+ // protection from too big names
+ if(namelen > registry.max_name_length)
+ namelen = registry.max_name_length;
+
+ REGISTRY_PERSON_URL *pu = mallocz(sizeof(REGISTRY_PERSON_URL) + namelen);
+
+ // a simple strcpy() should do the job
+ // but I prefer to be safe, since the caller specified namelen
+ strncpyz(pu->machine_name, name, namelen);
+
+ pu->machine = m;
+ pu->first_t = pu->last_t = (uint32_t)when;
+ pu->usages = 1;
+ pu->url = u;
+ pu->flags = REGISTRY_URL_FLAGS_DEFAULT;
+ m->links++;
+
+ registry.persons_urls_memory += sizeof(REGISTRY_PERSON_URL) + namelen;
+
+ debug(D_REGISTRY, "registry_person_url_allocate('%s', '%s', '%s'): indexing URL in person", p->guid, m->guid, u->url);
+ REGISTRY_PERSON_URL *tpu = registry_person_url_index_add(p, pu);
+ if(tpu != pu) {
+ error("Registry: Attempted to add duplicate person url '%s' with name '%s' to person '%s'", u->url, name, p->guid);
+ freez(pu);
+ pu = tpu;
+ }
+ else
+ registry_url_link(u);
+
+ return pu;
+}
+
+void registry_person_url_free(REGISTRY_PERSON *p, REGISTRY_PERSON_URL *pu) {
+ debug(D_REGISTRY, "registry_person_url_free('%s', '%s')", p->guid, pu->url->url);
+
+ REGISTRY_PERSON_URL *tpu = registry_person_url_index_del(p, pu);
+ if(tpu) {
+ registry_url_unlink(tpu->url);
+ tpu->machine->links--;
+ registry.persons_urls_memory -= sizeof(REGISTRY_PERSON_URL) + strlen(tpu->machine_name);
+ freez(tpu);
+ }
+}
+
+// this function is needed to change the name of a PERSON_URL
+REGISTRY_PERSON_URL *registry_person_url_reallocate(REGISTRY_PERSON *p, REGISTRY_MACHINE *m, REGISTRY_URL *u, char *name, size_t namelen, time_t when, REGISTRY_PERSON_URL *pu) {
+ debug(D_REGISTRY, "registry_person_url_reallocate('%s', '%s', '%s'): allocating %zu bytes", p->guid, m->guid, u->url, sizeof(REGISTRY_PERSON_URL) + namelen);
+
+ // keep a backup
+ REGISTRY_PERSON_URL pu2 = {
+ .first_t = pu->first_t,
+ .last_t = pu->last_t,
+ .usages = pu->usages,
+ .flags = pu->flags,
+ .machine = pu->machine,
+ .machine_name = ""
+ };
+
+ // remove the existing one from the index
+ registry_person_url_free(p, pu);
+ pu = &pu2;
+
+ // allocate a new one
+ REGISTRY_PERSON_URL *tpu = registry_person_url_allocate(p, m, u, name, namelen, when);
+ tpu->first_t = pu->first_t;
+ tpu->last_t = pu->last_t;
+ tpu->usages = pu->usages;
+ tpu->flags = pu->flags;
+
+ return tpu;
+}
+
+
+// ----------------------------------------------------------------------------
+// PERSON
+
+REGISTRY_PERSON *registry_person_find(const char *person_guid) {
+ debug(D_REGISTRY, "Registry: registry_person_find('%s')", person_guid);
+ return dictionary_get(registry.persons, person_guid);
+}
+
+REGISTRY_PERSON *registry_person_allocate(const char *person_guid, time_t when) {
+ debug(D_REGISTRY, "Registry: registry_person_allocate('%s'): allocating new person, sizeof(PERSON)=%zu", (person_guid)?person_guid:"", sizeof(REGISTRY_PERSON));
+
+ REGISTRY_PERSON *p = mallocz(sizeof(REGISTRY_PERSON));
+ if(!person_guid) {
+ for(;;) {
+ uuid_t uuid;
+ uuid_generate(uuid);
+ uuid_unparse_lower(uuid, p->guid);
+
+ debug(D_REGISTRY, "Registry: Checking if the generated person guid '%s' is unique", p->guid);
+ if (!dictionary_get(registry.persons, p->guid)) {
+ debug(D_REGISTRY, "Registry: generated person guid '%s' is unique", p->guid);
+ break;
+ }
+ else
+ info("Registry: generated person guid '%s' found in the registry. Retrying...", p->guid);
+ }
+ }
+ else
+ strncpyz(p->guid, person_guid, GUID_LEN);
+
+ debug(D_REGISTRY, "Registry: registry_person_allocate('%s'): creating dictionary of urls", p->guid);
+ avl_init(&p->person_urls, person_url_compare);
+
+ p->first_t = p->last_t = (uint32_t)when;
+ p->usages = 0;
+
+ registry.persons_memory += sizeof(REGISTRY_PERSON);
+
+ registry.persons_count++;
+ dictionary_set(registry.persons, p->guid, p, sizeof(REGISTRY_PERSON));
+
+ return p;
+}
+
+
+// 1. validate person GUID
+// 2. if it is valid, find it
+// 3. if it is not valid, create a new one
+// 4. return it
+REGISTRY_PERSON *registry_person_get(const char *person_guid, time_t when) {
+ debug(D_REGISTRY, "Registry: registry_person_get('%s'): creating dictionary of urls", person_guid);
+
+ REGISTRY_PERSON *p = NULL;
+
+ if(person_guid && *person_guid) {
+ char buf[GUID_LEN + 1];
+ // validate it is a GUID
+ if(unlikely(regenerate_guid(person_guid, buf) == -1))
+ info("Registry: person guid '%s' is not a valid guid. Ignoring it.", person_guid);
+ else {
+ person_guid = buf;
+ p = registry_person_find(person_guid);
+ }
+ }
+
+ if(!p) p = registry_person_allocate(NULL, when);
+
+ return p;
+}
+
+void registry_person_del(REGISTRY_PERSON *p) {
+ debug(D_REGISTRY, "Registry: registry_person_del('%s'): creating dictionary of urls", p->guid);
+
+ while(p->person_urls.root)
+ registry_person_unlink_from_url(p, (REGISTRY_PERSON_URL *)p->person_urls.root);
+
+ debug(D_REGISTRY, "Registry: deleting person '%s' from persons registry", p->guid);
+ dictionary_del(registry.persons, p->guid);
+
+ debug(D_REGISTRY, "Registry: freeing person '%s'", p->guid);
+ freez(p);
+}
+
+// ----------------------------------------------------------------------------
+// LINKING OF OBJECTS
+
+REGISTRY_PERSON_URL *registry_person_link_to_url(REGISTRY_PERSON *p, REGISTRY_MACHINE *m, REGISTRY_URL *u, char *name, size_t namelen, time_t when) {
+ debug(D_REGISTRY, "registry_person_link_to_url('%s', '%s', '%s'): searching for URL in person", p->guid, m->guid, u->url);
+
+ REGISTRY_PERSON_URL *pu = registry_person_url_index_find(p, u->url);
+ if(!pu) {
+ debug(D_REGISTRY, "registry_person_link_to_url('%s', '%s', '%s'): not found", p->guid, m->guid, u->url);
+ pu = registry_person_url_allocate(p, m, u, name, namelen, when);
+ registry.persons_urls_count++;
+ }
+ else {
+ debug(D_REGISTRY, "registry_person_link_to_url('%s', '%s', '%s'): found", p->guid, m->guid, u->url);
+ pu->usages++;
+ if(likely(pu->last_t < (uint32_t)when)) pu->last_t = (uint32_t)when;
+
+ if(pu->machine != m) {
+ REGISTRY_MACHINE_URL *mu = dictionary_get(pu->machine->machine_urls, u->url);
+ if(mu) {
+ debug(D_REGISTRY, "registry_person_link_to_url('%s', '%s', '%s'): URL switched machines (old was '%s') - expiring it from previous machine.",
+ p->guid, m->guid, u->url, pu->machine->guid);
+ mu->flags |= REGISTRY_URL_FLAGS_EXPIRED;
+ }
+ else {
+ debug(D_REGISTRY, "registry_person_link_to_url('%s', '%s', '%s'): URL switched machines (old was '%s') - but the URL is not linked to the old machine.",
+ p->guid, m->guid, u->url, pu->machine->guid);
+ }
+
+ pu->machine->links--;
+ pu->machine = m;
+ }
+
+ if(strcmp(pu->machine_name, name) != 0) {
+ // the name of the PERSON_URL has changed !
+ pu = registry_person_url_reallocate(p, m, u, name, namelen, when, pu);
+ }
+ }
+
+ p->usages++;
+ if(likely(p->last_t < (uint32_t)when)) p->last_t = (uint32_t)when;
+
+ if(pu->flags & REGISTRY_URL_FLAGS_EXPIRED) {
+ debug(D_REGISTRY, "registry_person_link_to_url('%s', '%s', '%s'): accessing an expired URL. Re-enabling URL.", p->guid, m->guid, u->url);
+ pu->flags &= ~REGISTRY_URL_FLAGS_EXPIRED;
+ }
+
+ return pu;
+}
+
+void registry_person_unlink_from_url(REGISTRY_PERSON *p, REGISTRY_PERSON_URL *pu) {
+ registry_person_url_free(p, pu);
+}
diff --git a/registry/registry_person.h b/registry/registry_person.h
new file mode 100644
index 0000000..9a4aa95
--- /dev/null
+++ b/registry/registry_person.h
@@ -0,0 +1,62 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_REGISTRY_PERSON_H
+#define NETDATA_REGISTRY_PERSON_H 1
+
+#include "registry_internals.h"
+
+// ----------------------------------------------------------------------------
+// PERSON structures
+
+// for each PERSON-URL pair we keep this
+struct registry_person_url {
+ avl avl; // binary tree node
+
+ REGISTRY_URL *url; // de-duplicated URL
+ REGISTRY_MACHINE *machine; // link the MACHINE of this URL
+
+ uint8_t flags;
+
+ uint32_t first_t; // the first time we saw this
+ uint32_t last_t; // the last time we saw this
+ uint32_t usages; // how many times this has been accessed
+
+ char machine_name[1]; // the name of the machine, as known by the user
+ // dynamically allocated to fit properly
+};
+typedef struct registry_person_url REGISTRY_PERSON_URL;
+
+// A person
+struct registry_person {
+ char guid[GUID_LEN + 1]; // the person GUID
+
+ avl_tree_type person_urls; // dictionary of PERSON_URLs
+
+ uint32_t first_t; // the first time we saw this
+ uint32_t last_t; // the last time we saw this
+ uint32_t usages; // how many times this has been accessed
+
+ //uint32_t flags;
+ //char *email;
+};
+typedef struct registry_person REGISTRY_PERSON;
+
+// PERSON_URL
+extern REGISTRY_PERSON_URL *registry_person_url_index_find(REGISTRY_PERSON *p, const char *url);
+extern REGISTRY_PERSON_URL *registry_person_url_index_add(REGISTRY_PERSON *p, REGISTRY_PERSON_URL *pu) NEVERNULL WARNUNUSED;
+extern REGISTRY_PERSON_URL *registry_person_url_index_del(REGISTRY_PERSON *p, REGISTRY_PERSON_URL *pu) WARNUNUSED;
+
+extern REGISTRY_PERSON_URL *registry_person_url_allocate(REGISTRY_PERSON *p, REGISTRY_MACHINE *m, REGISTRY_URL *u, char *name, size_t namelen, time_t when);
+extern REGISTRY_PERSON_URL *registry_person_url_reallocate(REGISTRY_PERSON *p, REGISTRY_MACHINE *m, REGISTRY_URL *u, char *name, size_t namelen, time_t when, REGISTRY_PERSON_URL *pu);
+
+// PERSON
+extern REGISTRY_PERSON *registry_person_find(const char *person_guid);
+extern REGISTRY_PERSON *registry_person_allocate(const char *person_guid, time_t when);
+extern REGISTRY_PERSON *registry_person_get(const char *person_guid, time_t when);
+extern void registry_person_del(REGISTRY_PERSON *p);
+
+// LINKING PERSON -> PERSON_URL
+extern REGISTRY_PERSON_URL *registry_person_link_to_url(REGISTRY_PERSON *p, REGISTRY_MACHINE *m, REGISTRY_URL *u, char *name, size_t namelen, time_t when);
+extern void registry_person_unlink_from_url(REGISTRY_PERSON *p, REGISTRY_PERSON_URL *pu);
+
+#endif //NETDATA_REGISTRY_PERSON_H
diff --git a/registry/registry_url.c b/registry/registry_url.c
new file mode 100644
index 0000000..9ac3ce1
--- /dev/null
+++ b/registry/registry_url.c
@@ -0,0 +1,88 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "../daemon/common.h"
+#include "registry_internals.h"
+
+// ----------------------------------------------------------------------------
+// REGISTRY_URL
+
+int registry_url_compare(void *a, void *b) {
+ if(((REGISTRY_URL *)a)->hash < ((REGISTRY_URL *)b)->hash) return -1;
+ else if(((REGISTRY_URL *)a)->hash > ((REGISTRY_URL *)b)->hash) return 1;
+ else return strcmp(((REGISTRY_URL *)a)->url, ((REGISTRY_URL *)b)->url);
+}
+
+inline REGISTRY_URL *registry_url_index_add(REGISTRY_URL *u) {
+ return (REGISTRY_URL *)avl_insert(&(registry.registry_urls_root_index), (avl *)(u));
+}
+
+inline REGISTRY_URL *registry_url_index_del(REGISTRY_URL *u) {
+ return (REGISTRY_URL *)avl_remove(&(registry.registry_urls_root_index), (avl *)(u));
+}
+
+REGISTRY_URL *registry_url_get(const char *url, size_t urllen) {
+ // protection from too big URLs
+ if(urllen > registry.max_url_length)
+ urllen = registry.max_url_length;
+
+ debug(D_REGISTRY, "Registry: registry_url_get('%s', %zu)", url, urllen);
+
+ char buf[sizeof(REGISTRY_URL) + urllen]; // no need for +1, 1 is already in REGISTRY_URL
+ REGISTRY_URL *n = (REGISTRY_URL *)&buf[0];
+ n->len = (uint16_t)urllen;
+ strncpyz(n->url, url, n->len);
+ n->hash = simple_hash(n->url);
+
+ REGISTRY_URL *u = (REGISTRY_URL *)avl_search(&(registry.registry_urls_root_index), (avl *)n);
+ if(!u) {
+ debug(D_REGISTRY, "Registry: registry_url_get('%s', %zu): allocating %zu bytes", url, urllen, sizeof(REGISTRY_URL) + urllen);
+ u = callocz(1, sizeof(REGISTRY_URL) + urllen); // no need for +1, 1 is already in REGISTRY_URL
+
+ // a simple strcpy() should do the job
+ // but I prefer to be safe, since the caller specified urllen
+ u->len = (uint16_t)urllen;
+ strncpyz(u->url, url, u->len);
+ u->links = 0;
+ u->hash = simple_hash(u->url);
+
+ registry.urls_memory += sizeof(REGISTRY_URL) + urllen; // no need for +1, 1 is already in REGISTRY_URL
+
+ debug(D_REGISTRY, "Registry: registry_url_get('%s'): indexing it", url);
+ n = registry_url_index_add(u);
+ if(n != u) {
+ error("INTERNAL ERROR: registry_url_get(): url '%s' already exists in the registry as '%s'", u->url, n->url);
+ freez(u);
+ u = n;
+ }
+ else
+ registry.urls_count++;
+ }
+
+ return u;
+}
+
+void registry_url_link(REGISTRY_URL *u) {
+ u->links++;
+ debug(D_REGISTRY, "Registry: registry_url_link('%s'): URL has now %u links", u->url, u->links);
+}
+
+void registry_url_unlink(REGISTRY_URL *u) {
+ u->links--;
+ if(!u->links) {
+ debug(D_REGISTRY, "Registry: registry_url_unlink('%s'): No more links for this URL", u->url);
+ REGISTRY_URL *n = registry_url_index_del(u);
+ if(!n) {
+ error("INTERNAL ERROR: registry_url_unlink('%s'): cannot find url in index", u->url);
+ }
+ else {
+ if(n != u) {
+ error("INTERNAL ERROR: registry_url_unlink('%s'): deleted different url '%s'", u->url, n->url);
+ }
+
+ registry.urls_memory -= sizeof(REGISTRY_URL) + n->len; // no need for +1, 1 is already in REGISTRY_URL
+ freez(n);
+ }
+ }
+ else
+ debug(D_REGISTRY, "Registry: registry_url_unlink('%s'): URL has %u links left", u->url, u->links);
+}
diff --git a/registry/registry_url.h b/registry/registry_url.h
new file mode 100644
index 0000000..c684f1c
--- /dev/null
+++ b/registry/registry_url.h
@@ -0,0 +1,35 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_REGISTRY_URL_H
+#define NETDATA_REGISTRY_URL_H 1
+
+#include "registry_internals.h"
+
+// ----------------------------------------------------------------------------
+// URL structures
+// Save memory by de-duplicating URLs
+// so instead of storing URLs all over the place
+// we store them here and we keep pointers elsewhere
+
+struct registry_url {
+ avl avl;
+ uint32_t hash; // the index hash
+
+ uint32_t links; // the number of links to this URL - when none is left, we free it
+
+ uint16_t len; // the length of the URL in bytes
+ char url[1]; // the URL - dynamically allocated to fit properly
+};
+typedef struct registry_url REGISTRY_URL;
+
+// REGISTRY_URL INDEX
+extern int registry_url_compare(void *a, void *b);
+extern REGISTRY_URL *registry_url_index_del(REGISTRY_URL *u) WARNUNUSED;
+extern REGISTRY_URL *registry_url_index_add(REGISTRY_URL *u) NEVERNULL WARNUNUSED;
+
+// REGISTRY_URL MANAGEMENT
+extern REGISTRY_URL *registry_url_get(const char *url, size_t urllen) NEVERNULL;
+extern void registry_url_link(REGISTRY_URL *u);
+extern void registry_url_unlink(REGISTRY_URL *u);
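+
+/*
+ * Illustrative lifecycle (a sketch based on the declarations above, not code
+ * taken from an actual caller): get (or create) a de-duplicated URL, take a
+ * reference for every pointer kept to it, and drop the reference when done;
+ * the URL is freed when no links remain.
+ *
+ *   const char *url = "http://localhost:19999/";
+ *   REGISTRY_URL *u = registry_url_get(url, strlen(url));
+ *   registry_url_link(u);    // e.g. a PERSON_URL now points to it
+ *   registry_url_unlink(u);  // freed here if this was the last link
+ */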
+
+#endif //NETDATA_REGISTRY_URL_H
diff --git a/spawn/Makefile.am b/spawn/Makefile.am
new file mode 100644
index 0000000..02fe3a3
--- /dev/null
+++ b/spawn/Makefile.am
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
+
diff --git a/spawn/README.md b/spawn/README.md
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/spawn/README.md
diff --git a/spawn/spawn.c b/spawn/spawn.c
new file mode 100644
index 0000000..256c046
--- /dev/null
+++ b/spawn/spawn.c
@@ -0,0 +1,290 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "spawn.h"
+#include "../database/engine/rrdenginelib.h"
+
+static uv_thread_t thread;
+int spawn_thread_error;
+int spawn_thread_shutdown;
+
+struct spawn_queue spawn_cmd_queue;
+
+static struct spawn_cmd_info *create_spawn_cmd(char *command_to_run)
+{
+ struct spawn_cmd_info *cmdinfo;
+
+ cmdinfo = mallocz(sizeof(*cmdinfo));
+ fatal_assert(0 == uv_cond_init(&cmdinfo->cond));
+ fatal_assert(0 == uv_mutex_init(&cmdinfo->mutex));
+ cmdinfo->serial = 0; /* invalid */
+ cmdinfo->command_to_run = strdupz(command_to_run);
+ cmdinfo->exit_status = -1; /* invalid */
+ cmdinfo->pid = -1; /* invalid */
+ cmdinfo->flags = 0;
+
+ return cmdinfo;
+}
+
+void destroy_spawn_cmd(struct spawn_cmd_info *cmdinfo)
+{
+ uv_cond_destroy(&cmdinfo->cond);
+ uv_mutex_destroy(&cmdinfo->mutex);
+
+ freez(cmdinfo->command_to_run);
+ freez(cmdinfo);
+}
+
+int spawn_cmd_compare(void *a, void *b)
+{
+ struct spawn_cmd_info *cmda = a, *cmdb = b;
+
+ /* No need for mutex, serial will never change and the entries cannot be deallocated yet */
+ if (cmda->serial < cmdb->serial) return -1;
+ if (cmda->serial > cmdb->serial) return 1;
+
+ return 0;
+}
+
+static void init_spawn_cmd_queue(void)
+{
+ spawn_cmd_queue.cmd_tree.root = NULL;
+ spawn_cmd_queue.cmd_tree.compar = spawn_cmd_compare;
+ spawn_cmd_queue.size = 0;
+ spawn_cmd_queue.latest_serial = 0;
+ fatal_assert(0 == uv_cond_init(&spawn_cmd_queue.cond));
+ fatal_assert(0 == uv_mutex_init(&spawn_cmd_queue.mutex));
+}
+
+/*
+ * Returns serial number of the enqueued command
+ */
+uint64_t spawn_enq_cmd(char *command_to_run)
+{
+ unsigned queue_size;
+ uint64_t serial;
+ avl *avl_ret;
+ struct spawn_cmd_info *cmdinfo;
+
+ cmdinfo = create_spawn_cmd(command_to_run);
+
+ /* wait for free space in queue */
+ uv_mutex_lock(&spawn_cmd_queue.mutex);
+ while ((queue_size = spawn_cmd_queue.size) == SPAWN_MAX_OUTSTANDING) {
+ uv_cond_wait(&spawn_cmd_queue.cond, &spawn_cmd_queue.mutex);
+ }
+ fatal_assert(queue_size < SPAWN_MAX_OUTSTANDING);
+ spawn_cmd_queue.size = queue_size + 1;
+
+ serial = ++spawn_cmd_queue.latest_serial; /* 0 is invalid */
+ cmdinfo->serial = serial; /* No need to take the cmd mutex since it is unreachable at the moment */
+
+ /* enqueue command */
+ avl_ret = avl_insert(&spawn_cmd_queue.cmd_tree, (avl *)cmdinfo);
+ fatal_assert(avl_ret == (avl *)cmdinfo);
+ uv_mutex_unlock(&spawn_cmd_queue.mutex);
+
+ /* wake up event loop */
+ fatal_assert(0 == uv_async_send(&spawn_async));
+ return serial;
+}
+
+/*
+ * Blocks until command with serial finishes running. Only one thread is allowed to wait per command.
+ */
+void spawn_wait_cmd(uint64_t serial, int *exit_status, time_t *exec_run_timestamp)
+{
+ avl *avl_ret;
+ struct spawn_cmd_info tmp, *cmdinfo;
+
+ tmp.serial = serial;
+
+ uv_mutex_lock(&spawn_cmd_queue.mutex);
+ avl_ret = avl_search(&spawn_cmd_queue.cmd_tree, (avl *)&tmp);
+ uv_mutex_unlock(&spawn_cmd_queue.mutex);
+
+ fatal_assert(avl_ret); /* Could be NULL if more than one thread waits for the command */
+ cmdinfo = (struct spawn_cmd_info *)avl_ret;
+
+ uv_mutex_lock(&cmdinfo->mutex);
+ while (!(cmdinfo->flags & SPAWN_CMD_DONE)) {
+ /* Only 1 thread is allowed to wait for this command to finish */
+ uv_cond_wait(&cmdinfo->cond, &cmdinfo->mutex);
+ }
+ uv_mutex_unlock(&cmdinfo->mutex);
+
+ spawn_deq_cmd(cmdinfo);
+ *exit_status = cmdinfo->exit_status;
+ *exec_run_timestamp = cmdinfo->exec_run_timestamp;
+
+ destroy_spawn_cmd(cmdinfo);
+}
+
+void spawn_deq_cmd(struct spawn_cmd_info *cmdinfo)
+{
+ unsigned queue_size;
+ avl *avl_ret;
+
+ uv_mutex_lock(&spawn_cmd_queue.mutex);
+ queue_size = spawn_cmd_queue.size;
+ fatal_assert(queue_size);
+ /* dequeue command */
+ avl_ret = avl_remove(&spawn_cmd_queue.cmd_tree, (avl *)cmdinfo);
+ fatal_assert(avl_ret);
+
+ spawn_cmd_queue.size = queue_size - 1;
+
+ /* wake up callers */
+ uv_cond_signal(&spawn_cmd_queue.cond);
+ uv_mutex_unlock(&spawn_cmd_queue.mutex);
+}
+
+/*
+ * Must be called from the spawn client event loop context. This way no mutex is needed because the event loop is the
+ * only writer as far as struct spawn_cmd_info entries are concerned.
+ */
+static int find_unprocessed_spawn_cmd_cb(void *entry, void *data)
+{
+ struct spawn_cmd_info **cmdinfop = data, *cmdinfo = entry;
+
+ if (!(cmdinfo->flags & SPAWN_CMD_PROCESSED)) {
+ *cmdinfop = cmdinfo;
+ return -1; /* break tree traversal */
+ }
+ return 0; /* continue traversing */
+}
+
+struct spawn_cmd_info *spawn_get_unprocessed_cmd(void)
+{
+ struct spawn_cmd_info *cmdinfo;
+ unsigned queue_size;
+ int ret;
+
+ uv_mutex_lock(&spawn_cmd_queue.mutex);
+ queue_size = spawn_cmd_queue.size;
+ if (queue_size == 0) {
+ uv_mutex_unlock(&spawn_cmd_queue.mutex);
+ return NULL;
+ }
+ /* find command */
+ cmdinfo = NULL;
+ ret = avl_traverse(&spawn_cmd_queue.cmd_tree, find_unprocessed_spawn_cmd_cb, (void *)&cmdinfo);
+ if (-1 != ret) { /* no commands available for processing */
+ uv_mutex_unlock(&spawn_cmd_queue.mutex);
+ return NULL;
+ }
+ uv_mutex_unlock(&spawn_cmd_queue.mutex);
+
+ return cmdinfo;
+}
+
+/**
+ * This function spawns a process that shares a libuv IPC pipe with the caller and performs spawn server duties.
+ * The spawn server process will close all open file descriptors except for the pipe, UV_STDOUT_FD, and UV_STDERR_FD.
+ * The caller has to be the netdata user as configured.
+ *
+ * @param loop the libuv loop of the caller context
+ * @param spawn_channel the bidirectional libuv IPC pipe that the server and the caller will share
+ * @param process the spawn server libuv process context
+ * @return 0 on success or the libuv error code
+ */
+int create_spawn_server(uv_loop_t *loop, uv_pipe_t *spawn_channel, uv_process_t *process)
+{
+ uv_process_options_t options = {0};
+ char *args[3];
+ int ret;
+#define SPAWN_SERVER_DESCRIPTORS (3)
+ uv_stdio_container_t stdio[SPAWN_SERVER_DESCRIPTORS];
+ struct passwd *passwd = NULL;
+ char *user = NULL;
+
+ passwd = getpwuid(getuid());
+ user = (passwd && passwd->pw_name) ? passwd->pw_name : "";
+
+ args[0] = exepath;
+ args[1] = SPAWN_SERVER_COMMAND_LINE_ARGUMENT;
+ args[2] = NULL;
+
+ memset(&options, 0, sizeof(options));
+ options.file = exepath;
+ options.args = args;
+ options.exit_cb = NULL; //exit_cb;
+ options.stdio = stdio;
+ options.stdio_count = SPAWN_SERVER_DESCRIPTORS;
+
+ stdio[0].flags = UV_CREATE_PIPE | UV_READABLE_PIPE | UV_WRITABLE_PIPE;
+ stdio[0].data.stream = (uv_stream_t *)spawn_channel; /* bidirectional libuv pipe */
+ stdio[1].flags = UV_INHERIT_FD;
+ stdio[1].data.fd = 1 /* UV_STDOUT_FD */;
+ stdio[2].flags = UV_INHERIT_FD;
+ stdio[2].data.fd = 2 /* UV_STDERR_FD */;
+
+ ret = uv_spawn(loop, process, &options); /* execute the netdata binary again as the netdata user */
+ if (0 != ret) {
+ error("uv_spawn (process: \"%s\") (user: %s) failed (%s).", exepath, user, uv_strerror(ret));
+ fatal("Cannot start netdata without the spawn server.");
+ }
+
+ return ret;
+}
+
+#define CONCURRENT_SPAWNS 16
+#define SPAWN_ITERATIONS 10000
+#undef CONCURRENT_STRESS_TEST
+
+void spawn_init(void)
+{
+ struct completion completion;
+ int error;
+
+ info("Initializing spawn client.");
+
+ init_spawn_cmd_queue();
+
+ init_completion(&completion);
+ error = uv_thread_create(&thread, spawn_client, &completion);
+ if (error) {
+ error("uv_thread_create(): %s", uv_strerror(error));
+ goto after_error;
+ }
+ /* wait for spawn client thread to initialize */
+ wait_for_completion(&completion);
+ destroy_completion(&completion);
+ uv_thread_set_name_np(thread, "DAEMON_SPAWN");
+
+ if (spawn_thread_error) {
+ error = uv_thread_join(&thread);
+ if (error) {
+ error("uv_thread_create(): %s", uv_strerror(error));
+ }
+ goto after_error;
+ }
+#ifdef CONCURRENT_STRESS_TEST
+ signals_reset();
+ signals_unblock();
+
+ sleep(60);
+ uint64_t serial[CONCURRENT_SPAWNS];
+ for (int j = 0 ; j < SPAWN_ITERATIONS ; ++j) {
+ for (int i = 0; i < CONCURRENT_SPAWNS; ++i) {
+ char cmd[64];
+ sprintf(cmd, "echo CONCURRENT_STRESS_TEST %d 1>&2", j * CONCURRENT_SPAWNS + i + 1);
+ serial[i] = spawn_enq_cmd(cmd);
+ info("Queued command %s for spawning.", cmd);
+ }
+ int exit_status;
+ time_t exec_run_timestamp;
+ for (int i = 0; i < CONCURRENT_SPAWNS; ++i) {
+ info("Started waiting for serial %llu exit status %d run timestamp %llu.", serial[i], exit_status,
+ exec_run_timestamp);
+ spawn_wait_cmd(serial[i], &exit_status, &exec_run_timestamp);
+ info("Finished waiting for serial %llu exit status %d run timestamp %llu.", serial[i], exit_status,
+ exec_run_timestamp);
+ }
+ }
+ exit(0);
+#endif
+ return;
+
+ after_error:
+ error("Failed to initialize spawn service. The alarms notifications will not be spawned.");
+}
diff --git a/spawn/spawn.h b/spawn/spawn.h
new file mode 100644
index 0000000..34b2632
--- /dev/null
+++ b/spawn/spawn.h
@@ -0,0 +1,109 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_SPAWN_H
+#define NETDATA_SPAWN_H 1
+
+#include "../daemon/common.h"
+
+#define SPAWN_SERVER_COMMAND_LINE_ARGUMENT "--special-spawn-server"
+
+typedef enum spawn_protocol {
+ SPAWN_PROT_EXEC_CMD = 0,
+ SPAWN_PROT_SPAWN_RESULT,
+ SPAWN_PROT_CMD_EXIT_STATUS
+} spawn_prot_t;
+
+struct spawn_prot_exec_cmd {
+ uint16_t command_length;
+ char command_to_run[];
+};
+
+struct spawn_prot_spawn_result {
+ pid_t exec_pid; /* 0 if failed to spawn */
+ time_t exec_run_timestamp; /* time of successfully spawning the command */
+};
+
+struct spawn_prot_cmd_exit_status {
+ int exec_exit_status;
+};
+
+struct spawn_prot_header {
+ spawn_prot_t opcode;
+ void *handle;
+};
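+
+/*
+ * Note on the wire format (as implemented by spawn_process_cmd() in the client
+ * and the write callbacks in the server): every message on the IPC pipe starts
+ * with a spawn_prot_header, followed by the opcode-specific payload:
+ *   SPAWN_PROT_EXEC_CMD        -> spawn_prot_exec_cmd + the command bytes
+ *   SPAWN_PROT_SPAWN_RESULT    -> spawn_prot_spawn_result
+ *   SPAWN_PROT_CMD_EXIT_STATUS -> spawn_prot_cmd_exit_status
+ */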
+
+#undef SPAWN_DEBUG /* define to enable debug prints */
+
+#define SPAWN_MAX_OUTSTANDING (32768)
+
+#define SPAWN_CMD_PROCESSED 0x00000001
+#define SPAWN_CMD_IN_PROGRESS 0x00000002
+#define SPAWN_CMD_FAILED_TO_SPAWN 0x00000004
+#define SPAWN_CMD_DONE 0x00000008
+
+struct spawn_cmd_info {
+ avl avl;
+
+ /* concurrency control per command */
+ uv_mutex_t mutex;
+ uv_cond_t cond; /* users block here until command has finished */
+
+ uint64_t serial;
+ char *command_to_run;
+ int exit_status;
+ pid_t pid;
+ unsigned long flags;
+ time_t exec_run_timestamp; /* time of successfully spawning the command */
+};
+
+/* spawn command queue */
+struct spawn_queue {
+ avl_tree_type cmd_tree;
+
+ /* concurrency control of command queue */
+ uv_mutex_t mutex;
+ uv_cond_t cond;
+
+ volatile unsigned size;
+ uint64_t latest_serial;
+};
+
+struct write_context {
+ uv_write_t write_req;
+ struct spawn_prot_header header;
+ struct spawn_prot_cmd_exit_status exit_status;
+ struct spawn_prot_spawn_result spawn_result;
+ struct spawn_prot_exec_cmd payload;
+};
+
+extern int spawn_thread_error;
+extern int spawn_thread_shutdown;
+extern uv_async_t spawn_async;
+
+void spawn_init(void);
+void spawn_server(void);
+void spawn_client(void *arg);
+void destroy_spawn_cmd(struct spawn_cmd_info *cmdinfo);
+uint64_t spawn_enq_cmd(char *command_to_run);
+void spawn_wait_cmd(uint64_t serial, int *exit_status, time_t *exec_run_timestamp);
+void spawn_deq_cmd(struct spawn_cmd_info *cmdinfo);
+struct spawn_cmd_info *spawn_get_unprocessed_cmd(void);
+int create_spawn_server(uv_loop_t *loop, uv_pipe_t *spawn_channel, uv_process_t *process);
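+
+/*
+ * Illustrative usage of the command queue API above (a sketch, not taken from
+ * an actual caller): enqueue a command, then block until it has finished.
+ *
+ *   int exit_status;
+ *   time_t exec_run_timestamp;
+ *   uint64_t serial = spawn_enq_cmd("/usr/libexec/netdata/plugins.d/alarm-notify.sh");
+ *   spawn_wait_cmd(serial, &exit_status, &exec_run_timestamp);
+ *
+ * Only one thread may wait on a given serial (see spawn_wait_cmd()).
+ */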
+
+/*
+ * Copies from the source buffer to the protocol buffer. It advances the source buffer by the amount copied. It
+ * subtracts the amount copied from the source length.
+ */
+static inline void copy_to_prot_buffer(char *prot_buffer, unsigned *prot_buffer_len, unsigned max_to_copy,
+ char **source, unsigned *source_len)
+{
+ unsigned to_copy;
+
+ to_copy = MIN(max_to_copy, *source_len);
+ memcpy(prot_buffer + *prot_buffer_len, *source, to_copy);
+ *prot_buffer_len += to_copy;
+ *source += to_copy;
+ *source_len -= to_copy;
+}
+
+#endif //NETDATA_SPAWN_H
diff --git a/spawn/spawn_client.c b/spawn/spawn_client.c
new file mode 100644
index 0000000..83dc3c8
--- /dev/null
+++ b/spawn/spawn_client.c
@@ -0,0 +1,241 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "spawn.h"
+#include "../database/engine/rrdenginelib.h"
+
+static uv_process_t process;
+static uv_pipe_t spawn_channel;
+static uv_loop_t *loop;
+uv_async_t spawn_async;
+
+static char prot_buffer[MAX_COMMAND_LENGTH];
+static unsigned prot_buffer_len = 0;
+
+static void async_cb(uv_async_t *handle)
+{
+ uv_stop(handle->loop);
+}
+
+static void after_pipe_write(uv_write_t* req, int status)
+{
+ (void)status;
+#ifdef SPAWN_DEBUG
+ info("CLIENT %s called status=%d", __func__, status);
+#endif
+ freez(req->data);
+}
+
+static void client_parse_spawn_protocol(unsigned source_len, char *source)
+{
+ unsigned required_len;
+ struct spawn_prot_header *header;
+ struct spawn_prot_spawn_result *spawn_result;
+ struct spawn_prot_cmd_exit_status *exit_status;
+ struct spawn_cmd_info *cmdinfo;
+
+ while (source_len) {
+ required_len = sizeof(*header);
+ if (prot_buffer_len < required_len)
+ copy_to_prot_buffer(prot_buffer, &prot_buffer_len, required_len - prot_buffer_len, &source, &source_len);
+ if (prot_buffer_len < required_len)
+ return; /* Source buffer ran out */
+
+ header = (struct spawn_prot_header *)prot_buffer;
+ cmdinfo = (struct spawn_cmd_info *)header->handle;
+ fatal_assert(NULL != cmdinfo);
+
+ switch(header->opcode) {
+ case SPAWN_PROT_SPAWN_RESULT:
+ required_len += sizeof(*spawn_result);
+ if (prot_buffer_len < required_len)
+ copy_to_prot_buffer(prot_buffer, &prot_buffer_len, required_len - prot_buffer_len, &source, &source_len);
+ if (prot_buffer_len < required_len)
+ return; /* Source buffer ran out */
+
+ spawn_result = (struct spawn_prot_spawn_result *)(header + 1);
+ uv_mutex_lock(&cmdinfo->mutex);
+ cmdinfo->pid = spawn_result->exec_pid;
+ if (0 == cmdinfo->pid) { /* Failed to spawn */
+#ifdef SPAWN_DEBUG
+ info("CLIENT %s SPAWN_PROT_SPAWN_RESULT failed to spawn.", __func__);
+#endif
+ cmdinfo->flags |= SPAWN_CMD_FAILED_TO_SPAWN | SPAWN_CMD_DONE;
+ uv_cond_signal(&cmdinfo->cond);
+ } else {
+ cmdinfo->exec_run_timestamp = spawn_result->exec_run_timestamp;
+ cmdinfo->flags |= SPAWN_CMD_IN_PROGRESS;
+#ifdef SPAWN_DEBUG
+ info("CLIENT %s SPAWN_PROT_SPAWN_RESULT in progress.", __func__);
+#endif
+ }
+ uv_mutex_unlock(&cmdinfo->mutex);
+ prot_buffer_len = 0;
+ break;
+ case SPAWN_PROT_CMD_EXIT_STATUS:
+ required_len += sizeof(*exit_status);
+ if (prot_buffer_len < required_len)
+ copy_to_prot_buffer(prot_buffer, &prot_buffer_len, required_len - prot_buffer_len, &source, &source_len);
+ if (prot_buffer_len < required_len)
+ return; /* Source buffer ran out */
+
+ exit_status = (struct spawn_prot_cmd_exit_status *)(header + 1);
+ uv_mutex_lock(&cmdinfo->mutex);
+ cmdinfo->exit_status = exit_status->exec_exit_status;
+#ifdef SPAWN_DEBUG
+ info("CLIENT %s SPAWN_PROT_CMD_EXIT_STATUS %d.", __func__, exit_status->exec_exit_status);
+#endif
+ cmdinfo->flags |= SPAWN_CMD_DONE;
+ uv_cond_signal(&cmdinfo->cond);
+ uv_mutex_unlock(&cmdinfo->mutex);
+ prot_buffer_len = 0;
+ break;
+ default:
+ fatal_assert(0);
+ break;
+ }
+
+ }
+}
+
+static void on_pipe_read(uv_stream_t* pipe, ssize_t nread, const uv_buf_t* buf)
+{
+ if (0 == nread) {
+ info("%s: Zero bytes read from spawn pipe.", __func__);
+ } else if (UV_EOF == nread) {
+ info("EOF found in spawn pipe.");
+ } else if (nread < 0) {
+ error("%s: %s", __func__, uv_strerror(nread));
+ }
+
+ if (nread < 0) { /* stop stream due to EOF or error */
+ (void)uv_read_stop((uv_stream_t *)pipe);
+ } else if (nread) {
+#ifdef SPAWN_DEBUG
+ info("CLIENT %s read %u", __func__, (unsigned)nread);
+#endif
+ client_parse_spawn_protocol(nread, buf->base);
+ }
+ if (buf && buf->len) {
+ freez(buf->base);
+ }
+
+ if (nread < 0) {
+ uv_close((uv_handle_t *)pipe, NULL);
+ }
+}
+
+static void on_read_alloc(uv_handle_t* handle,
+ size_t suggested_size,
+ uv_buf_t* buf)
+{
+ (void)handle;
+ buf->base = mallocz(suggested_size);
+ buf->len = suggested_size;
+}
+
+static void spawn_process_cmd(struct spawn_cmd_info *cmdinfo)
+{
+ int ret;
+ uv_buf_t writebuf[3];
+ struct write_context *write_ctx;
+
+ write_ctx = mallocz(sizeof(*write_ctx));
+ write_ctx->write_req.data = write_ctx;
+
+ uv_mutex_lock(&cmdinfo->mutex);
+ cmdinfo->flags |= SPAWN_CMD_PROCESSED;
+ uv_mutex_unlock(&cmdinfo->mutex);
+
+ write_ctx->header.opcode = SPAWN_PROT_EXEC_CMD;
+ write_ctx->header.handle = cmdinfo;
+ write_ctx->payload.command_length = strlen(cmdinfo->command_to_run);
+
+ writebuf[0] = uv_buf_init((char *)&write_ctx->header, sizeof(write_ctx->header));
+ writebuf[1] = uv_buf_init((char *)&write_ctx->payload, sizeof(write_ctx->payload));
+ writebuf[2] = uv_buf_init((char *)cmdinfo->command_to_run, write_ctx->payload.command_length);
+
+#ifdef SPAWN_DEBUG
+ info("CLIENT %s SPAWN_PROT_EXEC_CMD %u", __func__, (unsigned)cmdinfo->serial);
+#endif
+ ret = uv_write(&write_ctx->write_req, (uv_stream_t *)&spawn_channel, writebuf, 3, after_pipe_write);
+ fatal_assert(ret == 0);
+}
+
+void spawn_client(void *arg)
+{
+ int ret;
+ struct completion *completion = (struct completion *)arg;
+
+ loop = mallocz(sizeof(uv_loop_t));
+ ret = uv_loop_init(loop);
+ if (ret) {
+ error("uv_loop_init(): %s", uv_strerror(ret));
+ spawn_thread_error = ret;
+ goto error_after_loop_init;
+ }
+ loop->data = NULL;
+
+ spawn_async.data = NULL;
+ ret = uv_async_init(loop, &spawn_async, async_cb);
+ if (ret) {
+ error("uv_async_init(): %s", uv_strerror(ret));
+ spawn_thread_error = ret;
+ goto error_after_async_init;
+ }
+
+ ret = uv_pipe_init(loop, &spawn_channel, 1);
+ if (ret) {
+ error("uv_pipe_init(): %s", uv_strerror(ret));
+ spawn_thread_error = ret;
+ goto error_after_pipe_init;
+ }
+ fatal_assert(spawn_channel.ipc);
+
+ ret = create_spawn_server(loop, &spawn_channel, &process);
+ if (ret) {
+ error("Failed to fork spawn server process.");
+ spawn_thread_error = ret;
+ goto error_after_spawn_server;
+ }
+
+ spawn_thread_error = 0;
+ spawn_thread_shutdown = 0;
+ /* wake up initialization thread */
+ complete(completion);
+
+ prot_buffer_len = 0;
+ ret = uv_read_start((uv_stream_t *)&spawn_channel, on_read_alloc, on_pipe_read);
+ fatal_assert(ret == 0);
+
+ while (spawn_thread_shutdown == 0) {
+ struct spawn_cmd_info *cmdinfo;
+
+ uv_run(loop, UV_RUN_DEFAULT);
+ while (NULL != (cmdinfo = spawn_get_unprocessed_cmd())) {
+ spawn_process_cmd(cmdinfo);
+ }
+ }
+ /* cleanup operations of the event loop */
+ info("Shutting down spawn client event loop.");
+ uv_close((uv_handle_t *)&spawn_channel, NULL);
+ uv_close((uv_handle_t *)&spawn_async, NULL);
+ uv_run(loop, UV_RUN_DEFAULT); /* flush all libuv handles */
+
+ info("Shutting down spawn client loop complete.");
+ fatal_assert(0 == uv_loop_close(loop));
+
+ return;
+
+error_after_spawn_server:
+ uv_close((uv_handle_t *)&spawn_channel, NULL);
+error_after_pipe_init:
+ uv_close((uv_handle_t *)&spawn_async, NULL);
+error_after_async_init:
+ uv_run(loop, UV_RUN_DEFAULT); /* flush all libuv handles */
+ fatal_assert(0 == uv_loop_close(loop));
+error_after_loop_init:
+ freez(loop);
+
+ /* wake up initialization thread */
+ complete(completion);
+}
diff --git a/spawn/spawn_server.c b/spawn/spawn_server.c
new file mode 100644
index 0000000..f84fab1
--- /dev/null
+++ b/spawn/spawn_server.c
@@ -0,0 +1,377 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "spawn.h"
+
+static uv_loop_t *loop;
+static uv_pipe_t server_pipe;
+
+static int server_shutdown = 0;
+
+static uv_thread_t thread;
+
+/* spawn outstanding execution structure */
+static avl_tree_lock spawn_outstanding_exec_tree;
+
+static char prot_buffer[MAX_COMMAND_LENGTH];
+static unsigned prot_buffer_len = 0;
+
+struct spawn_execution_info {
+ avl avl;
+
+ void *handle;
+ int exit_status;
+ pid_t pid;
+ struct spawn_execution_info *next;
+};
+
+int spawn_exec_compare(void *a, void *b)
+{
+ struct spawn_execution_info *spwna = a, *spwnb = b;
+
+ if (spwna->pid < spwnb->pid) return -1;
+ if (spwna->pid > spwnb->pid) return 1;
+
+ return 0;
+}
+
+/* wake up waiter thread to reap the spawned processes */
+static uv_mutex_t wait_children_mutex;
+static uv_cond_t wait_children_cond;
+static uint8_t spawned_processes;
+static struct spawn_execution_info *child_waited_list;
+static uv_async_t child_waited_async;
+
+static inline struct spawn_execution_info *dequeue_child_waited_list(void)
+{
+ struct spawn_execution_info *exec_info;
+
+ uv_mutex_lock(&wait_children_mutex);
+ if (NULL == child_waited_list) {
+ exec_info = NULL;
+ } else {
+ exec_info = child_waited_list;
+ child_waited_list = exec_info->next;
+ }
+ uv_mutex_unlock(&wait_children_mutex);
+
+ return exec_info;
+}
+
+static inline void enqueue_child_waited_list(struct spawn_execution_info *exec_info)
+{
+ uv_mutex_lock(&wait_children_mutex);
+ exec_info->next = child_waited_list;
+ child_waited_list = exec_info;
+ uv_mutex_unlock(&wait_children_mutex);
+}
+
+static void after_pipe_write(uv_write_t *req, int status)
+{
+ (void)status;
+#ifdef SPAWN_DEBUG
+ fprintf(stderr, "SERVER %s called status=%d\n", __func__, status);
+#endif
+ freez(req->data);
+}
+
+static void child_waited_async_cb(uv_async_t *async_handle)
+{
+ uv_buf_t writebuf[2];
+ int ret;
+ struct spawn_execution_info *exec_info;
+ struct write_context *write_ctx;
+
+ (void)async_handle;
+ while (NULL != (exec_info = dequeue_child_waited_list())) {
+ write_ctx = mallocz(sizeof(*write_ctx));
+ write_ctx->write_req.data = write_ctx;
+
+
+ write_ctx->header.opcode = SPAWN_PROT_CMD_EXIT_STATUS;
+ write_ctx->header.handle = exec_info->handle;
+ write_ctx->exit_status.exec_exit_status = exec_info->exit_status;
+ writebuf[0] = uv_buf_init((char *) &write_ctx->header, sizeof(write_ctx->header));
+ writebuf[1] = uv_buf_init((char *) &write_ctx->exit_status, sizeof(write_ctx->exit_status));
+#ifdef SPAWN_DEBUG
+ fprintf(stderr, "SERVER %s SPAWN_PROT_CMD_EXIT_STATUS\n", __func__);
+#endif
+ ret = uv_write(&write_ctx->write_req, (uv_stream_t *) &server_pipe, writebuf, 2, after_pipe_write);
+ fatal_assert(ret == 0);
+
+ freez(exec_info);
+ }
+}
+
+static void wait_children(void *arg)
+{
+ siginfo_t i;
+ struct spawn_execution_info tmp, *exec_info;
+ avl *ret_avl;
+
+ (void)arg;
+ while (!server_shutdown) {
+ uv_mutex_lock(&wait_children_mutex);
+ while (!spawned_processes) {
+ uv_cond_wait(&wait_children_cond, &wait_children_mutex);
+ }
+ spawned_processes = 0;
+ uv_mutex_unlock(&wait_children_mutex);
+
+ while (!server_shutdown) {
+ i.si_pid = 0;
+ if (waitid(P_ALL, (id_t) 0, &i, WEXITED) == -1) {
+ if (errno != ECHILD)
+ fprintf(stderr, "SPAWN: Failed to wait: %s\n", strerror(errno));
+ break;
+ }
+ if (i.si_pid == 0) {
+ fprintf(stderr, "SPAWN: No child exited.\n");
+ break;
+ }
+#ifdef SPAWN_DEBUG
+ fprintf(stderr, "SPAWN: Successfully waited for pid:%d.\n", (int) i.si_pid);
+#endif
+ fatal_assert(CLD_EXITED == i.si_code);
+ tmp.pid = (pid_t)i.si_pid;
+ while (NULL == (ret_avl = avl_remove_lock(&spawn_outstanding_exec_tree, (avl *)&tmp))) {
+ fprintf(stderr,
+ "SPAWN: race condition detected, waiting for child process %d to be indexed.\n",
+ (int)tmp.pid);
+ (void)sleep_usec(10000); /* 10 msec */
+ }
+ exec_info = (struct spawn_execution_info *)ret_avl;
+ exec_info->exit_status = i.si_status;
+ enqueue_child_waited_list(exec_info);
+
+ /* wake up event loop */
+ fatal_assert(0 == uv_async_send(&child_waited_async));
+ }
+ }
+}
+
+void spawn_protocol_execute_command(void *handle, char *command_to_run, uint16_t command_length)
+{
+ uv_buf_t writebuf[2];
+ int ret;
+ avl *avl_ret;
+ struct spawn_execution_info *exec_info;
+ struct write_context *write_ctx;
+
+ write_ctx = mallocz(sizeof(*write_ctx));
+ write_ctx->write_req.data = write_ctx;
+
+ command_to_run[command_length] = '\0';
+#ifdef SPAWN_DEBUG
+ fprintf(stderr, "SPAWN: executing command '%s'\n", command_to_run);
+#endif
+ if (netdata_spawn(command_to_run, &write_ctx->spawn_result.exec_pid)) {
+ fprintf(stderr, "SPAWN: Cannot spawn(\"%s\", \"r\").\n", command_to_run);
+ write_ctx->spawn_result.exec_pid = 0;
+ } else { /* successfully spawned command */
+ write_ctx->spawn_result.exec_run_timestamp = now_realtime_sec();
+
+ /* record it for when the process finishes execution */
+ exec_info = mallocz(sizeof(*exec_info));
+ exec_info->handle = handle;
+ exec_info->pid = write_ctx->spawn_result.exec_pid;
+ avl_ret = avl_insert_lock(&spawn_outstanding_exec_tree, (avl *)exec_info);
+ fatal_assert(avl_ret == (avl *)exec_info);
+
+ /* wake up the thread that blocks waiting for processes to exit */
+ uv_mutex_lock(&wait_children_mutex);
+ spawned_processes = 1;
+ uv_cond_signal(&wait_children_cond);
+ uv_mutex_unlock(&wait_children_mutex);
+ }
+
+ write_ctx->header.opcode = SPAWN_PROT_SPAWN_RESULT;
+ write_ctx->header.handle = handle;
+ writebuf[0] = uv_buf_init((char *)&write_ctx->header, sizeof(write_ctx->header));
+ writebuf[1] = uv_buf_init((char *)&write_ctx->spawn_result, sizeof(write_ctx->spawn_result));
+#ifdef SPAWN_DEBUG
+ fprintf(stderr, "SERVER %s SPAWN_PROT_SPAWN_RESULT\n", __func__);
+#endif
+ ret = uv_write(&write_ctx->write_req, (uv_stream_t *)&server_pipe, writebuf, 2, after_pipe_write);
+ fatal_assert(ret == 0);
+}
+
+static void server_parse_spawn_protocol(unsigned source_len, char *source)
+{
+ unsigned required_len;
+ struct spawn_prot_header *header;
+ struct spawn_prot_exec_cmd *payload;
+ uint16_t command_length;
+
+ while (source_len) {
+ required_len = sizeof(*header);
+ if (prot_buffer_len < required_len)
+ copy_to_prot_buffer(prot_buffer, &prot_buffer_len, required_len - prot_buffer_len, &source, &source_len);
+ if (prot_buffer_len < required_len)
+ return; /* Source buffer ran out */
+
+ header = (struct spawn_prot_header *)prot_buffer;
+ fatal_assert(SPAWN_PROT_EXEC_CMD == header->opcode);
+ fatal_assert(NULL != header->handle);
+
+ required_len += sizeof(*payload);
+ if (prot_buffer_len < required_len)
+ copy_to_prot_buffer(prot_buffer, &prot_buffer_len, required_len - prot_buffer_len, &source, &source_len);
+ if (prot_buffer_len < required_len)
+ return; /* Source buffer ran out */
+
+ payload = (struct spawn_prot_exec_cmd *)(header + 1);
+ command_length = payload->command_length;
+
+ required_len += command_length;
+ if (unlikely(required_len > MAX_COMMAND_LENGTH - 1)) {
+ fprintf(stderr, "SPAWN: Ran out of protocol buffer space.\n");
+ command_length = (MAX_COMMAND_LENGTH - 1) - (sizeof(*header) + sizeof(*payload));
+ required_len = MAX_COMMAND_LENGTH - 1;
+ }
+ if (prot_buffer_len < required_len)
+ copy_to_prot_buffer(prot_buffer, &prot_buffer_len, required_len - prot_buffer_len, &source, &source_len);
+ if (prot_buffer_len < required_len)
+ return; /* Source buffer ran out */
+
+ spawn_protocol_execute_command(header->handle, payload->command_to_run, command_length);
+ prot_buffer_len = 0;
+ }
+}
+
+static void on_pipe_read(uv_stream_t *pipe, ssize_t nread, const uv_buf_t *buf)
+{
+ if (0 == nread) {
+ fprintf(stderr, "SERVER %s: Zero bytes read from spawn pipe.\n", __func__);
+ } else if (UV_EOF == nread) {
+ fprintf(stderr, "EOF found in spawn pipe.\n");
+ } else if (nread < 0) {
+ fprintf(stderr, "%s: %s\n", __func__, uv_strerror(nread));
+ }
+
+ if (nread < 0) { /* stop spawn server due to EOF or error */
+ int error;
+
+ uv_mutex_lock(&wait_children_mutex);
+ server_shutdown = 1;
+ spawned_processes = 1;
+ uv_cond_signal(&wait_children_cond);
+ uv_mutex_unlock(&wait_children_mutex);
+
+ fprintf(stderr, "Shutting down spawn server event loop.\n");
+ /* cleanup operations of the event loop */
+ (void)uv_read_stop((uv_stream_t *) pipe);
+ uv_close((uv_handle_t *)&server_pipe, NULL);
+
+ error = uv_thread_join(&thread);
+ if (error) {
+ fprintf(stderr, "uv_thread_create(): %s", uv_strerror(error));
+ }
+ /* After joining it is safe to destroy child_waited_async */
+ uv_close((uv_handle_t *)&child_waited_async, NULL);
+ } else if (nread) {
+#ifdef SPAWN_DEBUG
+ fprintf(stderr, "SERVER %s nread %u\n", __func__, (unsigned)nread);
+#endif
+ server_parse_spawn_protocol(nread, buf->base);
+ }
+ if (buf && buf->len) {
+ freez(buf->base);
+ }
+}
+
+static void on_read_alloc(uv_handle_t *handle,
+ size_t suggested_size,
+ uv_buf_t* buf)
+{
+ (void)handle;
+ buf->base = mallocz(suggested_size);
+ buf->len = suggested_size;
+}
+
+static void ignore_signal_handler(int signo) {
+ /*
+ * By having a signal handler we allow spawned processes to reset default signal dispositions. Setting SIG_IGN
+ * would be inherited by the spawned children which is not desirable.
+ */
+ (void)signo;
+}
+
+void spawn_server(void)
+{
+ int error;
+
+ test_clock_boottime();
+ test_clock_monotonic_coarse();
+
+ // close all open file descriptors, except the standard ones
+ // the caller may have left open files (lxc-attach has this issue)
+ int fd;
+ for(fd = (int)(sysconf(_SC_OPEN_MAX) - 1) ; fd > 2 ; --fd)
+ if(fd_is_valid(fd))
+ close(fd);
+
+ // Have the libuv IPC pipe be closed when forking child processes
+ (void) fcntl(0, F_SETFD, FD_CLOEXEC);
+ fprintf(stderr, "Spawn server is up.\n");
+
+ // Define signals we want to ignore
+ struct sigaction sa;
+ int signals_to_ignore[] = {SIGPIPE, SIGINT, SIGQUIT, SIGTERM, SIGHUP, SIGUSR1, SIGUSR2, SIGBUS, SIGCHLD};
+ unsigned ignore_length = sizeof(signals_to_ignore) / sizeof(signals_to_ignore[0]);
+
+ unsigned i;
+ for (i = 0; i < ignore_length ; ++i) {
+ sa.sa_flags = 0;
+ sigemptyset(&sa.sa_mask);
+ sa.sa_handler = ignore_signal_handler;
+ if(sigaction(signals_to_ignore[i], &sa, NULL) == -1)
+ fprintf(stderr, "SPAWN: Failed to change signal handler for signal: %d.\n", signals_to_ignore[i]);
+ }
+
+ signals_unblock();
+
+ loop = uv_default_loop();
+ loop->data = NULL;
+
+ error = uv_pipe_init(loop, &server_pipe, 1);
+ if (error) {
+ fprintf(stderr, "uv_pipe_init(): %s\n", uv_strerror(error));
+ exit(error);
+ }
+ fatal_assert(server_pipe.ipc);
+
+ error = uv_pipe_open(&server_pipe, 0 /* UV_STDIN_FD */);
+ if (error) {
+ fprintf(stderr, "uv_pipe_open(): %s\n", uv_strerror(error));
+ exit(error);
+ }
+ avl_init_lock(&spawn_outstanding_exec_tree, spawn_exec_compare);
+
+ spawned_processes = 0;
+ fatal_assert(0 == uv_cond_init(&wait_children_cond));
+ fatal_assert(0 == uv_mutex_init(&wait_children_mutex));
+ child_waited_list = NULL;
+ error = uv_async_init(loop, &child_waited_async, child_waited_async_cb);
+ if (error) {
+ fprintf(stderr, "uv_async_init(): %s\n", uv_strerror(error));
+ exit(error);
+ }
+
+ error = uv_thread_create(&thread, wait_children, NULL);
+ if (error) {
+ fprintf(stderr, "uv_thread_create(): %s\n", uv_strerror(error));
+ exit(error);
+ }
+
+ prot_buffer_len = 0;
+ error = uv_read_start((uv_stream_t *)&server_pipe, on_read_alloc, on_pipe_read);
+ fatal_assert(error == 0);
+
+ while (!server_shutdown) {
+ uv_run(loop, UV_RUN_DEFAULT);
+ }
+ fprintf(stderr, "Shutting down spawn server loop complete.\n");
+ fatal_assert(0 == uv_loop_close(loop));
+
+ exit(0);
+}
diff --git a/streaming/Makefile.am b/streaming/Makefile.am
new file mode 100644
index 0000000..95c31ca
--- /dev/null
+++ b/streaming/Makefile.am
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+dist_libconfig_DATA = \
+ stream.conf \
+ $(NULL)
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/streaming/README.md b/streaming/README.md
new file mode 100644
index 0000000..94ab1f2
--- /dev/null
+++ b/streaming/README.md
@@ -0,0 +1,624 @@
+<!--
+title: "Streaming and replication"
+description: "Replicate and mirror Netdata's metrics through real-time streaming from child to parent nodes. Then combine, correlate, and export."
+custom_edit_url: https://github.com/netdata/netdata/edit/master/streaming/README.md
+-->
+
+# Streaming and replication
+
+Each Netdata agent is able to replicate/mirror its database to another Netdata agent, by streaming
+the collected metrics to it in real time. This is quite different from [data archiving to third
+party time-series databases](/exporting/README.md).
+
+When Netdata streams metrics to another Netdata, the receiving one is able to perform everything a Netdata instance is
+capable of:
+
+- Visualize metrics with a dashboard
+- Run health checks that trigger alarms and send alarm notifications
+- Export metrics to an external time-series database
+
+The nodes that send metrics are called **child** nodes, and the nodes that receive metrics are called **parent** nodes.
+There are also **proxies**, which collect metrics from a child and send them to a parent.
+
+## Supported configurations
+
+### Netdata without a database or web API (headless collector)
+
+Local Netdata (child), **without any database or alarms**, collects metrics and sends them to another Netdata
+(parent).
+
+The node menu shows a list of all "databases streamed to" the parent. Clicking one of those links allows the user to
+view the full dashboard of the child node. The URL has the form
+`http://parent-host:parent-port/host/child-host/`.
+
+Alarms for the child are served by the parent.
+
+In this mode the child is just a plain data collector. It spawns all external plugins, but instead of maintaining a
+local database and accepting dashboard requests, it streams all metrics to the parent. The memory footprint is reduced
+significantly, to between 6 MiB and 40 MiB, depending on the enabled plugins. To reduce the memory usage as much as
+possible, refer to the [performance optimization guide](/docs/guides/configure/performance.md).
+
+The same parent can collect data for any number of child nodes.
+
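+As a minimal sketch, combining the options described in the [Configuration](#configuration)
+section below, a headless child disables its local database and web server and enables streaming
+(the destination and API key are placeholders):
+
+```
+# netdata.conf on the child
+[global]
+    memory mode = none
+[web]
+    mode = none
+```
+
+```sh
+# stream.conf on the child
+[stream]
+    enabled = yes
+    destination = PARENT_IP:19999
+    api key = API_KEY
+```
+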
+### Database Replication
+
+Local Netdata (child), **with a local database (and possibly alarms)**, collects metrics and
+sends them to another Netdata (parent).
+
+The user can use all the functions **at both** `http://child-ip:child-port/` and
+`http://parent-host:parent-port/host/child-host/`.
+
+The child and the parent may have different data retention policies for the same metrics.
+
+Alarms for the child are triggered by **both** the child and the parent (and each can actually
+have different alarm configurations or have alarms disabled).
+
+Note that custom chart names configured on the child should be in the form `type.name` to work correctly. The parent will truncate the `type` part and substitute the original chart `type` to store the name in the database.
+
+### Netdata proxies
+
+Local Netdata (child), with or without a database, collects metrics and sends them to another
+Netdata (**proxy**), which may or may not maintain a database and which forwards them to another
+Netdata (parent).
+
+Alarms for the child can be triggered by any of the involved hosts that maintain a database.
+
+Any number of daisy-chained Netdata servers are supported, each with or without a database and
+with or without alarms for the child metrics. A configuration sketch for a proxy follows below.
+
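+A proxy simply combines both sides of `stream.conf`, described in the
+[streaming configuration](#streaming-configuration) section below: it accepts streams with an API
+key and forwards everything to its own parent (a sketch; the addresses and key are placeholders):
+
+```sh
+# stream.conf on the proxy
+[stream]
+    enabled = yes
+    destination = PARENT_IP:19999
+    api key = API_KEY
+
+[API_KEY]
+    enabled = yes
+```
+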
+### mix and match with backends
+
+All nodes that maintain a database can also send their data to a backend database.
+This allows quite complex setups.
+
+Example:
+
+1. Netdata `A`, `B` do not maintain a database and stream metrics to Netdata `C` (live streaming functionality)
+2. Netdata `C` maintains a database for `A`, `B`, `C` and archives all metrics to `graphite` with 10 second detail (backends functionality)
+3. Netdata `C` also streams data for `A`, `B`, `C` to Netdata `D`, which also collects data from `E`, `F` and `G` from another DMZ (live streaming functionality)
+4. Netdata `D` is just a proxy, without a database, that streams all data to a remote site at Netdata `H`
+5. Netdata `H` maintains a database for `A`, `B`, `C`, `D`, `E`, `F`, `G`, `H` and sends all data to `opentsdb` with 5 seconds detail (backends functionality)
+6. Alarms are triggered by `H` for all hosts
+7. Users can use all the Netdata agents that maintain a database to view metrics (i.e. at `H` all hosts can be viewed).
+
+## Configuration
+
+These are options that affect the operation of Netdata in this area:
+
+```
+[global]
+ memory mode = none | ram | save | map | dbengine
+```
+
+`[global].memory mode = none` disables the database at this host. This also disables health
+monitoring (there cannot be health monitoring without a database).
+
+```
+[web]
+ mode = none | static-threaded
+ accept a streaming request every seconds = 0
+```
+
+`[web].mode = none` disables the API (Netdata will not listen to any ports).
+This also disables the registry (there cannot be a registry without an API).
+
+`accept a streaming request every seconds` can be used to set a limit on how often a parent node will accept streaming
+requests from its child nodes. A value of `0` sets no limit; `1` allows at most one request per second. If this is set, you may see error log
+entries "... too busy to accept new streaming request. Will be allowed in X secs".
+
+```
+[backend]
+ enabled = yes | no
+ type = graphite | opentsdb
+ destination = IP:PORT ...
+ update every = 10
+```
+
+`[backend]` configures data archiving to a backend (it archives all databases maintained on
+this host).
+
+### streaming configuration
+
+A new file is introduced: `stream.conf` (to edit it on your system run
+`/etc/netdata/edit-config stream.conf`). This file holds streaming configuration for both the
+sending and the receiving Netdata.
+
+API keys are used to authorize the communication of a pair of sending-receiving Netdata.
+Once the communication is authorized, the sending Netdata can push metrics for any number of hosts.
+
+You can generate an API key with the command `uuidgen`. API keys are just random GUIDs.
+You can use the same API key on all your Netdata, or use a different API key for any pair of
+sending-receiving Netdata.
+
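+For example (the output shown is just a sample; every run produces a new random GUID):
+
+```sh
+uuidgen
+# 550e8400-e29b-41d4-a716-446655440000
+```
+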
+##### options for the sending node
+
+This is the section for the sending Netdata. On the receiving node, `[stream].enabled` can be `no`.
+If it is `yes`, the receiving node will also stream the metrics to another node (i.e. it will be
+a proxy).
+
+```
+[stream]
+ enabled = yes | no
+ destination = IP:PORT[:SSL] ...
+ api key = XXXXXXXXXXX
+```
+
+This is an overview of how these options can be combined:
+
+| target|memory<br/>mode|web<br/>mode|stream<br/>enabled|backend|alarms|dashboard|
+|------|:-------------:|:----------:|:----------------:|:-----:|:----:|:-------:|
+| headless collector|`none`|`none`|`yes`|only for `data source = as collected`|not possible|no|
+| headless proxy|`none`|not `none`|`yes`|only for `data source = as collected`|not possible|no|
+| proxy with db|not `none`|not `none`|`yes`|possible|possible|yes|
+| central netdata|not `none`|not `none`|`no`|possible|possible|yes|
+
+For the options to encrypt the data stream between the child and the parent, refer to [securing the communication](#securing-streaming-communications).
+
+##### options for the receiving node
+
+`stream.conf` looks like this:
+
+```sh
+# replace API_KEY with your uuidgen generated GUID
+[API_KEY]
+ enabled = yes
+ default history = 3600
+ default memory mode = save
+ health enabled by default = auto
+ allow from = *
+```
+
+You can add many such sections, one for each API key. The above are used as default values for
+all hosts pushed with this API key.
+
+You can also add sections like this:
+
+```sh
+# replace MACHINE_GUID with the child /var/lib/netdata/registry/netdata.public.unique.id
+[MACHINE_GUID]
+ enabled = yes
+ history = 3600
+ memory mode = save
+ health enabled = yes
+ allow from = *
+```
+
+The above is the configuration of a single host, at the parent end. `MACHINE_GUID` is
+the unique id of the Netdata agent generating the metrics (i.e. the Netdata agent that originally
+collects them, stored in `/var/lib/netdata/registry/netdata.public.unique.id`). So, metrics for
+Netdata `A` that pass through any number of other Netdata agents will have the same `MACHINE_GUID`.
+
+You can also use `default memory mode = dbengine` for an API key or `memory mode = dbengine` for
+ a single host. The additional `page cache size` and `dbengine multihost disk space` configuration options
+ are inherited from the global Netdata configuration.
+
+##### allow from
+
+`allow from` settings are [Netdata simple patterns](/libnetdata/simple_pattern/README.md): string matches
+that use `*` as wildcard (any number of times) and a `!` prefix for a negative match.
+So: `allow from = !10.1.2.3 10.*` will allow all IPs in `10.*` except `10.1.2.3`. The order is
+important: left to right, the first positive or negative match is used.
+
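+For example, to accept streaming only from `10.*`, except `10.1.2.3`:
+
+```sh
+# replace API_KEY with your uuidgen generated GUID
+[API_KEY]
+    enabled = yes
+    allow from = !10.1.2.3 10.*
+```
+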
+`allow from` is available in Netdata v1.9+.
+
+##### tracing
+
+When a child is trying to push metrics to a parent or proxy, it logs entries like these:
+
+```
+2017-02-25 01:57:44: netdata: ERROR: Failed to connect to '10.11.12.1', port '19999' (errno 111, Connection refused)
+2017-02-25 01:57:44: netdata: ERROR: STREAM costa-pc [send to 10.11.12.1:19999]: failed to connect
+2017-02-25 01:58:04: netdata: INFO : STREAM costa-pc [send to 10.11.12.1:19999]: initializing communication...
+2017-02-25 01:58:04: netdata: INFO : STREAM costa-pc [send to 10.11.12.1:19999]: waiting response from remote netdata...
+2017-02-25 01:58:14: netdata: INFO : STREAM costa-pc [send to 10.11.12.1:19999]: established communication - sending metrics...
+2017-02-25 01:58:14: netdata: ERROR: STREAM costa-pc [send]: discarding 1900 bytes of metrics already in the buffer.
+2017-02-25 01:58:14: netdata: INFO : STREAM costa-pc [send]: ready - sending metrics...
+```
+
+The receiving end (proxy or parent) logs entries like these:
+
+```
+2017-02-25 01:58:04: netdata: INFO : STREAM [receive from [10.11.12.11]:33554]: new client connection.
+2017-02-25 01:58:04: netdata: INFO : STREAM costa-pc [10.11.12.11]:33554: receive thread created (task id 7698)
+2017-02-25 01:58:14: netdata: INFO : Host 'costa-pc' with guid '12345678-b5a6-11e6-8a50-00508db7e9c9' initialized, os: linux, update every: 1, memory mode: ram, history entries: 3600, streaming: disabled, health: enabled, cache_dir: '/var/cache/netdata/12345678-b5a6-11e6-8a50-00508db7e9c9', varlib_dir: '/var/lib/netdata/12345678-b5a6-11e6-8a50-00508db7e9c9', health_log: '/var/lib/netdata/12345678-b5a6-11e6-8a50-00508db7e9c9/health/health-log.db', alarms default handler: '/usr/libexec/netdata/plugins.d/alarm-notify.sh', alarms default recipient: 'root'
+2017-02-25 01:58:14: netdata: INFO : STREAM costa-pc [receive from [10.11.12.11]:33554]: initializing communication...
+2017-02-25 01:58:14: netdata: INFO : STREAM costa-pc [receive from [10.11.12.11]:33554]: receiving metrics...
+```
+
+For Netdata v1.9+, streaming can also be monitored via `access.log`.
+
+### Securing streaming communications
+
+Netdata does not activate TLS encryption by default. To encrypt streaming connections, you first need to [enable TLS support](/web/server/README.md#enabling-tls-support) on the parent. With encryption enabled on the receiving side, you need to instruct the child to use TLS/SSL as well. On the child's `stream.conf`, configure the destination as follows:
+
+```
+[stream]
+ destination = host:port:SSL
+```
+
+The word `SSL` appended to the end of the destination tells the child that connections must be encrypted.
+
+> While Netdata uses Transport Layer Security (TLS) 1.2 to encrypt communications rather than the obsolete SSL protocol,
+> it's still common practice to refer to encrypted web connections as `SSL`. Many vendors, like Nginx and even Netdata
+> itself, use `SSL` in configuration files, whereas documentation will always refer to encrypted communications as `TLS`
+> or `TLS/SSL`.
+
+#### Certificate verification
+
+When TLS/SSL is enabled on the child, the default behavior is to refuse connecting to the parent unless the server's certificate can be verified via the default chain. If you want to skip this check, add the following to the child's `stream.conf` file:
+
+```
+[stream]
+ ssl skip certificate verification = yes
+```
+
+#### Trusted certificate
+
+If you've enabled [certificate verification](#certificate-verification), you might see errors from the OpenSSL library when there's a problem with checking the certificate chain (`X509_V_ERR_UNABLE_TO_GET_ISSUER_CERT_LOCALLY`). More importantly, OpenSSL will reject self-signed certificates.
+
+Given these known issues, you have two options: skip certificate verification, as shown above, or, if you trust your certificate, set the `CApath` and `CAfile` options to tell Netdata where your certificates, and the certificate trusted file, are stored.
+
+For more details about these options, you can read about [verify locations](https://www.openssl.org/docs/man1.1.1/man3/SSL_CTX_load_verify_locations.html).
+
+Before you change your streaming configuration, you need to copy your trusted certificate to your child system and add the certificate to OpenSSL's list.
+
+On most Linux distributions, the `update-ca-certificates` command searches inside the `/usr/share/ca-certificates` directory for certificates. You should double-check by reading the `update-ca-certificates` manual (`man update-ca-certificates`), and then change the directory in the commands below if needed.
+
+If you have `sudo` configured on your child system, you can use that to run the following commands. If not, you'll have to log in as `root` to complete them.
+
+```
+# mkdir /usr/share/ca-certificates/netdata
+# cp parent_cert.pem /usr/share/ca-certificates/netdata/parent_cert.crt
+# chown -R netdata.netdata /usr/share/ca-certificates/netdata/
+```
+
+First, you create a new directory to store your certificates for Netdata. Next, you change the extension on your certificate from `.pem` to `.crt` so it's compatible with `update-ca-certificates`. Finally, you change permissions so the user that runs Netdata can access the directory where you copied your certificate.
+
+Next, edit the file `/etc/ca-certificates.conf` and add the following line:
+
+```
+netdata/parent_cert.crt
+```
+
+Now, update the list of certificates by running the following, again either with `sudo` or as `root`:
+
+```
+# update-ca-certificates
+```
+
+> Some Linux distributions have different methods of updating the certificate list. For more details, please read this
+> guide on [adding trusted root certificates](https://github.com/Busindre/How-to-Add-trusted-root-certificates).
+
+Once you update your certificate list, you can set the stream parameters for Netdata to trust the parent certificate. Open `stream.conf` for editing and change the following lines:
+
+```
+[stream]
+ CApath = /etc/ssl/certs/
+ CAfile = /etc/ssl/certs/parent_cert.pem
+```
+
+With this configuration, the `CApath` option tells Netdata to search for trusted certificates inside `/etc/ssl/certs`, and the `CAfile` option specifies that the Netdata parent certificate is located at `/etc/ssl/certs/parent_cert.pem`. This way, you can skip using the system's entire list of certificates and use Netdata's parent certificate instead.
+
+#### Expected behaviors
+
+With the introduction of TLS/SSL, the parent-child communication behaves as shown in the table below, depending on the following configurations:
+
+- **Parent TLS (Yes/No)**: Whether the `[web]` section in `netdata.conf` has `ssl key` and `ssl certificate`.
+- **Parent port TLS (-/force/optional)**: Depends on whether the `[web]` section `bind to` contains a `^SSL=force` or `^SSL=optional` directive on the port(s) used for streaming.
+- **Child TLS (Yes/No)**: Whether the destination in the child's `stream.conf` has `:SSL` at the end.
+- **Child TLS Verification (yes/no)**: Value of the child's `stream.conf` `ssl skip certificate verification` parameter (default is no).
+
+| Parent TLS enabled|Parent port SSL|Child TLS|Child SSL Ver.|Behavior|
+|:----------------:|:-------------:|:-------:|:------------:|:-------|
+| No|-|No|no|Legacy behavior. The parent-child stream is unencrypted.|
+| Yes|force|No|no|The parent rejects the child connection.|
+| Yes|-/optional|No|no|The parent-child stream is unencrypted (expected situation for legacy child nodes and newer parent nodes).|
+| Yes|-/force/optional|Yes|no|The parent-child stream is encrypted, provided that the parent has a valid TLS/SSL certificate. Otherwise, the child refuses to connect.|
+| Yes|-/force/optional|Yes|yes|The parent-child stream is encrypted.|
+
+## Viewing remote host dashboards, using mirrored databases
+
+On any receiving Netdata that maintains remote databases and has its web server enabled,
+the node menu will include a list of the mirrored databases.
+
+![image](https://cloud.githubusercontent.com/assets/2662304/24080824/24cd2d3c-0caf-11e7-909d-a8dd1dbb95d7.png)
+
+When you select one of them, the server offers a dashboard built from the mirrored metrics.
+
+## Monitoring ephemeral nodes
+
+Auto-scaling is probably the most trendy service deployment strategy these days.
+
+Auto-scaling detects the need for additional resources and boots VMs on demand, based on a template. Soon after they start running the applications, a load balancer starts distributing traffic to them, allowing the service to grow horizontally to the scale needed to handle the load. When demand falls, auto-scaling starts shutting down VMs that are no longer needed.
+
+![Monitoring ephemeral nodes with Netdata](https://cloud.githubusercontent.com/assets/2662304/23627426/65a9074a-02b9-11e7-9664-cd8f258a00af.png)
+
+What a fantastic feature for controlling infrastructure costs! Pay only for what you need for the time you need it!
+
+In auto-scaling, all servers are ephemeral; they live for just a few hours. Every VM is a brand-new instance of the application, automatically created from a template.
+
+So, how can we monitor them? How can we be sure that everything is working as expected on all of them?
+
+### The Netdata way
+
+We recently made a significant improvement at the core of Netdata to support monitoring such setups.
+
+Following the Netdata way of monitoring, we wanted:
+
+1. **real-time performance monitoring**, collecting ***thousands of metrics per server per second***, visualized in interactive, automatically created dashboards.
+2. **real-time alarms**, for all nodes.
+3. **zero configuration**, all ephemeral servers should have exactly the same configuration, and nothing should be configured at any system for each of the ephemeral nodes. We shouldn't care if 10 or 100 servers are spawned to handle the load.
+4. **self-cleanup**, so that nothing needs to be done for cleaning up the monitoring infrastructure from the hundreds of nodes that may have been monitored through time.
+
+### How it works
+
+All monitoring solutions, including Netdata, work like this:
+
+1. Collect metrics from the system and the running applications
+2. Store metrics in a time-series database
+3. Examine metrics periodically, for triggering alarms and sending alarm notifications
+4. Visualize metrics so that users can see what exactly is happening
+
+Netdata used to be self-contained, so that all these functions were handled entirely by each server. The changes we made allow each Netdata to be configured independently for each function. So, each Netdata can now act as:
+
+- A self-contained system, much like it used to be.
+- A data collector that collects metrics from a host and pushes them to another Netdata (with or without a local database and alarms).
+- A proxy, which receives metrics from other hosts and pushes them immediately to other Netdata servers. Netdata proxies can also be `store and forward proxies` meaning that they are able to maintain a local database for all metrics passing through them (with or without alarms).
+- A time-series database node, where data are kept, alarms are run and queries are served to visualise the metrics.
+
+### Configuring an auto-scaling setup
+
+![A diagram of an auto-scaling setup with Netdata](https://user-images.githubusercontent.com/1153921/84290043-0c1c1600-aaf8-11ea-9757-dd8dd8a8ec6c.png)
+
+You need a Netdata parent. This node should not be ephemeral. It will be the node where all ephemeral child
+nodes will send their metrics.
+
+The parent must authorize each child node before receiving its metrics. This is done with an API key.
+
+#### API keys
+
+API keys are just random GUIDs. Use the Linux command `uuidgen` to generate one. You can use the same API key for all your child nodes, or you can configure one API key for each of them. This is entirely your decision.
+
+We suggest using the same API key for each ephemeral node template you have, so that all replicas of the same ephemeral node will have exactly the same configuration.
+
+I will use this API_KEY: `11111111-2222-3333-4444-555555555555`. Replace it with your own.
+
+#### Configuring the parent
+
+On the parent, edit `/etc/netdata/stream.conf` (to edit it on your system run `/etc/netdata/edit-config stream.conf`) and set these:
+
+```bash
+[11111111-2222-3333-4444-555555555555]
+ # enable/disable this API key
+ enabled = yes
+
+ # one hour of data for each of the child nodes
+ default history = 3600
+
+    # do not save child metrics on disk
+    default memory mode = ram
+
+    # alarm checks run only while the child is connected
+    health enabled by default = auto
+```
+
+_`stream.conf` on the parent, to enable receiving metrics from its child nodes using the API key._
+
+If you use multiple API keys, add one such section for each key.
+
+When done, restart Netdata on the parent node. It is now ready to receive metrics.
+
+Note that `health enabled by default = auto` will still trigger `last_collected` alarms if a connected child does not exit gracefully. If the `netdata` process running on the child is
+stopped, it will close the connection to the parent, ensuring that no `last_collected` alarms are triggered. For example, a proper container restart would first terminate
+the `netdata` process, but a sudden power failure would leave the connection open on the parent side. In the second case, you will still receive alarms.
+
+#### Configuring the child nodes
+
+On each of the child nodes, edit `/etc/netdata/stream.conf` (to edit it on your system run `/etc/netdata/edit-config stream.conf`) and set these:
+
+```bash
+[stream]
+ # stream metrics to another Netdata
+ enabled = yes
+
+ # the IP and PORT of the parent
+ destination = 10.11.12.13:19999
+
+ # the API key to use
+ api key = 11111111-2222-3333-4444-555555555555
+```
+
+_`stream.conf` on child nodes, to enable pushing metrics to their parent at `10.11.12.13:19999`._
+
+Using just the above configuration, the child nodes will be pushing their metrics to the parent Netdata, but they will still maintain a local database of the metrics and run health checks. To disable them, edit `/etc/netdata/netdata.conf` and set:
+
+```bash
+[global]
+ # disable the local database
+ memory mode = none
+
+[health]
+ # disable health checks
+ enabled = no
+```
+
+_`netdata.conf` configuration on child nodes, to disable the local database and health checks._
+
+Keep in mind that setting `memory mode = none` will also force `[health].enabled = no`, because health checks require access to a local database. You can, however, keep the local database and disable only the health checks: since all the metrics are also sent to the parent node, the parent can handle the health checking for the child (`[health].enabled = yes`).
+
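+For example, a `netdata.conf` sketch for a child that keeps a local database but leaves health checking to the parent:
+
+```bash
+[global]
+    # keep a local database on the child
+    memory mode = ram
+
+[health]
+    # let the parent run the health checks
+    enabled = no
+```
+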
+#### Netdata unique id
+
+The file `/var/lib/netdata/registry/netdata.public.unique.id` contains a random GUID that **uniquely identifies each Netdata**. This file is automatically generated by Netdata the first time it is started, and it remains unaltered forever.
+
+> If you are building an image to be used for automated provisioning of auto-scaled VMs, it is important to delete that file from the image, so that each instance of your image will generate its own.
+
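+For example, when preparing such an image you could run:
+
+```sh
+# remove the id from the template image, so each clone generates its own
+rm /var/lib/netdata/registry/netdata.public.unique.id
+```
+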
+#### Troubleshooting metrics streaming
+
+Both parent and child nodes log information at `/var/log/netdata/error.log`.
+
+Run the following on both the parent and child nodes:
+
+```
+tail -f /var/log/netdata/error.log | grep STREAM
+```
+
+If the child manages to connect to the parent, you will see something like this (on the parent):
+
+```
+2017-03-09 09:38:52: netdata: INFO : STREAM [receive from [10.11.12.86]:38564]: new client connection.
+2017-03-09 09:38:52: netdata: INFO : STREAM xxx [10.11.12.86]:38564: receive thread created (task id 27721)
+2017-03-09 09:38:52: netdata: INFO : STREAM xxx [receive from [10.11.12.86]:38564]: client willing to stream metrics for host 'xxx' with machine_guid '1234567-1976-11e6-ae19-7cdd9077342a': update every = 1, history = 3600, memory mode = ram, health auto
+2017-03-09 09:38:52: netdata: INFO : STREAM xxx [receive from [10.11.12.86]:38564]: initializing communication...
+2017-03-09 09:38:52: netdata: INFO : STREAM xxx [receive from [10.11.12.86]:38564]: receiving metrics...
+```
+
+and something like this on the child:
+
+```
+2017-03-09 09:38:28: netdata: INFO : STREAM xxx [send to box:19999]: connecting...
+2017-03-09 09:38:28: netdata: INFO : STREAM xxx [send to box:19999]: initializing communication...
+2017-03-09 09:38:28: netdata: INFO : STREAM xxx [send to box:19999]: waiting response from remote netdata...
+2017-03-09 09:38:28: netdata: INFO : STREAM xxx [send to box:19999]: established communication - sending metrics...
+```
+
+### Archiving to a time-series database
+
+The parent Netdata node can also archive metrics, for all its child nodes, to a time-series database. At the time of
+this writing, Netdata supports:
+
+- graphite
+- opentsdb
+- prometheus
+- json document DBs
+- any database compatible with the above (e.g. KairosDB, InfluxDB)
+
+Check the Netdata [exporting documentation](/docs/export/external-databases.md) for configuring this.
+
+This is how such a solution will work:
+
+![Diagram showing an example configuration for archiving to a time-series
+database](https://user-images.githubusercontent.com/1153921/84291308-c2ccc600-aaf9-11ea-98a9-89ccbf3a62dd.png)
+
+### An advanced setup
+
+Netdata also supports `proxies` with and without a local database, and data retention can be different between all nodes.
+
+This means a setup like the following is also possible:
+
+<p align="center">
+<img src="https://cloud.githubusercontent.com/assets/2662304/23629551/bb1fd9c2-02c0-11e7-90f5-cab5a3ed4c53.png"/>
+</p>
+
+## Proxies
+
+A proxy is a Netdata instance that receives metrics from one Netdata and streams them to another Netdata.
+
+Netdata proxies may or may not maintain a database for the metrics passing through them.
+When they maintain a database, they can also run health checks (alarms and notifications)
+for the remote host that is streaming the metrics.
+
+To configure a proxy, configure it as a receiving and a sending Netdata at the same time,
+using `stream.conf`.
+
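+For example, a minimal sketch of a proxy's `stream.conf`, assuming the example API key used above and a parent at `10.11.12.13:19999`:
+
+```sh
+# receive metrics from children using this API key
+[11111111-2222-3333-4444-555555555555]
+    enabled = yes
+
+# and push everything to the parent
+[stream]
+    enabled = yes
+    destination = 10.11.12.13:19999
+    api key = 11111111-2222-3333-4444-555555555555
+```
+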
+The sending side of a Netdata proxy connects and disconnects to the final destination of the
+metrics, following the same pattern as the receiving side.
+
+For a practical example see [Monitoring ephemeral nodes](#monitoring-ephemeral-nodes).
+
+## Troubleshooting streaming connections
+
+This section describes the most common issues you might encounter when connecting parent and child nodes.
+
+### Slow connections between parent and child
+
+When you have a slow connection between parent and child, Netdata raises a few different errors. Most of the
+errors will appear in the child's `error.log`.
+
+```bash
+netdata ERROR : STREAM_SENDER[CHILD HOSTNAME] : STREAM CHILD HOSTNAME [send to PARENT IP:PARENT PORT]: too many data pending - buffer is X bytes long,
+Y unsent - we have sent Z bytes in total, W on this connection. Closing connection to flush the data.
+```
+
+On the parent side, you may see various error messages, most commonly the following:
+
+```
+netdata ERROR : STREAM_PARENT[CHILD HOSTNAME,[CHILD IP]:CHILD PORT] : read failed: end of file
+```
+
+Another common problem in slow connections is the CHILD sending a partial message to the parent. In this case,
+the parent will write the following in its `error.log`:
+
+```
+ERROR : STREAM_RECEIVER[CHILD HOSTNAME,[CHILD IP]:CHILD PORT] : sent command 'B' which is not known by netdata, for host 'HOSTNAME'. Disabling it.
+```
+
+In this example, `B` was part of a `BEGIN` message that was cut due to connection problems.
+
+Slow connections can also cause problems when the parent misses a message and then receives a command related to the
+missed message. For example, a parent might miss a message containing the child's charts, and then not know
+what to do with the `SET` message that follows. When that happens, the parent will show a message like this:
+
+```
+ERROR : STREAM_RECEIVER[CHILD HOSTNAME,[CHILD IP]:CHILD PORT] : requested a SET on chart 'CHART NAME' of host 'HOSTNAME', without a dimension. Disabling it.
+```
+
+### Child cannot connect to parent
+
+When the child can't connect to a parent for any reason (misconfiguration, networking, firewalls, parent
+down), you will see the following in the child's `error.log`.
+
+```
+ERROR : STREAM_SENDER[HOSTNAME] : Failed to connect to 'PARENT IP', port 'PARENT PORT' (errno 113, No route to host)
+```
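+
+To verify basic network reachability from the child, you can try connecting to the parent's port directly, e.g. with `nc` (if your version supports the `-z` zero-I/O flag):
+
+```sh
+# replace with your parent's IP and port
+nc -vz 10.11.12.13 19999
+```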
+
+### 'Is this a Netdata?'
+
+This question can appear when Netdata starts the stream and receives an unexpected response, typically because
+the parent is using SSL and the child tries to connect using plain text. You will also see this message when
+Netdata connects to another server that isn't Netdata. The complete error message will look like this:
+
+```
+ERROR : STREAM_SENDER[CHILD HOSTNAME] : STREAM child HOSTNAME [send to PARENT HOSTNAME:PARENT PORT]: server is not replying properly (is it a netdata?).
+```
+
+### Stream charts wrong
+
+Chart data needs to be consistent between child and parent nodes. If there are differences between chart data on
+a parent and a child, such as gaps in metrics collection, it most often means your child's `memory mode`
+does not match the parent's. To learn more about the different ways Netdata can store metrics, and thus keep chart
+data consistent, read our [memory mode documentation](/database/README.md).
+
+### Forbidding access
+
+You may see errors about "forbidding access" for a number of reasons. It could be because of a slow connection between
+the parent and child nodes, but it could also be due to other failures. Look in your parent's `error.log` for errors
+that look like this:
+
+```
+STREAM [receive from [child HOSTNAME]:child IP]: `MESSAGE`. Forbidding access.
+```
+
+`MESSAGE` will have one of the following patterns:
+
+- `request without KEY`: The message received is incomplete; the KEY value can be an API key, a hostname, or a machine GUID.
+- `API key 'VALUE' is not valid GUID`: The UUID received from the child does not have the format defined in
+  [RFC 4122](https://tools.ietf.org/html/rfc4122).
+- `machine GUID 'VALUE' is not GUID.`: Same as the previous error, but for the machine GUID.
+- `API key 'VALUE' is not allowed`: This stream has a wrong API key.
+- `API key 'VALUE' is not permitted from this IP`: The IP is not allowed to use STREAM with this parent.
+- `machine GUID 'VALUE' is not allowed.`: The GUID that is trying to send the stream is not allowed.
+- `Machine GUID 'VALUE' is not permitted from this IP. `: The IP does not match the pattern of IPs allowed to connect
+  and use the stream.
+
+### Netdata could not create a stream
+
+The connection between parent and child is a stream. When the parent can't convert the initial connection into
+a stream, it will write the following message inside `error.log`:
+
+```
+file descriptor given is not a valid stream
+```
+
+After logging this error, Netdata will close the stream.
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fstreaming%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/streaming/receiver.c b/streaming/receiver.c
new file mode 100644
index 0000000..3ea1580
--- /dev/null
+++ b/streaming/receiver.c
@@ -0,0 +1,499 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "rrdpush.h"
+
+extern struct config stream_config;
+
+void destroy_receiver_state(struct receiver_state *rpt) {
+ freez(rpt->key);
+ freez(rpt->hostname);
+ freez(rpt->registry_hostname);
+ freez(rpt->machine_guid);
+ freez(rpt->os);
+ freez(rpt->timezone);
+ freez(rpt->tags);
+ freez(rpt->client_ip);
+ freez(rpt->client_port);
+ freez(rpt->program_name);
+ freez(rpt->program_version);
+#ifdef ENABLE_HTTPS
+ if(rpt->ssl.conn){
+ SSL_free(rpt->ssl.conn);
+ }
+#endif
+ freez(rpt);
+}
+
+static void rrdpush_receiver_thread_cleanup(void *ptr) {
+ static __thread int executed = 0;
+ if(!executed) {
+ executed = 1;
+ struct receiver_state *rpt = (struct receiver_state *) ptr;
+ // If the shutdown sequence has started, and this receiver is still attached to the host then we cannot touch
+        // the host pointer as it is unpredictable when the RRDHOST is deleted. Do the cleanup from rrdhost_free().
+ if (netdata_exit && rpt->host) {
+ rpt->exited = 1;
+ return;
+ }
+
+ // Make sure that we detach this thread and don't kill a freshly arriving receiver
+ if (!netdata_exit && rpt->host) {
+ netdata_mutex_lock(&rpt->host->receiver_lock);
+ if (rpt->host->receiver == rpt)
+ rpt->host->receiver = NULL;
+ netdata_mutex_unlock(&rpt->host->receiver_lock);
+ }
+
+ info("STREAM %s [receive from [%s]:%s]: receive thread ended (task id %d)", rpt->hostname, rpt->client_ip, rpt->client_port, gettid());
+ destroy_receiver_state(rpt);
+ }
+}
+
+#include "../collectors/plugins.d/pluginsd_parser.h"
+
+PARSER_RC streaming_timestamp(char **words, void *user, PLUGINSD_ACTION *plugins_action)
+{
+ UNUSED(plugins_action);
+ char *remote_time_txt = words[1];
+ time_t remote_time = 0;
+ RRDHOST *host = ((PARSER_USER_OBJECT *)user)->host;
+ struct plugind *cd = ((PARSER_USER_OBJECT *)user)->cd;
+ if (cd->version < VERSION_GAP_FILLING ) {
+ error("STREAM %s from %s: Child negotiated version %u but sent TIMESTAMP!", host->hostname, cd->cmd,
+ cd->version);
+ return PARSER_RC_OK; // Ignore error and continue stream
+ }
+ if (remote_time_txt && *remote_time_txt) {
+ remote_time = str2ull(remote_time_txt);
+ time_t now = now_realtime_sec(), prev = rrdhost_last_entry_t(host);
+ time_t gap = 0;
+ if (prev == 0)
+ info("STREAM %s from %s: Initial connection (no gap to check), remote=%ld local=%ld slew=%ld",
+ host->hostname, cd->cmd, remote_time, now, now-remote_time);
+ else {
+ gap = now - prev;
+ info("STREAM %s from %s: Checking for gaps... remote=%ld local=%ld..%ld slew=%ld %ld-sec gap",
+ host->hostname, cd->cmd, remote_time, prev, now, remote_time - now, gap);
+ }
+ char message[128];
+ sprintf(message,"REPLICATE %ld %ld\n", remote_time - gap, remote_time);
+ int ret;
+#ifdef ENABLE_HTTPS
+ SSL *conn = host->stream_ssl.conn ;
+ if(conn && !host->stream_ssl.flags) {
+ ret = SSL_write(conn, message, strlen(message));
+ } else {
+ ret = send(host->receiver->fd, message, strlen(message), MSG_DONTWAIT);
+ }
+#else
+ ret = send(host->receiver->fd, message, strlen(message), MSG_DONTWAIT);
+#endif
+ if (ret != (int)strlen(message))
+ error("Failed to send initial timestamp - gaps may appear in charts");
+ return PARSER_RC_OK;
+ }
+ return PARSER_RC_ERROR;
+}
+
+#define CLAIMED_ID_MIN_WORDS 3
+PARSER_RC streaming_claimed_id(char **words, void *user, PLUGINSD_ACTION *plugins_action)
+{
+ UNUSED(plugins_action);
+
+ int i;
+ uuid_t uuid;
+ RRDHOST *host = ((PARSER_USER_OBJECT *)user)->host;
+
+ for (i = 0; words[i]; i++) ;
+ if (i != CLAIMED_ID_MIN_WORDS) {
+ error("Command CLAIMED_ID came malformed %d parameters are expected but %d received", CLAIMED_ID_MIN_WORDS - 1, i - 1);
+ return PARSER_RC_ERROR;
+ }
+
+ // We don't need the parsed UUID
+ // just do it to check the format
+ if(uuid_parse(words[1], uuid)) {
+ error("1st parameter (host GUID) to CLAIMED_ID command is not valid GUID. Received: \"%s\".", words[1]);
+ return PARSER_RC_ERROR;
+ }
+ if(uuid_parse(words[2], uuid) && strcmp(words[2], "NULL")) {
+ error("2nd parameter (Claim ID) to CLAIMED_ID command is not valid GUID. Received: \"%s\".", words[2]);
+ return PARSER_RC_ERROR;
+ }
+
+ if(strcmp(words[1], host->machine_guid)) {
+ error("Claim ID is for host \"%s\" but it came over connection for \"%s\"", words[1], host->machine_guid);
+        return PARSER_RC_OK; // the message is OK, the problem must be somewhere else
+ }
+
+ rrdhost_aclk_state_lock(host);
+ if (host->aclk_state.claimed_id)
+ freez(host->aclk_state.claimed_id);
+ host->aclk_state.claimed_id = strcmp(words[2], "NULL") ? strdupz(words[2]) : NULL;
+ rrdhost_aclk_state_unlock(host);
+
+ rrdpush_claimed_id(host);
+
+ return PARSER_RC_OK;
+}
+
+/* The receiver socket is blocking, perform a single read into a buffer so that we can reassemble lines for parsing.
+ */
+static int receiver_read(struct receiver_state *r, FILE *fp) {
+#ifdef ENABLE_HTTPS
+ if (r->ssl.conn && !r->ssl.flags) {
+ ERR_clear_error();
+ int desired = sizeof(r->read_buffer) - r->read_len - 1;
+ int ret = SSL_read(r->ssl.conn, r->read_buffer + r->read_len, desired);
+ if (ret > 0 ) {
+ r->read_len += ret;
+ return 0;
+ }
+ // Don't treat SSL_ERROR_WANT_READ or SSL_ERROR_WANT_WRITE differently on blocking socket
+ u_long err;
+ char buf[256];
+ while ((err = ERR_get_error()) != 0) {
+ ERR_error_string_n(err, buf, sizeof(buf));
+ error("STREAM %s [receive from %s] ssl error: %s", r->hostname, r->client_ip, buf);
+ }
+ return 1;
+ }
+#endif
+ if (!fgets(r->read_buffer, sizeof(r->read_buffer), fp))
+ return 1;
+ r->read_len = strlen(r->read_buffer);
+ return 0;
+}
+
+/* Produce a full line if one exists, statefully return where we start next time.
+ * When we hit the end of the buffer with a partial line move it to the beginning for the next fill.
+ */
+static char *receiver_next_line(struct receiver_state *r, int *pos) {
+ int start = *pos, scan = *pos;
+ if (scan >= r->read_len) {
+ r->read_len = 0;
+ return NULL;
+ }
+ while (scan < r->read_len && r->read_buffer[scan] != '\n')
+ scan++;
+ if (scan < r->read_len && r->read_buffer[scan] == '\n') {
+ *pos = scan+1;
+ r->read_buffer[scan] = 0;
+ return &r->read_buffer[start];
+ }
+ memmove(r->read_buffer, &r->read_buffer[start], r->read_len - start);
+ r->read_len -= start;
+ return NULL;
+}
+
+
+size_t streaming_parser(struct receiver_state *rpt, struct plugind *cd, FILE *fp) {
+ size_t result;
+ PARSER_USER_OBJECT *user = callocz(1, sizeof(*user));
+ user->enabled = cd->enabled;
+ user->host = rpt->host;
+ user->opaque = rpt;
+ user->cd = cd;
+ user->trust_durations = 0;
+
+    PARSER *parser = parser_init(rpt->host, user, fp, PARSER_INPUT_SPLIT);
+    if (unlikely(!parser)) {
+        error("Failed to initialize parser");
+        cd->serial_failures++;
+        freez(user);
+        return 0;
+    }
+
+    // register the streaming-specific keywords on top of the plugins.d protocol
+    parser_add_keyword(parser, "TIMESTAMP", streaming_timestamp);
+    parser_add_keyword(parser, "CLAIMED_ID", streaming_claimed_id);
+
+ parser->plugins_action->begin_action = &pluginsd_begin_action;
+ parser->plugins_action->flush_action = &pluginsd_flush_action;
+ parser->plugins_action->end_action = &pluginsd_end_action;
+ parser->plugins_action->disable_action = &pluginsd_disable_action;
+ parser->plugins_action->variable_action = &pluginsd_variable_action;
+ parser->plugins_action->dimension_action = &pluginsd_dimension_action;
+ parser->plugins_action->label_action = &pluginsd_label_action;
+ parser->plugins_action->overwrite_action = &pluginsd_overwrite_action;
+ parser->plugins_action->chart_action = &pluginsd_chart_action;
+ parser->plugins_action->set_action = &pluginsd_set_action;
+
+ user->parser = parser;
+
+ do {
+ if (receiver_read(rpt, fp))
+ break;
+ int pos = 0;
+ char *line;
+ while ((line = receiver_next_line(rpt, &pos))) {
+ if (unlikely(netdata_exit || rpt->shutdown || parser_action(parser, line)))
+ goto done;
+ }
+ rpt->last_msg_t = now_realtime_sec();
+ }
+ while(!netdata_exit);
+done:
+    result = user->count;
+ freez(user);
+ parser_destroy(parser);
+ return result;
+}
+
+
+static int rrdpush_receive(struct receiver_state *rpt)
+{
+ int history = default_rrd_history_entries;
+ RRD_MEMORY_MODE mode = default_rrd_memory_mode;
+ int health_enabled = default_health_enabled;
+ int rrdpush_enabled = default_rrdpush_enabled;
+ char *rrdpush_destination = default_rrdpush_destination;
+ char *rrdpush_api_key = default_rrdpush_api_key;
+ char *rrdpush_send_charts_matching = default_rrdpush_send_charts_matching;
+ time_t alarms_delay = 60;
+
+ rpt->update_every = (int)appconfig_get_number(&stream_config, rpt->machine_guid, "update every", rpt->update_every);
+ if(rpt->update_every < 0) rpt->update_every = 1;
+
+ history = (int)appconfig_get_number(&stream_config, rpt->key, "default history", history);
+ history = (int)appconfig_get_number(&stream_config, rpt->machine_guid, "history", history);
+ if(history < 5) history = 5;
+
+ mode = rrd_memory_mode_id(appconfig_get(&stream_config, rpt->key, "default memory mode", rrd_memory_mode_name(mode)));
+ mode = rrd_memory_mode_id(appconfig_get(&stream_config, rpt->machine_guid, "memory mode", rrd_memory_mode_name(mode)));
+
+ health_enabled = appconfig_get_boolean_ondemand(&stream_config, rpt->key, "health enabled by default", health_enabled);
+ health_enabled = appconfig_get_boolean_ondemand(&stream_config, rpt->machine_guid, "health enabled", health_enabled);
+
+ alarms_delay = appconfig_get_number(&stream_config, rpt->key, "default postpone alarms on connect seconds", alarms_delay);
+ alarms_delay = appconfig_get_number(&stream_config, rpt->machine_guid, "postpone alarms on connect seconds", alarms_delay);
+
+ rrdpush_enabled = appconfig_get_boolean(&stream_config, rpt->key, "default proxy enabled", rrdpush_enabled);
+ rrdpush_enabled = appconfig_get_boolean(&stream_config, rpt->machine_guid, "proxy enabled", rrdpush_enabled);
+
+ rrdpush_destination = appconfig_get(&stream_config, rpt->key, "default proxy destination", rrdpush_destination);
+ rrdpush_destination = appconfig_get(&stream_config, rpt->machine_guid, "proxy destination", rrdpush_destination);
+
+ rrdpush_api_key = appconfig_get(&stream_config, rpt->key, "default proxy api key", rrdpush_api_key);
+ rrdpush_api_key = appconfig_get(&stream_config, rpt->machine_guid, "proxy api key", rrdpush_api_key);
+
+ rrdpush_send_charts_matching = appconfig_get(&stream_config, rpt->key, "default proxy send charts matching", rrdpush_send_charts_matching);
+ rrdpush_send_charts_matching = appconfig_get(&stream_config, rpt->machine_guid, "proxy send charts matching", rrdpush_send_charts_matching);
+
+ (void)appconfig_set_default(&stream_config, rpt->machine_guid, "host tags", (rpt->tags)?rpt->tags:"");
+
+ if (strcmp(rpt->machine_guid, localhost->machine_guid) == 0) {
+ log_stream_connection(rpt->client_ip, rpt->client_port, rpt->key, rpt->machine_guid, rpt->hostname, "DENIED - ATTEMPT TO RECEIVE METRICS FROM MACHINE_GUID IDENTICAL TO PARENT");
+ error("STREAM %s [receive from %s:%s]: denied to receive metrics, machine GUID [%s] is my own. Did you copy the parent/proxy machine GUID to a child?", rpt->hostname, rpt->client_ip, rpt->client_port, rpt->machine_guid);
+ close(rpt->fd);
+ return 1;
+ }
+
+ if (rpt->host==NULL) {
+
+ rpt->host = rrdhost_find_or_create(
+ rpt->hostname
+ , rpt->registry_hostname
+ , rpt->machine_guid
+ , rpt->os
+ , rpt->timezone
+ , rpt->tags
+ , rpt->program_name
+ , rpt->program_version
+ , rpt->update_every
+ , history
+ , mode
+ , (unsigned int)(health_enabled != CONFIG_BOOLEAN_NO)
+ , (unsigned int)(rrdpush_enabled && rrdpush_destination && *rrdpush_destination && rrdpush_api_key && *rrdpush_api_key)
+ , rrdpush_destination
+ , rrdpush_api_key
+ , rrdpush_send_charts_matching
+ , rpt->system_info
+ );
+
+ if(!rpt->host) {
+ close(rpt->fd);
+ log_stream_connection(rpt->client_ip, rpt->client_port, rpt->key, rpt->machine_guid, rpt->hostname, "FAILED - CANNOT ACQUIRE HOST");
+ error("STREAM %s [receive from [%s]:%s]: failed to find/create host structure.", rpt->hostname, rpt->client_ip, rpt->client_port);
+ return 1;
+ }
+
+ netdata_mutex_lock(&rpt->host->receiver_lock);
+ if (rpt->host->receiver == NULL)
+ rpt->host->receiver = rpt;
+ else {
+ error("Multiple receivers connected for %s concurrently, cancelling this one...", rpt->machine_guid);
+ netdata_mutex_unlock(&rpt->host->receiver_lock);
+ close(rpt->fd);
+ log_stream_connection(rpt->client_ip, rpt->client_port, rpt->key, rpt->machine_guid, rpt->hostname, "FAILED - BEATEN TO HOST CREATION");
+ return 1;
+ }
+ netdata_mutex_unlock(&rpt->host->receiver_lock);
+ }
+
+ int ssl = 0;
+#ifdef ENABLE_HTTPS
+ if (rpt->ssl.conn != NULL)
+ ssl = 1;
+#endif
+
+#ifdef NETDATA_INTERNAL_CHECKS
+ info("STREAM %s [receive from [%s]:%s]: client willing to stream metrics for host '%s' with machine_guid '%s': update every = %d, history = %ld, memory mode = %s, health %s,%s tags '%s'"
+ , rpt->hostname
+ , rpt->client_ip
+ , rpt->client_port
+ , rpt->host->hostname
+ , rpt->host->machine_guid
+ , rpt->host->rrd_update_every
+ , rpt->host->rrd_history_entries
+ , rrd_memory_mode_name(rpt->host->rrd_memory_mode)
+ , (health_enabled == CONFIG_BOOLEAN_NO)?"disabled":((health_enabled == CONFIG_BOOLEAN_YES)?"enabled":"auto")
+ , ssl ? " SSL," : ""
+ , rpt->host->tags?rpt->host->tags:""
+ );
+#endif // NETDATA_INTERNAL_CHECKS
+
+
+ struct plugind cd = {
+ .enabled = 1,
+ .update_every = default_rrd_update_every,
+ .pid = 0,
+ .serial_failures = 0,
+ .successful_collections = 0,
+ .obsolete = 0,
+ .started_t = now_realtime_sec(),
+ .next = NULL,
+ .version = 0,
+ };
+
+ // put the client IP and port into the buffers used by plugins.d
+ snprintfz(cd.id, CONFIG_MAX_NAME, "%s:%s", rpt->client_ip, rpt->client_port);
+ snprintfz(cd.filename, FILENAME_MAX, "%s:%s", rpt->client_ip, rpt->client_port);
+ snprintfz(cd.fullfilename, FILENAME_MAX, "%s:%s", rpt->client_ip, rpt->client_port);
+ snprintfz(cd.cmd, PLUGINSD_CMD_MAX, "%s:%s", rpt->client_ip, rpt->client_port);
+
+ info("STREAM %s [receive from [%s]:%s]: initializing communication...", rpt->host->hostname, rpt->client_ip, rpt->client_port);
+ char initial_response[HTTP_HEADER_SIZE];
+ if (rpt->stream_version > 1) {
+ info("STREAM %s [receive from [%s]:%s]: Netdata is using the stream version %u.", rpt->host->hostname, rpt->client_ip, rpt->client_port, rpt->stream_version);
+ sprintf(initial_response, "%s%u", START_STREAMING_PROMPT_VN, rpt->stream_version);
+ } else if (rpt->stream_version == 1) {
+ info("STREAM %s [receive from [%s]:%s]: Netdata is using the stream version %u.", rpt->host->hostname, rpt->client_ip, rpt->client_port, rpt->stream_version);
+ sprintf(initial_response, "%s", START_STREAMING_PROMPT_V2);
+ } else {
+ info("STREAM %s [receive from [%s]:%s]: Netdata is using first stream protocol.", rpt->host->hostname, rpt->client_ip, rpt->client_port);
+ sprintf(initial_response, "%s", START_STREAMING_PROMPT);
+ }
+ debug(D_STREAM, "Initial response to %s: %s", rpt->client_ip, initial_response);
+ #ifdef ENABLE_HTTPS
+ rpt->host->stream_ssl.conn = rpt->ssl.conn;
+ rpt->host->stream_ssl.flags = rpt->ssl.flags;
+ if(send_timeout(&rpt->ssl, rpt->fd, initial_response, strlen(initial_response), 0, 60) != (ssize_t)strlen(initial_response)) {
+#else
+ if(send_timeout(rpt->fd, initial_response, strlen(initial_response), 0, 60) != strlen(initial_response)) {
+#endif
+ log_stream_connection(rpt->client_ip, rpt->client_port, rpt->key, rpt->host->machine_guid, rpt->host->hostname, "FAILED - CANNOT REPLY");
+ error("STREAM %s [receive from [%s]:%s]: cannot send ready command.", rpt->host->hostname, rpt->client_ip, rpt->client_port);
+ close(rpt->fd);
+ return 0;
+ }
+
+ // remove the non-blocking flag from the socket
+ if(sock_delnonblock(rpt->fd) < 0)
+ error("STREAM %s [receive from [%s]:%s]: cannot remove the non-blocking flag from socket %d", rpt->host->hostname, rpt->client_ip, rpt->client_port, rpt->fd);
+
+ // convert the socket to a FILE *
+ FILE *fp = fdopen(rpt->fd, "r");
+ if(!fp) {
+ log_stream_connection(rpt->client_ip, rpt->client_port, rpt->key, rpt->host->machine_guid, rpt->host->hostname, "FAILED - SOCKET ERROR");
+ error("STREAM %s [receive from [%s]:%s]: failed to get a FILE for FD %d.", rpt->host->hostname, rpt->client_ip, rpt->client_port, rpt->fd);
+ close(rpt->fd);
+ return 0;
+ }
+
+ rrdhost_wrlock(rpt->host);
+/* if(rpt->host->connected_senders > 0) {
+ rrdhost_unlock(rpt->host);
+ log_stream_connection(rpt->client_ip, rpt->client_port, rpt->key, rpt->host->machine_guid, rpt->host->hostname, "REJECTED - ALREADY CONNECTED");
+ info("STREAM %s [receive from [%s]:%s]: multiple streaming connections for the same host detected. Rejecting new connection.", rpt->host->hostname, rpt->client_ip, rpt->client_port);
+ fclose(fp);
+ return 0;
+ }
+*/
+
+// rpt->host->connected_senders++;
+ rpt->host->labels.labels_flag = (rpt->stream_version > 0)?LABEL_FLAG_UPDATE_STREAM:LABEL_FLAG_STOP_STREAM;
+
+ if(health_enabled != CONFIG_BOOLEAN_NO) {
+ if(alarms_delay > 0) {
+ rpt->host->health_delay_up_to = now_realtime_sec() + alarms_delay;
+ info("Postponing health checks for %ld seconds, on host '%s', because it was just connected."
+ , alarms_delay
+ , rpt->host->hostname
+ );
+ }
+ }
+ rrdhost_unlock(rpt->host);
+
+ // call the plugins.d processor to receive the metrics
+ info("STREAM %s [receive from [%s]:%s]: receiving metrics...", rpt->host->hostname, rpt->client_ip, rpt->client_port);
+ log_stream_connection(rpt->client_ip, rpt->client_port, rpt->key, rpt->host->machine_guid, rpt->host->hostname, "CONNECTED");
+
+ cd.version = rpt->stream_version;
+
+#ifdef ENABLE_ACLK
+    // in case we have a cloud connection,
+    // inform the cloud that a new child connected
+ if (netdata_cloud_setting)
+ aclk_host_state_update(rpt->host, ACLK_CMD_CHILD_CONNECT);
+#endif
+
+ size_t count = streaming_parser(rpt, &cd, fp);
+
+ log_stream_connection(rpt->client_ip, rpt->client_port, rpt->key, rpt->host->machine_guid, rpt->hostname,
+ "DISCONNECTED");
+ error("STREAM %s [receive from [%s]:%s]: disconnected (completed %zu updates).", rpt->hostname, rpt->client_ip,
+ rpt->client_port, count);
+
+#ifdef ENABLE_ACLK
+    // in case we have a cloud connection,
+    // inform the cloud that the child disconnected
+ if (netdata_cloud_setting)
+ aclk_host_state_update(rpt->host, ACLK_CMD_CHILD_DISCONNECT);
+#endif
+
+ // During a shutdown there is cleanup code in rrdhost that will cancel the sender thread
+ if (!netdata_exit && rpt->host) {
+ rrd_rdlock();
+ rrdhost_wrlock(rpt->host);
+ netdata_mutex_lock(&rpt->host->receiver_lock);
+ if (rpt->host->receiver == rpt) {
+ rpt->host->senders_disconnected_time = now_realtime_sec();
+ rrdhost_flag_set(rpt->host, RRDHOST_FLAG_ORPHAN);
+ if(health_enabled == CONFIG_BOOLEAN_AUTO)
+ rpt->host->health_enabled = 0;
+ }
+ rrdhost_unlock(rpt->host);
+ if (rpt->host->receiver == rpt) {
+ rrdpush_sender_thread_stop(rpt->host);
+ }
+ netdata_mutex_unlock(&rpt->host->receiver_lock);
+ rrd_unlock();
+ }
+
+ // cleanup
+ fclose(fp);
+ return (int)count;
+}
+
+void *rrdpush_receiver_thread(void *ptr) {
+ netdata_thread_cleanup_push(rrdpush_receiver_thread_cleanup, ptr);
+
+ struct receiver_state *rpt = (struct receiver_state *)ptr;
+ info("STREAM %s [%s]:%s: receive thread created (task id %d)", rpt->hostname, rpt->client_ip, rpt->client_port, gettid());
+
+ rrdpush_receive(rpt);
+
+ netdata_thread_cleanup_pop(1);
+ return NULL;
+}
+
diff --git a/streaming/rrdpush.c b/streaming/rrdpush.c
new file mode 100644
index 0000000..f54fc60
--- /dev/null
+++ b/streaming/rrdpush.c
@@ -0,0 +1,731 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "rrdpush.h"
+#include "../parser/parser.h"
+
+/*
+ * rrdpush
+ *
+ * 3 threads are involved for all stream operations
+ *
+ * 1. a random data collection thread, calling rrdset_done_push()
+ * this is called for each chart.
+ *
+ * the output of this work is kept in a BUFFER in RRDHOST
+ * the sender thread is signalled via a pipe (also in RRDHOST)
+ *
+ * 2. a sender thread running at the sending netdata
+ * this is spawned automatically on the first chart to be pushed
+ *
+ * It tries to push the metrics to the remote netdata, as fast
+ * as possible (i.e. immediately after they are collected).
+ *
+ * 3. a receiver thread, running at the receiving netdata
+ * this is spawned automatically when the sender connects to
+ * the receiver.
+ *
+ */
+
+struct config stream_config = {
+ .first_section = NULL,
+ .last_section = NULL,
+ .mutex = NETDATA_MUTEX_INITIALIZER,
+ .index = {
+ .avl_tree = {
+ .root = NULL,
+ .compar = appconfig_section_compare
+ },
+ .rwlock = AVL_LOCK_INITIALIZER
+ }
+};
+
+unsigned int default_rrdpush_enabled = 0;
+char *default_rrdpush_destination = NULL;
+char *default_rrdpush_api_key = NULL;
+char *default_rrdpush_send_charts_matching = NULL;
+#ifdef ENABLE_HTTPS
+int netdata_use_ssl_on_stream = NETDATA_SSL_OPTIONAL;
+char *netdata_ssl_ca_path = NULL;
+char *netdata_ssl_ca_file = NULL;
+#endif
+
+static void load_stream_conf() {
+ errno = 0;
+ char *filename = strdupz_path_subpath(netdata_configured_user_config_dir, "stream.conf");
+ if(!appconfig_load(&stream_config, filename, 0, NULL)) {
+ info("CONFIG: cannot load user config '%s'. Will try stock config.", filename);
+ freez(filename);
+
+ filename = strdupz_path_subpath(netdata_configured_stock_config_dir, "stream.conf");
+ if(!appconfig_load(&stream_config, filename, 0, NULL))
+ info("CONFIG: cannot load stock config '%s'. Running with internal defaults.", filename);
+ }
+ freez(filename);
+}
+
+int rrdpush_init() {
+ // --------------------------------------------------------------------
+ // load stream.conf
+ load_stream_conf();
+
+ default_rrdpush_enabled = (unsigned int)appconfig_get_boolean(&stream_config, CONFIG_SECTION_STREAM, "enabled", default_rrdpush_enabled);
+ default_rrdpush_destination = appconfig_get(&stream_config, CONFIG_SECTION_STREAM, "destination", "");
+ default_rrdpush_api_key = appconfig_get(&stream_config, CONFIG_SECTION_STREAM, "api key", "");
+ default_rrdpush_send_charts_matching = appconfig_get(&stream_config, CONFIG_SECTION_STREAM, "send charts matching", "*");
+ rrdhost_free_orphan_time = config_get_number(CONFIG_SECTION_GLOBAL, "cleanup orphan hosts after seconds", rrdhost_free_orphan_time);
+
+
+ if(default_rrdpush_enabled && (!default_rrdpush_destination || !*default_rrdpush_destination || !default_rrdpush_api_key || !*default_rrdpush_api_key)) {
+ error("STREAM [send]: cannot enable sending thread - information is missing.");
+ default_rrdpush_enabled = 0;
+ }
+
+#ifdef ENABLE_HTTPS
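+    // a ":SSL" suffix on the destination forces TLS for the sender;
+    // strip it here so the address parses cleanly later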
+ if (netdata_use_ssl_on_stream == NETDATA_SSL_OPTIONAL) {
+ if (default_rrdpush_destination){
+ char *test = strstr(default_rrdpush_destination,":SSL");
+ if(test){
+ *test = 0X00;
+ netdata_use_ssl_on_stream = NETDATA_SSL_FORCE;
+ }
+ }
+ }
+
+ char *invalid_certificate = appconfig_get(&stream_config, CONFIG_SECTION_STREAM, "ssl skip certificate verification", "no");
+
+ if ( !strcmp(invalid_certificate,"yes")){
+ if (netdata_validate_server == NETDATA_SSL_VALID_CERTIFICATE){
+ info("Netdata is configured to accept invalid SSL certificate.");
+ netdata_validate_server = NETDATA_SSL_INVALID_CERTIFICATE;
+ }
+ }
+
+ netdata_ssl_ca_path = appconfig_get(&stream_config, CONFIG_SECTION_STREAM, "CApath", "/etc/ssl/certs/");
+ netdata_ssl_ca_file = appconfig_get(&stream_config, CONFIG_SECTION_STREAM, "CAfile", "/etc/ssl/certs/certs.pem");
+#endif
+
+ return default_rrdpush_enabled;
+}
+
+// data collection happens from multiple threads
+// each of these threads calls rrdset_done()
+// which in turn calls rrdset_done_push()
+// which uses this pipe to notify the streaming thread
+// that there are more data ready to be sent
+#define PIPE_READ 0
+#define PIPE_WRITE 1
+
+// to have the remote netdata re-sync the charts
+// to its current clock, we send for this many
+// iterations a BEGIN line without microseconds
+// this is for the first iterations of each chart
+unsigned int remote_clock_resync_iterations = 60;
+
+
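+// Decide whether a chart should be streamed upstream, based on the host's
+// 'send charts matching' simple pattern; the decision is cached in the chart flags.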
+static inline int should_send_chart_matching(RRDSET *st) {
+ if(unlikely(!rrdset_flag_check(st, RRDSET_FLAG_ENABLED))) {
+ rrdset_flag_clear(st, RRDSET_FLAG_UPSTREAM_SEND);
+ rrdset_flag_set(st, RRDSET_FLAG_UPSTREAM_IGNORE);
+ }
+ else if(!rrdset_flag_check(st, RRDSET_FLAG_UPSTREAM_SEND|RRDSET_FLAG_UPSTREAM_IGNORE)) {
+ RRDHOST *host = st->rrdhost;
+
+ if(simple_pattern_matches(host->rrdpush_send_charts_matching, st->id) ||
+ simple_pattern_matches(host->rrdpush_send_charts_matching, st->name)) {
+ rrdset_flag_clear(st, RRDSET_FLAG_UPSTREAM_IGNORE);
+ rrdset_flag_set(st, RRDSET_FLAG_UPSTREAM_SEND);
+ }
+ else {
+ rrdset_flag_clear(st, RRDSET_FLAG_UPSTREAM_SEND);
+ rrdset_flag_set(st, RRDSET_FLAG_UPSTREAM_IGNORE);
+ }
+ }
+
+ return(rrdset_flag_check(st, RRDSET_FLAG_UPSTREAM_SEND));
+}
+
+int configured_as_parent() {
+ struct section *section = NULL;
+ int is_parent = 0;
+
+ appconfig_wrlock(&stream_config);
+ for (section = stream_config.first_section; section; section = section->next) {
+ uuid_t uuid;
+
+ if (uuid_parse(section->name, uuid) != -1 &&
+ appconfig_get_boolean_by_section(section, "enabled", 0)) {
+ is_parent = 1;
+ break;
+ }
+ }
+ appconfig_unlock(&stream_config);
+
+ return is_parent;
+}
+
+// checks if the current chart definition has been sent
+static inline int need_to_send_chart_definition(RRDSET *st) {
+ rrdset_check_rdlock(st);
+
+ if(unlikely(!(rrdset_flag_check(st, RRDSET_FLAG_UPSTREAM_EXPOSED))))
+ return 1;
+
+ RRDDIM *rd;
+ rrddim_foreach_read(rd, st) {
+ if(unlikely(!rd->exposed)) {
+ #ifdef NETDATA_INTERNAL_CHECKS
+ info("host '%s', chart '%s', dimension '%s' flag 'exposed' triggered chart refresh to upstream", st->rrdhost->hostname, st->id, rd->id);
+ #endif
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+// Send the current chart definition.
+// Assumes that collector thread has already called sender_start for mutex / buffer state.
+static inline void rrdpush_send_chart_definition_nolock(RRDSET *st) {
+ RRDHOST *host = st->rrdhost;
+
+ rrdset_flag_set(st, RRDSET_FLAG_UPSTREAM_EXPOSED);
+
+ // properly set the name for the remote end to parse it
+ char *name = "";
+ if(likely(st->name)) {
+ if(unlikely(strcmp(st->id, st->name))) {
+ // they differ
+ name = strchr(st->name, '.');
+ if(name)
+ name++;
+ else
+ name = "";
+ }
+ }
+
+ // send the chart
+ buffer_sprintf(
+ host->sender->build
+ , "CHART \"%s\" \"%s\" \"%s\" \"%s\" \"%s\" \"%s\" \"%s\" %ld %d \"%s %s %s %s\" \"%s\" \"%s\"\n"
+ , st->id
+ , name
+ , st->title
+ , st->units
+ , st->family
+ , st->context
+ , rrdset_type_name(st->chart_type)
+ , st->priority
+ , st->update_every
+ , rrdset_flag_check(st, RRDSET_FLAG_OBSOLETE)?"obsolete":""
+ , rrdset_flag_check(st, RRDSET_FLAG_DETAIL)?"detail":""
+ , rrdset_flag_check(st, RRDSET_FLAG_STORE_FIRST)?"store_first":""
+ , rrdset_flag_check(st, RRDSET_FLAG_HIDDEN)?"hidden":""
+ , (st->plugin_name)?st->plugin_name:""
+ , (st->module_name)?st->module_name:""
+ );
+
+ // send the dimensions
+ RRDDIM *rd;
+ rrddim_foreach_read(rd, st) {
+ buffer_sprintf(
+ host->sender->build
+ , "DIMENSION \"%s\" \"%s\" \"%s\" " COLLECTED_NUMBER_FORMAT " " COLLECTED_NUMBER_FORMAT " \"%s %s %s\"\n"
+ , rd->id
+ , rd->name
+ , rrd_algorithm_name(rd->algorithm)
+ , rd->multiplier
+ , rd->divisor
+ , rrddim_flag_check(rd, RRDDIM_FLAG_OBSOLETE)?"obsolete":""
+ , rrddim_flag_check(rd, RRDDIM_FLAG_HIDDEN)?"hidden":""
+ , rrddim_flag_check(rd, RRDDIM_FLAG_DONT_DETECT_RESETS_OR_OVERFLOWS)?"noreset":""
+ );
+ rd->exposed = 1;
+ }
+
+ // send the chart local custom variables
+ RRDSETVAR *rs;
+ for(rs = st->variables; rs ;rs = rs->next) {
+ if(unlikely(rs->type == RRDVAR_TYPE_CALCULATED && rs->options & RRDVAR_OPTION_CUSTOM_CHART_VAR)) {
+ calculated_number *value = (calculated_number *) rs->value;
+
+ buffer_sprintf(
+ host->sender->build
+ , "VARIABLE CHART %s = " CALCULATED_NUMBER_FORMAT "\n"
+ , rs->variable
+ , *value
+ );
+ }
+ }
+
+ st->upstream_resync_time = st->last_collected_time.tv_sec + (remote_clock_resync_iterations * st->update_every);
+}
+
+// sends the current chart dimensions
+static inline void rrdpush_send_chart_metrics_nolock(RRDSET *st, struct sender_state *s) {
+ RRDHOST *host = st->rrdhost;
+ buffer_sprintf(host->sender->build, "BEGIN \"%s\" %llu", st->id, (st->last_collected_time.tv_sec > st->upstream_resync_time)?st->usec_since_last_update:0);
+ if (s->version >= VERSION_GAP_FILLING)
+ buffer_sprintf(host->sender->build, " %ld\n", st->last_collected_time.tv_sec);
+ else
+ buffer_strcat(host->sender->build, "\n");
+
+ RRDDIM *rd;
+ rrddim_foreach_read(rd, st) {
+ if(rd->updated && rd->exposed)
+ buffer_sprintf(host->sender->build
+ , "SET \"%s\" = " COLLECTED_NUMBER_FORMAT "\n"
+ , rd->id
+ , rd->collected_value
+ );
+ }
+ buffer_strcat(host->sender->build, "END\n");
+}
+
+static void rrdpush_sender_thread_spawn(RRDHOST *host);
+
+// Called from the internal collectors to mark a chart obsolete.
+void rrdset_push_chart_definition_now(RRDSET *st) {
+ RRDHOST *host = st->rrdhost;
+
+ if(unlikely(!host->rrdpush_send_enabled || !should_send_chart_matching(st)))
+ return;
+
+ rrdset_rdlock(st);
+ sender_start(host->sender);
+ rrdpush_send_chart_definition_nolock(st);
+ sender_commit(host->sender);
+ rrdset_unlock(st);
+}
+
+void rrdset_done_push(RRDSET *st) {
+ if(unlikely(!should_send_chart_matching(st)))
+ return;
+
+ RRDHOST *host = st->rrdhost;
+
+ if(unlikely(host->rrdpush_send_enabled && !host->rrdpush_sender_spawn))
+ rrdpush_sender_thread_spawn(host);
+
+ // Handle non-connected case
+ if(unlikely(!host->rrdpush_sender_connected)) {
+ if(unlikely(!host->rrdpush_sender_error_shown))
+ error("STREAM %s [send]: not ready - discarding collected metrics.", host->hostname);
+ host->rrdpush_sender_error_shown = 1;
+ return;
+ }
+ else if(unlikely(host->rrdpush_sender_error_shown)) {
+ info("STREAM %s [send]: sending metrics...", host->hostname);
+ host->rrdpush_sender_error_shown = 0;
+ }
+
+ sender_start(host->sender);
+
+ if(need_to_send_chart_definition(st))
+ rrdpush_send_chart_definition_nolock(st);
+
+ rrdpush_send_chart_metrics_nolock(st, host->sender);
+
+ // signal the sender there are more data
+ if(host->rrdpush_sender_pipe[PIPE_WRITE] != -1 && write(host->rrdpush_sender_pipe[PIPE_WRITE], " ", 1) == -1)
+ error("STREAM %s [send]: cannot write to internal pipe", host->hostname);
+
+ sender_commit(host->sender);
+}
+
+// labels
+void rrdpush_send_labels(RRDHOST *host) {
+ if (!host->labels.head || !(host->labels.labels_flag & LABEL_FLAG_UPDATE_STREAM) || (host->labels.labels_flag & LABEL_FLAG_STOP_STREAM))
+ return;
+
+ sender_start(host->sender);
+ rrdhost_rdlock(host);
+ netdata_rwlock_rdlock(&host->labels.labels_rwlock);
+
+ struct label *label_i = host->labels.head;
+ while(label_i) {
+ buffer_sprintf(host->sender->build
+ , "LABEL \"%s\" = %d %s\n"
+ , label_i->key
+ , (int)label_i->label_source
+ , label_i->value);
+
+ label_i = label_i->next;
+ }
+
+ buffer_sprintf(host->sender->build
+ , "OVERWRITE %s\n", "labels");
+
+ netdata_rwlock_unlock(&host->labels.labels_rwlock);
+ rrdhost_unlock(host);
+ sender_commit(host->sender);
+
+ if(host->rrdpush_sender_pipe[PIPE_WRITE] != -1 && write(host->rrdpush_sender_pipe[PIPE_WRITE], " ", 1) == -1)
+ error("STREAM %s [send]: cannot write to internal pipe", host->hostname);
+
+ host->labels.labels_flag &= ~LABEL_FLAG_UPDATE_STREAM;
+}
+
+void rrdpush_claimed_id(RRDHOST *host)
+{
+ if(unlikely(!host->rrdpush_send_enabled || !host->rrdpush_sender_connected))
+ return;
+
+ if(host->sender->version < STREAM_VERSION_CLAIM)
+ return;
+
+ sender_start(host->sender);
+ rrdhost_aclk_state_lock(host);
+
+ buffer_sprintf(host->sender->build, "CLAIMED_ID %s %s\n", host->machine_guid, (host->aclk_state.claimed_id ? host->aclk_state.claimed_id : "NULL") );
+
+ rrdhost_aclk_state_unlock(host);
+ sender_commit(host->sender);
+
+ // signal the sender there are more data
+ if(host->rrdpush_sender_pipe[PIPE_WRITE] != -1 && write(host->rrdpush_sender_pipe[PIPE_WRITE], " ", 1) == -1)
+ error("STREAM %s [send]: cannot write to internal pipe", host->hostname);
+}
+
+// ----------------------------------------------------------------------------
+// rrdpush sender thread
+
+// Either the receiver lost the connection or the host is being destroyed.
+// The sender mutex guards thread creation, any spurious data is wiped on reconnection.
+void rrdpush_sender_thread_stop(RRDHOST *host) {
+
+ netdata_mutex_lock(&host->sender->mutex);
+ netdata_thread_t thr = 0;
+
+ if(host->rrdpush_sender_spawn) {
+ info("STREAM %s [send]: signaling sending thread to stop...", host->hostname);
+
+ // signal the thread that we want to join it
+ host->rrdpush_sender_join = 1;
+
+ // copy the thread id, so that we will be waiting for the right one
+ // even if a new one has been spawn
+ thr = host->rrdpush_sender_thread;
+
+ // signal it to cancel
+ netdata_thread_cancel(host->rrdpush_sender_thread);
+ }
+
+ netdata_mutex_unlock(&host->sender->mutex);
+
+ if(thr != 0) {
+ info("STREAM %s [send]: waiting for the sending thread to stop...", host->hostname);
+ void *result;
+ netdata_thread_join(thr, &result);
+ info("STREAM %s [send]: sending thread has exited.", host->hostname);
+ }
+}
+
+
+// ----------------------------------------------------------------------------
+// rrdpush receiver thread
+
+void log_stream_connection(const char *client_ip, const char *client_port, const char *api_key, const char *machine_guid, const char *host, const char *msg) {
+ log_access("STREAM: %d '[%s]:%s' '%s' host '%s' api key '%s' machine guid '%s'", gettid(), client_ip, client_port, msg, host, api_key, machine_guid);
+}
+
+
+static void rrdpush_sender_thread_spawn(RRDHOST *host) {
+ netdata_mutex_lock(&host->sender->mutex);
+
+ if(!host->rrdpush_sender_spawn) {
+ char tag[NETDATA_THREAD_TAG_MAX + 1];
+ snprintfz(tag, NETDATA_THREAD_TAG_MAX, "STREAM_SENDER[%s]", host->hostname);
+
+ if(netdata_thread_create(&host->rrdpush_sender_thread, tag, NETDATA_THREAD_OPTION_JOINABLE, rrdpush_sender_thread, (void *) host->sender))
+ error("STREAM %s [send]: failed to create new thread for client.", host->hostname);
+ else
+ host->rrdpush_sender_spawn = 1;
+ }
+ netdata_mutex_unlock(&host->sender->mutex);
+}
+
+int rrdpush_receiver_permission_denied(struct web_client *w) {
+ // we always respond with the same message and error code
+ // to prevent an attacker from gaining info about the error
+ buffer_flush(w->response.data);
+ buffer_sprintf(w->response.data, "You are not permitted to access this. Check the logs for more info.");
+ return 401;
+}
+
+int rrdpush_receiver_too_busy_now(struct web_client *w) {
+ // we always respond with the same message and error code
+ // to prevent an attacker from gaining info about the error
+ buffer_flush(w->response.data);
+ buffer_sprintf(w->response.data, "The server is too busy now to accept this request. Try later.");
+ return 503;
+}
+
+void *rrdpush_receiver_thread(void *ptr);
+int rrdpush_receiver_thread_spawn(struct web_client *w, char *url) {
+ info("clients wants to STREAM metrics.");
+
+ char *key = NULL, *hostname = NULL, *registry_hostname = NULL, *machine_guid = NULL, *os = "unknown", *timezone = "unknown", *tags = NULL;
+ int update_every = default_rrd_update_every;
+ uint32_t stream_version = UINT_MAX;
+ char buf[GUID_LEN + 1];
+
+ struct rrdhost_system_info *system_info = callocz(1, sizeof(struct rrdhost_system_info));
+
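+ // The child encodes its parameters as a query string of key=value pairs,
+ // e.g. (illustrative values):
+ // key=API_KEY&hostname=child1&registry_hostname=child1&machine_guid=MACHINE_GUID&update_every=1&os=linux&timezone=UTC&ver=3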
+ while(url) {
+ char *value = mystrsep(&url, "&");
+ if(!value || !*value) continue;
+
+ char *name = mystrsep(&value, "=");
+ if(!name || !*name) continue;
+ if(!value || !*value) continue;
+
+ if(!strcmp(name, "key"))
+ key = value;
+ else if(!strcmp(name, "hostname"))
+ hostname = value;
+ else if(!strcmp(name, "registry_hostname"))
+ registry_hostname = value;
+ else if(!strcmp(name, "machine_guid"))
+ machine_guid = value;
+ else if(!strcmp(name, "update_every"))
+ update_every = (int)strtoul(value, NULL, 0);
+ else if(!strcmp(name, "os"))
+ os = value;
+ else if(!strcmp(name, "timezone"))
+ timezone = value;
+ else if(!strcmp(name, "tags"))
+ tags = value;
+ else if(!strcmp(name, "ver"))
+ stream_version = MIN((uint32_t) strtoul(value, NULL, 0), STREAMING_PROTOCOL_CURRENT_VERSION);
+ else {
+ // An old Netdata child does not have a compatible streaming protocol, map to something sane.
+ if (!strcmp(name, "NETDATA_SYSTEM_OS_NAME"))
+ name = "NETDATA_HOST_OS_NAME";
+ else if (!strcmp(name, "NETDATA_SYSTEM_OS_ID"))
+ name = "NETDATA_HOST_OS_ID";
+ else if (!strcmp(name, "NETDATA_SYSTEM_OS_ID_LIKE"))
+ name = "NETDATA_HOST_OS_ID_LIKE";
+ else if (!strcmp(name, "NETDATA_SYSTEM_OS_VERSION"))
+ name = "NETDATA_HOST_OS_VERSION";
+ else if (!strcmp(name, "NETDATA_SYSTEM_OS_VERSION_ID"))
+ name = "NETDATA_HOST_OS_VERSION_ID";
+ else if (!strcmp(name, "NETDATA_SYSTEM_OS_DETECTION"))
+ name = "NETDATA_HOST_OS_DETECTION";
+ else if(!strcmp(name, "NETDATA_PROTOCOL_VERSION") && stream_version == UINT_MAX) {
+ stream_version = 1;
+ }
+
+ if (unlikely(rrdhost_set_system_info_variable(system_info, name, value))) {
+ info("STREAM [receive from [%s]:%s]: request has parameter '%s' = '%s', which is not used.",
+ w->client_ip, w->client_port, name, value);
+ }
+ }
+ }
+
+ if (stream_version == UINT_MAX)
+ stream_version = 0;
+
+ if(!key || !*key) {
+ rrdhost_system_info_free(system_info);
+ log_stream_connection(w->client_ip, w->client_port, (key && *key)?key:"-", (machine_guid && *machine_guid)?machine_guid:"-", (hostname && *hostname)?hostname:"-", "ACCESS DENIED - NO KEY");
+ error("STREAM [receive from [%s]:%s]: request without an API key. Forbidding access.", w->client_ip, w->client_port);
+ return rrdpush_receiver_permission_denied(w);
+ }
+
+ if(!hostname || !*hostname) {
+ rrdhost_system_info_free(system_info);
+ log_stream_connection(w->client_ip, w->client_port, (key && *key)?key:"-", (machine_guid && *machine_guid)?machine_guid:"-", (hostname && *hostname)?hostname:"-", "ACCESS DENIED - NO HOSTNAME");
+ error("STREAM [receive from [%s]:%s]: request without a hostname. Forbidding access.", w->client_ip, w->client_port);
+ return rrdpush_receiver_permission_denied(w);
+ }
+
+ if(!machine_guid || !*machine_guid) {
+ rrdhost_system_info_free(system_info);
+ log_stream_connection(w->client_ip, w->client_port, (key && *key)?key:"-", (machine_guid && *machine_guid)?machine_guid:"-", (hostname && *hostname)?hostname:"-", "ACCESS DENIED - NO MACHINE GUID");
+ error("STREAM [receive from [%s]:%s]: request without a machine GUID. Forbidding access.", w->client_ip, w->client_port);
+ return rrdpush_receiver_permission_denied(w);
+ }
+
+ if(regenerate_guid(key, buf) == -1) {
+ rrdhost_system_info_free(system_info);
+ log_stream_connection(w->client_ip, w->client_port, (key && *key)?key:"-", (machine_guid && *machine_guid)?machine_guid:"-", (hostname && *hostname)?hostname:"-", "ACCESS DENIED - INVALID KEY");
+ error("STREAM [receive from [%s]:%s]: API key '%s' is not valid GUID (use the command uuidgen to generate one). Forbidding access.", w->client_ip, w->client_port, key);
+ return rrdpush_receiver_permission_denied(w);
+ }
+
+ if(regenerate_guid(machine_guid, buf) == -1) {
+ rrdhost_system_info_free(system_info);
+ log_stream_connection(w->client_ip, w->client_port, (key && *key)?key:"-", (machine_guid && *machine_guid)?machine_guid:"-", (hostname && *hostname)?hostname:"-", "ACCESS DENIED - INVALID MACHINE GUID");
+ error("STREAM [receive from [%s]:%s]: machine GUID '%s' is not GUID. Forbidding access.", w->client_ip, w->client_port, machine_guid);
+ return rrdpush_receiver_permission_denied(w);
+ }
+
+ if(!appconfig_get_boolean(&stream_config, key, "enabled", 0)) {
+ rrdhost_system_info_free(system_info);
+ log_stream_connection(w->client_ip, w->client_port, (key && *key)?key:"-", (machine_guid && *machine_guid)?machine_guid:"-", (hostname && *hostname)?hostname:"-", "ACCESS DENIED - KEY NOT ENABLED");
+ error("STREAM [receive from [%s]:%s]: API key '%s' is not allowed. Forbidding access.", w->client_ip, w->client_port, key);
+ return rrdpush_receiver_permission_denied(w);
+ }
+
+ {
+ SIMPLE_PATTERN *key_allow_from = simple_pattern_create(appconfig_get(&stream_config, key, "allow from", "*"), NULL, SIMPLE_PATTERN_EXACT);
+ if(key_allow_from) {
+ if(!simple_pattern_matches(key_allow_from, w->client_ip)) {
+ simple_pattern_free(key_allow_from);
+ rrdhost_system_info_free(system_info);
+ log_stream_connection(w->client_ip, w->client_port, (key && *key)?key:"-", (machine_guid && *machine_guid)?machine_guid:"-", (hostname && *hostname) ? hostname : "-", "ACCESS DENIED - KEY NOT ALLOWED FROM THIS IP");
+ error("STREAM [receive from [%s]:%s]: API key '%s' is not permitted from this IP. Forbidding access.", w->client_ip, w->client_port, key);
+ return rrdpush_receiver_permission_denied(w);
+ }
+ simple_pattern_free(key_allow_from);
+ }
+ }
+
+ if(!appconfig_get_boolean(&stream_config, machine_guid, "enabled", 1)) {
+ rrdhost_system_info_free(system_info);
+ log_stream_connection(w->client_ip, w->client_port, (key && *key)?key:"-", (machine_guid && *machine_guid)?machine_guid:"-", (hostname && *hostname)?hostname:"-", "ACCESS DENIED - MACHINE GUID NOT ENABLED");
+ error("STREAM [receive from [%s]:%s]: machine GUID '%s' is not allowed. Forbidding access.", w->client_ip, w->client_port, machine_guid);
+ return rrdpush_receiver_permission_denied(w);
+ }
+
+ {
+ SIMPLE_PATTERN *machine_allow_from = simple_pattern_create(appconfig_get(&stream_config, machine_guid, "allow from", "*"), NULL, SIMPLE_PATTERN_EXACT);
+ if(machine_allow_from) {
+ if(!simple_pattern_matches(machine_allow_from, w->client_ip)) {
+ simple_pattern_free(machine_allow_from);
+ rrdhost_system_info_free(system_info);
+ log_stream_connection(w->client_ip, w->client_port, (key && *key)?key:"-", (machine_guid && *machine_guid)?machine_guid:"-", (hostname && *hostname) ? hostname : "-", "ACCESS DENIED - MACHINE GUID NOT ALLOWED FROM THIS IP");
+ error("STREAM [receive from [%s]:%s]: Machine GUID '%s' is not permitted from this IP. Forbidding access.", w->client_ip, w->client_port, machine_guid);
+ return rrdpush_receiver_permission_denied(w);
+ }
+ simple_pattern_free(machine_allow_from);
+ }
+ }
+
+ if(unlikely(web_client_streaming_rate_t > 0)) {
+ static netdata_mutex_t stream_rate_mutex = NETDATA_MUTEX_INITIALIZER;
+ static volatile time_t last_stream_accepted_t = 0;
+
+ netdata_mutex_lock(&stream_rate_mutex);
+ time_t now = now_realtime_sec();
+
+ if(unlikely(last_stream_accepted_t == 0))
+ last_stream_accepted_t = now;
+
+ if(now - last_stream_accepted_t < web_client_streaming_rate_t) {
+ netdata_mutex_unlock(&stream_rate_mutex);
+ rrdhost_system_info_free(system_info);
+ error("STREAM [receive from [%s]:%s]: too busy to accept new streaming request. Will be allowed in %ld secs.", w->client_ip, w->client_port, (long)(web_client_streaming_rate_t - (now - last_stream_accepted_t)));
+ return rrdpush_receiver_too_busy_now(w);
+ }
+
+ last_stream_accepted_t = now;
+ netdata_mutex_unlock(&stream_rate_mutex);
+ }
+
+ /*
+ * Quick path for rejecting multiple connections. The lock taken is fine-grained - it only protects the receiver
+ * pointer within the host (if a host exists). This protects against multiple concurrent web requests hitting
+ * separate threads within the web-server and landing here. The lock guards the thread-shutdown sequence that
+ * detaches the receiver from the host. If the host is being created (first time-access) then we also use the
+ * lock to prevent race-hazard (two threads try to create the host concurrently, one wins and the other does a
+ * lookup to the now-attached structure).
+ */
+ struct receiver_state *rpt = callocz(1, sizeof(*rpt));
+
+ rrd_rdlock();
+ RRDHOST *host = rrdhost_find_by_guid(machine_guid, 0);
+ if (unlikely(host && rrdhost_flag_check(host, RRDHOST_FLAG_ARCHIVED))) /* Ignore archived hosts. */
+ host = NULL;
+ if (host) {
+ rrdhost_wrlock(host);
+ netdata_mutex_lock(&host->receiver_lock);
+ rrdhost_flag_clear(host, RRDHOST_FLAG_ORPHAN);
+ host->senders_disconnected_time = 0;
+ if (host->receiver != NULL) {
+ time_t age = now_realtime_sec() - host->receiver->last_msg_t;
+ if (age > 30) {
+ host->receiver->shutdown = 1;
+ shutdown(host->receiver->fd, SHUT_RDWR);
+ host->receiver = NULL; // Thread holds reference to structure
+ info("STREAM %s [receive from [%s]:%s]: multiple connections for same host detected - existing connection is dead (%ld sec), accepting new connection.", host->hostname, w->client_ip, w->client_port, age);
+ }
+ else {
+ netdata_mutex_unlock(&host->receiver_lock);
+ rrdhost_unlock(host);
+ rrd_unlock();
+ log_stream_connection(w->client_ip, w->client_port, key, host->machine_guid, host->hostname,
+ "REJECTED - ALREADY CONNECTED");
+ info("STREAM %s [receive from [%s]:%s]: multiple connections for same host detected - existing connection is active (within last %ld sec), rejecting new connection.", host->hostname, w->client_ip, w->client_port, age);
+ // Have not set WEB_CLIENT_FLAG_DONT_CLOSE_SOCKET - caller should clean up
+ buffer_flush(w->response.data);
+ buffer_strcat(w->response.data, "This GUID is already streaming to this server");
+ freez(rpt);
+ return 409;
+ }
+ }
+ host->receiver = rpt;
+ netdata_mutex_unlock(&host->receiver_lock);
+ rrdhost_unlock(host);
+ }
+ rrd_unlock();
+
+ rpt->last_msg_t = now_realtime_sec();
+
+ rpt->host = host;
+ rpt->fd = w->ifd;
+ rpt->key = strdupz(key);
+ rpt->hostname = strdupz(hostname);
+ rpt->registry_hostname = strdupz((registry_hostname && *registry_hostname)?registry_hostname:hostname);
+ rpt->machine_guid = strdupz(machine_guid);
+ rpt->os = strdupz(os);
+ rpt->timezone = strdupz(timezone);
+ rpt->tags = (tags)?strdupz(tags):NULL;
+ rpt->client_ip = strdupz(w->client_ip);
+ rpt->client_port = strdupz(w->client_port);
+ rpt->update_every = update_every;
+ rpt->system_info = system_info;
+ rpt->stream_version = stream_version;
+#ifdef ENABLE_HTTPS
+ rpt->ssl.conn = w->ssl.conn;
+ rpt->ssl.flags = w->ssl.flags;
+
+ w->ssl.conn = NULL;
+ w->ssl.flags = NETDATA_SSL_START;
+#endif
+
+ if(w->user_agent && w->user_agent[0]) {
+ char *t = strchr(w->user_agent, '/');
+ if(t && *t) {
+ *t = '\0';
+ t++;
+ }
+
+ rpt->program_name = strdupz(w->user_agent);
+ if(t && *t) rpt->program_version = strdupz(t);
+ }
+
+ debug(D_SYSTEM, "starting STREAM receive thread.");
+
+ char tag[FILENAME_MAX + 1];
+ snprintfz(tag, FILENAME_MAX, "STREAM_RECEIVER[%s,[%s]:%s]", rpt->hostname, w->client_ip, w->client_port);
+
+ if(netdata_thread_create(&rpt->thread, tag, NETDATA_THREAD_OPTION_DEFAULT, rrdpush_receiver_thread, (void *)rpt))
+ error("Failed to create new STREAM receive thread for client.");
+
+ // prevent the caller from closing the streaming socket
+ if(web_server_mode == WEB_SERVER_MODE_STATIC_THREADED) {
+ web_client_flag_set(w, WEB_CLIENT_FLAG_DONT_CLOSE_SOCKET);
+ }
+ else {
+ if(w->ifd == w->ofd)
+ w->ifd = w->ofd = -1;
+ else
+ w->ifd = -1;
+ }
+
+ buffer_flush(w->response.data);
+ return 200;
+}
diff --git a/streaming/rrdpush.h b/streaming/rrdpush.h
new file mode 100644
index 0000000..225d0c2
--- /dev/null
+++ b/streaming/rrdpush.h
@@ -0,0 +1,117 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_RRDPUSH_H
+#define NETDATA_RRDPUSH_H 1
+
+#include "../database/rrd.h"
+#include "../libnetdata/libnetdata.h"
+#include "web/server/web_client.h"
+#include "daemon/common.h"
+
+#define CONNECTED_TO_SIZE 100
+
+// #define STREAMING_PROTOCOL_CURRENT_VERSION (uint32_t)4 // gap-filling, not yet enabled
+#define STREAMING_PROTOCOL_CURRENT_VERSION (uint32_t)3
+#define VERSION_GAP_FILLING 4
+#define STREAM_VERSION_CLAIM 3
+
+#define STREAMING_PROTOCOL_VERSION "1.1"
+#define START_STREAMING_PROMPT "Hit me baby, push them over..."
+#define START_STREAMING_PROMPT_V2 "Hit me baby, push them over and bring the host labels..."
+#define START_STREAMING_PROMPT_VN "Hit me baby, push them over with the version="
+
+#define HTTP_HEADER_SIZE 8192
+
+typedef enum {
+ RRDPUSH_MULTIPLE_CONNECTIONS_ALLOW,
+ RRDPUSH_MULTIPLE_CONNECTIONS_DENY_NEW
+} RRDPUSH_MULTIPLE_CONNECTIONS_STRATEGY;
+
+typedef struct {
+ char *os_name;
+ char *os_id;
+ char *os_version;
+ char *kernel_name;
+ char *kernel_version;
+} stream_encoded_t;
+
+// Thread-local storage
+// Metric transmission: collector threads asynchronously fill the buffer, sender thread uses it.
+
+struct sender_state {
+ RRDHOST *host;
+ pid_t task_id;
+ unsigned int overflow:1;
+ int timeout, default_port;
+ usec_t reconnect_delay;
+ char connected_to[CONNECTED_TO_SIZE + 1]; // We don't know which proxy we connect to, passed back from socket.c
+ size_t begin;
+ size_t reconnects_counter;
+ size_t sent_bytes;
+ size_t sent_bytes_on_this_connection;
+ size_t send_attempts;
+ time_t last_sent_t;
+ size_t not_connected_loops;
+ // Metrics are collected asynchronously by collector threads calling rrdset_done_push(). This can also trigger
+ // the lazy creation of the sender thread - both cases (buffer access and thread creation) are guarded here.
+ netdata_mutex_t mutex;
+ struct circular_buffer *buffer;
+ BUFFER *build;
+ char read_buffer[512];
+ int read_len;
+ int32_t version;
+};
+
+struct receiver_state {
+ RRDHOST *host;
+ netdata_thread_t thread;
+ int fd;
+ char *key;
+ char *hostname;
+ char *registry_hostname;
+ char *machine_guid;
+ char *os;
+ char *timezone; // Unused?
+ char *tags;
+ char *client_ip; // Duplicated in pluginsd
+ char *client_port; // Duplicated in pluginsd
+ char *program_name; // Duplicated in pluginsd
+ char *program_version;
+ struct rrdhost_system_info *system_info;
+ int update_every;
+ uint32_t stream_version;
+ time_t last_msg_t;
+ char read_buffer[1024]; // Need to allow RRD_ID_LENGTH_MAX * 4 + the other fields
+ int read_len;
+#ifdef ENABLE_HTTPS
+ struct netdata_ssl ssl;
+#endif
+ unsigned int shutdown:1; // Tell the thread to exit
+ unsigned int exited; // Indicates that the thread has exited (NOT A BITFIELD!)
+};
+
+
+extern unsigned int default_rrdpush_enabled;
+extern char *default_rrdpush_destination;
+extern char *default_rrdpush_api_key;
+extern char *default_rrdpush_send_charts_matching;
+extern unsigned int remote_clock_resync_iterations;
+
+extern void sender_init(struct sender_state *s, RRDHOST *parent);
+void sender_start(struct sender_state *s);
+void sender_commit(struct sender_state *s);
+extern int rrdpush_init();
+extern int configured_as_parent();
+extern void rrdset_done_push(RRDSET *st);
+extern void rrdset_push_chart_definition_now(RRDSET *st);
+extern void *rrdpush_sender_thread(void *ptr);
+extern void rrdpush_send_labels(RRDHOST *host);
+extern void rrdpush_claimed_id(RRDHOST *host);
+
+extern int rrdpush_receiver_thread_spawn(struct web_client *w, char *url);
+extern void rrdpush_sender_thread_stop(RRDHOST *host);
+
+extern void rrdpush_sender_send_this_host_variable_now(RRDHOST *host, RRDVAR *rv);
+extern void log_stream_connection(const char *client_ip, const char *client_port, const char *api_key, const char *machine_guid, const char *host, const char *msg);
+
+#endif //NETDATA_RRDPUSH_H
diff --git a/streaming/sender.c b/streaming/sender.c
new file mode 100644
index 0000000..d55a420
--- /dev/null
+++ b/streaming/sender.c
@@ -0,0 +1,723 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "rrdpush.h"
+
+extern struct config stream_config;
+extern int netdata_use_ssl_on_stream;
+extern char *netdata_ssl_ca_path;
+extern char *netdata_ssl_ca_file;
+
+// Collector thread starting a transmission
+void sender_start(struct sender_state *s) {
+ netdata_mutex_lock(&s->mutex);
+ buffer_flush(s->build);
+}
+
+// Collector thread finishing a transmission
+void sender_commit(struct sender_state *s) {
+ if(cbuffer_add_unsafe(s->host->sender->buffer, buffer_tostring(s->host->sender->build),
+ s->host->sender->build->len))
+ s->overflow = 1;
+ buffer_flush(s->build);
+ netdata_mutex_unlock(&s->mutex);
+}
+
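+// Typical collector-side usage of the pair above (a sketch; the concrete
+// call sites live in rrdpush.c):
+//   sender_start(host->sender);                  // lock the mutex, reset the build buffer
+//   buffer_sprintf(host->sender->build, "...");  // format the payload
+//   sender_commit(host->sender);                 // queue it to the circular buffer, unlock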
+
+static inline void rrdpush_sender_thread_close_socket(RRDHOST *host) {
+ host->rrdpush_sender_connected = 0;
+
+ if(host->rrdpush_sender_socket != -1) {
+ close(host->rrdpush_sender_socket);
+ host->rrdpush_sender_socket = -1;
+ }
+}
+
+static inline void rrdpush_sender_add_host_variable_to_buffer_nolock(RRDHOST *host, RRDVAR *rv) {
+ calculated_number *value = (calculated_number *)rv->value;
+
+ buffer_sprintf(
+ host->sender->build
+ , "VARIABLE HOST %s = " CALCULATED_NUMBER_FORMAT "\n"
+ , rv->name
+ , *value
+ );
+
+ debug(D_STREAM, "RRDVAR pushed HOST VARIABLE %s = " CALCULATED_NUMBER_FORMAT, rv->name, *value);
+}
+
+void rrdpush_sender_send_this_host_variable_now(RRDHOST *host, RRDVAR *rv) {
+ if(host->rrdpush_send_enabled && host->rrdpush_sender_spawn && host->rrdpush_sender_connected) {
+ sender_start(host->sender);
+ rrdpush_sender_add_host_variable_to_buffer_nolock(host, rv);
+ sender_commit(host->sender);
+ }
+}
+
+
+static int rrdpush_sender_thread_custom_host_variables_callback(void *rrdvar_ptr, void *host_ptr) {
+ RRDVAR *rv = (RRDVAR *)rrdvar_ptr;
+ RRDHOST *host = (RRDHOST *)host_ptr;
+
+ if(unlikely(rv->options & RRDVAR_OPTION_CUSTOM_HOST_VAR && rv->type == RRDVAR_TYPE_CALCULATED)) {
+ rrdpush_sender_add_host_variable_to_buffer_nolock(host, rv);
+
+ // return 1, so that the traversal will return the number of variables sent
+ return 1;
+ }
+
+ // returning a negative number will break the traversal
+ return 0;
+}
+
+static void rrdpush_sender_thread_send_custom_host_variables(RRDHOST *host) {
+ sender_start(host->sender);
+ int ret = rrdvar_callback_for_all_host_variables(host, rrdpush_sender_thread_custom_host_variables_callback, host);
+ (void)ret;
+ sender_commit(host->sender);
+
+ debug(D_STREAM, "RRDVAR sent %d VARIABLES", ret);
+}
+
+// resets all the charts, so that their definitions
+// will be resent to the central netdata
+static void rrdpush_sender_thread_reset_all_charts(RRDHOST *host) {
+ rrdhost_rdlock(host);
+
+ RRDSET *st;
+ rrdset_foreach_read(st, host) {
+ rrdset_flag_clear(st, RRDSET_FLAG_UPSTREAM_EXPOSED);
+
+ st->upstream_resync_time = 0;
+
+ rrdset_rdlock(st);
+
+ RRDDIM *rd;
+ rrddim_foreach_read(rd, st)
+ rd->exposed = 0;
+
+ rrdset_unlock(st);
+ }
+
+ rrdhost_unlock(host);
+}
+
+static inline void rrdpush_sender_thread_data_flush(RRDHOST *host) {
+ netdata_mutex_lock(&host->sender->mutex);
+
+ size_t len = cbuffer_next_unsafe(host->sender->buffer, NULL);
+ if (len)
+ error("STREAM %s [send]: discarding %zu bytes of metrics already in the buffer.", host->hostname, len);
+
+ cbuffer_remove_unsafe(host->sender->buffer, len);
+ netdata_mutex_unlock(&host->sender->mutex);
+
+ rrdpush_sender_thread_reset_all_charts(host);
+ rrdpush_sender_thread_send_custom_host_variables(host);
+}
+
+static inline void rrdpush_set_flags_to_newest_stream(RRDHOST *host) {
+ host->labels.labels_flag |= LABEL_FLAG_UPDATE_STREAM;
+ host->labels.labels_flag &= ~LABEL_FLAG_STOP_STREAM;
+}
+
+void rrdpush_encode_variable(stream_encoded_t *se, RRDHOST *host)
+{
+ se->os_name = (host->system_info->host_os_name)?url_encode(host->system_info->host_os_name):"";
+ se->os_id = (host->system_info->host_os_id)?url_encode(host->system_info->host_os_id):"";
+ se->os_version = (host->system_info->host_os_version)?url_encode(host->system_info->host_os_version):"";
+ se->kernel_name = (host->system_info->kernel_name)?url_encode(host->system_info->kernel_name):"";
+ se->kernel_version = (host->system_info->kernel_version)?url_encode(host->system_info->kernel_version):"";
+}
+
+void rrdpush_clean_encoded(stream_encoded_t *se)
+{
+ if (se->os_name)
+ freez(se->os_name);
+
+ if (se->os_id)
+ freez(se->os_id);
+
+ if (se->os_version)
+ freez(se->os_version);
+
+ if (se->kernel_name)
+ freez(se->kernel_name);
+
+ if (se->kernel_version)
+ freez(se->kernel_version);
+}
+
+static int rrdpush_sender_thread_connect_to_parent(RRDHOST *host, int default_port, int timeout,
+ struct sender_state *s) {
+
+ struct timeval tv = {
+ .tv_sec = timeout,
+ .tv_usec = 0
+ };
+
+ // make sure the socket is closed
+ rrdpush_sender_thread_close_socket(host);
+
+ debug(D_STREAM, "STREAM: Attempting to connect...");
+ info("STREAM %s [send to %s]: connecting...", host->hostname, host->rrdpush_send_destination);
+
+ host->rrdpush_sender_socket = connect_to_one_of(
+ host->rrdpush_send_destination
+ , default_port
+ , &tv
+ , &s->reconnects_counter
+ , s->connected_to
+ , sizeof(s->connected_to)-1
+ );
+
+ if(unlikely(host->rrdpush_sender_socket == -1)) {
+ error("STREAM %s [send to %s]: failed to connect", host->hostname, host->rrdpush_send_destination);
+ return 0;
+ }
+
+ info("STREAM %s [send to %s]: initializing communication...", host->hostname, s->connected_to);
+
+#ifdef ENABLE_HTTPS
+ if( netdata_client_ctx ){
+ host->ssl.flags = NETDATA_SSL_START;
+ if (!host->ssl.conn){
+ host->ssl.conn = SSL_new(netdata_client_ctx);
+ if(!host->ssl.conn){
+ error("Failed to allocate SSL structure.");
+ host->ssl.flags = NETDATA_SSL_NO_HANDSHAKE;
+ }
+ }
+ else{
+ SSL_clear(host->ssl.conn);
+ }
+
+ if (host->ssl.conn)
+ {
+ if (SSL_set_fd(host->ssl.conn, host->rrdpush_sender_socket) != 1) {
+ error("Failed to set the socket to the SSL on socket fd %d.", host->rrdpush_sender_socket);
+ host->ssl.flags = NETDATA_SSL_NO_HANDSHAKE;
+ } else{
+ host->ssl.flags = NETDATA_SSL_HANDSHAKE_COMPLETE;
+ }
+ }
+ }
+ else {
+ host->ssl.flags = NETDATA_SSL_NO_HANDSHAKE;
+ }
+#endif
+
+ /* TODO: During the implementation of #7265 switch the set of variables to HOST_* and CONTAINER_* if the
+ version negotiation resulted in a high enough version.
+ */
+ stream_encoded_t se;
+ rrdpush_encode_variable(&se, host);
+
+ char http[HTTP_HEADER_SIZE + 1];
+ int eol = snprintfz(http, HTTP_HEADER_SIZE,
+ "STREAM key=%s&hostname=%s&registry_hostname=%s&machine_guid=%s&update_every=%d&os=%s&timezone=%s&tags=%s&ver=%u"
+ "&NETDATA_SYSTEM_OS_NAME=%s"
+ "&NETDATA_SYSTEM_OS_ID=%s"
+ "&NETDATA_SYSTEM_OS_ID_LIKE=%s"
+ "&NETDATA_SYSTEM_OS_VERSION=%s"
+ "&NETDATA_SYSTEM_OS_VERSION_ID=%s"
+ "&NETDATA_SYSTEM_OS_DETECTION=%s"
+ "&NETDATA_HOST_IS_K8S_NODE=%s"
+ "&NETDATA_SYSTEM_KERNEL_NAME=%s"
+ "&NETDATA_SYSTEM_KERNEL_VERSION=%s"
+ "&NETDATA_SYSTEM_ARCHITECTURE=%s"
+ "&NETDATA_SYSTEM_VIRTUALIZATION=%s"
+ "&NETDATA_SYSTEM_VIRT_DETECTION=%s"
+ "&NETDATA_SYSTEM_CONTAINER=%s"
+ "&NETDATA_SYSTEM_CONTAINER_DETECTION=%s"
+ "&NETDATA_CONTAINER_OS_NAME=%s"
+ "&NETDATA_CONTAINER_OS_ID=%s"
+ "&NETDATA_CONTAINER_OS_ID_LIKE=%s"
+ "&NETDATA_CONTAINER_OS_VERSION=%s"
+ "&NETDATA_CONTAINER_OS_VERSION_ID=%s"
+ "&NETDATA_CONTAINER_OS_DETECTION=%s"
+ "&NETDATA_SYSTEM_CPU_LOGICAL_CPU_COUNT=%s"
+ "&NETDATA_SYSTEM_CPU_FREQ=%s"
+ "&NETDATA_SYSTEM_TOTAL_RAM=%s"
+ "&NETDATA_SYSTEM_TOTAL_DISK_SIZE=%s"
+ "&NETDATA_PROTOCOL_VERSION=%s"
+ " HTTP/1.1\r\n"
+ "User-Agent: %s/%s\r\n"
+ "Accept: */*\r\n\r\n"
+ , host->rrdpush_send_api_key
+ , host->hostname
+ , host->registry_hostname
+ , host->machine_guid
+ , default_rrd_update_every
+ , host->os
+ , host->timezone
+ , (host->tags) ? host->tags : ""
+ , STREAMING_PROTOCOL_CURRENT_VERSION
+ , se.os_name
+ , se.os_id
+ , (host->system_info->host_os_id_like) ? host->system_info->host_os_id_like : ""
+ , se.os_version
+ , (host->system_info->host_os_version_id) ? host->system_info->host_os_version_id : ""
+ , (host->system_info->host_os_detection) ? host->system_info->host_os_detection : ""
+ , (host->system_info->is_k8s_node) ? host->system_info->is_k8s_node : ""
+ , se.kernel_name
+ , se.kernel_version
+ , (host->system_info->architecture) ? host->system_info->architecture : ""
+ , (host->system_info->virtualization) ? host->system_info->virtualization : ""
+ , (host->system_info->virt_detection) ? host->system_info->virt_detection : ""
+ , (host->system_info->container) ? host->system_info->container : ""
+ , (host->system_info->container_detection) ? host->system_info->container_detection : ""
+ , (host->system_info->container_os_name) ? host->system_info->container_os_name : ""
+ , (host->system_info->container_os_id) ? host->system_info->container_os_id : ""
+ , (host->system_info->container_os_id_like) ? host->system_info->container_os_id_like : ""
+ , (host->system_info->container_os_version) ? host->system_info->container_os_version : ""
+ , (host->system_info->container_os_version_id) ? host->system_info->container_os_version_id : ""
+ , (host->system_info->container_os_detection) ? host->system_info->container_os_detection : ""
+ , (host->system_info->host_cores) ? host->system_info->host_cores : ""
+ , (host->system_info->host_cpu_freq) ? host->system_info->host_cpu_freq : ""
+ , (host->system_info->host_ram_total) ? host->system_info->host_ram_total : ""
+ , (host->system_info->host_disk_space) ? host->system_info->host_disk_space : ""
+ , STREAMING_PROTOCOL_VERSION
+ , host->program_name
+ , host->program_version
+ );
+ http[eol] = 0x00;
+ rrdpush_clean_encoded(&se);
+
+#ifdef ENABLE_HTTPS
+ if (!host->ssl.flags) {
+ ERR_clear_error();
+ SSL_set_connect_state(host->ssl.conn);
+ int err = SSL_connect(host->ssl.conn);
+ if (err != 1){
+ err = SSL_get_error(host->ssl.conn, err);
+ error("SSL cannot connect with the server: %s ",ERR_error_string((long)SSL_get_error(host->ssl.conn,err),NULL));
+ if (netdata_use_ssl_on_stream == NETDATA_SSL_FORCE) {
+ rrdpush_sender_thread_close_socket(host);
+ return 0;
+ }else {
+ host->ssl.flags = NETDATA_SSL_NO_HANDSHAKE;
+ }
+ }
+ else {
+ if (netdata_use_ssl_on_stream == NETDATA_SSL_FORCE) {
+ if (netdata_validate_server == NETDATA_SSL_VALID_CERTIFICATE) {
+ if ( security_test_certificate(host->ssl.conn)) {
+ error("Closing the stream connection, because the server SSL certificate is not valid.");
+ rrdpush_sender_thread_close_socket(host);
+ return 0;
+ }
+ }
+ }
+ }
+ }
+ if(send_timeout(&host->ssl,host->rrdpush_sender_socket, http, strlen(http), 0, timeout) == -1) {
+#else
+ if(send_timeout(host->rrdpush_sender_socket, http, strlen(http), 0, timeout) == -1) {
+#endif
+ error("STREAM %s [send to %s]: failed to send HTTP header to remote netdata.", host->hostname, s->connected_to);
+ rrdpush_sender_thread_close_socket(host);
+ return 0;
+ }
+
+ info("STREAM %s [send to %s]: waiting response from remote netdata...", host->hostname, s->connected_to);
+
+ ssize_t received;
+#ifdef ENABLE_HTTPS
+ received = recv_timeout(&host->ssl,host->rrdpush_sender_socket, http, HTTP_HEADER_SIZE, 0, timeout);
+ if(received == -1) {
+#else
+ received = recv_timeout(host->rrdpush_sender_socket, http, HTTP_HEADER_SIZE, 0, timeout);
+ if(received == -1) {
+#endif
+ error("STREAM %s [send to %s]: remote netdata does not respond.", host->hostname, s->connected_to);
+ rrdpush_sender_thread_close_socket(host);
+ return 0;
+ }
+
+ http[received] = '\0';
+ debug(D_STREAM, "Response to sender from far end: %s", http);
+ int answer = -1;
+ char *version_start = strchr(http, '=');
+ int32_t version = -1;
+ if(version_start) {
+ version_start++;
+ version = (int32_t)strtol(version_start, NULL, 10);
+ answer = memcmp(http, START_STREAMING_PROMPT_VN, (size_t)(version_start - http));
+ if(!answer) {
+ rrdpush_set_flags_to_newest_stream(host);
+ }
+ } else {
+ answer = memcmp(http, START_STREAMING_PROMPT_V2, strlen(START_STREAMING_PROMPT_V2));
+ if(!answer) {
+ version = 1;
+ rrdpush_set_flags_to_newest_stream(host);
+ }
+ else {
+ answer = memcmp(http, START_STREAMING_PROMPT, strlen(START_STREAMING_PROMPT));
+ if(!answer) {
+ version = 0;
+ host->labels.labels_flag |= LABEL_FLAG_STOP_STREAM;
+ host->labels.labels_flag &= ~LABEL_FLAG_UPDATE_STREAM;
+ }
+ }
+ }
+
+ if(version == -1) {
+ error("STREAM %s [send to %s]: server is not replying properly (is it a netdata?).", host->hostname, s->connected_to);
+ rrdpush_sender_thread_close_socket(host);
+ return 0;
+ }
+ s->version = version;
+
+ info("STREAM %s [send to %s]: established communication with a parent using protocol version %d - ready to send metrics..."
+ , host->hostname
+ , s->connected_to
+ , version);
+
+ if(sock_setnonblock(host->rrdpush_sender_socket) < 0)
+ error("STREAM %s [send to %s]: cannot set non-blocking mode for socket.", host->hostname, s->connected_to);
+
+ if(sock_enlarge_out(host->rrdpush_sender_socket) < 0)
+ error("STREAM %s [send to %s]: cannot enlarge the socket buffer.", host->hostname, s->connected_to);
+
+ debug(D_STREAM, "STREAM: Connected on fd %d...", host->rrdpush_sender_socket);
+
+ return 1;
+}
+
+static void attempt_to_connect(struct sender_state *state)
+{
+ state->send_attempts = 0;
+
+ if(rrdpush_sender_thread_connect_to_parent(state->host, state->default_port, state->timeout, state)) {
+ state->last_sent_t = now_monotonic_sec();
+
+ // reset the buffer, to properly send charts and metrics
+ rrdpush_sender_thread_data_flush(state->host);
+
+ // send from the beginning
+ state->begin = 0;
+
+ // make sure the next reconnection will be immediate
+ state->not_connected_loops = 0;
+
+ // reset the bytes we have sent for this session
+ state->sent_bytes_on_this_connection = 0;
+
+ // let the data collection threads know we are ready
+ state->host->rrdpush_sender_connected = 1;
+ }
+ else {
+ // increase the failed connections counter
+ state->not_connected_loops++;
+
+ // reset the number of bytes sent
+ state->sent_bytes_on_this_connection = 0;
+
+ // slow re-connection on repeating errors
+ sleep_usec(USEC_PER_SEC * state->reconnect_delay); // seconds
+ }
+}
+
+// TCP window is open and we have data to transmit.
+void attempt_to_send(struct sender_state *s) {
+
+ rrdpush_send_labels(s->host);
+
+ struct circular_buffer *cb = s->buffer;
+
+ netdata_thread_disable_cancelability();
+ netdata_mutex_lock(&s->mutex);
+ char *chunk;
+ size_t outstanding = cbuffer_next_unsafe(s->buffer, &chunk);
+ debug(D_STREAM, "STREAM: Sending data. Buffer r=%zu w=%zu s=%zu, next chunk=%zu", cb->read, cb->write, cb->size, outstanding);
+ ssize_t ret;
+#ifdef ENABLE_HTTPS
+ SSL *conn = s->host->ssl.conn ;
+ if(conn && !s->host->ssl.flags) {
+ ret = SSL_write(conn, chunk, outstanding);
+ } else {
+ ret = send(s->host->rrdpush_sender_socket, chunk, outstanding, MSG_DONTWAIT);
+ }
+#else
+ ret = send(s->host->rrdpush_sender_socket, chunk, outstanding, MSG_DONTWAIT);
+#endif
+ if (likely(ret > 0)) {
+ cbuffer_remove_unsafe(s->buffer, ret);
+ s->sent_bytes_on_this_connection += ret;
+ s->sent_bytes += ret;
+ debug(D_STREAM, "STREAM %s [send to %s]: Sent %zd bytes", s->host->hostname, s->connected_to, ret);
+ s->last_sent_t = now_monotonic_sec();
+ }
+ else if (ret == -1 && (errno == EAGAIN || errno == EINTR || errno == EWOULDBLOCK))
+ debug(D_STREAM, "STREAM %s [send to %s]: unavailable aftering polling POLLOUT", s->host->hostname,
+ s->connected_to);
+ else if (ret == -1) {
+ debug(D_STREAM, "STREAM: Send failed - closing socket...");
+ error("STREAM %s [send to %s]: failed to send metrics - closing connection - we have sent %zu bytes on this connection.", s->host->hostname, s->connected_to, s->sent_bytes_on_this_connection);
+ rrdpush_sender_thread_close_socket(s->host);
+ }
+ else {
+ debug(D_STREAM, "STREAM: send() returned 0 -> no error but no transmission");
+ }
+
+ netdata_mutex_unlock(&s->mutex);
+ netdata_thread_enable_cancelability();
+}
+
+void attempt_read(struct sender_state *s) {
+ int ret;
+#ifdef ENABLE_HTTPS
+ if (s->host->ssl.conn && !s->host->stream_ssl.flags) {
+ ERR_clear_error();
+ int desired = sizeof(s->read_buffer) - s->read_len - 1;
+ ret = SSL_read(s->host->ssl.conn, s->read_buffer, desired);
+ if (ret > 0 ) {
+ s->read_len += ret;
+ return;
+ }
+ // SSL_get_error() expects the return value of the failed call, not the byte count requested
+ int sslerrno = SSL_get_error(s->host->ssl.conn, ret);
+ if (sslerrno == SSL_ERROR_WANT_READ || sslerrno == SSL_ERROR_WANT_WRITE)
+ return;
+ u_long err;
+ char buf[256];
+ while ((err = ERR_get_error()) != 0) {
+ ERR_error_string_n(err, buf, sizeof(buf));
+ error("STREAM %s [send to %s] ssl error: %s", s->host->hostname, s->connected_to, buf);
+ }
+ error("Restarting connection");
+ rrdpush_sender_thread_close_socket(s->host);
+ return;
+ }
+#endif
+ ret = recv(s->host->rrdpush_sender_socket, s->read_buffer + s->read_len, sizeof(s->read_buffer) - s->read_len - 1,
+ MSG_DONTWAIT);
+ if (ret>0) {
+ s->read_len += ret;
+ return;
+ }
+ debug(D_STREAM, "Socket was POLLIN, but req %zu bytes gave %d", sizeof(s->read_buffer) - s->read_len - 1, ret);
+ if (ret<0 && (errno == EAGAIN || errno == EWOULDBLOCK || errno == EINTR))
+ return;
+ if (ret==0)
+ error("STREAM %s [send to %s]: connection closed by far end. Restarting connection", s->host->hostname, s->connected_to);
+ else
+ error("STREAM %s [send to %s]: error during read (%d). Restarting connection", s->host->hostname, s->connected_to,
+ ret);
+ rrdpush_sender_thread_close_socket(s->host);
+}
+
+// This is just a placeholder until the gap filling state machine is inserted
+void execute_commands(struct sender_state *s) {
+ char *start = s->read_buffer, *end = &s->read_buffer[s->read_len], *newline;
+ *end = 0;
+ while( start<end && (newline=strchr(start, '\n')) ) {
+ *newline = 0;
+ info("STREAM %s [send to %s] received command over connection: %s", s->host->hostname, s->connected_to, start);
+ start = newline+1;
+ }
+ if (start<end) {
+ memmove(s->read_buffer, start, end-start);
+ s->read_len = end-start;
+ }
+}
+
+
+static void rrdpush_sender_thread_cleanup_callback(void *ptr) {
+ RRDHOST *host = (RRDHOST *)ptr;
+
+ netdata_mutex_lock(&host->sender->mutex);
+
+ info("STREAM %s [send]: sending thread cleans up...", host->hostname);
+
+ rrdpush_sender_thread_close_socket(host);
+
+ // close the pipe
+ if(host->rrdpush_sender_pipe[PIPE_READ] != -1) {
+ close(host->rrdpush_sender_pipe[PIPE_READ]);
+ host->rrdpush_sender_pipe[PIPE_READ] = -1;
+ }
+
+ if(host->rrdpush_sender_pipe[PIPE_WRITE] != -1) {
+ close(host->rrdpush_sender_pipe[PIPE_WRITE]);
+ host->rrdpush_sender_pipe[PIPE_WRITE] = -1;
+ }
+
+ if(!host->rrdpush_sender_join) {
+ info("STREAM %s [send]: sending thread detaches itself.", host->hostname);
+ netdata_thread_detach(netdata_thread_self());
+ }
+
+ host->rrdpush_sender_spawn = 0;
+
+ info("STREAM %s [send]: sending thread now exits.", host->hostname);
+
+ netdata_mutex_unlock(&host->sender->mutex);
+}
+
+void sender_init(struct sender_state *s, RRDHOST *parent) {
+ memset(s, 0, sizeof(*s));
+ s->host = parent;
+ s->buffer = cbuffer_new(1024, 1024*1024);
+ s->build = buffer_create(1);
+ netdata_mutex_init(&s->mutex);
+}
+
+void *rrdpush_sender_thread(void *ptr) {
+ struct sender_state *s = ptr;
+ s->task_id = gettid();
+
+ if(!s->host->rrdpush_send_enabled || !s->host->rrdpush_send_destination ||
+ !*s->host->rrdpush_send_destination || !s->host->rrdpush_send_api_key ||
+ !*s->host->rrdpush_send_api_key) {
+ error("STREAM %s [send]: thread created (task id %d), but host has streaming disabled.",
+ s->host->hostname, s->task_id);
+ return NULL;
+ }
+
+#ifdef ENABLE_HTTPS
+ if (netdata_use_ssl_on_stream & NETDATA_SSL_FORCE ){
+ security_start_ssl(NETDATA_SSL_CONTEXT_STREAMING);
+ security_location_for_context(netdata_client_ctx, netdata_ssl_ca_file, netdata_ssl_ca_path);
+ }
+#endif
+
+ info("STREAM %s [send]: thread created (task id %d)", s->host->hostname, s->task_id);
+
+ s->timeout = (int)appconfig_get_number(&stream_config, CONFIG_SECTION_STREAM, "timeout seconds", 60);
+ s->default_port = (int)appconfig_get_number(&stream_config, CONFIG_SECTION_STREAM, "default port", 19999);
+ s->buffer->max_size =
+ (size_t)appconfig_get_number(&stream_config, CONFIG_SECTION_STREAM, "buffer size bytes", 1024 * 1024);
+ s->reconnect_delay =
+ (unsigned int)appconfig_get_number(&stream_config, CONFIG_SECTION_STREAM, "reconnect delay seconds", 5);
+ remote_clock_resync_iterations = (unsigned int)appconfig_get_number(
+ &stream_config, CONFIG_SECTION_STREAM,
+ "initial clock resync iterations",
+ remote_clock_resync_iterations); // TODO: REMOVE FOR SLEW / GAPFILLING
+
+ // initialize rrdpush globals
+ s->host->rrdpush_sender_connected = 0;
+ if(pipe(s->host->rrdpush_sender_pipe) == -1) {
+ error("STREAM %s [send]: cannot create required pipe. DISABLING STREAMING THREAD", s->host->hostname);
+ return NULL;
+ }
+
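+ // poll() watches two file descriptors: the collector wake-up pipe and the parent socket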
+ enum {
+ Collector,
+ Socket
+ };
+ struct pollfd fds[2];
+ fds[Collector].fd = s->host->rrdpush_sender_pipe[PIPE_READ];
+ fds[Collector].events = POLLIN;
+
+ netdata_thread_cleanup_push(rrdpush_sender_thread_cleanup_callback, s->host);
+ for(; s->host->rrdpush_send_enabled && !netdata_exit ;) {
+ // check for outstanding cancellation requests
+ netdata_thread_testcancel();
+
+ // The connection attempt blocks (after which we use the socket in non-blocking mode)
+ if(unlikely(s->host->rrdpush_sender_socket == -1)) {
+ s->overflow = 0;
+ s->read_len = 0;
+ s->buffer->read = 0;
+ s->buffer->write = 0;
+ attempt_to_connect(s);
+ if (s->version >= VERSION_GAP_FILLING) {
+ time_t now = now_realtime_sec();
+ sender_start(s);
+ buffer_sprintf(s->build, "TIMESTAMP %ld", now);
+ sender_commit(s);
+ }
+ rrdpush_claimed_id(s->host);
+ continue;
+ }
+
+ // If the TCP window never opened then something is wrong, restart connection
+ if(unlikely(now_monotonic_sec() - s->last_sent_t > s->timeout)) {
+ error("STREAM %s [send to %s]: could not send metrics for %d seconds - closing connection - we have sent %zu bytes on this connection via %zu send attempts.", s->host->hostname, s->connected_to, s->timeout, s->sent_bytes_on_this_connection, s->send_attempts);
+ rrdpush_sender_thread_close_socket(s->host);
+ continue;
+ }
+
+ // Wait until the buffer opens in the socket, or a rrdset_done_push() wakes us
+ fds[Collector].revents = 0;
+ fds[Socket].revents = 0;
+ fds[Socket].fd = s->host->rrdpush_sender_socket;
+
+ netdata_mutex_lock(&s->mutex);
+ char *chunk;
+ size_t outstanding = cbuffer_next_unsafe(s->host->sender->buffer, &chunk);
+ chunk = NULL; // Do not cache pointer outside of region - could be invalidated
+ netdata_mutex_unlock(&s->mutex);
+ if(outstanding) {
+ s->send_attempts++;
+ fds[Socket].events = POLLIN | POLLOUT;
+ }
+ else {
+ fds[Socket].events = POLLIN;
+ }
+
+ int retval = poll(fds, 2, 1000);
+ debug(D_STREAM, "STREAM: poll() finished collector=%d socket=%d (current chunk %zu bytes)...",
+ fds[Collector].revents, fds[Socket].revents, outstanding);
+ if(unlikely(netdata_exit)) break;
+
+ // Spurious wake-ups without error - loop again
+ if (retval == 0 || ((retval == -1) && (errno == EAGAIN || errno == EINTR)))
+ {
+ debug(D_STREAM, "Spurious wakeup");
+ continue;
+ }
+ // Only errors from poll() are internal, but try restarting the connection
+ if(unlikely(retval == -1)) {
+ error("STREAM %s [send to %s]: failed to poll(). Closing socket.", s->host->hostname, s->connected_to);
+ rrdpush_sender_thread_close_socket(s->host);
+ continue;
+ }
+
+ // If the collector woke us up then empty the pipe to remove the signal
+ if (fds[Collector].revents & POLLIN || fds[Collector].revents & POLLPRI) {
+ debug(D_STREAM, "STREAM: Data added to send buffer (current buffer chunk %zu bytes)...", outstanding);
+
+ char buffer[1000 + 1];
+ if (read(s->host->rrdpush_sender_pipe[PIPE_READ], buffer, 1000) == -1)
+ error("STREAM %s [send to %s]: cannot read from internal pipe.", s->host->hostname, s->connected_to);
+ }
+
+ // Read as much as possible to fill the buffer, split into full lines for execution.
+ if (fds[Socket].revents & POLLIN)
+ attempt_read(s);
+ execute_commands(s);
+
+ // If we have data and have seen the TCP window open then try to close it by a transmission.
+ if (outstanding && fds[Socket].revents & POLLOUT)
+ attempt_to_send(s);
+
+ // TODO-GAPS - why do we only check this on the socket, not the pipe?
+ if (outstanding) {
+ char *error = NULL;
+ if (unlikely(fds[Socket].revents & POLLERR))
+ error = "socket reports errors (POLLERR)";
+ else if (unlikely(fds[Socket].revents & POLLHUP))
+ error = "connection closed by remote end (POLLHUP)";
+ else if (unlikely(fds[Socket].revents & POLLNVAL))
+ error = "connection is invalid (POLLNVAL)";
+ if(unlikely(error)) {
+ error("STREAM %s [send to %s]: restart stream because %s - %zu bytes transmitted.", s->host->hostname,
+ s->connected_to, error, s->sent_bytes_on_this_connection);
+ rrdpush_sender_thread_close_socket(s->host);
+ }
+ }
+
+ // protection from overflow
+ if (s->overflow) {
+ errno = 0;
+ error("STREAM %s [send to %s]: buffer full (%zu-bytes) after %zu bytes. Restarting connection",
+ s->host->hostname, s->connected_to, s->buffer->size, s->sent_bytes_on_this_connection);
+ rrdpush_sender_thread_close_socket(s->host);
+ }
+ }
+
+ netdata_thread_cleanup_pop(1);
+ return NULL;
+}
diff --git a/streaming/stream.conf b/streaming/stream.conf
new file mode 100644
index 0000000..b514263
--- /dev/null
+++ b/streaming/stream.conf
@@ -0,0 +1,205 @@
+# netdata configuration for aggregating data from remote hosts
+#
+# API keys authorize a pair of sending-receiving netdata servers.
+# Once their communication is authorized, they can exchange metrics for any
+# number of hosts.
+#
+# You can generate API keys with the Linux command: uuidgen
+
+
+# -----------------------------------------------------------------------------
+# 1. ON CHILD NETDATA - THE ONE THAT WILL BE SENDING METRICS
+
+[stream]
+ # Enable this on child nodes, to have them send metrics.
+ enabled = no
+
+ # Where is the receiving netdata?
+ # A space separated list of:
+ #
+ # [PROTOCOL:]HOST[%INTERFACE][:PORT][:SSL]
+ #
+ # If many are given, the first available will get the metrics.
+ #
+ # PROTOCOL = tcp, udp, or unix (only tcp and unix are supported by parent nodes)
+ # HOST = an IPv4 or IPv6 address, a hostname, or a unix domain socket path.
+ # IPv6 addresses should be given in brackets [ip:address]
+ # INTERFACE = the network interface to use (only for IPv6)
+ # PORT = the port number or service name (/etc/services)
+ # SSL = when this word appears at the end of the destination string,
+ # Netdata will encrypt the connection to the parent.
+ #
+ # This communication is not HTTP (it cannot be proxied by web proxies).
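+ #
+ # Example (illustrative addresses):
+ # destination = 10.11.12.1:19999 10.11.12.2 parent.example.com:19999:SSL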
+ destination =
+
+ # Skip Certificate verification?
+ #
+ # The netdata child is configured to reject invalid SSL/TLS certificates,
+ # so certificates that are self-signed or expired will stop the streaming.
+ # If the server certificate is not valid, you can allow the use of
+ # 'bad' certificates by setting the next option to 'yes'.
+ #
+ #ssl skip certificate verification = yes
+
+ # Certificate Authority Path
+ #
+ # OpenSSL has a default directory where the known certificates are stored.
+ # If necessary, you can change this directory using the variable
+ # "CApath"
+ #
+ #CApath = /etc/ssl/certs/
+
+ # Certificate Authority file
+ #
+ # When the Netdata parent has a certificate that is not recognized as valid,
+ # you can add this certificate to the list of known certificates in CApath
+ # and pass it to Netdata as an argument.
+ #
+ #CAfile = /etc/ssl/certs/cert.pem
+
+ # The API_KEY to use (as the sender)
+ api key =
+
+ # The timeout to connect and send metrics
+ timeout seconds = 60
+
+ # If the destination line above does not specify a port, use this
+ default port = 19999
+
+ # filter the charts to be streamed
+ # netdata SIMPLE PATTERN:
+ # - space separated list of patterns (use \ to include spaces in patterns)
+ # - use * as wildcard, any number of times within each pattern
+ # - prefix a pattern with ! for a negative match (i.e. do not stream the charts it matches)
+ # - the order of patterns is important (left to right)
+ # To send all except a few, use: !this !that * (i.e. append a wildcard pattern)
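+ # Example (illustrative chart names):
+ # send charts matching = !apps.cpu !system.load *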
+ send charts matching = *
+
+ # The buffer to use for sending metrics.
+ # 1MB is good for 10-20 seconds of data, so increase this if you expect latencies.
+ # The buffer is flushed on reconnects (this does not prevent gaps in the charts).
+ buffer size bytes = 1048576
+
+ # If the connection fails, or it disconnects,
+ # retry after that many seconds.
+ reconnect delay seconds = 5
+
+ # Sync the clock of the charts for that many iterations, when starting.
+ initial clock resync iterations = 60
+
+# -----------------------------------------------------------------------------
+# 2. ON PARENT NETDATA - THE ONE THAT WILL BE RECEIVING METRICS
+
+# You can have one API key per child,
+# or the same API key for all child nodes.
+#
+# netdata searches for options in this order:
+#
+# a) parent netdata settings (netdata.conf)
+# b) [stream] section (above)
+# c) [API_KEY] section (below, settings for the API key)
+# d) [MACHINE_GUID] section (below, settings for each machine)
+#
+# You can combine the above (the more specific setting will be used).
+
+# API key authentication
+# If the key is not listed here, it will not be able to push metrics.
+
+# [API_KEY] is [YOUR-API-KEY], e.g. [11111111-2222-3333-4444-555555555555]
+[API_KEY]
+ # Default settings for this API key
+
+ # You can disable the API key, by setting this to: no
+ # The default (for unknown API keys) is: no
+ enabled = no
+
+ # A list of simple patterns matching the IPs of the servers that
+ # will be pushing metrics using this API key.
+ # The metrics are received via the API port, so the same IPs
+ # should also be matched at netdata.conf [web].allow connections from
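+ # Example (illustrative): accept a subnet, but reject one address:
+ # allow from = !10.1.2.3 10.*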
+ allow from = *
+
+ # The default history in entries, for all hosts using this API key.
+ # You can also set it per host below.
+ # If you don't set it here, the history size of the central netdata
+ # will be used.
+ default history = 3600
+
+ # The default memory mode to be used for all hosts using this API key.
+ # You can also set it per host below.
+ # If you don't set it here, the memory mode of netdata.conf will be used.
+ # Valid modes:
+ # save save on exit, load on start
+ # map like swap (continuously syncing to disks - you need SSD)
+ # ram keep it in RAM, don't touch the disk
+ # none no database at all (use this on headless proxies)
+ # dbengine like a traditional database
+ default memory mode = ram
+
+ # Shall we enable health monitoring for the hosts using this API key?
+ # 3 possible values:
+ # yes enable alarms
+ # no do not enable alarms
+ # auto enable alarms, only when the sending netdata is connected. For ephemeral child nodes or child system restarts,
+ # ensure that the netdata process on the child is gracefully stopped, to prevent invalid last_collected alarms
+ # You can also set it per host, below.
+ # The default is taken from [health].enabled of netdata.conf
+ health enabled by default = auto
+
+ # postpone alarms for a short period after the sender is connected
+ default postpone alarms on connect seconds = 60
+
+ # need to route metrics differently? set these.
+ # the defaults are the ones at the [stream] section (above)
+ #default proxy enabled = yes | no
+ #default proxy destination = IP:PORT IP:PORT ...
+ #default proxy api key = API_KEY
+ #default proxy send charts matching = *
+
+
+# -----------------------------------------------------------------------------
+# 3. PER SENDING HOST SETTINGS, ON PARENT NETDATA
+# THIS IS OPTIONAL - YOU DON'T HAVE TO CONFIGURE IT
+
+# This section exists to give you finer control of the parent settings for each
+# child host, when the same API key is used by many netdata child nodes / proxies.
+#
+# Each netdata has a unique GUID - generated the first time netdata starts.
+# You can find it at /var/lib/netdata/registry/netdata.public.unique.id
+# (at the child).
+#
+# The host sending data will have one. If the host is not ephemeral,
+# you can give settings for each sending host here.
+
+[MACHINE_GUID]
+ # enable this host: yes | no
+ # When disabled, the parent will not receive metrics for this host.
+ # THIS IS NOT A SECURITY MECHANISM - AN ATTACKER CAN SET ANY OTHER GUID.
+ # Use only the API key for security.
+ enabled = no
+
+ # A list of simple patterns matching the IPs of the servers that
+ # will be pushing metrics using this MACHINE GUID.
+ # The metrics are received via the API port, so the same IPs
+ # should also be matched at netdata.conf [web].allow connections from
+ # and at stream.conf [API_KEY].allow from
+ allow from = *
+
+ # The number of entries in the database
+ history = 3600
+
+ # The memory mode of the database: save | map | ram | none | dbengine
+ memory mode = save
+
+ # Health / alarms control: yes | no | auto
+ health enabled = yes
+
+ # postpone alarms when the sender connects
+ postpone alarms on connect seconds = 60
+
+ # need to route metrics differently?
+ # the defaults are the ones at the [API KEY] section
+ #proxy enabled = yes | no
+ #proxy destination = IP:PORT IP:PORT ...
+ #proxy api key = API_KEY
+ #proxy send charts matching = *
diff --git a/system/Makefile.am b/system/Makefile.am
new file mode 100644
index 0000000..5323738
--- /dev/null
+++ b/system/Makefile.am
@@ -0,0 +1,57 @@
+# Copyright (C) 2015 Alon Bar-Lev <alon.barlev@gmail.com>
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+CLEANFILES = \
+ edit-config \
+ netdata-openrc \
+ netdata.logrotate \
+ netdata.service \
+ netdata.service.v235 \
+ netdata-init-d \
+ netdata-lsb \
+ netdata-freebsd \
+ netdata.plist \
+ netdata.crontab \
+ netdata-updater.service \
+ $(NULL)
+
+include $(top_srcdir)/build/subst.inc
+SUFFIXES = .in
+
+dist_config_SCRIPTS = \
+ edit-config \
+ $(NULL)
+
+# Explicitly install directories to avoid permission issues due to umask
+install-exec-local:
+ $(INSTALL) -d $(DESTDIR)$(configdir)
+
+nodist_noinst_DATA = \
+ netdata-openrc \
+ netdata.logrotate \
+ netdata.service \
+ netdata.service.v235 \
+ netdata-init-d \
+ netdata-lsb \
+ netdata-freebsd \
+ netdata.plist \
+ netdata.crontab \
+ netdata-updater.service \
+ $(NULL)
+
+dist_noinst_DATA = \
+ edit-config.in \
+ netdata-openrc.in \
+ netdata.logrotate.in \
+ netdata.service.in \
+ netdata.service.v235.in \
+ netdata-init-d.in \
+ netdata-lsb.in \
+ netdata-freebsd.in \
+ netdata.plist.in \
+ netdata.conf \
+ netdata.crontab.in \
+ netdata-updater.service.in \
+ netdata-updater.timer \
+ $(NULL)
diff --git a/system/edit-config.in b/system/edit-config.in
new file mode 100755
index 0000000..050d97c
--- /dev/null
+++ b/system/edit-config.in
@@ -0,0 +1,83 @@
+#!/usr/bin/env sh
+
+[ -f /etc/profile ] && . /etc/profile
+
+file="${1}"
+
+if [ "$(command -v editor)" ]; then
+ EDITOR="${EDITOR-editor}"
+else
+ EDITOR="${EDITOR-vi}"
+fi
+
+[ -z "${NETDATA_USER_CONFIG_DIR}" ] && NETDATA_USER_CONFIG_DIR="@configdir_POST@"
+[ -z "${NETDATA_STOCK_CONFIG_DIR}" ] && NETDATA_STOCK_CONFIG_DIR="@libconfigdir_POST@"
+
+if [ -z "${file}" ]; then
+ cat << EOF
+
+USAGE:
+ ${0} FILENAME
+
+ Copy and edit the stock config file named: FILENAME
+ If FILENAME has already been copied, it will be edited as-is.
+
+ The EDITOR shell variable is used to define the editor to be used.
+
+ Stock config files at: '${NETDATA_STOCK_CONFIG_DIR}'
+ User config files at: '${NETDATA_USER_CONFIG_DIR}'
+
+ Available files in '${NETDATA_STOCK_CONFIG_DIR}' to copy and edit:
+
+EOF
+
+ cd "${NETDATA_STOCK_CONFIG_DIR}" || exit 1
+ ls >&2 -R ./*.conf ./*/*.conf
+ exit 1
+
+fi
+
+edit() {
+ echo >&2 "Editing '${1}' ..."
+
+ # check we can edit
+ if [ ! -w "${1}" ]; then
+ echo >&2 "Cannot write to ${1}! Aborting ..."
+ exit 1
+ fi
+
+ "${EDITOR}" "${1}"
+ exit $?
+}
+
+copy_and_edit() {
+ # check we can copy
+ if [ ! -w "${NETDATA_USER_CONFIG_DIR}" ]; then
+ echo >&2 "Cannot write to ${NETDATA_USER_CONFIG_DIR}! Aborting ..."
+ exit 1
+ fi
+
+ if [ ! -f "${NETDATA_USER_CONFIG_DIR}/${1}" ]; then
+ echo >&2 "Copying '${NETDATA_STOCK_CONFIG_DIR}/${1}' to '${NETDATA_USER_CONFIG_DIR}/${1}' ... "
+ cp -p "${NETDATA_STOCK_CONFIG_DIR}/${1}" "${NETDATA_USER_CONFIG_DIR}/${1}" || exit 1
+ fi
+
+ edit "${NETDATA_USER_CONFIG_DIR}/${1}"
+}
+
+# make sure it is not an absolute filename
+c1="$(echo "${file}" | cut -b 1)"
+if [ "${c1}" = "/" ] || [ "${c1}" = "." ]; then
+ echo >&2 "Please don't use filenames starting with '/' or '.'"
+ exit 1
+fi
+
+# already exists
+[ -f "${NETDATA_USER_CONFIG_DIR}/${file}" ] && edit "${NETDATA_USER_CONFIG_DIR}/${file}"
+
+# stock config is valid, copy and edit
+[ -f "${NETDATA_STOCK_CONFIG_DIR}/${file}" ] && copy_and_edit "${file}"
+
+# no such config found
+echo >&2 "File '${file}' is not found in '${NETDATA_STOCK_CONFIG_DIR}'"
+exit 1
diff --git a/system/netdata-freebsd.in b/system/netdata-freebsd.in
new file mode 100644
index 0000000..2d4f457
--- /dev/null
+++ b/system/netdata-freebsd.in
@@ -0,0 +1,52 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+. /etc/rc.subr
+
+name=netdata
+rcvar=netdata_enable
+
+piddir="@localstatedir_POST@/run/netdata"
+pidfile="${piddir}/netdata.pid"
+
+command="@sbindir_POST@/netdata"
+command_args="-P ${pidfile}"
+
+required_files="@configdir_POST@/netdata.conf"
+
+start_precmd="netdata_prestart"
+stop_postcmd="netdata_poststop"
+
+extra_commands="reloadhealth savedb"
+
+reloadhealth_cmd="netdata_reloadhealth"
+savedb_cmd="netdata_savedb"
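+# e.g. (illustrative): service netdata reloadhealth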
+
+netdata_prestart()
+{
+ [ ! -d "${piddir}" ] && mkdir -p "${piddir}"
+ return 0
+}
+
+netdata_poststop()
+{
+ [ -f "${pidfile}" ] && rm "${pidfile}"
+ return 0
+}
+
+netdata_reloadhealth()
+{
+ p=`cat ${pidfile}`
+ kill -USR2 ${p} && echo "Sent USR2 (reload health) to pid ${p}"
+ return 0
+}
+
+netdata_savedb()
+{
+ p=`cat ${pidfile}`
+ kill -USR2 ${p} && echo "Sent USR1 (save db) to pid ${p}"
+ return 0
+}
+
+load_rc_config $name
+run_rc_command "$1"
diff --git a/system/netdata-init-d.in b/system/netdata-init-d.in
new file mode 100644
index 0000000..9ac5101
--- /dev/null
+++ b/system/netdata-init-d.in
@@ -0,0 +1,94 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-3.0-or-later
+#
+# netdata Real-time performance monitoring, done right
+# chkconfig: 345 99 01
+# description: Netdata is a daemon that collects data in real-time (per second)
+# and presents a web site to view and analyze them. The presentation
+# is also real-time and full of interactive charts that precisely
+# render all collected values.
+# processname: netdata
+
+# Source functions
+. /etc/rc.d/init.d/functions
+
+DAEMON="netdata"
+DAEMON_PATH=@sbindir_POST@
+PIDFILE_PATH=@localstatedir_POST@/run/netdata
+PIDFILE=$PIDFILE_PATH/$DAEMON.pid
+DAEMONOPTS="-P $PIDFILE"
+STOP_TIMEOUT="60"
+
+[ -e /etc/sysconfig/$DAEMON ] && . /etc/sysconfig/$DAEMON
+
+LOCKFILE=/var/lock/subsys/$DAEMON
+
+service_start()
+{
+ [ -x $DAEMON_PATH ] || exit 5
+ [ ! -d $PIDFILE_PATH ] && mkdir -p $PIDFILE_PATH
+ echo -n "Starting $DAEMON..."
+ daemon $DAEMON_PATH/$DAEMON $DAEMONOPTS
+ RETVAL=$?
+ echo
+ [ $RETVAL -eq 0 ] && touch $LOCKFILE
+ return $RETVAL
+}
+
+service_stop()
+{
+ printf "%-50s" "Stopping $DAEMON..."
+ killproc -p ${PIDFILE} -d ${STOP_TIMEOUT} $DAEMON
+ RETVAL=$?
+ echo
+ [ $RETVAL -eq 0 ] && rm -f ${PIDFILE} ${LOCKFILE}
+ return $RETVAL
+}
+
+condrestart()
+{
+ if ! service_status > /dev/null; then
+ RETVAL=$1
+ return $RETVAL
+ fi
+
+ service_stop
+ service_start
+}
+
+service_status()
+{
+ status -p ${PIDFILE} $DAEMON_PATH/$DAEMON
+}
+
+service_status_quiet()
+{
+ status -p ${PIDFILE} $DAEMON_PATH/$DAEMON >/dev/null 2>&1
+}
+
+case "$1" in
+start)
+ service_status_quiet && exit 0
+ service_start
+;;
+stop)
+ service_status_quiet || exit 0
+ service_stop
+;;
+restart)
+ service_stop
+ service_start
+;;
+try-restart)
+ condrestart 0
+ ;;
+force-reload)
+ condrestart 7
+;;
+status)
+ service_status
+;;
+*)
+ echo "Usage: $0 {start|stop|restart|try-restart|force-reload|status}"
+ exit 3
+esac
diff --git a/system/netdata-lsb.in b/system/netdata-lsb.in
new file mode 100644
index 0000000..ca197a5
--- /dev/null
+++ b/system/netdata-lsb.in
@@ -0,0 +1,114 @@
+#!/usr/bin/env bash
+#
+# Netdata LSB start script
+#
+# Copyright:
+# SPDX-License-Identifier: GPL-3.0-or-later
+#
+# Author:
+# Costa Tsaousis <costa@netdata.cloud>
+# Pavlos Emm. Katsoulakis <paul@netdata.cloud>
+
+### BEGIN INIT INFO
+# Provides: netdata
+# Required-Start: $local_fs $remote_fs $network $named $time
+# Required-Stop: $local_fs $remote_fs $network $named $time
+# Should-Start: $local_fs $network $named $remote_fs $time $all
+# Should-Stop: $local_fs $network $named $remote_fs $time $all
+# Default-Start: 2 3 4 5
+# Default-Stop: 0 1 6
+# Short-Description: Start and stop the netdata real-time monitoring server daemon
+# Description: Controls the main netdata monitoring server daemon "netdata"
+#               and all its plugins.
+### END INIT INFO
+#
+set -e
+set -u
+${DEBIAN_SCRIPT_DEBUG:+ set -v -x}
+
+DAEMON="netdata"
+DAEMON_PATH=@sbindir_POST@
+PIDFILE_PATH=@localstatedir_POST@/run/netdata
+PIDFILE=$PIDFILE_PATH/$DAEMON.pid
+DAEMONOPTS="-P $PIDFILE"
+
+test -x $DAEMON_PATH/$DAEMON || exit 0
+
+. /lib/lsb/init-functions
+
+# Safeguard (relative paths, core dumps..)
+cd /
+umask 022
+
+service_start() {
+ if [ ! -d $PIDFILE_PATH ]; then
+ mkdir -p $PIDFILE_PATH
+ fi
+
+ log_daemon_msg "Starting real-time performance monitoring" "netdata"
+ start_daemon -p $PIDFILE $DAEMON_PATH/$DAEMON $DAEMONOPTS
+ RETVAL=$?
+ log_end_msg $RETVAL
+ return $RETVAL
+}
+
+service_stop() {
+ log_daemon_msg "Stopping real-time performance monitoring" "netdata"
+ killproc -p ${PIDFILE} $DAEMON_PATH/$DAEMON
+ RETVAL=$?
+ log_end_msg $RETVAL
+
+ if [ $RETVAL -eq 0 ]; then
+ rm -f ${PIDFILE}
+ fi
+ return $RETVAL
+}
+
+condrestart() {
+ if ! service_status > /dev/null; then
+ RETVAL=$1
+ return
+ fi
+
+ service_stop
+ service_start
+}
+
+service_status() {
+ status_of_proc -p $PIDFILE $DAEMON_PATH/$DAEMON netdata
+}
+
+
+#
+# main()
+#
+
+case "${1:-''}" in
+ 'start')
+ service_start
+ ;;
+
+ 'stop')
+ service_stop
+ ;;
+
+ 'restart')
+ service_stop
+ service_start
+ ;;
+
+ 'try-restart')
+ condrestart 0
+ ;;
+
+ 'force-reload')
+ condrestart 7
+ ;;
+
+ 'status')
+ service_status && exit 0 || exit $?
+ ;;
+ *)
+ echo "Usage: $0 {start|stop|restart|try-restart|force-reload|status}"
+ exit 1
+esac
diff --git a/system/netdata-openrc.in b/system/netdata-openrc.in
new file mode 100644
index 0000000..2acf282
--- /dev/null
+++ b/system/netdata-openrc.in
@@ -0,0 +1,60 @@
+#!/sbin/openrc-run
+# SPDX-License-Identifier: GPL-3.0-or-later
+
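+# To enable this service at boot (standard OpenRC convention):
+#   rc-update add netdata default
+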
+# The user netdata is configured to run as.
+# If you edit its configuration file to set a different
+# user, set it here too, so that its files are chowned accordingly.
+: "${NETDATA_OWNER:=netdata:netdata}"
+
+# The timeout in seconds to wait for netdata
+# to save its database on disk and exit.
+: "${NETDATA_WAIT_EXIT_TIMEOUT:=60}"
+
+# When set to 1, if netdata does not exit in
+# NETDATA_WAIT_EXIT_TIMEOUT, we will force it
+# to exit.
+: "${NETDATA_FORCE_EXIT:=0}"
+
+# Netdata will be started after these services,
+# but only if they are enabled to start.
+: "${NETDATA_START_AFTER_SERVICES:=apache2 squid nginx mysql named opensips upsd hostapd postfix lm_sensors}"
+
+extra_started_commands="reload rotate save"
+pidfile="@localstatedir_POST@/run/netdata/netdata.pid"
+command="@sbindir_POST@/netdata"
+command_args="-P ${pidfile} ${NETDATA_EXTRA_ARGS}"
+start_stop_daemon_args="-u ${NETDATA_OWNER}"
+required_files="/etc/netdata/netdata.conf"
+if [ "${NETDATA_FORCE_EXIT}" -eq 1 ]; then
+ retry="TERM/${NETDATA_WAIT_EXIT_TIMEOUT}/KILL/1"
+else
+ retry="TERM/${NETDATA_WAIT_EXIT_TIMEOUT}"
+fi
+
+depend() {
+ use logger
+ need net
+ after ${NETDATA_START_AFTER_SERVICES}
+}
+
+start_pre() {
+ checkpath -o ${NETDATA_OWNER} -d @localstatedir_POST@/cache/netdata @localstatedir_POST@/run/netdata
+}
+
+reload() {
+ ebegin "Reloading Netdata"
+ start-stop-daemon --signal SIGUSR2 --pidfile "${pidfile}"
+ eend $? "Failed to reload Netdata"
+}
+
+rotate() {
+ ebegin "Logrotating Netdata"
+ start-stop-daemon --signal SIGHUP --pidfile "${pidfile}"
+ eend $? "Failed to logrotate Netdata"
+}
+
+save() {
+ ebegin "Saving Netdata database"
+ start-stop-daemon --signal SIGUSR1 --pidfile "${pidfile}"
+ eend $? "Failed to save Netdata database"
+}
diff --git a/system/netdata-updater.service.in b/system/netdata-updater.service.in
new file mode 100644
index 0000000..d0bd499
--- /dev/null
+++ b/system/netdata-updater.service.in
@@ -0,0 +1,8 @@
+[Unit]
+Description=Daily auto-updates for Netdata
+RefuseManualStart=no
+RefuseManualStop=yes
+
+[Service]
+Type=oneshot
+ExecStart=@pkglibexecdir_POST@/netdata-updater.sh
diff --git a/system/netdata-updater.timer b/system/netdata-updater.timer
new file mode 100644
index 0000000..8b36e46
--- /dev/null
+++ b/system/netdata-updater.timer
@@ -0,0 +1,12 @@
+[Unit]
+Description=Daily auto-updates for Netdata
+RefuseManualStart=no
+RefuseManualStop=no
+
+[Timer]
+Persistent=false
+OnCalendar=daily
+Unit=netdata-updater.service
+
+[Install]
+WantedBy=timers.target
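+
+# To activate the daily schedule, enable the timer (the service itself is oneshot):
+#   systemctl enable --now netdata-updater.timer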
diff --git a/system/netdata.conf b/system/netdata.conf
new file mode 100644
index 0000000..6341fca
--- /dev/null
+++ b/system/netdata.conf
@@ -0,0 +1,28 @@
+# netdata configuration
+#
+# You can download the latest version of this file, using:
+#
+# wget -O /etc/netdata/netdata.conf http://localhost:19999/netdata.conf
+# or
+# curl -o /etc/netdata/netdata.conf http://localhost:19999/netdata.conf
+#
+# You can uncomment and change any of the options below.
+# The value shown in the commented settings is the default value.
+#
+
+[global]
+ run as user = netdata
+
+ # the default database size - 1 hour
+ history = 3600
+
+ # some defaults to run netdata with least priority
+ process scheduling policy = idle
+ OOM score = 1000
+
+[web]
+ web files owner = root
+ web files group = netdata
+
+ # by default do not expose the netdata port
+ bind to = localhost
diff --git a/system/netdata.crontab.in b/system/netdata.crontab.in
new file mode 100644
index 0000000..8f0527e
--- /dev/null
+++ b/system/netdata.crontab.in
@@ -0,0 +1 @@
+57 2 * * * root @pkglibexecdir_POST@/netdata-updater.sh
diff --git a/system/netdata.logrotate.in b/system/netdata.logrotate.in
new file mode 100644
index 0000000..a766b6c
--- /dev/null
+++ b/system/netdata.logrotate.in
@@ -0,0 +1,12 @@
+@localstatedir_POST@/log/netdata/*.log {
+ daily
+ missingok
+ rotate 14
+ compress
+ delaycompress
+ notifempty
+ sharedscripts
+ postrotate
+ /bin/kill -HUP `cat @localstatedir_POST@/run/netdata/netdata.pid 2>/dev/null` 2>/dev/null || true
+ endscript
+}
diff --git a/system/netdata.plist.in b/system/netdata.plist.in
new file mode 100644
index 0000000..a969b31
--- /dev/null
+++ b/system/netdata.plist.in
@@ -0,0 +1,15 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
+<!-- SPDX-License-Identifier: GPL-3.0-or-later -->
+<plist version="1.0">
+<dict>
+ <key>Label</key>
+ <string>com.github.netdata</string>
+ <key>ProgramArguments</key>
+ <array>
+ <string>@sbindir_POST@/netdata</string>
+ </array>
+ <key>RunAtLoad</key>
+ <true/>
+</dict>
+</plist>
diff --git a/system/netdata.service.in b/system/netdata.service.in
new file mode 100644
index 0000000..1947b15
--- /dev/null
+++ b/system/netdata.service.in
@@ -0,0 +1,77 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+[Unit]
+Description=Real time performance monitoring
+
+# append here any other services netdata should wait for before starting
+After=network.target httpd.service squid.service nfs-server.service mysqld.service mysql.service named.service postfix.service chronyd.service
+
+[Service]
+Type=simple
+User=netdata
+Group=netdata
+RuntimeDirectory=netdata
+RuntimeDirectoryMode=0775
+PIDFile=@localstatedir_POST@/run/netdata/netdata.pid
+ExecStart=@sbindir_POST@/netdata -P @localstatedir_POST@/run/netdata/netdata.pid -D
+ExecStartPre=/bin/mkdir -p @localstatedir_POST@/cache/netdata
+ExecStartPre=/bin/chown -R netdata:netdata @localstatedir_POST@/cache/netdata
+ExecStartPre=/bin/mkdir -p @localstatedir_POST@/run/netdata
+ExecStartPre=/bin/chown -R netdata:netdata @localstatedir_POST@/run/netdata
+ExecStopPost=@pluginsdir_POST@/reset_netdata_trace.sh
+PermissionsStartOnly=true
+
+# saving a big db on slow disks may need some time
+TimeoutStopSec=150
+
+# restart netdata if it crashes
+Restart=on-failure
+RestartSec=30
+
+# The minimum netdata Out-Of-Memory (OOM) score.
+# netdata (via [global].OOM score in netdata.conf) can only increase the value set here.
+# To decrease it, set the minimum here and set the same or a higher value in netdata.conf.
+# Valid values: -1000 (never kill netdata) to 1000 (always kill netdata).
+OOMScoreAdjust=1000
+
+# Valid policies: other (the system default) | batch | idle | fifo | rr
+# To give netdata the max priority, set CPUSchedulingPolicy=rr and CPUSchedulingPriority=99
+CPUSchedulingPolicy=idle
+
+# This sets the scheduling priority (for policies: rr and fifo).
+# Priority gets values 1 (lowest) to 99 (highest).
+#CPUSchedulingPriority=1
+
+# For scheduling policy 'other' and 'batch', this sets the lowest niceness of netdata (-20 highest to 19 lowest).
+#Nice=0
+
+# Capabilities
+# is required for freeipmi and slabinfo plugins
+CapabilityBoundingSet=CAP_DAC_OVERRIDE
+# is required for apps plugin
+CapabilityBoundingSet=CAP_DAC_READ_SEARCH
+# is required for freeipmi plugin
+CapabilityBoundingSet=CAP_FOWNER
+# is required for apps, perf and slabinfo plugins
+CapabilityBoundingSet=CAP_SETPCAP
+# is required for perf plugin
+CapabilityBoundingSet=CAP_SYS_ADMIN
+# is required for apps plugin
+CapabilityBoundingSet=CAP_SYS_PTRACE
+# is required for ebpf plugin
+CapabilityBoundingSet=CAP_SYS_RESOURCE
+# is required for fping app
+CapabilityBoundingSet=CAP_NET_RAW
+# is required for cgroups plugin
+CapabilityBoundingSet=CAP_SYS_CHROOT
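+# The CapabilityBoundingSet= lines above are cumulative; to review the merged set:
+#   systemctl show netdata -p CapabilityBoundingSet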
+
+# Sandboxing
+ProtectSystem=full
+ProtectHome=read-only
+# PrivateTmp breaks netdatacli functionality. See https://github.com/netdata/netdata/issues/7587
+#PrivateTmp=true
+ProtectControlGroups=true
+# We whitelist this because it's the standard location to listen on a UNIX socket.
+ReadWriteDirectories=/run/netdata
+
+[Install]
+WantedBy=multi-user.target
diff --git a/system/netdata.service.v235.in b/system/netdata.service.v235.in
new file mode 100644
index 0000000..664c583
--- /dev/null
+++ b/system/netdata.service.v235.in
@@ -0,0 +1,48 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+[Unit]
+Description=Real time performance monitoring
+
+# append here any other services netdata should wait for before starting
+After=network.target httpd.service squid.service nfs-server.service mysqld.service mysql.service named.service postfix.service chronyd.service
+
+[Service]
+Type=simple
+User=netdata
+Group=netdata
+RuntimeDirectory=netdata
+CacheDirectory=netdata
+StateDirectory=netdata
+LogsDirectory=netdata
+RuntimeDirectoryMode=0775
+StateDirectoryMode=0755
+CacheDirectoryMode=0755
+LogsDirectoryMode=2750
+EnvironmentFile=-/etc/default/netdata
+ExecStart=/usr/sbin/netdata -D $EXTRA_OPTS
+
+# saving a big db on slow disks may need some time
+TimeoutStopSec=150
+
+# restart netdata if it crashes
+Restart=on-failure
+RestartSec=30
+
+# The minimum netdata Out-Of-Memory (OOM) score.
+# netdata (via [global].OOM score in netdata.conf) can only increase the value set here.
+# To decrease it, set the minimum here and set the same or a higher value in netdata.conf.
+# Valid values: -1000 (never kill netdata) to 1000 (always kill netdata).
+OOMScoreAdjust=1000
+
+# Valid policies: other (the system default) | batch | idle | fifo | rr
+# To give netdata the max priority, set CPUSchedulingPolicy=rr and CPUSchedulingPriority=99
+CPUSchedulingPolicy=idle
+
+# This sets the scheduling priority (for policies: rr and fifo).
+# Priority gets values 1 (lowest) to 99 (highest).
+#CPUSchedulingPriority=1
+
+# For scheduling policy 'other' and 'batch', this sets the lowest niceness of netdata (-20 highest to 19 lowest).
+#Nice=0
+
+[Install]
+WantedBy=multi-user.target
diff --git a/tests/Makefile.am b/tests/Makefile.am
new file mode 100644
index 0000000..29fe7ee
--- /dev/null
+++ b/tests/Makefile.am
@@ -0,0 +1,45 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+CLEANFILES = \
+ health_mgmtapi/health-cmdapi-test.sh \
+ acls/acl.sh \
+ urls/request.sh \
+ alarm_repetition/alarm.sh \
+ template_dimension/template_dim.sh \
+ $(NULL)
+
+include $(top_srcdir)/build/subst.inc
+SUFFIXES = .in
+
+dist_noinst_DATA = \
+ README.md \
+ web/lib/jasmine-jquery.js \
+ web/easypiechart.chart.spec.js \
+ web/easypiechart.percentage.spec.js \
+ web/karma.conf.js \
+ web/fixtures/easypiechart.chart.fixture1.html \
+ node.d/fronius.chart.spec.js \
+ node.d/fronius.parse.spec.js \
+ node.d/fronius.process.spec.js \
+ node.d/fronius.validation.spec.js \
+ health_mgmtapi/health-cmdapi-test.sh.in \
+ acls/acl.sh.in \
+ urls/request.sh.in \
+ alarm_repetition/alarm.sh.in \
+ template_dimension/template_dim.sh.in \
+ $(NULL)
+
+dist_plugins_SCRIPTS = \
+ health_mgmtapi/health-cmdapi-test.sh \
+ acls/acl.sh \
+ urls/request.sh \
+ alarm_repetition/alarm.sh \
+ template_dimension/template_dim.sh \
+ $(NULL)
+
+dist_noinst_SCRIPTS = \
+ stress.sh \
+ $(NULL)
diff --git a/tests/README.md b/tests/README.md
new file mode 100644
index 0000000..256b482
--- /dev/null
+++ b/tests/README.md
@@ -0,0 +1,148 @@
+<!--
+title: "Testing"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/tests/README.md
+-->
+
+# Testing
+
+This README is a manual on how to get started with unit testing the JavaScript and Node.js code.
+
+Original author: BrainDoctor (github), July 2017
+
+## Installation
+
+Tested on Linux Mint 18.2 Sara (an Ubuntu/Debian derivative).
+
+Run these steps as your regular development user (use sudo only where shown).
+
+```sh
+sudo apt-get install nodejs npm chromium-browser
+
+cd /path/to/your/netdata
+npm install
+```
+
+That should install the necessary node modules.
+
+Other browsers (Chrome, Firefox) work too. However, only Chromium 59 has been tested for headless unit testing.
+
+### Versions
+
+The commands above leave me with the following versions (July 2017):
+
+- nodejs: v4.2.6
+- npm: 3.5.2
+- chromium-browser: 59.0.3071.109
+- WebStorm (optional): 2017.1.4
+
+## Configuration
+
+### NPM
+
+The dependencies are declared in `netdata/package.json`. If you install a new NPM module, it gets added there. Future developers just need to execute `npm install` and every dependency gets installed automatically.
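+
+For example, installing a new dev dependency (the package name below is only an illustration) records it in `package.json` automatically:
+
+```sh
+cd /path/to/your/netdata
+npm install --save-dev jasmine-core
+```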
+
+### Karma
+
+Karma configuration is in `tests/web/karma.conf.js`. Documentation is provided via comments.
+
+### WebStorm
+
+If you use the JetBrains WebStorm IDE, you can integrate the karma runtime.
+
+#### for Karma (Client side testing)
+
+Headless Chromium:
+
+1. Run > Edit Configurations
+2. "+" > Karma
+3. - Name: Karma Headless Chromium
+ - Configuration file: /path/to/your/netdata/tests/web/karma.conf.js
+ - Browsers to start: ChromiumHeadless
+ - Node interpreter: /usr/bin/nodejs (MUST be absolute, NVM works too)
+ - Karma package: /path/to/your/netdata/node_modules/karma
+
+GUI Chromium is similar:
+
+1. Run > Edit Configurations
+2. "+" > Karma
+3. - Name: Karma Chromium
+ - Configuration file: /path/to/your/netdata/tests/web/karma.conf.js
+ - Browsers to start: Chromium
+ - Node interpreter: /usr/bin/nodejs (MUST be absolute, NVM works too)
+ - Karma package: /path/to/your/netdata/node_modules/karma
+
+You may add other browsers too (comma separated). With the "Browsers to start" field you can override any settings in karma.conf.js.
+
+Also, it is recommended to install the WebStorm IDE extension/addon in Chrome/Chromium for easier debugging.
+
+#### for node.d plugins (nodejs)
+
+1. Run > Edit Configurations
+2. "+" > Node.js
+3. - Name: Node.d plugins
+ - Node interpreter: /usr/bin/nodejs (MUST be absolute, NVM works too)
+ - JavaScript file: node_modules/jasmine-node/bin/jasmine-node
+ - Application parameters: --captureExceptions tests/node.d
+
+## Running
+
+### In WebStorm
+
+#### Karma
+
+Just launch the run configurations created above and they produce nice test trees:
+
+![karma_run_2](https://user-images.githubusercontent.com/12159026/28277789-559149f6-6b1b-11e7-9cc7-a81d81d12c35.png)
+
+#### node.js
+
+Debugging is awesome too!
+![node_debug](https://user-images.githubusercontent.com/12159026/28277879-8beee5ee-6b1b-11e7-9356-3156956f2282.png)
+
+### From CLI
+
+#### Karma
+
+```sh
+cd /path/to/your/netdata
+
+nodejs ./node_modules/karma/bin/karma start tests/web/karma.conf.js --single-run=true --browsers=ChromiumHeadless
+```
+
+will start the Karma server, launch Chromium in headless mode, and exit.
+
+If a test fails, it even produces a stack trace:
+![karma_run_1](https://user-images.githubusercontent.com/12159026/28277754-3682bebe-6b1b-11e7-8b7e-66b23d87177d.png)
+
+#### Node.d plugins
+
+```sh
+cd /path/to/your/netdata
+
+nodejs node_modules/jasmine-node/bin/jasmine-node --captureExceptions tests/node.d
+```
+
+will run the tests in `tests/node.d` and also produce a stack trace on error:
+![node_run](https://user-images.githubusercontent.com/12159026/28277812-65bb69b0-6b1b-11e7-8500-bcdbb3436574.png)
+
+### Coverage
+
+#### Karma
+
+Karma produces a nice HTML report that shows which code paths were executed. It is located somewhere in `/path/to/your/netdata/coverage/`.
+
+![coverage_2](https://user-images.githubusercontent.com/12159026/28277719-142146c4-6b1b-11e7-9992-3e88dee2efd2.png)
+and
+![coverage_1](https://user-images.githubusercontent.com/12159026/28277687-fa93e360-6b1a-11e7-995f-cbb4c5d012a7.png)
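+
+If you are unsure where exactly the report landed, a plain `find` will locate it:
+
+```sh
+find /path/to/your/netdata/coverage -name '*.html'
+```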
+
+#### Node.d
+
+Apparently, jasmine-node can produce a JUnit report with the `--junitreport` flag, but that output was not very useful. Maybe it's configurable?
+
+### CI
+
+The Karma and node.d runners can be integrated into Travis (AFAIK), but that is outside my ability.
+
+Note: Karma is for browser testing. On a build server, no GUI or browser may be available, unless the browsers support headless mode.
+
diff --git a/tests/acls/acl.sh.in b/tests/acls/acl.sh.in
new file mode 100644
index 0000000..9ac404c
--- /dev/null
+++ b/tests/acls/acl.sh.in
@@ -0,0 +1,126 @@
+#!/bin/bash -x
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+BASICURL="http://127.0.0.1"
+BASICURLS="https://127.0.0.1"
+
+NETDATA_VARLIB_DIR="/var/lib/netdata"
+RED='\033[0;31m'
+GREEN='\033[0;32m'
+YELLOW='\033[0;43m'
+NOCOLOR='\033[0m'
+
+# take the previous config file, replace the ACL with a new one,
+# and store the result in a new file
+change_file(){
+ sed "s/$1/$2/g" netdata.cfg > "$4"
+}
+
+NETDATAPID=""
+
+change_ssl_file(){
+ KEYROW="ssl key = $3/key.pem"
+ CERTROW="ssl certificate = $3/cert.pem"
+ sed "s@ssl key =@$KEYROW@g" netdata.ssl.cfg > tmp
+ sed "s@ssl certificate =@$CERTROW@g" tmp > tmp2
+ sed "s/$1/$2/g" tmp2 > "$4"
+}
+
+run_acl_tests() {
+ # Give netdata time to start properly
+ sleep 2
+
+ curl -v -k --tls-max 1.2 --create-dirs -o index.html "$2" 2> log_index.txt
+ curl -v -k --tls-max 1.2 --create-dirs -o netdata.txt "$2/netdata.conf" 2> log_nc.txt
+ curl -v -k --tls-max 1.2 --create-dirs -o badge.csv "$2/api/v1/badge.svg?chart=cpu.cpu0_interrupts" 2> log_badge.txt
+ curl -v -k --tls-max 1.2 --create-dirs -o info.txt "$2/api/v1/info" 2> log_info.txt
+ curl -H "X-Auth-Token: $1" -v -k --tls-max 1.2 --create-dirs -o health.csv "$2/api/v1/manage/health?cmd=LIST" 2> log_health.txt
+
+ TOT=$(grep -c "HTTP/1.1 301" log_*.txt | cut -d: -f2| grep -c 1)
+ if [ "$TOT" -ne "$4" ]; then
+ echo -e "${RED}I got a wrong number of redirects ($TOT) when SSL is enabled; I expected $4 ${NOCOLOR}"
+ rm log_* netdata.conf.test* netdata.txt health.csv index.html badge.csv tmp* key.pem cert.pem info.txt
+ kill $NETDATAPID
+ exit 1
+ elif [ "$TOT" -eq "$4" ] && [ "$4" -ne "0" ]; then
+ echo -e "${YELLOW}I got the correct number of redirects ($4) when SSL is enabled and HTTP access is attempted. ${NOCOLOR}"
+ return
+ fi
+
+ TOT=$(grep -c "HTTP/1.1 200 OK" log_* | cut -d: -f2| grep -c 1)
+ if [ "$TOT" -ne "$3" ]; then
+ echo -e "${RED}I got a wrong number of \"200 OK\" responses from the queries; I expected $3. ${NOCOLOR}"
+ kill $NETDATAPID
+ rm log_* netdata.conf.test* netdata.txt health.csv index.html badge.csv tmp* key.pem cert.pem info.txt
+ exit 1
+ fi
+
+ echo -e "${GREEN}ACLs were applied correctly ${NOCOLOR}"
+}
+
+CONF=$(grep "bind" netdata.cfg)
+MUSER=$(grep run netdata.cfg | cut -d= -f2|sed 's/^[ \t]*//')
+
+openssl req -new -newkey rsa:2048 -days 365 -nodes -x509 -sha512 -subj "/C=US/ST=Denied/L=Somewhere/O=Dis/CN=www.example.com" -keyout key.pem -out cert.pem
+chown "$MUSER" key.pem cert.pem
+CWD=$(pwd)
+
+if [ -f "${NETDATA_VARLIB_DIR}/netdata.api.key" ] ;then
+ read -r TOKEN < "${NETDATA_VARLIB_DIR}/netdata.api.key"
+else
+ TOKEN="NULL"
+fi
+
+change_file "$CONF" " bind to = *" "$CWD" "netdata.conf.test0"
+netdata -c "netdata.conf.test0" -D &
+NETDATAPID=$!
+run_acl_tests $TOKEN "$BASICURL:19999" 5 0
+kill $NETDATAPID
+
+change_ssl_file "$CONF" " bind to = *=dashboard|registry|badges|management|netdata.conf *:20000=dashboard|registry|badges|management *:20001=dashboard|registry|netdata.conf^SSL=optional *:20002=dashboard|registry" "$CWD" "netdata.conf.test1"
+netdata -c "netdata.conf.test1" -D &
+NETDATAPID=$!
+run_acl_tests $TOKEN "$BASICURL:19999" 5 5
+run_acl_tests $TOKEN "$BASICURLS:19999" 5 0
+
+run_acl_tests $TOKEN "$BASICURL:20000" 4 5
+run_acl_tests $TOKEN "$BASICURLS:20000" 4 0
+
+run_acl_tests $TOKEN "$BASICURL:20001" 4 0
+run_acl_tests $TOKEN "$BASICURLS:20001" 4 0
+
+run_acl_tests $TOKEN "$BASICURL:20002" 3 5
+run_acl_tests $TOKEN "$BASICURLS:20002" 3 0
+kill $NETDATAPID
+
+change_ssl_file "$CONF" " bind to = *=dashboard|registry|badges|management|netdata.conf *:20000=dashboard|registry|badges|management *:20001=dashboard|registry|netdata.conf^SSL=force *:20002=dashboard|registry" "$CWD" "netdata.conf.test2"
+netdata -c "netdata.conf.test2" -D &
+NETDATAPID=$!
+run_acl_tests $TOKEN "$BASICURL:19999" 5 5
+run_acl_tests $TOKEN "$BASICURLS:19999" 5 0
+
+run_acl_tests $TOKEN "$BASICURL:20000" 4 5
+run_acl_tests $TOKEN "$BASICURLS:20000" 4 0
+
+run_acl_tests $TOKEN "$BASICURL:20001" 4 5
+run_acl_tests $TOKEN "$BASICURLS:20001" 4 0
+
+run_acl_tests $TOKEN "$BASICURL:20002" 3 5
+run_acl_tests $TOKEN "$BASICURLS:20002" 3 0
+kill $NETDATAPID
+
+change_ssl_file "$CONF" " bind to = *=dashboard|registry|badges|management|netdata.conf *:20000=dashboard|registry|badges|management^SSL=optional *:20001=dashboard|registry|netdata.conf^SSL=force" "$CWD" "netdata.conf.test3"
+netdata -c "netdata.conf.test3" -D &
+NETDATAPID=$!
+run_acl_tests $TOKEN "$BASICURL:19999" 5 5
+run_acl_tests $TOKEN "$BASICURLS:19999" 5 0
+
+run_acl_tests $TOKEN "$BASICURL:20000" 4 0
+run_acl_tests $TOKEN "$BASICURLS:20000" 4 0
+
+run_acl_tests $TOKEN "$BASICURL:20001" 4 5
+run_acl_tests $TOKEN "$BASICURLS:20001" 4 0
+kill $NETDATAPID
+
+rm log_* netdata.conf.test* netdata.txt health.csv index.html badge.csv tmp* key.pem cert.pem info.txt
+echo "All the tests were successful ${NOCOLOR}"
diff --git a/tests/acls/netdata.cfg b/tests/acls/netdata.cfg
new file mode 100644
index 0000000..1dcb4a5
--- /dev/null
+++ b/tests/acls/netdata.cfg
@@ -0,0 +1,20 @@
+# netdata configuration
+#
+# You can download the latest version of this file, using:
+#
+# wget -O /etc/netdata/netdata.conf http://localhost:19999/netdata.conf
+# or
+# curl -o /etc/netdata/netdata.conf http://localhost:19999/netdata.conf
+#
+# You can uncomment and change any of the options below.
+# The value shown in the commented settings is the default value.
+#
+
+[global]
+ run as user = netdata
+
+ # the default database size - 1 hour
+ history = 3600
+
+ # by default do not expose the netdata port
+ bind to = localhost
diff --git a/tests/acls/netdata.ssl.cfg b/tests/acls/netdata.ssl.cfg
new file mode 100644
index 0000000..28e0030
--- /dev/null
+++ b/tests/acls/netdata.ssl.cfg
@@ -0,0 +1,24 @@
+# netdata configuration
+#
+# You can download the latest version of this file, using:
+#
+# wget -O /etc/netdata/netdata.conf http://localhost:19999/netdata.conf
+# or
+# curl -o /etc/netdata/netdata.conf http://localhost:19999/netdata.conf
+#
+# You can uncomment and change any of the options below.
+# The value shown in the commented settings is the default value.
+#
+
+[global]
+ run as user = netdata
+
+ # the default database size - 1 hour
+ history = 3600
+
+ # by default do not expose the netdata port
+ bind to = localhost
+
+[web]
+ ssl key =
+ ssl certificate =
diff --git a/tests/alarm_repetition/alarm.sh.in b/tests/alarm_repetition/alarm.sh.in
new file mode 100644
index 0000000..09d6aaf
--- /dev/null
+++ b/tests/alarm_repetition/alarm.sh.in
@@ -0,0 +1,89 @@
+#!/bin/bash
+
+# The health directory where the alarm definitions are installed
+HEALTHDIR="@configdir_POST@/health.d/"
+
+#output directory
+OUTDIR="workdir/"
+
+# URL to download
+QUERY="/api/v1/alarms?active"
+MURL="http://localhost:19999$QUERY"
+
+# colors for the messages
+RED='\033[0;31m'
+GREEN='\033[0;32m'
+NOCOLOR='\033[0m'
+
+MYCDIR="$(pwd)"
+CONFFILE="$MYCDIR/netdata.conf"
+
+change_alarm_file() {
+ if [ -f "$1" ]; then
+ rm "$1"
+ fi
+
+ #copy keeping the permissions
+ cp -a "$2" "$3"
+}
+
+netdata_test_download() {
+ OPT="-e"
+ if [ "$3" == "I" ]; then
+ OPT="-v"
+ fi
+
+ grep "HTTP/1.1 200 OK" "$1" 2>/dev/null 1>/dev/null
+ TEST="$?"
+ if [ "$TEST" -ne "0" ]; then
+ echo -e "${RED} Failed to get the alarms. ${NOCOLOR}"
+ kill "$5"
+ rm "$HEALTHDIR/ram.conf"
+ exit 1
+ fi
+
+ COUNT=$(grep -w "\"last_repeat\":" "$2" | grep -c "$OPT" "\"0\"")
+ if [ "$COUNT" -eq "0" ]; then
+ echo -e "${RED} Netdata gave an unexpected result when alarm repetition is $4 ${NOCOLOR}"
+ kill "$5"
+ rm "$HEALTHDIR/ram.conf"
+ exit 1
+ fi
+
+ echo -e "${GREEN} I got the expected result ${NOCOLOR}"
+}
+
+get_the_logs() {
+ curl -v -k --create-dirs -o "$OUTDIR/$1.out" "$MURL" 2> "$OUTDIR/$1.err"
+ netdata_test_download "$OUTDIR/$1.err" "$OUTDIR/$1.out" "$2" "$3" "$4"
+}
+
+process_data() {
+ SEC=120
+ netdata -c "$CONFFILE" -D &
+ NETDATAPID=$!
+ echo -e "${NOCOLOR}Sleeping for $SEC seconds to create alarm entries"
+ sleep $SEC
+ get_the_logs "$1" "$2" "$3" "$NETDATAPID"
+ kill $NETDATAPID
+}
+
+mkdir "$OUTDIR"
+CREATEDIR="$?"
+if [ "$CREATEDIR" -ne "0" ]; then
+ echo -e "${RED}Cannot create the output directory; it already exists. The test will overwrite previous results. ${NOCOLOR}"
+fi
+
+change_alarm_file "./0" "ram_without_repetition.conf" "$HEALTHDIR/ram.conf"
+cp -a netdata.conf_without_repetition netdata.conf
+process_data "ram_without" "K" "not activated."
+rm netdata.conf
+
+change_alarm_file "$HEALTHDIR/ram.conf" "ram_with_repetition.conf" "$HEALTHDIR/ram.conf"
+cp -a netdata.conf_with_repetition netdata.conf
+process_data "ram_with" "I" "activated."
+rm netdata.conf
+
+echo -e "${GREEN} all the tests were sucessful ${NOCOLOR}"
+rm "$HEALTHDIR/ram.conf"
+rm -rf $OUTDIR
diff --git a/tests/alarm_repetition/netdata.conf_with_repetition b/tests/alarm_repetition/netdata.conf_with_repetition
new file mode 100644
index 0000000..d5d00f0
--- /dev/null
+++ b/tests/alarm_repetition/netdata.conf_with_repetition
@@ -0,0 +1,57 @@
+# netdata configuration
+#
+# You can download the latest version of this file, using:
+#
+# wget -O /etc/netdata/netdata.conf http://localhost:19999/netdata.conf
+# or
+# curl -o /etc/netdata/netdata.conf http://localhost:19999/netdata.conf
+#
+# You can uncomment and change any of the options below.
+# The value shown in the commented settings is the default value.
+#
+
+# global netdata configuration
+
+[global]
+ #run as user = netdata
+
+[web]
+ #ssl key = /etc/netdata/ssl/key2048.pem
+ #ssl certificate = /etc/netdata/ssl/cert2048.pem
+ mode = static-threaded
+ # listen backlog = 4096
+ default port = 19999
+ #bind to = *=dashboard|registry|streaming|netdata.conf|badges|management *:20000=dashboard|registry|streaming|netdata.conf|badges|management^SSL=optional *:20001=dashboard|registry|streaming|netdata.conf|badges|management^SSL=force unix:/tmp/netdata/netdata.sock
+ # web files owner = netdata
+ # web files group = netdata
+ #accept a streaming request every seconds = 2
+
+[plugins]
+ proc = yes
+ diskspace = no
+ cgroups = no
+ tc = no
+ idlejitter = no
+ enable running new plugins = no
+ check for new plugins every = 60
+ go.d = no
+ node.d = no
+ charts.d = no
+ nfacct = no
+ python.d = no
+ apps = no
+ fping = no
+ cups = no
+
+[health]
+ enabled = yes
+ in memory max health log entries = 1000
+ default repeat warning = 4s
+ default repeat critical = 2s
+
+[registry]
+ enabled = yes
+ allow from = *
+
+[cloud]
+ cloud base url = https://app.netdata.cloud
diff --git a/tests/alarm_repetition/netdata.conf_without_repetition b/tests/alarm_repetition/netdata.conf_without_repetition
new file mode 100644
index 0000000..43518bd
--- /dev/null
+++ b/tests/alarm_repetition/netdata.conf_without_repetition
@@ -0,0 +1,57 @@
+# netdata configuration
+#
+# You can download the latest version of this file, using:
+#
+# wget -O /etc/netdata/netdata.conf http://localhost:19999/netdata.conf
+# or
+# curl -o /etc/netdata/netdata.conf http://localhost:19999/netdata.conf
+#
+# You can uncomment and change any of the options below.
+# The value shown in the commented settings is the default value.
+#
+
+# global netdata configuration
+
+[global]
+ #run as user = netdata
+
+[web]
+ #ssl key = /etc/netdata/ssl/key2048.pem
+ #ssl certificate = /etc/netdata/ssl/cert2048.pem
+ mode = static-threaded
+ # listen backlog = 4096
+ default port = 19999
+ #bind to = *=dashboard|registry|streaming|netdata.conf|badges|management *:20000=dashboard|registry|streaming|netdata.conf|badges|management^SSL=optional *:20001=dashboard|registry|streaming|netdata.conf|badges|management^SSL=force unix:/tmp/netdata/netdata.sock
+ # web files owner = netdata
+ # web files group = netdata
+ #accept a streaming request every seconds = 2
+
+[plugins]
+ proc = yes
+ diskspace = no
+ cgroups = no
+ tc = no
+ idlejitter = no
+ enable running new plugins = no
+ check for new plugins every = 60
+ go.d = no
+ node.d = no
+ charts.d = no
+ nfacct = no
+ python.d = no
+ apps = no
+ fping = no
+ cups = no
+
+[health]
+ enabled = yes
+ in memory max health log entries = 1000
+ #default repeat warning = 4s
+ #default repeat critical = 2s
+
+[registry]
+ enabled = yes
+ allow from = *
+
+[cloud]
+ cloud base url = https://app.netdata.cloud
diff --git a/tests/alarm_repetition/ram_with_repetition.conf b/tests/alarm_repetition/ram_with_repetition.conf
new file mode 100644
index 0000000..c215a71
--- /dev/null
+++ b/tests/alarm_repetition/ram_with_repetition.conf
@@ -0,0 +1,64 @@
+# you can disable an alarm notification by setting the 'to' line to: silent
+
+ alarm: used_ram_to_ignore
+ on: system.ram
+ os: linux freebsd
+ hosts: *
+ calc: ($zfs.arc_size.arcsz = nan)?(0):($zfs.arc_size.arcsz)
+ every: 10s
+ info: the amount of memory reported as used, even though it can actually resize itself based on system needs (e.g. ZFS ARC)
+
+ alarm: ram_in_use
+ on: system.ram
+ os: linux
+ hosts: *
+# calc: $used * 100 / ($used + $cached + $free)
+ calc: ($used - $used_ram_to_ignore) * 100 / ($used - $used_ram_to_ignore + $cached + $free)
+ units: %
+ every: 1s
+ warn: $this > 1
+ crit: $this > 5
+ delay: down 15m multiplier 1.5 max 1h
+ info: system RAM used
+ to: sysadmin #alarms
+ repeat: warning 30s critical 60s
+
+ alarm: ram_available
+ on: mem.available
+ os: linux
+ hosts: *
+ calc: ($avail + $used_ram_to_ignore) * 100 / ($system.ram.used + $system.ram.cached + $system.ram.free + $system.ram.buffers)
+ units: %
+ every: 10s
+ warn: $this < (($status >= $WARNING) ? (15) : (10))
+ crit: $this < (($status == $CRITICAL) ? (10) : ( 5))
+ delay: down 15m multiplier 1.5 max 1h
+ info: estimated amount of RAM available for userspace processes, without causing swapping
+ to: sysadmin #alarms
+
+## FreeBSD
+ alarm: ram_in_use
+    on: system.ram
+    os: freebsd
+ hosts: *
+  calc: ($active + $wired + $laundry + $buffers - $used_ram_to_ignore) * 100 / ($active + $wired + $laundry + $buffers - $used_ram_to_ignore + $cache + $free + $inactive)
+ units: %
+ every: 10s
+  warn: $this > (($status >= $WARNING) ? (80) : (90))
+  crit: $this > (($status == $CRITICAL) ? (90) : (98))
+ delay: down 15m multiplier 1.5 max 1h
+  info: system RAM usage
+    to: sysadmin #alarms
+
+ alarm: ram_available
+ on: system.ram
+ os: freebsd
+ hosts: *
+ calc: ($free + $inactive + $used_ram_to_ignore) * 100 / ($free + $active + $inactive + $wired + $cache + $laundry + $buffers)
+ units: %
+ every: 10s
+ warn: $this < (($status >= $WARNING) ? (15) : (10))
+ crit: $this < (($status == $CRITICAL) ? (10) : ( 5))
+ delay: down 15m multiplier 1.5 max 1h
+ info: estimated amount of RAM available for userspace processes, without causing swapping
+ to: sysadmin #alarms
diff --git a/tests/alarm_repetition/ram_without_repetition.conf b/tests/alarm_repetition/ram_without_repetition.conf
new file mode 100644
index 0000000..edfc492
--- /dev/null
+++ b/tests/alarm_repetition/ram_without_repetition.conf
@@ -0,0 +1,63 @@
+# you can disable an alarm notification by setting the 'to' line to: silent
+
+ alarm: used_ram_to_ignore
+ on: system.ram
+ os: linux freebsd
+ hosts: *
+ calc: ($zfs.arc_size.arcsz = nan)?(0):($zfs.arc_size.arcsz)
+ every: 10s
+ info: the amount of memory reported as used, even though it can actually resize itself based on system needs (e.g. ZFS ARC)
+
+ alarm: ram_in_use
+ on: system.ram
+ os: linux
+ hosts: *
+# calc: $used * 100 / ($used + $cached + $free)
+ calc: ($used - $used_ram_to_ignore) * 100 / ($used - $used_ram_to_ignore + $cached + $free)
+ units: %
+ every: 1s
+ warn: $this > 1
+ crit: $this > 5
+ delay: down 15m multiplier 1.5 max 1h
+ info: system RAM used
+ to: sysadmin #alarms
+
+ alarm: ram_available
+ on: mem.available
+ os: linux
+ hosts: *
+ calc: ($avail + $used_ram_to_ignore) * 100 / ($system.ram.used + $system.ram.cached + $system.ram.free + $system.ram.buffers)
+ units: %
+ every: 10s
+ warn: $this < (($status >= $WARNING) ? (15) : (10))
+ crit: $this < (($status == $CRITICAL) ? (10) : ( 5))
+ delay: down 15m multiplier 1.5 max 1h
+ info: estimated amount of RAM available for userspace processes, without causing swapping
+ to: sysadmin #alarms
+
+## FreeBSD
+ alarm: ram_in_use
+    on: system.ram
+    os: freebsd
+ hosts: *
+  calc: ($active + $wired + $laundry + $buffers - $used_ram_to_ignore) * 100 / ($active + $wired + $laundry + $buffers - $used_ram_to_ignore + $cache + $free + $inactive)
+ units: %
+ every: 10s
+  warn: $this > (($status >= $WARNING) ? (80) : (90))
+  crit: $this > (($status == $CRITICAL) ? (90) : (98))
+ delay: down 15m multiplier 1.5 max 1h
+  info: system RAM usage
+    to: sysadmin #alarms
+
+ alarm: ram_available
+ on: system.ram
+ os: freebsd
+ hosts: *
+ calc: ($free + $inactive + $used_ram_to_ignore) * 100 / ($free + $active + $inactive + $wired + $cache + $laundry + $buffers)
+ units: %
+ every: 10s
+ warn: $this < (($status >= $WARNING) ? (15) : (10))
+ crit: $this < (($status == $CRITICAL) ? (10) : ( 5))
+ delay: down 15m multiplier 1.5 max 1h
+ info: estimated amount of RAM available for userspace processes, without causing swapping
+ to: sysadmin #alarms
diff --git a/tests/api/fuzzer.py b/tests/api/fuzzer.py
new file mode 100644
index 0000000..ee12a02
--- /dev/null
+++ b/tests/api/fuzzer.py
@@ -0,0 +1,378 @@
+import argparse
+import json
+import logging
+import posixpath
+import random
+import re
+import requests
+import string
+import sys
+import urllib.parse
+
+#######################################################################################################################
+# Utilities
+
+
+def some(s):
+ return random.choice(sorted(s))
+
+
+def not_some(s):
+ test_set = random.choice([string.ascii_uppercase + string.ascii_lowercase,
+ string.digits,
+ string.digits + ".E-",
+ '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJK'
+ 'LMNOPQRSTUVWXYZ!"#$%\'()*+,-./:;<=>?@[\\]^_`{|}~ '])
+ test_len = random.choice([1, 2, 3, 37, 61, 121])
+ while True:
+ x = ''.join([random.choice(test_set) for _ in range(test_len)])
+ if x not in s:
+ return x
+
+
+def build_url(host_maybe_scheme, base_path):
+ try:
+ if '//' not in host_maybe_scheme:
+ host_maybe_scheme = '//' + host_maybe_scheme
+ url_tuple = urllib.parse.urlparse(host_maybe_scheme)
+ if base_path[0] == '/':
+ base_path = base_path[1:]
+ return url_tuple.netloc, posixpath.join(url_tuple.path, base_path)
+ except Exception as e:
+ L.error(f"Critical failure decoding arguments -> {e}")
+ sys.exit(-1)
+
+
+#######################################################################################################################
+# Data-model and processing
+
+
+class Param(object):
+ def __init__(self, name, location, kind):
+ self.location = location
+ self.kind = kind
+ self.name = name
+ self.values = set()
+
+ def dump(self):
+ print(f"{self.name} in {self.location} is {self.kind} : {{{self.values}}}")
+
+
+def does_response_fit_schema(schema_path, schema, resp):
+ '''The schema_path argument tells us where we are (globally) in the schema. The schema argument is the
+ sub-tree within the schema json that we are validating against. The resp is the json subtree from the
+ target host's response.
+
+    The basic idea is this: swagger defines a model of valid json trees. In this sense it is a formal
+    language, and we can validate a given server response by checking whether the language accepts it.
+    This is basically a parser, but instead of strings we are operating on languages of trees.
+
+ This could probably be extended to arbitrary swagger definitions - but the amount of work increases
+ rapidly as we attempt to cover the full semantics of languages of trees defined in swagger. Instead
+ we have some special cases that describe the parts of the semantics that we've used to describe the
+ netdata API.
+
+ If we hit an error (in the schema) that prevents further checks then we return early, otherwise we
+ try to collect as many errors as possible.
+ '''
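+    # Example: the schema {"type": "object", "properties": {"a": {"type": "string", "required": True}}}
+    # accepts {"a": "x"} but rejects {} (missing required key) and {"a": 1} (wrong leaf type).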
+ success = True
+ if "type" not in schema:
+ L.error(f"Cannot progress past {schema_path} -> no type specified in dictionary")
+ print(json.dumps(schema, indent=2))
+ return False
+ if schema["type"] == "object":
+ if isinstance(resp, dict) and "properties" in schema and isinstance(schema["properties"], dict):
+ L.debug(f"Validate properties against dictionary at {schema_path}")
+ for k, v in schema["properties"].items():
+ L.debug(f"Validate {k} received with {v}")
+ if v.get("required", False) and k not in resp:
+ L.error(f"Missing {k} in response at {schema_path}")
+ print(json.dumps(resp, indent=2))
+ return False
+ if k in resp:
+ if not does_response_fit_schema(posixpath.join(schema_path, k), v, resp[k]):
+ success = False
+ elif isinstance(resp, dict) and "additionalProperties" in schema \
+ and isinstance(schema["additionalProperties"], dict):
+ kv_schema = schema["additionalProperties"]
+ L.debug(f"Validate additionalProperties against every value in dictionary at {schema_path}")
+ if "type" in kv_schema and kv_schema["type"] == "object":
+ for k, v in resp.items():
+ if not does_response_fit_schema(posixpath.join(schema_path, k), kv_schema, v):
+ success = False
+ else:
+ L.error("Don't understand what the additionalProperties means (it has no type?)")
+ return False
+ else:
+ L.error(f"Can't understand schema at {schema_path}")
+ print(json.dumps(schema, indent=2))
+ return False
+ elif schema["type"] == "string":
+ if isinstance(resp, str):
+ L.debug(f"{repr(resp)} matches {repr(schema)} at {schema_path}")
+ return True
+ L.error(f"{repr(resp)} does not match schema {repr(schema)} at {schema_path}")
+ return False
+ elif schema["type"] == "boolean":
+ if isinstance(resp, bool):
+ L.debug(f"{repr(resp)} matches {repr(schema)} at {schema_path}")
+ return True
+ L.error(f"{repr(resp)} does not match schema {repr(schema)} at {schema_path}")
+ return False
+ elif schema["type"] == "number":
+ if 'nullable' in schema and resp is None:
+ L.debug(f"{repr(resp)} matches {repr(schema)} at {schema_path} (because nullable)")
+ return True
+ if isinstance(resp, int) or isinstance(resp, float):
+ L.debug(f"{repr(resp)} matches {repr(schema)} at {schema_path}")
+ return True
+ L.error(f"{repr(resp)} does not match schema {repr(schema)} at {schema_path}")
+ return False
+ elif schema["type"] == "integer":
+ if 'nullable' in schema and resp is None:
+ L.debug(f"{repr(resp)} matches {repr(schema)} at {schema_path} (because nullable)")
+ return True
+ if isinstance(resp, int):
+ L.debug(f"{repr(resp)} matches {repr(schema)} at {schema_path}")
+ return True
+ L.error(f"{repr(resp)} does not match schema {repr(schema)} at {schema_path}")
+ return False
+ elif schema["type"] == "array":
+ if "items" not in schema:
+ L.error(f"Schema for array at {schema_path} does not specify items!")
+ return False
+ item_schema = schema["items"]
+ if not isinstance(resp, list):
+ L.error(f"Server did not return a list for {schema_path} (typed as array in schema)")
+ return False
+ for i, item in enumerate(resp):
+ if not does_response_fit_schema(posixpath.join(schema_path, str(i)), item_schema, item):
+ success = False
+ else:
+ L.error(f"Invalid swagger type {schema['type']} for {type(resp)} at {schema_path}")
+ print(json.dumps(schema, indent=2))
+ return False
+ return success
+
+
+class GetPath(object):
+ def __init__(self, url, spec):
+ self.url = url
+ self.req_params = {}
+ self.opt_params = {}
+ self.success = None
+ self.failures = {}
+ if 'parameters' in spec.keys():
+ for p in spec['parameters']:
+ name = p['name']
+ req = p.get('required', False)
+ target = self.req_params if req else self.opt_params
+ target[name] = Param(name, p['in'], p['type'])
+ if 'default' in p:
+ defs = p['default']
+ if isinstance(defs, list):
+ for d in defs:
+ target[name].values.add(d)
+ else:
+ target[name].values.add(defs)
+ if 'enum' in p:
+ for v in p['enum']:
+ target[name].values.add(v)
+ if req and len(target[name].values) == 0:
+ print(f"FAIL: No default values in swagger for required parameter {name} in {self.url}")
+ for code, schema in spec['responses'].items():
+ if code[0] == "2" and 'schema' in schema:
+ self.success = schema['schema']
+ elif code[0] == "2":
+ L.error(f"2xx response with no schema in {self.url}")
+ else:
+ self.failures[code] = schema
+
+ def generate_success(self, host):
+ url_args = "&".join([f"{p.name}={some(p.values)}" for p in self.req_params.values()])
+ base_url = urllib.parse.urljoin(host, self.url)
+ test_url = f"{base_url}?{url_args}"
+ if url_filter.match(test_url):
+ try:
+ resp = requests.get(url=test_url, verify=(not args.tls_no_verify))
+ self.validate(test_url, resp, True)
+ except Exception as e:
+ L.error(f"Network failure in test {e}")
+ else:
+ L.debug(f"url_filter skips {test_url}")
+
+ def generate_failure(self, host):
+ all_params = list(self.req_params.values()) + list(self.opt_params.values())
+ bad_param = ''.join([random.choice(string.ascii_lowercase) for _ in range(5)])
+ while bad_param in all_params:
+ bad_param = ''.join([random.choice(string.ascii_lowercase) for _ in range(5)])
+ all_params.append(Param(bad_param, "query", "string"))
+ url_args = "&".join([f"{p.name}={not_some(p.values)}" for p in all_params])
+ base_url = urllib.parse.urljoin(host, self.url)
+ test_url = f"{base_url}?{url_args}"
+ if url_filter.match(test_url):
+ try:
+ resp = requests.get(url=test_url, verify=(not args.tls_no_verify))
+ self.validate(test_url, resp, False)
+ except Exception as e:
+ L.error(f"Network failure in test {e}")
+
+ def validate(self, test_url, resp, expect_success):
+ try:
+ resp_json = json.loads(resp.text)
+        except json.decoder.JSONDecodeError as e:
+            L.error(f"Non-json response from {test_url} -> {e}")
+ return
+        success_code = 200 <= resp.status_code < 300
+ if success_code and expect_success:
+ if self.success is not None:
+ if does_response_fit_schema(posixpath.join(self.url, str(resp.status_code)), self.success, resp_json):
+ L.info(f"tested {test_url}")
+ else:
+ L.error(f"tested {test_url}")
+ else:
+ L.error(f"Missing schema {test_url}")
+ elif not success_code and not expect_success:
+ schema = self.failures.get(str(resp.status_code), None)
+ if schema is not None:
+ if does_response_fit_schema(posixpath.join(self.url, str(resp.status_code)), schema, resp_json):
+ L.info(f"tested {test_url}")
+ else:
+ L.error(f"tested {test_url}")
+ else:
+                L.error(f"Missing schema for {resp.status_code} from {test_url}")
+ else:
+ L.error(f"Received incorrect status code {resp.status_code} against {test_url}")
+
+
+def get_the_spec(url):
+ if url[:7] == "file://":
+ with open(url[7:]) as f:
+ return f.read()
+ return requests.get(url=url).text
+
+
+# Swagger paths look absolute but they are relative to the base.
+def not_absolute(path):
+ return path[1:] if path[0] == '/' else path
+
+
+def find_ref(spec, path):
+ if len(path) > 0 and path[0] == '#':
+ return find_ref(spec, path[1:])
+ if len(path) == 1:
+ return spec[path[0]]
+ return find_ref(spec[path[0]], path[1:])
+
+
+def resolve_refs(spec, spec_root=None):
+ '''Find all "$ref" keys in the swagger spec and inline their target schemas.
+
+ As with all inliners this will break if a definition recursively links to itself, but this should not
+ happen in swagger as embedding a structure inside itself would produce a record of infinite size.'''
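+    # Example (definition name illustrative): {"$ref": "#/definitions/chart"} is replaced inline by
+    # the contents of spec_root["definitions"]["chart"], so later passes never see $ref keys.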
+ if spec_root is None:
+ spec_root = spec
+ newspec = {}
+ for k, v in spec.items():
+ if k == "$ref":
+ path = v.split('/')
+ target = find_ref(spec_root, path)
+ # Unfold one level of the tree and erase the $ref if possible.
+ if isinstance(target, dict):
+ for kk, vv in resolve_refs(target, spec_root).items():
+ newspec[kk] = vv
+ else:
+ newspec[k] = target
+ elif isinstance(v, dict):
+ newspec[k] = resolve_refs(v, spec_root)
+ else:
+ newspec[k] = v
+    # This is an artifact of inlining the $refs when they are inside a properties key: their children should be
+    # pushed up into the parent dictionary. They must be merged (union) rather than replaced, as we use this to
+    # implement polymorphism in the data-model.
+ if 'properties' in newspec and isinstance(newspec['properties'], dict) and \
+ 'properties' in newspec['properties']:
+ sub = newspec['properties']['properties']
+ del newspec['properties']['properties']
+ if 'type' in newspec['properties']:
+ del newspec['properties']['type']
+ for k, v in sub.items():
+ newspec['properties'][k] = v
+ return newspec
+
+
+#######################################################################################################################
+# Initialization
+
+random.seed(7) # Default is reproducible sequences
+
+parser = argparse.ArgumentParser()
+parser.add_argument('--url', type=str,
+ default='https://raw.githubusercontent.com/netdata/netdata/master/web/api/netdata-swagger.json',
+ help='The URL of the API definition in swagger. The default will pull the latest version '
+ 'from the main branch.')
+parser.add_argument('--host', type=str,
+ help='The URL of the target host to fuzz. The default will read the host from the swagger '
+ 'definition.')
+parser.add_argument('--reseed', action='store_true',
+ help="Pick a random seed for the PRNG. The default uses a constant seed for reproducibility.")
+parser.add_argument('--passes', action='store_true',
+ help="Log information about tests that pass")
+parser.add_argument('--detail', action='store_true',
+ help="Log information about the response/schema comparisons during each test")
+parser.add_argument('--filter', type=str,
+ default=".*",
+ help="Supply a regex used to filter the testing URLs generated")
+parser.add_argument('--tls-no-verify', action='store_true',
+                    help="Disable TLS certificate verification to allow connection to hosts that use "
+                         "self-signed certificates")
+parser.add_argument('--dump-inlined', action='store_true',
+ help='Dump the inlined swagger spec instead of fuzzing. For "reasons".')
+
+args = parser.parse_args()
+if args.reseed:
+ random.seed()
+
+spec = json.loads(get_the_spec(args.url))
+inlined_spec = resolve_refs(spec)
+if args.dump_inlined:
+ print(json.dumps(inlined_spec, indent=2))
+ sys.exit(-1)
+
+logging.addLevelName(40, "FAIL")
+logging.addLevelName(20, "PASS")
+logging.addLevelName(10, "DETAIL")
+L = logging.getLogger()
+handler = logging.StreamHandler(sys.stdout)
+if not args.passes and not args.detail:
+ L.setLevel(logging.ERROR)
+elif args.passes and not args.detail:
+ L.setLevel(logging.INFO)
+elif args.detail:
+ L.setLevel(logging.DEBUG)
+handler.setFormatter(logging.Formatter(fmt="%(levelname)s %(message)s"))
+L.addHandler(handler)
+
+url_filter = re.compile(args.filter)
+
+if spec['swagger'] != '2.0':
+    L.error(f"Unexpected swagger version {spec['swagger']!r} (expected 2.0)")
+ sys.exit(-1)
+L.info(f"Fuzzing {spec['info']['title']} / {spec['info']['version']}")
+
+host, base_url = build_url(args.host or spec['host'], inlined_spec['basePath'])
+
+L.info(f"Target host is {base_url}")
+paths = []
+for name, p in inlined_spec['paths'].items():
+ if 'get' in p:
+ name = not_absolute(name)
+ paths.append(GetPath(posixpath.join(base_url, name), p['get']))
+ elif 'put' in p:
+        L.error(f"Generation of PUT methods (for {name}) is unimplemented")
+
+for s in inlined_spec['schemes']:
+ for p in paths:
+        p.generate_success(s + "://" + host)
+        p.generate_failure(s + "://" + host)
diff --git a/tests/backends/prometheus-avg-oldunits.txt b/tests/backends/prometheus-avg-oldunits.txt
new file mode 100644
index 0000000..53ee8ff
--- /dev/null
+++ b/tests/backends/prometheus-avg-oldunits.txt
@@ -0,0 +1,148 @@
+nd_apps_cpu_percent_average
+nd_apps_cpu_system_percent_average
+nd_apps_cpu_user_percent_average
+nd_apps_files_open_files_average
+nd_apps_lreads_kilobytes_persec_average
+nd_apps_lwrites_kilobytes_persec_average
+nd_apps_major_faults_page_faults_persec_average
+nd_apps_mem_MB_average
+nd_apps_minor_faults_page_faults_persec_average
+nd_apps_pipes_open_pipes_average
+nd_apps_preads_kilobytes_persec_average
+nd_apps_processes_processes_average
+nd_apps_pwrites_kilobytes_persec_average
+nd_apps_sockets_open_sockets_average
+nd_apps_swap_MB_average
+nd_apps_threads_threads_average
+nd_apps_vmem_MB_average
+nd_cpu_core_throttling_events_persec_average
+nd_cpu_cpu_percent_average
+nd_cpu_interrupts_interrupts_persec_average
+nd_cpu_softirqs_softirqs_persec_average
+nd_cpu_softnet_stat_events_persec_average
+nd_disk_avgsz_kilobytes_per_operation_average
+nd_disk_await_ms_per_operation_average
+nd_disk_backlog_milliseconds_average
+nd_disk_inodes_Inodes_average
+nd_disk_io_kilobytes_persec_average
+nd_disk_iotime_milliseconds_persec_average
+nd_disk_mops_merged_operations_persec_average
+nd_disk_ops_operations_persec_average
+nd_disk_space_GB_average
+nd_disk_svctm_ms_per_operation_average
+nd_disk_util___of_time_working_average
+nd_ip_bcast_kilobits_persec_average
+nd_ip_bcastpkts_packets_persec_average
+nd_ip_ecnpkts_packets_persec_average
+nd_ip_inerrors_packets_persec_average
+nd_ip_mcast_kilobits_persec_average
+nd_ip_mcastpkts_packets_persec_average
+nd_ip_tcp_accept_queue_packets_persec_average
+nd_ip_tcpconnaborts_connections_persec_average
+nd_ip_tcpofo_packets_persec_average
+nd_ip_tcpreorders_packets_persec_average
+nd_ipv4_errors_packets_persec_average
+nd_ipv4_icmp_errors_packets_persec_average
+nd_ipv4_icmpmsg_packets_persec_average
+nd_ipv4_icmp_packets_persec_average
+nd_ipv4_packets_packets_persec_average
+nd_ipv4_sockstat_sockets_sockets_average
+nd_ipv4_sockstat_tcp_mem_KB_average
+nd_ipv4_sockstat_tcp_sockets_sockets_average
+nd_ipv4_sockstat_udp_mem_KB_average
+nd_ipv4_sockstat_udp_sockets_sockets_average
+nd_ipv4_tcperrors_packets_persec_average
+nd_ipv4_tcphandshake_events_persec_average
+nd_ipv4_tcpopens_connections_persec_average
+nd_ipv4_tcppackets_packets_persec_average
+nd_ipv4_tcpsock_active_connections_average
+nd_ipv4_udperrors_events_persec_average
+nd_ipv4_udppackets_packets_persec_average
+nd_ipv6_ect_packets_persec_average
+nd_ipv6_errors_packets_persec_average
+nd_ipv6_icmperrors_errors_persec_average
+nd_ipv6_icmp_messages_persec_average
+nd_ipv6_icmpmldv2_reports_persec_average
+nd_ipv6_icmpneighbor_messages_persec_average
+nd_ipv6_icmprouter_messages_persec_average
+nd_ipv6_icmptypes_messages_persec_average
+nd_ipv6_mcast_kilobits_persec_average
+nd_ipv6_mcastpkts_packets_persec_average
+nd_ipv6_packets_packets_persec_average
+nd_ipv6_sockstat6_raw_sockets_sockets_average
+nd_ipv6_sockstat6_tcp_sockets_sockets_average
+nd_ipv6_sockstat6_udp_sockets_sockets_average
+nd_ipv6_udperrors_events_persec_average
+nd_ipv6_udppackets_packets_persec_average
+nd_mem_available_MB_average
+nd_mem_committed_MB_average
+nd_mem_kernel_MB_average
+nd_mem_pgfaults_page_faults_persec_average
+nd_mem_slab_MB_average
+nd_mem_transparent_hugepages_MB_average
+nd_mem_writeback_MB_average
+nd_netdata_apps_children_fix_percent_average
+nd_netdata_apps_cpu_milliseconds_persec_average
+nd_netdata_apps_fix_percent_average
+nd_netdata_apps_sizes_files_persec_average
+nd_netdata_clients_connected_clients_average
+nd_netdata_compression_ratio_percent_average
+nd_netdata_go_plugin_execution_time_ms_average
+nd_netdata_net_kilobits_persec_average
+nd_netdata_plugin_cgroups_cpu_milliseconds_persec_average
+nd_netdata_plugin_diskspace_dt_milliseconds_run_average
+nd_netdata_plugin_diskspace_milliseconds_persec_average
+nd_netdata_plugin_proc_cpu_milliseconds_persec_average
+nd_netdata_plugin_proc_modules_milliseconds_run_average
+nd_netdata_plugin_tc_cpu_milliseconds_persec_average
+nd_netdata_plugin_tc_time_milliseconds_run_average
+nd_netdata_private_charts_charts_average
+nd_netdata_pythond_runtime_ms_average
+nd_netdata_requests_requests_persec_average
+nd_netdata_response_time_milliseconds_request_average
+nd_netdata_server_cpu_milliseconds_persec_average
+nd_netdata_statsd_bytes_kilobits_persec_average
+nd_netdata_statsd_cpu_milliseconds_persec_average
+nd_netdata_statsd_events_events_persec_average
+nd_netdata_statsd_metrics_metrics_average
+nd_netdata_statsd_packets_packets_persec_average
+nd_netdata_statsd_reads_reads_persec_average
+nd_netdata_statsd_useful_metrics_metrics_average
+nd_netdata_tcp_connected_sockets_average
+nd_netdata_tcp_connects_events_average
+nd_netdata_web_cpu_milliseconds_persec_average
+nd_net_drops_drops_persec_average
+nd_net_net_kilobits_persec_average
+nd_net_packets_packets_persec_average
+nd_services_cpu_percent_average
+nd_services_mem_usage_MB_average
+nd_services_swap_usage_MB_average
+nd_services_throttle_io_ops_read_operations_persec_average
+nd_services_throttle_io_ops_write_operations_persec_average
+nd_services_throttle_io_read_kilobytes_persec_average
+nd_services_throttle_io_write_kilobytes_persec_average
+nd_system_active_processes_processes_average
+nd_system_cpu_percent_average
+nd_system_ctxt_context_switches_persec_average
+nd_system_entropy_entropy_average
+nd_system_forks_processes_persec_average
+nd_system_idlejitter_microseconds_lost_persec_average
+nd_system_interrupts_interrupts_persec_average
+nd_system_intr_interrupts_persec_average
+nd_system_io_kilobytes_persec_average
+nd_system_ipc_semaphore_arrays_arrays_average
+nd_system_ipc_semaphores_semaphores_average
+nd_system_ip_kilobits_persec_average
+nd_system_ipv6_kilobits_persec_average
+nd_system_load_load_average
+nd_system_net_kilobits_persec_average
+nd_system_pgpgio_kilobytes_persec_average
+nd_system_processes_processes_average
+nd_system_ram_MB_average
+nd_system_shared_memory_bytes_bytes_average
+nd_system_shared_memory_segments_segments_average
+nd_system_softirqs_softirqs_persec_average
+nd_system_softnet_stat_events_persec_average
+nd_system_swapio_kilobytes_persec_average
+nd_system_swap_MB_average
+nd_system_uptime_seconds_average
diff --git a/tests/backends/prometheus-avg.txt b/tests/backends/prometheus-avg.txt
new file mode 100644
index 0000000..1aedff2
--- /dev/null
+++ b/tests/backends/prometheus-avg.txt
@@ -0,0 +1,148 @@
+nd_apps_cpu_percentage_average
+nd_apps_cpu_system_percentage_average
+nd_apps_cpu_user_percentage_average
+nd_apps_files_open_files_average
+nd_apps_lreads_KiB_persec_average
+nd_apps_lwrites_KiB_persec_average
+nd_apps_major_faults_page_faults_persec_average
+nd_apps_mem_MiB_average
+nd_apps_minor_faults_page_faults_persec_average
+nd_apps_pipes_open_pipes_average
+nd_apps_preads_KiB_persec_average
+nd_apps_processes_processes_average
+nd_apps_pwrites_KiB_persec_average
+nd_apps_sockets_open_sockets_average
+nd_apps_swap_MiB_average
+nd_apps_threads_threads_average
+nd_apps_vmem_MiB_average
+nd_cpu_core_throttling_events_persec_average
+nd_cpu_cpu_percentage_average
+nd_cpu_interrupts_interrupts_persec_average
+nd_cpu_softirqs_softirqs_persec_average
+nd_cpu_softnet_stat_events_persec_average
+nd_disk_avgsz_KiB_operation_average
+nd_disk_await_milliseconds_operation_average
+nd_disk_backlog_milliseconds_average
+nd_disk_inodes_inodes_average
+nd_disk_io_KiB_persec_average
+nd_disk_iotime_milliseconds_persec_average
+nd_disk_mops_merged_operations_persec_average
+nd_disk_ops_operations_persec_average
+nd_disk_space_GiB_average
+nd_disk_svctm_milliseconds_operation_average
+nd_disk_util___of_time_working_average
+nd_ip_bcast_kilobits_persec_average
+nd_ip_bcastpkts_packets_persec_average
+nd_ip_ecnpkts_packets_persec_average
+nd_ip_inerrors_packets_persec_average
+nd_ip_mcast_kilobits_persec_average
+nd_ip_mcastpkts_packets_persec_average
+nd_ip_tcp_accept_queue_packets_persec_average
+nd_ip_tcpconnaborts_connections_persec_average
+nd_ip_tcpofo_packets_persec_average
+nd_ip_tcpreorders_packets_persec_average
+nd_ipv4_errors_packets_persec_average
+nd_ipv4_icmp_errors_packets_persec_average
+nd_ipv4_icmpmsg_packets_persec_average
+nd_ipv4_icmp_packets_persec_average
+nd_ipv4_packets_packets_persec_average
+nd_ipv4_sockstat_sockets_sockets_average
+nd_ipv4_sockstat_tcp_mem_KiB_average
+nd_ipv4_sockstat_tcp_sockets_sockets_average
+nd_ipv4_sockstat_udp_mem_KiB_average
+nd_ipv4_sockstat_udp_sockets_sockets_average
+nd_ipv4_tcperrors_packets_persec_average
+nd_ipv4_tcphandshake_events_persec_average
+nd_ipv4_tcpopens_connections_persec_average
+nd_ipv4_tcppackets_packets_persec_average
+nd_ipv4_tcpsock_active_connections_average
+nd_ipv4_udperrors_events_persec_average
+nd_ipv4_udppackets_packets_persec_average
+nd_ipv6_ect_packets_persec_average
+nd_ipv6_errors_packets_persec_average
+nd_ipv6_icmperrors_errors_persec_average
+nd_ipv6_icmp_messages_persec_average
+nd_ipv6_icmpmldv2_reports_persec_average
+nd_ipv6_icmpneighbor_messages_persec_average
+nd_ipv6_icmprouter_messages_persec_average
+nd_ipv6_icmptypes_messages_persec_average
+nd_ipv6_mcast_kilobits_persec_average
+nd_ipv6_mcastpkts_packets_persec_average
+nd_ipv6_packets_packets_persec_average
+nd_ipv6_sockstat6_raw_sockets_sockets_average
+nd_ipv6_sockstat6_tcp_sockets_sockets_average
+nd_ipv6_sockstat6_udp_sockets_sockets_average
+nd_ipv6_udperrors_events_persec_average
+nd_ipv6_udppackets_packets_persec_average
+nd_mem_available_MiB_average
+nd_mem_committed_MiB_average
+nd_mem_kernel_MiB_average
+nd_mem_pgfaults_faults_persec_average
+nd_mem_slab_MiB_average
+nd_mem_transparent_hugepages_MiB_average
+nd_mem_writeback_MiB_average
+nd_netdata_apps_children_fix_percentage_average
+nd_netdata_apps_cpu_milliseconds_persec_average
+nd_netdata_apps_fix_percentage_average
+nd_netdata_apps_sizes_files_persec_average
+nd_netdata_clients_connected_clients_average
+nd_netdata_compression_ratio_percentage_average
+nd_netdata_go_plugin_execution_time_ms_average
+nd_netdata_net_kilobits_persec_average
+nd_netdata_plugin_cgroups_cpu_milliseconds_persec_average
+nd_netdata_plugin_diskspace_dt_milliseconds_run_average
+nd_netdata_plugin_diskspace_milliseconds_persec_average
+nd_netdata_plugin_proc_cpu_milliseconds_persec_average
+nd_netdata_plugin_proc_modules_milliseconds_run_average
+nd_netdata_plugin_tc_cpu_milliseconds_persec_average
+nd_netdata_plugin_tc_time_milliseconds_run_average
+nd_netdata_private_charts_charts_average
+nd_netdata_pythond_runtime_ms_average
+nd_netdata_requests_requests_persec_average
+nd_netdata_response_time_milliseconds_request_average
+nd_netdata_server_cpu_milliseconds_persec_average
+nd_netdata_statsd_bytes_kilobits_persec_average
+nd_netdata_statsd_cpu_milliseconds_persec_average
+nd_netdata_statsd_events_events_persec_average
+nd_netdata_statsd_metrics_metrics_average
+nd_netdata_statsd_packets_packets_persec_average
+nd_netdata_statsd_reads_reads_persec_average
+nd_netdata_statsd_useful_metrics_metrics_average
+nd_netdata_tcp_connected_sockets_average
+nd_netdata_tcp_connects_events_average
+nd_netdata_web_cpu_milliseconds_persec_average
+nd_net_drops_drops_persec_average
+nd_net_net_kilobits_persec_average
+nd_net_packets_packets_persec_average
+nd_services_cpu_percentage_average
+nd_services_mem_usage_MiB_average
+nd_services_swap_usage_MiB_average
+nd_services_throttle_io_ops_read_operations_persec_average
+nd_services_throttle_io_ops_write_operations_persec_average
+nd_services_throttle_io_read_KiB_persec_average
+nd_services_throttle_io_write_KiB_persec_average
+nd_system_active_processes_processes_average
+nd_system_cpu_percentage_average
+nd_system_ctxt_context_switches_persec_average
+nd_system_entropy_entropy_average
+nd_system_forks_processes_persec_average
+nd_system_idlejitter_microseconds_lost_persec_average
+nd_system_interrupts_interrupts_persec_average
+nd_system_intr_interrupts_persec_average
+nd_system_io_KiB_persec_average
+nd_system_ipc_semaphore_arrays_arrays_average
+nd_system_ipc_semaphores_semaphores_average
+nd_system_ip_kilobits_persec_average
+nd_system_ipv6_kilobits_persec_average
+nd_system_load_load_average
+nd_system_net_kilobits_persec_average
+nd_system_pgpgio_KiB_persec_average
+nd_system_processes_processes_average
+nd_system_ram_MiB_average
+nd_system_shared_memory_bytes_bytes_average
+nd_system_shared_memory_segments_segments_average
+nd_system_softirqs_softirqs_persec_average
+nd_system_softnet_stat_events_persec_average
+nd_system_swapio_KiB_persec_average
+nd_system_swap_MiB_average
+nd_system_uptime_seconds_average
diff --git a/tests/backends/prometheus-raw.txt b/tests/backends/prometheus-raw.txt
new file mode 100644
index 0000000..2ac4c2c
--- /dev/null
+++ b/tests/backends/prometheus-raw.txt
@@ -0,0 +1,156 @@
+nd_apps_cpu
+nd_apps_cpu_system
+nd_apps_cpu_user
+nd_apps_files
+nd_apps_lreads
+nd_apps_lwrites
+nd_apps_major_faults
+nd_apps_mem
+nd_apps_minor_faults
+nd_apps_pipes
+nd_apps_preads
+nd_apps_processes
+nd_apps_pwrites
+nd_apps_sockets
+nd_apps_swap
+nd_apps_threads
+nd_apps_vmem
+nd_cpu_core_throttling_total
+nd_cpu_cpu_total
+nd_cpu_interrupts_total
+nd_cpu_softirqs_total
+nd_cpu_softnet_stat_total
+nd_disk_avgsz
+nd_disk_await
+nd_disk_backlog_total
+nd_disk_inodes
+nd_disk_iotime_total
+nd_disk_io_total
+nd_disk_mops_total
+nd_disk_ops_total
+nd_disk_space
+nd_disk_svctm
+nd_disk_util_total
+nd_ip_bcastpkts_total
+nd_ip_bcast_total
+nd_ip_ecnpkts_total
+nd_ip_inerrors_total
+nd_ip_mcastpkts_total
+nd_ip_mcast_total
+nd_ip_tcp_accept_queue_total
+nd_ip_tcpconnaborts_total
+nd_ip_tcpofo_total
+nd_ip_tcpreorders_total
+nd_ipv4_errors_total
+nd_ipv4_icmp_errors_total
+nd_ipv4_icmpmsg_total
+nd_ipv4_icmp_total
+nd_ipv4_packets_total
+nd_ipv4_sockstat_sockets
+nd_ipv4_sockstat_tcp_mem
+nd_ipv4_sockstat_tcp_sockets
+nd_ipv4_sockstat_udp_mem
+nd_ipv4_sockstat_udp_sockets
+nd_ipv4_tcperrors_total
+nd_ipv4_tcphandshake_total
+nd_ipv4_tcpopens_total
+nd_ipv4_tcppackets_total
+nd_ipv4_tcpsock
+nd_ipv4_udperrors_total
+nd_ipv4_udppackets_total
+nd_ipv6_ect_total
+nd_ipv6_errors_total
+nd_ipv6_icmperrors_total
+nd_ipv6_icmpmldv2_total
+nd_ipv6_icmpneighbor_total
+nd_ipv6_icmprouter_total
+nd_ipv6_icmp_total
+nd_ipv6_icmptypes_total
+nd_ipv6_mcastpkts_total
+nd_ipv6_mcast_total
+nd_ipv6_packets_total
+nd_ipv6_sockstat6_raw_sockets
+nd_ipv6_sockstat6_tcp_sockets
+nd_ipv6_sockstat6_udp_sockets
+nd_ipv6_udperrors_total
+nd_ipv6_udppackets_total
+nd_mem_available
+nd_mem_committed
+nd_mem_kernel
+nd_mem_pgfaults_total
+nd_mem_slab
+nd_mem_transparent_hugepages
+nd_mem_writeback
+nd_netdata_apps_children_fix
+nd_netdata_apps_cpu_total
+nd_netdata_apps_fix
+nd_netdata_apps_sizes_calls_total
+nd_netdata_apps_sizes_fds
+nd_netdata_apps_sizes_filenames_total
+nd_netdata_apps_sizes_files_total
+nd_netdata_apps_sizes_inode_changes_total
+nd_netdata_apps_sizes_link_changes_total
+nd_netdata_apps_sizes_new_pids_total
+nd_netdata_apps_sizes_pids
+nd_netdata_apps_sizes_targets
+nd_netdata_clients
+nd_netdata_compression_ratio
+nd_netdata_go_plugin_execution_time
+nd_netdata_net_total
+nd_netdata_plugin_cgroups_cpu_total
+nd_netdata_plugin_diskspace_dt
+nd_netdata_plugin_diskspace_total
+nd_netdata_plugin_proc_cpu_total
+nd_netdata_plugin_proc_modules
+nd_netdata_plugin_tc_cpu_total
+nd_netdata_plugin_tc_time
+nd_netdata_private_charts
+nd_netdata_pythond_runtime
+nd_netdata_requests_total
+nd_netdata_response_time
+nd_netdata_server_cpu_total
+nd_netdata_statsd_bytes_total
+nd_netdata_statsd_cpu_total
+nd_netdata_statsd_events_total
+nd_netdata_statsd_metrics
+nd_netdata_statsd_packets_total
+nd_netdata_statsd_reads_total
+nd_netdata_statsd_useful_metrics
+nd_netdata_tcp_connected
+nd_netdata_tcp_connects_total
+nd_netdata_web_cpu_total
+nd_net_drops_total
+nd_net_net_total
+nd_net_packets_total
+nd_services_cpu_total
+nd_services_mem_usage
+nd_services_swap_usage
+nd_services_throttle_io_ops_read_total
+nd_services_throttle_io_ops_write_total
+nd_services_throttle_io_read_total
+nd_services_throttle_io_write_total
+nd_system_active_processes
+nd_system_cpu_total
+nd_system_ctxt_total
+nd_system_entropy
+nd_system_forks_total
+nd_system_idlejitter
+nd_system_interrupts_total
+nd_system_intr_total
+nd_system_io_total
+nd_system_ipc_semaphore_arrays
+nd_system_ipc_semaphores
+nd_system_ip_total
+nd_system_ipv6_total
+nd_system_load
+nd_system_net_total
+nd_system_pgpgio_total
+nd_system_processes
+nd_system_ram
+nd_system_shared_memory_bytes
+nd_system_shared_memory_segments
+nd_system_softirqs_total
+nd_system_softnet_stat_total
+nd_system_swap
+nd_system_swapio_total
+nd_system_uptime
diff --git a/tests/backends/prometheus.bats b/tests/backends/prometheus.bats
new file mode 100755
index 0000000..d52f39d
--- /dev/null
+++ b/tests/backends/prometheus.bats
@@ -0,0 +1,31 @@
+#!/usr/bin/env bats
+
+validate_metrics() {
+ fname="${1}"
+ params="${2}"
+
+ curl -sS "http://localhost:19999/api/v1/allmetrics?format=prometheus&prefix=nd&timestamps=no${params}" |
+    grep -E 'nd_system_|nd_cpu_|nd_net_|nd_disk_|nd_ip_|nd_ipv4_|nd_ipv6_|nd_mem_|nd_netdata_|nd_apps_|nd_services_' |
+ sed -ne 's/{.*//p' | sort | uniq > tests/backends/new-${fname}
+ diff tests/backends/${fname} tests/backends/new-${fname}
+ rm tests/backends/new-${fname}
+}
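+
+# Example (matches how the test cases below invoke the helper): compare the
+# raw-format metric names the agent currently exports against a stored fixture:
+#   validate_metrics prometheus-raw.txt "&data=raw"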
+
+
+if [ ! -f .gitignore ]; then
+  echo "Need to run as ./tests/backends/$(basename "$0") from the top level directory of the git repository" >&2
+ exit 1
+fi
+
+
+@test "prometheus raw" {
+ validate_metrics prometheus-raw.txt "&data=raw"
+}
+
+@test "prometheus avg" {
+ validate_metrics prometheus-avg.txt ""
+}
+
+@test "prometheus avg oldunits" {
+ validate_metrics prometheus-avg-oldunits.txt "&oldunits=yes"
+}
diff --git a/tests/health_mgmtapi/README.md b/tests/health_mgmtapi/README.md
new file mode 100644
index 0000000..1a4b2b1
--- /dev/null
+++ b/tests/health_mgmtapi/README.md
@@ -0,0 +1,15 @@
+<!--
+title: "Health command API tester"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/tests/health_mgmtapi/README.md
+-->
+
+# Health command API tester
+
+The directory `tests/health_mgmtapi` contains the test script `health-cmdapi-test.sh` for the [health command API](/web/api/health/README.md).
+
+The script can be executed with options to prepare the system for the tests, run them, and restore the system to its previous state.
+
+It requires the management API to be accessible on localhost:19999 and functional responses to `api/v1/alarms?all` requests.
+It also requires read access to the management API key, which is usually found under `/var/lib/netdata/netdata.api.key` (`@varlibdir_POST@/netdata.api.key`).
+
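+As a minimal sketch, a single silencing command can be sent like this (assuming a default agent on `localhost:19999`; adjust the key path to your install):
+
+```sh
+# read the management API token
+TOKEN="$(cat /var/lib/netdata/netdata.api.key)"
+
+# silence notifications for alarms matching a name selector
+curl -s "http://localhost:19999/api/v1/manage/health?cmd=SILENCE&alarm=load_trigger" \
+  -H "X-Auth-Token: ${TOKEN}"
+```
+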
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Ftests%2Fhealth_mgmtapi%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/tests/health_mgmtapi/expected_list/ALARM_CPU_IOWAIT-list.json b/tests/health_mgmtapi/expected_list/ALARM_CPU_IOWAIT-list.json
new file mode 100644
index 0000000..9f05efe
--- /dev/null
+++ b/tests/health_mgmtapi/expected_list/ALARM_CPU_IOWAIT-list.json
@@ -0,0 +1 @@
+{ "all": false, "type": "SILENCE", "silencers": [ { "alarm": "*10min_cpu_iowait" }, { "alarm": "*10min_cpu_usage *load_trigger" } ] }
diff --git a/tests/health_mgmtapi/expected_list/ALARM_CPU_USAGE-list.json b/tests/health_mgmtapi/expected_list/ALARM_CPU_USAGE-list.json
new file mode 100644
index 0000000..dbf8799
--- /dev/null
+++ b/tests/health_mgmtapi/expected_list/ALARM_CPU_USAGE-list.json
@@ -0,0 +1 @@
+{ "all": false, "type": "SILENCE", "silencers": [ { "alarm": "*10min_cpu_usage *load_trigger", "context": "system.cpu" }, { "alarm": "*10min_cpu_usage *load_trigger", "chart": "system.load" } ] }
diff --git a/tests/health_mgmtapi/expected_list/CONTEXT_SYSTEM_CPU-list.json b/tests/health_mgmtapi/expected_list/CONTEXT_SYSTEM_CPU-list.json
new file mode 100644
index 0000000..a267cfd
--- /dev/null
+++ b/tests/health_mgmtapi/expected_list/CONTEXT_SYSTEM_CPU-list.json
@@ -0,0 +1 @@
+{ "all": false, "type": "DISABLE", "silencers": [ { "context": "system.cpu" }, { "chart": "system.load" } ] }
diff --git a/tests/health_mgmtapi/expected_list/DISABLE-list.json b/tests/health_mgmtapi/expected_list/DISABLE-list.json
new file mode 100644
index 0000000..c2c7781
--- /dev/null
+++ b/tests/health_mgmtapi/expected_list/DISABLE-list.json
@@ -0,0 +1 @@
+{ "all": false, "type": "DISABLE", "silencers": [ { "alarm": "*10min_cpu_usage *load_trigger" } ] }
diff --git a/tests/health_mgmtapi/expected_list/DISABLE_ALL-list.json b/tests/health_mgmtapi/expected_list/DISABLE_ALL-list.json
new file mode 100644
index 0000000..bbc3f4f
--- /dev/null
+++ b/tests/health_mgmtapi/expected_list/DISABLE_ALL-list.json
@@ -0,0 +1 @@
+{ "all": true, "type": "DISABLE", "silencers": [] }
diff --git a/tests/health_mgmtapi/expected_list/DISABLE_ALL_ERROR-list.json b/tests/health_mgmtapi/expected_list/DISABLE_ALL_ERROR-list.json
new file mode 100644
index 0000000..e8aee17
--- /dev/null
+++ b/tests/health_mgmtapi/expected_list/DISABLE_ALL_ERROR-list.json
@@ -0,0 +1 @@
+Auth Error
diff --git a/tests/health_mgmtapi/expected_list/DISABLE_SYSTEM_LOAD-list.json b/tests/health_mgmtapi/expected_list/DISABLE_SYSTEM_LOAD-list.json
new file mode 100644
index 0000000..a7fc1cb
--- /dev/null
+++ b/tests/health_mgmtapi/expected_list/DISABLE_SYSTEM_LOAD-list.json
@@ -0,0 +1 @@
+{ "all": false, "type": "DISABLE", "silencers": [ { "chart": "system.load" } ] }
diff --git a/tests/health_mgmtapi/expected_list/FAMILIES_LOAD-list.json b/tests/health_mgmtapi/expected_list/FAMILIES_LOAD-list.json
new file mode 100644
index 0000000..50119f7
--- /dev/null
+++ b/tests/health_mgmtapi/expected_list/FAMILIES_LOAD-list.json
@@ -0,0 +1 @@
+{ "all": false, "type": "None", "silencers": [ { "families": "load" } ] }
diff --git a/tests/health_mgmtapi/expected_list/HOSTS-list.json b/tests/health_mgmtapi/expected_list/HOSTS-list.json
new file mode 100644
index 0000000..9db21b6
--- /dev/null
+++ b/tests/health_mgmtapi/expected_list/HOSTS-list.json
@@ -0,0 +1 @@
+{ "all": false, "type": "SILENCE", "silencers": [ { "hosts": "*" } ] }
diff --git a/tests/health_mgmtapi/expected_list/RESET-list.json b/tests/health_mgmtapi/expected_list/RESET-list.json
new file mode 100644
index 0000000..2d3f09d
--- /dev/null
+++ b/tests/health_mgmtapi/expected_list/RESET-list.json
@@ -0,0 +1 @@
+{ "all": false, "type": "None", "silencers": [] }
diff --git a/tests/health_mgmtapi/expected_list/SILENCE-list.json b/tests/health_mgmtapi/expected_list/SILENCE-list.json
new file mode 100644
index 0000000..d157f2d
--- /dev/null
+++ b/tests/health_mgmtapi/expected_list/SILENCE-list.json
@@ -0,0 +1 @@
+{ "all": false, "type": "SILENCE", "silencers": [ { "alarm": "*10min_cpu_usage *load_trigger" } ] }
diff --git a/tests/health_mgmtapi/expected_list/SILENCE_2-list.json b/tests/health_mgmtapi/expected_list/SILENCE_2-list.json
new file mode 100644
index 0000000..d5e6fa2
--- /dev/null
+++ b/tests/health_mgmtapi/expected_list/SILENCE_2-list.json
@@ -0,0 +1 @@
+{ "all": false, "type": "SILENCE", "silencers": [ { "families": "load" } ] }
diff --git a/tests/health_mgmtapi/expected_list/SILENCE_3-list.json b/tests/health_mgmtapi/expected_list/SILENCE_3-list.json
new file mode 100644
index 0000000..69e98cc
--- /dev/null
+++ b/tests/health_mgmtapi/expected_list/SILENCE_3-list.json
@@ -0,0 +1 @@
+{ "all": false, "type": "SILENCE", "silencers": [] } WARNING: SILENCE or DISABLE command is ineffective without defining any alarm selectors.
diff --git a/tests/health_mgmtapi/expected_list/SILENCE_ALARM_CPU_USAGE-list.json b/tests/health_mgmtapi/expected_list/SILENCE_ALARM_CPU_USAGE-list.json
new file mode 100644
index 0000000..dd789cd
--- /dev/null
+++ b/tests/health_mgmtapi/expected_list/SILENCE_ALARM_CPU_USAGE-list.json
@@ -0,0 +1 @@
+{ "all": false, "type": "SILENCE", "silencers": [ { "alarm": "*10min_cpu_usage *load_trigger", "chart": "system.load" } ] }
diff --git a/tests/health_mgmtapi/expected_list/SILENCE_ALARM_CPU_USAGE_LOAD_TRIGGER-list.json b/tests/health_mgmtapi/expected_list/SILENCE_ALARM_CPU_USAGE_LOAD_TRIGGER-list.json
new file mode 100644
index 0000000..d157f2d
--- /dev/null
+++ b/tests/health_mgmtapi/expected_list/SILENCE_ALARM_CPU_USAGE_LOAD_TRIGGER-list.json
@@ -0,0 +1 @@
+{ "all": false, "type": "SILENCE", "silencers": [ { "alarm": "*10min_cpu_usage *load_trigger" } ] }
diff --git a/tests/health_mgmtapi/expected_list/SILENCE_ALL-list.json b/tests/health_mgmtapi/expected_list/SILENCE_ALL-list.json
new file mode 100644
index 0000000..c88ef9f
--- /dev/null
+++ b/tests/health_mgmtapi/expected_list/SILENCE_ALL-list.json
@@ -0,0 +1 @@
+{ "all": true, "type": "SILENCE", "silencers": [] }
diff --git a/tests/health_mgmtapi/health-cmdapi-test.sh.in b/tests/health_mgmtapi/health-cmdapi-test.sh.in
new file mode 100755
index 0000000..5abf2b1
--- /dev/null
+++ b/tests/health_mgmtapi/health-cmdapi-test.sh.in
@@ -0,0 +1,226 @@
+#!/usr/bin/env bash
+# shellcheck disable=SC1117,SC2034,SC2059,SC2086,SC2181
+
+NETDATA_VARLIB_DIR="@varlibdir_POST@"
+
+check () {
+ sec=1
+ echo -e " ${GRAY}Check: '${1}' in $sec sec"
+ sleep $sec
+ number=$RANDOM
+ resp=$(curl -s "http://$URL/api/v1/alarms?all&$number")
+ r=$(echo "${resp}" | \
+ python3 -c "import sys, json; d=json.load(sys.stdin); \
+ print(\
+ d['alarms']['system.cpu.10min_cpu_usage']['disabled'], \
+ d['alarms']['system.cpu.10min_cpu_usage']['silenced'] , \
+ d['alarms']['system.cpu.10min_cpu_iowait']['disabled'], \
+ d['alarms']['system.cpu.10min_cpu_iowait']['silenced'], \
+ d['alarms']['system.load.load_trigger']['disabled'], \
+ d['alarms']['system.load.load_trigger']['silenced'], \
+ );" 2>&1)
+ if [ $? -ne 0 ] ; then
+ echo -e " ${RED}ERROR: Unexpected response stored in /tmp/resp-$number.json"
+ echo "$resp" > /tmp/resp-$number.json
+ err=$((err+1))
+ iter=0
+ elif [ "${r}" != "${2}" ] ; then
+    echo -e "  ${GRAY}WARNING: Got '${r}'. Expected '${2}'"
+ iter=$((iter+1))
+ if [ $iter -lt 10 ] ; then
+ echo -e " ${GRAY}Repeating test "
+ check "$1" "$2"
+ else
+      echo -e "  ${RED}ERROR: Got '${r}'. Expected '${2}'"
+ iter=0
+ err=$((err+1))
+ fi
+ else
+ echo -e " ${GREEN}Success"
+ iter=0
+ fi
+}
+
+cmd () {
+ echo -e "${WHITE}Cmd '${1}'"
+ echo -en " ${GRAY}Expecting '${2}' : "
+ RESPONSE=$(curl -s "http://$URL/api/v1/manage/health?${1}" -H "X-Auth-Token: $TOKEN" 2>&1)
+ if [ "${RESPONSE}" != "${2}" ] ; then
+ echo -e "${RED}ERROR: Response '${RESPONSE}'"
+ err=$((err+1))
+ else
+ echo -e "${GREEN}Success"
+ fi
+}
+
+check_list() {
+ RESPONSE=$(curl -s "http://$URL/api/v1/manage/health?cmd=LIST" -H "X-Auth-Token: $TOKEN" 2>&1)
+
+ NAME="$1-list.json"
+ echo $RESPONSE > $NAME
+ diff $NAME expected_list/$NAME 1>/dev/null 2>&1
+ if [ $? -eq 0 ]; then
+ echo -e "${GREEN}Success: The list command got the correct answer for $NAME!"
+ else
+    echo -e "${RED}ERROR: The files $NAME and expected_list/$NAME do not match."
+ exit 1
+ fi
+}
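+
+# A typical test step combines the three helpers above, e.g.:
+#   cmd "cmd=SILENCE ALL" "$HEALTH_CMDAPI_MSG_SILENCEALL"   # send a command and verify the response
+#   check "All silenced" "False True False True False True" # verify the resulting alarm states
+#   check_list "SILENCE_ALL"                                # compare the LIST output to the fixture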
+
+WHITE='\033[1;37m'
+RED='\033[0;31m'
+GREEN='\033[0;32m'
+GRAY='\033[0;37m'
+
+SETUP=0
+RESTART=0
+CLEANUP=0
+TEST=0
+URL="localhost:19999"
+
+err=0
+
+
+ HEALTH_CMDAPI_MSG_AUTHERROR="Auth Error"
+ HEALTH_CMDAPI_MSG_SILENCEALL="All alarm notifications are silenced"
+ HEALTH_CMDAPI_MSG_DISABLEALL="All health checks are disabled"
+ HEALTH_CMDAPI_MSG_RESET="All health checks and notifications are enabled"
+ HEALTH_CMDAPI_MSG_DISABLE="Health checks disabled for alarms matching the selectors"
+ HEALTH_CMDAPI_MSG_SILENCE="Alarm notifications silenced for alarms matching the selectors"
+ HEALTH_CMDAPI_MSG_ADDED="Alarm selector added"
+ HEALTH_CMDAPI_MSG_INVALID_KEY="Invalid key. Ignoring it."
+ HEALTH_CMDAPI_MSG_STYPEWARNING="WARNING: Added alarm selector to silence/disable alarms without a SILENCE or DISABLE command."
+ HEALTH_CMDAPI_MSG_NOSELECTORWARNING="WARNING: SILENCE or DISABLE command is ineffective without defining any alarm selectors."
+
+ if [ -f "${NETDATA_VARLIB_DIR}/netdata.api.key" ] ;then
+ read -r CORRECT_TOKEN < "${NETDATA_VARLIB_DIR}/netdata.api.key"
+ else
+ echo "${NETDATA_VARLIB_DIR}/netdata.api.key not found"
+ exit 1
+ fi
+ # Set correct token
+ TOKEN="${CORRECT_TOKEN}"
+
+ # Test default state
+ cmd "cmd=RESET" "$HEALTH_CMDAPI_MSG_RESET"
+ check "Default State" "False False False False False False"
+ check_list "RESET"
+
+ # Test auth failure
+ TOKEN="Wrong token"
+ cmd "cmd=DISABLE ALL" "$HEALTH_CMDAPI_MSG_AUTHERROR"
+ check "Default State" "False False False False False False"
+ check_list "DISABLE_ALL_ERROR"
+
+ # Set correct token
+ TOKEN="${CORRECT_TOKEN}"
+
+ # Test disable
+ cmd "cmd=DISABLE ALL" "$HEALTH_CMDAPI_MSG_DISABLEALL"
+ check "All disabled" "True False True False True False"
+ check_list "DISABLE_ALL"
+
+ # Reset
+ cmd "cmd=RESET" "$HEALTH_CMDAPI_MSG_RESET"
+ check "Default State" "False False False False False False"
+ check_list "RESET"
+
+ # Test silence
+ cmd "cmd=SILENCE ALL" "$HEALTH_CMDAPI_MSG_SILENCEALL"
+ check "All silenced" "False True False True False True"
+ check_list "SILENCE_ALL"
+
+ # Reset
+ cmd "cmd=RESET" "$HEALTH_CMDAPI_MSG_RESET"
+ check "Default State" "False False False False False False"
+ check_list "RESET"
+
+ # Add silencer by name
+ printf -v resp "$HEALTH_CMDAPI_MSG_SILENCE\n$HEALTH_CMDAPI_MSG_ADDED"
+ cmd "cmd=SILENCE&alarm=*10min_cpu_usage *load_trigger" "${resp}"
+ check "Silence notifications for alarm1 and load_trigger" "False True False False False True"
+ check_list "SILENCE_ALARM_CPU_USAGE_LOAD_TRIGGER"
+
+ # Convert to disable health checks
+ cmd "cmd=DISABLE" "$HEALTH_CMDAPI_MSG_DISABLE"
+ check "Disable notifications for alarm1 and load_trigger" "True False False False True False"
+ check_list "DISABLE"
+
+ # Convert back to silence notifications
+ cmd "cmd=SILENCE" "$HEALTH_CMDAPI_MSG_SILENCE"
+ check "Silence notifications for alarm1 and load_trigger" "False True False False False True"
+ check_list "SILENCE"
+
+ # Add second silencer by name
+ cmd "alarm=*10min_cpu_iowait" "$HEALTH_CMDAPI_MSG_ADDED"
+ check "Silence notifications for alarm1,alarm2 and load_trigger" "False True False True False True"
+ check_list "ALARM_CPU_IOWAIT"
+
+ # Reset
+ cmd "cmd=RESET" "$HEALTH_CMDAPI_MSG_RESET"
+ check_list "RESET"
+
+ # Add silencer by chart
+ printf -v resp "$HEALTH_CMDAPI_MSG_DISABLE\n$HEALTH_CMDAPI_MSG_ADDED"
+ cmd "cmd=DISABLE&chart=system.load" "${resp}"
+ check "Default State" "False False False False True False"
+ check_list "DISABLE_SYSTEM_LOAD"
+
+ # Add silencer by context
+ cmd "context=system.cpu" "$HEALTH_CMDAPI_MSG_ADDED"
+ check "Default State" "True False True False True False"
+ check_list "CONTEXT_SYSTEM_CPU"
+
+ # Reset
+ cmd "cmd=RESET" "$HEALTH_CMDAPI_MSG_RESET"
+ check_list "RESET"
+
+ # Add second condition to a selector (AND)
+ printf -v resp "$HEALTH_CMDAPI_MSG_SILENCE\n$HEALTH_CMDAPI_MSG_ADDED"
+ cmd "cmd=SILENCE&alarm=*10min_cpu_usage *load_trigger&chart=system.load" "${resp}"
+ check "Silence notifications load_trigger" "False False False False False True"
+ check_list "SILENCE_ALARM_CPU_USAGE"
+
+ # Add second selector with two conditions
+ cmd "alarm=*10min_cpu_usage *load_trigger&context=system.cpu" "$HEALTH_CMDAPI_MSG_ADDED"
+ check "Silence notifications load_trigger" "False True False False False True"
+ check_list "ALARM_CPU_USAGE"
+
+ # Reset
+ cmd "cmd=RESET" "$HEALTH_CMDAPI_MSG_RESET"
+ check_list "RESET"
+
+ # Add silencer without a command to disable or silence alarms
+ printf -v resp "$HEALTH_CMDAPI_MSG_ADDED\n$HEALTH_CMDAPI_MSG_STYPEWARNING"
+ cmd "families=load" "${resp}"
+ check "Family selector with no command" "False False False False False False"
+ check_list "FAMILIES_LOAD"
+
+ # Add silence command
+ cmd "cmd=SILENCE" "$HEALTH_CMDAPI_MSG_SILENCE"
+ check "Silence family load" "False False False False False True"
+ check_list "SILENCE_2"
+
+ # Reset
+ cmd "cmd=RESET" "$HEALTH_CMDAPI_MSG_RESET"
+ check_list "RESET"
+
+ # Add command without silencers
+ printf -v resp "$HEALTH_CMDAPI_MSG_SILENCE\n$HEALTH_CMDAPI_MSG_NOSELECTORWARNING"
+ cmd "cmd=SILENCE" "${resp}"
+ check "Command with no selector" "False False False False False False"
+ check_list "SILENCE_3"
+
+ # Add hosts silencer
+ cmd "hosts=*" "$HEALTH_CMDAPI_MSG_ADDED"
+ check "Silence all hosts" "False True False True False True"
+ check_list "HOSTS"
+
+ # Reset
+ cmd "cmd=RESET" "$HEALTH_CMDAPI_MSG_RESET"
+ check_list "RESET"
+
+if [ $err -gt 0 ] ; then
+ echo "$err error(s) found"
+ exit 1
+fi
diff --git a/tests/installer/checksums.sh b/tests/installer/checksums.sh
new file mode 100755
index 0000000..3fbbfa3
--- /dev/null
+++ b/tests/installer/checksums.sh
@@ -0,0 +1,54 @@
+#!/bin/sh
+
+#
+# Mechanism to validate the integrity of the kickstart files
+#
+# Copyright: SPDX-License-Identifier: GPL-3.0-or-later
+#
+# Author : Pawel Krupa (pawel@netdata.cloud)
+# Author : Pavlos Emm. Katsoulakis (paul@netdata.cloud)
+# Author : Austin S. Hemmelgarn (austin@netdata.cloud)
+set -e
+
+# If we are not at the top level directory of the netdata git repo, fail
+TOP_LEVEL=$(basename "$(git rev-parse --show-toplevel 2>/dev/null || echo "")")
+CWD="$(git rev-parse --show-cdup 2>/dev/null || echo "")"
+if [ -n "$CWD" ] || [ "${TOP_LEVEL}" != "netdata" ]; then
+  echo "Run as ./tests/installer/$(basename "$0") from the top level directory of the netdata git repository"
+ echo "Kickstart validation process aborted"
+ exit 1
+fi
+
+check_file() {
+ README_MD5=$(grep "$1" "$2" | grep md5sum | grep curl | cut -d '"' -f2)
+ KICKSTART_URL="https://my-netdata.io/$1"
+ KICKSTART="packaging/installer/$1"
+ KICKSTART_MD5="$(md5sum "${KICKSTART}" | cut -d' ' -f1)"
+ CALCULATED_MD5="$(curl -Ss "${KICKSTART_URL}" | md5sum | cut -d ' ' -f 1)"
+
+ # Conditionally run the website validation
+ if [ -z "${LOCAL_ONLY}" ]; then
+    echo "Validating ${KICKSTART_URL} against local file ${KICKSTART} with MD5 ${KICKSTART_MD5}..."
+ if [ "$KICKSTART_MD5" = "$CALCULATED_MD5" ]; then
+ echo "${KICKSTART_URL} looks fine"
+ else
+      echo "${KICKSTART_URL} md5sum does not match the local file; it needs to be updated"
+ exit 2
+ fi
+ fi
+
+ echo "Validating documentation for $1"
+ if [ "$KICKSTART_MD5" != "$README_MD5" ]; then
+ echo "Invalid checksum for $1 in $2."
+ echo "checksum in docs: $README_MD5"
+ echo "current checksum: $KICKSTART_MD5"
+ exit 2
+ else
+    echo "$1 md5sum is correctly documented"
+ fi
+}
+
+check_file kickstart.sh packaging/installer/methods/kickstart.md
+check_file kickstart-static64.sh packaging/installer/methods/kickstart-64.md
+
+echo "No problems found, exiting successfully!"
diff --git a/tests/installer/slack.sh b/tests/installer/slack.sh
new file mode 100755
index 0000000..3f3eff6
--- /dev/null
+++ b/tests/installer/slack.sh
@@ -0,0 +1,65 @@
+# No shebang necessary
+# BASH Lib: Simple incoming webhook for Slack integration.
+#
+# The script expects the following parameters to be defined by the upper layer:
+# SLACK_NOTIFY_WEBHOOK_URL
+# SLACK_BOT_NAME
+# SLACK_CHANNEL
+#
+# Copyright:
+#
+# Author: Pavlos Emm. Katsoulakis <paul@netdata.cloud>
+
+post_message() {
+ TYPE="$1"
+ MESSAGE="$2"
+ CUSTOM_CHANNEL="$3"
+
+ case "$TYPE" in
+ "PLAIN_MESSAGE")
+ curl -X POST --data-urlencode "payload={\"channel\": \"${SLACK_CHANNEL}\", \"username\": \"${SLACK_BOT_NAME}\", \"text\": \"${MESSAGE}\", \"icon_emoji\": \":space_invader:\"}" "${SLACK_NOTIFY_WEBHOOK_URL}"
+ ;;
+ "TRAVIS_MESSAGE")
+ if [ "${TRAVIS_EVENT_TYPE}" == "pull_request" ] || [ "${TRAVIS_BRANCH}" != "master" ] ; then
+ echo "Skipping notification due to build type."
+ return 0
+ fi
+
+ if [ -n "${CUSTOM_CHANNEL}" ]; then
+ echo "Sending travis message to custom channel ${CUSTOM_CHANNEL}"
+ OPTIONAL_CHANNEL_INFO="\"channel\": \"${CUSTOM_CHANNEL}\","
+ fi
+
+ POST_MESSAGE="{
+ ${OPTIONAL_CHANNEL_INFO}
+ \"text\": \"${TRAVIS_REPO_SLUG}, ${MESSAGE}\",
+ \"attachments\": [{
+ \"text\": \"${TRAVIS_JOB_NUMBER}: Event type '${TRAVIS_EVENT_TYPE}', on '${TRAVIS_OS_NAME}' \",
+ \"fallback\": \"I could not determine the build\",
+ \"callback_id\": \"\",
+ \"color\": \"#3AA3E3\",
+ \"attachment_type\": \"default\",
+ \"actions\": [
+ {
+ \"name\": \"${TRAVIS_BUILD_NUMBER}\",
+ \"text\": \"Build #${TRAVIS_BUILD_NUMBER}\",
+ \"type\": \"button\",
+ \"url\": \"${TRAVIS_BUILD_WEB_URL}\"
+ },
+ {
+ \"name\": \"${TRAVIS_JOB_NUMBER}\",
+ \"text\": \"Job #${TRAVIS_JOB_NUMBER}\",
+ \"type\": \"button\",
+ \"url\": \"${TRAVIS_JOB_WEB_URL}\"
+ }]
+ }]
+ }"
+ echo "Sending ${POST_MESSAGE}"
+ curl -X POST --data-urlencode "payload=${POST_MESSAGE}" "${SLACK_NOTIFY_WEBHOOK_URL}"
+ ;;
+ *)
+ echo "Unrecognized message type \"$TYPE\" was given"
+ return 1
+ ;;
+ esac
+}
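+
+# Example (assuming SLACK_NOTIFY_WEBHOOK_URL, SLACK_BOT_NAME and SLACK_CHANNEL
+# have been defined by the caller, as noted in the header above):
+#   post_message "PLAIN_MESSAGE" "Hello from the build system"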
diff --git a/tests/k6/data.js b/tests/k6/data.js
new file mode 100644
index 0000000..fb4e087
--- /dev/null
+++ b/tests/k6/data.js
@@ -0,0 +1,67 @@
+import http from "k6/http";
+import { log, check, group, sleep } from "k6";
+import { Rate } from "k6/metrics";
+
+// A custom metric to track failure rates
+var failureRate = new Rate("check_failure_rate");
+
+// Options
+export let options = {
+ stages: [
+        // Linearly ramp up from 1 to 20 VUs during the first 30s
+        { target: 20, duration: "30s" },
+        // Hold at 20 VUs for the next 1 minute
+        { target: 20, duration: "1m" },
+        // Linearly ramp down from 20 to 0 VUs over the last 10 seconds
+ { target: 0, duration: "10s" }
+ ],
+ thresholds: {
+ // We want the 95th percentile of all HTTP request durations to be less than 500ms
+ "http_req_duration": ["p(95)<500"],
+ // Requests with the fast tag should finish even faster
+ "http_req_duration{fast:yes}": ["p(99)<250"],
+ // Thresholds based on the custom metric we defined and use to track application failures
+ "check_failure_rate": [
+ // Global failure rate should be less than 1%
+ "rate<0.01",
+ // Abort the test early if it climbs over 5%
+ { threshold: "rate<=0.05", abortOnFail: true },
+ ],
+ },
+};
+
+function rnd(min, max) {
+ min = Math.ceil(min);
+ max = Math.floor(max);
+    return Math.floor(Math.random() * (max - min)) + min; // The maximum is exclusive and the minimum is inclusive.
+}
+
+// Main function
+export default function () {
+ // Control what the data request asks for
+ let charts = [ "example.random" ]
+ let chartmin = 0;
+ let chartmax = charts.length - 1;
+ let aftermin = 60;
+ let aftermax = 3600;
+ let beforemin = 3503600;
+ let beforemax = 3590000;
+ let pointsmin = 300;
+ let pointsmax = 3600;
+
+ group("Requests", function () {
+        // Execute multiple requests in parallel like a browser, to fetch data for the charts. The one taking the longest is the data request.
+ let resps = http.batch([
+ ["GET", "http://localhost:19999/api/v1/info", { tags: { fast: "yes" } }],
+ ["GET", "http://localhost:19999/api/v1/charts", { tags: { fast: "yes" } }],
+ ["GET", "http://localhost:19999/api/v1/data?chart="+charts[rnd(chartmin,chartmax)]+"&before=-"+rnd(beforemin,beforemax)+"&after=-"+rnd(aftermin,aftermax)+"&points="+rnd(pointsmin,pointsmax)+"&format=json&group=average&gtime=0&options=ms%7Cflip%7Cjsonwrap%7Cnonzero&_="+rnd(1,1000000000000), { }],
+ ["GET", "http://localhost:19999/api/v1/alarms", { tags: { fast: "yes" } }]
+ ]);
+ // Combine check() call with failure tracking
+ failureRate.add(!check(resps, {
+ "status is 200": (r) => r[0].status === 200 && r[1].status === 200
+ }));
+ });
+
+ sleep(Math.random() * 2 + 1); // Random sleep between 1s and 3s
+}
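+
+// To run this load test (assuming k6 is installed and a netdata agent is
+// listening locally on :19999): k6 run tests/k6/data.js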
diff --git a/tests/lifecycle.bats b/tests/lifecycle.bats
new file mode 100755
index 0000000..728c570
--- /dev/null
+++ b/tests/lifecycle.bats
@@ -0,0 +1,61 @@
+#!/usr/bin/env bats
+#
+# Netdata installation lifecycle testing script.
+# This is to validate the install, update and uninstall of netdata
+#
+# Copyright: SPDX-License-Identifier: GPL-3.0-or-later
+#
+# Author : Pavlos Emm. Katsoulakis <paul@netdata.cloud>
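+# Run from the top level of the git repository, e.g.: ./tests/lifecycle.bats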
+#
+
+INSTALLATION="$BATS_TMPDIR/installation"
+ENV="${INSTALLATION}/netdata/etc/netdata/.environment"
+# List of files which need to be checked. Paths must not start with '/'.
+FILES="usr/libexec/netdata/plugins.d/go.d.plugin
+ usr/libexec/netdata/plugins.d/charts.d.plugin
+ usr/libexec/netdata/plugins.d/python.d.plugin
+ usr/libexec/netdata/plugins.d/node.d.plugin"
+
+DIRS="usr/sbin/netdata
+ etc/netdata
+ usr/share/netdata
+ usr/libexec/netdata
+ var/cache/netdata
+ var/lib/netdata
+ var/log/netdata"
+
+setup() {
+  # If we are not at the top level directory of the netdata git repo, fail
+ TOP_LEVEL=$(basename "$(git rev-parse --show-toplevel)")
+ CWD=$(git rev-parse --show-cdup || echo "")
+ if [ -n "${CWD}" ] || [ ! "${TOP_LEVEL}" == "netdata" ]; then
+    echo "Run as ./tests/$(basename "$0") from the top level directory of the git repository"
+ exit 1
+ fi
+}
+
+@test "install netdata" {
+ ./netdata-installer.sh --dont-wait --dont-start-it --auto-update --install "${INSTALLATION}"
+
+ # Validate particular files
+ for file in $FILES; do
+ [ ! -f "$BATS_TMPDIR/$file" ]
+ done
+
+ # Validate particular directories
+ for a_dir in $DIRS; do
+ [ ! -d "$BATS_TMPDIR/$a_dir" ]
+ done
+}
+
+@test "update netdata" {
+ export ENVIRONMENT_FILE="${ENV}"
+ ${INSTALLATION}/netdata/usr/libexec/netdata/netdata-updater.sh --not-running-from-cron
+ ! grep "new_installation" "${ENV}"
+}
+
+@test "uninstall netdata" {
+ ./packaging/installer/netdata-uninstaller.sh --yes --force --env "${ENV}"
+ [ ! -f "${INSTALLATION}/netdata/usr/sbin/netdata" ]
+ [ ! -f "/etc/cron.daily/netdata-updater" ]
+}
diff --git a/tests/node.d/fronius.chart.spec.js b/tests/node.d/fronius.chart.spec.js
new file mode 100644
index 0000000..92e88d2
--- /dev/null
+++ b/tests/node.d/fronius.chart.spec.js
@@ -0,0 +1,162 @@
+"use strict";
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+var netdata = require("../../node.d/node_modules/netdata");
+// remember: subject will be a singleton!
+var subject = require("../../node.d/fronius.node");
+
+var service = netdata.service({
+ name: "chart",
+ module: this
+});
+
+describe("fronius chart creation", function () {
+
+ var chartPrefix = "fronius_chart.";
+
+ beforeAll(function () {
+ // change this to enable debug log
+ netdata.options.DEBUG = false;
+ });
+
+ afterAll(function () {
+ deleteProperties(subject.charts)
+ });
+
+ it("should return a basic chart dimension", function () {
+ var result = subject.createBasicDimension("id", "name", 2);
+
+ expect(result.divisor).toBe(2);
+ expect(result.id).toBe("id");
+ expect(result.algorithm).toEqual(netdata.chartAlgorithms.absolute);
+ expect(result.multiplier).toBe(1);
+ });
+
+ it("should return the power chart definition", function () {
+ var suffix = "power";
+ var result = subject.getSitePowerChart(service, suffix);
+
+ expect(result.id).toBe(chartPrefix + suffix);
+ expect(result.units).toBe("W");
+ expect(result.type).toBe(netdata.chartTypes.area);
+ expect(result.family).toBe("power");
+ expect(result.context).toBe("fronius.power");
+ expect(result.dimensions[subject.powerGridId].name).toBe("grid");
+ expect(result.dimensions[subject.powerPvId].name).toBe("photovoltaics");
+ expect(result.dimensions[subject.powerAccuId].name).toBe("accumulator");
+ expect(Object.keys(result.dimensions).length).toBe(3);
+ });
+
+ it("should return the consumption chart definition", function () {
+ var suffix = "Load";
+ var result = subject.getSiteConsumptionChart(service, suffix);
+
+ expect(result.id).toBe(chartPrefix + suffix);
+ expect(result.units).toBe("W");
+ expect(result.type).toBe(netdata.chartTypes.area);
+ expect(result.family).toBe("consumption");
+ expect(result.context).toBe("fronius.consumption");
+ expect(Object.keys(result.dimensions).length).toBe(1);
+ expect(result.dimensions[subject.consumptionLoadId].name).toBe("load");
+ });
+
+ it("should return the autonomy chart definition", function () {
+ var suffix = "Autonomy";
+ var result = subject.getSiteAutonomyChart(service, suffix);
+
+ expect(result.id).toBe(chartPrefix + suffix);
+ expect(result.units).toBe("%");
+ expect(result.type).toBe(netdata.chartTypes.area);
+ expect(result.family).toBe("autonomy");
+ expect(result.context).toBe("fronius.autonomy");
+ expect(Object.keys(result.dimensions).length).toBe(3);
+ expect(result.dimensions[subject.autonomyId].name).toBe("autonomy");
+ expect(result.dimensions[subject.consumptionSelfId].name).toBe("self_consumption");
+ });
+
+ it("should return the energy today chart definition", function () {
+ var suffix = "Energy today";
+ var result = subject.getSiteEnergyTodayChart(service, suffix);
+
+ expect(result.id).toBe(chartPrefix + suffix);
+ expect(result.units).toBe("kWh");
+ expect(result.type).toBe(netdata.chartTypes.area);
+ expect(result.family).toBe("energy");
+ expect(result.context).toBe("fronius.energy.today");
+ expect(Object.keys(result.dimensions).length).toBe(1);
+ expect(result.dimensions[subject.energyTodayId].name).toBe("today");
+ });
+
+ it("should return the energy year chart definition", function () {
+ var suffix = "Energy year";
+ var result = subject.getSiteEnergyYearChart(service, suffix);
+
+ expect(result.id).toBe(chartPrefix + suffix);
+ expect(result.units).toBe("kWh");
+ expect(result.type).toBe(netdata.chartTypes.area);
+ expect(result.family).toBe("energy");
+ expect(result.context).toBe("fronius.energy.year");
+ expect(Object.keys(result.dimensions).length).toBe(1);
+ expect(result.dimensions[subject.energyYearId].name).toBe("year");
+ });
+
+ it("should return the inverter chart definition with a single numerical inverter", function () {
+ var inverters = {
+ "1": {}
+ };
+ var suffix = "numerical";
+ var result = subject.getInverterPowerChart(service, suffix, inverters);
+
+ expect(result.id).toBe(chartPrefix + suffix);
+ expect(result.units).toBe("W");
+ expect(result.type).toBe(netdata.chartTypes.stacked);
+ expect(result.family).toBe("inverters");
+ expect(result.context).toBe("fronius.inverter.output");
+ expect(Object.keys(result.dimensions).length).toBe(1);
+ expect(result.dimensions["1"].name).toBe("inverter_1");
+ });
+
+ it("should return the inverter chart definition with a single alphabetical inverter", function () {
+ var key = "Cellar";
+ var inverters = {
+ "Cellar": {}
+ };
+ var suffix = "alphabetical";
+ var result = subject.getInverterPowerChart(service, suffix, inverters);
+
+ expect(result.id).toBe(chartPrefix + suffix);
+ expect(result.units).toBe("W");
+ expect(result.type).toBe(netdata.chartTypes.stacked);
+ expect(result.family).toBe("inverters");
+ expect(result.context).toBe("fronius.inverter.output");
+ expect(Object.keys(result.dimensions).length).toBe(1);
+ expect(result.dimensions[key].name).toBe(key);
+ });
+
+    it("should return the inverter chart definition with multiple alphanumerical inverters", function () {
+ var alpha = "Cellar";
+ var numerical = 1;
+ var inverters = {
+ "Cellar": {},
+ "1": {}
+ };
+ var suffix = "alphanumerical";
+ var result = subject.getInverterPowerChart(service, suffix, inverters);
+
+ expect(result.id).toBe(chartPrefix + suffix);
+ expect(result.units).toBe("W");
+ expect(result.type).toBe(netdata.chartTypes.stacked);
+ expect(result.family).toBe("inverters");
+ expect(result.context).toBe("fronius.inverter.output");
+ expect(Object.keys(result.dimensions).length).toBe(2);
+ expect(result.dimensions[alpha].name).toBe(alpha);
+ expect(result.dimensions[numerical].name).toBe("inverter_" + numerical);
+ });
+
+ it("should return the same chart definition on second call for lazy loading", function () {
+ var first = subject.getSitePowerChart(service, "id");
+ var second = subject.getSitePowerChart(service, "id");
+
+ expect(first).toBe(second);
+ });
+});
diff --git a/tests/node.d/fronius.parse.spec.js b/tests/node.d/fronius.parse.spec.js
new file mode 100644
index 0000000..e6f308f
--- /dev/null
+++ b/tests/node.d/fronius.parse.spec.js
@@ -0,0 +1,333 @@
+"use strict";
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+var netdata = require("../../node.d/node_modules/netdata");
+// remember: subject will be a singleton!
+var subject = require("../../node.d/fronius.node");
+
+var service = netdata.service({
+ name: "parse",
+ module: this
+});
+
+var root = {
+ "Body": {
+ "Data": {
+ "Site": {},
+ "Inverters": {}
+ }
+ }
+};
+
+describe("fronius parsing for power chart", function () {
+
+ var site = root.Body.Data.Site;
+
+ afterEach(function () {
+ deleteProperties(site);
+ });
+
+ it("should return 3000 for P_Grid when rounded", function () {
+ site.P_Grid = 2999.501;
+ var result = subject.parsePowerChart(service, site).dimensions[0];
+
+ expect(result.name).toBe(subject.powerGridId);
+ expect(result.value).toBe(3000);
+ });
+
+ it("should return -3000 for P_Grid", function () {
+ site.P_Grid = -3000;
+ var result = subject.parsePowerChart(service, site).dimensions[0];
+
+ expect(result.name).toBe(subject.powerGridId);
+ expect(result.value).toBe(-3000);
+ });
+
+ it("should return 0 for P_Grid if it is null", function () {
+ site.P_Grid = null;
+ var result = subject.parsePowerChart(service, site).dimensions[0];
+
+ expect(result.name).toBe(subject.powerGridId);
+ expect(result.value).toBe(0);
+ });
+
+ it("should return 0 for P_Grid if it is zero", function () {
+ site.P_Grid = 0;
+ var result = subject.parsePowerChart(service, site).dimensions[0];
+
+ expect(result.name).toBe(subject.powerGridId);
+ expect(result.value).toBe(0);
+ });
+
+ it("should return -100 for P_Akku", function () {
+        // it is unclear whether negative values are possible for p_akku (couldn't test this, and no API docs were found).
+ site.P_Akku = -100;
+ var result = subject.parsePowerChart(service, site).dimensions[2];
+
+ expect(result.name).toBe(subject.powerAccuId);
+ expect(result.value).toBe(-100);
+ });
+
+ it("should return 0 for P_Akku if it is null", function () {
+ site.P_Akku = null;
+ var result = subject.parsePowerChart(service, site).dimensions[2];
+
+ expect(result.name).toBe(subject.powerAccuId);
+ expect(result.value).toBe(0);
+ });
+
+ it("should return 0 for P_Akku if it is zero", function () {
+ site.P_Akku = 0;
+ var result = subject.parsePowerChart(service, site).dimensions[2];
+
+ expect(result.name).toBe(subject.powerAccuId);
+ expect(result.value).toBe(0);
+ });
+
+ it("should return 100 for P_PV", function () {
+ site.P_PV = 100;
+ var result = subject.parsePowerChart(service, site).dimensions[1];
+
+ expect(result.name).toBe(subject.powerPvId);
+ expect(result.value).toBe(100);
+ });
+
+ it("should return 0 for P_PV if it is zero", function () {
+ site.P_PV = 0;
+ var result = subject.parsePowerChart(service, site).dimensions[1];
+
+ expect(result.name).toBe(subject.powerPvId);
+ expect(result.value).toBe(0);
+ });
+
+ it("should return 0 for P_PV if it is null", function () {
+ site.P_PV = null;
+ var result = subject.parsePowerChart(service, site).dimensions[1];
+
+ expect(result.name).toBe(subject.powerPvId);
+ expect(result.value).toBe(0);
+ });
+
+ it("should return 0 for P_PV if it is negative", function () {
+ // solar panels shouldn't consume anything, only produce.
+ site.P_PV = -1;
+ var result = subject.parsePowerChart(service, site).dimensions[1];
+
+ expect(result.name).toBe(subject.powerPvId);
+ expect(result.value).toBe(0);
+ });
+
+});
+
+describe("fronius parsing for consumption", function () {
+
+ var site = root.Body.Data.Site;
+
+ afterEach(function () {
+ deleteProperties(site);
+ });
+
+ it("should return 1000 for P_Load when rounded", function () {
+ site.P_Load = 1000.499;
+ var result = subject.parseConsumptionChart(service, site).dimensions[0];
+
+ expect(result.name).toBe(subject.consumptionLoadId);
+ expect(result.value).toBe(1000);
+ });
+
+ it("should return absolute value for P_Load when negative", function () {
+ /*
+        with firmware 3.7.4 it is sometimes possible that negative values are returned for P_Load,
+        which makes no sense: there is always some device consuming electricity around the clock.
+        The best we can do is make it a positive value, since 0 doesn't make much sense either.
+        This workaround seems to work, as no strange peaks were observed during long-term testing.
+ */
+ site.P_Load = -50;
+ var result = subject.parseConsumptionChart(service, site).dimensions[0];
+
+ expect(result.name).toBe(subject.consumptionLoadId);
+ expect(result.value).toBe(50);
+ });
+
+ it("should return 0 for P_Load if it is null", function () {
+ site.P_Load = null;
+ var result = subject.parseConsumptionChart(service, site).dimensions[0];
+
+ expect(result.name).toBe(subject.consumptionLoadId);
+ expect(result.value).toBe(0);
+ });
+
+ it("should return 0 for P_Load if it is zero", function () {
+ site.P_Load = 0;
+ var result = subject.parseConsumptionChart(service, site).dimensions[0];
+
+ expect(result.name).toBe(subject.consumptionLoadId);
+ expect(result.value).toBe(0);
+ });
+
+});
+
+describe("fronius parsing for autonomy", function () {
+
+ var site = root.Body.Data.Site;
+
+ afterEach(function () {
+ deleteProperties(site);
+ });
+
+ it("should return 100 for rel_Autonomy", function () {
+ site.rel_Autonomy = 100;
+ var result = subject.parseAutonomyChart(service, site).dimensions[0];
+
+ expect(result.name).toBe(subject.autonomyId);
+ expect(result.value).toBe(100);
+ });
+
+ it("should return 0 for rel_Autonomy if it is zero", function () {
+ site.rel_Autonomy = 0;
+ var result = subject.parseAutonomyChart(service, site).dimensions[0];
+
+ expect(result.name).toBe(subject.autonomyId);
+ expect(result.value).toBe(0);
+ });
+
+ it("should return 0 for rel_Autonomy if it is null", function () {
+ site.rel_Autonomy = null;
+ var result = subject.parseAutonomyChart(service, site).dimensions[0];
+
+ expect(result.name).toBe(subject.autonomyId);
+ expect(result.value).toBe(0);
+ });
+
+ it("should return 20 for rel_Autonomy if it is 20", function () {
+ site.rel_Autonomy = 20.1;
+ var result = subject.parseAutonomyChart(service, site).dimensions[0];
+
+ expect(result.name).toBe(subject.autonomyId);
+ expect(result.value).toBe(20);
+ });
+
+ it("should return 20 for rel_SelfConsumption if it is 19.5", function () {
+ site.rel_SelfConsumption = 19.5;
+ var result = subject.parseAutonomyChart(service, site).dimensions[1];
+
+ expect(result.name).toBe(subject.consumptionSelfId);
+ expect(result.value).toBe(20);
+ });
+
+ it("should return 100 for rel_SelfConsumption if it is null", function () {
+ /*
+        During testing it was observed that the API delivers null if the solar panels
+        do not produce enough energy to supply the local load. In this case the value should be 100,
+        since all the produced energy is directly consumed.
+ */
+ site.rel_SelfConsumption = null;
+ var result = subject.parseAutonomyChart(service, site).dimensions[1];
+
+ expect(result.name).toBe(subject.consumptionSelfId);
+ expect(result.value).toBe(100);
+ });
+
+ it("should return 0 for rel_SelfConsumption if it is zero", function () {
+ site.rel_SelfConsumption = 0;
+ var result = subject.parseAutonomyChart(service, site).dimensions[1];
+
+ expect(result.name).toBe(subject.consumptionSelfId);
+ expect(result.value).toBe(0);
+ });
+
+ it("should return 0 for solarConsumption if PV is null", function () {
+ site.P_PV = null;
+ site.P_Load = -1000;
+ var result = subject.parseAutonomyChart(service, site).dimensions[2];
+
+ expect(result.name).toBe(subject.solarConsumptionId);
+ expect(result.value).toBe(0);
+ });
+
+ it("should return 100 for solarConsumption if Load is higher than solar power", function () {
+ site.P_PV = 500;
+ site.P_Load = -1500;
+ var result = subject.parseAutonomyChart(service, site).dimensions[2];
+
+ expect(result.name).toBe(subject.solarConsumptionId);
+ expect(result.value).toBe(100);
+ });
+
+ it("should return 50 for solarConsumption if Load is half than solar power", function () {
+ site.P_PV = 3000;
+ site.P_Load = -1500;
+ var result = subject.parseAutonomyChart(service, site).dimensions[2];
+
+ expect(result.name).toBe(subject.solarConsumptionId);
+ expect(result.value).toBe(50);
+ });
+});
+
+describe("fronius parsing for energy", function () {
+
+ var site = root.Body.Data.Site;
+
+ afterEach(function () {
+ deleteProperties(site);
+ });
+
+ it("should return 10000 for E_Day", function () {
+ site.E_Day = 10000;
+ var result = subject.parseEnergyTodayChart(service, site).dimensions[0];
+
+ expect(result.name).toBe(subject.energyTodayId);
+ expect(result.value).toBe(10000);
+ });
+
+ it("should return 0 for E_Day if it is negative", function () {
+ /*
+        The solar panels can't produce negative energy, really; a negative value would be a fault of the API.
+ */
+ site.E_Day = -0.4;
+ var result = subject.parseEnergyTodayChart(service, site).dimensions[0];
+
+ expect(result.name).toBe(subject.energyTodayId);
+ expect(result.value).toBe(0);
+ });
+
+    it("should return 100000 for E_Year", function () {
+ site.E_Year = 100000.4;
+ var result = subject.parseEnergyYearChart(service, site).dimensions[0];
+
+ expect(result.name).toBe(subject.energyYearId);
+ expect(result.value).toBe(100000);
+ });
+
+ it("should return 0 for E_Year if it is negative", function () {
+ /*
+        A return value of 0 only makes sense on New Year's Eve anyway, when the counter is reset.
+        A negative value, though, is a fault of the API; it wouldn't make sense.
+ */
+ site.E_Year = -1;
+ var result = subject.parseEnergyYearChart(service, site).dimensions[0];
+
+ expect(result.name).toBe(subject.energyYearId);
+ expect(result.value).toBe(0);
+ });
+});
+
+describe("fronius parsing for inverters", function () {
+
+ var inverters = root.Body.Data.Inverters;
+
+ afterEach(function () {
+ deleteProperties(inverters);
+ });
+
+ it("should return 1000 for P for inverter with name", function () {
+ inverters["cellar"] = {
+ P: 1000
+ };
+ var result = subject.parseInverterChart(service, inverters).dimensions[0];
+
+ expect(result.name).toBe("cellar");
+ expect(result.value).toBe(1000);
+ });
+
+});
diff --git a/tests/node.d/fronius.process.spec.js b/tests/node.d/fronius.process.spec.js
new file mode 100644
index 0000000..141fa8a
--- /dev/null
+++ b/tests/node.d/fronius.process.spec.js
@@ -0,0 +1,75 @@
+"use strict";
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+var netdata = require("../../node.d/node_modules/netdata");
+// remember: subject will be a singleton!
+var subject = require("../../node.d/fronius.node");
+
+var service = netdata.service({
+ name: "process",
+ module: this
+});
+
+var exampleResponse = {
+ "Body": {
+ "Data": {
+ "Site": {
+ "Mode": "meter",
+ "P_Grid": -3430.729923,
+ "P_Load": -910.270077,
+ "P_Akku": null,
+ "P_PV": 4341,
+ "rel_SelfConsumption": 20.969133,
+ "rel_Autonomy": 100,
+ "E_Day": 57230,
+ "E_Year": 6425915.5,
+ "E_Total": 15388710,
+ "Meter_Location": "grid"
+ },
+ "Inverters": {
+ "1": {
+ "DT": 123,
+ "P": 4341,
+ "E_Day": 57230,
+ "E_Year": 6425915.5,
+ "E_Total": 15388710
+ }
+ }
+ }
+ }
+};
+
+describe("fronius main processing", function () {
+
+ beforeAll(function () {
+ // change this to enable debug log
+ netdata.options.DEBUG = false;
+ });
+
+ beforeEach(function () {
+ deleteProperties(subject.charts);
+ });
+
+ it("should send parsed values to netdata", function () {
+ netdata.send = jasmine.createSpy("send");
+
+ subject.processResponse(service, exampleResponse);
+
+ expect(netdata.send.calls.count()).toBe(6);
+
+ // check if some parsed values were sent.
+ var powerChart = netdata.send.calls.argsFor(5)[0];
+
+ expect(powerChart).toContain("SET p_grid = -3431");
+ expect(powerChart).toContain("SET p_pv = 4341");
+
+ var inverterChart = netdata.send.calls.argsFor(0)[0];
+
+ expect(inverterChart).toContain("SET 1 = 4341");
+
+ var autonomyChart = netdata.send.calls.argsFor(3)[0];
+ expect(autonomyChart).toContain("SET rel_selfconsumption = 21");
+ });
+
+
+});
diff --git a/tests/node.d/fronius.validation.spec.js b/tests/node.d/fronius.validation.spec.js
new file mode 100644
index 0000000..b7938d5
--- /dev/null
+++ b/tests/node.d/fronius.validation.spec.js
@@ -0,0 +1,155 @@
+"use strict";
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+var netdata = require("../../node.d/node_modules/netdata");
+// remember: subject will be a singleton!
+var subject = require("../../node.d/fronius.node");
+
+var service = netdata.service({
+ name: "validation",
+ module: this
+});
+
+describe("fronius response validation", function () {
+
+ it("should do nothing if response is null", function () {
+ netdata.send = jasmine.createSpy("send");
+
+ subject.processResponse(service, null);
+ var result = netdata.send.calls.count();
+
+ expect(result).toBe(0);
+ });
+
+ it("should return null if response is null", function () {
+ var result = subject.convertToJson(null);
+
+ expect(result).toBeNull();
+ });
+
+ it("should return null and log error if response cannot be parsed", function () {
+ netdata.error = jasmine.createSpy("error");
+
+ // trailing commas are enough to create syntax exceptions
+ var result = subject.convertToJson("{name,}");
+
+ expect(result).toBeNull();
+ expect(netdata.error.calls.count()).toBe(1);
+ });
+
+ it("should return true if response is valid", function () {
+ var result = subject.isResponseValid({
+ "Body": {
+ "Data": {
+ "Site": {
+ "Mode": "meter"
+ },
+ "Inverters": {
+ "1": {}
+ }
+ }
+ }
+ });
+
+ expect(result).toBeTruthy();
+ });
+
+ it("should return false if response is missing data", function () {
+ var result = subject.isResponseValid({
+ "Body": {}
+ });
+
+ expect(result).toBeFalsy();
+ });
+
+ it("should return false if response is missing inverter", function () {
+ var result = subject.isResponseValid({
+ "Body": {
+ "Data": {
+ "Site": {}
+ }
+ }
+ });
+
+ expect(result).toBeFalsy();
+ });
+
+    it("should return false if response is missing site", function () {
+ var result = subject.isResponseValid({
+ "Body": {
+ "Data": {
+ "Inverters": {}
+ }
+ }
+ });
+
+ expect(result).toBeFalsy();
+ });
+
+});
+
+describe("fronius configuration validation", function () {
+
+ it("should return 0 if there are no servers configured", function () {
+ var result = subject.configure({});
+
+ expect(result).toBe(0);
+ });
+
+ it("should return 0 if the servers array is empty", function () {
+ var result = subject.configure({
+ "servers": []
+ });
+
+ expect(result).toBe(0);
+ });
+
+ it("should return 0 if there is one server configured incorrectly", function () {
+ var result = subject.configure({
+ "servers": [{}]
+ });
+
+ expect(result).toBe(0);
+ });
+
+ it("should return 1 if there is one server configured", function () {
+ subject.serviceExecute = jasmine.createSpy("serviceExecute");
+ var name = "solar1";
+ var result = subject.configure({
+ "servers": [{
+ "name": name,
+ "api_path": "/api/",
+ "hostname": "solar1.local"
+ }]
+ });
+
+ expect(result).toBe(1);
+ expect(subject.serviceExecute).toHaveBeenCalledWith(name, "solar1.local/api/", 5);
+ });
+
+ it("should return 2 if there are two servers configured", function () {
+ subject.serviceExecute = jasmine.createSpy("serviceExecute");
+ var name1 = "solar 1";
+ var name2 = "solar 2";
+ var result = subject.configure({
+ "servers": [
+ {
+ "name": name1,
+ "api_path": "/",
+ "hostname": "solar1.local"
+ },
+ {
+ "name": name2,
+ "api_path": "/",
+ "hostname": "solar2.local",
+ "update_every": 3
+ }
+ ]
+ });
+
+ expect(result).toBe(2);
+ expect(subject.serviceExecute).toHaveBeenCalledWith(name1, "solar1.local/", 5);
+ expect(subject.serviceExecute).toHaveBeenCalledWith(name2, "solar2.local/", 3);
+ });
+
+});
diff --git a/tests/profile/Makefile b/tests/profile/Makefile
new file mode 100644
index 0000000..9348a48
--- /dev/null
+++ b/tests/profile/Makefile
@@ -0,0 +1,52 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+COMMON_CFLAGS = -I ../../ -DTARGET_OS=1 -Wall -Wextra
+PROFILE_CFLAGS = -O1 -ggdb $(COMMON_CFLAGS)
+PERFORMANCE_CFLAGS = -O2 $(COMMON_CFLAGS)
+
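+# switch CFLAGS to $(PROFILE_CFLAGS) when the binaries are meant for gdb/perf profiling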
+CFLAGS = $(PERFORMANCE_CFLAGS)
+
+LIBNETDATA_FILES = \
+ ../../libnetdata/popen/popen.o \
+ ../../libnetdata/storage_number/storage_number.o \
+ ../../libnetdata/avl/avl.o \
+ ../../libnetdata/socket/socket.o \
+ ../../libnetdata/os.o \
+ ../../libnetdata/clocks/clocks.o \
+ ../../libnetdata/procfile/procfile.o \
+ ../../libnetdata/statistical/statistical.o \
+ ../../libnetdata/eval/eval.o \
+ ../../libnetdata/threads/threads.o \
+ ../../libnetdata/dictionary/dictionary.o \
+ ../../libnetdata/simple_pattern/simple_pattern.o \
+ ../../libnetdata/url/url.o \
+ ../../libnetdata/config/appconfig.o \
+ ../../libnetdata/libnetdata.o \
+ ../../libnetdata/buffer/buffer.o \
+ ../../libnetdata/adaptive_resortable_list/adaptive_resortable_list.o \
+ ../../libnetdata/locks/locks.o \
+ ../../libnetdata/log/log.o \
+ $(NULL)
+
+COMMON_LDFLAGS = $(LIBNETDATA_FILES) -pthread -lm
+
+all: statsd-stress benchmark-procfile-parser test-eval benchmark-dictionary benchmark-value-pairs
+
+benchmark-procfile-parser: benchmark-procfile-parser.c
+ gcc ${CFLAGS} -o $@ $^ ${COMMON_LDFLAGS}
+
+benchmark-dictionary: benchmark-dictionary.c
+ gcc ${CFLAGS} -o $@ $^ ${COMMON_LDFLAGS}
+
+benchmark-value-pairs: benchmark-value-pairs.c
+ gcc ${CFLAGS} -o $@ $^ ${COMMON_LDFLAGS}
+
+statsd-stress: statsd-stress.c
+ gcc ${CFLAGS} -o $@ $^ ${COMMON_LDFLAGS}
+
+test-eval: test-eval.c
+ gcc ${CFLAGS} -o $@ $^ ${COMMON_LDFLAGS}
+
+clean:
+ rm -f benchmark-procfile-parser statsd-stress test-eval benchmark-dictionary benchmark-value-pairs
diff --git a/tests/profile/benchmark-dictionary.c b/tests/profile/benchmark-dictionary.c
new file mode 100644
index 0000000..30c098d
--- /dev/null
+++ b/tests/profile/benchmark-dictionary.c
@@ -0,0 +1,131 @@
+/* SPDX-License-Identifier: GPL-3.0-or-later */
+/*
+ * 1. build netdata (as usual)
+ * 2. cd tests/profile/
+ * 3. build it with the Makefile in this directory:
+ *    make benchmark-dictionary
+ *
+ */
+
+#include "config.h"
+#include "libnetdata/libnetdata.h"
+
+struct myvalue {
+ int i;
+};
+
+void netdata_cleanup_and_exit(int ret) { exit(ret); }
+
+int main(int argc, char **argv) {
+    (void)argc; (void)argv;
+
+// DICTIONARY *dict = dictionary_create(DICTIONARY_FLAG_SINGLE_THREADED|DICTIONARY_FLAG_WITH_STATISTICS);
+ DICTIONARY *dict = dictionary_create(DICTIONARY_FLAG_WITH_STATISTICS);
+ if(!dict) fatal("Cannot create dictionary.");
+
+ struct rusage start, end;
+ unsigned long long dt;
+ char buf[100 + 1];
+ struct myvalue value, *v;
+ int i, max = 30000000, max2;
+
+ // ------------------------------------------------------------------------
+
+ getrusage(RUSAGE_SELF, &start);
+ dict->stats->inserts = dict->stats->deletes = dict->stats->searches = 0ULL;
+ fprintf(stderr, "Inserting %d entries in the dictionary\n", max);
+ for(i = 0; i < max; i++) {
+ value.i = i;
+ snprintf(buf, 100, "%d", i);
+
+ dictionary_set(dict, buf, &value, sizeof(struct myvalue));
+ }
+ getrusage(RUSAGE_SELF, &end);
+ dt = (end.ru_utime.tv_sec * 1000000ULL + end.ru_utime.tv_usec) - (start.ru_utime.tv_sec * 1000000ULL + start.ru_utime.tv_usec);
+ fprintf(stderr, "Added %d entries in %llu nanoseconds: %llu inserts per second\n", max, dt, max * 1000000ULL / dt);
+ fprintf(stderr, " > Dictionary: %llu inserts, %llu deletes, %llu searches\n\n", dict->stats->inserts, dict->stats->deletes, dict->stats->searches);
+
+ // ------------------------------------------------------------------------
+
+ getrusage(RUSAGE_SELF, &start);
+ dict->stats->inserts = dict->stats->deletes = dict->stats->searches = 0ULL;
+ fprintf(stderr, "Retrieving %d entries from the dictionary\n", max);
+ for(i = 0; i < max; i++) {
+ value.i = i;
+ snprintf(buf, 100, "%d", i);
+
+ v = dictionary_get(dict, buf);
+ if(!v)
+ fprintf(stderr, "ERROR: cannot get value %d from the dictionary\n", i);
+ else if(v->i != i)
+ fprintf(stderr, "ERROR: expected %d but got %d\n", i, v->i);
+ }
+ getrusage(RUSAGE_SELF, &end);
+ dt = (end.ru_utime.tv_sec * 1000000ULL + end.ru_utime.tv_usec) - (start.ru_utime.tv_sec * 1000000ULL + start.ru_utime.tv_usec);
+ fprintf(stderr, "Read %d entries in %llu nanoseconds: %llu searches per second\n", max, dt, max * 1000000ULL / dt);
+ fprintf(stderr, " > Dictionary: %llu inserts, %llu deletes, %llu searches\n\n", dict->stats->inserts, dict->stats->deletes, dict->stats->searches);
+
+ // ------------------------------------------------------------------------
+
+ getrusage(RUSAGE_SELF, &start);
+ dict->stats->inserts = dict->stats->deletes = dict->stats->searches = 0ULL;
+ fprintf(stderr, "Resetting %d entries in the dictionary\n", max);
+ for(i = 0; i < max; i++) {
+ value.i = i;
+ snprintf(buf, 100, "%d", i);
+
+ dictionary_set(dict, buf, &value, sizeof(struct myvalue));
+ }
+ getrusage(RUSAGE_SELF, &end);
+ dt = (end.ru_utime.tv_sec * 1000000ULL + end.ru_utime.tv_usec) - (start.ru_utime.tv_sec * 1000000ULL + start.ru_utime.tv_usec);
+ fprintf(stderr, "Reset %d entries in %llu nanoseconds: %llu resets per second\n", max, dt, max * 1000000ULL / dt);
+ fprintf(stderr, " > Dictionary: %llu inserts, %llu deletes, %llu searches\n\n", dict->stats->inserts, dict->stats->deletes, dict->stats->searches);
+
+ // ------------------------------------------------------------------------
+
+ getrusage(RUSAGE_SELF, &start);
+ dict->stats->inserts = dict->stats->deletes = dict->stats->searches = 0ULL;
+ fprintf(stderr, "Searching %d non-existing entries in the dictionary\n", max);
+ max2 = max * 2;
+ for(i = max; i < max2; i++) {
+ value.i = i;
+ snprintf(buf, 100, "%d", i);
+
+ v = dictionary_get(dict, buf);
+ if(v)
+ fprintf(stderr, "ERROR: cannot got non-existing value %d from the dictionary\n", i);
+ }
+ getrusage(RUSAGE_SELF, &end);
+ dt = (end.ru_utime.tv_sec * 1000000ULL + end.ru_utime.tv_usec) - (start.ru_utime.tv_sec * 1000000ULL + start.ru_utime.tv_usec);
+ fprintf(stderr, "Searched %d non-existing entries in %llu nanoseconds: %llu not found searches per second\n", max, dt, max * 1000000ULL / dt);
+ fprintf(stderr, " > Dictionary: %llu inserts, %llu deletes, %llu searches\n\n", dict->stats->inserts, dict->stats->deletes, dict->stats->searches);
+
+ // ------------------------------------------------------------------------
+
+ getrusage(RUSAGE_SELF, &start);
+ dict->stats->inserts = dict->stats->deletes = dict->stats->searches = 0ULL;
+ fprintf(stderr, "Deleting %d entries from the dictionary\n", max);
+ for(i = 0; i < max; i++) {
+ value.i = i;
+ snprintf(buf, 100, "%d", i);
+
+ dictionary_del(dict, buf);
+ }
+ getrusage(RUSAGE_SELF, &end);
+ dt = (end.ru_utime.tv_sec * 1000000ULL + end.ru_utime.tv_usec) - (start.ru_utime.tv_sec * 1000000ULL + start.ru_utime.tv_usec);
+ fprintf(stderr, "Deleted %d entries in %llu nanoseconds: %llu deletes per second\n", max, dt, max * 1000000ULL / dt);
+ fprintf(stderr, " > Dictionary: %llu inserts, %llu deletes, %llu searches\n\n", dict->stats->inserts, dict->stats->deletes, dict->stats->searches);
+
+ // ------------------------------------------------------------------------
+
+ getrusage(RUSAGE_SELF, &start);
+ dict->stats->inserts = dict->stats->deletes = dict->stats->searches = 0ULL;
+ fprintf(stderr, "Destroying dictionary\n");
+ dictionary_destroy(dict);
+ getrusage(RUSAGE_SELF, &end);
+ dt = (end.ru_utime.tv_sec * 1000000ULL + end.ru_utime.tv_usec) - (start.ru_utime.tv_sec * 1000000ULL + start.ru_utime.tv_usec);
+ fprintf(stderr, "Destroyed in %llu nanoseconds\n", dt);
+
+ return 0;
+}
diff --git a/tests/profile/benchmark-line-parsing.c b/tests/profile/benchmark-line-parsing.c
new file mode 100644
index 0000000..1d47cc8
--- /dev/null
+++ b/tests/profile/benchmark-line-parsing.c
@@ -0,0 +1,708 @@
+/* SPDX-License-Identifier: GPL-3.0-or-later */
+#include <stdio.h>
+#include <inttypes.h>
+#include <string.h>
+#include <stdlib.h>
+#include <ctype.h>
+#include <sys/time.h>
+
+#define likely(x) __builtin_expect(!!(x), 1)
+#define unlikely(x) __builtin_expect(!!(x), 0)
+
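+// FNV-1 32-bit hash (offset basis 0x811c9dc5, prime 16777619) as a statement-expression macro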
+#define simple_hash(name) ({ \
+ register unsigned char *__hash_source = (unsigned char *)(name); \
+ register uint32_t __hash_value = 0x811c9dc5; \
+ while (*__hash_source) { \
+ __hash_value *= 16777619; \
+ __hash_value ^= (uint32_t) *__hash_source++; \
+ } \
+ __hash_value; \
+})
+
+static inline uint32_t simple_hash2(const char *name) {
+ register unsigned char *s = (unsigned char *)name;
+ register uint32_t hval = 0x811c9dc5;
+ while (*s) {
+ hval *= 16777619;
+ hval ^= (uint32_t) *s++;
+ }
+ return hval;
+}
+
+static inline unsigned long long fast_strtoull(const char *s) {
+ register unsigned long long n = 0;
+ register char c;
+ for(c = *s; c >= '0' && c <= '9' ; c = *(++s)) {
+ n *= 10;
+ n += c - '0';
+ // n = (n << 1) + (n << 3) + (c - '0');
+ }
+ return n;
+}
+
+static uint32_t cache_hash = 0;
+static uint32_t rss_hash = 0;
+static uint32_t rss_huge_hash = 0;
+static uint32_t mapped_file_hash = 0;
+static uint32_t writeback_hash = 0;
+static uint32_t dirty_hash = 0;
+static uint32_t swap_hash = 0;
+static uint32_t pgpgin_hash = 0;
+static uint32_t pgpgout_hash = 0;
+static uint32_t pgfault_hash = 0;
+static uint32_t pgmajfault_hash = 0;
+static uint32_t inactive_anon_hash = 0;
+static uint32_t active_anon_hash = 0;
+static uint32_t inactive_file_hash = 0;
+static uint32_t active_file_hash = 0;
+static uint32_t unevictable_hash = 0;
+static uint32_t hierarchical_memory_limit_hash = 0;
+static uint32_t total_cache_hash = 0;
+static uint32_t total_rss_hash = 0;
+static uint32_t total_rss_huge_hash = 0;
+static uint32_t total_mapped_file_hash = 0;
+static uint32_t total_writeback_hash = 0;
+static uint32_t total_dirty_hash = 0;
+static uint32_t total_swap_hash = 0;
+static uint32_t total_pgpgin_hash = 0;
+static uint32_t total_pgpgout_hash = 0;
+static uint32_t total_pgfault_hash = 0;
+static uint32_t total_pgmajfault_hash = 0;
+static uint32_t total_inactive_anon_hash = 0;
+static uint32_t total_active_anon_hash = 0;
+static uint32_t total_inactive_file_hash = 0;
+static uint32_t total_active_file_hash = 0;
+static uint32_t total_unevictable_hash = 0;
+
+char *strings[] = {
+ "cache",
+ "rss",
+ "rss_huge",
+ "mapped_file",
+ "writeback",
+ "dirty",
+ "swap",
+ "pgpgin",
+ "pgpgout",
+ "pgfault",
+ "pgmajfault",
+ "inactive_anon",
+ "active_anon",
+ "inactive_file",
+ "active_file",
+ "unevictable",
+ "hierarchical_memory_limit",
+ "total_cache",
+ "total_rss",
+ "total_rss_huge",
+ "total_mapped_file",
+ "total_writeback",
+ "total_dirty",
+ "total_swap",
+ "total_pgpgin",
+ "total_pgpgout",
+ "total_pgfault",
+ "total_pgmajfault",
+ "total_inactive_anon",
+ "total_active_anon",
+ "total_inactive_file",
+ "total_active_file",
+ "total_unevictable",
+ NULL
+};
+
+unsigned long long values1[12] = { 0 };
+unsigned long long values2[12] = { 0 };
+unsigned long long values3[12] = { 0 };
+unsigned long long values4[12] = { 0 };
+unsigned long long values5[12] = { 0 };
+unsigned long long values6[12] = { 0 };
+
+#define NUMBER1 "12345678901234"
+#define NUMBER2 "23456789012345"
+#define NUMBER3 "34567890123456"
+#define NUMBER4 "45678901234567"
+#define NUMBER5 "56789012345678"
+#define NUMBER6 "67890123456789"
+#define NUMBER7 "78901234567890"
+#define NUMBER8 "89012345678901"
+#define NUMBER9 "90123456789012"
+#define NUMBER10 "12345678901234"
+#define NUMBER11 "23456789012345"
+
+// simple system strcmp()
+void test1() {
+ int i;
+ for(i = 0; strings[i] ; i++) {
+ char *s = strings[i];
+
+ if(unlikely(!strcmp(s, "cache")))
+ values1[i] = strtoull(NUMBER1, NULL, 10);
+
+ else if(unlikely(!strcmp(s, "rss")))
+ values1[i] = strtoull(NUMBER2, NULL, 10);
+
+ else if(unlikely(!strcmp(s, "rss_huge")))
+ values1[i] = strtoull(NUMBER3, NULL, 10);
+
+ else if(unlikely(!strcmp(s, "mapped_file")))
+ values1[i] = strtoull(NUMBER4, NULL, 10);
+
+ else if(unlikely(!strcmp(s, "writeback")))
+ values1[i] = strtoull(NUMBER5, NULL, 10);
+
+ else if(unlikely(!strcmp(s, "dirty")))
+ values1[i] = strtoull(NUMBER6, NULL, 10);
+
+ else if(unlikely(!strcmp(s, "swap")))
+ values1[i] = strtoull(NUMBER7, NULL, 10);
+
+ else if(unlikely(!strcmp(s, "pgpgin")))
+ values1[i] = strtoull(NUMBER8, NULL, 10);
+
+ else if(unlikely(!strcmp(s, "pgpgout")))
+ values1[i] = strtoull(NUMBER9, NULL, 10);
+
+ else if(unlikely(!strcmp(s, "pgfault")))
+ values1[i] = strtoull(NUMBER10, NULL, 10);
+
+ else if(unlikely(!strcmp(s, "pgmajfault")))
+ values1[i] = strtoull(NUMBER11, NULL, 10);
+ }
+}
+
+// inline simple_hash() with system strtoull()
+void test2() {
+ int i;
+ for(i = 0; strings[i] ; i++) {
+ char *s = strings[i];
+ uint32_t hash = simple_hash2(s);
+
+ if(unlikely(hash == cache_hash && !strcmp(s, "cache")))
+ values2[i] = strtoull(NUMBER1, NULL, 10);
+
+ else if(unlikely(hash == rss_hash && !strcmp(s, "rss")))
+ values2[i] = strtoull(NUMBER2, NULL, 10);
+
+ else if(unlikely(hash == rss_huge_hash && !strcmp(s, "rss_huge")))
+ values2[i] = strtoull(NUMBER3, NULL, 10);
+
+ else if(unlikely(hash == mapped_file_hash && !strcmp(s, "mapped_file")))
+ values2[i] = strtoull(NUMBER4, NULL, 10);
+
+ else if(unlikely(hash == writeback_hash && !strcmp(s, "writeback")))
+ values2[i] = strtoull(NUMBER5, NULL, 10);
+
+ else if(unlikely(hash == dirty_hash && !strcmp(s, "dirty")))
+ values2[i] = strtoull(NUMBER6, NULL, 10);
+
+ else if(unlikely(hash == swap_hash && !strcmp(s, "swap")))
+ values2[i] = strtoull(NUMBER7, NULL, 10);
+
+ else if(unlikely(hash == pgpgin_hash && !strcmp(s, "pgpgin")))
+ values2[i] = strtoull(NUMBER8, NULL, 10);
+
+ else if(unlikely(hash == pgpgout_hash && !strcmp(s, "pgpgout")))
+ values2[i] = strtoull(NUMBER9, NULL, 10);
+
+ else if(unlikely(hash == pgfault_hash && !strcmp(s, "pgfault")))
+ values2[i] = strtoull(NUMBER10, NULL, 10);
+
+ else if(unlikely(hash == pgmajfault_hash && !strcmp(s, "pgmajfault")))
+ values2[i] = strtoull(NUMBER11, NULL, 10);
+ }
+}
+
+// statement expression simple_hash(), system strtoull()
+void test3() {
+ int i;
+ for(i = 0; strings[i] ; i++) {
+ char *s = strings[i];
+ uint32_t hash = simple_hash(s);
+
+ if(unlikely(hash == cache_hash && !strcmp(s, "cache")))
+ values3[i] = strtoull(NUMBER1, NULL, 10);
+
+ else if(unlikely(hash == rss_hash && !strcmp(s, "rss")))
+ values3[i] = strtoull(NUMBER2, NULL, 10);
+
+ else if(unlikely(hash == rss_huge_hash && !strcmp(s, "rss_huge")))
+ values3[i] = strtoull(NUMBER3, NULL, 10);
+
+ else if(unlikely(hash == mapped_file_hash && !strcmp(s, "mapped_file")))
+ values3[i] = strtoull(NUMBER4, NULL, 10);
+
+ else if(unlikely(hash == writeback_hash && !strcmp(s, "writeback")))
+ values3[i] = strtoull(NUMBER5, NULL, 10);
+
+ else if(unlikely(hash == dirty_hash && !strcmp(s, "dirty")))
+ values3[i] = strtoull(NUMBER6, NULL, 10);
+
+ else if(unlikely(hash == swap_hash && !strcmp(s, "swap")))
+ values3[i] = strtoull(NUMBER7, NULL, 10);
+
+ else if(unlikely(hash == pgpgin_hash && !strcmp(s, "pgpgin")))
+ values3[i] = strtoull(NUMBER8, NULL, 10);
+
+ else if(unlikely(hash == pgpgout_hash && !strcmp(s, "pgpgout")))
+ values3[i] = strtoull(NUMBER9, NULL, 10);
+
+ else if(unlikely(hash == pgfault_hash && !strcmp(s, "pgfault")))
+ values3[i] = strtoull(NUMBER10, NULL, 10);
+
+ else if(unlikely(hash == pgmajfault_hash && !strcmp(s, "pgmajfault")))
+ values3[i] = strtoull(NUMBER11, NULL, 10);
+ }
+}
+
+
+// inline simple_hash(), if-continue checks
+void test4() {
+ int i;
+ for(i = 0; strings[i] ; i++) {
+ char *s = strings[i];
+ uint32_t hash = simple_hash2(s);
+
+ if(unlikely(hash == cache_hash && !strcmp(s, "cache"))) {
+ values4[i] = strtoull(NUMBER1, NULL, 0);
+ continue;
+ }
+
+ if(unlikely(hash == rss_hash && !strcmp(s, "rss"))) {
+ values4[i] = strtoull(NUMBER2, NULL, 0);
+ continue;
+ }
+
+ if(unlikely(hash == rss_huge_hash && !strcmp(s, "rss_huge"))) {
+ values4[i] = strtoull(NUMBER3, NULL, 0);
+ continue;
+ }
+
+ if(unlikely(hash == mapped_file_hash && !strcmp(s, "mapped_file"))) {
+ values4[i] = strtoull(NUMBER4, NULL, 0);
+ continue;
+ }
+
+ if(unlikely(hash == writeback_hash && !strcmp(s, "writeback"))) {
+ values4[i] = strtoull(NUMBER5, NULL, 0);
+ continue;
+ }
+
+ if(unlikely(hash == dirty_hash && !strcmp(s, "dirty"))) {
+ values4[i] = strtoull(NUMBER6, NULL, 0);
+ continue;
+ }
+
+ if(unlikely(hash == swap_hash && !strcmp(s, "swap"))) {
+ values4[i] = strtoull(NUMBER7, NULL, 0);
+ continue;
+ }
+
+ if(unlikely(hash == pgpgin_hash && !strcmp(s, "pgpgin"))) {
+ values4[i] = strtoull(NUMBER8, NULL, 0);
+ continue;
+ }
+
+ if(unlikely(hash == pgpgout_hash && !strcmp(s, "pgpgout"))) {
+ values4[i] = strtoull(NUMBER9, NULL, 0);
+ continue;
+ }
+
+ if(unlikely(hash == pgfault_hash && !strcmp(s, "pgfault"))) {
+ values4[i] = strtoull(NUMBER10, NULL, 0);
+ continue;
+ }
+
+ if(unlikely(hash == pgmajfault_hash && !strcmp(s, "pgmajfault"))) {
+ values4[i] = strtoull(NUMBER11, NULL, 0);
+ continue;
+ }
+ }
+}
+
+// inline simple_hash(), if-else-if-else-if (netdata default)
+void test5() {
+ int i;
+ for(i = 0; strings[i] ; i++) {
+ char *s = strings[i];
+ uint32_t hash = simple_hash2(s);
+
+ if(unlikely(hash == cache_hash && !strcmp(s, "cache")))
+ values5[i] = fast_strtoull(NUMBER1);
+
+ else if(unlikely(hash == rss_hash && !strcmp(s, "rss")))
+ values5[i] = fast_strtoull(NUMBER2);
+
+ else if(unlikely(hash == rss_huge_hash && !strcmp(s, "rss_huge")))
+ values5[i] = fast_strtoull(NUMBER3);
+
+ else if(unlikely(hash == mapped_file_hash && !strcmp(s, "mapped_file")))
+ values5[i] = fast_strtoull(NUMBER4);
+
+ else if(unlikely(hash == writeback_hash && !strcmp(s, "writeback")))
+ values5[i] = fast_strtoull(NUMBER5);
+
+ else if(unlikely(hash == dirty_hash && !strcmp(s, "dirty")))
+ values5[i] = fast_strtoull(NUMBER6);
+
+ else if(unlikely(hash == swap_hash && !strcmp(s, "swap")))
+ values5[i] = fast_strtoull(NUMBER7);
+
+ else if(unlikely(hash == pgpgin_hash && !strcmp(s, "pgpgin")))
+ values5[i] = fast_strtoull(NUMBER8);
+
+ else if(unlikely(hash == pgpgout_hash && !strcmp(s, "pgpgout")))
+ values5[i] = fast_strtoull(NUMBER9);
+
+ else if(unlikely(hash == pgfault_hash && !strcmp(s, "pgfault")))
+ values5[i] = fast_strtoull(NUMBER10);
+
+ else if(unlikely(hash == pgmajfault_hash && !strcmp(s, "pgmajfault")))
+ values5[i] = fast_strtoull(NUMBER11);
+ }
+}
+
+// ----------------------------------------------------------------------------
+
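+// The two structures below implement an adaptive re-sortable list: entries are
+// relinked into the order they arrive in the input, so when consecutive parses
+// deliver the fields in the same order, every lookup hits base->last directly.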
+struct entry {
+ char *name;
+ uint32_t hash;
+ int found;
+ void (*func)(void *data1, void *data2);
+ void *data1;
+ void *data2;
+ struct entry *prev, *next;
+};
+
+struct base {
+ int iteration;
+ int registered;
+ int wanted;
+ int found;
+ struct entry *entries, *last;
+};
+
+static inline void callback(void *data1, void *data2) {
+ char *string = data1;
+ unsigned long long *value = data2;
+ *value = fast_strtoull(string);
+}
+
+static inline void callback_system_strtoull(void *data1, void *data2) {
+ char *string = data1;
+ unsigned long long *value = data2;
+ *value = strtoull(string, NULL, 10);
+}
+
+
+static inline struct base *entry(struct base *base, const char *name, void *data1, void *data2, void (*func)(void *, void *)) {
+ if(!base)
+ base = calloc(1, sizeof(struct base));
+
+    struct entry *e = calloc(1, sizeof(struct entry)); // calloc so e->found starts at 0
+ e->name = strdup(name);
+ e->hash = simple_hash2(e->name);
+ e->data1 = data1;
+ e->data2 = data2;
+ e->func = func;
+ e->prev = NULL;
+ e->next = base->entries;
+
+ if(base->entries) base->entries->prev = e;
+ else base->last = e;
+
+ base->entries = e;
+ base->registered++;
+ base->wanted = base->registered;
+
+ return base;
+}
+
+static inline int check(struct base *base, const char *s) {
+ uint32_t hash = simple_hash2(s);
+
+ if(likely(!strcmp(s, base->last->name))) {
+ base->last->found = 1;
+ base->found++;
+ if(base->last->func) base->last->func(base->last->data1, base->last->data2);
+ base->last = base->last->next;
+
+ if(!base->last)
+ base->last = base->entries;
+
+ if(base->found == base->registered)
+ return 1;
+
+ return 0;
+ }
+
+ // find it
+ struct entry *e;
+ for(e = base->entries; e ; e = e->next)
+ if(e->hash == hash && !strcmp(e->name, s))
+ break;
+
+ if(e == base->last) {
+ printf("ERROR\n");
+ exit(1);
+ }
+
+ if(e) {
+ // found
+
+ // run it
+ if(e->func) e->func(e->data1, e->data2);
+
+ // unlink it
+ if(e->next) e->next->prev = e->prev;
+ if(e->prev) e->prev->next = e->next;
+
+ if(base->entries == e)
+ base->entries = e->next;
+ }
+ else {
+ // not found
+
+ // create it
+ e = calloc(1, sizeof(struct entry));
+ e->name = strdup(s);
+ e->hash = hash;
+ }
+
+ // link it here
+ e->next = base->last;
+ if(base->last) {
+ e->prev = base->last->prev;
+ base->last->prev = e;
+
+ if(base->entries == base->last)
+ base->entries = e;
+ }
+ else
+ e->prev = NULL;
+
+ if(e->prev)
+ e->prev->next = e;
+
+ base->last = e->next;
+ if(!base->last)
+ base->last = base->entries;
+
+ e->found = 1;
+ base->found++;
+
+ if(base->found == base->registered)
+ return 1;
+
+ printf("relinked '%s' after '%s' and before '%s': ", e->name, e->prev?e->prev->name:"NONE", e->next?e->next->name:"NONE");
+ for(e = base->entries; e ; e = e->next) printf("%s ", e->name);
+ printf("\n");
+
+ return 0;
+}
+
+static inline void begin(struct base *base) {
+
+ if(unlikely(base->iteration % 60) == 1) {
+ base->wanted = 0;
+ struct entry *e;
+ for(e = base->entries; e ; e = e->next)
+ if(e->found) base->wanted++;
+ }
+
+ base->iteration++;
+ base->last = base->entries;
+ base->found = 0;
+}
+
+void test6() {
+
+ static struct base *base = NULL;
+
+ if(unlikely(!base)) {
+ base = entry(base, "cache", NUMBER1, &values6[0], callback_system_strtoull);
+ base = entry(base, "rss", NUMBER2, &values6[1], callback_system_strtoull);
+ base = entry(base, "rss_huge", NUMBER3, &values6[2], callback_system_strtoull);
+ base = entry(base, "mapped_file", NUMBER4, &values6[3], callback_system_strtoull);
+ base = entry(base, "writeback", NUMBER5, &values6[4], callback_system_strtoull);
+ base = entry(base, "dirty", NUMBER6, &values6[5], callback_system_strtoull);
+ base = entry(base, "swap", NUMBER7, &values6[6], callback_system_strtoull);
+ base = entry(base, "pgpgin", NUMBER8, &values6[7], callback_system_strtoull);
+ base = entry(base, "pgpgout", NUMBER9, &values6[8], callback_system_strtoull);
+ base = entry(base, "pgfault", NUMBER10, &values6[9], callback_system_strtoull);
+ base = entry(base, "pgmajfault", NUMBER11, &values6[10], callback_system_strtoull);
+ }
+
+ begin(base);
+
+ int i;
+ for(i = 0; strings[i] ; i++) {
+ if(check(base, strings[i]))
+ break;
+ }
+}
+
+void test7() {
+
+ static struct base *base = NULL;
+
+ if(unlikely(!base)) {
+ base = entry(base, "cache", NUMBER1, &values6[0], callback);
+ base = entry(base, "rss", NUMBER2, &values6[1], callback);
+ base = entry(base, "rss_huge", NUMBER3, &values6[2], callback);
+ base = entry(base, "mapped_file", NUMBER4, &values6[3], callback);
+ base = entry(base, "writeback", NUMBER5, &values6[4], callback);
+ base = entry(base, "dirty", NUMBER6, &values6[5], callback);
+ base = entry(base, "swap", NUMBER7, &values6[6], callback);
+ base = entry(base, "pgpgin", NUMBER8, &values6[7], callback);
+ base = entry(base, "pgpgout", NUMBER9, &values6[8], callback);
+ base = entry(base, "pgfault", NUMBER10, &values6[9], callback);
+ base = entry(base, "pgmajfault", NUMBER11, &values6[10], callback);
+ }
+
+ begin(base);
+
+ int i;
+ for(i = 0; strings[i] ; i++) {
+ if(check(base, strings[i]))
+ break;
+ }
+}
+
+// ----------------------------------------------------------------------------
+
+
+// ==============
+// --- Poor man's cycle counting.
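+// (x86-specific: cpuid serializes the pipeline, rdtsc/rdtscp read the time-stamp counter)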
+static unsigned long tsc;
+
+static void begin_tsc(void)
+{
+ unsigned long a, d;
+ asm volatile ("cpuid\nrdtsc" : "=a" (a), "=d" (d) : "0" (0) : "ebx", "ecx");
+ tsc = ((unsigned long)d << 32) | (unsigned long)a;
+}
+
+static unsigned long end_tsc(void)
+{
+ unsigned long a, d;
+ asm volatile ("rdtscp" : "=a" (a), "=d" (d) : : "ecx");
+ return (((unsigned long)d << 32) | (unsigned long)a) - tsc;
+}
+// ===============
+
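+// wall-clock timing in microseconds, via gettimeofday()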
+static unsigned long long clk;
+
+static void begin_clock() {
+ struct timeval tv;
+ if(unlikely(gettimeofday(&tv, NULL) == -1))
+ return;
+ clk = tv.tv_sec * 1000000 + tv.tv_usec;
+}
+
+static unsigned long long end_clock() {
+ struct timeval tv;
+ if(unlikely(gettimeofday(&tv, NULL) == -1))
+ return -1;
+ return clk = tv.tv_sec * 1000000 + tv.tv_usec - clk;
+}
+
+int main(void)
+{
+ cache_hash = simple_hash("cache");
+ rss_hash = simple_hash("rss");
+ rss_huge_hash = simple_hash("rss_huge");
+ mapped_file_hash = simple_hash("mapped_file");
+ writeback_hash = simple_hash("writeback");
+ dirty_hash = simple_hash("dirty");
+ swap_hash = simple_hash("swap");
+ pgpgin_hash = simple_hash("pgpgin");
+ pgpgout_hash = simple_hash("pgpgout");
+ pgfault_hash = simple_hash("pgfault");
+ pgmajfault_hash = simple_hash("pgmajfault");
+ inactive_anon_hash = simple_hash("inactive_anon");
+ active_anon_hash = simple_hash("active_anon");
+ inactive_file_hash = simple_hash("inactive_file");
+ active_file_hash = simple_hash("active_file");
+ unevictable_hash = simple_hash("unevictable");
+ hierarchical_memory_limit_hash = simple_hash("hierarchical_memory_limit");
+ total_cache_hash = simple_hash("total_cache");
+ total_rss_hash = simple_hash("total_rss");
+ total_rss_huge_hash = simple_hash("total_rss_huge");
+ total_mapped_file_hash = simple_hash("total_mapped_file");
+ total_writeback_hash = simple_hash("total_writeback");
+ total_dirty_hash = simple_hash("total_dirty");
+ total_swap_hash = simple_hash("total_swap");
+ total_pgpgin_hash = simple_hash("total_pgpgin");
+ total_pgpgout_hash = simple_hash("total_pgpgout");
+ total_pgfault_hash = simple_hash("total_pgfault");
+ total_pgmajfault_hash = simple_hash("total_pgmajfault");
+ total_inactive_anon_hash = simple_hash("total_inactive_anon");
+ total_active_anon_hash = simple_hash("total_active_anon");
+ total_inactive_file_hash = simple_hash("total_inactive_file");
+ total_active_file_hash = simple_hash("total_active_file");
+ total_unevictable_hash = simple_hash("total_unevictable");
+
+ unsigned long i, c1 = 0, c2 = 0, c3 = 0, c4 = 0, c5 = 0, c6 = 0, c7;
+ unsigned long max = 1000000;
+
+ // let the processor get up to speed
+ begin_clock();
+ for(i = 0; i <= max ;i++) test1();
+ c1 = end_clock();
+
+ begin_clock();
+ for(i = 0; i <= max ;i++) test1();
+ c1 = end_clock();
+
+ begin_clock();
+ for(i = 0; i <= max ;i++) test2();
+ c2 = end_clock();
+
+ begin_clock();
+ for(i = 0; i <= max ;i++) test3();
+ c3 = end_clock();
+
+ begin_clock();
+ for(i = 0; i <= max ;i++) test4();
+ c4 = end_clock();
+
+ begin_clock();
+ for(i = 0; i <= max ;i++) test5();
+ c5 = end_clock();
+
+ begin_clock();
+ for(i = 0; i <= max ;i++) test6();
+ c6 = end_clock();
+
+ begin_clock();
+ for(i = 0; i <= max ;i++) test7();
+ c7 = end_clock();
+
+ for(i = 0; i < 11 ; i++)
+ printf("value %lu: %llu %llu %llu %llu %llu %llu\n", i, values1[i], values2[i], values3[i], values4[i], values5[i], values6[i]);
+
+ printf("\n\nRESULTS\n");
+ printf("test1() in %lu usecs: if-else-if-else-if, simple strcmp() with system strtoull().\n"
+ "test2() in %lu usecs: inline simple_hash() if-else-if-else-if, with system strtoull().\n"
+ "test3() in %lu usecs: statement expression simple_hash(), system strtoull().\n"
+ "test4() in %lu usecs: inline simple_hash(), if-continue checks, system strtoull().\n"
+ "test5() in %lu usecs: inline simple_hash(), if-else-if-else-if, custom strtoull() (netdata default prior to ARL).\n"
+ "test6() in %lu usecs: adaptive re-sortable list, system strtoull() (wow!)\n"
+ "test7() in %lu usecs: adaptive re-sortable list, custom strtoull() (wow!)\n"
+ , c1
+ , c2
+ , c3
+ , c4
+ , c5
+ , c6
+ , c7
+ );
+    return 0;
+}
diff --git a/tests/profile/benchmark-procfile-parser.c b/tests/profile/benchmark-procfile-parser.c
new file mode 100644
index 0000000..991e2df
--- /dev/null
+++ b/tests/profile/benchmark-procfile-parser.c
@@ -0,0 +1,326 @@
+/* SPDX-License-Identifier: GPL-3.0-or-later */
+
+#include "config.h"
+#include "libnetdata/libnetdata.h"
+
+void netdata_cleanup_and_exit(int ret) {
+ exit(ret);
+}
+
+#define PF_PREFIX "PROCFILE"
+#define PFWORDS_INCREASE_STEP 200
+#define PFLINES_INCREASE_STEP 10
+#define PROCFILE_INCREMENT_BUFFER 512
+extern size_t procfile_max_lines;
+extern size_t procfile_max_words;
+extern size_t procfile_max_allocation;
+
+
+static inline void pflines_reset(pflines *fl) {
+ // debug(D_PROCFILE, PF_PREFIX ": reseting lines");
+
+ fl->len = 0;
+}
+
+static inline void pflines_free(pflines *fl) {
+ // debug(D_PROCFILE, PF_PREFIX ": freeing lines");
+
+ freez(fl);
+}
+
+static inline void pfwords_reset(pfwords *fw) {
+ // debug(D_PROCFILE, PF_PREFIX ": reseting words");
+ fw->len = 0;
+}
+
+
+static inline void pfwords_add(procfile *ff, char *str) {
+ // debug(D_PROCFILE, PF_PREFIX ": adding word No %d: '%s'", fw->len, str);
+
+ pfwords *fw = ff->words;
+ if(unlikely(fw->len == fw->size)) {
+ // debug(D_PROCFILE, PF_PREFIX ": expanding words");
+
+ ff->words = fw = reallocz(fw, sizeof(pfwords) + (fw->size + PFWORDS_INCREASE_STEP) * sizeof(char *));
+ fw->size += PFWORDS_INCREASE_STEP;
+ }
+
+ fw->words[fw->len++] = str;
+}
+
+NEVERNULL
+static inline size_t *pflines_add(procfile *ff) {
+ // debug(D_PROCFILE, PF_PREFIX ": adding line %d at word %d", fl->len, first_word);
+
+ pflines *fl = ff->lines;
+ if(unlikely(fl->len == fl->size)) {
+ // debug(D_PROCFILE, PF_PREFIX ": expanding lines");
+
+ ff->lines = fl = reallocz(fl, sizeof(pflines) + (fl->size + PFLINES_INCREASE_STEP) * sizeof(ffline));
+ fl->size += PFLINES_INCREASE_STEP;
+ }
+
+ ffline *ffl = &fl->lines[fl->len++];
+ ffl->words = 0;
+ ffl->first = ff->words->len;
+
+ return &ffl->words;
+}
+
+
+NOINLINE
+static void procfile_parser(procfile *ff) {
+ // debug(D_PROCFILE, PF_PREFIX ": Parsing file '%s'", ff->filename);
+
+ char *s = ff->data // our current position
+ , *e = &ff->data[ff->len] // the terminating null
+ , *t = ff->data; // the first character of a word (or quoted / parenthesized string)
+
+ // the look up array to find our type of character
+ PF_CHAR_TYPE *separators = ff->separators;
+
+ char quote = 0; // the quote character - only when in quoted string
+ size_t opened = 0; // counts the number of open parenthesis
+
+ size_t *line_words = pflines_add(ff);
+
+ while(s < e) {
+ PF_CHAR_TYPE ct = separators[(unsigned char)(*s)];
+
+            // the char-type lookup above plus this dense switch() is faster than switching on the raw character
+ // read more here: http://lazarenko.me/switch/
+ switch(ct) {
+ case PF_CHAR_IS_SEPARATOR:
+ if(!quote && !opened) {
+ if (s != t) {
+ // separator, but we have word before it
+ *s = '\0';
+ pfwords_add(ff, t);
+ (*line_words)++;
+ }
+ t = s + 1;
+ }
+ // fallthrough
+
+ case PF_CHAR_IS_WORD:
+ s++;
+ break;
+
+
+ case PF_CHAR_IS_NEWLINE:
+ // end of line
+
+ *s = '\0';
+ pfwords_add(ff, t);
+ (*line_words)++;
+ t = ++s;
+
+ // debug(D_PROCFILE, PF_PREFIX ": ended line %d with %d words", l, ff->lines->lines[l].words);
+
+ line_words = pflines_add(ff);
+ break;
+
+ case PF_CHAR_IS_QUOTE:
+ if(unlikely(!quote && s == t)) {
+ // quote opened at the beginning
+ quote = *s;
+ t = ++s;
+ }
+ else if(unlikely(quote && quote == *s)) {
+ // quote closed
+ quote = 0;
+
+ *s = '\0';
+ pfwords_add(ff, t);
+ (*line_words)++;
+ t = ++s;
+ }
+ else
+ s++;
+ break;
+
+ case PF_CHAR_IS_OPEN:
+ if(s == t) {
+ opened++;
+ t = ++s;
+ }
+ else if(opened) {
+ opened++;
+ s++;
+ }
+ else
+ s++;
+ break;
+
+ case PF_CHAR_IS_CLOSE:
+ if(opened) {
+ opened--;
+
+ if(!opened) {
+ *s = '\0';
+ pfwords_add(ff, t);
+ (*line_words)++;
+ t = ++s;
+ }
+ else
+ s++;
+ }
+ else
+ s++;
+ break;
+
+ default:
+ fatal("Internal Error: procfile_readall() does not handle all the cases.");
+ }
+ }
+
+ if(likely(s > t && t < e)) {
+ // the last word
+ if(unlikely(ff->len >= ff->size)) {
+            // we are going to lose the last byte
+ s = &ff->data[ff->size - 1];
+ }
+
+ *s = '\0';
+ pfwords_add(ff, t);
+ (*line_words)++;
+ // t = ++s;
+ }
+}
+
+
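+// procfile_readall1() is a local copy of libnetdata's procfile_readall(), kept
+// here so the parser above can be benchmarked against the library's internal one.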
+procfile *procfile_readall1(procfile *ff) {
+ // debug(D_PROCFILE, PF_PREFIX ": Reading file '%s'.", ff->filename);
+
+ ff->len = 0; // zero the used size
+ ssize_t r = 1; // read at least once
+ while(r > 0) {
+ ssize_t s = ff->len;
+ ssize_t x = ff->size - s;
+
+ if(unlikely(!x)) {
+ debug(D_PROCFILE, PF_PREFIX ": Expanding data buffer for file '%s'.", procfile_filename(ff));
+ ff = reallocz(ff, sizeof(procfile) + ff->size + PROCFILE_INCREMENT_BUFFER);
+ ff->size += PROCFILE_INCREMENT_BUFFER;
+ }
+
+ debug(D_PROCFILE, "Reading file '%s', from position %zd with length %zd", procfile_filename(ff), s, (ssize_t)(ff->size - s));
+ r = read(ff->fd, &ff->data[s], ff->size - s);
+ if(unlikely(r == -1)) {
+ if(unlikely(!(ff->flags & PROCFILE_FLAG_NO_ERROR_ON_FILE_IO))) error(PF_PREFIX ": Cannot read from file '%s' on fd %d", procfile_filename(ff), ff->fd);
+ procfile_close(ff);
+ return NULL;
+ }
+
+ ff->len += r;
+ }
+
+ // debug(D_PROCFILE, "Rewinding file '%s'", ff->filename);
+ if(unlikely(lseek(ff->fd, 0, SEEK_SET) == -1)) {
+ if(unlikely(!(ff->flags & PROCFILE_FLAG_NO_ERROR_ON_FILE_IO))) error(PF_PREFIX ": Cannot rewind on file '%s'.", procfile_filename(ff));
+ procfile_close(ff);
+ return NULL;
+ }
+
+ pflines_reset(ff->lines);
+ pfwords_reset(ff->words);
+ procfile_parser(ff);
+
+ if(unlikely(procfile_adaptive_initial_allocation)) {
+ if(unlikely(ff->len > procfile_max_allocation)) procfile_max_allocation = ff->len;
+ if(unlikely(ff->lines->len > procfile_max_lines)) procfile_max_lines = ff->lines->len;
+ if(unlikely(ff->words->len > procfile_max_words)) procfile_max_words = ff->words->len;
+ }
+
+ // debug(D_PROCFILE, "File '%s' updated.", ff->filename);
+ return ff;
+}
+
+
+// ==============
+// --- Poor man's cycle counting.
+static unsigned long tsc;
+
+void begin_tsc(void)
+{
+ unsigned long a, d;
+ asm volatile ("cpuid\nrdtsc" : "=a" (a), "=d" (d) : "0" (0) : "ebx", "ecx");
+ tsc = ((unsigned long)d << 32) | (unsigned long)a;
+}
+
+unsigned long end_tsc(void)
+{
+ unsigned long a, d;
+ asm volatile ("rdtscp" : "=a" (a), "=d" (d) : : "ecx");
+ return (((unsigned long)d << 32) | (unsigned long)a) - tsc;
+}
+// ==============
+
+
+unsigned long test_netdata_internal(void) {
+ static procfile *ff = NULL;
+
+ ff = procfile_reopen(ff, "/proc/self/status", " \t:,-()/", PROCFILE_FLAG_NO_ERROR_ON_FILE_IO);
+ if(!ff) {
+ fprintf(stderr, "Failed to open filename\n");
+ exit(1);
+ }
+
+ begin_tsc();
+ ff = procfile_readall(ff);
+ unsigned long c = end_tsc();
+
+ if(!ff) {
+ fprintf(stderr, "Failed to read filename\n");
+ exit(1);
+ }
+
+ return c;
+}
+
+unsigned long test_method1(void) {
+ static procfile *ff = NULL;
+
+ ff = procfile_reopen(ff, "/proc/self/status", " \t:,-()/", PROCFILE_FLAG_NO_ERROR_ON_FILE_IO);
+ if(!ff) {
+ fprintf(stderr, "Failed to open filename\n");
+ exit(1);
+ }
+
+ begin_tsc();
+ ff = procfile_readall1(ff);
+ unsigned long c = end_tsc();
+
+ if(!ff) {
+ fprintf(stderr, "Failed to read filename\n");
+ exit(1);
+ }
+
+ return c;
+}
+
+//--- Test
+int main(int argc, char **argv)
+{
+ (void)argc; (void)argv;
+
+ int i, max = 1000000;
+
+ unsigned long c1 = 0;
+ test_netdata_internal();
+ for(i = 0; i < max ; i++)
+ c1 += test_netdata_internal();
+
+ unsigned long c2 = 0;
+ test_method1();
+ for(i = 0; i < max ; i++)
+ c2 += test_method1();
+
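+    // the first percentage is 100% by construction; it is the baseline method1 is compared against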
+ printf("netdata internal: completed in %lu cycles, %lu cycles per read, %0.2f %%.\n", c1, c1 / max, (float)c1 * 100.0 / (float)c1);
+ printf("method1 : completed in %lu cycles, %lu cycles per read, %0.2f %%.\n", c2, c2 / max, (float)c2 * 100.0 / (float)c1);
+
+ return 0;
+}
diff --git a/tests/profile/benchmark-registry.c b/tests/profile/benchmark-registry.c
new file mode 100644
index 0000000..cfed6d7
--- /dev/null
+++ b/tests/profile/benchmark-registry.c
@@ -0,0 +1,228 @@
+/* SPDX-License-Identifier: GPL-3.0-or-later */
+
+/*
+ * compile with
+ * gcc -O1 -ggdb -Wall -Wextra -I ../src/ -I ../ -o benchmark-registry benchmark-registry.c ../src/dictionary.o ../src/log.o ../src/avl.o ../src/common.o ../src/appconfig.o ../src/web_buffer.o ../src/storage_number.o ../src/rrd.o ../src/health.o -pthread -luuid -lm -DHAVE_CONFIG_H -DVARLIB_DIR="\"/tmp\""
+ */
+
+char *hostname = "me";
+
+#include "../src/registry.c"
+
+void netdata_cleanup_and_exit(int ret) { exit(ret); }
+
+// ----------------------------------------------------------------------------
+// TESTS
+
+int test1(int argc, char **argv) {
+
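+    // print_stats() is a nested function, a GCC extension (this file is compiled with gcc, see the header comment)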
+ void print_stats(uint32_t requests, unsigned long long start, unsigned long long end) {
+ fprintf(stderr, " > SPEED: %u requests served in %0.2f seconds ( >>> %llu per second <<< )\n",
+ requests, (end-start) / 1000000.0, (unsigned long long)requests * 1000000ULL / (end-start));
+
+ fprintf(stderr, " > DB : persons %llu, machines %llu, unique URLs %llu, accesses %llu, URLs: for persons %llu, for machines %llu\n",
+ registry.persons_count, registry.machines_count, registry.urls_count, registry.usages_count,
+ registry.persons_urls_count, registry.machines_urls_count);
+ }
+
+ (void) argc;
+ (void) argv;
+
+ uint32_t u, users = 1000000;
+ uint32_t m, machines = 200000;
+ uint32_t machines2 = machines * 2;
+
+ char **users_guids = malloc(users * sizeof(char *));
+ char **machines_guids = malloc(machines2 * sizeof(char *));
+ char **machines_urls = malloc(machines2 * sizeof(char *));
+ unsigned long long start;
+
+ registry_init();
+
+ fprintf(stderr, "Generating %u machine guids\n", machines2);
+ for(m = 0; m < machines2 ;m++) {
+ uuid_t uuid;
+ machines_guids[m] = malloc(36+1);
+ uuid_generate(uuid);
+ uuid_unparse(uuid, machines_guids[m]);
+
+ char buf[FILENAME_MAX + 1];
+ snprintfz(buf, FILENAME_MAX, "http://%u.netdata.rocks/", m+1);
+ machines_urls[m] = strdup(buf);
+
+ // fprintf(stderr, "\tmachine %u: '%s', url: '%s'\n", m + 1, machines_guids[m], machines_urls[m]);
+ }
+
+ start = timems();
+ fprintf(stderr, "\nGenerating %u users accessing %u machines\n", users, machines);
+ m = 0;
+ time_t now = time(NULL);
+ for(u = 0; u < users ; u++) {
+ if(++m == machines) m = 0;
+
+ PERSON *p = registry_request_access(NULL, machines_guids[m], machines_urls[m], "test", now);
+ users_guids[u] = p->guid;
+ }
+ print_stats(u, start, timems());
+
+ start = timems();
+ fprintf(stderr, "\nAll %u users accessing again the same %u servers\n", users, machines);
+ m = 0;
+ now = time(NULL);
+ for(u = 0; u < users ; u++) {
+ if(++m == machines) m = 0;
+
+ PERSON *p = registry_request_access(users_guids[u], machines_guids[m], machines_urls[m], "test", now);
+
+ if(p->guid != users_guids[u])
+ fprintf(stderr, "ERROR: expected to get user guid '%s' but git '%s'", users_guids[u], p->guid);
+ }
+ print_stats(u, start, timems());
+
+ start = timems();
+ fprintf(stderr, "\nAll %u users accessing a new server, out of the %u servers\n", users, machines);
+ m = 1;
+ now = time(NULL);
+ for(u = 0; u < users ; u++) {
+ if(++m == machines) m = 0;
+
+ PERSON *p = registry_request_access(users_guids[u], machines_guids[m], machines_urls[m], "test", now);
+
+ if(p->guid != users_guids[u])
+ fprintf(stderr, "ERROR: expected to get user guid '%s' but git '%s'", users_guids[u], p->guid);
+ }
+ print_stats(u, start, timems());
+
+ start = timems();
+ fprintf(stderr, "\n%u random users accessing a random server, out of the %u servers\n", users, machines);
+ now = time(NULL);
+ for(u = 0; u < users ; u++) {
+ uint32_t tu = random() * users / RAND_MAX;
+ uint32_t tm = random() * machines / RAND_MAX;
+
+ PERSON *p = registry_request_access(users_guids[tu], machines_guids[tm], machines_urls[tm], "test", now);
+
+ if(p->guid != users_guids[tu])
+ fprintf(stderr, "ERROR: expected to get user guid '%s' but git '%s'", users_guids[tu], p->guid);
+ }
+ print_stats(u, start, timems());
+
+ start = timems();
+ fprintf(stderr, "\n%u random users accessing a random server, out of %u servers\n", users, machines2);
+ now = time(NULL);
+ for(u = 0; u < users ; u++) {
+ uint32_t tu = random() * users / RAND_MAX;
+ uint32_t tm = random() * machines2 / RAND_MAX;
+
+ PERSON *p = registry_request_access(users_guids[tu], machines_guids[tm], machines_urls[tm], "test", now);
+
+ if(p->guid != users_guids[tu])
+ fprintf(stderr, "ERROR: expected to get user guid '%s' but git '%s'", users_guids[tu], p->guid);
+ }
+ print_stats(u, start, timems());
+
+ for(m = 0; m < 10; m++) {
+ start = timems();
+ fprintf(stderr,
+ "\n%u random user accesses to a random server, out of %u servers,\n > using 1/10000 with a random url, 1/1000 with a mismatched url\n",
+ users * 2, machines2);
+ now = time(NULL);
+ for (u = 0; u < users * 2; u++) {
+ uint32_t tu = random() * users / RAND_MAX;
+ uint32_t tm = random() * machines2 / RAND_MAX;
+
+ char *url = machines_urls[tm];
+ char buf[FILENAME_MAX + 1];
+ if (random() % 10000 == 1234) {
+ snprintfz(buf, FILENAME_MAX, "http://random.%ld.netdata.rocks/", random());
+ url = buf;
+ }
+ else if (random() % 1000 == 123)
+ url = machines_urls[random() * machines2 / RAND_MAX];
+
+ PERSON *p = registry_request_access(users_guids[tu], machines_guids[tm], url, "test", now);
+
+ if (p->guid != users_guids[tu])
+ fprintf(stderr, "ERROR: expected to get user guid '%s' but git '%s'", users_guids[tu], p->guid);
+ }
+ print_stats(u, start, timems());
+ }
+
+ fprintf(stderr, "\n\nSAVE\n");
+ start = timems();
+ registry_save();
+ print_stats(registry.persons_count, start, timems());
+
+ fprintf(stderr, "\n\nCLEANUP\n");
+ start = timems();
+ registry_free();
+ print_stats(registry.persons_count, start, timems());
+ return 0;
+}
+
+// ----------------------------------------------------------------------------
+// TESTING
+
+int main(int argc, char **argv) {
+ config_set_boolean("registry", "enabled", 1);
+
+ //debug_flags = 0xFFFFFFFF;
+ test1(argc, argv);
+ exit(0);
+
+ (void)argc;
+ (void)argv;
+
+
+ PERSON *p1, *p2;
+
+ fprintf(stderr, "\n\nINITIALIZATION\n");
+
+ registry_init();
+
+ int i = 2;
+
+ fprintf(stderr, "\n\nADDING ENTRY\n");
+ p1 = registry_request_access("2c95abd0-1542-11e6-8c66-00508db7e9c9", "7c173980-145c-11e6-b86f-00508db7e9c1", "http://localhost:19999/", "test", time(NULL));
+
+ if(0)
+ while(i--) {
+#ifdef REGISTRY_STDOUT_DUMP
+ fprintf(stderr, "\n\nADDING ENTRY\n");
+#endif /* REGISTRY_STDOUT_DUMP */
+ p1 = registry_request_access(NULL, "7c173980-145c-11e6-b86f-00508db7e9c1", "http://localhost:19999/", "test", time(NULL));
+
+#ifdef REGISTRY_STDOUT_DUMP
+ fprintf(stderr, "\n\nADDING ANOTHER URL\n");
+#endif /* REGISTRY_STDOUT_DUMP */
+ p1 = registry_request_access(p1->guid, "7c173980-145c-11e6-b86f-00508db7e9c1", "http://127.0.0.1:19999/", "test", time(NULL));
+
+#ifdef REGISTRY_STDOUT_DUMP
+ fprintf(stderr, "\n\nADDING ANOTHER URL\n");
+#endif /* REGISTRY_STDOUT_DUMP */
+ p1 = registry_request_access(p1->guid, "7c173980-145c-11e6-b86f-00508db7e9c1", "http://my.server:19999/", "test", time(NULL));
+
+#ifdef REGISTRY_STDOUT_DUMP
+ fprintf(stderr, "\n\nADDING ANOTHER MACHINE\n");
+#endif /* REGISTRY_STDOUT_DUMP */
+ p1 = registry_request_access(p1->guid, "7c173980-145c-11e6-b86f-00508db7e9c1", "http://my.server:19999/", "test", time(NULL));
+
+#ifdef REGISTRY_STDOUT_DUMP
+ fprintf(stderr, "\n\nADDING ANOTHER PERSON\n");
+#endif /* REGISTRY_STDOUT_DUMP */
+ p2 = registry_request_access(NULL, "7c173980-145c-11e6-b86f-00508db7e9c3", "http://localhost:19999/", "test", time(NULL));
+
+#ifdef REGISTRY_STDOUT_DUMP
+ fprintf(stderr, "\n\nADDING ANOTHER MACHINE\n");
+#endif /* REGISTRY_STDOUT_DUMP */
+ p2 = registry_request_access(p2->guid, "7c173980-145c-11e6-b86f-00508db7e9c3", "http://localhost:19999/", "test", time(NULL));
+ }
+
+ fprintf(stderr, "\n\nSAVE\n");
+ registry_save();
+
+ fprintf(stderr, "\n\nCLEANUP\n");
+ registry_free();
+ return 0;
+}
diff --git a/tests/profile/benchmark-value-pairs.c b/tests/profile/benchmark-value-pairs.c
new file mode 100644
index 0000000..ae4f53c
--- /dev/null
+++ b/tests/profile/benchmark-value-pairs.c
@@ -0,0 +1,627 @@
+/* SPDX-License-Identifier: GPL-3.0-or-later */
+
+#include "config.h"
+#include "libnetdata/libnetdata.h"
+
+#ifdef simple_hash
+#undef simple_hash
+#endif
+
+void netdata_cleanup_and_exit(int ret) {
+ exit(ret);
+}
+
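+// FNV-1 32-bit hash, the same statement-expression macro used in benchmark-line-parsing.c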
+#define simple_hash(name) ({ \
+ register unsigned char *__hash_source = (unsigned char *)(name); \
+ register uint32_t __hash_value = 0x811c9dc5; \
+ while (*__hash_source) { \
+ __hash_value *= 16777619; \
+ __hash_value ^= (uint32_t) *__hash_source++; \
+ } \
+ __hash_value; \
+})
+
+static inline uint32_t simple_hash2(const char *name) {
+ register unsigned char *s = (unsigned char *)name;
+ register uint32_t hval = 0x811c9dc5;
+ while (*s) {
+ hval *= 16777619;
+ hval ^= (uint32_t) *s++;
+ }
+ return hval;
+}
+
+static inline unsigned long long fast_strtoull(const char *s) {
+ register unsigned long long n = 0;
+ register char c;
+ for(c = *s; c >= '0' && c <= '9' ; c = *(++s)) {
+ n *= 10;
+ n += c - '0';
+ // n = (n << 1) + (n << 3) + (c - '0');
+ }
+ return n;
+}
+
+static uint32_t cache_hash = 0;
+static uint32_t rss_hash = 0;
+static uint32_t rss_huge_hash = 0;
+static uint32_t mapped_file_hash = 0;
+static uint32_t writeback_hash = 0;
+static uint32_t dirty_hash = 0;
+static uint32_t swap_hash = 0;
+static uint32_t pgpgin_hash = 0;
+static uint32_t pgpgout_hash = 0;
+static uint32_t pgfault_hash = 0;
+static uint32_t pgmajfault_hash = 0;
+static uint32_t inactive_anon_hash = 0;
+static uint32_t active_anon_hash = 0;
+static uint32_t inactive_file_hash = 0;
+static uint32_t active_file_hash = 0;
+static uint32_t unevictable_hash = 0;
+static uint32_t hierarchical_memory_limit_hash = 0;
+static uint32_t total_cache_hash = 0;
+static uint32_t total_rss_hash = 0;
+static uint32_t total_rss_huge_hash = 0;
+static uint32_t total_mapped_file_hash = 0;
+static uint32_t total_writeback_hash = 0;
+static uint32_t total_dirty_hash = 0;
+static uint32_t total_swap_hash = 0;
+static uint32_t total_pgpgin_hash = 0;
+static uint32_t total_pgpgout_hash = 0;
+static uint32_t total_pgfault_hash = 0;
+static uint32_t total_pgmajfault_hash = 0;
+static uint32_t total_inactive_anon_hash = 0;
+static uint32_t total_active_anon_hash = 0;
+static uint32_t total_inactive_file_hash = 0;
+static uint32_t total_active_file_hash = 0;
+static uint32_t total_unevictable_hash = 0;
+
+unsigned long long values1[50] = { 0 };
+unsigned long long values2[50] = { 0 };
+unsigned long long values3[50] = { 0 };
+unsigned long long values4[50] = { 0 };
+unsigned long long values5[50] = { 0 };
+unsigned long long values6[50] = { 0 };
+unsigned long long values7[50] = { 0 };
+unsigned long long values8[50] = { 0 };
+unsigned long long values9[50] = { 0 };
+
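+// fixture name/value pairs; collected8/collected9 point into the result arrays of test8/test9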
+struct pair {
+ const char *name;
+ const char *value;
+ uint32_t hash;
+ unsigned long long *collected8;
+ unsigned long long *collected9;
+} pairs[] = {
+ { "cache", "12345678901234", 0, &values8[0] ,&values9[0] },
+ { "rss", "23456789012345", 0, &values8[1] ,&values9[1] },
+ { "rss_huge", "34567890123456", 0, &values8[2] ,&values9[2] },
+ { "mapped_file", "45678901234567", 0, &values8[3] ,&values9[3] },
+ { "writeback", "56789012345678", 0, &values8[4] ,&values9[4] },
+ { "dirty", "67890123456789", 0, &values8[5] ,&values9[5] },
+ { "swap", "78901234567890", 0, &values8[6] ,&values9[6] },
+ { "pgpgin", "89012345678901", 0, &values8[7] ,&values9[7] },
+ { "pgpgout", "90123456789012", 0, &values8[8] ,&values9[8] },
+ { "pgfault", "10345678901234", 0, &values8[9] ,&values9[9] },
+ { "pgmajfault", "11456789012345", 0, &values8[10] ,&values9[10] },
+ { "inactive_anon", "12000000000000", 0, &values8[11] ,&values9[11] },
+ { "active_anon", "13345678901234", 0, &values8[12] ,&values9[12] },
+ { "inactive_file", "14345678901234", 0, &values8[13] ,&values9[13] },
+ { "active_file", "15345678901234", 0, &values8[14] ,&values9[14] },
+ { "unevictable", "16345678901234", 0, &values8[15] ,&values9[15] },
+ { "hierarchical_memory_limit", "17345678901234", 0, &values8[16] ,&values9[16] },
+ { "total_cache", "18345678901234", 0, &values8[17] ,&values9[17] },
+ { "total_rss", "19345678901234", 0, &values8[18] ,&values9[18] },
+ { "total_rss_huge", "20345678901234", 0, &values8[19] ,&values9[19] },
+ { "total_mapped_file", "21345678901234", 0, &values8[20] ,&values9[20] },
+ { "total_writeback", "22345678901234", 0, &values8[21] ,&values9[21] },
+ { "total_dirty", "23000000000000", 0, &values8[22] ,&values9[22] },
+ { "total_swap", "24345678901234", 0, &values8[23] ,&values9[23] },
+ { "total_pgpgin", "25345678901234", 0, &values8[24] ,&values9[24] },
+ { "total_pgpgout", "26345678901234", 0, &values8[25] ,&values9[25] },
+ { "total_pgfault", "27345678901234", 0, &values8[26] ,&values9[26] },
+ { "total_pgmajfault", "28345678901234", 0, &values8[27] ,&values9[27] },
+ { "total_inactive_anon", "29345678901234", 0, &values8[28] ,&values9[28] },
+ { "total_active_anon", "30345678901234", 0, &values8[29] ,&values9[29] },
+ { "total_inactive_file", "31345678901234", 0, &values8[30] ,&values9[30] },
+ { "total_active_file", "32345678901234", 0, &values8[31] ,&values9[31] },
+ { "total_unevictable", "33345678901234", 0, &values8[32] ,&values9[32] },
+ { NULL, NULL , 0, NULL ,NULL }
+};
+
+// simple system strcmp()
+void test1() {
+ int i;
+ for(i = 0; pairs[i].name ; i++) {
+ const char *s = pairs[i].name;
+ const char *v = pairs[i].value;
+
+ if(unlikely(!strcmp(s, "cache")))
+ values1[i] = strtoull(v, NULL, 10);
+
+ else if(unlikely(!strcmp(s, "rss")))
+ values1[i] = strtoull(v, NULL, 10);
+
+ else if(unlikely(!strcmp(s, "rss_huge")))
+ values1[i] = strtoull(v, NULL, 10);
+
+ else if(unlikely(!strcmp(s, "mapped_file")))
+ values1[i] = strtoull(v, NULL, 10);
+
+ else if(unlikely(!strcmp(s, "writeback")))
+ values1[i] = strtoull(v, NULL, 10);
+
+ else if(unlikely(!strcmp(s, "dirty")))
+ values1[i] = strtoull(v, NULL, 10);
+
+ else if(unlikely(!strcmp(s, "swap")))
+ values1[i] = strtoull(v, NULL, 10);
+
+ else if(unlikely(!strcmp(s, "pgpgin")))
+ values1[i] = strtoull(v, NULL, 10);
+
+ else if(unlikely(!strcmp(s, "pgpgout")))
+ values1[i] = strtoull(v, NULL, 10);
+
+ else if(unlikely(!strcmp(s, "pgfault")))
+ values1[i] = strtoull(v, NULL, 10);
+
+ else if(unlikely(!strcmp(s, "pgmajfault")))
+ values1[i] = strtoull(v, NULL, 10);
+ }
+}
+
+// inline simple_hash() with system strtoull()
+void test2() {
+ int i;
+ for(i = 0; pairs[i].name ; i++) {
+ const char *s = pairs[i].name;
+ const char *v = pairs[i].value;
+
+ uint32_t hash = simple_hash2(s);
+
+ if(unlikely(hash == cache_hash && !strcmp(s, "cache")))
+ values2[i] = strtoull(v, NULL, 10);
+
+ else if(unlikely(hash == rss_hash && !strcmp(s, "rss")))
+ values2[i] = strtoull(v, NULL, 10);
+
+ else if(unlikely(hash == rss_huge_hash && !strcmp(s, "rss_huge")))
+ values2[i] = strtoull(v, NULL, 10);
+
+ else if(unlikely(hash == mapped_file_hash && !strcmp(s, "mapped_file")))
+ values2[i] = strtoull(v, NULL, 10);
+
+ else if(unlikely(hash == writeback_hash && !strcmp(s, "writeback")))
+ values2[i] = strtoull(v, NULL, 10);
+
+ else if(unlikely(hash == dirty_hash && !strcmp(s, "dirty")))
+ values2[i] = strtoull(v, NULL, 10);
+
+ else if(unlikely(hash == swap_hash && !strcmp(s, "swap")))
+ values2[i] = strtoull(v, NULL, 10);
+
+ else if(unlikely(hash == pgpgin_hash && !strcmp(s, "pgpgin")))
+ values2[i] = strtoull(v, NULL, 10);
+
+ else if(unlikely(hash == pgpgout_hash && !strcmp(s, "pgpgout")))
+ values2[i] = strtoull(v, NULL, 10);
+
+ else if(unlikely(hash == pgfault_hash && !strcmp(s, "pgfault")))
+ values2[i] = strtoull(v, NULL, 10);
+
+ else if(unlikely(hash == pgmajfault_hash && !strcmp(s, "pgmajfault")))
+ values2[i] = strtoull(v, NULL, 10);
+ }
+}
+
+// statement expression simple_hash(), system strtoull()
+void test3() {
+ int i;
+ for(i = 0; pairs[i].name ; i++) {
+ const char *s = pairs[i].name;
+ const char *v = pairs[i].value;
+
+ uint32_t hash = simple_hash(s);
+
+ if(unlikely(hash == cache_hash && !strcmp(s, "cache")))
+ values3[i] = strtoull(v, NULL, 10);
+
+ else if(unlikely(hash == rss_hash && !strcmp(s, "rss")))
+ values3[i] = strtoull(v, NULL, 10);
+
+ else if(unlikely(hash == rss_huge_hash && !strcmp(s, "rss_huge")))
+ values3[i] = strtoull(v, NULL, 10);
+
+ else if(unlikely(hash == mapped_file_hash && !strcmp(s, "mapped_file")))
+ values3[i] = strtoull(v, NULL, 10);
+
+ else if(unlikely(hash == writeback_hash && !strcmp(s, "writeback")))
+ values3[i] = strtoull(v, NULL, 10);
+
+ else if(unlikely(hash == dirty_hash && !strcmp(s, "dirty")))
+ values3[i] = strtoull(v, NULL, 10);
+
+ else if(unlikely(hash == swap_hash && !strcmp(s, "swap")))
+ values3[i] = strtoull(v, NULL, 10);
+
+ else if(unlikely(hash == pgpgin_hash && !strcmp(s, "pgpgin")))
+ values3[i] = strtoull(v, NULL, 10);
+
+ else if(unlikely(hash == pgpgout_hash && !strcmp(s, "pgpgout")))
+ values3[i] = strtoull(v, NULL, 10);
+
+ else if(unlikely(hash == pgfault_hash && !strcmp(s, "pgfault")))
+ values3[i] = strtoull(v, NULL, 10);
+
+ else if(unlikely(hash == pgmajfault_hash && !strcmp(s, "pgmajfault")))
+ values3[i] = strtoull(v, NULL, 10);
+ }
+}
+
+
+// inline simple_hash(), if-continue checks
+void test4() {
+ int i;
+ for(i = 0; pairs[i].name ; i++) {
+ const char *s = pairs[i].name;
+ const char *v = pairs[i].value;
+
+ uint32_t hash = simple_hash2(s);
+
+ if(unlikely(hash == cache_hash && !strcmp(s, "cache"))) {
+ values4[i] = strtoull(v, NULL, 0);
+ continue;
+ }
+
+ if(unlikely(hash == rss_hash && !strcmp(s, "rss"))) {
+ values4[i] = strtoull(v, NULL, 0);
+ continue;
+ }
+
+ if(unlikely(hash == rss_huge_hash && !strcmp(s, "rss_huge"))) {
+ values4[i] = strtoull(v, NULL, 0);
+ continue;
+ }
+
+ if(unlikely(hash == mapped_file_hash && !strcmp(s, "mapped_file"))) {
+ values4[i] = strtoull(v, NULL, 0);
+ continue;
+ }
+
+ if(unlikely(hash == writeback_hash && !strcmp(s, "writeback"))) {
+ values4[i] = strtoull(v, NULL, 0);
+ continue;
+ }
+
+ if(unlikely(hash == dirty_hash && !strcmp(s, "dirty"))) {
+ values4[i] = strtoull(v, NULL, 0);
+ continue;
+ }
+
+ if(unlikely(hash == swap_hash && !strcmp(s, "swap"))) {
+ values4[i] = strtoull(v, NULL, 0);
+ continue;
+ }
+
+ if(unlikely(hash == pgpgin_hash && !strcmp(s, "pgpgin"))) {
+ values4[i] = strtoull(v, NULL, 0);
+ continue;
+ }
+
+ if(unlikely(hash == pgpgout_hash && !strcmp(s, "pgpgout"))) {
+ values4[i] = strtoull(v, NULL, 0);
+ continue;
+ }
+
+ if(unlikely(hash == pgfault_hash && !strcmp(s, "pgfault"))) {
+ values4[i] = strtoull(v, NULL, 0);
+ continue;
+ }
+
+ if(unlikely(hash == pgmajfault_hash && !strcmp(s, "pgmajfault"))) {
+ values4[i] = strtoull(v, NULL, 0);
+ continue;
+ }
+ }
+}
+
+// inline simple_hash(), if-else-if-else-if (netdata default)
+void test5() {
+ int i;
+ for(i = 0; pairs[i].name ; i++) {
+ const char *s = pairs[i].name;
+ const char *v = pairs[i].value;
+
+ uint32_t hash = simple_hash2(s);
+
+ if(unlikely(hash == cache_hash && !strcmp(s, "cache")))
+ values5[i] = fast_strtoull(v);
+
+ else if(unlikely(hash == rss_hash && !strcmp(s, "rss")))
+ values5[i] = fast_strtoull(v);
+
+ else if(unlikely(hash == rss_huge_hash && !strcmp(s, "rss_huge")))
+ values5[i] = fast_strtoull(v);
+
+ else if(unlikely(hash == mapped_file_hash && !strcmp(s, "mapped_file")))
+ values5[i] = fast_strtoull(v);
+
+ else if(unlikely(hash == writeback_hash && !strcmp(s, "writeback")))
+ values5[i] = fast_strtoull(v);
+
+ else if(unlikely(hash == dirty_hash && !strcmp(s, "dirty")))
+ values5[i] = fast_strtoull(v);
+
+ else if(unlikely(hash == swap_hash && !strcmp(s, "swap")))
+ values5[i] = fast_strtoull(v);
+
+ else if(unlikely(hash == pgpgin_hash && !strcmp(s, "pgpgin")))
+ values5[i] = fast_strtoull(v);
+
+ else if(unlikely(hash == pgpgout_hash && !strcmp(s, "pgpgout")))
+ values5[i] = fast_strtoull(v);
+
+ else if(unlikely(hash == pgfault_hash && !strcmp(s, "pgfault")))
+ values5[i] = fast_strtoull(v);
+
+ else if(unlikely(hash == pgmajfault_hash && !strcmp(s, "pgmajfault")))
+ values5[i] = fast_strtoull(v);
+ }
+}
+
+// ----------------------------------------------------------------------------
+
+void arl_strtoull(const char *name, uint32_t hash, const char *value, void *dst) {
+ (void)name;
+ (void)hash;
+
+ register unsigned long long *d = dst;
+ *d = strtoull(value, NULL, 10);
+ // fprintf(stderr, "name '%s' with hash %u and value '%s' is %llu\n", name, hash, value, *d);
+}
+
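+// ARL variant: names are registered once, then arl_check() keeps the list in
+// arrival order; the 60 is arl_create()'s re-check interval argument.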
+void test6() {
+ static ARL_BASE *base = NULL;
+
+ if(unlikely(!base)) {
+ base = arl_create("test6", arl_strtoull, 60);
+ arl_expect_custom(base, "cache", NULL, &values6[0]);
+ arl_expect_custom(base, "rss", NULL, &values6[1]);
+ arl_expect_custom(base, "rss_huge", NULL, &values6[2]);
+ arl_expect_custom(base, "mapped_file", NULL, &values6[3]);
+ arl_expect_custom(base, "writeback", NULL, &values6[4]);
+ arl_expect_custom(base, "dirty", NULL, &values6[5]);
+ arl_expect_custom(base, "swap", NULL, &values6[6]);
+ arl_expect_custom(base, "pgpgin", NULL, &values6[7]);
+ arl_expect_custom(base, "pgpgout", NULL, &values6[8]);
+ arl_expect_custom(base, "pgfault", NULL, &values6[9]);
+ arl_expect_custom(base, "pgmajfault", NULL, &values6[10]);
+ }
+
+ arl_begin(base);
+
+ int i;
+ for(i = 0; pairs[i].name ; i++)
+ if(arl_check(base, pairs[i].name, pairs[i].value)) break;
+}
+
+void arl_str2ull(const char *name, uint32_t hash, const char *value, void *dst) {
+ (void)name;
+ (void)hash;
+
+ register unsigned long long *d = dst;
+ *d = str2ull(value);
+ // fprintf(stderr, "name '%s' with hash %u and value '%s' is %llu\n", name, hash, value, *d);
+}
+
+void test7() {
+ static ARL_BASE *base = NULL;
+
+ if(unlikely(!base)) {
+ base = arl_create("test7", arl_str2ull, 60);
+ arl_expect_custom(base, "cache", NULL, &values7[0]);
+ arl_expect_custom(base, "rss", NULL, &values7[1]);
+ arl_expect_custom(base, "rss_huge", NULL, &values7[2]);
+ arl_expect_custom(base, "mapped_file", NULL, &values7[3]);
+ arl_expect_custom(base, "writeback", NULL, &values7[4]);
+ arl_expect_custom(base, "dirty", NULL, &values7[5]);
+ arl_expect_custom(base, "swap", NULL, &values7[6]);
+ arl_expect_custom(base, "pgpgin", NULL, &values7[7]);
+ arl_expect_custom(base, "pgpgout", NULL, &values7[8]);
+ arl_expect_custom(base, "pgfault", NULL, &values7[9]);
+ arl_expect_custom(base, "pgmajfault", NULL, &values7[10]);
+ }
+
+ arl_begin(base);
+
+ int i;
+ for(i = 0; pairs[i].name ; i++)
+ if(arl_check(base, pairs[i].name, pairs[i].value)) break;
+}
+
+void test8() {
+ int i;
+ for(i = 0; pairs[i].name; i++) {
+ uint32_t hash = simple_hash(pairs[i].name);
+
+ int j;
+ for(j = 0; pairs[j].name; j++) {
+ if(hash == pairs[j].hash && !strcmp(pairs[i].name, pairs[j].name)) {
+ *pairs[j].collected8 = strtoull(pairs[i].value, NULL, 10);
+ break;
+ }
+ }
+ }
+}
+
+void test9() {
+ int i;
+ for(i = 0; pairs[i].name; i++) {
+ uint32_t hash = simple_hash(pairs[i].name);
+
+ int j;
+ for(j = 0; pairs[j].name; j++) {
+ if(hash == pairs[j].hash && !strcmp(pairs[i].name, pairs[j].name)) {
+ *pairs[j].collected9 = str2ull(pairs[i].value);
+ break;
+ }
+ }
+ }
+}
+
+// ----------------------------------------------------------------------------
+
+/*
+// ==============
+// --- Poor man cycle counting.
+static unsigned long tsc;
+
+static void begin_tsc(void)
+{
+ unsigned long a, d;
+ asm volatile ("cpuid\nrdtsc" : "=a" (a), "=d" (d) : "0" (0) : "ebx", "ecx");
+ tsc = ((unsigned long)d << 32) | (unsigned long)a;
+}
+
+static unsigned long end_tsc(void)
+{
+ unsigned long a, d;
+ asm volatile ("rdtscp" : "=a" (a), "=d" (d) : : "ecx");
+ return (((unsigned long)d << 32) | (unsigned long)a) - tsc;
+}
+// ===============
+*/
+
+static unsigned long long clk;
+
+static void begin_clock() {
+ struct timeval tv;
+ if(unlikely(gettimeofday(&tv, NULL) == -1))
+ return;
+ clk = tv.tv_sec * 1000000 + tv.tv_usec;
+}
+
+static unsigned long long end_clock() {
+ struct timeval tv;
+ if(unlikely(gettimeofday(&tv, NULL) == -1))
+ return -1;
+ return clk = tv.tv_sec * 1000000 + tv.tv_usec - clk;
+}
+
+int main(void)
+{
+ {
+ int i;
+ for(i = 0; pairs[i].name; i++)
+ pairs[i].hash = simple_hash(pairs[i].name);
+ }
+
+ cache_hash = simple_hash("cache");
+ rss_hash = simple_hash("rss");
+ rss_huge_hash = simple_hash("rss_huge");
+ mapped_file_hash = simple_hash("mapped_file");
+ writeback_hash = simple_hash("writeback");
+ dirty_hash = simple_hash("dirty");
+ swap_hash = simple_hash("swap");
+ pgpgin_hash = simple_hash("pgpgin");
+ pgpgout_hash = simple_hash("pgpgout");
+ pgfault_hash = simple_hash("pgfault");
+ pgmajfault_hash = simple_hash("pgmajfault");
+ inactive_anon_hash = simple_hash("inactive_anon");
+ active_anon_hash = simple_hash("active_anon");
+ inactive_file_hash = simple_hash("inactive_file");
+ active_file_hash = simple_hash("active_file");
+ unevictable_hash = simple_hash("unevictable");
+ hierarchical_memory_limit_hash = simple_hash("hierarchical_memory_limit");
+ total_cache_hash = simple_hash("total_cache");
+ total_rss_hash = simple_hash("total_rss");
+ total_rss_huge_hash = simple_hash("total_rss_huge");
+ total_mapped_file_hash = simple_hash("total_mapped_file");
+ total_writeback_hash = simple_hash("total_writeback");
+ total_dirty_hash = simple_hash("total_dirty");
+ total_swap_hash = simple_hash("total_swap");
+ total_pgpgin_hash = simple_hash("total_pgpgin");
+ total_pgpgout_hash = simple_hash("total_pgpgout");
+ total_pgfault_hash = simple_hash("total_pgfault");
+ total_pgmajfault_hash = simple_hash("total_pgmajfault");
+ total_inactive_anon_hash = simple_hash("total_inactive_anon");
+ total_active_anon_hash = simple_hash("total_active_anon");
+ total_inactive_file_hash = simple_hash("total_inactive_file");
+ total_active_file_hash = simple_hash("total_active_file");
+ total_unevictable_hash = simple_hash("total_unevictable");
+
+ // warm up the functions used below (prime caches / lazy initialization)
+ (void)simple_hash2("hello world");
+ (void)strcmp("1", "2");
+ (void)strtoull("123", NULL, 0);
+
+ unsigned long i, c1 = 0, c2 = 0, c3 = 0, c4 = 0, c5 = 0, c6 = 0, c7 = 0, c8 = 0, c9 = 0;
+ unsigned long max = 1000000;
+
+ begin_clock();
+ for(i = 0; i <= max ;i++) test1();
+ c1 = end_clock();
+
+ begin_clock();
+ for(i = 0; i <= max ;i++) test2();
+ c2 = end_clock();
+
+ begin_clock();
+ for(i = 0; i <= max ;i++) test3();
+ c3 = end_clock();
+
+ begin_clock();
+ for(i = 0; i <= max ;i++) test4();
+ c4 = end_clock();
+
+ begin_clock();
+ for(i = 0; i <= max ;i++) test5();
+ c5 = end_clock();
+
+ begin_clock();
+ for(i = 0; i <= max ;i++) test6();
+ c6 = end_clock();
+
+ begin_clock();
+ for(i = 0; i <= max ;i++) test7();
+ c7 = end_clock();
+
+ begin_clock();
+ for(i = 0; i <= max ;i++) test8();
+ c8 = end_clock();
+
+ begin_clock();
+ for(i = 0; i <= max ;i++) test9();
+ c9 = end_clock();
+
+ for(i = 0; i < 11 ; i++)
+ printf("value %lu: %llu %llu %llu %llu %llu %llu %llu %llu %llu\n", i, values1[i], values2[i], values3[i], values4[i], values5[i], values6[i], values7[i], values8[i], values9[i]);
+
+ printf("\n\nRESULTS\n");
+ printf("test1() [1] in %lu usecs: simple system strcmp().\n"
+ "test2() [4] in %lu usecs: inline simple_hash() with system strtoull().\n"
+ "test3() [5] in %lu usecs: statement expression simple_hash(), system strtoull().\n"
+ "test4() [6] in %lu usecs: inline simple_hash(), if-continue checks.\n"
+ "test5() [7] in %lu usecs: inline simple_hash(), if-else-if-else-if (netdata default prior to ARL).\n"
+ "test6() [8] in %lu usecs: adaptive re-sortable array with strtoull() (wow!)\n"
+ "test7() [9] in %lu usecs: adaptive re-sortable array with str2ull() (wow!)\n"
+ "test8() [2] in %lu usecs: nested loop with strtoull()\n"
+ "test9() [3] in %lu usecs: nested loop with str2ull()\n"
+ , c1
+ , c2
+ , c3
+ , c4
+ , c5
+ , c6
+ , c7
+ , c8
+ , c9
+ );
+
+ return 0;
+}
diff --git a/tests/profile/statsd-stress.c b/tests/profile/statsd-stress.c
new file mode 100644
index 0000000..435d58d
--- /dev/null
+++ b/tests/profile/statsd-stress.c
@@ -0,0 +1,151 @@
+/* SPDX-License-Identifier: GPL-3.0-or-later */
+#include <stdlib.h>
+#include <arpa/inet.h>
+#include <netinet/in.h>
+#include <stdio.h>
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <unistd.h>
+#include <string.h>
+#include <time.h>
+#include <pthread.h>
+
+void diep(char *s)
+{
+ perror(s);
+ exit(1);
+}
+
+size_t run_threads = 1;
+size_t metrics = 1024;
+
+#define SERVER_IP "127.0.0.1"
+#define PORT 8125
+
+size_t myrand(size_t max) {
+ size_t loops = max / RAND_MAX;
+ size_t i;
+
+ size_t ret = rand();
+ for(i = 0; i < loops ;i++)
+ ret += rand();
+
+ return ret % max;
+}
+
+struct thread_data {
+ size_t id;
+ struct sockaddr_in *si_other;
+ int slen;
+ size_t counter;
+};
+
+static void *report_thread(void *__data) {
+ struct thread_data *data = (struct thread_data *)__data;
+
+ size_t last = 0;
+ for (;;) {
+ size_t i;
+ size_t total = 0;
+ for(i = 0; i < run_threads ;i++)
+ total += data[i].counter;
+
+ printf("%zu metrics/s\n", total-last);
+ last = total;
+
+ sleep(1);
+ printf("\033[F\033[J");
+ }
+
+ return NULL;
+}
+
+char *types[] = {"g", "c", "m", "ms", "h", "s", NULL};
+// char *types[] = {"g", "c", "C", "h", "ms", NULL}; // brubeck compatible
+
+static void *spam_thread(void *__data) {
+ struct thread_data *data = (struct thread_data *)__data;
+
+ int s;
+ char packet[1024];
+
+ if ((s = socket(AF_INET, SOCK_DGRAM, 0))==-1)
+ diep("socket");
+
+ char **packets = malloc(sizeof(char *) * metrics);
+ size_t i, *lengths = malloc(sizeof(size_t) * metrics);
+ size_t t;
+
+ for(i = 0, t = 0; i < metrics ;i++, t++) {
+ if(!types[t]) t = 0;
+ char *type = types[t];
+
+ lengths[i] = sprintf(packet, "stress.%s.t%zu.m%zu:%zu|%s", type, data->id, i, myrand(metrics), type);
+ packets[i] = strdup(packet);
+ // printf("packet %zu, of length %zu: '%s'\n", i, lengths[i], packets[i]);
+ }
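+ // each packet follows the statsd line format NAME:VALUE|TYPE,
+ // e.g. (hypothetical values) "stress.g.t0.m5:123|g"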
+ //printf("\n");
+
+ for (;;) {
+ for(i = 0; i < metrics ;i++) {
+ if (sendto(s, packets[i], lengths[i], 0, (void *)data->si_other, data->slen) < 0) {
+ printf("C ==> DROPPED\n");
+ return NULL;
+ }
+ data->counter++;
+ }
+ }
+
+ free(packets);
+ free(lengths);
+ close(s);
+ return NULL;
+}
+
+int main(int argc, char *argv[])
+{
+ if (argc != 5) {
+ fprintf(stderr, "Usage: '%s THREADS METRICS IP PORT'\n", argv[0]);
+ exit(-1);
+ }
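+
+ // example invocation (hypothetical values): 4 sender threads,
+ // 1024 metrics per thread, targeting a local agent:
+ //   ./statsd-stress 4 1024 127.0.0.1 8125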
+
+ run_threads = atoi(argv[1]);
+ metrics = atoi(argv[2]);
+ char *ip = argv[3];
+ int port = atoi(argv[4]);
+
+ struct thread_data data[run_threads];
+ struct sockaddr_in si_other;
+ pthread_t threads[run_threads], report;
+ size_t i;
+
+ srand(time(NULL));
+
+ memset(&si_other, 0, sizeof(si_other));
+ si_other.sin_family = AF_INET;
+ si_other.sin_port = htons(port);
+ if (inet_aton(ip, &si_other.sin_addr)==0) {
+ fprintf(stderr, "inet_aton() of ip '%s' failed\n", ip);
+ exit(1);
+ }
+
+ for (i = 0; i < run_threads; ++i) {
+ data[i].id = i;
+ data[i].si_other = &si_other;
+ data[i].slen = sizeof(si_other);
+ data[i].counter = 0;
+ pthread_create(&threads[i], NULL, spam_thread, &data[i]);
+ }
+
+ printf("\n");
+ printf("THREADS : %zu\n", run_threads);
+ printf("METRICS : %zu\n", metrics);
+ printf("DESTINATION : %s:%d\n", ip, port);
+ printf("\n");
+ pthread_create(&report, NULL, report_thread, data);
+
+ for (i =0; i < run_threads; ++i)
+ pthread_join(threads[i], NULL);
+
+ return 0;
+}
diff --git a/tests/profile/test-eval.c b/tests/profile/test-eval.c
new file mode 100644
index 0000000..144381c
--- /dev/null
+++ b/tests/profile/test-eval.c
@@ -0,0 +1,299 @@
+/* SPDX-License-Identifier: GPL-3.0-or-later */
+
+/*
+ * 1. build netdata (as normally)
+ * 2. cd profile/
+ * 3. compile with:
+ * gcc -O1 -ggdb -Wall -Wextra -I ../src/ -I ../ -o test-eval test-eval.c ../src/log.o ../src/eval.o ../src/common.o ../src/clocks.o ../src/web_buffer.o ../src/storage_number.o -pthread -lm
+ */
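+
+/*
+ * Example run (hypothetical expression; single-quote it so the shell
+ * passes it as one argument):
+ *
+ *   ./test-eval '(1 + 2) * 3 > 8'
+ */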
+
+#include "config.h"
+#include "libnetdata/libnetdata.h"
+#include "database/rrdcalc.h"
+
+void netdata_cleanup_and_exit(int ret) { exit(ret); }
+
+/*
+void indent(int level, int show) {
+ int i = level;
+ while(i--) printf(" | ");
+ if(show) printf(" \\_ ");
+ else printf(" \\_ ");
+}
+
+void print_node(EVAL_NODE *op, int level);
+
+void print_value(EVAL_VALUE *v, int level) {
+ indent(level, 0);
+
+ switch(v->type) {
+ case EVAL_VALUE_INVALID:
+ printf("value (NOP)\n");
+ break;
+
+ case EVAL_VALUE_NUMBER:
+ printf("value %Lf (NUMBER)\n", v->number);
+ break;
+
+ case EVAL_VALUE_EXPRESSION:
+ printf("value (SUB-EXPRESSION)\n");
+ print_node(v->expression, level+1);
+ break;
+
+ default:
+ printf("value (INVALID type %d)\n", v->type);
+ break;
+
+ }
+}
+
+void print_node(EVAL_NODE *op, int level) {
+
+// if(op->operator != EVAL_OPERATOR_NOP) {
+ indent(level, 1);
+ if(op->operator) printf("%c (node %d, precedence: %d)\n", op->operator, op->id, op->precedence);
+ else printf("NOP (node %d, precedence: %d)\n", op->id, op->precedence);
+// }
+
+ int i = op->count;
+ while(i--) print_value(&op->ops[i], level + 1);
+}
+
+calculated_number evaluate(EVAL_NODE *op, int depth);
+
+calculated_number evaluate_value(EVAL_VALUE *v, int depth) {
+ switch(v->type) {
+ case EVAL_VALUE_NUMBER:
+ return v->number;
+
+ case EVAL_VALUE_EXPRESSION:
+ return evaluate(v->expression, depth);
+
+ default:
+ fatal("I don't know how to handle EVAL_VALUE type %d", v->type);
+ }
+}
+
+void print_depth(int depth) {
+ static int count = 0;
+
+ printf("%d. ", ++count);
+ while(depth--) printf(" ");
+}
+
+calculated_number evaluate(EVAL_NODE *op, int depth) {
+ calculated_number n1, n2, r;
+
+ switch(op->operator) {
+ case EVAL_OPERATOR_SIGN_PLUS:
+ r = evaluate_value(&op->ops[0], depth);
+ break;
+
+ case EVAL_OPERATOR_SIGN_MINUS:
+ r = -evaluate_value(&op->ops[0], depth);
+ break;
+
+ case EVAL_OPERATOR_PLUS:
+ if(op->count != 2)
+ fatal("Operator '%c' requires 2 values, but we have %d", op->operator, op->count);
+ n1 = evaluate_value(&op->ops[0], depth);
+ n2 = evaluate_value(&op->ops[1], depth);
+ r = n1 + n2;
+ print_depth(depth);
+ printf("%Lf = %Lf + %Lf\n", r, n1, n2);
+ break;
+
+ case EVAL_OPERATOR_MINUS:
+ if(op->count != 2)
+ fatal("Operator '%c' requires 2 values, but we have %d", op->operator, op->count);
+ n1 = evaluate_value(&op->ops[0], depth);
+ n2 = evaluate_value(&op->ops[1], depth);
+ r = n1 - n2;
+ print_depth(depth);
+ printf("%Lf = %Lf - %Lf\n", r, n1, n2);
+ break;
+
+ case EVAL_OPERATOR_MULTIPLY:
+ if(op->count != 2)
+ fatal("Operator '%c' requires 2 values, but we have %d", op->operator, op->count);
+ n1 = evaluate_value(&op->ops[0], depth);
+ n2 = evaluate_value(&op->ops[1], depth);
+ r = n1 * n2;
+ print_depth(depth);
+ printf("%Lf = %Lf * %Lf\n", r, n1, n2);
+ break;
+
+ case EVAL_OPERATOR_DIVIDE:
+ if(op->count != 2)
+ fatal("Operator '%c' requires 2 values, but we have %d", op->operator, op->count);
+ n1 = evaluate_value(&op->ops[0], depth);
+ n2 = evaluate_value(&op->ops[1], depth);
+ r = n1 / n2;
+ print_depth(depth);
+ printf("%Lf = %Lf / %Lf\n", r, n1, n2);
+ break;
+
+ case EVAL_OPERATOR_NOT:
+ n1 = evaluate_value(&op->ops[0], depth);
+ r = !n1;
+ print_depth(depth);
+ printf("%Lf = NOT %Lf\n", r, n1);
+ break;
+
+ case EVAL_OPERATOR_AND:
+ if(op->count != 2)
+ fatal("Operator '%c' requires 2 values, but we have %d", op->operator, op->count);
+ n1 = evaluate_value(&op->ops[0], depth);
+ n2 = evaluate_value(&op->ops[1], depth);
+ r = n1 && n2;
+ print_depth(depth);
+ printf("%Lf = %Lf AND %Lf\n", r, n1, n2);
+ break;
+
+ case EVAL_OPERATOR_OR:
+ if(op->count != 2)
+ fatal("Operator '%c' requires 2 values, but we have %d", op->operator, op->count);
+ n1 = evaluate_value(&op->ops[0], depth);
+ n2 = evaluate_value(&op->ops[1], depth);
+ r = n1 || n2;
+ print_depth(depth);
+ printf("%Lf = %Lf OR %Lf\n", r, n1, n2);
+ break;
+
+ case EVAL_OPERATOR_GREATER_THAN_OR_EQUAL:
+ if(op->count != 2)
+ fatal("Operator '%c' requires 2 values, but we have %d", op->operator, op->count);
+ n1 = evaluate_value(&op->ops[0], depth);
+ n2 = evaluate_value(&op->ops[1], depth);
+ r = n1 >= n2;
+ print_depth(depth);
+ printf("%Lf = %Lf >= %Lf\n", r, n1, n2);
+ break;
+
+ case EVAL_OPERATOR_LESS_THAN_OR_EQUAL:
+ if(op->count != 2)
+ fatal("Operator '%c' requires 2 values, but we have %d", op->operator, op->count);
+ n1 = evaluate_value(&op->ops[0], depth);
+ n2 = evaluate_value(&op->ops[1], depth);
+ r = n1 <= n2;
+ print_depth(depth);
+ printf("%Lf = %Lf <= %Lf\n", r, n1, n2);
+ break;
+
+ case EVAL_OPERATOR_GREATER:
+ if(op->count != 2)
+ fatal("Operator '%c' requires 2 values, but we have %d", op->operator, op->count);
+ n1 = evaluate_value(&op->ops[0], depth);
+ n2 = evaluate_value(&op->ops[1], depth);
+ r = n1 > n2;
+ print_depth(depth);
+ printf("%Lf = %Lf > %Lf\n", r, n1, n2);
+ break;
+
+ case EVAL_OPERATOR_LESS:
+ if(op->count != 2)
+ fatal("Operator '%c' requires 2 values, but we have %d", op->operator, op->count);
+ n1 = evaluate_value(&op->ops[0], depth);
+ n2 = evaluate_value(&op->ops[1], depth);
+ r = n1 < n2;
+ print_depth(depth);
+ printf("%Lf = %Lf < %Lf\n", r, n1, n2);
+ break;
+
+ case EVAL_OPERATOR_NOT_EQUAL:
+ if(op->count != 2)
+ fatal("Operator '%c' requires 2 values, but we have %d", op->operator, op->count);
+ n1 = evaluate_value(&op->ops[0], depth);
+ n2 = evaluate_value(&op->ops[1], depth);
+ r = n1 != n2;
+ print_depth(depth);
+ printf("%Lf = %Lf <> %Lf\n", r, n1, n2);
+ break;
+
+ case EVAL_OPERATOR_EQUAL:
+ if(op->count != 2)
+ fatal("Operator '%c' requires 2 values, but we have %d", op->operator, op->count);
+ n1 = evaluate_value(&op->ops[0], depth);
+ n2 = evaluate_value(&op->ops[1], depth);
+ r = n1 == n2;
+ print_depth(depth);
+ printf("%Lf = %Lf == %Lf\n", r, n1, n2);
+ break;
+
+ case EVAL_OPERATOR_EXPRESSION_OPEN:
+ printf("BEGIN SUB-EXPRESSION\n");
+ r = evaluate_value(&op->ops[0], depth + 1);
+ printf("END SUB-EXPRESSION\n");
+ break;
+
+ case EVAL_OPERATOR_NOP:
+ case EVAL_OPERATOR_VALUE:
+ r = evaluate_value(&op->ops[0], depth);
+ break;
+
+ default:
+ error("I don't know how to handle operator '%c'", op->operator);
+ r = 0;
+ break;
+ }
+
+ return r;
+}
+
+
+void print_expression(EVAL_NODE *op, const char *failed_at, int error) {
+ if(op) {
+ printf("expression tree:\n");
+ print_node(op, 0);
+
+ printf("\nevaluation steps:\n");
+ evaluate(op, 0);
+
+ int error;
+ calculated_number ret = expression_evaluate(op, &error);
+ printf("\ninternal evaluator:\nSTATUS: %d, RESULT = %Lf\n", error, ret);
+
+ expression_free(op);
+ }
+ else {
+ printf("error: %d, failed_at: '%s'\n", error, (failed_at)?failed_at:"<NONE>");
+ }
+}
+*/
+
+int health_variable_lookup(const char *variable, uint32_t hash, RRDCALC *rc, calculated_number *result) {
+ (void)variable;
+ (void)hash;
+ (void)rc;
+ (void)result;
+
+ return 0;
+}
+
+int main(int argc, char **argv) {
+ if(argc != 2) {
+ fprintf(stderr, "I need an epxression (enclose it in single-quotes (') as a single parameter)\n");
+ exit(1);
+ }
+
+ const char *failed_at = NULL;
+ int error;
+
+ EVAL_EXPRESSION *exp = expression_parse(argv[1], &failed_at, &error);
+ if(!exp)
+ printf("\nPARSING FAILED\nExpression: '%s'\nParsing stopped at: '%s'\nParsing error code: %d (%s)\n", argv[1], (failed_at)?((*failed_at)?failed_at:"<END OF EXPRESSION>"):"<NONE>", error, expression_strerror(error));
+
+ else {
+ printf("\nPARSING OK\nExpression: '%s'\nParsed as : '%s'\nParsing error code: %d (%s)\n", argv[1], exp->parsed_as, error, expression_strerror(error));
+
+ if(expression_evaluate(exp)) {
+ printf("\nEvaluates to: %Lf\n\n", exp->result);
+ }
+ else {
+ printf("\nEvaluation failed with code %d and message: %s\n\n", exp->error, buffer_tostring(exp->error_msg));
+ }
+ expression_free(exp);
+ }
+
+ return 0;
+}
diff --git a/tests/run-unit-tests.sh b/tests/run-unit-tests.sh
new file mode 100755
index 0000000..70d618a
--- /dev/null
+++ b/tests/run-unit-tests.sh
@@ -0,0 +1,39 @@
+#!/usr/bin/env bash
+#
+# Unit-testing script
+#
+# This script does the following:
+# 1. Install Netdata into the user's home directory, using the installer
+#    options suited to CI (don't start the daemon, enable the extra
+#    plugins, disable LTO).
+# 2. Run the C code unit tests against the freshly installed agent.
+#
+# Keep in mind that the unit testing takes a rather long time (average
+# 18-19 minutes as of the original creation of this script), so it should
+# only be triggered when code covered by the unit tests has changed.
+#
+# Copyright: SPDX-License-Identifier: GPL-3.0-or-later
+#
+# Author: Austin S. Hemmelgarn <austin@netdata.cloud>
+#
+# shellcheck disable=SC2230
+
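+# Run this from the top level of the netdata source tree; 'fakeroot' must be
+# available, since the installer is invoked through it:
+#   ./tests/run-unit-tests.sh
+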
+install_netdata() {
+ echo "Installing Netdata"
+ fakeroot ./netdata-installer.sh \
+ --install "$HOME" \
+ --dont-wait \
+ --dont-start-it \
+ --enable-plugin-nfacct \
+ --enable-plugin-freeipmi \
+ --disable-lto
+}
+
+c_unit_tests() {
+ echo "Running C code unit tests"
+ "$HOME"/netdata/usr/sbin/netdata -W unittest
+}
+
+install_netdata || exit 1
+
+c_unit_tests || exit 1
diff --git a/tests/stress.sh b/tests/stress.sh
new file mode 100755
index 0000000..97cced0
--- /dev/null
+++ b/tests/stress.sh
@@ -0,0 +1,83 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-3.0-or-later
+
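+# Usage: stress.sh [HOST]
+# HOST defaults to http://127.0.0.1:19999 - the netdata agent whose charts
+# will be fetched and then hammered with random data API queries.
+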
+if ! hash curl 2>/dev/null
+then
+ 1>&2 echo "'curl' not found on system. Please install 'curl'."
+ exit 1
+fi
+
+# set the host to connect to
+if [ -n "$1" ]
+then
+ host="$1"
+else
+ host="http://127.0.0.1:19999"
+fi
+echo "using netdata server at: $host"
+
+# shellcheck disable=SC2207 disable=SC1117
+charts=($(curl -k "$host/netdata.conf" 2>/dev/null | grep "^\[" | cut -d '[' -f 2 | cut -d ']' -f 1 | grep -v ^global$ | grep -v "^plugin" | sort -u))
+if [ "${#charts[@]}" -eq 0 ]
+then
+ echo "Cannot download charts from server: $host"
+ exit 1
+fi
+
+update_every="$(curl -k "$host/netdata.conf" 2>/dev/null | grep "update every = " | head -n 1 | cut -d '=' -f 2)"
+[ $(( update_every + 1 - 1)) -eq 0 ] && update_every=1
+
+entries="$(curl -k "$host/netdata.conf" 2>/dev/null | grep "history = " | head -n 1 | cut -d '=' -f 2)"
+[ $(( entries + 1 - 1)) -eq 0 ] && entries=3600
+
+# to compare like with like, cap the entries at 3600
+[ $entries -gt 3600 ] && entries=3600
+
+if [ $entries -ne 3600 ]
+then
+ echo >&2 "You are running a test for a history of $entries entries."
+fi
+
+modes=("average" "max")
+formats=("jsonp" "json" "ssv" "csv" "datatable" "datasource" "tsv" "ssvcomma" "html" "array")
+options="flip|jsonwrap"
+
+now=$(date +%s)
+first=$((now - (entries * update_every)))
+duration=$((now - first))
+
+file="$(mktemp /tmp/netdata-stress-XXXXXXXX)"
+cleanup() {
+ echo "cleanup"
+ [ -f "$file" ] && rm "$file"
+}
+trap cleanup EXIT
+
+while true
+do
+ echo "curl -k --compressed --keepalive-time 120 --header \"Connection: keep-alive\" \\" >"$file"
+ # shellcheck disable=SC2034
+ for x in {1..100}
+ do
+ dt=$((RANDOM * duration / 32767))
+ st=$((RANDOM * duration / 32767))
+ et=$(( st + dt ))
+ [ $et -gt "$now" ] && st=$(( now - dt ))
+
+ points=$((RANDOM * 2000 / 32767 + 2))
+ st=$((first + st))
+ et=$((first + et))
+
+ mode=$((RANDOM * ${#modes[@]} / 32767))
+ mode="${modes[$mode]}"
+
+ chart=$((RANDOM * ${#charts[@]} / 32767))
+ chart="${charts[$chart]}"
+
+ format=$((RANDOM * ${#formats[@]} / 32767))
+ format="${formats[$format]}"
+
+ echo "--url \"$host/api/v1/data?chart=$chart&mode=$mode&format=$format&options=$options&after=$st&before=$et&points=$points\" \\"
+ done >>"$file"
+ bash "$file" >/dev/null
+done
diff --git a/tests/template_dimension/system_cpu.conf.alarm_foreach b/tests/template_dimension/system_cpu.conf.alarm_foreach
new file mode 100644
index 0000000..21a8cbb
--- /dev/null
+++ b/tests/template_dimension/system_cpu.conf.alarm_foreach
@@ -0,0 +1,8 @@
+ alarm: dev_dim_template
+ on: system.cpu
+ os: linux
+lookup: sum -3s at 0 every 3 percentage foreach system,user,nice
+ units: %
+ every: 1s
+ warn: $this > 1
+ crit: $this > 4
diff --git a/tests/template_dimension/system_cpu.conf.alarm_foreach_sp b/tests/template_dimension/system_cpu.conf.alarm_foreach_sp
new file mode 100644
index 0000000..fdd19e8
--- /dev/null
+++ b/tests/template_dimension/system_cpu.conf.alarm_foreach_sp
@@ -0,0 +1,8 @@
+ alarm: dev_dim_template
+ on: system.cpu
+ os: linux
+lookup: sum -3s at 0 every 3 percentage foreach *
+ units: %
+ every: 1s
+ warn: $this > 1
+ crit: $this > 4
diff --git a/tests/template_dimension/system_cpu.conf.template_alarm b/tests/template_dimension/system_cpu.conf.template_alarm
new file mode 100644
index 0000000..2bd12a1
--- /dev/null
+++ b/tests/template_dimension/system_cpu.conf.template_alarm
@@ -0,0 +1,26 @@
+template: dev_dim_template_system
+ on: system.cpu
+ os: linux
+ lookup: sum -3s at 0 every 3 percentage of system
+ units: %
+ every: 1s
+ warn: $this > 1
+ crit: $this > 4
+
+template: dev_dim_template_user
+ on: system.cpu
+ os: linux
+ lookup: sum -3s at 0 every 3 percentage of user
+ units: %
+ every: 1s
+ warn: $this > 1
+ crit: $this > 4
+
+template: dev_dim_template_nice
+ on: system.cpu
+ os: linux
+ lookup: sum -3s at 0 every 3 percentage of nice
+ units: %
+ every: 1s
+ warn: $this > 1
+ crit: $this > 4
diff --git a/tests/template_dimension/system_cpu.conf.template_foreach b/tests/template_dimension/system_cpu.conf.template_foreach
new file mode 100644
index 0000000..c75c15b
--- /dev/null
+++ b/tests/template_dimension/system_cpu.conf.template_foreach
@@ -0,0 +1,8 @@
+template: dev_dim_template
+ on: system.cpu
+ os: linux
+ lookup: sum -3s at 0 every 3 percentage foreach system,user,nice
+ units: %
+ every: 1s
+ warn: $this > 1
+ crit: $this > 4
diff --git a/tests/template_dimension/system_cpu.conf.template_foreach_sp b/tests/template_dimension/system_cpu.conf.template_foreach_sp
new file mode 100644
index 0000000..f50a832
--- /dev/null
+++ b/tests/template_dimension/system_cpu.conf.template_foreach_sp
@@ -0,0 +1,8 @@
+ template: dev_dim_template
+ on: system.cpu
+ os: linux
+ lookup: sum -3s at 0 every 3 percentage foreach *
+ units: %
+ every: 1s
+ warn: $this > 1
+ crit: $this > 4
diff --git a/tests/template_dimension/system_cpu.conf.unique_alarm b/tests/template_dimension/system_cpu.conf.unique_alarm
new file mode 100644
index 0000000..0f38b6e
--- /dev/null
+++ b/tests/template_dimension/system_cpu.conf.unique_alarm
@@ -0,0 +1,26 @@
+ alarm: dev_dim_template_system
+ on: system.cpu
+ os: linux
+lookup: sum -3s at 0 every 3 percentage of system
+ units: %
+ every: 1s
+ warn: $this > 1
+ crit: $this > 4
+
+ alarm: dev_dim_template_user
+ on: system.cpu
+ os: linux
+lookup: sum -3s at 0 every 3 percentage of user
+ units: %
+ every: 1s
+ warn: $this > 1
+ crit: $this > 4
+
+ alarm: dev_dim_template_nice
+ on: system.cpu
+ os: linux
+lookup: sum -3s at 0 every 3 percentage of nice
+ units: %
+ every: 1s
+ warn: $this > 1
+ crit: $this > 4
diff --git a/tests/template_dimension/template_dim.sh.in b/tests/template_dimension/template_dim.sh.in
new file mode 100644
index 0000000..88978fd
--- /dev/null
+++ b/tests/template_dimension/template_dim.sh.in
@@ -0,0 +1,88 @@
+#!/bin/bash
+
+#The health directory where the alarm configurations will be copied
+HEALTHDIR="@configdir_POST@/health.d/"
+
+#The output directory and the query used to fetch the alarms
+OUTDIR="alarms"
+QUERY="/api/v1/alarms?all"
+MURL="http://localhost:19999$QUERY"
+
+#error messages
+RED='\033[0;31m'
+GREEN='\033[0;32m'
+NOCOLOR='\033[0m'
+
+ALARMTEST="dev_dim_template"
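+
+# Optionally pass the base URL used for the alarm queries as the first
+# argument, e.g.: ./template_dim.sh http://localhost:19999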
+
+change_alarm_file() {
+ if [ -f "$1" ]; then
+ rm "$1"
+ fi
+
+ #copy keeping the permissions
+ cp -a "$2" "$3"
+}
+
+netdata_test_download() {
+ grep "HTTP/1.1 200 OK" "$1" 2>/dev/null 1>/dev/null
+ TEST="$?"
+ if [ "$TEST" -ne "0" ]; then
+ echo -e "${RED} Error to get the alarm log. ${NOCOLOR}"
+ exit 1
+ fi
+
+ TOTALARM=$(grep "$ALARMTEST" "$2" | grep name | cut -d: -f2 | grep -c "$ALARMTEST")
+
+ if [ "$TOTALARM" -ne "$3" ]; then
+ echo -e "${RED} The number of actives alarms with the name $SYSTEMALARM is wrong ${NOCOLOR}"
+ exit 1
+ fi
+}
+
+get_the_logs() {
+ curl -v -k --create-dirs -o "$OUTDIR/$1.out" "$MURL" 2> "$OUTDIR/$1.err"
+ netdata_test_download "$OUTDIR/$1.err" "$OUTDIR/$1.out" "$2"
+}
+
+process_data() {
+ netdata -D &
+ NETDATAPID=$!
+ echo -e "${NOCOLOR}Sleeping during 15 seconds to create alarms"
+ sleep 15
+ kill $NETDATAPID
+ get_the_logs "$1" "$2"
+}
+
+mkdir "$OUTDIR"
+CREATEDIR="$?"
+if [ "$CREATEDIR" -ne "0" ]; then
+ echo -e "${RED}Cannot create the output directory, it already exists. The test will overwrite previous results. ${NOCOLOR}"
+fi
+
+if [ -n "$1" ]; then
+ MURL="$1$QUERY"
+fi
+
+change_alarm_file "./0" "system_cpu.conf.unique_alarm" "$HEALTHDIR/dim_double_without_template.conf"
+process_data "double_without_template" 3 "$HEALTHDIR/dim_double_without_template.conf"
+
+change_alarm_file "$HEALTHDIR/dim_double_without_template.conf" "system_cpu.conf.alarm_foreach" "$HEALTHDIR/dim_foreach_without_template.conf"
+process_data "foreach_without_template" 3 "$HEALTHDIR/dim_foreach_without_template.conf"
+
+change_alarm_file "$HEALTHDIR/dim_foreach_without_template.conf" "system_cpu.conf.alarm_foreach_sp" "$HEALTHDIR/dim_foreach_without_template_sp.conf"
+process_data "foreach_without_template" 10 "$HEALTHDIR/dim_foreach_without_template_sp.conf"
+
+change_alarm_file "$HEALTHDIR/dim_foreach_without_template_sp.conf" "system_cpu.conf.template_alarm" "$HEALTHDIR/dim_double_with_template.conf"
+process_data "double_with_template" 3 "$HEALTHDIR/dim_double_with_template.conf"
+
+change_alarm_file "$HEALTHDIR/dim_double_with_template.conf" "system_cpu.conf.template_foreach" "$HEALTHDIR/dim_foreach_with_template.conf"
+process_data "foreach_with_template" 3 "$HEALTHDIR/dim_foreach_with_template.conf"
+
+change_alarm_file "$HEALTHDIR/dim_foreach_with_template.conf" "system_cpu.conf.template_foreach_sp" "$HEALTHDIR/dim_foreach_with_template_sp.conf"
+process_data "foreach_with_template" 10 "$HEALTHDIR/dim_foreach_with_template_sp.conf"
+
+rm "$HEALTHDIR/dim_foreach_with_template_sp.conf"
+rm -rf "$OUTDIR"
+
+echo -e "${GREEN} all the tests were sucessful ${NOCOLOR}"
diff --git a/tests/updater_checks.bats b/tests/updater_checks.bats
new file mode 100755
index 0000000..930cea9
--- /dev/null
+++ b/tests/updater_checks.bats
@@ -0,0 +1,66 @@
+#!/usr/bin/env bats
+#
+# This script is responsible for validating
+# updater capabilities after a change
+#
+# Copyright: SPDX-License-Identifier: GPL-3.0-or-later
+#
+# Author  : Pavlos Emm. Katsoulakis <paul@netdata.cloud>
+#
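+# Typically driven by tests/updater_checks.sh, which installs bats first;
+# it can also be run directly from the repository top level with:
+#   bats --tap tests/updater_checks.bats
+#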
+
+INSTALLATION="$BATS_TMPDIR/installation"
+ENV="${INSTALLATION}/netdata/etc/netdata/.environment"
+# List of files which need to be checked. Paths must not start with '/'
+FILES="usr/libexec/netdata/plugins.d/go.d.plugin
+ usr/libexec/netdata/plugins.d/charts.d.plugin
+ usr/libexec/netdata/plugins.d/python.d.plugin
+ usr/libexec/netdata/plugins.d/node.d.plugin"
+
+DIRS="usr/sbin/netdata
+ etc/netdata
+ usr/share/netdata
+ usr/libexec/netdata
+ var/cache/netdata
+ var/lib/netdata
+ var/log/netdata"
+
+setup() {
+ # Fail unless we are at the top level directory of the netdata git repo
+ TOP_LEVEL=$(basename "$(git rev-parse --show-toplevel)")
+ CWD=$(git rev-parse --show-cdup || echo "")
+ if [ -n "${CWD}" ] || [ ! "${TOP_LEVEL}" == "netdata" ]; then
+ echo "Run as ./tests/$(basename "$0") from top level directory of git repository"
+ exit 1
+ fi
+}
+
+@test "install stable netdata using kickstart" {
+ ./packaging/installer/kickstart.sh --dont-wait --dont-start-it --auto-update --install ${INSTALLATION}
+
+ # Validate particular files
+ for file in $FILES; do
+ [ ! -f "$BATS_TMPDIR/$file" ]
+ done
+
+ # Validate particular directories
+ for a_dir in $DIRS; do
+ [ ! -d "$BATS_TMPDIR/$a_dir" ]
+ done
+
+ # Cleanup
+ rm -rf ${kickstart_file}
+}
+
+@test "update netdata using the new updater" {
+ export ENVIRONMENT_FILE="${ENV}"
+ # Run the updater, with the override so that it uses the local repo we have at hand
+ export NETDATA_LOCAL_TARBAL_OVERRIDE="${PWD}"
+ ${INSTALLATION}/netdata/usr/libexec/netdata/netdata-updater.sh --not-running-from-cron
+ ! grep "new_installation" "${ENV}"
+}
+
+@test "uninstall netdata using latest uninstaller" {
+ ./packaging/installer/netdata-uninstaller.sh --yes --force --env "${ENV}"
+ [ ! -f "${INSTALLATION}/netdata/usr/sbin/netdata" ]
+ [ ! -f "/etc/cron.daily/netdata-updater" ]
+}
diff --git a/tests/updater_checks.sh b/tests/updater_checks.sh
new file mode 100755
index 0000000..dff87a6
--- /dev/null
+++ b/tests/updater_checks.sh
@@ -0,0 +1,71 @@
+#!/usr/bin/env sh
+#
+# Wrapper script that installs the required dependencies
+# for the BATS script to run successfully
+#
+# Copyright: SPDX-License-Identifier: GPL-3.0-or-later
+#
+# Author  : Pavlos Emm. Katsoulakis <paul@netdata.cloud>
+#
+
+echo "Syncing/updating repository.."
+
+blind_arch_grep_install() {
+ # There is a peculiar docker case with Arch Linux, where grep is not available.
+ # This method has to be triggered blindly, to install grep so that we can
+ # detect the distribution below. It is becoming a chicken-and-egg situation
+ # with all these distros.
+ echo "* * Workaround hack * *"
+ echo "Attempting blind install for archlinux case"
+
+ if command -v pacman > /dev/null 2>&1; then
+ echo "Executing grep installation"
+ pacman -Sy
+ pacman --noconfirm --needed -S grep
+ fi
+}
+blind_arch_grep_install || echo "Workaround failed, proceed as usual"
+
+running_os="$(grep '^ID=' /etc/os-release | cut -d'=' -f2 | sed -e 's/"//g')"
+
+case "${running_os}" in
+"centos"|"fedora"|"CentOS")
+ echo "Running on CentOS, updating YUM repository.."
+ yum clean all
+ yum update -y
+
+ echo "Installing extra dependencies.."
+ yum install -y epel-release
+ yum install -y bats curl
+ ;;
+"debian"|"ubuntu")
+ echo "Running ${running_os}, updating APT repository"
+ apt-get update -y
+ apt-get install -y bats curl
+ ;;
+"opensuse-leap"|"opensuse-tumbleweed")
+ zypper update -y
+ zypper install -y bats curl
+
+ # Fixes curl: (60) SSL certificate problem: unable to get local issuer certificate
+ # https://travis-ci.com/netdata/netdata/jobs/267573805
+ update-ca-certificates
+ ;;
+"arch")
+ pacman --noconfirm -Syu
+ pacman --noconfirm --needed -S bash-bats curl libffi
+ ;;
+"alpine")
+ apk update
+ apk add bash curl bats
+ ;;
+*)
+ echo "Running on ${running_os}, no repository preparation done"
+ ;;
+esac
+
+# Run dependency scriptlet, before anything else
+#
+./packaging/installer/install-required-packages.sh --non-interactive netdata
+
+echo "Running BATS file.."
+bats --tap tests/updater_checks.bats
diff --git a/tests/urls/request.sh.in b/tests/urls/request.sh.in
new file mode 100644
index 0000000..ebdfc09
--- /dev/null
+++ b/tests/urls/request.sh.in
@@ -0,0 +1,307 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+################################################################################################
+#### ####
+#### GLOBAL VARIABLES ####
+#### ####
+################################################################################################
+
+# The current time
+CT=$(date +'%s')
+
+# The previous time
+PT=$((CT - 30))
+
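+# Usage: ./request.sh [HOST]
+# HOST defaults to http://127.0.0.1:19999 (see the MAIN ROUTINE below).
+# This .sh.in file is presumably expanded into request.sh by the build system.
+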
+# The output directory where we will store the results and error
+OUTDIR="tests"
+OUTEDIR="encoded_tests"
+OUTOPTDIR="options"
+ERRDIR="etests"
+NOCOLOR='\033[0m'
+RED='\033[0;31m'
+GREEN='\033[0;32m'
+
+################################################################################################
+#### ####
+#### FUNCTIONS ####
+#### ####
+################################################################################################
+
+# Print error message and close script
+netdata_print_error(){
+ echo "${RED} Closing due error \"$1\" code \"$2\" ${NOCOLOR}"
+ exit 1
+}
+
+# Print the header message of the function
+netdata_print_header() {
+ echo "$1"
+}
+
+# Create the main directory where the results will be stored
+netdata_create_directory() {
+ netdata_print_header "Creating directory $1"
+ if [ ! -d "$1" ]; then
+ mkdir "$1"
+ TEST=$?
+ if [ $TEST -ne 0 ]; then
+ netdata_print_error "Cannot create directory $?"
+ fi
+ else
+ echo "Working with directory $OUTDIR"
+ fi
+}
+
+#Check that the download succeeded
+netdata_test_download(){
+ grep "HTTP/1.1 200 OK" "$1" 2>/dev/null 1>/dev/null
+ TEST=$?
+ if [ $TEST -ne 0 ]; then
+ netdata_print_error "Cannot do download of the page $2" $?
+ exit 1
+ fi
+}
+
+#Check that the request was rejected (an error reply is expected)
+netdata_error_test(){
+ grep "HTTP/1.1 200 OK" "$1" 2>/dev/null 1>/dev/null
+ TEST=$?
+ if [ $TEST -eq 0 ]; then
+ netdata_print_error "The page $2 did not answer with an error" $?
+ exit 1
+ fi
+}
+
+
+# Download information from Netdata
+netdata_download_various() {
+ netdata_print_header "Getting $2"
+ curl -v -k --create-dirs -o "$OUTDIR/$3.out" "$1/$2" 2> "$OUTDIR/$3.err"
+ netdata_test_download "$OUTDIR/$3.err" "$1/$2"
+}
+
+netdata_download_various_with_options() {
+ netdata_print_header "Getting options for $2"
+ curl -X OPTIONS -v -k --create-dirs -o "$OUTOPTDIR/$3.out" "$1/$2" 2> "$OUTOPTDIR/$3.err"
+ netdata_test_download "$OUTOPTDIR/$3.err" "$1/$2"
+}
+
+# Download information from Netdata
+netdata_wrong_request_various() {
+ netdata_print_header "Getting $2"
+ curl -v -k --create-dirs -o "$ERRDIR/$3.out" "$1/$2" 2> "$ERRDIR/$3.err"
+ netdata_error_test "$ERRDIR/$3.err" "$1/$2"
+}
+
+# Download charts from Netdata
+netdata_download_charts() {
+ curl -v -k --create-dirs -o "$OUTDIR/charts.out" "$1/$2" 2> "$OUTDIR/charts.err"
+ netdata_test_download "$OUTDIR/charts.err" "$1/$2"
+
+ #Extract the chart ids from the JSON reply
+ grep -w "id" tests/charts.out| cut -d: -f2 | grep "\"," | sed s/,//g | sort
+}
+
+#Test options for a specific chart
+netdata_download_chart() {
+ SEPARATOR="&"
+ EQUAL="="
+ OUTD=$OUTDIR
+ ENCODED=" "
+ for I in $(seq 0 1); do
+ if [ "$I" -eq "1" ] ; then
+ SEPARATOR="%26"
+ EQUAL="%3D"
+ OUTD=$OUTEDIR
+ ENCODED="encoded"
+ fi
+
+ NAME=${3//\"/}
+ netdata_print_header "Getting data for $NAME using $4 $ENCODED"
+
+ LDIR=$OUTD"/"$4
+
+ LURL="$1/$2$EQUAL$NAME"
+
+ NAME=$NAME"_$4"
+
+ curl -v -k --create-dirs -o "$LDIR/$NAME.out" "$LURL" 2> "$LDIR/$NAME.err"
+ netdata_test_download "$LDIR/$NAME.err" "$LURL"
+
+ UFILES=( "points" "before" "after" )
+ COUNTER=0
+ for OPT in "points=100" "before=$PT" "after=$CT" ;
+ do
+ LURL="$LURL$SEPARATOR$OPT"
+ LFILE=$NAME"_${UFILES[$COUNTER]}";
+
+ curl -v -k --create-dirs -o "$LDIR/$LFILE.out" "$LURL" 2> "$LDIR/$LFILE.err"
+ netdata_test_download "$LDIR/$LFILE.err" "$LURL"
+
+ COUNTER=$((COUNTER + 1))
+ done
+
+ LURL="$LURL&group$EQUAL"
+ for OPT in "min" "max" "sum" "median" "stddev" "cv" "ses" "des" "incremental_sum" "average";
+ do
+ TURL=$LURL$OPT
+ TFILE=$NAME"_$OPT";
+ curl -v -k --create-dirs -o "$LDIR/$TFILE.out" "$TURL" 2> "$LDIR/$TFILE.err"
+ netdata_test_download "$LDIR/$TFILE.err" "$TURL"
+ for MORE in "jsonp" "json" "ssv" "csv" "datatable" "datasource" "tsv" "ssvcomma" "html" "array";
+ do
+ TURL=$TURL"&format="$MORE
+ TFILE=$NAME"_$OPT""_$MORE";
+ curl -v -k --create-dirs -o "$LDIR/$TFILE.out" "$TURL" 2> "$LDIR/$TFILE.err"
+ netdata_test_download "$LDIR/$TFILE.err" "$TURL"
+ done
+ done
+
+ LURL="$LURL$OPT&gtime=60"
+ NFILE=$NAME"_gtime"
+ curl -v -k --create-dirs -o "$LDIR/$NFILE.out" "$TURL" 2> "$LDIR/$NFILE.err"
+ netdata_test_download "$LDIR/$NFILE.err" "$LURL"
+
+ LURL="$LURL$OPT&options=percentage"
+ NFILE=$NAME"_percentage"
+ curl -v -k --create-dirs -o "$LDIR/$NFILE.out" "$TURL" 2> "$LDIR/$NFILE.err"
+ netdata_test_download "$LDIR/$NFILE.err" "$LURL"
+
+ LURL="$LURL$OPT&dimensions=system%7Cnice"
+ NFILE=$NAME"_dimension"
+ curl -v -k --create-dirs -o "$LDIR/$NFILE.out" "$TURL" 2> "$LDIR/$NFILE.err"
+ netdata_test_download "$LDIR/$NFILE.err" "$LURL"
+
+ LURL="$LURL$OPT&label=testing"
+ NFILE=$NAME"_label"
+ curl -v -k --create-dirs -o "$LDIR/$NFILE.out" "$TURL" 2> "$LDIR/$NFILE.err"
+ netdata_test_download "$LDIR/$NFILE.err" "$LURL"
+ done
+}
+
+# Download information from Netdata
+netdata_download_allmetrics() {
+ netdata_print_header "Getting All metrics"
+ LURL="$1/api/v1/allmetrics?format="
+ for FMT in "shell" "prometheus" "prometheus_all_hosts" "json" ;
+ do
+ TURL=$LURL$FMT
+ for OPT in "yes" "no";
+ do
+ if [ "$FMT" == "prometheus" ]; then
+ TURL="$TURL&help=$OPT&types=$OPT&timestamps=$OPT"
+ fi
+ TURL="$TURL&names=$OPT&oldunits=$OPT&hideunits=$OPT&prefix=ND"
+
+ NAME="allmetrics_$FMT"
+ echo "$OUTDIR/$2/$NAME.out"
+ curl -v -k --create-dirs -o "$OUTDIR/$2/$NAME.out" "$TURL" 2> "$OUTDIR/$2/$NAME.err"
+ netdata_test_download "$OUTDIR/$2/$NAME.err" "$TURL"
+ done
+ done
+}
+
+
+####################################################
+#### ####
+#### MAIN ROUTINE ####
+#### ####
+####################################################
+MURL="http://127.0.0.1:19999"
+
+if [ -n "$1" ]; then
+ MURL="$1"
+fi
+
+netdata_create_directory $OUTDIR
+netdata_create_directory $OUTEDIR
+netdata_create_directory $OUTOPTDIR
+netdata_create_directory $ERRDIR
+
+wget --no-check-certificate --execute="robots = off" --mirror --convert-links --no-parent "$MURL"
+TEST=$?
+if [ $TEST -ne "0" ] ; then
+ echo "Cannot connect to Netdata"
+ exit 1
+fi
+
+netdata_download_various "$MURL" "netdata.conf" "netdata.conf"
+
+netdata_download_various_with_options "$MURL" "netdata.conf" "netdata.conf"
+
+netdata_wrong_request_various "$MURL" "api/v15/info?this%20could%20not%20be%20here" "err_version"
+
+netdata_wrong_request_various "$MURL" "api/v1/\(*@&$\!$%%5E\)\!$*%&\)\!$*%%5E*\!%5E%\!%5E$%\!%5E%\(\!*%5E*%5E%\(*@&$%5E%\(\!%5E#*&\!^#$*&\!^%\)@\($%^\)\!*&^\(\!*&^#$&#$\)\!$%^\)\!$*%&\)#$\!^#*$^\!\(*#^#\)\!%^\!\)$*%&\!\(*&$\!^#$*&^\!*#^$\!*^\)%\(\!*&$%\)\(\!&#$\!^*#&$^\!*^%\)\!$%\)\!\(&#$\!^#*&^$" "err_version2"
+
+netdata_download_various "$MURL" "api/v1/info" "info"
+netdata_download_various_with_options "$MURL" "api/v1/info" "info"
+netdata_download_various "$MURL" "api/v1/info?this%20could%20not%20be%20here" "err_info"
+
+netdata_print_header "Getting all the netdata charts"
+CHARTS=$( netdata_download_charts "$MURL" "api/v1/charts" )
+WCHARTS=$( netdata_download_charts "$MURL" "api/v1/charts?this%20could%20not%20be%20here" )
+WCHARTS2=$( netdata_download_charts "$MURL" "api/v1/charts%3fthis%20could%20not%20be%20here" )
+
+if [ ${#CHARTS[@]} -ne ${#WCHARTS[@]} ]; then
+ echo "The number of charts does not match with division not encoded.";
+ exit 2;
+elif [ ${#CHARTS[@]} -ne ${#WCHARTS2[@]} ]; then
+ echo "The number of charts does not match when everything is encoded";
+ exit 3;
+fi
+
+netdata_wrong_request_various "$MURL" "api/v1/chart" "err_chart_without_chart"
+netdata_wrong_request_various "$MURL" "api/v1/chart?_=234231424242" "err_chart_arg"
+
+netdata_download_various "$MURL" "api/v1/chart?chart=cpu.cpu0_interrupts&_=234231424242" "chart_cpu_with_more_args"
+netdata_download_various_with_options "$MURL" "api/v1/chart?chart=cpu.cpu0_interrupts&_=234231424242" "chart_cpu_with_more_args"
+
+netdata_download_various "$MURL" "api/v1/chart%3Fchart=cpu.cpu0_interrupts&_=234231424242" "chart_cpu_with_more_args_encoded"
+netdata_download_various_with_options "$MURL" "api/v1/chart%3Fchart=cpu.cpu0_interrupts&_=234231424242" "chart_cpu_with_more_args_encoded"
+netdata_download_various "$MURL" "api/v1/chart%3Fchart=cpu.cpu0_interrupts%26_=234231424242" "chart_cpu_with_more_args_encoded2"
+netdata_download_various "$MURL" "api/v1/chart%3Fchart%3Dcpu.cpu0_interrupts%26_%3D234231424242" "chart_cpu_with_more_args_encoded3"
+
+netdata_create_directory "$OUTDIR/chart"
+for I in $CHARTS ; do
+ NAME=${I//\"/}
+ netdata_download_various "$MURL" "api/v1/chart?chart=$NAME" "chart/$NAME"
+done
+
+netdata_wrong_request_various "$MURL" "api/v1/alarm_variables" "err_alarm_variables_without_chart"
+netdata_wrong_request_various "$MURL" "api/v1/alarm_variables?_=234231424242" "err_alarm_variables_arg"
+netdata_download_various "$MURL" "api/v1/alarm_variables?chart=cpu.cpu0_interrupts&_=234231424242" "alarm_cpu_with_more_args"
+
+netdata_create_directory "$OUTDIR/alarm_variables"
+for I in $CHARTS ; do
+ NAME=${I//\"/}
+ netdata_download_various "$MURL" "api/v1/alarm_variables?chart=$NAME" "alarm_variables/$NAME"
+done
+
+netdata_create_directory "$OUTDIR/badge"
+netdata_create_directory "$OUTEDIR/badge"
+for I in $CHARTS ; do
+ netdata_download_chart "$MURL" "api/v1/badge.svg?chart" "$I" "badge"
+done
+
+netdata_create_directory "$OUTDIR/allmetrics"
+netdata_download_allmetrics "$MURL" "allmetrics"
+
+netdata_download_various "$MURL" "api/v1/alarms?all" "alarms_all"
+netdata_download_various "$MURL" "api/v1/alarms?active" "alarms_active"
+netdata_download_various "$MURL" "api/v1/alarms" "alarms_nothing"
+
+netdata_download_various "$MURL" "api/v1/alarm_log?after" "alarm_without"
+netdata_download_various "$MURL" "api/v1/alarm_log" "alarm_nothing"
+netdata_download_various "$MURL" "api/v1/alarm_log?after&_=$PT" "alarm_log"
+
+netdata_create_directory "$OUTDIR/data"
+netdata_create_directory "$OUTEDIR/data"
+for I in $CHARTS ; do
+ netdata_download_chart "$MURL" "api/v1/data?chart" "$I" "data"
+ break;
+done
+
+echo -e "${GREEN}ALL the URLS got 200 as answer! ${NOCOLOR}"
+
+exit 0
diff --git a/tests/web/easypiechart.chart.spec.js b/tests/web/easypiechart.chart.spec.js
new file mode 100644
index 0000000..23bf33d
--- /dev/null
+++ b/tests/web/easypiechart.chart.spec.js
@@ -0,0 +1,39 @@
+"use strict";
+
+
+// with xdescribe, this is skipped.
+describe("creation of easy pie charts", function () {
+
+ beforeAll(function () {
+ // karma stores the loaded files relative to "base/".
+ // This command is needed to load HTML fixtures
+ jasmine.getFixtures().fixturesPath = "base/tests/web/fixtures";
+ });
+
+ it("should create new chart, but it's failure is expected for demonstration purpose", function () {
+ // arrange
+ // Theoretically we can load some html. What about jquery? could this work?
+ // https://stackoverflow.com/questions/5337481/spying-on-jquery-selectors-in-jasmine
+ loadFixtures("easypiechart.chart.fixture1.html");
+
+ // for easy pie chart, we can fake the data result:
+ var data = {
+ result: [5]
+ };
+ // act
+ var result = NETDATA.easypiechartChartCreate(createState(), data);
+ // assert
+ expect(result).toBe(true);
+ });
+
+ function createState(min, max) {
+ // create a fake state with only needed properties.
+ return {
+ tmp: {
+ easyPieChartMin: min,
+ easyPieChartMax: max
+ }
+ };
+ }
+
+});
diff --git a/tests/web/easypiechart.percentage.spec.js b/tests/web/easypiechart.percentage.spec.js
new file mode 100644
index 0000000..976b339
--- /dev/null
+++ b/tests/web/easypiechart.percentage.spec.js
@@ -0,0 +1,142 @@
+"use strict";
+
+
+describe("percentage calculations for easy pie charts with dynamic range", function () {
+
+ it("should return positive value, if value greater than dynamic max", function () {
+ var state = createState(null, null);
+
+ var result = NETDATA.easypiechartPercentFromValueMinMax(state, 6, 2, 10);
+
+ expect(result).toBe(60);
+ });
+
+ it("should return negative value, if value lesser than dynamic min", function () {
+ var state = createState(null, null);
+
+ var result = NETDATA.easypiechartPercentFromValueMinMax(state, -6, -10, 10);
+
+ expect(result).toBe(-60);
+ });
+
+ it("should return 0 if value is zero and min negative, max positive", function () {
+ var state = createState(null, null);
+
+ var result = NETDATA.easypiechartPercentFromValueMinMax(state, 0, -1, 2);
+
+ expect(result).toBe(0);
+ });
+
+ it("should return 0.1 if value and min are zero and max positive", function () {
+ var state = createState(null, null);
+
+ var result = NETDATA.easypiechartPercentFromValueMinMax(state, 0, 0, 2);
+
+ expect(result).toBe(0.1);
+ });
+
+ it("should return -0.1 if value is zero, max and min negative", function () {
+ var state = createState(null, null);
+
+ var result = NETDATA.easypiechartPercentFromValueMinMax(state, 0, -2, -1);
+
+ expect(result).toBe(-0.1);
+ });
+
+ it("should return positive value, if max is user-defined", function () {
+ var state = createState(null, 50);
+
+ var result = NETDATA.easypiechartPercentFromValueMinMax(state, 46, -40, 50);
+
+ expect(result).toBe(92);
+ });
+
+ it("should return negative value, if min is user-defined", function () {
+ var state = createState(-50, null);
+
+ var result = NETDATA.easypiechartPercentFromValueMinMax(state, -46, -50, 40);
+
+ expect(result).toBe(-92);
+ });
+
+});
+
+describe("percentage calculations for easy pie charts with fixed range", function () {
+
+ it("should return positive value, if min and max are user-defined", function () {
+ var state = createState(40, 50);
+
+ var result = NETDATA.easypiechartPercentFromValueMinMax(state, 46, 40, 50);
+
+ expect(result).toBe(60);
+ });
+
+ it("should return 100 if positive min and max are user-defined, but value is greater than max", function () {
+ var state = createState(40, 50);
+
+ var result = NETDATA.easypiechartPercentFromValueMinMax(state, 60, 40, 50);
+
+ expect(result).toBe(100);
+ });
+
+ it("should return 0.1 if positive min and max are user-defined, but value is smaller than min", function () {
+ var state = createState(40, 50);
+
+ var result = NETDATA.easypiechartPercentFromValueMinMax(state, 39.9, 42, 48);
+
+ expect(result).toBe(0.1);
+ });
+
+ it("should return -100 if negative min and max are user-defined, but value is smaller than min", function () {
+ var state = createState(-40, -50);
+
+ var result = NETDATA.easypiechartPercentFromValueMinMax(state, -50.1, -40, -50);
+
+ expect(result).toBe(-100);
+ });
+
+ it("should return 0.1 if negative min and max are user-defined, but value is smaller than min", function () {
+ var state = createState(-40, -50);
+
+ var result = NETDATA.easypiechartPercentFromValueMinMax(state, -50.1, -20, -45);
+
+ expect(result).toBe(-100);
+ });
+});
+
+describe("percentage calculations for easy pie charts with invalid input", function () {
+
+ it("should return 0.1 if value undefined", function () {
+ var state = createState(null, null);
+
+ var result = NETDATA.easypiechartPercentFromValueMinMax(state, null, 40, 50);
+
+ expect(result).toBe(0.1);
+ });
+
+ it("should return positive value if min is undefined", function () {
+ var state = createState(null, null);
+
+ var result = NETDATA.easypiechartPercentFromValueMinMax(state, 1, null, 2);
+
+ expect(result).toBe(50);
+ });
+
+ it("should return positive if max is undefined", function () {
+ var state = createState(null, null);
+
+ var result = NETDATA.easypiechartPercentFromValueMinMax(state, 21, 42, null);
+
+ expect(result).toBe(50);
+ });
+});
+
+function createState(min, max) {
+ // create a fake state with only the needed properties.
+ return {
+ tmp: {
+ easyPieChartMin: min,
+ easyPieChartMax: max
+ }
+ };
+}
diff --git a/tests/web/fixtures/easypiechart.chart.fixture1.html b/tests/web/fixtures/easypiechart.chart.fixture1.html
new file mode 100644
index 0000000..f0f4eb7
--- /dev/null
+++ b/tests/web/fixtures/easypiechart.chart.fixture1.html
@@ -0,0 +1,6 @@
+<div data-netdata="system.cpu"
+ data-chart-library="easypiechart"
+ data-width="5%"
+ data-height="20"
+ data-after="-30"
+></div> \ No newline at end of file
diff --git a/tests/web/karma.conf.js b/tests/web/karma.conf.js
new file mode 100644
index 0000000..b3ee094
--- /dev/null
+++ b/tests/web/karma.conf.js
@@ -0,0 +1,110 @@
+// Karma configuration
+// Generated on Sun Jul 16 2017 02:28:05 GMT+0200 (CEST)
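+//
+// A typical local run (assuming karma-cli and the plugins referenced below
+// are installed) is:
+//   karma start tests/web/karma.conf.js --single-run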
+
+module.exports = function (config) {
+ config.set({
+
+ // base path that will be used to resolve all patterns (eg. files, exclude)
+ // this path should always resolve so that "." is the "netdata" root folder.
+ basePath: '../../',
+
+ // frameworks to use
+ // available frameworks: https://npmjs.org/browse/keyword/karma-adapter
+ frameworks: ['jasmine'],
+
+
+ // list of files / patterns to load in the browser
+ files: [
+ // order matters! load jquery libraries first
+ 'web/lib/jquery*.js',
+ // our jasmine libs and fixtures
+ 'tests/web/lib/*.js',
+ 'tests/web/fixtures/*.html',
+ // then bootstrap
+ 'web/lib/bootstrap*.js',
+ // then the rest
+ 'web/lib/perfect-scrollbar*.js',
+ 'web/lib/dygraph*.js',
+ 'web/lib/gauge*.js',
+ 'web/lib/morris*.js',
+ 'web/lib/raphael*.js',
+ 'web/lib/tableExport*.js',
+ 'web/lib/d3*.js',
+ 'web/lib/c3*.js',
+ // some CSS
+ 'web/css/*.css',
+ 'web/dashboard.css',
+ // our dashboard
+ 'web/dashboard.js',
+ // finally our test specs
+ 'tests/web/*.spec.js',
+ ],
+
+
+ // list of files to exclude
+ exclude: [],
+
+
+ // preprocess matching files before serving them to the browser
+ // available preprocessors: https://npmjs.org/browse/keyword/karma-preprocessor
+ preprocessors: {
+ 'web/dashboard.js': ['coverage']
+ },
+
+
+ // test results reporter to use
+ // possible values: 'dots', 'progress'
+ // available reporters: https://npmjs.org/browse/keyword/karma-reporter
+ reporters: ['progress', 'coverage'],
+
+ // optionally, configure the reporter
+ coverageReporter: {
+ type : 'html',
+ dir : 'coverage/'
+ },
+
+ // web server port
+ port: 9876,
+
+
+ // enable / disable colors in the output (reporters and logs)
+ colors: true,
+
+
+ // level of logging
+ // possible values: config.LOG_DISABLE || config.LOG_ERROR || config.LOG_WARN || config.LOG_INFO || config.LOG_DEBUG
+ logLevel: config.LOG_INFO,
+
+
+ // enable / disable watching file and executing tests whenever any file changes
+ autoWatch: false,
+ // not needed with WebStorm. Just hit Alt+Shift+R to rerun.
+
+ // start these browsers
+ // available browser launchers: https://npmjs.org/browse/keyword/karma-launcher
+ browsers: ['Chromium', 'ChromiumHeadless'],
+
+ customLaunchers: {
+ // Headless browsers could be useful for CI integration, if installed.
+ ChromiumHeadless: {
+ // needs Chrome/Chromium version >= 59
+ // see https://chromium.googlesource.com/chromium/src/+/lkgr/headless/README.md
+ base: "Chromium",
+ flags: [
+ "--headless",
+ "--disable-gpu",
+ // Without a remote debugging port, Chromium exits immediately.
+ "--remote-debugging-port=9222"
+ ]
+ }
+ },
+
+ // Continuous Integration mode
+ // if true, Karma captures browsers, runs the tests and exits
+ singleRun: false,
+
+ // Concurrency level
+ // how many browsers should be started simultaneously
+ concurrency: Infinity
+ })
+};
diff --git a/tests/web/lib/jasmine-jquery.js b/tests/web/lib/jasmine-jquery.js
new file mode 100644
index 0000000..6e4611c
--- /dev/null
+++ b/tests/web/lib/jasmine-jquery.js
@@ -0,0 +1,841 @@
+/*!
+ Jasmine-jQuery: a set of jQuery helpers for Jasmine tests.
+
+ Version 2.1.1
+
+ https://github.com/velesin/jasmine-jquery
+
+ Copyright (c) 2010-2014 Wojciech Zawistowski, Travis Jeffery
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+(function (root, factory) {
+ if (typeof module !== 'undefined' && module.exports && typeof exports !== 'undefined') {
+ factory(root, root.jasmine, require('jquery'));
+ } else {
+ factory(root, root.jasmine, root.jQuery);
+ }
+}((function() {return this; })(), function (window, jasmine, $) { "use strict";
+
+ jasmine.spiedEventsKey = function (selector, eventName) {
+ return [$(selector).selector, eventName].toString()
+ }
+
+ jasmine.getFixtures = function () {
+ return jasmine.currentFixtures_ = jasmine.currentFixtures_ || new jasmine.Fixtures()
+ }
+
+ jasmine.getStyleFixtures = function () {
+ return jasmine.currentStyleFixtures_ = jasmine.currentStyleFixtures_ || new jasmine.StyleFixtures()
+ }
+
+ jasmine.Fixtures = function () {
+ this.containerId = 'jasmine-fixtures'
+ this.fixturesCache_ = {}
+ this.fixturesPath = 'spec/javascripts/fixtures'
+ }
+
+ jasmine.Fixtures.prototype.set = function (html) {
+ this.cleanUp()
+ return this.createContainer_(html)
+ }
+
+ jasmine.Fixtures.prototype.appendSet= function (html) {
+ this.addToContainer_(html)
+ }
+
+ jasmine.Fixtures.prototype.preload = function () {
+ this.read.apply(this, arguments)
+ }
+
+ jasmine.Fixtures.prototype.load = function () {
+ this.cleanUp()
+ this.createContainer_(this.read.apply(this, arguments))
+ }
+
+ jasmine.Fixtures.prototype.appendLoad = function () {
+ this.addToContainer_(this.read.apply(this, arguments))
+ }
+
+ jasmine.Fixtures.prototype.read = function () {
+ var htmlChunks = []
+ , fixtureUrls = arguments
+
+ for(var urlCount = fixtureUrls.length, urlIndex = 0; urlIndex < urlCount; urlIndex++) {
+ htmlChunks.push(this.getFixtureHtml_(fixtureUrls[urlIndex]))
+ }
+
+ return htmlChunks.join('')
+ }
+
+ jasmine.Fixtures.prototype.clearCache = function () {
+ this.fixturesCache_ = {}
+ }
+
+ jasmine.Fixtures.prototype.cleanUp = function () {
+ $('#' + this.containerId).remove()
+ }
+
+ jasmine.Fixtures.prototype.sandbox = function (attributes) {
+ var attributesToSet = attributes || {}
+ return $('<div id="sandbox" />').attr(attributesToSet)
+ }
+
+ jasmine.Fixtures.prototype.createContainer_ = function (html) {
+ var container = $('<div>')
+ .attr('id', this.containerId)
+ .html(html)
+
+ $(document.body).append(container)
+ return container
+ }
+
+ jasmine.Fixtures.prototype.addToContainer_ = function (html){
+ var container = $(document.body).find('#'+this.containerId).append(html)
+
+ if (!container.length) {
+ this.createContainer_(html)
+ }
+ }
+
+ jasmine.Fixtures.prototype.getFixtureHtml_ = function (url) {
+ if (typeof this.fixturesCache_[url] === 'undefined') {
+ this.loadFixtureIntoCache_(url)
+ }
+ return this.fixturesCache_[url]
+ }
+
+ jasmine.Fixtures.prototype.loadFixtureIntoCache_ = function (relativeUrl) {
+ var self = this
+ , url = this.makeFixtureUrl_(relativeUrl)
+ , htmlText = ''
+ , request = $.ajax({
+ async: false, // must be synchronous to guarantee that no tests are run before fixture is loaded
+ cache: false,
+ url: url,
+ dataType: 'html',
+ success: function (data, status, $xhr) {
+ htmlText = $xhr.responseText
+ }
+ }).fail(function ($xhr, status, err) {
+ throw new Error('Fixture could not be loaded: ' + url + ' (status: ' + status + ', message: ' + err.message + ')')
+ })
+
+ var scripts = $($.parseHTML(htmlText, true)).find('script[src]') || [];
+
+ scripts.each(function(){
+ $.ajax({
+ async: false, // must be synchronous to guarantee that no tests are run before fixture is loaded
+ cache: false,
+ dataType: 'script',
+ url: $(this).attr('src'),
+ success: function (data, status, $xhr) {
+ htmlText += '<script>' + $xhr.responseText + '</script>'
+ },
+ error: function ($xhr, status, err) {
+ throw new Error('Script could not be loaded: ' + url + ' (status: ' + status + ', message: ' + err.message + ')')
+ }
+ });
+ })
+
+ self.fixturesCache_[relativeUrl] = htmlText;
+ }
+
+ jasmine.Fixtures.prototype.makeFixtureUrl_ = function (relativeUrl){
+ return this.fixturesPath.match('/$') ? this.fixturesPath + relativeUrl : this.fixturesPath + '/' + relativeUrl
+ }
+
+ jasmine.Fixtures.prototype.proxyCallTo_ = function (methodName, passedArguments) {
+ return this[methodName].apply(this, passedArguments)
+ }
+
+
+ jasmine.StyleFixtures = function () {
+ this.fixturesCache_ = {}
+ this.fixturesNodes_ = []
+ this.fixturesPath = 'spec/javascripts/fixtures'
+ }
+
+ jasmine.StyleFixtures.prototype.set = function (css) {
+ this.cleanUp()
+ this.createStyle_(css)
+ }
+
+ jasmine.StyleFixtures.prototype.appendSet = function (css) {
+ this.createStyle_(css)
+ }
+
+ jasmine.StyleFixtures.prototype.preload = function () {
+ this.read_.apply(this, arguments)
+ }
+
+ jasmine.StyleFixtures.prototype.load = function () {
+ this.cleanUp()
+ this.createStyle_(this.read_.apply(this, arguments))
+ }
+
+ jasmine.StyleFixtures.prototype.appendLoad = function () {
+ this.createStyle_(this.read_.apply(this, arguments))
+ }
+
+ jasmine.StyleFixtures.prototype.cleanUp = function () {
+ while(this.fixturesNodes_.length) {
+ this.fixturesNodes_.pop().remove()
+ }
+ }
+
+ jasmine.StyleFixtures.prototype.createStyle_ = function (html) {
+ var styleText = $('<div></div>').html(html).text()
+ , style = $('<style>' + styleText + '</style>')
+
+ this.fixturesNodes_.push(style)
+ $('head').append(style)
+ }
+
+ jasmine.StyleFixtures.prototype.clearCache = jasmine.Fixtures.prototype.clearCache
+ jasmine.StyleFixtures.prototype.read_ = jasmine.Fixtures.prototype.read
+ jasmine.StyleFixtures.prototype.getFixtureHtml_ = jasmine.Fixtures.prototype.getFixtureHtml_
+ jasmine.StyleFixtures.prototype.loadFixtureIntoCache_ = jasmine.Fixtures.prototype.loadFixtureIntoCache_
+ jasmine.StyleFixtures.prototype.makeFixtureUrl_ = jasmine.Fixtures.prototype.makeFixtureUrl_
+ jasmine.StyleFixtures.prototype.proxyCallTo_ = jasmine.Fixtures.prototype.proxyCallTo_
+
+ jasmine.getJSONFixtures = function () {
+ return jasmine.currentJSONFixtures_ = jasmine.currentJSONFixtures_ || new jasmine.JSONFixtures()
+ }
+
+ jasmine.JSONFixtures = function () {
+ this.fixturesCache_ = {}
+ this.fixturesPath = 'spec/javascripts/fixtures/json'
+ }
+
+ jasmine.JSONFixtures.prototype.load = function () {
+ this.read.apply(this, arguments)
+ return this.fixturesCache_
+ }
+
+ jasmine.JSONFixtures.prototype.read = function () {
+ var fixtureUrls = arguments
+
+ for(var urlCount = fixtureUrls.length, urlIndex = 0; urlIndex < urlCount; urlIndex++) {
+ this.getFixtureData_(fixtureUrls[urlIndex])
+ }
+
+ return this.fixturesCache_
+ }
+
+ jasmine.JSONFixtures.prototype.clearCache = function () {
+ this.fixturesCache_ = {}
+ }
+
+ jasmine.JSONFixtures.prototype.getFixtureData_ = function (url) {
+ if (!this.fixturesCache_[url]) this.loadFixtureIntoCache_(url)
+ return this.fixturesCache_[url]
+ }
+
+ jasmine.JSONFixtures.prototype.loadFixtureIntoCache_ = function (relativeUrl) {
+ var self = this
+ , url = this.fixturesPath.match('/$') ? this.fixturesPath + relativeUrl : this.fixturesPath + '/' + relativeUrl
+
+ $.ajax({
+ async: false, // must be synchronous to guarantee that no tests are run before fixture is loaded
+ cache: false,
+ dataType: 'json',
+ url: url,
+ success: function (data) {
+ self.fixturesCache_[relativeUrl] = data
+ },
+ error: function ($xhr, status, err) {
+ throw new Error('JSONFixture could not be loaded: ' + url + ' (status: ' + status + ', message: ' + err.message + ')')
+ }
+ })
+ }
+
+ jasmine.JSONFixtures.prototype.proxyCallTo_ = function (methodName, passedArguments) {
+ return this[methodName].apply(this, passedArguments)
+ }
+
+ jasmine.jQuery = function () {}
+
+ jasmine.jQuery.browserTagCaseIndependentHtml = function (html) {
+ return $('<div/>').append(html).html()
+ }
+
+ jasmine.jQuery.elementToString = function (element) {
+ return $(element).map(function () { return this.outerHTML; }).toArray().join(', ')
+ }
+
+ var data = {
+ spiedEvents: {}
+ , handlers: []
+ }
+
+ jasmine.jQuery.events = {
+ spyOn: function (selector, eventName) {
+ var handler = function (e) {
+ var calls = (typeof data.spiedEvents[jasmine.spiedEventsKey(selector, eventName)] !== 'undefined') ? data.spiedEvents[jasmine.spiedEventsKey(selector, eventName)].calls : 0
+ data.spiedEvents[jasmine.spiedEventsKey(selector, eventName)] = {
+ args: jasmine.util.argsToArray(arguments),
+ calls: ++calls
+ }
+ }
+
+ $(selector).on(eventName, handler)
+ data.handlers.push(handler)
+
+ return {
+ selector: selector,
+ eventName: eventName,
+ handler: handler,
+ reset: function (){
+ delete data.spiedEvents[jasmine.spiedEventsKey(selector, eventName)]
+ },
+ calls: {
+ count: function () {
+ return data.spiedEvents[jasmine.spiedEventsKey(selector, eventName)] ?
+ data.spiedEvents[jasmine.spiedEventsKey(selector, eventName)].calls : 0;
+ },
+ any: function () {
+ return data.spiedEvents[jasmine.spiedEventsKey(selector, eventName)] ?
+ !!data.spiedEvents[jasmine.spiedEventsKey(selector, eventName)].calls : false;
+ }
+ }
+ }
+ },
+
+ args: function (selector, eventName) {
+ var actualArgs = data.spiedEvents[jasmine.spiedEventsKey(selector, eventName)].args
+
+ if (!actualArgs) {
+ throw "There is no spy for " + eventName + " on " + selector.toString() + ". Make sure to create a spy using spyOnEvent."
+ }
+
+ return actualArgs
+ },
+
+ wasTriggered: function (selector, eventName) {
+ return !!(data.spiedEvents[jasmine.spiedEventsKey(selector, eventName)])
+ },
+
+ wasTriggeredWith: function (selector, eventName, expectedArgs, util, customEqualityTesters) {
+ var actualArgs = jasmine.jQuery.events.args(selector, eventName).slice(1)
+
+ if (Object.prototype.toString.call(expectedArgs) !== '[object Array]')
+ actualArgs = actualArgs[0]
+
+ return util.equals(actualArgs, expectedArgs, customEqualityTesters)
+ },
+
+ wasPrevented: function (selector, eventName) {
+ var spiedEvent = data.spiedEvents[jasmine.spiedEventsKey(selector, eventName)]
+ , args = (jasmine.util.isUndefined(spiedEvent)) ? {} : spiedEvent.args
+ , e = args ? args[0] : undefined
+
+ return e && e.isDefaultPrevented()
+ },
+
+ wasStopped: function (selector, eventName) {
+ var spiedEvent = data.spiedEvents[jasmine.spiedEventsKey(selector, eventName)]
+ , args = (jasmine.util.isUndefined(spiedEvent)) ? {} : spiedEvent.args
+ , e = args ? args[0] : undefined
+
+ return e && e.isPropagationStopped()
+ },
+
+ cleanUp: function () {
+ data.spiedEvents = {}
+ data.handlers = []
+ }
+ }
+
+ var hasProperty = function (actualValue, expectedValue) {
+ if (expectedValue === undefined)
+ return actualValue !== undefined
+
+ return actualValue === expectedValue
+ }
+
+ beforeEach(function () {
+ jasmine.addMatchers({
+ toHaveClass: function () {
+ return {
+ compare: function (actual, className) {
+ return { pass: $(actual).hasClass(className) }
+ }
+ }
+ },
+
+ toHaveCss: function () {
+ return {
+ compare: function (actual, css) {
+ var stripCharsRegex = /[\s;\"\']/g
+ for (var prop in css) {
+ var value = css[prop]
+ // see issue #147 on gh
+ ;if ((value === 'auto') && ($(actual).get(0).style[prop] === 'auto')) continue
+ var actualStripped = $(actual).css(prop).replace(stripCharsRegex, '')
+ var valueStripped = value.replace(stripCharsRegex, '')
+ if (actualStripped !== valueStripped) return { pass: false }
+ }
+ return { pass: true }
+ }
+ }
+ },
+
+ toBeVisible: function () {
+ return {
+ compare: function (actual) {
+ return { pass: $(actual).is(':visible') }
+ }
+ }
+ },
+
+ toBeHidden: function () {
+ return {
+ compare: function (actual) {
+ return { pass: $(actual).is(':hidden') }
+ }
+ }
+ },
+
+ toBeSelected: function () {
+ return {
+ compare: function (actual) {
+ return { pass: $(actual).is(':selected') }
+ }
+ }
+ },
+
+ toBeChecked: function () {
+ return {
+ compare: function (actual) {
+ return { pass: $(actual).is(':checked') }
+ }
+ }
+ },
+
+ toBeEmpty: function () {
+ return {
+ compare: function (actual) {
+ return { pass: $(actual).is(':empty') }
+ }
+ }
+ },
+
+ toBeInDOM: function () {
+ return {
+ compare: function (actual) {
+ return { pass: $.contains(document.documentElement, $(actual)[0]) }
+ }
+ }
+ },
+
+ toExist: function () {
+ return {
+ compare: function (actual) {
+ return { pass: $(actual).length }
+ }
+ }
+ },
+
+ toHaveLength: function () {
+ return {
+ compare: function (actual, length) {
+ return { pass: $(actual).length === length }
+ }
+ }
+ },
+
+ toHaveAttr: function () {
+ return {
+ compare: function (actual, attributeName, expectedAttributeValue) {
+ return { pass: hasProperty($(actual).attr(attributeName), expectedAttributeValue) }
+ }
+ }
+ },
+
+ toHaveProp: function () {
+ return {
+ compare: function (actual, propertyName, expectedPropertyValue) {
+ return { pass: hasProperty($(actual).prop(propertyName), expectedPropertyValue) }
+ }
+ }
+ },
+
+ toHaveId: function () {
+ return {
+ compare: function (actual, id) {
+ return { pass: $(actual).attr('id') == id }
+ }
+ }
+ },
+
+ toHaveHtml: function () {
+ return {
+ compare: function (actual, html) {
+ return { pass: $(actual).html() == jasmine.jQuery.browserTagCaseIndependentHtml(html) }
+ }
+ }
+ },
+
+ toContainHtml: function () {
+ return {
+ compare: function (actual, html) {
+ var actualHtml = $(actual).html()
+ , expectedHtml = jasmine.jQuery.browserTagCaseIndependentHtml(html)
+
+ return { pass: (actualHtml.indexOf(expectedHtml) >= 0) }
+ }
+ }
+ },
+
+ toHaveText: function () {
+ return {
+ compare: function (actual, text) {
+ var actualText = $(actual).text()
+ var trimmedText = $.trim(actualText)
+
+ if (text && $.isFunction(text.test)) {
+ return { pass: text.test(actualText) || text.test(trimmedText) }
+ } else {
+ return { pass: (actualText == text || trimmedText == text) }
+ }
+ }
+ }
+ },
+
+ toContainText: function () {
+ return {
+ compare: function (actual, text) {
+ var trimmedText = $.trim($(actual).text())
+
+ if (text && $.isFunction(text.test)) {
+ return { pass: text.test(trimmedText) }
+ } else {
+ return { pass: trimmedText.indexOf(text) != -1 }
+ }
+ }
+ }
+ },
+
+ toHaveValue: function () {
+ return {
+ compare: function (actual, value) {
+ return { pass: $(actual).val() === value }
+ }
+ }
+ },
+
+ toHaveData: function () {
+ return {
+ compare: function (actual, key, expectedValue) {
+ return { pass: hasProperty($(actual).data(key), expectedValue) }
+ }
+ }
+ },
+
+ toContainElement: function () {
+ return {
+ compare: function (actual, selector) {
+ return { pass: $(actual).find(selector).length }
+ }
+ }
+ },
+
+ toBeMatchedBy: function () {
+ return {
+ compare: function (actual, selector) {
+ return { pass: $(actual).filter(selector).length }
+ }
+ }
+ },
+
+ toBeDisabled: function () {
+ return {
+ compare: function (actual, selector) {
+ return { pass: $(actual).is(':disabled') }
+ }
+ }
+ },
+
+ toBeFocused: function (selector) {
+ return {
+ compare: function (actual, selector) {
+ return { pass: $(actual)[0] === $(actual)[0].ownerDocument.activeElement }
+ }
+ }
+ },
+
+ toHandle: function () {
+ return {
+ compare: function (actual, event) {
+ if ( !actual || actual.length === 0 ) return { pass: false };
+ var events = $._data($(actual).get(0), "events")
+
+ if (!events || !event || typeof event !== "string") {
+ return { pass: false }
+ }
+
+ var namespaces = event.split(".")
+ , eventType = namespaces.shift()
+ , sortedNamespaces = namespaces.slice(0).sort()
+ , namespaceRegExp = new RegExp("(^|\\.)" + sortedNamespaces.join("\\.(?:.*\\.)?") + "(\\.|$)")
+
+ if (events[eventType] && namespaces.length) {
+ for (var i = 0; i < events[eventType].length; i++) {
+ var namespace = events[eventType][i].namespace
+
+ if (namespaceRegExp.test(namespace))
+ return { pass: true }
+ }
+ } else {
+ return { pass: (events[eventType] && events[eventType].length > 0) }
+ }
+
+ return { pass: false }
+ }
+ }
+ },
+
+ toHandleWith: function () {
+ return {
+ compare: function (actual, eventName, eventHandler) {
+ if ( !actual || actual.length === 0 ) return { pass: false };
+ var normalizedEventName = eventName.split('.')[0]
+ , stack = $._data($(actual).get(0), "events")[normalizedEventName]
+
+ for (var i = 0; i < stack.length; i++) {
+ if (stack[i].handler == eventHandler) return { pass: true }
+ }
+
+ return { pass: false }
+ }
+ }
+ },
+
+ toHaveBeenTriggeredOn: function () {
+ return {
+ compare: function (actual, selector) {
+ var result = { pass: jasmine.jQuery.events.wasTriggered(selector, actual) }
+
+ result.message = result.pass ?
+ "Expected event " + $(actual) + " not to have been triggered on " + selector :
+ "Expected event " + $(actual) + " to have been triggered on " + selector
+
+ return result;
+ }
+ }
+ },
+
+ toHaveBeenTriggered: function (){
+ return {
+ compare: function (actual) {
+ var eventName = actual.eventName
+ , selector = actual.selector
+ , result = { pass: jasmine.jQuery.events.wasTriggered(selector, eventName) }
+
+ result.message = result.pass ?
+ "Expected event " + eventName + " not to have been triggered on " + selector :
+ "Expected event " + eventName + " to have been triggered on " + selector
+
+ return result
+ }
+ }
+ },
+
+ toHaveBeenTriggeredOnAndWith: function (j$, customEqualityTesters) {
+ return {
+ compare: function (actual, selector, expectedArgs) {
+ var wasTriggered = jasmine.jQuery.events.wasTriggered(selector, actual)
+ , result = { pass: wasTriggered && jasmine.jQuery.events.wasTriggeredWith(selector, actual, expectedArgs, j$, customEqualityTesters) }
+
+ if (wasTriggered) {
+ var actualArgs = jasmine.jQuery.events.args(selector, actual, expectedArgs)[1]
+ result.message = result.pass ?
+ "Expected event " + actual + " not to have been triggered with " + jasmine.pp(expectedArgs) + " but it was triggered with " + jasmine.pp(actualArgs) :
+ "Expected event " + actual + " to have been triggered with " + jasmine.pp(expectedArgs) + " but it was triggered with " + jasmine.pp(actualArgs)
+
+ } else {
+ // todo check on this
+ result.message = result.pass ?
+ "Expected event " + actual + " not to have been triggered on " + selector :
+ "Expected event " + actual + " to have been triggered on " + selector
+ }
+
+ return result
+ }
+ }
+ },
+
+ toHaveBeenPreventedOn: function () {
+ return {
+ compare: function (actual, selector) {
+ var result = { pass: jasmine.jQuery.events.wasPrevented(selector, actual) }
+
+ result.message = result.pass ?
+ "Expected event " + actual + " not to have been prevented on " + selector :
+ "Expected event " + actual + " to have been prevented on " + selector
+
+ return result
+ }
+ }
+ },
+
+ toHaveBeenPrevented: function () {
+ return {
+ compare: function (actual) {
+ var eventName = actual.eventName
+ , selector = actual.selector
+ , result = { pass: jasmine.jQuery.events.wasPrevented(selector, eventName) }
+
+ result.message = result.pass ?
+ "Expected event " + eventName + " not to have been prevented on " + selector :
+ "Expected event " + eventName + " to have been prevented on " + selector
+
+ return result
+ }
+ }
+ },
+
+ toHaveBeenStoppedOn: function () {
+ return {
+ compare: function (actual, selector) {
+ var result = { pass: jasmine.jQuery.events.wasStopped(selector, actual) }
+
+ result.message = result.pass ?
+ "Expected event " + actual + " not to have been stopped on " + selector :
+ "Expected event " + actual + " to have been stopped on " + selector
+
+ return result;
+ }
+ }
+ },
+
+ toHaveBeenStopped: function () {
+ return {
+ compare: function (actual) {
+ var eventName = actual.eventName
+ , selector = actual.selector
+ , result = { pass: jasmine.jQuery.events.wasStopped(selector, eventName) }
+
+ result.message = result.pass ?
+ "Expected event " + eventName + " not to have been stopped on " + selector :
+ "Expected event " + eventName + " to have been stopped on " + selector
+
+ return result
+ }
+ }
+ }
+ })
+
+ jasmine.getEnv().addCustomEqualityTester(function(a, b) {
+ if (a && b) {
+ if (a instanceof $ || jasmine.isDomNode(a)) {
+ var $a = $(a)
+
+ if (b instanceof $)
+ return $a.length == b.length && $a.is(b)
+
+ return $a.is(b);
+ }
+
+ if (b instanceof $ || jasmine.isDomNode(b)) {
+ var $b = $(b)
+
+ if (a instanceof $)
+ return a.length == $b.length && $b.is(a)
+
+ return $b.is(a);
+ }
+ }
+ })
+
+ jasmine.getEnv().addCustomEqualityTester(function (a, b) {
+ if (a instanceof $ && b instanceof $ && a.size() == b.size())
+ return a.is(b)
+ })
+ })
+
+ afterEach(function () {
+ jasmine.getFixtures().cleanUp()
+ jasmine.getStyleFixtures().cleanUp()
+ jasmine.jQuery.events.cleanUp()
+ })
+
+ window.readFixtures = function () {
+ return jasmine.getFixtures().proxyCallTo_('read', arguments)
+ }
+
+ window.preloadFixtures = function () {
+ jasmine.getFixtures().proxyCallTo_('preload', arguments)
+ }
+
+ window.loadFixtures = function () {
+ jasmine.getFixtures().proxyCallTo_('load', arguments)
+ }
+
+ window.appendLoadFixtures = function () {
+ jasmine.getFixtures().proxyCallTo_('appendLoad', arguments)
+ }
+
+ window.setFixtures = function (html) {
+ return jasmine.getFixtures().proxyCallTo_('set', arguments)
+ }
+
+ window.appendSetFixtures = function () {
+ jasmine.getFixtures().proxyCallTo_('appendSet', arguments)
+ }
+
+ window.sandbox = function (attributes) {
+ return jasmine.getFixtures().sandbox(attributes)
+ }
+
+ window.spyOnEvent = function (selector, eventName) {
+ return jasmine.jQuery.events.spyOn(selector, eventName)
+ }
+
+ window.preloadStyleFixtures = function () {
+ jasmine.getStyleFixtures().proxyCallTo_('preload', arguments)
+ }
+
+ window.loadStyleFixtures = function () {
+ jasmine.getStyleFixtures().proxyCallTo_('load', arguments)
+ }
+
+ window.appendLoadStyleFixtures = function () {
+ jasmine.getStyleFixtures().proxyCallTo_('appendLoad', arguments)
+ }
+
+ window.setStyleFixtures = function (html) {
+ jasmine.getStyleFixtures().proxyCallTo_('set', arguments)
+ }
+
+ window.appendSetStyleFixtures = function (html) {
+ jasmine.getStyleFixtures().proxyCallTo_('appendSet', arguments)
+ }
+
+ window.loadJSONFixtures = function () {
+ return jasmine.getJSONFixtures().proxyCallTo_('load', arguments)
+ }
+
+ window.getJSONFixture = function (url) {
+ return jasmine.getJSONFixtures().proxyCallTo_('read', arguments)[url]
+ }
+}));
diff --git a/web/Makefile.am b/web/Makefile.am
new file mode 100644
index 0000000..ccaccd7
--- /dev/null
+++ b/web/Makefile.am
@@ -0,0 +1,22 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+SUBDIRS = \
+ api \
+ gui \
+ server \
+ $(NULL)
+
+usersslconfigdir=$(configdir)/ssl
+
+# Explicitly install directories to avoid permission issues due to umask
+install-exec-local:
+ $(INSTALL) -d $(DESTDIR)$(usersslconfigdir)
+
+dist_noinst_DATA = \
+ README.md \
+ gui/confluence/README.md \
+ gui/custom/README.md \
+ $(NULL)
diff --git a/web/README.md b/web/README.md
new file mode 100644
index 0000000..fc1d371
--- /dev/null
+++ b/web/README.md
@@ -0,0 +1,236 @@
+<!--
+title: "Dashboards"
+description: "Every Netdata Agent comes bundled with hundreds of interactive, customizable charts designed by monitoring and troubleshooting experts."
+custom_edit_url: https://github.com/netdata/netdata/edit/master/web/README.md
+-->
+
+# Dashboards
+
+Because Netdata is a health monitoring and _performance troubleshooting_ system,
+we put a lot of emphasis on real-time, meaningful, and context-aware charts.
+
+We bundle Netdata with a dashboard and hundreds of charts, designed by both our
+team and the community, but you can also customize them yourself.
+
+There are two primary ways to view Netdata's dashboards:
+
+1. The [local Agent dashboard](/web/gui/README.md) that comes pre-configured with every Netdata installation. You can
+ see it at `http://NODE:19999`, replacing `NODE` with `localhost`, the hostname of your node, or its IP address. You
+ can customize the contents and colors of the standard dashboard [using
+ JavaScript](/web/gui/README.md#customizing-the-standard-dashboard).
+
+2. The [`dashboard.js` JavaScript library](#dashboardjs), which helps you
+ [customize the standard dashboards](/web/gui/README.md#customizing-the-standard-dashboard)
+ using JavaScript, or create entirely new [custom dashboards](/web/gui/custom/README.md) or
+ [Atlassian Confluence dashboards](/web/gui/confluence/README.md).
+
+You can also view all the data Netdata collects through the [REST API v1](/web/api/).
+
+No matter where you use Netdata's charts, you'll want to know how to [use](#using-charts) them. You'll also want to
+understand how Netdata defines [charts](#charts), [dimensions](#dimensions), [families](#families), and
+[contexts](#contexts).
+
+## Using charts
+
+Netdata's charts are far from static. They are interactive, real-time, and work
+with your mouse, touchpad, or touchscreen!
+
+Hover over any chart to temporarily pause it and see the exact values presented
+as different [dimensions](#dimensions). Click or tap to stop the chart from automatically updating with new metrics, thereby locking it to a single timeframe.
+
+![Animated GIF of hovering over a chart to see
+values](https://user-images.githubusercontent.com/1153921/62968279-9227dd00-bdbf-11e9-9112-1d21444d0f31.gif)
+
+You can change how charts show their metrics by zooming in or out, moving
+forward or backward in time, or selecting a specific timeframe for more in-depth
+analysis.
+
+Whenever you use a chart in this way, Netdata synchronizes all the other charts
+to match it.
+
+You can change how charts show their metrics in a few different ways, each of
+which has a few methods:
+
+| Manipulation | Method #1 | Method #2 | Method #3 |
+|--- |--- |--- |--- |
+| **Reset** charts to default auto-refreshing state | `double click` | `double tap` (touchpad/touchscreen) | |
+| **Select** a certain timeframe | `ALT` + `mouse selection` | `⌘` + `mouse selection` (macOS) | |
+| **Pan** forward or back in time | `click and drag` | `touch and drag` (touchpad/touchscreen) | |
+| **Zoom** to a specific timeframe | `SHIFT` + `mouse selection` | | |
+| **Zoom** in/out | `SHIFT`/`ALT` + `mouse scrollwheel` | `SHIFT`/`ALT` + `two-finger pinch` (touchpad/touchscreen) | `SHIFT`/`ALT` + `two-finger scroll` (touchpad/touchscreen) |
+
+Here's how chart synchronization looks while zooming and panning:
+
+![Animated GIF of the standard Netdata dashboard being manipulated and synchronizing
+charts](https://user-images.githubusercontent.com/1153921/80839230-b034a800-8baf-11ea-9cb2-99c1e10f0f85.gif)
+
+You can also perform all these actions using the small
+rewind/play/fast-forward/zoom-in/zoom-out buttons that appear in the
+bottom-right corner of each chart.
+
+Additionally, resize charts by clicking-and-dragging the icon on the bottom-right corner of any chart. To restore the
+chart to its original height, double-click the same icon.
+
+![Animated GIF of resizing a chart and resetting it to the default
+height](https://user-images.githubusercontent.com/1153921/80842459-7d41e280-8bb6-11ea-9488-1bc29f94d7f2.gif)
+
+## Charts, contexts, families
+
+Before customizing the standard web dashboard, creating a custom dashboard,
+configuring an alarm, or writing a collector, it's crucial to understand how
+Netdata organizes metrics into charts, dimensions, families, and contexts.
+
+### Charts
+
+A **chart** is an individual, interactive, always-updating graphic displaying
+one or more collected/calculated metrics. Charts are generated by
+[collectors](/collectors/README.md).
+
+Here's the system CPU chart, the first chart displayed on the standard
+dashboard:
+
+![Screenshot of the system CPU chart in the Netdata
+dashboard](https://user-images.githubusercontent.com/1153921/62720972-0b8a8e80-b9c0-11e9-930b-4829f7b17cfd.png)
+
+Netdata displays a chart's name in parentheses above the chart. For example, if
+you navigate to the system CPU chart, you'll see the label: **Total CPU
+utilization (system.cpu)**. In this case, the chart's name is `system.cpu`.
+Netdata derives the name from the chart's [context](#contexts).
+
+### Dimensions
+
+A **dimension** is a value that gets shown on a chart. The value can be raw data
+or calculated values, such as percentages, aggregates, and more.
+
+Charts are capable of showing more than one dimension. Netdata shows these
+dimensions on the right side of the chart, beneath the date and time. Again, the
+`system.cpu` chart will serve as a good example.
+
+![Screenshot of the dimensions shown in the system CPU chart in the Netdata
+dashboard](https://user-images.githubusercontent.com/1153921/62721031-2bba4d80-b9c0-11e9-9dca-32403617ce72.png)
+
+Here, the `system.cpu` chart is showing many dimensions, such as `user`,
+`system`, `softirq`, `irq`, and more.
+
+Note that other applications sometimes use the word _series_ instead of
+_dimension_.
+
+### Families
+
+A **family** is _one_ instance of a monitored hardware or software resource that
+needs to be displayed separately from similar instances.
+
+For example, if your system has multiple disk drives at `sda` and `sdb`, Netdata
+will put each disk into its own family. The same goes for software resources,
+like multiple MySQL instances. We call these instances "families" because the
+charts associated with a single disk instance, for example, are often related to
+each other. Relatives, family... get it?
+
+When relevant, Netdata prefers to organize charts by family. When you visit the
+**Disks** section, you will see your disk drives organized into families, and
+each family will have one or more charts: `disk`, `disk_ops`, `disk_backlog`,
+`disk_util`, `disk_await`, `disk_avgsz`, `disk_svctm`, `disk_mops`, and
+`disk_iotime`.
+
+In the screenshot below, the disk family `sdb` shows a few gauges, followed by a
+few of the associated charts:
+
+![Screenshot of a disk drive family and associated charts in the Netdata
+dashboard](https://user-images.githubusercontent.com/1153921/62721362-e34f5f80-b9c0-11e9-8d2e-9a3bec48e920.png)
+
+Netdata also creates separate submenu entries for each family in the right-hand
+navigation pane so you can easily navigate to the instance you're interested in.
+Here, Netdata has created several submenus under the **Disks** menu.
+
+![Screenshot of the disks menu and
+submenus](https://user-images.githubusercontent.com/1153921/62721531-3cb78e80-b9c1-11e9-89c2-fdd736aec7d4.png)
+
+### Contexts
+
+A **context** is a way of grouping charts by the types of metrics collected and
+dimensions displayed. Different charts with the same context will show the same
+dimensions, but for different instances (families) of hardware/software
+resources.
+
+For example, the **Disks** section will often use many contexts (`disk.io`,
+`disk.ops`, `disk.backlog`, `disk.util`, and so on). Netdata then creates an
+individual chart for each context, and groups them by family.
+
+Netdata names charts by combining context and family using the following
+structure: `[context].[family]`, with the dot in the context replaced by an
+underscore. A chart with the `disk.util` context, in the `sdb` family, gets the
+name `disk_util.sdb`. Netdata shows that name in the top-left corner of a chart.
+
+Given the four example contexts, and two families of `sdb` and `sdd`, Netdata
+will create the following charts and their names:
+
+Context | `sdb` family | `sdd` family
+--- | --- | ---
+`disk.io` | `disk_io.sdb` | `disk_io.sdd`
+`disk.ops` | `disk_ops.sdb` | `disk_ops.sdd`
+`disk.backlog` | `disk_backlog.sdb` | `disk_backlog.sdd`
+`disk.util` | `disk_util.sdb` | `disk_util.sdd`
+
+And here's what two of those charts in the `disk.io` context look like under
+`sdb` and `sdd` families:
+
+![context_01](https://user-images.githubusercontent.com/1153921/62728232-177e4c80-b9d0-11e9-9e29-2a6c59d4d873.png)
+![context_02](https://user-images.githubusercontent.com/1153921/62728234-1b11d380-b9d0-11e9-8904-07befd8ac592.png)
+
+As you can see in the screenshot, you can view the context of a chart if you
+hover over the date above the list of dimensions. A tooltip will appear that
+shows you two pieces of information: the collector that produces the chart, and
+the chart's context.
+
+Netdata also uses [contexts for alarm templates](/health/REFERENCE.md#alarm-line-on). You can create an alarm for the
+`net.packets` context to receive alerts for any chart with that context, no matter which family it's attached to.
+
+## Positive and negative values on charts
+
+To improve clarity on charts, Netdata dashboards present **positive** values for
+metrics representing `read`, `input`, `inbound`, `received` and **negative**
+values for metrics representing `write`, `output`, `outbound`, `sent`.
+
+![Screenshot showing positive and negative
+values](https://user-images.githubusercontent.com/1153921/81870401-9d649080-952a-11ea-80e3-4a7b480252ee.gif)
+
+_Netdata charts showing the bandwidth and packets of a network interface.
+`received` is positive and `sent` is negative._
+
+## Autoscaled y-axis
+
+Netdata charts automatically zoom vertically to visualize the variation of each
+metric within the visible timeframe.
+
+![Animated GIF showing the auto-scaling Y
+axis](https://user-images.githubusercontent.com/1153921/80838276-8084a080-8bad-11ea-8167-8d5ab2fb1be1.gif)
+
+_A zero-based `stacked` chart automatically switches to an auto-scaled `area`
+chart when a single dimension is selected._
+
+## dashboard.js
+
+Netdata uses the `dashboard.js` file to define, configure, create, and update
+all the charts and other visualizations that appear on any Netdata dashboard.
+You need to include `dashboard.js` in any HTML page that's going to render
+Netdata charts.
+
+The [custom dashboards documentation](/web/gui/custom/README.md) contains examples of such
+custom HTML pages.
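+
+As a minimal sketch, a custom page only needs to load `dashboard.js` from a
+running Agent and declare charts with `data-netdata` attributes (the `NODE`
+host and the chart attributes below are illustrative placeholders; see the
+custom dashboards documentation for the full attribute reference):
+
+```html
+<!DOCTYPE html>
+<html>
+<head>
+    <!-- dashboard.js is served by every Netdata Agent -->
+    <script type="text/javascript" src="http://NODE:19999/dashboard.js"></script>
+</head>
+<body>
+    <!-- render the last 5 minutes of system.cpu as a 400x200 chart -->
+    <div data-netdata="system.cpu"
+        data-chart-library="dygraph"
+        data-width="400"
+        data-height="200"
+        data-after="-300"></div>
+</body>
+</html>
+```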
+
+### Generating dashboard.js
+
+We build the `dashboard.js` file by concatenating all the source files located
+in the `web/gui/src/dashboard.js/` directory. That's done using the provided
+build script:
+
+```sh
+cd web/gui
+make
+```
+
+If you make any changes to the `src` directory when developing Netdata, you
+should regenerate the `dashboard.js` file before you commit to the Netdata
+repository.
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fweb%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/web/api/Makefile.am b/web/api/Makefile.am
new file mode 100644
index 0000000..7255ac8
--- /dev/null
+++ b/web/api/Makefile.am
@@ -0,0 +1,21 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+SUBDIRS = \
+ badges \
+ queries \
+ exporters \
+ formatters \
+ health \
+ $(NULL)
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
+
+dist_web_DATA = \
+ netdata-swagger.yaml \
+ netdata-swagger.json \
+ $(NULL)
diff --git a/web/api/README.md b/web/api/README.md
new file mode 100644
index 0000000..1cc3439
--- /dev/null
+++ b/web/api/README.md
@@ -0,0 +1,14 @@
+<!--
+title: "API"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/web/api/README.md
+-->
+
+# API
+
+## Netdata REST API
+
+The complete documentation of the Netdata API is available at the **[Swagger Editor](https://editor.swagger.io/?url=https://raw.githubusercontent.com/netdata/netdata/master/web/api/netdata-swagger.yaml)**.
+
+If you prefer it over the Swagger Editor, you can also use **[Swagger UI](https://registry.my-netdata.io/swagger/#!/default/get_data)**. Note, however, that it does not provide all the available information.
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fweb%2Fapi%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/web/api/badges/Makefile.am b/web/api/badges/Makefile.am
new file mode 100644
index 0000000..161784b
--- /dev/null
+++ b/web/api/badges/Makefile.am
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/web/api/badges/README.md b/web/api/badges/README.md
new file mode 100644
index 0000000..b5fc534
--- /dev/null
+++ b/web/api/badges/README.md
@@ -0,0 +1,363 @@
+<!--
+title: "Netdata badges"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/web/api/badges/README.md
+-->
+
+# Netdata badges
+
+**Badges are cool!**
+
+Netdata can generate badges for any chart and any dimension at any time-frame. Badges come in `SVG` and can be added to any web page using an `<IMG>` HTML tag.
+
+**Netdata badges are powerful**!
+
+Given that Netdata collects from **1,000** to **5,000** metrics per server (depending on the number of network interfaces, disks, CPU cores, applications running, users logged in, containers running, etc.) and that Netdata already has data reduction/aggregation functions embedded, the badges can be quite powerful.
+
+For each metric/dimension, and for arbitrary time-frames, badges can show the **min**, **max**, or **average** value, but also the **sum** or **incremental-sum**, i.e. their **volume**.
+
+For example, there is [a chart in Netdata that shows the current requests/s of nginx](http://london.my-netdata.io/#nginx_local_nginx). Using this chart alone we can show the following badges (we could add more time-frames, like **today**, **yesterday**, etc):
+
+<a href="https://registry.my-netdata.io/#nginx_local_nginx"><img src="https://registry.my-netdata.io/api/v1/badge.svg?chart=nginx_local.connections&dimensions=active&value_color=grey:null%7Cblue&label=nginx%20active%20connections%20now&units=null&precision=0"/></a> <a href="https://registry.my-netdata.io/#nginx_local_nginx"><img src="https://registry.my-netdata.io/api/v1/badge.svg?chart=nginx_local.connections&dimensions=active&after=-3600&value_color=orange&label=last%20hour%20average&units=null&options=unaligned&precision=0"/></a> <a href="https://registry.my-netdata.io/#nginx_local_nginx"><img src="https://registry.my-netdata.io/api/v1/badge.svg?chart=nginx_local.connections&dimensions=active&group=max&after=-3600&value_color=red&label=last%20hour%20max&units=null&options=unaligned&precision=0"/></a>
+
+Similarly, there is [a chart that shows outbound bandwidth per class](http://london.my-netdata.io/#tc_eth0), using QoS data. So it shows `kilobits/s` per class. Using this chart we can show:
+
+<a href="https://registry.my-netdata.io/#tc_eth0"><img src="https://registry.my-netdata.io/api/v1/badge.svg?chart=tc.world_out&dimensions=web_server&value_color=green&label=web%20server%20sends%20now&units=kbps"/></a> <a href="https://registry.my-netdata.io/#tc_eth0"><img src="https://registry.my-netdata.io/api/v1/badge.svg?chart=tc.world_out&dimensions=web_server&after=-86400&options=unaligned&group=sum&divide=8388608&value_color=blue&label=web%20server%20sent%20today&units=GB"/></a>
+
+The right one is a **volume** calculation. Netdata summed the values of the last 86,400 seconds (one day), which gives `kilobits`, then divided by 8 to get kilobytes, and twice more by 1024 to get megabytes and then gigabytes; hence the `divide=8388608` parameter (8 × 1024 × 1024 = 8,388,608). Calculations like this are quite accurate, since for every value collected, every second, Netdata interpolates it to the second boundary using microsecond calculations.
+
+Let's see a few more badge examples (they come from the [Netdata registry](/registry/README.md)):
+
+- **CPU usage of user `root`** (you can pick any user; 100% = 1 core). This will be `green <10%`, `yellow <20%`, `orange <50%`, `blue <100%` (1 core), and `red` otherwise (you define the thresholds and colors in the URL).
+
+ <a href="https://registry.my-netdata.io/#apps_cpu"><img src="https://registry.my-netdata.io/api/v1/badge.svg?chart=users.cpu&dimensions=root&value_color=grey:null%7Cgreen%3C10%7Cyellow%3C20%7Corange%3C50%7Cblue%3C100%7Cred&label=root%20user%20cpu%20now&units=%25"></img></a> <a href="https://registry.my-netdata.io/#apps_cpu"><img src="https://registry.my-netdata.io/api/v1/badge.svg?chart=users.cpu&dimensions=root&after=-3600&value_color=grey:null%7Cgreen%3C10%7Cyellow%3C20%7Corange%3C50%7Cblue%3C100%7Cred&label=root%20user%20average%20cpu%20last%20hour&units=%25"></img></a>
+
+- **MySQL queries per second**
+
+ <a href="https://registry.my-netdata.io/#mysql_local"><img src="https://registry.my-netdata.io/api/v1/badge.svg?chart=mysql_local.queries&dimensions=questions&label=mysql%20queries%20now&value_color=red&units=%5Cs"></img></a> <a href="https://registry.my-netdata.io/#mysql_local"><img src="https://registry.my-netdata.io/api/v1/badge.svg?chart=mysql_local.queries&dimensions=questions&after=-3600&options=unaligned&group=sum&label=mysql%20queries%20this%20hour&value_color=green&units=null"></img></a> <a href="https://registry.my-netdata.io/#mysql_local"><img src="https://registry.my-netdata.io/api/v1/badge.svg?chart=mysql_local.queries&dimensions=questions&after=-86400&options=unaligned&group=sum&label=mysql%20queries%20today&value_color=blue&units=null"></img></a>
+
+ And a niche one: **MySQL SELECT statements with JOIN that did full table scans**:
+
+ <a href="https://registry.my-netdata.io/#mysql_local_issues"><img src="https://registry.my-netdata.io/api/v1/badge.svg?chart=mysql_local.join_issues&dimensions=scan&after=-3600&label=full%20table%20scans%20the%20last%20hour&value_color=orange&group=sum&units=null"></img></a>
+
+---
+
+> So, every single line on the charts of a [Netdata dashboard](http://london.my-netdata.io/) can become a badge, and this badge can calculate **average**, **min**, **max**, or **volume** for any time-frame! You can also vary the badge color using conditions on the calculated value.
+
+---
+
+## How to create badges
+
+The basic URL is `http://your.netdata:19999/api/v1/badge.svg?option1&option2&option3&...`.
+
+Here is what you can put for `options` (these are standard Netdata API options):
+
+- `chart=CHART.NAME`
+
+ The chart to get the values from.
+
+ **This is the only parameter required** and with just this parameter, Netdata will return the sum of the latest values of all chart dimensions.
+
+ Example:
+
+```html
+ <a href="#">
+ <img src="https://registry.my-netdata.io/api/v1/badge.svg?chart=system.cpu"></img>
+ </a>
+```
+
+ Which produces this:
+
+ <a href="#">
+ <img src="https://registry.my-netdata.io/api/v1/badge.svg?chart=system.cpu"></img>
+ </a>
+
+- `alarm=NAME`
+
+ Render the current value and status of an alarm linked to the chart. This option can be ignored if the badge to be generated is not related to an alarm.
+
+ The current value of the alarm will be rendered. The color of the badge will indicate the status of the alarm.
+
+ For alarm badges, **both `chart` and `alarm` parameters are required**.
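+
+ For instance, a badge for the default `10min_cpu_usage` alarm attached to the `system.cpu` chart could look like this (the alarm name is illustrative; use one defined on your node):
+
+```html
+ <a href="#">
+ <img src="https://registry.my-netdata.io/api/v1/badge.svg?chart=system.cpu&alarm=10min_cpu_usage"></img>
+ </a>
+```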
+
+- `dimensions=DIMENSION1|DIMENSION2|...`
+
+ The dimensions of the chart to use. If you don't set any dimension, all will be used. When multiple dimensions are used, Netdata will sum their values. You can append `options=absolute` if you want all values turned positive before they are summed.
+
+ Pipes in HTML have to be escaped with `%7C`.
+
+ Example:
+
+```html
+ <a href="#">
+ <img src="https://registry.my-netdata.io/api/v1/badge.svg?chart=system.cpu&dimensions=system%7Cnice"></img>
+ </a>
+```
+
+ Which produces this:
+
+ <a href="#">
+ <img src="https://registry.my-netdata.io/api/v1/badge.svg?chart=system.cpu&dimensions=system%7Cnice"></img>
+ </a>
+
+- `before=SECONDS` and `after=SECONDS`
+
+ The timeframe. These can be absolute Unix timestamps, or numbers of seconds relative to now (negative values look into the past). By default, `before=0` and `after=-1` (1 second in the past).
+
+ To get the last minute, set `after=-60`. This will give the average of the last complete minute (XX:XX:00 - XX:XX:59).
+
+ To get the max of the last hour, set `after=-3600&group=max`. This will give the maximum value of the last complete hour (XX:00:00 - XX:59:59).
+
+ Example:
+
+```html
+ <a href="#">
+ <img src="https://registry.my-netdata.io/api/v1/badge.svg?chart=system.cpu&after=-60"></img>
+ </a>
+```
+
+ Which produces the average of the last complete minute (XX:XX:00 - XX:XX:59):
+
+ <a href="#">
+ <img src="https://registry.my-netdata.io/api/v1/badge.svg?chart=system.cpu&after=-60"></img>
+ </a>
+
+ While this is the previous minute (one minute before the last one, again aligned XX:XX:00 - XX:XX:59):
+
+```html
+ <a href="#">
+ <img src="https://registry.my-netdata.io/api/v1/badge.svg?chart=system.cpu&before=-60&after=-60"></img>
+ </a>
+```
+
+ It produces this:
+
+ <a href="#">
+ <img src="https://registry.my-netdata.io/api/v1/badge.svg?chart=system.cpu&before=-60&after=-60"></img>
+ </a>
+
+- `group=min` or `group=max` or `group=average` (the default) or `group=sum` or `group=incremental-sum`
+
+ If Netdata has to reduce (aggregate) the data to calculate the value, this is the aggregation method to use.
+
+ - `max` will find the max value for the timeframe. This works on both positive and negative dimensions. It will find the most extreme value.
+
+ - `min` will find the min value for the timeframe. This works on both positive and negative dimensions. It will find the number closest to zero.
+
+ - `average` will calculate the average value for the timeframe.
+
+ - `sum` will sum all the values for the timeframe. This is nice for finding the volume of dimensions over a timeframe. So if you have a dimension that reports `X per second`, you can find its volume in a timeframe by adding its values over that timeframe.
+
+ - `incremental-sum` will sum the differences between consecutive values. Let's assume you have a dimension that does not measure the rate of something, but its absolute value, with values like "1, 5, 3, 7, 4". `incremental-sum` will add up the differences of adjacent values: `(5 - 1) + (3 - 5) + (7 - 3) + (4 - 7) = 3` (which equals the last value minus the first: 4 - 1 = 3).
+
+- `options=opt1|opt2|opt3|...`
+
+ These fine-tune various options of the API. Here is what you can use for badges (the API has more options, but only these are useful for badges):
+
+ - `percentage`, instead of returning the value, calculates the percentage of the sum of the selected dimensions versus the sum of all the dimensions of the chart. This also sets the units to `%`.
+
+ - `absolute` or `abs`, turn all values positive and then sum them.
+
+ - `display_absolute` or `display-absolute`, to use the signed value during color calculation, but display the absolute value on the badge.
+
+ - `min2max`, when multiple dimensions are given, do not sum them, but take their `max - min`.
+
+ - `unaligned`, when data are reduced / aggregated (e.g. the request is about the average of the last minute, or hour), Netdata by default aligns them so that the charts will have a constant shape (so the average per minute always covers XX:XX:00 - XX:XX:59). With the `unaligned` option, Netdata will aggregate data without any alignment; if the request is for 60 seconds, it will aggregate the latest 60 seconds of collected data.
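+
+ For example, a sketch combining these: the following badge shows `user` + `system` CPU as a percentage of the whole `system.cpu` chart, aggregated over the last minute without alignment (all parameters as documented above):
+
+```html
+ <a href="#">
+ <img src="https://registry.my-netdata.io/api/v1/badge.svg?chart=system.cpu&dimensions=user%7Csystem&after=-60&options=unaligned%7Cpercentage&label=user%20and%20system%20cpu"></img>
+ </a>
+```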
+
+These are options dedicated to badges:
+
+- `label=TEXT`
+
+ The label of the badge.
+
+- `units=TEXT`
+
+ The units of the badge. If you want to put a `/`, use a `\` instead. This is because Netdata allows badge parameters to be given as a path in the URL, instead of as a query string. You can also use `null` or `empty` to show the badge without any units.
+
+ The units `seconds`, `minutes` and `hours` trigger special formatting. The value has to be in this unit, and Netdata will automatically convert it into a pretty-printed duration.
+
+- `multiply=NUMBER`
+
+ Multiply the value by this number. The default is `1`.
+
+- `divide=NUMBER`
+
+ Divide the value by this number. The default is `1`.
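+
+ For example, the `system.io` chart reports disk I/O in KiB/s; a badge that shows writes in MiB/s could divide by 1024 (this chart/dimension pairing is an assumption — check the charts available on your node):
+
+```html
+ <a href="#">
+ <img src="https://registry.my-netdata.io/api/v1/badge.svg?chart=system.io&dimensions=out&options=absolute&divide=1024&label=disk%20writes&units=MiB%5Cs"></img>
+ </a>
+```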
+
+- Color customization parameters
+
+ The following parameters specify colors of each individual part of the badge. Each parameter is documented in detail
+ below.
+
+ | Area of badge | Background color parameter | Text color parameter |
+ | ---: | :------------------------: | :------------------: |
+ | Label (left) part | `label_color` | `text_color_lbl` |
+ | Value (right) part | `value_color` | `text_color_val` |
+
+ - `label_color=COLOR`
+
+ The color of the label (the left part). You can use any HTML color in `RGB` or `RRGGBB` hex notation (without
+ the `#` character at the beginning). Additionally, you can use one of the following predefined colors (and you
+ can use them by their name):
+
+ - `green`
+ - `brightgreen`
+ - `yellow`
+ - `yellowgreen`
+ - `orange`
+ - `red`
+ - `blue`
+ - `grey`
+ - `gray`
+ - `lightgrey`
+ - `lightgray`
+
+ These colors are taken from <https://github.com/badges/shields>, which makes them compatible with standard
+ badges.
+
+ - `value_color=COLOR:null|COLOR<VALUE|COLOR>VALUE|COLOR>=VALUE|COLOR<=VALUE|...`
+
+ You can add a pipe delimited list of conditions to pick the value color. The first matching (left to right) will
+ be used.
+
+ Example: `value_color=grey:null|green<10|yellow<100|orange<1000|blue<10000|red`
+
+ The above will set `grey` if no value exists (not collected within the `gap when lost iterations above` in
+ `netdata.conf` for the chart), `green` if the value is less than 10, `yellow` if the value is less than 100, and
+ so on. Netdata will use `red` if no other conditions match. Only integers are supported as values.
+
+ The supported operators are `<`, `>`, `<=`, `>=`, `=` (or `:`), and `!=` (or `<>`).
+
+ You can also use the same syntax as the `label_color` parameter to define each of these colors. You can
+ reference a predefined color by name or `RGB`/`RRGGBB` hex notation.
+
+ - `text_color_lbl=RGB` or `text_color_lbl=RRGGBB` or `text_color_lbl=color_by_name`
+
+ This value specifies the font color for the left/label side of the badge. The syntax is the same as the
+ `label_color` parameter. If not given, or given with an empty value, Netdata will use the default color.
+
+ - `text_color_val=RGB` or `text_color_val=RRGGBB` or `text_color_val=color_by_name`
+
+ This value specifies the font color for the right/value side of the badge. The syntax is the same as the
+ `label_color` parameter. If not given, or given with an empty value, Netdata will use the default color.
+
+- `precision=NUMBER`
+
+ The number of decimal digits of the value. By default Netdata will add:
+
+ - no decimal digits for values > 1000
+ - 1 decimal digit for values > 100
+ - 2 decimal digits for values > 1
+ - 3 decimal digits for values > 0.1
+ - 4 decimal digits for values \<= 0.1
+
+ Using `precision=NUMBER`, you can set your own preference per badge.
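+
+ For example, the CPU badge from earlier, forced to two decimal digits:
+
+```html
+ <a href="#">
+ <img src="https://registry.my-netdata.io/api/v1/badge.svg?chart=system.cpu&after=-60&precision=2"></img>
+ </a>
+```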
+
+- `scale=XXX`
+
+ This option scales the SVG image. It accepts values greater than or equal to 100 (100% is the default scale). For example, let's get a few different sizes:
+
+ <img src="https://registry.my-netdata.io/api/v1/badge.svg?chart=system.cpu&after=-60&scale=100"></img> original<br/>
+ <img src="https://registry.my-netdata.io/api/v1/badge.svg?chart=system.cpu&after=-60&scale=125"></img> `scale=125`<br/>
+ <img src="https://registry.my-netdata.io/api/v1/badge.svg?chart=system.cpu&after=-60&scale=150"></img> `scale=150`<br/>
+ <img src="https://registry.my-netdata.io/api/v1/badge.svg?chart=system.cpu&after=-60&scale=175"></img> `scale=175`<br/>
+ <img src="https://registry.my-netdata.io/api/v1/badge.svg?chart=system.cpu&after=-60&scale=200"></img> `scale=200`
+
+- `fixed_width_lbl=NUMBER` and `fixed_width_val=NUMBER`
+
+ This parameter overrides auto-sizing of badges and displays them at fixed widths. `fixed_width_lbl` determines the width of the badge's left side (label/name). `fixed_width_val` determines the width of the badge's right side (value). You must set both parameters together, or they will be ignored.
+
+ You should set the label/value widths wide enough to provide space for all the possible values/contents of the badge you're requesting. If the text cannot fit in the given space, it will be clipped.
+
+ The `scale` parameter still applies to the values you give to `fixed_width_lbl` and `fixed_width_val`.
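+
+ For example, a sketch that reserves 120 pixels for the label and 50 pixels for the value (the widths are illustrative):
+
+```html
+ <a href="#">
+ <img src="https://registry.my-netdata.io/api/v1/badge.svg?chart=system.cpu&after=-60&fixed_width_lbl=120&fixed_width_val=50"></img>
+ </a>
+```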
+
+- `refresh=auto` or `refresh=SECONDS`
+
+ This option enables auto-refreshing of images. Netdata will send the HTTP header `Refresh: SECONDS` to the web browser, thus requesting automatic refresh of the images at regular intervals.
+
+ `auto` will calculate the proper `SECONDS` to avoid unnecessary refreshes. If `SECONDS` is zero, this feature is disabled (it is also disabled by default).
+
+ Auto-refreshing like this works only if you access the badge directly. So, you may have to put it in an `embed` or an `iframe` for it to be auto-refreshed. Use something like this:
+
+```html
+<embed src="BADGE_URL" type="image/svg+xml" height="20" />
+```
+
+ Another way is to auto-refresh all the Netdata badges on a page using JavaScript. Add a class to all the Netdata badges, like this: `<img class="netdata-badge" src="..."/>`. Then add this JavaScript code to your page (it requires jQuery):
+
+```html
+<script>
+ var NETDATA_BADGES_AUTOREFRESH_SECONDS = 5;
+ function refreshNetdataBadges() {
+ var now = new Date().getTime().toString();
+ $('.netdata-badge').each(function() {
+ this.src = this.src.replace(/\&_=\d*/, '') + '&_=' + now;
+ });
+ setTimeout(refreshNetdataBadges, NETDATA_BADGES_AUTOREFRESH_SECONDS * 1000);
+ }
+ setTimeout(refreshNetdataBadges, NETDATA_BADGES_AUTOREFRESH_SECONDS * 1000);
+</script>
+```
+
+A more advanced badge-refreshing method is to include `http://your.netdata.ip:19999/refresh-badges.js` in your page.
+
+---
+
+## Escaping URLs
+
+Keep in mind that if you add badge URLs to your HTML pages you have to escape the special characters:
+
+|character|name|escape sequence|
+|:-------:|:--:|:-------------:|
+|(space)|space (in labels and units)|`%20`|
+|`#`|hash (for colors)|`%23`|
+|`%`|percent (in units)|`%25`|
+|`<`|less than|`%3C`|
+|`>`|greater than|`%3E`|
+|`\`|backslash (when you need a `/`)|`%5C`|
+|`\|`|pipe (delimiting parameters)|`%7C`|
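+
+For example, a badge with the label `cpu %` and units of `%` escapes the space as `%20` and each percent sign as `%25`:
+
+```html
+<img src="https://registry.my-netdata.io/api/v1/badge.svg?chart=system.cpu&label=cpu%20%25&units=%25"></img>
+```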
+
+## FAQ
+
+#### Is it fast?
+
+On modern hardware, Netdata can generate about **2,000 badges per second per core** before any delays become noticeable. It generates a badge in about half a millisecond!
+
+Of course, these timings are for badges that use recent data. If you need badges that do calculations over long durations (a day or more), the timings will differ. Netdata logs its timings in its `access.log`, so take a look there before adding a heavy badge to a busy web site. You can also cache such badges, or have a cron job fetch them from Netdata and save them on your web server at regular intervals.
+
+#### Embedding badges in GitHub
+
+You have two options: a) SVG images with Markdown, or b) SVG images with HTML (directly in `.md` files).
+
+For example, this is the CPU badge shown above:
+
+- Markdown example:
+
+```md
+[![A nice name](https://registry.my-netdata.io/api/v1/badge.svg?chart=users.cpu&dimensions=root&value_color=grey:null%7Cgreen%3C10%7Cyellow%3C20%7Corange%3C50%7Cblue%3C100%7Cred&label=root%20user%20cpu%20now&units=%25)](https://registry.my-netdata.io/#apps_cpu)
+```
+
+- HTML example:
+
+```html
+<a href="https://registry.my-netdata.io/#apps_cpu">
+ <img src="https://registry.my-netdata.io/api/v1/badge.svg?chart=users.cpu&dimensions=root&value_color=grey:null%7Cgreen%3C10%7Cyellow%3C20%7Corange%3C50%7Cblue%3C100%7Cred&label=root%20user%20cpu%20now&units=%25"></img>
+</a>
+```
+
+Both produce this:
+
+<a href="https://registry.my-netdata.io/#apps_cpu">
+ <img src="https://registry.my-netdata.io/api/v1/badge.svg?chart=users.cpu&dimensions=root&value_color=grey:null%7Cgreen%3C10%7Cyellow%3C20%7Corange%3C50%7Cblue%3C100%7Cred&label=root%20user%20cpu%20now&units=%25"></img>
+</a>
+
+#### Auto-refreshing badges in GitHub
+
+Unfortunately, this cannot be done. GitHub fetches all images through a proxy and rewrites all URLs to be served by that proxy.
+
+You can refresh them from your browser console, though. Press F12 to open the browser's developer tools (and switch to the console tab), paste the following, and press Enter; the badges will refresh:
+
+```js
+var len = document.images.length; while(len--) { document.images[len].src = document.images[len].src.replace(/\?cacheBuster=\d*/, "") + "?cacheBuster=" + new Date().getTime().toString(); };
+```
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fweb%2Fapi%2Fbadges%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/web/api/badges/web_buffer_svg.c b/web/api/badges/web_buffer_svg.c
new file mode 100644
index 0000000..b5a1e03
--- /dev/null
+++ b/web/api/badges/web_buffer_svg.c
@@ -0,0 +1,1140 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "web_buffer_svg.h"
+
+#define BADGE_HORIZONTAL_PADDING 4
+#define VERDANA_KERNING 0.2
+#define VERDANA_PADDING 1.0
+
+/*
+ * verdana11_widths[] has been generated with this method:
+ * https://github.com/badges/shields/blob/master/measure-text.js
+*/
+
+static double verdana11_widths[128] = {
+ [0] = 0.0,
+ [1] = 0.0,
+ [2] = 0.0,
+ [3] = 0.0,
+ [4] = 0.0,
+ [5] = 0.0,
+ [6] = 0.0,
+ [7] = 0.0,
+ [8] = 0.0,
+ [9] = 0.0,
+ [10] = 0.0,
+ [11] = 0.0,
+ [12] = 0.0,
+ [13] = 0.0,
+ [14] = 0.0,
+ [15] = 0.0,
+ [16] = 0.0,
+ [17] = 0.0,
+ [18] = 0.0,
+ [19] = 0.0,
+ [20] = 0.0,
+ [21] = 0.0,
+ [22] = 0.0,
+ [23] = 0.0,
+ [24] = 0.0,
+ [25] = 0.0,
+ [26] = 0.0,
+ [27] = 0.0,
+ [28] = 0.0,
+ [29] = 0.0,
+ [30] = 0.0,
+ [31] = 0.0,
+ [32] = 3.8671874999999996, //
+ [33] = 4.3291015625, // !
+ [34] = 5.048828125, // "
+ [35] = 9.001953125, // #
+ [36] = 6.9931640625, // $
+ [37] = 11.837890625, // %
+ [38] = 7.992187499999999, // &
+ [39] = 2.9541015625, // '
+ [40] = 4.9951171875, // (
+ [41] = 4.9951171875, // )
+ [42] = 6.9931640625, // *
+ [43] = 9.001953125, // +
+ [44] = 4.00146484375, // ,
+ [45] = 4.9951171875, // -
+ [46] = 4.00146484375, // .
+ [47] = 4.9951171875, // /
+ [48] = 6.9931640625, // 0
+ [49] = 6.9931640625, // 1
+ [50] = 6.9931640625, // 2
+ [51] = 6.9931640625, // 3
+ [52] = 6.9931640625, // 4
+ [53] = 6.9931640625, // 5
+ [54] = 6.9931640625, // 6
+ [55] = 6.9931640625, // 7
+ [56] = 6.9931640625, // 8
+ [57] = 6.9931640625, // 9
+ [58] = 4.9951171875, // :
+ [59] = 4.9951171875, // ;
+ [60] = 9.001953125, // <
+ [61] = 9.001953125, // =
+ [62] = 9.001953125, // >
+ [63] = 5.99951171875, // ?
+ [64] = 11.0, // @
+ [65] = 7.51953125, // A
+ [66] = 7.541015625, // B
+ [67] = 7.680664062499999, // C
+ [68] = 8.4755859375, // D
+ [69] = 6.95556640625, // E
+ [70] = 6.32177734375, // F
+ [71] = 8.529296875, // G
+ [72] = 8.26611328125, // H
+ [73] = 4.6298828125, // I
+ [74] = 5.00048828125, // J
+ [75] = 7.62158203125, // K
+ [76] = 6.123046875, // L
+ [77] = 9.2705078125, // M
+ [78] = 8.228515625, // N
+ [79] = 8.658203125, // O
+ [80] = 6.63330078125, // P
+ [81] = 8.658203125, // Q
+ [82] = 7.6484375, // R
+ [83] = 7.51953125, // S
+ [84] = 6.7783203125, // T
+ [85] = 8.05126953125, // U
+ [86] = 7.51953125, // V
+ [87] = 10.87646484375, // W
+ [88] = 7.53564453125, // X
+ [89] = 6.767578125, // Y
+ [90] = 7.53564453125, // Z
+ [91] = 4.9951171875, // [
+ [92] = 4.9951171875, // backslash
+ [93] = 4.9951171875, // ]
+ [94] = 9.001953125, // ^
+ [95] = 6.9931640625, // _
+ [96] = 6.9931640625, // `
+ [97] = 6.6064453125, // a
+ [98] = 6.853515625, // b
+ [99] = 5.73095703125, // c
+ [100] = 6.853515625, // d
+ [101] = 6.552734375, // e
+ [102] = 3.8671874999999996, // f
+ [103] = 6.853515625, // g
+ [104] = 6.9609375, // h
+ [105] = 3.0185546875, // i
+ [106] = 3.78662109375, // j
+ [107] = 6.509765625, // k
+ [108] = 3.0185546875, // l
+ [109] = 10.69921875, // m
+ [110] = 6.9609375, // n
+ [111] = 6.67626953125, // o
+ [112] = 6.853515625, // p
+ [113] = 6.853515625, // q
+ [114] = 4.6943359375, // r
+ [115] = 5.73095703125, // s
+ [116] = 4.33447265625, // t
+ [117] = 6.9609375, // u
+ [118] = 6.509765625, // v
+ [119] = 9.001953125, // w
+ [120] = 6.509765625, // x
+ [121] = 6.509765625, // y
+ [122] = 5.779296875, // z
+ [123] = 6.982421875, // {
+ [124] = 4.9951171875, // |
+ [125] = 6.982421875, // }
+ [126] = 9.001953125, // ~
+ [127] = 0.0
+};
+
+// find the width of the string using the verdana 11points font
+static inline double verdana11_width(const char *s, float em_size) {
+ double w = 0.0;
+
+ while(*s) {
+        // if a UTF8 multibyte char is found, guess its width to be 1em;
+        // since the label width is corrected with JavaScript anyway, this is not so important
+
+ // TODO: maybe move UTF8 functions from url.c to separate util in libnetdata
+ // then use url_utf8_get_byte_length etc.
+ if(IS_UTF8_STARTBYTE(*s)) {
+ s++;
+ while(IS_UTF8_BYTE(*s) && !IS_UTF8_STARTBYTE(*s)){
+ s++;
+ }
+ w += em_size;
+ }
+ else {
+            if(likely(!(*s & 0x80))){ // bytes with the high bit set are not valid single-byte UTF8 chars
+ double t = verdana11_widths[(unsigned char)*s];
+ if(t != 0.0)
+ w += t + VERDANA_KERNING;
+ }
+ s++;
+ }
+ }
+
+ w -= VERDANA_KERNING;
+ w += VERDANA_PADDING;
+ return w;
+}
+
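+// escape 'src' into 'dst' as XML-safe text, using the escaping rules of
+// shields.io badges: '\' becomes '/', and &, <, >, ", ' become entities.
+// at most 'len' bytes are written ('dst' must be len + 1 bytes long);
+// returns the number of bytes written to 'dst'.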
+static inline size_t escape_xmlz(char *dst, const char *src, size_t len) {
+ size_t i = len;
+
+ // required escapes from
+ // https://github.com/badges/shields/blob/master/badge.js
+ while(*src && i) {
+ switch(*src) {
+ case '\\':
+ *dst++ = '/';
+ src++;
+ i--;
+ break;
+
+ case '&':
+ if(i > 5) {
+ strcpy(dst, "&amp;");
+ i -= 5;
+ dst += 5;
+ src++;
+ }
+ else goto cleanup;
+ break;
+
+ case '<':
+ if(i > 4) {
+ strcpy(dst, "&lt;");
+ i -= 4;
+ dst += 4;
+ src++;
+ }
+ else goto cleanup;
+ break;
+
+ case '>':
+ if(i > 4) {
+ strcpy(dst, "&gt;");
+ i -= 4;
+ dst += 4;
+ src++;
+ }
+ else goto cleanup;
+ break;
+
+ case '"':
+ if(i > 6) {
+ strcpy(dst, "&quot;");
+ i -= 6;
+ dst += 6;
+ src++;
+ }
+ else goto cleanup;
+ break;
+
+ case '\'':
+ if(i > 6) {
+ strcpy(dst, "&apos;");
+ i -= 6;
+ dst += 6;
+ src++;
+ }
+ else goto cleanup;
+ break;
+
+ default:
+ i--;
+ *dst++ = *src++;
+ break;
+ }
+ }
+
+cleanup:
+ *dst = '\0';
+ return len - i;
+}
+
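+// render 'value' into 'value_string', appending 'units' (with a space
+// separator when the units start with an alphanumeric character);
+// a negative 'precision' selects an automatic precision based on the
+// magnitude of the value and trims trailing zeros from the decimal part.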
+static inline char *format_value_with_precision_and_unit(char *value_string, size_t value_string_len, calculated_number value, const char *units, int precision) {
+ if(unlikely(isnan(value) || isinf(value)))
+ value = 0.0;
+
+ char *separator = "";
+ if(unlikely(isalnum(*units)))
+ separator = " ";
+
+ if(precision < 0) {
+ int len, lstop = 0, trim_zeros = 1;
+
+ calculated_number abs = value;
+ if(isless(value, 0)) {
+ lstop = 1;
+ abs = calculated_number_fabs(value);
+ }
+
+ if(isgreaterequal(abs, 1000)) {
+ len = snprintfz(value_string, value_string_len, "%0.0" LONG_DOUBLE_MODIFIER, (LONG_DOUBLE) value);
+ trim_zeros = 0;
+ }
+ else if(isgreaterequal(abs, 10)) len = snprintfz(value_string, value_string_len, "%0.1" LONG_DOUBLE_MODIFIER, (LONG_DOUBLE) value);
+ else if(isgreaterequal(abs, 1)) len = snprintfz(value_string, value_string_len, "%0.2" LONG_DOUBLE_MODIFIER, (LONG_DOUBLE) value);
+ else if(isgreaterequal(abs, 0.1)) len = snprintfz(value_string, value_string_len, "%0.2" LONG_DOUBLE_MODIFIER, (LONG_DOUBLE) value);
+ else if(isgreaterequal(abs, 0.01)) len = snprintfz(value_string, value_string_len, "%0.4" LONG_DOUBLE_MODIFIER, (LONG_DOUBLE) value);
+ else if(isgreaterequal(abs, 0.001)) len = snprintfz(value_string, value_string_len, "%0.5" LONG_DOUBLE_MODIFIER, (LONG_DOUBLE) value);
+ else if(isgreaterequal(abs, 0.0001)) len = snprintfz(value_string, value_string_len, "%0.6" LONG_DOUBLE_MODIFIER, (LONG_DOUBLE) value);
+ else len = snprintfz(value_string, value_string_len, "%0.7" LONG_DOUBLE_MODIFIER, (LONG_DOUBLE) value);
+
+ if(unlikely(trim_zeros)) {
+ int l;
+ // remove trailing zeros from the decimal part
+ for(l = len - 1; l > lstop; l--) {
+ if(likely(value_string[l] == '0')) {
+ value_string[l] = '\0';
+ len--;
+ }
+
+ else if(unlikely(value_string[l] == '.')) {
+ value_string[l] = '\0';
+ len--;
+ break;
+ }
+
+ else
+ break;
+ }
+ }
+
+ if(unlikely(len <= 0)) len = 1;
+ snprintfz(&value_string[len], value_string_len - len, "%s%s", separator, units);
+ }
+ else {
+ if(precision > 50) precision = 50;
+ snprintfz(value_string, value_string_len, "%0.*" LONG_DOUBLE_MODIFIER "%s%s", precision, (LONG_DOUBLE) value, separator, units);
+ }
+
+ return value_string;
+}
+
+typedef enum badge_units_format {
+ UNITS_FORMAT_NONE,
+ UNITS_FORMAT_SECONDS,
+ UNITS_FORMAT_SECONDS_AGO,
+ UNITS_FORMAT_MINUTES,
+ UNITS_FORMAT_MINUTES_AGO,
+ UNITS_FORMAT_HOURS,
+ UNITS_FORMAT_HOURS_AGO,
+ UNITS_FORMAT_ONOFF,
+ UNITS_FORMAT_UPDOWN,
+ UNITS_FORMAT_OKERROR,
+ UNITS_FORMAT_OKFAILED,
+ UNITS_FORMAT_EMPTY,
+ UNITS_FORMAT_PERCENT
+} UNITS_FORMAT;
+
+
+static struct units_formatter {
+ const char *units;
+ uint32_t hash;
+ UNITS_FORMAT format;
+} badge_units_formatters[] = {
+ { "seconds", 0, UNITS_FORMAT_SECONDS },
+ { "seconds ago", 0, UNITS_FORMAT_SECONDS_AGO },
+ { "minutes", 0, UNITS_FORMAT_MINUTES },
+ { "minutes ago", 0, UNITS_FORMAT_MINUTES_AGO },
+ { "hours", 0, UNITS_FORMAT_HOURS },
+ { "hours ago", 0, UNITS_FORMAT_HOURS_AGO },
+ { "on/off", 0, UNITS_FORMAT_ONOFF },
+ { "on-off", 0, UNITS_FORMAT_ONOFF },
+ { "onoff", 0, UNITS_FORMAT_ONOFF },
+ { "up/down", 0, UNITS_FORMAT_UPDOWN },
+ { "up-down", 0, UNITS_FORMAT_UPDOWN },
+ { "updown", 0, UNITS_FORMAT_UPDOWN },
+ { "ok/error", 0, UNITS_FORMAT_OKERROR },
+ { "ok-error", 0, UNITS_FORMAT_OKERROR },
+ { "okerror", 0, UNITS_FORMAT_OKERROR },
+ { "ok/failed", 0, UNITS_FORMAT_OKFAILED },
+ { "ok-failed", 0, UNITS_FORMAT_OKFAILED },
+ { "okfailed", 0, UNITS_FORMAT_OKFAILED },
+ { "empty", 0, UNITS_FORMAT_EMPTY },
+ { "null", 0, UNITS_FORMAT_EMPTY },
+ { "percentage", 0, UNITS_FORMAT_PERCENT },
+ { "percent", 0, UNITS_FORMAT_PERCENT },
+ { "pcent", 0, UNITS_FORMAT_PERCENT },
+
+ // terminator
+ { NULL, 0, UNITS_FORMAT_NONE }
+};
+
+inline char *format_value_and_unit(char *value_string, size_t value_string_len, calculated_number value, const char *units, int precision) {
+ static int max = -1;
+ int i;
+
+ if(unlikely(max == -1)) {
+ for(i = 0; badge_units_formatters[i].units; i++)
+ badge_units_formatters[i].hash = simple_hash(badge_units_formatters[i].units);
+
+ max = i;
+ }
+
+ if(unlikely(!units)) units = "";
+ uint32_t hash_units = simple_hash(units);
+
+ UNITS_FORMAT format = UNITS_FORMAT_NONE;
+ for(i = 0; i < max; i++) {
+ struct units_formatter *ptr = &badge_units_formatters[i];
+
+ if(hash_units == ptr->hash && !strcmp(units, ptr->units)) {
+ format = ptr->format;
+ break;
+ }
+ }
+
+ if(unlikely(format == UNITS_FORMAT_SECONDS || format == UNITS_FORMAT_SECONDS_AGO)) {
+ if(value == 0.0) {
+ snprintfz(value_string, value_string_len, "%s", "now");
+ return value_string;
+ }
+ else if(isnan(value) || isinf(value)) {
+ snprintfz(value_string, value_string_len, "%s", "undefined");
+ return value_string;
+ }
+
+ const char *suffix = (format == UNITS_FORMAT_SECONDS_AGO)?" ago":"";
+
+ size_t s = (size_t)value;
+ size_t d = s / 86400;
+ s = s % 86400;
+
+ size_t h = s / 3600;
+ s = s % 3600;
+
+ size_t m = s / 60;
+ s = s % 60;
+
+ if(d)
+ snprintfz(value_string, value_string_len, "%zu %s %02zu:%02zu:%02zu%s", d, (d == 1)?"day":"days", h, m, s, suffix);
+ else
+ snprintfz(value_string, value_string_len, "%02zu:%02zu:%02zu%s", h, m, s, suffix);
+
+ return value_string;
+ }
+
+ else if(unlikely(format == UNITS_FORMAT_MINUTES || format == UNITS_FORMAT_MINUTES_AGO)) {
+ if(value == 0.0) {
+ snprintfz(value_string, value_string_len, "%s", "now");
+ return value_string;
+ }
+ else if(isnan(value) || isinf(value)) {
+ snprintfz(value_string, value_string_len, "%s", "undefined");
+ return value_string;
+ }
+
+ const char *suffix = (format == UNITS_FORMAT_MINUTES_AGO)?" ago":"";
+
+ size_t m = (size_t)value;
+ size_t d = m / (60 * 24);
+ m = m % (60 * 24);
+
+ size_t h = m / 60;
+ m = m % 60;
+
+ if(d)
+ snprintfz(value_string, value_string_len, "%zud %02zuh %02zum%s", d, h, m, suffix);
+ else
+ snprintfz(value_string, value_string_len, "%zuh %zum%s", h, m, suffix);
+
+ return value_string;
+ }
+
+ else if(unlikely(format == UNITS_FORMAT_HOURS || format == UNITS_FORMAT_HOURS_AGO)) {
+ if(value == 0.0) {
+ snprintfz(value_string, value_string_len, "%s", "now");
+ return value_string;
+ }
+ else if(isnan(value) || isinf(value)) {
+ snprintfz(value_string, value_string_len, "%s", "undefined");
+ return value_string;
+ }
+
+ const char *suffix = (format == UNITS_FORMAT_HOURS_AGO)?" ago":"";
+
+ size_t h = (size_t)value;
+ size_t d = h / 24;
+ h = h % 24;
+
+ if(d)
+ snprintfz(value_string, value_string_len, "%zud %zuh%s", d, h, suffix);
+ else
+ snprintfz(value_string, value_string_len, "%zuh%s", h, suffix);
+
+ return value_string;
+ }
+
+ else if(unlikely(format == UNITS_FORMAT_ONOFF)) {
+ snprintfz(value_string, value_string_len, "%s", (value != 0.0)?"on":"off");
+ return value_string;
+ }
+
+ else if(unlikely(format == UNITS_FORMAT_UPDOWN)) {
+ snprintfz(value_string, value_string_len, "%s", (value != 0.0)?"up":"down");
+ return value_string;
+ }
+
+ else if(unlikely(format == UNITS_FORMAT_OKERROR)) {
+ snprintfz(value_string, value_string_len, "%s", (value != 0.0)?"ok":"error");
+ return value_string;
+ }
+
+ else if(unlikely(format == UNITS_FORMAT_OKFAILED)) {
+ snprintfz(value_string, value_string_len, "%s", (value != 0.0)?"ok":"failed");
+ return value_string;
+ }
+
+ else if(unlikely(format == UNITS_FORMAT_EMPTY))
+ units = "";
+
+ else if(unlikely(format == UNITS_FORMAT_PERCENT))
+ units = "%";
+
+ if(unlikely(isnan(value) || isinf(value))) {
+ strcpy(value_string, "-");
+ return value_string;
+ }
+
+ return format_value_with_precision_and_unit(value_string, value_string_len, value, units, precision);
+}
+
+static struct badge_color {
+ const char *name;
+ uint32_t hash;
+ const char *color;
+} badge_colors[] = {
+
+ // colors from:
+ // https://github.com/badges/shields/blob/master/colorscheme.json
+
+ { "brightgreen", 0, "4c1" },
+ { "green", 0, "97CA00" },
+ { "yellow", 0, "dfb317" },
+ { "yellowgreen", 0, "a4a61d" },
+ { "orange", 0, "fe7d37" },
+ { "red", 0, "e05d44" },
+ { "blue", 0, "007ec6" },
+ { "grey", 0, "555" },
+ { "gray", 0, "555" },
+ { "lightgrey", 0, "9f9f9f" },
+ { "lightgray", 0, "9f9f9f" },
+
+ // terminator
+ { NULL, 0, NULL }
+};
+
+static inline const char *color_map(const char *color, const char *def) {
+ static int max = -1;
+ int i;
+
+ if(unlikely(max == -1)) {
+ for(i = 0; badge_colors[i].name ;i++)
+ badge_colors[i].hash = simple_hash(badge_colors[i].name);
+
+ max = i;
+ }
+
+ uint32_t hash = simple_hash(color);
+
+ for(i = 0; i < max; i++) {
+ struct badge_color *ptr = &badge_colors[i];
+
+ if(hash == ptr->hash && !strcmp(color, ptr->name))
+ return ptr->color;
+ }
+
+ return def;
+}
+
+typedef enum color_comparison {
+ COLOR_COMPARE_EQUAL,
+ COLOR_COMPARE_NOTEQUAL,
+ COLOR_COMPARE_LESS,
+ COLOR_COMPARE_LESSEQUAL,
+ COLOR_COMPARE_GREATER,
+ COLOR_COMPARE_GREATEREQUAL,
+} BADGE_COLOR_COMPARISON;
+
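+// resolve the badge color for 'value' from a specification like
+// "red>90|orange>50|green" - the conditions are evaluated in order and
+// the first matching one wins; a condition-less color acts as the default.
+// the selected color is copied into 'final' (at most 'len' bytes).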
+static inline void calc_colorz(const char *color, char *final, size_t len, calculated_number value) {
+ if(isnan(value) || isinf(value))
+ value = NAN;
+
+ char color_buffer[256 + 1] = "";
+ char value_buffer[256 + 1] = "";
+ BADGE_COLOR_COMPARISON comparison = COLOR_COMPARE_GREATER;
+
+ // example input:
+ // color<max|color>min|color:null...
+
+ const char *c = color;
+ while(*c) {
+ char *dc = color_buffer, *dv = NULL;
+ size_t ci = 0, vi = 0;
+
+ const char *t = c;
+
+ while(*t && *t != '|') {
+ switch(*t) {
+ case '!':
+ if(t[1] == '=') t++;
+ comparison = COLOR_COMPARE_NOTEQUAL;
+ dv = value_buffer;
+ break;
+
+ case '=':
+ case ':':
+ comparison = COLOR_COMPARE_EQUAL;
+ dv = value_buffer;
+ break;
+
+ case '}':
+ case ')':
+ case '>':
+ if(t[1] == '=') {
+ comparison = COLOR_COMPARE_GREATEREQUAL;
+ t++;
+ }
+ else
+ comparison = COLOR_COMPARE_GREATER;
+ dv = value_buffer;
+ break;
+
+ case '{':
+ case '(':
+ case '<':
+ if(t[1] == '=') {
+ comparison = COLOR_COMPARE_LESSEQUAL;
+ t++;
+ }
+ else if(t[1] == '>' || t[1] == ')' || t[1] == '}') {
+ comparison = COLOR_COMPARE_NOTEQUAL;
+ t++;
+ }
+ else
+ comparison = COLOR_COMPARE_LESS;
+ dv = value_buffer;
+ break;
+
+ default:
+ if(dv) {
+ if(vi < 256) {
+ vi++;
+ *dv++ = *t;
+ }
+ }
+ else {
+ if(ci < 256) {
+ ci++;
+ *dc++ = *t;
+ }
+ }
+ break;
+ }
+
+ t++;
+ }
+
+ // prepare for next iteration
+ if(*t == '|') t++;
+ c = t;
+
+ // do the math
+ *dc = '\0';
+ if(dv) {
+ *dv = '\0';
+ calculated_number v;
+
+ if(!*value_buffer || !strcmp(value_buffer, "null")) {
+ v = NAN;
+ }
+ else {
+ v = str2l(value_buffer);
+ if(isnan(v) || isinf(v))
+ v = NAN;
+ }
+
+ if(unlikely(isnan(value) || isnan(v))) {
+ if(isnan(value) && isnan(v))
+ break;
+ }
+ else {
+ if (unlikely(comparison == COLOR_COMPARE_LESS && isless(value, v))) break;
+ else if (unlikely(comparison == COLOR_COMPARE_LESSEQUAL && islessequal(value, v))) break;
+ else if (unlikely(comparison == COLOR_COMPARE_GREATER && isgreater(value, v))) break;
+ else if (unlikely(comparison == COLOR_COMPARE_GREATEREQUAL && isgreaterequal(value, v))) break;
+ else if (unlikely(comparison == COLOR_COMPARE_EQUAL && !islessgreater(value, v))) break;
+ else if (unlikely(comparison == COLOR_COMPARE_NOTEQUAL && islessgreater(value, v))) break;
+ }
+ }
+ else
+ break;
+ }
+
+ const char *b;
+ if(color_buffer[0])
+ b = color_buffer;
+ else
+ b = color;
+
+ strncpyz(final, b, len);
+}
+
+// value + units
+#define VALUE_STRING_SIZE 100
+
+// label
+#define LABEL_STRING_SIZE 200
+
+// colors
+#define COLOR_STRING_SIZE 100
+
+static inline int allowed_hexa_char(char x) {
+ return ( (x >= '0' && x <= '9') ||
+ (x >= 'a' && x <= 'f') ||
+ (x >= 'A' && x <= 'F')
+ );
+}
+
+static int html_color_check(const char *str) {
+ int i = 0;
+ while(str[i]) {
+ if(!allowed_hexa_char(str[i]))
+ return 0;
+ if(unlikely(i >= 6))
+ return 0;
+ i++;
+ }
+ // want to allow either RGB or RRGGBB
+ return ( i == 6 || i == 3 );
+}
+
+// Parses the color argument as #RRGGBB, #RGB, or one of the named colors
+// in the badge_colors map above.
+// If parsing fails (invalid argument), the given default color (def) is
+// returned instead.
+// In any case the function returns either a color as an "RRGGBB" or "RGB"
+// string, or whatever was given as def - without checking it, so the caller
+// is responsible for providing a sensible, safely escaped default.
+// The result can therefore always be embedded directly in XML; no further
+// escaping is necessary (except for the default, which remains the
+// caller's responsibility).
+#define BADGE_SVG_COLOR_ARG_MAXLEN 20
+
+static const char *parse_color_argument(const char *arg, const char *def)
+{
+ if( !arg )
+ return def;
+ size_t len = strnlen(arg, BADGE_SVG_COLOR_ARG_MAXLEN);
+ if( len < 2 || len >= BADGE_SVG_COLOR_ARG_MAXLEN )
+ return def;
+ if( html_color_check(arg) )
+ return arg;
+ return color_map(arg, def);
+}
+
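+// render a complete SVG badge into 'wb';
+// when fixed_width_lbl / fixed_width_val are not positive, the label and
+// value widths are estimated from the text and a small embedded script
+// corrects them client-side after the badge is rendered.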
+void buffer_svg(BUFFER *wb, const char *label, calculated_number value, const char *units, const char *label_color, const char *value_color, int precision, int scale, uint32_t options, int fixed_width_lbl, int fixed_width_val, const char* text_color_lbl, const char* text_color_val) {
+ char value_color_buffer[COLOR_STRING_SIZE + 1]
+ , value_string[VALUE_STRING_SIZE + 1]
+ , label_escaped[LABEL_STRING_SIZE + 1]
+ , value_escaped[VALUE_STRING_SIZE + 1];
+
+ const char *label_color_parsed;
+ const char *value_color_parsed;
+
+ double label_width = (double)fixed_width_lbl, value_width = (double)fixed_width_val, total_width;
+ double height = 20.0, font_size = 11.0, text_offset = 5.8, round_corner = 3.0;
+
+ if(scale < 100) scale = 100;
+
+ if(unlikely(!value_color || !*value_color))
+ value_color = (isnan(value) || isinf(value))?"999":"4c1";
+
+ calc_colorz(value_color, value_color_buffer, COLOR_STRING_SIZE, value);
+ format_value_and_unit(value_string, VALUE_STRING_SIZE, (options & RRDR_OPTION_DISPLAY_ABS)?calculated_number_fabs(value):value, units, precision);
+
+ if(fixed_width_lbl <= 0 || fixed_width_val <= 0) {
+ label_width = verdana11_width(label, font_size) + (BADGE_HORIZONTAL_PADDING * 2);
+ value_width = verdana11_width(value_string, font_size) + (BADGE_HORIZONTAL_PADDING * 2);
+ }
+ total_width = label_width + value_width;
+
+ escape_xmlz(label_escaped, label, LABEL_STRING_SIZE);
+ escape_xmlz(value_escaped, value_string, VALUE_STRING_SIZE);
+
+ label_color_parsed = parse_color_argument(label_color, "555");
+ value_color_parsed = parse_color_argument(value_color_buffer, "555");
+
+ wb->contenttype = CT_IMAGE_SVG_XML;
+
+ total_width = total_width * scale / 100.0;
+ height = height * scale / 100.0;
+ font_size = font_size * scale / 100.0;
+ text_offset = text_offset * scale / 100.0;
+ label_width = label_width * scale / 100.0;
+ value_width = value_width * scale / 100.0;
+ round_corner = round_corner * scale / 100.0;
+
+ // svg template from:
+ // https://raw.githubusercontent.com/badges/shields/master/templates/flat-template.svg
+ buffer_sprintf(wb,
+ "<svg xmlns=\"http://www.w3.org/2000/svg\" xmlns:xlink=\"http://www.w3.org/1999/xlink\" width=\"%0.2f\" height=\"%0.2f\">"
+ "<linearGradient id=\"smooth\" x2=\"0\" y2=\"100%%\">"
+ "<stop offset=\"0\" stop-color=\"#bbb\" stop-opacity=\".1\"/>"
+ "<stop offset=\"1\" stop-opacity=\".1\"/>"
+ "</linearGradient>"
+ "<mask id=\"round\">"
+ "<rect class=\"bdge-ttl-width\" width=\"%0.2f\" height=\"%0.2f\" rx=\"%0.2f\" fill=\"#fff\"/>"
+ "</mask>"
+ "<g mask=\"url(#round)\">"
+ "<rect class=\"bdge-rect-lbl\" width=\"%0.2f\" height=\"%0.2f\" fill=\"#%s\"/>",
+ total_width, height,
+ total_width, height, round_corner,
+ label_width, height, label_color_parsed); //<rect class="bdge-rect-lbl"
+
+ if(fixed_width_lbl > 0 && fixed_width_val > 0) {
+ buffer_sprintf(wb,
+ "<clipPath id=\"lbl-rect\">"
+ "<rect class=\"bdge-rect-lbl\" width=\"%0.2f\" height=\"%0.2f\"/>"
+ "</clipPath>",
+ label_width, height); //<clipPath id="lbl-rect"> <rect class="bdge-rect-lbl"
+ }
+
+ buffer_sprintf(wb,
+ "<rect class=\"bdge-rect-val\" x=\"%0.2f\" width=\"%0.2f\" height=\"%0.2f\" fill=\"#%s\"/>",
+ label_width, value_width, height, value_color_parsed);
+
+ if(fixed_width_lbl > 0 && fixed_width_val > 0) {
+ buffer_sprintf(wb,
+ "<clipPath id=\"val-rect\">"
+ "<rect class=\"bdge-rect-val\" x=\"%0.2f\" width=\"%0.2f\" height=\"%0.2f\"/>"
+ "</clipPath>",
+ label_width, value_width, height);
+ }
+
+ buffer_sprintf(wb,
+ "<rect class=\"bdge-ttl-width\" width=\"%0.2f\" height=\"%0.2f\" fill=\"url(#smooth)\"/>"
+ "</g>"
+ "<g text-anchor=\"middle\" font-family=\"DejaVu Sans,Verdana,Geneva,sans-serif\" font-size=\"%0.2f\">"
+ "<text class=\"bdge-lbl-lbl\" x=\"%0.2f\" y=\"%0.0f\" fill=\"#010101\" fill-opacity=\".3\" clip-path=\"url(#lbl-rect)\">%s</text>"
+ "<text class=\"bdge-lbl-lbl\" x=\"%0.2f\" y=\"%0.0f\" fill=\"#%s\" clip-path=\"url(#lbl-rect)\">%s</text>"
+ "<text class=\"bdge-lbl-val\" x=\"%0.2f\" y=\"%0.0f\" fill=\"#010101\" fill-opacity=\".3\" clip-path=\"url(#val-rect)\">%s</text>"
+ "<text class=\"bdge-lbl-val\" x=\"%0.2f\" y=\"%0.0f\" fill=\"#%s\" clip-path=\"url(#val-rect)\">%s</text>"
+ "</g>",
+ total_width, height,
+ font_size,
+ label_width / 2, ceil(height - text_offset), label_escaped,
+ label_width / 2, ceil(height - text_offset - 1.0), parse_color_argument(text_color_lbl, "fff"), label_escaped,
+ label_width + value_width / 2 -1, ceil(height - text_offset), value_escaped,
+ label_width + value_width / 2 -1, ceil(height - text_offset - 1.0), parse_color_argument(text_color_val, "fff"), value_escaped);
+
+ if(fixed_width_lbl <= 0 || fixed_width_val <= 0){
+ buffer_sprintf(wb,
+ "<script type=\"text/javascript\">"
+ "var bdg_horiz_padding = %d;"
+ "function netdata_bdge_each(list, attr, value){"
+ "Array.prototype.forEach.call(list, function(el){"
+ "el.setAttribute(attr, value);"
+ "});"
+ "};"
+ "var this_svg = document.currentScript.closest(\"svg\");"
+ "var elem_lbl = this_svg.getElementsByClassName(\"bdge-lbl-lbl\");"
+ "var elem_val = this_svg.getElementsByClassName(\"bdge-lbl-val\");"
+ "var lbl_size = elem_lbl[0].getBBox();"
+ "var val_size = elem_val[0].getBBox();"
+ "var width_total = lbl_size.width + bdg_horiz_padding*2;"
+ "this_svg.getElementsByClassName(\"bdge-rect-lbl\")[0].setAttribute(\"width\", width_total);"
+ "netdata_bdge_each(elem_lbl, \"x\", (lbl_size.width / 2) + bdg_horiz_padding);"
+ "netdata_bdge_each(elem_val, \"x\", width_total + (val_size.width / 2) + bdg_horiz_padding);"
+ "var val_rect = this_svg.getElementsByClassName(\"bdge-rect-val\")[0];"
+ "val_rect.setAttribute(\"width\", val_size.width + bdg_horiz_padding*2);"
+ "val_rect.setAttribute(\"x\", width_total);"
+ "width_total += val_size.width + bdg_horiz_padding*2;"
+ "var width_update_elems = this_svg.getElementsByClassName(\"bdge-ttl-width\");"
+ "netdata_bdge_each(width_update_elems, \"width\", width_total);"
+ "this_svg.setAttribute(\"width\", width_total);"
+ "</script>",
+ BADGE_HORIZONTAL_PADDING);
+ }
+ buffer_sprintf(wb, "</svg>");
+}
+
+#define BADGE_URL_ARG_LBL_COLOR "text_color_lbl"
+#define BADGE_URL_ARG_VAL_COLOR "text_color_val"
+
+int web_client_api_request_v1_badge(RRDHOST *host, struct web_client *w, char *url) {
+ int ret = HTTP_RESP_BAD_REQUEST;
+ buffer_flush(w->response.data);
+
+ BUFFER *dimensions = NULL;
+
+ const char *chart = NULL
+ , *before_str = NULL
+ , *after_str = NULL
+ , *points_str = NULL
+ , *multiply_str = NULL
+ , *divide_str = NULL
+ , *label = NULL
+ , *units = NULL
+ , *label_color = NULL
+ , *value_color = NULL
+ , *refresh_str = NULL
+ , *precision_str = NULL
+ , *scale_str = NULL
+ , *alarm = NULL
+ , *fixed_width_lbl_str = NULL
+ , *fixed_width_val_str = NULL
+ , *text_color_lbl_str = NULL
+ , *text_color_val_str = NULL;
+
+ int group = RRDR_GROUPING_AVERAGE;
+ uint32_t options = 0x00000000;
+
+ while(url) {
+ char *value = mystrsep(&url, "&");
+ if(!value || !*value) continue;
+
+ char *name = mystrsep(&value, "=");
+ if(!name || !*name) continue;
+ if(!value || !*value) continue;
+
+ debug(D_WEB_CLIENT, "%llu: API v1 badge.svg query param '%s' with value '%s'", w->id, name, value);
+
+ // name and value are now the parameters
+ // they are not null and not empty
+
+ if(!strcmp(name, "chart")) chart = value;
+ else if(!strcmp(name, "dimension") || !strcmp(name, "dim") || !strcmp(name, "dimensions") || !strcmp(name, "dims")) {
+ if(!dimensions)
+ dimensions = buffer_create(100);
+
+ buffer_strcat(dimensions, "|");
+ buffer_strcat(dimensions, value);
+ }
+ else if(!strcmp(name, "after")) after_str = value;
+ else if(!strcmp(name, "before")) before_str = value;
+ else if(!strcmp(name, "points")) points_str = value;
+ else if(!strcmp(name, "group")) {
+ group = web_client_api_request_v1_data_group(value, RRDR_GROUPING_AVERAGE);
+ }
+ else if(!strcmp(name, "options")) {
+ options |= web_client_api_request_v1_data_options(value);
+ }
+ else if(!strcmp(name, "label")) label = value;
+ else if(!strcmp(name, "units")) units = value;
+ else if(!strcmp(name, "label_color")) label_color = value;
+ else if(!strcmp(name, "value_color")) value_color = value;
+ else if(!strcmp(name, "multiply")) multiply_str = value;
+ else if(!strcmp(name, "divide")) divide_str = value;
+ else if(!strcmp(name, "refresh")) refresh_str = value;
+ else if(!strcmp(name, "precision")) precision_str = value;
+ else if(!strcmp(name, "scale")) scale_str = value;
+ else if(!strcmp(name, "fixed_width_lbl")) fixed_width_lbl_str = value;
+ else if(!strcmp(name, "fixed_width_val")) fixed_width_val_str = value;
+ else if(!strcmp(name, "alarm")) alarm = value;
+ else if(!strcmp(name, BADGE_URL_ARG_LBL_COLOR)) text_color_lbl_str = value;
+ else if(!strcmp(name, BADGE_URL_ARG_VAL_COLOR)) text_color_val_str = value;
+ }
+
+ int fixed_width_lbl = -1;
+ int fixed_width_val = -1;
+
+ if(fixed_width_lbl_str && *fixed_width_lbl_str
+ && fixed_width_val_str && *fixed_width_val_str) {
+ fixed_width_lbl = str2i(fixed_width_lbl_str);
+ fixed_width_val = str2i(fixed_width_val_str);
+ }
+
+ if(!chart || !*chart) {
+ buffer_no_cacheable(w->response.data);
+        buffer_sprintf(w->response.data, "No chart id was given in the request.");
+ goto cleanup;
+ }
+
+ int scale = (scale_str && *scale_str)?str2i(scale_str):100;
+
+ RRDSET *st = rrdset_find(host, chart);
+ if(!st) st = rrdset_find_byname(host, chart);
+ if(!st) {
+ buffer_no_cacheable(w->response.data);
+ buffer_svg(w->response.data, "chart not found", NAN, "", NULL, NULL, -1, scale, 0, -1, -1, NULL, NULL);
+ ret = HTTP_RESP_OK;
+ goto cleanup;
+ }
+ st->last_accessed_time = now_realtime_sec();
+
+ RRDCALC *rc = NULL;
+ if(alarm) {
+ rc = rrdcalc_find(st, alarm);
+ if (!rc) {
+ buffer_no_cacheable(w->response.data);
+ buffer_svg(w->response.data, "alarm not found", NAN, "", NULL, NULL, -1, scale, 0, -1, -1, NULL, NULL);
+ ret = HTTP_RESP_OK;
+ goto cleanup;
+ }
+ }
+
+ long long multiply = (multiply_str && *multiply_str )?str2l(multiply_str):1;
+ long long divide = (divide_str && *divide_str )?str2l(divide_str):1;
+ long long before = (before_str && *before_str )?str2l(before_str):0;
+ long long after = (after_str && *after_str )?str2l(after_str):-st->update_every;
+ int points = (points_str && *points_str )?str2i(points_str):1;
+ int precision = (precision_str && *precision_str)?str2i(precision_str):-1;
+
+ if(!multiply) multiply = 1;
+ if(!divide) divide = 1;
+
+ int refresh = 0;
+ if(refresh_str && *refresh_str) {
+ if(!strcmp(refresh_str, "auto")) {
+ if(rc) refresh = rc->update_every;
+ else if(options & RRDR_OPTION_NOT_ALIGNED)
+ refresh = st->update_every;
+ else {
+ refresh = (int)(before - after);
+ if(refresh < 0) refresh = -refresh;
+ }
+ }
+ else {
+ refresh = str2i(refresh_str);
+ if(refresh < 0) refresh = -refresh;
+ }
+ }
+
+ if(!label) {
+ if(alarm) {
+ char *s = (char *)alarm;
+ while(*s) {
+ if(*s == '_') *s = ' ';
+ s++;
+ }
+ label = alarm;
+ }
+ else if(dimensions) {
+ const char *dim = buffer_tostring(dimensions);
+ if(*dim == '|') dim++;
+ label = dim;
+ }
+ else
+ label = st->name;
+ }
+ if(!units) {
+ if(alarm) {
+ if(rc->units)
+ units = rc->units;
+ else
+ units = "";
+ }
+ else if(options & RRDR_OPTION_PERCENTAGE)
+ units = "%";
+ else
+ units = st->units;
+ }
+
+ debug(D_WEB_CLIENT, "%llu: API command 'badge.svg' for chart '%s', alarm '%s', dimensions '%s', after '%lld', before '%lld', points '%d', group '%d', options '0x%08x'"
+ , w->id
+ , chart
+ , alarm?alarm:""
+ , (dimensions)?buffer_tostring(dimensions):""
+ , after
+ , before
+ , points
+ , group
+ , options
+ );
+
+ if(rc) {
+ if (refresh > 0) {
+ buffer_sprintf(w->response.header, "Refresh: %d\r\n", refresh);
+ w->response.data->expires = now_realtime_sec() + refresh;
+ }
+ else buffer_no_cacheable(w->response.data);
+
+ if(!value_color) {
+ switch(rc->status) {
+ case RRDCALC_STATUS_CRITICAL:
+ value_color = "red";
+ break;
+
+ case RRDCALC_STATUS_WARNING:
+ value_color = "orange";
+ break;
+
+ case RRDCALC_STATUS_CLEAR:
+ value_color = "brightgreen";
+ break;
+
+ case RRDCALC_STATUS_UNDEFINED:
+ value_color = "lightgrey";
+ break;
+
+ case RRDCALC_STATUS_UNINITIALIZED:
+ value_color = "#000";
+ break;
+
+ default:
+ value_color = "grey";
+ break;
+ }
+ }
+
+ buffer_svg(w->response.data,
+ label,
+ (isnan(rc->value)||isinf(rc->value)) ? rc->value : rc->value * multiply / divide,
+ units,
+ label_color,
+ value_color,
+ precision,
+ scale,
+ options,
+ fixed_width_lbl,
+ fixed_width_val,
+ text_color_lbl_str,
+ text_color_val_str
+ );
+ ret = HTTP_RESP_OK;
+ }
+ else {
+ time_t latest_timestamp = 0;
+ int value_is_null = 1;
+ calculated_number n = NAN;
+ ret = HTTP_RESP_INTERNAL_SERVER_ERROR;
+
+        // if the collected value is too old, don't calculate it
+ if (rrdset_last_entry_t(st) >= (now_realtime_sec() - (st->update_every * st->gap_when_lost_iterations_above)))
+ ret = rrdset2value_api_v1(st, w->response.data, &n, (dimensions) ? buffer_tostring(dimensions) : NULL
+ , points, after, before, group, 0, options, NULL, &latest_timestamp, &value_is_null);
+
+ // if the value cannot be calculated, show empty badge
+ if (ret != HTTP_RESP_OK) {
+ buffer_no_cacheable(w->response.data);
+ value_is_null = 1;
+ n = 0;
+ ret = HTTP_RESP_OK;
+ }
+ else if (refresh > 0) {
+ buffer_sprintf(w->response.header, "Refresh: %d\r\n", refresh);
+ w->response.data->expires = now_realtime_sec() + refresh;
+ }
+ else buffer_no_cacheable(w->response.data);
+
+ // render the badge
+ buffer_svg(w->response.data,
+ label,
+ (value_is_null)?NAN:(n * multiply / divide),
+ units,
+ label_color,
+ value_color,
+ precision,
+ scale,
+ options,
+ fixed_width_lbl,
+ fixed_width_val,
+ text_color_lbl_str,
+ text_color_val_str
+ );
+ }
+
+ cleanup:
+ buffer_free(dimensions);
+ return ret;
+}
diff --git a/web/api/badges/web_buffer_svg.h b/web/api/badges/web_buffer_svg.h
new file mode 100644
index 0000000..1cf69e2
--- /dev/null
+++ b/web/api/badges/web_buffer_svg.h
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_WEB_BUFFER_SVG_H
+#define NETDATA_WEB_BUFFER_SVG_H 1
+
+#include "libnetdata/libnetdata.h"
+#include "web/server/web_client.h"
+
+extern void buffer_svg(BUFFER *wb, const char *label, calculated_number value, const char *units, const char *label_color, const char *value_color, int precision, int scale, uint32_t options, int fixed_width_lbl, int fixed_width_val, const char* text_color_lbl, const char* text_color_val);
+extern char *format_value_and_unit(char *value_string, size_t value_string_len, calculated_number value, const char *units, int precision);
+
+extern int web_client_api_request_v1_badge(struct rrdhost *host, struct web_client *w, char *url);
+
+#include "web/api/web_api_v1.h"
+
+#endif /* NETDATA_WEB_BUFFER_SVG_H */
diff --git a/web/api/exporters/Makefile.am b/web/api/exporters/Makefile.am
new file mode 100644
index 0000000..06fda51
--- /dev/null
+++ b/web/api/exporters/Makefile.am
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+SUBDIRS = \
+ shell \
+ prometheus \
+ $(NULL)
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/web/api/exporters/README.md b/web/api/exporters/README.md
new file mode 100644
index 0000000..4019647
--- /dev/null
+++ b/web/api/exporters/README.md
@@ -0,0 +1,10 @@
+<!--
+title: "Exporters"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/web/api/exporters/README.md
+-->
+
+# Exporters
+
+TBD
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fweb%2Fapi%2Fexporters%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/web/api/exporters/allmetrics.c b/web/api/exporters/allmetrics.c
new file mode 100644
index 0000000..d10de3d
--- /dev/null
+++ b/web/api/exporters/allmetrics.c
@@ -0,0 +1,127 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "allmetrics.h"
+
+struct prometheus_output_options {
+ char *name;
+ PROMETHEUS_OUTPUT_OPTIONS flag;
+} prometheus_output_flags_root[] = {
+ { "help", PROMETHEUS_OUTPUT_HELP },
+ { "types", PROMETHEUS_OUTPUT_TYPES },
+ { "names", PROMETHEUS_OUTPUT_NAMES },
+ { "timestamps", PROMETHEUS_OUTPUT_TIMESTAMPS },
+ { "variables", PROMETHEUS_OUTPUT_VARIABLES },
+ { "oldunits", PROMETHEUS_OUTPUT_OLDUNITS },
+ { "hideunits", PROMETHEUS_OUTPUT_HIDEUNITS },
+ // terminator
+ { NULL, PROMETHEUS_OUTPUT_NONE },
+};
+
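+// serve /api/v1/allmetrics - export the latest values of all metrics of
+// 'host' in the requested format (shell, json, prometheus, or
+// prometheus_all_hosts).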
+inline int web_client_api_request_v1_allmetrics(RRDHOST *host, struct web_client *w, char *url) {
+ int format = ALLMETRICS_SHELL;
+ const char *prometheus_server = w->client_ip;
+
+ uint32_t prometheus_exporting_options;
+ if (prometheus_exporter_instance)
+ prometheus_exporting_options = prometheus_exporter_instance->config.options;
+ else
+ prometheus_exporting_options = global_backend_options;
+
+ PROMETHEUS_OUTPUT_OPTIONS prometheus_output_options =
+ PROMETHEUS_OUTPUT_TIMESTAMPS |
+ ((prometheus_exporting_options & BACKEND_OPTION_SEND_NAMES) ? PROMETHEUS_OUTPUT_NAMES : 0);
+
+ const char *prometheus_prefix;
+ if (prometheus_exporter_instance)
+ prometheus_prefix = prometheus_exporter_instance->config.prefix;
+ else
+ prometheus_prefix = global_backend_prefix;
+
+ while(url) {
+ char *value = mystrsep(&url, "&");
+ if (!value || !*value) continue;
+
+ char *name = mystrsep(&value, "=");
+ if(!name || !*name) continue;
+ if(!value || !*value) continue;
+
+ if(!strcmp(name, "format")) {
+ if(!strcmp(value, ALLMETRICS_FORMAT_SHELL))
+ format = ALLMETRICS_SHELL;
+ else if(!strcmp(value, ALLMETRICS_FORMAT_PROMETHEUS))
+ format = ALLMETRICS_PROMETHEUS;
+ else if(!strcmp(value, ALLMETRICS_FORMAT_PROMETHEUS_ALL_HOSTS))
+ format = ALLMETRICS_PROMETHEUS_ALL_HOSTS;
+ else if(!strcmp(value, ALLMETRICS_FORMAT_JSON))
+ format = ALLMETRICS_JSON;
+ else
+ format = 0;
+ }
+ else if(!strcmp(name, "server")) {
+ prometheus_server = value;
+ }
+ else if(!strcmp(name, "prefix")) {
+ prometheus_prefix = value;
+ }
+ else if(!strcmp(name, "data") || !strcmp(name, "source") || !strcmp(name, "data source") || !strcmp(name, "data-source") || !strcmp(name, "data_source") || !strcmp(name, "datasource")) {
+ prometheus_exporting_options = backend_parse_data_source(value, prometheus_exporting_options);
+ }
+ else {
+ int i;
+ for(i = 0; prometheus_output_flags_root[i].name ; i++) {
+ if(!strcmp(name, prometheus_output_flags_root[i].name)) {
+ if(!strcmp(value, "yes") || !strcmp(value, "1") || !strcmp(value, "true"))
+ prometheus_output_options |= prometheus_output_flags_root[i].flag;
+ else
+ prometheus_output_options &= ~prometheus_output_flags_root[i].flag;
+
+ break;
+ }
+ }
+ }
+ }
+
+ buffer_flush(w->response.data);
+ buffer_no_cacheable(w->response.data);
+
+ switch(format) {
+ case ALLMETRICS_JSON:
+ w->response.data->contenttype = CT_APPLICATION_JSON;
+ rrd_stats_api_v1_charts_allmetrics_json(host, w->response.data);
+ return HTTP_RESP_OK;
+
+ case ALLMETRICS_SHELL:
+ w->response.data->contenttype = CT_TEXT_PLAIN;
+ rrd_stats_api_v1_charts_allmetrics_shell(host, w->response.data);
+ return HTTP_RESP_OK;
+
+ case ALLMETRICS_PROMETHEUS:
+ w->response.data->contenttype = CT_PROMETHEUS;
+ rrd_stats_api_v1_charts_allmetrics_prometheus_single_host(
+ host
+ , w->response.data
+ , prometheus_server
+ , prometheus_prefix
+ , prometheus_exporting_options
+ , prometheus_output_options
+ );
+ return HTTP_RESP_OK;
+
+ case ALLMETRICS_PROMETHEUS_ALL_HOSTS:
+ w->response.data->contenttype = CT_PROMETHEUS;
+ rrd_stats_api_v1_charts_allmetrics_prometheus_all_hosts(
+ host
+ , w->response.data
+ , prometheus_server
+ , prometheus_prefix
+ , prometheus_exporting_options
+ , prometheus_output_options
+ );
+ return HTTP_RESP_OK;
+
+ default:
+ w->response.data->contenttype = CT_TEXT_PLAIN;
+ buffer_strcat(w->response.data, "Which format? '" ALLMETRICS_FORMAT_SHELL "', '" ALLMETRICS_FORMAT_PROMETHEUS "', '" ALLMETRICS_FORMAT_PROMETHEUS_ALL_HOSTS "' and '" ALLMETRICS_FORMAT_JSON "' are currently supported.");
+ return HTTP_RESP_BAD_REQUEST;
+ }
+}
diff --git a/web/api/exporters/allmetrics.h b/web/api/exporters/allmetrics.h
new file mode 100644
index 0000000..f076ff0
--- /dev/null
+++ b/web/api/exporters/allmetrics.h
@@ -0,0 +1,12 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_API_ALLMETRICS_H
+#define NETDATA_API_ALLMETRICS_H
+
+#include "web/api/formatters/rrd2json.h"
+#include "shell/allmetrics_shell.h"
+#include "web/server/web_client.h"
+
+extern int web_client_api_request_v1_allmetrics(RRDHOST *host, struct web_client *w, char *url);
+
+#endif //NETDATA_API_ALLMETRICS_H
diff --git a/web/api/exporters/prometheus/Makefile.am b/web/api/exporters/prometheus/Makefile.am
new file mode 100644
index 0000000..161784b
--- /dev/null
+++ b/web/api/exporters/prometheus/Makefile.am
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/web/api/exporters/prometheus/README.md b/web/api/exporters/prometheus/README.md
new file mode 100644
index 0000000..d26c6e4
--- /dev/null
+++ b/web/api/exporters/prometheus/README.md
@@ -0,0 +1,10 @@
+<!--
+title: "prometheus exporter"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/web/api/exporters/prometheus/README.md
+-->
+
+# prometheus exporter
+
+The prometheus exporter for Netdata is located at the [backends section for prometheus](/backends/prometheus/README.md).
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fweb%2Fapi%2Fexporters%2Fprometheus%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/web/api/exporters/shell/Makefile.am b/web/api/exporters/shell/Makefile.am
new file mode 100644
index 0000000..161784b
--- /dev/null
+++ b/web/api/exporters/shell/Makefile.am
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/web/api/exporters/shell/README.md b/web/api/exporters/shell/README.md
new file mode 100644
index 0000000..b919045
--- /dev/null
+++ b/web/api/exporters/shell/README.md
@@ -0,0 +1,71 @@
+<!--
+title: "shell exporter"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/web/api/exporters/shell/README.md
+-->
+
+# shell exporter
+
+Shell scripts can now query Netdata:
+
+```sh
+eval "$(curl -s 'http://localhost:19999/api/v1/allmetrics')"
+```
+
+After this command, all Netdata metrics are exposed as shell variables. Check it out:
+
+```sh
+# source the metrics
+eval "$(curl -s 'http://localhost:19999/api/v1/allmetrics')"
+
+# let's see if there are variables exposed by Netdata for system.cpu
+set | grep "^NETDATA_SYSTEM_CPU"
+
+NETDATA_SYSTEM_CPU_GUEST=0
+NETDATA_SYSTEM_CPU_GUEST_NICE=0
+NETDATA_SYSTEM_CPU_IDLE=95
+NETDATA_SYSTEM_CPU_IOWAIT=0
+NETDATA_SYSTEM_CPU_IRQ=0
+NETDATA_SYSTEM_CPU_NICE=0
+NETDATA_SYSTEM_CPU_SOFTIRQ=0
+NETDATA_SYSTEM_CPU_STEAL=0
+NETDATA_SYSTEM_CPU_SYSTEM=1
+NETDATA_SYSTEM_CPU_USER=4
+NETDATA_SYSTEM_CPU_VISIBLETOTAL=5
+
+# let's see the total cpu utilization of the system
+echo ${NETDATA_SYSTEM_CPU_VISIBLETOTAL}
+5
+
+# what about alarms?
+set | grep "^NETDATA_ALARM_SYSTEM_SWAP_"
+NETDATA_ALARM_SYSTEM_SWAP_RAM_IN_SWAP_STATUS=CRITICAL
+NETDATA_ALARM_SYSTEM_SWAP_RAM_IN_SWAP_VALUE=53
+NETDATA_ALARM_SYSTEM_SWAP_USED_SWAP_STATUS=CLEAR
+NETDATA_ALARM_SYSTEM_SWAP_USED_SWAP_VALUE=51
+
+# let's get the current status of the alarm 'ram in swap'
+echo ${NETDATA_ALARM_SYSTEM_SWAP_RAM_IN_SWAP_STATUS}
+CRITICAL
+
+# is it fast?
+time curl -s 'http://localhost:19999/api/v1/allmetrics' >/dev/null
+
+real 0m0,070s
+user 0m0,000s
+sys 0m0,007s
+
+# it is...
+# 0.07 seconds for curl to be loaded, connect to Netdata and fetch the response back...
+```
+
+The `_VISIBLETOTAL` variable sums up all the visible dimensions of each chart.
+
+The format of the variables is:
+
+```sh
+NETDATA_${chart_id^^}_${dimension_id^^}="${value}"
+```
+
+The value is rounded to the closest integer, since shell scripts cannot process decimal numbers.
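+
+For example, a minimal sketch of a shell alert built on these variables (the
+90% threshold is just an illustration):
+
+```sh
+#!/bin/sh
+# source all Netdata metrics as shell variables
+eval "$(curl -s 'http://localhost:19999/api/v1/allmetrics')"
+
+# NETDATA_SYSTEM_CPU_VISIBLETOTAL sums the visible dimensions of system.cpu
+if [ "${NETDATA_SYSTEM_CPU_VISIBLETOTAL:-0}" -gt 90 ]; then
+    echo "warning: CPU utilization is above 90%"
+fi
+```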
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fweb%2Fapi%2Fexporters%2Fshell%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/web/api/exporters/shell/allmetrics_shell.c b/web/api/exporters/shell/allmetrics_shell.c
new file mode 100644
index 0000000..daa0049
--- /dev/null
+++ b/web/api/exporters/shell/allmetrics_shell.c
@@ -0,0 +1,161 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "allmetrics_shell.h"
+
+// ----------------------------------------------------------------------------
+// SHELL
+// /api/v1/allmetrics?format=shell
+
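+// copy 's' into 'd' as a shell-safe variable name: alphanumeric characters
+// are uppercased, everything else becomes '_'; at most 'usable' bytes are
+// copied ('d' must be usable + 1 bytes long); returns the length copied.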
+static inline size_t shell_name_copy(char *d, const char *s, size_t usable) {
+ size_t n;
+
+ for(n = 0; *s && n < usable ; d++, s++, n++) {
+ register char c = *s;
+
+ if(unlikely(!isalnum(c))) *d = '_';
+ else *d = (char)toupper(c);
+ }
+ *d = '\0';
+
+ return n;
+}
+
+#define SHELL_ELEMENT_MAX 100
+
+void rrd_stats_api_v1_charts_allmetrics_shell(RRDHOST *host, BUFFER *wb) {
+ rrdhost_rdlock(host);
+
+ // for each chart
+ RRDSET *st;
+ rrdset_foreach_read(st, host) {
+ calculated_number total = 0.0;
+ char chart[SHELL_ELEMENT_MAX + 1];
+ shell_name_copy(chart, st->name?st->name:st->id, SHELL_ELEMENT_MAX);
+
+ buffer_sprintf(wb, "\n# chart: %s (name: %s)\n", st->id, st->name);
+ if(rrdset_is_available_for_viewers(st)) {
+ rrdset_rdlock(st);
+
+ // for each dimension
+ RRDDIM *rd;
+ rrddim_foreach_read(rd, st) {
+ if(rd->collections_counter && !rrddim_flag_check(rd, RRDDIM_FLAG_OBSOLETE)) {
+ char dimension[SHELL_ELEMENT_MAX + 1];
+ shell_name_copy(dimension, rd->name?rd->name:rd->id, SHELL_ELEMENT_MAX);
+
+ calculated_number n = rd->last_stored_value;
+
+ if(isnan(n) || isinf(n))
+ buffer_sprintf(wb, "NETDATA_%s_%s=\"\" # %s\n", chart, dimension, st->units);
+ else {
+ if(rd->multiplier < 0 || rd->divisor < 0) n = -n;
+ n = calculated_number_round(n);
+ if(!rrddim_flag_check(rd, RRDDIM_FLAG_HIDDEN)) total += n;
+ buffer_sprintf(wb, "NETDATA_%s_%s=\"" CALCULATED_NUMBER_FORMAT_ZERO "\" # %s\n", chart, dimension, n, st->units);
+ }
+ }
+ }
+
+ total = calculated_number_round(total);
+ buffer_sprintf(wb, "NETDATA_%s_VISIBLETOTAL=\"" CALCULATED_NUMBER_FORMAT_ZERO "\" # %s\n", chart, total, st->units);
+ rrdset_unlock(st);
+ }
+ }
+
+ buffer_strcat(wb, "\n# NETDATA ALARMS RUNNING\n");
+
+ RRDCALC *rc;
+ for(rc = host->alarms; rc ;rc = rc->next) {
+ if(!rc->rrdset) continue;
+
+ char chart[SHELL_ELEMENT_MAX + 1];
+ shell_name_copy(chart, rc->rrdset->name?rc->rrdset->name:rc->rrdset->id, SHELL_ELEMENT_MAX);
+
+ char alarm[SHELL_ELEMENT_MAX + 1];
+ shell_name_copy(alarm, rc->name, SHELL_ELEMENT_MAX);
+
+ calculated_number n = rc->value;
+
+ if(isnan(n) || isinf(n))
+ buffer_sprintf(wb, "NETDATA_ALARM_%s_%s_VALUE=\"\" # %s\n", chart, alarm, rc->units);
+ else {
+ n = calculated_number_round(n);
+ buffer_sprintf(wb, "NETDATA_ALARM_%s_%s_VALUE=\"" CALCULATED_NUMBER_FORMAT_ZERO "\" # %s\n", chart, alarm, n, rc->units);
+ }
+
+ buffer_sprintf(wb, "NETDATA_ALARM_%s_%s_STATUS=\"%s\"\n", chart, alarm, rrdcalc_status2string(rc->status));
+ }
+
+ rrdhost_unlock(host);
+}
+
+// ----------------------------------------------------------------------------
+
+void rrd_stats_api_v1_charts_allmetrics_json(RRDHOST *host, BUFFER *wb) {
+ rrdhost_rdlock(host);
+
+ buffer_strcat(wb, "{");
+
+ size_t chart_counter = 0;
+ size_t dimension_counter = 0;
+
+ // for each chart
+ RRDSET *st;
+ rrdset_foreach_read(st, host) {
+ if(rrdset_is_available_for_viewers(st)) {
+ rrdset_rdlock(st);
+
+ buffer_sprintf(wb, "%s\n"
+ "\t\"%s\": {\n"
+ "\t\t\"name\":\"%s\",\n"
+ "\t\t\"family\":\"%s\",\n"
+ "\t\t\"context\":\"%s\",\n"
+ "\t\t\"units\":\"%s\",\n"
+ "\t\t\"last_updated\": %ld,\n"
+ "\t\t\"dimensions\": {"
+ , chart_counter?",":""
+ , st->id
+ , st->name
+ , st->family
+ , st->context
+ , st->units
+ , rrdset_last_entry_t_nolock(st)
+ );
+
+ chart_counter++;
+ dimension_counter = 0;
+
+ // for each dimension
+ RRDDIM *rd;
+ rrddim_foreach_read(rd, st) {
+ if(rd->collections_counter && !rrddim_flag_check(rd, RRDDIM_FLAG_OBSOLETE)) {
+
+ buffer_sprintf(wb, "%s\n"
+ "\t\t\t\"%s\": {\n"
+ "\t\t\t\t\"name\": \"%s\",\n"
+ "\t\t\t\t\"value\": "
+ , dimension_counter?",":""
+ , rd->id
+ , rd->name
+ );
+
+ if(isnan(rd->last_stored_value))
+ buffer_strcat(wb, "null");
+ else
+ buffer_sprintf(wb, CALCULATED_NUMBER_FORMAT, rd->last_stored_value);
+
+ buffer_strcat(wb, "\n\t\t\t}");
+
+ dimension_counter++;
+ }
+ }
+
+ buffer_strcat(wb, "\n\t\t}\n\t}");
+ rrdset_unlock(st);
+ }
+ }
+
+ buffer_strcat(wb, "\n}");
+ rrdhost_unlock(host);
+}
+
diff --git a/web/api/exporters/shell/allmetrics_shell.h b/web/api/exporters/shell/allmetrics_shell.h
new file mode 100644
index 0000000..1d7611a
--- /dev/null
+++ b/web/api/exporters/shell/allmetrics_shell.h
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_API_ALLMETRICS_SHELL_H
+#define NETDATA_API_ALLMETRICS_SHELL_H
+
+#include "../allmetrics.h"
+
+#define ALLMETRICS_FORMAT_SHELL "shell"
+#define ALLMETRICS_FORMAT_PROMETHEUS "prometheus"
+#define ALLMETRICS_FORMAT_PROMETHEUS_ALL_HOSTS "prometheus_all_hosts"
+#define ALLMETRICS_FORMAT_JSON "json"
+
+#define ALLMETRICS_SHELL 1
+#define ALLMETRICS_PROMETHEUS 2
+#define ALLMETRICS_JSON 3
+#define ALLMETRICS_PROMETHEUS_ALL_HOSTS 4
+
+extern void rrd_stats_api_v1_charts_allmetrics_json(RRDHOST *host, BUFFER *wb);
+extern void rrd_stats_api_v1_charts_allmetrics_shell(RRDHOST *host, BUFFER *wb);
+
+#endif //NETDATA_API_ALLMETRICS_SHELL_H
diff --git a/web/api/formatters/Makefile.am b/web/api/formatters/Makefile.am
new file mode 100644
index 0000000..11f239c
--- /dev/null
+++ b/web/api/formatters/Makefile.am
@@ -0,0 +1,15 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+SUBDIRS = \
+ csv \
+ json \
+ ssv \
+ value \
+ $(NULL)
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/web/api/formatters/README.md b/web/api/formatters/README.md
new file mode 100644
index 0000000..1fd2b30
--- /dev/null
+++ b/web/api/formatters/README.md
@@ -0,0 +1,78 @@
+<!--
+title: "Query formatting"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/web/api/formatters/README.md
+-->
+
+# Query formatting
+
+API data queries need to be formatted before being returned to the caller.
+Using API parameters, callers may define the format they wish to get back.
+
+The following formats are supported:
+
+| format|module|content type|description|
+|:----:|:----:|:----------:|:----------|
+| `array`|[ssv](/web/api/formatters/ssv/README.md)|application/json|a JSON array|
+| `csv`|[csv](/web/api/formatters/csv/README.md)|text/plain|a text table, comma separated, with a header line (dimension names) and `\r\n` at the end of the lines|
+| `csvjsonarray`|[csv](/web/api/formatters/csv/README.md)|application/json|a JSON array, with each row as another array (the first row has the dimension names)|
+| `datasource`|[json](/web/api/formatters/json/README.md)|application/json|a Google Visualization Provider `datasource` javascript callback|
+| `datatable`|[json](/web/api/formatters/json/README.md)|application/json|a Google `datatable`|
+| `html`|[csv](/web/api/formatters/csv/README.md)|text/html|an html table|
+| `json`|[json](/web/api/formatters/json/README.md)|application/json|a JSON object|
+| `jsonp`|[json](/web/api/formatters/json/README.md)|application/json|a JSONP javascript callback|
+| `markdown`|[csv](/web/api/formatters/csv/README.md)|text/plain|a markdown table|
+| `ssv`|[ssv](/web/api/formatters/ssv/README.md)|text/plain|a space separated list of values|
+| `ssvcomma`|[ssv](/web/api/formatters/ssv/README.md)|text/plain|a comma separated list of values|
+| `tsv`|[csv](/web/api/formatters/csv/README.md)|text/plain|a TAB delimited `csv` (MS Excel flavor)|
+
+For examples of each format, check the respective module documentation.
+
+## Metadata with the `jsonwrap` option
+
+All data query responses can be wrapped in a JSON object carrying metadata about the query and its results.
+
+This is done by adding `options=jsonwrap` to the API URL (if other `options`
+are already present, append `,jsonwrap` to them).
+
+Here is an example of such an object:
+
+```bash
+# curl -Ss 'https://registry.my-netdata.io/api/v1/data?chart=system.cpu&after=-3600&points=6&group=average&format=csv&options=nonzero,jsonwrap'
+{
+ "api": 1,
+ "id": "system.cpu",
+ "name": "system.cpu",
+ "view_update_every": 600,
+ "update_every": 1,
+ "first_entry": 1540387074,
+ "last_entry": 1540647070,
+ "before": 1540647000,
+ "after": 1540644000,
+ "dimension_names": ["steal", "softirq", "user", "system", "iowait"],
+ "dimension_ids": ["steal", "softirq", "user", "system", "iowait"],
+ "latest_values": [0, 0.2493766, 1.745636, 0.4987531, 0],
+ "view_latest_values": [0.0158314, 0.0516506, 0.866549, 0.7196127, 0.0050002],
+ "dimensions": 5,
+ "points": 6,
+ "format": "csv",
+ "result": "time,steal,softirq,user,system,iowait\n2018-10-27 13:30:00,0.0158314,0.0516506,0.866549,0.7196127,0.0050002\n2018-10-27 13:20:00,0.0149856,0.0529183,0.8673155,0.7121144,0.0049979\n2018-10-27 13:10:00,0.0137501,0.053315,0.8578097,0.7197613,0.0054209\n2018-10-27 13:00:00,0.0154252,0.0554688,0.899432,0.7200638,0.0067252\n2018-10-27 12:50:00,0.0145866,0.0495922,0.8404341,0.7011141,0.0041688\n2018-10-27 12:40:00,0.0162366,0.0595954,0.8827475,0.7020573,0.0041636\n",
+ "min": 0,
+ "max": 0
+}
+```
+
+## Downloading data query result files
+
+Following the [Google Visualization Provider guidelines](https://developers.google.com/chart/interactive/docs/dev/implementing_data_source),
+Netdata supports parsing `tqx` options.
+
+Using these options, any Netdata data query can instruct the web browser to download
+the result and save it under a given filename.
+
+For example, to download a CSV file with CPU utilization of the last hour,
+[click here](https://registry.my-netdata.io/api/v1/data?chart=system.cpu&after=-3600&format=csv&options=nonzero&tqx=outFileName:system+cpu+utilization+of+the+last_hour.csv).
+
+This is done by appending `&tqx=outFileName:FILENAME` to any data query.
+The output will be in the format given with `&format=`.
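+
+For instance, a sketch of the same download done from the command line (the
+output filename is arbitrary):
+
+```bash
+# browsers save under the tqx filename; with curl we pass -o to save locally
+curl -Ss 'https://registry.my-netdata.io/api/v1/data?chart=system.cpu&after=-3600&format=csv&options=nonzero&tqx=outFileName:cpu.csv' -o cpu.csv
+```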
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fweb%2Fapi%2Fformatters%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/web/api/formatters/charts2json.c b/web/api/formatters/charts2json.c
new file mode 100644
index 0000000..856ffb5
--- /dev/null
+++ b/web/api/formatters/charts2json.c
@@ -0,0 +1,192 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "charts2json.h"
+
+// generate JSON for the /api/v1/charts API call
+
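+// detect the installation's release channel by reading RELEASE_CHANNEL
+// from the .environment file in the user configuration directory;
+// defaults to "stable" when the file is missing or unreadable.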
+const char* get_release_channel() {
+ static int use_stable = -1;
+
+ if (use_stable == -1) {
+ char filename[FILENAME_MAX + 1];
+ snprintfz(filename, FILENAME_MAX, "%s/.environment", netdata_configured_user_config_dir);
+ procfile *ff = procfile_open(filename, "=", PROCFILE_FLAG_DEFAULT);
+ if(!ff) {
+ use_stable=1;
+ } else {
+ procfile_set_quotes(ff, "'\"");
+ ff = procfile_readall(ff);
+ if(!ff) {
+ use_stable=1;
+ } else {
+ unsigned int i;
+ for(i = 0; i < procfile_lines(ff); i++) {
+ if (!procfile_linewords(ff, i)) continue;
+
+ if (!strcmp(procfile_lineword(ff, i, 0), "RELEASE_CHANNEL") && !strcmp(procfile_lineword(ff, i, 1), "stable")) {
+ use_stable = 1;
+ break;
+ }
+ }
+ procfile_close(ff);
+ if (use_stable == -1) use_stable = 0;
+ }
+ }
+ }
+ return (use_stable)?"stable":"nightly";
+}
+
+void charts2json(RRDHOST *host, BUFFER *wb, int skip_volatile, int show_archived) {
+ static char *custom_dashboard_info_js_filename = NULL;
+ size_t c, dimensions = 0, memory = 0, alarms = 0;
+ RRDSET *st;
+
+ time_t now = now_realtime_sec();
+
+ if(unlikely(!custom_dashboard_info_js_filename))
+ custom_dashboard_info_js_filename = config_get(CONFIG_SECTION_WEB, "custom dashboard_info.js", "");
+
+ buffer_sprintf(wb, "{\n"
+ "\t\"hostname\": \"%s\""
+ ",\n\t\"version\": \"%s\""
+ ",\n\t\"release_channel\": \"%s\""
+ ",\n\t\"os\": \"%s\""
+ ",\n\t\"timezone\": \"%s\""
+ ",\n\t\"update_every\": %d"
+ ",\n\t\"history\": %ld"
+ ",\n\t\"memory_mode\": \"%s\""
+ ",\n\t\"custom_info\": \"%s\""
+ ",\n\t\"charts\": {"
+ , host->hostname
+ , host->program_version
+ , get_release_channel()
+ , host->os
+ , host->timezone
+ , host->rrd_update_every
+ , host->rrd_history_entries
+ , rrd_memory_mode_name(host->rrd_memory_mode)
+ , custom_dashboard_info_js_filename
+ );
+
+ c = 0;
+ rrdhost_rdlock(host);
+ rrdset_foreach_read(st, host) {
+ if ((!show_archived && rrdset_is_available_for_viewers(st)) || (show_archived && rrdset_is_archived(st))) {
+ if(c) buffer_strcat(wb, ",");
+ buffer_strcat(wb, "\n\t\t\"");
+ buffer_strcat(wb, st->id);
+ buffer_strcat(wb, "\": ");
+ rrdset2json(st, wb, &dimensions, &memory, skip_volatile);
+
+ c++;
+ st->last_accessed_time = now;
+ }
+ }
+
+ RRDCALC *rc;
+ for(rc = host->alarms; rc ; rc = rc->next) {
+ if(rc->rrdset)
+ alarms++;
+ }
+ rrdhost_unlock(host);
+
+ buffer_sprintf(wb
+ , "\n\t}"
+ ",\n\t\"charts_count\": %zu"
+ ",\n\t\"dimensions_count\": %zu"
+ ",\n\t\"alarms_count\": %zu"
+ ",\n\t\"rrd_memory_bytes\": %zu"
+ ",\n\t\"hosts_count\": %zu"
+ ",\n\t\"hosts\": ["
+ , c
+ , dimensions
+ , alarms
+ , memory
+ , rrd_hosts_available
+ );
+
+ if(unlikely(rrd_hosts_available > 1)) {
+ rrd_rdlock();
+
+ size_t found = 0;
+ RRDHOST *h;
+ rrdhost_foreach_read(h) {
+ if(!rrdhost_should_be_removed(h, host, now) && !rrdhost_flag_check(h, RRDHOST_FLAG_ARCHIVED)) {
+ buffer_sprintf(wb
+ , "%s\n\t\t{"
+ "\n\t\t\t\"hostname\": \"%s\""
+ "\n\t\t}"
+ , (found > 0) ? "," : ""
+ , h->hostname
+ );
+
+ found++;
+ }
+ }
+
+ rrd_unlock();
+ }
+ else {
+ buffer_sprintf(wb
+ , "\n\t\t{"
+ "\n\t\t\t\"hostname\": \"%s\""
+ "\n\t\t}"
+ , host->hostname
+ );
+ }
+
+ buffer_sprintf(wb, "\n\t]\n}\n");
+}
+
+// generate collectors list for the api/v1/info call
+
+struct collector {
+ char *plugin;
+ char *module;
+};
+
+struct array_printer {
+ int c;
+ BUFFER *wb;
+};
+
+int print_collector(void *entry, void *data) {
+ struct array_printer *ap = (struct array_printer *)data;
+ BUFFER *wb = ap->wb;
+ struct collector *col=(struct collector *) entry;
+ if(ap->c) buffer_strcat(wb, ",");
+ buffer_strcat(wb, "\n\t\t{\n\t\t\t\"plugin\": \"");
+ buffer_strcat(wb, col->plugin);
+ buffer_strcat(wb, "\",\n\t\t\t\"module\": \"");
+ buffer_strcat(wb, col->module);
+ buffer_strcat(wb, "\"\n\t\t}");
+ (ap->c)++;
+ return 0;
+}
+
+void chartcollectors2json(RRDHOST *host, BUFFER *wb) {
+ DICTIONARY *dict = dictionary_create(DICTIONARY_FLAG_SINGLE_THREADED);
+ RRDSET *st;
+ char name[500];
+
+ time_t now = now_realtime_sec();
+ rrdhost_rdlock(host);
+ rrdset_foreach_read(st, host) {
+ if (rrdset_is_available_for_viewers(st)) {
+ struct collector col = {
+ .plugin = st->plugin_name ? st->plugin_name : "",
+ .module = st->module_name ? st->module_name : ""
+ };
+ sprintf(name, "%s:%s", col.plugin, col.module);
+ dictionary_set(dict, name, &col, sizeof(struct collector));
+ st->last_accessed_time = now;
+ }
+ }
+ rrdhost_unlock(host);
+ struct array_printer ap = {
+ .c = 0,
+ .wb = wb
+ };
+ dictionary_get_all(dict, print_collector, &ap);
+ dictionary_destroy(dict);
+}
diff --git a/web/api/formatters/charts2json.h b/web/api/formatters/charts2json.h
new file mode 100644
index 0000000..2d8cce3
--- /dev/null
+++ b/web/api/formatters/charts2json.h
@@ -0,0 +1,12 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_API_FORMATTER_CHARTS2JSON_H
+#define NETDATA_API_FORMATTER_CHARTS2JSON_H
+
+#include "rrd2json.h"
+
+extern void charts2json(RRDHOST *host, BUFFER *wb, int skip_volatile, int show_archived);
+extern void chartcollectors2json(RRDHOST *host, BUFFER *wb);
+extern const char* get_release_channel();
+
+#endif //NETDATA_API_FORMATTER_CHARTS2JSON_H
diff --git a/web/api/formatters/csv/Makefile.am b/web/api/formatters/csv/Makefile.am
new file mode 100644
index 0000000..161784b
--- /dev/null
+++ b/web/api/formatters/csv/Makefile.am
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/web/api/formatters/csv/README.md b/web/api/formatters/csv/README.md
new file mode 100644
index 0000000..2a859e2
--- /dev/null
+++ b/web/api/formatters/csv/README.md
@@ -0,0 +1,144 @@
+<!--
+title: "CSV formatter"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/web/api/formatters/csv/README.md
+-->
+
+# CSV formatter
+
+The CSV formatter presents [results of database queries](/web/api/queries/README.md) in the following formats:
+
+| format|content type|description|
+| :----:|:----------:|:----------|
+| `csv`|text/plain|a text table, comma separated, with a header line (dimension names) and `\r\n` at the end of the lines|
+| `csvjsonarray`|application/json|a JSON array, with each row as another array (the first row has the dimension names)|
+| `tsv`|text/plain|like `csv` but TAB is used instead of comma to separate values (MS Excel flavor)|
+| `html`|text/html|an html table|
+| `markdown`|text/plain|markdown table|
+
+In all formats the date and time is the first column.
+
+The CSV formatter respects the following API `&options=`:
+
+| option|supported|description|
+|:----:|:-------:|:----------|
+| `nonzero`|yes|to return only the dimensions that have at least a non-zero value|
+| `flip`|yes|to return the rows older to newer (the default is newer to older)|
+| `seconds`|yes|to return the date and time in unix timestamp|
+| `ms`|yes|to return the date and time as a unix timestamp in milliseconds|
+| `percent`|yes|to replace all values with their percentage over the row total|
+| `abs`|yes|to turn all values positive|
+| `null2zero`|yes|to replace gaps with zeros (the default prints the string `null`)|
+
+## Examples
+
+Get the system total bandwidth for all physical network interfaces, over the last hour,
+in 6 rows (one for every 10 minutes), in `csv` format:
+
+Netdata always returns bandwidth in `kilobits`.
+
+```bash
+# curl -Ss 'https://registry.my-netdata.io/api/v1/data?chart=system.net&format=csv&after=-3600&group=sum&points=6&options=abs'
+time,received,sent
+2018-10-26 23:50:00,90214.67847,215137.79762
+2018-10-26 23:40:00,90126.32286,238587.57522
+2018-10-26 23:30:00,86061.22688,213389.23526
+2018-10-26 23:20:00,85590.75164,206129.01608
+2018-10-26 23:10:00,83163.30691,194311.77384
+2018-10-26 23:00:00,85167.29657,197538.07773
+```
+
+---
+
+Get the max RAM used by the SQL server and any cron jobs, over the last hour, in 2 rows (one for every 30
+minutes), in `tsv` format, and format the date and time as unix timestamp:
+
+Netdata always returns memory in `MB`.
+
+```bash
+# curl -Ss 'https://registry.my-netdata.io/api/v1/data?chart=apps.mem&format=tsv&after=-3600&group=max&points=2&options=nonzero,seconds&dimensions=sql,cron'
+time sql cron
+1540598400 61.95703 0.25
+1540596600 61.95703 0.25
+```
+
+---
+
+Get an HTML table of the last 4 values (4 seconds) of system CPU utilization:
+
+Netdata always returns CPU utilization as `%`.
+
+```bash
+# curl -Ss 'https://registry.my-netdata.io/api/v1/data?chart=system.cpu&format=html&after=-4&options=nonzero'
+<html>
+<center>
+<table border="0" cellpadding="5" cellspacing="5">
+<tr><td>time</td><td>softirq</td><td>user</td><td>system</td></tr>
+<tr><td>2018-10-27 00:16:07</td><td>0.25</td><td>1</td><td>0.75</td></tr>
+<tr><td>2018-10-27 00:16:06</td><td>0</td><td>1.0025063</td><td>0.5012531</td></tr>
+<tr><td>2018-10-27 00:16:05</td><td>0</td><td>1</td><td>0.75</td></tr>
+<tr><td>2018-10-27 00:16:04</td><td>0</td><td>1.0025063</td><td>0.7518797</td></tr>
+</table>
+</center>
+</html>
+```
+
+This is how it looks when rendered by a web browser:
+
+![image](https://user-images.githubusercontent.com/2662304/47597887-bafbf480-d99c-11e8-864a-d880bb8d2e5b.png)
+
+---
+
+Get a JSON array with the average bandwidth rate of the mysql server, over the last hour, in 6 values
+(one every 10 minutes), and return the date and time in milliseconds:
+
+Netdata always returns bandwidth rates in `kilobits/s`.
+
+```bash
+# curl -Ss 'https://registry.my-netdata.io/api/v1/data?chart=mysql_local.net&format=csvjsonarray&after=-3600&points=6&group=average&options=abs,ms'
+[
+["time","in","out"],
+[1540599600000,0.7499986,120.2810185],
+[1540599000000,0.7500019,120.2815509],
+[1540598400000,0.7499999,120.2812319],
+[1540597800000,0.7500044,120.2819634],
+[1540597200000,0.7499968,120.2807337],
+[1540596600000,0.7499988,120.2810527]
+]
+```
+
+---
+
+Get the number of processes started per minute, for the last 10 minutes, in `markdown` format:
+
+```bash
+# curl -Ss 'https://registry.my-netdata.io/api/v1/data?chart=system.forks&format=markdown&after=-600&points=10&group=sum'
+time | started
+:---: |:---:
+2018-10-27 03:52:00| 245.1706149
+2018-10-27 03:51:00| 152.6654636
+2018-10-27 03:50:00| 163.1755789
+2018-10-27 03:49:00| 176.1574766
+2018-10-27 03:48:00| 178.0137076
+2018-10-27 03:47:00| 183.8306543
+2018-10-27 03:46:00| 264.1635621
+2018-10-27 03:45:00| 205.001551
+2018-10-27 03:44:00| 7026.9852167
+2018-10-27 03:43:00| 205.9904794
+```
+
+And this is how it looks when formatted:
+
+| time | started |
+|:--:|:-----:|
+| 2018-10-27 03:52:00 | 245.1706149 |
+| 2018-10-27 03:51:00 | 152.6654636 |
+| 2018-10-27 03:50:00 | 163.1755789 |
+| 2018-10-27 03:49:00 | 176.1574766 |
+| 2018-10-27 03:48:00 | 178.0137076 |
+| 2018-10-27 03:47:00 | 183.8306543 |
+| 2018-10-27 03:46:00 | 264.1635621 |
+| 2018-10-27 03:45:00 | 205.001551 |
+| 2018-10-27 03:44:00 | 7026.9852167 |
+| 2018-10-27 03:43:00 | 205.9904794 |
+
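+---
+
+The options can be combined. The following is a hypothetical sketch (the chart and the
+returned values are illustrative) requesting unix timestamps, oldest-to-newest ordering,
+and zero-filled gaps:
+
+```bash
+# curl -Ss 'https://registry.my-netdata.io/api/v1/data?chart=system.load&format=csv&after=-30&points=3&options=seconds,flip,null2zero'
+time,load1,load5,load15
+1540596600,0.25,0.5,0.75
+1540596610,0,0.5,0.75
+1540596620,0.25,0.5,0.75
+```
+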
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fweb%2Fapi%2Fformatters%2Fcsv%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/web/api/formatters/csv/csv.c b/web/api/formatters/csv/csv.c
new file mode 100644
index 0000000..da0a6b5
--- /dev/null
+++ b/web/api/formatters/csv/csv.c
@@ -0,0 +1,143 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "libnetdata/libnetdata.h"
+#include "csv.h"
+
+void rrdr2csv(RRDR *r, BUFFER *wb, uint32_t format, RRDR_OPTIONS options, const char *startline, const char *separator, const char *endline, const char *betweenlines, RRDDIM *temp_rd) {
+ rrdset_check_rdlock(r->st);
+
+ //info("RRD2CSV(): %s: BEGIN", r->st->id);
+ long c, i;
+ RRDDIM *d;
+
+ // print the csv header
+ for(c = 0, i = 0, d = temp_rd?temp_rd:r->st->dimensions; d && c < r->d ;c++, d = d->next) {
+ if(unlikely(r->od[c] & RRDR_DIMENSION_HIDDEN)) continue;
+ if(unlikely((options & RRDR_OPTION_NONZERO) && !(r->od[c] & RRDR_DIMENSION_NONZERO))) continue;
+
+ if(!i) {
+ buffer_strcat(wb, startline);
+ if(options & RRDR_OPTION_LABEL_QUOTES) buffer_strcat(wb, "\"");
+ buffer_strcat(wb, "time");
+ if(options & RRDR_OPTION_LABEL_QUOTES) buffer_strcat(wb, "\"");
+ }
+ buffer_strcat(wb, separator);
+ if(options & RRDR_OPTION_LABEL_QUOTES) buffer_strcat(wb, "\"");
+ buffer_strcat(wb, d->name);
+ if(options & RRDR_OPTION_LABEL_QUOTES) buffer_strcat(wb, "\"");
+ i++;
+ }
+ buffer_strcat(wb, endline);
+
+ if(format == DATASOURCE_CSV_MARKDOWN) {
+ // print the --- line after header
+ for(c = 0, i = 0, d = temp_rd?temp_rd:r->st->dimensions; d && c < r->d ;c++, d = d->next) {
+ if(unlikely(r->od[c] & RRDR_DIMENSION_HIDDEN)) continue;
+ if(unlikely((options & RRDR_OPTION_NONZERO) && !(r->od[c] & RRDR_DIMENSION_NONZERO))) continue;
+
+ if(!i) {
+ buffer_strcat(wb, startline);
+ if(options & RRDR_OPTION_LABEL_QUOTES) buffer_strcat(wb, "\"");
+ buffer_strcat(wb, ":---:");
+ if(options & RRDR_OPTION_LABEL_QUOTES) buffer_strcat(wb, "\"");
+ }
+ buffer_strcat(wb, separator);
+ if(options & RRDR_OPTION_LABEL_QUOTES) buffer_strcat(wb, "\"");
+ buffer_strcat(wb, ":---:");
+ if(options & RRDR_OPTION_LABEL_QUOTES) buffer_strcat(wb, "\"");
+ i++;
+ }
+ buffer_strcat(wb, endline);
+ }
+
+ if(!i) {
+ // no dimensions present
+ return;
+ }
+
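+ // by default rows are printed newest to oldest; the "flip" option
+ // (RRDR_OPTION_REVERSED) switches to natural, oldest-to-newest order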
+ long start = 0, end = rrdr_rows(r), step = 1;
+ if(!(options & RRDR_OPTION_REVERSED)) {
+ start = rrdr_rows(r) - 1;
+ end = -1;
+ step = -1;
+ }
+
+ // for each line in the array
+ calculated_number total = 1;
+ for(i = start; i != end ;i += step) {
+ calculated_number *cn = &r->v[ i * r->d ];
+ RRDR_VALUE_FLAGS *co = &r->o[ i * r->d ];
+
+ buffer_strcat(wb, betweenlines);
+ buffer_strcat(wb, startline);
+
+ time_t now = r->t[i];
+
+ if((options & RRDR_OPTION_SECONDS) || (options & RRDR_OPTION_MILLISECONDS)) {
+ // print the timestamp of the line
+ buffer_rrd_value(wb, (calculated_number)now);
+ // in ms
+ if(options & RRDR_OPTION_MILLISECONDS) buffer_strcat(wb, "000");
+ }
+ else {
+ // generate the local date time
+ struct tm tmbuf, *tm = localtime_r(&now, &tmbuf);
+ if(!tm) { error("localtime() failed."); continue; }
+ buffer_date(wb, tm->tm_year + 1900, tm->tm_mon + 1, tm->tm_mday, tm->tm_hour, tm->tm_min, tm->tm_sec);
+ }
+
+ int set_min_max = 0;
+ if(unlikely(options & RRDR_OPTION_PERCENTAGE)) {
+ total = 0;
+ for(c = 0, d = temp_rd?temp_rd:r->st->dimensions; d && c < r->d ;c++, d = d->next) {
+ calculated_number n = cn[c];
+
+ if(likely((options & RRDR_OPTION_ABSOLUTE) && n < 0))
+ n = -n;
+
+ total += n;
+ }
+ // prevent a division by zero
+ if(total == 0) total = 1;
+ set_min_max = 1;
+ }
+
+ // for each dimension
+ for(c = 0, d = temp_rd?temp_rd:r->st->dimensions; d && c < r->d ;c++, d = d->next) {
+ if(unlikely(r->od[c] & RRDR_DIMENSION_HIDDEN)) continue;
+ if(unlikely((options & RRDR_OPTION_NONZERO) && !(r->od[c] & RRDR_DIMENSION_NONZERO))) continue;
+
+ buffer_strcat(wb, separator);
+
+ calculated_number n = cn[c];
+
+ if(co[c] & RRDR_VALUE_EMPTY) {
+ if(options & RRDR_OPTION_NULL2ZERO)
+ buffer_strcat(wb, "0");
+ else
+ buffer_strcat(wb, "null");
+ }
+ else {
+ if(unlikely((options & RRDR_OPTION_ABSOLUTE) && n < 0))
+ n = -n;
+
+ if(unlikely(options & RRDR_OPTION_PERCENTAGE)) {
+ n = n * 100 / total;
+
+ if(unlikely(set_min_max)) {
+ r->min = r->max = n;
+ set_min_max = 0;
+ }
+
+ if(n < r->min) r->min = n;
+ if(n > r->max) r->max = n;
+ }
+
+ buffer_rrd_value(wb, n);
+ }
+ }
+
+ buffer_strcat(wb, endline);
+ }
+ //info("RRD2CSV(): %s: END", r->st->id);
+}
diff --git a/web/api/formatters/csv/csv.h b/web/api/formatters/csv/csv.h
new file mode 100644
index 0000000..cf6020d
--- /dev/null
+++ b/web/api/formatters/csv/csv.h
@@ -0,0 +1,12 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_API_FORMATTER_CSV_H
+#define NETDATA_API_FORMATTER_CSV_H
+
+#include "web/api/queries/rrdr.h"
+
+extern void rrdr2csv(RRDR *r, BUFFER *wb, uint32_t format, RRDR_OPTIONS options, const char *startline, const char *separator, const char *endline, const char *betweenlines, RRDDIM *temp_rd);
+
+#include "../rrd2json.h"
+
+#endif //NETDATA_API_FORMATTER_CSV_H
diff --git a/web/api/formatters/json/Makefile.am b/web/api/formatters/json/Makefile.am
new file mode 100644
index 0000000..161784b
--- /dev/null
+++ b/web/api/formatters/json/Makefile.am
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/web/api/formatters/json/README.md b/web/api/formatters/json/README.md
new file mode 100644
index 0000000..685a3f2
--- /dev/null
+++ b/web/api/formatters/json/README.md
@@ -0,0 +1,156 @@
+<!--
+title: "JSON formatter"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/web/api/formatters/json/README.md
+-->
+
+# JSON formatter
+
+The JSON formatter presents [results of database queries](/web/api/queries/README.md) in the following formats:
+
+| format | content type | description|
+|:----:|:----------:|:----------|
+| `json` | application/json | return the query result as a json object|
+| `jsonp` | application/json | return the query result as a JSONP javascript callback|
+| `datatable` | application/json | return the query result as a Google `datatable`|
+| `datasource` | application/json | return the query result as a Google Visualization Provider `datasource` javascript callback|
+
+The JSON formatter respects the following API `&options=`:
+
+| option | supported | description|
+|:----:|:-------:|:----------|
+| `google_json` | yes | enable the Google flavor of JSON (using double quotes for strings and the `Date()` function for dates)|
+| `objectrows` | yes | return each row as an object, instead of an array|
+| `nonzero` | yes | to return only the dimensions that have at least a non-zero value|
+| `flip` | yes | to return the rows older to newer (the default is newer to older)|
+| `seconds` | yes | to return the date and time in unix timestamp|
+| `ms` | yes | to return the date and time as a unix timestamp in milliseconds|
+| `percent` | yes | to replace all values with their percentage over the row total|
+| `abs` | yes | to turn all values positive|
+| `null2zero` | yes | to replace gaps with zeros (the default prints the string `null`)|
+
+## Examples
+
+To show the differences between these formats, the following examples query the same
+chart (having just one dimension called `active`), changing only the query `format` and its `options`.
+
+> Using `format=json` and `options=`
+
+```bash
+# curl -Ss 'https://registry.my-netdata.io/api/v1/data?chart=nginx_local.connections&after=-3600&points=6&group=average&format=json&options='
+{
+ "labels": ["time", "active"],
+ "data":
+ [
+ [ 1540644600, 224.2516667],
+ [ 1540644000, 229.29],
+ [ 1540643400, 222.41],
+ [ 1540642800, 226.6816667],
+ [ 1540642200, 246.4083333],
+ [ 1540641600, 241.0966667]
+ ]
+}
+```
+
+> Using `format=json` and `options=objectrows`
+
+```bash
+# curl -Ss 'https://registry.my-netdata.io/api/v1/data?chart=nginx_local.connections&after=-3600&points=6&group=average&format=json&options=objectrows'
+{
+ "labels": ["time", "active"],
+ "data":
+ [
+ { "time": 1540644600, "active": 224.2516667},
+ { "time": 1540644000, "active": 229.29},
+ { "time": 1540643400, "active": 222.41},
+ { "time": 1540642800, "active": 226.6816667},
+ { "time": 1540642200, "active": 246.4083333},
+ { "time": 1540641600, "active": 241.0966667}
+ ]
+}
+```
+
+> Using `format=json` and `options=objectrows,google_json`
+
+```bash
+# curl -Ss 'https://registry.my-netdata.io/api/v1/data?chart=nginx_local.connections&after=-3600&points=6&group=average&format=json&options=objectrows,google_json'
+{
+ "labels": ["time", "active"],
+ "data":
+ [
+ { "time": new Date(2018,9,27,12,50,0), "active": 224.2516667},
+ { "time": new Date(2018,9,27,12,40,0), "active": 229.29},
+ { "time": new Date(2018,9,27,12,30,0), "active": 222.41},
+ { "time": new Date(2018,9,27,12,20,0), "active": 226.6816667},
+ { "time": new Date(2018,9,27,12,10,0), "active": 246.4083333},
+ { "time": new Date(2018,9,27,12,0,0), "active": 241.0966667}
+ ]
+}
+```
+
+> Using `format=jsonp` and `options=`
+
+```bash
+# curl -Ss 'https://registry.my-netdata.io/api/v1/data?chart=nginx_local.connections&after=-3600&points=6&group=average&format=jsonp&options='
+callback({
+ "labels": ["time", "active"],
+ "data":
+ [
+ [ 1540645200, 235.885],
+ [ 1540644600, 224.2516667],
+ [ 1540644000, 229.29],
+ [ 1540643400, 222.41],
+ [ 1540642800, 226.6816667],
+ [ 1540642200, 246.4083333]
+ ]
+});
+```
+
+> Using `format=datatable` and `options=`
+
+```bash
+# curl -Ss 'https://registry.my-netdata.io/api/v1/data?chart=nginx_local.connections&after=-3600&points=6&group=average&format=datatable&options='
+{
+ "cols":
+ [
+ {"id":"","label":"time","pattern":"","type":"datetime"},
+ {"id":"","label":"","pattern":"","type":"string","p":{"role":"annotation"}},
+ {"id":"","label":"","pattern":"","type":"string","p":{"role":"annotationText"}},
+ {"id":"","label":"active","pattern":"","type":"number"}
+ ],
+ "rows":
+ [
+ {"c":[{"v":"Date(2018,9,27,13,0,0)"},{"v":null},{"v":null},{"v":235.885}]},
+ {"c":[{"v":"Date(2018,9,27,12,50,0)"},{"v":null},{"v":null},{"v":224.2516667}]},
+ {"c":[{"v":"Date(2018,9,27,12,40,0)"},{"v":null},{"v":null},{"v":229.29}]},
+ {"c":[{"v":"Date(2018,9,27,12,30,0)"},{"v":null},{"v":null},{"v":222.41}]},
+ {"c":[{"v":"Date(2018,9,27,12,20,0)"},{"v":null},{"v":null},{"v":226.6816667}]},
+ {"c":[{"v":"Date(2018,9,27,12,10,0)"},{"v":null},{"v":null},{"v":246.4083333}]}
+ ]
+}
+```
+
+> Using `format=datasource` and `options=`
+
+```bash
+# curl -Ss 'https://registry.my-netdata.io/api/v1/data?chart=nginx_local.connections&after=-3600&points=6&group=average&format=datasource&options='
+google.visualization.Query.setResponse({version:'0.6',reqId:'0',status:'ok',sig:'1540645368',table:{
+ "cols":
+ [
+ {"id":"","label":"time","pattern":"","type":"datetime"},
+ {"id":"","label":"","pattern":"","type":"string","p":{"role":"annotation"}},
+ {"id":"","label":"","pattern":"","type":"string","p":{"role":"annotationText"}},
+ {"id":"","label":"active","pattern":"","type":"number"}
+ ],
+ "rows":
+ [
+ {"c":[{"v":"Date(2018,9,27,13,0,0)"},{"v":null},{"v":null},{"v":235.885}]},
+ {"c":[{"v":"Date(2018,9,27,12,50,0)"},{"v":null},{"v":null},{"v":224.2516667}]},
+ {"c":[{"v":"Date(2018,9,27,12,40,0)"},{"v":null},{"v":null},{"v":229.29}]},
+ {"c":[{"v":"Date(2018,9,27,12,30,0)"},{"v":null},{"v":null},{"v":222.41}]},
+ {"c":[{"v":"Date(2018,9,27,12,20,0)"},{"v":null},{"v":null},{"v":226.6816667}]},
+ {"c":[{"v":"Date(2018,9,27,12,10,0)"},{"v":null},{"v":null},{"v":246.4083333}]}
+ ]
+}});
+```
+
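+> Using `format=json` and `options=percent`
+
+This is a hypothetical sketch based on the `percent` option documented above; since this
+chart has a single dimension, the row total equals that dimension and every value becomes
+`100` (timestamps illustrative):
+
+```bash
+# curl -Ss 'https://registry.my-netdata.io/api/v1/data?chart=nginx_local.connections&after=-3600&points=2&group=average&format=json&options=percent'
+{
+ "labels": ["time", "active"],
+ "data":
+ [
+ [ 1540644600, 100],
+ [ 1540644000, 100]
+ ]
+}
+```
+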
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fweb%2Fapi%2Fformatters%2Fjson%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/web/api/formatters/json/json.c b/web/api/formatters/json/json.c
new file mode 100644
index 0000000..f28eb57
--- /dev/null
+++ b/web/api/formatters/json/json.c
@@ -0,0 +1,248 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "json.h"
+
+#define JSON_DATES_JS 1
+#define JSON_DATES_TIMESTAMP 2
+
+void rrdr2json(RRDR *r, BUFFER *wb, RRDR_OPTIONS options, int datatable, RRDDIM *temp_rd) {
+ rrdset_check_rdlock(r->st);
+
+ //info("RRD2JSON(): %s: BEGIN", r->st->id);
+ int row_annotations = 0, dates, dates_with_new = 0;
+ char kq[2] = "", // key quote
+ sq[2] = "", // string quote
+ pre_label[101] = "", // before each label
+ post_label[101] = "", // after each label
+ pre_date[101] = "", // the beginning of line, to the date
+ post_date[101] = "", // closing the date
+ pre_value[101] = "", // before each value
+ post_value[101] = "", // after each value
+ post_line[101] = "", // at the end of each row
+ normal_annotation[201] = "", // default row annotation
+ overflow_annotation[201] = "", // overflow row annotation
+ data_begin[101] = "", // between labels and values
+ finish[101] = ""; // at the end of everything
+
+ if(datatable) {
+ dates = JSON_DATES_JS;
+ if( options & RRDR_OPTION_GOOGLE_JSON ) {
+ kq[0] = '\0';
+ sq[0] = '\'';
+ }
+ else {
+ kq[0] = '"';
+ sq[0] = '"';
+ }
+ row_annotations = 1;
+ snprintfz(pre_date, 100, " {%sc%s:[{%sv%s:%s", kq, kq, kq, kq, sq);
+ snprintfz(post_date, 100, "%s}", sq);
+ snprintfz(pre_label, 100, ",\n {%sid%s:%s%s,%slabel%s:%s", kq, kq, sq, sq, kq, kq, sq);
+ snprintfz(post_label, 100, "%s,%spattern%s:%s%s,%stype%s:%snumber%s}", sq, kq, kq, sq, sq, kq, kq, sq, sq);
+ snprintfz(pre_value, 100, ",{%sv%s:", kq, kq);
+ strcpy(post_value, "}");
+ strcpy(post_line, "]}");
+ snprintfz(data_begin, 100, "\n ],\n %srows%s:\n [\n", kq, kq);
+ strcpy(finish, "\n ]\n}");
+
+ snprintfz(overflow_annotation, 200, ",{%sv%s:%sRESET OR OVERFLOW%s},{%sv%s:%sThe counters have been wrapped.%s}", kq, kq, sq, sq, kq, kq, sq, sq);
+ snprintfz(normal_annotation, 200, ",{%sv%s:null},{%sv%s:null}", kq, kq, kq, kq);
+
+ buffer_sprintf(wb, "{\n %scols%s:\n [\n", kq, kq);
+ buffer_sprintf(wb, " {%sid%s:%s%s,%slabel%s:%stime%s,%spattern%s:%s%s,%stype%s:%sdatetime%s},\n", kq, kq, sq, sq, kq, kq, sq, sq, kq, kq, sq, sq, kq, kq, sq, sq);
+ buffer_sprintf(wb, " {%sid%s:%s%s,%slabel%s:%s%s,%spattern%s:%s%s,%stype%s:%sstring%s,%sp%s:{%srole%s:%sannotation%s}},\n", kq, kq, sq, sq, kq, kq, sq, sq, kq, kq, sq, sq, kq, kq, sq, sq, kq, kq, kq, kq, sq, sq);
+ buffer_sprintf(wb, " {%sid%s:%s%s,%slabel%s:%s%s,%spattern%s:%s%s,%stype%s:%sstring%s,%sp%s:{%srole%s:%sannotationText%s}}", kq, kq, sq, sq, kq, kq, sq, sq, kq, kq, sq, sq, kq, kq, sq, sq, kq, kq, kq, kq, sq, sq);
+
+ // remove the valueobjects flag
+ // google wants its own keys
+ if(options & RRDR_OPTION_OBJECTSROWS)
+ options &= ~RRDR_OPTION_OBJECTSROWS;
+ }
+ else {
+ kq[0] = '"';
+ sq[0] = '"';
+ if(options & RRDR_OPTION_GOOGLE_JSON) {
+ dates = JSON_DATES_JS;
+ dates_with_new = 1;
+ }
+ else {
+ dates = JSON_DATES_TIMESTAMP;
+ dates_with_new = 0;
+ }
+ if( options & RRDR_OPTION_OBJECTSROWS )
+ strcpy(pre_date, " { ");
+ else
+ strcpy(pre_date, " [ ");
+ strcpy(pre_label, ", \"");
+ strcpy(post_label, "\"");
+ strcpy(pre_value, ", ");
+ if( options & RRDR_OPTION_OBJECTSROWS )
+ strcpy(post_line, "}");
+ else
+ strcpy(post_line, "]");
+ snprintfz(data_begin, 100, "],\n %sdata%s:\n [\n", kq, kq);
+ strcpy(finish, "\n ]\n}");
+
+ buffer_sprintf(wb, "{\n %slabels%s: [", kq, kq);
+ buffer_sprintf(wb, "%stime%s", sq, sq);
+ }
+
+ // -------------------------------------------------------------------------
+ // print the JSON header
+
+ long c, i;
+ RRDDIM *rd;
+
+ // print the header lines
+ for(c = 0, i = 0, rd = temp_rd?temp_rd:r->st->dimensions; rd && c < r->d ;c++, rd = rd->next) {
+ if(unlikely(r->od[c] & RRDR_DIMENSION_HIDDEN)) continue;
+ if(unlikely((options & RRDR_OPTION_NONZERO) && !(r->od[c] & RRDR_DIMENSION_NONZERO))) continue;
+
+ buffer_strcat(wb, pre_label);
+ buffer_strcat(wb, rd->name);
+// buffer_strcat(wb, ".");
+// buffer_strcat(wb, rd->rrdset->name);
+ buffer_strcat(wb, post_label);
+ i++;
+ }
+ if(!i) {
+ buffer_strcat(wb, pre_label);
+ buffer_strcat(wb, "no data");
+ buffer_strcat(wb, post_label);
+ }
+
+ // print the begin of row data
+ buffer_strcat(wb, data_begin);
+
+ // if all dimensions are hidden, print a null
+ if(!i) {
+ buffer_strcat(wb, finish);
+ return;
+ }
+
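+ // same ordering convention as the CSV/SSV formatters: newest to oldest,
+ // unless the "flip" option set RRDR_OPTION_REVERSED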
+ long start = 0, end = rrdr_rows(r), step = 1;
+ if(!(options & RRDR_OPTION_REVERSED)) {
+ start = rrdr_rows(r) - 1;
+ end = -1;
+ step = -1;
+ }
+
+ // for each line in the array
+ calculated_number total = 1;
+ for(i = start; i != end ;i += step) {
+ calculated_number *cn = &r->v[ i * r->d ];
+ RRDR_VALUE_FLAGS *co = &r->o[ i * r->d ];
+
+ time_t now = r->t[i];
+
+ if(dates == JSON_DATES_JS) {
+ // generate the local date time
+ struct tm tmbuf, *tm = localtime_r(&now, &tmbuf);
+ if(!tm) { error("localtime_r() failed."); continue; }
+
+ if(likely(i != start)) buffer_strcat(wb, ",\n");
+ buffer_strcat(wb, pre_date);
+
+ if( options & RRDR_OPTION_OBJECTSROWS )
+ buffer_sprintf(wb, "%stime%s: ", kq, kq);
+
+ if(dates_with_new)
+ buffer_strcat(wb, "new ");
+
+ buffer_jsdate(wb, tm->tm_year + 1900, tm->tm_mon, tm->tm_mday, tm->tm_hour, tm->tm_min, tm->tm_sec);
+
+ buffer_strcat(wb, post_date);
+
+ if(row_annotations) {
+ // google supports one annotation per row
+ int annotation_found = 0;
+ for(c = 0, rd = temp_rd?temp_rd:r->st->dimensions; rd && c < r->d ;c++, rd = rd->next) {
+ if(unlikely(!(r->od[c] & RRDR_DIMENSION_SELECTED))) continue;
+
+ if(co[c] & RRDR_VALUE_RESET) {
+ buffer_strcat(wb, overflow_annotation);
+ annotation_found = 1;
+ break;
+ }
+ }
+ if(!annotation_found)
+ buffer_strcat(wb, normal_annotation);
+ }
+ }
+ else {
+ // print the timestamp of the line
+ if(likely(i != start)) buffer_strcat(wb, ",\n");
+ buffer_strcat(wb, pre_date);
+
+ if( options & RRDR_OPTION_OBJECTSROWS )
+ buffer_sprintf(wb, "%stime%s: ", kq, kq);
+
+ buffer_rrd_value(wb, (calculated_number)r->t[i]);
+ // in ms
+ if(options & RRDR_OPTION_MILLISECONDS) buffer_strcat(wb, "000");
+
+ buffer_strcat(wb, post_date);
+ }
+
+ int set_min_max = 0;
+ if(unlikely(options & RRDR_OPTION_PERCENTAGE)) {
+ total = 0;
+ for(c = 0, rd = temp_rd?temp_rd:r->st->dimensions; rd && c < r->d ;c++, rd = rd->next) {
+ calculated_number n = cn[c];
+
+ if(likely((options & RRDR_OPTION_ABSOLUTE) && n < 0))
+ n = -n;
+
+ total += n;
+ }
+ // prevent a division by zero
+ if(total == 0) total = 1;
+ set_min_max = 1;
+ }
+
+ // for each dimension
+ for(c = 0, rd = temp_rd?temp_rd:r->st->dimensions; rd && c < r->d ;c++, rd = rd->next) {
+ if(unlikely(r->od[c] & RRDR_DIMENSION_HIDDEN)) continue;
+ if(unlikely((options & RRDR_OPTION_NONZERO) && !(r->od[c] & RRDR_DIMENSION_NONZERO))) continue;
+
+ calculated_number n = cn[c];
+
+ buffer_strcat(wb, pre_value);
+
+ if( options & RRDR_OPTION_OBJECTSROWS )
+ buffer_sprintf(wb, "%s%s%s: ", kq, rd->name, kq);
+
+ if(co[c] & RRDR_VALUE_EMPTY) {
+ if(options & RRDR_OPTION_NULL2ZERO)
+ buffer_strcat(wb, "0");
+ else
+ buffer_strcat(wb, "null");
+ }
+ else {
+ if(unlikely((options & RRDR_OPTION_ABSOLUTE) && n < 0))
+ n = -n;
+
+ if(unlikely(options & RRDR_OPTION_PERCENTAGE)) {
+ n = n * 100 / total;
+
+ if(unlikely(set_min_max)) {
+ r->min = r->max = n;
+ set_min_max = 0;
+ }
+
+ if(n < r->min) r->min = n;
+ if(n > r->max) r->max = n;
+ }
+
+ buffer_rrd_value(wb, n);
+ }
+
+ buffer_strcat(wb, post_value);
+ }
+
+ buffer_strcat(wb, post_line);
+ }
+
+ buffer_strcat(wb, finish);
+ //info("RRD2JSON(): %s: END", r->st->id);
+}
diff --git a/web/api/formatters/json/json.h b/web/api/formatters/json/json.h
new file mode 100644
index 0000000..6d73a3f
--- /dev/null
+++ b/web/api/formatters/json/json.h
@@ -0,0 +1,10 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_API_FORMATTER_JSON_H
+#define NETDATA_API_FORMATTER_JSON_H
+
+#include "../rrd2json.h"
+
+extern void rrdr2json(RRDR *r, BUFFER *wb, RRDR_OPTIONS options, int datatable, RRDDIM *temp_rd);
+
+#endif //NETDATA_API_FORMATTER_JSON_H
diff --git a/web/api/formatters/json_wrapper.c b/web/api/formatters/json_wrapper.c
new file mode 100644
index 0000000..cf4f109
--- /dev/null
+++ b/web/api/formatters/json_wrapper.c
@@ -0,0 +1,295 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "json_wrapper.h"
+
+void rrdr_json_wrapper_begin(RRDR *r, BUFFER *wb, uint32_t format, RRDR_OPTIONS options, int string_value, RRDDIM *temp_rd, char *chart_label_key) {
+ rrdset_check_rdlock(r->st);
+
+ long rows = rrdr_rows(r);
+ long c, i;
+ RRDDIM *rd;
+
+ //info("JSONWRAPPER(): %s: BEGIN", r->st->id);
+ char kq[2] = "", // key quote
+ sq[2] = ""; // string quote
+
+ if( options & RRDR_OPTION_GOOGLE_JSON ) {
+ kq[0] = '\0';
+ sq[0] = '\'';
+ }
+ else {
+ kq[0] = '"';
+ sq[0] = '"';
+ }
+
+ rrdset_rdlock(r->st);
+ buffer_sprintf(wb, "{\n"
+ " %sapi%s: 1,\n"
+ " %sid%s: %s%s%s,\n"
+ " %sname%s: %s%s%s,\n"
+ " %sview_update_every%s: %d,\n"
+ " %supdate_every%s: %d,\n"
+ " %sfirst_entry%s: %u,\n"
+ " %slast_entry%s: %u,\n"
+ " %sbefore%s: %u,\n"
+ " %safter%s: %u,\n"
+ " %sdimension_names%s: ["
+ , kq, kq
+ , kq, kq, sq, temp_rd?r->st->context:r->st->id, sq
+ , kq, kq, sq, temp_rd?r->st->context:r->st->name, sq
+ , kq, kq, r->update_every
+ , kq, kq, r->st->update_every
+ , kq, kq, (uint32_t)rrdset_first_entry_t_nolock(r->st)
+ , kq, kq, (uint32_t)rrdset_last_entry_t_nolock(r->st)
+ , kq, kq, (uint32_t)r->before
+ , kq, kq, (uint32_t)r->after
+ , kq, kq);
+ rrdset_unlock(r->st);
+
+ for(c = 0, i = 0, rd = temp_rd?temp_rd:r->st->dimensions; rd && c < r->d ;c++, rd = rd->next) {
+ if(unlikely(r->od[c] & RRDR_DIMENSION_HIDDEN)) continue;
+ if(unlikely((options & RRDR_OPTION_NONZERO) && !(r->od[c] & RRDR_DIMENSION_NONZERO))) continue;
+
+ if(i) buffer_strcat(wb, ", ");
+ buffer_strcat(wb, sq);
+ buffer_strcat(wb, rd->name);
+ buffer_strcat(wb, sq);
+ i++;
+ }
+ if(!i) {
+#ifdef NETDATA_INTERNAL_CHECKS
+ error("RRDR is empty for %s (RRDR has %d dimensions, options is 0x%08x)", r->st->id, r->d, options);
+#endif
+ rows = 0;
+ buffer_strcat(wb, sq);
+ buffer_strcat(wb, "no data");
+ buffer_strcat(wb, sq);
+ }
+
+ buffer_sprintf(wb, "],\n"
+ " %sdimension_ids%s: ["
+ , kq, kq);
+
+ for(c = 0, i = 0, rd = temp_rd?temp_rd:r->st->dimensions; rd && c < r->d ;c++, rd = rd->next) {
+ if(unlikely(r->od[c] & RRDR_DIMENSION_HIDDEN)) continue;
+ if(unlikely((options & RRDR_OPTION_NONZERO) && !(r->od[c] & RRDR_DIMENSION_NONZERO))) continue;
+
+ if(i) buffer_strcat(wb, ", ");
+ buffer_strcat(wb, sq);
+ buffer_strcat(wb, rd->id);
+ buffer_strcat(wb, sq);
+ i++;
+ }
+ if(!i) {
+ rows = 0;
+ buffer_strcat(wb, sq);
+ buffer_strcat(wb, "no data");
+ buffer_strcat(wb, sq);
+ }
+ buffer_strcat(wb, "],\n");
+
+ // Composite charts
+ if (temp_rd) {
+ buffer_sprintf(
+ wb,
+ " %schart_ids%s: [",
+ kq, kq);
+
+ for (c = 0, i = 0, rd = temp_rd ; rd && c < r->d; c++, rd = rd->next) {
+ if (unlikely(r->od[c] & RRDR_DIMENSION_HIDDEN))
+ continue;
+ if (unlikely((options & RRDR_OPTION_NONZERO) && !(r->od[c] & RRDR_DIMENSION_NONZERO)))
+ continue;
+
+ if (i)
+ buffer_strcat(wb, ", ");
+ buffer_strcat(wb, sq);
+ buffer_strcat(wb, rd->rrdset->name);
+ buffer_strcat(wb, sq);
+ i++;
+ }
+ if (!i) {
+ rows = 0;
+ buffer_strcat(wb, sq);
+ buffer_strcat(wb, "no data");
+ buffer_strcat(wb, sq);
+ }
+ buffer_strcat(wb, "],\n");
+ if (chart_label_key) {
+ buffer_sprintf(wb, " %schart_labels%s: { ", kq, kq);
+
+ SIMPLE_PATTERN *pattern = simple_pattern_create(chart_label_key, ",|\t\r\n\f\v", SIMPLE_PATTERN_EXACT);
+ SIMPLE_PATTERN *original_pattern = pattern;
+ char *label_key = NULL;
+ int keys = 0;
+ while (pattern && (label_key = simple_pattern_iterate(&pattern))) {
+ uint32_t key_hash = simple_hash(label_key);
+ struct label *current_label;
+
+ if (keys)
+ buffer_strcat(wb, ", ");
+ buffer_sprintf(wb, "%s%s%s : [", kq, label_key, kq);
+ keys++;
+
+ for (c = 0, i = 0, rd = temp_rd; rd && c < r->d; c++, rd = rd->next) {
+ if (unlikely(r->od[c] & RRDR_DIMENSION_HIDDEN))
+ continue;
+ if (unlikely((options & RRDR_OPTION_NONZERO) && !(r->od[c] & RRDR_DIMENSION_NONZERO)))
+ continue;
+ if (i)
+ buffer_strcat(wb, ", ");
+
+ current_label = rrdset_lookup_label_key(rd->rrdset, label_key, key_hash);
+ if (current_label) {
+ buffer_strcat(wb, sq);
+ buffer_strcat(wb, current_label->value);
+ buffer_strcat(wb, sq);
+ } else
+ buffer_strcat(wb, "null");
+ i++;
+ }
+ if (!i) {
+ rows = 0;
+ buffer_strcat(wb, sq);
+ buffer_strcat(wb, "no data");
+ buffer_strcat(wb, sq);
+ }
+ buffer_strcat(wb, "]");
+ }
+ buffer_strcat(wb, "},\n");
+ simple_pattern_free(original_pattern);
+ }
+ }
+
+ buffer_sprintf(wb, " %slatest_values%s: ["
+ , kq, kq);
+
+ for(c = 0, i = 0, rd = temp_rd?temp_rd:r->st->dimensions; rd && c < r->d ;c++, rd = rd->next) {
+ if(unlikely(r->od[c] & RRDR_DIMENSION_HIDDEN)) continue;
+ if(unlikely((options & RRDR_OPTION_NONZERO) && !(r->od[c] & RRDR_DIMENSION_NONZERO))) continue;
+
+ if(i) buffer_strcat(wb, ", ");
+ i++;
+
+ calculated_number value = rd->last_stored_value;
+ if (isnan(value))
+ buffer_strcat(wb, "null");
+ else
+ buffer_rrd_value(wb, value);
+ /*
+ storage_number n = rd->values[rrdset_last_slot(r->st)];
+
+ if(!does_storage_number_exist(n))
+ buffer_strcat(wb, "null");
+ else
+ buffer_rrd_value(wb, unpack_storage_number(n));
+ */
+ }
+ if(!i) {
+ rows = 0;
+ buffer_strcat(wb, "null");
+ }
+
+ buffer_sprintf(wb, "],\n"
+ " %sview_latest_values%s: ["
+ , kq, kq);
+
+ i = 0;
+ if(rows) {
+ calculated_number total = 1;
+
+ if(unlikely(options & RRDR_OPTION_PERCENTAGE)) {
+ total = 0;
+ for(c = 0, rd = temp_rd?temp_rd:r->st->dimensions; rd && c < r->d ;c++, rd = rd->next) {
+ calculated_number *cn = &r->v[ (rrdr_rows(r) - 1) * r->d ];
+ calculated_number n = cn[c];
+
+ if(likely((options & RRDR_OPTION_ABSOLUTE) && n < 0))
+ n = -n;
+
+ total += n;
+ }
+ // prevent a division by zero
+ if(total == 0) total = 1;
+ }
+
+ for(c = 0, i = 0, rd = temp_rd?temp_rd:r->st->dimensions; rd && c < r->d ;c++, rd = rd->next) {
+ if(unlikely(r->od[c] & RRDR_DIMENSION_HIDDEN)) continue;
+ if(unlikely((options & RRDR_OPTION_NONZERO) && !(r->od[c] & RRDR_DIMENSION_NONZERO))) continue;
+
+ if(i) buffer_strcat(wb, ", ");
+ i++;
+
+ calculated_number *cn = &r->v[ (rrdr_rows(r) - 1) * r->d ];
+ RRDR_VALUE_FLAGS *co = &r->o[ (rrdr_rows(r) - 1) * r->d ];
+ calculated_number n = cn[c];
+
+ if(co[c] & RRDR_VALUE_EMPTY) {
+ if(options & RRDR_OPTION_NULL2ZERO)
+ buffer_strcat(wb, "0");
+ else
+ buffer_strcat(wb, "null");
+ }
+ else {
+ if(unlikely((options & RRDR_OPTION_ABSOLUTE) && n < 0))
+ n = -n;
+
+ if(unlikely(options & RRDR_OPTION_PERCENTAGE))
+ n = n * 100 / total;
+
+ buffer_rrd_value(wb, n);
+ }
+ }
+ }
+ if(!i) {
+ rows = 0;
+ buffer_strcat(wb, "null");
+ }
+
+ buffer_sprintf(wb, "],\n"
+ " %sdimensions%s: %ld,\n"
+ " %spoints%s: %ld,\n"
+ " %sformat%s: %s"
+ , kq, kq, i
+ , kq, kq, rows
+ , kq, kq, sq
+ );
+
+ rrdr_buffer_print_format(wb, format);
+
+ if((options & RRDR_OPTION_CUSTOM_VARS) && (options & RRDR_OPTION_JSON_WRAP)) {
+ buffer_sprintf(wb, "%s,\n %schart_variables%s: ", sq, kq, kq);
+ health_api_v1_chart_custom_variables2json(r->st, wb);
+ }
+ else
+ buffer_sprintf(wb, "%s", sq);
+
+ buffer_sprintf(wb, ",\n %sresult%s: ", kq, kq);
+
+ if(string_value) buffer_strcat(wb, sq);
+ //info("JSONWRAPPER(): %s: END", r->st->id);
+}
+
+void rrdr_json_wrapper_end(RRDR *r, BUFFER *wb, uint32_t format, uint32_t options, int string_value) {
+ (void)format;
+
+ char kq[2] = "", // key quote
+ sq[2] = ""; // string quote
+
+ if( options & RRDR_OPTION_GOOGLE_JSON ) {
+ kq[0] = '\0';
+ sq[0] = '\'';
+ }
+ else {
+ kq[0] = '"';
+ sq[0] = '"';
+ }
+
+ if(string_value) buffer_strcat(wb, sq);
+
+ buffer_sprintf(wb, ",\n %smin%s: ", kq, kq);
+ buffer_rrd_value(wb, r->min);
+ buffer_sprintf(wb, ",\n %smax%s: ", kq, kq);
+ buffer_rrd_value(wb, r->max);
+ buffer_strcat(wb, "\n}\n");
+}
diff --git a/web/api/formatters/json_wrapper.h b/web/api/formatters/json_wrapper.h
new file mode 100644
index 0000000..d48d5d1
--- /dev/null
+++ b/web/api/formatters/json_wrapper.h
@@ -0,0 +1,11 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_API_FORMATTER_JSON_WRAPPER_H
+#define NETDATA_API_FORMATTER_JSON_WRAPPER_H
+
+#include "rrd2json.h"
+
+extern void rrdr_json_wrapper_begin(RRDR *r, BUFFER *wb, uint32_t format, RRDR_OPTIONS options, int string_value, RRDDIM *temp_rd, char *chart_key);
+extern void rrdr_json_wrapper_end(RRDR *r, BUFFER *wb, uint32_t format, uint32_t options, int string_value);
+
+#endif //NETDATA_API_FORMATTER_JSON_WRAPPER_H
diff --git a/web/api/formatters/rrd2json.c b/web/api/formatters/rrd2json.c
new file mode 100644
index 0000000..d8e2480
--- /dev/null
+++ b/web/api/formatters/rrd2json.c
@@ -0,0 +1,401 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "web/api/web_api_v1.h"
+
+static inline void free_temp_rrddim(RRDDIM *temp_rd)
+{
+ if (unlikely(!temp_rd))
+ return;
+
+ RRDDIM *t;
+ while (temp_rd) {
+ t = temp_rd->next;
+ freez((char *)temp_rd->id);
+ freez((char *)temp_rd->name);
+#ifdef ENABLE_DBENGINE
+ if (temp_rd->rrd_memory_mode == RRD_MEMORY_MODE_DBENGINE)
+ freez(temp_rd->state->metric_uuid);
+#endif
+ freez(temp_rd->state);
+ freez(temp_rd);
+ temp_rd = t;
+ }
+}
+
+void free_context_param_list(struct context_param **param_list)
+{
+ if (unlikely(!param_list || !*param_list))
+ return;
+
+ free_temp_rrddim(((*param_list)->rd));
+ freez((*param_list));
+ *param_list = NULL;
+}
+
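+// drop from the context parameter list every dimension whose chart has no data in the
+// requested time window, freeing the temporary dimension copies that are no longer needed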
+void rebuild_context_param_list(struct context_param *context_param_list, time_t after_requested)
+{
+ RRDDIM *temp_rd = context_param_list->rd;
+ RRDDIM *new_rd_list = NULL, *t;
+ while (temp_rd) {
+ t = temp_rd->next;
+ if (rrdset_last_entry_t(temp_rd->rrdset) >= after_requested) {
+ temp_rd->next = new_rd_list;
+ new_rd_list = temp_rd;
+ } else {
+ freez((char *)temp_rd->id);
+ freez((char *)temp_rd->name);
+#ifdef ENABLE_DBENGINE
+ if (temp_rd->rrd_memory_mode == RRD_MEMORY_MODE_DBENGINE)
+ freez(temp_rd->state->metric_uuid);
+#endif
+ freez(temp_rd->state);
+ freez(temp_rd);
+ }
+ temp_rd = t;
+ }
+ context_param_list->rd = new_rd_list;
+}
+
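+// deep-copy every dimension of st into the context parameter list, so composite
+// (context) queries can later walk a private RRDDIM chain without holding the chart lock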
+void build_context_param_list(struct context_param **param_list, RRDSET *st)
+{
+ if (unlikely(!param_list || !st))
+ return;
+
+ if (unlikely(!(*param_list))) {
+ *param_list = mallocz(sizeof(struct context_param));
+ (*param_list)->first_entry_t = LONG_MAX;
+ (*param_list)->last_entry_t = 0;
+ (*param_list)->rd = NULL;
+ }
+
+ RRDDIM *rd1;
+ st->last_accessed_time = now_realtime_sec();
+ rrdset_rdlock(st);
+
+ (*param_list)->first_entry_t = MIN((*param_list)->first_entry_t, rrdset_first_entry_t_nolock(st));
+ (*param_list)->last_entry_t = MAX((*param_list)->last_entry_t, rrdset_last_entry_t_nolock(st));
+
+ rrddim_foreach_read(rd1, st) {
+ RRDDIM *rd = mallocz(rd1->memsize);
+ memcpy(rd, rd1, rd1->memsize);
+ rd->id = strdupz(rd1->id);
+ rd->name = strdupz(rd1->name);
+ rd->state = mallocz(sizeof(*rd->state));
+ memcpy(rd->state, rd1->state, sizeof(*rd->state));
+ memcpy(&rd->state->collect_ops, &rd1->state->collect_ops, sizeof(struct rrddim_collect_ops));
+ memcpy(&rd->state->query_ops, &rd1->state->query_ops, sizeof(struct rrddim_query_ops));
+#ifdef ENABLE_DBENGINE
+ if (rd->rrd_memory_mode == RRD_MEMORY_MODE_DBENGINE) {
+ rd->state->metric_uuid = mallocz(sizeof(uuid_t));
+ uuid_copy(*rd->state->metric_uuid, *rd1->state->metric_uuid);
+ }
+#endif
+ rd->next = (*param_list)->rd;
+ (*param_list)->rd = rd;
+ }
+
+ rrdset_unlock(st);
+}
+
+void rrd_stats_api_v1_chart(RRDSET *st, BUFFER *wb) {
+ rrdset2json(st, wb, NULL, NULL, 0);
+}
+
+void rrdr_buffer_print_format(BUFFER *wb, uint32_t format) {
+ switch(format) {
+ case DATASOURCE_JSON:
+ buffer_strcat(wb, DATASOURCE_FORMAT_JSON);
+ break;
+
+ case DATASOURCE_DATATABLE_JSON:
+ buffer_strcat(wb, DATASOURCE_FORMAT_DATATABLE_JSON);
+ break;
+
+ case DATASOURCE_DATATABLE_JSONP:
+ buffer_strcat(wb, DATASOURCE_FORMAT_DATATABLE_JSONP);
+ break;
+
+ case DATASOURCE_JSONP:
+ buffer_strcat(wb, DATASOURCE_FORMAT_JSONP);
+ break;
+
+ case DATASOURCE_SSV:
+ buffer_strcat(wb, DATASOURCE_FORMAT_SSV);
+ break;
+
+ case DATASOURCE_CSV:
+ buffer_strcat(wb, DATASOURCE_FORMAT_CSV);
+ break;
+
+ case DATASOURCE_TSV:
+ buffer_strcat(wb, DATASOURCE_FORMAT_TSV);
+ break;
+
+ case DATASOURCE_HTML:
+ buffer_strcat(wb, DATASOURCE_FORMAT_HTML);
+ break;
+
+ case DATASOURCE_JS_ARRAY:
+ buffer_strcat(wb, DATASOURCE_FORMAT_JS_ARRAY);
+ break;
+
+ case DATASOURCE_SSV_COMMA:
+ buffer_strcat(wb, DATASOURCE_FORMAT_SSV_COMMA);
+ break;
+
+ default:
+ buffer_strcat(wb, "unknown");
+ break;
+ }
+}
+
+int rrdset2value_api_v1(
+ RRDSET *st
+ , BUFFER *wb
+ , calculated_number *n
+ , const char *dimensions
+ , long points
+ , long long after
+ , long long before
+ , int group_method
+ , long group_time
+ , uint32_t options
+ , time_t *db_after
+ , time_t *db_before
+ , int *value_is_null
+) {
+
+ RRDR *r = rrd2rrdr(st, points, after, before, group_method, group_time, options, dimensions, NULL);
+
+ if(!r) {
+ if(value_is_null) *value_is_null = 1;
+ return HTTP_RESP_INTERNAL_SERVER_ERROR;
+ }
+
+ if(rrdr_rows(r) == 0) {
+ rrdr_free(r);
+
+ if(db_after) *db_after = 0;
+ if(db_before) *db_before = 0;
+ if(value_is_null) *value_is_null = 1;
+
+ return HTTP_RESP_BAD_REQUEST;
+ }
+
+ if(wb) {
+ if (r->result_options & RRDR_RESULT_OPTION_RELATIVE)
+ buffer_no_cacheable(wb);
+ else if (r->result_options & RRDR_RESULT_OPTION_ABSOLUTE)
+ buffer_cacheable(wb);
+ }
+
+ if(db_after) *db_after = r->after;
+ if(db_before) *db_before = r->before;
+
+ long i = (!(options & RRDR_OPTION_REVERSED))?rrdr_rows(r) - 1:0;
+ *n = rrdr2value(r, i, options, value_is_null);
+
+ rrdr_free(r);
+ return HTTP_RESP_OK;
+}
+
+int rrdset2anything_api_v1(
+ RRDSET *st
+ , BUFFER *wb
+ , BUFFER *dimensions
+ , uint32_t format
+ , long points
+ , long long after
+ , long long before
+ , int group_method
+ , long group_time
+ , uint32_t options
+ , time_t *latest_timestamp
+ , struct context_param *context_param_list
+ , char *chart_label_key
+) {
+ time_t last_accessed_time = now_realtime_sec();
+ st->last_accessed_time = last_accessed_time;
+
+
+ RRDR *r = rrd2rrdr(st, points, after, before, group_method, group_time, options, dimensions?buffer_tostring(dimensions):NULL, context_param_list);
+ if(!r) {
+ buffer_strcat(wb, "Cannot generate output with these parameters on this chart.");
+ return HTTP_RESP_INTERNAL_SERVER_ERROR;
+ }
+
+ RRDDIM *temp_rd = context_param_list ? context_param_list->rd : NULL;
+
+ if(r->result_options & RRDR_RESULT_OPTION_RELATIVE)
+ buffer_no_cacheable(wb);
+ else if(r->result_options & RRDR_RESULT_OPTION_ABSOLUTE)
+ buffer_cacheable(wb);
+
+ if(latest_timestamp && rrdr_rows(r) > 0)
+ *latest_timestamp = r->before;
+
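+ // every format below is emitted either wrapped in the JSON metadata envelope
+ // (when RRDR_OPTION_JSON_WRAP is set) or as a plain payload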
+ switch(format) {
+ case DATASOURCE_SSV:
+ if(options & RRDR_OPTION_JSON_WRAP) {
+ wb->contenttype = CT_APPLICATION_JSON;
+ rrdr_json_wrapper_begin(r, wb, format, options, 1, temp_rd, chart_label_key);
+ rrdr2ssv(r, wb, options, "", " ", "");
+ rrdr_json_wrapper_end(r, wb, format, options, 1);
+ }
+ else {
+ wb->contenttype = CT_TEXT_PLAIN;
+ rrdr2ssv(r, wb, options, "", " ", "");
+ }
+ break;
+
+ case DATASOURCE_SSV_COMMA:
+ if(options & RRDR_OPTION_JSON_WRAP) {
+ wb->contenttype = CT_APPLICATION_JSON;
+ rrdr_json_wrapper_begin(r, wb, format, options, 1, temp_rd, chart_label_key);
+ rrdr2ssv(r, wb, options, "", ",", "");
+ rrdr_json_wrapper_end(r, wb, format, options, 1);
+ }
+ else {
+ wb->contenttype = CT_TEXT_PLAIN;
+ rrdr2ssv(r, wb, options, "", ",", "");
+ }
+ break;
+
+ case DATASOURCE_JS_ARRAY:
+ if(options & RRDR_OPTION_JSON_WRAP) {
+ wb->contenttype = CT_APPLICATION_JSON;
+ rrdr_json_wrapper_begin(r, wb, format, options, 0, temp_rd, chart_label_key);
+ rrdr2ssv(r, wb, options, "[", ",", "]");
+ rrdr_json_wrapper_end(r, wb, format, options, 0);
+ }
+ else {
+ wb->contenttype = CT_APPLICATION_JSON;
+ rrdr2ssv(r, wb, options, "[", ",", "]");
+ }
+ break;
+
+ case DATASOURCE_CSV:
+ if(options & RRDR_OPTION_JSON_WRAP) {
+ wb->contenttype = CT_APPLICATION_JSON;
+ rrdr_json_wrapper_begin(r, wb, format, options, 1, temp_rd, chart_label_key);
+ rrdr2csv(r, wb, format, options, "", ",", "\\n", "", temp_rd);
+ rrdr_json_wrapper_end(r, wb, format, options, 1);
+ }
+ else {
+ wb->contenttype = CT_TEXT_PLAIN;
+ rrdr2csv(r, wb, format, options, "", ",", "\r\n", "", temp_rd);
+ }
+ break;
+
+ case DATASOURCE_CSV_MARKDOWN:
+ if(options & RRDR_OPTION_JSON_WRAP) {
+ wb->contenttype = CT_APPLICATION_JSON;
+ rrdr_json_wrapper_begin(r, wb, format, options, 1, temp_rd, chart_label_key);
+ rrdr2csv(r, wb, format, options, "", "|", "\\n", "", temp_rd);
+ rrdr_json_wrapper_end(r, wb, format, options, 1);
+ }
+ else {
+ wb->contenttype = CT_TEXT_PLAIN;
+ rrdr2csv(r, wb, format, options, "", "|", "\r\n", "", temp_rd);
+ }
+ break;
+
+ case DATASOURCE_CSV_JSON_ARRAY:
+ wb->contenttype = CT_APPLICATION_JSON;
+ if(options & RRDR_OPTION_JSON_WRAP) {
+ rrdr_json_wrapper_begin(r, wb, format, options, 0, temp_rd, chart_label_key);
+ buffer_strcat(wb, "[\n");
+ rrdr2csv(r, wb, format, options + RRDR_OPTION_LABEL_QUOTES, "[", ",", "]", ",\n", temp_rd);
+ buffer_strcat(wb, "\n]");
+ rrdr_json_wrapper_end(r, wb, format, options, 0);
+ }
+ else {
+ wb->contenttype = CT_APPLICATION_JSON;
+ buffer_strcat(wb, "[\n");
+ rrdr2csv(r, wb, format, options + RRDR_OPTION_LABEL_QUOTES, "[", ",", "]", ",\n", temp_rd);
+ buffer_strcat(wb, "\n]");
+ }
+ break;
+
+ case DATASOURCE_TSV:
+ if(options & RRDR_OPTION_JSON_WRAP) {
+ wb->contenttype = CT_APPLICATION_JSON;
+ rrdr_json_wrapper_begin(r, wb, format, options, 1, temp_rd, chart_label_key);
+ rrdr2csv(r, wb, format, options, "", "\t", "\\n", "", temp_rd);
+ rrdr_json_wrapper_end(r, wb, format, options, 1);
+ }
+ else {
+ wb->contenttype = CT_TEXT_PLAIN;
+ rrdr2csv(r, wb, format, options, "", "\t", "\r\n", "", temp_rd);
+ }
+ break;
+
+ case DATASOURCE_HTML:
+ if(options & RRDR_OPTION_JSON_WRAP) {
+ wb->contenttype = CT_APPLICATION_JSON;
+ rrdr_json_wrapper_begin(r, wb, format, options, 1, temp_rd, chart_label_key);
+ buffer_strcat(wb, "<html>\\n<center>\\n<table border=\\\"0\\\" cellpadding=\\\"5\\\" cellspacing=\\\"5\\\">\\n");
+ rrdr2csv(r, wb, format, options, "<tr><td>", "</td><td>", "</td></tr>\\n", "", temp_rd);
+ buffer_strcat(wb, "</table>\\n</center>\\n</html>\\n");
+ rrdr_json_wrapper_end(r, wb, format, options, 1);
+ }
+ else {
+ wb->contenttype = CT_TEXT_HTML;
+ buffer_strcat(wb, "<html>\n<center>\n<table border=\"0\" cellpadding=\"5\" cellspacing=\"5\">\n");
+ rrdr2csv(r, wb, format, options, "<tr><td>", "</td><td>", "</td></tr>\n", "", temp_rd);
+ buffer_strcat(wb, "</table>\n</center>\n</html>\n");
+ }
+ break;
+
+ case DATASOURCE_DATATABLE_JSONP:
+ wb->contenttype = CT_APPLICATION_X_JAVASCRIPT;
+
+ if(options & RRDR_OPTION_JSON_WRAP)
+ rrdr_json_wrapper_begin(r, wb, format, options, 0, temp_rd, chart_label_key);
+
+ rrdr2json(r, wb, options, 1, temp_rd);
+
+ if(options & RRDR_OPTION_JSON_WRAP)
+ rrdr_json_wrapper_end(r, wb, format, options, 0);
+ break;
+
+ case DATASOURCE_DATATABLE_JSON:
+ wb->contenttype = CT_APPLICATION_JSON;
+
+ if(options & RRDR_OPTION_JSON_WRAP)
+ rrdr_json_wrapper_begin(r, wb, format, options, 0, temp_rd, chart_label_key);
+
+ rrdr2json(r, wb, options, 1, temp_rd);
+
+ if(options & RRDR_OPTION_JSON_WRAP)
+ rrdr_json_wrapper_end(r, wb, format, options, 0);
+ break;
+
+ case DATASOURCE_JSONP:
+ wb->contenttype = CT_APPLICATION_X_JAVASCRIPT;
+ if(options & RRDR_OPTION_JSON_WRAP)
+ rrdr_json_wrapper_begin(r, wb, format, options, 0, temp_rd, chart_label_key);
+
+ rrdr2json(r, wb, options, 0, temp_rd);
+
+ if(options & RRDR_OPTION_JSON_WRAP)
+ rrdr_json_wrapper_end(r, wb, format, options, 0);
+ break;
+
+ case DATASOURCE_JSON:
+ default:
+ wb->contenttype = CT_APPLICATION_JSON;
+
+ if(options & RRDR_OPTION_JSON_WRAP)
+ rrdr_json_wrapper_begin(r, wb, format, options, 0, temp_rd, chart_label_key);
+
+ rrdr2json(r, wb, options, 0, temp_rd);
+
+ if(options & RRDR_OPTION_JSON_WRAP)
+ rrdr_json_wrapper_end(r, wb, format, options, 0);
+ break;
+ }
+
+ rrdr_free(r);
+ return HTTP_RESP_OK;
+}
diff --git a/web/api/formatters/rrd2json.h b/web/api/formatters/rrd2json.h
new file mode 100644
index 0000000..3dc5989
--- /dev/null
+++ b/web/api/formatters/rrd2json.h
@@ -0,0 +1,92 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_RRD2JSON_H
+#define NETDATA_RRD2JSON_H 1
+
+#include "web/api/web_api_v1.h"
+#include "web/api/exporters/allmetrics.h"
+#include "web/api/queries/rrdr.h"
+
+#include "web/api/formatters/csv/csv.h"
+#include "web/api/formatters/ssv/ssv.h"
+#include "web/api/formatters/json/json.h"
+#include "web/api/formatters/value/value.h"
+
+#include "web/api/formatters/rrdset2json.h"
+#include "web/api/formatters/charts2json.h"
+#include "web/api/formatters/json_wrapper.h"
+
+#include "web/server/web_client.h"
+
+#define HOSTNAME_MAX 1024
+
+#define API_RELATIVE_TIME_MAX (3 * 365 * 86400)
+
+// type of JSON generations
+#define DATASOURCE_INVALID (-1)
+#define DATASOURCE_JSON 0
+#define DATASOURCE_DATATABLE_JSON 1
+#define DATASOURCE_DATATABLE_JSONP 2
+#define DATASOURCE_SSV 3
+#define DATASOURCE_CSV 4
+#define DATASOURCE_JSONP 5
+#define DATASOURCE_TSV 6
+#define DATASOURCE_HTML 7
+#define DATASOURCE_JS_ARRAY 8
+#define DATASOURCE_SSV_COMMA 9
+#define DATASOURCE_CSV_JSON_ARRAY 10
+#define DATASOURCE_CSV_MARKDOWN 11
+
+#define DATASOURCE_FORMAT_JSON "json"
+#define DATASOURCE_FORMAT_DATATABLE_JSON "datatable"
+#define DATASOURCE_FORMAT_DATATABLE_JSONP "datasource"
+#define DATASOURCE_FORMAT_JSONP "jsonp"
+#define DATASOURCE_FORMAT_SSV "ssv"
+#define DATASOURCE_FORMAT_CSV "csv"
+#define DATASOURCE_FORMAT_TSV "tsv"
+#define DATASOURCE_FORMAT_HTML "html"
+#define DATASOURCE_FORMAT_JS_ARRAY "array"
+#define DATASOURCE_FORMAT_SSV_COMMA "ssvcomma"
+#define DATASOURCE_FORMAT_CSV_JSON_ARRAY "csvjsonarray"
+#define DATASOURCE_FORMAT_CSV_MARKDOWN "markdown"
+
+extern void rrd_stats_api_v1_chart(RRDSET *st, BUFFER *wb);
+extern void rrdr_buffer_print_format(BUFFER *wb, uint32_t format);
+
+extern int rrdset2anything_api_v1(
+ RRDSET *st
+ , BUFFER *wb
+ , BUFFER *dimensions
+ , uint32_t format
+ , long points
+ , long long after
+ , long long before
+ , int group_method
+ , long group_time
+ , uint32_t options
+ , time_t *latest_timestamp
+ , struct context_param *context_param_list
+ , char *chart_label_key
+);
+
+extern int rrdset2value_api_v1(
+ RRDSET *st
+ , BUFFER *wb
+ , calculated_number *n
+ , const char *dimensions
+ , long points
+ , long long after
+ , long long before
+ , int group_method
+ , long group_time
+ , uint32_t options
+ , time_t *db_after
+ , time_t *db_before
+ , int *value_is_null
+);
+
+extern void build_context_param_list(struct context_param **param_list, RRDSET *st);
+extern void rebuild_context_param_list(struct context_param *context_param_list, time_t after_requested);
+extern void free_context_param_list(struct context_param **param_list);
+
+#endif /* NETDATA_RRD2JSON_H */
diff --git a/web/api/formatters/rrdset2json.c b/web/api/formatters/rrdset2json.c
new file mode 100644
index 0000000..5482881
--- /dev/null
+++ b/web/api/formatters/rrdset2json.c
@@ -0,0 +1,161 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "rrdset2json.h"
+
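+// print the chart labels as the body of a JSON object, one "key": "value" pair
+// per line, indented with up to 10 tabs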
+void chart_labels2json(RRDSET *st, BUFFER *wb, size_t indentation)
+{
+ char tabs[11];
+ struct label_index *labels = &st->state->labels;
+
+ if (indentation > 10)
+ indentation = 10;
+
+ tabs[0] = '\0';
+ while (indentation) {
+ strcat(tabs, "\t");
+ indentation--;
+ }
+
+ int count = 0;
+ netdata_rwlock_rdlock(&labels->labels_rwlock);
+ for (struct label *label = labels->head; label; label = label->next) {
+ if(count > 0) buffer_strcat(wb, ",\n");
+ buffer_strcat(wb, tabs);
+
+ char value[CONFIG_MAX_VALUE * 2 + 1];
+ sanitize_json_string(value, label->value, CONFIG_MAX_VALUE * 2);
+ buffer_sprintf(wb, "\"%s\": \"%s\"", label->key, value);
+
+ count++;
+ }
+ buffer_strcat(wb, "\n");
+ netdata_rwlock_unlock(&labels->labels_rwlock);
+}
+
+// generate JSON for the /api/v1/chart API call
+
+void rrdset2json(RRDSET *st, BUFFER *wb, size_t *dimensions_count, size_t *memory_used, int skip_volatile) {
+ rrdset_rdlock(st);
+
+ time_t first_entry_t = rrdset_first_entry_t_nolock(st);
+ time_t last_entry_t = rrdset_last_entry_t_nolock(st);
+
+ buffer_sprintf(wb,
+ "\t\t{\n"
+ "\t\t\t\"id\": \"%s\",\n"
+ "\t\t\t\"name\": \"%s\",\n"
+ "\t\t\t\"type\": \"%s\",\n"
+ "\t\t\t\"family\": \"%s\",\n"
+ "\t\t\t\"context\": \"%s\",\n"
+ "\t\t\t\"title\": \"%s (%s)\",\n"
+ "\t\t\t\"priority\": %ld,\n"
+ "\t\t\t\"plugin\": \"%s\",\n"
+ "\t\t\t\"module\": \"%s\",\n"
+ "\t\t\t\"enabled\": %s,\n"
+ "\t\t\t\"units\": \"%s\",\n"
+ "\t\t\t\"data_url\": \"/api/v1/data?chart=%s\",\n"
+ "\t\t\t\"chart_type\": \"%s\",\n"
+ , st->id
+ , st->name
+ , st->type
+ , st->family
+ , st->context
+ , st->title, st->name
+ , st->priority
+ , st->plugin_name?st->plugin_name:""
+ , st->module_name?st->module_name:""
+ , rrdset_flag_check(st, RRDSET_FLAG_ENABLED)?"true":"false"
+ , st->units
+ , st->name
+ , rrdset_type_name(st->chart_type)
+ );
+
+ if (likely(!skip_volatile))
+ buffer_sprintf(wb,
+ "\t\t\t\"duration\": %ld,\n"
+ , last_entry_t - first_entry_t + st->update_every//st->entries * st->update_every
+ );
+
+ buffer_sprintf(wb,
+ "\t\t\t\"first_entry\": %ld,\n"
+ , first_entry_t //rrdset_first_entry_t(st)
+ );
+
+ if (likely(!skip_volatile))
+ buffer_sprintf(wb,
+ "\t\t\t\"last_entry\": %ld,\n"
+ , last_entry_t//rrdset_last_entry_t(st)
+ );
+
+ buffer_sprintf(wb,
+ "\t\t\t\"update_every\": %d,\n"
+ "\t\t\t\"dimensions\": {\n"
+ , st->update_every
+ );
+
+ unsigned long memory = st->memsize;
+
+ size_t dimensions = 0;
+ RRDDIM *rd;
+ rrddim_foreach_read(rd, st) {
+ if(rrddim_flag_check(rd, RRDDIM_FLAG_HIDDEN) || rrddim_flag_check(rd, RRDDIM_FLAG_OBSOLETE)) continue;
+
+ memory += rd->memsize;
+
+ if (dimensions)
+ buffer_strcat(wb, ",\n\t\t\t\t\"");
+ else
+ buffer_strcat(wb, "\t\t\t\t\"");
+ buffer_strcat_jsonescape(wb, rd->id);
+ buffer_strcat(wb, "\": { \"name\": \"");
+ buffer_strcat_jsonescape(wb, rd->name);
+ buffer_strcat(wb, "\" }");
+
+ dimensions++;
+ }
+
+ if(dimensions_count) *dimensions_count += dimensions;
+ if(memory_used) *memory_used += memory;
+
+ buffer_sprintf(wb, "\n\t\t\t},\n\t\t\t\"chart_variables\": ");
+ health_api_v1_chart_custom_variables2json(st, wb);
+
+ buffer_strcat(wb, ",\n\t\t\t\"green\": ");
+ buffer_rrd_value(wb, st->green);
+ buffer_strcat(wb, ",\n\t\t\t\"red\": ");
+ buffer_rrd_value(wb, st->red);
+
+ if (likely(!skip_volatile)) {
+ buffer_strcat(wb, ",\n\t\t\t\"alarms\": {\n");
+ size_t alarms = 0;
+ RRDCALC *rc;
+ for (rc = st->alarms; rc; rc = rc->rrdset_next) {
+ buffer_sprintf(
+ wb,
+ "%s"
+ "\t\t\t\t\"%s\": {\n"
+ "\t\t\t\t\t\"id\": %u,\n"
+ "\t\t\t\t\t\"status\": \"%s\",\n"
+ "\t\t\t\t\t\"units\": \"%s\",\n"
+ "\t\t\t\t\t\"update_every\": %d\n"
+ "\t\t\t\t}",
+ (alarms) ? ",\n" : "", rc->name, rc->id, rrdcalc_status2string(rc->status), rc->units,
+ rc->update_every);
+
+ alarms++;
+ }
+ buffer_sprintf(wb,
+ "\n\t\t\t}"
+ );
+ }
+ buffer_strcat(wb, ",\n\t\t\t\"chart_labels\": {\n");
+ chart_labels2json(st, wb, 2);
+ buffer_strcat(wb, "\t\t\t}\n");
+
+
+ buffer_sprintf(wb,
+ "\n\t\t}"
+ );
+
+ rrdset_unlock(st);
+}
diff --git a/web/api/formatters/rrdset2json.h b/web/api/formatters/rrdset2json.h
new file mode 100644
index 0000000..697c846
--- /dev/null
+++ b/web/api/formatters/rrdset2json.h
@@ -0,0 +1,10 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_API_FORMATTER_RRDSET2JSON_H
+#define NETDATA_API_FORMATTER_RRDSET2JSON_H
+
+#include "rrd2json.h"
+
+extern void rrdset2json(RRDSET *st, BUFFER *wb, size_t *dimensions_count, size_t *memory_used, int skip_volatile);
+
+#endif //NETDATA_API_FORMATTER_RRDSET2JSON_H
diff --git a/web/api/formatters/ssv/Makefile.am b/web/api/formatters/ssv/Makefile.am
new file mode 100644
index 0000000..161784b
--- /dev/null
+++ b/web/api/formatters/ssv/Makefile.am
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/web/api/formatters/ssv/README.md b/web/api/formatters/ssv/README.md
new file mode 100644
index 0000000..d439949
--- /dev/null
+++ b/web/api/formatters/ssv/README.md
@@ -0,0 +1,59 @@
+<!--
+title: "SSV formatter"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/web/api/formatters/ssv/README.md
+-->
+
+# SSV formatter
+
+The SSV formatter sums all dimensions in [results of database queries](/web/api/queries/README.md)
+into a single value per point and returns a list of such values, showing how the sum changes through time.
+
+It supports the following formats:
+
+| format | content type | description |
+|:----:|:----------:|:----------|
+| `ssv` | text/plain | a space separated list of values |
+| `ssvcomma` | text/plain | a comma separated list of values |
+| `array` | application/json | a JSON array |
+
+The SSV formatter respects the following API `&options=`:
+
+| option | supported | description |
+| :----:|:-------:|:----------|
+| `nonzero` | yes | to return only the dimensions that have at least a non-zero value |
+| `flip` | yes | to return the numbers older to newer (the default is newer to older) |
+| `percent` | yes | to replace all values with their percentage over the row total |
+| `abs` | yes | to turn all values positive, before using them |
+| `min2max` | yes | to return the delta from the minimum value to the maximum value (across dimensions) |
+
+## Examples
+
+Get the average system CPU utilization of the last hour, in 6 values (one every 10 minutes):
+
+```bash
+# curl -Ss 'https://registry.my-netdata.io/api/v1/data?chart=system.cpu&format=ssv&after=-3600&points=6&group=average'
+1.741352 1.6800467 1.769411 1.6761112 1.629862 1.6807968
+```
+
+---
+
+Get the total mysql bandwidth (in + out) for the last hour, in 6 values (one every 10 minutes):
+
+Netdata returns bandwidth in `kilobits`.
+
+```bash
+# curl -Ss 'https://registry.my-netdata.io/api/v1/data?chart=mysql_local.net&format=ssvcomma&after=-3600&points=6&group=sum&options=abs'
+72618.7936215,72618.778889,72618.788084,72618.9195918,72618.7760612,72618.6712421
+```
+
+---
+
+Get the web server max connections for the last hour, in 12 values (one every 5 minutes)
+in a JSON array:
+
+```bash
+# curl -Ss 'https://registry.my-netdata.io/api/v1/data?chart=nginx_local.connections&format=array&after=-3600&points=12&group=max'
+[278,258,268,239,259,260,243,266,278,318,264,258]
+```
+
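+---
+
+As a hypothetical sketch of the `min2max` option documented above (the values are
+illustrative), get the delta between the smallest and the largest dimension for each point:
+
+```bash
+# curl -Ss 'https://registry.my-netdata.io/api/v1/data?chart=system.net&format=ssv&after=-3600&points=6&group=average&options=abs,min2max'
+105.4 98.2 110.7 99.9 102.3 101.1
+```
+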
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fweb%2Fapi%2Fformatters%2Fssv%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/web/api/formatters/ssv/ssv.c b/web/api/formatters/ssv/ssv.c
new file mode 100644
index 0000000..eeba028
--- /dev/null
+++ b/web/api/formatters/ssv/ssv.c
@@ -0,0 +1,45 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "ssv.h"
+
+void rrdr2ssv(RRDR *r, BUFFER *wb, RRDR_OPTIONS options, const char *prefix, const char *separator, const char *suffix) {
+ //info("RRD2SSV(): %s: BEGIN", r->st->id);
+ long i;
+
+ buffer_strcat(wb, prefix);
+ long start = 0, end = rrdr_rows(r), step = 1;
+ if(!(options & RRDR_OPTION_REVERSED)) {
+ start = rrdr_rows(r) - 1;
+ end = -1;
+ step = -1;
+ }
+
+ // for each line in the array
+ for(i = start; i != end ;i += step) {
+ int all_values_are_null = 0;
+ calculated_number v = rrdr2value(r, i, options, &all_values_are_null);
+
+ if(likely(i != start)) {
+ if(r->min > v) r->min = v;
+ if(r->max < v) r->max = v;
+ }
+ else {
+ r->min = v;
+ r->max = v;
+ }
+
+ if(likely(i != start))
+ buffer_strcat(wb, separator);
+
+ if(all_values_are_null) {
+ if(options & RRDR_OPTION_NULL2ZERO)
+ buffer_strcat(wb, "0");
+ else
+ buffer_strcat(wb, "null");
+ }
+ else
+ buffer_rrd_value(wb, v);
+ }
+ buffer_strcat(wb, suffix);
+ //info("RRD2SSV(): %s: END", r->st->id);
+}
diff --git a/web/api/formatters/ssv/ssv.h b/web/api/formatters/ssv/ssv.h
new file mode 100644
index 0000000..6963dcf
--- /dev/null
+++ b/web/api/formatters/ssv/ssv.h
@@ -0,0 +1,10 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_API_FORMATTER_SSV_H
+#define NETDATA_API_FORMATTER_SSV_H
+
+#include "../rrd2json.h"
+
+extern void rrdr2ssv(RRDR *r, BUFFER *wb, RRDR_OPTIONS options, const char *prefix, const char *separator, const char *suffix);
+
+#endif //NETDATA_API_FORMATTER_SSV_H
diff --git a/web/api/formatters/value/Makefile.am b/web/api/formatters/value/Makefile.am
new file mode 100644
index 0000000..161784b
--- /dev/null
+++ b/web/api/formatters/value/Makefile.am
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/web/api/formatters/value/README.md b/web/api/formatters/value/README.md
new file mode 100644
index 0000000..21c9370
--- /dev/null
+++ b/web/api/formatters/value/README.md
@@ -0,0 +1,24 @@
+<!--
+title: "Value formatter"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/web/api/formatters/value/README.md
+-->
+
+# Value formatter
+
+The Value formatter presents [results of database queries](/web/api/queries/README.md) as a single value.
+
+To calculate the single value to be returned, it sums the values of all dimensions; e.g. a row with dimension values 1, 2 and 3 is returned as 6.
+
+The Value formatter respects the following API `&options=`:
+
+| option | supported | description |
+|:----: |:-------: |:---------- |
+| `percent` | yes | to replace all values with their percentage over the row total|
+| `abs` | yes | to turn all values positive, before using them |
+| `min2max` | yes | to return the delta from the minimum value to the maximum value (across dimensions)|
+
+The Value formatter is not exposed by the API on its own.
+Instead, it is used by the [`ssv`](/web/api/formatters/ssv/README.md) formatter
+and [health monitoring queries](/health/README.md).
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fweb%2Fapi%2Fformatters%2Fvalue%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/web/api/formatters/value/value.c b/web/api/formatters/value/value.c
new file mode 100644
index 0000000..aea6c16
--- /dev/null
+++ b/web/api/formatters/value/value.c
@@ -0,0 +1,94 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "value.h"
+
+
+inline calculated_number rrdr2value(RRDR *r, long i, RRDR_OPTIONS options, int *all_values_are_null) {
+ rrdset_check_rdlock(r->st);
+
+ long c;
+ RRDDIM *d;
+
+ calculated_number *cn = &r->v[ i * r->d ];
+ RRDR_VALUE_FLAGS *co = &r->o[ i * r->d ];
+
+ calculated_number sum = 0, min = 0, max = 0, v;
+ int all_null = 1, init = 1;
+
+ calculated_number total = 1;
+ int set_min_max = 0;
+ if(unlikely(options & RRDR_OPTION_PERCENTAGE)) {
+ total = 0;
+ for(c = 0, d = r->st->dimensions; d && c < r->d ;c++, d = d->next) {
+ calculated_number n = cn[c];
+
+ if(likely((options & RRDR_OPTION_ABSOLUTE) && n < 0))
+ n = -n;
+
+ total += n;
+ }
+ // prevent a division by zero
+ if(total == 0) total = 1;
+ set_min_max = 1;
+ }
+
+ // for each dimension
+ for(c = 0, d = r->st->dimensions; d && c < r->d ;c++, d = d->next) {
+ if(unlikely(r->od[c] & RRDR_DIMENSION_HIDDEN)) continue;
+ if(unlikely((options & RRDR_OPTION_NONZERO) && !(r->od[c] & RRDR_DIMENSION_NONZERO))) continue;
+
+ calculated_number n = cn[c];
+
+ if(likely((options & RRDR_OPTION_ABSOLUTE) && n < 0))
+ n = -n;
+
+ if(unlikely(options & RRDR_OPTION_PERCENTAGE)) {
+ n = n * 100 / total;
+
+ if(unlikely(set_min_max)) {
+ r->min = r->max = n;
+ set_min_max = 0;
+ }
+
+ if(n < r->min) r->min = n;
+ if(n > r->max) r->max = n;
+ }
+
+ if(unlikely(init)) {
+ if(n > 0) {
+ min = 0;
+ max = n;
+ }
+ else {
+ min = n;
+ max = 0;
+ }
+ init = 0;
+ }
+
+ if(likely(!(co[c] & RRDR_VALUE_EMPTY))) {
+ all_null = 0;
+ sum += n;
+ }
+
+ if(n < min) min = n;
+ if(n > max) max = n;
+ }
+
+ if(unlikely(all_null)) {
+ if(likely(all_values_are_null))
+ *all_values_are_null = 1;
+ return 0;
+ }
+ else {
+ if(likely(all_values_are_null))
+ *all_values_are_null = 0;
+ }
+
+ if(options & RRDR_OPTION_MIN2MAX)
+ v = max - min;
+ else
+ v = sum;
+
+ return v;
+}
diff --git a/web/api/formatters/value/value.h b/web/api/formatters/value/value.h
new file mode 100644
index 0000000..d9e981f
--- /dev/null
+++ b/web/api/formatters/value/value.h
@@ -0,0 +1,10 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_API_FORMATTER_VALUE_H
+#define NETDATA_API_FORMATTER_VALUE_H
+
+#include "../rrd2json.h"
+
+extern calculated_number rrdr2value(RRDR *r, long i, RRDR_OPTIONS options, int *all_values_are_null);
+
+#endif //NETDATA_API_FORMATTER_VALUE_H
diff --git a/web/api/health/Makefile.am b/web/api/health/Makefile.am
new file mode 100644
index 0000000..161784b
--- /dev/null
+++ b/web/api/health/Makefile.am
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/web/api/health/README.md b/web/api/health/README.md
new file mode 100644
index 0000000..b6e8b5c
--- /dev/null
+++ b/web/api/health/README.md
@@ -0,0 +1,225 @@
+<!--
+title: "Health API Calls"
+date: 2020-04-27
+custom_edit_url: https://github.com/netdata/netdata/edit/master/web/api/health/README.md
+-->
+
+# Health API Calls
+
+## Health Read API
+
+### Enabled Alarms
+
+Netdata enables alarms on demand, i.e. when the chart they should be linked to starts collecting data. So, although many
+more alarms are configured, only the useful ones are enabled.
+
+To get the list of all enabled alarms, open your browser and navigate to `http://NODE:19999/api/v1/alarms?all`,
+replacing `NODE` with the IP address or hostname for your Agent dashboard.
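+
+For example, to count the enabled alarms from the command line (a sketch, assuming `jq` is available):
+
+```sh
+# list the names of all enabled alarms
+curl -s "http://NODE:19999/api/v1/alarms?all" | jq '.alarms | keys'
+```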
+
+### Raised Alarms
+
+This API call will return the alarms currently in WARNING or CRITICAL state.
+
+`http://NODE:19999/api/v1/alarms`
+
+### Event Log
+
+The size of the alarm log is configured in `netdata.conf`. There are two settings: the rotation of the alarm log file and the in-memory size of the alarm log.
+
+```
+[health]
+ in memory max health log entries = 1000
+ rotate log every lines = 2000
+```
+
+The following API call retrieves all entries of the alarm log:
+
+`http://NODE:19999/api/v1/alarm_log`
+
+### Alarm Log Incremental Updates
+
+`http://NODE:19999/api/v1/alarm_log?after=UNIQUEID`
+
+The above returns all the events in the alarm log that occurred after UNIQUEID (poll it once without `after=`, remember the last UNIQUEID of the returned set, and pass it back on the next call to receive only the newer events).
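+
+A minimal polling sketch (assuming `jq` is available and that each log entry carries its id in a `unique_id` field):
+
+```sh
+# first call: fetch the whole log and remember the newest unique id
+# (assumes each log entry exposes a numeric unique_id field)
+LAST=$(curl -s "http://NODE:19999/api/v1/alarm_log" | jq '[.[].unique_id] | max')
+# subsequent calls: fetch only the events newer than LAST
+curl -s "http://NODE:19999/api/v1/alarm_log?after=${LAST}"
+```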
+
+### Alarm badges
+
+The following will return an SVG badge of the alarm named `NAME`, attached to the chart named `CHART`.
+
+`http://NODE:19999/api/v1/badge.svg?alarm=NAME&chart=CHART`
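+
+For example, to download such a badge (the alarm and chart names here are illustrative):
+
+```sh
+# alarm and chart names below are examples; use your own
+curl -o badge.svg "http://NODE:19999/api/v1/badge.svg?alarm=ram_in_use&chart=system.ram"
+```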
+
+## Health Management API
+
+Netdata v1.12 and beyond provides a command API to control health checks and notifications at runtime. The feature is especially useful for maintenance periods, during which you receive meaningless alarms.
+From Netdata v1.16.0 and beyond, the configuration controlled via the API commands is [persisted across Netdata restarts](#persistence).
+
+Specifically, the API allows you to:
+
+- Disable health checks completely. Alarm conditions will not be evaluated at all and no entries will be added to the alarm log.
+- Silence alarm notifications. Alarm conditions will be evaluated, the alarms will appear in the log and the Netdata UI will show the alarms as active, but no notifications will be sent.
+- Disable or silence specific alarms that match selectors on alarm/template name, chart, context, host and family.
+
+The API is available by default, but it is protected by an `api authorization token` that is stored in the file you will see in the following entry of `http://NODE:19999/netdata.conf`:
+
+```
+[registry]
+ # netdata management api key file = /var/lib/netdata/netdata.api.key
+```
+
+You can access the API via GET requests, by adding the token in an `X-Auth-Token` HTTP header, like this:
+
+```sh
+curl "http://NODE:19999/api/v1/manage/health?cmd=RESET" -H "X-Auth-Token: Mytoken"
+```
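+
+In practice, you can read the token straight from the key file (assuming the default path shown above):
+
+```sh
+# the key file path is the default from netdata.conf
+TOKEN=$(cat /var/lib/netdata/netdata.api.key)
+curl "http://NODE:19999/api/v1/manage/health?cmd=LIST" -H "X-Auth-Token: ${TOKEN}"
+```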
+
+By default, access to the health management API is only allowed from `localhost`. Accessing the API from anywhere else will return a 403 error with the message `You are not allowed to access this resource.`. You can change permissions by editing the `allow management from` variable in `netdata.conf` within the [web] section. See [web server access lists](/web/server/README.md#access-lists) for more information.
+
+The command `RESET` just returns Netdata to the default operation, with all health checks and notifications enabled.
+If you've configured and entered your token correctly, you should see the plain text response `All health checks and notifications are enabled`.
+
+### Disable or silence all alarms
+
+If all you need is to temporarily disable all health checks, issue the following before your maintenance period starts:
+
+```sh
+curl "http://NODE:19999/api/v1/manage/health?cmd=DISABLE ALL" -H "X-Auth-Token: Mytoken"
+```
+
+The effect of disabling health checks is that the alarm criteria are not evaluated at all and nothing is written in the alarm log.
+If you want the health checks to keep running but not to receive any notifications during your maintenance period, you can instead use this:
+
+```sh
+curl "http://NODE:19999/api/v1/manage/health?cmd=SILENCE ALL" -H "X-Auth-Token: Mytoken"
+```
+
+Alarms may then still be raised and logged in Netdata, so you'll be able to see them via the UI.
+
+Regardless of the option you choose, at the end of your maintenance period you revert to the normal state via the RESET command.
+
+```sh
+ curl "http://NODE:19999/api/v1/manage/health?cmd=RESET" -H "X-Auth-Token: Mytoken"
+```
+
+### Disable or silence specific alarms
+
+If you do not wish to disable/silence all alarms, then the `DISABLE ALL` and `SILENCE ALL` commands can't be used.
+Instead, the following commands expect that one or more alarm selectors will be added, so that only alarms that match the selectors are disabled or silenced.
+
+- `DISABLE` : Set the mode to disable health checks.
+- `SILENCE` : Set the mode to silence notifications.
+
+You will normally put one of these commands in the same request with your first alarm selector, but it's possible to issue them separately as well.
+You will get a warning in the response if a selector was added without a SILENCE/DISABLE command, or vice versa.
+
+Each request can specify a single alarm `selector`, with one or more `selection criteria`.
+A single alarm will match a `selector` if all selection criteria match the alarm.
+You can add as many selectors as you like.
+In essence, the rule is: IF (alarm matches all the criteria in selector1 OR all the criteria in selector2 OR ...) THEN apply the DISABLE or SILENCE command.
+
+To clear all selectors and reset the mode to default, use the `RESET` command.
+
+The following example silences notifications for all the alarms with context=load:
+
+```sh
+curl "http://NODE:19999/api/v1/manage/health?cmd=SILENCE&context=load" -H "X-Auth-Token: Mytoken"
+```
+
+#### Selection criteria
+
+The `selection criteria` are key/value pairs, in the format `key : value`, where value is a Netdata [simple pattern](/libnetdata/simple_pattern/README.md). This means that you can create very powerful selectors (you will rarely need more than one or two).
+
+The accepted keys for the `selection criteria` are the following:
+
+- `alarm` : The expression provided will match both `alarm` and `template` names.
+- `chart` : Chart ids/names, as shown on the dashboard. These will match the `on` entry of a configured `alarm`.
+- `context` : Chart context, as shown on the dashboard. These will match the `on` entry of a configured `template`.
+- `hosts` : The hostnames that will need to match.
+- `families` : The alarm families.
+
+You can add any of the selection criteria you need on the request, to ensure that only the alarms you are interested in are matched and disabled/silenced. For example, there is no reason to add `hosts: *` if you want the criteria to apply to alarms for all hosts.
+
+Example 1: Disable all health checks for context = `random`
+
+```
+http://NODE:19999/api/v1/manage/health?cmd=DISABLE&context=random
+```
+
+Example 2: Silence all alarms and templates with name starting with `out_of` on host `myhost`
+
+```
+http://NODE:19999/api/v1/manage/health?cmd=SILENCE&alarm=out_of*&hosts=myhost
+```
+
+Example 2.2: Add one more selector, to also silence alarms for cpu1 and cpu2
+
+```
+http://NODE:19999/api/v1/manage/health?families=cpu1 cpu2
+```
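+
+Putting it together, a maintenance window could be handled like this (a sketch; the selectors are illustrative):
+
+```sh
+# before maintenance: silence the load alarms on myhost
+curl "http://NODE:19999/api/v1/manage/health?cmd=SILENCE&context=load&hosts=myhost" -H "X-Auth-Token: Mytoken"
+# after maintenance: clear all selectors and re-enable everything
+curl "http://NODE:19999/api/v1/manage/health?cmd=RESET" -H "X-Auth-Token: Mytoken"
+```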
+
+### List silencers
+
+The command `LIST` was added in Netdata v1.16.0 and returns a JSON with the current status of the silencers.
+
+```sh
+curl "http://NODE:19999/api/v1/manage/health?cmd=LIST" -H "X-Auth-Token: Mytoken"
+```
+
+As an example, the following response shows that we have two silencers configured, one for an alarm called `samplealarm` and one for alarms with context `random` on host `myhost`:
+
+```json
+{
+ "all": false,
+ "type": "SILENCE",
+ "silencers": [
+ {
+ "alarm": "samplealarm"
+ },
+ {
+ "context": "random",
+ "hosts": "myhost"
+ }
+ ]
+}
+```
+
+The response below shows that we have disabled all health checks.
+
+```json
+{
+ "all": true,
+ "type": "DISABLE",
+ "silencers": []
+}
+```
+
+### Responses
+
+- "Auth Error" : Token authentication failed
+- "All alarm notifications are silenced" : Successful response to cmd=SILENCE ALL
+- "All health checks are disabled" : Successful response to cmd=DISABLE ALL
+- "All health checks and notifications are enabled" : Successful response to cmd=RESET
+- "Health checks disabled for alarms matching the selectors" : Added to the response for a cmd=DISABLE
+- "Alarm notifications silenced for alarms matching the selectors" : Added to the response for a cmd=SILENCE
+- "Alarm selector added" : Added to the response when a new selector is added
+- "Invalid key. Ignoring it." : Wrong name of a parameter. Added to the response and ignored.
+- "WARNING: Added alarm selector to silence/disable alarms without a SILENCE or DISABLE command." : Added to the response if a selector is added without a selector-specific command.
+- "WARNING: SILENCE or DISABLE command is ineffective without defining any alarm selectors." : Added to the response if a selector-specific command is issued without a selector.
+
+### Persistence
+
+From Netdata v1.16.0 and beyond, the silencers configuration is persisted to disk and loaded when Netdata starts.
+The JSON string returned by the [LIST command](#list-silencers) is automatically saved to the `silencers file` every time a command alters the silencers configuration.
+The file's location is configurable in `netdata.conf`. The default is shown below:
+
+```
+[health]
+ # silencers file = /var/lib/netdata/health.silencers.json
+```
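+
+To verify what has been persisted, you can inspect that file directly (assuming the default location):
+
+```sh
+cat /var/lib/netdata/health.silencers.json
+```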
+
+### Further reading
+
+The test script under [tests/health_mgmtapi](/tests/health_mgmtapi/README.md) contains a series of tests that you can either run or read through to understand the various calls and responses better.
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fweb%2Fapi%2Fhealth%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/web/api/health/health_cmdapi.c b/web/api/health/health_cmdapi.c
new file mode 100644
index 0000000..4dd85e6
--- /dev/null
+++ b/web/api/health/health_cmdapi.c
@@ -0,0 +1,205 @@
+//
+// Created by Christopher on 11/12/18.
+//
+
+#include "health_cmdapi.h"
+
+/**
+ * Free Silencers
+ *
+ * Clean the silencer structure
+ *
+ * @param t is the structure that will be cleaned.
+ */
+void free_silencers(SILENCER *t) {
+ if (!t) return;
+ if (t->next) free_silencers(t->next);
+ debug(D_HEALTH, "HEALTH command API: Freeing silencer %s:%s:%s:%s:%s", t->alarms,
+ t->charts, t->contexts, t->hosts, t->families);
+ simple_pattern_free(t->alarms_pattern);
+ simple_pattern_free(t->charts_pattern);
+ simple_pattern_free(t->contexts_pattern);
+ simple_pattern_free(t->hosts_pattern);
+ simple_pattern_free(t->families_pattern);
+ freez(t->alarms);
+ freez(t->charts);
+ freez(t->contexts);
+ freez(t->hosts);
+ freez(t->families);
+ freez(t);
+ return;
+}
+
+/**
+ * Silencers to JSON Entry
+ *
+ * Append a single key/value pair to the silencers JSON output.
+ *
+ * @param wb a pointer to the output buffer
+ * @param var the json variable
+ * @param val the json value
+ * @param hasprev whether a previous entry has already been written
+ *
+ * @return 1 if an entry was written, otherwise the value of hasprev.
+ */
+int health_silencers2json_entry(BUFFER *wb, char* var, char* val, int hasprev) {
+ if (val) {
+ buffer_sprintf(wb, "%s\n\t\t\t\"%s\": \"%s\"", (hasprev)?",":"", var, val);
+ return 1;
+ } else {
+ return hasprev;
+ }
+}
+
+/**
+ * Silencer to JSON
+ *
+ * Write the silencer values using JSON format inside a buffer.
+ *
+ * @param wb is the buffer to write the silencers.
+ */
+void health_silencers2json(BUFFER *wb) {
+ buffer_sprintf(wb, "{\n\t\"all\": %s,"
+ "\n\t\"type\": \"%s\","
+ "\n\t\"silencers\": [",
+ (silencers->all_alarms)?"true":"false",
+ (silencers->stype == STYPE_NONE)?"None":((silencers->stype == STYPE_DISABLE_ALARMS)?"DISABLE":"SILENCE"));
+
+ SILENCER *silencer;
+ int i = 0, j = 0;
+ for(silencer = silencers->silencers; silencer ; silencer = silencer->next) {
+ if(likely(i)) buffer_strcat(wb, ",");
+ buffer_strcat(wb, "\n\t\t{");
+ j=health_silencers2json_entry(wb, HEALTH_ALARM_KEY, silencer->alarms, j);
+ j=health_silencers2json_entry(wb, HEALTH_CHART_KEY, silencer->charts, j);
+ j=health_silencers2json_entry(wb, HEALTH_CONTEXT_KEY, silencer->contexts, j);
+ j=health_silencers2json_entry(wb, HEALTH_HOST_KEY, silencer->hosts, j);
+ health_silencers2json_entry(wb, HEALTH_FAMILIES_KEY, silencer->families, j);
+ j=0;
+ buffer_strcat(wb, "\n\t\t}");
+ i++;
+ }
+ if(likely(i)) buffer_strcat(wb, "\n\t");
+ buffer_strcat(wb, "]\n}\n");
+}
+
+/**
+ * Silencer to FILE
+ *
+ * Write the silencer buffer to a file.
+ * @param wb the buffer holding the silencers JSON.
+ */
+void health_silencers2file(BUFFER *wb) {
+ if (wb->len == 0) return;
+
+ FILE *fd = fopen(silencers_filename, "wb");
+ if(fd) {
+ size_t written = (size_t)fprintf(fd, "%s", wb->buffer) ;
+ if (written == wb->len ) {
+ info("Silencer changes written to %s", silencers_filename);
+ }
+ fclose(fd);
+ return;
+ }
+ error("Silencer changes could not be written to %s. Error %s", silencers_filename, strerror(errno));
+}
+
+/**
+ * Request V1 MGMT Health
+ *
+ * Function called by the API to manage health checks and notifications.
+ *
+ * @param host the host this request refers to (currently unused).
+ * @param w is the structure with all the information of the client request.
+ * @param url is the URL of the request.
+ *
+ * @return It returns 200 on success and another code otherwise.
+ */
+int web_client_api_request_v1_mgmt_health(RRDHOST *host, struct web_client *w, char *url) {
+ int ret;
+ (void) host;
+
+ BUFFER *wb = w->response.data;
+ buffer_flush(wb);
+ wb->contenttype = CT_TEXT_PLAIN;
+
+ buffer_flush(w->response.data);
+
+ //Local instance of the silencer
+ SILENCER *silencer = NULL;
+ int config_changed = 1;
+
+ if (!w->auth_bearer_token) {
+ buffer_strcat(wb, HEALTH_CMDAPI_MSG_AUTHERROR);
+ ret = HTTP_RESP_FORBIDDEN;
+ } else {
+ debug(D_HEALTH, "HEALTH command API: Comparing secret '%s' to '%s'", w->auth_bearer_token, api_secret);
+ if (strcmp(w->auth_bearer_token, api_secret)) {
+ buffer_strcat(wb, HEALTH_CMDAPI_MSG_AUTHERROR);
+ ret = HTTP_RESP_FORBIDDEN;
+ } else {
+ while (url) {
+ char *value = mystrsep(&url, "&");
+ if (!value || !*value) continue;
+
+ char *key = mystrsep(&value, "=");
+ if (!key || !*key) continue;
+ if (!value || !*value) continue;
+
+ debug(D_WEB_CLIENT, "%llu: API v1 health query param '%s' with value '%s'", w->id, key, value);
+
+ // name and value are now the parameters
+ if (!strcmp(key, "cmd")) {
+ if (!strcmp(value, HEALTH_CMDAPI_CMD_SILENCEALL)) {
+ silencers->all_alarms = 1;
+ silencers->stype = STYPE_SILENCE_NOTIFICATIONS;
+ buffer_strcat(wb, HEALTH_CMDAPI_MSG_SILENCEALL);
+ } else if (!strcmp(value, HEALTH_CMDAPI_CMD_DISABLEALL)) {
+ silencers->all_alarms = 1;
+ silencers->stype = STYPE_DISABLE_ALARMS;
+ buffer_strcat(wb, HEALTH_CMDAPI_MSG_DISABLEALL);
+ } else if (!strcmp(value, HEALTH_CMDAPI_CMD_SILENCE)) {
+ silencers->stype = STYPE_SILENCE_NOTIFICATIONS;
+ buffer_strcat(wb, HEALTH_CMDAPI_MSG_SILENCE);
+ } else if (!strcmp(value, HEALTH_CMDAPI_CMD_DISABLE)) {
+ silencers->stype = STYPE_DISABLE_ALARMS;
+ buffer_strcat(wb, HEALTH_CMDAPI_MSG_DISABLE);
+ } else if (!strcmp(value, HEALTH_CMDAPI_CMD_RESET)) {
+ silencers->all_alarms = 0;
+ silencers->stype = STYPE_NONE;
+ free_silencers(silencers->silencers);
+ silencers->silencers = NULL;
+ buffer_strcat(wb, HEALTH_CMDAPI_MSG_RESET);
+ } else if (!strcmp(value, HEALTH_CMDAPI_CMD_LIST)) {
+ w->response.data->contenttype = CT_APPLICATION_JSON;
+ health_silencers2json(wb);
+ config_changed=0;
+ }
+ } else {
+ silencer = health_silencers_addparam(silencer, key, value);
+ }
+ }
+
+ if (likely(silencer)) {
+ health_silencers_add(silencer);
+ buffer_strcat(wb, HEALTH_CMDAPI_MSG_ADDED);
+ if (silencers->stype == STYPE_NONE) {
+ buffer_strcat(wb, HEALTH_CMDAPI_MSG_STYPEWARNING);
+ }
+ }
+ if (unlikely(silencers->stype != STYPE_NONE && !silencers->all_alarms && !silencers->silencers)) {
+ buffer_strcat(wb, HEALTH_CMDAPI_MSG_NOSELECTORWARNING);
+ }
+ ret = HTTP_RESP_OK;
+ }
+ }
+ w->response.data = wb;
+ buffer_no_cacheable(w->response.data);
+ if (ret == HTTP_RESP_OK && config_changed) {
+ BUFFER *jsonb = buffer_create(200);
+ health_silencers2json(jsonb);
+ health_silencers2file(jsonb);
+ }
+
+ return ret;
+}
diff --git a/web/api/health/health_cmdapi.h b/web/api/health/health_cmdapi.h
new file mode 100644
index 0000000..d8ec6aa
--- /dev/null
+++ b/web/api/health/health_cmdapi.h
@@ -0,0 +1,31 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_WEB_HEALTH_SVG_H
+#define NETDATA_WEB_HEALTH_SVG_H 1
+
+#include "libnetdata/libnetdata.h"
+#include "web/server/web_client.h"
+#include "health/health.h"
+
+#define HEALTH_CMDAPI_CMD_SILENCEALL "SILENCE ALL"
+#define HEALTH_CMDAPI_CMD_DISABLEALL "DISABLE ALL"
+#define HEALTH_CMDAPI_CMD_SILENCE "SILENCE"
+#define HEALTH_CMDAPI_CMD_DISABLE "DISABLE"
+#define HEALTH_CMDAPI_CMD_RESET "RESET"
+#define HEALTH_CMDAPI_CMD_LIST "LIST"
+
+#define HEALTH_CMDAPI_MSG_AUTHERROR "Auth Error\n"
+#define HEALTH_CMDAPI_MSG_SILENCEALL "All alarm notifications are silenced\n"
+#define HEALTH_CMDAPI_MSG_DISABLEALL "All health checks are disabled\n"
+#define HEALTH_CMDAPI_MSG_RESET "All health checks and notifications are enabled\n"
+#define HEALTH_CMDAPI_MSG_DISABLE "Health checks disabled for alarms matching the selectors\n"
+#define HEALTH_CMDAPI_MSG_SILENCE "Alarm notifications silenced for alarms matching the selectors\n"
+#define HEALTH_CMDAPI_MSG_ADDED "Alarm selector added\n"
+#define HEALTH_CMDAPI_MSG_STYPEWARNING "WARNING: Added alarm selector to silence/disable alarms without a SILENCE or DISABLE command.\n"
+#define HEALTH_CMDAPI_MSG_NOSELECTORWARNING "WARNING: SILENCE or DISABLE command is ineffective without defining any alarm selectors.\n"
+
+extern int web_client_api_request_v1_mgmt_health(RRDHOST *host, struct web_client *w, char *url);
+
+#include "web/api/web_api_v1.h"
+
+#endif /* NETDATA_WEB_HEALTH_SVG_H */
diff --git a/web/api/netdata-swagger.json b/web/api/netdata-swagger.json
new file mode 100644
index 0000000..ed2555f
--- /dev/null
+++ b/web/api/netdata-swagger.json
@@ -0,0 +1,2065 @@
+{
+ "openapi": "3.0.0",
+ "info": {
+ "title": "NetData API",
+ "description": "Real-time performance and health monitoring.",
+ "version": "1.11.1_rolling"
+ },
+ "paths": {
+ "/info": {
+ "get": {
+ "summary": "Get netdata basic information",
+ "description": "The info endpoint returns basic information about netdata. It provides:\n* netdata version\n* netdata unique id\n* list of hosts mirrored (includes itself)\n* Operating System, Virtualization, K8s nodes and Container technology information\n* List of active collector plugins and modules\n* number of alarms in the host\n * number of alarms in normal state\n * number of alarms in warning state\n * number of alarms in critical state\n",
+ "responses": {
+ "200": {
+ "description": "netdata basic information.",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/info"
+ }
+ }
+ }
+ },
+ "503": {
+ "description": "netdata daemon not ready (used for health checks)."
+ }
+ }
+ }
+ },
+ "/charts": {
+ "get": {
+ "summary": "Get a list of all charts available at the server",
+ "description": "The charts endpoint returns a summary about all charts stored in the netdata server.",
+ "responses": {
+ "200": {
+ "description": "An array of charts.",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/chart_summary"
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "/chart": {
+ "get": {
+ "summary": "Get info about a specific chart",
+ "description": "The Chart endpoint returns detailed information about a chart.",
+ "parameters": [
+ {
+ "name": "chart",
+ "in": "query",
+ "description": "The id of the chart as returned by the /charts call.",
+ "required": true,
+ "schema": {
+ "type": "string",
+ "format": "as returned by /charts",
+ "default": "system.cpu"
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "A javascript object with detailed information about the chart.",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/chart"
+ }
+ }
+ }
+ },
+ "400": {
+ "description": "No chart id was supplied in the request."
+ },
+ "404": {
+ "description": "No chart with the given id is found."
+ }
+ }
+ }
+ },
+ "/alarm_variables": {
+ "get": {
+ "summary": "List variables available to configure alarms for a chart",
+ "description": "Returns the basic information of a chart and all the variables that can be used in alarm and template health configurations for the particular chart or family.",
+ "parameters": [
+ {
+ "name": "chart",
+ "in": "query",
+ "description": "The id of the chart as returned by the /charts call.",
+ "required": true,
+ "schema": {
+ "type": "string",
+ "format": "as returned by /charts",
+ "default": "system.cpu"
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "A javascript object with information about the chart and the available variables.",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/alarm_variables"
+ }
+ }
+ }
+ },
+ "400": {
+ "description": "Bad request - the body will include a message stating what is wrong."
+ },
+ "404": {
+ "description": "No chart with the given id is found."
+ },
+ "500": {
+ "description": "Internal server error. This usually means the server is out of memory."
+ }
+ }
+ }
+ },
+ "/data": {
+ "get": {
+ "summary": "Get collected data for a specific chart",
+ "description": "The data endpoint returns data stored in the round robin database of a chart.",
+ "parameters": [
+ {
+ "name": "chart",
+ "in": "query",
+ "description": "The id of the chart as returned by the /charts call. Note chart or context must be specified",
+ "required": false,
+ "allowEmptyValue": false,
+ "schema": {
+ "type": "string",
+ "format": "as returned by /charts",
+ "default": "system.cpu"
+ }
+ },
+ {
+ "name": "context",
+ "in": "query",
+ "description": "The context of the chart as returned by the /charts call. Note chart or context must be specified",
+ "required": false,
+ "allowEmptyValue": false,
+ "schema": {
+ "type": "string",
+ "format": "as returned by /charts"
+ }
+ },
+ {
+ "name": "dimension",
+ "in": "query",
+ "description": "Zero, one or more dimension ids or names, as returned by the /chart call, separated with comma or pipe. Netdata simple patterns are supported.",
+ "required": false,
+ "allowEmptyValue": false,
+ "schema": {
+ "type": "array",
+ "items": {
+ "type": "string",
+ "format": "as returned by /charts"
+ }
+ }
+ },
+ {
+ "name": "after",
+ "in": "query",
+ "description": "This parameter can either be an absolute timestamp specifying the starting point of the data to be returned, or a relative number of seconds (negative, relative to parameter: before). Netdata will assume it is a relative number if it is less that 3 years (in seconds). If not specified the default is -600 seconds. Netdata will adapt this parameter to the boundaries of the round robin database unless the allow_past option is specified.",
+ "required": true,
+ "allowEmptyValue": false,
+ "schema": {
+ "type": "number",
+ "format": "integer",
+ "default": -600
+ }
+ },
+ {
+ "name": "before",
+ "in": "query",
+ "description": "This parameter can either be an absolute timestamp specifying the ending point of the data to be returned, or a relative number of seconds (negative), relative to the last collected timestamp. Netdata will assume it is a relative number if it is less than 3 years (in seconds). Netdata will adapt this parameter to the boundaries of the round robin database. The default is zero (i.e. the timestamp of the last value collected).",
+ "required": false,
+ "schema": {
+ "type": "number",
+ "format": "integer",
+ "default": 0
+ }
+ },
+ {
+ "name": "points",
+ "in": "query",
+ "description": "The number of points to be returned. If not given, or it is <= 0, or it is bigger than the points stored in the round robin database for this chart for the given duration, all the available collected values for the given duration will be returned.",
+ "required": true,
+ "allowEmptyValue": false,
+ "schema": {
+ "type": "number",
+ "format": "integer",
+ "default": 20
+ }
+ },
+ {
+ "name": "group",
+ "in": "query",
+ "description": "The grouping method. If multiple collected values are to be grouped in order to return fewer points, this parameters defines the method of grouping. methods supported \"min\", \"max\", \"average\", \"sum\", \"incremental-sum\". \"max\" is actually calculated on the absolute value collected (so it works for both positive and negative dimesions to return the most extreme value in either direction).",
+ "required": true,
+ "allowEmptyValue": false,
+ "schema": {
+ "type": "string",
+ "enum": [
+ "min",
+ "max",
+ "average",
+ "median",
+ "stddev",
+ "sum",
+ "incremental-sum"
+ ],
+ "default": "average"
+ }
+ },
+ {
+ "name": "gtime",
+ "in": "query",
+ "description": "The grouping number of seconds. This is used in conjunction with group=average to change the units of metrics (ie when the data is per-second, setting gtime=60 will turn them to per-minute).",
+ "required": false,
+ "allowEmptyValue": false,
+ "schema": {
+ "type": "number",
+ "format": "integer",
+ "default": 0
+ }
+ },
+ {
+ "name": "format",
+ "in": "query",
+ "description": "The format of the data to be returned.",
+ "required": true,
+ "allowEmptyValue": false,
+ "schema": {
+ "type": "string",
+ "enum": [
+ "json",
+ "jsonp",
+ "csv",
+ "tsv",
+ "tsv-excel",
+ "ssv",
+ "ssvcomma",
+ "datatable",
+ "datasource",
+ "html",
+ "markdown",
+ "array",
+ "csvjsonarray"
+ ],
+ "default": "json"
+ }
+ },
+ {
+ "name": "options",
+ "in": "query",
+ "description": "Options that affect data generation.",
+ "required": false,
+ "allowEmptyValue": false,
+ "schema": {
+ "type": "array",
+ "items": {
+ "type": "string",
+ "enum": [
+ "nonzero",
+ "flip",
+ "jsonwrap",
+ "min2max",
+ "seconds",
+ "milliseconds",
+ "abs",
+ "absolute",
+ "absolute-sum",
+ "null2zero",
+ "objectrows",
+ "google_json",
+ "percentage",
+ "unaligned",
+ "match-ids",
+ "match-names",
+ "showcustomvars",
+ "allow_past"
+ ]
+ },
+ "default": [
+ "seconds",
+ "jsonwrap"
+ ]
+ }
+ },
+ {
+ "name": "callback",
+ "in": "query",
+ "description": "For JSONP responses, the callback function name.",
+ "required": false,
+ "allowEmptyValue": true,
+ "schema": {
+ "type": "string"
+ }
+ },
+ {
+ "name": "filename",
+ "in": "query",
+ "description": "Add Content-Disposition: attachment; filename= header to the response, that will instruct the browser to save the response with the given filename.",
+ "required": false,
+ "allowEmptyValue": true,
+ "schema": {
+ "type": "string"
+ }
+ },
+ {
+ "name": "tqx",
+ "in": "query",
+ "description": "[Google Visualization API](https://developers.google.com/chart/interactive/docs/dev/implementing_data_source?hl=en) formatted parameter.",
+ "required": false,
+ "allowEmptyValue": true,
+ "schema": {
+ "type": "string"
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "The call was successful. The response includes the data in the format requested. Swagger2.0 does not process the discriminator field to show polymorphism. The response will be one of the sub-types of the data-schema according to the chosen format, e.g. json -> data_json.",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/data"
+ }
+ }
+ }
+ },
+ "400": {
+ "description": "Bad request - the body will include a message stating what is wrong."
+ },
+ "404": {
+ "description": "Chart or context is not found. The supplied chart or context will be reported."
+ },
+ "500": {
+ "description": "Internal server error. This usually means the server is out of memory."
+ }
+ }
+ }
+ },
+ "/badge.svg": {
+ "get": {
+ "summary": "Generate a badge in form of SVG image for a chart (or dimension)",
+ "description": "Successful responses are SVG images.",
+ "parameters": [
+ {
+ "name": "chart",
+ "in": "query",
+ "description": "The id of the chart as returned by the /charts call.",
+ "required": true,
+ "allowEmptyValue": false,
+ "schema": {
+ "type": "string",
+ "format": "as returned by /charts",
+ "default": "system.cpu"
+ }
+ },
+ {
+ "name": "alarm",
+ "in": "query",
+ "description": "The name of an alarm linked to the chart.",
+ "required": false,
+ "allowEmptyValue": true,
+ "schema": {
+ "type": "string",
+ "format": "any text"
+ }
+ },
+ {
+ "name": "dimension",
+ "in": "query",
+ "description": "Zero, one or more dimension ids, as returned by the /chart call.",
+ "required": false,
+ "allowEmptyValue": false,
+ "schema": {
+ "type": "array",
+ "items": {
+ "type": "string",
+ "format": "as returned by /charts"
+ }
+ }
+ },
+ {
+ "name": "after",
+ "in": "query",
+ "description": "This parameter can either be an absolute timestamp specifying the starting point of the data to be returned, or a relative number of seconds, to the last collected timestamp. Netdata will assume it is a relative number if it is smaller than the duration of the round robin database for this chart. So, if the round robin database is 3600 seconds, any value from -3600 to 3600 will trigger relative arithmetics. Netdata will adapt this parameter to the boundaries of the round robin database.",
+ "required": true,
+ "allowEmptyValue": false,
+ "schema": {
+ "type": "number",
+ "format": "integer",
+ "default": -600
+ }
+ },
+ {
+ "name": "before",
+ "in": "query",
+ "description": "This parameter can either be an absolute timestamp specifying the ending point of the data to be returned, or a relative number of seconds, to the last collected timestamp. Netdata will assume it is a relative number if it is smaller than the duration of the round robin database for this chart. So, if the round robin database is 3600 seconds, any value from -3600 to 3600 will trigger relative arithmetics. Netdata will adapt this parameter to the boundaries of the round robin database.",
+ "required": false,
+ "schema": {
+ "type": "number",
+ "format": "integer",
+ "default": 0
+ }
+ },
+ {
+ "name": "group",
+ "in": "query",
+ "description": "The grouping method. If multiple collected values are to be grouped in order to return fewer points, this parameters defines the method of grouping. methods are supported \"min\", \"max\", \"average\", \"sum\", \"incremental-sum\". \"max\" is actually calculated on the absolute value collected (so it works for both positive and negative dimesions to return the most extreme value in either direction).",
+ "required": true,
+ "allowEmptyValue": false,
+ "schema": {
+ "type": "string",
+ "enum": [
+ "min",
+ "max",
+ "average",
+ "median",
+ "stddev",
+ "sum",
+ "incremental-sum"
+ ],
+ "default": "average"
+ }
+ },
+ {
+ "name": "options",
+ "in": "query",
+ "description": "Options that affect data generation.",
+ "required": false,
+ "allowEmptyValue": true,
+ "schema": {
+ "type": "array",
+ "items": {
+ "type": "string",
+ "enum": [
+ "abs",
+ "absolute",
+ "display-absolute",
+ "absolute-sum",
+ "null2zero",
+ "percentage",
+ "unaligned"
+ ]
+ },
+ "default": [
+ "absolute"
+ ]
+ }
+ },
+ {
+ "name": "label",
+ "in": "query",
+ "description": "A text to be used as the label.",
+ "required": false,
+ "allowEmptyValue": true,
+ "schema": {
+ "type": "string",
+ "format": "any text"
+ }
+ },
+ {
+ "name": "units",
+ "in": "query",
+ "description": "A text to be used as the units.",
+ "required": false,
+ "allowEmptyValue": true,
+ "schema": {
+ "type": "string",
+ "format": "any text"
+ }
+ },
+ {
+ "name": "label_color",
+ "in": "query",
+ "description": "A color to be used for the background of the label side(left side) of the badge. One of predefined colors or specific color in hex `RGB` or `RRGGBB` format (without preceding `#` character). If value wrong or not given default color will be used.",
+ "required": false,
+ "allowEmptyValue": true,
+ "schema": {
+ "oneOf": [
+ {
+ "type": "string",
+ "enum": [
+ "green",
+ "brightgreen",
+ "yellow",
+ "yellowgreen",
+ "orange",
+ "red",
+ "blue",
+ "grey",
+ "gray",
+ "lightgrey",
+ "lightgray"
+ ]
+ },
+ {
+ "type": "string",
+ "format": "^([0-9a-fA-F]{3}|[0-9a-fA-F]{6})$"
+ }
+ ]
+ }
+ },
+ {
+ "name": "value_color",
+ "in": "query",
+ "description": "A color to be used for the background of the value *(right)* part of badge. You can set multiple using a pipe with a condition each, like this: `color<value|color:null` The following operators are supported: >, <, >=, <=, =, :null (to check if no value exists). Each color can be specified in same manner as for `label_color` parameter. Currently only integers are suported as values.",
+ "required": false,
+ "allowEmptyValue": true,
+ "schema": {
+ "type": "string",
+ "format": "any text"
+ }
+ },
+ {
+ "name": "text_color_lbl",
+ "in": "query",
+ "description": "Font color for label *(left)* part of the badge. One of predefined colors or as HTML hexadecimal color without preceeding `#` character. Formats allowed `RGB` or `RRGGBB`. If no or wrong value given default color will be used.",
+ "required": false,
+ "allowEmptyValue": true,
+ "schema": {
+ "oneOf": [
+ {
+ "type": "string",
+ "enum": [
+ "green",
+ "brightgreen",
+ "yellow",
+ "yellowgreen",
+ "orange",
+ "red",
+ "blue",
+ "grey",
+ "gray",
+ "lightgrey",
+ "lightgray"
+ ]
+ },
+ {
+ "type": "string",
+ "format": "^([0-9a-fA-F]{3}|[0-9a-fA-F]{6})$"
+ }
+ ]
+ }
+ },
+ {
+ "name": "text_color_val",
+ "in": "query",
+ "description": "Font color for value *(right)* part of the badge. One of predefined colors or as HTML hexadecimal color without preceeding `#` character. Formats allowed `RGB` or `RRGGBB`. If no or wrong value given default color will be used.",
+ "required": false,
+ "allowEmptyValue": true,
+ "schema": {
+ "oneOf": [
+ {
+ "type": "string",
+ "enum": [
+ "green",
+ "brightgreen",
+ "yellow",
+ "yellowgreen",
+ "orange",
+ "red",
+ "blue",
+ "grey",
+ "gray",
+ "lightgrey",
+ "lightgray"
+ ]
+ },
+ {
+ "type": "string",
+ "format": "^([0-9a-fA-F]{3}|[0-9a-fA-F]{6})$"
+ }
+ ]
+ }
+ },
+ {
+ "name": "multiply",
+ "in": "query",
+ "description": "Multiply the value with this number for rendering it at the image (integer value required).",
+ "required": false,
+ "allowEmptyValue": true,
+ "schema": {
+ "type": "number",
+ "format": "integer"
+ }
+ },
+ {
+ "name": "divide",
+ "in": "query",
+ "description": "Divide the value with this number for rendering it at the image (integer value required).",
+ "required": false,
+ "allowEmptyValue": true,
+ "schema": {
+ "type": "number",
+ "format": "integer"
+ }
+ },
+ {
+ "name": "scale",
+ "in": "query",
+ "description": "Set the scale of the badge (greater or equal to 100).",
+ "required": false,
+ "allowEmptyValue": true,
+ "schema": {
+ "type": "number",
+ "format": "integer"
+ }
+ },
+ {
+ "name": "fixed_width_lbl",
+ "in": "query",
+ "description": "This parameter overrides auto-sizing of badge and creates it with fixed width. This parameter determines the size of the label's left side *(label/name)*. You must set this parameter together with `fixed_width_val` otherwise it will be ignored. You should set the label/value widths wide enough to provide space for all the possible values/contents of the badge you're requesting. In case the text cannot fit the space given it will be clipped. The `scale` parameter still applies on the values you give to `fixed_width_lbl` and `fixed_width_val`.",
+ "required": false,
+ "allowEmptyValue": false,
+ "schema": {
+ "type": "number",
+ "format": "integer"
+ }
+ },
+ {
+ "name": "fixed_width_val",
+ "in": "query",
+ "description": "This parameter overrides auto-sizing of badge and creates it with fixed width. This parameter determines the size of the label's right side *(value)*. You must set this parameter together with `fixed_width_lbl` otherwise it will be ignored. You should set the label/value widths wide enough to provide space for all the possible values/contents of the badge you're requesting. In case the text cannot fit the space given it will be clipped. The `scale` parameter still applies on the values you give to `fixed_width_lbl` and `fixed_width_val`.",
+ "required": false,
+ "allowEmptyValue": false,
+ "schema": {
+ "type": "number",
+ "format": "integer"
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "The call was successful. The response should be an SVG image."
+ },
+ "400": {
+ "description": "Bad request - the body will include a message stating what is wrong."
+ },
+ "404": {
+ "description": "No chart with the given id is found."
+ },
+ "500": {
+ "description": "Internal server error. This usually means the server is out of memory."
+ }
+ }
+ }
+ },
+ "/allmetrics": {
+ "get": {
+ "summary": "Get a value of all the metrics maintained by netdata",
+ "description": "The allmetrics endpoint returns the latest value of all charts and dimensions stored in the netdata server.",
+ "parameters": [
+ {
+ "name": "format",
+ "in": "query",
+ "description": "The format of the response to be returned.",
+ "required": true,
+ "schema": {
+ "type": "string",
+ "enum": [
+ "shell",
+ "prometheus",
+ "prometheus_all_hosts",
+ "json"
+ ],
+ "default": "shell"
+ }
+ },
+ {
+ "name": "variables",
+ "in": "query",
+ "description": "When enabled, netdata will expose various system configuration metrics.",
+ "required": false,
+ "schema": {
+ "type": "string",
+ "enum": [
+ "yes",
+ "no"
+ ],
+ "default": "no"
+ }
+ },
+ {
+ "name": "help",
+ "in": "query",
+ "description": "Enable or disable HELP lines in prometheus output.",
+ "required": false,
+ "schema": {
+ "type": "string",
+ "enum": [
+ "yes",
+ "no"
+ ],
+ "default": "no"
+ }
+ },
+ {
+ "name": "types",
+ "in": "query",
+ "description": "Enable or disable TYPE lines in prometheus output.",
+ "required": false,
+ "schema": {
+ "type": "string",
+ "enum": [
+ "yes",
+ "no"
+ ],
+ "default": "no"
+ }
+ },
+ {
+ "name": "timestamps",
+ "in": "query",
+ "description": "Enable or disable timestamps in prometheus output.",
+ "required": false,
+ "schema": {
+ "type": "string",
+ "enum": [
+ "yes",
+ "no"
+ ],
+ "default": "yes"
+ }
+ },
+ {
+ "name": "names",
+ "in": "query",
+ "description": "When enabled netdata will report dimension names. When disabled netdata will report dimension IDs. The default is controlled in netdata.conf.",
+ "required": false,
+ "schema": {
+ "type": "string",
+ "enum": [
+ "yes",
+ "no"
+ ],
+ "default": "yes"
+ }
+ },
+ {
+ "name": "oldunits",
+ "in": "query",
+ "description": "When enabled, netdata will show metric names for the default source=average as they appeared before 1.12, by using the legacy unit naming conventions.",
+ "required": false,
+ "schema": {
+ "type": "string",
+ "enum": [
+ "yes",
+ "no"
+ ],
+ "default": "yes"
+ }
+ },
+ {
+ "name": "hideunits",
+ "in": "query",
+ "description": "When enabled, netdata will not include the units in the metric names, for the default source=average.",
+ "required": false,
+ "schema": {
+ "type": "string",
+ "enum": [
+ "yes",
+ "no"
+ ],
+ "default": "yes"
+ }
+ },
+ {
+ "name": "server",
+ "in": "query",
+ "description": "Set a distinct name of the client querying prometheus metrics. Netdata will use the client IP if this is not set.",
+ "required": false,
+ "schema": {
+ "type": "string",
+ "format": "any text"
+ }
+ },
+ {
+ "name": "prefix",
+ "in": "query",
+ "description": "Prefix all prometheus metrics with this string.",
+ "required": false,
+ "schema": {
+ "type": "string",
+ "format": "any text"
+ }
+ },
+ {
+ "name": "data",
+ "in": "query",
+ "description": "Select the prometheus response data source. There is a setting in netdata.conf for the default.",
+ "required": false,
+ "schema": {
+ "type": "string",
+ "enum": [
+ "as-collected",
+ "average",
+ "sum"
+ ],
+ "default": "average"
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "All the metrics returned in the format requested."
+ },
+ "400": {
+ "description": "The format requested is not supported."
+ }
+ }
+ }
+ },
+ "/alarms": {
+ "get": {
+ "summary": "Get a list of active or raised alarms on the server",
+ "description": "The alarms endpoint returns the list of all raised or enabled alarms on the netdata server. Called without any parameters, the raised alarms in state WARNING or CRITICAL are returned. By passing \"?all\", all the enabled alarms are returned.",
+ "parameters": [
+ {
+ "name": "all",
+ "in": "query",
+ "description": "If passed, all enabled alarms are returned.",
+ "required": false,
+ "allowEmptyValue": true,
+ "schema": {
+ "type": "boolean"
+ }
+ },
+ {
+ "name": "active",
+ "in": "query",
+ "description": "If passed, the raised alarms in state WARNING or CRITICAL are returned.",
+ "required": false,
+ "allowEmptyValue": true,
+ "schema": {
+ "type": "boolean"
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "An object containing general info and a linked list of alarms.",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/alarms"
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "/alarms_values": {
+ "get": {
+ "summary": "Get a list of active or raised alarms on the server",
+ "description": "The alarms_values endpoint returns the list of all raised or enabled alarms on the netdata server. Called without any parameters, the raised alarms in state WARNING or CRITICAL are returned. By passing \"?all\", all the enabled alarms are returned. This option output differs from `/alarms` in the number of variables delivered. This endpoint gives to user `id`, `value` and alarm `status`.",
+ "parameters": [
+ {
+ "name": "all",
+ "in": "query",
+ "description": "If passed, all enabled alarms are returned.",
+ "required": false,
+ "allowEmptyValue": true,
+ "schema": {
+ "type": "boolean"
+ }
+ },
+ {
+ "name": "active",
+ "in": "query",
+ "description": "If passed, the raised alarms in state WARNING or CRITICAL are returned.",
+ "required": false,
+ "allowEmptyValue": true,
+ "schema": {
+ "type": "boolean"
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "An object containing general info and a linked list of alarms.",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/alarms_values"
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "/alarm_log": {
+ "get": {
+ "summary": "Retrieves the entries of the alarm log",
+ "description": "Returns an array of alarm_log entries, with historical information on raised and cleared alarms.",
+ "parameters": [
+ {
+ "name": "after",
+ "in": "query",
+ "description": "Passing the parameter after=UNIQUEID returns all the events in the alarm log that occurred after UNIQUEID. An automated series of calls would call the interface once without after=, store the last UNIQUEID of the returned set, and give it back to get incrementally the next events.",
+ "required": false,
+ "schema": {
+ "type": "integer"
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "An array of alarm log entries.",
+ "content": {
+ "application/json": {
+ "schema": {
+ "type": "array",
+ "items": {
+ "$ref": "#/components/schemas/alarm_log_entry"
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "/alarm_count": {
+ "get": {
+ "summary": "Get an overall status of the chart",
+ "description": "Checks multiple charts with the same context and counts number of alarms with given status.",
+ "parameters": [
+ {
+ "in": "query",
+ "name": "context",
+ "description": "Specify context which should be checked.",
+ "required": false,
+ "allowEmptyValue": true,
+ "schema": {
+ "type": "array",
+ "items": {
+ "type": "string"
+ },
+ "default": [
+ "system.cpu"
+ ]
+ }
+ },
+ {
+ "in": "query",
+ "name": "status",
+ "description": "Specify alarm status to count.",
+ "required": false,
+ "allowEmptyValue": true,
+ "schema": {
+ "type": "string",
+ "enum": [
+ "REMOVED",
+ "UNDEFINED",
+ "UNINITIALIZED",
+ "CLEAR",
+ "RAISED",
+ "WARNING",
+ "CRITICAL"
+ ],
+ "default": "RAISED"
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "An object containing a count of alarms with given status for given contexts.",
+ "content": {
+ "application/json": {
+ "schema": {
+ "type": "array",
+ "items": {
+ "type": "number"
+ }
+ }
+ }
+ }
+ },
+ "500": {
+ "description": "Internal server error. This usually means the server is out of memory."
+ }
+ }
+ }
+ },
+ "/manage/health": {
+ "get": {
+ "summary": "Accesses the health management API to control health checks and notifications at runtime.",
+ "description": "Available from Netdata v1.12 and above, protected via bearer authorization. Especially useful for maintenance periods, the API allows you to disable health checks completely, silence alarm notifications, or Disable/Silence specific alarms that match selectors on alarm/template name, chart, context, host and family. For the simple disable/silence all scenaria, only the cmd parameter is required. The other parameters are used to define alarm selectors. For more information and examples, refer to the netdata documentation.",
+ "parameters": [
+ {
+ "name": "cmd",
+ "in": "query",
+ "description": "DISABLE ALL: No alarm criteria are evaluated, nothing is written in the alarm log. SILENCE ALL: No notifications are sent. RESET: Return to the default state. DISABLE/SILENCE: Set the mode to be used for the alarms matching the criteria of the alarm selectors. LIST: Show active configuration.",
+ "required": false,
+ "schema": {
+ "type": "string",
+ "enum": [
+ "DISABLE ALL",
+ "SILENCE ALL",
+ "DISABLE",
+ "SILENCE",
+ "RESET",
+ "LIST"
+ ]
+ }
+ },
+ {
+ "name": "alarm",
+ "in": "query",
+ "description": "The expression provided will match both `alarm` and `template` names.",
+ "schema": {
+ "type": "string"
+ }
+ },
+ {
+ "name": "chart",
+ "in": "query",
+ "description": "Chart ids/names, as shown on the dashboard. These will match the `on` entry of a configured `alarm`.",
+ "schema": {
+ "type": "string"
+ }
+ },
+ {
+ "name": "context",
+ "in": "query",
+ "description": "Chart context, as shown on the dashboard. These will match the `on` entry of a configured `template`.",
+ "schema": {
+ "type": "string"
+ }
+ },
+ {
+ "name": "hosts",
+ "in": "query",
+ "description": "The hostnames that will need to match.",
+ "schema": {
+ "type": "string"
+ }
+ },
+ {
+ "name": "families",
+ "in": "query",
+ "description": "The alarm families.",
+ "schema": {
+ "type": "string"
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "A plain text response based on the result of the command."
+ },
+ "403": {
+ "description": "Bearer authentication error."
+ }
+ }
+ }
+ }
+ },
+ "servers": [
+ {
+ "url": "https://registry.my-netdata.io/api/v1"
+ },
+ {
+ "url": "http://registry.my-netdata.io/api/v1"
+ }
+ ],
+ "components": {
+ "schemas": {
+ "info": {
+ "type": "object",
+ "properties": {
+ "version": {
+ "type": "string",
+ "description": "netdata version of the server.",
+ "example": "1.11.1_rolling"
+ },
+ "uid": {
+ "type": "string",
+ "description": "netdata unique id of the server.",
+ "example": "24e9fe3c-f2ac-11e8-bafc-0242ac110002"
+ },
+ "mirrored_hosts": {
+ "type": "array",
+ "description": "List of hosts mirrored of the server (include itself).",
+ "items": {
+ "type": "string"
+ },
+ "example": [
+ "host1.example.com",
+ "host2.example.com"
+ ]
+ },
+ "mirrored_hosts_status": {
+ "type": "array",
+ "description": "List of details of hosts mirrored to this served (including self). Indexes correspond to indexes in \"mirrored_hosts\".",
+ "items": {
+ "type": "object",
+ "description": "Host data",
+ "properties": {
+ "guid": {
+ "type": "string",
+ "format": "uuid",
+ "nullable": false,
+ "description": "Host unique GUID from `netdata.public.unique.id`.",
+ "example": "245e4bff-3b34-47c1-a6e5-5c535a9abfb2"
+ },
+ "reachable": {
+ "type": "boolean",
+ "nullable": false,
+ "description": "Current state of streaming. Always true for localhost/self."
+ },
+ "claim_id": {
+ "type": "string",
+ "format": "uuid",
+ "nullable": true,
+ "description": "Cloud GUID/identifier in case the host is claimed. If child status unknown or unclaimed this field is set to `null`",
+ "example": "c3b2a66a-3052-498c-ac52-7fe9e8cccb0c"
+ }
+ }
+ }
+ },
+ "os_name": {
+ "type": "string",
+ "description": "Operating System Name.",
+ "example": "Manjaro Linux"
+ },
+ "os_id": {
+ "type": "string",
+ "description": "Operating System ID.",
+ "example": "manjaro"
+ },
+ "os_id_like": {
+ "type": "string",
+ "description": "Known OS similar to this OS.",
+ "example": "arch"
+ },
+ "os_version": {
+ "type": "string",
+ "description": "Operating System Version.",
+ "example": "18.0.4"
+ },
+ "os_version_id": {
+ "type": "string",
+ "description": "Operating System Version ID.",
+ "example": "unknown"
+ },
+ "os_detection": {
+ "type": "string",
+ "description": "OS parameters detection method.",
+ "example": "Mixed"
+ },
+ "kernel_name": {
+ "type": "string",
+ "description": "Kernel Name.",
+ "example": "Linux"
+ },
+ "kernel_version": {
+ "type": "string",
+ "description": "Kernel Version.",
+ "example": "4.19.32-1-MANJARO"
+ },
+ "is_k8s_node": {
+ "type": "boolean",
+ "description": "Netdata is running on a K8s node.",
+ "example": false
+ },
+ "architecture": {
+ "type": "string",
+ "description": "Kernel architecture.",
+ "example": "x86_64"
+ },
+ "virtualization": {
+ "type": "string",
+ "description": "Virtualization Type.",
+ "example": "kvm"
+ },
+ "virt_detection": {
+ "type": "string",
+ "description": "Virtualization detection method.",
+ "example": "systemd-detect-virt"
+ },
+ "container": {
+ "type": "string",
+ "description": "Container technology.",
+ "example": "docker"
+ },
+ "container_detection": {
+ "type": "string",
+ "description": "Container technology detection method.",
+ "example": "dockerenv"
+ },
+ "labels": {
+ "type": "object",
+ "description": "List of host labels.",
+ "properties": {
+ "app": {
+ "type": "string",
+ "description": "Host label.",
+ "example": "netdata"
+ }
+ }
+ },
+ "collectors": {
+ "type": "array",
+ "items": {
+ "type": "object",
+ "description": "Array of collector plugins and modules.",
+ "properties": {
+ "plugin": {
+ "type": "string",
+ "description": "Collector plugin.",
+ "example": "python.d.plugin"
+ },
+ "module": {
+ "type": "string",
+ "description": "Module of the collector plugin.",
+ "example": "dockerd"
+ }
+ }
+ }
+ },
+ "alarms": {
+ "type": "object",
+ "description": "Number of alarms in the server.",
+ "properties": {
+ "normal": {
+ "type": "integer",
+ "description": "Number of alarms in normal state."
+ },
+ "warning": {
+ "type": "integer",
+ "description": "Number of alarms in warning state."
+ },
+ "critical": {
+ "type": "integer",
+ "description": "Number of alarms in critical state."
+ }
+ }
+ }
+ }
+ },
+ "chart_summary": {
+ "type": "object",
+ "properties": {
+ "hostname": {
+ "type": "string",
+ "description": "The hostname of the netdata server."
+ },
+ "version": {
+ "type": "string",
+ "description": "netdata version of the server."
+ },
+ "release_channel": {
+ "type": "string",
+ "description": "The release channel of the build on the server.",
+ "example": "nightly"
+ },
+ "timezone": {
+ "type": "string",
+ "description": "The current timezone on the server."
+ },
+ "os": {
+ "type": "string",
+ "description": "The netdata server host operating system.",
+ "enum": [
+ "macos",
+ "linux",
+ "freebsd"
+ ]
+ },
+ "history": {
+ "type": "number",
+ "description": "The duration, in seconds, of the round robin database maintained by netdata."
+ },
+ "memory_mode": {
+ "type": "string",
+ "description": "The name of the database memory mode on the server."
+ },
+ "update_every": {
+ "type": "number",
+ "description": "The default update frequency of the netdata server. All charts have an update frequency equal or bigger than this."
+ },
+ "charts": {
+ "type": "object",
+ "description": "An object containing all the chart objects available at the netdata server. This is used as an indexed array. The key of each chart object is the id of the chart.",
+ "additionalProperties": {
+ "$ref": "#/components/schemas/chart"
+ }
+ },
+ "charts_count": {
+ "type": "number",
+ "description": "The number of charts."
+ },
+ "dimensions_count": {
+ "type": "number",
+ "description": "The total number of dimensions."
+ },
+ "alarms_count": {
+ "type": "number",
+ "description": "The number of alarms."
+ },
+ "rrd_memory_bytes": {
+ "type": "number",
+ "description": "The size of the round robin database in bytes."
+ }
+ }
+ },
+ "chart": {
+ "type": "object",
+ "properties": {
+ "id": {
+ "type": "string",
+ "description": "The unique id of the chart."
+ },
+ "name": {
+ "type": "string",
+ "description": "The name of the chart."
+ },
+ "type": {
+ "type": "string",
+ "description": "The type of the chart. Types are not handled by netdata. You can use this field for anything you like."
+ },
+ "family": {
+ "type": "string",
+ "description": "The family of the chart. Families are not handled by netdata. You can use this field for anything you like."
+ },
+ "title": {
+ "type": "string",
+ "description": "The title of the chart."
+ },
+ "priority": {
+ "type": "number",
+ "description": "The relative priority of the chart. NetData does not care about priorities. This is just an indication of importance for the chart viewers to sort charts of higher priority (lower number) closer to the top. Priority sorting should only be used among charts of the same type or family."
+ },
+ "enabled": {
+ "type": "boolean",
+ "description": "True when the chart is enabled. Disabled charts do not currently collect values, but they may have historical values available."
+ },
+ "units": {
+ "type": "string",
+ "description": "The unit of measurement for the values of all dimensions of the chart."
+ },
+ "data_url": {
+ "type": "string",
+ "description": "The absolute path to get data values for this chart. You are expected to use this path as the base when constructing the URL to fetch data values for this chart."
+ },
+ "chart_type": {
+ "type": "string",
+ "description": "The chart type.",
+ "enum": [
+ "line",
+ "area",
+ "stacked"
+ ]
+ },
+ "duration": {
+ "type": "number",
+ "description": "The duration, in seconds, of the round robin database maintained by netdata."
+ },
+ "first_entry": {
+ "type": "number",
+ "description": "The UNIX timestamp of the first entry (the oldest) in the round robin database."
+ },
+ "last_entry": {
+ "type": "number",
+ "description": "The UNIX timestamp of the latest entry in the round robin database."
+ },
+ "update_every": {
+ "type": "number",
+ "description": "The update frequency of this chart, in seconds. One value every this amount of time is kept in the round robin database."
+ },
+ "dimensions": {
+ "type": "object",
+ "description": "An object containing all the chart dimensions available for the chart. This is used as an indexed array. For each pair in the dictionary: the key is the id of the dimension and the value is a dictionary containing the name.",
+ "additionalProperties": {
+ "type": "object",
+ "properties": {
+ "name": {
+ "type": "string",
+ "description": "The name of the dimension"
+ }
+ }
+ }
+ },
+ "chart_variables": {
+ "type": "object",
+ "additionalProperties": {
+ "$ref": "#/components/schemas/chart_variables"
+ }
+ },
+ "green": {
+ "type": "number",
+ "nullable": true,
+ "description": "Chart health green threshold."
+ },
+ "red": {
+ "type": "number",
+ "nullable": true,
+ "description": "Chart health red threshold."
+ }
+ }
+ },
+ "alarm_variables": {
+ "type": "object",
+ "properties": {
+ "chart": {
+ "type": "string",
+ "description": "The unique id of the chart."
+ },
+ "chart_name": {
+ "type": "string",
+ "description": "The name of the chart."
+ },
+ "cnart_context": {
+ "type": "string",
+ "description": "The context of the chart. It is shared across multiple monitored software or hardware instances and used in alarm templates."
+ },
+ "family": {
+ "type": "string",
+ "description": "The family of the chart."
+ },
+ "host": {
+ "type": "string",
+ "description": "The host containing the chart."
+ },
+ "chart_variables": {
+ "type": "object",
+ "additionalProperties": {
+ "$ref": "#/components/schemas/chart_variables"
+ }
+ },
+ "family_variables": {
+ "type": "object",
+ "properties": {
+ "varname1": {
+ "type": "number",
+ "format": "float"
+ },
+ "varname2": {
+ "type": "number",
+ "format": "float"
+ }
+ }
+ },
+ "host_variables": {
+ "type": "object",
+ "properties": {
+ "varname1": {
+ "type": "number",
+ "format": "float"
+ },
+ "varname2": {
+ "type": "number",
+ "format": "float"
+ }
+ }
+ }
+ }
+ },
+ "chart_variables": {
+ "type": "object",
+ "properties": {
+ "varname1": {
+ "type": "number",
+ "format": "float"
+ },
+ "varname2": {
+ "type": "number",
+ "format": "float"
+ }
+ }
+ },
+ "data": {
+ "type": "object",
+ "discriminator": {
+ "propertyName": "format"
+ },
+ "description": "Response will contain the appropriate subtype, e.g. data_json depending on the requested format.",
+ "properties": {
+ "api": {
+ "type": "number",
+ "description": "The API version this conforms to, currently 1."
+ },
+ "id": {
+ "type": "string",
+ "description": "The unique id of the chart."
+ },
+ "name": {
+ "type": "string",
+ "description": "The name of the chart."
+ },
+ "update_every": {
+ "type": "number",
+ "description": "The update frequency of this chart, in seconds. One value every this amount of time is kept in the round robin database (indepedently of the current view)."
+ },
+ "view_update_every": {
+ "type": "number",
+ "description": "The current view appropriate update frequency of this chart, in seconds. There is no point to request chart refreshes, using the same settings, more frequently than this."
+ },
+ "first_entry": {
+ "type": "number",
+ "description": "The UNIX timestamp of the first entry (the oldest) in the round robin database (indepedently of the current view)."
+ },
+ "last_entry": {
+ "type": "number",
+ "description": "The UNIX timestamp of the latest entry in the round robin database (indepedently of the current view)."
+ },
+ "after": {
+ "type": "number",
+ "description": "The UNIX timestamp of the first entry (the oldest) returned in this response."
+ },
+ "before": {
+ "type": "number",
+ "description": "The UNIX timestamp of the latest entry returned in this response."
+ },
+ "min": {
+ "type": "number",
+ "description": "The minimum value returned in the current view. This can be used to size the y-series of the chart."
+ },
+ "max": {
+ "type": "number",
+ "description": "The maximum value returned in the current view. This can be used to size the y-series of the chart."
+ },
+ "dimension_names": {
+ "description": "The dimension names of the chart as returned in the current view.",
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ },
+ "dimension_ids": {
+ "description": "The dimension IDs of the chart as returned in the current view.",
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ },
+ "latest_values": {
+ "description": "The latest values collected for the chart (indepedently of the current view).",
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ },
+ "view_latest_values": {
+ "description": "The latest values returned with this response.",
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ },
+ "dimensions": {
+ "type": "number",
+ "description": "The number of dimensions returned."
+ },
+ "points": {
+ "type": "number",
+ "description": "The number of rows / points returned."
+ },
+ "format": {
+ "type": "string",
+ "description": "The format of the result returned."
+ },
+ "chart_variables": {
+ "type": "object",
+ "additionalProperties": {
+ "$ref": "#/components/schemas/chart_variables"
+ }
+ }
+ }
+ },
+ "data_json": {
+ "description": "Data response in json format.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/data"
+ },
+ {
+ "properties": {
+ "result": {
+ "type": "object",
+ "properties": {
+ "labels": {
+ "description": "The dimensions retrieved from the chart.",
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ },
+ "data": {
+ "description": "The data requested, one element per sample with each element containing the values of the dimensions described in the labels value.",
+ "type": "array",
+ "items": {
+ "type": "number"
+ }
+ }
+ },
+ "description": "The result requested, in the format requested."
+ }
+ }
+ }
+ ]
+ },
+ "data_flat": {
+ "description": "Data response in csv / tsv / tsv-excel / ssv / ssv-comma / markdown / html formats.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/data"
+ },
+ {
+ "properties": {
+ "result": {
+ "type": "string"
+ }
+ }
+ }
+ ]
+ },
+ "data_array": {
+ "description": "Data response in array format.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/data"
+ },
+ {
+ "properties": {
+ "result": {
+ "type": "array",
+ "items": {
+ "type": "number"
+ }
+ }
+ }
+ }
+ ]
+ },
+ "data_csvjsonarray": {
+ "description": "Data response in csvjsonarray format.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/data"
+ },
+ {
+ "properties": {
+ "result": {
+ "description": "The first inner array contains strings showing the labels of each column, each subsequent array contains the values for each point in time.",
+ "type": "array",
+ "items": {
+ "type": "array",
+ "items": {}
+ }
+ }
+ }
+ }
+ ]
+ },
+ "data_datatable": {
+ "description": "Data response in datatable / datasource formats (suitable for Google Charts).",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/data"
+ },
+ {
+ "properties": {
+ "result": {
+ "type": "object",
+ "properties": {
+ "cols": {
+ "type": "array",
+ "items": {
+ "type": "object",
+ "properties": {
+ "id": {
+ "description": "Always empty - for future use."
+ },
+ "label": {
+ "description": "The dimension returned from the chart."
+ },
+ "pattern": {
+ "description": "Always empty - for future use."
+ },
+ "type": {
+ "description": "The type of data in the column / chart-dimension."
+ },
+ "p": {
+ "description": "Contains any annotations for the column."
+ }
+ },
+ "required": [
+ "id",
+ "label",
+ "pattern",
+ "type"
+ ]
+ }
+ },
+ "rows": {
+ "type": "array",
+ "items": {
+ "type": "object",
+ "properties": {
+ "c": {
+ "type": "array",
+ "items": {
+ "properties": {
+ "v": {
+ "description": "Each value in the row is represented by an object named `c` with five v fields: data, null, null, 0, the value. This format is fixed by the Google Charts API."
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ ]
+ },
+ "alarms": {
+ "type": "object",
+ "properties": {
+ "hostname": {
+ "type": "string"
+ },
+ "latest_alarm_log_unique_id": {
+ "type": "integer",
+ "format": "int32"
+ },
+ "status": {
+ "type": "boolean"
+ },
+ "now": {
+ "type": "integer",
+ "format": "int32"
+ },
+ "alarms": {
+ "type": "object",
+ "properties": {
+ "chart-name.alarm-name": {
+ "type": "object",
+ "properties": {
+ "id": {
+ "type": "integer",
+ "format": "int32"
+ },
+ "name": {
+ "type": "string",
+ "description": "Full alarm name."
+ },
+ "chart": {
+ "type": "string"
+ },
+ "family": {
+ "type": "string"
+ },
+ "active": {
+ "type": "boolean",
+ "description": "Will be false only if the alarm is disabled in the configuration."
+ },
+ "disabled": {
+ "type": "boolean",
+ "description": "Whether the health check for this alarm has been disabled via a health command API DISABLE command."
+ },
+ "silenced": {
+ "type": "boolean",
+ "description": "Whether notifications for this alarm have been silenced via a health command API SILENCE command."
+ },
+ "exec": {
+ "type": "string"
+ },
+ "recipient": {
+ "type": "string"
+ },
+ "source": {
+ "type": "string"
+ },
+ "units": {
+ "type": "string"
+ },
+ "info": {
+ "type": "string"
+ },
+ "status": {
+ "type": "string"
+ },
+ "last_status_change": {
+ "type": "integer",
+ "format": "int32"
+ },
+ "last_updated": {
+ "type": "integer",
+ "format": "int32"
+ },
+ "next_update": {
+ "type": "integer",
+ "format": "int32"
+ },
+ "update_every": {
+ "type": "integer",
+ "format": "int32"
+ },
+ "delay_up_duration": {
+ "type": "integer",
+ "format": "int32"
+ },
+ "delay_down_duration": {
+ "type": "integer",
+ "format": "int32"
+ },
+ "delay_max_duration": {
+ "type": "integer",
+ "format": "int32"
+ },
+ "delay_multiplier": {
+ "type": "integer",
+ "format": "int32"
+ },
+ "delay": {
+ "type": "integer",
+ "format": "int32"
+ },
+ "delay_up_to_timestamp": {
+ "type": "integer",
+ "format": "int32"
+ },
+ "value_string": {
+ "type": "string"
+ },
+ "no_clear_notification": {
+ "type": "boolean"
+ },
+ "lookup_dimensions": {
+ "type": "string"
+ },
+ "db_after": {
+ "type": "integer",
+ "format": "int32"
+ },
+ "db_before": {
+ "type": "integer",
+ "format": "int32"
+ },
+ "lookup_method": {
+ "type": "string"
+ },
+ "lookup_after": {
+ "type": "integer",
+ "format": "int32"
+ },
+ "lookup_before": {
+ "type": "integer",
+ "format": "int32"
+ },
+ "lookup_options": {
+ "type": "string"
+ },
+ "calc": {
+ "type": "string"
+ },
+ "calc_parsed": {
+ "type": "string"
+ },
+ "warn": {
+ "type": "string"
+ },
+ "warn_parsed": {
+ "type": "string"
+ },
+ "crit": {
+ "type": "string"
+ },
+ "crit_parsed": {
+ "type": "string"
+ },
+ "warn_repeat_every": {
+ "type": "integer",
+ "format": "int32"
+ },
+ "crit_repeat_every": {
+ "type": "integer",
+ "format": "int32"
+ },
+ "green": {
+ "type": "string",
+ "format": "nullable"
+ },
+ "red": {
+ "type": "string",
+ "format": "nullable"
+ },
+ "value": {
+ "type": "number"
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "alarm_log_entry": {
+ "type": "object",
+ "properties": {
+ "hostname": {
+ "type": "string"
+ },
+ "unique_id": {
+ "type": "integer",
+ "format": "int32"
+ },
+ "alarm_id": {
+ "type": "integer",
+ "format": "int32"
+ },
+ "alarm_event_id": {
+ "type": "integer",
+ "format": "int32"
+ },
+ "name": {
+ "type": "string"
+ },
+ "chart": {
+ "type": "string"
+ },
+ "family": {
+ "type": "string"
+ },
+ "processed": {
+ "type": "boolean"
+ },
+ "updated": {
+ "type": "boolean"
+ },
+ "exec_run": {
+ "type": "integer",
+ "format": "int32"
+ },
+ "exec_failed": {
+ "type": "boolean"
+ },
+ "exec": {
+ "type": "string"
+ },
+ "recipient": {
+ "type": "string"
+ },
+ "exec_code": {
+ "type": "integer",
+ "format": "int32"
+ },
+ "source": {
+ "type": "string"
+ },
+ "units": {
+ "type": "string"
+ },
+ "when": {
+ "type": "integer",
+ "format": "int32"
+ },
+ "duration": {
+ "type": "integer",
+ "format": "int32"
+ },
+ "non_clear_duration": {
+ "type": "integer",
+ "format": "int32"
+ },
+ "status": {
+ "type": "string"
+ },
+ "old_status": {
+ "type": "string"
+ },
+ "delay": {
+ "type": "integer",
+ "format": "int32"
+ },
+ "delay_up_to_timestamp": {
+ "type": "integer",
+ "format": "int32"
+ },
+ "updated_by_id": {
+ "type": "integer",
+ "format": "int32"
+ },
+ "updates_id": {
+ "type": "integer",
+ "format": "int32"
+ },
+ "value_string": {
+ "type": "string"
+ },
+ "old_value_string": {
+ "type": "string"
+ },
+ "silenced": {
+ "type": "string"
+ },
+ "info": {
+ "type": "string"
+ },
+ "value": {
+ "type": "number",
+ "nullable": true
+ },
+ "old_value": {
+ "type": "number",
+ "nullable": true
+ }
+ }
+ },
+ "alarms_values": {
+ "type": "object",
+ "properties": {
+ "hostname": {
+ "type": "string"
+ },
+ "alarms": {
+ "type": "object",
+ "description": "HashMap with keys being alarm names",
+ "additionalProperties": {
+ "type": "object",
+ "properties": {
+ "id": {
+ "type": "integer"
+ },
+ "value": {
+ "type": "integer"
+ },
+ "status": {
+ "type": "string",
+ "enum": [
+ "REMOVED",
+ "UNDEFINED",
+ "UNINITIALIZED",
+ "CLEAR",
+ "RAISED",
+ "WARNING",
+ "CRITICAL",
+ "UNKNOWN"
+ ]
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/web/api/netdata-swagger.yaml b/web/api/netdata-swagger.yaml
new file mode 100644
index 0000000..7482742
--- /dev/null
+++ b/web/api/netdata-swagger.yaml
@@ -0,0 +1,1611 @@
+openapi: 3.0.0
+info:
+ title: NetData API
+ description: Real-time performance and health monitoring.
+ version: 1.11.1_rolling
+paths:
+ /info:
+ get:
+ summary: Get netdata basic information
+ description: |
+ The info endpoint returns basic information about netdata. It provides:
+ * netdata version
+ * netdata unique id
+ * list of hosts mirrored (includes itself)
+ * Operating System, Virtualization, K8s nodes and Container technology information
+ * List of active collector plugins and modules
+ * number of alarms in the host
+ * number of alarms in normal state
+ * number of alarms in warning state
+ * number of alarms in critical state
+ responses:
+ "200":
+ description: netdata basic information.
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/info"
+ "503":
+ description: netdata daemon not ready (used for health checks).
+ /charts:
+ get:
+ summary: Get a list of all charts available at the server
+ description: The charts endpoint returns a summary about all charts stored in the
+ netdata server.
+ responses:
+ "200":
+ description: An array of charts.
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/chart_summary"
+ /chart:
+ get:
+ summary: Get info about a specific chart
+ description: The Chart endpoint returns detailed information about a chart.
+ parameters:
+ - name: chart
+ in: query
+ description: The id of the chart as returned by the /charts call.
+ required: true
+ schema:
+ type: string
+ format: as returned by /charts
+ default: system.cpu
+ responses:
+ "200":
+ description: A JavaScript object with detailed information about the chart.
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/chart"
+ "400":
+ description: No chart id was supplied in the request.
+ "404":
+ description: No chart with the given id is found.
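+ # Illustrative request only (the host and port are assumptions; 19999 is the
+ # usual default of a local agent):
+ # curl 'http://localhost:19999/api/v1/chart?chart=system.cpu'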
+ /alarm_variables:
+ get:
+ summary: List variables available to configure alarms for a chart
+ description: Returns the basic information of a chart and all the variables that can
+ be used in alarm and template health configurations for the particular
+ chart or family.
+ parameters:
+ - name: chart
+ in: query
+ description: The id of the chart as returned by the /charts call.
+ required: true
+ schema:
+ type: string
+ format: as returned by /charts
+ default: system.cpu
+ responses:
+ "200":
+ description: A JavaScript object with information about the chart and the
+ available variables.
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/alarm_variables"
+ "400":
+ description: Bad request - the body will include a message stating what is wrong.
+ "404":
+ description: No chart with the given id is found.
+ "500":
+ description: Internal server error. This usually means the server is out of
+ memory.
+ /data:
+ get:
+ summary: Get collected data for a specific chart
+ description: The data endpoint returns data stored in the round robin database of a
+ chart.
+ parameters:
+ - name: chart
+ in: query
+ description: The id of the chart as returned by the /charts call. Note that either chart or context must be specified.
+ required: false
+ allowEmptyValue: false
+ schema:
+ type: string
+ format: as returned by /charts
+ default: system.cpu
+ - name: context
+ in: query
+ description: The context of the chart as returned by the /charts call. Note that either chart or context must be specified.
+ required: false
+ allowEmptyValue: false
+ schema:
+ type: string
+ format: as returned by /charts
+ - name: dimension
+ in: query
+ description: Zero, one or more dimension ids or names, as returned by the /chart
+ call, separated with comma or pipe. Netdata simple patterns are
+ supported.
+ required: false
+ allowEmptyValue: false
+ schema:
+ type: array
+ items:
+ type: string
+ format: as returned by /charts
+ - name: after
+ in: query
+ description: "This parameter can either be an absolute timestamp specifying the
+ starting point of the data to be returned, or a relative number of
+ seconds (negative, relative to parameter: before). Netdata will
+ assume it is a relative number if it is less that 3 years (in seconds).
+ If not specified the default is -600 seconds. Netdata will adapt this
+ parameter to the boundaries of the round robin database unless the allow_past
+ option is specified."
+ required: true
+ allowEmptyValue: false
+ schema:
+ type: number
+ format: integer
+ default: -600
+ - name: before
+ in: query
+ description: This parameter can either be an absolute timestamp specifying the
+ ending point of the data to be returned, or a relative number of
+ seconds (negative), relative to the last collected timestamp.
+ Netdata will assume it is a relative number if it is less than 3
+ years (in seconds). Netdata will adapt this parameter to the
+ boundaries of the round robin database. The default is zero (i.e.
+ the timestamp of the last value collected).
+ required: false
+ schema:
+ type: number
+ format: integer
+ default: 0
+ - name: points
+ in: query
+ description: The number of points to be returned. If not given, or it is <= 0, or
+ it is bigger than the points stored in the round robin database for
+ this chart for the given duration, all the available collected
+ values for the given duration will be returned.
+ required: true
+ allowEmptyValue: false
+ schema:
+ type: number
+ format: integer
+ default: 20
+ - name: group
+ in: query
+ description: The grouping method. If multiple collected values are to be grouped
+ in order to return fewer points, this parameter defines the method
+ of grouping. Supported methods are "min", "max", "average", "sum" and
+ "incremental-sum". "max" is actually calculated on the absolute
+ value collected (so it works for both positive and negative
+ dimensions, returning the most extreme value in either direction).
+ required: true
+ allowEmptyValue: false
+ schema:
+ type: string
+ enum:
+ - min
+ - max
+ - average
+ - median
+ - stddev
+ - sum
+ - incremental-sum
+ default: average
+ - name: gtime
+ in: query
+ description: The grouping number of seconds. This is used in conjunction with
+ group=average to change the units of the metrics (i.e. when the data is
+ per-second, setting gtime=60 will turn it into per-minute).
+ required: false
+ allowEmptyValue: false
+ schema:
+ type: number
+ format: integer
+ default: 0
+ - name: format
+ in: query
+ description: The format of the data to be returned.
+ required: true
+ allowEmptyValue: false
+ schema:
+ type: string
+ enum:
+ - json
+ - jsonp
+ - csv
+ - tsv
+ - tsv-excel
+ - ssv
+ - ssvcomma
+ - datatable
+ - datasource
+ - html
+ - markdown
+ - array
+ - csvjsonarray
+ default: json
+ - name: options
+ in: query
+ description: Options that affect data generation.
+ required: false
+ allowEmptyValue: false
+ schema:
+ type: array
+ items:
+ type: string
+ enum:
+ - nonzero
+ - flip
+ - jsonwrap
+ - min2max
+ - seconds
+ - milliseconds
+ - abs
+ - absolute
+ - absolute-sum
+ - null2zero
+ - objectrows
+ - google_json
+ - percentage
+ - unaligned
+ - match-ids
+ - match-names
+ - showcustomvars
+ - allow_past
+ default:
+ - seconds
+ - jsonwrap
+ - name: callback
+ in: query
+ description: For JSONP responses, the callback function name.
+ required: false
+ allowEmptyValue: true
+ schema:
+ type: string
+ - name: filename
+ in: query
+ description: "Add Content-Disposition: attachment; filename= header to
+ the response, that will instruct the browser to save the response
+ with the given filename."
+ required: false
+ allowEmptyValue: true
+ schema:
+ type: string
+ - name: tqx
+ in: query
+ description: "[Google Visualization
+ API](https://developers.google.com/chart/interactive/docs/dev/imple\
+ menting_data_source?hl=en) formatted parameter."
+ required: false
+ allowEmptyValue: true
+ schema:
+ type: string
+ responses:
+ "200":
+ description: The call was successful. The response includes the data in the
+ format requested. Swagger 2.0 does not process the discriminator
+ field to show polymorphism. The response will be one of the
+ sub-types of the data-schema according to the chosen format, e.g.
+ json -> data_json.
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/data"
+ "400":
+ description: Bad request - the body will include a message stating what is wrong.
+ "404":
+ description: Chart or context is not found. The supplied chart or context will be reported.
+ "500":
+ description: Internal server error. This usually means the server is out of
+ memory.
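+ # Illustrative request only (host and port assumed, as above): fetch the last
+ # 10 minutes of system.cpu as 20 points, averaged, wrapped in the json schema
+ # described by the data/data_json components below:
+ # curl 'http://localhost:19999/api/v1/data?chart=system.cpu&after=-600&points=20&group=average&format=json&options=jsonwrap'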
+ /badge.svg:
+ get:
+ summary: Generate a badge in form of SVG image for a chart (or dimension)
+ description: Successful responses are SVG images.
+ parameters:
+ - name: chart
+ in: query
+ description: The id of the chart as returned by the /charts call.
+ required: true
+ allowEmptyValue: false
+ schema:
+ type: string
+ format: as returned by /charts
+ default: system.cpu
+ - name: alarm
+ in: query
+ description: The name of an alarm linked to the chart.
+ required: false
+ allowEmptyValue: true
+ schema:
+ type: string
+ format: any text
+ - name: dimension
+ in: query
+ description: Zero, one or more dimension ids, as returned by the /chart call.
+ required: false
+ allowEmptyValue: false
+ schema:
+ type: array
+ items:
+ type: string
+ format: as returned by /charts
+ - name: after
+ in: query
+ description: This parameter can either be an absolute timestamp specifying the
+ starting point of the data to be returned, or a relative number of
+ seconds, relative to the last collected timestamp. Netdata will assume it is
+ a relative number if it is smaller than the duration of the round
+ robin database for this chart. So, if the round robin database is
+ 3600 seconds, any value from -3600 to 3600 will trigger relative
+ arithmetic. Netdata will adapt this parameter to the boundaries of
+ the round robin database.
+ required: true
+ allowEmptyValue: false
+ schema:
+ type: number
+ format: integer
+ default: -600
+ - name: before
+ in: query
+ description: This parameter can either be an absolute timestamp specifying the
+ ending point of the data to be returned, or a relative number of
+ seconds, relative to the last collected timestamp. Netdata will assume it is
+ a relative number if it is smaller than the duration of the round
+ robin database for this chart. So, if the round robin database is
+ 3600 seconds, any value from -3600 to 3600 will trigger relative
+ arithmetic. Netdata will adapt this parameter to the boundaries of
+ the round robin database.
+ required: false
+ schema:
+ type: number
+ format: integer
+ default: 0
+ - name: group
+ in: query
+ description: The grouping method. If multiple collected values are to be grouped
+ in order to return fewer points, this parameter defines the method
+ of grouping. Supported methods are "min", "max", "average", "sum" and
+ "incremental-sum". "max" is actually calculated on the absolute
+ value collected (so it works for both positive and negative
+ dimensions, returning the most extreme value in either direction).
+ required: true
+ allowEmptyValue: false
+ schema:
+ type: string
+ enum:
+ - min
+ - max
+ - average
+ - median
+ - stddev
+ - sum
+ - incremental-sum
+ default: average
+ - name: options
+ in: query
+ description: Options that affect data generation.
+ required: false
+ allowEmptyValue: true
+ schema:
+ type: array
+ items:
+ type: string
+ enum:
+ - abs
+ - absolute
+ - display-absolute
+ - absolute-sum
+ - null2zero
+ - percentage
+ - unaligned
+ default:
+ - absolute
+ - name: label
+ in: query
+ description: A text to be used as the label.
+ required: false
+ allowEmptyValue: true
+ schema:
+ type: string
+ format: any text
+ - name: units
+ in: query
+ description: A text to be used as the units.
+ required: false
+ allowEmptyValue: true
+ schema:
+ type: string
+ format: any text
+ - name: label_color
+ in: query
+ description: A color to be used for the background of the label *(left)* side of the badge. One of the predefined colors, or a specific color in hex `RGB` or `RRGGBB` format (without the preceding `#` character). If the value is wrong or not given, the default color will be used.
+ required: false
+ allowEmptyValue: true
+ schema:
+ oneOf:
+ - type: string
+ enum:
+ - green
+ - brightgreen
+ - yellow
+ - yellowgreen
+ - orange
+ - red
+ - blue
+ - grey
+ - gray
+ - lightgrey
+ - lightgray
+ - type: string
+ format: ^([0-9a-fA-F]{3}|[0-9a-fA-F]{6})$
+ - name: value_color
+ in: query
+ description: "A color to be used for the background of the value *(right)* part of badge. You can set
+ multiple using a pipe with a condition each, like this:
+ `color<value|color:null` The following operators are
+ supported: >, <, >=, <=, =, :null (to check if no value exists). Each color can be specified in same manner as for `label_color` parameter.
+ Currently only integers are suported as values."
+ required: false
+ allowEmptyValue: true
+ schema:
+ type: string
+ format: any text
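+ # Illustrative value_color expression (the thresholds are assumptions, not
+ # defaults): "green<30|yellow<80|red" would keep the badge green while the
+ # value is below 30, yellow below 80, and red otherwise.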
+ - name: text_color_lbl
+ in: query
+ description: Font color for the label *(left)* part of the badge. One of the predefined colors, or an HTML hexadecimal color without the preceding `#` character. Allowed formats are `RGB` and `RRGGBB`. If no value, or a wrong one, is given, the default color will be used.
+ required: false
+ allowEmptyValue: true
+ schema:
+ oneOf:
+ - type: string
+ enum:
+ - green
+ - brightgreen
+ - yellow
+ - yellowgreen
+ - orange
+ - red
+ - blue
+ - grey
+ - gray
+ - lightgrey
+ - lightgray
+ - type: string
+ format: ^([0-9a-fA-F]{3}|[0-9a-fA-F]{6})$
+ - name: text_color_val
+ in: query
+ description: Font color for the value *(right)* part of the badge. One of the predefined colors, or an HTML hexadecimal color without the preceding `#` character. Allowed formats are `RGB` and `RRGGBB`. If no value, or a wrong one, is given, the default color will be used.
+ required: false
+ allowEmptyValue: true
+ schema:
+ oneOf:
+ - type: string
+ enum:
+ - green
+ - brightgreen
+ - yellow
+ - yellowgreen
+ - orange
+ - red
+ - blue
+ - grey
+ - gray
+ - lightgrey
+ - lightgray
+ - type: string
+ format: ^([0-9a-fA-F]{3}|[0-9a-fA-F]{6})$
+ - name: multiply
+ in: query
+ description: Multiply the value by this number when rendering it in the image
+ (integer value required).
+ required: false
+ allowEmptyValue: true
+ schema:
+ type: number
+ format: integer
+ - name: divide
+ in: query
+ description: Divide the value by this number when rendering it in the image
+ (integer value required).
+ required: false
+ allowEmptyValue: true
+ schema:
+ type: number
+ format: integer
+ - name: scale
+ in: query
+ description: Set the scale of the badge (greater or equal to 100).
+ required: false
+ allowEmptyValue: true
+ schema:
+ type: number
+ format: integer
+ - name: fixed_width_lbl
+ in: query
+ description: This parameter overrides auto-sizing of badge and creates it with fixed width. This parameter determines the size of the label's left side *(label/name)*. You must set this parameter together with `fixed_width_val` otherwise it will be ignored. You should set the label/value widths wide enough to provide space for all the possible values/contents of the badge you're requesting. In case the text cannot fit the space given it will be clipped. The `scale` parameter still applies on the values you give to `fixed_width_lbl` and `fixed_width_val`.
+ required: false
+ allowEmptyValue: false
+ schema:
+ type: number
+ format: integer
+ - name: fixed_width_val
+ in: query
+ description: This parameter overrides auto-sizing of badge and creates it with fixed width. This parameter determines the size of the label's right side *(value)*. You must set this parameter together with `fixed_width_lbl` otherwise it will be ignored. You should set the label/value widths wide enough to provide space for all the possible values/contents of the badge you're requesting. In case the text cannot fit the space given it will be clipped. The `scale` parameter still applies on the values you give to `fixed_width_lbl` and `fixed_width_val`.
+ required: false
+ allowEmptyValue: false
+ schema:
+ type: number
+ format: integer
+ responses:
+ "200":
+ description: The call was successful. The response should be an SVG image.
+ "400":
+ description: Bad request - the body will include a message stating what is wrong.
+ "404":
+ description: No chart with the given id is found.
+ "500":
+ description: Internal server error. This usually means the server is out of
+ memory.
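+ # Illustrative usage only (host and port assumed): a badge for system.cpu over
+ # the last 10 minutes, e.g. embedded in a README as:
+ # <img src="http://localhost:19999/api/v1/badge.svg?chart=system.cpu&after=-600&group=average">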
+ /allmetrics:
+ get:
+ summary: Get the latest value of all the metrics maintained by netdata
+ description: The allmetrics endpoint returns the latest value of all charts and
+ dimensions stored in the netdata server.
+ parameters:
+ - name: format
+ in: query
+ description: The format of the response to be returned.
+ required: true
+ schema:
+ type: string
+ enum:
+ - shell
+ - prometheus
+ - prometheus_all_hosts
+ - json
+ default: shell
+ - name: variables
+ in: query
+ description: When enabled, netdata will expose various system
+ configuration metrics.
+ required: false
+ schema:
+ type: string
+ enum:
+ - yes
+ - no
+ default: no
+ - name: help
+ in: query
+ description: Enable or disable HELP lines in prometheus output.
+ required: false
+ schema:
+ type: string
+ enum:
+ - yes
+ - no
+ default: no
+ - name: types
+ in: query
+ description: Enable or disable TYPE lines in prometheus output.
+ required: false
+ schema:
+ type: string
+ enum:
+ - yes
+ - no
+ default: no
+ - name: timestamps
+ in: query
+ description: Enable or disable timestamps in prometheus output.
+ required: false
+ schema:
+ type: string
+ enum:
+ - yes
+ - no
+ default: yes
+ - name: names
+ in: query
+ description: When enabled netdata will report dimension names. When disabled
+ netdata will report dimension IDs. The default is controlled in
+ netdata.conf.
+ required: false
+ schema:
+ type: string
+ enum:
+ - yes
+ - no
+ default: yes
+ - name: oldunits
+ in: query
+ description: When enabled, netdata will show metric names for the default
+ source=average as they appeared before 1.12, by using the legacy
+ unit naming conventions.
+ required: false
+ schema:
+ type: string
+ enum:
+ - yes
+ - no
+ default: yes
+ - name: hideunits
+ in: query
+ description: When enabled, netdata will not include the units in the metric
+ names, for the default source=average.
+ required: false
+ schema:
+ type: string
+ enum:
+ - yes
+ - no
+ default: yes
+ - name: server
+ in: query
+ description: Set a distinct name of the client querying prometheus metrics.
+ Netdata will use the client IP if this is not set.
+ required: false
+ schema:
+ type: string
+ format: any text
+ - name: prefix
+ in: query
+ description: Prefix all prometheus metrics with this string.
+ required: false
+ schema:
+ type: string
+ format: any text
+ - name: data
+ in: query
+ description: Select the prometheus response data source. There is a setting in
+ netdata.conf for the default.
+ required: false
+ schema:
+ type: string
+ enum:
+ - as-collected
+ - average
+ - sum
+ default: average
+ responses:
+ "200":
+ description: All the metrics returned in the format requested.
+ "400":
+ description: The format requested is not supported.
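+ # Illustrative request only (host and port assumed): shell format, suitable
+ # for sourcing into scripts:
+ # curl 'http://localhost:19999/api/v1/allmetrics?format=shell'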
+ /alarms:
+ get:
+ summary: Get a list of active or raised alarms on the server
+ description: The alarms endpoint returns the list of all raised or enabled alarms on
+ the netdata server. Called without any parameters, the raised alarms in
+ state WARNING or CRITICAL are returned. By passing "?all", all the
+ enabled alarms are returned.
+ parameters:
+ - name: all
+ in: query
+ description: If passed, all enabled alarms are returned.
+ required: false
+ allowEmptyValue: true
+ schema:
+ type: boolean
+ - name: active
+ in: query
+ description: If passed, the raised alarms in state WARNING or CRITICAL are returned.
+ required: false
+ allowEmptyValue: true
+ schema:
+ type: boolean
+ responses:
+ "200":
+ description: An object containing general info and a linked list of alarms.
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/alarms"
+ /alarms_values:
+ get:
+ summary: Get a list of active or raised alarms on the server
+ description: The alarms_values endpoint returns the list of all raised or enabled alarms on
+ the netdata server. Called without any parameters, the raised alarms in
+ state WARNING or CRITICAL are returned. By passing "?all", all the
+ enabled alarms are returned.
+ The output of this endpoint differs from `/alarms` in the number of variables delivered: for each
+ alarm it returns only the `id`, the `value` and the alarm `status`.
+ parameters:
+ - name: all
+ in: query
+ description: If passed, all enabled alarms are returned.
+ required: false
+ allowEmptyValue: true
+ schema:
+ type: boolean
+ - name: active
+ in: query
+ description: If passed, the raised alarms in state WARNING or CRITICAL are returned.
+ required: false
+ allowEmptyValue: true
+ schema:
+ type: boolean
+ responses:
+ "200":
+ description: An object containing general info and a linked list of alarms.
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/alarms_values"
+ /alarm_log:
+ get:
+ summary: Retrieves the entries of the alarm log
+ description: Returns an array of alarm_log entries, with historical information on
+ raised and cleared alarms.
+ parameters:
+ - name: after
+ in: query
+ description: Passing the parameter after=UNIQUEID returns all the events in the
+ alarm log that occurred after UNIQUEID. An automated series of calls
+ would call the interface once without after=, store the last
+ UNIQUEID of the returned set, and give it back to get incrementally
+ the next events.
+ required: false
+ schema:
+ type: integer
+ responses:
+ "200":
+ description: An array of alarm log entries.
+ content:
+ application/json:
+ schema:
+ type: array
+ items:
+ $ref: "#/components/schemas/alarm_log_entry"
+ /alarm_count:
+ get:
+ summary: Get an overall status of the chart
+ description: Checks multiple charts with the same context and counts the number
+ of alarms with the given status.
+ parameters:
+ - in: query
+ name: context
+ description: Specify context which should be checked.
+ required: false
+ allowEmptyValue: true
+ schema:
+ type: array
+ items:
+ type: string
+ default:
+ - system.cpu
+ - in: query
+ name: status
+ description: Specify alarm status to count.
+ required: false
+ allowEmptyValue: true
+ schema:
+ type: string
+ enum:
+ - REMOVED
+ - UNDEFINED
+ - UNINITIALIZED
+ - CLEAR
+ - RAISED
+ - WARNING
+ - CRITICAL
+ default: RAISED
+ responses:
+ "200":
+ description: An object containing a count of alarms with given status for given
+ contexts.
+ content:
+ application/json:
+ schema:
+ type: array
+ items:
+ type: number
+ "500":
+ description: Internal server error. This usually means the server is out of
+ memory.
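+ # Illustrative request only (host and port assumed): count WARNING alarms for
+ # the system.cpu context:
+ # curl 'http://localhost:19999/api/v1/alarm_count?context=system.cpu&status=WARNING'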
+ /manage/health:
+ get:
+ summary: Accesses the health management API to control health checks and
+ notifications at runtime.
+ description: Available from Netdata v1.12 and above, protected via bearer
+ authorization. Especially useful for maintenance periods, the API allows
+ you to disable health checks completely, silence alarm notifications, or
+ disable/silence specific alarms that match selectors on alarm/template
+ name, chart, context, host and family. For the simple disable/silence
+ all scenarios, only the cmd parameter is required. The other parameters
+ are used to define alarm selectors. For more information and examples,
+ refer to the netdata documentation.
+ parameters:
+ - name: cmd
+ in: query
+ description: "DISABLE ALL: No alarm criteria are evaluated, nothing is written in
+ the alarm log. SILENCE ALL: No notifications are sent. RESET: Return
+ to the default state. DISABLE/SILENCE: Set the mode to be used for
+ the alarms matching the criteria of the alarm selectors. LIST: Show
+ active configuration."
+ required: false
+ schema:
+ type: string
+ enum:
+ - DISABLE ALL
+ - SILENCE ALL
+ - DISABLE
+ - SILENCE
+ - RESET
+ - LIST
+ - name: alarm
+ in: query
+ description: The expression provided will match both `alarm` and `template` names.
+ schema:
+ type: string
+ - name: chart
+ in: query
+ description: Chart ids/names, as shown on the dashboard. These will match the
+ `on` entry of a configured `alarm`.
+ schema:
+ type: string
+ - name: context
+ in: query
+ description: Chart context, as shown on the dashboard. These will match the `on`
+ entry of a configured `template`.
+ schema:
+ type: string
+ - name: hosts
+ in: query
+ description: The hostnames that will need to match.
+ schema:
+ type: string
+ - name: families
+ in: query
+ description: The alarm families.
+ schema:
+ type: string
+ responses:
+ "200":
+ description: A plain text response based on the result of the command.
+ "403":
+ description: Bearer authentication error.
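+ # Illustrative request only: silence all notifications during maintenance. The
+ # host, port and token value are assumptions; the bearer scheme follows the
+ # description above:
+ # curl 'http://localhost:19999/api/v1/manage/health?cmd=SILENCE%20ALL' -H 'Authorization: Bearer <token>'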
+servers:
+ - url: https://registry.my-netdata.io/api/v1
+ - url: http://registry.my-netdata.io/api/v1
+components:
+ schemas:
+ info:
+ type: object
+ properties:
+ version:
+ type: string
+ description: netdata version of the server.
+ example: 1.11.1_rolling
+ uid:
+ type: string
+ description: netdata unique id of the server.
+ example: 24e9fe3c-f2ac-11e8-bafc-0242ac110002
+ mirrored_hosts:
+ type: array
+ description: List of hosts mirrored of the server (include itself).
+ items:
+ type: string
+ example:
+ - host1.example.com
+ - host2.example.com
+ mirrored_hosts_status:
+ type: array
+ description: >-
+ List of details of hosts mirrored to this server (including self).
+ Indexes correspond to indexes in "mirrored_hosts".
+ items:
+ type: object
+ description: Host data
+ properties:
+ guid:
+ type: string
+ format: uuid
+ nullable: false
+ description: Host unique GUID from `netdata.public.unique.id`.
+ example: 245e4bff-3b34-47c1-a6e5-5c535a9abfb2
+ reachable:
+ type: boolean
+ nullable: false
+ description: Current state of streaming. Always true for localhost/self.
+ claim_id:
+ type: string
+ format: uuid
+ nullable: true
+ description: >-
+ Cloud GUID/identifier in case the host is claimed.
+ If the child status is unknown, or the host is unclaimed, this field is set to `null`.
+ example: c3b2a66a-3052-498c-ac52-7fe9e8cccb0c
+ os_name:
+ type: string
+ description: Operating System Name.
+ example: Manjaro Linux
+ os_id:
+ type: string
+ description: Operating System ID.
+ example: manjaro
+ os_id_like:
+ type: string
+ description: Known OS similar to this OS.
+ example: arch
+ os_version:
+ type: string
+ description: Operating System Version.
+ example: 18.0.4
+ os_version_id:
+ type: string
+ description: Operating System Version ID.
+ example: unknown
+ os_detection:
+ type: string
+ description: OS parameters detection method.
+ example: Mixed
+ kernel_name:
+ type: string
+ description: Kernel Name.
+ example: Linux
+ kernel_version:
+ type: string
+ description: Kernel Version.
+ example: 4.19.32-1-MANJARO
+ is_k8s_node:
+ type: boolean
+ description: Netdata is running on a K8s node.
+ example: false
+ architecture:
+ type: string
+ description: Kernel architecture.
+ example: x86_64
+ virtualization:
+ type: string
+ description: Virtualization Type.
+ example: kvm
+ virt_detection:
+ type: string
+ description: Virtualization detection method.
+ example: systemd-detect-virt
+ container:
+ type: string
+ description: Container technology.
+ example: docker
+ container_detection:
+ type: string
+ description: Container technology detection method.
+ example: dockerenv
+ labels:
+ type: object
+ description: List of host labels.
+ properties:
+ app:
+ type: string
+ description: Host label.
+ example: netdata
+ collectors:
+ type: array
+ items:
+ type: object
+ description: Array of collector plugins and modules.
+ properties:
+ plugin:
+ type: string
+ description: Collector plugin.
+ example: python.d.plugin
+ module:
+ type: string
+ description: Module of the collector plugin.
+ example: dockerd
+ alarms:
+ type: object
+ description: Number of alarms in the server.
+ properties:
+ normal:
+ type: integer
+ description: Number of alarms in normal state.
+ warning:
+ type: integer
+ description: Number of alarms in warning state.
+ critical:
+ type: integer
+ description: Number of alarms in critical state.
+ chart_summary:
+ type: object
+ properties:
+ hostname:
+ type: string
+ description: The hostname of the netdata server.
+ version:
+ type: string
+ description: netdata version of the server.
+ release_channel:
+ type: string
+ description: The release channel of the build on the server.
+ example: nightly
+ timezone:
+ type: string
+ description: The current timezone on the server.
+ os:
+ type: string
+ description: The netdata server host operating system.
+ enum:
+ - macos
+ - linux
+ - freebsd
+ history:
+ type: number
+ description: The duration, in seconds, of the round robin database maintained by
+ netdata.
+ memory_mode:
+ type: string
+ description: The name of the database memory mode on the server.
+ update_every:
+ type: number
+ description: The default update frequency of the netdata server. All charts have
+ an update frequency equal or bigger than this.
+ charts:
+ type: object
+ description: An object containing all the chart objects available at the netdata
+ server. This is used as an indexed array. The key of each chart
+ object is the id of the chart.
+ additionalProperties:
+ $ref: "#/components/schemas/chart"
+ charts_count:
+ type: number
+ description: The number of charts.
+ dimensions_count:
+ type: number
+ description: The total number of dimensions.
+ alarms_count:
+ type: number
+ description: The number of alarms.
+ rrd_memory_bytes:
+ type: number
+ description: The size of the round robin database in bytes.
+ chart:
+ type: object
+ properties:
+ id:
+ type: string
+ description: The unique id of the chart.
+ name:
+ type: string
+ description: The name of the chart.
+ type:
+ type: string
+ description: The type of the chart. Types are not handled by netdata. You can use
+ this field for anything you like.
+ family:
+ type: string
+ description: The family of the chart. Families are not handled by netdata. You
+ can use this field for anything you like.
+ title:
+ type: string
+ description: The title of the chart.
+ priority:
+ type: number
+ description: The relative priority of the chart. NetData does not care about
+ priorities. This is just an indication of importance for the chart
+ viewers to sort charts of higher priority (lower number) closer to
+ the top. Priority sorting should only be used among charts of the
+ same type or family.
+ enabled:
+ type: boolean
+ description: True when the chart is enabled. Disabled charts do not currently
+ collect values, but they may have historical values available.
+ units:
+ type: string
+ description: The unit of measurement for the values of all dimensions of the
+ chart.
+ data_url:
+ type: string
+ description: The absolute path to get data values for this chart. You are
+ expected to use this path as the base when constructing the URL to
+ fetch data values for this chart.
+ chart_type:
+ type: string
+ description: The chart type.
+ enum:
+ - line
+ - area
+ - stacked
+ duration:
+ type: number
+ description: The duration, in seconds, of the round robin database maintained by
+ netdata.
+ first_entry:
+ type: number
+ description: The UNIX timestamp of the first entry (the oldest) in the round
+ robin database.
+ last_entry:
+ type: number
+ description: The UNIX timestamp of the latest entry in the round robin database.
+ update_every:
+ type: number
+ description: The update frequency of this chart, in seconds. One value every this
+ amount of time is kept in the round robin database.
+ dimensions:
+ type: object
+ description: "An object containing all the chart dimensions available for the
+ chart. This is used as an indexed array. For each pair in the
+ dictionary: the key is the id of the dimension and the value is a
+ dictionary containing the name."
+ additionalProperties:
+ type: object
+ properties:
+ name:
+ type: string
+ description: The name of the dimension.
+ chart_variables:
+ type: object
+ additionalProperties:
+ $ref: "#/components/schemas/chart_variables"
+ green:
+ type: number
+ nullable: true
+ description: Chart health green threshold.
+ red:
+ type: number
+ nullable: true
+ description: Chart health red threshold.
+ alarm_variables:
+ type: object
+ properties:
+ chart:
+ type: string
+ description: The unique id of the chart.
+ chart_name:
+ type: string
+ description: The name of the chart.
+ chart_context:
+ type: string
+ description: The context of the chart. It is shared across multiple monitored
+ software or hardware instances and used in alarm templates.
+ family:
+ type: string
+ description: The family of the chart.
+ host:
+ type: string
+ description: The host containing the chart.
+ chart_variables:
+ type: object
+ additionalProperties:
+ $ref: "#/components/schemas/chart_variables"
+ family_variables:
+ type: object
+ properties:
+ varname1:
+ type: number
+ format: float
+ varname2:
+ type: number
+ format: float
+ host_variables:
+ type: object
+ properties:
+ varname1:
+ type: number
+ format: float
+ varname2:
+ type: number
+ format: float
+ chart_variables:
+ type: object
+ properties:
+ varname1:
+ type: number
+ format: float
+ varname2:
+ type: number
+ format: float
+ data:
+ type: object
+ discriminator:
+ propertyName: format
+ description: Response will contain the appropriate subtype, e.g. data_json depending
+ on the requested format.
+ properties:
+ api:
+ type: number
+ description: The API version this conforms to, currently 1.
+ id:
+ type: string
+ description: The unique id of the chart.
+ name:
+ type: string
+ description: The name of the chart.
+ update_every:
+ type: number
+ description: The update frequency of this chart, in seconds. One value every this
+ amount of time is kept in the round robin database (independently of
+ the current view).
+ view_update_every:
+ type: number
+ description: The current view appropriate update frequency of this chart, in
+ seconds. There is no point to request chart refreshes, using the
+ same settings, more frequently than this.
+ first_entry:
+ type: number
+ description: The UNIX timestamp of the first entry (the oldest) in the round
+ robin database (independently of the current view).
+ last_entry:
+ type: number
+ description: The UNIX timestamp of the latest entry in the round robin database
+ (independently of the current view).
+ after:
+ type: number
+ description: The UNIX timestamp of the first entry (the oldest) returned in this
+ response.
+ before:
+ type: number
+ description: The UNIX timestamp of the latest entry returned in this response.
+ min:
+ type: number
+ description: The minimum value returned in the current view. This can be used to
+ size the y-series of the chart.
+ max:
+ type: number
+ description: The maximum value returned in the current view. This can be used to
+ size the y-series of the chart.
+ dimension_names:
+ description: The dimension names of the chart as returned in the current view.
+ type: array
+ items:
+ type: string
+ dimension_ids:
+ description: The dimension IDs of the chart as returned in the current view.
+ type: array
+ items:
+ type: string
+ latest_values:
+ description: The latest values collected for the chart (independently of the
+ current view).
+ type: array
+ items:
+ type: string
+ view_latest_values:
+ description: The latest values returned with this response.
+ type: array
+ items:
+ type: string
+ dimensions:
+ type: number
+ description: The number of dimensions returned.
+ points:
+ type: number
+ description: The number of rows / points returned.
+ format:
+ type: string
+ description: The format of the result returned.
+ chart_variables:
+ type: object
+ additionalProperties:
+ $ref: "#/components/schemas/chart_variables"
+ data_json:
+ description: Data response in json format.
+ allOf:
+ - $ref: "#/components/schemas/data"
+ - properties:
+ result:
+ type: object
+ properties:
+ labels:
+ description: The dimensions retrieved from the chart.
+ type: array
+ items:
+ type: string
+ data:
+ description: The data requested, one element per sample with each element
+ containing the values of the dimensions described in the
+ labels value.
+ type: array
+ items:
+ type: number
+ description: The result requested, in the format requested.
+ data_flat:
+ description: Data response in csv / tsv / tsv-excel / ssv / ssvcomma / markdown /
+ html formats.
+ allOf:
+ - $ref: "#/components/schemas/data"
+ - properties:
+ result:
+ type: string
+ data_array:
+ description: Data response in array format.
+ allOf:
+ - $ref: "#/components/schemas/data"
+ - properties:
+ result:
+ type: array
+ items:
+ type: number
+ data_csvjsonarray:
+ description: Data response in csvjsonarray format.
+ allOf:
+ - $ref: "#/components/schemas/data"
+ - properties:
+ result:
+ description: The first inner array contains strings showing the labels of
+ each column, each subsequent array contains the values for each
+ point in time.
+ type: array
+ items:
+ type: array
+ items: {}
+ data_datatable:
+ description: Data response in datatable / datasource formats (suitable for Google
+ Charts).
+ allOf:
+ - $ref: "#/components/schemas/data"
+ - properties:
+ result:
+ type: object
+ properties:
+ cols:
+ type: array
+ items:
+ type: object
+ properties:
+ id:
+ description: Always empty - for future use.
+ label:
+ description: The dimension returned from the chart.
+ pattern:
+ description: Always empty - for future use.
+ type:
+ description: The type of data in the column / chart-dimension.
+ p:
+ description: Contains any annotations for the column.
+ required:
+ - id
+ - label
+ - pattern
+ - type
+ rows:
+ type: array
+ items:
+ type: object
+ properties:
+ c:
+ type: array
+ items:
+ properties:
+ v:
+ description: "Each value in the row is represented by an
+ object named `c` with five v fields: data, null,
+ null, 0, the value. This format is fixed by the
+ Google Charts API."
+ alarms:
+ type: object
+ properties:
+ hostname:
+ type: string
+ latest_alarm_log_unique_id:
+ type: integer
+ format: int32
+ status:
+ type: boolean
+ now:
+ type: integer
+ format: int32
+ alarms:
+ type: object
+ properties:
+ chart-name.alarm-name:
+ type: object
+ properties:
+ id:
+ type: integer
+ format: int32
+ name:
+ type: string
+ description: Full alarm name.
+ chart:
+ type: string
+ family:
+ type: string
+ active:
+ type: boolean
+ description: Will be false only if the alarm is disabled in the
+ configuration.
+ disabled:
+ type: boolean
+ description: Whether the health check for this alarm has been disabled
+ via a health command API DISABLE command.
+ silenced:
+ type: boolean
+ description: Whether notifications for this alarm have been silenced via
+ a health command API SILENCE command.
+ exec:
+ type: string
+ recipient:
+ type: string
+ source:
+ type: string
+ units:
+ type: string
+ info:
+ type: string
+ status:
+ type: string
+ last_status_change:
+ type: integer
+ format: int32
+ last_updated:
+ type: integer
+ format: int32
+ next_update:
+ type: integer
+ format: int32
+ update_every:
+ type: integer
+ format: int32
+ delay_up_duration:
+ type: integer
+ format: int32
+ delay_down_duration:
+ type: integer
+ format: int32
+ delay_max_duration:
+ type: integer
+ format: int32
+ delay_multiplier:
+ type: integer
+ format: int32
+ delay:
+ type: integer
+ format: int32
+ delay_up_to_timestamp:
+ type: integer
+ format: int32
+ value_string:
+ type: string
+ no_clear_notification:
+ type: boolean
+ lookup_dimensions:
+ type: string
+ db_after:
+ type: integer
+ format: int32
+ db_before:
+ type: integer
+ format: int32
+ lookup_method:
+ type: string
+ lookup_after:
+ type: integer
+ format: int32
+ lookup_before:
+ type: integer
+ format: int32
+ lookup_options:
+ type: string
+ calc:
+ type: string
+ calc_parsed:
+ type: string
+ warn:
+ type: string
+ warn_parsed:
+ type: string
+ crit:
+ type: string
+ crit_parsed:
+ type: string
+ warn_repeat_every:
+ type: integer
+ format: int32
+ crit_repeat_every:
+ type: integer
+ format: int32
+ green:
+ type: string
+ format: nullable
+ red:
+ type: string
+ format: nullable
+ value:
+ type: number
+ alarm_log_entry:
+ type: object
+ properties:
+ hostname:
+ type: string
+ unique_id:
+ type: integer
+ format: int32
+ alarm_id:
+ type: integer
+ format: int32
+ alarm_event_id:
+ type: integer
+ format: int32
+ name:
+ type: string
+ chart:
+ type: string
+ family:
+ type: string
+ processed:
+ type: boolean
+ updated:
+ type: boolean
+ exec_run:
+ type: integer
+ format: int32
+ exec_failed:
+ type: boolean
+ exec:
+ type: string
+ recipient:
+ type: string
+ exec_code:
+ type: integer
+ format: int32
+ source:
+ type: string
+ units:
+ type: string
+ when:
+ type: integer
+ format: int32
+ duration:
+ type: integer
+ format: int32
+ non_clear_duration:
+ type: integer
+ format: int32
+ status:
+ type: string
+ old_status:
+ type: string
+ delay:
+ type: integer
+ format: int32
+ delay_up_to_timestamp:
+ type: integer
+ format: int32
+ updated_by_id:
+ type: integer
+ format: int32
+ updates_id:
+ type: integer
+ format: int32
+ value_string:
+ type: string
+ old_value_string:
+ type: string
+ silenced:
+ type: string
+ info:
+ type: string
+ value:
+ type: number
+ nullable: true
+ old_value:
+ type: number
+ nullable: true
+ alarms_values:
+ type: object
+ properties:
+ hostname:
+ type: string
+ alarms:
+ type: object
+ description: HashMap with keys being alarm names
+ additionalProperties:
+ type: object
+ properties:
+ id:
+ type: integer
+ value:
+ type: integer
+ status:
+ type: string
+ enum:
+ - REMOVED
+ - UNDEFINED
+ - UNINITIALIZED
+ - CLEAR
+ - RAISED
+ - WARNING
+ - CRITICAL
+ - UNKNOWN
diff --git a/web/api/queries/Makefile.am b/web/api/queries/Makefile.am
new file mode 100644
index 0000000..34bfdb8
--- /dev/null
+++ b/web/api/queries/Makefile.am
@@ -0,0 +1,20 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+SUBDIRS = \
+ average \
+ des \
+ incremental_sum \
+ max \
+ min \
+ sum \
+ median \
+ ses \
+ stddev \
+ $(NULL)
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/web/api/queries/README.md b/web/api/queries/README.md
new file mode 100644
index 0000000..31ec496
--- /dev/null
+++ b/web/api/queries/README.md
@@ -0,0 +1,176 @@
+<!--
+title: "Database Queries"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/web/api/queries/README.md
+-->
+
+# Database Queries
+
+The Netdata database can be queried with the `/api/v1/data` and `/api/v1/badge.svg` REST API methods.
+
+Every data query accepts the following parameters:
+
+|name|required|description|
+|:--:|:------:|:----------|
+|`chart`|yes|The chart to be queried.|
+|`points`|no|The number of points to be returned. Netdata can reduce the number of points by applying query grouping methods. If not given, the result will have the same granularity as the database (unless `gtime` re-samples it).|
+|`before`|no|The absolute timestamp or the relative (to now) time the query should finish evaluating data. If not given, it defaults to the timestamp of the latest point in the database.|
+|`after`|no|The absolute timestamp or the relative (to `before`) time the query should start evaluating data. If not given, it defaults to the timestamp of the oldest point in the database.|
+|`group`|no|The grouping method to use when reducing the points the database has. If not given, it defaults to `average`.|
+|`gtime`|no|A resampling period to change the units of the metrics (i.e. setting this to `60` will convert `per second` metrics to `per minute`). If not given, it defaults to the granularity of the database.|
+|`options`|no|A bitmap of options that can affect the operation of the query. Only 2 options are used by the query engine: `unaligned` and `percentage`. All the other options are used by the output formatters. The default is to return aligned data.|
+|`dimensions`|no|A simple pattern to filter the dimensions to be queried. The default is to return all the dimensions of the chart.|
+
+## Operation
+
+The query engine works as follows (in this order):
+
+#### Time-frame
+
+`after` and `before` define a time-frame, accepting:
+
+- **absolute timestamps** (unix timestamps, i.e. seconds since epoch).
+
+- **relative timestamps**:
+
+ `before` is relative to now and `after` is relative to `before`.
+
+ Example: `before=-60&after=-60` evaluates to the time-frame from -120 up to -60 seconds in
+ the past, relative to the latest entry in the chart's database.
+
+The engine verifies that the requested time-frame is available in the database:
+
+- If the requested time-frame partially overlaps with the database, the excess
+ requested will be truncated.
+
+- If the requested time-frame does not overlap with the database, the engine will
+ return an empty data set.
+
+At the end of this operation, `after` and `before` are absolute timestamps.
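+
+A minimal sketch of this resolution step (illustrative only: `now` stands for the
+latest entry in the database, and the function name is hypothetical):
+
+```
+#include <time.h>
+
+static void resolve_timeframe(time_t now, time_t db_oldest,
+                              long long after, long long before,
+                              time_t *abs_after, time_t *abs_before) {
+    // relative values are non-positive offsets: `before` is relative
+    // to now, `after` is relative to the resolved `before`
+    time_t b = (before <= 0) ? now + (time_t)before : (time_t)before;
+    time_t a = (after  <= 0) ? b   + (time_t)after  : (time_t)after;
+
+    // truncate the excess outside the database
+    if (b > now)       b = now;
+    if (a < db_oldest) a = db_oldest;
+
+    *abs_after  = a;
+    *abs_before = b;
+}
+```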
+
+#### Data grouping
+
+Database points grouping is applied when the caller requests a time-frame to be
+expressed with fewer points than are available in the database.
+
+Two use cases enable this feature:
+
+- The caller requests a specific number of `points` to be returned.
+
+ For example, for a time-frame of 10 minutes, the database has 600 points (1/sec),
+ while the caller requested these 10 minutes to be expressed in 200 points.
+
+ This feature is used by Netdata dashboards when you zoom out on the charts.
+ The dashboard is requesting the number of points the user's screen has.
+ This saves bandwidth and speeds up the browser (fewer points to evaluate for drawing the charts).
+- The caller requests a **re-sampling** of the database, by setting `gtime` to any value
+ above the granularity of the chart.
+
+ For example, the chart's units are `requests/sec` and the caller wants `requests/min`.
+
+Using `points` and `gtime` the query engine tries to find a best fit for **database-points**
+vs **result-points** (we call this ratio `group points`). It always tries to keep `group points`
+an integer. Keep in mind the query engine may shift `after` if required. See also the [example](#example).
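+
+A rough sketch of this computation (illustrative only; the real engine also
+handles shifting `after` and `gtime` resampling):
+
+```
+static long group_points(time_t after, time_t before,
+                         long update_every, long points_wanted) {
+    // database points available in the time-frame, at a resolution
+    // of `update_every` seconds per point
+    long available = (long)(before - after) / update_every;
+    if (available < 1) available = 1;
+
+    if (points_wanted < 1 || points_wanted > available)
+        points_wanted = available;           // cannot return more than we have
+
+    long group = available / points_wanted;  // database points per result point
+    return (group < 1) ? 1 : group;
+}
+```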
+
+#### Time-frame Alignment
+
+Alignment is a very important aspect of Netdata queries. Without it, the animated
+charts on the dashboards would constantly [change shape](#example) during incremental updates.
+
+To provide consistent grouping through time, the query engine (by default) aligns
+`after` and `before` to be a multiple of `group points`.
+
+For example, if `group points` is 60 and alignment is enabled, the engine will return
+each point with durations XX:XX:00 - XX:XX:59, matching whole minutes.
+
+To disable alignment, pass `&options=unaligned` to the query.
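+
+A minimal sketch of the alignment step (illustrative only, ignoring edge cases):
+
+```
+static void align_timeframe(time_t *after, time_t *before,
+                            long group, long update_every) {
+    // align to whole multiples of the grouping duration, so that
+    // successive queries return the same boundaries
+    time_t duration = (time_t)group * (time_t)update_every;
+    *before -= *before % duration;
+    *after  -= *after  % duration;
+}
+```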
+
+#### Query Execution
+
+To execute the query, the engine evaluates all dimensions of the chart, one after another.
+
+The engine does not evaluate dimensions that do not match the [simple pattern](/libnetdata/simple_pattern/README.md)
+given at the `dimensions` parameter, except when `options=percentage` is given (this option
+requires all the dimensions to be evaluated, to find the percentage of each dimension
+relative to the chart total).
+
+For each dimension, it evaluates values starting at `after` (not inclusive) and moving
+towards `before` (inclusive).
+
+For each value it calls the **grouping method** given with the `&group=` query parameter
+(the default is `average`).
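+
+Internally, every grouping method is a small module with a create, add, flush,
+free lifecycle. The following self-contained toy (hypothetical names, not
+Netdata code) shows the idea with a simple average:
+
+```
+#include <stdio.h>
+#include <stdlib.h>
+
+struct avg { double sum; size_t count; };
+
+static void *avg_create(void)           { return calloc(1, sizeof(struct avg)); }
+static void  avg_add(void *p, double v) { struct avg *g = p; g->sum += v; g->count++; }
+
+static double avg_flush(void *p) {
+    struct avg *g = p;
+    double v = g->count ? g->sum / (double)g->count : 0.0;
+    g->sum = 0.0; g->count = 0;              // reset for the next result point
+    return v;
+}
+
+int main(void) {
+    double values[] = { 1, 2, 3, 4, 5, 6 };
+    size_t group = 2;                        // 2 database points per result point
+
+    void *g = avg_create();
+    for (size_t i = 0; i < 6; i++) {
+        avg_add(g, values[i]);
+        if ((i + 1) % group == 0)
+            printf("point: %.1f\n", avg_flush(g));  // prints 1.5, 3.5, 5.5
+    }
+    free(g);
+    return 0;
+}
+```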
+
+## Grouping methods
+
+The following grouping methods are supported. Each is given all the values in the
+time-frame and groups them every `group points`.
+
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=min&after=-60&label=min&value_color=blue) finds the minimum value
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=max&after=-60&label=max&value_color=lightblue) finds the maximum value
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=average&after=-60&label=average&value_color=yellow) finds the average value
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=sum&after=-60&label=sum&units=requests&value_color=orange) adds all the values and returns the sum
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=median&after=-60&label=median&value_color=red) sorts the values and returns the value in the middle of the list
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=stddev&after=-60&label=stddev&value_color=green) finds the standard deviation of the values
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=cv&after=-60&label=cv&units=pcent&value_color=yellow) finds the relative standard deviation (coefficient of variation) of the values
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=ses&after=-60&label=ses&value_color=brown) finds the exponential weighted moving average of the values
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=des&after=-60&label=des&value_color=blue) applies Holt-Winters double exponential smoothing
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=incremental_sum&after=-60&label=incremental_sum&value_color=red) finds the difference of the last vs the first value
+
+The examples shown above are live information from the `successful` web requests of the global Netdata registry.
+
+## Further processing
+
+The result of the query engine is always a structure that has dimensions and values
+for each dimension.
+
+Formatting modules are then used to convert this result in many different formats and return it
+to the caller.
+
+## Performance
+
+The query engine is highly optimized for speed. Most of its modules implement "online"
+versions of the algorithms, requiring just one pass over the database values to produce
+the result.
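+
+For example, a one-pass ("online") standard deviation can be computed with
+Welford's method. This is a sketch of the idea, not Netdata's actual `stddev`
+module:
+
+```
+#include <stdio.h>
+#include <math.h>
+
+int main(void) {
+    double values[] = { 4, 7, 13, 16 };
+    double mean = 0.0, m2 = 0.0;
+    size_t count = 0;
+
+    // a single pass over the values, keeping O(1) state
+    for (size_t i = 0; i < 4; i++) {
+        count++;
+        double delta = values[i] - mean;
+        mean += delta / (double)count;
+        m2   += delta * (values[i] - mean);
+    }
+
+    // population standard deviation of the group
+    printf("mean = %g, stddev = %g\n", mean, sqrt(m2 / (double)count));
+    return 0;
+}
+```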
+
+## Example
+
+When Netdata is reducing metrics, it always tries to return the same boundaries. So, if we want 10-second averages, it will always return points starting at `unix timestamp % 10 = 0`.
+
+Let's see why this is needed by looking at the error case.
+
+Assume we have 5 points:
+
+|time|value|
+|:--:|:---:|
+|00:01|1|
+|00:02|2|
+|00:03|3|
+|00:04|4|
+|00:05|5|
+
+At 00:04 you ask for 2 points covering the last 4 seconds, so `group = 2`. Netdata would return:
+
+|point|time|value|
+|:---:|:--:|:---:|
+|1|00:01 - 00:02|1.5|
+|2|00:03 - 00:04|3.5|
+
+A second later the chart is refreshed and makes the same request again, now at 00:05. These are the points that would be returned:
+
+|point|time|value|
+|:---:|:--:|:---:|
+|1|00:02 - 00:03|2.5|
+|2|00:04 - 00:05|4.5|
+
+**Wait a moment!** The chart was shifted by just one point, yet it changed value! Point 2 was 3.5 and, when shifted to point 1, became 2.5! If you see this in a chart, it's a mess: the chart changes shape constantly.
+
+For this reason, Netdata always aligns the data it returns to the `group`.
+
+When you request `points=1`, Netdata understands that you need 1 point for the whole database, so `group = 3600`. Then it tries to find the starting point, which would be `timestamp % 3600 = 0`. Within a database of 3600 seconds, there is one such point for sure. Then it tries to find the average of 3600 points. But most probably it will not find 3600 of them, so only for 1 out of every 3600 seconds will this query return something.
+
+So, the proper way to query the database is to also set at least `after`. The following call will return 1 point for the last complete 10-second duration (it starts at `timestamp % 10 = 0`):
+
+<http://netdata.firehol.org/api/v1/data?chart=system.cpu&points=1&after=-10&options=seconds>
+
+When you keep calling this URL, you will see that it returns one new value every 10 seconds, and the timestamp always ends with zero. Similarly, if you say `points=1&after=-5` it will always return timestamps ending with 0 or 5.
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fweb%2Fapi%2Fqueries%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/web/api/queries/average/Makefile.am b/web/api/queries/average/Makefile.am
new file mode 100644
index 0000000..161784b
--- /dev/null
+++ b/web/api/queries/average/Makefile.am
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/web/api/queries/average/README.md b/web/api/queries/average/README.md
new file mode 100644
index 0000000..f32a675
--- /dev/null
+++ b/web/api/queries/average/README.md
@@ -0,0 +1,46 @@
+<!--
+title: "Average or Mean"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/web/api/queries/average/README.md
+-->
+
+# Average or Mean
+
+> This query is available as `average` and `mean`.
+
+An average is a single number taken as representative of a list of numbers.
+
+It is calculated as:
+
+```
+average = sum(numbers) / count(numbers)
+```
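+
+For example, the average of the values `1, 2, 3, 4` is `10 / 4 = 2.5`.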
+
+## how to use
+
+Use it in alarms like this:
+
+```
+ alarm: my_alarm
+ on: my_chart
+lookup: average -1m unaligned of my_dimension
+ warn: $this > 1000
+```
+
+`average` does not change the units. For example, if the chart's units are `requests/sec`, the
+result will be expressed in the same units.
+
+It can also be used in APIs and badges as `&group=average` in the URL.
+
+## Examples
+
+Examining last 1 minute `successful` web server responses:
+
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=min&after=-60&label=min)
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=average&after=-60&label=average&value_color=orange)
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=max&after=-60&label=max)
+
+## References
+
+- <https://en.wikipedia.org/wiki/Average>.
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fweb%2Fapi%2Fqueries%2Faverage%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/web/api/queries/average/average.c b/web/api/queries/average/average.c
new file mode 100644
index 0000000..2c64358
--- /dev/null
+++ b/web/api/queries/average/average.c
@@ -0,0 +1,62 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "average.h"
+
+// ----------------------------------------------------------------------------
+// average
+
+struct grouping_average {
+ calculated_number sum;
+ size_t count;
+};
+
+void *grouping_create_average(RRDR *r) {
+ (void)r;
+ return callocz(1, sizeof(struct grouping_average));
+}
+
+// resets when the query engine switches dimensions
+// so, clear everything to restart
+void grouping_reset_average(RRDR *r) {
+ struct grouping_average *g = (struct grouping_average *)r->internal.grouping_data;
+ g->sum = 0;
+ g->count = 0;
+}
+
+void grouping_free_average(RRDR *r) {
+ freez(r->internal.grouping_data);
+ r->internal.grouping_data = NULL;
+}
+
+void grouping_add_average(RRDR *r, calculated_number value) {
+ if(!isnan(value)) {
+ struct grouping_average *g = (struct grouping_average *)r->internal.grouping_data;
+ g->sum += value;
+ g->count++;
+ }
+}
+
+calculated_number grouping_flush_average(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr) {
+ struct grouping_average *g = (struct grouping_average *)r->internal.grouping_data;
+
+ calculated_number value;
+
+ if(unlikely(!g->count)) {
+ value = 0.0;
+ *rrdr_value_options_ptr |= RRDR_VALUE_EMPTY;
+ }
+ else {
+ if(unlikely(r->internal.resampling_group != 1)) {
+ if (unlikely(r->result_options & RRDR_RESULT_OPTION_VARIABLE_STEP))
+ value = g->sum / g->count / r->internal.resampling_divisor;
+ else
+ value = g->sum / r->internal.resampling_divisor;
+ } else
+ value = g->sum / g->count;
+ }
+
+ g->sum = 0.0;
+ g->count = 0;
+
+ return value;
+}
diff --git a/web/api/queries/average/average.h b/web/api/queries/average/average.h
new file mode 100644
index 0000000..9fb7de2
--- /dev/null
+++ b/web/api/queries/average/average.h
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_API_QUERY_AVERAGE_H
+#define NETDATA_API_QUERY_AVERAGE_H
+
+#include "../query.h"
+#include "../rrdr.h"
+
+extern void *grouping_create_average(RRDR *r);
+extern void grouping_reset_average(RRDR *r);
+extern void grouping_free_average(RRDR *r);
+extern void grouping_add_average(RRDR *r, calculated_number value);
+extern calculated_number grouping_flush_average(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);
+
+#endif //NETDATA_API_QUERY_AVERAGE_H
diff --git a/web/api/queries/des/Makefile.am b/web/api/queries/des/Makefile.am
new file mode 100644
index 0000000..161784b
--- /dev/null
+++ b/web/api/queries/des/Makefile.am
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/web/api/queries/des/README.md b/web/api/queries/des/README.md
new file mode 100644
index 0000000..5505de5
--- /dev/null
+++ b/web/api/queries/des/README.md
@@ -0,0 +1,73 @@
+<!--
+title: "double exponential smoothing"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/web/api/queries/des/README.md
+-->
+
+# double exponential smoothing
+
+Exponential smoothing is one of many window functions commonly applied to smooth data in signal
+processing, acting as low-pass filters to remove high frequency noise.
+
+Simple exponential smoothing does not do well when there is a trend in the data.
+In such situations, several methods were devised under the name "double exponential smoothing"
+or "second-order exponential smoothing": the recursive application of an exponential
+filter twice, hence the name.
+
+In simple terms, this is like an average value, but more recent values are given more weight,
+and the trend of the values significantly influences the result.
+
+> **IMPORTANT**
+>
+> It is common for `des` to provide "average" values that fall far beyond the minimum or the
+> maximum values found in the time-series.
+> `des` estimates these values because it takes the trend into account.
+
+This module implements the "Holt-Winters double exponential smoothing".
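+
+In recurrence form this is roughly (a sketch; the implementation seeds the
+first two values slightly differently):
+
+```
+level[t] = alpha * value[t] + (1 - alpha) * (level[t-1] + trend[t-1])
+trend[t] = beta  * (level[t] - level[t-1]) + (1 - beta)  * trend[t-1]
+```
+
+The value returned for each group of points is the final `level`.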
+
+Netdata automatically adjusts the weight (`alpha`) and the trend (`beta`) based on the number
+of values processed, using the formula:
+
+```
+window = min(number of values, 15)
+alpha = 2 / (window + 1)
+beta = 2 / (window + 1)
+```
+
+You can change the fixed value `15` by setting the following in `netdata.conf`:
+
+```
+[web]
+ des max window = 15
+```
+
+## how to use
+
+Use it in alarms like this:
+
+```
+ alarm: my_alarm
+ on: my_chart
+lookup: des -1m unaligned of my_dimension
+ warn: $this > 1000
+```
+
+`des` does not change the units. For example, if the chart's units are `requests/sec`, the
+result will be expressed in the same units.
+
+It can also be used in APIs and badges as `&group=des` in the URL.
+
+## Examples
+
+Examining last 1 minute `successful` web server responses:
+
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=min&after=-60&label=min)
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=average&after=-60&label=average&value_color=yellow)
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=ses&after=-60&label=single+exponential+smoothing&value_color=yellow)
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=des&after=-60&label=double+exponential+smoothing&value_color=orange)
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=max&after=-60&label=max)
+
+## References
+
+- <https://en.wikipedia.org/wiki/Exponential_smoothing>.
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fweb%2Fapi%2Fqueries%2Fdes%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/web/api/queries/des/des.c b/web/api/queries/des/des.c
new file mode 100644
index 0000000..c6236f3
--- /dev/null
+++ b/web/api/queries/des/des.c
@@ -0,0 +1,139 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include <web/api/queries/rrdr.h>
+#include "des.h"
+
+
+// ----------------------------------------------------------------------------
+// double exponential smoothing
+
+struct grouping_des {
+ calculated_number alpha;
+ calculated_number alpha_other;
+ calculated_number beta;
+ calculated_number beta_other;
+
+ calculated_number level;
+ calculated_number trend;
+
+ size_t count;
+};
+
+static size_t max_window_size = 15;
+
+void grouping_init_des(void) {
+ long long ret = config_get_number(CONFIG_SECTION_WEB, "des max window", (long long)max_window_size);
+ if(ret <= 1) {
+ config_set_number(CONFIG_SECTION_WEB, "des max window", (long long)max_window_size);
+ }
+ else {
+ max_window_size = (size_t) ret;
+ }
+}
+
+static inline calculated_number window(RRDR *r, struct grouping_des *g) {
+ (void)g;
+
+ calculated_number points;
+ if(r->group == 1) {
+ // provide a running DES
+ points = r->internal.points_wanted;
+ }
+ else {
+ // provide a DES with flush points
+ points = r->group;
+ }
+
+ // https://en.wikipedia.org/wiki/Moving_average#Exponential_moving_average
+ // A commonly used value for alpha is 2 / (N + 1)
+ return (points > max_window_size) ? max_window_size : points;
+}
+
+static inline void set_alpha(RRDR *r, struct grouping_des *g) {
+ // https://en.wikipedia.org/wiki/Moving_average#Exponential_moving_average
+ // A commonly used value for alpha is 2 / (N + 1)
+
+ g->alpha = 2.0 / (window(r, g) + 1.0);
+ g->alpha_other = 1.0 - g->alpha;
+
+ //info("alpha for chart '%s' is " CALCULATED_NUMBER_FORMAT, r->st->name, g->alpha);
+}
+
+static inline void set_beta(RRDR *r, struct grouping_des *g) {
+ // https://en.wikipedia.org/wiki/Moving_average#Exponential_moving_average
+ // A commonly used value for beta is 2 / (N + 1)
+
+ g->beta = 2.0 / (window(r, g) + 1.0);
+ g->beta_other = 1.0 - g->beta;
+
+ //info("beta for chart '%s' is " CALCULATED_NUMBER_FORMAT, r->st->name, g->beta);
+}
+
+void *grouping_create_des(RRDR *r) {
+ struct grouping_des *g = (struct grouping_des *)callocz(1, sizeof(struct grouping_des));
+ set_alpha(r, g);
+ set_beta(r, g);
+ g->level = 0.0;
+ g->trend = 0.0;
+ g->count = 0;
+ return g;
+}
+
+// resets when the query engine switches dimensions
+// so, clear everything to restart
+void grouping_reset_des(RRDR *r) {
+ struct grouping_des *g = (struct grouping_des *)r->internal.grouping_data;
+ g->level = 0.0;
+ g->trend = 0.0;
+ g->count = 0;
+
+ // fprintf(stderr, "\nDES: ");
+
+}
+
+void grouping_free_des(RRDR *r) {
+ freez(r->internal.grouping_data);
+ r->internal.grouping_data = NULL;
+}
+
+void grouping_add_des(RRDR *r, calculated_number value) {
+ struct grouping_des *g = (struct grouping_des *)r->internal.grouping_data;
+
+ if(calculated_number_isnumber(value)) {
+ if(likely(g->count > 0)) {
+ // we have at least a number so far
+
+ if(unlikely(g->count == 1)) {
+ // the second value we got
+ g->trend = value - g->trend;
+ g->level = value;
+ }
+
+ // for the values, except the first
+ calculated_number last_level = g->level;
+ g->level = (g->alpha * value) + (g->alpha_other * (g->level + g->trend));
+ g->trend = (g->beta * (g->level - last_level)) + (g->beta_other * g->trend);
+ }
+ else {
+ // the first value we got
+ g->level = g->trend = value;
+ }
+
+ g->count++;
+ }
+
+ //fprintf(stderr, "value: " CALCULATED_NUMBER_FORMAT ", level: " CALCULATED_NUMBER_FORMAT ", trend: " CALCULATED_NUMBER_FORMAT "\n", value, g->level, g->trend);
+}
+
+calculated_number grouping_flush_des(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr) {
+ struct grouping_des *g = (struct grouping_des *)r->internal.grouping_data;
+
+ if(unlikely(!g->count || !calculated_number_isnumber(g->level))) {
+ *rrdr_value_options_ptr |= RRDR_VALUE_EMPTY;
+ return 0.0;
+ }
+
+ //fprintf(stderr, " RESULT for %zu values = " CALCULATED_NUMBER_FORMAT " \n", g->count, g->level);
+
+ return g->level;
+}
diff --git a/web/api/queries/des/des.h b/web/api/queries/des/des.h
new file mode 100644
index 0000000..360513e
--- /dev/null
+++ b/web/api/queries/des/des.h
@@ -0,0 +1,17 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_API_QUERIES_DES_H
+#define NETDATA_API_QUERIES_DES_H
+
+#include "../query.h"
+#include "../rrdr.h"
+
+extern void grouping_init_des(void);
+
+extern void *grouping_create_des(RRDR *r);
+extern void grouping_reset_des(RRDR *r);
+extern void grouping_free_des(RRDR *r);
+extern void grouping_add_des(RRDR *r, calculated_number value);
+extern calculated_number grouping_flush_des(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);
+
+#endif //NETDATA_API_QUERIES_DES_H
diff --git a/web/api/queries/incremental_sum/Makefile.am b/web/api/queries/incremental_sum/Makefile.am
new file mode 100644
index 0000000..161784b
--- /dev/null
+++ b/web/api/queries/incremental_sum/Makefile.am
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/web/api/queries/incremental_sum/README.md b/web/api/queries/incremental_sum/README.md
new file mode 100644
index 0000000..e5f3dfc
--- /dev/null
+++ b/web/api/queries/incremental_sum/README.md
@@ -0,0 +1,41 @@
+<!--
+title: "Incremental Sum (`incremental_sum`)"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/web/api/queries/incremental_sum/README.md
+-->
+
+# Incremental Sum (`incremental_sum`)
+
+This module finds the incremental sum over a period, which is `last value - first value`.
+
+The result may be positive (rising) or negative (falling) depending on the first and last values.
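+
+For example, if the first value in the period is `100` and the last is `250`, the
+incremental sum is `150`; if the last is `80`, it is `-20`.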
+
+## how to use
+
+Use it in alarms like this:
+
+```
+ alarm: my_alarm
+ on: my_chart
+lookup: incremental_sum -1m unaligned of my_dimension
+ warn: $this > 1000
+```
+
+`incremental_sum` does not change the units. For example, if the chart's units are `requests/sec`,
+the result will be expressed in the same units.
+
+It can also be used in APIs and badges as `&group=incremental_sum` in the URL.
+
+## Examples
+
+Examining last 1 minute `successful` web server responses:
+
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=min&after=-60&label=min)
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=average&after=-60&label=average)
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=max&after=-60&label=max)
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=incremental_sum&after=-60&label=incremental+sum&value_color=orange)
+
+## References
+
+- none
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fweb%2Fapi%2Fqueries%2Fincremental_sum%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/web/api/queries/incremental_sum/incremental_sum.c b/web/api/queries/incremental_sum/incremental_sum.c
new file mode 100644
index 0000000..131d85d
--- /dev/null
+++ b/web/api/queries/incremental_sum/incremental_sum.c
@@ -0,0 +1,69 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "incremental_sum.h"
+
+// ----------------------------------------------------------------------------
+// incremental sum
+
+struct grouping_incremental_sum {
+ calculated_number first;
+ calculated_number last;
+ size_t count;
+};
+
+void *grouping_create_incremental_sum(RRDR *r) {
+ (void)r;
+ return callocz(1, sizeof(struct grouping_incremental_sum));
+}
+
+// resets when the query engine switches dimensions
+// so, clear everything to restart
+void grouping_reset_incremental_sum(RRDR *r) {
+ struct grouping_incremental_sum *g = (struct grouping_incremental_sum *)r->internal.grouping_data;
+ g->first = 0;
+ g->last = 0;
+ g->count = 0;
+}
+
+void grouping_free_incremental_sum(RRDR *r) {
+ freez(r->internal.grouping_data);
+ r->internal.grouping_data = NULL;
+}
+
+void grouping_add_incremental_sum(RRDR *r, calculated_number value) {
+ if(!isnan(value)) {
+ struct grouping_incremental_sum *g = (struct grouping_incremental_sum *)r->internal.grouping_data;
+
+ if(unlikely(!g->count)) {
+ g->first = value;
+ g->count++;
+ }
+ else {
+ g->last = value;
+ g->count++;
+ }
+ }
+}
+
+calculated_number grouping_flush_incremental_sum(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr) {
+ struct grouping_incremental_sum *g = (struct grouping_incremental_sum *)r->internal.grouping_data;
+
+ calculated_number value;
+
+ if(unlikely(!g->count)) {
+ value = 0.0;
+ *rrdr_value_options_ptr |= RRDR_VALUE_EMPTY;
+ }
+ else if(unlikely(g->count == 1)) {
+ value = 0.0;
+ }
+ else {
+ value = g->last - g->first;
+ }
+
+ g->first = 0.0;
+ g->last = 0.0;
+ g->count = 0;
+
+ return value;
+}
diff --git a/web/api/queries/incremental_sum/incremental_sum.h b/web/api/queries/incremental_sum/incremental_sum.h
new file mode 100644
index 0000000..990a2ac
--- /dev/null
+++ b/web/api/queries/incremental_sum/incremental_sum.h
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_API_QUERY_INCREMENTAL_SUM_H
+#define NETDATA_API_QUERY_INCREMENTAL_SUM_H
+
+#include "../query.h"
+#include "../rrdr.h"
+
+extern void *grouping_create_incremental_sum(RRDR *r);
+extern void grouping_reset_incremental_sum(RRDR *r);
+extern void grouping_free_incremental_sum(RRDR *r);
+extern void grouping_add_incremental_sum(RRDR *r, calculated_number value);
+extern calculated_number grouping_flush_incremental_sum(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);
+
+#endif //NETDATA_API_QUERY_INCREMENTAL_SUM_H
diff --git a/web/api/queries/max/Makefile.am b/web/api/queries/max/Makefile.am
new file mode 100644
index 0000000..161784b
--- /dev/null
+++ b/web/api/queries/max/Makefile.am
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/web/api/queries/max/README.md b/web/api/queries/max/README.md
new file mode 100644
index 0000000..32b1d43
--- /dev/null
+++ b/web/api/queries/max/README.md
@@ -0,0 +1,38 @@
+<!--
+title: "Max"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/web/api/queries/max/README.md
+-->
+
+# Max
+
+This module finds the max value in the time-frame given.
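+
+Note that, as in the implementation, values are compared by absolute magnitude,
+so for charts with negative values the result is the value furthest from zero.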
+
+## how to use
+
+Use it in alarms like this:
+
+```
+ alarm: my_alarm
+ on: my_chart
+lookup: max -1m unaligned of my_dimension
+ warn: $this > 1000
+```
+
+`max` does not change the units. For example, if the chart's units are `requests/sec`, the
+result will be expressed in the same units.
+
+It can also be used in APIs and badges as `&group=max` in the URL.
+
+## Examples
+
+Examining last 1 minute `successful` web server responses:
+
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=min&after=-60&label=min)
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=average&after=-60&label=average)
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=max&after=-60&label=max&value_color=orange)
+
+## References
+
+- <https://en.wikipedia.org/wiki/Sample_maximum_and_minimum>.
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fweb%2Fapi%2Fqueries%2Fmax%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/web/api/queries/max/max.c b/web/api/queries/max/max.c
new file mode 100644
index 0000000..a4be36a
--- /dev/null
+++ b/web/api/queries/max/max.c
@@ -0,0 +1,60 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "max.h"
+
+// ----------------------------------------------------------------------------
+// max
+
+struct grouping_max {
+ calculated_number max;
+ size_t count;
+};
+
+void *grouping_create_max(RRDR *r) {
+ (void)r;
+ return callocz(1, sizeof(struct grouping_max));
+}
+
+// resets when the query engine switches dimensions
+// so, clear everything to restart
+void grouping_reset_max(RRDR *r) {
+ struct grouping_max *g = (struct grouping_max *)r->internal.grouping_data;
+ g->max = 0;
+ g->count = 0;
+}
+
+void grouping_free_max(RRDR *r) {
+ freez(r->internal.grouping_data);
+ r->internal.grouping_data = NULL;
+}
+
+void grouping_add_max(RRDR *r, calculated_number value) {
+ if(!isnan(value)) {
+ struct grouping_max *g = (struct grouping_max *)r->internal.grouping_data;
+
+ if(!g->count || calculated_number_fabs(value) > calculated_number_fabs(g->max)) {
+ g->max = value;
+ g->count++;
+ }
+ }
+}
+
+calculated_number grouping_flush_max(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr) {
+ struct grouping_max *g = (struct grouping_max *)r->internal.grouping_data;
+
+ calculated_number value;
+
+ if(unlikely(!g->count)) {
+ value = 0.0;
+ *rrdr_value_options_ptr |= RRDR_VALUE_EMPTY;
+ }
+ else {
+ value = g->max;
+ }
+
+ g->max = 0.0;
+ g->count = 0;
+
+ return value;
+}
+
diff --git a/web/api/queries/max/max.h b/web/api/queries/max/max.h
new file mode 100644
index 0000000..d839fe3
--- /dev/null
+++ b/web/api/queries/max/max.h
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_API_QUERY_MAX_H
+#define NETDATA_API_QUERY_MAX_H
+
+#include "../query.h"
+#include "../rrdr.h"
+
+extern void *grouping_create_max(RRDR *r);
+extern void grouping_reset_max(RRDR *r);
+extern void grouping_free_max(RRDR *r);
+extern void grouping_add_max(RRDR *r, calculated_number value);
+extern calculated_number grouping_flush_max(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);
+
+#endif //NETDATA_API_QUERY_MAX_H
diff --git a/web/api/queries/median/Makefile.am b/web/api/queries/median/Makefile.am
new file mode 100644
index 0000000..161784b
--- /dev/null
+++ b/web/api/queries/median/Makefile.am
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/web/api/queries/median/README.md b/web/api/queries/median/README.md
new file mode 100644
index 0000000..25ce8b8
--- /dev/null
+++ b/web/api/queries/median/README.md
@@ -0,0 +1,45 @@
+<!--
+title: "Median"
+description: "Use median in API queries and health entities to find the 'middle' value from a sample, eliminating any unwanted spikes in the returned metrics."
+custom_edit_url: https://github.com/netdata/netdata/edit/master/web/api/queries/median/README.md
+-->
+
+# Median
+
+The median is the value separating the higher half from the lower half of a data sample
+(a population or a probability distribution). For a data set, it may be thought of as the
+"middle" value.
+
+`median` is not an accurate average, but it eliminates spikes: it sorts all the
+values in a period and selects the value in the middle of the sorted array.
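+
+For example, the median of the values `1, 3, 100` is `3`: the spike to `100`
+does not affect it.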
+
+## how to use
+
+Use it in alarms like this:
+
+```
+ alarm: my_alarm
+ on: my_chart
+lookup: median -1m unaligned of my_dimension
+ warn: $this > 1000
+```
+
+`median` does not change the units. For example, if the chart's units are `requests/sec`, the
+result will be expressed in the same units.
+
+It can also be used in APIs and badges as `&group=median` in the URL.
+
+## Examples
+
+Examining last 1 minute `successful` web server responses:
+
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=min&after=-60&label=min)
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=average&after=-60&label=average)
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=median&after=-60&label=median&value_color=orange)
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=max&after=-60&label=max)
+
+## References
+
+- <https://en.wikipedia.org/wiki/Median>.
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fweb%2Fapi%2Fqueries%2Fmedian%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/web/api/queries/median/median.c b/web/api/queries/median/median.c
new file mode 100644
index 0000000..31916c5
--- /dev/null
+++ b/web/api/queries/median/median.c
@@ -0,0 +1,79 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "median.h"
+
+
+// ----------------------------------------------------------------------------
+// median
+
+struct grouping_median {
+ size_t series_size;
+ size_t next_pos;
+
+ LONG_DOUBLE series[];
+};
+
+void *grouping_create_median(RRDR *r) {
+ long entries = r->group;
+ if(entries < 0) entries = 0;
+
+ struct grouping_median *g = (struct grouping_median *)callocz(1, sizeof(struct grouping_median) + entries * sizeof(LONG_DOUBLE));
+ g->series_size = (size_t)entries;
+
+ return g;
+}
+
+// resets when the query engine switches dimensions
+// so, clear everything to restart
+void grouping_reset_median(RRDR *r) {
+ struct grouping_median *g = (struct grouping_median *)r->internal.grouping_data;
+ g->next_pos = 0;
+}
+
+void grouping_free_median(RRDR *r) {
+ freez(r->internal.grouping_data);
+ r->internal.grouping_data = NULL;
+}
+
+void grouping_add_median(RRDR *r, calculated_number value) {
+ struct grouping_median *g = (struct grouping_median *)r->internal.grouping_data;
+
+ if(unlikely(g->next_pos >= g->series_size)) {
+ error("INTERNAL ERROR: median buffer overflow on chart '%s' - next_pos = %zu, series_size = %zu, r->group = %ld.", r->st->name, g->next_pos, g->series_size, r->group);
+ }
+ else {
+ if(calculated_number_isnumber(value))
+ g->series[g->next_pos++] = (LONG_DOUBLE)value;
+ }
+}
+
+calculated_number grouping_flush_median(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr) {
+ struct grouping_median *g = (struct grouping_median *)r->internal.grouping_data;
+
+ calculated_number value;
+
+ if(unlikely(!g->next_pos)) {
+ value = 0.0;
+ *rrdr_value_options_ptr |= RRDR_VALUE_EMPTY;
+ }
+ else {
+ if(g->next_pos > 1) {
+ sort_series(g->series, g->next_pos);
+ value = (calculated_number)median_on_sorted_series(g->series, g->next_pos);
+ }
+ else
+ value = (calculated_number)g->series[0];
+
+ if(!calculated_number_isnumber(value)) {
+ value = 0.0;
+ *rrdr_value_options_ptr |= RRDR_VALUE_EMPTY;
+ }
+
+ //log_series_to_stderr(g->series, g->next_pos, value, "median");
+ }
+
+ g->next_pos = 0;
+
+ return value;
+}
+
diff --git a/web/api/queries/median/median.h b/web/api/queries/median/median.h
new file mode 100644
index 0000000..dd2c1ff
--- /dev/null
+++ b/web/api/queries/median/median.h
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_API_QUERIES_MEDIAN_H
+#define NETDATA_API_QUERIES_MEDIAN_H
+
+#include "../query.h"
+#include "../rrdr.h"
+
+extern void *grouping_create_median(RRDR *r);
+extern void grouping_reset_median(RRDR *r);
+extern void grouping_free_median(RRDR *r);
+extern void grouping_add_median(RRDR *r, calculated_number value);
+extern calculated_number grouping_flush_median(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);
+
+#endif //NETDATA_API_QUERIES_MEDIAN_H
diff --git a/web/api/queries/min/Makefile.am b/web/api/queries/min/Makefile.am
new file mode 100644
index 0000000..161784b
--- /dev/null
+++ b/web/api/queries/min/Makefile.am
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/web/api/queries/min/README.md b/web/api/queries/min/README.md
new file mode 100644
index 0000000..69ef4ea
--- /dev/null
+++ b/web/api/queries/min/README.md
@@ -0,0 +1,38 @@
+<!--
+title: "Min"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/web/api/queries/min/README.md
+-->
+
+# Min
+
+This module finds the min value in the time-frame given.
+
+## how to use
+
+Use it in alarms like this:
+
+```
+ alarm: my_alarm
+ on: my_chart
+lookup: min -1m unaligned of my_dimension
+ warn: $this > 1000
+```
+
+`min` does not change the units. For example, if the chart's units are `requests/sec`, the
+result will be expressed in the same units.
+
+It can also be used in APIs and badges as `&group=min` in the URL.
+
+## Examples
+
+Examining last 1 minute `successful` web server responses:
+
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=min&after=-60&label=min&value_color=orange)
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=average&after=-60&label=average)
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=max&after=-60&label=max)
+
+## References
+
+- <https://en.wikipedia.org/wiki/Sample_maximum_and_minimum>.
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fweb%2Fapi%2Fqueries%2Fmin%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/web/api/queries/min/min.c b/web/api/queries/min/min.c
new file mode 100644
index 0000000..9bd7460
--- /dev/null
+++ b/web/api/queries/min/min.c
@@ -0,0 +1,60 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "min.h"
+
+// ----------------------------------------------------------------------------
+// min
+
+struct grouping_min {
+ calculated_number min;
+ size_t count;
+};
+
+void *grouping_create_min(RRDR *r) {
+ (void)r;
+ return callocz(1, sizeof(struct grouping_min));
+}
+
+// resets when the query engine switches dimensions
+// so, clear everything to restart
+void grouping_reset_min(RRDR *r) {
+ struct grouping_min *g = (struct grouping_min *)r->internal.grouping_data;
+ g->min = 0;
+ g->count = 0;
+}
+
+void grouping_free_min(RRDR *r) {
+ freez(r->internal.grouping_data);
+ r->internal.grouping_data = NULL;
+}
+
+void grouping_add_min(RRDR *r, calculated_number value) {
+ if(!isnan(value)) {
+ struct grouping_min *g = (struct grouping_min *)r->internal.grouping_data;
+
+ if(!g->count || calculated_number_fabs(value) < calculated_number_fabs(g->min)) {
+ g->min = value;
+ g->count++;
+ }
+ }
+}
+
+calculated_number grouping_flush_min(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr) {
+ struct grouping_min *g = (struct grouping_min *)r->internal.grouping_data;
+
+ calculated_number value;
+
+ if(unlikely(!g->count)) {
+ value = 0.0;
+ *rrdr_value_options_ptr |= RRDR_VALUE_EMPTY;
+ }
+ else {
+ value = g->min;
+ }
+
+ g->min = 0.0;
+ g->count = 0;
+
+ return value;
+}
+
diff --git a/web/api/queries/min/min.h b/web/api/queries/min/min.h
new file mode 100644
index 0000000..7470360
--- /dev/null
+++ b/web/api/queries/min/min.h
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_API_QUERY_MIN_H
+#define NETDATA_API_QUERY_MIN_H
+
+#include "../query.h"
+#include "../rrdr.h"
+
+extern void *grouping_create_min(RRDR *r);
+extern void grouping_reset_min(RRDR *r);
+extern void grouping_free_min(RRDR *r);
+extern void grouping_add_min(RRDR *r, calculated_number value);
+extern calculated_number grouping_flush_min(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);
+
+#endif //NETDATA_API_QUERY_MIN_H
diff --git a/web/api/queries/query.c b/web/api/queries/query.c
new file mode 100644
index 0000000..663e4bd
--- /dev/null
+++ b/web/api/queries/query.c
@@ -0,0 +1,1636 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "query.h"
+#include "web/api/formatters/rrd2json.h"
+#include "rrdr.h"
+
+#include "average/average.h"
+#include "incremental_sum/incremental_sum.h"
+#include "max/max.h"
+#include "median/median.h"
+#include "min/min.h"
+#include "sum/sum.h"
+#include "stddev/stddev.h"
+#include "ses/ses.h"
+#include "des/des.h"
+
+// ----------------------------------------------------------------------------
+
+static struct {
+ const char *name;
+ uint32_t hash;
+ RRDR_GROUPING value;
+
+ // One time initialization for the module.
+ // This is called once, when netdata starts.
+ void (*init)(void);
+
+ // Allocate all required structures for a query.
+ // This is called once for each netdata query.
+ void *(*create)(struct rrdresult *r);
+
+ // Cleanup collected values, but don't destroy the structures.
+ // This is called when the query engine switches dimensions,
+ // as part of the same query (so same chart, switching metric).
+ void (*reset)(struct rrdresult *r);
+
+ // Free all resources allocated for the query.
+ void (*free)(struct rrdresult *r);
+
+ // Add a single value into the calculation.
+ // The module may decide to cache it, or use it on the fly.
+ void (*add)(struct rrdresult *r, calculated_number value);
+
+ // Generate a single result for the values added so far.
+ // More values and points may be requested later.
+ // It is up to the module to reset its internal structures
+ // when flushing it (so for a few modules it may be better to
+ // continue after a flush as if nothing changed, for others a
+ // cleanup of the internal structures may be required).
+ calculated_number (*flush)(struct rrdresult *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);
+} api_v1_data_groups[] = {
+ {.name = "average",
+ .hash = 0,
+ .value = RRDR_GROUPING_AVERAGE,
+ .init = NULL,
+ .create= grouping_create_average,
+ .reset = grouping_reset_average,
+ .free = grouping_free_average,
+ .add = grouping_add_average,
+ .flush = grouping_flush_average
+ },
+ {.name = "mean", // alias on 'average'
+ .hash = 0,
+ .value = RRDR_GROUPING_AVERAGE,
+ .init = NULL,
+ .create= grouping_create_average,
+ .reset = grouping_reset_average,
+ .free = grouping_free_average,
+ .add = grouping_add_average,
+ .flush = grouping_flush_average
+ },
+ {.name = "incremental_sum",
+ .hash = 0,
+ .value = RRDR_GROUPING_INCREMENTAL_SUM,
+ .init = NULL,
+ .create= grouping_create_incremental_sum,
+ .reset = grouping_reset_incremental_sum,
+ .free = grouping_free_incremental_sum,
+ .add = grouping_add_incremental_sum,
+ .flush = grouping_flush_incremental_sum
+ },
+ {.name = "incremental-sum",
+ .hash = 0,
+ .value = RRDR_GROUPING_INCREMENTAL_SUM,
+ .init = NULL,
+ .create= grouping_create_incremental_sum,
+ .reset = grouping_reset_incremental_sum,
+ .free = grouping_free_incremental_sum,
+ .add = grouping_add_incremental_sum,
+ .flush = grouping_flush_incremental_sum
+ },
+ {.name = "median",
+ .hash = 0,
+ .value = RRDR_GROUPING_MEDIAN,
+ .init = NULL,
+ .create= grouping_create_median,
+ .reset = grouping_reset_median,
+ .free = grouping_free_median,
+ .add = grouping_add_median,
+ .flush = grouping_flush_median
+ },
+ {.name = "min",
+ .hash = 0,
+ .value = RRDR_GROUPING_MIN,
+ .init = NULL,
+ .create= grouping_create_min,
+ .reset = grouping_reset_min,
+ .free = grouping_free_min,
+ .add = grouping_add_min,
+ .flush = grouping_flush_min
+ },
+ {.name = "max",
+ .hash = 0,
+ .value = RRDR_GROUPING_MAX,
+ .init = NULL,
+ .create= grouping_create_max,
+ .reset = grouping_reset_max,
+ .free = grouping_free_max,
+ .add = grouping_add_max,
+ .flush = grouping_flush_max
+ },
+ {.name = "sum",
+ .hash = 0,
+ .value = RRDR_GROUPING_SUM,
+ .init = NULL,
+ .create= grouping_create_sum,
+ .reset = grouping_reset_sum,
+ .free = grouping_free_sum,
+ .add = grouping_add_sum,
+ .flush = grouping_flush_sum
+ },
+
+ // standard deviation
+ {.name = "stddev",
+ .hash = 0,
+ .value = RRDR_GROUPING_STDDEV,
+ .init = NULL,
+ .create= grouping_create_stddev,
+ .reset = grouping_reset_stddev,
+ .free = grouping_free_stddev,
+ .add = grouping_add_stddev,
+ .flush = grouping_flush_stddev
+ },
+ {.name = "cv", // coefficient variation is calculated by stddev
+ .hash = 0,
+ .value = RRDR_GROUPING_CV,
+ .init = NULL,
+ .create= grouping_create_stddev, // not an error, stddev calculates this too
+ .reset = grouping_reset_stddev, // not an error, stddev calculates this too
+ .free = grouping_free_stddev, // not an error, stddev calculates this too
+ .add = grouping_add_stddev, // not an error, stddev calculates this too
+ .flush = grouping_flush_coefficient_of_variation
+ },
+ {.name = "rsd", // alias of 'cv'
+ .hash = 0,
+ .value = RRDR_GROUPING_CV,
+ .init = NULL,
+ .create= grouping_create_stddev, // not an error, stddev calculates this too
+ .reset = grouping_reset_stddev, // not an error, stddev calculates this too
+ .free = grouping_free_stddev, // not an error, stddev calculates this too
+ .add = grouping_add_stddev, // not an error, stddev calculates this too
+ .flush = grouping_flush_coefficient_of_variation
+ },
+
+ /*
+ {.name = "mean", // same as average, no need to define it again
+ .hash = 0,
+ .value = RRDR_GROUPING_MEAN,
+ .setup = NULL,
+ .create= grouping_create_stddev,
+ .reset = grouping_reset_stddev,
+ .free = grouping_free_stddev,
+ .add = grouping_add_stddev,
+ .flush = grouping_flush_mean
+ },
+ */
+
+ /*
+ {.name = "variance", // meaningless to offer
+ .hash = 0,
+ .value = RRDR_GROUPING_VARIANCE,
+ .setup = NULL,
+ .create= grouping_create_stddev,
+ .reset = grouping_reset_stddev,
+ .free = grouping_free_stddev,
+ .add = grouping_add_stddev,
+ .flush = grouping_flush_variance
+ },
+ */
+
+ // single exponential smoothing
+ {.name = "ses",
+ .hash = 0,
+ .value = RRDR_GROUPING_SES,
+ .init = grouping_init_ses,
+ .create= grouping_create_ses,
+ .reset = grouping_reset_ses,
+ .free = grouping_free_ses,
+ .add = grouping_add_ses,
+ .flush = grouping_flush_ses
+ },
+ {.name = "ema", // alias for 'ses'
+ .hash = 0,
+ .value = RRDR_GROUPING_SES,
+ .init = NULL,
+ .create= grouping_create_ses,
+ .reset = grouping_reset_ses,
+ .free = grouping_free_ses,
+ .add = grouping_add_ses,
+ .flush = grouping_flush_ses
+ },
+ {.name = "ewma", // alias for ses
+ .hash = 0,
+ .value = RRDR_GROUPING_SES,
+ .init = NULL,
+ .create= grouping_create_ses,
+ .reset = grouping_reset_ses,
+ .free = grouping_free_ses,
+ .add = grouping_add_ses,
+ .flush = grouping_flush_ses
+ },
+
+ // double exponential smoothing
+ {.name = "des",
+ .hash = 0,
+ .value = RRDR_GROUPING_DES,
+ .init = grouping_init_des,
+ .create= grouping_create_des,
+ .reset = grouping_reset_des,
+ .free = grouping_free_des,
+ .add = grouping_add_des,
+ .flush = grouping_flush_des
+ },
+
+ // terminator
+ {.name = NULL,
+ .hash = 0,
+ .value = RRDR_GROUPING_UNDEFINED,
+ .init = NULL,
+ .create= grouping_create_average,
+ .reset = grouping_reset_average,
+ .free = grouping_free_average,
+ .add = grouping_add_average,
+ .flush = grouping_flush_average
+ }
+};
+
+void web_client_api_v1_init_grouping(void) {
+ int i;
+
+ for(i = 0; api_v1_data_groups[i].name ; i++) {
+ api_v1_data_groups[i].hash = simple_hash(api_v1_data_groups[i].name);
+
+ if(api_v1_data_groups[i].init)
+ api_v1_data_groups[i].init();
+ }
+}
+
+const char *group_method2string(RRDR_GROUPING group) {
+ int i;
+
+ for(i = 0; api_v1_data_groups[i].name ; i++) {
+ if(api_v1_data_groups[i].value == group) {
+ return api_v1_data_groups[i].name;
+ }
+ }
+
+ return "unknown-group-method";
+}
+
+RRDR_GROUPING web_client_api_request_v1_data_group(const char *name, RRDR_GROUPING def) {
+ int i;
+
+ uint32_t hash = simple_hash(name);
+ for(i = 0; api_v1_data_groups[i].name ; i++)
+ if(unlikely(hash == api_v1_data_groups[i].hash && !strcmp(name, api_v1_data_groups[i].name)))
+ return api_v1_data_groups[i].value;
+
+ return def;
+}
+
+// ----------------------------------------------------------------------------
+
+static void rrdr_disable_not_selected_dimensions(RRDR *r, RRDR_OPTIONS options, const char *dims, RRDDIM *temp_rd) {
+ rrdset_check_rdlock(r->st);
+
+ if(unlikely(!dims || !*dims || (dims[0] == '*' && dims[1] == '\0'))) return;
+
+ int match_ids = 0, match_names = 0;
+
+ if(unlikely(options & RRDR_OPTION_MATCH_IDS))
+ match_ids = 1;
+ if(unlikely(options & RRDR_OPTION_MATCH_NAMES))
+ match_names = 1;
+
+ if(likely(!match_ids && !match_names))
+ match_ids = match_names = 1;
+
+ SIMPLE_PATTERN *pattern = simple_pattern_create(dims, ",|\t\r\n\f\v", SIMPLE_PATTERN_EXACT);
+
+ RRDDIM *d;
+ long c, dims_selected = 0, dims_not_hidden_not_zero = 0;
+ for(c = 0, d = temp_rd?temp_rd:r->st->dimensions; d ;c++, d = d->next) {
+ if( (match_ids && simple_pattern_matches(pattern, d->id))
+ || (match_names && simple_pattern_matches(pattern, d->name))
+ ) {
+ r->od[c] |= RRDR_DIMENSION_SELECTED;
+ if(unlikely(r->od[c] & RRDR_DIMENSION_HIDDEN)) r->od[c] &= ~RRDR_DIMENSION_HIDDEN;
+ dims_selected++;
+
+ // since the user needs this dimension
+ // make it appear as NONZERO, to return it
+ // even if the dimension has only zeros
+ // unless option non_zero is set
+ if(unlikely(!(options & RRDR_OPTION_NONZERO)))
+ r->od[c] |= RRDR_DIMENSION_NONZERO;
+
+ // count the visible dimensions
+ if(likely(r->od[c] & RRDR_DIMENSION_NONZERO))
+ dims_not_hidden_not_zero++;
+ }
+ else {
+ r->od[c] |= RRDR_DIMENSION_HIDDEN;
+ if(unlikely(r->od[c] & RRDR_DIMENSION_SELECTED)) r->od[c] &= ~RRDR_DIMENSION_SELECTED;
+ }
+ }
+ simple_pattern_free(pattern);
+
+ // check if all dimensions are hidden
+ if(unlikely(!dims_not_hidden_not_zero && dims_selected)) {
+ // there are a few selected dimensions
+ // but they are all zero
+ // enable the selected ones
+ // to avoid returning an empty chart
+ for(c = 0, d = temp_rd?temp_rd:r->st->dimensions; d ;c++, d = d->next)
+ if(unlikely(r->od[c] & RRDR_DIMENSION_SELECTED))
+ r->od[c] |= RRDR_DIMENSION_NONZERO;
+ }
+}
+
+// ----------------------------------------------------------------------------
+// helpers to find our way in RRDR
+
+static inline RRDR_VALUE_FLAGS *rrdr_line_options(RRDR *r, long rrdr_line) {
+ return &r->o[ rrdr_line * r->d ];
+}
+
+static inline calculated_number *rrdr_line_values(RRDR *r, long rrdr_line) {
+ return &r->v[ rrdr_line * r->d ];
+}
+
+static inline long rrdr_line_init(RRDR *r, time_t t, long rrdr_line) {
+ rrdr_line++;
+
+ #ifdef NETDATA_INTERNAL_CHECKS
+
+ if(unlikely(rrdr_line >= r->n))
+ error("INTERNAL ERROR: requested to step above RRDR size for chart '%s'", r->st->name);
+
+ if(unlikely(r->t[rrdr_line] != 0 && r->t[rrdr_line] != t))
+ error("INTERNAL ERROR: overwriting the timestamp of RRDR line %zu from %zu to %zu, of chart '%s'", (size_t)rrdr_line, (size_t)r->t[rrdr_line], (size_t)t, r->st->name);
+
+ #endif
+
+ // save the time
+ r->t[rrdr_line] = t;
+
+ return rrdr_line;
+}
+
+static inline void rrdr_done(RRDR *r, long rrdr_line) {
+ r->rows = rrdr_line + 1;
+}
+
+
+// ----------------------------------------------------------------------------
+// fill RRDR for a single dimension
+
+static inline void do_dimension_variablestep(
+ RRDR *r
+ , long points_wanted
+ , RRDDIM *rd
+ , long dim_id_in_rrdr
+ , time_t after_wanted
+ , time_t before_wanted
+){
+// RRDSET *st = r->st;
+
+ time_t
+ now = after_wanted,
+ dt = r->update_every,
+ max_date = 0,
+ min_date = 0;
+
+ long
+// group_size = r->group,
+ points_added = 0,
+ values_in_group = 0,
+ values_in_group_non_zero = 0,
+ rrdr_line = -1;
+
+ RRDR_VALUE_FLAGS
+ group_value_flags = RRDR_VALUE_NOTHING;
+
+ struct rrddim_query_handle handle;
+
+ calculated_number min = r->min, max = r->max;
+ size_t db_points_read = 0;
+ time_t db_now = now;
+ storage_number n_curr, n_prev = SN_EMPTY_SLOT;
+ calculated_number value;
+
+ for(rd->state->query_ops.init(rd, &handle, now, before_wanted) ; points_added < points_wanted ; now += dt) {
+ // make sure we return data in the proper time range
+ if (unlikely(now > before_wanted)) {
+#ifdef NETDATA_INTERNAL_CHECKS
+ r->internal.log = "stopped, because attempted to access the db after 'wanted before'";
+#endif
+ break;
+ }
+ if (unlikely(now < after_wanted)) {
+#ifdef NETDATA_INTERNAL_CHECKS
+ r->internal.log = "skipped, because attempted to access the db before 'wanted after'";
+#endif
+ continue;
+ }
+
+ while (now >= db_now && (!rd->state->query_ops.is_finished(&handle) ||
+ does_storage_number_exist(n_prev))) {
+ value = NAN;
+ if (does_storage_number_exist(n_prev)) {
+ // use the previously read database value
+ n_curr = n_prev;
+ } else {
+ // read the value from the database
+ n_curr = rd->state->query_ops.next_metric(&handle, &db_now);
+ }
+ n_prev = SN_EMPTY_SLOT;
+            // db_now may have been advanced by next_metric() above
+ if (likely(now >= db_now)) {
+ if (likely(does_storage_number_exist(n_curr))) {
+ value = unpack_storage_number(n_curr);
+ if (likely(value != 0.0))
+ values_in_group_non_zero++;
+
+ if (unlikely(did_storage_number_reset(n_curr)))
+ group_value_flags |= RRDR_VALUE_RESET;
+ }
+ } else {
+ // We must postpone processing the value and fill the result with gaps instead
+ if (likely(does_storage_number_exist(n_curr))) {
+ n_prev = n_curr;
+ }
+ }
+ // add this value to grouping
+ r->internal.grouping_add(r, value);
+ values_in_group++;
+ db_points_read++;
+ }
+
+ if (0 == values_in_group) {
+ // add NAN to grouping
+ r->internal.grouping_add(r, NAN);
+ }
+
+ rrdr_line = rrdr_line_init(r, now, rrdr_line);
+
+ if(unlikely(!min_date)) min_date = now;
+ max_date = now;
+
+ // find the place to store our values
+ RRDR_VALUE_FLAGS *rrdr_value_options_ptr = &r->o[rrdr_line * r->d + dim_id_in_rrdr];
+
+ // update the dimension options
+ if(likely(values_in_group_non_zero))
+ r->od[dim_id_in_rrdr] |= RRDR_DIMENSION_NONZERO;
+
+ // store the specific point options
+ *rrdr_value_options_ptr = group_value_flags;
+
+ // store the value
+ value = r->internal.grouping_flush(r, rrdr_value_options_ptr);
+ r->v[rrdr_line * r->d + dim_id_in_rrdr] = value;
+
+ if(likely(points_added || dim_id_in_rrdr)) {
+ // find the min/max across all dimensions
+
+ if(unlikely(value < min)) min = value;
+ if(unlikely(value > max)) max = value;
+
+ }
+ else {
+ // runs only when dim_id_in_rrdr == 0 && points_added == 0
+ // so, on the first point added for the query.
+ min = max = value;
+ }
+
+ points_added++;
+ values_in_group = 0;
+ group_value_flags = RRDR_VALUE_NOTHING;
+ values_in_group_non_zero = 0;
+ }
+ rd->state->query_ops.finalize(&handle);
+
+ r->internal.db_points_read += db_points_read;
+ r->internal.result_points_generated += points_added;
+
+ r->min = min;
+ r->max = max;
+ r->before = max_date;
+ r->after = min_date - (r->group - 1) * dt;
+ rrdr_done(r, rrdr_line);
+
+ #ifdef NETDATA_INTERNAL_CHECKS
+ if(unlikely(r->rows != points_added))
+ error("INTERNAL ERROR: %s.%s added %zu rows, but RRDR says I added %zu.", r->st->name, rd->name, (size_t)points_added, (size_t)r->rows);
+ #endif
+}
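
The inner `while` above implements a one-point lookahead: `next_metric()` can return a point whose timestamp lies beyond the row currently being filled, in which case the point is parked in `n_prev` and consumed on a later iteration, while gaps are emitted in between. A compressed sketch of that buffering, with a stub array in place of the real `query_ops`:

```c
#include <stdio.h>
#include <math.h>
#include <stdbool.h>

typedef struct { long t; double v; } point;

int main(void) {
    point src[] = {{10, 1.0}, {40, 2.0}};  // sparse, variable-step data
    int i = 0, nsrc = 2;
    bool have_pending = false;
    point pending = {0, 0};

    for(long now = 10; now <= 40; now += 10) {
        double value = NAN;
        point p;
        if(have_pending) { p = pending; have_pending = false; }
        else if(i < nsrc) p = src[i++];
        else { printf("t=%ld gap\n", now); continue; }

        if(p.t <= now) value = p.v;        // the point belongs to this row
        else { pending = p; have_pending = true; } // too early: park it

        printf("t=%ld %s\n", now, isnan(value) ? "gap" : "value");
    }
    return 0;
}
```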
+
+static inline void do_dimension_fixedstep(
+ RRDR *r
+ , long points_wanted
+ , RRDDIM *rd
+ , long dim_id_in_rrdr
+ , time_t after_wanted
+ , time_t before_wanted
+){
+ RRDSET *st = r->st;
+
+ time_t
+ now = after_wanted,
+        dt = r->update_every / r->group, /* usually st->update_every */
+ max_date = 0,
+ min_date = 0;
+
+ long
+ group_size = r->group,
+ points_added = 0,
+ values_in_group = 0,
+ values_in_group_non_zero = 0,
+ rrdr_line = -1;
+
+ RRDR_VALUE_FLAGS
+ group_value_flags = RRDR_VALUE_NOTHING;
+
+ struct rrddim_query_handle handle;
+
+ calculated_number min = r->min, max = r->max;
+ size_t db_points_read = 0;
+ time_t db_now = now;
+
+ for(rd->state->query_ops.init(rd, &handle, now, before_wanted) ; points_added < points_wanted ; now += dt) {
+ // make sure we return data in the proper time range
+ if(unlikely(now > before_wanted)) {
+#ifdef NETDATA_INTERNAL_CHECKS
+ r->internal.log = "stopped, because attempted to access the db after 'wanted before'";
+#endif
+ break;
+ }
+ if(unlikely(now < after_wanted)) {
+#ifdef NETDATA_INTERNAL_CHECKS
+ r->internal.log = "skipped, because attempted to access the db before 'wanted after'";
+#endif
+ continue;
+ }
+ // read the value from the database
+ //storage_number n = rd->values[slot];
+#ifdef NETDATA_INTERNAL_CHECKS
+ if ((rd->rrd_memory_mode != RRD_MEMORY_MODE_DBENGINE) &&
+ (rrdset_time2slot(st, now) != (long unsigned)handle.slotted.slot)) {
+ error("INTERNAL CHECK: Unaligned query for %s, database slot: %lu, expected slot: %lu", rd->id, (long unsigned)handle.slotted.slot, rrdset_time2slot(st, now));
+ }
+#endif
+ db_now = now; // this is needed to set db_now in case the next_metric implementation does not set it
+ storage_number n = rd->state->query_ops.next_metric(&handle, &db_now);
+ if(unlikely(db_now > before_wanted)) {
+#ifdef NETDATA_INTERNAL_CHECKS
+ r->internal.log = "stopped, because attempted to access the db after 'wanted before'";
+#endif
+ break;
+ }
+ for ( ; now <= db_now ; now += dt) {
+ calculated_number value = NAN;
+ if(likely(now >= db_now && does_storage_number_exist(n))) {
+#if defined(NETDATA_INTERNAL_CHECKS) && defined(ENABLE_DBENGINE)
+ if ((rd->rrd_memory_mode == RRD_MEMORY_MODE_DBENGINE) && (now != handle.rrdeng.now)) {
+ error("INTERNAL CHECK: Unaligned query for %s, database time: %ld, expected time: %ld", rd->id, (long)handle.rrdeng.now, (long)now);
+ }
+#endif
+ value = unpack_storage_number(n);
+ if(likely(value != 0.0))
+ values_in_group_non_zero++;
+
+ if(unlikely(did_storage_number_reset(n)))
+ group_value_flags |= RRDR_VALUE_RESET;
+
+ }
+
+ // add this value for grouping
+ r->internal.grouping_add(r, value);
+ values_in_group++;
+ db_points_read++;
+
+ if(unlikely(values_in_group == group_size)) {
+ rrdr_line = rrdr_line_init(r, now, rrdr_line);
+
+ if(unlikely(!min_date)) min_date = now;
+ max_date = now;
+
+ // find the place to store our values
+ RRDR_VALUE_FLAGS *rrdr_value_options_ptr = &r->o[rrdr_line * r->d + dim_id_in_rrdr];
+
+ // update the dimension options
+ if(likely(values_in_group_non_zero))
+ r->od[dim_id_in_rrdr] |= RRDR_DIMENSION_NONZERO;
+
+ // store the specific point options
+ *rrdr_value_options_ptr = group_value_flags;
+
+ // store the value
+ calculated_number value = r->internal.grouping_flush(r, rrdr_value_options_ptr);
+ r->v[rrdr_line * r->d + dim_id_in_rrdr] = value;
+
+ if(likely(points_added || dim_id_in_rrdr)) {
+ // find the min/max across all dimensions
+
+ if(unlikely(value < min)) min = value;
+ if(unlikely(value > max)) max = value;
+
+ }
+ else {
+ // runs only when dim_id_in_rrdr == 0 && points_added == 0
+ // so, on the first point added for the query.
+ min = max = value;
+ }
+
+ points_added++;
+ values_in_group = 0;
+ group_value_flags = RRDR_VALUE_NOTHING;
+ values_in_group_non_zero = 0;
+ }
+ }
+ now = db_now;
+ }
+ rd->state->query_ops.finalize(&handle);
+
+ r->internal.db_points_read += db_points_read;
+ r->internal.result_points_generated += points_added;
+
+ r->min = min;
+ r->max = max;
+ r->before = max_date;
+ r->after = min_date - (r->group - 1) * dt;
+ rrdr_done(r, rrdr_line);
+
+#ifdef NETDATA_INTERNAL_CHECKS
+ if(unlikely(r->rows != points_added))
+ error("INTERNAL ERROR: %s.%s added %zu rows, but RRDR says I added %zu.", r->st->name, rd->name, (size_t)points_added, (size_t)r->rows);
+#endif
+}
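
In the fixed-step path every `group` consecutive database points collapse into one output row through `grouping_add()` followed by `grouping_flush()`. A minimal sketch of that contract, using an average as the grouping function (stand-alone; not the real callbacks):

```c
#include <stdio.h>

int main(void) {
    double raw[] = {1, 3, 5, 7, 9, 11};    // six collected points
    long group = 3, values_in_group = 0;
    double sum = 0;                        // grouping state

    for(int i = 0; i < 6; i++) {
        sum += raw[i];                     // grouping_add()
        if(++values_in_group == group) {   // the flush condition above
            printf("row: %g\n", sum / group); // grouping_flush(): 3, then 9
            sum = 0;
            values_in_group = 0;
        }
    }
    return 0;
}
```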
+
+// ----------------------------------------------------------------------------
+// fill RRDR for the whole chart
+
+#ifdef NETDATA_INTERNAL_CHECKS
+static void rrd2rrdr_log_request_response_metdata(RRDR *r
+ , RRDR_GROUPING group_method
+ , int aligned
+ , long group
+ , long resampling_time
+ , long resampling_group
+ , time_t after_wanted
+ , time_t after_requested
+ , time_t before_wanted
+ , time_t before_requested
+ , long points_requested
+ , long points_wanted
+ //, size_t after_slot
+ //, size_t before_slot
+ , const char *msg
+ ) {
+ netdata_rwlock_rdlock(&r->st->rrdset_rwlock);
+ info("INTERNAL ERROR: rrd2rrdr() on %s update every %d with %s grouping %s (group: %ld, resampling_time: %ld, resampling_group: %ld), "
+ "after (got: %zu, want: %zu, req: %zu, db: %zu), "
+ "before (got: %zu, want: %zu, req: %zu, db: %zu), "
+ "duration (got: %zu, want: %zu, req: %zu, db: %zu), "
+ //"slot (after: %zu, before: %zu, delta: %zu), "
+ "points (got: %ld, want: %ld, req: %ld, db: %ld), "
+ "%s"
+ , r->st->name
+ , r->st->update_every
+
+ // grouping
+ , (aligned) ? "aligned" : "unaligned"
+ , group_method2string(group_method)
+ , group
+ , resampling_time
+ , resampling_group
+
+ // after
+ , (size_t)r->after
+ , (size_t)after_wanted
+ , (size_t)after_requested
+ , (size_t)rrdset_first_entry_t_nolock(r->st)
+
+ // before
+ , (size_t)r->before
+ , (size_t)before_wanted
+ , (size_t)before_requested
+ , (size_t)rrdset_last_entry_t_nolock(r->st)
+
+ // duration
+ , (size_t)(r->before - r->after + r->st->update_every)
+ , (size_t)(before_wanted - after_wanted + r->st->update_every)
+ , (size_t)(before_requested - after_requested)
+ , (size_t)((rrdset_last_entry_t_nolock(r->st) - rrdset_first_entry_t_nolock(r->st)) + r->st->update_every)
+
+ // slot
+ /*
+ , after_slot
+ , before_slot
+ , (after_slot > before_slot) ? (r->st->entries - after_slot + before_slot) : (before_slot - after_slot)
+ */
+
+ // points
+ , r->rows
+ , points_wanted
+ , points_requested
+ , r->st->entries
+
+ // message
+ , msg
+ );
+ netdata_rwlock_unlock(&r->st->rrdset_rwlock);
+}
+#endif // NETDATA_INTERNAL_CHECKS
+
+// Returns 1 if an absolute period was requested or 0 if it was a relative period
+static int rrdr_convert_before_after_to_absolute(
+ long long *after_requestedp
+ , long long *before_requestedp
+ , int update_every
+ , time_t first_entry_t
+ , time_t last_entry_t
+ , RRDR_OPTIONS options
+) {
+ int absolute_period_requested = -1;
+ long long after_requested, before_requested;
+
+ before_requested = *before_requestedp;
+ after_requested = *after_requestedp;
+
+ if(before_requested == 0 && after_requested == 0) {
+        // dump all the data
+ before_requested = last_entry_t;
+ after_requested = first_entry_t;
+ absolute_period_requested = 0;
+ }
+
+ // allow relative for before (smaller than API_RELATIVE_TIME_MAX)
+ if(abs(before_requested) <= API_RELATIVE_TIME_MAX) {
+ if(abs(before_requested) % update_every) {
+ // make sure it is multiple of st->update_every
+ if(before_requested < 0) before_requested = before_requested - update_every -
+ before_requested % update_every;
+ else before_requested = before_requested + update_every - before_requested % update_every;
+ }
+ if(before_requested > 0) before_requested = first_entry_t + before_requested;
+ else before_requested = last_entry_t + before_requested; //last_entry_t is not really now_t
+ //TODO: fix before_requested to be relative to now_t
+ absolute_period_requested = 0;
+ }
+
+ // allow relative for after (smaller than API_RELATIVE_TIME_MAX)
+ if(abs(after_requested) <= API_RELATIVE_TIME_MAX) {
+ if(after_requested == 0) after_requested = -update_every;
+ if(abs(after_requested) % update_every) {
+ // make sure it is multiple of st->update_every
+ if(after_requested < 0) after_requested = after_requested - update_every - after_requested % update_every;
+ else after_requested = after_requested + update_every - after_requested % update_every;
+ }
+ after_requested = before_requested + after_requested;
+ absolute_period_requested = 0;
+ }
+
+ if(absolute_period_requested == -1)
+ absolute_period_requested = 1;
+
+ // make sure they are within our timeframe
+ if(before_requested > last_entry_t) before_requested = last_entry_t;
+ if(before_requested < first_entry_t && !(options & RRDR_OPTION_ALLOW_PAST))
+ before_requested = first_entry_t;
+
+ if(after_requested > last_entry_t) after_requested = last_entry_t;
+ if(after_requested < first_entry_t && !(options & RRDR_OPTION_ALLOW_PAST))
+ after_requested = first_entry_t;
+
+ // check if they are reversed
+ if(after_requested > before_requested) {
+ time_t tmp = before_requested;
+ before_requested = after_requested;
+ after_requested = tmp;
+ }
+
+ *before_requestedp = before_requested;
+ *after_requestedp = after_requested;
+
+ return absolute_period_requested;
+}
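
A worked example of the conversion, assuming `update_every = 1`: a request with `after=-600&before=0` resolves `before` to the last database entry and `after` to 600 seconds earlier. The sketch below reproduces just those two branches, with illustrative timestamps:

```c
#include <stdio.h>
#include <time.h>

int main(void) {
    time_t first_entry_t = 990000, last_entry_t = 1000000;
    long long before = 0, after = -600;    // relative values from the API

    // before <= 0 is relative to the last entry (see the code above)
    before = last_entry_t + before;        // -> 1000000
    after  = before + after;               // -> 999400

    if(after < first_entry_t) after = first_entry_t;  // clamp to the db
    printf("after=%lld before=%lld\n", after, before);
    return 0;
}
```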
+
+static RRDR *rrd2rrdr_fixedstep(
+ RRDSET *st
+ , long points_requested
+ , long long after_requested
+ , long long before_requested
+ , RRDR_GROUPING group_method
+ , long resampling_time_requested
+ , RRDR_OPTIONS options
+ , const char *dimensions
+ , int update_every
+ , time_t first_entry_t
+ , time_t last_entry_t
+ , int absolute_period_requested
+ , struct context_param *context_param_list
+) {
+ int aligned = !(options & RRDR_OPTION_NOT_ALIGNED);
+
+ // the duration of the chart
+ time_t duration = before_requested - after_requested;
+ long available_points = duration / update_every;
+
+ RRDDIM *temp_rd = context_param_list ? context_param_list->rd : NULL;
+
+ if(duration <= 0 || available_points <= 0)
+ return rrdr_create(st, 1, context_param_list);
+
+ // check the number of wanted points in the result
+ if(unlikely(points_requested < 0)) points_requested = -points_requested;
+ if(unlikely(points_requested > available_points)) points_requested = available_points;
+ if(unlikely(points_requested == 0)) points_requested = available_points;
+
+ // calculate the desired grouping of source data points
+ long group = available_points / points_requested;
+ if(unlikely(group <= 0)) group = 1;
+ if(unlikely(available_points % points_requested > points_requested / 2)) group++; // rounding to the closest integer
+
+ // resampling_time_requested enforces a certain grouping multiple
+ calculated_number resampling_divisor = 1.0;
+ long resampling_group = 1;
+ if(unlikely(resampling_time_requested > update_every)) {
+ if (unlikely(resampling_time_requested > duration)) {
+ // group_time is above the available duration
+
+ #ifdef NETDATA_INTERNAL_CHECKS
+ info("INTERNAL CHECK: %s: requested gtime %ld secs, is greater than the desired duration %ld secs", st->id, resampling_time_requested, duration);
+ #endif
+
+ after_requested = before_requested - resampling_time_requested;
+ duration = before_requested - after_requested;
+ available_points = duration / update_every;
+ group = available_points / points_requested;
+ }
+
+        // if the duration is not aligned to the resampling time,
+        // extend the duration into the past, to avoid a gap in the chart,
+        // but only when the missing duration is above 1/10th of a point
+ if(duration % resampling_time_requested) {
+ time_t delta = duration % resampling_time_requested;
+ if(delta > resampling_time_requested / 10) {
+ after_requested -= resampling_time_requested - delta;
+ duration = before_requested - after_requested;
+ available_points = duration / update_every;
+ group = available_points / points_requested;
+ }
+ }
+
+ // the points we should group to satisfy gtime
+ resampling_group = resampling_time_requested / update_every;
+ if(unlikely(resampling_time_requested % update_every)) {
+ #ifdef NETDATA_INTERNAL_CHECKS
+ info("INTERNAL CHECK: %s: requested gtime %ld secs, is not a multiple of the chart's data collection frequency %d secs", st->id, resampling_time_requested, update_every);
+ #endif
+
+ resampling_group++;
+ }
+
+ // adapt group according to resampling_group
+ if(unlikely(group < resampling_group)) group = resampling_group; // do not allow grouping below the desired one
+ if(unlikely(group % resampling_group)) group += resampling_group - (group % resampling_group); // make sure group is multiple of resampling_group
+
+ //resampling_divisor = group / resampling_group;
+ resampling_divisor = (calculated_number)(group * update_every) / (calculated_number)resampling_time_requested;
+ }
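
Concretely: 100 points requested over a one-hour window at `update_every = 1` gives 3600 available points and `group = 36`; a resampling time (`gtime`) of 60 seconds forces `resampling_group = 60` and lifts `group` to the next multiple of it. A sketch of that arithmetic, with those illustrative numbers:

```c
#include <stdio.h>

int main(void) {
    long available_points = 3600, points_requested = 100;
    long group = available_points / points_requested;      // 36
    if(available_points % points_requested > points_requested / 2)
        group++;                                           // round to nearest

    long resampling_group = 60;                            // gtime = 60s
    if(group < resampling_group) group = resampling_group;
    if(group % resampling_group)
        group += resampling_group - (group % resampling_group);

    printf("group=%ld\n", group);                          // group=60
    return 0;
}
```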
+
+ // now that we have group,
+ // align the requested timeframe to fit it.
+
+ if(aligned) {
+        // alignment has been requested, so align the values
+ before_requested -= before_requested % (group * update_every);
+ after_requested -= after_requested % (group * update_every);
+ }
+
+    // we align the request on before_requested
+ time_t before_wanted = before_requested;
+ if(likely(before_wanted > last_entry_t)) {
+ #ifdef NETDATA_INTERNAL_CHECKS
+ error("INTERNAL ERROR: rrd2rrdr() on %s, before_wanted is after db max", st->name);
+ #endif
+
+ before_wanted = last_entry_t - (last_entry_t % ( ((aligned)?group:1) * update_every ));
+ }
+ //size_t before_slot = rrdset_time2slot(st, before_wanted);
+
+    // we need to estimate the number of points, so that each
+    // point maps to an integer number of values
+ long points_wanted = (before_wanted - after_requested) / (update_every * group);
+
+ time_t after_wanted = before_wanted - (points_wanted * group * update_every) + update_every;
+ if(unlikely(after_wanted < first_entry_t)) {
+        // we went past the start of the db; recalculate points_wanted using all the db from before_wanted to the beginning
+ points_wanted = (before_wanted - first_entry_t) / group;
+
+ // recalculate after wanted with the new number of points
+ after_wanted = before_wanted - (points_wanted * group * update_every) + update_every;
+
+ if(unlikely(after_wanted < first_entry_t)) {
+ #ifdef NETDATA_INTERNAL_CHECKS
+ error("INTERNAL ERROR: rrd2rrdr() on %s, after_wanted is before db min", st->name);
+ #endif
+
+ after_wanted = first_entry_t - (first_entry_t % ( ((aligned)?group:1) * update_every )) + ( ((aligned)?group:1) * update_every );
+ }
+ }
+ //size_t after_slot = rrdset_time2slot(st, after_wanted);
+
+ // check if they are reversed
+ if(unlikely(after_wanted > before_wanted)) {
+ #ifdef NETDATA_INTERNAL_CHECKS
+ error("INTERNAL ERROR: rrd2rrdr() on %s, reversed wanted after/before", st->name);
+ #endif
+ time_t tmp = before_wanted;
+ before_wanted = after_wanted;
+ after_wanted = tmp;
+ }
+
+ // recalculate points_wanted using the final time-frame
+ points_wanted = (before_wanted - after_wanted) / update_every / group + 1;
+ if(unlikely(points_wanted < 0)) {
+ #ifdef NETDATA_INTERNAL_CHECKS
+ error("INTERNAL ERROR: rrd2rrdr() on %s, points_wanted is %ld", st->name, points_wanted);
+ #endif
+ points_wanted = 0;
+ }
+
+#ifdef NETDATA_INTERNAL_CHECKS
+ duration = before_wanted - after_wanted;
+
+ if(after_wanted < first_entry_t)
+ error("INTERNAL CHECK: after_wanted %u is too small, minimum %u", (uint32_t)after_wanted, (uint32_t)first_entry_t);
+
+ if(after_wanted > last_entry_t)
+ error("INTERNAL CHECK: after_wanted %u is too big, maximum %u", (uint32_t)after_wanted, (uint32_t)last_entry_t);
+
+ if(before_wanted < first_entry_t)
+ error("INTERNAL CHECK: before_wanted %u is too small, minimum %u", (uint32_t)before_wanted, (uint32_t)first_entry_t);
+
+ if(before_wanted > last_entry_t)
+ error("INTERNAL CHECK: before_wanted %u is too big, maximum %u", (uint32_t)before_wanted, (uint32_t)last_entry_t);
+
+/*
+ if(before_slot >= (size_t)st->entries)
+ error("INTERNAL CHECK: before_slot is invalid %zu, expected 0 to %ld", before_slot, st->entries - 1);
+
+ if(after_slot >= (size_t)st->entries)
+ error("INTERNAL CHECK: after_slot is invalid %zu, expected 0 to %ld", after_slot, st->entries - 1);
+*/
+
+ if(points_wanted > (before_wanted - after_wanted) / group / update_every + 1)
+ error("INTERNAL CHECK: points_wanted %ld is more than points %ld", points_wanted, (before_wanted - after_wanted) / group / update_every + 1);
+
+ if(group < resampling_group)
+ error("INTERNAL CHECK: group %ld is less than the desired group points %ld", group, resampling_group);
+
+ if(group > resampling_group && group % resampling_group)
+ error("INTERNAL CHECK: group %ld is not a multiple of the desired group points %ld", group, resampling_group);
+#endif
+
+ // -------------------------------------------------------------------------
+ // initialize our result set
+ // this also locks the chart for us
+
+ RRDR *r = rrdr_create(st, points_wanted, context_param_list);
+ if(unlikely(!r)) {
+ #ifdef NETDATA_INTERNAL_CHECKS
+ error("INTERNAL CHECK: Cannot create RRDR for %s, after=%u, before=%u, duration=%u, points=%ld", st->id, (uint32_t)after_wanted, (uint32_t)before_wanted, (uint32_t)duration, points_wanted);
+ #endif
+ return NULL;
+ }
+
+ if(unlikely(!r->d || !points_wanted)) {
+ #ifdef NETDATA_INTERNAL_CHECKS
+ error("INTERNAL CHECK: Returning empty RRDR (no dimensions in RRDSET) for %s, after=%u, before=%u, duration=%zu, points=%ld", st->id, (uint32_t)after_wanted, (uint32_t)before_wanted, (size_t)duration, points_wanted);
+ #endif
+ return r;
+ }
+
+ if(unlikely(absolute_period_requested == 1))
+ r->result_options |= RRDR_RESULT_OPTION_ABSOLUTE;
+ else
+ r->result_options |= RRDR_RESULT_OPTION_RELATIVE;
+
+ // find how many dimensions we have
+ long dimensions_count = r->d;
+
+ // -------------------------------------------------------------------------
+ // initialize RRDR
+
+ r->group = group;
+ r->update_every = (int)group * update_every;
+ r->before = before_wanted;
+ r->after = after_wanted;
+ r->internal.points_wanted = points_wanted;
+ r->internal.resampling_group = resampling_group;
+ r->internal.resampling_divisor = resampling_divisor;
+
+
+ // -------------------------------------------------------------------------
+ // assign the processor functions
+
+ {
+ int i, found = 0;
+ for(i = 0; !found && api_v1_data_groups[i].name ;i++) {
+ if(api_v1_data_groups[i].value == group_method) {
+ r->internal.grouping_create= api_v1_data_groups[i].create;
+ r->internal.grouping_reset = api_v1_data_groups[i].reset;
+ r->internal.grouping_free = api_v1_data_groups[i].free;
+ r->internal.grouping_add = api_v1_data_groups[i].add;
+ r->internal.grouping_flush = api_v1_data_groups[i].flush;
+ found = 1;
+ }
+ }
+ if(!found) {
+ errno = 0;
+ #ifdef NETDATA_INTERNAL_CHECKS
+ error("INTERNAL ERROR: grouping method %u not found for chart '%s'. Using 'average'", (unsigned int)group_method, r->st->name);
+ #endif
+ r->internal.grouping_create= grouping_create_average;
+ r->internal.grouping_reset = grouping_reset_average;
+ r->internal.grouping_free = grouping_free_average;
+ r->internal.grouping_add = grouping_add_average;
+ r->internal.grouping_flush = grouping_flush_average;
+ }
+ }
+
+ // allocate any memory required by the grouping method
+ r->internal.grouping_data = r->internal.grouping_create(r);
+
+
+ // -------------------------------------------------------------------------
+ // disable the not-wanted dimensions
+
+ rrdset_check_rdlock(st);
+
+ if(dimensions)
+ rrdr_disable_not_selected_dimensions(r, options, dimensions, temp_rd);
+
+
+ // -------------------------------------------------------------------------
+ // do the work for each dimension
+
+ time_t max_after = 0, min_before = 0;
+ long max_rows = 0;
+
+ RRDDIM *rd;
+ long c, dimensions_used = 0, dimensions_nonzero = 0;
+ for(rd = temp_rd?temp_rd:st->dimensions, c = 0 ; rd && c < dimensions_count ; rd = rd->next, c++) {
+
+ // if we need a percentage, we need to calculate all dimensions
+ if(unlikely(!(options & RRDR_OPTION_PERCENTAGE) && (r->od[c] & RRDR_DIMENSION_HIDDEN))) {
+ if(unlikely(r->od[c] & RRDR_DIMENSION_SELECTED)) r->od[c] &= ~RRDR_DIMENSION_SELECTED;
+ continue;
+ }
+ r->od[c] |= RRDR_DIMENSION_SELECTED;
+
+ // reset the grouping for the new dimension
+ r->internal.grouping_reset(r);
+
+ do_dimension_fixedstep(
+ r
+ , points_wanted
+ , rd
+ , c
+ , after_wanted
+ , before_wanted
+ );
+
+ if(r->od[c] & RRDR_DIMENSION_NONZERO)
+ dimensions_nonzero++;
+
+ // verify all dimensions are aligned
+ if(unlikely(!dimensions_used)) {
+ min_before = r->before;
+ max_after = r->after;
+ max_rows = r->rows;
+ }
+ else {
+ if(r->after != max_after) {
+ #ifdef NETDATA_INTERNAL_CHECKS
+ error("INTERNAL ERROR: 'after' mismatch between dimensions for chart '%s': max is %zu, dimension '%s' has %zu",
+ st->name, (size_t)max_after, rd->name, (size_t)r->after);
+ #endif
+ r->after = (r->after > max_after) ? r->after : max_after;
+ }
+
+ if(r->before != min_before) {
+ #ifdef NETDATA_INTERNAL_CHECKS
+ error("INTERNAL ERROR: 'before' mismatch between dimensions for chart '%s': max is %zu, dimension '%s' has %zu",
+ st->name, (size_t)min_before, rd->name, (size_t)r->before);
+ #endif
+ r->before = (r->before < min_before) ? r->before : min_before;
+ }
+
+ if(r->rows != max_rows) {
+ #ifdef NETDATA_INTERNAL_CHECKS
+ error("INTERNAL ERROR: 'rows' mismatch between dimensions for chart '%s': max is %zu, dimension '%s' has %zu",
+ st->name, (size_t)max_rows, rd->name, (size_t)r->rows);
+ #endif
+ r->rows = (r->rows > max_rows) ? r->rows : max_rows;
+ }
+ }
+
+ dimensions_used++;
+ }
+
+ #ifdef NETDATA_INTERNAL_CHECKS
+ if (dimensions_used) {
+ if(r->internal.log)
+ rrd2rrdr_log_request_response_metdata(r, group_method, aligned, group, resampling_time_requested, resampling_group, after_wanted, after_requested, before_wanted, before_requested, points_requested, points_wanted, /*after_slot, before_slot,*/ r->internal.log);
+
+ if(r->rows != points_wanted)
+ rrd2rrdr_log_request_response_metdata(r, group_method, aligned, group, resampling_time_requested, resampling_group, after_wanted, after_requested, before_wanted, before_requested, points_requested, points_wanted, /*after_slot, before_slot,*/ "got 'points' is not wanted 'points'");
+
+ if(aligned && (r->before % group) != 0)
+ rrd2rrdr_log_request_response_metdata(r, group_method, aligned, group, resampling_time_requested, resampling_group, after_wanted, after_requested, before_wanted, before_requested, points_requested, points_wanted, /*after_slot, before_slot,*/ "'before' is not aligned but alignment is required");
+
+ // 'after' should not be aligned, since we start inside the first group
+ //if(aligned && (r->after % group) != 0)
+ // rrd2rrdr_log_request_response_metdata(r, group_method, aligned, group, resampling_time_requested, resampling_group, after_wanted, after_requested, before_wanted, before_requested, points_requested, points_wanted, after_slot, before_slot, "'after' is not aligned but alignment is required");
+
+ if(r->before != before_requested)
+ rrd2rrdr_log_request_response_metdata(r, group_method, aligned, group, resampling_time_requested, resampling_group, after_wanted, after_requested, before_wanted, before_requested, points_requested, points_wanted, /*after_slot, before_slot,*/ "chart is not aligned to requested 'before'");
+
+ if(r->before != before_wanted)
+ rrd2rrdr_log_request_response_metdata(r, group_method, aligned, group, resampling_time_requested, resampling_group, after_wanted, after_requested, before_wanted, before_requested, points_requested, points_wanted, /*after_slot, before_slot,*/ "got 'before' is not wanted 'before'");
+
+ // reported 'after' varies, depending on group
+ if(r->after != after_wanted)
+ rrd2rrdr_log_request_response_metdata(r, group_method, aligned, group, resampling_time_requested, resampling_group, after_wanted, after_requested, before_wanted, before_requested, points_requested, points_wanted, /*after_slot, before_slot,*/ "got 'after' is not wanted 'after'");
+ }
+ #endif
+
+ // free all resources used by the grouping method
+ r->internal.grouping_free(r);
+
+ // when all the dimensions are zero, we should return all of them
+ if(unlikely(options & RRDR_OPTION_NONZERO && !dimensions_nonzero)) {
+ // all the dimensions are zero
+ // mark them as NONZERO to send them all
+ for(rd = temp_rd?temp_rd:st->dimensions, c = 0 ; rd && c < dimensions_count ; rd = rd->next, c++) {
+ if(unlikely(r->od[c] & RRDR_DIMENSION_HIDDEN)) continue;
+ r->od[c] |= RRDR_DIMENSION_NONZERO;
+ }
+ }
+
+ rrdr_query_completed(r->internal.db_points_read, r->internal.result_points_generated);
+ return r;
+}
+
+#ifdef ENABLE_DBENGINE
+static RRDR *rrd2rrdr_variablestep(
+ RRDSET *st
+ , long points_requested
+ , long long after_requested
+ , long long before_requested
+ , RRDR_GROUPING group_method
+ , long resampling_time_requested
+ , RRDR_OPTIONS options
+ , const char *dimensions
+ , int update_every
+ , time_t first_entry_t
+ , time_t last_entry_t
+ , int absolute_period_requested
+ , struct rrdeng_region_info *region_info_array
+ , struct context_param *context_param_list
+) {
+ int aligned = !(options & RRDR_OPTION_NOT_ALIGNED);
+
+ // the duration of the chart
+ time_t duration = before_requested - after_requested;
+ long available_points = duration / update_every;
+
+ RRDDIM *temp_rd = context_param_list ? context_param_list->rd : NULL;
+
+ if(duration <= 0 || available_points <= 0) {
+ freez(region_info_array);
+ return rrdr_create(st, 1, context_param_list);
+ }
+
+ // check the number of wanted points in the result
+ if(unlikely(points_requested < 0)) points_requested = -points_requested;
+ if(unlikely(points_requested > available_points)) points_requested = available_points;
+ if(unlikely(points_requested == 0)) points_requested = available_points;
+
+ // calculate the desired grouping of source data points
+ long group = available_points / points_requested;
+ if(unlikely(group <= 0)) group = 1;
+ if(unlikely(available_points % points_requested > points_requested / 2)) group++; // rounding to the closest integer
+
+ // resampling_time_requested enforces a certain grouping multiple
+ calculated_number resampling_divisor = 1.0;
+ long resampling_group = 1;
+ if(unlikely(resampling_time_requested > update_every)) {
+ if (unlikely(resampling_time_requested > duration)) {
+ // group_time is above the available duration
+
+ #ifdef NETDATA_INTERNAL_CHECKS
+ info("INTERNAL CHECK: %s: requested gtime %ld secs, is greater than the desired duration %ld secs", st->id, resampling_time_requested, duration);
+ #endif
+
+ after_requested = before_requested - resampling_time_requested;
+ duration = before_requested - after_requested;
+ available_points = duration / update_every;
+ group = available_points / points_requested;
+ }
+
+        // if the duration is not aligned to the resampling time,
+        // extend the duration into the past, to avoid a gap in the chart,
+        // but only when the missing duration is above 1/10th of a point
+ if(duration % resampling_time_requested) {
+ time_t delta = duration % resampling_time_requested;
+ if(delta > resampling_time_requested / 10) {
+ after_requested -= resampling_time_requested - delta;
+ duration = before_requested - after_requested;
+ available_points = duration / update_every;
+ group = available_points / points_requested;
+ }
+ }
+
+ // the points we should group to satisfy gtime
+ resampling_group = resampling_time_requested / update_every;
+ if(unlikely(resampling_time_requested % update_every)) {
+ #ifdef NETDATA_INTERNAL_CHECKS
+ info("INTERNAL CHECK: %s: requested gtime %ld secs, is not a multiple of the chart's data collection frequency %d secs", st->id, resampling_time_requested, update_every);
+ #endif
+
+ resampling_group++;
+ }
+
+ // adapt group according to resampling_group
+ if(unlikely(group < resampling_group)) group = resampling_group; // do not allow grouping below the desired one
+ if(unlikely(group % resampling_group)) group += resampling_group - (group % resampling_group); // make sure group is multiple of resampling_group
+
+ //resampling_divisor = group / resampling_group;
+ resampling_divisor = (calculated_number)(group * update_every) / (calculated_number)resampling_time_requested;
+ }
+
+ // now that we have group,
+ // align the requested timeframe to fit it.
+
+ if(aligned) {
+        // alignment has been requested, so align the values
+ before_requested -= before_requested % (group * update_every);
+ after_requested -= after_requested % (group * update_every);
+ }
+
+    // we align the request on before_requested
+ time_t before_wanted = before_requested;
+ if(likely(before_wanted > last_entry_t)) {
+ #ifdef NETDATA_INTERNAL_CHECKS
+ error("INTERNAL ERROR: rrd2rrdr() on %s, before_wanted is after db max", st->name);
+ #endif
+
+ before_wanted = last_entry_t - (last_entry_t % ( ((aligned)?group:1) * update_every ));
+ }
+ //size_t before_slot = rrdset_time2slot(st, before_wanted);
+
+    // we need to estimate the number of points, so that each
+    // point maps to an integer number of values
+ long points_wanted = (before_wanted - after_requested) / (update_every * group);
+
+ time_t after_wanted = before_wanted - (points_wanted * group * update_every) + update_every;
+ if(unlikely(after_wanted < first_entry_t)) {
+        // we went past the start of the db; recalculate points_wanted using all the db from before_wanted to the beginning
+ points_wanted = (before_wanted - first_entry_t) / group;
+
+ // recalculate after wanted with the new number of points
+ after_wanted = before_wanted - (points_wanted * group * update_every) + update_every;
+
+ if(unlikely(after_wanted < first_entry_t)) {
+ #ifdef NETDATA_INTERNAL_CHECKS
+ error("INTERNAL ERROR: rrd2rrdr() on %s, after_wanted is before db min", st->name);
+ #endif
+
+ after_wanted = first_entry_t - (first_entry_t % ( ((aligned)?group:1) * update_every )) + ( ((aligned)?group:1) * update_every );
+ }
+ }
+ //size_t after_slot = rrdset_time2slot(st, after_wanted);
+
+ // check if they are reversed
+ if(unlikely(after_wanted > before_wanted)) {
+ #ifdef NETDATA_INTERNAL_CHECKS
+ error("INTERNAL ERROR: rrd2rrdr() on %s, reversed wanted after/before", st->name);
+ #endif
+ time_t tmp = before_wanted;
+ before_wanted = after_wanted;
+ after_wanted = tmp;
+ }
+
+ // recalculate points_wanted using the final time-frame
+ points_wanted = (before_wanted - after_wanted) / update_every / group + 1;
+ if(unlikely(points_wanted < 0)) {
+ #ifdef NETDATA_INTERNAL_CHECKS
+ error("INTERNAL ERROR: rrd2rrdr() on %s, points_wanted is %ld", st->name, points_wanted);
+ #endif
+ points_wanted = 0;
+ }
+
+#ifdef NETDATA_INTERNAL_CHECKS
+ duration = before_wanted - after_wanted;
+
+ if(after_wanted < first_entry_t)
+ error("INTERNAL CHECK: after_wanted %u is too small, minimum %u", (uint32_t)after_wanted, (uint32_t)first_entry_t);
+
+ if(after_wanted > last_entry_t)
+ error("INTERNAL CHECK: after_wanted %u is too big, maximum %u", (uint32_t)after_wanted, (uint32_t)last_entry_t);
+
+ if(before_wanted < first_entry_t)
+ error("INTERNAL CHECK: before_wanted %u is too small, minimum %u", (uint32_t)before_wanted, (uint32_t)first_entry_t);
+
+ if(before_wanted > last_entry_t)
+ error("INTERNAL CHECK: before_wanted %u is too big, maximum %u", (uint32_t)before_wanted, (uint32_t)last_entry_t);
+
+/*
+ if(before_slot >= (size_t)st->entries)
+ error("INTERNAL CHECK: before_slot is invalid %zu, expected 0 to %ld", before_slot, st->entries - 1);
+
+ if(after_slot >= (size_t)st->entries)
+ error("INTERNAL CHECK: after_slot is invalid %zu, expected 0 to %ld", after_slot, st->entries - 1);
+*/
+
+ if(points_wanted > (before_wanted - after_wanted) / group / update_every + 1)
+ error("INTERNAL CHECK: points_wanted %ld is more than points %ld", points_wanted, (before_wanted - after_wanted) / group / update_every + 1);
+
+ if(group < resampling_group)
+ error("INTERNAL CHECK: group %ld is less than the desired group points %ld", group, resampling_group);
+
+ if(group > resampling_group && group % resampling_group)
+ error("INTERNAL CHECK: group %ld is not a multiple of the desired group points %ld", group, resampling_group);
+#endif
+
+ // -------------------------------------------------------------------------
+ // initialize our result set
+ // this also locks the chart for us
+
+ RRDR *r = rrdr_create(st, points_wanted, context_param_list);
+ if(unlikely(!r)) {
+ #ifdef NETDATA_INTERNAL_CHECKS
+ error("INTERNAL CHECK: Cannot create RRDR for %s, after=%u, before=%u, duration=%u, points=%ld", st->id, (uint32_t)after_wanted, (uint32_t)before_wanted, (uint32_t)duration, points_wanted);
+ #endif
+ freez(region_info_array);
+ return NULL;
+ }
+
+ if(unlikely(!r->d || !points_wanted)) {
+ #ifdef NETDATA_INTERNAL_CHECKS
+ error("INTERNAL CHECK: Returning empty RRDR (no dimensions in RRDSET) for %s, after=%u, before=%u, duration=%zu, points=%ld", st->id, (uint32_t)after_wanted, (uint32_t)before_wanted, (size_t)duration, points_wanted);
+ #endif
+ freez(region_info_array);
+ return r;
+ }
+
+ r->result_options |= RRDR_RESULT_OPTION_VARIABLE_STEP;
+ if(unlikely(absolute_period_requested == 1))
+ r->result_options |= RRDR_RESULT_OPTION_ABSOLUTE;
+ else
+ r->result_options |= RRDR_RESULT_OPTION_RELATIVE;
+
+ // find how many dimensions we have
+ long dimensions_count = r->d;
+
+ // -------------------------------------------------------------------------
+ // initialize RRDR
+
+ r->group = group;
+ r->update_every = (int)group * update_every;
+ r->before = before_wanted;
+ r->after = after_wanted;
+ r->internal.points_wanted = points_wanted;
+ r->internal.resampling_group = resampling_group;
+ r->internal.resampling_divisor = resampling_divisor;
+
+
+ // -------------------------------------------------------------------------
+ // assign the processor functions
+
+ {
+ int i, found = 0;
+ for(i = 0; !found && api_v1_data_groups[i].name ;i++) {
+ if(api_v1_data_groups[i].value == group_method) {
+ r->internal.grouping_create= api_v1_data_groups[i].create;
+ r->internal.grouping_reset = api_v1_data_groups[i].reset;
+ r->internal.grouping_free = api_v1_data_groups[i].free;
+ r->internal.grouping_add = api_v1_data_groups[i].add;
+ r->internal.grouping_flush = api_v1_data_groups[i].flush;
+ found = 1;
+ }
+ }
+ if(!found) {
+ errno = 0;
+ #ifdef NETDATA_INTERNAL_CHECKS
+ error("INTERNAL ERROR: grouping method %u not found for chart '%s'. Using 'average'", (unsigned int)group_method, r->st->name);
+ #endif
+ r->internal.grouping_create= grouping_create_average;
+ r->internal.grouping_reset = grouping_reset_average;
+ r->internal.grouping_free = grouping_free_average;
+ r->internal.grouping_add = grouping_add_average;
+ r->internal.grouping_flush = grouping_flush_average;
+ }
+ }
+
+ // allocate any memory required by the grouping method
+ r->internal.grouping_data = r->internal.grouping_create(r);
+
+
+ // -------------------------------------------------------------------------
+ // disable the not-wanted dimensions
+
+ rrdset_check_rdlock(st);
+
+ if(dimensions)
+ rrdr_disable_not_selected_dimensions(r, options, dimensions, temp_rd);
+
+
+ // -------------------------------------------------------------------------
+ // do the work for each dimension
+
+ time_t max_after = 0, min_before = 0;
+ long max_rows = 0;
+
+ RRDDIM *rd;
+ long c, dimensions_used = 0, dimensions_nonzero = 0;
+ for(rd = temp_rd?temp_rd:st->dimensions, c = 0 ; rd && c < dimensions_count ; rd = rd->next, c++) {
+
+ // if we need a percentage, we need to calculate all dimensions
+ if(unlikely(!(options & RRDR_OPTION_PERCENTAGE) && (r->od[c] & RRDR_DIMENSION_HIDDEN))) {
+ if(unlikely(r->od[c] & RRDR_DIMENSION_SELECTED)) r->od[c] &= ~RRDR_DIMENSION_SELECTED;
+ continue;
+ }
+ r->od[c] |= RRDR_DIMENSION_SELECTED;
+
+ // reset the grouping for the new dimension
+ r->internal.grouping_reset(r);
+
+ do_dimension_variablestep(
+ r
+ , points_wanted
+ , rd
+ , c
+ , after_wanted
+ , before_wanted
+ );
+
+ if(r->od[c] & RRDR_DIMENSION_NONZERO)
+ dimensions_nonzero++;
+
+ // verify all dimensions are aligned
+ if(unlikely(!dimensions_used)) {
+ min_before = r->before;
+ max_after = r->after;
+ max_rows = r->rows;
+ }
+ else {
+ if(r->after != max_after) {
+ #ifdef NETDATA_INTERNAL_CHECKS
+ error("INTERNAL ERROR: 'after' mismatch between dimensions for chart '%s': max is %zu, dimension '%s' has %zu",
+ st->name, (size_t)max_after, rd->name, (size_t)r->after);
+ #endif
+ r->after = (r->after > max_after) ? r->after : max_after;
+ }
+
+ if(r->before != min_before) {
+ #ifdef NETDATA_INTERNAL_CHECKS
+ error("INTERNAL ERROR: 'before' mismatch between dimensions for chart '%s': max is %zu, dimension '%s' has %zu",
+ st->name, (size_t)min_before, rd->name, (size_t)r->before);
+ #endif
+ r->before = (r->before < min_before) ? r->before : min_before;
+ }
+
+ if(r->rows != max_rows) {
+ #ifdef NETDATA_INTERNAL_CHECKS
+ error("INTERNAL ERROR: 'rows' mismatch between dimensions for chart '%s': max is %zu, dimension '%s' has %zu",
+ st->name, (size_t)max_rows, rd->name, (size_t)r->rows);
+ #endif
+ r->rows = (r->rows > max_rows) ? r->rows : max_rows;
+ }
+ }
+
+ dimensions_used++;
+ }
+
+ #ifdef NETDATA_INTERNAL_CHECKS
+
+ if (dimensions_used) {
+ if(r->internal.log)
+ rrd2rrdr_log_request_response_metdata(r, group_method, aligned, group, resampling_time_requested, resampling_group, after_wanted, after_requested, before_wanted, before_requested, points_requested, points_wanted, /*after_slot, before_slot,*/ r->internal.log);
+
+ if(r->rows != points_wanted)
+ rrd2rrdr_log_request_response_metdata(r, group_method, aligned, group, resampling_time_requested, resampling_group, after_wanted, after_requested, before_wanted, before_requested, points_requested, points_wanted, /*after_slot, before_slot,*/ "got 'points' is not wanted 'points'");
+
+ if(aligned && (r->before % group) != 0)
+ rrd2rrdr_log_request_response_metdata(r, group_method, aligned, group, resampling_time_requested, resampling_group, after_wanted, after_requested, before_wanted, before_requested, points_requested, points_wanted, /*after_slot, before_slot,*/ "'before' is not aligned but alignment is required");
+
+ // 'after' should not be aligned, since we start inside the first group
+ //if(aligned && (r->after % group) != 0)
+ // rrd2rrdr_log_request_response_metdata(r, group_method, aligned, group, resampling_time_requested, resampling_group, after_wanted, after_requested, before_wanted, before_requested, points_requested, points_wanted, after_slot, before_slot, "'after' is not aligned but alignment is required");
+
+ if(r->before != before_requested)
+ rrd2rrdr_log_request_response_metdata(r, group_method, aligned, group, resampling_time_requested, resampling_group, after_wanted, after_requested, before_wanted, before_requested, points_requested, points_wanted, /*after_slot, before_slot,*/ "chart is not aligned to requested 'before'");
+
+ if(r->before != before_wanted)
+ rrd2rrdr_log_request_response_metdata(r, group_method, aligned, group, resampling_time_requested, resampling_group, after_wanted, after_requested, before_wanted, before_requested, points_requested, points_wanted, /*after_slot, before_slot,*/ "got 'before' is not wanted 'before'");
+
+ // reported 'after' varies, depending on group
+ if(r->after != after_wanted)
+ rrd2rrdr_log_request_response_metdata(r, group_method, aligned, group, resampling_time_requested, resampling_group, after_wanted, after_requested, before_wanted, before_requested, points_requested, points_wanted, /*after_slot, before_slot,*/ "got 'after' is not wanted 'after'");
+ }
+ #endif
+
+ // free all resources used by the grouping method
+ r->internal.grouping_free(r);
+
+ // when all the dimensions are zero, we should return all of them
+ if(unlikely(options & RRDR_OPTION_NONZERO && !dimensions_nonzero)) {
+ // all the dimensions are zero
+ // mark them as NONZERO to send them all
+ for(rd = temp_rd?temp_rd:st->dimensions, c = 0 ; rd && c < dimensions_count ; rd = rd->next, c++) {
+ if(unlikely(r->od[c] & RRDR_DIMENSION_HIDDEN)) continue;
+ r->od[c] |= RRDR_DIMENSION_NONZERO;
+ }
+ }
+
+ rrdr_query_completed(r->internal.db_points_read, r->internal.result_points_generated);
+ freez(region_info_array);
+ return r;
+}
+#endif //#ifdef ENABLE_DBENGINE
+
+RRDR *rrd2rrdr(
+ RRDSET *st
+ , long points_requested
+ , long long after_requested
+ , long long before_requested
+ , RRDR_GROUPING group_method
+ , long resampling_time_requested
+ , RRDR_OPTIONS options
+ , const char *dimensions
+ , struct context_param *context_param_list
+)
+{
+ int rrd_update_every;
+ int absolute_period_requested;
+
+ time_t first_entry_t;
+ time_t last_entry_t;
+ if (context_param_list) {
+ first_entry_t = context_param_list->first_entry_t;
+ last_entry_t = context_param_list->last_entry_t;
+ } else {
+ rrdset_rdlock(st);
+ first_entry_t = rrdset_first_entry_t_nolock(st);
+ last_entry_t = rrdset_last_entry_t_nolock(st);
+ rrdset_unlock(st);
+ }
+
+ rrd_update_every = st->update_every;
+ absolute_period_requested = rrdr_convert_before_after_to_absolute(&after_requested, &before_requested,
+ rrd_update_every, first_entry_t,
+ last_entry_t, options);
+ if (options & RRDR_OPTION_ALLOW_PAST)
+ if (first_entry_t > after_requested)
+ first_entry_t = after_requested;
+
+ if (context_param_list)
+ rebuild_context_param_list(context_param_list, after_requested);
+
+#ifdef ENABLE_DBENGINE
+ if (st->rrd_memory_mode == RRD_MEMORY_MODE_DBENGINE) {
+ struct rrdeng_region_info *region_info_array;
+ unsigned regions, max_interval;
+
+ /* This call takes the chart read-lock */
+ regions = rrdeng_variable_step_boundaries(st, after_requested, before_requested,
+ &region_info_array, &max_interval, context_param_list);
+ if (1 == regions) {
+ if (region_info_array) {
+ if (rrd_update_every != region_info_array[0].update_every) {
+ rrd_update_every = region_info_array[0].update_every;
+ /* recalculate query alignment */
+ absolute_period_requested =
+ rrdr_convert_before_after_to_absolute(&after_requested, &before_requested, rrd_update_every,
+ first_entry_t, last_entry_t, options);
+ }
+ freez(region_info_array);
+ }
+ return rrd2rrdr_fixedstep(st, points_requested, after_requested, before_requested, group_method,
+ resampling_time_requested, options, dimensions, rrd_update_every,
+ first_entry_t, last_entry_t, absolute_period_requested, context_param_list);
+ } else {
+ if (rrd_update_every != (uint16_t)max_interval) {
+ rrd_update_every = (uint16_t) max_interval;
+ /* recalculate query alignment */
+ absolute_period_requested = rrdr_convert_before_after_to_absolute(&after_requested, &before_requested,
+ rrd_update_every, first_entry_t,
+ last_entry_t, options);
+ }
+ return rrd2rrdr_variablestep(st, points_requested, after_requested, before_requested, group_method,
+ resampling_time_requested, options, dimensions, rrd_update_every,
+ first_entry_t, last_entry_t, absolute_period_requested, region_info_array, context_param_list);
+ }
+ }
+#endif
+ return rrd2rrdr_fixedstep(st, points_requested, after_requested, before_requested, group_method,
+ resampling_time_requested, options, dimensions,
+ rrd_update_every, first_entry_t, last_entry_t, absolute_period_requested, context_param_list);
+} \ No newline at end of file
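
To tie the entry point together, a hypothetical caller fetching the last 10 minutes averaged into 60 points; this is a usage sketch, not code from this commit, and it assumes `st` is a valid chart:

```c
// sketch only: st is assumed to be a valid RRDSET
RRDR *r = rrd2rrdr(st, 60, -600, 0, RRDR_GROUPING_AVERAGE,
                   0, RRDR_OPTION_NONZERO, NULL, NULL);
if(r) {
    for(long i = 0; i < rrdr_rows(r); i++) {
        calculated_number *cn = &r->v[i * r->d];  // row i, all dimensions
        (void)cn;                                 // serialize / render here
    }
    rrdr_free(r);  // also releases the chart lock taken by rrdr_create()
}
```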
diff --git a/web/api/queries/query.h b/web/api/queries/query.h
new file mode 100644
index 0000000..6b8a51c
--- /dev/null
+++ b/web/api/queries/query.h
@@ -0,0 +1,24 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_API_DATA_QUERY_H
+#define NETDATA_API_DATA_QUERY_H
+
+typedef enum rrdr_grouping {
+ RRDR_GROUPING_UNDEFINED = 0,
+ RRDR_GROUPING_AVERAGE,
+ RRDR_GROUPING_MIN,
+ RRDR_GROUPING_MAX,
+ RRDR_GROUPING_SUM,
+ RRDR_GROUPING_INCREMENTAL_SUM,
+ RRDR_GROUPING_MEDIAN,
+ RRDR_GROUPING_STDDEV,
+ RRDR_GROUPING_CV,
+ RRDR_GROUPING_SES,
+ RRDR_GROUPING_DES,
+} RRDR_GROUPING;
+
+extern const char *group_method2string(RRDR_GROUPING group);
+extern void web_client_api_v1_init_grouping(void);
+extern RRDR_GROUPING web_client_api_request_v1_data_group(const char *name, RRDR_GROUPING def);
+
+#endif //NETDATA_API_DATA_QUERY_H
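
The enum is typically resolved from the `&group=` URL parameter through the declarations above; a hedged usage sketch (the exact name string is an assumption based on the grouping names elsewhere in this commit):

```c
// sketch only: resolve a user-supplied name, falling back to average
RRDR_GROUPING gm = web_client_api_request_v1_data_group(
        "median", RRDR_GROUPING_AVERAGE);
const char *name = group_method2string(gm);  // expected to yield "median"
```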
diff --git a/web/api/queries/rrdr.c b/web/api/queries/rrdr.c
new file mode 100644
index 0000000..ef237fa
--- /dev/null
+++ b/web/api/queries/rrdr.c
@@ -0,0 +1,144 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "rrdr.h"
+
+/*
+static void rrdr_dump(RRDR *r)
+{
+ long c, i;
+ RRDDIM *d;
+
+ fprintf(stderr, "\nCHART %s (%s)\n", r->st->id, r->st->name);
+
+ for(c = 0, d = r->st->dimensions; d ;c++, d = d->next) {
+ fprintf(stderr, "DIMENSION %s (%s), %s%s%s%s\n"
+ , d->id
+ , d->name
+ , (r->od[c] & RRDR_EMPTY)?"EMPTY ":""
+ , (r->od[c] & RRDR_RESET)?"RESET ":""
+ , (r->od[c] & RRDR_DIMENSION_HIDDEN)?"HIDDEN ":""
+ , (r->od[c] & RRDR_DIMENSION_NONZERO)?"NONZERO ":""
+ );
+ }
+
+ if(r->rows <= 0) {
+ fprintf(stderr, "RRDR does not have any values in it.\n");
+ return;
+ }
+
+ fprintf(stderr, "RRDR includes %d values in it:\n", r->rows);
+
+ // for each line in the array
+ for(i = 0; i < r->rows ;i++) {
+ calculated_number *cn = &r->v[ i * r->d ];
+ RRDR_DIMENSION_FLAGS *co = &r->o[ i * r->d ];
+
+ // print the id and the timestamp of the line
+ fprintf(stderr, "%ld %ld ", i + 1, r->t[i]);
+
+ // for each dimension
+ for(c = 0, d = r->st->dimensions; d ;c++, d = d->next) {
+ if(unlikely(r->od[c] & RRDR_DIMENSION_HIDDEN)) continue;
+ if(unlikely(!(r->od[c] & RRDR_DIMENSION_NONZERO))) continue;
+
+ if(co[c] & RRDR_EMPTY)
+ fprintf(stderr, "null ");
+ else
+ fprintf(stderr, CALCULATED_NUMBER_FORMAT " %s%s%s%s "
+ , cn[c]
+ , (co[c] & RRDR_EMPTY)?"E":" "
+ , (co[c] & RRDR_RESET)?"R":" "
+ , (co[c] & RRDR_DIMENSION_HIDDEN)?"H":" "
+ , (co[c] & RRDR_DIMENSION_NONZERO)?"N":" "
+ );
+ }
+
+ fprintf(stderr, "\n");
+ }
+}
+*/
+
+
+
+
+inline static void rrdr_lock_rrdset(RRDR *r) {
+ if(unlikely(!r)) {
+ error("NULL value given!");
+ return;
+ }
+
+ rrdset_rdlock(r->st);
+ r->has_st_lock = 1;
+}
+
+inline static void rrdr_unlock_rrdset(RRDR *r) {
+ if(unlikely(!r)) {
+ error("NULL value given!");
+ return;
+ }
+
+ if(likely(r->has_st_lock)) {
+ rrdset_unlock(r->st);
+ r->has_st_lock = 0;
+ }
+}
+
+inline void rrdr_free(RRDR *r)
+{
+ if(unlikely(!r)) {
+ error("NULL value given!");
+ return;
+ }
+
+ rrdr_unlock_rrdset(r);
+ freez(r->t);
+ freez(r->v);
+ freez(r->o);
+ freez(r->od);
+ freez(r);
+}
+
+RRDR *rrdr_create(struct rrdset *st, long n, struct context_param *context_param_list)
+{
+ if(unlikely(!st)) {
+ error("NULL value given!");
+ return NULL;
+ }
+
+ RRDR *r = callocz(1, sizeof(RRDR));
+ r->st = st;
+
+ rrdr_lock_rrdset(r);
+
+ RRDDIM *temp_rd = context_param_list ? context_param_list->rd : NULL;
+ RRDDIM *rd;
+ if (temp_rd) {
+ RRDDIM *t = temp_rd;
+ while (t) {
+ r->d++;
+ t = t->next;
+ }
+ } else
+ rrddim_foreach_read(rd, st) r->d++;
+
+ r->n = n;
+
+ r->t = callocz((size_t)n, sizeof(time_t));
+ r->v = mallocz(n * r->d * sizeof(calculated_number));
+ r->o = mallocz(n * r->d * sizeof(RRDR_VALUE_FLAGS));
+ r->od = mallocz(r->d * sizeof(RRDR_DIMENSION_FLAGS));
+
+ // set the hidden flag on hidden dimensions
+ int c;
+ for (c = 0, rd = temp_rd ? temp_rd : st->dimensions; rd; c++, rd = rd->next) {
+ if (unlikely(rrddim_flag_check(rd, RRDDIM_FLAG_HIDDEN)))
+ r->od[c] = RRDR_DIMENSION_HIDDEN;
+ else
+ r->od[c] = RRDR_DIMENSION_DEFAULT;
+ }
+
+ r->group = 1;
+ r->update_every = 1;
+
+ return r;
+}
diff --git a/web/api/queries/rrdr.h b/web/api/queries/rrdr.h
new file mode 100644
index 0000000..4d349c3
--- /dev/null
+++ b/web/api/queries/rrdr.h
@@ -0,0 +1,115 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_QUERIES_RRDR_H
+#define NETDATA_QUERIES_RRDR_H
+
+#include "libnetdata/libnetdata.h"
+
+typedef enum rrdr_options {
+ RRDR_OPTION_NONZERO = 0x00000001, // don't output dimensions with just zero values
+ RRDR_OPTION_REVERSED = 0x00000002, // output the rows in reverse order (oldest to newest)
+ RRDR_OPTION_ABSOLUTE = 0x00000004, // values positive, for DATASOURCE_SSV before summing
+ RRDR_OPTION_MIN2MAX = 0x00000008, // when adding dimensions, use max - min, instead of sum
+ RRDR_OPTION_SECONDS = 0x00000010, // output seconds, instead of dates
+ RRDR_OPTION_MILLISECONDS = 0x00000020, // output milliseconds, instead of dates
+ RRDR_OPTION_NULL2ZERO = 0x00000040, // do not show nulls, convert them to zeros
+ RRDR_OPTION_OBJECTSROWS = 0x00000080, // each row of values should be an object, not an array
+ RRDR_OPTION_GOOGLE_JSON = 0x00000100, // comply with google JSON/JSONP specs
+ RRDR_OPTION_JSON_WRAP = 0x00000200, // wrap the response in a JSON header with info about the result
+ RRDR_OPTION_LABEL_QUOTES = 0x00000400, // in CSV output, wrap header labels in double quotes
+ RRDR_OPTION_PERCENTAGE = 0x00000800, // give values as percentage of total
+ RRDR_OPTION_NOT_ALIGNED = 0x00001000, // do not align charts for persistent timeframes
+ RRDR_OPTION_DISPLAY_ABS = 0x00002000, // for badges, display the absolute value, but calculate colors with sign
+ RRDR_OPTION_MATCH_IDS = 0x00004000, // when filtering dimensions, match only IDs
+ RRDR_OPTION_MATCH_NAMES = 0x00008000, // when filtering dimensions, match only names
+    RRDR_OPTION_CUSTOM_VARS = 0x00010000, // when wrapping the response in JSON, return custom variables in the response
+ RRDR_OPTION_ALLOW_PAST = 0x00020000, // The after parameter can extend in the past before the first entry
+} RRDR_OPTIONS;
+
+typedef enum rrdr_value_flag {
+ RRDR_VALUE_NOTHING = 0x00, // no flag set (a good default)
+ RRDR_VALUE_EMPTY = 0x01, // the database value is empty
+    RRDR_VALUE_RESET = 0x02, // the database value is marked as reset (overflowed)
+} RRDR_VALUE_FLAGS;
+
+typedef enum rrdr_dimension_flag {
+ RRDR_DIMENSION_DEFAULT = 0x00,
+ RRDR_DIMENSION_HIDDEN = 0x04, // the dimension is hidden (not to be presented to callers)
+ RRDR_DIMENSION_NONZERO = 0x08, // the dimension is non zero (contains non-zero values)
+ RRDR_DIMENSION_SELECTED = 0x10, // the dimension is selected for evaluation in this RRDR
+} RRDR_DIMENSION_FLAGS;
+
+// RRDR result options
+typedef enum rrdr_result_flags {
+ RRDR_RESULT_OPTION_ABSOLUTE = 0x00000001, // the query uses absolute time-frames
+ // (can be cached by browsers and proxies)
+ RRDR_RESULT_OPTION_RELATIVE = 0x00000002, // the query uses relative time-frames
+                                                     // (should not be cached by browsers and proxies)
+ RRDR_RESULT_OPTION_VARIABLE_STEP = 0x00000004, // the query uses variable-step time-frames
+} RRDR_RESULT_FLAGS;
+
+typedef struct rrdresult {
+ struct rrdset *st; // the chart this result refers to
+
+ RRDR_RESULT_FLAGS result_options; // RRDR_RESULT_OPTION_*
+
+ int d; // the number of dimensions
+ long n; // the number of values in the arrays
+ long rows; // the number of rows used
+
+ RRDR_DIMENSION_FLAGS *od; // the options for the dimensions
+
+ time_t *t; // array of n timestamps
+ calculated_number *v; // array n x d values
+ RRDR_VALUE_FLAGS *o; // array n x d options for each value returned
+
+ long group; // how many collected values were grouped for each row
+ int update_every; // what is the suggested update frequency in seconds
+
+ calculated_number min;
+ calculated_number max;
+
+ time_t before;
+ time_t after;
+
+ int has_st_lock; // if st is read locked by us
+
+ // internal rrd2rrdr() members below this point
+ struct {
+ long points_wanted;
+ long resampling_group;
+ calculated_number resampling_divisor;
+
+ void *(*grouping_create)(struct rrdresult *r);
+ void (*grouping_reset)(struct rrdresult *r);
+ void (*grouping_free)(struct rrdresult *r);
+ void (*grouping_add)(struct rrdresult *r, calculated_number value);
+ calculated_number (*grouping_flush)(struct rrdresult *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);
+ void *grouping_data;
+
+ #ifdef NETDATA_INTERNAL_CHECKS
+ const char *log;
+ #endif
+
+ size_t db_points_read;
+ size_t result_points_generated;
+ } internal;
+} RRDR;
+
+#define rrdr_rows(r) ((r)->rows)
+
+#include "../../../database/rrd.h"
+extern void rrdr_free(RRDR *r);
+extern RRDR *rrdr_create(struct rrdset *st, long n, struct context_param *context_param_list);
+
+#include "../web_api_v1.h"
+#include "web/api/queries/query.h"
+
+extern RRDR *rrd2rrdr(
+ RRDSET *st, long points_requested, long long after_requested, long long before_requested,
+ RRDR_GROUPING group_method, long resampling_time_requested, RRDR_OPTIONS options, const char *dimensions,
+ struct context_param *context_param_list);
+
+#include "query.h"
+
+#endif //NETDATA_QUERIES_RRDR_H
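
Putting the layout together, a sketch of consuming a filled `RRDR`: skip dimensions flagged hidden and values flagged empty, much like the commented-out `rrdr_dump()` earlier in this commit does. `r` is assumed to be a result returned by `rrd2rrdr()`:

```c
// sketch only: walk every row of an already-filled RRDR *r
for(long i = 0; i < rrdr_rows(r); i++) {
    calculated_number *cn = &r->v[i * r->d];
    RRDR_VALUE_FLAGS  *co = &r->o[i * r->d];
    for(int c = 0; c < r->d; c++) {
        if(r->od[c] & RRDR_DIMENSION_HIDDEN) continue; // never presented
        if(co[c] & RRDR_VALUE_EMPTY) continue;         // null in the output
        // cn[c] is a usable value for timestamp r->t[i]
    }
}
```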
diff --git a/web/api/queries/ses/Makefile.am b/web/api/queries/ses/Makefile.am
new file mode 100644
index 0000000..161784b
--- /dev/null
+++ b/web/api/queries/ses/Makefile.am
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/web/api/queries/ses/README.md b/web/api/queries/ses/README.md
new file mode 100644
index 0000000..c279701
--- /dev/null
+++ b/web/api/queries/ses/README.md
@@ -0,0 +1,61 @@
+<!--
+title: "Single (or Simple) Exponential Smoothing (`ses`)"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/web/api/queries/ses/README.md
+-->
+
+# Single (or Simple) Exponential Smoothing (`ses`)
+
+> This query is also available as `ema` and `ewma`.
+
+An exponential moving average (`ema`), also known as an exponentially weighted moving average (`ewma`),
+is a first-order infinite impulse response filter that applies exponentially decreasing weighting
+factors: the weight of each older datum shrinks exponentially, but never reaches zero.
+
+In simple terms, this is like an average value, but more recent values are given more weight.
+
+Netdata automatically adjusts the weight (`alpha`) based on the number of values processed,
+using the formula:
+
+```
+window = min(number of values, 15)
+alpha = 2 / (window + 1)
+```
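
For example, with the default maximum window of `15`:

```
number of values = 60
window = min(60, 15) = 15
alpha  = 2 / (15 + 1) = 0.125
```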
+
+You can change the default maximum window of `15` by setting the following in `netdata.conf`:
+
+```
+[web]
+ ses max window = 15
+```
+
+## How to use
+
+Use it in alarms like this:
+
+```
+ alarm: my_alarm
+ on: my_chart
+lookup: ses -1m unaligned of my_dimension
+ warn: $this > 1000
+```
+
+`ses` does not change the units. For example, if the chart's units are `requests/sec`, the exponential
+moving average will be expressed in the same units.
+
+It can also be used in APIs and badges as `&group=ses` in the URL.
+
+## Examples
+
+Examining the last minute of `successful` web server responses:
+
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=min&after=-60&label=min)
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=average&after=-60&label=average&value_color=yellow)
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=ses&after=-60&label=single+exponential+smoothing&value_color=orange)
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=max&after=-60&label=max)
+
+## References
+
+- <https://en.wikipedia.org/wiki/Moving_average#exponential-moving-average>
+- <https://en.wikipedia.org/wiki/Exponential_smoothing>
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fweb%2Fapi%2Fqueries%2Fses%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/web/api/queries/ses/ses.c b/web/api/queries/ses/ses.c
new file mode 100644
index 0000000..772505f
--- /dev/null
+++ b/web/api/queries/ses/ses.c
@@ -0,0 +1,93 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "ses.h"
+
+
+// ----------------------------------------------------------------------------
+// single exponential smoothing
+
+struct grouping_ses {
+ calculated_number alpha;
+ calculated_number alpha_other;
+ calculated_number level;
+ size_t count;
+};
+
+static size_t max_window_size = 15;
+
+void grouping_init_ses(void) {
+ long long ret = config_get_number(CONFIG_SECTION_WEB, "ses max window", (long long)max_window_size);
+ if(ret <= 1) {
+ config_set_number(CONFIG_SECTION_WEB, "ses max window", (long long)max_window_size);
+ }
+ else {
+ max_window_size = (size_t) ret;
+ }
+}
+
+static inline calculated_number window(RRDR *r, struct grouping_ses *g) {
+ (void)g;
+
+ calculated_number points;
+ if(r->group == 1) {
+ // provide a running SES across all the points wanted
+ points = r->internal.points_wanted;
+ }
+ else {
+ // provide a SES over the values of each group of points
+ points = r->group;
+ }
+
+ return (points > max_window_size) ? max_window_size : points;
+}
+
+static inline void set_alpha(RRDR *r, struct grouping_ses *g) {
+ // https://en.wikipedia.org/wiki/Moving_average#Exponential_moving_average
+ // A commonly used value for alpha is 2 / (N + 1)
+ g->alpha = 2.0 / (window(r, g) + 1.0);
+ g->alpha_other = 1.0 - g->alpha;
+}
+
+void *grouping_create_ses(RRDR *r) {
+ struct grouping_ses *g = (struct grouping_ses *)callocz(1, sizeof(struct grouping_ses));
+ set_alpha(r, g);
+ g->level = 0.0;
+ return g;
+}
+
+// resets when the query switches dimensions,
+// so clear everything to restart
+void grouping_reset_ses(RRDR *r) {
+ struct grouping_ses *g = (struct grouping_ses *)r->internal.grouping_data;
+ g->level = 0.0;
+ g->count = 0;
+}
+
+void grouping_free_ses(RRDR *r) {
+ freez(r->internal.grouping_data);
+ r->internal.grouping_data = NULL;
+}
+
+void grouping_add_ses(RRDR *r, calculated_number value) {
+ struct grouping_ses *g = (struct grouping_ses *)r->internal.grouping_data;
+
+ if(calculated_number_isnumber(value)) {
+ if(unlikely(!g->count))
+ g->level = value;
+
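+ // exponential smoothing recurrence: level = alpha * value + (1 - alpha) * previous level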
+ g->level = g->alpha * value + g->alpha_other * g->level;
+ g->count++;
+ }
+}
+
+calculated_number grouping_flush_ses(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr) {
+ struct grouping_ses *g = (struct grouping_ses *)r->internal.grouping_data;
+
+ if(unlikely(!g->count || !calculated_number_isnumber(g->level))) {
+ *rrdr_value_options_ptr |= RRDR_VALUE_EMPTY;
+ return 0.0;
+ }
+
+ return g->level;
+}
diff --git a/web/api/queries/ses/ses.h b/web/api/queries/ses/ses.h
new file mode 100644
index 0000000..603fdb5
--- /dev/null
+++ b/web/api/queries/ses/ses.h
@@ -0,0 +1,17 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_API_QUERIES_SES_H
+#define NETDATA_API_QUERIES_SES_H
+
+#include "../query.h"
+#include "../rrdr.h"
+
+extern void grouping_init_ses(void);
+
+extern void *grouping_create_ses(RRDR *r);
+extern void grouping_reset_ses(RRDR *r);
+extern void grouping_free_ses(RRDR *r);
+extern void grouping_add_ses(RRDR *r, calculated_number value);
+extern calculated_number grouping_flush_ses(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);
+
+#endif //NETDATA_API_QUERIES_SES_H
diff --git a/web/api/queries/stddev/Makefile.am b/web/api/queries/stddev/Makefile.am
new file mode 100644
index 0000000..161784b
--- /dev/null
+++ b/web/api/queries/stddev/Makefile.am
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/web/api/queries/stddev/README.md b/web/api/queries/stddev/README.md
new file mode 100644
index 0000000..7cd7d62
--- /dev/null
+++ b/web/api/queries/stddev/README.md
@@ -0,0 +1,107 @@
+<!--
+title: "standard deviation (`stddev`)"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/web/api/queries/stddev/README.md
+-->
+
+# standard deviation (`stddev`)
+
+The standard deviation is a measure that is used to quantify the amount of variation or dispersion
+of a set of data values.
+
+A low standard deviation indicates that the data points tend to be close to the mean (also called the
+expected value) of the set, while a high standard deviation indicates that the data points are spread
+out over a wider range of values.
+
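+For a window of `n` collected values with mean `m`, the sample standard deviation (as
+implemented in `stddev.c` below) is:
+
+```
+variance = sum( (x - m)^2 ) / (n - 1)
+stddev   = sqrt(variance)
+```
+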
+## how to use
+
+Use it in alarms like this:
+
+```
+ alarm: my_alarm
+ on: my_chart
+lookup: stddev -1m unaligned of my_dimension
+ warn: $this > 1000
+```
+
+`stddev` does not change the units. For example, if the chart's units are `requests/sec`, the standard
+deviation will be expressed in the same units.
+
+It can also be used in APIs and badges as `&group=stddev` in the URL.
+
+## Examples
+
+Examining the last minute of `successful` web server responses:
+
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&dimensions=success&group=min&after=-60&label=min)
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&dimensions=success&group=average&after=-60&label=average&value_color=yellow)
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&dimensions=success&group=stddev&after=-60&label=standard+deviation&value_color=orange)
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&dimensions=success&group=max&after=-60&label=max)
+
+## References
+
+Check <https://en.wikipedia.org/wiki/Standard_deviation>.
+
+---
+
+# Coefficient of variation (`cv`)
+
+> This query is also available as `rsd`.
+
+The coefficient of variation (`cv`), also known as relative standard deviation (`rsd`),
+is a standardized measure of dispersion of a probability distribution or frequency distribution.
+
+It is defined as the ratio of the **standard deviation** to the **mean**.
+
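+As a formula (matching the implementation in `stddev.c` below):
+
+```
+cv = 100 * stddev / |mean|
+```
+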
+In simple terms, it expresses the variation as a percentage of the mean. So, if the average value of a
+metric is 1000 and its standard deviation is 100 (meaning that it typically varies between 900 and 1100), then `cv` is 10%.
+
+This is an easy way to check the % variation, without using absolute values.
+
+For example, you may trigger an alarm if your web server's requests/sec `cv` is above 20 (`%`)
+over the last minute. So if your web server was serving 1000 reqs/sec over the last minute,
+the alarm will trigger if it had spikes below 800/sec or above 1200/sec.
+
+## how to use
+
+Use it in alarms like this:
+
+```
+ alarm: my_alarm
+ on: my_chart
+lookup: cv -1m unaligned of my_dimension
+ units: %
+ warn: $this > 20
+```
+
+The unit reported by `cv` is always `%`.
+
+It can also be used in APIs and badges as `&group=cv` in the URL.
+
+## Examples
+
+Examining the last minute of `successful` web server responses:
+
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&dimensions=success&group=min&after=-60&label=min)
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&dimensions=success&group=average&after=-60&label=average&value_color=yellow)
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&dimensions=success&group=cv&after=-60&label=coefficient+of+variation&value_color=orange&units=pcent)
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&dimensions=success&group=max&after=-60&label=max)
+
+## References
+
+Check <https://en.wikipedia.org/wiki/Coefficient_of_variation>.
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fweb%2Fapi%2Fqueries%2Fstddev%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/web/api/queries/stddev/stddev.c b/web/api/queries/stddev/stddev.c
new file mode 100644
index 0000000..1625844
--- /dev/null
+++ b/web/api/queries/stddev/stddev.c
@@ -0,0 +1,178 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "stddev.h"
+
+
+// ----------------------------------------------------------------------------
+// stddev
+
+// this implementation comes from:
+// https://www.johndcook.com/blog/standard_deviation/
+
+struct grouping_stddev {
+ long count;
+ calculated_number m_oldM, m_newM, m_oldS, m_newS;
+};
+
+void *grouping_create_stddev(RRDR *r) {
+ UNUSED (r);
+ return callocz(1, sizeof(struct grouping_stddev));
+}
+
+// resets when the query switches dimensions,
+// so clear everything to restart
+void grouping_reset_stddev(RRDR *r) {
+ struct grouping_stddev *g = (struct grouping_stddev *)r->internal.grouping_data;
+ g->count = 0;
+}
+
+void grouping_free_stddev(RRDR *r) {
+ freez(r->internal.grouping_data);
+ r->internal.grouping_data = NULL;
+}
+
+void grouping_add_stddev(RRDR *r, calculated_number value) {
+ struct grouping_stddev *g = (struct grouping_stddev *)r->internal.grouping_data;
+
+ if(calculated_number_isnumber(value)) {
+ g->count++;
+
+ // See Knuth TAOCP vol 2, 3rd edition, page 232
+ if (g->count == 1) {
+ g->m_oldM = g->m_newM = value;
+ g->m_oldS = 0.0;
+ }
+ else {
+ g->m_newM = g->m_oldM + (value - g->m_oldM) / g->count;
+ g->m_newS = g->m_oldS + (value - g->m_oldM) * (value - g->m_newM);
+
+ // set up for next iteration
+ g->m_oldM = g->m_newM;
+ g->m_oldS = g->m_newS;
+ }
+ }
+}
+
+static inline calculated_number mean(struct grouping_stddev *g) {
+ return (g->count > 0) ? g->m_newM : 0.0;
+}
+
+static inline calculated_number variance(struct grouping_stddev *g) {
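+ // sample variance, with Bessel's correction (divide by n - 1)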
+ return ( (g->count > 1) ? g->m_newS/(g->count - 1) : 0.0 );
+}
+
+static inline calculated_number stddev(struct grouping_stddev *g) {
+ return sqrtl(variance(g));
+}
+
+calculated_number grouping_flush_stddev(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr) {
+ struct grouping_stddev *g = (struct grouping_stddev *)r->internal.grouping_data;
+
+ calculated_number value;
+
+ if(likely(g->count > 1)) {
+ value = stddev(g);
+
+ if(!calculated_number_isnumber(value)) {
+ value = 0.0;
+ *rrdr_value_options_ptr |= RRDR_VALUE_EMPTY;
+ }
+ }
+ else if(g->count == 1) {
+ value = 0.0;
+ }
+ else {
+ value = 0.0;
+ *rrdr_value_options_ptr |= RRDR_VALUE_EMPTY;
+ }
+
+ grouping_reset_stddev(r);
+
+ return value;
+}
+
+// https://en.wikipedia.org/wiki/Coefficient_of_variation
+calculated_number grouping_flush_coefficient_of_variation(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr) {
+ struct grouping_stddev *g = (struct grouping_stddev *)r->internal.grouping_data;
+
+ calculated_number value;
+
+ if(likely(g->count > 1)) {
+ calculated_number m = mean(g);
+ value = 100.0 * stddev(g) / ((m < 0)? -m : m);
+
+ if(unlikely(!calculated_number_isnumber(value))) {
+ value = 0.0;
+ *rrdr_value_options_ptr |= RRDR_VALUE_EMPTY;
+ }
+ }
+ else if(g->count == 1) {
+ // one value collected
+ value = 0.0;
+ }
+ else {
+ // no values collected
+ value = 0.0;
+ *rrdr_value_options_ptr |= RRDR_VALUE_EMPTY;
+ }
+
+ grouping_reset_stddev(r);
+
+ return value;
+}
+
+
+/*
+ * Mean = average
+ *
+calculated_number grouping_flush_mean(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr) {
+ struct grouping_stddev *g = (struct grouping_stddev *)r->internal.grouping_data;
+
+ calculated_number value;
+
+ if(unlikely(!g->count)) {
+ value = 0.0;
+ *rrdr_value_options_ptr |= RRDR_VALUE_EMPTY;
+ }
+ else {
+ value = mean(g);
+
+ if(!isnormal(value)) {
+ value = 0.0;
+ *rrdr_value_options_ptr |= RRDR_VALUE_EMPTY;
+ }
+ }
+
+ grouping_reset_stddev(r);
+
+ return value;
+}
+ */
+
+/*
+ * It is not advised to use this version of variance directly
+ *
+calculated_number grouping_flush_variance(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr) {
+ struct grouping_stddev *g = (struct grouping_stddev *)r->internal.grouping_data;
+
+ calculated_number value;
+
+ if(unlikely(!g->count)) {
+ value = 0.0;
+ *rrdr_value_options_ptr |= RRDR_VALUE_EMPTY;
+ }
+ else {
+ value = variance(g);
+
+ if(!isnormal(value)) {
+ value = 0.0;
+ *rrdr_value_options_ptr |= RRDR_VALUE_EMPTY;
+ }
+ }
+
+ grouping_reset_stddev(r);
+
+ return value;
+}
+*/ \ No newline at end of file
diff --git a/web/api/queries/stddev/stddev.h b/web/api/queries/stddev/stddev.h
new file mode 100644
index 0000000..7a46975
--- /dev/null
+++ b/web/api/queries/stddev/stddev.h
@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_API_QUERIES_STDDEV_H
+#define NETDATA_API_QUERIES_STDDEV_H
+
+#include "../query.h"
+#include "../rrdr.h"
+
+extern void *grouping_create_stddev(RRDR *r);
+extern void grouping_reset_stddev(RRDR *r);
+extern void grouping_free_stddev(RRDR *r);
+extern void grouping_add_stddev(RRDR *r, calculated_number value);
+extern calculated_number grouping_flush_stddev(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);
+extern calculated_number grouping_flush_coefficient_of_variation(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);
+// extern calculated_number grouping_flush_mean(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);
+// extern calculated_number grouping_flush_variance(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);
+
+#endif //NETDATA_API_QUERIES_STDDEV_H
diff --git a/web/api/queries/sum/Makefile.am b/web/api/queries/sum/Makefile.am
new file mode 100644
index 0000000..161784b
--- /dev/null
+++ b/web/api/queries/sum/Makefile.am
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/web/api/queries/sum/README.md b/web/api/queries/sum/README.md
new file mode 100644
index 0000000..aeace0a
--- /dev/null
+++ b/web/api/queries/sum/README.md
@@ -0,0 +1,48 @@
+<!--
+title: "Sum"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/web/api/queries/sum/README.md
+-->
+
+# Sum
+
+This module sums all the values in the time-frame requested.
+
+You can use `sum` to find the volume of something over a period.
+
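+For example (illustrative values), if a chart is collected every second in `requests/sec`,
+the 1-minute `sum` of a dimension gives the total number of requests in that minute:
+
+```
+sum = value(t-59) + value(t-58) + ... + value(t)
+```
+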
+## how to use
+
+Use it in alarms like this:
+
+```
+ alarm: my_alarm
+ on: my_chart
+lookup: sum -1m unaligned of my_dimension
+ warn: $this > 1000
+```
+
+`sum` does not change the units. For example, if the chart's units are `requests/sec`, the result
+will be expressed in the same units.
+
+It can also be used in APIs and badges as `&group=sum` in the URL.
+
+## Examples
+
+Examining the last minute of `successful` web server responses:
+
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=min&after=-60&label=min)
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=average&after=-60&label=average)
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=max&after=-60&label=max)
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=sum&after=-60&label=1m+sum&value_color=orange&units=requests)
+
+## References
+
+- <https://en.wikipedia.org/wiki/Summation>
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fweb%2Fapi%2Fqueries%2Fsum%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/web/api/queries/sum/sum.c b/web/api/queries/sum/sum.c
new file mode 100644
index 0000000..0da9937
--- /dev/null
+++ b/web/api/queries/sum/sum.c
@@ -0,0 +1,58 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "sum.h"
+
+// ----------------------------------------------------------------------------
+// sum
+
+struct grouping_sum {
+ calculated_number sum;
+ size_t count;
+};
+
+void *grouping_create_sum(RRDR *r) {
+ (void)r;
+ return callocz(1, sizeof(struct grouping_sum));
+}
+
+// resets when the query switches dimensions,
+// so clear everything to restart
+void grouping_reset_sum(RRDR *r) {
+ struct grouping_sum *g = (struct grouping_sum *)r->internal.grouping_data;
+ g->sum = 0;
+ g->count = 0;
+}
+
+void grouping_free_sum(RRDR *r) {
+ freez(r->internal.grouping_data);
+ r->internal.grouping_data = NULL;
+}
+
+void grouping_add_sum(RRDR *r, calculated_number value) {
+ if(!isnan(value)) {
+ struct grouping_sum *g = (struct grouping_sum *)r->internal.grouping_data;
+ g->sum += value;
+ g->count++;
+ }
+}
+
+calculated_number grouping_flush_sum(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr) {
+ struct grouping_sum *g = (struct grouping_sum *)r->internal.grouping_data;
+
+ calculated_number value;
+
+ if(unlikely(!g->count)) {
+ value = 0.0;
+ *rrdr_value_options_ptr |= RRDR_VALUE_EMPTY;
+ }
+ else {
+ value = g->sum;
+ }
+
+ g->sum = 0.0;
+ g->count = 0;
+
+ return value;
+}
+
+
diff --git a/web/api/queries/sum/sum.h b/web/api/queries/sum/sum.h
new file mode 100644
index 0000000..9dc8d20
--- /dev/null
+++ b/web/api/queries/sum/sum.h
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_API_QUERY_SUM_H
+#define NETDATA_API_QUERY_SUM_H
+
+#include "../query.h"
+#include "../rrdr.h"
+
+extern void *grouping_create_sum(RRDR *r);
+extern void grouping_reset_sum(RRDR *r);
+extern void grouping_free_sum(RRDR *r);
+extern void grouping_add_sum(RRDR *r, calculated_number value);
+extern calculated_number grouping_flush_sum(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);
+
+#endif //NETDATA_API_QUERY_SUM_H
diff --git a/web/api/tests/valid_urls.c b/web/api/tests/valid_urls.c
new file mode 100644
index 0000000..d8c261c
--- /dev/null
+++ b/web/api/tests/valid_urls.c
@@ -0,0 +1,792 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "../../../libnetdata/libnetdata.h"
+#include "../../../libnetdata/required_dummies.h"
+#include "../../../database/rrd.h"
+#include "../../../web/server/web_client.h"
+#include <setjmp.h>
+#include <cmocka.h>
+#include <stdbool.h>
+
+RRDHOST *sql_create_host_by_uuid(char *hostname)
+{
+ (void) hostname;
+ return NULL;
+}
+
+RRDHOST *__wrap_sql_create_host_by_uuid(char *hostname)
+{
+ (void) hostname;
+ return NULL;
+}
+
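+// Render a byte buffer as a printable string, escaping control and non-ASCII bytes as \XX,
+// so binary request data can be logged safely in the tests below.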
+void repr(char *result, int result_size, char const *buf, int size)
+{
+ int n;
+ char *end = result + result_size - 1;
+ unsigned char const *ubuf = (unsigned char const *)buf;
+ while (size && result_size > 0) {
+ if (*ubuf <= 0x20 || *ubuf >= 0x80) {
+ n = snprintf(result, result_size, "\\%02X", *ubuf);
+ } else {
+ *result = *ubuf;
+ n = 1;
+ }
+ result += n;
+ result_size -= n;
+ ubuf++;
+ size--;
+ }
+ if (result_size > 0)
+ *(result++) = 0;
+ else
+ *end = 0;
+}
+
+// ---------------------------------- Mocking accesses from web_client ------------------------------------------------
+
+ssize_t send(int sockfd, const void *buf, size_t len, int flags)
+{
+ info("Mocking send: %zu bytes\n", len);
+ (void)sockfd;
+ (void)buf;
+ (void)flags;
+ return len;
+}
+
+RRDHOST *__wrap_rrdhost_find_by_hostname(const char *hostname, uint32_t hash)
+{
+ (void)hostname;
+ (void)hash;
+ return NULL;
+}
+
+/* Note: we've got some intricate code inside the global statistics module; it might be useful to pull it
+ inside the test set instead of mocking it. */
+void __wrap_finished_web_request_statistics(
+ uint64_t dt, uint64_t bytes_received, uint64_t bytes_sent, uint64_t content_size, uint64_t compressed_content_size)
+{
+ (void)dt;
+ (void)bytes_received;
+ (void)bytes_sent;
+ (void)content_size;
+ (void)compressed_content_size;
+}
+
+char *__wrap_config_get(struct config *root, const char *section, const char *name, const char *default_value)
+{
+ if (!strcmp(section, CONFIG_SECTION_WEB) && !strcmp(name, "web files owner"))
+ return "netdata";
+ (void)root;
+ (void)default_value;
+ return "UNKNOWN FIX ME";
+}
+
+int __wrap_web_client_api_request_v1(RRDHOST *host, struct web_client *w, char *url)
+{
+ char url_repr[160];
+ repr(url_repr, sizeof(url_repr), url, strlen(url));
+ printf("web_client_api_request_v1(url=\"%s\")\n", url_repr);
+ check_expected_ptr(host);
+ check_expected_ptr(w);
+ check_expected_ptr(url_repr);
+ return HTTP_RESP_OK;
+}
+
+int __wrap_mysendfile(struct web_client *w, char *filename)
+{
+ (void)w;
+ printf("mysendfile(filename=\"%s\"\n", filename);
+ check_expected_ptr(filename);
+ return HTTP_RESP_OK;
+}
+
+int __wrap_rrdpush_receiver_thread_spawn(RRDHOST *host, struct web_client *w, char *url)
+{
+ (void)host;
+ (void)w;
+ (void)url;
+ return 0;
+}
+
+RRDHOST *__wrap_rrdhost_find_by_guid(const char *guid, uint32_t hash)
+{
+ (void)guid;
+ (void)hash;
+ printf("FIXME: rrdset_find_guid\n");
+ return NULL;
+}
+
+RRDSET *__wrap_rrdset_find_byname(RRDHOST *host, const char *name)
+{
+ (void)host;
+ (void)name;
+ printf("FIXME: rrdset_find_byname\n");
+ return NULL;
+}
+
+RRDSET *__wrap_rrdset_find(RRDHOST *host, const char *id)
+{
+ (void)host;
+ (void)id;
+ printf("FIXME: rrdset_find\n");
+ return NULL;
+}
+
+// -------------------------------- Mocking the log - dump straight through --------------------------------------------
+
+void __wrap_debug_int(const char *file, const char *function, const unsigned long line, const char *fmt, ...)
+{
+ (void)file;
+ (void)function;
+ (void)line;
+ va_list args;
+ va_start(args, fmt);
+ printf(" DEBUG: ");
+ vprintf(fmt, args);
+ printf("\n");
+ va_end(args);
+}
+
+void __wrap_info_int(const char *file, const char *function, const unsigned long line, const char *fmt, ...)
+{
+ (void)file;
+ (void)function;
+ (void)line;
+ va_list args;
+ va_start(args, fmt);
+ printf(" INFO: ");
+ vprintf(fmt, args);
+ printf("\n");
+ va_end(args);
+}
+
+void __wrap_error_int(
+ const char *prefix, const char *file, const char *function, const unsigned long line, const char *fmt, ...)
+{
+ (void)prefix;
+ (void)file;
+ (void)function;
+ (void)line;
+ va_list args;
+ va_start(args, fmt);
+ printf(" ERROR: ");
+ vprintf(fmt, args);
+ printf("\n");
+ va_end(args);
+}
+
+void __wrap_fatal_int(const char *file, const char *function, const unsigned long line, const char *fmt, ...)
+{
+ (void)file;
+ (void)function;
+ (void)line;
+ va_list args;
+ va_start(args, fmt);
+ printf("FATAL: ");
+ vprintf(fmt, args);
+ printf("\n");
+ va_end(args);
+ fail();
+}
+
+WEB_SERVER_MODE web_server_mode = WEB_SERVER_MODE_STATIC_THREADED;
+char *netdata_configured_web_dir = "UNKNOWN FIXME";
+RRDHOST *localhost = NULL;
+
+struct config netdata_config = { .first_section = NULL,
+ .last_section = NULL,
+ .mutex = NETDATA_MUTEX_INITIALIZER,
+ .index = { .avl_tree = { .root = NULL, .compar = appconfig_section_compare },
+ .rwlock = AVL_LOCK_INITIALIZER } };
+
+/* Note: this is not a CMocka group_test_setup/teardown pair. This is performed per-test.
+*/
+static struct web_client *setup_fresh_web_client()
+{
+ struct web_client *w = (struct web_client *)malloc(sizeof(struct web_client));
+ memset(w, 0, sizeof(struct web_client));
+ w->response.data = buffer_create(NETDATA_WEB_RESPONSE_INITIAL_SIZE);
+ w->response.header = buffer_create(NETDATA_WEB_RESPONSE_HEADER_SIZE);
+ w->response.header_output = buffer_create(NETDATA_WEB_RESPONSE_HEADER_SIZE);
+ strcpy(w->origin, "*"); // Simulate web_client_create_on_fd()
+ w->cookie1[0] = 0; // Simulate web_client_create_on_fd()
+ w->cookie2[0] = 0; // Simulate web_client_create_on_fd()
+ w->acl = 0x1f; // Everything on
+ return w;
+}
+
+static void destroy_web_client(struct web_client *w)
+{
+ buffer_free(w->response.data);
+ buffer_free(w->response.header);
+ buffer_free(w->response.header_output);
+ free(w);
+}
+
+//////////////////////////// Test cases ///////////////////////////////////////////////////////////////////////////////
+
+static void only_root(void **state)
+{
+ (void)state;
+
+ if (localhost != NULL)
+ free(localhost);
+ localhost = malloc(sizeof(RRDHOST));
+
+ struct web_client *w = setup_fresh_web_client();
+ buffer_strcat(w->response.data, "GET / HTTP/1.1\r\n\r\n");
+
+ char debug[4096];
+ repr(debug, sizeof(debug), w->response.data->buffer, w->response.data->len);
+ printf("-> \"%s\"\n", debug);
+
+ //char expected_url_repr[4096];
+ //repr(expected_url_repr, sizeof(expected_url_repr), def->url_out_repr, strlen(def->url_out_repr));
+
+ expect_string(__wrap_mysendfile, filename, "/");
+
+ web_client_process_request(w);
+
+ //assert_string_equal(w->decoded_query_string, def->query_out);
+ destroy_web_client(w);
+ free(localhost);
+ localhost = NULL;
+}
+
+static void two_slashes(void **state)
+{
+ (void)state;
+
+ if (localhost != NULL)
+ free(localhost);
+ localhost = malloc(sizeof(RRDHOST));
+
+ struct web_client *w = setup_fresh_web_client();
+ buffer_strcat(w->response.data, "GET // HTTP/1.1\r\n\r\n");
+
+ char debug[4096];
+ repr(debug, sizeof(debug), w->response.data->buffer, w->response.data->len);
+ printf("-> \"%s\"\n", debug);
+
+ //char expected_url_repr[4096];
+ //repr(expected_url_repr, sizeof(expected_url_repr), def->url_out_repr, strlen(def->url_out_repr));
+
+ expect_string(__wrap_mysendfile, filename, "//");
+
+ web_client_process_request(w);
+
+ //assert_string_equal(w->decoded_query_string, def->query_out);
+ destroy_web_client(w);
+ free(localhost);
+ localhost = NULL;
+}
+
+static void absolute_url(void **state)
+{
+ (void)state;
+
+ if (localhost != NULL)
+ free(localhost);
+ localhost = malloc(sizeof(RRDHOST));
+
+ struct web_client *w = setup_fresh_web_client();
+ buffer_strcat(w->response.data, "GET http://localhost:19999/api/v1/info HTTP/1.1\r\n\r\n");
+
+ char debug[4096];
+ repr(debug, sizeof(debug), w->response.data->buffer, w->response.data->len);
+ printf("-> \"%s\"\n", debug);
+
+ //char expected_url_repr[4096];
+ //repr(expected_url_repr, sizeof(expected_url_repr), def->url_out_repr, strlen(def->url_out_repr));
+
+ expect_value(__wrap_web_client_api_request_v1, host, localhost);
+ expect_value(__wrap_web_client_api_request_v1, w, w);
+ expect_string(__wrap_web_client_api_request_v1, url_repr, "info");
+
+ web_client_process_request(w);
+
+ assert_string_equal(w->decoded_query_string, "?blah");
+ destroy_web_client(w);
+ free(localhost);
+ localhost = NULL;
+}
+
+static void valid_url(void **state)
+{
+ (void)state;
+
+ if (localhost != NULL)
+ free(localhost);
+ localhost = malloc(sizeof(RRDHOST));
+
+ struct web_client *w = setup_fresh_web_client();
+ buffer_strcat(w->response.data, "GET /api/v1/info?blah HTTP/1.1\r\n\r\n");
+
+ char debug[4096];
+ repr(debug, sizeof(debug), w->response.data->buffer, w->response.data->len);
+ printf("-> \"%s\"\n", debug);
+
+ //char expected_url_repr[4096];
+ //repr(expected_url_repr, sizeof(expected_url_repr), def->url_out_repr, strlen(def->url_out_repr));
+
+ expect_value(__wrap_web_client_api_request_v1, host, localhost);
+ expect_value(__wrap_web_client_api_request_v1, w, w);
+ expect_string(__wrap_web_client_api_request_v1, url_repr, "info");
+
+ web_client_process_request(w);
+
+ assert_string_equal(w->decoded_query_string, "?blah");
+ destroy_web_client(w);
+ free(localhost);
+ localhost = NULL;
+}
+
+/* RFC2616, section 4.1:
+
+ In the interest of robustness, servers SHOULD ignore any empty
+ line(s) received where a Request-Line is expected. In other words, if
+ the server is reading the protocol stream at the beginning of a
+ message and receives a CRLF first, it should ignore the CRLF.
+*/
+static void leading_blanks(void **state)
+{
+ (void)state;
+
+ if (localhost != NULL)
+ free(localhost);
+ localhost = malloc(sizeof(RRDHOST));
+
+ struct web_client *w = setup_fresh_web_client();
+ buffer_strcat(w->response.data, "\r\n\r\nGET /api/v1/info?blah HTTP/1.1\r\n\r\n");
+
+ char debug[4096];
+ repr(debug, sizeof(debug), w->response.data->buffer, w->response.data->len);
+ printf("-> \"%s\"\n", debug);
+
+ //char expected_url_repr[4096];
+ //repr(expected_url_repr, sizeof(expected_url_repr), def->url_out_repr, strlen(def->url_out_repr));
+
+ expect_value(__wrap_web_client_api_request_v1, host, localhost);
+ expect_value(__wrap_web_client_api_request_v1, w, w);
+ expect_string(__wrap_web_client_api_request_v1, url_repr, "info");
+
+ web_client_process_request(w);
+
+ assert_string_equal(w->decoded_query_string, "?blah");
+ destroy_web_client(w);
+ free(localhost);
+ localhost = NULL;
+}
+
+static void empty_url(void **state)
+{
+ (void)state;
+
+ if (localhost != NULL)
+ free(localhost);
+ localhost = malloc(sizeof(RRDHOST));
+
+ struct web_client *w = setup_fresh_web_client();
+ buffer_strcat(w->response.data, "GET HTTP/1.1\r\n\r\n");
+
+ char debug[4096];
+ repr(debug, sizeof(debug), w->response.data->buffer, w->response.data->len);
+ printf("-> \"%s\"\n", debug);
+
+ //char expected_url_repr[4096];
+ //repr(expected_url_repr, sizeof(expected_url_repr), def->url_out_repr, strlen(def->url_out_repr));
+
+ expect_value(__wrap_web_client_api_request_v1, host, localhost);
+ expect_value(__wrap_web_client_api_request_v1, w, w);
+ expect_string(__wrap_web_client_api_request_v1, url_repr, "info");
+
+ web_client_process_request(w);
+
+ assert_string_equal(w->decoded_query_string, "?blah");
+ destroy_web_client(w);
+ free(localhost);
+ localhost = NULL;
+}
+
+/* If the %-escape decoding is performed at the correct time then the URL should not be treated as a query, but
+ instead as the path "/api/v1/info?blah?", which should dispatch into the API with the given values.
+*/
+static void not_a_query(void **state)
+{
+ (void)state;
+ localhost = malloc(sizeof(RRDHOST));
+
+ struct web_client *w = setup_fresh_web_client();
+ buffer_strcat(w->response.data, "GET /api/v1/info%3fblah%3f HTTP/1.1\r\n\r\n");
+
+ char debug[160];
+ repr(debug, sizeof(debug), w->response.data->buffer, w->response.data->len);
+ printf("->%s\n", debug);
+
+ char expected_url_repr[160];
+ repr(expected_url_repr, sizeof(expected_url_repr), "info?blah?", 10);
+
+ expect_value(__wrap_web_client_api_request_v1, host, localhost);
+ expect_value(__wrap_web_client_api_request_v1, w, w);
+ expect_string(__wrap_web_client_api_request_v1, url_repr, expected_url_repr);
+
+ web_client_process_request(w);
+
+ assert_string_equal(w->decoded_query_string, "");
+ destroy_web_client(w);
+ free(localhost);
+}
+
+static void cr_in_url(void **state)
+{
+ (void)state;
+ localhost = malloc(sizeof(RRDHOST));
+
+ struct web_client *w = setup_fresh_web_client();
+ buffer_strcat(w->response.data, "GET /api/v1/inf\ro\t?blah HTTP/1.1\r\n\r\n");
+
+ char debug[160];
+ repr(debug, sizeof(debug), w->response.data->buffer, w->response.data->len);
+ printf("->%s\n", debug);
+
+ char expected_url_repr[160];
+ repr(expected_url_repr, sizeof(expected_url_repr), "inf\no\t", 6);
+
+ web_client_process_request(w);
+
+ assert_int_equal(w->response.code, HTTP_RESP_BAD_REQUEST);
+
+ destroy_web_client(w);
+ free(localhost);
+}
+static void newline_in_url(void **state)
+{
+ (void)state;
+ localhost = malloc(sizeof(RRDHOST));
+
+ struct web_client *w = setup_fresh_web_client();
+ buffer_strcat(w->response.data, "GET /api/v1/inf\no\t?blah HTTP/1.1\r\n\r\n");
+
+ char debug[160];
+ repr(debug, sizeof(debug), w->response.data->buffer, w->response.data->len);
+ printf("->%s\n", debug);
+
+ char expected_url_repr[160];
+ repr(expected_url_repr, sizeof(expected_url_repr), "inf\no\t", 6);
+
+ web_client_process_request(w);
+
+ assert_int_equal(w->response.code, HTTP_RESP_BAD_REQUEST);
+
+ destroy_web_client(w);
+ free(localhost);
+}
+
+static void bad_version(void **state)
+{
+ (void)state;
+ localhost = malloc(sizeof(RRDHOST));
+
+ struct web_client *w = setup_fresh_web_client();
+ buffer_strcat(w->response.data, "GET /api/v1/info?blah HTTP/1.2\r\n\r\n");
+
+ char debug[160];
+ repr(debug, sizeof(debug), w->response.data->buffer, w->response.data->len);
+ printf("->%s\n", debug);
+
+ char expected_url_repr[160];
+ repr(expected_url_repr, sizeof(expected_url_repr), "inf\no\t", 6);
+
+ web_client_process_request(w);
+
+ assert_int_equal(w->response.code, HTTP_RESP_BAD_REQUEST);
+
+ destroy_web_client(w);
+ free(localhost);
+}
+
+static void pathless_query(void **state)
+{
+ (void)state;
+ localhost = malloc(sizeof(RRDHOST));
+
+ struct web_client *w = setup_fresh_web_client();
+ buffer_strcat(w->response.data, "GET ?blah HTTP/1.1\r\n\r\n");
+
+ char debug[160];
+ repr(debug, sizeof(debug), w->response.data->buffer, w->response.data->len);
+ printf("->%s\n", debug);
+
+ char expected_url_repr[160];
+ repr(expected_url_repr, sizeof(expected_url_repr), "inf\no\t", 6);
+
+ web_client_process_request(w);
+
+ assert_int_equal(w->response.code, HTTP_RESP_BAD_REQUEST);
+
+ destroy_web_client(w);
+ free(localhost);
+}
+
+static void pathless_fragment(void **state)
+{
+ (void)state;
+ localhost = malloc(sizeof(RRDHOST));
+
+ struct web_client *w = setup_fresh_web_client();
+ buffer_strcat(w->response.data, "GET #blah HTTP/1.1\r\n\r\n");
+
+ char debug[160];
+ repr(debug, sizeof(debug), w->response.data->buffer, w->response.data->len);
+ printf("->%s\n", debug);
+
+ char expected_url_repr[160];
+ repr(expected_url_repr, sizeof(expected_url_repr), "inf\no\t", 6);
+
+ web_client_process_request(w);
+
+ assert_int_equal(w->response.code, HTTP_RESP_BAD_REQUEST);
+
+ destroy_web_client(w);
+ free(localhost);
+}
+
+static void short_percent(void **state)
+{
+ (void)state;
+ localhost = malloc(sizeof(RRDHOST));
+
+ struct web_client *w = setup_fresh_web_client();
+ buffer_strcat(w->response.data, "GET % HTTP/1.1\r\n\r\n");
+
+ char debug[160];
+ repr(debug, sizeof(debug), w->response.data->buffer, w->response.data->len);
+ printf("->%s\n", debug);
+
+ char expected_url_repr[160];
+ repr(expected_url_repr, sizeof(expected_url_repr), "inf\no\t", 6);
+
+ web_client_process_request(w);
+
+ assert_int_equal(w->response.code, HTTP_RESP_BAD_REQUEST);
+
+ destroy_web_client(w);
+ free(localhost);
+}
+
+static void short_percent2(void **state)
+{
+ (void)state;
+ localhost = malloc(sizeof(RRDHOST));
+
+ struct web_client *w = setup_fresh_web_client();
+ buffer_strcat(w->response.data, "GET %0 HTTP/1.1\r\n\r\n");
+
+ char debug[160];
+ repr(debug, sizeof(debug), w->response.data->buffer, w->response.data->len);
+ printf("->%s\n", debug);
+
+ char expected_url_repr[160];
+ repr(expected_url_repr, sizeof(expected_url_repr), "inf\no\t", 6);
+
+ web_client_process_request(w);
+
+ assert_int_equal(w->response.code, HTTP_RESP_BAD_REQUEST);
+
+ destroy_web_client(w);
+ free(localhost);
+}
+
+static void short_percent3(void **state)
+{
+ (void)state;
+ localhost = malloc(sizeof(RRDHOST));
+
+ struct web_client *w = setup_fresh_web_client();
+ buffer_strcat(w->response.data, "GET %");
+
+ char debug[160];
+ repr(debug, sizeof(debug), w->response.data->buffer, w->response.data->len);
+ printf("->%s\n", debug);
+
+ char expected_url_repr[160];
+ repr(expected_url_repr, sizeof(expected_url_repr), "inf\no\t", 6);
+
+ web_client_process_request(w);
+
+ assert_int_equal(w->response.code, HTTP_RESP_BAD_REQUEST);
+
+ destroy_web_client(w);
+ free(localhost);
+}
+
+static void percent_nulls(void **state)
+{
+ (void)state;
+ localhost = malloc(sizeof(RRDHOST));
+
+ struct web_client *w = setup_fresh_web_client();
+ buffer_strcat(w->response.data, "GET %00%00%00%00%00%00 HTTP/1.1\r\n");
+
+ char debug[160];
+ repr(debug, sizeof(debug), w->response.data->buffer, w->response.data->len);
+ printf("->%s\n", debug);
+
+ char expected_url_repr[160];
+ repr(expected_url_repr, sizeof(expected_url_repr), "inf\no\t", 6);
+
+ web_client_process_request(w);
+
+ assert_int_equal(w->response.code, HTTP_RESP_BAD_REQUEST);
+
+ destroy_web_client(w);
+ free(localhost);
+}
+
+static void percent_invalid(void **state)
+{
+ (void)state;
+ localhost = malloc(sizeof(RRDHOST));
+
+ struct web_client *w = setup_fresh_web_client();
+ buffer_strcat(w->response.data, "GET /%x%x%x%x%x%x HTTP/1.1\r\n");
+
+ char debug[160];
+ repr(debug, sizeof(debug), w->response.data->buffer, w->response.data->len);
+ printf("->%s\n", debug);
+
+ char expected_url_repr[160];
+ repr(expected_url_repr, sizeof(expected_url_repr), "inf\no\t", 6);
+
+ web_client_process_request(w);
+
+ assert_int_equal(w->response.code, HTTP_RESP_BAD_REQUEST);
+
+ destroy_web_client(w);
+ free(localhost);
+}
+
+static void space_in_url(void **state)
+{
+ (void)state;
+ localhost = malloc(sizeof(RRDHOST));
+
+ struct web_client *w = setup_fresh_web_client();
+ buffer_strcat(w->response.data, "GET / / HTTP/1.1\r\n\r\n");
+
+ char debug[160];
+ repr(debug, sizeof(debug), w->response.data->buffer, w->response.data->len);
+ printf("->%s\n", debug);
+
+ char expected_url_repr[160];
+ repr(expected_url_repr, sizeof(expected_url_repr), "inf\no\t", 6);
+
+ web_client_process_request(w);
+
+ assert_int_equal(w->response.code, HTTP_RESP_BAD_REQUEST);
+
+ destroy_web_client(w);
+ free(localhost);
+}
+
+static void random_sploit1(void **state)
+{
+ (void)state;
+ localhost = malloc(sizeof(RRDHOST));
+
+ struct web_client *w = setup_fresh_web_client();
+ // FIXME: Encoding probably needs to go through printf
+ buffer_need_bytes(w->response.data, 55);
+ memcpy(
+ w->response.data->buffer,
+ "GET \x03\x00\x00/*\xE0\x00\x00\x00\x00\x00Cookie: mstshash=Administr HTTP/1.1\r\n\r\n", 54);
+ w->response.data->len = 54;
+
+ char debug[160];
+ repr(debug, sizeof(debug), w->response.data->buffer, w->response.data->len);
+ printf("->%s\n", debug);
+
+ char expected_url_repr[160];
+ repr(expected_url_repr, sizeof(expected_url_repr), "inf\no\t", 6);
+
+ web_client_process_request(w);
+
+ assert_int_equal(w->response.code, HTTP_RESP_BAD_REQUEST);
+
+ destroy_web_client(w);
+ free(localhost);
+}
+
+static void null_in_url(void **state)
+{
+ (void)state;
+ localhost = malloc(sizeof(RRDHOST));
+
+ struct web_client *w = setup_fresh_web_client();
+ buffer_strcat(w->response.data, "GET / / HTTP/1.1\r\n\r\n");
+ w->response.data->buffer[5] = 0;
+
+ char debug[160];
+ repr(debug, sizeof(debug), w->response.data->buffer, w->response.data->len);
+ printf("->%s\n", debug);
+
+ char expected_url_repr[160];
+ repr(expected_url_repr, sizeof(expected_url_repr), "inf\no\t", 6);
+
+ web_client_process_request(w);
+
+ assert_int_equal(w->response.code, HTTP_RESP_BAD_REQUEST);
+
+ destroy_web_client(w);
+ free(localhost);
+}
+static void many_ands(void **state)
+{
+ (void)state;
+ localhost = malloc(sizeof(RRDHOST));
+
+ struct web_client *w = setup_fresh_web_client();
+ buffer_strcat(w->response.data, "GET foo?");
+ for (size_t i = 0; i < 600; i++)
+ buffer_strcat(w->response.data, "&");
+ buffer_strcat(w->response.data, " HTTP/1.1\r\n\r\n");
+
+ char debug[2048];
+ repr(debug, sizeof(debug), w->response.data->buffer, w->response.data->len);
+ printf("->%s\n", debug);
+
+ char expected_url_repr[160];
+ repr(expected_url_repr, sizeof(expected_url_repr), "inf\no\t", 6);
+
+ web_client_process_request(w);
+
+ assert_int_equal(w->response.code, HTTP_RESP_BAD_REQUEST);
+
+ destroy_web_client(w);
+ free(localhost);
+}
+
+int main(void)
+{
+ debug_flags = 0xffffffffffff;
+ int fails = 0;
+
+ struct CMUnitTest static_tests[] = {
+ cmocka_unit_test(only_root), cmocka_unit_test(two_slashes), cmocka_unit_test(valid_url),
+ cmocka_unit_test(leading_blanks), cmocka_unit_test(empty_url), cmocka_unit_test(newline_in_url),
+ cmocka_unit_test(not_a_query), cmocka_unit_test(cr_in_url), cmocka_unit_test(pathless_query),
+ cmocka_unit_test(pathless_fragment), cmocka_unit_test(short_percent), cmocka_unit_test(short_percent2),
+ cmocka_unit_test(short_percent3), cmocka_unit_test(percent_nulls), cmocka_unit_test(percent_invalid),
+ cmocka_unit_test(space_in_url), cmocka_unit_test(random_sploit1), cmocka_unit_test(null_in_url),
+ cmocka_unit_test(absolute_url),
+ // cmocka_unit_test(many_ands), CMocka cannot recover after this crash
+ cmocka_unit_test(bad_version)
+ };
+ (void)many_ands;
+
+ fails += cmocka_run_group_tests_name("static_tests", static_tests, NULL, NULL);
+ return fails;
+}
diff --git a/web/api/tests/web_api.c b/web/api/tests/web_api.c
new file mode 100644
index 0000000..3cc0a79
--- /dev/null
+++ b/web/api/tests/web_api.c
@@ -0,0 +1,474 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "../../../libnetdata/libnetdata.h"
+#include "../../../libnetdata/required_dummies.h"
+#include "../../../database/rrd.h"
+#include "../../../web/server/web_client.h"
+#include <setjmp.h>
+#include <cmocka.h>
+#include <stdbool.h>
+
+RRDHOST *sql_create_host_by_uuid(char *hostname)
+{
+ (void) hostname;
+ return NULL;
+}
+
+RRDHOST *__wrap_sql_create_host_by_uuid(char *hostname)
+{
+ (void) hostname;
+ return NULL;
+}
+
+void repr(char *result, int result_size, char const *buf, int size)
+{
+ int n;
+ char *end = result + result_size - 1;
+ unsigned char const *ubuf = (unsigned char const *)buf;
+ while (size && result_size > 0) {
+ if (*ubuf <= 0x20 || *ubuf >= 0x80) {
+ n = snprintf(result, result_size, "\\%02X", *ubuf);
+ } else {
+ *result = *ubuf;
+ n = 1;
+ }
+ result += n;
+ result_size -= n;
+ ubuf++;
+ size--;
+ }
+ if (result_size > 0)
+ *(result++) = 0;
+ else
+ *end = 0;
+}
+
+// ---------------------------------- Mocking accesses from web_client ------------------------------------------------
+
+ssize_t send(int sockfd, const void *buf, size_t len, int flags)
+{
+ info("Mocking send: %zu bytes\n", len);
+ (void)sockfd;
+ (void)buf;
+ (void)flags;
+ return len;
+}
+
+RRDHOST *__wrap_rrdhost_find_by_hostname(const char *hostname, uint32_t hash)
+{
+ (void)hostname;
+ (void)hash;
+ return NULL;
+}
+
+/* Note: we've got some intricate code inside the global statistics module; it might be useful to pull it
+ inside the test set instead of mocking it. */
+void __wrap_finished_web_request_statistics(
+ uint64_t dt, uint64_t bytes_received, uint64_t bytes_sent, uint64_t content_size, uint64_t compressed_content_size)
+{
+ (void)dt;
+ (void)bytes_received;
+ (void)bytes_sent;
+ (void)content_size;
+ (void)compressed_content_size;
+}
+
+char *__wrap_config_get(struct config *root, const char *section, const char *name, const char *default_value)
+{
+ if (!strcmp(section, CONFIG_SECTION_WEB) && !strcmp(name, "web files owner"))
+ return "netdata";
+ (void)root;
+ (void)default_value;
+ return "UNKNOWN FIX ME";
+}
+
+int __wrap_web_client_api_request_v1(RRDHOST *host, struct web_client *w, char *url)
+{
+ char url_repr[160];
+ repr(url_repr, sizeof(url_repr), url, strlen(url));
+ info("web_client_api_request_v1(url=\"%s\")\n", url_repr);
+ check_expected_ptr(host);
+ check_expected_ptr(w);
+ check_expected_ptr(url_repr);
+ return HTTP_RESP_OK;
+}
+
+int __wrap_rrdpush_receiver_thread_spawn(RRDHOST *host, struct web_client *w, char *url)
+{
+ (void)host;
+ (void)w;
+ (void)url;
+ return 0;
+}
+
+RRDHOST *__wrap_rrdhost_find_by_guid(const char *guid, uint32_t hash)
+{
+ (void)guid;
+ (void)hash;
+ printf("FIXME: rrdset_find_guid\n");
+ return NULL;
+}
+
+RRDSET *__wrap_rrdset_find_byname(RRDHOST *host, const char *name)
+{
+ (void)host;
+ (void)name;
+ printf("FIXME: rrdset_find_byname\n");
+ return NULL;
+}
+
+RRDSET *__wrap_rrdset_find(RRDHOST *host, const char *id)
+{
+ (void)host;
+ (void)id;
+ printf("FIXME: rrdset_find\n");
+ return NULL;
+}
+
+// -------------------------------- Mocking the log - capture per-test ------------------------------------------------
+
+char log_buffer[10240] = { 0 };
+void __wrap_debug_int(const char *file, const char *function, const unsigned long line, const char *fmt, ...)
+{
+ (void)file;
+ (void)function;
+ (void)line;
+ va_list args;
+ va_start(args, fmt);
+ size_t cur = strlen(log_buffer);
+ snprintf(log_buffer + cur, sizeof(log_buffer) - cur, " DEBUG: ");
+ cur = strlen(log_buffer);
+ vsnprintf(log_buffer + cur, sizeof(log_buffer) - cur, fmt, args);
+ cur = strlen(log_buffer);
+ snprintf(log_buffer + cur, sizeof(log_buffer) - cur, "\n");
+ va_end(args);
+}
+
+void __wrap_info_int(const char *file, const char *function, const unsigned long line, const char *fmt, ...)
+{
+ (void)file;
+ (void)function;
+ (void)line;
+ va_list args;
+ va_start(args, fmt);
+ size_t cur = strlen(log_buffer);
+ snprintf(log_buffer + cur, sizeof(log_buffer) - cur, " INFO: ");
+ cur = strlen(log_buffer);
+ vsnprintf(log_buffer + cur, sizeof(log_buffer) - cur, fmt, args);
+ cur = strlen(log_buffer);
+ snprintf(log_buffer + cur, sizeof(log_buffer) - cur, "\n");
+ va_end(args);
+}
+
+void __wrap_error_int(
+ const char *prefix, const char *file, const char *function, const unsigned long line, const char *fmt, ...)
+{
+ (void)prefix;
+ (void)file;
+ (void)function;
+ (void)line;
+ va_list args;
+ va_start(args, fmt);
+ size_t cur = strlen(log_buffer);
+ snprintf(log_buffer + cur, sizeof(log_buffer) - cur, " ERROR: ");
+ cur = strlen(log_buffer);
+ vsnprintf(log_buffer + cur, sizeof(log_buffer) - cur, fmt, args);
+ cur = strlen(log_buffer);
+ snprintf(log_buffer + cur, sizeof(log_buffer) - cur, "\n");
+ va_end(args);
+}
+
+void __wrap_fatal_int(const char *file, const char *function, const unsigned long line, const char *fmt, ...)
+{
+ (void)file;
+ (void)function;
+ (void)line;
+ va_list args;
+ va_start(args, fmt);
+ printf("FATAL: ");
+ vprintf(fmt, args);
+ printf("\n");
+ va_end(args);
+ fail();
+}
+
+WEB_SERVER_MODE web_server_mode = WEB_SERVER_MODE_STATIC_THREADED;
+char *netdata_configured_web_dir = "UNKNOWN FIXME";
+RRDHOST *localhost = NULL;
+
+struct config netdata_config = { .first_section = NULL,
+ .last_section = NULL,
+ .mutex = NETDATA_MUTEX_INITIALIZER,
+ .index = { .avl_tree = { .root = NULL, .compar = appconfig_section_compare },
+ .rwlock = AVL_LOCK_INITIALIZER } };
+
+const char *http_headers[] = { "Host: 254.254.0.1",
+ "User-Agent: Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_" // No ,
+ "0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.70 Safari/537.36",
+ "Connection: keep-alive",
+ "X-Forwarded-For: 1.254.1.251",
+ "Cookie: _ga=GA1.1.1227576758.1571113676; _gid=GA1.2.1222321739.1573628979",
+ "X-Requested-With: XMLHttpRequest",
+ "Accept-Encoding: gzip, deflate",
+ "Cache-Control: no-cache, no-store" };
+#define MAX_HEADERS (sizeof(http_headers) / (sizeof(const char *)))
+
+static void build_request(struct web_buffer *wb, const char *url, bool use_cr, size_t num_headers)
+{
+ buffer_reset(wb);
+ buffer_strcat(wb, "GET ");
+ buffer_strcat(wb, url);
+ buffer_strcat(wb, " HTTP/1.1");
+ if (use_cr)
+ buffer_strcat(wb, "\r");
+ buffer_strcat(wb, "\n");
+ for (size_t i = 0; i < num_headers && i < MAX_HEADERS; i++) {
+ buffer_strcat(wb, http_headers[i]);
+ if (use_cr)
+ buffer_strcat(wb, "\r");
+ buffer_strcat(wb, "\n");
+ }
+ if (use_cr)
+ buffer_strcat(wb, "\r");
+ buffer_strcat(wb, "\n");
+}
+
+/* Note: this is not a CMocka group_test_setup/teardown pair. This is performed per-test.
+*/
+static struct web_client *setup_fresh_web_client()
+{
+ struct web_client *w = (struct web_client *)malloc(sizeof(struct web_client));
+ memset(w, 0, sizeof(struct web_client));
+ w->response.data = buffer_create(NETDATA_WEB_RESPONSE_INITIAL_SIZE);
+ w->response.data->date = 0; // Valgrind uninitialised value
+ w->response.data->expires = 0; // Valgrind uninitialised value
+ w->response.data->options = 0; // Valgrind uninitialised value
+ w->response.header = buffer_create(NETDATA_WEB_RESPONSE_HEADER_SIZE);
+ w->response.header_output = buffer_create(NETDATA_WEB_RESPONSE_HEADER_SIZE);
+ strcpy(w->origin, "*"); // Simulate web_client_create_on_fd()
+ w->cookie1[0] = 0; // Simulate web_client_create_on_fd()
+ w->cookie2[0] = 0; // Simulate web_client_create_on_fd()
+ w->acl = 0x1f; // Everything on
+ return w;
+}
+
+static void destroy_web_client(struct web_client *w)
+{
+ buffer_free(w->response.data);
+ buffer_free(w->response.header);
+ buffer_free(w->response.header_output);
+ free(w);
+}
+
+// ---------------------------------- Parameterized test-families -----------------------------------------------------
+// There is no way to pass a parameter block into the setup fixture; we would have to patch CMocka and maintain it
+// locally. (The void **current_state in _run_group_tests would be set from a parameter.) This is unfortunate, as a
+// parametric unit-tester needs to be able to pass parameters to the fixtures. We fake this by calculating the
+// space of tests in the launcher, passing an array of identical unit-tests to CMocka, and then counting through the
+// parameters in the shared state passed between tests. To initialise this counter structure, we use this global to
+// pass from the launcher (test-builder) to the setup fixture.
+
+void *shared_test_state = NULL;
+
+// -------------------------------- Test family for /api/v1/info ------------------------------------------------------
+
+struct test_def {
+ size_t num_headers; // Index coordinate
+ size_t prefix_len; // Index coordinate
+ char name[80];
+ size_t full_len;
+ struct web_client *instance; // Used within this single test
+ bool completed, use_cr;
+ struct test_def *next, *prev;
+};
+
+static void api_info(void **state)
+{
+ (void)state;
+ struct test_def *def = (struct test_def *)shared_test_state;
+ shared_test_state = def->next;
+
+ if (def->prev != NULL && !def->prev->completed && strlen(log_buffer) > 0) {
+ printf("Log of failing case %s:\n", def->prev->name);
+ puts(log_buffer);
+ }
+ log_buffer[0] = 0;
+ if (localhost != NULL)
+ free(localhost);
+ localhost = calloc(1,sizeof(RRDHOST));
+
+ def->instance = setup_fresh_web_client();
+ build_request(def->instance->response.data, "/api/v1/info", def->use_cr, def->num_headers);
+ def->instance->response.data->len = def->prefix_len;
+
+ char buffer_repr[1024];
+ repr(buffer_repr, sizeof(buffer_repr), def->instance->response.data->buffer, def->prefix_len);
+ info("Buffer contains: %s [first %zu]", buffer_repr, def->prefix_len);
+ if (def->prefix_len == def->full_len) {
+ expect_value(__wrap_web_client_api_request_v1, host, localhost);
+ expect_value(__wrap_web_client_api_request_v1, w, def->instance);
+ expect_string(__wrap_web_client_api_request_v1, url_repr, "info");
+ }
+
+ web_client_process_request(def->instance);
+
+ if (def->prefix_len == def->full_len)
+ assert_int_equal(def->instance->flags & WEB_CLIENT_FLAG_WAIT_RECEIVE, 0);
+ else
+ assert_int_equal(def->instance->flags & WEB_CLIENT_FLAG_WAIT_RECEIVE, WEB_CLIENT_FLAG_WAIT_RECEIVE);
+ assert_int_equal(def->instance->mode, WEB_CLIENT_MODE_NORMAL);
+ def->completed = true;
+ log_buffer[0] = 0;
+}
+
+static int api_info_launcher()
+{
+ size_t num_tests = 0;
+ struct web_client *template = setup_fresh_web_client();
+ struct test_def *current, *head = NULL;
+ struct test_def *prev = NULL;
+
+ for (size_t i = 0; i < MAX_HEADERS; i++) {
+ build_request(template->response.data, "/api/v1/info", true, i);
+ for (size_t j = 0; j <= template->response.data->len; j++) {
+ if (j == 0 && i > 0)
+ continue; // All zero-length prefixes are identical, skip after first time
+ current = malloc(sizeof(struct test_def));
+ if (prev != NULL)
+ prev->next = current;
+ else
+ head = current;
+ current->prev = prev;
+ prev = current;
+
+ current->num_headers = i;
+ current->prefix_len = j;
+ current->full_len = template->response.data->len;
+ current->instance = NULL;
+ current->next = NULL;
+ current->use_cr = true;
+ current->completed = false;
+ sprintf(
+ current->name, "/api/v1/info@%zu,%zu/%zu+%d", current->num_headers, current->prefix_len,
+ current->full_len,true);
+ num_tests++;
+ }
+ }
+ for (size_t i = 0; i < MAX_HEADERS; i++) {
+ build_request(template->response.data, "/api/v1/info", false, i);
+ for (size_t j = 0; j <= template->response.data->len; j++) {
+ if (j == 0 && i > 0)
+ continue; // All zero-length prefixes are identical, skip after first time
+ current = malloc(sizeof(struct test_def));
+ if (prev != NULL)
+ prev->next = current;
+ else
+ head = current;
+ current->prev = prev;
+ prev = current;
+
+ current->num_headers = i;
+ current->prefix_len = j;
+ current->full_len = template->response.data->len;
+ current->instance = NULL;
+ current->next = NULL;
+ current->use_cr = false;
+ current->completed = false;
+ sprintf(
+ current->name, "/api/v1/info@%zu,%zu/%zu+%d", current->num_headers, current->prefix_len,
+ current->full_len,false);
+ num_tests++;
+ }
+ }
+
+ struct CMUnitTest *tests = calloc(num_tests, sizeof(struct CMUnitTest));
+ current = head;
+ for (size_t i = 0; i < num_tests; i++) {
+ tests[i].name = current->name;
+ tests[i].test_func = api_info;
+ tests[i].setup_func = NULL;
+ tests[i].teardown_func = NULL;
+ tests[i].initial_state = NULL;
+ current = current->next;
+ }
+
+ printf("Setup %zu tests in %p\n", num_tests, head);
+ shared_test_state = head;
+ int fails = _cmocka_run_group_tests("web_api", tests, num_tests, NULL, NULL);
+ free(tests);
+ destroy_web_client(template);
+ current = head;
+ while (current != NULL) {
+ struct test_def *c = current;
+ current = current->next;
+ if (c->instance != NULL) // Clean up resources from tests that failed
+ destroy_web_client(c->instance);
+ free(c);
+ }
+ if (localhost != NULL)
+ free(localhost);
+ return fails;
+}
+
+/* Raw notes for the cases that we did not use in the unit testing suite.
+ Leaving them here instead of deleting them, in case we expand the suite during the
+ work on the URL parser.
+
+ Any ' ' in the URI -> invalid response (Description in 5.1 of RFC2616)
+ Characters that can't be in paths #;?
+ "GET /apb/../api/v1/info" HTTP/1.1\r\n"
+
+ https://github.com/uriparser/uriparser/blob/uriparser-0.9.3/test/FourSuite.cpp
+ Not clear why some of these are illegal -> reserved chars?
+
+ ASSERT_TRUE(testBadUri("beepbeep\x07\x07", 8));
+ ASSERT_TRUE(testBadUri("\n", 0));
+ ASSERT_TRUE(testBadUri("::", 0)); // not OK, per Roy Fielding on the W3C uri list on 2004-04-01
+
+ // the following test cases are from a Perl script by David A. Wheeler
+ // at http://www.dwheeler.com/secure-programs/url.pl
+ ASSERT_TRUE(testBadUri("http://www yahoo.com", 10));
+ ASSERT_TRUE(testBadUri("http://www.yahoo.com/hello world/", 26));
+ ASSERT_TRUE(testBadUri("http://www.yahoo.com/yelp.html#\"", 31));
+
+ // the following test cases are from a Haskell program by Graham Klyne
+ // at http://www.ninebynine.org/Software/HaskellUtils/Network/URITest.hs
+ ASSERT_TRUE(testBadUri("[2010:836B:4179::836B:4179]", 0));
+ ASSERT_TRUE(testBadUri(" ", 0));
+ ASSERT_TRUE(testBadUri("%", 1));
+ ASSERT_TRUE(testBadUri("A%Z", 2));
+ ASSERT_TRUE(testBadUri("%ZZ", 1));
+ ASSERT_TRUE(testBadUri("%AZ", 2));
+ ASSERT_TRUE(testBadUri("A C", 1));
+ ASSERT_TRUE(testBadUri("A\\'C", 1)); // r"A\'C"
+ ASSERT_TRUE(testBadUri("A`C", 1));
+ ASSERT_TRUE(testBadUri("A<C", 1));
+ ASSERT_TRUE(testBadUri("A>C", 1));
+ ASSERT_TRUE(testBadUri("A^C", 1));
+ ASSERT_TRUE(testBadUri("A\\\\C", 1)); // r'A\\C'
+ ASSERT_TRUE(testBadUri("A{C", 1));
+ ASSERT_TRUE(testBadUri("A|C", 1));
+ ASSERT_TRUE(testBadUri("A}C", 1));
+ ASSERT_TRUE(testBadUri("A[C", 1));
+ ASSERT_TRUE(testBadUri("A]C", 1));
+ ASSERT_TRUE(testBadUri("A[**]C", 1));
+ ASSERT_TRUE(testBadUri("http://[xyz]/", 8));
+ ASSERT_TRUE(testBadUri("http://]/", 7));
+ ASSERT_TRUE(testBadUri("http://example.org/[2010:836B:4179::836B:4179]", 19));
+ ASSERT_TRUE(testBadUri("http://example.org/abc#[2010:836B:4179::836B:4179]", 23));
+ ASSERT_TRUE(testBadUri("http://example.org/xxx/[qwerty]#a[b]", 23));
+
+ // from a post to the W3C uri list on 2004-02-17
+ // breaks at 22 instead of 17 because everything up to that point is a valid userinfo
+ ASSERT_TRUE(testBadUri("http://w3c.org:80path1/path2", 22));
+
+*/
+
+int main(void)
+{
+ debug_flags = 0xffffffffffff;
+ int fails = 0;
+ fails += api_info_launcher();
+
+ return fails;
+}
diff --git a/web/api/web_api_v1.c b/web/api/web_api_v1.c
new file mode 100644
index 0000000..73ac15d
--- /dev/null
+++ b/web/api/web_api_v1.c
@@ -0,0 +1,1063 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "web_api_v1.h"
+
+char *api_secret;
+
+static struct {
+ const char *name;
+ uint32_t hash;
+ RRDR_OPTIONS value;
+} api_v1_data_options[] = {
+ { "nonzero" , 0 , RRDR_OPTION_NONZERO}
+ , {"flip" , 0 , RRDR_OPTION_REVERSED}
+ , {"reversed" , 0 , RRDR_OPTION_REVERSED}
+ , {"reverse" , 0 , RRDR_OPTION_REVERSED}
+ , {"jsonwrap" , 0 , RRDR_OPTION_JSON_WRAP}
+ , {"min2max" , 0 , RRDR_OPTION_MIN2MAX}
+ , {"ms" , 0 , RRDR_OPTION_MILLISECONDS}
+ , {"milliseconds" , 0 , RRDR_OPTION_MILLISECONDS}
+ , {"abs" , 0 , RRDR_OPTION_ABSOLUTE}
+ , {"absolute" , 0 , RRDR_OPTION_ABSOLUTE}
+ , {"absolute_sum" , 0 , RRDR_OPTION_ABSOLUTE}
+ , {"absolute-sum" , 0 , RRDR_OPTION_ABSOLUTE}
+ , {"display_absolute", 0 , RRDR_OPTION_DISPLAY_ABS}
+ , {"display-absolute", 0 , RRDR_OPTION_DISPLAY_ABS}
+ , {"seconds" , 0 , RRDR_OPTION_SECONDS}
+ , {"null2zero" , 0 , RRDR_OPTION_NULL2ZERO}
+ , {"objectrows" , 0 , RRDR_OPTION_OBJECTSROWS}
+ , {"google_json" , 0 , RRDR_OPTION_GOOGLE_JSON}
+ , {"google-json" , 0 , RRDR_OPTION_GOOGLE_JSON}
+ , {"percentage" , 0 , RRDR_OPTION_PERCENTAGE}
+ , {"unaligned" , 0 , RRDR_OPTION_NOT_ALIGNED}
+ , {"match_ids" , 0 , RRDR_OPTION_MATCH_IDS}
+ , {"match-ids" , 0 , RRDR_OPTION_MATCH_IDS}
+ , {"match_names" , 0 , RRDR_OPTION_MATCH_NAMES}
+ , {"match-names" , 0 , RRDR_OPTION_MATCH_NAMES}
+ , {"showcustomvars" , 0 , RRDR_OPTION_CUSTOM_VARS}
+ , {"allow_past" , 0 , RRDR_OPTION_ALLOW_PAST}
+ , { NULL, 0, 0}
+};
+
+static struct {
+ const char *name;
+ uint32_t hash;
+ uint32_t value;
+} api_v1_data_formats[] = {
+ { DATASOURCE_FORMAT_DATATABLE_JSON , 0 , DATASOURCE_DATATABLE_JSON}
+ , {DATASOURCE_FORMAT_DATATABLE_JSONP, 0 , DATASOURCE_DATATABLE_JSONP}
+ , {DATASOURCE_FORMAT_JSON , 0 , DATASOURCE_JSON}
+ , {DATASOURCE_FORMAT_JSONP , 0 , DATASOURCE_JSONP}
+ , {DATASOURCE_FORMAT_SSV , 0 , DATASOURCE_SSV}
+ , {DATASOURCE_FORMAT_CSV , 0 , DATASOURCE_CSV}
+ , {DATASOURCE_FORMAT_TSV , 0 , DATASOURCE_TSV}
+ , {"tsv-excel" , 0 , DATASOURCE_TSV}
+ , {DATASOURCE_FORMAT_HTML , 0 , DATASOURCE_HTML}
+ , {DATASOURCE_FORMAT_JS_ARRAY , 0 , DATASOURCE_JS_ARRAY}
+ , {DATASOURCE_FORMAT_SSV_COMMA , 0 , DATASOURCE_SSV_COMMA}
+ , {DATASOURCE_FORMAT_CSV_JSON_ARRAY , 0 , DATASOURCE_CSV_JSON_ARRAY}
+ , {DATASOURCE_FORMAT_CSV_MARKDOWN , 0 , DATASOURCE_CSV_MARKDOWN}
+ , { NULL, 0, 0}
+};
+
+static struct {
+ const char *name;
+ uint32_t hash;
+ uint32_t value;
+} api_v1_data_google_formats[] = {
+ // this is not an error - when Google requests JSON, it expects JavaScript
+ // https://developers.google.com/chart/interactive/docs/dev/implementing_data_source#responseformat
+ { "json" , 0 , DATASOURCE_DATATABLE_JSONP}
+ , {"html" , 0 , DATASOURCE_HTML}
+ , {"csv" , 0 , DATASOURCE_CSV}
+ , {"tsv-excel", 0 , DATASOURCE_TSV}
+ , { NULL, 0, 0}
+};
+
+void web_client_api_v1_init(void) {
+ int i;
+
+ for(i = 0; api_v1_data_options[i].name ; i++)
+ api_v1_data_options[i].hash = simple_hash(api_v1_data_options[i].name);
+
+ for(i = 0; api_v1_data_formats[i].name ; i++)
+ api_v1_data_formats[i].hash = simple_hash(api_v1_data_formats[i].name);
+
+ for(i = 0; api_v1_data_google_formats[i].name ; i++)
+ api_v1_data_google_formats[i].hash = simple_hash(api_v1_data_google_formats[i].name);
+
+ web_client_api_v1_init_grouping();
+
+ uuid_t uuid;
+
+ // generate a uuid and unparse it to a string; the string itself is unused,
+ // this only exercises the uuid code once during initialization
+ uuid_generate(uuid);
+ char uuid_str[37];
+ uuid_unparse_lower(uuid, uuid_str);
+}
+
+char *get_mgmt_api_key(void) {
+ char filename[FILENAME_MAX + 1];
+ snprintfz(filename, FILENAME_MAX, "%s/netdata.api.key", netdata_configured_varlib_dir);
+ char *api_key_filename = config_get(CONFIG_SECTION_REGISTRY, "netdata management api key file", filename);
+ static char guid[GUID_LEN + 1] = "";
+
+ if(likely(guid[0]))
+ return guid;
+
+ // read it from disk
+ int fd = open(api_key_filename, O_RDONLY);
+ if(fd != -1) {
+ char buf[GUID_LEN + 1];
+ if(read(fd, buf, GUID_LEN) != GUID_LEN)
+ error("Failed to read management API key from '%s'", api_key_filename);
+ else {
+ buf[GUID_LEN] = '\0';
+ if(regenerate_guid(buf, guid) == -1) {
+ error("Failed to validate management API key '%s' from '%s'.",
+ buf, api_key_filename);
+
+ guid[0] = '\0';
+ }
+ }
+ close(fd);
+ }
+
+ // generate a new one?
+ if(!guid[0]) {
+ uuid_t uuid;
+
+ uuid_generate_time(uuid);
+ uuid_unparse_lower(uuid, guid);
+ guid[GUID_LEN] = '\0';
+
+ // save it (the mode must be octal 0444, not decimal 444)
+ fd = open(api_key_filename, O_WRONLY|O_CREAT|O_TRUNC, 0444);
+ if(fd == -1)
+ fatal("Cannot create unique management API key file '%s'. Please fix this.", api_key_filename);
+
+ if(write(fd, guid, GUID_LEN) != GUID_LEN)
+ fatal("Cannot write the unique management API key file '%s'. Please fix this.", api_key_filename);
+
+ close(fd);
+ }
+
+ return guid;
+}
+
+void web_client_api_v1_management_init(void) {
+ api_secret = get_mgmt_api_key();
+}
+
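+// Parse a comma/space/pipe-separated list of option names into an
+// RRDR_OPTIONS bitmask. Illustrative: "nonzero|jsonwrap,ms" yields
+// RRDR_OPTION_NONZERO | RRDR_OPTION_JSON_WRAP | RRDR_OPTION_MILLISECONDS.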
+inline uint32_t web_client_api_request_v1_data_options(char *o) {
+ uint32_t ret = 0x00000000;
+ char *tok;
+
+ while(o && *o && (tok = mystrsep(&o, ", |"))) {
+ if(!*tok) continue;
+
+ uint32_t hash = simple_hash(tok);
+ int i;
+ for(i = 0; api_v1_data_options[i].name ; i++) {
+ if (unlikely(hash == api_v1_data_options[i].hash && !strcmp(tok, api_v1_data_options[i].name))) {
+ ret |= api_v1_data_options[i].value;
+ break;
+ }
+ }
+ }
+
+ return ret;
+}
+
+inline uint32_t web_client_api_request_v1_data_format(char *name) {
+ uint32_t hash = simple_hash(name);
+ int i;
+
+ for(i = 0; api_v1_data_formats[i].name ; i++) {
+ if (unlikely(hash == api_v1_data_formats[i].hash && !strcmp(name, api_v1_data_formats[i].name))) {
+ return api_v1_data_formats[i].value;
+ }
+ }
+
+ return DATASOURCE_JSON;
+}
+
+inline uint32_t web_client_api_request_v1_data_google_format(char *name) {
+ uint32_t hash = simple_hash(name);
+ int i;
+
+ for(i = 0; api_v1_data_google_formats[i].name ; i++) {
+ if (unlikely(hash == api_v1_data_google_formats[i].hash && !strcmp(name, api_v1_data_google_formats[i].name))) {
+ return api_v1_data_google_formats[i].value;
+ }
+ }
+
+ return DATASOURCE_JSON;
+}
+
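+// Illustrative: "/api/v1/alarms?all" selects every configured alarm, while
+// "?active" (or no parameter at all) selects only the currently raised ones.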
+int web_client_api_request_v1_alarms_select(char *url) {
+ int all = 0;
+ while(url) {
+ char *value = mystrsep(&url, "&");
+ if (!value || !*value) continue;
+
+ if(!strcmp(value, "all")) all = 1;
+ else if(!strcmp(value, "active")) all = 0;
+ }
+
+ return all;
+}
+
+inline int web_client_api_request_v1_alarms(RRDHOST *host, struct web_client *w, char *url) {
+ int all = web_client_api_request_v1_alarms_select(url);
+
+ buffer_flush(w->response.data);
+ w->response.data->contenttype = CT_APPLICATION_JSON;
+ health_alarms2json(host, w->response.data, all);
+ buffer_no_cacheable(w->response.data);
+ return HTTP_RESP_OK;
+}
+
+inline int web_client_api_request_v1_alarms_values(RRDHOST *host, struct web_client *w, char *url) {
+ int all = web_client_api_request_v1_alarms_select(url);
+
+ buffer_flush(w->response.data);
+ w->response.data->contenttype = CT_APPLICATION_JSON;
+ health_alarms_values2json(host, w->response.data, all);
+ buffer_no_cacheable(w->response.data);
+ return HTTP_RESP_OK;
+}
+
+inline int web_client_api_request_v1_alarm_count(RRDHOST *host, struct web_client *w, char *url) {
+ RRDCALC_STATUS status = RRDCALC_STATUS_RAISED;
+ BUFFER *contexts = NULL;
+
+ buffer_flush(w->response.data);
+ buffer_sprintf(w->response.data, "[");
+
+ while(url) {
+ char *value = mystrsep(&url, "&");
+ if(!value || !*value) continue;
+
+ char *name = mystrsep(&value, "=");
+ if(!name || !*name) continue;
+ if(!value || !*value) continue;
+
+ debug(D_WEB_CLIENT, "%llu: API v1 alarm_count query param '%s' with value '%s'", w->id, name, value);
+
+ char* p = value;
+ if(!strcmp(name, "status")) {
+ while ((*p = (char)toupper((unsigned char)*p))) p++;
+ if (!strcmp("CRITICAL", value)) status = RRDCALC_STATUS_CRITICAL;
+ else if (!strcmp("WARNING", value)) status = RRDCALC_STATUS_WARNING;
+ else if (!strcmp("UNINITIALIZED", value)) status = RRDCALC_STATUS_UNINITIALIZED;
+ else if (!strcmp("UNDEFINED", value)) status = RRDCALC_STATUS_UNDEFINED;
+ else if (!strcmp("REMOVED", value)) status = RRDCALC_STATUS_REMOVED;
+ else if (!strcmp("CLEAR", value)) status = RRDCALC_STATUS_CLEAR;
+ }
+ else if(!strcmp(name, "context") || !strcmp(name, "ctx")) {
+ if(!contexts) contexts = buffer_create(255);
+ buffer_strcat(contexts, "|");
+ buffer_strcat(contexts, value);
+ }
+ }
+
+ health_aggregate_alarms(host, w->response.data, contexts, status);
+
+ buffer_sprintf(w->response.data, "]\n");
+ w->response.data->contenttype = CT_APPLICATION_JSON;
+ buffer_no_cacheable(w->response.data);
+
+ buffer_free(contexts);
+ return HTTP_RESP_OK;
+}
+
+inline int web_client_api_request_v1_alarm_log(RRDHOST *host, struct web_client *w, char *url) {
+ uint32_t after = 0;
+
+ while(url) {
+ char *value = mystrsep(&url, "&");
+ if (!value || !*value) continue;
+
+ char *name = mystrsep(&value, "=");
+ if(!name || !*name) continue;
+ if(!value || !*value) continue;
+
+ if(!strcmp(name, "after")) after = (uint32_t)strtoul(value, NULL, 0);
+ }
+
+ buffer_flush(w->response.data);
+ w->response.data->contenttype = CT_APPLICATION_JSON;
+ health_alarm_log2json(host, w->response.data, after);
+ return HTTP_RESP_OK;
+}
+
+inline int web_client_api_request_single_chart(RRDHOST *host, struct web_client *w, char *url, void callback(RRDSET *st, BUFFER *buf)) {
+ int ret = HTTP_RESP_BAD_REQUEST;
+ char *chart = NULL;
+
+ buffer_flush(w->response.data);
+
+ while(url) {
+ char *value = mystrsep(&url, "&");
+ if(!value || !*value) continue;
+
+ char *name = mystrsep(&value, "=");
+ if(!name || !*name) continue;
+ if(!value || !*value) continue;
+
+ // name and value are now the parameters
+ // they are not null and not empty
+
+ if(!strcmp(name, "chart")) chart = value;
+ //else {
+ /// buffer_sprintf(w->response.data, "Unknown parameter '%s' in request.", name);
+ // goto cleanup;
+ //}
+ }
+
+ if(!chart || !*chart) {
+ buffer_sprintf(w->response.data, "No chart id is given at the request.");
+ goto cleanup;
+ }
+
+ RRDSET *st = rrdset_find(host, chart);
+ if(!st) st = rrdset_find_byname(host, chart);
+ if(!st) {
+ buffer_strcat(w->response.data, "Chart is not found: ");
+ buffer_strcat_htmlescape(w->response.data, chart);
+ ret = HTTP_RESP_NOT_FOUND;
+ goto cleanup;
+ }
+
+ w->response.data->contenttype = CT_APPLICATION_JSON;
+ st->last_accessed_time = now_realtime_sec();
+ callback(st, w->response.data);
+ return HTTP_RESP_OK;
+
+ cleanup:
+ return ret;
+}
+
+inline int web_client_api_request_v1_alarm_variables(RRDHOST *host, struct web_client *w, char *url) {
+ return web_client_api_request_single_chart(host, w, url, health_api_v1_chart_variables2json);
+}
+
+inline int web_client_api_request_v1_charts(RRDHOST *host, struct web_client *w, char *url) {
+ (void)url;
+
+ buffer_flush(w->response.data);
+ w->response.data->contenttype = CT_APPLICATION_JSON;
+ charts2json(host, w->response.data, 0, 0);
+ return HTTP_RESP_OK;
+}
+
+inline int web_client_api_request_v1_archivedcharts(RRDHOST *host __maybe_unused, struct web_client *w, char *url) {
+ (void)url;
+
+ buffer_flush(w->response.data);
+ w->response.data->contenttype = CT_APPLICATION_JSON;
+#ifdef ENABLE_DBENGINE
+ if (host->rrd_memory_mode == RRD_MEMORY_MODE_DBENGINE)
+ sql_rrdset2json(host, w->response.data);
+#endif
+ return HTTP_RESP_OK;
+}
+
+inline int web_client_api_request_v1_chart(RRDHOST *host, struct web_client *w, char *url) {
+ return web_client_api_request_single_chart(host, w, url, rrd_stats_api_v1_chart);
+}
+
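+// Sanitize a Google Visualization API parameter in place. Illustrative:
+// "my;handler" becomes "my_handler" - anything that is not alphanumeric,
+// '.', '_' or '-' is replaced with '_'.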
+void fix_google_param(char *s) {
+ if(unlikely(!s)) return;
+
+ for( ; *s ;s++) {
+ if(!isalnum(*s) && *s != '.' && *s != '_' && *s != '-')
+ *s = '_';
+ }
+}
+
+// returns the HTTP code
+inline int web_client_api_request_v1_data(RRDHOST *host, struct web_client *w, char *url) {
+ debug(D_WEB_CLIENT, "%llu: API v1 data with URL '%s'", w->id, url);
+
+ int ret = HTTP_RESP_BAD_REQUEST;
+ BUFFER *dimensions = NULL;
+
+ buffer_flush(w->response.data);
+
+ char *google_version = "0.6",
+ *google_reqId = "0",
+ *google_sig = "0",
+ *google_out = "json",
+ *responseHandler = NULL,
+ *outFileName = NULL;
+
+ time_t last_timestamp_in_data = 0, google_timestamp = 0;
+
+ char *chart = NULL
+ , *before_str = NULL
+ , *after_str = NULL
+ , *group_time_str = NULL
+ , *points_str = NULL
+ , *context = NULL
+ , *chart_label_key = NULL;
+
+ int group = RRDR_GROUPING_AVERAGE;
+ uint32_t format = DATASOURCE_JSON;
+ uint32_t options = 0x00000000;
+
+ while(url) {
+ char *value = mystrsep(&url, "&");
+ if(!value || !*value) continue;
+
+ char *name = mystrsep(&value, "=");
+ if(!name || !*name) continue;
+ if(!value || !*value) continue;
+
+ debug(D_WEB_CLIENT, "%llu: API v1 data query param '%s' with value '%s'", w->id, name, value);
+
+ // name and value are now the parameters
+ // they are not null and not empty
+
+ if(!strcmp(name, "context")) context = value;
+ else if(!strcmp(name, "chart_label_key")) chart_label_key = value;
+ else if(!strcmp(name, "chart")) chart = value;
+ else if(!strcmp(name, "dimension") || !strcmp(name, "dim") || !strcmp(name, "dimensions") || !strcmp(name, "dims")) {
+ if(!dimensions) dimensions = buffer_create(100);
+ buffer_strcat(dimensions, "|");
+ buffer_strcat(dimensions, value);
+ }
+ else if(!strcmp(name, "after")) after_str = value;
+ else if(!strcmp(name, "before")) before_str = value;
+ else if(!strcmp(name, "points")) points_str = value;
+ else if(!strcmp(name, "gtime")) group_time_str = value;
+ else if(!strcmp(name, "group")) {
+ group = web_client_api_request_v1_data_group(value, RRDR_GROUPING_AVERAGE);
+ }
+ else if(!strcmp(name, "format")) {
+ format = web_client_api_request_v1_data_format(value);
+ }
+ else if(!strcmp(name, "options")) {
+ options |= web_client_api_request_v1_data_options(value);
+ }
+ else if(!strcmp(name, "callback")) {
+ responseHandler = value;
+ }
+ else if(!strcmp(name, "filename")) {
+ outFileName = value;
+ }
+ else if(!strcmp(name, "tqx")) {
+ // parse Google Visualization API options
+ // https://developers.google.com/chart/interactive/docs/dev/implementing_data_source
+ char *tqx_name, *tqx_value;
+
+ while(value) {
+ tqx_value = mystrsep(&value, ";");
+ if(!tqx_value || !*tqx_value) continue;
+
+ tqx_name = mystrsep(&tqx_value, ":");
+ if(!tqx_name || !*tqx_name) continue;
+ if(!tqx_value || !*tqx_value) continue;
+
+ if(!strcmp(tqx_name, "version"))
+ google_version = tqx_value;
+ else if(!strcmp(tqx_name, "reqId"))
+ google_reqId = tqx_value;
+ else if(!strcmp(tqx_name, "sig")) {
+ google_sig = tqx_value;
+ google_timestamp = strtoul(google_sig, NULL, 0);
+ }
+ else if(!strcmp(tqx_name, "out")) {
+ google_out = tqx_value;
+ format = web_client_api_request_v1_data_google_format(google_out);
+ }
+ else if(!strcmp(tqx_name, "responseHandler"))
+ responseHandler = tqx_value;
+ else if(!strcmp(tqx_name, "outFileName"))
+ outFileName = tqx_value;
+ }
+ }
+ }
+
+ // validate the google parameters given
+ fix_google_param(google_out);
+ fix_google_param(google_sig);
+ fix_google_param(google_reqId);
+ fix_google_param(google_version);
+ fix_google_param(responseHandler);
+ fix_google_param(outFileName);
+
+ RRDSET *st = NULL;
+
+ if((!chart || !*chart) && (!context)) {
+ buffer_sprintf(w->response.data, "No chart id is given at the request.");
+ goto cleanup;
+ }
+
+ struct context_param *context_param_list = NULL;
+ if (context && !chart) {
+ RRDSET *st1;
+ uint32_t context_hash = simple_hash(context);
+
+ rrdhost_rdlock(host);
+ rrdset_foreach_read(st1, host) {
+ if (st1->hash_context == context_hash && !strcmp(st1->context, context) &&
+ (!chart_label_key || rrdset_contains_label_keylist(st1, chart_label_key)))
+ build_context_param_list(&context_param_list, st1);
+ }
+ rrdhost_unlock(host);
+ if (likely(context_param_list && context_param_list->rd)) // Just set the first one
+ st = context_param_list->rd->rrdset;
+ }
+ else {
+ st = rrdset_find(host, chart);
+ if (!st)
+ st = rrdset_find_byname(host, chart);
+ if (likely(st))
+ st->last_accessed_time = now_realtime_sec();
+ }
+
+ if (!st && !context_param_list) {
+ if (context && !chart) {
+ if (!chart_label_key) {
+ buffer_strcat(w->response.data, "Context is not found: ");
+ buffer_strcat_htmlescape(w->response.data, context);
+ } else {
+ buffer_strcat(w->response.data, "Context: ");
+ buffer_strcat_htmlescape(w->response.data, context);
+ buffer_strcat(w->response.data, " or chart label key: ");
+ buffer_strcat_htmlescape(w->response.data, chart_label_key);
+ buffer_strcat(w->response.data, " not found");
+ }
+ }
+ else {
+ buffer_strcat(w->response.data, "Chart is not found: ");
+ buffer_strcat_htmlescape(w->response.data, chart);
+ }
+ ret = HTTP_RESP_NOT_FOUND;
+ goto cleanup;
+ }
+
+ long long before = (before_str && *before_str)?str2l(before_str):0;
+ long long after = (after_str && *after_str) ?str2l(after_str):-600;
+ int points = (points_str && *points_str)?str2i(points_str):0;
+ long group_time = (group_time_str && *group_time_str)?str2l(group_time_str):0;
+
+ debug(D_WEB_CLIENT, "%llu: API command 'data' for chart '%s', dimensions '%s', after '%lld', before '%lld', points '%d', group '%d', format '%u', options '0x%08x'"
+ , w->id
+ , chart
+ , (dimensions)?buffer_tostring(dimensions):""
+ , after
+ , before
+ , points
+ , group
+ , format
+ , options
+ );
+
+ if(outFileName && *outFileName) {
+ buffer_sprintf(w->response.header, "Content-Disposition: attachment; filename=\"%s\"\r\n", outFileName);
+ debug(D_WEB_CLIENT, "%llu: generating outfilename header: '%s'", w->id, outFileName);
+ }
+
+ if(format == DATASOURCE_DATATABLE_JSONP) {
+ if(responseHandler == NULL)
+ responseHandler = "google.visualization.Query.setResponse";
+
+ debug(D_WEB_CLIENT_ACCESS, "%llu: GOOGLE JSON/JSONP: version = '%s', reqId = '%s', sig = '%s', out = '%s', responseHandler = '%s', outFileName = '%s'",
+ w->id, google_version, google_reqId, google_sig, google_out, responseHandler, outFileName
+ );
+
+ buffer_sprintf(w->response.data,
+ "%s({version:'%s',reqId:'%s',status:'ok',sig:'%ld',table:",
+ responseHandler, google_version, google_reqId, st->last_updated.tv_sec);
+ }
+ else if(format == DATASOURCE_JSONP) {
+ if(responseHandler == NULL)
+ responseHandler = "callback";
+
+ buffer_strcat(w->response.data, responseHandler);
+ buffer_strcat(w->response.data, "(");
+ }
+
+ ret = rrdset2anything_api_v1(st, w->response.data, dimensions, format, points, after, before, group, group_time
+ , options, &last_timestamp_in_data, context_param_list, chart_label_key);
+
+ free_context_param_list(&context_param_list);
+
+ if(format == DATASOURCE_DATATABLE_JSONP) {
+ if(google_timestamp < last_timestamp_in_data)
+ buffer_strcat(w->response.data, "});");
+
+ else {
+ // the client already has the latest data
+ buffer_flush(w->response.data);
+ buffer_sprintf(w->response.data,
+ "%s({version:'%s',reqId:'%s',status:'error',errors:[{reason:'not_modified',message:'Data not modified'}]});",
+ responseHandler, google_version, google_reqId);
+ }
+ }
+ else if(format == DATASOURCE_JSONP)
+ buffer_strcat(w->response.data, ");");
+
+ cleanup:
+ buffer_free(dimensions);
+ return ret;
+}
+
+// Pings a netdata server:
+// /api/v1/registry?action=hello
+//
+// Access to a netdata registry:
+// /api/v1/registry?action=access&machine=${machine_guid}&name=${hostname}&url=${url}
+//
+// Delete from a netdata registry:
+// /api/v1/registry?action=delete&machine=${machine_guid}&name=${hostname}&url=${url}&delete_url=${delete_url}
+//
+// Search for the URLs of a machine:
+// /api/v1/registry?action=search&machine=${machine_guid}&name=${hostname}&url=${url}&for=${machine_guid}
+//
+// Impersonate:
+// /api/v1/registry?action=switch&machine=${machine_guid}&name=${hostname}&url=${url}&to=${new_person_guid}
+inline int web_client_api_request_v1_registry(RRDHOST *host, struct web_client *w, char *url) {
+ static uint32_t hash_action = 0, hash_access = 0, hash_hello = 0, hash_delete = 0, hash_search = 0,
+ hash_switch = 0, hash_machine = 0, hash_url = 0, hash_name = 0, hash_delete_url = 0, hash_for = 0,
+ hash_to = 0 /*, hash_redirects = 0 */;
+
+ if(unlikely(!hash_action)) {
+ hash_action = simple_hash("action");
+ hash_access = simple_hash("access");
+ hash_hello = simple_hash("hello");
+ hash_delete = simple_hash("delete");
+ hash_search = simple_hash("search");
+ hash_switch = simple_hash("switch");
+ hash_machine = simple_hash("machine");
+ hash_url = simple_hash("url");
+ hash_name = simple_hash("name");
+ hash_delete_url = simple_hash("delete_url");
+ hash_for = simple_hash("for");
+ hash_to = simple_hash("to");
+/*
+ hash_redirects = simple_hash("redirects");
+*/
+ }
+
+ char person_guid[GUID_LEN + 1] = "";
+
+ debug(D_WEB_CLIENT, "%llu: API v1 registry with URL '%s'", w->id, url);
+
+ // TODO
+ // The browser may send multiple cookies with our id
+
+ char *cookie = strstr(w->response.data->buffer, NETDATA_REGISTRY_COOKIE_NAME "=");
+ if(cookie)
+ strncpyz(person_guid, &cookie[sizeof(NETDATA_REGISTRY_COOKIE_NAME)], 36);
+
+ char action = '\0';
+ char *machine_guid = NULL,
+ *machine_url = NULL,
+ *url_name = NULL,
+ *search_machine_guid = NULL,
+ *delete_url = NULL,
+ *to_person_guid = NULL;
+/*
+ int redirects = 0;
+*/
+
+ // Don't cache registry responses
+ buffer_no_cacheable(w->response.data);
+
+ while(url) {
+ char *value = mystrsep(&url, "&");
+ if (!value || !*value) continue;
+
+ char *name = mystrsep(&value, "=");
+ if (!name || !*name) continue;
+ if (!value || !*value) continue;
+
+ debug(D_WEB_CLIENT, "%llu: API v1 registry query param '%s' with value '%s'", w->id, name, value);
+
+ uint32_t hash = simple_hash(name);
+
+ if(hash == hash_action && !strcmp(name, "action")) {
+ uint32_t vhash = simple_hash(value);
+
+ if(vhash == hash_access && !strcmp(value, "access")) action = 'A';
+ else if(vhash == hash_hello && !strcmp(value, "hello")) action = 'H';
+ else if(vhash == hash_delete && !strcmp(value, "delete")) action = 'D';
+ else if(vhash == hash_search && !strcmp(value, "search")) action = 'S';
+ else if(vhash == hash_switch && !strcmp(value, "switch")) action = 'W';
+#ifdef NETDATA_INTERNAL_CHECKS
+ else error("unknown registry action '%s'", value);
+#endif /* NETDATA_INTERNAL_CHECKS */
+ }
+/*
+ else if(hash == hash_redirects && !strcmp(name, "redirects"))
+ redirects = atoi(value);
+*/
+ else if(hash == hash_machine && !strcmp(name, "machine"))
+ machine_guid = value;
+
+ else if(hash == hash_url && !strcmp(name, "url"))
+ machine_url = value;
+
+ else if(action == 'A') {
+ if(hash == hash_name && !strcmp(name, "name"))
+ url_name = value;
+ }
+ else if(action == 'D') {
+ if(hash == hash_delete_url && !strcmp(name, "delete_url"))
+ delete_url = value;
+ }
+ else if(action == 'S') {
+ if(hash == hash_for && !strcmp(name, "for"))
+ search_machine_guid = value;
+ }
+ else if(action == 'W') {
+ if(hash == hash_to && !strcmp(name, "to"))
+ to_person_guid = value;
+ }
+#ifdef NETDATA_INTERNAL_CHECKS
+ else error("unused registry URL parameter '%s' with value '%s'", name, value);
+#endif /* NETDATA_INTERNAL_CHECKS */
+ }
+
+ if(unlikely(respect_web_browser_do_not_track_policy && web_client_has_donottrack(w))) {
+ buffer_flush(w->response.data);
+ buffer_sprintf(w->response.data, "Your web browser is sending 'DNT: 1' (Do Not Track). The registry requires persistent cookies on your browser to work.");
+ return HTTP_RESP_BAD_REQUEST;
+ }
+
+ if(unlikely(action == 'H')) {
+ // HELLO request, dashboard ACL
+ if(unlikely(!web_client_can_access_dashboard(w)))
+ return web_client_permission_denied(w);
+ }
+ else {
+ // everything else, registry ACL
+ if(unlikely(!web_client_can_access_registry(w)))
+ return web_client_permission_denied(w);
+ }
+
+ switch(action) {
+ case 'A':
+ if(unlikely(!machine_guid || !machine_url || !url_name)) {
+ error("Invalid registry request - access requires these parameters: machine ('%s'), url ('%s'), name ('%s')", machine_guid ? machine_guid : "UNSET", machine_url ? machine_url : "UNSET", url_name ? url_name : "UNSET");
+ buffer_flush(w->response.data);
+ buffer_strcat(w->response.data, "Invalid registry Access request.");
+ return HTTP_RESP_BAD_REQUEST;
+ }
+
+ web_client_enable_tracking_required(w);
+ return registry_request_access_json(host, w, person_guid, machine_guid, machine_url, url_name, now_realtime_sec());
+
+ case 'D':
+ if(unlikely(!machine_guid || !machine_url || !delete_url)) {
+ error("Invalid registry request - delete requires these parameters: machine ('%s'), url ('%s'), delete_url ('%s')", machine_guid?machine_guid:"UNSET", machine_url?machine_url:"UNSET", delete_url?delete_url:"UNSET");
+ buffer_flush(w->response.data);
+ buffer_strcat(w->response.data, "Invalid registry Delete request.");
+ return HTTP_RESP_BAD_REQUEST;
+ }
+
+ web_client_enable_tracking_required(w);
+ return registry_request_delete_json(host, w, person_guid, machine_guid, machine_url, delete_url, now_realtime_sec());
+
+ case 'S':
+ if(unlikely(!machine_guid || !machine_url || !search_machine_guid)) {
+ error("Invalid registry request - search requires these parameters: machine ('%s'), url ('%s'), for ('%s')", machine_guid?machine_guid:"UNSET", machine_url?machine_url:"UNSET", search_machine_guid?search_machine_guid:"UNSET");
+ buffer_flush(w->response.data);
+ buffer_strcat(w->response.data, "Invalid registry Search request.");
+ return HTTP_RESP_BAD_REQUEST;
+ }
+
+ web_client_enable_tracking_required(w);
+ return registry_request_search_json(host, w, person_guid, machine_guid, machine_url, search_machine_guid, now_realtime_sec());
+
+ case 'W':
+ if(unlikely(!machine_guid || !machine_url || !to_person_guid)) {
+ error("Invalid registry request - switching identity requires these parameters: machine ('%s'), url ('%s'), to ('%s')", machine_guid?machine_guid:"UNSET", machine_url?machine_url:"UNSET", to_person_guid?to_person_guid:"UNSET");
+ buffer_flush(w->response.data);
+ buffer_strcat(w->response.data, "Invalid registry Switch request.");
+ return HTTP_RESP_BAD_REQUEST;
+ }
+
+ web_client_enable_tracking_required(w);
+ return registry_request_switch_json(host, w, person_guid, machine_guid, machine_url, to_person_guid, now_realtime_sec());
+
+ case 'H':
+ return registry_request_hello_json(host, w);
+
+ default:
+ buffer_flush(w->response.data);
+ buffer_strcat(w->response.data, "Invalid registry request - you need to set an action: hello, access, delete, search");
+ return HTTP_RESP_BAD_REQUEST;
+ }
+}
+
+static inline void web_client_api_request_v1_info_summary_alarm_statuses(RRDHOST *host, BUFFER *wb) {
+ int alarm_normal = 0, alarm_warn = 0, alarm_crit = 0;
+ RRDCALC *rc;
+ rrdhost_rdlock(host);
+ for(rc = host->alarms; rc ; rc = rc->next) {
+ if(unlikely(!rc->rrdset || !rc->rrdset->last_collected_time.tv_sec))
+ continue;
+
+ switch(rc->status) {
+ case RRDCALC_STATUS_WARNING:
+ alarm_warn++;
+ break;
+ case RRDCALC_STATUS_CRITICAL:
+ alarm_crit++;
+ break;
+ default:
+ alarm_normal++;
+ }
+ }
+ rrdhost_unlock(host);
+ buffer_sprintf(wb, "\t\t\"normal\": %d,\n", alarm_normal);
+ buffer_sprintf(wb, "\t\t\"warning\": %d,\n", alarm_warn);
+ buffer_sprintf(wb, "\t\t\"critical\": %d\n", alarm_crit);
+}
+
+static inline void web_client_api_request_v1_info_mirrored_hosts(BUFFER *wb) {
+ RRDHOST *host;
+ int count = 0;
+
+ buffer_strcat(wb, "\t\"mirrored_hosts\": [\n");
+ rrd_rdlock();
+ rrdhost_foreach_read(host) {
+ if (rrdhost_flag_check(host, RRDHOST_FLAG_ARCHIVED))
+ continue;
+ if (count > 0)
+ buffer_strcat(wb, ",\n");
+
+ buffer_sprintf(wb, "\t\t\"%s\"", host->hostname);
+ count++;
+ }
+
+ buffer_strcat(wb, "\n\t],\n\t\"mirrored_hosts_status\": [\n");
+ count = 0;
+ rrdhost_foreach_read(host)
+ {
+ if (rrdhost_flag_check(host, RRDHOST_FLAG_ARCHIVED))
+ continue;
+ if (count > 0)
+ buffer_strcat(wb, ",\n");
+
+ netdata_mutex_lock(&host->receiver_lock);
+ buffer_sprintf(
+ wb, "\t\t{ \"guid\": \"%s\", \"reachable\": %s, \"claim_id\": ", host->machine_guid,
+ (host->receiver || host == localhost) ? "true" : "false");
+ netdata_mutex_unlock(&host->receiver_lock);
+
+ rrdhost_aclk_state_lock(host);
+ if (host->aclk_state.claimed_id)
+ buffer_sprintf(wb, "\"%s\" }", host->aclk_state.claimed_id);
+ else
+ buffer_strcat(wb, "null }");
+ rrdhost_aclk_state_unlock(host);
+
+ count++;
+ }
+ rrd_unlock();
+
+ buffer_strcat(wb, "\n\t],\n");
+}
+
+inline void host_labels2json(RRDHOST *host, BUFFER *wb, size_t indentation) {
+ char tabs[11];
+
+ if (indentation > 10)
+ indentation = 10;
+
+ tabs[0] = '\0';
+ while (indentation) {
+ strcat(tabs, "\t");
+ indentation--;
+ }
+
+ int count = 0;
+ rrdhost_rdlock(host);
+ netdata_rwlock_rdlock(&host->labels.labels_rwlock);
+ for (struct label *label = host->labels.head; label; label = label->next) {
+ if(count > 0) buffer_strcat(wb, ",\n");
+ buffer_strcat(wb, tabs);
+
+ char value[CONFIG_MAX_VALUE * 2 + 1];
+ sanitize_json_string(value, label->value, CONFIG_MAX_VALUE * 2);
+ buffer_sprintf(wb, "\"%s\": \"%s\"", label->key, value);
+
+ count++;
+ }
+ buffer_strcat(wb, "\n");
+ netdata_rwlock_unlock(&host->labels.labels_rwlock);
+ rrdhost_unlock(host);
+}
+
+extern int aclk_connected;
+inline int web_client_api_request_v1_info_fill_buffer(RRDHOST *host, BUFFER *wb)
+{
+ buffer_strcat(wb, "{\n");
+ buffer_sprintf(wb, "\t\"version\": \"%s\",\n", host->program_version);
+ buffer_sprintf(wb, "\t\"uid\": \"%s\",\n", host->machine_guid);
+
+ web_client_api_request_v1_info_mirrored_hosts(wb);
+
+ buffer_strcat(wb, "\t\"alarms\": {\n");
+ web_client_api_request_v1_info_summary_alarm_statuses(host, wb);
+ buffer_strcat(wb, "\t},\n");
+
+ buffer_sprintf(wb, "\t\"os_name\": \"%s\",\n", (host->system_info->host_os_name) ? host->system_info->host_os_name : "");
+ buffer_sprintf(wb, "\t\"os_id\": \"%s\",\n", (host->system_info->host_os_id) ? host->system_info->host_os_id : "");
+ buffer_sprintf(wb, "\t\"os_id_like\": \"%s\",\n", (host->system_info->host_os_id_like) ? host->system_info->host_os_id_like : "");
+ buffer_sprintf(wb, "\t\"os_version\": \"%s\",\n", (host->system_info->host_os_version) ? host->system_info->host_os_version : "");
+ buffer_sprintf(wb, "\t\"os_version_id\": \"%s\",\n", (host->system_info->host_os_version_id) ? host->system_info->host_os_version_id : "");
+ buffer_sprintf(wb, "\t\"os_detection\": \"%s\",\n", (host->system_info->host_os_detection) ? host->system_info->host_os_detection : "");
+ buffer_sprintf(wb, "\t\"cores_total\": \"%s\",\n", (host->system_info->host_cores) ? host->system_info->host_cores : "");
+ buffer_sprintf(wb, "\t\"total_disk_space\": \"%s\",\n", (host->system_info->host_disk_space) ? host->system_info->host_disk_space : "");
+ buffer_sprintf(wb, "\t\"cpu_freq\": \"%s\",\n", (host->system_info->host_cpu_freq) ? host->system_info->host_cpu_freq : "");
+ buffer_sprintf(wb, "\t\"ram_total\": \"%s\",\n", (host->system_info->host_ram_total) ? host->system_info->host_ram_total : "");
+
+ if (host->system_info->container_os_name)
+ buffer_sprintf(wb, "\t\"container_os_name\": \"%s\",\n", host->system_info->container_os_name);
+ if (host->system_info->container_os_id)
+ buffer_sprintf(wb, "\t\"container_os_id\": \"%s\",\n", host->system_info->container_os_id);
+ if (host->system_info->container_os_id_like)
+ buffer_sprintf(wb, "\t\"container_os_id_like\": \"%s\",\n", host->system_info->container_os_id_like);
+ if (host->system_info->container_os_version)
+ buffer_sprintf(wb, "\t\"container_os_version\": \"%s\",\n", host->system_info->container_os_version);
+ if (host->system_info->container_os_version_id)
+ buffer_sprintf(wb, "\t\"container_os_version_id\": \"%s\",\n", host->system_info->container_os_version_id);
+ if (host->system_info->container_os_detection)
+ buffer_sprintf(wb, "\t\"container_os_detection\": \"%s\",\n", host->system_info->container_os_detection);
+ if (host->system_info->is_k8s_node)
+ buffer_sprintf(wb, "\t\"is_k8s_node\": \"%s\",\n", host->system_info->is_k8s_node);
+
+ buffer_sprintf(wb, "\t\"kernel_name\": \"%s\",\n", (host->system_info->kernel_name) ? host->system_info->kernel_name : "");
+ buffer_sprintf(wb, "\t\"kernel_version\": \"%s\",\n", (host->system_info->kernel_version) ? host->system_info->kernel_version : "");
+ buffer_sprintf(wb, "\t\"architecture\": \"%s\",\n", (host->system_info->architecture) ? host->system_info->architecture : "");
+ buffer_sprintf(wb, "\t\"virtualization\": \"%s\",\n", (host->system_info->virtualization) ? host->system_info->virtualization : "");
+ buffer_sprintf(wb, "\t\"virt_detection\": \"%s\",\n", (host->system_info->virt_detection) ? host->system_info->virt_detection : "");
+ buffer_sprintf(wb, "\t\"container\": \"%s\",\n", (host->system_info->container) ? host->system_info->container : "");
+ buffer_sprintf(wb, "\t\"container_detection\": \"%s\",\n", (host->system_info->container_detection) ? host->system_info->container_detection : "");
+
+ buffer_strcat(wb, "\t\"host_labels\": {\n");
+ host_labels2json(host, wb, 2);
+ buffer_strcat(wb, "\t},\n");
+
+ buffer_strcat(wb, "\t\"collectors\": [");
+ chartcollectors2json(host, wb);
+ buffer_strcat(wb, "\n\t],\n");
+
+#ifdef DISABLE_CLOUD
+ buffer_strcat(wb, "\t\"cloud-enabled\": false,\n");
+#else
+ buffer_sprintf(wb, "\t\"cloud-enabled\": %s,\n",
+ appconfig_get_boolean(&cloud_config, CONFIG_SECTION_GLOBAL, "enabled", 1) ? "true" : "false");
+#endif
+
+#ifdef ENABLE_ACLK
+ buffer_strcat(wb, "\t\"cloud-available\": true,\n");
+#else
+ buffer_strcat(wb, "\t\"cloud-available\": false,\n");
+#endif
+ char *agent_id = is_agent_claimed();
+ if (agent_id == NULL)
+ buffer_strcat(wb, "\t\"agent-claimed\": false,\n");
+ else {
+ buffer_strcat(wb, "\t\"agent-claimed\": true,\n");
+ freez(agent_id);
+ }
+#ifdef ENABLE_ACLK
+ if (aclk_connected)
+ buffer_strcat(wb, "\t\"aclk-available\": true\n");
+ else
+#endif
+ buffer_strcat(wb, "\t\"aclk-available\": false\n"); // Intentionally valid with/without #ifdef above
+
+ buffer_strcat(wb, "}");
+ return 0;
+}
+
+inline int web_client_api_request_v1_info(RRDHOST *host, struct web_client *w, char *url) {
+ (void)url;
+ if (!netdata_ready) return HTTP_RESP_BACKEND_FETCH_FAILED;
+ BUFFER *wb = w->response.data;
+ buffer_flush(wb);
+ wb->contenttype = CT_APPLICATION_JSON;
+
+ web_client_api_request_v1_info_fill_buffer(host, wb);
+
+ buffer_no_cacheable(wb);
+ return HTTP_RESP_OK;
+}
+
+static struct api_command {
+ const char *command;
+ uint32_t hash;
+ WEB_CLIENT_ACL acl;
+ int (*callback)(RRDHOST *host, struct web_client *w, char *url);
+} api_commands[] = {
+ { "info", 0, WEB_CLIENT_ACL_DASHBOARD, web_client_api_request_v1_info },
+ { "data", 0, WEB_CLIENT_ACL_DASHBOARD, web_client_api_request_v1_data },
+ { "chart", 0, WEB_CLIENT_ACL_DASHBOARD, web_client_api_request_v1_chart },
+ { "charts", 0, WEB_CLIENT_ACL_DASHBOARD, web_client_api_request_v1_charts },
+ { "archivedcharts", 0, WEB_CLIENT_ACL_DASHBOARD, web_client_api_request_v1_archivedcharts },
+
+ // registry checks the ACL by itself, so we allow everything
+ { "registry", 0, WEB_CLIENT_ACL_NOCHECK, web_client_api_request_v1_registry },
+
+ // badges can be fetched with both dashboard and badge permissions
+ { "badge.svg", 0, WEB_CLIENT_ACL_DASHBOARD|WEB_CLIENT_ACL_BADGE, web_client_api_request_v1_badge },
+
+ { "alarms", 0, WEB_CLIENT_ACL_DASHBOARD, web_client_api_request_v1_alarms },
+ { "alarms_values", 0, WEB_CLIENT_ACL_DASHBOARD, web_client_api_request_v1_alarms_values },
+ { "alarm_log", 0, WEB_CLIENT_ACL_DASHBOARD, web_client_api_request_v1_alarm_log },
+ { "alarm_variables", 0, WEB_CLIENT_ACL_DASHBOARD, web_client_api_request_v1_alarm_variables },
+ { "alarm_count", 0, WEB_CLIENT_ACL_DASHBOARD, web_client_api_request_v1_alarm_count },
+ { "allmetrics", 0, WEB_CLIENT_ACL_DASHBOARD, web_client_api_request_v1_allmetrics },
+ { "manage/health", 0, WEB_CLIENT_ACL_MGMT, web_client_api_request_v1_mgmt_health },
+ // terminator
+ { NULL, 0, WEB_CLIENT_ACL_NONE, NULL },
+};
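+
+// Illustrative dispatch: "GET /api/v1/chart?chart=system.cpu" matches the
+// "chart" entry above and, after the WEB_CLIENT_ACL_DASHBOARD check passes,
+// its callback receives "chart=system.cpu" (the decoded query string
+// without the leading '?').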
+
+inline int web_client_api_request_v1(RRDHOST *host, struct web_client *w, char *url) {
+ static int initialized = 0;
+ int i;
+
+ if(unlikely(initialized == 0)) {
+ initialized = 1;
+
+ for(i = 0; api_commands[i].command ; i++)
+ api_commands[i].hash = simple_hash(api_commands[i].command);
+ }
+
+ // get the command
+ if(url) {
+ debug(D_WEB_CLIENT, "%llu: Searching for API v1 command '%s'.", w->id, url);
+ uint32_t hash = simple_hash(url);
+
+ for(i = 0; api_commands[i].command ;i++) {
+ if(unlikely(hash == api_commands[i].hash && !strcmp(url, api_commands[i].command))) {
+ if(unlikely(api_commands[i].acl != WEB_CLIENT_ACL_NOCHECK) && !(w->acl & api_commands[i].acl))
+ return web_client_permission_denied(w);
+
+ //return api_commands[i].callback(host, w, url);
+ return api_commands[i].callback(host, w, (w->decoded_query_string + 1));
+ }
+ }
+
+ buffer_flush(w->response.data);
+ buffer_strcat(w->response.data, "Unsupported v1 API command: ");
+ buffer_strcat_htmlescape(w->response.data, url);
+ return HTTP_RESP_NOT_FOUND;
+ }
+ else {
+ buffer_flush(w->response.data);
+ buffer_sprintf(w->response.data, "Which API v1 command?");
+ return HTTP_RESP_BAD_REQUEST;
+ }
+}
diff --git a/web/api/web_api_v1.h b/web/api/web_api_v1.h
new file mode 100644
index 0000000..445b0e4
--- /dev/null
+++ b/web/api/web_api_v1.h
@@ -0,0 +1,36 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_WEB_API_V1_H
+#define NETDATA_WEB_API_V1_H 1
+
+#include "daemon/common.h"
+#include "web/api/badges/web_buffer_svg.h"
+#include "web/api/formatters/rrd2json.h"
+#include "web/api/health/health_cmdapi.h"
+
+extern uint32_t web_client_api_request_v1_data_options(char *o);
+extern uint32_t web_client_api_request_v1_data_format(char *name);
+extern uint32_t web_client_api_request_v1_data_google_format(char *name);
+
+extern int web_client_api_request_v1_alarms(RRDHOST *host, struct web_client *w, char *url);
+extern int web_client_api_request_v1_alarms_values(RRDHOST *host, struct web_client *w, char *url);
+extern int web_client_api_request_v1_alarm_log(RRDHOST *host, struct web_client *w, char *url);
+extern int web_client_api_request_single_chart(RRDHOST *host, struct web_client *w, char *url, void callback(RRDSET *st, BUFFER *buf));
+extern int web_client_api_request_v1_alarm_variables(RRDHOST *host, struct web_client *w, char *url);
+extern int web_client_api_request_v1_alarm_count(RRDHOST *host, struct web_client *w, char *url);
+extern int web_client_api_request_v1_charts(RRDHOST *host, struct web_client *w, char *url);
+extern int web_client_api_request_v1_archivedcharts(RRDHOST *host, struct web_client *w, char *url);
+extern int web_client_api_request_v1_chart(RRDHOST *host, struct web_client *w, char *url);
+extern int web_client_api_request_v1_data(RRDHOST *host, struct web_client *w, char *url);
+extern int web_client_api_request_v1_registry(RRDHOST *host, struct web_client *w, char *url);
+extern int web_client_api_request_v1_info(RRDHOST *host, struct web_client *w, char *url);
+extern int web_client_api_request_v1(RRDHOST *host, struct web_client *w, char *url);
+extern int web_client_api_request_v1_info_fill_buffer(RRDHOST *host, BUFFER *wb);
+extern void host_labels2json(RRDHOST *host, BUFFER *wb, size_t indentation);
+
+extern void web_client_api_v1_init(void);
+extern void web_client_api_v1_management_init(void);
+
+extern char *api_secret;
+
+#endif //NETDATA_WEB_API_V1_H
diff --git a/web/gui/.well-known/dnt/cookies b/web/gui/.well-known/dnt/cookies
new file mode 100644
index 0000000..b7c70e5
--- /dev/null
+++ b/web/gui/.well-known/dnt/cookies
@@ -0,0 +1,14 @@
+{
+ "tracking": "T",
+ "compliance": ["https://github.com/netdata/netdata/wiki/cookies#compliance"],
+ "qualifiers": "afc",
+ "controller": ["https://github.com/netdata/netdata/wiki/cookies#controller"],
+ "same-party": [
+ "my-netdata.io",
+ "mynetdata.io",
+ "netdata.online",
+ "netdata.rocks",
+ "registry.my-netdata.io"
+ ],
+ "policy": "https://github.com/netdata/netdata/wiki/cookies#policy",
+}
diff --git a/web/gui/Makefile.am b/web/gui/Makefile.am
new file mode 100644
index 0000000..c69380f
--- /dev/null
+++ b/web/gui/Makefile.am
@@ -0,0 +1,189 @@
+#
+# Copyright (C) 2015 Alon Bar-Lev <alon.barlev@gmail.com>
+# SPDX-License-Identifier: GPL-3.0-or-later
+#
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+CLEANFILES = \
+ version.txt \
+ $(NULL)
+
+DASHBOARD_JS_FILES = \
+ src/dashboard.js/prologue.js.inc \
+ src/dashboard.js/utils.js \
+ src/dashboard.js/server-detection.js \
+ src/dashboard.js/dependencies.js \
+ src/dashboard.js/error-handling.js \
+ src/dashboard.js/compatibility.js \
+ src/dashboard.js/xss.js \
+ src/dashboard.js/colors.js \
+ src/dashboard.js/units-conversion.js \
+ src/dashboard.js/options.js \
+ src/dashboard.js/localstorage.js \
+ src/dashboard.js/timeout.js \
+ src/dashboard.js/themes.js \
+ src/dashboard.js/charting/dygraph.js \
+ src/dashboard.js/charting/sparkline.js \
+ src/dashboard.js/charting/google-charts.js \
+ src/dashboard.js/charting/gauge.js \
+ src/dashboard.js/charting/easy-pie-chart.js \
+ src/dashboard.js/charting/d3pie.js \
+ src/dashboard.js/charting/d3.js \
+ src/dashboard.js/charting/peity.js \
+ src/dashboard.js/charting/textonly.js \
+ src/dashboard.js/charting.js \
+ src/dashboard.js/chart-registry.js \
+ src/dashboard.js/common.js \
+ src/dashboard.js/main.js \
+ src/dashboard.js/alarms.js \
+ src/dashboard.js/registry.js \
+ src/dashboard.js/boot.js \
+ src/dashboard.js/epilogue.js.inc \
+ $(NULL)
+
+dist_noinst_DATA = \
+ README.md \
+ $(DASHBOARD_JS_FILES) \
+ $(NULL)
+
+dist_web_DATA = \
+ demo.html \
+ demo2.html \
+ demosites.html \
+ demosites2.html \
+ dashboard.html \
+ dashboard.js \
+ dashboard_info.js \
+ dashboard_info_custom_example.js \
+ dashboard.css \
+ dashboard.slate.css \
+ favicon.ico \
+ goto-host-from-alarm.html \
+ index.html \
+ main.css \
+ main.js \
+ console.html \
+ infographic.html \
+ robots.txt \
+ refresh-badges.js \
+ sitemap.xml \
+ tv.html \
+ dash-example.html \
+ version.txt \
+ $(NULL)
+
+webolddir=$(webdir)/old
+dist_webold_DATA = \
+ old/index.html \
+ $(NULL)
+
+webstaticdir=$(webdir)/static/img
+dist_webstatic_DATA = \
+ static/img/netdata-logomark.svg \
+ $(NULL)
+
+weblibdir=$(webdir)/lib
+dist_weblib_DATA = \
+ lib/bootstrap-3.3.7.min.js \
+ lib/bootstrap-slider-10.0.0.min.js \
+ lib/bootstrap-table-1.11.0.min.js \
+ lib/bootstrap-table-export-1.11.0.min.js \
+ lib/bootstrap-toggle-2.2.2.min.js \
+ lib/clipboard-polyfill-be05dad.js \
+ lib/d3-4.12.2.min.js \
+ lib/d3pie-0.2.1-netdata-3.js \
+ lib/dygraph-c91c859.min.js \
+ lib/dygraph-smooth-plotter-c91c859.js \
+ lib/fontawesome-all-5.0.1.min.js \
+ lib/gauge-1.3.2.min.js \
+ lib/jquery-2.2.4.min.js \
+ lib/jquery.easypiechart-97b5824.min.js \
+ lib/jquery.peity-3.2.0.min.js \
+ lib/jquery.sparkline-2.1.2.min.js \
+ lib/lz-string-1.4.4.min.js \
+ lib/pako-1.0.6.min.js \
+ lib/perfect-scrollbar-0.6.15.min.js \
+ lib/tableExport-1.6.0.min.js \
+ $(NULL)
+
+webcssdir=$(webdir)/css
+dist_webcss_DATA = \
+ css/morris-0.5.1.css \
+ css/bootstrap-3.3.7.css \
+ css/bootstrap-theme-3.3.7.min.css \
+ css/bootstrap-slate-flat-3.3.7.css \
+ css/bootstrap-slider-10.0.0.min.css \
+ css/bootstrap-toggle-2.2.2.min.css \
+ css/c3-0.4.18.min.css \
+ $(NULL)
+
+webfontsdir=$(webdir)/fonts
+dist_webfonts_DATA = \
+ fonts/glyphicons-halflings-regular.eot \
+ fonts/glyphicons-halflings-regular.svg \
+ fonts/glyphicons-halflings-regular.ttf \
+ fonts/glyphicons-halflings-regular.woff \
+ fonts/glyphicons-halflings-regular.woff2 \
+ $(NULL)
+
+webimagesdir=$(webdir)/images
+dist_webimages_DATA = \
+ images/netdata-logomark.svg \
+ images/alert-128-orange.png \
+ images/alert-128-red.png \
+ images/alert-multi-size-orange.ico \
+ images/alert-multi-size-red.ico \
+ images/animated.gif \
+ images/check-mark-2-128-green.png \
+ images/check-mark-2-multi-size-green.ico \
+ images/netdata.svg \
+ images/post.png \
+ images/android-icon-36x36.png \
+ images/android-icon-48x48.png \
+ images/android-icon-72x72.png \
+ images/android-icon-96x96.png \
+ images/android-icon-144x144.png \
+ images/android-icon-192x192.png \
+ images/apple-icon-57x57.png \
+ images/apple-icon-60x60.png \
+ images/apple-icon-72x72.png \
+ images/apple-icon-76x76.png \
+ images/apple-icon-114x114.png \
+ images/apple-icon-120x120.png \
+ images/apple-icon-144x144.png \
+ images/apple-icon-152x152.png \
+ images/apple-icon-180x180.png \
+ images/apple-icon-precomposed.png \
+ images/apple-icon.png \
+ images/favicon-16x16.png \
+ images/favicon-32x32.png \
+ images/favicon-96x96.png \
+ images/favicon.ico \
+ images/ms-icon-70x70.png \
+ images/ms-icon-144x144.png \
+ images/ms-icon-150x150.png \
+ images/ms-icon-310x310.png \
+ images/banner-icon-144x144.png \
+ $(NULL)
+
+dashboard.js: $(DASHBOARD_JS_FILES)
+ if test -f $@; then rm -f $@; fi
+ cat $(DASHBOARD_JS_FILES) > $@.tmp && mv $@.tmp $@
+
+webwellknowndir=$(webdir)/.well-known
+dist_webwellknown_DATA = \
+ $(NULL)
+
+webdntdir=$(webdir)/.well-known/dnt
+dist_webdnt_DATA = \
+ .well-known/dnt/cookies \
+ $(NULL)
+
+version.txt:
+ if test -d "$(top_srcdir)/.git"; then \
+ git --git-dir="$(top_srcdir)/.git" log -n 1 --format=%H; \
+ fi > $@.tmp
+ test -s $@.tmp || echo 0 > $@.tmp
+ mv $@.tmp $@
+
+# regenerate these files, even if they are up to date
+.PHONY: version.txt dashboard.js
diff --git a/web/gui/README.md b/web/gui/README.md
new file mode 100644
index 0000000..166cea7
--- /dev/null
+++ b/web/gui/README.md
@@ -0,0 +1,169 @@
+<!--
+title: "Local Agent dashboard"
+description: "The local Netdata Agent dashboard is the heart of health monitoring and performance troubleshooting, with hundreds of real-time charts."
+custom_edit_url: https://github.com/netdata/netdata/edit/master/web/gui/README.md
+-->
+
+# Local Agent dashboard
+
+The local Netdata Agent dashboard is the heart of Netdata's performance troubleshooting toolkit. You've probably seen it
+before:
+
+![The Netdata dashboard in
+action](https://user-images.githubusercontent.com/1153921/101513938-fae28380-3939-11eb-9434-8ad86a39be62.gif)
+
+Learn more about how dashboards work and how they're populated using the `dashboard.js` file in our [web dashboards
+overview](/web/README.md).
+
+By default, Netdata starts a web server for its dashboard at port `19999`. Open up your web browser of choice and
+navigate to `http://NODE:19999`, replacing `NODE` with the IP address or hostname of your Agent. If you're unsure, try
+`http://localhost:19999` first.
+
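+If you'd rather check from a terminal first, a quick request against the agent's
+built-in API works too (a sketch; it assumes a local agent on the default port):
+
+```shell
+# Ask the local agent for its basic information (the /api/v1/info endpoint):
+curl http://localhost:19999/api/v1/info
+```
+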
+Netdata uses an [internal, static-threaded web server](/web/server/README.md) to host the HTML, CSS, and JavaScript
+files that make up the local Agent dashboard. You don't have to configure anything to access it, although you can adjust
+[your settings](/web/server/README.md#other-netdataconf-web-section-options) in the `netdata.conf` file, or run Netdata
+behind an [Nginx proxy](https://learn.netdata.cloud/docs/agent/running-behind-nginx), and so on.
+
+## Navigating the local dashboard
+
+Beyond charts, the local dashboard can be broken down into four key areas:
+
+1. [**Sections**](#sections)
+2. [**Time & date picker**](#time--date-picker)
+3. [**Metrics menus/submenus**](#metrics-menus)
+4. [**Cloud menus (Spaces, War Rooms, and Visited nodes)**](#cloud-menus-spaces-war-rooms-and-visited-nodes)
+
+![Annotated screenshot of the local Agent
+dashboard](https://user-images.githubusercontent.com/1153921/101509403-f7e59400-3935-11eb-9abd-cbecfa3ee49a.png)
+
+### Sections
+
+Netdata is broken up into multiple **sections**, such as **System Overview**,
+**CPU**, **Disk**, and more. Inside each section you'll find a number of charts,
+broken down into [contexts](/web/README.md#contexts) and
+[families](/web/README.md#families).
+
+Here's an example of the **Memory** section on a Linux desktop system:
+
+![Screenshot of the Memory section of the Netdata
+dashboard](https://user-images.githubusercontent.com/1153921/101508423-e354cc00-3934-11eb-9b33-3ad57a5988b4.png)
+
+All sections and their associated charts appear on a single page, so all you need to do to view different sections is
+scroll up and down. But it's usually quicker to use the [menus](#metrics-menus).
+
+### Time & date picker
+
+The local dashboard features a time & date picker to help you visualize specific timeframes of historical metrics. The
+picker chooses an appropriate default to always show per-second granularity based on the width of your browser's
+viewport.
+
+![The time & date picker in the local Agent
+dashboard](https://user-images.githubusercontent.com/1153921/101507784-2c585080-3934-11eb-9d6e-eff30b8553e4.png)
+
+Use the Quick Selector to show metrics from the last 5 minutes, 15 minutes, 30 minutes, 2 hours, 6 hours, or 12 hours.
+
+Beneath the Quick Selector is an input field and dropdown you use in combination to select a specific timeframe of
+minutes, hours, days, or months. Enter a number and choose the appropriate unit of time.
+
+Use the calendar to select multiple days. Click on a date to begin the timeframe selection, then click an ending date to complete it.
+
+Click **Apply** to re-render all visualizations with new metrics data, or **Clear** to restore the default timeframe.
+
+[Increase the metrics retention policy](/docs/store/change-metrics-storage.md) for your node to see more historical
+timeframes.
+
+### Metrics menus
+
+**Metrics menus** appear on the right-hand side of the local Agent dashboard. Netdata generates a menu for each
+section, and menus link to the section they're associated with.
+
+![A screenshot of metrics menus](https://user-images.githubusercontent.com/1153921/80834638-f08f2880-8ba5-11ea-99ae-f610b2885fd6.png)
+
+Most metrics menu items will contain several **submenu** entries, which represent any
+[families](/web/README.md#families) from that section. Netdata automatically
+generates these submenu entries.
+
+Here's a **Disks** menu with several submenu entries for each disk drive and
+partition Netdata recognizes.
+
+![Screenshot of some metrics
+submenus](https://user-images.githubusercontent.com/1153921/80834697-11577e00-8ba6-11ea-979c-92fd19cdb480.png)
+
+### Cloud menus (Spaces, War Rooms, and Visited nodes)
+
+The dashboard also features a menu related to Netdata Cloud functionality. You can view your existing Spaces or create
+new ones via the left vertical column of boxes. This menu also displays the name of your current Space, shows a list of
+any War Rooms you've added to your Space, and lists any nodes you recently visited via their Agent dashboards. Click on
+a War Room's name to jump to the Netdata Cloud web interface.
+
+![A screenshot of the Cloud
+menus](https://user-images.githubusercontent.com/1153921/80837210-3f8b8c80-8bab-11ea-9c75-128c2d823ef8.png)
+
+If you want to know more about how Cloud populates this menu, and the Agent-Cloud integration at a high level, see our
+document on [using the Agent with Netdata Cloud](/docs/agent-cloud.md).
+
+## Customizing the local dashboard
+
+Netdata stores information about individual charts in the `dashboard_info.js`
+file. This file includes section and subsection headings, descriptions, colors,
+titles, tooltips, and other information for Netdata to render on the dashboard.
+
+For example, here is how `dashboard_info.js` defines the **System Overview**
+section.
+
+```javascript
+netdataDashboard.menu = {
+ 'system': {
+ title: 'System Overview',
+ icon: '<i class="fas fa-bookmark"></i>',
+ info: 'Overview of the key system metrics.'
+ },
+```
+
+If you want to customize this information, you should avoid editing
+`dashboard_info.js` directly. These changes are not persistent; Netdata will
+overwrite the file when it's updated. Instead, you should create a new file with
+your customizations.
+
+We created an example file at `dashboard_info_custom_example.js`. You can
+copy this to a new file with a name of your choice in the `web/` directory. This
+directory changes based on your operating system and installation method. If
+you're on a Linux system, it should be at `/usr/share/netdata/web/`.
+
+```shell
+cd /usr/share/netdata/web/
+sudo cp dashboard_info_custom_example.js your_dashboard_info_file.js
+```
+
+Edit the file with your customizations. For example:
+
+```javascript
+customDashboard.menu = {
+ system: {
+ title: "Testing, testing, 1 2 3",
+ icon: '<i class="fas fa-thumbs-up"></i>',
+ info: "This is overwritten info for the system overview section!"
+ }
+};
+```
+
+Finally, tell Netdata where you placed your customization file by replacing
+`your_dashboard_info_file.js` below.
+
+```conf
+[web]
+ custom dashboard_info.js = your_dashboard_info_file.js
+```
+
+Once you restart Netdata, refresh the dashboard to find your custom
+configuration:
+
+![Screenshot of overwritten text from dashboard_info.js
+file](https://user-images.githubusercontent.com/1153921/62798924-570e6c80-ba94-11e9-9578-869753bec39c.png)
+
+## Custom dashboards
+
+For information on creating custom dashboards from scratch, see the [custom dashboards](/web/gui/custom/README.md) or
+[Atlassian Confluence dashboards](/web/gui/confluence/README.md) guides.
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fweb%2Fgui%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/web/gui/browserconfig.xml b/web/gui/browserconfig.xml
new file mode 100644
index 0000000..32f4759
--- /dev/null
+++ b/web/gui/browserconfig.xml
@@ -0,0 +1,2 @@
+<?xml version="1.0" encoding="utf-8"?>
+<browserconfig><msapplication><tile><square70x70logo src="images/ms-icon-70x70.png"/><square150x150logo src="images/ms-icon-150x150.png"/><square310x310logo src="images/ms-icon-310x310.png"/><TileColor>#ffffff</TileColor></tile></msapplication></browserconfig>
diff --git a/web/gui/confluence/README.md b/web/gui/confluence/README.md
new file mode 100644
index 0000000..2e929e7
--- /dev/null
+++ b/web/gui/confluence/README.md
@@ -0,0 +1,1019 @@
+<!--
+title: "Atlassian Confluence dashboards"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/web/gui/confluence/README.md
+-->
+
+# Atlassian Confluence dashboards
+
+With Netdata you can build **live, interactive, monitoring dashboards** directly on Atlassian's **Confluence** pages.
+
+I see you already asking "why should I do this?"
+
+Well... think about it: Confluence is the perfect place for something like that:
+
+1. All the employees of your company already have access to it.
+
+2. Most probably you already have several spaces on Confluence, one for each project or service. Adding live monitoring information there is ideal: everything in one place. Your users just click on the page and the monitoring page they need instantly appears, with only the information they need to know.
+
+3. You can create monitoring pages for very specific purposes, hiding all the information that is too detailed for most users, or explaining in detail things that are difficult for them to understand.
+
+So, what can we expect? What can Netdata do on Confluence?
+
+You will be surprised! **Everything a Netdata dashboard does!** Example:
+
+![final-confluence4](https://user-images.githubusercontent.com/2662304/34366214-767fa4b8-eaa1-11e7-83af-0b9b9b72aa73.gif)
+
+Let me show you how.
+
+> Let's assume we have 2 web servers we want to monitor. We will create a simple dashboard with key information about them, directly on confluence.
+
+### Before you begin
+
+Most likely your Confluence is accessible via HTTPS, so you need to proxy your Netdata servers through Apache or Nginx to serve them over HTTPS too. If your Confluence is served over HTTPS but your Netdata servers are not, you will not be able to fetch the Netdata content from the Confluence page. The Netdata wiki has many examples of proxying Netdata through another web server.
+
+> So, make sure Netdata and Confluence can be accessed with the same protocol (**http**, or **https**).
+
+For our example, I will use these 2 servers:
+
+| server | url |
+|------|---|
+| Server 1 | <https://london.my-netdata.io> |
+| Server 2 | <https://frankfurt.my-netdata.io> |
+
+I will use the first server to serve the static dashboard JavaScript files.
+
+---
+
+Then, you need to enable the `html` plugin in Confluence. We will add some plain HTML content to the page, and this plugin is required for that.
+
+### Create a new page
+
+Create a new confluence page and paste this into an `html` box:
+
+```html
+<script>
+// don't load bootstrap - confluence does not need this
+var netdataNoBootstrap = true;
+
+// select the web notifications to show on this dashboard
+// var netdataShowAlarms = true;
+// var netdataAlarmsRecipients = [ 'sysadmin', 'webmaster' ];
+</script>
+
+<script src="https://london.my-netdata.io/dashboard.js"></script>
+```
+
+like this (type `{html` for the HTML box to appear - you need the Confluence `html` plugin enabled):
+
+![screenshot from 2017-12-25 00-46-20](https://user-images.githubusercontent.com/2662304/34329541-1dd9077c-e90d-11e7-988d-6820be31ff3f.png)
+
+### Add a few badges
+
+Then, go to your Netdata dashboard and copy an alarm badge (the `<embed>` version of it):
+
+![copy-embed-badge](https://user-images.githubusercontent.com/2662304/34329562-dddea37e-e90d-11e7-9830-041a9f6a5984.gif)
+
+Then add another HTML box on the page, and paste it, like this:
+
+![screenshot from 2017-12-25 00-55-18](https://user-images.githubusercontent.com/2662304/34329569-4fc3d07c-e90e-11e7-8127-3127a21e1657.png)
+
+Hit **update** and you will get this:
+
+![screenshot from 2017-12-25 00-56-58](https://user-images.githubusercontent.com/2662304/34329573-8d4237cc-e90e-11e7-80bf-6c260456c690.png)
+
+This badge is now auto-refreshing. It will update itself based on the update frequency of the alarm.
+
+> Keep in mind you can add badges with custom Netdata queries too. Netdata automatically creates badges for all the
+> alarms, but every chart, every dimension on every chart, can be used for a badge. And Netdata badges are quite
+> powerful! Check [Creating Badges](/web/api/badges/README.md) for more information on badges.
+
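+For example, a badge built from a custom query instead of an alarm looks roughly like this (a sketch; the chart and dimension names are the ones used on this page, and `refresh=auto` is what makes the badge update itself):
+
+```html
+<embed src="https://london.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx_netdata.response_statuses&dimensions=success&after=-60&label=successful%20requests&units=requests%2Fs&refresh=auto" type="image/svg+xml" height="20"/>
+```
+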
+So, let's create a table and add this badge for both our web servers:
+
+![screenshot from 2017-12-25 01-06-10](https://user-images.githubusercontent.com/2662304/34329609-d3e9ab00-e90f-11e7-99df-884196347538.png)
+
+Now we get this:
+
+![screenshot from 2017-12-25 01-07-10](https://user-images.githubusercontent.com/2662304/34329615-f7dea286-e90f-11e7-9b6f-600215494f96.png)
+
+### Add a Netdata chart
+
+The simplest form of a chart is this (it adds the chart `web_log_nginx_netdata.response_statuses`, using 100% of the width, 150px height, and the last 10 minutes of data):
+
+```html
+<div
+ data-netdata="web_log_nginx_netdata.response_statuses"
+ data-width="100%"
+ data-height="150px"
+ data-before="0"
+ data-after="-600"
+></div>
+```
+
+Add this to an `html` block on Confluence:
+
+![screenshot from 2017-12-25 01-13-15](https://user-images.githubusercontent.com/2662304/34329635-cf83ab0a-e910-11e7-85a3-b72ccc2d54e4.png)
+
+And you will get this:
+
+![screenshot from 2017-12-25 01-14-09](https://user-images.githubusercontent.com/2662304/34329640-efd15574-e910-11e7-9004-94487dcde154.png)
+
+> This chart is **alive**, fully interactive. You can drag it, pan it, zoom it, etc., just like you do on Netdata dashboards!
+
+Of course, this is too big. We need something smaller to fit inside the table. Let's try this:
+
+```html
+<div
+ data-netdata="web_log_nginx_netdata.response_statuses"
+ data-legend="false"
+ data-dygraph-yaxislabelwidth="35"
+ data-dygraph-ypixelsperlabel="8"
+ data-after="-600"
+ data-before="0"
+ data-title=""
+ data-height="100px"
+ data-width="300px"
+></div>
+```
+
+The chart name is shown on all Netdata charts, so just copy it from a Netdata dashboard.
+
+We will fetch the same chart from both servers. To define the server, we also added `data-host=` with the URL of each server, like this (we also added a `<br/>` for a newline between the badge and the chart):
+
+![screenshot from 2017-12-25 01-25-05](https://user-images.githubusercontent.com/2662304/34329695-76fd2680-e912-11e7-9969-87f8d5b36145.png)
+
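+In text form, the two HTML boxes contain roughly this (a sketch; only the value of `data-host` differs between the two servers):
+
+```html
+<embed src="https://london.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx_netdata.response_statuses&alarm=1m_successful&refresh=auto&label=1m%20london%20successful%20requests" type="image/svg+xml" height="20"/>
+<br/>
+<div
+ data-netdata="web_log_nginx_netdata.response_statuses"
+ data-host="https://london.my-netdata.io"
+ data-legend="false"
+ data-dygraph-yaxislabelwidth="35"
+ data-dygraph-ypixelsperlabel="8"
+ data-after="-600"
+ data-before="0"
+ data-title=""
+ data-height="100px"
+ data-width="300px"
+></div>
+```
+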
+Which gives us this:
+
+![screenshot from 2017-12-25 01-26-04](https://user-images.githubusercontent.com/2662304/34329700-989f0f2e-e912-11e7-8ac9-c78f82cfbdb0.png)
+
+Note the color difference. This is because Netdata automatically hides dimensions that are just zero (the Frankfurt server has only successful requests). To disable this feature, we need to add another HTML fragment at the bottom of the page (make sure it is added after `dashboard.js` is loaded). So we edit the first block we added, and append a new `<script>` section to it:
+
+```html
+<script>
+// don't load bootstrap - confluence does not need this
+var netdataNoBootstrap = true;
+
+// select the web notifications to show on this dashboard
+// var netdataShowAlarms = true;
+// var netdataAlarmsRecipients = [ 'sysadmin', 'webmaster' ];
+</script>
+
+<script src="https://london.my-netdata.io/dashboard.js"></script>
+
+<script>
+// do not hide dimensions with just zeros
+NETDATA.options.current.eliminate_zero_dimensions = false;
+</script>
+```
+
+Now they match:
+
+![screenshot from 2017-12-25 01-30-14](https://user-images.githubusercontent.com/2662304/34329716-2ea83680-e913-11e7-847e-52b3f402aeb0.png)
+
+#### More options
+
+If you want to change the colors, append `data-colors="#001122 #334455 #667788"`. The colors will be used for the dimensions from top to bottom, as shown on a Netdata dashboard. Keep in mind that Netdata dashboards hide, by default, all dimensions that are just zero, so enable them in the dashboard settings to see them all.
+
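+For example, applied to the small chart above, it would look roughly like this (a sketch; the color values are arbitrary):
+
+```html
+<div
+ data-netdata="web_log_nginx_netdata.response_statuses"
+ data-host="https://london.my-netdata.io"
+ data-legend="false"
+ data-colors="#007ec6 #97CA00 #fe7d37 #e05d44 #9f9f9f"
+ data-after="-600"
+ data-before="0"
+ data-height="100px"
+ data-width="300px"
+></div>
+```
+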
+You can get a percentage chart by adding these attributes to the charts:
+
+```html
+ data-append-options="percentage"
+ data-decimal-digits="0"
+ data-dygraph-valuerange="[0, 100]"
+ data-dygraph-includezero="true"
+ data-units="%"
+```
+
+The first line instructs Netdata to calculate the percentage of each dimension, the second strips any fractional digits, the third instructs the charting library to scale the y-axis from 0 to 100, the fourth instructs it to always include 0 in the chart, and the last changes the units of the chart to `%`. This is how it will look:
+
+![screenshot from 2017-12-25 01-45-39](https://user-images.githubusercontent.com/2662304/34329774-570ef990-e915-11e7-899f-eee939564aaf.png)
+
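+Putting it all together, the complete element for such a percentage chart would look roughly like this (a sketch that combines the fragment above with the small chart from earlier):
+
+```html
+<div
+ data-netdata="web_log_nginx_netdata.response_statuses"
+ data-host="https://london.my-netdata.io"
+ data-legend="false"
+ data-after="-600"
+ data-before="0"
+ data-title=""
+ data-height="100px"
+ data-width="300px"
+ data-append-options="percentage"
+ data-decimal-digits="0"
+ data-dygraph-valuerange="[0, 100]"
+ data-dygraph-includezero="true"
+ data-units="%"
+></div>
+```
+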
+You can make any number of charts share a common min and max on the y-range by adding `data-common-min="NAME"` and `data-common-max="NAME"`, where `NAME` is anything you like. Keep in mind that, for best results, all the charts with the same `NAME` should be visible at once; otherwise a chart that is not visible will still influence the range, and the range will not adapt until that chart is updated.
+
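+For example, these two charts will always share the same y-range (a sketch; `web_requests` is an arbitrary `NAME`):
+
+```html
+<!-- both charts use the same NAME in data-common-max, so they share a y-range -->
+<div
+ data-netdata="web_log_nginx_netdata.response_statuses"
+ data-host="https://london.my-netdata.io"
+ data-common-max="web_requests"
+ data-after="-600"
+ data-before="0"
+ data-height="100px"
+ data-width="300px"
+></div>
+<div
+ data-netdata="web_log_nginx_netdata.response_statuses"
+ data-host="https://frankfurt.my-netdata.io"
+ data-common-max="web_requests"
+ data-after="-600"
+ data-before="0"
+ data-height="100px"
+ data-width="300px"
+></div>
+```
+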
+### Add gauges
+
+Let's now add a few gauges. The chart we added has several dimensions: `success`, `error`, `redirect`, `bad` and `other`.
+
+Let's say we want to add 2 gauges:
+
+1. `success` and `redirect` together, in blue
+2. `error`, `bad` and `other` together, in green
+
+We will add the following for each server. We have enclosed the two gauges in an outer `<div>`, because Confluence would wrap them if the page width were not enough to fit them. With that additional `<div>` they will always stay next to each other.
+
+```html
+<div style="width: 300px; text-align: center;">
+<div
+ data-netdata="web_log_nginx_netdata.response_statuses"
+ data-host="https://london.my-netdata.io"
+ data-dimensions="success,redirect"
+ data-chart-library="gauge"
+ data-title="Good"
+ data-units="requests/s"
+ data-gauge-adjust="width"
+ data-width="120"
+ data-before="0"
+ data-after="-600"
+ data-points="600"
+ data-common-max="response_statuses"
+ data-colors="#007ec6"
+ data-decimal-digits="0"
+ ></div><div
+ data-netdata="web_log_nginx_netdata.response_statuses"
+ data-host="https://london.my-netdata.io"
+ data-dimensions="error,bad,other"
+ data-chart-library="gauge"
+ data-title="Bad"
+ data-units="requests/s"
+ data-gauge-adjust="width"
+ data-width="120"
+ data-before="0"
+ data-after="-600"
+ data-points="600"
+ data-common-max="response_statuses"
+ data-colors="#97CA00"
+ data-decimal-digits="0"
+ ></div>
+</div>
+```
+
+Adding the above will give you this:
+
+![final-confluence](https://user-images.githubusercontent.com/2662304/34329813-636bb8de-e917-11e7-8cc7-19e197859008.gif)
+
+### Final source - for the confluence source editor
+
+If you enable the source editor of Confluence, you can paste the whole example below (it implements the dashboard shown in the first image of this page and demonstrates everything discussed here):
+
+```html
+<p class="auto-cursor-target">Monitoring the health of the web servers, by analyzing the response codes they send.</p>
+<table>
+ <colgroup>
+ <col/>
+ <col/>
+ <col/>
+ <col/>
+ <col/>
+ </colgroup>
+ <tbody>
+ <tr>
+ <th style="text-align: center;">
+ <br/>
+ </th>
+ <th style="text-align: center;">London</th>
+ <th style="text-align: center;">Frankfurt</th>
+ <th colspan="1" style="text-align: center;">San Francisco</th>
+ <th colspan="1" style="text-align: center;">Toronto</th>
+ </tr>
+ <tr>
+ <td colspan="1" style="text-align: right;">
+ <strong>last hour</strong>
+ <br/>
+ <strong>requests</strong>
+ </td>
+ <td colspan="1" style="text-align: center;">
+ <div class="content-wrapper">
+ <p class="auto-cursor-target">
+ <br/>
+ </p>
+ <ac:structured-macro ac:macro-id="5771a1db-b461-478f-a820-edcb67809eb1" ac:name="html" ac:schema-version="1">
+ <ac:plain-text-body><![CDATA[<div
+ data-netdata="web_log_nginx_netdata.response_statuses"
+ data-host="https://london.my-netdata.io"
+ data-chart-library="easypiechart"
+ data-after="-14400"
+ data-before="0"
+ data-points="4"
+ data-title="london"
+ data-method="sum"
+ data-append-options="unaligned"
+ data-update-every="60"
+ data-width="120px"
+ data-common-max="1h_requests_pie"
+ data-decimal-digits="0"
+></div>]]></ac:plain-text-body>
+ </ac:structured-macro>
+ <p class="auto-cursor-target">
+ <br/>
+ </p>
+ </div>
+ </td>
+ <td colspan="1" style="text-align: center;">
+ <div class="content-wrapper">
+ <p class="auto-cursor-target">
+ <br/>
+ </p>
+ <ac:structured-macro ac:macro-id="aff4446a-1432-407b-beb0-488c33eced18" ac:name="html" ac:schema-version="1">
+ <ac:plain-text-body><![CDATA[<div
+ data-netdata="web_log_nginx_netdata.response_statuses"
+ data-host="https://frankfurt.my-netdata.io"
+ data-chart-library="easypiechart"
+ data-after="-14400"
+ data-before="0"
+ data-points="4"
+ data-title="frankfurt"
+ data-method="sum"
+ data-append-options="unaligned"
+ data-update-every="60"
+ data-width="120px"
+ data-common-max="1h_requests_pie"
+ data-decimal-digits="0"
+></div>]]></ac:plain-text-body>
+ </ac:structured-macro>
+ <p class="auto-cursor-target">
+ <br/>
+ </p>
+ </div>
+ </td>
+ <td colspan="1" style="text-align: center;">
+ <div class="content-wrapper">
+ <p class="auto-cursor-target">
+ <br/>
+ </p>
+ <ac:structured-macro ac:macro-id="fd310534-627c-47bd-a184-361eb3f00489" ac:name="html" ac:schema-version="1">
+ <ac:plain-text-body><![CDATA[<div
+ data-netdata="web_log_nginx_netdata.response_statuses"
+ data-host="https://sanfrancisco.my-netdata.io"
+ data-chart-library="easypiechart"
+ data-after="-14400"
+ data-before="0"
+ data-points="4"
+ data-title="sanfrancisco"
+ data-method="sum"
+ data-append-options="unaligned"
+ data-update-every="60"
+ data-width="120px"
+ data-common-max="1h_requests_pie"
+ data-decimal-digits="0"
+></div>]]></ac:plain-text-body>
+ </ac:structured-macro>
+ <p class="auto-cursor-target">
+ <br/>
+ </p>
+ </div>
+ </td>
+ <td colspan="1" style="text-align: center;">
+ <div class="content-wrapper">
+ <p class="auto-cursor-target">
+ <br/>
+ </p>
+ <ac:structured-macro ac:macro-id="eb1261d5-8ff2-4a5c-8945-701bf04fb75b" ac:name="html" ac:schema-version="1">
+ <ac:plain-text-body><![CDATA[<div
+ data-netdata="web_log_nginx_netdata.response_statuses"
+ data-host="https://toronto.my-netdata.io"
+ data-chart-library="easypiechart"
+ data-after="-14400"
+ data-before="0"
+ data-points="4"
+ data-title="toronto"
+ data-method="sum"
+ data-append-options="unaligned"
+ data-update-every="60"
+ data-width="120px"
+ data-common-max="1h_requests_pie"
+ data-decimal-digits="0"
+></div>]]></ac:plain-text-body>
+ </ac:structured-macro>
+ <p class="auto-cursor-target">
+ <br/>
+ </p>
+ </div>
+ </td>
+ </tr>
+ <tr>
+ <td colspan="1" style="text-align: right;">
+ <strong>last<br/>1 hour</strong>
+ </td>
+ <td colspan="1" style="text-align: center;">
+ <div class="content-wrapper">
+ <p class="auto-cursor-target">
+ <br/>
+ </p>
+ <ac:structured-macro ac:macro-id="d2ee8425-2c6c-4e26-8c5a-17f6153fdce1" ac:name="html" ac:schema-version="1">
+ <ac:plain-text-body><![CDATA[<div
+ data-netdata="web_log_nginx_netdata.response_statuses"
+data-host="https://london.my-netdata.io"
+ data-legend="false"
+ data-dygraph-yaxislabelwidth="35"
+ data-dygraph-ypixelsperlabel="8"
+data-dygraph-xpixelsperlabel="30"
+data-dygraph-xaxislabelwidth="26"
+ data-after="-3600"
+ data-before="0"
+ data-title=""
+ data-height="100px"
+ data-width="300px"
+data-common-max="1h_requests"
+data-decimal-digits="0"
+></div>]]></ac:plain-text-body>
+ </ac:structured-macro>
+ <p class="auto-cursor-target">
+ <br/>
+ </p>
+ </div>
+ </td>
+ <td colspan="1" style="text-align: center;">
+ <div class="content-wrapper">
+ <p class="auto-cursor-target">
+ <br/>
+ </p>
+ <ac:structured-macro ac:macro-id="b3fb482a-4e9e-4b69-bb0b-9885d1687334" ac:name="html" ac:schema-version="1">
+ <ac:plain-text-body><![CDATA[<div
+ data-netdata="web_log_nginx_netdata.response_statuses"
+data-host="https://frankfurt.my-netdata.io"
+ data-legend="false"
+ data-dygraph-yaxislabelwidth="35"
+ data-dygraph-ypixelsperlabel="8"
+data-dygraph-xpixelsperlabel="30"
+data-dygraph-xaxislabelwidth="26"
+ data-after="-3600"
+ data-before="0"
+ data-title=""
+ data-height="100px"
+ data-width="300px"
+data-common-max="1h_requests"
+data-decimal-digits="0"
+></div>]]></ac:plain-text-body>
+ </ac:structured-macro>
+ <p class="auto-cursor-target">
+ <br/>
+ </p>
+ </div>
+ </td>
+ <td colspan="1" style="text-align: center;">
+ <div class="content-wrapper">
+ <p class="auto-cursor-target">
+ <br/>
+ </p>
+ <ac:structured-macro ac:macro-id="199b1618-64be-4614-9662-f84cd01c6d8d" ac:name="html" ac:schema-version="1">
+ <ac:plain-text-body><![CDATA[<div
+ data-netdata="web_log_nginx_netdata.response_statuses"
+data-host="https://sanfrancisco.my-netdata.io"
+ data-legend="false"
+ data-dygraph-yaxislabelwidth="35"
+ data-dygraph-ypixelsperlabel="8"
+data-dygraph-xpixelsperlabel="30"
+data-dygraph-xaxislabelwidth="26"
+ data-after="-3600"
+ data-before="0"
+ data-title=""
+ data-height="100px"
+ data-width="300px"
+data-common-max="1h_requests"
+data-decimal-digits="0"
+></div>]]></ac:plain-text-body>
+ </ac:structured-macro>
+ <p class="auto-cursor-target">
+ <br/>
+ </p>
+ </div>
+ </td>
+ <td colspan="1" style="text-align: center;">
+ <div class="content-wrapper">
+ <p class="auto-cursor-target">
+ <br/>
+ </p>
+ <ac:structured-macro ac:macro-id="61b2d444-fb2b-42e0-b4eb-611fb37dcb66" ac:name="html" ac:schema-version="1">
+ <ac:plain-text-body><![CDATA[<div
+ data-netdata="web_log_nginx_netdata.response_statuses"
+data-host="https://toronto.my-netdata.io"
+ data-legend="false"
+ data-dygraph-yaxislabelwidth="35"
+ data-dygraph-ypixelsperlabel="8"
+data-dygraph-xpixelsperlabel="30"
+data-dygraph-xaxislabelwidth="26"
+ data-after="-3600"
+ data-before="0"
+ data-title=""
+ data-height="100px"
+ data-width="300px"
+data-common-max="1h_requests"
+data-decimal-digits="0"
+></div>]]></ac:plain-text-body>
+ </ac:structured-macro>
+ <p class="auto-cursor-target">
+ <br/>
+ </p>
+ </div>
+ </td>
+ </tr>
+ <tr>
+ <td colspan="1" style="text-align: right;">
+ <strong>last 10<br/>minutes</strong>
+ </td>
+ <td colspan="1" style="text-align: center;">
+ <div class="content-wrapper">
+ <p class="auto-cursor-target">
+ <br/>
+ </p>
+ <ac:structured-macro ac:macro-id="f29e7663-f2e6-4e1d-a090-38704e0f2bd3" ac:name="html" ac:schema-version="1">
+ <ac:plain-text-body><![CDATA[<div
+ data-netdata="web_log_nginx_netdata.response_statuses"
+data-host="https://london.my-netdata.io"
+ data-legend="false"
+ data-dygraph-yaxislabelwidth="35"
+ data-dygraph-ypixelsperlabel="8"
+ data-after="-600"
+ data-before="0"
+ data-title=""
+ data-height="100px"
+ data-width="300px"
+data-common-max="10m_requests"
+data-decimal-digits="0"
+></div>]]></ac:plain-text-body>
+ </ac:structured-macro>
+ <p class="auto-cursor-target">
+ <br/>
+ </p>
+ </div>
+ </td>
+ <td colspan="1" style="text-align: center;">
+ <div class="content-wrapper">
+ <p class="auto-cursor-target">
+ <br/>
+ </p>
+ <ac:structured-macro ac:macro-id="245ccc90-1505-430b-ba13-15e6a9793c11" ac:name="html" ac:schema-version="1">
+ <ac:plain-text-body><![CDATA[<div
+ data-netdata="web_log_nginx_netdata.response_statuses"
+data-host="https://frankfurt.my-netdata.io"
+ data-legend="false"
+ data-dygraph-yaxislabelwidth="35"
+ data-dygraph-ypixelsperlabel="8"
+ data-after="-600"
+ data-before="0"
+ data-title=""
+ data-height="100px"
+ data-width="300px"
+data-common-max="10m_requests"
+data-decimal-digits="0"
+></div>]]></ac:plain-text-body>
+ </ac:structured-macro>
+ <p class="auto-cursor-target">
+ <br/>
+ </p>
+ </div>
+ </td>
+ <td colspan="1" style="text-align: center;">
+ <div class="content-wrapper">
+ <p class="auto-cursor-target">
+ <br/>
+ </p>
+ <ac:structured-macro ac:macro-id="864ff17f-f372-47e4-9d57-54e44b142240" ac:name="html" ac:schema-version="1">
+ <ac:plain-text-body><![CDATA[<div
+ data-netdata="web_log_nginx_netdata.response_statuses"
+data-host="https://sanfrancisco.my-netdata.io"
+ data-legend="false"
+ data-dygraph-yaxislabelwidth="35"
+ data-dygraph-ypixelsperlabel="8"
+ data-after="-600"
+ data-before="0"
+ data-title=""
+ data-height="100px"
+ data-width="300px"
+data-common-max="10m_requests"
+data-decimal-digits="0"
+></div>]]></ac:plain-text-body>
+ </ac:structured-macro>
+ <p class="auto-cursor-target">
+ <br/>
+ </p>
+ </div>
+ </td>
+ <td colspan="1" style="text-align: center;">
+ <div class="content-wrapper">
+ <p class="auto-cursor-target">
+ <br/>
+ </p>
+ <ac:structured-macro ac:macro-id="e0072f2b-0169-4ecf-8ddf-724270d185b8" ac:name="html" ac:schema-version="1">
+ <ac:plain-text-body><![CDATA[<div
+ data-netdata="web_log_nginx_netdata.response_statuses"
+data-host="https://toronto.my-netdata.io"
+ data-legend="false"
+ data-dygraph-yaxislabelwidth="35"
+ data-dygraph-ypixelsperlabel="8"
+ data-after="-600"
+ data-before="0"
+ data-title=""
+ data-height="100px"
+ data-width="300px"
+data-common-max="10m_requests"
+data-decimal-digits="0"
+></div>]]></ac:plain-text-body>
+ </ac:structured-macro>
+ <p class="auto-cursor-target">
+ <br/>
+ </p>
+ </div>
+ </td>
+ </tr>
+ <tr>
+ <td style="text-align: right;">
+ <strong>last 1<br/>minute</strong>
+ </td>
+ <td style="text-align: center;">
+ <div class="content-wrapper">
+ <p class="auto-cursor-target">
+ <br/>
+ </p>
+ <ac:structured-macro ac:macro-id="8c041cfb-a5a0-425c-afe6-207f4986cb26" ac:name="html" ac:schema-version="1">
+ <ac:plain-text-body><![CDATA[<embed src="https://london.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx_netdata.response_statuses&alarm=1m_successful&refresh=auto&label=1m%20london%20successful%20requests" type="image/svg+xml" height="20"/>
+<br/>
+<div
+ data-netdata="web_log_nginx_netdata.response_statuses"
+data-host="https://london.my-netdata.io"
+ data-legend="false"
+ data-dygraph-yaxislabelwidth="35"
+ data-dygraph-ypixelsperlabel="8"
+ data-after="-60"
+ data-before="0"
+ data-title=""
+ data-height="100px"
+ data-width="300px"
+ data-append-options="percentage"
+ data-decimal-digits="0"
+ data-dygraph-valuerange="[0, 100]"
+ data-dygraph-includezero="true"
+ data-units="%"
+data-dimensions="success"
+data-colors="#009900"
+></div>]]></ac:plain-text-body>
+ </ac:structured-macro>
+ <p class="auto-cursor-target">
+ <br/>
+ </p>
+ </div>
+ </td>
+ <td style="text-align: center;">
+ <div class="content-wrapper">
+ <p class="auto-cursor-target">
+ <br/>
+ </p>
+ <ac:structured-macro ac:macro-id="a3777583-9919-4997-891c-94a8cec60604" ac:name="html" ac:schema-version="1">
+ <ac:plain-text-body><![CDATA[<embed src="https://frankfurt.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx_netdata.response_statuses&alarm=1m_successful&refresh=auto&label=1m%20frankfurt%20successful%20requests" type="image/svg+xml" height="20"/>
+<br/>
+<div
+ data-netdata="web_log_nginx_netdata.response_statuses"
+data-host="https://frankfurt.my-netdata.io"
+ data-legend="false"
+ data-dygraph-yaxislabelwidth="35"
+ data-dygraph-ypixelsperlabel="8"
+ data-after="-60"
+ data-before="0"
+ data-title=""
+ data-height="100px"
+ data-width="300px"
+ data-append-options="percentage"
+ data-decimal-digits="0"
+ data-dygraph-valuerange="[0, 100]"
+ data-dygraph-includezero="true"
+ data-units="%"
+data-dimensions="success"
+data-colors="#009900"
+></div>]]></ac:plain-text-body>
+ </ac:structured-macro>
+ <p class="auto-cursor-target">
+ <br/>
+ </p>
+ </div>
+ </td>
+ <td colspan="1" style="text-align: center;">
+ <div class="content-wrapper">
+ <p class="auto-cursor-target">
+ <br/>
+ </p>
+ <ac:structured-macro ac:macro-id="e003deba-82fa-4aec-8264-6cb7d814a299" ac:name="html" ac:schema-version="1">
+ <ac:plain-text-body><![CDATA[<embed src="https://sanfrancisco.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx_netdata.response_statuses&alarm=1m_successful&refresh=auto&label=1m%20sanfrancisco%20successful%20requests" type="image/svg+xml" height="20"/>
+<br/>
+<div
+ data-netdata="web_log_nginx_netdata.response_statuses"
+data-host="https://sanfrancisco.my-netdata.io"
+ data-legend="false"
+ data-dygraph-yaxislabelwidth="35"
+ data-dygraph-ypixelsperlabel="8"
+ data-after="-60"
+ data-before="0"
+ data-title=""
+ data-height="100px"
+ data-width="300px"
+ data-append-options="percentage"
+ data-decimal-digits="0"
+ data-dygraph-valuerange="[0, 100]"
+ data-dygraph-includezero="true"
+ data-units="%"
+data-dimensions="success"
+data-colors="#009900"
+></div>]]></ac:plain-text-body>
+ </ac:structured-macro>
+ <p class="auto-cursor-target">
+ <br/>
+ </p>
+ </div>
+ </td>
+ <td colspan="1" style="text-align: center;">
+ <div class="content-wrapper">
+ <p class="auto-cursor-target">
+ <br/>
+ </p>
+ <ac:structured-macro ac:macro-id="046fcda5-98db-4776-8c51-3981d0e68f38" ac:name="html" ac:schema-version="1">
+ <ac:plain-text-body><![CDATA[<embed src="https://toronto.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx_netdata.response_statuses&alarm=1m_successful&refresh=auto&label=1m%20toronto%20successful%20requests" type="image/svg+xml" height="20"/>
+<br/>
+<div
+ data-netdata="web_log_nginx_netdata.response_statuses"
+data-host="https://toronto.my-netdata.io"
+ data-legend="false"
+ data-dygraph-yaxislabelwidth="35"
+ data-dygraph-ypixelsperlabel="8"
+ data-after="-60"
+ data-before="0"
+ data-title=""
+ data-height="100px"
+ data-width="300px"
+ data-append-options="percentage"
+ data-decimal-digits="0"
+ data-dygraph-valuerange="[0, 100]"
+ data-dygraph-includezero="true"
+ data-units="%"
+data-dimensions="success"
+data-colors="#009900"
+></div>]]></ac:plain-text-body>
+ </ac:structured-macro>
+ <p class="auto-cursor-target">
+ <br/>
+ </p>
+ </div>
+ </td>
+ </tr>
+ <tr>
+ <td colspan="1" style="text-align: right;">
+ <strong>now</strong>
+ </td>
+ <td colspan="1" style="text-align: left;">
+ <div class="content-wrapper">
+ <p class="auto-cursor-target">
+ <br/>
+ </p>
+ <ac:structured-macro ac:macro-id="4aef31d3-9439-439b-838d-7350a26bde5f" ac:name="html" ac:schema-version="1">
+ <ac:plain-text-body><![CDATA[<div style="width: 300px; text-align: center;">
+<div
+ data-netdata="web_log_nginx_netdata.response_statuses"
+ data-host="https://london.my-netdata.io"
+ data-dimensions="success"
+ data-chart-library="gauge"
+ data-title="Success"
+ data-units="requests/s"
+ data-gauge-adjust="width"
+ data-width="120"
+ data-before="0"
+ data-after="-60"
+ data-points="60"
+ data-common-max="response_statuses"
+ data-colors="#009900"
+ data-decimal-digits="0"
+ ></div><div
+ data-netdata="web_log_nginx_netdata.response_statuses"
+ data-host="https://london.my-netdata.io"
+ data-dimensions="redirect,error,bad,other"
+ data-chart-library="gauge"
+ data-title="All Others"
+ data-units="requests/s"
+ data-gauge-adjust="width"
+ data-width="120"
+ data-before="0"
+ data-after="-60"
+ data-points="60"
+ data-common-max="response_statuses"
+ data-colors="#fe7d37"
+ data-decimal-digits="0"
+ ></div>
+</div>
+<br/>
+<div
+ data-netdata="web_log_nginx_netdata.response_statuses"
+ data-host="https://london.my-netdata.io"
+ data-dygraph-theme="sparkline"
+ data-width="300"
+ data-height="20"
+ data-before="0"
+ data-after="-60"
+ data-points="60"
+ data-common-max="1m_requests_sparkline"
+ ></div>]]></ac:plain-text-body>
+ </ac:structured-macro>
+ <p class="auto-cursor-target">
+ <br/>
+ </p>
+ </div>
+ </td>
+ <td colspan="1" style="text-align: left;">
+ <div class="content-wrapper">
+ <p class="auto-cursor-target">
+ <br/>
+ </p>
+ <ac:structured-macro ac:macro-id="bf9fb1c4-ceaf-4ad8-972e-a64d23eb48f8" ac:name="html" ac:schema-version="1">
+ <ac:plain-text-body><![CDATA[<div style="width: 300px; text-align: center;">
+<div
+ data-netdata="web_log_nginx_netdata.response_statuses"
+ data-host="https://frankfurt.my-netdata.io"
+ data-dimensions="success"
+ data-chart-library="gauge"
+ data-title="Success"
+ data-units="requests/s"
+ data-gauge-adjust="width"
+ data-width="120"
+ data-before="0"
+ data-after="-60"
+ data-points="60"
+ data-common-max="response_statuses"
+ data-colors="#009900"
+ data-decimal-digits="0"
+ ></div><div
+ data-netdata="web_log_nginx_netdata.response_statuses"
+ data-host="https://frankfurt.my-netdata.io"
+ data-dimensions="redirect,error,bad,other"
+ data-chart-library="gauge"
+ data-title="All Others"
+ data-units="requests/s"
+ data-gauge-adjust="width"
+ data-width="120"
+ data-before="0"
+ data-after="-60"
+ data-points="60"
+ data-common-max="response_statuses"
+ data-colors="#fe7d37"
+ data-decimal-digits="0"
+ ></div>
+</div>
+<br/>
+<div
+ data-netdata="web_log_nginx_netdata.response_statuses"
+ data-host="https://frankfurt.my-netdata.io"
+ data-dygraph-theme="sparkline"
+ data-width="300"
+ data-height="20"
+ data-before="0"
+ data-after="-60"
+ data-points="60"
+ data-common-max="1m_requests_sparkline"
+ ></div>]]></ac:plain-text-body>
+ </ac:structured-macro>
+ <p class="auto-cursor-target">
+ <br/>
+ </p>
+ </div>
+ </td>
+ <td colspan="1">
+ <div class="content-wrapper">
+ <p class="auto-cursor-target">
+ <br/>
+ </p>
+ <ac:structured-macro ac:macro-id="60b4c9bc-353a-4e64-b7c8-365ae74156c4" ac:name="html" ac:schema-version="1">
+ <ac:plain-text-body><![CDATA[<div style="width: 300px; text-align: center;">
+<div
+ data-netdata="web_log_nginx_netdata.response_statuses"
+ data-host="https://sanfrancisco.my-netdata.io"
+ data-dimensions="success"
+ data-chart-library="gauge"
+ data-title="Success"
+ data-units="requests/s"
+ data-gauge-adjust="width"
+ data-width="120"
+ data-before="0"
+ data-after="-60"
+ data-points="60"
+ data-common-max="response_statuses"
+ data-colors="#009900"
+ data-decimal-digits="0"
+ ></div><div
+ data-netdata="web_log_nginx_netdata.response_statuses"
+ data-host="https://sanfrancisco.my-netdata.io"
+ data-dimensions="redirect,error,bad,other"
+ data-chart-library="gauge"
+ data-title="All Others"
+ data-units="requests/s"
+ data-gauge-adjust="width"
+ data-width="120"
+ data-before="0"
+ data-after="-60"
+ data-points="60"
+ data-common-max="response_statuses"
+ data-colors="#fe7d37"
+ data-decimal-digits="0"
+ ></div>
+</div>
+<br/>
+<div
+ data-netdata="web_log_nginx_netdata.response_statuses"
+ data-host="https://sanfrancisco.my-netdata.io"
+ data-dygraph-theme="sparkline"
+ data-width="300"
+ data-height="20"
+ data-before="0"
+ data-after="-60"
+ data-points="60"
+ data-common-max="1m_requests_sparkline"
+ ></div>]]></ac:plain-text-body>
+ </ac:structured-macro>
+ <p class="auto-cursor-target">
+ <br/>
+ </p>
+ </div>
+ </td>
+ <td colspan="1">
+ <div class="content-wrapper">
+ <p class="auto-cursor-target">
+ <br/>
+ </p>
+ <ac:structured-macro ac:macro-id="75e03235-9681-4aaf-bd85-b0ffbb9e3602" ac:name="html" ac:schema-version="1">
+ <ac:plain-text-body><![CDATA[<div style="width: 300px; text-align: center;">
+<div
+ data-netdata="web_log_nginx_netdata.response_statuses"
+ data-host="https://toronto.my-netdata.io"
+ data-dimensions="success"
+ data-chart-library="gauge"
+ data-title="Success"
+ data-units="requests/s"
+ data-gauge-adjust="width"
+ data-width="120"
+ data-before="0"
+ data-after="-60"
+ data-points="60"
+ data-common-max="response_statuses"
+ data-colors="#009900"
+ data-decimal-digits="0"
+ ></div><div
+ data-netdata="web_log_nginx_netdata.response_statuses"
+ data-host="https://toronto.my-netdata.io"
+ data-dimensions="redirect,error,bad,other"
+ data-chart-library="gauge"
+ data-title="All Others"
+ data-units="requests/s"
+ data-gauge-adjust="width"
+ data-width="120"
+ data-before="0"
+ data-after="-60"
+ data-points="60"
+ data-common-max="response_statuses"
+ data-colors="#fe7d37"
+ data-decimal-digits="0"
+ ></div>
+</div>
+<br/>
+<div
+ data-netdata="web_log_nginx_netdata.response_statuses"
+ data-host="https://toronto.my-netdata.io"
+ data-dygraph-theme="sparkline"
+ data-width="300"
+ data-height="20"
+ data-before="0"
+ data-after="-60"
+ data-points="60"
+ data-common-max="1m_requests_sparkline"
+ ></div>]]></ac:plain-text-body>
+ </ac:structured-macro>
+ <p class="auto-cursor-target">
+ <br/>
+ </p>
+ </div>
+ </td>
+ </tr>
+ </tbody>
+</table>
+<p class="auto-cursor-target">
+ <br/>
+</p>
+<p>
+ <br/>
+</p>
+<ac:structured-macro ac:macro-id="10bbb1a6-cd65-4a27-9b3a-cb86a5a0ebe1" ac:name="html" ac:schema-version="1">
+ <ac:plain-text-body><![CDATA[<script>
+// don't load bootstrap - confluence does not need this
+var netdataNoBootstrap = true;
+
+// select the web notifications to show on this dashboard
+// var netdataShowAlarms = true;
+// var netdataAlarmsRecipients = [ 'sysadmin', 'webmaster' ];
+</script>
+
+<script src="https://london.my-netdata.io/dashboard.js"></script>
+
+
+<script>
+// do not hide dimensions with just zeros
+NETDATA.options.current.eliminate_zero_dimensions = false;
+</script>]]></ac:plain-text-body>
+</ac:structured-macro>
+<p class="auto-cursor-target">
+ <br/>
+</p>
+<div>
+ <span style="color: rgb(52,52,52);font-family: &quot;Source Code Pro&quot; , monospace;font-size: 16.2px;white-space: pre-wrap;background-color: rgb(252,252,252);">
+ <br/>
+ </span>
+</div>
+<div>
+ <span style="color: rgb(52,52,52);font-family: &quot;Source Code Pro&quot; , monospace;font-size: 16.2px;white-space: pre-wrap;background-color: rgb(252,252,252);">
+ <br/>
+ </span>
+</div>
+```
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fweb%2Fgui%2Fconfluence%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/web/gui/console.html b/web/gui/console.html
new file mode 100644
index 0000000..b85b4ff
--- /dev/null
+++ b/web/gui/console.html
@@ -0,0 +1,72 @@
+<!DOCTYPE html>
+<html>
+
+<head>
+ <title>Netdata Console</title>
+ <meta name="application-name" content="netdata" />
+ <meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
+ <meta charset="utf-8" />
+ <meta http-equiv="X-UA-Compatible" content="IE=edge,chrome=1" />
+ <meta name="viewport" content="width=device-width, initial-scale=1" />
+ <meta name="apple-mobile-web-app-capable" content="yes" />
+ <meta name="apple-mobile-web-app-status-bar-style" content="black-translucent" />
+ <link rel="icon" href="data:image/x-icon;base64,iVBORw0KGgoAAAANSUhEUgAAACAAAAAgCAYAAABzenr0AAAAGXRFWHRTb2Z0d2FyZQBBZG9iZSBJbWFnZVJlYWR5ccllPAAAAP9JREFUeNpiYBgFo+A/w34gpiZ8DzWzAYgNiHGAA5UdgA73g+2gcyhgg/0DGQoweB6IBQYyFCCOGOBQwBMd/xnW09ERDtgcoEBHB+zHFQrz6egIBUasocDAcJ9OxWAhE4YQI8MDILmATg7wZ8QRDfQKhQf4Cie6pAVGPA4AhQKo0BCgZRAw4ZSBpIWJNI6CD4wEKikBaFqgVSgcYMIrzcjwgcahcIGRiPYCLUPBkNhWUwP9akVcoQBpatG4MsLviAIqWj6f3Absfdq2igg7IIEKDVQKEzN5ofAenJCp1I8gJRTug5tfkGIdR1FDniMI+QZUjF8Amn5htOdHCAAEGACE6B0cS6mrEwAAAABJRU5ErkJggg==" />
+ <link rel="stylesheet" href="https://fonts.googleapis.com/icon?family=Material+Icons" />
+ <!-- Google Tag Manager -->
+ <script>(function (w, d, s, l, i) {
+ w[l] = w[l] || []; w[l].push({
+ 'gtm.start':
+ new Date().getTime(), event: 'gtm.js'
+ }); var f = d.getElementsByTagName(s)[0],
+ j = d.createElement(s), dl = l != 'dataLayer' ? '&l=' + l : ''; j.async = true; j.src =
+ 'https://www.googletagmanager.com/gtm.js?id=' + i + dl; f.parentNode.insertBefore(j, f);
+ })(window, document, 'script', 'dataLayer', 'GTM-N6CBMJD');
+ window.dataLayer = window.dataLayer || [];
+ </script>
+ <!-- End Google Tag Manager -->
+ <script>
+ function loadJSFile(filename) {
+ const s = document.createElement("script")
+ s.setAttribute("type", "text/javascript")
+ s.setAttribute("src", filename)
+ document.getElementsByTagName("head")[0].appendChild(s)
+ }
+
+ function loadCSSFile(filename) {
+ const l = document.createElement("link")
+ l.setAttribute("rel", "stylesheet")
+ l.setAttribute("type", "text/css")
+ l.setAttribute("href", filename)
+ document.getElementsByTagName("head")[0].appendChild(l)
+ }
+
+ var cloudBaseURL = localStorage.getItem("cloud.baseURL") || "https://netdata.cloud"
+
+ loadCSSFile(`${cloudBaseURL}/static/console/main.css?v=4`)
+ </script>
+</head>
+
+<body class="mdc-theme--background mdc-typography">
+ <!-- Google Tag Manager (noscript) -->
+ <noscript>
+ <iframe src="https://www.googletagmanager.com/ns.html?id=GTM-N6CBMJD" height="0" width="0"
+ style="display:none;visibility:hidden"></iframe>
+ </noscript>
+ <!-- End Google Tag Manager (noscript) -->
+ <main id="app" class="console-app" data-host="agent"></main>
+ <script>
+ const main = document.getElementById("app")
+ main.setAttribute("data-static-base-url", cloudBaseURL)
+ main.setAttribute("data-api-base-url", cloudBaseURL)
+ main.setAttribute("data-api-base-url-v2", cloudBaseURL)
+
+ loadJSFile(`${cloudBaseURL}/static/console/main.js?v=4`)
+ </script>
+ <script>
+ var netdataTheme = "white"
+ var netdataNoBootstrap = true
+ </script>
+ <script type="text/javascript" src="dashboard.js?v20190902-0"></script>
+</body>
+
+</html> \ No newline at end of file
diff --git a/web/gui/css/bootstrap-3.3.7.css b/web/gui/css/bootstrap-3.3.7.css
new file mode 100644
index 0000000..8c4db1f
--- /dev/null
+++ b/web/gui/css/bootstrap-3.3.7.css
@@ -0,0 +1,6758 @@
+/*!
+ * Bootstrap v3.3.7 (http://getbootstrap.com)
+ * Copyright 2011-2016 Twitter, Inc.
+ * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)
+ * SPDX-License-Identifier: MIT
+ */
+/*! normalize.css v3.0.3 | MIT License | github.com/necolas/normalize.css */
+html {
+ font-family: sans-serif;
+ -webkit-text-size-adjust: 100%;
+ -ms-text-size-adjust: 100%;
+}
+body {
+ margin: 0;
+}
+article,
+aside,
+details,
+figcaption,
+figure,
+footer,
+header,
+hgroup,
+main,
+menu,
+nav,
+section,
+summary {
+ display: block;
+}
+audio,
+canvas,
+progress,
+video {
+ display: inline-block;
+ vertical-align: baseline;
+}
+audio:not([controls]) {
+ display: none;
+ height: 0;
+}
+[hidden],
+template {
+ display: none;
+}
+a {
+ background-color: transparent;
+}
+a:active,
+a:hover {
+ outline: 0;
+}
+abbr[title] {
+ border-bottom: 1px dotted;
+}
+b,
+strong {
+ font-weight: bold;
+}
+dfn {
+ font-style: italic;
+}
+h1 {
+ margin: .67em 0;
+ font-size: 2em;
+}
+mark {
+ color: #000;
+ background: #ff0;
+}
+small {
+ font-size: 80%;
+}
+sub,
+sup {
+ position: relative;
+ font-size: 75%;
+ line-height: 0;
+ vertical-align: baseline;
+}
+sup {
+ top: -.5em;
+}
+sub {
+ bottom: -.25em;
+}
+img {
+ border: 0;
+}
+svg:not(:root) {
+ overflow: hidden;
+}
+figure {
+ margin: 1em 40px;
+}
+hr {
+ height: 0;
+ -webkit-box-sizing: content-box;
+ -moz-box-sizing: content-box;
+ box-sizing: content-box;
+}
+pre {
+ overflow: auto;
+}
+code,
+kbd,
+pre,
+samp {
+ font-family: monospace, monospace;
+ font-size: 1em;
+}
+button,
+input,
+optgroup,
+select,
+textarea {
+ margin: 0;
+ font: inherit;
+ color: inherit;
+}
+button {
+ overflow: visible;
+}
+button,
+select {
+ text-transform: none;
+}
+button,
+html input[type="button"],
+input[type="reset"],
+input[type="submit"] {
+ -webkit-appearance: button;
+ cursor: pointer;
+}
+button[disabled],
+html input[disabled] {
+ cursor: default;
+}
+button::-moz-focus-inner,
+input::-moz-focus-inner {
+ padding: 0;
+ border: 0;
+}
+input {
+ line-height: normal;
+}
+input[type="checkbox"],
+input[type="radio"] {
+ -webkit-box-sizing: border-box;
+ -moz-box-sizing: border-box;
+ box-sizing: border-box;
+ padding: 0;
+}
+input[type="number"]::-webkit-inner-spin-button,
+input[type="number"]::-webkit-outer-spin-button {
+ height: auto;
+}
+input[type="search"] {
+ -webkit-box-sizing: content-box;
+ -moz-box-sizing: content-box;
+ box-sizing: content-box;
+ -webkit-appearance: textfield;
+}
+input[type="search"]::-webkit-search-cancel-button,
+input[type="search"]::-webkit-search-decoration {
+ -webkit-appearance: none;
+}
+fieldset {
+ padding: .35em .625em .75em;
+ margin: 0 2px;
+ border: 1px solid #c0c0c0;
+}
+legend {
+ padding: 0;
+ border: 0;
+}
+textarea {
+ overflow: auto;
+}
+optgroup {
+ font-weight: bold;
+}
+table {
+ border-spacing: 0;
+ border-collapse: collapse;
+}
+td,
+th {
+ padding: 0;
+}
+/*! Source: https://github.com/h5bp/html5-boilerplate/blob/master/src/css/main.css */
+@media print {
+ *,
+ *:before,
+ *:after {
+ color: #000 !important;
+ text-shadow: none !important;
+ background: transparent !important;
+ -webkit-box-shadow: none !important;
+ box-shadow: none !important;
+ }
+ a,
+ a:visited {
+ text-decoration: underline;
+ }
+ a[href]:after {
+ content: " (" attr(href) ")";
+ }
+ abbr[title]:after {
+ content: " (" attr(title) ")";
+ }
+ a[href^="#"]:after,
+ a[href^="javascript:"]:after {
+ content: "";
+ }
+ pre,
+ blockquote {
+ border: 1px solid #999;
+
+ page-break-inside: avoid;
+ }
+ thead {
+ display: table-header-group;
+ }
+ tr,
+ img {
+ page-break-inside: avoid;
+ }
+ img {
+ max-width: 100% !important;
+ }
+ p,
+ h2,
+ h3 {
+ orphans: 3;
+ widows: 3;
+ }
+ h2,
+ h3 {
+ page-break-after: avoid;
+ }
+ .navbar {
+ display: none;
+ }
+ .btn > .caret,
+ .dropup > .btn > .caret {
+ border-top-color: #000 !important;
+ }
+ .label {
+ border: 1px solid #000;
+ }
+ .table {
+ border-collapse: collapse !important;
+ }
+ .table td,
+ .table th {
+ background-color: #fff !important;
+ }
+ .table-bordered th,
+ .table-bordered td {
+ border: 1px solid #ddd !important;
+ }
+}
+@font-face {
+ font-family: 'Glyphicons Halflings';
+
+ src: url('../fonts/glyphicons-halflings-regular.eot');
+ src: url('../fonts/glyphicons-halflings-regular.eot?#iefix') format('embedded-opentype'), url('../fonts/glyphicons-halflings-regular.woff2') format('woff2'), url('../fonts/glyphicons-halflings-regular.woff') format('woff'), url('../fonts/glyphicons-halflings-regular.ttf') format('truetype'), url('../fonts/glyphicons-halflings-regular.svg#glyphicons_halflingsregular') format('svg');
+}
+.glyphicon {
+ position: relative;
+ top: 1px;
+ display: inline-block;
+ font-family: 'Glyphicons Halflings';
+ font-style: normal;
+ font-weight: normal;
+ line-height: 1;
+
+ -webkit-font-smoothing: antialiased;
+ -moz-osx-font-smoothing: grayscale;
+}
+.glyphicon-asterisk:before {
+ content: "\002a";
+}
+.glyphicon-plus:before {
+ content: "\002b";
+}
+.glyphicon-euro:before,
+.glyphicon-eur:before {
+ content: "\20ac";
+}
+.glyphicon-minus:before {
+ content: "\2212";
+}
+.glyphicon-cloud:before {
+ content: "\2601";
+}
+.glyphicon-envelope:before {
+ content: "\2709";
+}
+.glyphicon-pencil:before {
+ content: "\270f";
+}
+.glyphicon-glass:before {
+ content: "\e001";
+}
+.glyphicon-music:before {
+ content: "\e002";
+}
+.glyphicon-search:before {
+ content: "\e003";
+}
+.glyphicon-heart:before {
+ content: "\e005";
+}
+.glyphicon-star:before {
+ content: "\e006";
+}
+.glyphicon-star-empty:before {
+ content: "\e007";
+}
+.glyphicon-user:before {
+ content: "\e008";
+}
+.glyphicon-film:before {
+ content: "\e009";
+}
+.glyphicon-th-large:before {
+ content: "\e010";
+}
+.glyphicon-th:before {
+ content: "\e011";
+}
+.glyphicon-th-list:before {
+ content: "\e012";
+}
+.glyphicon-ok:before {
+ content: "\e013";
+}
+.glyphicon-remove:before {
+ content: "\e014";
+}
+.glyphicon-zoom-in:before {
+ content: "\e015";
+}
+.glyphicon-zoom-out:before {
+ content: "\e016";
+}
+.glyphicon-off:before {
+ content: "\e017";
+}
+.glyphicon-signal:before {
+ content: "\e018";
+}
+.glyphicon-cog:before {
+ content: "\e019";
+}
+.glyphicon-trash:before {
+ content: "\e020";
+}
+.glyphicon-home:before {
+ content: "\e021";
+}
+.glyphicon-file:before {
+ content: "\e022";
+}
+.glyphicon-time:before {
+ content: "\e023";
+}
+.glyphicon-road:before {
+ content: "\e024";
+}
+.glyphicon-download-alt:before {
+ content: "\e025";
+}
+.glyphicon-download:before {
+ content: "\e026";
+}
+.glyphicon-upload:before {
+ content: "\e027";
+}
+.glyphicon-inbox:before {
+ content: "\e028";
+}
+.glyphicon-play-circle:before {
+ content: "\e029";
+}
+.glyphicon-repeat:before {
+ content: "\e030";
+}
+.glyphicon-refresh:before {
+ content: "\e031";
+}
+.glyphicon-list-alt:before {
+ content: "\e032";
+}
+.glyphicon-lock:before {
+ content: "\e033";
+}
+.glyphicon-flag:before {
+ content: "\e034";
+}
+.glyphicon-headphones:before {
+ content: "\e035";
+}
+.glyphicon-volume-off:before {
+ content: "\e036";
+}
+.glyphicon-volume-down:before {
+ content: "\e037";
+}
+.glyphicon-volume-up:before {
+ content: "\e038";
+}
+.glyphicon-qrcode:before {
+ content: "\e039";
+}
+.glyphicon-barcode:before {
+ content: "\e040";
+}
+.glyphicon-tag:before {
+ content: "\e041";
+}
+.glyphicon-tags:before {
+ content: "\e042";
+}
+.glyphicon-book:before {
+ content: "\e043";
+}
+.glyphicon-bookmark:before {
+ content: "\e044";
+}
+.glyphicon-print:before {
+ content: "\e045";
+}
+.glyphicon-camera:before {
+ content: "\e046";
+}
+.glyphicon-font:before {
+ content: "\e047";
+}
+.glyphicon-bold:before {
+ content: "\e048";
+}
+.glyphicon-italic:before {
+ content: "\e049";
+}
+.glyphicon-text-height:before {
+ content: "\e050";
+}
+.glyphicon-text-width:before {
+ content: "\e051";
+}
+.glyphicon-align-left:before {
+ content: "\e052";
+}
+.glyphicon-align-center:before {
+ content: "\e053";
+}
+.glyphicon-align-right:before {
+ content: "\e054";
+}
+.glyphicon-align-justify:before {
+ content: "\e055";
+}
+.glyphicon-list:before {
+ content: "\e056";
+}
+.glyphicon-indent-left:before {
+ content: "\e057";
+}
+.glyphicon-indent-right:before {
+ content: "\e058";
+}
+.glyphicon-facetime-video:before {
+ content: "\e059";
+}
+.glyphicon-picture:before {
+ content: "\e060";
+}
+.glyphicon-map-marker:before {
+ content: "\e062";
+}
+.glyphicon-adjust:before {
+ content: "\e063";
+}
+.glyphicon-tint:before {
+ content: "\e064";
+}
+.glyphicon-edit:before {
+ content: "\e065";
+}
+.glyphicon-share:before {
+ content: "\e066";
+}
+.glyphicon-check:before {
+ content: "\e067";
+}
+.glyphicon-move:before {
+ content: "\e068";
+}
+.glyphicon-step-backward:before {
+ content: "\e069";
+}
+.glyphicon-fast-backward:before {
+ content: "\e070";
+}
+.glyphicon-backward:before {
+ content: "\e071";
+}
+.glyphicon-play:before {
+ content: "\e072";
+}
+.glyphicon-pause:before {
+ content: "\e073";
+}
+.glyphicon-stop:before {
+ content: "\e074";
+}
+.glyphicon-forward:before {
+ content: "\e075";
+}
+.glyphicon-fast-forward:before {
+ content: "\e076";
+}
+.glyphicon-step-forward:before {
+ content: "\e077";
+}
+.glyphicon-eject:before {
+ content: "\e078";
+}
+.glyphicon-chevron-left:before {
+ content: "\e079";
+}
+.glyphicon-chevron-right:before {
+ content: "\e080";
+}
+.glyphicon-plus-sign:before {
+ content: "\e081";
+}
+.glyphicon-minus-sign:before {
+ content: "\e082";
+}
+.glyphicon-remove-sign:before {
+ content: "\e083";
+}
+.glyphicon-ok-sign:before {
+ content: "\e084";
+}
+.glyphicon-question-sign:before {
+ content: "\e085";
+}
+.glyphicon-info-sign:before {
+ content: "\e086";
+}
+.glyphicon-screenshot:before {
+ content: "\e087";
+}
+.glyphicon-remove-circle:before {
+ content: "\e088";
+}
+.glyphicon-ok-circle:before {
+ content: "\e089";
+}
+.glyphicon-ban-circle:before {
+ content: "\e090";
+}
+.glyphicon-arrow-left:before {
+ content: "\e091";
+}
+.glyphicon-arrow-right:before {
+ content: "\e092";
+}
+.glyphicon-arrow-up:before {
+ content: "\e093";
+}
+.glyphicon-arrow-down:before {
+ content: "\e094";
+}
+.glyphicon-share-alt:before {
+ content: "\e095";
+}
+.glyphicon-resize-full:before {
+ content: "\e096";
+}
+.glyphicon-resize-small:before {
+ content: "\e097";
+}
+.glyphicon-exclamation-sign:before {
+ content: "\e101";
+}
+.glyphicon-gift:before {
+ content: "\e102";
+}
+.glyphicon-leaf:before {
+ content: "\e103";
+}
+.glyphicon-fire:before {
+ content: "\e104";
+}
+.glyphicon-eye-open:before {
+ content: "\e105";
+}
+.glyphicon-eye-close:before {
+ content: "\e106";
+}
+.glyphicon-warning-sign:before {
+ content: "\e107";
+}
+.glyphicon-plane:before {
+ content: "\e108";
+}
+.glyphicon-calendar:before {
+ content: "\e109";
+}
+.glyphicon-random:before {
+ content: "\e110";
+}
+.glyphicon-comment:before {
+ content: "\e111";
+}
+.glyphicon-magnet:before {
+ content: "\e112";
+}
+.glyphicon-chevron-up:before {
+ content: "\e113";
+}
+.glyphicon-chevron-down:before {
+ content: "\e114";
+}
+.glyphicon-retweet:before {
+ content: "\e115";
+}
+.glyphicon-shopping-cart:before {
+ content: "\e116";
+}
+.glyphicon-folder-close:before {
+ content: "\e117";
+}
+.glyphicon-folder-open:before {
+ content: "\e118";
+}
+.glyphicon-resize-vertical:before {
+ content: "\e119";
+}
+.glyphicon-resize-horizontal:before {
+ content: "\e120";
+}
+.glyphicon-hdd:before {
+ content: "\e121";
+}
+.glyphicon-bullhorn:before {
+ content: "\e122";
+}
+.glyphicon-bell:before {
+ content: "\e123";
+}
+.glyphicon-certificate:before {
+ content: "\e124";
+}
+.glyphicon-thumbs-up:before {
+ content: "\e125";
+}
+.glyphicon-thumbs-down:before {
+ content: "\e126";
+}
+.glyphicon-hand-right:before {
+ content: "\e127";
+}
+.glyphicon-hand-left:before {
+ content: "\e128";
+}
+.glyphicon-hand-up:before {
+ content: "\e129";
+}
+.glyphicon-hand-down:before {
+ content: "\e130";
+}
+.glyphicon-circle-arrow-right:before {
+ content: "\e131";
+}
+.glyphicon-circle-arrow-left:before {
+ content: "\e132";
+}
+.glyphicon-circle-arrow-up:before {
+ content: "\e133";
+}
+.glyphicon-circle-arrow-down:before {
+ content: "\e134";
+}
+.glyphicon-globe:before {
+ content: "\e135";
+}
+.glyphicon-wrench:before {
+ content: "\e136";
+}
+.glyphicon-tasks:before {
+ content: "\e137";
+}
+.glyphicon-filter:before {
+ content: "\e138";
+}
+.glyphicon-briefcase:before {
+ content: "\e139";
+}
+.glyphicon-fullscreen:before {
+ content: "\e140";
+}
+.glyphicon-dashboard:before {
+ content: "\e141";
+}
+.glyphicon-paperclip:before {
+ content: "\e142";
+}
+.glyphicon-heart-empty:before {
+ content: "\e143";
+}
+.glyphicon-link:before {
+ content: "\e144";
+}
+.glyphicon-phone:before {
+ content: "\e145";
+}
+.glyphicon-pushpin:before {
+ content: "\e146";
+}
+.glyphicon-usd:before {
+ content: "\e148";
+}
+.glyphicon-gbp:before {
+ content: "\e149";
+}
+.glyphicon-sort:before {
+ content: "\e150";
+}
+.glyphicon-sort-by-alphabet:before {
+ content: "\e151";
+}
+.glyphicon-sort-by-alphabet-alt:before {
+ content: "\e152";
+}
+.glyphicon-sort-by-order:before {
+ content: "\e153";
+}
+.glyphicon-sort-by-order-alt:before {
+ content: "\e154";
+}
+.glyphicon-sort-by-attributes:before {
+ content: "\e155";
+}
+.glyphicon-sort-by-attributes-alt:before {
+ content: "\e156";
+}
+.glyphicon-unchecked:before {
+ content: "\e157";
+}
+.glyphicon-expand:before {
+ content: "\e158";
+}
+.glyphicon-collapse-down:before {
+ content: "\e159";
+}
+.glyphicon-collapse-up:before {
+ content: "\e160";
+}
+.glyphicon-log-in:before {
+ content: "\e161";
+}
+.glyphicon-flash:before {
+ content: "\e162";
+}
+.glyphicon-log-out:before {
+ content: "\e163";
+}
+.glyphicon-new-window:before {
+ content: "\e164";
+}
+.glyphicon-record:before {
+ content: "\e165";
+}
+.glyphicon-save:before {
+ content: "\e166";
+}
+.glyphicon-open:before {
+ content: "\e167";
+}
+.glyphicon-saved:before {
+ content: "\e168";
+}
+.glyphicon-import:before {
+ content: "\e169";
+}
+.glyphicon-export:before {
+ content: "\e170";
+}
+.glyphicon-send:before {
+ content: "\e171";
+}
+.glyphicon-floppy-disk:before {
+ content: "\e172";
+}
+.glyphicon-floppy-saved:before {
+ content: "\e173";
+}
+.glyphicon-floppy-remove:before {
+ content: "\e174";
+}
+.glyphicon-floppy-save:before {
+ content: "\e175";
+}
+.glyphicon-floppy-open:before {
+ content: "\e176";
+}
+.glyphicon-credit-card:before {
+ content: "\e177";
+}
+.glyphicon-transfer:before {
+ content: "\e178";
+}
+.glyphicon-cutlery:before {
+ content: "\e179";
+}
+.glyphicon-header:before {
+ content: "\e180";
+}
+.glyphicon-compressed:before {
+ content: "\e181";
+}
+.glyphicon-earphone:before {
+ content: "\e182";
+}
+.glyphicon-phone-alt:before {
+ content: "\e183";
+}
+.glyphicon-tower:before {
+ content: "\e184";
+}
+.glyphicon-stats:before {
+ content: "\e185";
+}
+.glyphicon-sd-video:before {
+ content: "\e186";
+}
+.glyphicon-hd-video:before {
+ content: "\e187";
+}
+.glyphicon-subtitles:before {
+ content: "\e188";
+}
+.glyphicon-sound-stereo:before {
+ content: "\e189";
+}
+.glyphicon-sound-dolby:before {
+ content: "\e190";
+}
+.glyphicon-sound-5-1:before {
+ content: "\e191";
+}
+.glyphicon-sound-6-1:before {
+ content: "\e192";
+}
+.glyphicon-sound-7-1:before {
+ content: "\e193";
+}
+.glyphicon-copyright-mark:before {
+ content: "\e194";
+}
+.glyphicon-registration-mark:before {
+ content: "\e195";
+}
+.glyphicon-cloud-download:before {
+ content: "\e197";
+}
+.glyphicon-cloud-upload:before {
+ content: "\e198";
+}
+.glyphicon-tree-conifer:before {
+ content: "\e199";
+}
+.glyphicon-tree-deciduous:before {
+ content: "\e200";
+}
+.glyphicon-cd:before {
+ content: "\e201";
+}
+.glyphicon-save-file:before {
+ content: "\e202";
+}
+.glyphicon-open-file:before {
+ content: "\e203";
+}
+.glyphicon-level-up:before {
+ content: "\e204";
+}
+.glyphicon-copy:before {
+ content: "\e205";
+}
+.glyphicon-paste:before {
+ content: "\e206";
+}
+.glyphicon-alert:before {
+ content: "\e209";
+}
+.glyphicon-equalizer:before {
+ content: "\e210";
+}
+.glyphicon-king:before {
+ content: "\e211";
+}
+.glyphicon-queen:before {
+ content: "\e212";
+}
+.glyphicon-pawn:before {
+ content: "\e213";
+}
+.glyphicon-bishop:before {
+ content: "\e214";
+}
+.glyphicon-knight:before {
+ content: "\e215";
+}
+.glyphicon-baby-formula:before {
+ content: "\e216";
+}
+.glyphicon-tent:before {
+ content: "\26fa";
+}
+.glyphicon-blackboard:before {
+ content: "\e218";
+}
+.glyphicon-bed:before {
+ content: "\e219";
+}
+.glyphicon-apple:before {
+ content: "\f8ff";
+}
+.glyphicon-erase:before {
+ content: "\e221";
+}
+.glyphicon-hourglass:before {
+ content: "\231b";
+}
+.glyphicon-lamp:before {
+ content: "\e223";
+}
+.glyphicon-duplicate:before {
+ content: "\e224";
+}
+.glyphicon-piggy-bank:before {
+ content: "\e225";
+}
+.glyphicon-scissors:before {
+ content: "\e226";
+}
+.glyphicon-bitcoin:before {
+ content: "\e227";
+}
+.glyphicon-btc:before {
+ content: "\e227";
+}
+.glyphicon-xbt:before {
+ content: "\e227";
+}
+.glyphicon-yen:before {
+ content: "\00a5";
+}
+.glyphicon-jpy:before {
+ content: "\00a5";
+}
+.glyphicon-ruble:before {
+ content: "\20bd";
+}
+.glyphicon-rub:before {
+ content: "\20bd";
+}
+.glyphicon-scale:before {
+ content: "\e230";
+}
+.glyphicon-ice-lolly:before {
+ content: "\e231";
+}
+.glyphicon-ice-lolly-tasted:before {
+ content: "\e232";
+}
+.glyphicon-education:before {
+ content: "\e233";
+}
+.glyphicon-option-horizontal:before {
+ content: "\e234";
+}
+.glyphicon-option-vertical:before {
+ content: "\e235";
+}
+.glyphicon-menu-hamburger:before {
+ content: "\e236";
+}
+.glyphicon-modal-window:before {
+ content: "\e237";
+}
+.glyphicon-oil:before {
+ content: "\e238";
+}
+.glyphicon-grain:before {
+ content: "\e239";
+}
+.glyphicon-sunglasses:before {
+ content: "\e240";
+}
+.glyphicon-text-size:before {
+ content: "\e241";
+}
+.glyphicon-text-color:before {
+ content: "\e242";
+}
+.glyphicon-text-background:before {
+ content: "\e243";
+}
+.glyphicon-object-align-top:before {
+ content: "\e244";
+}
+.glyphicon-object-align-bottom:before {
+ content: "\e245";
+}
+.glyphicon-object-align-horizontal:before {
+ content: "\e246";
+}
+.glyphicon-object-align-left:before {
+ content: "\e247";
+}
+.glyphicon-object-align-vertical:before {
+ content: "\e248";
+}
+.glyphicon-object-align-right:before {
+ content: "\e249";
+}
+.glyphicon-triangle-right:before {
+ content: "\e250";
+}
+.glyphicon-triangle-left:before {
+ content: "\e251";
+}
+.glyphicon-triangle-bottom:before {
+ content: "\e252";
+}
+.glyphicon-triangle-top:before {
+ content: "\e253";
+}
+.glyphicon-console:before {
+ content: "\e254";
+}
+.glyphicon-superscript:before {
+ content: "\e255";
+}
+.glyphicon-subscript:before {
+ content: "\e256";
+}
+.glyphicon-menu-left:before {
+ content: "\e257";
+}
+.glyphicon-menu-right:before {
+ content: "\e258";
+}
+.glyphicon-menu-down:before {
+ content: "\e259";
+}
+.glyphicon-menu-up:before {
+ content: "\e260";
+}
+* {
+ -webkit-box-sizing: border-box;
+ -moz-box-sizing: border-box;
+ box-sizing: border-box;
+}
+*:before,
+*:after {
+ -webkit-box-sizing: border-box;
+ -moz-box-sizing: border-box;
+ box-sizing: border-box;
+}
+html {
+ font-size: 10px;
+
+ -webkit-tap-highlight-color: rgba(0, 0, 0, 0);
+}
+body {
+ font-family: "Helvetica Neue", Helvetica, Arial, sans-serif;
+ font-size: 14px;
+ line-height: 1.42857143;
+ color: #333;
+ background-color: #fff;
+}
+input,
+button,
+select,
+textarea {
+ font-family: inherit;
+ font-size: inherit;
+ line-height: inherit;
+}
+a {
+ color: #337ab7;
+ text-decoration: none;
+}
+a:hover,
+a:focus {
+ color: #23527c;
+ text-decoration: underline;
+}
+a:focus {
+ outline: 5px auto -webkit-focus-ring-color;
+ outline-offset: -2px;
+}
+figure {
+ margin: 0;
+}
+img {
+ vertical-align: middle;
+}
+.img-responsive,
+.thumbnail > img,
+.thumbnail a > img,
+.carousel-inner > .item > img,
+.carousel-inner > .item > a > img {
+ display: block;
+ max-width: 100%;
+ height: auto;
+}
+.img-rounded {
+ border-radius: 6px;
+}
+.img-thumbnail {
+ display: inline-block;
+ max-width: 100%;
+ height: auto;
+ padding: 4px;
+ line-height: 1.42857143;
+ background-color: #fff;
+ border: 1px solid #ddd;
+ border-radius: 4px;
+ -webkit-transition: all .2s ease-in-out;
+ -o-transition: all .2s ease-in-out;
+ transition: all .2s ease-in-out;
+}
+.img-circle {
+ border-radius: 50%;
+}
+hr {
+ margin-top: 20px;
+ margin-bottom: 20px;
+ border: 0;
+ border-top: 1px solid #eee;
+}
+.sr-only {
+ position: absolute;
+ width: 1px;
+ height: 1px;
+ padding: 0;
+ margin: -1px;
+ overflow: hidden;
+ clip: rect(0, 0, 0, 0);
+ border: 0;
+}
+.sr-only-focusable:active,
+.sr-only-focusable:focus {
+ position: static;
+ width: auto;
+ height: auto;
+ margin: 0;
+ overflow: visible;
+ clip: auto;
+}
+[role="button"] {
+ cursor: pointer;
+}
+h1,
+h2,
+h3,
+h4,
+h5,
+h6,
+.h1,
+.h2,
+.h3,
+.h4,
+.h5,
+.h6 {
+ font-family: inherit;
+ font-weight: 500;
+ line-height: 1.1;
+ color: inherit;
+}
+h1 small,
+h2 small,
+h3 small,
+h4 small,
+h5 small,
+h6 small,
+.h1 small,
+.h2 small,
+.h3 small,
+.h4 small,
+.h5 small,
+.h6 small,
+h1 .small,
+h2 .small,
+h3 .small,
+h4 .small,
+h5 .small,
+h6 .small,
+.h1 .small,
+.h2 .small,
+.h3 .small,
+.h4 .small,
+.h5 .small,
+.h6 .small {
+ font-weight: normal;
+ line-height: 1;
+ color: #777;
+}
+h1,
+.h1,
+h2,
+.h2,
+h3,
+.h3 {
+ margin-top: 20px;
+ margin-bottom: 10px;
+}
+h1 small,
+.h1 small,
+h2 small,
+.h2 small,
+h3 small,
+.h3 small,
+h1 .small,
+.h1 .small,
+h2 .small,
+.h2 .small,
+h3 .small,
+.h3 .small {
+ font-size: 65%;
+}
+h4,
+.h4,
+h5,
+.h5,
+h6,
+.h6 {
+ margin-top: 10px;
+ margin-bottom: 10px;
+}
+h4 small,
+.h4 small,
+h5 small,
+.h5 small,
+h6 small,
+.h6 small,
+h4 .small,
+.h4 .small,
+h5 .small,
+.h5 .small,
+h6 .small,
+.h6 .small {
+ font-size: 75%;
+}
+h1,
+.h1 {
+ font-size: 36px;
+}
+h2,
+.h2 {
+ font-size: 30px;
+}
+h3,
+.h3 {
+ font-size: 24px;
+}
+h4,
+.h4 {
+ font-size: 18px;
+}
+h5,
+.h5 {
+ font-size: 14px;
+}
+h6,
+.h6 {
+ font-size: 12px;
+}
+p {
+ margin: 0 0 10px;
+}
+.lead {
+ margin-bottom: 20px;
+ font-size: 16px;
+ font-weight: 300;
+ line-height: 1.4;
+}
+@media (min-width: 768px) {
+ .lead {
+ font-size: 21px;
+ }
+}
+small,
+.small {
+ font-size: 85%;
+}
+mark,
+.mark {
+ padding: .2em;
+ background-color: #fcf8e3;
+}
+.text-left {
+ text-align: left;
+}
+.text-right {
+ text-align: right;
+}
+.text-center {
+ text-align: center;
+}
+.text-justify {
+ text-align: justify;
+}
+.text-nowrap {
+ white-space: nowrap;
+}
+.text-lowercase {
+ text-transform: lowercase;
+}
+.text-uppercase {
+ text-transform: uppercase;
+}
+.text-capitalize {
+ text-transform: capitalize;
+}
+.text-muted {
+ color: #777;
+}
+.text-primary {
+ color: #337ab7;
+}
+a.text-primary:hover,
+a.text-primary:focus {
+ color: #286090;
+}
+.text-success {
+ color: #3c763d;
+}
+a.text-success:hover,
+a.text-success:focus {
+ color: #2b542c;
+}
+.text-info {
+ color: #31708f;
+}
+a.text-info:hover,
+a.text-info:focus {
+ color: #245269;
+}
+.text-warning {
+ color: #8a6d3b;
+}
+a.text-warning:hover,
+a.text-warning:focus {
+ color: #66512c;
+}
+.text-danger {
+ color: #a94442;
+}
+a.text-danger:hover,
+a.text-danger:focus {
+ color: #843534;
+}
+.bg-primary {
+ color: #fff;
+ background-color: #337ab7;
+}
+a.bg-primary:hover,
+a.bg-primary:focus {
+ background-color: #286090;
+}
+.bg-success {
+ background-color: #dff0d8;
+}
+a.bg-success:hover,
+a.bg-success:focus {
+ background-color: #c1e2b3;
+}
+.bg-info {
+ background-color: #d9edf7;
+}
+a.bg-info:hover,
+a.bg-info:focus {
+ background-color: #afd9ee;
+}
+.bg-warning {
+ background-color: #fcf8e3;
+}
+a.bg-warning:hover,
+a.bg-warning:focus {
+ background-color: #f7ecb5;
+}
+.bg-danger {
+ background-color: #f2dede;
+}
+a.bg-danger:hover,
+a.bg-danger:focus {
+ background-color: #e4b9b9;
+}
+.page-header {
+ padding-bottom: 9px;
+ margin: 40px 0 20px;
+ border-bottom: 1px solid #eee;
+}
+ul,
+ol {
+ margin-top: 0;
+ margin-bottom: 10px;
+}
+ul ul,
+ol ul,
+ul ol,
+ol ol {
+ margin-bottom: 0;
+}
+.list-unstyled {
+ padding-left: 0;
+ list-style: none;
+}
+.list-inline {
+ padding-left: 0;
+ margin-left: -5px;
+ list-style: none;
+}
+.list-inline > li {
+ display: inline-block;
+ padding-right: 5px;
+ padding-left: 5px;
+}
+dl {
+ margin-top: 0;
+ margin-bottom: 20px;
+}
+dt,
+dd {
+ line-height: 1.42857143;
+}
+dt {
+ font-weight: bold;
+}
+dd {
+ margin-left: 0;
+}
+@media (min-width: 768px) {
+ .dl-horizontal dt {
+ float: left;
+ width: 160px;
+ overflow: hidden;
+ clear: left;
+ text-align: right;
+ text-overflow: ellipsis;
+ white-space: nowrap;
+ }
+ .dl-horizontal dd {
+ margin-left: 180px;
+ }
+}
+abbr[title],
+abbr[data-original-title] {
+ cursor: help;
+ border-bottom: 1px dotted #777;
+}
+.initialism {
+ font-size: 90%;
+ text-transform: uppercase;
+}
+blockquote {
+ padding: 10px 20px;
+ margin: 0 0 20px;
+ font-size: 17.5px;
+ border-left: 5px solid #eee;
+}
+blockquote p:last-child,
+blockquote ul:last-child,
+blockquote ol:last-child {
+ margin-bottom: 0;
+}
+blockquote footer,
+blockquote small,
+blockquote .small {
+ display: block;
+ font-size: 80%;
+ line-height: 1.42857143;
+ color: #777;
+}
+blockquote footer:before,
+blockquote small:before,
+blockquote .small:before {
+ content: '\2014 \00A0';
+}
+.blockquote-reverse,
+blockquote.pull-right {
+ padding-right: 15px;
+ padding-left: 0;
+ text-align: right;
+ border-right: 5px solid #eee;
+ border-left: 0;
+}
+.blockquote-reverse footer:before,
+blockquote.pull-right footer:before,
+.blockquote-reverse small:before,
+blockquote.pull-right small:before,
+.blockquote-reverse .small:before,
+blockquote.pull-right .small:before {
+ content: '';
+}
+.blockquote-reverse footer:after,
+blockquote.pull-right footer:after,
+.blockquote-reverse small:after,
+blockquote.pull-right small:after,
+.blockquote-reverse .small:after,
+blockquote.pull-right .small:after {
+ content: '\00A0 \2014';
+}
+address {
+ margin-bottom: 20px;
+ font-style: normal;
+ line-height: 1.42857143;
+}
+code,
+kbd,
+pre,
+samp {
+ font-family: Menlo, Monaco, Consolas, "Courier New", monospace;
+}
+code {
+ padding: 2px 4px;
+ font-size: 90%;
+ color: #c7254e;
+ background-color: #f9f2f4;
+ border-radius: 4px;
+}
+kbd {
+ padding: 2px 4px;
+ font-size: 90%;
+ color: #fff;
+ background-color: #333;
+ border-radius: 3px;
+ -webkit-box-shadow: inset 0 -1px 0 rgba(0, 0, 0, .25);
+ box-shadow: inset 0 -1px 0 rgba(0, 0, 0, .25);
+}
+kbd kbd {
+ padding: 0;
+ font-size: 100%;
+ font-weight: bold;
+ -webkit-box-shadow: none;
+ box-shadow: none;
+}
+pre {
+ display: block;
+ padding: 9.5px;
+ margin: 0 0 10px;
+ font-size: 13px;
+ line-height: 1.42857143;
+ color: #333;
+ word-break: break-all;
+ word-wrap: break-word;
+ background-color: #f5f5f5;
+ border: 1px solid #ccc;
+ border-radius: 4px;
+}
+pre code {
+ padding: 0;
+ font-size: inherit;
+ color: inherit;
+ white-space: pre-wrap;
+ background-color: transparent;
+ border-radius: 0;
+}
+.pre-scrollable {
+ max-height: 340px;
+ overflow-y: scroll;
+}
+.container {
+ padding-right: 15px;
+ padding-left: 15px;
+ margin-right: auto;
+ margin-left: auto;
+}
+@media (min-width: 768px) {
+ .container {
+ width: 750px;
+ }
+}
+@media (min-width: 992px) {
+ .container {
+ width: 970px;
+ }
+}
+@media (min-width: 1200px) {
+ .container {
+ width: 1170px;
+ }
+}
+.container-fluid {
+ padding-right: 15px;
+ padding-left: 15px;
+ margin-right: auto;
+ margin-left: auto;
+}
+.row {
+ margin-right: -15px;
+ margin-left: -15px;
+}
+.col-xs-1, .col-sm-1, .col-md-1, .col-lg-1, .col-xs-2, .col-sm-2, .col-md-2, .col-lg-2, .col-xs-3, .col-sm-3, .col-md-3, .col-lg-3, .col-xs-4, .col-sm-4, .col-md-4, .col-lg-4, .col-xs-5, .col-sm-5, .col-md-5, .col-lg-5, .col-xs-6, .col-sm-6, .col-md-6, .col-lg-6, .col-xs-7, .col-sm-7, .col-md-7, .col-lg-7, .col-xs-8, .col-sm-8, .col-md-8, .col-lg-8, .col-xs-9, .col-sm-9, .col-md-9, .col-lg-9, .col-xs-10, .col-sm-10, .col-md-10, .col-lg-10, .col-xs-11, .col-sm-11, .col-md-11, .col-lg-11, .col-xs-12, .col-sm-12, .col-md-12, .col-lg-12 {
+ position: relative;
+ min-height: 1px;
+ padding-right: 15px;
+ padding-left: 15px;
+}
+.col-xs-1, .col-xs-2, .col-xs-3, .col-xs-4, .col-xs-5, .col-xs-6, .col-xs-7, .col-xs-8, .col-xs-9, .col-xs-10, .col-xs-11, .col-xs-12 {
+ float: left;
+}
+.col-xs-12 {
+ width: 100%;
+}
+.col-xs-11 {
+ width: 91.66666667%;
+}
+.col-xs-10 {
+ width: 83.33333333%;
+}
+.col-xs-9 {
+ width: 75%;
+}
+.col-xs-8 {
+ width: 66.66666667%;
+}
+.col-xs-7 {
+ width: 58.33333333%;
+}
+.col-xs-6 {
+ width: 50%;
+}
+.col-xs-5 {
+ width: 41.66666667%;
+}
+.col-xs-4 {
+ width: 33.33333333%;
+}
+.col-xs-3 {
+ width: 25%;
+}
+.col-xs-2 {
+ width: 16.66666667%;
+}
+.col-xs-1 {
+ width: 8.33333333%;
+}
+.col-xs-pull-12 {
+ right: 100%;
+}
+.col-xs-pull-11 {
+ right: 91.66666667%;
+}
+.col-xs-pull-10 {
+ right: 83.33333333%;
+}
+.col-xs-pull-9 {
+ right: 75%;
+}
+.col-xs-pull-8 {
+ right: 66.66666667%;
+}
+.col-xs-pull-7 {
+ right: 58.33333333%;
+}
+.col-xs-pull-6 {
+ right: 50%;
+}
+.col-xs-pull-5 {
+ right: 41.66666667%;
+}
+.col-xs-pull-4 {
+ right: 33.33333333%;
+}
+.col-xs-pull-3 {
+ right: 25%;
+}
+.col-xs-pull-2 {
+ right: 16.66666667%;
+}
+.col-xs-pull-1 {
+ right: 8.33333333%;
+}
+.col-xs-pull-0 {
+ right: auto;
+}
+.col-xs-push-12 {
+ left: 100%;
+}
+.col-xs-push-11 {
+ left: 91.66666667%;
+}
+.col-xs-push-10 {
+ left: 83.33333333%;
+}
+.col-xs-push-9 {
+ left: 75%;
+}
+.col-xs-push-8 {
+ left: 66.66666667%;
+}
+.col-xs-push-7 {
+ left: 58.33333333%;
+}
+.col-xs-push-6 {
+ left: 50%;
+}
+.col-xs-push-5 {
+ left: 41.66666667%;
+}
+.col-xs-push-4 {
+ left: 33.33333333%;
+}
+.col-xs-push-3 {
+ left: 25%;
+}
+.col-xs-push-2 {
+ left: 16.66666667%;
+}
+.col-xs-push-1 {
+ left: 8.33333333%;
+}
+.col-xs-push-0 {
+ left: auto;
+}
+.col-xs-offset-12 {
+ margin-left: 100%;
+}
+.col-xs-offset-11 {
+ margin-left: 91.66666667%;
+}
+.col-xs-offset-10 {
+ margin-left: 83.33333333%;
+}
+.col-xs-offset-9 {
+ margin-left: 75%;
+}
+.col-xs-offset-8 {
+ margin-left: 66.66666667%;
+}
+.col-xs-offset-7 {
+ margin-left: 58.33333333%;
+}
+.col-xs-offset-6 {
+ margin-left: 50%;
+}
+.col-xs-offset-5 {
+ margin-left: 41.66666667%;
+}
+.col-xs-offset-4 {
+ margin-left: 33.33333333%;
+}
+.col-xs-offset-3 {
+ margin-left: 25%;
+}
+.col-xs-offset-2 {
+ margin-left: 16.66666667%;
+}
+.col-xs-offset-1 {
+ margin-left: 8.33333333%;
+}
+.col-xs-offset-0 {
+ margin-left: 0;
+}
+@media (min-width: 768px) {
+ .col-sm-1, .col-sm-2, .col-sm-3, .col-sm-4, .col-sm-5, .col-sm-6, .col-sm-7, .col-sm-8, .col-sm-9, .col-sm-10, .col-sm-11, .col-sm-12 {
+ float: left;
+ }
+ .col-sm-12 {
+ width: 100%;
+ }
+ .col-sm-11 {
+ width: 91.66666667%;
+ }
+ .col-sm-10 {
+ width: 83.33333333%;
+ }
+ .col-sm-9 {
+ width: 75%;
+ }
+ .col-sm-8 {
+ width: 66.66666667%;
+ }
+ .col-sm-7 {
+ width: 58.33333333%;
+ }
+ .col-sm-6 {
+ width: 50%;
+ }
+ .col-sm-5 {
+ width: 41.66666667%;
+ }
+ .col-sm-4 {
+ width: 33.33333333%;
+ }
+ .col-sm-3 {
+ width: 25%;
+ }
+ .col-sm-2 {
+ width: 16.66666667%;
+ }
+ .col-sm-1 {
+ width: 8.33333333%;
+ }
+ .col-sm-pull-12 {
+ right: 100%;
+ }
+ .col-sm-pull-11 {
+ right: 91.66666667%;
+ }
+ .col-sm-pull-10 {
+ right: 83.33333333%;
+ }
+ .col-sm-pull-9 {
+ right: 75%;
+ }
+ .col-sm-pull-8 {
+ right: 66.66666667%;
+ }
+ .col-sm-pull-7 {
+ right: 58.33333333%;
+ }
+ .col-sm-pull-6 {
+ right: 50%;
+ }
+ .col-sm-pull-5 {
+ right: 41.66666667%;
+ }
+ .col-sm-pull-4 {
+ right: 33.33333333%;
+ }
+ .col-sm-pull-3 {
+ right: 25%;
+ }
+ .col-sm-pull-2 {
+ right: 16.66666667%;
+ }
+ .col-sm-pull-1 {
+ right: 8.33333333%;
+ }
+ .col-sm-pull-0 {
+ right: auto;
+ }
+ .col-sm-push-12 {
+ left: 100%;
+ }
+ .col-sm-push-11 {
+ left: 91.66666667%;
+ }
+ .col-sm-push-10 {
+ left: 83.33333333%;
+ }
+ .col-sm-push-9 {
+ left: 75%;
+ }
+ .col-sm-push-8 {
+ left: 66.66666667%;
+ }
+ .col-sm-push-7 {
+ left: 58.33333333%;
+ }
+ .col-sm-push-6 {
+ left: 50%;
+ }
+ .col-sm-push-5 {
+ left: 41.66666667%;
+ }
+ .col-sm-push-4 {
+ left: 33.33333333%;
+ }
+ .col-sm-push-3 {
+ left: 25%;
+ }
+ .col-sm-push-2 {
+ left: 16.66666667%;
+ }
+ .col-sm-push-1 {
+ left: 8.33333333%;
+ }
+ .col-sm-push-0 {
+ left: auto;
+ }
+ .col-sm-offset-12 {
+ margin-left: 100%;
+ }
+ .col-sm-offset-11 {
+ margin-left: 91.66666667%;
+ }
+ .col-sm-offset-10 {
+ margin-left: 83.33333333%;
+ }
+ .col-sm-offset-9 {
+ margin-left: 75%;
+ }
+ .col-sm-offset-8 {
+ margin-left: 66.66666667%;
+ }
+ .col-sm-offset-7 {
+ margin-left: 58.33333333%;
+ }
+ .col-sm-offset-6 {
+ margin-left: 50%;
+ }
+ .col-sm-offset-5 {
+ margin-left: 41.66666667%;
+ }
+ .col-sm-offset-4 {
+ margin-left: 33.33333333%;
+ }
+ .col-sm-offset-3 {
+ margin-left: 25%;
+ }
+ .col-sm-offset-2 {
+ margin-left: 16.66666667%;
+ }
+ .col-sm-offset-1 {
+ margin-left: 8.33333333%;
+ }
+ .col-sm-offset-0 {
+ margin-left: 0;
+ }
+}
+@media (min-width: 992px) {
+ .col-md-1, .col-md-2, .col-md-3, .col-md-4, .col-md-5, .col-md-6, .col-md-7, .col-md-8, .col-md-9, .col-md-10, .col-md-11, .col-md-12 {
+ float: left;
+ }
+ .col-md-12 {
+ width: 100%;
+ }
+ .col-md-11 {
+ width: 91.66666667%;
+ }
+ .col-md-10 {
+ width: 83.33333333%;
+ }
+ .col-md-9 {
+ width: 75%;
+ }
+ .col-md-8 {
+ width: 66.66666667%;
+ }
+ .col-md-7 {
+ width: 58.33333333%;
+ }
+ .col-md-6 {
+ width: 50%;
+ }
+ .col-md-5 {
+ width: 41.66666667%;
+ }
+ .col-md-4 {
+ width: 33.33333333%;
+ }
+ .col-md-3 {
+ width: 25%;
+ }
+ .col-md-2 {
+ width: 16.66666667%;
+ }
+ .col-md-1 {
+ width: 8.33333333%;
+ }
+ .col-md-pull-12 {
+ right: 100%;
+ }
+ .col-md-pull-11 {
+ right: 91.66666667%;
+ }
+ .col-md-pull-10 {
+ right: 83.33333333%;
+ }
+ .col-md-pull-9 {
+ right: 75%;
+ }
+ .col-md-pull-8 {
+ right: 66.66666667%;
+ }
+ .col-md-pull-7 {
+ right: 58.33333333%;
+ }
+ .col-md-pull-6 {
+ right: 50%;
+ }
+ .col-md-pull-5 {
+ right: 41.66666667%;
+ }
+ .col-md-pull-4 {
+ right: 33.33333333%;
+ }
+ .col-md-pull-3 {
+ right: 25%;
+ }
+ .col-md-pull-2 {
+ right: 16.66666667%;
+ }
+ .col-md-pull-1 {
+ right: 8.33333333%;
+ }
+ .col-md-pull-0 {
+ right: auto;
+ }
+ .col-md-push-12 {
+ left: 100%;
+ }
+ .col-md-push-11 {
+ left: 91.66666667%;
+ }
+ .col-md-push-10 {
+ left: 83.33333333%;
+ }
+ .col-md-push-9 {
+ left: 75%;
+ }
+ .col-md-push-8 {
+ left: 66.66666667%;
+ }
+ .col-md-push-7 {
+ left: 58.33333333%;
+ }
+ .col-md-push-6 {
+ left: 50%;
+ }
+ .col-md-push-5 {
+ left: 41.66666667%;
+ }
+ .col-md-push-4 {
+ left: 33.33333333%;
+ }
+ .col-md-push-3 {
+ left: 25%;
+ }
+ .col-md-push-2 {
+ left: 16.66666667%;
+ }
+ .col-md-push-1 {
+ left: 8.33333333%;
+ }
+ .col-md-push-0 {
+ left: auto;
+ }
+ .col-md-offset-12 {
+ margin-left: 100%;
+ }
+ .col-md-offset-11 {
+ margin-left: 91.66666667%;
+ }
+ .col-md-offset-10 {
+ margin-left: 83.33333333%;
+ }
+ .col-md-offset-9 {
+ margin-left: 75%;
+ }
+ .col-md-offset-8 {
+ margin-left: 66.66666667%;
+ }
+ .col-md-offset-7 {
+ margin-left: 58.33333333%;
+ }
+ .col-md-offset-6 {
+ margin-left: 50%;
+ }
+ .col-md-offset-5 {
+ margin-left: 41.66666667%;
+ }
+ .col-md-offset-4 {
+ margin-left: 33.33333333%;
+ }
+ .col-md-offset-3 {
+ margin-left: 25%;
+ }
+ .col-md-offset-2 {
+ margin-left: 16.66666667%;
+ }
+ .col-md-offset-1 {
+ margin-left: 8.33333333%;
+ }
+ .col-md-offset-0 {
+ margin-left: 0;
+ }
+}
+@media (min-width: 1200px) {
+ .col-lg-1, .col-lg-2, .col-lg-3, .col-lg-4, .col-lg-5, .col-lg-6, .col-lg-7, .col-lg-8, .col-lg-9, .col-lg-10, .col-lg-11, .col-lg-12 {
+ float: left;
+ }
+ .col-lg-12 {
+ width: 100%;
+ }
+ .col-lg-11 {
+ width: 91.66666667%;
+ }
+ .col-lg-10 {
+ width: 83.33333333%;
+ }
+ .col-lg-9 {
+ width: 75%;
+ }
+ .col-lg-8 {
+ width: 66.66666667%;
+ }
+ .col-lg-7 {
+ width: 58.33333333%;
+ }
+ .col-lg-6 {
+ width: 50%;
+ }
+ .col-lg-5 {
+ width: 41.66666667%;
+ }
+ .col-lg-4 {
+ width: 33.33333333%;
+ }
+ .col-lg-3 {
+ width: 25%;
+ }
+ .col-lg-2 {
+ width: 16.66666667%;
+ }
+ .col-lg-1 {
+ width: 8.33333333%;
+ }
+ .col-lg-pull-12 {
+ right: 100%;
+ }
+ .col-lg-pull-11 {
+ right: 91.66666667%;
+ }
+ .col-lg-pull-10 {
+ right: 83.33333333%;
+ }
+ .col-lg-pull-9 {
+ right: 75%;
+ }
+ .col-lg-pull-8 {
+ right: 66.66666667%;
+ }
+ .col-lg-pull-7 {
+ right: 58.33333333%;
+ }
+ .col-lg-pull-6 {
+ right: 50%;
+ }
+ .col-lg-pull-5 {
+ right: 41.66666667%;
+ }
+ .col-lg-pull-4 {
+ right: 33.33333333%;
+ }
+ .col-lg-pull-3 {
+ right: 25%;
+ }
+ .col-lg-pull-2 {
+ right: 16.66666667%;
+ }
+ .col-lg-pull-1 {
+ right: 8.33333333%;
+ }
+ .col-lg-pull-0 {
+ right: auto;
+ }
+ .col-lg-push-12 {
+ left: 100%;
+ }
+ .col-lg-push-11 {
+ left: 91.66666667%;
+ }
+ .col-lg-push-10 {
+ left: 83.33333333%;
+ }
+ .col-lg-push-9 {
+ left: 75%;
+ }
+ .col-lg-push-8 {
+ left: 66.66666667%;
+ }
+ .col-lg-push-7 {
+ left: 58.33333333%;
+ }
+ .col-lg-push-6 {
+ left: 50%;
+ }
+ .col-lg-push-5 {
+ left: 41.66666667%;
+ }
+ .col-lg-push-4 {
+ left: 33.33333333%;
+ }
+ .col-lg-push-3 {
+ left: 25%;
+ }
+ .col-lg-push-2 {
+ left: 16.66666667%;
+ }
+ .col-lg-push-1 {
+ left: 8.33333333%;
+ }
+ .col-lg-push-0 {
+ left: auto;
+ }
+ .col-lg-offset-12 {
+ margin-left: 100%;
+ }
+ .col-lg-offset-11 {
+ margin-left: 91.66666667%;
+ }
+ .col-lg-offset-10 {
+ margin-left: 83.33333333%;
+ }
+ .col-lg-offset-9 {
+ margin-left: 75%;
+ }
+ .col-lg-offset-8 {
+ margin-left: 66.66666667%;
+ }
+ .col-lg-offset-7 {
+ margin-left: 58.33333333%;
+ }
+ .col-lg-offset-6 {
+ margin-left: 50%;
+ }
+ .col-lg-offset-5 {
+ margin-left: 41.66666667%;
+ }
+ .col-lg-offset-4 {
+ margin-left: 33.33333333%;
+ }
+ .col-lg-offset-3 {
+ margin-left: 25%;
+ }
+ .col-lg-offset-2 {
+ margin-left: 16.66666667%;
+ }
+ .col-lg-offset-1 {
+ margin-left: 8.33333333%;
+ }
+ .col-lg-offset-0 {
+ margin-left: 0;
+ }
+}
+table {
+ background-color: transparent;
+}
+caption {
+ padding-top: 8px;
+ padding-bottom: 8px;
+ color: #777;
+ text-align: left;
+}
+th {
+ text-align: left;
+}
+.table {
+ width: 100%;
+ max-width: 100%;
+ margin-bottom: 20px;
+}
+.table > thead > tr > th,
+.table > tbody > tr > th,
+.table > tfoot > tr > th,
+.table > thead > tr > td,
+.table > tbody > tr > td,
+.table > tfoot > tr > td {
+ padding: 8px;
+ line-height: 1.42857143;
+ vertical-align: top;
+ border-top: 1px solid #ddd;
+}
+.table > thead > tr > th {
+ vertical-align: bottom;
+ border-bottom: 2px solid #ddd;
+}
+.table > caption + thead > tr:first-child > th,
+.table > colgroup + thead > tr:first-child > th,
+.table > thead:first-child > tr:first-child > th,
+.table > caption + thead > tr:first-child > td,
+.table > colgroup + thead > tr:first-child > td,
+.table > thead:first-child > tr:first-child > td {
+ border-top: 0;
+}
+.table > tbody + tbody {
+ border-top: 2px solid #ddd;
+}
+.table .table {
+ background-color: #fff;
+}
+.table-condensed > thead > tr > th,
+.table-condensed > tbody > tr > th,
+.table-condensed > tfoot > tr > th,
+.table-condensed > thead > tr > td,
+.table-condensed > tbody > tr > td,
+.table-condensed > tfoot > tr > td {
+ padding: 5px;
+}
+.table-bordered {
+ border: 1px solid #ddd;
+}
+.table-bordered > thead > tr > th,
+.table-bordered > tbody > tr > th,
+.table-bordered > tfoot > tr > th,
+.table-bordered > thead > tr > td,
+.table-bordered > tbody > tr > td,
+.table-bordered > tfoot > tr > td {
+ border: 1px solid #ddd;
+}
+.table-bordered > thead > tr > th,
+.table-bordered > thead > tr > td {
+ border-bottom-width: 2px;
+}
+.table-striped > tbody > tr:nth-of-type(odd) {
+ background-color: #f9f9f9;
+}
+.table-hover > tbody > tr:hover {
+ background-color: #f5f5f5;
+}
+table col[class*="col-"] {
+ position: static;
+ display: table-column;
+ float: none;
+}
+table td[class*="col-"],
+table th[class*="col-"] {
+ position: static;
+ display: table-cell;
+ float: none;
+}
+.table > thead > tr > td.active,
+.table > tbody > tr > td.active,
+.table > tfoot > tr > td.active,
+.table > thead > tr > th.active,
+.table > tbody > tr > th.active,
+.table > tfoot > tr > th.active,
+.table > thead > tr.active > td,
+.table > tbody > tr.active > td,
+.table > tfoot > tr.active > td,
+.table > thead > tr.active > th,
+.table > tbody > tr.active > th,
+.table > tfoot > tr.active > th {
+ background-color: #f5f5f5;
+}
+.table-hover > tbody > tr > td.active:hover,
+.table-hover > tbody > tr > th.active:hover,
+.table-hover > tbody > tr.active:hover > td,
+.table-hover > tbody > tr:hover > .active,
+.table-hover > tbody > tr.active:hover > th {
+ background-color: #e8e8e8;
+}
+.table > thead > tr > td.success,
+.table > tbody > tr > td.success,
+.table > tfoot > tr > td.success,
+.table > thead > tr > th.success,
+.table > tbody > tr > th.success,
+.table > tfoot > tr > th.success,
+.table > thead > tr.success > td,
+.table > tbody > tr.success > td,
+.table > tfoot > tr.success > td,
+.table > thead > tr.success > th,
+.table > tbody > tr.success > th,
+.table > tfoot > tr.success > th {
+ background-color: #dff0d8;
+}
+.table-hover > tbody > tr > td.success:hover,
+.table-hover > tbody > tr > th.success:hover,
+.table-hover > tbody > tr.success:hover > td,
+.table-hover > tbody > tr:hover > .success,
+.table-hover > tbody > tr.success:hover > th {
+ background-color: #d0e9c6;
+}
+.table > thead > tr > td.info,
+.table > tbody > tr > td.info,
+.table > tfoot > tr > td.info,
+.table > thead > tr > th.info,
+.table > tbody > tr > th.info,
+.table > tfoot > tr > th.info,
+.table > thead > tr.info > td,
+.table > tbody > tr.info > td,
+.table > tfoot > tr.info > td,
+.table > thead > tr.info > th,
+.table > tbody > tr.info > th,
+.table > tfoot > tr.info > th {
+ background-color: #d9edf7;
+}
+.table-hover > tbody > tr > td.info:hover,
+.table-hover > tbody > tr > th.info:hover,
+.table-hover > tbody > tr.info:hover > td,
+.table-hover > tbody > tr:hover > .info,
+.table-hover > tbody > tr.info:hover > th {
+ background-color: #c4e3f3;
+}
+.table > thead > tr > td.warning,
+.table > tbody > tr > td.warning,
+.table > tfoot > tr > td.warning,
+.table > thead > tr > th.warning,
+.table > tbody > tr > th.warning,
+.table > tfoot > tr > th.warning,
+.table > thead > tr.warning > td,
+.table > tbody > tr.warning > td,
+.table > tfoot > tr.warning > td,
+.table > thead > tr.warning > th,
+.table > tbody > tr.warning > th,
+.table > tfoot > tr.warning > th {
+ background-color: #fcf8e3;
+}
+.table-hover > tbody > tr > td.warning:hover,
+.table-hover > tbody > tr > th.warning:hover,
+.table-hover > tbody > tr.warning:hover > td,
+.table-hover > tbody > tr:hover > .warning,
+.table-hover > tbody > tr.warning:hover > th {
+ background-color: #faf2cc;
+}
+.table > thead > tr > td.danger,
+.table > tbody > tr > td.danger,
+.table > tfoot > tr > td.danger,
+.table > thead > tr > th.danger,
+.table > tbody > tr > th.danger,
+.table > tfoot > tr > th.danger,
+.table > thead > tr.danger > td,
+.table > tbody > tr.danger > td,
+.table > tfoot > tr.danger > td,
+.table > thead > tr.danger > th,
+.table > tbody > tr.danger > th,
+.table > tfoot > tr.danger > th {
+ background-color: #f2dede;
+}
+.table-hover > tbody > tr > td.danger:hover,
+.table-hover > tbody > tr > th.danger:hover,
+.table-hover > tbody > tr.danger:hover > td,
+.table-hover > tbody > tr:hover > .danger,
+.table-hover > tbody > tr.danger:hover > th {
+ background-color: #ebcccc;
+}
+.table-responsive {
+ min-height: .01%;
+ overflow-x: auto;
+}
+@media screen and (max-width: 767px) {
+ .table-responsive {
+ width: 100%;
+ margin-bottom: 15px;
+ overflow-y: hidden;
+ -ms-overflow-style: -ms-autohiding-scrollbar;
+ border: 1px solid #ddd;
+ }
+ .table-responsive > .table {
+ margin-bottom: 0;
+ }
+ .table-responsive > .table > thead > tr > th,
+ .table-responsive > .table > tbody > tr > th,
+ .table-responsive > .table > tfoot > tr > th,
+ .table-responsive > .table > thead > tr > td,
+ .table-responsive > .table > tbody > tr > td,
+ .table-responsive > .table > tfoot > tr > td {
+ white-space: nowrap;
+ }
+ .table-responsive > .table-bordered {
+ border: 0;
+ }
+ .table-responsive > .table-bordered > thead > tr > th:first-child,
+ .table-responsive > .table-bordered > tbody > tr > th:first-child,
+ .table-responsive > .table-bordered > tfoot > tr > th:first-child,
+ .table-responsive > .table-bordered > thead > tr > td:first-child,
+ .table-responsive > .table-bordered > tbody > tr > td:first-child,
+ .table-responsive > .table-bordered > tfoot > tr > td:first-child {
+ border-left: 0;
+ }
+ .table-responsive > .table-bordered > thead > tr > th:last-child,
+ .table-responsive > .table-bordered > tbody > tr > th:last-child,
+ .table-responsive > .table-bordered > tfoot > tr > th:last-child,
+ .table-responsive > .table-bordered > thead > tr > td:last-child,
+ .table-responsive > .table-bordered > tbody > tr > td:last-child,
+ .table-responsive > .table-bordered > tfoot > tr > td:last-child {
+ border-right: 0;
+ }
+ .table-responsive > .table-bordered > tbody > tr:last-child > th,
+ .table-responsive > .table-bordered > tfoot > tr:last-child > th,
+ .table-responsive > .table-bordered > tbody > tr:last-child > td,
+ .table-responsive > .table-bordered > tfoot > tr:last-child > td {
+ border-bottom: 0;
+ }
+}
+fieldset {
+ min-width: 0;
+ padding: 0;
+ margin: 0;
+ border: 0;
+}
+legend {
+ display: block;
+ width: 100%;
+ padding: 0;
+ margin-bottom: 20px;
+ font-size: 21px;
+ line-height: inherit;
+ color: #333;
+ border: 0;
+ border-bottom: 1px solid #e5e5e5;
+}
+label {
+ display: inline-block;
+ max-width: 100%;
+ margin-bottom: 5px;
+ font-weight: bold;
+}
+input[type="search"] {
+ -webkit-box-sizing: border-box;
+ -moz-box-sizing: border-box;
+ box-sizing: border-box;
+}
+input[type="radio"],
+input[type="checkbox"] {
+ margin: 4px 0 0;
+ margin-top: 1px \9;
+ line-height: normal;
+}
+input[type="file"] {
+ display: block;
+}
+input[type="range"] {
+ display: block;
+ width: 100%;
+}
+select[multiple],
+select[size] {
+ height: auto;
+}
+input[type="file"]:focus,
+input[type="radio"]:focus,
+input[type="checkbox"]:focus {
+ outline: 5px auto -webkit-focus-ring-color;
+ outline-offset: -2px;
+}
+output {
+ display: block;
+ padding-top: 7px;
+ font-size: 14px;
+ line-height: 1.42857143;
+ color: #555;
+}
+.form-control {
+ display: block;
+ width: 100%;
+ height: 34px;
+ padding: 6px 12px;
+ font-size: 14px;
+ line-height: 1.42857143;
+ color: #555;
+ background-color: #fff;
+ background-image: none;
+ border: 1px solid #ccc;
+ border-radius: 4px;
+ -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075);
+ box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075);
+ -webkit-transition: border-color ease-in-out .15s, -webkit-box-shadow ease-in-out .15s;
+ -o-transition: border-color ease-in-out .15s, box-shadow ease-in-out .15s;
+ transition: border-color ease-in-out .15s, box-shadow ease-in-out .15s;
+}
+.form-control:focus {
+ border-color: #66afe9;
+ outline: 0;
+ -webkit-box-shadow: inset 0 1px 1px rgba(0,0,0,.075), 0 0 8px rgba(102, 175, 233, .6);
+ box-shadow: inset 0 1px 1px rgba(0,0,0,.075), 0 0 8px rgba(102, 175, 233, .6);
+}
+.form-control::-moz-placeholder {
+ color: #999;
+ opacity: 1;
+}
+.form-control:-ms-input-placeholder {
+ color: #999;
+}
+.form-control::-webkit-input-placeholder {
+ color: #999;
+}
+.form-control::-ms-expand {
+ background-color: transparent;
+ border: 0;
+}
+.form-control[disabled],
+.form-control[readonly],
+fieldset[disabled] .form-control {
+ background-color: #eee;
+ opacity: 1;
+}
+.form-control[disabled],
+fieldset[disabled] .form-control {
+ cursor: not-allowed;
+}
+textarea.form-control {
+ height: auto;
+}
+input[type="search"] {
+ -webkit-appearance: none;
+}
+@media screen and (-webkit-min-device-pixel-ratio: 0) {
+ input[type="date"].form-control,
+ input[type="time"].form-control,
+ input[type="datetime-local"].form-control,
+ input[type="month"].form-control {
+ line-height: 34px;
+ }
+ input[type="date"].input-sm,
+ input[type="time"].input-sm,
+ input[type="datetime-local"].input-sm,
+ input[type="month"].input-sm,
+ .input-group-sm input[type="date"],
+ .input-group-sm input[type="time"],
+ .input-group-sm input[type="datetime-local"],
+ .input-group-sm input[type="month"] {
+ line-height: 30px;
+ }
+ input[type="date"].input-lg,
+ input[type="time"].input-lg,
+ input[type="datetime-local"].input-lg,
+ input[type="month"].input-lg,
+ .input-group-lg input[type="date"],
+ .input-group-lg input[type="time"],
+ .input-group-lg input[type="datetime-local"],
+ .input-group-lg input[type="month"] {
+ line-height: 46px;
+ }
+}
+.form-group {
+ margin-bottom: 15px;
+}
+.radio,
+.checkbox {
+ position: relative;
+ display: block;
+ margin-top: 10px;
+ margin-bottom: 10px;
+}
+.radio label,
+.checkbox label {
+ min-height: 20px;
+ padding-left: 20px;
+ margin-bottom: 0;
+ font-weight: normal;
+ cursor: pointer;
+}
+.radio input[type="radio"],
+.radio-inline input[type="radio"],
+.checkbox input[type="checkbox"],
+.checkbox-inline input[type="checkbox"] {
+ position: absolute;
+ margin-top: 4px \9;
+ margin-left: -20px;
+}
+.radio + .radio,
+.checkbox + .checkbox {
+ margin-top: -5px;
+}
+.radio-inline,
+.checkbox-inline {
+ position: relative;
+ display: inline-block;
+ padding-left: 20px;
+ margin-bottom: 0;
+ font-weight: normal;
+ vertical-align: middle;
+ cursor: pointer;
+}
+.radio-inline + .radio-inline,
+.checkbox-inline + .checkbox-inline {
+ margin-top: 0;
+ margin-left: 10px;
+}
+input[type="radio"][disabled],
+input[type="checkbox"][disabled],
+input[type="radio"].disabled,
+input[type="checkbox"].disabled,
+fieldset[disabled] input[type="radio"],
+fieldset[disabled] input[type="checkbox"] {
+ cursor: not-allowed;
+}
+.radio-inline.disabled,
+.checkbox-inline.disabled,
+fieldset[disabled] .radio-inline,
+fieldset[disabled] .checkbox-inline {
+ cursor: not-allowed;
+}
+.radio.disabled label,
+.checkbox.disabled label,
+fieldset[disabled] .radio label,
+fieldset[disabled] .checkbox label {
+ cursor: not-allowed;
+}
+.form-control-static {
+ min-height: 34px;
+ padding-top: 7px;
+ padding-bottom: 7px;
+ margin-bottom: 0;
+}
+.form-control-static.input-lg,
+.form-control-static.input-sm {
+ padding-right: 0;
+ padding-left: 0;
+}
+.input-sm {
+ height: 30px;
+ padding: 5px 10px;
+ font-size: 12px;
+ line-height: 1.5;
+ border-radius: 3px;
+}
+select.input-sm {
+ height: 30px;
+ line-height: 30px;
+}
+textarea.input-sm,
+select[multiple].input-sm {
+ height: auto;
+}
+.form-group-sm .form-control {
+ height: 30px;
+ padding: 5px 10px;
+ font-size: 12px;
+ line-height: 1.5;
+ border-radius: 3px;
+}
+.form-group-sm select.form-control {
+ height: 30px;
+ line-height: 30px;
+}
+.form-group-sm textarea.form-control,
+.form-group-sm select[multiple].form-control {
+ height: auto;
+}
+.form-group-sm .form-control-static {
+ height: 30px;
+ min-height: 32px;
+ padding: 6px 10px;
+ font-size: 12px;
+ line-height: 1.5;
+}
+.input-lg {
+ height: 46px;
+ padding: 10px 16px;
+ font-size: 18px;
+ line-height: 1.3333333;
+ border-radius: 6px;
+}
+select.input-lg {
+ height: 46px;
+ line-height: 46px;
+}
+textarea.input-lg,
+select[multiple].input-lg {
+ height: auto;
+}
+.form-group-lg .form-control {
+ height: 46px;
+ padding: 10px 16px;
+ font-size: 18px;
+ line-height: 1.3333333;
+ border-radius: 6px;
+}
+.form-group-lg select.form-control {
+ height: 46px;
+ line-height: 46px;
+}
+.form-group-lg textarea.form-control,
+.form-group-lg select[multiple].form-control {
+ height: auto;
+}
+.form-group-lg .form-control-static {
+ height: 46px;
+ min-height: 38px;
+ padding: 11px 16px;
+ font-size: 18px;
+ line-height: 1.3333333;
+}
+.has-feedback {
+ position: relative;
+}
+.has-feedback .form-control {
+ padding-right: 42.5px;
+}
+.form-control-feedback {
+ position: absolute;
+ top: 0;
+ right: 0;
+ z-index: 2;
+ display: block;
+ width: 34px;
+ height: 34px;
+ line-height: 34px;
+ text-align: center;
+ pointer-events: none;
+}
+.input-lg + .form-control-feedback,
+.input-group-lg + .form-control-feedback,
+.form-group-lg .form-control + .form-control-feedback {
+ width: 46px;
+ height: 46px;
+ line-height: 46px;
+}
+.input-sm + .form-control-feedback,
+.input-group-sm + .form-control-feedback,
+.form-group-sm .form-control + .form-control-feedback {
+ width: 30px;
+ height: 30px;
+ line-height: 30px;
+}
+.has-success .help-block,
+.has-success .control-label,
+.has-success .radio,
+.has-success .checkbox,
+.has-success .radio-inline,
+.has-success .checkbox-inline,
+.has-success.radio label,
+.has-success.checkbox label,
+.has-success.radio-inline label,
+.has-success.checkbox-inline label {
+ color: #3c763d;
+}
+.has-success .form-control {
+ border-color: #3c763d;
+ -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075);
+ box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075);
+}
+.has-success .form-control:focus {
+ border-color: #2b542c;
+ -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075), 0 0 6px #67b168;
+ box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075), 0 0 6px #67b168;
+}
+.has-success .input-group-addon {
+ color: #3c763d;
+ background-color: #dff0d8;
+ border-color: #3c763d;
+}
+.has-success .form-control-feedback {
+ color: #3c763d;
+}
+.has-warning .help-block,
+.has-warning .control-label,
+.has-warning .radio,
+.has-warning .checkbox,
+.has-warning .radio-inline,
+.has-warning .checkbox-inline,
+.has-warning.radio label,
+.has-warning.checkbox label,
+.has-warning.radio-inline label,
+.has-warning.checkbox-inline label {
+ color: #8a6d3b;
+}
+.has-warning .form-control {
+ border-color: #8a6d3b;
+ -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075);
+ box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075);
+}
+.has-warning .form-control:focus {
+ border-color: #66512c;
+ -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075), 0 0 6px #c0a16b;
+ box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075), 0 0 6px #c0a16b;
+}
+.has-warning .input-group-addon {
+ color: #8a6d3b;
+ background-color: #fcf8e3;
+ border-color: #8a6d3b;
+}
+.has-warning .form-control-feedback {
+ color: #8a6d3b;
+}
+.has-error .help-block,
+.has-error .control-label,
+.has-error .radio,
+.has-error .checkbox,
+.has-error .radio-inline,
+.has-error .checkbox-inline,
+.has-error.radio label,
+.has-error.checkbox label,
+.has-error.radio-inline label,
+.has-error.checkbox-inline label {
+ color: #a94442;
+}
+.has-error .form-control {
+ border-color: #a94442;
+ -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075);
+ box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075);
+}
+.has-error .form-control:focus {
+ border-color: #843534;
+ -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075), 0 0 6px #ce8483;
+ box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075), 0 0 6px #ce8483;
+}
+.has-error .input-group-addon {
+ color: #a94442;
+ background-color: #f2dede;
+ border-color: #a94442;
+}
+.has-error .form-control-feedback {
+ color: #a94442;
+}
+.has-feedback label ~ .form-control-feedback {
+ top: 25px;
+}
+.has-feedback label.sr-only ~ .form-control-feedback {
+ top: 0;
+}
+.help-block {
+ display: block;
+ margin-top: 5px;
+ margin-bottom: 10px;
+ color: #737373;
+}
+@media (min-width: 768px) {
+ .form-inline .form-group {
+ display: inline-block;
+ margin-bottom: 0;
+ vertical-align: middle;
+ }
+ .form-inline .form-control {
+ display: inline-block;
+ width: auto;
+ vertical-align: middle;
+ }
+ .form-inline .form-control-static {
+ display: inline-block;
+ }
+ .form-inline .input-group {
+ display: inline-table;
+ vertical-align: middle;
+ }
+ .form-inline .input-group .input-group-addon,
+ .form-inline .input-group .input-group-btn,
+ .form-inline .input-group .form-control {
+ width: auto;
+ }
+ .form-inline .input-group > .form-control {
+ width: 100%;
+ }
+ .form-inline .control-label {
+ margin-bottom: 0;
+ vertical-align: middle;
+ }
+ .form-inline .radio,
+ .form-inline .checkbox {
+ display: inline-block;
+ margin-top: 0;
+ margin-bottom: 0;
+ vertical-align: middle;
+ }
+ .form-inline .radio label,
+ .form-inline .checkbox label {
+ padding-left: 0;
+ }
+ .form-inline .radio input[type="radio"],
+ .form-inline .checkbox input[type="checkbox"] {
+ position: relative;
+ margin-left: 0;
+ }
+ .form-inline .has-feedback .form-control-feedback {
+ top: 0;
+ }
+}
+.form-horizontal .radio,
+.form-horizontal .checkbox,
+.form-horizontal .radio-inline,
+.form-horizontal .checkbox-inline {
+ padding-top: 7px;
+ margin-top: 0;
+ margin-bottom: 0;
+}
+.form-horizontal .radio,
+.form-horizontal .checkbox {
+ min-height: 27px;
+}
+.form-horizontal .form-group {
+ margin-right: -15px;
+ margin-left: -15px;
+}
+@media (min-width: 768px) {
+ .form-horizontal .control-label {
+ padding-top: 7px;
+ margin-bottom: 0;
+ text-align: right;
+ }
+}
+.form-horizontal .has-feedback .form-control-feedback {
+ right: 15px;
+}
+@media (min-width: 768px) {
+ .form-horizontal .form-group-lg .control-label {
+ padding-top: 11px;
+ font-size: 18px;
+ }
+}
+@media (min-width: 768px) {
+ .form-horizontal .form-group-sm .control-label {
+ padding-top: 6px;
+ font-size: 12px;
+ }
+}
+.btn {
+ display: inline-block;
+ padding: 6px 12px;
+ margin-bottom: 0;
+ font-size: 14px;
+ font-weight: normal;
+ line-height: 1.42857143;
+ text-align: center;
+ white-space: nowrap;
+ vertical-align: middle;
+ -ms-touch-action: manipulation;
+ touch-action: manipulation;
+ cursor: pointer;
+ -webkit-user-select: none;
+ -moz-user-select: none;
+ -ms-user-select: none;
+ user-select: none;
+ background-image: none;
+ border: 1px solid transparent;
+ border-radius: 4px;
+}
+.btn:focus,
+.btn:active:focus,
+.btn.active:focus,
+.btn.focus,
+.btn:active.focus,
+.btn.active.focus {
+ outline: 5px auto -webkit-focus-ring-color;
+ outline-offset: -2px;
+}
+.btn:hover,
+.btn:focus,
+.btn.focus {
+ color: #333;
+ text-decoration: none;
+}
+.btn:active,
+.btn.active {
+ background-image: none;
+ outline: 0;
+ -webkit-box-shadow: inset 0 3px 5px rgba(0, 0, 0, .125);
+ box-shadow: inset 0 3px 5px rgba(0, 0, 0, .125);
+}
+.btn.disabled,
+.btn[disabled],
+fieldset[disabled] .btn {
+ cursor: not-allowed;
+ filter: alpha(opacity=65);
+ -webkit-box-shadow: none;
+ box-shadow: none;
+ opacity: .65;
+}
+a.btn.disabled,
+fieldset[disabled] a.btn {
+ pointer-events: none;
+}
+.btn-default {
+ color: #333;
+ background-color: #fff;
+ border-color: #ccc;
+}
+.btn-default:focus,
+.btn-default.focus {
+ color: #333;
+ background-color: #e6e6e6;
+ border-color: #8c8c8c;
+}
+.btn-default:hover {
+ color: #333;
+ background-color: #e6e6e6;
+ border-color: #adadad;
+}
+.btn-default:active,
+.btn-default.active,
+.open > .dropdown-toggle.btn-default {
+ color: #333;
+ background-color: #e6e6e6;
+ border-color: #adadad;
+}
+.btn-default:active:hover,
+.btn-default.active:hover,
+.open > .dropdown-toggle.btn-default:hover,
+.btn-default:active:focus,
+.btn-default.active:focus,
+.open > .dropdown-toggle.btn-default:focus,
+.btn-default:active.focus,
+.btn-default.active.focus,
+.open > .dropdown-toggle.btn-default.focus {
+ color: #333;
+ background-color: #d4d4d4;
+ border-color: #8c8c8c;
+}
+.btn-default:active,
+.btn-default.active,
+.open > .dropdown-toggle.btn-default {
+ background-image: none;
+}
+.btn-default.disabled:hover,
+.btn-default[disabled]:hover,
+fieldset[disabled] .btn-default:hover,
+.btn-default.disabled:focus,
+.btn-default[disabled]:focus,
+fieldset[disabled] .btn-default:focus,
+.btn-default.disabled.focus,
+.btn-default[disabled].focus,
+fieldset[disabled] .btn-default.focus {
+ background-color: #fff;
+ border-color: #ccc;
+}
+.btn-default .badge {
+ color: #fff;
+ background-color: #333;
+}
+.btn-primary {
+ color: #fff;
+ background-color: #337ab7;
+ border-color: #2e6da4;
+}
+.btn-primary:focus,
+.btn-primary.focus {
+ color: #fff;
+ background-color: #286090;
+ border-color: #122b40;
+}
+.btn-primary:hover {
+ color: #fff;
+ background-color: #286090;
+ border-color: #204d74;
+}
+.btn-primary:active,
+.btn-primary.active,
+.open > .dropdown-toggle.btn-primary {
+ color: #fff;
+ background-color: #286090;
+ border-color: #204d74;
+}
+.btn-primary:active:hover,
+.btn-primary.active:hover,
+.open > .dropdown-toggle.btn-primary:hover,
+.btn-primary:active:focus,
+.btn-primary.active:focus,
+.open > .dropdown-toggle.btn-primary:focus,
+.btn-primary:active.focus,
+.btn-primary.active.focus,
+.open > .dropdown-toggle.btn-primary.focus {
+ color: #fff;
+ background-color: #204d74;
+ border-color: #122b40;
+}
+.btn-primary:active,
+.btn-primary.active,
+.open > .dropdown-toggle.btn-primary {
+ background-image: none;
+}
+.btn-primary.disabled:hover,
+.btn-primary[disabled]:hover,
+fieldset[disabled] .btn-primary:hover,
+.btn-primary.disabled:focus,
+.btn-primary[disabled]:focus,
+fieldset[disabled] .btn-primary:focus,
+.btn-primary.disabled.focus,
+.btn-primary[disabled].focus,
+fieldset[disabled] .btn-primary.focus {
+ background-color: #337ab7;
+ border-color: #2e6da4;
+}
+.btn-primary .badge {
+ color: #337ab7;
+ background-color: #fff;
+}
+.btn-success {
+ color: #fff;
+ background-color: #5cb85c;
+ border-color: #4cae4c;
+}
+.btn-success:focus,
+.btn-success.focus {
+ color: #fff;
+ background-color: #449d44;
+ border-color: #255625;
+}
+.btn-success:hover {
+ color: #fff;
+ background-color: #449d44;
+ border-color: #398439;
+}
+.btn-success:active,
+.btn-success.active,
+.open > .dropdown-toggle.btn-success {
+ color: #fff;
+ background-color: #449d44;
+ border-color: #398439;
+}
+.btn-success:active:hover,
+.btn-success.active:hover,
+.open > .dropdown-toggle.btn-success:hover,
+.btn-success:active:focus,
+.btn-success.active:focus,
+.open > .dropdown-toggle.btn-success:focus,
+.btn-success:active.focus,
+.btn-success.active.focus,
+.open > .dropdown-toggle.btn-success.focus {
+ color: #fff;
+ background-color: #398439;
+ border-color: #255625;
+}
+.btn-success:active,
+.btn-success.active,
+.open > .dropdown-toggle.btn-success {
+ background-image: none;
+}
+.btn-success.disabled:hover,
+.btn-success[disabled]:hover,
+fieldset[disabled] .btn-success:hover,
+.btn-success.disabled:focus,
+.btn-success[disabled]:focus,
+fieldset[disabled] .btn-success:focus,
+.btn-success.disabled.focus,
+.btn-success[disabled].focus,
+fieldset[disabled] .btn-success.focus {
+ background-color: #5cb85c;
+ border-color: #4cae4c;
+}
+.btn-success .badge {
+ color: #5cb85c;
+ background-color: #fff;
+}
+.btn-info {
+ color: #fff;
+ background-color: #5bc0de;
+ border-color: #46b8da;
+}
+.btn-info:focus,
+.btn-info.focus {
+ color: #fff;
+ background-color: #31b0d5;
+ border-color: #1b6d85;
+}
+.btn-info:hover {
+ color: #fff;
+ background-color: #31b0d5;
+ border-color: #269abc;
+}
+.btn-info:active,
+.btn-info.active,
+.open > .dropdown-toggle.btn-info {
+ color: #fff;
+ background-color: #31b0d5;
+ border-color: #269abc;
+}
+.btn-info:active:hover,
+.btn-info.active:hover,
+.open > .dropdown-toggle.btn-info:hover,
+.btn-info:active:focus,
+.btn-info.active:focus,
+.open > .dropdown-toggle.btn-info:focus,
+.btn-info:active.focus,
+.btn-info.active.focus,
+.open > .dropdown-toggle.btn-info.focus {
+ color: #fff;
+ background-color: #269abc;
+ border-color: #1b6d85;
+}
+.btn-info:active,
+.btn-info.active,
+.open > .dropdown-toggle.btn-info {
+ background-image: none;
+}
+.btn-info.disabled:hover,
+.btn-info[disabled]:hover,
+fieldset[disabled] .btn-info:hover,
+.btn-info.disabled:focus,
+.btn-info[disabled]:focus,
+fieldset[disabled] .btn-info:focus,
+.btn-info.disabled.focus,
+.btn-info[disabled].focus,
+fieldset[disabled] .btn-info.focus {
+ background-color: #5bc0de;
+ border-color: #46b8da;
+}
+.btn-info .badge {
+ color: #5bc0de;
+ background-color: #fff;
+}
+.btn-warning {
+ color: #fff;
+ background-color: #f0ad4e;
+ border-color: #eea236;
+}
+.btn-warning:focus,
+.btn-warning.focus {
+ color: #fff;
+ background-color: #ec971f;
+ border-color: #985f0d;
+}
+.btn-warning:hover {
+ color: #fff;
+ background-color: #ec971f;
+ border-color: #d58512;
+}
+.btn-warning:active,
+.btn-warning.active,
+.open > .dropdown-toggle.btn-warning {
+ color: #fff;
+ background-color: #ec971f;
+ border-color: #d58512;
+}
+.btn-warning:active:hover,
+.btn-warning.active:hover,
+.open > .dropdown-toggle.btn-warning:hover,
+.btn-warning:active:focus,
+.btn-warning.active:focus,
+.open > .dropdown-toggle.btn-warning:focus,
+.btn-warning:active.focus,
+.btn-warning.active.focus,
+.open > .dropdown-toggle.btn-warning.focus {
+ color: #fff;
+ background-color: #d58512;
+ border-color: #985f0d;
+}
+.btn-warning:active,
+.btn-warning.active,
+.open > .dropdown-toggle.btn-warning {
+ background-image: none;
+}
+.btn-warning.disabled:hover,
+.btn-warning[disabled]:hover,
+fieldset[disabled] .btn-warning:hover,
+.btn-warning.disabled:focus,
+.btn-warning[disabled]:focus,
+fieldset[disabled] .btn-warning:focus,
+.btn-warning.disabled.focus,
+.btn-warning[disabled].focus,
+fieldset[disabled] .btn-warning.focus {
+ background-color: #f0ad4e;
+ border-color: #eea236;
+}
+.btn-warning .badge {
+ color: #f0ad4e;
+ background-color: #fff;
+}
+.btn-danger {
+ color: #fff;
+ background-color: #d9534f;
+ border-color: #d43f3a;
+}
+.btn-danger:focus,
+.btn-danger.focus {
+ color: #fff;
+ background-color: #c9302c;
+ border-color: #761c19;
+}
+.btn-danger:hover {
+ color: #fff;
+ background-color: #c9302c;
+ border-color: #ac2925;
+}
+.btn-danger:active,
+.btn-danger.active,
+.open > .dropdown-toggle.btn-danger {
+ color: #fff;
+ background-color: #c9302c;
+ border-color: #ac2925;
+}
+.btn-danger:active:hover,
+.btn-danger.active:hover,
+.open > .dropdown-toggle.btn-danger:hover,
+.btn-danger:active:focus,
+.btn-danger.active:focus,
+.open > .dropdown-toggle.btn-danger:focus,
+.btn-danger:active.focus,
+.btn-danger.active.focus,
+.open > .dropdown-toggle.btn-danger.focus {
+ color: #fff;
+ background-color: #ac2925;
+ border-color: #761c19;
+}
+.btn-danger:active,
+.btn-danger.active,
+.open > .dropdown-toggle.btn-danger {
+ background-image: none;
+}
+.btn-danger.disabled:hover,
+.btn-danger[disabled]:hover,
+fieldset[disabled] .btn-danger:hover,
+.btn-danger.disabled:focus,
+.btn-danger[disabled]:focus,
+fieldset[disabled] .btn-danger:focus,
+.btn-danger.disabled.focus,
+.btn-danger[disabled].focus,
+fieldset[disabled] .btn-danger.focus {
+ background-color: #d9534f;
+ border-color: #d43f3a;
+}
+.btn-danger .badge {
+ color: #d9534f;
+ background-color: #fff;
+}
+.btn-link {
+ font-weight: normal;
+ color: #337ab7;
+ border-radius: 0;
+}
+.btn-link,
+.btn-link:active,
+.btn-link.active,
+.btn-link[disabled],
+fieldset[disabled] .btn-link {
+ background-color: transparent;
+ -webkit-box-shadow: none;
+ box-shadow: none;
+}
+.btn-link,
+.btn-link:hover,
+.btn-link:focus,
+.btn-link:active {
+ border-color: transparent;
+}
+.btn-link:hover,
+.btn-link:focus {
+ color: #23527c;
+ text-decoration: underline;
+ background-color: transparent;
+}
+.btn-link[disabled]:hover,
+fieldset[disabled] .btn-link:hover,
+.btn-link[disabled]:focus,
+fieldset[disabled] .btn-link:focus {
+ color: #777;
+ text-decoration: none;
+}
+.btn-lg,
+.btn-group-lg > .btn {
+ padding: 10px 16px;
+ font-size: 18px;
+ line-height: 1.3333333;
+ border-radius: 6px;
+}
+.btn-sm,
+.btn-group-sm > .btn {
+ padding: 5px 10px;
+ font-size: 12px;
+ line-height: 1.5;
+ border-radius: 3px;
+}
+.btn-xs,
+.btn-group-xs > .btn {
+ padding: 1px 5px;
+ font-size: 12px;
+ line-height: 1.5;
+ border-radius: 3px;
+}
+.btn-block {
+ display: block;
+ width: 100%;
+}
+.btn-block + .btn-block {
+ margin-top: 5px;
+}
+input[type="submit"].btn-block,
+input[type="reset"].btn-block,
+input[type="button"].btn-block {
+ width: 100%;
+}
+.fade {
+ opacity: 0;
+ -webkit-transition: opacity .15s linear;
+ -o-transition: opacity .15s linear;
+ transition: opacity .15s linear;
+}
+.fade.in {
+ opacity: 1;
+}
+.collapse {
+ display: none;
+}
+.collapse.in {
+ display: block;
+}
+tr.collapse.in {
+ display: table-row;
+}
+tbody.collapse.in {
+ display: table-row-group;
+}
+.collapsing {
+ position: relative;
+ height: 0;
+ overflow: hidden;
+ -webkit-transition-timing-function: ease;
+ -o-transition-timing-function: ease;
+ transition-timing-function: ease;
+ -webkit-transition-duration: .35s;
+ -o-transition-duration: .35s;
+ transition-duration: .35s;
+ -webkit-transition-property: height, visibility;
+ -o-transition-property: height, visibility;
+ transition-property: height, visibility;
+}
+.caret {
+ display: inline-block;
+ width: 0;
+ height: 0;
+ margin-left: 2px;
+ vertical-align: middle;
+ border-top: 4px dashed;
+ border-top: 4px solid \9;
+ border-right: 4px solid transparent;
+ border-left: 4px solid transparent;
+}
+.dropup,
+.dropdown {
+ position: relative;
+}
+.dropdown-toggle:focus {
+ outline: 0;
+}
+.dropdown-menu {
+ position: absolute;
+ top: 100%;
+ left: 0;
+ z-index: 1000;
+ display: none;
+ float: left;
+ min-width: 160px;
+ padding: 5px 0;
+ margin: 2px 0 0;
+ font-size: 14px;
+ text-align: left;
+ list-style: none;
+ background-color: #fff;
+ -webkit-background-clip: padding-box;
+ background-clip: padding-box;
+ border: 1px solid #ccc;
+ border: 1px solid rgba(0, 0, 0, .15);
+ border-radius: 4px;
+ -webkit-box-shadow: 0 6px 12px rgba(0, 0, 0, .175);
+ box-shadow: 0 6px 12px rgba(0, 0, 0, .175);
+}
+.dropdown-menu.pull-right {
+ right: 0;
+ left: auto;
+}
+.dropdown-menu .divider {
+ height: 1px;
+ margin: 9px 0;
+ overflow: hidden;
+ background-color: #e5e5e5;
+}
+.dropdown-menu > li > a {
+ display: block;
+ padding: 3px 20px;
+ clear: both;
+ font-weight: normal;
+ line-height: 1.42857143;
+ color: #333;
+ white-space: nowrap;
+}
+.dropdown-menu > li > a:hover,
+.dropdown-menu > li > a:focus {
+ color: #262626;
+ text-decoration: none;
+ background-color: #f5f5f5;
+}
+.dropdown-menu > .active > a,
+.dropdown-menu > .active > a:hover,
+.dropdown-menu > .active > a:focus {
+ color: #fff;
+ text-decoration: none;
+ background-color: #337ab7;
+ outline: 0;
+}
+.dropdown-menu > .disabled > a,
+.dropdown-menu > .disabled > a:hover,
+.dropdown-menu > .disabled > a:focus {
+ color: #777;
+}
+.dropdown-menu > .disabled > a:hover,
+.dropdown-menu > .disabled > a:focus {
+ text-decoration: none;
+ cursor: not-allowed;
+ background-color: transparent;
+ background-image: none;
+ filter: progid:DXImageTransform.Microsoft.gradient(enabled = false);
+}
+.open > .dropdown-menu {
+ display: block;
+}
+.open > a {
+ outline: 0;
+}
+.dropdown-menu-right {
+ right: 0;
+ left: auto;
+}
+.dropdown-menu-left {
+ right: auto;
+ left: 0;
+}
+.dropdown-header {
+ display: block;
+ padding: 3px 20px;
+ font-size: 12px;
+ line-height: 1.42857143;
+ color: #777;
+ white-space: nowrap;
+}
+.dropdown-backdrop {
+ position: fixed;
+ top: 0;
+ right: 0;
+ bottom: 0;
+ left: 0;
+ z-index: 990;
+}
+.pull-right > .dropdown-menu {
+ right: 0;
+ left: auto;
+}
+.dropup .caret,
+.navbar-fixed-bottom .dropdown .caret {
+ content: "";
+ border-top: 0;
+ border-bottom: 4px dashed;
+ border-bottom: 4px solid \9;
+}
+.dropup .dropdown-menu,
+.navbar-fixed-bottom .dropdown .dropdown-menu {
+ top: auto;
+ bottom: 100%;
+ margin-bottom: 2px;
+}
+@media (min-width: 768px) {
+ .navbar-right .dropdown-menu {
+ right: 0;
+ left: auto;
+ }
+ .navbar-right .dropdown-menu-left {
+ right: auto;
+ left: 0;
+ }
+}
+.btn-group,
+.btn-group-vertical {
+ position: relative;
+ display: inline-block;
+ vertical-align: middle;
+}
+.btn-group > .btn,
+.btn-group-vertical > .btn {
+ position: relative;
+ float: left;
+}
+.btn-group > .btn:hover,
+.btn-group-vertical > .btn:hover,
+.btn-group > .btn:focus,
+.btn-group-vertical > .btn:focus,
+.btn-group > .btn:active,
+.btn-group-vertical > .btn:active,
+.btn-group > .btn.active,
+.btn-group-vertical > .btn.active {
+ z-index: 2;
+}
+.btn-group .btn + .btn,
+.btn-group .btn + .btn-group,
+.btn-group .btn-group + .btn,
+.btn-group .btn-group + .btn-group {
+ margin-left: -1px;
+}
+.btn-toolbar {
+ margin-left: -5px;
+}
+.btn-toolbar .btn,
+.btn-toolbar .btn-group,
+.btn-toolbar .input-group {
+ float: left;
+}
+.btn-toolbar > .btn,
+.btn-toolbar > .btn-group,
+.btn-toolbar > .input-group {
+ margin-left: 5px;
+}
+.btn-group > .btn:not(:first-child):not(:last-child):not(.dropdown-toggle) {
+ border-radius: 0;
+}
+.btn-group > .btn:first-child {
+ margin-left: 0;
+}
+.btn-group > .btn:first-child:not(:last-child):not(.dropdown-toggle) {
+ border-top-right-radius: 0;
+ border-bottom-right-radius: 0;
+}
+.btn-group > .btn:last-child:not(:first-child),
+.btn-group > .dropdown-toggle:not(:first-child) {
+ border-top-left-radius: 0;
+ border-bottom-left-radius: 0;
+}
+.btn-group > .btn-group {
+ float: left;
+}
+.btn-group > .btn-group:not(:first-child):not(:last-child) > .btn {
+ border-radius: 0;
+}
+.btn-group > .btn-group:first-child:not(:last-child) > .btn:last-child,
+.btn-group > .btn-group:first-child:not(:last-child) > .dropdown-toggle {
+ border-top-right-radius: 0;
+ border-bottom-right-radius: 0;
+}
+.btn-group > .btn-group:last-child:not(:first-child) > .btn:first-child {
+ border-top-left-radius: 0;
+ border-bottom-left-radius: 0;
+}
+.btn-group .dropdown-toggle:active,
+.btn-group.open .dropdown-toggle {
+ outline: 0;
+}
+.btn-group > .btn + .dropdown-toggle {
+ padding-right: 8px;
+ padding-left: 8px;
+}
+.btn-group > .btn-lg + .dropdown-toggle {
+ padding-right: 12px;
+ padding-left: 12px;
+}
+.btn-group.open .dropdown-toggle {
+ -webkit-box-shadow: inset 0 3px 5px rgba(0, 0, 0, .125);
+ box-shadow: inset 0 3px 5px rgba(0, 0, 0, .125);
+}
+.btn-group.open .dropdown-toggle.btn-link {
+ -webkit-box-shadow: none;
+ box-shadow: none;
+}
+.btn .caret {
+ margin-left: 0;
+}
+.btn-lg .caret {
+ border-width: 5px 5px 0;
+ border-bottom-width: 0;
+}
+.dropup .btn-lg .caret {
+ border-width: 0 5px 5px;
+}
+.btn-group-vertical > .btn,
+.btn-group-vertical > .btn-group,
+.btn-group-vertical > .btn-group > .btn {
+ display: block;
+ float: none;
+ width: 100%;
+ max-width: 100%;
+}
+.btn-group-vertical > .btn-group > .btn {
+ float: none;
+}
+.btn-group-vertical > .btn + .btn,
+.btn-group-vertical > .btn + .btn-group,
+.btn-group-vertical > .btn-group + .btn,
+.btn-group-vertical > .btn-group + .btn-group {
+ margin-top: -1px;
+ margin-left: 0;
+}
+.btn-group-vertical > .btn:not(:first-child):not(:last-child) {
+ border-radius: 0;
+}
+.btn-group-vertical > .btn:first-child:not(:last-child) {
+ border-top-left-radius: 4px;
+ border-top-right-radius: 4px;
+ border-bottom-right-radius: 0;
+ border-bottom-left-radius: 0;
+}
+.btn-group-vertical > .btn:last-child:not(:first-child) {
+ border-top-left-radius: 0;
+ border-top-right-radius: 0;
+ border-bottom-right-radius: 4px;
+ border-bottom-left-radius: 4px;
+}
+.btn-group-vertical > .btn-group:not(:first-child):not(:last-child) > .btn {
+ border-radius: 0;
+}
+.btn-group-vertical > .btn-group:first-child:not(:last-child) > .btn:last-child,
+.btn-group-vertical > .btn-group:first-child:not(:last-child) > .dropdown-toggle {
+ border-bottom-right-radius: 0;
+ border-bottom-left-radius: 0;
+}
+.btn-group-vertical > .btn-group:last-child:not(:first-child) > .btn:first-child {
+ border-top-left-radius: 0;
+ border-top-right-radius: 0;
+}
+.btn-group-justified {
+ display: table;
+ width: 100%;
+ table-layout: fixed;
+ border-collapse: separate;
+}
+.btn-group-justified > .btn,
+.btn-group-justified > .btn-group {
+ display: table-cell;
+ float: none;
+ width: 1%;
+}
+.btn-group-justified > .btn-group .btn {
+ width: 100%;
+}
+.btn-group-justified > .btn-group .dropdown-menu {
+ left: auto;
+}
+[data-toggle="buttons"] > .btn input[type="radio"],
+[data-toggle="buttons"] > .btn-group > .btn input[type="radio"],
+[data-toggle="buttons"] > .btn input[type="checkbox"],
+[data-toggle="buttons"] > .btn-group > .btn input[type="checkbox"] {
+ position: absolute;
+ clip: rect(0, 0, 0, 0);
+ pointer-events: none;
+}
+.input-group {
+ position: relative;
+ display: table;
+ border-collapse: separate;
+}
+.input-group[class*="col-"] {
+ float: none;
+ padding-right: 0;
+ padding-left: 0;
+}
+.input-group .form-control {
+ position: relative;
+ z-index: 2;
+ float: left;
+ width: 100%;
+ margin-bottom: 0;
+}
+.input-group .form-control:focus {
+ z-index: 3;
+}
+.input-group-lg > .form-control,
+.input-group-lg > .input-group-addon,
+.input-group-lg > .input-group-btn > .btn {
+ height: 46px;
+ padding: 10px 16px;
+ font-size: 18px;
+ line-height: 1.3333333;
+ border-radius: 6px;
+}
+select.input-group-lg > .form-control,
+select.input-group-lg > .input-group-addon,
+select.input-group-lg > .input-group-btn > .btn {
+ height: 46px;
+ line-height: 46px;
+}
+textarea.input-group-lg > .form-control,
+textarea.input-group-lg > .input-group-addon,
+textarea.input-group-lg > .input-group-btn > .btn,
+select[multiple].input-group-lg > .form-control,
+select[multiple].input-group-lg > .input-group-addon,
+select[multiple].input-group-lg > .input-group-btn > .btn {
+ height: auto;
+}
+.input-group-sm > .form-control,
+.input-group-sm > .input-group-addon,
+.input-group-sm > .input-group-btn > .btn {
+ height: 30px;
+ padding: 5px 10px;
+ font-size: 12px;
+ line-height: 1.5;
+ border-radius: 3px;
+}
+select.input-group-sm > .form-control,
+select.input-group-sm > .input-group-addon,
+select.input-group-sm > .input-group-btn > .btn {
+ height: 30px;
+ line-height: 30px;
+}
+textarea.input-group-sm > .form-control,
+textarea.input-group-sm > .input-group-addon,
+textarea.input-group-sm > .input-group-btn > .btn,
+select[multiple].input-group-sm > .form-control,
+select[multiple].input-group-sm > .input-group-addon,
+select[multiple].input-group-sm > .input-group-btn > .btn {
+ height: auto;
+}
+.input-group-addon,
+.input-group-btn,
+.input-group .form-control {
+ display: table-cell;
+}
+.input-group-addon:not(:first-child):not(:last-child),
+.input-group-btn:not(:first-child):not(:last-child),
+.input-group .form-control:not(:first-child):not(:last-child) {
+ border-radius: 0;
+}
+.input-group-addon,
+.input-group-btn {
+ width: 1%;
+ white-space: nowrap;
+ vertical-align: middle;
+}
+.input-group-addon {
+ padding: 6px 12px;
+ font-size: 14px;
+ font-weight: normal;
+ line-height: 1;
+ color: #555;
+ text-align: center;
+ background-color: #eee;
+ border: 1px solid #ccc;
+ border-radius: 4px;
+}
+.input-group-addon.input-sm {
+ padding: 5px 10px;
+ font-size: 12px;
+ border-radius: 3px;
+}
+.input-group-addon.input-lg {
+ padding: 10px 16px;
+ font-size: 18px;
+ border-radius: 6px;
+}
+.input-group-addon input[type="radio"],
+.input-group-addon input[type="checkbox"] {
+ margin-top: 0;
+}
+.input-group .form-control:first-child,
+.input-group-addon:first-child,
+.input-group-btn:first-child > .btn,
+.input-group-btn:first-child > .btn-group > .btn,
+.input-group-btn:first-child > .dropdown-toggle,
+.input-group-btn:last-child > .btn:not(:last-child):not(.dropdown-toggle),
+.input-group-btn:last-child > .btn-group:not(:last-child) > .btn {
+ border-top-right-radius: 0;
+ border-bottom-right-radius: 0;
+}
+.input-group-addon:first-child {
+ border-right: 0;
+}
+.input-group .form-control:last-child,
+.input-group-addon:last-child,
+.input-group-btn:last-child > .btn,
+.input-group-btn:last-child > .btn-group > .btn,
+.input-group-btn:last-child > .dropdown-toggle,
+.input-group-btn:first-child > .btn:not(:first-child),
+.input-group-btn:first-child > .btn-group:not(:first-child) > .btn {
+ border-top-left-radius: 0;
+ border-bottom-left-radius: 0;
+}
+.input-group-addon:last-child {
+ border-left: 0;
+}
+.input-group-btn {
+ position: relative;
+ font-size: 0;
+ white-space: nowrap;
+}
+.input-group-btn > .btn {
+ position: relative;
+}
+.input-group-btn > .btn + .btn {
+ margin-left: -1px;
+}
+.input-group-btn > .btn:hover,
+.input-group-btn > .btn:focus,
+.input-group-btn > .btn:active {
+ z-index: 2;
+}
+.input-group-btn:first-child > .btn,
+.input-group-btn:first-child > .btn-group {
+ margin-right: -1px;
+}
+.input-group-btn:last-child > .btn,
+.input-group-btn:last-child > .btn-group {
+ z-index: 2;
+ margin-left: -1px;
+}
+.nav {
+ padding-left: 0;
+ margin-bottom: 0;
+ list-style: none;
+}
+.nav > li {
+ position: relative;
+ display: block;
+}
+.nav > li > a {
+ position: relative;
+ display: block;
+ padding: 10px 15px;
+}
+.nav > li > a:hover,
+.nav > li > a:focus {
+ text-decoration: none;
+ background-color: #eee;
+}
+.nav > li.disabled > a {
+ color: #777;
+}
+.nav > li.disabled > a:hover,
+.nav > li.disabled > a:focus {
+ color: #777;
+ text-decoration: none;
+ cursor: not-allowed;
+ background-color: transparent;
+}
+.nav .open > a,
+.nav .open > a:hover,
+.nav .open > a:focus {
+ background-color: #eee;
+ border-color: #337ab7;
+}
+.nav .nav-divider {
+ height: 1px;
+ margin: 9px 0;
+ overflow: hidden;
+ background-color: #e5e5e5;
+}
+.nav > li > a > img {
+ max-width: none;
+}
+.nav-tabs {
+ border-bottom: 1px solid #ddd;
+}
+.nav-tabs > li {
+ float: left;
+ margin-bottom: -1px;
+}
+.nav-tabs > li > a {
+ margin-right: 2px;
+ line-height: 1.42857143;
+ border: 1px solid transparent;
+ border-radius: 4px 4px 0 0;
+}
+.nav-tabs > li > a:hover {
+ border-color: #eee #eee #ddd;
+}
+.nav-tabs > li.active > a,
+.nav-tabs > li.active > a:hover,
+.nav-tabs > li.active > a:focus {
+ color: #555;
+ cursor: default;
+ background-color: #fff;
+ border: 1px solid #ddd;
+ border-bottom-color: transparent;
+}
+.nav-tabs.nav-justified {
+ width: 100%;
+ border-bottom: 0;
+}
+.nav-tabs.nav-justified > li {
+ float: none;
+}
+.nav-tabs.nav-justified > li > a {
+ margin-bottom: 5px;
+ text-align: center;
+}
+.nav-tabs.nav-justified > .dropdown .dropdown-menu {
+ top: auto;
+ left: auto;
+}
+@media (min-width: 768px) {
+ .nav-tabs.nav-justified > li {
+ display: table-cell;
+ width: 1%;
+ }
+ .nav-tabs.nav-justified > li > a {
+ margin-bottom: 0;
+ }
+}
+.nav-tabs.nav-justified > li > a {
+ margin-right: 0;
+ border-radius: 4px;
+}
+.nav-tabs.nav-justified > .active > a,
+.nav-tabs.nav-justified > .active > a:hover,
+.nav-tabs.nav-justified > .active > a:focus {
+ border: 1px solid #ddd;
+}
+@media (min-width: 768px) {
+ .nav-tabs.nav-justified > li > a {
+ border-bottom: 1px solid #ddd;
+ border-radius: 4px 4px 0 0;
+ }
+ .nav-tabs.nav-justified > .active > a,
+ .nav-tabs.nav-justified > .active > a:hover,
+ .nav-tabs.nav-justified > .active > a:focus {
+ border-bottom-color: #fff;
+ }
+}
+.nav-pills > li {
+ float: left;
+}
+.nav-pills > li > a {
+ border-radius: 4px;
+}
+.nav-pills > li + li {
+ margin-left: 2px;
+}
+.nav-pills > li.active > a,
+.nav-pills > li.active > a:hover,
+.nav-pills > li.active > a:focus {
+ color: #fff;
+ background-color: #337ab7;
+}
+.nav-stacked > li {
+ float: none;
+}
+.nav-stacked > li + li {
+ margin-top: 2px;
+ margin-left: 0;
+}
+.nav-justified {
+ width: 100%;
+}
+.nav-justified > li {
+ float: none;
+}
+.nav-justified > li > a {
+ margin-bottom: 5px;
+ text-align: center;
+}
+.nav-justified > .dropdown .dropdown-menu {
+ top: auto;
+ left: auto;
+}
+@media (min-width: 768px) {
+ .nav-justified > li {
+ display: table-cell;
+ width: 1%;
+ }
+ .nav-justified > li > a {
+ margin-bottom: 0;
+ }
+}
+.nav-tabs-justified {
+ border-bottom: 0;
+}
+.nav-tabs-justified > li > a {
+ margin-right: 0;
+ border-radius: 4px;
+}
+.nav-tabs-justified > .active > a,
+.nav-tabs-justified > .active > a:hover,
+.nav-tabs-justified > .active > a:focus {
+ border: 1px solid #ddd;
+}
+@media (min-width: 768px) {
+ .nav-tabs-justified > li > a {
+ border-bottom: 1px solid #ddd;
+ border-radius: 4px 4px 0 0;
+ }
+ .nav-tabs-justified > .active > a,
+ .nav-tabs-justified > .active > a:hover,
+ .nav-tabs-justified > .active > a:focus {
+ border-bottom-color: #fff;
+ }
+}
+.tab-content > .tab-pane {
+ display: none;
+}
+.tab-content > .active {
+ display: block;
+}
+.nav-tabs .dropdown-menu {
+ margin-top: -1px;
+ border-top-left-radius: 0;
+ border-top-right-radius: 0;
+}
+.navbar {
+ position: relative;
+ min-height: 50px;
+ margin-bottom: 20px;
+ border: 1px solid transparent;
+}
+@media (min-width: 768px) {
+ .navbar {
+ border-radius: 4px;
+ }
+}
+@media (min-width: 768px) {
+ .navbar-header {
+ float: left;
+ }
+}
+.navbar-collapse {
+ padding-right: 15px;
+ padding-left: 15px;
+ overflow-x: visible;
+ -webkit-overflow-scrolling: touch;
+ border-top: 1px solid transparent;
+ -webkit-box-shadow: inset 0 1px 0 rgba(255, 255, 255, .1);
+ box-shadow: inset 0 1px 0 rgba(255, 255, 255, .1);
+}
+.navbar-collapse.in {
+ overflow-y: auto;
+}
+@media (min-width: 768px) {
+ .navbar-collapse {
+ width: auto;
+ border-top: 0;
+ -webkit-box-shadow: none;
+ box-shadow: none;
+ }
+ .navbar-collapse.collapse {
+ display: block !important;
+ height: auto !important;
+ padding-bottom: 0;
+ overflow: visible !important;
+ }
+ .navbar-collapse.in {
+ overflow-y: visible;
+ }
+ .navbar-fixed-top .navbar-collapse,
+ .navbar-static-top .navbar-collapse,
+ .navbar-fixed-bottom .navbar-collapse {
+ padding-right: 0;
+ padding-left: 0;
+ }
+}
+.navbar-fixed-top .navbar-collapse,
+.navbar-fixed-bottom .navbar-collapse {
+ max-height: 340px;
+}
+@media (max-device-width: 480px) and (orientation: landscape) {
+ .navbar-fixed-top .navbar-collapse,
+ .navbar-fixed-bottom .navbar-collapse {
+ max-height: 200px;
+ }
+}
+.container > .navbar-header,
+.container-fluid > .navbar-header,
+.container > .navbar-collapse,
+.container-fluid > .navbar-collapse {
+ margin-right: -15px;
+ margin-left: -15px;
+}
+@media (min-width: 768px) {
+ .container > .navbar-header,
+ .container-fluid > .navbar-header,
+ .container > .navbar-collapse,
+ .container-fluid > .navbar-collapse {
+ margin-right: 0;
+ margin-left: 0;
+ }
+}
+.navbar-static-top {
+ z-index: 1000;
+ border-width: 0 0 1px;
+}
+@media (min-width: 768px) {
+ .navbar-static-top {
+ border-radius: 0;
+ }
+}
+.navbar-fixed-top,
+.navbar-fixed-bottom {
+ position: fixed;
+ right: 0;
+ left: 0;
+ z-index: 1030;
+}
+@media (min-width: 768px) {
+ .navbar-fixed-top,
+ .navbar-fixed-bottom {
+ border-radius: 0;
+ }
+}
+.navbar-fixed-top {
+ top: 0;
+ border-width: 0 0 1px;
+}
+.navbar-fixed-bottom {
+ bottom: 0;
+ margin-bottom: 0;
+ border-width: 1px 0 0;
+}
+.navbar-brand {
+ float: left;
+ height: 50px;
+ padding: 15px 15px;
+ font-size: 18px;
+ line-height: 20px;
+}
+.navbar-brand:hover,
+.navbar-brand:focus {
+ text-decoration: none;
+}
+.navbar-brand > img {
+ display: block;
+}
+@media (min-width: 768px) {
+ .navbar > .container .navbar-brand,
+ .navbar > .container-fluid .navbar-brand {
+ margin-left: -15px;
+ }
+}
+.navbar-toggle {
+ position: relative;
+ float: right;
+ padding: 9px 10px;
+ margin-top: 8px;
+ margin-right: 15px;
+ margin-bottom: 8px;
+ background-color: transparent;
+ background-image: none;
+ border: 1px solid transparent;
+ border-radius: 4px;
+}
+.navbar-toggle:focus {
+ outline: 0;
+}
+.navbar-toggle .icon-bar {
+ display: block;
+ width: 22px;
+ height: 2px;
+ border-radius: 1px;
+}
+.navbar-toggle .icon-bar + .icon-bar {
+ margin-top: 4px;
+}
+@media (min-width: 768px) {
+ .navbar-toggle {
+ display: none;
+ }
+}
+.navbar-nav {
+ margin: 7.5px -15px;
+}
+.navbar-nav > li > a {
+ padding-top: 10px;
+ padding-bottom: 10px;
+ line-height: 20px;
+}
+@media (max-width: 767px) {
+ .navbar-nav .open .dropdown-menu {
+ position: static;
+ float: none;
+ width: auto;
+ margin-top: 0;
+ background-color: transparent;
+ border: 0;
+ -webkit-box-shadow: none;
+ box-shadow: none;
+ }
+ .navbar-nav .open .dropdown-menu > li > a,
+ .navbar-nav .open .dropdown-menu .dropdown-header {
+ padding: 5px 15px 5px 25px;
+ }
+ .navbar-nav .open .dropdown-menu > li > a {
+ line-height: 20px;
+ }
+ .navbar-nav .open .dropdown-menu > li > a:hover,
+ .navbar-nav .open .dropdown-menu > li > a:focus {
+ background-image: none;
+ }
+}
+@media (min-width: 768px) {
+ .navbar-nav {
+ float: left;
+ margin: 0;
+ }
+ .navbar-nav > li {
+ float: left;
+ }
+ .navbar-nav > li > a {
+ padding-top: 15px;
+ padding-bottom: 15px;
+ }
+}
+.navbar-form {
+ padding: 10px 15px;
+ margin-top: 8px;
+ margin-right: -15px;
+ margin-bottom: 8px;
+ margin-left: -15px;
+ border-top: 1px solid transparent;
+ border-bottom: 1px solid transparent;
+ -webkit-box-shadow: inset 0 1px 0 rgba(255, 255, 255, .1), 0 1px 0 rgba(255, 255, 255, .1);
+ box-shadow: inset 0 1px 0 rgba(255, 255, 255, .1), 0 1px 0 rgba(255, 255, 255, .1);
+}
+@media (min-width: 768px) {
+ .navbar-form .form-group {
+ display: inline-block;
+ margin-bottom: 0;
+ vertical-align: middle;
+ }
+ .navbar-form .form-control {
+ display: inline-block;
+ width: auto;
+ vertical-align: middle;
+ }
+ .navbar-form .form-control-static {
+ display: inline-block;
+ }
+ .navbar-form .input-group {
+ display: inline-table;
+ vertical-align: middle;
+ }
+ .navbar-form .input-group .input-group-addon,
+ .navbar-form .input-group .input-group-btn,
+ .navbar-form .input-group .form-control {
+ width: auto;
+ }
+ .navbar-form .input-group > .form-control {
+ width: 100%;
+ }
+ .navbar-form .control-label {
+ margin-bottom: 0;
+ vertical-align: middle;
+ }
+ .navbar-form .radio,
+ .navbar-form .checkbox {
+ display: inline-block;
+ margin-top: 0;
+ margin-bottom: 0;
+ vertical-align: middle;
+ }
+ .navbar-form .radio label,
+ .navbar-form .checkbox label {
+ padding-left: 0;
+ }
+ .navbar-form .radio input[type="radio"],
+ .navbar-form .checkbox input[type="checkbox"] {
+ position: relative;
+ margin-left: 0;
+ }
+ .navbar-form .has-feedback .form-control-feedback {
+ top: 0;
+ }
+}
+@media (max-width: 767px) {
+ .navbar-form .form-group {
+ margin-bottom: 5px;
+ }
+ .navbar-form .form-group:last-child {
+ margin-bottom: 0;
+ }
+}
+@media (min-width: 768px) {
+ .navbar-form {
+ width: auto;
+ padding-top: 0;
+ padding-bottom: 0;
+ margin-right: 0;
+ margin-left: 0;
+ border: 0;
+ -webkit-box-shadow: none;
+ box-shadow: none;
+ }
+}
+.navbar-nav > li > .dropdown-menu {
+ margin-top: 0;
+ border-top-left-radius: 0;
+ border-top-right-radius: 0;
+}
+.navbar-fixed-bottom .navbar-nav > li > .dropdown-menu {
+ margin-bottom: 0;
+ border-top-left-radius: 4px;
+ border-top-right-radius: 4px;
+ border-bottom-right-radius: 0;
+ border-bottom-left-radius: 0;
+}
+.navbar-btn {
+ margin-top: 8px;
+ margin-bottom: 8px;
+}
+.navbar-btn.btn-sm {
+ margin-top: 10px;
+ margin-bottom: 10px;
+}
+.navbar-btn.btn-xs {
+ margin-top: 14px;
+ margin-bottom: 14px;
+}
+.navbar-text {
+ margin-top: 15px;
+ margin-bottom: 15px;
+}
+@media (min-width: 768px) {
+ .navbar-text {
+ float: left;
+ margin-right: 15px;
+ margin-left: 15px;
+ }
+}
+@media (min-width: 768px) {
+ .navbar-left {
+ float: left !important;
+ }
+ .navbar-right {
+ float: right !important;
+ margin-right: -15px;
+ }
+ .navbar-right ~ .navbar-right {
+ margin-right: 0;
+ }
+}
+.navbar-default {
+ background-color: #f8f8f8;
+ border-color: #e7e7e7;
+}
+.navbar-default .navbar-brand {
+ color: #777;
+}
+.navbar-default .navbar-brand:hover,
+.navbar-default .navbar-brand:focus {
+ color: #5e5e5e;
+ background-color: transparent;
+}
+.navbar-default .navbar-text {
+ color: #777;
+}
+.navbar-default .navbar-nav > li > a {
+ color: #777;
+}
+.navbar-default .navbar-nav > li > a:hover,
+.navbar-default .navbar-nav > li > a:focus {
+ color: #333;
+ background-color: transparent;
+}
+.navbar-default .navbar-nav > .active > a,
+.navbar-default .navbar-nav > .active > a:hover,
+.navbar-default .navbar-nav > .active > a:focus {
+ color: #555;
+ background-color: #e7e7e7;
+}
+.navbar-default .navbar-nav > .disabled > a,
+.navbar-default .navbar-nav > .disabled > a:hover,
+.navbar-default .navbar-nav > .disabled > a:focus {
+ color: #ccc;
+ background-color: transparent;
+}
+.navbar-default .navbar-toggle {
+ border-color: #ddd;
+}
+.navbar-default .navbar-toggle:hover,
+.navbar-default .navbar-toggle:focus {
+ background-color: #ddd;
+}
+.navbar-default .navbar-toggle .icon-bar {
+ background-color: #888;
+}
+.navbar-default .navbar-collapse,
+.navbar-default .navbar-form {
+ border-color: #e7e7e7;
+}
+.navbar-default .navbar-nav > .open > a,
+.navbar-default .navbar-nav > .open > a:hover,
+.navbar-default .navbar-nav > .open > a:focus {
+ color: #555;
+ background-color: #e7e7e7;
+}
+@media (max-width: 767px) {
+ .navbar-default .navbar-nav .open .dropdown-menu > li > a {
+ color: #777;
+ }
+ .navbar-default .navbar-nav .open .dropdown-menu > li > a:hover,
+ .navbar-default .navbar-nav .open .dropdown-menu > li > a:focus {
+ color: #333;
+ background-color: transparent;
+ }
+ .navbar-default .navbar-nav .open .dropdown-menu > .active > a,
+ .navbar-default .navbar-nav .open .dropdown-menu > .active > a:hover,
+ .navbar-default .navbar-nav .open .dropdown-menu > .active > a:focus {
+ color: #555;
+ background-color: #e7e7e7;
+ }
+ .navbar-default .navbar-nav .open .dropdown-menu > .disabled > a,
+ .navbar-default .navbar-nav .open .dropdown-menu > .disabled > a:hover,
+ .navbar-default .navbar-nav .open .dropdown-menu > .disabled > a:focus {
+ color: #ccc;
+ background-color: transparent;
+ }
+}
+.navbar-default .navbar-link {
+ color: #777;
+}
+.navbar-default .navbar-link:hover {
+ color: #333;
+}
+.navbar-default .btn-link {
+ color: #777;
+}
+.navbar-default .btn-link:hover,
+.navbar-default .btn-link:focus {
+ color: #333;
+}
+.navbar-default .btn-link[disabled]:hover,
+fieldset[disabled] .navbar-default .btn-link:hover,
+.navbar-default .btn-link[disabled]:focus,
+fieldset[disabled] .navbar-default .btn-link:focus {
+ color: #ccc;
+}
+.navbar-inverse {
+ background-color: #222;
+ border-color: #080808;
+}
+.navbar-inverse .navbar-brand {
+ color: #9d9d9d;
+}
+.navbar-inverse .navbar-brand:hover,
+.navbar-inverse .navbar-brand:focus {
+ color: #fff;
+ background-color: transparent;
+}
+.navbar-inverse .navbar-text {
+ color: #9d9d9d;
+}
+.navbar-inverse .navbar-nav > li > a {
+ color: #9d9d9d;
+}
+.navbar-inverse .navbar-nav > li > a:hover,
+.navbar-inverse .navbar-nav > li > a:focus {
+ color: #fff;
+ background-color: transparent;
+}
+.navbar-inverse .navbar-nav > .active > a,
+.navbar-inverse .navbar-nav > .active > a:hover,
+.navbar-inverse .navbar-nav > .active > a:focus {
+ color: #fff;
+ background-color: #080808;
+}
+.navbar-inverse .navbar-nav > .disabled > a,
+.navbar-inverse .navbar-nav > .disabled > a:hover,
+.navbar-inverse .navbar-nav > .disabled > a:focus {
+ color: #444;
+ background-color: transparent;
+}
+.navbar-inverse .navbar-toggle {
+ border-color: #333;
+}
+.navbar-inverse .navbar-toggle:hover,
+.navbar-inverse .navbar-toggle:focus {
+ background-color: #333;
+}
+.navbar-inverse .navbar-toggle .icon-bar {
+ background-color: #fff;
+}
+.navbar-inverse .navbar-collapse,
+.navbar-inverse .navbar-form {
+ border-color: #101010;
+}
+.navbar-inverse .navbar-nav > .open > a,
+.navbar-inverse .navbar-nav > .open > a:hover,
+.navbar-inverse .navbar-nav > .open > a:focus {
+ color: #fff;
+ background-color: #080808;
+}
+@media (max-width: 767px) {
+ .navbar-inverse .navbar-nav .open .dropdown-menu > .dropdown-header {
+ border-color: #080808;
+ }
+ .navbar-inverse .navbar-nav .open .dropdown-menu .divider {
+ background-color: #080808;
+ }
+ .navbar-inverse .navbar-nav .open .dropdown-menu > li > a {
+ color: #9d9d9d;
+ }
+ .navbar-inverse .navbar-nav .open .dropdown-menu > li > a:hover,
+ .navbar-inverse .navbar-nav .open .dropdown-menu > li > a:focus {
+ color: #fff;
+ background-color: transparent;
+ }
+ .navbar-inverse .navbar-nav .open .dropdown-menu > .active > a,
+ .navbar-inverse .navbar-nav .open .dropdown-menu > .active > a:hover,
+ .navbar-inverse .navbar-nav .open .dropdown-menu > .active > a:focus {
+ color: #fff;
+ background-color: #080808;
+ }
+ .navbar-inverse .navbar-nav .open .dropdown-menu > .disabled > a,
+ .navbar-inverse .navbar-nav .open .dropdown-menu > .disabled > a:hover,
+ .navbar-inverse .navbar-nav .open .dropdown-menu > .disabled > a:focus {
+ color: #444;
+ background-color: transparent;
+ }
+}
+.navbar-inverse .navbar-link {
+ color: #9d9d9d;
+}
+.navbar-inverse .navbar-link:hover {
+ color: #fff;
+}
+.navbar-inverse .btn-link {
+ color: #9d9d9d;
+}
+.navbar-inverse .btn-link:hover,
+.navbar-inverse .btn-link:focus {
+ color: #fff;
+}
+.navbar-inverse .btn-link[disabled]:hover,
+fieldset[disabled] .navbar-inverse .btn-link:hover,
+.navbar-inverse .btn-link[disabled]:focus,
+fieldset[disabled] .navbar-inverse .btn-link:focus {
+ color: #444;
+}
+.breadcrumb {
+ padding: 8px 15px;
+ margin-bottom: 20px;
+ list-style: none;
+ background-color: #f5f5f5;
+ border-radius: 4px;
+}
+.breadcrumb > li {
+ display: inline-block;
+}
+.breadcrumb > li + li:before {
+ padding: 0 5px;
+ color: #ccc;
+ content: "/\00a0";
+}
+.breadcrumb > .active {
+ color: #777;
+}
+.pagination {
+ display: inline-block;
+ padding-left: 0;
+ margin: 20px 0;
+ border-radius: 4px;
+}
+.pagination > li {
+ display: inline;
+}
+.pagination > li > a,
+.pagination > li > span {
+ position: relative;
+ float: left;
+ padding: 6px 12px;
+ margin-left: -1px;
+ line-height: 1.42857143;
+ color: #337ab7;
+ text-decoration: none;
+ background-color: #fff;
+ border: 1px solid #ddd;
+}
+.pagination > li:first-child > a,
+.pagination > li:first-child > span {
+ margin-left: 0;
+ border-top-left-radius: 4px;
+ border-bottom-left-radius: 4px;
+}
+.pagination > li:last-child > a,
+.pagination > li:last-child > span {
+ border-top-right-radius: 4px;
+ border-bottom-right-radius: 4px;
+}
+.pagination > li > a:hover,
+.pagination > li > span:hover,
+.pagination > li > a:focus,
+.pagination > li > span:focus {
+ z-index: 2;
+ color: #23527c;
+ background-color: #eee;
+ border-color: #ddd;
+}
+.pagination > .active > a,
+.pagination > .active > span,
+.pagination > .active > a:hover,
+.pagination > .active > span:hover,
+.pagination > .active > a:focus,
+.pagination > .active > span:focus {
+ z-index: 3;
+ color: #fff;
+ cursor: default;
+ background-color: #337ab7;
+ border-color: #337ab7;
+}
+.pagination > .disabled > span,
+.pagination > .disabled > span:hover,
+.pagination > .disabled > span:focus,
+.pagination > .disabled > a,
+.pagination > .disabled > a:hover,
+.pagination > .disabled > a:focus {
+ color: #777;
+ cursor: not-allowed;
+ background-color: #fff;
+ border-color: #ddd;
+}
+.pagination-lg > li > a,
+.pagination-lg > li > span {
+ padding: 10px 16px;
+ font-size: 18px;
+ line-height: 1.3333333;
+}
+.pagination-lg > li:first-child > a,
+.pagination-lg > li:first-child > span {
+ border-top-left-radius: 6px;
+ border-bottom-left-radius: 6px;
+}
+.pagination-lg > li:last-child > a,
+.pagination-lg > li:last-child > span {
+ border-top-right-radius: 6px;
+ border-bottom-right-radius: 6px;
+}
+.pagination-sm > li > a,
+.pagination-sm > li > span {
+ padding: 5px 10px;
+ font-size: 12px;
+ line-height: 1.5;
+}
+.pagination-sm > li:first-child > a,
+.pagination-sm > li:first-child > span {
+ border-top-left-radius: 3px;
+ border-bottom-left-radius: 3px;
+}
+.pagination-sm > li:last-child > a,
+.pagination-sm > li:last-child > span {
+ border-top-right-radius: 3px;
+ border-bottom-right-radius: 3px;
+}
+.pager {
+ padding-left: 0;
+ margin: 20px 0;
+ text-align: center;
+ list-style: none;
+}
+.pager li {
+ display: inline;
+}
+.pager li > a,
+.pager li > span {
+ display: inline-block;
+ padding: 5px 14px;
+ background-color: #fff;
+ border: 1px solid #ddd;
+ border-radius: 15px;
+}
+.pager li > a:hover,
+.pager li > a:focus {
+ text-decoration: none;
+ background-color: #eee;
+}
+.pager .next > a,
+.pager .next > span {
+ float: right;
+}
+.pager .previous > a,
+.pager .previous > span {
+ float: left;
+}
+.pager .disabled > a,
+.pager .disabled > a:hover,
+.pager .disabled > a:focus,
+.pager .disabled > span {
+ color: #777;
+ cursor: not-allowed;
+ background-color: #fff;
+}
+.label {
+ display: inline;
+ padding: .2em .6em .3em;
+ font-size: 75%;
+ font-weight: bold;
+ line-height: 1;
+ color: #fff;
+ text-align: center;
+ white-space: nowrap;
+ vertical-align: baseline;
+ border-radius: .25em;
+}
+a.label:hover,
+a.label:focus {
+ color: #fff;
+ text-decoration: none;
+ cursor: pointer;
+}
+.label:empty {
+ display: none;
+}
+.btn .label {
+ position: relative;
+ top: -1px;
+}
+.label-default {
+ background-color: #777;
+}
+.label-default[href]:hover,
+.label-default[href]:focus {
+ background-color: #5e5e5e;
+}
+.label-primary {
+ background-color: #337ab7;
+}
+.label-primary[href]:hover,
+.label-primary[href]:focus {
+ background-color: #286090;
+}
+.label-success {
+ background-color: #5cb85c;
+}
+.label-success[href]:hover,
+.label-success[href]:focus {
+ background-color: #449d44;
+}
+.label-info {
+ background-color: #5bc0de;
+}
+.label-info[href]:hover,
+.label-info[href]:focus {
+ background-color: #31b0d5;
+}
+.label-warning {
+ background-color: #f0ad4e;
+}
+.label-warning[href]:hover,
+.label-warning[href]:focus {
+ background-color: #ec971f;
+}
+.label-danger {
+ background-color: #d9534f;
+}
+.label-danger[href]:hover,
+.label-danger[href]:focus {
+ background-color: #c9302c;
+}
+.badge {
+ display: inline-block;
+ min-width: 10px;
+ padding: 3px 7px;
+ font-size: 12px;
+ font-weight: bold;
+ line-height: 1;
+ color: #fff;
+ text-align: center;
+ white-space: nowrap;
+ vertical-align: middle;
+ background-color: #777;
+ border-radius: 10px;
+}
+.badge:empty {
+ display: none;
+}
+.btn .badge {
+ position: relative;
+ top: -1px;
+}
+.btn-xs .badge,
+.btn-group-xs > .btn .badge {
+ top: 0;
+ padding: 1px 5px;
+}
+a.badge:hover,
+a.badge:focus {
+ color: #fff;
+ text-decoration: none;
+ cursor: pointer;
+}
+.list-group-item.active > .badge,
+.nav-pills > .active > a > .badge {
+ color: #337ab7;
+ background-color: #fff;
+}
+.list-group-item > .badge {
+ float: right;
+}
+.list-group-item > .badge + .badge {
+ margin-right: 5px;
+}
+.nav-pills > li > a > .badge {
+ margin-left: 3px;
+}
+.jumbotron {
+ padding-top: 30px;
+ padding-bottom: 30px;
+ margin-bottom: 30px;
+ color: inherit;
+ background-color: #eee;
+}
+.jumbotron h1,
+.jumbotron .h1 {
+ color: inherit;
+}
+.jumbotron p {
+ margin-bottom: 15px;
+ font-size: 21px;
+ font-weight: 200;
+}
+.jumbotron > hr {
+ border-top-color: #d5d5d5;
+}
+.container .jumbotron,
+.container-fluid .jumbotron {
+ padding-right: 15px;
+ padding-left: 15px;
+ border-radius: 6px;
+}
+.jumbotron .container {
+ max-width: 100%;
+}
+@media screen and (min-width: 768px) {
+ .jumbotron {
+ padding-top: 48px;
+ padding-bottom: 48px;
+ }
+ .container .jumbotron,
+ .container-fluid .jumbotron {
+ padding-right: 60px;
+ padding-left: 60px;
+ }
+ .jumbotron h1,
+ .jumbotron .h1 {
+ font-size: 63px;
+ }
+}
+.thumbnail {
+ display: block;
+ padding: 4px;
+ margin-bottom: 20px;
+ line-height: 1.42857143;
+ background-color: #fff;
+ border: 1px solid #ddd;
+ border-radius: 4px;
+ -webkit-transition: border .2s ease-in-out;
+ -o-transition: border .2s ease-in-out;
+ transition: border .2s ease-in-out;
+}
+.thumbnail > img,
+.thumbnail a > img {
+ margin-right: auto;
+ margin-left: auto;
+}
+a.thumbnail:hover,
+a.thumbnail:focus,
+a.thumbnail.active {
+ border-color: #337ab7;
+}
+.thumbnail .caption {
+ padding: 9px;
+ color: #333;
+}
+.alert {
+ padding: 15px;
+ margin-bottom: 20px;
+ border: 1px solid transparent;
+ border-radius: 4px;
+}
+.alert h4 {
+ margin-top: 0;
+ color: inherit;
+}
+.alert .alert-link {
+ font-weight: bold;
+}
+.alert > p,
+.alert > ul {
+ margin-bottom: 0;
+}
+.alert > p + p {
+ margin-top: 5px;
+}
+.alert-dismissable,
+.alert-dismissible {
+ padding-right: 35px;
+}
+.alert-dismissable .close,
+.alert-dismissible .close {
+ position: relative;
+ top: -2px;
+ right: -21px;
+ color: inherit;
+}
+.alert-success {
+ color: #3c763d;
+ background-color: #dff0d8;
+ border-color: #d6e9c6;
+}
+.alert-success hr {
+ border-top-color: #c9e2b3;
+}
+.alert-success .alert-link {
+ color: #2b542c;
+}
+.alert-info {
+ color: #31708f;
+ background-color: #d9edf7;
+ border-color: #bce8f1;
+}
+.alert-info hr {
+ border-top-color: #a6e1ec;
+}
+.alert-info .alert-link {
+ color: #245269;
+}
+.alert-warning {
+ color: #8a6d3b;
+ background-color: #fcf8e3;
+ border-color: #faebcc;
+}
+.alert-warning hr {
+ border-top-color: #f7e1b5;
+}
+.alert-warning .alert-link {
+ color: #66512c;
+}
+.alert-danger {
+ color: #a94442;
+ background-color: #f2dede;
+ border-color: #ebccd1;
+}
+.alert-danger hr {
+ border-top-color: #e4b9c0;
+}
+.alert-danger .alert-link {
+ color: #843534;
+}
+@-webkit-keyframes progress-bar-stripes {
+ from {
+ background-position: 40px 0;
+ }
+ to {
+ background-position: 0 0;
+ }
+}
+@-o-keyframes progress-bar-stripes {
+ from {
+ background-position: 40px 0;
+ }
+ to {
+ background-position: 0 0;
+ }
+}
+@keyframes progress-bar-stripes {
+ from {
+ background-position: 40px 0;
+ }
+ to {
+ background-position: 0 0;
+ }
+}
+.progress {
+ height: 20px;
+ margin-bottom: 20px;
+ overflow: hidden;
+ background-color: #f5f5f5;
+ border-radius: 4px;
+ -webkit-box-shadow: inset 0 1px 2px rgba(0, 0, 0, .1);
+ box-shadow: inset 0 1px 2px rgba(0, 0, 0, .1);
+}
+.progress-bar {
+ float: left;
+ width: 0;
+ height: 100%;
+ font-size: 12px;
+ line-height: 20px;
+ color: #fff;
+ text-align: center;
+ background-color: #337ab7;
+ -webkit-box-shadow: inset 0 -1px 0 rgba(0, 0, 0, .15);
+ box-shadow: inset 0 -1px 0 rgba(0, 0, 0, .15);
+ -webkit-transition: width .6s ease;
+ -o-transition: width .6s ease;
+ transition: width .6s ease;
+}
+.progress-striped .progress-bar,
+.progress-bar-striped {
+ background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent);
+ background-image: -o-linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent);
+ background-image: linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent);
+ -webkit-background-size: 40px 40px;
+ background-size: 40px 40px;
+}
+.progress.active .progress-bar,
+.progress-bar.active {
+ -webkit-animation: progress-bar-stripes 2s linear infinite;
+ -o-animation: progress-bar-stripes 2s linear infinite;
+ animation: progress-bar-stripes 2s linear infinite;
+}
+.progress-bar-success {
+ background-color: #5cb85c;
+}
+.progress-striped .progress-bar-success {
+ background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent);
+ background-image: -o-linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent);
+ background-image: linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent);
+}
+.progress-bar-info {
+ background-color: #5bc0de;
+}
+.progress-striped .progress-bar-info {
+ background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent);
+ background-image: -o-linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent);
+ background-image: linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent);
+}
+.progress-bar-warning {
+ background-color: #f0ad4e;
+}
+.progress-striped .progress-bar-warning {
+ background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent);
+ background-image: -o-linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent);
+ background-image: linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent);
+}
+.progress-bar-danger {
+ background-color: #d9534f;
+}
+.progress-striped .progress-bar-danger {
+ background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent);
+ background-image: -o-linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent);
+ background-image: linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent);
+}
+.media {
+ margin-top: 15px;
+}
+.media:first-child {
+ margin-top: 0;
+}
+.media,
+.media-body {
+ overflow: hidden;
+ zoom: 1;
+}
+.media-body {
+ width: 10000px;
+}
+.media-object {
+ display: block;
+}
+.media-object.img-thumbnail {
+ max-width: none;
+}
+.media-right,
+.media > .pull-right {
+ padding-left: 10px;
+}
+.media-left,
+.media > .pull-left {
+ padding-right: 10px;
+}
+.media-left,
+.media-right,
+.media-body {
+ display: table-cell;
+ vertical-align: top;
+}
+.media-middle {
+ vertical-align: middle;
+}
+.media-bottom {
+ vertical-align: bottom;
+}
+.media-heading {
+ margin-top: 0;
+ margin-bottom: 5px;
+}
+.media-list {
+ padding-left: 0;
+ list-style: none;
+}
+.list-group {
+ padding-left: 0;
+ margin-bottom: 20px;
+}
+.list-group-item {
+ position: relative;
+ display: block;
+ padding: 10px 15px;
+ margin-bottom: -1px;
+ background-color: #fff;
+ border: 1px solid #ddd;
+}
+.list-group-item:first-child {
+ border-top-left-radius: 4px;
+ border-top-right-radius: 4px;
+}
+.list-group-item:last-child {
+ margin-bottom: 0;
+ border-bottom-right-radius: 4px;
+ border-bottom-left-radius: 4px;
+}
+a.list-group-item,
+button.list-group-item {
+ color: #555;
+}
+a.list-group-item .list-group-item-heading,
+button.list-group-item .list-group-item-heading {
+ color: #333;
+}
+a.list-group-item:hover,
+button.list-group-item:hover,
+a.list-group-item:focus,
+button.list-group-item:focus {
+ color: #555;
+ text-decoration: none;
+ background-color: #f5f5f5;
+}
+button.list-group-item {
+ width: 100%;
+ text-align: left;
+}
+.list-group-item.disabled,
+.list-group-item.disabled:hover,
+.list-group-item.disabled:focus {
+ color: #777;
+ cursor: not-allowed;
+ background-color: #eee;
+}
+.list-group-item.disabled .list-group-item-heading,
+.list-group-item.disabled:hover .list-group-item-heading,
+.list-group-item.disabled:focus .list-group-item-heading {
+ color: inherit;
+}
+.list-group-item.disabled .list-group-item-text,
+.list-group-item.disabled:hover .list-group-item-text,
+.list-group-item.disabled:focus .list-group-item-text {
+ color: #777;
+}
+.list-group-item.active,
+.list-group-item.active:hover,
+.list-group-item.active:focus {
+ z-index: 2;
+ color: #fff;
+ background-color: #337ab7;
+ border-color: #337ab7;
+}
+.list-group-item.active .list-group-item-heading,
+.list-group-item.active:hover .list-group-item-heading,
+.list-group-item.active:focus .list-group-item-heading,
+.list-group-item.active .list-group-item-heading > small,
+.list-group-item.active:hover .list-group-item-heading > small,
+.list-group-item.active:focus .list-group-item-heading > small,
+.list-group-item.active .list-group-item-heading > .small,
+.list-group-item.active:hover .list-group-item-heading > .small,
+.list-group-item.active:focus .list-group-item-heading > .small {
+ color: inherit;
+}
+.list-group-item.active .list-group-item-text,
+.list-group-item.active:hover .list-group-item-text,
+.list-group-item.active:focus .list-group-item-text {
+ color: #c7ddef;
+}
+.list-group-item-success {
+ color: #3c763d;
+ background-color: #dff0d8;
+}
+a.list-group-item-success,
+button.list-group-item-success {
+ color: #3c763d;
+}
+a.list-group-item-success .list-group-item-heading,
+button.list-group-item-success .list-group-item-heading {
+ color: inherit;
+}
+a.list-group-item-success:hover,
+button.list-group-item-success:hover,
+a.list-group-item-success:focus,
+button.list-group-item-success:focus {
+ color: #3c763d;
+ background-color: #d0e9c6;
+}
+a.list-group-item-success.active,
+button.list-group-item-success.active,
+a.list-group-item-success.active:hover,
+button.list-group-item-success.active:hover,
+a.list-group-item-success.active:focus,
+button.list-group-item-success.active:focus {
+ color: #fff;
+ background-color: #3c763d;
+ border-color: #3c763d;
+}
+.list-group-item-info {
+ color: #31708f;
+ background-color: #d9edf7;
+}
+a.list-group-item-info,
+button.list-group-item-info {
+ color: #31708f;
+}
+a.list-group-item-info .list-group-item-heading,
+button.list-group-item-info .list-group-item-heading {
+ color: inherit;
+}
+a.list-group-item-info:hover,
+button.list-group-item-info:hover,
+a.list-group-item-info:focus,
+button.list-group-item-info:focus {
+ color: #31708f;
+ background-color: #c4e3f3;
+}
+a.list-group-item-info.active,
+button.list-group-item-info.active,
+a.list-group-item-info.active:hover,
+button.list-group-item-info.active:hover,
+a.list-group-item-info.active:focus,
+button.list-group-item-info.active:focus {
+ color: #fff;
+ background-color: #31708f;
+ border-color: #31708f;
+}
+.list-group-item-warning {
+ color: #8a6d3b;
+ background-color: #fcf8e3;
+}
+a.list-group-item-warning,
+button.list-group-item-warning {
+ color: #8a6d3b;
+}
+a.list-group-item-warning .list-group-item-heading,
+button.list-group-item-warning .list-group-item-heading {
+ color: inherit;
+}
+a.list-group-item-warning:hover,
+button.list-group-item-warning:hover,
+a.list-group-item-warning:focus,
+button.list-group-item-warning:focus {
+ color: #8a6d3b;
+ background-color: #faf2cc;
+}
+a.list-group-item-warning.active,
+button.list-group-item-warning.active,
+a.list-group-item-warning.active:hover,
+button.list-group-item-warning.active:hover,
+a.list-group-item-warning.active:focus,
+button.list-group-item-warning.active:focus {
+ color: #fff;
+ background-color: #8a6d3b;
+ border-color: #8a6d3b;
+}
+.list-group-item-danger {
+ color: #a94442;
+ background-color: #f2dede;
+}
+a.list-group-item-danger,
+button.list-group-item-danger {
+ color: #a94442;
+}
+a.list-group-item-danger .list-group-item-heading,
+button.list-group-item-danger .list-group-item-heading {
+ color: inherit;
+}
+a.list-group-item-danger:hover,
+button.list-group-item-danger:hover,
+a.list-group-item-danger:focus,
+button.list-group-item-danger:focus {
+ color: #a94442;
+ background-color: #ebcccc;
+}
+a.list-group-item-danger.active,
+button.list-group-item-danger.active,
+a.list-group-item-danger.active:hover,
+button.list-group-item-danger.active:hover,
+a.list-group-item-danger.active:focus,
+button.list-group-item-danger.active:focus {
+ color: #fff;
+ background-color: #a94442;
+ border-color: #a94442;
+}
+.list-group-item-heading {
+ margin-top: 0;
+ margin-bottom: 5px;
+}
+.list-group-item-text {
+ margin-bottom: 0;
+ line-height: 1.3;
+}
+.panel {
+ margin-bottom: 20px;
+ background-color: #fff;
+ border: 1px solid transparent;
+ border-radius: 4px;
+ -webkit-box-shadow: 0 1px 1px rgba(0, 0, 0, .05);
+ box-shadow: 0 1px 1px rgba(0, 0, 0, .05);
+}
+.panel-body {
+ padding: 15px;
+}
+.panel-heading {
+ padding: 10px 15px;
+ border-bottom: 1px solid transparent;
+ border-top-left-radius: 3px;
+ border-top-right-radius: 3px;
+}
+.panel-heading > .dropdown .dropdown-toggle {
+ color: inherit;
+}
+.panel-title {
+ margin-top: 0;
+ margin-bottom: 0;
+ font-size: 16px;
+ color: inherit;
+}
+.panel-title > a,
+.panel-title > small,
+.panel-title > .small,
+.panel-title > small > a,
+.panel-title > .small > a {
+ color: inherit;
+}
+.panel-footer {
+ padding: 10px 15px;
+ background-color: #f5f5f5;
+ border-top: 1px solid #ddd;
+ border-bottom-right-radius: 3px;
+ border-bottom-left-radius: 3px;
+}
+.panel > .list-group,
+.panel > .panel-collapse > .list-group {
+ margin-bottom: 0;
+}
+.panel > .list-group .list-group-item,
+.panel > .panel-collapse > .list-group .list-group-item {
+ border-width: 1px 0;
+ border-radius: 0;
+}
+.panel > .list-group:first-child .list-group-item:first-child,
+.panel > .panel-collapse > .list-group:first-child .list-group-item:first-child {
+ border-top: 0;
+ border-top-left-radius: 3px;
+ border-top-right-radius: 3px;
+}
+.panel > .list-group:last-child .list-group-item:last-child,
+.panel > .panel-collapse > .list-group:last-child .list-group-item:last-child {
+ border-bottom: 0;
+ border-bottom-right-radius: 3px;
+ border-bottom-left-radius: 3px;
+}
+.panel > .panel-heading + .panel-collapse > .list-group .list-group-item:first-child {
+ border-top-left-radius: 0;
+ border-top-right-radius: 0;
+}
+.panel-heading + .list-group .list-group-item:first-child {
+ border-top-width: 0;
+}
+.list-group + .panel-footer {
+ border-top-width: 0;
+}
+.panel > .table,
+.panel > .table-responsive > .table,
+.panel > .panel-collapse > .table {
+ margin-bottom: 0;
+}
+.panel > .table caption,
+.panel > .table-responsive > .table caption,
+.panel > .panel-collapse > .table caption {
+ padding-right: 15px;
+ padding-left: 15px;
+}
+.panel > .table:first-child,
+.panel > .table-responsive:first-child > .table:first-child {
+ border-top-left-radius: 3px;
+ border-top-right-radius: 3px;
+}
+.panel > .table:first-child > thead:first-child > tr:first-child,
+.panel > .table-responsive:first-child > .table:first-child > thead:first-child > tr:first-child,
+.panel > .table:first-child > tbody:first-child > tr:first-child,
+.panel > .table-responsive:first-child > .table:first-child > tbody:first-child > tr:first-child {
+ border-top-left-radius: 3px;
+ border-top-right-radius: 3px;
+}
+.panel > .table:first-child > thead:first-child > tr:first-child td:first-child,
+.panel > .table-responsive:first-child > .table:first-child > thead:first-child > tr:first-child td:first-child,
+.panel > .table:first-child > tbody:first-child > tr:first-child td:first-child,
+.panel > .table-responsive:first-child > .table:first-child > tbody:first-child > tr:first-child td:first-child,
+.panel > .table:first-child > thead:first-child > tr:first-child th:first-child,
+.panel > .table-responsive:first-child > .table:first-child > thead:first-child > tr:first-child th:first-child,
+.panel > .table:first-child > tbody:first-child > tr:first-child th:first-child,
+.panel > .table-responsive:first-child > .table:first-child > tbody:first-child > tr:first-child th:first-child {
+ border-top-left-radius: 3px;
+}
+.panel > .table:first-child > thead:first-child > tr:first-child td:last-child,
+.panel > .table-responsive:first-child > .table:first-child > thead:first-child > tr:first-child td:last-child,
+.panel > .table:first-child > tbody:first-child > tr:first-child td:last-child,
+.panel > .table-responsive:first-child > .table:first-child > tbody:first-child > tr:first-child td:last-child,
+.panel > .table:first-child > thead:first-child > tr:first-child th:last-child,
+.panel > .table-responsive:first-child > .table:first-child > thead:first-child > tr:first-child th:last-child,
+.panel > .table:first-child > tbody:first-child > tr:first-child th:last-child,
+.panel > .table-responsive:first-child > .table:first-child > tbody:first-child > tr:first-child th:last-child {
+ border-top-right-radius: 3px;
+}
+.panel > .table:last-child,
+.panel > .table-responsive:last-child > .table:last-child {
+ border-bottom-right-radius: 3px;
+ border-bottom-left-radius: 3px;
+}
+.panel > .table:last-child > tbody:last-child > tr:last-child,
+.panel > .table-responsive:last-child > .table:last-child > tbody:last-child > tr:last-child,
+.panel > .table:last-child > tfoot:last-child > tr:last-child,
+.panel > .table-responsive:last-child > .table:last-child > tfoot:last-child > tr:last-child {
+ border-bottom-right-radius: 3px;
+ border-bottom-left-radius: 3px;
+}
+.panel > .table:last-child > tbody:last-child > tr:last-child td:first-child,
+.panel > .table-responsive:last-child > .table:last-child > tbody:last-child > tr:last-child td:first-child,
+.panel > .table:last-child > tfoot:last-child > tr:last-child td:first-child,
+.panel > .table-responsive:last-child > .table:last-child > tfoot:last-child > tr:last-child td:first-child,
+.panel > .table:last-child > tbody:last-child > tr:last-child th:first-child,
+.panel > .table-responsive:last-child > .table:last-child > tbody:last-child > tr:last-child th:first-child,
+.panel > .table:last-child > tfoot:last-child > tr:last-child th:first-child,
+.panel > .table-responsive:last-child > .table:last-child > tfoot:last-child > tr:last-child th:first-child {
+ border-bottom-left-radius: 3px;
+}
+.panel > .table:last-child > tbody:last-child > tr:last-child td:last-child,
+.panel > .table-responsive:last-child > .table:last-child > tbody:last-child > tr:last-child td:last-child,
+.panel > .table:last-child > tfoot:last-child > tr:last-child td:last-child,
+.panel > .table-responsive:last-child > .table:last-child > tfoot:last-child > tr:last-child td:last-child,
+.panel > .table:last-child > tbody:last-child > tr:last-child th:last-child,
+.panel > .table-responsive:last-child > .table:last-child > tbody:last-child > tr:last-child th:last-child,
+.panel > .table:last-child > tfoot:last-child > tr:last-child th:last-child,
+.panel > .table-responsive:last-child > .table:last-child > tfoot:last-child > tr:last-child th:last-child {
+ border-bottom-right-radius: 3px;
+}
+.panel > .panel-body + .table,
+.panel > .panel-body + .table-responsive,
+.panel > .table + .panel-body,
+.panel > .table-responsive + .panel-body {
+ border-top: 1px solid #ddd;
+}
+.panel > .table > tbody:first-child > tr:first-child th,
+.panel > .table > tbody:first-child > tr:first-child td {
+ border-top: 0;
+}
+.panel > .table-bordered,
+.panel > .table-responsive > .table-bordered {
+ border: 0;
+}
+.panel > .table-bordered > thead > tr > th:first-child,
+.panel > .table-responsive > .table-bordered > thead > tr > th:first-child,
+.panel > .table-bordered > tbody > tr > th:first-child,
+.panel > .table-responsive > .table-bordered > tbody > tr > th:first-child,
+.panel > .table-bordered > tfoot > tr > th:first-child,
+.panel > .table-responsive > .table-bordered > tfoot > tr > th:first-child,
+.panel > .table-bordered > thead > tr > td:first-child,
+.panel > .table-responsive > .table-bordered > thead > tr > td:first-child,
+.panel > .table-bordered > tbody > tr > td:first-child,
+.panel > .table-responsive > .table-bordered > tbody > tr > td:first-child,
+.panel > .table-bordered > tfoot > tr > td:first-child,
+.panel > .table-responsive > .table-bordered > tfoot > tr > td:first-child {
+ border-left: 0;
+}
+.panel > .table-bordered > thead > tr > th:last-child,
+.panel > .table-responsive > .table-bordered > thead > tr > th:last-child,
+.panel > .table-bordered > tbody > tr > th:last-child,
+.panel > .table-responsive > .table-bordered > tbody > tr > th:last-child,
+.panel > .table-bordered > tfoot > tr > th:last-child,
+.panel > .table-responsive > .table-bordered > tfoot > tr > th:last-child,
+.panel > .table-bordered > thead > tr > td:last-child,
+.panel > .table-responsive > .table-bordered > thead > tr > td:last-child,
+.panel > .table-bordered > tbody > tr > td:last-child,
+.panel > .table-responsive > .table-bordered > tbody > tr > td:last-child,
+.panel > .table-bordered > tfoot > tr > td:last-child,
+.panel > .table-responsive > .table-bordered > tfoot > tr > td:last-child {
+ border-right: 0;
+}
+.panel > .table-bordered > thead > tr:first-child > td,
+.panel > .table-responsive > .table-bordered > thead > tr:first-child > td,
+.panel > .table-bordered > tbody > tr:first-child > td,
+.panel > .table-responsive > .table-bordered > tbody > tr:first-child > td,
+.panel > .table-bordered > thead > tr:first-child > th,
+.panel > .table-responsive > .table-bordered > thead > tr:first-child > th,
+.panel > .table-bordered > tbody > tr:first-child > th,
+.panel > .table-responsive > .table-bordered > tbody > tr:first-child > th {
+ border-bottom: 0;
+}
+.panel > .table-bordered > tbody > tr:last-child > td,
+.panel > .table-responsive > .table-bordered > tbody > tr:last-child > td,
+.panel > .table-bordered > tfoot > tr:last-child > td,
+.panel > .table-responsive > .table-bordered > tfoot > tr:last-child > td,
+.panel > .table-bordered > tbody > tr:last-child > th,
+.panel > .table-responsive > .table-bordered > tbody > tr:last-child > th,
+.panel > .table-bordered > tfoot > tr:last-child > th,
+.panel > .table-responsive > .table-bordered > tfoot > tr:last-child > th {
+ border-bottom: 0;
+}
+.panel > .table-responsive {
+ margin-bottom: 0;
+ border: 0;
+}
+.panel-group {
+ margin-bottom: 20px;
+}
+.panel-group .panel {
+ margin-bottom: 0;
+ border-radius: 4px;
+}
+.panel-group .panel + .panel {
+ margin-top: 5px;
+}
+.panel-group .panel-heading {
+ border-bottom: 0;
+}
+.panel-group .panel-heading + .panel-collapse > .panel-body,
+.panel-group .panel-heading + .panel-collapse > .list-group {
+ border-top: 1px solid #ddd;
+}
+.panel-group .panel-footer {
+ border-top: 0;
+}
+.panel-group .panel-footer + .panel-collapse .panel-body {
+ border-bottom: 1px solid #ddd;
+}
+.panel-default {
+ border-color: #ddd;
+}
+.panel-default > .panel-heading {
+ color: #333;
+ background-color: #f5f5f5;
+ border-color: #ddd;
+}
+.panel-default > .panel-heading + .panel-collapse > .panel-body {
+ border-top-color: #ddd;
+}
+.panel-default > .panel-heading .badge {
+ color: #f5f5f5;
+ background-color: #333;
+}
+.panel-default > .panel-footer + .panel-collapse > .panel-body {
+ border-bottom-color: #ddd;
+}
+.panel-primary {
+ border-color: #337ab7;
+}
+.panel-primary > .panel-heading {
+ color: #fff;
+ background-color: #337ab7;
+ border-color: #337ab7;
+}
+.panel-primary > .panel-heading + .panel-collapse > .panel-body {
+ border-top-color: #337ab7;
+}
+.panel-primary > .panel-heading .badge {
+ color: #337ab7;
+ background-color: #fff;
+}
+.panel-primary > .panel-footer + .panel-collapse > .panel-body {
+ border-bottom-color: #337ab7;
+}
+.panel-success {
+ border-color: #d6e9c6;
+}
+.panel-success > .panel-heading {
+ color: #3c763d;
+ background-color: #dff0d8;
+ border-color: #d6e9c6;
+}
+.panel-success > .panel-heading + .panel-collapse > .panel-body {
+ border-top-color: #d6e9c6;
+}
+.panel-success > .panel-heading .badge {
+ color: #dff0d8;
+ background-color: #3c763d;
+}
+.panel-success > .panel-footer + .panel-collapse > .panel-body {
+ border-bottom-color: #d6e9c6;
+}
+.panel-info {
+ border-color: #bce8f1;
+}
+.panel-info > .panel-heading {
+ color: #31708f;
+ background-color: #d9edf7;
+ border-color: #bce8f1;
+}
+.panel-info > .panel-heading + .panel-collapse > .panel-body {
+ border-top-color: #bce8f1;
+}
+.panel-info > .panel-heading .badge {
+ color: #d9edf7;
+ background-color: #31708f;
+}
+.panel-info > .panel-footer + .panel-collapse > .panel-body {
+ border-bottom-color: #bce8f1;
+}
+.panel-warning {
+ border-color: #faebcc;
+}
+.panel-warning > .panel-heading {
+ color: #8a6d3b;
+ background-color: #fcf8e3;
+ border-color: #faebcc;
+}
+.panel-warning > .panel-heading + .panel-collapse > .panel-body {
+ border-top-color: #faebcc;
+}
+.panel-warning > .panel-heading .badge {
+ color: #fcf8e3;
+ background-color: #8a6d3b;
+}
+.panel-warning > .panel-footer + .panel-collapse > .panel-body {
+ border-bottom-color: #faebcc;
+}
+.panel-danger {
+ border-color: #ebccd1;
+}
+.panel-danger > .panel-heading {
+ color: #a94442;
+ background-color: #f2dede;
+ border-color: #ebccd1;
+}
+.panel-danger > .panel-heading + .panel-collapse > .panel-body {
+ border-top-color: #ebccd1;
+}
+.panel-danger > .panel-heading .badge {
+ color: #f2dede;
+ background-color: #a94442;
+}
+.panel-danger > .panel-footer + .panel-collapse > .panel-body {
+ border-bottom-color: #ebccd1;
+}
+.embed-responsive {
+ position: relative;
+ display: block;
+ height: 0;
+ padding: 0;
+ overflow: hidden;
+}
+.embed-responsive .embed-responsive-item,
+.embed-responsive iframe,
+.embed-responsive embed,
+.embed-responsive object,
+.embed-responsive video {
+ position: absolute;
+ top: 0;
+ bottom: 0;
+ left: 0;
+ width: 100%;
+ height: 100%;
+ border: 0;
+}
+.embed-responsive-16by9 {
+ padding-bottom: 56.25%;
+}
+.embed-responsive-4by3 {
+ padding-bottom: 75%;
+}
+.well {
+ min-height: 20px;
+ padding: 19px;
+ margin-bottom: 20px;
+ background-color: #f5f5f5;
+ border: 1px solid #e3e3e3;
+ border-radius: 4px;
+ -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, .05);
+ box-shadow: inset 0 1px 1px rgba(0, 0, 0, .05);
+}
+.well blockquote {
+ border-color: #ddd;
+ border-color: rgba(0, 0, 0, .15);
+}
+.well-lg {
+ padding: 24px;
+ border-radius: 6px;
+}
+.well-sm {
+ padding: 9px;
+ border-radius: 3px;
+}
+.close {
+ float: right;
+ font-size: 21px;
+ font-weight: bold;
+ line-height: 1;
+ color: #000;
+ text-shadow: 0 1px 0 #fff;
+ filter: alpha(opacity=20);
+ opacity: .2;
+}
+.close:hover,
+.close:focus {
+ color: #000;
+ text-decoration: none;
+ cursor: pointer;
+ filter: alpha(opacity=50);
+ opacity: .5;
+}
+button.close {
+ -webkit-appearance: none;
+ padding: 0;
+ cursor: pointer;
+ background: transparent;
+ border: 0;
+}
+.modal-open {
+ overflow: hidden;
+}
+.modal {
+ position: fixed;
+ top: 0;
+ right: 0;
+ bottom: 0;
+ left: 0;
+ z-index: 1050;
+ display: none;
+ overflow: hidden;
+ -webkit-overflow-scrolling: touch;
+ outline: 0;
+}
+.modal.fade .modal-dialog {
+ -webkit-transition: -webkit-transform .3s ease-out;
+ -o-transition: -o-transform .3s ease-out;
+ transition: transform .3s ease-out;
+ -webkit-transform: translate(0, -25%);
+ -ms-transform: translate(0, -25%);
+ -o-transform: translate(0, -25%);
+ transform: translate(0, -25%);
+}
+.modal.in .modal-dialog {
+ -webkit-transform: translate(0, 0);
+ -ms-transform: translate(0, 0);
+ -o-transform: translate(0, 0);
+ transform: translate(0, 0);
+}
+.modal-open .modal {
+ overflow-x: hidden;
+ overflow-y: auto;
+}
+.modal-dialog {
+ position: relative;
+ width: auto;
+ margin: 10px;
+}
+.modal-content {
+ position: relative;
+ background-color: #fff;
+ -webkit-background-clip: padding-box;
+ background-clip: padding-box;
+ border: 1px solid #999;
+ border: 1px solid rgba(0, 0, 0, .2);
+ border-radius: 6px;
+ outline: 0;
+ -webkit-box-shadow: 0 3px 9px rgba(0, 0, 0, .5);
+ box-shadow: 0 3px 9px rgba(0, 0, 0, .5);
+}
+.modal-backdrop {
+ position: fixed;
+ top: 0;
+ right: 0;
+ bottom: 0;
+ left: 0;
+ z-index: 1040;
+ background-color: #000;
+}
+.modal-backdrop.fade {
+ filter: alpha(opacity=0);
+ opacity: 0;
+}
+.modal-backdrop.in {
+ filter: alpha(opacity=50);
+ opacity: .5;
+}
+.modal-header {
+ padding: 15px;
+ border-bottom: 1px solid #e5e5e5;
+}
+.modal-header .close {
+ margin-top: -2px;
+}
+.modal-title {
+ margin: 0;
+ line-height: 1.42857143;
+}
+.modal-body {
+ position: relative;
+ padding: 15px;
+}
+.modal-footer {
+ padding: 15px;
+ text-align: right;
+ border-top: 1px solid #e5e5e5;
+}
+.modal-footer .btn + .btn {
+ margin-bottom: 0;
+ margin-left: 5px;
+}
+.modal-footer .btn-group .btn + .btn {
+ margin-left: -1px;
+}
+.modal-footer .btn-block + .btn-block {
+ margin-left: 0;
+}
+.modal-scrollbar-measure {
+ position: absolute;
+ top: -9999px;
+ width: 50px;
+ height: 50px;
+ overflow: scroll;
+}
+@media (min-width: 768px) {
+ .modal-dialog {
+ width: 600px;
+ margin: 30px auto;
+ }
+ .modal-content {
+ -webkit-box-shadow: 0 5px 15px rgba(0, 0, 0, .5);
+ box-shadow: 0 5px 15px rgba(0, 0, 0, .5);
+ }
+ .modal-sm {
+ width: 300px;
+ }
+}
+@media (min-width: 992px) {
+ .modal-lg {
+ width: 900px;
+ }
+}
+.tooltip {
+ position: absolute;
+ z-index: 1070;
+ display: block;
+ font-family: "Helvetica Neue", Helvetica, Arial, sans-serif;
+ font-size: 12px;
+ font-style: normal;
+ font-weight: normal;
+ line-height: 1.42857143;
+ text-align: left;
+ text-align: start;
+ text-decoration: none;
+ text-shadow: none;
+ text-transform: none;
+ letter-spacing: normal;
+ word-break: normal;
+ word-spacing: normal;
+ word-wrap: normal;
+ white-space: normal;
+ filter: alpha(opacity=0);
+ opacity: 0;
+
+ line-break: auto;
+}
+.tooltip.in {
+ filter: alpha(opacity=90);
+ opacity: .9;
+}
+.tooltip.top {
+ padding: 5px 0;
+ margin-top: -3px;
+}
+.tooltip.right {
+ padding: 0 5px;
+ margin-left: 3px;
+}
+.tooltip.bottom {
+ padding: 5px 0;
+ margin-top: 3px;
+}
+.tooltip.left {
+ padding: 0 5px;
+ margin-left: -3px;
+}
+.tooltip-inner {
+ max-width: 200px;
+ padding: 3px 8px;
+ color: #fff;
+ text-align: center;
+ background-color: #000;
+ border-radius: 4px;
+}
+.tooltip-arrow {
+ position: absolute;
+ width: 0;
+ height: 0;
+ border-color: transparent;
+ border-style: solid;
+}
+.tooltip.top .tooltip-arrow {
+ bottom: 0;
+ left: 50%;
+ margin-left: -5px;
+ border-width: 5px 5px 0;
+ border-top-color: #000;
+}
+.tooltip.top-left .tooltip-arrow {
+ right: 5px;
+ bottom: 0;
+ margin-bottom: -5px;
+ border-width: 5px 5px 0;
+ border-top-color: #000;
+}
+.tooltip.top-right .tooltip-arrow {
+ bottom: 0;
+ left: 5px;
+ margin-bottom: -5px;
+ border-width: 5px 5px 0;
+ border-top-color: #000;
+}
+.tooltip.right .tooltip-arrow {
+ top: 50%;
+ left: 0;
+ margin-top: -5px;
+ border-width: 5px 5px 5px 0;
+ border-right-color: #000;
+}
+.tooltip.left .tooltip-arrow {
+ top: 50%;
+ right: 0;
+ margin-top: -5px;
+ border-width: 5px 0 5px 5px;
+ border-left-color: #000;
+}
+.tooltip.bottom .tooltip-arrow {
+ top: 0;
+ left: 50%;
+ margin-left: -5px;
+ border-width: 0 5px 5px;
+ border-bottom-color: #000;
+}
+.tooltip.bottom-left .tooltip-arrow {
+ top: 0;
+ right: 5px;
+ margin-top: -5px;
+ border-width: 0 5px 5px;
+ border-bottom-color: #000;
+}
+.tooltip.bottom-right .tooltip-arrow {
+ top: 0;
+ left: 5px;
+ margin-top: -5px;
+ border-width: 0 5px 5px;
+ border-bottom-color: #000;
+}
+.popover {
+ position: absolute;
+ top: 0;
+ left: 0;
+ z-index: 1060;
+ display: none;
+ max-width: 276px;
+ padding: 1px;
+ font-family: "Helvetica Neue", Helvetica, Arial, sans-serif;
+ font-size: 14px;
+ font-style: normal;
+ font-weight: normal;
+ line-height: 1.42857143;
+ text-align: left;
+ text-align: start;
+ text-decoration: none;
+ text-shadow: none;
+ text-transform: none;
+ letter-spacing: normal;
+ word-break: normal;
+ word-spacing: normal;
+ word-wrap: normal;
+ white-space: normal;
+ background-color: #fff;
+ -webkit-background-clip: padding-box;
+ background-clip: padding-box;
+ border: 1px solid #ccc;
+ border: 1px solid rgba(0, 0, 0, .2);
+ border-radius: 6px;
+ -webkit-box-shadow: 0 5px 10px rgba(0, 0, 0, .2);
+ box-shadow: 0 5px 10px rgba(0, 0, 0, .2);
+
+ line-break: auto;
+}
+.popover.top {
+ margin-top: -10px;
+}
+.popover.right {
+ margin-left: 10px;
+}
+.popover.bottom {
+ margin-top: 10px;
+}
+.popover.left {
+ margin-left: -10px;
+}
+.popover-title {
+ padding: 8px 14px;
+ margin: 0;
+ font-size: 14px;
+ background-color: #f7f7f7;
+ border-bottom: 1px solid #ebebeb;
+ border-radius: 5px 5px 0 0;
+}
+.popover-content {
+ padding: 9px 14px;
+}
+.popover > .arrow,
+.popover > .arrow:after {
+ position: absolute;
+ display: block;
+ width: 0;
+ height: 0;
+ border-color: transparent;
+ border-style: solid;
+}
+.popover > .arrow {
+ border-width: 11px;
+}
+.popover > .arrow:after {
+ content: "";
+ border-width: 10px;
+}
+.popover.top > .arrow {
+ bottom: -11px;
+ left: 50%;
+ margin-left: -11px;
+ border-top-color: #999;
+ border-top-color: rgba(0, 0, 0, .25);
+ border-bottom-width: 0;
+}
+.popover.top > .arrow:after {
+ bottom: 1px;
+ margin-left: -10px;
+ content: " ";
+ border-top-color: #fff;
+ border-bottom-width: 0;
+}
+.popover.right > .arrow {
+ top: 50%;
+ left: -11px;
+ margin-top: -11px;
+ border-right-color: #999;
+ border-right-color: rgba(0, 0, 0, .25);
+ border-left-width: 0;
+}
+.popover.right > .arrow:after {
+ bottom: -10px;
+ left: 1px;
+ content: " ";
+ border-right-color: #fff;
+ border-left-width: 0;
+}
+.popover.bottom > .arrow {
+ top: -11px;
+ left: 50%;
+ margin-left: -11px;
+ border-top-width: 0;
+ border-bottom-color: #999;
+ border-bottom-color: rgba(0, 0, 0, .25);
+}
+.popover.bottom > .arrow:after {
+ top: 1px;
+ margin-left: -10px;
+ content: " ";
+ border-top-width: 0;
+ border-bottom-color: #fff;
+}
+.popover.left > .arrow {
+ top: 50%;
+ right: -11px;
+ margin-top: -11px;
+ border-right-width: 0;
+ border-left-color: #999;
+ border-left-color: rgba(0, 0, 0, .25);
+}
+.popover.left > .arrow:after {
+ right: 1px;
+ bottom: -10px;
+ content: " ";
+ border-right-width: 0;
+ border-left-color: #fff;
+}
+.carousel {
+ position: relative;
+}
+.carousel-inner {
+ position: relative;
+ width: 100%;
+ overflow: hidden;
+}
+.carousel-inner > .item {
+ position: relative;
+ display: none;
+ -webkit-transition: .6s ease-in-out left;
+ -o-transition: .6s ease-in-out left;
+ transition: .6s ease-in-out left;
+}
+.carousel-inner > .item > img,
+.carousel-inner > .item > a > img {
+ line-height: 1;
+}
+@media all and (transform-3d), (-webkit-transform-3d) {
+ .carousel-inner > .item {
+ -webkit-transition: -webkit-transform .6s ease-in-out;
+ -o-transition: -o-transform .6s ease-in-out;
+ transition: transform .6s ease-in-out;
+
+ -webkit-backface-visibility: hidden;
+ backface-visibility: hidden;
+ -webkit-perspective: 1000px;
+ perspective: 1000px;
+ }
+ .carousel-inner > .item.next,
+ .carousel-inner > .item.active.right {
+ left: 0;
+ -webkit-transform: translate3d(100%, 0, 0);
+ transform: translate3d(100%, 0, 0);
+ }
+ .carousel-inner > .item.prev,
+ .carousel-inner > .item.active.left {
+ left: 0;
+ -webkit-transform: translate3d(-100%, 0, 0);
+ transform: translate3d(-100%, 0, 0);
+ }
+ .carousel-inner > .item.next.left,
+ .carousel-inner > .item.prev.right,
+ .carousel-inner > .item.active {
+ left: 0;
+ -webkit-transform: translate3d(0, 0, 0);
+ transform: translate3d(0, 0, 0);
+ }
+}
+.carousel-inner > .active,
+.carousel-inner > .next,
+.carousel-inner > .prev {
+ display: block;
+}
+.carousel-inner > .active {
+ left: 0;
+}
+.carousel-inner > .next,
+.carousel-inner > .prev {
+ position: absolute;
+ top: 0;
+ width: 100%;
+}
+.carousel-inner > .next {
+ left: 100%;
+}
+.carousel-inner > .prev {
+ left: -100%;
+}
+.carousel-inner > .next.left,
+.carousel-inner > .prev.right {
+ left: 0;
+}
+.carousel-inner > .active.left {
+ left: -100%;
+}
+.carousel-inner > .active.right {
+ left: 100%;
+}
+.carousel-control {
+ position: absolute;
+ top: 0;
+ bottom: 0;
+ left: 0;
+ width: 15%;
+ font-size: 20px;
+ color: #fff;
+ text-align: center;
+ text-shadow: 0 1px 2px rgba(0, 0, 0, .6);
+ background-color: rgba(0, 0, 0, 0);
+ filter: alpha(opacity=50);
+ opacity: .5;
+}
+.carousel-control.left {
+ background-image: -webkit-linear-gradient(left, rgba(0, 0, 0, .5) 0%, rgba(0, 0, 0, .0001) 100%);
+ background-image: -o-linear-gradient(left, rgba(0, 0, 0, .5) 0%, rgba(0, 0, 0, .0001) 100%);
+ background-image: -webkit-gradient(linear, left top, right top, from(rgba(0, 0, 0, .5)), to(rgba(0, 0, 0, .0001)));
+ background-image: linear-gradient(to right, rgba(0, 0, 0, .5) 0%, rgba(0, 0, 0, .0001) 100%);
+ filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#80000000', endColorstr='#00000000', GradientType=1);
+ background-repeat: repeat-x;
+}
+.carousel-control.right {
+ right: 0;
+ left: auto;
+ background-image: -webkit-linear-gradient(left, rgba(0, 0, 0, .0001) 0%, rgba(0, 0, 0, .5) 100%);
+ background-image: -o-linear-gradient(left, rgba(0, 0, 0, .0001) 0%, rgba(0, 0, 0, .5) 100%);
+ background-image: -webkit-gradient(linear, left top, right top, from(rgba(0, 0, 0, .0001)), to(rgba(0, 0, 0, .5)));
+ background-image: linear-gradient(to right, rgba(0, 0, 0, .0001) 0%, rgba(0, 0, 0, .5) 100%);
+ filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#00000000', endColorstr='#80000000', GradientType=1);
+ background-repeat: repeat-x;
+}
+.carousel-control:hover,
+.carousel-control:focus {
+ color: #fff;
+ text-decoration: none;
+ filter: alpha(opacity=90);
+ outline: 0;
+ opacity: .9;
+}
+.carousel-control .icon-prev,
+.carousel-control .icon-next,
+.carousel-control .glyphicon-chevron-left,
+.carousel-control .glyphicon-chevron-right {
+ position: absolute;
+ top: 50%;
+ z-index: 5;
+ display: inline-block;
+ margin-top: -10px;
+}
+.carousel-control .icon-prev,
+.carousel-control .glyphicon-chevron-left {
+ left: 50%;
+ margin-left: -10px;
+}
+.carousel-control .icon-next,
+.carousel-control .glyphicon-chevron-right {
+ right: 50%;
+ margin-right: -10px;
+}
+.carousel-control .icon-prev,
+.carousel-control .icon-next {
+ width: 20px;
+ height: 20px;
+ font-family: serif;
+ line-height: 1;
+}
+.carousel-control .icon-prev:before {
+ content: '\2039';
+}
+.carousel-control .icon-next:before {
+ content: '\203a';
+}
+.carousel-indicators {
+ position: absolute;
+ bottom: 10px;
+ left: 50%;
+ z-index: 15;
+ width: 60%;
+ padding-left: 0;
+ margin-left: -30%;
+ text-align: center;
+ list-style: none;
+}
+.carousel-indicators li {
+ display: inline-block;
+ width: 10px;
+ height: 10px;
+ margin: 1px;
+ text-indent: -999px;
+ cursor: pointer;
+ background-color: #000 \9;
+ background-color: rgba(0, 0, 0, 0);
+ border: 1px solid #fff;
+ border-radius: 10px;
+}
+.carousel-indicators .active {
+ width: 12px;
+ height: 12px;
+ margin: 0;
+ background-color: #fff;
+}
+.carousel-caption {
+ position: absolute;
+ right: 15%;
+ bottom: 20px;
+ left: 15%;
+ z-index: 10;
+ padding-top: 20px;
+ padding-bottom: 20px;
+ color: #fff;
+ text-align: center;
+ text-shadow: 0 1px 2px rgba(0, 0, 0, .6);
+}
+.carousel-caption .btn {
+ text-shadow: none;
+}
+@media screen and (min-width: 768px) {
+ .carousel-control .glyphicon-chevron-left,
+ .carousel-control .glyphicon-chevron-right,
+ .carousel-control .icon-prev,
+ .carousel-control .icon-next {
+ width: 30px;
+ height: 30px;
+ margin-top: -10px;
+ font-size: 30px;
+ }
+ .carousel-control .glyphicon-chevron-left,
+ .carousel-control .icon-prev {
+ margin-left: -10px;
+ }
+ .carousel-control .glyphicon-chevron-right,
+ .carousel-control .icon-next {
+ margin-right: -10px;
+ }
+ .carousel-caption {
+ right: 20%;
+ left: 20%;
+ padding-bottom: 30px;
+ }
+ .carousel-indicators {
+ bottom: 20px;
+ }
+}
+.clearfix:before,
+.clearfix:after,
+.dl-horizontal dd:before,
+.dl-horizontal dd:after,
+.container:before,
+.container:after,
+.container-fluid:before,
+.container-fluid:after,
+.row:before,
+.row:after,
+.form-horizontal .form-group:before,
+.form-horizontal .form-group:after,
+.btn-toolbar:before,
+.btn-toolbar:after,
+.btn-group-vertical > .btn-group:before,
+.btn-group-vertical > .btn-group:after,
+.nav:before,
+.nav:after,
+.navbar:before,
+.navbar:after,
+.navbar-header:before,
+.navbar-header:after,
+.navbar-collapse:before,
+.navbar-collapse:after,
+.pager:before,
+.pager:after,
+.panel-body:before,
+.panel-body:after,
+.modal-header:before,
+.modal-header:after,
+.modal-footer:before,
+.modal-footer:after {
+ display: table;
+ content: " ";
+}
+.clearfix:after,
+.dl-horizontal dd:after,
+.container:after,
+.container-fluid:after,
+.row:after,
+.form-horizontal .form-group:after,
+.btn-toolbar:after,
+.btn-group-vertical > .btn-group:after,
+.nav:after,
+.navbar:after,
+.navbar-header:after,
+.navbar-collapse:after,
+.pager:after,
+.panel-body:after,
+.modal-header:after,
+.modal-footer:after {
+ clear: both;
+}
+.center-block {
+ display: block;
+ margin-right: auto;
+ margin-left: auto;
+}
+.pull-right {
+ float: right !important;
+}
+.pull-left {
+ float: left !important;
+}
+.hide {
+ display: none !important;
+}
+.show {
+ display: block !important;
+}
+.invisible {
+ visibility: hidden;
+}
+.text-hide {
+ font: 0/0 a;
+ color: transparent;
+ text-shadow: none;
+ background-color: transparent;
+ border: 0;
+}
+.hidden {
+ display: none !important;
+}
+.affix {
+ position: fixed;
+}
+@-ms-viewport {
+ width: device-width;
+}
+.visible-xs,
+.visible-sm,
+.visible-md,
+.visible-lg {
+ display: none !important;
+}
+.visible-xs-block,
+.visible-xs-inline,
+.visible-xs-inline-block,
+.visible-sm-block,
+.visible-sm-inline,
+.visible-sm-inline-block,
+.visible-md-block,
+.visible-md-inline,
+.visible-md-inline-block,
+.visible-lg-block,
+.visible-lg-inline,
+.visible-lg-inline-block {
+ display: none !important;
+}
+@media (max-width: 767px) {
+ .visible-xs {
+ display: block !important;
+ }
+ table.visible-xs {
+ display: table !important;
+ }
+ tr.visible-xs {
+ display: table-row !important;
+ }
+ th.visible-xs,
+ td.visible-xs {
+ display: table-cell !important;
+ }
+}
+@media (max-width: 767px) {
+ .visible-xs-block {
+ display: block !important;
+ }
+}
+@media (max-width: 767px) {
+ .visible-xs-inline {
+ display: inline !important;
+ }
+}
+@media (max-width: 767px) {
+ .visible-xs-inline-block {
+ display: inline-block !important;
+ }
+}
+@media (min-width: 768px) and (max-width: 991px) {
+ .visible-sm {
+ display: block !important;
+ }
+ table.visible-sm {
+ display: table !important;
+ }
+ tr.visible-sm {
+ display: table-row !important;
+ }
+ th.visible-sm,
+ td.visible-sm {
+ display: table-cell !important;
+ }
+}
+@media (min-width: 768px) and (max-width: 991px) {
+ .visible-sm-block {
+ display: block !important;
+ }
+}
+@media (min-width: 768px) and (max-width: 991px) {
+ .visible-sm-inline {
+ display: inline !important;
+ }
+}
+@media (min-width: 768px) and (max-width: 991px) {
+ .visible-sm-inline-block {
+ display: inline-block !important;
+ }
+}
+@media (min-width: 992px) and (max-width: 1199px) {
+ .visible-md {
+ display: block !important;
+ }
+ table.visible-md {
+ display: table !important;
+ }
+ tr.visible-md {
+ display: table-row !important;
+ }
+ th.visible-md,
+ td.visible-md {
+ display: table-cell !important;
+ }
+}
+@media (min-width: 992px) and (max-width: 1199px) {
+ .visible-md-block {
+ display: block !important;
+ }
+}
+@media (min-width: 992px) and (max-width: 1199px) {
+ .visible-md-inline {
+ display: inline !important;
+ }
+}
+@media (min-width: 992px) and (max-width: 1199px) {
+ .visible-md-inline-block {
+ display: inline-block !important;
+ }
+}
+@media (min-width: 1200px) {
+ .visible-lg {
+ display: block !important;
+ }
+ table.visible-lg {
+ display: table !important;
+ }
+ tr.visible-lg {
+ display: table-row !important;
+ }
+ th.visible-lg,
+ td.visible-lg {
+ display: table-cell !important;
+ }
+}
+@media (min-width: 1200px) {
+ .visible-lg-block {
+ display: block !important;
+ }
+}
+@media (min-width: 1200px) {
+ .visible-lg-inline {
+ display: inline !important;
+ }
+}
+@media (min-width: 1200px) {
+ .visible-lg-inline-block {
+ display: inline-block !important;
+ }
+}
+@media (max-width: 767px) {
+ .hidden-xs {
+ display: none !important;
+ }
+}
+@media (min-width: 768px) and (max-width: 991px) {
+ .hidden-sm {
+ display: none !important;
+ }
+}
+@media (min-width: 992px) and (max-width: 1199px) {
+ .hidden-md {
+ display: none !important;
+ }
+}
+@media (min-width: 1200px) {
+ .hidden-lg {
+ display: none !important;
+ }
+}
+.visible-print {
+ display: none !important;
+}
+@media print {
+ .visible-print {
+ display: block !important;
+ }
+ table.visible-print {
+ display: table !important;
+ }
+ tr.visible-print {
+ display: table-row !important;
+ }
+ th.visible-print,
+ td.visible-print {
+ display: table-cell !important;
+ }
+}
+.visible-print-block {
+ display: none !important;
+}
+@media print {
+ .visible-print-block {
+ display: block !important;
+ }
+}
+.visible-print-inline {
+ display: none !important;
+}
+@media print {
+ .visible-print-inline {
+ display: inline !important;
+ }
+}
+.visible-print-inline-block {
+ display: none !important;
+}
+@media print {
+ .visible-print-inline-block {
+ display: inline-block !important;
+ }
+}
+@media print {
+ .hidden-print {
+ display: none !important;
+ }
+}
+/*# sourceMappingURL=bootstrap.css.map */
diff --git a/web/gui/css/bootstrap-slate-flat-3.3.7.css b/web/gui/css/bootstrap-slate-flat-3.3.7.css
new file mode 100644
index 0000000..7ce384f
--- /dev/null
+++ b/web/gui/css/bootstrap-slate-flat-3.3.7.css
@@ -0,0 +1,7101 @@
+/*!
+ * bootswatch v3.3.7
+ * Homepage: http://bootswatch.com
+ * Copyright 2012-2016 Thomas Park
+ * Licensed under MIT
+ * SPDX-License-Identifier: MIT
+ * Based on Bootstrap
+*/
+/*!
+ * Bootstrap v3.3.7 (http://getbootstrap.com)
+ * Copyright 2011-2016 Twitter, Inc.
+ * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)
+ */
+/*! normalize.css v3.0.3 | MIT License | github.com/necolas/normalize.css */
+html {
+ font-family: sans-serif;
+ -ms-text-size-adjust: 100%;
+ -webkit-text-size-adjust: 100%;
+}
+body {
+ margin: 0;
+}
+article,
+aside,
+details,
+figcaption,
+figure,
+footer,
+header,
+hgroup,
+main,
+menu,
+nav,
+section,
+summary {
+ display: block;
+}
+audio,
+canvas,
+progress,
+video {
+ display: inline-block;
+ vertical-align: baseline;
+}
+audio:not([controls]) {
+ display: none;
+ height: 0;
+}
+[hidden],
+template {
+ display: none;
+}
+a {
+ background-color: transparent;
+}
+a:active,
+a:hover {
+ outline: 0;
+}
+abbr[title] {
+ border-bottom: 1px dotted;
+}
+b,
+strong {
+ font-weight: bold;
+}
+dfn {
+ font-style: italic;
+}
+h1 {
+ font-size: 2em;
+ margin: 0.67em 0;
+}
+mark {
+ background: #ff0;
+ color: #000;
+}
+small {
+ font-size: 80%;
+}
+sub,
+sup {
+ font-size: 75%;
+ line-height: 0;
+ position: relative;
+ vertical-align: baseline;
+}
+sup {
+ top: -0.5em;
+}
+sub {
+ bottom: -0.25em;
+}
+img {
+ border: 0;
+}
+svg:not(:root) {
+ overflow: hidden;
+}
+figure {
+ margin: 1em 40px;
+}
+hr {
+ -webkit-box-sizing: content-box;
+ -moz-box-sizing: content-box;
+ box-sizing: content-box;
+ height: 0;
+}
+pre {
+ overflow: auto;
+}
+code,
+kbd,
+pre,
+samp {
+ font-family: monospace, monospace;
+ font-size: 1em;
+}
+button,
+input,
+optgroup,
+select,
+textarea {
+ color: inherit;
+ font: inherit;
+ margin: 0;
+}
+button {
+ overflow: visible;
+}
+button,
+select {
+ text-transform: none;
+}
+button,
+html input[type="button"],
+input[type="reset"],
+input[type="submit"] {
+ -webkit-appearance: button;
+ cursor: pointer;
+}
+button[disabled],
+html input[disabled] {
+ cursor: default;
+}
+button::-moz-focus-inner,
+input::-moz-focus-inner {
+ border: 0;
+ padding: 0;
+}
+input {
+ line-height: normal;
+}
+input[type="checkbox"],
+input[type="radio"] {
+ -webkit-box-sizing: border-box;
+ -moz-box-sizing: border-box;
+ box-sizing: border-box;
+ padding: 0;
+}
+input[type="number"]::-webkit-inner-spin-button,
+input[type="number"]::-webkit-outer-spin-button {
+ height: auto;
+}
+input[type="search"] {
+ -webkit-appearance: textfield;
+ -webkit-box-sizing: content-box;
+ -moz-box-sizing: content-box;
+ box-sizing: content-box;
+}
+input[type="search"]::-webkit-search-cancel-button,
+input[type="search"]::-webkit-search-decoration {
+ -webkit-appearance: none;
+}
+fieldset {
+ border: 1px solid #c0c0c0;
+ margin: 0 2px;
+ padding: 0.35em 0.625em 0.75em;
+}
+legend {
+ border: 0;
+ padding: 0;
+}
+textarea {
+ overflow: auto;
+}
+optgroup {
+ font-weight: bold;
+}
+table {
+ border-collapse: collapse;
+ border-spacing: 0;
+}
+td,
+th {
+ padding: 0;
+}
+/*! Source: https://github.com/h5bp/html5-boilerplate/blob/master/src/css/main.css */
+@media print {
+ *,
+ *:before,
+ *:after {
+ background: transparent !important;
+ color: #000 !important;
+ -webkit-box-shadow: none !important;
+ box-shadow: none !important;
+ text-shadow: none !important;
+ }
+ a,
+ a:visited {
+ text-decoration: underline;
+ }
+ a[href]:after {
+ content: " (" attr(href) ")";
+ }
+ abbr[title]:after {
+ content: " (" attr(title) ")";
+ }
+ a[href^="#"]:after,
+ a[href^="javascript:"]:after {
+ content: "";
+ }
+ pre,
+ blockquote {
+ border: 1px solid #999;
+ page-break-inside: avoid;
+ }
+ thead {
+ display: table-header-group;
+ }
+ tr,
+ img {
+ page-break-inside: avoid;
+ }
+ img {
+ max-width: 100% !important;
+ }
+ p,
+ h2,
+ h3 {
+ orphans: 3;
+ widows: 3;
+ }
+ h2,
+ h3 {
+ page-break-after: avoid;
+ }
+ .navbar {
+ display: none;
+ }
+ .btn > .caret,
+ .dropup > .btn > .caret {
+ border-top-color: #000 !important;
+ }
+ .label {
+ border: 1px solid #000;
+ }
+ .table {
+ border-collapse: collapse !important;
+ }
+ .table td,
+ .table th {
+ background-color: #fff !important;
+ }
+ .table-bordered th,
+ .table-bordered td {
+ border: 1px solid #ddd !important;
+ }
+}
+@font-face {
+ font-family: 'Glyphicons Halflings';
+ src: url('../fonts/glyphicons-halflings-regular.eot');
+ src: url('../fonts/glyphicons-halflings-regular.eot?#iefix') format('embedded-opentype'), url('../fonts/glyphicons-halflings-regular.woff2') format('woff2'), url('../fonts/glyphicons-halflings-regular.woff') format('woff'), url('../fonts/glyphicons-halflings-regular.ttf') format('truetype'), url('../fonts/glyphicons-halflings-regular.svg#glyphicons_halflingsregular') format('svg');
+}
+.glyphicon {
+ position: relative;
+ top: 1px;
+ display: inline-block;
+ font-family: 'Glyphicons Halflings';
+ font-style: normal;
+ font-weight: normal;
+ line-height: 1;
+ -webkit-font-smoothing: antialiased;
+ -moz-osx-font-smoothing: grayscale;
+}
+.glyphicon-asterisk:before {
+ content: "\002a";
+}
+.glyphicon-plus:before {
+ content: "\002b";
+}
+.glyphicon-euro:before,
+.glyphicon-eur:before {
+ content: "\20ac";
+}
+.glyphicon-minus:before {
+ content: "\2212";
+}
+.glyphicon-cloud:before {
+ content: "\2601";
+}
+.glyphicon-envelope:before {
+ content: "\2709";
+}
+.glyphicon-pencil:before {
+ content: "\270f";
+}
+.glyphicon-glass:before {
+ content: "\e001";
+}
+.glyphicon-music:before {
+ content: "\e002";
+}
+.glyphicon-search:before {
+ content: "\e003";
+}
+.glyphicon-heart:before {
+ content: "\e005";
+}
+.glyphicon-star:before {
+ content: "\e006";
+}
+.glyphicon-star-empty:before {
+ content: "\e007";
+}
+.glyphicon-user:before {
+ content: "\e008";
+}
+.glyphicon-film:before {
+ content: "\e009";
+}
+.glyphicon-th-large:before {
+ content: "\e010";
+}
+.glyphicon-th:before {
+ content: "\e011";
+}
+.glyphicon-th-list:before {
+ content: "\e012";
+}
+.glyphicon-ok:before {
+ content: "\e013";
+}
+.glyphicon-remove:before {
+ content: "\e014";
+}
+.glyphicon-zoom-in:before {
+ content: "\e015";
+}
+.glyphicon-zoom-out:before {
+ content: "\e016";
+}
+.glyphicon-off:before {
+ content: "\e017";
+}
+.glyphicon-signal:before {
+ content: "\e018";
+}
+.glyphicon-cog:before {
+ content: "\e019";
+}
+.glyphicon-trash:before {
+ content: "\e020";
+}
+.glyphicon-home:before {
+ content: "\e021";
+}
+.glyphicon-file:before {
+ content: "\e022";
+}
+.glyphicon-time:before {
+ content: "\e023";
+}
+.glyphicon-road:before {
+ content: "\e024";
+}
+.glyphicon-download-alt:before {
+ content: "\e025";
+}
+.glyphicon-download:before {
+ content: "\e026";
+}
+.glyphicon-upload:before {
+ content: "\e027";
+}
+.glyphicon-inbox:before {
+ content: "\e028";
+}
+.glyphicon-play-circle:before {
+ content: "\e029";
+}
+.glyphicon-repeat:before {
+ content: "\e030";
+}
+.glyphicon-refresh:before {
+ content: "\e031";
+}
+.glyphicon-list-alt:before {
+ content: "\e032";
+}
+.glyphicon-lock:before {
+ content: "\e033";
+}
+.glyphicon-flag:before {
+ content: "\e034";
+}
+.glyphicon-headphones:before {
+ content: "\e035";
+}
+.glyphicon-volume-off:before {
+ content: "\e036";
+}
+.glyphicon-volume-down:before {
+ content: "\e037";
+}
+.glyphicon-volume-up:before {
+ content: "\e038";
+}
+.glyphicon-qrcode:before {
+ content: "\e039";
+}
+.glyphicon-barcode:before {
+ content: "\e040";
+}
+.glyphicon-tag:before {
+ content: "\e041";
+}
+.glyphicon-tags:before {
+ content: "\e042";
+}
+.glyphicon-book:before {
+ content: "\e043";
+}
+.glyphicon-bookmark:before {
+ content: "\e044";
+}
+.glyphicon-print:before {
+ content: "\e045";
+}
+.glyphicon-camera:before {
+ content: "\e046";
+}
+.glyphicon-font:before {
+ content: "\e047";
+}
+.glyphicon-bold:before {
+ content: "\e048";
+}
+.glyphicon-italic:before {
+ content: "\e049";
+}
+.glyphicon-text-height:before {
+ content: "\e050";
+}
+.glyphicon-text-width:before {
+ content: "\e051";
+}
+.glyphicon-align-left:before {
+ content: "\e052";
+}
+.glyphicon-align-center:before {
+ content: "\e053";
+}
+.glyphicon-align-right:before {
+ content: "\e054";
+}
+.glyphicon-align-justify:before {
+ content: "\e055";
+}
+.glyphicon-list:before {
+ content: "\e056";
+}
+.glyphicon-indent-left:before {
+ content: "\e057";
+}
+.glyphicon-indent-right:before {
+ content: "\e058";
+}
+.glyphicon-facetime-video:before {
+ content: "\e059";
+}
+.glyphicon-picture:before {
+ content: "\e060";
+}
+.glyphicon-map-marker:before {
+ content: "\e062";
+}
+.glyphicon-adjust:before {
+ content: "\e063";
+}
+.glyphicon-tint:before {
+ content: "\e064";
+}
+.glyphicon-edit:before {
+ content: "\e065";
+}
+.glyphicon-share:before {
+ content: "\e066";
+}
+.glyphicon-check:before {
+ content: "\e067";
+}
+.glyphicon-move:before {
+ content: "\e068";
+}
+.glyphicon-step-backward:before {
+ content: "\e069";
+}
+.glyphicon-fast-backward:before {
+ content: "\e070";
+}
+.glyphicon-backward:before {
+ content: "\e071";
+}
+.glyphicon-play:before {
+ content: "\e072";
+}
+.glyphicon-pause:before {
+ content: "\e073";
+}
+.glyphicon-stop:before {
+ content: "\e074";
+}
+.glyphicon-forward:before {
+ content: "\e075";
+}
+.glyphicon-fast-forward:before {
+ content: "\e076";
+}
+.glyphicon-step-forward:before {
+ content: "\e077";
+}
+.glyphicon-eject:before {
+ content: "\e078";
+}
+.glyphicon-chevron-left:before {
+ content: "\e079";
+}
+.glyphicon-chevron-right:before {
+ content: "\e080";
+}
+.glyphicon-plus-sign:before {
+ content: "\e081";
+}
+.glyphicon-minus-sign:before {
+ content: "\e082";
+}
+.glyphicon-remove-sign:before {
+ content: "\e083";
+}
+.glyphicon-ok-sign:before {
+ content: "\e084";
+}
+.glyphicon-question-sign:before {
+ content: "\e085";
+}
+.glyphicon-info-sign:before {
+ content: "\e086";
+}
+.glyphicon-screenshot:before {
+ content: "\e087";
+}
+.glyphicon-remove-circle:before {
+ content: "\e088";
+}
+.glyphicon-ok-circle:before {
+ content: "\e089";
+}
+.glyphicon-ban-circle:before {
+ content: "\e090";
+}
+.glyphicon-arrow-left:before {
+ content: "\e091";
+}
+.glyphicon-arrow-right:before {
+ content: "\e092";
+}
+.glyphicon-arrow-up:before {
+ content: "\e093";
+}
+.glyphicon-arrow-down:before {
+ content: "\e094";
+}
+.glyphicon-share-alt:before {
+ content: "\e095";
+}
+.glyphicon-resize-full:before {
+ content: "\e096";
+}
+.glyphicon-resize-small:before {
+ content: "\e097";
+}
+.glyphicon-exclamation-sign:before {
+ content: "\e101";
+}
+.glyphicon-gift:before {
+ content: "\e102";
+}
+.glyphicon-leaf:before {
+ content: "\e103";
+}
+.glyphicon-fire:before {
+ content: "\e104";
+}
+.glyphicon-eye-open:before {
+ content: "\e105";
+}
+.glyphicon-eye-close:before {
+ content: "\e106";
+}
+.glyphicon-warning-sign:before {
+ content: "\e107";
+}
+.glyphicon-plane:before {
+ content: "\e108";
+}
+.glyphicon-calendar:before {
+ content: "\e109";
+}
+.glyphicon-random:before {
+ content: "\e110";
+}
+.glyphicon-comment:before {
+ content: "\e111";
+}
+.glyphicon-magnet:before {
+ content: "\e112";
+}
+.glyphicon-chevron-up:before {
+ content: "\e113";
+}
+.glyphicon-chevron-down:before {
+ content: "\e114";
+}
+.glyphicon-retweet:before {
+ content: "\e115";
+}
+.glyphicon-shopping-cart:before {
+ content: "\e116";
+}
+.glyphicon-folder-close:before {
+ content: "\e117";
+}
+.glyphicon-folder-open:before {
+ content: "\e118";
+}
+.glyphicon-resize-vertical:before {
+ content: "\e119";
+}
+.glyphicon-resize-horizontal:before {
+ content: "\e120";
+}
+.glyphicon-hdd:before {
+ content: "\e121";
+}
+.glyphicon-bullhorn:before {
+ content: "\e122";
+}
+.glyphicon-bell:before {
+ content: "\e123";
+}
+.glyphicon-certificate:before {
+ content: "\e124";
+}
+.glyphicon-thumbs-up:before {
+ content: "\e125";
+}
+.glyphicon-thumbs-down:before {
+ content: "\e126";
+}
+.glyphicon-hand-right:before {
+ content: "\e127";
+}
+.glyphicon-hand-left:before {
+ content: "\e128";
+}
+.glyphicon-hand-up:before {
+ content: "\e129";
+}
+.glyphicon-hand-down:before {
+ content: "\e130";
+}
+.glyphicon-circle-arrow-right:before {
+ content: "\e131";
+}
+.glyphicon-circle-arrow-left:before {
+ content: "\e132";
+}
+.glyphicon-circle-arrow-up:before {
+ content: "\e133";
+}
+.glyphicon-circle-arrow-down:before {
+ content: "\e134";
+}
+.glyphicon-globe:before {
+ content: "\e135";
+}
+.glyphicon-wrench:before {
+ content: "\e136";
+}
+.glyphicon-tasks:before {
+ content: "\e137";
+}
+.glyphicon-filter:before {
+ content: "\e138";
+}
+.glyphicon-briefcase:before {
+ content: "\e139";
+}
+.glyphicon-fullscreen:before {
+ content: "\e140";
+}
+.glyphicon-dashboard:before {
+ content: "\e141";
+}
+.glyphicon-paperclip:before {
+ content: "\e142";
+}
+.glyphicon-heart-empty:before {
+ content: "\e143";
+}
+.glyphicon-link:before {
+ content: "\e144";
+}
+.glyphicon-phone:before {
+ content: "\e145";
+}
+.glyphicon-pushpin:before {
+ content: "\e146";
+}
+.glyphicon-usd:before {
+ content: "\e148";
+}
+.glyphicon-gbp:before {
+ content: "\e149";
+}
+.glyphicon-sort:before {
+ content: "\e150";
+}
+.glyphicon-sort-by-alphabet:before {
+ content: "\e151";
+}
+.glyphicon-sort-by-alphabet-alt:before {
+ content: "\e152";
+}
+.glyphicon-sort-by-order:before {
+ content: "\e153";
+}
+.glyphicon-sort-by-order-alt:before {
+ content: "\e154";
+}
+.glyphicon-sort-by-attributes:before {
+ content: "\e155";
+}
+.glyphicon-sort-by-attributes-alt:before {
+ content: "\e156";
+}
+.glyphicon-unchecked:before {
+ content: "\e157";
+}
+.glyphicon-expand:before {
+ content: "\e158";
+}
+.glyphicon-collapse-down:before {
+ content: "\e159";
+}
+.glyphicon-collapse-up:before {
+ content: "\e160";
+}
+.glyphicon-log-in:before {
+ content: "\e161";
+}
+.glyphicon-flash:before {
+ content: "\e162";
+}
+.glyphicon-log-out:before {
+ content: "\e163";
+}
+.glyphicon-new-window:before {
+ content: "\e164";
+}
+.glyphicon-record:before {
+ content: "\e165";
+}
+.glyphicon-save:before {
+ content: "\e166";
+}
+.glyphicon-open:before {
+ content: "\e167";
+}
+.glyphicon-saved:before {
+ content: "\e168";
+}
+.glyphicon-import:before {
+ content: "\e169";
+}
+.glyphicon-export:before {
+ content: "\e170";
+}
+.glyphicon-send:before {
+ content: "\e171";
+}
+.glyphicon-floppy-disk:before {
+ content: "\e172";
+}
+.glyphicon-floppy-saved:before {
+ content: "\e173";
+}
+.glyphicon-floppy-remove:before {
+ content: "\e174";
+}
+.glyphicon-floppy-save:before {
+ content: "\e175";
+}
+.glyphicon-floppy-open:before {
+ content: "\e176";
+}
+.glyphicon-credit-card:before {
+ content: "\e177";
+}
+.glyphicon-transfer:before {
+ content: "\e178";
+}
+.glyphicon-cutlery:before {
+ content: "\e179";
+}
+.glyphicon-header:before {
+ content: "\e180";
+}
+.glyphicon-compressed:before {
+ content: "\e181";
+}
+.glyphicon-earphone:before {
+ content: "\e182";
+}
+.glyphicon-phone-alt:before {
+ content: "\e183";
+}
+.glyphicon-tower:before {
+ content: "\e184";
+}
+.glyphicon-stats:before {
+ content: "\e185";
+}
+.glyphicon-sd-video:before {
+ content: "\e186";
+}
+.glyphicon-hd-video:before {
+ content: "\e187";
+}
+.glyphicon-subtitles:before {
+ content: "\e188";
+}
+.glyphicon-sound-stereo:before {
+ content: "\e189";
+}
+.glyphicon-sound-dolby:before {
+ content: "\e190";
+}
+.glyphicon-sound-5-1:before {
+ content: "\e191";
+}
+.glyphicon-sound-6-1:before {
+ content: "\e192";
+}
+.glyphicon-sound-7-1:before {
+ content: "\e193";
+}
+.glyphicon-copyright-mark:before {
+ content: "\e194";
+}
+.glyphicon-registration-mark:before {
+ content: "\e195";
+}
+.glyphicon-cloud-download:before {
+ content: "\e197";
+}
+.glyphicon-cloud-upload:before {
+ content: "\e198";
+}
+.glyphicon-tree-conifer:before {
+ content: "\e199";
+}
+.glyphicon-tree-deciduous:before {
+ content: "\e200";
+}
+.glyphicon-cd:before {
+ content: "\e201";
+}
+.glyphicon-save-file:before {
+ content: "\e202";
+}
+.glyphicon-open-file:before {
+ content: "\e203";
+}
+.glyphicon-level-up:before {
+ content: "\e204";
+}
+.glyphicon-copy:before {
+ content: "\e205";
+}
+.glyphicon-paste:before {
+ content: "\e206";
+}
+.glyphicon-alert:before {
+ content: "\e209";
+}
+.glyphicon-equalizer:before {
+ content: "\e210";
+}
+.glyphicon-king:before {
+ content: "\e211";
+}
+.glyphicon-queen:before {
+ content: "\e212";
+}
+.glyphicon-pawn:before {
+ content: "\e213";
+}
+.glyphicon-bishop:before {
+ content: "\e214";
+}
+.glyphicon-knight:before {
+ content: "\e215";
+}
+.glyphicon-baby-formula:before {
+ content: "\e216";
+}
+.glyphicon-tent:before {
+ content: "\26fa";
+}
+.glyphicon-blackboard:before {
+ content: "\e218";
+}
+.glyphicon-bed:before {
+ content: "\e219";
+}
+.glyphicon-apple:before {
+ content: "\f8ff";
+}
+.glyphicon-erase:before {
+ content: "\e221";
+}
+.glyphicon-hourglass:before {
+ content: "\231b";
+}
+.glyphicon-lamp:before {
+ content: "\e223";
+}
+.glyphicon-duplicate:before {
+ content: "\e224";
+}
+.glyphicon-piggy-bank:before {
+ content: "\e225";
+}
+.glyphicon-scissors:before {
+ content: "\e226";
+}
+.glyphicon-bitcoin:before {
+ content: "\e227";
+}
+.glyphicon-btc:before {
+ content: "\e227";
+}
+.glyphicon-xbt:before {
+ content: "\e227";
+}
+.glyphicon-yen:before {
+ content: "\00a5";
+}
+.glyphicon-jpy:before {
+ content: "\00a5";
+}
+.glyphicon-ruble:before {
+ content: "\20bd";
+}
+.glyphicon-rub:before {
+ content: "\20bd";
+}
+.glyphicon-scale:before {
+ content: "\e230";
+}
+.glyphicon-ice-lolly:before {
+ content: "\e231";
+}
+.glyphicon-ice-lolly-tasted:before {
+ content: "\e232";
+}
+.glyphicon-education:before {
+ content: "\e233";
+}
+.glyphicon-option-horizontal:before {
+ content: "\e234";
+}
+.glyphicon-option-vertical:before {
+ content: "\e235";
+}
+.glyphicon-menu-hamburger:before {
+ content: "\e236";
+}
+.glyphicon-modal-window:before {
+ content: "\e237";
+}
+.glyphicon-oil:before {
+ content: "\e238";
+}
+.glyphicon-grain:before {
+ content: "\e239";
+}
+.glyphicon-sunglasses:before {
+ content: "\e240";
+}
+.glyphicon-text-size:before {
+ content: "\e241";
+}
+.glyphicon-text-color:before {
+ content: "\e242";
+}
+.glyphicon-text-background:before {
+ content: "\e243";
+}
+.glyphicon-object-align-top:before {
+ content: "\e244";
+}
+.glyphicon-object-align-bottom:before {
+ content: "\e245";
+}
+.glyphicon-object-align-horizontal:before {
+ content: "\e246";
+}
+.glyphicon-object-align-left:before {
+ content: "\e247";
+}
+.glyphicon-object-align-vertical:before {
+ content: "\e248";
+}
+.glyphicon-object-align-right:before {
+ content: "\e249";
+}
+.glyphicon-triangle-right:before {
+ content: "\e250";
+}
+.glyphicon-triangle-left:before {
+ content: "\e251";
+}
+.glyphicon-triangle-bottom:before {
+ content: "\e252";
+}
+.glyphicon-triangle-top:before {
+ content: "\e253";
+}
+.glyphicon-console:before {
+ content: "\e254";
+}
+.glyphicon-superscript:before {
+ content: "\e255";
+}
+.glyphicon-subscript:before {
+ content: "\e256";
+}
+.glyphicon-menu-left:before {
+ content: "\e257";
+}
+.glyphicon-menu-right:before {
+ content: "\e258";
+}
+.glyphicon-menu-down:before {
+ content: "\e259";
+}
+.glyphicon-menu-up:before {
+ content: "\e260";
+}
+* {
+ -webkit-box-sizing: border-box;
+ -moz-box-sizing: border-box;
+ box-sizing: border-box;
+}
+*:before,
+*:after {
+ -webkit-box-sizing: border-box;
+ -moz-box-sizing: border-box;
+ box-sizing: border-box;
+}
+html {
+ font-size: 10px;
+ -webkit-tap-highlight-color: rgba(0, 0, 0, 0);
+}
+body {
+ font-family: "Helvetica Neue", Helvetica, Arial, sans-serif;
+ font-size: 14px;
+ line-height: 1.42857143;
+ color: #c8c8c8;
+ background-color: #272b30;
+}
+input,
+button,
+select,
+textarea {
+ font-family: inherit;
+ font-size: inherit;
+ line-height: inherit;
+}
+a {
+ color: #ffffff;
+ text-decoration: none;
+}
+a:hover,
+a:focus {
+ color: #ffffff;
+ text-decoration: underline;
+}
+a:focus {
+ outline: 5px auto -webkit-focus-ring-color;
+ outline-offset: -2px;
+}
+figure {
+ margin: 0;
+}
+img {
+ vertical-align: middle;
+}
+.img-responsive,
+.thumbnail > img,
+.thumbnail a > img,
+.carousel-inner > .item > img,
+.carousel-inner > .item > a > img {
+ display: block;
+ max-width: 100%;
+ height: auto;
+}
+.img-rounded {
+ border-radius: 6px;
+}
+.img-thumbnail {
+ padding: 4px;
+ line-height: 1.42857143;
+ background-color: #1c1e22;
+ border: 1px solid #0c0d0e;
+ border-radius: 4px;
+ -webkit-transition: all 0.2s ease-in-out;
+ -o-transition: all 0.2s ease-in-out;
+ transition: all 0.2s ease-in-out;
+ display: inline-block;
+ max-width: 100%;
+ height: auto;
+}
+.img-circle {
+ border-radius: 50%;
+}
+hr {
+ margin-top: 20px;
+ margin-bottom: 20px;
+ border: 0;
+ border-top: 1px solid #1c1e22;
+}
+.sr-only {
+ position: absolute;
+ width: 1px;
+ height: 1px;
+ margin: -1px;
+ padding: 0;
+ overflow: hidden;
+ clip: rect(0, 0, 0, 0);
+ border: 0;
+}
+.sr-only-focusable:active,
+.sr-only-focusable:focus {
+ position: static;
+ width: auto;
+ height: auto;
+ margin: 0;
+ overflow: visible;
+ clip: auto;
+}
+[role="button"] {
+ cursor: pointer;
+}
+h1,
+h2,
+h3,
+h4,
+h5,
+h6,
+.h1,
+.h2,
+.h3,
+.h4,
+.h5,
+.h6 {
+ font-family: "Helvetica Neue", Helvetica, Arial, sans-serif;
+ font-weight: 500;
+ line-height: 1.1;
+ color: inherit;
+}
+h1 small,
+h2 small,
+h3 small,
+h4 small,
+h5 small,
+h6 small,
+.h1 small,
+.h2 small,
+.h3 small,
+.h4 small,
+.h5 small,
+.h6 small,
+h1 .small,
+h2 .small,
+h3 .small,
+h4 .small,
+h5 .small,
+h6 .small,
+.h1 .small,
+.h2 .small,
+.h3 .small,
+.h4 .small,
+.h5 .small,
+.h6 .small {
+ font-weight: normal;
+ line-height: 1;
+ color: #7a8288;
+}
+h1,
+.h1,
+h2,
+.h2,
+h3,
+.h3 {
+ margin-top: 20px;
+ margin-bottom: 10px;
+}
+h1 small,
+.h1 small,
+h2 small,
+.h2 small,
+h3 small,
+.h3 small,
+h1 .small,
+.h1 .small,
+h2 .small,
+.h2 .small,
+h3 .small,
+.h3 .small {
+ font-size: 65%;
+}
+h4,
+.h4,
+h5,
+.h5,
+h6,
+.h6 {
+ margin-top: 10px;
+ margin-bottom: 10px;
+}
+h4 small,
+.h4 small,
+h5 small,
+.h5 small,
+h6 small,
+.h6 small,
+h4 .small,
+.h4 .small,
+h5 .small,
+.h5 .small,
+h6 .small,
+.h6 .small {
+ font-size: 75%;
+}
+h1,
+.h1 {
+ font-size: 36px;
+}
+h2,
+.h2 {
+ font-size: 30px;
+}
+h3,
+.h3 {
+ font-size: 24px;
+}
+h4,
+.h4 {
+ font-size: 18px;
+}
+h5,
+.h5 {
+ font-size: 14px;
+}
+h6,
+.h6 {
+ font-size: 12px;
+}
+p {
+ margin: 0 0 10px;
+}
+.lead {
+ margin-bottom: 20px;
+ font-size: 16px;
+ font-weight: 300;
+ line-height: 1.4;
+}
+@media (min-width: 768px) {
+ .lead {
+ font-size: 21px;
+ }
+}
+small,
+.small {
+ font-size: 85%;
+}
+mark,
+.mark {
+ background-color: #f89406;
+ padding: .2em;
+}
+.text-left {
+ text-align: left;
+}
+.text-right {
+ text-align: right;
+}
+.text-center {
+ text-align: center;
+}
+.text-justify {
+ text-align: justify;
+}
+.text-nowrap {
+ white-space: nowrap;
+}
+.text-lowercase {
+ text-transform: lowercase;
+}
+.text-uppercase {
+ text-transform: uppercase;
+}
+.text-capitalize {
+ text-transform: capitalize;
+}
+.text-muted {
+ color: #7a8288;
+}
+.text-primary {
+ color: #7a8288;
+}
+a.text-primary:hover,
+a.text-primary:focus {
+ color: #62686d;
+}
+.text-success {
+ color: #ffffff;
+}
+a.text-success:hover,
+a.text-success:focus {
+ color: #e6e6e6;
+}
+.text-info {
+ color: #ffffff;
+}
+a.text-info:hover,
+a.text-info:focus {
+ color: #e6e6e6;
+}
+.text-warning {
+ color: #ffffff;
+}
+a.text-warning:hover,
+a.text-warning:focus {
+ color: #e6e6e6;
+}
+.text-danger {
+ color: #ffffff;
+}
+a.text-danger:hover,
+a.text-danger:focus {
+ color: #e6e6e6;
+}
+.bg-primary {
+ color: #fff;
+ background-color: #7a8288;
+}
+a.bg-primary:hover,
+a.bg-primary:focus {
+ background-color: #62686d;
+}
+.bg-success {
+ background-color: #62c462;
+}
+a.bg-success:hover,
+a.bg-success:focus {
+ background-color: #42b142;
+}
+.bg-info {
+ background-color: #5bc0de;
+}
+a.bg-info:hover,
+a.bg-info:focus {
+ background-color: #31b0d5;
+}
+.bg-warning {
+ background-color: #f89406;
+}
+a.bg-warning:hover,
+a.bg-warning:focus {
+ background-color: #c67605;
+}
+.bg-danger {
+ background-color: #ee5f5b;
+}
+a.bg-danger:hover,
+a.bg-danger:focus {
+ background-color: #e9322d;
+}
+.page-header {
+ padding-bottom: 9px;
+ margin: 40px 0 20px;
+ border-bottom: 1px solid #1c1e22;
+}
+ul,
+ol {
+ margin-top: 0;
+ margin-bottom: 10px;
+}
+ul ul,
+ol ul,
+ul ol,
+ol ol {
+ margin-bottom: 0;
+}
+.list-unstyled {
+ padding-left: 0;
+ list-style: none;
+}
+.list-inline {
+ padding-left: 0;
+ list-style: none;
+ margin-left: -5px;
+}
+.list-inline > li {
+ display: inline-block;
+ padding-left: 5px;
+ padding-right: 5px;
+}
+dl {
+ margin-top: 0;
+ margin-bottom: 20px;
+}
+dt,
+dd {
+ line-height: 1.42857143;
+}
+dt {
+ font-weight: bold;
+}
+dd {
+ margin-left: 0;
+}
+@media (min-width: 768px) {
+ .dl-horizontal dt {
+ float: left;
+ width: 160px;
+ clear: left;
+ text-align: right;
+ overflow: hidden;
+ text-overflow: ellipsis;
+ white-space: nowrap;
+ }
+ .dl-horizontal dd {
+ margin-left: 180px;
+ }
+}
+abbr[title],
+abbr[data-original-title] {
+ cursor: help;
+ border-bottom: 1px dotted #7a8288;
+}
+.initialism {
+ font-size: 90%;
+ text-transform: uppercase;
+}
+blockquote {
+ padding: 10px 20px;
+ margin: 0 0 20px;
+ font-size: 17.5px;
+ border-left: 5px solid #7a8288;
+}
+blockquote p:last-child,
+blockquote ul:last-child,
+blockquote ol:last-child {
+ margin-bottom: 0;
+}
+blockquote footer,
+blockquote small,
+blockquote .small {
+ display: block;
+ font-size: 80%;
+ line-height: 1.42857143;
+ color: #7a8288;
+}
+blockquote footer:before,
+blockquote small:before,
+blockquote .small:before {
+ content: '\2014 \00A0';
+}
+.blockquote-reverse,
+blockquote.pull-right {
+ padding-right: 15px;
+ padding-left: 0;
+ border-right: 5px solid #7a8288;
+ border-left: 0;
+ text-align: right;
+}
+.blockquote-reverse footer:before,
+blockquote.pull-right footer:before,
+.blockquote-reverse small:before,
+blockquote.pull-right small:before,
+.blockquote-reverse .small:before,
+blockquote.pull-right .small:before {
+ content: '';
+}
+.blockquote-reverse footer:after,
+blockquote.pull-right footer:after,
+.blockquote-reverse small:after,
+blockquote.pull-right small:after,
+.blockquote-reverse .small:after,
+blockquote.pull-right .small:after {
+ content: '\00A0 \2014';
+}
+address {
+ margin-bottom: 20px;
+ font-style: normal;
+ line-height: 1.42857143;
+}
+code,
+kbd,
+pre,
+samp {
+ font-family: Menlo, Monaco, Consolas, "Courier New", monospace;
+}
+code {
+ padding: 2px 4px;
+ font-size: 90%;
+ color: #c7254e;
+ background-color: #f9f2f4;
+ border-radius: 4px;
+}
+kbd {
+ padding: 2px 4px;
+ font-size: 90%;
+ color: #ffffff;
+ background-color: #333333;
+ border-radius: 3px;
+ -webkit-box-shadow: inset 0 -1px 0 rgba(0, 0, 0, 0.25);
+ box-shadow: inset 0 -1px 0 rgba(0, 0, 0, 0.25);
+}
+kbd kbd {
+ padding: 0;
+ font-size: 100%;
+ font-weight: bold;
+ -webkit-box-shadow: none;
+ box-shadow: none;
+}
+pre {
+ display: block;
+ padding: 9.5px;
+ margin: 0 0 10px;
+ font-size: 13px;
+ line-height: 1.42857143;
+ word-break: break-all;
+ word-wrap: break-word;
+ color: #3a3f44;
+ background-color: #f5f5f5;
+ border: 1px solid #cccccc;
+ border-radius: 4px;
+}
+pre code {
+ padding: 0;
+ font-size: inherit;
+ color: inherit;
+ white-space: pre-wrap;
+ background-color: transparent;
+ border-radius: 0;
+}
+.pre-scrollable {
+ max-height: 340px;
+ overflow-y: scroll;
+}
+.container {
+ margin-right: auto;
+ margin-left: auto;
+ padding-left: 15px;
+ padding-right: 15px;
+}
+@media (min-width: 768px) {
+ .container {
+ width: 750px;
+ }
+}
+@media (min-width: 992px) {
+ .container {
+ width: 970px;
+ }
+}
+@media (min-width: 1200px) {
+ .container {
+ width: 1170px;
+ }
+}
+.container-fluid {
+ margin-right: auto;
+ margin-left: auto;
+ padding-left: 15px;
+ padding-right: 15px;
+}
+.row {
+ margin-left: -15px;
+ margin-right: -15px;
+}
+.col-xs-1, .col-sm-1, .col-md-1, .col-lg-1, .col-xs-2, .col-sm-2, .col-md-2, .col-lg-2, .col-xs-3, .col-sm-3, .col-md-3, .col-lg-3, .col-xs-4, .col-sm-4, .col-md-4, .col-lg-4, .col-xs-5, .col-sm-5, .col-md-5, .col-lg-5, .col-xs-6, .col-sm-6, .col-md-6, .col-lg-6, .col-xs-7, .col-sm-7, .col-md-7, .col-lg-7, .col-xs-8, .col-sm-8, .col-md-8, .col-lg-8, .col-xs-9, .col-sm-9, .col-md-9, .col-lg-9, .col-xs-10, .col-sm-10, .col-md-10, .col-lg-10, .col-xs-11, .col-sm-11, .col-md-11, .col-lg-11, .col-xs-12, .col-sm-12, .col-md-12, .col-lg-12 {
+ position: relative;
+ min-height: 1px;
+ padding-left: 15px;
+ padding-right: 15px;
+}
+.col-xs-1, .col-xs-2, .col-xs-3, .col-xs-4, .col-xs-5, .col-xs-6, .col-xs-7, .col-xs-8, .col-xs-9, .col-xs-10, .col-xs-11, .col-xs-12 {
+ float: left;
+}
+.col-xs-12 {
+ width: 100%;
+}
+.col-xs-11 {
+ width: 91.66666667%;
+}
+.col-xs-10 {
+ width: 83.33333333%;
+}
+.col-xs-9 {
+ width: 75%;
+}
+.col-xs-8 {
+ width: 66.66666667%;
+}
+.col-xs-7 {
+ width: 58.33333333%;
+}
+.col-xs-6 {
+ width: 50%;
+}
+.col-xs-5 {
+ width: 41.66666667%;
+}
+.col-xs-4 {
+ width: 33.33333333%;
+}
+.col-xs-3 {
+ width: 25%;
+}
+.col-xs-2 {
+ width: 16.66666667%;
+}
+.col-xs-1 {
+ width: 8.33333333%;
+}
+.col-xs-pull-12 {
+ right: 100%;
+}
+.col-xs-pull-11 {
+ right: 91.66666667%;
+}
+.col-xs-pull-10 {
+ right: 83.33333333%;
+}
+.col-xs-pull-9 {
+ right: 75%;
+}
+.col-xs-pull-8 {
+ right: 66.66666667%;
+}
+.col-xs-pull-7 {
+ right: 58.33333333%;
+}
+.col-xs-pull-6 {
+ right: 50%;
+}
+.col-xs-pull-5 {
+ right: 41.66666667%;
+}
+.col-xs-pull-4 {
+ right: 33.33333333%;
+}
+.col-xs-pull-3 {
+ right: 25%;
+}
+.col-xs-pull-2 {
+ right: 16.66666667%;
+}
+.col-xs-pull-1 {
+ right: 8.33333333%;
+}
+.col-xs-pull-0 {
+ right: auto;
+}
+.col-xs-push-12 {
+ left: 100%;
+}
+.col-xs-push-11 {
+ left: 91.66666667%;
+}
+.col-xs-push-10 {
+ left: 83.33333333%;
+}
+.col-xs-push-9 {
+ left: 75%;
+}
+.col-xs-push-8 {
+ left: 66.66666667%;
+}
+.col-xs-push-7 {
+ left: 58.33333333%;
+}
+.col-xs-push-6 {
+ left: 50%;
+}
+.col-xs-push-5 {
+ left: 41.66666667%;
+}
+.col-xs-push-4 {
+ left: 33.33333333%;
+}
+.col-xs-push-3 {
+ left: 25%;
+}
+.col-xs-push-2 {
+ left: 16.66666667%;
+}
+.col-xs-push-1 {
+ left: 8.33333333%;
+}
+.col-xs-push-0 {
+ left: auto;
+}
+.col-xs-offset-12 {
+ margin-left: 100%;
+}
+.col-xs-offset-11 {
+ margin-left: 91.66666667%;
+}
+.col-xs-offset-10 {
+ margin-left: 83.33333333%;
+}
+.col-xs-offset-9 {
+ margin-left: 75%;
+}
+.col-xs-offset-8 {
+ margin-left: 66.66666667%;
+}
+.col-xs-offset-7 {
+ margin-left: 58.33333333%;
+}
+.col-xs-offset-6 {
+ margin-left: 50%;
+}
+.col-xs-offset-5 {
+ margin-left: 41.66666667%;
+}
+.col-xs-offset-4 {
+ margin-left: 33.33333333%;
+}
+.col-xs-offset-3 {
+ margin-left: 25%;
+}
+.col-xs-offset-2 {
+ margin-left: 16.66666667%;
+}
+.col-xs-offset-1 {
+ margin-left: 8.33333333%;
+}
+.col-xs-offset-0 {
+ margin-left: 0%;
+}
+@media (min-width: 768px) {
+ .col-sm-1, .col-sm-2, .col-sm-3, .col-sm-4, .col-sm-5, .col-sm-6, .col-sm-7, .col-sm-8, .col-sm-9, .col-sm-10, .col-sm-11, .col-sm-12 {
+ float: left;
+ }
+ .col-sm-12 {
+ width: 100%;
+ }
+ .col-sm-11 {
+ width: 91.66666667%;
+ }
+ .col-sm-10 {
+ width: 83.33333333%;
+ }
+ .col-sm-9 {
+ width: 75%;
+ }
+ .col-sm-8 {
+ width: 66.66666667%;
+ }
+ .col-sm-7 {
+ width: 58.33333333%;
+ }
+ .col-sm-6 {
+ width: 50%;
+ }
+ .col-sm-5 {
+ width: 41.66666667%;
+ }
+ .col-sm-4 {
+ width: 33.33333333%;
+ }
+ .col-sm-3 {
+ width: 25%;
+ }
+ .col-sm-2 {
+ width: 16.66666667%;
+ }
+ .col-sm-1 {
+ width: 8.33333333%;
+ }
+ .col-sm-pull-12 {
+ right: 100%;
+ }
+ .col-sm-pull-11 {
+ right: 91.66666667%;
+ }
+ .col-sm-pull-10 {
+ right: 83.33333333%;
+ }
+ .col-sm-pull-9 {
+ right: 75%;
+ }
+ .col-sm-pull-8 {
+ right: 66.66666667%;
+ }
+ .col-sm-pull-7 {
+ right: 58.33333333%;
+ }
+ .col-sm-pull-6 {
+ right: 50%;
+ }
+ .col-sm-pull-5 {
+ right: 41.66666667%;
+ }
+ .col-sm-pull-4 {
+ right: 33.33333333%;
+ }
+ .col-sm-pull-3 {
+ right: 25%;
+ }
+ .col-sm-pull-2 {
+ right: 16.66666667%;
+ }
+ .col-sm-pull-1 {
+ right: 8.33333333%;
+ }
+ .col-sm-pull-0 {
+ right: auto;
+ }
+ .col-sm-push-12 {
+ left: 100%;
+ }
+ .col-sm-push-11 {
+ left: 91.66666667%;
+ }
+ .col-sm-push-10 {
+ left: 83.33333333%;
+ }
+ .col-sm-push-9 {
+ left: 75%;
+ }
+ .col-sm-push-8 {
+ left: 66.66666667%;
+ }
+ .col-sm-push-7 {
+ left: 58.33333333%;
+ }
+ .col-sm-push-6 {
+ left: 50%;
+ }
+ .col-sm-push-5 {
+ left: 41.66666667%;
+ }
+ .col-sm-push-4 {
+ left: 33.33333333%;
+ }
+ .col-sm-push-3 {
+ left: 25%;
+ }
+ .col-sm-push-2 {
+ left: 16.66666667%;
+ }
+ .col-sm-push-1 {
+ left: 8.33333333%;
+ }
+ .col-sm-push-0 {
+ left: auto;
+ }
+ .col-sm-offset-12 {
+ margin-left: 100%;
+ }
+ .col-sm-offset-11 {
+ margin-left: 91.66666667%;
+ }
+ .col-sm-offset-10 {
+ margin-left: 83.33333333%;
+ }
+ .col-sm-offset-9 {
+ margin-left: 75%;
+ }
+ .col-sm-offset-8 {
+ margin-left: 66.66666667%;
+ }
+ .col-sm-offset-7 {
+ margin-left: 58.33333333%;
+ }
+ .col-sm-offset-6 {
+ margin-left: 50%;
+ }
+ .col-sm-offset-5 {
+ margin-left: 41.66666667%;
+ }
+ .col-sm-offset-4 {
+ margin-left: 33.33333333%;
+ }
+ .col-sm-offset-3 {
+ margin-left: 25%;
+ }
+ .col-sm-offset-2 {
+ margin-left: 16.66666667%;
+ }
+ .col-sm-offset-1 {
+ margin-left: 8.33333333%;
+ }
+ .col-sm-offset-0 {
+ margin-left: 0%;
+ }
+}
+@media (min-width: 992px) {
+ .col-md-1, .col-md-2, .col-md-3, .col-md-4, .col-md-5, .col-md-6, .col-md-7, .col-md-8, .col-md-9, .col-md-10, .col-md-11, .col-md-12 {
+ float: left;
+ }
+ .col-md-12 {
+ width: 100%;
+ }
+ .col-md-11 {
+ width: 91.66666667%;
+ }
+ .col-md-10 {
+ width: 83.33333333%;
+ }
+ .col-md-9 {
+ width: 75%;
+ }
+ .col-md-8 {
+ width: 66.66666667%;
+ }
+ .col-md-7 {
+ width: 58.33333333%;
+ }
+ .col-md-6 {
+ width: 50%;
+ }
+ .col-md-5 {
+ width: 41.66666667%;
+ }
+ .col-md-4 {
+ width: 33.33333333%;
+ }
+ .col-md-3 {
+ width: 25%;
+ }
+ .col-md-2 {
+ width: 16.66666667%;
+ }
+ .col-md-1 {
+ width: 8.33333333%;
+ }
+ .col-md-pull-12 {
+ right: 100%;
+ }
+ .col-md-pull-11 {
+ right: 91.66666667%;
+ }
+ .col-md-pull-10 {
+ right: 83.33333333%;
+ }
+ .col-md-pull-9 {
+ right: 75%;
+ }
+ .col-md-pull-8 {
+ right: 66.66666667%;
+ }
+ .col-md-pull-7 {
+ right: 58.33333333%;
+ }
+ .col-md-pull-6 {
+ right: 50%;
+ }
+ .col-md-pull-5 {
+ right: 41.66666667%;
+ }
+ .col-md-pull-4 {
+ right: 33.33333333%;
+ }
+ .col-md-pull-3 {
+ right: 25%;
+ }
+ .col-md-pull-2 {
+ right: 16.66666667%;
+ }
+ .col-md-pull-1 {
+ right: 8.33333333%;
+ }
+ .col-md-pull-0 {
+ right: auto;
+ }
+ .col-md-push-12 {
+ left: 100%;
+ }
+ .col-md-push-11 {
+ left: 91.66666667%;
+ }
+ .col-md-push-10 {
+ left: 83.33333333%;
+ }
+ .col-md-push-9 {
+ left: 75%;
+ }
+ .col-md-push-8 {
+ left: 66.66666667%;
+ }
+ .col-md-push-7 {
+ left: 58.33333333%;
+ }
+ .col-md-push-6 {
+ left: 50%;
+ }
+ .col-md-push-5 {
+ left: 41.66666667%;
+ }
+ .col-md-push-4 {
+ left: 33.33333333%;
+ }
+ .col-md-push-3 {
+ left: 25%;
+ }
+ .col-md-push-2 {
+ left: 16.66666667%;
+ }
+ .col-md-push-1 {
+ left: 8.33333333%;
+ }
+ .col-md-push-0 {
+ left: auto;
+ }
+ .col-md-offset-12 {
+ margin-left: 100%;
+ }
+ .col-md-offset-11 {
+ margin-left: 91.66666667%;
+ }
+ .col-md-offset-10 {
+ margin-left: 83.33333333%;
+ }
+ .col-md-offset-9 {
+ margin-left: 75%;
+ }
+ .col-md-offset-8 {
+ margin-left: 66.66666667%;
+ }
+ .col-md-offset-7 {
+ margin-left: 58.33333333%;
+ }
+ .col-md-offset-6 {
+ margin-left: 50%;
+ }
+ .col-md-offset-5 {
+ margin-left: 41.66666667%;
+ }
+ .col-md-offset-4 {
+ margin-left: 33.33333333%;
+ }
+ .col-md-offset-3 {
+ margin-left: 25%;
+ }
+ .col-md-offset-2 {
+ margin-left: 16.66666667%;
+ }
+ .col-md-offset-1 {
+ margin-left: 8.33333333%;
+ }
+ .col-md-offset-0 {
+ margin-left: 0%;
+ }
+}
+@media (min-width: 1200px) {
+ .col-lg-1, .col-lg-2, .col-lg-3, .col-lg-4, .col-lg-5, .col-lg-6, .col-lg-7, .col-lg-8, .col-lg-9, .col-lg-10, .col-lg-11, .col-lg-12 {
+ float: left;
+ }
+ .col-lg-12 {
+ width: 100%;
+ }
+ .col-lg-11 {
+ width: 91.66666667%;
+ }
+ .col-lg-10 {
+ width: 83.33333333%;
+ }
+ .col-lg-9 {
+ width: 75%;
+ }
+ .col-lg-8 {
+ width: 66.66666667%;
+ }
+ .col-lg-7 {
+ width: 58.33333333%;
+ }
+ .col-lg-6 {
+ width: 50%;
+ }
+ .col-lg-5 {
+ width: 41.66666667%;
+ }
+ .col-lg-4 {
+ width: 33.33333333%;
+ }
+ .col-lg-3 {
+ width: 25%;
+ }
+ .col-lg-2 {
+ width: 16.66666667%;
+ }
+ .col-lg-1 {
+ width: 8.33333333%;
+ }
+ .col-lg-pull-12 {
+ right: 100%;
+ }
+ .col-lg-pull-11 {
+ right: 91.66666667%;
+ }
+ .col-lg-pull-10 {
+ right: 83.33333333%;
+ }
+ .col-lg-pull-9 {
+ right: 75%;
+ }
+ .col-lg-pull-8 {
+ right: 66.66666667%;
+ }
+ .col-lg-pull-7 {
+ right: 58.33333333%;
+ }
+ .col-lg-pull-6 {
+ right: 50%;
+ }
+ .col-lg-pull-5 {
+ right: 41.66666667%;
+ }
+ .col-lg-pull-4 {
+ right: 33.33333333%;
+ }
+ .col-lg-pull-3 {
+ right: 25%;
+ }
+ .col-lg-pull-2 {
+ right: 16.66666667%;
+ }
+ .col-lg-pull-1 {
+ right: 8.33333333%;
+ }
+ .col-lg-pull-0 {
+ right: auto;
+ }
+ .col-lg-push-12 {
+ left: 100%;
+ }
+ .col-lg-push-11 {
+ left: 91.66666667%;
+ }
+ .col-lg-push-10 {
+ left: 83.33333333%;
+ }
+ .col-lg-push-9 {
+ left: 75%;
+ }
+ .col-lg-push-8 {
+ left: 66.66666667%;
+ }
+ .col-lg-push-7 {
+ left: 58.33333333%;
+ }
+ .col-lg-push-6 {
+ left: 50%;
+ }
+ .col-lg-push-5 {
+ left: 41.66666667%;
+ }
+ .col-lg-push-4 {
+ left: 33.33333333%;
+ }
+ .col-lg-push-3 {
+ left: 25%;
+ }
+ .col-lg-push-2 {
+ left: 16.66666667%;
+ }
+ .col-lg-push-1 {
+ left: 8.33333333%;
+ }
+ .col-lg-push-0 {
+ left: auto;
+ }
+ .col-lg-offset-12 {
+ margin-left: 100%;
+ }
+ .col-lg-offset-11 {
+ margin-left: 91.66666667%;
+ }
+ .col-lg-offset-10 {
+ margin-left: 83.33333333%;
+ }
+ .col-lg-offset-9 {
+ margin-left: 75%;
+ }
+ .col-lg-offset-8 {
+ margin-left: 66.66666667%;
+ }
+ .col-lg-offset-7 {
+ margin-left: 58.33333333%;
+ }
+ .col-lg-offset-6 {
+ margin-left: 50%;
+ }
+ .col-lg-offset-5 {
+ margin-left: 41.66666667%;
+ }
+ .col-lg-offset-4 {
+ margin-left: 33.33333333%;
+ }
+ .col-lg-offset-3 {
+ margin-left: 25%;
+ }
+ .col-lg-offset-2 {
+ margin-left: 16.66666667%;
+ }
+ .col-lg-offset-1 {
+ margin-left: 8.33333333%;
+ }
+ .col-lg-offset-0 {
+ margin-left: 0%;
+ }
+}
+table {
+ background-color: #2e3338;
+}
+caption {
+ padding-top: 8px;
+ padding-bottom: 8px;
+ color: #7a8288;
+ text-align: left;
+}
+th {
+ text-align: left;
+}
+.table {
+ width: 100%;
+ max-width: 100%;
+ margin-bottom: 20px;
+}
+.table > thead > tr > th,
+.table > tbody > tr > th,
+.table > tfoot > tr > th,
+.table > thead > tr > td,
+.table > tbody > tr > td,
+.table > tfoot > tr > td {
+ padding: 8px;
+ line-height: 1.42857143;
+ vertical-align: top;
+ border-top: 1px solid #1c1e22;
+}
+.table > thead > tr > th {
+ vertical-align: bottom;
+ border-bottom: 2px solid #1c1e22;
+}
+.table > caption + thead > tr:first-child > th,
+.table > colgroup + thead > tr:first-child > th,
+.table > thead:first-child > tr:first-child > th,
+.table > caption + thead > tr:first-child > td,
+.table > colgroup + thead > tr:first-child > td,
+.table > thead:first-child > tr:first-child > td {
+ border-top: 0;
+}
+.table > tbody + tbody {
+ border-top: 2px solid #1c1e22;
+}
+.table .table {
+ background-color: #272b30;
+}
+.table-condensed > thead > tr > th,
+.table-condensed > tbody > tr > th,
+.table-condensed > tfoot > tr > th,
+.table-condensed > thead > tr > td,
+.table-condensed > tbody > tr > td,
+.table-condensed > tfoot > tr > td {
+ padding: 5px;
+}
+.table-bordered {
+ border: 1px solid #1c1e22;
+}
+.table-bordered > thead > tr > th,
+.table-bordered > tbody > tr > th,
+.table-bordered > tfoot > tr > th,
+.table-bordered > thead > tr > td,
+.table-bordered > tbody > tr > td,
+.table-bordered > tfoot > tr > td {
+ border: 1px solid #1c1e22;
+}
+.table-bordered > thead > tr > th,
+.table-bordered > thead > tr > td {
+ border-bottom-width: 2px;
+}
+.table-striped > tbody > tr:nth-of-type(odd) {
+ background-color: #353a41;
+}
+.table-hover > tbody > tr:hover {
+ background-color: #49515a;
+}
+table col[class*="col-"] {
+ position: static;
+ float: none;
+ display: table-column;
+}
+table td[class*="col-"],
+table th[class*="col-"] {
+ position: static;
+ float: none;
+ display: table-cell;
+}
+.table > thead > tr > td.active,
+.table > tbody > tr > td.active,
+.table > tfoot > tr > td.active,
+.table > thead > tr > th.active,
+.table > tbody > tr > th.active,
+.table > tfoot > tr > th.active,
+.table > thead > tr.active > td,
+.table > tbody > tr.active > td,
+.table > tfoot > tr.active > td,
+.table > thead > tr.active > th,
+.table > tbody > tr.active > th,
+.table > tfoot > tr.active > th {
+ background-color: #49515a;
+}
+.table-hover > tbody > tr > td.active:hover,
+.table-hover > tbody > tr > th.active:hover,
+.table-hover > tbody > tr.active:hover > td,
+.table-hover > tbody > tr:hover > .active,
+.table-hover > tbody > tr.active:hover > th {
+ background-color: #3e444c;
+}
+.table > thead > tr > td.success,
+.table > tbody > tr > td.success,
+.table > tfoot > tr > td.success,
+.table > thead > tr > th.success,
+.table > tbody > tr > th.success,
+.table > tfoot > tr > th.success,
+.table > thead > tr.success > td,
+.table > tbody > tr.success > td,
+.table > tfoot > tr.success > td,
+.table > thead > tr.success > th,
+.table > tbody > tr.success > th,
+.table > tfoot > tr.success > th {
+ background-color: #62c462;
+}
+.table-hover > tbody > tr > td.success:hover,
+.table-hover > tbody > tr > th.success:hover,
+.table-hover > tbody > tr.success:hover > td,
+.table-hover > tbody > tr:hover > .success,
+.table-hover > tbody > tr.success:hover > th {
+ background-color: #4fbd4f;
+}
+.table > thead > tr > td.info,
+.table > tbody > tr > td.info,
+.table > tfoot > tr > td.info,
+.table > thead > tr > th.info,
+.table > tbody > tr > th.info,
+.table > tfoot > tr > th.info,
+.table > thead > tr.info > td,
+.table > tbody > tr.info > td,
+.table > tfoot > tr.info > td,
+.table > thead > tr.info > th,
+.table > tbody > tr.info > th,
+.table > tfoot > tr.info > th {
+ background-color: #5bc0de;
+}
+.table-hover > tbody > tr > td.info:hover,
+.table-hover > tbody > tr > th.info:hover,
+.table-hover > tbody > tr.info:hover > td,
+.table-hover > tbody > tr:hover > .info,
+.table-hover > tbody > tr.info:hover > th {
+ background-color: #46b8da;
+}
+.table > thead > tr > td.warning,
+.table > tbody > tr > td.warning,
+.table > tfoot > tr > td.warning,
+.table > thead > tr > th.warning,
+.table > tbody > tr > th.warning,
+.table > tfoot > tr > th.warning,
+.table > thead > tr.warning > td,
+.table > tbody > tr.warning > td,
+.table > tfoot > tr.warning > td,
+.table > thead > tr.warning > th,
+.table > tbody > tr.warning > th,
+.table > tfoot > tr.warning > th {
+ background-color: #f89406;
+}
+.table-hover > tbody > tr > td.warning:hover,
+.table-hover > tbody > tr > th.warning:hover,
+.table-hover > tbody > tr.warning:hover > td,
+.table-hover > tbody > tr:hover > .warning,
+.table-hover > tbody > tr.warning:hover > th {
+ background-color: #df8505;
+}
+.table > thead > tr > td.danger,
+.table > tbody > tr > td.danger,
+.table > tfoot > tr > td.danger,
+.table > thead > tr > th.danger,
+.table > tbody > tr > th.danger,
+.table > tfoot > tr > th.danger,
+.table > thead > tr.danger > td,
+.table > tbody > tr.danger > td,
+.table > tfoot > tr.danger > td,
+.table > thead > tr.danger > th,
+.table > tbody > tr.danger > th,
+.table > tfoot > tr.danger > th {
+ background-color: #ee5f5b;
+}
+.table-hover > tbody > tr > td.danger:hover,
+.table-hover > tbody > tr > th.danger:hover,
+.table-hover > tbody > tr.danger:hover > td,
+.table-hover > tbody > tr:hover > .danger,
+.table-hover > tbody > tr.danger:hover > th {
+ background-color: #ec4844;
+}
+.table-responsive {
+ overflow-x: auto;
+ min-height: 0.01%;
+}
+@media screen and (max-width: 767px) {
+ .table-responsive {
+ width: 100%;
+ margin-bottom: 15px;
+ overflow-y: hidden;
+ -ms-overflow-style: -ms-autohiding-scrollbar;
+ border: 1px solid #1c1e22;
+ }
+ .table-responsive > .table {
+ margin-bottom: 0;
+ }
+ .table-responsive > .table > thead > tr > th,
+ .table-responsive > .table > tbody > tr > th,
+ .table-responsive > .table > tfoot > tr > th,
+ .table-responsive > .table > thead > tr > td,
+ .table-responsive > .table > tbody > tr > td,
+ .table-responsive > .table > tfoot > tr > td {
+ white-space: nowrap;
+ }
+ .table-responsive > .table-bordered {
+ border: 0;
+ }
+ .table-responsive > .table-bordered > thead > tr > th:first-child,
+ .table-responsive > .table-bordered > tbody > tr > th:first-child,
+ .table-responsive > .table-bordered > tfoot > tr > th:first-child,
+ .table-responsive > .table-bordered > thead > tr > td:first-child,
+ .table-responsive > .table-bordered > tbody > tr > td:first-child,
+ .table-responsive > .table-bordered > tfoot > tr > td:first-child {
+ border-left: 0;
+ }
+ .table-responsive > .table-bordered > thead > tr > th:last-child,
+ .table-responsive > .table-bordered > tbody > tr > th:last-child,
+ .table-responsive > .table-bordered > tfoot > tr > th:last-child,
+ .table-responsive > .table-bordered > thead > tr > td:last-child,
+ .table-responsive > .table-bordered > tbody > tr > td:last-child,
+ .table-responsive > .table-bordered > tfoot > tr > td:last-child {
+ border-right: 0;
+ }
+ .table-responsive > .table-bordered > tbody > tr:last-child > th,
+ .table-responsive > .table-bordered > tfoot > tr:last-child > th,
+ .table-responsive > .table-bordered > tbody > tr:last-child > td,
+ .table-responsive > .table-bordered > tfoot > tr:last-child > td {
+ border-bottom: 0;
+ }
+}
+fieldset {
+ padding: 0;
+ margin: 0;
+ border: 0;
+ min-width: 0;
+}
+legend {
+ display: block;
+ width: 100%;
+ padding: 0;
+ margin-bottom: 20px;
+ font-size: 21px;
+ line-height: inherit;
+ color: #c8c8c8;
+ border: 0;
+ border-bottom: 1px solid #1c1e22;
+}
+label {
+ display: inline-block;
+ max-width: 100%;
+ margin-bottom: 5px;
+ font-weight: bold;
+}
+input[type="search"] {
+ -webkit-box-sizing: border-box;
+ -moz-box-sizing: border-box;
+ box-sizing: border-box;
+}
+input[type="radio"],
+input[type="checkbox"] {
+ margin: 4px 0 0;
+ margin-top: 1px \9;
+ line-height: normal;
+}
+input[type="file"] {
+ display: block;
+}
+input[type="range"] {
+ display: block;
+ width: 100%;
+}
+select[multiple],
+select[size] {
+ height: auto;
+}
+input[type="file"]:focus,
+input[type="radio"]:focus,
+input[type="checkbox"]:focus {
+ outline: 5px auto -webkit-focus-ring-color;
+ outline-offset: -2px;
+}
+output {
+ display: block;
+ padding-top: 9px;
+ font-size: 14px;
+ line-height: 1.42857143;
+ color: #272b30;
+}
+.form-control {
+ display: block;
+ width: 100%;
+ height: 38px;
+ padding: 8px 12px;
+ font-size: 14px;
+ line-height: 1.42857143;
+ color: #272b30;
+ background-color: #ffffff;
+ background-image: none;
+ border: 1px solid #000000;
+ border-radius: 4px;
+ -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075);
+ box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075);
+ -webkit-transition: border-color ease-in-out .15s, -webkit-box-shadow ease-in-out .15s;
+ -o-transition: border-color ease-in-out .15s, box-shadow ease-in-out .15s;
+ transition: border-color ease-in-out .15s, box-shadow ease-in-out .15s;
+}
+.form-control:focus {
+ border-color: #66afe9;
+ outline: 0;
+ -webkit-box-shadow: inset 0 1px 1px rgba(0,0,0,.075), 0 0 8px rgba(102, 175, 233, 0.6);
+ box-shadow: inset 0 1px 1px rgba(0,0,0,.075), 0 0 8px rgba(102, 175, 233, 0.6);
+}
+.form-control::-moz-placeholder {
+ color: #7a8288;
+ opacity: 1;
+}
+.form-control:-ms-input-placeholder {
+ color: #7a8288;
+}
+.form-control::-webkit-input-placeholder {
+ color: #7a8288;
+}
+.form-control::-ms-expand {
+ border: 0;
+ background-color: transparent;
+}
+.form-control[disabled],
+.form-control[readonly],
+fieldset[disabled] .form-control {
+ background-color: #999999;
+ opacity: 1;
+}
+.form-control[disabled],
+fieldset[disabled] .form-control {
+ cursor: not-allowed;
+}
+textarea.form-control {
+ height: auto;
+}
+input[type="search"] {
+ -webkit-appearance: none;
+}
+@media screen and (-webkit-min-device-pixel-ratio: 0) {
+ input[type="date"].form-control,
+ input[type="time"].form-control,
+ input[type="datetime-local"].form-control,
+ input[type="month"].form-control {
+ line-height: 38px;
+ }
+ input[type="date"].input-sm,
+ input[type="time"].input-sm,
+ input[type="datetime-local"].input-sm,
+ input[type="month"].input-sm,
+ .input-group-sm input[type="date"],
+ .input-group-sm input[type="time"],
+ .input-group-sm input[type="datetime-local"],
+ .input-group-sm input[type="month"] {
+ line-height: 30px;
+ }
+ input[type="date"].input-lg,
+ input[type="time"].input-lg,
+ input[type="datetime-local"].input-lg,
+ input[type="month"].input-lg,
+ .input-group-lg input[type="date"],
+ .input-group-lg input[type="time"],
+ .input-group-lg input[type="datetime-local"],
+ .input-group-lg input[type="month"] {
+ line-height: 54px;
+ }
+}
+.form-group {
+ margin-bottom: 15px;
+}
+.radio,
+.checkbox {
+ position: relative;
+ display: block;
+ margin-top: 10px;
+ margin-bottom: 10px;
+}
+.radio label,
+.checkbox label {
+ min-height: 20px;
+ padding-left: 20px;
+ margin-bottom: 0;
+ font-weight: normal;
+ cursor: pointer;
+}
+.radio input[type="radio"],
+.radio-inline input[type="radio"],
+.checkbox input[type="checkbox"],
+.checkbox-inline input[type="checkbox"] {
+ position: absolute;
+ margin-left: -20px;
+ margin-top: 4px \9;
+}
+.radio + .radio,
+.checkbox + .checkbox {
+ margin-top: -5px;
+}
+.radio-inline,
+.checkbox-inline {
+ position: relative;
+ display: inline-block;
+ padding-left: 20px;
+ margin-bottom: 0;
+ vertical-align: middle;
+ font-weight: normal;
+ cursor: pointer;
+}
+.radio-inline + .radio-inline,
+.checkbox-inline + .checkbox-inline {
+ margin-top: 0;
+ margin-left: 10px;
+}
+input[type="radio"][disabled],
+input[type="checkbox"][disabled],
+input[type="radio"].disabled,
+input[type="checkbox"].disabled,
+fieldset[disabled] input[type="radio"],
+fieldset[disabled] input[type="checkbox"] {
+ cursor: not-allowed;
+}
+.radio-inline.disabled,
+.checkbox-inline.disabled,
+fieldset[disabled] .radio-inline,
+fieldset[disabled] .checkbox-inline {
+ cursor: not-allowed;
+}
+.radio.disabled label,
+.checkbox.disabled label,
+fieldset[disabled] .radio label,
+fieldset[disabled] .checkbox label {
+ cursor: not-allowed;
+}
+.form-control-static {
+ padding-top: 9px;
+ padding-bottom: 9px;
+ margin-bottom: 0;
+ min-height: 34px;
+}
+.form-control-static.input-lg,
+.form-control-static.input-sm {
+ padding-left: 0;
+ padding-right: 0;
+}
+.input-sm {
+ height: 30px;
+ padding: 5px 10px;
+ font-size: 12px;
+ line-height: 1.5;
+ border-radius: 3px;
+}
+select.input-sm {
+ height: 30px;
+ line-height: 30px;
+}
+textarea.input-sm,
+select[multiple].input-sm {
+ height: auto;
+}
+.form-group-sm .form-control {
+ height: 30px;
+ padding: 5px 10px;
+ font-size: 12px;
+ line-height: 1.5;
+ border-radius: 3px;
+}
+.form-group-sm select.form-control {
+ height: 30px;
+ line-height: 30px;
+}
+.form-group-sm textarea.form-control,
+.form-group-sm select[multiple].form-control {
+ height: auto;
+}
+.form-group-sm .form-control-static {
+ height: 30px;
+ min-height: 32px;
+ padding: 6px 10px;
+ font-size: 12px;
+ line-height: 1.5;
+}
+.input-lg {
+ height: 54px;
+ padding: 14px 16px;
+ font-size: 18px;
+ line-height: 1.3333333;
+ border-radius: 6px;
+}
+select.input-lg {
+ height: 54px;
+ line-height: 54px;
+}
+textarea.input-lg,
+select[multiple].input-lg {
+ height: auto;
+}
+.form-group-lg .form-control {
+ height: 54px;
+ padding: 14px 16px;
+ font-size: 18px;
+ line-height: 1.3333333;
+ border-radius: 6px;
+}
+.form-group-lg select.form-control {
+ height: 54px;
+ line-height: 54px;
+}
+.form-group-lg textarea.form-control,
+.form-group-lg select[multiple].form-control {
+ height: auto;
+}
+.form-group-lg .form-control-static {
+ height: 54px;
+ min-height: 38px;
+ padding: 15px 16px;
+ font-size: 18px;
+ line-height: 1.3333333;
+}
+.has-feedback {
+ position: relative;
+}
+.has-feedback .form-control {
+ padding-right: 47.5px;
+}
+.form-control-feedback {
+ position: absolute;
+ top: 0;
+ right: 0;
+ z-index: 2;
+ display: block;
+ width: 38px;
+ height: 38px;
+ line-height: 38px;
+ text-align: center;
+ pointer-events: none;
+}
+.input-lg + .form-control-feedback,
+.input-group-lg + .form-control-feedback,
+.form-group-lg .form-control + .form-control-feedback {
+ width: 54px;
+ height: 54px;
+ line-height: 54px;
+}
+.input-sm + .form-control-feedback,
+.input-group-sm + .form-control-feedback,
+.form-group-sm .form-control + .form-control-feedback {
+ width: 30px;
+ height: 30px;
+ line-height: 30px;
+}
+.has-success .help-block,
+.has-success .control-label,
+.has-success .radio,
+.has-success .checkbox,
+.has-success .radio-inline,
+.has-success .checkbox-inline,
+.has-success.radio label,
+.has-success.checkbox label,
+.has-success.radio-inline label,
+.has-success.checkbox-inline label {
+ color: #ffffff;
+}
+.has-success .form-control {
+ border-color: #ffffff;
+ -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075);
+ box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075);
+}
+.has-success .form-control:focus {
+ border-color: #e6e6e6;
+ -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075), 0 0 6px #ffffff;
+ box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075), 0 0 6px #ffffff;
+}
+.has-success .input-group-addon {
+ color: #ffffff;
+ border-color: #ffffff;
+ background-color: #62c462;
+}
+.has-success .form-control-feedback {
+ color: #ffffff;
+}
+.has-warning .help-block,
+.has-warning .control-label,
+.has-warning .radio,
+.has-warning .checkbox,
+.has-warning .radio-inline,
+.has-warning .checkbox-inline,
+.has-warning.radio label,
+.has-warning.checkbox label,
+.has-warning.radio-inline label,
+.has-warning.checkbox-inline label {
+ color: #ffffff;
+}
+.has-warning .form-control {
+ border-color: #ffffff;
+ -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075);
+ box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075);
+}
+.has-warning .form-control:focus {
+ border-color: #e6e6e6;
+ -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075), 0 0 6px #ffffff;
+ box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075), 0 0 6px #ffffff;
+}
+.has-warning .input-group-addon {
+ color: #ffffff;
+ border-color: #ffffff;
+ background-color: #f89406;
+}
+.has-warning .form-control-feedback {
+ color: #ffffff;
+}
+.has-error .help-block,
+.has-error .control-label,
+.has-error .radio,
+.has-error .checkbox,
+.has-error .radio-inline,
+.has-error .checkbox-inline,
+.has-error.radio label,
+.has-error.checkbox label,
+.has-error.radio-inline label,
+.has-error.checkbox-inline label {
+ color: #ffffff;
+}
+.has-error .form-control {
+ border-color: #ffffff;
+ -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075);
+ box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075);
+}
+.has-error .form-control:focus {
+ border-color: #e6e6e6;
+ -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075), 0 0 6px #ffffff;
+ box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075), 0 0 6px #ffffff;
+}
+.has-error .input-group-addon {
+ color: #ffffff;
+ border-color: #ffffff;
+ background-color: #ee5f5b;
+}
+.has-error .form-control-feedback {
+ color: #ffffff;
+}
+.has-feedback label ~ .form-control-feedback {
+ top: 25px;
+}
+.has-feedback label.sr-only ~ .form-control-feedback {
+ top: 0;
+}
+.help-block {
+ display: block;
+ margin-top: 5px;
+ margin-bottom: 10px;
+ color: #ffffff;
+}
+@media (min-width: 768px) {
+ .form-inline .form-group {
+ display: inline-block;
+ margin-bottom: 0;
+ vertical-align: middle;
+ }
+ .form-inline .form-control {
+ display: inline-block;
+ width: auto;
+ vertical-align: middle;
+ }
+ .form-inline .form-control-static {
+ display: inline-block;
+ }
+ .form-inline .input-group {
+ display: inline-table;
+ vertical-align: middle;
+ }
+ .form-inline .input-group .input-group-addon,
+ .form-inline .input-group .input-group-btn,
+ .form-inline .input-group .form-control {
+ width: auto;
+ }
+ .form-inline .input-group > .form-control {
+ width: 100%;
+ }
+ .form-inline .control-label {
+ margin-bottom: 0;
+ vertical-align: middle;
+ }
+ .form-inline .radio,
+ .form-inline .checkbox {
+ display: inline-block;
+ margin-top: 0;
+ margin-bottom: 0;
+ vertical-align: middle;
+ }
+ .form-inline .radio label,
+ .form-inline .checkbox label {
+ padding-left: 0;
+ }
+ .form-inline .radio input[type="radio"],
+ .form-inline .checkbox input[type="checkbox"] {
+ position: relative;
+ margin-left: 0;
+ }
+ .form-inline .has-feedback .form-control-feedback {
+ top: 0;
+ }
+}
+.form-horizontal .radio,
+.form-horizontal .checkbox,
+.form-horizontal .radio-inline,
+.form-horizontal .checkbox-inline {
+ margin-top: 0;
+ margin-bottom: 0;
+ padding-top: 9px;
+}
+.form-horizontal .radio,
+.form-horizontal .checkbox {
+ min-height: 29px;
+}
+.form-horizontal .form-group {
+ margin-left: -15px;
+ margin-right: -15px;
+}
+@media (min-width: 768px) {
+ .form-horizontal .control-label {
+ text-align: right;
+ margin-bottom: 0;
+ padding-top: 9px;
+ }
+}
+.form-horizontal .has-feedback .form-control-feedback {
+ right: 15px;
+}
+@media (min-width: 768px) {
+ .form-horizontal .form-group-lg .control-label {
+ padding-top: 15px;
+ font-size: 18px;
+ }
+}
+@media (min-width: 768px) {
+ .form-horizontal .form-group-sm .control-label {
+ padding-top: 6px;
+ font-size: 12px;
+ }
+}
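+/* Buttons: base styles, focus/active/disabled states, contextual variants and sizes */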
+.btn {
+ display: inline-block;
+ margin-bottom: 0;
+ font-weight: normal;
+ text-align: center;
+ vertical-align: middle;
+ -ms-touch-action: manipulation;
+ touch-action: manipulation;
+ cursor: pointer;
+ background-image: none;
+ border: 0px solid transparent;
+ white-space: nowrap;
+ padding: 8px 12px;
+ font-size: 14px;
+ line-height: 1.42857143;
+ border-radius: 4px;
+ -webkit-user-select: none;
+ -moz-user-select: none;
+ -ms-user-select: none;
+ user-select: none;
+}
+.btn:focus,
+.btn:active:focus,
+.btn.active:focus,
+.btn.focus,
+.btn:active.focus,
+.btn.active.focus {
+ outline: 5px auto -webkit-focus-ring-color;
+ outline-offset: -2px;
+}
+.btn:hover,
+.btn:focus,
+.btn.focus {
+ color: #ffffff;
+ text-decoration: none;
+}
+.btn:active,
+.btn.active {
+ outline: 0;
+ background-image: none;
+ -webkit-box-shadow: inset 0 3px 5px rgba(0, 0, 0, 0.125);
+ box-shadow: inset 0 3px 5px rgba(0, 0, 0, 0.125);
+}
+.btn.disabled,
+.btn[disabled],
+fieldset[disabled] .btn {
+ cursor: not-allowed;
+ opacity: 0.65;
+ filter: alpha(opacity=65);
+ -webkit-box-shadow: none;
+ box-shadow: none;
+}
+a.btn.disabled,
+fieldset[disabled] a.btn {
+ pointer-events: none;
+}
+.btn-default {
+ color: #ffffff;
+ background-color: #3a3f44;
+ border-color: #3a3f44;
+}
+.btn-default:focus,
+.btn-default.focus {
+ color: #ffffff;
+ background-color: #232628;
+ border-color: #000000;
+}
+.btn-default:hover {
+ color: #ffffff;
+ background-color: #232628;
+ border-color: #1e2023;
+}
+.btn-default:active,
+.btn-default.active,
+.open > .dropdown-toggle.btn-default {
+ color: #ffffff;
+ background-color: #232628;
+ border-color: #1e2023;
+}
+.btn-default:active:hover,
+.btn-default.active:hover,
+.open > .dropdown-toggle.btn-default:hover,
+.btn-default:active:focus,
+.btn-default.active:focus,
+.open > .dropdown-toggle.btn-default:focus,
+.btn-default:active.focus,
+.btn-default.active.focus,
+.open > .dropdown-toggle.btn-default.focus {
+ color: #ffffff;
+ background-color: #121415;
+ border-color: #000000;
+}
+.btn-default:active,
+.btn-default.active,
+.open > .dropdown-toggle.btn-default {
+ background-image: none;
+}
+.btn-default.disabled:hover,
+.btn-default[disabled]:hover,
+fieldset[disabled] .btn-default:hover,
+.btn-default.disabled:focus,
+.btn-default[disabled]:focus,
+fieldset[disabled] .btn-default:focus,
+.btn-default.disabled.focus,
+.btn-default[disabled].focus,
+fieldset[disabled] .btn-default.focus {
+ background-color: #3a3f44;
+ border-color: #3a3f44;
+}
+.btn-default .badge {
+ color: #3a3f44;
+ background-color: #ffffff;
+}
+.btn-primary {
+ color: #ffffff;
+ background-color: #7a8288;
+ border-color: #7a8288;
+}
+.btn-primary:focus,
+.btn-primary.focus {
+ color: #ffffff;
+ background-color: #62686d;
+ border-color: #3e4245;
+}
+.btn-primary:hover {
+ color: #ffffff;
+ background-color: #62686d;
+ border-color: #5d6368;
+}
+.btn-primary:active,
+.btn-primary.active,
+.open > .dropdown-toggle.btn-primary {
+ color: #ffffff;
+ background-color: #62686d;
+ border-color: #5d6368;
+}
+.btn-primary:active:hover,
+.btn-primary.active:hover,
+.open > .dropdown-toggle.btn-primary:hover,
+.btn-primary:active:focus,
+.btn-primary.active:focus,
+.open > .dropdown-toggle.btn-primary:focus,
+.btn-primary:active.focus,
+.btn-primary.active.focus,
+.open > .dropdown-toggle.btn-primary.focus {
+ color: #ffffff;
+ background-color: #51565a;
+ border-color: #3e4245;
+}
+.btn-primary:active,
+.btn-primary.active,
+.open > .dropdown-toggle.btn-primary {
+ background-image: none;
+}
+.btn-primary.disabled:hover,
+.btn-primary[disabled]:hover,
+fieldset[disabled] .btn-primary:hover,
+.btn-primary.disabled:focus,
+.btn-primary[disabled]:focus,
+fieldset[disabled] .btn-primary:focus,
+.btn-primary.disabled.focus,
+.btn-primary[disabled].focus,
+fieldset[disabled] .btn-primary.focus {
+ background-color: #7a8288;
+ border-color: #7a8288;
+}
+.btn-primary .badge {
+ color: #7a8288;
+ background-color: #ffffff;
+}
+.btn-success {
+ color: #ffffff;
+ background-color: #62c462;
+ border-color: #62c462;
+}
+.btn-success:focus,
+.btn-success.focus {
+ color: #ffffff;
+ background-color: #42b142;
+ border-color: #2d792d;
+}
+.btn-success:hover {
+ color: #ffffff;
+ background-color: #42b142;
+ border-color: #40a940;
+}
+.btn-success:active,
+.btn-success.active,
+.open > .dropdown-toggle.btn-success {
+ color: #ffffff;
+ background-color: #42b142;
+ border-color: #40a940;
+}
+.btn-success:active:hover,
+.btn-success.active:hover,
+.open > .dropdown-toggle.btn-success:hover,
+.btn-success:active:focus,
+.btn-success.active:focus,
+.open > .dropdown-toggle.btn-success:focus,
+.btn-success:active.focus,
+.btn-success.active.focus,
+.open > .dropdown-toggle.btn-success.focus {
+ color: #ffffff;
+ background-color: #399739;
+ border-color: #2d792d;
+}
+.btn-success:active,
+.btn-success.active,
+.open > .dropdown-toggle.btn-success {
+ background-image: none;
+}
+.btn-success.disabled:hover,
+.btn-success[disabled]:hover,
+fieldset[disabled] .btn-success:hover,
+.btn-success.disabled:focus,
+.btn-success[disabled]:focus,
+fieldset[disabled] .btn-success:focus,
+.btn-success.disabled.focus,
+.btn-success[disabled].focus,
+fieldset[disabled] .btn-success.focus {
+ background-color: #62c462;
+ border-color: #62c462;
+}
+.btn-success .badge {
+ color: #62c462;
+ background-color: #ffffff;
+}
+.btn-info {
+ color: #ffffff;
+ background-color: #5bc0de;
+ border-color: #5bc0de;
+}
+.btn-info:focus,
+.btn-info.focus {
+ color: #ffffff;
+ background-color: #31b0d5;
+ border-color: #1f7e9a;
+}
+.btn-info:hover {
+ color: #ffffff;
+ background-color: #31b0d5;
+ border-color: #2aabd2;
+}
+.btn-info:active,
+.btn-info.active,
+.open > .dropdown-toggle.btn-info {
+ color: #ffffff;
+ background-color: #31b0d5;
+ border-color: #2aabd2;
+}
+.btn-info:active:hover,
+.btn-info.active:hover,
+.open > .dropdown-toggle.btn-info:hover,
+.btn-info:active:focus,
+.btn-info.active:focus,
+.open > .dropdown-toggle.btn-info:focus,
+.btn-info:active.focus,
+.btn-info.active.focus,
+.open > .dropdown-toggle.btn-info.focus {
+ color: #ffffff;
+ background-color: #269abc;
+ border-color: #1f7e9a;
+}
+.btn-info:active,
+.btn-info.active,
+.open > .dropdown-toggle.btn-info {
+ background-image: none;
+}
+.btn-info.disabled:hover,
+.btn-info[disabled]:hover,
+fieldset[disabled] .btn-info:hover,
+.btn-info.disabled:focus,
+.btn-info[disabled]:focus,
+fieldset[disabled] .btn-info:focus,
+.btn-info.disabled.focus,
+.btn-info[disabled].focus,
+fieldset[disabled] .btn-info.focus {
+ background-color: #5bc0de;
+ border-color: #5bc0de;
+}
+.btn-info .badge {
+ color: #5bc0de;
+ background-color: #ffffff;
+}
+.btn-warning {
+ color: #ffffff;
+ background-color: #f89406;
+ border-color: #f89406;
+}
+.btn-warning:focus,
+.btn-warning.focus {
+ color: #ffffff;
+ background-color: #c67605;
+ border-color: #7c4a03;
+}
+.btn-warning:hover {
+ color: #ffffff;
+ background-color: #c67605;
+ border-color: #bc7005;
+}
+.btn-warning:active,
+.btn-warning.active,
+.open > .dropdown-toggle.btn-warning {
+ color: #ffffff;
+ background-color: #c67605;
+ border-color: #bc7005;
+}
+.btn-warning:active:hover,
+.btn-warning.active:hover,
+.open > .dropdown-toggle.btn-warning:hover,
+.btn-warning:active:focus,
+.btn-warning.active:focus,
+.open > .dropdown-toggle.btn-warning:focus,
+.btn-warning:active.focus,
+.btn-warning.active.focus,
+.open > .dropdown-toggle.btn-warning.focus {
+ color: #ffffff;
+ background-color: #a36104;
+ border-color: #7c4a03;
+}
+.btn-warning:active,
+.btn-warning.active,
+.open > .dropdown-toggle.btn-warning {
+ background-image: none;
+}
+.btn-warning.disabled:hover,
+.btn-warning[disabled]:hover,
+fieldset[disabled] .btn-warning:hover,
+.btn-warning.disabled:focus,
+.btn-warning[disabled]:focus,
+fieldset[disabled] .btn-warning:focus,
+.btn-warning.disabled.focus,
+.btn-warning[disabled].focus,
+fieldset[disabled] .btn-warning.focus {
+ background-color: #f89406;
+ border-color: #f89406;
+}
+.btn-warning .badge {
+ color: #f89406;
+ background-color: #ffffff;
+}
+.btn-danger {
+ color: #ffffff;
+ background-color: #ee5f5b;
+ border-color: #ee5f5b;
+}
+.btn-danger:focus,
+.btn-danger.focus {
+ color: #ffffff;
+ background-color: #e9322d;
+ border-color: #b71713;
+}
+.btn-danger:hover {
+ color: #ffffff;
+ background-color: #e9322d;
+ border-color: #e82924;
+}
+.btn-danger:active,
+.btn-danger.active,
+.open > .dropdown-toggle.btn-danger {
+ color: #ffffff;
+ background-color: #e9322d;
+ border-color: #e82924;
+}
+.btn-danger:active:hover,
+.btn-danger.active:hover,
+.open > .dropdown-toggle.btn-danger:hover,
+.btn-danger:active:focus,
+.btn-danger.active:focus,
+.open > .dropdown-toggle.btn-danger:focus,
+.btn-danger:active.focus,
+.btn-danger.active.focus,
+.open > .dropdown-toggle.btn-danger.focus {
+ color: #ffffff;
+ background-color: #dc1c17;
+ border-color: #b71713;
+}
+.btn-danger:active,
+.btn-danger.active,
+.open > .dropdown-toggle.btn-danger {
+ background-image: none;
+}
+.btn-danger.disabled:hover,
+.btn-danger[disabled]:hover,
+fieldset[disabled] .btn-danger:hover,
+.btn-danger.disabled:focus,
+.btn-danger[disabled]:focus,
+fieldset[disabled] .btn-danger:focus,
+.btn-danger.disabled.focus,
+.btn-danger[disabled].focus,
+fieldset[disabled] .btn-danger.focus {
+ background-color: #ee5f5b;
+ border-color: #ee5f5b;
+}
+.btn-danger .badge {
+ color: #ee5f5b;
+ background-color: #ffffff;
+}
+.btn-link {
+ color: #ffffff;
+ font-weight: normal;
+ border-radius: 0;
+}
+.btn-link,
+.btn-link:active,
+.btn-link.active,
+.btn-link[disabled],
+fieldset[disabled] .btn-link {
+ background-color: transparent;
+ -webkit-box-shadow: none;
+ box-shadow: none;
+}
+.btn-link,
+.btn-link:hover,
+.btn-link:focus,
+.btn-link:active {
+ border-color: transparent;
+}
+.btn-link:hover,
+.btn-link:focus {
+ color: #ffffff;
+ text-decoration: underline;
+ background-color: transparent;
+}
+.btn-link[disabled]:hover,
+fieldset[disabled] .btn-link:hover,
+.btn-link[disabled]:focus,
+fieldset[disabled] .btn-link:focus {
+ color: #7a8288;
+ text-decoration: none;
+}
+.btn-lg,
+.btn-group-lg > .btn {
+ padding: 14px 16px;
+ font-size: 18px;
+ line-height: 1.3333333;
+ border-radius: 6px;
+}
+.btn-sm,
+.btn-group-sm > .btn {
+ padding: 5px 10px;
+ font-size: 12px;
+ line-height: 1.5;
+ border-radius: 3px;
+}
+.btn-xs,
+.btn-group-xs > .btn {
+ padding: 1px 5px;
+ font-size: 12px;
+ line-height: 1.5;
+ border-radius: 3px;
+}
+.btn-block {
+ display: block;
+ width: 100%;
+}
+.btn-block + .btn-block {
+ margin-top: 5px;
+}
+input[type="submit"].btn-block,
+input[type="reset"].btn-block,
+input[type="button"].btn-block {
+ width: 100%;
+}
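+/* Component animations: fade and collapse */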
+.fade {
+ opacity: 0;
+ -webkit-transition: opacity 0.15s linear;
+ -o-transition: opacity 0.15s linear;
+ transition: opacity 0.15s linear;
+}
+.fade.in {
+ opacity: 1;
+}
+.collapse {
+ display: none;
+}
+.collapse.in {
+ display: block;
+}
+tr.collapse.in {
+ display: table-row;
+}
+tbody.collapse.in {
+ display: table-row-group;
+}
+.collapsing {
+ position: relative;
+ height: 0;
+ overflow: hidden;
+ -webkit-transition-property: height, visibility;
+ -o-transition-property: height, visibility;
+ transition-property: height, visibility;
+ -webkit-transition-duration: 0.35s;
+ -o-transition-duration: 0.35s;
+ transition-duration: 0.35s;
+ -webkit-transition-timing-function: ease;
+ -o-transition-timing-function: ease;
+ transition-timing-function: ease;
+}
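+/* Dropdowns: caret, menus, dividers, headers and alignment helpers */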
+.caret {
+ display: inline-block;
+ width: 0;
+ height: 0;
+ margin-left: 2px;
+ vertical-align: middle;
+ border-top: 4px dashed;
+ border-top: 4px solid \9;
+ border-right: 4px solid transparent;
+ border-left: 4px solid transparent;
+}
+.dropup,
+.dropdown {
+ position: relative;
+}
+.dropdown-toggle:focus {
+ outline: 0;
+}
+.dropdown-menu {
+ position: absolute;
+ top: 100%;
+ left: 0;
+ z-index: 1000;
+ display: none;
+ float: left;
+ min-width: 160px;
+ padding: 5px 0;
+ margin: 2px 0 0;
+ list-style: none;
+ font-size: 14px;
+ text-align: left;
+ background-color: #3a3f44;
+ border: 1px solid #272b30;
+ border: 1px solid rgba(0, 0, 0, 0.15);
+ border-radius: 4px;
+ -webkit-box-shadow: 0 6px 12px rgba(0, 0, 0, 0.175);
+ box-shadow: 0 6px 12px rgba(0, 0, 0, 0.175);
+ -webkit-background-clip: padding-box;
+ background-clip: padding-box;
+}
+.dropdown-menu.pull-right {
+ right: 0;
+ left: auto;
+}
+.dropdown-menu .divider {
+ height: 1px;
+ margin: 9px 0;
+ overflow: hidden;
+ background-color: #272b30;
+}
+.dropdown-menu > li > a {
+ display: block;
+ padding: 3px 20px;
+ clear: both;
+ font-weight: normal;
+ line-height: 1.42857143;
+ color: #c8c8c8;
+ white-space: nowrap;
+}
+.dropdown-menu > li > a:hover,
+.dropdown-menu > li > a:focus {
+ text-decoration: none;
+ color: #ffffff;
+ background-color: #272b30;
+}
+.dropdown-menu > .active > a,
+.dropdown-menu > .active > a:hover,
+.dropdown-menu > .active > a:focus {
+ color: #ffffff;
+ text-decoration: none;
+ outline: 0;
+ background-color: #272b30;
+}
+.dropdown-menu > .disabled > a,
+.dropdown-menu > .disabled > a:hover,
+.dropdown-menu > .disabled > a:focus {
+ color: #7a8288;
+}
+.dropdown-menu > .disabled > a:hover,
+.dropdown-menu > .disabled > a:focus {
+ text-decoration: none;
+ background-color: transparent;
+ background-image: none;
+ filter: progid:DXImageTransform.Microsoft.gradient(enabled = false);
+ cursor: not-allowed;
+}
+.open > .dropdown-menu {
+ display: block;
+}
+.open > a {
+ outline: 0;
+}
+.dropdown-menu-right {
+ left: auto;
+ right: 0;
+}
+.dropdown-menu-left {
+ left: 0;
+ right: auto;
+}
+.dropdown-header {
+ display: block;
+ padding: 3px 20px;
+ font-size: 12px;
+ line-height: 1.42857143;
+ color: #7a8288;
+ white-space: nowrap;
+}
+.dropdown-backdrop {
+ position: fixed;
+ left: 0;
+ right: 0;
+ bottom: 0;
+ top: 0;
+ z-index: 990;
+}
+.pull-right > .dropdown-menu {
+ right: 0;
+ left: auto;
+}
+.dropup .caret,
+.navbar-fixed-bottom .dropdown .caret {
+ border-top: 0;
+ border-bottom: 4px dashed;
+ border-bottom: 4px solid \9;
+ content: "";
+}
+.dropup .dropdown-menu,
+.navbar-fixed-bottom .dropdown .dropdown-menu {
+ top: auto;
+ bottom: 100%;
+ margin-bottom: 2px;
+}
+@media (min-width: 768px) {
+ .navbar-right .dropdown-menu {
+ left: auto;
+ right: 0;
+ }
+ .navbar-right .dropdown-menu-left {
+ left: 0;
+ right: auto;
+ }
+}
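+/* Button groups: horizontal, vertical and justified layouts */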
+.btn-group,
+.btn-group-vertical {
+ position: relative;
+ display: inline-block;
+ vertical-align: middle;
+}
+.btn-group > .btn,
+.btn-group-vertical > .btn {
+ position: relative;
+ float: left;
+}
+.btn-group > .btn:hover,
+.btn-group-vertical > .btn:hover,
+.btn-group > .btn:focus,
+.btn-group-vertical > .btn:focus,
+.btn-group > .btn:active,
+.btn-group-vertical > .btn:active,
+.btn-group > .btn.active,
+.btn-group-vertical > .btn.active {
+ z-index: 2;
+}
+.btn-group .btn + .btn,
+.btn-group .btn + .btn-group,
+.btn-group .btn-group + .btn,
+.btn-group .btn-group + .btn-group {
+ margin-left: -1px;
+}
+.btn-toolbar {
+ margin-left: -5px;
+}
+.btn-toolbar .btn,
+.btn-toolbar .btn-group,
+.btn-toolbar .input-group {
+ float: left;
+}
+.btn-toolbar > .btn,
+.btn-toolbar > .btn-group,
+.btn-toolbar > .input-group {
+ margin-left: 5px;
+}
+.btn-group > .btn:not(:first-child):not(:last-child):not(.dropdown-toggle) {
+ border-radius: 0;
+}
+.btn-group > .btn:first-child {
+ margin-left: 0;
+}
+.btn-group > .btn:first-child:not(:last-child):not(.dropdown-toggle) {
+ border-bottom-right-radius: 0;
+ border-top-right-radius: 0;
+}
+.btn-group > .btn:last-child:not(:first-child),
+.btn-group > .dropdown-toggle:not(:first-child) {
+ border-bottom-left-radius: 0;
+ border-top-left-radius: 0;
+}
+.btn-group > .btn-group {
+ float: left;
+}
+.btn-group > .btn-group:not(:first-child):not(:last-child) > .btn {
+ border-radius: 0;
+}
+.btn-group > .btn-group:first-child:not(:last-child) > .btn:last-child,
+.btn-group > .btn-group:first-child:not(:last-child) > .dropdown-toggle {
+ border-bottom-right-radius: 0;
+ border-top-right-radius: 0;
+}
+.btn-group > .btn-group:last-child:not(:first-child) > .btn:first-child {
+ border-bottom-left-radius: 0;
+ border-top-left-radius: 0;
+}
+.btn-group .dropdown-toggle:active,
+.btn-group.open .dropdown-toggle {
+ outline: 0;
+}
+.btn-group > .btn + .dropdown-toggle {
+ padding-left: 8px;
+ padding-right: 8px;
+}
+.btn-group > .btn-lg + .dropdown-toggle {
+ padding-left: 12px;
+ padding-right: 12px;
+}
+.btn-group.open .dropdown-toggle {
+ -webkit-box-shadow: inset 0 3px 5px rgba(0, 0, 0, 0.125);
+ box-shadow: inset 0 3px 5px rgba(0, 0, 0, 0.125);
+}
+.btn-group.open .dropdown-toggle.btn-link {
+ -webkit-box-shadow: none;
+ box-shadow: none;
+}
+.btn .caret {
+ margin-left: 0;
+}
+.btn-lg .caret {
+ border-width: 5px 5px 0;
+ border-bottom-width: 0;
+}
+.dropup .btn-lg .caret {
+ border-width: 0 5px 5px;
+}
+.btn-group-vertical > .btn,
+.btn-group-vertical > .btn-group,
+.btn-group-vertical > .btn-group > .btn {
+ display: block;
+ float: none;
+ width: 100%;
+ max-width: 100%;
+}
+.btn-group-vertical > .btn-group > .btn {
+ float: none;
+}
+.btn-group-vertical > .btn + .btn,
+.btn-group-vertical > .btn + .btn-group,
+.btn-group-vertical > .btn-group + .btn,
+.btn-group-vertical > .btn-group + .btn-group {
+ margin-top: -1px;
+ margin-left: 0;
+}
+.btn-group-vertical > .btn:not(:first-child):not(:last-child) {
+ border-radius: 0;
+}
+.btn-group-vertical > .btn:first-child:not(:last-child) {
+ border-top-right-radius: 4px;
+ border-top-left-radius: 4px;
+ border-bottom-right-radius: 0;
+ border-bottom-left-radius: 0;
+}
+.btn-group-vertical > .btn:last-child:not(:first-child) {
+ border-top-right-radius: 0;
+ border-top-left-radius: 0;
+ border-bottom-right-radius: 4px;
+ border-bottom-left-radius: 4px;
+}
+.btn-group-vertical > .btn-group:not(:first-child):not(:last-child) > .btn {
+ border-radius: 0;
+}
+.btn-group-vertical > .btn-group:first-child:not(:last-child) > .btn:last-child,
+.btn-group-vertical > .btn-group:first-child:not(:last-child) > .dropdown-toggle {
+ border-bottom-right-radius: 0;
+ border-bottom-left-radius: 0;
+}
+.btn-group-vertical > .btn-group:last-child:not(:first-child) > .btn:first-child {
+ border-top-right-radius: 0;
+ border-top-left-radius: 0;
+}
+.btn-group-justified {
+ display: table;
+ width: 100%;
+ table-layout: fixed;
+ border-collapse: separate;
+}
+.btn-group-justified > .btn,
+.btn-group-justified > .btn-group {
+ float: none;
+ display: table-cell;
+ width: 1%;
+}
+.btn-group-justified > .btn-group .btn {
+ width: 100%;
+}
+.btn-group-justified > .btn-group .dropdown-menu {
+ left: auto;
+}
+[data-toggle="buttons"] > .btn input[type="radio"],
+[data-toggle="buttons"] > .btn-group > .btn input[type="radio"],
+[data-toggle="buttons"] > .btn input[type="checkbox"],
+[data-toggle="buttons"] > .btn-group > .btn input[type="checkbox"] {
+ position: absolute;
+ clip: rect(0, 0, 0, 0);
+ pointer-events: none;
+}
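+/* Input groups: sizing, addons and button segments */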
+.input-group {
+ position: relative;
+ display: table;
+ border-collapse: separate;
+}
+.input-group[class*="col-"] {
+ float: none;
+ padding-left: 0;
+ padding-right: 0;
+}
+.input-group .form-control {
+ position: relative;
+ z-index: 2;
+ float: left;
+ width: 100%;
+ margin-bottom: 0;
+}
+.input-group .form-control:focus {
+ z-index: 3;
+}
+.input-group-lg > .form-control,
+.input-group-lg > .input-group-addon,
+.input-group-lg > .input-group-btn > .btn {
+ height: 54px;
+ padding: 14px 16px;
+ font-size: 18px;
+ line-height: 1.3333333;
+ border-radius: 6px;
+}
+select.input-group-lg > .form-control,
+select.input-group-lg > .input-group-addon,
+select.input-group-lg > .input-group-btn > .btn {
+ height: 54px;
+ line-height: 54px;
+}
+textarea.input-group-lg > .form-control,
+textarea.input-group-lg > .input-group-addon,
+textarea.input-group-lg > .input-group-btn > .btn,
+select[multiple].input-group-lg > .form-control,
+select[multiple].input-group-lg > .input-group-addon,
+select[multiple].input-group-lg > .input-group-btn > .btn {
+ height: auto;
+}
+.input-group-sm > .form-control,
+.input-group-sm > .input-group-addon,
+.input-group-sm > .input-group-btn > .btn {
+ height: 30px;
+ padding: 5px 10px;
+ font-size: 12px;
+ line-height: 1.5;
+ border-radius: 3px;
+}
+select.input-group-sm > .form-control,
+select.input-group-sm > .input-group-addon,
+select.input-group-sm > .input-group-btn > .btn {
+ height: 30px;
+ line-height: 30px;
+}
+textarea.input-group-sm > .form-control,
+textarea.input-group-sm > .input-group-addon,
+textarea.input-group-sm > .input-group-btn > .btn,
+select[multiple].input-group-sm > .form-control,
+select[multiple].input-group-sm > .input-group-addon,
+select[multiple].input-group-sm > .input-group-btn > .btn {
+ height: auto;
+}
+.input-group-addon,
+.input-group-btn,
+.input-group .form-control {
+ display: table-cell;
+}
+.input-group-addon:not(:first-child):not(:last-child),
+.input-group-btn:not(:first-child):not(:last-child),
+.input-group .form-control:not(:first-child):not(:last-child) {
+ border-radius: 0;
+}
+.input-group-addon,
+.input-group-btn {
+ width: 1%;
+ white-space: nowrap;
+ vertical-align: middle;
+}
+.input-group-addon {
+ padding: 8px 12px;
+ font-size: 14px;
+ font-weight: normal;
+ line-height: 1;
+ color: #272b30;
+ text-align: center;
+ background-color: #3a3f44;
+ border: 1px solid rgba(0, 0, 0, 0.6);
+ border-radius: 4px;
+}
+.input-group-addon.input-sm {
+ padding: 5px 10px;
+ font-size: 12px;
+ border-radius: 3px;
+}
+.input-group-addon.input-lg {
+ padding: 14px 16px;
+ font-size: 18px;
+ border-radius: 6px;
+}
+.input-group-addon input[type="radio"],
+.input-group-addon input[type="checkbox"] {
+ margin-top: 0;
+}
+.input-group .form-control:first-child,
+.input-group-addon:first-child,
+.input-group-btn:first-child > .btn,
+.input-group-btn:first-child > .btn-group > .btn,
+.input-group-btn:first-child > .dropdown-toggle,
+.input-group-btn:last-child > .btn:not(:last-child):not(.dropdown-toggle),
+.input-group-btn:last-child > .btn-group:not(:last-child) > .btn {
+ border-bottom-right-radius: 0;
+ border-top-right-radius: 0;
+}
+.input-group-addon:first-child {
+ border-right: 0;
+}
+.input-group .form-control:last-child,
+.input-group-addon:last-child,
+.input-group-btn:last-child > .btn,
+.input-group-btn:last-child > .btn-group > .btn,
+.input-group-btn:last-child > .dropdown-toggle,
+.input-group-btn:first-child > .btn:not(:first-child),
+.input-group-btn:first-child > .btn-group:not(:first-child) > .btn {
+ border-bottom-left-radius: 0;
+ border-top-left-radius: 0;
+}
+.input-group-addon:last-child {
+ border-left: 0;
+}
+.input-group-btn {
+ position: relative;
+ font-size: 0;
+ white-space: nowrap;
+}
+.input-group-btn > .btn {
+ position: relative;
+}
+.input-group-btn > .btn + .btn {
+ margin-left: -1px;
+}
+.input-group-btn > .btn:hover,
+.input-group-btn > .btn:focus,
+.input-group-btn > .btn:active {
+ z-index: 2;
+}
+.input-group-btn:first-child > .btn,
+.input-group-btn:first-child > .btn-group {
+ margin-right: -1px;
+}
+.input-group-btn:last-child > .btn,
+.input-group-btn:last-child > .btn-group {
+ z-index: 2;
+ margin-left: -1px;
+}
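+/* Navs: base list styles, tabs, pills and justified variants */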
+.nav {
+ margin-bottom: 0;
+ padding-left: 0;
+ list-style: none;
+}
+.nav > li {
+ position: relative;
+ display: block;
+}
+.nav > li > a {
+ position: relative;
+ display: block;
+ padding: 10px 15px;
+}
+.nav > li > a:hover,
+.nav > li > a:focus {
+ text-decoration: none;
+ background-color: #3e444c;
+}
+.nav > li.disabled > a {
+ color: #7a8288;
+}
+.nav > li.disabled > a:hover,
+.nav > li.disabled > a:focus {
+ color: #7a8288;
+ text-decoration: none;
+ background-color: transparent;
+ cursor: not-allowed;
+}
+.nav .open > a,
+.nav .open > a:hover,
+.nav .open > a:focus {
+ background-color: #3e444c;
+ border-color: #ffffff;
+}
+.nav .nav-divider {
+ height: 1px;
+ margin: 9px 0;
+ overflow: hidden;
+ background-color: #e5e5e5;
+}
+.nav > li > a > img {
+ max-width: none;
+}
+.nav-tabs {
+ border-bottom: 1px solid #1c1e22;
+}
+.nav-tabs > li {
+ float: left;
+ margin-bottom: -1px;
+}
+.nav-tabs > li > a {
+ margin-right: 2px;
+ line-height: 1.42857143;
+ border: 1px solid transparent;
+ border-radius: 4px 4px 0 0;
+}
+.nav-tabs > li > a:hover {
+ border-color: #1c1e22 #1c1e22 #1c1e22;
+}
+.nav-tabs > li.active > a,
+.nav-tabs > li.active > a:hover,
+.nav-tabs > li.active > a:focus {
+ color: #ffffff;
+ background-color: #3e444c;
+ border: 1px solid #1c1e22;
+ border-bottom-color: transparent;
+ cursor: default;
+}
+.nav-tabs.nav-justified {
+ width: 100%;
+ border-bottom: 0;
+}
+.nav-tabs.nav-justified > li {
+ float: none;
+}
+.nav-tabs.nav-justified > li > a {
+ text-align: center;
+ margin-bottom: 5px;
+}
+.nav-tabs.nav-justified > .dropdown .dropdown-menu {
+ top: auto;
+ left: auto;
+}
+@media (min-width: 768px) {
+ .nav-tabs.nav-justified > li {
+ display: table-cell;
+ width: 1%;
+ }
+ .nav-tabs.nav-justified > li > a {
+ margin-bottom: 0;
+ }
+}
+.nav-tabs.nav-justified > li > a {
+ margin-right: 0;
+ border-radius: 4px;
+}
+.nav-tabs.nav-justified > .active > a,
+.nav-tabs.nav-justified > .active > a:hover,
+.nav-tabs.nav-justified > .active > a:focus {
+ border: 1px solid #1c1e22;
+}
+@media (min-width: 768px) {
+ .nav-tabs.nav-justified > li > a {
+ border-bottom: 1px solid #1c1e22;
+ border-radius: 4px 4px 0 0;
+ }
+ .nav-tabs.nav-justified > .active > a,
+ .nav-tabs.nav-justified > .active > a:hover,
+ .nav-tabs.nav-justified > .active > a:focus {
+ border-bottom-color: #272b30;
+ }
+}
+.nav-pills > li {
+ float: left;
+}
+.nav-pills > li > a {
+ border-radius: 4px;
+}
+.nav-pills > li + li {
+ margin-left: 2px;
+}
+.nav-pills > li.active > a,
+.nav-pills > li.active > a:hover,
+.nav-pills > li.active > a:focus {
+ color: #ffffff;
+ background-color: transparent;
+}
+.nav-stacked > li {
+ float: none;
+}
+.nav-stacked > li + li {
+ margin-top: 2px;
+ margin-left: 0;
+}
+.nav-justified {
+ width: 100%;
+}
+.nav-justified > li {
+ float: none;
+}
+.nav-justified > li > a {
+ text-align: center;
+ margin-bottom: 5px;
+}
+.nav-justified > .dropdown .dropdown-menu {
+ top: auto;
+ left: auto;
+}
+@media (min-width: 768px) {
+ .nav-justified > li {
+ display: table-cell;
+ width: 1%;
+ }
+ .nav-justified > li > a {
+ margin-bottom: 0;
+ }
+}
+.nav-tabs-justified {
+ border-bottom: 0;
+}
+.nav-tabs-justified > li > a {
+ margin-right: 0;
+ border-radius: 4px;
+}
+.nav-tabs-justified > .active > a,
+.nav-tabs-justified > .active > a:hover,
+.nav-tabs-justified > .active > a:focus {
+ border: 1px solid #1c1e22;
+}
+@media (min-width: 768px) {
+ .nav-tabs-justified > li > a {
+ border-bottom: 1px solid #1c1e22;
+ border-radius: 4px 4px 0 0;
+ }
+ .nav-tabs-justified > .active > a,
+ .nav-tabs-justified > .active > a:hover,
+ .nav-tabs-justified > .active > a:focus {
+ border-bottom-color: #272b30;
+ }
+}
+.tab-content > .tab-pane {
+ display: none;
+}
+.tab-content > .active {
+ display: block;
+}
+.nav-tabs .dropdown-menu {
+ margin-top: -1px;
+ border-top-right-radius: 0;
+ border-top-left-radius: 0;
+}
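+/* Navbar: layout, responsive collapse, and default/inverse color schemes */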
+.navbar {
+ position: relative;
+ min-height: 50px;
+ margin-bottom: 20px;
+ border: 1px solid transparent;
+}
+@media (min-width: 768px) {
+ .navbar {
+ border-radius: 4px;
+ }
+}
+@media (min-width: 768px) {
+ .navbar-header {
+ float: left;
+ }
+}
+.navbar-collapse {
+ overflow-x: visible;
+ padding-right: 15px;
+ padding-left: 15px;
+ border-top: 1px solid transparent;
+ -webkit-box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.1);
+ box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.1);
+ -webkit-overflow-scrolling: touch;
+}
+.navbar-collapse.in {
+ overflow-y: auto;
+}
+@media (min-width: 768px) {
+ .navbar-collapse {
+ width: auto;
+ border-top: 0;
+ -webkit-box-shadow: none;
+ box-shadow: none;
+ }
+ .navbar-collapse.collapse {
+ display: block !important;
+ height: auto !important;
+ padding-bottom: 0;
+ overflow: visible !important;
+ }
+ .navbar-collapse.in {
+ overflow-y: visible;
+ }
+ .navbar-fixed-top .navbar-collapse,
+ .navbar-static-top .navbar-collapse,
+ .navbar-fixed-bottom .navbar-collapse {
+ padding-left: 0;
+ padding-right: 0;
+ }
+}
+.navbar-fixed-top .navbar-collapse,
+.navbar-fixed-bottom .navbar-collapse {
+ max-height: 340px;
+}
+@media (max-device-width: 480px) and (orientation: landscape) {
+ .navbar-fixed-top .navbar-collapse,
+ .navbar-fixed-bottom .navbar-collapse {
+ max-height: 200px;
+ }
+}
+.container > .navbar-header,
+.container-fluid > .navbar-header,
+.container > .navbar-collapse,
+.container-fluid > .navbar-collapse {
+ margin-right: -15px;
+ margin-left: -15px;
+}
+@media (min-width: 768px) {
+ .container > .navbar-header,
+ .container-fluid > .navbar-header,
+ .container > .navbar-collapse,
+ .container-fluid > .navbar-collapse {
+ margin-right: 0;
+ margin-left: 0;
+ }
+}
+.navbar-static-top {
+ z-index: 1000;
+ border-width: 0 0 1px;
+}
+@media (min-width: 768px) {
+ .navbar-static-top {
+ border-radius: 0;
+ }
+}
+.navbar-fixed-top,
+.navbar-fixed-bottom {
+ position: fixed;
+ right: 0;
+ left: 0;
+ z-index: 1030;
+}
+@media (min-width: 768px) {
+ .navbar-fixed-top,
+ .navbar-fixed-bottom {
+ border-radius: 0;
+ }
+}
+.navbar-fixed-top {
+ top: 0;
+ border-width: 0 0 1px;
+}
+.navbar-fixed-bottom {
+ bottom: 0;
+ margin-bottom: 0;
+ border-width: 1px 0 0;
+}
+.navbar-brand {
+ float: left;
+ padding: 15px 15px;
+ font-size: 18px;
+ line-height: 20px;
+ height: 50px;
+}
+.navbar-brand:hover,
+.navbar-brand:focus {
+ text-decoration: none;
+}
+.navbar-brand > img {
+ display: block;
+}
+@media (min-width: 768px) {
+ .navbar > .container .navbar-brand,
+ .navbar > .container-fluid .navbar-brand {
+ margin-left: -15px;
+ }
+}
+.navbar-toggle {
+ position: relative;
+ float: right;
+ margin-right: 15px;
+ padding: 9px 10px;
+ margin-top: 8px;
+ margin-bottom: 8px;
+ background-color: transparent;
+ background-image: none;
+ border: 1px solid transparent;
+ border-radius: 4px;
+}
+.navbar-toggle:focus {
+ outline: 0;
+}
+.navbar-toggle .icon-bar {
+ display: block;
+ width: 22px;
+ height: 2px;
+ border-radius: 1px;
+}
+.navbar-toggle .icon-bar + .icon-bar {
+ margin-top: 4px;
+}
+@media (min-width: 768px) {
+ .navbar-toggle {
+ display: none;
+ }
+}
+.navbar-nav {
+ margin: 7.5px -15px;
+}
+.navbar-nav > li > a {
+ padding-top: 10px;
+ padding-bottom: 10px;
+ line-height: 20px;
+}
+@media (max-width: 767px) {
+ .navbar-nav .open .dropdown-menu {
+ position: static;
+ float: none;
+ width: auto;
+ margin-top: 0;
+ background-color: transparent;
+ border: 0;
+ -webkit-box-shadow: none;
+ box-shadow: none;
+ }
+ .navbar-nav .open .dropdown-menu > li > a,
+ .navbar-nav .open .dropdown-menu .dropdown-header {
+ padding: 5px 15px 5px 25px;
+ }
+ .navbar-nav .open .dropdown-menu > li > a {
+ line-height: 20px;
+ }
+ .navbar-nav .open .dropdown-menu > li > a:hover,
+ .navbar-nav .open .dropdown-menu > li > a:focus {
+ background-image: none;
+ }
+}
+@media (min-width: 768px) {
+ .navbar-nav {
+ float: left;
+ margin: 0;
+ }
+ .navbar-nav > li {
+ float: left;
+ }
+ .navbar-nav > li > a {
+ padding-top: 15px;
+ padding-bottom: 15px;
+ }
+}
+.navbar-form {
+ margin-left: -15px;
+ margin-right: -15px;
+ padding: 10px 15px;
+ border-top: 1px solid transparent;
+ border-bottom: 1px solid transparent;
+ -webkit-box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.1), 0 1px 0 rgba(255, 255, 255, 0.1);
+ box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.1), 0 1px 0 rgba(255, 255, 255, 0.1);
+ margin-top: 6px;
+ margin-bottom: 6px;
+}
+@media (min-width: 768px) {
+ .navbar-form .form-group {
+ display: inline-block;
+ margin-bottom: 0;
+ vertical-align: middle;
+ }
+ .navbar-form .form-control {
+ display: inline-block;
+ width: auto;
+ vertical-align: middle;
+ }
+ .navbar-form .form-control-static {
+ display: inline-block;
+ }
+ .navbar-form .input-group {
+ display: inline-table;
+ vertical-align: middle;
+ }
+ .navbar-form .input-group .input-group-addon,
+ .navbar-form .input-group .input-group-btn,
+ .navbar-form .input-group .form-control {
+ width: auto;
+ }
+ .navbar-form .input-group > .form-control {
+ width: 100%;
+ }
+ .navbar-form .control-label {
+ margin-bottom: 0;
+ vertical-align: middle;
+ }
+ .navbar-form .radio,
+ .navbar-form .checkbox {
+ display: inline-block;
+ margin-top: 0;
+ margin-bottom: 0;
+ vertical-align: middle;
+ }
+ .navbar-form .radio label,
+ .navbar-form .checkbox label {
+ padding-left: 0;
+ }
+ .navbar-form .radio input[type="radio"],
+ .navbar-form .checkbox input[type="checkbox"] {
+ position: relative;
+ margin-left: 0;
+ }
+ .navbar-form .has-feedback .form-control-feedback {
+ top: 0;
+ }
+}
+@media (max-width: 767px) {
+ .navbar-form .form-group {
+ margin-bottom: 5px;
+ }
+ .navbar-form .form-group:last-child {
+ margin-bottom: 0;
+ }
+}
+@media (min-width: 768px) {
+ .navbar-form {
+ width: auto;
+ border: 0;
+ margin-left: 0;
+ margin-right: 0;
+ padding-top: 0;
+ padding-bottom: 0;
+ -webkit-box-shadow: none;
+ box-shadow: none;
+ }
+}
+.navbar-nav > li > .dropdown-menu {
+ margin-top: 0;
+ border-top-right-radius: 0;
+ border-top-left-radius: 0;
+}
+.navbar-fixed-bottom .navbar-nav > li > .dropdown-menu {
+ margin-bottom: 0;
+ border-top-right-radius: 4px;
+ border-top-left-radius: 4px;
+ border-bottom-right-radius: 0;
+ border-bottom-left-radius: 0;
+}
+.navbar-btn {
+ margin-top: 6px;
+ margin-bottom: 6px;
+}
+.navbar-btn.btn-sm {
+ margin-top: 10px;
+ margin-bottom: 10px;
+}
+.navbar-btn.btn-xs {
+ margin-top: 14px;
+ margin-bottom: 14px;
+}
+.navbar-text {
+ margin-top: 15px;
+ margin-bottom: 15px;
+}
+@media (min-width: 768px) {
+ .navbar-text {
+ float: left;
+ margin-left: 15px;
+ margin-right: 15px;
+ }
+}
+@media (min-width: 768px) {
+ .navbar-left {
+ float: left !important;
+ }
+ .navbar-right {
+ float: right !important;
+ margin-right: -15px;
+ }
+ .navbar-right ~ .navbar-right {
+ margin-right: 0;
+ }
+}
+.navbar-default {
+ background-color: #3a3f44;
+ border-color: #2b2e32;
+}
+.navbar-default .navbar-brand {
+ color: #c8c8c8;
+}
+.navbar-default .navbar-brand:hover,
+.navbar-default .navbar-brand:focus {
+ color: #ffffff;
+ background-color: transparent;
+}
+.navbar-default .navbar-text {
+ color: #c8c8c8;
+}
+.navbar-default .navbar-nav > li > a {
+ color: #c8c8c8;
+}
+.navbar-default .navbar-nav > li > a:hover,
+.navbar-default .navbar-nav > li > a:focus {
+ color: #ffffff;
+ background-color: #272b2e;
+}
+.navbar-default .navbar-nav > .active > a,
+.navbar-default .navbar-nav > .active > a:hover,
+.navbar-default .navbar-nav > .active > a:focus {
+ color: #ffffff;
+ background-color: #272b2e;
+}
+.navbar-default .navbar-nav > .disabled > a,
+.navbar-default .navbar-nav > .disabled > a:hover,
+.navbar-default .navbar-nav > .disabled > a:focus {
+ color: #cccccc;
+ background-color: transparent;
+}
+.navbar-default .navbar-toggle {
+ border-color: #272b2e;
+}
+.navbar-default .navbar-toggle:hover,
+.navbar-default .navbar-toggle:focus {
+ background-color: #272b2e;
+}
+.navbar-default .navbar-toggle .icon-bar {
+ background-color: #c8c8c8;
+}
+.navbar-default .navbar-collapse,
+.navbar-default .navbar-form {
+ border-color: #2b2e32;
+}
+.navbar-default .navbar-nav > .open > a,
+.navbar-default .navbar-nav > .open > a:hover,
+.navbar-default .navbar-nav > .open > a:focus {
+ background-color: #272b2e;
+ color: #ffffff;
+}
+@media (max-width: 767px) {
+ .navbar-default .navbar-nav .open .dropdown-menu > li > a {
+ color: #c8c8c8;
+ }
+ .navbar-default .navbar-nav .open .dropdown-menu > li > a:hover,
+ .navbar-default .navbar-nav .open .dropdown-menu > li > a:focus {
+ color: #ffffff;
+ background-color: #272b2e;
+ }
+ .navbar-default .navbar-nav .open .dropdown-menu > .active > a,
+ .navbar-default .navbar-nav .open .dropdown-menu > .active > a:hover,
+ .navbar-default .navbar-nav .open .dropdown-menu > .active > a:focus {
+ color: #ffffff;
+ background-color: #272b2e;
+ }
+ .navbar-default .navbar-nav .open .dropdown-menu > .disabled > a,
+ .navbar-default .navbar-nav .open .dropdown-menu > .disabled > a:hover,
+ .navbar-default .navbar-nav .open .dropdown-menu > .disabled > a:focus {
+ color: #cccccc;
+ background-color: transparent;
+ }
+}
+.navbar-default .navbar-link {
+ color: #c8c8c8;
+}
+.navbar-default .navbar-link:hover {
+ color: #ffffff;
+}
+.navbar-default .btn-link {
+ color: #c8c8c8;
+}
+.navbar-default .btn-link:hover,
+.navbar-default .btn-link:focus {
+ color: #ffffff;
+}
+.navbar-default .btn-link[disabled]:hover,
+fieldset[disabled] .navbar-default .btn-link:hover,
+.navbar-default .btn-link[disabled]:focus,
+fieldset[disabled] .navbar-default .btn-link:focus {
+ color: #cccccc;
+}
+.navbar-inverse {
+ background-color: #7a8288;
+ border-color: #62686d;
+}
+.navbar-inverse .navbar-brand {
+ color: #cccccc;
+}
+.navbar-inverse .navbar-brand:hover,
+.navbar-inverse .navbar-brand:focus {
+ color: #ffffff;
+ background-color: transparent;
+}
+.navbar-inverse .navbar-text {
+ color: #cccccc;
+}
+.navbar-inverse .navbar-nav > li > a {
+ color: #cccccc;
+}
+.navbar-inverse .navbar-nav > li > a:hover,
+.navbar-inverse .navbar-nav > li > a:focus {
+ color: #ffffff;
+ background-color: #5d6368;
+}
+.navbar-inverse .navbar-nav > .active > a,
+.navbar-inverse .navbar-nav > .active > a:hover,
+.navbar-inverse .navbar-nav > .active > a:focus {
+ color: #ffffff;
+ background-color: #5d6368;
+}
+.navbar-inverse .navbar-nav > .disabled > a,
+.navbar-inverse .navbar-nav > .disabled > a:hover,
+.navbar-inverse .navbar-nav > .disabled > a:focus {
+ color: #cccccc;
+ background-color: transparent;
+}
+.navbar-inverse .navbar-toggle {
+ border-color: #5d6368;
+}
+.navbar-inverse .navbar-toggle:hover,
+.navbar-inverse .navbar-toggle:focus {
+ background-color: #5d6368;
+}
+.navbar-inverse .navbar-toggle .icon-bar {
+ background-color: #ffffff;
+}
+.navbar-inverse .navbar-collapse,
+.navbar-inverse .navbar-form {
+ border-color: #697075;
+}
+.navbar-inverse .navbar-nav > .open > a,
+.navbar-inverse .navbar-nav > .open > a:hover,
+.navbar-inverse .navbar-nav > .open > a:focus {
+ background-color: #5d6368;
+ color: #ffffff;
+}
+@media (max-width: 767px) {
+ .navbar-inverse .navbar-nav .open .dropdown-menu > .dropdown-header {
+ border-color: #62686d;
+ }
+ .navbar-inverse .navbar-nav .open .dropdown-menu .divider {
+ background-color: #62686d;
+ }
+ .navbar-inverse .navbar-nav .open .dropdown-menu > li > a {
+ color: #cccccc;
+ }
+ .navbar-inverse .navbar-nav .open .dropdown-menu > li > a:hover,
+ .navbar-inverse .navbar-nav .open .dropdown-menu > li > a:focus {
+ color: #ffffff;
+ background-color: #5d6368;
+ }
+ .navbar-inverse .navbar-nav .open .dropdown-menu > .active > a,
+ .navbar-inverse .navbar-nav .open .dropdown-menu > .active > a:hover,
+ .navbar-inverse .navbar-nav .open .dropdown-menu > .active > a:focus {
+ color: #ffffff;
+ background-color: #5d6368;
+ }
+ .navbar-inverse .navbar-nav .open .dropdown-menu > .disabled > a,
+ .navbar-inverse .navbar-nav .open .dropdown-menu > .disabled > a:hover,
+ .navbar-inverse .navbar-nav .open .dropdown-menu > .disabled > a:focus {
+ color: #cccccc;
+ background-color: transparent;
+ }
+}
+.navbar-inverse .navbar-link {
+ color: #cccccc;
+}
+.navbar-inverse .navbar-link:hover {
+ color: #ffffff;
+}
+.navbar-inverse .btn-link {
+ color: #cccccc;
+}
+.navbar-inverse .btn-link:hover,
+.navbar-inverse .btn-link:focus {
+ color: #ffffff;
+}
+.navbar-inverse .btn-link[disabled]:hover,
+fieldset[disabled] .navbar-inverse .btn-link:hover,
+.navbar-inverse .btn-link[disabled]:focus,
+fieldset[disabled] .navbar-inverse .btn-link:focus {
+ color: #cccccc;
+}
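+/* Breadcrumbs */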
+.breadcrumb {
+ padding: 8px 15px;
+ margin-bottom: 20px;
+ list-style: none;
+ background-color: transparent;
+ border-radius: 4px;
+}
+.breadcrumb > li {
+ display: inline-block;
+}
+.breadcrumb > li + li:before {
+ content: "/\00a0";
+ padding: 0 5px;
+ color: #cccccc;
+}
+.breadcrumb > .active {
+ color: #7a8288;
+}
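+/* Pagination */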
+.pagination {
+ display: inline-block;
+ padding-left: 0;
+ margin: 20px 0;
+ border-radius: 4px;
+}
+.pagination > li {
+ display: inline;
+}
+.pagination > li > a,
+.pagination > li > span {
+ position: relative;
+ float: left;
+ padding: 8px 12px;
+ line-height: 1.42857143;
+ text-decoration: none;
+ color: #ffffff;
+ background-color: #3a3f44;
+ border: 1px solid rgba(0, 0, 0, 0.6);
+ margin-left: -1px;
+}
+.pagination > li:first-child > a,
+.pagination > li:first-child > span {
+ margin-left: 0;
+ border-bottom-left-radius: 4px;
+ border-top-left-radius: 4px;
+}
+.pagination > li:last-child > a,
+.pagination > li:last-child > span {
+ border-bottom-right-radius: 4px;
+ border-top-right-radius: 4px;
+}
+.pagination > li > a:hover,
+.pagination > li > span:hover,
+.pagination > li > a:focus,
+.pagination > li > span:focus {
+ z-index: 2;
+ color: #ffffff;
+ background-color: transparent;
+ border-color: rgba(0, 0, 0, 0.6);
+}
+.pagination > .active > a,
+.pagination > .active > span,
+.pagination > .active > a:hover,
+.pagination > .active > span:hover,
+.pagination > .active > a:focus,
+.pagination > .active > span:focus {
+ z-index: 3;
+ color: #ffffff;
+ background-color: #232628;
+ border-color: rgba(0, 0, 0, 0.6);
+ cursor: default;
+}
+.pagination > .disabled > span,
+.pagination > .disabled > span:hover,
+.pagination > .disabled > span:focus,
+.pagination > .disabled > a,
+.pagination > .disabled > a:hover,
+.pagination > .disabled > a:focus {
+ color: #7a8288;
+ background-color: #ffffff;
+ border-color: rgba(0, 0, 0, 0.6);
+ cursor: not-allowed;
+}
+.pagination-lg > li > a,
+.pagination-lg > li > span {
+ padding: 14px 16px;
+ font-size: 18px;
+ line-height: 1.3333333;
+}
+.pagination-lg > li:first-child > a,
+.pagination-lg > li:first-child > span {
+ border-bottom-left-radius: 6px;
+ border-top-left-radius: 6px;
+}
+.pagination-lg > li:last-child > a,
+.pagination-lg > li:last-child > span {
+ border-bottom-right-radius: 6px;
+ border-top-right-radius: 6px;
+}
+.pagination-sm > li > a,
+.pagination-sm > li > span {
+ padding: 5px 10px;
+ font-size: 12px;
+ line-height: 1.5;
+}
+.pagination-sm > li:first-child > a,
+.pagination-sm > li:first-child > span {
+ border-bottom-left-radius: 3px;
+ border-top-left-radius: 3px;
+}
+.pagination-sm > li:last-child > a,
+.pagination-sm > li:last-child > span {
+ border-bottom-right-radius: 3px;
+ border-top-right-radius: 3px;
+}
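+/* Pager */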
+.pager {
+ padding-left: 0;
+ margin: 20px 0;
+ list-style: none;
+ text-align: center;
+}
+.pager li {
+ display: inline;
+}
+.pager li > a,
+.pager li > span {
+ display: inline-block;
+ padding: 5px 14px;
+ background-color: #3a3f44;
+ border: 1px solid rgba(0, 0, 0, 0.6);
+ border-radius: 15px;
+}
+.pager li > a:hover,
+.pager li > a:focus {
+ text-decoration: none;
+ background-color: transparent;
+}
+.pager .next > a,
+.pager .next > span {
+ float: right;
+}
+.pager .previous > a,
+.pager .previous > span {
+ float: left;
+}
+.pager .disabled > a,
+.pager .disabled > a:hover,
+.pager .disabled > a:focus,
+.pager .disabled > span {
+ color: #7a8288;
+ background-color: #3a3f44;
+ cursor: not-allowed;
+}
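+/* Labels */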
+.label {
+ display: inline;
+ padding: .2em .6em .3em;
+ font-size: 75%;
+ font-weight: bold;
+ line-height: 1;
+ color: #ffffff;
+ text-align: center;
+ white-space: nowrap;
+ vertical-align: baseline;
+ border-radius: .25em;
+}
+a.label:hover,
+a.label:focus {
+ color: #ffffff;
+ text-decoration: none;
+ cursor: pointer;
+}
+.label:empty {
+ display: none;
+}
+.btn .label {
+ position: relative;
+ top: -1px;
+}
+.label-default {
+ background-color: #3a3f44;
+}
+.label-default[href]:hover,
+.label-default[href]:focus {
+ background-color: #232628;
+}
+.label-primary {
+ background-color: #7a8288;
+}
+.label-primary[href]:hover,
+.label-primary[href]:focus {
+ background-color: #62686d;
+}
+.label-success {
+ background-color: #62c462;
+}
+.label-success[href]:hover,
+.label-success[href]:focus {
+ background-color: #42b142;
+}
+.label-info {
+ background-color: #5bc0de;
+}
+.label-info[href]:hover,
+.label-info[href]:focus {
+ background-color: #31b0d5;
+}
+.label-warning {
+ background-color: #f89406;
+}
+.label-warning[href]:hover,
+.label-warning[href]:focus {
+ background-color: #c67605;
+}
+.label-danger {
+ background-color: #ee5f5b;
+}
+.label-danger[href]:hover,
+.label-danger[href]:focus {
+ background-color: #e9322d;
+}
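+/* Badges */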
+.badge {
+ display: inline-block;
+ min-width: 10px;
+ padding: 3px 7px;
+ font-size: 12px;
+ font-weight: bold;
+ color: #ffffff;
+ line-height: 1;
+ vertical-align: middle;
+ white-space: nowrap;
+ text-align: center;
+ background-color: #7a8288;
+ border-radius: 10px;
+}
+.badge:empty {
+ display: none;
+}
+.btn .badge {
+ position: relative;
+ top: -1px;
+}
+.btn-xs .badge,
+.btn-group-xs > .btn .badge {
+ top: 0;
+ padding: 1px 5px;
+}
+a.badge:hover,
+a.badge:focus {
+ color: #ffffff;
+ text-decoration: none;
+ cursor: pointer;
+}
+.list-group-item.active > .badge,
+.nav-pills > .active > a > .badge {
+ color: #ffffff;
+ background-color: #7a8288;
+}
+.list-group-item > .badge {
+ float: right;
+}
+.list-group-item > .badge + .badge {
+ margin-right: 5px;
+}
+.nav-pills > li > a > .badge {
+ margin-left: 3px;
+}
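+/* Jumbotron */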
+.jumbotron {
+ padding-top: 30px;
+ padding-bottom: 30px;
+ margin-bottom: 30px;
+ color: inherit;
+ background-color: #1c1e22;
+}
+.jumbotron h1,
+.jumbotron .h1 {
+ color: inherit;
+}
+.jumbotron p {
+ margin-bottom: 15px;
+ font-size: 21px;
+ font-weight: 200;
+}
+.jumbotron > hr {
+ border-top-color: #050506;
+}
+.container .jumbotron,
+.container-fluid .jumbotron {
+ border-radius: 6px;
+ padding-left: 15px;
+ padding-right: 15px;
+}
+.jumbotron .container {
+ max-width: 100%;
+}
+@media screen and (min-width: 768px) {
+ .jumbotron {
+ padding-top: 48px;
+ padding-bottom: 48px;
+ }
+ .container .jumbotron,
+ .container-fluid .jumbotron {
+ padding-left: 60px;
+ padding-right: 60px;
+ }
+ .jumbotron h1,
+ .jumbotron .h1 {
+ font-size: 63px;
+ }
+}
+.thumbnail {
+ display: block;
+ padding: 4px;
+ margin-bottom: 20px;
+ line-height: 1.42857143;
+ background-color: #1c1e22;
+ border: 1px solid #0c0d0e;
+ border-radius: 4px;
+ -webkit-transition: border 0.2s ease-in-out;
+ -o-transition: border 0.2s ease-in-out;
+ transition: border 0.2s ease-in-out;
+}
+.thumbnail > img,
+.thumbnail a > img {
+ margin-left: auto;
+ margin-right: auto;
+}
+a.thumbnail:hover,
+a.thumbnail:focus,
+a.thumbnail.active {
+ border-color: #ffffff;
+}
+.thumbnail .caption {
+ padding: 9px;
+ color: #c8c8c8;
+}
+.alert {
+ padding: 15px;
+ margin-bottom: 20px;
+ border: 1px solid transparent;
+ border-radius: 4px;
+}
+.alert h4 {
+ margin-top: 0;
+ color: inherit;
+}
+.alert .alert-link {
+ font-weight: bold;
+}
+.alert > p,
+.alert > ul {
+ margin-bottom: 0;
+}
+.alert > p + p {
+ margin-top: 5px;
+}
+.alert-dismissable,
+.alert-dismissible {
+ padding-right: 35px;
+}
+.alert-dismissable .close,
+.alert-dismissible .close {
+ position: relative;
+ top: -2px;
+ right: -21px;
+ color: inherit;
+}
+.alert-success {
+ background-color: #62c462;
+ border-color: #62bd4f;
+ color: #ffffff;
+}
+.alert-success hr {
+ border-top-color: #55b142;
+}
+.alert-success .alert-link {
+ color: #e6e6e6;
+}
+.alert-info {
+ background-color: #5bc0de;
+ border-color: #3dced8;
+ color: #ffffff;
+}
+.alert-info hr {
+ border-top-color: #2ac7d2;
+}
+.alert-info .alert-link {
+ color: #e6e6e6;
+}
+.alert-warning {
+ background-color: #f89406;
+ border-color: #e96506;
+ color: #ffffff;
+}
+.alert-warning hr {
+ border-top-color: #d05a05;
+}
+.alert-warning .alert-link {
+ color: #e6e6e6;
+}
+.alert-danger {
+ background-color: #ee5f5b;
+ border-color: #ed4d63;
+ color: #ffffff;
+}
+.alert-danger hr {
+ border-top-color: #ea364f;
+}
+.alert-danger .alert-link {
+ color: #e6e6e6;
+}
+@-webkit-keyframes progress-bar-stripes {
+ from {
+ background-position: 40px 0;
+ }
+ to {
+ background-position: 0 0;
+ }
+}
+@-o-keyframes progress-bar-stripes {
+ from {
+ background-position: 40px 0;
+ }
+ to {
+ background-position: 0 0;
+ }
+}
+@keyframes progress-bar-stripes {
+ from {
+ background-position: 40px 0;
+ }
+ to {
+ background-position: 0 0;
+ }
+}
+.progress {
+ overflow: hidden;
+ height: 20px;
+ margin-bottom: 20px;
+ background-color: #1c1e22;
+ border-radius: 4px;
+ -webkit-box-shadow: inset 0 1px 2px rgba(0, 0, 0, 0.1);
+ box-shadow: inset 0 1px 2px rgba(0, 0, 0, 0.1);
+}
+.progress-bar {
+ float: left;
+ width: 0%;
+ height: 100%;
+ font-size: 12px;
+ line-height: 20px;
+ color: #ffffff;
+ text-align: center;
+ background-color: #7a8288;
+ -webkit-box-shadow: inset 0 -1px 0 rgba(0, 0, 0, 0.15);
+ box-shadow: inset 0 -1px 0 rgba(0, 0, 0, 0.15);
+ -webkit-transition: width 0.6s ease;
+ -o-transition: width 0.6s ease;
+ transition: width 0.6s ease;
+}
+.progress-striped .progress-bar,
+.progress-bar-striped {
+ background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);
+ background-image: -o-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);
+ background-image: linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);
+ -webkit-background-size: 40px 40px;
+ background-size: 40px 40px;
+}
+.progress.active .progress-bar,
+.progress-bar.active {
+ -webkit-animation: progress-bar-stripes 2s linear infinite;
+ -o-animation: progress-bar-stripes 2s linear infinite;
+ animation: progress-bar-stripes 2s linear infinite;
+}
+.progress-bar-success {
+ background-color: #62c462;
+}
+.progress-striped .progress-bar-success {
+ background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);
+ background-image: -o-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);
+ background-image: linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);
+}
+.progress-bar-info {
+ background-color: #5bc0de;
+}
+.progress-striped .progress-bar-info {
+ background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);
+ background-image: -o-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);
+ background-image: linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);
+}
+.progress-bar-warning {
+ background-color: #f89406;
+}
+.progress-striped .progress-bar-warning {
+ background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);
+ background-image: -o-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);
+ background-image: linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);
+}
+.progress-bar-danger {
+ background-color: #ee5f5b;
+}
+.progress-striped .progress-bar-danger {
+ background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);
+ background-image: -o-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);
+ background-image: linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);
+}
+.media {
+ margin-top: 15px;
+}
+.media:first-child {
+ margin-top: 0;
+}
+.media,
+.media-body {
+ zoom: 1;
+ overflow: hidden;
+}
+.media-body {
+ width: 10000px;
+}
+.media-object {
+ display: block;
+}
+.media-object.img-thumbnail {
+ max-width: none;
+}
+.media-right,
+.media > .pull-right {
+ padding-left: 10px;
+}
+.media-left,
+.media > .pull-left {
+ padding-right: 10px;
+}
+.media-left,
+.media-right,
+.media-body {
+ display: table-cell;
+ vertical-align: top;
+}
+.media-middle {
+ vertical-align: middle;
+}
+.media-bottom {
+ vertical-align: bottom;
+}
+.media-heading {
+ margin-top: 0;
+ margin-bottom: 5px;
+}
+.media-list {
+ padding-left: 0;
+ list-style: none;
+}
+.list-group {
+ margin-bottom: 20px;
+ padding-left: 0;
+}
+.list-group-item {
+ position: relative;
+ display: block;
+ padding: 10px 15px;
+ margin-bottom: -1px;
+ background-color: #32383e;
+ border: 1px solid rgba(0, 0, 0, 0.6);
+}
+.list-group-item:first-child {
+ border-top-right-radius: 4px;
+ border-top-left-radius: 4px;
+}
+.list-group-item:last-child {
+ margin-bottom: 0;
+ border-bottom-right-radius: 4px;
+ border-bottom-left-radius: 4px;
+}
+a.list-group-item,
+button.list-group-item {
+ color: #c8c8c8;
+}
+a.list-group-item .list-group-item-heading,
+button.list-group-item .list-group-item-heading {
+ color: #ffffff;
+}
+a.list-group-item:hover,
+button.list-group-item:hover,
+a.list-group-item:focus,
+button.list-group-item:focus {
+ text-decoration: none;
+ color: #c8c8c8;
+ background-color: #3e444c;
+}
+button.list-group-item {
+ width: 100%;
+ text-align: left;
+}
+.list-group-item.disabled,
+.list-group-item.disabled:hover,
+.list-group-item.disabled:focus {
+ background-color: #999999;
+ color: #7a8288;
+ cursor: not-allowed;
+}
+.list-group-item.disabled .list-group-item-heading,
+.list-group-item.disabled:hover .list-group-item-heading,
+.list-group-item.disabled:focus .list-group-item-heading {
+ color: inherit;
+}
+.list-group-item.disabled .list-group-item-text,
+.list-group-item.disabled:hover .list-group-item-text,
+.list-group-item.disabled:focus .list-group-item-text {
+ color: #7a8288;
+}
+.list-group-item.active,
+.list-group-item.active:hover,
+.list-group-item.active:focus {
+ z-index: 2;
+ color: #ffffff;
+ background-color: #3e444c;
+ border-color: rgba(0, 0, 0, 0.6);
+}
+.list-group-item.active .list-group-item-heading,
+.list-group-item.active:hover .list-group-item-heading,
+.list-group-item.active:focus .list-group-item-heading,
+.list-group-item.active .list-group-item-heading > small,
+.list-group-item.active:hover .list-group-item-heading > small,
+.list-group-item.active:focus .list-group-item-heading > small,
+.list-group-item.active .list-group-item-heading > .small,
+.list-group-item.active:hover .list-group-item-heading > .small,
+.list-group-item.active:focus .list-group-item-heading > .small {
+ color: inherit;
+}
+.list-group-item.active .list-group-item-text,
+.list-group-item.active:hover .list-group-item-text,
+.list-group-item.active:focus .list-group-item-text {
+ color: #a2aab4;
+}
+.list-group-item-success {
+ color: #ffffff;
+ background-color: #62c462;
+}
+a.list-group-item-success,
+button.list-group-item-success {
+ color: #ffffff;
+}
+a.list-group-item-success .list-group-item-heading,
+button.list-group-item-success .list-group-item-heading {
+ color: inherit;
+}
+a.list-group-item-success:hover,
+button.list-group-item-success:hover,
+a.list-group-item-success:focus,
+button.list-group-item-success:focus {
+ color: #ffffff;
+ background-color: #4fbd4f;
+}
+a.list-group-item-success.active,
+button.list-group-item-success.active,
+a.list-group-item-success.active:hover,
+button.list-group-item-success.active:hover,
+a.list-group-item-success.active:focus,
+button.list-group-item-success.active:focus {
+ color: #fff;
+ background-color: #ffffff;
+ border-color: #ffffff;
+}
+.list-group-item-info {
+ color: #ffffff;
+ background-color: #5bc0de;
+}
+a.list-group-item-info,
+button.list-group-item-info {
+ color: #ffffff;
+}
+a.list-group-item-info .list-group-item-heading,
+button.list-group-item-info .list-group-item-heading {
+ color: inherit;
+}
+a.list-group-item-info:hover,
+button.list-group-item-info:hover,
+a.list-group-item-info:focus,
+button.list-group-item-info:focus {
+ color: #ffffff;
+ background-color: #46b8da;
+}
+a.list-group-item-info.active,
+button.list-group-item-info.active,
+a.list-group-item-info.active:hover,
+button.list-group-item-info.active:hover,
+a.list-group-item-info.active:focus,
+button.list-group-item-info.active:focus {
+ color: #fff;
+ background-color: #ffffff;
+ border-color: #ffffff;
+}
+.list-group-item-warning {
+ color: #ffffff;
+ background-color: #f89406;
+}
+a.list-group-item-warning,
+button.list-group-item-warning {
+ color: #ffffff;
+}
+a.list-group-item-warning .list-group-item-heading,
+button.list-group-item-warning .list-group-item-heading {
+ color: inherit;
+}
+a.list-group-item-warning:hover,
+button.list-group-item-warning:hover,
+a.list-group-item-warning:focus,
+button.list-group-item-warning:focus {
+ color: #ffffff;
+ background-color: #df8505;
+}
+a.list-group-item-warning.active,
+button.list-group-item-warning.active,
+a.list-group-item-warning.active:hover,
+button.list-group-item-warning.active:hover,
+a.list-group-item-warning.active:focus,
+button.list-group-item-warning.active:focus {
+ color: #fff;
+ background-color: #ffffff;
+ border-color: #ffffff;
+}
+.list-group-item-danger {
+ color: #ffffff;
+ background-color: #ee5f5b;
+}
+a.list-group-item-danger,
+button.list-group-item-danger {
+ color: #ffffff;
+}
+a.list-group-item-danger .list-group-item-heading,
+button.list-group-item-danger .list-group-item-heading {
+ color: inherit;
+}
+a.list-group-item-danger:hover,
+button.list-group-item-danger:hover,
+a.list-group-item-danger:focus,
+button.list-group-item-danger:focus {
+ color: #ffffff;
+ background-color: #ec4844;
+}
+a.list-group-item-danger.active,
+button.list-group-item-danger.active,
+a.list-group-item-danger.active:hover,
+button.list-group-item-danger.active:hover,
+a.list-group-item-danger.active:focus,
+button.list-group-item-danger.active:focus {
+ color: #fff;
+ background-color: #ffffff;
+ border-color: #ffffff;
+}
+.list-group-item-heading {
+ margin-top: 0;
+ margin-bottom: 5px;
+}
+.list-group-item-text {
+ margin-bottom: 0;
+ line-height: 1.3;
+}
+.panel {
+ margin-bottom: 20px;
+ background-color: #2e3338;
+ border: 1px solid transparent;
+ border-radius: 4px;
+ -webkit-box-shadow: 0 1px 1px rgba(0, 0, 0, 0.05);
+ box-shadow: 0 1px 1px rgba(0, 0, 0, 0.05);
+}
+.panel-body {
+ padding: 15px;
+}
+.panel-heading {
+ padding: 10px 15px;
+ border-bottom: 1px solid transparent;
+ border-top-right-radius: 3px;
+ border-top-left-radius: 3px;
+}
+.panel-heading > .dropdown .dropdown-toggle {
+ color: inherit;
+}
+.panel-title {
+ margin-top: 0;
+ margin-bottom: 0;
+ font-size: 16px;
+ color: inherit;
+}
+.panel-title > a,
+.panel-title > small,
+.panel-title > .small,
+.panel-title > small > a,
+.panel-title > .small > a {
+ color: inherit;
+}
+.panel-footer {
+ padding: 10px 15px;
+ background-color: #3e444c;
+ border-top: 1px solid rgba(0, 0, 0, 0.6);
+ border-bottom-right-radius: 3px;
+ border-bottom-left-radius: 3px;
+}
+.panel > .list-group,
+.panel > .panel-collapse > .list-group {
+ margin-bottom: 0;
+}
+.panel > .list-group .list-group-item,
+.panel > .panel-collapse > .list-group .list-group-item {
+ border-width: 1px 0;
+ border-radius: 0;
+}
+.panel > .list-group:first-child .list-group-item:first-child,
+.panel > .panel-collapse > .list-group:first-child .list-group-item:first-child {
+ border-top: 0;
+ border-top-right-radius: 3px;
+ border-top-left-radius: 3px;
+}
+.panel > .list-group:last-child .list-group-item:last-child,
+.panel > .panel-collapse > .list-group:last-child .list-group-item:last-child {
+ border-bottom: 0;
+ border-bottom-right-radius: 3px;
+ border-bottom-left-radius: 3px;
+}
+.panel > .panel-heading + .panel-collapse > .list-group .list-group-item:first-child {
+ border-top-right-radius: 0;
+ border-top-left-radius: 0;
+}
+.panel-heading + .list-group .list-group-item:first-child {
+ border-top-width: 0;
+}
+.list-group + .panel-footer {
+ border-top-width: 0;
+}
+.panel > .table,
+.panel > .table-responsive > .table,
+.panel > .panel-collapse > .table {
+ margin-bottom: 0;
+}
+.panel > .table caption,
+.panel > .table-responsive > .table caption,
+.panel > .panel-collapse > .table caption {
+ padding-left: 15px;
+ padding-right: 15px;
+}
+.panel > .table:first-child,
+.panel > .table-responsive:first-child > .table:first-child {
+ border-top-right-radius: 3px;
+ border-top-left-radius: 3px;
+}
+.panel > .table:first-child > thead:first-child > tr:first-child,
+.panel > .table-responsive:first-child > .table:first-child > thead:first-child > tr:first-child,
+.panel > .table:first-child > tbody:first-child > tr:first-child,
+.panel > .table-responsive:first-child > .table:first-child > tbody:first-child > tr:first-child {
+ border-top-left-radius: 3px;
+ border-top-right-radius: 3px;
+}
+.panel > .table:first-child > thead:first-child > tr:first-child td:first-child,
+.panel > .table-responsive:first-child > .table:first-child > thead:first-child > tr:first-child td:first-child,
+.panel > .table:first-child > tbody:first-child > tr:first-child td:first-child,
+.panel > .table-responsive:first-child > .table:first-child > tbody:first-child > tr:first-child td:first-child,
+.panel > .table:first-child > thead:first-child > tr:first-child th:first-child,
+.panel > .table-responsive:first-child > .table:first-child > thead:first-child > tr:first-child th:first-child,
+.panel > .table:first-child > tbody:first-child > tr:first-child th:first-child,
+.panel > .table-responsive:first-child > .table:first-child > tbody:first-child > tr:first-child th:first-child {
+ border-top-left-radius: 3px;
+}
+.panel > .table:first-child > thead:first-child > tr:first-child td:last-child,
+.panel > .table-responsive:first-child > .table:first-child > thead:first-child > tr:first-child td:last-child,
+.panel > .table:first-child > tbody:first-child > tr:first-child td:last-child,
+.panel > .table-responsive:first-child > .table:first-child > tbody:first-child > tr:first-child td:last-child,
+.panel > .table:first-child > thead:first-child > tr:first-child th:last-child,
+.panel > .table-responsive:first-child > .table:first-child > thead:first-child > tr:first-child th:last-child,
+.panel > .table:first-child > tbody:first-child > tr:first-child th:last-child,
+.panel > .table-responsive:first-child > .table:first-child > tbody:first-child > tr:first-child th:last-child {
+ border-top-right-radius: 3px;
+}
+.panel > .table:last-child,
+.panel > .table-responsive:last-child > .table:last-child {
+ border-bottom-right-radius: 3px;
+ border-bottom-left-radius: 3px;
+}
+.panel > .table:last-child > tbody:last-child > tr:last-child,
+.panel > .table-responsive:last-child > .table:last-child > tbody:last-child > tr:last-child,
+.panel > .table:last-child > tfoot:last-child > tr:last-child,
+.panel > .table-responsive:last-child > .table:last-child > tfoot:last-child > tr:last-child {
+ border-bottom-left-radius: 3px;
+ border-bottom-right-radius: 3px;
+}
+.panel > .table:last-child > tbody:last-child > tr:last-child td:first-child,
+.panel > .table-responsive:last-child > .table:last-child > tbody:last-child > tr:last-child td:first-child,
+.panel > .table:last-child > tfoot:last-child > tr:last-child td:first-child,
+.panel > .table-responsive:last-child > .table:last-child > tfoot:last-child > tr:last-child td:first-child,
+.panel > .table:last-child > tbody:last-child > tr:last-child th:first-child,
+.panel > .table-responsive:last-child > .table:last-child > tbody:last-child > tr:last-child th:first-child,
+.panel > .table:last-child > tfoot:last-child > tr:last-child th:first-child,
+.panel > .table-responsive:last-child > .table:last-child > tfoot:last-child > tr:last-child th:first-child {
+ border-bottom-left-radius: 3px;
+}
+.panel > .table:last-child > tbody:last-child > tr:last-child td:last-child,
+.panel > .table-responsive:last-child > .table:last-child > tbody:last-child > tr:last-child td:last-child,
+.panel > .table:last-child > tfoot:last-child > tr:last-child td:last-child,
+.panel > .table-responsive:last-child > .table:last-child > tfoot:last-child > tr:last-child td:last-child,
+.panel > .table:last-child > tbody:last-child > tr:last-child th:last-child,
+.panel > .table-responsive:last-child > .table:last-child > tbody:last-child > tr:last-child th:last-child,
+.panel > .table:last-child > tfoot:last-child > tr:last-child th:last-child,
+.panel > .table-responsive:last-child > .table:last-child > tfoot:last-child > tr:last-child th:last-child {
+ border-bottom-right-radius: 3px;
+}
+.panel > .panel-body + .table,
+.panel > .panel-body + .table-responsive,
+.panel > .table + .panel-body,
+.panel > .table-responsive + .panel-body {
+ border-top: 1px solid #1c1e22;
+}
+.panel > .table > tbody:first-child > tr:first-child th,
+.panel > .table > tbody:first-child > tr:first-child td {
+ border-top: 0;
+}
+.panel > .table-bordered,
+.panel > .table-responsive > .table-bordered {
+ border: 0;
+}
+.panel > .table-bordered > thead > tr > th:first-child,
+.panel > .table-responsive > .table-bordered > thead > tr > th:first-child,
+.panel > .table-bordered > tbody > tr > th:first-child,
+.panel > .table-responsive > .table-bordered > tbody > tr > th:first-child,
+.panel > .table-bordered > tfoot > tr > th:first-child,
+.panel > .table-responsive > .table-bordered > tfoot > tr > th:first-child,
+.panel > .table-bordered > thead > tr > td:first-child,
+.panel > .table-responsive > .table-bordered > thead > tr > td:first-child,
+.panel > .table-bordered > tbody > tr > td:first-child,
+.panel > .table-responsive > .table-bordered > tbody > tr > td:first-child,
+.panel > .table-bordered > tfoot > tr > td:first-child,
+.panel > .table-responsive > .table-bordered > tfoot > tr > td:first-child {
+ border-left: 0;
+}
+.panel > .table-bordered > thead > tr > th:last-child,
+.panel > .table-responsive > .table-bordered > thead > tr > th:last-child,
+.panel > .table-bordered > tbody > tr > th:last-child,
+.panel > .table-responsive > .table-bordered > tbody > tr > th:last-child,
+.panel > .table-bordered > tfoot > tr > th:last-child,
+.panel > .table-responsive > .table-bordered > tfoot > tr > th:last-child,
+.panel > .table-bordered > thead > tr > td:last-child,
+.panel > .table-responsive > .table-bordered > thead > tr > td:last-child,
+.panel > .table-bordered > tbody > tr > td:last-child,
+.panel > .table-responsive > .table-bordered > tbody > tr > td:last-child,
+.panel > .table-bordered > tfoot > tr > td:last-child,
+.panel > .table-responsive > .table-bordered > tfoot > tr > td:last-child {
+ border-right: 0;
+}
+.panel > .table-bordered > thead > tr:first-child > td,
+.panel > .table-responsive > .table-bordered > thead > tr:first-child > td,
+.panel > .table-bordered > tbody > tr:first-child > td,
+.panel > .table-responsive > .table-bordered > tbody > tr:first-child > td,
+.panel > .table-bordered > thead > tr:first-child > th,
+.panel > .table-responsive > .table-bordered > thead > tr:first-child > th,
+.panel > .table-bordered > tbody > tr:first-child > th,
+.panel > .table-responsive > .table-bordered > tbody > tr:first-child > th {
+ border-bottom: 0;
+}
+.panel > .table-bordered > tbody > tr:last-child > td,
+.panel > .table-responsive > .table-bordered > tbody > tr:last-child > td,
+.panel > .table-bordered > tfoot > tr:last-child > td,
+.panel > .table-responsive > .table-bordered > tfoot > tr:last-child > td,
+.panel > .table-bordered > tbody > tr:last-child > th,
+.panel > .table-responsive > .table-bordered > tbody > tr:last-child > th,
+.panel > .table-bordered > tfoot > tr:last-child > th,
+.panel > .table-responsive > .table-bordered > tfoot > tr:last-child > th {
+ border-bottom: 0;
+}
+.panel > .table-responsive {
+ border: 0;
+ margin-bottom: 0;
+}
+.panel-group {
+ margin-bottom: 20px;
+}
+.panel-group .panel {
+ margin-bottom: 0;
+ border-radius: 4px;
+}
+.panel-group .panel + .panel {
+ margin-top: 5px;
+}
+.panel-group .panel-heading {
+ border-bottom: 0;
+}
+.panel-group .panel-heading + .panel-collapse > .panel-body,
+.panel-group .panel-heading + .panel-collapse > .list-group {
+ border-top: 1px solid rgba(0, 0, 0, 0.6);
+}
+.panel-group .panel-footer {
+ border-top: 0;
+}
+.panel-group .panel-footer + .panel-collapse .panel-body {
+ border-bottom: 1px solid rgba(0, 0, 0, 0.6);
+}
+.panel-default {
+ border-color: rgba(0, 0, 0, 0.6);
+}
+.panel-default > .panel-heading {
+ color: #c8c8c8;
+ background-color: #3e444c;
+ border-color: rgba(0, 0, 0, 0.6);
+}
+.panel-default > .panel-heading + .panel-collapse > .panel-body {
+ border-top-color: rgba(0, 0, 0, 0.6);
+}
+.panel-default > .panel-heading .badge {
+ color: #3e444c;
+ background-color: #c8c8c8;
+}
+.panel-default > .panel-footer + .panel-collapse > .panel-body {
+ border-bottom-color: rgba(0, 0, 0, 0.6);
+}
+.panel-primary {
+ border-color: rgba(0, 0, 0, 0.6);
+}
+.panel-primary > .panel-heading {
+ color: #ffffff;
+ background-color: #7a8288;
+ border-color: rgba(0, 0, 0, 0.6);
+}
+.panel-primary > .panel-heading + .panel-collapse > .panel-body {
+ border-top-color: rgba(0, 0, 0, 0.6);
+}
+.panel-primary > .panel-heading .badge {
+ color: #7a8288;
+ background-color: #ffffff;
+}
+.panel-primary > .panel-footer + .panel-collapse > .panel-body {
+ border-bottom-color: rgba(0, 0, 0, 0.6);
+}
+.panel-success {
+ border-color: rgba(0, 0, 0, 0.6);
+}
+.panel-success > .panel-heading {
+ color: #ffffff;
+ background-color: #62c462;
+ border-color: rgba(0, 0, 0, 0.6);
+}
+.panel-success > .panel-heading + .panel-collapse > .panel-body {
+ border-top-color: rgba(0, 0, 0, 0.6);
+}
+.panel-success > .panel-heading .badge {
+ color: #62c462;
+ background-color: #ffffff;
+}
+.panel-success > .panel-footer + .panel-collapse > .panel-body {
+ border-bottom-color: rgba(0, 0, 0, 0.6);
+}
+.panel-info {
+ border-color: rgba(0, 0, 0, 0.6);
+}
+.panel-info > .panel-heading {
+ color: #ffffff;
+ background-color: #5bc0de;
+ border-color: rgba(0, 0, 0, 0.6);
+}
+.panel-info > .panel-heading + .panel-collapse > .panel-body {
+ border-top-color: rgba(0, 0, 0, 0.6);
+}
+.panel-info > .panel-heading .badge {
+ color: #5bc0de;
+ background-color: #ffffff;
+}
+.panel-info > .panel-footer + .panel-collapse > .panel-body {
+ border-bottom-color: rgba(0, 0, 0, 0.6);
+}
+.panel-warning {
+ border-color: rgba(0, 0, 0, 0.6);
+}
+.panel-warning > .panel-heading {
+ color: #ffffff;
+ background-color: #f89406;
+ border-color: rgba(0, 0, 0, 0.6);
+}
+.panel-warning > .panel-heading + .panel-collapse > .panel-body {
+ border-top-color: rgba(0, 0, 0, 0.6);
+}
+.panel-warning > .panel-heading .badge {
+ color: #f89406;
+ background-color: #ffffff;
+}
+.panel-warning > .panel-footer + .panel-collapse > .panel-body {
+ border-bottom-color: rgba(0, 0, 0, 0.6);
+}
+.panel-danger {
+ border-color: rgba(0, 0, 0, 0.6);
+}
+.panel-danger > .panel-heading {
+ color: #ffffff;
+ background-color: #ee5f5b;
+ border-color: rgba(0, 0, 0, 0.6);
+}
+.panel-danger > .panel-heading + .panel-collapse > .panel-body {
+ border-top-color: rgba(0, 0, 0, 0.6);
+}
+.panel-danger > .panel-heading .badge {
+ color: #ee5f5b;
+ background-color: #ffffff;
+}
+.panel-danger > .panel-footer + .panel-collapse > .panel-body {
+ border-bottom-color: rgba(0, 0, 0, 0.6);
+}
+.embed-responsive {
+ position: relative;
+ display: block;
+ height: 0;
+ padding: 0;
+ overflow: hidden;
+}
+.embed-responsive .embed-responsive-item,
+.embed-responsive iframe,
+.embed-responsive embed,
+.embed-responsive object,
+.embed-responsive video {
+ position: absolute;
+ top: 0;
+ left: 0;
+ bottom: 0;
+ height: 100%;
+ width: 100%;
+ border: 0;
+}
+.embed-responsive-16by9 {
+ padding-bottom: 56.25%;
+}
+.embed-responsive-4by3 {
+ padding-bottom: 75%;
+}
+.well {
+ min-height: 20px;
+ padding: 19px;
+ margin-bottom: 20px;
+ background-color: #1c1e22;
+ border: 1px solid #0c0d0e;
+ border-radius: 4px;
+ -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.05);
+ box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.05);
+}
+.well blockquote {
+ border-color: #ddd;
+ border-color: rgba(0, 0, 0, 0.15);
+}
+.well-lg {
+ padding: 24px;
+ border-radius: 6px;
+}
+.well-sm {
+ padding: 9px;
+ border-radius: 3px;
+}
+.close {
+ float: right;
+ font-size: 21px;
+ font-weight: bold;
+ line-height: 1;
+ color: #000000;
+ text-shadow: 0 1px 0 #ffffff;
+ opacity: 0.2;
+ filter: alpha(opacity=20);
+}
+.close:hover,
+.close:focus {
+ color: #000000;
+ text-decoration: none;
+ cursor: pointer;
+ opacity: 0.5;
+ filter: alpha(opacity=50);
+}
+button.close {
+ padding: 0;
+ cursor: pointer;
+ background: transparent;
+ border: 0;
+ -webkit-appearance: none;
+}
+.modal-open {
+ overflow: hidden;
+}
+.modal {
+ display: none;
+ overflow: hidden;
+ position: fixed;
+ top: 0;
+ right: 0;
+ bottom: 0;
+ left: 0;
+ z-index: 1050;
+ -webkit-overflow-scrolling: touch;
+ outline: 0;
+}
+.modal.fade .modal-dialog {
+ -webkit-transform: translate(0, -25%);
+ -ms-transform: translate(0, -25%);
+ -o-transform: translate(0, -25%);
+ transform: translate(0, -25%);
+ -webkit-transition: -webkit-transform 0.3s ease-out;
+ -o-transition: -o-transform 0.3s ease-out;
+ transition: transform 0.3s ease-out;
+}
+.modal.in .modal-dialog {
+ -webkit-transform: translate(0, 0);
+ -ms-transform: translate(0, 0);
+ -o-transform: translate(0, 0);
+ transform: translate(0, 0);
+}
+.modal-open .modal {
+ overflow-x: hidden;
+ overflow-y: auto;
+}
+.modal-dialog {
+ position: relative;
+ width: auto;
+ margin: 10px;
+}
+.modal-content {
+ position: relative;
+ background-color: #2e3338;
+ border: 1px solid #999999;
+ border: 1px solid rgba(0, 0, 0, 0.2);
+ border-radius: 6px;
+ -webkit-box-shadow: 0 3px 9px rgba(0, 0, 0, 0.5);
+ box-shadow: 0 3px 9px rgba(0, 0, 0, 0.5);
+ -webkit-background-clip: padding-box;
+ background-clip: padding-box;
+ outline: 0;
+}
+.modal-backdrop {
+ position: fixed;
+ top: 0;
+ right: 0;
+ bottom: 0;
+ left: 0;
+ z-index: 1040;
+ background-color: #000000;
+}
+.modal-backdrop.fade {
+ opacity: 0;
+ filter: alpha(opacity=0);
+}
+.modal-backdrop.in {
+ opacity: 0.5;
+ filter: alpha(opacity=50);
+}
+.modal-header {
+ padding: 15px;
+ border-bottom: 1px solid #1c1e22;
+}
+.modal-header .close {
+ margin-top: -2px;
+}
+.modal-title {
+ margin: 0;
+ line-height: 1.42857143;
+}
+.modal-body {
+ position: relative;
+ padding: 20px;
+}
+.modal-footer {
+ padding: 20px;
+ text-align: right;
+ border-top: 1px solid #1c1e22;
+}
+.modal-footer .btn + .btn {
+ margin-left: 5px;
+ margin-bottom: 0;
+}
+.modal-footer .btn-group .btn + .btn {
+ margin-left: -1px;
+}
+.modal-footer .btn-block + .btn-block {
+ margin-left: 0;
+}
+.modal-scrollbar-measure {
+ position: absolute;
+ top: -9999px;
+ width: 50px;
+ height: 50px;
+ overflow: scroll;
+}
+@media (min-width: 768px) {
+ .modal-dialog {
+ width: 600px;
+ margin: 30px auto;
+ }
+ .modal-content {
+ -webkit-box-shadow: 0 5px 15px rgba(0, 0, 0, 0.5);
+ box-shadow: 0 5px 15px rgba(0, 0, 0, 0.5);
+ }
+ .modal-sm {
+ width: 300px;
+ }
+}
+@media (min-width: 992px) {
+ .modal-lg {
+ width: 900px;
+ }
+}
+.tooltip {
+ position: absolute;
+ z-index: 1070;
+ display: block;
+ font-family: "Helvetica Neue", Helvetica, Arial, sans-serif;
+ font-style: normal;
+ font-weight: normal;
+ letter-spacing: normal;
+ line-break: auto;
+ line-height: 1.42857143;
+ text-align: left;
+ text-align: start;
+ text-decoration: none;
+ text-shadow: none;
+ text-transform: none;
+ white-space: normal;
+ word-break: normal;
+ word-spacing: normal;
+ word-wrap: normal;
+ font-size: 12px;
+ opacity: 0;
+ filter: alpha(opacity=0);
+}
+.tooltip.in {
+ opacity: 0.9;
+ filter: alpha(opacity=90);
+}
+.tooltip.top {
+ margin-top: -3px;
+ padding: 5px 0;
+}
+.tooltip.right {
+ margin-left: 3px;
+ padding: 0 5px;
+}
+.tooltip.bottom {
+ margin-top: 3px;
+ padding: 5px 0;
+}
+.tooltip.left {
+ margin-left: -3px;
+ padding: 0 5px;
+}
+.tooltip-inner {
+ max-width: 200px;
+ padding: 3px 8px;
+ color: #ffffff;
+ text-align: center;
+ background-color: #000000;
+ border-radius: 4px;
+}
+.tooltip-arrow {
+ position: absolute;
+ width: 0;
+ height: 0;
+ border-color: transparent;
+ border-style: solid;
+}
+.tooltip.top .tooltip-arrow {
+ bottom: 0;
+ left: 50%;
+ margin-left: -5px;
+ border-width: 5px 5px 0;
+ border-top-color: #000000;
+}
+.tooltip.top-left .tooltip-arrow {
+ bottom: 0;
+ right: 5px;
+ margin-bottom: -5px;
+ border-width: 5px 5px 0;
+ border-top-color: #000000;
+}
+.tooltip.top-right .tooltip-arrow {
+ bottom: 0;
+ left: 5px;
+ margin-bottom: -5px;
+ border-width: 5px 5px 0;
+ border-top-color: #000000;
+}
+.tooltip.right .tooltip-arrow {
+ top: 50%;
+ left: 0;
+ margin-top: -5px;
+ border-width: 5px 5px 5px 0;
+ border-right-color: #000000;
+}
+.tooltip.left .tooltip-arrow {
+ top: 50%;
+ right: 0;
+ margin-top: -5px;
+ border-width: 5px 0 5px 5px;
+ border-left-color: #000000;
+}
+.tooltip.bottom .tooltip-arrow {
+ top: 0;
+ left: 50%;
+ margin-left: -5px;
+ border-width: 0 5px 5px;
+ border-bottom-color: #000000;
+}
+.tooltip.bottom-left .tooltip-arrow {
+ top: 0;
+ right: 5px;
+ margin-top: -5px;
+ border-width: 0 5px 5px;
+ border-bottom-color: #000000;
+}
+.tooltip.bottom-right .tooltip-arrow {
+ top: 0;
+ left: 5px;
+ margin-top: -5px;
+ border-width: 0 5px 5px;
+ border-bottom-color: #000000;
+}
+.popover {
+ position: absolute;
+ top: 0;
+ left: 0;
+ z-index: 1060;
+ display: none;
+ max-width: 276px;
+ padding: 1px;
+ font-family: "Helvetica Neue", Helvetica, Arial, sans-serif;
+ font-style: normal;
+ font-weight: normal;
+ letter-spacing: normal;
+ line-break: auto;
+ line-height: 1.42857143;
+ text-align: left;
+ text-align: start;
+ text-decoration: none;
+ text-shadow: none;
+ text-transform: none;
+ white-space: normal;
+ word-break: normal;
+ word-spacing: normal;
+ word-wrap: normal;
+ font-size: 14px;
+ background-color: #2e3338;
+ -webkit-background-clip: padding-box;
+ background-clip: padding-box;
+ border: 1px solid #999999;
+ border: 1px solid rgba(0, 0, 0, 0.2);
+ border-radius: 6px;
+ -webkit-box-shadow: 0 5px 10px rgba(0, 0, 0, 0.2);
+ box-shadow: 0 5px 10px rgba(0, 0, 0, 0.2);
+}
+.popover.top {
+ margin-top: -10px;
+}
+.popover.right {
+ margin-left: 10px;
+}
+.popover.bottom {
+ margin-top: 10px;
+}
+.popover.left {
+ margin-left: -10px;
+}
+.popover-title {
+ margin: 0;
+ padding: 8px 14px;
+ font-size: 14px;
+ background-color: #2e3338;
+ border-bottom: 1px solid #22262a;
+ border-radius: 5px 5px 0 0;
+}
+.popover-content {
+ padding: 9px 14px;
+}
+.popover > .arrow,
+.popover > .arrow:after {
+ position: absolute;
+ display: block;
+ width: 0;
+ height: 0;
+ border-color: transparent;
+ border-style: solid;
+}
+.popover > .arrow {
+ border-width: 11px;
+}
+.popover > .arrow:after {
+ border-width: 10px;
+ content: "";
+}
+.popover.top > .arrow {
+ left: 50%;
+ margin-left: -11px;
+ border-bottom-width: 0;
+ border-top-color: #666666;
+ border-top-color: rgba(0, 0, 0, 0.25);
+ bottom: -11px;
+}
+.popover.top > .arrow:after {
+ content: " ";
+ bottom: 1px;
+ margin-left: -10px;
+ border-bottom-width: 0;
+ border-top-color: #2e3338;
+}
+.popover.right > .arrow {
+ top: 50%;
+ left: -11px;
+ margin-top: -11px;
+ border-left-width: 0;
+ border-right-color: #666666;
+ border-right-color: rgba(0, 0, 0, 0.25);
+}
+.popover.right > .arrow:after {
+ content: " ";
+ left: 1px;
+ bottom: -10px;
+ border-left-width: 0;
+ border-right-color: #2e3338;
+}
+.popover.bottom > .arrow {
+ left: 50%;
+ margin-left: -11px;
+ border-top-width: 0;
+ border-bottom-color: #666666;
+ border-bottom-color: rgba(0, 0, 0, 0.25);
+ top: -11px;
+}
+.popover.bottom > .arrow:after {
+ content: " ";
+ top: 1px;
+ margin-left: -10px;
+ border-top-width: 0;
+ border-bottom-color: #2e3338;
+}
+.popover.left > .arrow {
+ top: 50%;
+ right: -11px;
+ margin-top: -11px;
+ border-right-width: 0;
+ border-left-color: #666666;
+ border-left-color: rgba(0, 0, 0, 0.25);
+}
+.popover.left > .arrow:after {
+ content: " ";
+ right: 1px;
+ border-right-width: 0;
+ border-left-color: #2e3338;
+ bottom: -10px;
+}
+.carousel {
+ position: relative;
+}
+.carousel-inner {
+ position: relative;
+ overflow: hidden;
+ width: 100%;
+}
+.carousel-inner > .item {
+ display: none;
+ position: relative;
+ -webkit-transition: 0.6s ease-in-out left;
+ -o-transition: 0.6s ease-in-out left;
+ transition: 0.6s ease-in-out left;
+}
+.carousel-inner > .item > img,
+.carousel-inner > .item > a > img {
+ line-height: 1;
+}
+@media all and (transform-3d), (-webkit-transform-3d) {
+ .carousel-inner > .item {
+ -webkit-transition: -webkit-transform 0.6s ease-in-out;
+ -o-transition: -o-transform 0.6s ease-in-out;
+ transition: transform 0.6s ease-in-out;
+ -webkit-backface-visibility: hidden;
+ backface-visibility: hidden;
+ -webkit-perspective: 1000px;
+ perspective: 1000px;
+ }
+ .carousel-inner > .item.next,
+ .carousel-inner > .item.active.right {
+ -webkit-transform: translate3d(100%, 0, 0);
+ transform: translate3d(100%, 0, 0);
+ left: 0;
+ }
+ .carousel-inner > .item.prev,
+ .carousel-inner > .item.active.left {
+ -webkit-transform: translate3d(-100%, 0, 0);
+ transform: translate3d(-100%, 0, 0);
+ left: 0;
+ }
+ .carousel-inner > .item.next.left,
+ .carousel-inner > .item.prev.right,
+ .carousel-inner > .item.active {
+ -webkit-transform: translate3d(0, 0, 0);
+ transform: translate3d(0, 0, 0);
+ left: 0;
+ }
+}
+.carousel-inner > .active,
+.carousel-inner > .next,
+.carousel-inner > .prev {
+ display: block;
+}
+.carousel-inner > .active {
+ left: 0;
+}
+.carousel-inner > .next,
+.carousel-inner > .prev {
+ position: absolute;
+ top: 0;
+ width: 100%;
+}
+.carousel-inner > .next {
+ left: 100%;
+}
+.carousel-inner > .prev {
+ left: -100%;
+}
+.carousel-inner > .next.left,
+.carousel-inner > .prev.right {
+ left: 0;
+}
+.carousel-inner > .active.left {
+ left: -100%;
+}
+.carousel-inner > .active.right {
+ left: 100%;
+}
+.carousel-control {
+ position: absolute;
+ top: 0;
+ left: 0;
+ bottom: 0;
+ width: 15%;
+ opacity: 0.5;
+ filter: alpha(opacity=50);
+ font-size: 20px;
+ color: #ffffff;
+ text-align: center;
+ text-shadow: 0 1px 2px rgba(0, 0, 0, 0.6);
+ background-color: rgba(0, 0, 0, 0);
+}
+.carousel-control.left {
+ background-image: -webkit-linear-gradient(left, rgba(0, 0, 0, 0.5) 0%, rgba(0, 0, 0, 0.0001) 100%);
+ background-image: -o-linear-gradient(left, rgba(0, 0, 0, 0.5) 0%, rgba(0, 0, 0, 0.0001) 100%);
+ background-image: -webkit-gradient(linear, left top, right top, from(rgba(0, 0, 0, 0.5)), to(rgba(0, 0, 0, 0.0001)));
+ background-image: linear-gradient(to right, rgba(0, 0, 0, 0.5) 0%, rgba(0, 0, 0, 0.0001) 100%);
+ background-repeat: repeat-x;
+ filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#80000000', endColorstr='#00000000', GradientType=1);
+}
+.carousel-control.right {
+ left: auto;
+ right: 0;
+ background-image: -webkit-linear-gradient(left, rgba(0, 0, 0, 0.0001) 0%, rgba(0, 0, 0, 0.5) 100%);
+ background-image: -o-linear-gradient(left, rgba(0, 0, 0, 0.0001) 0%, rgba(0, 0, 0, 0.5) 100%);
+ background-image: -webkit-gradient(linear, left top, right top, from(rgba(0, 0, 0, 0.0001)), to(rgba(0, 0, 0, 0.5)));
+ background-image: linear-gradient(to right, rgba(0, 0, 0, 0.0001) 0%, rgba(0, 0, 0, 0.5) 100%);
+ background-repeat: repeat-x;
+ filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#00000000', endColorstr='#80000000', GradientType=1);
+}
+.carousel-control:hover,
+.carousel-control:focus {
+ outline: 0;
+ color: #ffffff;
+ text-decoration: none;
+ opacity: 0.9;
+ filter: alpha(opacity=90);
+}
+.carousel-control .icon-prev,
+.carousel-control .icon-next,
+.carousel-control .glyphicon-chevron-left,
+.carousel-control .glyphicon-chevron-right {
+ position: absolute;
+ top: 50%;
+ margin-top: -10px;
+ z-index: 5;
+ display: inline-block;
+}
+.carousel-control .icon-prev,
+.carousel-control .glyphicon-chevron-left {
+ left: 50%;
+ margin-left: -10px;
+}
+.carousel-control .icon-next,
+.carousel-control .glyphicon-chevron-right {
+ right: 50%;
+ margin-right: -10px;
+}
+.carousel-control .icon-prev,
+.carousel-control .icon-next {
+ width: 20px;
+ height: 20px;
+ line-height: 1;
+ font-family: serif;
+}
+.carousel-control .icon-prev:before {
+ content: '\2039';
+}
+.carousel-control .icon-next:before {
+ content: '\203a';
+}
+.carousel-indicators {
+ position: absolute;
+ bottom: 10px;
+ left: 50%;
+ z-index: 15;
+ width: 60%;
+ margin-left: -30%;
+ padding-left: 0;
+ list-style: none;
+ text-align: center;
+}
+.carousel-indicators li {
+ display: inline-block;
+ width: 10px;
+ height: 10px;
+ margin: 1px;
+ text-indent: -999px;
+ border: 1px solid #ffffff;
+ border-radius: 10px;
+ cursor: pointer;
+ background-color: #000 \9;
+ background-color: rgba(0, 0, 0, 0);
+}
+.carousel-indicators .active {
+ margin: 0;
+ width: 12px;
+ height: 12px;
+ background-color: #ffffff;
+}
+.carousel-caption {
+ position: absolute;
+ left: 15%;
+ right: 15%;
+ bottom: 20px;
+ z-index: 10;
+ padding-top: 20px;
+ padding-bottom: 20px;
+ color: #ffffff;
+ text-align: center;
+ text-shadow: 0 1px 2px rgba(0, 0, 0, 0.6);
+}
+.carousel-caption .btn {
+ text-shadow: none;
+}
+@media screen and (min-width: 768px) {
+ .carousel-control .glyphicon-chevron-left,
+ .carousel-control .glyphicon-chevron-right,
+ .carousel-control .icon-prev,
+ .carousel-control .icon-next {
+ width: 30px;
+ height: 30px;
+ margin-top: -10px;
+ font-size: 30px;
+ }
+ .carousel-control .glyphicon-chevron-left,
+ .carousel-control .icon-prev {
+ margin-left: -10px;
+ }
+ .carousel-control .glyphicon-chevron-right,
+ .carousel-control .icon-next {
+ margin-right: -10px;
+ }
+ .carousel-caption {
+ left: 20%;
+ right: 20%;
+ padding-bottom: 30px;
+ }
+ .carousel-indicators {
+ bottom: 20px;
+ }
+}
+.clearfix:before,
+.clearfix:after,
+.dl-horizontal dd:before,
+.dl-horizontal dd:after,
+.container:before,
+.container:after,
+.container-fluid:before,
+.container-fluid:after,
+.row:before,
+.row:after,
+.form-horizontal .form-group:before,
+.form-horizontal .form-group:after,
+.btn-toolbar:before,
+.btn-toolbar:after,
+.btn-group-vertical > .btn-group:before,
+.btn-group-vertical > .btn-group:after,
+.nav:before,
+.nav:after,
+.navbar:before,
+.navbar:after,
+.navbar-header:before,
+.navbar-header:after,
+.navbar-collapse:before,
+.navbar-collapse:after,
+.pager:before,
+.pager:after,
+.panel-body:before,
+.panel-body:after,
+.modal-header:before,
+.modal-header:after,
+.modal-footer:before,
+.modal-footer:after {
+ content: " ";
+ display: table;
+}
+.clearfix:after,
+.dl-horizontal dd:after,
+.container:after,
+.container-fluid:after,
+.row:after,
+.form-horizontal .form-group:after,
+.btn-toolbar:after,
+.btn-group-vertical > .btn-group:after,
+.nav:after,
+.navbar:after,
+.navbar-header:after,
+.navbar-collapse:after,
+.pager:after,
+.panel-body:after,
+.modal-header:after,
+.modal-footer:after {
+ clear: both;
+}
+.center-block {
+ display: block;
+ margin-left: auto;
+ margin-right: auto;
+}
+.pull-right {
+ float: right !important;
+}
+.pull-left {
+ float: left !important;
+}
+.hide {
+ display: none !important;
+}
+.show {
+ display: block !important;
+}
+.invisible {
+ visibility: hidden;
+}
+.text-hide {
+ font: 0/0 a;
+ color: transparent;
+ text-shadow: none;
+ background-color: transparent;
+ border: 0;
+}
+.hidden {
+ display: none !important;
+}
+.affix {
+ position: fixed;
+}
+@-ms-viewport {
+ width: device-width;
+}
+.visible-xs,
+.visible-sm,
+.visible-md,
+.visible-lg {
+ display: none !important;
+}
+.visible-xs-block,
+.visible-xs-inline,
+.visible-xs-inline-block,
+.visible-sm-block,
+.visible-sm-inline,
+.visible-sm-inline-block,
+.visible-md-block,
+.visible-md-inline,
+.visible-md-inline-block,
+.visible-lg-block,
+.visible-lg-inline,
+.visible-lg-inline-block {
+ display: none !important;
+}
+@media (max-width: 767px) {
+ .visible-xs {
+ display: block !important;
+ }
+ table.visible-xs {
+ display: table !important;
+ }
+ tr.visible-xs {
+ display: table-row !important;
+ }
+ th.visible-xs,
+ td.visible-xs {
+ display: table-cell !important;
+ }
+}
+@media (max-width: 767px) {
+ .visible-xs-block {
+ display: block !important;
+ }
+}
+@media (max-width: 767px) {
+ .visible-xs-inline {
+ display: inline !important;
+ }
+}
+@media (max-width: 767px) {
+ .visible-xs-inline-block {
+ display: inline-block !important;
+ }
+}
+@media (min-width: 768px) and (max-width: 991px) {
+ .visible-sm {
+ display: block !important;
+ }
+ table.visible-sm {
+ display: table !important;
+ }
+ tr.visible-sm {
+ display: table-row !important;
+ }
+ th.visible-sm,
+ td.visible-sm {
+ display: table-cell !important;
+ }
+}
+@media (min-width: 768px) and (max-width: 991px) {
+ .visible-sm-block {
+ display: block !important;
+ }
+}
+@media (min-width: 768px) and (max-width: 991px) {
+ .visible-sm-inline {
+ display: inline !important;
+ }
+}
+@media (min-width: 768px) and (max-width: 991px) {
+ .visible-sm-inline-block {
+ display: inline-block !important;
+ }
+}
+@media (min-width: 992px) and (max-width: 1199px) {
+ .visible-md {
+ display: block !important;
+ }
+ table.visible-md {
+ display: table !important;
+ }
+ tr.visible-md {
+ display: table-row !important;
+ }
+ th.visible-md,
+ td.visible-md {
+ display: table-cell !important;
+ }
+}
+@media (min-width: 992px) and (max-width: 1199px) {
+ .visible-md-block {
+ display: block !important;
+ }
+}
+@media (min-width: 992px) and (max-width: 1199px) {
+ .visible-md-inline {
+ display: inline !important;
+ }
+}
+@media (min-width: 992px) and (max-width: 1199px) {
+ .visible-md-inline-block {
+ display: inline-block !important;
+ }
+}
+@media (min-width: 1200px) {
+ .visible-lg {
+ display: block !important;
+ }
+ table.visible-lg {
+ display: table !important;
+ }
+ tr.visible-lg {
+ display: table-row !important;
+ }
+ th.visible-lg,
+ td.visible-lg {
+ display: table-cell !important;
+ }
+}
+@media (min-width: 1200px) {
+ .visible-lg-block {
+ display: block !important;
+ }
+}
+@media (min-width: 1200px) {
+ .visible-lg-inline {
+ display: inline !important;
+ }
+}
+@media (min-width: 1200px) {
+ .visible-lg-inline-block {
+ display: inline-block !important;
+ }
+}
+@media (max-width: 767px) {
+ .hidden-xs {
+ display: none !important;
+ }
+}
+@media (min-width: 768px) and (max-width: 991px) {
+ .hidden-sm {
+ display: none !important;
+ }
+}
+@media (min-width: 992px) and (max-width: 1199px) {
+ .hidden-md {
+ display: none !important;
+ }
+}
+@media (min-width: 1200px) {
+ .hidden-lg {
+ display: none !important;
+ }
+}
+.visible-print {
+ display: none !important;
+}
+@media print {
+ .visible-print {
+ display: block !important;
+ }
+ table.visible-print {
+ display: table !important;
+ }
+ tr.visible-print {
+ display: table-row !important;
+ }
+ th.visible-print,
+ td.visible-print {
+ display: table-cell !important;
+ }
+}
+.visible-print-block {
+ display: none !important;
+}
+@media print {
+ .visible-print-block {
+ display: block !important;
+ }
+}
+.visible-print-inline {
+ display: none !important;
+}
+@media print {
+ .visible-print-inline {
+ display: inline !important;
+ }
+}
+.visible-print-inline-block {
+ display: none !important;
+}
+@media print {
+ .visible-print-inline-block {
+ display: inline-block !important;
+ }
+}
+@media print {
+ .hidden-print {
+ display: none !important;
+ }
+}
+.navbar-default,
+.navbar-inverse {
+ border: 1px solid rgba(0, 0, 0, 0.6);
+ text-shadow: 1px 1px 1px rgba(0, 0, 0, 0.3);
+}
+@media (min-width: 768px) {
+ .navbar-default .navbar-nav > li > a,
+ .navbar-inverse .navbar-nav > li > a {
+ border-right: 1px solid rgba(0, 0, 0, 0.2);
+ border-left: 1px solid rgba(255, 255, 255, 0.1);
+ }
+ .navbar-default .navbar-nav > li > a:hover,
+ .navbar-inverse .navbar-nav > li > a:hover {
+ border-left-color: transparent;
+ }
+ .navbar-default .nav .open > a,
+ .navbar-inverse .nav .open > a {
+ border-color: transparent;
+ }
+ .navbar-default .navbar-nav > li.active > a,
+ .navbar-inverse .navbar-nav > li.active > a {
+ border-left-color: transparent;
+ }
+ .navbar-default .navbar-form,
+ .navbar-inverse .navbar-form {
+ margin-left: 5px;
+ margin-right: 5px;
+ }
+}
+.navbar-default {
+ -webkit-filter: none;
+ filter: none;
+}
+.navbar-default .navbar-nav > li > a:hover {
+ -webkit-filter: none;
+ filter: none;
+}
+.navbar-inverse {
+ -webkit-filter: none;
+ filter: none;
+}
+.navbar-inverse .badge {
+ background-color: #5d6368;
+}
+.navbar-inverse .navbar-nav > li > a:hover {
+ -webkit-filter: none;
+ filter: none;
+}
+.btn,
+.btn:hover {
+ border-color: rgba(0, 0, 0, 0.6);
+ text-shadow: 1px 1px 1px rgba(0, 0, 0, 0.3);
+}
+.btn-default {
+ -webkit-filter: none;
+ filter: none;
+}
+.btn-default:hover {
+ -webkit-filter: none;
+ filter: none;
+}
+.btn-primary {
+ -webkit-filter: none;
+ filter: none;
+}
+.btn-primary:hover {
+ -webkit-filter: none;
+ filter: none;
+}
+.btn-success {
+ -webkit-filter: none;
+ filter: none;
+}
+.btn-success:hover {
+ -webkit-filter: none;
+ filter: none;
+}
+.btn-info {
+ -webkit-filter: none;
+ filter: none;
+}
+.btn-info:hover {
+ -webkit-filter: none;
+ filter: none;
+}
+.btn-warning {
+ -webkit-filter: none;
+ filter: none;
+}
+.btn-warning:hover {
+ -webkit-filter: none;
+ filter: none;
+}
+.btn-danger {
+ -webkit-filter: none;
+ filter: none;
+}
+.btn-danger:hover {
+ -webkit-filter: none;
+ filter: none;
+}
+.btn-link,
+.btn-link:hover {
+ border-color: transparent;
+}
+h1,
+h2,
+h3,
+h4,
+h5,
+h6 {
+ text-shadow: -1px -1px 0 rgba(0, 0, 0, 0.3);
+}
+.text-primary,
+.text-primary:hover {
+ color: #7a8288;
+}
+.text-success,
+.text-success:hover {
+ color: #62c462;
+}
+.text-danger,
+.text-danger:hover {
+ color: #ee5f5b;
+}
+.text-warning,
+.text-warning:hover {
+ color: #f89406;
+}
+.text-info,
+.text-info:hover {
+ color: #5bc0de;
+}
+.table .success,
+.table .warning,
+.table .danger,
+.table .info {
+ color: #fff;
+}
+.table-bordered tbody tr.success td,
+.table-bordered tbody tr.warning td,
+.table-bordered tbody tr.danger td,
+.table-bordered tbody tr.success:hover td,
+.table-bordered tbody tr.warning:hover td,
+.table-bordered tbody tr.danger:hover td {
+ border-color: #1c1e22;
+}
+.table-responsive > .table {
+ background-color: #2e3338;
+}
+input,
+textarea {
+ color: #272b30;
+}
+.has-warning .help-block,
+.has-warning .control-label,
+.has-warning .radio,
+.has-warning .checkbox,
+.has-warning .radio-inline,
+.has-warning .checkbox-inline,
+.has-warning.radio label,
+.has-warning.checkbox label,
+.has-warning.radio-inline label,
+.has-warning.checkbox-inline label,
+.has-warning .form-control-feedback {
+ color: #f89406;
+}
+.has-warning .form-control,
+.has-warning .form-control:focus {
+ border-color: #f89406;
+}
+.has-warning .input-group-addon {
+ border-color: rgba(0, 0, 0, 0.6);
+}
+.has-error .help-block,
+.has-error .control-label,
+.has-error .radio,
+.has-error .checkbox,
+.has-error .radio-inline,
+.has-error .checkbox-inline,
+.has-error.radio label,
+.has-error.checkbox label,
+.has-error.radio-inline label,
+.has-error.checkbox-inline label,
+.has-error .form-control-feedback {
+ color: #ee5f5b;
+}
+.has-error .form-control,
+.has-error .form-control:focus {
+ border-color: #ee5f5b;
+}
+.has-error .input-group-addon {
+ border-color: rgba(0, 0, 0, 0.6);
+}
+.has-success .help-block,
+.has-success .control-label,
+.has-success .radio,
+.has-success .checkbox,
+.has-success .radio-inline,
+.has-success .checkbox-inline,
+.has-success.radio label,
+.has-success.checkbox label,
+.has-success.radio-inline label,
+.has-success.checkbox-inline label,
+.has-success .form-control-feedback {
+ color: #62c462;
+}
+.has-success .form-control,
+.has-success .form-control:focus {
+ border-color: #62c462;
+}
+.has-success .input-group-addon {
+ border-color: rgba(0, 0, 0, 0.6);
+}
+legend {
+ color: #fff;
+}
+.input-group-addon {
+ -webkit-filter: none;
+ filter: none;
+ text-shadow: 1px 1px 1px rgba(0, 0, 0, 0.3);
+ color: #ffffff;
+}
+.nav .open > a,
+.nav .open > a:hover,
+.nav .open > a:focus {
+ border-color: rgba(0, 0, 0, 0.6);
+}
+.nav-pills > li > a {
+ -webkit-filter: none;
+ filter: none;
+ border: 1px solid rgba(0, 0, 0, 0.6);
+ text-shadow: 1px 1px 1px rgba(0, 0, 0, 0.3);
+}
+.nav-pills > li > a:hover {
+ -webkit-filter: none;
+ filter: none;
+ border: 1px solid rgba(0, 0, 0, 0.6);
+}
+.nav-pills > li.active > a,
+.nav-pills > li.active > a:hover {
+  background-color: transparent;
+ -webkit-filter: none;
+ filter: none;
+ border: 1px solid rgba(0, 0, 0, 0.6);
+}
+.nav-pills > li.disabled > a,
+.nav-pills > li.disabled > a:hover {
+ -webkit-filter: none;
+ filter: none;
+}
+.pagination > li > a,
+.pagination > li > span {
+ text-shadow: 1px 1px 1px rgba(0, 0, 0, 0.3);
+ -webkit-filter: none;
+ filter: none;
+}
+.pagination > li > a:hover,
+.pagination > li > span:hover {
+ -webkit-filter: none;
+ filter: none;
+}
+.pagination > li.active > a,
+.pagination > li.active > span {
+ -webkit-filter: none;
+ filter: none;
+}
+.pagination > li.disabled > a,
+.pagination > li.disabled > a:hover,
+.pagination > li.disabled > span,
+.pagination > li.disabled > span:hover {
+ -webkit-filter: none;
+ filter: none;
+}
+.pager > li > a {
+ -webkit-filter: none;
+ filter: none;
+ text-shadow: 1px 1px 1px rgba(0, 0, 0, 0.3);
+}
+.pager > li > a:hover {
+ -webkit-filter: none;
+ filter: none;
+}
+.pager > li.disabled > a,
+.pager > li.disabled > a:hover {
+ -webkit-filter: none;
+ filter: none;
+}
+.breadcrumb {
+ border: 1px solid rgba(0, 0, 0, 0.6);
+ text-shadow: 1px 1px 1px rgba(0, 0, 0, 0.3);
+ -webkit-filter: none;
+ filter: none;
+}
+.alert .alert-link,
+.alert a {
+ color: #fff;
+ text-decoration: underline;
+}
+.alert .close {
+ color: #000000;
+ text-decoration: none;
+}
+a.thumbnail:hover,
+a.thumbnail:focus,
+a.thumbnail.active {
+ border-color: #0c0d0e;
+}
+a.list-group-item.active,
+a.list-group-item.active:hover,
+a.list-group-item.active:focus {
+ border-color: rgba(0, 0, 0, 0.6);
+}
+a.list-group-item-success.active {
+ background-color: #62c462;
+}
+a.list-group-item-success.active:hover,
+a.list-group-item-success.active:focus {
+ background-color: #4fbd4f;
+}
+a.list-group-item-warning.active {
+ background-color: #f89406;
+}
+a.list-group-item-warning.active:hover,
+a.list-group-item-warning.active:focus {
+ background-color: #df8505;
+}
+a.list-group-item-danger.active {
+ background-color: #ee5f5b;
+}
+a.list-group-item-danger.active:hover,
+a.list-group-item-danger.active:focus {
+ background-color: #ec4844;
+}
+.jumbotron {
+ border: 1px solid rgba(0, 0, 0, 0.6);
+}
+.panel-primary .panel-heading,
+.panel-success .panel-heading,
+.panel-danger .panel-heading,
+.panel-warning .panel-heading,
+.panel-info .panel-heading {
+ border-color: #000;
+}
diff --git a/web/gui/css/bootstrap-slider-10.0.0.min.css b/web/gui/css/bootstrap-slider-10.0.0.min.css
new file mode 100644
index 0000000..095be95
--- /dev/null
+++ b/web/gui/css/bootstrap-slider-10.0.0.min.css
@@ -0,0 +1,22 @@
+/*! =======================================================
+ VERSION 10.0.0
+========================================================= */
+/*! =========================================================
+ * bootstrap-slider.js
+ *
+ * Maintainers:
+ * Kyle Kemp
+ * - Twitter: @seiyria
+ * - Github: seiyria
+ * Rohit Kalkur
+ * - Twitter: @Rovolutionary
+ * - Github: rovolution
+ *
+ * =========================================================
+ *
+ * bootstrap-slider is released under the MIT License
+ * Copyright (c) 2017 Kyle Kemp, Rohit Kalkur, and contributors
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * ========================================================= */.slider{display:inline-block;vertical-align:middle;position:relative}.slider.slider-horizontal{width:210px;height:20px}.slider.slider-horizontal .slider-track{height:10px;width:100%;margin-top:-5px;top:50%;left:0}.slider.slider-horizontal .slider-selection,.slider.slider-horizontal .slider-track-low,.slider.slider-horizontal .slider-track-high{height:100%;top:0;bottom:0}.slider.slider-horizontal .slider-tick,.slider.slider-horizontal .slider-handle{margin-left:-10px}.slider.slider-horizontal .slider-tick.triangle,.slider.slider-horizontal .slider-handle.triangle{position:relative;top:50%;-ms-transform:translateY(-50%);transform:translateY(-50%);border-width:0 10px 10px 10px;width:0;height:0;border-bottom-color:#2e6da4;margin-top:0}.slider.slider-horizontal .slider-tick-container{white-space:nowrap;position:absolute;top:0;left:0;width:100%}.slider.slider-horizontal .slider-tick-label-container{white-space:nowrap;margin-top:20px}.slider.slider-horizontal .slider-tick-label-container .slider-tick-label{padding-top:4px;display:inline-block;text-align:center}.slider.slider-horizontal .tooltip{-ms-transform:translateX(-50%);transform:translateX(-50%)}.slider.slider-horizontal.slider-rtl .slider-track{left:initial;right:0}.slider.slider-horizontal.slider-rtl .slider-tick,.slider.slider-horizontal.slider-rtl .slider-handle{margin-left:initial;margin-right:-10px}.slider.slider-horizontal.slider-rtl .slider-tick-container{left:initial;right:0}.slider.slider-horizontal.slider-rtl .tooltip{-ms-transform:translateX(50%);transform:translateX(50%)}.slider.slider-vertical{height:210px;width:20px}.slider.slider-vertical .slider-track{width:10px;height:100%;left:25%;top:0}.slider.slider-vertical .slider-selection{width:100%;left:0;top:0;bottom:0}.slider.slider-vertical .slider-track-low,.slider.slider-vertical .slider-track-high{width:100%;left:0;right:0}.slider.slider-vertical .slider-tick,.slider.slider-vertical .slider-handle{margin-top:-10px}.slider.slider-vertical .slider-tick.triangle,.slider.slider-vertical .slider-handle.triangle{border-width:10px 0 10px 10px;width:1px;height:1px;border-left-color:#2e6da4;border-right-color:#2e6da4;margin-left:0;margin-right:0}.slider.slider-vertical .slider-tick-label-container{white-space:nowrap}.slider.slider-vertical .slider-tick-label-container .slider-tick-label{padding-left:4px}.slider.slider-vertical .tooltip{-ms-transform:translateY(-50%);transform:translateY(-50%)}.slider.slider-vertical.slider-rtl .slider-track{left:initial;right:25%}.slider.slider-vertical.slider-rtl .slider-selection{left:initial;right:0}.slider.slider-vertical.slider-rtl .slider-tick.triangle,.slider.slider-vertical.slider-rtl .slider-handle.triangle{border-width:10px 10px 10px 0}.slider.slider-vertical.slider-rtl .slider-tick-label-container .slider-tick-label{padding-left:initial;padding-right:4px}.slider.slider-disabled .slider-handle{background-image:-webkit-linear-gradient(top,#dfdfdf 0,#bebebe 100%);background-image:-o-linear-gradient(top,#dfdfdf 0,#bebebe 100%);background-image:linear-gradient(to bottom,#dfdfdf 0,#bebebe 100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffdfdfdf',endColorstr='#ffbebebe',GradientType=0)}.slider.slider-disabled .slider-track{background-image:-webkit-linear-gradient(top,#e5e5e5 0,#e9e9e9 100%);background-image:-o-linear-gradient(top,#e5e5e5 0,#e9e9e9 100%);background-image:linear-gradient(to bottom,#e5e5e5 0,#e9e9e9 100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffe5e5e5',endColorstr='#ffe9e9e9',GradientType=0);cursor:not-allowed}.slider input{display:none}.slider .tooltip.top{margin-top:-36px}.slider .tooltip-inner{white-space:nowrap;max-width:none}.slider .hide{display:none}.slider-track{position:absolute;cursor:pointer;background-image:-webkit-linear-gradient(top,#f5f5f5 0,#f9f9f9 100%);background-image:-o-linear-gradient(top,#f5f5f5 0,#f9f9f9 100%);background-image:linear-gradient(to bottom,#f5f5f5 0,#f9f9f9 100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff5f5f5',endColorstr='#fff9f9f9',GradientType=0);-webkit-box-shadow:inset 0 1px 2px rgba(0,0,0,0.1);box-shadow:inset 0 1px 2px rgba(0,0,0,0.1);border-radius:4px}.slider-selection{position:absolute;background-image:-webkit-linear-gradient(top,#f9f9f9 0,#f5f5f5 100%);background-image:-o-linear-gradient(top,#f9f9f9 0,#f5f5f5 100%);background-image:linear-gradient(to bottom,#f9f9f9 0,#f5f5f5 100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff9f9f9',endColorstr='#fff5f5f5',GradientType=0);-webkit-box-shadow:inset 0 -1px 0 rgba(0,0,0,0.15);box-shadow:inset 0 -1px 0 rgba(0,0,0,0.15);-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box;border-radius:4px}.slider-selection.tick-slider-selection{background-image:-webkit-linear-gradient(top,#8ac1ef 0,#82b3de 100%);background-image:-o-linear-gradient(top,#8ac1ef 0,#82b3de 100%);background-image:linear-gradient(to bottom,#8ac1ef 0,#82b3de 100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff8ac1ef',endColorstr='#ff82b3de',GradientType=0)}.slider-track-low,.slider-track-high{position:absolute;background:transparent;-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box;border-radius:4px}.slider-handle{position:absolute;top:0;width:20px;height:20px;background-color:#337ab7;background-image:-webkit-linear-gradient(top,#337ab7 0,#2e6da4 100%);background-image:-o-linear-gradient(top,#337ab7 0,#2e6da4 100%);background-image:linear-gradient(to bottom,#337ab7 0,#2e6da4 100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff337ab7',endColorstr='#ff2e6da4',GradientType=0);filter:none;-webkit-box-shadow:inset 0 1px 0 rgba(255,255,255,.2),0 1px 2px rgba(0,0,0,.05);box-shadow:inset 0 1px 0 rgba(255,255,255,.2),0 1px 2px rgba(0,0,0,.05);border:0 solid transparent}.slider-handle.round{border-radius:50%}.slider-handle.triangle{background:transparent none}.slider-handle.custom{background:transparent none}.slider-handle.custom::before{line-height:20px;font-size:20px;content:'\2605';color:#726204}.slider-tick{position:absolute;width:20px;height:20px;background-image:-webkit-linear-gradient(top,#f9f9f9 0,#f5f5f5 100%);background-image:-o-linear-gradient(top,#f9f9f9 0,#f5f5f5 100%);background-image:linear-gradient(to bottom,#f9f9f9 0,#f5f5f5 100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff9f9f9',endColorstr='#fff5f5f5',GradientType=0);-webkit-box-shadow:inset 0 -1px 0 rgba(0,0,0,0.15);box-shadow:inset 0 -1px 0 rgba(0,0,0,0.15);-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box;filter:none;opacity:.8;border:0 solid transparent}.slider-tick.round{border-radius:50%}.slider-tick.triangle{background:transparent none}.slider-tick.custom{background:transparent none}.slider-tick.custom::before{line-height:20px;font-size:20px;content:'\2605';color:#726204}.slider-tick.in-selection{background-image:-webkit-linear-gradient(top,#8ac1ef 0,#82b3de 100%);background-image:-o-linear-gradient(top,#8ac1ef 0,#82b3de 100%);background-image:linear-gradient(to bottom,#8ac1ef 0,#82b3de 100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff8ac1ef',endColorstr='#ff82b3de',GradientType=0);opacity:1}
diff --git a/web/gui/css/bootstrap-theme-3.3.7.min.css b/web/gui/css/bootstrap-theme-3.3.7.min.css
new file mode 100644
index 0000000..ba77cff
--- /dev/null
+++ b/web/gui/css/bootstrap-theme-3.3.7.min.css
@@ -0,0 +1,7 @@
+/*!
+ * Bootstrap v3.3.7 (http://getbootstrap.com)
+ * Copyright 2011-2016 Twitter, Inc.
+ * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)
+ * SPDX-License-Identifier: MIT
+ */.btn-danger,.btn-default,.btn-info,.btn-primary,.btn-success,.btn-warning{text-shadow:0 -1px 0 rgba(0,0,0,.2);-webkit-box-shadow:inset 0 1px 0 rgba(255,255,255,.15),0 1px 1px rgba(0,0,0,.075);box-shadow:inset 0 1px 0 rgba(255,255,255,.15),0 1px 1px rgba(0,0,0,.075)}.btn-danger.active,.btn-danger:active,.btn-default.active,.btn-default:active,.btn-info.active,.btn-info:active,.btn-primary.active,.btn-primary:active,.btn-success.active,.btn-success:active,.btn-warning.active,.btn-warning:active{-webkit-box-shadow:inset 0 3px 5px rgba(0,0,0,.125);box-shadow:inset 0 3px 5px rgba(0,0,0,.125)}.btn-danger.disabled,.btn-danger[disabled],.btn-default.disabled,.btn-default[disabled],.btn-info.disabled,.btn-info[disabled],.btn-primary.disabled,.btn-primary[disabled],.btn-success.disabled,.btn-success[disabled],.btn-warning.disabled,.btn-warning[disabled],fieldset[disabled] .btn-danger,fieldset[disabled] .btn-default,fieldset[disabled] .btn-info,fieldset[disabled] .btn-primary,fieldset[disabled] .btn-success,fieldset[disabled] .btn-warning{-webkit-box-shadow:none;box-shadow:none}.btn-danger .badge,.btn-default .badge,.btn-info .badge,.btn-primary .badge,.btn-success .badge,.btn-warning .badge{text-shadow:none}.btn.active,.btn:active{background-image:none}.btn-default{text-shadow:0 1px 0 #fff;background-image:-webkit-linear-gradient(top,#fff 0,#e0e0e0 100%);background-image:-o-linear-gradient(top,#fff 0,#e0e0e0 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#fff),to(#e0e0e0));background-image:linear-gradient(to bottom,#fff 0,#e0e0e0 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffffffff', endColorstr='#ffe0e0e0', GradientType=0);filter:progid:DXImageTransform.Microsoft.gradient(enabled=false);background-repeat:repeat-x;border-color:#dbdbdb;border-color:#ccc}.btn-default:focus,.btn-default:hover{background-color:#e0e0e0;background-position:0 -15px}.btn-default.active,.btn-default:active{background-color:#e0e0e0;border-color:#dbdbdb}.btn-default.disabled,.btn-default.disabled.active,.btn-default.disabled.focus,.btn-default.disabled:active,.btn-default.disabled:focus,.btn-default.disabled:hover,.btn-default[disabled],.btn-default[disabled].active,.btn-default[disabled].focus,.btn-default[disabled]:active,.btn-default[disabled]:focus,.btn-default[disabled]:hover,fieldset[disabled] .btn-default,fieldset[disabled] .btn-default.active,fieldset[disabled] .btn-default.focus,fieldset[disabled] .btn-default:active,fieldset[disabled] .btn-default:focus,fieldset[disabled] .btn-default:hover{background-color:#e0e0e0;background-image:none}.btn-primary{background-image:-webkit-linear-gradient(top,#337ab7 0,#265a88 100%);background-image:-o-linear-gradient(top,#337ab7 0,#265a88 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#337ab7),to(#265a88));background-image:linear-gradient(to bottom,#337ab7 0,#265a88 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff337ab7', endColorstr='#ff265a88', GradientType=0);filter:progid:DXImageTransform.Microsoft.gradient(enabled=false);background-repeat:repeat-x;border-color:#245580}.btn-primary:focus,.btn-primary:hover{background-color:#265a88;background-position:0 
-15px}.btn-primary.active,.btn-primary:active{background-color:#265a88;border-color:#245580}.btn-primary.disabled,.btn-primary.disabled.active,.btn-primary.disabled.focus,.btn-primary.disabled:active,.btn-primary.disabled:focus,.btn-primary.disabled:hover,.btn-primary[disabled],.btn-primary[disabled].active,.btn-primary[disabled].focus,.btn-primary[disabled]:active,.btn-primary[disabled]:focus,.btn-primary[disabled]:hover,fieldset[disabled] .btn-primary,fieldset[disabled] .btn-primary.active,fieldset[disabled] .btn-primary.focus,fieldset[disabled] .btn-primary:active,fieldset[disabled] .btn-primary:focus,fieldset[disabled] .btn-primary:hover{background-color:#265a88;background-image:none}.btn-success{background-image:-webkit-linear-gradient(top,#5cb85c 0,#419641 100%);background-image:-o-linear-gradient(top,#5cb85c 0,#419641 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#5cb85c),to(#419641));background-image:linear-gradient(to bottom,#5cb85c 0,#419641 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5cb85c', endColorstr='#ff419641', GradientType=0);filter:progid:DXImageTransform.Microsoft.gradient(enabled=false);background-repeat:repeat-x;border-color:#3e8f3e}.btn-success:focus,.btn-success:hover{background-color:#419641;background-position:0 -15px}.btn-success.active,.btn-success:active{background-color:#419641;border-color:#3e8f3e}.btn-success.disabled,.btn-success.disabled.active,.btn-success.disabled.focus,.btn-success.disabled:active,.btn-success.disabled:focus,.btn-success.disabled:hover,.btn-success[disabled],.btn-success[disabled].active,.btn-success[disabled].focus,.btn-success[disabled]:active,.btn-success[disabled]:focus,.btn-success[disabled]:hover,fieldset[disabled] .btn-success,fieldset[disabled] .btn-success.active,fieldset[disabled] .btn-success.focus,fieldset[disabled] .btn-success:active,fieldset[disabled] .btn-success:focus,fieldset[disabled] .btn-success:hover{background-color:#419641;background-image:none}.btn-info{background-image:-webkit-linear-gradient(top,#5bc0de 0,#2aabd2 100%);background-image:-o-linear-gradient(top,#5bc0de 0,#2aabd2 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#5bc0de),to(#2aabd2));background-image:linear-gradient(to bottom,#5bc0de 0,#2aabd2 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5bc0de', endColorstr='#ff2aabd2', GradientType=0);filter:progid:DXImageTransform.Microsoft.gradient(enabled=false);background-repeat:repeat-x;border-color:#28a4c9}.btn-info:focus,.btn-info:hover{background-color:#2aabd2;background-position:0 -15px}.btn-info.active,.btn-info:active{background-color:#2aabd2;border-color:#28a4c9}.btn-info.disabled,.btn-info.disabled.active,.btn-info.disabled.focus,.btn-info.disabled:active,.btn-info.disabled:focus,.btn-info.disabled:hover,.btn-info[disabled],.btn-info[disabled].active,.btn-info[disabled].focus,.btn-info[disabled]:active,.btn-info[disabled]:focus,.btn-info[disabled]:hover,fieldset[disabled] .btn-info,fieldset[disabled] .btn-info.active,fieldset[disabled] .btn-info.focus,fieldset[disabled] .btn-info:active,fieldset[disabled] .btn-info:focus,fieldset[disabled] .btn-info:hover{background-color:#2aabd2;background-image:none}.btn-warning{background-image:-webkit-linear-gradient(top,#f0ad4e 0,#eb9316 100%);background-image:-o-linear-gradient(top,#f0ad4e 0,#eb9316 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#f0ad4e),to(#eb9316));background-image:linear-gradient(to bottom,#f0ad4e 0,#eb9316 
100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff0ad4e', endColorstr='#ffeb9316', GradientType=0);filter:progid:DXImageTransform.Microsoft.gradient(enabled=false);background-repeat:repeat-x;border-color:#e38d13}.btn-warning:focus,.btn-warning:hover{background-color:#eb9316;background-position:0 -15px}.btn-warning.active,.btn-warning:active{background-color:#eb9316;border-color:#e38d13}.btn-warning.disabled,.btn-warning.disabled.active,.btn-warning.disabled.focus,.btn-warning.disabled:active,.btn-warning.disabled:focus,.btn-warning.disabled:hover,.btn-warning[disabled],.btn-warning[disabled].active,.btn-warning[disabled].focus,.btn-warning[disabled]:active,.btn-warning[disabled]:focus,.btn-warning[disabled]:hover,fieldset[disabled] .btn-warning,fieldset[disabled] .btn-warning.active,fieldset[disabled] .btn-warning.focus,fieldset[disabled] .btn-warning:active,fieldset[disabled] .btn-warning:focus,fieldset[disabled] .btn-warning:hover{background-color:#eb9316;background-image:none}.btn-danger{background-image:-webkit-linear-gradient(top,#d9534f 0,#c12e2a 100%);background-image:-o-linear-gradient(top,#d9534f 0,#c12e2a 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#d9534f),to(#c12e2a));background-image:linear-gradient(to bottom,#d9534f 0,#c12e2a 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9534f', endColorstr='#ffc12e2a', GradientType=0);filter:progid:DXImageTransform.Microsoft.gradient(enabled=false);background-repeat:repeat-x;border-color:#b92c28}.btn-danger:focus,.btn-danger:hover{background-color:#c12e2a;background-position:0 -15px}.btn-danger.active,.btn-danger:active{background-color:#c12e2a;border-color:#b92c28}.btn-danger.disabled,.btn-danger.disabled.active,.btn-danger.disabled.focus,.btn-danger.disabled:active,.btn-danger.disabled:focus,.btn-danger.disabled:hover,.btn-danger[disabled],.btn-danger[disabled].active,.btn-danger[disabled].focus,.btn-danger[disabled]:active,.btn-danger[disabled]:focus,.btn-danger[disabled]:hover,fieldset[disabled] .btn-danger,fieldset[disabled] .btn-danger.active,fieldset[disabled] .btn-danger.focus,fieldset[disabled] .btn-danger:active,fieldset[disabled] .btn-danger:focus,fieldset[disabled] .btn-danger:hover{background-color:#c12e2a;background-image:none}.img-thumbnail,.thumbnail{-webkit-box-shadow:0 1px 2px rgba(0,0,0,.075);box-shadow:0 1px 2px rgba(0,0,0,.075)}.dropdown-menu>li>a:focus,.dropdown-menu>li>a:hover{background-color:#e8e8e8;background-image:-webkit-linear-gradient(top,#f5f5f5 0,#e8e8e8 100%);background-image:-o-linear-gradient(top,#f5f5f5 0,#e8e8e8 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#f5f5f5),to(#e8e8e8));background-image:linear-gradient(to bottom,#f5f5f5 0,#e8e8e8 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff5f5f5', endColorstr='#ffe8e8e8', GradientType=0);background-repeat:repeat-x}.dropdown-menu>.active>a,.dropdown-menu>.active>a:focus,.dropdown-menu>.active>a:hover{background-color:#2e6da4;background-image:-webkit-linear-gradient(top,#337ab7 0,#2e6da4 100%);background-image:-o-linear-gradient(top,#337ab7 0,#2e6da4 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#337ab7),to(#2e6da4));background-image:linear-gradient(to bottom,#337ab7 0,#2e6da4 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff337ab7', endColorstr='#ff2e6da4', GradientType=0);background-repeat:repeat-x}.navbar-default{background-image:-webkit-linear-gradient(top,#fff 0,#f8f8f8 
100%);background-image:-o-linear-gradient(top,#fff 0,#f8f8f8 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#fff),to(#f8f8f8));background-image:linear-gradient(to bottom,#fff 0,#f8f8f8 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffffffff', endColorstr='#fff8f8f8', GradientType=0);filter:progid:DXImageTransform.Microsoft.gradient(enabled=false);background-repeat:repeat-x;border-radius:4px;-webkit-box-shadow:inset 0 1px 0 rgba(255,255,255,.15),0 1px 5px rgba(0,0,0,.075);box-shadow:inset 0 1px 0 rgba(255,255,255,.15),0 1px 5px rgba(0,0,0,.075)}.navbar-default .navbar-nav>.active>a,.navbar-default .navbar-nav>.open>a{background-image:-webkit-linear-gradient(top,#dbdbdb 0,#e2e2e2 100%);background-image:-o-linear-gradient(top,#dbdbdb 0,#e2e2e2 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#dbdbdb),to(#e2e2e2));background-image:linear-gradient(to bottom,#dbdbdb 0,#e2e2e2 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffdbdbdb', endColorstr='#ffe2e2e2', GradientType=0);background-repeat:repeat-x;-webkit-box-shadow:inset 0 3px 9px rgba(0,0,0,.075);box-shadow:inset 0 3px 9px rgba(0,0,0,.075)}.navbar-brand,.navbar-nav>li>a{text-shadow:0 1px 0 rgba(255,255,255,.25)}.navbar-inverse{background-image:-webkit-linear-gradient(top,#3c3c3c 0,#222 100%);background-image:-o-linear-gradient(top,#3c3c3c 0,#222 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#3c3c3c),to(#222));background-image:linear-gradient(to bottom,#3c3c3c 0,#222 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff3c3c3c', endColorstr='#ff222222', GradientType=0);filter:progid:DXImageTransform.Microsoft.gradient(enabled=false);background-repeat:repeat-x;border-radius:4px}.navbar-inverse .navbar-nav>.active>a,.navbar-inverse .navbar-nav>.open>a{background-image:-webkit-linear-gradient(top,#080808 0,#0f0f0f 100%);background-image:-o-linear-gradient(top,#080808 0,#0f0f0f 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#080808),to(#0f0f0f));background-image:linear-gradient(to bottom,#080808 0,#0f0f0f 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff080808', endColorstr='#ff0f0f0f', GradientType=0);background-repeat:repeat-x;-webkit-box-shadow:inset 0 3px 9px rgba(0,0,0,.25);box-shadow:inset 0 3px 9px rgba(0,0,0,.25)}.navbar-inverse .navbar-brand,.navbar-inverse .navbar-nav>li>a{text-shadow:0 -1px 0 rgba(0,0,0,.25)}.navbar-fixed-bottom,.navbar-fixed-top,.navbar-static-top{border-radius:0}@media (max-width:767px){.navbar .navbar-nav .open .dropdown-menu>.active>a,.navbar .navbar-nav .open .dropdown-menu>.active>a:focus,.navbar .navbar-nav .open .dropdown-menu>.active>a:hover{color:#fff;background-image:-webkit-linear-gradient(top,#337ab7 0,#2e6da4 100%);background-image:-o-linear-gradient(top,#337ab7 0,#2e6da4 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#337ab7),to(#2e6da4));background-image:linear-gradient(to bottom,#337ab7 0,#2e6da4 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff337ab7', endColorstr='#ff2e6da4', GradientType=0);background-repeat:repeat-x}}.alert{text-shadow:0 1px 0 rgba(255,255,255,.2);-webkit-box-shadow:inset 0 1px 0 rgba(255,255,255,.25),0 1px 2px rgba(0,0,0,.05);box-shadow:inset 0 1px 0 rgba(255,255,255,.25),0 1px 2px rgba(0,0,0,.05)}.alert-success{background-image:-webkit-linear-gradient(top,#dff0d8 0,#c8e5bc 100%);background-image:-o-linear-gradient(top,#dff0d8 0,#c8e5bc 
100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#dff0d8),to(#c8e5bc));background-image:linear-gradient(to bottom,#dff0d8 0,#c8e5bc 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffdff0d8', endColorstr='#ffc8e5bc', GradientType=0);background-repeat:repeat-x;border-color:#b2dba1}.alert-info{background-image:-webkit-linear-gradient(top,#d9edf7 0,#b9def0 100%);background-image:-o-linear-gradient(top,#d9edf7 0,#b9def0 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#d9edf7),to(#b9def0));background-image:linear-gradient(to bottom,#d9edf7 0,#b9def0 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9edf7', endColorstr='#ffb9def0', GradientType=0);background-repeat:repeat-x;border-color:#9acfea}.alert-warning{background-image:-webkit-linear-gradient(top,#fcf8e3 0,#f8efc0 100%);background-image:-o-linear-gradient(top,#fcf8e3 0,#f8efc0 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#fcf8e3),to(#f8efc0));background-image:linear-gradient(to bottom,#fcf8e3 0,#f8efc0 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fffcf8e3', endColorstr='#fff8efc0', GradientType=0);background-repeat:repeat-x;border-color:#f5e79e}.alert-danger{background-image:-webkit-linear-gradient(top,#f2dede 0,#e7c3c3 100%);background-image:-o-linear-gradient(top,#f2dede 0,#e7c3c3 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#f2dede),to(#e7c3c3));background-image:linear-gradient(to bottom,#f2dede 0,#e7c3c3 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff2dede', endColorstr='#ffe7c3c3', GradientType=0);background-repeat:repeat-x;border-color:#dca7a7}.progress{background-image:-webkit-linear-gradient(top,#ebebeb 0,#f5f5f5 100%);background-image:-o-linear-gradient(top,#ebebeb 0,#f5f5f5 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#ebebeb),to(#f5f5f5));background-image:linear-gradient(to bottom,#ebebeb 0,#f5f5f5 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffebebeb', endColorstr='#fff5f5f5', GradientType=0);background-repeat:repeat-x}.progress-bar{background-image:-webkit-linear-gradient(top,#337ab7 0,#286090 100%);background-image:-o-linear-gradient(top,#337ab7 0,#286090 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#337ab7),to(#286090));background-image:linear-gradient(to bottom,#337ab7 0,#286090 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff337ab7', endColorstr='#ff286090', GradientType=0);background-repeat:repeat-x}.progress-bar-success{background-image:-webkit-linear-gradient(top,#5cb85c 0,#449d44 100%);background-image:-o-linear-gradient(top,#5cb85c 0,#449d44 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#5cb85c),to(#449d44));background-image:linear-gradient(to bottom,#5cb85c 0,#449d44 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5cb85c', endColorstr='#ff449d44', GradientType=0);background-repeat:repeat-x}.progress-bar-info{background-image:-webkit-linear-gradient(top,#5bc0de 0,#31b0d5 100%);background-image:-o-linear-gradient(top,#5bc0de 0,#31b0d5 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#5bc0de),to(#31b0d5));background-image:linear-gradient(to bottom,#5bc0de 0,#31b0d5 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5bc0de', endColorstr='#ff31b0d5', 
GradientType=0);background-repeat:repeat-x}.progress-bar-warning{background-image:-webkit-linear-gradient(top,#f0ad4e 0,#ec971f 100%);background-image:-o-linear-gradient(top,#f0ad4e 0,#ec971f 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#f0ad4e),to(#ec971f));background-image:linear-gradient(to bottom,#f0ad4e 0,#ec971f 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff0ad4e', endColorstr='#ffec971f', GradientType=0);background-repeat:repeat-x}.progress-bar-danger{background-image:-webkit-linear-gradient(top,#d9534f 0,#c9302c 100%);background-image:-o-linear-gradient(top,#d9534f 0,#c9302c 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#d9534f),to(#c9302c));background-image:linear-gradient(to bottom,#d9534f 0,#c9302c 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9534f', endColorstr='#ffc9302c', GradientType=0);background-repeat:repeat-x}.progress-bar-striped{background-image:-webkit-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-image:-o-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-image:linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent)}.list-group{border-radius:4px;-webkit-box-shadow:0 1px 2px rgba(0,0,0,.075);box-shadow:0 1px 2px rgba(0,0,0,.075)}.list-group-item.active,.list-group-item.active:focus,.list-group-item.active:hover{text-shadow:0 -1px 0 #286090;background-image:-webkit-linear-gradient(top,#337ab7 0,#2b669a 100%);background-image:-o-linear-gradient(top,#337ab7 0,#2b669a 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#337ab7),to(#2b669a));background-image:linear-gradient(to bottom,#337ab7 0,#2b669a 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff337ab7', endColorstr='#ff2b669a', GradientType=0);background-repeat:repeat-x;border-color:#2b669a}.list-group-item.active .badge,.list-group-item.active:focus .badge,.list-group-item.active:hover .badge{text-shadow:none}.panel{-webkit-box-shadow:0 1px 2px rgba(0,0,0,.05);box-shadow:0 1px 2px rgba(0,0,0,.05)}.panel-default>.panel-heading{background-image:-webkit-linear-gradient(top,#f5f5f5 0,#e8e8e8 100%);background-image:-o-linear-gradient(top,#f5f5f5 0,#e8e8e8 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#f5f5f5),to(#e8e8e8));background-image:linear-gradient(to bottom,#f5f5f5 0,#e8e8e8 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff5f5f5', endColorstr='#ffe8e8e8', GradientType=0);background-repeat:repeat-x}.panel-primary>.panel-heading{background-image:-webkit-linear-gradient(top,#337ab7 0,#2e6da4 100%);background-image:-o-linear-gradient(top,#337ab7 0,#2e6da4 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#337ab7),to(#2e6da4));background-image:linear-gradient(to bottom,#337ab7 0,#2e6da4 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff337ab7', endColorstr='#ff2e6da4', GradientType=0);background-repeat:repeat-x}.panel-success>.panel-heading{background-image:-webkit-linear-gradient(top,#dff0d8 0,#d0e9c6 100%);background-image:-o-linear-gradient(top,#dff0d8 0,#d0e9c6 100%);background-image:-webkit-gradient(linear,left 
top,left bottom,from(#dff0d8),to(#d0e9c6));background-image:linear-gradient(to bottom,#dff0d8 0,#d0e9c6 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffdff0d8', endColorstr='#ffd0e9c6', GradientType=0);background-repeat:repeat-x}.panel-info>.panel-heading{background-image:-webkit-linear-gradient(top,#d9edf7 0,#c4e3f3 100%);background-image:-o-linear-gradient(top,#d9edf7 0,#c4e3f3 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#d9edf7),to(#c4e3f3));background-image:linear-gradient(to bottom,#d9edf7 0,#c4e3f3 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9edf7', endColorstr='#ffc4e3f3', GradientType=0);background-repeat:repeat-x}.panel-warning>.panel-heading{background-image:-webkit-linear-gradient(top,#fcf8e3 0,#faf2cc 100%);background-image:-o-linear-gradient(top,#fcf8e3 0,#faf2cc 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#fcf8e3),to(#faf2cc));background-image:linear-gradient(to bottom,#fcf8e3 0,#faf2cc 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fffcf8e3', endColorstr='#fffaf2cc', GradientType=0);background-repeat:repeat-x}.panel-danger>.panel-heading{background-image:-webkit-linear-gradient(top,#f2dede 0,#ebcccc 100%);background-image:-o-linear-gradient(top,#f2dede 0,#ebcccc 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#f2dede),to(#ebcccc));background-image:linear-gradient(to bottom,#f2dede 0,#ebcccc 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff2dede', endColorstr='#ffebcccc', GradientType=0);background-repeat:repeat-x}.well{background-image:-webkit-linear-gradient(top,#e8e8e8 0,#f5f5f5 100%);background-image:-o-linear-gradient(top,#e8e8e8 0,#f5f5f5 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#e8e8e8),to(#f5f5f5));background-image:linear-gradient(to bottom,#e8e8e8 0,#f5f5f5 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffe8e8e8', endColorstr='#fff5f5f5', GradientType=0);background-repeat:repeat-x;border-color:#dcdcdc;-webkit-box-shadow:inset 0 1px 3px rgba(0,0,0,.05),0 1px 0 rgba(255,255,255,.1);box-shadow:inset 0 1px 3px rgba(0,0,0,.05),0 1px 0 rgba(255,255,255,.1)}
+/*# sourceMappingURL=bootstrap-theme.min.css.map */
diff --git a/web/gui/css/bootstrap-toggle-2.2.2.min.css b/web/gui/css/bootstrap-toggle-2.2.2.min.css
new file mode 100644
index 0000000..a3daa37
--- /dev/null
+++ b/web/gui/css/bootstrap-toggle-2.2.2.min.css
@@ -0,0 +1,29 @@
+/*! ========================================================================
+ * Bootstrap Toggle: bootstrap-toggle.css v2.2.0
+ * http://www.bootstraptoggle.com
+ * ========================================================================
+ * Copyright 2014 Min Hur, The New York Times Company
+ * Licensed under MIT
+ * SPDX-License-Identifier: MIT
+ * ======================================================================== */
+.checkbox label .toggle,.checkbox-inline .toggle{margin-left:-20px;margin-right:5px}
+.toggle{position:relative;overflow:hidden}
+.toggle input[type=checkbox]{display:none}
+.toggle-group{position:absolute;width:200%;top:0;bottom:0;left:0;transition:left .35s;-webkit-transition:left .35s;-moz-user-select:none;-webkit-user-select:none}
+.toggle.off .toggle-group{left:-100%}
+.toggle-on{position:absolute;top:0;bottom:0;left:0;right:50%;margin:0;border:0;border-radius:0}
+.toggle-off{position:absolute;top:0;bottom:0;left:50%;right:0;margin:0;border:0;border-radius:0}
+.toggle-handle{position:relative;margin:0 auto;padding-top:0;padding-bottom:0;height:100%;width:0;border-width:0 1px}
+.toggle.btn{min-width:59px;min-height:34px}
+.toggle-on.btn{padding-right:24px}
+.toggle-off.btn{padding-left:24px}
+.toggle.btn-lg{min-width:79px;min-height:45px}
+.toggle-on.btn-lg{padding-right:31px}
+.toggle-off.btn-lg{padding-left:31px}
+.toggle-handle.btn-lg{width:40px}
+.toggle.btn-sm{min-width:50px;min-height:30px}
+.toggle-on.btn-sm{padding-right:20px}
+.toggle-off.btn-sm{padding-left:20px}
+.toggle.btn-xs{min-width:35px;min-height:22px}
+.toggle-on.btn-xs{padding-right:12px}
+.toggle-off.btn-xs{padding-left:12px}
diff --git a/web/gui/css/c3-0.4.18.min.css b/web/gui/css/c3-0.4.18.min.css
new file mode 100644
index 0000000..a033d72
--- /dev/null
+++ b/web/gui/css/c3-0.4.18.min.css
@@ -0,0 +1,2 @@
+/* SPDX-License-Identifier: MIT */
+.c3 svg{font:10px sans-serif;-webkit-tap-highlight-color:transparent}.c3 line,.c3 path{fill:none;stroke:#000}.c3 text{-webkit-user-select:none;-moz-user-select:none;user-select:none}.c3-bars path,.c3-event-rect,.c3-legend-item-tile,.c3-xgrid-focus,.c3-ygrid{shape-rendering:crispEdges}.c3-chart-arc path{stroke:#fff}.c3-chart-arc text{fill:#fff;font-size:13px}.c3-grid line{stroke:#aaa}.c3-grid text{fill:#aaa}.c3-xgrid,.c3-ygrid{stroke-dasharray:3 3}.c3-text.c3-empty{fill:grey;font-size:2em}.c3-line{stroke-width:1px}.c3-circle._expanded_{stroke-width:1px;stroke:#fff}.c3-selected-circle{fill:#fff;stroke-width:2px}.c3-bar{stroke-width:0}.c3-bar._expanded_{fill-opacity:1;fill-opacity:.75}.c3-target.c3-focused{opacity:1}.c3-target.c3-focused path.c3-line,.c3-target.c3-focused path.c3-step{stroke-width:2px}.c3-target.c3-defocused{opacity:.3!important}.c3-region{fill:#4682b4;fill-opacity:.1}.c3-brush .extent{fill-opacity:.1}.c3-legend-item{font-size:12px}.c3-legend-item-hidden{opacity:.15}.c3-legend-background{opacity:.75;fill:#fff;stroke:#d3d3d3;stroke-width:1}.c3-title{font:14px sans-serif}.c3-tooltip-container{z-index:10}.c3-tooltip{border-collapse:collapse;border-spacing:0;background-color:#fff;empty-cells:show;-webkit-box-shadow:7px 7px 12px -9px #777;-moz-box-shadow:7px 7px 12px -9px #777;box-shadow:7px 7px 12px -9px #777;opacity:.9}.c3-tooltip tr{border:1px solid #ccc}.c3-tooltip th{background-color:#aaa;font-size:14px;padding:2px 5px;text-align:left;color:#fff}.c3-tooltip td{font-size:13px;padding:3px 6px;background-color:#fff;border-left:1px dotted #999}.c3-tooltip td>span{display:inline-block;width:10px;height:10px;margin-right:6px}.c3-tooltip td.value{text-align:right}.c3-area{stroke-width:0;opacity:.2}.c3-chart-arcs-title{dominant-baseline:middle;font-size:1.3em}.c3-chart-arcs .c3-chart-arcs-background{fill:#e0e0e0;stroke:none}.c3-chart-arcs .c3-chart-arcs-gauge-unit{fill:#000;font-size:16px}.c3-chart-arcs .c3-chart-arcs-gauge-max{fill:#777}.c3-chart-arcs .c3-chart-arcs-gauge-min{fill:#777}.c3-chart-arc .c3-gauge-value{fill:#000}.c3-chart-arc.c3-target g path{opacity:1}.c3-chart-arc.c3-target.c3-focused g path{opacity:1}
diff --git a/web/gui/css/morris-0.5.1.css b/web/gui/css/morris-0.5.1.css
new file mode 100644
index 0000000..39203d3
--- /dev/null
+++ b/web/gui/css/morris-0.5.1.css
@@ -0,0 +1,3 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+.morris-hover{position:absolute;z-index:1000}.morris-hover.morris-default-style{border-radius:10px;padding:6px;color:#666;background:rgba(255,255,255,0.8);border:solid 2px rgba(230,230,230,0.8);font-family:sans-serif;font-size:12px;text-align:center}.morris-hover.morris-default-style .morris-hover-row-label{font-weight:bold;margin:0.25em 0}
+.morris-hover.morris-default-style .morris-hover-point{white-space:nowrap;margin:0.1em 0}
diff --git a/web/gui/custom/README.md b/web/gui/custom/README.md
new file mode 100644
index 0000000..733ef52
--- /dev/null
+++ b/web/gui/custom/README.md
@@ -0,0 +1,663 @@
+<!--
+title: "Custom dashboards"
+description: "Build custom dashboards with key metrics from one or more nodes running the Netdata Agent and host them anywhere."
+custom_edit_url: https://github.com/netdata/netdata/edit/master/web/gui/custom/README.md
+-->
+
+# Custom dashboards
+
+You can:
+
+- create your own dashboards using simple HTML (no JavaScript is required for
+ basic dashboards)
+- utilize any or all of the available chart libraries, on the same dashboard
+- use data from one or more Netdata servers, on the same dashboard
+- host your dashboard HTML page on any web server, anywhere
+
+You can also add Netdata charts to existing web pages.
+
+Check this **[very simple working example of a custom dashboard](http://netdata.firehol.org/demo.html)**, and its
+**[html source](https://raw.githubusercontent.com/netdata/netdata/master/web/gui/demo.html)**.
+
+You should also look at the [custom dashboard
+template](https://my-netdata.io/dashboard.html), which contains samples of all
+supported charts. The code is [here](https://raw.githubusercontent.com/netdata/netdata/master/web/gui/dashboard.html).
+
+If you plan to put the dashboard on TV, check out
+[tv.html](https://raw.githubusercontent.com/netdata/netdata/master/web/gui/tv.html). Here is a screenshot of it,
+monitoring two servers on the same page:
+
+![image](https://cloud.githubusercontent.com/assets/2662304/14252187/d8d5f78e-fa8e-11e5-990d-99821d38c874.png)
+
+---
+
+## Web directory
+
+All of the mentioned examples are available on your local Netdata installation
+(e.g. `http://myhost:19999/dashboard.html`). The default web root directory with
+the HTML and JS code is `/usr/share/netdata/web`. The main dashboard is also in
+that directory and is called `index.html`.
+
+Note: `index.html` uses a different syntax. Don't use it as a template for
+simple custom dashboards.
+
+## Example empty dashboard
+
+If you need to create a new dashboard on an empty page, we suggest the following
+header:
+
+```html
+<!DOCTYPE html>
+<html lang="en">
+<head>
+  <title>Your dashboard</title>
+
+  <meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
+  <meta charset="utf-8">
+  <meta http-equiv="X-UA-Compatible" content="IE=edge,chrome=1">
+  <meta name="viewport" content="width=device-width, initial-scale=1">
+  <meta name="apple-mobile-web-app-capable" content="yes">
+  <meta name="apple-mobile-web-app-status-bar-style" content="black-translucent">
+
+  <!-- here we will add dashboard.js -->
+
+</head>
+<body>
+
+<!-- here we will add charts -->
+
+</body>
+</html>
+```
+
+## Dash (Multi-Host Dashboard)
+
+`dash-example.html` is an all-in-one page that automatically fetches graphs from all your hosts. Add your graphs and charts (or use the defaults) once, using the `dash-*` syntax, and your selections will be replicated automatically for every host, showing alarms and graphs for all of your hosts on **one page!**
+
+__**Dash will only work if you have implemented Netdata streaming using `stream.conf`.**__
+
+`dash-example.html` was created as an experiment to demonstrate the capabilities of Netdata in a multi-host environment. If you desire more features, submit a pull request or check out Netdata Cloud!
+
+### Configure Dash
+
+First, rename the file so it doesn't get overwritten. For instance, with a webroot at `/usr/share/netdata/web`:
+```sh
+cp /usr/share/netdata/web/dash-example.html /usr/share/netdata/web/dash.html
+```
+
+Find and change the following line in `dash.html` to reflect your Netdata URLs. The second URL is only used if you access your Netdata dashboard through a reverse proxy. The reverse proxy URL is optional; if it is not set, the Netdata host URL will be used for both.
+
+```js
+/*
+* TUTORIAL: Change this to the URL of your netdata host
+* If you use netdata behind a reverse proxy, add a second parameter for the reverse proxy url like so:
+* new Dash('http://localhost:19999', 'https://my-domain.com/stats');
+*/
+var dash = new Dash('http://localhost:19999');
+```
+
+### The `dash-*` Syntax
+
+If you want to change the graphs or styling to fit your needs, just add an element to the page as shown. Child divs will be generated to create your graph/chart:
+```
+<div class="dash-graph" <---- Use class dash-graph for line graphs, etc
+ data-dash-netdata="system.cpu" <---- REQUIRED: Use data-dash-netdata to set the data source
+ data-dygraph-valuerange="[0, 100]"> <---- OPTIONAL: This overrides the default config. Any other data-* attributes will
+</div> be added to the generated div, so you can set any desired options here
+
+<div class="dash-chart" <---- Use class dash-chart for pie charts, etc. CHARTS ARE SQUARE
+ data-dash-netdata="system.io" <---- REQUIRED: Use data-dash-netdata to set the data source
+ data-dimensions="in" <---- Use this to override or append default options
+ data-title="Disk Read" <---- Use this to override or append default options
+ data-common-units="dash.io"> <---- Use this to override or append default options
+</div>
+```
+
+To change the sizes of graphs and charts, find the `Dash.options` object in `dash.html` and set your preferences:
+```js
+/*
+* TUTORIAL: Change your graph/chart dimensions here. Host columns will automatically adjust.
+* Charts are square! Their width is the same as their height.
+*/
+this.options = {
+ graph_width: '40em',
+ graph_height: '20em',
+ chart_width: '10em' // Charts are square
+};
+```
+
+To change the display order of your hosts, which is saved in localStorage, click the settings gear in the lower right corner.
+
+We hope you like it!
+
+---
+
+
+## dashboard.js
+
+To add Netdata charts to any web page (dedicated to Netdata or not), you need to
+include the `/dashboard.js` file of a Netdata server.
+
+For example, if your Netdata server listens at `http://box:19999/`, you will
+need to add the following to the `head` section of your web page:
+
+```html
+<script type="text/javascript" src="http://box:19999/dashboard.js"></script>
+```
+
+### What does dashboard.js do?
+
+`dashboard.js` will automatically load the following:
+
+1. `dashboard.css`, required for the Netdata charts
+
+2. `jquery.min.js` (only if jQuery is not already loaded on the page)
+
+3. `bootstrap.min.js` (only if Bootstrap is not already loaded) and
+ `bootstrap.min.css`.
+
+ You can disable this by adding the following before loading `dashboard.js`:
+
+```html
+<script>var netdataNoBootstrap = true;</script>
+```
+
+4. `jquery.nanoscroller.min.js`, required for the scrollbar of the chart
+ legends.
+
+5. `bootstrap-toggle.min.js` and `bootstrap-toggle.min.css`, required for the
+ settings toggle buttons.
+
+6. `font-awesome.min.css`, for icons.
+
+When `dashboard.js` loads, it will scan the page for elements that define
+charts (see below) and immediately start refreshing them. Keep in mind that
+more JavaScript modules may be loaded later (every chart library is a
+different JavaScript file that is loaded on first use).
+
+### Prevent dashboard.js from starting chart refreshes
+
+If your web page is not static and you plan to add charts using JavaScript, you
+can tell `dashboard.js` not to start processing charts immediately after it
+loads, by adding this fragment before loading it:
+
+```html
+<script>var netdataDontStart = true;</script>
+```
+
+The above will tell `dashboard.js` to load everything, but not to process the
+web page until you tell it to. You can start processing the page by running
+this JavaScript code:
+
+```js
+NETDATA.start();
+```
+
+Be careful not to call `NETDATA.start()` multiple times. Each call to this
+function will spawn a new thread that starts refreshing the charts.
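+
+A minimal guard against double starts could look like this (an illustrative
+sketch; `netdataStarted` is just a page-level flag, not a `dashboard.js` API):
+
+```js
+// Sketch: make sure NETDATA.start() runs only once on this page.
+var netdataStarted = false;
+function startChartsOnce() {
+    if (netdataStarted) return;
+    netdataStarted = true;
+    NETDATA.start();
+}
+```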
+
+If, after calling `NETDATA.start()`, you need to update the page (or even
+synchronize your JavaScript code with `dashboard.js`), you can call (after
+`dashboard.js` has loaded):
+
+```js
+NETDATA.pause(function() {
+ // ok, it is paused
+
+ // update the DOM as you wish
+
+ // and then call this to let the charts refresh:
+ NETDATA.unpause();
+});
+```
+
+### The default Netdata server
+
+`dashboard.js` will attempt to auto-detect the URL of the Netdata server it is
+loaded from, and set this server as the default Netdata server for all charts.
+
+If you need to set any other URL as the default Netdata server for all charts
+that do not specify a Netdata server, add this before loading `dashboard.js`:
+
+```html
+<script type="text/javascript">var netdataServer = "http://your.netdata.server:19999";</script>
+```
+
+---
+
+## Adding charts
+
+To add charts, you need to add a `div` for each of them. Each of these `div`
+elements accepts a few `data-` attributes:
+
+### The chart unique ID
+
+The unique ID of a chart is shown in the title of the chart on the default
+Netdata dashboard. You can also find all the charts available at your Netdata
+server with this URL: `http://your.netdata.server:19999/api/v1/charts`
+([example](http://netdata.firehol.org/api/v1/charts)).
+
+To specify the unique id, use this:
+
+```html
+<div data-netdata="unique.id"></div>
+```
+
+The above is enough for adding a chart. It will most probably have the wrong
+visual settings, though. Keep reading...
+
+### The duration of the chart
+
+You can specify the duration of the chart (how much time of data it will show)
+using:
+
+```html
+<div data-netdata="unique.id"
+ data-after="AFTER_SECONDS"
+ data-before="BEFORE_SECONDS"
+ ></div>
+```
+
+`AFTER_SECONDS` and `BEFORE_SECONDS` are numbers representing a time-frame in
+seconds.
+
+They can be either:
+
+- **absolute** Unix timestamps (in JavaScript terms, `new
+ Date().getTime() / 1000`). Using absolute timestamps, you can have a chart
+ that always shows the same time-frame.
+
+- **relative** number of seconds to now. To show the last 10 minutes of data,
+ `AFTER_SECONDS` must be `-600` (relative to now) and `BEFORE_SECONDS` must
+ be `0` (meaning: now), as in the snippet below. If you want the chart to
+ auto-refresh the current values, you need to specify **relative** values.
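+
+For example, a chart that always shows the last 10 minutes and keeps
+auto-refreshing (a sketch; `unique.id` stands for a real chart ID):
+
+```html
+<div data-netdata="unique.id"
+ data-after="-600"
+ data-before="0"
+ ></div>
+```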
+
+### Chart sizes
+
+You can set the size of the chart using this:
+
+```html
+<div data-netdata="unique.id"
+ data-width="WIDTH"
+ data-height="HEIGHT"
+ ></div>
+```
+
+`WIDTH` and `HEIGHT` can be anything CSS accepts for width and height (e.g.
+percentages, pixels, etc). Keep in mind that for certain chart libraries,
+`dashboard.js` may apply an aspect ratio to these.
+
+If you want `dashboard.js` to permanently remember (in browser local storage)
+the dimensions of the chart (the user may resize it), you can add
+`data-id="SETTINGS_ID"`, where `SETTINGS_ID` is anything that will be common
+for this chart across user sessions.
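+
+A sketch combining explicit chart dimensions with a persistent settings ID
+(the `data-id` value shown is illustrative):
+
+```html
+<div data-netdata="unique.id"
+ data-width="100%"
+ data-height="200px"
+ data-id="my-cpu-chart"
+ ></div>
+```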
+
+### Netdata server
+
+Each chart can get data from a different Netdata server. You can specify the Netdata server to use for each chart using:
+
+```html
+<div data-netdata="unique.id"
+ data-host="http://another.netdata.server:19999/"
+ ></div>
+```
+
+If you have an ephemeral monitoring setup ([more info
+here](/streaming/README.md#monitoring-ephemeral-nodes)) and have no direct
+access to the nodes' dashboards, you can use the following:
+
+```html
+<div data-netdata="unique.id"
+ data-host="http://yournetdata.server:19999/host/reported-hostname"
+ ></div>
+```
+
+### Chart library
+
+Netdata supports a number of chart libraries. The default chart library is
+`dygraph`, but you can set a different chart library per chart using
+`data-chart-library`:
+
+```html
+<div data-netdata="unique.id"
+ data-chart-library="gauge"
+ ></div>
+```
+
+Each chart library has a number of specific settings. To learn more about them,
+you should investigate the documentation of the given chart library, or visit
+the appropriate JavaScript file that defines the library's options. These files
+are concatenated into the monolithic `dashboard.js` for deployment.
+
+- [Dygraph](https://github.com/netdata/netdata/blob/5b57fc441c40959514c4e2d0863be2e6a417e352/web/gui/dashboard.js#L2034)
+- [d3](https://github.com/netdata/netdata/blob/5b57fc441c40959514c4e2d0863be2e6a417e352/web/gui/dashboard.js#L4095)
+- [d3pie](https://github.com/netdata/netdata/blob/5b57fc441c40959514c4e2d0863be2e6a417e352/web/gui/dashboard.js#L3753)
+- [Gauge.js](https://github.com/netdata/netdata/blob/5b57fc441c40959514c4e2d0863be2e6a417e352/web/gui/dashboard.js#L3065)
+- [Google Charts](https://github.com/netdata/netdata/blob/5b57fc441c40959514c4e2d0863be2e6a417e352/web/gui/dashboard.js#L2936)
+- [EasyPieChart](https://github.com/netdata/netdata/blob/5b57fc441c40959514c4e2d0863be2e6a417e352/web/gui/dashboard.js#L3531)
+- [Peity](https://github.com/netdata/netdata/blob/5b57fc441c40959514c4e2d0863be2e6a417e352/web/gui/dashboard.js#L4137)
+- [Sparkline](https://github.com/netdata/netdata/blob/5b57fc441c40959514c4e2d0863be2e6a417e352/web/gui/dashboard.js#L2779)
+- [Text-only](https://github.com/netdata/netdata/blob/5b57fc441c40959514c4e2d0863be2e6a417e352/web/gui/dashboard.js#L4200)
+
+### Data points
+
+For the time-frame requested, `dashboard.js` will use the chart dimensions and
+the settings of the chart library to find out how many data points it can show.
+
+For example, most line chart libraries use 3 pixels per data point. If the
+chart shows 10 minutes of data (600 seconds), its update frequency is 1 second,
+and the chart width is 1800 pixels, then `dashboard.js` will request from the
+Netdata server 10 minutes of data represented in 600 points, and the chart
+will be refreshed once per second. If the user resizes the window so that the
+chart becomes 600 pixels wide, then `dashboard.js` will request the same 10
+minutes of data, represented in 200 points, and the chart will be refreshed
+once every 3 seconds.
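+
+A sketch of that arithmetic (illustrative only; this is not how `dashboard.js`
+is implemented internally):
+
+```js
+// Assumed inputs, matching the example above.
+var chartWidthPx   = 1800; // current chart width in pixels
+var pixelsPerPoint = 3;    // typical for line chart libraries
+var durationSec    = 600;  // requested time-frame (10 minutes)
+
+// Points the chart can show, capped by the seconds of data available.
+var points = Math.min(durationSec, Math.floor(chartWidthPx / pixelsPerPoint)); // 600
+
+// Seconds represented by each point = the chart refresh interval.
+var refreshEverySec = durationSec / points; // 1 (or 3 when the chart is 600px wide)
+```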
+
+If you need the chart to show a fixed number of points, you can set the `data-points` option. Replace `DATA_POINTS` with the number of points you need:
+
+```html
+<div data-netdata="unique.id"
+ data-points="DATA_POINTS"
+ ></div>
+```
+
+You can also overwrite the pixels-per-point per chart using this:
+
+```html
+<div data-netdata="unique.id"
+ data-pixels-per-point="PIXELS_PER_POINT"
+ ></div>
+```
+
+Where `PIXELS_PER_POINT` is the number of pixels each data point should occupy.
+
+### Data grouping method
+
+Netdata supports **average** (the default), **sum** and **max** grouping
+methods. The grouping method is used when the Netdata server is requested to
+return fewer points for a time-frame than the number of points available.
+
+You can set it per chart, using:
+
+```html
+<div data-netdata="unique.id"
+ data-method="max"
+ ></div>
+```
+
+### Changing rates
+
+Netdata can change the rate of charts on the fly. So a chart that shows values
+**per second** can be turned into **per minute** (or any other rate, e.g. **per
+10 seconds**), with this:
+
+```html
+<div data-netdata="unique.id"
+ data-method="average"
+ data-gtime="60"
+ data-units="per minute"
+ ></div>
+```
+
+The above will provide the average rate per minute (60 seconds). Use 60 for
+`/minute`, 3600 for `/hour`, 86400 for `/day` (provided you have that much
+data). A per-hour sketch follows the notes below.
+
+- The `data-gtime` setting does not change the units of the chart. You have to
+ change them yourself with `data-units`.
+- This works only for `data-method="average"`.
+- Netdata may aggregate multiple points to satisfy the `data-points` setting.
+ For example, you request `per minute`, but the requested number of points to
+ be returned is not enough to report every single minute. In this case,
+ Netdata will sum the `per second` raw data of the database to find the `per
+ minute` rate for every single minute, and then **average** them to find the
+ **average per-minute rate of every X minutes**. So, it works as if the data
+ collection frequency was per minute.
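+
+As promised above, a per-hour sketch (assuming the database holds at least an
+hour of data):
+
+```html
+<div data-netdata="unique.id"
+ data-method="average"
+ data-gtime="3600"
+ data-units="per hour"
+ ></div>
+```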
+
+### Selecting dimensions
+
+By default, `dashboard.js` will show all the dimensions of the chart. You can
+select specific dimensions using this:
+
+```html
+<div data-netdata="unique.id"
+ data-dimensions="dimension1,dimension2,dimension3,..."
+ ></div>
+```
+
+Netdata supports comma (`,`) or pipe (`|`) separated [simple
+patterns](/libnetdata/simple_pattern/README.md) for dimensions. By default, it
+matches both dimension IDs and dimension NAMEs. You can control the target of
+the match with `data-append-options="match-ids"` or
+`data-append-options="match-names"`. Spaces in `data-dimensions=""` are matched
+in the dimension names and IDs.
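+
+For example, a sketch that matches dimension names against a wildcard pattern
+(the pattern itself is illustrative):
+
+```html
+<div data-netdata="unique.id"
+ data-dimensions="eth*|lo"
+ data-append-options="match-names"
+ ></div>
+```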
+
+### Chart title
+
+You can overwrite the title of the chart using this:
+
+```html
+<div data-netdata="unique.id"
+ data-title="my super chart"
+ ></div>
+```
+
+### Chart units
+
+You can overwrite the units of measurement of the dimensions of the chart, using
+this:
+
+```html
+<div data-netdata="unique.id"
+ data-units="words/second"
+ ></div>
+```
+
+### Chart colors
+
+`dashboard.js` has an internal palette of colors for the dimensions of the
+charts. You can prepend colors to it (so that yours will be used first) using
+this:
+
+```html
+<div data-netdata="unique.id"
+ data-colors="#AABBCC #DDEEFF ..."
+ ></div>
+```
+
+### Extracting dimension values
+
+`dashboard.js` can update the selected values of the chart at elements you
+specify. For example, let's assume we have a chart that measures the bandwidth
+of eth0, with 2 dimensions `in` and `out`. You can use this:
+
+```html
+<div data-netdata="net.eth0"
+ data-show-value-of-in-at="eth0_in_value"
+ data-show-value-of-out-at="eth0_out_value"
+ ></div>
+
+My eth0 interface is receiving <span id="eth0_in_value"></span>
+and transmitting <span id="eth0_out_value"></span>.
+```
+
+### Hiding the legend of a chart
+
+On charts that by default have a legend managed by `dashboard.js`, you can
+remove it using this:
+
+```html
+<div data-netdata="unique.id"
+ data-legend="no"
+ ></div>
+```
+
+### API options
+
+You can append Netdata **[REST API v1](/web/api/README.md)** data options, using this:
+
+```html
+<div data-netdata="unique.id"
+ data-append-options="absolute,percentage"
+ ></div>
+```
+
+A few useful options are:
+
+- `absolute` to show all values as absolute (i.e. turn negative dimensions to
+ positive)
+- `percentage` to express the values as a percentage of the chart total (the
+ values of the dimensions are added, and each value is expressed as a
+ percentage of the sum of all dimensions)
+- `unaligned` to prevent Netdata from aligning the charts (e.g. when
+ requesting 60 seconds aggregation per point, Netdata returns chart data
+ aligned to XX:XX:00 to XX:XX:59 - similarly for hours, days, etc - the
+ `unaligned` option disables this feature)
+- `match-ids` or `match-names` is used to control what `data-dimensions=` will
+ match.
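+
+For instance, a sketch combining several of these options (the combination is
+illustrative; options are comma-separated, as in the example above):
+
+```html
+<div data-netdata="unique.id"
+ data-dimensions="in|out"
+ data-append-options="unaligned,match-names"
+ ></div>
+```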
+
+### Chart library performance
+
+`dashboard.js` measures the performance of the chart library when it renders the
+charts. You can specify the ID of an element where you want this information
+to be visualized, using this:
+
+```html
+<div data-netdata="unique.id"
+ data-dt-element-name="measurement1"
+ ></div>
+
+refreshed in <span id="measurement1"></span> milliseconds!
+```
+
+### Syncing charts y-range
+
+If you give the same `data-common-max="NAME"` to 2+ charts, then all of them
+will share the same max value of their y-range. If one spikes, all of them will
+be aligned to have the same scale. This is done for the CPU interrupts and
+CPU softnet charts on the dashboard, and also for the `gauge` and
+`easypiechart` charts of the Netdata home page.
+
+```html
+<div data-netdata="chart1"
+ data-common-max="chart-group-1"
+ ></div>
+
+<div data-netdata="chart2"
+ data-common-max="chart-group-1"
+ ></div>
+```
+
+The same functionality exists for `data-common-min`.
+
+### Syncing chart units
+
+Netdata dashboards support auto-scaling of units. So, `MB` can become `KB`,
+`GB`, etc dynamically, based on the value to be shown.
+
+Giving the same `NAME` with `data-common-units="NAME"`, 2+ charts can be forced
+to always have the same units.
+
+```html
+<div data-netdata="chart1"
+ data-common-units="chart-group-1"
+ ></div>
+
+<div data-netdata="chart2"
+ data-common-units="chart-group-1"
+ ></div>
+```
+
+### Setting desired units
+
+Charts can be scaled to specific units with `data-desired-units="UNITS"`. If
+the dashboard can convert the units to the desired ones, it will do so.
+
+```html
+<div data-netdata="chart1"
+ data-desired-units="GB"
+ ></div>
+```
+
+## Chart library settings
+
+### Dygraph
+
+You can set the min and max values of the y-axis using
+`data-dygraph-valuerange="[MIN, MAX]"`.
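+
+For example, a sketch that pins the y-axis of a percentage chart to 0-100:
+
+```html
+<div data-netdata="unique.id"
+ data-chart-library="dygraph"
+ data-dygraph-valuerange="[0, 100]"
+ ></div>
+```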
+
+### EasyPieChart
+
+#### Value range
+
+You can set the max value of the chart using the following snippet:
+
+```html
+<div data-netdata="unique.id"
+ data-chart-library="easypiechart"
+ data-easypiechart-max-value="40"
+ ></div>
+```
+
+Be aware that values that exceed the max value will be clamped (e.g. `41` is
+still 100%). The same applies to the minimum:
+
+```html
+<div data-netdata="unique.id"
+ data-chart-library="easypiechart"
+ data-easypiechart-min-value="20"
+ ></div>
+```
+
+If you specify both minimum and maximum, the rendering behavior changes.
+Instead of displaying the `value` starting from zero, it is now based on the
+range provided by the snippet:
+
+```html
+<div data-netdata="unique.id"
+ data-chart-library="easypiechart"
+ data-easypiechart-min-value="20"
+ data-easypiechart-max-value="40"
+ ></div>
+```
+
+In the first example, a value of `30`, without specifying the minimum, fills the chart to **`75%`** (100% / 40 * 30). However, in this example the range is now `20` (40 - 20 = 20). The value `30` will fill the chart to **`50%`** (100% / 20 * (30 - 20)), since it sits in the middle between 20 and 40.
+
+This scenario is useful if you have metrics that change only within a specific range, e.g. temperatures that are very unlikely to fall out of range. In these cases it is more useful to have the chart render the values between the given min and max, to better highlight the changes within them.
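+
+A sketch of the fill arithmetic described above (an illustrative helper, not an
+EasyPieChart API):
+
+```js
+// Percentage of the pie a value fills, given the configured range.
+function fillPercent(value, min, max) {
+    return 100 * (value - min) / (max - min);
+}
+
+fillPercent(30, 0, 40);  // 75 - min defaults to zero
+fillPercent(30, 20, 40); // 50 - both min and max specified
+```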
+
+#### Negative values
+
+EasyPieCharts can render negative values with the following flag:
+```html
+<div data-netdata="unique.id"
+ data-chart-library="easypiechart"
+ data-override-options="signed"
+ ></div>
+```
+Negative values are rendered counter-clockwise.
+
+#### Full example with EasyPieChart
+
+This is a chart that displays the hot water temperature in the given range of
+40 to 50.
+```html
+<div data-netdata="stiebeleltron_system.hotwater.hotwatertemp"
+ data-title="Hot Water Temperature"
+ data-decimal-digits="1"
+ data-chart-library="easypiechart"
+ data-colors="#FE3912"
+ data-width="55%"
+ data-height="50%"
+ data-points="1200"
+ data-after="-1200"
+ data-dimensions="actual"
+ data-units="°C"
+ data-easypiechart-max-value="50"
+ data-easypiechart-min-value="40"
+ data-common-max="netdata-hotwater-max"
+ data-common-min="netdata-hotwater-min"
+></div>
+```
+
+![hot water
+chart](https://user-images.githubusercontent.com/12159026/28666665-a7d68ad2-72c8-11e7-9a96-f6bf9691b471.png)
+
diff --git a/web/gui/dash-example.html b/web/gui/dash-example.html
new file mode 100644
index 0000000..b1ff1af
--- /dev/null
+++ b/web/gui/dash-example.html
@@ -0,0 +1,1028 @@
+<!DOCTYPE html>
+<html lang="en">
+<head>
+ <meta http-equiv="Content-Type" content="text/html; charset=utf-8">
+ <meta charset="utf-8">
+ <meta http-equiv="X-UA-Compatible" content="IE=edge,chrome=1">
+ <meta name="viewport" content="width=device-width, initial-scale=1">
+ <meta name="apple-mobile-web-app-capable" content="yes">
+ <meta name="apple-mobile-web-app-status-bar-style" content="black-translucent">
+</head>
+<body>
+ <div id="alarms" class="collapsed">
+ <div class="alarm-collapse-button" onclick="dash.toggle_alarm_collapse()">&rsaquo;</div>
+ <span class="alarm-count"></span>
+ <h1>Alarms</h1>
+ <div class="alarm-host-list"></div>
+ <div class="settings-button" onclick="dash.reorder_hosts()">&#9881;&#xFE0E;</div>
+ </div>
+ <div id="dash">
+ <div class="netdata-host-stats-container template">
+ <div class="netdata-host-name">host</div>
+ <div class="netdata-host-stats">
+ <div class="dash-graph"
+ data-dash-netdata="system.cpu"
+ data-dygraph-valuerange="[0, 100]">
+ </div>
+ <div class="dash-graph"
+ data-dash-netdata="system.load">
+ </div>
+ <div class="dash-graph"
+ data-dash-netdata="system.ram">
+ </div>
+ <div class="dash-graph"
+ data-dash-netdata="disk_space._">
+ </div>
+ <div class="dash-graph"
+ data-dash-netdata="system.net">
+ </div>
+ <div class="dash-graph"
+ data-dash-netdata="system.processes">
+ </div>
+ <div class="dash-graph"
+ data-dash-netdata="apps.cpu">
+ </div>
+ <div class="dash-graph"
+ data-dash-netdata="apps.mem">
+ </div>
+ <div class="dash-charts">
+ <div class="dash-chart"
+ data-dash-netdata="system.io"
+ data-dimensions="in"
+ data-title="Disk Read"
+ data-common-units="dash.io">
+ </div>
+ <div class="dash-chart"
+ data-dash-netdata="system.io"
+ data-dimensions="out"
+ data-title="Disk Write"
+ data-common-units="dash.io">
+ </div>
+ <div class="dash-chart"
+ data-dash-netdata="system.cpu"
+ data-chart-library="gauge"
+ data-title="CPU"
+ data-units="%"
+ data-gauge-max-value="100"
+ data-colors="#22AA99"
+ data-gauge-stroke-color="#373B40">
+ </div>
+ <div class="dash-chart"
+ data-dash-netdata="system.net"
+ data-dimensions="received"
+ data-title="Net Inbound"
+ data-common-units="dash.net">
+ </div>
+ <div class="dash-chart"
+ data-dash-netdata="system.net"
+ data-dimensions="sent"
+ data-title="Net Outbound"
+ data-common-units="dash.net">
+ </div>
+ <div class="dash-chart"
+ data-dash-netdata="system.ram"
+ data-dimensions="used|buffers|active|wired"
+ data-append-options="percentage"
+ data-title="Used RAM"
+ data-units="%"
+ data-easypiechart-max-value="100"
+ data-colors="#EE9911">
+ </div>
+ </div>
+ </div>
+ </div>
+ </div>
+</body>
+<script
+ src="https://code.jquery.com/jquery-3.4.1.min.js"
+ integrity="sha256-CSXorXvZcTkaix6Yvo6HppcZGetbYMGWSFlBw8HfCJo="
+ crossorigin="anonymous">
+</script>
+<script type="text/javascript">
+class PickNSort {
+ // PickNSort.js
+
+ constructor (dark) {
+ this.items = [];
+ this.callback = function (output, disabled) { console.log(output, disabled) };
+ this.reset = function () { this.items = [] };
+ this.last_output = {
+ enabled: [],
+ disabled: []
+ }
+
+ this.add_css_to_page(dark);
+ }
+
+ create_modal () {
+ $("<div></div>", {
+ id: "picknsort-container"
+ }).appendTo('body');
+
+ $("<div></div>", {
+ id: 'picknsort-window'
+ }).appendTo('#picknsort-container');
+
+ $("<div></div>", {
+ id: "picknsort-close-button",
+ text: '\u2573'
+ }).appendTo('#picknsort-window');
+
+ $("<div></div>", {
+ id: "picknsort-item-list"
+ }).appendTo('#picknsort-window')
+
+ $("<div></div>", {
+ text: "Apply",
+ id: "picknsort-apply-button"
+ }).appendTo('#picknsort-window');
+
+ $("<div></div>", {
+ text: "Reset",
+ id: "picknsort-reset-button"
+ }).appendTo('#picknsort-window');
+
+ $('#picknsort-close-button').click(function () {
+ picknsort.destroy_modal();
+ picknsort.callback(null, null);
+ });
+
+ $('#picknsort-apply-button').click(function () {
+ picknsort.apply();
+ });
+
+ $('#picknsort-reset-button').click(function () {
+ picknsort.reset();
+ });
+ }
+
+ destroy_modal () {
+ $('#picknsort-container').remove();
+ }
+
+ populate_list () {
+ this.clear_list();
+
+ for (var i=0, len=this.items.length; i<len; i++) {
+ this.draw_item(i);
+ }
+ }
+
+ draw_item (index) {
+ var item = this.items[index];
+
+ var $item = $("<div></div>").addClass("picknsort-item");
+
+ var $checkbox= $("<div></div>").addClass('picknsort-item-checkbox-wrapper');
+
+ $("<input>", {
+ type: "checkbox",
+ checked: item.enabled
+ }).addClass('picknsort-item-checkbox')
+ .appendTo($checkbox);
+
+ var $value = $("<div></div>", {
+ text: item.value
+ }).addClass("picknsort-item-value");
+
+ var $nav = $("<div></div>").addClass("picknsort-item-nav")
+ .append("<div class='picknsort-item-down' data-index='" + index + "' onclick='picknsort.shift_down(this)'>&#8595;</div>")
+ .append("<div class='picknsort-item-up' data-index='" + index + "' onclick='picknsort.shift_up(this)'>&#8593;</div>");
+
+ $item.append($checkbox).append($value).append($nav).appendTo('#picknsort-item-list');
+ }
+
+ clear_list () {
+ $('#picknsort-item-list').html('');
+ }
+
+ popup (callback, reset, options={}) {
+ this.callback = callback || this.callback;
+ this.reset = reset || this.reset;
+ if (!options.enabled) {
+ options.enabled = this.last_output.enabled;
+ }
+ if (!options.disabled) {
+ options.disabled = this.last_output.disabled;
+ }
+
+ this.parse_items(options.enabled, options.disabled);
+ if (this.items.length) {
+ this.create_modal();
+ this.populate_list();
+ }
+
+ }
+
+ parse_items (enabled, disabled) {
+ this.items = [];
+
+ for (var i=0, len=enabled.length; i<len; i++) {
+ this.items.push({
+ value: enabled[i],
+ enabled: true
+ });
+ }
+
+ for (var i=0, len=disabled.length; i<len; i++) {
+ this.items.push({
+ value: disabled[i],
+ enabled: false
+ });
+ }
+ }
+
+ shift_down (el) {
+ var index = $(el).data('index');
+ if (index === this.items.length - 1) {
+ return;
+ }
+ var temp = this.items[index];
+ this.items[index] = this.items[index + 1];
+ this.items[index + 1] = temp;
+ this.move_element_down(index);
+ }
+
+ shift_up (el) {
+ var index = $(el).data('index');
+ if (index === 0) {
+ return;
+ }
+ var temp = this.items[index];
+ this.items[index] = this.items[index - 1];
+ this.items[index - 1] = temp;
+ this.move_element_up(index);
+ }
+
+ move_element_up (index) {
+ var $src = $('.picknsort-item:eq(' + index + ')');
+ var $dest = $('.picknsort-item:eq(' + (index-1) + ')');
+ $src.insertBefore($dest);
+
+ // Update data-index
+ $src.find('.picknsort-item-down, .picknsort-item-up').data("index", index-1);
+ $dest.find('.picknsort-item-down, .picknsort-item-up').data("index", index);
+ }
+
+ move_element_down (index) {
+ var $src = $('.picknsort-item:eq(' + index + ')');
+ var $dest = $('.picknsort-item:eq(' + (index+1) + ')');
+ $src.insertAfter($dest);
+
+ // Update data-index
+ $src.find('.picknsort-item-down, .picknsort-item-up').data("index", index+1);
+ $dest.find('.picknsort-item-down, .picknsort-item-up').data("index", index);
+ }
+
+ apply () {
+ var out = [];
+ for (var i=0, len=this.items.length; i<len; i++) {
+ out.push(this.items[i].value);
+ }
+ var disabled = [];
+ $('input.picknsort-item-checkbox:not(:checked)').each(function (elindex) {
+ var itemindex = $('input.picknsort-item-checkbox').index($(this));
+ // Adjust for deleted elements
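+            // (each earlier removal shifts the remaining indices left by one,
+            // so subtract the number of items already spliced out)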
+ var spliceindex = itemindex - (1 * elindex);
+ var del = out.splice(spliceindex, 1);
+ disabled.push(del[0]);
+ });
+ this.callback(out, disabled);
+ this.last_output = {
+ enabled: out,
+ disabled: disabled
+ }
+ this.destroy_modal();
+ }
+
+ add_css_to_page (dark) {
+ /*
+ * TUTORIAL: Change your theme colors here. Not sure how well it will work...
+ */
+ var light_theme = {
+ windowbg: "#FFF",
+ itembg: "#F4F4F4",
+ itemcolor: "#535353"
+ }
+ var dark_theme = {
+ windowbg: "#444",
+ itembg: "#333",
+ itemcolor: "#DBDBDB"
+ }
+
+ var current_theme = (dark) ? dark_theme : light_theme;
+
+ var css_string = `
+#picknsort-container {
+ position: fixed;
+ z-index: 9999 !important;
+ top: 0;
+ bottom: 0;
+ left: 0;
+ right: 0;
+ background: rgba(0,0,0,0.5);
+}
+#picknsort-window {
+ max-height: 90vh;
+ width: 40em;
+ background: ${current_theme.windowbg};
+ position: absolute;
+ top: 5em;
+ left: calc(50% - 20em);
+ padding: 2em;
+ padding-top: 4em;
+}
+#picknsort-close-button {
+ position: absolute;
+ top: 1em;
+ font-size: 1.5em;
+ color: #666;
+ right: 1em;
+ cursor: pointer;
+}
+#picknsort-item-list {
+ max-height: 60vh;
+ overflow-y: scroll;
+ margin-bottom: 1em;
+}
+#picknsort-apply-button, #picknsort-reset-button {
+ text-align: center;
+ font-size: 1.5em;
+ padding: 1em;
+ background: rgb(106, 232, 165);
+ font-weight: bold;
+ color: #FFF;
+ cursor: pointer;
+ margin-top: 1em;
+}
+#picknsort-reset-button {
+ background: #CCC;
+}
+.picknsort-item {
+ padding: 1em;
+ border-top: solid 1px #CCC;
+ margin-bottom: 1em;
+ position: relative;
+ background: ${current_theme.itembg};
+ color: ${current_theme.itemcolor}
+}
+.picknsort-item-checkbox-wrapper {
+ position: absolute;
+ top: 0;
+ left: 0;
+ bottom: 0;
+ width: 3em;
+ text-align: center;
+}
+.picknsort-item-checkbox {
+ transform: scale(1.5);
+ margin-top: 1.3em !important;
+}
+.picknsort-item-value {
+ margin-left: 3em;
+}
+.picknsort-item-nav {
+ font-size: 2em;
+ position: absolute;
+ right: 0;
+ top: 0;
+ bottom: 0;
+ color: #CCC;
+ cursor: pointer;
+}
+.picknsort-item-down, .picknsort-item-up {
+ display: inline-block;
+ padding: 0em 0.5em;
+ height: calc(100% - 0.2em);
+}
+ ` // End multiline string
+
+
+ $("<style>")
+ .prop("type", "text/css")
+ .html(css_string).appendTo("head");
+ }
+}
+</script>
+<script type="text/javascript">
+ // Dash JS
+
+ function picknsort_setup () {
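+        // Restore the saved host order from localStorage, or fall back to the
+        // hosts mirrored by this netdata server.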
+ if (localStorage.getItem('netdata_ordered_hosts')) {
+ picknsort.last_output = JSON.parse(localStorage.getItem('netdata_ordered_hosts'));
+ } else {
+ picknsort.last_output = {
+ enabled: dash.netdata_info.mirrored_hosts,
+ disabled: []
+ }
+ }
+ }
+
+ class Dash {
+ constructor (base_url, link_base_url) {
+ this.base_url = base_url; // URL of netdata host, with port
+ this.link_base_url = link_base_url || base_url; // Reverse proxy URL (Optional)
+ this.current_alarms = {};
+ this.new_alarms = {};
+ this.first_build = true;
+ /*
+ * TUTORIAL: Change your graph/chart dimensions here. Host columns will automatically adjust.
+ * Charts are square! Their width is the same as their height.
+ */
+ this.options = {
+ graph_width: '40em',
+ graph_height: '20em',
+ chart_width: '10em' // Charts are square
+ };
+
+ this.init();
+ }
+
+ reorder_hosts () {
+ picknsort.popup(dash.update_ordered_hosts, dash.find_new_hosts);
+ }
+
+ get_enabled_hosts () {
+ try {
+ return JSON.parse(localStorage.getItem('netdata_ordered_hosts')).enabled
+ } catch (e) {
+ return this.netdata_info.mirrored_hosts.sort();
+ }
+ }
+
+ get_ordered_hosts () {
+ try {
+ return JSON.parse(localStorage.getItem('netdata_ordered_hosts'));
+ } catch (e) {
+ return null;
+ }
+ }
+
+ set_ordered_hosts (hosts) {
+ localStorage.setItem('netdata_ordered_hosts', JSON.stringify(hosts));
+ location.reload();
+ }
+
+ update_ordered_hosts (enabled, disabled) {
+ if (enabled === null && disabled === null) {
+ return;
+ }
+ dash.set_ordered_hosts({
+ enabled: enabled,
+ disabled: disabled
+ });
+ }
+
+ find_new_hosts () {
+ var newhosts = dash.netdata_info.mirrored_hosts.slice();
+ var currenthosts = dash.get_ordered_hosts();
+
+        for (var i=0,len=currenthosts.enabled.length; i<len; i++) {
+            var found = newhosts.indexOf(currenthosts.enabled[i]);
+
+            if (found > -1) {
+                newhosts.splice(found, 1);
+            } else {
+                // Host no longer exists; drop it and step back so the
+                // next entry is not skipped.
+                currenthosts.enabled.splice(i, 1);
+                i--; len--;
+            }
+        }
+
+        for (var i=0,len=currenthosts.disabled.length; i<len; i++) {
+            var found = newhosts.indexOf(currenthosts.disabled[i]);
+
+            if (found > -1) {
+                newhosts.splice(found, 1);
+            } else {
+                // Host no longer exists; drop it from the disabled list
+                // and step back so the next entry is not skipped.
+                currenthosts.disabled.splice(i, 1);
+                i--; len--;
+            }
+        }
+
+ for (var i=0,len=newhosts.length; i<len; i++) {
+ currenthosts.enabled.push(newhosts[i]);
+ }
+
+ dash.set_ordered_hosts(currenthosts);
+ }
+
+ get_host_url (hostname, link) {
+ var base = ( link ) ? this.link_base_url : this.base_url;
+ return (hostname) ? base + '/host/' + hostname : base;
+ }
+
+ get_api_url (hostname) {
+ return this.get_host_url(hostname) + '/api/v1'
+ }
+
+
+ init () {
+ var that = this;
+ $.get(this.get_api_url() + '/info', function (data) {
+ that.netdata_info = data;
+ picknsort_setup()
+ that.build();
+ });
+ }
+
+ digest () {
+ this.fetch_active_alarms();
+ this.fix_layout_errors();
+ }
+
+ build () {
+ // Fix vertically misaligned stats on load error
+ $('.dash-graph').css({
+ height: this.options.graph_height,
+ width: this.options.graph_width
+ });
+ $('.dash-chart').css({
+ height: this.options.chart_width, // Charts are square
+ width: this.options.chart_width
+ });
+
+ // Fix chart alignment
+ $('.netdata-host-stats-container').css({
+ width: 'calc(' + this.options.graph_width + ' + (2 * ' + $('.netdata-host-stats-container').css('padding-left') + '))'
+ });
+
+ var $template = $('.netdata-host-stats-container').first();
+
+ var hosts = this.get_enabled_hosts();
+
+ for (var i=0, len=hosts.length; i<len; i++) {
+ var hostname = hosts[i];
+ $('#alarms .alarm-host-list').append('<div class="host-alarms ' + hostname + '"><a onclick="return dash.scroll_to_host_stats(' + "'" + hostname + "'" + ')" href="#' + hostname + '"><h2>' + hostname + '</h2></a></div>');
+
+ $template.clone().removeClass('template').attr('id', hostname).appendTo('#dash');
+ var $newest = $('.netdata-host-stats-container').last();
+ this.build_stats($newest, hostname);
+ }
+
+
+ if (this.first_build === true) {
+ this.remove_template_elements();
+ this.first_build = false;
+ }
+ }
+
+ build_stats ($hoststats, hostname) {
+ var that = this;
+
+ $hoststats.find('.netdata-host-stats .dash-graph').each(function () {
+ that.build_graph($(this), hostname);
+ });
+
+ $hoststats.find('.netdata-host-stats .dash-chart').each(function () {
+ that.build_chart($(this), hostname);
+ });
+
+ $hoststats.find('.netdata-host-name').html('<a target="_blank" href="' + that.get_host_url(hostname, true) + '/">' + hostname + '</a>');
+ }
+
+ build_graph ($wrapper, hostname) {
+ var that = this;
+ var $graph = $('<div>', {
+ // Defaults
+ 'data-netdata': $wrapper.attr('data-dash-netdata'),
+ 'data-host': that.get_host_url(hostname),
+ 'data-before': '0',
+ 'data-after': '-540',
+ 'role': 'application',
+ 'data-width': that.options.graph_width,
+ 'data-height': that.options.graph_height
+ });
+
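+        // Copy every data-* attribute from the wrapper onto the chart element,
+        // skipping the data-dash-* helpers used only by this dashboard.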
+ $.each($wrapper[0].attributes, function (key, node) {
+ if ( node.name.match(/^data-(?!dash).*/ ) ) {
+ $graph.attr(node.name, node.value);
+ }
+ });
+
+ $graph.appendTo($wrapper);
+ }
+
+ build_chart ($wrapper, hostname) {
+ var that = this;
+ var $graph = $('<div>', {
+ // Defaults
+ 'data-netdata': $wrapper.attr('data-dash-netdata'),
+ 'data-host': that.get_host_url(hostname),
+ 'data-before': '0',
+ 'data-after': '-540',
+ 'data-points': '540',
+ 'role': 'application',
+ 'data-width': that.options.chart_width,
+ 'data-title': ' ',
+ 'data-easypiechart-trackcolor': "#373B40",
+ 'data-chart-library': 'easypiechart'
+
+ })
+
+ $.each($wrapper[0].attributes, function (key, node) {
+ if ( node.name.match(/^data-(?!dash).*/ ) ) {
+ $graph.attr(node.name, node.value);
+ }
+ });
+
+ $graph.appendTo($wrapper);
+ }
+
+ scroll_to_host_stats (hostname) {
+ event.preventDefault();
+ $('#dash').scrollLeft(document.getElementById(hostname).offsetLeft - $('#' + hostname).width());
+
+ // Prevent regular event from happening
+ return false;
+ }
+
+ all_alarms_are_warnings () {
+ var out = true;
+ $.each(this.current_alarms, function (host, alarms) {
+ if (out === false) {
+ return false; // Maximum efficiency
+ }
+ $.each(alarms, function (name, data) {
+ if (data.status != "WARNING") {
+ out = false;
+ return false;
+ }
+ });
+ });
+ return out;
+ }
+
+    update_alarm_count () {
+ var count = $('img.alarm-badge').length;
+ $('.alarm-count').text($('img.alarm-badge').length);
+ $('.alarm-count').removeClass('no-alarms warnings-only');
+ if ( count === 0 ) {
+ $('.alarm-count').addClass('no-alarms');
+ return
+ }
+ if (this.all_alarms_are_warnings()) {
+ $('.alarm-count').addClass('warnings-only');
+ }
+ }
+
+ fetch_active_alarms () {
+ var that = this;
+ console.log("Getting alarms...");
+ var hosts = this.get_enabled_hosts();
+ for (var i=0, len=hosts.length; i<len; i++) {
+ var hostname = hosts[i];
+
+ $.get(this.get_api_url(hostname) + '/alarms', function (data) {
+ that.new_alarms[ data.hostname ] = data.alarms;
+ if ( Object.keys(that.new_alarms).length === len ) {
+ // All responses received
+ if ( that.check_new_current_alarms() ) {
+ console.log("No alarm changes detected... Refreshing images.");
+ that.new_alarms = {};
+ that.refresh_alarm_images();
+ return;
+ }
+
+ that.current_alarms = that.new_alarms;
+ that.new_alarms = {};
+ that.draw_alarms();
+ that.update_alarm_count();
+ }
+ });
+ }
+ }
+
+ toggle_alarm_collapse () {
+ $('#alarms').toggleClass('collapsed');
+ }
+
+ check_new_current_alarms () {
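+        // Shallow comparison of the two alarm maps: the same hosts must be
+        // present, and each host must have the same alarm names.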
+ // Create arrays of property names
+ var a = this.current_alarms;
+ var b = this.new_alarms;
+ var aProps = Object.keys(a);
+ var bProps = Object.keys(b);
+
+ // If number of properties is different,
+ // objects are not equivalent
+ if (aProps.length != bProps.length) {
+ return false;
+ }
+
+ for (var i = 0; i < aProps.length; i++) {
+ var propName = aProps[i];
+ var aa = a[propName];
+ var aaProps = Object.keys(aa);
+ var bb = b[propName];
+ var bbProps = Object.keys(bb);
+
+ if (aaProps.length != bbProps.length) {
+ return false;
+ }
+
+                for (var n = 0, len = aaProps.length; n < len; n++) {
+ if ( bb[aaProps[n]] === undefined ) {
+ return false;
+ }
+ }
+ }
+
+ // If we made it this far, objects
+ // are considered equivalent
+ return true;
+ }
+
+ refresh_alarm_images () {
+ // Use Math.random() to "reload" the image
+ $('.alarm-badge').each(function () {
+ var old_src = $(this).attr('src').replace(/&rand=.*/g, "");
+ $(this).attr('src', old_src + "&rand=" + Math.random());
+ });
+ }
+
+ draw_alarms () {
+ var that = this;
+ console.log("Drawing...", that.current_alarms);
+ $.each(that.current_alarms, function(hostname, alarms) {
+ $('#alarms .host-alarms.' + hostname + ' .alarm-badge').remove();
+ $.each(alarms, function (index, alarm) {
+ that.draw_alarm(hostname, alarm);
+ })
+ });
+ }
+
+ draw_alarm (hostname, alarm) {
+ var queryStr = '/badge.svg?alarm=' + alarm.name + '&chart=' + alarm.chart
+ $('<img />', {
+ src: this.get_api_url(hostname) + queryStr,
+ class: 'alarm-badge'
+ }).appendTo($('#alarms .host-alarms.' + hostname));
+ }
+
+ remove_template_elements () {
+ $('.netdata-host-stats-container').first().remove();
+ }
+
+ fix_layout_errors () {
+ $('.dash-graph:contains("chart not found")').html('<div class="loading-error">Not found</div>');
+ $('.dash-chart:contains("chart not found")').html('<div class="loading-error">Not found</div>');
+ }
+ }
+
+ /*
+ * TUTORIAL: Change this to the URL of your netdata host
+ * If you use netdata behind a reverse proxy, add a second parameter for the reverse proxy url like so:
+ * new Dash('http://localhost:19999', 'https://my-domain.com/stats');
+ */
+ var dash = new Dash('http://localhost:19999');
+
+ var picknsort = new PickNSort(true);
+
+ // Import dashboard.js
+ $.getScript(dash.base_url + '/dashboard.js', function() {
+ console.log("Loaded dashboard.js");
+ setTimeout(function () {
+ $('#alarms').css("visibility", "visible");
+ }, 400);
+ });
+
+ setInterval(function () {
+ dash.digest();
+ }, 6 * 1000);
+
+</script>
+<style type="text/css">
+ body {
+ background-color: #272b30;
+ }
+
+ a, a:hover {
+ color: #AAA !important;
+ }
+
+ #dash {
+ overflow: scroll;
+ width: 100vw;
+ white-space: nowrap;
+ height: 100vh;
+
+ }
+
+ #alarms {
+ display: block;
+ z-index: 9999;
+ position: fixed;
+ right: 0;
+ top: 0;
+ bottom: 0;
+ background-color: #111;
+ width: 23em;
+ padding: 1em;
+ color: #AAA;
+ box-shadow: 0 0 3em #060606;
+ border: solid 1px #2d2d2d;
+ visibility: hidden;
+ }
+
+
+
+ #alarms h1 {
+ text-align: center;
+ margin: 0;
+ margin-bottom: 0.5em;
+ margin-top: -0.1em;
+ font-size: 2em;
+ }
+
+ #alarms h2 {
+ font-size: 1.5em;
+ }
+
+ #alarms .alarm-collapse-button {
+ position: absolute;
+ top: 0;
+ left: 0;
+ width: 0.9em;
+ height: 0.9em;
+ font-size: 4em;
+ line-height: 0.8;
+ background: #111;
+ text-align: center;
+ cursor: default;
+ }
+
+ #alarms .alarm-collapse-button:hover {
+ background: #000;
+ }
+
+ #alarms.collapsed {
+ width: 4em;
+ }
+
+ #alarms.collapsed .alarm-collapse-button {
+ position: fixed;
+ left: auto;
+ right: 0;
+ top: 0;
+ bottom: 0;
+ width: 4em;
+ height: 100vh;
+ opacity: 0;
+ z-index: 9999;
+ }
+
+ #alarms.collapsed h1 {
+ transform: rotate(-90deg);
+ position: absolute;
+ left: -9.1em;
+ top: 4em;
+ width: 20em;
+ }
+
+ #alarms .alarm-host-list {
+ margin-top: 1em;
+ height: 100%;
+ overflow-y: scroll;
+ scrollbar-width: thin;
+ }
+
+ #alarms.collapsed .alarm-host-list {
+ display: none;
+ }
+
+
+
+ #alarms .host-alarms {
+ background: #1d1d1d;
+ margin-bottom: 1em;
+ padding: 0.2em 1em;
+ padding-bottom: 1.5em;
+ border-top: 1px solid;
+ position: relative;
+ }
+
+ .host-alarms a:last-child:after {
+ position: absolute;
+ display: block;
+ content: 'no alarms';
+ width: 6em;
+ height: 1em;
+ right: 0;
+ top: 2em;
+ color: #646464;
+ }
+
+ #alarms .host-alarms img.alarm-badge {
+ margin: .25em;
+ height: 1.2em;
+ }
+
+ #alarms .alarm-count {
+ width: 1.5em;
+ height: 1.5em;
+ text-align: center;
+ position: absolute;
+ right: 0.5em;
+ top: 0.5em;
+ font-size: 1.5em;
+ background: #6f1515;
+ border-radius: 50%;
+ }
+
+ #alarms .alarm-count:empty {
+ background: #333;
+ }
+
+ #alarms .no-alarms {
+ background: #156f15;
+ color: rgba(0,0,0,0);
+ }
+
+ #alarms .warnings-only {
+ background: #f48041;
+ color: #EEE;
+ }
+
+ .settings-button {
+ position: absolute;
+ bottom: 0;
+ right: 0;
+ font-size: 3em;
+ width: 1.3em;
+ height: 1.3em;
+ z-index: 9999;
+ text-align: center;
+ line-height: 1.2;
+ cursor: default;
+ }
+
+ .settings-button:hover {
+ background: #424242;
+ }
+
+ .netdata-host-name {
+ font-size: 3em;
+ color: #FFF;
+ text-align: center;
+ white-space: nowrap;
+ background: inherit;
+ position: sticky;
+ top: 0;
+ z-index: 9998;
+ box-shadow: 0 0.5em 1em #232323;
+ margin-bottom: 0.5em;
+ border-bottom: solid 0.1em #AAA;
+ }
+
+ .netdata-host-stats-container {
+ position: relative;
+ margin: 0 1em;
+ padding: 1em 2em;
+ display: inline-block;
+ text-align: center;
+ color: #CCC;
+ background: #232323;
+ }
+
+ .netdata-host-stats-container:last-of-type {
+ margin-right: 27em;
+ }
+
+ .netdata-host-stats {
+
+ }
+
+ .netdata-message {
+ }
+
+ .dash-graph {
+ margin-bottom: 1em;
+ }
+
+ .netdata-legend-resize-handler {
+ display: none
+ }
+
+ .dash-charts {
+ white-space: normal;
+ margin: 2em 0;
+ }
+
+ .dash-chart {
+ display: inline-block;
+ vertical-align: top;
+ }
+
+ .easyPieChartLabel {
+ color: #FFF;
+ }
+
+ .loading-error {
+ padding-top: 2em;
+ font-size: 2em;
+ }
+
+ .netdata-legend-value, .netdata-legend-toolbox, .netdata-legend-toolbox-button, .netdata-legend-resize-handler {
+ background: initial;
+ }
+</style>
+</html>
diff --git a/web/gui/dashboard.css b/web/gui/dashboard.css
new file mode 100644
index 0000000..674131a
--- /dev/null
+++ b/web/gui/dashboard.css
@@ -0,0 +1,757 @@
+/* SPDX-License-Identifier: GPL-3.0-or-later */
+html,
+body {
+ /*font-family: Calibri,"Segoe UI","Helvetica Neue",Helvetica,Arial,sans-serif;*/
+ font-family: "Helvetica Neue", Helvetica, Arial, sans-serif;
+ font-style: normal;
+ font-variant: normal;
+}
+
+.morelink {
+ color: #765d9c;
+ text-decoration: none;
+}
+
+.morelink:hover {
+ color: #563d7c;
+ text-decoration: none;
+}
+
+.morelink:focus {
+ color: #765d9c;
+ text-decoration: none;
+}
+
+.netdata-chart-alignment {
+ margin-left: 55px;
+}
+
+.netdata-chart-row {
+ width: 100%;
+ text-align: center;
+ display: flex;
+ display: -webkit-flex;
+ display: -moz-flex;
+ align-items: baseline;
+ -moz-align-items: baseline;
+ -webkit-align-items: baseline;
+ justify-content: center;
+ -webkit-justify-content: center;
+ -moz-justify-content: center;
+ padding-top: 10px;
+}
+
+.netdata-container {
+ display: inline-block;
+ overflow: hidden;
+
+ transform: translate3d(0,0,0);
+
+ /* required for child elements to have absolute position */
+ position: relative;
+
+ /* width and height is given per chart with data-width and data-height */
+}
+
+.netdata-container-gauge {
+ display: inline-block;
+ overflow: hidden;
+
+ transform: translate3d(0,0,0);
+
+ /* required for child elements to have absolute position */
+ position: relative;
+
+ /* width and height is given per chart with data-width and data-height */
+}
+
+.netdata-container-gauge:after {
+ padding-top: 60%;
+ display: block;
+ content: '';
+}
+
+.netdata-container-easypiechart {
+ display: inline-block;
+ overflow: hidden;
+
+ transform: translate3d(0,0,0);
+
+ /* required for child elements to have absolute position */
+ position: relative;
+
+ /* width and height is given per chart with data-width and data-height */
+}
+
+.netdata-container-easypiechart:after {
+ padding-top: 100%;
+ display: block;
+ content: '';
+}
+
+.netdata-aspect {
+ position: relative;
+ width: 100%;
+ padding: 0px;
+ margin: 0px;
+}
+
+.netdata-container-with-legend {
+ display: inline-block;
+ overflow: hidden;
+
+ transform: translate3d(0,0,0);
+
+ /* fix minimum scrollbar issue in firefox */
+ min-height: 99px;
+
+ /* required for child elements to have absolute position */
+ position: relative;
+
+ /* width and height is given per chart with data-width and data-height */
+}
+
+.netdata-legend-resize-handler {
+ display: block;
+ position: absolute;
+ bottom: 0px;
+ right: 0px;
+ height: 15px;
+ width: 20px;
+ background-color: White;
+ font-size: 15px;
+ vertical-align: middle;
+ line-height: 15px;
+ cursor: ns-resize;
+ color: #DDDDDD;
+ text-align: center;
+ overflow: hidden;
+ z-index: 20;
+ padding: 0px;
+ margin: 0px;
+}
+
+.netdata-legend-toolbox {
+ display: block;
+ position: absolute;
+ bottom: 0px;
+ right: 30px;
+ height: 15px;
+ width: 110px;
+ background-color: White;
+ font-size: 12px;
+ vertical-align: middle;
+ line-height: 15px;
+ color: #DDDDDD;
+ text-align: center;
+ overflow: hidden;
+ z-index: 20;
+ padding: 0px;
+ margin: 0px;
+
+ /* prevent text selection after double click */
+ -webkit-user-select: none; /* webkit (safari, chrome) browsers */
+ -moz-user-select: none; /* mozilla browsers */
+ -khtml-user-select: none; /* webkit (konqueror) browsers */
+ -ms-user-select: none; /* IE10+ */
+}
+
+.netdata-legend-toolbox-button {
+ display: inline-block;
+ position: relative;
+ height: 15px;
+ width: 18px;
+ background-color: White;
+ font-size: 12px;
+ vertical-align: middle;
+ line-height: 15px;
+ color: #CDCDCD;
+ text-align: center;
+ overflow: hidden;
+ z-index: 21;
+ padding: 0px;
+ margin: 0px;
+ cursor: pointer;
+
+ /* prevent text selection after double click */
+ -webkit-user-select: none; /* webkit (safari, chrome) browsers */
+ -moz-user-select: none; /* mozilla browsers */
+ -khtml-user-select: none; /* webkit (konqueror) browsers */
+ -ms-user-select: none; /* IE10+ */
+}
+
+.netdata-message {
+ display: inline-block;
+ position: absolute;
+ top: 0;
+ left: 0;
+ right: 0;
+ bottom: 0;
+ text-align: left;
+ vertical-align: top;
+ font-weight: bold;
+ font-size: x-small;
+ overflow: hidden;
+ background: inherit;
+ z-index: 0;
+}
+
+.netdata-message.hidden {
+ display: none;
+}
+
+.netdata-message.icon {
+ color: #F8F8F8;
+ text-align: center;
+ vertical-align: middle;
+}
+
+.netdata-chart-legend {
+ position: absolute; /* within .netdata-container */
+ top: 0;
+ right: 0;
+ overflow: hidden;
+ text-overflow: ellipsis;
+ line-height: 14px;
+ display: block;
+ width: 140px; /* --legend-width */
+ height: calc(100% - 15px); /* 10px for the resize handler and 5px for the top margin */
+ font-size: 10px;
+ margin-top: 5px;
+ text-align: left;
+ /* width and height is calculated (depends on the appearance of the legend) */
+}
+
+.netdata-legend-title-date {
+ font-size: 10px;
+ font-weight: normal;
+ margin-top: 0px;
+ overflow: hidden;
+ text-overflow: ellipsis;
+ white-space: nowrap;
+}
+
+.netdata-legend-title-time {
+ font-size: 11px;
+ font-weight: bold;
+ margin-top: 0px;
+ overflow: hidden;
+ text-overflow: ellipsis;
+ white-space: nowrap;
+}
+
+.netdata-legend-title-units {
+ position: absolute;
+ right: 10px;
+ float: right;
+ font-size: 11px;
+ vertical-align: top;
+ font-weight: normal;
+ margin-top: 0px;
+ overflow: hidden;
+ text-overflow: ellipsis;
+ white-space: nowrap;
+}
+
+.netdata-legend-series {
+ position: absolute;
+ width: 140px; /* legend-width */
+ height: calc(100% - 50px);
+ overflow: hidden;
+ text-overflow: ellipsis;
+ line-height: 14.5px; /* line spacing at the legend */
+ display: block;
+ font-size: 10px;
+ margin-top: 0px;
+}
+
+.netdata-legend-name-table-line {
+ display: inline-block;
+ width: 13px;
+ height: 4px;
+ border-width: 0px;
+ border-bottom-width: 2px;
+ border-bottom-style: solid;
+ border-bottom-color: white;
+}
+
+.netdata-legend-name-table-area {
+ display: inline-block;
+ width: 13px;
+ height: 5px;
+ border-width: 1px;
+ border-top-width: 1px;
+ border-top-style: solid;
+ border-top-color: inherit;
+}
+
+.netdata-legend-name-table-stacked {
+ display: inline-block;
+ width: 13px;
+ height: 5px;
+ border-width: 1px;
+ border-top-width: 1px;
+ border-top-style: solid;
+ border-top-color: inherit;
+}
+
+.netdata-legend-name-tr {
+}
+
+.netdata-legend-name-td {
+}
+
+.netdata-legend-name {
+ text-align: left;
+ font-size: 11px; /* legend: dimension name size */
+ font-weight: bold;
+ vertical-align: bottom;
+ margin-top: 0px;
+ z-index: 9;
+ padding: 0px;
+ width: 80px !important;
+ max-width: 80px !important;
+ text-overflow: ellipsis;
+ overflow: hidden;
+ white-space: nowrap;
+ display: inline-block;
+ cursor: pointer;
+ -webkit-print-color-adjust: exact;
+}
+
+.netdata-legend-value {
+ /*margin-left: 14px;*/
+ position: absolute;
+ right: 10px;
+ float: right;
+ text-align: right;
+ font-size: 11px; /* legend: dimension value size */
+ font-weight: bold;
+ vertical-align: bottom;
+ background-color: White;
+ margin-top: 0px;
+ z-index: 10;
+ padding: 0px;
+ padding-left: 15px;
+ cursor: pointer;
+ /* -webkit-font-smoothing: none; */
+}
+
+.netdata-legend-name.not-selected {
+ font-weight: normal;
+ opacity: 0.3;
+}
+
+.netdata-chart {
+ position: absolute; /* within .netdata-container */
+ top: 0; /* within .netdata-container */
+ left: 0; /* within .netdata-container */
+ display: inline-block;
+ overflow: hidden;
+ width: 100%;
+ height: 100%;
+ z-index: 5;
+
+ /* width and height is calculated (depends on the appearance of the legend) */
+}
+
+.netdata-chart-with-legend-right {
+ position: absolute; /* within .netdata-container */
+ top: 0; /* within .netdata-container */
+ left: 0; /* within .netdata-container */
+ display: block;
+ overflow: hidden;
+ margin-right: 140px; /* --legend-width */
+ width: calc(100% - 140px); /* --legend-width */
+ height: 100%;
+ z-index: 5;
+ flex-grow: 1;
+
+ /* width and height is calculated (depends on the appearance of the legend) */
+}
+
+.netdata-peity-chart {
+
+}
+
+.netdata-sparkline-chart {
+
+}
+
+.netdata-dygraph-chart {
+
+}
+
+.netdata-morris-chart {
+
+}
+
+.netdata-google-chart {
+
+}
+
+.dygraph-ylabel {
+}
+
+.dygraph-axis-label-x {
+ overflow-x: hidden;
+}
+
+.dygraph-label-rotate-left {
+ text-align: center;
+ /* See http://caniuse.com/#feat=transforms2d */
+ transform: rotate(90deg);
+ -webkit-transform: rotate(90deg);
+ -moz-transform: rotate(90deg);
+ -o-transform: rotate(90deg);
+ -ms-transform: rotate(90deg);
+}
+
+/* For y2-axis label */
+.dygraph-label-rotate-right {
+ text-align: center;
+ /* See http://caniuse.com/#feat=transforms2d */
+ transform: rotate(-90deg);
+ -webkit-transform: rotate(-90deg);
+ -moz-transform: rotate(-90deg);
+ -o-transform: rotate(-90deg);
+ -ms-transform: rotate(-90deg);
+}
+
+.dygraph-title {
+ text-indent: 56px;
+ text-align: left;
+ position: absolute;
+ left: 0px;
+ top: 4px;
+ font-size: 11px;
+ font-weight: bold;
+ text-overflow: ellipsis;
+ overflow: hidden;
+ white-space: nowrap;
+}
+
+/* fix for sparkline tooltip under bootstrap */
+.jqstooltip {
+ width: auto !important;
+ height: auto !important;
+}
+
+.easyPieChart {
+ position: relative;
+ text-align: center;
+}
+
+.easyPieChart canvas {
+ position: absolute;
+ top: 0;
+ left: 0;
+}
+
+.easyPieChartLabel {
+ display: inline-block;
+ position: absolute;
+ float: left;
+ left: 0;
+ width: 100%;
+ text-align: center;
+ color: #666;
+ font-weight: normal;
+ text-shadow: #BBB 0px 0px 1px;
+ /* -webkit-font-smoothing: none; */
+}
+
+.easyPieChartTitle {
+ display: inline-block;
+ position: absolute;
+ float: left;
+ left: 0;
+ width: 64%;
+ margin-left: 18% !important;
+ text-align: center;
+ color: #999999;
+ font-weight: bold;
+}
+
+.easyPieChartUnits {
+ display: inline-block;
+ position: absolute;
+ float: left;
+ left: 0;
+ width: 60%;
+ margin-left: 20% !important;
+ text-align: center;
+ color: #999999;
+ font-weight: normal;
+}
+
+.gaugeChart {
+ position: relative;
+ text-align: center;
+}
+
+.gaugeChart canvas {
+ position: absolute;
+ top: 0;
+ left: 0;
+ bottom: 0;
+ right: 0;
+ z-index: 0;
+}
+
+.gaugeChartLabel {
+ display: inline-block;
+ position: absolute;
+ float: left;
+ left: 0;
+ width: 100%;
+ text-align: center;
+ color: #FFFFFF;
+ font-weight: bold;
+ z-index: 1;
+ text-shadow: #777 0px 0px 1px;
+ /* text-shadow: #CCC 1px 1px 0px, #CCC -1px -1px 0px, #CCC 1px -1px 0px, #CCC -1px 1px 0px; */
+ /* -webkit-text-stroke: 1px #777; */
+ /* -webkit-font-smoothing: none; */
+}
+
+.gaugeChartTitle {
+ display: inline-block;
+ position: absolute;
+ float: left;
+ left: 0;
+ width: 100%;
+ text-align: center;
+ color: #999999;
+ font-weight: bold;
+}
+
+.gaugeChartUnits {
+ display: inline-block;
+ position: absolute;
+ float: left;
+ left: 0;
+ bottom: 0;
+ width: 100%;
+ text-align: left;
+ margin-left: 5%;
+ color: #999999;
+ font-weight: normal;
+}
+
+.gaugeChartMin {
+ display: inline-block;
+ position: absolute;
+ float: left;
+ left: 0;
+ bottom: 8%;
+ width: 92%;
+ margin-left: 8%;
+ text-align: left;
+ color: #999999;
+ font-weight: normal;
+}
+
+.gaugeChartMax {
+ display: inline-block;
+ position: absolute;
+ float: left;
+ left: 0;
+ bottom: 8%;
+ width: 95%;
+ margin-right: 5%;
+ text-align: right;
+ color: #999999;
+ font-weight: normal;
+}
+
+.popover-title {
+ font-weight: bold;
+ font-size: 12px;
+}
+
+.popover-content {
+ font-size: 11px;
+}
+
+/* ----------------------------------------------------------------------------
+ perfect-scrollbar settings
+ */
+
+.ps-container {
+ -ms-touch-action: auto;
+ touch-action: auto;
+ overflow: hidden !important;
+ -ms-overflow-style: none;
+}
+
+@supports (-ms-overflow-style: none) {
+ .ps-container {
+ overflow: auto !important;
+ }
+}
+
+@media screen and (-ms-high-contrast: active), (-ms-high-contrast: none) {
+ .ps-container {
+ overflow: auto !important;
+ }
+}
+
+.ps-container.ps-active-x > .ps-scrollbar-x-rail,
+.ps-container.ps-active-y > .ps-scrollbar-y-rail {
+ display: block;
+ background-color: transparent;
+}
+
+.ps-container.ps-in-scrolling.ps-x > .ps-scrollbar-x-rail {
+ background-color: transparent; /* background color when dragged away */
+ opacity: 0.9;
+}
+
+.ps-container.ps-in-scrolling.ps-x > .ps-scrollbar-x-rail > .ps-scrollbar-x {
+ background-color: #aaa; /* scrollbar color when dragged away */
+ height: 5px;
+}
+
+.ps-container.ps-in-scrolling.ps-y > .ps-scrollbar-y-rail {
+ background-color: transparent; /* background color when dragged away */
+ opacity: 0.9;
+}
+
+.ps-container.ps-in-scrolling.ps-y > .ps-scrollbar-y-rail > .ps-scrollbar-y {
+ background-color: #aaa; /* scrollbar color when dragged away */
+ width: 5px;
+}
+
+.ps-container > .ps-scrollbar-x-rail {
+ display: none;
+ position: absolute;
+ /* please don't change 'position' */
+ opacity: 0.2; /* the opacity when not on hover of the content */
+ -webkit-transition: background-color .2s linear, opacity .2s linear;
+ -o-transition: background-color .2s linear, opacity .2s linear;
+ -moz-transition: background-color .2s linear, opacity .2s linear;
+ transition: background-color .2s linear, opacity .2s linear;
+ bottom: 0px;
+ /* there must be 'bottom' for ps-scrollbar-x-rail */
+ height: 15px;
+}
+
+.ps-container > .ps-scrollbar-x-rail > .ps-scrollbar-x {
+ position: absolute;
+ /* please don't change 'position' */
+ background-color: #666; /* #aaa; the color on content hover */
+ -webkit-border-radius: 6px;
+ -moz-border-radius: 6px;
+ border-radius: 6px;
+ -webkit-transition: background-color .2s linear, height .2s linear, width .2s ease-in-out, -webkit-border-radius .2s ease-in-out;
+ transition: background-color .2s linear, height .2s linear, width .2s ease-in-out, -webkit-border-radius .2s ease-in-out;
+ -o-transition: background-color .2s linear, height .2s linear, width .2s ease-in-out, border-radius .2s ease-in-out;
+ -moz-transition: background-color .2s linear, height .2s linear, width .2s ease-in-out, border-radius .2s ease-in-out, -moz-border-radius .2s ease-in-out;
+ transition: background-color .2s linear, height .2s linear, width .2s ease-in-out, border-radius .2s ease-in-out;
+ transition: background-color .2s linear, height .2s linear, width .2s ease-in-out, border-radius .2s ease-in-out, -webkit-border-radius .2s ease-in-out, -moz-border-radius .2s ease-in-out;
+ bottom: 2px;
+ /* there must be 'bottom' for ps-scrollbar-x */
+ height: 5px; /* the width of the scrollbar */
+}
+
+.ps-container > .ps-scrollbar-x-rail:hover > .ps-scrollbar-x, .ps-container > .ps-scrollbar-x-rail:active > .ps-scrollbar-x {
+ height: 4px;
+}
+
+.ps-container > .ps-scrollbar-y-rail {
+ display: none;
+ position: absolute;
+ /* please don't change 'position' */
+ opacity: 0.2; /* the opacity when not on hover of the content */
+ -webkit-transition: background-color .2s linear, opacity .2s linear;
+ -o-transition: background-color .2s linear, opacity .2s linear;
+ -moz-transition: background-color .2s linear, opacity .2s linear;
+ transition: background-color .2s linear, opacity .2s linear;
+ right: 0;
+ /* there must be 'right' for ps-scrollbar-y-rail */
+ width: 15px;
+}
+
+.ps-container > .ps-scrollbar-y-rail > .ps-scrollbar-y {
+ position: absolute;
+ /* please don't change 'position' */
+ background-color: #666; /* #aaa; the color on content hover */
+ -webkit-border-radius: 6px;
+ -moz-border-radius: 6px;
+ border-radius: 6px;
+ -webkit-transition: background-color .2s linear, height .2s linear, width .2s ease-in-out, -webkit-border-radius .2s ease-in-out;
+ transition: background-color .2s linear, height .2s linear, width .2s ease-in-out, -webkit-border-radius .2s ease-in-out;
+ -o-transition: background-color .2s linear, height .2s linear, width .2s ease-in-out, border-radius .2s ease-in-out;
+ -moz-transition: background-color .2s linear, height .2s linear, width .2s ease-in-out, border-radius .2s ease-in-out, -moz-border-radius .2s ease-in-out;
+ transition: background-color .2s linear, height .2s linear, width .2s ease-in-out, border-radius .2s ease-in-out;
+ transition: background-color .2s linear, height .2s linear, width .2s ease-in-out, border-radius .2s ease-in-out, -webkit-border-radius .2s ease-in-out, -moz-border-radius .2s ease-in-out;
+ right: 2px;
+ /* there must be 'right' for ps-scrollbar-y */
+ width: 5px; /* the width of the scrollbar */
+}
+
+.ps-container > .ps-scrollbar-y-rail:hover > .ps-scrollbar-y, .ps-container > .ps-scrollbar-y-rail:active > .ps-scrollbar-y {
+ width: 5px;
+}
+
+.ps-container:hover.ps-in-scrolling.ps-x > .ps-scrollbar-x-rail {
+ background-color: transparent; /* background color when dragged */
+ opacity: 0.9;
+}
+
+.ps-container:hover.ps-in-scrolling.ps-x > .ps-scrollbar-x-rail > .ps-scrollbar-x {
+ background-color: #bbb; /* scrollbar color when dragged */
+ height: 5px;
+}
+
+.ps-container:hover.ps-in-scrolling.ps-y > .ps-scrollbar-y-rail {
+ background-color: transparent; /* background color when dragged */
+ opacity: 0.9;
+}
+
+.ps-container:hover.ps-in-scrolling.ps-y > .ps-scrollbar-y-rail > .ps-scrollbar-y {
+ background-color: #bbb; /* scrollbar color when dragged */
+ width: 5px;
+}
+
+.ps-container:hover > .ps-scrollbar-x-rail,
+.ps-container:hover > .ps-scrollbar-y-rail {
+ opacity: 0.6;
+}
+
+.ps-container:hover > .ps-scrollbar-x-rail:hover {
+ background-color: transparent; /* the background color on hover of the scrollbar */
+ opacity: 0.9;
+}
+
+.ps-container:hover > .ps-scrollbar-x-rail:hover > .ps-scrollbar-x {
+ background-color: #999; /* scrollbar color on hover */
+}
+
+.ps-container:hover > .ps-scrollbar-y-rail:hover {
+ background-color: transparent; /* the background color on hover of the scrollbar */
+ opacity: 0.9;
+}
+
+.ps-container:hover > .ps-scrollbar-y-rail:hover > .ps-scrollbar-y {
+ background-color: #999; /* scrollbar color on hover */
+}
+
+.dygraph__history-tip {
+ position: absolute;
+ top: 50%;
+ transform: translateY(-50%);
+    display: none; /* overridden in js */
+ margin-right: 25px;
+ direction: rtl;
+ overflow: hidden;
+ pointer-events: none;
+}
+
+.dygraph__history-tip-content {
+ display: inline-block;
+ white-space: nowrap;
+ direction: ltr;
+ pointer-events: auto;
+}
diff --git a/web/gui/dashboard.html b/web/gui/dashboard.html
new file mode 100644
index 0000000..d843fc5
--- /dev/null
+++ b/web/gui/dashboard.html
@@ -0,0 +1,699 @@
+<!DOCTYPE html>
+<!-- SPDX-License-Identifier: GPL-3.0-or-later -->
+<html lang="en">
+<head>
+ <title>NetData Dashboard</title>
+ <meta name="application-name" content="netdata">
+
+ <meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
+ <meta charset="utf-8">
+ <meta http-equiv="X-UA-Compatible" content="IE=edge,chrome=1">
+ <meta name="viewport" content="width=device-width, initial-scale=1">
+ <meta name="apple-mobile-web-app-capable" content="yes">
+ <meta name="apple-mobile-web-app-status-bar-style" content="black-translucent">
+ <meta name="author" content="costa@tsaousis.gr">
+
+ <meta property="og:locale" content="en_US" />
+ <meta property="og:image" content="https://cloud.githubusercontent.com/assets/2662304/22945737/e98cd0c6-f2fd-11e6-96f1-5501934b0955.png"/>
+ <meta property="og:url" content="http://my-netdata.io/"/>
+ <meta property="og:type" content="website"/>
+ <meta property="og:site_name" content="netdata"/>
+ <meta property="og:title" content="netdata - real-time performance monitoring, done right!"/>
+ <meta property="og:description" content="Stunning real-time dashboards, blazingly fast and extremely interactive. Zero configuration, zero dependencies, zero maintenance." />
+</head>
+<body>
+
+<div class="container-fluid">
+
+<h1>NetData Custom Dashboard <div data-netdata="system.cpu" data-chart-library="sparkline" data-height="30" data-after="-600" data-sparkline-linecolor="#888"></div></h1>
+
+This is a template for building custom dashboards. To build one, all you need is this:
+
+<pre>
+&lt;!DOCTYPE html>
+&lt;html lang="en">
+&lt;head>
+ &lt;meta http-equiv="Content-Type" content="text/html; charset=utf-8">
+ &lt;meta charset="utf-8">
+ &lt;meta http-equiv="X-UA-Compatible" content="IE=edge,chrome=1">
+ &lt;meta name="viewport" content="width=device-width, initial-scale=1">
+ &lt;meta name="apple-mobile-web-app-capable" content="yes">
+ &lt;meta name="apple-mobile-web-app-status-bar-style" content="black-translucent">
+&lt;/head>
+&lt;body>
+ &lt;div data-netdata="system.processes"
+ data-chart-library="dygraph"
+ data-width="600"
+ data-height="200"
+ data-after="-600"
+ >&lt;/div>
+&lt;/body>
+&lt;script type="text/javascript" src="http://netdata.server:19999/dashboard.js">&lt;/script>
+&lt;/html>
+</pre>
+
+<ul>
+ <li>You can host your dashboard anywhere.</li>
+ <li>You can add as many charts as you like.</li>
+    <li>You can have charts from many different netdata servers (add <code>data-host="http://another.netdata.server:19999/"</code> to each chart, as shown in the example below).</li>
+    <li>You can use different chart libraries on the same page: <b>peity</b>, <b>sparkline</b>, <b>dygraph</b>, <b>google</b>.</li>
+    <li>You can customize each chart to your preferences. For each chart library most of their attributes can be given in <b>data-</b> attributes.</li>
+    <li>Each chart can have its own duration - it is controlled with the <b>data-after</b> attribute to give that many seconds of data.</li>
+    <li>Depending on the width of the chart and the <b>data-after</b> attribute, netdata will automatically refresh the chart when it needs to be updated. For example, a width of 600 pixels for -600 seconds of data, with a chart library that needs 3 pixels per point, yields a chart updated once every 3 seconds.</li>
+</ul>
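+
+For example, here is a minimal snippet (a sketch; <b>another.netdata.server</b> is the placeholder host name from the list above) that pulls a chart from a second netdata server:
+
+<pre>
+&lt;div data-netdata="system.cpu"
+    data-host="http://another.netdata.server:19999/"
+    data-chart-library="dygraph"
+    data-width="600"
+    data-height="200"
+    data-after="-600"
+    >&lt;/div>
+</pre>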
+
+
+<hr>
+<h1>Sparkline Charts</h1>
+Sparkline charts support 'NULL' values, so they can indicate that values are missing.
+They stretch the value range to show the variations between values in more detail.
+They also have mouse-hover support.
+<br/>
+<b>Sparklines are fantastic</b>. You can inline charts in text. For example this
+ <div data-netdata="system.cpu"
+ data-chart-library="sparkline"
+ data-width="5%"
+ data-height="20"
+ data-after="-30"
+ ></div> is my current cpu usage (last 30 seconds),
+ while this
+ <div data-netdata="netdata.net"
+ data-dimensions="out"
+ data-chart-library="sparkline"
+ data-width="10%"
+ data-height="20"
+ data-after="-60"
+ ></div> is the bandwidth my netdata server is currently transmitting (last minute)
+ and this
+ <div data-netdata="netdata.requests"
+ data-chart-library="sparkline"
+ data-width="20%"
+ data-height="20"
+ data-after="-180"
+ ></div> is the requests/sec it serves (last 3 minutes).
+
+<br/>
+<div style="width: 33%; display: inline-block;">
+ <div data-netdata="system.processes"
+ data-chart-library="sparkline"
+ data-width="100%"
+ data-height="30"
+ data-after="-300"
+ data-dt-element-name="time101"
+ ></div>
+ <br/>
+ <small>rendered in <span id="time101">X</span> ms</small>
+</div>
+<div style="width: 33%; display: inline-block;">
+ <div data-netdata="system.net"
+ data-chart-library="sparkline"
+ data-width="100%"
+ data-height="30"
+ data-after="-300"
+ data-dt-element-name="time102"
+ ></div>
+ <br/>
+ <small>rendered in <span id="time102">X</span> ms</small>
+</div>
+<div style="width: 33%; display: inline-block;">
+ <div data-netdata="system.cpu"
+ data-chart-library="sparkline"
+ data-width="100%"
+ data-height="30"
+ data-after="-300"
+ data-dt-element-name="time103"
+ ></div>
+ <br/>
+ <small>rendered in <span id="time103">X</span> ms</small>
+</div>
+
+
+
+<hr>
+<h1>Peity Charts</h1>
+Peity charts do not support 'NULL' values, so they cannot indicate that values are missing.
+Peity charts cannot render multiple dimensions - so netdata uses 'min2max' to show
+the total of all dimensions.
+<br/>
+<div style="width: 33%; display: inline-block;">
+ <div data-netdata="system.processes"
+ data-chart-library="peity"
+ data-width="100%"
+ data-height="30"
+ data-after="-300"
+ data-dt-element-name="time001"
+ ></div>
+ <br/>
+ <small>rendered in <span id="time001">X</span> ms</small>
+</div>
+<div style="width: 33%; display: inline-block;">
+ <div data-netdata="system.net"
+ data-chart-library="peity"
+ data-width="100%"
+ data-height="30"
+ data-after="-300"
+ data-dt-element-name="time002"
+ ></div>
+ <br/>
+ <small>rendered in <span id="time002">X</span> ms</small>
+</div>
+<div style="width: 33%; display: inline-block;">
+ <div data-netdata="system.cpu"
+ data-chart-library="peity"
+ data-width="100%"
+ data-height="30"
+ data-after="-300"
+ data-dt-element-name="time003"
+ ></div>
+ <br/>
+ <small>rendered in <span id="time003">X</span> ms</small>
+</div>
+
+
+
+
+<hr>
+<h1>Dygraph Charts</h1>
+The fastest charting engine that can render complete charts (not just sparklines).
+The charts are zoomable (drag their contents to pan, shift + mouse wheel to zoom in or out, double click to reset).
+<b>Netdata magic!</b> Realtime charts on your web page!
+<br/>
+Sparklines using dygraphs
+ <div data-netdata="system.processes"
+ data-chart-library="dygraph"
+ data-dygraph-theme="sparkline"
+ data-legend="no"
+ data-width="15%"
+ data-height="20"
+ data-after="-300"
+ ></div>
+ are also possible! This
+ <div data-netdata="system.net"
+ data-chart-library="dygraph"
+ data-dygraph-theme="sparkline"
+ data-legend="no"
+ data-width="15%"
+ data-height="20"
+ data-after="-300"
+ ></div>
+ is an area chart, while this
+ <div data-netdata="system.cpu"
+ data-chart-library="dygraph"
+ data-dygraph-theme="sparkline"
+ data-legend="no"
+ data-width="15%"
+ data-height="20"
+ data-after="-300"
+ ></div> is a stacked area chart!
+
+
+<br/>
+<div style="width: 33%; display: inline-block;">
+ <div data-netdata="system.processes"
+ data-chart-library="dygraph"
+ data-dygraph-theme="sparkline"
+ data-width="100%"
+ data-height="30"
+ data-after="-300"
+ data-dt-element-name="time501"
+ ></div>
+ <br/>
+ <small>rendered in <span id="time501">X</span> ms</small>
+</div>
+<div style="width: 33%; display: inline-block;">
+ <div data-netdata="system.net"
+ data-chart-library="dygraph"
+ data-dygraph-theme="sparkline"
+ data-width="100%"
+ data-height="30"
+ data-after="-300"
+ data-dt-element-name="time502"
+ ></div>
+ <br/>
+ <small>rendered in <span id="time502">X</span> ms</small>
+</div>
+<div style="width: 33%; display: inline-block;">
+ <div data-netdata="system.cpu"
+ data-chart-library="dygraph"
+ data-dygraph-theme="sparkline"
+ data-width="100%"
+ data-height="30"
+ data-after="-300"
+ data-dt-element-name="time503"
+ ></div>
+ <br/>
+ <small>rendered in <span id="time503">X</span> ms</small>
+</div>
+
+
+
+<br/>
+<div style="width: 33%; display: inline-block;">
+ <div data-netdata="system.processes"
+ data-chart-library="dygraph"
+ data-width="100%"
+ data-height="200"
+ data-after="-300"
+ data-dt-element-name="time201"
+ ></div>
+ <br/>
+ <small>rendered in <span id="time201">X</span> ms</small>
+</div>
+<div style="width: 33%; display: inline-block;">
+ <div data-netdata="system.net"
+ data-chart-library="dygraph"
+ data-width="100%"
+ data-height="200"
+ data-after="-300"
+ data-dt-element-name="time202"
+ ></div>
+ <br/>
+ <small>rendered in <span id="time202">X</span> ms</small>
+</div>
+<div style="width: 33%; display: inline-block;">
+ <div data-netdata="system.cpu"
+ data-chart-library="dygraph"
+ data-width="100%"
+ data-height="200"
+ data-after="-300"
+ data-dt-element-name="time203"
+ ></div>
+ <br/>
+ <small>rendered in <span id="time203">X</span> ms</small>
+</div>
+<div style="width: 33%; display: inline-block;">
+ <div data-netdata="system.io"
+ data-chart-library="dygraph"
+ data-width="100%"
+ data-height="200"
+ data-after="-300"
+ data-dt-element-name="time204"
+ ></div>
+ <br/>
+ <small>rendered in <span id="time204">X</span> ms</small>
+</div>
+<div style="width: 33%; display: inline-block;">
+ <div data-netdata="apps.cpu"
+ data-chart-library="dygraph"
+ data-width="100%"
+ data-height="200"
+ data-after="-300"
+ data-dt-element-name="time205"
+ ></div>
+ <br/>
+ <small>rendered in <span id="time205">X</span> ms</small>
+</div>
+<div style="width: 33%; display: inline-block;">
+ <div data-netdata="netdata.net"
+ data-chart-library="dygraph"
+ data-width="100%"
+ data-height="200"
+ data-after="-300"
+ data-dt-element-name="time206"
+ ></div>
+ <br/>
+ <small>rendered in <span id="time206">X</span> ms</small>
+</div>
+<div style="width: 33%; display: inline-block;">
+ <div data-netdata="netdata.server_cpu"
+ data-chart-library="dygraph"
+ data-width="100%"
+ data-height="200"
+ data-after="-300"
+ data-dt-element-name="time207"
+ ></div>
+ <br/>
+ <small>rendered in <span id="time207">X</span> ms</small>
+</div>
+<div style="width: 33%; display: inline-block;">
+ <div data-netdata="netdata.requests"
+ data-chart-library="dygraph"
+ data-width="100%"
+ data-height="200"
+ data-after="-300"
+ data-dt-element-name="time208"
+ ></div>
+ <br/>
+ <small>rendered in <span id="time208">X</span> ms</small>
+</div>
+<div style="width: 33%; display: inline-block;">
+ <div data-netdata="net.gstag0"
+ data-chart-library="dygraph"
+ data-width="100%"
+ data-height="200"
+ data-after="-300"
+ data-dt-element-name="time209"
+ ></div>
+ <br/>
+ <small>rendered in <span id="time209">X</span> ms</small>
+</div>
+
+
+
+<hr>
+<h1>EasyPieChart</h1>
+<br/>
+<div style="width: 33%; display: inline-block;">
+ <div style="display: inline-block; position: relative;">
+ <div data-netdata="system.processes"
+ data-chart-library="easypiechart"
+ data-width="200"
+ data-height="200"
+ data-after="-300"
+ data-points="300"
+ data-dt-element-name="time601"
+ ></div>
+ <br/>
+ <small>rendered in <span id="time601">X</span> ms</small>
+ </div>
+ <div style="display: inline-block; position: relative;">
+ <div data-netdata="system.processes"
+ data-chart-library="easypiechart"
+ data-width="150"
+ data-height="150"
+ data-after="-300"
+ data-points="150"
+ data-dt-element-name="time601a"
+ ></div>
+ <br/>
+ <small>rendered in <span id="time601a">X</span> ms</small>
+ </div>
+</div>
+<div style="width: 33%; display: inline-block;">
+ <div style="display: inline-block; position: relative;">
+ <div data-netdata="system.net"
+ data-chart-library="easypiechart"
+ data-width="200"
+ data-height="200"
+ data-after="-300"
+ data-points="300"
+ data-dt-element-name="time602"
+ ></div>
+ <br/>
+ <small>rendered in <span id="time602">X</span> ms</small>
+ </div>
+ <div style="display: inline-block; position: relative;">
+ <div data-netdata="system.net"
+ data-chart-library="easypiechart"
+ data-width="100"
+ data-height="100"
+ data-after="-300"
+ data-points="150"
+ data-dt-element-name="time602a"
+ ></div>
+ <br/>
+ <small>rendered in <span id="time602a">X</span> ms</small>
+ </div>
+</div>
+<div style="width: 33%; display: inline-block;">
+ <div style="display: inline-block; position: relative;">
+ <div data-netdata="system.cpu"
+ data-chart-library="easypiechart"
+ data-width="200"
+ data-height="200"
+ data-after="-300"
+ data-points="300"
+ data-dt-element-name="time603"
+ ></div>
+ <br/>
+ <small>rendered in <span id="time603">X</span> ms</small>
+ </div>
+ <div style="display: inline-block; position: relative;">
+ <div data-netdata="system.cpu"
+ data-chart-library="easypiechart"
+ data-width="75"
+ data-height="75"
+ data-after="-300"
+ data-points="150"
+ data-dt-element-name="time603a"
+ ></div>
+ <br/>
+ <small>rendered in <span id="time603a">X</span> ms</small>
+ </div>
+</div>
+
+
+<hr>
+<h1>Gauge.js</h1>
+<br/>
+<div style="width: 33%; display: inline-block;">
+ <div style="display: inline-block; position: relative;">
+ <div data-netdata="system.processes"
+ data-chart-library="gauge"
+ data-width="250"
+ data-height="200"
+ data-after="-300"
+ data-points="300"
+ data-dt-element-name="time701"
+ ></div>
+ <br/>
+ <small>rendered in <span id="time701">X</span> ms</small>
+ </div>
+ <div style="display: inline-block; position: relative;">
+ <div data-netdata="system.processes"
+ data-chart-library="gauge"
+ data-width="125"
+ data-height="100"
+ data-after="-300"
+ data-points="150"
+ data-dt-element-name="time701a"
+ ></div>
+ <br/>
+ <small>rendered in <span id="time701a">X</span> ms</small>
+ </div>
+</div>
+<div style="width: 33%; display: inline-block;">
+ <div style="display: inline-block; position: relative;">
+ <div data-netdata="system.net"
+ data-chart-library="gauge"
+ data-width="250"
+ data-height="200"
+ data-after="-300"
+ data-points="300"
+ data-dt-element-name="time702"
+ ></div>
+ <br/>
+ <small>rendered in <span id="time702">X</span> ms</small>
+ </div>
+ <div style="display: inline-block; position: relative;">
+ <div data-netdata="system.net"
+ data-chart-library="gauge"
+ data-width="125"
+ data-height="100"
+ data-after="-300"
+ data-points="150"
+ data-dt-element-name="time702a"
+ ></div>
+ <br/>
+ <small>rendered in <span id="time702a">X</span> ms</small>
+ </div>
+</div>
+<div style="width: 33%; display: inline-block;">
+ <div style="display: inline-block; position: relative;">
+ <div data-netdata="system.cpu"
+ data-chart-library="gauge"
+ data-width="250"
+ data-height="200"
+ data-after="-300"
+ data-points="300"
+ data-dt-element-name="time703"
+ ></div>
+ <br/>
+ <small>rendered in <span id="time703">X</span> ms</small>
+ </div>
+ <div style="display: inline-block; position: relative;">
+ <div data-netdata="system.cpu"
+ data-chart-library="gauge"
+ data-width="125"
+ data-height="100"
+ data-after="-300"
+ data-points="150"
+ data-dt-element-name="time703a"
+ ></div>
+ <br/>
+ <small>rendered in <span id="time703a">X</span> ms</small>
+ </div>
+</div>
+
+
+<hr>
+<h1>Google Charts</h1>
+NetData was originally developed with Google Charts.
+NetData is a complete Google Visualization API provider.
+<br/>
+<div style="width: 33%; display: inline-block;">
+ <div data-netdata="system.processes"
+ data-chart-library="google"
+ data-width="100%"
+ data-height="200"
+ data-after="-300"
+ data-dt-element-name="time301"
+ ></div>
+ <br/>
+ <small>rendered in <span id="time301">X</span> ms</small>
+</div>
+<div style="width: 33%; display: inline-block;">
+ <div data-netdata="system.net"
+ data-chart-library="google"
+ data-width="100%"
+ data-height="200"
+ data-after="-300"
+ data-dt-element-name="time302"
+ ></div>
+ <br/>
+ <small>rendered in <span id="time302">X</span> ms</small>
+</div>
+<div style="width: 33%; display: inline-block;">
+ <div data-netdata="system.cpu"
+ data-chart-library="google"
+ data-width="100%"
+ data-height="200"
+ data-after="-300"
+ data-dt-element-name="time303"
+ ></div>
+ <br/>
+ <small>rendered in <span id="time303">X</span> ms</small>
+</div>
+
+<!--
+
+<hr>
+<h1>Morris Charts</h1>
+Unfortunately, Morris Charts are very slow. Here we force them to lower their detail to get acceptable results.
+<br/>
+<div style="width: 33%; display: inline-block;">
+ <div data-netdata="system.processes"
+ data-chart-library="morris"
+ data-width="100%"
+ data-height="200"
+ data-after="-300"
+ data-dt-element-name="time401"
+ ></div>
+ <br/>
+ <small>rendered in <span id="time401">X</span> ms</small>
+</div>
+<div style="width: 33%; display: inline-block;">
+ <div data-netdata="system.net"
+ data-chart-library="morris"
+ data-width="100%"
+ data-height="200"
+ data-after="-300"
+ data-dt-element-name="time402"
+ ></div>
+ <br/>
+ <small>rendered in <span id="time402">X</span> ms</small>
+</div>
+<div style="width: 33%; display: inline-block;">
+ <div data-netdata="system.cpu"
+ data-chart-library="morris"
+ data-width="100%"
+ data-height="200"
+ data-after="-300"
+ data-dt-element-name="time403"
+ ></div>
+ <br/>
+ <small>rendered in <span id="time403">X</span> ms</small>
+</div>
+
+
+<hr>
+<h1>C3 Charts</h1>
+C3 charts are not usable at scale. They suffer from the following issues:
+<ul>
+    <li>extreme use of transitions (implemented with D3 instead of CSS, meaning they are rendered in javascript) that cannot be disabled - even opacity is hardcoded in the javascript library</li>
+    <li>rendering is done with <code>SVG</code> instead of <code>canvas</code>, so they use DOM elements for every point, becoming useless when more than 500 points are drawn</li>
+    <li>lack of a <code>raw</code> data format, so every time a chart is updated, data conversion in javascript is required</li>
+    <li>lack of <code>stacked</code> charts support</li>
+</ul>
+So, to avoid flashing the charts, we destroy and re-create them on each update. Also, since they manipulate the data with javascript, we were forced to lower the detail they render to get acceptable speeds.
+<br/>
+<div style="width: 33%; display: inline-block;">
+ <div data-netdata="system.processes"
+ data-chart-library="c3"
+ data-width="100%"
+ data-height="200"
+ data-after="-300"
+ data-dt-element-name="time801"
+ ></div>
+ <br/>
+ <small>rendered in <span id="time801">X</span> ms</small>
+</div>
+<div style="width: 33%; display: inline-block;">
+ <div data-netdata="system.net"
+ data-chart-library="c3"
+ data-width="100%"
+ data-height="200"
+ data-after="-300"
+ data-dt-element-name="time802"
+ ></div>
+ <br/>
+ <small>rendered in <span id="time802">X</span> ms</small>
+</div>
+<div style="width: 33%; display: inline-block;">
+ <div data-netdata="system.cpu"
+ data-chart-library="c3"
+ data-width="100%"
+ data-height="200"
+ data-after="-300"
+ data-dt-element-name="time803"
+ ></div>
+ <br/>
+ <small>rendered in <span id="time803">X</span> ms</small>
+</div>
+
+-->
+
+ <hr>
+ <h1>d3pie Charts</h1>
+ <div style="width: 33%; display: inline-block;">
+ <div data-netdata="apps.cpu"
+ data-chart-library="d3pie"
+ data-d3pie-pieinnerradius="70%"
+ data-d3pie-pieouterradius="90%"
+ data-width="100%"
+ data-height="300px"
+ data-after="-60"
+ data-points="60"
+ data-dt-element-name="time901"
+ ></div>
+ <br/>
+ <small>rendered in <span id="time901">X</span> ms</small>
+ </div>
+
+ <div style="width: 33%; display: inline-block;">
+ <div data-netdata="system.cpu"
+ data-chart-library="d3pie"
+ data-width="100%"
+ data-height="300px"
+ data-after="-60"
+ data-points="60"
+ data-dt-element-name="time902"
+ ></div>
+ <br/>
+ <small>rendered in <span id="time902">X</span> ms</small>
+ </div>
+
+ <div style="width: 33%; display: inline-block;">
+ <div data-netdata="apps.cpu"
+ data-width="100%"
+ data-height="300px"
+ data-after="-60"
+ data-points="60"
+ data-debug="false"
+ data-decimal-digits="0"
+ data-dt-element-name="time903"
+ ></div>
+ <br/>
+ <small>rendered in <span id="time903">X</span> ms</small>
+ </div>
+</div> <!-- 1st container -->
+</body>
+</html>
+
+<!-- you can set your netdata server globally, by uncommenting this -->
+<!-- you can also give a different server per chart, with the attribute: data-host="http://netdata.server:19999" -->
+<!-- <script> netdataServer = "http://box:19999"; </script> -->
+
+<!-- load the dashboard manager - it will do the rest -->
+<!-- <script>var netdataTheme = 'slate';</script> -->
+<script type="text/javascript" src="dashboard.js?v20190902-0"></script>
diff --git a/web/gui/dashboard.js b/web/gui/dashboard.js
new file mode 100644
index 0000000..53e9090
--- /dev/null
+++ b/web/gui/dashboard.js
@@ -0,0 +1,10377 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+// DO NOT EDIT: This file is automatically generated from the source files in src/
+
+// ----------------------------------------------------------------------------
+// You can set the following variables before loading this script:
+
+// 'use strict';
+
+/*global netdataNoDygraphs *//* boolean, disable dygraph charts
+ * (default: false) */
+/*global netdataNoSparklines *//* boolean, disable sparkline charts
+ * (default: false) */
+/*global netdataNoPeitys *//* boolean, disable peity charts
+ * (default: false) */
+/*global netdataNoGoogleCharts *//* boolean, disable google charts
+ * (default: false) */
+/*global netdataNoMorris *//* boolean, disable morris charts
+ * (default: false) */
+/*global netdataNoEasyPieChart *//* boolean, disable easypiechart charts
+ * (default: false) */
+/*global netdataNoGauge *//* boolean, disable gauge.js charts
+ * (default: false) */
+/*global netdataNoD3 *//* boolean, disable d3 charts
+ * (default: false) */
+/*global netdataNoC3 *//* boolean, disable c3 charts
+ * (default: false) */
+/*global netdataNoD3pie *//* boolean, disable d3pie charts
+ * (default: false) */
+/*global netdataNoBootstrap *//* boolean, disable bootstrap - disables help too
+ * (default: false) */
+/*global netdataNoFontAwesome *//* boolean, disable fontawesome (do not load it)
+ * (default: false) */
+/*global netdataIcons *//* object, overwrite netdata fontawesome icons
+ * (default: null) */
+/*global netdataDontStart *//* boolean, do not start the thread to process the charts
+ * (default: false) */
+/*global netdataErrorCallback *//* function, callback to be called when the dashboard encounters an error
+ * (default: null) */
+/*global netdataRegistry:true *//* boolean, use the netdata registry
+ * (default: false) */
+/*global netdataNoRegistry *//* boolean, included only for compatibility with existing custom dashboards
+ * (obsolete - do not use this any more) */
+/*global netdataRegistryCallback *//* function, callback that will be invoked with one param: the URLs from the registry
+ * (default: null) */
+/*global netdataShowHelp:true *//* boolean, show help for the charts
+ * (default: true) */
+/*global netdataShowAlarms:true *//* boolean, enable alarm checks and notifications
+ * (default: false) */
+/*global netdataRegistryAfterMs:true *//* ms, delay registry use after startup
+ * (default: 1500) */
+/*global netdataCallback *//* function, callback to be called when netdata is ready to start
+ * (default: null)
+ * netdata will be running while this is called
+ * (call NETDATA.pause to stop it) */
+/*global netdataPrepCallback *//* function, callback to be called before netdata does anything else
+ * (default: null) */
+/*global netdataServer *//* string, the URL of the netdata server to use
+ * (default: the URL the page is hosted at) */
+/*global netdataServerStatic *//* string, the URL of the netdata server to use for static files
+ * (default: netdataServer) */
+/*global netdataSnapshotData *//* object, a netdata snapshot loaded
+ * (default: null) */
+/*global netdataAlarmsRecipients *//* array, an array of alarm recipients to show notifications for
+ * (default: null) */
+/*global netdataAlarmsRemember *//* boolean, keep our position in the alarm log in the browser's local storage
+ * (default: true) */
+/*global netdataAlarmsActiveCallback *//* function, a hook for the alarm logs
+ * (default: undefined) */
+/*global netdataAlarmsNotifCallback *//* function, a hook for alarm notifications
+ * (default: undefined) */
+/*global netdataIntersectionObserver *//* boolean, enable or disable the use of intersection observer
+ * (default: true) */
+/*global netdataCheckXSS *//* boolean, enable or disable checking for XSS issues
+ * (default: false) */
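+
+// An illustrative sketch (not part of the generated sources): a custom
+// dashboard page would set any of the globals documented above *before*
+// loading dashboard.js. The host below is hypothetical.
+//
+// <script>
+//     var netdataServer = "http://my-netdata.example.com:19999";
+//     var netdataShowHelp = false;   // no help bubbles
+//     var netdataShowAlarms = true;  // enable alarm checks and notifications
+//     var netdataErrorCallback = function (scope, code, msg) {
+//         console.log('netdata error:', scope, code, msg);
+//         return true;               // also run the default handling
+//     };
+// </script>
+// <script type="text/javascript" src="dashboard.js"></script>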
+
+// ----------------------------------------------------------------------------
+// global namespace
+
+// Should stay var!
+var NETDATA = window.NETDATA || {};
+
+(function(window, document, $, undefined) {
+
+// *** src/dashboard.js/utils.js
+
+NETDATA.name2id = function (s) {
+ return s
+ .replace(/ /g, '_')
+ .replace(/:/g, '_')
+ .replace(/\(/g, '_')
+ .replace(/\)/g, '_')
+ .replace(/\./g, '_')
+ .replace(/\//g, '_');
+};
+
+NETDATA.encodeURIComponent = function (s) {
+ if (typeof(s) === 'string') {
+ return encodeURIComponent(s);
+ }
+
+ return s;
+};
+
+/// A heuristic for detecting slow devices.
+let isSlowDeviceResult = undefined;
+const isSlowDevice = function () {
+    // return the cached result, if it has already been computed
+    if (isSlowDeviceResult !== undefined) {
+        return isSlowDeviceResult;
+    }
+
+ try {
+ let ua = navigator.userAgent.toLowerCase();
+
+ let iOS = /ipad|iphone|ipod/.test(ua) && !window.MSStream;
+ let android = /android/.test(ua) && !window.MSStream;
+ isSlowDeviceResult = (iOS || android);
+ } catch (e) {
+ isSlowDeviceResult = false;
+ }
+
+ return isSlowDeviceResult;
+};
+
+NETDATA.guid = function () {
+ function s4() {
+ return Math.floor((1 + Math.random()) * 0x10000)
+ .toString(16)
+ .substring(1);
+ }
+
+ return s4() + s4() + '-' + s4() + '-' + s4() + '-' + s4() + '-' + s4() + s4() + s4();
+};
+
+NETDATA.zeropad = function (x) {
+ if (x > -10 && x < 10) {
+ return '0' + x.toString();
+ } else {
+ return x.toString();
+ }
+};
+
+NETDATA.seconds4human = function (seconds, options) {
+ let defaultOptions = {
+ now: 'now',
+ space: ' ',
+ negative_suffix: 'ago',
+ day: 'day',
+ days: 'days',
+ hour: 'hour',
+ hours: 'hours',
+ minute: 'min',
+ minutes: 'mins',
+ second: 'sec',
+ seconds: 'secs',
+ and: 'and'
+ };
+
+ if (typeof options !== 'object') {
+ options = defaultOptions;
+ } else {
+ for (var x in defaultOptions) {
+ if (typeof options[x] !== 'string') {
+ options[x] = defaultOptions[x];
+ }
+ }
+ }
+
+ if (typeof seconds === 'string') {
+ seconds = parseInt(seconds, 10);
+ }
+
+ if (seconds === 0) {
+ return options.now;
+ }
+
+ let suffix = '';
+ if (seconds < 0) {
+ seconds = -seconds;
+ if (options.negative_suffix !== '') {
+ suffix = options.space + options.negative_suffix;
+ }
+ }
+
+ let days = Math.floor(seconds / 86400);
+ seconds -= (days * 86400);
+
+ let hours = Math.floor(seconds / 3600);
+ seconds -= (hours * 3600);
+
+ let minutes = Math.floor(seconds / 60);
+ seconds -= (minutes * 60);
+
+ let strings = [];
+
+ if (days > 1) {
+ strings.push(days.toString() + options.space + options.days);
+ } else if (days === 1) {
+ strings.push(days.toString() + options.space + options.day);
+ }
+
+ if (hours > 1) {
+ strings.push(hours.toString() + options.space + options.hours);
+ } else if (hours === 1) {
+ strings.push(hours.toString() + options.space + options.hour);
+ }
+
+ if (minutes > 1) {
+ strings.push(minutes.toString() + options.space + options.minutes);
+ } else if (minutes === 1) {
+ strings.push(minutes.toString() + options.space + options.minute);
+ }
+
+ if (seconds > 1) {
+ strings.push(Math.floor(seconds).toString() + options.space + options.seconds);
+ } else if (seconds === 1) {
+ strings.push(Math.floor(seconds).toString() + options.space + options.second);
+ }
+
+ if (strings.length === 1) {
+ return strings.pop() + suffix;
+ }
+
+ let last = strings.pop();
+ return strings.join(", ") + " " + options.and + " " + last + suffix;
+};
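+
+// Illustrative usage, derived from the logic above:
+//     NETDATA.seconds4human(3661);  // "1 hour, 1 min and 1 sec"
+//     NETDATA.seconds4human(-120);  // "2 mins ago"
+//     NETDATA.seconds4human(0);     // "now"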
+
+// ----------------------------------------------------------------------------------------------------------------
+// element data attributes
+
+NETDATA.dataAttribute = function (element, attribute, def) {
+ let key = 'data-' + attribute.toString();
+ if (element.hasAttribute(key)) {
+ let data = element.getAttribute(key);
+
+ if (data === 'true') {
+ return true;
+ }
+ if (data === 'false') {
+ return false;
+ }
+ if (data === 'null') {
+ return null;
+ }
+
+ // Only convert to a number if it doesn't change the string
+ if (data === +data + '') {
+ return +data;
+ }
+
+ if (/^(?:\{[\w\W]*\}|\[[\w\W]*\])$/.test(data)) {
+ return JSON.parse(data);
+ }
+
+ return data;
+ } else {
+ return def;
+ }
+};
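+
+// Illustrative usage of the coercion rules above (the element and its
+// attributes are hypothetical):
+//     <div id="x" data-points="300" data-my-list='[1,2,3]' data-title="cpu"></div>
+//     NETDATA.dataAttribute(el, 'points', 100);    // 300 (coerced to a number)
+//     NETDATA.dataAttribute(el, 'my-list', null);  // [1, 2, 3] (parsed as JSON)
+//     NETDATA.dataAttribute(el, 'title', '');      // "cpu" (plain string)
+//     NETDATA.dataAttribute(el, 'missing', 42);    // 42 (the supplied default)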
+
+NETDATA.dataAttributeBoolean = function (element, attribute, def) {
+ let value = NETDATA.dataAttribute(element, attribute, def);
+
+ if (value === true || value === false) // gmosx: Love this :)
+ {
+ return value;
+ }
+
+ if (typeof(value) === 'string') {
+ if (value === 'yes' || value === 'on') {
+ return true;
+ }
+
+ if (value === '' || value === 'no' || value === 'off' || value === 'null') {
+ return false;
+ }
+
+ return def;
+ }
+
+ if (typeof(value) === 'number') {
+ return value !== 0;
+ }
+
+ return def;
+};
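+
+// Illustrative examples for the boolean coercion above (hypothetical attribute):
+//     data-foo="yes" / "on"       -> true
+//     data-foo="no" / "off" / ""  -> false
+//     data-foo="1"                -> true  (numbers: anything non-zero)
+//     data-foo="maybe"            -> the supplied default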
+
+// ----------------------------------------------------------------------------------------------------------------
+// fast numbers formatting
+
+NETDATA.fastNumberFormat = {
+ formattersFixed: [],
+ formattersZeroBased: [],
+
+ // this is the fastest and the preferred
+ getIntlNumberFormat: function (min, max) {
+ let key = max;
+ if (min === max) {
+ if (typeof this.formattersFixed[key] === 'undefined') {
+ this.formattersFixed[key] = new Intl.NumberFormat(undefined, {
+ // style: 'decimal',
+ // minimumIntegerDigits: 1,
+ // minimumSignificantDigits: 1,
+ // maximumSignificantDigits: 1,
+ useGrouping: true,
+ minimumFractionDigits: min,
+ maximumFractionDigits: max
+ });
+ }
+
+ return this.formattersFixed[key];
+ } else if (min === 0) {
+ if (typeof this.formattersZeroBased[key] === 'undefined') {
+ this.formattersZeroBased[key] = new Intl.NumberFormat(undefined, {
+ // style: 'decimal',
+ // minimumIntegerDigits: 1,
+ // minimumSignificantDigits: 1,
+ // maximumSignificantDigits: 1,
+ useGrouping: true,
+ minimumFractionDigits: min,
+ maximumFractionDigits: max
+ });
+ }
+
+ return this.formattersZeroBased[key];
+ } else {
+ // this is never used
+ // it is added just for completeness
+ return new Intl.NumberFormat(undefined, {
+ // style: 'decimal',
+ // minimumIntegerDigits: 1,
+ // minimumSignificantDigits: 1,
+ // maximumSignificantDigits: 1,
+ useGrouping: true,
+ minimumFractionDigits: min,
+ maximumFractionDigits: max
+ });
+ }
+ },
+
+ // this respects locale
+ getLocaleString: function (min, max) {
+ let key = max;
+ if (min === max) {
+ if (typeof this.formattersFixed[key] === 'undefined') {
+ this.formattersFixed[key] = {
+ format: function (value) {
+ return value.toLocaleString(undefined, {
+ // style: 'decimal',
+ // minimumIntegerDigits: 1,
+ // minimumSignificantDigits: 1,
+ // maximumSignificantDigits: 1,
+ useGrouping: true,
+ minimumFractionDigits: min,
+ maximumFractionDigits: max
+ });
+ }
+ };
+ }
+
+ return this.formattersFixed[key];
+ } else if (min === 0) {
+ if (typeof this.formattersZeroBased[key] === 'undefined') {
+ this.formattersZeroBased[key] = {
+ format: function (value) {
+ return value.toLocaleString(undefined, {
+ // style: 'decimal',
+ // minimumIntegerDigits: 1,
+ // minimumSignificantDigits: 1,
+ // maximumSignificantDigits: 1,
+ useGrouping: true,
+ minimumFractionDigits: min,
+ maximumFractionDigits: max
+ });
+ }
+ };
+ }
+
+ return this.formattersZeroBased[key];
+ } else {
+ return {
+ format: function (value) {
+ return value.toLocaleString(undefined, {
+ // style: 'decimal',
+ // minimumIntegerDigits: 1,
+ // minimumSignificantDigits: 1,
+ // maximumSignificantDigits: 1,
+ useGrouping: true,
+ minimumFractionDigits: min,
+ maximumFractionDigits: max
+ });
+ }
+ };
+ }
+ },
+
+ // the fallback
+ getFixed: function (min, max) {
+ let key = max;
+ if (min === max) {
+ if (typeof this.formattersFixed[key] === 'undefined') {
+ this.formattersFixed[key] = {
+ format: function (value) {
+ if (value === 0) {
+ return "0";
+ }
+ return value.toFixed(max);
+ }
+ };
+ }
+
+ return this.formattersFixed[key];
+ } else if (min === 0) {
+ if (typeof this.formattersZeroBased[key] === 'undefined') {
+ this.formattersZeroBased[key] = {
+ format: function (value) {
+ if (value === 0) {
+ return "0";
+ }
+ return value.toFixed(max);
+ }
+ };
+ }
+
+ return this.formattersZeroBased[key];
+ } else {
+ return {
+ format: function (value) {
+ if (value === 0) {
+ return "0";
+ }
+ return value.toFixed(max);
+ }
+ };
+ }
+ },
+
+ testIntlNumberFormat: function () {
+ let value = 1.12345;
+ let e1 = "1.12", e2 = "1,12";
+ let s = "";
+
+ try {
+ let x = new Intl.NumberFormat(undefined, {
+ useGrouping: true,
+ minimumFractionDigits: 2,
+ maximumFractionDigits: 2
+ });
+
+ s = x.format(value);
+ } catch (e) {
+ s = "";
+ }
+
+ // console.log('NumberFormat: ', s);
+ return (s === e1 || s === e2);
+ },
+
+ testLocaleString: function () {
+ let value = 1.12345;
+ let e1 = "1.12", e2 = "1,12";
+ let s = "";
+
+ try {
+ s = value.toLocaleString(undefined, {
+ useGrouping: true,
+ minimumFractionDigits: 2,
+ maximumFractionDigits: 2
+ });
+ } catch (e) {
+ s = "";
+ }
+
+ // console.log('localeString: ', s);
+ return (s === e1 || s === e2);
+ },
+
+ // on first run we decide which formatter to use
+ get: function (min, max) {
+ if (this.testIntlNumberFormat()) {
+ // console.log('numberformat');
+ this.get = this.getIntlNumberFormat;
+ } else if (this.testLocaleString()) {
+ // console.log('localestring');
+ this.get = this.getLocaleString;
+ } else {
+ // console.log('fixed');
+ this.get = this.getFixed;
+ }
+ return this.get(min, max);
+ }
+};
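+
+// Illustrative usage: the first .get() call patches itself to the fastest
+// formatter the browser supports; subsequent calls reuse cached formatters.
+//     let f = NETDATA.fastNumberFormat.get(0, 2);
+//     f.format(1234.5);  // "1,234.5" (or "1.234,5", depending on the locale)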
+
+// ----------------------------------------------------------------------------------------------------------------
+// Detect the netdata server
+
+// http://stackoverflow.com/questions/984510/what-is-my-script-src-url
+// http://stackoverflow.com/questions/6941533/get-protocol-domain-and-port-from-url
+NETDATA._scriptSource = function () {
+ let script = null;
+
+ if (typeof document.currentScript !== 'undefined') {
+ script = document.currentScript;
+ } else {
+ const all_scripts = document.getElementsByTagName('script');
+ script = all_scripts[all_scripts.length - 1];
+ }
+
+ if (typeof script.getAttribute.length !== 'undefined') {
+ script = script.src;
+ } else {
+ script = script.getAttribute('src', -1);
+ }
+
+ return script;
+};
+
+// *** src/dashboard.js/server-detection.js
+
+if (typeof netdataServer !== 'undefined') {
+ NETDATA.serverDefault = netdataServer;
+} else {
+ let s = NETDATA._scriptSource();
+ if (s) {
+ NETDATA.serverDefault = s.replace(/\/dashboard.js(\?.*)?$/g, "");
+ } else {
+ console.log('WARNING: Cannot detect the URL of the netdata server.');
+ NETDATA.serverDefault = null;
+ }
+}
+
+if (NETDATA.serverDefault === null) {
+ NETDATA.serverDefault = '';
+} else if (NETDATA.serverDefault.slice(-1) !== '/') {
+ NETDATA.serverDefault += '/';
+}
+
+if (typeof netdataServerStatic !== 'undefined' && netdataServerStatic !== null && netdataServerStatic !== '') {
+ NETDATA.serverStatic = netdataServerStatic;
+ if (NETDATA.serverStatic.slice(-1) !== '/') {
+ NETDATA.serverStatic += '/';
+ }
+} else {
+ NETDATA.serverStatic = NETDATA.serverDefault;
+}
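+
+// Illustrative example of the detection above (hypothetical host):
+//     <script src="http://box:19999/dashboard.js"></script>
+// yields NETDATA.serverDefault === "http://box:19999/" and, unless
+// netdataServerStatic is set, the same base URL for static files.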
+
+// *** src/dashboard.js/dependencies.js
+
+// default URLs for all the external files we need
+// make them RELATIVE so that the whole thing can also be
+// installed under a web server
+NETDATA.jQuery = NETDATA.serverStatic + 'lib/jquery-2.2.4.min.js';
+NETDATA.peity_js = NETDATA.serverStatic + 'lib/jquery.peity-3.2.0.min.js';
+NETDATA.sparkline_js = NETDATA.serverStatic + 'lib/jquery.sparkline-2.1.2.min.js';
+NETDATA.easypiechart_js = NETDATA.serverStatic + 'lib/jquery.easypiechart-97b5824.min.js';
+NETDATA.gauge_js = NETDATA.serverStatic + 'lib/gauge-1.3.2.min.js';
+NETDATA.dygraph_js = NETDATA.serverStatic + 'lib/dygraph-c91c859.min.js';
+NETDATA.dygraph_smooth_js = NETDATA.serverStatic + 'lib/dygraph-smooth-plotter-c91c859.js';
+// NETDATA.raphael_js = NETDATA.serverStatic + 'lib/raphael-2.2.4-min.js';
+// NETDATA.c3_js = NETDATA.serverStatic + 'lib/c3-0.4.18.min.js';
+// NETDATA.c3_css = NETDATA.serverStatic + 'css/c3-0.4.18.min.css';
+NETDATA.d3pie_js = NETDATA.serverStatic + 'lib/d3pie-0.2.1-netdata-3.js';
+NETDATA.d3_js = NETDATA.serverStatic + 'lib/d3-4.12.2.min.js';
+// NETDATA.morris_js = NETDATA.serverStatic + 'lib/morris-0.5.1.min.js';
+// NETDATA.morris_css = NETDATA.serverStatic + 'css/morris-0.5.1.css';
+NETDATA.google_js = 'https://www.google.com/jsapi';
+// Error Handling
+
+NETDATA.errorCodes = {
+ 100: {message: "Cannot load chart library", alert: true},
+ 101: {message: "Cannot load jQuery", alert: true},
+ 402: {message: "Chart library not found", alert: false},
+ 403: {message: "Chart library not enabled/is failed", alert: false},
+ 404: {message: "Chart not found", alert: false},
+ 405: {message: "Cannot download charts index from server", alert: true},
+ 406: {message: "Invalid charts index downloaded from server", alert: true},
+ 407: {message: "Cannot HELLO netdata server", alert: false},
+ 408: {message: "Netdata servers sent invalid response to HELLO", alert: false},
+ 409: {message: "Cannot ACCESS netdata registry", alert: false},
+ 410: {message: "Netdata registry ACCESS failed", alert: false},
+ 411: {message: "Netdata registry server send invalid response to DELETE ", alert: false},
+ 412: {message: "Netdata registry DELETE failed", alert: false},
+ 413: {message: "Netdata registry server send invalid response to SWITCH ", alert: false},
+ 414: {message: "Netdata registry SWITCH failed", alert: false},
+ 415: {message: "Netdata alarms download failed", alert: false},
+ 416: {message: "Netdata alarms log download failed", alert: false},
+ 417: {message: "Netdata registry server send invalid response to SEARCH ", alert: false},
+ 418: {message: "Netdata registry SEARCH failed", alert: false}
+};
+
+NETDATA.errorLast = {
+ code: 0,
+ message: "",
+ datetime: 0
+};
+
+NETDATA.error = function (code, msg) {
+ NETDATA.errorLast.code = code;
+ NETDATA.errorLast.message = msg;
+ NETDATA.errorLast.datetime = Date.now();
+
+ console.log("ERROR " + code + ": " + NETDATA.errorCodes[code].message + ": " + msg);
+
+ let ret = true;
+ if (typeof netdataErrorCallback === 'function') {
+ ret = netdataErrorCallback('system', code, msg);
+ }
+
+ if (ret && NETDATA.errorCodes[code].alert) {
+ alert("ERROR " + code + ": " + NETDATA.errorCodes[code].message + ": " + msg);
+ }
+};
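+
+// Illustrative sketch: a page can intercept these errors by defining
+// netdataErrorCallback before this script loads; returning false
+// suppresses the alert() for the codes flagged "alert: true" above.
+//
+// var netdataErrorCallback = function (scope, code, msg) {
+//     reportToMyBackend(scope, code, msg);  // reportToMyBackend is hypothetical
+//     return false;                         // handled - never alert()
+// };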
+
+NETDATA.errorReset = function () {
+ NETDATA.errorLast.code = 0;
+ NETDATA.errorLast.message = "You are doing fine!";
+ NETDATA.errorLast.datetime = 0;
+};
+// *** src/dashboard.js/compatibility.js
+
+// Compatibility fixes.
+
+// fix IE issue with console
+if (!window.console) {
+ window.console = {
+ log: function () {
+ }
+ };
+}
+
+// if string.endsWith is not defined, define it
+if (typeof String.prototype.endsWith !== 'function') {
+ String.prototype.endsWith = function (s) {
+ if (s.length > this.length) {
+ return false;
+ }
+ return this.slice(-s.length) === s;
+ };
+}
+
+// if string.startsWith is not defined, define it
+if (typeof String.prototype.startsWith !== 'function') {
+ String.prototype.startsWith = function (s) {
+ if (s.length > this.length) {
+ return false;
+ }
+        return this.slice(0, s.length) === s;
+ };
+}
+// ----------------------------------------------------------------------------------------------------------------
+// XSS checks
+
+NETDATA.xss = {
+ enabled: (typeof netdataCheckXSS === 'undefined') ? false : netdataCheckXSS,
+ enabled_for_data: (typeof netdataCheckXSS === 'undefined') ? false : netdataCheckXSS,
+
+ string: function (s) {
+ return s.toString()
+ .replace(/</g, '&lt;')
+ .replace(/>/g, '&gt;')
+ .replace(/"/g, '&quot;')
+ .replace(/'/g, '&#39;');
+ },
+
+ object: function (name, obj, ignore_regex) {
+ if (typeof ignore_regex !== 'undefined' && ignore_regex.test(name)) {
+ // console.log('XSS: ignoring "' + name + '"');
+ return obj;
+ }
+
+ switch (typeof(obj)) {
+ case 'string':
+ const ret = this.string(obj);
+ if (ret !== obj) {
+ console.log('XSS protection changed string ' + name + ' from "' + obj + '" to "' + ret + '"');
+ }
+ return ret;
+
+ case 'object':
+ if (obj === null) {
+ return obj;
+ }
+
+ if (Array.isArray(obj)) {
+ // console.log('checking array "' + name + '"');
+
+ let len = obj.length;
+ while (len--) {
+ obj[len] = this.object(name + '[' + len + ']', obj[len], ignore_regex);
+ }
+ } else {
+ // console.log('checking object "' + name + '"');
+
+ for (var i in obj) {
+ if (obj.hasOwnProperty(i) === false) {
+ continue;
+ }
+ if (this.string(i) !== i) {
+ console.log('XSS protection removed invalid object member "' + name + '.' + i + '"');
+ delete obj[i];
+ } else {
+ obj[i] = this.object(name + '.' + i, obj[i], ignore_regex);
+ }
+ }
+ }
+ return obj;
+
+ default:
+ return obj;
+ }
+ },
+
+ checkOptional: function (name, obj, ignore_regex) {
+ if (this.enabled) {
+ //console.log('XSS: checking optional "' + name + '"...');
+ return this.object(name, obj, ignore_regex);
+ }
+ return obj;
+ },
+
+ checkAlways: function (name, obj, ignore_regex) {
+ //console.log('XSS: checking always "' + name + '"...');
+ return this.object(name, obj, ignore_regex);
+ },
+
+ checkData: function (name, obj, ignore_regex) {
+ if (this.enabled_for_data) {
+ //console.log('XSS: checking data "' + name + '"...');
+ return this.object(name, obj, ignore_regex);
+ }
+ return obj;
+ }
+};
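+
+// Illustrative usage:
+//     NETDATA.xss.string('<img src=x onerror=alert(1)>');
+//         // -> "&lt;img src=x onerror=alert(1)&gt;"
+//     // checkData()/checkOptional() are no-ops unless netdataCheckXSS is set:
+//     NETDATA.xss.checkData('/api/v1/data', response);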
+NETDATA.colorHex2Rgb = function (hex) {
+ // Expand shorthand form (e.g. "03F") to full form (e.g. "0033FF")
+ let shorthandRegex = /^#?([a-f\d])([a-f\d])([a-f\d])$/i;
+ hex = hex.replace(shorthandRegex, function (m, r, g, b) {
+ return r + r + g + g + b + b;
+ });
+
+ let result = /^#?([a-f\d]{2})([a-f\d]{2})([a-f\d]{2})$/i.exec(hex);
+ return result ? {
+ r: parseInt(result[1], 16),
+ g: parseInt(result[2], 16),
+ b: parseInt(result[3], 16)
+ } : null;
+};
+
+NETDATA.colorLuminance = function (hex, lum) {
+ // validate hex string
+ hex = String(hex).replace(/[^0-9a-f]/gi, '');
+ if (hex.length < 6) {
+ hex = hex[0] + hex[0] + hex[1] + hex[1] + hex[2] + hex[2];
+ }
+
+ lum = lum || 0;
+
+ // convert to decimal and change luminosity
+ let rgb = "#";
+ for (let i = 0; i < 3; i++) {
+ let c = parseInt(hex.substr(i * 2, 2), 16);
+ c = Math.round(Math.min(Math.max(0, c + (c * lum)), 255)).toString(16);
+ rgb += ("00" + c).substr(c.length);
+ }
+
+ return rgb;
+};
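+
+// Illustrative usage:
+//     NETDATA.colorHex2Rgb('#03F');            // { r: 0, g: 51, b: 255 }
+//     NETDATA.colorLuminance('#3366CC', -0.2); // "#2952a3" (darker)
+//     NETDATA.colorLuminance('#3366CC', 0.2);  // "#3d7af5" (lighter)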
+NETDATA.unitsConversion = {
+ keys: {}, // keys for data-common-units
+ latest: {}, // latest selected units for data-common-units
+
+ globalReset: function () {
+ this.keys = {};
+ this.latest = {};
+ },
+
+ scalableUnits: {
+ 'packets/s': {
+ 'pps': 1,
+ 'Kpps': 1000,
+ 'Mpps': 1000000
+ },
+ 'pps': {
+ 'pps': 1,
+ 'Kpps': 1000,
+ 'Mpps': 1000000
+ },
+ 'kilobits/s': {
+ 'bits/s': 1 / 1000,
+ 'kilobits/s': 1,
+ 'megabits/s': 1000,
+ 'gigabits/s': 1000000,
+ 'terabits/s': 1000000000
+ },
+ 'bytes/s': {
+ 'bytes/s': 1,
+ 'kilobytes/s': 1024,
+ 'megabytes/s': 1024 * 1024,
+ 'gigabytes/s': 1024 * 1024 * 1024,
+ 'terabytes/s': 1024 * 1024 * 1024 * 1024
+ },
+ 'kilobytes/s': {
+ 'bytes/s': 1 / 1024,
+ 'kilobytes/s': 1,
+ 'megabytes/s': 1024,
+ 'gigabytes/s': 1024 * 1024,
+ 'terabytes/s': 1024 * 1024 * 1024
+ },
+ 'B/s': {
+ 'B/s': 1,
+ 'KiB/s': 1024,
+ 'MiB/s': 1024 * 1024,
+ 'GiB/s': 1024 * 1024 * 1024,
+ 'TiB/s': 1024 * 1024 * 1024 * 1024
+ },
+ 'KB/s': {
+ 'B/s': 1 / 1024,
+ 'KB/s': 1,
+ 'MB/s': 1024,
+ 'GB/s': 1024 * 1024,
+ 'TB/s': 1024 * 1024 * 1024
+ },
+ 'KiB/s': {
+ 'B/s': 1 / 1024,
+ 'KiB/s': 1,
+ 'MiB/s': 1024,
+ 'GiB/s': 1024 * 1024,
+ 'TiB/s': 1024 * 1024 * 1024
+ },
+ 'B': {
+ 'B': 1,
+ 'KiB': 1024,
+ 'MiB': 1024 * 1024,
+ 'GiB': 1024 * 1024 * 1024,
+ 'TiB': 1024 * 1024 * 1024 * 1024,
+ 'PiB': 1024 * 1024 * 1024 * 1024 * 1024
+ },
+ 'KB': {
+ 'B': 1 / 1024,
+ 'KB': 1,
+ 'MB': 1024,
+ 'GB': 1024 * 1024,
+ 'TB': 1024 * 1024 * 1024
+ },
+ 'KiB': {
+ 'B': 1 / 1024,
+ 'KiB': 1,
+ 'MiB': 1024,
+ 'GiB': 1024 * 1024,
+ 'TiB': 1024 * 1024 * 1024
+ },
+ 'MB': {
+ 'B': 1 / (1024 * 1024),
+ 'KB': 1 / 1024,
+ 'MB': 1,
+ 'GB': 1024,
+ 'TB': 1024 * 1024,
+ 'PB': 1024 * 1024 * 1024
+ },
+ 'MiB': {
+ 'B': 1 / (1024 * 1024),
+ 'KiB': 1 / 1024,
+ 'MiB': 1,
+ 'GiB': 1024,
+ 'TiB': 1024 * 1024,
+ 'PiB': 1024 * 1024 * 1024
+ },
+ 'GB': {
+ 'B': 1 / (1024 * 1024 * 1024),
+ 'KB': 1 / (1024 * 1024),
+ 'MB': 1 / 1024,
+ 'GB': 1,
+ 'TB': 1024,
+ 'PB': 1024 * 1024,
+ 'EB': 1024 * 1024 * 1024
+ },
+ 'GiB': {
+ 'B': 1 / (1024 * 1024 * 1024),
+ 'KiB': 1 / (1024 * 1024),
+ 'MiB': 1 / 1024,
+ 'GiB': 1,
+ 'TiB': 1024,
+ 'PiB': 1024 * 1024,
+ 'EiB': 1024 * 1024 * 1024
+ },
+ 'num': {
+ 'num': 1,
+ 'num (K)': 1000,
+ 'num (M)': 1000000,
+ 'num (G)': 1000000000,
+ 'num (T)': 1000000000000
+ }
+ /*
+ 'milliseconds': {
+ 'seconds': 1000
+ },
+ 'seconds': {
+ 'milliseconds': 0.001,
+ 'seconds': 1,
+ 'minutes': 60,
+ 'hours': 3600,
+ 'days': 86400
+ }
+ */
+ },
+
+ convertibleUnits: {
+ 'Celsius': {
+ 'Fahrenheit': {
+ check: function (max) {
+ void(max);
+ return NETDATA.options.current.temperature === 'fahrenheit';
+ },
+ convert: function (value) {
+ return value * 9 / 5 + 32;
+ }
+ }
+ },
+ 'celsius': {
+ 'fahrenheit': {
+ check: function (max) {
+ void(max);
+ return NETDATA.options.current.temperature === 'fahrenheit';
+ },
+ convert: function (value) {
+ return value * 9 / 5 + 32;
+ }
+ }
+ },
+ 'seconds': {
+ 'time': {
+ check: function (max) {
+ void(max);
+ return NETDATA.options.current.seconds_as_time;
+ },
+ convert: function (seconds) {
+ return NETDATA.unitsConversion.seconds2time(seconds);
+ }
+ }
+ },
+ 'milliseconds': {
+ 'milliseconds': {
+ check: function (max) {
+ return NETDATA.options.current.seconds_as_time && max < 1000;
+ },
+ convert: function (milliseconds) {
+ let tms = Math.round(milliseconds * 10);
+ milliseconds = Math.floor(tms / 10);
+
+ tms -= milliseconds * 10;
+
+ return (milliseconds).toString() + '.' + tms.toString();
+ }
+ },
+ 'seconds': {
+ check: function (max) {
+ return NETDATA.options.current.seconds_as_time && max >= 1000 && max < 60000;
+ },
+ convert: function (milliseconds) {
+ milliseconds = Math.round(milliseconds);
+
+ let seconds = Math.floor(milliseconds / 1000);
+ milliseconds -= seconds * 1000;
+
+ milliseconds = Math.round(milliseconds / 10);
+
+ return seconds.toString() + '.'
+ + NETDATA.zeropad(milliseconds);
+ }
+ },
+ 'M:SS.ms': {
+ check: function (max) {
+ return NETDATA.options.current.seconds_as_time && max >= 60000;
+ },
+ convert: function (milliseconds) {
+ milliseconds = Math.round(milliseconds);
+
+ let minutes = Math.floor(milliseconds / 60000);
+ milliseconds -= minutes * 60000;
+
+ let seconds = Math.floor(milliseconds / 1000);
+ milliseconds -= seconds * 1000;
+
+ milliseconds = Math.round(milliseconds / 10);
+
+ return minutes.toString() + ':'
+ + NETDATA.zeropad(seconds) + '.'
+ + NETDATA.zeropad(milliseconds);
+ }
+ }
+ },
+ 'nanoseconds': {
+ 'nanoseconds': {
+ check: function (max) {
+ return NETDATA.options.current.seconds_as_time && max < 1000;
+ },
+ convert: function (nanoseconds) {
+ let tms = Math.round(nanoseconds * 10);
+ nanoseconds = Math.floor(tms / 10);
+
+ tms -= nanoseconds * 10;
+
+ return (nanoseconds).toString() + '.' + tms.toString();
+ }
+ },
+ 'microseconds': {
+ check: function (max) {
+ return NETDATA.options.current.seconds_as_time
+ && max >= 1000 && max < 1000 * 1000;
+ },
+ convert: function (nanoseconds) {
+ nanoseconds = Math.round(nanoseconds);
+
+ let microseconds = Math.floor(nanoseconds / 1000);
+ nanoseconds -= microseconds * 1000;
+
+ nanoseconds = Math.round(nanoseconds / 10 );
+
+ return microseconds.toString() + '.'
+ + NETDATA.zeropad(nanoseconds);
+ }
+ },
+ 'milliseconds': {
+ check: function (max) {
+ return NETDATA.options.current.seconds_as_time
+ && max >= 1000 * 1000 && max < 1000 * 1000 * 1000;
+ },
+ convert: function (nanoseconds) {
+ nanoseconds = Math.round(nanoseconds);
+
+ let milliseconds = Math.floor(nanoseconds / 1000 / 1000);
+ nanoseconds -= milliseconds * 1000 * 1000;
+
+ nanoseconds = Math.round(nanoseconds / 1000 / 10);
+
+ return milliseconds.toString() + '.'
+ + NETDATA.zeropad(nanoseconds);
+ }
+ },
+ 'seconds': {
+ check: function (max) {
+ return NETDATA.options.current.seconds_as_time
+ && max >= 1000 * 1000 * 1000;
+ },
+ convert: function (nanoseconds) {
+ nanoseconds = Math.round(nanoseconds);
+
+ let seconds = Math.floor(nanoseconds / 1000 / 1000 / 1000);
+ nanoseconds -= seconds * 1000 * 1000 * 1000;
+
+ nanoseconds = Math.round(nanoseconds / 1000 / 1000 / 10);
+
+ return seconds.toString() + '.'
+ + NETDATA.zeropad(nanoseconds);
+ }
+ },
+ }
+ },
+
+ seconds2time: function (seconds) {
+ seconds = Math.abs(seconds);
+
+ let days = Math.floor(seconds / 86400);
+ seconds -= days * 86400;
+
+ let hours = Math.floor(seconds / 3600);
+ seconds -= hours * 3600;
+
+ let minutes = Math.floor(seconds / 60);
+ seconds -= minutes * 60;
+
+ seconds = Math.round(seconds);
+
+ let ms_txt = '';
+ /*
+ let ms = seconds - Math.floor(seconds);
+ seconds -= ms;
+ ms = Math.round(ms * 1000);
+
+ if (ms > 1) {
+ if (ms < 10)
+ ms_txt = '.00' + ms.toString();
+ else if (ms < 100)
+ ms_txt = '.0' + ms.toString();
+ else
+ ms_txt = '.' + ms.toString();
+ }
+ */
+
+ return ((days > 0) ? days.toString() + 'd:' : '').toString()
+ + NETDATA.zeropad(hours) + ':'
+ + NETDATA.zeropad(minutes) + ':'
+ + NETDATA.zeropad(seconds)
+ + ms_txt;
+ },
+
+ // get a function that converts the units
+ // + every time units are switched call the callback
+ get: function (uuid, min, max, units, desired_units, common_units_name, switch_units_callback) {
+ // validate the parameters
+ if (typeof units === 'undefined') {
+ units = 'undefined';
+ }
+
+ // check if we support units conversion
+ if (typeof this.scalableUnits[units] === 'undefined' && typeof this.convertibleUnits[units] === 'undefined') {
+ // we can't convert these units
+ //console.log('DEBUG: ' + uuid.toString() + ' can\'t convert units: ' + units.toString());
+ return function (value) {
+ return value;
+ };
+ }
+
+ // check if the caller wants the original units
+ if (typeof desired_units === 'undefined' || desired_units === null || desired_units === 'original' || desired_units === units) {
+ //console.log('DEBUG: ' + uuid.toString() + ' original units wanted');
+ switch_units_callback(units);
+ return function (value) {
+ return value;
+ };
+ }
+
+ // now we know we can convert the units
+ // and the caller wants some kind of conversion
+
+ let tunits = null;
+ let tdivider = 0;
+
+ if (typeof this.scalableUnits[units] !== 'undefined') {
+ // units that can be scaled
+ // we decide a divider
+
+ // console.log('NETDATA.unitsConversion.get(' + units.toString() + ', ' + desired_units.toString() + ', function()) decide divider with min = ' + min.toString() + ', max = ' + max.toString());
+
+ if (desired_units === 'auto') {
+ // the caller wants to auto-scale the units
+
+ // find the absolute maximum value that is rendered on the chart
+ // based on this we decide the scale
+ min = Math.abs(min);
+ max = Math.abs(max);
+ if (min > max) {
+ max = min;
+ }
+
+ // find the smallest scale that provides integers
+ // for (x in this.scalableUnits[units]) {
+ // if (this.scalableUnits[units].hasOwnProperty(x)) {
+ // let m = this.scalableUnits[units][x];
+ // if (m <= max && m > tdivider) {
+ // tunits = x;
+ // tdivider = m;
+ // }
+ // }
+ // }
+ const sunit = this.scalableUnits[units];
+ for (var x of Object.keys(sunit)) {
+ let m = sunit[x];
+ if (m <= max && m > tdivider) {
+ tunits = x;
+ tdivider = m;
+ }
+ }
+
+ if (tunits === null || tdivider <= 0) {
+ // we couldn't find one
+ //console.log('DEBUG: ' + uuid.toString() + ' cannot find an auto-scaling candidate for units: ' + units.toString() + ' (max: ' + max.toString() + ')');
+ switch_units_callback(units);
+ return function (value) {
+ return value;
+ };
+ }
+
+ if (typeof common_units_name === 'string' && typeof uuid === 'string') {
+ // the caller wants several charts to have the same units
+ // data-common-units
+
+ let common_units_key = common_units_name + '-' + units;
+
+ // add our divider into the list of keys
+ let t = this.keys[common_units_key];
+ if (typeof t === 'undefined') {
+ this.keys[common_units_key] = {};
+ t = this.keys[common_units_key];
+ }
+ t[uuid] = {
+ units: tunits,
+ divider: tdivider
+ };
+
+ // find the max divider of all charts
+ let common_units = t[uuid];
+ for (var x in t) {
+ if (t.hasOwnProperty(x) && t[x].divider > common_units.divider) {
+ common_units = t[x];
+ }
+ }
+
+ // save our common_max to the latest keys
+ let latest = this.latest[common_units_key];
+ if (typeof latest === 'undefined') {
+ this.latest[common_units_key] = {};
+ latest = this.latest[common_units_key];
+ }
+ latest.units = common_units.units;
+ latest.divider = common_units.divider;
+
+ tunits = latest.units;
+ tdivider = latest.divider;
+
+ //console.log('DEBUG: ' + uuid.toString() + ' converted units: ' + units.toString() + ' to units: ' + tunits.toString() + ' with divider ' + tdivider.toString() + ', common-units=' + common_units_name.toString() + ((t[uuid].divider !== tdivider)?' USED COMMON, mine was ' + t[uuid].units:' set common').toString());
+
+ // apply it to this chart
+ switch_units_callback(tunits);
+ return function (value) {
+ if (tdivider !== latest.divider) {
+ // another chart switched our common units
+ // we should switch them too
+ //console.log('DEBUG: ' + uuid + ' switching units due to a common-units change, from ' + tunits.toString() + ' to ' + latest.units.toString());
+ tunits = latest.units;
+ tdivider = latest.divider;
+ switch_units_callback(tunits);
+ }
+
+ return value / tdivider;
+ };
+ } else {
+ // the caller did not give data-common-units
+ // this chart auto-scales independently of all others
+ //console.log('DEBUG: ' + uuid.toString() + ' converted units: ' + units.toString() + ' to units: ' + tunits.toString() + ' with divider ' + tdivider.toString() + ', autonomously');
+
+ switch_units_callback(tunits);
+ return function (value) {
+ return value / tdivider;
+ };
+ }
+ } else {
+ // the caller wants specific units
+
+ if (typeof this.scalableUnits[units][desired_units] !== 'undefined') {
+ // all good, set the new units
+ tdivider = this.scalableUnits[units][desired_units];
+ // console.log('DEBUG: ' + uuid.toString() + ' converted units: ' + units.toString() + ' to units: ' + desired_units.toString() + ' with divider ' + tdivider.toString() + ', by reference');
+ switch_units_callback(desired_units);
+ return function (value) {
+ return value / tdivider;
+ };
+ } else {
+ // oops! switch back to original units
+ console.log('Units conversion from ' + units.toString() + ' to ' + desired_units.toString() + ' is not supported.');
+ switch_units_callback(units);
+ return function (value) {
+ return value;
+ };
+ }
+ }
+ } else if (typeof this.convertibleUnits[units] !== 'undefined') {
+ // units that can be converted
+ if (desired_units === 'auto') {
+ for (var x in this.convertibleUnits[units]) {
+ if (this.convertibleUnits[units].hasOwnProperty(x)) {
+ if (this.convertibleUnits[units][x].check(max)) {
+ //console.log('DEBUG: ' + uuid.toString() + ' converting ' + units.toString() + ' to: ' + x.toString());
+ switch_units_callback(x);
+ return this.convertibleUnits[units][x].convert;
+ }
+ }
+ }
+
+ // none checked ok
+ //console.log('DEBUG: ' + uuid.toString() + ' no conversion available for ' + units.toString() + ' to: ' + desired_units.toString());
+ switch_units_callback(units);
+ return function (value) {
+ return value;
+ };
+ } else if (typeof this.convertibleUnits[units][desired_units] !== 'undefined') {
+ switch_units_callback(desired_units);
+ return this.convertibleUnits[units][desired_units].convert;
+ } else {
+ console.log('Units conversion from ' + units.toString() + ' to ' + desired_units.toString() + ' is not supported.');
+ switch_units_callback(units);
+ return function (value) {
+ return value;
+ };
+ }
+ } else {
+ // hm... did we forget to implement the new type?
+ console.log(`Unmatched unit conversion method for units ${units.toString()}`);
+ switch_units_callback(units);
+ return function (value) {
+ return value;
+ };
+ }
+ }
+};
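+
+// Illustrative usage (chart uuids and the callback are hypothetical).
+// With desired_units === 'auto', the largest divider not exceeding the
+// maximum rendered value is chosen:
+//     let convert = NETDATA.unitsConversion.get(
+//         'chart-1', 0, 5000000, 'KB', 'auto', null,
+//         function (units) { /* receives 'GB' */ });
+//     convert(2097152);  // -> 2 (2097152 KB / (1024 * 1024))
+// Convertible units go through check()/convert() pairs instead, e.g. with
+// NETDATA.options.current.temperature === 'fahrenheit':
+//     NETDATA.unitsConversion.get('chart-2', 0, 100, 'Celsius', 'auto', null,
+//         function (units) { /* receives 'Fahrenheit' */ })(25);  // -> 77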
+
+NETDATA.icons = {
+ left: '<i class="fas fa-backward"></i>',
+ reset: '<i class="fas fa-play"></i>',
+ right: '<i class="fas fa-forward"></i>',
+ zoomIn: '<i class="fas fa-plus"></i>',
+ zoomOut: '<i class="fas fa-minus"></i>',
+ resize: '<i class="fas fa-sort"></i>',
+ lineChart: '<i class="fas fa-chart-line"></i>',
+ areaChart: '<i class="fas fa-chart-area"></i>',
+ noChart: '<i class="fas fa-chart-area"></i>',
+ loading: '<i class="fas fa-sync-alt"></i>',
+ noData: '<i class="fas fa-exclamation-triangle"></i>'
+};
+
+if (typeof netdataIcons === 'object') {
+ // for (let icon in NETDATA.icons) {
+ // if (NETDATA.icons.hasOwnProperty(icon) && typeof(netdataIcons[icon]) === 'string')
+ // NETDATA.icons[icon] = netdataIcons[icon];
+ // }
+ for (var icon of Object.keys(NETDATA.icons)) {
+ if (typeof(netdataIcons[icon]) === 'string') {
+ NETDATA.icons[icon] = netdataIcons[icon]
+ }
+ }
+}
+
+if (typeof netdataSnapshotData === 'undefined') {
+ netdataSnapshotData = null;
+}
+
+if (typeof netdataShowHelp === 'undefined') {
+ netdataShowHelp = true;
+}
+
+if (typeof netdataShowAlarms === 'undefined') {
+ netdataShowAlarms = false;
+}
+
+if (typeof netdataRegistryAfterMs !== 'number' || netdataRegistryAfterMs < 0) {
+ netdataRegistryAfterMs = 0; // 1500;
+}
+
+if (typeof netdataRegistry === 'undefined') {
+ // backward compatibility
+ netdataRegistry = (typeof netdataNoRegistry !== 'undefined' && netdataNoRegistry === false);
+}
+
+if (netdataRegistry === false && typeof netdataRegistryCallback === 'function') {
+ netdataRegistry = true;
+}
+
+// ----------------------------------------------------------------------------------------------------------------
+// the defaults for all charts
+
+// if the user does not specify any of these, the following will be used
+
+NETDATA.chartDefaults = {
+ width: '100%', // the chart width - can be null
+ height: '100%', // the chart height - can be null
+ min_width: null, // the chart minimum width - can be null
+ library: 'dygraph', // the graphing library to use
+ method: 'average', // the grouping method
+ before: 0, // panning
+ after: -600, // panning
+ pixels_per_point: 1, // the detail of the chart
+ fill_luminance: 0.8 // luminance of colors in solid areas
+};
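+
+// Illustrative sketch: each default above can be overridden per chart with
+// the matching data-* attribute, as in the demo pages shipped with the
+// dashboard (attribute names here follow that pattern; treat any not shown
+// in the demos as assumptions):
+//     <div data-netdata="system.cpu"
+//          data-chart-library="dygraph"
+//          data-method="average"
+//          data-after="-600"
+//          data-height="200"></div>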
+
+// ----------------------------------------------------------------------------------------------------------------
+// global options
+
+NETDATA.options = {
+ pauseCallback: null, // a callback when we are really paused
+
+ pause: false, // when enabled we don't auto-refresh the charts
+
+ targets: [], // an array of all the state objects that are
+ // currently active (independently of their
+ // viewport visibility)
+
+ updated_dom: true, // when true, the DOM has been updated with
+ // new elements we have to check.
+
+ auto_refresher_fast_weight: 0, // this is the current time in ms, spent
+ // rendering charts continuously.
+ // used with .current.fast_render_timeframe
+
+ page_is_visible: true, // when true, this page is visible
+
+ auto_refresher_stop_until: 0, // timestamp in ms - used internally, to stop the
+ // auto-refresher for some time (when a chart is
+ // performing pan or zoom, we need to stop refreshing
+ // all other charts, to have the maximum speed for
+ // rendering the chart that is panned or zoomed).
+ // Used with .current.global_pan_sync_time
+
+ on_scroll_refresher_stop_until: 0, // timestamp in ms - used to stop evaluating
+ // charts for some time, after a page scroll
+
+ last_page_resize: Date.now(), // the timestamp of the last resize request
+
+ last_page_scroll: 0, // the timestamp the last time the page was scrolled
+
+ browser_timezone: 'unknown', // timezone detected by javascript
+ server_timezone: 'unknown', // timezone reported by the server
+
+ force_data_points: 0, // force the number of points to be returned for charts
+ fake_chart_rendering: false, // when set to true, the dashboard will download data but will not render the charts
+
+ passive_events: null, // true if the browser supports passive events
+
+ // the current profile
+ // we may have many...
+ current: {
+ units: 'auto', // can be 'auto' or 'original'
+ temperature: 'celsius', // can be 'celsius' or 'fahrenheit'
+ seconds_as_time: true, // show seconds as DDd:HH:MM:SS ?
+ timezone: 'default', // the timezone to use, or 'default'
+ user_set_server_timezone: 'default', // as set by the user on the dashboard
+
+ legend_toolbox: true, // show the legend toolbox on charts
+ resize_charts: true, // show the resize handler on charts
+
+ pixels_per_point: isSlowDevice() ? 5 : 1, // the minimum pixels per point for all charts
+ // increase this to speed javascript up
+ // each chart library has its own limit too
+ // the max of this and the chart library is used
+ // the final is calculated every time, so a change
+ // here will have immediate effect on the next chart
+ // update
+
+ idle_between_charts: 100, // ms - how much time to wait between chart updates
+
+ fast_render_timeframe: 200, // ms - render continuously until this time of continuous
+ // rendering has been reached
+ // this setting is used to make it render e.g. 10
+ // charts at once, sleep idle_between_charts time
+ // and continue for another 10 charts.
+
+ idle_between_loops: 500, // ms - if all charts have been updated, wait this
+ // time before starting again.
+
+ idle_parallel_loops: 100, // ms - the time between parallel refresher updates
+
+ idle_lost_focus: 500, // ms - when the window does not have focus, check
+ // if focus has been regained, every this time
+
+ global_pan_sync_time: 300, // ms - when you pan or zoom a chart, the background
+ // auto-refreshing of charts is paused for this amount
+ // of time
+
+ sync_selection_delay: 400, // ms - when you pan or zoom a chart, wait this amount
+ // of time before setting up synchronized selections
+ // on hover.
+
+ sync_selection: true, // enable or disable selection sync
+
+        pan_and_zoom_delay: 50, // when panning or zooming, how often to update the chart
+
+ sync_pan_and_zoom: true, // enable or disable pan and zoom sync
+
+ pan_and_zoom_data_padding: true, // fetch more data for the master chart when panning or zooming
+
+ update_only_visible: true, // enable or disable visibility management / used for printing
+
+ parallel_refresher: !isSlowDevice(), // enable parallel refresh of charts
+
+ concurrent_refreshes: true, // when parallel_refresher is enabled, sync also the charts
+
+ destroy_on_hide: isSlowDevice(), // destroy charts when they are not visible
+
+ show_help: netdataShowHelp, // when enabled the charts will show some help
+ show_help_delay_show_ms: 500,
+ show_help_delay_hide_ms: 0,
+
+ eliminate_zero_dimensions: true, // do not show dimensions with just zeros
+
+ stop_updates_when_focus_is_lost: true, // boolean - shall we stop auto-refreshes when document does not have user focus
+ stop_updates_while_resizing: 1000, // ms - time to stop auto-refreshes while resizing the charts
+
+ double_click_speed: 500, // ms - time between clicks / taps to detect double click/tap
+
+ smooth_plot: !isSlowDevice(), // enable smooth plot, where possible
+
+ color_fill_opacity_line: 1.0,
+ color_fill_opacity_area: 0.2,
+ color_fill_opacity_stacked: 0.8,
+
+ pan_and_zoom_factor: 0.25, // the increment when panning and zooming with the toolbox
+ pan_and_zoom_factor_multiplier_control: 2.0,
+ pan_and_zoom_factor_multiplier_shift: 3.0,
+ pan_and_zoom_factor_multiplier_alt: 4.0,
+
+ abort_ajax_on_scroll: false, // kill pending ajax page scroll
+ async_on_scroll: false, // sync/async onscroll handler
+ onscroll_worker_duration_threshold: 30, // time in ms, for async scroll handler
+
+ retries_on_data_failures: 3, // how many retries to make if we can't fetch chart data from the server
+
+ setOptionCallback: function () {
+ }
+ },
+
+ debug: {
+ show_boxes: false,
+ main_loop: false,
+ focus: false,
+ visibility: false,
+ chart_data_url: false,
+ chart_errors: true, // remember to set it to false before merging
+ chart_timing: false,
+ chart_calls: false,
+ libraries: false,
+ dygraph: false,
+ globalSelectionSync: false,
+ globalPanAndZoom: false
+ }
+};
+
+NETDATA.statistics = {
+ refreshes_total: 0,
+ refreshes_active: 0,
+ refreshes_active_max: 0
+};
+
+// local storage options
+
+NETDATA.localStorage = {
+ default: {},
+ current: {},
+ callback: {} // only used for resetting back to defaults
+};
+
+NETDATA.localStorageTested = -1;
+NETDATA.localStorageTest = function () {
+ if (NETDATA.localStorageTested !== -1) {
+ return NETDATA.localStorageTested;
+ }
+
+ if (typeof Storage !== "undefined" && typeof localStorage === 'object') {
+ let test = 'test';
+ try {
+ localStorage.setItem(test, test);
+ localStorage.removeItem(test);
+ NETDATA.localStorageTested = true;
+ } catch (e) {
+ NETDATA.localStorageTested = false;
+ }
+ } else {
+ NETDATA.localStorageTested = false;
+ }
+
+ return NETDATA.localStorageTested;
+};
+
+NETDATA.localStorageGet = function (key, def, callback) {
+ let ret = def;
+
+ if (typeof NETDATA.localStorage.default[key.toString()] === 'undefined') {
+ NETDATA.localStorage.default[key.toString()] = def;
+ NETDATA.localStorage.callback[key.toString()] = callback;
+ }
+
+ if (NETDATA.localStorageTest()) {
+ try {
+ // console.log('localStorage: loading "' + key.toString() + '"');
+ ret = localStorage.getItem(key.toString());
+ // console.log('netdata loaded: ' + key.toString() + ' = ' + ret.toString());
+ if (ret === null || ret === 'undefined') {
+ // console.log('localStorage: cannot load it, saving "' + key.toString() + '" with value "' + JSON.stringify(def) + '"');
+ localStorage.setItem(key.toString(), JSON.stringify(def));
+ ret = def;
+ } else {
+ // console.log('localStorage: got "' + key.toString() + '" with value "' + ret + '"');
+ ret = JSON.parse(ret);
+ // console.log('localStorage: loaded "' + key.toString() + '" as value ' + ret + ' of type ' + typeof(ret));
+ }
+ } catch (error) {
+ console.log('localStorage: failed to read "' + key.toString() + '", using default: "' + def.toString() + '"');
+ ret = def;
+ }
+ }
+
+ if (typeof ret === 'undefined' || ret === 'undefined') {
+ console.log('localStorage: LOADED UNDEFINED "' + key.toString() + '" as value ' + ret + ' of type ' + typeof(ret));
+ ret = def;
+ }
+
+ NETDATA.localStorage.current[key.toString()] = ret;
+ return ret;
+};
+
+NETDATA.localStorageSet = function (key, value, callback) {
+ if (typeof value === 'undefined' || value === 'undefined') {
+ console.log('localStorage: ATTEMPT TO SET UNDEFINED "' + key.toString() + '" as value ' + value + ' of type ' + typeof(value));
+ }
+
+ if (typeof NETDATA.localStorage.default[key.toString()] === 'undefined') {
+ NETDATA.localStorage.default[key.toString()] = value;
+ NETDATA.localStorage.current[key.toString()] = value;
+ NETDATA.localStorage.callback[key.toString()] = callback;
+ }
+
+ if (NETDATA.localStorageTest()) {
+ // console.log('localStorage: saving "' + key.toString() + '" with value "' + JSON.stringify(value) + '"');
+ try {
+ localStorage.setItem(key.toString(), JSON.stringify(value));
+ } catch (e) {
+ console.log('localStorage: failed to save "' + key.toString() + '" with value: "' + value.toString() + '"');
+ }
+ }
+
+ NETDATA.localStorage.current[key.toString()] = value;
+ return value;
+};
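+
+// Illustrative usage: values are JSON-encoded under their full key name.
+//     NETDATA.localStorageSet('options.show_help', false, null);
+//         // persists "false" under the key "options.show_help"
+//     NETDATA.localStorageGet('options.show_help', true, null);  // -> false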
+
+NETDATA.localStorageGetRecursive = function (obj, prefix, callback) {
+ let keys = Object.keys(obj);
+ let len = keys.length;
+ while (len--) {
+ let i = keys[len];
+
+ if (typeof obj[i] === 'object') {
+ //console.log('object ' + prefix + '.' + i.toString());
+ NETDATA.localStorageGetRecursive(obj[i], prefix + '.' + i.toString(), callback);
+ continue;
+ }
+
+ obj[i] = NETDATA.localStorageGet(prefix + '.' + i.toString(), obj[i], callback);
+ }
+};
+
+NETDATA.setOption = function (key, value) {
+ if (key.toString() === 'setOptionCallback') {
+ if (typeof NETDATA.options.current.setOptionCallback === 'function') {
+ NETDATA.options.current[key.toString()] = value;
+ NETDATA.options.current.setOptionCallback();
+ }
+ } else if (NETDATA.options.current[key.toString()] !== value) {
+ let name = 'options.' + key.toString();
+
+ if (typeof NETDATA.localStorage.default[name.toString()] === 'undefined') {
+ console.log('localStorage: setOption() on unsaved option: "' + name.toString() + '", value: ' + value);
+ }
+
+ //console.log(NETDATA.localStorage);
+ //console.log('setOption: setting "' + key.toString() + '" to "' + value + '" of type ' + typeof(value) + ' original type ' + typeof(NETDATA.options.current[key.toString()]));
+ //console.log(NETDATA.options);
+ NETDATA.options.current[key.toString()] = NETDATA.localStorageSet(name.toString(), value, null);
+
+ if (typeof NETDATA.options.current.setOptionCallback === 'function') {
+ NETDATA.options.current.setOptionCallback();
+ }
+ }
+
+ return true;
+};
+
+NETDATA.getOption = function (key) {
+ return NETDATA.options.current[key.toString()];
+};
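+
+// Illustrative usage: setOption() persists the new value (when localStorage
+// is available) and fires setOptionCallback so the dashboard can react.
+//     NETDATA.setOption('temperature', 'fahrenheit');
+//     NETDATA.getOption('temperature');  // -> "fahrenheit"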
+
+// read settings from local storage
+NETDATA.localStorageGetRecursive(NETDATA.options.current, 'options', null);
+
+// always start with this option enabled.
+NETDATA.setOption('stop_updates_when_focus_is_lost', true);
+
+NETDATA.resetOptions = function () {
+ let keys = Object.keys(NETDATA.localStorage.default);
+ let len = keys.length;
+
+ while (len--) {
+ let i = keys[len];
+ let a = i.split('.');
+
+ if (a[0] === 'options') {
+ if (a[1] === 'setOptionCallback') {
+ continue;
+ }
+ if (typeof NETDATA.localStorage.default[i] === 'undefined') {
+ continue;
+ }
+ if (NETDATA.options.current[i] === NETDATA.localStorage.default[i]) {
+ continue;
+ }
+
+ NETDATA.setOption(a[1], NETDATA.localStorage.default[i]);
+ } else if (a[0] === 'chart_heights') {
+ if (typeof NETDATA.localStorage.callback[i] === 'function' && typeof NETDATA.localStorage.default[i] !== 'undefined') {
+ NETDATA.localStorage.callback[i](NETDATA.localStorage.default[i]);
+ }
+ }
+ }
+
+ NETDATA.dateTime.init(NETDATA.options.current.timezone);
+};
+
+// *** src/dashboard.js/timeout.js
+
+// TODO: Better name needed
+
+NETDATA.timeout = {
+ // by default, these are just wrappers to setTimeout() / clearTimeout()
+
+ step: function (callback) {
+ return window.setTimeout(callback, 1000 / 60);
+ },
+
+ set: function (callback, delay) {
+ return window.setTimeout(callback, delay);
+ },
+
+ clear: function (id) {
+ return window.clearTimeout(id);
+ },
+
+ init: function () {
+ let custom = true;
+
+ if (window.requestAnimationFrame) {
+ this.step = function (callback) {
+ return window.requestAnimationFrame(callback);
+ };
+
+ this.clear = function (handle) {
+ return window.cancelAnimationFrame(handle.value);
+ };
+ // } else if (window.webkitRequestAnimationFrame) {
+ // this.step = function (callback) {
+ // return window.webkitRequestAnimationFrame(callback);
+ // };
+
+ // if (window.webkitCancelAnimationFrame) {
+ // this.clear = function (handle) {
+ // return window.webkitCancelAnimationFrame(handle.value);
+ // };
+ // } else if (window.webkitCancelRequestAnimationFrame) {
+ // this.clear = function (handle) {
+ // return window.webkitCancelRequestAnimationFrame(handle.value);
+ // };
+ // }
+ // } else if (window.mozRequestAnimationFrame) {
+ // this.step = function (callback) {
+ // return window.mozRequestAnimationFrame(callback);
+ // };
+
+ // this.clear = function (handle) {
+ // return window.mozCancelRequestAnimationFrame(handle.value);
+ // };
+ // } else if (window.oRequestAnimationFrame) {
+ // this.step = function (callback) {
+ // return window.oRequestAnimationFrame(callback);
+ // };
+
+ // this.clear = function (handle) {
+ // return window.oCancelRequestAnimationFrame(handle.value);
+ // };
+ // } else if (window.msRequestAnimationFrame) {
+ // this.step = function (callback) {
+ // return window.msRequestAnimationFrame(callback);
+ // };
+
+ // this.clear = function (handle) {
+ // return window.msCancelRequestAnimationFrame(handle.value);
+ // };
+ } else {
+ custom = false;
+ }
+
+ if (custom) {
+ // we have installed custom .step() / .clear() functions
+ // overwrite the .set() too
+
+ this.set = function (callback, delay) {
+ let start = Date.now(),
+                handle = {};
+
+ const loop = () => {
+ let current = Date.now(),
+ delta = current - start;
+
+ if (delta >= delay) {
+ callback.call();
+ } else {
+ handle.value = this.step(loop);
+ }
+ }
+
+ handle.value = this.step(loop);
+ return handle;
+ };
+ }
+ }
+};
+
+NETDATA.timeout.init();
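+
+// Illustrative usage: when requestAnimationFrame is available, .set()
+// returns a handle object whose .value is the current frame id.
+//     let h = NETDATA.timeout.set(function () { console.log('tick'); }, 250);
+//     NETDATA.timeout.clear(h);  // cancels the pending callback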
+// Codacy declarations
+/* global netdataTheme */
+
+NETDATA.themes = {
+ white: {
+ bootstrap_css: NETDATA.serverStatic + 'css/bootstrap-3.3.7.css',
+ dashboard_css: NETDATA.serverStatic + 'dashboard.css?v20190902-0',
+ background: '#FFFFFF',
+ foreground: '#000000',
+ grid: '#F0F0F0',
+ axis: '#F0F0F0',
+ highlight: '#F5F5F5',
+ colors: ['#3366CC', '#DC3912', '#109618', '#FF9900', '#990099', '#DD4477',
+ '#3B3EAC', '#66AA00', '#0099C6', '#B82E2E', '#AAAA11', '#5574A6',
+ '#994499', '#22AA99', '#6633CC', '#E67300', '#316395', '#8B0707',
+ '#329262', '#3B3EAC'],
+ easypiechart_track: '#f0f0f0',
+ easypiechart_scale: '#dfe0e0',
+ gauge_pointer: '#C0C0C0',
+ gauge_stroke: '#F0F0F0',
+ gauge_gradient: false,
+ d3pie: {
+ title: '#333333',
+ subtitle: '#666666',
+ footer: '#888888',
+ other: '#aaaaaa',
+ mainlabel: '#333333',
+ percentage: '#dddddd',
+ value: '#aaaa22',
+ tooltip_bg: '#000000',
+ tooltip_fg: '#efefef',
+ segment_stroke: "#ffffff",
+ gradient_color: '#000000'
+ }
+ },
+ slate: {
+ bootstrap_css: NETDATA.serverStatic + 'css/bootstrap-slate-flat-3.3.7.css?v20161229-1',
+ dashboard_css: NETDATA.serverStatic + 'dashboard.slate.css?v20190902-0',
+ background: '#272b30',
+ foreground: '#C8C8C8',
+ grid: '#283236',
+ axis: '#283236',
+ highlight: '#383838',
+ /* colors: [ '#55bb33', '#ff2222', '#0099C6', '#faa11b', '#adbce0', '#DDDD00',
+ '#4178ba', '#f58122', '#a5cc39', '#f58667', '#f5ef89', '#cf93c0',
+ '#a5d18a', '#b8539d', '#3954a3', '#c8a9cf', '#c7de8a', '#fad20a',
+ '#a6a479', '#a66da8' ],
+ */
+ colors: ['#66AA00', '#FE3912', '#3366CC', '#D66300', '#0099C6', '#DDDD00',
+ '#5054e6', '#EE9911', '#BB44CC', '#e45757', '#ef0aef', '#CC7700',
+ '#22AA99', '#109618', '#905bfd', '#f54882', '#4381bf', '#ff3737',
+ '#329262', '#3B3EFF'],
+ easypiechart_track: '#373b40',
+ easypiechart_scale: '#373b40',
+ gauge_pointer: '#474b50',
+ gauge_stroke: '#373b40',
+ gauge_gradient: false,
+ d3pie: {
+ title: '#C8C8C8',
+ subtitle: '#283236',
+ footer: '#283236',
+ other: '#283236',
+ mainlabel: '#C8C8C8',
+ percentage: '#dddddd',
+ value: '#cccc44',
+ tooltip_bg: '#272b30',
+ tooltip_fg: '#C8C8C8',
+ segment_stroke: "#283236",
+ gradient_color: '#000000'
+ }
+ }
+};
+
+if (typeof netdataTheme !== 'undefined' && typeof NETDATA.themes[netdataTheme] !== 'undefined') {
+ NETDATA.themes.current = NETDATA.themes[netdataTheme];
+} else {
+ NETDATA.themes.current = NETDATA.themes.white;
+}
+
+NETDATA.colors = NETDATA.themes.current.colors;
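+
+// Example (illustrative): to get the dark theme, define the global before
+// this script is loaded:
+//
+//     <script>var netdataTheme = 'slate';</script>
+//     <script type="text/javascript" src="dashboard.js"></script>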
+
+// these are the colors Google Charts uses
+// we keep them here to emulate their look and feel in the other chart libraries
+// http://there4.io/2012/05/02/google-chart-color-list/
+//NETDATA.colors = [ '#3366CC', '#DC3912', '#FF9900', '#109618', '#990099', '#3B3EAC', '#0099C6',
+// '#DD4477', '#66AA00', '#B82E2E', '#316395', '#994499', '#22AA99', '#AAAA11',
+// '#6633CC', '#E67300', '#8B0707', '#329262', '#5574A6', '#3B3EAC' ];
+
+// an alternative set
+// http://www.mulinblog.com/a-color-palette-optimized-for-data-visualization/
+// (blue) (red) (orange) (green) (pink) (brown) (purple) (yellow) (gray)
+//NETDATA.colors = [ '#5DA5DA', '#F15854', '#FAA43A', '#60BD68', '#F17CB0', '#B2912F', '#B276B2', '#DECF3F', '#4D4D4D' ];
+// dygraph
+
+// Codacy declarations
+/* global smoothPlotter */
+/* global Dygraph */
+
+NETDATA.dygraph = {
+ smooth: false
+};
+
+NETDATA.dygraphToolboxPanAndZoom = function (state, after, before) {
+ if (after < state.netdata_first) {
+ after = state.netdata_first;
+ }
+
+ if (before > state.netdata_last) {
+ before = state.netdata_last;
+ }
+
+ state.setMode('zoom');
+ NETDATA.globalSelectionSync.stop();
+ NETDATA.globalSelectionSync.delay();
+ state.tmp.dygraph_user_action = true;
+ state.tmp.dygraph_force_zoom = true;
+ // state.log('toolboxPanAndZoom');
+ state.updateChartPanOrZoom(after, before);
+ NETDATA.globalPanAndZoom.setMaster(state, after, before);
+};
+
+NETDATA.dygraphSetSelection = function (state, t) {
+ if (typeof state.tmp.dygraph_instance !== 'undefined') {
+ let r = state.calculateRowForTime(t);
+ if (r !== -1) {
+ state.tmp.dygraph_instance.setSelection(r);
+ return true;
+ } else {
+ state.tmp.dygraph_instance.clearSelection();
+ state.legendShowUndefined();
+ }
+ }
+
+ return false;
+};
+
+NETDATA.dygraphClearSelection = function (state) {
+ if (typeof state.tmp.dygraph_instance !== 'undefined') {
+ state.tmp.dygraph_instance.clearSelection();
+ }
+ return true;
+};
+
+NETDATA.dygraphSmoothInitialize = function (callback) {
+ $.ajax({
+ url: NETDATA.dygraph_smooth_js,
+ cache: true,
+ dataType: "script",
+ xhrFields: {withCredentials: true} // required for the cookie
+ })
+ .done(function () {
+ NETDATA.dygraph.smooth = true;
+ smoothPlotter.smoothing = 0.3;
+ })
+ .fail(function () {
+ NETDATA.dygraph.smooth = false;
+ })
+ .always(function () {
+ if (typeof callback === "function") {
+ return callback();
+ }
+ });
+};
+
+NETDATA.dygraphInitialize = function (callback) {
+ if (typeof netdataNoDygraphs === 'undefined' || !netdataNoDygraphs) {
+ $.ajax({
+ url: NETDATA.dygraph_js,
+ cache: true,
+ dataType: "script",
+ xhrFields: {withCredentials: true} // required for the cookie
+ })
+ .done(function () {
+ NETDATA.registerChartLibrary('dygraph', NETDATA.dygraph_js);
+ })
+ .fail(function () {
+ NETDATA.chartLibraries.dygraph.enabled = false;
+ NETDATA.error(100, NETDATA.dygraph_js);
+ })
+ .always(function () {
+ if (NETDATA.chartLibraries.dygraph.enabled && NETDATA.options.current.smooth_plot) {
+ NETDATA.dygraphSmoothInitialize(callback);
+ } else if (typeof callback === "function") {
+ return callback();
+ }
+ });
+ } else {
+ NETDATA.chartLibraries.dygraph.enabled = false;
+ if (typeof callback === "function") {
+ return callback();
+ }
+ }
+};
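+
+// Example (illustrative): pages that never render dygraph charts can skip
+// loading the library by defining the global checked above before this
+// script is included:
+//
+//     <script>var netdataNoDygraphs = true;</script>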
+
+NETDATA.dygraphChartUpdate = function (state, data) {
+ let dygraph = state.tmp.dygraph_instance;
+
+ if (typeof dygraph === 'undefined') {
+ return NETDATA.dygraphChartCreate(state, data);
+ }
+
+ // when the chart is hidden and the window is resized,
+ // dygraph detects its element size as 0x0.
+ // resizing it here makes it re-appear properly.
+
+ if (state.tm.last_unhidden > state.tmp.dygraph_last_rendered) {
+ dygraph.resize();
+ }
+
+ let options = {
+ file: data.result.data,
+ colors: state.chartColors(),
+ labels: data.result.labels,
+ //labelsDivWidth: state.chartWidth() - 70,
+ includeZero: state.tmp.dygraph_include_zero,
+ visibility: state.dimensions_visibility.selected2BooleanArray(state.data.dimension_names)
+ };
+
+ if (state.tmp.dygraph_chart_type === 'stacked') {
+ if (options.includeZero && state.dimensions_visibility.countSelected() < options.visibility.length) {
+ options.includeZero = 0;
+ }
+ }
+
+ if (!NETDATA.chartLibraries.dygraph.isSparkline(state)) {
+ options.ylabel = state.units_current; // (state.units_desired === 'auto')?"":state.units_current;
+ }
+
+ if (state.tmp.dygraph_force_zoom) {
+ if (NETDATA.options.debug.dygraph || state.debug) {
+ state.log('dygraphChartUpdate() forced zoom update');
+ }
+
+ options.dateWindow = (state.requested_padding !== null) ? [state.view_after, state.view_before] : null;
+ //options.isZoomedIgnoreProgrammaticZoom = true;
+ state.tmp.dygraph_force_zoom = false;
+ } else if (state.current.name !== 'auto') {
+ if (NETDATA.options.debug.dygraph || state.debug) {
+ state.log('dygraphChartUpdate() loose update');
+ }
+ } else {
+ if (NETDATA.options.debug.dygraph || state.debug) {
+ state.log('dygraphChartUpdate() strict update');
+ }
+
+ options.dateWindow = (state.requested_padding !== null) ? [state.view_after, state.view_before] : null;
+ //options.isZoomedIgnoreProgrammaticZoom = true;
+ }
+
+ options.valueRange = state.tmp.dygraph_options.valueRange;
+
+ let oldMax = null, oldMin = null;
+ if (state.tmp.__commonMin !== null) {
+ state.data.min = state.tmp.dygraph_instance.axes_[0].extremeRange[0];
+ oldMin = options.valueRange[0] = NETDATA.commonMin.get(state);
+ }
+ if (state.tmp.__commonMax !== null) {
+ state.data.max = state.tmp.dygraph_instance.axes_[0].extremeRange[1];
+ oldMax = options.valueRange[1] = NETDATA.commonMax.get(state);
+ }
+
+ if (state.tmp.dygraph_smooth_eligible) {
+ if ((NETDATA.options.current.smooth_plot && state.tmp.dygraph_options.plotter !== smoothPlotter)
+ || (NETDATA.options.current.smooth_plot === false && state.tmp.dygraph_options.plotter === smoothPlotter)) {
+ NETDATA.dygraphChartCreate(state, data);
+ return;
+ }
+ }
+
+ if (netdataSnapshotData !== null && NETDATA.globalPanAndZoom.isActive() && NETDATA.globalPanAndZoom.isMaster(state) === false) {
+ // pan and zoom on snapshots
+ options.dateWindow = [NETDATA.globalPanAndZoom.force_after_ms, NETDATA.globalPanAndZoom.force_before_ms];
+ //options.isZoomedIgnoreProgrammaticZoom = true;
+ }
+
+ if (NETDATA.chartLibraries.dygraph.isLogScale(state)) {
+ if (Array.isArray(options.valueRange) && options.valueRange[0] <= 0) {
+ options.valueRange[0] = null;
+ }
+ }
+
+ dygraph.updateOptions(options);
+
+ let redraw = false;
+ if (oldMin !== null && oldMin > state.tmp.dygraph_instance.axes_[0].extremeRange[0]) {
+ state.data.min = state.tmp.dygraph_instance.axes_[0].extremeRange[0];
+ options.valueRange[0] = NETDATA.commonMin.get(state);
+ redraw = true;
+ }
+ if (oldMax !== null && oldMax < state.tmp.dygraph_instance.axes_[0].extremeRange[1]) {
+ state.data.max = state.tmp.dygraph_instance.axes_[0].extremeRange[1];
+ options.valueRange[1] = NETDATA.commonMax.get(state);
+ redraw = true;
+ }
+
+ if (redraw) {
+ // state.log('forcing redraw to adapt to common min/max');
+ dygraph.updateOptions(options);
+ }
+
+ state.tmp.dygraph_last_rendered = Date.now();
+ return true;
+};
+
+NETDATA.dygraphChartCreate = function (state, data) {
+ if (NETDATA.options.debug.dygraph || state.debug) {
+ state.log('dygraphChartCreate()');
+ }
+
+ state.tmp.dygraph_chart_type = NETDATA.dataAttribute(state.element, 'dygraph-type', state.chart.chart_type);
+ if (state.tmp.dygraph_chart_type === 'stacked' && data.dimensions === 1) {
+ state.tmp.dygraph_chart_type = 'area';
+ }
+ if (state.tmp.dygraph_chart_type === 'stacked' && NETDATA.chartLibraries.dygraph.isLogScale(state)) {
+ state.tmp.dygraph_chart_type = 'area';
+ }
+
+ let highlightCircleSize = NETDATA.chartLibraries.dygraph.isSparkline(state) ? 3 : 4;
+
+ let smooth = NETDATA.dygraph.smooth
+ ? (NETDATA.dataAttributeBoolean(state.element, 'dygraph-smooth', (state.tmp.dygraph_chart_type === 'line' && NETDATA.chartLibraries.dygraph.isSparkline(state) === false)))
+ : false;
+
+ state.tmp.dygraph_include_zero = NETDATA.dataAttribute(state.element, 'dygraph-includezero', (state.tmp.dygraph_chart_type === 'stacked'));
+ let drawAxis = NETDATA.dataAttributeBoolean(state.element, 'dygraph-drawaxis', true);
+
+ state.tmp.dygraph_options = {
+ colors: NETDATA.dataAttribute(state.element, 'dygraph-colors', state.chartColors()),
+
+ // leave a few pixels empty on the right of the chart
+ rightGap: NETDATA.dataAttribute(state.element, 'dygraph-rightgap', 5),
+ showRangeSelector: NETDATA.dataAttributeBoolean(state.element, 'dygraph-showrangeselector', false),
+ showRoller: NETDATA.dataAttributeBoolean(state.element, 'dygraph-showroller', false),
+ title: NETDATA.dataAttribute(state.element, 'dygraph-title', state.title),
+ titleHeight: NETDATA.dataAttribute(state.element, 'dygraph-titleheight', 19),
+ legend: NETDATA.dataAttribute(state.element, 'dygraph-legend', 'always'), // we need this to get selection events
+ labels: data.result.labels,
+ labelsDiv: NETDATA.dataAttribute(state.element, 'dygraph-labelsdiv', state.element_legend_childs.hidden),
+ //labelsDivStyles: NETDATA.dataAttribute(state.element, 'dygraph-labelsdivstyles', { 'fontSize':'1px' }),
+ //labelsDivWidth: NETDATA.dataAttribute(state.element, 'dygraph-labelsdivwidth', state.chartWidth() - 70),
+ labelsSeparateLines: NETDATA.dataAttributeBoolean(state.element, 'dygraph-labelsseparatelines', true),
+ labelsShowZeroValues: NETDATA.chartLibraries.dygraph.isLogScale(state) ? false : NETDATA.dataAttributeBoolean(state.element, 'dygraph-labelsshowzerovalues', true),
+ labelsKMB: false,
+ labelsKMG2: false,
+ showLabelsOnHighlight: NETDATA.dataAttributeBoolean(state.element, 'dygraph-showlabelsonhighlight', true),
+ hideOverlayOnMouseOut: NETDATA.dataAttributeBoolean(state.element, 'dygraph-hideoverlayonmouseout', true),
+ includeZero: state.tmp.dygraph_include_zero,
+ xRangePad: NETDATA.dataAttribute(state.element, 'dygraph-xrangepad', 0),
+ yRangePad: NETDATA.dataAttribute(state.element, 'dygraph-yrangepad', 1),
+ valueRange: NETDATA.dataAttribute(state.element, 'dygraph-valuerange', [null, null]),
+ ylabel: state.units_current, // (state.units_desired === 'auto')?"":state.units_current,
+ yLabelWidth: NETDATA.dataAttribute(state.element, 'dygraph-ylabelwidth', 12),
+
+ // the function to plot the chart
+ plotter: null,
+
+ // The width of the lines connecting data points.
+ // This can be used to increase the contrast of some graphs.
+ strokeWidth: NETDATA.dataAttribute(state.element, 'dygraph-strokewidth', ((state.tmp.dygraph_chart_type === 'stacked') ? 0.1 : ((smooth === true) ? 1.5 : 0.7))),
+ strokePattern: NETDATA.dataAttribute(state.element, 'dygraph-strokepattern', undefined),
+
+ // The size of the dot to draw on each point in pixels (see drawPoints).
+ // A dot is always drawn when a point is "isolated",
+ // i.e. there is a missing point on either side of it.
+ // This also controls the size of those dots.
+ drawPoints: NETDATA.dataAttributeBoolean(state.element, 'dygraph-drawpoints', false),
+
+ // Draw points at the edges of gaps in the data.
+ // This improves visibility of small data segments or other data irregularities.
+ drawGapEdgePoints: NETDATA.dataAttributeBoolean(state.element, 'dygraph-drawgapedgepoints', true),
+ connectSeparatedPoints: NETDATA.chartLibraries.dygraph.isLogScale(state) ? false : NETDATA.dataAttributeBoolean(state.element, 'dygraph-connectseparatedpoints', false),
+ pointSize: NETDATA.dataAttribute(state.element, 'dygraph-pointsize', 1),
+
+ // enabling this renders the chart with stepped (square) lines
+ stepPlot: NETDATA.dataAttributeBoolean(state.element, 'dygraph-stepplot', false),
+
+ // Draw a border around graph lines to make crossing lines more easily
+ // distinguishable. Useful for graphs with many lines.
+ strokeBorderColor: NETDATA.dataAttribute(state.element, 'dygraph-strokebordercolor', NETDATA.themes.current.background),
+ strokeBorderWidth: NETDATA.dataAttribute(state.element, 'dygraph-strokeborderwidth', (state.tmp.dygraph_chart_type === 'stacked') ? 0.0 : 0.0),
+ fillGraph: NETDATA.dataAttribute(state.element, 'dygraph-fillgraph', (state.tmp.dygraph_chart_type === 'area' || state.tmp.dygraph_chart_type === 'stacked')),
+ fillAlpha: NETDATA.dataAttribute(state.element, 'dygraph-fillalpha',
+ ((state.tmp.dygraph_chart_type === 'stacked')
+ ? NETDATA.options.current.color_fill_opacity_stacked
+ : NETDATA.options.current.color_fill_opacity_area)
+ ),
+ stackedGraph: NETDATA.dataAttribute(state.element, 'dygraph-stackedgraph', (state.tmp.dygraph_chart_type === 'stacked')),
+ stackedGraphNaNFill: NETDATA.dataAttribute(state.element, 'dygraph-stackedgraphnanfill', 'none'),
+ drawAxis: drawAxis,
+ axisLabelFontSize: NETDATA.dataAttribute(state.element, 'dygraph-axislabelfontsize', 10),
+ axisLineColor: NETDATA.dataAttribute(state.element, 'dygraph-axislinecolor', NETDATA.themes.current.axis),
+ axisLineWidth: NETDATA.dataAttribute(state.element, 'dygraph-axislinewidth', 1.0),
+ drawGrid: NETDATA.dataAttributeBoolean(state.element, 'dygraph-drawgrid', true),
+ gridLinePattern: NETDATA.dataAttribute(state.element, 'dygraph-gridlinepattern', null),
+ gridLineWidth: NETDATA.dataAttribute(state.element, 'dygraph-gridlinewidth', 1.0),
+ gridLineColor: NETDATA.dataAttribute(state.element, 'dygraph-gridlinecolor', NETDATA.themes.current.grid),
+ maxNumberWidth: NETDATA.dataAttribute(state.element, 'dygraph-maxnumberwidth', 8),
+ sigFigs: NETDATA.dataAttribute(state.element, 'dygraph-sigfigs', null),
+ digitsAfterDecimal: NETDATA.dataAttribute(state.element, 'dygraph-digitsafterdecimal', 2),
+ valueFormatter: NETDATA.dataAttribute(state.element, 'dygraph-valueformatter', undefined),
+ highlightCircleSize: NETDATA.dataAttribute(state.element, 'dygraph-highlightcirclesize', highlightCircleSize),
+ highlightSeriesOpts: NETDATA.dataAttribute(state.element, 'dygraph-highlightseriesopts', null), // TOO SLOW: { strokeWidth: 1.5 },
+ highlightSeriesBackgroundAlpha: NETDATA.dataAttribute(state.element, 'dygraph-highlightseriesbackgroundalpha', null), // TOO SLOW: (state.tmp.dygraph_chart_type === 'stacked')?0.7:0.5,
+ pointClickCallback: NETDATA.dataAttribute(state.element, 'dygraph-pointclickcallback', undefined),
+ visibility: state.dimensions_visibility.selected2BooleanArray(state.data.dimension_names),
+ logscale: NETDATA.chartLibraries.dygraph.isLogScale(state) ? 'y' : undefined,
+
+ // Expects a string in the format "<series name>: <style>" where each series is separated by a |
+ perSeriesStyle: NETDATA.dataAttribute(state.element, 'dygraph-per-series-style', ''),
+
+ axes: {
+ x: {
+ pixelsPerLabel: NETDATA.dataAttribute(state.element, 'dygraph-xpixelsperlabel', 50),
+ ticker: Dygraph.dateTicker,
+ axisLabelWidth: NETDATA.dataAttribute(state.element, 'dygraph-xaxislabelwidth', 60),
+ drawAxis: NETDATA.dataAttributeBoolean(state.element, 'dygraph-drawxaxis', drawAxis),
+ axisLabelFormatter: function (d, gran) {
+ void(gran);
+ return NETDATA.dateTime.xAxisTimeString(d);
+ }
+ },
+ y: {
+ logscale: NETDATA.chartLibraries.dygraph.isLogScale(state) ? true : undefined,
+ pixelsPerLabel: NETDATA.dataAttribute(state.element, 'dygraph-ypixelsperlabel', 15),
+ axisLabelWidth: NETDATA.dataAttribute(state.element, 'dygraph-yaxislabelwidth', 50),
+ drawAxis: NETDATA.dataAttributeBoolean(state.element, 'dygraph-drawyaxis', drawAxis),
+ axisLabelFormatter: function (y) {
+
+ // unfortunately, we have to call this every single time
+ state.legendFormatValueDecimalsFromMinMax(
+ this.axes_[0].extremeRange[0],
+ this.axes_[0].extremeRange[1]
+ );
+
+ let old_units = this.user_attrs_.ylabel;
+ let v = state.legendFormatValue(y);
+ let new_units = state.units_current;
+
+ if (state.units_desired === 'auto' && typeof old_units !== 'undefined' && new_units !== old_units && !NETDATA.chartLibraries.dygraph.isSparkline(state)) {
+ // console.log(this);
+ // state.log('units discrepancy: old = ' + old_units + ', new = ' + new_units);
+ let len = this.plugins_.length;
+ while (len--) {
+ // console.log(this.plugins_[len]);
+ if (typeof this.plugins_[len].plugin.ylabel_div_ !== 'undefined'
+ && this.plugins_[len].plugin.ylabel_div_ !== null
+ && typeof this.plugins_[len].plugin.ylabel_div_.children !== 'undefined'
+ && this.plugins_[len].plugin.ylabel_div_.children !== null
+ && typeof this.plugins_[len].plugin.ylabel_div_.children[0].children !== 'undefined'
+ && this.plugins_[len].plugin.ylabel_div_.children[0].children !== null
+ ) {
+ this.plugins_[len].plugin.ylabel_div_.children[0].children[0].innerHTML = new_units;
+ this.user_attrs_.ylabel = new_units;
+ break;
+ }
+ }
+
+ if (len < 0) {
+ state.log('units discrepancy, but cannot find dygraphs div to change: old = ' + old_units + ', new = ' + new_units);
+ }
+ }
+
+ return v;
+ }
+ }
+ },
+ legendFormatter: function (data) {
+ if (state.tmp.dygraph_mouse_down) {
+ return;
+ }
+
+ let elements = state.element_legend_childs;
+
+ // if the hidden div is not there
+ // we are not managing the legend
+ if (elements.hidden === null) {
+ return;
+ }
+
+ if (typeof data.x !== 'undefined') {
+ state.legendSetDate(data.x);
+ let i = data.series.length;
+ while (i--) {
+ let series = data.series[i];
+ if (series.isVisible) {
+ state.legendSetLabelValue(series.label, series.y);
+ } else {
+ state.legendSetLabelValue(series.label, null);
+ }
+ }
+ }
+
+ return '';
+ },
+ drawCallback: function (dygraph, is_initial) {
+
+ // the user has panned the chart and this is called to re-draw the chart
+ // 1. refresh this chart by adding data to it
+ // 2. notify all the other charts about the update they need
+
+ // to prevent an infinite loop (feedback), we use
+ // state.tmp.dygraph_user_action
+ // - when true, this is initiated by a user
+ // - when false, this is feedback
+
+ if (state.current.name !== 'auto' && state.tmp.dygraph_user_action) {
+ state.tmp.dygraph_user_action = false;
+
+ let x_range = dygraph.xAxisRange();
+ let after = Math.round(x_range[0]);
+ let before = Math.round(x_range[1]);
+
+ if (NETDATA.options.debug.dygraph) {
+ state.log('dygraphDrawCallback(dygraph, ' + is_initial + '): mode ' + state.current.name + ' ' + (after / 1000).toString() + ' - ' + (before / 1000).toString());
+ //console.log(state);
+ }
+
+ if (before <= state.netdata_last && after >= state.netdata_first) {
+ // update only when we are within the data limits
+ state.updateChartPanOrZoom(after, before);
+ }
+ }
+ },
+ zoomCallback: function (minDate, maxDate, yRanges) {
+
+ // the user has selected a range on the chart
+ // 1. refresh this chart by adding data to it
+ // 2. notify all the other charts about the update they need
+
+ void(yRanges);
+
+ if (NETDATA.options.debug.dygraph) {
+ state.log('dygraphZoomCallback(): ' + state.current.name);
+ }
+
+ NETDATA.globalSelectionSync.stop();
+ NETDATA.globalSelectionSync.delay();
+ state.setMode('zoom');
+
+ // refresh it to the greatest possible zoom level
+ state.tmp.dygraph_user_action = true;
+ state.tmp.dygraph_force_zoom = true;
+ state.updateChartPanOrZoom(minDate, maxDate);
+ },
+ highlightCallback: function (event, x, points, row, seriesName) {
+ void(seriesName);
+
+ state.pauseChart();
+
+ // there is a bug in dygraph: when the chart is zoomed in enough,
+ // the time it reports as selected is wrong.
+ // the commented code below calculates the time t from the selected
+ // row number instead, which is reliable.
+ // let t = state.data_after + row * state.data_update_every;
+ // console.log('row = ' + row + ', x = ' + x + ', t = ' + t + ' ' + ((t === x)?'SAME':(Math.abs(x-t)<=state.data_update_every)?'SIMILAR':'DIFFERENT') + ', rows in db: ' + state.data_points + ' visible(x) = ' + state.timeIsVisible(x) + ' visible(t) = ' + state.timeIsVisible(t) + ' r(x) = ' + state.calculateRowForTime(x) + ' r(t) = ' + state.calculateRowForTime(t) + ' range: ' + state.data_after + ' - ' + state.data_before + ' real: ' + state.data.after + ' - ' + state.data.before + ' every: ' + state.data_update_every);
+
+ if (state.tmp.dygraph_mouse_down !== true) {
+ NETDATA.globalSelectionSync.sync(state, x);
+ }
+
+ // fix legend zIndex using the internal structures of dygraph legend module
+ // this works, but it is a hack!
+ // state.tmp.dygraph_instance.plugins_[0].plugin.legend_div_.style.zIndex = 10000;
+ },
+ unhighlightCallback: function (event) {
+ void(event);
+
+ if (state.tmp.dygraph_mouse_down) {
+ return;
+ }
+
+ if (NETDATA.options.debug.dygraph || state.debug) {
+ state.log('dygraphUnhighlightCallback()');
+ }
+
+ state.unpauseChart();
+ NETDATA.globalSelectionSync.stop();
+ },
+ underlayCallback: function (canvas, area, g) {
+ // the chart is about to be drawn
+
+ // update history_tip_element
+ if (state.tmp.dygraph_history_tip_element) {
+ const xHookRightSide = g.toDomXCoord(state.netdata_first);
+ if (xHookRightSide > area.x) {
+ state.tmp.dygraph_history_tip_element_displayed = true;
+ // group the styles for possible better performance
+ state.tmp.dygraph_history_tip_element.setAttribute(
+ 'style',
+ `display: block; left: ${area.x}px; right: calc(100% - ${xHookRightSide}px);`
+ )
+ } else {
+ if (state.tmp.dygraph_history_tip_element_displayed) {
+ // additional check just for performance
+ // don't update the DOM when it's not needed
+ state.tmp.dygraph_history_tip_element.style.display = 'none';
+ state.tmp.dygraph_history_tip_element_displayed = false;
+ }
+ }
+ }
+
+ // this function renders global highlighted time-frame
+
+ if (NETDATA.globalChartUnderlay.isActive()) {
+ let after = NETDATA.globalChartUnderlay.after;
+ let before = NETDATA.globalChartUnderlay.before;
+
+ if (after < state.view_after) {
+ after = state.view_after;
+ }
+
+ if (before > state.view_before) {
+ before = state.view_before;
+ }
+
+ if (after < before) {
+ let bottom_left = g.toDomCoords(after, -20);
+ let top_right = g.toDomCoords(before, +20);
+
+ let left = bottom_left[0];
+ let right = top_right[0];
+
+ canvas.fillStyle = NETDATA.themes.current.highlight;
+ canvas.fillRect(left, area.y, right - left, area.h);
+ }
+ }
+ },
+ interactionModel: {
+ mousedown: function (event, dygraph, context) {
+ if (NETDATA.options.debug.dygraph || state.debug) {
+ state.log('interactionModel.mousedown()');
+ }
+
+ state.tmp.dygraph_user_action = true;
+
+ if (NETDATA.options.debug.dygraph) {
+ state.log('dygraphMouseDown()');
+ }
+
+ // Right-click should not initiate anything.
+ if (event.button && event.button === 2) {
+ return;
+ }
+
+ NETDATA.globalSelectionSync.stop();
+ NETDATA.globalSelectionSync.delay();
+
+ state.tmp.dygraph_mouse_down = true;
+ context.initializeMouseDown(event, dygraph, context);
+
+ //console.log(event);
+ if (event.button && event.button === 1) {
+ if (event.shiftKey) {
+ //console.log('middle mouse button dragging (PAN)');
+
+ state.setMode('pan');
+ // NETDATA.globalSelectionSync.delay();
+ state.tmp.dygraph_highlight_after = null;
+ Dygraph.startPan(event, dygraph, context);
+ } else if (event.altKey || event.ctrlKey || event.metaKey) {
+ //console.log('middle mouse button highlight');
+
+ if (!(event.offsetX && event.offsetY)) {
+ event.offsetX = event.layerX - event.target.offsetLeft;
+ event.offsetY = event.layerY - event.target.offsetTop;
+ }
+ state.tmp.dygraph_highlight_after = dygraph.toDataXCoord(event.offsetX);
+ Dygraph.startZoom(event, dygraph, context);
+ } else {
+ //console.log('middle mouse button selection for zoom (ZOOM)');
+
+ state.setMode('zoom');
+ // NETDATA.globalSelectionSync.delay();
+ state.tmp.dygraph_highlight_after = null;
+ Dygraph.startZoom(event, dygraph, context);
+ }
+ } else {
+ if (event.shiftKey) {
+ //console.log('left mouse button selection for zoom (ZOOM)');
+
+ state.setMode('zoom');
+ // NETDATA.globalSelectionSync.delay();
+ state.tmp.dygraph_highlight_after = null;
+ Dygraph.startZoom(event, dygraph, context);
+ } else if (event.altKey || event.ctrlKey || event.metaKey) {
+ //console.log('left mouse button highlight');
+
+ if (!(event.offsetX && event.offsetY)) {
+ event.offsetX = event.layerX - event.target.offsetLeft;
+ event.offsetY = event.layerY - event.target.offsetTop;
+ }
+ state.tmp.dygraph_highlight_after = dygraph.toDataXCoord(event.offsetX);
+ Dygraph.startZoom(event, dygraph, context);
+ } else {
+ //console.log('left mouse button dragging (PAN)');
+
+ state.setMode('pan');
+ // NETDATA.globalSelectionSync.delay();
+ state.tmp.dygraph_highlight_after = null;
+ Dygraph.startPan(event, dygraph, context);
+ }
+ }
+ },
+ mousemove: function (event, dygraph, context) {
+ if (NETDATA.options.debug.dygraph || state.debug) {
+ state.log('interactionModel.mousemove()');
+ }
+
+ if (state.tmp.dygraph_highlight_after !== null) {
+ //console.log('highlight selection...');
+
+ NETDATA.globalSelectionSync.stop();
+ NETDATA.globalSelectionSync.delay();
+
+ state.tmp.dygraph_user_action = true;
+ Dygraph.moveZoom(event, dygraph, context);
+ event.preventDefault();
+ } else if (context.isPanning) {
+ //console.log('panning...');
+
+ NETDATA.globalSelectionSync.stop();
+ NETDATA.globalSelectionSync.delay();
+
+ state.tmp.dygraph_user_action = true;
+ //NETDATA.globalSelectionSync.stop();
+ //NETDATA.globalSelectionSync.delay();
+ state.setMode('pan');
+ context.is2DPan = false;
+ Dygraph.movePan(event, dygraph, context);
+ } else if (context.isZooming) {
+ //console.log('zooming...');
+
+ NETDATA.globalSelectionSync.stop();
+ NETDATA.globalSelectionSync.delay();
+
+ state.tmp.dygraph_user_action = true;
+ //NETDATA.globalSelectionSync.stop();
+ //NETDATA.globalSelectionSync.delay();
+ state.setMode('zoom');
+ Dygraph.moveZoom(event, dygraph, context);
+ }
+ },
+ mouseup: function (event, dygraph, context) {
+ state.tmp.dygraph_mouse_down = false;
+
+ if (NETDATA.options.debug.dygraph || state.debug) {
+ state.log('interactionModel.mouseup()');
+ }
+
+ if (state.tmp.dygraph_highlight_after !== null) {
+ //console.log('done highlight selection');
+
+ NETDATA.globalSelectionSync.stop();
+ NETDATA.globalSelectionSync.delay();
+
+ if (!(event.offsetX && event.offsetY)) {
+ event.offsetX = event.layerX - event.target.offsetLeft;
+ event.offsetY = event.layerY - event.target.offsetTop;
+ }
+
+ NETDATA.globalChartUnderlay.set(state
+ , state.tmp.dygraph_highlight_after
+ , dygraph.toDataXCoord(event.offsetX)
+ , state.view_after
+ , state.view_before
+ );
+
+ state.tmp.dygraph_highlight_after = null;
+
+ context.isZooming = false;
+ dygraph.clearZoomRect_();
+ dygraph.drawGraph_(false);
+
+ // refresh all the charts immediately
+ NETDATA.options.auto_refresher_stop_until = 0;
+ } else if (context.isPanning) {
+ //console.log('done panning');
+
+ NETDATA.globalSelectionSync.stop();
+ NETDATA.globalSelectionSync.delay();
+
+ state.tmp.dygraph_user_action = true;
+ Dygraph.endPan(event, dygraph, context);
+
+ // refresh all the charts immediately
+ NETDATA.options.auto_refresher_stop_until = 0;
+ } else if (context.isZooming) {
+ //console.log('done zooming');
+
+ NETDATA.globalSelectionSync.stop();
+ NETDATA.globalSelectionSync.delay();
+
+ state.tmp.dygraph_user_action = true;
+ Dygraph.endZoom(event, dygraph, context);
+
+ // refresh all the charts immediately
+ NETDATA.options.auto_refresher_stop_until = 0;
+ }
+ },
+ click: function (event, dygraph, context) {
+ void(dygraph);
+ void(context);
+
+ if (NETDATA.options.debug.dygraph || state.debug) {
+ state.log('interactionModel.click()');
+ }
+
+ event.preventDefault();
+ },
+ dblclick: function (event, dygraph, context) {
+ void(event);
+ void(dygraph);
+ void(context);
+
+ if (NETDATA.options.debug.dygraph || state.debug) {
+ state.log('interactionModel.dblclick()');
+ }
+ NETDATA.resetAllCharts(state);
+ },
+ wheel: function (event, dygraph, context) {
+ void(context);
+
+ if (NETDATA.options.debug.dygraph || state.debug) {
+ state.log('interactionModel.wheel()');
+ }
+
+ // Take the offset of a mouse event on the dygraph canvas and
+ // convert it to a pair of percentages from the bottom left.
+ // (Not top left, bottom is where the lower value is.)
+ function offsetToPercentage(g, offsetX, offsetY) {
+ // This is calculating the pixel offset of the leftmost date.
+ let xOffset = g.toDomCoords(g.xAxisRange()[0], null)[0];
+ let yar0 = g.yAxisRange(0);
+
+ // This is calculating the pixel of the highest value. (Top pixel)
+ let yOffset = g.toDomCoords(null, yar0[1])[1];
+
+ // x, y, w and h are relative to the corner of the drawing area,
+ // so that the upper-left corner of the drawing area is (0, 0).
+ let x = offsetX - xOffset;
+ let y = offsetY - yOffset;
+
+ // This is computing the rightmost pixel, effectively defining the
+ // width.
+ let w = g.toDomCoords(g.xAxisRange()[1], null)[0] - xOffset;
+
+ // This is computing the lowest pixel, effectively defining the height.
+ let h = g.toDomCoords(null, yar0[0])[1] - yOffset;
+
+ // Percentage from the left.
+ let xPct = w === 0 ? 0 : (x / w);
+ // Percentage from the top.
+ let yPct = h === 0 ? 0 : (y / h);
+
+ // The (1-) part below changes it from "% distance down from the top"
+ // to "% distance up from the bottom".
+ return [xPct, (1 - yPct)];
+ }
+
+ // Adjusts [x, y] toward each other by zoomInPercentage%
+ // Split it so the left/bottom axis gets xBias/yBias of that change and
+ // right/top gets (1-xBias)/(1-yBias) of that change.
+ //
+ // If a bias is missing it splits it down the middle.
+ function zoomRange(g, zoomInPercentage, xBias, yBias) {
+ xBias = xBias || 0.5;
+ yBias = yBias || 0.5;
+
+ function adjustAxis(axis, zoomInPercentage, bias) {
+ let delta = axis[1] - axis[0];
+ let increment = delta * zoomInPercentage;
+ let shift = [increment * bias, increment * (1 - bias)];
+
+ return [axis[0] + shift[0], axis[1] - shift[1]];
+ }
+
+ let yAxes = g.yAxisRanges();
+ let newYAxes = [];
+ for (let i = 0; i < yAxes.length; i++) {
+ newYAxes[i] = adjustAxis(yAxes[i], zoomInPercentage, yBias);
+ }
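+ // note: the adjusted y ranges are computed above but never applied;
+ // only the new x range is returned and used below.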
+
+ return adjustAxis(g.xAxisRange(), zoomInPercentage, xBias);
+ }
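+
+ // Worked example (illustrative): with an x axis of [1000, 2000],
+ // zoomInPercentage = 0.1 and xBias = 0.5: delta = 1000 and
+ // increment = 100, so the result is [1000 + 50, 2000 - 50]
+ // = [1050, 1950], a 10% zoom-in split evenly around the cursor.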
+
+ if (event.altKey || event.shiftKey) {
+ state.tmp.dygraph_user_action = true;
+
+ NETDATA.globalSelectionSync.stop();
+ NETDATA.globalSelectionSync.delay();
+
+ // http://dygraphs.com/gallery/interaction-api.js
+ let normal_def;
+ if (typeof event.wheelDelta === 'number' && !isNaN(event.wheelDelta)) {
+ // chrome
+ normal_def = event.wheelDelta / 40;
+ } else {
+ // firefox
+ normal_def = event.deltaY * -1.2;
+ }
+
+ let normal = (event.detail) ? event.detail * -1 : normal_def;
+ let percentage = normal / 50;
+
+ if (!(event.offsetX && event.offsetY)) {
+ event.offsetX = event.layerX - event.target.offsetLeft;
+ event.offsetY = event.layerY - event.target.offsetTop;
+ }
+
+ let percentages = offsetToPercentage(dygraph, event.offsetX, event.offsetY);
+ let xPct = percentages[0];
+ let yPct = percentages[1];
+
+ let new_x_range = zoomRange(dygraph, percentage, xPct, yPct);
+ let after = new_x_range[0];
+ let before = new_x_range[1];
+
+ let first = state.netdata_first + state.data_update_every;
+ let last = state.netdata_last + state.data_update_every;
+
+ if (before > last) {
+ after -= (before - last);
+ before = last;
+ }
+ if (after < first) {
+ after = first;
+ }
+
+ state.setMode('zoom');
+ state.updateChartPanOrZoom(after, before, function () {
+ dygraph.updateOptions({dateWindow: [after, before]});
+ });
+
+ event.preventDefault();
+ }
+ },
+ touchstart: function (event, dygraph, context) {
+ state.tmp.dygraph_mouse_down = true;
+
+ if (NETDATA.options.debug.dygraph || state.debug) {
+ state.log('interactionModel.touchstart()');
+ }
+
+ state.tmp.dygraph_user_action = true;
+ state.setMode('zoom');
+ state.pauseChart();
+
+ NETDATA.globalSelectionSync.stop();
+ NETDATA.globalSelectionSync.delay();
+
+ Dygraph.defaultInteractionModel.touchstart(event, dygraph, context);
+
+ // we overwrite the touch directions at the end, to override
+ // the internal default of dygraph
+ context.touchDirections = {x: true, y: false};
+
+ state.dygraph_last_touch_start = Date.now();
+ state.dygraph_last_touch_move = 0;
+
+ if (typeof event.touches[0].pageX === 'number') {
+ state.dygraph_last_touch_page_x = event.touches[0].pageX;
+ } else {
+ state.dygraph_last_touch_page_x = 0;
+ }
+ },
+ touchmove: function (event, dygraph, context) {
+ if (NETDATA.options.debug.dygraph || state.debug) {
+ state.log('interactionModel.touchmove()');
+ }
+
+ NETDATA.globalSelectionSync.stop();
+ NETDATA.globalSelectionSync.delay();
+
+ state.tmp.dygraph_user_action = true;
+ Dygraph.defaultInteractionModel.touchmove(event, dygraph, context);
+
+ state.dygraph_last_touch_move = Date.now();
+ },
+ touchend: function (event, dygraph, context) {
+ state.tmp.dygraph_mouse_down = false;
+
+ if (NETDATA.options.debug.dygraph || state.debug) {
+ state.log('interactionModel.touchend()');
+ }
+
+ NETDATA.globalSelectionSync.stop();
+ NETDATA.globalSelectionSync.delay();
+
+ state.tmp.dygraph_user_action = true;
+ Dygraph.defaultInteractionModel.touchend(event, dygraph, context);
+
+ // if it didn't move, it is a selection
+ if (state.dygraph_last_touch_move === 0 && state.dygraph_last_touch_page_x !== 0) {
+ NETDATA.globalSelectionSync.dontSyncBefore = 0;
+ NETDATA.globalSelectionSync.setMaster(state);
+
+ // internal api of dygraph
+ let pct = (state.dygraph_last_touch_page_x - (dygraph.plotter_.area.x + state.element.getBoundingClientRect().left)) / dygraph.plotter_.area.w;
+ // console.log('pct: ' + pct.toString());
+
+ let t = Math.round(state.view_after + (state.view_before - state.view_after) * pct);
+ if (NETDATA.dygraphSetSelection(state, t)) {
+ NETDATA.globalSelectionSync.sync(state, t);
+ }
+ }
+
+ // if it was a double tap within the double-click time, reset the charts
+ let now = Date.now();
+ if (typeof state.dygraph_last_touch_end !== 'undefined') {
+ if (state.dygraph_last_touch_move === 0) {
+ let dt = now - state.dygraph_last_touch_end;
+ if (dt <= NETDATA.options.current.double_click_speed) {
+ NETDATA.resetAllCharts(state);
+ }
+ }
+ }
+
+ // remember the timestamp of the last touch end
+ state.dygraph_last_touch_end = now;
+
+ // refresh all the charts immediately
+ NETDATA.options.auto_refresher_stop_until = 0;
+ }
+ }
+ };
+
+ if (NETDATA.chartLibraries.dygraph.isLogScale(state)) {
+ if (Array.isArray(state.tmp.dygraph_options.valueRange) && state.tmp.dygraph_options.valueRange[0] <= 0) {
+ state.tmp.dygraph_options.valueRange[0] = null;
+ }
+ }
+
+ if (NETDATA.chartLibraries.dygraph.isSparkline(state)) {
+ state.tmp.dygraph_options.drawGrid = false;
+ state.tmp.dygraph_options.drawAxis = false;
+ state.tmp.dygraph_options.title = undefined;
+ state.tmp.dygraph_options.ylabel = undefined;
+ state.tmp.dygraph_options.yLabelWidth = 0;
+ //state.tmp.dygraph_options.labelsDivWidth = 120;
+ //state.tmp.dygraph_options.labelsDivStyles.width = '120px';
+ state.tmp.dygraph_options.labelsSeparateLines = true;
+ state.tmp.dygraph_options.rightGap = 0;
+ state.tmp.dygraph_options.yRangePad = 1;
+ state.tmp.dygraph_options.axes.x.drawAxis = false;
+ state.tmp.dygraph_options.axes.y.drawAxis = false;
+ }
+
+ if (smooth) {
+ state.tmp.dygraph_smooth_eligible = true;
+
+ if (NETDATA.options.current.smooth_plot) {
+ state.tmp.dygraph_options.plotter = smoothPlotter;
+ }
+ } else {
+ state.tmp.dygraph_smooth_eligible = false;
+ }
+
+ if (netdataSnapshotData !== null && NETDATA.globalPanAndZoom.isActive() && NETDATA.globalPanAndZoom.isMaster(state) === false) {
+ // pan and zoom on snapshots
+ state.tmp.dygraph_options.dateWindow = [NETDATA.globalPanAndZoom.force_after_ms, NETDATA.globalPanAndZoom.force_before_ms];
+ //state.tmp.dygraph_options.isZoomedIgnoreProgrammaticZoom = true;
+ }
+
+ let seriesStyles = NETDATA.dygraphGetSeriesStyle(state.tmp.dygraph_options);
+ state.tmp.dygraph_options.series = seriesStyles;
+
+ state.tmp.dygraph_instance = new Dygraph(
+ state.element_chart,
+ data.result.data,
+ state.tmp.dygraph_options
+ );
+
+ state.tmp.dygraph_history_tip_element = document.createElement('div');
+ state.tmp.dygraph_history_tip_element.innerHTML = `
+ <span class="dygraph__history-tip-content">
+ Want to extend your history of real-time metrics?
+ <br />
+ <a href="https://docs.netdata.cloud/docs/configuration-guide/#increase-the-metrics-retention-period" target=_blank>
+ Configure Netdata's <b>history</b></a>
+ or use the <a href="https://docs.netdata.cloud/database/engine/" target=_blank>DB engine</a>.
+ </span>
+ `;
+ state.tmp.dygraph_history_tip_element.className = 'dygraph__history-tip';
+ state.element_chart.appendChild(state.tmp.dygraph_history_tip_element);
+
+ state.tmp.dygraph_force_zoom = false;
+ state.tmp.dygraph_user_action = false;
+ state.tmp.dygraph_last_rendered = Date.now();
+ state.tmp.dygraph_highlight_after = null;
+
+ if (state.tmp.dygraph_options.valueRange[0] === null && state.tmp.dygraph_options.valueRange[1] === null) {
+ if (typeof state.tmp.dygraph_instance.axes_[0].extremeRange !== 'undefined') {
+ state.tmp.__commonMin = NETDATA.dataAttribute(state.element, 'common-min', null);
+ state.tmp.__commonMax = NETDATA.dataAttribute(state.element, 'common-max', null);
+ } else {
+ state.log('incompatible version of Dygraph detected');
+ state.tmp.__commonMin = null;
+ state.tmp.__commonMax = null;
+ }
+ } else {
+ // if the user gave a valueRange, respect it
+ state.tmp.__commonMin = null;
+ state.tmp.__commonMax = null;
+ }
+
+ return true;
+};
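+
+// Example (illustrative): charts that should share a y-scale can point
+// data-common-min / data-common-max at the same group name; the values are
+// read above via NETDATA.dataAttribute(..., 'common-min' / 'common-max'):
+//
+//     <div data-netdata="net.eth0" data-common-max="traffic"></div>
+//     <div data-netdata="net.eth1" data-common-max="traffic"></div>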
+
+NETDATA.dygraphGetSeriesStyle = function(dygraphOptions) {
+ const seriesStyleStr = dygraphOptions.perSeriesStyle;
+ let formattedStyles = {};
+
+ if (seriesStyleStr === '') {
+ return formattedStyles;
+ }
+
+ // Parse the config string into a plain object.
+ // Strip all whitespace first: String.replace() with a plain string
+ // would only remove the first space.
+ let styles = seriesStyleStr.replace(/\s/g, '').split('|');
+
+ styles.forEach(style => {
+ const keys = style.split(':');
+ formattedStyles[keys[0]] = keys[1];
+ });
+
+ for (let key in formattedStyles) {
+ if (formattedStyles.hasOwnProperty(key)) {
+ let settings;
+
+ switch (formattedStyles[key]) {
+ case 'line':
+ settings = { fillGraph: false };
+ break;
+ case 'area':
+ settings = { fillGraph: true };
+ break;
+ case 'dot':
+ settings = {
+ fillGraph: false,
+ drawPoints: true,
+ pointSize: dygraphOptions.pointSize
+ };
+ break;
+ default:
+ settings = undefined;
+ }
+
+ formattedStyles[key] = settings;
+ }
+ }
+
+ return formattedStyles;
+};
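+
+// Example (illustrative): each entry in the style string pairs a dimension
+// name with a style, separated by '|'. With pointSize 1:
+//
+//     NETDATA.dygraphGetSeriesStyle({ perSeriesStyle: 'in: area | out: dot', pointSize: 1 });
+//     // => { in:  { fillGraph: true },
+//     //      out: { fillGraph: false, drawPoints: true, pointSize: 1 } }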
+// ----------------------------------------------------------------------------------------------------------------
+// sparkline
+
+NETDATA.sparklineInitialize = function (callback) {
+ if (typeof netdataNoSparklines === 'undefined' || !netdataNoSparklines) {
+ $.ajax({
+ url: NETDATA.sparkline_js,
+ cache: true,
+ dataType: "script",
+ xhrFields: {withCredentials: true} // required for the cookie
+ })
+ .done(function () {
+ NETDATA.registerChartLibrary('sparkline', NETDATA.sparkline_js);
+ })
+ .fail(function () {
+ NETDATA.chartLibraries.sparkline.enabled = false;
+ NETDATA.error(100, NETDATA.sparkline_js);
+ })
+ .always(function () {
+ if (typeof callback === "function") {
+ return callback();
+ }
+ });
+ } else {
+ NETDATA.chartLibraries.sparkline.enabled = false;
+ if (typeof callback === "function") {
+ return callback();
+ }
+ }
+};
+
+NETDATA.sparklineChartUpdate = function (state, data) {
+ state.sparkline_options.width = state.chartWidth();
+ state.sparkline_options.height = state.chartHeight();
+
+ $(state.element_chart).sparkline(data.result, state.sparkline_options);
+ return true;
+};
+
+NETDATA.sparklineChartCreate = function (state, data) {
+ let type = NETDATA.dataAttribute(state.element, 'sparkline-type', 'line');
+ let lineColor = NETDATA.dataAttribute(state.element, 'sparkline-linecolor', state.chartCustomColors()[0]);
+ let fillColor = NETDATA.dataAttribute(state.element, 'sparkline-fillcolor', ((state.chart.chart_type === 'line') ? NETDATA.themes.current.background : NETDATA.colorLuminance(lineColor, NETDATA.chartDefaults.fill_luminance)));
+ let chartRangeMin = NETDATA.dataAttribute(state.element, 'sparkline-chartrangemin', undefined);
+ let chartRangeMax = NETDATA.dataAttribute(state.element, 'sparkline-chartrangemax', undefined);
+ let composite = NETDATA.dataAttribute(state.element, 'sparkline-composite', undefined);
+ let enableTagOptions = NETDATA.dataAttribute(state.element, 'sparkline-enabletagoptions', undefined);
+ let tagOptionPrefix = NETDATA.dataAttribute(state.element, 'sparkline-tagoptionprefix', undefined);
+ let tagValuesAttribute = NETDATA.dataAttribute(state.element, 'sparkline-tagvaluesattribute', undefined);
+ let disableHiddenCheck = NETDATA.dataAttribute(state.element, 'sparkline-disablehiddencheck', undefined);
+ let defaultPixelsPerValue = NETDATA.dataAttribute(state.element, 'sparkline-defaultpixelspervalue', undefined);
+ let spotColor = NETDATA.dataAttribute(state.element, 'sparkline-spotcolor', undefined);
+ let minSpotColor = NETDATA.dataAttribute(state.element, 'sparkline-minspotcolor', undefined);
+ let maxSpotColor = NETDATA.dataAttribute(state.element, 'sparkline-maxspotcolor', undefined);
+ let spotRadius = NETDATA.dataAttribute(state.element, 'sparkline-spotradius', undefined);
+ let valueSpots = NETDATA.dataAttribute(state.element, 'sparkline-valuespots', undefined);
+ let highlightSpotColor = NETDATA.dataAttribute(state.element, 'sparkline-highlightspotcolor', undefined);
+ let highlightLineColor = NETDATA.dataAttribute(state.element, 'sparkline-highlightlinecolor', undefined);
+ let lineWidth = NETDATA.dataAttribute(state.element, 'sparkline-linewidth', undefined);
+ let normalRangeMin = NETDATA.dataAttribute(state.element, 'sparkline-normalrangemin', undefined);
+ let normalRangeMax = NETDATA.dataAttribute(state.element, 'sparkline-normalrangemax', undefined);
+ let drawNormalOnTop = NETDATA.dataAttribute(state.element, 'sparkline-drawnormalontop', undefined);
+ let xvalues = NETDATA.dataAttribute(state.element, 'sparkline-xvalues', undefined);
+ let chartRangeClip = NETDATA.dataAttribute(state.element, 'sparkline-chartrangeclip', undefined);
+ let chartRangeMinX = NETDATA.dataAttribute(state.element, 'sparkline-chartrangeminx', undefined);
+ let chartRangeMaxX = NETDATA.dataAttribute(state.element, 'sparkline-chartrangemaxx', undefined);
+ let disableInteraction = NETDATA.dataAttributeBoolean(state.element, 'sparkline-disableinteraction', false);
+ let disableTooltips = NETDATA.dataAttributeBoolean(state.element, 'sparkline-disabletooltips', false);
+ let disableHighlight = NETDATA.dataAttributeBoolean(state.element, 'sparkline-disablehighlight', false);
+ let highlightLighten = NETDATA.dataAttribute(state.element, 'sparkline-highlightlighten', 1.4);
+ let highlightColor = NETDATA.dataAttribute(state.element, 'sparkline-highlightcolor', undefined);
+ let tooltipContainer = NETDATA.dataAttribute(state.element, 'sparkline-tooltipcontainer', undefined);
+ let tooltipClassname = NETDATA.dataAttribute(state.element, 'sparkline-tooltipclassname', undefined);
+ let tooltipFormat = NETDATA.dataAttribute(state.element, 'sparkline-tooltipformat', undefined);
+ let tooltipPrefix = NETDATA.dataAttribute(state.element, 'sparkline-tooltipprefix', undefined);
+ let tooltipSuffix = NETDATA.dataAttribute(state.element, 'sparkline-tooltipsuffix', ' ' + state.units_current);
+ let tooltipSkipNull = NETDATA.dataAttributeBoolean(state.element, 'sparkline-tooltipskipnull', true);
+ let tooltipValueLookups = NETDATA.dataAttribute(state.element, 'sparkline-tooltipvaluelookups', undefined);
+ let tooltipFormatFieldlist = NETDATA.dataAttribute(state.element, 'sparkline-tooltipformatfieldlist', undefined);
+ let tooltipFormatFieldlistKey = NETDATA.dataAttribute(state.element, 'sparkline-tooltipformatfieldlistkey', undefined);
+ let numberFormatter = NETDATA.dataAttribute(state.element, 'sparkline-numberformatter', function (n) {
+ return n.toFixed(2);
+ });
+ let numberDigitGroupSep = NETDATA.dataAttribute(state.element, 'sparkline-numberdigitgroupsep', undefined);
+ let numberDecimalMark = NETDATA.dataAttribute(state.element, 'sparkline-numberdecimalmark', undefined);
+ let numberDigitGroupCount = NETDATA.dataAttribute(state.element, 'sparkline-numberdigitgroupcount', undefined);
+ let animatedZooms = NETDATA.dataAttributeBoolean(state.element, 'sparkline-animatedzooms', false);
+
+ if (spotColor === 'disable') {
+ spotColor = '';
+ }
+ if (minSpotColor === 'disable') {
+ minSpotColor = '';
+ }
+ if (maxSpotColor === 'disable') {
+ maxSpotColor = '';
+ }
+
+ // state.log('sparkline type ' + type + ', lineColor: ' + lineColor + ', fillColor: ' + fillColor);
+
+ state.sparkline_options = {
+ type: type,
+ lineColor: lineColor,
+ fillColor: fillColor,
+ chartRangeMin: chartRangeMin,
+ chartRangeMax: chartRangeMax,
+ composite: composite,
+ enableTagOptions: enableTagOptions,
+ tagOptionPrefix: tagOptionPrefix,
+ tagValuesAttribute: tagValuesAttribute,
+ disableHiddenCheck: disableHiddenCheck,
+ defaultPixelsPerValue: defaultPixelsPerValue,
+ spotColor: spotColor,
+ minSpotColor: minSpotColor,
+ maxSpotColor: maxSpotColor,
+ spotRadius: spotRadius,
+ valueSpots: valueSpots,
+ highlightSpotColor: highlightSpotColor,
+ highlightLineColor: highlightLineColor,
+ lineWidth: lineWidth,
+ normalRangeMin: normalRangeMin,
+ normalRangeMax: normalRangeMax,
+ drawNormalOnTop: drawNormalOnTop,
+ xvalues: xvalues,
+ chartRangeClip: chartRangeClip,
+ chartRangeMinX: chartRangeMinX,
+ chartRangeMaxX: chartRangeMaxX,
+ disableInteraction: disableInteraction,
+ disableTooltips: disableTooltips,
+ disableHighlight: disableHighlight,
+ highlightLighten: highlightLighten,
+ highlightColor: highlightColor,
+ tooltipContainer: tooltipContainer,
+ tooltipClassname: tooltipClassname,
+ tooltipChartTitle: state.title,
+ tooltipFormat: tooltipFormat,
+ tooltipPrefix: tooltipPrefix,
+ tooltipSuffix: tooltipSuffix,
+ tooltipSkipNull: tooltipSkipNull,
+ tooltipValueLookups: tooltipValueLookups,
+ tooltipFormatFieldlist: tooltipFormatFieldlist,
+ tooltipFormatFieldlistKey: tooltipFormatFieldlistKey,
+ numberFormatter: numberFormatter,
+ numberDigitGroupSep: numberDigitGroupSep,
+ numberDecimalMark: numberDecimalMark,
+ numberDigitGroupCount: numberDigitGroupCount,
+ animatedZooms: animatedZooms,
+ width: state.chartWidth(),
+ height: state.chartHeight()
+ };
+
+ $(state.element_chart).sparkline(data.result, state.sparkline_options);
+
+ return true;
+};
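+
+// Example (illustrative): a minimal sparkline embed; data-netdata selects
+// the chart, data-chart-library routes it to this renderer, and the
+// data-sparkline-* attributes read above are all optional:
+//
+//     <div data-netdata="system.cpu"
+//          data-chart-library="sparkline"
+//          data-sparkline-type="line"
+//          data-height="25px"></div>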
+// google charts
+
+// Codacy declarations
+/* global google */
+
+NETDATA.googleInitialize = function (callback) {
+ if (typeof netdataNoGoogleCharts === 'undefined' || !netdataNoGoogleCharts) {
+ $.ajax({
+ url: NETDATA.google_js,
+ cache: true,
+ dataType: "script",
+ xhrFields: {withCredentials: true} // required for the cookie
+ })
+ .done(function () {
+ NETDATA.registerChartLibrary('google', NETDATA.google_js);
+ google.load('visualization', '1.1', {
+ 'packages': ['corechart', 'controls'],
+ 'callback': callback
+ });
+ })
+ .fail(function () {
+ NETDATA.chartLibraries.google.enabled = false;
+ NETDATA.error(100, NETDATA.google_js);
+ if (typeof callback === "function") {
+ return callback();
+ }
+ });
+ } else {
+ NETDATA.chartLibraries.google.enabled = false;
+ if (typeof callback === "function") {
+ return callback();
+ }
+ }
+};
+
+NETDATA.googleChartUpdate = function (state, data) {
+ let datatable = new google.visualization.DataTable(data.result);
+ state.google_instance.draw(datatable, state.google_options);
+ return true;
+};
+
+NETDATA.googleChartCreate = function (state, data) {
+ let datatable = new google.visualization.DataTable(data.result);
+
+ state.google_options = {
+ colors: state.chartColors(),
+
+ // do not set width, height - the chart resizes itself
+ //width: state.chartWidth(),
+ //height: state.chartHeight(),
+ lineWidth: 1,
+ title: state.title,
+ fontSize: 11,
+ hAxis: {
+ // title: "Time of Day",
+ // format:'HH:mm:ss',
+ viewWindowMode: 'maximized',
+ slantedText: false,
+ format: 'HH:mm:ss',
+ textStyle: {
+ fontSize: 9
+ },
+ gridlines: {
+ color: '#EEE'
+ }
+ },
+ vAxis: {
+ title: state.units_current,
+ viewWindowMode: 'pretty',
+ minValue: -0.1,
+ maxValue: 0.1,
+ direction: 1,
+ textStyle: {
+ fontSize: 9
+ },
+ gridlines: {
+ color: '#EEE'
+ }
+ },
+ chartArea: {
+ width: '65%',
+ height: '80%'
+ },
+ focusTarget: 'category',
+ annotation: {
+ '1': {
+ style: 'line'
+ }
+ },
+ pointsVisible: 0,
+ titlePosition: 'out',
+ titleTextStyle: {
+ fontSize: 11
+ },
+ tooltip: {
+ isHtml: false,
+ ignoreBounds: true,
+ textStyle: {
+ fontSize: 9
+ }
+ },
+ curveType: 'function',
+ areaOpacity: 0.3,
+ isStacked: false
+ };
+
+ switch (state.chart.chart_type) {
+ case "area":
+ state.google_options.vAxis.viewWindowMode = 'maximized';
+ state.google_options.areaOpacity = NETDATA.options.current.color_fill_opacity_area;
+ state.google_instance = new google.visualization.AreaChart(state.element_chart);
+ break;
+
+ case "stacked":
+ state.google_options.isStacked = true;
+ state.google_options.areaOpacity = NETDATA.options.current.color_fill_opacity_stacked;
+ state.google_options.vAxis.viewWindowMode = 'maximized';
+ state.google_options.vAxis.minValue = null;
+ state.google_options.vAxis.maxValue = null;
+ state.google_instance = new google.visualization.AreaChart(state.element_chart);
+ break;
+
+ default: // any unknown chart type falls through and is rendered as a line chart
+ case "line":
+ state.google_options.lineWidth = 2;
+ state.google_instance = new google.visualization.LineChart(state.element_chart);
+ break;
+ }
+
+ state.google_instance.draw(datatable, state.google_options);
+ return true;
+};
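+
+// Example (illustrative): like the other renderers, Google Charts can be
+// disabled by defining the global checked in NETDATA.googleInitialize()
+// above, before this script loads:
+//
+//     <script>var netdataNoGoogleCharts = true;</script>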
+// gauge.js
+
+NETDATA.gaugeInitialize = function (callback) {
+ if (typeof netdataNoGauge === 'undefined' || !netdataNoGauge) {
+ $.ajax({
+ url: NETDATA.gauge_js,
+ cache: true,
+ dataType: "script",
+ xhrFields: {withCredentials: true} // required for the cookie
+ })
+ .done(function () {
+ NETDATA.registerChartLibrary('gauge', NETDATA.gauge_js);
+ })
+ .fail(function () {
+ NETDATA.chartLibraries.gauge.enabled = false;
+ NETDATA.error(100, NETDATA.gauge_js);
+ })
+ .always(function () {
+ if (typeof callback === "function") {
+ return callback();
+ }
+ });
+ } else {
+ NETDATA.chartLibraries.gauge.enabled = false;
+ if (typeof callback === "function") {
+ return callback();
+ }
+ }
+};
+
+NETDATA.gaugeAnimation = function (state, status) {
+ let speed = 32;
+
+ if (typeof status === 'boolean' && status === false) {
+ speed = 1000000000;
+ } else if (typeof status === 'number') {
+ speed = status;
+ }
+
+ // console.log('gauge speed ' + speed);
+ state.tmp.gauge_instance.animationSpeed = speed;
+ state.tmp.___gaugeOld__.speed = speed;
+};
+
+NETDATA.gaugeSet = function (state, value, min, max) {
+ if (typeof value !== 'number') {
+ value = 0;
+ }
+ if (typeof min !== 'number') {
+ min = 0;
+ }
+ if (typeof max !== 'number') {
+ max = 0;
+ }
+ if (value > max) {
+ max = value;
+ }
+ if (value < min) {
+ min = value;
+ }
+ if (min > max) {
+ let t = min;
+ min = max;
+ max = t;
+ } else if (min === max) {
+ max = min + 1;
+ }
+
+ state.legendFormatValueDecimalsFromMinMax(min, max);
+
+ // gauge.js has an issue if the needle
+ // is smaller than min or larger than max
+ // when we set the new values
+ // the needle will go crazy
+
+ // to prevent it, we always feed it
+ // with a percentage, so that the needle
+ // is always between min and max
+ let pcent = (value - min) * 100 / (max - min);
+
+ // bug fix for gauge.js 1.3.1
+ // if the value is the absolute min or max, the chart is broken
+ if (pcent < 0.001) {
+ pcent = 0.001;
+ }
+ if (pcent > 99.999) {
+ pcent = 99.999;
+ }
+
+ state.tmp.gauge_instance.set(pcent);
+ // console.log('gauge set ' + pcent + ', value ' + value + ', min ' + min + ', max ' + max);
+
+ state.tmp.___gaugeOld__.value = value;
+ state.tmp.___gaugeOld__.min = min;
+ state.tmp.___gaugeOld__.max = max;
+};
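+
+// Worked example (illustrative): value = 30, min = 0, max = 120 gives
+// pcent = (30 - 0) * 100 / (120 - 0) = 25, inside the clamped
+// [0.001, 99.999] band, so the needle is set to 25% of the arc.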
+
+NETDATA.gaugeSetLabels = function (state, value, min, max) {
+ if (state.tmp.___gaugeOld__.valueLabel !== value) {
+ state.tmp.___gaugeOld__.valueLabel = value;
+ state.tmp.gaugeChartLabel.innerText = state.legendFormatValue(value);
+ }
+ if (state.tmp.___gaugeOld__.minLabel !== min) {
+ state.tmp.___gaugeOld__.minLabel = min;
+ state.tmp.gaugeChartMin.innerText = state.legendFormatValue(min);
+ }
+ if (state.tmp.___gaugeOld__.maxLabel !== max) {
+ state.tmp.___gaugeOld__.maxLabel = max;
+ state.tmp.gaugeChartMax.innerText = state.legendFormatValue(max);
+ }
+};
+
+NETDATA.gaugeClearSelection = function (state, force) {
+ if (typeof state.tmp.gaugeEvent !== 'undefined' && typeof state.tmp.gaugeEvent.timer !== 'undefined') {
+ NETDATA.timeout.clear(state.tmp.gaugeEvent.timer);
+ state.tmp.gaugeEvent.timer = undefined;
+ }
+
+ if (state.isAutoRefreshable() && state.data !== null && force !== true) {
+ NETDATA.gaugeChartUpdate(state, state.data);
+ } else {
+ NETDATA.gaugeAnimation(state, false);
+ NETDATA.gaugeSetLabels(state, null, null, null);
+ NETDATA.gaugeSet(state, null, null, null);
+ }
+
+ NETDATA.gaugeAnimation(state, true);
+ return true;
+};
+
+NETDATA.gaugeSetSelection = function (state, t) {
+ if (state.timeIsVisible(t) !== true) {
+ return NETDATA.gaugeClearSelection(state, true);
+ }
+
+ let slot = state.calculateRowForTime(t);
+ if (slot < 0 || slot >= state.data.result.length) {
+ return NETDATA.gaugeClearSelection(state, true);
+ }
+
+ if (typeof state.tmp.gaugeEvent === 'undefined') {
+ state.tmp.gaugeEvent = {
+ timer: undefined,
+ value: 0,
+ min: 0,
+ max: 0
+ };
+ }
+
+ let value = state.data.result[state.data.result.length - 1 - slot];
+ let min = (state.tmp.gaugeMin === null) ? NETDATA.commonMin.get(state) : state.tmp.gaugeMin;
+ let max = (state.tmp.gaugeMax === null) ? NETDATA.commonMax.get(state) : state.tmp.gaugeMax;
+
+ // make sure it is zero based
+ // but only if it has not been set by the user
+ if (state.tmp.gaugeMin === null && min > 0) {
+ min = 0;
+ }
+ if (state.tmp.gaugeMax === null && max < 0) {
+ max = 0;
+ }
+
+ state.tmp.gaugeEvent.value = value;
+ state.tmp.gaugeEvent.min = min;
+ state.tmp.gaugeEvent.max = max;
+ NETDATA.gaugeSetLabels(state, value, min, max);
+
+ if (state.tmp.gaugeEvent.timer === undefined) {
+ NETDATA.gaugeAnimation(state, false);
+
+ state.tmp.gaugeEvent.timer = NETDATA.timeout.set(function () {
+ state.tmp.gaugeEvent.timer = undefined;
+ NETDATA.gaugeSet(state, state.tmp.gaugeEvent.value, state.tmp.gaugeEvent.min, state.tmp.gaugeEvent.max);
+ }, 0);
+ }
+
+ return true;
+};
+
+NETDATA.gaugeChartUpdate = function (state, data) {
+ let value, min, max;
+
+ if (NETDATA.globalPanAndZoom.isActive() || state.isAutoRefreshable() === false) {
+ NETDATA.gaugeSetLabels(state, null, null, null);
+ state.tmp.gauge_instance.set(0);
+ } else {
+ value = data.result[0];
+ min = (state.tmp.gaugeMin === null) ? NETDATA.commonMin.get(state) : state.tmp.gaugeMin;
+ max = (state.tmp.gaugeMax === null) ? NETDATA.commonMax.get(state) : state.tmp.gaugeMax;
+ if (value < min) {
+ min = value;
+ }
+ if (value > max) {
+ max = value;
+ }
+
+ // make sure it is zero based
+ // but only if it has not been set by the user
+ if (state.tmp.gaugeMin === null && min > 0) {
+ min = 0;
+ }
+ if (state.tmp.gaugeMax === null && max < 0) {
+ max = 0;
+ }
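+ // illustrative example: with auto-ranged, all-positive data
+ // (min=4, max=10) the gauge spans 0..10; with all-negative data
+ // (min=-10, max=-4) it spans -10..0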
+
+ NETDATA.gaugeSet(state, value, min, max);
+ NETDATA.gaugeSetLabels(state, value, min, max);
+ }
+
+ return true;
+};
+
+NETDATA.gaugeChartCreate = function (state, data) {
+ // let chart = $(state.element_chart);
+
+ let value = data.result[0];
+ let min = NETDATA.dataAttribute(state.element, 'gauge-min-value', null);
+ let max = NETDATA.dataAttribute(state.element, 'gauge-max-value', null);
+ // let adjust = NETDATA.dataAttribute(state.element, 'gauge-adjust', null);
+ let pointerColor = NETDATA.dataAttribute(state.element, 'gauge-pointer-color', NETDATA.themes.current.gauge_pointer);
+ let strokeColor = NETDATA.dataAttribute(state.element, 'gauge-stroke-color', NETDATA.themes.current.gauge_stroke);
+ let startColor = NETDATA.dataAttribute(state.element, 'gauge-start-color', state.chartCustomColors()[0]);
+ let stopColor = NETDATA.dataAttribute(state.element, 'gauge-stop-color', void 0);
+ let generateGradient = NETDATA.dataAttribute(state.element, 'gauge-generate-gradient', false);
+
+ if (min === null) {
+ min = NETDATA.commonMin.get(state);
+ state.tmp.gaugeMin = null;
+ } else {
+ state.tmp.gaugeMin = min;
+ }
+
+ if (max === null) {
+ max = NETDATA.commonMax.get(state);
+ state.tmp.gaugeMax = null;
+ } else {
+ state.tmp.gaugeMax = max;
+ }
+
+ // make sure it is zero based
+ // but only if it has not been set by the user
+ if (state.tmp.gaugeMin === null && min > 0) {
+ min = 0;
+ }
+ if (state.tmp.gaugeMax === null && max < 0) {
+ max = 0;
+ }
+
+ let width = state.chartWidth(), height = state.chartHeight(); //, ratio = 1.5;
+ // console.log('gauge width: ' + width.toString() + ', height: ' + height.toString());
+ //switch(adjust) {
+ // case 'width': width = height * ratio; break;
+ // case 'height':
+ // default: height = width / ratio; break;
+ //}
+ //state.element.style.width = width.toString() + 'px';
+ //state.element.style.height = height.toString() + 'px';
+
+ let lum_d = 0.05;
+
+ let options = {
+ lines: 12, // The number of lines to draw
+ angle: 0.14, // The span of the gauge arc
+ lineWidth: 0.57, // The line thickness
+ radiusScale: 1.0, // Relative radius
+ pointer: {
+ length: 0.85, // Pointer length, as a fraction of the gauge radius
+ strokeWidth: 0.045, // Pointer thickness
+ color: pointerColor // Pointer fill color
+ },
+ limitMax: true, // If false, the max of the gauge is raised whenever the value surpasses it
+ limitMin: true, // If true, the min value of the gauge is fixed unless set manually
+ colorStart: startColor, // Arc color at the start of the gauge
+ colorStop: stopColor, // Arc color at the end of the gauge
+ strokeColor: strokeColor, // Color of the background arc
+ generateGradient: (generateGradient === true), // Boolean only here; the array form is handled below
+ gradientType: 0,
+ highDpiSupport: true // High resolution support
+ };
+
+ if (generateGradient.constructor === Array) {
+ // example options:
+ // data-gauge-generate-gradient="[0, 50, 100]"
+ // data-gauge-gradient-percent-color-0="#FFFFFF"
+ // data-gauge-gradient-percent-color-50="#999900"
+ // data-gauge-gradient-percent-color-100="#000000"
+
+ options.percentColors = [];
+ let len = generateGradient.length;
+ while (len--) {
+ let pcent = generateGradient[len];
+ let color = NETDATA.dataAttribute(state.element, 'gauge-gradient-percent-color-' + pcent.toString(), false);
+ if (color !== false) {
+ let a = [];
+ a[0] = pcent / 100;
+ a[1] = color;
+ options.percentColors.unshift(a);
+ }
+ }
+ if (options.percentColors.length === 0) {
+ delete options.percentColors;
+ }
+ } else if (generateGradient === false && NETDATA.themes.current.gauge_gradient) {
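+ // builds an 11-stop gradient: the arc fades from a strongly
+ // lightened startColor at 0% (luminance raised by lum_d * 10 = 0.5)
+ // down to the exact startColor at 100%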
+ //noinspection PointlessArithmeticExpressionJS
+ options.percentColors = [
+ [0.0, NETDATA.colorLuminance(startColor, (lum_d * 10) - (lum_d * 0))],
+ [0.1, NETDATA.colorLuminance(startColor, (lum_d * 10) - (lum_d * 1))],
+ [0.2, NETDATA.colorLuminance(startColor, (lum_d * 10) - (lum_d * 2))],
+ [0.3, NETDATA.colorLuminance(startColor, (lum_d * 10) - (lum_d * 3))],
+ [0.4, NETDATA.colorLuminance(startColor, (lum_d * 10) - (lum_d * 4))],
+ [0.5, NETDATA.colorLuminance(startColor, (lum_d * 10) - (lum_d * 5))],
+ [0.6, NETDATA.colorLuminance(startColor, (lum_d * 10) - (lum_d * 6))],
+ [0.7, NETDATA.colorLuminance(startColor, (lum_d * 10) - (lum_d * 7))],
+ [0.8, NETDATA.colorLuminance(startColor, (lum_d * 10) - (lum_d * 8))],
+ [0.9, NETDATA.colorLuminance(startColor, (lum_d * 10) - (lum_d * 9))],
+ [1.0, NETDATA.colorLuminance(startColor, 0.0)]];
+ }
+
+ state.tmp.gauge_canvas = document.createElement('canvas');
+ state.tmp.gauge_canvas.id = 'gauge-' + state.uuid + '-canvas';
+ state.tmp.gauge_canvas.className = 'gaugeChart';
+ state.tmp.gauge_canvas.width = width;
+ state.tmp.gauge_canvas.height = height;
+ state.element_chart.appendChild(state.tmp.gauge_canvas);
+
+ let valuefontsize = Math.floor(height / 5);
+ let valuetop = Math.round((height - valuefontsize) / 3.2);
+ state.tmp.gaugeChartLabel = document.createElement('span');
+ state.tmp.gaugeChartLabel.className = 'gaugeChartLabel';
+ state.tmp.gaugeChartLabel.style.fontSize = valuefontsize + 'px';
+ state.tmp.gaugeChartLabel.style.top = valuetop.toString() + 'px';
+ state.element_chart.appendChild(state.tmp.gaugeChartLabel);
+
+ let titlefontsize = Math.round(valuefontsize / 2.1);
+ let titletop = 0;
+ state.tmp.gaugeChartTitle = document.createElement('span');
+ state.tmp.gaugeChartTitle.className = 'gaugeChartTitle';
+ state.tmp.gaugeChartTitle.innerText = state.title;
+ state.tmp.gaugeChartTitle.style.fontSize = titlefontsize + 'px';
+ state.tmp.gaugeChartTitle.style.lineHeight = titlefontsize + 'px';
+ state.tmp.gaugeChartTitle.style.top = titletop.toString() + 'px';
+ state.element_chart.appendChild(state.tmp.gaugeChartTitle);
+
+ let unitfontsize = Math.round(titlefontsize * 0.9);
+ state.tmp.gaugeChartUnits = document.createElement('span');
+ state.tmp.gaugeChartUnits.className = 'gaugeChartUnits';
+ state.tmp.gaugeChartUnits.innerText = state.units_current;
+ state.tmp.gaugeChartUnits.style.fontSize = unitfontsize + 'px';
+ state.element_chart.appendChild(state.tmp.gaugeChartUnits);
+
+ state.tmp.gaugeChartMin = document.createElement('span');
+ state.tmp.gaugeChartMin.className = 'gaugeChartMin';
+ state.tmp.gaugeChartMin.style.fontSize = Math.round(valuefontsize * 0.75).toString() + 'px';
+ state.element_chart.appendChild(state.tmp.gaugeChartMin);
+
+ state.tmp.gaugeChartMax = document.createElement('span');
+ state.tmp.gaugeChartMax.className = 'gaugeChartMax';
+ state.tmp.gaugeChartMax.style.fontSize = Math.round(valuefontsize * 0.75).toString() + 'px';
+ state.element_chart.appendChild(state.tmp.gaugeChartMax);
+
+ // when we just re-create the chart
+ // do not animate the first update
+ let animate = true;
+ if (typeof state.tmp.gauge_instance !== 'undefined') {
+ animate = false;
+ }
+
+ state.tmp.gauge_instance = new Gauge(state.tmp.gauge_canvas).setOptions(options); // create sexy gauge!
+
+ state.tmp.___gaugeOld__ = {
+ value: value,
+ min: min,
+ max: max,
+ valueLabel: null,
+ minLabel: null,
+ maxLabel: null
+ };
+
+ // we will always feed a percentage
+ state.tmp.gauge_instance.minValue = 0;
+ state.tmp.gauge_instance.maxValue = 100;
+
+ NETDATA.gaugeAnimation(state, animate);
+ NETDATA.gaugeSet(state, value, min, max);
+ NETDATA.gaugeSetLabels(state, value, min, max);
+ NETDATA.gaugeAnimation(state, true);
+
+ state.legendSetUnitsString = function (units) {
+ if (typeof state.tmp.gaugeChartUnits !== 'undefined' && state.tmp.units !== units) {
+ state.tmp.gaugeChartUnits.innerText = units;
+ state.tmp.___gaugeOld__.valueLabel = null;
+ state.tmp.___gaugeOld__.minLabel = null;
+ state.tmp.___gaugeOld__.maxLabel = null;
+ state.tmp.units = units;
+ }
+ };
+ state.legendShowUndefined = function () {
+ if (typeof state.tmp.gauge_instance !== 'undefined') {
+ NETDATA.gaugeClearSelection(state);
+ }
+ };
+
+ return true;
+};
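+
+// Illustrative usage (attribute names as consumed above; the chart id is hypothetical):
+// <div data-netdata="system.cpu"
+// data-chart-library="gauge"
+// data-gauge-max-value="100"
+// data-gauge-pointer-color="#999"
+// ></div>
+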
+// ----------------------------------------------------------------------------------------------------------------
+
+NETDATA.easypiechartPercentFromValueMinMax = function (state, value, min, max) {
+ if (typeof value !== 'number') {
+ value = 0;
+ }
+ if (typeof min !== 'number') {
+ min = 0;
+ }
+ if (typeof max !== 'number') {
+ max = 0;
+ }
+
+ if (min > max) {
+ let t = min;
+ min = max;
+ max = t;
+ }
+
+ if (min > value) {
+ min = value;
+ }
+ if (max < value) {
+ max = value;
+ }
+
+ state.legendFormatValueDecimalsFromMinMax(min, max);
+
+ if (state.tmp.easyPieChartMin === null && min > 0) {
+ min = 0;
+ }
+ if (state.tmp.easyPieChartMax === null && max < 0) {
+ max = 0;
+ }
+
+ let pcent;
+
+ if (min < 0 && max > 0) {
+ // it is both positive and negative
+ // zero at the top center of the chart
+ max = (-min > max) ? -min : max;
+ pcent = Math.round(value * 100 / max);
+ } else if (value >= 0 && min >= 0 && max >= 0) {
+ // clockwise
+ pcent = Math.round((value - min) * 100 / (max - min));
+ if (pcent === 0) {
+ pcent = 0.1;
+ }
+ } else {
+ // counter clockwise
+ pcent = Math.round((value - max) * 100 / (max - min));
+ if (pcent === 0) {
+ pcent = -0.1;
+ }
+ }
+
+ return pcent;
+};
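+
+// Illustrative examples of the mapping above:
+// value=5, min=0, max=10 -> round(5 * 100 / 10) = 50 (clockwise)
+// value=-5, min=-10, max=0 -> round(-5 * 100 / 10) = -50 (counter clockwise)
+// value=2, min=-4, max=4 -> max becomes 4, round(2 * 100 / 4) = 50 (zero at top center)
+// a rounded 0 is nudged to +/-0.1 so the chart always renders a sliver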
+
+// ----------------------------------------------------------------------------------------------------------------
+// easy-pie-chart
+
+NETDATA.easypiechartInitialize = function (callback) {
+ if (typeof netdataNoEasyPieChart === 'undefined' || !netdataNoEasyPieChart) {
+ $.ajax({
+ url: NETDATA.easypiechart_js,
+ cache: true,
+ dataType: "script",
+ xhrFields: {withCredentials: true} // required for the cookie
+ })
+ .done(function () {
+ NETDATA.registerChartLibrary('easypiechart', NETDATA.easypiechart_js);
+ })
+ .fail(function () {
+ NETDATA.chartLibraries.easypiechart.enabled = false;
+ NETDATA.error(100, NETDATA.easypiechart_js);
+ })
+ .always(function () {
+ if (typeof callback === "function") {
+ return callback();
+ }
+ })
+ } else {
+ NETDATA.chartLibraries.easypiechart.enabled = false;
+ if (typeof callback === "function") {
+ return callback();
+ }
+ }
+};
+
+NETDATA.easypiechartClearSelection = function (state, force) {
+ if (typeof state.tmp.easyPieChartEvent !== 'undefined' && typeof state.tmp.easyPieChartEvent.timer !== 'undefined') {
+ NETDATA.timeout.clear(state.tmp.easyPieChartEvent.timer);
+ state.tmp.easyPieChartEvent.timer = undefined;
+ }
+
+ if (state.isAutoRefreshable() && state.data !== null && force !== true) {
+ NETDATA.easypiechartChartUpdate(state, state.data);
+ }
+ else {
+ state.tmp.easyPieChartLabel.innerText = state.legendFormatValue(null);
+ state.tmp.easyPieChart_instance.update(0);
+ }
+ state.tmp.easyPieChart_instance.enableAnimation();
+
+ return true;
+};
+
+NETDATA.easypiechartSetSelection = function (state, t) {
+ if (state.timeIsVisible(t) !== true) {
+ return NETDATA.easypiechartClearSelection(state, true);
+ }
+
+ let slot = state.calculateRowForTime(t);
+ if (slot < 0 || slot >= state.data.result.length) {
+ return NETDATA.easypiechartClearSelection(state, true);
+ }
+
+ if (typeof state.tmp.easyPieChartEvent === 'undefined') {
+ state.tmp.easyPieChartEvent = {
+ timer: undefined,
+ value: 0,
+ pcent: 0
+ };
+ }
+
+ let value = state.data.result[state.data.result.length - 1 - slot];
+ let min = (state.tmp.easyPieChartMin === null) ? NETDATA.commonMin.get(state) : state.tmp.easyPieChartMin;
+ let max = (state.tmp.easyPieChartMax === null) ? NETDATA.commonMax.get(state) : state.tmp.easyPieChartMax;
+ let pcent = NETDATA.easypiechartPercentFromValueMinMax(state, value, min, max);
+
+ state.tmp.easyPieChartEvent.value = value;
+ state.tmp.easyPieChartEvent.pcent = pcent;
+ state.tmp.easyPieChartLabel.innerText = state.legendFormatValue(value);
+
+ if (state.tmp.easyPieChartEvent.timer === undefined) {
+ state.tmp.easyPieChart_instance.disableAnimation();
+
+ state.tmp.easyPieChartEvent.timer = NETDATA.timeout.set(function () {
+ state.tmp.easyPieChartEvent.timer = undefined;
+ state.tmp.easyPieChart_instance.update(state.tmp.easyPieChartEvent.pcent);
+ }, 0);
+ }
+
+ return true;
+};
+
+NETDATA.easypiechartChartUpdate = function (state, data) {
+ let value, min, max, pcent;
+
+ if (NETDATA.globalPanAndZoom.isActive() || state.isAutoRefreshable() === false) {
+ value = null;
+ pcent = 0;
+ }
+ else {
+ value = data.result[0];
+ min = (state.tmp.easyPieChartMin === null) ? NETDATA.commonMin.get(state) : state.tmp.easyPieChartMin;
+ max = (state.tmp.easyPieChartMax === null) ? NETDATA.commonMax.get(state) : state.tmp.easyPieChartMax;
+ pcent = NETDATA.easypiechartPercentFromValueMinMax(state, value, min, max);
+ }
+
+ state.tmp.easyPieChartLabel.innerText = state.legendFormatValue(value);
+ state.tmp.easyPieChart_instance.update(pcent);
+ return true;
+};
+
+NETDATA.easypiechartChartCreate = function (state, data) {
+ let chart = $(state.element_chart);
+
+ let value = data.result[0];
+ let min = NETDATA.dataAttribute(state.element, 'easypiechart-min-value', null);
+ let max = NETDATA.dataAttribute(state.element, 'easypiechart-max-value', null);
+
+ if (min === null) {
+ min = NETDATA.commonMin.get(state);
+ state.tmp.easyPieChartMin = null;
+ }
+ else {
+ state.tmp.easyPieChartMin = min;
+ }
+
+ if (max === null) {
+ max = NETDATA.commonMax.get(state);
+ state.tmp.easyPieChartMax = null;
+ }
+ else {
+ state.tmp.easyPieChartMax = max;
+ }
+
+ let size = state.chartWidth();
+ let stroke = Math.floor(size / 22);
+ if (stroke < 3) {
+ stroke = 2;
+ }
+
+ let valuefontsize = Math.floor((size * 2 / 3) / 5);
+ let valuetop = Math.round((size - valuefontsize - (size / 40)) / 2);
+ state.tmp.easyPieChartLabel = document.createElement('span');
+ state.tmp.easyPieChartLabel.className = 'easyPieChartLabel';
+ state.tmp.easyPieChartLabel.innerText = state.legendFormatValue(value);
+ state.tmp.easyPieChartLabel.style.fontSize = valuefontsize + 'px';
+ state.tmp.easyPieChartLabel.style.top = valuetop.toString() + 'px';
+ state.element_chart.appendChild(state.tmp.easyPieChartLabel);
+
+ let titlefontsize = Math.round(valuefontsize * 1.6 / 3);
+ let titletop = Math.round(valuetop - (titlefontsize * 2) - (size / 40));
+ state.tmp.easyPieChartTitle = document.createElement('span');
+ state.tmp.easyPieChartTitle.className = 'easyPieChartTitle';
+ state.tmp.easyPieChartTitle.innerText = state.title;
+ state.tmp.easyPieChartTitle.style.fontSize = titlefontsize + 'px';
+ state.tmp.easyPieChartTitle.style.lineHeight = titlefontsize + 'px';
+ state.tmp.easyPieChartTitle.style.top = titletop.toString() + 'px';
+ state.element_chart.appendChild(state.tmp.easyPieChartTitle);
+
+ let unitfontsize = Math.round(titlefontsize * 0.9);
+ let unittop = Math.round(valuetop + (valuefontsize + unitfontsize) + (size / 40));
+ state.tmp.easyPieChartUnits = document.createElement('span');
+ state.tmp.easyPieChartUnits.className = 'easyPieChartUnits';
+ state.tmp.easyPieChartUnits.innerText = state.units_current;
+ state.tmp.easyPieChartUnits.style.fontSize = unitfontsize + 'px';
+ state.tmp.easyPieChartUnits.style.top = unittop.toString() + 'px';
+ state.element_chart.appendChild(state.tmp.easyPieChartUnits);
+
+ let barColor = NETDATA.dataAttribute(state.element, 'easypiechart-barcolor', undefined);
+ if (typeof barColor === 'undefined' || barColor === null) {
+ barColor = state.chartCustomColors()[0];
+ } else {
+ // <div ... data-easypiechart-barcolor="(function(percent){return(percent < 50 ? '#5cb85c' : percent < 85 ? '#f0ad4e' : '#cb3935');})" ...></div>
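+ // barColor may also carry the source of a function; eval() turns it
+ // into a callable (only safe on dashboards whose markup you control)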
+ let tmp = eval(barColor);
+ if (typeof tmp === 'function') {
+ barColor = tmp;
+ }
+ }
+
+ let pcent = NETDATA.easypiechartPercentFromValueMinMax(state, value, min, max);
+ chart.data('data-percent', pcent);
+
+ chart.easyPieChart({
+ barColor: barColor,
+ trackColor: NETDATA.dataAttribute(state.element, 'easypiechart-trackcolor', NETDATA.themes.current.easypiechart_track),
+ scaleColor: NETDATA.dataAttribute(state.element, 'easypiechart-scalecolor', NETDATA.themes.current.easypiechart_scale),
+ scaleLength: NETDATA.dataAttribute(state.element, 'easypiechart-scalelength', 5),
+ lineCap: NETDATA.dataAttribute(state.element, 'easypiechart-linecap', 'round'),
+ lineWidth: NETDATA.dataAttribute(state.element, 'easypiechart-linewidth', stroke),
+ trackWidth: NETDATA.dataAttribute(state.element, 'easypiechart-trackwidth', undefined),
+ size: NETDATA.dataAttribute(state.element, 'easypiechart-size', size),
+ rotate: NETDATA.dataAttribute(state.element, 'easypiechart-rotate', 0),
+ animate: NETDATA.dataAttribute(state.element, 'easypiechart-animate', {duration: 500, enabled: true}),
+ easing: NETDATA.dataAttribute(state.element, 'easypiechart-easing', undefined)
+ });
+
+ // when we just re-create the chart
+ // do not animate the first update
+ let animate = true;
+ if (typeof state.tmp.easyPieChart_instance !== 'undefined') {
+ animate = false;
+ }
+
+ state.tmp.easyPieChart_instance = chart.data('easyPieChart');
+ if (animate === false) {
+ state.tmp.easyPieChart_instance.disableAnimation();
+ }
+ state.tmp.easyPieChart_instance.update(pcent);
+ if (animate === false) {
+ state.tmp.easyPieChart_instance.enableAnimation();
+ }
+
+ state.legendSetUnitsString = function (units) {
+ if (typeof state.tmp.easyPieChartUnits !== 'undefined' && state.tmp.units !== units) {
+ state.tmp.easyPieChartUnits.innerText = units;
+ state.tmp.units = units;
+ }
+ };
+ state.legendShowUndefined = function () {
+ if (typeof state.tmp.easyPieChart_instance !== 'undefined') {
+ NETDATA.easypiechartClearSelection(state);
+ }
+ };
+
+ return true;
+};
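+
+// Illustrative usage (attribute names as consumed above; the chart id is hypothetical):
+// <div data-netdata="system.cpu"
+// data-chart-library="easypiechart"
+// data-easypiechart-max-value="100"
+// data-easypiechart-linecap="butt"
+// ></div>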
+
+// ----------------------------------------------------------------------------------------------------------------
+// d3pie
+
+NETDATA.d3pieInitialize = function (callback) {
+ if (typeof netdataNoD3pie === 'undefined' || !netdataNoD3pie) {
+
+ // d3pie requires D3
+ if (!NETDATA.chartLibraries.d3.initialized) {
+ if (NETDATA.chartLibraries.d3.enabled) {
+ NETDATA.d3Initialize(function () {
+ NETDATA.d3pieInitialize(callback);
+ });
+ } else {
+ NETDATA.chartLibraries.d3pie.enabled = false;
+ if (typeof callback === "function") {
+ return callback();
+ }
+ }
+ } else {
+ $.ajax({
+ url: NETDATA.d3pie_js,
+ cache: true,
+ dataType: "script",
+ xhrFields: {withCredentials: true} // required for the cookie
+ })
+ .done(function () {
+ NETDATA.registerChartLibrary('d3pie', NETDATA.d3pie_js);
+ })
+ .fail(function () {
+ NETDATA.chartLibraries.d3pie.enabled = false;
+ NETDATA.error(100, NETDATA.d3pie_js);
+ })
+ .always(function () {
+ if (typeof callback === "function") {
+ return callback();
+ }
+ });
+ }
+ } else {
+ NETDATA.chartLibraries.d3pie.enabled = false;
+ if (typeof callback === "function") {
+ return callback();
+ }
+ }
+};
+
+NETDATA.d3pieSetContent = function (state, data, index) {
+ state.legendFormatValueDecimalsFromMinMax(
+ data.min,
+ data.max
+ );
+
+ let content = [];
+ let colors = state.chartColors();
+ let len = data.result.labels.length;
+ for (let i = 1; i < len; i++) {
+ let label = data.result.labels[i];
+ let value = data.result.data[index][label];
+ let color = colors[i - 1];
+
+ if (value !== null && value > 0) {
+ content.push({
+ label: label,
+ value: value,
+ color: color
+ });
+ }
+ }
+
+ if (content.length === 0) {
+ content.push({
+ label: 'no data',
+ value: 100,
+ color: '#666666'
+ });
+ }
+
+ state.tmp.d3pie_last_slot = index;
+ return content;
+};
+
+NETDATA.d3pieDateRange = function (state, data, index) {
+ let dt = Math.round((data.before - data.after + 1) / data.points);
+ let dt_str = NETDATA.seconds4human(dt);
+
+ let before = data.result.data[index].time;
+ let after = before - (dt * 1000);
+
+ let d1 = NETDATA.dateTime.localeDateString(after);
+ let t1 = NETDATA.dateTime.localeTimeString(after);
+ let d2 = NETDATA.dateTime.localeDateString(before);
+ let t2 = NETDATA.dateTime.localeTimeString(before);
+
+ if (d1 === d2) {
+ return d1 + ' ' + t1 + ' to ' + t2 + ', ' + dt_str;
+ }
+
+ return d1 + ' ' + t1 + ' to ' + d2 + ' ' + t2 + ', ' + dt_str;
+};
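+
+// Illustrative example: a 60-point query spanning 10 minutes gives
+// dt = 10 (seconds); for a row at 14:05:00 the footer reads something
+// like "27/04/2024 14:04:50 to 14:05:00, 10 seconds" (locale dependent)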
+
+NETDATA.d3pieSetSelection = function (state, t) {
+ if (state.timeIsVisible(t) !== true) {
+ return NETDATA.d3pieClearSelection(state, true);
+ }
+
+ let slot = state.calculateRowForTime(t);
+ slot = state.data.result.data.length - slot - 1;
+
+ if (slot < 0 || slot >= state.data.result.data.length) {
+ return NETDATA.d3pieClearSelection(state, true);
+ }
+
+ if (state.tmp.d3pie_last_slot === slot) {
+ // we already show this slot, don't do anything
+ return true;
+ }
+
+ if (state.tmp.d3pie_timer === undefined) {
+ state.tmp.d3pie_timer = NETDATA.timeout.set(function () {
+ state.tmp.d3pie_timer = undefined;
+ NETDATA.d3pieChange(state, NETDATA.d3pieSetContent(state, state.data, slot), NETDATA.d3pieDateRange(state, state.data, slot));
+ }, 0);
+ }
+
+ return true;
+};
+
+NETDATA.d3pieClearSelection = function (state, force) {
+ if (typeof state.tmp.d3pie_timer !== 'undefined') {
+ NETDATA.timeout.clear(state.tmp.d3pie_timer);
+ state.tmp.d3pie_timer = undefined;
+ }
+
+ if (state.isAutoRefreshable() && state.data !== null && force !== true) {
+ NETDATA.d3pieChartUpdate(state, state.data);
+ } else {
+ if (state.tmp.d3pie_last_slot !== -1) {
+ state.tmp.d3pie_last_slot = -1;
+ NETDATA.d3pieChange(state, [{label: 'no data', value: 1, color: '#666666'}], 'no data available');
+ }
+ }
+
+ return true;
+};
+
+NETDATA.d3pieChange = function (state, content, footer) {
+ if (state.d3pie_forced_subtitle === null) {
+ //state.d3pie_instance.updateProp("header.subtitle.text", state.units_current);
+ state.d3pie_instance.options.header.subtitle.text = state.units_current;
+ }
+
+ if (state.d3pie_forced_footer === null) {
+ //state.d3pie_instance.updateProp("footer.text", footer);
+ state.d3pie_instance.options.footer.text = footer;
+ }
+
+ //state.d3pie_instance.updateProp("data.content", content);
+ state.d3pie_instance.options.data.content = content;
+ state.d3pie_instance.destroy();
+ state.d3pie_instance.recreate();
+ return true;
+};
+
+NETDATA.d3pieChartUpdate = function (state, data) {
+ return NETDATA.d3pieChange(state, NETDATA.d3pieSetContent(state, data, 0), NETDATA.d3pieDateRange(state, data, 0));
+};
+
+NETDATA.d3pieChartCreate = function (state, data) {
+
+ state.element_chart.id = 'd3pie-' + state.uuid;
+ // console.log('id = ' + state.element_chart.id);
+
+ let content = NETDATA.d3pieSetContent(state, data, 0);
+
+ state.d3pie_forced_title = NETDATA.dataAttribute(state.element, 'd3pie-title', null);
+ state.d3pie_forced_subtitle = NETDATA.dataAttribute(state.element, 'd3pie-subtitle', null);
+ state.d3pie_forced_footer = NETDATA.dataAttribute(state.element, 'd3pie-footer', null);
+
+ state.d3pie_options = {
+ header: {
+ title: {
+ text: (state.d3pie_forced_title !== null) ? state.d3pie_forced_title : state.title,
+ color: NETDATA.dataAttribute(state.element, 'd3pie-title-color', NETDATA.themes.current.d3pie.title),
+ fontSize: NETDATA.dataAttribute(state.element, 'd3pie-title-fontsize', 12),
+ fontWeight: NETDATA.dataAttribute(state.element, 'd3pie-title-fontweight', "bold"),
+ font: NETDATA.dataAttribute(state.element, 'd3pie-title-font', "arial")
+ },
+ subtitle: {
+ text: (state.d3pie_forced_subtitle !== null) ? state.d3pie_forced_subtitle : state.units_current,
+ color: NETDATA.dataAttribute(state.element, 'd3pie-subtitle-color', NETDATA.themes.current.d3pie.subtitle),
+ fontSize: NETDATA.dataAttribute(state.element, 'd3pie-subtitle-fontsize', 10),
+ fontWeight: NETDATA.dataAttribute(state.element, 'd3pie-subtitle-fontweight', "normal"),
+ font: NETDATA.dataAttribute(state.element, 'd3pie-subtitle-font', "arial")
+ },
+ titleSubtitlePadding: 1
+ },
+ footer: {
+ text: (state.d3pie_forced_footer !== null) ? state.d3pie_forced_footer : NETDATA.d3pieDateRange(state, data, 0),
+ color: NETDATA.dataAttribute(state.element, 'd3pie-footer-color', NETDATA.themes.current.d3pie.footer),
+ fontSize: NETDATA.dataAttribute(state.element, 'd3pie-footer-fontsize', 9),
+ fontWeight: NETDATA.dataAttribute(state.element, 'd3pie-footer-fontweight', "bold"),
+ font: NETDATA.dataAttribute(state.element, 'd3pie-footer-font', "arial"),
+ location: NETDATA.dataAttribute(state.element, 'd3pie-footer-location', "bottom-center") // bottom-left, bottom-center, bottom-right
+ },
+ size: {
+ canvasHeight: state.chartHeight(),
+ canvasWidth: state.chartWidth(),
+ pieInnerRadius: NETDATA.dataAttribute(state.element, 'd3pie-pieinnerradius', "45%"),
+ pieOuterRadius: NETDATA.dataAttribute(state.element, 'd3pie-pieouterradius', "80%")
+ },
+ data: {
+ // none, random, value-asc, value-desc, label-asc, label-desc
+ sortOrder: NETDATA.dataAttribute(state.element, 'd3pie-sortorder', "value-desc"),
+ smallSegmentGrouping: {
+ enabled: NETDATA.dataAttributeBoolean(state.element, "d3pie-smallsegmentgrouping-enabled", false),
+ value: NETDATA.dataAttribute(state.element, 'd3pie-smallsegmentgrouping-value', 1),
+ // percentage, value
+ valueType: NETDATA.dataAttribute(state.element, 'd3pie-smallsegmentgrouping-valuetype', "percentage"),
+ label: NETDATA.dataAttribute(state.element, 'd3pie-smallsegmentgrouping-label', "other"),
+ color: NETDATA.dataAttribute(state.element, 'd3pie-smallsegmentgrouping-color', NETDATA.themes.current.d3pie.other)
+ },
+
+ // REQUIRED! This is where you enter your pie data; it needs to be an array of objects
+ // of this form: { label: "label", value: 1.5, color: "#000000" } - color is optional
+ content: content
+ },
+ labels: {
+ outer: {
+ // label, value, percentage, label-value1, label-value2, label-percentage1, label-percentage2
+ format: NETDATA.dataAttribute(state.element, 'd3pie-labels-outer-format', "label-value1"),
+ hideWhenLessThanPercentage: NETDATA.dataAttribute(state.element, 'd3pie-labels-outer-hidewhenlessthanpercentage', null),
+ pieDistance: NETDATA.dataAttribute(state.element, 'd3pie-labels-outer-piedistance', 15)
+ },
+ inner: {
+ // label, value, percentage, label-value1, label-value2, label-percentage1, label-percentage2
+ format: NETDATA.dataAttribute(state.element, 'd3pie-labels-inner-format', "percentage"),
+ hideWhenLessThanPercentage: NETDATA.dataAttribute(state.element, 'd3pie-labels-inner-hidewhenlessthanpercentage', 2)
+ },
+ mainLabel: {
+ color: NETDATA.dataAttribute(state.element, 'd3pie-labels-mainLabel-color', NETDATA.themes.current.d3pie.mainlabel), // or 'segment' for dynamic color
+ font: NETDATA.dataAttribute(state.element, 'd3pie-labels-mainLabel-font', "arial"),
+ fontSize: NETDATA.dataAttribute(state.element, 'd3pie-labels-mainLabel-fontsize', 10),
+ fontWeight: NETDATA.dataAttribute(state.element, 'd3pie-labels-mainLabel-fontweight', "normal")
+ },
+ percentage: {
+ color: NETDATA.dataAttribute(state.element, 'd3pie-labels-percentage-color', NETDATA.themes.current.d3pie.percentage),
+ font: NETDATA.dataAttribute(state.element, 'd3pie-labels-percentage-font', "arial"),
+ fontSize: NETDATA.dataAttribute(state.element, 'd3pie-labels-percentage-fontsize', 10),
+ fontWeight: NETDATA.dataAttribute(state.element, 'd3pie-labels-percentage-fontweight', "bold"),
+ decimalPlaces: 0
+ },
+ value: {
+ color: NETDATA.dataAttribute(state.element, 'd3pie-labels-value-color', NETDATA.themes.current.d3pie.value),
+ font: NETDATA.dataAttribute(state.element, 'd3pie-labels-value-font', "arial"),
+ fontSize: NETDATA.dataAttribute(state.element, 'd3pie-labels-value-fontsize', 10),
+ fontWeight: NETDATA.dataAttribute(state.element, 'd3pie-labels-value-fontweight', "bold")
+ },
+ lines: {
+ enabled: NETDATA.dataAttributeBoolean(state.element, 'd3pie-labels-lines-enabled', true),
+ style: NETDATA.dataAttribute(state.element, 'd3pie-labels-lines-style', "curved"),
+ color: NETDATA.dataAttribute(state.element, 'd3pie-labels-lines-color', "segment") // "segment" or a hex color
+ },
+ truncation: {
+ enabled: NETDATA.dataAttributeBoolean(state.element, 'd3pie-labels-truncation-enabled', false),
+ truncateLength: NETDATA.dataAttribute(state.element, 'd3pie-labels-truncation-truncatelength', 30)
+ },
+ formatter: function (context) {
+ // console.log(context);
+ if (context.part === 'value') {
+ return state.legendFormatValue(context.value);
+ }
+ if (context.part === 'percentage') {
+ return context.label + '%';
+ }
+
+ return context.label;
+ }
+ },
+ effects: {
+ load: {
+ effect: "none", // none / default
+ speed: 0 // commented in the d3pie code to speed it up
+ },
+ pullOutSegmentOnClick: {
+ effect: "bounce", // none / linear / bounce / elastic / back
+ speed: 400,
+ size: 5
+ },
+ highlightSegmentOnMouseover: true,
+ highlightLuminosity: -0.2
+ },
+ tooltips: {
+ enabled: false,
+ type: "placeholder", // caption|placeholder
+ string: "",
+ placeholderParser: null, // function
+ styles: {
+ fadeInSpeed: 250,
+ backgroundColor: NETDATA.themes.current.d3pie.tooltip_bg,
+ backgroundOpacity: 0.5,
+ color: NETDATA.themes.current.d3pie.tooltip_fg,
+ borderRadius: 2,
+ font: "arial",
+ fontSize: 12,
+ padding: 4
+ }
+ },
+ misc: {
+ colors: {
+ background: 'transparent', // transparent or color #
+ // segments: state.chartColors(),
+ segmentStroke: NETDATA.dataAttribute(state.element, 'd3pie-misc-colors-segmentstroke', NETDATA.themes.current.d3pie.segment_stroke)
+ },
+ gradient: {
+ enabled: NETDATA.dataAttributeBoolean(state.element, 'd3pie-misc-gradient-enabled', false),
+ percentage: NETDATA.dataAttribute(state.element, 'd3pie-misc-colors-percentage', 95),
+ color: NETDATA.dataAttribute(state.element, 'd3pie-misc-gradient-color', NETDATA.themes.current.d3pie.gradient_color)
+ },
+ canvasPadding: {
+ top: 5,
+ right: 5,
+ bottom: 5,
+ left: 5
+ },
+ pieCenterOffset: {
+ x: 0,
+ y: 0
+ },
+ cssPrefix: NETDATA.dataAttribute(state.element, 'd3pie-cssprefix', null)
+ },
+ callbacks: {
+ onload: null,
+ onMouseoverSegment: null,
+ onMouseoutSegment: null,
+ onClickSegment: null
+ }
+ };
+
+ state.d3pie_instance = new d3pie(state.element_chart, state.d3pie_options);
+ return true;
+};
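+
+// Illustrative usage (attribute names as consumed above; the chart id is hypothetical):
+// <div data-netdata="apps.cpu"
+// data-chart-library="d3pie"
+// data-d3pie-subtitle="percentage of CPU"
+// data-d3pie-pieinnerradius="40%"
+// data-d3pie-smallsegmentgrouping-enabled="true"
+// ></div>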
+
+// ----------------------------------------------------------------------------------------------------------------
+// D3
+
+NETDATA.d3Initialize = function(callback) {
+ if (typeof netdataStopD3 === 'undefined' || !netdataStopD3) {
+ $.ajax({
+ url: NETDATA.d3_js,
+ cache: true,
+ dataType: "script",
+ xhrFields: { withCredentials: true } // required for the cookie
+ })
+ .done(function() {
+ NETDATA.registerChartLibrary('d3', NETDATA.d3_js);
+ })
+ .fail(function() {
+ NETDATA.chartLibraries.d3.enabled = false;
+ NETDATA.error(100, NETDATA.d3_js);
+ })
+ .always(function() {
+ if (typeof callback === "function")
+ return callback();
+ });
+ } else {
+ NETDATA.chartLibraries.d3.enabled = false;
+ if (typeof callback === "function")
+ return callback();
+ }
+};
+
+NETDATA.d3ChartUpdate = function(state, data) {
+ void(state);
+ void(data);
+
+ return false;
+};
+
+NETDATA.d3ChartCreate = function(state, data) {
+ void(state);
+ void(data);
+
+ return false;
+};
+
+// ----------------------------------------------------------------------------------------------------------------
+// peity
+
+NETDATA.peityInitialize = function (callback) {
+ if (typeof netdataNoPeitys === 'undefined' || !netdataNoPeitys) {
+ $.ajax({
+ url: NETDATA.peity_js,
+ cache: true,
+ dataType: "script",
+ xhrFields: {withCredentials: true} // required for the cookie
+ })
+ .done(function () {
+ NETDATA.registerChartLibrary('peity', NETDATA.peity_js);
+ })
+ .fail(function () {
+ NETDATA.chartLibraries.peity.enabled = false;
+ NETDATA.error(100, NETDATA.peity_js);
+ })
+ .always(function () {
+ if (typeof callback === "function") {
+ return callback();
+ }
+ });
+ } else {
+ NETDATA.chartLibraries.peity.enabled = false;
+ if (typeof callback === "function") {
+ return callback();
+ }
+ }
+};
+
+NETDATA.peityChartUpdate = function (state, data) {
+ state.peity_instance.innerHTML = data.result;
+
+ if (state.peity_options.stroke !== state.chartCustomColors()[0]) {
+ state.peity_options.stroke = state.chartCustomColors()[0];
+ if (state.chart.chart_type === 'line') {
+ state.peity_options.fill = NETDATA.themes.current.background;
+ } else {
+ state.peity_options.fill = NETDATA.colorLuminance(state.chartCustomColors()[0], NETDATA.chartDefaults.fill_luminance);
+ }
+ }
+
+ $(state.peity_instance).peity('line', state.peity_options);
+ return true;
+};
+
+NETDATA.peityChartCreate = function (state, data) {
+ state.peity_instance = document.createElement('div');
+ state.element_chart.appendChild(state.peity_instance);
+
+ state.peity_options = {
+ stroke: NETDATA.themes.current.foreground,
+ strokeWidth: NETDATA.dataAttribute(state.element, 'peity-strokewidth', 1),
+ width: state.chartWidth(),
+ height: state.chartHeight(),
+ fill: NETDATA.themes.current.foreground
+ };
+
+ NETDATA.peityChartUpdate(state, data);
+ return true;
+};
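+
+// Illustrative usage (attribute name as consumed above; the chart id is hypothetical):
+// <div data-netdata="system.cpu"
+// data-chart-library="peity"
+// data-peity-strokewidth="2"
+// ></div>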
+
+// ----------------------------------------------------------------------------------------------------------------
+// "Text-only" chart - Just renders the raw value to the DOM
+
+NETDATA.textOnlyCreate = function (state, data) {
+ let decimalPlaces = NETDATA.dataAttribute(state.element, 'textonly-decimal-places', 1);
+ let prefix = NETDATA.dataAttribute(state.element, 'textonly-prefix', '');
+ let suffix = NETDATA.dataAttribute(state.element, 'textonly-suffix', '');
+
+ // round based on the number of decimal places to show
+ let precision = Math.pow(10, decimalPlaces);
+ let value = Math.round(data.result[0] * precision) / precision;
+
+ state.element.textContent = prefix + value + suffix;
+ return true;
+};
+
+NETDATA.textOnlyUpdate = NETDATA.textOnlyCreate;
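+
+// Illustrative usage (attribute names as consumed above; the chart id is hypothetical):
+// <div data-netdata="system.cpu"
+// data-chart-library="textonly"
+// data-textonly-decimal-places="2"
+// data-textonly-prefix="CPU: "
+// data-textonly-suffix="%"
+// ></div>
+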
+// ----------------------------------------------------------------------------------------------------------------
+// Charts Libraries Registration
+
+NETDATA.chartLibraries = {
+ "dygraph": {
+ initialize: NETDATA.dygraphInitialize,
+ create: NETDATA.dygraphChartCreate,
+ update: NETDATA.dygraphChartUpdate,
+ resize: function (state) {
+ if (typeof state.tmp.dygraph_instance !== 'undefined' && typeof state.tmp.dygraph_instance.resize === 'function') {
+ state.tmp.dygraph_instance.resize();
+ }
+ },
+ setSelection: NETDATA.dygraphSetSelection,
+ clearSelection: NETDATA.dygraphClearSelection,
+ toolboxPanAndZoom: NETDATA.dygraphToolboxPanAndZoom,
+ initialized: false,
+ enabled: true,
+ xssRegexIgnore: new RegExp('^/api/v1/data\\.result\\.data$'),
+ format: function (state) {
+ void(state);
+ return 'json';
+ },
+ options: function (state) {
+ return 'ms' + '%7C' + 'flip' + (this.isLogScale(state) ? ('%7C' + 'abs') : '').toString();
+ },
+ legend: function (state) {
+ return (this.isSparkline(state) === false && NETDATA.dataAttributeBoolean(state.element, 'legend', true) === true) ? 'right-side' : null;
+ },
+ autoresize: function (state) {
+ void(state);
+ return true;
+ },
+ max_updates_to_recreate: function (state) {
+ void(state);
+ return 5000;
+ },
+ track_colors: function (state) {
+ void(state);
+ return true;
+ },
+ pixels_per_point: function (state) {
+ return (this.isSparkline(state) === false) ? 3 : 2;
+ },
+ isSparkline: function (state) {
+ if (typeof state.tmp.dygraph_sparkline === 'undefined') {
+ state.tmp.dygraph_sparkline = (this.theme(state) === 'sparkline');
+ }
+ return state.tmp.dygraph_sparkline;
+ },
+ isLogScale: function (state) {
+ if (typeof state.tmp.dygraph_logscale === 'undefined') {
+ state.tmp.dygraph_logscale = (this.theme(state) === 'logscale');
+ }
+ return state.tmp.dygraph_logscale;
+ },
+ theme: function (state) {
+ if (typeof state.tmp.dygraph_theme === 'undefined') {
+ state.tmp.dygraph_theme = NETDATA.dataAttribute(state.element, 'dygraph-theme', 'default');
+ }
+ return state.tmp.dygraph_theme;
+ },
+ container_class: function (state) {
+ if (this.legend(state) !== null) {
+ return 'netdata-container-with-legend';
+ }
+ return 'netdata-container';
+ }
+ },
+ "sparkline": {
+ initialize: NETDATA.sparklineInitialize,
+ create: NETDATA.sparklineChartCreate,
+ update: NETDATA.sparklineChartUpdate,
+ resize: null,
+ setSelection: undefined, // function(state, t) { void(state); return true; },
+ clearSelection: undefined, // function(state) { void(state); return true; },
+ toolboxPanAndZoom: null,
+ initialized: false,
+ enabled: true,
+ xssRegexIgnore: new RegExp('^/api/v1/data\\.result$'),
+ format: function (state) {
+ void(state);
+ return 'array';
+ },
+ options: function (state) {
+ void(state);
+ return 'flip' + '%7C' + 'abs';
+ },
+ legend: function (state) {
+ void(state);
+ return null;
+ },
+ autoresize: function (state) {
+ void(state);
+ return false;
+ },
+ max_updates_to_recreate: function (state) {
+ void(state);
+ return 5000;
+ },
+ track_colors: function (state) {
+ void(state);
+ return false;
+ },
+ pixels_per_point: function (state) {
+ void(state);
+ return 3;
+ },
+ container_class: function (state) {
+ void(state);
+ return 'netdata-container';
+ }
+ },
+ "peity": {
+ initialize: NETDATA.peityInitialize,
+ create: NETDATA.peityChartCreate,
+ update: NETDATA.peityChartUpdate,
+ resize: null,
+ setSelection: undefined, // function(state, t) { void(state); return true; },
+ clearSelection: undefined, // function(state) { void(state); return true; },
+ toolboxPanAndZoom: null,
+ initialized: false,
+ enabled: true,
+ xssRegexIgnore: new RegExp('^/api/v1/data\\.result$'),
+ format: function (state) {
+ void(state);
+ return 'ssvcomma';
+ },
+ options: function (state) {
+ void(state);
+ return 'null2zero' + '%7C' + 'flip' + '%7C' + 'abs';
+ },
+ legend: function (state) {
+ void(state);
+ return null;
+ },
+ autoresize: function (state) {
+ void(state);
+ return false;
+ },
+ max_updates_to_recreate: function (state) {
+ void(state);
+ return 5000;
+ },
+ track_colors: function (state) {
+ void(state);
+ return false;
+ },
+ pixels_per_point: function (state) {
+ void(state);
+ return 3;
+ },
+ container_class: function (state) {
+ void(state);
+ return 'netdata-container';
+ }
+ },
+ // "morris": {
+ // initialize: NETDATA.morrisInitialize,
+ // create: NETDATA.morrisChartCreate,
+ // update: NETDATA.morrisChartUpdate,
+ // resize: null,
+ // setSelection: undefined, // function(state, t) { void(state); return true; },
+ // clearSelection: undefined, // function(state) { void(state); return true; },
+ // toolboxPanAndZoom: null,
+ // initialized: false,
+ // enabled: true,
+ // xssRegexIgnore: new RegExp('^/api/v1/data\.result.data$'),
+ // format: function(state) { void(state); return 'json'; },
+ // options: function(state) { void(state); return 'objectrows' + '%7C' + 'ms'; },
+ // legend: function(state) { void(state); return null; },
+ // autoresize: function(state) { void(state); return false; },
+ // max_updates_to_recreate: function(state) { void(state); return 50; },
+ // track_colors: function(state) { void(state); return false; },
+ // pixels_per_point: function(state) { void(state); return 15; },
+ // container_class: function(state) { void(state); return 'netdata-container'; }
+ // },
+ "google": {
+ initialize: NETDATA.googleInitialize,
+ create: NETDATA.googleChartCreate,
+ update: NETDATA.googleChartUpdate,
+ resize: null,
+ setSelection: undefined, //function(state, t) { void(state); return true; },
+ clearSelection: undefined, //function(state) { void(state); return true; },
+ toolboxPanAndZoom: null,
+ initialized: false,
+ enabled: true,
+ xssRegexIgnore: new RegExp('^/api/v1/data\\.result\\.rows$'),
+ format: function (state) {
+ void(state);
+ return 'datatable';
+ },
+ options: function (state) {
+ void(state);
+ return '';
+ },
+ legend: function (state) {
+ void(state);
+ return null;
+ },
+ autoresize: function (state) {
+ void(state);
+ return false;
+ },
+ max_updates_to_recreate: function (state) {
+ void(state);
+ return 300;
+ },
+ track_colors: function (state) {
+ void(state);
+ return false;
+ },
+ pixels_per_point: function (state) {
+ void(state);
+ return 4;
+ },
+ container_class: function (state) {
+ void(state);
+ return 'netdata-container';
+ }
+ },
+ // "raphael": {
+ // initialize: NETDATA.raphaelInitialize,
+ // create: NETDATA.raphaelChartCreate,
+ // update: NETDATA.raphaelChartUpdate,
+ // resize: null,
+ // setSelection: undefined, // function(state, t) { void(state); return true; },
+ // clearSelection: undefined, // function(state) { void(state); return true; },
+ // toolboxPanAndZoom: null,
+ // initialized: false,
+ // enabled: true,
+ // xssRegexIgnore: new RegExp('^/api/v1/data\.result.data$'),
+ // format: function(state) { void(state); return 'json'; },
+ // options: function(state) { void(state); return ''; },
+ // legend: function(state) { void(state); return null; },
+ // autoresize: function(state) { void(state); return false; },
+ // max_updates_to_recreate: function(state) { void(state); return 5000; },
+ // track_colors: function(state) { void(state); return false; },
+ // pixels_per_point: function(state) { void(state); return 3; },
+ // container_class: function(state) { void(state); return 'netdata-container'; }
+ // },
+ // "c3": {
+ // initialize: NETDATA.c3Initialize,
+ // create: NETDATA.c3ChartCreate,
+ // update: NETDATA.c3ChartUpdate,
+ // resize: null,
+ // setSelection: undefined, // function(state, t) { void(state); return true; },
+ // clearSelection: undefined, // function(state) { void(state); return true; },
+ // toolboxPanAndZoom: null,
+ // initialized: false,
+ // enabled: true,
+ // xssRegexIgnore: new RegExp('^/api/v1/data\.result$'),
+ // format: function(state) { void(state); return 'csvjsonarray'; },
+ // options: function(state) { void(state); return 'milliseconds'; },
+ // legend: function(state) { void(state); return null; },
+ // autoresize: function(state) { void(state); return false; },
+ // max_updates_to_recreate: function(state) { void(state); return 5000; },
+ // track_colors: function(state) { void(state); return false; },
+ // pixels_per_point: function(state) { void(state); return 15; },
+ // container_class: function(state) { void(state); return 'netdata-container'; }
+ // },
+ "d3pie": {
+ initialize: NETDATA.d3pieInitialize,
+ create: NETDATA.d3pieChartCreate,
+ update: NETDATA.d3pieChartUpdate,
+ resize: null,
+ setSelection: NETDATA.d3pieSetSelection,
+ clearSelection: NETDATA.d3pieClearSelection,
+ toolboxPanAndZoom: null,
+ initialized: false,
+ enabled: true,
+ xssRegexIgnore: new RegExp('^/api/v1/data\\.result\\.data$'),
+ format: function (state) {
+ void(state);
+ return 'json';
+ },
+ options: function (state) {
+ void(state);
+ return 'objectrows' + '%7C' + 'ms';
+ },
+ legend: function (state) {
+ void(state);
+ return null;
+ },
+ autoresize: function (state) {
+ void(state);
+ return false;
+ },
+ max_updates_to_recreate: function (state) {
+ void(state);
+ return 5000;
+ },
+ track_colors: function (state) {
+ void(state);
+ return false;
+ },
+ pixels_per_point: function (state) {
+ void(state);
+ return 15;
+ },
+ container_class: function (state) {
+ void(state);
+ return 'netdata-container';
+ }
+ },
+ "d3": {
+ initialize: NETDATA.d3Initialize,
+ create: NETDATA.d3ChartCreate,
+ update: NETDATA.d3ChartUpdate,
+ resize: null,
+ setSelection: undefined, // function(state, t) { void(state); return true; },
+ clearSelection: undefined, // function(state) { void(state); return true; },
+ toolboxPanAndZoom: null,
+ initialized: false,
+ enabled: true,
+ xssRegexIgnore: new RegExp('^/api/v1/data\\.result\\.data$'),
+ format: function (state) {
+ void(state);
+ return 'json';
+ },
+ options: function (state) {
+ void(state);
+ return '';
+ },
+ legend: function (state) {
+ void(state);
+ return null;
+ },
+ autoresize: function (state) {
+ void(state);
+ return false;
+ },
+ max_updates_to_recreate: function (state) {
+ void(state);
+ return 5000;
+ },
+ track_colors: function (state) {
+ void(state);
+ return false;
+ },
+ pixels_per_point: function (state) {
+ void(state);
+ return 3;
+ },
+ container_class: function (state) {
+ void(state);
+ return 'netdata-container';
+ }
+ },
+ "easypiechart": {
+ initialize: NETDATA.easypiechartInitialize,
+ create: NETDATA.easypiechartChartCreate,
+ update: NETDATA.easypiechartChartUpdate,
+ resize: null,
+ setSelection: NETDATA.easypiechartSetSelection,
+ clearSelection: NETDATA.easypiechartClearSelection,
+ toolboxPanAndZoom: null,
+ initialized: false,
+ enabled: true,
+ xssRegexIgnore: new RegExp('^/api/v1/data\\.result$'),
+ format: function (state) {
+ void(state);
+ return 'array';
+ },
+ options: function (state) {
+ void(state);
+ return 'absolute';
+ },
+ legend: function (state) {
+ void(state);
+ return null;
+ },
+ autoresize: function (state) {
+ void(state);
+ return false;
+ },
+ max_updates_to_recreate: function (state) {
+ void(state);
+ return 5000;
+ },
+ track_colors: function (state) {
+ void(state);
+ return true;
+ },
+ pixels_per_point: function (state) {
+ void(state);
+ return 3;
+ },
+ aspect_ratio: 100,
+ container_class: function (state) {
+ void(state);
+ return 'netdata-container-easypiechart';
+ }
+ },
+ "gauge": {
+ initialize: NETDATA.gaugeInitialize,
+ create: NETDATA.gaugeChartCreate,
+ update: NETDATA.gaugeChartUpdate,
+ resize: null,
+ setSelection: NETDATA.gaugeSetSelection,
+ clearSelection: NETDATA.gaugeClearSelection,
+ toolboxPanAndZoom: null,
+ initialized: false,
+ enabled: true,
+ xssRegexIgnore: new RegExp('^/api/v1/data\\.result$'),
+ format: function (state) {
+ void(state);
+ return 'array';
+ },
+ options: function (state) {
+ void(state);
+ return 'absolute';
+ },
+ legend: function (state) {
+ void(state);
+ return null;
+ },
+ autoresize: function (state) {
+ void(state);
+ return false;
+ },
+ max_updates_to_recreate: function (state) {
+ void(state);
+ return 5000;
+ },
+ track_colors: function (state) {
+ void(state);
+ return true;
+ },
+ pixels_per_point: function (state) {
+ void(state);
+ return 3;
+ },
+ aspect_ratio: 60,
+ container_class: function (state) {
+ void(state);
+ return 'netdata-container-gauge';
+ }
+ },
+ "textonly": {
+ autoresize: function (state) {
+ void(state);
+ return false;
+ },
+ container_class: function (state) {
+ void(state);
+ return 'netdata-container';
+ },
+ create: NETDATA.textOnlyCreate,
+ enabled: true,
+ format: function (state) {
+ void(state);
+ return 'array';
+ },
+ initialized: true,
+ initialize: function (callback) {
+ callback();
+ },
+ legend: function (state) {
+ void(state);
+ return null;
+ },
+ max_updates_to_recreate: function (state) {
+ void(state);
+ return 5000;
+ },
+ options: function (state) {
+ void(state);
+ return 'absolute';
+ },
+ pixels_per_point: function (state) {
+ void(state);
+ return 3;
+ },
+ track_colors: function (state) {
+ void(state);
+ return false;
+ },
+ update: NETDATA.textOnlyUpdate,
+ xssRegexIgnore: new RegExp('^/api/v1/data\\.result$'),
+ }
+};
+
+NETDATA.registerChartLibrary = function (library, url) {
+ if (NETDATA.options.debug.libraries) {
+ console.log("registering chart library: " + library);
+ }
+
+ NETDATA.chartLibraries[library].url = url;
+ NETDATA.chartLibraries[library].initialized = true;
+ NETDATA.chartLibraries[library].enabled = true;
+};
+
+// *** src/dashboard.js/chart-registry.js
+
+// Chart Registry
+
+// When multiple chart elements need the same chart definition, we avoid
+// downloading it multiple times (and keeping it in browser memory multiple
+// times) by using this registry.
+
+// Every time we download a chart definition, we save it here with .add().
+// Then we try to get it back with .get(). If that fails, we download it again.
+
+NETDATA.fixHost = function (host) {
+ while (host.slice(-1) === '/') {
+ host = host.substring(0, host.length - 1);
+ }
+
+ return host;
+};
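+
+// Illustrative example: NETDATA.fixHost('http://localhost:19999//')
+// returns 'http://localhost:19999'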
+
+NETDATA.chartRegistry = {
+ charts: {},
+
+ globalReset: function () {
+ this.charts = {};
+ },
+
+ add: function (host, id, data) {
+ if (typeof this.charts[host] === 'undefined') {
+ this.charts[host] = {};
+ }
+
+ //console.log('added ' + host + '/' + id);
+ this.charts[host][id] = data;
+ },
+
+ get: function (host, id) {
+ if (typeof this.charts[host] === 'undefined') {
+ return null;
+ }
+
+ if (typeof this.charts[host][id] === 'undefined') {
+ return null;
+ }
+
+ //console.log('cached ' + host + '/' + id);
+ return this.charts[host][id];
+ },
+
+ downloadAll: function (host, callback) {
+ host = NETDATA.fixHost(host);
+
+ let self = this;
+
+ function got_data(h, data, callback) {
+ if (data !== null) {
+ self.charts[h] = data.charts;
+
+ // update the server timezone in our options
+ if (typeof data.timezone === 'string') {
+ NETDATA.options.server_timezone = data.timezone;
+ }
+ } else {
+ NETDATA.error(406, h + '/api/v1/charts');
+ }
+
+ if (typeof callback === 'function') {
+ callback(data);
+ }
+ }
+
+ if (netdataSnapshotData !== null) {
+ got_data(host, netdataSnapshotData.charts, callback);
+ } else {
+ $.ajax({
+ url: host + '/api/v1/charts',
+ async: true,
+ cache: false,
+ xhrFields: {withCredentials: true} // required for the cookie
+ })
+ .done(function (data) {
+ data = NETDATA.xss.checkOptional('/api/v1/charts', data);
+ got_data(host, data, callback);
+ })
+ .fail(function () {
+ NETDATA.error(405, host + '/api/v1/charts');
+
+ if (typeof callback === 'function') {
+ callback(null);
+ }
+ });
+ }
+ }
+};
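+
+// Illustrative usage (the host and chart id are hypothetical):
+// NETDATA.chartRegistry.downloadAll('http://localhost:19999', function (data) {
+// let chart = NETDATA.chartRegistry.get('http://localhost:19999', 'system.cpu');
+// // chart is the definition object from /api/v1/charts, or null
+// });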
+
+// Compute common (joint) values over multiple charts.
+
+
+// commonMin & commonMax
+
+NETDATA.commonMin = {
+ keys: {},
+ latest: {},
+
+ globalReset: function () {
+ this.keys = {};
+ this.latest = {};
+ },
+
+ get: function (state) {
+ if (typeof state.tmp.__commonMin === 'undefined') {
+ // get the commonMin setting
+ state.tmp.__commonMin = NETDATA.dataAttribute(state.element, 'common-min', null);
+ }
+
+ let min = state.data.min;
+ let name = state.tmp.__commonMin;
+
+ if (name === null) {
+ // we don't need commonMin
+ //state.log('no need for commonMin');
+ return min;
+ }
+
+ let t = this.keys[name];
+ if (typeof t === 'undefined') {
+ // add our commonMin
+ this.keys[name] = {};
+ t = this.keys[name];
+ }
+
+ let uuid = state.uuid;
+ if (typeof t[uuid] !== 'undefined') {
+ if (t[uuid] === min) {
+ //state.log('commonMin ' + state.tmp.__commonMin + ' not changed: ' + this.latest[name]);
+ return this.latest[name];
+ } else if (min < this.latest[name]) {
+ //state.log('commonMin ' + state.tmp.__commonMin + ' increased: ' + min);
+ t[uuid] = min;
+ this.latest[name] = min;
+ return min;
+ }
+ }
+
+ // add our min
+ t[uuid] = min;
+
+ // find the common min
+ let m = min;
+ // for (let i in t) {
+ // if (t.hasOwnProperty(i) && t[i] < m) m = t[i];
+ // }
+ for (let ti of Object.values(t)) {
+ if (ti < m) {
+ m = ti;
+ }
+ }
+
+ //state.log('commonMin ' + state.tmp.__commonMin + ' updated: ' + m);
+ this.latest[name] = m;
+ return m;
+ }
+};
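+
+// Illustrative example: two charts carrying data-common-min="disk"
+// and reporting mins of -3 and -7 will both be drawn with min -7
+// once both have refreshed; NETDATA.commonMax below works
+// symmetrically for the maximum.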
+
+NETDATA.commonMax = {
+ keys: {},
+ latest: {},
+
+ globalReset: function () {
+ this.keys = {};
+ this.latest = {};
+ },
+
+ get: function (state) {
+ if (typeof state.tmp.__commonMax === 'undefined') {
+ // get the commonMax setting
+ state.tmp.__commonMax = NETDATA.dataAttribute(state.element, 'common-max', null);
+ }
+
+ let max = state.data.max;
+ let name = state.tmp.__commonMax;
+
+ if (name === null) {
+ // we don't need commonMax
+ //state.log('no need for commonMax');
+ return max;
+ }
+
+ let t = this.keys[name];
+ if (typeof t === 'undefined') {
+ // add our commonMax
+ this.keys[name] = {};
+ t = this.keys[name];
+ }
+
+ let uuid = state.uuid;
+ if (typeof t[uuid] !== 'undefined') {
+ if (t[uuid] === max) {
+ //state.log('commonMax ' + state.tmp.__commonMax + ' not changed: ' + this.latest[name]);
+ return this.latest[name];
+ } else if (max > this.latest[name]) {
+ //state.log('commonMax ' + state.tmp.__commonMax + ' increased: ' + max);
+ t[uuid] = max;
+ this.latest[name] = max;
+ return max;
+ }
+ }
+
+ // add our max
+ t[uuid] = max;
+
+ // find the common max
+ let m = max;
+ // for (let i in t) {
+ // if (t.hasOwnProperty(i) && t[i] > m) m = t[i];
+ // }
+ for (let ti of Object.values(t)) {
+ if (ti > m) {
+ m = ti;
+ }
+ }
+
+ //state.log('commonMax ' + state.tmp.__commonMax + ' updated: ' + m);
+ this.latest[name] = m;
+ return m;
+ }
+};
+
+NETDATA.commonColors = {
+ keys: {},
+
+ globalReset: function () {
+ this.keys = {};
+ },
+
+ get: function (state, label) {
+ let ret = this.refill(state);
+
+ if (typeof ret.assigned[label] === 'undefined') {
+ ret.assigned[label] = ret.available.shift();
+ }
+
+ return ret.assigned[label];
+ },
+
+ refill: function (state) {
+ let ret, len;
+
+ if (typeof state.tmp.__commonColors === 'undefined') {
+ ret = this.prepare(state);
+ } else {
+ ret = this.keys[state.tmp.__commonColors];
+ if (typeof ret === 'undefined') {
+ ret = this.prepare(state);
+ }
+ }
+
+ if (ret.available.length === 0) {
+ if (ret.copy_theme || ret.custom.length === 0) {
+ // copy the theme colors
+ len = NETDATA.themes.current.colors.length;
+ while (len--) {
+ ret.available.unshift(NETDATA.themes.current.colors[len]);
+ }
+ }
+
+ // copy the custom colors
+ len = ret.custom.length;
+ while (len--) {
+ ret.available.unshift(ret.custom[len]);
+ }
+ }
+
+ state.colors_assigned = ret.assigned;
+ state.colors_available = ret.available;
+ state.colors_custom = ret.custom;
+
+ return ret;
+ },
+
+ __read_custom_colors: function (state, ret) {
+ // add the user supplied colors
+ let c = NETDATA.dataAttribute(state.element, 'colors', undefined);
+ if (typeof c === 'string' && c.length > 0) {
+ c = c.split(' ');
+ let len = c.length;
+
+ if (len > 0 && c[len - 1] === 'ONLY') {
+ len--;
+ ret.copy_theme = false;
+ }
+
+ while (len--) {
+ ret.custom.unshift(c[len]);
+ }
+ }
+ },
+
+ prepare: function (state) {
+ let has_custom_colors = false;
+
+ if (typeof state.tmp.__commonColors === 'undefined') {
+ let defname = state.chart.context;
+
+ // if this chart has data-colors=""
+ // we should use the chart uuid as the default key (private palette)
+ // (data-common-colors="NAME" will be used anyway)
+ let c = NETDATA.dataAttribute(state.element, 'colors', undefined);
+ if (typeof c === 'string' && c.length > 0) {
+ defname = state.uuid;
+ has_custom_colors = true;
+ }
+
+ // get the commonColors setting
+ state.tmp.__commonColors = NETDATA.dataAttribute(state.element, 'common-colors', defname);
+ }
+
+ let name = state.tmp.__commonColors;
+ let ret = this.keys[name];
+
+ if (typeof ret === 'undefined') {
+ // add our commonColors entry
+ this.keys[name] = {
+ assigned: {}, // name-value of dimensions and their colors
+ available: [], // an array of colors available to be used
+ custom: [], // the array of colors defined by the user
+ charts: {}, // the charts linked to this
+ copy_theme: true
+ };
+ ret = this.keys[name];
+ }
+
+ if (typeof ret.charts[state.uuid] === 'undefined') {
+ ret.charts[state.uuid] = state;
+
+ if (has_custom_colors) {
+ this.__read_custom_colors(state, ret);
+ }
+ }
+
+ return ret;
+ }
+};
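+
+// Illustrative usage: <div data-colors="#FF0000 #00FF00 ONLY" ...> assigns
+// this chart a private palette (keyed by its uuid) restricted to the listed
+// colors; the trailing keyword ONLY disables copying the theme colors.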
+
+// *** src/dashboard.js/main.js
+
+// Codacy declarations
+/* global clipboard */
+/* global Ps */
+
+if (NETDATA.options.debug.main_loop) {
+ console.log('welcome to NETDATA');
+}
+
+NETDATA.onresizeCallback = null;
+NETDATA.onresize = function () {
+ NETDATA.options.last_page_resize = Date.now();
+ NETDATA.onscroll();
+
+ if (typeof NETDATA.onresizeCallback === 'function') {
+ NETDATA.onresizeCallback();
+ }
+};
+
+NETDATA.abortAllRefreshes = function () {
+ let targets = NETDATA.options.targets;
+ let len = targets.length;
+
+ while (len--) {
+ if (targets[len].fetching_data) {
+ if (typeof targets[len].xhr !== 'undefined') {
+ targets[len].xhr.abort();
+ targets[len].running = false;
+ targets[len].fetching_data = false;
+ }
+ }
+ }
+};
+
+NETDATA.onscrollStartDelay = function () {
+ NETDATA.options.last_page_scroll = Date.now();
+
+ NETDATA.options.on_scroll_refresher_stop_until =
+ NETDATA.options.last_page_scroll
+ + (NETDATA.options.current.async_on_scroll ? 1000 : 0);
+};
+
+NETDATA.onscrollEndDelay = function () {
+ NETDATA.options.on_scroll_refresher_stop_until =
+ Date.now()
+ + (NETDATA.options.current.async_on_scroll ? NETDATA.options.current.onscroll_worker_duration_threshold : 0);
+};
+
+NETDATA.onscroll_updater_timeout_id = undefined;
+NETDATA.onscrollUpdater = function () {
+ NETDATA.globalSelectionSync.stop();
+
+ if (NETDATA.options.abort_ajax_on_scroll) {
+ NETDATA.abortAllRefreshes();
+ }
+
+ // when the user scrolls, they see that we have
+ // hidden all the not-visible charts;
+ // using this little function we try to switch
+ // the charts back to visible quickly
+
+ if (!NETDATA.intersectionObserver.enabled()) {
+ if (!NETDATA.options.current.parallel_refresher) {
+ let targets = NETDATA.options.targets;
+ let len = targets.length;
+
+ while (len--) {
+ if (!targets[len].running) {
+ targets[len].isVisible();
+ }
+ }
+ }
+ }
+
+ NETDATA.onscrollEndDelay();
+};
+
+NETDATA.scrollUp = false;
+NETDATA.scrollY = window.scrollY;
+NETDATA.onscroll = function () {
+ //console.log('onscroll() begin');
+
+ NETDATA.onscrollStartDelay();
+ NETDATA.chartRefresherReschedule();
+
+ NETDATA.scrollUp = (window.scrollY > NETDATA.scrollY);
+ NETDATA.scrollY = window.scrollY;
+
+ if (NETDATA.onscroll_updater_timeout_id) {
+ NETDATA.timeout.clear(NETDATA.onscroll_updater_timeout_id);
+ }
+
+ NETDATA.onscroll_updater_timeout_id = NETDATA.timeout.set(NETDATA.onscrollUpdater, 0);
+ //console.log('onscroll() end');
+};
+
+NETDATA.supportsPassiveEvents = function () {
+ if (NETDATA.options.passive_events === null) {
+ let supportsPassive = false;
+ try {
+ let opts = Object.defineProperty({}, 'passive', {
+ get: function () {
+ supportsPassive = true;
+ }
+ });
+ window.addEventListener("test", null, opts);
+ } catch (e) {
+ console.log('browser does not support passive events');
+ }
+
+ NETDATA.options.passive_events = supportsPassive;
+ }
+
+ // console.log('passive ' + NETDATA.options.passive_events);
+ return NETDATA.options.passive_events;
+};
+
+window.addEventListener('resize', NETDATA.onresize, NETDATA.supportsPassiveEvents() ? {passive: true} : false);
+window.addEventListener('scroll', NETDATA.onscroll, NETDATA.supportsPassiveEvents() ? {passive: true} : false);
+// window.onresize = NETDATA.onresize;
+// window.onscroll = NETDATA.onscroll;
+
+// ----------------------------------------------------------------------------------------------------------------
+// Global Pan and Zoom on charts
+
+// Using this structure we synchronize all the charts, so that
+// when you pan or zoom one, all the others are automatically refreshed
+// to the same timespan.
+
+NETDATA.globalPanAndZoom = {
+ seq: 0, // timestamp ms
+ // every time a chart is panned or zoomed
+ // we set the timestamp here
+ // then we use it as a sequence number
+ // to find if other charts are synchronized
+ // to this time-range
+
+ master: null, // the master chart (state), to which all others
+ // are synchronized
+
+ force_before_ms: null, // the timespan to sync all other charts
+ force_after_ms: null,
+
+ callback: null,
+
+ globalReset: function () {
+ this.clearMaster();
+ this.seq = 0;
+ this.master = null;
+ this.force_after_ms = null;
+ this.force_before_ms = null;
+ this.callback = null;
+ },
+
+ delay: function () {
+ if (NETDATA.options.debug.globalPanAndZoom) {
+ console.log('globalPanAndZoom.delay()');
+ }
+
+ NETDATA.options.auto_refresher_stop_until = Date.now() + NETDATA.options.current.global_pan_sync_time;
+ },
+
+ // set a new master
+ setMaster: function (state, after, before) {
+ this.delay();
+
+ if (!NETDATA.options.current.sync_pan_and_zoom) {
+ return;
+ }
+
+ if (this.master === null) {
+ if (NETDATA.options.debug.globalPanAndZoom) {
+ console.log('globalPanAndZoom.setMaster(' + state.id + ', ' + after + ', ' + before + ') SET MASTER');
+ }
+ } else if (this.master !== state) {
+ if (NETDATA.options.debug.globalPanAndZoom) {
+ console.log('globalPanAndZoom.setMaster(' + state.id + ', ' + after + ', ' + before + ') CHANGED MASTER');
+ }
+
+ this.master.resetChart(true, true);
+ }
+
+ let now = Date.now();
+ this.master = state;
+ this.seq = now;
+ this.force_after_ms = after;
+ this.force_before_ms = before;
+
+ if (typeof this.callback === 'function') {
+ this.callback(true, after, before);
+ }
+ },
+
+ // clear the master
+ clearMaster: function () {
+ // if (NETDATA.options.debug.globalPanAndZoom === true)
+ // console.log('globalPanAndZoom.clearMaster()');
+ if (NETDATA.options.debug.globalPanAndZoom) {
+ console.log('globalPanAndZoom.clearMaster()');
+ }
+
+ if (this.master !== null) {
+ let st = this.master;
+ this.master = null;
+ st.resetChart();
+ }
+
+ this.master = null;
+ this.seq = 0;
+ this.force_after_ms = null;
+ this.force_before_ms = null;
+ NETDATA.options.auto_refresher_stop_until = 0;
+
+ if (typeof this.callback === 'function') {
+ this.callback(false, 0, 0);
+ }
+ },
+
+ // is the given state the master of the global
+ // pan and zoom sync?
+ isMaster: function (state) {
+ return (this.master === state);
+ },
+
+ // is a global pan and zoom sync currently active?
+ isActive: function () {
+ return (this.master !== null && this.force_before_ms !== null && this.force_after_ms !== null && this.seq !== 0);
+ },
+
+ // check if a chart, other than the master,
+ // needs to be refreshed due to the global pan and zoom
+ shouldBeAutoRefreshed: function (state) {
+ if (this.master === null || this.seq === 0) {
+ return false;
+ }
+
+ //if (state.needsRecreation())
+ // return true;
+
+ return (state.tm.pan_and_zoom_seq !== this.seq);
+ }
+};
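+
+ // Hypothetical usage sketch (not in the original source): a page script
+ // can observe global pan/zoom changes through the callback, which is
+ // invoked with (true, after, before) on setMaster() and (false, 0, 0)
+ // on clearMaster():
+ //
+ // NETDATA.globalPanAndZoom.callback = function (enabled, after, before) {
+ //     if (enabled) { console.log('all charts now show', after, '->', before); }
+ //     else { console.log('global pan and zoom cleared'); }
+ // };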
+
+// ----------------------------------------------------------------------------------------------------------------
+// global chart underlay (time-frame highlighting)
+
+NETDATA.globalChartUnderlay = {
+ callback: null, // what to call when a highlighted range is set up
+ after: null, // highlight after this time
+ before: null, // highlight before this time
+ view_after: null, // the chart's after_ms viewport when the highlight was set up
+ view_before: null, // the chart's before_ms viewport when the highlight was set up
+ state: null, // the chart on which the highlight was set up
+
+ isActive: function () {
+ return (this.after !== null && this.before !== null);
+ },
+
+ hasViewport: function () {
+ return (this.state !== null && this.view_after !== null && this.view_before !== null);
+ },
+
+ init: function (state, after, before, view_after, view_before) {
+ this.state = (typeof state !== 'undefined') ? state : null;
+ this.after = (typeof after !== 'undefined' && after !== null && after > 0) ? after : null;
+ this.before = (typeof before !== 'undefined' && before !== null && before > 0) ? before : null;
+ this.view_after = (typeof view_after !== 'undefined' && view_after !== null && view_after > 0) ? view_after : null;
+ this.view_before = (typeof view_before !== 'undefined' && view_before !== null && view_before > 0) ? view_before : null;
+ },
+
+ setup: function () {
+ if (this.isActive()) {
+ if (this.state === null) {
+ this.state = NETDATA.options.targets[0];
+ }
+
+ if (typeof this.callback === 'function') {
+ this.callback(true, this.after, this.before);
+ }
+ } else {
+ if (typeof this.callback === 'function') {
+ this.callback(false, 0, 0);
+ }
+ }
+ },
+
+ set: function (state, after, before, view_after, view_before) {
+ if (after > before) {
+ let t = after;
+ after = before;
+ before = t;
+ }
+
+ this.init(state, after, before, view_after, view_before);
+
+ // if (this.hasViewport() === true)
+ // NETDATA.globalPanAndZoom.setMaster(this.state, this.view_after, this.view_before);
+ if (this.hasViewport()) {
+ NETDATA.globalPanAndZoom.setMaster(this.state, this.view_after, this.view_before);
+ }
+
+ this.setup();
+ },
+
+ clear: function () {
+ this.after = null;
+ this.before = null;
+ this.state = null;
+ this.view_after = null;
+ this.view_before = null;
+
+ if (typeof this.callback === 'function') {
+ this.callback(false, 0, 0);
+ }
+ },
+
+ focus: function () {
+ if (this.isActive() && this.hasViewport()) {
+ if (this.state === null) {
+ this.state = NETDATA.options.targets[0];
+ }
+
+ if (NETDATA.globalPanAndZoom.isMaster(this.state)) {
+ NETDATA.globalPanAndZoom.clearMaster();
+ }
+
+ NETDATA.globalPanAndZoom.setMaster(this.state, this.view_after, this.view_before, true);
+ }
+ }
+};
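+
+ // Hypothetical usage sketch (not in the original source): highlight a
+ // time-frame on all charts; timestamps are in milliseconds and 'state'
+ // may be null, in which case the first chart on the page is used:
+ //
+ // let before = Date.now();
+ // let after = before - 60 * 1000; // the last minute
+ // NETDATA.globalChartUnderlay.set(null, after, before, null, null);
+ // NETDATA.globalChartUnderlay.clear(); // remove the highlight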
+
+// ----------------------------------------------------------------------------------------------------------------
+// dimensions selection
+
+// TODO
+// move color assignment to dimensions, here
+
+let dimensionStatus = function (parent, label, name_div, value_div, color) {
+ this.enabled = false;
+ this.parent = parent;
+ this.label = label;
+ this.name_div = null;
+ this.value_div = null;
+ this.color = NETDATA.themes.current.foreground;
+ this.selected = (parent.unselected_count === 0);
+
+ this.setOptions(name_div, value_div, color);
+};
+
+dimensionStatus.prototype.invalidate = function () {
+ this.name_div = null;
+ this.value_div = null;
+ this.enabled = false;
+};
+
+dimensionStatus.prototype.setOptions = function (name_div, value_div, color) {
+ this.color = color;
+
+ if (this.name_div !== name_div) {
+ this.name_div = name_div;
+ this.name_div.title = this.label;
+ this.name_div.style.setProperty('color', this.color, 'important');
+ if (!this.selected) {
+ this.name_div.className = 'netdata-legend-name not-selected';
+ } else {
+ this.name_div.className = 'netdata-legend-name selected';
+ }
+ }
+
+ if (this.value_div !== value_div) {
+ this.value_div = value_div;
+ this.value_div.title = this.label;
+ this.value_div.style.setProperty('color', this.color, 'important');
+ if (!this.selected) {
+ this.value_div.className = 'netdata-legend-value not-selected';
+ } else {
+ this.value_div.className = 'netdata-legend-value selected';
+ }
+ }
+
+ this.enabled = true;
+ this.setHandler();
+};
+
+dimensionStatus.prototype.setHandler = function () {
+ if (!this.enabled) {
+ return;
+ }
+
+ let ds = this;
+
+ // this.name_div.onmousedown = this.value_div.onmousedown = function(e) {
+ this.name_div.onclick = this.value_div.onclick = function (e) {
+ e.preventDefault();
+ if (ds.isSelected()) {
+ // this is selected
+ if (e.shiftKey || e.ctrlKey) {
+ // control or shift key is pressed -> unselect this (except if none would remain selected, in which case select all)
+ ds.unselect();
+
+ if (ds.parent.countSelected() === 0) {
+ ds.parent.selectAll();
+ }
+ } else {
+ // no key is pressed -> select only this (except if it is the only one selected already, in which case select all)
+ if (ds.parent.countSelected() === 1) {
+ ds.parent.selectAll();
+ } else {
+ ds.parent.selectNone();
+ ds.select();
+ }
+ }
+ }
+ else {
+ // this is not selected
+ if (e.shiftKey || e.ctrlKey) {
+ // control or shift key is pressed -> select this too
+ ds.select();
+ } else {
+ // no key is pressed -> select only this
+ ds.parent.selectNone();
+ ds.select();
+ }
+ }
+
+ ds.parent.state.redrawChart();
+ };
+};
+
+dimensionStatus.prototype.select = function () {
+ if (!this.enabled) {
+ return;
+ }
+
+ this.name_div.className = 'netdata-legend-name selected';
+ this.value_div.className = 'netdata-legend-value selected';
+ this.selected = true;
+};
+
+dimensionStatus.prototype.unselect = function () {
+ if (!this.enabled) {
+ return;
+ }
+
+ this.name_div.className = 'netdata-legend-name not-selected';
+ this.value_div.className = 'netdata-legend-value hidden';
+ this.selected = false;
+};
+
+dimensionStatus.prototype.isSelected = function () {
+ // return(this.enabled === true && this.selected === true);
+ return this.enabled && this.selected;
+};
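+
+ // Summary of the click semantics implemented by setHandler() above:
+ // - plain click on a selected dimension: show only this one
+ //   (or select all, if it is already the only one selected)
+ // - plain click on an unselected dimension: show only this one
+ // - ctrl/shift click on a selected dimension: remove it from the selection
+ //   (or select all, if none would remain selected)
+ // - ctrl/shift click on an unselected dimension: add it to the selection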
+
+// ----------------------------------------------------------------------------------------------------------------
+
+let dimensionsVisibility = function (state) {
+ this.state = state;
+ this.len = 0;
+ this.dimensions = {};
+ this.selected_count = 0;
+ this.unselected_count = 0;
+};
+
+dimensionsVisibility.prototype.dimensionAdd = function (label, name_div, value_div, color) {
+ if (typeof this.dimensions[label] === 'undefined') {
+ this.len++;
+ this.dimensions[label] = new dimensionStatus(this, label, name_div, value_div, color);
+ } else {
+ this.dimensions[label].setOptions(name_div, value_div, color);
+ }
+
+ return this.dimensions[label];
+};
+
+dimensionsVisibility.prototype.dimensionGet = function (label) {
+ return this.dimensions[label];
+};
+
+dimensionsVisibility.prototype.invalidateAll = function () {
+ let keys = Object.keys(this.dimensions);
+ let len = keys.length;
+ while (len--) {
+ this.dimensions[keys[len]].invalidate();
+ }
+};
+
+dimensionsVisibility.prototype.selectAll = function () {
+ let keys = Object.keys(this.dimensions);
+ let len = keys.length;
+ while (len--) {
+ this.dimensions[keys[len]].select();
+ }
+};
+
+dimensionsVisibility.prototype.countSelected = function () {
+ let selected = 0;
+ let keys = Object.keys(this.dimensions);
+ let len = keys.length;
+ while (len--) {
+ if (this.dimensions[keys[len]].isSelected()) {
+ selected++;
+ }
+ }
+
+ return selected;
+};
+
+dimensionsVisibility.prototype.selectNone = function () {
+ let keys = Object.keys(this.dimensions);
+ let len = keys.length;
+ while (len--) {
+ this.dimensions[keys[len]].unselect();
+ }
+};
+
+dimensionsVisibility.prototype.selected2BooleanArray = function (array) {
+ let ret = [];
+ this.selected_count = 0;
+ this.unselected_count = 0;
+
+ let len = array.length;
+ while (len--) {
+ let ds = this.dimensions[array[len]];
+ if (typeof ds === 'undefined') {
+ // console.log(array[len] + ' is not found');
+ ret.unshift(false);
+ } else if (ds.isSelected()) {
+ ret.unshift(true);
+ this.selected_count++;
+ } else {
+ ret.unshift(false);
+ this.unselected_count++;
+ }
+ }
+
+ if (this.selected_count === 0 && this.unselected_count !== 0) {
+ this.selectAll();
+ return this.selected2BooleanArray(array);
+ }
+
+ return ret;
+};
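+
+ // Worked example for selected2BooleanArray() (illustrative data):
+ // given dimensions { a: selected, b: unselected, c: selected } and
+ // array = ['a', 'b', 'c'], the loop walks the array backwards and
+ // unshifts, preserving order, so the result is [true, false, true]
+ // with selected_count = 2 and unselected_count = 1. If nothing were
+ // selected, selectAll() would run and the call would repeat itself,
+ // returning [true, true, true].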
+
+// ----------------------------------------------------------------------------------------------------------------
+// date/time conversion
+
+NETDATA.dateTime = {
+ using_timezone: false,
+
+ // these are the old netdata functions;
+ // we fall back to these if the new ones fail
+
+ localeDateStringNative: function (d) {
+ return d.toLocaleDateString();
+ },
+
+ localeTimeStringNative: function (d) {
+ return d.toLocaleTimeString();
+ },
+
+ xAxisTimeStringNative: function (d) {
+ return NETDATA.zeropad(d.getHours()) + ":"
+ + NETDATA.zeropad(d.getMinutes()) + ":"
+ + NETDATA.zeropad(d.getSeconds());
+ },
+
+ // initialize the new date/time conversion
+ // functions.
+ // if this fails, we fall back to the above
+ init: function (timezone) {
+ //console.log('init with timezone: ' + timezone);
+
+ // detect browser timezone
+ try {
+ NETDATA.options.browser_timezone = Intl.DateTimeFormat().resolvedOptions().timeZone;
+ } catch (e) {
+ console.log('failed to detect browser timezone: ' + e.toString());
+ NETDATA.options.browser_timezone = 'cannot-detect-it';
+ }
+
+ let ret = false;
+
+ try {
+ let dateOptions = {
+ localeMatcher: 'best fit',
+ formatMatcher: 'best fit',
+ weekday: 'short',
+ year: 'numeric',
+ month: 'short',
+ day: '2-digit'
+ };
+
+ let timeOptions = {
+ localeMatcher: 'best fit',
+ hour12: false,
+ formatMatcher: 'best fit',
+ hour: '2-digit',
+ minute: '2-digit',
+ second: '2-digit'
+ };
+
+ let xAxisOptions = {
+ localeMatcher: 'best fit',
+ hour12: false,
+ formatMatcher: 'best fit',
+ hour: '2-digit',
+ minute: '2-digit',
+ second: '2-digit'
+ };
+
+ if (typeof timezone === 'string' && timezone !== '' && timezone !== 'default') {
+ dateOptions.timeZone = timezone;
+ timeOptions.timeZone = timezone;
+ timeOptions.timeZoneName = 'short';
+ xAxisOptions.timeZone = timezone;
+ this.using_timezone = true;
+ } else {
+ timezone = 'default';
+ this.using_timezone = false;
+ }
+
+ this.dateFormat = new Intl.DateTimeFormat(navigator.language, dateOptions);
+ this.timeFormat = new Intl.DateTimeFormat(navigator.language, timeOptions);
+ this.xAxisFormat = new Intl.DateTimeFormat(navigator.language, xAxisOptions);
+
+ this.localeDateString = function (d) {
+ return this.dateFormat.format(d);
+ };
+
+ this.localeTimeString = function (d) {
+ return this.timeFormat.format(d);
+ };
+
+ this.xAxisTimeString = function (d) {
+ return this.xAxisFormat.format(d);
+ };
+
+ //let d = new Date();
+ //let t = this.dateFormat.format(d) + ' ' + this.timeFormat.format(d) + ' ' + this.xAxisFormat.format(d);
+
+ ret = true;
+ } catch (e) {
+ console.log('Cannot setup Date/Time formatting: ' + e.toString());
+
+ timezone = 'default';
+ this.localeDateString = this.localeDateStringNative;
+ this.localeTimeString = this.localeTimeStringNative;
+ this.xAxisTimeString = this.xAxisTimeStringNative;
+ this.using_timezone = false;
+
+ ret = false;
+ }
+
+ // save it
+ //console.log('init setOption timezone: ' + timezone);
+ NETDATA.setOption('timezone', timezone);
+
+ return ret;
+ }
+};
+NETDATA.dateTime.init(NETDATA.options.current.timezone);
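+
+ // A minimal sketch of the Intl API relied upon above (illustrative values):
+ //
+ // let fmt = new Intl.DateTimeFormat(navigator.language, {
+ //     hour12: false, hour: '2-digit', minute: '2-digit', second: '2-digit',
+ //     timeZone: 'UTC' // any IANA timezone name works here
+ // });
+ // fmt.format(new Date(0)); // -> "00:00:00" for an en-US locale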
+
+// ----------------------------------------------------------------------------------------------------------------
+// global selection sync
+
+NETDATA.globalSelectionSync = {
+ state: null,
+ dontSyncBefore: 0,
+ last_t: 0,
+ slaves: [],
+ timeoutId: undefined,
+
+ globalReset: function () {
+ this.stop();
+ this.state = null;
+ this.dontSyncBefore = 0;
+ this.last_t = 0;
+ this.slaves = [];
+ this.timeoutId = undefined;
+ },
+
+ active: function () {
+ return (this.state !== null);
+ },
+
+ // return true if global selection sync can be enabled now
+ enabled: function () {
+ // console.log('enabled()');
+ // can we globally apply selection sync?
+ if (!NETDATA.options.current.sync_selection) {
+ return false;
+ }
+
+ return (this.dontSyncBefore <= Date.now());
+ },
+
+ // set the global selection sync master
+ setMaster: function (state) {
+ if (!this.enabled()) {
+ this.stop();
+ return;
+ }
+
+ if (this.state === state) {
+ return;
+ }
+
+ if (this.state !== null) {
+ this.stop();
+ }
+
+ if (NETDATA.options.debug.globalSelectionSync) {
+ console.log('globalSelectionSync.setMaster(' + state.id + ')');
+ }
+
+ state.selected = true;
+ this.state = state;
+ this.last_t = 0;
+
+ // find all slaves
+ let targets = NETDATA.intersectionObserver.targets();
+ this.slaves = [];
+ let len = targets.length;
+ while (len--) {
+ let st = targets[len];
+ if (this.state !== st && st.globalSelectionSyncIsEligible()) {
+ this.slaves.push(st);
+ }
+ }
+
+ // this.delay(100);
+ },
+
+ // stop global selection sync
+ stop: function () {
+ if (this.state !== null) {
+ if (NETDATA.options.debug.globalSelectionSync) {
+ console.log('globalSelectionSync.stop()');
+ }
+
+ let len = this.slaves.length;
+ while (len--) {
+ this.slaves[len].clearSelection();
+ }
+
+ this.state.clearSelection();
+
+ this.last_t = 0;
+ this.slaves = [];
+ this.state = null;
+ }
+ },
+
+ // delay global selection sync for some time
+ delay: function (ms) {
+ if (NETDATA.options.current.sync_selection) {
+ // if (NETDATA.options.debug.globalSelectionSync === true) {
+ if (NETDATA.options.debug.globalSelectionSync) {
+ console.log('globalSelectionSync.delay()');
+ }
+
+ if (typeof ms === 'number') {
+ this.dontSyncBefore = Date.now() + ms;
+ } else {
+ this.dontSyncBefore = Date.now() + NETDATA.options.current.sync_selection_delay;
+ }
+ }
+ },
+
+ __syncSlaves: function () {
+ // if (NETDATA.globalSelectionSync.enabled() === true) {
+ if (NETDATA.globalSelectionSync.enabled()) {
+ // if (NETDATA.options.debug.globalSelectionSync === true)
+ if (NETDATA.options.debug.globalSelectionSync) {
+ console.log('globalSelectionSync.__syncSlaves()');
+ }
+
+ let t = NETDATA.globalSelectionSync.last_t;
+ let len = NETDATA.globalSelectionSync.slaves.length;
+ while (len--) {
+ NETDATA.globalSelectionSync.slaves[len].setSelection(t);
+ }
+
+ NETDATA.globalSelectionSync.timeoutId = undefined; // 'this' is unreliable here (called via NETDATA.timeout), so use the explicit reference
+ }
+ },
+
+ // sync all the visible charts to the given time
+ // this is to be called from the chart libraries
+ sync: function (state, t) {
+ // if (NETDATA.options.current.sync_selection === true) {
+ if (NETDATA.options.current.sync_selection) {
+ // if (NETDATA.options.debug.globalSelectionSync === true)
+ if (NETDATA.options.debug.globalSelectionSync) {
+ console.log('globalSelectionSync.sync(' + state.id + ', ' + t.toString() + ')');
+ }
+
+ this.setMaster(state);
+
+ if (t === this.last_t) {
+ return;
+ }
+
+ this.last_t = t;
+
+ if (state.foreignElementSelection !== null) {
+ state.foreignElementSelection.innerText = NETDATA.dateTime.localeDateString(t) + ' ' + NETDATA.dateTime.localeTimeString(t);
+ }
+
+ if (this.timeoutId) {
+ NETDATA.timeout.clear(this.timeoutId);
+ }
+
+ this.timeoutId = NETDATA.timeout.set(this.__syncSlaves, 0);
+ }
+ }
+};
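+
+ // Hypothetical flow (not in the original source): a chart library reports
+ // hover positions through sync(); the slaves are then updated on the next
+ // timeout tick by __syncSlaves():
+ //
+ // // inside a chart library's hover handler, with 'state' and timestamp 't':
+ // NETDATA.globalSelectionSync.sync(state, t);
+ // // and when the pointer leaves the chart:
+ // NETDATA.globalSelectionSync.stop();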
+
+NETDATA.intersectionObserver = {
+ observer: null,
+ visible_targets: [],
+
+ options: {
+ root: null,
+ rootMargin: "0px",
+ threshold: null
+ },
+
+ enabled: function () {
+ return this.observer !== null;
+ },
+
+ globalReset: function () {
+ if (this.observer !== null) {
+ this.visible_targets = [];
+ this.observer.disconnect();
+ this.init();
+ }
+ },
+
+ targets: function () {
+ if (this.enabled() && this.visible_targets.length > 0) {
+ return this.visible_targets;
+ } else {
+ return NETDATA.options.targets;
+ }
+ },
+
+ switchChartVisibility: function () {
+ let old = this.__visibilityRatioOld;
+
+ if (old !== this.__visibilityRatio) {
+ if (old === 0 && this.__visibilityRatio > 0) {
+ this.unhideChart();
+ } else if (old > 0 && this.__visibilityRatio === 0) {
+ this.hideChart();
+ }
+
+ this.__visibilityRatioOld = this.__visibilityRatio;
+ }
+ },
+
+ handler: function (entries, observer) {
+ entries.forEach(function (entry) {
+ let state = NETDATA.chartState(entry.target);
+
+ let idx;
+ if (entry.intersectionRatio > 0) {
+ idx = NETDATA.intersectionObserver.visible_targets.indexOf(state);
+ if (idx === -1) {
+ if (NETDATA.scrollUp) {
+ NETDATA.intersectionObserver.visible_targets.push(state);
+ } else {
+ NETDATA.intersectionObserver.visible_targets.unshift(state);
+ }
+ }
+ else if (state.__visibilityRatio === 0) {
+ state.log("was not visible until now, but was already in visible_targets");
+ }
+ } else {
+ idx = NETDATA.intersectionObserver.visible_targets.indexOf(state);
+ if (idx !== -1) {
+ NETDATA.intersectionObserver.visible_targets.splice(idx, 1);
+ } else if (state.__visibilityRatio > 0) {
+ state.log("was visible, but not found in visible_targets");
+ }
+ }
+
+ state.__visibilityRatio = entry.intersectionRatio;
+
+ if (!NETDATA.options.current.async_on_scroll) {
+ if (window.requestIdleCallback) {
+ window.requestIdleCallback(function () {
+ NETDATA.intersectionObserver.switchChartVisibility.call(state);
+ }, {timeout: 100});
+ } else {
+ NETDATA.intersectionObserver.switchChartVisibility.call(state);
+ }
+ }
+ });
+ },
+
+ observe: function (state) {
+ if (this.enabled()) {
+ state.__visibilityRatioOld = 0;
+ state.__visibilityRatio = 0;
+ this.observer.observe(state.element);
+
+ state.isVisible = function () {
+ if (!NETDATA.options.current.update_only_visible) {
+ return true;
+ }
+
+ NETDATA.intersectionObserver.switchChartVisibility.call(this);
+
+ return this.__visibilityRatio > 0;
+ };
+ }
+ },
+
+ init: function () {
+ if (typeof netdataIntersectionObserver === 'undefined' || netdataIntersectionObserver) {
+ try {
+ this.observer = new IntersectionObserver(this.handler, this.options);
+ } catch (e) {
+ console.log("IntersectionObserver is not supported on this browser");
+ this.observer = null;
+ }
+ }
+ //else {
+ // console.log("IntersectionObserver is disabled");
+ //}
+ }
+};
+NETDATA.intersectionObserver.init();
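+
+ // Note: per the check in init() above, a page can opt out of the
+ // IntersectionObserver code path by defining, before dashboard.js loads:
+ //
+ // var netdataIntersectionObserver = false;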
+
+// ----------------------------------------------------------------------------------------------------------------
+// Our state object, where all per-chart values are stored
+
+let chartState = function (element) {
+ this.element = element;
+
+ // IMPORTANT:
+ // all private functions should use 'that', instead of 'this'
+ // Alternatively, you can use arrow functions (related issue #4514)
+ let that = this;
+
+ // ============================================================================================================
+ // ERROR HANDLING
+
+ /* error() - private
+ * show an error instead of the chart
+ */
+ let error = (msg) => {
+ let ret = true;
+
+ if (typeof netdataErrorCallback === 'function') {
+ ret = netdataErrorCallback('chart', this.id, msg);
+ }
+
+ if (ret) {
+ this.element.innerHTML = this.id + ': ' + msg;
+ this.enabled = false;
+ this.current = this.pan;
+ }
+ };
+
+ // console logging
+ this.log = function (msg) {
+ console.log(this.id + ' (' + this.library_name + ' ' + this.uuid + '): ' + msg);
+ };
+
+ this.debugLog = function (msg) {
+ if (this.debug) {
+ this.log(msg);
+ }
+ };
+
+ // ============================================================================================================
+ // EARLY INITIALIZATION
+
+ // These are variables that should exist even if the chart is never to be rendered.
+ // Be careful what you add here - there may be thousands of charts on the page.
+
+ // GUID - a unique identifier for the chart
+ this.uuid = NETDATA.guid();
+
+ // string - the name of the chart
+ this.id = NETDATA.dataAttribute(this.element, 'netdata', undefined);
+ if (typeof this.id === 'undefined') {
+ error("netdata elements need data-netdata");
+ return;
+ }
+
+ // string - the key for localStorage settings
+ this.settings_id = NETDATA.dataAttribute(this.element, 'id', null);
+
+ // the user given dimensions of the element
+ this.width = NETDATA.dataAttribute(this.element, 'width', NETDATA.chartDefaults.width);
+ this.height = NETDATA.dataAttribute(this.element, 'height', NETDATA.chartDefaults.height);
+ this.height_original = this.height;
+
+ if (this.settings_id !== null) {
+ this.height = NETDATA.localStorageGet('chart_heights.' + this.settings_id, this.height, function (height) {
+ // this is the callback that will be called
+ // if and when the user resets all localStorage variables
+ // to their defaults
+
+ resizeChartToHeight(height);
+ });
+ }
+
+ // the chart library requested by the user
+ this.library_name = NETDATA.dataAttribute(this.element, 'chart-library', NETDATA.chartDefaults.library);
+
+ // check that the requested library is available
+ // we don't initialize it here - it will be initialized when
+ // this chart is first used
+ if (typeof NETDATA.chartLibraries[this.library_name] === 'undefined') {
+ NETDATA.error(402, this.library_name);
+ error('chart library "' + this.library_name + '" is not found');
+ this.enabled = false;
+ } else if (!NETDATA.chartLibraries[this.library_name].enabled) {
+ NETDATA.error(403, this.library_name);
+ error('chart library "' + this.library_name + '" is not enabled');
+ this.enabled = false;
+ } else {
+ this.library = NETDATA.chartLibraries[this.library_name];
+ }
+
+ this.auto = {
+ name: 'auto',
+ autorefresh: true,
+ force_update_at: 0, // the timestamp to force the update at
+ force_before_ms: null,
+ force_after_ms: null
+ };
+ this.pan = {
+ name: 'pan',
+ autorefresh: false,
+ force_update_at: 0, // the timestamp to force the update at
+ force_before_ms: null,
+ force_after_ms: null
+ };
+ this.zoom = {
+ name: 'zoom',
+ autorefresh: false,
+ force_update_at: 0, // the timestamp to force the update at
+ force_before_ms: null,
+ force_after_ms: null
+ };
+
+ // this is a pointer to one of the mode objects above:
+ // auto, pan, zoom
+ this.current = this.auto;
+
+ this.running = false; // boolean - true when the chart is being refreshed now
+ this.enabled = true; // boolean - is the chart enabled for refresh?
+
+ this.force_update_every = null; // number - override the visualization update frequency of the chart
+
+ this.tmp = {};
+
+ this.foreignElementBefore = null;
+ this.foreignElementAfter = null;
+ this.foreignElementDuration = null;
+ this.foreignElementUpdateEvery = null;
+ this.foreignElementSelection = null;
+
+ // ============================================================================================================
+ // PRIVATE FUNCTIONS
+
+ // reset the runtime status variables to their defaults
+ const runtimeInit = () => {
+ this.paused = false; // boolean - is the chart paused for any reason?
+ this.selected = false; // boolean - is the chart showing a selection?
+
+ this.chart_created = false; // boolean - has library.create() been called?
+ this.dom_created = false; // boolean - has the chart DOM been created?
+ this.fetching_data = false; // boolean - true while we fetch data via ajax
+
+ this.updates_counter = 0; // numeric - the number of refreshes made so far
+ this.updates_since_last_unhide = 0; // numeric - the number of refreshes made since the last time the chart was unhidden
+ this.updates_since_last_creation = 0; // numeric - the number of refreshes made since the last time the chart was created
+
+ this.tm = {
+ last_initialized: 0, // milliseconds - the timestamp it was last initialized
+ last_dom_created: 0, // milliseconds - the timestamp its DOM was last created
+ last_mode_switch: 0, // milliseconds - the timestamp it switched modes
+
+ last_info_downloaded: 0, // milliseconds - the timestamp we downloaded the chart info
+ last_updated: 0, // the timestamp the chart last updated with data
+ pan_and_zoom_seq: 0, // the sequence number of the global synchronization
+ // between charts.
+ // Used with NETDATA.globalPanAndZoom.seq
+ last_visible_check: 0, // the time we last checked if it is visible
+ last_resized: 0, // the time the chart was resized
+ last_hidden: 0, // the time the chart was hidden
+ last_unhidden: 0, // the time the chart was unhidden
+ last_autorefreshed: 0 // the time the chart was last refreshed
+ };
+
+ this.data = null; // the last data as downloaded from the netdata server
+ this.data_url = 'invalid://'; // string - the last url used to update the chart
+ this.data_points = 0; // number - the number of points returned from netdata
+ this.data_after = 0; // milliseconds - the first timestamp of the data
+ this.data_before = 0; // milliseconds - the last timestamp of the data
+ this.data_update_every = 0; // milliseconds - the frequency to update the data
+
+ this.tmp = {}; // members that can be destroyed to save memory
+ };
+
+ // initialize all the variables that are required for the chart to be rendered
+ const lateInitialization = () => {
+ if (typeof this.host !== 'undefined') {
+ return;
+ }
+
+ // string - the netdata server URL, without any path
+ this.host = NETDATA.dataAttribute(this.element, 'host', NETDATA.serverDefault);
+
+ // make sure the host does not end with /
+ // all netdata API requests use absolute paths
+ while (this.host.slice(-1) === '/') {
+ this.host = this.host.substring(0, this.host.length - 1);
+ }
+
+ // string - the grouping method requested by the user
+ this.method = NETDATA.dataAttribute(this.element, 'method', NETDATA.chartDefaults.method);
+ this.gtime = NETDATA.dataAttribute(this.element, 'gtime', 0);
+
+ // the time-range requested by the user
+ this.after = NETDATA.dataAttribute(this.element, 'after', NETDATA.chartDefaults.after);
+ this.before = NETDATA.dataAttribute(this.element, 'before', NETDATA.chartDefaults.before);
+
+ // the pixels per point requested by the user
+ this.pixels_per_point = NETDATA.dataAttribute(this.element, 'pixels-per-point', 1);
+ this.points = NETDATA.dataAttribute(this.element, 'points', null);
+
+ // the forced update_every
+ this.force_update_every = NETDATA.dataAttribute(this.element, 'update-every', null);
+ if (typeof this.force_update_every !== 'number' || this.force_update_every <= 1) {
+ if (this.force_update_every !== null) {
+ this.log('ignoring invalid value of property data-update-every');
+ }
+
+ this.force_update_every = null;
+ } else {
+ this.force_update_every *= 1000;
+ }
+
+ // the dimensions requested by the user
+ this.dimensions = NETDATA.encodeURIComponent(NETDATA.dataAttribute(this.element, 'dimensions', null));
+
+ this.title = NETDATA.dataAttribute(this.element, 'title', null); // the title of the chart
+ this.units = NETDATA.dataAttribute(this.element, 'units', null); // the units of the chart dimensions
+ this.units_desired = NETDATA.dataAttribute(this.element, 'desired-units', NETDATA.options.current.units); // the units of the chart dimensions
+ this.units_current = this.units;
+ this.units_common = NETDATA.dataAttribute(this.element, 'common-units', null);
+
+ // additional options to pass to netdata
+ this.append_options = NETDATA.encodeURIComponent(NETDATA.dataAttribute(this.element, 'append-options', null));
+
+ // override options to pass to netdata
+ this.override_options = NETDATA.encodeURIComponent(NETDATA.dataAttribute(this.element, 'override-options', null));
+
+ this.debug = NETDATA.dataAttributeBoolean(this.element, 'debug', false);
+
+ this.value_decimal_detail = -1;
+ let d = NETDATA.dataAttribute(this.element, 'decimal-digits', -1);
+ if (typeof d === 'number') {
+ this.value_decimal_detail = d;
+ } else if (typeof d !== 'undefined') {
+ this.log('ignoring decimal-digits value: ' + d.toString());
+ }
+
+ // if we need to report the rendering speed
+ // find the element that needs to be updated
+ let refresh_dt_element_name = NETDATA.dataAttribute(this.element, 'dt-element-name', null); // string - the element to print refresh_dt_ms
+
+ if (refresh_dt_element_name !== null) {
+ this.refresh_dt_element = document.getElementById(refresh_dt_element_name) || null;
+ }
+ else {
+ this.refresh_dt_element = null;
+ }
+
+ this.dimensions_visibility = new dimensionsVisibility(that);
+
+ this.netdata_first = 0; // milliseconds - the first timestamp in netdata
+ this.netdata_last = 0; // milliseconds - the last timestamp in netdata
+ this.requested_after = null; // milliseconds - the timestamp of the request after param
+ this.requested_before = null; // milliseconds - the timestamp of the request before param
+ this.requested_padding = null;
+ this.view_after = 0;
+ this.view_before = 0;
+
+ this.refresh_dt_ms = 0; // milliseconds - the time the last refresh took
+
+ // how many retries we have made to load chart data from the server
+ this.retries_on_data_failures = 0;
+
+ // color management
+ this.colors = null;
+ this.colors_assigned = null;
+ this.colors_available = null;
+ this.colors_custom = null;
+
+ this.element_message = null; // the element already created by the user
+ this.element_chart = null; // the element with the chart
+ this.element_legend = null; // the element with the legend of the chart (if created by us)
+ this.element_legend_childs = {
+ content: null,
+ hidden: null,
+ title_date: null,
+ title_time: null,
+ title_units: null,
+ perfect_scroller: null, // the container to apply perfect scroller to
+ series: null
+ };
+
+ this.chart_url = null; // string - the url to download chart info
+ this.chart = null; // object - the chart as downloaded from the server
+
+ const getForeignElementById = (opt) => {
+ let id = NETDATA.dataAttribute(this.element, opt, null);
+ if (id === null) {
+ //this.log('option "' + opt + '" is undefined');
+ return null;
+ }
+
+ let el = document.getElementById(id);
+ if (el === null) { // getElementById() returns null, not undefined
+ this.log('cannot find an element with id "' + id.toString() + '"');
+ return null;
+ }
+
+ return el;
+ };
+
+ this.foreignElementBefore = getForeignElementById('show-before-at');
+ this.foreignElementAfter = getForeignElementById('show-after-at');
+ this.foreignElementDuration = getForeignElementById('show-duration-at');
+ this.foreignElementUpdateEvery = getForeignElementById('show-update-every-at');
+ this.foreignElementSelection = getForeignElementById('show-selection-at');
+ };
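+
+ // Illustrative markup for the data attributes read above (the values
+ // shown are examples, not defaults):
+ //
+ // <div data-netdata="system.cpu"
+ //      data-host="http://localhost:19999"
+ //      data-after="-600" data-before="0"
+ //      data-method="average" data-points="300"
+ //      data-dimensions="user,system"
+ //      data-title="CPU" data-units="percentage"
+ //      data-chart-library="dygraph"></div>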
+
+ const destroyDOM = () => {
+ if (!this.enabled) {
+ return;
+ }
+
+ if (this.debug) {
+ this.log('destroyDOM()');
+ }
+
+ // this.element.className = 'netdata-message icon';
+ // this.element.innerHTML = '<i class="fas fa-sync"></i> netdata';
+ this.element.innerHTML = '';
+ this.element_message = null;
+ this.element_legend = null;
+ this.element_chart = null;
+ this.element_legend_childs.series = null;
+
+ this.chart_created = false;
+ this.dom_created = false;
+
+ this.tm.last_resized = 0;
+ this.tm.last_dom_created = 0;
+ };
+
+ const maxMessageFontSize = () => {
+ let screenHeight = screen.height;
+ let el = this.element;
+
+ // normally we want a font size, as tall as the element
+ let h = el.clientHeight;
+
+ // but give it some air: 20%, or at least 5 pixels
+ let lost = Math.max(h * 0.2, 5);
+ h -= lost;
+
+ // center the text, vertically
+ let paddingTop = (lost - 5) / 2;
+
+ // but check the width too
+ // it should fit 10 characters in it
+ let w = el.clientWidth / 10;
+ if (h > w) {
+ paddingTop += (h - w) / 2;
+ h = w;
+ }
+
+ // and don't make it too huge
+ // 5% of the screen size is good
+ if (h > screenHeight / 20) {
+ paddingTop += (h - (screenHeight / 20)) / 2;
+ h = screenHeight / 20;
+ }
+
+ // set it
+ this.element_message.style.fontSize = h.toString() + 'px';
+ this.element_message.style.paddingTop = paddingTop.toString() + 'px';
+ };
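+
+ // Worked example for maxMessageFontSize() (illustrative numbers):
+ // for a 400x200px element on a 1080px-tall screen:
+ //   lost = max(200 * 0.2, 5) = 40 -> h = 160, paddingTop = 17.5
+ //   w = 400 / 10 = 40; h > w -> paddingTop += 60 -> 77.5, h = 40
+ //   screenHeight / 20 = 54; h <= 54, so the cap does not apply
+ // result: fontSize = 40px, paddingTop = 77.5px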
+
+ const showMessageIcon = (icon) => {
+ this.element_message.innerHTML = icon;
+ maxMessageFontSize();
+ $(this.element_message).removeClass('hidden');
+ this.tmp.___messageHidden___ = undefined;
+ };
+
+ const showLoading = () => {
+ if (!this.chart_created) {
+ showMessageIcon(NETDATA.icons.loading + ' netdata');
+ return true;
+ }
+ return false;
+ };
+
+ let createDOM = () => {
+ if (!this.enabled) {
+ return;
+ }
+ lateInitialization();
+
+ destroyDOM();
+
+ if (this.debug) {
+ this.log('createDOM()');
+ }
+
+ this.element_message = document.createElement('div');
+ this.element_message.className = 'netdata-message icon hidden';
+ this.element.appendChild(this.element_message);
+
+ this.dom_created = true;
+ this.chart_created = false;
+
+ this.tm.last_dom_created = this.tm.last_resized = Date.now();
+
+ showLoading();
+ };
+
+ const initDOM = () => {
+ this.element.className = this.library.container_class(that);
+
+ if (typeof(this.width) === 'string') {
+ this.element.style.width = this.width;
+ } else if (typeof(this.width) === 'number') {
+ this.element.style.width = this.width.toString() + 'px';
+ }
+
+ if (typeof(this.library.aspect_ratio) === 'undefined') {
+ if (typeof(this.height) === 'string') {
+ this.element.style.height = this.height;
+ } else if (typeof(this.height) === 'number') {
+ this.element.style.height = this.height.toString() + 'px';
+ }
+ }
+
+ if (NETDATA.chartDefaults.min_width !== null) {
+ this.element.style.minWidth = NETDATA.chartDefaults.min_width; // the CSS property is camelCase in JS (min_width would be ignored)
+ }
+ };
+
+ const invisibleSearchableText = () => {
+ return '<span style="position:absolute; opacity: 0; width: 0px;">' + this.id + '</span>';
+ };
+
+ /* init() private
+ * initialize state variables
+ * destroy all (possibly) created state elements
+ * create the basic DOM for a chart
+ */
+ const init = (opt) => {
+ if (!this.enabled) {
+ return;
+ }
+
+ runtimeInit();
+ this.element.innerHTML = invisibleSearchableText();
+
+ this.tm.last_initialized = Date.now();
+ this.setMode('auto');
+
+ if (opt !== 'fast') {
+ if (this.isVisible(true) || opt === 'force') {
+ createDOM();
+ }
+ }
+ };
+
+ const hideMessage = () => {
+ if (typeof this.tmp.___messageHidden___ === 'undefined') {
+ this.tmp.___messageHidden___ = true;
+ $(this.element_message).addClass('hidden');
+ }
+ };
+
+ const showRendering = () => {
+ let icon;
+ if (this.chart !== null) {
+ if (this.chart.chart_type === 'line') {
+ icon = NETDATA.icons.lineChart;
+ } else {
+ icon = NETDATA.icons.areaChart;
+ }
+ }
+ else {
+ icon = NETDATA.icons.noChart;
+ }
+
+ showMessageIcon(icon + ' netdata' + invisibleSearchableText());
+ };
+
+ const isHidden = () => {
+ return (typeof this.tmp.___chartIsHidden___ !== 'undefined');
+ };
+
+ // hide the chart, when it is not visible - called from isVisible()
+ this.hideChart = function () {
+ // hide it, if it is not already hidden
+ if (isHidden()) {
+ return;
+ }
+
+ if (this.chart_created) {
+ if (NETDATA.options.current.show_help) {
+ if (this.element_legend_childs.toolbox !== null) {
+ if (this.debug) {
+ this.log('hideChart(): hiding legend popovers');
+ }
+
+ $(this.element_legend_childs.toolbox_left).popover('hide');
+ $(this.element_legend_childs.toolbox_reset).popover('hide');
+ $(this.element_legend_childs.toolbox_right).popover('hide');
+ $(this.element_legend_childs.toolbox_zoomin).popover('hide');
+ $(this.element_legend_childs.toolbox_zoomout).popover('hide');
+ }
+
+ if (this.element_legend_childs.resize_handler !== null) {
+ $(this.element_legend_childs.resize_handler).popover('hide');
+ }
+
+ if (this.element_legend_childs.content !== null) {
+ $(this.element_legend_childs.content).popover('hide');
+ }
+ }
+
+ if (NETDATA.options.current.destroy_on_hide) {
+ if (this.debug) {
+ this.log('hideChart(): initializing chart');
+ }
+
+ // we should destroy it
+ init('force');
+ } else {
+ if (this.debug) {
+ this.log('hideChart(): hiding chart');
+ }
+
+ showRendering();
+ this.element_chart.style.display = 'none';
+ this.element.style.willChange = 'auto';
+ if (this.element_legend !== null) {
+ this.element_legend.style.display = 'none';
+ }
+ if (this.element_legend_childs.toolbox !== null) {
+ this.element_legend_childs.toolbox.style.display = 'none';
+ }
+ if (this.element_legend_childs.resize_handler !== null) {
+ this.element_legend_childs.resize_handler.style.display = 'none';
+ }
+
+ this.tm.last_hidden = Date.now();
+
+ // de-allocate data
+ // This works, but I am not sure there are no corner cases somewhere,
+ // so it is commented out - if the user has memory issues, they can
+ // set Destroy on Hide for all charts
+ // this.data = null;
+ }
+ }
+
+ this.tmp.___chartIsHidden___ = true;
+ };
+
+ // unhide the chart, when it is visible - called from isVisible()
+ this.unhideChart = function () {
+ if (!isHidden()) {
+ return;
+ }
+
+ this.tmp.___chartIsHidden___ = undefined;
+ this.updates_since_last_unhide = 0;
+
+ if (!this.chart_created) {
+ if (this.debug) {
+ this.log('unhideChart(): initializing chart');
+ }
+
+ // we need to re-initialize it, to show our background
+ // logo in bootstrap tabs, until the chart loads
+ init('force');
+ } else {
+ if (this.debug) {
+ this.log('unhideChart(): unhiding chart');
+ }
+
+ this.element.style.willChange = 'transform';
+ this.tm.last_unhidden = Date.now();
+ this.element_chart.style.display = '';
+ if (this.element_legend !== null) {
+ this.element_legend.style.display = '';
+ }
+ if (this.element_legend_childs.toolbox !== null) {
+ this.element_legend_childs.toolbox.style.display = '';
+ }
+ if (this.element_legend_childs.resize_handler !== null) {
+ this.element_legend_childs.resize_handler.style.display = '';
+ }
+ resizeChart();
+ hideMessage();
+ }
+
+ if (this.__redraw_on_unhide) {
+ if (this.debug) {
+ this.log("redrawing chart on unhide");
+ }
+
+ this.__redraw_on_unhide = undefined;
+ this.redrawChart();
+ }
+ };
+
+ const canBeRendered = (uncached_visibility) => {
+ if (this.debug) {
+ this.log('canBeRendered() called');
+ }
+
+ if (!NETDATA.options.current.update_only_visible) {
+ return true;
+ }
+
+ let ret = (
+ (
+ NETDATA.options.page_is_visible ||
+ NETDATA.options.current.stop_updates_when_focus_is_lost === false ||
+ this.updates_since_last_unhide === 0
+ )
+ && isHidden() === false && this.isVisible(uncached_visibility)
+ );
+
+ if (this.debug) {
+ this.log('canBeRendered(): ' + ret);
+ }
+
+ return ret;
+ };
+
+ // https://github.com/petkaantonov/bluebird/wiki/Optimization-killers
+ const callChartLibraryUpdateSafely = (data) => {
+ let status;
+
+ // we should not do this here
+ // if we prevent rendering the chart then:
+ // 1. globalSelectionSync will be wrong
+ // 2. globalPanAndZoom will be wrong
+ //if (canBeRendered(true) === false)
+ // return false;
+
+ if (NETDATA.options.fake_chart_rendering) {
+ return true;
+ }
+
+ this.updates_counter++;
+ this.updates_since_last_unhide++;
+ this.updates_since_last_creation++;
+
+ if (NETDATA.options.debug.chart_errors) {
+ status = this.library.update(that, data);
+ } else {
+ try {
+ status = this.library.update(that, data);
+ } catch (err) {
+ status = false;
+ }
+ }
+
+ if (!status) {
+ error('chart failed to be updated as ' + this.library_name);
+ return false;
+ }
+
+ return true;
+ };
+
+ // https://github.com/petkaantonov/bluebird/wiki/Optimization-killers
+ const callChartLibraryCreateSafely = (data) => {
+ let status;
+
+ // we should not do this here
+ // if we prevent rendering the chart then:
+ // 1. globalSelectionSync will be wrong
+ // 2. globalPanAndZoom will be wrong
+ //if (canBeRendered(true) === false)
+ // return false;
+
+ if (NETDATA.options.fake_chart_rendering) {
+ return true;
+ }
+
+ this.updates_counter++;
+ this.updates_since_last_unhide++;
+ this.updates_since_last_creation++;
+
+ if (NETDATA.options.debug.chart_errors) {
+ status = this.library.create(that, data);
+ } else {
+ try {
+ status = this.library.create(that, data);
+ } catch (err) {
+ status = false;
+ }
+ }
+
+ if (!status) {
+ error('chart failed to be created as ' + this.library_name);
+ return false;
+ }
+
+ this.chart_created = true;
+ this.updates_since_last_creation = 0;
+ return true;
+ };
+
+ // ----------------------------------------------------------------------------------------------------------------
+ // Chart Resize
+
+ // resizeChart() - private
+ // to be called just before the chart library to make sure that
+ // a properly sized dom is available
+ const resizeChart = () => {
+ if (this.tm.last_resized < NETDATA.options.last_page_resize) {
+ if (!this.chart_created) {
+ return;
+ }
+
+ if (this.needsRecreation()) {
+ if (this.debug) {
+ this.log('resizeChart(): initializing chart');
+ }
+
+ init('force');
+ } else if (typeof this.library.resize === 'function') {
+ if (this.debug) {
+ this.log('resizeChart(): resizing chart');
+ }
+
+ this.library.resize(that);
+
+ if (this.element_legend_childs.perfect_scroller !== null) {
+ Ps.update(this.element_legend_childs.perfect_scroller);
+ }
+
+ maxMessageFontSize();
+ }
+
+ this.tm.last_resized = Date.now();
+ }
+ };
+
+ // this is the actual chart resize algorithm
+ // it will:
+ // - resize the entire container
+ // - update the internal states
+ // - resize the chart as the div changes height
+ // - update the scrollbar of the legend
+ const resizeChartToHeight = (h) => {
+ // console.log(h);
+ this.element.style.height = h;
+
+ if (this.settings_id !== null) {
+ NETDATA.localStorageSet('chart_heights.' + this.settings_id, h);
+ }
+
+ let now = Date.now();
+ NETDATA.options.last_page_scroll = now;
+ NETDATA.options.auto_refresher_stop_until = now + NETDATA.options.current.stop_updates_while_resizing;
+
+ // force a resize
+ this.tm.last_resized = 0;
+ resizeChart();
+ };
+
+ this.resizeForPrint = function () {
+ if (typeof this.element_legend_childs !== 'undefined' && this.element_legend_childs.perfect_scroller !== null) {
+ let current = this.element.clientHeight;
+ let optimal = current
+ + this.element_legend_childs.perfect_scroller.scrollHeight
+ - this.element_legend_childs.perfect_scroller.clientHeight;
+
+ if (optimal > current) {
+ // this.log('resized');
+ this.element.style.height = optimal + 'px';
+ this.library.resize(this);
+ }
+ }
+ };
+
+ this.resizeHandler = function (e) {
+ e.preventDefault();
+
+ if (typeof this.event_resize === 'undefined'
+ || typeof this.event_resize.chart_original_w === 'undefined'
+ || typeof this.event_resize.chart_original_h === 'undefined') {
+ this.event_resize = {
+ chart_original_w: this.element.clientWidth,
+ chart_original_h: this.element.clientHeight,
+ last: 0
+ };
+ }
+
+ if (e.type === 'touchstart') {
+ this.event_resize.mouse_start_x = e.touches.item(0).pageX;
+ this.event_resize.mouse_start_y = e.touches.item(0).pageY;
+ } else {
+ this.event_resize.mouse_start_x = e.clientX;
+ this.event_resize.mouse_start_y = e.clientY;
+ }
+
+ this.event_resize.chart_start_w = this.element.clientWidth;
+ this.event_resize.chart_start_h = this.element.clientHeight;
+ this.event_resize.chart_last_w = this.element.clientWidth;
+ this.event_resize.chart_last_h = this.element.clientHeight;
+
+ let now = Date.now();
+ if (now - this.event_resize.last <= NETDATA.options.current.double_click_speed && this.element_legend_childs.perfect_scroller !== null) {
+ // double click / double tap event
+
+ // console.dir(this.element_legend_childs.content);
+ // console.dir(this.element_legend_childs.perfect_scroller);
+
+ // the optimal height of the chart
+ // showing the entire legend
+ let optimal = this.event_resize.chart_last_h
+ + this.element_legend_childs.perfect_scroller.scrollHeight
+ - this.element_legend_childs.perfect_scroller.clientHeight;
+
+ // if we are not optimal, be optimal
+ if (this.event_resize.chart_last_h !== optimal) {
+ // this.log('resize to optimal, current = ' + this.event_resize.chart_last_h.toString() + 'px, original = ' + this.event_resize.chart_original_h.toString() + 'px, optimal = ' + optimal.toString() + 'px, internal = ' + this.height_original.toString());
+ resizeChartToHeight(optimal.toString() + 'px');
+ }
+
+ // else if the current height is not the original/saved height
+ // reset to the original/saved height
+ else if (this.event_resize.chart_last_h !== this.event_resize.chart_original_h) {
+ // this.log('resize to original, current = ' + this.event_resize.chart_last_h.toString() + 'px, original = ' + this.event_resize.chart_original_h.toString() + 'px, optimal = ' + optimal.toString() + 'px, internal = ' + this.height_original.toString());
+ resizeChartToHeight(this.event_resize.chart_original_h.toString() + 'px');
+ }
+
+ // else if the current height is not the internal default height
+ // reset to the internal default height
+ else if ((this.event_resize.chart_last_h.toString() + 'px') !== this.height_original) {
+ // this.log('resize to internal default, current = ' + this.event_resize.chart_last_h.toString() + 'px, original = ' + this.event_resize.chart_original_h.toString() + 'px, optimal = ' + optimal.toString() + 'px, internal = ' + this.height_original.toString());
+ resizeChartToHeight(this.height_original.toString());
+ }
+
+ // else if the current height is not the firstchild's clientheight
+ // resize to it
+ else if (typeof this.element_legend_childs.perfect_scroller.firstChild !== 'undefined') {
+ let parent_rect = this.element.getBoundingClientRect();
+ let content_rect = this.element_legend_childs.perfect_scroller.firstElementChild.getBoundingClientRect();
+ let wanted = content_rect.top - parent_rect.top + this.element_legend_childs.perfect_scroller.firstChild.clientHeight + 18; // 18 = toolbox (15) + spacing (3)
+
+ // console.log(parent_rect);
+ // console.log(content_rect);
+ // console.log(wanted);
+
+ // this.log('resize to firstChild, current = ' + this.event_resize.chart_last_h.toString() + 'px, original = ' + this.event_resize.chart_original_h.toString() + 'px, optimal = ' + optimal.toString() + 'px, internal = ' + this.height_original.toString() + 'px, firstChild = ' + wanted.toString() + 'px' );
+ if (this.event_resize.chart_last_h !== wanted) {
+ resizeChartToHeight(wanted.toString() + 'px');
+ }
+ }
+ } else {
+ this.event_resize.last = now;
+
+ // process movement event
+ document.onmousemove =
+ document.ontouchmove =
+ this.element_legend_childs.resize_handler.onmousemove =
+ this.element_legend_childs.resize_handler.ontouchmove =
+ function (e) {
+ let y = null;
+
+ switch (e.type) {
+ case 'mousemove':
+ y = e.clientY;
+ break;
+ case 'touchmove':
+ y = e.touches.item(e.touches.length - 1).pageY; // use the last touch point
+ break;
+ }
+
+ if (y !== null) {
+ let newH = that.event_resize.chart_start_h + y - that.event_resize.mouse_start_y;
+
+ if (newH >= 70 && newH !== that.event_resize.chart_last_h) {
+ resizeChartToHeight(newH.toString() + 'px');
+ that.event_resize.chart_last_h = newH;
+ }
+ }
+ };
+
+ // process end event
+ document.onmouseup =
+ document.ontouchend =
+ this.element_legend_childs.resize_handler.onmouseup =
+ this.element_legend_childs.resize_handler.ontouchend =
+ function (e) {
+ void(e);
+
+ // remove all the hooks
+ document.onmouseup =
+ document.onmousemove =
+ document.ontouchmove =
+ document.ontouchend =
+ that.element_legend_childs.resize_handler.onmousemove =
+ that.element_legend_childs.resize_handler.ontouchmove =
+ that.element_legend_childs.resize_handler.onmouseout =
+ that.element_legend_childs.resize_handler.onmouseup =
+ that.element_legend_childs.resize_handler.ontouchend =
+ null;
+
+ // allow auto-refreshes
+ NETDATA.options.auto_refresher_stop_until = 0;
+ };
+ }
+ };
+
+ const noDataToShow = () => {
+ showMessageIcon(NETDATA.icons.noData + ' empty');
+ this.legendUpdateDOM();
+ this.tm.last_autorefreshed = Date.now();
+ // this.data_update_every = 30 * 1000;
+ //this.element_chart.style.display = 'none';
+ //if (this.element_legend !== null) this.element_legend.style.display = 'none';
+ //this.tmp.___chartIsHidden___ = true;
+ };
+
+ // ============================================================================================================
+ // PUBLIC FUNCTIONS
+
+ this.error = function (msg) {
+ error(msg);
+ };
+
+ this.setMode = function (m) {
+ if (this.current !== null && this.current.name === m) {
+ return;
+ }
+
+ if (m === 'auto') {
+ this.current = this.auto;
+ } else if (m === 'pan') {
+ this.current = this.pan;
+ } else if (m === 'zoom') {
+ this.current = this.zoom;
+ } else {
+ this.current = this.auto;
+ }
+
+ this.current.force_update_at = 0;
+ this.current.force_before_ms = null;
+ this.current.force_after_ms = null;
+
+ this.tm.last_mode_switch = Date.now();
+ };
+
+ // ----------------------------------------------------------------------------------------------------------------
+ // global selection sync for slaves
+
+ // can the chart participate in the global selection sync as a slave?
+ this.globalSelectionSyncIsEligible = function () {
+ return (
+ this.enabled &&
+ this.library !== null &&
+ typeof this.library.setSelection === 'function' &&
+ this.isVisible() &&
+ this.chart_created
+ );
+ };
+
+ this.setSelection = function (t) {
+ if (typeof this.library.setSelection === 'function') {
+ // this.selected = this.library.setSelection(this, t) === true;
+ this.selected = this.library.setSelection(this, t);
+ } else {
+ this.selected = true;
+ }
+
+ if (this.selected && this.debug) {
+ this.log('selection set to ' + t.toString());
+ }
+
+ if (this.foreignElementSelection !== null) {
+ this.foreignElementSelection.innerText = NETDATA.dateTime.localeDateString(t) + ' ' + NETDATA.dateTime.localeTimeString(t);
+ }
+
+ return this.selected;
+ };
+
+ this.clearSelection = function () {
+ if (this.selected) {
+ if (typeof this.library.clearSelection === 'function') {
+ this.selected = (this.library.clearSelection(this) !== true);
+ } else {
+ this.selected = false;
+ }
+
+ if (this.selected === false && this.debug) {
+ this.log('selection cleared');
+ }
+
+ if (this.foreignElementSelection !== null) {
+ this.foreignElementSelection.innerText = '';
+ }
+
+ this.legendReset();
+ }
+
+ return this.selected;
+ };
+
+ // ----------------------------------------------------------------------------------------------------------------
+
+ // find if a timestamp (ms) is shown in the current chart
+ this.timeIsVisible = function (t) {
+ return (t >= this.data_after && t <= this.data_before);
+ };
+
+ this.calculateRowForTime = function (t) {
+ if (!this.timeIsVisible(t)) {
+ return -1;
+ }
+ return Math.floor((t - this.data_after) / this.data_update_every);
+ };
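+
+ // Worked example (illustrative numbers): with data_after = 1000000,
+ // data_before = 1060000 and data_update_every = 1000 (all in ms),
+ // timeIsVisible(1005500) is true and
+ // calculateRowForTime(1005500) = floor((1005500 - 1000000) / 1000) = 5.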
+
+ // ----------------------------------------------------------------------------------------------------------------
+
+ this.pauseChart = function () {
+ if (!this.paused) {
+ if (this.debug) {
+ this.log('pauseChart()');
+ }
+
+ this.paused = true;
+ }
+ };
+
+ this.unpauseChart = function () {
+ if (this.paused) {
+ if (this.debug) {
+ this.log('unpauseChart()');
+ }
+
+ this.paused = false;
+ }
+ };
+
+ this.resetChart = function (dontClearMaster, dontUpdate) {
+ if (this.debug) {
+ this.log('resetChart(' + dontClearMaster + ', ' + dontUpdate + ') called');
+ }
+
+ if (typeof dontClearMaster === 'undefined') {
+ dontClearMaster = false;
+ }
+
+ if (typeof dontUpdate === 'undefined') {
+ dontUpdate = false;
+ }
+
+ if (dontClearMaster !== true && NETDATA.globalPanAndZoom.isMaster(this)) {
+ if (this.debug) {
+ this.log('resetChart() diverting to clearMaster().');
+ }
+ // this will call us back with master === true
+ NETDATA.globalPanAndZoom.clearMaster();
+ return;
+ }
+
+ this.clearSelection();
+
+ this.tm.pan_and_zoom_seq = 0;
+
+ this.setMode('auto');
+ this.current.force_update_at = 0;
+ this.current.force_before_ms = null;
+ this.current.force_after_ms = null;
+ this.tm.last_autorefreshed = 0;
+ this.paused = false;
+ this.selected = false;
+ this.enabled = true;
+ // this.debug = false;
+
+ // do not update the chart here
+ // or the chart will flip-flop when it is the master
+ // of a selection sync and another chart becomes
+ // the new master
+
+ if (dontUpdate !== true && this.isVisible()) {
+ this.updateChart();
+ }
+ };
+
+ this.updateChartPanOrZoom = function (after, before, callback) {
+ let logme = 'updateChartPanOrZoom(' + after + ', ' + before + '): ';
+ let ret = true;
+
+ NETDATA.globalPanAndZoom.delay();
+ NETDATA.globalSelectionSync.delay();
+
+ if (this.debug) {
+ this.log(logme);
+ }
+
+ if (before < after) {
+ if (this.debug) {
+ this.log(logme + 'flipped parameters, rejecting it.');
+ }
+ return false;
+ }
+
+ if (typeof this.fixed_min_duration === 'undefined') {
+ this.fixed_min_duration = Math.round((this.chartWidth() / 30) * this.chart.update_every * 1000);
+ }
+
+ let min_duration = this.fixed_min_duration;
+ let current_duration = Math.round(this.view_before - this.view_after);
+
+ // round the numbers
+ after = Math.round(after);
+ before = Math.round(before);
+
+ // align them to update_every
+ // stretching them further away
+ after -= after % this.data_update_every;
+ before += this.data_update_every - (before % this.data_update_every);
+
+ // the final wanted duration
+ let wanted_duration = before - after;
+
+ // to allow panning, accept a duration just one data point below the current one
+ if ((current_duration - this.data_update_every) < min_duration) {
+ min_duration = current_duration - this.data_update_every;
+ }
+
+ // we do it, but we adjust to the minimum size and return false,
+ // when the wanted duration is below both the current and the minimum
+ // durations and we are zooming
+ if (wanted_duration < current_duration && wanted_duration < min_duration) {
+ if (this.debug) {
+ this.log(logme + 'too small: min_duration: ' + (min_duration / 1000).toString() + ', wanted: ' + (wanted_duration / 1000).toString());
+ }
+
+ min_duration = this.fixed_min_duration;
+
+ let dt = (min_duration - wanted_duration) / 2;
+ before += dt;
+ after -= dt;
+ wanted_duration = before - after;
+ ret = false;
+ }
+
+ let tolerance = this.data_update_every * 2;
+ let movement = Math.abs(before - this.view_before);
+
+ if (Math.abs(current_duration - wanted_duration) <= tolerance && movement <= tolerance && ret) {
+ if (this.debug) {
+ this.log(logme + 'REJECTING UPDATE: current/min duration: ' + (current_duration / 1000).toString() + '/' + (this.fixed_min_duration / 1000).toString() + ', wanted duration: ' + (wanted_duration / 1000).toString() + ', duration diff: ' + (Math.round(Math.abs(current_duration - wanted_duration) / 1000)).toString() + ', movement: ' + (movement / 1000).toString() + ', tolerance: ' + (tolerance / 1000).toString() + ', returning: ' + false);
+ }
+ return false;
+ }
+
+ if (this.current.name === 'auto') {
+ this.log(logme + 'caller called me with mode: ' + this.current.name);
+ this.setMode('pan');
+ }
+
+ if (this.debug) {
+ this.log(logme + 'ACCEPTING UPDATE: current/min duration: ' + (current_duration / 1000).toString() + '/' + (this.fixed_min_duration / 1000).toString() + ', wanted duration: ' + (wanted_duration / 1000).toString() + ', duration diff: ' + (Math.round(Math.abs(current_duration - wanted_duration) / 1000)).toString() + ', movement: ' + (movement / 1000).toString() + ', tolerance: ' + (tolerance / 1000).toString() + ', returning: ' + ret);
+ }
+
+ this.current.force_update_at = Date.now() + NETDATA.options.current.pan_and_zoom_delay;
+ this.current.force_after_ms = after;
+ this.current.force_before_ms = before;
+ NETDATA.globalPanAndZoom.setMaster(this, after, before);
+
+ if (ret && typeof callback === 'function') {
+ callback();
+ }
+
+ return ret;
+ };
+
+ this.updateChartPanOrZoomAsyncTimeOutId = undefined;
+ this.updateChartPanOrZoomAsync = function (after, before, callback) {
+ NETDATA.globalPanAndZoom.delay();
+ NETDATA.globalSelectionSync.delay();
+
+ if (!NETDATA.globalPanAndZoom.isMaster(this)) {
+ this.pauseChart();
+ NETDATA.globalPanAndZoom.setMaster(this, after, before);
+ // NETDATA.globalSelectionSync.stop();
+ NETDATA.globalSelectionSync.setMaster(this);
+ }
+
+ if (this.updateChartPanOrZoomAsyncTimeOutId) {
+ NETDATA.timeout.clear(this.updateChartPanOrZoomAsyncTimeOutId);
+ }
+
+ NETDATA.timeout.set(function () {
+ that.updateChartPanOrZoomAsyncTimeOutId = undefined;
+ that.updateChartPanOrZoom(after, before, callback);
+ }, 0);
+ };
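+
+ // note: the async variant above acts as a debounce - repeated calls
+ // while panning cancel the pending timeout, so only the last requested
+ // window is actually applied, e.g. (sketch):
+ //     state.updateChartPanOrZoomAsync(after1, before1); // cancelled
+ //     state.updateChartPanOrZoomAsync(after2, before2); // this one runs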
+
+ let _unitsConversionLastUnits = undefined;
+ let _unitsConversionLastUnitsDesired = undefined;
+ let _unitsConversionLastMin = undefined;
+ let _unitsConversionLastMax = undefined;
+ let _unitsConversion = function (value) {
+ return value;
+ };
+ this.unitsConversionSetup = function (min, max) {
+ if (this.units !== _unitsConversionLastUnits
+ || this.units_desired !== _unitsConversionLastUnitsDesired
+ || min !== _unitsConversionLastMin
+ || max !== _unitsConversionLastMax) {
+
+ _unitsConversionLastUnits = this.units;
+ _unitsConversionLastUnitsDesired = this.units_desired;
+ _unitsConversionLastMin = min;
+ _unitsConversionLastMax = max;
+
+ _unitsConversion = NETDATA.unitsConversion.get(this.uuid, min, max, this.units, this.units_desired, this.units_common, function (units) {
+ // console.log('switching units from ' + that.units.toString() + ' to ' + units.toString());
+ that.units_current = units;
+ that.legendSetUnitsString(that.units_current);
+ });
+ }
+ };
+
+ let _legendFormatValueChartDecimalsLastMin = undefined;
+ let _legendFormatValueChartDecimalsLastMax = undefined;
+ let _legendFormatValueChartDecimals = -1;
+ let _intlNumberFormat = null;
+ this.legendFormatValueDecimalsFromMinMax = function (min, max) {
+ if (min === _legendFormatValueChartDecimalsLastMin && max === _legendFormatValueChartDecimalsLastMax) {
+ return;
+ }
+
+ this.unitsConversionSetup(min, max);
+ if (_unitsConversion !== null) {
+ min = _unitsConversion(min);
+ max = _unitsConversion(max);
+
+ if (typeof min !== 'number' || typeof max !== 'number') {
+ return;
+ }
+ }
+
+ _legendFormatValueChartDecimalsLastMin = min;
+ _legendFormatValueChartDecimalsLastMax = max;
+
+ let old = _legendFormatValueChartDecimals;
+
+ if (this.data !== null && this.data.min === this.data.max) {
+ // it is a fixed number, let the visualizer decide based on the value
+ _legendFormatValueChartDecimals = -1;
+ } else if (this.value_decimal_detail !== -1) {
+ // there is an override
+ _legendFormatValueChartDecimals = this.value_decimal_detail;
+ } else {
+ // ok, let's calculate the proper number of decimal points
+ let delta;
+
+ if (min === max) {
+ delta = Math.abs(min);
+ } else {
+ delta = Math.abs(max - min);
+ }
+
+ if (delta > 1000) {
+ _legendFormatValueChartDecimals = 0;
+ } else if (delta > 10) {
+ _legendFormatValueChartDecimals = 1;
+ } else if (delta > 1) {
+ _legendFormatValueChartDecimals = 2;
+ } else if (delta > 0.1) {
+ _legendFormatValueChartDecimals = 2;
+ } else if (delta > 0.01) {
+ _legendFormatValueChartDecimals = 4;
+ } else if (delta > 0.001) {
+ _legendFormatValueChartDecimals = 5;
+ } else if (delta > 0.0001) {
+ _legendFormatValueChartDecimals = 6;
+ } else {
+ _legendFormatValueChartDecimals = 7;
+ }
+ }
+
+ if (_legendFormatValueChartDecimals !== old) {
+ if (_legendFormatValueChartDecimals < 0) {
+ _intlNumberFormat = null;
+ } else {
+ _intlNumberFormat = NETDATA.fastNumberFormat.get(
+ _legendFormatValueChartDecimals,
+ _legendFormatValueChartDecimals
+ );
+ }
+ }
+ };
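+
+ // illustrative mapping of the logic above (hypothetical ranges):
+ // a chart spanning min = 0, max = 0.05 has delta = 0.05, which falls
+ // in the (0.01, 0.1] bucket and therefore renders legend values with
+ // 4 decimal digits; a chart spanning 0..2500 (delta > 1000) uses 0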
+
+ this.legendFormatValue = function (value) {
+ if (typeof value !== 'number') {
+ return '-';
+ }
+
+ value = _unitsConversion(value);
+
+ if (typeof value !== 'number') {
+ return value;
+ }
+
+ if (_intlNumberFormat !== null) {
+ return _intlNumberFormat.format(value);
+ }
+
+ let dmin, dmax;
+ if (this.value_decimal_detail !== -1) {
+ dmin = dmax = this.value_decimal_detail;
+ } else {
+ dmin = 0;
+ let abs = (value < 0) ? -value : value;
+ if (abs > 1000) {
+ dmax = 0;
+ } else if (abs > 10) {
+ dmax = 1;
+ } else if (abs > 1) {
+ dmax = 2;
+ } else if (abs > 0.1) {
+ dmax = 2;
+ } else if (abs > 0.01) {
+ dmax = 4;
+ } else if (abs > 0.001) {
+ dmax = 5;
+ } else if (abs > 0.0001) {
+ dmax = 6;
+ } else {
+ dmax = 7;
+ }
+ }
+
+ return NETDATA.fastNumberFormat.get(dmin, dmax).format(value);
+ };
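+
+ // usage sketch (hypothetical value): for a chart in its default state
+ // (value_decimal_detail === -1), legendFormatValue(1234.5678) picks
+ // dmax = 0 (abs > 1000) and returns the locale-formatted '1,235'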
+
+ this.legendSetLabelValue = function (label, value) {
+ let series = this.element_legend_childs.series[label];
+ if (typeof series === 'undefined') {
+ return;
+ }
+ if (series.value === null && series.user === null) {
+ return;
+ }
+
+ /*
+ // this slows down firefox and edge significantly
+ // since it requires to use innerHTML(), instead of innerText()
+
+ // if the value has not changed, skip DOM update
+ //if (series.last === value) return;
+
+ let s, r;
+ if (typeof value === 'number') {
+ let v = Math.abs(value);
+ s = r = this.legendFormatValue(value);
+
+ if (typeof series.last === 'number') {
+ if (v > series.last) s += '<i class="fas fa-angle-up" style="width: 8px; text-align: center; overflow: hidden; vertical-align: middle;"></i>';
+ else if (v < series.last) s += '<i class="fas fa-angle-down" style="width: 8px; text-align: center; overflow: hidden; vertical-align: middle;"></i>';
+ else s += '<i class="fas fa-angle-left" style="width: 8px; text-align: center; overflow: hidden; vertical-align: middle;"></i>';
+ }
+ else s += '<i class="fas fa-angle-right" style="width: 8px; text-align: center; overflow: hidden; vertical-align: middle;"></i>';
+
+ series.last = v;
+ }
+ else {
+ if (value === null)
+ s = r = '';
+ else
+ s = r = value;
+
+ series.last = value;
+ }
+ */
+
+ let s = this.legendFormatValue(value);
+
+ // caching: do not update the DOM to show the same value again
+ if (s === series.last_shown_value) {
+ return;
+ }
+ series.last_shown_value = s;
+
+ if (series.value !== null) {
+ series.value.innerText = s;
+ }
+ if (series.user !== null) {
+ series.user.innerText = s;
+ }
+ };
+
+ this.legendSetDateString = function (date) {
+ if (this.element_legend_childs.title_date !== null && date !== this.tmp.__last_shown_legend_date) {
+ this.element_legend_childs.title_date.innerText = date;
+ this.tmp.__last_shown_legend_date = date;
+ }
+ };
+
+ this.legendSetTimeString = function (time) {
+ if (this.element_legend_childs.title_time !== null && time !== this.tmp.__last_shown_legend_time) {
+ this.element_legend_childs.title_time.innerText = time;
+ this.tmp.__last_shown_legend_time = time;
+ }
+ };
+
+ this.legendSetUnitsString = function (units) {
+ if (this.element_legend_childs.title_units !== null && units !== this.tmp.__last_shown_legend_units) {
+ this.element_legend_childs.title_units.innerText = units;
+ this.tmp.__last_shown_legend_units = units;
+ }
+ };
+
+ this.legendSetDateLast = {
+ ms: 0,
+ date: undefined,
+ time: undefined
+ };
+
+ this.legendSetDate = function (ms) {
+ if (typeof ms !== 'number') {
+ this.legendShowUndefined();
+ return;
+ }
+
+ if (this.legendSetDateLast.ms !== ms) {
+ let d = new Date(ms);
+ this.legendSetDateLast.ms = ms;
+ this.legendSetDateLast.date = NETDATA.dateTime.localeDateString(d);
+ this.legendSetDateLast.time = NETDATA.dateTime.localeTimeString(d);
+ }
+
+ this.legendSetDateString(this.legendSetDateLast.date);
+ this.legendSetTimeString(this.legendSetDateLast.time);
+ this.legendSetUnitsString(this.units_current);
+ };
+
+ this.legendShowUndefined = function () {
+ this.legendSetDateString(this.legendPluginModuleString(false));
+ this.legendSetTimeString(this.chart.context.toString());
+ // this.legendSetUnitsString(' ');
+
+ if (this.data && this.element_legend_childs.series !== null) {
+ let labels = this.data.dimension_names;
+ let i = labels.length;
+ while (i--) {
+ let label = labels[i];
+
+ if (typeof label === 'undefined' || typeof this.element_legend_childs.series[label] === 'undefined') {
+ continue;
+ }
+ this.legendSetLabelValue(label, null);
+ }
+ }
+ };
+
+ this.legendShowLatestValues = function () {
+ if (this.chart === null) {
+ return;
+ }
+ if (this.selected) {
+ return;
+ }
+
+ if (this.data === null || this.element_legend_childs.series === null) {
+ this.legendShowUndefined();
+ return;
+ }
+
+ let show_undefined = true;
+ if (Math.abs(this.netdata_last - this.view_before) <= this.data_update_every) {
+ show_undefined = false;
+ }
+
+ if (show_undefined) {
+ this.legendShowUndefined();
+ return;
+ }
+
+ this.legendSetDate(this.view_before);
+
+ let labels = this.data.dimension_names;
+ let i = labels.length;
+ while (i--) {
+ let label = labels[i];
+
+ if (typeof label === 'undefined') {
+ continue;
+ }
+ if (typeof this.element_legend_childs.series[label] === 'undefined') {
+ continue;
+ }
+
+ this.legendSetLabelValue(label, this.data.view_latest_values[i]);
+ }
+ };
+
+ this.legendReset = function () {
+ this.legendShowLatestValues();
+ };
+
+ // this should be called just ONCE per dimension per chart
+ this.__chartDimensionColor = function (label) {
+ let c = NETDATA.commonColors.get(this, label);
+
+ // it is important to maintain a list of colors
+ // for this chart only, since the chart library
+ // uses this to assign colors to dimensions in the same
+ // order the dimensions are given to it
+ this.colors.push(c);
+
+ return c;
+ };
+
+ this.chartPrepareColorPalette = function () {
+ NETDATA.commonColors.refill(this);
+ };
+
+ // get the ordered list of chart colors
+ // this includes user defined colors
+ this.chartCustomColors = function () {
+ this.chartPrepareColorPalette();
+
+ let colors;
+ if (this.colors_custom.length) {
+ colors = this.colors_custom;
+ } else {
+ colors = this.colors;
+ }
+
+ if (this.debug) {
+ this.log("chartCustomColors() returns:");
+ this.log(colors);
+ }
+
+ return colors;
+ };
+
+ // get the ordered list of chart ASSIGNED colors
+ // (this returns only the colors that have been
+ // assigned to dimensions, prepended with any
+ // custom colors defined)
+ this.chartColors = function () {
+ this.chartPrepareColorPalette();
+
+ if (this.debug) {
+ this.log("chartColors() returns:");
+ this.log(this.colors);
+ }
+
+ return this.colors;
+ };
+
+ this.legendPluginModuleString = function (withContext) {
+ let str = ' ';
+ let context = '';
+
+ if (typeof this.chart !== 'undefined') {
+ if (withContext && typeof this.chart.context === 'string') {
+ context = this.chart.context;
+ }
+
+ if (typeof this.chart.plugin === 'string' && this.chart.plugin !== '') {
+ str = this.chart.plugin;
+
+ if (str.endsWith(".plugin")) {
+ str = str.substring(0, str.length - 7);
+ }
+
+ if (typeof this.chart.module === 'string' && this.chart.module !== '') {
+ str += ':' + this.chart.module;
+ }
+
+ if (withContext && context !== '') {
+ str += ', ' + context;
+ }
+ }
+ else if (withContext && context !== '') {
+ str = context;
+ }
+ }
+
+ return str;
+ };
+
+ this.legendResolutionTooltip = function () {
+ if (!this.chart) {
+ return '';
+ }
+
+ let collected = this.chart.update_every;
+ let viewed = (this.data) ? this.data.view_update_every : collected;
+
+ if (collected === viewed) {
+ return "resolution " + NETDATA.seconds4human(collected);
+ }
+
+ return "resolution " + NETDATA.seconds4human(viewed) + ", collected every " + NETDATA.seconds4human(collected);
+ };
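+
+ // e.g. with update_every = 1s and view_update_every = 5s this yields
+ // 'resolution 5 seconds, collected every 1 second' (assuming
+ // NETDATA.seconds4human renders these durations as shown)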
+
+ this.legendUpdateDOM = function () {
+ let needed = false, dim, keys, len;
+
+ // check that the legend DOM is up to date for the downloaded dimensions
+ if (typeof this.element_legend_childs.series !== 'object' || this.element_legend_childs.series === null) {
+ // this.log('the legend does not have any series - requesting legend update');
+ needed = true;
+ } else if (this.data === null) {
+ // this.log('the chart does not have any data - requesting legend update');
+ needed = true;
+ } else if (typeof this.element_legend_childs.series.labels_key === 'undefined') {
+ needed = true;
+ } else {
+ let labels = this.data.dimension_names.toString();
+ if (labels !== this.element_legend_childs.series.labels_key) {
+ needed = true;
+
+ if (this.debug) {
+ this.log('NEW LABELS: "' + labels + '" NOT EQUAL OLD LABELS: "' + this.element_legend_childs.series.labels_key + '"');
+ }
+ }
+ }
+
+ if (!needed) {
+ // make sure colors are available
+ this.chartPrepareColorPalette();
+
+ // do we have to update the current values?
+ // we do this only when the visible chart shows the latest data
+ if (Math.abs(this.netdata_last - this.view_before) <= this.data_update_every) {
+ if (this.debug) {
+ this.log('chart is in latest position... updating values on legend...');
+ }
+
+ //let labels = this.data.dimension_names;
+ //let i = labels.length;
+ //while (i--)
+ // this.legendSetLabelValue(labels[i], this.data.view_latest_values[i]);
+ }
+ return;
+ }
+
+ if (this.colors === null) {
+ // this is the first time we update the chart
+ // let's assign colors to all dimensions
+ if (this.library.track_colors()) {
+ this.colors = [];
+ keys = Object.keys(this.chart.dimensions);
+ len = keys.length;
+ for (let i = 0; i < len; i++) {
+ NETDATA.commonColors.get(this, this.chart.dimensions[keys[i]].name);
+ }
+ }
+ }
+
+ // we will re-generate the colors for the chart
+ // based on the dimensions this result has data for
+ this.colors = [];
+
+ if (this.debug) {
+ this.log('updating Legend DOM');
+ }
+
+ // mark all dimensions as invalid
+ this.dimensions_visibility.invalidateAll();
+
+ const genLabel = function (state, parent, dim, name, count) {
+ let color = state.__chartDimensionColor(name);
+
+ let user_element = null;
+ let user_id = NETDATA.dataAttribute(state.element, 'show-value-of-' + name.toLowerCase() + '-at', null);
+ if (user_id === null) {
+ user_id = NETDATA.dataAttribute(state.element, 'show-value-of-' + dim.toLowerCase() + '-at', null);
+ }
+ if (user_id !== null) {
+ user_element = document.getElementById(user_id) || null;
+ if (user_element === null) {
+ state.log('Cannot find element with id: ' + user_id);
+ }
+ }
+
+ state.element_legend_childs.series[name] = {
+ name: document.createElement('span'),
+ value: document.createElement('span'),
+ user: user_element,
+ last: null,
+ last_shown_value: null
+ };
+
+ let label = state.element_legend_childs.series[name];
+
+ // create the dimension visibility tracking for this label
+ state.dimensions_visibility.dimensionAdd(name, label.name, label.value, color);
+
+ let rgb = NETDATA.colorHex2Rgb(color);
+ label.name.innerHTML = '<table class="netdata-legend-name-table-'
+ + state.chart.chart_type
+ + '" style="background-color: '
+ + 'rgba(' + rgb.r + ',' + rgb.g + ',' + rgb.b + ',' + NETDATA.options.current['color_fill_opacity_' + state.chart.chart_type] + ') !important'
+ + '"><tr class="netdata-legend-name-tr"><td class="netdata-legend-name-td"></td></tr></table>';
+
+ let text = document.createTextNode(' ' + name);
+ label.name.appendChild(text);
+
+ if (count > 0) {
+ parent.appendChild(document.createElement('br'));
+ }
+
+ parent.appendChild(label.name);
+ parent.appendChild(label.value);
+ };
+
+ let content = document.createElement('div');
+
+ if (this.element_chart === null) {
+ this.element_chart = document.createElement('div');
+ this.element_chart.id = this.library_name + '-' + this.uuid + '-chart';
+ this.element.appendChild(this.element_chart);
+
+ if (this.hasLegend()) {
+ this.element_chart.className = 'netdata-chart-with-legend-right netdata-' + this.library_name + '-chart-with-legend-right';
+ } else {
+ this.element_chart.className = ' netdata-chart netdata-' + this.library_name + '-chart';
+ }
+ }
+
+ if (this.hasLegend()) {
+ if (this.element_legend === null) {
+ this.element_legend = document.createElement('div');
+ this.element_legend.className = 'netdata-chart-legend netdata-' + this.library_name + '-legend';
+ this.element.appendChild(this.element_legend);
+ } else {
+ this.element_legend.innerHTML = '';
+ }
+
+ this.element_legend_childs = {
+ content: content,
+ resize_handler: null,
+ toolbox: null,
+ toolbox_left: null,
+ toolbox_right: null,
+ toolbox_reset: null,
+ toolbox_zoomin: null,
+ toolbox_zoomout: null,
+ toolbox_volume: null,
+ title_date: document.createElement('span'),
+ title_time: document.createElement('span'),
+ title_units: document.createElement('span'),
+ perfect_scroller: document.createElement('div'),
+ series: {}
+ };
+
+ if (NETDATA.options.current.legend_toolbox && this.library.toolboxPanAndZoom !== null) {
+ this.element_legend_childs.toolbox = document.createElement('div');
+ this.element_legend_childs.toolbox_left = document.createElement('div');
+ this.element_legend_childs.toolbox_right = document.createElement('div');
+ this.element_legend_childs.toolbox_reset = document.createElement('div');
+ this.element_legend_childs.toolbox_zoomin = document.createElement('div');
+ this.element_legend_childs.toolbox_zoomout = document.createElement('div');
+ this.element_legend_childs.toolbox_volume = document.createElement('div');
+
+ const getPanAndZoomStep = function (event) {
+ if (event.ctrlKey) {
+ return NETDATA.options.current.pan_and_zoom_factor * NETDATA.options.current.pan_and_zoom_factor_multiplier_control;
+ } else if (event.shiftKey) {
+ return NETDATA.options.current.pan_and_zoom_factor * NETDATA.options.current.pan_and_zoom_factor_multiplier_shift;
+ } else if (event.altKey) {
+ return NETDATA.options.current.pan_and_zoom_factor * NETDATA.options.current.pan_and_zoom_factor_multiplier_alt;
+ } else {
+ return NETDATA.options.current.pan_and_zoom_factor;
+ }
+ };
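+
+ // e.g. (hypothetical option values): with pan_and_zoom_factor = 0.25
+ // and pan_and_zoom_factor_multiplier_control = 2.0, a plain click pans
+ // by 25% of the visible window, while a CTRL-click pans by 50%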
+
+ this.element_legend_childs.toolbox.className += ' netdata-legend-toolbox';
+ this.element.appendChild(this.element_legend_childs.toolbox);
+
+ this.element_legend_childs.toolbox_left.className += ' netdata-legend-toolbox-button';
+ this.element_legend_childs.toolbox_left.innerHTML = NETDATA.icons.left;
+ this.element_legend_childs.toolbox.appendChild(this.element_legend_childs.toolbox_left);
+ this.element_legend_childs.toolbox_left.onclick = function (e) {
+ e.preventDefault();
+
+ let step = (that.view_before - that.view_after) * getPanAndZoomStep(e);
+ let before = that.view_before - step;
+ let after = that.view_after - step;
+ if (after >= that.netdata_first) {
+ that.library.toolboxPanAndZoom(that, after, before);
+ }
+ };
+ if (NETDATA.options.current.show_help) {
+ $(this.element_legend_childs.toolbox_left).popover({
+ container: "body",
+ animation: false,
+ html: true,
+ trigger: 'hover',
+ placement: 'bottom',
+ delay: {
+ show: NETDATA.options.current.show_help_delay_show_ms,
+ hide: NETDATA.options.current.show_help_delay_hide_ms
+ },
+ title: 'Pan Left',
+ content: 'Pan the chart to the left. You can also <b>drag it</b> with your mouse or your finger (on touch devices).<br/><small>Help can be disabled from the settings.</small>'
+ });
+ }
+
+ this.element_legend_childs.toolbox_reset.className += ' netdata-legend-toolbox-button';
+ this.element_legend_childs.toolbox_reset.innerHTML = NETDATA.icons.reset;
+ this.element_legend_childs.toolbox.appendChild(this.element_legend_childs.toolbox_reset);
+ this.element_legend_childs.toolbox_reset.onclick = function (e) {
+ e.preventDefault();
+ NETDATA.resetAllCharts(that);
+ };
+ if (NETDATA.options.current.show_help) {
+ $(this.element_legend_childs.toolbox_reset).popover({
+ container: "body",
+ animation: false,
+ html: true,
+ trigger: 'hover',
+ placement: 'bottom',
+ delay: {
+ show: NETDATA.options.current.show_help_delay_show_ms,
+ hide: NETDATA.options.current.show_help_delay_hide_ms
+ },
+ title: 'Chart Reset',
+ content: 'Reset all the charts to their default auto-refreshing state. You can also <b>double click</b> the chart contents with your mouse or your finger (on touch devices).<br/><small>Help can be disabled from the settings.</small>'
+ });
+ }
+
+ this.element_legend_childs.toolbox_right.className += ' netdata-legend-toolbox-button';
+ this.element_legend_childs.toolbox_right.innerHTML = NETDATA.icons.right;
+ this.element_legend_childs.toolbox.appendChild(this.element_legend_childs.toolbox_right);
+ this.element_legend_childs.toolbox_right.onclick = function (e) {
+ e.preventDefault();
+ let step = (that.view_before - that.view_after) * getPanAndZoomStep(e);
+ let before = that.view_before + step;
+ let after = that.view_after + step;
+ if (before <= that.netdata_last) {
+ that.library.toolboxPanAndZoom(that, after, before);
+ }
+ };
+ if (NETDATA.options.current.show_help) {
+ $(this.element_legend_childs.toolbox_right).popover({
+ container: "body",
+ animation: false,
+ html: true,
+ trigger: 'hover',
+ placement: 'bottom',
+ delay: {
+ show: NETDATA.options.current.show_help_delay_show_ms,
+ hide: NETDATA.options.current.show_help_delay_hide_ms
+ },
+ title: 'Pan Right',
+ content: 'Pan the chart to the right. You can also <b>drag it</b> with your mouse or your finger (on touch devices).<br/><small>Help can be disabled from the settings.</small>'
+ });
+ }
+
+ this.element_legend_childs.toolbox_zoomin.className += ' netdata-legend-toolbox-button';
+ this.element_legend_childs.toolbox_zoomin.innerHTML = NETDATA.icons.zoomIn;
+ this.element_legend_childs.toolbox.appendChild(this.element_legend_childs.toolbox_zoomin);
+ this.element_legend_childs.toolbox_zoomin.onclick = function (e) {
+ e.preventDefault();
+ let dt = ((that.view_before - that.view_after) * (getPanAndZoomStep(e) * 0.8) / 2);
+ let before = that.view_before - dt;
+ let after = that.view_after + dt;
+ that.library.toolboxPanAndZoom(that, after, before);
+ };
+ if (NETDATA.options.current.show_help) {
+ $(this.element_legend_childs.toolbox_zoomin).popover({
+ container: "body",
+ animation: false,
+ html: true,
+ trigger: 'hover',
+ placement: 'bottom',
+ delay: {
+ show: NETDATA.options.current.show_help_delay_show_ms,
+ hide: NETDATA.options.current.show_help_delay_hide_ms
+ },
+ title: 'Chart Zoom In',
+ content: 'Zoom in the chart. You can also press SHIFT and select an area of the chart, or press SHIFT or ALT and use the mouse wheel or 2-finger touchpad scroll to zoom in or out.<br/><small>Help can be disabled from the settings.</small>'
+ });
+ }
+
+ this.element_legend_childs.toolbox_zoomout.className += ' netdata-legend-toolbox-button';
+ this.element_legend_childs.toolbox_zoomout.innerHTML = NETDATA.icons.zoomOut;
+ this.element_legend_childs.toolbox.appendChild(this.element_legend_childs.toolbox_zoomout);
+ this.element_legend_childs.toolbox_zoomout.onclick = function (e) {
+ e.preventDefault();
+ let dt = (((that.view_before - that.view_after) / (1.0 - (getPanAndZoomStep(e) * 0.8)) - (that.view_before - that.view_after)) / 2);
+ let before = that.view_before + dt;
+ let after = that.view_after - dt;
+
+ that.library.toolboxPanAndZoom(that, after, before);
+ };
+ if (NETDATA.options.current.show_help) {
+ $(this.element_legend_childs.toolbox_zoomout).popover({
+ container: "body",
+ animation: false,
+ html: true,
+ trigger: 'hover',
+ placement: 'bottom',
+ delay: {
+ show: NETDATA.options.current.show_help_delay_show_ms,
+ hide: NETDATA.options.current.show_help_delay_hide_ms
+ },
+ title: 'Chart Zoom Out',
+ content: 'Zoom out the chart. You can also press SHIFT or ALT and use the mouse wheel, or 2-finger touchpad scroll to zoom in or out.<br/><small>Help can be disabled from the settings.</small>'
+ });
+ }
+
+ //this.element_legend_childs.toolbox_volume.className += ' netdata-legend-toolbox-button';
+ //this.element_legend_childs.toolbox_volume.innerHTML = '<i class="fas fa-sort-amount-down"></i>';
+ //this.element_legend_childs.toolbox_volume.title = 'Visible Volume';
+ //this.element_legend_childs.toolbox.appendChild(this.element_legend_childs.toolbox_volume);
+ //this.element_legend_childs.toolbox_volume.onclick = function(e) {
+ //e.preventDefault();
+ //alert('clicked toolbox_volume on ' + that.id);
+ //}
+ }
+
+ if (NETDATA.options.current.resize_charts) {
+ this.element_legend_childs.resize_handler = document.createElement('div');
+
+ this.element_legend_childs.resize_handler.className += " netdata-legend-resize-handler";
+ this.element_legend_childs.resize_handler.innerHTML = NETDATA.icons.resize;
+ this.element.appendChild(this.element_legend_childs.resize_handler);
+ if (NETDATA.options.current.show_help) {
+ $(this.element_legend_childs.resize_handler).popover({
+ container: "body",
+ animation: false,
+ html: true,
+ trigger: 'hover',
+ placement: 'bottom',
+ delay: {
+ show: NETDATA.options.current.show_help_delay_show_ms,
+ hide: NETDATA.options.current.show_help_delay_hide_ms
+ },
+ title: 'Chart Resize',
+ content: 'Drag this point with your mouse or your finger (on touch devices), to resize the chart vertically. You can also <b>double click it</b> or <b>double tap it</b> to reset between 2 states: the default and the one that fits all the values.<br/><small>Help can be disabled from the settings.</small>'
+ });
+ }
+
+ // mousedown event
+ this.element_legend_childs.resize_handler.onmousedown =
+ function (e) {
+ that.resizeHandler(e);
+ };
+
+ // touchstart event
+ this.element_legend_childs.resize_handler.addEventListener('touchstart', function (e) {
+ that.resizeHandler(e);
+ }, false);
+ }
+
+ if (this.chart) {
+ this.element_legend_childs.title_date.title = this.legendPluginModuleString(true);
+ this.element_legend_childs.title_time.title = this.legendResolutionTooltip();
+ }
+
+ this.element_legend_childs.title_date.className += " netdata-legend-title-date";
+ this.element_legend.appendChild(this.element_legend_childs.title_date);
+ this.tmp.__last_shown_legend_date = undefined;
+
+ this.element_legend.appendChild(document.createElement('br'));
+
+ this.element_legend_childs.title_time.className += " netdata-legend-title-time";
+ this.element_legend.appendChild(this.element_legend_childs.title_time);
+ this.tmp.__last_shown_legend_time = undefined;
+
+ this.element_legend.appendChild(document.createElement('br'));
+
+ this.element_legend_childs.title_units.className += " netdata-legend-title-units";
+ this.element_legend_childs.title_units.innerText = this.units_current;
+ this.element_legend.appendChild(this.element_legend_childs.title_units);
+ this.tmp.__last_shown_legend_units = undefined;
+
+ this.element_legend.appendChild(document.createElement('br'));
+
+ this.element_legend_childs.perfect_scroller.className = 'netdata-legend-series';
+ this.element_legend.appendChild(this.element_legend_childs.perfect_scroller);
+
+ content.className = 'netdata-legend-series-content';
+ this.element_legend_childs.perfect_scroller.appendChild(content);
+
+ this.element_legend_childs.content = content;
+
+ if (NETDATA.options.current.show_help) {
+ $(content).popover({
+ container: "body",
+ animation: false,
+ html: true,
+ trigger: 'hover',
+ placement: 'bottom',
+ title: 'Chart Legend',
+ delay: {
+ show: NETDATA.options.current.show_help_delay_show_ms,
+ hide: NETDATA.options.current.show_help_delay_hide_ms
+ },
+ content: 'You can click or tap on the values or the labels to select dimensions. By pressing SHIFT or CONTROL, you can enable or disable multiple dimensions.<br/><small>Help can be disabled from the settings.</small>'
+ });
+ }
+ } else {
+ this.element_legend_childs = {
+ content: content,
+ resize_handler: null,
+ toolbox: null,
+ toolbox_left: null,
+ toolbox_right: null,
+ toolbox_reset: null,
+ toolbox_zoomin: null,
+ toolbox_zoomout: null,
+ toolbox_volume: null,
+ title_date: null,
+ title_time: null,
+ title_units: null,
+ perfect_scroller: null,
+ series: {}
+ };
+ }
+
+ if (this.data) {
+ this.element_legend_childs.series.labels_key = this.data.dimension_names.toString();
+ if (this.debug) {
+ this.log('labels from data: "' + this.element_legend_childs.series.labels_key + '"');
+ }
+
+ for (let i = 0, len = this.data.dimension_names.length; i < len; i++) {
+ genLabel(this, content, this.data.dimension_ids[i], this.data.dimension_names[i], i);
+ }
+ } else {
+ let tmp = [];
+ keys = Object.keys(this.chart.dimensions);
+ for (let i = 0, len = keys.length; i < len; i++) {
+ dim = keys[i];
+ tmp.push(this.chart.dimensions[dim].name);
+ genLabel(this, content, dim, this.chart.dimensions[dim].name, i);
+ }
+ this.element_legend_childs.series.labels_key = tmp.toString();
+ if (this.debug) {
+ this.log('labels from chart: "' + this.element_legend_childs.series.labels_key + '"');
+ }
+ }
+
+ // create a hidden div to be used for hiding
+ // the original legend of the chart library
+ let el = document.createElement('div');
+ if (this.element_legend !== null) {
+ this.element_legend.appendChild(el);
+ }
+ el.style.display = 'none';
+
+ this.element_legend_childs.hidden = document.createElement('div');
+ el.appendChild(this.element_legend_childs.hidden);
+
+ if (this.element_legend_childs.perfect_scroller !== null) {
+ Ps.initialize(this.element_legend_childs.perfect_scroller, {
+ wheelSpeed: 0.2,
+ wheelPropagation: true,
+ swipePropagation: true,
+ minScrollbarLength: null,
+ maxScrollbarLength: null,
+ useBothWheelAxes: false,
+ suppressScrollX: true,
+ suppressScrollY: false,
+ scrollXMarginOffset: 0,
+ scrollYMarginOffset: 0,
+ theme: 'default'
+ });
+ Ps.update(this.element_legend_childs.perfect_scroller);
+ }
+
+ this.legendShowLatestValues();
+ };
+
+ this.hasLegend = function () {
+ if (typeof this.tmp.___hasLegendCache___ !== 'undefined') {
+ return this.tmp.___hasLegendCache___;
+ }
+
+ let leg = false;
+ if (this.library && this.library.legend(this) === 'right-side') {
+ leg = true;
+ }
+
+ this.tmp.___hasLegendCache___ = leg;
+ return leg;
+ };
+
+ this.legendWidth = function () {
+ return (this.hasLegend()) ? 140 : 0;
+ };
+
+ this.legendHeight = function () {
+ return $(this.element).height();
+ };
+
+ this.chartWidth = function () {
+ return $(this.element).width() - this.legendWidth();
+ };
+
+ this.chartHeight = function () {
+ return $(this.element).height();
+ };
+
+ this.chartPixelsPerPoint = function () {
+ // use the largest pixels-per-point among the chart's own setting,
+ // the library's minimum and the global option
+ let px = this.pixels_per_point;
+
+ if (this.library && px < this.library.pixels_per_point(this)) {
+ px = this.library.pixels_per_point(this);
+ }
+
+ if (px < NETDATA.options.current.pixels_per_point) {
+ px = NETDATA.options.current.pixels_per_point;
+ }
+
+ return px;
+ };
+
+ this.needsRecreation = function () {
+ let ret = (
+ this.chart_created &&
+ this.library &&
+ this.library.autoresize() === false &&
+ this.tm.last_resized < NETDATA.options.last_page_resize
+ );
+
+ if (this.debug) {
+ this.log('needsRecreation(): ' + ret.toString() + ', chart_created = ' + this.chart_created.toString());
+ }
+
+ return ret;
+ };
+
+ this.chartDataUniqueID = function () {
+ return this.id + ',' + this.library_name + ',' + this.dimensions + ',' + this.chartURLOptions(true);
+ };
+
+ this.chartURLOptions = function (isForUniqueId) {
+ let ret = '';
+
+ if (this.override_options !== null) {
+ ret = this.override_options.toString();
+ } else {
+ ret = this.library.options(this);
+ }
+
+ if (this.append_options !== null) {
+ ret += '%7C' + this.append_options.toString();
+ }
+
+ ret += '%7C' + 'jsonwrap';
+
+ // always add `nonzero` when it's used to create a chartDataUniqueID
+ // we cannot just remove `nonzero` because of backwards compatibility with old snapshots
+ if (isForUniqueId || NETDATA.options.current.eliminate_zero_dimensions) {
+ ret += '%7C' + 'nonzero';
+ }
+
+ return ret;
+ };
+
+ this.chartURL = function () {
+ let after, before, points_multiplier = 1;
+ if (NETDATA.globalPanAndZoom.isActive()) {
+ if (this.current.force_before_ms !== null && this.current.force_after_ms !== null) {
+ this.tm.pan_and_zoom_seq = 0;
+
+ before = Math.round(this.current.force_before_ms / 1000);
+ after = Math.round(this.current.force_after_ms / 1000);
+ this.view_after = after * 1000;
+ this.view_before = before * 1000;
+
+ if (NETDATA.options.current.pan_and_zoom_data_padding) {
+ this.requested_padding = Math.round((before - after) / 2);
+ after -= this.requested_padding;
+ before += this.requested_padding;
+ this.requested_padding *= 1000;
+ points_multiplier = 2;
+ }
+
+ this.current.force_before_ms = null;
+ this.current.force_after_ms = null;
+ } else {
+ this.tm.pan_and_zoom_seq = NETDATA.globalPanAndZoom.seq;
+
+ after = Math.round(NETDATA.globalPanAndZoom.force_after_ms / 1000);
+ before = Math.round(NETDATA.globalPanAndZoom.force_before_ms / 1000);
+ this.view_after = after * 1000;
+ this.view_before = before * 1000;
+
+ this.requested_padding = null;
+ points_multiplier = 1;
+ }
+ } else {
+ this.tm.pan_and_zoom_seq = 0;
+
+ before = this.before;
+ after = this.after;
+ this.view_after = after * 1000;
+ this.view_before = before * 1000;
+
+ this.requested_padding = null;
+ points_multiplier = 1;
+ }
+
+ this.requested_after = after * 1000;
+ this.requested_before = before * 1000;
+
+ let data_points;
+ if (NETDATA.options.force_data_points !== 0) {
+ data_points = NETDATA.options.force_data_points;
+ this.data_points = data_points;
+ } else {
+ this.data_points = this.points || Math.round(this.chartWidth() / this.chartPixelsPerPoint());
+ data_points = this.data_points * points_multiplier;
+ }
+
+ // build the data URL
+ this.data_url = this.host + this.chart.data_url;
+ this.data_url += "&format=" + this.library.format();
+ this.data_url += "&points=" + (data_points).toString();
+ this.data_url += "&group=" + this.method;
+ this.data_url += "&gtime=" + this.gtime;
+ this.data_url += "&options=" + this.chartURLOptions();
+
+ if (after) {
+ this.data_url += "&after=" + after.toString();
+ }
+
+ if (before) {
+ this.data_url += "&before=" + before.toString();
+ }
+
+ if (this.dimensions) {
+ this.data_url += "&dimensions=" + this.dimensions;
+ }
+
+ if (NETDATA.options.debug.chart_data_url || this.debug) {
+ this.log('chartURL(): ' + this.data_url + ' WxH:' + this.chartWidth() + 'x' + this.chartHeight() + ' points: ' + data_points.toString() + ' library: ' + this.library_name);
+ }
+ };
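+
+ // for illustration only (hypothetical chart and values), a resulting
+ // data_url may look like:
+ //     http://host:19999/api/v1/data?chart=system.cpu&format=json
+ //     &points=300&group=average&gtime=0&options=...%7Cjsonwrap%7Cnonzero
+ //     &after=-600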
+
+ this.redrawChart = function () {
+ if (this.data !== null) {
+ this.updateChartWithData(this.data);
+ }
+ };
+
+ this.updateChartWithData = function (data) {
+ if (this.debug) {
+ this.log('updateChartWithData() called.');
+ }
+
+ // this may force the chart to be re-created
+ resizeChart();
+
+ this.data = data;
+
+ let started = Date.now();
+ let view_update_every = data.view_update_every * 1000;
+
+ if (this.data_update_every !== view_update_every) {
+ if (this.element_legend_childs.title_time) {
+ this.element_legend_childs.title_time.title = this.legendResolutionTooltip();
+ }
+ }
+
+ // if the result is JSON, find the latest update-every
+ this.data_update_every = view_update_every;
+ this.data_after = data.after * 1000;
+ this.data_before = data.before * 1000;
+ this.netdata_first = data.first_entry * 1000;
+ this.netdata_last = data.last_entry * 1000;
+ this.data_points = data.points;
+
+ data.state = this;
+
+ if (NETDATA.options.current.pan_and_zoom_data_padding && this.requested_padding !== null) {
+ if (this.view_after < this.data_after) {
+ // console.log('adjusting view_after from ' + this.view_after + ' to ' + this.data_after);
+ this.view_after = this.data_after;
+ }
+
+ if (this.view_before > this.data_before) {
+ // console.log('adjusting view_before from ' + this.view_before + ' to ' + this.data_before);
+ this.view_before = this.data_before;
+ }
+ } else {
+ this.view_after = this.data_after;
+ this.view_before = this.data_before;
+ }
+
+ if (this.debug) {
+ this.log('UPDATE No ' + this.updates_counter + ' COMPLETED');
+
+ if (this.current.force_after_ms) {
+ this.log('STATUS: forced : ' + (this.current.force_after_ms / 1000).toString() + ' - ' + (this.current.force_before_ms / 1000).toString());
+ } else {
+ this.log('STATUS: forced : unset');
+ }
+
+ this.log('STATUS: requested : ' + (this.requested_after / 1000).toString() + ' - ' + (this.requested_before / 1000).toString());
+ this.log('STATUS: downloaded: ' + (this.data_after / 1000).toString() + ' - ' + (this.data_before / 1000).toString());
+ this.log('STATUS: rendered : ' + (this.view_after / 1000).toString() + ' - ' + (this.view_before / 1000).toString());
+ this.log('STATUS: points : ' + (this.data_points).toString());
+ }
+
+ if (this.data_points === 0) {
+ noDataToShow();
+ return;
+ }
+
+ if (this.updates_since_last_creation >= this.library.max_updates_to_recreate()) {
+ if (this.debug) {
+ this.log('max updates of ' + this.updates_since_last_creation.toString() + ' reached. Forcing re-generation.');
+ }
+
+ init('force');
+ return;
+ }
+
+ // check and update the legend
+ this.legendUpdateDOM();
+
+ if (this.chart_created && typeof this.library.update === 'function') {
+ if (this.debug) {
+ this.log('updating chart...');
+ }
+
+ if (!callChartLibraryUpdateSafely(data)) {
+ return;
+ }
+ } else {
+ if (this.debug) {
+ this.log('creating chart...');
+ }
+
+ if (!callChartLibraryCreateSafely(data)) {
+ return;
+ }
+ }
+
+ if (this.isVisible()) {
+ hideMessage();
+ this.legendShowLatestValues();
+ } else {
+ this.__redraw_on_unhide = true;
+
+ if (this.debug) {
+ this.log("drawn while not visible");
+ }
+ }
+
+ if (this.selected) {
+ NETDATA.globalSelectionSync.stop();
+ }
+
+ // update the performance counters
+ let now = Date.now();
+ this.tm.last_updated = now;
+
+ // don't update last_autorefreshed if this chart is
+ // forced to be updated with global PanAndZoom
+ if (NETDATA.globalPanAndZoom.isActive()) {
+ this.tm.last_autorefreshed = 0;
+ } else {
+ if (NETDATA.options.current.parallel_refresher && NETDATA.options.current.concurrent_refreshes && typeof this.force_update_every !== 'number') {
+ this.tm.last_autorefreshed = now - (now % this.data_update_every);
+ } else {
+ this.tm.last_autorefreshed = now;
+ }
+ }
+
+ this.refresh_dt_ms = now - started;
+ NETDATA.options.auto_refresher_fast_weight += this.refresh_dt_ms;
+
+ if (this.refresh_dt_element !== null) {
+ this.refresh_dt_element.innerText = this.refresh_dt_ms.toString();
+ }
+
+ if (this.foreignElementBefore !== null) {
+ this.foreignElementBefore.innerText = NETDATA.dateTime.localeDateString(this.view_before) + ' ' + NETDATA.dateTime.localeTimeString(this.view_before);
+ }
+
+ if (this.foreignElementAfter !== null) {
+ this.foreignElementAfter.innerText = NETDATA.dateTime.localeDateString(this.view_after) + ' ' + NETDATA.dateTime.localeTimeString(this.view_after);
+ }
+
+ if (this.foreignElementDuration !== null) {
+ this.foreignElementDuration.innerText = NETDATA.seconds4human(Math.floor((this.view_before - this.view_after) / 1000) + 1);
+ }
+
+ if (this.foreignElementUpdateEvery !== null) {
+ this.foreignElementUpdateEvery.innerText = NETDATA.seconds4human(Math.floor(this.data_update_every / 1000));
+ }
+ };
+
+ this.getSnapshotData = function (key) {
+ if (this.debug) {
+ this.log('updating from snapshot: ' + key);
+ }
+
+ if (typeof netdataSnapshotData.data[key] === 'undefined') {
+ this.log('snapshot does not include data for key "' + key + '"');
+ return null;
+ }
+
+ if (typeof netdataSnapshotData.data[key] !== 'string') {
+ this.log('snapshot data for key "' + key + '" is not string');
+ return null;
+ }
+
+ let uncompressed;
+ try {
+ uncompressed = netdataSnapshotData.uncompress(netdataSnapshotData.data[key]);
+
+ if (uncompressed === null) {
+ this.log('uncompressed snapshot data for key ' + key + ' is null');
+ return null;
+ }
+
+ if (typeof uncompressed === 'undefined') {
+ this.log('uncompressed snapshot data for key ' + key + ' is undefined');
+ return null;
+ }
+ } catch (e) {
+ this.log('decompression of snapshot data for key ' + key + ' failed');
+ console.log(e);
+ uncompressed = null;
+ }
+
+ if (typeof uncompressed !== 'string') {
+ this.log('uncompressed snapshot data for key ' + key + ' is not string');
+ return null;
+ }
+
+ let data;
+ try {
+ data = JSON.parse(uncompressed);
+ } catch (e) {
+ this.log('parsing snapshot data for key ' + key + ' failed');
+ console.log(e);
+ data = null;
+ }
+
+ return data;
+ };
+
+ this.updateChart = function (callback) {
+ if (this.debug) {
+ this.log('updateChart()');
+ }
+
+ if (this.fetching_data) {
+ if (this.debug) {
+ this.log('updateChart(): I am already updating...');
+ }
+
+ if (typeof callback === 'function') {
+ return callback(false, 'already running');
+ }
+
+ return;
+ }
+
+ // due to late initialization of charts and libraries
+ // we need to check this too
+ if (!this.enabled) {
+ if (this.debug) {
+ this.log('updateChart(): I am not enabled');
+ }
+
+ if (typeof callback === 'function') {
+ return callback(false, 'not enabled');
+ }
+
+ return;
+ }
+
+ if (!canBeRendered()) {
+ if (this.debug) {
+ this.log('updateChart(): cannot be rendered');
+ }
+
+ if (typeof callback === 'function') {
+ return callback(false, 'cannot be rendered');
+ }
+
+ return;
+ }
+
+ if (that.dom_created !== true) {
+ if (this.debug) {
+ this.log('updateChart(): creating DOM');
+ }
+
+ createDOM();
+ }
+
+ if (this.chart === null) {
+ if (this.debug) {
+ this.log('updateChart(): getting chart');
+ }
+
+ return this.getChart(function () {
+ return that.updateChart(callback);
+ });
+ }
+
+ if (!this.library.initialized) {
+ if (this.library.enabled) {
+ if (this.debug) {
+ this.log('updateChart(): initializing chart library');
+ }
+
+ return this.library.initialize(function () {
+ return that.updateChart(callback);
+ });
+ } else {
+ error('chart library "' + this.library_name + '" is not available.');
+
+ if (typeof callback === 'function') {
+ return callback(false, 'library not available');
+ }
+
+ return;
+ }
+ }
+
+ this.clearSelection();
+ this.chartURL();
+
+ NETDATA.statistics.refreshes_total++;
+ NETDATA.statistics.refreshes_active++;
+
+ if (NETDATA.statistics.refreshes_active > NETDATA.statistics.refreshes_active_max) {
+ NETDATA.statistics.refreshes_active_max = NETDATA.statistics.refreshes_active;
+ }
+
+ let ok = false;
+ this.fetching_data = true;
+
+ if (netdataSnapshotData !== null) {
+ let key = this.chartDataUniqueID();
+ let data = this.getSnapshotData(key);
+ if (data !== null) {
+ ok = true;
+ data = NETDATA.xss.checkData('/api/v1/data', data, this.library.xssRegexIgnore);
+ this.updateChartWithData(data);
+ } else {
+ ok = false;
+ error('cannot get data from snapshot for key: "' + key + '"');
+ that.tm.last_autorefreshed = Date.now();
+ }
+
+ NETDATA.statistics.refreshes_active--;
+ this.fetching_data = false;
+
+ if (typeof callback === 'function') {
+ callback(ok, 'snapshot');
+ }
+
+ return;
+ }
+
+ if (this.debug) {
+ this.log('updating from ' + this.data_url);
+ }
+
+ this.xhr = $.ajax({
+ url: this.data_url,
+ cache: false,
+ async: true,
+ headers: {
+ 'Cache-Control': 'no-cache, no-store',
+ 'Pragma': 'no-cache'
+ },
+ xhrFields: {withCredentials: true} // required for the cookie
+ })
+ .done(function (data) {
+ data = NETDATA.xss.checkData('/api/v1/data', data, that.library.xssRegexIgnore);
+
+ that.xhr = undefined;
+ that.retries_on_data_failures = 0;
+ ok = true;
+
+ if (that.debug) {
+ that.log('data received. updating chart.');
+ }
+
+ that.updateChartWithData(data);
+ })
+ .fail(function (msg) {
+ that.xhr = undefined;
+
+ if (msg.statusText !== 'abort') {
+ that.retries_on_data_failures++;
+ if (that.retries_on_data_failures > NETDATA.options.current.retries_on_data_failures) {
+ // that.log('failed ' + that.retries_on_data_failures.toString() + ' times - giving up');
+ that.retries_on_data_failures = 0;
+ error('data download failed for url: ' + that.data_url);
+ }
+ else {
+ that.tm.last_autorefreshed = Date.now();
+ // that.log('failed ' + that.retries_on_data_failures.toString() + ' times, but I will retry');
+ }
+ }
+ })
+ .always(function () {
+ that.xhr = undefined;
+
+ NETDATA.statistics.refreshes_active--;
+ that.fetching_data = false;
+
+ if (typeof callback === 'function') {
+ return callback(ok, 'download');
+ }
+ });
+ };
+
+ const __isVisible = function () {
+ let ret = true;
+
+ if (NETDATA.options.current.update_only_visible !== false) {
+ // tolerance is the number of pixels a chart can be off-screen
+ // and still be considered visible and refreshed as if it were visible
+ let tolerance = 0;
+
+ that.tm.last_visible_check = Date.now();
+
+ let rect = that.element.getBoundingClientRect();
+
+ let screenTop = window.scrollY;
+ let screenBottom = screenTop + window.innerHeight;
+
+ let chartTop = rect.top + screenTop;
+ let chartBottom = chartTop + rect.height;
+
+ ret = !(rect.width === 0 || rect.height === 0 || chartBottom + tolerance < screenTop || chartTop - tolerance > screenBottom);
+ }
+
+ if (that.debug) {
+ that.log('__isVisible(): ' + ret);
+ }
+
+ return ret;
+ };
+
+ this.isVisible = function (nocache) {
+ // this.log('last_visible_check: ' + this.tm.last_visible_check + ', last_page_scroll: ' + NETDATA.options.last_page_scroll);
+
+ // caching - we do not evaluate the chart's visibility
+ // if the page has not been scrolled since the last check
+ if ((typeof nocache !== 'undefined' && nocache)
+ || typeof this.tmp.___isVisible___ === 'undefined'
+ || this.tm.last_visible_check <= NETDATA.options.last_page_scroll) {
+ this.tmp.___isVisible___ = __isVisible();
+ if (this.tmp.___isVisible___) {
+ this.unhideChart();
+ } else {
+ this.hideChart();
+ }
+ }
+
+ if (this.debug) {
+ this.log('isVisible(' + nocache + '): ' + this.tmp.___isVisible___);
+ }
+
+ return this.tmp.___isVisible___;
+ };
+
+ this.isAutoRefreshable = function () {
+ return (this.current.autorefresh);
+ };
+
+ this.canBeAutoRefreshed = function () {
+ if (!this.enabled) {
+ if (this.debug) {
+ this.log('canBeAutoRefreshed() -> not enabled');
+ }
+
+ return false;
+ }
+
+ if (this.running) {
+ if (this.debug) {
+ this.log('canBeAutoRefreshed() -> already running');
+ }
+
+ return false;
+ }
+
+ if (this.library === null || this.library.enabled === false) {
+ error('charting library "' + this.library_name + '" is not available');
+ if (this.debug) {
+ this.log('canBeAutoRefreshed() -> chart library ' + this.library_name + ' is not available');
+ }
+
+ return false;
+ }
+
+ if (!this.isVisible()) {
+ if (NETDATA.options.debug.visibility || this.debug) {
+ this.log('canBeAutoRefreshed() -> not visible');
+ }
+
+ return false;
+ }
+
+ let now = Date.now();
+
+ if (this.current.force_update_at !== 0 && this.current.force_update_at < now) {
+ if (this.debug) {
+ this.log('canBeAutoRefreshed() -> timed force update - allowing this update');
+ }
+
+ this.current.force_update_at = 0;
+ return true;
+ }
+
+ if (!this.isAutoRefreshable()) {
+ if (this.debug) {
+ this.log('canBeAutoRefreshed() -> not auto-refreshable');
+ }
+
+ return false;
+ }
+
+ // allow the first update, even if the page is not visible
+ if (NETDATA.options.page_is_visible === false && this.updates_counter && this.updates_since_last_unhide) {
+ if (NETDATA.options.debug.focus || this.debug) {
+ this.log('canBeAutoRefreshed() -> not the first update, and page does not have focus');
+ }
+
+ return false;
+ }
+
+ if (this.needsRecreation()) {
+ if (this.debug) {
+ this.log('canBeAutoRefreshed() -> needs re-creation.');
+ }
+
+ return true;
+ }
+
+ if (NETDATA.options.auto_refresher_stop_until >= now) {
+ if (this.debug) {
+ this.log('canBeAutoRefreshed() -> stopped until is in future.');
+ }
+
+ return false;
+ }
+
+ // options valid only for autoRefresh()
+ if (NETDATA.globalPanAndZoom.isActive()) {
+ if (NETDATA.globalPanAndZoom.shouldBeAutoRefreshed(this)) {
+ if (this.debug) {
+ this.log('canBeAutoRefreshed(): global panning: I need an update.');
+ }
+
+ return true;
+ }
+ else {
+ if (this.debug) {
+ this.log('canBeAutoRefreshed(): global panning: I am already up to date.');
+ }
+
+ return false;
+ }
+ }
+
+ if (this.selected) {
+ if (this.debug) {
+ this.log('canBeAutoRefreshed(): I have a selection in place.');
+ }
+
+ return false;
+ }
+
+ if (this.paused) {
+ if (this.debug) {
+ this.log('canBeAutoRefreshed(): I am paused.');
+ }
+
+ return false;
+ }
+
+ let data_update_every = this.data_update_every;
+ if (typeof this.force_update_every === 'number') {
+ data_update_every = this.force_update_every;
+ }
+
+ if (now - this.tm.last_autorefreshed >= data_update_every) {
+ if (this.debug) {
+ this.log('canBeAutoRefreshed(): It is time to update me. Now: ' + now.toString() + ', last_autorefreshed: ' + this.tm.last_autorefreshed + ', data_update_every: ' + data_update_every + ', delta: ' + (now - this.tm.last_autorefreshed).toString());
+ }
+
+ return true;
+ }
+
+ return false;
+ };
+
+ this.autoRefresh = function (callback) {
+ let state = that;
+
+ if (state.canBeAutoRefreshed() && state.running === false) {
+ state.running = true;
+ state.updateChart(function () {
+ state.running = false;
+
+ if (typeof callback === 'function') {
+ return callback();
+ }
+ });
+ } else {
+ if (typeof callback === 'function') {
+ return callback();
+ }
+ }
+ };
+
+ this.__defaultsFromDownloadedChart = function (chart) {
+ this.chart = chart;
+ this.chart_url = chart.url;
+ this.data_update_every = chart.update_every * 1000;
+ this.data_points = Math.round(this.chartWidth() / this.chartPixelsPerPoint());
+ this.tm.last_info_downloaded = Date.now();
+
+ if (this.title === null) {
+ this.title = chart.title;
+ }
+
+ if (this.units === null) {
+ this.units = chart.units;
+ this.units_current = this.units;
+ }
+ };
+
+ // fetch the chart description from the netdata server
+ this.getChart = function (callback) {
+ this.chart = NETDATA.chartRegistry.get(this.host, this.id);
+ if (this.chart) {
+ this.__defaultsFromDownloadedChart(this.chart);
+
+ if (typeof callback === 'function') {
+ return callback();
+ }
+ } else if (netdataSnapshotData !== null) {
+ // console.log(this);
+ // console.log(NETDATA.chartRegistry);
+ NETDATA.error(404, 'host: ' + this.host + ', chart: ' + this.id);
+ error('chart not found in snapshot');
+
+ if (typeof callback === 'function') {
+ return callback();
+ }
+ } else {
+ this.chart_url = "/api/v1/chart?chart=" + this.id;
+
+ if (this.debug) {
+ this.log('downloading ' + this.chart_url);
+ }
+
+ $.ajax({
+ url: this.host + this.chart_url,
+ cache: false,
+ async: true,
+ xhrFields: {withCredentials: true} // required for the cookie
+ })
+ .done(function (chart) {
+ chart = NETDATA.xss.checkOptional('/api/v1/chart', chart);
+
+ chart.url = that.chart_url;
+ that.__defaultsFromDownloadedChart(chart);
+ NETDATA.chartRegistry.add(that.host, that.id, chart);
+ })
+ .fail(function () {
+ NETDATA.error(404, that.chart_url);
+ error('chart not found on url "' + that.chart_url + '"');
+ })
+ .always(function () {
+ if (typeof callback === 'function') {
+ return callback();
+ }
+ });
+ }
+ };
+
+ // ============================================================================================================
+ // INITIALIZATION
+
+ initDOM();
+ init('fast');
+};
+
+NETDATA.resetAllCharts = function (state) {
+ // first clear the global selection sync
+ // to make sure no chart is in selected state
+ NETDATA.globalSelectionSync.stop();
+
+ // there are 2 possibilities here
+ // a. state is the global Pan and Zoom master
+ // b. state is not the global Pan and Zoom master
+
+ // let master = true;
+ // if (NETDATA.globalPanAndZoom.isMaster(state) === false) {
+ // master = false;
+ // }
+ const master = NETDATA.globalPanAndZoom.isMaster(state);
+
+ // clear the global Pan and Zoom
+ // this will also refresh the master
+ // and unblock any charts currently mirroring the master
+ NETDATA.globalPanAndZoom.clearMaster();
+
+ // if we were not the master, reset our status too
+ // this is required because most probably the mouse
+ // is over this chart, blocking it from auto-refreshing
+ if (master === false && (state.paused || state.selected)) {
+ state.resetChart();
+ }
+};
+
+// get or create a chart state, given a DOM element
+NETDATA.chartState = function (element) {
+ let self = $(element);
+
+ let state = self.data('netdata-state-object') || null;
+ if (state === null) {
+ state = new chartState(element);
+ self.data('netdata-state-object', state);
+ }
+ return state;
+};
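+
+// usage sketch: given a chart container element (hypothetical id),
+//     let state = NETDATA.chartState(document.getElementById('mychart'));
+// repeated calls with the same element return the cached state object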
+
+// ----------------------------------------------------------------------------------------------------------------
+// Library functions
+
+// Load a script without jQuery.
+// This is used to load jQuery itself - once it is loaded, we use jQuery.
+NETDATA._loadjQuery = function (callback) {
+ if (typeof jQuery === 'undefined') {
+ if (NETDATA.options.debug.main_loop) {
+ console.log('loading ' + NETDATA.jQuery);
+ }
+
+ let script = document.createElement('script');
+ script.type = 'text/javascript';
+ script.async = true;
+ script.src = NETDATA.jQuery;
+
+ // script.onabort = onError;
+ script.onerror = function () {
+ NETDATA.error(101, NETDATA.jQuery);
+ };
+ if (typeof callback === "function") {
+ script.onload = function () {
+ $ = jQuery;
+ return callback();
+ };
+ }
+
+ let s = document.getElementsByTagName('script')[0];
+ s.parentNode.insertBefore(script, s);
+ }
+ else if (typeof callback === "function") {
+ $ = jQuery;
+ return callback();
+ }
+};
+
+NETDATA._loadCSS = function (filename) {
+ // don't use jQuery here
+ // styles are loaded before jQuery
+ // to eliminate showing an unstyled page to the user
+
+ let fileref = document.createElement("link");
+ fileref.setAttribute("rel", "stylesheet");
+ fileref.setAttribute("type", "text/css");
+ fileref.setAttribute("href", filename);
+
+ if (typeof fileref !== 'undefined') {
+ document.getElementsByTagName("head")[0].appendChild(fileref);
+ }
+};
+
+// user function to signal us that the DOM has been updated
+NETDATA.updatedDom = function () {
+ NETDATA.options.updated_dom = true;
+};
+
+NETDATA.ready = function (callback) {
+ NETDATA.options.pauseCallback = callback;
+};
+
+NETDATA.pause = function (callback) {
+ if (typeof callback === 'function') {
+ if (NETDATA.options.pause) {
+ return callback();
+ } else {
+ NETDATA.options.pauseCallback = callback;
+ }
+ }
+};
+
+NETDATA.unpause = function () {
+ NETDATA.options.pauseCallback = null;
+ NETDATA.options.updated_dom = true;
+ NETDATA.options.pause = false;
+};
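+
+// usage sketch (illustrative): pause all chart refreshes while doing
+// heavy DOM work, then resume:
+//     NETDATA.pause(function () { /* refreshes are now paused */ });
+//     // ... heavy work ...
+//     NETDATA.unpause();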
+
+// ----------------------------------------------------------------------------------------------------------------
+
+// this is a purely sequential chart refresher
+// it is meant to be autonomous
+NETDATA.chartRefresherNoParallel = function (index, callback) {
+ let targets = NETDATA.intersectionObserver.targets();
+
+ if (NETDATA.options.debug.main_loop) {
+ console.log('NETDATA.chartRefresherNoParallel(' + index + ')');
+ }
+
+ if (NETDATA.options.updated_dom) {
+ // the dom has been updated
+ // get the dom parts again
+ NETDATA.parseDom(callback);
+ return;
+ }
+ if (index >= targets.length) {
+ if (NETDATA.options.debug.main_loop) {
+ console.log('waiting to restart main loop...');
+ }
+
+ NETDATA.options.auto_refresher_fast_weight = 0;
+ callback();
+ } else {
+ let state = targets[index];
+
+ if (NETDATA.options.auto_refresher_fast_weight < NETDATA.options.current.fast_render_timeframe) {
+ if (NETDATA.options.debug.main_loop) {
+ console.log('fast rendering...');
+ }
+
+ if (state.isVisible()) {
+ NETDATA.timeout.set(function () {
+ state.autoRefresh(function () {
+ NETDATA.chartRefresherNoParallel(++index, callback);
+ });
+ }, 0);
+ } else {
+ NETDATA.chartRefresherNoParallel(++index, callback);
+ }
+ } else {
+ if (NETDATA.options.debug.main_loop) {
+ console.log('waiting for next refresh...');
+ }
+ NETDATA.options.auto_refresher_fast_weight = 0;
+
+ NETDATA.timeout.set(function () {
+ state.autoRefresh(function () {
+ NETDATA.chartRefresherNoParallel(++index, callback);
+ });
+ }, NETDATA.options.current.idle_between_charts);
+ }
+ }
+};
+
+NETDATA.chartRefresherWaitTime = function () {
+ return NETDATA.options.current.idle_parallel_loops;
+};
+
+// the default refresher
+NETDATA.chartRefresherLastRun = 0;
+NETDATA.chartRefresherRunsAfterParseDom = 0;
+NETDATA.chartRefresherTimeoutId = undefined;
+
+NETDATA.chartRefresherReschedule = function () {
+ if (NETDATA.options.current.async_on_scroll) {
+ if (NETDATA.chartRefresherTimeoutId) {
+ NETDATA.timeout.clear(NETDATA.chartRefresherTimeoutId);
+ }
+ NETDATA.chartRefresherTimeoutId = NETDATA.timeout.set(NETDATA.chartRefresher, NETDATA.options.current.onscroll_worker_duration_threshold);
+ //console.log('chartRefresherReschedule()');
+ }
+};
+
+NETDATA.chartRefresher = function () {
+ // console.log('chartRefresher() begin ' + (Date.now() - NETDATA.chartRefresherLastRun).toString() + ' ms since last run');
+
+ if (NETDATA.options.page_is_visible === false
+ && NETDATA.options.current.stop_updates_when_focus_is_lost
+ && NETDATA.chartRefresherLastRun > NETDATA.options.last_page_resize
+ && NETDATA.chartRefresherLastRun > NETDATA.options.last_page_scroll
+ && NETDATA.chartRefresherRunsAfterParseDom > 10
+ ) {
+ setTimeout(
+ NETDATA.chartRefresher,
+ NETDATA.options.current.idle_lost_focus
+ );
+
+ // console.log('chartRefresher() page without focus, will run in ' + NETDATA.options.current.idle_lost_focus.toString() + ' ms, ' + NETDATA.chartRefresherRunsAfterParseDom.toString());
+ return;
+ }
+ NETDATA.chartRefresherRunsAfterParseDom++;
+
+ let now = Date.now();
+ NETDATA.chartRefresherLastRun = now;
+
+ if (now < NETDATA.options.on_scroll_refresher_stop_until) {
+ NETDATA.chartRefresherTimeoutId = NETDATA.timeout.set(
+ NETDATA.chartRefresher,
+ NETDATA.chartRefresherWaitTime()
+ );
+
+ // console.log('chartRefresher() end1 will run in ' + NETDATA.chartRefresherWaitTime().toString() + ' ms');
+ return;
+ }
+
+ if (now < NETDATA.options.auto_refresher_stop_until) {
+ NETDATA.chartRefresherTimeoutId = NETDATA.timeout.set(
+ NETDATA.chartRefresher,
+ NETDATA.chartRefresherWaitTime()
+ );
+
+ // console.log('chartRefresher() end2 will run in ' + NETDATA.chartRefresherWaitTime().toString() + ' ms');
+ return;
+ }
+
+ if (NETDATA.options.pause) {
+ // console.log('auto-refresher is paused');
+ NETDATA.chartRefresherTimeoutId = NETDATA.timeout.set(
+ NETDATA.chartRefresher,
+ NETDATA.chartRefresherWaitTime()
+ );
+
+ // console.log('chartRefresher() end3 will run in ' + NETDATA.chartRefresherWaitTime().toString() + ' ms');
+ return;
+ }
+
+ if (typeof NETDATA.options.pauseCallback === 'function') {
+ // console.log('auto-refresher is calling pauseCallback');
+
+ NETDATA.options.pause = true;
+ NETDATA.options.pauseCallback();
+ NETDATA.chartRefresher();
+
+ // console.log('chartRefresher() end4 (nested)');
+ return;
+ }
+
+ if (!NETDATA.options.current.parallel_refresher) {
+ // console.log('auto-refresher is calling chartRefresherNoParallel(0)');
+ NETDATA.chartRefresherNoParallel(0, function () {
+ NETDATA.chartRefresherTimeoutId = NETDATA.timeout.set(
+ NETDATA.chartRefresher,
+ NETDATA.options.current.idle_between_loops
+ );
+ });
+ // console.log('chartRefresher() end5 (no parallel, nested)');
+ return;
+ }
+
+ if (NETDATA.options.updated_dom) {
+ // the dom has been updated
+ // get the dom parts again
+ // console.log('auto-refresher is calling parseDom()');
+ NETDATA.parseDom(NETDATA.chartRefresher);
+ // console.log('chartRefresher() end6 (parseDom)');
+ return;
+ }
+
+ if (!NETDATA.globalSelectionSync.active()) {
+ let parallel = [];
+ let targets = NETDATA.intersectionObserver.targets();
+ let len = targets.length;
+ let state;
+ while (len--) {
+ state = targets[len];
+ if (state.running || state.isVisible() === false) {
+ continue;
+ }
+
+ if (!state.library.initialized) {
+ if (state.library.enabled) {
+ state.library.initialize(NETDATA.chartRefresher);
+ //console.log('chartRefresher() end6 (library init)');
+ return;
+ }
+ else {
+ state.error('chart library "' + state.library_name + '" is not enabled.');
+ }
+ }
+
+ if (NETDATA.scrollUp) {
+ parallel.unshift(state);
+ } else {
+ parallel.push(state);
+ }
+ }
+
+ len = parallel.length;
+ while (len--) {
+ state = parallel[len];
+ // console.log('auto-refresher executing in parallel for ' + parallel.length.toString() + ' charts');
+ // this will execute the jobs in parallel
+
+ if (!state.running) {
+ NETDATA.timeout.set(state.autoRefresh, 0);
+ }
+ }
+ //else {
+ // console.log('auto-refresher nothing to do');
+ //}
+ }
+
+ // run the next refresh iteration
+ NETDATA.chartRefresherTimeoutId = NETDATA.timeout.set(
+ NETDATA.chartRefresher,
+ NETDATA.chartRefresherWaitTime()
+ );
+
+ //console.log('chartRefresher() completed in ' + (Date.now() - now).toString() + ' ms');
+};
+
+NETDATA.parseDom = function (callback) {
+ //console.log('parseDom()');
+
+ NETDATA.options.last_page_scroll = Date.now();
+ NETDATA.options.updated_dom = false;
+ NETDATA.chartRefresherRunsAfterParseDom = 0;
+
+ let targets = $('div[data-netdata]'); //.filter(':visible');
+
+ if (NETDATA.options.debug.main_loop) {
+ console.log('DOM updated - there are ' + targets.length + ' charts on page.');
+ }
+
+ NETDATA.intersectionObserver.globalReset();
+ NETDATA.options.targets = [];
+ let len = targets.length;
+ while (len--) {
+ // the initialization will take care of sizing
+ // and the "loading..." message
+ let state = NETDATA.chartState(targets[len]);
+ NETDATA.options.targets.push(state);
+ NETDATA.intersectionObserver.observe(state);
+ }
+
+ if (NETDATA.globalChartUnderlay.isActive()) {
+ NETDATA.globalChartUnderlay.setup();
+ } else {
+ NETDATA.globalChartUnderlay.clear();
+ }
+
+ if (typeof callback === 'function') {
+ return callback();
+ }
+};
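+
+// A minimal sketch of adding a chart dynamically (illustrative only;
+// 'system.cpu' is just an example chart id):
+//
+//     let div = document.createElement('div');
+//     div.setAttribute('data-netdata', 'system.cpu');
+//     document.body.appendChild(div);
+//     NETDATA.updatedDom(); // the refresher will re-run NETDATA.parseDom()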
+
+// this is the main function - where everything starts
+NETDATA.started = false;
+NETDATA.start = function () {
+ // this should be called only once
+
+ if (NETDATA.started) {
+ console.log('netdata is already started');
+ return;
+ }
+
+ NETDATA.started = true;
+ NETDATA.options.page_is_visible = true;
+
+ $(window).blur(function () {
+ if (NETDATA.options.current.stop_updates_when_focus_is_lost) {
+ NETDATA.options.page_is_visible = false;
+ if (NETDATA.options.debug.focus) {
+ console.log('Lost Focus!');
+ }
+ }
+ });
+
+ $(window).focus(function () {
+ if (NETDATA.options.current.stop_updates_when_focus_is_lost) {
+ NETDATA.options.page_is_visible = true;
+ if (NETDATA.options.debug.focus) {
+ console.log('Focus restored!');
+ }
+ }
+ });
+
+ if (typeof document.hasFocus === 'function' && !document.hasFocus()) {
+ if (NETDATA.options.current.stop_updates_when_focus_is_lost) {
+ NETDATA.options.page_is_visible = false;
+ if (NETDATA.options.debug.focus) {
+ console.log('Document has no focus!');
+ }
+ }
+ }
+
+ // bootstrap tab switching
+ $('a[data-toggle="tab"]').on('shown.bs.tab', NETDATA.onscroll);
+
+ // bootstrap modal switching
+ let $modal = $('.modal');
+ $modal.on('hidden.bs.modal', NETDATA.onscroll);
+ $modal.on('shown.bs.modal', NETDATA.onscroll);
+
+ // bootstrap collapse switching
+ let $collapse = $('.collapse');
+ $collapse.on('hidden.bs.collapse', NETDATA.onscroll);
+ $collapse.on('shown.bs.collapse', NETDATA.onscroll);
+
+ NETDATA.parseDom(NETDATA.chartRefresher);
+
+ // Alarms initialization
+ setTimeout(NETDATA.alarms.init, 1000);
+
+ // Registry initialization
+ setTimeout(NETDATA.registry.init, netdataRegistryAfterMs);
+
+ if (typeof netdataCallback === 'function') {
+ netdataCallback();
+ }
+};
+
+NETDATA.globalReset = function () {
+ NETDATA.intersectionObserver.globalReset();
+ NETDATA.globalSelectionSync.globalReset();
+ NETDATA.globalPanAndZoom.globalReset();
+ NETDATA.chartRegistry.globalReset();
+ NETDATA.commonMin.globalReset();
+ NETDATA.commonMax.globalReset();
+ NETDATA.commonColors.globalReset();
+ NETDATA.unitsConversion.globalReset();
+ NETDATA.options.targets = [];
+ NETDATA.parseDom();
+ NETDATA.unpause();
+};
+
+// Alarms of the netdata server
+
+NETDATA.alarms = {
+ onclick: null, // the callback to handle the click - it will be called with the alarm log entry
+    chart_div_offset: -50, // leave this much space above the chart when scrolling to it
+    chart_div_id_prefix: 'chart_', // the chart DIV IDs have this prefix (they should be NETDATA.name2id(chart.id))
+    chart_div_animation_duration: 0, // the duration of the animation while scrolling to a chart
+
+ ms_penalty: 0, // the time penalty of the next alarm
+    ms_between_notifications: 500, // Firefox moves notifications off-screen (above the top of the screen)
+    // if they are shown faster than one per 500 ms
+
+ update_every: 10000, // the time in ms between alarm checks
+
+ notifications: false, // when true, the browser supports notifications (may not be granted though)
+ last_notification_id: 0, // the id of the last alarm_log we have raised an alarm for
+ first_notification_id: 0, // the id of the first alarm_log entry for this session
+ // this is used to prevent CLEAR notifications for past events
+ // notifications_shown: [],
+
+ server: null, // the server to connect to for fetching alarms
+ current: null, // the list of raised alarms - updated in the background
+
+ // a callback function to call every time the list of raised alarms is refreshed
+ callback: (typeof netdataAlarmsActiveCallback === 'function') ? netdataAlarmsActiveCallback : null,
+
+ // a callback function to call every time a notification is shown
+ // the return value is used to decide if the notification will be shown
+ notificationCallback: (typeof netdataAlarmsNotifCallback === 'function') ? netdataAlarmsNotifCallback : null,
+
+ recipients: null, // the list (array) of recipients to show alarms for, or null
+
+ recipientMatches: function (to_string, wanted_array) {
+ if (typeof wanted_array === 'undefined' || wanted_array === null || Array.isArray(wanted_array) === false) {
+ return true;
+ }
+
+ let r = ' ' + to_string.toString() + ' ';
+ let len = wanted_array.length;
+ while (len--) {
+ if (r.indexOf(' ' + wanted_array[len] + ' ') >= 0) {
+ return true;
+ }
+ }
+
+ return false;
+ },
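+
+    // Worked examples (values illustrative only):
+    //   recipientMatches('sysadmin webmaster', ['webmaster']) => true
+    //   recipientMatches('sysadmin', ['webmaster'])           => false
+    //   recipientMatches('anything', null)                    => true (no filtering)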
+
+ activeForRecipients: function () {
+ let active = {};
+ let data = NETDATA.alarms.current;
+
+ if (typeof data === 'undefined' || data === null) {
+ return active;
+ }
+
+ for (let x in data.alarms) {
+ if (!data.alarms.hasOwnProperty(x)) {
+ continue;
+ }
+
+ let alarm = data.alarms[x];
+ if ((alarm.status === 'WARNING' || alarm.status === 'CRITICAL') && NETDATA.alarms.recipientMatches(alarm.recipient, NETDATA.alarms.recipients)) {
+ active[x] = alarm;
+ }
+ }
+
+ return active;
+ },
+
+ notify: function (entry) {
+ // console.log('alarm ' + entry.unique_id);
+
+ if (entry.updated) {
+ // console.log('alarm ' + entry.unique_id + ' has been updated by another alarm');
+ return;
+ }
+
+ let value_string = entry.value_string;
+
+ if (NETDATA.alarms.current !== null) {
+ // get the current value_string
+ let t = NETDATA.alarms.current.alarms[entry.chart + '.' + entry.name];
+ if (typeof t !== 'undefined' && entry.status === t.status && typeof t.value_string !== 'undefined') {
+ value_string = t.value_string;
+ }
+ }
+
+ let name = entry.name.replace(/_/g, ' ');
+ let status = entry.status.toLowerCase();
+ let title = name + ' = ' + value_string.toString();
+ let tag = entry.alarm_id;
+ let icon = 'images/banner-icon-144x144.png';
+ let interaction = false;
+ let data = entry;
+ let show = true;
+
+ // console.log('alarm ' + entry.unique_id + ' ' + entry.chart + '.' + entry.name + ' is ' + entry.status);
+
+ switch (entry.status) {
+ case 'REMOVED':
+ show = false;
+ break;
+
+ case 'UNDEFINED':
+ return;
+
+ case 'UNINITIALIZED':
+ return;
+
+ case 'CLEAR':
+ if (entry.unique_id < NETDATA.alarms.first_notification_id) {
+ // console.log('alarm ' + entry.unique_id + ' is not current');
+ return;
+ }
+ if (entry.old_status === 'UNINITIALIZED' || entry.old_status === 'UNDEFINED') {
+ // console.log('alarm' + entry.unique_id + ' switch to CLEAR from ' + entry.old_status);
+ return;
+ }
+ if (entry.no_clear_notification) {
+ // console.log('alarm' + entry.unique_id + ' is CLEAR but has no_clear_notification flag');
+ return;
+ }
+ title = name + ' back to normal (' + value_string.toString() + ')';
+ icon = 'images/check-mark-2-128-green.png';
+ interaction = false;
+ break;
+
+ case 'WARNING':
+ if (entry.old_status === 'CRITICAL') {
+ status = 'demoted to ' + entry.status.toLowerCase();
+ }
+
+ icon = 'images/alert-128-orange.png';
+ interaction = false;
+ break;
+
+ case 'CRITICAL':
+ if (entry.old_status === 'WARNING') {
+ status = 'escalated to ' + entry.status.toLowerCase();
+ }
+
+ icon = 'images/alert-128-red.png';
+ interaction = true;
+ break;
+
+ default:
+ console.log('invalid alarm status ' + entry.status);
+ return;
+ }
+
+ // filter recipients
+ if (show) {
+ show = NETDATA.alarms.recipientMatches(entry.recipient, NETDATA.alarms.recipients);
+ }
+
+ /*
+ // cleanup old notifications with the same alarm_id as this one
+ // it does not seem to work on any web browser - so notifications cannot be removed
+
+ let len = NETDATA.alarms.notifications_shown.length;
+ while (len--) {
+ let n = NETDATA.alarms.notifications_shown[len];
+ if (n.data.alarm_id === entry.alarm_id) {
+ console.log('removing old alarm ' + n.data.unique_id);
+
+ // close the notification
+ n.close.bind(n);
+
+ // remove it from the array
+ NETDATA.alarms.notifications_shown.splice(len, 1);
+ len = NETDATA.alarms.notifications_shown.length;
+ }
+ }
+ */
+
+ if (show) {
+ if (typeof NETDATA.alarms.notificationCallback === 'function') {
+ show = NETDATA.alarms.notificationCallback(entry);
+ }
+
+ if (show) {
+ setTimeout(function () {
+ // show this notification
+ // console.log('new notification: ' + title);
+ let n = new Notification(title, {
+ body: entry.hostname + ' - ' + entry.chart + ' (' + entry.family + ') - ' + status + ': ' + entry.info,
+ tag: tag,
+ requireInteraction: interaction,
+ icon: NETDATA.serverStatic + icon,
+ data: data
+ });
+
+ n.onclick = function (event) {
+ event.preventDefault();
+ NETDATA.alarms.onclick(event.target.data);
+ };
+
+ // console.log(n);
+ // NETDATA.alarms.notifications_shown.push(n);
+ // console.log(entry);
+ }, NETDATA.alarms.ms_penalty);
+
+ NETDATA.alarms.ms_penalty += NETDATA.alarms.ms_between_notifications;
+ }
+ }
+ },
+
+ scrollToChart: function (chart_id) {
+ if (typeof chart_id === 'string') {
+ let offset = $('#' + NETDATA.alarms.chart_div_id_prefix + NETDATA.name2id(chart_id)).offset();
+ if (typeof offset !== 'undefined') {
+ $('html, body').animate({scrollTop: offset.top + NETDATA.alarms.chart_div_offset}, NETDATA.alarms.chart_div_animation_duration);
+ return true;
+ }
+ }
+ return false;
+ },
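+
+    // A minimal usage sketch (illustrative only; 'system.cpu' is an example
+    // chart id):
+    //
+    //     NETDATA.alarms.scrollToChart('system.cpu'); // true if the chart exists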
+
+ scrollToAlarm: function (alarm) {
+ if (typeof alarm === 'object') {
+ let ret = NETDATA.alarms.scrollToChart(alarm.chart);
+
+ if (ret && NETDATA.options.page_is_visible === false) {
+ window.focus();
+ }
+ // alert('netdata dashboard will now scroll to chart: ' + alarm.chart + '\n\nThis alarm opened to bring the browser window in front of the screen. Click on the dashboard to prevent it from appearing again.');
+ }
+
+ },
+
+ notifyAll: function () {
+ // console.log('FETCHING ALARM LOG');
+ NETDATA.alarms.get_log(NETDATA.alarms.last_notification_id, function (data) {
+ // console.log('ALARM LOG FETCHED');
+
+ if (data === null || typeof data !== 'object') {
+ console.log('invalid alarms log response');
+ return;
+ }
+
+ if (data.length === 0) {
+ console.log('received empty alarm log');
+ return;
+ }
+
+ // console.log('received alarm log of ' + data.length + ' entries, from ' + data[data.length - 1].unique_id.toString() + ' to ' + data[0].unique_id.toString());
+
+ data.sort(function (a, b) {
+ if (a.unique_id > b.unique_id) {
+ return -1;
+ }
+ if (a.unique_id < b.unique_id) {
+ return 1;
+ }
+ return 0;
+ });
+
+ NETDATA.alarms.ms_penalty = 0;
+
+ let len = data.length;
+ while (len--) {
+ if (data[len].unique_id > NETDATA.alarms.last_notification_id) {
+ NETDATA.alarms.notify(data[len]);
+ }
+ //else
+ // console.log('ignoring alarm (older) with id ' + data[len].unique_id.toString());
+ }
+
+ NETDATA.alarms.last_notification_id = data[0].unique_id;
+
+ if (typeof netdataAlarmsRemember === 'undefined' || netdataAlarmsRemember) {
+ NETDATA.localStorageSet('last_notification_id', NETDATA.alarms.last_notification_id, null);
+ }
+ // console.log('last notification id = ' + NETDATA.alarms.last_notification_id);
+        });
+ },
+
+ check_notifications: function () {
+ // returns true if we should fire 1+ notifications
+
+ if (NETDATA.alarms.notifications !== true) {
+ // console.log('web notifications are not available');
+ return false;
+ }
+
+ if (Notification.permission !== 'granted') {
+ // console.log('web notifications are not granted');
+ return false;
+ }
+
+ if (typeof NETDATA.alarms.current !== 'undefined' && typeof NETDATA.alarms.current.alarms === 'object') {
+ // console.log('can do alarms: old id = ' + NETDATA.alarms.last_notification_id + ' new id = ' + NETDATA.alarms.current.latest_alarm_log_unique_id);
+
+ if (NETDATA.alarms.current.latest_alarm_log_unique_id > NETDATA.alarms.last_notification_id) {
+ // console.log('new alarms detected');
+ return true;
+ }
+ //else console.log('no new alarms');
+ }
+ // else console.log('cannot process alarms');
+
+ return false;
+ },
+
+ get: function (what, callback) {
+ $.ajax({
+ url: NETDATA.alarms.server + '/api/v1/alarms?' + what.toString(),
+ async: true,
+ cache: false,
+ headers: {
+ 'Cache-Control': 'no-cache, no-store',
+ 'Pragma': 'no-cache'
+ },
+ xhrFields: {withCredentials: true} // required for the cookie
+ })
+ .done(function (data) {
+ data = NETDATA.xss.checkOptional('/api/v1/alarms', data /*, '.*\.(calc|calc_parsed|warn|warn_parsed|crit|crit_parsed)$' */);
+
+ if (NETDATA.alarms.first_notification_id === 0 && typeof data.latest_alarm_log_unique_id === 'number') {
+ NETDATA.alarms.first_notification_id = data.latest_alarm_log_unique_id;
+ }
+
+ if (typeof callback === 'function') {
+ return callback(data);
+ }
+ })
+ .fail(function () {
+ NETDATA.error(415, NETDATA.alarms.server);
+
+ if (typeof callback === 'function') {
+ return callback(null);
+ }
+ });
+ },
+
+ update_forever: function () {
+ if (netdataShowAlarms !== true || netdataSnapshotData !== null) {
+ return;
+ }
+
+ NETDATA.alarms.get('active', function (data) {
+ if (data !== null) {
+ NETDATA.alarms.current = data;
+
+ if (NETDATA.alarms.check_notifications()) {
+ NETDATA.alarms.notifyAll();
+ }
+
+ if (typeof NETDATA.alarms.callback === 'function') {
+ NETDATA.alarms.callback(data);
+ }
+
+ // Health monitoring is disabled on this netdata
+ if (data.status === false) {
+ return;
+ }
+ }
+
+ setTimeout(NETDATA.alarms.update_forever, NETDATA.alarms.update_every);
+ });
+ },
+
+ get_log: function (last_id, callback) {
+ // console.log('fetching all log after ' + last_id.toString());
+ $.ajax({
+ url: NETDATA.alarms.server + '/api/v1/alarm_log?after=' + last_id.toString(),
+ async: true,
+ cache: false,
+ headers: {
+ 'Cache-Control': 'no-cache, no-store',
+ 'Pragma': 'no-cache'
+ },
+ xhrFields: {withCredentials: true} // required for the cookie
+ })
+ .done(function (data) {
+ data = NETDATA.xss.checkOptional('/api/v1/alarm_log', data);
+
+ if (typeof callback === 'function') {
+ return callback(data);
+ }
+ })
+ .fail(function () {
+ NETDATA.error(416, NETDATA.alarms.server);
+
+ if (typeof callback === 'function') {
+ return callback(null);
+ }
+ });
+ },
+
+ init: function () {
+ NETDATA.alarms.server = NETDATA.fixHost(NETDATA.serverDefault);
+
+ if (typeof netdataAlarmsRemember === 'undefined' || netdataAlarmsRemember) {
+ NETDATA.alarms.last_notification_id =
+ NETDATA.localStorageGet('last_notification_id', NETDATA.alarms.last_notification_id, null);
+ }
+
+ if (NETDATA.alarms.onclick === null) {
+ NETDATA.alarms.onclick = NETDATA.alarms.scrollToAlarm;
+ }
+
+ if (typeof netdataAlarmsRecipients !== 'undefined' && Array.isArray(netdataAlarmsRecipients)) {
+ NETDATA.alarms.recipients = netdataAlarmsRecipients;
+ }
+
+ if (netdataShowAlarms) {
+ NETDATA.alarms.update_forever();
+
+ if ('Notification' in window) {
+ // console.log('notifications available');
+ NETDATA.alarms.notifications = true;
+
+ if (Notification.permission === 'default') {
+ Notification.requestPermission();
+ }
+ }
+ }
+ }
+};
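+
+// A sketch of the page-level alarm hooks referenced above (illustrative
+// only - define these globals before dashboard.js is loaded):
+//
+//     var netdataShowAlarms = true;               // enable alarm polling
+//     var netdataAlarmsRecipients = ['sysadmin']; // filter by recipient
+//     var netdataAlarmsNotifCallback = function (entry) {
+//         return entry.status === 'CRITICAL';     // notify only for CRITICAL
+//     };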
+
+// Registry of netdata hosts
+
+NETDATA.registry = {
+ server: null, // the netdata registry server
+    isCloudEnabled: false, // is netdata.cloud functionality enabled?
+    cloudBaseURL: null, // the netdata cloud base url
+    person_guid: null, // the unique ID of this browser / user
+    machine_guid: null, // the unique ID of the netdata server that served dashboard.js
+ hostname: 'unknown', // the hostname of the netdata server that served dashboard.js
+ machines: null, // the user's other URLs
+ machines_array: null, // the user's other URLs in an array
+ person_urls: null,
+ anonymous_statistics_checked: false,
+ MASKED_DATA: "***",
+
+    isUsingGlobalRegistry: function() {
+        return NETDATA.registry.server === "https://registry.my-netdata.io";
+    },
+
+    isRegistryEnabled: function() {
+        return !(NETDATA.registry.isUsingGlobalRegistry() || isSignedIn());
+    },
+
+ parsePersonUrls: function (person_urls) {
+ NETDATA.registry.person_urls = person_urls;
+
+ if (person_urls) {
+ NETDATA.registry.machines = {};
+ NETDATA.registry.machines_array = [];
+
+ let apu = person_urls;
+ let i = apu.length;
+ while (i--) {
+ if (typeof NETDATA.registry.machines[apu[i][0]] === 'undefined') {
+ // console.log('adding: ' + apu[i][4] + ', ' + ((now - apu[i][2]) / 1000).toString());
+
+ let obj = {
+ guid: apu[i][0],
+ url: apu[i][1],
+ last_t: apu[i][2],
+ accesses: apu[i][3],
+ name: apu[i][4],
+ alternate_urls: []
+ };
+ obj.alternate_urls.push(apu[i][1]);
+
+ NETDATA.registry.machines[apu[i][0]] = obj;
+ NETDATA.registry.machines_array.push(obj);
+ } else {
+ // console.log('appending: ' + apu[i][4] + ', ' + ((now - apu[i][2]) / 1000).toString());
+
+ let pu = NETDATA.registry.machines[apu[i][0]];
+ if (pu.last_t < apu[i][2]) {
+ pu.url = apu[i][1];
+ pu.last_t = apu[i][2];
+ pu.name = apu[i][4];
+ }
+ pu.accesses += apu[i][3];
+ pu.alternate_urls.push(apu[i][1]);
+ }
+ }
+ }
+
+ if (typeof netdataRegistryCallback === 'function') {
+ netdataRegistryCallback(NETDATA.registry.machines_array);
+ }
+ },
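+
+    // A sketch of the person_urls format expected above, inferred from the
+    // indices used (each entry is [guid, url, last_t, accesses, name]);
+    // the values below are illustrative only:
+    //
+    //     NETDATA.registry.parsePersonUrls([
+    //         ['guid-1', 'http://box:19999/', 1600000000000, 5, 'box']
+    //     ]);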
+
+ init: function () {
+ if (netdataRegistry !== true) {
+ return;
+ }
+
+ NETDATA.registry.hello(NETDATA.serverDefault, function (data) {
+ if (data) {
+ NETDATA.registry.server = data.registry;
+ if (data.cloud_base_url !== "") {
+ NETDATA.registry.isCloudEnabled = true;
+ NETDATA.registry.cloudBaseURL = data.cloud_base_url;
+ } else {
+ NETDATA.registry.isCloudEnabled = false;
+ NETDATA.registry.cloudBaseURL = "";
+ }
+ NETDATA.registry.machine_guid = data.machine_guid;
+ NETDATA.registry.hostname = data.hostname;
+ if (!NETDATA.registry.anonymous_statistics_checked) {
+ NETDATA.registry.anonymous_statistics_checked=true;
+ if (data.anonymous_statistics) {
+ (function(w,d,s,l,i){w[l]=w[l]||[];w[l].push({'gtm.start':
+ new Date().getTime(),event:'gtm.js'});var f=d.getElementsByTagName(s)[0],
+ j=d.createElement(s),dl=l!='dataLayer'?'&l='+l:'';j.async=false;j.src=
+ 'https://www.googletagmanager.com/gtm.js?id='+i+dl;f.parentNode.insertBefore(j,f);
+ })(window,document,'script','dataLayer','GTM-N6CBMJD');
+ dataLayer.push({"anonymous_statistics" : "true", "machine_guid" : data.machine_guid});
+ }
+ }
+ NETDATA.registry.access(2, function (person_urls) {
+ NETDATA.registry.parsePersonUrls(person_urls);
+ });
+ }
+ });
+ },
+
+ hello: function (host, callback) {
+ host = NETDATA.fixHost(host);
+
+ // send HELLO to a netdata server:
+ // 1. verifies the server is reachable
+ // 2. responds with the registry URL, the machine GUID of this netdata server and its hostname
+ $.ajax({
+ url: host + '/api/v1/registry?action=hello',
+ async: true,
+ cache: false,
+ headers: {
+ 'Cache-Control': 'no-cache, no-store',
+ 'Pragma': 'no-cache'
+ },
+ xhrFields: {withCredentials: true} // required for the cookie
+ })
+ .done(function (data) {
+ data = NETDATA.xss.checkOptional('/api/v1/registry?action=hello', data);
+
+ if (typeof data.status !== 'string' || data.status !== 'ok') {
+ NETDATA.error(408, host + ' response: ' + JSON.stringify(data));
+ data = null;
+ }
+
+ if (typeof callback === 'function') {
+ return callback(data);
+ }
+ })
+ .fail(function () {
+ NETDATA.error(407, host);
+
+ if (typeof callback === 'function') {
+ return callback(null);
+ }
+ });
+ },
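+
+    // A minimal usage sketch (illustrative only):
+    //
+    //     NETDATA.registry.hello('http://localhost:19999', function (data) {
+    //         if (data) console.log('registry for this agent:', data.registry);
+    //     });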
+
+ access: function (max_redirects, callback) {
+ let name = NETDATA.registry.MASKED_DATA;
+ let url = NETDATA.registry.MASKED_DATA;
+
+ if (!NETDATA.registry.isUsingGlobalRegistry()) {
+            // If the user is using a private registry, keep sending
+            // identifiable data.
+ name = NETDATA.registry.hostname;
+ url = NETDATA.serverDefault;
+ }
+
+ console.log("ACCESS", name, url);
+
+ // send ACCESS to a netdata registry:
+ // 1. it lets it know we are accessing a netdata server (its machine GUID and its URL)
+ // 2. it responds with a list of netdata servers we know
+ // the registry identifies us using a cookie it sets the first time we access it
+ // the registry may respond with a redirect URL to send us to another registry
+ $.ajax({
+ url: NETDATA.registry.server + '/api/v1/registry?action=access&machine=' + NETDATA.registry.machine_guid + '&name=' + encodeURIComponent(name) + '&url=' + encodeURIComponent(url), // + '&visible_url=' + encodeURIComponent(document.location),
+ async: true,
+ cache: false,
+ headers: {
+ 'Cache-Control': 'no-cache, no-store',
+ 'Pragma': 'no-cache'
+ },
+ xhrFields: {withCredentials: true} // required for the cookie
+ })
+ .done(function (data) {
+ data = NETDATA.xss.checkAlways('/api/v1/registry?action=access', data);
+
+ let redirect = null;
+ if (typeof data.registry === 'string') {
+ redirect = data.registry;
+ }
+
+ if (typeof data.status !== 'string' || data.status !== 'ok') {
+ NETDATA.error(409, NETDATA.registry.server + ' responded with: ' + JSON.stringify(data));
+ data = null;
+ }
+
+ if (data === null) {
+ if (redirect !== null && max_redirects > 0) {
+ NETDATA.registry.server = redirect;
+ NETDATA.registry.access(max_redirects - 1, callback);
+ }
+ else {
+ if (typeof callback === 'function') {
+ return callback(null);
+ }
+ }
+ } else {
+ if (typeof data.person_guid === 'string') {
+ NETDATA.registry.person_guid = data.person_guid;
+ }
+
+ if (typeof callback === 'function') {
+ const urls = data.urls.filter((u) => u[1] !== NETDATA.registry.MASKED_DATA);
+ return callback(urls);
+ }
+ }
+ })
+ .fail(function () {
+ NETDATA.error(410, NETDATA.registry.server);
+
+ if (typeof callback === 'function') {
+ return callback(null);
+ }
+ });
+ },
+
+ delete: function (delete_url, callback) {
+ // send DELETE to a netdata registry:
+ $.ajax({
+ url: NETDATA.registry.server + '/api/v1/registry?action=delete&machine=' + NETDATA.registry.machine_guid + '&name=' + encodeURIComponent(NETDATA.registry.hostname) + '&url=' + encodeURIComponent(NETDATA.serverDefault) + '&delete_url=' + encodeURIComponent(delete_url),
+ async: true,
+ cache: false,
+ headers: {
+ 'Cache-Control': 'no-cache, no-store',
+ 'Pragma': 'no-cache'
+ },
+ xhrFields: {withCredentials: true} // required for the cookie
+ })
+ .done(function (data) {
+ data = NETDATA.xss.checkAlways('/api/v1/registry?action=delete', data);
+
+ if (typeof data.status !== 'string' || data.status !== 'ok') {
+ NETDATA.error(411, NETDATA.registry.server + ' responded with: ' + JSON.stringify(data));
+ data = null;
+ }
+
+ if (typeof callback === 'function') {
+ return callback(data);
+ }
+ })
+ .fail(function () {
+ NETDATA.error(412, NETDATA.registry.server);
+
+ if (typeof callback === 'function') {
+ return callback(null);
+ }
+ });
+ },
+
+ search: function (machine_guid, callback) {
+ // SEARCH for the URLs of a machine:
+ $.ajax({
+ url: NETDATA.registry.server + '/api/v1/registry?action=search&machine=' + NETDATA.registry.machine_guid + '&name=' + encodeURIComponent(NETDATA.registry.hostname) + '&url=' + encodeURIComponent(NETDATA.serverDefault) + '&for=' + machine_guid,
+ async: true,
+ cache: false,
+ headers: {
+ 'Cache-Control': 'no-cache, no-store',
+ 'Pragma': 'no-cache'
+ },
+ xhrFields: {withCredentials: true} // required for the cookie
+ })
+ .done(function (data) {
+ data = NETDATA.xss.checkAlways('/api/v1/registry?action=search', data);
+
+ if (typeof data.status !== 'string' || data.status !== 'ok') {
+ NETDATA.error(417, NETDATA.registry.server + ' responded with: ' + JSON.stringify(data));
+ data = null;
+ }
+
+ if (typeof callback === 'function') {
+ return callback(data);
+ }
+ })
+ .fail(function () {
+ NETDATA.error(418, NETDATA.registry.server);
+
+ if (typeof callback === 'function') {
+ return callback(null);
+ }
+ });
+ },
+
+ switch: function (new_person_guid, callback) {
+ // impersonate
+ $.ajax({
+ url: NETDATA.registry.server + '/api/v1/registry?action=switch&machine=' + NETDATA.registry.machine_guid + '&name=' + encodeURIComponent(NETDATA.registry.hostname) + '&url=' + encodeURIComponent(NETDATA.serverDefault) + '&to=' + new_person_guid,
+ async: true,
+ cache: false,
+ headers: {
+ 'Cache-Control': 'no-cache, no-store',
+ 'Pragma': 'no-cache'
+ },
+ xhrFields: {withCredentials: true} // required for the cookie
+ })
+ .done(function (data) {
+ data = NETDATA.xss.checkAlways('/api/v1/registry?action=switch', data);
+
+ if (typeof data.status !== 'string' || data.status !== 'ok') {
+ NETDATA.error(413, NETDATA.registry.server + ' responded with: ' + JSON.stringify(data));
+ data = null;
+ }
+
+ if (typeof callback === 'function') {
+ return callback(data);
+ }
+ })
+ .fail(function () {
+ NETDATA.error(414, NETDATA.registry.server);
+
+ if (typeof callback === 'function') {
+ return callback(null);
+ }
+ });
+ }
+};
+
+// Load required JS libraries and CSS
+
+NETDATA.requiredJs = [
+ {
+ url: NETDATA.serverStatic + 'lib/bootstrap-3.3.7.min.js',
+ async: false,
+ isAlreadyLoaded: function () {
+ // check if bootstrap is loaded
+ if (typeof $().emulateTransitionEnd === 'function') {
+ return true;
+ } else {
+ return typeof netdataNoBootstrap !== 'undefined' && netdataNoBootstrap;
+ }
+ }
+ },
+ {
+ url: NETDATA.serverStatic + 'lib/fontawesome-all-5.0.1.min.js',
+ async: true,
+ isAlreadyLoaded: function () {
+ return typeof netdataNoFontAwesome !== 'undefined' && netdataNoFontAwesome;
+ }
+ },
+ {
+ url: NETDATA.serverStatic + 'lib/perfect-scrollbar-0.6.15.min.js',
+ isAlreadyLoaded: function () {
+ return false;
+ }
+ }
+];
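+
+// A sketch of opting out of the bundled libraries checked above
+// (illustrative only - set these globals before dashboard.js is loaded):
+//
+//     var netdataNoBootstrap = true;   // the page ships its own bootstrap, or none
+//     var netdataNoFontAwesome = true; // the page ships its own Font Awesome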
+
+NETDATA.requiredCSS = [
+ {
+ url: NETDATA.themes.current.bootstrap_css,
+ isAlreadyLoaded: function () {
+ return typeof netdataNoBootstrap !== 'undefined' && netdataNoBootstrap;
+ }
+ },
+ {
+ url: NETDATA.themes.current.dashboard_css,
+ isAlreadyLoaded: function () {
+ return false;
+ }
+ }
+];
+
+NETDATA.loadedRequiredJs = 0;
+NETDATA.loadRequiredJs = function (index, callback) {
+ if (index >= NETDATA.requiredJs.length) {
+ if (typeof callback === 'function') {
+ return callback();
+ }
+ return;
+ }
+
+ if (NETDATA.requiredJs[index].isAlreadyLoaded()) {
+ NETDATA.loadedRequiredJs++;
+ NETDATA.loadRequiredJs(++index, callback);
+ return;
+ }
+
+ if (NETDATA.options.debug.main_loop) {
+ console.log('loading ' + NETDATA.requiredJs[index].url);
+ }
+
+ let async = true;
+ if (typeof NETDATA.requiredJs[index].async !== 'undefined' && NETDATA.requiredJs[index].async === false) {
+ async = false;
+ }
+
+ $.ajax({
+ url: NETDATA.requiredJs[index].url,
+ cache: true,
+ dataType: "script",
+ xhrFields: {withCredentials: true} // required for the cookie
+ })
+ .done(function () {
+ if (NETDATA.options.debug.main_loop) {
+ console.log('loaded ' + NETDATA.requiredJs[index].url);
+ }
+ })
+ .fail(function () {
+ alert('Cannot load required JS library: ' + NETDATA.requiredJs[index].url);
+ })
+ .always(function () {
+ NETDATA.loadedRequiredJs++;
+
+ // if (async === false)
+ if (!async) {
+ NETDATA.loadRequiredJs(++index, callback);
+ }
+ });
+
+ // if (async === true)
+ if (async) {
+ NETDATA.loadRequiredJs(++index, callback);
+ }
+};
+
+NETDATA.loadRequiredCSS = function (index) {
+ if (index >= NETDATA.requiredCSS.length) {
+ return;
+ }
+
+ if (NETDATA.requiredCSS[index].isAlreadyLoaded()) {
+ NETDATA.loadRequiredCSS(++index);
+ return;
+ }
+
+ if (NETDATA.options.debug.main_loop) {
+ console.log('loading ' + NETDATA.requiredCSS[index].url);
+ }
+
+ NETDATA._loadCSS(NETDATA.requiredCSS[index].url);
+ NETDATA.loadRequiredCSS(++index);
+};
+
+// Boot it!
+
+if (typeof netdataPrepCallback === 'function') {
+ netdataPrepCallback();
+}
+
+NETDATA.errorReset();
+NETDATA.loadRequiredCSS(0);
+
+NETDATA._loadjQuery(function () {
+ NETDATA.loadRequiredJs(0, function () {
+ if (typeof $().emulateTransitionEnd !== 'function') {
+ // bootstrap is not available
+ NETDATA.options.current.show_help = false;
+ }
+
+ if (typeof netdataDontStart === 'undefined' || !netdataDontStart) {
+ if (NETDATA.options.debug.main_loop) {
+ console.log('starting chart refresh thread');
+ }
+
+ NETDATA.start();
+ }
+ });
+});
+})(window, document, (typeof jQuery === 'function')?jQuery:undefined);
diff --git a/web/gui/dashboard.slate.css b/web/gui/dashboard.slate.css
new file mode 100644
index 0000000..af33825
--- /dev/null
+++ b/web/gui/dashboard.slate.css
@@ -0,0 +1,775 @@
+/* SPDX-License-Identifier: GPL-3.0-or-later */
+html,
+body {
+ /*font-family: Calibri,"Segoe UI","Helvetica Neue",Helvetica,Arial,sans-serif;*/
+ font-family: "Helvetica Neue", Helvetica, Arial, sans-serif;
+ font-style: normal;
+ font-variant: normal;
+ color: #878b90;
+}
+
+/* fixes for default slate theme */
+code {
+ color: #bbb; /*#c7254e;*/
+ background-color: #555; /* #f9f2f4; */
+}
+
+.dashboard-sidebar .nav > .active > a,
+.dashboard-sidebar .nav > .active:hover > a,
+.dashboard-sidebar .nav > .active:focus > a {
+ color: #765d9c;
+ border-left: 2px solid #765d9c;
+}
+
+.morelink {
+ color: #765d9c;
+ text-decoration: none;
+}
+
+.morelink:hover {
+ color: #563d7c;
+ text-decoration: none;
+}
+
+.morelink:focus {
+ color: #765d9c;
+ text-decoration: none;
+}
+
+.netdata-chart-alignment {
+ margin-left: 55px;
+}
+
+.netdata-chart-row {
+ width: 100%;
+ text-align: center;
+ display: flex;
+ display: -webkit-flex;
+ display: -moz-flex;
+ align-items: flex-end;
+ -moz-align-items: flex-end;
+ -webkit-align-items: flex-end;
+ justify-content: center;
+    -webkit-justify-content: center;
+ -moz-justify-content: center;
+ padding-top: 10px;
+}
+
+.netdata-container {
+ display: inline-block;
+ overflow: hidden;
+
+ transform: translate3d(0,0,0);
+
+ /* required for child elements to have absolute position */
+ position: relative;
+
+ /* width and height is given per chart with data-width and data-height */
+}
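+
+/* A sketch of the markup this class is applied to (illustrative only;
+   dashboard.js assigns the class to the chart divs it finds):
+
+   <div class="netdata-container" data-netdata="system.cpu"
+        data-width="100%" data-height="200px"></div>
+*/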
+
+.netdata-container-gauge {
+ display: inline-block;
+ overflow: hidden;
+
+ transform: translate3d(0,0,0);
+
+ /* required for child elements to have absolute position */
+ position: relative;
+
+ /* width and height is given per chart with data-width and data-height */
+}
+
+.netdata-container-gauge:after {
+ padding-top: 60%;
+ display: block;
+ content: '';
+}
+
+.netdata-container-easypiechart {
+ display: inline-block;
+ overflow: hidden;
+
+ transform: translate3d(0,0,0);
+
+ /* required for child elements to have absolute position */
+ position: relative;
+
+ /* width and height is given per chart with data-width and data-height */
+}
+
+.netdata-container-easypiechart:after {
+ padding-top: 100%;
+ display: block;
+ content: '';
+}
+
+.netdata-aspect {
+ position: relative;
+ width: 100%;
+ padding: 0px;
+ margin: 0px;
+}
+
+.netdata-container-with-legend {
+ display: inline-block;
+ overflow: hidden;
+
+ transform: translate3d(0,0,0);
+
+ /* fix minimum scrollbar issue in firefox */
+ min-height: 99px;
+
+ /* required for child elements to have absolute position */
+ position: relative;
+
+ /* width and height is given per chart with data-width and data-height */
+}
+
+.netdata-legend-resize-handler {
+ display: block;
+ position: absolute;
+ bottom: 0px;
+ right: 0px;
+ height: 15px;
+ width: 20px;
+ background-color: #272b30;
+ font-size: 15px;
+ vertical-align: middle;
+ line-height: 15px;
+ cursor: ns-resize;
+ color: #373b40;
+ text-align: center;
+ overflow: hidden;
+ z-index: 20;
+ padding: 0px;
+ margin: 0px;
+}
+
+.netdata-legend-toolbox {
+ display: block;
+ position: absolute;
+ bottom: 0px;
+ right: 30px;
+ height: 15px;
+ width: 110px;
+ background-color: #272b30;
+ font-size: 12px;
+ vertical-align: middle;
+ line-height: 15px;
+ color: #373b40;
+ text-align: center;
+ overflow: hidden;
+ z-index: 20;
+ padding: 0px;
+ margin: 0px;
+
+ /* prevent text selection after double click */
+ -webkit-user-select: none; /* webkit (safari, chrome) browsers */
+ -moz-user-select: none; /* mozilla browsers */
+ -khtml-user-select: none; /* webkit (konqueror) browsers */
+ -ms-user-select: none; /* IE10+ */
+}
+
+.netdata-legend-toolbox-button {
+ display: inline-block;
+ position: relative;
+ height: 15px;
+ width: 18px;
+ background-color: #272b30;
+ font-size: 12px;
+ vertical-align: middle;
+ line-height: 15px;
+ color: #474b50;
+ text-align: center;
+ overflow: hidden;
+ z-index: 21;
+ padding: 0px;
+ margin: 0px;
+ cursor: pointer;
+
+ /* prevent text selection after double click */
+ -webkit-user-select: none; /* webkit (safari, chrome) browsers */
+ -moz-user-select: none; /* mozilla browsers */
+ -khtml-user-select: none; /* webkit (konqueror) browsers */
+ -ms-user-select: none; /* IE10+ */
+}
+
+.netdata-message {
+ display: inline-block;
+ position: absolute;
+ top: 0;
+ left: 0;
+ right: 0;
+ bottom: 0;
+ text-align: left;
+ vertical-align: top;
+ font-weight: bold;
+ font-size: x-small;
+ overflow: hidden;
+ background: inherit;
+ z-index: 0;
+}
+
+.netdata-message.hidden {
+ display: none;
+}
+
+.netdata-message.icon {
+ color: #2f3338;
+ text-align: center;
+ vertical-align: middle;
+}
+
+.netdata-chart-legend {
+ position: absolute; /* within .netdata-container */
+ top: 0;
+ right: 0;
+ overflow: hidden;
+ text-overflow: ellipsis;
+ line-height: 14px;
+ display: block;
+ width: 140px; /* --legend-width */
+ height: calc(100% - 15px); /* 10px for the resize handler and 5px for the top margin */
+ font-size: 10px;
+ margin-top: 5px;
+ text-align: left;
+ /* width and height is calculated (depends on the appearance of the legend) */
+}
+
+.netdata-legend-title-date {
+ font-size: 10px;
+ font-weight: normal;
+ margin-top: 0px;
+ overflow: hidden;
+ text-overflow: ellipsis;
+ white-space: nowrap;
+}
+
+.netdata-legend-title-time {
+ font-size: 11px;
+ font-weight: bold;
+ margin-top: 0px;
+ overflow: hidden;
+ text-overflow: ellipsis;
+ white-space: nowrap;
+}
+
+.netdata-legend-title-units {
+ position: absolute;
+ right: 10px;
+ float: right;
+ font-size: 11px;
+ vertical-align: top;
+ font-weight: normal;
+ margin-top: 0px;
+ overflow: hidden;
+ text-overflow: ellipsis;
+ white-space: nowrap;
+}
+
+.netdata-legend-series {
+ position: absolute;
+ width: 140px; /* legend-width */
+ height: calc(100% - 50px);
+ overflow: hidden;
+ text-overflow: ellipsis;
+ line-height: 14.5px; /* line spacing at the legend */
+ display: block;
+ font-size: 10px;
+ margin-top: 0px;
+}
+
+.netdata-legend-name-table-line {
+ display: inline-block;
+ width: 13px;
+ height: 4px;
+ border-width: 0px;
+ border-bottom-width: 2px;
+ border-bottom-style: solid;
+ border-bottom-color: #272b30;
+}
+
+.netdata-legend-name-table-area {
+ display: inline-block;
+ width: 13px;
+ height: 5px;
+ border-width: 1px;
+ border-top-width: 1px;
+ border-top-style: solid;
+ border-top-color: inherit;
+}
+
+.netdata-legend-name-table-stacked {
+ display: inline-block;
+ width: 13px;
+ height: 5px;
+ border-width: 1px;
+ border-top-width: 1px;
+ border-top-style: solid;
+ border-top-color: inherit;
+}
+
+.netdata-legend-name-tr {
+}
+
+.netdata-legend-name-td {
+}
+
+.netdata-legend-name {
+ text-align: left;
+ font-size: 11px; /* legend: dimension name size */
+ font-weight: bold;
+ vertical-align: bottom;
+ margin-top: 0px;
+ z-index: 9;
+ padding: 0px;
+ width: 80px !important;
+ max-width: 80px !important;
+ text-overflow: ellipsis;
+ overflow: hidden;
+ white-space: nowrap;
+ display: inline-block;
+ cursor: pointer;
+ -webkit-print-color-adjust: exact;
+}
+
+.netdata-legend-value {
+ /*margin-left: 14px;*/
+ position: absolute;
+ right: 10px;
+ float: right;
+ text-align: right;
+ font-size: 11px; /* legend: dimension value size */
+ font-weight: bold;
+ vertical-align: bottom;
+ background-color: #272b30;
+ margin-top: 0px;
+ z-index: 10;
+ padding: 0px;
+ padding-left: 15px;
+ cursor: pointer;
+ /* -webkit-font-smoothing: none; */
+}
+
+.netdata-legend-name.not-selected {
+ font-weight: normal;
+ opacity: 0.3;
+}
+
+.netdata-chart {
+ position: absolute; /* within .netdata-container */
+ top: 0; /* within .netdata-container */
+ left: 0; /* within .netdata-container */
+ display: inline-block;
+ overflow: hidden;
+ width: 100%;
+ height: 100%;
+ z-index: 5;
+
+ /* width and height is calculated (depends on the appearance of the legend) */
+}
+
+.netdata-chart-with-legend-right {
+ position: absolute; /* within .netdata-container */
+ top: 0; /* within .netdata-container */
+ left: 0; /* within .netdata-container */
+ display: block;
+ overflow: hidden;
+ margin-right: 140px; /* --legend-width */
+ width: calc(100% - 140px); /* --legend-width */
+ height: 100%;
+ z-index: 5;
+ flex-grow: 1;
+
+ /* width and height is calculated (depends on the appearance of the legend) */
+}
+
+.netdata-peity-chart {
+
+}
+
+.netdata-sparkline-chart {
+
+}
+
+.netdata-dygraph-chart {
+
+}
+
+.netdata-morris-chart {
+
+}
+
+.netdata-google-chart {
+
+}
+
+.dygraph-ylabel {
+}
+
+.dygraph-axis-label-x {
+ overflow-x: hidden;
+}
+
+.dygraph-axis-label {
+ color: #6c7075;
+}
+
+.dygraph-label-rotate-left {
+ text-align: center;
+ /* See http://caniuse.com/#feat=transforms2d */
+ transform: rotate(90deg);
+ -webkit-transform: rotate(90deg);
+ -moz-transform: rotate(90deg);
+ -o-transform: rotate(90deg);
+ -ms-transform: rotate(90deg);
+}
+
+/* For y2-axis label */
+.dygraph-label-rotate-right {
+ text-align: center;
+ /* See http://caniuse.com/#feat=transforms2d */
+ transform: rotate(-90deg);
+ -webkit-transform: rotate(-90deg);
+ -moz-transform: rotate(-90deg);
+ -o-transform: rotate(-90deg);
+ -ms-transform: rotate(-90deg);
+}
+
+.dygraph-title {
+ text-indent: 56px;
+ text-align: left;
+ position: absolute;
+ left: 0px;
+ top: 4px;
+ font-size: 11px;
+ font-weight: bold;
+ text-overflow: ellipsis;
+ overflow: hidden;
+ white-space: nowrap;
+}
+
+/* fix for sparkline tooltip under bootstrap */
+.jqstooltip {
+ width: auto !important;
+ height: auto !important;
+}
+
+.easyPieChart {
+ position: relative;
+ text-align: center;
+}
+
+.easyPieChart canvas {
+ position: absolute;
+ top: 0;
+ left: 0;
+}
+
+.easyPieChartLabel {
+ display: inline-block;
+ position: absolute;
+ float: left;
+ left: 0;
+ width: 100%;
+ text-align: center;
+ color: #BBB;
+ font-weight: normal;
+ text-shadow: #272b30 0px 0px 1px;
+ /* -webkit-font-smoothing: none; */
+}
+
+.easyPieChartTitle {
+ display: inline-block;
+ position: absolute;
+ float: left;
+ left: 0;
+ width: 64%;
+ margin-left: 18% !important;
+ text-align: center;
+ color: #676b70;
+ font-weight: bold;
+}
+
+.easyPieChartUnits {
+ display: inline-block;
+ position: absolute;
+ float: left;
+ left: 0;
+ width: 60%;
+ margin-left: 20% !important;
+ text-align: center;
+ color: #676b70;
+ font-weight: normal;
+}
+
+.gaugeChart {
+ position: relative;
+ text-align: center;
+}
+
+.gaugeChart canvas {
+ position: absolute;
+ top: 0;
+ left: 0;
+ bottom: 0;
+ right: 0;
+ z-index: 0;
+}
+
+.gaugeChartLabel {
+ display: inline-block;
+ position: absolute;
+ float: left;
+ left: 0;
+ width: 100%;
+ text-align: center;
+ color: #BBB;
+ font-weight: bold;
+ z-index: 1;
+ text-shadow: #272b30 0px 0px 1px;
+ /* text-shadow: #CCC 1px 1px 0px, #CCC -1px -1px 0px, #CCC 1px -1px 0px, #CCC -1px 1px 0px; */
+ /* -webkit-text-stroke: 1px #777; */
+ /* -webkit-font-smoothing: none; */
+}
+
+.gaugeChartTitle {
+ display: inline-block;
+ position: absolute;
+ float: left;
+ left: 0;
+ width: 100%;
+ text-align: center;
+ color: #676b70;
+ font-weight: bold;
+}
+
+.gaugeChartUnits {
+ display: inline-block;
+ position: absolute;
+ float: left;
+ left: 0;
+ bottom: 0;
+ width: 100%;
+ text-align: left;
+ margin-left: 5%;
+ color: #676b70;
+ font-weight: normal;
+}
+
+.gaugeChartMin {
+ display: inline-block;
+ position: absolute;
+ float: left;
+ left: 0;
+ bottom: 8%;
+ width: 92%;
+ margin-left: 8%;
+ text-align: left;
+ color: #676b70;
+ font-weight: normal;
+}
+
+.gaugeChartMax {
+ display: inline-block;
+ position: absolute;
+ float: left;
+ left: 0;
+ bottom: 8%;
+ width: 95%;
+ margin-right: 5%;
+ text-align: right;
+ color: #676b70;
+ font-weight: normal;
+}
+
+.popover-title {
+ font-weight: bold;
+ font-size: 12px;
+}
+
+.popover-content {
+ font-size: 11px;
+}
+
+/* ----------------------------------------------------------------------------
+ perfect-scrollbar settings
+ */
+
+.ps-container {
+ -ms-touch-action: auto;
+ touch-action: auto;
+ overflow: hidden !important;
+ -ms-overflow-style: none;
+}
+
+@supports (-ms-overflow-style: none) {
+ .ps-container {
+ overflow: auto !important;
+ }
+}
+
+@media screen and (-ms-high-contrast: active), (-ms-high-contrast: none) {
+ .ps-container {
+ overflow: auto !important;
+ }
+}
+
+.ps-container.ps-active-x > .ps-scrollbar-x-rail,
+.ps-container.ps-active-y > .ps-scrollbar-y-rail {
+ display: block;
+ background-color: transparent;
+}
+
+.ps-container.ps-in-scrolling.ps-x > .ps-scrollbar-x-rail {
+ background-color: transparent; /* background color when dragged away */
+ opacity: 0.9;
+}
+
+.ps-container.ps-in-scrolling.ps-x > .ps-scrollbar-x-rail > .ps-scrollbar-x {
+ background-color: #aaa; /* scrollbar color when dragged away */
+ height: 5px;
+}
+
+.ps-container.ps-in-scrolling.ps-y > .ps-scrollbar-y-rail {
+ background-color: transparent; /* background color when dragged away */
+ opacity: 0.9;
+}
+
+.ps-container.ps-in-scrolling.ps-y > .ps-scrollbar-y-rail > .ps-scrollbar-y {
+ background-color: #aaa; /* scrollbar color when dragged away */
+ width: 5px;
+}
+
+.ps-container > .ps-scrollbar-x-rail {
+ display: none;
+ position: absolute;
+ /* please don't change 'position' */
+ opacity: 0.2; /* the opacity when not on hover of the content */
+ -webkit-transition: background-color .2s linear, opacity .2s linear;
+ -o-transition: background-color .2s linear, opacity .2s linear;
+ -moz-transition: background-color .2s linear, opacity .2s linear;
+ transition: background-color .2s linear, opacity .2s linear;
+ bottom: 0px;
+ /* there must be 'bottom' for ps-scrollbar-x-rail */
+ height: 15px;
+}
+
+.ps-container > .ps-scrollbar-x-rail > .ps-scrollbar-x {
+ position: absolute;
+ /* please don't change 'position' */
+ background-color: #666; /* #aaa; the color on content hover */
+ -webkit-border-radius: 6px;
+ -moz-border-radius: 6px;
+ border-radius: 6px;
+ -webkit-transition: background-color .2s linear, height .2s linear, width .2s ease-in-out, -webkit-border-radius .2s ease-in-out;
+ transition: background-color .2s linear, height .2s linear, width .2s ease-in-out, -webkit-border-radius .2s ease-in-out;
+ -o-transition: background-color .2s linear, height .2s linear, width .2s ease-in-out, border-radius .2s ease-in-out;
+ -moz-transition: background-color .2s linear, height .2s linear, width .2s ease-in-out, border-radius .2s ease-in-out, -moz-border-radius .2s ease-in-out;
+ transition: background-color .2s linear, height .2s linear, width .2s ease-in-out, border-radius .2s ease-in-out;
+ transition: background-color .2s linear, height .2s linear, width .2s ease-in-out, border-radius .2s ease-in-out, -webkit-border-radius .2s ease-in-out, -moz-border-radius .2s ease-in-out;
+ bottom: 2px;
+ /* there must be 'bottom' for ps-scrollbar-x */
+ height: 5px; /* the width of the scrollbar */
+}
+
+.ps-container > .ps-scrollbar-x-rail:hover > .ps-scrollbar-x, .ps-container > .ps-scrollbar-x-rail:active > .ps-scrollbar-x {
+ height: 5px;
+}
+
+.ps-container > .ps-scrollbar-y-rail {
+ display: none;
+ position: absolute;
+ /* please don't change 'position' */
+ opacity: 0.2; /* the opacity when not on hover of the content */
+ -webkit-transition: background-color .2s linear, opacity .2s linear;
+ -o-transition: background-color .2s linear, opacity .2s linear;
+ -moz-transition: background-color .2s linear, opacity .2s linear;
+ transition: background-color .2s linear, opacity .2s linear;
+ right: 0;
+ /* there must be 'right' for ps-scrollbar-y-rail */
+ width: 15px;
+}
+
+.ps-container > .ps-scrollbar-y-rail > .ps-scrollbar-y {
+ position: absolute;
+ /* please don't change 'position' */
+ background-color: #666; /* #aaa; the color on content hover */
+ -webkit-border-radius: 6px;
+ -moz-border-radius: 6px;
+ border-radius: 6px;
+ -webkit-transition: background-color .2s linear, height .2s linear, width .2s ease-in-out, -webkit-border-radius .2s ease-in-out;
+ transition: background-color .2s linear, height .2s linear, width .2s ease-in-out, -webkit-border-radius .2s ease-in-out;
+ -o-transition: background-color .2s linear, height .2s linear, width .2s ease-in-out, border-radius .2s ease-in-out;
+ -moz-transition: background-color .2s linear, height .2s linear, width .2s ease-in-out, border-radius .2s ease-in-out, -moz-border-radius .2s ease-in-out;
+ transition: background-color .2s linear, height .2s linear, width .2s ease-in-out, border-radius .2s ease-in-out;
+ transition: background-color .2s linear, height .2s linear, width .2s ease-in-out, border-radius .2s ease-in-out, -webkit-border-radius .2s ease-in-out, -moz-border-radius .2s ease-in-out;
+ right: 2px;
+ /* there must be 'right' for ps-scrollbar-y */
+ width: 5px; /* the width of the scrollbar */
+}
+
+.ps-container > .ps-scrollbar-y-rail:hover > .ps-scrollbar-y, .ps-container > .ps-scrollbar-y-rail:active > .ps-scrollbar-y {
+ width: 5px;
+}
+
+.ps-container:hover.ps-in-scrolling.ps-x > .ps-scrollbar-x-rail {
+ background-color: transparent; /* background color when dragged */
+ opacity: 0.9;
+}
+
+.ps-container:hover.ps-in-scrolling.ps-x > .ps-scrollbar-x-rail > .ps-scrollbar-x {
+ background-color: #bbb; /* scrollbar color when dragged */
+ height: 5px;
+}
+
+.ps-container:hover.ps-in-scrolling.ps-y > .ps-scrollbar-y-rail {
+ background-color: transparent; /* background color when dragged */
+ opacity: 0.9;
+}
+
+.ps-container:hover.ps-in-scrolling.ps-y > .ps-scrollbar-y-rail > .ps-scrollbar-y {
+ background-color: #bbb; /* scrollbar color when dragged */
+ width: 5px;
+}
+
+.ps-container:hover > .ps-scrollbar-x-rail,
+.ps-container:hover > .ps-scrollbar-y-rail {
+ opacity: 0.6;
+}
+
+.ps-container:hover > .ps-scrollbar-x-rail:hover {
+ background-color: transparent; /* the background color on hover of the scrollbar */
+ opacity: 0.9;
+}
+
+.ps-container:hover > .ps-scrollbar-x-rail:hover > .ps-scrollbar-x {
+ background-color: #999; /* scrollbar color on hover */
+}
+
+.ps-container:hover > .ps-scrollbar-y-rail:hover {
+ background-color: transparent; /* the background color on hover of the scrollbar */
+ opacity: 0.9;
+}
+
+.ps-container:hover > .ps-scrollbar-y-rail:hover > .ps-scrollbar-y {
+ background-color: #999; /* scrollbar color on hover */
+}
+
+.dygraph__history-tip {
+ position: absolute;
+ top: 50%;
+ transform: translateY(-50%);
+    display: none; /* overridden in js */
+ margin-right: 25px;
+ direction: rtl;
+ overflow: hidden;
+ pointer-events: none;
+}
+
+.dygraph__history-tip-content {
+ display: inline-block;
+ white-space: nowrap;
+ direction: ltr;
+ pointer-events: auto;
+}
diff --git a/web/gui/dashboard_info.js b/web/gui/dashboard_info.js
new file mode 100644
index 0000000..df15a63
--- /dev/null
+++ b/web/gui/dashboard_info.js
@@ -0,0 +1,3781 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+// Codacy declarations
+/* global NETDATA */
+
+var netdataDashboard = window.netdataDashboard || {};
+
+// Informational content for the various sections of the GUI (menus, sections, charts, etc.)
+
+// ----------------------------------------------------------------------------
+// Menus
+
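+// Each key below names a dashboard section. A sketch of an entry's shape
+// (illustrative only - title, icon and info are the fields used; info may
+// be a plain string or a function of the detected OS):
+//
+//     'example': {
+//         title: 'Example Section',
+//         icon: '<i class="fas fa-cube"></i>',
+//         info: 'Shown under the section heading.'
+//     },
+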
+netdataDashboard.menu = {
+ 'system': {
+ title: 'System Overview',
+ icon: '<i class="fas fa-bookmark"></i>',
+ info: 'Overview of the key system metrics.'
+ },
+
+ 'services': {
+ title: 'systemd Services',
+ icon: '<i class="fas fa-cogs"></i>',
+        info: 'Resource utilization of systemd services. netdata monitors all systemd services via CGROUPS ' +
+            '(the resource accounting mechanism also used by containers).'
+ },
+
+ 'ap': {
+ title: 'Access Points',
+ icon: '<i class="fas fa-wifi"></i>',
+ info: 'Performance metrics for the access points (i.e. wireless interfaces in AP mode) found on the system.'
+ },
+
+ 'tc': {
+ title: 'Quality of Service',
+ icon: '<i class="fas fa-globe"></i>',
+        info: 'Netdata collects and visualizes <code>tc</code> class utilization using its ' +
+            '<a href="https://github.com/netdata/netdata/blob/master/collectors/tc.plugin/tc-qos-helper.sh.in" target="_blank">tc-helper plugin</a>. ' +
+            'If you also use <a href="http://firehol.org/#fireqos" target="_blank">FireQOS</a> for setting up QoS, ' +
+            'netdata automatically collects interface and class names. If your QoS configuration includes overhead ' +
+            'calculation, the values shown here include these overheads, so the total bandwidth reported here ' +
+            'will be higher than the total bandwidth reported for the same interface in the Network Interfaces ' +
+            'section. QoS data collection may also lag slightly behind the interface metrics, since it uses ' +
+            'a BASH script; a shift of a few milliseconds in data collection is to be expected.'
+ },
+
+ 'net': {
+ title: 'Network Interfaces',
+ icon: '<i class="fas fa-sitemap"></i>',
+ info: 'Performance metrics for network interfaces.'
+ },
+
+ 'wireless': {
+ title: 'Wireless Interfaces',
+ icon: '<i class="fas fa-wifi"></i>',
+ info: 'Performance metrics for wireless interfaces.'
+ },
+
+ 'ip': {
+ title: 'Networking Stack',
+ icon: '<i class="fas fa-cloud"></i>',
+ info: function (os) {
+ if (os === "linux")
+                return 'Metrics for the networking stack of the system. These metrics are collected from <code>/proc/net/netstat</code>, apply to both IPv4 and IPv6 traffic, and are related to the operation of the kernel networking stack.';
+ else
+ return 'Metrics for the networking stack of the system.';
+ }
+ },
+
+ 'ipv4': {
+ title: 'IPv4 Networking',
+ icon: '<i class="fas fa-cloud"></i>',
+ info: 'Metrics for the IPv4 stack of the system. ' +
+ '<a href="https://en.wikipedia.org/wiki/IPv4" target="_blank">Internet Protocol version 4 (IPv4)</a> is ' +
+ 'the fourth version of the Internet Protocol (IP). It is one of the core protocols of standards-based ' +
+ 'internetworking methods in the Internet. IPv4 is a connectionless protocol for use on packet-switched ' +
+ 'networks. It operates on a best effort delivery model, in that it does not guarantee delivery, nor does ' +
+ 'it assure proper sequencing or avoidance of duplicate delivery. These aspects, including data integrity, ' +
+ 'are addressed by an upper layer transport protocol, such as the Transmission Control Protocol (TCP).'
+ },
+
+ 'ipv6': {
+ title: 'IPv6 Networking',
+ icon: '<i class="fas fa-cloud"></i>',
+ info: 'Metrics for the IPv6 stack of the system. <a href="https://en.wikipedia.org/wiki/IPv6" target="_blank">Internet Protocol version 6 (IPv6)</a> is the most recent version of the Internet Protocol (IP), the communications protocol that provides an identification and location system for computers on networks and routes traffic across the Internet. IPv6 was developed by the Internet Engineering Task Force (IETF) to deal with the long-anticipated problem of IPv4 address exhaustion. IPv6 is intended to replace IPv4.'
+ },
+
+ 'sctp': {
+ title: 'SCTP Networking',
+ icon: '<i class="fas fa-cloud"></i>',
+ info: '<a href="https://en.wikipedia.org/wiki/Stream_Control_Transmission_Protocol" target="_blank">Stream Control Transmission Protocol (SCTP)</a> is a computer network protocol which operates at the transport layer and serves a role similar to the popular protocols TCP and UDP. SCTP provides some of the features of both UDP and TCP: it is message-oriented like UDP and ensures reliable, in-sequence transport of messages with congestion control like TCP. It differs from those protocols by providing multi-homing and redundant paths to increase resilience and reliability.'
+ },
+
+ 'ipvs': {
+ title: 'IP Virtual Server',
+ icon: '<i class="fas fa-eye"></i>',
+        info: '<a href="http://www.linuxvirtualserver.org/software/ipvs.html" target="_blank">IPVS (IP Virtual Server)</a> implements transport-layer load balancing inside the Linux kernel, so-called Layer-4 switching. IPVS running on a host acts as a load balancer at the front of a cluster of real servers: it can direct requests for TCP/UDP based services to the real servers, and makes the services of the real servers appear as a virtual service on a single IP address.'
+ },
+
+ 'netfilter': {
+ title: 'Firewall (netfilter)',
+ icon: '<i class="fas fa-shield-alt"></i>',
+ info: 'Performance metrics of the netfilter components.'
+ },
+
+ 'ipfw': {
+ title: 'Firewall (ipfw)',
+ icon: '<i class="fas fa-shield-alt"></i>',
+ info: 'Counters and memory usage for the ipfw rules.'
+ },
+
+ 'cpu': {
+ title: 'CPUs',
+ icon: '<i class="fas fa-bolt"></i>',
+ info: 'Detailed information for each CPU of the system. A summary of the system for all CPUs can be found at the <a href="#menu_system">System Overview</a> section.'
+ },
+
+ 'mem': {
+ title: 'Memory',
+ icon: '<i class="fas fa-microchip"></i>',
+ info: 'Detailed information about the memory management of the system.'
+ },
+
+ 'disk': {
+ title: 'Disks',
+ icon: '<i class="fas fa-hdd"></i>',
+        info: 'Charts with performance information for all the system disks. Special care has been given to present disk performance metrics in a way compatible with <code>iostat -x</code>. netdata by default prevents rendering performance charts for individual partitions and unmounted virtual disks. Disabled charts can still be enabled by configuring the relevant settings in the netdata configuration file.'
+ },
+
+ 'sensors': {
+ title: 'Sensors',
+ icon: '<i class="fas fa-leaf"></i>',
+ info: 'Readings of the configured system sensors.'
+ },
+
+ 'ipmi': {
+ title: 'IPMI',
+ icon: '<i class="fas fa-leaf"></i>',
+ info: 'The Intelligent Platform Management Interface (IPMI) is a set of computer interface specifications for an autonomous computer subsystem that provides management and monitoring capabilities independently of the host system\'s CPU, firmware (BIOS or UEFI) and operating system.'
+ },
+
+ 'samba': {
+ title: 'Samba',
+ icon: '<i class="fas fa-folder-open"></i>',
+        info: 'Performance metrics of the Samba file share operations of this system. Samba is an implementation of Windows services, including Windows SMB protocol file shares.'
+ },
+
+ 'nfsd': {
+ title: 'NFS Server',
+ icon: '<i class="fas fa-folder-open"></i>',
+        info: 'Performance metrics of the Network File System (NFS) server. NFS is a distributed file system protocol, allowing a user on a client computer to access files over a network, much like local storage is accessed. NFS, like many other protocols, builds on the Open Network Computing Remote Procedure Call (ONC RPC) system. NFS is an open standard, defined in Requests for Comments (RFCs).'
+ },
+
+ 'nfs': {
+ title: 'NFS Client',
+ icon: '<i class="fas fa-folder-open"></i>',
+ info: 'Performance metrics of the NFS operations of this system, acting as an NFS client.'
+ },
+
+ 'zfs': {
+ title: 'ZFS filesystem',
+ icon: '<i class="fas fa-folder-open"></i>',
+ info: 'Performance metrics of the ZFS filesystem. The following charts visualize all metrics reported by <a href="https://github.com/zfsonlinux/zfs/blob/master/cmd/arcstat/arcstat" target="_blank">arcstat.py</a> and <a href="https://github.com/zfsonlinux/zfs/blob/master/cmd/arc_summary/arc_summary3" target="_blank">arc_summary.py</a>.'
+ },
+
+ 'btrfs': {
+ title: 'BTRFS filesystem',
+ icon: '<i class="fas fa-folder-open"></i>',
+ info: 'Disk space metrics for the BTRFS filesystem.'
+ },
+
+ 'apps': {
+ title: 'Applications',
+ icon: '<i class="fas fa-heartbeat"></i>',
+        info: 'Per application statistics are collected using netdata\'s <code>apps.plugin</code>. This plugin walks through all processes and aggregates statistics for applications of interest, defined in <code>/etc/netdata/apps_groups.conf</code>, which can be edited by running <code>$ /etc/netdata/edit-config apps_groups.conf</code> (the default is <a href="https://github.com/netdata/netdata/blob/master/collectors/apps.plugin/apps_groups.conf" target="_blank">here</a>). The plugin internally builds a process tree (much like <code>ps fax</code> does), and groups processes together (evaluating both child and parent processes) so that the result is always a chart with a predefined set of dimensions (of course, only application groups found running are reported). The reported values are compatible with <code>top</code>, although the netdata plugin also counts the resources of exited children (unlike <code>top</code>, which shows only the resources of the currently running processes). So for processes like shell scripts, the reported values include the resources used by the commands these scripts run within each timeframe.',
+ height: 1.5
+ },
+
+ 'users': {
+ title: 'Users',
+ icon: '<i class="fas fa-user"></i>',
+        info: 'Per user statistics are collected using netdata\'s <code>apps.plugin</code>. This plugin walks through all processes and aggregates statistics per user. The reported values are compatible with <code>top</code>, although the netdata plugin also counts the resources of exited children (unlike <code>top</code>, which shows only the resources of the currently running processes). So for processes like shell scripts, the reported values include the resources used by the commands these scripts run within each timeframe.',
+ height: 1.5
+ },
+
+ 'groups': {
+ title: 'User Groups',
+ icon: '<i class="fas fa-users"></i>',
+        info: 'Per user group statistics are collected using netdata\'s <code>apps.plugin</code>. This plugin walks through all processes and aggregates statistics per user group. The reported values are compatible with <code>top</code>, although the netdata plugin also counts the resources of exited children (unlike <code>top</code>, which shows only the resources of the currently running processes). So for processes like shell scripts, the reported values include the resources used by the commands these scripts run within each timeframe.',
+ height: 1.5
+ },
+
+ 'netdata': {
+ title: 'Netdata Monitoring',
+ icon: '<i class="fas fa-chart-bar"></i>',
+ info: 'Performance metrics for the operation of netdata itself and its plugins.'
+ },
+
+ 'aclk_test': {
+ title: 'ACLK Test Generator',
+ info: 'For internal use to perform integration testing.'
+ },
+
+ 'example': {
+ title: 'Example Charts',
+ info: 'Example charts, demonstrating the external plugin architecture.'
+ },
+
+ 'cgroup': {
+ title: '',
+ icon: '<i class="fas fa-th"></i>',
+        info: 'Container resource utilization metrics. Netdata reads this information from <b>cgroups</b> (abbreviated from <b>control groups</b>), a Linux kernel feature that limits and accounts for the resource usage (CPU, memory, disk I/O, network, etc.) of a collection of processes. <b>cgroups</b> together with <b>namespaces</b> (which offer isolation between processes) provide what we usually call <b>containers</b>.'
+ },
+
+ 'cgqemu': {
+ title: '',
+ icon: '<i class="fas fa-th-large"></i>',
+ info: 'QEMU virtual machine resource utilization metrics. QEMU (short for Quick Emulator) is a free and open-source hosted hypervisor that performs hardware virtualization.'
+ },
+
+ 'fping': {
+ title: 'fping',
+ icon: '<i class="fas fa-exchange-alt"></i>',
+        info: 'Network latency statistics, via <b>fping</b>. <b>fping</b> is a program to send ICMP echo probes to network hosts, similar to <code>ping</code>, but with much better performance when pinging multiple hosts. fping versions after 3.15 can be directly used as netdata plugins.'
+ },
+
+ 'gearman': {
+ title: 'Gearman',
+ icon: '<i class="fas fa-tasks"></i>',
+ info: 'Gearman is a job server that allows you to do work in parallel, to load balance processing, and to call functions between languages.'
+ },
+
+ 'ioping': {
+ title: 'ioping',
+ icon: '<i class="fas fa-exchange-alt"></i>',
+ info: 'Disk latency statistics, via <b>ioping</b>. <b>ioping</b> is a program to read/write data probes from/to a disk.'
+ },
+
+ 'httpcheck': {
+        title: 'HTTP Check',
+ icon: '<i class="fas fa-heartbeat"></i>',
+        info: 'Web service availability and latency monitoring using HTTP checks. This plugin is a specialized version of the port check plugin.'
+ },
+
+ 'memcached': {
+ title: 'memcached',
+ icon: '<i class="fas fa-database"></i>',
+ info: 'Performance metrics for <b>memcached</b>. Memcached is a general-purpose distributed memory caching system. It is often used to speed up dynamic database-driven websites by caching data and objects in RAM to reduce the number of times an external data source (such as a database or API) must be read.'
+ },
+
+ 'monit': {
+ title: 'monit',
+ icon: '<i class="fas fa-database"></i>',
+ info: 'Statuses of checks in <b>monit</b>. Monit is a utility for managing and monitoring processes, programs, files, directories and filesystems on a Unix system. Monit conducts automatic maintenance and repair and can execute meaningful causal actions in error situations.'
+ },
+
+ 'mysql': {
+ title: 'MySQL',
+ icon: '<i class="fas fa-database"></i>',
+ info: 'Performance metrics for <b>mysql</b>, the open-source relational database management system (RDBMS).'
+ },
+
+ 'postgres': {
+ title: 'Postgres',
+ icon: '<i class="fas fa-database"></i>',
+        info: 'Performance metrics for <b>PostgreSQL</b>, the object-relational database management system (ORDBMS).'
+ },
+
+ 'redis': {
+ title: 'Redis',
+ icon: '<i class="fas fa-database"></i>',
+ info: 'Performance metrics for <b>redis</b>. Redis (REmote DIctionary Server) is a software project that implements data structure servers. It is open-source, networked, in-memory, and stores keys with optional durability.'
+ },
+
+ 'rethinkdbs': {
+ title: 'RethinkDB',
+ icon: '<i class="fas fa-database"></i>',
+        info: 'Performance metrics for <b>rethinkdb</b>. RethinkDB is the first open-source scalable database built for realtime applications.'
+ },
+
+ 'retroshare': {
+ title: 'RetroShare',
+ icon: '<i class="fas fa-share-alt"></i>',
+ info: 'Performance metrics for <b>RetroShare</b>. RetroShare is open source software for encrypted filesharing, serverless email, instant messaging, online chat, and BBS, based on a friend-to-friend network built on GNU Privacy Guard (GPG).'
+ },
+
+ 'riakkv': {
+ title: 'Riak KV',
+ icon: '<i class="fas fa-database"></i>',
+ info: 'Metrics for <b>Riak KV</b>, the distributed key-value store.'
+ },
+
+ 'ipfs': {
+ title: 'IPFS',
+ icon: '<i class="fas fa-folder-open"></i>',
+ info: 'Performance metrics for the InterPlanetary File System (IPFS), a content-addressable, peer-to-peer hypermedia distribution protocol.'
+ },
+
+ 'phpfpm': {
+ title: 'PHP-FPM',
+ icon: '<i class="fas fa-eye"></i>',
+ info: 'Performance metrics for <b>PHP-FPM</b>, an alternative FastCGI implementation for PHP.'
+ },
+
+ 'pihole': {
+ title: 'Pi-hole',
+ icon: '<i class="fas fa-ban"></i>',
+ info: 'Metrics for <a href="https://pi-hole.net/" target="_blank">Pi-hole</a>, a black hole for Internet advertisements.' +
+            ' The metrics returned by the Pi-hole API all refer to the last 24 hours.'
+ },
+
+ 'portcheck': {
+ title: 'Port Check',
+ icon: '<i class="fas fa-heartbeat"></i>',
+ info: 'Service availability and latency monitoring using port checks.'
+ },
+
+ 'postfix': {
+ title: 'postfix',
+ icon: '<i class="fas fa-envelope"></i>',
+ info: undefined
+ },
+
+ 'dovecot': {
+ title: 'Dovecot',
+ icon: '<i class="fas fa-envelope"></i>',
+ info: undefined
+ },
+
+ 'hddtemp': {
+ title: 'HDD Temp',
+ icon: '<i class="fas fa-thermometer-half"></i>',
+ info: undefined
+ },
+
+ 'nginx': {
+ title: 'nginx',
+ icon: '<i class="fas fa-eye"></i>',
+ info: undefined
+ },
+
+ 'apache': {
+ title: 'Apache',
+ icon: '<i class="fas fa-eye"></i>',
+ info: undefined
+ },
+
+ 'lighttpd': {
+ title: 'Lighttpd',
+ icon: '<i class="fas fa-eye"></i>',
+ info: undefined
+ },
+
+ 'web_log': {
+ title: undefined,
+ icon: '<i class="fas fa-file-alt"></i>',
+        info: 'Information extracted from a server log file. The <code>web_log</code> plugin incrementally parses the server log file to provide, in real-time, a breakdown of key server performance metrics. For web servers, an extended log file format may optionally be used (for <code>nginx</code> and <code>apache</code>), offering timing information and bandwidth for both requests and responses. The <code>web_log</code> plugin may also be configured to provide a breakdown of requests per URL pattern (check <a href="https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/web_log/web_log.conf" target="_blank"><code>/etc/netdata/python.d/web_log.conf</code></a>).'
+ },
+
+ 'named': {
+ title: 'named',
+ icon: '<i class="fas fa-tag"></i>',
+ info: undefined
+ },
+
+ 'squid': {
+ title: 'squid',
+ icon: '<i class="fas fa-exchange-alt"></i>',
+ info: undefined
+ },
+
+ 'nut': {
+ title: 'UPS',
+ icon: '<i class="fas fa-battery-half"></i>',
+ info: undefined
+ },
+
+ 'apcupsd': {
+ title: 'UPS',
+ icon: '<i class="fas fa-battery-half"></i>',
+ info: undefined
+ },
+
+ 'smawebbox': {
+ title: 'Solar Power',
+ icon: '<i class="fas fa-sun"></i>',
+ info: undefined
+ },
+
+ 'fronius': {
+ title: 'Fronius',
+ icon: '<i class="fas fa-sun"></i>',
+ info: undefined
+ },
+
+ 'stiebeleltron': {
+ title: 'Stiebel Eltron',
+ icon: '<i class="fas fa-thermometer-half"></i>',
+ info: undefined
+ },
+
+ 'snmp': {
+ title: 'SNMP',
+ icon: '<i class="fas fa-random"></i>',
+ info: undefined
+ },
+
+ 'go_expvar': {
+ title: 'Go - expvars',
+ icon: '<i class="fas fa-eye"></i>',
+ info: 'Statistics about running Go applications exposed by the <a href="https://golang.org/pkg/expvar/" target="_blank">expvar package</a>.'
+ },
+
+ 'chrony': {
+ icon: '<i class="fas fa-clock"></i>',
+ info: 'chronyd parameters about the system’s clock performance.'
+ },
+
+ 'couchdb': {
+ icon: '<i class="fas fa-database"></i>',
+ info: 'Performance metrics for <b><a href="https://couchdb.apache.org/">CouchDB</a></b>, the open-source, JSON document-based database with an HTTP API and multi-master replication.'
+ },
+
+ 'beanstalk': {
+ title: 'Beanstalkd',
+ icon: '<i class="fas fa-tasks"></i>',
+        info: 'Provides statistics on the <b><a href="http://kr.github.io/beanstalkd/">beanstalkd</a></b> server and any tubes available on that server, using data pulled from <code>beanstalkc</code>.'
+ },
+
+ 'rabbitmq': {
+ title: 'RabbitMQ',
+ icon: '<i class="fas fa-comments"></i>',
+ info: 'Performance data for the <b><a href="https://www.rabbitmq.com/">RabbitMQ</a></b> open-source message broker.'
+ },
+
+ 'ceph': {
+ title: 'Ceph',
+ icon: '<i class="fas fa-database"></i>',
+ info: 'Provides statistics on the <b><a href="http://ceph.com/">ceph</a></b> cluster server, the open-source distributed storage system.'
+ },
+
+ 'ntpd': {
+ title: 'ntpd',
+ icon: '<i class="fas fa-clock"></i>',
+        info: 'Provides statistics for the internal variables of the Network Time Protocol daemon <b><a href="http://www.ntp.org/">ntpd</a></b> and, optionally, the configured peers (if enabled in the module configuration). The module presents the performance metrics as shown by <b><a href="http://doc.ntp.org/current-stable/ntpq.html">ntpq</a></b> (the standard NTP query program), using NTP mode 6 UDP packets to communicate with the NTP server.'
+ },
+
+ 'spigotmc': {
+ title: 'Spigot MC',
+ icon: '<i class="fas fa-eye"></i>',
+ info: 'Provides basic performance statistics for the <b><a href="https://www.spigotmc.org/">Spigot Minecraft</a></b> server.'
+ },
+
+ 'unbound': {
+ title: 'Unbound',
+ icon: '<i class="fas fa-tag"></i>',
+ info: undefined
+ },
+
+ 'boinc': {
+ title: 'BOINC',
+ icon: '<i class="fas fa-microchip"></i>',
+ info: 'Provides task counts for <b><a href="http://boinc.berkeley.edu/">BOINC</a></b> distributed computing clients.'
+ },
+
+ 'w1sensor': {
+ title: '1-Wire Sensors',
+ icon: '<i class="fas fa-thermometer-half"></i>',
+ info: 'Data derived from <a href="https://en.wikipedia.org/wiki/1-Wire">1-Wire</a> sensors. Currently temperature sensors are automatically detected.'
+ },
+
+ 'logind': {
+ title: 'Logind',
+ icon: '<i class="fas fa-user"></i>',
+ info: undefined
+ },
+
+ 'powersupply': {
+ title: 'Power Supply',
+ icon: '<i class="fas fa-battery-half"></i>',
+ info: 'Statistics for the various system power supplies. Data collected from <a href="https://www.kernel.org/doc/Documentation/power/power_supply_class.txt">Linux power supply class</a>.'
+ },
+
+ 'xenstat': {
+ title: 'Xen Node',
+ icon: '<i class="fas fa-server"></i>',
+        info: 'General statistics for the Xen node. Data collected using the <b>xenstat</b> library.'
+ },
+
+ 'xendomain': {
+ title: '',
+ icon: '<i class="fas fa-th-large"></i>',
+        info: 'Xen domain resource utilization metrics. Netdata reads this information using the <b>xenstat</b> library, which gives access to the resource usage information (CPU, memory, disk I/O, network) for a virtual machine.'
+ },
+
+ 'wmi': {
+ title: 'wmi',
+ icon: '<i class="fas fa-server"></i>',
+ info: undefined
+ },
+
+ 'perf': {
+ title: 'Perf Counters',
+ icon: '<i class="fas fa-tachometer-alt"></i>',
+        info: 'Performance Monitoring Counters (PMC). Data collected using the <b>perf_event_open()</b> system call, which utilizes the hardware Performance Monitoring Units (PMUs).'
+ },
+
+ 'vsphere': {
+ title: 'vSphere',
+ icon: '<i class="fas fa-server"></i>',
+        info: 'Performance statistics for ESXi hosts and virtual machines. Data collected from <a href="https://www.vmware.com/products/vcenter-server.html">VMware vCenter Server</a> using the <code><a href="https://github.com/vmware/govmomi">govmomi</a></code> library.'
+ },
+
+ 'vcsa': {
+ title: 'VCSA',
+ icon: '<i class="fas fa-server"></i>',
+ info: 'vCenter Server Appliance health statistics. Data collected from <a href="https://vmware.github.io/vsphere-automation-sdk-rest/vsphere/index.html#SVC_com.vmware.appliance.health">Health API</a>.'
+ },
+
+ 'zookeeper': {
+ title: 'Zookeeper',
+ icon: '<i class="fas fa-database"></i>',
+        info: 'Provides health statistics for the <b><a href="https://zookeeper.apache.org/">Zookeeper</a></b> server. Data collected through the command port using the <code><a href="https://zookeeper.apache.org/doc/r3.5.5/zookeeperAdmin.html#sc_zkCommands">mntr</a></code> command.'
+ },
+
+ 'hdfs': {
+ title: 'HDFS',
+ icon: '<i class="fas fa-folder-open"></i>',
+        info: 'Provides <b><a href="https://hadoop.apache.org/docs/r3.2.0/hadoop-project-dist/hadoop-hdfs/HdfsDesign.html">Hadoop Distributed File System</a></b> performance statistics. The module collects metrics over <code>Java Management Extensions</code> through the web interface of an <code>HDFS</code> daemon.'
+ },
+
+ 'am2320': {
+ title: 'AM2320 Sensor',
+ icon: '<i class="fas fa-thermometer-half"></i>',
+ info: 'Readings from the external AM2320 Sensor.'
+ },
+
+ 'scaleio': {
+ title: 'ScaleIO',
+ icon: '<i class="fas fa-database"></i>',
+ info: 'Performance and health statistics for various ScaleIO components. Data collected via VxFlex OS Gateway REST API.'
+ },
+
+ 'squidlog': {
+ title: 'Squid log',
+ icon: '<i class="fas fa-file-alt"></i>',
+ info: undefined
+ },
+
+ 'cockroachdb': {
+ title: 'CockroachDB',
+ icon: '<i class="fas fa-database"></i>',
+ info: 'Performance and health statistics for various <code>CockroachDB</code> components.'
+ },
+
+ 'ebpf': {
+ title: 'eBPF',
+ icon: '<i class="fas fa-heartbeat"></i>',
+ info: 'Monitor system calls, internal functions, bytes read, bytes written and errors using <code>eBPF</code>.'
+ },
+
+ 'vernemq': {
+ title: 'VerneMQ',
+ icon: '<i class="fas fa-comments"></i>',
+ info: 'Performance data for the <b><a href="https://vernemq.com/">VerneMQ</a></b> open-source MQTT broker.'
+ },
+
+ 'pulsar': {
+ title: 'Pulsar',
+ icon: '<i class="fas fa-comments"></i>',
+ info: 'Summary, namespaces and topics performance data for the <b><a href="http://pulsar.apache.org/">Apache Pulsar</a></b> pub-sub messaging system.'
+ },
+
+ 'anomalies': {
+ title: 'Anomalies',
+ icon: '<i class="fas fa-flask"></i>',
+ info: 'Anomaly scores relating to key system metrics. A high anomaly probability indicates strange behaviour and may trigger an anomaly prediction from the trained models. Read the <a href="https://github.com/netdata/netdata/tree/master/collectors/python.d.plugin/anomalies" target="_blank">anomalies collector docs</a> for more details.'
+ },
+
+ 'alarms': {
+ title: 'Alarms',
+ icon: '<i class="fas fa-bell"></i>',
+ info: 'Charts showing alarm status over time. More details <a href="https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/alarms/README.md" target="_blank">here</a>.'
+ },
+ 'statsd': {
+ title: 'StatsD',
+ icon: '<i class="fas fa-chart-line"></i>',
+        info: 'StatsD is an industry-standard technology stack for monitoring applications and instrumenting any piece of software to deliver custom metrics. Netdata allows the user to organize the metrics in different charts and visualize any application metric easily. Read more on <a href="https://learn.netdata.cloud/docs/agent/collectors/statsd.plugin">Netdata Learn</a>.'
+ }
+};
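+
+// A hypothetical sketch (illustrative only, not part of the original file): a menu
+// entry supports 'title', 'icon', an optional 'height' ratio and an 'info' field
+// that may be either a plain HTML string or a function of the detected OS, as the
+// entries above show. Adding a custom section could look like this ('myservice'
+// and 'My Service' are made-up names):
+//
+// netdataDashboard.menu['myservice'] = {
+//     title: 'My Service',
+//     icon: '<i class="fas fa-cogs"></i>',
+//     info: function (os) {               // os is e.g. 'linux' or 'freebsd'
+//         return (os === 'linux')
+//             ? 'Linux-specific description of My Service metrics.'
+//             : 'Generic description of My Service metrics.';
+//     }
+// };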
+
+
+// ----------------------------------------------------------------------------
+// submenus
+
+// information to be shown, just below each submenu
+
+// information about the submenus
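+//
+// A hypothetical sketch (illustrative only, not part of the original file):
+// submenu keys follow the '<menu>.<submenu>' naming used below, and each value
+// may set a 'title' and an 'info' string (or a function of the OS) shown just
+// below the submenu heading, e.g. ('myservice.requests' is made up):
+//
+// 'myservice.requests': {
+//     title: 'requests',
+//     info: 'Text rendered below the requests submenu of My Service.'
+// },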
+netdataDashboard.submenu = {
+ 'web_log.squid_bandwidth': {
+ title: 'bandwidth',
+        info: 'Bandwidth of responses (<code>sent</code>) by squid. This chart may present unusual spikes, since the bandwidth is accounted at the time the log line is saved by the server, even if the time needed to serve it spans across a longer duration. We suggest using QoS (e.g. <a href="http://firehol.org/#fireqos" target="_blank">FireQOS</a>) for accurate accounting of the server bandwidth.'
+ },
+
+ 'web_log.squid_responses': {
+ title: 'responses',
+ info: 'Information related to the responses sent by squid.'
+ },
+
+ 'web_log.squid_requests': {
+ title: 'requests',
+ info: 'Information related to the requests squid has received.'
+ },
+
+ 'web_log.squid_hierarchy': {
+ title: 'hierarchy',
+ info: 'Performance metrics for the squid hierarchy used to serve the requests.'
+ },
+
+ 'web_log.squid_squid_transport': {
+ title: 'transport'
+ },
+
+ 'web_log.squid_squid_cache': {
+ title: 'cache',
+        info: 'Performance metrics of the squid cache.'
+ },
+
+ 'web_log.squid_timings': {
+ title: 'timings',
+        info: 'Duration of squid requests. Unrealistic spikes may be reported, since squid logs the total time of the requests only when they complete. Especially for HTTPS, the clients get a tunnel from the proxy and exchange requests directly with the upstream servers, so squid cannot evaluate the individual requests and reports the total time the tunnel was open.'
+ },
+
+ 'web_log.squid_clients': {
+ title: 'clients'
+ },
+
+ 'web_log.bandwidth': {
+        info: 'Bandwidth of requests (<code>received</code>) and responses (<code>sent</code>). <code>received</code> requires an extended log format (without it, the web server log does not have this information). This chart may present unusual spikes, since the bandwidth is accounted at the time the log line is saved by the web server, even if the time needed to serve it spans across a longer duration. We suggest using QoS (e.g. <a href="http://firehol.org/#fireqos" target="_blank">FireQOS</a>) for accurate accounting of the web server bandwidth.'
+ },
+
+ 'web_log.urls': {
+ info: 'Number of requests for each <code>URL pattern</code> defined in <a href="https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/web_log/web_log.conf" target="_blank"><code>/etc/netdata/python.d/web_log.conf</code></a>. This chart counts all requests matching the URL patterns defined, independently of the web server response codes (i.e. both successful and unsuccessful).'
+ },
+
+ 'web_log.clients': {
+        info: 'Charts showing the number of unique client IPs accessing the web server.'
+ },
+
+ 'web_log.timings': {
+        info: 'Web server response timings - the time the web server needed to prepare and respond to requests. This requires an extended log format and its meaning is web server specific. For most web servers this accounts for the time from the reception of a complete request to the dispatch of the last byte of the response. So, it includes the network delays of responses, but it does not include the network delays of requests.'
+ },
+
+ 'mem.ksm': {
+ title: 'deduper (ksm)',
+ info: 'Kernel Same-page Merging (KSM) performance monitoring, read from several files in <code>/sys/kernel/mm/ksm/</code>. KSM is a memory-saving de-duplication feature in the Linux kernel (since version 2.6.32). The KSM daemon ksmd periodically scans those areas of user memory which have been registered with it, looking for pages of identical content which can be replaced by a single write-protected page (which is automatically copied if a process later wants to update its content). KSM was originally developed for use with KVM (where it was known as Kernel Shared Memory), to fit more virtual machines into physical memory, by sharing the data common between them. But it can be useful to any application which generates many instances of the same data.'
+ },
+
+ 'mem.hugepages': {
+ info: 'Hugepages is a feature that allows the kernel to utilize the multiple page size capabilities of modern hardware architectures. The kernel creates multiple pages of virtual memory, mapped from both physical RAM and swap. There is a mechanism in the CPU architecture called "Translation Lookaside Buffers" (TLB) to manage the mapping of virtual memory pages to actual physical memory addresses. The TLB is a limited hardware resource, so utilizing a large amount of physical memory with the default page size consumes the TLB and adds processing overhead. By utilizing Huge Pages, the kernel is able to create pages of much larger sizes, each page consuming a single resource in the TLB. Huge Pages are pinned to physical RAM and cannot be swapped/paged out.'
+ },
+
+ 'mem.numa': {
+        info: 'Non-Uniform Memory Access (NUMA) is a hierarchical memory design in which the memory access time depends on locality. Under NUMA, a processor can access its own local memory faster than non-local memory (memory local to another processor or memory shared between processors). The individual metrics are described in the <a href="https://www.kernel.org/doc/Documentation/numastat.txt" target="_blank">Linux kernel documentation</a>.'
+ },
+
+ 'ip.ecn': {
+ info: '<a href="https://en.wikipedia.org/wiki/Explicit_Congestion_Notification" target="_blank">Explicit Congestion Notification (ECN)</a> is a TCP extension that allows end-to-end notification of network congestion without dropping packets. ECN is an optional feature that may be used between two ECN-enabled endpoints when the underlying network infrastructure also supports it.'
+ },
+
+ 'netfilter.conntrack': {
+ title: 'connection tracker',
+        info: 'Netfilter Connection Tracker performance metrics. The connection tracker keeps track of all connections of the machine, inbound and outbound. It works by keeping a database with all open connections, tracking network address translation and connection expectations.'
+ },
+
+ 'netfilter.nfacct': {
+ title: 'bandwidth accounting',
+ info: 'The following information is read using the <code>nfacct.plugin</code>.'
+ },
+
+ 'netfilter.synproxy': {
+ title: 'DDoS protection',
+        info: 'DDoS protection performance metrics. <a href="https://github.com/firehol/firehol/wiki/Working-with-SYNPROXY" target="_blank">SYNPROXY</a> is a TCP SYN packets proxy. It is used to protect any TCP server (like a web server) from SYN floods and similar DDoS attacks. It is a netfilter module in the Linux kernel (since version 3.12). It is optimized to handle millions of packets per second utilizing all CPUs available without any concurrency locking between the connections. It can be used for any kind of TCP traffic (even encrypted), since it does not interfere with the content itself.'
+ },
+
+ 'ipfw.dynamic_rules': {
+ title: 'dynamic rules',
+        info: 'Number of dynamic rules, created by the corresponding stateful firewall rules.'
+ },
+
+ 'system.softnet_stat': {
+ title: 'softnet',
+ info: function (os) {
+ if (os === 'linux')
+                return 'Statistics for CPU SoftIRQs related to network receive work. A breakdown per CPU core can be found at <a href="#menu_cpu_submenu_softnet_stat">CPU / softnet statistics</a>. <b>processed</b> states the number of packets processed, <b>dropped</b> is the number of packets dropped because the network device backlog was full (to fix them on Linux use <code>sysctl</code> to increase <code>net.core.netdev_max_backlog</code>), <b>squeezed</b> is the number of packets dropped because the network device budget ran out (to fix them on Linux use <code>sysctl</code> to increase <code>net.core.netdev_budget</code> and/or <code>net.core.netdev_budget_usecs</code>). More information about identifying and troubleshooting network driver related issues can be found at <a href="https://access.redhat.com/sites/default/files/attachments/20150325_network_performance_tuning.pdf" target="_blank">Red Hat Enterprise Linux Network Performance Tuning Guide</a>.';
+            else
+                return 'Statistics for CPU SoftIRQs related to network receive work.';
+ }
+ },
+
+ 'cpu.softnet_stat': {
+ title: 'softnet',
+ info: function (os) {
+ if (os === 'linux')
+                return 'Statistics for per CPU core SoftIRQs related to network receive work. The total for all CPU cores can be found at <a href="#menu_system_submenu_softnet_stat">System / softnet statistics</a>. <b>processed</b> states the number of packets processed, <b>dropped</b> is the number of packets dropped because the network device backlog was full (to fix them on Linux use <code>sysctl</code> to increase <code>net.core.netdev_max_backlog</code>), <b>squeezed</b> is the number of packets dropped because the network device budget ran out (to fix them on Linux use <code>sysctl</code> to increase <code>net.core.netdev_budget</code> and/or <code>net.core.netdev_budget_usecs</code>). More information about identifying and troubleshooting network driver related issues can be found at <a href="https://access.redhat.com/sites/default/files/attachments/20150325_network_performance_tuning.pdf" target="_blank">Red Hat Enterprise Linux Network Performance Tuning Guide</a>.';
+            else
+                return 'Statistics for per CPU core SoftIRQs related to network receive work. The total for all CPU cores can be found at <a href="#menu_system_submenu_softnet_stat">System / softnet statistics</a>.';
+ }
+ },
+
+ 'go_expvar.memstats': {
+ title: 'memory statistics',
+ info: 'Go runtime memory statistics. See <a href="https://golang.org/pkg/runtime/#MemStats" target="_blank">runtime.MemStats</a> documentation for more info about each chart and the values.'
+ },
+
+ 'couchdb.dbactivity': {
+ title: 'db activity',
+ info: 'Overall database reads and writes for the entire server. This includes any external HTTP traffic, as well as internal replication traffic performed in a cluster to ensure node consistency.'
+ },
+
+ 'couchdb.httptraffic': {
+ title: 'http traffic breakdown',
+ info: 'All HTTP traffic, broken down by type of request (<tt>GET</tt>, <tt>PUT</tt>, <tt>POST</tt>, etc.) and response status code (<tt>200</tt>, <tt>201</tt>, <tt>4xx</tt>, etc.)<br/><br/>Any <tt>5xx</tt> errors here indicate a likely CouchDB bug; check the logfile for further information.'
+ },
+
+ 'couchdb.ops': {
+ title: 'server operations'
+ },
+
+ 'couchdb.perdbstats': {
+ title: 'per db statistics',
+ info: 'Statistics per database. This includes <a href="http://docs.couchdb.org/en/latest/api/database/common.html#get--db">3 size graphs per database</a>: active (the size of live data in the database), external (the uncompressed size of the database contents), and file (the size of the file on disk, exclusive of any views and indexes). It also includes the number of documents and number of deleted documents per database.'
+ },
+
+ 'couchdb.erlang': {
+ title: 'erlang statistics',
+ info: 'Detailed information about the status of the Erlang VM that hosts CouchDB. These are intended for advanced users only. High values of the peak message queue (>10e6) generally indicate an overload condition.'
+ },
+
+ 'ntpd.system': {
+ title: 'system',
+ info: 'Statistics of the system variables as shown by the readlist billboard <code>ntpq -c rl</code>. System variables are assigned an association ID of zero and can also be shown in the readvar billboard <code>ntpq -c "rv 0"</code>. These variables are used in the <a href="http://doc.ntp.org/current-stable/discipline.html">Clock Discipline Algorithm</a>, to calculate the lowest and most stable offset.'
+ },
+
+ 'ntpd.peers': {
+ title: 'peers',
+        info: 'Statistics of the peer variables for each peer configured in <code>/etc/ntp.conf</code>, as shown by the readvar billboard <code>ntpq -c "rv &lt;association&gt;"</code>, while each peer is assigned a nonzero association ID, as shown by <code>ntpq -c "apeers"</code>. The module periodically scans for new/changed peers (default: every 60s). <b>ntpd</b> selects the best possible peer from the available peers to synchronize the clock. At least 3 peers are required to properly identify the best possible peer.'
+ }
+};
+
+
+// ----------------------------------------------------------------------------
+// chart
+
+// information works on the context of a chart
+// Its purpose is to set:
+//
+// info: the text above the charts
+// heads: the representation of the chart at the top the subsection (second level menu)
+// mainheads: the representation of the chart at the top of the section (first level menu)
+// colors: the dimension colors of the chart (the default colors are appended)
+// height: the ratio of the chart height relative to the default
+//
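+//
+// A hypothetical sketch (illustrative only, not part of the original file). Besides
+// the fields listed above, the entries below also use 'valueRange' (fix the y-axis
+// range) and 'commonMin'/'commonMax' (share the min/max across similar charts).
+// A context entry could look like this ('myservice.load' is made up):
+//
+// 'myservice.load': {
+//     colors: '#22AA99',                  // color for the first dimension
+//     valueRange: "[0, 100]",
+//     commonMin: true,
+//     commonMax: true,
+//     height: 0.7,
+//     info: 'Text shown above all charts with context myservice.load.'
+// },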
+
+var cgroupCPULimitIsSet = 0;
+var cgroupMemLimitIsSet = 0;
+
+netdataDashboard.context = {
+ 'system.cpu': {
+ info: function (os) {
+ void (os);
+ return 'Total CPU utilization (all cores). 100% here means there is no CPU idle time at all. You can get per core usage at the <a href="#menu_cpu">CPUs</a> section and per application usage at the <a href="#menu_apps">Applications Monitoring</a> section.'
+ + netdataDashboard.sparkline('<br/>Keep an eye on <b>iowait</b> ', 'system.cpu', 'iowait', '%', '. If it is constantly high, your disks are a bottleneck and they slow your system down.')
+ + netdataDashboard.sparkline('<br/>An important metric worth monitoring, is <b>softirq</b> ', 'system.cpu', 'softirq', '%', '. A constantly high percentage of softirq may indicate network driver issues.');
+ },
+ valueRange: "[0, 100]"
+ },
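+
+    // Note: netdataDashboard.sparkline(prefix, chart, dimension, units, suffix), as used
+    // above, embeds a small inline chart of a single dimension into the info text; the
+    // signature is inferred from the calls above.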
+
+ 'system.load': {
+ info: 'Current system load, i.e. the number of processes using CPU or waiting for system resources (usually CPU and disk). The 3 metrics refer to 1, 5 and 15 minute averages. The system calculates this once every 5 seconds. For more information check <a href="https://en.wikipedia.org/wiki/Load_(computing)" target="_blank">this wikipedia article</a>.',
+ height: 0.7
+ },
+
+ 'system.cpu_pressure': {
+ info: '<a href="https://www.kernel.org/doc/html/latest/accounting/psi.html">Pressure Stall Information</a> ' +
+ 'identifies and quantifies the disruptions caused by resource contentions. ' +
+ 'The "some" line indicates the share of time in which at least <b>some</b> tasks are stalled on CPU. ' +
+ 'The ratios (in %) are tracked as recent trends over 10-, 60-, and 300-second windows.'
+ },
+
+ 'system.memory_some_pressure': {
+ info: '<a href="https://www.kernel.org/doc/html/latest/accounting/psi.html">Pressure Stall Information</a> ' +
+ 'identifies and quantifies the disruptions caused by resource contentions. ' +
+ 'The "some" line indicates the share of time in which at least <b>some</b> tasks are stalled on memory. ' +
+ 'The "full" line indicates the share of time in which <b>all non-idle</b> tasks are stalled on memory simultaneously. ' +
+ 'In this state actual CPU cycles are going to waste, and a workload that spends extended time in this state is considered to be thrashing. ' +
+ 'The ratios (in %) are tracked as recent trends over 10-, 60-, and 300-second windows.'
+ },
+
+ 'system.io_some_pressure': {
+ info: '<a href="https://www.kernel.org/doc/html/latest/accounting/psi.html">Pressure Stall Information</a> ' +
+ 'identifies and quantifies the disruptions caused by resource contentions. ' +
+ 'The "some" line indicates the share of time in which at least <b>some</b> tasks are stalled on I/O. ' +
+ 'The "full" line indicates the share of time in which <b>all non-idle</b> tasks are stalled on I/O simultaneously. ' +
+ 'In this state actual CPU cycles are going to waste, and a workload that spends extended time in this state is considered to be thrashing. ' +
+ 'The ratios (in %) are tracked as recent trends over 10-, 60-, and 300-second windows.'
+ },
+
+ 'system.io': {
+ info: function (os) {
+ var s = 'Total Disk I/O, for all physical disks. You can get detailed information about each disk at the <a href="#menu_disk">Disks</a> section and per application Disk usage at the <a href="#menu_apps">Applications Monitoring</a> section.';
+
+ if (os === 'linux')
+ return s + ' Physical are all the disks that are listed in <code>/sys/block</code>, but do not exist in <code>/sys/devices/virtual/block</code>.';
+ else
+ return s;
+ }
+ },
+
+ 'system.pgpgio': {
+ info: 'Memory paged from/to disk. This is usually the total disk I/O of the system.'
+ },
+
+ 'system.swapio': {
+        info: 'Total Swap I/O. netdata measures both <code>in</code> and <code>out</code>; if either metric is not shown in the chart, it is because it is zero (you can change the page settings to always render all the available dimensions on all charts).'
+ },
+
+ 'system.pgfaults': {
+        info: 'Total page faults. <b>Major page faults</b> indicate that the system is using its swap. You can find which applications use the swap at the <a href="#menu_apps">Applications Monitoring</a> section.'
+ },
+
+ 'system.entropy': {
+ colors: '#CC22AA',
+        info: '<a href="https://en.wikipedia.org/wiki/Entropy_(computing)" target="_blank">Entropy</a> is a pool of random numbers (<a href="https://en.wikipedia.org/wiki//dev/random" target="_blank">/dev/random</a>) that is mainly used in cryptography. If the pool of entropy gets empty, processes requiring random numbers may run a lot slower (it depends on the interface each program uses), waiting for the pool to be replenished. Ideally a system with high entropy demands should have a hardware device for that purpose (TPM is one such device). There are also several software-only options you may install, like <code>haveged</code>, although these are generally useful only in servers.'
+ },
+
+ 'system.forks': {
+ colors: '#5555DD',
+ info: 'Number of new processes created.'
+ },
+
+ 'system.intr': {
+ colors: '#DD5555',
+        info: 'Total number of CPU interrupts. Check <code>system.interrupts</code>, which gives more detail about each interrupt, and also the <a href="#menu_cpu">CPUs</a> section, where interrupts are analyzed per CPU core.'
+ },
+
+ 'system.interrupts': {
+ info: 'CPU interrupts in detail. At the <a href="#menu_cpu">CPUs</a> section, interrupts are analyzed per CPU core.'
+ },
+
+ 'system.softirqs': {
+ info: 'CPU softirqs in detail. At the <a href="#menu_cpu">CPUs</a> section, softirqs are analyzed per CPU core.'
+ },
+
+ 'system.processes': {
+        info: 'System processes. <b>Running</b> are the processes in the CPU. <b>Blocked</b> are processes that are willing to enter the CPU, but cannot, e.g. because they are waiting for disk activity.'
+ },
+
+ 'system.active_processes': {
+ info: 'All system processes.'
+ },
+
+ 'system.ctxt': {
+        info: '<a href="https://en.wikipedia.org/wiki/Context_switch" target="_blank">Context Switches</a> is the switching of the CPU from one process, task or thread to another. If there are many processes or threads willing to execute and very few CPU cores available to handle them, the system performs more context switching to balance the CPU resources among them. The whole process is computationally intensive. The more context switches, the slower the system gets.'
+ },
+
+ 'system.idlejitter': {
+        info: 'Idle jitter is calculated by netdata. A thread is spawned that requests to sleep for a few microseconds. When the system wakes it up, it measures how many microseconds have passed. The difference between the requested and the actual duration of the sleep is the <b>idle jitter</b>. This number is useful in real-time environments, where CPU jitter can affect the quality of the service (like VoIP media gateways).'
+ },
+
+ 'system.net': {
+ info: function (os) {
+ var s = 'Total bandwidth of all physical network interfaces. This does not include <code>lo</code>, VPNs, network bridges, IFB devices, bond interfaces, etc. Only the bandwidth of physical network interfaces is aggregated.';
+
+ if (os === 'linux')
+ return s + ' Physical are all the network interfaces that are listed in <code>/proc/net/dev</code>, but do not exist in <code>/sys/devices/virtual/net</code>.';
+ else
+ return s;
+ }
+ },
+
+ 'system.ip': {
+ info: 'Total IP traffic in the system.'
+ },
+
+ 'system.ipv4': {
+ info: 'Total IPv4 Traffic.'
+ },
+
+ 'system.ipv6': {
+ info: 'Total IPv6 Traffic.'
+ },
+
+ 'system.ram': {
+ info: 'System Random Access Memory (i.e. physical memory) usage.'
+ },
+
+ 'system.swap': {
+ info: 'System swap memory usage. Swap space is used when the amount of physical memory (RAM) is full. When the system needs more memory resources and the RAM is full, inactive pages in memory are moved to the swap space (usually a disk, a disk partition or a file).'
+ },
+
+ // ------------------------------------------------------------------------
+ // CPU charts
+
+ 'cpu.cpu': {
+ commonMin: true,
+ commonMax: true,
+ valueRange: "[0, 100]"
+ },
+
+ 'cpu.interrupts': {
+ commonMin: true,
+ commonMax: true
+ },
+
+ 'cpu.softirqs': {
+ commonMin: true,
+ commonMax: true
+ },
+
+ 'cpu.softnet_stat': {
+ commonMin: true,
+ commonMax: true
+ },
+
+ // ------------------------------------------------------------------------
+ // MEMORY
+
+ 'mem.ksm_savings': {
+ heads: [
+ netdataDashboard.gaugeChart('Saved', '12%', 'savings', '#0099CC')
+ ]
+ },
+
+ 'mem.ksm_ratios': {
+ heads: [
+ function (os, id) {
+ void (os);
+ return '<div data-netdata="' + id + '"'
+ + ' data-gauge-max-value="100"'
+ + ' data-chart-library="gauge"'
+ + ' data-title="Savings"'
+ + ' data-units="percentage %"'
+ + ' data-gauge-adjust="width"'
+ + ' data-width="12%"'
+ + ' data-before="0"'
+ + ' data-after="-CHART_DURATION"'
+ + ' data-points="CHART_DURATION"'
+ + ' role="application"></div>';
+ }
+ ]
+ },
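+
+    // Note: a 'heads' entry may either use a helper such as netdataDashboard.gaugeChart()
+    // (as mem.ksm_savings does above) or return a raw <div data-netdata="..."> element
+    // with explicit data-* attributes (as mem.ksm_ratios does above).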
+
+ 'mem.zram_usage': {
+ info: 'ZRAM total RAM usage metrics. ZRAM uses some memory to store metadata about stored memory pages, thus introducing an overhead which is proportional to disk size. It excludes same-element-filled-pages since no memory is allocated for them.'
+ },
+
+ 'mem.zram_savings': {
+ info: 'Displays original and compressed memory data sizes.'
+ },
+
+ 'mem.zram_ratio': {
+ heads: [
+ netdataDashboard.gaugeChart('Compression Ratio', '12%', 'ratio', '#0099CC')
+ ],
+ info: 'Compression ratio, calculated as <code>100 * original_size / compressed_size</code>. More means better compression and more RAM savings.'
+ },
+
+ 'mem.zram_efficiency': {
+ heads: [
+ netdataDashboard.gaugeChart('Efficiency', '12%', 'percent', NETDATA.colors[0])
+ ],
+ commonMin: true,
+ commonMax: true,
+ valueRange: "[0, 100]",
+ info: 'Memory usage efficiency, calculated as <code>100 * compressed_size / total_mem_used</code>.'
+ },
+
+
+ 'mem.pgfaults': {
+ info: 'A <a href="https://en.wikipedia.org/wiki/Page_fault" target="_blank">page fault</a> is a type of interrupt, called trap, raised by computer hardware when a running program accesses a memory page that is mapped into the virtual address space, but not actually loaded into main memory. If the page is loaded in memory at the time the fault is generated, but is not marked in the memory management unit as being loaded in memory, then it is called a <b>minor</b> or soft page fault. A <b>major</b> page fault is generated when the system needs to load the memory page from disk or swap memory.'
+ },
+
+ 'mem.committed': {
+ colors: NETDATA.colors[3],
+        info: 'Committed Memory is the sum of all memory which has been allocated by processes.'
+ },
+
+ 'mem.available': {
+        info: 'Available Memory is estimated by the kernel as the amount of RAM that can be used by userspace processes without causing swapping.'
+ },
+
+ 'mem.writeback': {
+ info: '<b>Dirty</b> is the amount of memory waiting to be written to disk. <b>Writeback</b> is how much memory is actively being written to disk.'
+ },
+
+ 'mem.kernel': {
+ info: 'The total amount of memory being used by the kernel. <b>Slab</b> is the amount of memory used by the kernel to cache data structures for its own use. <b>KernelStack</b> is the amount of memory allocated for each task done by the kernel. <b>PageTables</b> is the amount of memory dedicated to the lowest level of page tables (A page table is used to turn a virtual address into a physical memory address). <b>VmallocUsed</b> is the amount of memory being used as virtual address space.'
+ },
+
+ 'mem.slab': {
+        info: '<b>Reclaimable</b> is the amount of memory which the kernel can reuse. <b>Unreclaimable</b> cannot be reused even when the kernel is short of memory.'
+ },
+
+ 'mem.hugepages': {
+ info: 'Dedicated (or Direct) HugePages is memory reserved for applications configured to utilize huge pages. Hugepages are <b>used</b> memory, even if there are free hugepages available.'
+ },
+
+ 'mem.transparent_hugepages': {
+ info: 'Transparent HugePages (THP) is backing virtual memory with huge pages, supporting automatic promotion and demotion of page sizes. It works for all applications for anonymous memory mappings and tmpfs/shmem.'
+ },
+
+ // ------------------------------------------------------------------------
+ // network interfaces
+
+ 'net.drops': {
+        info: 'Packets that have been dropped at the network interface level. These are the same counters reported by <code>ifconfig</code> as <code>RX dropped</code> (inbound) and <code>TX dropped</code> (outbound). <b>Inbound</b> packets can be dropped at the network interface level due to <a href="#menu_system_submenu_softnet_stat">softnet backlog</a> overflow, bad / unintended VLAN tags, unknown or unregistered protocols, or IPv6 frames when the server is not configured for IPv6. Check <a href="https://www.novell.com/support/kb/doc.php?id=7007165" target="_blank">this document</a> for more information.'
+ },
+
+ // ------------------------------------------------------------------------
+ // IP
+
+ 'ip.inerrors': {
+ info: 'Errors encountered during the reception of IP packets. ' +
+ '<code>noroutes</code> (<code>InNoRoutes</code>) counts packets that were dropped because there was no route to send them. ' +
+            '<code>truncated</code> (<code>InTruncatedPkts</code>) counts packets that were discarded because the datagram frame didn\'t carry enough data. ' +
+            '<code>checksum</code> (<code>InCsumErrors</code>) counts packets that were dropped because they had a wrong checksum.'
+ },
+
+ 'ip.tcpmemorypressures': {
+        info: 'Number of times a socket was put in <b>memory pressure</b> due to a non-fatal memory allocation failure (the kernel attempts to work around this situation by reducing the send buffers, etc).'
+ },
+
+ 'ip.tcpconnaborts': {
+        info: 'TCP connection aborts. <b>baddata</b> (<code>TCPAbortOnData</code>) happens while the connection is on <code>FIN_WAIT1</code> and the kernel receives a packet with a sequence number beyond the last one for this connection - the kernel responds with <code>RST</code> (closes the connection). <b>userclosed</b> (<code>TCPAbortOnClose</code>) happens when the kernel receives data on an already closed connection and responds with <code>RST</code>. <b>nomemory</b> (<code>TCPAbortOnMemory</code>) happens when there are too many orphaned sockets (not attached to an fd) and the kernel has to drop a connection - sometimes it will send an <code>RST</code>, sometimes it won\'t. <b>timeout</b> (<code>TCPAbortOnTimeout</code>) happens when a connection times out. <b>linger</b> (<code>TCPAbortOnLinger</code>) happens when the kernel killed a socket that was already closed by the application and lingered around for long enough. <b>failed</b> (<code>TCPAbortFailed</code>) happens when the kernel attempted to send an <code>RST</code> but failed because there was no memory available.'
+ },
+
+ 'ip.tcp_syn_queue': {
+ info: 'The <b>SYN queue</b> of the kernel tracks TCP handshakes until connections get fully established. ' +
+ 'It overflows when too many incoming TCP connection requests hang in the half-open state and the server ' +
+            'is not configured to fall back to SYN cookies. Overflows are usually caused by SYN flood DoS attacks ' +
+ '(i.e. someone sends lots of SYN packets and never completes the handshakes). ' +
+ '<b>drops</b> (or <code>TcpExtTCPReqQFullDrop</code>) is the number of connections dropped because the ' +
+ 'SYN queue was full and SYN cookies were disabled. ' +
+ '<b>cookies</b> (or <code>TcpExtTCPReqQFullDoCookies</code>) is the number of SYN cookies sent because the ' +
+ 'SYN queue was full.'
+ },
+
+ 'ip.tcp_accept_queue': {
+ info: 'The <b>accept queue</b> of the kernel holds the fully established TCP connections, waiting to be handled ' +
+ 'by the listening application. <b>overflows</b> (or <code>ListenOverflows</code>) is the number of ' +
+ 'established connections that could not be handled because the receive queue of the listening application ' +
+ 'was full. <b>drops</b> (or <code>ListenDrops</code>) is the number of incoming ' +
+ 'connections that could not be handled, including SYN floods, overflows, out of memory, security issues, ' +
+ 'no route to destination, reception of related ICMP messages, socket is broadcast or multicast.'
+ },
+
+
+ // ------------------------------------------------------------------------
+ // IPv4
+
+ 'ipv4.tcpsock': {
+ info: 'The number of established TCP connections (known as <code>CurrEstab</code>). This is a snapshot of the established connections at the time of measurement (i.e. a connection established and a connection disconnected within the same iteration will not affect this metric).'
+ },
+
+ 'ipv4.tcpopens': {
+ info: '<b>active</b> or <code>ActiveOpens</code> is the number of outgoing TCP <b>connections attempted</b> by this host.'
+ + ' <b>passive</b> or <code>PassiveOpens</code> is the number of incoming TCP <b>connections accepted</b> by this host.'
+ },
+
+ 'ipv4.tcperrors': {
+ info: '<code>InErrs</code> is the number of TCP segments received in error (including header too small, checksum errors, sequence errors, bad packets - for both IPv4 and IPv6).'
+ + ' <code>InCsumErrors</code> is the number of TCP segments received with checksum errors (for both IPv4 and IPv6).'
+ + ' <code>RetransSegs</code> is the number of TCP segments retransmitted.'
+ },
+
+ 'ipv4.tcphandshake': {
+        info: '<code>EstabResets</code> is the number of established connection resets (i.e. connections that made a direct transition from <code>ESTABLISHED</code> or <code>CLOSE_WAIT</code> to <code>CLOSED</code>).'
+ + ' <code>OutRsts</code> is the number of TCP segments sent, with the <code>RST</code> flag set (for both IPv4 and IPv6).'
+ + ' <code>AttemptFails</code> is the number of times TCP connections made a direct transition from either <code>SYN_SENT</code> or <code>SYN_RECV</code> to <code>CLOSED</code>, plus the number of times TCP connections made a direct transition from the <code>SYN_RECV</code> to <code>LISTEN</code>.'
+ + ' <code>TCPSynRetrans</code> shows retries for new outbound TCP connections, which can indicate general connectivity issues or backlog on the remote host.'
+ },
+
+ // ------------------------------------------------------------------------
+ // APPS
+
+ 'apps.cpu': {
+ height: 2.0
+ },
+
+ 'apps.mem': {
+ info: 'Real memory (RAM) used by applications. This does not include shared memory.'
+ },
+
+ 'apps.vmem': {
+ info: 'Virtual memory allocated by applications. Please check <a href="https://github.com/netdata/netdata/tree/master/daemon#virtual-memory" target="_blank">this article</a> for more information.'
+ },
+
+ 'apps.preads': {
+ height: 2.0
+ },
+
+ 'apps.pwrites': {
+ height: 2.0
+ },
+
+ 'apps.uptime': {
+ info: 'Carried over process group uptime since the Netdata restart. The period of time within which at least one process in the group was running.'
+ },
+
+ 'apps.file_open': {
+        info: 'Calls to the internal function <code>do_sys_open</code> (for kernels newer than <code>5.5.19</code> a kprobe is added to <code>do_sys_openat2</code>), which is the common function called from' +
+ ' <a href="https://www.man7.org/linux/man-pages/man2/open.2.html" target="_blank">open(2)</a> ' +
+ ' and <a href="https://www.man7.org/linux/man-pages/man2/openat.2.html" target="_blank">openat(2)</a>. '
+ },
+
+ 'apps.file_open_error': {
+        info: 'Failed calls to the internal function <code>do_sys_open</code> (for kernels newer than <code>5.5.19</code> a kprobe is added to <code>do_sys_openat2</code>).'
+ },
+
+ 'apps.file_closed': {
+ info: 'Calls to the internal function <a href="https://elixir.bootlin.com/linux/latest/source/fs/file.c#L665" target="_blank">__close_fd</a>, which is called from' +
+ ' <a href="https://www.man7.org/linux/man-pages/man2/close.2.html" target="_blank">close(2)</a>. '
+ },
+
+ 'apps.file_close_error': {
+ info: 'Failed calls to the internal function <a href="https://elixir.bootlin.com/linux/latest/source/fs/file.c#L665" target="_blank">__close_fd</a>.'
+ },
+
+ 'apps.file_deleted': {
+ info: 'Calls to the function <a href="https://www.kernel.org/doc/htmldocs/filesystems/API-vfs-unlink.html" target="_blank">vfs_unlink</a>. This chart does not show all events that remove files from the filesystem, because filesystems can create their own functions to remove files.'
+ },
+
+ 'apps.vfs_write_call': {
+        info: 'Successful calls to the function <a href="https://topic.alibabacloud.com/a/kernel-state-file-operation-__-work-information-kernel_8_8_20287135.html" target="_blank">vfs_write</a>. This chart may not show all filesystem events if the filesystem uses other functions to store data on disk.'
+ },
+
+ 'apps.vfs_write_error': {
+        info: 'Failed calls to the function <a href="https://topic.alibabacloud.com/a/kernel-state-file-operation-__-work-information-kernel_8_8_20287135.html" target="_blank">vfs_write</a>. This chart may not show all filesystem events if the filesystem uses other functions to store data on disk.'
+ },
+
+ 'apps.vfs_read_call': {
+        info: 'Successful calls to the function <a href="https://topic.alibabacloud.com/a/kernel-state-file-operation-__-work-information-kernel_8_8_20287135.html" target="_blank">vfs_read</a>. This chart may not show all filesystem events if the filesystem uses other functions to read data from disk.'
+ },
+
+ 'apps.vfs_read_error': {
+        info: 'Failed calls to the function <a href="https://topic.alibabacloud.com/a/kernel-state-file-operation-__-work-information-kernel_8_8_20287135.html" target="_blank">vfs_read</a>. This chart may not show all filesystem events if the filesystem uses other functions to read data from disk.'
+ },
+
+ 'apps.vfs_write_bytes': {
+        info: 'Total bytes successfully written using the function <a href="https://topic.alibabacloud.com/a/kernel-state-file-operation-__-work-information-kernel_8_8_20287135.html" target="_blank">vfs_write</a>.'
+ },
+
+ 'apps.vfs_read_bytes': {
+        info: 'Total bytes successfully read using the function <a href="https://topic.alibabacloud.com/a/kernel-state-file-operation-__-work-information-kernel_8_8_20287135.html" target="_blank">vfs_read</a>.'
+ },
+
+ 'apps.process_create': {
+        info: 'Calls to either <a href="https://www.ece.uic.edu/~yshi1/linux/lkse/node4.html#SECTION00421000000000000000" target="_blank">do_fork</a>, or <code>kernel_clone</code> if you are running a kernel newer than 5.9.16, to create a new task, which is the common name used to define processes and tasks inside the kernel. Netdata identifies the processes by counting the number of calls to <a href="https://linux.die.net/man/2/clone" target="_blank">sys_clone</a> that do not have the flag <code>CLONE_THREAD</code> set.'
+ },
+
+ 'apps.thread_create': {
+        info: 'Calls to either <a href="https://www.ece.uic.edu/~yshi1/linux/lkse/node4.html#SECTION00421000000000000000" target="_blank">do_fork</a>, or <code>kernel_clone</code> if you are running a kernel newer than 5.9.16, to create a new task, which is the common name used to define processes and tasks inside the kernel. Netdata identifies the threads by counting the number of calls to <a href="https://linux.die.net/man/2/clone" target="_blank">sys_clone</a> that have the flag <code>CLONE_THREAD</code> set.'
+ },
+
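+ // A minimal sketch (not part of this dashboard code) of the CLONE_THREAD
+ // test that separates the two charts above; the helper name is hypothetical,
+ // the flag value is the one from <linux/sched.h>:
+ //
+ //   function isThreadCreation(cloneFlags) {
+ //       var CLONE_THREAD = 0x00010000;            // same thread group as the caller
+ //       return (cloneFlags & CLONE_THREAD) !== 0; // thread if set, process if not
+ //   }
+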
+ 'apps.task_close': {
+ info: 'Calls to the functions responsible for closing (<a href="https://www.informit.com/articles/article.aspx?p=370047&seqNum=4" target="_blank">do_exit</a>) and releasing (<a href="https://www.informit.com/articles/article.aspx?p=370047&seqNum=4" target="_blank">release_task</a>) tasks.'
+ },
+
+ 'apps.bandwidth_sent': {
+ info: 'Bytes sent by functions <code>tcp_sendmsg</code> and <code>udp_sendmsg</code>.'
+ },
+
+ 'apps.bandwidth_recv': {
+ info: 'Bytes received by the functions <code>tcp_cleanup_rbuf</code> and <code>udp_recvmsg</code>. We use <code>tcp_cleanup_rbuf</code> instead of <code>tcp_recvmsg</code>, because the latter misses <code>tcp_read_sock()</code> traffic and we would also need more probes to get the socket and packet size.'
+ },
+
+ 'apps.bandwidth_tcp_send': {
+ info: 'Calls to the function <code>tcp_sendmsg</code>.'
+ },
+
+ 'apps.bandwidth_tcp_recv': {
+ info: 'Calls to the function <code>tcp_cleanup_rbuf</code>. We use <code>tcp_cleanup_rbuf</code> instead of <code>tcp_recvmsg</code>, because the latter misses <code>tcp_read_sock()</code> traffic and we would also need more probes to get the socket and packet size.'
+ },
+
+ 'apps.bandwidth_tcp_retransmit': {
+ info: 'Calls to the function <code>tcp_retransmit_skb</code>.'
+ },
+
+ 'apps.bandwidth_udp_send': {
+ info: 'Calls to the function <code>udp_sendmsg</code>.'
+ },
+
+ 'apps.bandwidth_udp_recv': {
+ info: 'Calls to the function <code>udp_recvmsg</code>.'
+ },
+
+ // ------------------------------------------------------------------------
+ // USERS
+
+ 'users.cpu': {
+ height: 2.0
+ },
+
+ 'users.mem': {
+ info: 'Real memory (RAM) used per user. This does not include shared memory.'
+ },
+
+ 'users.vmem': {
+ info: 'Virtual memory allocated per user. Please check <a href="https://github.com/netdata/netdata/tree/master/daemon#virtual-memory" target="_blank">this article</a> for more information.'
+ },
+
+ 'users.preads': {
+ height: 2.0
+ },
+
+ 'users.pwrites': {
+ height: 2.0
+ },
+
+ 'users.uptime': {
+ info: 'Carried-over process group uptime since the Netdata restart: the period of time within which at least one process in the group was running.'
+ },
+
+ // ------------------------------------------------------------------------
+ // GROUPS
+
+ 'groups.cpu': {
+ height: 2.0
+ },
+
+ 'groups.mem': {
+ info: 'Real memory (RAM) used per user group. This does not include shared memory.'
+ },
+
+ 'groups.vmem': {
+ info: 'Virtual memory allocated per user group. Please check <a href="https://github.com/netdata/netdata/tree/master/daemon#virtual-memory" target="_blank">this article</a> for more information.'
+ },
+
+ 'groups.preads': {
+ height: 2.0
+ },
+
+ 'groups.pwrites': {
+ height: 2.0
+ },
+
+ 'groups.uptime': {
+ info: 'Carried-over process group uptime: the period of time within which at least one process in the group was running.'
+ },
+
+ // ------------------------------------------------------------------------
+ // NETWORK QoS
+
+ 'tc.qos': {
+ heads: [
+ function (os, id) {
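+ // 'os' is not used by this head generator; void() simply discards the unused argument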
+ void (os);
+
+ if (id.match(/.*-ifb$/))
+ return netdataDashboard.gaugeChart('Inbound', '12%', '', '#5555AA');
+ else
+ return netdataDashboard.gaugeChart('Outbound', '12%', '', '#AA9900');
+ }
+ ]
+ },
+
+ // ------------------------------------------------------------------------
+ // NETWORK INTERFACES
+
+ 'net.net': {
+ mainheads: [
+ function (os, id) {
+ void (os);
+ if (id.match(/^cgroup_.*/)) {
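+ // chart ids look like 'cgroup_<name>.net_<iface>'; pick out the interface name that follows '.net_'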
+ var iface;
+ try {
+ iface = ' ' + id.substring(id.lastIndexOf('.net_') + 5, id.length);
+ } catch (e) {
+ iface = '';
+ }
+ return netdataDashboard.gaugeChart('Received' + iface, '12%', 'received');
+ } else
+ return '';
+ },
+ function (os, id) {
+ void (os);
+ if (id.match(/^cgroup_.*/)) {
+ var iface;
+ try {
+ iface = ' ' + id.substring(id.lastIndexOf('.net_') + 5, id.length);
+ } catch (e) {
+ iface = '';
+ }
+ return netdataDashboard.gaugeChart('Sent' + iface, '12%', 'sent');
+ } else
+ return '';
+ }
+ ],
+ heads: [
+ function (os, id) {
+ void (os);
+ if (!id.match(/^cgroup_.*/))
+ return netdataDashboard.gaugeChart('Received', '12%', 'received');
+ else
+ return '';
+ },
+ function (os, id) {
+ void (os);
+ if (!id.match(/^cgroup_.*/))
+ return netdataDashboard.gaugeChart('Sent', '12%', 'sent');
+ else
+ return '';
+ }
+ ]
+ },
+
+ // ------------------------------------------------------------------------
+ // NETFILTER
+
+ 'netfilter.sockets': {
+ colors: '#88AA00',
+ heads: [
+ netdataDashboard.gaugeChart('Active Connections', '12%', '', '#88AA00')
+ ]
+ },
+
+ 'netfilter.new': {
+ heads: [
+ netdataDashboard.gaugeChart('New Connections', '12%', 'new', '#5555AA')
+ ]
+ },
+
+ // ------------------------------------------------------------------------
+ // DISKS
+
+ 'disk.util': {
+ colors: '#FF5588',
+ heads: [
+ netdataDashboard.gaugeChart('Utilization', '12%', '', '#FF5588')
+ ],
+ info: 'Disk Utilization measures the amount of time the disk was busy with something. This is not related to its performance. 100% means that the system always had an outstanding operation on the disk. Keep in mind that depending on the underlying technology of the disk, 100% here may or may not be an indication of congestion.'
+ },
+
+ 'disk.backlog': {
+ colors: '#0099CC',
+ info: 'Backlog is an indication of the duration of pending disk operations. On every I/O event the system is multiplying the time spent doing I/O since the last update of this field with the number of pending operations. While not accurate, this metric can provide an indication of the expected completion time of the operations in progress.'
+ },
+
+ 'disk.io': {
+ heads: [
+ netdataDashboard.gaugeChart('Read', '12%', 'reads'),
+ netdataDashboard.gaugeChart('Write', '12%', 'writes')
+ ],
+ info: 'Amount of data transferred to and from disk.'
+ },
+
+ 'disk.ops': {
+ info: 'Completed disk I/O operations. Keep in mind the number of operations requested might be higher, since the system is able to merge operations that are adjacent to each other (see the merged operations chart).'
+ },
+
+ 'disk.qops': {
+ info: 'I/O operations currently in progress. This metric is a snapshot - it is not an average over the last interval.'
+ },
+
+ 'disk.iotime': {
+ height: 0.5,
+ info: 'The sum of the duration of all completed I/O operations. This number can exceed the interval if the disk is able to execute I/O operations in parallel.'
+ },
+ 'disk.mops': {
+ height: 0.5,
+ info: 'The number of merged disk operations. The system is able to merge adjacent I/O operations, for example two 4KB reads can become one 8KB read before being given to the disk.'
+ },
+ 'disk.svctm': {
+ height: 0.5,
+ info: 'The average service time for completed I/O operations. This metric is calculated using the total busy time of the disk and the number of completed operations. If the disk is able to execute multiple operations in parallel, the reported average service time will be misleading.'
+ },
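+ // Roughly, the service time described above reduces to (hypothetical names):
+ //
+ //   var svctm = diskBusyTimeMs / completedOperations; // ms per operation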
+ 'disk.avgsz': {
+ height: 0.5,
+ info: 'The average I/O operation size.'
+ },
+ 'disk.await': {
+ height: 0.5,
+ info: 'The average time for I/O requests issued to the device to be served. This includes the time spent by the requests in queue and the time spent servicing them.'
+ },
+
+ 'disk.space': {
+ info: 'Disk space utilization. <code>reserved for root</code> is automatically reserved by the system to prevent the root user from running out of space.'
+ },
+ 'disk.inodes': {
+ info: 'inodes (or index nodes) are filesystem objects (e.g. files and directories). On many types of file system implementations, the maximum number of inodes is fixed at filesystem creation, limiting the maximum number of files the filesystem can hold. It is possible for a device to run out of inodes. When this happens, new files cannot be created on the device, even though there may be free space available.'
+ },
+
+ // ------------------------------------------------------------------------
+ // MYSQL
+
+ 'mysql.net': {
+ info: 'The amount of data sent to mysql clients (<strong>out</strong>) and received from mysql clients (<strong>in</strong>).'
+ },
+
+ 'mysql.queries': {
+ info: 'The number of statements executed by the server.<ul>' +
+ '<li><strong>queries</strong> counts the statements executed within stored SQL programs.</li>' +
+ '<li><strong>questions</strong> counts the statements sent to the mysql server by mysql clients.</li>' +
+ '<li><strong>slow queries</strong> counts the number of statements that took more than <a href="http://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_long_query_time" target="_blank">long_query_time</a> seconds to be executed.' +
+ ' For more information about slow queries check the mysql <a href="http://dev.mysql.com/doc/refman/5.7/en/slow-query-log.html" target="_blank">slow query log</a>.</li>' +
+ '</ul>'
+ },
+
+ 'mysql.handlers': {
+ info: 'Usage of the internal handlers of mysql. This chart provides very good insight into what the mysql server is actually doing.' +
+ ' (if the chart is not showing all these dimensions, it is because they are zero - set <strong>Which dimensions to show?</strong> to <strong>All</strong> in the dashboard settings to render even the zero values)<ul>' +
+ '<li><strong>commit</strong>, the number of internal <a href="http://dev.mysql.com/doc/refman/5.7/en/commit.html" target="_blank">COMMIT</a> statements.</li>' +
+ '<li><strong>delete</strong>, the number of times that rows have been deleted from tables.</li>' +
+ '<li><strong>prepare</strong>, a counter for the prepare phase of two-phase commit operations.</li>' +
+ '<li><strong>read first</strong>, the number of times the first entry in an index was read. A high value suggests that the server is doing a lot of full index scans; e.g. <strong>SELECT col1 FROM foo</strong>, with col1 indexed.</li>' +
+ '<li><strong>read key</strong>, the number of requests to read a row based on a key. If this value is high, it is a good indication that your tables are properly indexed for your queries.</li>' +
+ '<li><strong>read next</strong>, the number of requests to read the next row in key order. This value is incremented if you are querying an index column with a range constraint or if you are doing an index scan.</li>' +
+ '<li><strong>read prev</strong>, the number of requests to read the previous row in key order. This read method is mainly used to optimize <strong>ORDER BY ... DESC</strong>.</li>' +
+ '<li><strong>read rnd</strong>, the number of requests to read a row based on a fixed position. A high value indicates you are doing a lot of queries that require sorting of the result. You probably have a lot of queries that require MySQL to scan entire tables or you have joins that do not use keys properly.</li>' +
+ '<li><strong>read rnd next</strong>, the number of requests to read the next row in the data file. This value is high if you are doing a lot of table scans. Generally this suggests that your tables are not properly indexed or that your queries are not written to take advantage of the indexes you have.</li>' +
+ '<li><strong>rollback</strong>, the number of requests for a storage engine to perform a rollback operation.</li>' +
+ '<li><strong>savepoint</strong>, the number of requests for a storage engine to place a savepoint.</li>' +
+ '<li><strong>savepoint rollback</strong>, the number of requests for a storage engine to roll back to a savepoint.</li>' +
+ '<li><strong>update</strong>, the number of requests to update a row in a table.</li>' +
+ '<li><strong>write</strong>, the number of requests to insert a row in a table.</li>' +
+ '</ul>'
+ },
+
+ 'mysql.table_locks': {
+ info: 'MySQL table locks counters: <ul>' +
+ '<li><strong>immediate</strong>, the number of times that a request for a table lock could be granted immediately.</li>' +
+ '<li><strong>waited</strong>, the number of times that a request for a table lock could not be granted immediately and a wait was needed. If this is high and you have performance problems, you should first optimize your queries, and then either split your table or tables or use replication.</li>' +
+ '</ul>'
+ },
+
+ 'mysql.innodb_deadlocks': {
+ info: 'A deadlock happens when two or more transactions mutually hold and request locks, creating a cycle of dependencies. See <a href="https://dev.mysql.com/doc/refman/5.7/en/innodb-deadlocks-handling.html" target="_blank">how to minimize and handle deadlocks</a> for more information.'
+ },
+
+ 'mysql.galera_cluster_status': {
+ info:
+ '<code>-1</code>: unknown, ' +
+ '<code>0</code>: primary (primary group configuration, quorum present), ' +
+ '<code>1</code>: non-primary (non-primary group configuration, quorum lost), ' +
+ '<code>2</code>: disconnected (not connected to the group, retrying).'
+ },
+
+ 'mysql.galera_cluster_state': {
+ info:
+ '<code>0</code>: undefined, ' +
+ '<code>1</code>: joining, ' +
+ '<code>2</code>: donor/desynced, ' +
+ '<code>3</code>: joined, ' +
+ '<code>4</code>: synced.'
+ },
+
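+ // The two mappings above, written out as lookup tables (a sketch only, not
+ // used by the dashboard itself):
+ //
+ //   var GALERA_CLUSTER_STATUS = { '-1': 'unknown', 0: 'primary', 1: 'non-primary', 2: 'disconnected' };
+ //   var GALERA_CLUSTER_STATE = { 0: 'undefined', 1: 'joining', 2: 'donor/desynced', 3: 'joined', 4: 'synced' };
+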
+ 'mysql.galera_cluster_weight': {
+ info: 'The value is computed as the sum of <code>pc.weight</code> of the nodes in the current Primary Component.'
+ },
+
+ 'mysql.galera_connected': {
+ info: '<code>0</code> means that the node has not yet connected to any of the cluster components. ' +
+ 'This may be due to misconfiguration.'
+ },
+
+ 'mysql.open_transactions': {
+ info: 'The number of locally running transactions which have been registered inside the wsrep provider. ' +
+ 'This means transactions which have performed operations that caused write set population to happen. ' +
+ 'Read-only transactions are not counted.'
+ },
+
+
+ // ------------------------------------------------------------------------
+ // POSTGRESQL
+
+
+ 'postgres.db_stat_blks': {
+ info: 'Blocks read from disk or cache.<ul>' +
+ '<li><strong>blks_read:</strong> number of disk blocks read in this database.</li>' +
+ '<li><strong>blks_hit:</strong> number of times disk blocks were found already in the buffer cache, so that a read was not necessary (this only includes hits in the PostgreSQL buffer cache, not the operating system&#39;s file system cache)</li>' +
+ '</ul>'
+ },
+ 'postgres.db_stat_tuple_write': {
+ info: '<ul><li>Number of rows inserted/updated/deleted.</li>' +
+ '<li><strong>conflicts:</strong> number of queries canceled due to conflicts with recovery in this database. (Conflicts occur only on standby servers; see <a href="https://www.postgresql.org/docs/10/static/monitoring-stats.html#PG-STAT-DATABASE-CONFLICTS-VIEW" target="_blank">pg_stat_database_conflicts</a> for details.)</li>' +
+ '</ul>'
+ },
+ 'postgres.db_stat_temp_bytes': {
+ info: 'Temporary files can be created on disk for sorts, hashes, and temporary query results.'
+ },
+ 'postgres.db_stat_temp_files': {
+ info: '<ul>' +
+ '<li><strong>files:</strong> number of temporary files created by queries. All temporary files are counted, regardless of why the temporary file was created (e.g., sorting or hashing).</li>' +
+ '</ul>'
+ },
+ 'postgres.archive_wal': {
+ info: 'WAL archiving.<ul>' +
+ '<li><strong>total:</strong> total files.</li>' +
+ '<li><strong>ready:</strong> WAL waiting to be archived.</li>' +
+ '<li><strong>done:</strong> WAL successfully archived. ' +
+ 'Accumulating ready WAL files can indicate that <code>archive_command</code> is failing; see <a href="https://www.postgresql.org/docs/current/static/continuous-archiving.html" target="_blank">Continuous Archiving and Point-in-Time Recovery</a>.</li>' +
+ '</ul>'
+ },
+ 'postgres.checkpointer': {
+ info: 'Number of checkpoints.<ul>' +
+ '<li><strong>scheduled:</strong> when checkpoint_timeout is reached.</li>' +
+ '<li><strong>requested:</strong> when max_wal_size is reached.</li>' +
+ '</ul>' +
+ 'For more information see <a href="https://www.postgresql.org/docs/current/static/wal-configuration.html" target="_blank">WAL Configuration</a>.'
+ },
+ 'postgres.autovacuum': {
+ info: 'PostgreSQL databases require periodic maintenance known as vacuuming. For many installations, it is sufficient to let vacuuming be performed by the autovacuum daemon. ' +
+ 'For more information see <a href="https://www.postgresql.org/docs/current/static/routine-vacuuming.html#AUTOVACUUM" target="_blank">The Autovacuum Daemon</a>.'
+ },
+ 'postgres.standby_delta': {
+ info: 'Streaming replication delta.<ul>' +
+ '<li><strong>sent_delta:</strong> replication delta sent to standby.</li>' +
+ '<li><strong>write_delta:</strong> replication delta written to disk by this standby.</li>' +
+ '<li><strong>flush_delta:</strong> replication delta flushed to disk by this standby server.</li>' +
+ '<li><strong>replay_delta:</strong> replication delta replayed into the database on this standby server.</li>' +
+ '</ul>' +
+ 'For more information see <a href="https://www.postgresql.org/docs/current/static/warm-standby.html#SYNCHRONOUS-REPLICATION" target="_blank">Synchronous Replication</a>.'
+ },
+ 'postgres.replication_slot': {
+ info: 'Replication slot files.<ul>' +
+ '<li><strong>wal_keeped:</strong> WAL files retained by each replication slot.</li>' +
+ '<li><strong>pg_replslot_files:</strong> files present in pg_replslot.</li>' +
+ '</ul>' +
+ 'For more information see <a href="https://www.postgresql.org/docs/current/static/warm-standby.html#STREAMING-REPLICATION-SLOTS" target="_blank">Replication Slots</a>.'
+ },
+ 'postgres.backend_usage': {
+ info: 'Connections usage against maximum connections allowed, as defined in the <i>max_connections</i> setting.<ul>' +
+ '<li><strong>available:</strong> maximum new connections allowed.</li>' +
+ '<li><strong>used:</strong> connections currently in use.</li>' +
+ '</ul>' +
+ 'This assumes non-superuser accounts are used to connect to Postgres (so <i>superuser_reserved_connections</i> is subtracted from <i>max_connections</i>).<br/>' +
+ 'For more information see <a href="https://www.postgresql.org/docs/current/runtime-config-connection.html" target="_blank">Connections and Authentication</a>.'
+ },
+
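+ // The relation described above, as a one-line sketch (hypothetical names):
+ //
+ //   available = max_connections - superuser_reserved_connections - used;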
+
+ // ------------------------------------------------------------------------
+ // APACHE
+
+ 'apache.connections': {
+ colors: NETDATA.colors[4],
+ mainheads: [
+ netdataDashboard.gaugeChart('Connections', '12%', '', NETDATA.colors[4])
+ ]
+ },
+
+ 'apache.requests': {
+ colors: NETDATA.colors[0],
+ mainheads: [
+ netdataDashboard.gaugeChart('Requests', '12%', '', NETDATA.colors[0])
+ ]
+ },
+
+ 'apache.net': {
+ colors: NETDATA.colors[3],
+ mainheads: [
+ netdataDashboard.gaugeChart('Bandwidth', '12%', '', NETDATA.colors[3])
+ ]
+ },
+
+ 'apache.workers': {
+ mainheads: [
+ function (os, id) {
+ void (os);
+ return '<div data-netdata="' + id + '"'
+ + ' data-dimensions="busy"'
+ + ' data-append-options="percentage"'
+ + ' data-gauge-max-value="100"'
+ + ' data-chart-library="gauge"'
+ + ' data-title="Workers Utilization"'
+ + ' data-units="percentage %"'
+ + ' data-gauge-adjust="width"'
+ + ' data-width="12%"'
+ + ' data-before="0"'
+ + ' data-after="-CHART_DURATION"'
+ + ' data-points="CHART_DURATION"'
+ + ' role="application"></div>';
+ }
+ ]
+ },
+
+ 'apache.bytesperreq': {
+ colors: NETDATA.colors[3],
+ height: 0.5
+ },
+
+ 'apache.reqpersec': {
+ colors: NETDATA.colors[4],
+ height: 0.5
+ },
+
+ 'apache.bytespersec': {
+ colors: NETDATA.colors[6],
+ height: 0.5
+ },
+
+
+ // ------------------------------------------------------------------------
+ // LIGHTTPD
+
+ 'lighttpd.connections': {
+ colors: NETDATA.colors[4],
+ mainheads: [
+ netdataDashboard.gaugeChart('Connections', '12%', '', NETDATA.colors[4])
+ ]
+ },
+
+ 'lighttpd.requests': {
+ colors: NETDATA.colors[0],
+ mainheads: [
+ netdataDashboard.gaugeChart('Requests', '12%', '', NETDATA.colors[0])
+ ]
+ },
+
+ 'lighttpd.net': {
+ colors: NETDATA.colors[3],
+ mainheads: [
+ netdataDashboard.gaugeChart('Bandwidth', '12%', '', NETDATA.colors[3])
+ ]
+ },
+
+ 'lighttpd.workers': {
+ mainheads: [
+ function (os, id) {
+ void (os);
+ return '<div data-netdata="' + id + '"'
+ + ' data-dimensions="busy"'
+ + ' data-append-options="percentage"'
+ + ' data-gauge-max-value="100"'
+ + ' data-chart-library="gauge"'
+ + ' data-title="Servers Utilization"'
+ + ' data-units="percentage %"'
+ + ' data-gauge-adjust="width"'
+ + ' data-width="12%"'
+ + ' data-before="0"'
+ + ' data-after="-CHART_DURATION"'
+ + ' data-points="CHART_DURATION"'
+ + ' role="application"></div>';
+ }
+ ]
+ },
+
+ 'lighttpd.bytesperreq': {
+ colors: NETDATA.colors[3],
+ height: 0.5
+ },
+
+ 'lighttpd.reqpersec': {
+ colors: NETDATA.colors[4],
+ height: 0.5
+ },
+
+ 'lighttpd.bytespersec': {
+ colors: NETDATA.colors[6],
+ height: 0.5
+ },
+
+ // ------------------------------------------------------------------------
+ // NGINX
+
+ 'nginx.connections': {
+ colors: NETDATA.colors[4],
+ mainheads: [
+ netdataDashboard.gaugeChart('Connections', '12%', '', NETDATA.colors[4])
+ ]
+ },
+
+ 'nginx.requests': {
+ colors: NETDATA.colors[0],
+ mainheads: [
+ netdataDashboard.gaugeChart('Requests', '12%', '', NETDATA.colors[0])
+ ]
+ },
+
+ // ------------------------------------------------------------------------
+ // HTTP check
+
+ 'httpcheck.responsetime': {
+ info: 'The <code>response time</code> describes the time passed between request and response. ' +
+ 'Currently, the accuracy of the response time is low, so it should be used as a reference only.'
+ },
+
+ 'httpcheck.responselength': {
+ info: 'The <code>response length</code> counts the number of characters in the response body. For static pages, this should be mostly constant.'
+ },
+
+ 'httpcheck.status': {
+ valueRange: "[0, 1]",
+ info: 'This chart verifies the response of the webserver. Each status dimension will have a value of <code>1</code> if triggered. ' +
+ 'Dimension <code>success</code> is <code>1</code> only if all constraints are satisfied. ' +
+ 'This chart is most useful for alarms or third-party apps.'
+ },
+
+ // ------------------------------------------------------------------------
+ // NETDATA
+
+ 'netdata.response_time': {
+ info: 'The netdata API response time measures the time netdata needed to serve requests. This time includes everything, from the reception of the first byte of a request, to the dispatch of the last byte of its reply, therefore it includes all network latencies involved (i.e. a client over a slow network will influence these metrics).'
+ },
+
+ // ------------------------------------------------------------------------
+ // RETROSHARE
+
+ 'retroshare.bandwidth': {
+ info: 'RetroShare inbound and outbound traffic.',
+ mainheads: [
+ netdataDashboard.gaugeChart('Received', '12%', 'bandwidth_down_kb'),
+ netdataDashboard.gaugeChart('Sent', '12%', 'bandwidth_up_kb')
+ ]
+ },
+
+ 'retroshare.peers': {
+ info: 'Number of (connected) RetroShare friends.',
+ mainheads: [
+ function (os, id) {
+ void (os);
+ return '<div data-netdata="' + id + '"'
+ + ' data-dimensions="peers_connected"'
+ + ' data-append-options="friends"'
+ + ' data-chart-library="easypiechart"'
+ + ' data-title="connected friends"'
+ + ' data-units=""'
+ + ' data-width="8%"'
+ + ' data-before="0"'
+ + ' data-after="-CHART_DURATION"'
+ + ' data-points="CHART_DURATION"'
+ + ' role="application"></div>';
+ }
+ ]
+ },
+
+ 'retroshare.dht': {
+ info: 'Statistics about RetroShare\'s DHT. These values are estimated!'
+ },
+
+ // ------------------------------------------------------------------------
+ // fping
+
+ 'fping.quality': {
+ colors: NETDATA.colors[10],
+ height: 0.5
+ },
+
+ 'fping.packets': {
+ height: 0.5
+ },
+
+
+ // ------------------------------------------------------------------------
+ // containers
+
+ 'cgroup.cpu_limit': {
+ valueRange: "[0, null]",
+ mainheads: [
+ function (os, id) {
+ void (os);
+ cgroupCPULimitIsSet = 1;
+ return '<div data-netdata="' + id + '"'
+ + ' data-dimensions="used"'
+ + ' data-gauge-max-value="100"'
+ + ' data-chart-library="gauge"'
+ + ' data-title="CPU"'
+ + ' data-units="%"'
+ + ' data-gauge-adjust="width"'
+ + ' data-width="12%"'
+ + ' data-before="0"'
+ + ' data-after="-CHART_DURATION"'
+ + ' data-points="CHART_DURATION"'
+ + ' data-colors="' + NETDATA.colors[4] + '"'
+ + ' role="application"></div>';
+ }
+ ]
+ },
+
+ 'cgroup.cpu': {
+ mainheads: [
+ function (os, id) {
+ void (os);
+ if (cgroupCPULimitIsSet === 0) {
+ return '<div data-netdata="' + id + '"'
+ + ' data-chart-library="gauge"'
+ + ' data-title="CPU"'
+ + ' data-units="%"'
+ + ' data-gauge-adjust="width"'
+ + ' data-width="12%"'
+ + ' data-before="0"'
+ + ' data-after="-CHART_DURATION"'
+ + ' data-points="CHART_DURATION"'
+ + ' data-colors="' + NETDATA.colors[4] + '"'
+ + ' role="application"></div>';
+ } else
+ return '';
+ }
+ ]
+ },
+
+ 'cgroup.mem_usage_limit': {
+ mainheads: [
+ function (os, id) {
+ void (os);
+ cgroupMemLimitIsSet = 1;
+ return '<div data-netdata="' + id + '"'
+ + ' data-dimensions="used"'
+ + ' data-append-options="percentage"'
+ + ' data-gauge-max-value="100"'
+ + ' data-chart-library="gauge"'
+ + ' data-title="Memory"'
+ + ' data-units="%"'
+ + ' data-gauge-adjust="width"'
+ + ' data-width="12%"'
+ + ' data-before="0"'
+ + ' data-after="-CHART_DURATION"'
+ + ' data-points="CHART_DURATION"'
+ + ' data-colors="' + NETDATA.colors[1] + '"'
+ + ' role="application"></div>';
+ }
+ ]
+ },
+
+ 'cgroup.mem_usage': {
+ mainheads: [
+ function (os, id) {
+ void (os);
+ if (cgroupMemLimitIsSet === 0) {
+ return '<div data-netdata="' + id + '"'
+ + ' data-chart-library="gauge"'
+ + ' data-title="Memory"'
+ + ' data-units="MB"'
+ + ' data-gauge-adjust="width"'
+ + ' data-width="12%"'
+ + ' data-before="0"'
+ + ' data-after="-CHART_DURATION"'
+ + ' data-points="CHART_DURATION"'
+ + ' data-colors="' + NETDATA.colors[1] + '"'
+ + ' role="application"></div>';
+ } else
+ return '';
+ }
+ ]
+ },
+
+ 'cgroup.throttle_io': {
+ mainheads: [
+ function (os, id) {
+ void (os);
+ return '<div data-netdata="' + id + '"'
+ + ' data-dimensions="read"'
+ + ' data-chart-library="gauge"'
+ + ' data-title="Read Disk I/O"'
+ + ' data-units="KB/s"'
+ + ' data-gauge-adjust="width"'
+ + ' data-width="12%"'
+ + ' data-before="0"'
+ + ' data-after="-CHART_DURATION"'
+ + ' data-points="CHART_DURATION"'
+ + ' data-colors="' + NETDATA.colors[2] + '"'
+ + ' role="application"></div>';
+ },
+ function (os, id) {
+ void (os);
+ return '<div data-netdata="' + id + '"'
+ + ' data-dimensions="write"'
+ + ' data-chart-library="gauge"'
+ + ' data-title="Write Disk I/O"'
+ + ' data-units="KB/s"'
+ + ' data-gauge-adjust="width"'
+ + ' data-width="12%"'
+ + ' data-before="0"'
+ + ' data-after="-CHART_DURATION"'
+ + ' data-points="CHART_DURATION"'
+ + ' data-colors="' + NETDATA.colors[3] + '"'
+ + ' role="application"></div>';
+ }
+ ]
+ },
+
+ // ------------------------------------------------------------------------
+ // beanstalkd
+ // system charts
+ 'beanstalk.cpu_usage': {
+ info: 'Amount of CPU time, in user and system mode, used by beanstalkd.'
+ },
+
+ // This is also a per-tube stat
+ 'beanstalk.jobs_rate': {
+ info: 'The rate of jobs processed by the beanstalkd server.'
+ },
+
+ 'beanstalk.connections_rate': {
+ info: 'The rate of connections opened to beanstalkd.'
+ },
+
+ 'beanstalk.commands_rate': {
+ info: 'The rate of commands received by beanstalkd.'
+ },
+
+ 'beanstalk.current_tubes': {
+ info: 'Total number of current tubes on the server including the default tube (which always exists).'
+ },
+
+ 'beanstalk.current_jobs': {
+ info: 'Current number of jobs in all tubes grouped by status: urgent, ready, reserved, delayed and buried.'
+ },
+
+ 'beanstalk.current_connections': {
+ info: 'Current number of connections grouped by connection type: written, producers, workers, waiting.'
+ },
+
+ 'beanstalk.binlog': {
+ info: 'The rate of records <code>written</code> to binlog and <code>migrated</code> as part of compaction.'
+ },
+
+ 'beanstalk.uptime': {
+ info: 'Total time the beanstalkd server has been up.'
+ },
+
+ // tube charts
+ 'beanstalk.jobs': {
+ info: 'Number of jobs currently in the tube grouped by status: urgent, ready, reserved, delayed and buried.'
+ },
+
+ 'beanstalk.connections': {
+ info: 'The current number of connections to this tube grouped by connection type: using, waiting and watching.'
+ },
+
+ 'beanstalk.commands': {
+ info: 'The rate of <code>delete</code> and <code>pause</code> commands executed by beanstalkd.'
+ },
+
+ 'beanstalk.pause': {
+ info: 'How long the tube has been paused, and how much of the pause remains.'
+ },
+
+ // ------------------------------------------------------------------------
+ // ceph
+
+ 'ceph.general_usage': {
+ info: 'Used and available space across the Ceph cluster.'
+ },
+
+ 'ceph.general_objects': {
+ info: 'Total number of objects stored in the Ceph cluster.'
+ },
+
+ 'ceph.general_bytes': {
+ info: 'Data read and written per second across the cluster.'
+ },
+
+ 'ceph.general_operations': {
+ info: 'Number of read and write operations per second.'
+ },
+
+ 'ceph.general_latency': {
+ info: 'Total of apply and commit latency in all OSDs. The apply latency is the total time taken to flush an update to disk. The commit latency is the total time taken to commit an operation to the journal.'
+ },
+
+ 'ceph.pool_usage': {
+ info: 'Used space in each pool.'
+ },
+
+ 'ceph.pool_objects': {
+ info: 'Number of objects present in each pool.'
+ },
+
+ 'ceph.pool_read_bytes': {
+ info: 'The rate of read data per second in each pool.'
+ },
+
+ 'ceph.pool_write_bytes': {
+ info: 'The rate of write data per second in each pool.'
+ },
+
+ 'ceph.pool_read_objects': {
+ info: 'Number of objects read per second in each pool.'
+ },
+
+ 'ceph.pool_write_objects': {
+ info: 'Number of objects written per second in each pool.'
+ },
+
+ 'ceph.osd_usage': {
+ info: 'Used space in each OSD.'
+ },
+
+ 'ceph.osd_size': {
+ info: "Each OSD's size"
+ },
+
+ 'ceph.apply_latency': {
+ info: 'Time taken to flush an update in each OSD.'
+ },
+
+ 'ceph.commit_latency': {
+ info: 'Time taken to commit an operation to the journal in each OSD.'
+ },
+
+ // ------------------------------------------------------------------------
+ // web_log
+
+ 'web_log.response_statuses': {
+ info: 'Web server responses by type. <code>success</code> includes <b>1xx</b>, <b>2xx</b>, <b>304</b> and <b>401</b>, <code>error</code> includes <b>5xx</b>, <code>redirect</code> includes <b>3xx</b> except <b>304</b>, <code>bad</code> includes <b>4xx</b> except <b>401</b>, <code>other</code> are all the other responses.',
+ mainheads: [
+ function (os, id) {
+ void (os);
+ return '<div data-netdata="' + id + '"'
+ + ' data-dimensions="success"'
+ + ' data-chart-library="gauge"'
+ + ' data-title="Successful"'
+ + ' data-units="requests/s"'
+ + ' data-gauge-adjust="width"'
+ + ' data-width="12%"'
+ + ' data-before="0"'
+ + ' data-after="-CHART_DURATION"'
+ + ' data-points="CHART_DURATION"'
+ + ' data-common-max="' + id + '"'
+ + ' data-colors="' + NETDATA.colors[0] + '"'
+ + ' data-decimal-digits="0"'
+ + ' role="application"></div>';
+ },
+
+ function (os, id) {
+ void (os);
+ return '<div data-netdata="' + id + '"'
+ + ' data-dimensions="redirect"'
+ + ' data-chart-library="gauge"'
+ + ' data-title="Redirects"'
+ + ' data-units="requests/s"'
+ + ' data-gauge-adjust="width"'
+ + ' data-width="12%"'
+ + ' data-before="0"'
+ + ' data-after="-CHART_DURATION"'
+ + ' data-points="CHART_DURATION"'
+ + ' data-common-max="' + id + '"'
+ + ' data-colors="' + NETDATA.colors[2] + '"'
+ + ' data-decimal-digits="0"'
+ + ' role="application"></div>';
+ },
+
+ function (os, id) {
+ void (os);
+ return '<div data-netdata="' + id + '"'
+ + ' data-dimensions="bad"'
+ + ' data-chart-library="gauge"'
+ + ' data-title="Bad Requests"'
+ + ' data-units="requests/s"'
+ + ' data-gauge-adjust="width"'
+ + ' data-width="12%"'
+ + ' data-before="0"'
+ + ' data-after="-CHART_DURATION"'
+ + ' data-points="CHART_DURATION"'
+ + ' data-common-max="' + id + '"'
+ + ' data-colors="' + NETDATA.colors[3] + '"'
+ + ' data-decimal-digits="0"'
+ + ' role="application"></div>';
+ },
+
+ function (os, id) {
+ void (os);
+ return '<div data-netdata="' + id + '"'
+ + ' data-dimensions="error"'
+ + ' data-chart-library="gauge"'
+ + ' data-title="Server Errors"'
+ + ' data-units="requests/s"'
+ + ' data-gauge-adjust="width"'
+ + ' data-width="12%"'
+ + ' data-before="0"'
+ + ' data-after="-CHART_DURATION"'
+ + ' data-points="CHART_DURATION"'
+ + ' data-common-max="' + id + '"'
+ + ' data-colors="' + NETDATA.colors[1] + '"'
+ + ' data-decimal-digits="0"'
+ + ' role="application"></div>';
+ }
+ ]
+ },
+
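+ // A sketch of the classification described above (hypothetical helper, not
+ // part of the plugin):
+ //
+ //   function classifyStatus(code) {
+ //       if ((code >= 100 && code < 300) || code === 304 || code === 401) return 'success';
+ //       if (code >= 500 && code < 600) return 'error';
+ //       if (code >= 300 && code < 400) return 'redirect'; // 304 already matched above
+ //       if (code >= 400 && code < 500) return 'bad';      // 401 already matched above
+ //       return 'other';
+ //   }
+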
+ 'web_log.response_codes': {
+ info: 'Web server responses by code family. ' +
+ 'According to the standards <code>1xx</code> are informational responses, ' +
+ '<code>2xx</code> are successful responses, ' +
+ '<code>3xx</code> are redirects (although they include <b>304</b> which is used as "<b>not modified</b>"), ' +
+ '<code>4xx</code> are bad requests, ' +
+ '<code>5xx</code> are internal server errors, ' +
+ '<code>other</code> are non-standard responses, ' +
+ '<code>unmatched</code> counts the lines in the log file that are not matched by the plugin (<a href="https://github.com/netdata/netdata/issues/new?title=web_log%20reports%20unmatched%20lines&body=web_log%20plugin%20reports%20unmatched%20lines.%0A%0AThis%20is%20my%20log:%0A%0A%60%60%60txt%0A%0Aplease%20paste%20your%20web%20server%20log%20here%0A%0A%60%60%60" target="_blank">let us know</a> if you have any unmatched).'
+ },
+
+ 'web_log.response_time': {
+ mainheads: [
+ function (os, id) {
+ void (os);
+ return '<div data-netdata="' + id + '"'
+ + ' data-dimensions="avg"'
+ + ' data-chart-library="gauge"'
+ + ' data-title="Average Response Time"'
+ + ' data-units="milliseconds"'
+ + ' data-gauge-adjust="width"'
+ + ' data-width="12%"'
+ + ' data-before="0"'
+ + ' data-after="-CHART_DURATION"'
+ + ' data-points="CHART_DURATION"'
+ + ' data-colors="' + NETDATA.colors[4] + '"'
+ + ' data-decimal-digits="2"'
+ + ' role="application"></div>';
+ }
+ ]
+ },
+
+ 'web_log.detailed_response_codes': {
+ info: 'Number of responses for each response code individually.'
+ },
+
+ 'web_log.requests_per_ipproto': {
+ info: 'Web server requests received per IP protocol version.'
+ },
+
+ 'web_log.clients': {
+ info: 'Unique client IPs accessing the web server, within each data collection iteration. If data collection is <b>per second</b>, this chart shows <b>unique client IPs per second</b>.'
+ },
+
+ 'web_log.clients_all': {
+ info: 'Unique client IPs accessing the web server since the last restart of netdata. This plugin keeps in memory all the unique IPs that have accessed the web server. On very busy web servers (several million unique IPs) you may want to disable this chart (check <a href="https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/web_log/web_log.conf" target="_blank"><code>/etc/netdata/python.d/web_log.conf</code></a>).'
+ },
+
+ // ------------------------------------------------------------------------
+ // web_log for squid
+
+ 'web_log.squid_response_statuses': {
+ info: 'Squid responses by type. ' +
+ '<code>success</code> includes <b>1xx</b>, <b>2xx</b>, <b>000</b>, <b>304</b>, ' +
+ '<code>error</code> includes <b>5xx</b> and <b>6xx</b>, ' +
+ '<code>redirect</code> includes <b>3xx</b> except <b>304</b>, ' +
+ '<code>bad</code> includes <b>4xx</b>, ' +
+ '<code>other</code> are all the other responses.',
+ mainheads: [
+ function (os, id) {
+ void (os);
+ return '<div data-netdata="' + id + '"'
+ + ' data-dimensions="success"'
+ + ' data-chart-library="gauge"'
+ + ' data-title="Successful"'
+ + ' data-units="requests/s"'
+ + ' data-gauge-adjust="width"'
+ + ' data-width="12%"'
+ + ' data-before="0"'
+ + ' data-after="-CHART_DURATION"'
+ + ' data-points="CHART_DURATION"'
+ + ' data-common-max="' + id + '"'
+ + ' data-colors="' + NETDATA.colors[0] + '"'
+ + ' data-decimal-digits="0"'
+ + ' role="application"></div>';
+ },
+
+ function (os, id) {
+ void (os);
+ return '<div data-netdata="' + id + '"'
+ + ' data-dimensions="redirect"'
+ + ' data-chart-library="gauge"'
+ + ' data-title="Redirects"'
+ + ' data-units="requests/s"'
+ + ' data-gauge-adjust="width"'
+ + ' data-width="12%"'
+ + ' data-before="0"'
+ + ' data-after="-CHART_DURATION"'
+ + ' data-points="CHART_DURATION"'
+ + ' data-common-max="' + id + '"'
+ + ' data-colors="' + NETDATA.colors[2] + '"'
+ + ' data-decimal-digits="0"'
+ + ' role="application"></div>';
+ },
+
+ function (os, id) {
+ void (os);
+ return '<div data-netdata="' + id + '"'
+ + ' data-dimensions="bad"'
+ + ' data-chart-library="gauge"'
+ + ' data-title="Bad Requests"'
+ + ' data-units="requests/s"'
+ + ' data-gauge-adjust="width"'
+ + ' data-width="12%"'
+ + ' data-before="0"'
+ + ' data-after="-CHART_DURATION"'
+ + ' data-points="CHART_DURATION"'
+ + ' data-common-max="' + id + '"'
+ + ' data-colors="' + NETDATA.colors[3] + '"'
+ + ' data-decimal-digits="0"'
+ + ' role="application"></div>';
+ },
+
+ function (os, id) {
+ void (os);
+ return '<div data-netdata="' + id + '"'
+ + ' data-dimensions="error"'
+ + ' data-chart-library="gauge"'
+ + ' data-title="Server Errors"'
+ + ' data-units="requests/s"'
+ + ' data-gauge-adjust="width"'
+ + ' data-width="12%"'
+ + ' data-before="0"'
+ + ' data-after="-CHART_DURATION"'
+ + ' data-points="CHART_DURATION"'
+ + ' data-common-max="' + id + '"'
+ + ' data-colors="' + NETDATA.colors[1] + '"'
+ + ' data-decimal-digits="0"'
+ + ' role="application"></div>';
+ }
+ ]
+ },
+
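+ // The squid variant of the same classification, as a sketch (hypothetical
+ // helper; squid uses 000 for some UDP requests and 6xx for broken upstreams):
+ //
+ //   function classifySquidStatus(code) {
+ //       if ((code >= 100 && code < 300) || code === 0 || code === 304) return 'success';
+ //       if (code >= 500 && code < 700) return 'error';    // includes squid's 6xx
+ //       if (code >= 300 && code < 400) return 'redirect'; // 304 already matched above
+ //       if (code >= 400 && code < 500) return 'bad';
+ //       return 'other';
+ //   }
+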
+ 'web_log.squid_response_codes': {
+ info: 'Web server responses by code family. ' +
+ 'According to HTTP standards <code>1xx</code> are informational responses, ' +
+ '<code>2xx</code> are successful responses, ' +
+ '<code>3xx</code> are redirects (although they include <b>304</b> which is used as "<b>not modified</b>"), ' +
+ '<code>4xx</code> are bad requests, ' +
+ '<code>5xx</code> are internal server errors. ' +
+ 'Squid also defines <code>000</code> mostly for UDP requests, and ' +
+ '<code>6xx</code> for broken upstream servers sending wrong headers. ' +
+ 'Finally, <code>other</code> are non-standard responses, and ' +
+ '<code>unmatched</code> counts the lines in the log file that are not matched by the plugin (<a href="https://github.com/netdata/netdata/issues/new?title=web_log%20reports%20unmatched%20lines&body=web_log%20plugin%20reports%20unmatched%20lines.%0A%0AThis%20is%20my%20log:%0A%0A%60%60%60txt%0A%0Aplease%20paste%20your%20web%20server%20log%20here%0A%0A%60%60%60" target="_blank">let us know</a> if you have any unmatched).'
+ },
+
+ 'web_log.squid_duration': {
+ mainheads: [
+ function (os, id) {
+ void (os);
+ return '<div data-netdata="' + id + '"'
+ + ' data-dimensions="avg"'
+ + ' data-chart-library="gauge"'
+ + ' data-title="Average Response Time"'
+ + ' data-units="milliseconds"'
+ + ' data-gauge-adjust="width"'
+ + ' data-width="12%"'
+ + ' data-before="0"'
+ + ' data-after="-CHART_DURATION"'
+ + ' data-points="CHART_DURATION"'
+ + ' data-colors="' + NETDATA.colors[4] + '"'
+ + ' data-decimal-digits="2"'
+ + ' role="application"></div>';
+ }
+ ]
+ },
+
+ 'web_log.squid_detailed_response_codes': {
+ info: 'Number of responses for each response code individually.'
+ },
+
+ 'web_log.squid_clients': {
+ info: 'Unique client IPs accessing squid, within each data collection iteration. If data collection is <b>per second</b>, this chart shows <b>unique client IPs per second</b>.'
+ },
+
+ 'web_log.squid_clients_all': {
+ info: 'Unique client IPs accessing squid since the last restart of netdata. This plugin keeps in memory all the unique IPs that have accessed the server. On very busy squid servers (several million unique IPs) you may want to disable this chart (check <a href="https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/web_log/web_log.conf" target="_blank"><code>/etc/netdata/python.d/web_log.conf</code></a>).'
+ },
+
+ 'web_log.squid_transport_methods': {
+ info: 'Breakdown per delivery method: <code>TCP</code> are requests on the HTTP port (usually 3128), ' +
+ '<code>UDP</code> are requests on the ICP port (usually 3130), or HTCP port (usually 4128). ' +
+ 'If ICP logging was disabled using the log_icp_queries option, no ICP replies will be logged. ' +
+ '<code>NONE</code> is used to state that squid delivered an unusual response or no response at all. ' +
+ 'Seen with cachemgr requests and errors, usually when the transaction fails before being classified into one of the above outcomes. ' +
+ 'Also seen with responses to <code>CONNECT</code> requests.'
+ },
+
+ 'web_log.squid_code': {
+ info: 'These are combined squid result status codes. A breakdown per component is given in the following charts. ' +
+ 'Check the <a href="http://wiki.squid-cache.org/SquidFaq/SquidLogs" target="_blank">squid documentation about them</a>.'
+ },
+
+ 'web_log.squid_handling_opts': {
+ info: 'These tags are optional and describe why the particular handling was performed or where the request came from. ' +
+ '<code>CLIENT</code> means that the client request placed limits affecting the response. Usually seen when the client issued a <b>no-cache</b> or analogous cache control command along with the request. Thus, the cache has to validate the object. ' +
+ '<code>IMS</code> states that the client sent a revalidation (conditional) request. ' +
+ '<code>ASYNC</code> is used when the request was generated internally by Squid. Usually this is background fetches for cache information exchanges, background revalidation from stale-while-revalidate cache controls, or ESI sub-objects being loaded. ' +
+ '<code>SWAPFAIL</code> is assigned when the object was believed to be in the cache, but could not be accessed. A new copy was requested from the server. ' +
+ '<code>REFRESH</code> when a revalidation (conditional) request was sent to the server. ' +
+ '<code>SHARED</code> when this request was combined with an existing transaction by collapsed forwarding. NOTE: the existing request is not marked as SHARED. ' +
+ '<code>REPLY</code> when particular handling was requested in the HTTP reply from server or peer. Usually seen on DENIED due to http_reply_access ACLs preventing delivery of servers response object to the client.'
+ },
+
+ 'web_log.squid_object_types': {
+ info: 'These tags are optional and describe what type of object was produced. ' +
+ '<code>NEGATIVE</code> is only seen on HIT responses, indicating the response was a cached error response. e.g. <b>404 not found</b>. ' +
+ '<code>STALE</code> means the object was cached and served stale. This is usually caused by stale-while-revalidate or stale-if-error cache controls. ' +
+ '<code>OFFLINE</code> when the requested object was retrieved from the cache during offline_mode. The offline mode never validates any object. ' +
+ '<code>INVALID</code> when an invalid request was received. An error response was delivered indicating what the problem was. ' +
+ '<code>FAIL</code> is only seen on <code>REFRESH</code> to indicate the revalidation request failed. The response object may be the server provided network error or the stale object which was being revalidated depending on stale-if-error cache control. ' +
+ '<code>MODIFIED</code> is only seen on <code>REFRESH</code> responses to indicate revalidation produced a new modified object. ' +
+ '<code>UNMODIFIED</code> is only seen on <code>REFRESH</code> responses to indicate revalidation produced a <b>304</b> (Not Modified) status, which was relayed to the client. ' +
+ '<code>REDIRECT</code> when squid generated an HTTP redirect response to this request.'
+ },
+
+ 'web_log.squid_cache_events': {
+ info: 'These tags are optional and describe whether the response was loaded from cache, network, or otherwise. ' +
+ '<code>HIT</code> when the response object delivered was the local cache object. ' +
+ '<code>MEM</code> when the response object came from memory cache, avoiding disk accesses. Only seen on HIT responses. ' +
+ '<code>MISS</code> when the response object delivered was the network response object. ' +
+ '<code>DENIED</code> when the request was denied by access controls. ' +
+ '<code>NOFETCH</code> an ICP specific type, indicating service is alive, but not to be used for this request (sent during "-Y" startup, or during frequent failures, a cache in hit only mode will return either UDP_HIT or UDP_MISS_NOFETCH. Neighbours will thus only fetch hits). ' +
+ '<code>TUNNEL</code> when a binary tunnel was established for this transaction.'
+ },
+
+ 'web_log.squid_transport_errors': {
+ info: 'These tags are optional and describe some error conditions which occurred during response delivery (if any). ' +
+ '<code>ABORTED</code> when the response was not completed due to the connection being aborted (usually by the client). ' +
+ '<code>TIMEOUT</code>, when the response was not completed due to a connection timeout.'
+ },
+
+ // ------------------------------------------------------------------------
+ // go web_log
+
+ 'web_log.type_requests': {
+ info: 'Web server responses by type. <code>success</code> includes <b>1xx</b>, <b>2xx</b>, <b>304</b> and <b>401</b>, <code>error</code> includes <b>5xx</b>, <code>redirect</code> includes <b>3xx</b> except <b>304</b>, <code>bad</code> includes <b>4xx</b> except <b>401</b>, <code>other</code> are all the other responses.',
+ mainheads: [
+ function (os, id) {
+ void (os);
+ return '<div data-netdata="' + id + '"'
+ + ' data-dimensions="success"'
+ + ' data-chart-library="gauge"'
+ + ' data-title="Successful"'
+ + ' data-units="requests/s"'
+ + ' data-gauge-adjust="width"'
+ + ' data-width="12%"'
+ + ' data-before="0"'
+ + ' data-after="-CHART_DURATION"'
+ + ' data-points="CHART_DURATION"'
+ + ' data-common-max="' + id + '"'
+ + ' data-colors="' + NETDATA.colors[0] + '"'
+ + ' data-decimal-digits="0"'
+ + ' role="application"></div>';
+ },
+
+ function (os, id) {
+ void (os);
+ return '<div data-netdata="' + id + '"'
+ + ' data-dimensions="redirect"'
+ + ' data-chart-library="gauge"'
+ + ' data-title="Redirects"'
+ + ' data-units="requests/s"'
+ + ' data-gauge-adjust="width"'
+ + ' data-width="12%"'
+ + ' data-before="0"'
+ + ' data-after="-CHART_DURATION"'
+ + ' data-points="CHART_DURATION"'
+ + ' data-common-max="' + id + '"'
+ + ' data-colors="' + NETDATA.colors[2] + '"'
+ + ' data-decimal-digits="0"'
+ + ' role="application"></div>';
+ },
+
+ function (os, id) {
+ void (os);
+ return '<div data-netdata="' + id + '"'
+ + ' data-dimensions="bad"'
+ + ' data-chart-library="gauge"'
+ + ' data-title="Bad Requests"'
+ + ' data-units="requests/s"'
+ + ' data-gauge-adjust="width"'
+ + ' data-width="12%"'
+ + ' data-before="0"'
+ + ' data-after="-CHART_DURATION"'
+ + ' data-points="CHART_DURATION"'
+ + ' data-common-max="' + id + '"'
+ + ' data-colors="' + NETDATA.colors[3] + '"'
+ + ' data-decimal-digits="0"'
+ + ' role="application"></div>';
+ },
+
+ function (os, id) {
+ void (os);
+ return '<div data-netdata="' + id + '"'
+ + ' data-dimensions="error"'
+ + ' data-chart-library="gauge"'
+ + ' data-title="Server Errors"'
+ + ' data-units="requests/s"'
+ + ' data-gauge-adjust="width"'
+ + ' data-width="12%"'
+ + ' data-before="0"'
+ + ' data-after="-CHART_DURATION"'
+ + ' data-points="CHART_DURATION"'
+ + ' data-common-max="' + id + '"'
+ + ' data-colors="' + NETDATA.colors[1] + '"'
+ + ' data-decimal-digits="0"'
+ + ' role="application"></div>';
+ }
+ ]
+ },
+
+ 'web_log.request_processing_time': {
+ mainheads: [
+ function (os, id) {
+ void (os);
+ return '<div data-netdata="' + id + '"'
+ + ' data-dimensions="avg"'
+ + ' data-chart-library="gauge"'
+ + ' data-title="Average Response Time"'
+ + ' data-units="milliseconds"'
+ + ' data-gauge-adjust="width"'
+ + ' data-width="12%"'
+ + ' data-before="0"'
+ + ' data-after="-CHART_DURATION"'
+ + ' data-points="CHART_DURATION"'
+ + ' data-colors="' + NETDATA.colors[4] + '"'
+ + ' data-decimal-digits="2"'
+ + ' role="application"></div>';
+ }
+ ]
+ },
+ // ------------------------------------------------------------------------
+ // Fronius Solar Power
+
+ 'fronius.power': {
+ info: 'Positive <code>Grid</code> values mean that power is coming from the grid. Negative values are excess power that is going back into the grid, possibly being sold. ' +
+ '<code>Photovoltaics</code> is the power generated from the solar panels. ' +
+ '<code>Accumulator</code> is the stored power in the accumulator, if one is present.'
+ },
+
+ 'fronius.autonomy': {
+ commonMin: true,
+ commonMax: true,
+ valueRange: "[0, 100]",
+ info: 'The <code>Autonomy</code> is the percentage of how autonomous the installation is. An autonomy of 100 % means that the installation is producing more energy than is needed. ' +
+ 'The <code>Self consumption</code> indicates the ratio between the current power generated and the current load. When it reaches 100 %, the <code>Autonomy</code> declines, since the solar panels cannot produce enough energy and need support from the grid.'
+ },
+
+ 'fronius.energy.today': {
+ commonMin: true,
+ commonMax: true,
+ valueRange: "[0, null]"
+ },
+
+ // ------------------------------------------------------------------------
+ // Stiebel Eltron Heat pump installation
+
+ 'stiebeleltron.system.roomtemp': {
+ commonMin: true,
+ commonMax: true,
+ valueRange: "[0, null]"
+ },
+
+ // ------------------------------------------------------------------------
+ // Port check
+
+ 'portcheck.latency': {
+ info: 'The <code>latency</code> describes the time spent connecting to a TCP port. No data is sent or received. ' +
+ 'Currently, the accuracy of the latency is low, so it should be used as a reference only.'
+ },
+
+ 'portcheck.status': {
+ valueRange: "[0, 1]",
+ info: 'The <code>status</code> chart verifies the availability of the service. ' +
+ 'Each status dimension will have a value of <code>1</code> if triggered. Dimension <code>success</code> is <code>1</code> only if the connection could be established. ' +
+ 'This chart is most useful for alarms and third-party apps.'
+ },
+
+ // ------------------------------------------------------------------------
+
+ 'chrony.system': {
+ info: 'In normal operation, chronyd never steps the system clock, because any jump in the timescale can have adverse consequences for certain application programs. Instead, any error in the system clock is corrected by slightly speeding up or slowing down the system clock until the error has been removed, and then returning to the system clock’s normal speed. A consequence of this is that there will be a period when the system clock (as read by other programs using the <code>gettimeofday()</code> system call, or by the <code>date</code> command in the shell) will be different from chronyd\'s estimate of the current true time (which it reports to NTP clients when it is operating in server mode). The value reported on this line is the difference due to this effect.',
+ colors: NETDATA.colors[3]
+ },
+
+ 'chrony.offsets': {
+ info: '<code>last offset</code> is the estimated local offset on the last clock update. <code>RMS offset</code> is a long-term average of the offset value.',
+ height: 0.5
+ },
+
+ 'chrony.stratum': {
+ info: 'The <code>stratum</code> indicates how many hops away from a computer with an attached reference clock we are. Such a computer is a stratum-1 computer.',
+ decimalDigits: 0,
+ height: 0.5
+ },
+
+ 'chrony.root': {
+ info: 'Estimated delays against the root time server this system is synchronized with. <code>delay</code> is the total of the network path delays to the stratum-1 computer from which the computer is ultimately synchronised. <code>dispersion</code> is the total dispersion accumulated through all the computers back to the stratum-1 computer from which the computer is ultimately synchronised. Dispersion is due to system clock resolution, statistical measurement variations etc.'
+ },
+
+ 'chrony.frequency': {
+ info: 'The <code>frequency</code> is the rate by which the system\'s clock would be wrong if chronyd was not correcting it. It is expressed in ppm (parts per million). For example, a value of 1 ppm would mean that when the system\'s clock thinks it has advanced 1 second, it has actually advanced by 1.000001 seconds relative to true time.',
+ colors: NETDATA.colors[0]
+ },
+
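+ // The ppm arithmetic from the text above, as a sketch: a clock running fast
+ // by f ppm advances t * (1 + f / 1e6) true seconds for every t clock seconds,
+ // e.g. 1 * (1 + 1 / 1e6) = 1.000001 for f = 1 ppm.
+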
+ 'chrony.residualfreq': {
+ info: 'This shows the <code>residual frequency</code> for the currently selected reference source. ' +
+ 'It reflects any difference between what the measurements from the reference source indicate the ' +
+ 'frequency should be and the frequency currently being used. The reason this is not always zero is ' +
+ 'that a smoothing procedure is applied to the frequency. Each time a measurement from the reference ' +
+ 'source is obtained and a new residual frequency computed, the estimated accuracy of this residual ' +
+ 'is compared with the estimated accuracy (see <code>skew</code>) of the existing frequency value. ' +
+ 'A weighted average is computed for the new frequency, with weights depending on these accuracies. ' +
+ 'If the measurements from the reference source follow a consistent trend, the residual will be ' +
+ 'driven to zero over time.',
+ height: 0.5,
+ colors: NETDATA.colors[3]
+ },
+
+ 'chrony.skew': {
+ info: 'The estimated error bound on the frequency.',
+ height: 0.5,
+ colors: NETDATA.colors[5]
+ },
+
+ 'couchdb.active_tasks': {
+ info: 'Active tasks running on this CouchDB <b>cluster</b>. Four types of tasks currently exist: indexer (view building), replication, database compaction and view compaction.'
+ },
+
+ 'couchdb.replicator_jobs': {
+ info: 'Detailed breakdown of any replication jobs in progress on this node. For more information, see the <a href="http://docs.couchdb.org/en/latest/replication/replicator.html" target="_blank">replicator documentation</a>.'
+ },
+
+ 'couchdb.open_files': {
+ info: 'Count of all files held open by CouchDB. If this value seems pegged at 1024 or 4096, your server process is probably hitting the open file handle limit and <a href="http://docs.couchdb.org/en/latest/maintenance/performance.html#pam-and-ulimit" target="_blank">needs to be increased.</a>'
+ },
+
+ 'btrfs.disk': {
+ info: 'Physical disk usage of BTRFS. The disk space reported here is the raw physical disk space assigned to the BTRFS volume (i.e. <b>before any RAID levels</b>). BTRFS uses a two-stage allocator, first allocating large regions of disk space for one type of block (data, metadata, or system), and then using a regular block allocator inside those regions. <code>unallocated</code> is the physical disk space that is not allocated yet and is available to become data, metadata or system on demand. When <code>unallocated</code> is zero, all available disk space has been allocated to a specific function. Healthy volumes should ideally have at least five percent of their total space <code>unallocated</code>. You can keep your volume healthy by running the <code>btrfs balance</code> command on it regularly (check <code>man btrfs-balance</code> for more info). Note that some of the space listed as <code>unallocated</code> may not actually be usable if the volume uses devices of different sizes.',
+ colors: [NETDATA.colors[12]]
+ },
+
+ 'btrfs.data': {
+ info: 'Logical disk usage for BTRFS data. Data chunks are used to store the actual file data (file contents). The disk space reported here is the usable allocation (i.e. after any striping or replication). Healthy volumes should ideally have no more than a few GB of free space reported here persistently. Running <code>btrfs balance</code> can help here.'
+ },
+
+ 'btrfs.metadata': {
+ info: 'Logical disk usage for BTRFS metadata. Metadata chunks store most of the filesystem\'s internal structures, as well as information like directory structure and file names. The disk space reported here is the usable allocation (i.e. after any striping or replication). Healthy volumes should ideally have no more than a few GB of free space reported here persistently. Running <code>btrfs balance</code> can help here.'
+ },
+
+ 'btrfs.system': {
+ info: 'Logical disk usage for BTRFS system. System chunks store information about the allocation of other chunks. The disk space reported here is the usable allocation (i.e. after any striping or replication). The values reported here should be relatively small compared to Data and Metadata, and will scale with the volume size and overall space usage.'
+ },
+
+ // ------------------------------------------------------------------------
+ // RabbitMQ
+
+ // info: the text above the charts
+ // heads: the representation of the chart at the top the subsection (second level menu)
+ // mainheads: the representation of the chart at the top of the section (first level menu)
+ // colors: the dimension colors of the chart (the default colors are appended)
+ // height: the ratio of the chart height relative to the default
+
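+ // For example, a hypothetical entry combining these fields (the chart id
+ // 'example.chart' does not exist; this is only an illustration):
+ //
+ //   'example.chart': {
+ //       info: 'Text shown above the chart.',
+ //       colors: NETDATA.colors[3],
+ //       height: 0.5,
+ //       heads: [ netdataDashboard.gaugeChart('Title', '12%', 'dimension') ]
+ //   },
+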
+ 'rabbitmq.queued_messages': {
+ info: 'Overall total of ready and unacknowledged queued messages. Messages that are delivered immediately are not counted here.'
+ },
+
+ 'rabbitmq.message_rates': {
+        info: 'Overall messaging rates including acknowledgements, deliveries, redeliveries, and publishes.'
+ },
+
+ 'rabbitmq.global_counts': {
+ info: 'Overall totals for channels, consumers, connections, queues and exchanges.'
+ },
+
+ 'rabbitmq.file_descriptors': {
+        info: 'Total number of used file descriptors. See <code><a href="https://www.rabbitmq.com/production-checklist.html#resource-limits-file-handle-limit" target="_blank">Open File Limits</a></code> for further details.',
+ colors: NETDATA.colors[3]
+ },
+
+ 'rabbitmq.sockets': {
+ info: 'Total number of used socket descriptors. Each used socket also counts as a used file descriptor. See <code><a href="https://www.rabbitmq.com/production-checklist.html#resource-limits-file-handle-limit" target="_blank">Open File Limits</a></code> for further details.',
+ colors: NETDATA.colors[3]
+ },
+
+ 'rabbitmq.processes': {
+ info: 'Total number of processes running within the Erlang VM. This is not the same as the number of processes running on the host.',
+ colors: NETDATA.colors[3]
+ },
+
+ 'rabbitmq.erlang_run_queue': {
+ info: 'Number of Erlang processes the Erlang schedulers have queued to run.',
+ colors: NETDATA.colors[3]
+ },
+
+ 'rabbitmq.memory': {
+        info: 'Total amount of memory used by RabbitMQ. This is a complex statistic that can be further analyzed in the management UI. See <code><a href="https://www.rabbitmq.com/production-checklist.html#resource-limits-ram" target="_blank">Memory</a></code> for further details.',
+ colors: NETDATA.colors[3]
+ },
+
+ 'rabbitmq.disk_space': {
+        info: 'Total amount of disk space consumed by the message store(s). See <code><a href="https://www.rabbitmq.com/production-checklist.html#resource-limits-disk-space" target="_blank">Disk Space Limits</a></code> for further details.',
+ colors: NETDATA.colors[3]
+ },
+
+ 'rabbitmq.queue_messages': {
+        info: 'Total number of messages and their states in this queue.',
+ colors: NETDATA.colors[3]
+ },
+
+ 'rabbitmq.queue_messages_stats': {
+        info: 'Overall messaging rates including acknowledgements, deliveries, redeliveries, and publishes.',
+ colors: NETDATA.colors[3]
+ },
+
+ // ------------------------------------------------------------------------
+ // ntpd
+
+ 'ntpd.sys_offset': {
+        info: 'For hosts without any time-critical services, an offset of &lt; 100 ms should be acceptable, even with high network latencies. For hosts with time-critical services, an offset of about 0.01 ms or less can be achieved by using peers with low delays and configuring optimal <b>poll exponent</b> values.',
+ colors: NETDATA.colors[4]
+ },
+
+ 'ntpd.sys_jitter': {
+ info: 'The jitter statistics are exponentially-weighted RMS averages. The system jitter is defined in the NTPv4 specification; the clock jitter statistic is computed by the clock discipline module.'
+ },
+
+ 'ntpd.sys_frequency': {
+ info: 'The frequency offset is shown in ppm (parts per million) relative to the frequency of the system. The frequency correction needed for the clock can vary significantly between boots and also due to external influences like temperature or radiation.',
+ colors: NETDATA.colors[2],
+ height: 0.6
+ },
+
+ 'ntpd.sys_wander': {
+ info: 'The wander statistics are exponentially-weighted RMS averages.',
+ colors: NETDATA.colors[3],
+ height: 0.6
+ },
+
+ 'ntpd.sys_rootdelay': {
+ info: 'The rootdelay is the round-trip delay to the primary reference clock, similar to the delay shown by the <code>ping</code> command. A lower delay should result in a lower clock offset.',
+ colors: NETDATA.colors[1]
+ },
+
+ 'ntpd.sys_stratum': {
+        info: 'The distance in "hops" to the primary reference clock.',
+ colors: NETDATA.colors[5],
+ height: 0.3
+ },
+
+ 'ntpd.sys_tc': {
+ info: 'Time constants and poll intervals are expressed as exponents of 2. The default poll exponent of 6 corresponds to a poll interval of 64 s. For typical Internet paths, the optimum poll interval is about 64 s. For fast LANs with modern computers, a poll exponent of 4 (16 s) is appropriate. The <a href="http://doc.ntp.org/current-stable/poll.html">poll process</a> sends NTP packets at intervals determined by the clock discipline algorithm.',
+ height: 0.5
+ },
+
+ 'ntpd.sys_precision': {
+ colors: NETDATA.colors[6],
+ height: 0.2
+ },
+
+ 'ntpd.peer_offset': {
+ info: 'The offset of the peer clock relative to the system clock in milliseconds. Smaller values here weight peers more heavily for selection after the initial synchronization of the local clock. For a system providing time service to other systems, these should be as low as possible.'
+ },
+
+ 'ntpd.peer_delay': {
+        info: 'The round-trip time (RTT) for communication with the peer, similar to the delay shown by the <code>ping</code> command. Not as critical as either the offset or jitter, but still factored into the selection algorithm (because as a general rule, lower delay means more accurate time). In most cases, it should be below 100 ms.'
+ },
+
+ 'ntpd.peer_dispersion': {
+ info: 'This is a measure of the estimated error between the peer and the local system. Lower values here are better.'
+ },
+
+ 'ntpd.peer_jitter': {
+ info: 'This is essentially a remote estimate of the peer\'s <code>system_jitter</code> value. Lower values here weight highly in favor of peer selection, and this is a good indicator of overall quality of a given time server (good servers will have values not exceeding single digit milliseconds here, with high quality stratum one servers regularly having sub-millisecond jitter).'
+ },
+
+ 'ntpd.peer_xleave': {
+ info: 'This variable is used in interleaved mode (used only in NTP symmetric and broadcast modes). See <a href="http://doc.ntp.org/current-stable/xleave.html">NTP Interleaved Modes</a>.'
+ },
+
+ 'ntpd.peer_rootdelay': {
+ info: 'For a stratum 1 server, this is the access latency for the reference clock. For lower stratum servers, it is the sum of the <code>peer_delay</code> and <code>peer_rootdelay</code> for the system they are syncing off of. Similarly to <code>peer_delay</code>, lower values here are technically better, but have limited influence in peer selection.'
+ },
+
+ 'ntpd.peer_rootdisp': {
+        info: 'This is the same as <code>peer_rootdelay</code>, but measures accumulated <code>peer_dispersion</code> instead of accumulated <code>peer_delay</code>.'
+ },
+
+ 'ntpd.peer_hmode': {
+ info: 'The <code>peer_hmode</code> and <code>peer_pmode</code> variables give info about what mode the packets being sent to and received from a given peer are. Mode 1 is symmetric active (both the local system and the remote peer have each other declared as peers in <code>/etc/ntp.conf</code>), Mode 2 is symmetric passive (only one side has the other declared as a peer), Mode 3 is client, Mode 4 is server, and Mode 5 is broadcast (also used for multicast and manycast operation).',
+ height: 0.2
+ },
+
+ 'ntpd.peer_pmode': {
+ height: 0.2
+ },
+
+ 'ntpd.peer_hpoll': {
+ info: 'The <code>peer_hpoll</code> and <code>peer_ppoll</code> variables are log2 representations of the polling interval in seconds.',
+ height: 0.5
+ },
+
+ 'ntpd.peer_ppoll': {
+ height: 0.5
+ },
+
+ 'ntpd.peer_precision': {
+ height: 0.2
+ },
+
+ 'spigotmc.tps': {
+ info: 'The running 1, 5, and 15 minute average number of server ticks per second. An idealized server will show 20.0 for all values, but in practice this almost never happens. Typical servers should show approximately 19.98-20.0 here. Lower values indicate progressively more server-side lag (and thus that you need better hardware for your server or a lower user limit). For every 0.05 ticks below 20, redstone clocks will lag behind by approximately 0.25%. Values below approximately 19.50 may interfere with complex free-running redstone circuits and will noticeably slow down growth.'
+ },
+
+ 'spigotmc.users': {
+        info: 'The number of currently connected users on the monitored Spigot server.'
+ },
+
+ 'boinc.tasks': {
+        info: 'The total number of tasks and the number of active tasks. Active tasks are those which are either currently being processed, or are partially processed but suspended.'
+ },
+
+ 'boinc.states': {
+ info: 'Counts of tasks in each task state. The normal sequence of states is <code>New</code>, <code>Downloading</code>, <code>Ready to Run</code>, <code>Uploading</code>, <code>Uploaded</code>. Tasks which are marked <code>Ready to Run</code> may be actively running, or may be waiting to be scheduled. <code>Compute Errors</code> are tasks which failed for some reason during execution. <code>Aborted</code> tasks were manually cancelled, and will not be processed. <code>Failed Uploads</code> are otherwise finished tasks which failed to upload to the server, and usually indicate networking issues.'
+ },
+
+ 'boinc.sched': {
+        info: 'Counts of active tasks in each scheduling state. <code>Scheduled</code> tasks are the ones which will run if the system is permitted to process tasks. <code>Preempted</code> tasks are on standby, and will run if a <code>Scheduled</code> task stops running for some reason. <code>Uninitialized</code> tasks should never be present, and indicate that the scheduler has not tried to schedule them yet.'
+ },
+
+ 'boinc.process': {
+ info: 'Counts of active tasks in each process state. <code>Executing</code> tasks are running right now. <code>Suspended</code> tasks have an associated process, but are not currently running (either because the system isn\'t processing any tasks right now, or because they have been preempted by higher priority tasks). <code>Quit</code> tasks are exiting gracefully. <code>Aborted</code> tasks exceeded some resource limit, and are being shut down. <code>Copy Pending</code> tasks are waiting on a background file transfer to finish. <code>Uninitialized</code> tasks do not have an associated process yet.'
+ },
+
+ 'w1sensor.temp': {
+ info: 'Temperature derived from 1-Wire temperature sensors.'
+ },
+
+ 'logind.sessions': {
+ info: 'Shows the number of active sessions of each type tracked by logind.'
+ },
+
+ 'logind.users': {
+ info: 'Shows the number of active users of each type tracked by logind.'
+ },
+
+ 'logind.seats': {
+ info: 'Shows the number of active seats tracked by logind. Each seat corresponds to a combination of a display device and input device providing a physical presence for the system.'
+ },
+
+ // ------------------------------------------------------------------------
+ // ProxySQL
+
+ 'proxysql.pool_status': {
+ info: 'The status of the backend servers. ' +
+ '<code>1=ONLINE</code> backend server is fully operational, ' +
+        '<code>2=SHUNNED</code> backend server is temporarily taken out of use, because of either too many connection errors within a short time, or replication lag exceeding the allowed threshold, ' +
+        '<code>3=OFFLINE_SOFT</code> when a server is put into OFFLINE_SOFT mode, new incoming connections aren\'t accepted anymore, while the existing connections are kept until they become inactive. In other words, connections are kept in use until the current transaction is completed. This allows a backend to be detached gracefully, ' +
+        '<code>4=OFFLINE_HARD</code> when a server is put into OFFLINE_HARD mode, the existing connections are dropped and new incoming connections aren\'t accepted either. This is equivalent to deleting the server from a hostgroup, or temporarily taking it out of the hostgroup for maintenance work, ' +
+ '<code>-1</code> Unknown status.'
+ },
+
+ 'proxysql.pool_net': {
+ info: 'The amount of data sent to/received from the backend ' +
+        '(this does not include metadata: packets\' headers, OK/ERR packets, fields\' descriptions, etc.).'
+ },
+
+ 'proxysql.pool_overall_net': {
+        info: 'The amount of data sent to/received from all backends ' +
+        '(this does not include metadata: packets\' headers, OK/ERR packets, fields\' descriptions, etc.).'
+ },
+
+ 'proxysql.questions': {
+ info: '<code>questions</code> total number of queries sent from frontends, ' +
+ '<code>slow_queries</code> number of queries that ran for longer than the threshold in milliseconds defined in global variable <code>mysql-long_query_time</code>. '
+ },
+
+ 'proxysql.connections': {
+        info: '<code>aborted</code> number of frontend connections aborted due to invalid credentials or max_connections reached, ' +
+ '<code>connected</code> number of frontend connections currently connected, ' +
+ '<code>created</code> number of frontend connections created, ' +
+ '<code>non_idle</code> number of frontend connections that are not currently idle. '
+ },
+
+ 'proxysql.pool_latency': {
+        info: 'The current ping time in microseconds, as reported by Monitor.'
+ },
+
+ 'proxysql.queries': {
+ info: 'The number of queries routed towards this particular backend server.'
+ },
+
+ 'proxysql.pool_used_connections': {
+        info: 'The number of connections currently used by ProxySQL for sending queries to the backend server.'
+ },
+
+ 'proxysql.pool_free_connections': {
+        info: 'The number of connections that are currently free. They are kept open in order to minimize the time cost of sending a query to the backend server.'
+ },
+
+ 'proxysql.pool_ok_connections': {
+        info: 'The number of connections that were established successfully.'
+ },
+
+ 'proxysql.pool_error_connections': {
+        info: 'The number of connections that weren\'t established successfully.'
+ },
+
+ 'proxysql.commands_count': {
+        info: 'The total number of commands of that type executed.'
+ },
+
+ 'proxysql.commands_duration': {
+        info: 'The total time spent executing commands of that type, in milliseconds.'
+ },
+
+ // ------------------------------------------------------------------------
+ // Power Supplies
+
+ 'powersupply.capacity': {
+ info: undefined
+ },
+
+ 'powersupply.charge': {
+ info: undefined
+ },
+
+ 'powersupply.energy': {
+ info: undefined
+ },
+
+ 'powersupply.voltage': {
+ info: undefined
+ },
+
+ // ------------------------------------------------------------------------
+ // VMware vSphere
+
+ // Host specific
+ 'vsphere.host_mem_usage_percentage': {
+ info: 'Percentage of used machine memory: <code>consumed</code> / <code>machine-memory-size</code>.'
+ },
+
+ 'vsphere.host_mem_usage': {
+ info:
+        '<code>granted</code> is the amount of machine memory that is mapped for a host; ' +
+        'it equals the sum of all granted metrics for all powered-on virtual machines, plus machine memory for vSphere services on the host. ' +
+        '<code>consumed</code> is the amount of machine memory used on the host; it includes memory used by the Service Console, the VMkernel, vSphere services, plus the total consumed metrics for all running virtual machines. ' +
+        '<code>consumed</code> = <code>total host memory</code> - <code>free host memory</code>. ' +
+        '<code>active</code> is the sum of all active metrics for all powered-on virtual machines plus vSphere services (such as COS, vpxa) on the host. ' +
+        '<code>shared</code> is the sum of all shared metrics for all powered-on virtual machines, plus the amount for vSphere services on the host. ' +
+        '<code>sharedcommon</code> is the amount of machine memory that is shared by all powered-on virtual machines and vSphere services on the host. ' +
+        '<code>shared</code> - <code>sharedcommon</code> = machine memory (host memory) savings (KB). ' +
+        'For details see <a href="https://docs.vmware.com/en/VMware-vSphere/6.5/com.vmware.vsphere.resmgmt.doc/GUID-BFDC988B-F53D-4E97-9793-A002445AFAE1.html">Measuring and Differentiating Types of Memory Usage</a> and ' +
+        '<a href="https://www.vmware.com/support/developer/converter-sdk/conv51_apireference/memory_counters.html">Memory Counters</a> articles.'
+ },
+
+ 'vsphere.host_mem_swap_rate': {
+ info:
+ 'This statistic refers to VMkernel swapping and not to guest OS swapping. ' +
+        '<code>in</code> is the sum of <code>swapinRate</code> values for all powered-on virtual machines on the host. ' +
+        '<code>swapinRate</code> is the rate at which the VMkernel reads data into machine memory from the swap file. ' +
+        '<code>out</code> is the sum of <code>swapoutRate</code> values for all powered-on virtual machines on the host. ' +
+        '<code>swapoutRate</code> is the rate at which the VMkernel writes to the virtual machine’s swap file from machine memory.'
+ },
+
+ // VM specific
+ 'vsphere.vm_mem_usage_percentage': {
+        info: 'Percentage of used virtual machine “physical” memory: <code>active</code> / <code>virtual machine configured size</code>.'
+ },
+
+ 'vsphere.vm_mem_usage': {
+ info:
+        '<code>granted</code> is the amount of guest “physical” memory that is mapped to machine memory; it includes the <code>shared</code> memory amount. ' +
+        '<code>consumed</code> is the amount of guest “physical” memory consumed by the virtual machine for guest memory, ' +
+        '<code>consumed</code> = <code>granted</code> - <code>memory saved due to memory sharing</code>. ' +
+        '<code>active</code> is the amount of memory that is actively used, as estimated by the VMkernel based on recently touched memory pages. ' +
+        '<code>shared</code> is the amount of guest “physical” memory shared with other virtual machines (through the VMkernel’s transparent page-sharing mechanism, a RAM de-duplication technique). ' +
+ 'For details see <a href="https://docs.vmware.com/en/VMware-vSphere/6.5/com.vmware.vsphere.resmgmt.doc/GUID-BFDC988B-F53D-4E97-9793-A002445AFAE1.html">Measuring and Differentiating Types of Memory Usage</a> and ' +
+ '<a href="https://www.vmware.com/support/developer/converter-sdk/conv51_apireference/memory_counters.html">Memory Counters</a> articles.'
+
+ },
+
+ 'vsphere.vm_mem_swap_rate': {
+ info:
+ 'This statistic refers to VMkernel swapping and not to guest OS swapping. ' +
+        '<code>in</code> is the rate at which the VMkernel reads data into machine memory from the swap file. ' +
+        '<code>out</code> is the rate at which the VMkernel writes to the virtual machine’s swap file from machine memory.'
+ },
+
+ 'vsphere.vm_mem_swap': {
+ info:
+ 'This statistic refers to VMkernel swapping and not to guest OS swapping. ' +
+        '<code>swapped</code> is the amount of guest physical memory swapped out to the virtual machine\'s swap file by the VMkernel. ' +
+ 'Swapped memory stays on disk until the virtual machine needs it.'
+ },
+
+ // Common
+ 'vsphere.cpu_usage_total': {
+ info: 'Summary CPU usage statistics across all CPUs/cores.'
+ },
+
+ 'vsphere.net_bandwidth_total': {
+ info: 'Summary receive/transmit statistics across all network interfaces.'
+ },
+
+ 'vsphere.net_packets_total': {
+ info: 'Summary receive/transmit statistics across all network interfaces.'
+ },
+
+ 'vsphere.net_errors_total': {
+ info: 'Summary receive/transmit statistics across all network interfaces.'
+ },
+
+ 'vsphere.net_drops_total': {
+ info: 'Summary receive/transmit statistics across all network interfaces.'
+ },
+
+ 'vsphere.disk_usage_total': {
+ info: 'Summary read/write statistics across all disks.'
+ },
+
+ 'vsphere.disk_max_latency': {
+ info: '<code>latency</code> is highest latency value across all disks.'
+ },
+
+ 'vsphere.overall_status': {
+        info: '<code>0</code> is unknown, <code>1</code> is OK, <code>2</code> means the entity might have a problem, <code>3</code> means the entity definitely has a problem.'
+ },
+
+ // ------------------------------------------------------------------------
+ // VCSA
+ 'vcsa.system_health': {
+ info:
+ '<code>-1</code>: unknown; ' +
+ '<code>0</code>: all components are healthy; ' +
+ '<code>1</code>: one or more components might become overloaded soon; ' +
+ '<code>2</code>: one or more components in the appliance might be degraded; ' +
+ '<code>3</code>: one or more components might be in an unusable status and the appliance might become unresponsive soon; ' +
+ '<code>4</code>: no health data is available.'
+ },
+
+ 'vcsa.components_health': {
+ info:
+ '<code>-1</code>: unknown; ' +
+ '<code>0</code>: healthy; ' +
+ '<code>1</code>: healthy, but may have some problems; ' +
+ '<code>2</code>: degraded, and may have serious problems; ' +
+ '<code>3</code>: unavailable, or will stop functioning soon; ' +
+ '<code>4</code>: no health data is available.'
+ },
+
+ 'vcsa.software_updates_health': {
+ info:
+        '<code>softwarepackages</code> represents information on software updates available in the remote vSphere Update Manager repository.<br>' +
+ '<code>-1</code>: unknown; ' +
+ '<code>0</code>: no updates available; ' +
+ '<code>2</code>: non-security updates are available; ' +
+ '<code>3</code>: security updates are available; ' +
+        '<code>4</code>: an error occurred while retrieving information on software updates.'
+ },
+
+ // ------------------------------------------------------------------------
+ // Zookeeper
+
+ 'zookeeper.server_state': {
+ info:
+ '<code>0</code>: unknown, ' +
+ '<code>1</code>: leader, ' +
+ '<code>2</code>: follower, ' +
+ '<code>3</code>: observer, ' +
+ '<code>4</code>: standalone.'
+ },
+
+ // ------------------------------------------------------------------------
+ // Squidlog
+
+ 'squidlog.requests': {
+ info: 'Total number of requests (log lines read). It includes <code>unmatched</code>.'
+ },
+
+ 'squidlog.excluded_requests': {
+ info: '<code>unmatched</code> counts the lines in the log file that are not matched by the plugin parser (<a href="https://github.com/netdata/netdata/issues/new?title=squidlog%20reports%20unmatched%20lines&body=squidlog%20plugin%20reports%20unmatched%20lines.%0A%0AThis%20is%20my%20log:%0A%0A%60%60%60txt%0A%0Aplease%20paste%20your%20squid%20server%20log%20here%0A%0A%60%60%60" target="_blank">let us know</a> if you have any unmatched).'
+ },
+
+ 'squidlog.type_requests': {
+ info: 'Requests by response type:<br>' +
+ '<ul>' +
+ ' <li><code>success</code> includes 1xx, 2xx, 0, 304, 401.</li>' +
+ ' <li><code>error</code> includes 5xx and 6xx.</li>' +
+ ' <li><code>redirect</code> includes 3xx except 304.</li>' +
+ ' <li><code>bad</code> includes 4xx except 401.</li>' +
+ ' </ul>'
+ },
+
+ 'squidlog.http_status_code_class_responses': {
+        info: 'The HTTP response status code classes. According to <a href="https://tools.ietf.org/html/rfc7231" target="_blank">rfc7231</a>:<br>' +
+        '<ul>' +
+        ' <li><code>1xx</code> is informational responses.</li>' +
+        ' <li><code>2xx</code> is successful responses.</li>' +
+        ' <li><code>3xx</code> is redirects.</li>' +
+        ' <li><code>4xx</code> is bad requests.</li>' +
+        ' <li><code>5xx</code> is internal server errors.</li>' +
+        ' </ul>' +
+        'Squid also uses <code>0</code> when the result code is unavailable, and <code>6xx</code> to signal an invalid header (a proxy error).'
+ },
+
+ 'squidlog.http_status_code_responses': {
+        info: 'Number of responses for each HTTP response status code individually.'
+ },
+
+ 'squidlog.uniq_clients': {
+ info: 'Unique clients (requesting instances), within each data collection iteration. If data collection is <b>per second</b>, this chart shows <b>unique clients per second</b>.'
+ },
+
+ 'squidlog.bandwidth': {
+ info: 'The size is the amount of data delivered to the clients. Mind that this does not constitute the net object size, as headers are also counted. ' +
+ 'Also, failed requests may deliver an error page, the size of which is also logged here.'
+ },
+
+ 'squidlog.response_time': {
+ info: 'The elapsed time considers how many milliseconds the transaction busied the cache. It differs in interpretation between TCP and UDP:' +
+ '<ul>' +
+ ' <li><code>TCP</code> this is basically the time from having received the request to when Squid finishes sending the last byte of the response.</li>' +
+ ' <li><code>UDP</code> this is the time between scheduling a reply and actually sending it.</li>' +
+ ' </ul>' +
+ 'Please note that <b>the entries are logged after the reply finished being sent</b>, not during the lifetime of the transaction.'
+ },
+
+ 'squidlog.cache_result_code_requests': {
+ info: 'The Squid result code is composed of several tags (separated by underscore characters) which describe the response sent to the client. ' +
+ 'Check the <a href="https://wiki.squid-cache.org/SquidFaq/SquidLogs#Squid_result_codes">squid documentation</a> about them.'
+ },
+
+ 'squidlog.cache_result_code_transport_tag_requests': {
+ info: 'These tags are always present and describe delivery method.<br>' +
+ '<ul>' +
+ ' <li><code>TCP</code> requests on the HTTP port (usually 3128).</li>' +
+ ' <li><code>UDP</code> requests on the ICP port (usually 3130) or HTCP port (usually 4128).</li>' +
+ ' <li><code>NONE</code> Squid delivered an unusual response or no response at all. Seen with cachemgr requests and errors, usually when the transaction fails before being classified into one of the above outcomes. Also seen with responses to CONNECT requests.</li>' +
+ ' </ul>'
+ },
+
+ 'squidlog.cache_result_code_handling_tag_requests': {
+ info: 'These tags are optional and describe why the particular handling was performed or where the request came from.<br>' +
+ '<ul>' +
+ ' <li><code>CF</code> at least one request in this transaction was collapsed. See <a href="http://www.squid-cache.org/Doc/config/collapsed_forwarding/" target="_blank">collapsed_forwarding</a> for more details about request collapsing.</li>' +
+        ' <li><code>CLIENT</code> usually seen when the client issued a "no-cache" or analogous cache-control command along with the request. Thus, the cache has to validate the object.</li>' +
+ ' <li><code>IMS</code> the client sent a revalidation (conditional) request.</li>' +
+ ' <li><code>ASYNC</code> the request was generated internally by Squid. Usually this is background fetches for cache information exchanges, background revalidation from <i>stale-while-revalidate</i> cache controls, or ESI sub-objects being loaded.</li>' +
+ ' <li><code>SWAPFAIL</code> the object was believed to be in the cache, but could not be accessed. A new copy was requested from the server.</li>' +
+ ' <li><code>REFRESH</code> a revalidation (conditional) request was sent to the server.</li>' +
+ ' <li><code>SHARED</code> this request was combined with an existing transaction by collapsed forwarding.</li>' +
+ ' <li><code>REPLY</code> the HTTP reply from server or peer. Usually seen on <code>DENIED</code> due to <a href="http://www.squid-cache.org/Doc/config/http_reply_access/" target="_blank">http_reply_access</a> ACLs preventing delivery of servers response object to the client.</li>' +
+ ' </ul>'
+ },
+
+ 'squidlog.cache_code_object_tag_requests': {
+ info: 'These tags are optional and describe what type of object was produced.<br>' +
+ '<ul>' +
+        ' <li><code>NEGATIVE</code> only seen on HIT responses, indicating the response was a cached error response, e.g. <b>404 not found</b>.</li>' +
+ ' <li><code>STALE</code> the object was cached and served stale. This is usually caused by <i>stale-while-revalidate</i> or <i>stale-if-error</i> cache controls.</li>' +
+ ' <li><code>OFFLINE</code> the requested object was retrieved from the cache during <a href="http://www.squid-cache.org/Doc/config/offline_mode/" target="_blank">offline_mode</a>. The offline mode never validates any object.</li>' +
+ ' <li><code>INVALID</code> an invalid request was received. An error response was delivered indicating what the problem was.</li>' +
+ ' <li><code>FAILED</code> only seen on <code>REFRESH</code> to indicate the revalidation request failed. The response object may be the server provided network error or the stale object which was being revalidated depending on stale-if-error cache control.</li>' +
+ ' <li><code>MODIFIED</code> only seen on <code>REFRESH</code> responses to indicate revalidation produced a new modified object.</li>' +
+ ' <li><code>UNMODIFIED</code> only seen on <code>REFRESH</code> responses to indicate revalidation produced a 304 (Not Modified) status. The client gets either a full 200 (OK), a 304 (Not Modified), or (in theory) another response, depending on the client request and other details.</li>' +
+ ' <li><code>REDIRECT</code> Squid generated an HTTP redirect response to this request.</li>' +
+ ' </ul>'
+ },
+
+ 'squidlog.cache_code_load_source_tag_requests': {
+ info: 'These tags are optional and describe whether the response was loaded from cache, network, or otherwise.<br>' +
+ '<ul>' +
+ ' <li><code>HIT</code> the response object delivered was the local cache object.</li>' +
+ ' <li><code>MEM</code> the response object came from memory cache, avoiding disk accesses. Only seen on HIT responses.</li>' +
+ ' <li><code>MISS</code> the response object delivered was the network response object.</li>' +
+ ' <li><code>DENIED</code> the request was denied by access controls.</li>' +
+ ' <li><code>NOFETCH</code> an ICP specific type, indicating service is alive, but not to be used for this request.</li>' +
+ ' <li><code>TUNNEL</code> a binary tunnel was established for this transaction.</li>' +
+ ' </ul>'
+ },
+
+ 'squidlog.cache_code_error_tag_requests': {
+        info: 'These tags are optional and describe some error conditions which occurred during response delivery.<br>' +
+ '<ul>' +
+ ' <li><code>ABORTED</code> the response was not completed due to the connection being aborted (usually by the client).</li>' +
+ ' <li><code>TIMEOUT</code> the response was not completed due to a connection timeout.</li>' +
+ ' <li><code>IGNORED</code> while refreshing a previously cached response A, Squid got a response B that was older than A (as determined by the Date header field). Squid ignored response B (and attempted to use A instead).</li>' +
+ ' </ul>'
+ },
+
+ 'squidlog.http_method_requests': {
+ info: 'The request method to obtain an object. Please refer to section <a href="https://wiki.squid-cache.org/SquidFaq/SquidLogs#Request_methods">request-methods</a> for available methods and their description.'
+ },
+
+ 'squidlog.hier_code_requests': {
+ info: 'A code that explains how the request was handled, e.g. by forwarding it to a peer, or going straight to the source. ' +
+        'Any hierarchy tag may be prefixed with <code>TIMEOUT_</code>, if the timeout occurs waiting for all ICP replies to return from the neighbours. The timeout is either dynamic, if the <a href="http://www.squid-cache.org/Doc/config/icp_query_timeout/" target="_blank">icp_query_timeout</a> was not set, or the time configured there has run out. ' +
+ 'Refer to <a href="https://wiki.squid-cache.org/SquidFaq/SquidLogs#Hierarchy_Codes" target="_blank">Hierarchy Codes</a> for details on hierarchy codes.'
+ },
+
+ 'squidlog.server_address_forwarded_requests': {
+ info: 'The IP address or hostname where the request (if a miss) was forwarded. For requests sent to origin servers, this is the origin server\'s IP address. ' +
+ 'For requests sent to a neighbor cache, this is the neighbor\'s hostname. NOTE: older versions of Squid would put the origin server hostname here.'
+ },
+
+ 'squidlog.mime_type_requests': {
+ info: 'The content type of the object as seen in the HTTP reply header. Please note that ICP exchanges usually don\'t have any content type.'
+ },
+
+ // ------------------------------------------------------------------------
+ // CockroachDB
+
+ 'cockroachdb.process_cpu_time_combined_percentage': {
+        info: 'Current combined CPU utilization, calculated as <code>(user+system)/number of logical CPUs</code>.'
+ },
+
+ 'cockroachdb.host_disk_bandwidth': {
+ info: 'Summary disk bandwidth statistics across all system host disks.'
+ },
+
+ 'cockroachdb.host_disk_operations': {
+ info: 'Summary disk operations statistics across all system host disks.'
+ },
+
+ 'cockroachdb.host_disk_iops_in_progress': {
+        info: 'Summary disk IOPS in progress statistics across all system host disks.'
+ },
+
+ 'cockroachdb.host_network_bandwidth': {
+ info: 'Summary network bandwidth statistics across all system host network interfaces.'
+ },
+
+ 'cockroachdb.host_network_packets': {
+        info: 'Summary network packet statistics across all system host network interfaces.'
+ },
+
+ 'cockroachdb.live_nodes': {
+ info: 'Will be <code>0</code> if this node is not itself live.'
+ },
+
+ 'cockroachdb.total_storage_capacity': {
+        info: 'Entire disk capacity. It includes non-CockroachDB (non-CR) data, CR data, and empty space.'
+ },
+
+ 'cockroachdb.storage_capacity_usability': {
+        info: '<code>usable</code> is the sum of empty space and CR data, <code>unusable</code> is the space used by non-CR data.'
+ },
+
+ 'cockroachdb.storage_usable_capacity': {
+ info: 'Breakdown of <code>usable</code> space.'
+ },
+
+ 'cockroachdb.storage_used_capacity_percentage': {
+ info: '<code>total</code> is % of <b>total</b> space used, <code>usable</code> is % of <b>usable</b> space used.'
+ },
+
+ 'cockroachdb.sql_bandwidth': {
+ info: 'The total amount of SQL client network traffic.'
+ },
+
+ 'cockroachdb.sql_errors': {
+        info: '<code>statement</code> is the number of statements resulting in a planning or runtime error, ' +
+        '<code>transaction</code> is the number of SQL transaction abort errors.'
+ },
+
+ 'cockroachdb.sql_started_ddl_statements': {
+        info: 'The number of <b>started</b> DDL (Data Definition Language) statements. ' +
+ 'This type means database schema changes. ' +
+ 'It includes <code>CREATE</code>, <code>ALTER</code>, <code>DROP</code>, <code>RENAME</code>, <code>TRUNCATE</code> and <code>COMMENT</code> statements.'
+ },
+
+ 'cockroachdb.sql_executed_ddl_statements': {
+        info: 'The number of <b>executed</b> DDL (Data Definition Language) statements. ' +
+ 'This type means database schema changes. ' +
+ 'It includes <code>CREATE</code>, <code>ALTER</code>, <code>DROP</code>, <code>RENAME</code>, <code>TRUNCATE</code> and <code>COMMENT</code> statements.'
+ },
+
+ 'cockroachdb.sql_started_dml_statements': {
+        info: 'The number of <b>started</b> DML (Data Manipulation Language) statements.'
+ },
+
+ 'cockroachdb.sql_executed_dml_statements': {
+        info: 'The number of <b>executed</b> DML (Data Manipulation Language) statements.'
+ },
+
+ 'cockroachdb.sql_started_tcl_statements': {
+        info: 'The number of <b>started</b> TCL (Transaction Control Language) statements.'
+ },
+
+ 'cockroachdb.sql_executed_tcl_statements': {
+        info: 'The number of <b>executed</b> TCL (Transaction Control Language) statements.'
+ },
+
+ 'cockroachdb.live_bytes': {
+ info: 'The amount of live data used by both applications and the CockroachDB system.'
+ },
+
+ 'cockroachdb.kv_transactions': {
+ info: 'KV transactions breakdown:<br>' +
+ '<ul>' +
+ ' <li><code>committed</code> committed KV transactions (including 1PC).</li>' +
+        ' <li><code>fast-path_committed</code> KV transaction one-phase commit attempts.</li>' +
+ ' <li><code>aborted</code> aborted KV transactions.</li>' +
+ ' </ul>'
+ },
+
+ 'cockroachdb.kv_transaction_restarts': {
+ info: 'KV transactions restarts breakdown:<br>' +
+ '<ul>' +
+ ' <li><code>write too old</code> restarts due to a concurrent writer committing first.</li>' +
+ ' <li><code>write too old (multiple)</code> restarts due to multiple concurrent writers committing first.</li>' +
+        ' <li><code>forwarded timestamp (iso=serializable)</code> restarts due to a forwarded commit timestamp and isolation=SERIALIZABLE.</li>' +
+ ' <li><code>possible replay</code> restarts due to possible replays of command batches at the storage layer.</li>' +
+ ' <li><code>async consensus failure</code> restarts due to async consensus writes that failed to leave intents.</li>' +
+ ' <li><code>read within uncertainty interval</code> restarts due to reading a new value within the uncertainty interval.</li>' +
+ ' <li><code>aborted</code> restarts due to an abort by a concurrent transaction (usually due to deadlock).</li>' +
+ ' <li><code>push failure</code> restarts due to a transaction push failure.</li>' +
+        ' <li><code>unknown</code> restarts due to unknown reasons.</li>' +
+ ' </ul>'
+ },
+
+ 'cockroachdb.ranges': {
+ info: 'CockroachDB stores all user data (tables, indexes, etc.) and almost all system data in a giant sorted map of key-value pairs. ' +
+ 'This keyspace is divided into "ranges", contiguous chunks of the keyspace, so that every key can always be found in a single range.'
+ },
+
+ 'cockroachdb.ranges_replication_problem': {
+        info: 'Ranges with a suboptimal number of replicas:<br>' +
+ '<ul>' +
+ ' <li><code>unavailable</code> ranges with fewer live replicas than needed for quorum.</li>' +
+ ' <li><code>under replicated</code> ranges with fewer live replicas than the replication target.</li>' +
+ ' <li><code>over replicated</code> ranges with more live replicas than the replication target.</li>' +
+ ' </ul>'
+ },
+
+ 'cockroachdb.replicas': {
+ info: 'CockroachDB replicates each range (3 times by default) and stores each replica on a different node.'
+ },
+
+ 'cockroachdb.replicas_leaders': {
+        info: 'For each range, one of the replicas is the <code>leader</code> for write requests; <code>not leaseholders</code> is the number of Raft leaders whose range lease is held by another store.'
+ },
+
+ 'cockroachdb.replicas_leaseholders': {
+ info: 'For each range, one of the replicas holds the "range lease". This replica, referred to as the <code>leaseholder</code>, is the one that receives and coordinates all read and write requests for the range.'
+ },
+
+ 'cockroachdb.queue_processing_failures': {
+ info: 'Failed replicas breakdown by queue:<br>' +
+ '<ul>' +
+ ' <li><code>gc</code> replicas which failed processing in the GC queue.</li>' +
+ ' <li><code>replica gc</code> replicas which failed processing in the replica GC queue.</li>' +
+ ' <li><code>replication</code> replicas which failed processing in the replicate queue.</li>' +
+ ' <li><code>split</code> replicas which failed processing in the split queue.</li>' +
+ ' <li><code>consistency</code> replicas which failed processing in the consistency checker queue.</li>' +
+ ' <li><code>raft log</code> replicas which failed processing in the Raft log queue.</li>' +
+ ' <li><code>raft snapshot</code> replicas which failed processing in the Raft repair queue.</li>' +
+ ' <li><code>time series maintenance</code> replicas which failed processing in the time series maintenance queue.</li>' +
+ ' </ul>'
+ },
+
+ 'cockroachdb.rebalancing_queries': {
+ info: 'Number of kv-level requests received per second by the store, averaged over a large time period as used in rebalancing decisions.'
+ },
+
+ 'cockroachdb.rebalancing_writes': {
+ info: 'Number of keys written (i.e. applied by raft) per second to the store, averaged over a large time period as used in rebalancing decisions.'
+ },
+
+ 'cockroachdb.slow_requests': {
+ info: 'Requests that have been stuck for a long time.'
+ },
+
+ 'cockroachdb.timeseries_samples': {
+        info: 'The number of metric samples written to disk.'
+ },
+
+ 'cockroachdb.timeseries_write_errors': {
+        info: 'The number of errors encountered while attempting to write metrics to disk.'
+ },
+
+ 'cockroachdb.timeseries_write_bytes': {
+ info: 'Size of metric samples written to disk.'
+ },
+
+ // ------------------------------------------------------------------------
+ // eBPF
+
+ 'ebpf.tcp_functions': {
+ title : 'TCP calls',
+ info: 'Successful or failed calls to functions <code>tcp_sendmsg</code>, <code>tcp_cleanup_rbuf</code> and <code>tcp_close</code>.'
+ },
+
+ 'ebpf.tcp_bandwidth': {
+ title : 'TCP bandwidth',
+        info: 'Bytes sent and received for functions <code>tcp_sendmsg</code> and <code>tcp_cleanup_rbuf</code>. We use <code>tcp_cleanup_rbuf</code> instead of <code>tcp_recvmsg</code>, because the latter misses <code>tcp_read_sock()</code> traffic, and we would also need more probes to get the socket and packet size.'
+ },
+
+ 'ebpf.tcp_retransmit': {
+ title : 'TCP retransmit',
+        info: 'Number of packets retransmitted for function <code>tcp_retransmit_skb</code>.'
+ },
+
+ 'ebpf.tcp_error': {
+ title : 'TCP errors',
+        info: 'Failed calls to the functions <code>tcp_sendmsg</code>, <code>tcp_cleanup_rbuf</code> and <code>tcp_close</code>.'
+ },
+
+ 'ebpf.udp_functions': {
+ title : 'UDP calls',
+ info: 'Successful or failed calls to functions <code>udp_sendmsg</code> and <code>udp_recvmsg</code>.'
+ },
+
+ 'ebpf.udp_bandwidth': {
+ title : 'UDP bandwidth',
+ info: 'Bytes sent and received for functions <code>udp_sendmsg</code> and <code>udp_recvmsg</code>.'
+ },
+
+ 'ebpf.file_descriptor': {
+ title : 'File access',
+        info: 'Calls to internal functions of the Linux kernel. The open dimension is attached to the kernel internal function <code>do_sys_open</code> (for kernels newer than <code>5.5.19</code>, a kprobe is added to <code>do_sys_openat2</code>), which is the common function called from' +
+ ' <a href="https://www.man7.org/linux/man-pages/man2/open.2.html" target="_blank">open(2)</a> ' +
+ ' and <a href="https://www.man7.org/linux/man-pages/man2/openat.2.html" target="_blank">openat(2)</a>. ' +
+        ' The close dimension is attached to the function <code>__close_fd</code>, which is called from the system call' +
+ ' <a href="https://www.man7.org/linux/man-pages/man2/close.2.html" target="_blank">close(2)</a>. '
+ },
+
+ 'ebpf.file_error': {
+ title : 'File access error',
+        info: 'Failed calls to the kernel internal function <code>do_sys_open</code> (for kernels newer than <code>5.5.19</code>, a kprobe is added to <code>do_sys_openat2</code>), which is the common function called from' +
+ ' <a href="https://www.man7.org/linux/man-pages/man2/open.2.html" target="_blank">open(2)</a> ' +
+ ' and <a href="https://www.man7.org/linux/man-pages/man2/openat.2.html" target="_blank">openat(2)</a>. ' +
+        ' The close dimension is attached to the function <code>__close_fd</code>, which is called from the system call' +
+ ' <a href="https://www.man7.org/linux/man-pages/man2/close.2.html" target="_blank">close(2)</a>. '
+ },
+
+ 'ebpf.deleted_objects': {
+ title : 'VFS remove',
+        info: 'This chart does not show all events that remove files from the file system, because file systems can implement their own functions to remove files; it shows calls to the function <a href="https://www.kernel.org/doc/htmldocs/filesystems/API-vfs-unlink.html" target="_blank">vfs_unlink</a>. '
+ },
+
+ 'ebpf.io': {
+ title : 'VFS IO',
+        info: 'Successful or failed calls to functions <a href="https://topic.alibabacloud.com/a/kernel-state-file-operation-__-work-information-kernel_8_8_20287135.html" target="_blank">vfs_read</a> and <a href="https://topic.alibabacloud.com/a/kernel-state-file-operation-__-work-information-kernel_8_8_20287135.html" target="_blank">vfs_write</a>. This chart may not show all file system events if the file system uses other functions to store data on disk.'
+ },
+
+ 'ebpf.io_bytes': {
+ title : 'VFS bytes written',
+        info: 'Total bytes successfully read or written using the functions <a href="https://topic.alibabacloud.com/a/kernel-state-file-operation-__-work-information-kernel_8_8_20287135.html" target="_blank">vfs_read</a> and <a href="https://topic.alibabacloud.com/a/kernel-state-file-operation-__-work-information-kernel_8_8_20287135.html" target="_blank">vfs_write</a>.'
+ },
+
+ 'ebpf.io_error': {
+ title : 'VFS IO error',
+ info: 'Failed calls to functions <a href="https://topic.alibabacloud.com/a/kernel-state-file-operation-__-work-information-kernel_8_8_20287135.html" target="_blank">vfs_read</a> and <a href="https://topic.alibabacloud.com/a/kernel-state-file-operation-__-work-information-kernel_8_8_20287135.html" target="_blank">vfs_write</a>.'
+ },
+
+ 'ebpf.process_thread': {
+ title : 'Task creation',
+        info: 'Number of times that either <a href="https://www.ece.uic.edu/~yshi1/linux/lkse/node4.html#SECTION00421000000000000000" target="_blank">do_fork</a>, or <code>kernel_clone</code> for kernels newer than <code>5.9.16</code>, is called to create a new task, which is the common name used inside the kernel for both processes and threads. Netdata identifies threads by counting the number of calls to <a href="https://linux.die.net/man/2/clone" target="_blank">sys_clone</a> that have the flag <code>CLONE_THREAD</code> set.'
+ },
+
+ 'ebpf.exit': {
+ title : 'Exit monitoring',
+ info: 'Calls for the functions responsible for closing (<a href="https://www.informit.com/articles/article.aspx?p=370047&seqNum=4" target="_blank">do_exit</a>) and releasing (<a href="https://www.informit.com/articles/article.aspx?p=370047&seqNum=4" target="_blank">release_task</a>) tasks.'
+ },
+
+ 'ebpf.task_error': {
+ title : 'Task error',
+        info: 'Number of errors encountered while creating a new process or thread.'
+ },
+
+ 'ebpf.process_status': {
+ title : 'Task status',
+        info: 'Difference between the number of processes and the number of threads created per period (<code>process</code> dimension); it also shows the number of possible zombie processes running on the system.'
+ },
+
+ // ------------------------------------------------------------------------
+ // ACLK Internal Stats
+ 'netdata.aclk_status': {
+ valueRange: "[0, 1]",
+        info: 'This chart shows if ACLK was online during the entirety of the sample duration.'
+ },
+
+ 'netdata.aclk_query_per_second': {
+        info: 'This chart shows how many queries were added for the ACLK_query thread to process and how many it was actually able to process.'
+ },
+
+ 'netdata.aclk_latency_mqtt': {
+        info: 'Measures latency between the MQTT publish of the message and its PUB_ACK being received.'
+ },
+
+ // ------------------------------------------------------------------------
+ // VerneMQ
+
+ 'vernemq.sockets': {
+ mainheads: [
+ function (os, id) {
+ void (os);
+ return '<div data-netdata="' + id + '"'
+ + ' data-dimensions="open_sockets"'
+ + ' data-chart-library="gauge"'
+ + ' data-title="Connected Clients"'
+ + ' data-units="clients"'
+ + ' data-gauge-adjust="width"'
+ + ' data-width="16%"'
+ + ' data-before="0"'
+ + ' data-after="-CHART_DURATION"'
+ + ' data-points="CHART_DURATION"'
+ + ' data-colors="' + NETDATA.colors[4] + '"'
+ + ' data-decimal-digits="2"'
+ + ' role="application"></div>';
+ }
+ ]
+ },
+ 'vernemq.queue_processes': {
+ mainheads: [
+ function (os, id) {
+ void (os);
+ return '<div data-netdata="' + id + '"'
+ + ' data-dimensions="queue_processes"'
+ + ' data-chart-library="gauge"'
+                + ' data-title="Queue Processes"'
+ + ' data-units="processes"'
+ + ' data-gauge-adjust="width"'
+ + ' data-width="16%"'
+ + ' data-before="0"'
+ + ' data-after="-CHART_DURATION"'
+ + ' data-points="CHART_DURATION"'
+ + ' data-colors="' + NETDATA.colors[4] + '"'
+ + ' data-decimal-digits="2"'
+ + ' role="application"></div>';
+ }
+ ]
+ },
+ 'vernemq.queue_messages_in_queues': {
+ mainheads: [
+ function (os, id) {
+ void (os);
+ return '<div data-netdata="' + id + '"'
+ + ' data-dimensions="queue_messages_current"'
+ + ' data-chart-library="gauge"'
+ + ' data-title="Messages in the Queues"'
+ + ' data-units="messages"'
+ + ' data-gauge-adjust="width"'
+ + ' data-width="16%"'
+ + ' data-before="0"'
+ + ' data-after="-CHART_DURATION"'
+ + ' data-points="CHART_DURATION"'
+ + ' data-colors="' + NETDATA.colors[2] + '"'
+ + ' data-decimal-digits="2"'
+ + ' role="application"></div>';
+ }
+ ]
+ },
+ 'vernemq.queue_messages': {
+ mainheads: [
+ function (os, id) {
+ void (os);
+ return '<div data-netdata="' + id + '"'
+ + ' data-dimensions="queue_message_in"'
+ + ' data-chart-library="easypiechart"'
+ + ' data-title="MQTT Receive Rate"'
+ + ' data-units="messages/s"'
+ + ' data-gauge-adjust="width"'
+ + ' data-width="14%"'
+ + ' data-before="0"'
+ + ' data-after="-CHART_DURATION"'
+ + ' data-points="CHART_DURATION"'
+ + ' data-colors="' + NETDATA.colors[0] + '"'
+ + ' data-decimal-digits="2"'
+ + ' role="application"></div>';
+ },
+ function (os, id) {
+ void (os);
+ return '<div data-netdata="' + id + '"'
+ + ' data-dimensions="queue_message_out"'
+ + ' data-chart-library="easypiechart"'
+ + ' data-title="MQTT Send Rate"'
+ + ' data-units="messages/s"'
+ + ' data-gauge-adjust="width"'
+ + ' data-width="14%"'
+ + ' data-before="0"'
+ + ' data-after="-CHART_DURATION"'
+ + ' data-points="CHART_DURATION"'
+ + ' data-colors="' + NETDATA.colors[1] + '"'
+ + ' data-decimal-digits="2"'
+ + ' role="application"></div>';
+ },
+ ]
+ },
+ 'vernemq.average_scheduler_utilization': {
+ mainheads: [
+ function (os, id) {
+ void (os);
+ return '<div data-netdata="' + id + '"'
+ + ' data-dimensions="system_utilization"'
+ + ' data-chart-library="gauge"'
+ + ' data-gauge-max-value="100"'
+ + ' data-title="Average Scheduler Utilization"'
+ + ' data-units="percentage"'
+ + ' data-gauge-adjust="width"'
+ + ' data-width="16%"'
+ + ' data-before="0"'
+ + ' data-after="-CHART_DURATION"'
+ + ' data-points="CHART_DURATION"'
+ + ' data-colors="' + NETDATA.colors[3] + '"'
+ + ' data-decimal-digits="2"'
+ + ' role="application"></div>';
+ }
+ ]
+ },
+
+ // ------------------------------------------------------------------------
+ // Apache Pulsar
+ 'pulsar.messages_rate': {
+ mainheads: [
+ function (os, id) {
+ void (os);
+ return '<div data-netdata="' + id + '"'
+ + ' data-dimensions="pulsar_rate_in"'
+ + ' data-chart-library="easypiechart"'
+ + ' data-title="Publish"'
+ + ' data-units="messages/s"'
+ + ' data-gauge-adjust="width"'
+ + ' data-width="12%"'
+ + ' data-before="0"'
+ + ' data-after="-CHART_DURATION"'
+ + ' data-points="CHART_DURATION"'
+ + ' data-colors="' + NETDATA.colors[0] + '"'
+ + ' data-decimal-digits="2"'
+ + ' role="application"></div>';
+ },
+ function (os, id) {
+ void (os);
+ return '<div data-netdata="' + id + '"'
+ + ' data-dimensions="pulsar_rate_out"'
+ + ' data-chart-library="easypiechart"'
+ + ' data-title="Dispatch"'
+ + ' data-units="messages/s"'
+ + ' data-gauge-adjust="width"'
+ + ' data-width="12%"'
+ + ' data-before="0"'
+ + ' data-after="-CHART_DURATION"'
+ + ' data-points="CHART_DURATION"'
+ + ' data-colors="' + NETDATA.colors[1] + '"'
+ + ' data-decimal-digits="2"'
+ + ' role="application"></div>';
+ },
+ ]
+ },
+ 'pulsar.subscription_msg_rate_redeliver': {
+ mainheads: [
+ function (os, id) {
+ void (os);
+ return '<div data-netdata="' + id + '"'
+ + ' data-dimensions="pulsar_subscription_msg_rate_redeliver"'
+ + ' data-chart-library="gauge"'
+ + ' data-gauge-max-value="100"'
+ + ' data-title="Redelivered"'
+ + ' data-units="messages/s"'
+ + ' data-gauge-adjust="width"'
+ + ' data-width="14%"'
+ + ' data-before="0"'
+ + ' data-after="-CHART_DURATION"'
+ + ' data-points="CHART_DURATION"'
+ + ' data-colors="' + NETDATA.colors[3] + '"'
+ + ' data-decimal-digits="2"'
+ + ' role="application"></div>';
+ }
+ ]
+ },
+ 'pulsar.subscription_blocked_on_unacked_messages': {
+ mainheads: [
+ function (os, id) {
+ void (os);
+ return '<div data-netdata="' + id + '"'
+ + ' data-dimensions="pulsar_subscription_blocked_on_unacked_messages"'
+ + ' data-chart-library="gauge"'
+ + ' data-gauge-max-value="100"'
+ + ' data-title="Blocked On Unacked"'
+ + ' data-units="subscriptions"'
+ + ' data-gauge-adjust="width"'
+ + ' data-width="14%"'
+ + ' data-before="0"'
+ + ' data-after="-CHART_DURATION"'
+ + ' data-points="CHART_DURATION"'
+ + ' data-colors="' + NETDATA.colors[3] + '"'
+ + ' data-decimal-digits="2"'
+ + ' role="application"></div>';
+ }
+ ]
+ },
+ 'pulsar.msg_backlog': {
+ mainheads: [
+ function (os, id) {
+ void (os);
+ return '<div data-netdata="' + id + '"'
+ + ' data-dimensions="pulsar_msg_backlog"'
+ + ' data-chart-library="gauge"'
+ + ' data-gauge-max-value="100"'
+ + ' data-title="Messages Backlog"'
+ + ' data-units="messages"'
+ + ' data-gauge-adjust="width"'
+ + ' data-width="14%"'
+ + ' data-before="0"'
+ + ' data-after="-CHART_DURATION"'
+ + ' data-points="CHART_DURATION"'
+ + ' data-colors="' + NETDATA.colors[2] + '"'
+ + ' data-decimal-digits="2"'
+ + ' role="application"></div>';
+ }
+ ]
+ },
+
+ 'pulsar.namespace_messages_rate': {
+ heads: [
+ function (os, id) {
+ void (os);
+ return '<div data-netdata="' + id + '"'
+ + ' data-dimensions="publish"'
+ + ' data-chart-library="easypiechart"'
+ + ' data-title="Publish"'
+ + ' data-units="messages/s"'
+ + ' data-gauge-adjust="width"'
+ + ' data-width="12%"'
+ + ' data-before="0"'
+ + ' data-after="-CHART_DURATION"'
+ + ' data-points="CHART_DURATION"'
+ + ' data-colors="' + NETDATA.colors[0] + '"'
+ + ' data-decimal-digits="2"'
+ + ' role="application"></div>';
+ },
+ function (os, id) {
+ void (os);
+ return '<div data-netdata="' + id + '"'
+ + ' data-dimensions="dispatch"'
+ + ' data-chart-library="easypiechart"'
+ + ' data-title="Dispatch"'
+ + ' data-units="messages/s"'
+ + ' data-gauge-adjust="width"'
+ + ' data-width="12%"'
+ + ' data-before="0"'
+ + ' data-after="-CHART_DURATION"'
+ + ' data-points="CHART_DURATION"'
+ + ' data-colors="' + NETDATA.colors[1] + '"'
+ + ' data-decimal-digits="2"'
+ + ' role="application"></div>';
+ },
+ ]
+ },
+ 'pulsar.namespace_subscription_msg_rate_redeliver': {
+ heads: [
+ function (os, id) {
+ void (os);
+ return '<div data-netdata="' + id + '"'
+ + ' data-dimensions="redelivered"'
+ + ' data-chart-library="gauge"'
+ + ' data-gauge-max-value="100"'
+ + ' data-title="Redelivered"'
+ + ' data-units="messages/s"'
+ + ' data-gauge-adjust="width"'
+ + ' data-width="14%"'
+ + ' data-before="0"'
+ + ' data-after="-CHART_DURATION"'
+ + ' data-points="CHART_DURATION"'
+ + ' data-colors="' + NETDATA.colors[3] + '"'
+ + ' data-decimal-digits="2"'
+ + ' role="application"></div>';
+ }
+ ]
+ },
+ 'pulsar.namespace_subscription_blocked_on_unacked_messages': {
+ heads: [
+ function (os, id) {
+ void (os);
+ return '<div data-netdata="' + id + '"'
+ + ' data-dimensions="blocked"'
+ + ' data-chart-library="gauge"'
+ + ' data-gauge-max-value="100"'
+ + ' data-title="Blocked On Unacked"'
+ + ' data-units="subscriptions"'
+ + ' data-gauge-adjust="width"'
+ + ' data-width="14%"'
+ + ' data-before="0"'
+ + ' data-after="-CHART_DURATION"'
+ + ' data-points="CHART_DURATION"'
+ + ' data-colors="' + NETDATA.colors[3] + '"'
+ + ' data-decimal-digits="2"'
+ + ' role="application"></div>';
+ }
+ ]
+ },
+ 'pulsar.namespace_msg_backlog': {
+ heads: [
+ function (os, id) {
+ void (os);
+ return '<div data-netdata="' + id + '"'
+ + ' data-dimensions="backlog"'
+ + ' data-chart-library="gauge"'
+ + ' data-gauge-max-value="100"'
+ + ' data-title="Messages Backlog"'
+ + ' data-units="messages"'
+ + ' data-gauge-adjust="width"'
+ + ' data-width="14%"'
+ + ' data-before="0"'
+ + ' data-after="-CHART_DURATION"'
+ + ' data-points="CHART_DURATION"'
+ + ' data-colors="' + NETDATA.colors[2] + '"'
+ + ' data-decimal-digits="2"'
+ + ' role="application"></div>';
+ },
+ ],
+ },
+
+ // ------------------------------------------------------------------------
+ // Nvidia-smi
+
+ 'nvidia_smi.fan_speed': {
+ heads: [
+ function (os, id) {
+ void (os);
+ return '<div data-netdata="' + id + '"'
+ + ' data-dimensions="speed"'
+ + ' data-chart-library="easypiechart"'
+ + ' data-title="Fan Speed"'
+ + ' data-units="percentage"'
+ + ' data-gauge-adjust="width"'
+ + ' data-width="12%"'
+ + ' data-before="0"'
+ + ' data-after="-CHART_DURATION"'
+ + ' data-points="CHART_DURATION"'
+ + ' data-colors="' + NETDATA.colors[4] + '"'
+ + ' data-decimal-digits="2"'
+ + ' role="application"></div>';
+ }
+ ]
+ },
+ 'nvidia_smi.temperature': {
+ heads: [
+ function (os, id) {
+ void (os);
+ return '<div data-netdata="' + id + '"'
+ + ' data-dimensions="temp"'
+ + ' data-chart-library="easypiechart"'
+ + ' data-title="Temperature"'
+ + ' data-units="celsius"'
+ + ' data-gauge-adjust="width"'
+ + ' data-width="12%"'
+ + ' data-before="0"'
+ + ' data-after="-CHART_DURATION"'
+ + ' data-points="CHART_DURATION"'
+ + ' data-colors="' + NETDATA.colors[3] + '"'
+ + ' data-decimal-digits="2"'
+ + ' role="application"></div>';
+ }
+ ]
+ },
+ 'nvidia_smi.memory_allocated': {
+ heads: [
+ function (os, id) {
+ void (os);
+ return '<div data-netdata="' + id + '"'
+ + ' data-dimensions="used"'
+ + ' data-chart-library="easypiechart"'
+ + ' data-title="Used Memory"'
+ + ' data-units="MiB"'
+ + ' data-gauge-adjust="width"'
+ + ' data-width="12%"'
+ + ' data-before="0"'
+ + ' data-after="-CHART_DURATION"'
+ + ' data-points="CHART_DURATION"'
+ + ' data-colors="' + NETDATA.colors[4] + '"'
+ + ' data-decimal-digits="2"'
+ + ' role="application"></div>';
+ }
+ ]
+ },
+ 'nvidia_smi.power': {
+ heads: [
+ function (os, id) {
+ void (os);
+ return '<div data-netdata="' + id + '"'
+ + ' data-dimensions="power"'
+ + ' data-chart-library="easypiechart"'
+ + ' data-title="Power Utilization"'
+ + ' data-units="watts"'
+ + ' data-gauge-adjust="width"'
+ + ' data-width="12%"'
+ + ' data-before="0"'
+ + ' data-after="-CHART_DURATION"'
+ + ' data-points="CHART_DURATION"'
+ + ' data-colors="' + NETDATA.colors[2] + '"'
+ + ' data-decimal-digits="2"'
+ + ' role="application"></div>';
+ }
+ ]
+ },
+};
diff --git a/web/gui/dashboard_info_custom_example.js b/web/gui/dashboard_info_custom_example.js
new file mode 100644
index 0000000..6a2d537
--- /dev/null
+++ b/web/gui/dashboard_info_custom_example.js
@@ -0,0 +1,84 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+/*
+ * Custom netdata information file
+ * -------------------------------
+ *
+ * Use this file to add custom information on netdata dashboards:
+ *
+ * 1. Copy it to a new filename (so that it will not be overwritten with netdata updates)
+ * 2. Edit it to fit your needs
+ * 3. Set the following option in /etc/netdata/netdata.conf:
+ *
+ * [web]
+ * custom dashboard_info.js = your_filename.js
+ *
+ * Using this file you can:
+ *
+ * 1. Overwrite or add messages to menus, submenus and charts.
+ * Use dashboard_info.js to find out what you can define.
+ *
+ * 2. Inject JavaScript code into the default netdata dashboard.
+ *
+ */
+
+// Codacy declarations
+/* global customDashboard */
+
+// ----------------------------------------------------------------------------
+// MENU
+//
+// - title   the menu title, as rendered in the charts menu
+// - icon    an html fragment of the icon to display
+// - info    an html fragment for the description shown above all the menu charts
+
+customDashboard.menu = {
+
+};
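+// For illustration only, a minimal sketch of a menu override — the 'web_log'
+// key and all strings below are hypothetical; use the menu names you find in
+// dashboard_info.js. Uncomment and adapt:
+//
+// customDashboard.menu = {
+//     'web_log': {
+//         title: 'My Web Servers',
+//         icon: '<i class="fas fa-server"></i>',
+//         info: 'Performance of the company web servers.'
+//     }
+// };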
+
+
+// ----------------------------------------------------------------------------
+// SUBMENU
+//
+// - title   the submenu title, as rendered in the charts menu
+// - info    an html fragment for the description shown above all the submenu charts
+
+customDashboard.submenu = {
+
+};
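+// For illustration only, a minimal sketch of a submenu override — the
+// 'web_log.responses' key is hypothetical; keys take the form 'menu.submenu':
+//
+// customDashboard.submenu = {
+//     'web_log.responses': {
+//         title: 'Responses',
+//         info: 'HTTP response codes returned by the web servers.'
+//     }
+// };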
+
+
+// ----------------------------------------------------------------------------
+// CONTEXT (the template each chart is based on)
+//
+// - info html fragment for the description above the chart
+// - height a ratio to the default as a decimal number: 1.0 = 100%
+// - colors a single color or an array of colors to use for the dimensions
+// - valuerange the y-range of the chart as an array [min, max]
+// - heads an array of gauge charts to render above the submenu section
+// - mainheads an array of gauge charts to render at the menu section
+
+customDashboard.context = {
+
+};
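+// For illustration only, a minimal sketch of a context override, using the
+// keys documented above — the 'example.random' context and all values are
+// hypothetical:
+//
+// customDashboard.context = {
+//     'example.random': {
+//         info: 'A short description rendered above the chart.',
+//         height: 0.5,           // 50% of the default chart height
+//         colors: '#22AA99',
+//         valuerange: [0, 100]   // fix the y-axis to 0-100
+//     }
+// };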
diff --git a/web/gui/demo.html b/web/gui/demo.html
new file mode 100644
index 0000000..b096bef
--- /dev/null
+++ b/web/gui/demo.html
@@ -0,0 +1,51 @@
+<!DOCTYPE html>
+<!-- SPDX-License-Identifier: GPL-3.0-or-later -->
+<html lang="en">
+<head>
+ <title>NetData Dashboard</title>
+ <meta name="application-name" content="netdata">
+
+ <meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
+ <meta charset="utf-8">
+ <meta http-equiv="X-UA-Compatible" content="IE=edge,chrome=1">
+ <meta name="viewport" content="width=device-width, initial-scale=1">
+ <meta name="apple-mobile-web-app-capable" content="yes">
+ <meta name="apple-mobile-web-app-status-bar-style" content="black-translucent">
+ <meta name="author" content="costa@tsaousis.gr">
+
+ <meta property="og:locale" content="en_US" />
+ <meta property="og:image" content="https://cloud.githubusercontent.com/assets/2662304/22945737/e98cd0c6-f2fd-11e6-96f1-5501934b0955.png"/>
+ <meta property="og:url" content="http://my-netdata.io/"/>
+ <meta property="og:type" content="website"/>
+ <meta property="og:site_name" content="netdata"/>
+ <meta property="og:title" content="netdata - real-time performance monitoring, done right!"/>
+ <meta property="og:description" content="Stunning real-time dashboards, blazingly fast and extremely interactive. Zero configuration, zero dependencies, zero maintenance." />
+    <script type="text/javascript" src="dashboard.js?v20190902-0"></script>
+</head>
+<body>
+
+<div style="width: 100%; text-align: center;">
+ <div data-netdata="netdata.server_cpu"
+ data-dimensions="user"
+ data-chart-library="gauge"
+ data-width="150px"
+ data-after="-60"
+ data-points="60"
+ data-title="Yes! Realtime!"
+ data-units="I am alive!"
+ data-colors="#FF5555"
+ ></div>
+ <br/>
+ <div data-netdata="netdata.server_cpu"
+ data-dimensions="user"
+ data-chart-library="dygraph"
+ data-dygraph-theme="sparkline"
+ data-width="200px"
+ data-height="30px"
+ data-after="-60"
+ data-points="60"
+ data-colors="#FF5555"
+ ></div>
+</div>
+</body>
+</html>
diff --git a/web/gui/demo2.html b/web/gui/demo2.html
new file mode 100644
index 0000000..650acfe
--- /dev/null
+++ b/web/gui/demo2.html
@@ -0,0 +1,143 @@
+<!DOCTYPE html>
+<!-- SPDX-License-Identifier: GPL-3.0-or-later -->
+<html lang="en">
+<head>
+ <title>NetData Dashboard</title>
+ <meta name="application-name" content="netdata">
+
+ <meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
+ <meta charset="utf-8">
+ <meta http-equiv="X-UA-Compatible" content="IE=edge,chrome=1">
+ <meta name="viewport" content="width=device-width, initial-scale=1">
+ <meta name="apple-mobile-web-app-capable" content="yes">
+ <meta name="apple-mobile-web-app-status-bar-style" content="black-translucent">
+ <meta name="author" content="costa@tsaousis.gr">
+
+ <meta property="og:locale" content="en_US" />
+ <meta property="og:image" content="https://cloud.githubusercontent.com/assets/2662304/22945737/e98cd0c6-f2fd-11e6-96f1-5501934b0955.png"/>
+ <meta property="og:url" content="https://my-netdata.io/"/>
+ <meta property="og:type" content="website"/>
+ <meta property="og:site_name" content="netdata"/>
+ <meta property="og:title" content="netdata - real-time performance monitoring, done right!"/>
+ <meta property="og:description" content="Stunning real-time dashboards, blazingly fast and extremely interactive. Zero configuration, zero dependencies, zero maintenance." />
+    <script>var netdataTheme = 'slate';</script>
+    <script type="text/javascript" src="https://my-netdata.io/dashboard.js?v20190902-0"></script>
+</head>
+<body>
+
+<div class="container" style="width: 90%; padding-top: 10px; text-align: center; color: #AAA">
+ <div style="font-size: 7vw;">why netdata?</div>
+ <br/>
+ <div style="font-size: 2vw; color: white;">These charts visualize the same data...</div>
+
+
+ <!-- Nav tabs -->
+ <ul class="nav nav-tabs" role="tablist">
+ <li role="presentation" class="active"><a href="#gauge" aria-controls="gauge" role="tab" data-toggle="tab">Gauge.js</a></li>
+ <li role="presentation"><a href="#easypiechart" aria-controls="easypiechart" role="tab" data-toggle="tab">Easy Pie Chart</a></li>
+ </ul>
+
+ <!-- Tab panes -->
+ <div class="tab-content">
+ <div role="tabpanel" class="tab-pane active" id="gauge">
+
+ <div style="display: inline-block; width: 35.8%">
+ <div style="font-size: 1.2vw; color: #666; padding-top: 10px;"><i class="fa fa-comment"></i> I can trace an issue like this</div>
+ <br/>
+ <div data-netdata="example.random2"
+ data-dimensions="random"
+ data-chart-library="gauge"
+ data-gauge-max-value="32767"
+ data-width="100%"
+ data-after="-600"
+ data-points="600"
+ data-title="1/second (netdata&nbsp;default)"
+ data-units="important metric"
+ data-colors="#5A5"
+ ></div>
+ </div>
+ <div style="display: inline-block; width: 50%">
+            <div style="font-size: 1.2vw; color: #666;"><i class="fa fa-comment"></i> Can you trace an issue with these?<br/>&nbsp;<br/></div>
+ <div data-netdata="example.random2"
+ data-dimensions="random"
+ data-chart-library="gauge"
+ data-gauge-max-value="32767"
+ data-width="45%"
+ data-after="-600"
+ data-points="60"
+ data-title="Updates Every 10&nbsp;Sec"
+ data-units="important metric"
+ data-colors="#C55"
+ ></div>
+ <div data-netdata="example.random2"
+ data-dimensions="random"
+ data-chart-library="gauge"
+ data-gauge-max-value="32767"
+ data-width="45%"
+ data-after="-600"
+ data-points="2"
+ data-title="Updates Every 5&nbsp;Mins"
+ data-units="important metric"
+ data-colors="#C55"
+ ></div>
+ </div>
+ </div>
+ <div role="tabpanel" class="tab-pane" id="easypiechart">
+
+ <div style="display: inline-block; width: 25%">
+ <div style="font-size: 1.2vw; color: #666; padding-top: 10px;"><i class="fa fa-comment"></i> I can trace an issue like this</div>
+ <br/>
+ <div data-netdata="example.random2"
+ data-dimensions="random"
+ data-chart-library="easypiechart"
+ data-easypiechart-max-value="32767"
+ data-width="100%"
+ data-after="-600"
+ data-points="600"
+ data-title="1/second (netdata&nbsp;default)"
+ data-units="important metric"
+ data-colors="#5A5"
+ ></div>
+ </div>
+ <div style="display: inline-block; width: 40%">
+            <div style="font-size: 1.2vw; color: #666;"><i class="fa fa-comment"></i> Can you trace an issue with these?<br/>&nbsp;<br/></div>
+ <div data-netdata="example.random2"
+ data-dimensions="random"
+ data-chart-library="easypiechart"
+ data-easypiechart-max-value="32767"
+ data-width="45%"
+ data-after="-600"
+ data-points="60"
+ data-title="Updates Every 10&nbsp;Sec"
+ data-units="important metric"
+ data-colors="#C55"
+ ></div>
+ <div data-netdata="example.random2"
+ data-dimensions="random"
+ data-chart-library="easypiechart"
+ data-easypiechart-max-value="32767"
+ data-width="45%"
+ data-after="-600"
+ data-points="2"
+ data-title="Updates Every 5&nbsp;Mins"
+ data-units="important metric"
+ data-colors="#C55"
+ ></div>
+ </div>
+ </div>
+ </div>
+    <div style="font-size: 1.5vw;">Hover on the chart below to see the selected value on the charts above!</div>
+ <div data-netdata="example.random2"
+ data-dimensions="random"
+ data-dygraph-theme="sparkline"
+ data-width="100%"
+ data-height="20vh"
+ data-after="-600"
+ data-points="600"
+ data-title="1/second (netdata&nbsp;default)"
+ data-units="something"
+ data-colors="#888"
+ ></div>
+</div>
+</body>
+</html>
diff --git a/web/gui/demosites.html b/web/gui/demosites.html
new file mode 100644
index 0000000..e8a310a
--- /dev/null
+++ b/web/gui/demosites.html
@@ -0,0 +1,1469 @@
+<!doctype html>
+<!-- SPDX-License-Identifier: GPL-3.0-or-later -->
+<html lang="en-US">
+<head>
+ <meta http-equiv="Refresh" content="0; url=https://www.netdata.cloud">
+ <meta charset=utf-8>
+ <title>NetData: Get control of your Linux Servers. Simple. Effective. Awesome.</title>
+ <meta name=author content="Costa Tsaousis">
+ <meta name=description content="Unparalleled insights, in real-time, of everything happening on your Linux systems and applications, with stunning, interactive web dashboards and powerful performance and health alarms.">
+
+ <meta name=viewport content="width=device-width,initial-scale=1">
+ <link rel=apple-touch-icon href=apple-touch-icon.png>
+ <link rel="icon" href="data:image/x-icon;base64,iVBORw0KGgoAAAANSUhEUgAAACAAAAAgCAYAAABzenr0AAAAGXRFWHRTb2Z0d2FyZQBBZG9iZSBJbWFnZVJlYWR5ccllPAAAAP9JREFUeNpiYBgFo+A/w34gpiZ8DzWzAYgNiHGAA5UdgA73g+2gcyhgg/0DGQoweB6IBQYyFCCOGOBQwBMd/xnW09ERDtgcoEBHB+zHFQrz6egIBUasocDAcJ9OxWAhE4YQI8MDILmATg7wZ8QRDfQKhQf4Cie6pAVGPA4AhQKo0BCgZRAw4ZSBpIWJNI6CD4wEKikBaFqgVSgcYMIrzcjwgcahcIGRiPYCLUPBkNhWUwP9akVcoQBpatG4MsLviAIqWj6f3Absfdq2igg7IIEKDVQKEzN5ofAenJCp1I8gJRTug5tfkGIdR1FDniMI+QZUjF8Amn5htOdHCAAEGACE6B0cS6mrEwAAAABJRU5ErkJggg==" />
+
+ <meta property="og:url" content="https://my-netdata.io" />
+ <meta property="og:type" content="website" />
+ <meta property="og:title" content="Get control of your Linux Servers. Simple. Effective. Awesome." />
+ <meta property="og:description" content="Unparalleled insights, in real-time, of everything happening on your Linux systems and applications, with stunning, interactive web dashboards and powerful performance and health alarms." />
+ <meta property="og:image" content="https://cloud.githubusercontent.com/assets/2662304/22945737/e98cd0c6-f2fd-11e6-96f1-5501934b0955.png" />
+ <meta property="og:image:type" content="image/png" />
+ <meta property="fb:app_id" content="1200089276712916" />
+
+ <meta name="twitter:card" content="summary" />
+ <meta name="twitter:site" content="@linuxnetdata" />
+ <meta name="twitter:title" content="Get control of your Linux Servers. Simple. Effective. Awesome." />
+ <meta name="twitter:description" content="Unparalleled insights, in real-time, of everything happening on your Linux systems and applications, with stunning, interactive web dashboards and powerful performance and health alarms." />
+ <meta name="twitter:image" content="https://cloud.githubusercontent.com/assets/2662304/14092712/93b039ea-f551-11e5-822c-beadbf2b2a2e.gif" />
+
+ <meta name="google-site-verification" content="3Xmk2kyCvai8p9HEnYHoQ9RBW20-b1NvPAgu07Fkkds" />
+ <meta name="msvalidate.01" content="896DCA31C9A664CE359FCF1A645DD476" />
+
+ <style>/*! normalize.css v4.1.1 | MIT License | github.com/necolas/normalize.css */
+html {
+ line-height: 1.15;
+ -ms-text-size-adjust: 100%;
+ -webkit-text-size-adjust: 100%;
+ color: #fff;
+ font: 17px/1.4 'Open Sans', sans-serif;
+ text-align: center
+}
+
+body {
+ margin: 0;
+ background-color: #2f3135;
+ background-image: url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAADoAAAA9BAMAAAAOkGejAAAAGFBMVEUKCgoUFBQAAAAPDw8ZGRkeHh4jIyMFBQUUJmucAAAACHRSTlMzMzMzMzMzM85JBgUAAAV1SURBVDjLBQC3tkMA9OqrbtWtCKwErLoVhDUP5PffAUvqhtJFWytU/UqOWbf0nG8ZSVyyfSPwrjqzxYailPJtJu/uihN7np+51RrBgYosPTzBElTZCg8JieV4W/HJciqhFwhQLBmkX5JnNzzMlOGvQChGCKbanFWBgVeRCr9L6BZCgZxB/0wN7zTO2QuP80SIL3F5Ydbnhz12iE/nSOMrqwK/OMfbAYHkioJlnlj9CKUbeomN61U5LQ6nWRmg+tfrDusm2LHGDnRDdHUp5CLTvoHrwgtZlIr/+FyoPz2tz/HiQzc8x1TWqAZp99yto4qGuAs20qucNqPyUAyqHuAp2Hhv0OR1LC+g2voMngjB2uvyVvf0aFhD1Mi/f5Q6MER7SzJWu2AW3my9l8mB4W3WfCqwf+ikfc7pudFVvOMy0ikuSoF47zw6UYPxdqWRRSRo91RZtWYa/sQeri7tMPKcCao0vs9QTQC+8CuoReAkHuKRZCi2qtv9zJLAHfSoRltH/+sj9rlgdBTsfUbTEb7oTbTJ8acxbjMQnaftBkRVVerOeKhKr3jkRzTzqS6RpJNvF0MhOBQm/BRXor/MU/YjObdYIu1/iaQ+IviYnlsG6r3cbQoB7cj4SPgMSkLzgIM24+LSjb2sYxWDtIhCdzBfS5Kh768XgH5jkSLjrvRX/nQnv+SXQMvagpPVScAZwWhVbvPdPqcr3X/u8z558ddIdjmVMH9CIVvrW+8rPVq6M54Pf+tebGkIXwPevaCgdQ59wWbULrPB4dPT8suLWr13YKUuDNTpGBspJ9fPGSNOEjp2TYXTIgF8QgEtzX0gIbXRP8JGMbxh1uHA/CwE4a/KHUc8KzV868fO4o+8GNcNvdPaKfzprkunXIthFS9MqpEG1p7ozWTJTcnAlvUnjixEaGn5ll1yuZbtIgS/r2ISBSDE2nsksYx7YFwH2ytB0rXzqh52qJowchJSI3RJmxHeJGZFDq37LWVmzvkgA7zjT2iOsHsdb9viBQLPx3gUmys1cQG6HOEsopo6glj0VXdyli/FJsnSbg5FQLpDO1xiy0ozQy0InDVNZTuXbhENG9gu28ZoHg/de53YTAVqCwl966V7VX/g+AW3ysMyMjXNhOuaLFmBJ2Z1x9LfG55m/34snAnOgXbMqZIbaop8Zjk5P3fAw9h8vkwHKZAC7yqW6+85ZdpAFD8iAjbVRj8BI3PMYJ2oiKNrZHKSnfNJ2UZwtcT9IryvNrGxdqtCx4vc74z39odPA/h5f/MJu46HKUOcbURZd/E2QH6Kgv8Aa2PSevG2gMfoYHWdN38kadbiFHonAjv50PjgyFZwannFGebUjVmxFnokoTbwWBNVd7qx9KG1joZ69npEk0jRr7/aBYQ5ipNcGRvqjeT+kFjTgv7n33L0zlBIH6CoeaPm9eQN6uKmSwE/LAtDPgMNAOQ5X1Vr8Zd0BQlLTV88U6LzD+6iwQp9NSHD5uCcqml/N0NgRmDN9vNS6A/QJBm2jvBbFTLvly/mtLX1rg5kwgPvA4rA+LIdN3bkVvhrqk8OUYZpuYxaXW/gPVlDxtru6+3Z0KY5DMac3pQzo8y7hO2qxdd6lnvUSdXfFRduigV0YuZv9peBHwHix+d4M7fL/Y44jX6S5ZdOzBoEC2fEohdSE7PTjRBUT3T+jclLxWbKdEOoiuB81dV0xo2pFPOXZmpEMueTDrAjAr8k6y15pMsoCHOyT5qlyWn85HLLuyyAWMlmmjYSNKnv9nRsTib5DSbWLPkJjoVihW/eRQqy/dja151zycTHTBmuroDeXRvVzJ3VFWB65e+L6xu+D5fa+D0BESL4VjlKSKrvs9W69lhj2345pBjIr3+RSJFuS0A/sQAAAABJRU5ErkJggg==)
+}
+
+a {
+ background-color: transparent;
+ -webkit-text-decoration-skip: objects;
+ color: #069;
+ text-decoration: none
+}
+
+a:active, a:hover {
+ outline-width: 0
+}
+
+strong {
+ font-weight: bolder
+}
+
+h1 {
+ font-size: 2.9em;
+ line-height: 1.2em;
+ margin: 0 .5em .75em
+}
+
+img {
+ border-style: none;
+ vertical-align: middle
+}
+
+[type=button]::-moz-focus-inner, [type=reset]::-moz-focus-inner, [type=submit]::-moz-focus-inner, button::-moz-focus-inner {
+ border-style: none;
+ padding: 0
+}
+
+[type=button]:-moz-focusring, [type=reset]:-moz-focusring, [type=submit]:-moz-focusring, button:-moz-focusring {
+ outline: 1px dotted ButtonText
+}
+
+a:active, a:focus, a:hover {
+ text-decoration: underline;
+}
+
+::-moz-selection {
+ background-color: #b3d4fc;
+ text-shadow: none;
+}
+
+::selection {
+ background-color: #b3d4fc;
+ text-shadow: none;
+}
+
+h2 {
+ font-size: 2em;
+ margin: 1.5em 0;
+}
+
+h3 {
+ color: #555;
+ font-size: 1.5em;
+ margin: 0 0 .5em;
+}
+
+p {
+ margin: 0 0 2em;
+}
+
+.quote {
+ font-size: 1.15em;
+ margin: 0 0 .5em;
+ text-align: center;
+}
+
+.title {
+ /* opacity: 0; */
+ /* transition: opacity 500ms; */
+}
+
+.titlefadein {
+ opacity: 1;
+ transition: opacity 500ms;
+}
+
+.grid {
+ margin: 0 -15px;
+ letter-spacing: -.31em;
+ word-spacing: -.43em;
+ text-rendering: optimizespeed
+}
+
+.grid-title {
+ text-align: left;
+}
+
+.grid-cell {
+ display: inline-block;
+ letter-spacing: normal;
+ text-align: left;
+ text-rendering: auto;
+ vertical-align: top;
+ width: 50%;
+ word-spacing: normal
+}
+
+.grid-cell > * {
+ padding: 0 15px
+}
+
+.inline-block-list {
+ list-style-type: none;
+ margin: 0;
+ padding: 0
+}
+
+.inline-block-list li {
+ display: inline-block;
+ margin: 0 0 0 1.5em;
+ padding: 0;
+ vertical-align: top
+}
+
+.inline-block-list li:first-child {
+ margin-left: 0
+}
+
+.flex-embed {
+ background-color: #000;
+ box-shadow: 0 0 10px #000;
+ height: 0;
+ overflow: hidden;
+ padding-bottom: 56.25%;
+ position: relative
+}
+
+.flex-embed a, .flex-embed img {
+ bottom: 0;
+ height: 100%;
+ left: 0;
+ position: absolute;
+ top: 0;
+ width: 100%
+}
+
+.flex-embed .play-btn {
+ background: url(data:image/svg+xml;base64,PHN2ZyBmaWxsPSIjMDAwIiB4bWxucz0iaHR0cDovL3d3dy53My5vcmcvMjAwMC9zdmciIHZpZXdCb3g9IjAgMCA1MTIgNTEyIj48cGF0aCBkPSJNMjU2LDkyLjQ4MWM0NC40MzMsMCw4Ni4xOCwxNy4wNjgsMTE3LjU1Myw0OC4wNjRDNDA0Ljc5NCwxNzEuNDExLDQyMiwyMTIuNDEzLDQyMiwyNTUuOTk5cy0xNy4yMDYsODQuNTg4LTQ4LjQ0OCwxMTUuNDU1Yy0zMS4zNzIsMzAuOTk0LTczLjEyLDQ4LjA2NC0xMTcuNTUyLDQ4LjA2NHMtODYuMTc5LTE3LjA3LTExNy41NTItNDguMDY0QzEwNy4yMDYsMzQwLjU4Nyw5MCwyOTkuNTg1LDkwLDI1NS45OTlzMTcuMjA2LTg0LjU4OCw0OC40NDgtMTE1LjQ1M0MxNjkuODIxLDEwOS41NSwyMTEuNTY4LDkyLjQ4MSwyNTYsOTIuNDgxIE0yNTYsNTIuNDgxIGMtMTEzLjc3MSwwLTIwNiw5MS4xMTctMjA2LDIwMy41MThjMCwxMTIuMzk4LDkyLjIyOSwyMDMuNTIsMjA2LDIwMy41MmMxMTMuNzcyLDAsMjA2LTkxLjEyMSwyMDYtMjAzLjUyQzQ2MiwxNDMuNTk5LDM2OS43NzIsNTIuNDgxLDI1Niw1Mi40ODFMMjU2LDUyLjQ4MXogTTIwNi41NDQsMzU3LjE2MVYxNTkuODMzbDE2MC45MTksOTguNjY2TDIwNi41NDQsMzU3LjE2MXoiPjwvcGF0aD48L3N2Zz4K);
+ height: 150px;
+ left: 50%;
+ margin-left: -75px;
+ margin-top: -75px;
+ position: absolute;
+ top: 50%;
+ -webkit-transition: 1s;
+ transition: 1s;
+ width: 150px
+}
+
+.flex-embed:hover .play-btn {
+ opacity: .5
+}
+
+.clearfix:after, .clearfix:before {
+ content: ' ';
+ display: table
+}
+
+.clearfix:after {
+ clear: both
+}
+
+.clearfix {
+ *zoom: 1
+}
+
+.container {
+ margin: 0 auto;
+ max-width: 760px;
+ padding: 0 10px
+}
+
+.aside {
+ background-color: #eee;
+ border: solid #e3e3e3;
+ border-width: 1px 0;
+ font-size: 1.125em;
+ padding: 1em 0
+}
+
+.btn, .cta-option {
+ display: inline-block;
+ position: relative
+}
+
+.cta-option {
+ margin: 2.5em .5em 0;
+ vertical-align: top
+}
+
+.btn {
+ color: #fff;
+ font-size: 1.5em;
+ padding: .6em 1em;
+ text-decoration: none;
+ text-shadow: 0 -1px 0 rgba(0, 0, 0, .5);
+ vertical-align: middle;
+ border-radius: 4px;
+ border: 1px solid #333
+}
+
+.btn:active, .btn:focus, .btn:hover {
+ text-decoration: none
+}
+
+.btn-download {
+ background-color: #d9750b;
+ background-image: -webkit-linear-gradient(#f90 10%, #e76a00 100%);
+ background-image: linear-gradient(#f90 10%, #e76a00 100%);
+ box-shadow: 0 1px 0 rgba(255, 255, 255, .5) inset, 0 1px 3px rgba(0, 0, 0, .2);
+ border: 1px solid #995309
+}
+
+.btn-download:active, .btn-download:focus, .btn-download:hover {
+ background-color: #e0811b;
+ background-image: -webkit-linear-gradient(#f0a100 10%, #f70 100%);
+ background-image: linear-gradient(#f0a100 10%, #f70 100%)
+}
+
+.btn-download:active {
+ background-color: #cf6a00;
+ box-shadow: 0 2px 3px 0 rgba(0, 0, 0, .2) inset
+}
+
+.btn-alt {
+ background-color: #444;
+ border-color: #222;
+ box-shadow: none;
+ font-size: 1.25em;
+ margin-top: .25em
+}
+
+.btn-alt:active, .btn-alt:focus, .btn-alt:hover {
+ background-color: #555
+}
+
+.star {
+ color: #e08524
+}
+
+.Icon {
+ display: inline-block;
+ height: 16px;
+ margin: -3px 1px 0 0;
+ vertical-align: middle;
+ width: 16px
+}
+
+.Icon--github {
+ background-image: url(data:image/svg+xml;base64,PHN2ZyB4bWxucz0iaHR0cDovL3d3dy53My5vcmcvMjAwMC9zdmciIHZpZXdCb3g9IjAgMCAxMjEgMTIxIj48ZyBmaWxsPSIjMTkxNzE3Ij48cGF0aCBmaWxsLXJ1bGU9ImV2ZW5vZGQiIGNsaXAtcnVsZT0iZXZlbm9kZCIgZD0iTTYwLjUgMS42Yy0zMy4zIDAtNjAuNCAyNy02MC40IDYwLjQgMCAyNi43IDE3LjMgNDkuMyA0MS4zIDU3LjMgMyAuNiA0LjEtMS4zIDQuMS0yLjkgMC0xLjQtLjEtNi4yLS4xLTExLjItMTYuNyAzLjYtMjAuMy03LjItMjAuMy03LjItMi43LTctNi43LTguOC02LjctOC44LTUuNS0zLjcuNC0zLjcuNC0zLjcgNi4xLjQgOS4zIDYuMiA5LjMgNi4yIDUuNCA5LjIgMTQuMSA2LjYgMTcuNiA1IC41LTMuOSAyLjEtNi42IDMuOC04LjEtMTMuNC0xLjQtMjcuNS02LjYtMjcuNS0yOS44IDAtNi42IDIuNC0xMiA2LjItMTYuMi0uNi0xLjUtMi43LTcuNy42LTE2IDAgMCA1LjEtMS42IDE2LjYgNi4yIDQuOC0xLjMgMTAtMiAxNS4xLTJzMTAuMy43IDE1LjEgMmMxMS41LTcuOCAxNi42LTYuMiAxNi42LTYuMiAzLjMgOC4zIDEuMiAxNC41LjYgMTYgMy45IDQuMiA2LjIgOS42IDYuMiAxNi4yIDAgMjMuMi0xNC4xIDI4LjMtMjcuNSAyOS44IDIuMiAxLjkgNC4xIDUuNSA0LjEgMTEuMiAwIDguMS0uMSAxNC42LS4xIDE2LjYgMCAxLjYgMS4xIDMuNSA0LjEgMi45IDI0LTggNDEuMy0zMC42IDQxLjMtNTcuMyAwLTMzLjQtMjctNjAuNC02MC40LTYwLjR6Ii8+PHBhdGggZD0iTTIzIDg4LjNjLS4xLjMtLjYuNC0xIC4ycy0uNy0uNi0uNS0uOWMuMS0uMy42LS40IDEtLjJzLjYuNi41Ljl6bS0uOC0uNU0yNS40IDkxYy0uMy4zLS45LjEtMS4yLS4zLS40LS40LS41LTEtLjItMS4zLjMtLjMuOC0uMSAxLjIuMy41LjUuNSAxLjEuMiAxLjN6bS0uNS0uNk0yNy44IDk0LjVjLS40LjMtMSAwLTEuMy0uNS0uNC0uNS0uNC0xLjIgMC0xLjQuNC0uMyAxIDAgMS4zLjUuNC41LjQgMS4xIDAgMS40em0wIDBNMzEuMSA5Ny45Yy0uMy40LTEgLjMtMS42LS4yLS41LS41LS43LTEuMi0uMy0xLjUuMy0uNCAxLS4zIDEuNi4yLjUuNC42IDEuMS4zIDEuNXptMCAwTTM1LjYgOTkuOGMtLjEuNS0uOC43LTEuNS41LS43LS4yLTEuMS0uOC0xLTEuMi4xLS41LjgtLjcgMS41LS41LjcuMiAxLjEuNyAxIDEuMnptMCAwTTQwLjUgMTAwLjJjMCAuNS0uNi45LTEuMy45LS43IDAtMS4zLS40LTEuMy0uOXMuNi0uOSAxLjMtLjljLjcgMCAxLjMuNCAxLjMuOXptMCAwTTQ1LjEgOTkuNGMuMS41LS40IDEtMS4xIDEuMS0uNy4xLTEuMy0uMi0xLjQtLjctLjEtLjUuNC0xIDEuMS0xLjEuNy0uMSAxLjMuMiAxLjQuN3ptMCAwIi8+PC9nPjwvc3ZnPgo=)
+}
+
+.Icon--html5 {
+ background-image: url(data:image/svg+xml;base64,PHN2ZyB4bWxucz0iaHR0cDovL3d3dy53My5vcmcvMjAwMC9zdmciIHZpZXdCb3g9IjUwIDEwMSA0MTIgNDEyIj48cGF0aCBmaWxsPSIjRTQ0RDI2IiBkPSJNMTA3LjY0NCA0NzAuODc3bC0zMy4wMTEtMzcwLjI1N2gzNjIuNzM0bC0zMy4wNDYgMzcwLjE5OS0xNDguNTQzIDQxLjE4MXoiLz48cGF0aCBmaWxsPSIjRjE2NTI5IiBkPSJNMjU2IDQ4MC41MjNsMTIwLjAzLTMzLjI3NyAyOC4yNC0zMTYuMzUyaC0xNDguMjd6Ii8+PHBhdGggZmlsbD0iI0VCRUJFQiIgZD0iTTI1NiAyNjguMjE3aC02MC4wOWwtNC4xNS00Ni41MDFoNjQuMjR2LTQ1LjQxMWgtMTEzLjg2OGwxLjA4NyAxMi4xODMgMTEuMTYxIDEyNS4xMzloMTAxLjYyem0wIDExNy45MzZsLS4xOTkuMDUzLTUwLjU3NC0xMy42NTYtMy4yMzMtMzYuMjE3aC00NS41ODVsNi4zNjIgNzEuMzAxIDkzLjAyIDI1LjgyMy4yMDktLjA1OHoiLz48cGF0aCBmaWxsPSIjZmZmIiBkPSJNMjU1Ljg0MyAyNjguMjE3djQ1LjQxaDU1LjkxOGwtNS4yNzEgNTguODk0LTUwLjY0NyAxMy42N3Y0Ny4yNDRsOTMuMDk0LTI1LjgwMS42ODMtNy42NzIgMTAuNjcxLTExOS41NTEgMS4xMDgtMTIuMTk0aC0xMi4yMzd6bTAtOTEuOTEydjQ1LjQxMWgxMDkuNjg4bC45MTEtMTAuMjA3IDIuMDY5LTIzLjAyMSAxLjA4Ni0xMi4xODN6Ii8+PC9zdmc+Cg==)
+}
+
+.Icon--stackoverflow {
+ background-image: url(data:image/svg+xml;base64,PHN2ZyB4bWxucz0iaHR0cDovL3d3dy53My5vcmcvMjAwMC9zdmciIHZpZXdCb3g9IjMgMi44IDU4LjIgNTgiPjxwYXRoIGQ9Ik05LjMwNSAzNi44NDhsNC40MDEuMDQzLS4xNTMgMTkuNTk4aDI5LjI5MXYtMTkuNTI4aDQuNjM4djI0LjI4N2gtMzguMjAxbC4wMjQtMjQuNDAxem03LjE3NyAxMS41ODZoMjIuOTQ1djQuODgyaC0yMi45NDV6IiBmaWxsPSIjOTE5MTkxIi8+PHBhdGggZmlsbD0iI2E3OGI2OCIgZD0iTTE3LjAyIDM5LjY0OGwyMi45NiAyLjIxNi0uNDgxIDQuOTgxLTIyLjk2LTIuMjE2eiIvPjxwYXRoIGZpbGw9IiNjMTk2NTMiIGQ9Ik0xOS4xMjEgMjkuNzEzbDIyLjIgNi4yNjYtMS4zNTkgNC44MTYtMjIuMi02LjI2NnoiLz48cGF0aCBmaWxsPSIjZDQ4YzI4IiBkPSJNMjQuNTAxIDE4LjQ4NGwxOS43NDUgMTEuOTI2LTIuNTg3IDQuMjgzLTE5Ljc0NS0xMS45MjZ6Ii8+PHBhdGggZmlsbD0iI2ZlODkwOCIgZD0iTTM1LjczMyA3Ljg0OWwxMy40MzUgMTguNzUxLTQuMDY4IDIuOTE0LTEzLjQzNS0xOC43NTF6Ii8+PHBhdGggZmlsbD0iI2ZmN2ExNSIgZD0iTTUxLjM0IDIuNzUxbDMuODAyIDIyLjc1Mi00LjkzNi44MjUtMy44MDItMjIuNzUyeiIvPjwvc3ZnPgo=)
+}
+
+.site-header {
+ padding-top: 50px
+}
+
+.site-logo {
+ color: #fff;
+ float: left;
+ font-size: 25px;
+ font-weight: 700;
+ line-height: 32px;
+ text-decoration: none;
+ text-shadow: 2px 2px 0 #000;
+ text-transform: uppercase
+}
+
+.site-nav {
+ float: right;
+ list-style-type: none;
+ margin: 7px 0 0;
+ padding: 0
+}
+
+.site-nav a {
+ color: #ffa000;
+ display: block;
+ text-decoration: none;
+ text-transform: uppercase
+}
+
+.site-nav a:active, .site-nav a:focus, .site-nav a:hover {
+ color: #fff
+}
+
+.site-promo {
+ padding: 4em 0 6em;
+ color: white;
+}
+
+.site-promo .description {
+ color: #ddd;
+ font-size: 1.2em;
+ margin: 1em 2em 0
+}
+
+.last-update {
+ color: #999;
+ display: block;
+ font-size: .75em;
+ margin-top: 10px
+}
+
+.site-section {
+ background-color: #f9f9f9;
+ color: #333;
+ overflow: hidden;
+ padding: 2em 0 2em
+}
+
+.site-section-gray {
+ background-color: #f5f5f5;
+ color: #333;
+ overflow: hidden;
+ padding: 2em 0 2em
+}
+
+.site-section-dark {
+ background-color: transparent;
+ color: #fff;
+ text-align: center;
+ padding: 2em 0 3em
+}
+
+.site-section-dark .content {
+ max-width: 720px;
+ margin: auto;
+ padding: 10px
+}
+
+.site-section-dark h2 {
+ margin: 1em 0
+}
+.site-section-dark h3 {
+ color: white;
+}
+
+.in-the-wild {
+ font-size: 1.25em;
+ margin: 0 auto;
+ max-width: 720px
+}
+
+.site-footer {
+ font-size: .875em;
+ padding: 2em
+}
+
+.site-footer a {
+ color: #ffa000
+}
+
+@media only screen and (max-width: 800px) {
+ .site-logo, .site-nav {
+ float: none
+ }
+
+ .site-nav li {
+ margin: 0 .5em
+ }
+
+ .site-header {
+ padding-top: 40px
+ }
+
+ .site-promo {
+ padding: 3em 0;
+ color: white;
+ }
+
+ .site-section {
+ padding: 0 1em 4em
+ }
+}
+
+@media only screen and (max-width: 600px) {
+ html {
+ font-size: 14px
+ }
+
+ .last-update, .site-footer {
+ font-size: 1em
+ }
+}
+
+@media only screen and (max-width: 460px) {
+ .grid-cell {
+ width: 100%
+ }
+}
+
+@media only screen and (max-width: 420px) {
+ h1 {
+ font-size: 2.5em
+ }
+
+ html {
+ font-size: 13px
+ }
+}
+
+@media print {
+ * {
+ background-color: transparent !important;
+ box-shadow: none !important;
+ color: #000 !important;
+ text-shadow: none !important
+ }
+
+ a, a:visited {
+ text-decoration: underline
+ }
+
+ img {
+ page-break-inside: avoid;
+ max-width: 100% !important
+ }
+
+ h1 {
+ padding: 1em 0 0
+ }
+
+ .site-promo {
+ margin: 1em;
+ padding: 0;
+ color: white;
+ }
+
+ .site-section {
+ padding: 0;
+ margin: 2em 1em
+ }
+
+ .site-section-dark {
+ display: none
+ }
+
+ h2, h3, p {
+ orphans: 3;
+ widows: 3
+ }
+
+ h2, h3 {
+ page-break-after: avoid
+ }
+}
+</style>
+
+<script>
+ // --- OPTIONS FOR THE DASHBOARD --
+
+ // this section has to appear before loading dashboard.js
+
+ // Select a theme.
+    // uncomment one of the two themes:
+
+ // var netdataTheme = 'default'; // this is white
+ var netdataTheme = 'slate'; // this is dark
+
+    // do not let dashboard.js load bootstrap (this page has its own styling)
+    var netdataNoBootstrap = true;
+
+ // Set the default netdata server.
+ // on charts without a 'data-host', this one will be used.
+ // the default is the server that dashboard.js is downloaded from.
+
+ // var netdataServer = 'http://my.server:19999/';
+</script>
+
+<!--
+ --- LOAD dashboard.js ---
+
+ to host this HTML file on your web server,
+ you have to load dashboard.js from the netdata server.
+
+    So, pick one of the two options below.
+    If you pick the first, set the server name/IP.
+
+    The second assumes you host this file in /usr/share/netdata/web
+    and that you have changed its ownership to netdata:netdata.
+-->
+<!-- <script type="text/javascript" src="http://my.server:19999/dashboard.js"></script> -->
+<script type="text/javascript" src="dashboard.js?v20190902-0"></script>
+
+<script>
+ // --- OPTIONS FOR THE CHARTS --
+
+    // destroy charts not shown (lowers memory usage in the browser)
+ // set this to 'true' to destroy, 'false' to hide the charts
+ NETDATA.options.current.destroy_on_hide = false;
+
+ // set this to false, to always show all dimensions
+ NETDATA.options.current.eliminate_zero_dimensions = true;
+
+ // set this to false, to lower the pressure on the browser
+ NETDATA.options.current.concurrent_refreshes = true;
+
+ // if you need to support slow mobile phones, set this to false
+ NETDATA.options.current.parallel_refresher = true;
+
+ // set this to false, to always update the charts, even if focus is lost
+ NETDATA.options.current.stop_updates_when_focus_is_lost = true;
+
+ // since we have many servers and limited sockets,
+ // abort ajax calls when we scroll
+ NETDATA.options.current.abort_ajax_on_scroll = true;
+
+    // do not give errors on the netdata demo servers; retry up to 60 times
+ NETDATA.options.current.retries_on_data_failures = 60;
+</script>
+
+<style>
+ .mygauge-combo {
+ display: inline-block;
+ }
+
+ .mygauge-combo20 {
+ display: inline-block;
+ min-width: 150px;
+ width: 49%;
+ padding-top: 40px;
+ text-align: center;
+ }
+
+ .mygauge-combo30 {
+ display: inline-block;
+ min-width: 150px;
+ width: 32%;
+ padding-top: 40px;
+ text-align: center;
+ }
+
+ .mygauge {
+ position: relative;
+ display: block;
+ width: 171px;
+ /* height: 150px; */
+ }
+
+ .mygauge-button {
+ display: block;
+ }
+
+ .mygauge-legend-button {
+ font-size: 13px;
+ }
+
+    .mygauge-donation {
+ font-size: 9px;
+ color: #999;
+ }
+
+ .mysparkline {
+ position: relative;
+ display: inline-block;
+ width: 100%;
+ height: 50px;
+ text-align: left;
+ }
+
+ .mysparkline-overchart-label {
+ position: absolute;
+ display: block;
+ top: -15px;
+ left: 10px;
+ bottom: 0;
+ right: 0;
+ font-size: 14px;
+ z-index: 1;
+ pointer-events: none;
+ }
+
+ .mysparkline-overchart-label2 {
+ position: absolute;
+ display: block;
+ top: -15px;
+ left: 10px;
+ bottom: 0;
+ right: 0;
+ font-size: 8px;
+ color: #676b70;
+ z-index: 1;
+ pointer-events: none;
+ }
+
+ .mysparkline-overchart-value {
+ position: absolute;
+ display: block;
+ top: 0px;
+ left: 10px;
+ bottom: 0;
+ right: 0;
+ font-size: 40px;
+ z-index: 2;
+ text-shadow: #333 0px 0px 2px;
+ pointer-events: none;
+ }
+
+ .mysparkline-overchart-value-center {
+ position: absolute;
+ display: block;
+ top: 5px;
+ left: 0px;
+ bottom: 0;
+ right: 0;
+ font-size: 35px;
+ font-weight: bold;
+ text-align: center;
+ z-index: 2;
+ text-shadow: #333 0px 0px 2px;
+ pointer-events: none;
+ }
+
+ .fb-share-button span {
+ top: 0px;
+ }
+ .fb-like span {
+ top: 0px;
+ }
+ .fb-follow span {
+ top: 0px;
+ }
+
+</style>
+</head>
+<body>
+<div class=container>
+ <div class="site-header clearfix" role=banner>
+ <div class=site-logo>my-netdata.io</div>
+ <ul class="site-nav inline-block-list">
+ <li><a href=https://github.com/netdata/netdata data-ga-category="Outbound links" data-ga-action="Nav click" data-ga-label="Source code" target="_blank">Source code</a>
+ <li><a href=https://docs.netdata.cloud data-ga-category="Outbound links" data-ga-action="Nav click" data-ga-label=Docs target="_blank">Docs</a>
+ </ul>
+ </div>
+ <div class=site-promo><h1><span class="title">Monitor your systems and applications, the right way!</span></h1>
+ <p class=description>
+ <strong>Unparalleled</strong> insights, in <strong>real-time</strong>,
+ of <strong>everything</strong> happening on your systems and applications,
+ with stunning, <strong>interactive</strong> web dashboards
+            and powerful <strong>performance</strong> and <strong>health</strong> alarms.
+        </p>
+        <div class=cta-option>
+ <a class="btn btn-download" href="https://docs.netdata.cloud/packaging/installer/" data-ga-category="Outbound links" data-ga-action="Nav click" data-ga-label=Install><strong>Install netdata now</strong></a>
+ <a class=last-update href="https://github.com/netdata/netdata/releases" data-ga-category="Outbound links" data-ga-action="Nav click" data-ga-label=Releases>See netdata releases</a></div>
+ <div class=cta-option>
+ <a class="btn btn-alt" href="#demosites" data-ga-category="Outbound links" data-ga-action="Nav click" data-ga-label=Demo>netdata live demo</a>
+ </div>
+ </div>
+</div>
+<div class=site-section>
+ <div class=container><h2>Enter the world of Netdata!</h2>
+ <div class="grid-title">
+ <h3><span class=star>&#x2605;</span> 1s granularity</h3>
+ </div>
+ <div class=grid>
+ <div class=grid-cell>
+ <p>
+ <b>Per second</b> data collection and visualization, for all metrics!
+ </p>
+ <p>
+                    Netdata <b>zooms into problems</b> by providing higher resolution information than any other monitoring solution.
+ </p>
+ </div>
+ <div class=grid-cell>
+ <p class="quote"><i>
+ The world goes real-time.
+ <br/>&nbsp;<br/>
+ High resolution metrics are required to effectively monitor and troubleshoot systems and applications, especially on virtual environments.
+ </i></p>
+ </div>
+ </div>
+ </div>
+ <div>
+ <a href="https://docs.netdata.cloud/docs/why-netdata/1s-granularity/">Learn more about high resolution metrics</a>
+ </div>
+</div>
+
+<div class="site-section site-section-gray">
+ <div class=container>
+ <div class="grid-title">
+ <h3><span class=star>&#x2605;</span> Unlimited metrics</h3>
+ </div>
+ <div class=grid>
+ <div class=grid-cell>
+ <p>
+ Use all the metrics, from all available sources!
+ </p>
+ <p>
+ Netdata collects all the metrics native console tools do. It has been <b>designed to kill the console</b> for troubleshooting infrastructure slowdowns and outages.
+ </p>
+ </div>
+ <div class=grid-cell>
+ <p class="quote"><i>
+ All metrics are important and all should be available when you need them.
+ <br/>&nbsp;<br/>
+ Filtering out most metrics is like reading a book by skipping most of its pages.
+ </i></p>
+ </div>
+ </div>
+ </div>
+ <div>
+ <a href="https://docs.netdata.cloud/docs/why-netdata/unlimited-metrics/">Learn more about unlimited metrics</a>
+ </div>
+</div>
+<div class="site-section">
+ <div class=container>
+ <div class="grid-title">
+ <h3><span class=star>&#x2605;</span> Meaningful presentation</h3>
+ </div>
+ <div class=grid>
+ <div class=grid-cell>
+ <p>
+ Explore all metrics in a meaningful, easy to understand way!
+ </p>
+ <p>
+                    Netdata engineers and experts in our community organize metrics in a meaningful way, so that you can learn and understand them right on the job, while troubleshooting issues in your infrastructure.
+ </p>
+ </div>
+ <div class=grid-cell>
+ <p class="quote"><i>
+ Metrics are a lot more than name-value pairs over time.
+ <br/> <br/>
+                    It is just not practical to require all users to have a deep understanding of all metrics for monitoring their systems and applications.
+ </i></p>
+ </div>
+ </div>
+ </div>
+ <div>
+ <a href="https://docs.netdata.cloud/docs/why-netdata/meaningful-presentation/">Learn more about meaningful presentation</a>
+ </div>
+</div>
+
+<div class="site-section site-section-gray">
+ <div class=container>
+ <div class="grid-title">
+ <h3><span class=star>&#x2605;</span> Immediate results</h3>
+ </div>
+ <div class=grid>
+ <div class=grid-cell>
+ <p>
+ Install and use immediately! Get fully functional visualization and alarms, in just a couple of seconds after installation!
+ </p>
+ <p>
+ Netdata <b>decouples your skills from your monitoring infrastructure</b>.
+ No matter how skillful or novice you are, Netdata will apply all the community knowledge and expertise to your monitoring infrastructure.
+ </p>
+ </div>
+ <div class=grid-cell>
+ <p class="quote"><i>
+ Most of our infrastructure is based on standardized systems and applications.
+ <br/>&nbsp;<br/>
+                    It is a tremendous waste of time and effort, on a global scale, to require all users to configure their infrastructure dashboards and alarms metric by metric.
+ </i></p>
+ </div>
+ </div>
+ </div>
+ <div>
+ <a href="https://docs.netdata.cloud/docs/why-netdata/immediate-results/">Learn more about immediate results</a>
+ </div>
+</div>
+
+<div class="site-section site-section-dark">
+ <div class=container><h2>How it works</h2>
+ <div style="padding-bottom: 1em">
+ <p>
+ Netdata is a monitoring agent you install on all your systems:
+ <br/>
+ <b>physical servers</b>, <b>virtual servers</b>, <b>containers</b>, <b>IoT</b>.
+ </p>
+ <p>
+                Netdata is lightweight, designed to run permanently on all systems without disrupting their core function.
+                By default, it needs just 1% of a single CPU core, a few MB of RAM and no disk I/O at all.
+ </p>
+ <p>
+ Each Netdata is (by default) autonomous, taking care of all the following.
+                <br/>Yet all your Netdata nodes integrate into one large, distributed application.
+ </p>
+ </div>
+ <div class=grid>
+ <div class=grid-cell><h3><span class=star>&#x2605;</span> Collect</h3>
+ <p>
+ Netdata automatically detects data collection sources on the host it runs.
+ It comes with hundreds of plugins for collecting system and application metrics,
+ including databases, web servers, and commonly used application servers.
+ <br/> <br/>
+ Netdata is also a high performance, distributed <b>statsd server</b>, allowing custom
+ application metrics to be collected and visualized.
+ </p>
+ </div>
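+            <!--
+                For illustration, a hypothetical custom metric sent to the
+                built-in statsd collector (netdata's statsd server listens on
+                port 8125 by default; the metric name is made up):
+
+                echo "myapp.requests:1|c" | nc -u -w1 localhost 8125
+            -->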
+ <div class=grid-cell><h3><span class=star>&#x2605;</span> Check (alarms)</h3>
+ <p>
+ Each Netdata spawns a thread that examines the metrics as they get collected,
+ evaluates pre-configured alarm expressions and triggers alarm notifications.
+ <br/> <br/>
+                Netdata comes with hundreds of alarms that detect common system and application issues.
+                They are attached automatically to the collected metrics,
+                and dozens of alarm notification integrations are supported.
+ </p>
+ </div>
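+            <!--
+                For illustration, a minimal sketch of what a netdata health
+                alarm looks like, in the health.d configuration syntax
+                (the name and thresholds here are hypothetical):
+
+                 alarm: my_cpu_usage
+                    on: system.cpu
+                lookup: average -1m unaligned of user,system
+                 every: 10s
+                  warn: $this > 75
+                  crit: $this > 90
+                  info: average CPU utilization of the last minute
+            -->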
+ <div class=grid-cell><h3><span class=star>&#x2605;</span> Stream</h3>
+ <p>
+ Each Netdata can stream its metrics, in real-time, to any other Netdata.
+ Streaming allows Netdata to be used in ephemeral nodes and containers in auto-scaled environments,
+ but it also allows building Netdata hierarchies for aggregating the metrics of multiple Netdata nodes.
+ </p>
+ </div>
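+            <!--
+                For illustration, a minimal sketch of streaming, configured in
+                stream.conf (the destination and API key are placeholders).
+                On the sending Netdata:
+
+                [stream]
+                    enabled = yes
+                    destination = parent.example.com:19999
+                    api key = 11111111-2222-3333-4444-555555555555
+
+                On the receiving Netdata, a section named after the same API
+                key, with 'enabled = yes', accepts the metrics.
+            -->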
+ <div class=grid-cell><h3><span class=star>&#x2605;</span> Store</h3>
+ <p>
+                Each Netdata has its own internal metrics database. This database is optimized
+                for a minimal memory footprint; its lockless design allows one writer and
+                multiple readers per metric, concurrently, contributing significantly to
+                the performance of Netdata.
+ </p>
+ </div>
+ <div class=grid-cell><h3><span class=star>&#x2605;</span> Archive</h3>
+ <p>
+                Netdata can archive its metrics to time-series databases (Prometheus, Graphite, OpenTSDB, JSON document DBs, etc.)
+                so that it can be integrated into existing monitoring tool-chains.
+ </p>
+ </div>
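+            <!--
+                For illustration, a minimal sketch of metrics archiving via the
+                [backend] section of netdata.conf (host and port are placeholders):
+
+                [backend]
+                    enabled = yes
+                    type = graphite
+                    destination = graphite.example.com:2003
+                    update every = 10
+            -->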
+ <div class=grid-cell><h3><span class=star>&#x2605;</span> Visualize</h3>
+ <p>
+ The best part of Netdata is its visualization. Low latency, speedy and snazzy.
+ <br/> <br/>
+ Netdata dashboards are optimized for visual anomaly detection, a powerful tool to troubleshoot
+ performance issues.
+ </p>
+ </div>
+ </div>
+ </div>
+</div>
+
+<div class="site-section">
+ &nbsp;
+</div>
+
+
+<div id="demosites" class="site-section site-section-dark"><h2>netdata live demo sites</h2>
+ <div class="content">
+ <div class="container" style="text-align: center;">
+
+ <div class="mygauge-combo">
+ <div class="mygauge">
+ <div data-netdata="netdata.requests"
+ data-host="//london.my-netdata.io"
+ data-title="EU - London"
+ data-chart-library="gauge"
+ data-decimal-digits="0"
+ data-common-max="top-gauges"
+ data-width="100%"
+ data-after="-300"
+ data-points="300"
+ data-colors="#558855"
+ ></div>
+ </div>
+ <div class="mygauge-button">
+ <a class="btn btn-alt mygauge-legend-button" href=//london.my-netdata.io/default.html data-ga-category="Outbound links" data-ga-action="Nav click" data-ga-label=DemoLondon><strong>Enter London!</strong></a>
+                <div class="mygauge-donation">
+ Donated by DigitalOcean.com
+ </div>
+ </div>
+ </div>
+ <div class="mygauge-combo">
+ <div class="mygauge">
+ <div data-netdata="netdata.requests"
+ data-host="//atlanta.my-netdata.io"
+ data-title="US - Atlanta"
+ data-chart-library="gauge"
+ data-decimal-digits="0"
+ data-common-max="top-gauges"
+ data-width="100%"
+ data-after="-300"
+ data-points="300"
+ data-colors="#AA5555"
+ ></div>
+ </div>
+ <div class="mygauge-button">
+ <a class="btn btn-alt mygauge-legend-button" href=//atlanta.my-netdata.io/default.html data-ga-category="Outbound links" data-ga-action="Nav click" data-ga-label=DemoAtlanta><strong>Enter Atlanta!</strong></a>
+                <div class="mygauge-donation">
+ Donated by CDN77.com
+ </div>
+ </div>
+ </div>
+ <div class="mygauge-combo">
+ <div class="mygauge">
+ <div data-netdata="netdata.requests"
+ data-host="//sanfrancisco.my-netdata.io"
+ data-title="US - California"
+ data-chart-library="gauge"
+ data-decimal-digits="0"
+ data-common-max="top-gauges"
+ data-width="100%"
+ data-after="-300"
+ data-points="300"
+ data-colors="#5555AA"
+ ></div>
+ </div>
+ <div class="mygauge-button">
+ <a class="btn btn-alt mygauge-legend-button" href=//sanfrancisco.my-netdata.io/default.html data-ga-category="Outbound links" data-ga-action="Nav click" data-ga-label=DemoSanfrancisco><strong>Enter California!</strong></a>
+                <div class="mygauge-donation">
+ Donated by DigitalOcean.com
+ </div>
+ </div>
+ </div>
+ <div class="mygauge-combo">
+ <div class="mygauge">
+ <div data-netdata="netdata.requests"
+ data-host="//toronto.my-netdata.io"
+ data-title="Canada"
+ data-chart-library="gauge"
+ data-decimal-digits="0"
+ data-common-max="top-gauges"
+ data-width="100%"
+ data-after="-300"
+ data-points="300"
+ data-colors="#885588"
+ ></div>
+ </div>
+ <div class="mygauge-button">
+ <a class="btn btn-alt mygauge-legend-button" href=//toronto.my-netdata.io/default.html data-ga-category="Outbound links" data-ga-action="Nav click" data-ga-label=DemoToronto><strong>Enter Canada!</strong></a>
+                <div class="mygauge-donation">
+ Donated by DigitalOcean.com
+ </div>
+ </div>
+ </div>
+ <br/>&nbsp;<br/>
+ <div class="mygauge-combo">
+ <div class="mygauge">
+ <div data-netdata="netdata.requests"
+ data-host="//frankfurt.my-netdata.io"
+ data-title="EU - Germany"
+ data-chart-library="easypiechart"
+ data-decimal-digits="0"
+ data-common-max="top-gauges"
+ data-width="75%"
+ data-after="-300"
+ data-points="300"
+ data-colors="#AAAA55"
+ ></div>
+ </div>
+ <div class="mygauge-button">
+ <a class="btn btn-alt mygauge-legend-button" href=//frankfurt.my-netdata.io/default.html data-ga-category="Outbound links" data-ga-action="Nav click" data-ga-label=DemoFrankfurt><strong>Enter Germany!</strong></a>
+                <div class="mygauge-donation">
+ Donated by DigitalOcean.com
+ </div>
+ </div>
+ </div>
+ <div class="mygauge-combo">
+ <div class="mygauge">
+ <div data-netdata="netdata.requests"
+ data-host="//newyork.my-netdata.io"
+ data-title="US - New York"
+ data-chart-library="easypiechart"
+ data-decimal-digits="0"
+ data-common-max="top-gauges"
+ data-width="75%"
+ data-after="-300"
+ data-points="300"
+ data-colors="#BB5533"
+ ></div>
+ </div>
+ <div class="mygauge-button">
+ <a class="btn btn-alt mygauge-legend-button" href=//newyork.my-netdata.io/default.html data-ga-category="Outbound links" data-ga-action="Nav click" data-ga-label=DemoNewYork><strong>Enter New York!</strong></a>
+                <div class="mygauge-donation">
+ Donated by DigitalOcean.com
+ </div>
+ </div>
+ </div>
+ <div class="mygauge-combo">
+ <div class="mygauge">
+ <div data-netdata="netdata.requests"
+ data-host="//singapore.my-netdata.io"
+ data-title="Singapore"
+ data-chart-library="easypiechart"
+ data-decimal-digits="0"
+ data-common-max="top-gauges"
+ data-width="75%"
+ data-after="-300"
+ data-points="300"
+ data-colors="#5588BB"
+ ></div>
+ </div>
+ <div class="mygauge-button">
+ <a class="btn btn-alt mygauge-legend-button" href=//singapore.my-netdata.io/default.html data-ga-category="Outbound links" data-ga-action="Nav click" data-ga-label=DemoSingapore><strong>Enter Singapore!</strong></a>
+                <div class="mygauge-donation">
+ Donated by DigitalOcean.com
+ </div>
+ </div>
+ </div>
+ <div class="mygauge-combo">
+ <div class="mygauge">
+ <div data-netdata="netdata.requests"
+ data-host="//bangalore.my-netdata.io"
+ data-title="India"
+ data-chart-library="easypiechart"
+ data-decimal-digits="0"
+ data-common-max="top-gauges"
+ data-width="75%"
+ data-after="-300"
+ data-points="300"
+ data-colors="#BB55BB"
+ ></div>
+ </div>
+ <div class="mygauge-button">
+ <a class="btn btn-alt mygauge-legend-button" href=//bangalore.my-netdata.io/default.html data-ga-category="Outbound links" data-ga-action="Nav click" data-ga-label=DemoBangalore><strong>Enter India!</strong></a>
+                <div class="mygauge-donation">
+ Donated by DigitalOcean.com
+ </div>
+ </div>
+ </div>
+ <div style="padding-top: 20px;">
+ <div class="mygauge-combo">
+ <div class="mygauge">
+ <div style="padding-bottom: 20px; font-size: 10px; color: #676b70;">
+ <b>Israel</b>
+ </div>
+ <div class="mysparkline">
+ <div class="mysparkline-overchart-label2">
+ requests/s
+ </div>
+ <div class="mysparkline-overchart-value" id="octopuscs.requests.netdata" >
+ </div>
+ <div data-netdata="netdata.requests"
+ data-dimensions="requests"
+ data-host="//octopuscs.my-netdata.io"
+ data-common-max="top-gauges"
+ data-decimal-digits="0"
+ data-chart-library="dygraph"
+ data-dygraph-theme="sparkline"
+ data-dygraph-type="area"
+ data-width="100%"
+ data-height="100%"
+ data-after="-300"
+ data-colors="#4BFF91"
+ data-show-value-of-requests-at="octopuscs.requests.netdata"
+ ></div>
+ </div>
+ </div>
+ <div class="mygauge-button">
+ <a class="btn btn-alt mygauge-legend-button" href=//octopuscs.my-netdata.io/default.html data-ga-category="Outbound links" data-ga-action="Nav click" data-ga-label=DemoOctopuscs><strong>Enter Israel!</strong></a>
+                    <div class="mygauge-donation">
+ Donated by octopuscs.com
+ </div>
+ </div>
+ </div>
+
+ <div class="mygauge-combo">
+ <div class="mygauge">
+ <div style="padding-bottom: 20px; font-size: 10px; color: #676b70;">
+ <b>EU - Spain</b>
+ </div>
+ <div class="mysparkline">
+ <div class="mysparkline-overchart-label2">
+ requests/s
+ </div>
+ <div class="mysparkline-overchart-value" id="stackscale.requests.netdata" >
+ </div>
+ <div data-netdata="netdata.requests"
+ data-dimensions="requests"
+ data-host="//stackscale.my-netdata.io"
+ data-common-max="top-gauges"
+ data-decimal-digits="0"
+ data-chart-library="dygraph"
+ data-dygraph-theme="sparkline"
+ data-dygraph-type="area"
+ data-width="100%"
+ data-height="100%"
+ data-after="-300"
+ data-colors="#4B91FF"
+ data-show-value-of-requests-at="stackscale.requests.netdata"
+ ></div>
+ </div>
+ </div>
+ <div class="mygauge-button">
+ <a class="btn btn-alt mygauge-legend-button" href=//stackscale.my-netdata.io/default.html data-ga-category="Outbound links" data-ga-action="Nav click" data-ga-label=DemoStackScale><strong>Enter Madrid!</strong></a>
+                    <div class="mygauge-donation">
+ Donated by stackscale.com
+ </div>
+ </div>
+ </div>
+ <div class="mygauge-combo">
+ <div class="mygauge">
+ </div>
+ </div>
+ </div>
+ </div>
+
+ <div class="container" style="padding-top: 40px; text-align: center;">
+ Charts are coming from all servers, <b>in parallel</b>.
+ <br/>
+ The servers are <b>not aware</b> of this multi-server dashboard.
+ </div>
+
+ <div class="container" style="padding-top: 40px; padding-bottom: 40px; text-align: center;">
+ <div class="mysparkline">
+ <div class="mysparkline-overchart-label">
+ <b>EU - London</b> connected clients
+ </div>
+ <div class="mysparkline-overchart-value" id="nginx_local.connections.netdata" >
+ </div>
+ <div data-netdata="nginx_local.connections"
+ data-dimensions="active"
+ data-host="//london.my-netdata.io"
+ data-decimal-digits="0"
+ data-common-max="web-connections"
+ data-chart-library="dygraph"
+ data-dygraph-theme="sparkline"
+ data-dygraph-type="area"
+ data-width="100%"
+ data-height="100%"
+ data-after="-300"
+ data-colors="#558855"
+ data-show-value-of-active-at="nginx_local.connections.netdata"
+ ></div>
+ </div>
+ </div>
+
+ <div class="container" style="padding-top: 0px; text-align: center;">
+ Each server is <b>not aware</b> of the other servers.
+ <br/>
+ But on this dashboard <b>they are one</b>! (hover on the chart above)
+ </div>
+
+
+ <!--
+ <div style="padding-top: 40px; color: #999;">
+ <small>We would love to show demos of IoT devices running netdata.<br/>
+ If you can host at your DC an RPi or a Linux IoT, <a href="mailto:costa@tsaousis.gr?subject=I can host IoT for netdata&body=Hi Costa,%0D%0A%0D%0AI would love to host an IoT device to demo netdata on it.%0D%0A%0D%0A-- please tell me who you are and what infrastructure you have --%0D%0A-- Take into account I would need SSH access to it --%0D%0A-- You have to have a DC - a home is not good enough - sorry. --%0D%0A%0D%0AThanks!">contact me</a>.</small>
+ </div>
+ -->
+ </div>
+</div>
+
+<div class=site-section><h2>Who uses netdata?</h2>
+ <div class="content">
+ <div class="container" style="text-align: center;">
+ <p>
+ Netdata is used by hundreds of thousands of users all over the world.
+ <br/>&nbsp;<br/>
+ Check our <a href="https://github.com/netdata/netdata/watchers">GitHub watchers</a> list.
+ <br/>
+ You will find people working for <b>Amazon</b>, <b>Atos</b>, <b>Baidu</b>, <b>Cisco Systems</b>, <b>Citrix</b>,
+ <b>Deutsche Telekom</b>, <b>DigitalOcean</b>, <b>Elastic</b>, <b>EPAM Systems</b>, <b>Ericsson</b>, <b>Google</b>,
+ <b>Groupon</b>, <b>Hortonworks</b>, <b>HP</b>, <b>Huawei</b>, <b>IBM</b>, <b>Microsoft</b>, <b>NewRelic</b>,
+ <b>Nvidia</b>, <b>Red Hat</b>, <b>SAP</b>, <b>Selectel</b>, <b>TicketMaster</b>, <b>Vimeo</b>, and many more!
+ </p>
+ <small>
+ The following figures come from users using the <a href="https://github.com/netdata/netdata/tree/master/registry" target="_blank" data-ga-category="Outbound links" data-ga-action="Nav click" data-ga-label=GlobalRegistry>netdata public global registry</a>.<br/>Counting since May 16th 2016. Actual figures may be a lot higher.<br/></small>
+ <div class="container" style="padding-top: 40px; text-align: center; width: 30%; min-width: 220px; display: inline-block;">
+ <div class="mysparkline">
+ <div class="mysparkline-overchart-label">
+ netdata <b>unique users</b>
+ </div>
+ <div class="mysparkline-overchart-value-center" id="netdata.registry_entries.persons.netdata" >
+ </div>
+ <div data-netdata="netdata.registry_entries"
+ data-dimensions="persons"
+ data-host="//london.my-netdata.io"
+ data-decimal-digits="0"
+ data-chart-library="dygraph"
+ data-dygraph-theme="sparkline"
+ data-dygraph-type="area"
+ data-width="100%"
+ data-height="100%"
+ data-after="-300"
+ data-colors="#558855"
+ data-show-value-of-persons-at="netdata.registry_entries.persons.netdata"
+ ></div>
+ </div>
+ </div>
+ <div class="container" style="padding-top: 40px; text-align: center; width: 30%; min-width: 220px; display: inline-block;">
+ <div class="mysparkline">
+ <div class="mysparkline-overchart-label">
+ netdata <b>monitored servers</b>
+ </div>
+ <div class="mysparkline-overchart-value-center" id="netdata.registry_entries.machines.netdata" >
+ </div>
+ <div data-netdata="netdata.registry_entries"
+ data-dimensions="machines"
+ data-host="//london.my-netdata.io"
+ data-decimal-digits="0"
+ data-chart-library="dygraph"
+ data-dygraph-theme="sparkline"
+ data-dygraph-type="area"
+ data-width="100%"
+ data-height="100%"
+ data-after="-300"
+ data-colors="#558855 #558855 #558855"
+ data-show-value-of-machines-at="netdata.registry_entries.machines.netdata"
+ ></div>
+ </div>
+ </div>
+ <div class="container" style="padding-top: 40px; text-align: center; width: 30%; min-width: 220px; display: inline-block;">
+ <div class="mysparkline">
+ <div class="mysparkline-overchart-label">
+ netdata <b>sessions served</b>
+ </div>
+ <div class="mysparkline-overchart-value-center" id="netdata.registry_sessions.sessions.netdata" >
+ </div>
+ <div data-netdata="netdata.registry_sessions"
+ data-dimensions="sessions"
+ data-host="//london.my-netdata.io"
+ data-decimal-digits="0"
+ data-chart-library="dygraph"
+ data-dygraph-theme="sparkline"
+ data-dygraph-type="area"
+ data-width="100%"
+ data-height="100%"
+ data-after="-300"
+ data-colors="#558855 #558855 #558855"
+ data-show-value-of-sessions-at="netdata.registry_sessions.sessions.netdata"
+ ></div>
+ </div>
+ </div>
+ <p>
+
+ <!--
+ <embed src="//registry.my-netdata.io/api/v1/badge.svg?chart=netdata.registry_entries&dimensions=persons&label=user%20base&units=null&value_color=blue&precision=0&refresh=30&v42" type="image/svg+xml" height="20" />
+ <embed src="//registry.my-netdata.io/api/v1/badge.svg?chart=netdata.registry_entries&dimensions=machines&label=servers%20monitored&units=null&value_color=orange&precision=0&refresh=30&v42" type="image/svg+xml" height="20" />
+ <embed src="//registry.my-netdata.io/api/v1/badge.svg?chart=netdata.registry_sessions&label=sessions%20served&units=null&value_color=yellowgreen&precision=0&refresh=30&v42" type="image/svg+xml" height="20" />
+ <br/><i>(figures come from <a href="https://github.com/netdata/netdata/tree/master/registry" target="_blank">the public netdata registry</a> data, showing only installations that use this registry, counting since May 16th 2016)</i>
+ <br/>
+ -->
+ </p>
+ <p>
+ <small>
+ netdata can generate auto-refreshing <strong><a href="https://github.com/netdata/netdata/tree/master/web/api/badges#netdata-badges" target="_blank" data-ga-category="Outbound links" data-ga-action="Nav click" data-ga-label=Badges>badges</a></strong>, like these:
+ </small>
+ <br/>
+ <embed style="padding-top: 10px; padding-bottom: 25px;" src="//registry.my-netdata.io/api/v1/badge.svg?chart=netdata.registry_entries&dimensions=persons&after=-86400&options=unaligned&group=incremental-sum&label=new%20users%20today&units=null&value_color=blue&precision=0&refresh=60&v42" type="image/svg+xml" height="20" />
+ <embed style="padding-top: 10px; padding-bottom: 25px;" src="//registry.my-netdata.io/api/v1/badge.svg?chart=netdata.registry_entries&dimensions=machines&group=incremental-sum&after=-86400&options=unaligned&label=servers%20added%20today&units=null&value_color=orange&precision=0&refresh=60&v42" type="image/svg+xml" height="20" />
+ <embed style="padding-top: 10px; padding-bottom: 25px;" src="//registry.my-netdata.io/api/v1/badge.svg?chart=netdata.registry_sessions&after=-86400&group=incremental-sum&options=unaligned&label=sessions%20served%20today&units=null&value_color=yellowgreen&precision=0&refresh=60&v42" type="image/svg+xml" height="20" />
+ <br/>
+ <small>These badges auto-refresh every minute.</small>
+ </p>
+ </div>
+ <div class="container" style="text-align: center;">
+        <strong>netdata</strong> is featured in <a href="https://octoverse.github.com/2016/" target="_blank" data-ga-category="Outbound links" data-ga-action="Nav click" data-ga-label=Octoverse>GitHub's State of the Octoverse 2016</a>
+ <div style="padding-top: 10px;">
+ <a href="https://octoverse.github.com/2016/" target="_blank" data-ga-category="Outbound links" data-ga-action="Nav click" data-ga-label=OctoverseImage>
+ <img src="https://cloud.githubusercontent.com/assets/2662304/21743260/23ebe62c-d507-11e6-80c0-76b95f53e464.png" width="90%" style="border-radius: 4px; border: 1px solid #fff;"/>
+ </a>
+ </div>
+ </div>
+ <div class=cta-option>
+ <a class="btn btn-download" href="https://docs.netdata.cloud/packaging/installer/" data-ga-category="Outbound links" data-ga-action="Nav click" data-ga-label=InstallAfterDemo><strong>Install netdata now</strong></a>
+ </div>
+ </div>
+</div>
+<div class=aside>
+ <div class=container>
+ <!-- Place this tag where you want the button to render. -->
+ <a class="github-button" href="https://github.com/netdata/netdata/subscription" data-style="mega" data-show-count="true" aria-label="Watch netdata/netdata on GitHub"><img src="https://img.shields.io/github/watchers/netdata/netdata.svg?style=flat&label=Github%20Watchers"></a>
+ <!-- Place this tag where you want the button to render. -->
+ <a class="github-button" href="https://github.com/netdata/netdata" data-style="mega" data-show-count="true" aria-label="Star netdata/netdata on GitHub"><img src="https://img.shields.io/github/stars/netdata/netdata.svg?style=flat&label=Github%20Stars"></a>
+ <!-- Place this tag where you want the button to render. -->
+ <a class="github-button" href="https://github.com/netdata/netdata/fork" data-style="mega" data-show-count="true" aria-label="Fork netdata/netdata on GitHub"><img src="https://img.shields.io/github/forks/netdata/netdata.svg?style=flat&label=Github%20Repo%20Forks"></a>
+ </div>
+</div>
+
+<!-- the footer -->
+<div class=site-footer role=contentinfo>
+    <div>
+ <div style="display: inline-block;">
+ <div style="vertical-align:top;display:inline-block; height: 34px;">twitter:</div>
+ <div style="vertical-align:top;display:inline-block; height: 34px;"><a class=twitter-share-button href=https://twitter.com/share data-count=none data-lang=en data-via=linuxnetdata data-size=small data-text="Get control of your Linux servers. Simple. Effective. Awesome." data-url=https://my-netdata.io/ >Tweet</a></div>
+ <div style="vertical-align:top;display:inline-block; height: 34px;"><a class=twitter-follow-button href=https://twitter.com/linuxnetdata data-show-count=false data-lang=en data-size=small>Follow @linuxnetdata</a></div>
+ </div>
+ <div style="display: inline-block;">
+ <div style="vertical-align:top;display:inline-block; height: 34px; padding-left: 10px;">facebook:</div>
+ <div class="fb-like" data-href="https://my-netdata.io/" data-layout="button" data-action="like" data-show-faces="false" data-share="false" style="vertical-align:top;display:inline-block; height: 34px;"></div>
+ <div class="fb-share-button" data-href="https://my-netdata.io/" data-layout="button" data-size="small" data-mobile-iframe="true"><a class="fb-xfbml-parse-ignore" target="_blank" href="https://www.facebook.com/sharer/sharer.php?u=https%3A%2F%2Fmy-netdata.io%2F&amp;src=sdkpreparse" style="vertical-align:top;display:inline-block; height: 34px;">Share</a></div>
+ <div class="fb-follow" data-href="https://www.facebook.com/linuxnetdata/" data-layout="standard" data-size="small" data-show-faces="false" data-colorscheme="dark" width="225" style="vertical-align:top;display:inline-block; height: 34px;"></div>
+ </div>
+ </p>
+ <p>
+ <strong>netdata</strong><br/>
+ &copy; Copyright 2018-2019, <a href="https://github.com/netdata" target="_blank" data-ga-category="Outbound links" data-ga-action="Nav click" data-ga-label=NetdataInc>Netdata</a><br/>
+ &copy; Copyright 2016-2018, <a href="https://github.com/ktsaou" target="_blank" data-ga-category="Outbound links" data-ga-action="Nav click" data-ga-label=CostaTsaousis>Costa Tsaousis</a><br/>
+ Released under <a href="https://github.com/netdata/netdata/blob/master/LICENSE.md" target="_blank" data-ga-category="Outbound links" data-ga-action="Nav click" data-ga-label=License>GPL v3+</a><br/>
+    </p>
+ <p style="padding-top: 20px;">
+ netdata has received significant contributions from:<br/>&nbsp;<br/>
+ <a href="https://github.com/philwhineray" target="_blank" data-ga-category="Outbound links" data-ga-action="Nav click" data-ga-label=Phil>Phil Whineray</a> (release management),<br/>
+ <a href="https://github.com/alonbl" target="_blank" data-ga-category="Outbound links" data-ga-action="Nav click" data-ga-label=Alon>Alon Bar-Lev</a> (autoconf and automake),<br/>
+ <a href="https://github.com/titpetric" target="_blank" data-ga-category="Outbound links" data-ga-action="Nav click" data-ga-label=titpetric>Tit Petric</a> (docker image maintainer),<br/>
+ <a href="https://github.com/paulfantom" target="_blank" data-ga-category="Outbound links" data-ga-action="Nav click" data-ga-label=Pawel>Paweł Krupa</a> (python.d.plugin and modules),<br/>
+ <a href="https://github.com/simonnagl" target="_blank" data-ga-category="Outbound links" data-ga-action="Nav click" data-ga-label=simonnagl>simonnagl</a> (disk plugin and more),<br/>
+        <a href="https://github.com/fredericopissarra" target="_blank" data-ga-category="Outbound links" data-ga-action="Nav click" data-ga-label=Frederico>Frederico Lamberti Pissarra</a> (performance improvements),<br/>
+        <a href="https://github.com/vlvkobal" target="_blank" data-ga-category="Outbound links" data-ga-action="Nav click" data-ga-label=VladimirKobal>Vladimir Kobal</a> (FreeBSD port),<br/>
+        <a href="https://github.com/l2isbad" target="_blank" data-ga-category="Outbound links" data-ga-action="Nav click" data-ga-label=l2isbad>Ilya Mashchenko</a> (python plugin modules)<br/>
+ &nbsp;<br/>
+ and dozens more enthusiasts, engineers and professionals.<br/>&nbsp;<br/>
+ </p>
+    <p>
+        Thank you! You are awesome!
+    </p>
+</div>
+</body>
+
+<script>
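+    // if this page is served from a mirror host, or over plain http,
+    // add a canonical link so search engines index the primary site instead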
+ if(window.location.hostname != 'my-netdata.io' || window.location.protocol != 'https:') {
+ var canonical = document.createElement('link');
+ canonical.rel = 'canonical';
+ canonical.href = 'https://my-netdata.io/';
+ document.head.appendChild(canonical);
+ }
+</script>
+
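+<!-- Google Analytics: records page views and tracks clicks on elements carrying
+     the data-ga-category / data-ga-action / data-ga-label attributes used
+     throughout this page (the script below is minified) -->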
+<script>!function (t, e) {
+ "use strict";
+ function a(t, n) {
+ return t.hasAttribute(n) === !0 ? t : t.parentNode !== r.body ? a(t.parentNode, n) : e
+ }
+
+ function n(n) {
+ var o, i, r, c, g, u = a(n.target, "data-ga-action"), l = !1;
+ u !== e && (o = u.getAttribute("data-ga-action") || e, i = u.getAttribute("data-ga-category") || e, r = u.getAttribute("data-ga-label") || e, c = u.getAttribute("href"), g = parseInt(u.getAttribute("data-ga-value"), 10) || e, ga !== e && i !== e && o !== e && (n.preventDefault(), "Download" !== i && n.ctrlKey !== !0 && n.metaKey !== !0 && 2 !== n.which || (l = !0, t.open(c)), function (a) {
+ var n;
+ ga("send", "event", i, o, r, g, {
+ hitCallback: function () {
+ l === !1 && (n !== e && clearTimeout(n), t.location = a)
+ }
+ }), n = setTimeout(function () {
+ l === !1 && (t.location.href = a)
+ }, 1e3)
+ }(c)))
+ }
+
+ function o() {
+ !function (t, e, a, n, o, i) {
+ t.GoogleAnalyticsObject = n, t[n] || (t[n] = function () {
+ (t[n].q = t[n].q || []).push(arguments)
+ }), t[n].l = +new Date, o = e.createElement(a), i = e.getElementsByTagName(a)[0], o.src = "//www.google-analytics.com/analytics.js", i.parentNode.insertBefore(o, i)
+ }(t, r, "script", "ga"), ga("create", "UA-64295674-3", "auto"), ga("send", "pageview", "/site"+window.location.pathname), t.document.addEventListener("click", n)
+ }
+
+ function i() {
+ !function (t, e, a) {
+ var n, o = t.getElementsByTagName(e)[0];
+ t.getElementById(a) || (n = t.createElement(e), n.id = a, n.src = "//platform.twitter.com/widgets.js", o.parentNode.insertBefore(n, o))
+ }(r, "script", "twitter-wjs")
+ }
+
+ var r = t.document;
+ o(), t.onload = i
+}(window)</script>
+
+<!-- facebook sdk -->
+<div id="fb-root"></div>
+<script>
+ window.fbAsyncInit = function() {
+ FB.init({
+ appId : '1200089276712916',
+ xfbml : true,
+ version : 'v2.8'
+ });
+ };
+
+ (function(d, s, id){
+ var js, fjs = d.getElementsByTagName(s)[0];
+ if (d.getElementById(id)) {return;}
+ js = d.createElement(s); js.id = id;
+ js.src = "//connect.facebook.net/en_US/sdk.js";
+ fjs.parentNode.insertBefore(js, fjs);
+ }(document, 'script', 'facebook-jssdk'));
+</script>
+
+<script>
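+    // rotating hero titles: updateTitle() cycles through the strings below
+    // with a fade effect; the call at the bottom is commented out, so the
+    // rotation is currently disabled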
+ var allTitles = [
+ 'Get control<br/>of your Linux servers'
+ , 'Get control<br/>of your FreeBSD servers'
+ , 'Monitor<br/>your containers'
+ , 'Monitor<br/>your virtual machines'
+ , 'Monitor<br/>your web servers'
+ , 'Monitor<br/>your databases'
+ , 'Monitor<br/>your applications'
+ , 'Monitor<br/>your SNMP devices'
+ , 'Monitor<br/>your IoT devices'
+        , 'Monitor<br/>your macOS systems'
+ ];
+ var lastTitle = -1;
+
+ function updateTitle(){
+ lastTitle++;
+ if(lastTitle >= allTitles.length)
+ lastTitle = 0;
+
+ var os = document.getElementsByClassName('title');
+ var len = os.length;
+ while (len--) {
+ var el = os[len];
+ el.innerHTML = allTitles[lastTitle];
+ el.classList.add('titlefadein');
+ }
+
+ setTimeout(function() {
+ var os = document.getElementsByClassName('title');
+ var len = os.length;
+ while (len--)
+ os[len].classList.remove('titlefadein');
+
+ }, 5750);
+ setTimeout(updateTitle, 6000);
+ }
+ //updateTitle();
+</script>
+
+<!-- Start of HubSpot Embed Code -->
+<script type="text/javascript" id="hs-script-loader" async defer src="//js.hs-scripts.com/4567453.js"></script>
+<!-- End of HubSpot Embed Code -->
diff --git a/web/gui/demosites2.html b/web/gui/demosites2.html
new file mode 100644
index 0000000..fe35cfb
--- /dev/null
+++ b/web/gui/demosites2.html
@@ -0,0 +1,1112 @@
+<!DOCTYPE html>
+<!-- SPDX-License-Identifier: GPL-3.0-or-later -->
+<html lang="en">
+<head>
+ <title>NetData - Real-time performance monitoring, done right!</title>
+ <meta name="application-name" content="netdata">
+
+ <meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
+ <meta charset="utf-8">
+ <meta http-equiv="X-UA-Compatible" content="IE=edge,chrome=1">
+ <meta name="viewport" content="width=device-width, initial-scale=1">
+ <meta name="apple-mobile-web-app-capable" content="yes">
+ <meta name="apple-mobile-web-app-status-bar-style" content="black-translucent">
+
+ <meta property="og:locale" content="en_US" />
+ <meta property="og:image" content="https://cloud.githubusercontent.com/assets/2662304/22945737/e98cd0c6-f2fd-11e6-96f1-5501934b0955.png"/>
+ <meta property="og:url" content="http://my-netdata.io/"/>
+ <meta property="og:type" content="website"/>
+ <meta property="og:site_name" content="netdata"/>
+ <meta property="og:title" content="netdata - real-time performance monitoring, done right!"/>
+ <meta property="og:description" content="Stunning real-time dashboards, blazingly fast and extremely interactive. Zero configuration, zero dependencies, zero maintenance." />
+</head>
+
+<script>
+    // --- OPTIONS FOR THE DASHBOARD ---
+
+ // this section has to appear before loading dashboard.js
+
+ // Select a theme.
+    // uncomment one of the two themes:
+
+ // var netdataTheme = 'default'; // this is white
+ var netdataTheme = 'slate'; // this is dark
+
+
+ // Set the default netdata server.
+ // on charts without a 'data-host', this one will be used.
+ // the default is the server that dashboard.js is downloaded from.
+
+ // var netdataServer = 'http://my.server:19999/';
+ </script>
+
+ <!--
+ --- LOAD dashboard.js ---
+
+ to host this HTML file on your web server,
+ you have to load dashboard.js from the netdata server.
+
+    So, pick one of the two below.
+    If you pick the first, set the server name/IP.
+
+    The second assumes you host this file in /usr/share/netdata/web
+    and that you have changed its ownership to netdata:netdata.
+ -->
+ <!-- <script type="text/javascript" src="http://my.server:19999/dashboard.js"></script> -->
+ <script type="text/javascript" src="dashboard.js?v20190902-0"></script>
+
+ <script>
+    // --- OPTIONS FOR THE CHARTS ---
+
+ // destroy charts not shown (lowers memory on the browsers)
+ // set this to 'true' to destroy, 'false' to hide the charts
+ NETDATA.options.current.destroy_on_hide = false;
+
+ // set this to false, to always show all dimensions
+ NETDATA.options.current.eliminate_zero_dimensions = true;
+
+ // set this to false, to lower the pressure on the browser
+ NETDATA.options.current.concurrent_refreshes = true;
+
+ // if you need to support slow mobile phones, set this to false
+ NETDATA.options.current.parallel_refresher = true;
+
+ // set this to false, to always update the charts, even if focus is lost
+ NETDATA.options.current.stop_updates_when_focus_is_lost = true;
+
+ // since we have many servers and limited sockets,
+ // abort ajax calls when we scroll
+ NETDATA.options.current.abort_ajax_on_scroll = true;
+</script>
+<style>
+ body {
+ font-size: 1vw;
+ }
+
+ .mysparkline {
+ position: relative;
+ display: inline-block;
+ min-height: 50px;
+ width: 100%;
+ height: 7vmax;
+ text-align: left;
+ }
+
+ .mysparkline-overchart-label {
+ position: absolute;
+ display: block;
+ top: 0;
+ left: 10px;
+ bottom: 0;
+ right: 0;
+ font-size: 1vmax;
+ z-index: 1;
+ }
+
+ .mysparkline-overchart-value {
+ position: absolute;
+ display: block;
+ top: 1.1vmax;
+ left: 10px;
+ bottom: 0;
+ right: 0;
+ font-size: 5vmax;
+ z-index: 2;
+ text-shadow: #333 0px 0px 2px;
+ }
+
+ .myfullchart {
+ position: relative;
+ display: inline-block;
+ width: 100%;
+ height: 12vmax;
+ min-height: 150px;
+ text-align: left;
+ }
+
+ .mygauge-combo {
+ display: inline-block;
+ }
+
+ .mygauge {
+ position: relative;
+ display: block;
+ width: 18vw;
+ height: 11vw;
+ }
+
+ .mygauge-button {
+ display: block;
+ }
+
+ .mytitle {
+ padding-top: 6vw;
+ padding-bottom: 1vw;
+ text-align: center;
+ font-size: 2.4vw;
+ }
+
+ .mysubtitle {
+ padding-top: 2vw;
+ padding-bottom: 1vw;
+ text-align: center;
+ font-size: 1.8vw;
+ }
+
+ .mycontent {
+ text-align: center;
+ font-size: 1.5vw;
+ }
+
+ @media only screen and (min-width : 992px) {
+ .container {
+ width: 80%;
+ }
+ }
+ @media only screen and (max-width : 992px) {
+ .container {
+ width: 100%;
+ }
+ }
+</style>
+
+<body style="text-align: center; background-color: #272b30;">
+
+<div class="container">
+
+ <div style="text-align: center; font-size: 13vw; height: 14vw;">
+ <b>netdata</b>
+ </div>
+ <div style="text-align: center; font-size: 2vw; height: 2.5vw;">
+ real-time performance monitoring
+ </div>
+ <div style="width:80%; text-align: right; font-size: 2.7vw;">
+ <strong>scaled out</strong>!
+ </div>
+ <div class="mytitle">
+ pick a <b>netdata</b> demo server
+ </div>
+ <div class="mycontent">
+ these demo servers show what you will get by installing <b>netdata</b>
+ </div>
+
+ <div style="width: 100%; text-align: center; padding-top: 2vw;">
+ <div style="width: 100%; text-align: center;">
+
+ <div class="mygauge-combo">
+ <div class="mygauge">
+ <div data-netdata="netdata.requests"
+ data-host="//london.my-netdata.io"
+ data-title="EU - London"
+ data-chart-library="gauge"
+ data-decimal-digits="0"
+ data-common-max="top-gauges"
+ data-width="100%"
+ data-after="-300"
+ data-points="300"
+ data-colors="#558855"
+ ></div>
+ </div>
+ <div class="mygauge-button">
+ <br/>&nbsp;<br/>
+ <button type="button" class="btn btn-default" data-toggle="button" aria-pressed="false" autocomplete="off" onclick="window.location='//london.my-netdata.io/default.html'" style="font-size: 1.0vw;">Enter London!</button>
+ <div style="font-size: 0.8vw;">
+ Donated by DigitalOcean.com
+ </div>
+ </div>
+ </div>
+ <div class="mygauge-combo">
+ <div class="mygauge">
+ <div data-netdata="netdata.requests"
+ data-host="//atlanta.my-netdata.io"
+ data-title="US - Atlanta"
+ data-chart-library="gauge"
+ data-decimal-digits="0"
+ data-common-max="top-gauges"
+ data-width="100%"
+ data-after="-300"
+ data-points="300"
+ data-colors="#AA5555"
+ ></div>
+ </div>
+ <div class="mygauge-button">
+ <br/>&nbsp;<br/>
+ <button type="button" class="btn btn-default" data-toggle="button" aria-pressed="false" autocomplete="off" onclick="window.location='//atlanta.my-netdata.io/default.html'" style="font-size: 1.0vw;">Enter Atlanta!</button>
+ <div style="font-size: 0.8vw;">
+ Donated by CDN77.com
+ </div>
+ </div>
+ </div>
+ <div class="mygauge-combo">
+ <div class="mygauge">
+ <div data-netdata="netdata.requests"
+ data-host="//sanfrancisco.netdata.rocks"
+ data-title="US - California"
+ data-chart-library="gauge"
+ data-decimal-digits="0"
+ data-common-max="top-gauges"
+ data-width="100%"
+ data-after="-300"
+ data-points="300"
+ data-colors="#5555AA"
+ ></div>
+ </div>
+ <div class="mygauge-button">
+ <br/>&nbsp;<br/>
+ <button type="button" class="btn btn-default" data-toggle="button" aria-pressed="false" autocomplete="off" onclick="window.location='//sanfrancisco.netdata.rocks/default.html'" style="font-size: 1.0vw;">Enter California!</button>
+ <div style="font-size: 0.8vw;">
+ Donated by DigitalOcean.com
+ </div>
+ </div>
+ </div>
+ <div class="mygauge-combo">
+ <div class="mygauge">
+ <div data-netdata="netdata.requests"
+ data-host="//toronto.netdata.rocks"
+ data-title="Canada"
+ data-chart-library="gauge"
+ data-decimal-digits="0"
+ data-common-max="top-gauges"
+ data-width="100%"
+ data-after="-300"
+ data-points="300"
+ data-colors="#885588"
+ ></div>
+ </div>
+ <div class="mygauge-button">
+ <br/>&nbsp;<br/>
+ <button type="button" class="btn btn-default" data-toggle="button" aria-pressed="false" autocomplete="off" onclick="window.location='//toronto.netdata.rocks/default.html'" style="font-size: 1.0vw;">Enter Canada!</button>
+ <div style="font-size: 0.8vw;">
+ Donated by DigitalOcean.com
+ </div>
+ </div>
+ </div>
+ <br/>&nbsp;<br/>
+ <div class="mygauge-combo">
+ <div class="mygauge">
+ <div data-netdata="netdata.requests"
+ data-host="//frankfurt.netdata.rocks"
+ data-title="EU - Germany"
+ data-chart-library="easypiechart"
+ data-decimal-digits="0"
+ data-common-max="top-gauges"
+ data-width="75%"
+ data-after="-300"
+ data-points="300"
+ data-colors="#AAAA55"
+ ></div>
+ </div>
+ <div class="mygauge-button">
+ <br/>&nbsp;<br/>
+ <button type="button" class="btn btn-default" data-toggle="button" aria-pressed="false" autocomplete="off" onclick="window.location='//frankfurt.netdata.rocks/default.html'" style="font-size: 1.0vw;">Enter Germany!</button>
+ <div style="font-size: 0.8vw;">
+ Donated by DigitalOcean.com
+ </div>
+ </div>
+ </div>
+ <div class="mygauge-combo">
+ <div class="mygauge">
+ <div data-netdata="netdata.requests"
+ data-host="//newyork.netdata.rocks"
+ data-title="US - New York"
+ data-chart-library="easypiechart"
+ data-decimal-digits="0"
+ data-common-max="top-gauges"
+ data-width="75%"
+ data-after="-300"
+ data-points="300"
+ data-colors="#BB5533"
+ ></div>
+ </div>
+ <div class="mygauge-button">
+ <br/>&nbsp;<br/>
+ <button type="button" class="btn btn-default" data-toggle="button" aria-pressed="false" autocomplete="off" onclick="window.location='//newyork.netdata.rocks/default.html'" style="font-size: 1.0vw;">Enter New York!</button>
+ <div style="font-size: 0.8vw;">
+ Donated by DigitalOcean.com
+ </div>
+ </div>
+ </div>
+ <div class="mygauge-combo">
+ <div class="mygauge">
+ <div data-netdata="netdata.requests"
+ data-host="//singapore.netdata.rocks"
+ data-title="Singapore"
+ data-chart-library="easypiechart"
+ data-decimal-digits="0"
+ data-common-max="top-gauges"
+ data-width="75%"
+ data-after="-300"
+ data-points="300"
+ data-colors="#5588BB"
+ ></div>
+ </div>
+ <div class="mygauge-button">
+ <br/>&nbsp;<br/>
+ <button type="button" class="btn btn-default" data-toggle="button" aria-pressed="false" autocomplete="off" onclick="window.location='//singapore.netdata.rocks/default.html'" style="font-size: 1.0vw;">Enter Singapore!</button>
+ <div style="font-size: 0.8vw;">
+ Donated by DigitalOcean.com
+ </div>
+ </div>
+ </div>
+ <div class="mygauge-combo">
+ <div class="mygauge">
+ <div data-netdata="netdata.requests"
+ data-host="//bangalore.netdata.rocks"
+ data-title="India"
+ data-chart-library="easypiechart"
+ data-decimal-digits="0"
+ data-common-max="top-gauges"
+ data-width="75%"
+ data-after="-300"
+ data-points="300"
+ data-colors="#BB55BB"
+ ></div>
+ </div>
+ <div class="mygauge-button">
+ <br/>&nbsp;<br/>
+ <button type="button" class="btn btn-default" data-toggle="button" aria-pressed="false" autocomplete="off" onclick="window.location='//bangalore.netdata.rocks/default.html'" style="font-size: 1.0vw;">Enter India!</button>
+ <div style="font-size: 0.8vw;">
+ Donated by DigitalOcean.com
+ </div>
+ </div>
+ </div>
+ </div>
+ </div>
+
+ <div class="mytitle">
+ this page is a custom <b>netdata</b> dashboard
+ </div>
+ <div class="mycontent">
+ charts are coming from 8 servers, in parallel
+ <br/>
+ the servers are not aware of this multi-server dashboard,
+ <br/>
+        nor is any server aware of the others,
+ <br/>
+ but on this dashboard <b>they are one</b>!
+ </div>
+ <div style="padding-top: 1vw; width: 100%; text-align: center; font-size: 1.5vw;">
+ <i class="fa fa-comment" aria-hidden="true"></i>
+ hover on a chart below, or drag it to show the past - <b>the others will follow</b>!
+ <br/>
+        double-click on a chart to reset them all
+ </div>
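+    <!--
+        how the charts below work: dashboard.js scans the page for elements
+        with a data-netdata attribute and renders a live chart into each one.
+        A minimal sketch (chart id and host are illustrative, matching the
+        demo servers used on this page):
+
+        <div data-netdata="system.cpu"
+             data-host="//london.my-netdata.io"
+             data-chart-library="dygraph"
+             data-width="100%"
+             data-height="100%"
+             data-after="-300"
+             ></div>
+
+        data-after="-300" requests the last 300 seconds of data
+    -->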
+
+ <div class="mytitle">
+ our <code>nginx</code> performance
+ </div>
+ <div class="mycontent">
+        (we proxy netdata through nginx on the demo sites)
+ </div>
+
+ <!-- Nav tabs -->
+ <ul class="nav nav-tabs" role="tablist" style="padding-top: 1vw;">
+ <li role="presentation" class="active"><a href="#nginx_requests" aria-controls="nginx_requests" role="tab" data-toggle="tab">Requests</a></li>
+ <li role="presentation"><a href="#nginx_connections" aria-controls="nginx_connections" role="tab" data-toggle="tab">Connections</a></li>
+ </ul>
+
+ <!-- Tab panes -->
+ <div class="tab-content">
+ <div role="tabpanel" class="tab-pane active" id="nginx_requests">
+ <div class="mysparkline">
+ <div class="mysparkline-overchart-label">
+ <b>EU - London</b> web requests/s
+ </div>
+ <div class="mysparkline-overchart-value" id="nginx_local.requests.netdata" >
+ </div>
+ <div data-netdata="nginx_local.requests"
+ data-dimensions="requests"
+ data-host="//london.my-netdata.io"
+ data-decimal-digits="0"
+ data-common-max="web-requests"
+ data-chart-library="dygraph"
+ data-dygraph-theme="sparkline"
+ data-dygraph-type="area"
+ data-width="100%"
+ data-height="100%"
+ data-after="-300"
+ data-colors="#558855"
+ data-show-value-of-requests-at="nginx_local.requests.netdata"
+ ></div>
+ </div>
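+            <!-- the data-show-value-of-requests-at attribute above names the id of
+                 the overchart-value element, so dashboard.js renders the latest
+                 value of the 'requests' dimension into it -->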
+
+ <div class="mysparkline">
+ <div class="mysparkline-overchart-label">
+ <b>US - Atlanta</b> web requests/s
+ </div>
+ <div class="mysparkline-overchart-value" id="nginx_local.requests.netdata2" >
+ </div>
+ <div data-netdata="nginx_local.requests"
+ data-dimensions="requests"
+ data-host="//atlanta.my-netdata.io"
+ data-decimal-digits="0"
+ data-common-max="web-requests"
+ data-chart-library="dygraph"
+ data-dygraph-theme="sparkline"
+ data-dygraph-type="area"
+ data-width="100%"
+ data-height="100%"
+ data-after="-300"
+ data-colors="#AA5555"
+ data-show-value-of-requests-at="nginx_local.requests.netdata2"
+ ></div>
+ </div>
+
+ <div class="mysparkline">
+ <div class="mysparkline-overchart-label">
+ <b>US - California</b> web requests/s
+ </div>
+ <div class="mysparkline-overchart-value" id="nginx_local.requests.netdata3" >
+ </div>
+ <div data-netdata="nginx_local.requests"
+ data-dimensions="requests"
+ data-host="//sanfrancisco.netdata.rocks"
+ data-decimal-digits="0"
+ data-common-max="web-requests"
+ data-chart-library="dygraph"
+ data-dygraph-theme="sparkline"
+ data-dygraph-type="area"
+ data-width="100%"
+ data-height="100%"
+ data-after="-300"
+ data-colors="#5555AA"
+ data-show-value-of-requests-at="nginx_local.requests.netdata3"
+ ></div>
+ </div>
+
+ <div class="mysparkline">
+ <div class="mysparkline-overchart-label">
+ <b>Canada</b> web requests/s
+ </div>
+ <div class="mysparkline-overchart-value" id="nginx_local.requests.netdata4" >
+ </div>
+ <div data-netdata="nginx_local.requests"
+ data-dimensions="requests"
+ data-host="//toronto.netdata.rocks"
+ data-decimal-digits="0"
+ data-common-max="web-requests"
+ data-chart-library="dygraph"
+ data-dygraph-theme="sparkline"
+ data-dygraph-type="area"
+ data-width="100%"
+ data-height="100%"
+ data-after="-300"
+ data-colors="#885588"
+ data-show-value-of-requests-at="nginx_local.requests.netdata4"
+ ></div>
+ </div>
+ </div>
+
+ <div role="tabpanel" class="tab-pane" id="nginx_connections">
+ <div class="mysparkline">
+ <div class="mysparkline-overchart-label">
+ <b>EU - London</b> active connections
+ </div>
+ <div class="mysparkline-overchart-value" id="nginx_local.connections.netdata1" >
+ </div>
+ <div data-netdata="nginx_local.connections"
+ data-dimensions="active"
+ data-host="//london.my-netdata.io"
+ data-decimal-digits="0"
+ data-common-max="web-connections"
+ data-chart-library="dygraph"
+ data-dygraph-theme="sparkline"
+ data-dygraph-type="area"
+ data-width="100%"
+ data-height="100%"
+ data-after="-300"
+ data-colors="#558855"
+ data-show-value-of-active-at="nginx_local.connections.netdata1"
+ ></div>
+ </div>
+
+ <div class="mysparkline">
+ <div class="mysparkline-overchart-label">
+ <b>US - Atlanta</b> active connections
+ </div>
+ <div class="mysparkline-overchart-value" id="nginx_local.connections.netdata2" >
+ </div>
+ <div data-netdata="nginx_local.connections"
+ data-dimensions="active"
+ data-host="//atlanta.my-netdata.io"
+ data-decimal-digits="0"
+ data-common-max="web-connections"
+ data-chart-library="dygraph"
+ data-dygraph-theme="sparkline"
+ data-dygraph-type="area"
+ data-width="100%"
+ data-height="100%"
+ data-after="-300"
+ data-colors="#AA5555"
+ data-show-value-of-active-at="nginx_local.connections.netdata2"
+ ></div>
+ </div>
+
+ <div class="mysparkline">
+ <div class="mysparkline-overchart-label">
+ <b>US - California</b> active connections
+ </div>
+ <div class="mysparkline-overchart-value" id="nginx_local.connections.netdata3" >
+ </div>
+ <div data-netdata="nginx_local.connections"
+ data-dimensions="active"
+ data-host="//sanfrancisco.netdata.rocks"
+ data-decimal-digits="0"
+ data-common-max="web-connections"
+ data-chart-library="dygraph"
+ data-dygraph-theme="sparkline"
+ data-dygraph-type="area"
+ data-width="100%"
+ data-height="100%"
+ data-after="-300"
+ data-colors="#5555AA"
+ data-show-value-of-active-at="nginx_local.connections.netdata3"
+ ></div>
+ </div>
+
+ <div class="mysparkline">
+ <div class="mysparkline-overchart-label">
+ <b>Canada</b> active connections
+ </div>
+ <div class="mysparkline-overchart-value" id="nginx_local.connections.netdata4" >
+ </div>
+ <div data-netdata="nginx_local.connections"
+ data-dimensions="active"
+ data-host="//toronto.netdata.rocks"
+ data-decimal-digits="0"
+ data-common-max="web-connections"
+ data-chart-library="dygraph"
+ data-dygraph-theme="sparkline"
+ data-dygraph-type="area"
+ data-width="100%"
+ data-height="100%"
+ data-after="-300"
+ data-colors="#885588"
+ data-show-value-of-active-at="nginx_local.connections.netdata4"
+ ></div>
+ </div>
+ </div>
+ </div>
+
+ <div style="width: 100%; text-align: right; font-size: 1vw;">
+        <i class="fa fa-comment" aria-hidden="true"></i> these charts are draggable and touchable; double-click them to reset them
+ </div>
+
+
+ <div class="mytitle">
+ bandwidth consumption on the demo sites
+ </div>
+ <div class="mycontent">
+ Linux QoS is configured by <a href="https://github.com/netdata/netdata/tree/master/collectors/tc.plugin#tcplugin">FireQOS</a>
+ </div>
+
+ <!-- Nav tabs -->
+ <ul class="nav nav-tabs" role="tablist" style="padding-top: 1vw;">
+        <li role="presentation" class="active"><a href="#outbound" aria-controls="outbound" role="tab" data-toggle="tab">Outbound</a></li>
+ <li role="presentation"><a href="#inbound" aria-controls="inbound" role="tab" data-toggle="tab">Inbound</a></li>
+ </ul>
+
+ <!-- Tab panes -->
+ <div class="tab-content">
+        <div role="tabpanel" class="tab-pane active" id="outbound">
+ <div class="myfullchart">
+ <div data-netdata="tc.world_out"
+ data-host="//london.my-netdata.io"
+ data-common-max="tc-world-out"
+ data-chart-library="dygraph"
+ data-title="EU - London, traffic we send per service"
+ data-width="100%"
+ data-height="100%"
+ data-after="-300"
+ ></div>
+ </div>
+
+ <div class="myfullchart">
+ <div data-netdata="tc.world_out"
+ data-host="//atlanta.my-netdata.io"
+ data-chart-library="dygraph"
+ data-common-max="tc-world-out"
+ data-title="US - Atlanta, traffic we send per service"
+ data-width="100%"
+ data-height="100%"
+ data-after="-300"
+ ></div>
+
+ </div>
+
+ <div class="myfullchart">
+ <div data-netdata="tc.world_out"
+ data-host="//sanfrancisco.netdata.rocks"
+ data-chart-library="dygraph"
+ data-common-max="tc-world-out"
+ data-title="US - California, traffic we send per service"
+ data-width="100%"
+ data-height="100%"
+ data-after="-300"
+ ></div>
+ </div>
+
+ <div class="myfullchart">
+ <div data-netdata="tc.world_out"
+ data-host="//toronto.netdata.rocks"
+ data-chart-library="dygraph"
+ data-common-max="tc-world-out"
+ data-title="Canada, traffic we send per service"
+ data-width="100%"
+ data-height="100%"
+ data-after="-300"
+ ></div>
+ </div>
+ </div>
+
+ <div role="tabpanel" class="tab-pane" id="inbound">
+ <div class="myfullchart">
+ <div data-netdata="tc.world_in"
+ data-host="//london.my-netdata.io"
+ data-common-max="tc-world-in"
+ data-chart-library="dygraph"
+ data-title="EU - London, traffic we receive per service"
+ data-width="100%"
+ data-height="100%"
+ data-after="-300"
+ ></div>
+
+ </div>
+
+ <div class="myfullchart">
+ <div data-netdata="tc.world_in"
+ data-host="//atlanta.my-netdata.io"
+ data-common-max="tc-world-in"
+ data-chart-library="dygraph"
+ data-title="US - Atlanta, traffic we receive per service"
+ data-width="100%"
+ data-height="100%"
+ data-after="-300"
+ ></div>
+
+ </div>
+
+ <div class="myfullchart">
+ <div data-netdata="tc.world_in"
+ data-host="//sanfrancisco.netdata.rocks"
+ data-common-max="tc-world-in"
+ data-chart-library="dygraph"
+ data-title="US - California, traffic we receive per service"
+ data-width="100%"
+ data-height="100%"
+ data-after="-300"
+ ></div>
+ </div>
+
+ <div class="myfullchart">
+ <div data-netdata="tc.world_in"
+ data-host="//toronto.netdata.rocks"
+ data-common-max="tc-world-in"
+ data-chart-library="dygraph"
+ data-title="Canada, traffic we receive per service"
+ data-width="100%"
+ data-height="100%"
+ data-after="-300"
+ ></div>
+ </div>
+ </div>
+ </div>
+ <div style="width: 100%; text-align: right; font-size: 1vw;">
+ <i class="fa fa-comment" aria-hidden="true"></i> <i>these legends are interactive and the charts are resizable here ^^^</i>
+ </div>
+
+ <div class="mytitle">
+ DDoS protection performance on the demo sites
+ </div>
+ <div class="mycontent">
+ iptables SYNPROXY configured by <a href="https://github.com/netdata/netdata/blob/master/collectors/proc.plugin/README.md#linux-anti-ddos">FireHOL</a>
+ </div>
+
+ <div style="padding-top: 4vw; width: 100%; text-align: center; font-size: 1.5vw;">
+
+ <div class="mysparkline">
+ <div class="mysparkline-overchart-label">
+ <b>EU - London</b>, TCP SYN packets/s received
+ </div>
+ <div class="mysparkline-overchart-value" id="netfilter.synproxy_syn_received.netdata1" >
+ </div>
+ <div data-netdata="netfilter.synproxy_syn_received"
+ data-dimensions="received"
+ data-host="//london.my-netdata.io"
+ data-decimal-digits="0"
+ data-common-max="synproxy-in"
+ data-chart-library="dygraph"
+ data-dygraph-theme="sparkline"
+ data-dygraph-type="area"
+ data-width="100%"
+ data-height="100%"
+ data-after="-300"
+ data-colors="#558855"
+ data-show-value-of-received-at="netfilter.synproxy_syn_received.netdata1"
+ ></div>
+ </div>
+
+ <div class="mysparkline">
+ <div class="mysparkline-overchart-label">
+ <b>US - Atlanta</b>, TCP SYN packets/s received
+ </div>
+ <div class="mysparkline-overchart-value" id="netfilter.synproxy_syn_received.netdata2" >
+ </div>
+ <div data-netdata="netfilter.synproxy_syn_received"
+ data-dimensions="received"
+ data-host="//atlanta.my-netdata.io"
+ data-decimal-digits="0"
+ data-common-max="synproxy-in"
+ data-chart-library="dygraph"
+ data-dygraph-theme="sparkline"
+ data-dygraph-type="area"
+ data-width="100%"
+ data-height="100%"
+ data-after="-300"
+ data-colors="#885555"
+ data-show-value-of-received-at="netfilter.synproxy_syn_received.netdata2"
+ ></div>
+ </div>
+
+ <div class="mysparkline">
+ <div class="mysparkline-overchart-label">
+ <b>US - California</b>, TCP SYN packets/s received
+ </div>
+ <div class="mysparkline-overchart-value" id="netfilter.synproxy_syn_received.netdata3" >
+ </div>
+ <div data-netdata="netfilter.synproxy_syn_received"
+ data-dimensions="received"
+ data-host="//sanfrancisco.netdata.rocks"
+ data-decimal-digits="0"
+ data-common-max="synproxy-in"
+ data-chart-library="dygraph"
+ data-dygraph-theme="sparkline"
+ data-dygraph-type="area"
+ data-width="100%"
+ data-height="100%"
+ data-after="-300"
+ data-colors="#555588"
+ data-show-value-of-received-at="netfilter.synproxy_syn_received.netdata3"
+ ></div>
+ </div>
+
+ <div class="mysparkline">
+ <div class="mysparkline-overchart-label">
+ <b>Canada</b>, TCP SYN packets/s received
+ </div>
+ <div class="mysparkline-overchart-value" id="netfilter.synproxy_syn_received.netdata4" >
+ </div>
+ <div data-netdata="netfilter.synproxy_syn_received"
+ data-dimensions="received"
+ data-host="//toronto.netdata.rocks"
+ data-decimal-digits="0"
+ data-common-max="synproxy-in"
+ data-chart-library="dygraph"
+ data-dygraph-theme="sparkline"
+ data-dygraph-type="area"
+ data-width="100%"
+ data-height="100%"
+ data-after="-300"
+ data-colors="#885588"
+ data-show-value-of-received-at="netfilter.synproxy_syn_received.netdata4"
+ ></div>
+ </div>
+ </div>
+ <div style="width: 100%; text-align: right; font-size: 1vw;">
+ <i class="fa fa-comment" aria-hidden="true"></i> <i>did you notice the decimal numbers?
+ <br/>netdata interpolates collected values at second boundaries, with nanosecond detail!</i>
+ </div>
+
+
+ <div class="mytitle">
+ CPU Utilization of the demo sites
+ </div>
+
+ <div style="padding-top: 1vw;">
+ <div class="myfullchart">
+ <div data-netdata="system.cpu"
+ data-host="//london.my-netdata.io"
+ data-chart-library="dygraph"
+ data-title="EU - London, CPU Usage"
+ data-width="100%"
+ data-height="100%"
+ data-after="-300"
+ data-dygraph-valuerange="[0, 100]"
+ ></div>
+ </div>
+
+ <div class="myfullchart">
+ <div data-netdata="system.cpu"
+ data-host="//atlanta.my-netdata.io"
+ data-chart-library="dygraph"
+ data-title="US - Atlanta, CPU Usage"
+ data-width="100%"
+ data-height="100%"
+ data-after="-300"
+ data-dygraph-valuerange="[0, 100]"
+ ></div>
+ </div>
+
+ <div class="myfullchart">
+ <div data-netdata="system.cpu"
+ data-host="//sanfrancisco.netdata.rocks"
+ data-chart-library="dygraph"
+ data-title="US - California, CPU Usage"
+ data-width="100%"
+ data-height="100%"
+ data-after="-300"
+ data-dygraph-valuerange="[0, 100]"
+ ></div>
+ </div>
+
+ <div class="myfullchart">
+ <div data-netdata="system.cpu"
+ data-host="//toronto.netdata.rocks"
+ data-chart-library="dygraph"
+ data-title="Canada, CPU Usage"
+ data-width="100%"
+ data-height="100%"
+ data-after="-300"
+ data-dygraph-valuerange="[0, 100]"
+ ></div>
+ </div>
+ </div>
+ <div style="width: 100%; text-align: right; font-size: 1vw;">
+ <i class="fa fa-comment" aria-hidden="true"></i> <i>what is using so much CPU?
+ <br/>The site <a href="//iplists.firehol.org/">iplists.firehol.org</a> is maintained by FireHOL - the CPU is used for comparing security IP Lists.</i>
+ </div>
+
+ <div class="mytitle">
+ Netdata performance
+ </div>
+ <div class="mycontent">
+        netdata monitors <b>users</b>, <b>user groups</b>, <b>applications (process trees)</b>,
+ <br/>
+ <b>containers</b> (<code>lxc</code>, <code>docker</code>, etc.) and SNMP devices.
+ </div>
+
+ <!-- Nav tabs -->
+ <ul class="nav nav-tabs" role="tablist" style="padding-top: 1vw;">
+ <li role="presentation" class="active"><a href="#netdata_cpu" aria-controls="netdata_cpu" role="tab" data-toggle="tab">CPU</a></li>
+ <li role="presentation"><a href="#netdata_avgtime" aria-controls="netdata_avgtime" role="tab" data-toggle="tab">Average Response Time</a></li>
+ </ul>
+
+ <!-- Tab panes -->
+ <div class="tab-content">
+ <div role="tabpanel" class="tab-pane active" id="netdata_cpu">
+ <div class="mysparkline">
+ <div class="mysparkline-overchart-label">
+ <b>EU - London</b>, CPU % of a single core
+ </div>
+ <div class="mysparkline-overchart-value" id="users.cpu.netdata1" >
+ </div>
+ <div data-netdata="apps.cpu"
+ data-dimensions="netdata"
+ data-common-max="users-cpu"
+ data-decimal-digits="1"
+ data-host="//london.my-netdata.io"
+ data-chart-library="dygraph"
+ data-dygraph-theme="sparkline"
+ data-dygraph-type="area"
+ data-width="100%"
+ data-height="100%"
+ data-after="-300"
+ data-colors="#558855"
+ data-show-value-of-netdata-at="users.cpu.netdata1"
+ ></div>
+ </div>
+
+ <div class="mysparkline">
+ <div class="mysparkline-overchart-label">
+ <b>US - Atlanta</b>, CPU % of a single core
+ </div>
+ <div class="mysparkline-overchart-value" id="users.cpu.netdata2" >
+ </div>
+ <div data-netdata="apps.cpu"
+ data-dimensions="netdata"
+ data-common-max="users-cpu"
+ data-decimal-digits="1"
+ data-host="//atlanta.my-netdata.io"
+ data-chart-library="dygraph"
+ data-dygraph-theme="sparkline"
+ data-dygraph-type="area"
+ data-width="100%"
+ data-height="100%"
+ data-after="-300"
+ data-colors="#885555"
+ data-show-value-of-netdata-at="users.cpu.netdata2"
+ ></div>
+ </div>
+
+ <div class="mysparkline">
+ <div class="mysparkline-overchart-label">
+ <b>US - California</b>, CPU % of a single core
+ </div>
+ <div class="mysparkline-overchart-value" id="users.cpu.netdata3" >
+ </div>
+ <div data-netdata="apps.cpu"
+ data-dimensions="netdata"
+ data-common-max="users-cpu"
+ data-decimal-digits="1"
+ data-host="//sanfrancisco.netdata.rocks"
+ data-chart-library="dygraph"
+ data-dygraph-theme="sparkline"
+ data-dygraph-type="area"
+ data-width="100%"
+ data-height="100%"
+ data-after="-300"
+ data-colors="#555588"
+ data-show-value-of-netdata-at="users.cpu.netdata3"
+ ></div>
+ </div>
+
+ <div class="mysparkline">
+ <div class="mysparkline-overchart-label">
+                    <b>Canada</b>, CPU % of a single core
+ </div>
+ <div class="mysparkline-overchart-value" id="users.cpu.netdata4" >
+ </div>
+ <div data-netdata="apps.cpu"
+ data-dimensions="netdata"
+ data-common-max="users-cpu"
+ data-decimal-digits="1"
+ data-host="//toronto.netdata.rocks"
+ data-chart-library="dygraph"
+ data-dygraph-theme="sparkline"
+ data-dygraph-type="area"
+ data-width="100%"
+ data-height="100%"
+ data-after="-300"
+ data-colors="#885588"
+ data-show-value-of-netdata-at="users.cpu.netdata4"
+ ></div>
+ </div>
+
+ <div style="width: 100%; text-align: right; font-size: 1vw;">
+            <i class="fa fa-comment" aria-hidden="true"></i> <i>this utilization covers the whole netdata process tree, and the percentage refers to <b>a single core</b>!
+            <br/>it includes <b>BASH</b> plugins (monitoring <code>mysql</code> on the demo sites), <b>node.js</b> plugins (monitoring <code>bind9</code> on the demo sites), etc.,
+            <br/>as well as the chart refreshes for the dashboards of all viewers.</i>
+ </div>
+ </div>
+
+ <div role="tabpanel" class="tab-pane" id="netdata_avgtime">
+ <div class="mysparkline">
+ <div class="mysparkline-overchart-label">
+ <b>EU - London</b>, API average response time in milliseconds
+ </div>
+ <div class="mysparkline-overchart-value" id="netdata.response_time1" >
+ </div>
+ <div data-netdata="netdata.response_time"
+ data-host="//london.my-netdata.io"
+ data-common-max="netdata-response-time"
+ data-decimal-digits="1"
+ data-chart-library="dygraph"
+ data-dygraph-theme="sparkline"
+ data-dygraph-type="area"
+ data-width="100%"
+ data-height="100%"
+ data-after="-300"
+ data-colors="#558855 #356835"
+ data-show-value-of-average-at="netdata.response_time1"
+ ></div>
+ </div>
+
+ <div class="mysparkline">
+ <div class="mysparkline-overchart-label">
+ <b>US - Atlanta</b>, API average response time in milliseconds
+ </div>
+ <div class="mysparkline-overchart-value" id="netdata.response_time2" >
+ </div>
+ <div data-netdata="netdata.response_time"
+ data-host="//atlanta.my-netdata.io"
+ data-common-max="netdata-response-time"
+ data-decimal-digits="1"
+ data-chart-library="dygraph"
+ data-dygraph-theme="sparkline"
+ data-dygraph-type="area"
+ data-width="100%"
+ data-height="100%"
+ data-after="-300"
+ data-colors="#885555 #683535"
+ data-show-value-of-average-at="netdata.response_time2"
+ ></div>
+ </div>
+
+ <div class="mysparkline">
+ <div class="mysparkline-overchart-label">
+ <b>US - California</b>, API average response time in milliseconds
+ </div>
+ <div class="mysparkline-overchart-value" id="netdata.response_time3" >
+ </div>
+ <div data-netdata="netdata.response_time"
+ data-host="//sanfrancisco.netdata.rocks"
+ data-common-max="netdata-response-time"
+ data-decimal-digits="1"
+ data-chart-library="dygraph"
+ data-dygraph-theme="sparkline"
+ data-dygraph-type="area"
+ data-width="100%"
+ data-height="100%"
+ data-after="-300"
+ data-colors="#555588 #353568"
+ data-show-value-of-average-at="netdata.response_time3"
+ ></div>
+ </div>
+
+ <div class="mysparkline">
+ <div class="mysparkline-overchart-label">
+ <b>Canada</b>, API average response time in milliseconds
+ </div>
+ <div class="mysparkline-overchart-value" id="netdata.response_time4" >
+ </div>
+ <div data-netdata="netdata.response_time"
+ data-host="//toronto.netdata.rocks"
+ data-decimal-digits="1"
+ data-common-max="netdata-response-time"
+ data-chart-library="dygraph"
+ data-dygraph-theme="sparkline"
+ data-dygraph-type="area"
+ data-width="100%"
+ data-height="100%"
+ data-after="-300"
+ data-colors="#885588 #683568"
+ data-show-value-of-average-at="netdata.response_time4"
+ ></div>
+ </div>
+
+ <div style="width: 100%; text-align: right; font-size: 1vw;">
+ <i class="fa fa-comment" aria-hidden="true"></i> <i>netdata is really <b>fast</b> (the values are milliseconds!)
+ <br/>
+ These values include everything, from the reception of the first byte to the dispatch of the last, including gzip compression.
+ <br/>
+ Values above 2-3ms are usually chart refreshes of charts with several dimensions, charts with very long durations (zoomed out), or file transfers.
+ </i>
+ </div>
+ </div>
+ </div>
+
+ <div style="padding-top: 6vw; width: 100%; text-align: center; font-size: 2vw;">
+ want to know more?
+ <br/>
+ jump to <a href="https://github.com/netdata/netdata/">the netdata page at github</a>
+ <br/>
+        it takes just 3 minutes to install on your servers!
+ <br/>
+ &nbsp;
+ </div>
+</div>
+</body>
+<script>
+ // google analytics when this is used for the home page of the demo sites
+ // you don't need this if you customize this dashboard for your needs
+ setTimeout(function() {
+ (function(i,s,o,g,r,a,m){i['GoogleAnalyticsObject']=r;i[r]=i[r]||function(){
+ (i[r].q=i[r].q||[]).push(arguments)},i[r].l=1*new Date();a=s.createElement(o),
+ m=s.getElementsByTagName(o)[0];a.async=1;a.src=g;m.parentNode.insertBefore(a,m)
+ })(window,document,'script','https://www.google-analytics.com/analytics.js','ga');
+
+ ga('create', 'UA-64295674-3', 'auto');
+ ga('send', 'pageview');
+ }, 2000);
+</script>
+</html>
diff --git a/web/gui/favicon.ico b/web/gui/favicon.ico
new file mode 100644
index 0000000..064032a
--- /dev/null
+++ b/web/gui/favicon.ico
Binary files differ
diff --git a/web/gui/fonts/glyphicons-halflings-regular.eot b/web/gui/fonts/glyphicons-halflings-regular.eot
new file mode 100644
index 0000000..b93a495
--- /dev/null
+++ b/web/gui/fonts/glyphicons-halflings-regular.eot
Binary files differ
diff --git a/web/gui/fonts/glyphicons-halflings-regular.svg b/web/gui/fonts/glyphicons-halflings-regular.svg
new file mode 100644
index 0000000..2a4aaba
--- /dev/null
+++ b/web/gui/fonts/glyphicons-halflings-regular.svg
@@ -0,0 +1,289 @@
+<?xml version="1.0" standalone="no"?>
+<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd" >
+<!-- SPDX-License-Identifier: CC-BY-4.0 -->
+<svg xmlns="http://www.w3.org/2000/svg">
+<metadata></metadata>
+<defs>
+<font id="glyphicons_halflingsregular" horiz-adv-x="1200" >
+<font-face units-per-em="1200" ascent="960" descent="-240" />
+<missing-glyph horiz-adv-x="500" />
+<glyph horiz-adv-x="0" />
+<glyph horiz-adv-x="400" />
+<glyph unicode=" " />
+<glyph unicode="*" d="M600 1100q15 0 34 -1.5t30 -3.5l11 -1q10 -2 17.5 -10.5t7.5 -18.5v-224l158 158q7 7 18 8t19 -6l106 -106q7 -8 6 -19t-8 -18l-158 -158h224q10 0 18.5 -7.5t10.5 -17.5q6 -41 6 -75q0 -15 -1.5 -34t-3.5 -30l-1 -11q-2 -10 -10.5 -17.5t-18.5 -7.5h-224l158 -158 q7 -7 8 -18t-6 -19l-106 -106q-8 -7 -19 -6t-18 8l-158 158v-224q0 -10 -7.5 -18.5t-17.5 -10.5q-41 -6 -75 -6q-15 0 -34 1.5t-30 3.5l-11 1q-10 2 -17.5 10.5t-7.5 18.5v224l-158 -158q-7 -7 -18 -8t-19 6l-106 106q-7 8 -6 19t8 18l158 158h-224q-10 0 -18.5 7.5 t-10.5 17.5q-6 41 -6 75q0 15 1.5 34t3.5 30l1 11q2 10 10.5 17.5t18.5 7.5h224l-158 158q-7 7 -8 18t6 19l106 106q8 7 19 6t18 -8l158 -158v224q0 10 7.5 18.5t17.5 10.5q41 6 75 6z" />
+<glyph unicode="+" d="M450 1100h200q21 0 35.5 -14.5t14.5 -35.5v-350h350q21 0 35.5 -14.5t14.5 -35.5v-200q0 -21 -14.5 -35.5t-35.5 -14.5h-350v-350q0 -21 -14.5 -35.5t-35.5 -14.5h-200q-21 0 -35.5 14.5t-14.5 35.5v350h-350q-21 0 -35.5 14.5t-14.5 35.5v200q0 21 14.5 35.5t35.5 14.5 h350v350q0 21 14.5 35.5t35.5 14.5z" />
+<glyph unicode="&#xa0;" />
+<glyph unicode="&#xa5;" d="M825 1100h250q10 0 12.5 -5t-5.5 -13l-364 -364q-6 -6 -11 -18h268q10 0 13 -6t-3 -14l-120 -160q-6 -8 -18 -14t-22 -6h-125v-100h275q10 0 13 -6t-3 -14l-120 -160q-6 -8 -18 -14t-22 -6h-125v-174q0 -11 -7.5 -18.5t-18.5 -7.5h-148q-11 0 -18.5 7.5t-7.5 18.5v174 h-275q-10 0 -13 6t3 14l120 160q6 8 18 14t22 6h125v100h-275q-10 0 -13 6t3 14l120 160q6 8 18 14t22 6h118q-5 12 -11 18l-364 364q-8 8 -5.5 13t12.5 5h250q25 0 43 -18l164 -164q8 -8 18 -8t18 8l164 164q18 18 43 18z" />
+<glyph unicode="&#x2000;" horiz-adv-x="650" />
+<glyph unicode="&#x2001;" horiz-adv-x="1300" />
+<glyph unicode="&#x2002;" horiz-adv-x="650" />
+<glyph unicode="&#x2003;" horiz-adv-x="1300" />
+<glyph unicode="&#x2004;" horiz-adv-x="433" />
+<glyph unicode="&#x2005;" horiz-adv-x="325" />
+<glyph unicode="&#x2006;" horiz-adv-x="216" />
+<glyph unicode="&#x2007;" horiz-adv-x="216" />
+<glyph unicode="&#x2008;" horiz-adv-x="162" />
+<glyph unicode="&#x2009;" horiz-adv-x="260" />
+<glyph unicode="&#x200a;" horiz-adv-x="72" />
+<glyph unicode="&#x202f;" horiz-adv-x="260" />
+<glyph unicode="&#x205f;" horiz-adv-x="325" />
+<glyph unicode="&#x20ac;" d="M744 1198q242 0 354 -189q60 -104 66 -209h-181q0 45 -17.5 82.5t-43.5 61.5t-58 40.5t-60.5 24t-51.5 7.5q-19 0 -40.5 -5.5t-49.5 -20.5t-53 -38t-49 -62.5t-39 -89.5h379l-100 -100h-300q-6 -50 -6 -100h406l-100 -100h-300q9 -74 33 -132t52.5 -91t61.5 -54.5t59 -29 t47 -7.5q22 0 50.5 7.5t60.5 24.5t58 41t43.5 61t17.5 80h174q-30 -171 -128 -278q-107 -117 -274 -117q-206 0 -324 158q-36 48 -69 133t-45 204h-217l100 100h112q1 47 6 100h-218l100 100h134q20 87 51 153.5t62 103.5q117 141 297 141z" />
+<glyph unicode="&#x20bd;" d="M428 1200h350q67 0 120 -13t86 -31t57 -49.5t35 -56.5t17 -64.5t6.5 -60.5t0.5 -57v-16.5v-16.5q0 -36 -0.5 -57t-6.5 -61t-17 -65t-35 -57t-57 -50.5t-86 -31.5t-120 -13h-178l-2 -100h288q10 0 13 -6t-3 -14l-120 -160q-6 -8 -18 -14t-22 -6h-138v-175q0 -11 -5.5 -18 t-15.5 -7h-149q-10 0 -17.5 7.5t-7.5 17.5v175h-267q-10 0 -13 6t3 14l120 160q6 8 18 14t22 6h117v100h-267q-10 0 -13 6t3 14l120 160q6 8 18 14t22 6h117v475q0 10 7.5 17.5t17.5 7.5zM600 1000v-300h203q64 0 86.5 33t22.5 119q0 84 -22.5 116t-86.5 32h-203z" />
+<glyph unicode="&#x2212;" d="M250 700h800q21 0 35.5 -14.5t14.5 -35.5v-200q0 -21 -14.5 -35.5t-35.5 -14.5h-800q-21 0 -35.5 14.5t-14.5 35.5v200q0 21 14.5 35.5t35.5 14.5z" />
+<glyph unicode="&#x231b;" d="M1000 1200v-150q0 -21 -14.5 -35.5t-35.5 -14.5h-50v-100q0 -91 -49.5 -165.5t-130.5 -109.5q81 -35 130.5 -109.5t49.5 -165.5v-150h50q21 0 35.5 -14.5t14.5 -35.5v-150h-800v150q0 21 14.5 35.5t35.5 14.5h50v150q0 91 49.5 165.5t130.5 109.5q-81 35 -130.5 109.5 t-49.5 165.5v100h-50q-21 0 -35.5 14.5t-14.5 35.5v150h800zM400 1000v-100q0 -60 32.5 -109.5t87.5 -73.5q28 -12 44 -37t16 -55t-16 -55t-44 -37q-55 -24 -87.5 -73.5t-32.5 -109.5v-150h400v150q0 60 -32.5 109.5t-87.5 73.5q-28 12 -44 37t-16 55t16 55t44 37 q55 24 87.5 73.5t32.5 109.5v100h-400z" />
+<glyph unicode="&#x25fc;" horiz-adv-x="500" d="M0 0z" />
+<glyph unicode="&#x2601;" d="M503 1089q110 0 200.5 -59.5t134.5 -156.5q44 14 90 14q120 0 205 -86.5t85 -206.5q0 -121 -85 -207.5t-205 -86.5h-750q-79 0 -135.5 57t-56.5 137q0 69 42.5 122.5t108.5 67.5q-2 12 -2 37q0 153 108 260.5t260 107.5z" />
+<glyph unicode="&#x26fa;" d="M774 1193.5q16 -9.5 20.5 -27t-5.5 -33.5l-136 -187l467 -746h30q20 0 35 -18.5t15 -39.5v-42h-1200v42q0 21 15 39.5t35 18.5h30l468 746l-135 183q-10 16 -5.5 34t20.5 28t34 5.5t28 -20.5l111 -148l112 150q9 16 27 20.5t34 -5zM600 200h377l-182 112l-195 534v-646z " />
+<glyph unicode="&#x2709;" d="M25 1100h1150q10 0 12.5 -5t-5.5 -13l-564 -567q-8 -8 -18 -8t-18 8l-564 567q-8 8 -5.5 13t12.5 5zM18 882l264 -264q8 -8 8 -18t-8 -18l-264 -264q-8 -8 -13 -5.5t-5 12.5v550q0 10 5 12.5t13 -5.5zM918 618l264 264q8 8 13 5.5t5 -12.5v-550q0 -10 -5 -12.5t-13 5.5 l-264 264q-8 8 -8 18t8 18zM818 482l364 -364q8 -8 5.5 -13t-12.5 -5h-1150q-10 0 -12.5 5t5.5 13l364 364q8 8 18 8t18 -8l164 -164q8 -8 18 -8t18 8l164 164q8 8 18 8t18 -8z" />
+<glyph unicode="&#x270f;" d="M1011 1210q19 0 33 -13l153 -153q13 -14 13 -33t-13 -33l-99 -92l-214 214l95 96q13 14 32 14zM1013 800l-615 -614l-214 214l614 614zM317 96l-333 -112l110 335z" />
+<glyph unicode="&#xe001;" d="M700 650v-550h250q21 0 35.5 -14.5t14.5 -35.5v-50h-800v50q0 21 14.5 35.5t35.5 14.5h250v550l-500 550h1200z" />
+<glyph unicode="&#xe002;" d="M368 1017l645 163q39 15 63 0t24 -49v-831q0 -55 -41.5 -95.5t-111.5 -63.5q-79 -25 -147 -4.5t-86 75t25.5 111.5t122.5 82q72 24 138 8v521l-600 -155v-606q0 -42 -44 -90t-109 -69q-79 -26 -147 -5.5t-86 75.5t25.5 111.5t122.5 82.5q72 24 138 7v639q0 38 14.5 59 t53.5 34z" />
+<glyph unicode="&#xe003;" d="M500 1191q100 0 191 -39t156.5 -104.5t104.5 -156.5t39 -191l-1 -2l1 -5q0 -141 -78 -262l275 -274q23 -26 22.5 -44.5t-22.5 -42.5l-59 -58q-26 -20 -46.5 -20t-39.5 20l-275 274q-119 -77 -261 -77l-5 1l-2 -1q-100 0 -191 39t-156.5 104.5t-104.5 156.5t-39 191 t39 191t104.5 156.5t156.5 104.5t191 39zM500 1022q-88 0 -162 -43t-117 -117t-43 -162t43 -162t117 -117t162 -43t162 43t117 117t43 162t-43 162t-117 117t-162 43z" />
+<glyph unicode="&#xe005;" d="M649 949q48 68 109.5 104t121.5 38.5t118.5 -20t102.5 -64t71 -100.5t27 -123q0 -57 -33.5 -117.5t-94 -124.5t-126.5 -127.5t-150 -152.5t-146 -174q-62 85 -145.5 174t-150 152.5t-126.5 127.5t-93.5 124.5t-33.5 117.5q0 64 28 123t73 100.5t104 64t119 20 t120.5 -38.5t104.5 -104z" />
+<glyph unicode="&#xe006;" d="M407 800l131 353q7 19 17.5 19t17.5 -19l129 -353h421q21 0 24 -8.5t-14 -20.5l-342 -249l130 -401q7 -20 -0.5 -25.5t-24.5 6.5l-343 246l-342 -247q-17 -12 -24.5 -6.5t-0.5 25.5l130 400l-347 251q-17 12 -14 20.5t23 8.5h429z" />
+<glyph unicode="&#xe007;" d="M407 800l131 353q7 19 17.5 19t17.5 -19l129 -353h421q21 0 24 -8.5t-14 -20.5l-342 -249l130 -401q7 -20 -0.5 -25.5t-24.5 6.5l-343 246l-342 -247q-17 -12 -24.5 -6.5t-0.5 25.5l130 400l-347 251q-17 12 -14 20.5t23 8.5h429zM477 700h-240l197 -142l-74 -226 l193 139l195 -140l-74 229l192 140h-234l-78 211z" />
+<glyph unicode="&#xe008;" d="M600 1200q124 0 212 -88t88 -212v-250q0 -46 -31 -98t-69 -52v-75q0 -10 6 -21.5t15 -17.5l358 -230q9 -5 15 -16.5t6 -21.5v-93q0 -10 -7.5 -17.5t-17.5 -7.5h-1150q-10 0 -17.5 7.5t-7.5 17.5v93q0 10 6 21.5t15 16.5l358 230q9 6 15 17.5t6 21.5v75q-38 0 -69 52 t-31 98v250q0 124 88 212t212 88z" />
+<glyph unicode="&#xe009;" d="M25 1100h1150q10 0 17.5 -7.5t7.5 -17.5v-1050q0 -10 -7.5 -17.5t-17.5 -7.5h-1150q-10 0 -17.5 7.5t-7.5 17.5v1050q0 10 7.5 17.5t17.5 7.5zM100 1000v-100h100v100h-100zM875 1000h-550q-10 0 -17.5 -7.5t-7.5 -17.5v-350q0 -10 7.5 -17.5t17.5 -7.5h550 q10 0 17.5 7.5t7.5 17.5v350q0 10 -7.5 17.5t-17.5 7.5zM1000 1000v-100h100v100h-100zM100 800v-100h100v100h-100zM1000 800v-100h100v100h-100zM100 600v-100h100v100h-100zM1000 600v-100h100v100h-100zM875 500h-550q-10 0 -17.5 -7.5t-7.5 -17.5v-350q0 -10 7.5 -17.5 t17.5 -7.5h550q10 0 17.5 7.5t7.5 17.5v350q0 10 -7.5 17.5t-17.5 7.5zM100 400v-100h100v100h-100zM1000 400v-100h100v100h-100zM100 200v-100h100v100h-100zM1000 200v-100h100v100h-100z" />
+<glyph unicode="&#xe010;" d="M50 1100h400q21 0 35.5 -14.5t14.5 -35.5v-400q0 -21 -14.5 -35.5t-35.5 -14.5h-400q-21 0 -35.5 14.5t-14.5 35.5v400q0 21 14.5 35.5t35.5 14.5zM650 1100h400q21 0 35.5 -14.5t14.5 -35.5v-400q0 -21 -14.5 -35.5t-35.5 -14.5h-400q-21 0 -35.5 14.5t-14.5 35.5v400 q0 21 14.5 35.5t35.5 14.5zM50 500h400q21 0 35.5 -14.5t14.5 -35.5v-400q0 -21 -14.5 -35.5t-35.5 -14.5h-400q-21 0 -35.5 14.5t-14.5 35.5v400q0 21 14.5 35.5t35.5 14.5zM650 500h400q21 0 35.5 -14.5t14.5 -35.5v-400q0 -21 -14.5 -35.5t-35.5 -14.5h-400 q-21 0 -35.5 14.5t-14.5 35.5v400q0 21 14.5 35.5t35.5 14.5z" />
+<glyph unicode="&#xe011;" d="M50 1100h200q21 0 35.5 -14.5t14.5 -35.5v-200q0 -21 -14.5 -35.5t-35.5 -14.5h-200q-21 0 -35.5 14.5t-14.5 35.5v200q0 21 14.5 35.5t35.5 14.5zM450 1100h200q21 0 35.5 -14.5t14.5 -35.5v-200q0 -21 -14.5 -35.5t-35.5 -14.5h-200q-21 0 -35.5 14.5t-14.5 35.5v200 q0 21 14.5 35.5t35.5 14.5zM850 1100h200q21 0 35.5 -14.5t14.5 -35.5v-200q0 -21 -14.5 -35.5t-35.5 -14.5h-200q-21 0 -35.5 14.5t-14.5 35.5v200q0 21 14.5 35.5t35.5 14.5zM50 700h200q21 0 35.5 -14.5t14.5 -35.5v-200q0 -21 -14.5 -35.5t-35.5 -14.5h-200 q-21 0 -35.5 14.5t-14.5 35.5v200q0 21 14.5 35.5t35.5 14.5zM450 700h200q21 0 35.5 -14.5t14.5 -35.5v-200q0 -21 -14.5 -35.5t-35.5 -14.5h-200q-21 0 -35.5 14.5t-14.5 35.5v200q0 21 14.5 35.5t35.5 14.5zM850 700h200q21 0 35.5 -14.5t14.5 -35.5v-200 q0 -21 -14.5 -35.5t-35.5 -14.5h-200q-21 0 -35.5 14.5t-14.5 35.5v200q0 21 14.5 35.5t35.5 14.5zM50 300h200q21 0 35.5 -14.5t14.5 -35.5v-200q0 -21 -14.5 -35.5t-35.5 -14.5h-200q-21 0 -35.5 14.5t-14.5 35.5v200q0 21 14.5 35.5t35.5 14.5zM450 300h200 q21 0 35.5 -14.5t14.5 -35.5v-200q0 -21 -14.5 -35.5t-35.5 -14.5h-200q-21 0 -35.5 14.5t-14.5 35.5v200q0 21 14.5 35.5t35.5 14.5zM850 300h200q21 0 35.5 -14.5t14.5 -35.5v-200q0 -21 -14.5 -35.5t-35.5 -14.5h-200q-21 0 -35.5 14.5t-14.5 35.5v200q0 21 14.5 35.5 t35.5 14.5z" />
+<glyph unicode="&#xe012;" d="M50 1100h200q21 0 35.5 -14.5t14.5 -35.5v-200q0 -21 -14.5 -35.5t-35.5 -14.5h-200q-21 0 -35.5 14.5t-14.5 35.5v200q0 21 14.5 35.5t35.5 14.5zM450 1100h700q21 0 35.5 -14.5t14.5 -35.5v-200q0 -21 -14.5 -35.5t-35.5 -14.5h-700q-21 0 -35.5 14.5t-14.5 35.5v200 q0 21 14.5 35.5t35.5 14.5zM50 700h200q21 0 35.5 -14.5t14.5 -35.5v-200q0 -21 -14.5 -35.5t-35.5 -14.5h-200q-21 0 -35.5 14.5t-14.5 35.5v200q0 21 14.5 35.5t35.5 14.5zM450 700h700q21 0 35.5 -14.5t14.5 -35.5v-200q0 -21 -14.5 -35.5t-35.5 -14.5h-700 q-21 0 -35.5 14.5t-14.5 35.5v200q0 21 14.5 35.5t35.5 14.5zM50 300h200q21 0 35.5 -14.5t14.5 -35.5v-200q0 -21 -14.5 -35.5t-35.5 -14.5h-200q-21 0 -35.5 14.5t-14.5 35.5v200q0 21 14.5 35.5t35.5 14.5zM450 300h700q21 0 35.5 -14.5t14.5 -35.5v-200 q0 -21 -14.5 -35.5t-35.5 -14.5h-700q-21 0 -35.5 14.5t-14.5 35.5v200q0 21 14.5 35.5t35.5 14.5z" />
+<glyph unicode="&#xe013;" d="M465 477l571 571q8 8 18 8t17 -8l177 -177q8 -7 8 -17t-8 -18l-783 -784q-7 -8 -17.5 -8t-17.5 8l-384 384q-8 8 -8 18t8 17l177 177q7 8 17 8t18 -8l171 -171q7 -7 18 -7t18 7z" />
+<glyph unicode="&#xe014;" d="M904 1083l178 -179q8 -8 8 -18.5t-8 -17.5l-267 -268l267 -268q8 -7 8 -17.5t-8 -18.5l-178 -178q-8 -8 -18.5 -8t-17.5 8l-268 267l-268 -267q-7 -8 -17.5 -8t-18.5 8l-178 178q-8 8 -8 18.5t8 17.5l267 268l-267 268q-8 7 -8 17.5t8 18.5l178 178q8 8 18.5 8t17.5 -8 l268 -267l268 268q7 7 17.5 7t18.5 -7z" />
+<glyph unicode="&#xe015;" d="M507 1177q98 0 187.5 -38.5t154.5 -103.5t103.5 -154.5t38.5 -187.5q0 -141 -78 -262l300 -299q8 -8 8 -18.5t-8 -18.5l-109 -108q-7 -8 -17.5 -8t-18.5 8l-300 299q-119 -77 -261 -77q-98 0 -188 38.5t-154.5 103t-103 154.5t-38.5 188t38.5 187.5t103 154.5 t154.5 103.5t188 38.5zM506.5 1023q-89.5 0 -165.5 -44t-120 -120.5t-44 -166t44 -165.5t120 -120t165.5 -44t166 44t120.5 120t44 165.5t-44 166t-120.5 120.5t-166 44zM425 900h150q10 0 17.5 -7.5t7.5 -17.5v-75h75q10 0 17.5 -7.5t7.5 -17.5v-150q0 -10 -7.5 -17.5 t-17.5 -7.5h-75v-75q0 -10 -7.5 -17.5t-17.5 -7.5h-150q-10 0 -17.5 7.5t-7.5 17.5v75h-75q-10 0 -17.5 7.5t-7.5 17.5v150q0 10 7.5 17.5t17.5 7.5h75v75q0 10 7.5 17.5t17.5 7.5z" />
+<glyph unicode="&#xe016;" d="M507 1177q98 0 187.5 -38.5t154.5 -103.5t103.5 -154.5t38.5 -187.5q0 -141 -78 -262l300 -299q8 -8 8 -18.5t-8 -18.5l-109 -108q-7 -8 -17.5 -8t-18.5 8l-300 299q-119 -77 -261 -77q-98 0 -188 38.5t-154.5 103t-103 154.5t-38.5 188t38.5 187.5t103 154.5 t154.5 103.5t188 38.5zM506.5 1023q-89.5 0 -165.5 -44t-120 -120.5t-44 -166t44 -165.5t120 -120t165.5 -44t166 44t120.5 120t44 165.5t-44 166t-120.5 120.5t-166 44zM325 800h350q10 0 17.5 -7.5t7.5 -17.5v-150q0 -10 -7.5 -17.5t-17.5 -7.5h-350q-10 0 -17.5 7.5 t-7.5 17.5v150q0 10 7.5 17.5t17.5 7.5z" />
+<glyph unicode="&#xe017;" d="M550 1200h100q21 0 35.5 -14.5t14.5 -35.5v-400q0 -21 -14.5 -35.5t-35.5 -14.5h-100q-21 0 -35.5 14.5t-14.5 35.5v400q0 21 14.5 35.5t35.5 14.5zM800 975v166q167 -62 272 -209.5t105 -331.5q0 -117 -45.5 -224t-123 -184.5t-184.5 -123t-224 -45.5t-224 45.5 t-184.5 123t-123 184.5t-45.5 224q0 184 105 331.5t272 209.5v-166q-103 -55 -165 -155t-62 -220q0 -116 57 -214.5t155.5 -155.5t214.5 -57t214.5 57t155.5 155.5t57 214.5q0 120 -62 220t-165 155z" />
+<glyph unicode="&#xe018;" d="M1025 1200h150q10 0 17.5 -7.5t7.5 -17.5v-1150q0 -10 -7.5 -17.5t-17.5 -7.5h-150q-10 0 -17.5 7.5t-7.5 17.5v1150q0 10 7.5 17.5t17.5 7.5zM725 800h150q10 0 17.5 -7.5t7.5 -17.5v-750q0 -10 -7.5 -17.5t-17.5 -7.5h-150q-10 0 -17.5 7.5t-7.5 17.5v750 q0 10 7.5 17.5t17.5 7.5zM425 500h150q10 0 17.5 -7.5t7.5 -17.5v-450q0 -10 -7.5 -17.5t-17.5 -7.5h-150q-10 0 -17.5 7.5t-7.5 17.5v450q0 10 7.5 17.5t17.5 7.5zM125 300h150q10 0 17.5 -7.5t7.5 -17.5v-250q0 -10 -7.5 -17.5t-17.5 -7.5h-150q-10 0 -17.5 7.5t-7.5 17.5 v250q0 10 7.5 17.5t17.5 7.5z" />
+<glyph unicode="&#xe019;" d="M600 1174q33 0 74 -5l38 -152l5 -1q49 -14 94 -39l5 -2l134 80q61 -48 104 -105l-80 -134l3 -5q25 -44 39 -93l1 -6l152 -38q5 -43 5 -73q0 -34 -5 -74l-152 -38l-1 -6q-15 -49 -39 -93l-3 -5l80 -134q-48 -61 -104 -105l-134 81l-5 -3q-44 -25 -94 -39l-5 -2l-38 -151 q-43 -5 -74 -5q-33 0 -74 5l-38 151l-5 2q-49 14 -94 39l-5 3l-134 -81q-60 48 -104 105l80 134l-3 5q-25 45 -38 93l-2 6l-151 38q-6 42 -6 74q0 33 6 73l151 38l2 6q13 48 38 93l3 5l-80 134q47 61 105 105l133 -80l5 2q45 25 94 39l5 1l38 152q43 5 74 5zM600 815 q-89 0 -152 -63t-63 -151.5t63 -151.5t152 -63t152 63t63 151.5t-63 151.5t-152 63z" />
+<glyph unicode="&#xe020;" d="M500 1300h300q41 0 70.5 -29.5t29.5 -70.5v-100h275q10 0 17.5 -7.5t7.5 -17.5v-75h-1100v75q0 10 7.5 17.5t17.5 7.5h275v100q0 41 29.5 70.5t70.5 29.5zM500 1200v-100h300v100h-300zM1100 900v-800q0 -41 -29.5 -70.5t-70.5 -29.5h-700q-41 0 -70.5 29.5t-29.5 70.5 v800h900zM300 800v-700h100v700h-100zM500 800v-700h100v700h-100zM700 800v-700h100v700h-100zM900 800v-700h100v700h-100z" />
+<glyph unicode="&#xe021;" d="M18 618l620 608q8 7 18.5 7t17.5 -7l608 -608q8 -8 5.5 -13t-12.5 -5h-175v-575q0 -10 -7.5 -17.5t-17.5 -7.5h-250q-10 0 -17.5 7.5t-7.5 17.5v375h-300v-375q0 -10 -7.5 -17.5t-17.5 -7.5h-250q-10 0 -17.5 7.5t-7.5 17.5v575h-175q-10 0 -12.5 5t5.5 13z" />
+<glyph unicode="&#xe022;" d="M600 1200v-400q0 -41 29.5 -70.5t70.5 -29.5h300v-650q0 -21 -14.5 -35.5t-35.5 -14.5h-800q-21 0 -35.5 14.5t-14.5 35.5v1100q0 21 14.5 35.5t35.5 14.5h450zM1000 800h-250q-21 0 -35.5 14.5t-14.5 35.5v250z" />
+<glyph unicode="&#xe023;" d="M600 1177q117 0 224 -45.5t184.5 -123t123 -184.5t45.5 -224t-45.5 -224t-123 -184.5t-184.5 -123t-224 -45.5t-224 45.5t-184.5 123t-123 184.5t-45.5 224t45.5 224t123 184.5t184.5 123t224 45.5zM600 1027q-116 0 -214.5 -57t-155.5 -155.5t-57 -214.5t57 -214.5 t155.5 -155.5t214.5 -57t214.5 57t155.5 155.5t57 214.5t-57 214.5t-155.5 155.5t-214.5 57zM525 900h50q10 0 17.5 -7.5t7.5 -17.5v-275h175q10 0 17.5 -7.5t7.5 -17.5v-50q0 -10 -7.5 -17.5t-17.5 -7.5h-250q-10 0 -17.5 7.5t-7.5 17.5v350q0 10 7.5 17.5t17.5 7.5z" />
+<glyph unicode="&#xe024;" d="M1300 0h-538l-41 400h-242l-41 -400h-538l431 1200h209l-21 -300h162l-20 300h208zM515 800l-27 -300h224l-27 300h-170z" />
+<glyph unicode="&#xe025;" d="M550 1200h200q21 0 35.5 -14.5t14.5 -35.5v-450h191q20 0 25.5 -11.5t-7.5 -27.5l-327 -400q-13 -16 -32 -16t-32 16l-327 400q-13 16 -7.5 27.5t25.5 11.5h191v450q0 21 14.5 35.5t35.5 14.5zM1125 400h50q10 0 17.5 -7.5t7.5 -17.5v-350q0 -10 -7.5 -17.5t-17.5 -7.5 h-1050q-10 0 -17.5 7.5t-7.5 17.5v350q0 10 7.5 17.5t17.5 7.5h50q10 0 17.5 -7.5t7.5 -17.5v-175h900v175q0 10 7.5 17.5t17.5 7.5z" />
+<glyph unicode="&#xe026;" d="M600 1177q117 0 224 -45.5t184.5 -123t123 -184.5t45.5 -224t-45.5 -224t-123 -184.5t-184.5 -123t-224 -45.5t-224 45.5t-184.5 123t-123 184.5t-45.5 224t45.5 224t123 184.5t184.5 123t224 45.5zM600 1027q-116 0 -214.5 -57t-155.5 -155.5t-57 -214.5t57 -214.5 t155.5 -155.5t214.5 -57t214.5 57t155.5 155.5t57 214.5t-57 214.5t-155.5 155.5t-214.5 57zM525 900h150q10 0 17.5 -7.5t7.5 -17.5v-275h137q21 0 26 -11.5t-8 -27.5l-223 -275q-13 -16 -32 -16t-32 16l-223 275q-13 16 -8 27.5t26 11.5h137v275q0 10 7.5 17.5t17.5 7.5z " />
+<glyph unicode="&#xe027;" d="M600 1177q117 0 224 -45.5t184.5 -123t123 -184.5t45.5 -224t-45.5 -224t-123 -184.5t-184.5 -123t-224 -45.5t-224 45.5t-184.5 123t-123 184.5t-45.5 224t45.5 224t123 184.5t184.5 123t224 45.5zM600 1027q-116 0 -214.5 -57t-155.5 -155.5t-57 -214.5t57 -214.5 t155.5 -155.5t214.5 -57t214.5 57t155.5 155.5t57 214.5t-57 214.5t-155.5 155.5t-214.5 57zM632 914l223 -275q13 -16 8 -27.5t-26 -11.5h-137v-275q0 -10 -7.5 -17.5t-17.5 -7.5h-150q-10 0 -17.5 7.5t-7.5 17.5v275h-137q-21 0 -26 11.5t8 27.5l223 275q13 16 32 16 t32 -16z" />
+<glyph unicode="&#xe028;" d="M225 1200h750q10 0 19.5 -7t12.5 -17l186 -652q7 -24 7 -49v-425q0 -12 -4 -27t-9 -17q-12 -6 -37 -6h-1100q-12 0 -27 4t-17 8q-6 13 -6 38l1 425q0 25 7 49l185 652q3 10 12.5 17t19.5 7zM878 1000h-556q-10 0 -19 -7t-11 -18l-87 -450q-2 -11 4 -18t16 -7h150 q10 0 19.5 -7t11.5 -17l38 -152q2 -10 11.5 -17t19.5 -7h250q10 0 19.5 7t11.5 17l38 152q2 10 11.5 17t19.5 7h150q10 0 16 7t4 18l-87 450q-2 11 -11 18t-19 7z" />
+<glyph unicode="&#xe029;" d="M600 1177q117 0 224 -45.5t184.5 -123t123 -184.5t45.5 -224t-45.5 -224t-123 -184.5t-184.5 -123t-224 -45.5t-224 45.5t-184.5 123t-123 184.5t-45.5 224t45.5 224t123 184.5t184.5 123t224 45.5zM600 1027q-116 0 -214.5 -57t-155.5 -155.5t-57 -214.5t57 -214.5 t155.5 -155.5t214.5 -57t214.5 57t155.5 155.5t57 214.5t-57 214.5t-155.5 155.5t-214.5 57zM540 820l253 -190q17 -12 17 -30t-17 -30l-253 -190q-16 -12 -28 -6.5t-12 26.5v400q0 21 12 26.5t28 -6.5z" />
+<glyph unicode="&#xe030;" d="M947 1060l135 135q7 7 12.5 5t5.5 -13v-362q0 -10 -7.5 -17.5t-17.5 -7.5h-362q-11 0 -13 5.5t5 12.5l133 133q-109 76 -238 76q-116 0 -214.5 -57t-155.5 -155.5t-57 -214.5t57 -214.5t155.5 -155.5t214.5 -57t214.5 57t155.5 155.5t57 214.5h150q0 -117 -45.5 -224 t-123 -184.5t-184.5 -123t-224 -45.5t-224 45.5t-184.5 123t-123 184.5t-45.5 224t45.5 224t123 184.5t184.5 123t224 45.5q192 0 347 -117z" />
+<glyph unicode="&#xe031;" d="M947 1060l135 135q7 7 12.5 5t5.5 -13v-361q0 -11 -7.5 -18.5t-18.5 -7.5h-361q-11 0 -13 5.5t5 12.5l134 134q-110 75 -239 75q-116 0 -214.5 -57t-155.5 -155.5t-57 -214.5h-150q0 117 45.5 224t123 184.5t184.5 123t224 45.5q192 0 347 -117zM1027 600h150 q0 -117 -45.5 -224t-123 -184.5t-184.5 -123t-224 -45.5q-192 0 -348 118l-134 -134q-7 -8 -12.5 -5.5t-5.5 12.5v360q0 11 7.5 18.5t18.5 7.5h360q10 0 12.5 -5.5t-5.5 -12.5l-133 -133q110 -76 240 -76q116 0 214.5 57t155.5 155.5t57 214.5z" />
+<glyph unicode="&#xe032;" d="M125 1200h1050q10 0 17.5 -7.5t7.5 -17.5v-1150q0 -10 -7.5 -17.5t-17.5 -7.5h-1050q-10 0 -17.5 7.5t-7.5 17.5v1150q0 10 7.5 17.5t17.5 7.5zM1075 1000h-850q-10 0 -17.5 -7.5t-7.5 -17.5v-850q0 -10 7.5 -17.5t17.5 -7.5h850q10 0 17.5 7.5t7.5 17.5v850 q0 10 -7.5 17.5t-17.5 7.5zM325 900h50q10 0 17.5 -7.5t7.5 -17.5v-50q0 -10 -7.5 -17.5t-17.5 -7.5h-50q-10 0 -17.5 7.5t-7.5 17.5v50q0 10 7.5 17.5t17.5 7.5zM525 900h450q10 0 17.5 -7.5t7.5 -17.5v-50q0 -10 -7.5 -17.5t-17.5 -7.5h-450q-10 0 -17.5 7.5t-7.5 17.5v50 q0 10 7.5 17.5t17.5 7.5zM325 700h50q10 0 17.5 -7.5t7.5 -17.5v-50q0 -10 -7.5 -17.5t-17.5 -7.5h-50q-10 0 -17.5 7.5t-7.5 17.5v50q0 10 7.5 17.5t17.5 7.5zM525 700h450q10 0 17.5 -7.5t7.5 -17.5v-50q0 -10 -7.5 -17.5t-17.5 -7.5h-450q-10 0 -17.5 7.5t-7.5 17.5v50 q0 10 7.5 17.5t17.5 7.5zM325 500h50q10 0 17.5 -7.5t7.5 -17.5v-50q0 -10 -7.5 -17.5t-17.5 -7.5h-50q-10 0 -17.5 7.5t-7.5 17.5v50q0 10 7.5 17.5t17.5 7.5zM525 500h450q10 0 17.5 -7.5t7.5 -17.5v-50q0 -10 -7.5 -17.5t-17.5 -7.5h-450q-10 0 -17.5 7.5t-7.5 17.5v50 q0 10 7.5 17.5t17.5 7.5zM325 300h50q10 0 17.5 -7.5t7.5 -17.5v-50q0 -10 -7.5 -17.5t-17.5 -7.5h-50q-10 0 -17.5 7.5t-7.5 17.5v50q0 10 7.5 17.5t17.5 7.5zM525 300h450q10 0 17.5 -7.5t7.5 -17.5v-50q0 -10 -7.5 -17.5t-17.5 -7.5h-450q-10 0 -17.5 7.5t-7.5 17.5v50 q0 10 7.5 17.5t17.5 7.5z" />
+<glyph unicode="&#xe033;" d="M900 800v200q0 83 -58.5 141.5t-141.5 58.5h-300q-82 0 -141 -59t-59 -141v-200h-100q-41 0 -70.5 -29.5t-29.5 -70.5v-600q0 -41 29.5 -70.5t70.5 -29.5h900q41 0 70.5 29.5t29.5 70.5v600q0 41 -29.5 70.5t-70.5 29.5h-100zM400 800v150q0 21 15 35.5t35 14.5h200 q20 0 35 -14.5t15 -35.5v-150h-300z" />
+<glyph unicode="&#xe034;" d="M125 1100h50q10 0 17.5 -7.5t7.5 -17.5v-1075h-100v1075q0 10 7.5 17.5t17.5 7.5zM1075 1052q4 0 9 -2q16 -6 16 -23v-421q0 -6 -3 -12q-33 -59 -66.5 -99t-65.5 -58t-56.5 -24.5t-52.5 -6.5q-26 0 -57.5 6.5t-52.5 13.5t-60 21q-41 15 -63 22.5t-57.5 15t-65.5 7.5 q-85 0 -160 -57q-7 -5 -15 -5q-6 0 -11 3q-14 7 -14 22v438q22 55 82 98.5t119 46.5q23 2 43 0.5t43 -7t32.5 -8.5t38 -13t32.5 -11q41 -14 63.5 -21t57 -14t63.5 -7q103 0 183 87q7 8 18 8z" />
+<glyph unicode="&#xe035;" d="M600 1175q116 0 227 -49.5t192.5 -131t131 -192.5t49.5 -227v-300q0 -10 -7.5 -17.5t-17.5 -7.5h-50q-10 0 -17.5 7.5t-7.5 17.5v300q0 127 -70.5 231.5t-184.5 161.5t-245 57t-245 -57t-184.5 -161.5t-70.5 -231.5v-300q0 -10 -7.5 -17.5t-17.5 -7.5h-50 q-10 0 -17.5 7.5t-7.5 17.5v300q0 116 49.5 227t131 192.5t192.5 131t227 49.5zM220 500h160q8 0 14 -6t6 -14v-460q0 -8 -6 -14t-14 -6h-160q-8 0 -14 6t-6 14v460q0 8 6 14t14 6zM820 500h160q8 0 14 -6t6 -14v-460q0 -8 -6 -14t-14 -6h-160q-8 0 -14 6t-6 14v460 q0 8 6 14t14 6z" />
+<glyph unicode="&#xe036;" d="M321 814l258 172q9 6 15 2.5t6 -13.5v-750q0 -10 -6 -13.5t-15 2.5l-258 172q-21 14 -46 14h-250q-10 0 -17.5 7.5t-7.5 17.5v350q0 10 7.5 17.5t17.5 7.5h250q25 0 46 14zM900 668l120 120q7 7 17 7t17 -7l34 -34q7 -7 7 -17t-7 -17l-120 -120l120 -120q7 -7 7 -17 t-7 -17l-34 -34q-7 -7 -17 -7t-17 7l-120 119l-120 -119q-7 -7 -17 -7t-17 7l-34 34q-7 7 -7 17t7 17l119 120l-119 120q-7 7 -7 17t7 17l34 34q7 8 17 8t17 -8z" />
+<glyph unicode="&#xe037;" d="M321 814l258 172q9 6 15 2.5t6 -13.5v-750q0 -10 -6 -13.5t-15 2.5l-258 172q-21 14 -46 14h-250q-10 0 -17.5 7.5t-7.5 17.5v350q0 10 7.5 17.5t17.5 7.5h250q25 0 46 14zM766 900h4q10 -1 16 -10q96 -129 96 -290q0 -154 -90 -281q-6 -9 -17 -10l-3 -1q-9 0 -16 6 l-29 23q-7 7 -8.5 16.5t4.5 17.5q72 103 72 229q0 132 -78 238q-6 8 -4.5 18t9.5 17l29 22q7 5 15 5z" />
+<glyph unicode="&#xe038;" d="M967 1004h3q11 -1 17 -10q135 -179 135 -396q0 -105 -34 -206.5t-98 -185.5q-7 -9 -17 -10h-3q-9 0 -16 6l-42 34q-8 6 -9 16t5 18q111 150 111 328q0 90 -29.5 176t-84.5 157q-6 9 -5 19t10 16l42 33q7 5 15 5zM321 814l258 172q9 6 15 2.5t6 -13.5v-750q0 -10 -6 -13.5 t-15 2.5l-258 172q-21 14 -46 14h-250q-10 0 -17.5 7.5t-7.5 17.5v350q0 10 7.5 17.5t17.5 7.5h250q25 0 46 14zM766 900h4q10 -1 16 -10q96 -129 96 -290q0 -154 -90 -281q-6 -9 -17 -10l-3 -1q-9 0 -16 6l-29 23q-7 7 -8.5 16.5t4.5 17.5q72 103 72 229q0 132 -78 238 q-6 8 -4.5 18.5t9.5 16.5l29 22q7 5 15 5z" />
+<glyph unicode="&#xe039;" d="M500 900h100v-100h-100v-100h-400v-100h-100v600h500v-300zM1200 700h-200v-100h200v-200h-300v300h-200v300h-100v200h600v-500zM100 1100v-300h300v300h-300zM800 1100v-300h300v300h-300zM300 900h-100v100h100v-100zM1000 900h-100v100h100v-100zM300 500h200v-500 h-500v500h200v100h100v-100zM800 300h200v-100h-100v-100h-200v100h-100v100h100v200h-200v100h300v-300zM100 400v-300h300v300h-300zM300 200h-100v100h100v-100zM1200 200h-100v100h100v-100zM700 0h-100v100h100v-100zM1200 0h-300v100h300v-100z" />
+<glyph unicode="&#xe040;" d="M100 200h-100v1000h100v-1000zM300 200h-100v1000h100v-1000zM700 200h-200v1000h200v-1000zM900 200h-100v1000h100v-1000zM1200 200h-200v1000h200v-1000zM400 0h-300v100h300v-100zM600 0h-100v91h100v-91zM800 0h-100v91h100v-91zM1100 0h-200v91h200v-91z" />
+<glyph unicode="&#xe041;" d="M500 1200l682 -682q8 -8 8 -18t-8 -18l-464 -464q-8 -8 -18 -8t-18 8l-682 682l1 475q0 10 7.5 17.5t17.5 7.5h474zM319.5 1024.5q-29.5 29.5 -71 29.5t-71 -29.5t-29.5 -71.5t29.5 -71.5t71 -29.5t71 29.5t29.5 71.5t-29.5 71.5z" />
+<glyph unicode="&#xe042;" d="M500 1200l682 -682q8 -8 8 -18t-8 -18l-464 -464q-8 -8 -18 -8t-18 8l-682 682l1 475q0 10 7.5 17.5t17.5 7.5h474zM800 1200l682 -682q8 -8 8 -18t-8 -18l-464 -464q-8 -8 -18 -8t-18 8l-56 56l424 426l-700 700h150zM319.5 1024.5q-29.5 29.5 -71 29.5t-71 -29.5 t-29.5 -71.5t29.5 -71.5t71 -29.5t71 29.5t29.5 71.5t-29.5 71.5z" />
+<glyph unicode="&#xe043;" d="M300 1200h825q75 0 75 -75v-900q0 -25 -18 -43l-64 -64q-8 -8 -13 -5.5t-5 12.5v950q0 10 -7.5 17.5t-17.5 7.5h-700q-25 0 -43 -18l-64 -64q-8 -8 -5.5 -13t12.5 -5h700q10 0 17.5 -7.5t7.5 -17.5v-950q0 -10 -7.5 -17.5t-17.5 -7.5h-850q-10 0 -17.5 7.5t-7.5 17.5v975 q0 25 18 43l139 139q18 18 43 18z" />
+<glyph unicode="&#xe044;" d="M250 1200h800q21 0 35.5 -14.5t14.5 -35.5v-1150l-450 444l-450 -445v1151q0 21 14.5 35.5t35.5 14.5z" />
+<glyph unicode="&#xe045;" d="M822 1200h-444q-11 0 -19 -7.5t-9 -17.5l-78 -301q-7 -24 7 -45l57 -108q6 -9 17.5 -15t21.5 -6h450q10 0 21.5 6t17.5 15l62 108q14 21 7 45l-83 301q-1 10 -9 17.5t-19 7.5zM1175 800h-150q-10 0 -21 -6.5t-15 -15.5l-78 -156q-4 -9 -15 -15.5t-21 -6.5h-550 q-10 0 -21 6.5t-15 15.5l-78 156q-4 9 -15 15.5t-21 6.5h-150q-10 0 -17.5 -7.5t-7.5 -17.5v-650q0 -10 7.5 -17.5t17.5 -7.5h150q10 0 17.5 7.5t7.5 17.5v150q0 10 7.5 17.5t17.5 7.5h750q10 0 17.5 -7.5t7.5 -17.5v-150q0 -10 7.5 -17.5t17.5 -7.5h150q10 0 17.5 7.5 t7.5 17.5v650q0 10 -7.5 17.5t-17.5 7.5zM850 200h-500q-10 0 -19.5 -7t-11.5 -17l-38 -152q-2 -10 3.5 -17t15.5 -7h600q10 0 15.5 7t3.5 17l-38 152q-2 10 -11.5 17t-19.5 7z" />
+<glyph unicode="&#xe046;" d="M500 1100h200q56 0 102.5 -20.5t72.5 -50t44 -59t25 -50.5l6 -20h150q41 0 70.5 -29.5t29.5 -70.5v-600q0 -41 -29.5 -70.5t-70.5 -29.5h-1000q-41 0 -70.5 29.5t-29.5 70.5v600q0 41 29.5 70.5t70.5 29.5h150q2 8 6.5 21.5t24 48t45 61t72 48t102.5 21.5zM900 800v-100 h100v100h-100zM600 730q-95 0 -162.5 -67.5t-67.5 -162.5t67.5 -162.5t162.5 -67.5t162.5 67.5t67.5 162.5t-67.5 162.5t-162.5 67.5zM600 603q43 0 73 -30t30 -73t-30 -73t-73 -30t-73 30t-30 73t30 73t73 30z" />
+<glyph unicode="&#xe047;" d="M681 1199l385 -998q20 -50 60 -92q18 -19 36.5 -29.5t27.5 -11.5l10 -2v-66h-417v66q53 0 75 43.5t5 88.5l-82 222h-391q-58 -145 -92 -234q-11 -34 -6.5 -57t25.5 -37t46 -20t55 -6v-66h-365v66q56 24 84 52q12 12 25 30.5t20 31.5l7 13l399 1006h93zM416 521h340 l-162 457z" />
+<glyph unicode="&#xe048;" d="M753 641q5 -1 14.5 -4.5t36 -15.5t50.5 -26.5t53.5 -40t50.5 -54.5t35.5 -70t14.5 -87q0 -67 -27.5 -125.5t-71.5 -97.5t-98.5 -66.5t-108.5 -40.5t-102 -13h-500v89q41 7 70.5 32.5t29.5 65.5v827q0 24 -0.5 34t-3.5 24t-8.5 19.5t-17 13.5t-28 12.5t-42.5 11.5v71 l471 -1q57 0 115.5 -20.5t108 -57t80.5 -94t31 -124.5q0 -51 -15.5 -96.5t-38 -74.5t-45 -50.5t-38.5 -30.5zM400 700h139q78 0 130.5 48.5t52.5 122.5q0 41 -8.5 70.5t-29.5 55.5t-62.5 39.5t-103.5 13.5h-118v-350zM400 200h216q80 0 121 50.5t41 130.5q0 90 -62.5 154.5 t-156.5 64.5h-159v-400z" />
+<glyph unicode="&#xe049;" d="M877 1200l2 -57q-83 -19 -116 -45.5t-40 -66.5l-132 -839q-9 -49 13 -69t96 -26v-97h-500v97q186 16 200 98l173 832q3 17 3 30t-1.5 22.5t-9 17.5t-13.5 12.5t-21.5 10t-26 8.5t-33.5 10q-13 3 -19 5v57h425z" />
+<glyph unicode="&#xe050;" d="M1300 900h-50q0 21 -4 37t-9.5 26.5t-18 17.5t-22 11t-28.5 5.5t-31 2t-37 0.5h-200v-850q0 -22 25 -34.5t50 -13.5l25 -2v-100h-400v100q4 0 11 0.5t24 3t30 7t24 15t11 24.5v850h-200q-25 0 -37 -0.5t-31 -2t-28.5 -5.5t-22 -11t-18 -17.5t-9.5 -26.5t-4 -37h-50v300 h1000v-300zM175 1000h-75v-800h75l-125 -167l-125 167h75v800h-75l125 167z" />
+<glyph unicode="&#xe051;" d="M1100 900h-50q0 21 -4 37t-9.5 26.5t-18 17.5t-22 11t-28.5 5.5t-31 2t-37 0.5h-200v-650q0 -22 25 -34.5t50 -13.5l25 -2v-100h-400v100q4 0 11 0.5t24 3t30 7t24 15t11 24.5v650h-200q-25 0 -37 -0.5t-31 -2t-28.5 -5.5t-22 -11t-18 -17.5t-9.5 -26.5t-4 -37h-50v300 h1000v-300zM1167 50l-167 -125v75h-800v-75l-167 125l167 125v-75h800v75z" />
+<glyph unicode="&#xe052;" d="M50 1100h600q21 0 35.5 -14.5t14.5 -35.5v-100q0 -21 -14.5 -35.5t-35.5 -14.5h-600q-21 0 -35.5 14.5t-14.5 35.5v100q0 21 14.5 35.5t35.5 14.5zM50 800h1000q21 0 35.5 -14.5t14.5 -35.5v-100q0 -21 -14.5 -35.5t-35.5 -14.5h-1000q-21 0 -35.5 14.5t-14.5 35.5v100 q0 21 14.5 35.5t35.5 14.5zM50 500h800q21 0 35.5 -14.5t14.5 -35.5v-100q0 -21 -14.5 -35.5t-35.5 -14.5h-800q-21 0 -35.5 14.5t-14.5 35.5v100q0 21 14.5 35.5t35.5 14.5zM50 200h1100q21 0 35.5 -14.5t14.5 -35.5v-100q0 -21 -14.5 -35.5t-35.5 -14.5h-1100 q-21 0 -35.5 14.5t-14.5 35.5v100q0 21 14.5 35.5t35.5 14.5z" />
+<glyph unicode="&#xe053;" d="M250 1100h700q21 0 35.5 -14.5t14.5 -35.5v-100q0 -21 -14.5 -35.5t-35.5 -14.5h-700q-21 0 -35.5 14.5t-14.5 35.5v100q0 21 14.5 35.5t35.5 14.5zM50 800h1100q21 0 35.5 -14.5t14.5 -35.5v-100q0 -21 -14.5 -35.5t-35.5 -14.5h-1100q-21 0 -35.5 14.5t-14.5 35.5v100 q0 21 14.5 35.5t35.5 14.5zM250 500h700q21 0 35.5 -14.5t14.5 -35.5v-100q0 -21 -14.5 -35.5t-35.5 -14.5h-700q-21 0 -35.5 14.5t-14.5 35.5v100q0 21 14.5 35.5t35.5 14.5zM50 200h1100q21 0 35.5 -14.5t14.5 -35.5v-100q0 -21 -14.5 -35.5t-35.5 -14.5h-1100 q-21 0 -35.5 14.5t-14.5 35.5v100q0 21 14.5 35.5t35.5 14.5z" />
+<glyph unicode="&#xe054;" d="M500 950v100q0 21 14.5 35.5t35.5 14.5h600q21 0 35.5 -14.5t14.5 -35.5v-100q0 -21 -14.5 -35.5t-35.5 -14.5h-600q-21 0 -35.5 14.5t-14.5 35.5zM100 650v100q0 21 14.5 35.5t35.5 14.5h1000q21 0 35.5 -14.5t14.5 -35.5v-100q0 -21 -14.5 -35.5t-35.5 -14.5h-1000 q-21 0 -35.5 14.5t-14.5 35.5zM300 350v100q0 21 14.5 35.5t35.5 14.5h800q21 0 35.5 -14.5t14.5 -35.5v-100q0 -21 -14.5 -35.5t-35.5 -14.5h-800q-21 0 -35.5 14.5t-14.5 35.5zM0 50v100q0 21 14.5 35.5t35.5 14.5h1100q21 0 35.5 -14.5t14.5 -35.5v-100 q0 -21 -14.5 -35.5t-35.5 -14.5h-1100q-21 0 -35.5 14.5t-14.5 35.5z" />
+<glyph unicode="&#xe055;" d="M50 1100h1100q21 0 35.5 -14.5t14.5 -35.5v-100q0 -21 -14.5 -35.5t-35.5 -14.5h-1100q-21 0 -35.5 14.5t-14.5 35.5v100q0 21 14.5 35.5t35.5 14.5zM50 800h1100q21 0 35.5 -14.5t14.5 -35.5v-100q0 -21 -14.5 -35.5t-35.5 -14.5h-1100q-21 0 -35.5 14.5t-14.5 35.5v100 q0 21 14.5 35.5t35.5 14.5zM50 500h1100q21 0 35.5 -14.5t14.5 -35.5v-100q0 -21 -14.5 -35.5t-35.5 -14.5h-1100q-21 0 -35.5 14.5t-14.5 35.5v100q0 21 14.5 35.5t35.5 14.5zM50 200h1100q21 0 35.5 -14.5t14.5 -35.5v-100q0 -21 -14.5 -35.5t-35.5 -14.5h-1100 q-21 0 -35.5 14.5t-14.5 35.5v100q0 21 14.5 35.5t35.5 14.5z" />
+<glyph unicode="&#xe056;" d="M50 1100h100q21 0 35.5 -14.5t14.5 -35.5v-100q0 -21 -14.5 -35.5t-35.5 -14.5h-100q-21 0 -35.5 14.5t-14.5 35.5v100q0 21 14.5 35.5t35.5 14.5zM350 1100h800q21 0 35.5 -14.5t14.5 -35.5v-100q0 -21 -14.5 -35.5t-35.5 -14.5h-800q-21 0 -35.5 14.5t-14.5 35.5v100 q0 21 14.5 35.5t35.5 14.5zM50 800h100q21 0 35.5 -14.5t14.5 -35.5v-100q0 -21 -14.5 -35.5t-35.5 -14.5h-100q-21 0 -35.5 14.5t-14.5 35.5v100q0 21 14.5 35.5t35.5 14.5zM350 800h800q21 0 35.5 -14.5t14.5 -35.5v-100q0 -21 -14.5 -35.5t-35.5 -14.5h-800 q-21 0 -35.5 14.5t-14.5 35.5v100q0 21 14.5 35.5t35.5 14.5zM50 500h100q21 0 35.5 -14.5t14.5 -35.5v-100q0 -21 -14.5 -35.5t-35.5 -14.5h-100q-21 0 -35.5 14.5t-14.5 35.5v100q0 21 14.5 35.5t35.5 14.5zM350 500h800q21 0 35.5 -14.5t14.5 -35.5v-100 q0 -21 -14.5 -35.5t-35.5 -14.5h-800q-21 0 -35.5 14.5t-14.5 35.5v100q0 21 14.5 35.5t35.5 14.5zM50 200h100q21 0 35.5 -14.5t14.5 -35.5v-100q0 -21 -14.5 -35.5t-35.5 -14.5h-100q-21 0 -35.5 14.5t-14.5 35.5v100q0 21 14.5 35.5t35.5 14.5zM350 200h800 q21 0 35.5 -14.5t14.5 -35.5v-100q0 -21 -14.5 -35.5t-35.5 -14.5h-800q-21 0 -35.5 14.5t-14.5 35.5v100q0 21 14.5 35.5t35.5 14.5z" />
+<glyph unicode="&#xe057;" d="M400 0h-100v1100h100v-1100zM550 1100h100q21 0 35.5 -14.5t14.5 -35.5v-100q0 -21 -14.5 -35.5t-35.5 -14.5h-100q-21 0 -35.5 14.5t-14.5 35.5v100q0 21 14.5 35.5t35.5 14.5zM550 800h500q21 0 35.5 -14.5t14.5 -35.5v-100q0 -21 -14.5 -35.5t-35.5 -14.5h-500 q-21 0 -35.5 14.5t-14.5 35.5v100q0 21 14.5 35.5t35.5 14.5zM267 550l-167 -125v75h-200v100h200v75zM550 500h300q21 0 35.5 -14.5t14.5 -35.5v-100q0 -21 -14.5 -35.5t-35.5 -14.5h-300q-21 0 -35.5 14.5t-14.5 35.5v100q0 21 14.5 35.5t35.5 14.5zM550 200h600 q21 0 35.5 -14.5t14.5 -35.5v-100q0 -21 -14.5 -35.5t-35.5 -14.5h-600q-21 0 -35.5 14.5t-14.5 35.5v100q0 21 14.5 35.5t35.5 14.5z" />
+<glyph unicode="&#xe058;" d="M50 1100h100q21 0 35.5 -14.5t14.5 -35.5v-100q0 -21 -14.5 -35.5t-35.5 -14.5h-100q-21 0 -35.5 14.5t-14.5 35.5v100q0 21 14.5 35.5t35.5 14.5zM900 0h-100v1100h100v-1100zM50 800h500q21 0 35.5 -14.5t14.5 -35.5v-100q0 -21 -14.5 -35.5t-35.5 -14.5h-500 q-21 0 -35.5 14.5t-14.5 35.5v100q0 21 14.5 35.5t35.5 14.5zM1100 600h200v-100h-200v-75l-167 125l167 125v-75zM50 500h300q21 0 35.5 -14.5t14.5 -35.5v-100q0 -21 -14.5 -35.5t-35.5 -14.5h-300q-21 0 -35.5 14.5t-14.5 35.5v100q0 21 14.5 35.5t35.5 14.5zM50 200h600 q21 0 35.5 -14.5t14.5 -35.5v-100q0 -21 -14.5 -35.5t-35.5 -14.5h-600q-21 0 -35.5 14.5t-14.5 35.5v100q0 21 14.5 35.5t35.5 14.5z" />
+<glyph unicode="&#xe059;" d="M75 1000h750q31 0 53 -22t22 -53v-650q0 -31 -22 -53t-53 -22h-750q-31 0 -53 22t-22 53v650q0 31 22 53t53 22zM1200 300l-300 300l300 300v-600z" />
+<glyph unicode="&#xe060;" d="M44 1100h1112q18 0 31 -13t13 -31v-1012q0 -18 -13 -31t-31 -13h-1112q-18 0 -31 13t-13 31v1012q0 18 13 31t31 13zM100 1000v-737l247 182l298 -131l-74 156l293 318l236 -288v500h-1000zM342 884q56 0 95 -39t39 -94.5t-39 -95t-95 -39.5t-95 39.5t-39 95t39 94.5 t95 39z" />
+<glyph unicode="&#xe062;" d="M648 1169q117 0 216 -60t156.5 -161t57.5 -218q0 -115 -70 -258q-69 -109 -158 -225.5t-143 -179.5l-54 -62q-9 8 -25.5 24.5t-63.5 67.5t-91 103t-98.5 128t-95.5 148q-60 132 -60 249q0 88 34 169.5t91.5 142t137 96.5t166.5 36zM652.5 974q-91.5 0 -156.5 -65 t-65 -157t65 -156.5t156.5 -64.5t156.5 64.5t65 156.5t-65 157t-156.5 65z" />
+<glyph unicode="&#xe063;" d="M600 1177q117 0 224 -45.5t184.5 -123t123 -184.5t45.5 -224t-45.5 -224t-123 -184.5t-184.5 -123t-224 -45.5t-224 45.5t-184.5 123t-123 184.5t-45.5 224t45.5 224t123 184.5t184.5 123t224 45.5zM600 173v854q-116 0 -214.5 -57t-155.5 -155.5t-57 -214.5t57 -214.5 t155.5 -155.5t214.5 -57z" />
+<glyph unicode="&#xe064;" d="M554 1295q21 -72 57.5 -143.5t76 -130t83 -118t82.5 -117t70 -116t49.5 -126t18.5 -136.5q0 -71 -25.5 -135t-68.5 -111t-99 -82t-118.5 -54t-125.5 -23q-84 5 -161.5 34t-139.5 78.5t-99 125t-37 164.5q0 69 18 136.5t49.5 126.5t69.5 116.5t81.5 117.5t83.5 119 t76.5 131t58.5 143zM344 710q-23 -33 -43.5 -70.5t-40.5 -102.5t-17 -123q1 -37 14.5 -69.5t30 -52t41 -37t38.5 -24.5t33 -15q21 -7 32 -1t13 22l6 34q2 10 -2.5 22t-13.5 19q-5 4 -14 12t-29.5 40.5t-32.5 73.5q-26 89 6 271q2 11 -6 11q-8 1 -15 -10z" />
+<glyph unicode="&#xe065;" d="M1000 1013l108 115q2 1 5 2t13 2t20.5 -1t25 -9.5t28.5 -21.5q22 -22 27 -43t0 -32l-6 -10l-108 -115zM350 1100h400q50 0 105 -13l-187 -187h-368q-41 0 -70.5 -29.5t-29.5 -70.5v-500q0 -41 29.5 -70.5t70.5 -29.5h500q41 0 70.5 29.5t29.5 70.5v182l200 200v-332 q0 -165 -93.5 -257.5t-256.5 -92.5h-400q-165 0 -257.5 92.5t-92.5 257.5v400q0 165 92.5 257.5t257.5 92.5zM1009 803l-362 -362l-161 -50l55 170l355 355z" />
+<glyph unicode="&#xe066;" d="M350 1100h361q-164 -146 -216 -200h-195q-41 0 -70.5 -29.5t-29.5 -70.5v-500q0 -41 29.5 -70.5t70.5 -29.5h500q41 0 70.5 29.5t29.5 70.5l200 153v-103q0 -165 -92.5 -257.5t-257.5 -92.5h-400q-165 0 -257.5 92.5t-92.5 257.5v400q0 165 92.5 257.5t257.5 92.5z M824 1073l339 -301q8 -7 8 -17.5t-8 -17.5l-340 -306q-7 -6 -12.5 -4t-6.5 11v203q-26 1 -54.5 0t-78.5 -7.5t-92 -17.5t-86 -35t-70 -57q10 59 33 108t51.5 81.5t65 58.5t68.5 40.5t67 24.5t56 13.5t40 4.5v210q1 10 6.5 12.5t13.5 -4.5z" />
+<glyph unicode="&#xe067;" d="M350 1100h350q60 0 127 -23l-178 -177h-349q-41 0 -70.5 -29.5t-29.5 -70.5v-500q0 -41 29.5 -70.5t70.5 -29.5h500q41 0 70.5 29.5t29.5 70.5v69l200 200v-219q0 -165 -92.5 -257.5t-257.5 -92.5h-400q-165 0 -257.5 92.5t-92.5 257.5v400q0 165 92.5 257.5t257.5 92.5z M643 639l395 395q7 7 17.5 7t17.5 -7l101 -101q7 -7 7 -17.5t-7 -17.5l-531 -532q-7 -7 -17.5 -7t-17.5 7l-248 248q-7 7 -7 17.5t7 17.5l101 101q7 7 17.5 7t17.5 -7l111 -111q8 -7 18 -7t18 7z" />
+<glyph unicode="&#xe068;" d="M318 918l264 264q8 8 18 8t18 -8l260 -264q7 -8 4.5 -13t-12.5 -5h-170v-200h200v173q0 10 5 12t13 -5l264 -260q8 -7 8 -17.5t-8 -17.5l-264 -265q-8 -7 -13 -5t-5 12v173h-200v-200h170q10 0 12.5 -5t-4.5 -13l-260 -264q-8 -8 -18 -8t-18 8l-264 264q-8 8 -5.5 13 t12.5 5h175v200h-200v-173q0 -10 -5 -12t-13 5l-264 265q-8 7 -8 17.5t8 17.5l264 260q8 7 13 5t5 -12v-173h200v200h-175q-10 0 -12.5 5t5.5 13z" />
+<glyph unicode="&#xe069;" d="M250 1100h100q21 0 35.5 -14.5t14.5 -35.5v-438l464 453q15 14 25.5 10t10.5 -25v-1000q0 -21 -10.5 -25t-25.5 10l-464 453v-438q0 -21 -14.5 -35.5t-35.5 -14.5h-100q-21 0 -35.5 14.5t-14.5 35.5v1000q0 21 14.5 35.5t35.5 14.5z" />
+<glyph unicode="&#xe070;" d="M50 1100h100q21 0 35.5 -14.5t14.5 -35.5v-438l464 453q15 14 25.5 10t10.5 -25v-438l464 453q15 14 25.5 10t10.5 -25v-1000q0 -21 -10.5 -25t-25.5 10l-464 453v-438q0 -21 -10.5 -25t-25.5 10l-464 453v-438q0 -21 -14.5 -35.5t-35.5 -14.5h-100q-21 0 -35.5 14.5 t-14.5 35.5v1000q0 21 14.5 35.5t35.5 14.5z" />
+<glyph unicode="&#xe071;" d="M1200 1050v-1000q0 -21 -10.5 -25t-25.5 10l-464 453v-438q0 -21 -10.5 -25t-25.5 10l-492 480q-15 14 -15 35t15 35l492 480q15 14 25.5 10t10.5 -25v-438l464 453q15 14 25.5 10t10.5 -25z" />
+<glyph unicode="&#xe072;" d="M243 1074l814 -498q18 -11 18 -26t-18 -26l-814 -498q-18 -11 -30.5 -4t-12.5 28v1000q0 21 12.5 28t30.5 -4z" />
+<glyph unicode="&#xe073;" d="M250 1000h200q21 0 35.5 -14.5t14.5 -35.5v-800q0 -21 -14.5 -35.5t-35.5 -14.5h-200q-21 0 -35.5 14.5t-14.5 35.5v800q0 21 14.5 35.5t35.5 14.5zM650 1000h200q21 0 35.5 -14.5t14.5 -35.5v-800q0 -21 -14.5 -35.5t-35.5 -14.5h-200q-21 0 -35.5 14.5t-14.5 35.5v800 q0 21 14.5 35.5t35.5 14.5z" />
+<glyph unicode="&#xe074;" d="M1100 950v-800q0 -21 -14.5 -35.5t-35.5 -14.5h-800q-21 0 -35.5 14.5t-14.5 35.5v800q0 21 14.5 35.5t35.5 14.5h800q21 0 35.5 -14.5t14.5 -35.5z" />
+<glyph unicode="&#xe075;" d="M500 612v438q0 21 10.5 25t25.5 -10l492 -480q15 -14 15 -35t-15 -35l-492 -480q-15 -14 -25.5 -10t-10.5 25v438l-464 -453q-15 -14 -25.5 -10t-10.5 25v1000q0 21 10.5 25t25.5 -10z" />
+<glyph unicode="&#xe076;" d="M1048 1102l100 1q20 0 35 -14.5t15 -35.5l5 -1000q0 -21 -14.5 -35.5t-35.5 -14.5l-100 -1q-21 0 -35.5 14.5t-14.5 35.5l-2 437l-463 -454q-14 -15 -24.5 -10.5t-10.5 25.5l-2 437l-462 -455q-15 -14 -25.5 -9.5t-10.5 24.5l-5 1000q0 21 10.5 25.5t25.5 -10.5l466 -450 l-2 438q0 20 10.5 24.5t25.5 -9.5l466 -451l-2 438q0 21 14.5 35.5t35.5 14.5z" />
+<glyph unicode="&#xe077;" d="M850 1100h100q21 0 35.5 -14.5t14.5 -35.5v-1000q0 -21 -14.5 -35.5t-35.5 -14.5h-100q-21 0 -35.5 14.5t-14.5 35.5v438l-464 -453q-15 -14 -25.5 -10t-10.5 25v1000q0 21 10.5 25t25.5 -10l464 -453v438q0 21 14.5 35.5t35.5 14.5z" />
+<glyph unicode="&#xe078;" d="M686 1081l501 -540q15 -15 10.5 -26t-26.5 -11h-1042q-22 0 -26.5 11t10.5 26l501 540q15 15 36 15t36 -15zM150 400h1000q21 0 35.5 -14.5t14.5 -35.5v-100q0 -21 -14.5 -35.5t-35.5 -14.5h-1000q-21 0 -35.5 14.5t-14.5 35.5v100q0 21 14.5 35.5t35.5 14.5z" />
+<glyph unicode="&#xe079;" d="M885 900l-352 -353l352 -353l-197 -198l-552 552l552 550z" />
+<glyph unicode="&#xe080;" d="M1064 547l-551 -551l-198 198l353 353l-353 353l198 198z" />
+<glyph unicode="&#xe081;" d="M600 1177q117 0 224 -45.5t184.5 -123t123 -184.5t45.5 -224t-45.5 -224t-123 -184.5t-184.5 -123t-224 -45.5t-224 45.5t-184.5 123t-123 184.5t-45.5 224t45.5 224t123 184.5t184.5 123t224 45.5zM650 900h-100q-21 0 -35.5 -14.5t-14.5 -35.5v-150h-150 q-21 0 -35.5 -14.5t-14.5 -35.5v-100q0 -21 14.5 -35.5t35.5 -14.5h150v-150q0 -21 14.5 -35.5t35.5 -14.5h100q21 0 35.5 14.5t14.5 35.5v150h150q21 0 35.5 14.5t14.5 35.5v100q0 21 -14.5 35.5t-35.5 14.5h-150v150q0 21 -14.5 35.5t-35.5 14.5z" />
+<glyph unicode="&#xe082;" d="M600 1177q117 0 224 -45.5t184.5 -123t123 -184.5t45.5 -224t-45.5 -224t-123 -184.5t-184.5 -123t-224 -45.5t-224 45.5t-184.5 123t-123 184.5t-45.5 224t45.5 224t123 184.5t184.5 123t224 45.5zM850 700h-500q-21 0 -35.5 -14.5t-14.5 -35.5v-100q0 -21 14.5 -35.5 t35.5 -14.5h500q21 0 35.5 14.5t14.5 35.5v100q0 21 -14.5 35.5t-35.5 14.5z" />
+<glyph unicode="&#xe083;" d="M600 1177q117 0 224 -45.5t184.5 -123t123 -184.5t45.5 -224t-45.5 -224t-123 -184.5t-184.5 -123t-224 -45.5t-224 45.5t-184.5 123t-123 184.5t-45.5 224t45.5 224t123 184.5t184.5 123t224 45.5zM741.5 913q-12.5 0 -21.5 -9l-120 -120l-120 120q-9 9 -21.5 9 t-21.5 -9l-141 -141q-9 -9 -9 -21.5t9 -21.5l120 -120l-120 -120q-9 -9 -9 -21.5t9 -21.5l141 -141q9 -9 21.5 -9t21.5 9l120 120l120 -120q9 -9 21.5 -9t21.5 9l141 141q9 9 9 21.5t-9 21.5l-120 120l120 120q9 9 9 21.5t-9 21.5l-141 141q-9 9 -21.5 9z" />
+<glyph unicode="&#xe084;" d="M600 1177q117 0 224 -45.5t184.5 -123t123 -184.5t45.5 -224t-45.5 -224t-123 -184.5t-184.5 -123t-224 -45.5t-224 45.5t-184.5 123t-123 184.5t-45.5 224t45.5 224t123 184.5t184.5 123t224 45.5zM546 623l-84 85q-7 7 -17.5 7t-18.5 -7l-139 -139q-7 -8 -7 -18t7 -18 l242 -241q7 -8 17.5 -8t17.5 8l375 375q7 7 7 17.5t-7 18.5l-139 139q-7 7 -17.5 7t-17.5 -7z" />
+<glyph unicode="&#xe085;" d="M600 1177q117 0 224 -45.5t184.5 -123t123 -184.5t45.5 -224t-45.5 -224t-123 -184.5t-184.5 -123t-224 -45.5t-224 45.5t-184.5 123t-123 184.5t-45.5 224t45.5 224t123 184.5t184.5 123t224 45.5zM588 941q-29 0 -59 -5.5t-63 -20.5t-58 -38.5t-41.5 -63t-16.5 -89.5 q0 -25 20 -25h131q30 -5 35 11q6 20 20.5 28t45.5 8q20 0 31.5 -10.5t11.5 -28.5q0 -23 -7 -34t-26 -18q-1 0 -13.5 -4t-19.5 -7.5t-20 -10.5t-22 -17t-18.5 -24t-15.5 -35t-8 -46q-1 -8 5.5 -16.5t20.5 -8.5h173q7 0 22 8t35 28t37.5 48t29.5 74t12 100q0 47 -17 83 t-42.5 57t-59.5 34.5t-64 18t-59 4.5zM675 400h-150q-10 0 -17.5 -7.5t-7.5 -17.5v-150q0 -10 7.5 -17.5t17.5 -7.5h150q10 0 17.5 7.5t7.5 17.5v150q0 10 -7.5 17.5t-17.5 7.5z" />
+<glyph unicode="&#xe086;" d="M600 1177q117 0 224 -45.5t184.5 -123t123 -184.5t45.5 -224t-45.5 -224t-123 -184.5t-184.5 -123t-224 -45.5t-224 45.5t-184.5 123t-123 184.5t-45.5 224t45.5 224t123 184.5t184.5 123t224 45.5zM675 1000h-150q-10 0 -17.5 -7.5t-7.5 -17.5v-150q0 -10 7.5 -17.5 t17.5 -7.5h150q10 0 17.5 7.5t7.5 17.5v150q0 10 -7.5 17.5t-17.5 7.5zM675 700h-250q-10 0 -17.5 -7.5t-7.5 -17.5v-50q0 -10 7.5 -17.5t17.5 -7.5h75v-200h-75q-10 0 -17.5 -7.5t-7.5 -17.5v-50q0 -10 7.5 -17.5t17.5 -7.5h350q10 0 17.5 7.5t7.5 17.5v50q0 10 -7.5 17.5 t-17.5 7.5h-75v275q0 10 -7.5 17.5t-17.5 7.5z" />
+<glyph unicode="&#xe087;" d="M525 1200h150q10 0 17.5 -7.5t7.5 -17.5v-194q103 -27 178.5 -102.5t102.5 -178.5h194q10 0 17.5 -7.5t7.5 -17.5v-150q0 -10 -7.5 -17.5t-17.5 -7.5h-194q-27 -103 -102.5 -178.5t-178.5 -102.5v-194q0 -10 -7.5 -17.5t-17.5 -7.5h-150q-10 0 -17.5 7.5t-7.5 17.5v194 q-103 27 -178.5 102.5t-102.5 178.5h-194q-10 0 -17.5 7.5t-7.5 17.5v150q0 10 7.5 17.5t17.5 7.5h194q27 103 102.5 178.5t178.5 102.5v194q0 10 7.5 17.5t17.5 7.5zM700 893v-168q0 -10 -7.5 -17.5t-17.5 -7.5h-150q-10 0 -17.5 7.5t-7.5 17.5v168q-68 -23 -119 -74 t-74 -119h168q10 0 17.5 -7.5t7.5 -17.5v-150q0 -10 -7.5 -17.5t-17.5 -7.5h-168q23 -68 74 -119t119 -74v168q0 10 7.5 17.5t17.5 7.5h150q10 0 17.5 -7.5t7.5 -17.5v-168q68 23 119 74t74 119h-168q-10 0 -17.5 7.5t-7.5 17.5v150q0 10 7.5 17.5t17.5 7.5h168 q-23 68 -74 119t-119 74z" />
+<glyph unicode="&#xe088;" d="M600 1177q117 0 224 -45.5t184.5 -123t123 -184.5t45.5 -224t-45.5 -224t-123 -184.5t-184.5 -123t-224 -45.5t-224 45.5t-184.5 123t-123 184.5t-45.5 224t45.5 224t123 184.5t184.5 123t224 45.5zM600 1027q-116 0 -214.5 -57t-155.5 -155.5t-57 -214.5t57 -214.5 t155.5 -155.5t214.5 -57t214.5 57t155.5 155.5t57 214.5t-57 214.5t-155.5 155.5t-214.5 57zM759 823l64 -64q7 -7 7 -17.5t-7 -17.5l-124 -124l124 -124q7 -7 7 -17.5t-7 -17.5l-64 -64q-7 -7 -17.5 -7t-17.5 7l-124 124l-124 -124q-7 -7 -17.5 -7t-17.5 7l-64 64 q-7 7 -7 17.5t7 17.5l124 124l-124 124q-7 7 -7 17.5t7 17.5l64 64q7 7 17.5 7t17.5 -7l124 -124l124 124q7 7 17.5 7t17.5 -7z" />
+<glyph unicode="&#xe089;" d="M600 1177q117 0 224 -45.5t184.5 -123t123 -184.5t45.5 -224t-45.5 -224t-123 -184.5t-184.5 -123t-224 -45.5t-224 45.5t-184.5 123t-123 184.5t-45.5 224t45.5 224t123 184.5t184.5 123t224 45.5zM600 1027q-116 0 -214.5 -57t-155.5 -155.5t-57 -214.5t57 -214.5 t155.5 -155.5t214.5 -57t214.5 57t155.5 155.5t57 214.5t-57 214.5t-155.5 155.5t-214.5 57zM782 788l106 -106q7 -7 7 -17.5t-7 -17.5l-320 -321q-8 -7 -18 -7t-18 7l-202 203q-8 7 -8 17.5t8 17.5l106 106q7 8 17.5 8t17.5 -8l79 -79l197 197q7 7 17.5 7t17.5 -7z" />
+<glyph unicode="&#xe090;" d="M600 1177q117 0 224 -45.5t184.5 -123t123 -184.5t45.5 -224t-45.5 -224t-123 -184.5t-184.5 -123t-224 -45.5t-224 45.5t-184.5 123t-123 184.5t-45.5 224t45.5 224t123 184.5t184.5 123t224 45.5zM600 1027q-116 0 -214.5 -57t-155.5 -155.5t-57 -214.5q0 -120 65 -225 l587 587q-105 65 -225 65zM965 819l-584 -584q104 -62 219 -62q116 0 214.5 57t155.5 155.5t57 214.5q0 115 -62 219z" />
+<glyph unicode="&#xe091;" d="M39 582l522 427q16 13 27.5 8t11.5 -26v-291h550q21 0 35.5 -14.5t14.5 -35.5v-200q0 -21 -14.5 -35.5t-35.5 -14.5h-550v-291q0 -21 -11.5 -26t-27.5 8l-522 427q-16 13 -16 32t16 32z" />
+<glyph unicode="&#xe092;" d="M639 1009l522 -427q16 -13 16 -32t-16 -32l-522 -427q-16 -13 -27.5 -8t-11.5 26v291h-550q-21 0 -35.5 14.5t-14.5 35.5v200q0 21 14.5 35.5t35.5 14.5h550v291q0 21 11.5 26t27.5 -8z" />
+<glyph unicode="&#xe093;" d="M682 1161l427 -522q13 -16 8 -27.5t-26 -11.5h-291v-550q0 -21 -14.5 -35.5t-35.5 -14.5h-200q-21 0 -35.5 14.5t-14.5 35.5v550h-291q-21 0 -26 11.5t8 27.5l427 522q13 16 32 16t32 -16z" />
+<glyph unicode="&#xe094;" d="M550 1200h200q21 0 35.5 -14.5t14.5 -35.5v-550h291q21 0 26 -11.5t-8 -27.5l-427 -522q-13 -16 -32 -16t-32 16l-427 522q-13 16 -8 27.5t26 11.5h291v550q0 21 14.5 35.5t35.5 14.5z" />
+<glyph unicode="&#xe095;" d="M639 1109l522 -427q16 -13 16 -32t-16 -32l-522 -427q-16 -13 -27.5 -8t-11.5 26v291q-94 -2 -182 -20t-170.5 -52t-147 -92.5t-100.5 -135.5q5 105 27 193.5t67.5 167t113 135t167 91.5t225.5 42v262q0 21 11.5 26t27.5 -8z" />
+<glyph unicode="&#xe096;" d="M850 1200h300q21 0 35.5 -14.5t14.5 -35.5v-300q0 -21 -10.5 -25t-24.5 10l-94 94l-249 -249q-8 -7 -18 -7t-18 7l-106 106q-7 8 -7 18t7 18l249 249l-94 94q-14 14 -10 24.5t25 10.5zM350 0h-300q-21 0 -35.5 14.5t-14.5 35.5v300q0 21 10.5 25t24.5 -10l94 -94l249 249 q8 7 18 7t18 -7l106 -106q7 -8 7 -18t-7 -18l-249 -249l94 -94q14 -14 10 -24.5t-25 -10.5z" />
+<glyph unicode="&#xe097;" d="M1014 1120l106 -106q7 -8 7 -18t-7 -18l-249 -249l94 -94q14 -14 10 -24.5t-25 -10.5h-300q-21 0 -35.5 14.5t-14.5 35.5v300q0 21 10.5 25t24.5 -10l94 -94l249 249q8 7 18 7t18 -7zM250 600h300q21 0 35.5 -14.5t14.5 -35.5v-300q0 -21 -10.5 -25t-24.5 10l-94 94 l-249 -249q-8 -7 -18 -7t-18 7l-106 106q-7 8 -7 18t7 18l249 249l-94 94q-14 14 -10 24.5t25 10.5z" />
+<glyph unicode="&#xe101;" d="M600 1177q117 0 224 -45.5t184.5 -123t123 -184.5t45.5 -224t-45.5 -224t-123 -184.5t-184.5 -123t-224 -45.5t-224 45.5t-184.5 123t-123 184.5t-45.5 224t45.5 224t123 184.5t184.5 123t224 45.5zM704 900h-208q-20 0 -32 -14.5t-8 -34.5l58 -302q4 -20 21.5 -34.5 t37.5 -14.5h54q20 0 37.5 14.5t21.5 34.5l58 302q4 20 -8 34.5t-32 14.5zM675 400h-150q-10 0 -17.5 -7.5t-7.5 -17.5v-150q0 -10 7.5 -17.5t17.5 -7.5h150q10 0 17.5 7.5t7.5 17.5v150q0 10 -7.5 17.5t-17.5 7.5z" />
+<glyph unicode="&#xe102;" d="M260 1200q9 0 19 -2t15 -4l5 -2q22 -10 44 -23l196 -118q21 -13 36 -24q29 -21 37 -12q11 13 49 35l196 118q22 13 45 23q17 7 38 7q23 0 47 -16.5t37 -33.5l13 -16q14 -21 18 -45l25 -123l8 -44q1 -9 8.5 -14.5t17.5 -5.5h61q10 0 17.5 -7.5t7.5 -17.5v-50 q0 -10 -7.5 -17.5t-17.5 -7.5h-50q-10 0 -17.5 -7.5t-7.5 -17.5v-175h-400v300h-200v-300h-400v175q0 10 -7.5 17.5t-17.5 7.5h-50q-10 0 -17.5 7.5t-7.5 17.5v50q0 10 7.5 17.5t17.5 7.5h61q11 0 18 3t7 8q0 4 9 52l25 128q5 25 19 45q2 3 5 7t13.5 15t21.5 19.5t26.5 15.5 t29.5 7zM915 1079l-166 -162q-7 -7 -5 -12t12 -5h219q10 0 15 7t2 17l-51 149q-3 10 -11 12t-15 -6zM463 917l-177 157q-8 7 -16 5t-11 -12l-51 -143q-3 -10 2 -17t15 -7h231q11 0 12.5 5t-5.5 12zM500 0h-375q-10 0 -17.5 7.5t-7.5 17.5v375h400v-400zM1100 400v-375 q0 -10 -7.5 -17.5t-17.5 -7.5h-375v400h400z" />
+<glyph unicode="&#xe103;" d="M1165 1190q8 3 21 -6.5t13 -17.5q-2 -178 -24.5 -323.5t-55.5 -245.5t-87 -174.5t-102.5 -118.5t-118 -68.5t-118.5 -33t-120 -4.5t-105 9.5t-90 16.5q-61 12 -78 11q-4 1 -12.5 0t-34 -14.5t-52.5 -40.5l-153 -153q-26 -24 -37 -14.5t-11 43.5q0 64 42 102q8 8 50.5 45 t66.5 58q19 17 35 47t13 61q-9 55 -10 102.5t7 111t37 130t78 129.5q39 51 80 88t89.5 63.5t94.5 45t113.5 36t129 31t157.5 37t182 47.5zM1116 1098q-8 9 -22.5 -3t-45.5 -50q-38 -47 -119 -103.5t-142 -89.5l-62 -33q-56 -30 -102 -57t-104 -68t-102.5 -80.5t-85.5 -91 t-64 -104.5q-24 -56 -31 -86t2 -32t31.5 17.5t55.5 59.5q25 30 94 75.5t125.5 77.5t147.5 81q70 37 118.5 69t102 79.5t99 111t86.5 148.5q22 50 24 60t-6 19z" />
+<glyph unicode="&#xe104;" d="M653 1231q-39 -67 -54.5 -131t-10.5 -114.5t24.5 -96.5t47.5 -80t63.5 -62.5t68.5 -46.5t65 -30q-4 7 -17.5 35t-18.5 39.5t-17 39.5t-17 43t-13 42t-9.5 44.5t-2 42t4 43t13.5 39t23 38.5q96 -42 165 -107.5t105 -138t52 -156t13 -159t-19 -149.5q-13 -55 -44 -106.5 t-68 -87t-78.5 -64.5t-72.5 -45t-53 -22q-72 -22 -127 -11q-31 6 -13 19q6 3 17 7q13 5 32.5 21t41 44t38.5 63.5t21.5 81.5t-6.5 94.5t-50 107t-104 115.5q10 -104 -0.5 -189t-37 -140.5t-65 -93t-84 -52t-93.5 -11t-95 24.5q-80 36 -131.5 114t-53.5 171q-2 23 0 49.5 t4.5 52.5t13.5 56t27.5 60t46 64.5t69.5 68.5q-8 -53 -5 -102.5t17.5 -90t34 -68.5t44.5 -39t49 -2q31 13 38.5 36t-4.5 55t-29 64.5t-36 75t-26 75.5q-15 85 2 161.5t53.5 128.5t85.5 92.5t93.5 61t81.5 25.5z" />
+<glyph unicode="&#xe105;" d="M600 1094q82 0 160.5 -22.5t140 -59t116.5 -82.5t94.5 -95t68 -95t42.5 -82.5t14 -57.5t-14 -57.5t-43 -82.5t-68.5 -95t-94.5 -95t-116.5 -82.5t-140 -59t-159.5 -22.5t-159.5 22.5t-140 59t-116.5 82.5t-94.5 95t-68.5 95t-43 82.5t-14 57.5t14 57.5t42.5 82.5t68 95 t94.5 95t116.5 82.5t140 59t160.5 22.5zM888 829q-15 15 -18 12t5 -22q25 -57 25 -119q0 -124 -88 -212t-212 -88t-212 88t-88 212q0 59 23 114q8 19 4.5 22t-17.5 -12q-70 -69 -160 -184q-13 -16 -15 -40.5t9 -42.5q22 -36 47 -71t70 -82t92.5 -81t113 -58.5t133.5 -24.5 t133.5 24t113 58.5t92.5 81.5t70 81.5t47 70.5q11 18 9 42.5t-14 41.5q-90 117 -163 189zM448 727l-35 -36q-15 -15 -19.5 -38.5t4.5 -41.5q37 -68 93 -116q16 -13 38.5 -11t36.5 17l35 34q14 15 12.5 33.5t-16.5 33.5q-44 44 -89 117q-11 18 -28 20t-32 -12z" />
+<glyph unicode="&#xe106;" d="M592 0h-148l31 120q-91 20 -175.5 68.5t-143.5 106.5t-103.5 119t-66.5 110t-22 76q0 21 14 57.5t42.5 82.5t68 95t94.5 95t116.5 82.5t140 59t160.5 22.5q61 0 126 -15l32 121h148zM944 770l47 181q108 -85 176.5 -192t68.5 -159q0 -26 -19.5 -71t-59.5 -102t-93 -112 t-129 -104.5t-158 -75.5l46 173q77 49 136 117t97 131q11 18 9 42.5t-14 41.5q-54 70 -107 130zM310 824q-70 -69 -160 -184q-13 -16 -15 -40.5t9 -42.5q18 -30 39 -60t57 -70.5t74 -73t90 -61t105 -41.5l41 154q-107 18 -178.5 101.5t-71.5 193.5q0 59 23 114q8 19 4.5 22 t-17.5 -12zM448 727l-35 -36q-15 -15 -19.5 -38.5t4.5 -41.5q37 -68 93 -116q16 -13 38.5 -11t36.5 17l12 11l22 86l-3 4q-44 44 -89 117q-11 18 -28 20t-32 -12z" />
+<glyph unicode="&#xe107;" d="M-90 100l642 1066q20 31 48 28.5t48 -35.5l642 -1056q21 -32 7.5 -67.5t-50.5 -35.5h-1294q-37 0 -50.5 34t7.5 66zM155 200h345v75q0 10 7.5 17.5t17.5 7.5h150q10 0 17.5 -7.5t7.5 -17.5v-75h345l-445 723zM496 700h208q20 0 32 -14.5t8 -34.5l-58 -252 q-4 -20 -21.5 -34.5t-37.5 -14.5h-54q-20 0 -37.5 14.5t-21.5 34.5l-58 252q-4 20 8 34.5t32 14.5z" />
+<glyph unicode="&#xe108;" d="M650 1200q62 0 106 -44t44 -106v-339l363 -325q15 -14 26 -38.5t11 -44.5v-41q0 -20 -12 -26.5t-29 5.5l-359 249v-263q100 -93 100 -113v-64q0 -21 -13 -29t-32 1l-205 128l-205 -128q-19 -9 -32 -1t-13 29v64q0 20 100 113v263l-359 -249q-17 -12 -29 -5.5t-12 26.5v41 q0 20 11 44.5t26 38.5l363 325v339q0 62 44 106t106 44z" />
+<glyph unicode="&#xe109;" d="M850 1200h100q21 0 35.5 -14.5t14.5 -35.5v-50h50q21 0 35.5 -14.5t14.5 -35.5v-150h-1100v150q0 21 14.5 35.5t35.5 14.5h50v50q0 21 14.5 35.5t35.5 14.5h100q21 0 35.5 -14.5t14.5 -35.5v-50h500v50q0 21 14.5 35.5t35.5 14.5zM1100 800v-750q0 -21 -14.5 -35.5 t-35.5 -14.5h-1000q-21 0 -35.5 14.5t-14.5 35.5v750h1100zM100 600v-100h100v100h-100zM300 600v-100h100v100h-100zM500 600v-100h100v100h-100zM700 600v-100h100v100h-100zM900 600v-100h100v100h-100zM100 400v-100h100v100h-100zM300 400v-100h100v100h-100zM500 400 v-100h100v100h-100zM700 400v-100h100v100h-100zM900 400v-100h100v100h-100zM100 200v-100h100v100h-100zM300 200v-100h100v100h-100zM500 200v-100h100v100h-100zM700 200v-100h100v100h-100zM900 200v-100h100v100h-100z" />
+<glyph unicode="&#xe110;" d="M1135 1165l249 -230q15 -14 15 -35t-15 -35l-249 -230q-14 -14 -24.5 -10t-10.5 25v150h-159l-600 -600h-291q-21 0 -35.5 14.5t-14.5 35.5v100q0 21 14.5 35.5t35.5 14.5h209l600 600h241v150q0 21 10.5 25t24.5 -10zM522 819l-141 -141l-122 122h-209q-21 0 -35.5 14.5 t-14.5 35.5v100q0 21 14.5 35.5t35.5 14.5h291zM1135 565l249 -230q15 -14 15 -35t-15 -35l-249 -230q-14 -14 -24.5 -10t-10.5 25v150h-241l-181 181l141 141l122 -122h159v150q0 21 10.5 25t24.5 -10z" />
+<glyph unicode="&#xe111;" d="M100 1100h1000q41 0 70.5 -29.5t29.5 -70.5v-600q0 -41 -29.5 -70.5t-70.5 -29.5h-596l-304 -300v300h-100q-41 0 -70.5 29.5t-29.5 70.5v600q0 41 29.5 70.5t70.5 29.5z" />
+<glyph unicode="&#xe112;" d="M150 1200h200q21 0 35.5 -14.5t14.5 -35.5v-250h-300v250q0 21 14.5 35.5t35.5 14.5zM850 1200h200q21 0 35.5 -14.5t14.5 -35.5v-250h-300v250q0 21 14.5 35.5t35.5 14.5zM1100 800v-300q0 -41 -3 -77.5t-15 -89.5t-32 -96t-58 -89t-89 -77t-129 -51t-174 -20t-174 20 t-129 51t-89 77t-58 89t-32 96t-15 89.5t-3 77.5v300h300v-250v-27v-42.5t1.5 -41t5 -38t10 -35t16.5 -30t25.5 -24.5t35 -19t46.5 -12t60 -4t60 4.5t46.5 12.5t35 19.5t25 25.5t17 30.5t10 35t5 38t2 40.5t-0.5 42v25v250h300z" />
+<glyph unicode="&#xe113;" d="M1100 411l-198 -199l-353 353l-353 -353l-197 199l551 551z" />
+<glyph unicode="&#xe114;" d="M1101 789l-550 -551l-551 551l198 199l353 -353l353 353z" />
+<glyph unicode="&#xe115;" d="M404 1000h746q21 0 35.5 -14.5t14.5 -35.5v-551h150q21 0 25 -10.5t-10 -24.5l-230 -249q-14 -15 -35 -15t-35 15l-230 249q-14 14 -10 24.5t25 10.5h150v401h-381zM135 984l230 -249q14 -14 10 -24.5t-25 -10.5h-150v-400h385l215 -200h-750q-21 0 -35.5 14.5 t-14.5 35.5v550h-150q-21 0 -25 10.5t10 24.5l230 249q14 15 35 15t35 -15z" />
+<glyph unicode="&#xe116;" d="M56 1200h94q17 0 31 -11t18 -27l38 -162h896q24 0 39 -18.5t10 -42.5l-100 -475q-5 -21 -27 -42.5t-55 -21.5h-633l48 -200h535q21 0 35.5 -14.5t14.5 -35.5t-14.5 -35.5t-35.5 -14.5h-50v-50q0 -21 -14.5 -35.5t-35.5 -14.5t-35.5 14.5t-14.5 35.5v50h-300v-50 q0 -21 -14.5 -35.5t-35.5 -14.5t-35.5 14.5t-14.5 35.5v50h-31q-18 0 -32.5 10t-20.5 19l-5 10l-201 961h-54q-20 0 -35 14.5t-15 35.5t15 35.5t35 14.5z" />
+<glyph unicode="&#xe117;" d="M1200 1000v-100h-1200v100h200q0 41 29.5 70.5t70.5 29.5h300q41 0 70.5 -29.5t29.5 -70.5h500zM0 800h1200v-800h-1200v800z" />
+<glyph unicode="&#xe118;" d="M200 800l-200 -400v600h200q0 41 29.5 70.5t70.5 29.5h300q42 0 71 -29.5t29 -70.5h500v-200h-1000zM1500 700l-300 -700h-1200l300 700h1200z" />
+<glyph unicode="&#xe119;" d="M635 1184l230 -249q14 -14 10 -24.5t-25 -10.5h-150v-601h150q21 0 25 -10.5t-10 -24.5l-230 -249q-14 -15 -35 -15t-35 15l-230 249q-14 14 -10 24.5t25 10.5h150v601h-150q-21 0 -25 10.5t10 24.5l230 249q14 15 35 15t35 -15z" />
+<glyph unicode="&#xe120;" d="M936 864l249 -229q14 -15 14 -35.5t-14 -35.5l-249 -229q-15 -15 -25.5 -10.5t-10.5 24.5v151h-600v-151q0 -20 -10.5 -24.5t-25.5 10.5l-249 229q-14 15 -14 35.5t14 35.5l249 229q15 15 25.5 10.5t10.5 -25.5v-149h600v149q0 21 10.5 25.5t25.5 -10.5z" />
+<glyph unicode="&#xe121;" d="M1169 400l-172 732q-5 23 -23 45.5t-38 22.5h-672q-20 0 -38 -20t-23 -41l-172 -739h1138zM1100 300h-1000q-41 0 -70.5 -29.5t-29.5 -70.5v-100q0 -41 29.5 -70.5t70.5 -29.5h1000q41 0 70.5 29.5t29.5 70.5v100q0 41 -29.5 70.5t-70.5 29.5zM800 100v100h100v-100h-100 zM1000 100v100h100v-100h-100z" />
+<glyph unicode="&#xe122;" d="M1150 1100q21 0 35.5 -14.5t14.5 -35.5v-850q0 -21 -14.5 -35.5t-35.5 -14.5t-35.5 14.5t-14.5 35.5v850q0 21 14.5 35.5t35.5 14.5zM1000 200l-675 200h-38l47 -276q3 -16 -5.5 -20t-29.5 -4h-7h-84q-20 0 -34.5 14t-18.5 35q-55 337 -55 351v250v6q0 16 1 23.5t6.5 14 t17.5 6.5h200l675 250v-850zM0 750v-250q-4 0 -11 0.5t-24 6t-30 15t-24 30t-11 48.5v50q0 26 10.5 46t25 30t29 16t25.5 7z" />
+<glyph unicode="&#xe123;" d="M553 1200h94q20 0 29 -10.5t3 -29.5l-18 -37q83 -19 144 -82.5t76 -140.5l63 -327l118 -173h17q19 0 33 -14.5t14 -35t-13 -40.5t-31 -27q-8 -4 -23 -9.5t-65 -19.5t-103 -25t-132.5 -20t-158.5 -9q-57 0 -115 5t-104 12t-88.5 15.5t-73.5 17.5t-54.5 16t-35.5 12l-11 4 q-18 8 -31 28t-13 40.5t14 35t33 14.5h17l118 173l63 327q15 77 76 140t144 83l-18 32q-6 19 3.5 32t28.5 13zM498 110q50 -6 102 -6q53 0 102 6q-12 -49 -39.5 -79.5t-62.5 -30.5t-63 30.5t-39 79.5z" />
+<glyph unicode="&#xe124;" d="M800 946l224 78l-78 -224l234 -45l-180 -155l180 -155l-234 -45l78 -224l-224 78l-45 -234l-155 180l-155 -180l-45 234l-224 -78l78 224l-234 45l180 155l-180 155l234 45l-78 224l224 -78l45 234l155 -180l155 180z" />
+<glyph unicode="&#xe125;" d="M650 1200h50q40 0 70 -40.5t30 -84.5v-150l-28 -125h328q40 0 70 -40.5t30 -84.5v-100q0 -45 -29 -74l-238 -344q-16 -24 -38 -40.5t-45 -16.5h-250q-7 0 -42 25t-66 50l-31 25h-61q-45 0 -72.5 18t-27.5 57v400q0 36 20 63l145 196l96 198q13 28 37.5 48t51.5 20z M650 1100l-100 -212l-150 -213v-375h100l136 -100h214l250 375v125h-450l50 225v175h-50zM50 800h100q21 0 35.5 -14.5t14.5 -35.5v-500q0 -21 -14.5 -35.5t-35.5 -14.5h-100q-21 0 -35.5 14.5t-14.5 35.5v500q0 21 14.5 35.5t35.5 14.5z" />
+<glyph unicode="&#xe126;" d="M600 1100h250q23 0 45 -16.5t38 -40.5l238 -344q29 -29 29 -74v-100q0 -44 -30 -84.5t-70 -40.5h-328q28 -118 28 -125v-150q0 -44 -30 -84.5t-70 -40.5h-50q-27 0 -51.5 20t-37.5 48l-96 198l-145 196q-20 27 -20 63v400q0 39 27.5 57t72.5 18h61q124 100 139 100z M50 1000h100q21 0 35.5 -14.5t14.5 -35.5v-500q0 -21 -14.5 -35.5t-35.5 -14.5h-100q-21 0 -35.5 14.5t-14.5 35.5v500q0 21 14.5 35.5t35.5 14.5zM636 1000l-136 -100h-100v-375l150 -213l100 -212h50v175l-50 225h450v125l-250 375h-214z" />
+<glyph unicode="&#xe127;" d="M356 873l363 230q31 16 53 -6l110 -112q13 -13 13.5 -32t-11.5 -34l-84 -121h302q84 0 138 -38t54 -110t-55 -111t-139 -39h-106l-131 -339q-6 -21 -19.5 -41t-28.5 -20h-342q-7 0 -90 81t-83 94v525q0 17 14 35.5t28 28.5zM400 792v-503l100 -89h293l131 339 q6 21 19.5 41t28.5 20h203q21 0 30.5 25t0.5 50t-31 25h-456h-7h-6h-5.5t-6 0.5t-5 1.5t-5 2t-4 2.5t-4 4t-2.5 4.5q-12 25 5 47l146 183l-86 83zM50 800h100q21 0 35.5 -14.5t14.5 -35.5v-500q0 -21 -14.5 -35.5t-35.5 -14.5h-100q-21 0 -35.5 14.5t-14.5 35.5v500 q0 21 14.5 35.5t35.5 14.5z" />
+<glyph unicode="&#xe128;" d="M475 1103l366 -230q2 -1 6 -3.5t14 -10.5t18 -16.5t14.5 -20t6.5 -22.5v-525q0 -13 -86 -94t-93 -81h-342q-15 0 -28.5 20t-19.5 41l-131 339h-106q-85 0 -139.5 39t-54.5 111t54 110t138 38h302l-85 121q-11 15 -10.5 34t13.5 32l110 112q22 22 53 6zM370 945l146 -183 q17 -22 5 -47q-2 -2 -3.5 -4.5t-4 -4t-4 -2.5t-5 -2t-5 -1.5t-6 -0.5h-6h-6.5h-6h-475v-100h221q15 0 29 -20t20 -41l130 -339h294l106 89v503l-342 236zM1050 800h100q21 0 35.5 -14.5t14.5 -35.5v-500q0 -21 -14.5 -35.5t-35.5 -14.5h-100q-21 0 -35.5 14.5t-14.5 35.5 v500q0 21 14.5 35.5t35.5 14.5z" />
+<glyph unicode="&#xe129;" d="M550 1294q72 0 111 -55t39 -139v-106l339 -131q21 -6 41 -19.5t20 -28.5v-342q0 -7 -81 -90t-94 -83h-525q-17 0 -35.5 14t-28.5 28l-9 14l-230 363q-16 31 6 53l112 110q13 13 32 13.5t34 -11.5l121 -84v302q0 84 38 138t110 54zM600 972v203q0 21 -25 30.5t-50 0.5 t-25 -31v-456v-7v-6v-5.5t-0.5 -6t-1.5 -5t-2 -5t-2.5 -4t-4 -4t-4.5 -2.5q-25 -12 -47 5l-183 146l-83 -86l236 -339h503l89 100v293l-339 131q-21 6 -41 19.5t-20 28.5zM450 200h500q21 0 35.5 -14.5t14.5 -35.5v-100q0 -21 -14.5 -35.5t-35.5 -14.5h-500 q-21 0 -35.5 14.5t-14.5 35.5v100q0 21 14.5 35.5t35.5 14.5z" />
+<glyph unicode="&#xe130;" d="M350 1100h500q21 0 35.5 14.5t14.5 35.5v100q0 21 -14.5 35.5t-35.5 14.5h-500q-21 0 -35.5 -14.5t-14.5 -35.5v-100q0 -21 14.5 -35.5t35.5 -14.5zM600 306v-106q0 -84 -39 -139t-111 -55t-110 54t-38 138v302l-121 -84q-15 -12 -34 -11.5t-32 13.5l-112 110 q-22 22 -6 53l230 363q1 2 3.5 6t10.5 13.5t16.5 17t20 13.5t22.5 6h525q13 0 94 -83t81 -90v-342q0 -15 -20 -28.5t-41 -19.5zM308 900l-236 -339l83 -86l183 146q22 17 47 5q2 -1 4.5 -2.5t4 -4t2.5 -4t2 -5t1.5 -5t0.5 -6v-5.5v-6v-7v-456q0 -22 25 -31t50 0.5t25 30.5 v203q0 15 20 28.5t41 19.5l339 131v293l-89 100h-503z" />
+<glyph unicode="&#xe131;" d="M600 1178q118 0 225 -45.5t184.5 -123t123 -184.5t45.5 -225t-45.5 -225t-123 -184.5t-184.5 -123t-225 -45.5t-225 45.5t-184.5 123t-123 184.5t-45.5 225t45.5 225t123 184.5t184.5 123t225 45.5zM914 632l-275 223q-16 13 -27.5 8t-11.5 -26v-137h-275 q-10 0 -17.5 -7.5t-7.5 -17.5v-150q0 -10 7.5 -17.5t17.5 -7.5h275v-137q0 -21 11.5 -26t27.5 8l275 223q16 13 16 32t-16 32z" />
+<glyph unicode="&#xe132;" d="M600 1178q118 0 225 -45.5t184.5 -123t123 -184.5t45.5 -225t-45.5 -225t-123 -184.5t-184.5 -123t-225 -45.5t-225 45.5t-184.5 123t-123 184.5t-45.5 225t45.5 225t123 184.5t184.5 123t225 45.5zM561 855l-275 -223q-16 -13 -16 -32t16 -32l275 -223q16 -13 27.5 -8 t11.5 26v137h275q10 0 17.5 7.5t7.5 17.5v150q0 10 -7.5 17.5t-17.5 7.5h-275v137q0 21 -11.5 26t-27.5 -8z" />
+<glyph unicode="&#xe133;" d="M600 1178q118 0 225 -45.5t184.5 -123t123 -184.5t45.5 -225t-45.5 -225t-123 -184.5t-184.5 -123t-225 -45.5t-225 45.5t-184.5 123t-123 184.5t-45.5 225t45.5 225t123 184.5t184.5 123t225 45.5zM855 639l-223 275q-13 16 -32 16t-32 -16l-223 -275q-13 -16 -8 -27.5 t26 -11.5h137v-275q0 -10 7.5 -17.5t17.5 -7.5h150q10 0 17.5 7.5t7.5 17.5v275h137q21 0 26 11.5t-8 27.5z" />
+<glyph unicode="&#xe134;" d="M600 1178q118 0 225 -45.5t184.5 -123t123 -184.5t45.5 -225t-45.5 -225t-123 -184.5t-184.5 -123t-225 -45.5t-225 45.5t-184.5 123t-123 184.5t-45.5 225t45.5 225t123 184.5t184.5 123t225 45.5zM675 900h-150q-10 0 -17.5 -7.5t-7.5 -17.5v-275h-137q-21 0 -26 -11.5 t8 -27.5l223 -275q13 -16 32 -16t32 16l223 275q13 16 8 27.5t-26 11.5h-137v275q0 10 -7.5 17.5t-17.5 7.5z" />
+<glyph unicode="&#xe135;" d="M600 1176q116 0 222.5 -46t184 -123.5t123.5 -184t46 -222.5t-46 -222.5t-123.5 -184t-184 -123.5t-222.5 -46t-222.5 46t-184 123.5t-123.5 184t-46 222.5t46 222.5t123.5 184t184 123.5t222.5 46zM627 1101q-15 -12 -36.5 -20.5t-35.5 -12t-43 -8t-39 -6.5 q-15 -3 -45.5 0t-45.5 -2q-20 -7 -51.5 -26.5t-34.5 -34.5q-3 -11 6.5 -22.5t8.5 -18.5q-3 -34 -27.5 -91t-29.5 -79q-9 -34 5 -93t8 -87q0 -9 17 -44.5t16 -59.5q12 0 23 -5t23.5 -15t19.5 -14q16 -8 33 -15t40.5 -15t34.5 -12q21 -9 52.5 -32t60 -38t57.5 -11 q7 -15 -3 -34t-22.5 -40t-9.5 -38q13 -21 23 -34.5t27.5 -27.5t36.5 -18q0 -7 -3.5 -16t-3.5 -14t5 -17q104 -2 221 112q30 29 46.5 47t34.5 49t21 63q-13 8 -37 8.5t-36 7.5q-15 7 -49.5 15t-51.5 19q-18 0 -41 -0.5t-43 -1.5t-42 -6.5t-38 -16.5q-51 -35 -66 -12 q-4 1 -3.5 25.5t0.5 25.5q-6 13 -26.5 17.5t-24.5 6.5q1 15 -0.5 30.5t-7 28t-18.5 11.5t-31 -21q-23 -25 -42 4q-19 28 -8 58q6 16 22 22q6 -1 26 -1.5t33.5 -4t19.5 -13.5q7 -12 18 -24t21.5 -20.5t20 -15t15.5 -10.5l5 -3q2 12 7.5 30.5t8 34.5t-0.5 32q-3 18 3.5 29 t18 22.5t15.5 24.5q6 14 10.5 35t8 31t15.5 22.5t34 22.5q-6 18 10 36q8 0 24 -1.5t24.5 -1.5t20 4.5t20.5 15.5q-10 23 -31 42.5t-37.5 29.5t-49 27t-43.5 23q0 1 2 8t3 11.5t1.5 10.5t-1 9.5t-4.5 4.5q31 -13 58.5 -14.5t38.5 2.5l12 5q5 28 -9.5 46t-36.5 24t-50 15 t-41 20q-18 -4 -37 0zM613 994q0 -17 8 -42t17 -45t9 -23q-8 1 -39.5 5.5t-52.5 10t-37 16.5q3 11 16 29.5t16 25.5q10 -10 19 -10t14 6t13.5 14.5t16.5 12.5z" />
+<glyph unicode="&#xe136;" d="M756 1157q164 92 306 -9l-259 -138l145 -232l251 126q6 -89 -34 -156.5t-117 -110.5q-60 -34 -127 -39.5t-126 16.5l-596 -596q-15 -16 -36.5 -16t-36.5 16l-111 110q-15 15 -15 36.5t15 37.5l600 599q-34 101 5.5 201.5t135.5 154.5z" />
+<glyph unicode="&#xe137;" horiz-adv-x="1220" d="M100 1196h1000q41 0 70.5 -29.5t29.5 -70.5v-100q0 -41 -29.5 -70.5t-70.5 -29.5h-1000q-41 0 -70.5 29.5t-29.5 70.5v100q0 41 29.5 70.5t70.5 29.5zM1100 1096h-200v-100h200v100zM100 796h1000q41 0 70.5 -29.5t29.5 -70.5v-100q0 -41 -29.5 -70.5t-70.5 -29.5h-1000 q-41 0 -70.5 29.5t-29.5 70.5v100q0 41 29.5 70.5t70.5 29.5zM1100 696h-500v-100h500v100zM100 396h1000q41 0 70.5 -29.5t29.5 -70.5v-100q0 -41 -29.5 -70.5t-70.5 -29.5h-1000q-41 0 -70.5 29.5t-29.5 70.5v100q0 41 29.5 70.5t70.5 29.5zM1100 296h-300v-100h300v100z " />
+<glyph unicode="&#xe138;" d="M150 1200h900q21 0 35.5 -14.5t14.5 -35.5t-14.5 -35.5t-35.5 -14.5h-900q-21 0 -35.5 14.5t-14.5 35.5t14.5 35.5t35.5 14.5zM700 500v-300l-200 -200v500l-350 500h900z" />
+<glyph unicode="&#xe139;" d="M500 1200h200q41 0 70.5 -29.5t29.5 -70.5v-100h300q41 0 70.5 -29.5t29.5 -70.5v-400h-500v100h-200v-100h-500v400q0 41 29.5 70.5t70.5 29.5h300v100q0 41 29.5 70.5t70.5 29.5zM500 1100v-100h200v100h-200zM1200 400v-200q0 -41 -29.5 -70.5t-70.5 -29.5h-1000 q-41 0 -70.5 29.5t-29.5 70.5v200h1200z" />
+<glyph unicode="&#xe140;" d="M50 1200h300q21 0 25 -10.5t-10 -24.5l-94 -94l199 -199q7 -8 7 -18t-7 -18l-106 -106q-8 -7 -18 -7t-18 7l-199 199l-94 -94q-14 -14 -24.5 -10t-10.5 25v300q0 21 14.5 35.5t35.5 14.5zM850 1200h300q21 0 35.5 -14.5t14.5 -35.5v-300q0 -21 -10.5 -25t-24.5 10l-94 94 l-199 -199q-8 -7 -18 -7t-18 7l-106 106q-7 8 -7 18t7 18l199 199l-94 94q-14 14 -10 24.5t25 10.5zM364 470l106 -106q7 -8 7 -18t-7 -18l-199 -199l94 -94q14 -14 10 -24.5t-25 -10.5h-300q-21 0 -35.5 14.5t-14.5 35.5v300q0 21 10.5 25t24.5 -10l94 -94l199 199 q8 7 18 7t18 -7zM1071 271l94 94q14 14 24.5 10t10.5 -25v-300q0 -21 -14.5 -35.5t-35.5 -14.5h-300q-21 0 -25 10.5t10 24.5l94 94l-199 199q-7 8 -7 18t7 18l106 106q8 7 18 7t18 -7z" />
+<glyph unicode="&#xe141;" d="M596 1192q121 0 231.5 -47.5t190 -127t127 -190t47.5 -231.5t-47.5 -231.5t-127 -190.5t-190 -127t-231.5 -47t-231.5 47t-190.5 127t-127 190.5t-47 231.5t47 231.5t127 190t190.5 127t231.5 47.5zM596 1010q-112 0 -207.5 -55.5t-151 -151t-55.5 -207.5t55.5 -207.5 t151 -151t207.5 -55.5t207.5 55.5t151 151t55.5 207.5t-55.5 207.5t-151 151t-207.5 55.5zM454.5 905q22.5 0 38.5 -16t16 -38.5t-16 -39t-38.5 -16.5t-38.5 16.5t-16 39t16 38.5t38.5 16zM754.5 905q22.5 0 38.5 -16t16 -38.5t-16 -39t-38 -16.5q-14 0 -29 10l-55 -145 q17 -23 17 -51q0 -36 -25.5 -61.5t-61.5 -25.5t-61.5 25.5t-25.5 61.5q0 32 20.5 56.5t51.5 29.5l122 126l1 1q-9 14 -9 28q0 23 16 39t38.5 16zM345.5 709q22.5 0 38.5 -16t16 -38.5t-16 -38.5t-38.5 -16t-38.5 16t-16 38.5t16 38.5t38.5 16zM854.5 709q22.5 0 38.5 -16 t16 -38.5t-16 -38.5t-38.5 -16t-38.5 16t-16 38.5t16 38.5t38.5 16z" />
+<glyph unicode="&#xe142;" d="M546 173l469 470q91 91 99 192q7 98 -52 175.5t-154 94.5q-22 4 -47 4q-34 0 -66.5 -10t-56.5 -23t-55.5 -38t-48 -41.5t-48.5 -47.5q-376 -375 -391 -390q-30 -27 -45 -41.5t-37.5 -41t-32 -46.5t-16 -47.5t-1.5 -56.5q9 -62 53.5 -95t99.5 -33q74 0 125 51l548 548 q36 36 20 75q-7 16 -21.5 26t-32.5 10q-26 0 -50 -23q-13 -12 -39 -38l-341 -338q-15 -15 -35.5 -15.5t-34.5 13.5t-14 34.5t14 34.5q327 333 361 367q35 35 67.5 51.5t78.5 16.5q14 0 29 -1q44 -8 74.5 -35.5t43.5 -68.5q14 -47 2 -96.5t-47 -84.5q-12 -11 -32 -32 t-79.5 -81t-114.5 -115t-124.5 -123.5t-123 -119.5t-96.5 -89t-57 -45q-56 -27 -120 -27q-70 0 -129 32t-93 89q-48 78 -35 173t81 163l511 511q71 72 111 96q91 55 198 55q80 0 152 -33q78 -36 129.5 -103t66.5 -154q17 -93 -11 -183.5t-94 -156.5l-482 -476 q-15 -15 -36 -16t-37 14t-17.5 34t14.5 35z" />
+<glyph unicode="&#xe143;" d="M649 949q48 68 109.5 104t121.5 38.5t118.5 -20t102.5 -64t71 -100.5t27 -123q0 -57 -33.5 -117.5t-94 -124.5t-126.5 -127.5t-150 -152.5t-146 -174q-62 85 -145.5 174t-150 152.5t-126.5 127.5t-93.5 124.5t-33.5 117.5q0 64 28 123t73 100.5t104 64t119 20 t120.5 -38.5t104.5 -104zM896 972q-33 0 -64.5 -19t-56.5 -46t-47.5 -53.5t-43.5 -45.5t-37.5 -19t-36 19t-40 45.5t-43 53.5t-54 46t-65.5 19q-67 0 -122.5 -55.5t-55.5 -132.5q0 -23 13.5 -51t46 -65t57.5 -63t76 -75l22 -22q15 -14 44 -44t50.5 -51t46 -44t41 -35t23 -12 t23.5 12t42.5 36t46 44t52.5 52t44 43q4 4 12 13q43 41 63.5 62t52 55t46 55t26 46t11.5 44q0 79 -53 133.5t-120 54.5z" />
+<glyph unicode="&#xe144;" d="M776.5 1214q93.5 0 159.5 -66l141 -141q66 -66 66 -160q0 -42 -28 -95.5t-62 -87.5l-29 -29q-31 53 -77 99l-18 18l95 95l-247 248l-389 -389l212 -212l-105 -106l-19 18l-141 141q-66 66 -66 159t66 159l283 283q65 66 158.5 66zM600 706l105 105q10 -8 19 -17l141 -141 q66 -66 66 -159t-66 -159l-283 -283q-66 -66 -159 -66t-159 66l-141 141q-66 66 -66 159.5t66 159.5l55 55q29 -55 75 -102l18 -17l-95 -95l247 -248l389 389z" />
+<glyph unicode="&#xe145;" d="M603 1200q85 0 162 -15t127 -38t79 -48t29 -46v-953q0 -41 -29.5 -70.5t-70.5 -29.5h-600q-41 0 -70.5 29.5t-29.5 70.5v953q0 21 30 46.5t81 48t129 37.5t163 15zM300 1000v-700h600v700h-600zM600 254q-43 0 -73.5 -30.5t-30.5 -73.5t30.5 -73.5t73.5 -30.5t73.5 30.5 t30.5 73.5t-30.5 73.5t-73.5 30.5z" />
+<glyph unicode="&#xe146;" d="M902 1185l283 -282q15 -15 15 -36t-14.5 -35.5t-35.5 -14.5t-35 15l-36 35l-279 -267v-300l-212 210l-308 -307l-280 -203l203 280l307 308l-210 212h300l267 279l-35 36q-15 14 -15 35t14.5 35.5t35.5 14.5t35 -15z" />
+<glyph unicode="&#xe148;" d="M700 1248v-78q38 -5 72.5 -14.5t75.5 -31.5t71 -53.5t52 -84t24 -118.5h-159q-4 36 -10.5 59t-21 45t-40 35.5t-64.5 20.5v-307l64 -13q34 -7 64 -16.5t70 -32t67.5 -52.5t47.5 -80t20 -112q0 -139 -89 -224t-244 -97v-77h-100v79q-150 16 -237 103q-40 40 -52.5 93.5 t-15.5 139.5h139q5 -77 48.5 -126t117.5 -65v335l-27 8q-46 14 -79 26.5t-72 36t-63 52t-40 72.5t-16 98q0 70 25 126t67.5 92t94.5 57t110 27v77h100zM600 754v274q-29 -4 -50 -11t-42 -21.5t-31.5 -41.5t-10.5 -65q0 -29 7 -50.5t16.5 -34t28.5 -22.5t31.5 -14t37.5 -10 q9 -3 13 -4zM700 547v-310q22 2 42.5 6.5t45 15.5t41.5 27t29 42t12 59.5t-12.5 59.5t-38 44.5t-53 31t-66.5 24.5z" />
+<glyph unicode="&#xe149;" d="M561 1197q84 0 160.5 -40t123.5 -109.5t47 -147.5h-153q0 40 -19.5 71.5t-49.5 48.5t-59.5 26t-55.5 9q-37 0 -79 -14.5t-62 -35.5q-41 -44 -41 -101q0 -26 13.5 -63t26.5 -61t37 -66q6 -9 9 -14h241v-100h-197q8 -50 -2.5 -115t-31.5 -95q-45 -62 -99 -112 q34 10 83 17.5t71 7.5q32 1 102 -16t104 -17q83 0 136 30l50 -147q-31 -19 -58 -30.5t-55 -15.5t-42 -4.5t-46 -0.5q-23 0 -76 17t-111 32.5t-96 11.5q-39 -3 -82 -16t-67 -25l-23 -11l-55 145q4 3 16 11t15.5 10.5t13 9t15.5 12t14.5 14t17.5 18.5q48 55 54 126.5 t-30 142.5h-221v100h166q-23 47 -44 104q-7 20 -12 41.5t-6 55.5t6 66.5t29.5 70.5t58.5 71q97 88 263 88z" />
+<glyph unicode="&#xe150;" d="M400 300h150q21 0 25 -11t-10 -25l-230 -250q-14 -15 -35 -15t-35 15l-230 250q-14 14 -10 25t25 11h150v900h200v-900zM935 1184l230 -249q14 -14 10 -24.5t-25 -10.5h-150v-900h-200v900h-150q-21 0 -25 10.5t10 24.5l230 249q14 15 35 15t35 -15z" />
+<glyph unicode="&#xe151;" d="M1000 700h-100v100h-100v-100h-100v500h300v-500zM400 300h150q21 0 25 -11t-10 -25l-230 -250q-14 -15 -35 -15t-35 15l-230 250q-14 14 -10 25t25 11h150v900h200v-900zM801 1100v-200h100v200h-100zM1000 350l-200 -250h200v-100h-300v150l200 250h-200v100h300v-150z " />
+<glyph unicode="&#xe152;" d="M400 300h150q21 0 25 -11t-10 -25l-230 -250q-14 -15 -35 -15t-35 15l-230 250q-14 14 -10 25t25 11h150v900h200v-900zM1000 1050l-200 -250h200v-100h-300v150l200 250h-200v100h300v-150zM1000 0h-100v100h-100v-100h-100v500h300v-500zM801 400v-200h100v200h-100z " />
+<glyph unicode="&#xe153;" d="M400 300h150q21 0 25 -11t-10 -25l-230 -250q-14 -15 -35 -15t-35 15l-230 250q-14 14 -10 25t25 11h150v900h200v-900zM1000 700h-100v400h-100v100h200v-500zM1100 0h-100v100h-200v400h300v-500zM901 400v-200h100v200h-100z" />
+<glyph unicode="&#xe154;" d="M400 300h150q21 0 25 -11t-10 -25l-230 -250q-14 -15 -35 -15t-35 15l-230 250q-14 14 -10 25t25 11h150v900h200v-900zM1100 700h-100v100h-200v400h300v-500zM901 1100v-200h100v200h-100zM1000 0h-100v400h-100v100h200v-500z" />
+<glyph unicode="&#xe155;" d="M400 300h150q21 0 25 -11t-10 -25l-230 -250q-14 -15 -35 -15t-35 15l-230 250q-14 14 -10 25t25 11h150v900h200v-900zM900 1000h-200v200h200v-200zM1000 700h-300v200h300v-200zM1100 400h-400v200h400v-200zM1200 100h-500v200h500v-200z" />
+<glyph unicode="&#xe156;" d="M400 300h150q21 0 25 -11t-10 -25l-230 -250q-14 -15 -35 -15t-35 15l-230 250q-14 14 -10 25t25 11h150v900h200v-900zM1200 1000h-500v200h500v-200zM1100 700h-400v200h400v-200zM1000 400h-300v200h300v-200zM900 100h-200v200h200v-200z" />
+<glyph unicode="&#xe157;" d="M350 1100h400q162 0 256 -93.5t94 -256.5v-400q0 -165 -93.5 -257.5t-256.5 -92.5h-400q-165 0 -257.5 92.5t-92.5 257.5v400q0 165 92.5 257.5t257.5 92.5zM800 900h-500q-41 0 -70.5 -29.5t-29.5 -70.5v-500q0 -41 29.5 -70.5t70.5 -29.5h500q41 0 70.5 29.5t29.5 70.5 v500q0 41 -29.5 70.5t-70.5 29.5z" />
+<glyph unicode="&#xe158;" d="M350 1100h400q165 0 257.5 -92.5t92.5 -257.5v-400q0 -165 -92.5 -257.5t-257.5 -92.5h-400q-163 0 -256.5 92.5t-93.5 257.5v400q0 163 94 256.5t256 93.5zM800 900h-500q-41 0 -70.5 -29.5t-29.5 -70.5v-500q0 -41 29.5 -70.5t70.5 -29.5h500q41 0 70.5 29.5t29.5 70.5 v500q0 41 -29.5 70.5t-70.5 29.5zM440 770l253 -190q17 -12 17 -30t-17 -30l-253 -190q-16 -12 -28 -6.5t-12 26.5v400q0 21 12 26.5t28 -6.5z" />
+<glyph unicode="&#xe159;" d="M350 1100h400q163 0 256.5 -94t93.5 -256v-400q0 -165 -92.5 -257.5t-257.5 -92.5h-400q-165 0 -257.5 92.5t-92.5 257.5v400q0 163 92.5 256.5t257.5 93.5zM800 900h-500q-41 0 -70.5 -29.5t-29.5 -70.5v-500q0 -41 29.5 -70.5t70.5 -29.5h500q41 0 70.5 29.5t29.5 70.5 v500q0 41 -29.5 70.5t-70.5 29.5zM350 700h400q21 0 26.5 -12t-6.5 -28l-190 -253q-12 -17 -30 -17t-30 17l-190 253q-12 16 -6.5 28t26.5 12z" />
+<glyph unicode="&#xe160;" d="M350 1100h400q165 0 257.5 -92.5t92.5 -257.5v-400q0 -163 -92.5 -256.5t-257.5 -93.5h-400q-163 0 -256.5 94t-93.5 256v400q0 165 92.5 257.5t257.5 92.5zM800 900h-500q-41 0 -70.5 -29.5t-29.5 -70.5v-500q0 -41 29.5 -70.5t70.5 -29.5h500q41 0 70.5 29.5t29.5 70.5 v500q0 41 -29.5 70.5t-70.5 29.5zM580 693l190 -253q12 -16 6.5 -28t-26.5 -12h-400q-21 0 -26.5 12t6.5 28l190 253q12 17 30 17t30 -17z" />
+<glyph unicode="&#xe161;" d="M550 1100h400q165 0 257.5 -92.5t92.5 -257.5v-400q0 -165 -92.5 -257.5t-257.5 -92.5h-400q-21 0 -35.5 14.5t-14.5 35.5v100q0 21 14.5 35.5t35.5 14.5h450q41 0 70.5 29.5t29.5 70.5v500q0 41 -29.5 70.5t-70.5 29.5h-450q-21 0 -35.5 14.5t-14.5 35.5v100 q0 21 14.5 35.5t35.5 14.5zM338 867l324 -284q16 -14 16 -33t-16 -33l-324 -284q-16 -14 -27 -9t-11 26v150h-250q-21 0 -35.5 14.5t-14.5 35.5v200q0 21 14.5 35.5t35.5 14.5h250v150q0 21 11 26t27 -9z" />
+<glyph unicode="&#xe162;" d="M793 1182l9 -9q8 -10 5 -27q-3 -11 -79 -225.5t-78 -221.5l300 1q24 0 32.5 -17.5t-5.5 -35.5q-1 0 -133.5 -155t-267 -312.5t-138.5 -162.5q-12 -15 -26 -15h-9l-9 8q-9 11 -4 32q2 9 42 123.5t79 224.5l39 110h-302q-23 0 -31 19q-10 21 6 41q75 86 209.5 237.5 t228 257t98.5 111.5q9 16 25 16h9z" />
+<glyph unicode="&#xe163;" d="M350 1100h400q21 0 35.5 -14.5t14.5 -35.5v-100q0 -21 -14.5 -35.5t-35.5 -14.5h-450q-41 0 -70.5 -29.5t-29.5 -70.5v-500q0 -41 29.5 -70.5t70.5 -29.5h450q21 0 35.5 -14.5t14.5 -35.5v-100q0 -21 -14.5 -35.5t-35.5 -14.5h-400q-165 0 -257.5 92.5t-92.5 257.5v400 q0 165 92.5 257.5t257.5 92.5zM938 867l324 -284q16 -14 16 -33t-16 -33l-324 -284q-16 -14 -27 -9t-11 26v150h-250q-21 0 -35.5 14.5t-14.5 35.5v200q0 21 14.5 35.5t35.5 14.5h250v150q0 21 11 26t27 -9z" />
+<glyph unicode="&#xe164;" d="M750 1200h400q21 0 35.5 -14.5t14.5 -35.5v-400q0 -21 -10.5 -25t-24.5 10l-109 109l-312 -312q-15 -15 -35.5 -15t-35.5 15l-141 141q-15 15 -15 35.5t15 35.5l312 312l-109 109q-14 14 -10 24.5t25 10.5zM456 900h-156q-41 0 -70.5 -29.5t-29.5 -70.5v-500 q0 -41 29.5 -70.5t70.5 -29.5h500q41 0 70.5 29.5t29.5 70.5v148l200 200v-298q0 -165 -93.5 -257.5t-256.5 -92.5h-400q-165 0 -257.5 92.5t-92.5 257.5v400q0 165 92.5 257.5t257.5 92.5h300z" />
+<glyph unicode="&#xe165;" d="M600 1186q119 0 227.5 -46.5t187 -125t125 -187t46.5 -227.5t-46.5 -227.5t-125 -187t-187 -125t-227.5 -46.5t-227.5 46.5t-187 125t-125 187t-46.5 227.5t46.5 227.5t125 187t187 125t227.5 46.5zM600 1022q-115 0 -212 -56.5t-153.5 -153.5t-56.5 -212t56.5 -212 t153.5 -153.5t212 -56.5t212 56.5t153.5 153.5t56.5 212t-56.5 212t-153.5 153.5t-212 56.5zM600 794q80 0 137 -57t57 -137t-57 -137t-137 -57t-137 57t-57 137t57 137t137 57z" />
+<glyph unicode="&#xe166;" d="M450 1200h200q21 0 35.5 -14.5t14.5 -35.5v-350h245q20 0 25 -11t-9 -26l-383 -426q-14 -15 -33.5 -15t-32.5 15l-379 426q-13 15 -8.5 26t25.5 11h250v350q0 21 14.5 35.5t35.5 14.5zM50 300h1000q21 0 35.5 -14.5t14.5 -35.5v-250h-1100v250q0 21 14.5 35.5t35.5 14.5z M900 200v-50h100v50h-100z" />
+<glyph unicode="&#xe167;" d="M583 1182l378 -435q14 -15 9 -31t-26 -16h-244v-250q0 -20 -17 -35t-39 -15h-200q-20 0 -32 14.5t-12 35.5v250h-250q-20 0 -25.5 16.5t8.5 31.5l383 431q14 16 33.5 17t33.5 -14zM50 300h1000q21 0 35.5 -14.5t14.5 -35.5v-250h-1100v250q0 21 14.5 35.5t35.5 14.5z M900 200v-50h100v50h-100z" />
+<glyph unicode="&#xe168;" d="M396 723l369 369q7 7 17.5 7t17.5 -7l139 -139q7 -8 7 -18.5t-7 -17.5l-525 -525q-7 -8 -17.5 -8t-17.5 8l-292 291q-7 8 -7 18t7 18l139 139q8 7 18.5 7t17.5 -7zM50 300h1000q21 0 35.5 -14.5t14.5 -35.5v-250h-1100v250q0 21 14.5 35.5t35.5 14.5zM900 200v-50h100v50 h-100z" />
+<glyph unicode="&#xe169;" d="M135 1023l142 142q14 14 35 14t35 -14l77 -77l-212 -212l-77 76q-14 15 -14 36t14 35zM655 855l210 210q14 14 24.5 10t10.5 -25l-2 -599q-1 -20 -15.5 -35t-35.5 -15l-597 -1q-21 0 -25 10.5t10 24.5l208 208l-154 155l212 212zM50 300h1000q21 0 35.5 -14.5t14.5 -35.5 v-250h-1100v250q0 21 14.5 35.5t35.5 14.5zM900 200v-50h100v50h-100z" />
+<glyph unicode="&#xe170;" d="M350 1200l599 -2q20 -1 35 -15.5t15 -35.5l1 -597q0 -21 -10.5 -25t-24.5 10l-208 208l-155 -154l-212 212l155 154l-210 210q-14 14 -10 24.5t25 10.5zM524 512l-76 -77q-15 -14 -36 -14t-35 14l-142 142q-14 14 -14 35t14 35l77 77zM50 300h1000q21 0 35.5 -14.5 t14.5 -35.5v-250h-1100v250q0 21 14.5 35.5t35.5 14.5zM900 200v-50h100v50h-100z" />
+<glyph unicode="&#xe171;" d="M1200 103l-483 276l-314 -399v423h-399l1196 796v-1096zM483 424v-230l683 953z" />
+<glyph unicode="&#xe172;" d="M1100 1000v-850q0 -21 -14.5 -35.5t-35.5 -14.5h-150v400h-700v-400h-150q-21 0 -35.5 14.5t-14.5 35.5v1000q0 20 14.5 35t35.5 15h250v-300h500v300h100zM700 1000h-100v200h100v-200z" />
+<glyph unicode="&#xe173;" d="M1100 1000l-2 -149l-299 -299l-95 95q-9 9 -21.5 9t-21.5 -9l-149 -147h-312v-400h-150q-21 0 -35.5 14.5t-14.5 35.5v1000q0 20 14.5 35t35.5 15h250v-300h500v300h100zM700 1000h-100v200h100v-200zM1132 638l106 -106q7 -7 7 -17.5t-7 -17.5l-420 -421q-8 -7 -18 -7 t-18 7l-202 203q-8 7 -8 17.5t8 17.5l106 106q7 8 17.5 8t17.5 -8l79 -79l297 297q7 7 17.5 7t17.5 -7z" />
+<glyph unicode="&#xe174;" d="M1100 1000v-269l-103 -103l-134 134q-15 15 -33.5 16.5t-34.5 -12.5l-266 -266h-329v-400h-150q-21 0 -35.5 14.5t-14.5 35.5v1000q0 20 14.5 35t35.5 15h250v-300h500v300h100zM700 1000h-100v200h100v-200zM1202 572l70 -70q15 -15 15 -35.5t-15 -35.5l-131 -131 l131 -131q15 -15 15 -35.5t-15 -35.5l-70 -70q-15 -15 -35.5 -15t-35.5 15l-131 131l-131 -131q-15 -15 -35.5 -15t-35.5 15l-70 70q-15 15 -15 35.5t15 35.5l131 131l-131 131q-15 15 -15 35.5t15 35.5l70 70q15 15 35.5 15t35.5 -15l131 -131l131 131q15 15 35.5 15 t35.5 -15z" />
+<glyph unicode="&#xe175;" d="M1100 1000v-300h-350q-21 0 -35.5 -14.5t-14.5 -35.5v-150h-500v-400h-150q-21 0 -35.5 14.5t-14.5 35.5v1000q0 20 14.5 35t35.5 15h250v-300h500v300h100zM700 1000h-100v200h100v-200zM850 600h100q21 0 35.5 -14.5t14.5 -35.5v-250h150q21 0 25 -10.5t-10 -24.5 l-230 -230q-14 -14 -35 -14t-35 14l-230 230q-14 14 -10 24.5t25 10.5h150v250q0 21 14.5 35.5t35.5 14.5z" />
+<glyph unicode="&#xe176;" d="M1100 1000v-400l-165 165q-14 15 -35 15t-35 -15l-263 -265h-402v-400h-150q-21 0 -35.5 14.5t-14.5 35.5v1000q0 20 14.5 35t35.5 15h250v-300h500v300h100zM700 1000h-100v200h100v-200zM935 565l230 -229q14 -15 10 -25.5t-25 -10.5h-150v-250q0 -20 -14.5 -35 t-35.5 -15h-100q-21 0 -35.5 15t-14.5 35v250h-150q-21 0 -25 10.5t10 25.5l230 229q14 15 35 15t35 -15z" />
+<glyph unicode="&#xe177;" d="M50 1100h1100q21 0 35.5 -14.5t14.5 -35.5v-150h-1200v150q0 21 14.5 35.5t35.5 14.5zM1200 800v-550q0 -21 -14.5 -35.5t-35.5 -14.5h-1100q-21 0 -35.5 14.5t-14.5 35.5v550h1200zM100 500v-200h400v200h-400z" />
+<glyph unicode="&#xe178;" d="M935 1165l248 -230q14 -14 14 -35t-14 -35l-248 -230q-14 -14 -24.5 -10t-10.5 25v150h-400v200h400v150q0 21 10.5 25t24.5 -10zM200 800h-50q-21 0 -35.5 14.5t-14.5 35.5v100q0 21 14.5 35.5t35.5 14.5h50v-200zM400 800h-100v200h100v-200zM18 435l247 230 q14 14 24.5 10t10.5 -25v-150h400v-200h-400v-150q0 -21 -10.5 -25t-24.5 10l-247 230q-15 14 -15 35t15 35zM900 300h-100v200h100v-200zM1000 500h51q20 0 34.5 -14.5t14.5 -35.5v-100q0 -21 -14.5 -35.5t-34.5 -14.5h-51v200z" />
+<glyph unicode="&#xe179;" d="M862 1073l276 116q25 18 43.5 8t18.5 -41v-1106q0 -21 -14.5 -35.5t-35.5 -14.5h-200q-21 0 -35.5 14.5t-14.5 35.5v397q-4 1 -11 5t-24 17.5t-30 29t-24 42t-11 56.5v359q0 31 18.5 65t43.5 52zM550 1200q22 0 34.5 -12.5t14.5 -24.5l1 -13v-450q0 -28 -10.5 -59.5 t-25 -56t-29 -45t-25.5 -31.5l-10 -11v-447q0 -21 -14.5 -35.5t-35.5 -14.5h-200q-21 0 -35.5 14.5t-14.5 35.5v447q-4 4 -11 11.5t-24 30.5t-30 46t-24 55t-11 60v450q0 2 0.5 5.5t4 12t8.5 15t14.5 12t22.5 5.5q20 0 32.5 -12.5t14.5 -24.5l3 -13v-350h100v350v5.5t2.5 12 t7 15t15 12t25.5 5.5q23 0 35.5 -12.5t13.5 -24.5l1 -13v-350h100v350q0 2 0.5 5.5t3 12t7 15t15 12t24.5 5.5z" />
+<glyph unicode="&#xe180;" d="M1200 1100v-56q-4 0 -11 -0.5t-24 -3t-30 -7.5t-24 -15t-11 -24v-888q0 -22 25 -34.5t50 -13.5l25 -2v-56h-400v56q75 0 87.5 6.5t12.5 43.5v394h-500v-394q0 -37 12.5 -43.5t87.5 -6.5v-56h-400v56q4 0 11 0.5t24 3t30 7.5t24 15t11 24v888q0 22 -25 34.5t-50 13.5 l-25 2v56h400v-56q-75 0 -87.5 -6.5t-12.5 -43.5v-394h500v394q0 37 -12.5 43.5t-87.5 6.5v56h400z" />
+<glyph unicode="&#xe181;" d="M675 1000h375q21 0 35.5 -14.5t14.5 -35.5v-150h-105l-295 -98v98l-200 200h-400l100 100h375zM100 900h300q41 0 70.5 -29.5t29.5 -70.5v-500q0 -41 -29.5 -70.5t-70.5 -29.5h-300q-41 0 -70.5 29.5t-29.5 70.5v500q0 41 29.5 70.5t70.5 29.5zM100 800v-200h300v200 h-300zM1100 535l-400 -133v163l400 133v-163zM100 500v-200h300v200h-300zM1100 398v-248q0 -21 -14.5 -35.5t-35.5 -14.5h-375l-100 -100h-375l-100 100h400l200 200h105z" />
+<glyph unicode="&#xe182;" d="M17 1007l162 162q17 17 40 14t37 -22l139 -194q14 -20 11 -44.5t-20 -41.5l-119 -118q102 -142 228 -268t267 -227l119 118q17 17 42.5 19t44.5 -12l192 -136q19 -14 22.5 -37.5t-13.5 -40.5l-163 -162q-3 -1 -9.5 -1t-29.5 2t-47.5 6t-62.5 14.5t-77.5 26.5t-90 42.5 t-101.5 60t-111 83t-119 108.5q-74 74 -133.5 150.5t-94.5 138.5t-60 119.5t-34.5 100t-15 74.5t-4.5 48z" />
+<glyph unicode="&#xe183;" d="M600 1100q92 0 175 -10.5t141.5 -27t108.5 -36.5t81.5 -40t53.5 -37t31 -27l9 -10v-200q0 -21 -14.5 -33t-34.5 -9l-202 34q-20 3 -34.5 20t-14.5 38v146q-141 24 -300 24t-300 -24v-146q0 -21 -14.5 -38t-34.5 -20l-202 -34q-20 -3 -34.5 9t-14.5 33v200q3 4 9.5 10.5 t31 26t54 37.5t80.5 39.5t109 37.5t141 26.5t175 10.5zM600 795q56 0 97 -9.5t60 -23.5t30 -28t12 -24l1 -10v-50l365 -303q14 -15 24.5 -40t10.5 -45v-212q0 -21 -14.5 -35.5t-35.5 -14.5h-1100q-21 0 -35.5 14.5t-14.5 35.5v212q0 20 10.5 45t24.5 40l365 303v50 q0 4 1 10.5t12 23t30 29t60 22.5t97 10z" />
+<glyph unicode="&#xe184;" d="M1100 700l-200 -200h-600l-200 200v500h200v-200h200v200h200v-200h200v200h200v-500zM250 400h700q21 0 35.5 -14.5t14.5 -35.5t-14.5 -35.5t-35.5 -14.5h-12l137 -100h-950l137 100h-12q-21 0 -35.5 14.5t-14.5 35.5t14.5 35.5t35.5 14.5zM50 100h1100q21 0 35.5 -14.5 t14.5 -35.5v-50h-1200v50q0 21 14.5 35.5t35.5 14.5z" />
+<glyph unicode="&#xe185;" d="M700 1100h-100q-41 0 -70.5 -29.5t-29.5 -70.5v-1000h300v1000q0 41 -29.5 70.5t-70.5 29.5zM1100 800h-100q-41 0 -70.5 -29.5t-29.5 -70.5v-700h300v700q0 41 -29.5 70.5t-70.5 29.5zM400 0h-300v400q0 41 29.5 70.5t70.5 29.5h100q41 0 70.5 -29.5t29.5 -70.5v-400z " />
+<glyph unicode="&#xe186;" d="M200 1100h700q124 0 212 -88t88 -212v-500q0 -124 -88 -212t-212 -88h-700q-124 0 -212 88t-88 212v500q0 124 88 212t212 88zM100 900v-700h900v700h-900zM500 700h-200v-100h200v-300h-300v100h200v100h-200v300h300v-100zM900 700v-300l-100 -100h-200v500h200z M700 700v-300h100v300h-100z" />
+<glyph unicode="&#xe187;" d="M200 1100h700q124 0 212 -88t88 -212v-500q0 -124 -88 -212t-212 -88h-700q-124 0 -212 88t-88 212v500q0 124 88 212t212 88zM100 900v-700h900v700h-900zM500 300h-100v200h-100v-200h-100v500h100v-200h100v200h100v-500zM900 700v-300l-100 -100h-200v500h200z M700 700v-300h100v300h-100z" />
+<glyph unicode="&#xe188;" d="M200 1100h700q124 0 212 -88t88 -212v-500q0 -124 -88 -212t-212 -88h-700q-124 0 -212 88t-88 212v500q0 124 88 212t212 88zM100 900v-700h900v700h-900zM500 700h-200v-300h200v-100h-300v500h300v-100zM900 700h-200v-300h200v-100h-300v500h300v-100z" />
+<glyph unicode="&#xe189;" d="M200 1100h700q124 0 212 -88t88 -212v-500q0 -124 -88 -212t-212 -88h-700q-124 0 -212 88t-88 212v500q0 124 88 212t212 88zM100 900v-700h900v700h-900zM500 400l-300 150l300 150v-300zM900 550l-300 -150v300z" />
+<glyph unicode="&#xe190;" d="M200 1100h700q124 0 212 -88t88 -212v-500q0 -124 -88 -212t-212 -88h-700q-124 0 -212 88t-88 212v500q0 124 88 212t212 88zM100 900v-700h900v700h-900zM900 300h-700v500h700v-500zM800 700h-130q-38 0 -66.5 -43t-28.5 -108t27 -107t68 -42h130v300zM300 700v-300 h130q41 0 68 42t27 107t-28.5 108t-66.5 43h-130z" />
+<glyph unicode="&#xe191;" d="M200 1100h700q124 0 212 -88t88 -212v-500q0 -124 -88 -212t-212 -88h-700q-124 0 -212 88t-88 212v500q0 124 88 212t212 88zM100 900v-700h900v700h-900zM500 700h-200v-100h200v-300h-300v100h200v100h-200v300h300v-100zM900 300h-100v400h-100v100h200v-500z M700 300h-100v100h100v-100z" />
+<glyph unicode="&#xe192;" d="M200 1100h700q124 0 212 -88t88 -212v-500q0 -124 -88 -212t-212 -88h-700q-124 0 -212 88t-88 212v500q0 124 88 212t212 88zM100 900v-700h900v700h-900zM300 700h200v-400h-300v500h100v-100zM900 300h-100v400h-100v100h200v-500zM300 600v-200h100v200h-100z M700 300h-100v100h100v-100z" />
+<glyph unicode="&#xe193;" d="M200 1100h700q124 0 212 -88t88 -212v-500q0 -124 -88 -212t-212 -88h-700q-124 0 -212 88t-88 212v500q0 124 88 212t212 88zM100 900v-700h900v700h-900zM500 500l-199 -200h-100v50l199 200v150h-200v100h300v-300zM900 300h-100v400h-100v100h200v-500zM701 300h-100 v100h100v-100z" />
+<glyph unicode="&#xe194;" d="M600 1191q120 0 229.5 -47t188.5 -126t126 -188.5t47 -229.5t-47 -229.5t-126 -188.5t-188.5 -126t-229.5 -47t-229.5 47t-188.5 126t-126 188.5t-47 229.5t47 229.5t126 188.5t188.5 126t229.5 47zM600 1021q-114 0 -211 -56.5t-153.5 -153.5t-56.5 -211t56.5 -211 t153.5 -153.5t211 -56.5t211 56.5t153.5 153.5t56.5 211t-56.5 211t-153.5 153.5t-211 56.5zM800 700h-300v-200h300v-100h-300l-100 100v200l100 100h300v-100z" />
+<glyph unicode="&#xe195;" d="M600 1191q120 0 229.5 -47t188.5 -126t126 -188.5t47 -229.5t-47 -229.5t-126 -188.5t-188.5 -126t-229.5 -47t-229.5 47t-188.5 126t-126 188.5t-47 229.5t47 229.5t126 188.5t188.5 126t229.5 47zM600 1021q-114 0 -211 -56.5t-153.5 -153.5t-56.5 -211t56.5 -211 t153.5 -153.5t211 -56.5t211 56.5t153.5 153.5t56.5 211t-56.5 211t-153.5 153.5t-211 56.5zM800 700v-100l-50 -50l100 -100v-50h-100l-100 100h-150v-100h-100v400h300zM500 700v-100h200v100h-200z" />
+<glyph unicode="&#xe197;" d="M503 1089q110 0 200.5 -59.5t134.5 -156.5q44 14 90 14q120 0 205 -86.5t85 -207t-85 -207t-205 -86.5h-128v250q0 21 -14.5 35.5t-35.5 14.5h-300q-21 0 -35.5 -14.5t-14.5 -35.5v-250h-222q-80 0 -136 57.5t-56 136.5q0 69 43 122.5t108 67.5q-2 19 -2 37q0 100 49 185 t134 134t185 49zM525 500h150q10 0 17.5 -7.5t7.5 -17.5v-275h137q21 0 26 -11.5t-8 -27.5l-223 -244q-13 -16 -32 -16t-32 16l-223 244q-13 16 -8 27.5t26 11.5h137v275q0 10 7.5 17.5t17.5 7.5z" />
+<glyph unicode="&#xe198;" d="M502 1089q110 0 201 -59.5t135 -156.5q43 15 89 15q121 0 206 -86.5t86 -206.5q0 -99 -60 -181t-150 -110l-378 360q-13 16 -31.5 16t-31.5 -16l-381 -365h-9q-79 0 -135.5 57.5t-56.5 136.5q0 69 43 122.5t108 67.5q-2 19 -2 38q0 100 49 184.5t133.5 134t184.5 49.5z M632 467l223 -228q13 -16 8 -27.5t-26 -11.5h-137v-275q0 -10 -7.5 -17.5t-17.5 -7.5h-150q-10 0 -17.5 7.5t-7.5 17.5v275h-137q-21 0 -26 11.5t8 27.5q199 204 223 228q19 19 31.5 19t32.5 -19z" />
+<glyph unicode="&#xe199;" d="M700 100v100h400l-270 300h170l-270 300h170l-300 333l-300 -333h170l-270 -300h170l-270 -300h400v-100h-50q-21 0 -35.5 -14.5t-14.5 -35.5v-50h400v50q0 21 -14.5 35.5t-35.5 14.5h-50z" />
+<glyph unicode="&#xe200;" d="M600 1179q94 0 167.5 -56.5t99.5 -145.5q89 -6 150.5 -71.5t61.5 -155.5q0 -61 -29.5 -112.5t-79.5 -82.5q9 -29 9 -55q0 -74 -52.5 -126.5t-126.5 -52.5q-55 0 -100 30v-251q21 0 35.5 -14.5t14.5 -35.5v-50h-300v50q0 21 14.5 35.5t35.5 14.5v251q-45 -30 -100 -30 q-74 0 -126.5 52.5t-52.5 126.5q0 18 4 38q-47 21 -75.5 65t-28.5 97q0 74 52.5 126.5t126.5 52.5q5 0 23 -2q0 2 -1 10t-1 13q0 116 81.5 197.5t197.5 81.5z" />
+<glyph unicode="&#xe201;" d="M1010 1010q111 -111 150.5 -260.5t0 -299t-150.5 -260.5q-83 -83 -191.5 -126.5t-218.5 -43.5t-218.5 43.5t-191.5 126.5q-111 111 -150.5 260.5t0 299t150.5 260.5q83 83 191.5 126.5t218.5 43.5t218.5 -43.5t191.5 -126.5zM476 1065q-4 0 -8 -1q-121 -34 -209.5 -122.5 t-122.5 -209.5q-4 -12 2.5 -23t18.5 -14l36 -9q3 -1 7 -1q23 0 29 22q27 96 98 166q70 71 166 98q11 3 17.5 13.5t3.5 22.5l-9 35q-3 13 -14 19q-7 4 -15 4zM512 920q-4 0 -9 -2q-80 -24 -138.5 -82.5t-82.5 -138.5q-4 -13 2 -24t19 -14l34 -9q4 -1 8 -1q22 0 28 21 q18 58 58.5 98.5t97.5 58.5q12 3 18 13.5t3 21.5l-9 35q-3 12 -14 19q-7 4 -15 4zM719.5 719.5q-49.5 49.5 -119.5 49.5t-119.5 -49.5t-49.5 -119.5t49.5 -119.5t119.5 -49.5t119.5 49.5t49.5 119.5t-49.5 119.5zM855 551q-22 0 -28 -21q-18 -58 -58.5 -98.5t-98.5 -57.5 q-11 -4 -17 -14.5t-3 -21.5l9 -35q3 -12 14 -19q7 -4 15 -4q4 0 9 2q80 24 138.5 82.5t82.5 138.5q4 13 -2.5 24t-18.5 14l-34 9q-4 1 -8 1zM1000 515q-23 0 -29 -22q-27 -96 -98 -166q-70 -71 -166 -98q-11 -3 -17.5 -13.5t-3.5 -22.5l9 -35q3 -13 14 -19q7 -4 15 -4 q4 0 8 1q121 34 209.5 122.5t122.5 209.5q4 12 -2.5 23t-18.5 14l-36 9q-3 1 -7 1z" />
+<glyph unicode="&#xe202;" d="M700 800h300v-380h-180v200h-340v-200h-380v755q0 10 7.5 17.5t17.5 7.5h575v-400zM1000 900h-200v200zM700 300h162l-212 -212l-212 212h162v200h100v-200zM520 0h-395q-10 0 -17.5 7.5t-7.5 17.5v395zM1000 220v-195q0 -10 -7.5 -17.5t-17.5 -7.5h-195z" />
+<glyph unicode="&#xe203;" d="M700 800h300v-520l-350 350l-550 -550v1095q0 10 7.5 17.5t17.5 7.5h575v-400zM1000 900h-200v200zM862 200h-162v-200h-100v200h-162l212 212zM480 0h-355q-10 0 -17.5 7.5t-7.5 17.5v55h380v-80zM1000 80v-55q0 -10 -7.5 -17.5t-17.5 -7.5h-155v80h180z" />
+<glyph unicode="&#xe204;" d="M1162 800h-162v-200h100l100 -100h-300v300h-162l212 212zM200 800h200q27 0 40 -2t29.5 -10.5t23.5 -30t7 -57.5h300v-100h-600l-200 -350v450h100q0 36 7 57.5t23.5 30t29.5 10.5t40 2zM800 400h240l-240 -400h-800l300 500h500v-100z" />
+<glyph unicode="&#xe205;" d="M650 1100h100q21 0 35.5 -14.5t14.5 -35.5v-50h50q21 0 35.5 -14.5t14.5 -35.5v-100q0 -21 -14.5 -35.5t-35.5 -14.5h-300q-21 0 -35.5 14.5t-14.5 35.5v100q0 21 14.5 35.5t35.5 14.5h50v50q0 21 14.5 35.5t35.5 14.5zM1000 850v150q41 0 70.5 -29.5t29.5 -70.5v-800 q0 -41 -29.5 -70.5t-70.5 -29.5h-600q-1 0 -20 4l246 246l-326 326v324q0 41 29.5 70.5t70.5 29.5v-150q0 -62 44 -106t106 -44h300q62 0 106 44t44 106zM412 250l-212 -212v162h-200v100h200v162z" />
+<glyph unicode="&#xe206;" d="M450 1100h100q21 0 35.5 -14.5t14.5 -35.5v-50h50q21 0 35.5 -14.5t14.5 -35.5v-100q0 -21 -14.5 -35.5t-35.5 -14.5h-300q-21 0 -35.5 14.5t-14.5 35.5v100q0 21 14.5 35.5t35.5 14.5h50v50q0 21 14.5 35.5t35.5 14.5zM800 850v150q41 0 70.5 -29.5t29.5 -70.5v-500 h-200v-300h200q0 -36 -7 -57.5t-23.5 -30t-29.5 -10.5t-40 -2h-600q-41 0 -70.5 29.5t-29.5 70.5v800q0 41 29.5 70.5t70.5 29.5v-150q0 -62 44 -106t106 -44h300q62 0 106 44t44 106zM1212 250l-212 -212v162h-200v100h200v162z" />
+<glyph unicode="&#xe209;" d="M658 1197l637 -1104q23 -38 7 -65.5t-60 -27.5h-1276q-44 0 -60 27.5t7 65.5l637 1104q22 39 54 39t54 -39zM704 800h-208q-20 0 -32 -14.5t-8 -34.5l58 -302q4 -20 21.5 -34.5t37.5 -14.5h54q20 0 37.5 14.5t21.5 34.5l58 302q4 20 -8 34.5t-32 14.5zM500 300v-100h200 v100h-200z" />
+<glyph unicode="&#xe210;" d="M425 1100h250q10 0 17.5 -7.5t7.5 -17.5v-150q0 -10 -7.5 -17.5t-17.5 -7.5h-250q-10 0 -17.5 7.5t-7.5 17.5v150q0 10 7.5 17.5t17.5 7.5zM425 800h250q10 0 17.5 -7.5t7.5 -17.5v-150q0 -10 -7.5 -17.5t-17.5 -7.5h-250q-10 0 -17.5 7.5t-7.5 17.5v150q0 10 7.5 17.5 t17.5 7.5zM825 800h250q10 0 17.5 -7.5t7.5 -17.5v-150q0 -10 -7.5 -17.5t-17.5 -7.5h-250q-10 0 -17.5 7.5t-7.5 17.5v150q0 10 7.5 17.5t17.5 7.5zM25 500h250q10 0 17.5 -7.5t7.5 -17.5v-150q0 -10 -7.5 -17.5t-17.5 -7.5h-250q-10 0 -17.5 7.5t-7.5 17.5v150 q0 10 7.5 17.5t17.5 7.5zM425 500h250q10 0 17.5 -7.5t7.5 -17.5v-150q0 -10 -7.5 -17.5t-17.5 -7.5h-250q-10 0 -17.5 7.5t-7.5 17.5v150q0 10 7.5 17.5t17.5 7.5zM825 500h250q10 0 17.5 -7.5t7.5 -17.5v-150q0 -10 -7.5 -17.5t-17.5 -7.5h-250q-10 0 -17.5 7.5t-7.5 17.5 v150q0 10 7.5 17.5t17.5 7.5zM25 200h250q10 0 17.5 -7.5t7.5 -17.5v-150q0 -10 -7.5 -17.5t-17.5 -7.5h-250q-10 0 -17.5 7.5t-7.5 17.5v150q0 10 7.5 17.5t17.5 7.5zM425 200h250q10 0 17.5 -7.5t7.5 -17.5v-150q0 -10 -7.5 -17.5t-17.5 -7.5h-250q-10 0 -17.5 7.5 t-7.5 17.5v150q0 10 7.5 17.5t17.5 7.5zM825 200h250q10 0 17.5 -7.5t7.5 -17.5v-150q0 -10 -7.5 -17.5t-17.5 -7.5h-250q-10 0 -17.5 7.5t-7.5 17.5v150q0 10 7.5 17.5t17.5 7.5z" />
+<glyph unicode="&#xe211;" d="M700 1200h100v-200h-100v-100h350q62 0 86.5 -39.5t-3.5 -94.5l-66 -132q-41 -83 -81 -134h-772q-40 51 -81 134l-66 132q-28 55 -3.5 94.5t86.5 39.5h350v100h-100v200h100v100h200v-100zM250 400h700q21 0 35.5 -14.5t14.5 -35.5t-14.5 -35.5t-35.5 -14.5h-12l137 -100 h-950l138 100h-13q-21 0 -35.5 14.5t-14.5 35.5t14.5 35.5t35.5 14.5zM50 100h1100q21 0 35.5 -14.5t14.5 -35.5v-50h-1200v50q0 21 14.5 35.5t35.5 14.5z" />
+<glyph unicode="&#xe212;" d="M600 1300q40 0 68.5 -29.5t28.5 -70.5h-194q0 41 28.5 70.5t68.5 29.5zM443 1100h314q18 -37 18 -75q0 -8 -3 -25h328q41 0 44.5 -16.5t-30.5 -38.5l-175 -145h-678l-178 145q-34 22 -29 38.5t46 16.5h328q-3 17 -3 25q0 38 18 75zM250 700h700q21 0 35.5 -14.5 t14.5 -35.5t-14.5 -35.5t-35.5 -14.5h-150v-200l275 -200h-950l275 200v200h-150q-21 0 -35.5 14.5t-14.5 35.5t14.5 35.5t35.5 14.5zM50 100h1100q21 0 35.5 -14.5t14.5 -35.5v-50h-1200v50q0 21 14.5 35.5t35.5 14.5z" />
+<glyph unicode="&#xe213;" d="M600 1181q75 0 128 -53t53 -128t-53 -128t-128 -53t-128 53t-53 128t53 128t128 53zM602 798h46q34 0 55.5 -28.5t21.5 -86.5q0 -76 39 -183h-324q39 107 39 183q0 58 21.5 86.5t56.5 28.5h45zM250 400h700q21 0 35.5 -14.5t14.5 -35.5t-14.5 -35.5t-35.5 -14.5h-13 l138 -100h-950l137 100h-12q-21 0 -35.5 14.5t-14.5 35.5t14.5 35.5t35.5 14.5zM50 100h1100q21 0 35.5 -14.5t14.5 -35.5v-50h-1200v50q0 21 14.5 35.5t35.5 14.5z" />
+<glyph unicode="&#xe214;" d="M600 1300q47 0 92.5 -53.5t71 -123t25.5 -123.5q0 -78 -55.5 -133.5t-133.5 -55.5t-133.5 55.5t-55.5 133.5q0 62 34 143l144 -143l111 111l-163 163q34 26 63 26zM602 798h46q34 0 55.5 -28.5t21.5 -86.5q0 -76 39 -183h-324q39 107 39 183q0 58 21.5 86.5t56.5 28.5h45 zM250 400h700q21 0 35.5 -14.5t14.5 -35.5t-14.5 -35.5t-35.5 -14.5h-13l138 -100h-950l137 100h-12q-21 0 -35.5 14.5t-14.5 35.5t14.5 35.5t35.5 14.5zM50 100h1100q21 0 35.5 -14.5t14.5 -35.5v-50h-1200v50q0 21 14.5 35.5t35.5 14.5z" />
+<glyph unicode="&#xe215;" d="M600 1200l300 -161v-139h-300q0 -57 18.5 -108t50 -91.5t63 -72t70 -67.5t57.5 -61h-530q-60 83 -90.5 177.5t-30.5 178.5t33 164.5t87.5 139.5t126 96.5t145.5 41.5v-98zM250 400h700q21 0 35.5 -14.5t14.5 -35.5t-14.5 -35.5t-35.5 -14.5h-13l138 -100h-950l137 100 h-12q-21 0 -35.5 14.5t-14.5 35.5t14.5 35.5t35.5 14.5zM50 100h1100q21 0 35.5 -14.5t14.5 -35.5v-50h-1200v50q0 21 14.5 35.5t35.5 14.5z" />
+<glyph unicode="&#xe216;" d="M600 1300q41 0 70.5 -29.5t29.5 -70.5v-78q46 -26 73 -72t27 -100v-50h-400v50q0 54 27 100t73 72v78q0 41 29.5 70.5t70.5 29.5zM400 800h400q54 0 100 -27t72 -73h-172v-100h200v-100h-200v-100h200v-100h-200v-100h200q0 -83 -58.5 -141.5t-141.5 -58.5h-400 q-83 0 -141.5 58.5t-58.5 141.5v400q0 83 58.5 141.5t141.5 58.5z" />
+<glyph unicode="&#xe218;" d="M150 1100h900q21 0 35.5 -14.5t14.5 -35.5v-500q0 -21 -14.5 -35.5t-35.5 -14.5h-900q-21 0 -35.5 14.5t-14.5 35.5v500q0 21 14.5 35.5t35.5 14.5zM125 400h950q10 0 17.5 -7.5t7.5 -17.5v-50q0 -10 -7.5 -17.5t-17.5 -7.5h-283l224 -224q13 -13 13 -31.5t-13 -32 t-31.5 -13.5t-31.5 13l-88 88h-524l-87 -88q-13 -13 -32 -13t-32 13.5t-13 32t13 31.5l224 224h-289q-10 0 -17.5 7.5t-7.5 17.5v50q0 10 7.5 17.5t17.5 7.5zM541 300l-100 -100h324l-100 100h-124z" />
+<glyph unicode="&#xe219;" d="M200 1100h800q83 0 141.5 -58.5t58.5 -141.5v-200h-100q0 41 -29.5 70.5t-70.5 29.5h-250q-41 0 -70.5 -29.5t-29.5 -70.5h-100q0 41 -29.5 70.5t-70.5 29.5h-250q-41 0 -70.5 -29.5t-29.5 -70.5h-100v200q0 83 58.5 141.5t141.5 58.5zM100 600h1000q41 0 70.5 -29.5 t29.5 -70.5v-300h-1200v300q0 41 29.5 70.5t70.5 29.5zM300 100v-50q0 -21 -14.5 -35.5t-35.5 -14.5h-100q-21 0 -35.5 14.5t-14.5 35.5v50h200zM1100 100v-50q0 -21 -14.5 -35.5t-35.5 -14.5h-100q-21 0 -35.5 14.5t-14.5 35.5v50h200z" />
+<glyph unicode="&#xe221;" d="M480 1165l682 -683q31 -31 31 -75.5t-31 -75.5l-131 -131h-481l-517 518q-32 31 -32 75.5t32 75.5l295 296q31 31 75.5 31t76.5 -31zM108 794l342 -342l303 304l-341 341zM250 100h800q21 0 35.5 -14.5t14.5 -35.5v-50h-900v50q0 21 14.5 35.5t35.5 14.5z" />
+<glyph unicode="&#xe223;" d="M1057 647l-189 506q-8 19 -27.5 33t-40.5 14h-400q-21 0 -40.5 -14t-27.5 -33l-189 -506q-8 -19 1.5 -33t30.5 -14h625v-150q0 -21 14.5 -35.5t35.5 -14.5t35.5 14.5t14.5 35.5v150h125q21 0 30.5 14t1.5 33zM897 0h-595v50q0 21 14.5 35.5t35.5 14.5h50v50 q0 21 14.5 35.5t35.5 14.5h48v300h200v-300h47q21 0 35.5 -14.5t14.5 -35.5v-50h50q21 0 35.5 -14.5t14.5 -35.5v-50z" />
+<glyph unicode="&#xe224;" d="M900 800h300v-575q0 -10 -7.5 -17.5t-17.5 -7.5h-375v591l-300 300v84q0 10 7.5 17.5t17.5 7.5h375v-400zM1200 900h-200v200zM400 600h300v-575q0 -10 -7.5 -17.5t-17.5 -7.5h-650q-10 0 -17.5 7.5t-7.5 17.5v950q0 10 7.5 17.5t17.5 7.5h375v-400zM700 700h-200v200z " />
+<glyph unicode="&#xe225;" d="M484 1095h195q75 0 146 -32.5t124 -86t89.5 -122.5t48.5 -142q18 -14 35 -20q31 -10 64.5 6.5t43.5 48.5q10 34 -15 71q-19 27 -9 43q5 8 12.5 11t19 -1t23.5 -16q41 -44 39 -105q-3 -63 -46 -106.5t-104 -43.5h-62q-7 -55 -35 -117t-56 -100l-39 -234q-3 -20 -20 -34.5 t-38 -14.5h-100q-21 0 -33 14.5t-9 34.5l12 70q-49 -14 -91 -14h-195q-24 0 -65 8l-11 -64q-3 -20 -20 -34.5t-38 -14.5h-100q-21 0 -33 14.5t-9 34.5l26 157q-84 74 -128 175l-159 53q-19 7 -33 26t-14 40v50q0 21 14.5 35.5t35.5 14.5h124q11 87 56 166l-111 95 q-16 14 -12.5 23.5t24.5 9.5h203q116 101 250 101zM675 1000h-250q-10 0 -17.5 -7.5t-7.5 -17.5v-50q0 -10 7.5 -17.5t17.5 -7.5h250q10 0 17.5 7.5t7.5 17.5v50q0 10 -7.5 17.5t-17.5 7.5z" />
+<glyph unicode="&#xe226;" d="M641 900l423 247q19 8 42 2.5t37 -21.5l32 -38q14 -15 12.5 -36t-17.5 -34l-139 -120h-390zM50 1100h106q67 0 103 -17t66 -71l102 -212h823q21 0 35.5 -14.5t14.5 -35.5v-50q0 -21 -14 -40t-33 -26l-737 -132q-23 -4 -40 6t-26 25q-42 67 -100 67h-300q-62 0 -106 44 t-44 106v200q0 62 44 106t106 44zM173 928h-80q-19 0 -28 -14t-9 -35v-56q0 -51 42 -51h134q16 0 21.5 8t5.5 24q0 11 -16 45t-27 51q-18 28 -43 28zM550 727q-32 0 -54.5 -22.5t-22.5 -54.5t22.5 -54.5t54.5 -22.5t54.5 22.5t22.5 54.5t-22.5 54.5t-54.5 22.5zM130 389 l152 130q18 19 34 24t31 -3.5t24.5 -17.5t25.5 -28q28 -35 50.5 -51t48.5 -13l63 5l48 -179q13 -61 -3.5 -97.5t-67.5 -79.5l-80 -69q-47 -40 -109 -35.5t-103 51.5l-130 151q-40 47 -35.5 109.5t51.5 102.5zM380 377l-102 -88q-31 -27 2 -65l37 -43q13 -15 27.5 -19.5 t31.5 6.5l61 53q19 16 14 49q-2 20 -12 56t-17 45q-11 12 -19 14t-23 -8z" />
+<glyph unicode="&#xe227;" d="M625 1200h150q10 0 17.5 -7.5t7.5 -17.5v-109q79 -33 131 -87.5t53 -128.5q1 -46 -15 -84.5t-39 -61t-46 -38t-39 -21.5l-17 -6q6 0 15 -1.5t35 -9t50 -17.5t53 -30t50 -45t35.5 -64t14.5 -84q0 -59 -11.5 -105.5t-28.5 -76.5t-44 -51t-49.5 -31.5t-54.5 -16t-49.5 -6.5 t-43.5 -1v-75q0 -10 -7.5 -17.5t-17.5 -7.5h-150q-10 0 -17.5 7.5t-7.5 17.5v75h-100v-75q0 -10 -7.5 -17.5t-17.5 -7.5h-150q-10 0 -17.5 7.5t-7.5 17.5v75h-175q-10 0 -17.5 7.5t-7.5 17.5v150q0 10 7.5 17.5t17.5 7.5h75v600h-75q-10 0 -17.5 7.5t-7.5 17.5v150 q0 10 7.5 17.5t17.5 7.5h175v75q0 10 7.5 17.5t17.5 7.5h150q10 0 17.5 -7.5t7.5 -17.5v-75h100v75q0 10 7.5 17.5t17.5 7.5zM400 900v-200h263q28 0 48.5 10.5t30 25t15 29t5.5 25.5l1 10q0 4 -0.5 11t-6 24t-15 30t-30 24t-48.5 11h-263zM400 500v-200h363q28 0 48.5 10.5 t30 25t15 29t5.5 25.5l1 10q0 4 -0.5 11t-6 24t-15 30t-30 24t-48.5 11h-363z" />
+<glyph unicode="&#xe230;" d="M212 1198h780q86 0 147 -61t61 -147v-416q0 -51 -18 -142.5t-36 -157.5l-18 -66q-29 -87 -93.5 -146.5t-146.5 -59.5h-572q-82 0 -147 59t-93 147q-8 28 -20 73t-32 143.5t-20 149.5v416q0 86 61 147t147 61zM600 1045q-70 0 -132.5 -11.5t-105.5 -30.5t-78.5 -41.5 t-57 -45t-36 -41t-20.5 -30.5l-6 -12l156 -243h560l156 243q-2 5 -6 12.5t-20 29.5t-36.5 42t-57 44.5t-79 42t-105 29.5t-132.5 12zM762 703h-157l195 261z" />
+<glyph unicode="&#xe231;" d="M475 1300h150q103 0 189 -86t86 -189v-500q0 -41 -42 -83t-83 -42h-450q-41 0 -83 42t-42 83v500q0 103 86 189t189 86zM700 300v-225q0 -21 -27 -48t-48 -27h-150q-21 0 -48 27t-27 48v225h300z" />
+<glyph unicode="&#xe232;" d="M475 1300h96q0 -150 89.5 -239.5t239.5 -89.5v-446q0 -41 -42 -83t-83 -42h-450q-41 0 -83 42t-42 83v500q0 103 86 189t189 86zM700 300v-225q0 -21 -27 -48t-48 -27h-150q-21 0 -48 27t-27 48v225h300z" />
+<glyph unicode="&#xe233;" d="M1294 767l-638 -283l-378 170l-78 -60v-224l100 -150v-199l-150 148l-150 -149v200l100 150v250q0 4 -0.5 10.5t0 9.5t1 8t3 8t6.5 6l47 40l-147 65l642 283zM1000 380l-350 -166l-350 166v147l350 -165l350 165v-147z" />
+<glyph unicode="&#xe234;" d="M250 800q62 0 106 -44t44 -106t-44 -106t-106 -44t-106 44t-44 106t44 106t106 44zM650 800q62 0 106 -44t44 -106t-44 -106t-106 -44t-106 44t-44 106t44 106t106 44zM1050 800q62 0 106 -44t44 -106t-44 -106t-106 -44t-106 44t-44 106t44 106t106 44z" />
+<glyph unicode="&#xe235;" d="M550 1100q62 0 106 -44t44 -106t-44 -106t-106 -44t-106 44t-44 106t44 106t106 44zM550 700q62 0 106 -44t44 -106t-44 -106t-106 -44t-106 44t-44 106t44 106t106 44zM550 300q62 0 106 -44t44 -106t-44 -106t-106 -44t-106 44t-44 106t44 106t106 44z" />
+<glyph unicode="&#xe236;" d="M125 1100h950q10 0 17.5 -7.5t7.5 -17.5v-150q0 -10 -7.5 -17.5t-17.5 -7.5h-950q-10 0 -17.5 7.5t-7.5 17.5v150q0 10 7.5 17.5t17.5 7.5zM125 700h950q10 0 17.5 -7.5t7.5 -17.5v-150q0 -10 -7.5 -17.5t-17.5 -7.5h-950q-10 0 -17.5 7.5t-7.5 17.5v150q0 10 7.5 17.5 t17.5 7.5zM125 300h950q10 0 17.5 -7.5t7.5 -17.5v-150q0 -10 -7.5 -17.5t-17.5 -7.5h-950q-10 0 -17.5 7.5t-7.5 17.5v150q0 10 7.5 17.5t17.5 7.5z" />
+<glyph unicode="&#xe237;" d="M350 1200h500q162 0 256 -93.5t94 -256.5v-500q0 -165 -93.5 -257.5t-256.5 -92.5h-500q-165 0 -257.5 92.5t-92.5 257.5v500q0 165 92.5 257.5t257.5 92.5zM900 1000h-600q-41 0 -70.5 -29.5t-29.5 -70.5v-600q0 -41 29.5 -70.5t70.5 -29.5h600q41 0 70.5 29.5 t29.5 70.5v600q0 41 -29.5 70.5t-70.5 29.5zM350 900h500q21 0 35.5 -14.5t14.5 -35.5v-300q0 -21 -14.5 -35.5t-35.5 -14.5h-500q-21 0 -35.5 14.5t-14.5 35.5v300q0 21 14.5 35.5t35.5 14.5zM400 800v-200h400v200h-400z" />
+<glyph unicode="&#xe238;" d="M150 1100h1000q21 0 35.5 -14.5t14.5 -35.5t-14.5 -35.5t-35.5 -14.5h-50v-200h50q21 0 35.5 -14.5t14.5 -35.5t-14.5 -35.5t-35.5 -14.5h-50v-200h50q21 0 35.5 -14.5t14.5 -35.5t-14.5 -35.5t-35.5 -14.5h-50v-200h50q21 0 35.5 -14.5t14.5 -35.5t-14.5 -35.5 t-35.5 -14.5h-1000q-21 0 -35.5 14.5t-14.5 35.5t14.5 35.5t35.5 14.5h50v200h-50q-21 0 -35.5 14.5t-14.5 35.5t14.5 35.5t35.5 14.5h50v200h-50q-21 0 -35.5 14.5t-14.5 35.5t14.5 35.5t35.5 14.5h50v200h-50q-21 0 -35.5 14.5t-14.5 35.5t14.5 35.5t35.5 14.5z" />
+<glyph unicode="&#xe239;" d="M650 1187q87 -67 118.5 -156t0 -178t-118.5 -155q-87 66 -118.5 155t0 178t118.5 156zM300 800q124 0 212 -88t88 -212q-124 0 -212 88t-88 212zM1000 800q0 -124 -88 -212t-212 -88q0 124 88 212t212 88zM300 500q124 0 212 -88t88 -212q-124 0 -212 88t-88 212z M1000 500q0 -124 -88 -212t-212 -88q0 124 88 212t212 88zM700 199v-144q0 -21 -14.5 -35.5t-35.5 -14.5t-35.5 14.5t-14.5 35.5v142q40 -4 43 -4q17 0 57 6z" />
+<glyph unicode="&#xe240;" d="M745 878l69 19q25 6 45 -12l298 -295q11 -11 15 -26.5t-2 -30.5q-5 -14 -18 -23.5t-28 -9.5h-8q1 0 1 -13q0 -29 -2 -56t-8.5 -62t-20 -63t-33 -53t-51 -39t-72.5 -14h-146q-184 0 -184 288q0 24 10 47q-20 4 -62 4t-63 -4q11 -24 11 -47q0 -288 -184 -288h-142 q-48 0 -84.5 21t-56 51t-32 71.5t-16 75t-3.5 68.5q0 13 2 13h-7q-15 0 -27.5 9.5t-18.5 23.5q-6 15 -2 30.5t15 25.5l298 296q20 18 46 11l76 -19q20 -5 30.5 -22.5t5.5 -37.5t-22.5 -31t-37.5 -5l-51 12l-182 -193h891l-182 193l-44 -12q-20 -5 -37.5 6t-22.5 31t6 37.5 t31 22.5z" />
+<glyph unicode="&#xe241;" d="M1200 900h-50q0 21 -4 37t-9.5 26.5t-18 17.5t-22 11t-28.5 5.5t-31 2t-37 0.5h-200v-850q0 -22 25 -34.5t50 -13.5l25 -2v-100h-400v100q4 0 11 0.5t24 3t30 7t24 15t11 24.5v850h-200q-25 0 -37 -0.5t-31 -2t-28.5 -5.5t-22 -11t-18 -17.5t-9.5 -26.5t-4 -37h-50v300 h1000v-300zM500 450h-25q0 15 -4 24.5t-9 14.5t-17 7.5t-20 3t-25 0.5h-100v-425q0 -11 12.5 -17.5t25.5 -7.5h12v-50h-200v50q50 0 50 25v425h-100q-17 0 -25 -0.5t-20 -3t-17 -7.5t-9 -14.5t-4 -24.5h-25v150h500v-150z" />
+<glyph unicode="&#xe242;" d="M1000 300v50q-25 0 -55 32q-14 14 -25 31t-16 27l-4 11l-289 747h-69l-300 -754q-18 -35 -39 -56q-9 -9 -24.5 -18.5t-26.5 -14.5l-11 -5v-50h273v50q-49 0 -78.5 21.5t-11.5 67.5l69 176h293l61 -166q13 -34 -3.5 -66.5t-55.5 -32.5v-50h312zM412 691l134 342l121 -342 h-255zM1100 150v-100q0 -21 -14.5 -35.5t-35.5 -14.5h-1000q-21 0 -35.5 14.5t-14.5 35.5v100q0 21 14.5 35.5t35.5 14.5h1000q21 0 35.5 -14.5t14.5 -35.5z" />
+<glyph unicode="&#xe243;" d="M50 1200h1100q21 0 35.5 -14.5t14.5 -35.5v-1100q0 -21 -14.5 -35.5t-35.5 -14.5h-1100q-21 0 -35.5 14.5t-14.5 35.5v1100q0 21 14.5 35.5t35.5 14.5zM611 1118h-70q-13 0 -18 -12l-299 -753q-17 -32 -35 -51q-18 -18 -56 -34q-12 -5 -12 -18v-50q0 -8 5.5 -14t14.5 -6 h273q8 0 14 6t6 14v50q0 8 -6 14t-14 6q-55 0 -71 23q-10 14 0 39l63 163h266l57 -153q11 -31 -6 -55q-12 -17 -36 -17q-8 0 -14 -6t-6 -14v-50q0 -8 6 -14t14 -6h313q8 0 14 6t6 14v50q0 7 -5.5 13t-13.5 7q-17 0 -42 25q-25 27 -40 63h-1l-288 748q-5 12 -19 12zM639 611 h-197l103 264z" />
+<glyph unicode="&#xe244;" d="M1200 1100h-1200v100h1200v-100zM50 1000h400q21 0 35.5 -14.5t14.5 -35.5v-900q0 -21 -14.5 -35.5t-35.5 -14.5h-400q-21 0 -35.5 14.5t-14.5 35.5v900q0 21 14.5 35.5t35.5 14.5zM650 1000h400q21 0 35.5 -14.5t14.5 -35.5v-400q0 -21 -14.5 -35.5t-35.5 -14.5h-400 q-21 0 -35.5 14.5t-14.5 35.5v400q0 21 14.5 35.5t35.5 14.5zM700 900v-300h300v300h-300z" />
+<glyph unicode="&#xe245;" d="M50 1200h400q21 0 35.5 -14.5t14.5 -35.5v-900q0 -21 -14.5 -35.5t-35.5 -14.5h-400q-21 0 -35.5 14.5t-14.5 35.5v900q0 21 14.5 35.5t35.5 14.5zM650 700h400q21 0 35.5 -14.5t14.5 -35.5v-400q0 -21 -14.5 -35.5t-35.5 -14.5h-400q-21 0 -35.5 14.5t-14.5 35.5v400 q0 21 14.5 35.5t35.5 14.5zM700 600v-300h300v300h-300zM1200 0h-1200v100h1200v-100z" />
+<glyph unicode="&#xe246;" d="M50 1000h400q21 0 35.5 -14.5t14.5 -35.5v-350h100v150q0 21 14.5 35.5t35.5 14.5h400q21 0 35.5 -14.5t14.5 -35.5v-150h100v-100h-100v-150q0 -21 -14.5 -35.5t-35.5 -14.5h-400q-21 0 -35.5 14.5t-14.5 35.5v150h-100v-350q0 -21 -14.5 -35.5t-35.5 -14.5h-400 q-21 0 -35.5 14.5t-14.5 35.5v800q0 21 14.5 35.5t35.5 14.5zM700 700v-300h300v300h-300z" />
+<glyph unicode="&#xe247;" d="M100 0h-100v1200h100v-1200zM250 1100h400q21 0 35.5 -14.5t14.5 -35.5v-400q0 -21 -14.5 -35.5t-35.5 -14.5h-400q-21 0 -35.5 14.5t-14.5 35.5v400q0 21 14.5 35.5t35.5 14.5zM300 1000v-300h300v300h-300zM250 500h900q21 0 35.5 -14.5t14.5 -35.5v-400 q0 -21 -14.5 -35.5t-35.5 -14.5h-900q-21 0 -35.5 14.5t-14.5 35.5v400q0 21 14.5 35.5t35.5 14.5z" />
+<glyph unicode="&#xe248;" d="M600 1100h150q21 0 35.5 -14.5t14.5 -35.5v-400q0 -21 -14.5 -35.5t-35.5 -14.5h-150v-100h450q21 0 35.5 -14.5t14.5 -35.5v-400q0 -21 -14.5 -35.5t-35.5 -14.5h-900q-21 0 -35.5 14.5t-14.5 35.5v400q0 21 14.5 35.5t35.5 14.5h350v100h-150q-21 0 -35.5 14.5 t-14.5 35.5v400q0 21 14.5 35.5t35.5 14.5h150v100h100v-100zM400 1000v-300h300v300h-300z" />
+<glyph unicode="&#xe249;" d="M1200 0h-100v1200h100v-1200zM550 1100h400q21 0 35.5 -14.5t14.5 -35.5v-400q0 -21 -14.5 -35.5t-35.5 -14.5h-400q-21 0 -35.5 14.5t-14.5 35.5v400q0 21 14.5 35.5t35.5 14.5zM600 1000v-300h300v300h-300zM50 500h900q21 0 35.5 -14.5t14.5 -35.5v-400 q0 -21 -14.5 -35.5t-35.5 -14.5h-900q-21 0 -35.5 14.5t-14.5 35.5v400q0 21 14.5 35.5t35.5 14.5z" />
+<glyph unicode="&#xe250;" d="M865 565l-494 -494q-23 -23 -41 -23q-14 0 -22 13.5t-8 38.5v1000q0 25 8 38.5t22 13.5q18 0 41 -23l494 -494q14 -14 14 -35t-14 -35z" />
+<glyph unicode="&#xe251;" d="M335 635l494 494q29 29 50 20.5t21 -49.5v-1000q0 -41 -21 -49.5t-50 20.5l-494 494q-14 14 -14 35t14 35z" />
+<glyph unicode="&#xe252;" d="M100 900h1000q41 0 49.5 -21t-20.5 -50l-494 -494q-14 -14 -35 -14t-35 14l-494 494q-29 29 -20.5 50t49.5 21z" />
+<glyph unicode="&#xe253;" d="M635 865l494 -494q29 -29 20.5 -50t-49.5 -21h-1000q-41 0 -49.5 21t20.5 50l494 494q14 14 35 14t35 -14z" />
+<glyph unicode="&#xe254;" d="M700 741v-182l-692 -323v221l413 193l-413 193v221zM1200 0h-800v200h800v-200z" />
+<glyph unicode="&#xe255;" d="M1200 900h-200v-100h200v-100h-300v300h200v100h-200v100h300v-300zM0 700h50q0 21 4 37t9.5 26.5t18 17.5t22 11t28.5 5.5t31 2t37 0.5h100v-550q0 -22 -25 -34.5t-50 -13.5l-25 -2v-100h400v100q-4 0 -11 0.5t-24 3t-30 7t-24 15t-11 24.5v550h100q25 0 37 -0.5t31 -2 t28.5 -5.5t22 -11t18 -17.5t9.5 -26.5t4 -37h50v300h-800v-300z" />
+<glyph unicode="&#xe256;" d="M800 700h-50q0 21 -4 37t-9.5 26.5t-18 17.5t-22 11t-28.5 5.5t-31 2t-37 0.5h-100v-550q0 -22 25 -34.5t50 -14.5l25 -1v-100h-400v100q4 0 11 0.5t24 3t30 7t24 15t11 24.5v550h-100q-25 0 -37 -0.5t-31 -2t-28.5 -5.5t-22 -11t-18 -17.5t-9.5 -26.5t-4 -37h-50v300 h800v-300zM1100 200h-200v-100h200v-100h-300v300h200v100h-200v100h300v-300z" />
+<glyph unicode="&#xe257;" d="M701 1098h160q16 0 21 -11t-7 -23l-464 -464l464 -464q12 -12 7 -23t-21 -11h-160q-13 0 -23 9l-471 471q-7 8 -7 18t7 18l471 471q10 9 23 9z" />
+<glyph unicode="&#xe258;" d="M339 1098h160q13 0 23 -9l471 -471q7 -8 7 -18t-7 -18l-471 -471q-10 -9 -23 -9h-160q-16 0 -21 11t7 23l464 464l-464 464q-12 12 -7 23t21 11z" />
+<glyph unicode="&#xe259;" d="M1087 882q11 -5 11 -21v-160q0 -13 -9 -23l-471 -471q-8 -7 -18 -7t-18 7l-471 471q-9 10 -9 23v160q0 16 11 21t23 -7l464 -464l464 464q12 12 23 7z" />
+<glyph unicode="&#xe260;" d="M618 993l471 -471q9 -10 9 -23v-160q0 -16 -11 -21t-23 7l-464 464l-464 -464q-12 -12 -23 -7t-11 21v160q0 13 9 23l471 471q8 7 18 7t18 -7z" />
+<glyph unicode="&#xf8ff;" d="M1000 1200q0 -124 -88 -212t-212 -88q0 124 88 212t212 88zM450 1000h100q21 0 40 -14t26 -33l79 -194q5 1 16 3q34 6 54 9.5t60 7t65.5 1t61 -10t56.5 -23t42.5 -42t29 -64t5 -92t-19.5 -121.5q-1 -7 -3 -19.5t-11 -50t-20.5 -73t-32.5 -81.5t-46.5 -83t-64 -70 t-82.5 -50q-13 -5 -42 -5t-65.5 2.5t-47.5 2.5q-14 0 -49.5 -3.5t-63 -3.5t-43.5 7q-57 25 -104.5 78.5t-75 111.5t-46.5 112t-26 90l-7 35q-15 63 -18 115t4.5 88.5t26 64t39.5 43.5t52 25.5t58.5 13t62.5 2t59.5 -4.5t55.5 -8l-147 192q-12 18 -5.5 30t27.5 12z" />
+<glyph unicode="&#x1f511;" d="M250 1200h600q21 0 35.5 -14.5t14.5 -35.5v-400q0 -21 -14.5 -35.5t-35.5 -14.5h-150v-500l-255 -178q-19 -9 -32 -1t-13 29v650h-150q-21 0 -35.5 14.5t-14.5 35.5v400q0 21 14.5 35.5t35.5 14.5zM400 1100v-100h300v100h-300z" />
+<glyph unicode="&#x1f6aa;" d="M250 1200h750q39 0 69.5 -40.5t30.5 -84.5v-933l-700 -117v950l600 125h-700v-1000h-100v1025q0 23 15.5 49t34.5 26zM500 525v-100l100 20v100z" />
+</font>
+</defs></svg>
diff --git a/web/gui/fonts/glyphicons-halflings-regular.ttf b/web/gui/fonts/glyphicons-halflings-regular.ttf
new file mode 100644
index 0000000..1413fc6
--- /dev/null
+++ b/web/gui/fonts/glyphicons-halflings-regular.ttf
Binary files differ
diff --git a/web/gui/fonts/glyphicons-halflings-regular.woff b/web/gui/fonts/glyphicons-halflings-regular.woff
new file mode 100644
index 0000000..9e61285
--- /dev/null
+++ b/web/gui/fonts/glyphicons-halflings-regular.woff
Binary files differ
diff --git a/web/gui/fonts/glyphicons-halflings-regular.woff2 b/web/gui/fonts/glyphicons-halflings-regular.woff2
new file mode 100644
index 0000000..64539b5
--- /dev/null
+++ b/web/gui/fonts/glyphicons-halflings-regular.woff2
Binary files differ
diff --git a/web/gui/goto-host-from-alarm.html b/web/gui/goto-host-from-alarm.html
new file mode 100644
index 0000000..7d12f84
--- /dev/null
+++ b/web/gui/goto-host-from-alarm.html
@@ -0,0 +1,280 @@
+<!DOCTYPE html>
+<!-- SPDX-License-Identifier: GPL-3.0-or-later -->
+<html lang="en">
+<head>
+ <script>(function(w,d,s,l,i){w[l]=w[l]||[];w[l].push({'gtm.start':
+ new Date().getTime(),event:'gtm.js'});var f=d.getElementsByTagName(s)[0],
+ j=d.createElement(s),dl=l!='dataLayer'?'&l='+l:'';j.async=true;j.src=
+ 'https://www.googletagmanager.com/gtm.js?id='+i+dl;f.parentNode.insertBefore(j,f);
+ })(window,document,'script','dataLayer','GTM-N6CBMJD');
+ dataLayer.push({"anonymous_statistics" : "false"});
+ </script>
+    <title>Go to a host you know...</title>
+ <meta name="application-name" content="netdata">
+ <meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
+ <meta charset="utf-8">
+ <meta http-equiv="X-UA-Compatible" content="IE=edge,chrome=1">
+ <meta name="viewport" content="width=device-width, initial-scale=1">
+ <meta name="apple-mobile-web-app-capable" content="yes">
+ <meta name="apple-mobile-web-app-status-bar-style" content="black-translucent">
+</head>
+<script>
+ var netdataRegistry = true;
+ var netdataRegistryAfterMs = 0;
+ var netdataTheme = 'slate';
+ var netdataShowHelp = true;
+</script>
+<script type="text/javascript" src="dashboard.js?v20190902-0"></script>
+
+<script>
+function escapeUserInputHTML(s) {
+ return s.toString()
+ .replace(/&/g, '&amp;')
+ .replace(/</g, '&lt;')
+ .replace(/>/g, '&gt;')
+ .replace(/"/g, '&quot;')
+ .replace(/#/g, '&#35;')
+ .replace(/'/g, '&#39;')
+ .replace(/\(/g,'&#40;')
+ .replace(/\)/g,'&#41;')
+ .replace(/\//g,'&#47;');
+}
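+
+// escapeUserInputHTML() above is applied to registry-supplied strings (host
+// names, URLs) before they are interpolated into innerHTML below, while
+// verifyURL() guards strings used as link targets (href, document.location).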
+
+// if string.startsWith is not defined, define it
+if(typeof String.prototype.startsWith !== 'function') {
+ String.prototype.startsWith = function(s) {
+ if(s.length > this.length) return false;
+        return this.slice(0, s.length) === s;
+ };
+}
+
+function verifyURL(s) {
+ if(typeof(s) === 'string' && (s.startsWith('http://') || s.startsWith('https://')))
+ return s
+        .replace(/'/g, '%27')
+        .replace(/"/g, '%22')
+        .replace(/\(/g, '%28')
+        .replace(/\)/g, '%29');
+
+ console.log('invalid URL detected:');
+ console.log(s);
+ return 'javascript:alert("invalid url");';
+}
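+
+// Illustrative examples (not part of the original source):
+//   verifyURL('http://host:19999/a(1)') -> 'http://host:19999/a%281%29'
+//   verifyURL('ftp://host/')            -> the fallback alert string, since
+//                                          only http:// and https:// pass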
+
+var urlOptions = {
+ host: null,
+ chart: null,
+ family: null,
+ alarm: null,
+ alarm_unique_id: 0,
+ alarm_id: 0,
+ alarm_event_id: 0,
+ alarm_when: 0,
+ hasProperty: function(property) {
+ return typeof this[property] !== 'undefined';
+ }
+};
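+
+// The urlOptions keys double as the accepted query-string parameter names:
+// netdataQueryParse() below only copies parameters that already exist here.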
+
+function netdataQueryParse() {
+ var query = document.location.search.split('?');
+ var variables = query[1].split('&');
+ var len = variables.length;
+ while(len--) {
+ var p = variables[len].split('=');
+ if(urlOptions.hasProperty(p[0]) && typeof p[1] !== 'undefined')
+ urlOptions[p[0]] = decodeURIComponent(p[1]);
+ }
+
+ if(typeof urlOptions.family !== 'string')
+ urlOptions.family = '';
+
+ if(typeof urlOptions.chart !== 'string')
+ urlOptions.chart = '';
+}
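+
+// Example (illustrative): '?host=myhost&alarm=cpu_usage&alarm_id=5' fills in
+// urlOptions.host, urlOptions.alarm and urlOptions.alarm_id; unknown keys are ignored.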
+
+function netdataURL(url) {
+ return url + '#top'
+ + ';nowelcome=1'
+ // + ';show_alarms=1'
+ + ';chart=' + encodeURIComponent(urlOptions.chart)
+ + ';family=' + encodeURIComponent(urlOptions.family)
+ + ';alarm=' + encodeURIComponent(urlOptions.alarm)
+ + ';alarm_unique_id=' + urlOptions.alarm_unique_id.toString()
+ + ';alarm_id=' + urlOptions.alarm_id.toString()
+ + ';alarm_event_id=' + urlOptions.alarm_event_id.toString()
+ + ';alarm_when=' + urlOptions.alarm_when.toString()
+ ;
+}
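+
+// netdataURL('http://host:19999/') produces a dashboard link of the form
+// (illustrative): 'http://host:19999/#top;nowelcome=1;chart=...;alarm=...'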
+
+var gotoServerValidateRemaining = 0;
+var gotoServerMiddleClick = false;
+var gotoServerStop = false;
+var thisIsHttps = false;
+var urlsInHttp = 0;
+function gotoServerValidateUrl(id, guid, url) {
+    var penalty = 0;
+ var error = 'failed';
+
+ if(thisIsHttps === false && url.toString().startsWith('https://'))
+        // we penalize https only if the current url is http,
+        // to allow the user to walk through all of their servers.
+        penalty = 500;
+
+ else if(thisIsHttps === true && url.toString().startsWith('http://')) {
+ error = 'can\'t check';
+ urlsInHttp++;
+ }
+
+ var finalURL = netdataURL(url);
+
+ setTimeout(function() {
+ document.getElementById('gotoServerList').innerHTML += '<tr><td style="padding-left: 20px;"><a href="' + verifyURL(finalURL) + '" target="_blank">' + escapeUserInputHTML(url) + '</a></td><td style="padding-left: 30px;"><code id="' + guid + '-' + id + '-status">checking...</code></td></tr>';
+
+ NETDATA.registry.hello(url, function(data) {
+ if(typeof data !== 'undefined' && data !== null && typeof data.machine_guid === 'string' && data.machine_guid === guid) {
+ // console.log('OK ' + id + ' URL: ' + url);
+ document.getElementById(guid + '-' + id + '-status').innerHTML = "OK";
+
+ if(!gotoServerStop) {
+ gotoServerStop = true;
+
+ if(gotoServerMiddleClick) {
+ window.open(finalURL);
+ gotoServerMiddleClick = false;
+ document.getElementById('gotoServerResponse').innerHTML = '<b>Opening new window to ' + NETDATA.registry.machines[guid].name + '<br/><a href="' + verifyURL(finalURL) + '">' + escapeUserInputHTML(url) + '</a></b><br/>(check your pop-up blocker if it fails)';
+ }
+ else {
+ document.getElementById('gotoServerResponse').innerHTML += 'found it! It is at:<br/><small>' + escapeUserInputHTML(url) + '</small>';
+ document.location = verifyURL(finalURL);
+ }
+ }
+ }
+ else {
+ if(typeof data !== 'undefined' && data !== null && typeof data.machine_guid === 'string' && data.machine_guid !== guid)
+ error = 'wrong machine';
+
+ document.getElementById(guid + '-' + id + '-status').innerHTML = error;
+ gotoServerValidateRemaining--;
+ if(gotoServerValidateRemaining <= 0) {
+ gotoServerMiddleClick = false;
+ document.getElementById('gotoServerResponse').innerHTML = '<b>Sorry! I cannot find any operational URL for this server</b>';
+
+ if(thisIsHttps === true && urlsInHttp > 0) {
+ document.getElementById('gotoServerResponse').innerHTML += '<br/>redirecting myself to HTTP to allow checking';
+ document.location = verifyURL(document.location.toString().replace('https://', 'http://'));
+ }
+ }
+ }
+ });
+    }, (id * 50) + penalty);
+}
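+
+// Validation strategy (summary): every candidate URL is probed with
+// NETDATA.registry.hello(), staggered by 50ms per URL. When this page is
+// served over http, https candidates get an extra 500ms penalty so that
+// same-scheme URLs are tried first; when served over https, plain-http
+// candidates cannot be verified (mixed content) and are counted in
+// urlsInHttp. The first URL whose hello response carries the expected
+// machine_guid wins and the page navigates there.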
+
+var netdataRegistryCallback = function(machines_array) {
+ if(typeof urlOptions.host !== 'string') {
+ document.getElementById('bodylog').innerHTML = "Sorry... bad request.";
+ return;
+ }
+
+    document.getElementById('message').innerHTML = 'These are the URLs this machine is known by:';
+
+ if(document.location.toString().startsWith('https://'))
+ thisIsHttps = true;
+
+ if(machines_array) {
+ var guids = {};
+ var checked = {};
+ var len = machines_array.length;
+ var count = 0;
+
+ while(len--) {
+ if(machines_array[len].name === urlOptions.host) {
+ var ulen = machines_array[len].alternate_urls.length;
+ var guid = machines_array[len].guid;
+ guids[guid] = true;
+
+ gotoServerValidateRemaining = ulen;
+ while(ulen--) {
+ var url = machines_array[len].alternate_urls[ulen];
+ checked[url] = true;
+ gotoServerValidateUrl(count++, guid, url);
+ }
+
+ setTimeout(function() {
+ if(gotoServerStop === false) {
+ document.getElementById('gotoServerResponse').innerHTML = '<b>Added all the known URLs for this machine.</b>';
+ var guid;
+ for(guid in guids) {
+ NETDATA.registry.search(guid, function(data) {
+ // console.log(data);
+ len = data.urls.length;
+ while(len--) {
+ var url = data.urls[len][1];
+ // console.log(url);
+ if(typeof checked[url] === 'undefined') {
+ gotoServerValidateRemaining++;
+ checked[url] = true;
+ gotoServerValidateUrl(count++, guid, url);
+ }
+ }
+ });
+ }
+ }
+ }, 2000);
+
+ return false;
+ }
+ }
+ }
+
+ document.getElementById('bodylog').innerHTML = "Sorry... your account is not linked to a netdata server named: <b>" + escapeUserInputHTML(urlOptions.host) + '</b>';
+};
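+
+// If no URL is confirmed within 2 seconds, netdataRegistryCallback falls back
+// to NETDATA.registry.search() to probe any registry URLs not already checked.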
+
+netdataQueryParse();
+</script>
+<body>
+<div class="container">
+ <div id="bodylog" style="padding-top: 8vmax; font-size: 2.0vmax;">
+ <span id="message">Please wait...</span>
+
+ <div style="padding-top: 20px;">
+ <table id="gotoServerList" class="table">
+ </table>
+ </div>
+ <p style="padding-top: 10px;"><small>
+        This page can only find netdata URLs that you have already visited and that are linked to your account on this netdata registry.
+ </small></p>
+ <div id="gotoServerResponse" style="display: block; width: 100%; text-align: center; padding-top: 20px;"></div>
+ </div>
+
+</div>
+</body>
+</html>
diff --git a/web/gui/images/alert-128-orange.png b/web/gui/images/alert-128-orange.png
new file mode 100644
index 0000000..c6182bf
--- /dev/null
+++ b/web/gui/images/alert-128-orange.png
Binary files differ
diff --git a/web/gui/images/alert-128-red.png b/web/gui/images/alert-128-red.png
new file mode 100644
index 0000000..90b9c73
--- /dev/null
+++ b/web/gui/images/alert-128-red.png
Binary files differ
diff --git a/web/gui/images/alert-multi-size-orange.ico b/web/gui/images/alert-multi-size-orange.ico
new file mode 100644
index 0000000..edca438
--- /dev/null
+++ b/web/gui/images/alert-multi-size-orange.ico
Binary files differ
diff --git a/web/gui/images/alert-multi-size-red.ico b/web/gui/images/alert-multi-size-red.ico
new file mode 100644
index 0000000..8f7cbd0
--- /dev/null
+++ b/web/gui/images/alert-multi-size-red.ico
Binary files differ
diff --git a/web/gui/images/android-icon-144x144.png b/web/gui/images/android-icon-144x144.png
new file mode 100644
index 0000000..69efa5a
--- /dev/null
+++ b/web/gui/images/android-icon-144x144.png
Binary files differ
diff --git a/web/gui/images/android-icon-192x192.png b/web/gui/images/android-icon-192x192.png
new file mode 100644
index 0000000..e574435
--- /dev/null
+++ b/web/gui/images/android-icon-192x192.png
Binary files differ
diff --git a/web/gui/images/android-icon-36x36.png b/web/gui/images/android-icon-36x36.png
new file mode 100644
index 0000000..4ba804d
--- /dev/null
+++ b/web/gui/images/android-icon-36x36.png
Binary files differ
diff --git a/web/gui/images/android-icon-48x48.png b/web/gui/images/android-icon-48x48.png
new file mode 100644
index 0000000..04970d4
--- /dev/null
+++ b/web/gui/images/android-icon-48x48.png
Binary files differ
diff --git a/web/gui/images/android-icon-72x72.png b/web/gui/images/android-icon-72x72.png
new file mode 100644
index 0000000..5cbc701
--- /dev/null
+++ b/web/gui/images/android-icon-72x72.png
Binary files differ
diff --git a/web/gui/images/android-icon-96x96.png b/web/gui/images/android-icon-96x96.png
new file mode 100644
index 0000000..21f27ce
--- /dev/null
+++ b/web/gui/images/android-icon-96x96.png
Binary files differ
diff --git a/web/gui/images/animated.gif b/web/gui/images/animated.gif
new file mode 100644
index 0000000..0e94a20
--- /dev/null
+++ b/web/gui/images/animated.gif
Binary files differ
diff --git a/web/gui/images/apple-icon-114x114.png b/web/gui/images/apple-icon-114x114.png
new file mode 100644
index 0000000..7993e05
--- /dev/null
+++ b/web/gui/images/apple-icon-114x114.png
Binary files differ
diff --git a/web/gui/images/apple-icon-120x120.png b/web/gui/images/apple-icon-120x120.png
new file mode 100644
index 0000000..3fbe8fd
--- /dev/null
+++ b/web/gui/images/apple-icon-120x120.png
Binary files differ
diff --git a/web/gui/images/apple-icon-144x144.png b/web/gui/images/apple-icon-144x144.png
new file mode 100644
index 0000000..8d46569
--- /dev/null
+++ b/web/gui/images/apple-icon-144x144.png
Binary files differ
diff --git a/web/gui/images/apple-icon-152x152.png b/web/gui/images/apple-icon-152x152.png
new file mode 100644
index 0000000..11a1072
--- /dev/null
+++ b/web/gui/images/apple-icon-152x152.png
Binary files differ
diff --git a/web/gui/images/apple-icon-180x180.png b/web/gui/images/apple-icon-180x180.png
new file mode 100644
index 0000000..314efb1
--- /dev/null
+++ b/web/gui/images/apple-icon-180x180.png
Binary files differ
diff --git a/web/gui/images/apple-icon-57x57.png b/web/gui/images/apple-icon-57x57.png
new file mode 100644
index 0000000..8528361
--- /dev/null
+++ b/web/gui/images/apple-icon-57x57.png
Binary files differ
diff --git a/web/gui/images/apple-icon-60x60.png b/web/gui/images/apple-icon-60x60.png
new file mode 100644
index 0000000..2662e85
--- /dev/null
+++ b/web/gui/images/apple-icon-60x60.png
Binary files differ
diff --git a/web/gui/images/apple-icon-72x72.png b/web/gui/images/apple-icon-72x72.png
new file mode 100644
index 0000000..4a6b056
--- /dev/null
+++ b/web/gui/images/apple-icon-72x72.png
Binary files differ
diff --git a/web/gui/images/apple-icon-76x76.png b/web/gui/images/apple-icon-76x76.png
new file mode 100644
index 0000000..c2bf6c9
--- /dev/null
+++ b/web/gui/images/apple-icon-76x76.png
Binary files differ
diff --git a/web/gui/images/apple-icon-precomposed.png b/web/gui/images/apple-icon-precomposed.png
new file mode 100644
index 0000000..9c3e73e
--- /dev/null
+++ b/web/gui/images/apple-icon-precomposed.png
Binary files differ
diff --git a/web/gui/images/apple-icon.png b/web/gui/images/apple-icon.png
new file mode 100644
index 0000000..9c3e73e
--- /dev/null
+++ b/web/gui/images/apple-icon.png
Binary files differ
diff --git a/web/gui/images/banner-icon-144x144.png b/web/gui/images/banner-icon-144x144.png
new file mode 100644
index 0000000..fef3dca
--- /dev/null
+++ b/web/gui/images/banner-icon-144x144.png
Binary files differ
diff --git a/web/gui/images/check-mark-2-128-green.png b/web/gui/images/check-mark-2-128-green.png
new file mode 100644
index 0000000..e04ddca
--- /dev/null
+++ b/web/gui/images/check-mark-2-128-green.png
Binary files differ
diff --git a/web/gui/images/check-mark-2-multi-size-green.ico b/web/gui/images/check-mark-2-multi-size-green.ico
new file mode 100644
index 0000000..2fc4141
--- /dev/null
+++ b/web/gui/images/check-mark-2-multi-size-green.ico
Binary files differ
diff --git a/web/gui/images/favicon-128.png b/web/gui/images/favicon-128.png
new file mode 100644
index 0000000..5371f92
--- /dev/null
+++ b/web/gui/images/favicon-128.png
Binary files differ
diff --git a/web/gui/images/favicon-16x16.png b/web/gui/images/favicon-16x16.png
new file mode 100644
index 0000000..5729f5a
--- /dev/null
+++ b/web/gui/images/favicon-16x16.png
Binary files differ
diff --git a/web/gui/images/favicon-196x196.png b/web/gui/images/favicon-196x196.png
new file mode 100644
index 0000000..a208c27
--- /dev/null
+++ b/web/gui/images/favicon-196x196.png
Binary files differ
diff --git a/web/gui/images/favicon-32x32.png b/web/gui/images/favicon-32x32.png
new file mode 100644
index 0000000..cdb0a48
--- /dev/null
+++ b/web/gui/images/favicon-32x32.png
Binary files differ
diff --git a/web/gui/images/favicon-96x96.png b/web/gui/images/favicon-96x96.png
new file mode 100644
index 0000000..dbe7dea
--- /dev/null
+++ b/web/gui/images/favicon-96x96.png
Binary files differ
diff --git a/web/gui/images/favicon.ico b/web/gui/images/favicon.ico
new file mode 100644
index 0000000..064032a
--- /dev/null
+++ b/web/gui/images/favicon.ico
Binary files differ
diff --git a/web/gui/images/ms-icon-144x144.png b/web/gui/images/ms-icon-144x144.png
new file mode 100644
index 0000000..8d46569
--- /dev/null
+++ b/web/gui/images/ms-icon-144x144.png
Binary files differ
diff --git a/web/gui/images/ms-icon-150x150.png b/web/gui/images/ms-icon-150x150.png
new file mode 100644
index 0000000..4683d56
--- /dev/null
+++ b/web/gui/images/ms-icon-150x150.png
Binary files differ
diff --git a/web/gui/images/ms-icon-310x150.png b/web/gui/images/ms-icon-310x150.png
new file mode 100644
index 0000000..5d4ac57
--- /dev/null
+++ b/web/gui/images/ms-icon-310x150.png
Binary files differ
diff --git a/web/gui/images/ms-icon-310x310.png b/web/gui/images/ms-icon-310x310.png
new file mode 100644
index 0000000..bdb591b
--- /dev/null
+++ b/web/gui/images/ms-icon-310x310.png
Binary files differ
diff --git a/web/gui/images/ms-icon-36x36.png b/web/gui/images/ms-icon-36x36.png
new file mode 100644
index 0000000..e251302
--- /dev/null
+++ b/web/gui/images/ms-icon-36x36.png
Binary files differ
diff --git a/web/gui/images/ms-icon-70x70.png b/web/gui/images/ms-icon-70x70.png
new file mode 100644
index 0000000..5371f92
--- /dev/null
+++ b/web/gui/images/ms-icon-70x70.png
Binary files differ
diff --git a/web/gui/images/netdata-logomark.svg b/web/gui/images/netdata-logomark.svg
new file mode 100644
index 0000000..18152fb
--- /dev/null
+++ b/web/gui/images/netdata-logomark.svg
@@ -0,0 +1,8 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<svg width="500px" height="500px" viewBox="0 0 500 500" version="1.1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
+ <g id="Artboard" stroke="none" stroke-width="1" fill="none" fill-rule="evenodd">
+ <g id="logo_green_fill" transform="translate(0.000000, 49.000000)" fill="#00FF00" fill-rule="nonzero">
+ <path d="M307.477876,400.442478 L206.814159,400.442478 L0.486725664,2.21238938 L293.318584,2.21238938 C407.146221,2.33432589 499.391338,94.5794427 499.513274,208.40708 C499.391356,314.41476 413.485557,400.32056 307.477876,400.442478 L307.477876,400.442478 Z" id="Path"></path>
+ </g>
+ </g>
+</svg>
\ No newline at end of file
diff --git a/web/gui/images/netdata.svg b/web/gui/images/netdata.svg
new file mode 100644
index 0000000..f8ddbda
--- /dev/null
+++ b/web/gui/images/netdata.svg
@@ -0,0 +1,18 @@
+<?xml version="1.0" standalone="no"?>
+<!-- SPDX-License-Identifier: CC0-1.0 -->
+<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 20010904//EN" "http://www.w3.org/TR/2001/REC-SVG-20010904/DTD/svg10.dtd">
+<svg version="1.0" xmlns="http://www.w3.org/2000/svg"
+ width="16.000000pt" height="16.000000pt" viewBox="0 0 16.000000 16.000000"
+ preserveAspectRatio="xMidYMid meet">
+ <metadata>
+ Created by potrace 1.15, written by Peter Selinger 2001-2017
+ </metadata>
+ <g transform="translate(0.000000,16.000000) scale(0.003125,-0.003125)" fill="#000000" stroke="none">
+ <path d="M3643 5008 c-235 -199 -1100 -956 -1108 -968 -4 -6 103 -10 293 -10 165 0 314 -3 331 -6 41 -8 40 -23 -4 -194 -95 -361 -208 -619 -396 -899 -425 -633 -1052 -1047 -1776 -1172 -159 -27 -444 -36 -647 -19 -185 15 -206 11 -206 -30 0 -24 7 -33 39 -50 80 -40 389 -115 646 -155 139 -22 612 -32 781 -16 910 85 1765 550 2281 1239 227 303 365 582 479 971 26 88 74 299 74 327 0 2 127 4 283 4 l282 0 -45 43 c-25 23 -167 148 -315 277 -149 129 -355 309 -460 400 -298 261 -404 350 -414 350 -5 0 -58 -42 -118 -92z"/>
+ <path d="M4542 3770 c-74 -276 -162 -509 -266 -705 -73 -137 -190 -325 -270 -430 l-66 -88 0 -1269 0 -1268 320 0 320 0 -1 1773 c-1 974 -4 1840 -8 1922 l-6 150 -23 -85z"/>
+ <path d="M3555 2148 c-96 -82 -310 -238 -442 -323 l-123 -78 0 -868 0 -869 320 0 320 0 0 1095 c0 602 -3 1095 -7 1095 -5 0 -35 -23 -68 -52z"/>
+ <path d="M2599 1566 c-85 -36 -270 -95 -419 -136 l-135 -37 -3 -692 -2 -691 310 0 310 0 0 790 c0 435 -1 790 -2 790 -2 -1 -28 -11 -59 -24z"/>
+ <path d="M127 1403 c-4 -54 -7 -389 -7 -745 l0 -648 320 0 320 0 0 669 0 669 -117 21 c-127 24 -342 78 -437 111 -33 11 -62 20 -66 20 -4 0 -10 -44 -13 -97z"/>
+ <path d="M1635 1330 c-27 -5 -122 -9 -211 -9 -88 -1 -202 -4 -252 -7 l-92 -7 0 -648 0 -649 315 0 315 0 0 619 c0 577 -4 713 -19 710 -3 -1 -28 -5 -56 -9z"/>
+ </g>
+</svg>
diff --git a/web/gui/images/packaging-beta-tag.svg b/web/gui/images/packaging-beta-tag.svg
new file mode 100644
index 0000000..cebdc08
--- /dev/null
+++ b/web/gui/images/packaging-beta-tag.svg
@@ -0,0 +1,42 @@
+<svg width="127" height="16" viewBox="0 0 127 16" fill="none" xmlns="http://www.w3.org/2000/svg">
+<rect width="127" height="16" rx="2" fill="url(#paint0_linear)"/>
+<path d="M86 0H125C126.105 0 127 0.895431 127 2V14C127 15.1046 126.105 16 125 16H86V0Z" fill="url(#paint1_linear)"/>
+<path d="M6.5 4V10" stroke="white" stroke-linecap="round" stroke-linejoin="round"/>
+<path d="M12.5 7C13.3284 7 14 6.32843 14 5.5C14 4.67157 13.3284 4 12.5 4C11.6716 4 11 4.67157 11 5.5C11 6.32843 11.6716 7 12.5 7Z" stroke="white" stroke-linecap="round" stroke-linejoin="round"/>
+<path d="M6.5 13C7.32843 13 8 12.3284 8 11.5C8 10.6716 7.32843 10 6.5 10C5.67157 10 5 10.6716 5 11.5C5 12.3284 5.67157 13 6.5 13Z" stroke="white" stroke-linecap="round" stroke-linejoin="round"/>
+<path d="M12.5 7C12.5 8.19347 12.0259 9.33807 11.182 10.182C10.3381 11.0259 9.19347 11.5 8 11.5" stroke="white" stroke-linecap="round" stroke-linejoin="round"/>
+<g filter="url(#filter0_d)">
+<path d="M23.939 7.93848V11H22.9077V3.17969H25.792C26.6478 3.17969 27.3174 3.39811 27.8008 3.83496C28.2878 4.27181 28.5312 4.8501 28.5312 5.56982C28.5312 6.32894 28.2931 6.91439 27.8169 7.32617C27.3442 7.73438 26.6657 7.93848 25.7812 7.93848H23.939ZM23.939 7.09521H25.792C26.3434 7.09521 26.766 6.96631 27.0596 6.7085C27.3532 6.4471 27.5 6.07113 27.5 5.58057C27.5 5.11507 27.3532 4.74268 27.0596 4.46338C26.766 4.18408 26.3631 4.03906 25.8511 4.02832H23.939V7.09521ZM33.6548 11C33.5976 10.8854 33.551 10.6813 33.5152 10.3877C33.0533 10.8675 32.5018 11.1074 31.8609 11.1074C31.288 11.1074 30.8171 10.9463 30.4483 10.624C30.0831 10.2982 29.9004 9.88639 29.9004 9.38867C29.9004 8.78353 30.1296 8.31445 30.5879 7.98145C31.0499 7.64486 31.698 7.47656 32.5323 7.47656H33.4991V7.02002C33.4991 6.67269 33.3952 6.39697 33.1876 6.19287C32.9799 5.98519 32.6737 5.88135 32.2691 5.88135C31.9146 5.88135 31.6174 5.97087 31.3775 6.1499C31.1376 6.32894 31.0176 6.54557 31.0176 6.7998H30.0186C30.0186 6.50977 30.1207 6.23047 30.3248 5.96191C30.5324 5.68978 30.8117 5.47493 31.1627 5.31738C31.5171 5.15983 31.9057 5.08105 32.3282 5.08105C32.9978 5.08105 33.5224 5.24935 33.9019 5.58594C34.2815 5.91895 34.4784 6.37907 34.4927 6.96631V9.64111C34.4927 10.1746 34.5608 10.599 34.6968 10.9141V11H33.6548ZM32.0059 10.2427C32.3174 10.2427 32.6129 10.1621 32.8921 10.001C33.1714 9.83984 33.3738 9.63037 33.4991 9.37256V8.18018H32.7203C31.5028 8.18018 30.8941 8.53646 30.8941 9.24902C30.8941 9.56055 30.9979 9.80404 31.2056 9.97949C31.4133 10.1549 31.6801 10.2427 32.0059 10.2427ZM38.8214 10.2964C39.1759 10.2964 39.4856 10.189 39.7506 9.97412C40.0156 9.75928 40.1624 9.49072 40.191 9.16846H41.131C41.1131 9.50146 40.9985 9.81836 40.7872 10.1191C40.576 10.4199 40.2931 10.6598 39.9386 10.8389C39.5877 11.0179 39.2153 11.1074 38.8214 11.1074C38.0301 11.1074 37.3999 10.8442 36.9308 10.3179C36.4653 9.78792 36.2325 9.06462 36.2325 8.14795V7.98145C36.2325 7.41569 36.3364 6.9126 36.5441 6.47217C36.7517 6.03174 37.0489 5.68978 37.4357 5.44629C37.826 5.2028 38.2861 5.08105 38.816 5.08105C39.4677 5.08105 40.0084 5.2762 40.4381 5.6665C40.8714 6.0568 41.1023 6.56348 41.131 7.18652H40.191C40.1624 6.81055 40.0192 6.5026 39.7613 6.2627C39.5071 6.01921 39.192 5.89746 38.816 5.89746C38.3112 5.89746 37.9191 6.08008 37.6398 6.44531C37.364 6.80697 37.2262 7.33154 37.2262 8.01904V8.20703C37.2262 8.87663 37.364 9.39225 37.6398 9.75391C37.9155 10.1156 38.3094 10.2964 38.8214 10.2964ZM44.3102 8.30908L43.6872 8.95898V11H42.6935V2.75H43.6872V7.73975L44.2189 7.10059L46.029 5.18848H47.2375L44.9762 7.61621L47.5007 11H46.3351L44.3102 8.30908ZM52.2913 11C52.234 10.8854 52.1874 10.6813 52.1516 10.3877C51.6897 10.8675 51.1383 11.1074 50.4973 11.1074C49.9244 11.1074 49.4535 10.9463 49.0847 10.624C48.7195 10.2982 48.5369 9.88639 48.5369 9.38867C48.5369 8.78353 48.766 8.31445 49.2244 7.98145C49.6863 7.64486 50.3344 7.47656 51.1687 7.47656H52.1355V7.02002C52.1355 6.67269 52.0316 6.39697 51.824 6.19287C51.6163 5.98519 51.3101 5.88135 50.9055 5.88135C50.551 5.88135 50.2538 5.97087 50.0139 6.1499C49.774 6.32894 49.654 6.54557 49.654 6.7998H48.655C48.655 6.50977 48.7571 6.23047 48.9612 5.96191C49.1689 5.68978 49.4482 5.47493 49.7991 5.31738C50.1536 5.15983 50.5421 5.08105 50.9646 5.08105C51.6342 5.08105 52.1588 5.24935 52.5383 5.58594C52.9179 5.91895 53.1148 6.37907 53.1291 6.96631V9.64111C53.1291 10.1746 53.1972 10.599 53.3332 10.9141V11H52.2913ZM50.6423 10.2427C50.9538 10.2427 51.2493 10.1621 51.5286 10.001C51.8079 9.83984 52.0102 9.63037 52.1355 9.37256V8.18018H51.3567C50.1392 
8.18018 49.5305 8.53646 49.5305 9.24902C49.5305 9.56055 49.6343 9.80404 49.842 9.97949C50.0497 10.1549 50.3165 10.2427 50.6423 10.2427ZM54.8904 8.0459C54.8904 7.13997 55.0999 6.42025 55.5188 5.88672C55.9378 5.34961 56.4928 5.08105 57.1839 5.08105C57.8929 5.08105 58.4461 5.33171 58.8436 5.83301L58.8919 5.18848H59.7996V10.8604C59.7996 11.6123 59.5758 12.2049 59.1282 12.6382C58.6842 13.0715 58.0862 13.2881 57.3343 13.2881C56.9153 13.2881 56.5053 13.1986 56.1043 13.0195C55.7033 12.8405 55.3971 12.5952 55.1858 12.2837L55.7015 11.6875C56.1276 12.2139 56.6486 12.4771 57.2645 12.4771C57.7479 12.4771 58.1238 12.341 58.3924 12.0688C58.6645 11.7967 58.8006 11.4136 58.8006 10.9194V10.4199C58.4031 10.8783 57.8606 11.1074 57.1731 11.1074C56.4928 11.1074 55.9414 10.8335 55.5188 10.2856C55.0999 9.73779 54.8904 8.99121 54.8904 8.0459ZM55.8895 8.15869C55.8895 8.81396 56.0237 9.32959 56.2923 9.70557C56.5608 10.078 56.9368 10.2642 57.4202 10.2642C58.0468 10.2642 58.507 9.97949 58.8006 9.41016V6.75684C58.4962 6.20182 58.0397 5.92432 57.431 5.92432C56.9476 5.92432 56.5698 6.1123 56.2977 6.48828C56.0255 6.86426 55.8895 7.42106 55.8895 8.15869ZM62.8231 11H61.8295V5.18848H62.8231V11ZM61.7489 3.64697C61.7489 3.48584 61.7972 3.34977 61.8939 3.23877C61.9942 3.12777 62.141 3.07227 62.3343 3.07227C62.5277 3.07227 62.6745 3.12777 62.7748 3.23877C62.875 3.34977 62.9252 3.48584 62.9252 3.64697C62.9252 3.80811 62.875 3.94238 62.7748 4.0498C62.6745 4.15723 62.5277 4.21094 62.3343 4.21094C62.141 4.21094 61.9942 4.15723 61.8939 4.0498C61.7972 3.94238 61.7489 3.80811 61.7489 3.64697ZM65.7983 5.18848L65.8305 5.91895C66.2745 5.36035 66.8546 5.08105 67.5707 5.08105C68.7989 5.08105 69.4184 5.77393 69.4291 7.15967V11H68.4355V7.1543C68.4319 6.73535 68.3352 6.42562 68.1454 6.2251C67.9592 6.02458 67.6674 5.92432 67.2699 5.92432C66.9477 5.92432 66.6648 6.01025 66.4213 6.18213C66.1778 6.354 65.988 6.57959 65.852 6.85889V11H64.8583V5.18848H65.7983ZM71.1313 8.0459C71.1313 7.13997 71.3408 6.42025 71.7597 5.88672C72.1787 5.34961 72.7337 5.08105 73.4248 5.08105C74.1338 5.08105 74.687 5.33171 75.0845 5.83301L75.1328 5.18848H76.0405V10.8604C76.0405 11.6123 75.8167 12.2049 75.3691 12.6382C74.9251 13.0715 74.3271 13.2881 73.5752 13.2881C73.1562 13.2881 72.7462 13.1986 72.3452 13.0195C71.9442 12.8405 71.638 12.5952 71.4267 12.2837L71.9424 11.6875C72.3685 12.2139 72.8895 12.4771 73.5054 12.4771C73.9888 12.4771 74.3647 12.341 74.6333 12.0688C74.9054 11.7967 75.0415 11.4136 75.0415 10.9194V10.4199C74.644 10.8783 74.1015 11.1074 73.414 11.1074C72.7337 11.1074 72.1823 10.8335 71.7597 10.2856C71.3408 9.73779 71.1313 8.99121 71.1313 8.0459ZM72.1304 8.15869C72.1304 8.81396 72.2646 9.32959 72.5332 9.70557C72.8017 10.078 73.1777 10.2642 73.6611 10.2642C74.2877 10.2642 74.7479 9.97949 75.0415 9.41016V6.75684C74.7371 6.20182 74.2806 5.92432 73.6719 5.92432C73.1885 5.92432 72.8107 6.1123 72.5386 6.48828C72.2664 6.86426 72.1304 7.42106 72.1304 8.15869Z" fill="white"/>
+</g>
+<g filter="url(#filter1_d)">
+<path d="M93.7949 12V4.17969H96.4751C97.3595 4.17969 98.0327 4.35693 98.4946 4.71143C98.9565 5.06592 99.1875 5.59408 99.1875 6.2959C99.1875 6.65397 99.0908 6.97624 98.8975 7.2627C98.7041 7.54915 98.4212 7.77116 98.0488 7.92871C98.4714 8.04329 98.7972 8.25993 99.0264 8.57861C99.2591 8.89372 99.3755 9.27327 99.3755 9.71729C99.3755 10.4513 99.1392 11.0153 98.6665 11.4092C98.1974 11.8031 97.5243 12 96.647 12H93.7949ZM95.1538 8.47119V10.915H96.6631C97.0892 10.915 97.4222 10.8094 97.6621 10.5981C97.902 10.3869 98.022 10.0933 98.022 9.71729C98.022 8.90446 97.6066 8.4891 96.7759 8.47119H95.1538ZM95.1538 7.47217H96.4858C96.9084 7.47217 97.2378 7.37728 97.4741 7.1875C97.714 6.99414 97.834 6.72201 97.834 6.37109C97.834 5.98438 97.723 5.70508 97.501 5.5332C97.2826 5.36133 96.9406 5.27539 96.4751 5.27539H95.1538V7.47217ZM105.745 8.50879H102.533V10.915H106.288V12H101.174V4.17969H106.25V5.27539H102.533V7.43457H105.745V8.50879ZM113.592 5.27539H111.153V12H109.805V5.27539H107.388V4.17969H113.592V5.27539ZM118.796 10.1792H115.767L115.133 12H113.72L116.674 4.17969H117.894L120.853 12H119.435L118.796 10.1792ZM116.148 9.0835H118.415L117.281 5.83936L116.148 9.0835Z" fill="white"/>
+</g>
+<defs>
+<filter id="filter0_d" x="21.9077" y="2.75" width="55.1328" height="12.5381" filterUnits="userSpaceOnUse" color-interpolation-filters="sRGB">
+<feFlood flood-opacity="0" result="BackgroundImageFix"/>
+<feColorMatrix in="SourceAlpha" type="matrix" values="0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 127 0"/>
+<feOffset dy="1"/>
+<feGaussianBlur stdDeviation="0.5"/>
+<feColorMatrix type="matrix" values="0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0.5 0"/>
+<feBlend mode="normal" in2="BackgroundImageFix" result="effect1_dropShadow"/>
+<feBlend mode="normal" in="SourceGraphic" in2="effect1_dropShadow" result="shape"/>
+</filter>
+<filter id="filter1_d" x="92.7949" y="4.17969" width="29.0583" height="9.82031" filterUnits="userSpaceOnUse" color-interpolation-filters="sRGB">
+<feFlood flood-opacity="0" result="BackgroundImageFix"/>
+<feColorMatrix in="SourceAlpha" type="matrix" values="0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 127 0"/>
+<feOffset dy="1"/>
+<feGaussianBlur stdDeviation="0.5"/>
+<feColorMatrix type="matrix" values="0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0.5 0"/>
+<feBlend mode="normal" in2="BackgroundImageFix" result="effect1_dropShadow"/>
+<feBlend mode="normal" in="SourceGraphic" in2="effect1_dropShadow" result="shape"/>
+</filter>
+<linearGradient id="paint0_linear" x1="63.5" y1="0" x2="63.5" y2="16" gradientUnits="userSpaceOnUse">
+<stop stop-color="#606060"/>
+<stop offset="1" stop-color="#4D4D4D"/>
+</linearGradient>
+<linearGradient id="paint1_linear" x1="106.5" y1="0" x2="106.5" y2="16" gradientUnits="userSpaceOnUse">
+<stop stop-color="#BFBFBF"/>
+<stop offset="1" stop-color="#7F7F7F"/>
+</linearGradient>
+</defs>
+</svg>
diff --git a/web/gui/images/post.png b/web/gui/images/post.png
new file mode 100644
index 0000000..6bad547
--- /dev/null
+++ b/web/gui/images/post.png
Binary files differ
diff --git a/web/gui/images/seo-performance-128.png b/web/gui/images/seo-performance-128.png
new file mode 100644
index 0000000..2a212a4
--- /dev/null
+++ b/web/gui/images/seo-performance-128.png
Binary files differ
diff --git a/web/gui/index.html b/web/gui/index.html
new file mode 100644
index 0000000..d309910
--- /dev/null
+++ b/web/gui/index.html
@@ -0,0 +1,1322 @@
+<!DOCTYPE html>
+<!-- SPDX-License-Identifier: GPL-3.0-or-later -->
+<html lang="en">
+<head>
+ <title>netdata dashboard</title>
+ <meta name="application-name" content="netdata">
+
+ <meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
+ <meta charset="utf-8">
+ <meta http-equiv="X-UA-Compatible" content="IE=edge,chrome=1">
+ <meta name="viewport" content="width=device-width, initial-scale=1">
+ <meta name="apple-mobile-web-app-capable" content="yes">
+ <meta name="apple-mobile-web-app-status-bar-style" content="black-translucent">
+ <meta name="author" content="costa@tsaousis.gr">
+
+ <link rel="stylesheet" type="text/css" href="main.css?v=5">
+
+ <link rel="icon" href="data:image/x-icon;base64,iVBORw0KGgoAAAANSUhEUgAAACAAAAAgCAYAAABzenr0AAAAGXRFWHRTb2Z0d2FyZQBBZG9iZSBJbWFnZVJlYWR5ccllPAAAAP9JREFUeNpiYBgFo+A/w34gpiZ8DzWzAYgNiHGAA5UdgA73g+2gcyhgg/0DGQoweB6IBQYyFCCOGOBQwBMd/xnW09ERDtgcoEBHB+zHFQrz6egIBUasocDAcJ9OxWAhE4YQI8MDILmATg7wZ8QRDfQKhQf4Cie6pAVGPA4AhQKo0BCgZRAw4ZSBpIWJNI6CD4wEKikBaFqgVSgcYMIrzcjwgcahcIGRiPYCLUPBkNhWUwP9akVcoQBpatG4MsLviAIqWj6f3Absfdq2igg7IIEKDVQKEzN5ofAenJCp1I8gJRTug5tfkGIdR1FDniMI+QZUjF8Amn5htOdHCAAEGACE6B0cS6mrEwAAAABJRU5ErkJggg==" />
+
+ <meta property="og:locale" content="en_US" />
+ <meta property="og:url" content="https://my-netdata.io" />
+ <meta property="og:type" content="website" />
+ <meta property="og:site_name" content="netdata"/>
+ <meta property="og:title" content="Get control of your Linux Servers. Simple. Effective. Awesome." />
+ <meta property="og:description" content="Unparalleled insights, in real-time, of everything happening on your Linux systems and applications, with stunning, interactive web dashboards and powerful performance and health alarms." />
+ <meta property="og:image" content="https://cloud.githubusercontent.com/assets/2662304/22945737/e98cd0c6-f2fd-11e6-96f1-5501934b0955.png" />
+ <meta property="og:image:type" content="image/png" />
+ <meta property="fb:app_id" content="1200089276712916" />
+
+ <meta name="twitter:card" content="summary" />
+ <meta name="twitter:site" content="@linuxnetdata" />
+ <meta name="twitter:title" content="Get control of your Linux Servers. Simple. Effective. Awesome." />
+ <meta name="twitter:description" content="Unparalleled insights, in real-time, of everything happening on your Linux systems and applications, with stunning, interactive web dashboards and powerful performance and health alarms." />
+ <meta name="twitter:image" content="https://cloud.githubusercontent.com/assets/2662304/14092712/93b039ea-f551-11e5-822c-beadbf2b2a2e.gif" />
+
+ <script src="main.js?v20190905-0"></script>
+</head>
+
+<body data-spy="scroll" data-target="#sidebar" data-offset="100">
+ <div id="loadOverlay" class="loadOverlay" style="background-color: #fff; color: #888;">
+ <div style="font-size: 3vh;">
+ You must enable JavaScript in order to use Netdata!<br />
+ You can do this in <a href="https://enable-javascript.com/" target="_blank">your browser settings</a>.
+ </div>
+ </div>
+ <script type="text/javascript">
+ // Cleanup JS warning.
+ document.documentElement.style.overflowY = "scroll";
+
+ // Change the loadOverlay colors ASAP to match the theme.
+ let theme;
+ const hash = document.location.hash;
+ if (hash.includes('theme=slate')) {
+ theme = 'slate';
+ } else if (hash.includes('theme=white')) {
+ theme = 'white';
+ } else {
+ theme = localStorage.getItem('netdataTheme') || 'slate';
+ }
+ const overlayEl = document.getElementById('loadOverlay');
+ overlayEl.innerHTML = 'netdata<br/><div style="font-size: 3vh;">Real-time performance monitoring, done right!</div>';
+        overlayEl.style.cssText = theme === 'slate' ? "background-color: #272b30; color: #373b40;" : "background-color: #fff; color: #ddd;";
+ </script>
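+    <!-- Note on the script above: the theme is resolved by precedence. A 'theme=...'
+         token in the URL hash wins, then the 'netdataTheme' key saved in localStorage,
+         and 'slate' is the default when neither is present. -->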
+ <nav class="navbar navbar-default navbar-fixed-top" role="banner">
+ <div class="container">
+ <div class="navbar-header">
+ <button class="navbar-toggle" type="button" data-toggle="collapse" data-target=".navbar-collapse">
+ <span class="sr-only">Toggle navigation</span>
+ <span class="icon-bar"></span>
+ <span class="icon-bar"></span>
+ <span class="icon-bar"></span>
+ </button>
+ <ul class="nav navbar-nav" style="display: inline-block">
+ <li data-placement="right" class="hidden-xs hidden-sm" style="line-height: 50px; margin-right: 15px" title="Netdata Agent">
+ <img src="images/netdata-logomark.svg" height="32"/>
+ </li>
+ <li class="dropdown" id="myNetdataDropdownParent" title="your other netdata servers" data-toggle="tooltip" data-placement="right">
+ <a href="#" id="hostname" class="dropdown-toggle" data-toggle="dropdown">
+ my-netdata
+ <strong class="caret"></strong></a>
+ <div id="my-netdata-dropdown-content" class="dropdown-menu scrollable-menu inpagemenu">
+ <div class="agent-item" style="white-space: nowrap">
+ <i class="fas fa-hourglass-half"></i>
+ Loading, please wait...
+ <div></div>
+ </div>
+ </div>
+ </li>
+ </ul>
+ </div>
+ <nav class="collapse navbar-collapse navbar-right" role="navigation">
+ <ul class="nav navbar-nav">
+ <li title="Nodes view" data-toggle="tooltip" data-placement="bottom"><a onclick="openAuthenticatedUrl('console.html');" class="btn" target="_blank"><i class="fas fa-tv"></i>&nbsp;<span class="hidden-sm hidden-md">Nodes<sup class="beta"> beta</sup></span></a></li>
+ <li id="alarmsButton" title="check the health monitoring alarms and their log" data-toggle="tooltip" data-placement="bottom"><a href="#" class="btn" data-toggle="modal" data-target="#alarmsModal"><i class="fas fa-bell"></i>&nbsp;<span class="hidden-sm hidden-md">Alarms&nbsp;</span><span id="alarms_count_badge" class="badge"></span></a></li>
+ <li title="change dashboard settings" data-toggle="tooltip" data-placement="bottom"><a href="#" class="btn" data-toggle="modal" data-target="#optionsModal"><i class="fas fa-cog"></i>&nbsp;<span class="hidden-sm hidden-md">Settings</span></a></li>
+ <li title="check for netdata updates<br/>you should keep your netdata updated" data-toggle="tooltip" data-placement="bottom" class="hidden-sm" id="updateButton"><a href="#" class="btn" data-toggle="modal" data-target="#updateModal"><i class="fas fa-cloud-download-alt"></i> <span class="hidden-sm hidden-md">Update </span><span id="update_badge" class="badge"></span></a></li>
+ <li title="the netdata wiki home at github<br/>remember to <b>give netdata a <i class=&quot;fas fa-star&quot;></i></b> !" data-toggle="tooltip" data-placement="bottom" class="hidden-xs hidden-sm hidden-md"><a href="https://github.com/netdata/netdata" class="btn" target="_blank"><i class="fab fa-github"></i></a></li>
+ <li title="follow netdata on twitter" data-toggle="tooltip" data-placement="bottom" class="hidden-xs hidden-sm hidden-md"><a href="https://twitter.com/linuxnetdata" class="btn" target="_blank"><i class="fab fa-twitter"></i></a></li>
+ <li title="like netdata on facebook" data-toggle="tooltip" data-placement="bottom" class="hidden-xs hidden-sm hidden-md"><a href="https://www.facebook.com/linuxnetdata/" class="btn" target="_blank"><i class="fab fa-facebook"></i></a></li>
+ <li title="import / load a netdata snapshot" data-toggle="tooltip" data-placement="bottom" id="loadButton"><a href="#" class="btn" data-toggle="modal" data-target="#loadSnapshotModal"><i class="fas fa-download"></i>&nbsp;<span class="hidden-sm hidden-md hidden-lg">Import</span></a></li>
+ <li title="export / save a netdata snapshot" data-toggle="tooltip" data-placement="bottom" id="saveButton"><a href="#" class="btn" data-toggle="modal" data-target="#saveSnapshotModal"><i class="fas fa-upload"></i>&nbsp;<span class="hidden-sm hidden-md hidden-lg">Export</span></a></li>
+ <li title="print this dashboard to PDF" data-toggle="tooltip" data-placement="bottom" id="printButton"><a href="#" class="btn" data-toggle="modal" data-target="#printPreflightModal"><i class="fas fa-print"></i>&nbsp;<span class="hidden-sm hidden-md hidden-lg">Print</span></a></li>
+ <li title="get help on using the charts" data-toggle="tooltip" data-placement="bottom" class="hidden-sm"><a href="#" class="btn" data-toggle="modal" data-target="#helpModal"><i class="fas fa-question-circle"></i>&nbsp;<span class="hidden-sm hidden-md">Help</span></a></li>
+ <li id="account-menu-container" class="dropdown" data-toggle="tooltip"></li>
+ </ul>
+ </nav>
+ </div>
+ </nav>
+ <div class="navbar-highlight">
+ <div id="navbar-highlight-content" class="navbar-highlight-content"></div>
+ </div>
+
+ <div id="sign-in-banner" style="display: none">
+ <div class="container">
+ Like what you see?
+ <strong><a href="#" class="__link" onclick="signInDidClick(event); return false">Sign in</a>
+                to experience the full range of netdata capabilities!</strong>
+ <div class="__close" onclick="closeSignInBannerDidClick(event)">
+ Close <i class="fas fa-times"></i>
+ </div>
+ </div>
+ </div>
+
+ <div id="masthead" style="display: none;">
+ <div class="container">
+ <div class="row">
+ <div class="col-md-7">
+                    <h1>Netdata</h1>
+                    <p class="lead">Real-time performance monitoring, in the greatest possible detail</p>
+ </div>
+ <div class="col-md-5">
+ <div class="well well-lg">
+ <div class="row">
+ <div class="col-md-6">
+ <b>Drag</b> charts to pan.
+ <b>Shift + wheel</b> on them, to zoom in and out.
+ <b>Double-click</b> on them, to reset.
+ <b>Hover</b> on them too!
+ </div>
+ <div class="col-md-6">
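+                                <!-- An embedded live chart: the data-* attributes below are read by the
+                                     dashboard JS loaded in the page head (main.js), which should render the
+                                     system.intr metric as a dygraph sparkline for the last 90 seconds
+                                     (data-after="-90"). -->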
+ <div class="netdata-container" data-netdata="system.intr" data-chart-library="dygraph" data-dygraph-theme="sparkline" data-dygraph-type="line" data-dygraph-strokewidth="3" data-dygraph-smooth="true" data-dygraph-highlightcirclesize="6" data-after="-90" data-height="60px" data-colors="#C66"></div>
+ </div>
+ </div>
+ </div>
+ </div>
+ </div>
+ </div>
+ </div>
+
+ <div class="container">
+ <div class="row">
+ <div class="charts-body" role="main">
+ <div id="charts_div"></div>
+ </div>
+ <div class="sidebar-body hidden-xs hidden-sm hidden-print" id="sidebar-body" role="complementary">
+ <nav class="dashboard-sidebar hidden-print hidden-xs hidden-sm" id="sidebar" role="menu"></nav>
+ </div>
+ </div>
+ </div>
+
+ <div id="footer" class="container" style="display: none;">
+ <div class="row">
+ <div class="col-md-10" role="main">
+ <div class="p">
+ <big><a href="https://github.com/netdata/netdata/wiki" target="_blank">Netdata</a></big>
+ <br /><br />
+ <i class="fas fa-copyright"></i> Copyright 2018, <a href="mailto:info@netdata.cloud">Netdata, Inc</a>.<br/>
+ <i class="fas fa-copyright"></i> Copyright 2016-2018, <a href="mailto:costa@tsaousis.gr">Costa Tsaousis</a>.<br/>
+ </div>
+ <div class="p">
+ Released under <a href="http://www.gnu.org/licenses/gpl-3.0.en.html" target="_blank">GPL v3 or later</a>.
+ Netdata uses <a href="https://github.com/netdata/netdata/blob/master/REDISTRIBUTED.md" target="_blank">third party tools</a>.
+ <br /><br />
+ </div>
+ </div>
+ </div>
+ </div>
+
+ <div class="modal fade" id="xssModal" tabindex="-1" role="dialog" aria-labelledby="xssModalLabel" data-keyboard="false" data-backdrop="static" style="z-index: 3000">
+ <div class="modal-dialog modal-lg" role="document">
+ <div class="modal-content">
+ <div class="modal-header">
+ <h4 class="modal-title" id="xssModalLabel">XSS Protection</h4>
+ </div>
+ <div class="modal-body">
+ <p>
+ This dashboard is about to render data from server:
+ </p>
+ <p style="font-size: 1.25em;">
+ <code id="netdataXssModalServer"></code>
+ </p>
+ <p>
+                        To protect your privacy, the dashboard will <b>check all data transferred</b> for cross-site scripting (XSS).
+ <br/>This is CPU intensive, so your browser might be a bit slower.
+ </p>
+ <p>
+ If you <b>trust</b> the remote server, you can disable XSS protection.<br/>
+                        In this case, any remote dashboard decoration code (JavaScript) will also run.
+ </p>
+ <p>
+ If you <b>don't trust</b> the remote server, you should keep the protection on.<br/>
+ The dashboard will run slower and remote dashboard decoration code will not run, but better be safe than sorry...
+ </p>
+ </div>
+ <div class="modal-footer">
+ <a href="#" onclick="return xssModalKeepXss();" type="button" class="btn btn-success" data-dismiss="modal">Keep protecting me</a>
+ <a href="#" onclick="return xssModalDisableXss();" type="button" class="btn btn-danger" data-dismiss="modal">I don't need this, the server is mine</a>
+ </div>
+ </div>
+ </div>
+ </div>
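+    <!-- The two buttons above call xssModalKeepXss() / xssModalDisableXss(); both are
+         expected to be defined in main.js and to toggle the client-side checking of
+         data received from the remote server, as described in the modal body. -->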
+
+ <div class="modal fade" id="printPreflightModal" tabindex="-1" role="dialog" aria-labelledby="printPreflightModalLabel">
+ <div class="modal-dialog modal-lg" role="document">
+ <div class="modal-content">
+ <div class="modal-header">
+ <button type="button" class="close" data-dismiss="modal" aria-label="Close"><span aria-hidden="true">&times;</span></button>
+ <h4 class="modal-title" id="printPreflightModalLabel">Print this netdata dashboard</h4>
+ </div>
+ <div class="modal-body">
+ <p>
+                        netdata dashboards cannot be captured directly, since we lazy-load and hide all but the visible charts.
+                        <br/>
+                        To capture the whole page with all the charts rendered, a new browser window will pop up and render all the charts at once.
+                        The new browser window will maintain the current pan and zoom settings of the charts, so align the charts before proceeding.
+ </p>
+ <p>
+ <small>
+ This process will put some CPU and memory pressure on your browser.<br/>
+                            For the netdata server, we will sequentially download all the charts, to avoid congesting network and server resources.<br/>
+ <b>Please, do not print netdata dashboards on paper!</b>
+ </small>
+ </p>
+ </div>
+ <div class="modal-footer">
+ <a href="#" onclick="printPreflight(); return false;" type="button" class="btn btn-default">Print</a>
+ <button type="button" class="btn btn-default" data-dismiss="modal">Close</button>
+ </div>
+ </div>
+ </div>
+ </div>
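+    <!-- printPreflight(), referenced above and presumably defined in main.js, opens the
+         new window that renders every chart before the browser's print dialog appears;
+         the #printModal below reports the progress of that rendering. -->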
+
+ <div class="modal fade" id="printModal" tabindex="-1" role="dialog" aria-labelledby="printModalLabel" data-keyboard="false" data-backdrop="static">
+ <div class="modal-dialog modal-lg" role="document">
+ <div class="modal-content">
+ <div class="modal-header">
+ <button type="button" class="close" data-dismiss="modal" aria-label="Close"><span aria-hidden="true">&times;</span></button>
+ <h4 class="modal-title" id="printModalLabel">Preparing dashboard for printing...</h4>
+ </div>
+ <div class="modal-body">
+ Please wait while we initialize and render all the charts on the dashboard.
+ <div class="progress progress-striped active" style="height: 2em !important;">
+ <div id="printModalProgressBar" class="progress-bar progress-bar-info" role="progressbar" aria-valuenow="0" aria-valuemin="0" aria-valuemax="100" style="min-width: 2em;">
+ <span id="printModalProgressBarText" style="padding-left: 10px; padding-top: 4px; font-size: 1.2em; text-align: left; width: 100%; position: absolute; display: block; color: black;"></span>
+ </div>
+ </div>
+ The print dialog will appear as soon as we finish rendering the page.
+ </div>
+ <div class="modal-footer">
+ </div>
+ </div>
+ </div>
+ </div>
+
+ <div class="modal fade" id="loadSnapshotModal" tabindex="-1" role="dialog" aria-labelledby="loadSnapshotModalLabel">
+ <div class="modal-dialog modal-lg" role="document">
+ <div class="modal-content">
+ <div class="modal-header">
+ <button type="button" class="close" data-dismiss="modal" aria-label="Close"><span aria-hidden="true">&times;</span></button>
+ <h4 class="modal-title" id="loadSnapshotModalLabel">Import a netdata snapshot</h4>
+ </div>
+ <div id="loadSnapshotDragAndDrop" class="modal-body">
+ <p>
+ netdata can export and import dashboard snapshots.
+ Any netdata can import the snapshot of any other netdata.
+ The snapshots are not uploaded to a server. They are handled entirely by your web browser, on your computer.
+ </p>
+ <p style="text-align: center;">
+ <label class="btn btn-default">
+ Click here to select the netdata snapshot file to import
+ <input type="file" id="loadSnapshotSelectFiles" value="Import" style="display: none;"
+ onchange="loadSnapshotPreflight();">
+ </label>
+ </p>
+ <div id="loadSnapshotStatus" class="alert alert-info" role="alert">
+                        Browse for a snapshot file (or drag and drop it here), then click <b>Import</b> to render it.
+ </div>
+                    <div>
+ <table class="table">
+ <tbody>
+ <tr><th>Filename</th><td id="loadSnapshotFilename"></td></tr>
+ <tr><th>Hostname</th><td id="loadSnapshotHostname"></td></tr>
+ <tr><th>Origin URL</th><td id="loadSnapshotURL"></td></tr>
+ <tr><th>Charts Info</th><td id="loadSnapshotCharts"></td></tr>
+ <tr><th>Snapshot Info</th><td id="loadSnapshotInfo"></td></tr>
+ <tr><th>Time Range</th><td id="loadSnapshotTimeRange"></td></tr>
+ <tr><th>Comments</th><td id="loadSnapshotComments"></td></tr>
+ </tbody>
+ </table>
+                    </div>
+ </div>
+ <div class="modal-footer">
+                    <span style="display: inline-block; padding-right: 20px;">Snapshot files contain both data and JavaScript code. Make sure <b>you trust the files</b> you import!</span>
+ <a id="loadSnapshotImport" href="#" onclick="loadSnapshot(); return false;" type="button" class="btn btn-success disabled">Import</a>
+ <button type="button" class="btn btn-default" data-dismiss="modal">Close</button>
+ </div>
+ </div>
+ </div>
+ </div>
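+    <!-- Snapshot import happens entirely client-side: loadSnapshotPreflight() reads the
+         selected file and fills in the table above, and loadSnapshot() renders it. Both
+         handlers are referenced above and presumably defined in main.js. -->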
+
+ <div class="modal fade" id="saveSnapshotModal" tabindex="-1" role="dialog" aria-labelledby="saveSnapshotModalLabel" data-keyboard="false" data-backdrop="static">
+ <div class="modal-dialog modal-lg" role="document">
+ <div class="modal-content">
+ <div class="modal-header">
+ <button type="button" class="close" data-dismiss="modal" aria-label="Close"><span aria-hidden="true">&times;</span></button>
+ <h4 class="modal-title" id="saveSnapshotModalLabel">Export a snapshot</h4>
+ </div>
+ <div class="modal-body">
+ <div id="saveSnapshotModalProgressSection" hidden>
+ Please wait while we collect all the dashboard data...
+ <div class="progress progress-striped active" style="height: 2em !important;">
+ <div id="saveSnapshotModalProgressBar" class="progress-bar progress-bar-info" role="progressbar" aria-valuenow="0" aria-valuemin="0" aria-valuemax="100" style="min-width: 2em;">
+ <span id="saveSnapshotModalProgressBarText" style="padding-left: 10px; padding-top: 4px; font-size: 1.2em; text-align: left; width: 100%; position: absolute; display: block;"></span>
+ </div>
+ </div>
+ </div>
+
+ <div id="saveSnapshotResolutionRadio" style="text-align: center;">
+                        Select the desired resolution of the snapshot. This is the number of <b>seconds of data per point</b>.
+ <br/>
+ &nbsp;
+ <br/>
+ &nbsp;
+ <br/>
+ <input id="saveSnapshotResolutionSlider" data-slider-id='saveSnapshotResolutionSlider' type="text" style="width: 80%;" tabindex="0"/>
+ <br/>&nbsp;<br/>
+ <div class="input-group">
+ <span class="input-group-addon" id="sizing-saveSnapshotFilename" style="width: 100px;">Filename</span>
+ <input id="saveSnapshotFilename" class="form-control" placeholder="Filename of the generated snapshot" aria-describedby="sizing-saveSnapshotFilename" tabindex="2"/>
+ <div class="input-group-btn">
+ <div class="input-group-btn">
+ <button type="button" class="btn btn-default dropdown-toggle" data-toggle="dropdown" aria-haspopup="true" aria-expanded="false"><span id="saveSnapshotCompressionName">Compression</span> <span class="caret"></span></button>
+ <ul class="dropdown-menu dropdown-menu-right">
+ <li class="disabled"><a href="#" class="disabled">Select Compression</a></li>
+ <li role="separator" class="divider"></li>
+ <li><a href="#" onclick="saveSnapshotSetCompression('none'); return false;">uncompressed</a></li>
+ <li role="separator" class="divider"></li>
+ <li><a href="#" onclick="saveSnapshotSetCompression('pako.deflate'); return false;">pako.deflate (gzip, binary)</a></li>
+ <li><a href="#" onclick="saveSnapshotSetCompression('pako.deflate.base64'); return false;">pako.deflate.base64 (gzip, ascii)</a></li>
+ <li role="separator" class="divider"></li>
+ <li><a href="#" onclick="saveSnapshotSetCompression('lzstring.uri'); return false;">lzstring.uri (LZ, ascii)</a></li>
+ <li><a href="#" onclick="saveSnapshotSetCompression('lzstring.utf16'); return false;">lzstring.utf16 (LZ, utf16)</a></li>
+ <li><a href="#" onclick="saveSnapshotSetCompression('lzstring.base64'); return false;">lzstring.base64 (LZ, ascii)</a></li>
+ </ul>
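+                                <!-- Each entry above passes an encoder name to saveSnapshotSetCompression();
+                                     judging by the labels, snapshots are compressed client-side with pako
+                                     (zlib deflate) or lz-string before the file is written. -->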
+ </div><!-- /btn-group -->
+ </div>
+ </div>
+ <div class="input-group" style="padding-top: 10px; width: 100%">
+ <span class="input-group-addon" id="sizing-saveSnapshotComments" style="width: 100px;">Comments</span>
+ <input id="saveSnapshotComments" class="form-control" placeholder="Any comments about this snapshot?" aria-describedby="sizing-saveSnapshotComments" tabindex="3"/>
+ </div>
+ </div>
+
+ &nbsp;
+ <div id="saveSnapshotStatus" class="alert alert-info" role="alert">
+                        Select the snapshot resolution. This controls the size of the snapshot file.
+ </div>
+ <p>
+ The generated snapshot will include all charts of this dashboard, <b>for the visible timeframe</b>, so align, pan and zoom the charts as needed.
+ The scroll position of the dashboard will also be saved.
+                        The snapshot will be downloaded as a file to your computer, and it can be imported back into any netdata dashboard (there is no need to import it on this server).
+ </p>
+ <p>
+ <small>
+ Snapshot files include all the information of the dashboard, including the URL of the origin server, its netdata unique ID, etc.
+                            So, if you share a snapshot file with third parties, they will be able to access the origin server, if it is exposed on the internet.
+ <br/>
+ Snapshots are handled entirely by the web browser. The netdata servers are not aware of them.
+ </small>
+ </p>
+ </div>
+ <div class="modal-footer">
+ <a id="saveSnapshotExport" href="#" onclick="saveSnapshot(); return false;" type="button" class="btn btn-success" tabindex="4">Export</a>
+ <button type="button" class="btn btn-default" data-dismiss="modal" tabindex="5">Cancel</button>
+ </div>
+ </div>
+ </div>
+ </div>
+
+ <div class="modal fade" id="welcomeModal" tabindex="-1" role="dialog" aria-labelledby="welcomeModalLabel">
+ <div class="modal-dialog modal-lg" role="document">
+ <div class="modal-content">
+ <div class="modal-header">
+ <button type="button" class="close" data-dismiss="modal" aria-label="Close"><span aria-hidden="true">&times;</span></button>
+ <h4 class="modal-title" id="welcomeModalLabel">Welcome to the world of netdata</h4>
+ </div>
+ <div class="modal-body">
+ <div class="p">
+ <div style="width: 100%; text-align: center; padding-top: 10px; padding-bottom: 10px; font-size: 18px;">
+ if there is a metric for something, we want it visualised<br/>
+ and we want this visualisation to be <strong>real-time</strong>, <strong>efficient</strong> and <strong>awesome</strong>
+ </div>
+ </div>
+ <div class="p">
+ <b><a href="https://github.com/netdata/netdata/wiki" target="_blank">netdata</a></b>
+ is a new way to monitor your systems and applications, to get <strong>real-time insights</strong>
+ of what is really happening and what affects performance.
+                    It is carefully optimised to be a real-time system, without interfering in any way
+                    with the core function of your systems.
+ </div>
+ <div class="p">
+ <b><a href="https://github.com/netdata/netdata/wiki" target="_blank">netdata</a></b>
+ has been designed to monitor <strong>massive amounts of metrics, per server, per second</strong>.
+ When installed, it might come up with 1k to 3k metrics, but we have been testing it with 100k
+                    metrics, all collected per second, and still the CPU utilisation remained negligible.
+ <br/>
+                    We have also tried to give each metric a meaning and a context.
+ We have grouped and categorized all metrics into meaningful charts, providing a
+ better understanding of the underlying technologies and mechanisms.
+ </div>
+ <div class="p">
+ <b><a href="https://github.com/netdata/netdata/wiki" target="_blank">netdata</a></b> is free,
+ open-source software. If you decide to use it,
+ <strong><a href="https://github.com/netdata/netdata/tree/master/docs/a-github-star-is-important.md" target="_blank">it is important to give netdata a star at GitHub</a></strong>.
+ </div>
+ <div class="p">
+ Enjoy real-time performance monitoring!
+ </div>
+ <div class="p">
+ Costa Tsaousis
+ </div>
+ </div>
+ <div class="modal-footer">
+ <button type="button" class="btn btn-default" data-dismiss="modal">Close</button>
+ </div>
+ </div>
+ </div>
+ </div>
+
+ <div class="modal fade" id="helpModal" tabindex="-1" role="dialog" aria-labelledby="helpModalLabel">
+ <div class="modal-dialog modal-lg" role="document">
+ <div class="modal-content">
+ <div class="modal-header">
+ <button type="button" class="close" data-dismiss="modal" aria-label="Close"><span aria-hidden="true">&times;</span></button>
+ <h4 class="modal-title" id="helpModalLabel">Dashboard Help</h4>
+ </div>
+ <div class="modal-body">
+
+ <h4>Dygraphs (line, area and stacked area charts)</h4>
+
+ <!-- Nav tabs -->
+ <ul class="nav nav-tabs" role="tablist">
+ <li role="presentation" class="active"><a href="#help_mouse" aria-controls="help_mouse" role="tab" data-toggle="tab">Mouse Interface</a></li>
+ <li role="presentation"><a href="#help_touch" aria-controls="help_touch" role="tab" data-toggle="tab">Touch Interface</a></li>
+ </ul>
+
+ <!-- Tab panes -->
+ <div class="tab-content">
+ <div role="tabpanel" class="tab-pane active" id="help_mouse">
+ <div class="p">
+ <h4>Mouse Over / Hover</h4>
+                                Mouse over a chart to show, at its legend, the values for the timestamp under the mouse (the chart will also highlight the corresponding point).
+ <br/>
+ All the other visible charts will also show and highlight their values for the same timestamp.
+ </div>
+ <hr/>
+ <div class="p">
+ <h4>Drag Chart Contents</h4>
+ Drag the contents of a chart, by pressing the left mouse button and moving the mouse, to pan it horizontally.
+ <br/>
+                                All the charts will follow soon after you release the chart (this little delay is by design: it speeds up your browser and lets you focus on what you are exploring).
+ <br/>
+ Once a chart is panned, auto refreshing stops for all charts. To enable it again, <b>double click</b> a panned chart.
+ </div>
+ <hr/>
+ <div class="p">
+ <h4>Double Click</h4>
+ Double Click a chart to reset all the charts to their default auto-refreshing state.
+ </div>
+ <hr/>
+ <div class="p">
+ <h4>SHIFT + Drag</h4>
+ While pressing the <code>SHIFT</code> key, press the left mouse button on the contents of a chart and move the mouse to select an area, to zoom in. The other charts will follow too. Zooming is performed in two phases:
+ <ul>
+ <li>The already loaded chart contents are zoomed (low resolution)</li>
+ <li>New data are transferred from the netdata server, to refresh the chart with possibly more detail.</li>
+ </ul>
+ Once a chart is zoomed, auto refreshing stops for all charts. To enable it again, <b>double click</b> a zoomed chart.
+ </div>
+ <hr/>
+ <div class="p">
+ <h4>Highlight Timeframe</h4>
+                                While pressing the <code>ALT</code> key, press the left mouse button on the contents of a chart and move the mouse to select an area. The selected area will be highlighted on all charts.
+ </div>
+ <hr/>
+ <div class="p">
+ <h4>SHIFT + Mouse Wheel</h4>
+                                While pressing the <code>SHIFT</code> key with the mouse pointer over the contents of a chart, scroll the mouse wheel to zoom in or out. This kind of zooming is centered on the mouse pointer. The other charts will follow too.
+ <br/>
+ Once a chart is zoomed, auto refreshing stops for all charts. To enable it again, <b>double click</b> a zoomed chart.
+ </div>
+ <hr/>
+ <div class="p">
+ <h4>Legend Operations</h4>
+                                Clicking on the label or value of a dimension will select / un-select that dimension.
+                                <br/>
+                                You can hold the SHIFT or CONTROL key and click on legend labels or values to select / un-select multiple dimensions.
+ </div>
+ </div>
+ <div role="tabpanel" class="tab-pane" id="help_touch">
+ <div class="p">
+ <h4>Single Tap</h4>
+                                Single Tap on the contents of a chart to show, at its legend, the values for the timestamp tapped (the chart will also highlight the corresponding point).
+ <br/>
+ All the other visible charts will also show and highlight their values for the same timestamp.
+ </div>
+ <hr/>
+ <div class="p">
+ <h4>Drag Chart Contents</h4>
+ Touch and Drag the contents of a chart to pan it horizontally.
+ <br/>
+                                All the charts will follow soon after you release the chart (this little delay is by design: it speeds up your browser and lets you focus on what you are exploring).
+ <br/>
+ Once a chart is panned, auto refreshing stops for all charts. To enable it again, <b>double tap</b> a panned chart.
+ </div>
+ <hr/>
+ <div class="p">
+ <h4>Double Tap</h4>
+ Double tap a chart to reset all the charts to their default auto-refreshing state.
+ </div>
+ <hr/>
+ <div class="p">
+                            <h4>Zoom <small>(does not work on Firefox and IE/Edge)</small></h4>
+                                Pinch with two fingers to zoom in or out.
+ <br/>
+                                Once a chart is zoomed, auto refreshing stops for all charts. To enable it again, <b>double tap</b> a zoomed chart.
+ </div>
+ <hr/>
+ <div class="p">
+ <h4>Legend Operations</h4>
+                                Tapping on the label or value of a dimension will select / un-select that dimension.
+ </div>
+ </div>
+ </div>
+ </div>
+ <div class="modal-footer">
+ <button type="button" class="btn btn-default" data-dismiss="modal">Close</button>
+ </div>
+ </div>
+ </div>
+ </div>
+
+ <div class="modal fade" id="alarmsModal" tabindex="-1" role="dialog" aria-labelledby="alarmsModalLabel">
+ <div class="modal-dialog modal-lg" role="document" style="display: table;"> <!-- allow the modal to expand horizontally -->
+ <div class="modal-content">
+ <div class="modal-header">
+ <button type="button" class="close" data-dismiss="modal" aria-label="Close"><span aria-hidden="true">&times;</span></button>
+ <h4 class="modal-title" id="alarmsModalLabel">netdata alarms</h4>
+ </div>
+ <div class="modal-body">
+ <!-- Nav tabs -->
+ <ul class="nav nav-tabs" role="tablist">
+ <li role="presentation" class="active"><a href="#alarms_active" aria-controls="alarms_active" role="tab" data-toggle="tab">Active</a></li>
+ <li role="presentation"><a href="#alarms_all" aria-controls="alarms_all" role="tab" data-toggle="tab">All</a></li>
+ <li role="presentation"><a href="#alarms_log" aria-controls="alarms_log" role="tab" data-toggle="tab">Log</a></li>
+ </ul>
+
+ <!-- Tab panes -->
+ <div class="tab-content">
+ <div role="tabpanel" class="tab-pane active" id="alarms_active">
+ loading...
+ </div>
+ <div role="tabpanel" class="tab-pane" id="alarms_all">
+ loading...
+ </div>
+ <div role="tabpanel" class="tab-pane" id="alarms_log">
+ loading...
+ </div>
+ </div>
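+                    <!-- The three "loading..." panes above are placeholders; they are presumably
+                         populated by main.js from the server's alarms endpoints when the modal
+                         opens (see the commented-out alarmsUpdateModal() button below). -->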
+ </div>
+ <div class="modal-footer">
+ <!-- <a href="#" onclick="alarmsUpdateModal(); return false;" type="button" class="btn btn-default">Update</a> -->
+ <button type="button" class="btn btn-default" data-dismiss="modal">Close</button>
+ </div>
+ </div>
+ </div>
+ </div>
+
+ <div class="modal fade" id="optionsModal" tabindex="-1" role="dialog" aria-labelledby="optionsModalLabel">
+ <div class="modal-dialog modal-lg" role="document">
+ <div class="modal-content">
+ <div class="modal-header">
+ <button type="button" class="close" data-dismiss="modal" aria-label="Close"><span aria-hidden="true">&times;</span></button>
+ <h4 class="modal-title" id="optionsModalLabel">netdata dashboard options</h4>
+ </div>
+ <div class="modal-body">
+ <center>
+                    <small style="color: #BBBBBB;">These are browser settings. Each viewer has their own. They do not affect the operation of your netdata server.
+ <br/>
+ Settings take effect immediately and are saved permanently to browser local storage (except the refresh on focus / always option).
+ <br/>
+ To reset all options (including charts sizes) to their defaults, click <a href="#" onclick="resetDashboardOptions(); return false;">here</a>.</small>
+ </center>
+ <div style="padding: 10px;"></div>
+
+ <!-- Nav tabs -->
+ <ul class="nav nav-tabs" role="tablist">
+ <li role="presentation" class="active"><a href="#settings_performance" aria-controls="settings_performance" role="tab" data-toggle="tab">Performance</a></li>
+ <li role="presentation"><a href="#settings_sync" aria-controls="settings_sync" role="tab" data-toggle="tab">Synchronization</a></li>
+ <li role="presentation"><a href="#settings_visual" aria-controls="settings_visual" role="tab" data-toggle="tab">Visual</a></li>
+ <li role="presentation"><a href="#settings_locale" aria-controls="settings_locale" role="tab" data-toggle="tab">Locale</a></li>
+ </ul>
+
+ <!-- Tab panes -->
+ <div class="tab-content">
+ <div role="tabpanel" class="tab-pane active" id="settings_performance">
+ <form id="optionsForm1" method="get" class="form-horizontal">
+ <div class="form-group">
+ <table>
+ <tr class="option-row">
+ <td class="option-control"><input id="stop_updates_when_focus_is_lost" type="checkbox" checked data-toggle="toggle" data-offstyle="danger" data-onstyle="success" data-on="On Focus" data-off="Always" data-width="110px"></td>
+ <td class="option-info"><strong>When to refresh the charts?</strong><br/>
+                                            <small>When set to <b>On Focus</b>, the charts will stop being updated if the page / tab does not have the focus of the user. When set to <b>Always</b>, the charts will always be refreshed. Set it to <b>On Focus</b> to lower the CPU requirements of the browser (and extend the battery life of laptops and tablets) when this page does not have your focus. Set it to <b>Always</b> to work on another window (e.g. to change the settings of something) and still have the charts auto-refresh in this window.</small>
+ </td>
+ </tr>
+ <tr class="option-row">
+ <td class="option-control">
+ <input id="eliminate_zero_dimensions" type="checkbox" checked data-toggle="toggle" data-on="Non Zero" data-off="All" data-width="110px">
+ </td>
+ <td class="option-info"><strong>Which dimensions to show?</strong><br/>
+                                            <small>When set to <b>Non Zero</b>, dimensions that have all their values (within the current view) set to zero will not be transferred from the netdata server (except if all dimensions of the chart are zero, in which case this setting does nothing and all dimensions are transferred and shown). When set to <b>All</b>, all dimensions will always be shown. Set it to <b>Non Zero</b> to lower the data transferred between netdata and your browser, lower the CPU requirements of your browser (fewer lines to draw) and reduce clutter in the legends (fewer entries).</small>
+ </td>
+ </tr>
+ <tr class="option-row">
+ <td class="option-control"><input id="destroy_on_hide" type="checkbox" data-toggle="toggle" data-on="Destroy" data-off="Hide" data-width="110px"></td>
+ <td class="option-info"><strong>How to handle hidden charts?</strong><br/>
+                                            <small>When set to <b>Destroy</b>, charts that are not in the current viewport of the browser (above or below the visible area of the page) will be destroyed and re-created if and when they become visible again. When set to <b>Hide</b>, the non-visible charts will just be hidden, to simplify the DOM and speed up your browser. Set it to <b>Destroy</b> to lower the memory requirements of your browser. Set it to <b>Hide</b> for faster restoration of charts on page scrolling.</small>
+ </td>
+ </tr>
+ <tr class="option-row">
+ <td class="option-control"><input id="async_on_scroll" type="checkbox" data-toggle="toggle" data-on="Async" data-off="Sync" data-width="110px"></td>
+ <td class="option-info"><strong>Page scroll handling?</strong><br/>
+                                            <small>When set to <b>Sync</b>, charts are examined for visibility immediately after scrolling, which gives immediate chart updates but may impact the smoothness of scrolling on slow computers. When set to <b>Async</b>, visibility is updated only after scrolling ends, for smoother page scrolling on slower computers.</small>
+ </td>
+ </tr>
+ </table>
+ </div>
+ </form>
+ </div>
+ <div role="tabpanel" class="tab-pane" id="settings_sync">
+ <form id="optionsForm2" method="get" class="form-horizontal">
+ <div class="form-group">
+ <table>
+ <tr class="option-row">
+ <td class="option-control"><input id="parallel_refresher" type="checkbox" checked data-toggle="toggle" data-on="Parallel" data-off="Sequential" data-width="110px"></td>
+ <td class="option-info"><strong>Which chart refresh policy to use?</strong><br/>
+                                            <small>When set to <b>Parallel</b>, visible charts are refreshed in parallel (all queries are sent to the netdata server in parallel) and are rendered asynchronously. When set to <b>Sequential</b>, charts are refreshed one after another. Set it to <b>Parallel</b> if your browser can cope with it (most modern browsers do), and to <b>Sequential</b> if you work on an older/slower computer.</small>
+ </td>
+ </tr>
+ <tr class="option-row" id="concurrent_refreshes_row">
+ <td class="option-control"><input id="concurrent_refreshes" type="checkbox" checked data-toggle="toggle" data-on="Resync" data-off="Best Effort" data-width="110px"></td>
+ <td class="option-info"><strong>Shall we re-sync chart refreshes?</strong><br/>
+                                            <small>Normally the dashboard starts refreshing charts in parallel, but depending on the speed of your computer and the network latencies, charts drift slightly apart in time. When set to <b>Resync</b>, the dashboard will attempt to re-synchronize the charts on every update, so they are refreshed concurrently. When set to <b>Best Effort</b>, each chart may be refreshed with a small time difference from the others, which may lower the pressure on your browser and the network.</small>
+ </td>
+ </tr>
+ <tr class="option-row">
+ <td class="option-control"><input id="sync_selection" type="checkbox" checked data-toggle="toggle" data-on="Sync" data-off="Don't Sync" data-onstyle="success" data-offstyle="danger" data-width="110px"></td>
+ <td class="option-info"><strong>Sync hover selection on all charts?</strong><br/>
+                                            <small>When enabled, a selection on one chart will automatically select the same time on all other visible charts, and the legends of all visible charts will be updated to show the selected values. When disabled, only the chart getting the user's attention will be selected. Enable it to get better insights into the data. Disable it if you are on a very slow computer that cannot keep up.</small>
+ </td>
+ </tr>
+ <tr class="option-row">
+ <td class="option-control"><input id="sync_pan_and_zoom" type="checkbox" checked data-toggle="toggle" data-on="Sync" data-off="Don't Sync" data-onstyle="success" data-offstyle="danger" data-width="110px"></td>
+ <td class="option-info"><strong>Sync pan and zoom on all charts?</strong><br/>
+                                            <small>When enabled, pan and zoom operations on a chart will be replicated to all charts (even the ones that are not currently visible, as soon as they become visible). When disabled, pan and zoom operations will not be propagated to other charts.</small>
+ </td>
+ </tr>
+ </table>
+ </div>
+ </form>
+ </div>
+ <div role="tabpanel" class="tab-pane" id="settings_visual">
+ <form id="optionsForm3" method="get" class="form-horizontal">
+ <div class="form-group">
+ <table>
+ <tr class="option-row">
+ <td class="option-control"><input id="netdata_theme_control" type="checkbox" checked data-toggle="toggle" data-offstyle="danger" data-onstyle="success" data-on="Dark" data-off="White" data-width="110px"></td>
+ <td class="option-info"><strong>Which theme to use?</strong><br/>
+ <small>Netdata comes with two themes: <b>Dark</b> (the default) and <b>White</b>.
+ <br/>
+ <b>Switching this will reload the dashboard</b>.
+ </small>
+ </td>
+ </tr>
+ <tr class="option-row">
+ <td class="option-control"><input id="show_help" type="checkbox" checked data-toggle="toggle" data-on="Help Me" data-off="No Help" data-width="110px"></td>
+ <td class="option-info"><strong>Do you need help?</strong><br/>
+                                            <small>Netdata can show help balloons in some areas of the dashboard. If they bother you, disable them using this switch.
+ <br/>
+ <b>Switching this will reload the dashboard</b>.
+ </small>
+ </td>
+ </tr>
+ <tr class="option-row">
+ <td class="option-control"><input id="pan_and_zoom_data_padding" type="checkbox" checked data-toggle="toggle" data-on="Pad" data-off="Don't Pad" data-width="110px"></td>
+ <td class="option-info"><strong>Enable data padding when panning and zooming?</strong><br/>
+                                            <small>When set to <b>Pad</b>, the charts will be padded with more data, both before and after the visible area, thus giving the impression that the whole database is loaded. This padding happens only after the first pan or zoom operation on the chart (initially, all charts have only the visible data). When set to <b>Don't Pad</b>, only the visible data will be transferred from the netdata server, even after the first pan and zoom operation.</small>
+ </td>
+ </tr>
+ <tr class="option-row">
+ <td class="option-control"><input id="smooth_plot" type="checkbox" checked data-toggle="toggle" data-on="Smooth" data-off="Rough" data-width="110px"></td>
+ <td class="option-info"><strong>Enable Bézier lines on charts?</strong><br/>
+                                            <small>When set to <b>Smooth</b>, the chart libraries that support it will plot smooth curves instead of simple straight lines to connect the points.
+ <br/>
+                                            Keep in mind <a href="http://dygraphs.com" target="_blank">dygraphs</a>, the main charting library in netdata dashboards, can only smooth line charts. It cannot smooth area or stacked charts. Setting this to <b>Rough</b> can lower the CPU resources consumed by your browser.</small>
+ </td>
+ </tr>
+ </table>
+ </div>
+ </form>
+ </div>
+ <div role="tabpanel" class="tab-pane" id="settings_locale">
+ <form id="optionsForm4" method="get" class="form-horizontal">
+ <div class="form-group">
+ <table>
+ <tr class="option-row">
+ <td colspan="2" align="center">
+ <small>
+ <b>These settings are applied gradually, as charts are updated. To force them, refresh the dashboard now</b>.
+ </small>
+ </td>
+ </tr>
+ <tr class="option-row">
+ <td class="option-control"><input id="units_conversion" type="checkbox" checked data-toggle="toggle" data-on="Scale Units" data-off="Fixed Units" data-onstyle="success" data-width="110px"></td>
+ <td class="option-info"><strong>Enable auto-scaling of select units?</strong><br/>
+                                            <small>When set to <b>Scale Units</b> the values shown will be dynamically scaled (e.g. 1000 kilobits will be shown as 1 megabit).
+ Netdata can auto-scale these original units: <code>kilobits/s</code>, <code>kilobytes/s</code>, <code>KB/s</code>,
+ <code>KB</code>, <code>MB</code>, and <code>GB</code>.
+ When set to <b>Fixed Units</b> all the values will be rendered using the original units maintained by the netdata server.
+ </small>
+ </td>
+ </tr>
+ <tr id="settingsLocaleTempRow" class="option-row">
+ <td class="option-control"><input id="units_temp" type="checkbox" checked data-toggle="toggle" data-on="Celsius" data-off="Fahrenheit" data-width="110px"></td>
+ <td class="option-info"><strong>Which units to use for temperatures?</strong><br/>
+ <small>Set the temperature units of the dashboard.
+ </small>
+ </td>
+ </tr>
+ <tr id="settingsLocaleTimeRow" class="option-row">
+ <td class="option-control"><input id="seconds_as_time" type="checkbox" checked data-toggle="toggle" data-on="Time" data-off="Seconds" data-onstyle="success" data-width="110px"></td>
+ <td class="option-info"><strong>Convert seconds to time?</strong><br/>
+ <small>When set to <b>Time</b>, charts that present <code>seconds</code> will show <code>DDd:HH:MM:SS</code>.
+ When set to <b>Seconds</b>, the raw number of seconds will be presented.
+ </small>
+ </td>
+ </tr>
+ <tr class="option-row">
+ <td class="option-control"><input id="local_timezone" type="checkbox" checked data-toggle="toggle" data-on="Your Time" data-off="Server Time" data-onstyle="success" data-offstyle="danger" data-width="110px"></td>
+ <td class="option-info"><strong>Show browser local time or server time?</strong><br/>
+ <small>When set to <b>Your Time</b>, the charts will use your browser local time. When set to <b>Server Time</b> the charts will use the server time.
+ <br/>
+ Set it to <b>Your Time</b> to have a common time-reference, no matter where the server is and which time zone it uses (all your dashboards will report your local time).
+ Set it to <b>Server Time</b> when you need to compare netdata charts with server log files.
+ <br/>
+ Your browser time zone is: <b><span id="browser_timezone">-</span></b>.<br/>
+ The server reported timezone is: <b><span id="server_timezone">-</span></b> (you can set this in netdata.conf <code>[global].timezone</code>).<br/>
+ The current time zone on the dashboard is: <b><span id="current_timezone">-</span></b>.
+ <br/>
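+                                        <!-- Illustrative netdata.conf snippet (the zone value is a
+                                             hypothetical example; any IANA zone name works):
+                                                 [global]
+                                                     timezone = Europe/Athens
+                                        -->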
+ <div class="dropup">
+ <button class="btn btn-default dropdown-toggle" type="button" id="dropdownTimeZone" data-toggle="dropdown" aria-haspopup="true" aria-expanded="true">
+ Select Server Time Zone
+ <span class="caret"></span>
+ </button>
+ <ul class="dropdown-menu scrollable-menu-50" aria-labelledby="dropdownTimeZone">
+ <li><a href=# onclick="return selected_server_timezone('UTC');">Universal Time Coordinated (UTC)</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Africa/Abidjan');">Africa/Abidjan</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Africa/Accra');">Africa/Accra</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Africa/Algiers');">Africa/Algiers</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Africa/Bissau');">Africa/Bissau</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Africa/Cairo');">Africa/Cairo</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Africa/Casablanca');">Africa/Casablanca</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Africa/Ceuta');">Africa/Ceuta</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Africa/El_Aaiun');">Africa/El_Aaiun</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Africa/Johannesburg');">Africa/Johannesburg</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Africa/Khartoum');">Africa/Khartoum</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Africa/Lagos');">Africa/Lagos</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Africa/Maputo');">Africa/Maputo</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Africa/Monrovia');">Africa/Monrovia</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Africa/Nairobi');">Africa/Nairobi</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Africa/Ndjamena');">Africa/Ndjamena</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Africa/Tripoli');">Africa/Tripoli</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Africa/Tunis');">Africa/Tunis</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Africa/Windhoek');">Africa/Windhoek</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Adak');">America/Adak</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Anchorage');">America/Anchorage</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Araguaina');">America/Araguaina</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Argentina/Buenos_Aires');">America/Argentina/Buenos_Aires</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Argentina/Catamarca');">America/Argentina/Catamarca</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Argentina/Cordoba');">America/Argentina/Cordoba</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Argentina/Jujuy');">America/Argentina/Jujuy</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Argentina/La_Rioja');">America/Argentina/La_Rioja</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Argentina/Mendoza');">America/Argentina/Mendoza</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Argentina/Rio_Gallegos');">America/Argentina/Rio_Gallegos</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Argentina/Salta');">America/Argentina/Salta</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Argentina/San_Juan');">America/Argentina/San_Juan</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Argentina/San_Luis');">America/Argentina/San_Luis</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Argentina/Tucuman');">America/Argentina/Tucuman</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Argentina/Ushuaia');">America/Argentina/Ushuaia</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Asuncion');">America/Asuncion</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Atikokan');">America/Atikokan</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Bahia');">America/Bahia</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Bahia_Banderas');">America/Bahia_Banderas</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Barbados');">America/Barbados</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Belem');">America/Belem</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Belize');">America/Belize</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Blanc-Sablon');">America/Blanc-Sablon</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Boa_Vista');">America/Boa_Vista</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Bogota');">America/Bogota</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Boise');">America/Boise</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Cambridge_Bay');">America/Cambridge_Bay</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Campo_Grande');">America/Campo_Grande</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Cancun');">America/Cancun</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Caracas');">America/Caracas</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Cayenne');">America/Cayenne</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Chicago');">America/Chicago</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Chihuahua');">America/Chihuahua</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Costa_Rica');">America/Costa_Rica</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Creston');">America/Creston</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Cuiaba');">America/Cuiaba</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Curacao');">America/Curacao</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Danmarkshavn');">America/Danmarkshavn</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Dawson');">America/Dawson</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Dawson_Creek');">America/Dawson_Creek</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Denver');">America/Denver</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Detroit');">America/Detroit</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Edmonton');">America/Edmonton</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Eirunepe');">America/Eirunepe</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/El_Salvador');">America/El_Salvador</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Fortaleza');">America/Fortaleza</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Fort_Nelson');">America/Fort_Nelson</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Glace_Bay');">America/Glace_Bay</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Godthab');">America/Godthab</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Goose_Bay');">America/Goose_Bay</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Grand_Turk');">America/Grand_Turk</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Guatemala');">America/Guatemala</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Guayaquil');">America/Guayaquil</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Guyana');">America/Guyana</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Halifax');">America/Halifax</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Havana');">America/Havana</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Hermosillo');">America/Hermosillo</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Indiana/Indianapolis');">America/Indiana/Indianapolis</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Indiana/Knox');">America/Indiana/Knox</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Indiana/Marengo');">America/Indiana/Marengo</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Indiana/Petersburg');">America/Indiana/Petersburg</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Indiana/Tell_City');">America/Indiana/Tell_City</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Indiana/Vevay');">America/Indiana/Vevay</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Indiana/Vincennes');">America/Indiana/Vincennes</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Indiana/Winamac');">America/Indiana/Winamac</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Inuvik');">America/Inuvik</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Iqaluit');">America/Iqaluit</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Jamaica');">America/Jamaica</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Juneau');">America/Juneau</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Kentucky/Louisville');">America/Kentucky/Louisville</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Kentucky/Monticello');">America/Kentucky/Monticello</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/La_Paz');">America/La_Paz</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Lima');">America/Lima</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Los_Angeles');">America/Los_Angeles</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Maceio');">America/Maceio</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Managua');">America/Managua</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Manaus');">America/Manaus</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Martinique');">America/Martinique</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Matamoros');">America/Matamoros</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Mazatlan');">America/Mazatlan</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Menominee');">America/Menominee</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Merida');">America/Merida</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Metlakatla');">America/Metlakatla</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Mexico_City');">America/Mexico_City</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Miquelon');">America/Miquelon</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Moncton');">America/Moncton</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Monterrey');">America/Monterrey</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Montevideo');">America/Montevideo</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Nassau');">America/Nassau</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/New_York');">America/New_York</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Nipigon');">America/Nipigon</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Nome');">America/Nome</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Noronha');">America/Noronha</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/North_Dakota/Beulah');">America/North_Dakota/Beulah</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/North_Dakota/Center');">America/North_Dakota/Center</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/North_Dakota/New_Salem');">America/North_Dakota/New_Salem</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Ojinaga');">America/Ojinaga</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Panama');">America/Panama</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Pangnirtung');">America/Pangnirtung</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Paramaribo');">America/Paramaribo</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Phoenix');">America/Phoenix</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Port-au-Prince');">America/Port-au-Prince</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Port_of_Spain');">America/Port_of_Spain</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Porto_Velho');">America/Porto_Velho</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Puerto_Rico');">America/Puerto_Rico</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Punta_Arenas');">America/Punta_Arenas</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Rainy_River');">America/Rainy_River</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Rankin_Inlet');">America/Rankin_Inlet</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Recife');">America/Recife</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Regina');">America/Regina</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Resolute');">America/Resolute</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Rio_Branco');">America/Rio_Branco</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Santarem');">America/Santarem</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Santiago');">America/Santiago</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Santo_Domingo');">America/Santo_Domingo</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Sao_Paulo');">America/Sao_Paulo</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Scoresbysund');">America/Scoresbysund</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Sitka');">America/Sitka</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/St_Johns');">America/St_Johns</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Swift_Current');">America/Swift_Current</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Tegucigalpa');">America/Tegucigalpa</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Thule');">America/Thule</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Thunder_Bay');">America/Thunder_Bay</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Tijuana');">America/Tijuana</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Toronto');">America/Toronto</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Vancouver');">America/Vancouver</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Whitehorse');">America/Whitehorse</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Winnipeg');">America/Winnipeg</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Yakutat');">America/Yakutat</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Yellowknife');">America/Yellowknife</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Antarctica/Casey');">Antarctica/Casey</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Antarctica/Davis');">Antarctica/Davis</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Antarctica/DumontDUrville');">Antarctica/DumontDUrville</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Antarctica/Macquarie');">Antarctica/Macquarie</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Antarctica/Mawson');">Antarctica/Mawson</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Antarctica/Palmer');">Antarctica/Palmer</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Antarctica/Rothera');">Antarctica/Rothera</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Antarctica/Syowa');">Antarctica/Syowa</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Antarctica/Troll');">Antarctica/Troll</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Antarctica/Vostok');">Antarctica/Vostok</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Asia/Almaty');">Asia/Almaty</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Asia/Amman');">Asia/Amman</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Asia/Anadyr');">Asia/Anadyr</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Asia/Aqtau');">Asia/Aqtau</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Asia/Aqtobe');">Asia/Aqtobe</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Asia/Ashgabat');">Asia/Ashgabat</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Asia/Atyrau');">Asia/Atyrau</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Asia/Baghdad');">Asia/Baghdad</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Asia/Baku');">Asia/Baku</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Asia/Bangkok');">Asia/Bangkok</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Asia/Barnaul');">Asia/Barnaul</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Asia/Beirut');">Asia/Beirut</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Asia/Bishkek');">Asia/Bishkek</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Asia/Brunei');">Asia/Brunei</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Asia/Chita');">Asia/Chita</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Asia/Choibalsan');">Asia/Choibalsan</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Asia/Colombo');">Asia/Colombo</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Asia/Damascus');">Asia/Damascus</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Asia/Dhaka');">Asia/Dhaka</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Asia/Dili');">Asia/Dili</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Asia/Dubai');">Asia/Dubai</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Asia/Dushanbe');">Asia/Dushanbe</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Asia/Famagusta');">Asia/Famagusta</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Asia/Gaza');">Asia/Gaza</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Asia/Hebron');">Asia/Hebron</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Asia/Ho_Chi_Minh');">Asia/Ho_Chi_Minh</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Asia/Hong_Kong');">Asia/Hong_Kong</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Asia/Hovd');">Asia/Hovd</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Asia/Irkutsk');">Asia/Irkutsk</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Asia/Jakarta');">Asia/Jakarta</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Asia/Jayapura');">Asia/Jayapura</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Asia/Jerusalem');">Asia/Jerusalem</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Asia/Kabul');">Asia/Kabul</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Asia/Kamchatka');">Asia/Kamchatka</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Asia/Karachi');">Asia/Karachi</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Asia/Kathmandu');">Asia/Kathmandu</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Asia/Khandyga');">Asia/Khandyga</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Asia/Kolkata');">Asia/Kolkata</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Asia/Krasnoyarsk');">Asia/Krasnoyarsk</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Asia/Kuala_Lumpur');">Asia/Kuala_Lumpur</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Asia/Kuching');">Asia/Kuching</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Asia/Macau');">Asia/Macau</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Asia/Magadan');">Asia/Magadan</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Asia/Makassar');">Asia/Makassar</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Asia/Manila');">Asia/Manila</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Asia/Nicosia');">Asia/Nicosia</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Asia/Novokuznetsk');">Asia/Novokuznetsk</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Asia/Novosibirsk');">Asia/Novosibirsk</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Asia/Omsk');">Asia/Omsk</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Asia/Oral');">Asia/Oral</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Asia/Pontianak');">Asia/Pontianak</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Asia/Pyongyang');">Asia/Pyongyang</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Asia/Qatar');">Asia/Qatar</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Asia/Qyzylorda');">Asia/Qyzylorda</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Asia/Riyadh');">Asia/Riyadh</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Asia/Sakhalin');">Asia/Sakhalin</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Asia/Samarkand');">Asia/Samarkand</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Asia/Seoul');">Asia/Seoul</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Asia/Shanghai');">Asia/Shanghai</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Asia/Singapore');">Asia/Singapore</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Asia/Srednekolymsk');">Asia/Srednekolymsk</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Asia/Taipei');">Asia/Taipei</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Asia/Tashkent');">Asia/Tashkent</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Asia/Tbilisi');">Asia/Tbilisi</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Asia/Tehran');">Asia/Tehran</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Asia/Thimphu');">Asia/Thimphu</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Asia/Tokyo');">Asia/Tokyo</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Asia/Tomsk');">Asia/Tomsk</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Asia/Ulaanbaatar');">Asia/Ulaanbaatar</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Asia/Urumqi');">Asia/Urumqi</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Asia/Ust-Nera');">Asia/Ust-Nera</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Asia/Vladivostok');">Asia/Vladivostok</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Asia/Yakutsk');">Asia/Yakutsk</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Asia/Yangon');">Asia/Yangon</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Asia/Yekaterinburg');">Asia/Yekaterinburg</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Asia/Yerevan');">Asia/Yerevan</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Atlantic/Azores');">Atlantic/Azores</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Atlantic/Bermuda');">Atlantic/Bermuda</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Atlantic/Canary');">Atlantic/Canary</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Atlantic/Cape_Verde');">Atlantic/Cape_Verde</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Atlantic/Faroe');">Atlantic/Faroe</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Atlantic/Madeira');">Atlantic/Madeira</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Atlantic/Reykjavik');">Atlantic/Reykjavik</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Atlantic/South_Georgia');">Atlantic/South_Georgia</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Atlantic/Stanley');">Atlantic/Stanley</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Australia/Adelaide');">Australia/Adelaide</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Australia/Brisbane');">Australia/Brisbane</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Australia/Broken_Hill');">Australia/Broken_Hill</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Australia/Currie');">Australia/Currie</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Australia/Darwin');">Australia/Darwin</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Australia/Eucla');">Australia/Eucla</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Australia/Hobart');">Australia/Hobart</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Australia/Lindeman');">Australia/Lindeman</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Australia/Lord_Howe');">Australia/Lord_Howe</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Australia/Melbourne');">Australia/Melbourne</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Australia/Perth');">Australia/Perth</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Australia/Sydney');">Australia/Sydney</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Europe/Amsterdam');">Europe/Amsterdam</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Europe/Andorra');">Europe/Andorra</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Europe/Astrakhan');">Europe/Astrakhan</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Europe/Athens');">Europe/Athens</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Europe/Belgrade');">Europe/Belgrade</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Europe/Berlin');">Europe/Berlin</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Europe/Brussels');">Europe/Brussels</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Europe/Bucharest');">Europe/Bucharest</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Europe/Budapest');">Europe/Budapest</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Europe/Chisinau');">Europe/Chisinau</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Europe/Copenhagen');">Europe/Copenhagen</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Europe/Dublin');">Europe/Dublin</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Europe/Gibraltar');">Europe/Gibraltar</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Europe/Helsinki');">Europe/Helsinki</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Europe/Istanbul');">Europe/Istanbul</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Europe/Kaliningrad');">Europe/Kaliningrad</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Europe/Kiev');">Europe/Kiev</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Europe/Kirov');">Europe/Kirov</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Europe/Lisbon');">Europe/Lisbon</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Europe/London');">Europe/London</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Europe/Luxembourg');">Europe/Luxembourg</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Europe/Madrid');">Europe/Madrid</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Europe/Malta');">Europe/Malta</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Europe/Minsk');">Europe/Minsk</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Europe/Monaco');">Europe/Monaco</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Europe/Moscow');">Europe/Moscow</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Europe/Oslo');">Europe/Oslo</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Europe/Paris');">Europe/Paris</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Europe/Prague');">Europe/Prague</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Europe/Riga');">Europe/Riga</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Europe/Rome');">Europe/Rome</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Europe/Samara');">Europe/Samara</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Europe/Saratov');">Europe/Saratov</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Europe/Simferopol');">Europe/Simferopol</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Europe/Sofia');">Europe/Sofia</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Europe/Stockholm');">Europe/Stockholm</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Europe/Tallinn');">Europe/Tallinn</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Europe/Tirane');">Europe/Tirane</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Europe/Ulyanovsk');">Europe/Ulyanovsk</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Europe/Uzhgorod');">Europe/Uzhgorod</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Europe/Vienna');">Europe/Vienna</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Europe/Vilnius');">Europe/Vilnius</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Europe/Volgograd');">Europe/Volgograd</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Europe/Warsaw');">Europe/Warsaw</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Europe/Zaporozhye');">Europe/Zaporozhye</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Europe/Zurich');">Europe/Zurich</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Indian/Chagos');">Indian/Chagos</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Indian/Christmas');">Indian/Christmas</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Indian/Cocos');">Indian/Cocos</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Indian/Kerguelen');">Indian/Kerguelen</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Indian/Mahe');">Indian/Mahe</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Indian/Maldives');">Indian/Maldives</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Indian/Mauritius');">Indian/Mauritius</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Indian/Reunion');">Indian/Reunion</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Pacific/Apia');">Pacific/Apia</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Pacific/Auckland');">Pacific/Auckland</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Pacific/Bougainville');">Pacific/Bougainville</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Pacific/Chatham');">Pacific/Chatham</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Pacific/Chuuk');">Pacific/Chuuk</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Pacific/Easter');">Pacific/Easter</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Pacific/Efate');">Pacific/Efate</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Pacific/Enderbury');">Pacific/Enderbury</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Pacific/Fakaofo');">Pacific/Fakaofo</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Pacific/Fiji');">Pacific/Fiji</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Pacific/Funafuti');">Pacific/Funafuti</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Pacific/Galapagos');">Pacific/Galapagos</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Pacific/Gambier');">Pacific/Gambier</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Pacific/Guadalcanal');">Pacific/Guadalcanal</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Pacific/Guam');">Pacific/Guam</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Pacific/Honolulu');">Pacific/Honolulu</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Pacific/Kiritimati');">Pacific/Kiritimati</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Pacific/Kosrae');">Pacific/Kosrae</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Pacific/Kwajalein');">Pacific/Kwajalein</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Pacific/Majuro');">Pacific/Majuro</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Pacific/Marquesas');">Pacific/Marquesas</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Pacific/Nauru');">Pacific/Nauru</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Pacific/Niue');">Pacific/Niue</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Pacific/Norfolk');">Pacific/Norfolk</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Pacific/Noumea');">Pacific/Noumea</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Pacific/Pago_Pago');">Pacific/Pago_Pago</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Pacific/Palau');">Pacific/Palau</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Pacific/Pitcairn');">Pacific/Pitcairn</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Pacific/Pohnpei');">Pacific/Pohnpei</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Pacific/Port_Moresby');">Pacific/Port_Moresby</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Pacific/Rarotonga');">Pacific/Rarotonga</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Pacific/Tahiti');">Pacific/Tahiti</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Pacific/Tarawa');">Pacific/Tarawa</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Pacific/Tongatapu');">Pacific/Tongatapu</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Pacific/Wake');">Pacific/Wake</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Pacific/Wallis');">Pacific/Wallis</a></li>
+ </ul>
+ </div>
+ <b><span id="timezone_error_message"></span></b>
+
+ </small>
+ </td>
+ </tr>
+ </table>
+ </div>
+ </form>
+ </div>
+ </div>
+ </div>
+ <div class="modal-footer">
+ <button type="button" class="btn btn-default" data-dismiss="modal">Close</button>
+ </div>
+ </div>
+ </div>
+ </div>
+
+
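Every entry in the timezone dropdown above calls `selected_server_timezone()` with an IANA zone name and returns its result from the `onclick` handler, so a `false` return suppresses the dummy `href=#` navigation. A minimal sketch of what such a handler plausibly does, wired to the `timezone_error_message` span visible in the markup; the storage key and the validation approach are illustrative assumptions, not the dashboard's actual implementation:

```javascript
// Hypothetical handler: validate the zone name, remember the choice,
// and surface errors in the #timezone_error_message span from the markup.
function selected_server_timezone(timezone) {
    var message = document.getElementById('timezone_error_message');
    try {
        // Intl throws a RangeError for zone names the browser does not know.
        new Intl.DateTimeFormat('en', { timeZone: timezone });
        message.innerText = '';
        localStorage.setItem('server_timezone', timezone); // assumed key
    } catch (e) {
        message.innerText = 'Cannot understand timezone: ' + timezone;
    }
    return false; // keep the browser from following href=#
}
```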
+ <div class="modal fade" id="updateModal" tabindex="-1" role="dialog" aria-labelledby="updateModalLabel">
+ <div class="modal-dialog" role="document">
+ <div class="modal-content">
+ <div class="modal-header">
+ <button type="button" class="close" data-dismiss="modal" aria-label="Close"><span aria-hidden="true">&times;</span></button>
+ <h4 class="modal-title" id="updateModalLabel">Update Check</h4>
+ </div>
+ <div class="modal-body">
+ Your netdata version: <b><code><span id="netdataVersion">Unknown</span></code></b><br/>
+ <br/>
+ <div style="padding: 10px;"></div>
+ <div id="versionCheckLog">Not checked yet. Please press the Check Now button.</div>
+ <div>
+ <hr/>
+ </div>
+ <div>
+ For progress reports and key netdata updates: <strong><a href="https://twitter.com/linuxnetdata" target="_blank">follow netdata on <i class="fab fa-twitter"></i> twitter</a></strong>.
+ <br/>
+ You can also <a href="https://www.facebook.com/linuxnetdata/" target="_blank">follow netdata on <i class="fab fa-facebook"></i> facebook</a>,
+ or <a href="https://github.com/netdata/netdata" target="_blank">watch netdata on <i class="fab fa-github"></i> github</a>.
+ </div>
+ </div>
+ <div class="modal-footer">
+ <a href="#" onclick="notifyForUpdate(true); return false;" type="button" class="btn btn-default">Check Now</a>
+ <button type="button" class="btn btn-default" data-dismiss="modal">Close</button>
+ </div>
+ </div>
+ </div>
+ </div>
+
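The Check Now link above calls `notifyForUpdate(true)` and writes its progress into `#versionCheckLog`. A hedged sketch of the kind of check this performs, assuming the latest release tag is fetched from GitHub and compared to the version rendered into `#netdataVersion`; the endpoint choice and the comparison are illustrative, not the dashboard's actual logic:

```javascript
// Hypothetical version check: fetch the latest release tag and compare it
// to the version string shown in #netdataVersion.
async function checkForUpdate() {
    const log = document.getElementById('versionCheckLog');
    log.innerText = 'Checking...';
    try {
        const res = await fetch('https://api.github.com/repos/netdata/netdata/releases/latest');
        const latest = (await res.json()).tag_name; // e.g. "v1.29.3"
        const current = document.getElementById('netdataVersion').innerText;
        log.innerText = (latest === current || latest === 'v' + current)
            ? 'You already run the latest version (' + latest + ').'
            : 'A newer version is available: ' + latest + '.';
    } catch (e) {
        log.innerText = 'Update check failed: ' + e;
    }
}
```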
+ <div class="modal fade" id="signInModal" tabindex="-1" role="dialog" aria-labelledby="signInModalLabel">
+ <div class="modal-dialog" role="document">
+ <div class="modal-content">
+ <div class="modal-header">
+ <button type="button" class="close" data-dismiss="modal" aria-label="Close"><span aria-hidden="true">&times;</span></button>
+ <h4 class="modal-title" id="signInModalLabel">Sign In</h4>
+ </div>
+ <div class="modal-body">
+ <p>
+                            Signing in to netdata.cloud will synchronize the list of
+                            your netdata monitored nodes known to the registry at
+                            <strong><span id="sim-registry"></span></strong>. This
+                            may include server hostnames, URLs and identification
+                            GUIDs.
+ </p>
+ <p>
+ After you upgrade all your netdata servers, your private
+ registry will not be needed any more.
+ </p>
+ <p>
+ Are you sure you want to proceed?
+ </p>
+ </div>
+ <div class="modal-footer">
+ <button type="button" class="btn btn-default" data-dismiss="modal">Cancel</button>
+ <a href="#" onclick="explicitlySignIn(); return false;" type="button" class="btn btn-success">Sign In</a>
+ </div>
+ </div>
+ </div>
+ </div>
+
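The sign-in text above (and the synchronize modal that follows) spells out what actually moves to netdata.cloud: the per-person node list a private registry keeps for this browser. An illustrative sketch of that payload's shape, with all field names assumed for the example:

```javascript
// Hypothetical shape of the node list a registry keeps per person GUID;
// this is what a cloud sign-in or registry synchronization would transfer.
const nodesKnownToRegistry = [
    {
        hostname: 'web-server-01',                          // server hostname
        urls: ['http://web-server-01:19999/'],              // URLs it was reached at
        machineGuid: '11111111-2222-3333-4444-555555555555' // identifies the agent, not the user
    }
];
```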
+ <div class="modal fade" id="syncRegistryModal" tabindex="-1" role="dialog" aria-labelledby="syncRegistryModalLabel">
+ <div class="modal-dialog" role="document">
+ <div class="modal-content">
+ <div class="modal-header">
+ <button type="button" class="close" data-dismiss="modal" aria-label="Close"><span aria-hidden="true">&times;</span></button>
+ <h4 class="modal-title" id="syncRegistryModalLabel">Synchronize netdata.cloud with registry?</h4>
+ </div>
+ <div class="modal-body">
+ <p>
+ You are about to synchronize your netdata.cloud account with data from the registry at <strong><span id="sync-registry-modal-registry"></span></strong>.
+                            This may include server hostnames, URLs and identification GUIDs.
+ </p>
+ <p>
+ Are you sure you want to proceed?
+ </p>
+ </div>
+ <div class="modal-footer">
+ <button type="button" class="btn btn-default" data-dismiss="modal">Cancel</button>
+ <a href="#" onclick="explicitlySyncAgents(); return false;" type="button" class="btn btn-success">Synchronize</a>
+ </div>
+ </div>
+ </div>
+ </div>
+
+ <div class="modal fade" id="deleteRegistryModal" tabindex="-1" role="dialog" aria-labelledby="deleteRegistryModalLabel">
+ <div class="modal-dialog" role="document">
+ <div class="modal-content">
+ <div class="modal-header">
+ <button type="button" class="close" data-dismiss="modal" aria-label="Close"><span aria-hidden="true">&times;</span></button>
+ <h4 class="modal-title" id="deleteRegistryModalLabel">Delete <span id="deleteRegistryServerName"></span>?</h4>
+ </div>
+ <div class="modal-body">
+ You are about to delete, from your personal list of netdata servers, the following server:
+ <p style="text-align: center; padding-top: 10px; padding-bottom: 10px; line-height: 2;">
+ <b><span id="deleteRegistryServerName2"></span></b>
+ <br/>
+ <b><span id="deleteRegistryServerURL"></span></b>
+ </p>
+ Are you sure you want to do this?
+ <br/>
+ <div style="padding: 10px;"></div>
+                        <small>Keep in mind that this server will be added back if and when you visit it again.</small>
+ <br/>
+ <div id="deleteRegistryResponse" style="display: block; width: 100%; text-align: center; padding-top: 20px;"></div>
+ </div>
+ <div class="modal-footer">
+ <button type="button" class="btn btn-success" data-dismiss="modal">keep it</button>
+ <a href="#" onclick="notifyForDeleteRegistry(); return false;" type="button" class="btn btn-danger">delete it</a>
+ </div>
+ </div>
+ </div>
+ </div>
+
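The delete button wires to `notifyForDeleteRegistry()`, which ultimately asks the registry to drop one URL from this person's list; the small print above explains why the entry returns, since visiting the server simply re-registers it. A sketch of such a call, assuming the registry's conventional `action=delete` query interface; treat the exact endpoint and parameter names as assumptions:

```javascript
// Hypothetical registry delete: remove one URL from this person's node list.
// Cookies identify the person, so credentials must be included.
function deleteFromRegistry(registryUrl, urlToDelete) {
    return fetch(registryUrl + '/api/v1/registry?action=delete&delete_url='
                 + encodeURIComponent(urlToDelete),
                 { credentials: 'include' })
        .then(function (res) { return res.json(); });
}
```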
+ <div class="modal fade" id="switchRegistryModal" tabindex="-1" role="dialog" aria-labelledby="switchRegistryModalLabel">
+ <div class="modal-dialog" role="document">
+ <div class="modal-content">
+ <div class="modal-header">
+ <button type="button" class="close" data-dismiss="modal" aria-label="Close"><span aria-hidden="true">&times;</span></button>
+ <h4 class="modal-title" id="switchRegistryModalLabel">Switch Netdata Registry Identity</h4>
+ </div>
+ <div class="modal-body">
+ You can copy and paste the following ID to all your browsers (e.g. work and home).
+ <br/>
+ All the browsers with the same ID will identify <b>you</b>, so please don't share this with others.
+ <div style="text-align: center; padding-top: 10px; padding-bottom: 10px; line-height: 2;">
+ <form action="#">
+ <input type="text" class="form-control" id="switchRegistryPersonGUID" placeholder="your personal ID" maxlength="36" autocomplete="off" style="text-align: center; font-size: 1.4em;">
+ </form>
+ </div>
+                        Either copy this ID and paste it into another browser, or paste here the ID you have taken from another browser.
+ <div style="padding-top: 10px;"><small>
+ Keep in mind that:
+ <ul>
+ <li>when you switch ID, your previous ID will be lost forever - this is irreversible.</li>
+                            <li>both IDs (the old one and the new one) must have this netdata in their personal lists.</li>
+ <li>both IDs have to be known by the registry: <b><span id="switchRegistryURL"></span></b>.</li>
+ <li>to get a new ID, just clear your browser cookies.</li>
+ </ul>
+ </small></div>
+ <div id="switchRegistryResponse" style="display: block; width: 100%; text-align: center; padding-top: 20px;"></div>
+ </div>
+ <div class="modal-footer">
+ <button type="button" class="btn btn-success" data-dismiss="modal">cancel</button>
+ <a href="#" onclick="notifyForSwitchRegistry(); return false;" type="button" class="btn btn-danger">impersonate</a>
+ </div>
+ </div>
+ </div>
+ </div>
+
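The modal above describes the registry's identity model: the 36-character person GUID in the browser cookie is the identity, so pasting the same GUID into several browsers merges them into one person. A sketch of the switch flow behind the impersonate button, reading the input field from the markup above; the `action=switch` endpoint shape is an assumption:

```javascript
// Hypothetical identity switch: validate the pasted GUID, then ask the
// registry to re-point this browser's cookie to it.
function switchRegistryIdentity(registryUrl) {
    const guid = document.getElementById('switchRegistryPersonGUID').value.trim();
    const valid = /^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$/i.test(guid);
    if (!valid) {
        document.getElementById('switchRegistryResponse').innerText = 'Not a valid GUID.';
        return;
    }
    return fetch(registryUrl + '/api/v1/registry?action=switch&to=' + guid,
                 { credentials: 'include' });
}
```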
+ <div class="modal fade" id="gotoServerModal" tabindex="-1" role="dialog" aria-labelledby="gotoServerModalLabel">
+ <div class="modal-dialog" role="document">
+ <div class="modal-content">
+ <div class="modal-header">
+ <button type="button" class="close" data-dismiss="modal" aria-label="Close"><span aria-hidden="true">&times;</span></button>
+ <h4 class="modal-title" id="gotoServerModalLabel"><span id="gotoServerName"></span></h4>
+ </div>
+ <div class="modal-body">
+ Checking known URLs for this server...
+ <div style="padding-top: 20px;">
+ <table id="gotoServerList">
+ </table>
+ </div>
+ <p style="padding-top: 10px;"><small>
+ Checks may fail if you are viewing an HTTPS page and the server to be checked is HTTP only.
+ </small></p>
+ <div id="gotoServerResponse" style="display: block; width: 100%; text-align: center; padding-top: 20px;"></div>
+ </div>
+ <div class="modal-footer">
+ <button type="button" class="btn btn-default" data-dismiss="modal">Close</button>
+ </div>
+ </div>
+ </div>
+ </div>
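Each row of `#gotoServerList` represents one known URL being probed. The small print is about mixed-content blocking: a dashboard served over HTTPS cannot issue requests to plain-HTTP URLs, so those probes fail before they ever reach the network. A sketch of a single probe under that constraint; `/api/v1/info` is used here as a plausible lightweight target and should be treated as an assumption:

```javascript
// Hypothetical reachability probe for one candidate URL. HTTPS pages cannot
// fetch plain-HTTP URLs (mixed content), the failure mode the modal warns about.
async function probeServerUrl(url) {
    if (window.location.protocol === 'https:' && url.startsWith('http:')) {
        return { url: url, ok: false, reason: 'blocked as mixed content' };
    }
    try {
        const res = await fetch(url + 'api/v1/info', { mode: 'cors' });
        return { url: url, ok: res.ok };
    } catch (e) {
        return { url: url, ok: false, reason: String(e) };
    }
}
```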
+ <iframe id="ssoifrm" width="0" height="0" style="border: none;"></iframe>
+ <div id="hiddenDownloadLinks" style="display: none;" hidden></div>
+ <script type="text/javascript" src="dashboard.js?v20190902-0"></script>
+</body>
+</html>
diff --git a/web/gui/infographic.html b/web/gui/infographic.html
new file mode 100644
index 0000000..24ff8f4
--- /dev/null
+++ b/web/gui/infographic.html
@@ -0,0 +1,171 @@
+<!doctype html>
+<!-- SPDX-License-Identifier: GPL-3.0-or-later -->
+<html lang=en-us>
+<head>
+ <meta charset=utf-8>
+ <title>NetData: Get control of your Linux Servers. Simple. Effective. Awesome.</title>
+ <meta name=author content="Costa Tsaousis">
+ <meta name=description content="Unparalleled insights, in real-time, of everything happening on your Linux systems and applications, with stunning, interactive web dashboards and powerful performance and health alarms.">
+
+ <meta name=viewport content="width=device-width,initial-scale=1">
+ <link rel=apple-touch-icon href=apple-touch-icon.png>
+ <link rel="icon" href="data:image/x-icon;base64,iVBORw0KGgoAAAANSUhEUgAAACAAAAAgCAYAAABzenr0AAAAGXRFWHRTb2Z0d2FyZQBBZG9iZSBJbWFnZVJlYWR5ccllPAAAAP9JREFUeNpiYBgFo+A/w34gpiZ8DzWzAYgNiHGAA5UdgA73g+2gcyhgg/0DGQoweB6IBQYyFCCOGOBQwBMd/xnW09ERDtgcoEBHB+zHFQrz6egIBUasocDAcJ9OxWAhE4YQI8MDILmATg7wZ8QRDfQKhQf4Cie6pAVGPA4AhQKo0BCgZRAw4ZSBpIWJNI6CD4wEKikBaFqgVSgcYMIrzcjwgcahcIGRiPYCLUPBkNhWUwP9akVcoQBpatG4MsLviAIqWj6f3Absfdq2igg7IIEKDVQKEzN5ofAenJCp1I8gJRTug5tfkGIdR1FDniMI+QZUjF8Amn5htOdHCAAEGACE6B0cS6mrEwAAAABJRU5ErkJggg==" />
+
+ <meta property="og:url" content="https://my-netdata.io/infographic.html" />
+ <meta property="og:type" content="website" />
+ <meta property="og:title" content="netdata infographic" />
+ <meta property="og:description" content="Unparalleled insights, in real-time, of everything happening on your Linux systems and applications, with stunning, interactive web dashboards and powerful performance and health alarms." />
+ <meta property="og:image" content="https://user-images.githubusercontent.com/43294513/60951037-8ba5d180-a2f8-11e9-906e-e27356f168bc.png" />
+ <meta property="og:image:type" content="image/png" />
+ <meta property="fb:app_id" content="1200089276712916" />
+
+ <meta name="twitter:card" content="summary" />
+ <meta name="twitter:site" content="@linuxnetdata" />
+ <meta name="twitter:title" content="netdata infographic" />
+ <meta name="twitter:description" content="Unparalleled insights, in real-time, of everything happening on your Linux systems and applications, with stunning, interactive web dashboards and powerful performance and health alarms." />
+ <meta name="twitter:image" content="https://cloud.githubusercontent.com/assets/2662304/25580009/bf7016a4-2e85-11e7-9a7a-b36c57db7b91.png" />
+
+ <meta name="google-site-verification" content="3Xmk2kyCvai8p9HEnYHoQ9RBW20-b1NvPAgu07Fkkds" />
+ <meta name="msvalidate.01" content="896DCA31C9A664CE359FCF1A645DD476" />
+
+ <style>/*! normalize.css v4.1.1 | MIT License | github.com/necolas/normalize.css */
+ html {
+ line-height: 1.15;
+ -ms-text-size-adjust: 100%;
+ -webkit-text-size-adjust: 100%;
+ font: 17px/1.4 'Open Sans', sans-serif;
+ text-align: center
+ }
+ </style>
+</head>
+<body>
+<div style="width: 100%;text-align:center;">
+ <div>
+ <p style="font-size: 16pt">
+ <b>Interactive infographic of netdata features and functions</b>
+ </p>
+ <p>
+        Hover and click on the infographic to open the related wiki page.
+ <br/>
+ <small>
+ The links and the docs are still a work in progress.
+ The interactive infographic is a feature of <b>draw.io</b>.
+ </small>
+ </p>
+ </div>
+ <div class=site-footer role=contentinfo>
+ <p>
+ <div style="display: inline-block;">
+ <div style="vertical-align:top;display:inline-block; height: 34px;margin-top:3px;"><a class=twitter-share-button href=https://twitter.com/share data-count=none data-lang=en data-via=linuxnetdata data-size=small data-text="Get control of your Linux servers. Simple. Effective. Awesome." data-url=https://my-netdata.io/infographic.html >Tweet</a></div>
+ <div style="vertical-align:top;display:inline-block; height: 34px;margin-top:3px;"><a class=twitter-follow-button href=https://twitter.com/linuxnetdata data-show-count=false data-lang=en data-size=small>Follow @linuxnetdata</a></div>
+ </div>
+ <div style="display: inline-block;">
+ <div class="fb-like" data-href="https://my-netdata.io/" data-layout="button" data-action="like" data-show-faces="false" data-share="false" style="vertical-align:top;display:inline-block; height: 34px;"></div>
+ <div class="fb-share-button" data-href="https://my-netdata.io/" data-layout="button" data-size="small" data-mobile-iframe="true"><a class="fb-xfbml-parse-ignore" target="_blank" href="https://www.facebook.com/sharer/sharer.php?u=https%3A%2F%2Fmy-netdata.io%2Finfographic.html&amp;src=sdkpreparse" style="vertical-align:top;display:inline-block; height: 34px;">Share</a></div>
+ </div>
+ </div>
+ <div>
+ <p style="font-size: 14pt">
+ <b>New to netdata?</b> <a href="//my-netdata.io" target="_blank">Have a look at a netdata demo</a>. You will love it!
+ </p>
+ <p>
+            <embed style="padding-top: 10px; padding-bottom: 25px;" src="//registry.my-netdata.io/api/v1/badge.svg?chart=netdata.registry_entries&dimensions=persons&label=user%20base&units=null&value_color=blue&precision=0&v42" type="image/svg+xml" height="20" />
+            <embed style="padding-top: 10px; padding-bottom: 25px;" src="//registry.my-netdata.io/api/v1/badge.svg?chart=netdata.registry_entries&dimensions=machines&label=servers%20monitored&units=null&value_color=orange&precision=0&v42" type="image/svg+xml" height="20" />
+            <embed style="padding-top: 10px; padding-bottom: 25px;" src="//registry.my-netdata.io/api/v1/badge.svg?chart=netdata.registry_sessions&label=sessions%20served&units=null&value_color=yellowgreen&precision=0&v42" type="image/svg+xml" height="20" />
+ </p>
+ <hr/>
+ </div>
+ <div style="width:90%;display:inline-block;max-width:1300px;text-align:left;">
+ <div id="drawing" class="mxgraph" style="max-width:100%;border:1px solid transparent;"></div>
+ </div>
+</div>
+</body>
+
+<script>
+ var opts = {
+ "highlight":"#0000ff",
+ "target":"blank",
+ "lightbox":false,
+ "nav":false,
+ "resize":true,
+ "toolbar":"",
+ "auto-fit":true,
+ "check-visible-state":false,
+ "edit":"https://raw.githubusercontent.com/netdata/netdata/master/diagrams/netdata-overview.xml",
+ "url":"https://raw.githubusercontent.com/netdata/netdata/master/diagrams/netdata-overview.xml"
+ };
+ document.getElementById("drawing").dataset.mxgraph = JSON.stringify(opts);
+</script>
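In the options above, `url` tells the draw.io viewer where to fetch the diagram XML, `edit` points the edit action at the same file, and `auto-fit`/`resize` control how the drawing fills its container. The `embed2.js` script included near the end of the page scans the DOM for `.mxgraph` elements and parses their `data-mxgraph` attribute as JSON, so the `dataset` assignment is simply a programmatic way of writing that attribute:

```javascript
// Equivalent to the dataset assignment above: the viewer only sees the
// serialized attribute, so inline JSON in the markup would work the same way.
var el = document.getElementById('drawing');
el.setAttribute('data-mxgraph', JSON.stringify({
    'auto-fit': true,
    resize: true,
    url: 'https://raw.githubusercontent.com/netdata/netdata/master/diagrams/netdata-overview.xml'
}));
```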
+
+<script>
+ if(window.location.hostname != 'my-netdata.io' || window.location.protocol != 'https:') {
+ var canonical = document.createElement('link');
+ canonical.rel = 'canonical';
+ canonical.href = 'https://my-netdata.io/infographic.html';
+ document.head.appendChild(canonical);
+ }
+</script>
+
+<script>!function (t, e) {
+ "use strict";
+ function a(t, n) {
+ return t.hasAttribute(n) === !0 ? t : t.parentNode !== r.body ? a(t.parentNode, n) : e
+ }
+
+ function n(n) {
+ var o, i, r, c, g, u = a(n.target, "data-ga-action"), l = !1;
+ u !== e && (o = u.getAttribute("data-ga-action") || e, i = u.getAttribute("data-ga-category") || e, r = u.getAttribute("data-ga-label") || e, c = u.getAttribute("href"), g = parseInt(u.getAttribute("data-ga-value"), 10) || e, ga !== e && i !== e && o !== e && (n.preventDefault(), "Download" !== i && n.ctrlKey !== !0 && n.metaKey !== !0 && 2 !== n.which || (l = !0, t.open(c)), function (a) {
+ var n;
+ ga("send", "event", i, o, r, g, {
+ hitCallback: function () {
+ l === !1 && (n !== e && clearTimeout(n), t.location = a)
+ }
+ }), n = setTimeout(function () {
+ l === !1 && (t.location.href = a)
+ }, 1e3)
+ }(c)))
+ }
+
+ function o() {
+ !function (t, e, a, n, o, i) {
+ t.GoogleAnalyticsObject = n, t[n] || (t[n] = function () {
+ (t[n].q = t[n].q || []).push(arguments)
+ }), t[n].l = +new Date, o = e.createElement(a), i = e.getElementsByTagName(a)[0], o.src = "//www.google-analytics.com/analytics.js", i.parentNode.insertBefore(o, i)
+ }(t, r, "script", "ga"), ga("create", "UA-64295674-3", "auto"), ga("send", "pageview"), t.document.addEventListener("click", n)
+ }
+
+ function i() {
+ !function (t, e, a) {
+ var n, o = t.getElementsByTagName(e)[0];
+ t.getElementById(a) || (n = t.createElement(e), n.id = a, n.src = "//platform.twitter.com/widgets.js", o.parentNode.insertBefore(n, o))
+ }(r, "script", "twitter-wjs")
+ }
+
+ var r = t.document;
+ o(), t.onload = i
+}(window)</script>
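The minified block above does three things: it bootstraps Google Analytics, it loads the Twitter widgets script on `window.onload`, and it installs a delegated click handler that walks up from the click target to the nearest element carrying `data-ga-action`, sends a GA event, and only then navigates, with a one-second timeout so navigation is never lost if the `hitCallback` never fires (downloads and modified clicks open a new window instead). A readable sketch of just the click-tracking pattern, simplified and not a drop-in replacement for the code above:

```javascript
// Delegated outbound-link tracking, deminified for readability.
document.addEventListener('click', function (event) {
    var el = event.target.closest('[data-ga-action]'); // the original walks parents manually
    if (!el || typeof ga === 'undefined') return;

    event.preventDefault();
    var href = el.getAttribute('href');
    var done = false;
    function go() { if (!done) { done = true; window.location.href = href; } }

    ga('send', 'event',
       el.getAttribute('data-ga-category'),
       el.getAttribute('data-ga-action'),
       el.getAttribute('data-ga-label'),
       { hitCallback: go });
    setTimeout(go, 1000); // navigate anyway if analytics.js never calls back
});
```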
+
+<!-- facebook sdk -->
+<div id="fb-root"></div>
+<script>
+ window.fbAsyncInit = function() {
+ FB.init({
+ appId : '1200089276712916',
+ xfbml : true,
+ version : 'v2.8'
+ });
+ };
+
+ (function(d, s, id){
+ var js, fjs = d.getElementsByTagName(s)[0];
+ if (d.getElementById(id)) {return;}
+ js = d.createElement(s); js.id = id;
+ js.src = "//connect.facebook.net/en_US/sdk.js";
+ fjs.parentNode.insertBefore(js, fjs);
+ }(document, 'script', 'facebook-jssdk'));
+</script>
+
+<script type="text/javascript" src="https://www.draw.io/embed2.js?s=arrows2;mscae/cloud;azure;office/users;office/servers&fetch=https%3A%2F%2Fraw.githubusercontent.com%2Fnetdata%2Fnetdata%2Fmaster%2Fdiagrams%2Fnetdata-overview.xml"></script>
+
+</html>
+
diff --git a/web/gui/lib/bootstrap-3.3.7.min.js b/web/gui/lib/bootstrap-3.3.7.min.js
new file mode 100644
index 0000000..03a9716
--- /dev/null
+++ b/web/gui/lib/bootstrap-3.3.7.min.js
@@ -0,0 +1,8 @@
+/*!
+ * Bootstrap v3.3.7 (http://getbootstrap.com)
+ * Copyright 2011-2016 Twitter, Inc.
+ * Licensed under the MIT license
+ * SPDX-License-Identifier: MIT
+ */
+if("undefined"==typeof jQuery)throw new Error("Bootstrap's JavaScript requires jQuery");+function(a){"use strict";var b=a.fn.jquery.split(" ")[0].split(".");if(b[0]<2&&b[1]<9||1==b[0]&&9==b[1]&&b[2]<1||b[0]>3)throw new Error("Bootstrap's JavaScript requires jQuery version 1.9.1 or higher, but lower than version 4")}(jQuery),+function(a){"use strict";function b(){var a=document.createElement("bootstrap"),b={WebkitTransition:"webkitTransitionEnd",MozTransition:"transitionend",OTransition:"oTransitionEnd otransitionend",transition:"transitionend"};for(var c in b)if(void 0!==a.style[c])return{end:b[c]};return!1}a.fn.emulateTransitionEnd=function(b){var c=!1,d=this;a(this).one("bsTransitionEnd",function(){c=!0});var e=function(){c||a(d).trigger(a.support.transition.end)};return setTimeout(e,b),this},a(function(){a.support.transition=b(),a.support.transition&&(a.event.special.bsTransitionEnd={bindType:a.support.transition.end,delegateType:a.support.transition.end,handle:function(b){if(a(b.target).is(this))return b.handleObj.handler.apply(this,arguments)}})})}(jQuery),+function(a){"use strict";function b(b){return this.each(function(){var c=a(this),e=c.data("bs.alert");e||c.data("bs.alert",e=new d(this)),"string"==typeof b&&e[b].call(c)})}var c='[data-dismiss="alert"]',d=function(b){a(b).on("click",c,this.close)};d.VERSION="3.3.7",d.TRANSITION_DURATION=150,d.prototype.close=function(b){function c(){g.detach().trigger("closed.bs.alert").remove()}var e=a(this),f=e.attr("data-target");f||(f=e.attr("href"),f=f&&f.replace(/.*(?=#[^\s]*$)/,""));var g=a("#"===f?[]:f);b&&b.preventDefault(),g.length||(g=e.closest(".alert")),g.trigger(b=a.Event("close.bs.alert")),b.isDefaultPrevented()||(g.removeClass("in"),a.support.transition&&g.hasClass("fade")?g.one("bsTransitionEnd",c).emulateTransitionEnd(d.TRANSITION_DURATION):c())};var e=a.fn.alert;a.fn.alert=b,a.fn.alert.Constructor=d,a.fn.alert.noConflict=function(){return a.fn.alert=e,this},a(document).on("click.bs.alert.data-api",c,d.prototype.close)}(jQuery),+function(a){"use strict";function b(b){return this.each(function(){var d=a(this),e=d.data("bs.button"),f="object"==typeof b&&b;e||d.data("bs.button",e=new c(this,f)),"toggle"==b?e.toggle():b&&e.setState(b)})}var c=function(b,d){this.$element=a(b),this.options=a.extend({},c.DEFAULTS,d),this.isLoading=!1};c.VERSION="3.3.7",c.DEFAULTS={loadingText:"loading..."},c.prototype.setState=function(b){var c="disabled",d=this.$element,e=d.is("input")?"val":"html",f=d.data();b+="Text",null==f.resetText&&d.data("resetText",d[e]()),setTimeout(a.proxy(function(){d[e](null==f[b]?this.options[b]:f[b]),"loadingText"==b?(this.isLoading=!0,d.addClass(c).attr(c,c).prop(c,!0)):this.isLoading&&(this.isLoading=!1,d.removeClass(c).removeAttr(c).prop(c,!1))},this),0)},c.prototype.toggle=function(){var a=!0,b=this.$element.closest('[data-toggle="buttons"]');if(b.length){var c=this.$element.find("input");"radio"==c.prop("type")?(c.prop("checked")&&(a=!1),b.find(".active").removeClass("active"),this.$element.addClass("active")):"checkbox"==c.prop("type")&&(c.prop("checked")!==this.$element.hasClass("active")&&(a=!1),this.$element.toggleClass("active")),c.prop("checked",this.$element.hasClass("active")),a&&c.trigger("change")}else this.$element.attr("aria-pressed",!this.$element.hasClass("active")),this.$element.toggleClass("active")};var d=a.fn.button;a.fn.button=b,a.fn.button.Constructor=c,a.fn.button.noConflict=function(){return a.fn.button=d,this},a(document).on("click.bs.button.data-api",'[data-toggle^="button"]',function(c){var 
d=a(c.target).closest(".btn");b.call(d,"toggle"),a(c.target).is('input[type="radio"], input[type="checkbox"]')||(c.preventDefault(),d.is("input,button")?d.trigger("focus"):d.find("input:visible,button:visible").first().trigger("focus"))}).on("focus.bs.button.data-api blur.bs.button.data-api",'[data-toggle^="button"]',function(b){a(b.target).closest(".btn").toggleClass("focus",/^focus(in)?$/.test(b.type))})}(jQuery),+function(a){"use strict";function b(b){return this.each(function(){var d=a(this),e=d.data("bs.carousel"),f=a.extend({},c.DEFAULTS,d.data(),"object"==typeof b&&b),g="string"==typeof b?b:f.slide;e||d.data("bs.carousel",e=new c(this,f)),"number"==typeof b?e.to(b):g?e[g]():f.interval&&e.pause().cycle()})}var c=function(b,c){this.$element=a(b),this.$indicators=this.$element.find(".carousel-indicators"),this.options=c,this.paused=null,this.sliding=null,this.interval=null,this.$active=null,this.$items=null,this.options.keyboard&&this.$element.on("keydown.bs.carousel",a.proxy(this.keydown,this)),"hover"==this.options.pause&&!("ontouchstart"in document.documentElement)&&this.$element.on("mouseenter.bs.carousel",a.proxy(this.pause,this)).on("mouseleave.bs.carousel",a.proxy(this.cycle,this))};c.VERSION="3.3.7",c.TRANSITION_DURATION=600,c.DEFAULTS={interval:5e3,pause:"hover",wrap:!0,keyboard:!0},c.prototype.keydown=function(a){if(!/input|textarea/i.test(a.target.tagName)){switch(a.which){case 37:this.prev();break;case 39:this.next();break;default:return}a.preventDefault()}},c.prototype.cycle=function(b){return b||(this.paused=!1),this.interval&&clearInterval(this.interval),this.options.interval&&!this.paused&&(this.interval=setInterval(a.proxy(this.next,this),this.options.interval)),this},c.prototype.getItemIndex=function(a){return this.$items=a.parent().children(".item"),this.$items.index(a||this.$active)},c.prototype.getItemForDirection=function(a,b){var c=this.getItemIndex(b),d="prev"==a&&0===c||"next"==a&&c==this.$items.length-1;if(d&&!this.options.wrap)return b;var e="prev"==a?-1:1,f=(c+e)%this.$items.length;return this.$items.eq(f)},c.prototype.to=function(a){var b=this,c=this.getItemIndex(this.$active=this.$element.find(".item.active"));if(!(a>this.$items.length-1||a<0))return this.sliding?this.$element.one("slid.bs.carousel",function(){b.to(a)}):c==a?this.pause().cycle():this.slide(a>c?"next":"prev",this.$items.eq(a))},c.prototype.pause=function(b){return b||(this.paused=!0),this.$element.find(".next, .prev").length&&a.support.transition&&(this.$element.trigger(a.support.transition.end),this.cycle(!0)),this.interval=clearInterval(this.interval),this},c.prototype.next=function(){if(!this.sliding)return this.slide("next")},c.prototype.prev=function(){if(!this.sliding)return this.slide("prev")},c.prototype.slide=function(b,d){var e=this.$element.find(".item.active"),f=d||this.getItemForDirection(b,e),g=this.interval,h="next"==b?"left":"right",i=this;if(f.hasClass("active"))return this.sliding=!1;var j=f[0],k=a.Event("slide.bs.carousel",{relatedTarget:j,direction:h});if(this.$element.trigger(k),!k.isDefaultPrevented()){if(this.sliding=!0,g&&this.pause(),this.$indicators.length){this.$indicators.find(".active").removeClass("active");var l=a(this.$indicators.children()[this.getItemIndex(f)]);l&&l.addClass("active")}var m=a.Event("slid.bs.carousel",{relatedTarget:j,direction:h});return a.support.transition&&this.$element.hasClass("slide")?(f.addClass(b),f[0].offsetWidth,e.addClass(h),f.addClass(h),e.one("bsTransitionEnd",function(){f.removeClass([b,h].join(" 
")).addClass("active"),e.removeClass(["active",h].join(" ")),i.sliding=!1,setTimeout(function(){i.$element.trigger(m)},0)}).emulateTransitionEnd(c.TRANSITION_DURATION)):(e.removeClass("active"),f.addClass("active"),this.sliding=!1,this.$element.trigger(m)),g&&this.cycle(),this}};var d=a.fn.carousel;a.fn.carousel=b,a.fn.carousel.Constructor=c,a.fn.carousel.noConflict=function(){return a.fn.carousel=d,this};var e=function(c){var d,e=a(this),f=a(e.attr("data-target")||(d=e.attr("href"))&&d.replace(/.*(?=#[^\s]+$)/,""));if(f.hasClass("carousel")){var g=a.extend({},f.data(),e.data()),h=e.attr("data-slide-to");h&&(g.interval=!1),b.call(f,g),h&&f.data("bs.carousel").to(h),c.preventDefault()}};a(document).on("click.bs.carousel.data-api","[data-slide]",e).on("click.bs.carousel.data-api","[data-slide-to]",e),a(window).on("load",function(){a('[data-ride="carousel"]').each(function(){var c=a(this);b.call(c,c.data())})})}(jQuery),+function(a){"use strict";function b(b){var c,d=b.attr("data-target")||(c=b.attr("href"))&&c.replace(/.*(?=#[^\s]+$)/,"");return a(d)}function c(b){return this.each(function(){var c=a(this),e=c.data("bs.collapse"),f=a.extend({},d.DEFAULTS,c.data(),"object"==typeof b&&b);!e&&f.toggle&&/show|hide/.test(b)&&(f.toggle=!1),e||c.data("bs.collapse",e=new d(this,f)),"string"==typeof b&&e[b]()})}var d=function(b,c){this.$element=a(b),this.options=a.extend({},d.DEFAULTS,c),this.$trigger=a('[data-toggle="collapse"][href="#'+b.id+'"],[data-toggle="collapse"][data-target="#'+b.id+'"]'),this.transitioning=null,this.options.parent?this.$parent=this.getParent():this.addAriaAndCollapsedClass(this.$element,this.$trigger),this.options.toggle&&this.toggle()};d.VERSION="3.3.7",d.TRANSITION_DURATION=350,d.DEFAULTS={toggle:!0},d.prototype.dimension=function(){var a=this.$element.hasClass("width");return a?"width":"height"},d.prototype.show=function(){if(!this.transitioning&&!this.$element.hasClass("in")){var b,e=this.$parent&&this.$parent.children(".panel").children(".in, .collapsing");if(!(e&&e.length&&(b=e.data("bs.collapse"),b&&b.transitioning))){var f=a.Event("show.bs.collapse");if(this.$element.trigger(f),!f.isDefaultPrevented()){e&&e.length&&(c.call(e,"hide"),b||e.data("bs.collapse",null));var g=this.dimension();this.$element.removeClass("collapse").addClass("collapsing")[g](0).attr("aria-expanded",!0),this.$trigger.removeClass("collapsed").attr("aria-expanded",!0),this.transitioning=1;var h=function(){this.$element.removeClass("collapsing").addClass("collapse in")[g](""),this.transitioning=0,this.$element.trigger("shown.bs.collapse")};if(!a.support.transition)return h.call(this);var i=a.camelCase(["scroll",g].join("-"));this.$element.one("bsTransitionEnd",a.proxy(h,this)).emulateTransitionEnd(d.TRANSITION_DURATION)[g](this.$element[0][i])}}}},d.prototype.hide=function(){if(!this.transitioning&&this.$element.hasClass("in")){var b=a.Event("hide.bs.collapse");if(this.$element.trigger(b),!b.isDefaultPrevented()){var c=this.dimension();this.$element[c](this.$element[c]())[0].offsetHeight,this.$element.addClass("collapsing").removeClass("collapse in").attr("aria-expanded",!1),this.$trigger.addClass("collapsed").attr("aria-expanded",!1),this.transitioning=1;var e=function(){this.transitioning=0,this.$element.removeClass("collapsing").addClass("collapse").trigger("hidden.bs.collapse")};return a.support.transition?void 
this.$element[c](0).one("bsTransitionEnd",a.proxy(e,this)).emulateTransitionEnd(d.TRANSITION_DURATION):e.call(this)}}},d.prototype.toggle=function(){this[this.$element.hasClass("in")?"hide":"show"]()},d.prototype.getParent=function(){return a(this.options.parent).find('[data-toggle="collapse"][data-parent="'+this.options.parent+'"]').each(a.proxy(function(c,d){var e=a(d);this.addAriaAndCollapsedClass(b(e),e)},this)).end()},d.prototype.addAriaAndCollapsedClass=function(a,b){var c=a.hasClass("in");a.attr("aria-expanded",c),b.toggleClass("collapsed",!c).attr("aria-expanded",c)};var e=a.fn.collapse;a.fn.collapse=c,a.fn.collapse.Constructor=d,a.fn.collapse.noConflict=function(){return a.fn.collapse=e,this},a(document).on("click.bs.collapse.data-api",'[data-toggle="collapse"]',function(d){var e=a(this);e.attr("data-target")||d.preventDefault();var f=b(e),g=f.data("bs.collapse"),h=g?"toggle":e.data();c.call(f,h)})}(jQuery),+function(a){"use strict";function b(b){var c=b.attr("data-target");c||(c=b.attr("href"),c=c&&/#[A-Za-z]/.test(c)&&c.replace(/.*(?=#[^\s]*$)/,""));var d=c&&a(c);return d&&d.length?d:b.parent()}function c(c){c&&3===c.which||(a(e).remove(),a(f).each(function(){var d=a(this),e=b(d),f={relatedTarget:this};e.hasClass("open")&&(c&&"click"==c.type&&/input|textarea/i.test(c.target.tagName)&&a.contains(e[0],c.target)||(e.trigger(c=a.Event("hide.bs.dropdown",f)),c.isDefaultPrevented()||(d.attr("aria-expanded","false"),e.removeClass("open").trigger(a.Event("hidden.bs.dropdown",f)))))}))}function d(b){return this.each(function(){var c=a(this),d=c.data("bs.dropdown");d||c.data("bs.dropdown",d=new g(this)),"string"==typeof b&&d[b].call(c)})}var e=".dropdown-backdrop",f='[data-toggle="dropdown"]',g=function(b){a(b).on("click.bs.dropdown",this.toggle)};g.VERSION="3.3.7",g.prototype.toggle=function(d){var e=a(this);if(!e.is(".disabled, :disabled")){var f=b(e),g=f.hasClass("open");if(c(),!g){"ontouchstart"in document.documentElement&&!f.closest(".navbar-nav").length&&a(document.createElement("div")).addClass("dropdown-backdrop").insertAfter(a(this)).on("click",c);var h={relatedTarget:this};if(f.trigger(d=a.Event("show.bs.dropdown",h)),d.isDefaultPrevented())return;e.trigger("focus").attr("aria-expanded","true"),f.toggleClass("open").trigger(a.Event("shown.bs.dropdown",h))}return!1}},g.prototype.keydown=function(c){if(/(38|40|27|32)/.test(c.which)&&!/input|textarea/i.test(c.target.tagName)){var d=a(this);if(c.preventDefault(),c.stopPropagation(),!d.is(".disabled, :disabled")){var e=b(d),g=e.hasClass("open");if(!g&&27!=c.which||g&&27==c.which)return 27==c.which&&e.find(f).trigger("focus"),d.trigger("click");var h=" li:not(.disabled):visible a",i=e.find(".dropdown-menu"+h);if(i.length){var j=i.index(c.target);38==c.which&&j>0&&j--,40==c.which&&j<i.length-1&&j++,~j||(j=0),i.eq(j).trigger("focus")}}}};var h=a.fn.dropdown;a.fn.dropdown=d,a.fn.dropdown.Constructor=g,a.fn.dropdown.noConflict=function(){return a.fn.dropdown=h,this},a(document).on("click.bs.dropdown.data-api",c).on("click.bs.dropdown.data-api",".dropdown form",function(a){a.stopPropagation()}).on("click.bs.dropdown.data-api",f,g.prototype.toggle).on("keydown.bs.dropdown.data-api",f,g.prototype.keydown).on("keydown.bs.dropdown.data-api",".dropdown-menu",g.prototype.keydown)}(jQuery),+function(a){"use strict";function b(b,d){return this.each(function(){var e=a(this),f=e.data("bs.modal"),g=a.extend({},c.DEFAULTS,e.data(),"object"==typeof b&&b);f||e.data("bs.modal",f=new c(this,g)),"string"==typeof b?f[b](d):g.show&&f.show(d)})}var 
c=function(b,c){this.options=c,this.$body=a(document.body),this.$element=a(b),this.$dialog=this.$element.find(".modal-dialog"),this.$backdrop=null,this.isShown=null,this.originalBodyPad=null,this.scrollbarWidth=0,this.ignoreBackdropClick=!1,this.options.remote&&this.$element.find(".modal-content").load(this.options.remote,a.proxy(function(){this.$element.trigger("loaded.bs.modal")},this))};c.VERSION="3.3.7",c.TRANSITION_DURATION=300,c.BACKDROP_TRANSITION_DURATION=150,c.DEFAULTS={backdrop:!0,keyboard:!0,show:!0},c.prototype.toggle=function(a){return this.isShown?this.hide():this.show(a)},c.prototype.show=function(b){var d=this,e=a.Event("show.bs.modal",{relatedTarget:b});this.$element.trigger(e),this.isShown||e.isDefaultPrevented()||(this.isShown=!0,this.checkScrollbar(),this.setScrollbar(),this.$body.addClass("modal-open"),this.escape(),this.resize(),this.$element.on("click.dismiss.bs.modal",'[data-dismiss="modal"]',a.proxy(this.hide,this)),this.$dialog.on("mousedown.dismiss.bs.modal",function(){d.$element.one("mouseup.dismiss.bs.modal",function(b){a(b.target).is(d.$element)&&(d.ignoreBackdropClick=!0)})}),this.backdrop(function(){var e=a.support.transition&&d.$element.hasClass("fade");d.$element.parent().length||d.$element.appendTo(d.$body),d.$element.show().scrollTop(0),d.adjustDialog(),e&&d.$element[0].offsetWidth,d.$element.addClass("in"),d.enforceFocus();var f=a.Event("shown.bs.modal",{relatedTarget:b});e?d.$dialog.one("bsTransitionEnd",function(){d.$element.trigger("focus").trigger(f)}).emulateTransitionEnd(c.TRANSITION_DURATION):d.$element.trigger("focus").trigger(f)}))},c.prototype.hide=function(b){b&&b.preventDefault(),b=a.Event("hide.bs.modal"),this.$element.trigger(b),this.isShown&&!b.isDefaultPrevented()&&(this.isShown=!1,this.escape(),this.resize(),a(document).off("focusin.bs.modal"),this.$element.removeClass("in").off("click.dismiss.bs.modal").off("mouseup.dismiss.bs.modal"),this.$dialog.off("mousedown.dismiss.bs.modal"),a.support.transition&&this.$element.hasClass("fade")?this.$element.one("bsTransitionEnd",a.proxy(this.hideModal,this)).emulateTransitionEnd(c.TRANSITION_DURATION):this.hideModal())},c.prototype.enforceFocus=function(){a(document).off("focusin.bs.modal").on("focusin.bs.modal",a.proxy(function(a){document===a.target||this.$element[0]===a.target||this.$element.has(a.target).length||this.$element.trigger("focus")},this))},c.prototype.escape=function(){this.isShown&&this.options.keyboard?this.$element.on("keydown.dismiss.bs.modal",a.proxy(function(a){27==a.which&&this.hide()},this)):this.isShown||this.$element.off("keydown.dismiss.bs.modal")},c.prototype.resize=function(){this.isShown?a(window).on("resize.bs.modal",a.proxy(this.handleUpdate,this)):a(window).off("resize.bs.modal")},c.prototype.hideModal=function(){var a=this;this.$element.hide(),this.backdrop(function(){a.$body.removeClass("modal-open"),a.resetAdjustments(),a.resetScrollbar(),a.$element.trigger("hidden.bs.modal")})},c.prototype.removeBackdrop=function(){this.$backdrop&&this.$backdrop.remove(),this.$backdrop=null},c.prototype.backdrop=function(b){var d=this,e=this.$element.hasClass("fade")?"fade":"";if(this.isShown&&this.options.backdrop){var f=a.support.transition&&e;if(this.$backdrop=a(document.createElement("div")).addClass("modal-backdrop "+e).appendTo(this.$body),this.$element.on("click.dismiss.bs.modal",a.proxy(function(a){return 
this.ignoreBackdropClick?void(this.ignoreBackdropClick=!1):void(a.target===a.currentTarget&&("static"==this.options.backdrop?this.$element[0].focus():this.hide()))},this)),f&&this.$backdrop[0].offsetWidth,this.$backdrop.addClass("in"),!b)return;f?this.$backdrop.one("bsTransitionEnd",b).emulateTransitionEnd(c.BACKDROP_TRANSITION_DURATION):b()}else if(!this.isShown&&this.$backdrop){this.$backdrop.removeClass("in");var g=function(){d.removeBackdrop(),b&&b()};a.support.transition&&this.$element.hasClass("fade")?this.$backdrop.one("bsTransitionEnd",g).emulateTransitionEnd(c.BACKDROP_TRANSITION_DURATION):g()}else b&&b()},c.prototype.handleUpdate=function(){this.adjustDialog()},c.prototype.adjustDialog=function(){var a=this.$element[0].scrollHeight>document.documentElement.clientHeight;this.$element.css({paddingLeft:!this.bodyIsOverflowing&&a?this.scrollbarWidth:"",paddingRight:this.bodyIsOverflowing&&!a?this.scrollbarWidth:""})},c.prototype.resetAdjustments=function(){this.$element.css({paddingLeft:"",paddingRight:""})},c.prototype.checkScrollbar=function(){var a=window.innerWidth;if(!a){var b=document.documentElement.getBoundingClientRect();a=b.right-Math.abs(b.left)}this.bodyIsOverflowing=document.body.clientWidth<a,this.scrollbarWidth=this.measureScrollbar()},c.prototype.setScrollbar=function(){var a=parseInt(this.$body.css("padding-right")||0,10);this.originalBodyPad=document.body.style.paddingRight||"",this.bodyIsOverflowing&&this.$body.css("padding-right",a+this.scrollbarWidth)},c.prototype.resetScrollbar=function(){this.$body.css("padding-right",this.originalBodyPad)},c.prototype.measureScrollbar=function(){var a=document.createElement("div");a.className="modal-scrollbar-measure",this.$body.append(a);var b=a.offsetWidth-a.clientWidth;return this.$body[0].removeChild(a),b};var d=a.fn.modal;a.fn.modal=b,a.fn.modal.Constructor=c,a.fn.modal.noConflict=function(){return a.fn.modal=d,this},a(document).on("click.bs.modal.data-api",'[data-toggle="modal"]',function(c){var d=a(this),e=d.attr("href"),f=a(d.attr("data-target")||e&&e.replace(/.*(?=#[^\s]+$)/,"")),g=f.data("bs.modal")?"toggle":a.extend({remote:!/#/.test(e)&&e},f.data(),d.data());d.is("a")&&c.preventDefault(),f.one("show.bs.modal",function(a){a.isDefaultPrevented()||f.one("hidden.bs.modal",function(){d.is(":visible")&&d.trigger("focus")})}),b.call(f,g,this)})}(jQuery),+function(a){"use strict";function b(b){return this.each(function(){var d=a(this),e=d.data("bs.tooltip"),f="object"==typeof b&&b;!e&&/destroy|hide/.test(b)||(e||d.data("bs.tooltip",e=new c(this,f)),"string"==typeof b&&e[b]())})}var c=function(a,b){this.type=null,this.options=null,this.enabled=null,this.timeout=null,this.hoverState=null,this.$element=null,this.inState=null,this.init("tooltip",a,b)};c.VERSION="3.3.7",c.TRANSITION_DURATION=150,c.DEFAULTS={animation:!0,placement:"top",selector:!1,template:'<div class="tooltip" role="tooltip"><div class="tooltip-arrow"></div><div class="tooltip-inner"></div></div>',trigger:"hover focus",title:"",delay:0,html:!1,container:!1,viewport:{selector:"body",padding:0}},c.prototype.init=function(b,c,d){if(this.enabled=!0,this.type=b,this.$element=a(c),this.options=this.getOptions(d),this.$viewport=this.options.viewport&&a(a.isFunction(this.options.viewport)?this.options.viewport.call(this,this.$element):this.options.viewport.selector||this.options.viewport),this.inState={click:!1,hover:!1,focus:!1},this.$element[0]instanceof document.constructor&&!this.options.selector)throw new Error("`selector` option must be specified when 
initializing "+this.type+" on the window.document object!");for(var e=this.options.trigger.split(" "),f=e.length;f--;){var g=e[f];if("click"==g)this.$element.on("click."+this.type,this.options.selector,a.proxy(this.toggle,this));else if("manual"!=g){var h="hover"==g?"mouseenter":"focusin",i="hover"==g?"mouseleave":"focusout";this.$element.on(h+"."+this.type,this.options.selector,a.proxy(this.enter,this)),this.$element.on(i+"."+this.type,this.options.selector,a.proxy(this.leave,this))}}this.options.selector?this._options=a.extend({},this.options,{trigger:"manual",selector:""}):this.fixTitle()},c.prototype.getDefaults=function(){return c.DEFAULTS},c.prototype.getOptions=function(b){return b=a.extend({},this.getDefaults(),this.$element.data(),b),b.delay&&"number"==typeof b.delay&&(b.delay={show:b.delay,hide:b.delay}),b},c.prototype.getDelegateOptions=function(){var b={},c=this.getDefaults();return this._options&&a.each(this._options,function(a,d){c[a]!=d&&(b[a]=d)}),b},c.prototype.enter=function(b){var c=b instanceof this.constructor?b:a(b.currentTarget).data("bs."+this.type);return c||(c=new this.constructor(b.currentTarget,this.getDelegateOptions()),a(b.currentTarget).data("bs."+this.type,c)),b instanceof a.Event&&(c.inState["focusin"==b.type?"focus":"hover"]=!0),c.tip().hasClass("in")||"in"==c.hoverState?void(c.hoverState="in"):(clearTimeout(c.timeout),c.hoverState="in",c.options.delay&&c.options.delay.show?void(c.timeout=setTimeout(function(){"in"==c.hoverState&&c.show()},c.options.delay.show)):c.show())},c.prototype.isInStateTrue=function(){for(var a in this.inState)if(this.inState[a])return!0;return!1},c.prototype.leave=function(b){var c=b instanceof this.constructor?b:a(b.currentTarget).data("bs."+this.type);if(c||(c=new this.constructor(b.currentTarget,this.getDelegateOptions()),a(b.currentTarget).data("bs."+this.type,c)),b instanceof a.Event&&(c.inState["focusout"==b.type?"focus":"hover"]=!1),!c.isInStateTrue())return clearTimeout(c.timeout),c.hoverState="out",c.options.delay&&c.options.delay.hide?void(c.timeout=setTimeout(function(){"out"==c.hoverState&&c.hide()},c.options.delay.hide)):c.hide()},c.prototype.show=function(){var b=a.Event("show.bs."+this.type);if(this.hasContent()&&this.enabled){this.$element.trigger(b);var d=a.contains(this.$element[0].ownerDocument.documentElement,this.$element[0]);if(b.isDefaultPrevented()||!d)return;var e=this,f=this.tip(),g=this.getUID(this.type);this.setContent(),f.attr("id",g),this.$element.attr("aria-describedby",g),this.options.animation&&f.addClass("fade");var h="function"==typeof this.options.placement?this.options.placement.call(this,f[0],this.$element[0]):this.options.placement,i=/\s?auto?\s?/i,j=i.test(h);j&&(h=h.replace(i,"")||"top"),f.detach().css({top:0,left:0,display:"block"}).addClass(h).data("bs."+this.type,this),this.options.container?f.appendTo(this.options.container):f.insertAfter(this.$element),this.$element.trigger("inserted.bs."+this.type);var k=this.getPosition(),l=f[0].offsetWidth,m=f[0].offsetHeight;if(j){var n=h,o=this.getPosition(this.$viewport);h="bottom"==h&&k.bottom+m>o.bottom?"top":"top"==h&&k.top-m<o.top?"bottom":"right"==h&&k.right+l>o.width?"left":"left"==h&&k.left-l<o.left?"right":h,f.removeClass(n).addClass(h)}var p=this.getCalculatedOffset(h,k,l,m);this.applyPlacement(p,h);var q=function(){var 
a=e.hoverState;e.$element.trigger("shown.bs."+e.type),e.hoverState=null,"out"==a&&e.leave(e)};a.support.transition&&this.$tip.hasClass("fade")?f.one("bsTransitionEnd",q).emulateTransitionEnd(c.TRANSITION_DURATION):q()}},c.prototype.applyPlacement=function(b,c){var d=this.tip(),e=d[0].offsetWidth,f=d[0].offsetHeight,g=parseInt(d.css("margin-top"),10),h=parseInt(d.css("margin-left"),10);isNaN(g)&&(g=0),isNaN(h)&&(h=0),b.top+=g,b.left+=h,a.offset.setOffset(d[0],a.extend({using:function(a){d.css({top:Math.round(a.top),left:Math.round(a.left)})}},b),0),d.addClass("in");var i=d[0].offsetWidth,j=d[0].offsetHeight;"top"==c&&j!=f&&(b.top=b.top+f-j);var k=this.getViewportAdjustedDelta(c,b,i,j);k.left?b.left+=k.left:b.top+=k.top;var l=/top|bottom/.test(c),m=l?2*k.left-e+i:2*k.top-f+j,n=l?"offsetWidth":"offsetHeight";d.offset(b),this.replaceArrow(m,d[0][n],l)},c.prototype.replaceArrow=function(a,b,c){this.arrow().css(c?"left":"top",50*(1-a/b)+"%").css(c?"top":"left","")},c.prototype.setContent=function(){var a=this.tip(),b=this.getTitle();a.find(".tooltip-inner")[this.options.html?"html":"text"](b),a.removeClass("fade in top bottom left right")},c.prototype.hide=function(b){function d(){"in"!=e.hoverState&&f.detach(),e.$element&&e.$element.removeAttr("aria-describedby").trigger("hidden.bs."+e.type),b&&b()}var e=this,f=a(this.$tip),g=a.Event("hide.bs."+this.type);if(this.$element.trigger(g),!g.isDefaultPrevented())return f.removeClass("in"),a.support.transition&&f.hasClass("fade")?f.one("bsTransitionEnd",d).emulateTransitionEnd(c.TRANSITION_DURATION):d(),this.hoverState=null,this},c.prototype.fixTitle=function(){var a=this.$element;(a.attr("title")||"string"!=typeof a.attr("data-original-title"))&&a.attr("data-original-title",a.attr("title")||"").attr("title","")},c.prototype.hasContent=function(){return this.getTitle()},c.prototype.getPosition=function(b){b=b||this.$element;var c=b[0],d="BODY"==c.tagName,e=c.getBoundingClientRect();null==e.width&&(e=a.extend({},e,{width:e.right-e.left,height:e.bottom-e.top}));var f=window.SVGElement&&c instanceof window.SVGElement,g=d?{top:0,left:0}:f?null:b.offset(),h={scroll:d?document.documentElement.scrollTop||document.body.scrollTop:b.scrollTop()},i=d?{width:a(window).width(),height:a(window).height()}:null;return a.extend({},e,h,i,g)},c.prototype.getCalculatedOffset=function(a,b,c,d){return"bottom"==a?{top:b.top+b.height,left:b.left+b.width/2-c/2}:"top"==a?{top:b.top-d,left:b.left+b.width/2-c/2}:"left"==a?{top:b.top+b.height/2-d/2,left:b.left-c}:{top:b.top+b.height/2-d/2,left:b.left+b.width}},c.prototype.getViewportAdjustedDelta=function(a,b,c,d){var e={top:0,left:0};if(!this.$viewport)return e;var f=this.options.viewport&&this.options.viewport.padding||0,g=this.getPosition(this.$viewport);if(/right|left/.test(a)){var h=b.top-f-g.scroll,i=b.top+f-g.scroll+d;h<g.top?e.top=g.top-h:i>g.top+g.height&&(e.top=g.top+g.height-i)}else{var j=b.left-f,k=b.left+f+c;j<g.left?e.left=g.left-j:k>g.right&&(e.left=g.left+g.width-k)}return e},c.prototype.getTitle=function(){var a,b=this.$element,c=this.options;return a=b.attr("data-original-title")||("function"==typeof c.title?c.title.call(b[0]):c.title)},c.prototype.getUID=function(a){do a+=~~(1e6*Math.random());while(document.getElementById(a));return a},c.prototype.tip=function(){if(!this.$tip&&(this.$tip=a(this.options.template),1!=this.$tip.length))throw new Error(this.type+" `template` option must consist of exactly 1 top-level element!");return this.$tip},c.prototype.arrow=function(){return 
this.$arrow=this.$arrow||this.tip().find(".tooltip-arrow")},c.prototype.enable=function(){this.enabled=!0},c.prototype.disable=function(){this.enabled=!1},c.prototype.toggleEnabled=function(){this.enabled=!this.enabled},c.prototype.toggle=function(b){var c=this;b&&(c=a(b.currentTarget).data("bs."+this.type),c||(c=new this.constructor(b.currentTarget,this.getDelegateOptions()),a(b.currentTarget).data("bs."+this.type,c))),b?(c.inState.click=!c.inState.click,c.isInStateTrue()?c.enter(c):c.leave(c)):c.tip().hasClass("in")?c.leave(c):c.enter(c)},c.prototype.destroy=function(){var a=this;clearTimeout(this.timeout),this.hide(function(){a.$element.off("."+a.type).removeData("bs."+a.type),a.$tip&&a.$tip.detach(),a.$tip=null,a.$arrow=null,a.$viewport=null,a.$element=null})};var d=a.fn.tooltip;a.fn.tooltip=b,a.fn.tooltip.Constructor=c,a.fn.tooltip.noConflict=function(){return a.fn.tooltip=d,this}}(jQuery),+function(a){"use strict";function b(b){return this.each(function(){var d=a(this),e=d.data("bs.popover"),f="object"==typeof b&&b;!e&&/destroy|hide/.test(b)||(e||d.data("bs.popover",e=new c(this,f)),"string"==typeof b&&e[b]())})}var c=function(a,b){this.init("popover",a,b)};if(!a.fn.tooltip)throw new Error("Popover requires tooltip.js");c.VERSION="3.3.7",c.DEFAULTS=a.extend({},a.fn.tooltip.Constructor.DEFAULTS,{placement:"right",trigger:"click",content:"",template:'<div class="popover" role="tooltip"><div class="arrow"></div><h3 class="popover-title"></h3><div class="popover-content"></div></div>'}),c.prototype=a.extend({},a.fn.tooltip.Constructor.prototype),c.prototype.constructor=c,c.prototype.getDefaults=function(){return c.DEFAULTS},c.prototype.setContent=function(){var a=this.tip(),b=this.getTitle(),c=this.getContent();a.find(".popover-title")[this.options.html?"html":"text"](b),a.find(".popover-content").children().detach().end()[this.options.html?"string"==typeof c?"html":"append":"text"](c),a.removeClass("fade top bottom left right in"),a.find(".popover-title").html()||a.find(".popover-title").hide()},c.prototype.hasContent=function(){return this.getTitle()||this.getContent()},c.prototype.getContent=function(){var a=this.$element,b=this.options;return a.attr("data-content")||("function"==typeof b.content?b.content.call(a[0]):b.content)},c.prototype.arrow=function(){return this.$arrow=this.$arrow||this.tip().find(".arrow")};var d=a.fn.popover;a.fn.popover=b,a.fn.popover.Constructor=c,a.fn.popover.noConflict=function(){return a.fn.popover=d,this}}(jQuery),+function(a){"use strict";function b(c,d){this.$body=a(document.body),this.$scrollElement=a(a(c).is(document.body)?window:c),this.options=a.extend({},b.DEFAULTS,d),this.selector=(this.options.target||"")+" .nav li > a",this.offsets=[],this.targets=[],this.activeTarget=null,this.scrollHeight=0,this.$scrollElement.on("scroll.bs.scrollspy",a.proxy(this.process,this)),this.refresh(),this.process()}function c(c){return this.each(function(){var d=a(this),e=d.data("bs.scrollspy"),f="object"==typeof c&&c;e||d.data("bs.scrollspy",e=new b(this,f)),"string"==typeof c&&e[c]()})}b.VERSION="3.3.7",b.DEFAULTS={offset:10},b.prototype.getScrollHeight=function(){return this.$scrollElement[0].scrollHeight||Math.max(this.$body[0].scrollHeight,document.documentElement.scrollHeight)},b.prototype.refresh=function(){var b=this,c="offset",d=0;this.offsets=[],this.targets=[],this.scrollHeight=this.getScrollHeight(),a.isWindow(this.$scrollElement[0])||(c="position",d=this.$scrollElement.scrollTop()),this.$body.find(this.selector).map(function(){var 
b=a(this),e=b.data("target")||b.attr("href"),f=/^#./.test(e)&&a(e);return f&&f.length&&f.is(":visible")&&[[f[c]().top+d,e]]||null}).sort(function(a,b){return a[0]-b[0]}).each(function(){b.offsets.push(this[0]),b.targets.push(this[1])})},b.prototype.process=function(){var a,b=this.$scrollElement.scrollTop()+this.options.offset,c=this.getScrollHeight(),d=this.options.offset+c-this.$scrollElement.height(),e=this.offsets,f=this.targets,g=this.activeTarget;if(this.scrollHeight!=c&&this.refresh(),b>=d)return g!=(a=f[f.length-1])&&this.activate(a);if(g&&b<e[0])return this.activeTarget=null,this.clear();for(a=e.length;a--;)g!=f[a]&&b>=e[a]&&(void 0===e[a+1]||b<e[a+1])&&this.activate(f[a])},b.prototype.activate=function(b){
+this.activeTarget=b,this.clear();var c=this.selector+'[data-target="'+b+'"],'+this.selector+'[href="'+b+'"]',d=a(c).parents("li").addClass("active");d.parent(".dropdown-menu").length&&(d=d.closest("li.dropdown").addClass("active")),d.trigger("activate.bs.scrollspy")},b.prototype.clear=function(){a(this.selector).parentsUntil(this.options.target,".active").removeClass("active")};var d=a.fn.scrollspy;a.fn.scrollspy=c,a.fn.scrollspy.Constructor=b,a.fn.scrollspy.noConflict=function(){return a.fn.scrollspy=d,this},a(window).on("load.bs.scrollspy.data-api",function(){a('[data-spy="scroll"]').each(function(){var b=a(this);c.call(b,b.data())})})}(jQuery),+function(a){"use strict";function b(b){return this.each(function(){var d=a(this),e=d.data("bs.tab");e||d.data("bs.tab",e=new c(this)),"string"==typeof b&&e[b]()})}var c=function(b){this.element=a(b)};c.VERSION="3.3.7",c.TRANSITION_DURATION=150,c.prototype.show=function(){var b=this.element,c=b.closest("ul:not(.dropdown-menu)"),d=b.data("target");if(d||(d=b.attr("href"),d=d&&d.replace(/.*(?=#[^\s]*$)/,"")),!b.parent("li").hasClass("active")){var e=c.find(".active:last a"),f=a.Event("hide.bs.tab",{relatedTarget:b[0]}),g=a.Event("show.bs.tab",{relatedTarget:e[0]});if(e.trigger(f),b.trigger(g),!g.isDefaultPrevented()&&!f.isDefaultPrevented()){var h=a(d);this.activate(b.closest("li"),c),this.activate(h,h.parent(),function(){e.trigger({type:"hidden.bs.tab",relatedTarget:b[0]}),b.trigger({type:"shown.bs.tab",relatedTarget:e[0]})})}}},c.prototype.activate=function(b,d,e){function f(){g.removeClass("active").find("> .dropdown-menu > .active").removeClass("active").end().find('[data-toggle="tab"]').attr("aria-expanded",!1),b.addClass("active").find('[data-toggle="tab"]').attr("aria-expanded",!0),h?(b[0].offsetWidth,b.addClass("in")):b.removeClass("fade"),b.parent(".dropdown-menu").length&&b.closest("li.dropdown").addClass("active").end().find('[data-toggle="tab"]').attr("aria-expanded",!0),e&&e()}var g=d.find("> .active"),h=e&&a.support.transition&&(g.length&&g.hasClass("fade")||!!d.find("> .fade").length);g.length&&h?g.one("bsTransitionEnd",f).emulateTransitionEnd(c.TRANSITION_DURATION):f(),g.removeClass("in")};var d=a.fn.tab;a.fn.tab=b,a.fn.tab.Constructor=c,a.fn.tab.noConflict=function(){return a.fn.tab=d,this};var e=function(c){c.preventDefault(),b.call(a(this),"show")};a(document).on("click.bs.tab.data-api",'[data-toggle="tab"]',e).on("click.bs.tab.data-api",'[data-toggle="pill"]',e)}(jQuery),+function(a){"use strict";function b(b){return this.each(function(){var d=a(this),e=d.data("bs.affix"),f="object"==typeof b&&b;e||d.data("bs.affix",e=new c(this,f)),"string"==typeof b&&e[b]()})}var c=function(b,d){this.options=a.extend({},c.DEFAULTS,d),this.$target=a(this.options.target).on("scroll.bs.affix.data-api",a.proxy(this.checkPosition,this)).on("click.bs.affix.data-api",a.proxy(this.checkPositionWithEventLoop,this)),this.$element=a(b),this.affixed=null,this.unpin=null,this.pinnedOffset=null,this.checkPosition()};c.VERSION="3.3.7",c.RESET="affix affix-top affix-bottom",c.DEFAULTS={offset:0,target:window},c.prototype.getState=function(a,b,c,d){var e=this.$target.scrollTop(),f=this.$element.offset(),g=this.$target.height();if(null!=c&&"top"==this.affixed)return e<c&&"top";if("bottom"==this.affixed)return null!=c?!(e+this.unpin<=f.top)&&"bottom":!(e+g<=a-d)&&"bottom";var h=null==this.affixed,i=h?e:f.top,j=h?g:b;return null!=c&&e<=c?"top":null!=d&&i+j>=a-d&&"bottom"},c.prototype.getPinnedOffset=function(){if(this.pinnedOffset)return 
this.pinnedOffset;this.$element.removeClass(c.RESET).addClass("affix");var a=this.$target.scrollTop(),b=this.$element.offset();return this.pinnedOffset=b.top-a},c.prototype.checkPositionWithEventLoop=function(){setTimeout(a.proxy(this.checkPosition,this),1)},c.prototype.checkPosition=function(){if(this.$element.is(":visible")){var b=this.$element.height(),d=this.options.offset,e=d.top,f=d.bottom,g=Math.max(a(document).height(),a(document.body).height());"object"!=typeof d&&(f=e=d),"function"==typeof e&&(e=d.top(this.$element)),"function"==typeof f&&(f=d.bottom(this.$element));var h=this.getState(g,b,e,f);if(this.affixed!=h){null!=this.unpin&&this.$element.css("top","");var i="affix"+(h?"-"+h:""),j=a.Event(i+".bs.affix");if(this.$element.trigger(j),j.isDefaultPrevented())return;this.affixed=h,this.unpin="bottom"==h?this.getPinnedOffset():null,this.$element.removeClass(c.RESET).addClass(i).trigger(i.replace("affix","affixed")+".bs.affix")}"bottom"==h&&this.$element.offset({top:g-b-f})}};var d=a.fn.affix;a.fn.affix=b,a.fn.affix.Constructor=c,a.fn.affix.noConflict=function(){return a.fn.affix=d,this},a(window).on("load",function(){a('[data-spy="affix"]').each(function(){var c=a(this),d=c.data();d.offset=d.offset||{},null!=d.offsetBottom&&(d.offset.bottom=d.offsetBottom),null!=d.offsetTop&&(d.offset.top=d.offsetTop),b.call(c,d)})})}(jQuery);
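
The file above bundles the standard Bootstrap 3.3.7 jQuery plugins (button, carousel, collapse, dropdown, modal, tooltip, popover, scrollspy, tab, affix). As a rough orientation to the API the minified code defines, here is a minimal usage sketch; the element IDs are hypothetical and the snippet is not part of the patch itself.

// Illustrative sketch only, not part of the diff; element IDs are made up.
$(function () {
  // Tooltips are opt-in in Bootstrap 3: they must be initialized explicitly.
  $('[data-toggle="tooltip"]').tooltip();

  // Passing an options object both configures and shows the modal
  // (c.DEFAULTS above sets show: true).
  $('#settingsModal').modal({ keyboard: true });

  // Plugin events use namespaced names such as shown.bs.modal.
  $('#settingsModal').on('shown.bs.modal', function () {
    console.log('modal is fully visible');
  });

  // A string argument dispatches to the matching prototype method.
  $('#myCarousel').carousel('next');
});
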
diff --git a/web/gui/lib/bootstrap-slider-10.0.0.min.js b/web/gui/lib/bootstrap-slider-10.0.0.min.js
new file mode 100644
index 0000000..87e8349
--- /dev/null
+++ b/web/gui/lib/bootstrap-slider-10.0.0.min.js
@@ -0,0 +1,6 @@
+// SPDX-License-Identifier: MIT
+/*! =======================================================
+ VERSION 10.0.0
+========================================================= */
+"use strict";var _typeof="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(a){return typeof a}:function(a){return a&&"function"==typeof Symbol&&a.constructor===Symbol&&a!==Symbol.prototype?"symbol":typeof a},windowIsDefined="object"===("undefined"==typeof window?"undefined":_typeof(window));!function(a){if("function"==typeof define&&define.amd)define(["jquery"],a);else if("object"===("undefined"==typeof module?"undefined":_typeof(module))&&module.exports){var b;try{b=require("jquery")}catch(c){b=null}module.exports=a(b)}else window&&(window.Slider=a(window.jQuery))}(function(a){var b="slider",c="bootstrapSlider";windowIsDefined&&!window.console&&(window.console={}),windowIsDefined&&!window.console.log&&(window.console.log=function(){}),windowIsDefined&&!window.console.warn&&(window.console.warn=function(){});var d;return function(a){function b(){}function c(a){function c(b){b.prototype.option||(b.prototype.option=function(b){a.isPlainObject(b)&&(this.options=a.extend(!0,this.options,b))})}function e(b,c){a.fn[b]=function(e){if("string"==typeof e){for(var g=d.call(arguments,1),h=0,i=this.length;i>h;h++){var j=this[h],k=a.data(j,b);if(k)if(a.isFunction(k[e])&&"_"!==e.charAt(0)){var l=k[e].apply(k,g);if(void 0!==l&&l!==k)return l}else f("no such method '"+e+"' for "+b+" instance");else f("cannot call methods on "+b+" prior to initialization; attempted to call '"+e+"'")}return this}var m=this.map(function(){var d=a.data(this,b);return d?(d.option(e),d._init()):(d=new c(this,e),a.data(this,b,d)),a(this)});return!m||m.length>1?m:m[0]}}if(a){var f="undefined"==typeof console?b:function(a){console.error(a)};return a.bridget=function(a,b){c(b),e(a,b)},a.bridget}}var d=Array.prototype.slice;c(a)}(a),function(a){function e(b,c){function d(a,b){var c="data-slider-"+b.replace(/_/g,"-"),d=a.getAttribute(c);try{return JSON.parse(d)}catch(e){return d}}this._state={value:null,enabled:null,offset:null,size:null,percentage:null,inDrag:!1,over:!1},this.ticksCallbackMap={},this.handleCallbackMap={},"string"==typeof b?this.element=document.querySelector(b):b instanceof HTMLElement&&(this.element=b),c=c?c:{};for(var e=Object.keys(this.defaultOptions),f=0;f<e.length;f++){var h=e[f],i=c[h];i="undefined"!=typeof i?i:d(this.element,h),i=null!==i?i:this.defaultOptions[h],this.options||(this.options={}),this.options[h]=i}"auto"===this.options.rtl&&(this.options.rtl="rtl"===window.getComputedStyle(this.element).direction),"vertical"!==this.options.orientation||"top"!==this.options.tooltip_position&&"bottom"!==this.options.tooltip_position?"horizontal"!==this.options.orientation||"left"!==this.options.tooltip_position&&"right"!==this.options.tooltip_position||(this.options.tooltip_position="top"):this.options.rtl?this.options.tooltip_position="left":this.options.tooltip_position="right";var j,k,l,m,n,o=this.element.style.width,p=!1,q=this.element.parentNode;if(this.sliderElem)p=!0;else{this.sliderElem=document.createElement("div"),this.sliderElem.className="slider";var r=document.createElement("div");r.className="slider-track",k=document.createElement("div"),k.className="slider-track-low",j=document.createElement("div"),j.className="slider-selection",l=document.createElement("div"),l.className="slider-track-high",m=document.createElement("div"),m.className="slider-handle min-slider-handle",m.setAttribute("role","slider"),m.setAttribute("aria-valuemin",this.options.min),m.setAttribute("aria-valuemax",this.options.max),n=document.createElement("div"),n.className="slider-handle 
max-slider-handle",n.setAttribute("role","slider"),n.setAttribute("aria-valuemin",this.options.min),n.setAttribute("aria-valuemax",this.options.max),r.appendChild(k),r.appendChild(j),r.appendChild(l),this.rangeHighlightElements=[];var s=this.options.rangeHighlights;if(Array.isArray(s)&&s.length>0)for(var t=0;t<s.length;t++){var u=document.createElement("div"),v=s[t]["class"]||"";u.className="slider-rangeHighlight slider-selection "+v,this.rangeHighlightElements.push(u),r.appendChild(u)}var w=Array.isArray(this.options.labelledby);if(w&&this.options.labelledby[0]&&m.setAttribute("aria-labelledby",this.options.labelledby[0]),w&&this.options.labelledby[1]&&n.setAttribute("aria-labelledby",this.options.labelledby[1]),!w&&this.options.labelledby&&(m.setAttribute("aria-labelledby",this.options.labelledby),n.setAttribute("aria-labelledby",this.options.labelledby)),this.ticks=[],Array.isArray(this.options.ticks)&&this.options.ticks.length>0){for(this.ticksContainer=document.createElement("div"),this.ticksContainer.className="slider-tick-container",f=0;f<this.options.ticks.length;f++){var x=document.createElement("div");if(x.className="slider-tick",this.options.ticks_tooltip){var y=this._addTickListener(),z=y.addMouseEnter(this,x,f),A=y.addMouseLeave(this,x);this.ticksCallbackMap[f]={mouseEnter:z,mouseLeave:A}}this.ticks.push(x),this.ticksContainer.appendChild(x)}j.className+=" tick-slider-selection"}if(this.tickLabels=[],Array.isArray(this.options.ticks_labels)&&this.options.ticks_labels.length>0)for(this.tickLabelContainer=document.createElement("div"),this.tickLabelContainer.className="slider-tick-label-container",f=0;f<this.options.ticks_labels.length;f++){var B=document.createElement("div"),C=0===this.options.ticks_positions.length,D=this.options.reversed&&C?this.options.ticks_labels.length-(f+1):f;B.className="slider-tick-label",B.innerHTML=this.options.ticks_labels[D],this.tickLabels.push(B),this.tickLabelContainer.appendChild(B)}var E=function(a){var b=document.createElement("div");b.className="tooltip-arrow";var c=document.createElement("div");c.className="tooltip-inner",a.appendChild(b),a.appendChild(c)},F=document.createElement("div");F.className="tooltip tooltip-main",F.setAttribute("role","presentation"),E(F);var G=document.createElement("div");G.className="tooltip tooltip-min",G.setAttribute("role","presentation"),E(G);var H=document.createElement("div");H.className="tooltip tooltip-max",H.setAttribute("role","presentation"),E(H),this.sliderElem.appendChild(r),this.sliderElem.appendChild(F),this.sliderElem.appendChild(G),this.sliderElem.appendChild(H),this.tickLabelContainer&&this.sliderElem.appendChild(this.tickLabelContainer),this.ticksContainer&&this.sliderElem.appendChild(this.ticksContainer),this.sliderElem.appendChild(m),this.sliderElem.appendChild(n),q.insertBefore(this.sliderElem,this.element),this.element.style.display="none"}if(a&&(this.$element=a(this.element),this.$sliderElem=a(this.sliderElem)),this.eventToCallbackMap={},this.sliderElem.id=this.options.id,this.touchCapable="ontouchstart"in window||window.DocumentTouch&&document instanceof 
window.DocumentTouch,this.touchX=0,this.touchY=0,this.tooltip=this.sliderElem.querySelector(".tooltip-main"),this.tooltipInner=this.tooltip.querySelector(".tooltip-inner"),this.tooltip_min=this.sliderElem.querySelector(".tooltip-min"),this.tooltipInner_min=this.tooltip_min.querySelector(".tooltip-inner"),this.tooltip_max=this.sliderElem.querySelector(".tooltip-max"),this.tooltipInner_max=this.tooltip_max.querySelector(".tooltip-inner"),g[this.options.scale]&&(this.options.scale=g[this.options.scale]),p===!0&&(this._removeClass(this.sliderElem,"slider-horizontal"),this._removeClass(this.sliderElem,"slider-vertical"),this._removeClass(this.sliderElem,"slider-rtl"),this._removeClass(this.tooltip,"hide"),this._removeClass(this.tooltip_min,"hide"),this._removeClass(this.tooltip_max,"hide"),["left","right","top","width","height"].forEach(function(a){this._removeProperty(this.trackLow,a),this._removeProperty(this.trackSelection,a),this._removeProperty(this.trackHigh,a)},this),[this.handle1,this.handle2].forEach(function(a){this._removeProperty(a,"left"),this._removeProperty(a,"right"),this._removeProperty(a,"top")},this),[this.tooltip,this.tooltip_min,this.tooltip_max].forEach(function(a){this._removeProperty(a,"left"),this._removeProperty(a,"right"),this._removeProperty(a,"top"),this._removeClass(a,"right"),this._removeClass(a,"left"),this._removeClass(a,"top")},this)),"vertical"===this.options.orientation?(this._addClass(this.sliderElem,"slider-vertical"),this.stylePos="top",this.mousePos="pageY",this.sizePos="offsetHeight"):(this._addClass(this.sliderElem,"slider-horizontal"),this.sliderElem.style.width=o,this.options.orientation="horizontal",this.options.rtl?this.stylePos="right":this.stylePos="left",this.mousePos="pageX",this.sizePos="offsetWidth"),this.options.rtl&&this._addClass(this.sliderElem,"slider-rtl"),this._setTooltipPosition(),Array.isArray(this.options.ticks)&&this.options.ticks.length>0&&(this.options.max=Math.max.apply(Math,this.options.ticks),this.options.min=Math.min.apply(Math,this.options.ticks)),Array.isArray(this.options.value)?(this.options.range=!0,this._state.value=this.options.value):this.options.range?this._state.value=[this.options.value,this.options.max]:this._state.value=this.options.value,this.trackLow=k||this.trackLow,this.trackSelection=j||this.trackSelection,this.trackHigh=l||this.trackHigh,"none"===this.options.selection?(this._addClass(this.trackLow,"hide"),this._addClass(this.trackSelection,"hide"),this._addClass(this.trackHigh,"hide")):("after"===this.options.selection||"before"===this.options.selection)&&(this._removeClass(this.trackLow,"hide"),this._removeClass(this.trackSelection,"hide"),this._removeClass(this.trackHigh,"hide")),this.handle1=m||this.handle1,this.handle2=n||this.handle2,p===!0)for(this._removeClass(this.handle1,"round triangle"),this._removeClass(this.handle2,"round triangle hide"),f=0;f<this.ticks.length;f++)this._removeClass(this.ticks[f],"round triangle hide");var 
I=["round","triangle","custom"],J=-1!==I.indexOf(this.options.handle);if(J)for(this._addClass(this.handle1,this.options.handle),this._addClass(this.handle2,this.options.handle),f=0;f<this.ticks.length;f++)this._addClass(this.ticks[f],this.options.handle);if(this._state.offset=this._offset(this.sliderElem),this._state.size=this.sliderElem[this.sizePos],this.setValue(this._state.value),this.handle1Keydown=this._keydown.bind(this,0),this.handle1.addEventListener("keydown",this.handle1Keydown,!1),this.handle2Keydown=this._keydown.bind(this,1),this.handle2.addEventListener("keydown",this.handle2Keydown,!1),this.mousedown=this._mousedown.bind(this),this.touchstart=this._touchstart.bind(this),this.touchmove=this._touchmove.bind(this),this.touchCapable){var K=!1;try{var L=Object.defineProperty({},"passive",{get:function(){K=!0}});window.addEventListener("test",null,L)}catch(M){}var N=K?{passive:!0}:!1;this.sliderElem.addEventListener("touchstart",this.touchstart,N),this.sliderElem.addEventListener("touchmove",this.touchmove,N)}if(this.sliderElem.addEventListener("mousedown",this.mousedown,!1),this.resize=this._resize.bind(this),window.addEventListener("resize",this.resize,!1),"hide"===this.options.tooltip)this._addClass(this.tooltip,"hide"),this._addClass(this.tooltip_min,"hide"),this._addClass(this.tooltip_max,"hide");else if("always"===this.options.tooltip)this._showTooltip(),this._alwaysShowTooltip=!0;else{if(this.showTooltip=this._showTooltip.bind(this),this.hideTooltip=this._hideTooltip.bind(this),this.options.ticks_tooltip){var O=this._addTickListener(),P=O.addMouseEnter(this,this.handle1),Q=O.addMouseLeave(this,this.handle1);this.handleCallbackMap.handle1={mouseEnter:P,mouseLeave:Q},P=O.addMouseEnter(this,this.handle2),Q=O.addMouseLeave(this,this.handle2),this.handleCallbackMap.handle2={mouseEnter:P,mouseLeave:Q}}else this.sliderElem.addEventListener("mouseenter",this.showTooltip,!1),this.sliderElem.addEventListener("mouseleave",this.hideTooltip,!1);this.handle1.addEventListener("focus",this.showTooltip,!1),this.handle1.addEventListener("blur",this.hideTooltip,!1),this.handle2.addEventListener("focus",this.showTooltip,!1),this.handle2.addEventListener("blur",this.hideTooltip,!1)}this.options.enabled?this.enable():this.disable()}var f={formatInvalidInputErrorMsg:function(a){return"Invalid input value '"+a+"' passed in"},callingContextNotSliderInstance:"Calling context element does not have instance of Slider bound to it. 
Check your code to make sure the JQuery object returned from the call to the slider() initializer is calling the method"},g={linear:{toValue:function(a){var b=a/100*(this.options.max-this.options.min),c=!0;if(this.options.ticks_positions.length>0){for(var d,e,f,g=0,h=1;h<this.options.ticks_positions.length;h++)if(a<=this.options.ticks_positions[h]){d=this.options.ticks[h-1],f=this.options.ticks_positions[h-1],e=this.options.ticks[h],g=this.options.ticks_positions[h];break}var i=(a-f)/(g-f);b=d+i*(e-d),c=!1}var j=c?this.options.min:0,k=j+Math.round(b/this.options.step)*this.options.step;return k<this.options.min?this.options.min:k>this.options.max?this.options.max:k},toPercentage:function(a){if(this.options.max===this.options.min)return 0;if(this.options.ticks_positions.length>0){for(var b,c,d,e=0,f=0;f<this.options.ticks.length;f++)if(a<=this.options.ticks[f]){b=f>0?this.options.ticks[f-1]:0,d=f>0?this.options.ticks_positions[f-1]:0,c=this.options.ticks[f],e=this.options.ticks_positions[f];break}if(f>0){var g=(a-b)/(c-b);return d+g*(e-d)}}return 100*(a-this.options.min)/(this.options.max-this.options.min)}},logarithmic:{toValue:function(a){var b=0===this.options.min?0:Math.log(this.options.min),c=Math.log(this.options.max),d=Math.exp(b+(c-b)*a/100);return Math.round(d)===this.options.max?this.options.max:(d=this.options.min+Math.round((d-this.options.min)/this.options.step)*this.options.step,d<this.options.min?this.options.min:d>this.options.max?this.options.max:d)},toPercentage:function(a){if(this.options.max===this.options.min)return 0;var b=Math.log(this.options.max),c=0===this.options.min?0:Math.log(this.options.min),d=0===a?0:Math.log(a);return 100*(d-c)/(b-c)}}};if(d=function(a,b){return e.call(this,a,b),this},d.prototype={_init:function(){},constructor:d,defaultOptions:{id:"",min:0,max:10,step:1,precision:0,orientation:"horizontal",value:5,range:!1,selection:"before",tooltip:"show",tooltip_split:!1,handle:"round",reversed:!1,rtl:"auto",enabled:!0,formatter:function(a){return Array.isArray(a)?a[0]+" : "+a[1]:a},natural_arrow_keys:!1,ticks:[],ticks_positions:[],ticks_labels:[],ticks_snap_bounds:0,ticks_tooltip:!1,scale:"linear",focus:!1,tooltip_position:null,labelledby:null,rangeHighlights:[]},getElement:function(){return this.sliderElem},getValue:function(){return this.options.range?this._state.value:this._state.value[0]},setValue:function(a,b,c){a||(a=0);var d=this.getValue();this._state.value=this._validateInputValue(a);var e=this._applyPrecision.bind(this);this.options.range?(this._state.value[0]=e(this._state.value[0]),this._state.value[1]=e(this._state.value[1]),this._state.value[0]=Math.max(this.options.min,Math.min(this.options.max,this._state.value[0])),this._state.value[1]=Math.max(this.options.min,Math.min(this.options.max,this._state.value[1]))):(this._state.value=e(this._state.value),this._state.value=[Math.max(this.options.min,Math.min(this.options.max,this._state.value))],this._addClass(this.handle2,"hide"),"after"===this.options.selection?this._state.value[1]=this.options.max:this._state.value[1]=this.options.min),this.options.max>this.options.min?this._state.percentage=[this._toPercentage(this._state.value[0]),this._toPercentage(this._state.value[1]),100*this.options.step/(this.options.max-this.options.min)]:this._state.percentage=[0,0,100],this._layout();var f=this.options.range?this._state.value:this._state.value[0];return 
this._setDataVal(f),b===!0&&this._trigger("slide",f),d!==f&&c===!0&&this._trigger("change",{oldValue:d,newValue:f}),this},destroy:function(){this._removeSliderEventHandlers(),this.sliderElem.parentNode.removeChild(this.sliderElem),this.element.style.display="",this._cleanUpEventCallbacksMap(),this.element.removeAttribute("data"),a&&(this._unbindJQueryEventHandlers(),this.$element.removeData("slider"))},disable:function(){return this._state.enabled=!1,this.handle1.removeAttribute("tabindex"),this.handle2.removeAttribute("tabindex"),this._addClass(this.sliderElem,"slider-disabled"),this._trigger("slideDisabled"),this},enable:function(){return this._state.enabled=!0,this.handle1.setAttribute("tabindex",0),this.handle2.setAttribute("tabindex",0),this._removeClass(this.sliderElem,"slider-disabled"),this._trigger("slideEnabled"),this},toggle:function(){return this._state.enabled?this.disable():this.enable(),this},isEnabled:function(){return this._state.enabled},on:function(a,b){return this._bindNonQueryEventHandler(a,b),this},off:function(b,c){a?(this.$element.off(b,c),this.$sliderElem.off(b,c)):this._unbindNonQueryEventHandler(b,c)},getAttribute:function(a){return a?this.options[a]:this.options},setAttribute:function(a,b){return this.options[a]=b,this},refresh:function(){return this._removeSliderEventHandlers(),e.call(this,this.element,this.options),a&&a.data(this.element,"slider",this),this},relayout:function(){return this._resize(),this._layout(),this},_removeSliderEventHandlers:function(){if(this.handle1.removeEventListener("keydown",this.handle1Keydown,!1),this.handle2.removeEventListener("keydown",this.handle2Keydown,!1),this.options.ticks_tooltip){for(var a=this.ticksContainer.getElementsByClassName("slider-tick"),b=0;b<a.length;b++)a[b].removeEventListener("mouseenter",this.ticksCallbackMap[b].mouseEnter,!1),a[b].removeEventListener("mouseleave",this.ticksCallbackMap[b].mouseLeave,!1);this.handle1.removeEventListener("mouseenter",this.handleCallbackMap.handle1.mouseEnter,!1),this.handle2.removeEventListener("mouseenter",this.handleCallbackMap.handle2.mouseEnter,!1),this.handle1.removeEventListener("mouseleave",this.handleCallbackMap.handle1.mouseLeave,!1),this.handle2.removeEventListener("mouseleave",this.handleCallbackMap.handle2.mouseLeave,!1)}this.handleCallbackMap=null,this.ticksCallbackMap=null,this.showTooltip&&(this.handle1.removeEventListener("focus",this.showTooltip,!1),this.handle2.removeEventListener("focus",this.showTooltip,!1)),this.hideTooltip&&(this.handle1.removeEventListener("blur",this.hideTooltip,!1),this.handle2.removeEventListener("blur",this.hideTooltip,!1)),this.showTooltip&&this.sliderElem.removeEventListener("mouseenter",this.showTooltip,!1),this.hideTooltip&&this.sliderElem.removeEventListener("mouseleave",this.hideTooltip,!1),this.sliderElem.removeEventListener("touchstart",this.touchstart,!1),this.sliderElem.removeEventListener("touchmove",this.touchmove,!1),this.sliderElem.removeEventListener("mousedown",this.mousedown,!1),window.removeEventListener("resize",this.resize,!1)},_bindNonQueryEventHandler:function(a,b){void 0===this.eventToCallbackMap[a]&&(this.eventToCallbackMap[a]=[]),this.eventToCallbackMap[a].push(b)},_unbindNonQueryEventHandler:function(a,b){var c=this.eventToCallbackMap[a];if(void 0!==c)for(var d=0;d<c.length;d++)if(c[d]===b){c.splice(d,1);break}},_cleanUpEventCallbacksMap:function(){for(var a=Object.keys(this.eventToCallbackMap),b=0;b<a.length;b++){var c=a[b];delete 
this.eventToCallbackMap[c]}},_showTooltip:function(){this.options.tooltip_split===!1?(this._addClass(this.tooltip,"in"),this.tooltip_min.style.display="none",this.tooltip_max.style.display="none"):(this._addClass(this.tooltip_min,"in"),this._addClass(this.tooltip_max,"in"),this.tooltip.style.display="none"),this._state.over=!0},_hideTooltip:function(){this._state.inDrag===!1&&this.alwaysShowTooltip!==!0&&(this._removeClass(this.tooltip,"in"),this._removeClass(this.tooltip_min,"in"),this._removeClass(this.tooltip_max,"in")),this._state.over=!1},_setToolTipOnMouseOver:function(a){function b(a,b){return b?[100-a.percentage[0],this.options.range?100-a.percentage[1]:a.percentage[1]]:[a.percentage[0],a.percentage[1]]}var c=this.options.formatter(a?a.value[0]:this._state.value[0]),d=a?b(a,this.options.reversed):b(this._state,this.options.reversed);this._setText(this.tooltipInner,c),this.tooltip.style[this.stylePos]=d[0]+"%"},_addTickListener:function(){return{addMouseEnter:function(a,b,c){var d=function(){var b=a._state,d=c>=0?c:this.attributes["aria-valuenow"].value,e=parseInt(d,10);b.value[0]=e,b.percentage[0]=a.options.ticks_positions[e],a._setToolTipOnMouseOver(b),a._showTooltip()};return b.addEventListener("mouseenter",d,!1),d},addMouseLeave:function(a,b){var c=function(){a._hideTooltip()};return b.addEventListener("mouseleave",c,!1),c}}},_layout:function(){var a;if(a=this.options.reversed?[100-this._state.percentage[0],this.options.range?100-this._state.percentage[1]:this._state.percentage[1]]:[this._state.percentage[0],this._state.percentage[1]],this.handle1.style[this.stylePos]=a[0]+"%",this.handle1.setAttribute("aria-valuenow",this._state.value[0]),isNaN(this.options.formatter(this._state.value[0]))&&this.handle1.setAttribute("aria-valuetext",this.options.formatter(this._state.value[0])),this.handle2.style[this.stylePos]=a[1]+"%",this.handle2.setAttribute("aria-valuenow",this._state.value[1]),isNaN(this.options.formatter(this._state.value[1]))&&this.handle2.setAttribute("aria-valuetext",this.options.formatter(this._state.value[1])),this.rangeHighlightElements.length>0&&Array.isArray(this.options.rangeHighlights)&&this.options.rangeHighlights.length>0)for(var b=0;b<this.options.rangeHighlights.length;b++){var c=this._toPercentage(this.options.rangeHighlights[b].start),d=this._toPercentage(this.options.rangeHighlights[b].end);if(this.options.reversed){var e=100-d;d=100-c,c=e}var f=this._createHighlightRange(c,d);f?"vertical"===this.options.orientation?(this.rangeHighlightElements[b].style.top=f.start+"%",this.rangeHighlightElements[b].style.height=f.size+"%"):(this.options.rtl?this.rangeHighlightElements[b].style.right=f.start+"%":this.rangeHighlightElements[b].style.left=f.start+"%",this.rangeHighlightElements[b].style.width=f.size+"%"):this.rangeHighlightElements[b].style.display="none"}if(Array.isArray(this.options.ticks)&&this.options.ticks.length>0){var g,h="vertical"===this.options.orientation?"height":"width";g="vertical"===this.options.orientation?"marginTop":this.options.rtl?"marginRight":"marginLeft";var i=this._state.size/(this.options.ticks.length-1);if(this.tickLabelContainer){var j=0;if(0===this.options.ticks_positions.length)"vertical"!==this.options.orientation&&(this.tickLabelContainer.style[g]=-i/2+"px"),j=this.tickLabelContainer.offsetHeight;else 
for(k=0;k<this.tickLabelContainer.childNodes.length;k++)this.tickLabelContainer.childNodes[k].offsetHeight>j&&(j=this.tickLabelContainer.childNodes[k].offsetHeight);"horizontal"===this.options.orientation&&(this.sliderElem.style.marginBottom=j+"px")}for(var k=0;k<this.options.ticks.length;k++){var l=this.options.ticks_positions[k]||this._toPercentage(this.options.ticks[k]);this.options.reversed&&(l=100-l),this.ticks[k].style[this.stylePos]=l+"%",this._removeClass(this.ticks[k],"in-selection"),this.options.range?l>=a[0]&&l<=a[1]&&this._addClass(this.ticks[k],"in-selection"):"after"===this.options.selection&&l>=a[0]?this._addClass(this.ticks[k],"in-selection"):"before"===this.options.selection&&l<=a[0]&&this._addClass(this.ticks[k],"in-selection"),this.tickLabels[k]&&(this.tickLabels[k].style[h]=i+"px","vertical"!==this.options.orientation&&void 0!==this.options.ticks_positions[k]?(this.tickLabels[k].style.position="absolute",this.tickLabels[k].style[this.stylePos]=l+"%",this.tickLabels[k].style[g]=-i/2+"px"):"vertical"===this.options.orientation&&(this.options.rtl?this.tickLabels[k].style.marginRight=this.sliderElem.offsetWidth+"px":this.tickLabels[k].style.marginLeft=this.sliderElem.offsetWidth+"px",this.tickLabelContainer.style[g]=this.sliderElem.offsetWidth/2*-1+"px"))}}var m;if(this.options.range){m=this.options.formatter(this._state.value),this._setText(this.tooltipInner,m),this.tooltip.style[this.stylePos]=(a[1]+a[0])/2+"%";var n=this.options.formatter(this._state.value[0]);this._setText(this.tooltipInner_min,n);var o=this.options.formatter(this._state.value[1]);this._setText(this.tooltipInner_max,o),this.tooltip_min.style[this.stylePos]=a[0]+"%",this.tooltip_max.style[this.stylePos]=a[1]+"%"}else m=this.options.formatter(this._state.value[0]),this._setText(this.tooltipInner,m),this.tooltip.style[this.stylePos]=a[0]+"%";if("vertical"===this.options.orientation)this.trackLow.style.top="0",this.trackLow.style.height=Math.min(a[0],a[1])+"%",this.trackSelection.style.top=Math.min(a[0],a[1])+"%",this.trackSelection.style.height=Math.abs(a[0]-a[1])+"%",this.trackHigh.style.bottom="0",this.trackHigh.style.height=100-Math.min(a[0],a[1])-Math.abs(a[0]-a[1])+"%";else{"right"===this.stylePos?this.trackLow.style.right="0":this.trackLow.style.left="0",this.trackLow.style.width=Math.min(a[0],a[1])+"%","right"===this.stylePos?this.trackSelection.style.right=Math.min(a[0],a[1])+"%":this.trackSelection.style.left=Math.min(a[0],a[1])+"%",this.trackSelection.style.width=Math.abs(a[0]-a[1])+"%","right"===this.stylePos?this.trackHigh.style.left="0":this.trackHigh.style.right="0",this.trackHigh.style.width=100-Math.min(a[0],a[1])-Math.abs(a[0]-a[1])+"%";var p=this.tooltip_min.getBoundingClientRect(),q=this.tooltip_max.getBoundingClientRect();"bottom"===this.options.tooltip_position?p.right>q.left?(this._removeClass(this.tooltip_max,"bottom"),this._addClass(this.tooltip_max,"top"),this.tooltip_max.style.top="",this.tooltip_max.style.bottom="22px"):(this._removeClass(this.tooltip_max,"top"),this._addClass(this.tooltip_max,"bottom"),this.tooltip_max.style.top=this.tooltip_min.style.top,this.tooltip_max.style.bottom=""):p.right>q.left?(this._removeClass(this.tooltip_max,"top"),this._addClass(this.tooltip_max,"bottom"),this.tooltip_max.style.top="18px"):(this._removeClass(this.tooltip_max,"bottom"),this._addClass(this.tooltip_max,"top"),this.tooltip_max.style.top=this.tooltip_min.style.top)}},_createHighlightRange:function(a,b){return 
this._isHighlightRange(a,b)?a>b?{start:b,size:a-b}:{start:a,size:b-a}:null},_isHighlightRange:function(a,b){return a>=0&&100>=a&&b>=0&&100>=b?!0:!1},_resize:function(a){this._state.offset=this._offset(this.sliderElem),this._state.size=this.sliderElem[this.sizePos],this._layout()},_removeProperty:function(a,b){a.style.removeProperty?a.style.removeProperty(b):a.style.removeAttribute(b)},_mousedown:function(a){if(!this._state.enabled)return!1;this._state.offset=this._offset(this.sliderElem),this._state.size=this.sliderElem[this.sizePos];var b=this._getPercentage(a);if(this.options.range){var c=Math.abs(this._state.percentage[0]-b),d=Math.abs(this._state.percentage[1]-b);this._state.dragged=d>c?0:1,this._adjustPercentageForRangeSliders(b)}else this._state.dragged=0;this._state.percentage[this._state.dragged]=b,this._layout(),this.touchCapable&&(document.removeEventListener("touchmove",this.mousemove,!1),document.removeEventListener("touchend",this.mouseup,!1)),this.mousemove&&document.removeEventListener("mousemove",this.mousemove,!1),this.mouseup&&document.removeEventListener("mouseup",this.mouseup,!1),this.mousemove=this._mousemove.bind(this),this.mouseup=this._mouseup.bind(this),this.touchCapable&&(document.addEventListener("touchmove",this.mousemove,!1),document.addEventListener("touchend",this.mouseup,!1)),document.addEventListener("mousemove",this.mousemove,!1),document.addEventListener("mouseup",this.mouseup,!1),this._state.inDrag=!0;var e=this._calculateValue();return this._trigger("slideStart",e),this._setDataVal(e),this.setValue(e,!1,!0),a.returnValue=!1,this.options.focus&&this._triggerFocusOnHandle(this._state.dragged),!0},_touchstart:function(a){if(void 0===a.changedTouches)return void this._mousedown(a);var b=a.changedTouches[0];this.touchX=b.pageX,this.touchY=b.pageY},_triggerFocusOnHandle:function(a){0===a&&this.handle1.focus(),1===a&&this.handle2.focus()},_keydown:function(a,b){if(!this._state.enabled)return!1;var c;switch(b.keyCode){case 37:case 40:c=-1;break;case 39:case 38:c=1}if(c){if(this.options.natural_arrow_keys){var d="vertical"===this.options.orientation&&!this.options.reversed,e="horizontal"===this.options.orientation&&this.options.reversed;(d||e)&&(c=-c)}var f=this._state.value[a]+c*this.options.step,g=f/this.options.max*100;if(this._state.keyCtrl=a,this.options.range){this._adjustPercentageForRangeSliders(g);var h=this._state.keyCtrl?this._state.value[0]:f,i=this._state.keyCtrl?f:this._state.value[1];f=[h,i]}return this._trigger("slideStart",f),this._setDataVal(f),this.setValue(f,!0,!0),this._setDataVal(f),this._trigger("slideStop",f),this._layout(),this._pauseEvent(b),delete this._state.keyCtrl,!1}},_pauseEvent:function(a){a.stopPropagation&&a.stopPropagation(),a.preventDefault&&a.preventDefault(),a.cancelBubble=!0,a.returnValue=!1},_mousemove:function(a){if(!this._state.enabled)return!1;var b=this._getPercentage(a);this._adjustPercentageForRangeSliders(b),this._state.percentage[this._state.dragged]=b,this._layout();var c=this._calculateValue(!0);return this.setValue(c,!0,!0),!1},_touchmove:function(a){if(void 0!==a.changedTouches){var b=a.changedTouches[0],c=b.pageX-this.touchX,d=b.pageY-this.touchY;this._state.inDrag||("vertical"===this.options.orientation&&5>=c&&c>=-5&&(d>=15||-15>=d)?this._mousedown(a):5>=d&&d>=-5&&(c>=15||-15>=c)&&this._mousedown(a))}},_adjustPercentageForRangeSliders:function(a){if(this.options.range){var b=this._getNumDigitsAfterDecimalPlace(a);b=b?b-1:0;var 
c=this._applyToFixedAndParseFloat(a,b);0===this._state.dragged&&this._applyToFixedAndParseFloat(this._state.percentage[1],b)<c?(this._state.percentage[0]=this._state.percentage[1],this._state.dragged=1):1===this._state.dragged&&this._applyToFixedAndParseFloat(this._state.percentage[0],b)>c?(this._state.percentage[1]=this._state.percentage[0],this._state.dragged=0):0===this._state.keyCtrl&&this._state.value[1]/this.options.max*100<a?(this._state.percentage[0]=this._state.percentage[1],this._state.keyCtrl=1,this.handle2.focus()):1===this._state.keyCtrl&&this._state.value[0]/this.options.max*100>a&&(this._state.percentage[1]=this._state.percentage[0],this._state.keyCtrl=0,this.handle1.focus())}},_mouseup:function(){if(!this._state.enabled)return!1;this.touchCapable&&(document.removeEventListener("touchmove",this.mousemove,!1),document.removeEventListener("touchend",this.mouseup,!1)),document.removeEventListener("mousemove",this.mousemove,!1),document.removeEventListener("mouseup",this.mouseup,!1),this._state.inDrag=!1,this._state.over===!1&&this._hideTooltip();var a=this._calculateValue(!0);return this._layout(),this._setDataVal(a),this._trigger("slideStop",a),!1},_calculateValue:function(a){var b;if(this.options.range?(b=[this.options.min,this.options.max],0!==this._state.percentage[0]&&(b[0]=this._toValue(this._state.percentage[0]),b[0]=this._applyPrecision(b[0])),100!==this._state.percentage[1]&&(b[1]=this._toValue(this._state.percentage[1]),b[1]=this._applyPrecision(b[1]))):(b=this._toValue(this._state.percentage[0]),b=parseFloat(b),b=this._applyPrecision(b)),a){for(var c=[b,1/0],d=0;d<this.options.ticks.length;d++){var e=Math.abs(this.options.ticks[d]-b);e<=c[1]&&(c=[this.options.ticks[d],e])}if(c[1]<=this.options.ticks_snap_bounds)return c[0]}return b},_applyPrecision:function(a){var b=this.options.precision||this._getNumDigitsAfterDecimalPlace(this.options.step);return this._applyToFixedAndParseFloat(a,b)},_getNumDigitsAfterDecimalPlace:function(a){var b=(""+a).match(/(?:\.(\d+))?(?:[eE]([+-]?\d+))?$/);return b?Math.max(0,(b[1]?b[1].length:0)-(b[2]?+b[2]:0)):0},_applyToFixedAndParseFloat:function(a,b){var c=a.toFixed(b);return parseFloat(c)},_getPercentage:function(a){!this.touchCapable||"touchstart"!==a.type&&"touchmove"!==a.type||(a=a.touches[0]);var b=a[this.mousePos],c=this._state.offset[this.stylePos],d=b-c;"right"===this.stylePos&&(d=-d);var e=d/this._state.size*100;return e=Math.round(e/this._state.percentage[2])*this._state.percentage[2],this.options.reversed&&(e=100-e),Math.max(0,Math.min(100,e))},_validateInputValue:function(a){if(isNaN(+a)){if(Array.isArray(a))return this._validateArray(a),a;throw new Error(f.formatInvalidInputErrorMsg(a))}return+a},_validateArray:function(a){for(var b=0;b<a.length;b++){
+var c=a[b];if("number"!=typeof c)throw new Error(f.formatInvalidInputErrorMsg(c))}},_setDataVal:function(a){this.element.setAttribute("data-value",a),this.element.setAttribute("value",a),this.element.value=a},_trigger:function(b,c){c=c||0===c?c:void 0;var d=this.eventToCallbackMap[b];if(d&&d.length)for(var e=0;e<d.length;e++){var f=d[e];f(c)}a&&this._triggerJQueryEvent(b,c)},_triggerJQueryEvent:function(a,b){var c={type:a,value:b};this.$element.trigger(c),this.$sliderElem.trigger(c)},_unbindJQueryEventHandlers:function(){this.$element.off(),this.$sliderElem.off()},_setText:function(a,b){"undefined"!=typeof a.textContent?a.textContent=b:"undefined"!=typeof a.innerText&&(a.innerText=b)},_removeClass:function(a,b){for(var c=b.split(" "),d=a.className,e=0;e<c.length;e++){var f=c[e],g=new RegExp("(?:\\s|^)"+f+"(?:\\s|$)");d=d.replace(g," ")}a.className=d.trim()},_addClass:function(a,b){for(var c=b.split(" "),d=a.className,e=0;e<c.length;e++){var f=c[e],g=new RegExp("(?:\\s|^)"+f+"(?:\\s|$)"),h=g.test(d);h||(d+=" "+f)}a.className=d.trim()},_offsetLeft:function(a){return a.getBoundingClientRect().left},_offsetRight:function(a){return a.getBoundingClientRect().right},_offsetTop:function(a){for(var b=a.offsetTop;(a=a.offsetParent)&&!isNaN(a.offsetTop);)b+=a.offsetTop,"BODY"!==a.tagName&&(b-=a.scrollTop);return b},_offset:function(a){return{left:this._offsetLeft(a),right:this._offsetRight(a),top:this._offsetTop(a)}},_css:function(b,c,d){if(a)a.style(b,c,d);else{var e=c.replace(/^-ms-/,"ms-").replace(/-([\da-z])/gi,function(a,b){return b.toUpperCase()});b.style[e]=d}},_toValue:function(a){return this.options.scale.toValue.apply(this,[a])},_toPercentage:function(a){return this.options.scale.toPercentage.apply(this,[a])},_setTooltipPosition:function(){var a=[this.tooltip,this.tooltip_min,this.tooltip_max];if("vertical"===this.options.orientation){var b;b=this.options.tooltip_position?this.options.tooltip_position:this.options.rtl?"left":"right";var c="left"===b?"right":"left";a.forEach(function(a){this._addClass(a,b),a.style[c]="100%"}.bind(this))}else"bottom"===this.options.tooltip_position?a.forEach(function(a){this._addClass(a,"bottom"),a.style.top="22px"}.bind(this)):a.forEach(function(a){this._addClass(a,"top"),a.style.top=-this.tooltip.outerHeight-14+"px"}.bind(this))}},a&&a.fn){var h=void 0;a.fn.slider?(windowIsDefined&&window.console.warn("bootstrap-slider.js - WARNING: $.fn.slider namespace is already bound. Use the $.fn.bootstrapSlider namespace instead."),h=c):(a.bridget(b,d),h=b),a.bridget(c,d),a(function(){a("input[data-provide=slider]")[h]()})}}(a),d});
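The bundle above registers the slider under the $.fn.bootstrapSlider jQuery namespace (and under $.fn.slider only when that name is still free, as the console warning notes), and auto-initializes any input[data-provide=slider]. A minimal usage sketch, not part of the patch: the min, max and step option names and the slideStart/slideStop events appear in the source above, while the #ex element id, the value option name and all numbers are assumptions for illustration.

$(function () {
    // Programmatic initialization via the always-available namespace.
    var slider = $('#ex').bootstrapSlider({
        min: 0,    // lower bound of the track
        max: 100,  // upper bound of the track
        step: 1,   // increment used by dragging and by arrow keys
        value: 50  // assumed option name for the initial handle position
    });
    // _triggerJQueryEvent above attaches the current reading as ev.value.
    slider.on('slideStop', function (ev) {
        console.log('slider value:', ev.value);
    });
});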
diff --git a/web/gui/lib/bootstrap-table-1.11.0.min.js b/web/gui/lib/bootstrap-table-1.11.0.min.js
new file mode 100644
index 0000000..f4c2b8a
--- /dev/null
+++ b/web/gui/lib/bootstrap-table-1.11.0.min.js
@@ -0,0 +1,9 @@
+/*
+* bootstrap-table - v1.11.0 - 2016-07-02
+* https://github.com/wenzhixin/bootstrap-table
+* Copyright (c) 2016 zhixin wen
+* Licensed MIT License
+* SPDX-License-Identifier: MIT
+*/
+!function(a){"use strict";var b=null,c=function(a){var b=arguments,c=!0,d=1;return a=a.replace(/%s/g,function(){var a=b[d++];return"undefined"==typeof a?(c=!1,""):a}),c?a:""},d=function(b,c,d,e){var f="";return a.each(b,function(a,b){return b[c]===e?(f=b[d],!1):!0}),f},e=function(b,c){var d=-1;return a.each(b,function(a,b){return b.field===c?(d=a,!1):!0}),d},f=function(b){var c,d,e,f=0,g=[];for(c=0;c<b[0].length;c++)f+=b[0][c].colspan||1;for(c=0;c<b.length;c++)for(g[c]=[],d=0;f>d;d++)g[c][d]=!1;for(c=0;c<b.length;c++)for(d=0;d<b[c].length;d++){var h=b[c][d],i=h.rowspan||1,j=h.colspan||1,k=a.inArray(!1,g[c]);for(1===j&&(h.fieldIndex=k,"undefined"==typeof h.field&&(h.field=k)),e=0;i>e;e++)g[c+e][k]=!0;for(e=0;j>e;e++)g[c][k+e]=!0}},g=function(){if(null===b){var c,d,e=a("<p/>").addClass("fixed-table-scroll-inner"),f=a("<div/>").addClass("fixed-table-scroll-outer");f.append(e),a("body").append(f),c=e[0].offsetWidth,f.css("overflow","scroll"),d=e[0].offsetWidth,c===d&&(d=f[0].clientWidth),f.remove(),b=c-d}return b},h=function(b,d,e,f){var g=d;if("string"==typeof d){var h=d.split(".");h.length>1?(g=window,a.each(h,function(a,b){g=g[b]})):g=window[d]}return"object"==typeof g?g:"function"==typeof g?g.apply(b,e):!g&&"string"==typeof d&&c.apply(this,[d].concat(e))?c.apply(this,[d].concat(e)):f},i=function(b,c,d){var e=Object.getOwnPropertyNames(b),f=Object.getOwnPropertyNames(c),g="";if(d&&e.length!==f.length)return!1;for(var h=0;h<e.length;h++)if(g=e[h],a.inArray(g,f)>-1&&b[g]!==c[g])return!1;return!0},j=function(a){return"string"==typeof a?a.replace(/&/g,"&amp;").replace(/</g,"&lt;").replace(/>/g,"&gt;").replace(/"/g,"&quot;").replace(/'/g,"&#039;").replace(/`/g,"&#x60;"):a},k=function(b){var c=0;return b.children().each(function(){c<a(this).outerHeight(!0)&&(c=a(this).outerHeight(!0))}),c},l=function(a){for(var b in a){var c=b.split(/(?=[A-Z])/).join("-").toLowerCase();c!==b&&(a[c]=a[b],delete a[b])}return a},m=function(a,b,c){var d=a;if("string"!=typeof b||a.hasOwnProperty(b))return c?j(a[b]):a[b];var e=b.split(".");for(var f in e)d=d&&d[e[f]];return c?j(d):d},n=function(){return!!(navigator.userAgent.indexOf("MSIE ")>0||navigator.userAgent.match(/Trident.*rv\:11\./))},o=function(){Object.keys||(Object.keys=function(){var a=Object.prototype.hasOwnProperty,b=!{toString:null}.propertyIsEnumerable("toString"),c=["toString","toLocaleString","valueOf","hasOwnProperty","isPrototypeOf","propertyIsEnumerable","constructor"],d=c.length;return function(e){if("object"!=typeof e&&("function"!=typeof e||null===e))throw new TypeError("Object.keys called on non-object");var f,g,h=[];for(f in e)a.call(e,f)&&h.push(f);if(b)for(g=0;d>g;g++)a.call(e,c[g])&&h.push(c[g]);return h}}())},p=function(b,c){this.options=c,this.$el=a(b),this.$el_=this.$el.clone(),this.timeoutId_=0,this.timeoutFooter_=0,this.init()};p.DEFAULTS={classes:"table table-hover",locale:void 0,height:void 0,undefinedText:"-",sortName:void 0,sortOrder:"asc",sortStable:!1,striped:!1,columns:[[]],data:[],dataField:"rows",method:"get",url:void 0,ajax:void 0,cache:!0,contentType:"application/json",dataType:"json",ajaxOptions:{},queryParams:function(a){return a},queryParamsType:"limit",responseHandler:function(a){return 
a},pagination:!1,onlyInfoPagination:!1,sidePagination:"client",totalRows:0,pageNumber:1,pageSize:10,pageList:[10,25,50,100],paginationHAlign:"right",paginationVAlign:"bottom",paginationDetailHAlign:"left",paginationPreText:"&lsaquo;",paginationNextText:"&rsaquo;",search:!1,searchOnEnterKey:!1,strictSearch:!1,searchAlign:"right",selectItemName:"btSelectItem",showHeader:!0,showFooter:!1,showColumns:!1,showPaginationSwitch:!1,showRefresh:!1,showToggle:!1,buttonsAlign:"right",smartDisplay:!0,escape:!1,minimumCountColumns:1,idField:void 0,uniqueId:void 0,cardView:!1,detailView:!1,detailFormatter:function(){return""},trimOnSearch:!0,clickToSelect:!1,singleSelect:!1,toolbar:void 0,toolbarAlign:"left",checkboxHeader:!0,sortable:!0,silentSort:!0,maintainSelected:!1,searchTimeOut:500,searchText:"",iconSize:void 0,buttonsClass:"default",iconsPrefix:"glyphicon",icons:{paginationSwitchDown:"glyphicon-collapse-down icon-chevron-down",paginationSwitchUp:"glyphicon-collapse-up icon-chevron-up",refresh:"glyphicon-refresh icon-refresh",toggle:"glyphicon-list-alt icon-list-alt",columns:"glyphicon-th icon-th",detailOpen:"glyphicon-plus icon-plus",detailClose:"glyphicon-minus icon-minus"},customSearch:a.noop,customSort:a.noop,rowStyle:function(){return{}},rowAttributes:function(){return{}},footerStyle:function(){return{}},onAll:function(){return!1},onClickCell:function(){return!1},onDblClickCell:function(){return!1},onClickRow:function(){return!1},onDblClickRow:function(){return!1},onSort:function(){return!1},onCheck:function(){return!1},onUncheck:function(){return!1},onCheckAll:function(){return!1},onUncheckAll:function(){return!1},onCheckSome:function(){return!1},onUncheckSome:function(){return!1},onLoadSuccess:function(){return!1},onLoadError:function(){return!1},onColumnSwitch:function(){return!1},onPageChange:function(){return!1},onSearch:function(){return!1},onToggle:function(){return!1},onPreBody:function(){return!1},onPostBody:function(){return!1},onPostHeader:function(){return!1},onExpandRow:function(){return!1},onCollapseRow:function(){return!1},onRefreshOptions:function(){return!1},onRefresh:function(){return!1},onResetView:function(){return!1}},p.LOCALES={},p.LOCALES["en-US"]=p.LOCALES.en={formatLoadingMessage:function(){return"Loading, please wait..."},formatRecordsPerPage:function(a){return c("%s rows per page",a)},formatShowingRows:function(a,b,d){return c("Showing %s to %s of %s rows",a,b,d)},formatDetailPagination:function(a){return c("Showing %s rows",a)},formatSearch:function(){return"Search"},formatNoMatches:function(){return"No matching records found"},formatPaginationSwitch:function(){return"Hide/Show pagination"},formatRefresh:function(){return"Refresh"},formatToggle:function(){return"Toggle"},formatColumns:function(){return"Columns"},formatAllRows:function(){return"All"}},a.extend(p.DEFAULTS,p.LOCALES["en-US"]),p.COLUMN_DEFAULTS={radio:!1,checkbox:!1,checkboxEnabled:!0,field:void 0,title:void 0,titleTooltip:void 0,"class":void 0,align:void 0,halign:void 0,falign:void 0,valign:void 0,width:void 0,sortable:!1,order:"asc",visible:!0,switchable:!0,clickToSelect:!0,formatter:void 0,footerFormatter:void 0,events:void 0,sorter:void 0,sortName:void 0,cellStyle:void 
0,searchable:!0,searchFormatter:!0,cardVisible:!0},p.EVENTS={"all.bs.table":"onAll","click-cell.bs.table":"onClickCell","dbl-click-cell.bs.table":"onDblClickCell","click-row.bs.table":"onClickRow","dbl-click-row.bs.table":"onDblClickRow","sort.bs.table":"onSort","check.bs.table":"onCheck","uncheck.bs.table":"onUncheck","check-all.bs.table":"onCheckAll","uncheck-all.bs.table":"onUncheckAll","check-some.bs.table":"onCheckSome","uncheck-some.bs.table":"onUncheckSome","load-success.bs.table":"onLoadSuccess","load-error.bs.table":"onLoadError","column-switch.bs.table":"onColumnSwitch","page-change.bs.table":"onPageChange","search.bs.table":"onSearch","toggle.bs.table":"onToggle","pre-body.bs.table":"onPreBody","post-body.bs.table":"onPostBody","post-header.bs.table":"onPostHeader","expand-row.bs.table":"onExpandRow","collapse-row.bs.table":"onCollapseRow","refresh-options.bs.table":"onRefreshOptions","reset-view.bs.table":"onResetView","refresh.bs.table":"onRefresh"},p.prototype.init=function(){this.initLocale(),this.initContainer(),this.initTable(),this.initHeader(),this.initData(),this.initFooter(),this.initToolbar(),this.initPagination(),this.initBody(),this.initSearchText(),this.initServer()},p.prototype.initLocale=function(){if(this.options.locale){var b=this.options.locale.split(/-|_/);b[0].toLowerCase(),b[1]&&b[1].toUpperCase(),a.fn.bootstrapTable.locales[this.options.locale]?a.extend(this.options,a.fn.bootstrapTable.locales[this.options.locale]):a.fn.bootstrapTable.locales[b.join("-")]?a.extend(this.options,a.fn.bootstrapTable.locales[b.join("-")]):a.fn.bootstrapTable.locales[b[0]]&&a.extend(this.options,a.fn.bootstrapTable.locales[b[0]])}},p.prototype.initContainer=function(){this.$container=a(['<div class="bootstrap-table">','<div class="fixed-table-toolbar"></div>',"top"===this.options.paginationVAlign||"both"===this.options.paginationVAlign?'<div class="fixed-table-pagination" style="clear: both;"></div>':"",'<div class="fixed-table-container">','<div class="fixed-table-header"><table></table></div>','<div class="fixed-table-body">','<div class="fixed-table-loading">',this.options.formatLoadingMessage(),"</div>","</div>",'<div class="fixed-table-footer"><table><tr></tr></table></div>',"bottom"===this.options.paginationVAlign||"both"===this.options.paginationVAlign?'<div class="fixed-table-pagination"></div>':"","</div>","</div>"].join("")),this.$container.insertAfter(this.$el),this.$tableContainer=this.$container.find(".fixed-table-container"),this.$tableHeader=this.$container.find(".fixed-table-header"),this.$tableBody=this.$container.find(".fixed-table-body"),this.$tableLoading=this.$container.find(".fixed-table-loading"),this.$tableFooter=this.$container.find(".fixed-table-footer"),this.$toolbar=this.$container.find(".fixed-table-toolbar"),this.$pagination=this.$container.find(".fixed-table-pagination"),this.$tableBody.append(this.$el),this.$container.after('<div class="clearfix"></div>'),this.$el.addClass(this.options.classes),this.options.striped&&this.$el.addClass("table-striped"),-1!==a.inArray("table-no-bordered",this.options.classes.split(" "))&&this.$tableContainer.addClass("table-no-bordered")},p.prototype.initTable=function(){var b=this,c=[],d=[];if(this.$header=this.$el.find(">thead"),this.$header.length||(this.$header=a("<thead></thead>").appendTo(this.$el)),this.$header.find("tr").each(function(){var b=[];a(this).find("th").each(function(){"undefined"!=typeof 
a(this).data("field")&&a(this).data("field",a(this).data("field")+""),b.push(a.extend({},{title:a(this).html(),"class":a(this).attr("class"),titleTooltip:a(this).attr("title"),rowspan:a(this).attr("rowspan")?+a(this).attr("rowspan"):void 0,colspan:a(this).attr("colspan")?+a(this).attr("colspan"):void 0},a(this).data()))}),c.push(b)}),a.isArray(this.options.columns[0])||(this.options.columns=[this.options.columns]),this.options.columns=a.extend(!0,[],c,this.options.columns),this.columns=[],f(this.options.columns),a.each(this.options.columns,function(c,d){a.each(d,function(d,e){e=a.extend({},p.COLUMN_DEFAULTS,e),"undefined"!=typeof e.fieldIndex&&(b.columns[e.fieldIndex]=e),b.options.columns[c][d]=e})}),!this.options.data.length){var e=[];this.$el.find(">tbody>tr").each(function(c){var f={};f._id=a(this).attr("id"),f._class=a(this).attr("class"),f._data=l(a(this).data()),a(this).find(">td").each(function(d){for(var g,h,i=a(this),j=+i.attr("colspan")||1,k=+i.attr("rowspan")||1;e[c]&&e[c][d];d++);for(g=d;d+j>g;g++)for(h=c;c+k>h;h++)e[h]||(e[h]=[]),e[h][g]=!0;var m=b.columns[d].field;f[m]=a(this).html(),f["_"+m+"_id"]=a(this).attr("id"),f["_"+m+"_class"]=a(this).attr("class"),f["_"+m+"_rowspan"]=a(this).attr("rowspan"),f["_"+m+"_colspan"]=a(this).attr("colspan"),f["_"+m+"_title"]=a(this).attr("title"),f["_"+m+"_data"]=l(a(this).data())}),d.push(f)}),this.options.data=d,d.length&&(this.fromHtml=!0)}},p.prototype.initHeader=function(){var b=this,d={},e=[];this.header={fields:[],styles:[],classes:[],formatters:[],events:[],sorters:[],sortNames:[],cellStyles:[],searchables:[]},a.each(this.options.columns,function(f,g){e.push("<tr>"),0===f&&!b.options.cardView&&b.options.detailView&&e.push(c('<th class="detail" rowspan="%s"><div class="fht-cell"></div></th>',b.options.columns.length)),a.each(g,function(a,f){var g="",h="",i="",j="",k=c(' class="%s"',f["class"]),l=(b.options.sortOrder||f.order,"px"),m=f.width;if(void 0===f.width||b.options.cardView||"string"==typeof f.width&&-1!==f.width.indexOf("%")&&(l="%"),f.width&&"string"==typeof f.width&&(m=f.width.replace("%","").replace("px","")),h=c("text-align: %s; ",f.halign?f.halign:f.align),i=c("text-align: %s; ",f.align),j=c("vertical-align: %s; ",f.valign),j+=c("width: %s; ",!f.checkbox&&!f.radio||m?m?m+l:void 0:"36px"),"undefined"!=typeof f.fieldIndex){if(b.header.fields[f.fieldIndex]=f.field,b.header.styles[f.fieldIndex]=i+j,b.header.classes[f.fieldIndex]=k,b.header.formatters[f.fieldIndex]=f.formatter,b.header.events[f.fieldIndex]=f.events,b.header.sorters[f.fieldIndex]=f.sorter,b.header.sortNames[f.fieldIndex]=f.sortName,b.header.cellStyles[f.fieldIndex]=f.cellStyle,b.header.searchables[f.fieldIndex]=f.searchable,!f.visible)return;if(b.options.cardView&&!f.cardVisible)return;d[f.field]=f}e.push("<th"+c(' title="%s"',f.titleTooltip),f.checkbox||f.radio?c(' class="bs-checkbox %s"',f["class"]||""):k,c(' style="%s"',h+j),c(' rowspan="%s"',f.rowspan),c(' colspan="%s"',f.colspan),c(' data-field="%s"',f.field),"tabindex='0'",">"),e.push(c('<div class="th-inner %s">',b.options.sortable&&f.sortable?"sortable both":"")),g=f.title,f.checkbox&&(!b.options.singleSelect&&b.options.checkboxHeader&&(g='<input name="btSelectAll" type="checkbox" />'),b.header.stateField=f.field),f.radio&&(g="",b.header.stateField=f.field,b.options.singleSelect=!0),e.push(g),e.push("</div>"),e.push('<div 
class="fht-cell"></div>'),e.push("</div>"),e.push("</th>")}),e.push("</tr>")}),this.$header.html(e.join("")),this.$header.find("th[data-field]").each(function(){a(this).data(d[a(this).data("field")])}),this.$container.off("click",".th-inner").on("click",".th-inner",function(c){var d=a(this);return b.options.detailView&&d.closest(".bootstrap-table")[0]!==b.$container[0]?!1:void(b.options.sortable&&d.parent().data().sortable&&b.onSort(c))}),this.$header.children().children().off("keypress").on("keypress",function(c){if(b.options.sortable&&a(this).data().sortable){var d=c.keyCode||c.which;13==d&&b.onSort(c)}}),a(window).off("resize.bootstrap-table"),!this.options.showHeader||this.options.cardView?(this.$header.hide(),this.$tableHeader.hide(),this.$tableLoading.css("top",0)):(this.$header.show(),this.$tableHeader.show(),this.$tableLoading.css("top",this.$header.outerHeight()+1),this.getCaret(),a(window).on("resize.bootstrap-table",a.proxy(this.resetWidth,this))),this.$selectAll=this.$header.find('[name="btSelectAll"]'),this.$selectAll.off("click").on("click",function(){var c=a(this).prop("checked");b[c?"checkAll":"uncheckAll"](),b.updateSelected()})},p.prototype.initFooter=function(){!this.options.showFooter||this.options.cardView?this.$tableFooter.hide():this.$tableFooter.show()},p.prototype.initData=function(a,b){this.data="append"===b?this.data.concat(a):"prepend"===b?[].concat(a).concat(this.data):a||this.options.data,this.options.data="append"===b?this.options.data.concat(a):"prepend"===b?[].concat(a).concat(this.options.data):this.data,"server"!==this.options.sidePagination&&this.initSort()},p.prototype.initSort=function(){var b=this,c=this.options.sortName,d="desc"===this.options.sortOrder?-1:1,e=a.inArray(this.options.sortName,this.header.fields);return this.options.customSort!==a.noop?void this.options.customSort.apply(this,[this.options.sortName,this.options.sortOrder]):void(-1!==e&&(this.options.sortStable&&a.each(this.data,function(a,b){b.hasOwnProperty("_position")||(b._position=a)}),this.data.sort(function(f,g){b.header.sortNames[e]&&(c=b.header.sortNames[e]);var i=m(f,c,b.options.escape),j=m(g,c,b.options.escape),k=h(b.header,b.header.sorters[e],[i,j]);return void 0!==k?d*k:((void 0===i||null===i)&&(i=""),(void 0===j||null===j)&&(j=""),b.options.sortStable&&i===j&&(i=f._position,j=g._position),a.isNumeric(i)&&a.isNumeric(j)?(i=parseFloat(i),j=parseFloat(j),j>i?-1*d:d):i===j?0:("string"!=typeof i&&(i=i.toString()),-1===i.localeCompare(j)?-1*d:d))})))},p.prototype.onSort=function(b){var c="keypress"===b.type?a(b.currentTarget):a(b.currentTarget).parent(),d=this.$header.find("th").eq(c.index());return this.$header.add(this.$header_).find("span.order").remove(),this.options.sortName===c.data("field")?this.options.sortOrder="asc"===this.options.sortOrder?"desc":"asc":(this.options.sortName=c.data("field"),this.options.sortOrder="asc"===c.data("order")?"desc":"asc"),this.trigger("sort",this.options.sortName,this.options.sortOrder),c.add(d).data("order",this.options.sortOrder),this.getCaret(),"server"===this.options.sidePagination?void this.initServer(this.options.silentSort):(this.initSort(),void this.initBody())},p.prototype.initToolbar=function(){var b,d,e=this,f=[],g=0,i=0;this.$toolbar.find(".bs-bars").children().length&&a("body").append(a(this.options.toolbar)),this.$toolbar.html(""),("string"==typeof this.options.toolbar||"object"==typeof this.options.toolbar)&&a(c('<div class="bs-bars 
pull-%s"></div>',this.options.toolbarAlign)).appendTo(this.$toolbar).append(a(this.options.toolbar)),f=[c('<div class="columns columns-%s btn-group pull-%s">',this.options.buttonsAlign,this.options.buttonsAlign)],"string"==typeof this.options.icons&&(this.options.icons=h(null,this.options.icons)),this.options.showPaginationSwitch&&f.push(c('<button class="btn'+c(" btn-%s",this.options.buttonsClass)+c(" btn-%s",this.options.iconSize)+'" type="button" name="paginationSwitch" title="%s">',this.options.formatPaginationSwitch()),c('<i class="%s %s"></i>',this.options.iconsPrefix,this.options.icons.paginationSwitchDown),"</button>"),this.options.showRefresh&&f.push(c('<button class="btn'+c(" btn-%s",this.options.buttonsClass)+c(" btn-%s",this.options.iconSize)+'" type="button" name="refresh" title="%s">',this.options.formatRefresh()),c('<i class="%s %s"></i>',this.options.iconsPrefix,this.options.icons.refresh),"</button>"),this.options.showToggle&&f.push(c('<button class="btn'+c(" btn-%s",this.options.buttonsClass)+c(" btn-%s",this.options.iconSize)+'" type="button" name="toggle" title="%s">',this.options.formatToggle()),c('<i class="%s %s"></i>',this.options.iconsPrefix,this.options.icons.toggle),"</button>"),this.options.showColumns&&(f.push(c('<div class="keep-open btn-group" title="%s">',this.options.formatColumns()),'<button type="button" class="btn'+c(" btn-%s",this.options.buttonsClass)+c(" btn-%s",this.options.iconSize)+' dropdown-toggle" data-toggle="dropdown">',c('<i class="%s %s"></i>',this.options.iconsPrefix,this.options.icons.columns),' <span class="caret"></span>',"</button>",'<ul class="dropdown-menu" role="menu">'),a.each(this.columns,function(a,b){if(!(b.radio||b.checkbox||e.options.cardView&&!b.cardVisible)){var d=b.visible?' checked="checked"':"";b.switchable&&(f.push(c('<li><label><input type="checkbox" data-field="%s" value="%s"%s> %s</label></li>',b.field,a,d,b.title)),i++)}}),f.push("</ul>","</div>")),f.push("</div>"),(this.showToolbar||f.length>2)&&this.$toolbar.append(f.join("")),this.options.showPaginationSwitch&&this.$toolbar.find('button[name="paginationSwitch"]').off("click").on("click",a.proxy(this.togglePagination,this)),this.options.showRefresh&&this.$toolbar.find('button[name="refresh"]').off("click").on("click",a.proxy(this.refresh,this)),this.options.showToggle&&this.$toolbar.find('button[name="toggle"]').off("click").on("click",function(){e.toggleView()}),this.options.showColumns&&(b=this.$toolbar.find(".keep-open"),i<=this.options.minimumCountColumns&&b.find("input").prop("disabled",!0),b.find("li").off("click").on("click",function(a){a.stopImmediatePropagation()}),b.find("input").off("click").on("click",function(){var b=a(this);e.toggleColumn(a(this).val(),b.prop("checked"),!1),e.trigger("column-switch",a(this).data("field"),b.prop("checked"))})),this.options.search&&(f=[],f.push('<div class="pull-'+this.options.searchAlign+' search">',c('<input class="form-control'+c(" input-%s",this.options.iconSize)+'" type="text" placeholder="%s">',this.options.formatSearch()),"</div>"),this.$toolbar.append(f.join("")),d=this.$toolbar.find(".search input"),d.off("keyup drop").on("keyup drop",function(b){e.options.searchOnEnterKey&&13!==b.keyCode||a.inArray(b.keyCode,[37,38,39,40])>-1||(clearTimeout(g),g=setTimeout(function(){e.onSearch(b)},e.options.searchTimeOut))}),n()&&d.off("mouseup").on("mouseup",function(a){clearTimeout(g),g=setTimeout(function(){e.onSearch(a)},e.options.searchTimeOut)}))},p.prototype.onSearch=function(b){var 
c=a.trim(a(b.currentTarget).val());this.options.trimOnSearch&&a(b.currentTarget).val()!==c&&a(b.currentTarget).val(c),c!==this.searchText&&(this.searchText=c,this.options.searchText=c,this.options.pageNumber=1,this.initSearch(),this.updatePagination(),this.trigger("search",c))},p.prototype.initSearch=function(){var b=this;if("server"!==this.options.sidePagination){if(this.options.customSearch!==a.noop)return void this.options.customSearch.apply(this,[this.searchText]);var c=this.searchText&&(this.options.escape?j(this.searchText):this.searchText).toLowerCase(),d=a.isEmptyObject(this.filterColumns)?null:this.filterColumns;this.data=d?a.grep(this.options.data,function(b){for(var c in d)if(a.isArray(d[c])&&-1===a.inArray(b[c],d[c])||b[c]!==d[c])return!1;return!0}):this.options.data,this.data=c?a.grep(this.data,function(d,f){for(var g=0;g<b.header.fields.length;g++)if(b.header.searchables[g]){var i,j=a.isNumeric(b.header.fields[g])?parseInt(b.header.fields[g],10):b.header.fields[g],k=b.columns[e(b.columns,j)];if("string"==typeof j){i=d;for(var l=j.split("."),m=0;m<l.length;m++)i=i[l[m]];k&&k.searchFormatter&&(i=h(k,b.header.formatters[g],[i,d,f],i))}else i=d[j];if("string"==typeof i||"number"==typeof i)if(b.options.strictSearch){if((i+"").toLowerCase()===c)return!0}else if(-1!==(i+"").toLowerCase().indexOf(c))return!0}return!1}):this.data}},p.prototype.initPagination=function(){if(!this.options.pagination)return void this.$pagination.hide();this.$pagination.show();var b,d,e,f,g,h,i,j,k,l=this,m=[],n=!1,o=this.getData(),p=this.options.pageList;if("server"!==this.options.sidePagination&&(this.options.totalRows=o.length),this.totalPages=0,this.options.totalRows){if(this.options.pageSize===this.options.formatAllRows())this.options.pageSize=this.options.totalRows,n=!0;else if(this.options.pageSize===this.options.totalRows){var q="string"==typeof this.options.pageList?this.options.pageList.replace("[","").replace("]","").replace(/ /g,"").toLowerCase().split(","):this.options.pageList;a.inArray(this.options.formatAllRows().toLowerCase(),q)>-1&&(n=!0)}this.totalPages=~~((this.options.totalRows-1)/this.options.pageSize)+1,this.options.totalPages=this.totalPages}if(this.totalPages>0&&this.options.pageNumber>this.totalPages&&(this.options.pageNumber=this.totalPages),this.pageFrom=(this.options.pageNumber-1)*this.options.pageSize+1,this.pageTo=this.options.pageNumber*this.options.pageSize,this.pageTo>this.options.totalRows&&(this.pageTo=this.options.totalRows),m.push('<div class="pull-'+this.options.paginationDetailHAlign+' pagination-detail">','<span class="pagination-info">',this.options.onlyInfoPagination?this.options.formatDetailPagination(this.options.totalRows):this.options.formatShowingRows(this.pageFrom,this.pageTo,this.options.totalRows),"</span>"),!this.options.onlyInfoPagination){m.push('<span class="page-list">');var r=[c('<span class="btn-group %s">',"top"===this.options.paginationVAlign||"both"===this.options.paginationVAlign?"dropdown":"dropup"),'<button type="button" class="btn'+c(" btn-%s",this.options.buttonsClass)+c(" btn-%s",this.options.iconSize)+' dropdown-toggle" data-toggle="dropdown">','<span class="page-size">',n?this.options.formatAllRows():this.options.pageSize,"</span>",' <span class="caret"></span>',"</button>",'<ul class="dropdown-menu" role="menu">'];if("string"==typeof this.options.pageList){var s=this.options.pageList.replace("[","").replace("]","").replace(/ 
/g,"").split(",");p=[],a.each(s,function(a,b){p.push(b.toUpperCase()===l.options.formatAllRows().toUpperCase()?l.options.formatAllRows():+b)})}for(a.each(p,function(a,b){if(!l.options.smartDisplay||0===a||p[a-1]<=l.options.totalRows){var d;d=n?b===l.options.formatAllRows()?' class="active"':"":b===l.options.pageSize?' class="active"':"",r.push(c('<li%s><a href="javascript:void(0)">%s</a></li>',d,b))}}),r.push("</ul></span>"),m.push(this.options.formatRecordsPerPage(r.join(""))),m.push("</span>"),m.push("</div>",'<div class="pull-'+this.options.paginationHAlign+' pagination">','<ul class="pagination'+c(" pagination-%s",this.options.iconSize)+'">','<li class="page-pre"><a href="javascript:void(0)">'+this.options.paginationPreText+"</a></li>"),this.totalPages<5?(d=1,e=this.totalPages):(d=this.options.pageNumber-2,e=d+4,1>d&&(d=1,e=5),e>this.totalPages&&(e=this.totalPages,d=e-4)),this.totalPages>=6&&(this.options.pageNumber>=3&&(m.push('<li class="page-first'+(1===this.options.pageNumber?" active":"")+'">','<a href="javascript:void(0)">',1,"</a>","</li>"),d++),this.options.pageNumber>=4&&(4==this.options.pageNumber||6==this.totalPages||7==this.totalPages?d--:m.push('<li class="page-first-separator disabled">','<a href="javascript:void(0)">...</a>',"</li>"),e--)),this.totalPages>=7&&this.options.pageNumber>=this.totalPages-2&&d--,6==this.totalPages?this.options.pageNumber>=this.totalPages-2&&e++:this.totalPages>=7&&(7==this.totalPages||this.options.pageNumber>=this.totalPages-3)&&e++,b=d;e>=b;b++)m.push('<li class="page-number'+(b===this.options.pageNumber?" active":"")+'">','<a href="javascript:void(0)">',b,"</a>","</li>");this.totalPages>=8&&this.options.pageNumber<=this.totalPages-4&&m.push('<li class="page-last-separator disabled">','<a href="javascript:void(0)">...</a>',"</li>"),this.totalPages>=6&&this.options.pageNumber<=this.totalPages-3&&m.push('<li class="page-last'+(this.totalPages===this.options.pageNumber?" 
active":"")+'">','<a href="javascript:void(0)">',this.totalPages,"</a>","</li>"),m.push('<li class="page-next"><a href="javascript:void(0)">'+this.options.paginationNextText+"</a></li>","</ul>","</div>")}this.$pagination.html(m.join("")),this.options.onlyInfoPagination||(f=this.$pagination.find(".page-list a"),g=this.$pagination.find(".page-first"),h=this.$pagination.find(".page-pre"),i=this.$pagination.find(".page-next"),j=this.$pagination.find(".page-last"),k=this.$pagination.find(".page-number"),this.options.smartDisplay&&(this.totalPages<=1&&this.$pagination.find("div.pagination").hide(),(p.length<2||this.options.totalRows<=p[0])&&this.$pagination.find("span.page-list").hide(),this.$pagination[this.getData().length?"show":"hide"]()),n&&(this.options.pageSize=this.options.formatAllRows()),f.off("click").on("click",a.proxy(this.onPageListChange,this)),g.off("click").on("click",a.proxy(this.onPageFirst,this)),h.off("click").on("click",a.proxy(this.onPagePre,this)),i.off("click").on("click",a.proxy(this.onPageNext,this)),j.off("click").on("click",a.proxy(this.onPageLast,this)),k.off("click").on("click",a.proxy(this.onPageNumber,this)))},p.prototype.updatePagination=function(b){b&&a(b.currentTarget).hasClass("disabled")||(this.options.maintainSelected||this.resetRows(),this.initPagination(),"server"===this.options.sidePagination?this.initServer():this.initBody(),this.trigger("page-change",this.options.pageNumber,this.options.pageSize))},p.prototype.onPageListChange=function(b){var c=a(b.currentTarget);c.parent().addClass("active").siblings().removeClass("active"),this.options.pageSize=c.text().toUpperCase()===this.options.formatAllRows().toUpperCase()?this.options.formatAllRows():+c.text(),this.$toolbar.find(".page-size").text(this.options.pageSize),this.updatePagination(b)},p.prototype.onPageFirst=function(a){this.options.pageNumber=1,this.updatePagination(a)},p.prototype.onPagePre=function(a){this.options.pageNumber-1===0?this.options.pageNumber=this.options.totalPages:this.options.pageNumber--,this.updatePagination(a)},p.prototype.onPageNext=function(a){this.options.pageNumber+1>this.options.totalPages?this.options.pageNumber=1:this.options.pageNumber++,this.updatePagination(a)},p.prototype.onPageLast=function(a){this.options.pageNumber=this.totalPages,this.updatePagination(a)},p.prototype.onPageNumber=function(b){this.options.pageNumber!==+a(b.currentTarget).text()&&(this.options.pageNumber=+a(b.currentTarget).text(),this.updatePagination(b))},p.prototype.initBody=function(b){var f=this,g=[],i=this.getData();this.trigger("pre-body",i),this.$body=this.$el.find(">tbody"),this.$body.length||(this.$body=a("<tbody></tbody>").appendTo(this.$el)),this.options.pagination&&"server"!==this.options.sidePagination||(this.pageFrom=1,this.pageTo=i.length);for(var k=this.pageFrom-1;k<this.pageTo;k++){var l,n=i[k],o={},p=[],q="",r={},s=[];if(o=h(this.options,this.options.rowStyle,[n,k],o),o&&o.css)for(l in o.css)p.push(l+": "+o.css[l]);if(r=h(this.options,this.options.rowAttributes,[n,k],r))for(l in r)s.push(c('%s="%s"',l,j(r[l])));n._data&&!a.isEmptyObject(n._data)&&a.each(n._data,function(a,b){"index"!==a&&(q+=c(' data-%s="%s"',a,b))}),g.push("<tr",c(" %s",s.join(" ")),c(' id="%s"',a.isArray(n)?void 0:n._id),c(' class="%s"',o.classes||(a.isArray(n)?void 0:n._class)),c(' data-index="%s"',k),c(' data-uniqueid="%s"',n[this.options.uniqueId]),c("%s",q),">"),this.options.cardView&&g.push(c('<td colspan="%s"><div 
class="card-views">',this.header.fields.length)),!this.options.cardView&&this.options.detailView&&g.push("<td>",'<a class="detail-icon" href="javascript:">',c('<i class="%s %s"></i>',this.options.iconsPrefix,this.options.icons.detailOpen),"</a>","</td>"),a.each(this.header.fields,function(b,e){var i="",j=m(n,e,f.options.escape),l="",q={},r="",s=f.header.classes[b],t="",u="",v="",w="",x=f.columns[b];if(!(f.fromHtml&&"undefined"==typeof j||!x.visible||f.options.cardView&&!x.cardVisible)){if(o=c('style="%s"',p.concat(f.header.styles[b]).join("; ")),n["_"+e+"_id"]&&(r=c(' id="%s"',n["_"+e+"_id"])),n["_"+e+"_class"]&&(s=c(' class="%s"',n["_"+e+"_class"])),n["_"+e+"_rowspan"]&&(u=c(' rowspan="%s"',n["_"+e+"_rowspan"])),n["_"+e+"_colspan"]&&(v=c(' colspan="%s"',n["_"+e+"_colspan"])),n["_"+e+"_title"]&&(w=c(' title="%s"',n["_"+e+"_title"])),q=h(f.header,f.header.cellStyles[b],[j,n,k,e],q),q.classes&&(s=c(' class="%s"',q.classes)),q.css){var y=[];for(var z in q.css)y.push(z+": "+q.css[z]);o=c('style="%s"',y.concat(f.header.styles[b]).join("; "))}j=h(x,f.header.formatters[b],[j,n,k],j),n["_"+e+"_data"]&&!a.isEmptyObject(n["_"+e+"_data"])&&a.each(n["_"+e+"_data"],function(a,b){"index"!==a&&(t+=c(' data-%s="%s"',a,b))}),x.checkbox||x.radio?(l=x.checkbox?"checkbox":l,l=x.radio?"radio":l,i=[c(f.options.cardView?'<div class="card-view %s">':'<td class="bs-checkbox %s">',x["class"]||""),"<input"+c(' data-index="%s"',k)+c(' name="%s"',f.options.selectItemName)+c(' type="%s"',l)+c(' value="%s"',n[f.options.idField])+c(' checked="%s"',j===!0||j&&j.checked?"checked":void 0)+c(' disabled="%s"',!x.checkboxEnabled||j&&j.disabled?"disabled":void 0)+" />",f.header.formatters[b]&&"string"==typeof j?j:"",f.options.cardView?"</div>":"</td>"].join(""),n[f.header.stateField]=j===!0||j&&j.checked):(j="undefined"==typeof j||null===j?f.options.undefinedText:j,i=f.options.cardView?['<div class="card-view">',f.options.showHeader?c('<span class="title" %s>%s</span>',o,d(f.columns,"field","title",e)):"",c('<span class="value">%s</span>',j),"</div>"].join(""):[c("<td%s %s %s %s %s %s %s>",r,s,o,t,u,v,w),j,"</td>"].join(""),f.options.cardView&&f.options.smartDisplay&&""===j&&(i='<div class="card-view"></div>')),g.push(i)}}),this.options.cardView&&g.push("</div></td>"),g.push("</tr>")}g.length||g.push('<tr class="no-records-found">',c('<td colspan="%s">%s</td>',this.$header.find("th").length,this.options.formatNoMatches()),"</tr>"),this.$body.html(g.join("")),b||this.scrollTo(0),this.$body.find("> tr[data-index] > td").off("click dblclick").on("click dblclick",function(b){var d=a(this),g=d.parent(),h=f.data[g.data("index")],i=d[0].cellIndex,j=f.getVisibleFields(),k=j[f.options.detailView&&!f.options.cardView?i-1:i],l=f.columns[e(f.columns,k)],n=m(h,k,f.options.escape);if(!d.find(".detail-icon").length&&(f.trigger("click"===b.type?"click-cell":"dbl-click-cell",k,n,h,d),f.trigger("click"===b.type?"click-row":"dbl-click-row",h,g,k),
+"click"===b.type&&f.options.clickToSelect&&l.clickToSelect)){var o=g.find(c('[name="%s"]',f.options.selectItemName));o.length&&o[0].click()}}),this.$body.find("> tr[data-index] > td > .detail-icon").off("click").on("click",function(){var b=a(this),d=b.parent().parent(),e=d.data("index"),g=i[e];if(d.next().is("tr.detail-view"))b.find("i").attr("class",c("%s %s",f.options.iconsPrefix,f.options.icons.detailOpen)),d.next().remove(),f.trigger("collapse-row",e,g);else{b.find("i").attr("class",c("%s %s",f.options.iconsPrefix,f.options.icons.detailClose)),d.after(c('<tr class="detail-view"><td colspan="%s"></td></tr>',d.find("td").length));var j=d.next().find("td"),k=h(f.options,f.options.detailFormatter,[e,g,j],"");1===j.length&&j.append(k),f.trigger("expand-row",e,g,j)}f.resetView()}),this.$selectItem=this.$body.find(c('[name="%s"]',this.options.selectItemName)),this.$selectItem.off("click").on("click",function(b){b.stopImmediatePropagation();var c=a(this),d=c.prop("checked"),e=f.data[c.data("index")];f.options.maintainSelected&&a(this).is(":radio")&&a.each(f.options.data,function(a,b){b[f.header.stateField]=!1}),e[f.header.stateField]=d,f.options.singleSelect&&(f.$selectItem.not(this).each(function(){f.data[a(this).data("index")][f.header.stateField]=!1}),f.$selectItem.filter(":checked").not(this).prop("checked",!1)),f.updateSelected(),f.trigger(d?"check":"uncheck",e,c)}),a.each(this.header.events,function(b,c){if(c){"string"==typeof c&&(c=h(null,c));var d=f.header.fields[b],e=a.inArray(d,f.getVisibleFields());f.options.detailView&&!f.options.cardView&&(e+=1);for(var g in c)f.$body.find(">tr:not(.no-records-found)").each(function(){var b=a(this),h=b.find(f.options.cardView?".card-view":"td").eq(e),i=g.indexOf(" "),j=g.substring(0,i),k=g.substring(i+1),l=c[g];h.find(k).off(j).on(j,function(a){var c=b.data("index"),e=f.data[c],g=e[d];l.apply(this,[a,g,e,c])})})}}),this.updateSelected(),this.resetView(),this.trigger("post-body",i)},p.prototype.initServer=function(b,c,d){var 
e,f=this,g={},i={searchText:this.searchText,sortName:this.options.sortName,sortOrder:this.options.sortOrder};this.options.pagination&&(i.pageSize=this.options.pageSize===this.options.formatAllRows()?this.options.totalRows:this.options.pageSize,i.pageNumber=this.options.pageNumber),(d||this.options.url||this.options.ajax)&&("limit"===this.options.queryParamsType&&(i={search:i.searchText,sort:i.sortName,order:i.sortOrder},this.options.pagination&&(i.offset=this.options.pageSize===this.options.formatAllRows()?0:this.options.pageSize*(this.options.pageNumber-1),i.limit=this.options.pageSize===this.options.formatAllRows()?this.options.totalRows:this.options.pageSize)),a.isEmptyObject(this.filterColumnsPartial)||(i.filter=JSON.stringify(this.filterColumnsPartial,null)),g=h(this.options,this.options.queryParams,[i],g),a.extend(g,c||{}),g!==!1&&(b||this.$tableLoading.show(),e=a.extend({},h(null,this.options.ajaxOptions),{type:this.options.method,url:d||this.options.url,data:"application/json"===this.options.contentType&&"post"===this.options.method?JSON.stringify(g):g,cache:this.options.cache,contentType:this.options.contentType,dataType:this.options.dataType,success:function(a){a=h(f.options,f.options.responseHandler,[a],a),f.load(a),f.trigger("load-success",a),b||f.$tableLoading.hide()},error:function(a){f.trigger("load-error",a.status,a),b||f.$tableLoading.hide()}}),this.options.ajax?h(this,this.options.ajax,[e],null):(this._xhr&&4!==this._xhr.readyState&&this._xhr.abort(),this._xhr=a.ajax(e))))},p.prototype.initSearchText=function(){if(this.options.search&&""!==this.options.searchText){var a=this.$toolbar.find(".search input");a.val(this.options.searchText),this.onSearch({currentTarget:a})}},p.prototype.getCaret=function(){var b=this;a.each(this.$header.find("th"),function(c,d){a(d).find(".sortable").removeClass("desc asc").addClass(a(d).data("field")===b.options.sortName?b.options.sortOrder:"both")})},p.prototype.updateSelected=function(){var b=this.$selectItem.filter(":enabled").length&&this.$selectItem.filter(":enabled").length===this.$selectItem.filter(":enabled").filter(":checked").length;this.$selectAll.add(this.$selectAll_).prop("checked",b),this.$selectItem.each(function(){a(this).closest("tr")[a(this).prop("checked")?"addClass":"removeClass"]("selected")})},p.prototype.updateRows=function(){var b=this;this.$selectItem.each(function(){b.data[a(this).data("index")][b.header.stateField]=a(this).prop("checked")})},p.prototype.resetRows=function(){var b=this;a.each(this.data,function(a,c){b.$selectAll.prop("checked",!1),b.$selectItem.prop("checked",!1),b.header.stateField&&(c[b.header.stateField]=!1)})},p.prototype.trigger=function(b){var c=Array.prototype.slice.call(arguments,1);b+=".bs.table",this.options[p.EVENTS[b]].apply(this.options,c),this.$el.trigger(a.Event(b),c),this.options.onAll(b,c),this.$el.trigger(a.Event("all.bs.table"),[b,c])},p.prototype.resetHeader=function(){clearTimeout(this.timeoutId_),this.timeoutId_=setTimeout(a.proxy(this.fitHeader,this),this.$el.is(":hidden")?100:0)},p.prototype.fitHeader=function(){var b,d,e,f,h=this;if(h.$el.is(":hidden"))return void(h.timeoutId_=setTimeout(a.proxy(h.fitHeader,h),100));if(b=this.$tableBody.get(0),d=b.scrollWidth>b.clientWidth&&b.scrollHeight>b.clientHeight+this.$header.outerHeight()?g():0,this.$el.css("margin-top",-this.$header.outerHeight()),e=a(":focus"),e.length>0){var i=e.parents("th");if(i.length>0){var j=i.attr("data-field");if(void 0!==j){var 
k=this.$header.find("[data-field='"+j+"']");k.length>0&&k.find(":input").addClass("focus-temp")}}}this.$header_=this.$header.clone(!0,!0),this.$selectAll_=this.$header_.find('[name="btSelectAll"]'),this.$tableHeader.css({"margin-right":d}).find("table").css("width",this.$el.outerWidth()).html("").attr("class",this.$el.attr("class")).append(this.$header_),f=a(".focus-temp:visible:eq(0)"),f.length>0&&(f.focus(),this.$header.find(".focus-temp").removeClass("focus-temp")),this.$header.find("th[data-field]").each(function(){h.$header_.find(c('th[data-field="%s"]',a(this).data("field"))).data(a(this).data())});var l=this.getVisibleFields(),m=this.$header_.find("th");this.$body.find(">tr:first-child:not(.no-records-found) > *").each(function(b){var d=a(this),e=b;h.options.detailView&&!h.options.cardView&&(0===b&&h.$header_.find("th.detail").find(".fht-cell").width(d.innerWidth()),e=b-1);var f=h.$header_.find(c('th[data-field="%s"]',l[e]));f.length>1&&(f=a(m[d[0].cellIndex])),f.find(".fht-cell").width(d.innerWidth())}),this.$tableBody.off("scroll").on("scroll",function(){h.$tableHeader.scrollLeft(a(this).scrollLeft()),h.options.showFooter&&!h.options.cardView&&h.$tableFooter.scrollLeft(a(this).scrollLeft())}),h.trigger("post-header")},p.prototype.resetFooter=function(){var b=this,d=b.getData(),e=[];this.options.showFooter&&!this.options.cardView&&(!this.options.cardView&&this.options.detailView&&e.push('<td><div class="th-inner">&nbsp;</div><div class="fht-cell"></div></td>'),a.each(this.columns,function(a,f){var g,i="",j="",k=[],l={},m=c(' class="%s"',f["class"]);if(f.visible&&(!b.options.cardView||f.cardVisible)){if(i=c("text-align: %s; ",f.falign?f.falign:f.align),j=c("vertical-align: %s; ",f.valign),l=h(null,b.options.footerStyle),l&&l.css)for(g in l.css)k.push(g+": "+l.css[g]);e.push("<td",m,c(' style="%s"',i+j+k.concat().join("; ")),">"),e.push('<div class="th-inner">'),e.push(h(f,f.footerFormatter,[d],"&nbsp;")||"&nbsp;"),e.push("</div>"),e.push('<div class="fht-cell"></div>'),e.push("</div>"),e.push("</td>")}}),this.$tableFooter.find("tr").html(e.join("")),this.$tableFooter.show(),clearTimeout(this.timeoutFooter_),this.timeoutFooter_=setTimeout(a.proxy(this.fitFooter,this),this.$el.is(":hidden")?100:0))},p.prototype.fitFooter=function(){var b,c,d;return clearTimeout(this.timeoutFooter_),this.$el.is(":hidden")?void(this.timeoutFooter_=setTimeout(a.proxy(this.fitFooter,this),100)):(c=this.$el.css("width"),d=c>this.$tableBody.width()?g():0,this.$tableFooter.css({"margin-right":d}).find("table").css("width",c).attr("class",this.$el.attr("class")),b=this.$tableFooter.find("td"),void this.$body.find(">tr:first-child:not(.no-records-found) > *").each(function(c){var d=a(this);b.eq(c).find(".fht-cell").width(d.innerWidth())}))},p.prototype.toggleColumn=function(a,b,d){if(-1!==a&&(this.columns[a].visible=b,this.initHeader(),this.initSearch(),this.initPagination(),this.initBody(),this.options.showColumns)){var e=this.$toolbar.find(".keep-open input").prop("disabled",!1);d&&e.filter(c('[value="%s"]',a)).prop("checked",b),e.filter(":checked").length<=this.options.minimumCountColumns&&e.filter(":checked").prop("disabled",!0)}},p.prototype.toggleRow=function(a,b,d){-1!==a&&this.$body.find("undefined"!=typeof a?c('tr[data-index="%s"]',a):c('tr[data-uniqueid="%s"]',b))[d?"show":"hide"]()},p.prototype.getVisibleFields=function(){var b=this,c=[];return a.each(this.header.fields,function(a,d){var f=b.columns[e(b.columns,d)];f.visible&&c.push(d)}),c},p.prototype.resetView=function(a){var 
b=0;if(a&&a.height&&(this.options.height=a.height),this.$selectAll.prop("checked",this.$selectItem.length>0&&this.$selectItem.length===this.$selectItem.filter(":checked").length),this.options.height){var c=k(this.$toolbar),d=k(this.$pagination),e=this.options.height-c-d;this.$tableContainer.css("height",e+"px")}return this.options.cardView?(this.$el.css("margin-top","0"),this.$tableContainer.css("padding-bottom","0"),void this.$tableFooter.hide()):(this.options.showHeader&&this.options.height?(this.$tableHeader.show(),this.resetHeader(),b+=this.$header.outerHeight()):(this.$tableHeader.hide(),this.trigger("post-header")),this.options.showFooter&&(this.resetFooter(),this.options.height&&(b+=this.$tableFooter.outerHeight()+1)),this.getCaret(),this.$tableContainer.css("padding-bottom",b+"px"),void this.trigger("reset-view"))},p.prototype.getData=function(b){return!this.searchText&&a.isEmptyObject(this.filterColumns)&&a.isEmptyObject(this.filterColumnsPartial)?b?this.options.data.slice(this.pageFrom-1,this.pageTo):this.options.data:b?this.data.slice(this.pageFrom-1,this.pageTo):this.data},p.prototype.load=function(b){var c=!1;"server"===this.options.sidePagination?(this.options.totalRows=b.total,c=b.fixedScroll,b=b[this.options.dataField]):a.isArray(b)||(c=b.fixedScroll,b=b.data),this.initData(b),this.initSearch(),this.initPagination(),this.initBody(c)},p.prototype.append=function(a){this.initData(a,"append"),this.initSearch(),this.initPagination(),this.initSort(),this.initBody(!0)},p.prototype.prepend=function(a){this.initData(a,"prepend"),this.initSearch(),this.initPagination(),this.initSort(),this.initBody(!0)},p.prototype.remove=function(b){var c,d,e=this.options.data.length;if(b.hasOwnProperty("field")&&b.hasOwnProperty("values")){for(c=e-1;c>=0;c--)d=this.options.data[c],d.hasOwnProperty(b.field)&&-1!==a.inArray(d[b.field],b.values)&&this.options.data.splice(c,1);e!==this.options.data.length&&(this.initSearch(),this.initPagination(),this.initSort(),this.initBody(!0))}},p.prototype.removeAll=function(){this.options.data.length>0&&(this.options.data.splice(0,this.options.data.length),this.initSearch(),this.initPagination(),this.initBody(!0))},p.prototype.getRowByUniqueId=function(a){var b,c,d,e=this.options.uniqueId,f=this.options.data.length,g=null;for(b=f-1;b>=0;b--){if(c=this.options.data[b],c.hasOwnProperty(e))d=c[e];else{if(!c._data.hasOwnProperty(e))continue;d=c._data[e]}if("string"==typeof d?a=a.toString():"number"==typeof d&&(Number(d)===d&&d%1===0?a=parseInt(a):d===Number(d)&&0!==d&&(a=parseFloat(a))),d===a){g=c;break}}return g},p.prototype.removeByUniqueId=function(a){var b=this.options.data.length,c=this.getRowByUniqueId(a);c&&this.options.data.splice(this.options.data.indexOf(c),1),b!==this.options.data.length&&(this.initSearch(),this.initPagination(),this.initBody(!0))},p.prototype.updateByUniqueId=function(b){var c=this,d=a.isArray(b)?b:[b];a.each(d,function(b,d){var e;d.hasOwnProperty("id")&&d.hasOwnProperty("row")&&(e=a.inArray(c.getRowByUniqueId(d.id),c.options.data),-1!==e&&a.extend(c.options.data[e],d.row))}),this.initSearch(),this.initSort(),this.initBody(!0)},p.prototype.insertRow=function(a){a.hasOwnProperty("index")&&a.hasOwnProperty("row")&&(this.data.splice(a.index,0,a.row),this.initSearch(),this.initPagination(),this.initSort(),this.initBody(!0))},p.prototype.updateRow=function(b){var 
c=this,d=a.isArray(b)?b:[b];a.each(d,function(b,d){d.hasOwnProperty("index")&&d.hasOwnProperty("row")&&a.extend(c.options.data[d.index],d.row)}),this.initSearch(),this.initSort(),this.initBody(!0)},p.prototype.showRow=function(a){(a.hasOwnProperty("index")||a.hasOwnProperty("uniqueId"))&&this.toggleRow(a.index,a.uniqueId,!0)},p.prototype.hideRow=function(a){(a.hasOwnProperty("index")||a.hasOwnProperty("uniqueId"))&&this.toggleRow(a.index,a.uniqueId,!1)},p.prototype.getRowsHidden=function(b){var c=a(this.$body[0]).children().filter(":hidden"),d=0;if(b)for(;d<c.length;d++)a(c[d]).show();return c},p.prototype.mergeCells=function(b){var c,d,e,f=b.index,g=a.inArray(b.field,this.getVisibleFields()),h=b.rowspan||1,i=b.colspan||1,j=this.$body.find(">tr");if(this.options.detailView&&!this.options.cardView&&(g+=1),e=j.eq(f).find(">td").eq(g),!(0>f||0>g||f>=this.data.length)){for(c=f;f+h>c;c++)for(d=g;g+i>d;d++)j.eq(c).find(">td").eq(d).hide();e.attr("rowspan",h).attr("colspan",i).show()}},p.prototype.updateCell=function(a){a.hasOwnProperty("index")&&a.hasOwnProperty("field")&&a.hasOwnProperty("value")&&(this.data[a.index][a.field]=a.value,a.reinit!==!1&&(this.initSort(),this.initBody(!0)))},p.prototype.getOptions=function(){return this.options},p.prototype.getSelections=function(){var b=this;return a.grep(this.options.data,function(a){return a[b.header.stateField]})},p.prototype.getAllSelections=function(){var b=this;return a.grep(this.options.data,function(a){return a[b.header.stateField]})},p.prototype.checkAll=function(){this.checkAll_(!0)},p.prototype.uncheckAll=function(){this.checkAll_(!1)},p.prototype.checkInvert=function(){var b=this,c=b.$selectItem.filter(":enabled"),d=c.filter(":checked");c.each(function(){a(this).prop("checked",!a(this).prop("checked"))}),b.updateRows(),b.updateSelected(),b.trigger("uncheck-some",d),d=b.getSelections(),b.trigger("check-some",d)},p.prototype.checkAll_=function(a){var b;a||(b=this.getSelections()),this.$selectAll.add(this.$selectAll_).prop("checked",a),this.$selectItem.filter(":enabled").prop("checked",a),this.updateRows(),a&&(b=this.getSelections()),this.trigger(a?"check-all":"uncheck-all",b)},p.prototype.check=function(a){this.check_(!0,a)},p.prototype.uncheck=function(a){this.check_(!1,a)},p.prototype.check_=function(a,b){var d=this.$selectItem.filter(c('[data-index="%s"]',b)).prop("checked",a);this.data[b][this.header.stateField]=a,this.updateSelected(),this.trigger(a?"check":"uncheck",this.data[b],d)},p.prototype.checkBy=function(a){this.checkBy_(!0,a)},p.prototype.uncheckBy=function(a){this.checkBy_(!1,a)},p.prototype.checkBy_=function(b,d){if(d.hasOwnProperty("field")&&d.hasOwnProperty("values")){var e=this,f=[];a.each(this.options.data,function(g,h){if(!h.hasOwnProperty(d.field))return!1;if(-1!==a.inArray(h[d.field],d.values)){var i=e.$selectItem.filter(":enabled").filter(c('[data-index="%s"]',g)).prop("checked",b);h[e.header.stateField]=b,f.push(h),e.trigger(b?"check":"uncheck",h,i)}}),this.updateSelected(),this.trigger(b?"check-some":"uncheck-some",f)}},p.prototype.destroy=function(){this.$el.insertBefore(this.$container),a(this.options.toolbar).insertBefore(this.$el),this.$container.next().remove(),this.$container.remove(),this.$el.html(this.$el_.html()).css("margin-top","0").attr("class",this.$el_.attr("class")||"")},p.prototype.showLoading=function(){this.$tableLoading.show()},p.prototype.hideLoading=function(){this.$tableLoading.hide()},p.prototype.togglePagination=function(){this.options.pagination=!this.options.pagination;var 
a=this.$toolbar.find('button[name="paginationSwitch"] i');this.options.pagination?a.attr("class",this.options.iconsPrefix+" "+this.options.icons.paginationSwitchDown):a.attr("class",this.options.iconsPrefix+" "+this.options.icons.paginationSwitchUp),this.updatePagination()},p.prototype.refresh=function(a){a&&a.url&&(this.options.pageNumber=1),this.initServer(a&&a.silent,a&&a.query,a&&a.url),this.trigger("refresh",a)},p.prototype.resetWidth=function(){this.options.showHeader&&this.options.height&&this.fitHeader(),this.options.showFooter&&this.fitFooter()},p.prototype.showColumn=function(a){this.toggleColumn(e(this.columns,a),!0,!0)},p.prototype.hideColumn=function(a){this.toggleColumn(e(this.columns,a),!1,!0)},p.prototype.getHiddenColumns=function(){return a.grep(this.columns,function(a){return!a.visible})},p.prototype.getVisibleColumns=function(){return a.grep(this.columns,function(a){return a.visible})},p.prototype.toggleAllColumns=function(b){if(a.each(this.columns,function(a){this.columns[a].visible=b}),this.initHeader(),this.initSearch(),this.initPagination(),this.initBody(),this.options.showColumns){var c=this.$toolbar.find(".keep-open input").prop("disabled",!1);c.filter(":checked").length<=this.options.minimumCountColumns&&c.filter(":checked").prop("disabled",!0)}},p.prototype.showAllColumns=function(){this.toggleAllColumns(!0)},p.prototype.hideAllColumns=function(){this.toggleAllColumns(!1)},p.prototype.filterBy=function(b){this.filterColumns=a.isEmptyObject(b)?{}:b,this.options.pageNumber=1,this.initSearch(),this.updatePagination()},p.prototype.scrollTo=function(a){return"string"==typeof a&&(a="bottom"===a?this.$tableBody[0].scrollHeight:0),"number"==typeof a&&this.$tableBody.scrollTop(a),"undefined"==typeof a?this.$tableBody.scrollTop():void 0},p.prototype.getScrollPosition=function(){return this.scrollTo()},p.prototype.selectPage=function(a){a>0&&a<=this.options.totalPages&&(this.options.pageNumber=a,this.updatePagination())},p.prototype.prevPage=function(){this.options.pageNumber>1&&(this.options.pageNumber--,this.updatePagination())},p.prototype.nextPage=function(){this.options.pageNumber<this.options.totalPages&&(this.options.pageNumber++,this.updatePagination())},p.prototype.toggleView=function(){this.options.cardView=!this.options.cardView,this.initHeader(),this.initBody(),this.trigger("toggle",this.options.cardView)},p.prototype.refreshOptions=function(b){i(this.options,b,!0)||(this.options=a.extend(this.options,b),this.trigger("refresh-options",this.options),this.destroy(),this.init())},p.prototype.resetSearch=function(a){var b=this.$toolbar.find(".search input");b.val(a||""),this.onSearch({currentTarget:b})},p.prototype.expandRow_=function(a,b){var d=this.$body.find(c('> tr[data-index="%s"]',b));d.next().is("tr.detail-view")===(a?!1:!0)&&d.find("> td > .detail-icon").click()},p.prototype.expandRow=function(a){this.expandRow_(!0,a)},p.prototype.collapseRow=function(a){this.expandRow_(!1,a)},p.prototype.expandAllRows=function(b){if(b){var d=this.$body.find(c('> tr[data-index="%s"]',0)),e=this,f=null,g=!1,h=-1;if(d.next().is("tr.detail-view")?d.next().next().is("tr.detail-view")||(d.next().find(".detail-icon").click(),g=!0):(d.find("> td > .detail-icon").click(),g=!0),g)try{h=setInterval(function(){f=e.$body.find("tr.detail-view").last().find(".detail-icon"),f.length>0?f.click():clearInterval(h)},1)}catch(i){clearInterval(h)}}else for(var 
j=this.$body.children(),k=0;k<j.length;k++)this.expandRow_(!0,a(j[k]).data("index"))},p.prototype.collapseAllRows=function(b){if(b)this.expandRow_(!1,0);else for(var c=this.$body.children(),d=0;d<c.length;d++)this.expandRow_(!1,a(c[d]).data("index"))},p.prototype.updateFormatText=function(a,b){this.options[c("format%s",a)]&&("string"==typeof b?this.options[c("format%s",a)]=function(){return b}:"function"==typeof b&&(this.options[c("format%s",a)]=b)),this.initToolbar(),this.initPagination(),this.initBody()};var q=["getOptions","getSelections","getAllSelections","getData","load","append","prepend","remove","removeAll","insertRow","updateRow","updateCell","updateByUniqueId","removeByUniqueId","getRowByUniqueId","showRow","hideRow","getRowsHidden","mergeCells","checkAll","uncheckAll","checkInvert","check","uncheck","checkBy","uncheckBy","refresh","resetView","resetWidth","destroy","showLoading","hideLoading","showColumn","hideColumn","getHiddenColumns","getVisibleColumns","showAllColumns","hideAllColumns","filterBy","scrollTo","getScrollPosition","selectPage","prevPage","nextPage","togglePagination","toggleView","refreshOptions","resetSearch","expandRow","collapseRow","expandAllRows","collapseAllRows","updateFormatText"];a.fn.bootstrapTable=function(b){var c,d=Array.prototype.slice.call(arguments,1);return this.each(function(){var e=a(this),f=e.data("bootstrap.table"),g=a.extend({},p.DEFAULTS,e.data(),"object"==typeof b&&b);if("string"==typeof b){if(a.inArray(b,q)<0)throw new Error("Unknown method: "+b);if(!f)return;c=f[b].apply(f,d),"destroy"===b&&e.removeData("bootstrap.table")}f||e.data("bootstrap.table",f=new p(this,g))}),"undefined"==typeof c?this:c},a.fn.bootstrapTable.Constructor=p,a.fn.bootstrapTable.defaults=p.DEFAULTS,a.fn.bootstrapTable.columnDefaults=p.COLUMN_DEFAULTS,a.fn.bootstrapTable.locales=p.LOCALES,a.fn.bootstrapTable.methods=q,a.fn.bootstrapTable.utils={sprintf:c,getFieldIndex:e,compareObjects:i,calculateObjectValue:h,getItemField:m,objectKeys:o,isIEBrowser:n},a(function(){a('[data-toggle="table"]').bootstrapTable()})}(jQuery);
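The last lines of the bundle expose the public surface: $.fn.bootstrapTable with the method whitelist q, the defaults/locales/utils attachments, and declarative initialization of any [data-toggle="table"] element. A minimal client-side sketch, not part of the patch: the columns, data, pagination, pageSize and search options come from p.DEFAULTS and p.COLUMN_DEFAULTS above, while the #metrics element id and the row data are invented for illustration.

$(function () {
    $('#metrics').bootstrapTable({
        columns: [
            { field: 'name',  title: 'Chart', sortable: true },
            { field: 'value', title: 'Value', align: 'right' }
        ],
        data: [
            { name: 'cpu',  value: 12.5 },
            { name: 'disk', value: 3.1 }
        ],
        pagination: true, // paging stays client-side while sidePagination is "client"
        pageSize: 10,
        search: true      // renders the toolbar search box wired to onSearch
    });
    // String arguments dispatch to whitelisted methods and return their result.
    var selected = $('#metrics').bootstrapTable('getSelections');
});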
diff --git a/web/gui/lib/bootstrap-table-export-1.11.0.min.js b/web/gui/lib/bootstrap-table-export-1.11.0.min.js
new file mode 100644
index 0000000..afa2d02
--- /dev/null
+++ b/web/gui/lib/bootstrap-table-export-1.11.0.min.js
@@ -0,0 +1,8 @@
+/*
+* bootstrap-table - v1.11.0 - 2016-07-02
+* https://github.com/wenzhixin/bootstrap-table
+* Copyright (c) 2016 zhixin wen
+* Licensed MIT License
+* SPDX-License-Identifier: MIT
+*/
+!function(a){"use strict";var b=a.fn.bootstrapTable.utils.sprintf,c={json:"JSON",xml:"XML",png:"PNG",csv:"CSV",txt:"TXT",sql:"SQL",doc:"MS-Word",excel:"MS-Excel",powerpoint:"MS-Powerpoint",pdf:"PDF"};a.extend(a.fn.bootstrapTable.defaults,{showExport:!1,exportDataType:"basic",exportTypes:["json","xml","csv","txt","sql","excel"],exportOptions:{}}),a.extend(a.fn.bootstrapTable.defaults.icons,{"export":"glyphicon-export icon-share"}),a.extend(a.fn.bootstrapTable.locales,{formatExport:function(){return"Export data"}}),a.extend(a.fn.bootstrapTable.defaults,a.fn.bootstrapTable.locales);var d=a.fn.bootstrapTable.Constructor,e=d.prototype.initToolbar;d.prototype.initToolbar=function(){if(this.showToolbar=this.options.showExport,e.apply(this,Array.prototype.slice.apply(arguments)),this.options.showExport){var d=this,f=this.$toolbar.find(">.btn-group"),g=f.find("div.export");if(!g.length){g=a(['<div class="export btn-group">','<button class="btn'+b(" btn-%s",this.options.buttonsClass)+b(" btn-%s",this.options.iconSize)+' dropdown-toggle" title="'+this.options.formatExport()+'" data-toggle="dropdown" type="button">',b('<i class="%s %s"></i> ',this.options.iconsPrefix,this.options.icons["export"]),'<span class="caret"></span>',"</button>",'<ul class="dropdown-menu" role="menu">',"</ul>","</div>"].join("")).appendTo(f);var h=g.find(".dropdown-menu"),i=this.options.exportTypes;if("string"==typeof this.options.exportTypes){var j=this.options.exportTypes.slice(1,-1).replace(/ /g,"").split(",");i=[],a.each(j,function(a,b){i.push(b.slice(1,-1))})}a.each(i,function(a,b){c.hasOwnProperty(b)&&h.append(['<li data-type="'+b+'">','<a href="javascript:void(0)">',c[b],"</a>","</li>"].join(""))}),h.find("li").click(function(){var b=a(this).data("type"),c=function(){d.$el.tableExport(a.extend({},d.options.exportOptions,{type:b,escape:!1}))};if("all"===d.options.exportDataType&&d.options.pagination)d.$el.one("server"===d.options.sidePagination?"post-body.bs.table":"page-change.bs.table",function(){c(),d.togglePagination()}),d.togglePagination();else if("selected"===d.options.exportDataType){var e=d.getData(),f=d.getAllSelections();d.load(f),c(),d.load(e)}else c()})}}}}(jQuery);
diff --git a/web/gui/lib/bootstrap-toggle-2.2.2.min.js b/web/gui/lib/bootstrap-toggle-2.2.2.min.js
new file mode 100644
index 0000000..a11e156
--- /dev/null
+++ b/web/gui/lib/bootstrap-toggle-2.2.2.min.js
@@ -0,0 +1,10 @@
+/*! ========================================================================
+ * Bootstrap Toggle: bootstrap-toggle.js v2.2.0
+ * http://www.bootstraptoggle.com
+ * ========================================================================
+ * Copyright 2014 Min Hur, The New York Times Company
+ * Licensed under MIT
+ * SPDX-License-Identifier: MIT
+ * ======================================================================== */
++function(a){"use strict";function b(b){return this.each(function(){var d=a(this),e=d.data("bs.toggle"),f="object"==typeof b&&b;e||d.data("bs.toggle",e=new c(this,f)),"string"==typeof b&&e[b]&&e[b]()})}var c=function(b,c){this.$element=a(b),this.options=a.extend({},this.defaults(),c),this.render()};c.VERSION="2.2.0",c.DEFAULTS={on:"On",off:"Off",onstyle:"primary",offstyle:"default",size:"normal",style:"",width:null,height:null},c.prototype.defaults=function(){return{on:this.$element.attr("data-on")||c.DEFAULTS.on,off:this.$element.attr("data-off")||c.DEFAULTS.off,onstyle:this.$element.attr("data-onstyle")||c.DEFAULTS.onstyle,offstyle:this.$element.attr("data-offstyle")||c.DEFAULTS.offstyle,size:this.$element.attr("data-size")||c.DEFAULTS.size,style:this.$element.attr("data-style")||c.DEFAULTS.style,width:this.$element.attr("data-width")||c.DEFAULTS.width,height:this.$element.attr("data-height")||c.DEFAULTS.height}},c.prototype.render=function(){this._onstyle="btn-"+this.options.onstyle,this._offstyle="btn-"+this.options.offstyle;var b="large"===this.options.size?"btn-lg":"small"===this.options.size?"btn-sm":"mini"===this.options.size?"btn-xs":"",c=a('<label class="btn">').html(this.options.on).addClass(this._onstyle+" "+b),d=a('<label class="btn">').html(this.options.off).addClass(this._offstyle+" "+b+" active"),e=a('<span class="toggle-handle btn btn-default">').addClass(b),f=a('<div class="toggle-group">').append(c,d,e),g=a('<div class="toggle btn" data-toggle="toggle">').addClass(this.$element.prop("checked")?this._onstyle:this._offstyle+" off").addClass(b).addClass(this.options.style);this.$element.wrap(g),a.extend(this,{$toggle:this.$element.parent(),$toggleOn:c,$toggleOff:d,$toggleGroup:f}),this.$toggle.append(f);var h=this.options.width||Math.max(c.outerWidth(),d.outerWidth())+e.outerWidth()/2,i=this.options.height||Math.max(c.outerHeight(),d.outerHeight());c.addClass("toggle-on"),d.addClass("toggle-off"),this.$toggle.css({width:h,height:i}),this.options.height&&(c.css("line-height",c.height()+"px"),d.css("line-height",d.height()+"px")),this.update(!0),this.trigger(!0)},c.prototype.toggle=function(){this.$element.prop("checked")?this.off():this.on()},c.prototype.on=function(a){return this.$element.prop("disabled")?!1:(this.$toggle.removeClass(this._offstyle+" off").addClass(this._onstyle),this.$element.prop("checked",!0),void(a||this.trigger()))},c.prototype.off=function(a){return this.$element.prop("disabled")?!1:(this.$toggle.removeClass(this._onstyle).addClass(this._offstyle+" off"),this.$element.prop("checked",!1),void(a||this.trigger()))},c.prototype.enable=function(){this.$toggle.removeAttr("disabled"),this.$element.prop("disabled",!1)},c.prototype.disable=function(){this.$toggle.attr("disabled","disabled"),this.$element.prop("disabled",!0)},c.prototype.update=function(a){this.$element.prop("disabled")?this.disable():this.enable(),this.$element.prop("checked")?this.on(a):this.off(a)},c.prototype.trigger=function(b){this.$element.off("change.bs.toggle"),b||this.$element.change(),this.$element.on("change.bs.toggle",a.proxy(function(){this.update()},this))},c.prototype.destroy=function(){this.$element.off("change.bs.toggle"),this.$toggleGroup.remove(),this.$element.removeData("bs.toggle"),this.$element.unwrap()};var d=a.fn.bootstrapToggle;a.fn.bootstrapToggle=b,a.fn.bootstrapToggle.Constructor=c,a.fn.toggle.noConflict=function(){return a.fn.bootstrapToggle=d,this},a(function(){a("input[type=checkbox][data-toggle^=toggle]").bootstrapToggle()}),a(document).on("click.bs.toggle","div[data-toggle^=toggle]",function(b){var c=a(this).find("input[type=checkbox]");c.bootstrapToggle("toggle"),b.preventDefault()})}(jQuery);
+//# sourceMappingURL=bootstrap-toggle.min.js.map
diff --git a/web/gui/lib/clipboard-polyfill-be05dad.js b/web/gui/lib/clipboard-polyfill-be05dad.js
new file mode 100644
index 0000000..d1ba02e
--- /dev/null
+++ b/web/gui/lib/clipboard-polyfill-be05dad.js
@@ -0,0 +1,9 @@
+!function(t,e){"object"==typeof exports&&"object"==typeof module?module.exports=e():"function"==typeof define&&define.amd?define([],e):"object"==typeof exports?exports.clipboard=e():t.clipboard=e()}(this,function(){return function(t){function e(r){if(n[r])return n[r].exports;var o=n[r]={i:r,l:!1,exports:{}};return t[r].call(o.exports,o,o.exports,e),o.l=!0,o.exports}var n={};return e.m=t,e.c=n,e.d=function(t,n,r){e.o(t,n)||Object.defineProperty(t,n,{configurable:!1,enumerable:!0,get:r})},e.n=function(t){var n=t&&t.__esModule?function(){return t.default}:function(){return t};return e.d(n,"a",n),n},e.o=function(t,e){return Object.prototype.hasOwnProperty.call(t,e)},e.p="",e(e.s=0)}([function(t,e,n){"use strict";function r(t,e,n){m("listener called"),t.success=!0,e.forEach(function(e,r){n.clipboardData.setData(r,e),r===b&&n.clipboardData.getData(r)!=e&&(m("setting text/plain failed"),t.success=!1)}),n.preventDefault()}function o(t){var e=new _,n=r.bind(this,e,t);document.addEventListener("copy",n);try{document.execCommand("copy")}finally{document.removeEventListener("copy",n)}return e}function i(t,e){c(t);var n=o(e);return a(),n}function u(t){var e=document.createElement("div");e.textContent="temporary element",document.body.appendChild(e);var n=i(e,t);return document.body.removeChild(e),n}function s(t){m("copyTextUsingDOM");var e=document.createElement("div"),n=e.attachShadow({mode:"open"});document.body.appendChild(e);var r=document.createElement("span");r.innerText=t,n.appendChild(r),c(r);var o=document.execCommand("copy");return a(),document.body.removeChild(e),o}function c(t){var e=document.getSelection(),n=document.createRange();n.selectNodeContents(t),e.removeAllRanges(),e.addRange(n)}function a(){document.getSelection().removeAllRanges()}function l(t){var e=new v.DT;return e.setData("text/plain",t),e}function f(){return"undefined"==typeof ClipboardEvent&&void 0!==window.clipboardData&&void 0!==window.clipboardData.setData}function d(t){var e=t.getData("text/plain");if(void 0!==e)return window.clipboardData.setData("Text",e);throw"No `text/plain` value was specified."}function p(){return new h.Promise(function(t,e){var n=window.clipboardData.getData("Text");""===n?e(new Error("Empty clipboard or could not read plain text from clipboard")):t(n)})}Object.defineProperty(e,"__esModule",{value:!0});var h=n(1),v=n(5),m=function(t){},y=!0,w=(console.warn||console.log).bind(console,"[clipboard-polyfill]"),b="text/plain",g=function(){function t(){}return t.setDebugLog=function(t){m=t},t.suppressWarnings=function(){y=!1,v.suppressDTWarnings()},t.write=function(t){return y&&!t.getData(b)&&w("clipboard.write() was called without a `text/plain` data type. On some platforms, this may result in an empty clipboard. Call clipboard.suppressWarnings() to suppress this warning."),new h.Promise(function(e,n){if(f())return void(d(t)?e():n(new Error("Copying failed, possibly because the user rejected it.")));var r=o(t);if(r.success)return m("regular execCopy worked"),void e();if(navigator.userAgent.indexOf("Edge")>-1)return m('UA "Edge" => assuming success'),void e();if(r=i(document.body,t),r.success)return m("copyUsingTempSelection worked"),void e();if(r=u(t),r.success)return m("copyUsingTempElem worked"),void e();var c=t.getData(b);if(void 0!==c&&s(c))return m("copyTextUsingDOM worked"),void e();n(new Error("Copy command failed."))})},t.writeText=function(t){var e=new v.DT;return e.setData(b,t),this.write(e)},t.read=function(){return new h.Promise(function(t,e){if(f())return void p().then(function(e){return t(l(e))},e);e("Read is not supported in your browser.")})},t.readText=function(){return f()?p():new h.Promise(function(t,e){e("Read is not supported in your browser.")})},t.DT=v.DT,t}();e.default=g;var _=function(){function t(){this.success=!1}return t}();t.exports=g},function(t,e,n){(function(e,r){/*!
+ * @overview es6-promise - a tiny implementation of Promises/A+.
+ * @copyright Copyright (c) 2014 Yehuda Katz, Tom Dale, Stefan Penner and contributors (Conversion to ES6 API by Jake Archibald)
+ * @license Licensed under MIT license
+ * See https://raw.githubusercontent.com/stefanpenner/es6-promise/master/LICENSE
+ * @version 4.1.1
+ * SPDX-License-Identifier: MIT
+ */
+!function(e,n){t.exports=n()}(0,function(){"use strict";function t(t){var e=typeof t;return null!==t&&("object"===e||"function"===e)}function o(t){return"function"==typeof t}function i(t){q=t}function u(t){z=t}function s(){return void 0!==K?function(){K(a)}:c()}function c(){var t=setTimeout;return function(){return t(a,1)}}function a(){for(var t=0;t<I;t+=2){(0,Z[t])(Z[t+1]),Z[t]=void 0,Z[t+1]=void 0}I=0}function l(t,e){var n=arguments,r=this,o=new this.constructor(d);void 0===o[tt]&&P(o);var i=r._state;return i?function(){var t=n[i-1];z(function(){return j(i,o,t,r._result)})}():E(r,o,t,e),o}function f(t){var e=this;if(t&&"object"==typeof t&&t.constructor===e)return t;var n=new e(d);return g(n,t),n}function d(){}function p(){return new TypeError("You cannot resolve a promise with itself")}function h(){return new TypeError("A promises callback cannot return that same promise.")}function v(t){try{return t.then}catch(t){return ot.error=t,ot}}function m(t,e,n,r){try{t.call(e,n,r)}catch(t){return t}}function y(t,e,n){z(function(t){var r=!1,o=m(n,e,function(n){r||(r=!0,e!==n?g(t,n):T(t,n))},function(e){r||(r=!0,x(t,e))},"Settle: "+(t._label||" unknown promise"));!r&&o&&(r=!0,x(t,o))},t)}function w(t,e){e._state===nt?T(t,e._result):e._state===rt?x(t,e._result):E(e,void 0,function(e){return g(t,e)},function(e){return x(t,e)})}function b(t,e,n){e.constructor===t.constructor&&n===l&&e.constructor.resolve===f?w(t,e):n===ot?(x(t,ot.error),ot.error=null):void 0===n?T(t,e):o(n)?y(t,e,n):T(t,e)}function g(e,n){e===n?x(e,p()):t(n)?b(e,n,v(n)):T(e,n)}function _(t){t._onerror&&t._onerror(t._result),D(t)}function T(t,e){t._state===et&&(t._result=e,t._state=nt,0!==t._subscribers.length&&z(D,t))}function x(t,e){t._state===et&&(t._state=rt,t._result=e,z(_,t))}function E(t,e,n,r){var o=t._subscribers,i=o.length;t._onerror=null,o[i]=e,o[i+nt]=n,o[i+rt]=r,0===i&&t._state&&z(D,t)}function D(t){var e=t._subscribers,n=t._state;if(0!==e.length){for(var r=void 0,o=void 0,i=t._result,u=0;u<e.length;u+=3)r=e[u],o=e[u+n],r?j(n,r,o,i):o(i);t._subscribers.length=0}}function A(){this.error=null}function C(t,e){try{return t(e)}catch(t){return it.error=t,it}}function j(t,e,n,r){var i=o(n),u=void 0,s=void 0,c=void 0,a=void 0;if(i){if(u=C(n,r),u===it?(a=!0,s=u.error,u.error=null):c=!0,e===u)return void x(e,h())}else u=r,c=!0;e._state!==et||(i&&c?g(e,u):a?x(e,s):t===nt?T(e,u):t===rt&&x(e,u))}function O(t,e){try{e(function(e){g(t,e)},function(e){x(t,e)})}catch(e){x(t,e)}}function S(){return ut++}function P(t){t[tt]=ut++,t._state=void 0,t._result=void 0,t._subscribers=[]}function M(t,e){this._instanceConstructor=t,this.promise=new t(d),this.promise[tt]||P(this.promise),H(e)?(this.length=e.length,this._remaining=e.length,this._result=new Array(this.length),0===this.length?T(this.promise,this._result):(this.length=this.length||0,this._enumerate(e),0===this._remaining&&T(this.promise,this._result))):x(this.promise,L())}function L(){return new Error("Array Methods must be provided an Array")}function k(t){return new M(this,t).promise}function U(t){var e=this;return new e(H(t)?function(n,r){for(var o=t.length,i=0;i<o;i++)e.resolve(t[i]).then(n,r)}:function(t,e){return e(new TypeError("You must pass an array to race."))})}function R(t){var e=this,n=new e(d);return x(n,t),n}function W(){throw new TypeError("You must pass a resolver function as the first argument to the promise constructor")}function N(){throw new TypeError("Failed to construct 'Promise': Please use the 'new' operator, this object constructor cannot be called as a function.")}function F(t){this[tt]=S(),this._result=this._state=void 0,this._subscribers=[],d!==t&&("function"!=typeof t&&W(),this instanceof F?O(this,t):N())}function Y(){var t=void 0;if(void 0!==r)t=r;else if("undefined"!=typeof self)t=self;else try{t=Function("return this")()}catch(t){throw new Error("polyfill failed because global object is unavailable in this environment")}var e=t.Promise;if(e){var n=null;try{n=Object.prototype.toString.call(e.resolve())}catch(t){}if("[object Promise]"===n&&!e.cast)return}t.Promise=F}var X=void 0;X=Array.isArray?Array.isArray:function(t){return"[object Array]"===Object.prototype.toString.call(t)};var H=X,I=0,K=void 0,q=void 0,z=function(t,e){Z[I]=t,Z[I+1]=e,2===(I+=2)&&(q?q(a):$())},B="undefined"!=typeof window?window:void 0,G=B||{},J=G.MutationObserver||G.WebKitMutationObserver,Q="undefined"==typeof self&&void 0!==e&&"[object process]"==={}.toString.call(e),V="undefined"!=typeof Uint8ClampedArray&&"undefined"!=typeof importScripts&&"undefined"!=typeof MessageChannel,Z=new Array(1e3),$=void 0;$=Q?function(){return function(){return e.nextTick(a)}}():J?function(){var t=0,e=new J(a),n=document.createTextNode("");return e.observe(n,{characterData:!0}),function(){n.data=t=++t%2}}():V?function(){var t=new MessageChannel;return t.port1.onmessage=a,function(){return t.port2.postMessage(0)}}():void 0===B?function(){try{var t=n(4);return K=t.runOnLoop||t.runOnContext,s()}catch(t){return c()}}():c();var tt=Math.random().toString(36).substring(16),et=void 0,nt=1,rt=2,ot=new A,it=new A,ut=0;return M.prototype._enumerate=function(t){for(var e=0;this._state===et&&e<t.length;e++)this._eachEntry(t[e],e)},M.prototype._eachEntry=function(t,e){var n=this._instanceConstructor,r=n.resolve;if(r===f){var o=v(t);if(o===l&&t._state!==et)this._settledAt(t._state,e,t._result);else if("function"!=typeof o)this._remaining--,this._result[e]=t;else if(n===F){var i=new n(d);b(i,t,o),this._willSettleAt(i,e)}else this._willSettleAt(new n(function(e){return e(t)}),e)}else this._willSettleAt(r(t),e)},M.prototype._settledAt=function(t,e,n){var r=this.promise;r._state===et&&(this._remaining--,t===rt?x(r,n):this._result[e]=n),0===this._remaining&&T(r,this._result)},M.prototype._willSettleAt=function(t,e){var n=this;E(t,void 0,function(t){return n._settledAt(nt,e,t)},function(t){return n._settledAt(rt,e,t)})},F.all=k,F.race=U,F.resolve=f,F.reject=R,F._setScheduler=i,F._setAsap=u,F._asap=z,F.prototype={constructor:F,then:l,catch:function(t){return this.then(null,t)}},F.polyfill=Y,F.Promise=F,F})}).call(e,n(2),n(3))},function(t,e){function n(){throw new Error("setTimeout has not been defined")}function r(){throw new Error("clearTimeout has not been defined")}function o(t){if(l===setTimeout)return setTimeout(t,0);if((l===n||!l)&&setTimeout)return l=setTimeout,setTimeout(t,0);try{return l(t,0)}catch(e){try{return l.call(null,t,0)}catch(e){return l.call(this,t,0)}}}function i(t){if(f===clearTimeout)return clearTimeout(t);if((f===r||!f)&&clearTimeout)return f=clearTimeout,clearTimeout(t);try{return f(t)}catch(e){try{return f.call(null,t)}catch(e){return f.call(this,t)}}}function u(){v&&p&&(v=!1,p.length?h=p.concat(h):m=-1,h.length&&s())}function s(){if(!v){var t=o(u);v=!0;for(var e=h.length;e;){for(p=h,h=[];++m<e;)p&&p[m].run();m=-1,e=h.length}p=null,v=!1,i(t)}}function c(t,e){this.fun=t,this.array=e}function a(){}var l,f,d=t.exports={};!function(){try{l="function"==typeof setTimeout?setTimeout:n}catch(t){l=n}try{f="function"==typeof clearTimeout?clearTimeout:r}catch(t){f=r}}();var p,h=[],v=!1,m=-1;d.nextTick=function(t){var e=new Array(arguments.length-1);if(arguments.length>1)for(var n=1;n<arguments.length;n++)e[n-1]=arguments[n];h.push(new c(t,e)),1!==h.length||v||o(s)},c.prototype.run=function(){this.fun.apply(null,this.array)},d.title="browser",d.browser=!0,d.env={},d.argv=[],d.version="",d.versions={},d.on=a,d.addListener=a,d.once=a,d.off=a,d.removeListener=a,d.removeAllListeners=a,d.emit=a,d.prependListener=a,d.prependOnceListener=a,d.listeners=function(t){return[]},d.binding=function(t){throw new Error("process.binding is not supported")},d.cwd=function(){return"/"},d.chdir=function(t){throw new Error("process.chdir is not supported")},d.umask=function(){return 0}},function(t,e){var n;n=function(){return this}();try{n=n||Function("return this")()||(0,eval)("this")}catch(t){"object"==typeof window&&(n=window)}t.exports=n},function(t,e){},function(t,e,n){"use strict";function r(){c=!1}Object.defineProperty(e,"__esModule",{value:!0});var o={TEXT_PLAIN:"text/plain",TEXT_HTML:"text/html"},i=new Set;for(var u in o)i.add(o[u]);var s=(console.warn||console.log).bind(console,"[clipboard-polyfill]"),c=!0;e.suppressDTWarnings=r;var a=function(){function t(){this.m=new Map}return t.prototype.setData=function(t,e){c&&!i.has(t)&&s("Unknown data type: "+t,"Call clipboard.suppressWarnings() to suppress this warning."),this.m.set(t,e)},t.prototype.getData=function(t){return this.m.get(t)},t.prototype.forEach=function(t){return this.m.forEach(t)},t}();e.DT=a}])});
diff --git a/web/gui/lib/d3-4.12.2.min.js b/web/gui/lib/d3-4.12.2.min.js
new file mode 100644
index 0000000..3d91d1a
--- /dev/null
+++ b/web/gui/lib/d3-4.12.2.min.js
@@ -0,0 +1,3 @@
+// https://d3js.org Version 4.12.2. Copyright 2017 Mike Bostock.
+// SPDX-License-Identifier: BSD-3-Clause
+(function(t,n){"object"==typeof exports&&"undefined"!=typeof module?n(exports):"function"==typeof define&&define.amd?define(["exports"],n):n(t.d3=t.d3||{})})(this,function(t){"use strict";function n(t,n){return t<n?-1:t>n?1:t>=n?0:NaN}function e(t){return 1===t.length&&(t=function(t){return function(e,r){return n(t(e),r)}}(t)),{left:function(n,e,r,i){for(null==r&&(r=0),null==i&&(i=n.length);r<i;){var o=r+i>>>1;t(n[o],e)<0?r=o+1:i=o}return r},right:function(n,e,r,i){for(null==r&&(r=0),null==i&&(i=n.length);r<i;){var o=r+i>>>1;t(n[o],e)>0?i=o:r=o+1}return r}}}function r(t,n){return[t,n]}function i(t){return null===t?NaN:+t}function o(t,n){var e,r,o=t.length,u=0,a=-1,c=0,s=0;if(null==n)for(;++a<o;)isNaN(e=i(t[a]))||(s+=(r=e-c)*(e-(c+=r/++u)));else for(;++a<o;)isNaN(e=i(n(t[a],a,t)))||(s+=(r=e-c)*(e-(c+=r/++u)));if(u>1)return s/(u-1)}function u(t,n){var e=o(t,n);return e?Math.sqrt(e):e}function a(t,n){var e,r,i,o=t.length,u=-1;if(null==n){for(;++u<o;)if(null!=(e=t[u])&&e>=e)for(r=i=e;++u<o;)null!=(e=t[u])&&(r>e&&(r=e),i<e&&(i=e))}else for(;++u<o;)if(null!=(e=n(t[u],u,t))&&e>=e)for(r=i=e;++u<o;)null!=(e=n(t[u],u,t))&&(r>e&&(r=e),i<e&&(i=e));return[r,i]}function c(t){return function(){return t}}function s(t){return t}function f(t,n,e){t=+t,n=+n,e=(i=arguments.length)<2?(n=t,t=0,1):i<3?1:+e;for(var r=-1,i=0|Math.max(0,Math.ceil((n-t)/e)),o=new Array(i);++r<i;)o[r]=t+r*e;return o}function l(t,n,e){var r,i,o,u,a=-1;if(n=+n,t=+t,e=+e,t===n&&e>0)return[t];if((r=n<t)&&(i=t,t=n,n=i),0===(u=h(t,n,e))||!isFinite(u))return[];if(u>0)for(t=Math.ceil(t/u),n=Math.floor(n/u),o=new Array(i=Math.ceil(n-t+1));++a<i;)o[a]=(t+a)*u;else for(t=Math.floor(t*u),n=Math.ceil(n*u),o=new Array(i=Math.ceil(t-n+1));++a<i;)o[a]=(t-a)/u;return r&&o.reverse(),o}function h(t,n,e){var r=(n-t)/Math.max(0,e),i=Math.floor(Math.log(r)/Math.LN10),o=r/Math.pow(10,i);return i>=0?(o>=Ys?10:o>=Bs?5:o>=Hs?2:1)*Math.pow(10,i):-Math.pow(10,-i)/(o>=Ys?10:o>=Bs?5:o>=Hs?2:1)}function p(t,n,e){var r=Math.abs(n-t)/Math.max(0,e),i=Math.pow(10,Math.floor(Math.log(r)/Math.LN10)),o=r/i;return o>=Ys?i*=10:o>=Bs?i*=5:o>=Hs&&(i*=2),n<t?-i:i}function d(t){return Math.ceil(Math.log(t.length)/Math.LN2)+1}function v(t,n,e){if(null==e&&(e=i),r=t.length){if((n=+n)<=0||r<2)return+e(t[0],0,t);if(n>=1)return+e(t[r-1],r-1,t);var r,o=(r-1)*n,u=Math.floor(o),a=+e(t[u],u,t);return a+(+e(t[u+1],u+1,t)-a)*(o-u)}}function g(t){for(var n,e,r,i=t.length,o=-1,u=0;++o<i;)u+=t[o].length;for(e=new Array(u);--i>=0;)for(n=(r=t[i]).length;--n>=0;)e[--u]=r[n];return e}function _(t,n){var e,r,i=t.length,o=-1;if(null==n){for(;++o<i;)if(null!=(e=t[o])&&e>=e)for(r=e;++o<i;)null!=(e=t[o])&&r>e&&(r=e)}else for(;++o<i;)if(null!=(e=n(t[o],o,t))&&e>=e)for(r=e;++o<i;)null!=(e=n(t[o],o,t))&&r>e&&(r=e);return r}function y(t){if(!(i=t.length))return[];for(var n=-1,e=_(t,m),r=new Array(e);++n<e;)for(var i,o=-1,u=r[n]=new Array(i);++o<i;)u[o]=t[o][n];return r}function m(t){return t.length}function x(t){return t}function b(t){return"translate("+(t+.5)+",0)"}function w(t){return"translate(0,"+(t+.5)+")"}function M(){return!this.__axis}function T(t,n){function e(e){var h=null==i?n.ticks?n.ticks.apply(n,r):n.domain():i,p=null==o?n.tickFormat?n.tickFormat.apply(n,r):x:o,d=Math.max(u,0)+c,v=n.range(),g=+v[0]+.5,_=+v[v.length-1]+.5,y=(n.bandwidth?function(t){var n=Math.max(0,t.bandwidth()-1)/2;return t.round()&&(n=Math.round(n)),function(e){return+t(e)+n}}:function(t){return function(n){return+t(n)}})(n.copy()),m=e.selection?e.selection():e,b=m.selectAll(".domain").data([null]),w=m.selectAll(".tick").data(h,n).order(),T=w.exit(),N=w.enter().append("g").attr("class","tick"),k=w.select("line"),S=w.select("text");b=b.merge(b.enter().insert("path",".tick").attr("class","domain").attr("stroke","#000")),w=w.merge(N),k=k.merge(N.append("line").attr("stroke","#000").attr(f+"2",s*u)),S=S.merge(N.append("text").attr("fill","#000").attr(f,s*d).attr("dy",t===Xs?"0em":t===$s?"0.71em":"0.32em")),e!==m&&(b=b.transition(e),w=w.transition(e),k=k.transition(e),S=S.transition(e),T=T.transition(e).attr("opacity",Zs).attr("transform",function(t){return isFinite(t=y(t))?l(t):this.getAttribute("transform")}),N.attr("opacity",Zs).attr("transform",function(t){var n=this.parentNode.__axis;return l(n&&isFinite(n=n(t))?n:y(t))})),T.remove(),b.attr("d",t===Ws||t==Vs?"M"+s*a+","+g+"H0.5V"+_+"H"+s*a:"M"+g+","+s*a+"V0.5H"+_+"V"+s*a),w.attr("opacity",1).attr("transform",function(t){return l(y(t))}),k.attr(f+"2",s*u),S.attr(f,s*d).text(p),m.filter(M).attr("fill","none").attr("font-size",10).attr("font-family","sans-serif").attr("text-anchor",t===Vs?"start":t===Ws?"end":"middle"),m.each(function(){this.__axis=y})}var r=[],i=null,o=null,u=6,a=6,c=3,s=t===Xs||t===Ws?-1:1,f=t===Ws||t===Vs?"x":"y",l=t===Xs||t===$s?b:w;return e.scale=function(t){return arguments.length?(n=t,e):n},e.ticks=function(){return r=js.call(arguments),e},e.tickArguments=function(t){return arguments.length?(r=null==t?[]:js.call(t),e):r.slice()},e.tickValues=function(t){return arguments.length?(i=null==t?null:js.call(t),e):i&&i.slice()},e.tickFormat=function(t){return arguments.length?(o=t,e):o},e.tickSize=function(t){return arguments.length?(u=a=+t,e):u},e.tickSizeInner=function(t){return arguments.length?(u=+t,e):u},e.tickSizeOuter=function(t){return arguments.length?(a=+t,e):a},e.tickPadding=function(t){return arguments.length?(c=+t,e):c},e}function N(){for(var t,n=0,e=arguments.length,r={};n<e;++n){if(!(t=arguments[n]+"")||t in r)throw new Error("illegal type: "+t);r[t]=[]}return new k(r)}function k(t){this._=t}function S(t,n,e){for(var r=0,i=t.length;r<i;++r)if(t[r].name===n){t[r]=Gs,t=t.slice(0,r).concat(t.slice(r+1));break}return null!=e&&t.push({name:n,value:e}),t}function E(t){var n=t+="",e=n.indexOf(":");return e>=0&&"xmlns"!==(n=t.slice(0,e))&&(t=t.slice(e+1)),Js.hasOwnProperty(n)?{space:Js[n],local:t}:t}function A(t){var n=E(t);return(n.local?function(t){return function(){return this.ownerDocument.createElementNS(t.space,t.local)}}:function(t){return function(){var n=this.ownerDocument,e=this.namespaceURI;return e===Qs&&n.documentElement.namespaceURI===Qs?n.createElement(t):n.createElementNS(e,t)}})(n)}function C(){return new z}function z(){this._="@"+(++Ks).toString(36)}function P(t,n,e){return t=R(t,n,e),function(n){var e=n.relatedTarget;e&&(e===this||8&e.compareDocumentPosition(this))||t.call(this,n)}}function R(n,e,r){return function(i){var o=t.event;t.event=i;try{n.call(this,this.__data__,e,r)}finally{t.event=o}}}function L(t){return function(){var n=this.__on;if(n){for(var e,r=0,i=-1,o=n.length;r<o;++r)e=n[r],t.type&&e.type!==t.type||e.name!==t.name?n[++i]=e:this.removeEventListener(e.type,e.listener,e.capture);++i?n.length=i:delete this.__on}}}function q(t,n,e){var r=of.hasOwnProperty(t.type)?P:R;return function(i,o,u){var a,c=this.__on,s=r(n,o,u);if(c)for(var f=0,l=c.length;f<l;++f)if((a=c[f]).type===t.type&&a.name===t.name)return this.removeEventListener(a.type,a.listener,a.capture),this.addEventListener(a.type,a.listener=s,a.capture=e),void(a.value=n);this.addEventListener(t.type,s,e),a={type:t.type,name:t.name,value:n,listener:s,capture:e},c?c.push(a):this.__on=[a]}}function D(n,e,r,i){var o=t.event;n.sourceEvent=t.event,t.event=n;try{return e.apply(r,i)}finally{t.event=o}}function U(){for(var n,e=t.event;n=e.sourceEvent;)e=n;return e}function O(t,n){var e=t.ownerSVGElement||t;if(e.createSVGPoint){var r=e.createSVGPoint();return r.x=n.clientX,r.y=n.clientY,r=r.matrixTransform(t.getScreenCTM().inverse()),[r.x,r.y]}var i=t.getBoundingClientRect();return[n.clientX-i.left-t.clientLeft,n.clientY-i.top-t.clientTop]}function F(t){var n=U();return n.changedTouches&&(n=n.changedTouches[0]),O(t,n)}function I(){}function Y(t){return null==t?I:function(){return this.querySelector(t)}}function B(){return[]}function H(t){return null==t?B:function(){return this.querySelectorAll(t)}}function j(t){return new Array(t.length)}function X(t,n){this.ownerDocument=t.ownerDocument,this.namespaceURI=t.namespaceURI,this._next=null,this._parent=t,this.__data__=n}function V(t,n,e,r,i,o){for(var u,a=0,c=n.length,s=o.length;a<s;++a)(u=n[a])?(u.__data__=o[a],r[a]=u):e[a]=new X(t,o[a]);for(;a<c;++a)(u=n[a])&&(i[a]=u)}function $(t,n,e,r,i,o,u){var a,c,s,f={},l=n.length,h=o.length,p=new Array(l);for(a=0;a<l;++a)(c=n[a])&&(p[a]=s=uf+u.call(c,c.__data__,a,n),s in f?i[a]=c:f[s]=c);for(a=0;a<h;++a)(c=f[s=uf+u.call(t,o[a],a,o)])?(r[a]=c,c.__data__=o[a],f[s]=null):e[a]=new X(t,o[a]);for(a=0;a<l;++a)(c=n[a])&&f[p[a]]===c&&(i[a]=c)}function W(t,n){return t<n?-1:t>n?1:t>=n?0:NaN}function Z(t){return t.ownerDocument&&t.ownerDocument.defaultView||t.document&&t||t.defaultView}function G(t,n){return t.style.getPropertyValue(n)||Z(t).getComputedStyle(t,null).getPropertyValue(n)}function Q(t){return t.trim().split(/^|\s+/)}function J(t){return t.classList||new K(t)}function K(t){this._node=t,this._names=Q(t.getAttribute("class")||"")}function tt(t,n){for(var e=J(t),r=-1,i=n.length;++r<i;)e.add(n[r])}function nt(t,n){for(var e=J(t),r=-1,i=n.length;++r<i;)e.remove(n[r])}function et(){this.textContent=""}function rt(){this.innerHTML=""}function it(){this.nextSibling&&this.parentNode.appendChild(this)}function ot(){this.previousSibling&&this.parentNode.insertBefore(this,this.parentNode.firstChild)}function ut(){return null}function at(){var t=this.parentNode;t&&t.removeChild(this)}function ct(t,n,e){var r=Z(t),i=r.CustomEvent;"function"==typeof i?i=new i(n,e):(i=r.document.createEvent("Event"),e?(i.initEvent(n,e.bubbles,e.cancelable),i.detail=e.detail):i.initEvent(n,!1,!1)),t.dispatchEvent(i)}function st(t,n){this._groups=t,this._parents=n}function ft(){return new st([[document.documentElement]],af)}function lt(t){return"string"==typeof t?new st([[document.querySelector(t)]],[document.documentElement]):new st([[t]],af)}function ht(t,n,e){arguments.length<3&&(e=n,n=U().changedTouches);for(var r,i=0,o=n?n.length:0;i<o;++i)if((r=n[i]).identifier===e)return O(t,r);return null}function pt(){t.event.stopImmediatePropagation()}function dt(){t.event.preventDefault(),t.event.stopImmediatePropagation()}function vt(t){var n=t.document.documentElement,e=lt(t).on("dragstart.drag",dt,!0);"onselectstart"in n?e.on("selectstart.drag",dt,!0):(n.__noselect=n.style.MozUserSelect,n.style.MozUserSelect="none")}function gt(t,n){var e=t.document.documentElement,r=lt(t).on("dragstart.drag",null);n&&(r.on("click.drag",dt,!0),setTimeout(function(){r.on("click.drag",null)},0)),"onselectstart"in e?r.on("selectstart.drag",null):(e.style.MozUserSelect=e.__noselect,delete e.__noselect)}function _t(t){return function(){return t}}function yt(t,n,e,r,i,o,u,a,c,s){this.target=t,this.type=n,this.subject=e,this.identifier=r,this.active=i,this.x=o,this.y=u,this.dx=a,this.dy=c,this._=s}function mt(){return!t.event.button}function xt(){return this.parentNode}function bt(n){return null==n?{x:t.event.x,y:t.event.y}:n}function wt(){return"ontouchstart"in this}function Mt(t,n,e){t.prototype=n.prototype=e,e.constructor=t}function Tt(t,n){var e=Object.create(t.prototype);for(var r in n)e[r]=n[r];return e}function Nt(){}function kt(t){var n;return t=(t+"").trim().toLowerCase(),(n=lf.exec(t))?(n=parseInt(n[1],16),new zt(n>>8&15|n>>4&240,n>>4&15|240&n,(15&n)<<4|15&n,1)):(n=hf.exec(t))?St(parseInt(n[1],16)):(n=pf.exec(t))?new zt(n[1],n[2],n[3],1):(n=df.exec(t))?new zt(255*n[1]/100,255*n[2]/100,255*n[3]/100,1):(n=vf.exec(t))?Et(n[1],n[2],n[3],n[4]):(n=gf.exec(t))?Et(255*n[1]/100,255*n[2]/100,255*n[3]/100,n[4]):(n=_f.exec(t))?Pt(n[1],n[2]/100,n[3]/100,1):(n=yf.exec(t))?Pt(n[1],n[2]/100,n[3]/100,n[4]):mf.hasOwnProperty(t)?St(mf[t]):"transparent"===t?new zt(NaN,NaN,NaN,0):null}function St(t){return new zt(t>>16&255,t>>8&255,255&t,1)}function Et(t,n,e,r){return r<=0&&(t=n=e=NaN),new zt(t,n,e,r)}function At(t){return t instanceof Nt||(t=kt(t)),t?(t=t.rgb(),new zt(t.r,t.g,t.b,t.opacity)):new zt}function Ct(t,n,e,r){return 1===arguments.length?At(t):new zt(t,n,e,null==r?1:r)}function zt(t,n,e,r){this.r=+t,this.g=+n,this.b=+e,this.opacity=+r}function Pt(t,n,e,r){return r<=0?t=n=e=NaN:e<=0||e>=1?t=n=NaN:n<=0&&(t=NaN),new Lt(t,n,e,r)}function Rt(t,n,e,r){return 1===arguments.length?function(t){if(t instanceof Lt)return new Lt(t.h,t.s,t.l,t.opacity);if(t instanceof Nt||(t=kt(t)),!t)return new Lt;if(t instanceof Lt)return t;var n=(t=t.rgb()).r/255,e=t.g/255,r=t.b/255,i=Math.min(n,e,r),o=Math.max(n,e,r),u=NaN,a=o-i,c=(o+i)/2;return a?(u=n===o?(e-r)/a+6*(e<r):e===o?(r-n)/a+2:(n-e)/a+4,a/=c<.5?o+i:2-o-i,u*=60):a=c>0&&c<1?0:u,new Lt(u,a,c,t.opacity)}(t):new Lt(t,n,e,null==r?1:r)}function Lt(t,n,e,r){this.h=+t,this.s=+n,this.l=+e,this.opacity=+r}function qt(t,n,e){return 255*(t<60?n+(e-n)*t/60:t<180?e:t<240?n+(e-n)*(240-t)/60:n)}function Dt(t){if(t instanceof Ot)return new Ot(t.l,t.a,t.b,t.opacity);if(t instanceof jt){var n=t.h*xf;return new Ot(t.l,Math.cos(n)*t.c,Math.sin(n)*t.c,t.opacity)}t instanceof zt||(t=At(t));var e=Bt(t.r),r=Bt(t.g),i=Bt(t.b),o=Ft((.4124564*e+.3575761*r+.1804375*i)/wf),u=Ft((.2126729*e+.7151522*r+.072175*i)/Mf);return new Ot(116*u-16,500*(o-u),200*(u-Ft((.0193339*e+.119192*r+.9503041*i)/Tf)),t.opacity)}function Ut(t,n,e,r){return 1===arguments.length?Dt(t):new Ot(t,n,e,null==r?1:r)}function Ot(t,n,e,r){this.l=+t,this.a=+n,this.b=+e,this.opacity=+r}function Ft(t){return t>Ef?Math.pow(t,1/3):t/Sf+Nf}function It(t){return t>kf?t*t*t:Sf*(t-Nf)}function Yt(t){return 255*(t<=.0031308?12.92*t:1.055*Math.pow(t,1/2.4)-.055)}function Bt(t){return(t/=255)<=.04045?t/12.92:Math.pow((t+.055)/1.055,2.4)}function Ht(t,n,e,r){return 1===arguments.length?function(t){if(t instanceof jt)return new jt(t.h,t.c,t.l,t.opacity);t instanceof Ot||(t=Dt(t));var n=Math.atan2(t.b,t.a)*bf;return new jt(n<0?n+360:n,Math.sqrt(t.a*t.a+t.b*t.b),t.l,t.opacity)}(t):new jt(t,n,e,null==r?1:r)}function jt(t,n,e,r){this.h=+t,this.c=+n,this.l=+e,this.opacity=+r}function Xt(t,n,e,r){return 1===arguments.length?function(t){if(t instanceof Vt)return new Vt(t.h,t.s,t.l,t.opacity);t instanceof zt||(t=At(t));var n=t.r/255,e=t.g/255,r=t.b/255,i=(Lf*r+Pf*n-Rf*e)/(Lf+Pf-Rf),o=r-i,u=(zf*(e-i)-Af*o)/Cf,a=Math.sqrt(u*u+o*o)/(zf*i*(1-i)),c=a?Math.atan2(u,o)*bf-120:NaN;return new Vt(c<0?c+360:c,a,i,t.opacity)}(t):new Vt(t,n,e,null==r?1:r)}function Vt(t,n,e,r){this.h=+t,this.s=+n,this.l=+e,this.opacity=+r}function $t(t,n,e,r,i){var o=t*t,u=o*t;return((1-3*t+3*o-u)*n+(4-6*o+3*u)*e+(1+3*t+3*o-3*u)*r+u*i)/6}function Wt(t){var n=t.length-1;return function(e){var r=e<=0?e=0:e>=1?(e=1,n-1):Math.floor(e*n),i=t[r],o=t[r+1],u=r>0?t[r-1]:2*i-o,a=r<n-1?t[r+2]:2*o-i;return $t((e-r/n)*n,u,i,o,a)}}function Zt(t){var n=t.length;return function(e){var r=Math.floor(((e%=1)<0?++e:e)*n),i=t[(r+n-1)%n],o=t[r%n],u=t[(r+1)%n],a=t[(r+2)%n];return $t((e-r/n)*n,i,o,u,a)}}function Gt(t){return function(){return t}}function Qt(t,n){return function(e){return t+e*n}}function Jt(t,n){var e=n-t;return e?Qt(t,e>180||e<-180?e-360*Math.round(e/360):e):Gt(isNaN(t)?n:t)}function Kt(t){return 1==(t=+t)?tn:function(n,e){return e-n?function(t,n,e){return t=Math.pow(t,e),n=Math.pow(n,e)-t,e=1/e,function(r){return Math.pow(t+r*n,e)}}(n,e,t):Gt(isNaN(n)?e:n)}}function tn(t,n){var e=n-t;return e?Qt(t,e):Gt(isNaN(t)?n:t)}function nn(t){return function(n){var e,r,i=n.length,o=new Array(i),u=new Array(i),a=new Array(i);for(e=0;e<i;++e)r=Ct(n[e]),o[e]=r.r||0,u[e]=r.g||0,a[e]=r.b||0;return o=t(o),u=t(u),a=t(a),r.opacity=1,function(t){return r.r=o(t),r.g=u(t),r.b=a(t),r+""}}}function en(t,n){var e,r=n?n.length:0,i=t?Math.min(r,t.length):0,o=new Array(i),u=new Array(r);for(e=0;e<i;++e)o[e]=cn(t[e],n[e]);for(;e<r;++e)u[e]=n[e];return function(t){for(e=0;e<i;++e)u[e]=o[e](t);return u}}function rn(t,n){var e=new Date;return t=+t,n-=t,function(r){return e.setTime(t+n*r),e}}function on(t,n){return t=+t,n-=t,function(e){return t+n*e}}function un(t,n){var e,r={},i={};null!==t&&"object"==typeof t||(t={}),null!==n&&"object"==typeof n||(n={});for(e in n)e in t?r[e]=cn(t[e],n[e]):i[e]=n[e];return function(t){for(e in r)i[e]=r[e](t);return i}}function an(t,n){var e,r,i,o=jf.lastIndex=Xf.lastIndex=0,u=-1,a=[],c=[];for(t+="",n+="";(e=jf.exec(t))&&(r=Xf.exec(n));)(i=r.index)>o&&(i=n.slice(o,i),a[u]?a[u]+=i:a[++u]=i),(e=e[0])===(r=r[0])?a[u]?a[u]+=r:a[++u]=r:(a[++u]=null,c.push({i:u,x:on(e,r)})),o=Xf.lastIndex;return o<n.length&&(i=n.slice(o),a[u]?a[u]+=i:a[++u]=i),a.length<2?c[0]?function(t){return function(n){return t(n)+""}}(c[0].x):function(t){return function(){return t}}(n):(n=c.length,function(t){for(var e,r=0;r<n;++r)a[(e=c[r]).i]=e.x(t);return a.join("")})}function cn(t,n){var e,r=typeof n;return null==n||"boolean"===r?Gt(n):("number"===r?on:"string"===r?(e=kt(n))?(n=e,Yf):an:n instanceof kt?Yf:n instanceof Date?rn:Array.isArray(n)?en:"function"!=typeof n.valueOf&&"function"!=typeof n.toString||isNaN(n)?un:on)(t,n)}function sn(t,n){return t=+t,n-=t,function(e){return Math.round(t+n*e)}}function fn(t,n,e,r,i,o){var u,a,c;return(u=Math.sqrt(t*t+n*n))&&(t/=u,n/=u),(c=t*e+n*r)&&(e-=t*c,r-=n*c),(a=Math.sqrt(e*e+r*r))&&(e/=a,r/=a,c/=a),t*r<n*e&&(t=-t,n=-n,c=-c,u=-u),{translateX:i,translateY:o,rotate:Math.atan2(n,t)*Vf,skewX:Math.atan(c)*Vf,scaleX:u,scaleY:a}}function ln(t,n,e,r){function i(t){return t.length?t.pop()+" ":""}return function(o,u){var a=[],c=[];return o=t(o),u=t(u),function(t,r,i,o,u,a){if(t!==i||r!==o){var c=u.push("translate(",null,n,null,e);a.push({i:c-4,x:on(t,i)},{i:c-2,x:on(r,o)})}else(i||o)&&u.push("translate("+i+n+o+e)}(o.translateX,o.translateY,u.translateX,u.translateY,a,c),function(t,n,e,o){t!==n?(t-n>180?n+=360:n-t>180&&(t+=360),o.push({i:e.push(i(e)+"rotate(",null,r)-2,x:on(t,n)})):n&&e.push(i(e)+"rotate("+n+r)}(o.rotate,u.rotate,a,c),function(t,n,e,o){t!==n?o.push({i:e.push(i(e)+"skewX(",null,r)-2,x:on(t,n)}):n&&e.push(i(e)+"skewX("+n+r)}(o.skewX,u.skewX,a,c),function(t,n,e,r,o,u){if(t!==e||n!==r){var a=o.push(i(o)+"scale(",null,",",null,")");u.push({i:a-4,x:on(t,e)},{i:a-2,x:on(n,r)})}else 1===e&&1===r||o.push(i(o)+"scale("+e+","+r+")")}(o.scaleX,o.scaleY,u.scaleX,u.scaleY,a,c),o=u=null,function(t){for(var n,e=-1,r=c.length;++e<r;)a[(n=c[e]).i]=n.x(t);return a.join("")}}}function hn(t){return((t=Math.exp(t))+1/t)/2}function pn(t,n){var e,r,i=t[0],o=t[1],u=t[2],a=n[0],c=n[1],s=n[2],f=a-i,l=c-o,h=f*f+l*l;if(h<Kf)r=Math.log(s/u)/Gf,e=function(t){return[i+t*f,o+t*l,u*Math.exp(Gf*t*r)]};else{var p=Math.sqrt(h),d=(s*s-u*u+Jf*h)/(2*u*Qf*p),v=(s*s-u*u-Jf*h)/(2*s*Qf*p),g=Math.log(Math.sqrt(d*d+1)-d),_=Math.log(Math.sqrt(v*v+1)-v);r=(_-g)/Gf,e=function(t){var n=t*r,e=hn(g),a=u/(Qf*p)*(e*function(t){return((t=Math.exp(2*t))-1)/(t+1)}(Gf*n+g)-function(t){return((t=Math.exp(t))-1/t)/2}(g));return[i+a*f,o+a*l,u*e/hn(Gf*n+g)]}}return e.duration=1e3*r,e}function dn(t){return function(n,e){var r=t((n=Rt(n)).h,(e=Rt(e)).h),i=tn(n.s,e.s),o=tn(n.l,e.l),u=tn(n.opacity,e.opacity);return function(t){return n.h=r(t),n.s=i(t),n.l=o(t),n.opacity=u(t),n+""}}}function vn(t){return function(n,e){var r=t((n=Ht(n)).h,(e=Ht(e)).h),i=tn(n.c,e.c),o=tn(n.l,e.l),u=tn(n.opacity,e.opacity);return function(t){return n.h=r(t),n.c=i(t),n.l=o(t),n.opacity=u(t),n+""}}}function gn(t){return function n(e){function r(n,r){var i=t((n=Xt(n)).h,(r=Xt(r)).h),o=tn(n.s,r.s),u=tn(n.l,r.l),a=tn(n.opacity,r.opacity);return function(t){return n.h=i(t),n.s=o(t),n.l=u(Math.pow(t,e)),n.opacity=a(t),n+""}}return e=+e,r.gamma=n,r}(1)}function _n(){return ll||(dl(yn),ll=pl.now()+hl)}function yn(){ll=0}function mn(){this._call=this._time=this._next=null}function xn(t,n,e){var r=new mn;return r.restart(t,n,e),r}function bn(){_n(),++ul;for(var t,n=Ff;n;)(t=ll-n._time)>=0&&n._call.call(null,t),n=n._next;--ul}function wn(){ll=(fl=pl.now())+hl,ul=al=0;try{bn()}finally{ul=0,function(){var t,n,e=Ff,r=1/0;for(;e;)e._call?(r>e._time&&(r=e._time),t=e,e=e._next):(n=e._next,e._next=null,e=t?t._next=n:Ff=n);If=t,Tn(r)}(),ll=0}}function Mn(){var t=pl.now(),n=t-fl;n>sl&&(hl-=n,fl=t)}function Tn(t){if(!ul){al&&(al=clearTimeout(al));t-ll>24?(t<1/0&&(al=setTimeout(wn,t-pl.now()-hl)),cl&&(cl=clearInterval(cl))):(cl||(fl=pl.now(),cl=setInterval(Mn,sl)),ul=1,dl(wn))}}function Nn(t,n,e){var r=new mn;return n=null==n?0:+n,r.restart(function(e){r.stop(),t(e+n)},n,e),r}function kn(t,n,e,r,i,o){var u=t.__transition;if(u){if(e in u)return}else t.__transition={};(function(t,n,e){function r(c){var s,f,l,h;if(e.state!==yl)return o();for(s in a)if((h=a[s]).name===e.name){if(h.state===xl)return Nn(r);h.state===bl?(h.state=Ml,h.timer.stop(),h.on.call("interrupt",t,t.__data__,h.index,h.group),delete a[s]):+s<n&&(h.state=Ml,h.timer.stop(),delete a[s])}if(Nn(function(){e.state===xl&&(e.state=bl,e.timer.restart(i,e.delay,e.time),i(c))}),e.state=ml,e.on.call("start",t,t.__data__,e.index,e.group),e.state===ml){for(e.state=xl,u=new Array(l=e.tween.length),s=0,f=-1;s<l;++s)(h=e.tween[s].value.call(t,t.__data__,e.index,e.group))&&(u[++f]=h);u.length=f+1}}function i(n){for(var r=n<e.duration?e.ease.call(null,n/e.duration):(e.timer.restart(o),e.state=wl,1),i=-1,a=u.length;++i<a;)u[i].call(null,r);e.state===wl&&(e.on.call("end",t,t.__data__,e.index,e.group),o())}function o(){e.state=Ml,e.timer.stop(),delete a[n];for(var r in a)return;delete t.__transition}var u,a=t.__transition;a[n]=e,e.timer=xn(function(t){e.state=yl,e.timer.restart(r,e.delay,e.time),e.delay<=t&&r(t-e.delay)},0,e.time)})(t,e,{name:n,index:r,group:i,on:vl,tween:gl,time:o.time,delay:o.delay,duration:o.duration,ease:o.ease,timer:null,state:_l})}function Sn(t,n){var e=An(t,n);if(e.state>_l)throw new Error("too late; already scheduled");return e}function En(t,n){var e=An(t,n);if(e.state>ml)throw new Error("too late; already started");return e}function An(t,n){var e=t.__transition;if(!e||!(e=e[n]))throw new Error("transition not found");return e}function Cn(t,n){var e,r,i,o=t.__transition,u=!0;if(o){n=null==n?null:n+"";for(i in o)(e=o[i]).name===n?(r=e.state>ml&&e.state<wl,e.state=Ml,e.timer.stop(),r&&e.on.call("interrupt",t,t.__data__,e.index,e.group),delete o[i]):u=!1;u&&delete t.__transition}}function zn(t,n,e){var r=t._id;return t.each(function(){var t=En(this,r);(t.value||(t.value={}))[n]=e.apply(this,arguments)}),function(t){return An(t,r).value[n]}}function Pn(t,n){var e;return("number"==typeof n?on:n instanceof kt?Yf:(e=kt(n))?(n=e,Yf):an)(t,n)}function Rn(t,n,e,r){this._groups=t,this._parents=n,this._name=e,this._id=r}function Ln(t){return ft().transition(t)}function qn(){return++Nl}function Dn(t){return((t*=2)<=1?t*t:--t*(2-t)+1)/2}function Un(t){return((t*=2)<=1?t*t*t:(t-=2)*t*t+2)/2}function On(t){return(1-Math.cos(Cl*t))/2}function Fn(t){return((t*=2)<=1?Math.pow(2,10*t-10):2-Math.pow(2,10-10*t))/2}function In(t){return((t*=2)<=1?1-Math.sqrt(1-t*t):Math.sqrt(1-(t-=2)*t)+1)/2}function Yn(t){return(t=+t)<Pl?Yl*t*t:t<Ll?Yl*(t-=Rl)*t+ql:t<Ul?Yl*(t-=Dl)*t+Ol:Yl*(t-=Fl)*t+Il}function Bn(t,n){for(var e;!(e=t.__transition)||!(e=e[n]);)if(!(t=t.parentNode))return Zl.time=_n(),Zl;return e}function Hn(t){return function(){return t}}function jn(){t.event.stopImmediatePropagation()}function Xn(){t.event.preventDefault(),t.event.stopImmediatePropagation()}function Vn(t){return{type:t}}function $n(){return!t.event.button}function Wn(){var t=this.ownerSVGElement||this;return[[0,0],[t.width.baseVal.value,t.height.baseVal.value]]}function Zn(t){for(;!t.__brush;)if(!(t=t.parentNode))return;return t.__brush}function Gn(t){return t[0][0]===t[1][0]||t[0][1]===t[1][1]}function Qn(n){function e(t){var e=t.property("__brush",a).selectAll(".overlay").data([Vn("overlay")]);e.enter().append("rect").attr("class","overlay").attr("pointer-events","all").attr("cursor",ih.overlay).merge(e).each(function(){var t=Zn(this).extent;lt(this).attr("x",t[0][0]).attr("y",t[0][1]).attr("width",t[1][0]-t[0][0]).attr("height",t[1][1]-t[0][1])}),t.selectAll(".selection").data([Vn("selection")]).enter().append("rect").attr("class","selection").attr("cursor",ih.selection).attr("fill","#777").attr("fill-opacity",.3).attr("stroke","#fff").attr("shape-rendering","crispEdges");var i=t.selectAll(".handle").data(n.handles,function(t){return t.type});i.exit().remove(),i.enter().append("rect").attr("class",function(t){return"handle handle--"+t.type}).attr("cursor",function(t){return ih[t.type]}),t.each(r).attr("fill","none").attr("pointer-events","all").style("-webkit-tap-highlight-color","rgba(0,0,0,0)").on("mousedown.brush touchstart.brush",u)}function r(){var t=lt(this),n=Zn(this).selection;n?(t.selectAll(".selection").style("display",null).attr("x",n[0][0]).attr("y",n[0][1]).attr("width",n[1][0]-n[0][0]).attr("height",n[1][1]-n[0][1]),t.selectAll(".handle").style("display",null).attr("x",function(t){return"e"===t.type[t.type.length-1]?n[1][0]-h/2:n[0][0]-h/2}).attr("y",function(t){return"s"===t.type[0]?n[1][1]-h/2:n[0][1]-h/2}).attr("width",function(t){return"n"===t.type||"s"===t.type?n[1][0]-n[0][0]+h:h}).attr("height",function(t){return"e"===t.type||"w"===t.type?n[1][1]-n[0][1]+h:h})):t.selectAll(".selection,.handle").style("display","none").attr("x",null).attr("y",null).attr("width",null).attr("height",null)}function i(t,n){return t.__brush.emitter||new o(t,n)}function o(t,n){this.that=t,this.args=n,this.state=t.__brush,this.active=0}function u(){function e(){var t=F(w);!L||x||b||(Math.abs(t[0]-D[0])>Math.abs(t[1]-D[1])?b=!0:x=!0),D=t,m=!0,Xn(),o()}function o(){var t;switch(_=D[0]-q[0],y=D[1]-q[1],T){case Jl:case Ql:N&&(_=Math.max(C-a,Math.min(P-p,_)),s=a+_,d=p+_),k&&(y=Math.max(z-l,Math.min(R-v,y)),h=l+y,g=v+y);break;case Kl:N<0?(_=Math.max(C-a,Math.min(P-a,_)),s=a+_,d=p):N>0&&(_=Math.max(C-p,Math.min(P-p,_)),s=a,d=p+_),k<0?(y=Math.max(z-l,Math.min(R-l,y)),h=l+y,g=v):k>0&&(y=Math.max(z-v,Math.min(R-v,y)),h=l,g=v+y);break;case th:N&&(s=Math.max(C,Math.min(P,a-_*N)),d=Math.max(C,Math.min(P,p+_*N))),k&&(h=Math.max(z,Math.min(R,l-y*k)),g=Math.max(z,Math.min(R,v+y*k)))}d<s&&(N*=-1,t=a,a=p,p=t,t=s,s=d,d=t,M in oh&&I.attr("cursor",ih[M=oh[M]])),g<h&&(k*=-1,t=l,l=v,v=t,t=h,h=g,g=t,M in uh&&I.attr("cursor",ih[M=uh[M]])),S.selection&&(A=S.selection),x&&(s=A[0][0],d=A[1][0]),b&&(h=A[0][1],g=A[1][1]),A[0][0]===s&&A[0][1]===h&&A[1][0]===d&&A[1][1]===g||(S.selection=[[s,h],[d,g]],r.call(w),U.brush())}function u(){if(jn(),t.event.touches){if(t.event.touches.length)return;c&&clearTimeout(c),c=setTimeout(function(){c=null},500),O.on("touchmove.brush touchend.brush touchcancel.brush",null)}else gt(t.event.view,m),Y.on("keydown.brush keyup.brush mousemove.brush mouseup.brush",null);O.attr("pointer-events","all"),I.attr("cursor",ih.overlay),S.selection&&(A=S.selection),Gn(A)&&(S.selection=null,r.call(w)),U.end()}if(t.event.touches){if(t.event.changedTouches.length<t.event.touches.length)return Xn()}else if(c)return;if(f.apply(this,arguments)){var a,s,l,h,p,d,v,g,_,y,m,x,b,w=this,M=t.event.target.__data__.type,T="selection"===(t.event.metaKey?M="overlay":M)?Ql:t.event.altKey?th:Kl,N=n===eh?null:ah[M],k=n===nh?null:ch[M],S=Zn(w),E=S.extent,A=S.selection,C=E[0][0],z=E[0][1],P=E[1][0],R=E[1][1],L=N&&k&&t.event.shiftKey,q=F(w),D=q,U=i(w,arguments).beforestart();"overlay"===M?S.selection=A=[[a=n===eh?C:q[0],l=n===nh?z:q[1]],[p=n===eh?P:a,v=n===nh?R:l]]:(a=A[0][0],l=A[0][1],p=A[1][0],v=A[1][1]),s=a,h=l,d=p,g=v;var O=lt(w).attr("pointer-events","none"),I=O.selectAll(".overlay").attr("cursor",ih[M]);if(t.event.touches)O.on("touchmove.brush",e,!0).on("touchend.brush touchcancel.brush",u,!0);else{var Y=lt(t.event.view).on("keydown.brush",function(){switch(t.event.keyCode){case 16:L=N&&k;break;case 18:T===Kl&&(N&&(p=d-_*N,a=s+_*N),k&&(v=g-y*k,l=h+y*k),T=th,o());break;case 32:T!==Kl&&T!==th||(N<0?p=d-_:N>0&&(a=s-_),k<0?v=g-y:k>0&&(l=h-y),T=Jl,I.attr("cursor",ih.selection),o());break;default:return}Xn()},!0).on("keyup.brush",function(){switch(t.event.keyCode){case 16:L&&(x=b=L=!1,o());break;case 18:T===th&&(N<0?p=d:N>0&&(a=s),k<0?v=g:k>0&&(l=h),T=Kl,o());break;case 32:T===Jl&&(t.event.altKey?(N&&(p=d-_*N,a=s+_*N),k&&(v=g-y*k,l=h+y*k),T=th):(N<0?p=d:N>0&&(a=s),k<0?v=g:k>0&&(l=h),T=Kl),I.attr("cursor",ih[M]),o());break;default:return}Xn()},!0).on("mousemove.brush",e,!0).on("mouseup.brush",u,!0);vt(t.event.view)}jn(),Cn(w),r.call(w),U.start()}}function a(){var t=this.__brush||{selection:null};return t.extent=s.apply(this,arguments),t.dim=n,t}var c,s=Wn,f=$n,l=N(e,"start","brush","end"),h=6;return e.move=function(t,e){t.selection?t.on("start.brush",function(){i(this,arguments).beforestart().start()}).on("interrupt.brush end.brush",function(){i(this,arguments).end()}).tween("brush",function(){function t(t){u.selection=1===t&&Gn(s)?null:f(t),r.call(o),a.brush()}var o=this,u=o.__brush,a=i(o,arguments),c=u.selection,s=n.input("function"==typeof e?e.apply(this,arguments):e,u.extent),f=cn(c,s);return c&&s?t:t(1)}):t.each(function(){var t=arguments,o=this.__brush,u=n.input("function"==typeof e?e.apply(this,t):e,o.extent),a=i(this,t).beforestart();Cn(this),o.selection=null==u||Gn(u)?null:u,r.call(this),a.start().brush().end()})},o.prototype={beforestart:function(){return 1==++this.active&&(this.state.emitter=this,this.starting=!0),this},start:function(){return this.starting&&(this.starting=!1,this.emit("start")),this},brush:function(){return this.emit("brush"),this},end:function(){return 0==--this.active&&(delete this.state.emitter,this.emit("end")),this},emit:function(t){D(new function(t,n,e){this.target=t,this.type=n,this.selection=e}(e,t,n.output(this.state.selection)),l.apply,l,[t,this.that,this.args])}},e.extent=function(t){return arguments.length?(s="function"==typeof t?t:Hn([[+t[0][0],+t[0][1]],[+t[1][0],+t[1][1]]]),e):s},e.filter=function(t){return arguments.length?(f="function"==typeof t?t:Hn(!!t),e):f},e.handleSize=function(t){return arguments.length?(h=+t,e):h},e.on=function(){var t=l.on.apply(l,arguments);return t===l?e:t},e}function Jn(t){return function(){return t}}function Kn(){this._x0=this._y0=this._x1=this._y1=null,this._=""}function te(){return new Kn}function ne(t){return t.source}function ee(t){return t.target}function re(t){return t.radius}function ie(t){return t.startAngle}function oe(t){return t.endAngle}function ue(){}function ae(t,n){var e=new ue;if(t instanceof ue)t.each(function(t,n){e.set(n,t)});else if(Array.isArray(t)){var r,i=-1,o=t.length;if(null==n)for(;++i<o;)e.set(i,t[i]);else for(;++i<o;)e.set(n(r=t[i],i,t),r)}else if(t)for(var u in t)e.set(u,t[u]);return e}function ce(){return{}}function se(t,n,e){t[n]=e}function fe(){return ae()}function le(t,n,e){t.set(n,e)}function he(){}function pe(t,n){var e=new he;if(t instanceof he)t.each(function(t){e.add(t)});else if(t){var r=-1,i=t.length;if(null==n)for(;++r<i;)e.add(t[r]);else for(;++r<i;)e.add(n(t[r],r,t))}return e}function de(t){return new Function("d","return {"+t.map(function(t,n){return JSON.stringify(t)+": d["+n+"]"}).join(",")+"}")}function ve(t){function n(t,n){function e(){if(s)return bh;if(f)return f=!1,xh;var n,e,r=a;if(t.charCodeAt(r)===wh){for(;a++<u&&t.charCodeAt(a)!==wh||t.charCodeAt(++a)===wh;);return(n=a)>=u?s=!0:(e=t.charCodeAt(a++))===Mh?f=!0:e===Th&&(f=!0,t.charCodeAt(a)===Mh&&++a),t.slice(r+1,n-1).replace(/""/g,'"')}for(;a<u;){if((e=t.charCodeAt(n=a++))===Mh)f=!0;else if(e===Th)f=!0,t.charCodeAt(a)===Mh&&++a;else if(e!==o)continue;return t.slice(r,n)}return s=!0,t.slice(r,u)}var r,i=[],u=t.length,a=0,c=0,s=u<=0,f=!1;for(t.charCodeAt(u-1)===Mh&&--u,t.charCodeAt(u-1)===Th&&--u;(r=e())!==bh;){for(var l=[];r!==xh&&r!==bh;)l.push(r),r=e();n&&null==(l=n(l,c++))||i.push(l)}return i}function e(n){return n.map(r).join(t)}function r(t){return null==t?"":i.test(t+="")?'"'+t.replace(/"/g,'""')+'"':t}var i=new RegExp('["'+t+"\n\r]"),o=t.charCodeAt(0);return{parse:function(t,e){var r,i,o=n(t,function(t,n){if(r)return r(t,n-1);i=t,r=e?function(t,n){var e=de(t);return function(r,i){return n(e(r),i,t)}}(t,e):de(t)});return o.columns=i||[],o},parseRows:n,format:function(n,e){return null==e&&(e=function(t){var n=Object.create(null),e=[];return t.forEach(function(t){for(var r in t)r in n||e.push(n[r]=r)}),e}(n)),[e.map(r).join(t)].concat(n.map(function(n){return e.map(function(t){return r(n[t])}).join(t)})).join("\n")},formatRows:function(t){return t.map(e).join("\n")}}}function ge(t){return function(){return t}}function _e(){return 1e-6*(Math.random()-.5)}function ye(t,n,e,r){if(isNaN(n)||isNaN(e))return t;var i,o,u,a,c,s,f,l,h,p=t._root,d={data:r},v=t._x0,g=t._y0,_=t._x1,y=t._y1;if(!p)return t._root=d,t;for(;p.length;)if((s=n>=(o=(v+_)/2))?v=o:_=o,(f=e>=(u=(g+y)/2))?g=u:y=u,i=p,!(p=p[l=f<<1|s]))return i[l]=d,t;if(a=+t._x.call(null,p.data),c=+t._y.call(null,p.data),n===a&&e===c)return d.next=p,i?i[l]=d:t._root=d,t;do{i=i?i[l]=new Array(4):t._root=new Array(4),(s=n>=(o=(v+_)/2))?v=o:_=o,(f=e>=(u=(g+y)/2))?g=u:y=u}while((l=f<<1|s)==(h=(c>=u)<<1|a>=o));return i[h]=p,i[l]=d,t}function me(t,n,e,r,i){this.node=t,this.x0=n,this.y0=e,this.x1=r,this.y1=i}function xe(t){return t[0]}function be(t){return t[1]}function we(t,n,e){var r=new Me(null==n?xe:n,null==e?be:e,NaN,NaN,NaN,NaN);return null==t?r:r.addAll(t)}function Me(t,n,e,r,i,o){this._x=t,this._y=n,this._x0=e,this._y0=r,this._x1=i,this._y1=o,this._root=void 0}function Te(t){for(var n={data:t.data},e=n;t=t.next;)e=e.next={data:t.data};return n}function Ne(t){return t.x+t.vx}function ke(t){return t.y+t.vy}function Se(t){return t.index}function Ee(t,n){var e=t.get(n);if(!e)throw new Error("missing: "+n);return e}function Ae(t){return t.x}function Ce(t){return t.y}function ze(t,n){if((e=(t=n?t.toExponential(n-1):t.toExponential()).indexOf("e"))<0)return null;var e,r=t.slice(0,e);return[r.length>1?r[0]+r.slice(2):r,+t.slice(e+1)]}function Pe(t){return(t=ze(Math.abs(t)))?t[1]:NaN}function Re(t,n){var e=ze(t,n);if(!e)return t+"";var r=e[0],i=e[1];return i<0?"0."+new Array(-i).join("0")+r:r.length>i+1?r.slice(0,i+1)+"."+r.slice(i+1):r+new Array(i-r.length+2).join("0")}function Le(t){return new qe(t)}function qe(t){if(!(n=Ih.exec(t)))throw new Error("invalid format: "+t);var n,e=n[1]||" ",r=n[2]||">",i=n[3]||"-",o=n[4]||"",u=!!n[5],a=n[6]&&+n[6],c=!!n[7],s=n[8]&&+n[8].slice(1),f=n[9]||"";"n"===f?(c=!0,f="g"):Fh[f]||(f=""),(u||"0"===e&&"="===r)&&(u=!0,e="0",r="="),this.fill=e,this.align=r,this.sign=i,this.symbol=o,this.zero=u,this.width=a,this.comma=c,this.precision=s,this.type=f}function De(t){return t}function Ue(t){function n(t){function n(t){var n,r,u,f=g,x=_;if("c"===v)x=y(t)+x,t="";else{var b=(t=+t)<0;if(t=y(Math.abs(t),d),b&&0==+t&&(b=!1),f=(b?"("===s?s:"-":"-"===s||"("===s?"":s)+f,x=x+("s"===v?Bh[8+Dh/3]:"")+(b&&"("===s?")":""),m)for(n=-1,r=t.length;++n<r;)if(48>(u=t.charCodeAt(n))||u>57){x=(46===u?i+t.slice(n+1):t.slice(n))+x,t=t.slice(0,n);break}}p&&!l&&(t=e(t,1/0));var w=f.length+t.length+x.length,M=w<h?new Array(h-w+1).join(a):"";switch(p&&l&&(t=e(M+t,M.length?h-x.length:1/0),M=""),c){case"<":t=f+t+x+M;break;case"=":t=f+M+t+x;break;case"^":t=M.slice(0,w=M.length>>1)+f+t+x+M.slice(w);break;default:t=M+f+t+x}return o(t)}var a=(t=Le(t)).fill,c=t.align,s=t.sign,f=t.symbol,l=t.zero,h=t.width,p=t.comma,d=t.precision,v=t.type,g="$"===f?r[0]:"#"===f&&/[boxX]/.test(v)?"0"+v.toLowerCase():"",_="$"===f?r[1]:/[%p]/.test(v)?u:"",y=Fh[v],m=!v||/[defgprs%]/.test(v);return d=null==d?v?6:12:/[gprs]/.test(v)?Math.max(1,Math.min(21,d)):Math.max(0,Math.min(20,d)),n.toString=function(){return t+""},n}var e=t.grouping&&t.thousands?function(t,n){return function(e,r){for(var i=e.length,o=[],u=0,a=t[0],c=0;i>0&&a>0&&(c+a+1>r&&(a=Math.max(1,r-c)),o.push(e.substring(i-=a,i+a)),!((c+=a+1)>r));)a=t[u=(u+1)%t.length];return o.reverse().join(n)}}(t.grouping,t.thousands):De,r=t.currency,i=t.decimal,o=t.numerals?function(t){return function(n){return n.replace(/[0-9]/g,function(n){return t[+n]})}}(t.numerals):De,u=t.percent||"%";return{format:n,formatPrefix:function(t,e){var r=n((t=Le(t),t.type="f",t)),i=3*Math.max(-8,Math.min(8,Math.floor(Pe(e)/3))),o=Math.pow(10,-i),u=Bh[8+i/3];return function(t){return r(o*t)+u}}}}function Oe(n){return Yh=Ue(n),t.format=Yh.format,t.formatPrefix=Yh.formatPrefix,Yh}function Fe(t){return Math.max(0,-Pe(Math.abs(t)))}function Ie(t,n){return Math.max(0,3*Math.max(-8,Math.min(8,Math.floor(Pe(n)/3)))-Pe(Math.abs(t)))}function Ye(t,n){return t=Math.abs(t),n=Math.abs(n)-t,Math.max(0,Pe(n)-Pe(t))+1}function Be(){return new He}function He(){this.reset()}function je(t,n,e){var r=t.s=n+e,i=r-n,o=r-i;t.t=n-o+(e-i)}function Xe(t){return t>1?0:t<-1?Mp:Math.acos(t)}function Ve(t){return t>1?Tp:t<-1?-Tp:Math.asin(t)}function $e(t){return(t=Up(t/2))*t}function We(){}function Ze(t,n){t&&Bp.hasOwnProperty(t.type)&&Bp[t.type](t,n)}function Ge(t,n,e){var r,i=-1,o=t.length-e;for(n.lineStart();++i<o;)r=t[i],n.point(r[0],r[1],r[2]);n.lineEnd()}function Qe(t,n){var e=-1,r=t.length;for(n.polygonStart();++e<r;)Ge(t[e],n,1);n.polygonEnd()}function Je(t,n){t&&Yp.hasOwnProperty(t.type)?Yp[t.type](t,n):Ze(t,n)}function Ke(){Xp.point=nr}function tr(){er(Hh,jh)}function nr(t,n){Xp.point=er,Hh=t,jh=n,Xh=t*=Ep,Vh=Pp(n=(n*=Ep)/2+Np),$h=Up(n)}function er(t,n){n=(n*=Ep)/2+Np;var e=(t*=Ep)-Xh,r=e>=0?1:-1,i=r*e,o=Pp(n),u=Up(n),a=$h*u,c=Vh*o+a*Pp(i),s=a*r*Up(i);Hp.add(zp(s,c)),Xh=t,Vh=o,$h=u}function rr(t){return[zp(t[1],t[0]),Ve(t[2])]}function ir(t){var n=t[0],e=t[1],r=Pp(e);return[r*Pp(n),r*Up(n),Up(e)]}function or(t,n){return t[0]*n[0]+t[1]*n[1]+t[2]*n[2]}function ur(t,n){return[t[1]*n[2]-t[2]*n[1],t[2]*n[0]-t[0]*n[2],t[0]*n[1]-t[1]*n[0]]}function ar(t,n){t[0]+=n[0],t[1]+=n[1],t[2]+=n[2]}function cr(t,n){return[t[0]*n,t[1]*n,t[2]*n]}function sr(t){var n=Fp(t[0]*t[0]+t[1]*t[1]+t[2]*t[2]);t[0]/=n,t[1]/=n,t[2]/=n}function fr(t,n){ep.push(rp=[Wh=t,Gh=t]),n<Zh&&(Zh=n),n>Qh&&(Qh=n)}function lr(t,n){var e=ir([t*Ep,n*Ep]);if(np){var r=ur(np,e),i=ur([r[1],-r[0],0],r);sr(i),i=rr(i);var o,u=t-Jh,a=u>0?1:-1,c=i[0]*Sp*a,s=Ap(u)>180;s^(a*Jh<c&&c<a*t)?(o=i[1]*Sp)>Qh&&(Qh=o):(c=(c+360)%360-180,s^(a*Jh<c&&c<a*t)?(o=-i[1]*Sp)<Zh&&(Zh=o):(n<Zh&&(Zh=n),n>Qh&&(Qh=n))),s?t<Jh?_r(Wh,t)>_r(Wh,Gh)&&(Gh=t):_r(t,Gh)>_r(Wh,Gh)&&(Wh=t):Gh>=Wh?(t<Wh&&(Wh=t),t>Gh&&(Gh=t)):t>Jh?_r(Wh,t)>_r(Wh,Gh)&&(Gh=t):_r(t,Gh)>_r(Wh,Gh)&&(Wh=t)}else ep.push(rp=[Wh=t,Gh=t]);n<Zh&&(Zh=n),n>Qh&&(Qh=n),np=e,Jh=t}function hr(){$p.point=lr}function pr(){rp[0]=Wh,rp[1]=Gh,$p.point=fr,np=null}function dr(t,n){if(np){var e=t-Jh;Vp.add(Ap(e)>180?e+(e>0?360:-360):e)}else Kh=t,tp=n;Xp.point(t,n),lr(t,n)}function vr(){Xp.lineStart()}function gr(){dr(Kh,tp),Xp.lineEnd(),Ap(Vp)>bp&&(Wh=-(Gh=180)),rp[0]=Wh,rp[1]=Gh,np=null}function _r(t,n){return(n-=t)<0?n+360:n}function yr(t,n){return t[0]-n[0]}function mr(t,n){return t[0]<=t[1]?t[0]<=n&&n<=t[1]:n<t[0]||t[1]<n}function xr(t,n){t*=Ep;var e=Pp(n*=Ep);br(e*Pp(t),e*Up(t),Up(n))}function br(t,n,e){up+=(t-up)/++ip,ap+=(n-ap)/ip,cp+=(e-cp)/ip}function wr(){Wp.point=Mr}function Mr(t,n){t*=Ep;var e=Pp(n*=Ep);_p=e*Pp(t),yp=e*Up(t),mp=Up(n),Wp.point=Tr,br(_p,yp,mp)}function Tr(t,n){t*=Ep;var e=Pp(n*=Ep),r=e*Pp(t),i=e*Up(t),o=Up(n),u=zp(Fp((u=yp*o-mp*i)*u+(u=mp*r-_p*o)*u+(u=_p*i-yp*r)*u),_p*r+yp*i+mp*o);op+=u,sp+=u*(_p+(_p=r)),fp+=u*(yp+(yp=i)),lp+=u*(mp+(mp=o)),br(_p,yp,mp)}function Nr(){Wp.point=xr}function kr(){Wp.point=Er}function Sr(){Ar(vp,gp),Wp.point=xr}function Er(t,n){vp=t,gp=n,t*=Ep,n*=Ep,Wp.point=Ar;var e=Pp(n);_p=e*Pp(t),yp=e*Up(t),mp=Up(n),br(_p,yp,mp)}function Ar(t,n){t*=Ep;var e=Pp(n*=Ep),r=e*Pp(t),i=e*Up(t),o=Up(n),u=yp*o-mp*i,a=mp*r-_p*o,c=_p*i-yp*r,s=Fp(u*u+a*a+c*c),f=Ve(s),l=s&&-f/s;hp+=l*u,pp+=l*a,dp+=l*c,op+=f,sp+=f*(_p+(_p=r)),fp+=f*(yp+(yp=i)),lp+=f*(mp+(mp=o)),br(_p,yp,mp)}function Cr(t){return function(){return t}}function zr(t,n){function e(e,r){return e=t(e,r),n(e[0],e[1])}return t.invert&&n.invert&&(e.invert=function(e,r){return(e=n.invert(e,r))&&t.invert(e[0],e[1])}),e}function Pr(t,n){return[t>Mp?t-kp:t<-Mp?t+kp:t,n]}function Rr(t,n,e){return(t%=kp)?n||e?zr(qr(t),Dr(n,e)):qr(t):n||e?Dr(n,e):Pr}function Lr(t){return function(n,e){return n+=t,[n>Mp?n-kp:n<-Mp?n+kp:n,e]}}function qr(t){var n=Lr(t);return n.invert=Lr(-t),n}function Dr(t,n){function e(t,n){var e=Pp(n),a=Pp(t)*e,c=Up(t)*e,s=Up(n),f=s*r+a*i;return[zp(c*o-f*u,a*r-s*i),Ve(f*o+c*u)]}var r=Pp(t),i=Up(t),o=Pp(n),u=Up(n);return e.invert=function(t,n){var e=Pp(n),a=Pp(t)*e,c=Up(t)*e,s=Up(n),f=s*o-c*u;return[zp(c*o+s*u,a*r+f*i),Ve(f*r-a*i)]},e}function Ur(t){function n(n){return n=t(n[0]*Ep,n[1]*Ep),n[0]*=Sp,n[1]*=Sp,n}return t=Rr(t[0]*Ep,t[1]*Ep,t.length>2?t[2]*Ep:0),n.invert=function(n){return n=t.invert(n[0]*Ep,n[1]*Ep),n[0]*=Sp,n[1]*=Sp,n},n}function Or(t,n,e,r,i,o){if(e){var u=Pp(n),a=Up(n),c=r*e;null==i?(i=n+r*kp,o=n-c/2):(i=Fr(u,i),o=Fr(u,o),(r>0?i<o:i>o)&&(i+=r*kp));for(var s,f=i;r>0?f>o:f<o;f-=c)s=rr([u,-a*Pp(f),-a*Up(f)]),t.point(s[0],s[1])}}function Fr(t,n){(n=ir(n))[0]-=t,sr(n);var e=Xe(-n[1]);return((-n[2]<0?-e:e)+kp-bp)%kp}function Ir(){var t,n=[];return{point:function(n,e){t.push([n,e])},lineStart:function(){n.push(t=[])},lineEnd:We,rejoin:function(){n.length>1&&n.push(n.pop().concat(n.shift()))},result:function(){var e=n;return n=[],t=null,e}}}function Yr(t,n){return Ap(t[0]-n[0])<bp&&Ap(t[1]-n[1])<bp}function Br(t,n,e,r){this.x=t,this.z=n,this.o=e,this.e=r,this.v=!1,this.n=this.p=null}function Hr(t,n,e,r,i){var o,u,a=[],c=[];if(t.forEach(function(t){if(!((n=t.length-1)<=0)){var n,e,r=t[0],u=t[n];if(Yr(r,u)){for(i.lineStart(),o=0;o<n;++o)i.point((r=t[o])[0],r[1]);i.lineEnd()}else a.push(e=new Br(r,t,null,!0)),c.push(e.o=new Br(r,null,e,!1)),a.push(e=new Br(u,t,null,!1)),c.push(e.o=new Br(u,null,e,!0))}}),a.length){for(c.sort(n),jr(a),jr(c),o=0,u=c.length;o<u;++o)c[o].e=e=!e;for(var s,f,l=a[0];;){for(var h=l,p=!0;h.v;)if((h=h.n)===l)return;s=h.z,i.lineStart();do{if(h.v=h.o.v=!0,h.e){if(p)for(o=0,u=s.length;o<u;++o)i.point((f=s[o])[0],f[1]);else r(h.x,h.n.x,1,i);h=h.n}else{if(p)for(s=h.p.z,o=s.length-1;o>=0;--o)i.point((f=s[o])[0],f[1]);else r(h.x,h.p.x,-1,i);h=h.p}s=(h=h.o).z,p=!p}while(!h.v);i.lineEnd()}}}function jr(t){if(n=t.length){for(var n,e,r=0,i=t[0];++r<n;)i.n=e=t[r],e.p=i,i=e;i.n=e=t[0],e.p=i}}function Xr(t,n){var e=n[0],r=n[1],i=[Up(e),-Pp(e),0],o=0,u=0;ud.reset();for(var a=0,c=t.length;a<c;++a)if(f=(s=t[a]).length)for(var
s,f,l=s[f-1],h=l[0],p=l[1]/2+Np,d=Up(p),v=Pp(p),g=0;g<f;++g,h=y,d=x,v=b,l=_){var _=s[g],y=_[0],m=_[1]/2+Np,x=Up(m),b=Pp(m),w=y-h,M=w>=0?1:-1,T=M*w,N=T>Mp,k=d*x;if(ud.add(zp(k*M*Up(T),v*b+k*Pp(T))),o+=N?w+M*kp:w,N^h>=e^y>=e){var S=ur(ir(l),ir(_));sr(S);var E=ur(i,S);sr(E);var A=(N^w>=0?-1:1)*Ve(E[2]);(r>A||r===A&&(S[0]||S[1]))&&(u+=N^w>=0?1:-1)}}return(o<-bp||o<bp&&ud<-bp)^1&u}function Vr(t,n,e,r){return function(i){function o(n,e){t(n,e)&&i.point(n,e)}function u(t,n){v.point(t,n)}function a(){x.point=u,v.lineStart()}function c(){x.point=o,v.lineEnd()}function s(t,n){d.push([t,n]),y.point(t,n)}function f(){y.lineStart(),d=[]}function l(){s(d[0][0],d[0][1]),y.lineEnd();var t,n,e,r,o=y.clean(),u=_.result(),a=u.length;if(d.pop(),h.push(d),d=null,a)if(1&o){if(e=u[0],(n=e.length-1)>0){for(m||(i.polygonStart(),m=!0),i.lineStart(),t=0;t<n;++t)i.point((r=e[t])[0],r[1]);i.lineEnd()}}else a>1&&2&o&&u.push(u.pop().concat(u.shift())),p.push(u.filter($r))}var h,p,d,v=n(i),_=Ir(),y=n(_),m=!1,x={point:o,lineStart:a,lineEnd:c,polygonStart:function(){x.point=s,x.lineStart=f,x.lineEnd=l,p=[],h=[]},polygonEnd:function(){x.point=o,x.lineStart=a,x.lineEnd=c,p=g(p);var t=Xr(h,r);p.length?(m||(i.polygonStart(),m=!0),Hr(p,Wr,t,e,i)):t&&(m||(i.polygonStart(),m=!0),i.lineStart(),e(null,null,1,i),i.lineEnd()),m&&(i.polygonEnd(),m=!1),p=h=null},sphere:function(){i.polygonStart(),i.lineStart(),e(null,null,1,i),i.lineEnd(),i.polygonEnd()}};return x}}function $r(t){return t.length>1}function Wr(t,n){return((t=t.x)[0]<0?t[1]-Tp-bp:Tp-t[1])-((n=n.x)[0]<0?n[1]-Tp-bp:Tp-n[1])}function Zr(t){function n(t,n){return Pp(t)*Pp(n)>i}function e(t,n,e){var r=[1,0,0],o=ur(ir(t),ir(n)),u=or(o,o),a=o[0],c=u-a*a;if(!c)return!e&&t;var s=i*u/c,f=-i*a/c,l=ur(r,o),h=cr(r,s);ar(h,cr(o,f));var p=l,d=or(h,p),v=or(p,p),g=d*d-v*(or(h,h)-1);if(!(g<0)){var _=Fp(g),y=cr(p,(-d-_)/v);if(ar(y,h),y=rr(y),!e)return y;var m,x=t[0],b=n[0],w=t[1],M=n[1];b<x&&(m=x,x=b,b=m);var T=b-x,N=Ap(T-Mp)<bp;if(!N&&M<w&&(m=w,w=M,M=m),N||T<bp?N?w+M>0^y[1]<(Ap(y[0]-x)<bp?w:M):w<=y[1]&&y[1]<=M:T>Mp^(x<=y[0]&&y[0]<=b)){var k=cr(p,(-d+_)/v);return ar(k,h),[y,rr(k)]}}}function r(n,e){var r=u?t:Mp-t,i=0;return n<-r?i|=1:n>r&&(i|=2),e<-r?i|=4:e>r&&(i|=8),i}var i=Pp(t),o=6*Ep,u=i>0,a=Ap(i)>bp;return Vr(n,function(t){var i,o,c,s,f;return{lineStart:function(){s=c=!1,f=1},point:function(l,h){var p,d=[l,h],v=n(l,h),g=u?v?0:r(l,h):v?r(l+(l<0?Mp:-Mp),h):0;if(!i&&(s=c=v)&&t.lineStart(),v!==c&&(!(p=e(i,d))||Yr(i,p)||Yr(d,p))&&(d[0]+=bp,d[1]+=bp,v=n(d[0],d[1])),v!==c)f=0,v?(t.lineStart(),p=e(d,i),t.point(p[0],p[1])):(p=e(i,d),t.point(p[0],p[1]),t.lineEnd()),i=p;else if(a&&i&&u^v){var _;g&o||!(_=e(d,i,!0))||(f=0,u?(t.lineStart(),t.point(_[0][0],_[0][1]),t.point(_[1][0],_[1][1]),t.lineEnd()):(t.point(_[1][0],_[1][1]),t.lineEnd(),t.lineStart(),t.point(_[0][0],_[0][1])))}!v||i&&Yr(i,d)||t.point(d[0],d[1]),i=d,c=v,o=g},lineEnd:function(){c&&t.lineEnd(),i=null},clean:function(){return f|(s&&c)<<1}}},function(n,e,r,i){Or(i,t,o,r,n,e)},u?[0,-t]:[-Mp,t-Mp])}function Gr(t,n,e,r){function i(i,o){return t<=i&&i<=e&&n<=o&&o<=r}function o(i,o,a,s){var f=0,l=0;if(null==i||(f=u(i,a))!==(l=u(o,a))||c(i,o)<0^a>0)do{s.point(0===f||3===f?t:e,f>1?r:n)}while((f=(f+a+4)%4)!==l);else s.point(o[0],o[1])}function u(r,i){return Ap(r[0]-t)<bp?i>0?0:3:Ap(r[0]-e)<bp?i>0?2:1:Ap(r[1]-n)<bp?i>0?1:0:i>0?3:2}function a(t,n){return c(t.x,n.x)}function c(t,n){var e=u(t,1),r=u(n,1);return e!==r?e-r:0===e?n[1]-t[1]:1===e?t[0]-n[0]:2===e?t[1]-n[1]:n[0]-t[0]}return function(u){function c(t,n){i(t,n)&&w.point(t,n)}function 
s(o,u){var a=i(o,u);if(l&&h.push([o,u]),x)p=o,d=u,v=a,x=!1,a&&(w.lineStart(),w.point(o,u));else if(a&&m)w.point(o,u);else{var c=[_=Math.max(sd,Math.min(cd,_)),y=Math.max(sd,Math.min(cd,y))],s=[o=Math.max(sd,Math.min(cd,o)),u=Math.max(sd,Math.min(cd,u))];!function(t,n,e,r,i,o){var u,a=t[0],c=t[1],s=0,f=1,l=n[0]-a,h=n[1]-c;if(u=e-a,l||!(u>0)){if(u/=l,l<0){if(u<s)return;u<f&&(f=u)}else if(l>0){if(u>f)return;u>s&&(s=u)}if(u=i-a,l||!(u<0)){if(u/=l,l<0){if(u>f)return;u>s&&(s=u)}else if(l>0){if(u<s)return;u<f&&(f=u)}if(u=r-c,h||!(u>0)){if(u/=h,h<0){if(u<s)return;u<f&&(f=u)}else if(h>0){if(u>f)return;u>s&&(s=u)}if(u=o-c,h||!(u<0)){if(u/=h,h<0){if(u>f)return;u>s&&(s=u)}else if(h>0){if(u<s)return;u<f&&(f=u)}return s>0&&(t[0]=a+s*l,t[1]=c+s*h),f<1&&(n[0]=a+f*l,n[1]=c+f*h),!0}}}}}(c,s,t,n,e,r)?a&&(w.lineStart(),w.point(o,u),b=!1):(m||(w.lineStart(),w.point(c[0],c[1])),w.point(s[0],s[1]),a||w.lineEnd(),b=!1)}_=o,y=u,m=a}var f,l,h,p,d,v,_,y,m,x,b,w=u,M=Ir(),T={point:c,lineStart:function(){T.point=s,l&&l.push(h=[]),x=!0,m=!1,_=y=NaN},lineEnd:function(){f&&(s(p,d),v&&m&&M.rejoin(),f.push(M.result())),T.point=c,m&&w.lineEnd()},polygonStart:function(){w=M,f=[],l=[],b=!0},polygonEnd:function(){var n=function(){for(var n=0,e=0,i=l.length;e<i;++e)for(var o,u,a=l[e],c=1,s=a.length,f=a[0],h=f[0],p=f[1];c<s;++c)o=h,u=p,h=(f=a[c])[0],p=f[1],u<=r?p>r&&(h-o)*(r-u)>(p-u)*(t-o)&&++n:p<=r&&(h-o)*(r-u)<(p-u)*(t-o)&&--n;return n}(),e=b&&n,i=(f=g(f)).length;(e||i)&&(u.polygonStart(),e&&(u.lineStart(),o(null,null,1,u),u.lineEnd()),i&&Hr(f,a,n,o,u),u.polygonEnd()),w=u,f=l=h=null}};return T}}function Qr(){ld.point=ld.lineEnd=We}function Jr(t,n){Zp=t*=Ep,Gp=Up(n*=Ep),Qp=Pp(n),ld.point=Kr}function Kr(t,n){t*=Ep;var e=Up(n*=Ep),r=Pp(n),i=Ap(t-Zp),o=Pp(i),u=r*Up(i),a=Qp*e-Gp*r*o,c=Gp*e+Qp*r*o;fd.add(zp(Fp(u*u+a*a),c)),Zp=t,Gp=e,Qp=r}function ti(t){return fd.reset(),Je(t,ld),+fd}function ni(t,n){return hd[0]=t,hd[1]=n,ti(pd)}function ei(t,n){return!(!t||!vd.hasOwnProperty(t.type))&&vd[t.type](t,n)}function ri(t,n){return 0===ni(t,n)}function ii(t,n){var e=ni(t[0],t[1]);return ni(t[0],n)+ni(n,t[1])<=e+bp}function oi(t,n){return!!Xr(t.map(ui),ai(n))}function ui(t){return(t=t.map(ai)).pop(),t}function ai(t){return[t[0]*Ep,t[1]*Ep]}function ci(t,n,e){var r=f(t,n-bp,e).concat(n);return function(t){return r.map(function(n){return[t,n]})}}function si(t,n,e){var r=f(t,n-bp,e).concat(n);return function(t){return r.map(function(n){return[n,t]})}}function fi(){function t(){return{type:"MultiLineString",coordinates:n()}}function n(){return f(Rp(o/_)*_,i,_).map(p).concat(f(Rp(s/y)*y,c,y).map(d)).concat(f(Rp(r/v)*v,e,v).filter(function(t){return Ap(t%_)>bp}).map(l)).concat(f(Rp(a/g)*g,u,g).filter(function(t){return Ap(t%y)>bp}).map(h))}var e,r,i,o,u,a,c,s,l,h,p,d,v=10,g=v,_=90,y=360,m=2.5;return t.lines=function(){return n().map(function(t){return{type:"LineString",coordinates:t}})},t.outline=function(){return{type:"Polygon",coordinates:[p(o).concat(d(c).slice(1),p(i).reverse().slice(1),d(s).reverse().slice(1))]}},t.extent=function(n){return arguments.length?t.extentMajor(n).extentMinor(n):t.extentMinor()},t.extentMajor=function(n){return arguments.length?(o=+n[0][0],i=+n[1][0],s=+n[0][1],c=+n[1][1],o>i&&(n=o,o=i,i=n),s>c&&(n=s,s=c,c=n),t.precision(m)):[[o,s],[i,c]]},t.extentMinor=function(n){return arguments.length?(r=+n[0][0],e=+n[1][0],a=+n[0][1],u=+n[1][1],r>e&&(n=r,r=e,e=n),a>u&&(n=a,a=u,u=n),t.precision(m)):[[r,a],[e,u]]},t.step=function(n){return arguments.length?t.stepMajor(n).stepMinor(n):t.stepMinor()},t.stepMajor=function(n){return 
arguments.length?(_=+n[0],y=+n[1],t):[_,y]},t.stepMinor=function(n){return arguments.length?(v=+n[0],g=+n[1],t):[v,g]},t.precision=function(n){return arguments.length?(m=+n,l=ci(a,u,90),h=si(r,e,m),p=ci(s,c,90),d=si(o,i,m),t):m},t.extentMajor([[-180,-90+bp],[180,90-bp]]).extentMinor([[-180,-80-bp],[180,80+bp]])}function li(t){return t}function hi(){yd.point=pi}function pi(t,n){yd.point=di,Jp=td=t,Kp=nd=n}function di(t,n){_d.add(nd*t-td*n),td=t,nd=n}function vi(){di(Jp,Kp)}function gi(t,n){Td+=t,Nd+=n,++kd}function _i(){Rd.point=yi}function yi(t,n){Rd.point=mi,gi(id=t,od=n)}function mi(t,n){var e=t-id,r=n-od,i=Fp(e*e+r*r);Sd+=i*(id+t)/2,Ed+=i*(od+n)/2,Ad+=i,gi(id=t,od=n)}function xi(){Rd.point=gi}function bi(){Rd.point=Mi}function wi(){Ti(ed,rd)}function Mi(t,n){Rd.point=Ti,gi(ed=id=t,rd=od=n)}function Ti(t,n){var e=t-id,r=n-od,i=Fp(e*e+r*r);Sd+=i*(id+t)/2,Ed+=i*(od+n)/2,Ad+=i,Cd+=(i=od*t-id*n)*(id+t),zd+=i*(od+n),Pd+=3*i,gi(id=t,od=n)}function Ni(t){this._context=t}function ki(t,n){Id.point=Si,qd=Ud=t,Dd=Od=n}function Si(t,n){Ud-=t,Od-=n,Fd.add(Fp(Ud*Ud+Od*Od)),Ud=t,Od=n}function Ei(){this._string=[]}function Ai(t){return"m0,"+t+"a"+t+","+t+" 0 1,1 0,"+-2*t+"a"+t+","+t+" 0 1,1 0,"+2*t+"z"}function Ci(t){return function(n){var e=new zi;for(var r in t)e[r]=t[r];return e.stream=n,e}}function zi(){}function Pi(t,n,e){var r=t.clipExtent&&t.clipExtent();return t.scale(150).translate([0,0]),null!=r&&t.clipExtent(null),Je(e,t.stream(Md)),n(Md.result()),null!=r&&t.clipExtent(r),t}function Ri(t,n,e){return Pi(t,function(e){var r=n[1][0]-n[0][0],i=n[1][1]-n[0][1],o=Math.min(r/(e[1][0]-e[0][0]),i/(e[1][1]-e[0][1])),u=+n[0][0]+(r-o*(e[1][0]+e[0][0]))/2,a=+n[0][1]+(i-o*(e[1][1]+e[0][1]))/2;t.scale(150*o).translate([u,a])},e)}function Li(t,n,e){return Ri(t,[[0,0],n],e)}function qi(t,n,e){return Pi(t,function(e){var r=+n,i=r/(e[1][0]-e[0][0]),o=(r-i*(e[1][0]+e[0][0]))/2,u=-i*e[0][1];t.scale(150*i).translate([o,u])},e)}function Di(t,n,e){return Pi(t,function(e){var r=+n,i=r/(e[1][1]-e[0][1]),o=-i*e[0][0],u=(r-i*(e[1][1]+e[0][1]))/2;t.scale(150*i).translate([o,u])},e)}function Ui(t,n){return+n?function(t,n){function e(r,i,o,u,a,c,s,f,l,h,p,d,v,g){var _=s-r,y=f-i,m=_*_+y*y;if(m>4*n&&v--){var x=u+h,b=a+p,w=c+d,M=Fp(x*x+b*b+w*w),T=Ve(w/=M),N=Ap(Ap(w)-1)<bp||Ap(o-l)<bp?(o+l)/2:zp(b,x),k=t(N,T),S=k[0],E=k[1],A=S-r,C=E-i,z=y*A-_*C;(z*z/m>n||Ap((_*A+y*C)/m-.5)>.3||u*h+a*p+c*d<Bd)&&(e(r,i,o,u,a,c,S,E,N,x/=M,b/=M,w,v,g),g.point(S,E),e(S,E,N,x,b,w,s,f,l,h,p,d,v,g))}}return function(n){function r(e,r){e=t(e,r),n.point(e[0],e[1])}function i(){_=NaN,w.point=o,n.lineStart()}function o(r,i){var o=ir([r,i]),u=t(r,i);e(_,y,g,m,x,b,_=u[0],y=u[1],g=r,m=o[0],x=o[1],b=o[2],Yd,n),n.point(_,y)}function u(){w.point=r,n.lineEnd()}function a(){i(),w.point=c,w.lineEnd=s}function c(t,n){o(f=t,n),l=_,h=y,p=m,d=x,v=b,w.point=o}function s(){e(_,y,g,m,x,b,l,h,f,p,d,v,Yd,n),w.lineEnd=u,u()}var f,l,h,p,d,v,g,_,y,m,x,b,w={point:r,lineStart:i,lineEnd:u,polygonStart:function(){n.polygonStart(),w.lineStart=a},polygonEnd:function(){n.polygonEnd(),w.lineStart=i}};return w}}(t,n):function(t){return Ci({point:function(n,e){n=t(n,e),this.stream.point(n[0],n[1])}})}(t)}function Oi(t){return Fi(function(){return t})()}function Fi(t){function n(t){return t=s(t[0]*Ep,t[1]*Ep),[t[0]*v+u,a-t[1]*v]}function e(t,n){return t=o(t,n),[t[0]*v+u,a-t[1]*v]}function r(){s=zr(c=Rr(x,b,w),o);var t=o(y,m);return u=g-t[0]*v,a=_+t[1]*v,i()}function i(){return p=d=null,n}var 
o,u,a,c,s,f,l,h,p,d,v=150,g=480,_=250,y=0,m=0,x=0,b=0,w=0,M=null,T=ad,N=null,k=li,S=.5,E=Ui(e,S);return n.stream=function(t){return p&&d===t?p:p=Hd(function(t){return Ci({point:function(n,e){var r=t(n,e);return this.stream.point(r[0],r[1])}})}(c)(T(E(k(d=t)))))},n.preclip=function(t){return arguments.length?(T=t,M=void 0,i()):T},n.postclip=function(t){return arguments.length?(k=t,N=f=l=h=null,i()):k},n.clipAngle=function(t){return arguments.length?(T=+t?Zr(M=t*Ep):(M=null,ad),i()):M*Sp},n.clipExtent=function(t){return arguments.length?(k=null==t?(N=f=l=h=null,li):Gr(N=+t[0][0],f=+t[0][1],l=+t[1][0],h=+t[1][1]),i()):null==N?null:[[N,f],[l,h]]},n.scale=function(t){return arguments.length?(v=+t,r()):v},n.translate=function(t){return arguments.length?(g=+t[0],_=+t[1],r()):[g,_]},n.center=function(t){return arguments.length?(y=t[0]%360*Ep,m=t[1]%360*Ep,r()):[y*Sp,m*Sp]},n.rotate=function(t){return arguments.length?(x=t[0]%360*Ep,b=t[1]%360*Ep,w=t.length>2?t[2]%360*Ep:0,r()):[x*Sp,b*Sp,w*Sp]},n.precision=function(t){return arguments.length?(E=Ui(e,S=t*t),i()):Fp(S)},n.fitExtent=function(t,e){return Ri(n,t,e)},n.fitSize=function(t,e){return Li(n,t,e)},n.fitWidth=function(t,e){return qi(n,t,e)},n.fitHeight=function(t,e){return Di(n,t,e)},function(){return o=t.apply(this,arguments),n.invert=o.invert&&function(t){return(t=s.invert((t[0]-u)/v,(a-t[1])/v))&&[t[0]*Sp,t[1]*Sp]},r()}}function Ii(t){var n=0,e=Mp/3,r=Fi(t),i=r(n,e);return i.parallels=function(t){return arguments.length?r(n=t[0]*Ep,e=t[1]*Ep):[n*Sp,e*Sp]},i}function Yi(t,n){function e(t,n){var e=Fp(o-2*i*Up(n))/i;return[e*Up(t*=i),u-e*Pp(t)]}var r=Up(t),i=(r+Up(n))/2;if(Ap(i)<bp)return function(t){function n(t,n){return[t*e,Up(n)/e]}var e=Pp(t);return n.invert=function(t,n){return[t/e,Ve(n*e)]},n}(t);var o=1+r*(2*i-r),u=Fp(o)/i;return e.invert=function(t,n){var e=u-n;return[zp(t,Ap(e))/i*Op(e),Ve((o-(t*t+e*e)*i*i)/(2*i))]},e}function Bi(){return Ii(Yi).scale(155.424).center([0,33.6442])}function Hi(){return Bi().parallels([29.5,45.5]).scale(1070).translate([480,250]).rotate([96,0]).center([-.6,38.7])}function ji(t){return function(n,e){var r=Pp(n),i=Pp(e),o=t(r*i);return[o*i*Up(n),o*Up(e)]}}function Xi(t){return function(n,e){var r=Fp(n*n+e*e),i=t(r),o=Up(i),u=Pp(i);return[zp(n*o,r*u),Ve(r&&e*o/r)]}}function Vi(t,n){return[t,qp(Ip((Tp+n)/2))]}function $i(t){function n(){var n=Mp*a(),u=o(Ur(o.rotate()).invert([0,0]));return s(null==f?[[u[0]-n,u[1]-n],[u[0]+n,u[1]+n]]:t===Vi?[[Math.max(u[0]-n,f),e],[Math.min(u[0]+n,r),i]]:[[f,Math.max(u[1]-n,e)],[r,Math.min(u[1]+n,i)]])}var e,r,i,o=Oi(t),u=o.center,a=o.scale,c=o.translate,s=o.clipExtent,f=null;return o.scale=function(t){return arguments.length?(a(t),n()):a()},o.translate=function(t){return arguments.length?(c(t),n()):c()},o.center=function(t){return arguments.length?(u(t),n()):u()},o.clipExtent=function(t){return arguments.length?(null==t?f=e=r=i=null:(f=+t[0][0],e=+t[0][1],r=+t[1][0],i=+t[1][1]),n()):null==f?null:[[f,e],[r,i]]},n()}function Wi(t){return Ip((Tp+t)/2)}function Zi(t,n){function e(t,n){o>0?n<-Tp+bp&&(n=-Tp+bp):n>Tp-bp&&(n=Tp-bp);var e=o/Dp(Wi(n),i);return[e*Up(i*t),o-e*Pp(i*t)]}var r=Pp(t),i=t===n?Up(t):qp(r/Pp(n))/qp(Wi(n)/Wi(t)),o=r*Dp(Wi(t),i)/i;return i?(e.invert=function(t,n){var e=o-n,r=Op(i)*Fp(t*t+e*e);return[zp(t,Ap(e))/i*Op(e),2*Cp(Dp(o/r,1/i))-Tp]},e):Vi}function Gi(t,n){return[t,n]}function Qi(t,n){function e(t,n){var e=o-n,r=i*t;return[e*Up(r),o-e*Pp(r)]}var r=Pp(t),i=t===n?Up(t):(r-Pp(n))/(n-t),o=r/i+t;return Ap(i)<bp?Gi:(e.invert=function(t,n){var 
e=o-n;return[zp(t,Ap(e))/i*Op(e),o-Op(i)*Fp(t*t+e*e)]},e)}function Ji(t,n){var e=Pp(n),r=Pp(t)*e;return[e*Up(t)/r,Up(n)/r]}function Ki(t,n,e,r){return 1===t&&1===n&&0===e&&0===r?li:Ci({point:function(i,o){this.stream.point(i*t+e,o*n+r)}})}function to(t,n){var e=n*n,r=e*e;return[t*(.8707-.131979*e+r*(r*(.003971*e-.001529*r)-.013791)),n*(1.007226+e*(.015085+r*(.028874*e-.044475-.005916*r)))]}function no(t,n){return[Pp(n)*Up(t),Up(n)]}function eo(t,n){var e=Pp(n),r=1+Pp(t)*e;return[e*Up(t)/r,Up(n)/r]}function ro(t,n){return[qp(Ip((Tp+n)/2)),-t]}function io(t,n){return t.parent===n.parent?1:2}function oo(t,n){return t+n.x}function uo(t,n){return Math.max(t,n.y)}function ao(t){var n=0,e=t.children,r=e&&e.length;if(r)for(;--r>=0;)n+=e[r].value;else n=1;t.value=n}function co(t,n){var e,r,i,o,u,a=new ho(t),c=+t.value&&(a.value=t.value),s=[a];for(null==n&&(n=so);e=s.pop();)if(c&&(e.value=+e.data.value),(i=n(e.data))&&(u=i.length))for(e.children=new Array(u),o=u-1;o>=0;--o)s.push(r=e.children[o]=new ho(i[o])),r.parent=e,r.depth=e.depth+1;return a.eachBefore(lo)}function so(t){return t.children}function fo(t){t.data=t.data.data}function lo(t){var n=0;do{t.height=n}while((t=t.parent)&&t.height<++n)}function ho(t){this.data=t,this.depth=this.height=0,this.parent=null}function po(t){for(var n,e,r=0,i=(t=function(t){for(var n,e,r=t.length;r;)e=Math.random()*r--|0,n=t[r],t[r]=t[e],t[e]=n;return t}(Vd.call(t))).length,o=[];r<i;)n=t[r],e&&go(e,n)?++r:(e=function(t){switch(t.length){case 1:return function(t){return{x:t.x,y:t.y,r:t.r}}(t[0]);case 2:return yo(t[0],t[1]);case 3:return mo(t[0],t[1],t[2])}}(o=function(t,n){var e,r;if(_o(n,t))return[n];for(e=0;e<t.length;++e)if(vo(n,t[e])&&_o(yo(t[e],n),t))return[t[e],n];for(e=0;e<t.length-1;++e)for(r=e+1;r<t.length;++r)if(vo(yo(t[e],t[r]),n)&&vo(yo(t[e],n),t[r])&&vo(yo(t[r],n),t[e])&&_o(mo(t[e],t[r],n),t))return[t[e],t[r],n];throw new Error}(o,n)),r=0);return e}function vo(t,n){var e=t.r-n.r,r=n.x-t.x,i=n.y-t.y;return e<0||e*e<r*r+i*i}function go(t,n){var e=t.r-n.r+1e-6,r=n.x-t.x,i=n.y-t.y;return e>0&&e*e>r*r+i*i}function _o(t,n){for(var e=0;e<n.length;++e)if(!go(t,n[e]))return!1;return!0}function yo(t,n){var e=t.x,r=t.y,i=t.r,o=n.x,u=n.y,a=n.r,c=o-e,s=u-r,f=a-i,l=Math.sqrt(c*c+s*s);return{x:(e+o+c/l*f)/2,y:(r+u+s/l*f)/2,r:(l+i+a)/2}}function mo(t,n,e){var r=t.x,i=t.y,o=t.r,u=n.x,a=n.y,c=n.r,s=e.x,f=e.y,l=e.r,h=r-u,p=r-s,d=i-a,v=i-f,g=c-o,_=l-o,y=r*r+i*i-o*o,m=y-u*u-a*a+c*c,x=y-s*s-f*f+l*l,b=p*d-h*v,w=(d*x-v*m)/(2*b)-r,M=(v*g-d*_)/b,T=(p*m-h*x)/(2*b)-i,N=(h*_-p*g)/b,k=M*M+N*N-1,S=2*(o+w*M+T*N),E=w*w+T*T-o*o,A=-(k?(S+Math.sqrt(S*S-4*k*E))/(2*k):E/S);return{x:r+w+M*A,y:i+T+N*A,r:A}}function xo(t,n,e){var r=t.x,i=t.y,o=n.r+e.r,u=t.r+e.r,a=n.x-r,c=n.y-i,s=a*a+c*c;if(s){var f=.5+((u*=u)-(o*=o))/(2*s),l=Math.sqrt(Math.max(0,2*o*(u+s)-(u-=s)*u-o*o))/(2*s);e.x=r+f*a+l*c,e.y=i+f*c-l*a}else e.x=r+u,e.y=i}function bo(t,n){var e=n.x-t.x,r=n.y-t.y,i=t.r+n.r;return i*i-1e-6>e*e+r*r}function wo(t){var n=t._,e=t.next._,r=n.r+e.r,i=(n.x*e.r+e.x*n.r)/r,o=(n.y*e.r+e.y*n.r)/r;return i*i+o*o}function Mo(t){this._=t,this.next=null,this.previous=null}function To(t){if(!(i=t.length))return 0;var n,e,r,i,o,u,a,c,s,f,l;if(n=t[0],n.x=0,n.y=0,!(i>1))return n.r;if(e=t[1],n.x=-e.r,e.x=n.r,e.y=0,!(i>2))return n.r+e.r;xo(e,n,r=t[2]),n=new Mo(n),e=new Mo(e),r=new Mo(r),n.next=r.previous=e,e.next=n.previous=r,r.next=e.previous=n;t:for(a=3;a<i;++a){xo(n._,e._,r=t[a]),r=new Mo(r),c=e.next,s=n.previous,f=e._.r,l=n._.r;do{if(f<=l){if(bo(c._,r._)){e=c,n.next=e,e.previous=n,--a;continue 
t}f+=c._.r,c=c.next}else{if(bo(s._,r._)){(n=s).next=e,e.previous=n,--a;continue t}l+=s._.r,s=s.previous}}while(c!==s.next);for(r.previous=n,r.next=e,n.next=e.previous=e=r,o=wo(n);(r=r.next)!==e;)(u=wo(r))<o&&(n=r,o=u);e=n.next}for(n=[e._],r=e;(r=r.next)!==e;)n.push(r._);for(r=po(n),a=0;a<i;++a)n=t[a],n.x-=r.x,n.y-=r.y;return r.r}function No(t){if("function"!=typeof t)throw new Error;return t}function ko(){return 0}function So(t){return function(){return t}}function Eo(t){return Math.sqrt(t.value)}function Ao(t){return function(n){n.children||(n.r=Math.max(0,+t(n)||0))}}function Co(t,n){return function(e){if(r=e.children){var r,i,o,u=r.length,a=t(e)*n||0;if(a)for(i=0;i<u;++i)r[i].r+=a;if(o=To(r),a)for(i=0;i<u;++i)r[i].r-=a;e.r=o+a}}}function zo(t){return function(n){var e=n.parent;n.r*=t,e&&(n.x=e.x+t*n.x,n.y=e.y+t*n.y)}}function Po(t){t.x0=Math.round(t.x0),t.y0=Math.round(t.y0),t.x1=Math.round(t.x1),t.y1=Math.round(t.y1)}function Ro(t,n,e,r,i){for(var o,u=t.children,a=-1,c=u.length,s=t.value&&(r-n)/t.value;++a<c;)(o=u[a]).y0=e,o.y1=i,o.x0=n,o.x1=n+=o.value*s}function Lo(t){return t.id}function qo(t){return t.parentId}function Do(t,n){return t.parent===n.parent?1:2}function Uo(t){var n=t.children;return n?n[0]:t.t}function Oo(t){var n=t.children;return n?n[n.length-1]:t.t}function Fo(t,n,e){var r=e/(n.i-t.i);n.c-=r,n.s+=e,t.c+=r,n.z+=e,n.m+=e}function Io(t,n,e){return t.a.parent===n.parent?t.a:e}function Yo(t,n){this._=t,this.parent=null,this.children=null,this.A=null,this.a=this,this.z=0,this.m=0,this.c=0,this.s=0,this.t=null,this.i=n}function Bo(t,n,e,r,i){for(var o,u=t.children,a=-1,c=u.length,s=t.value&&(i-e)/t.value;++a<c;)(o=u[a]).x0=n,o.x1=r,o.y0=e,o.y1=e+=o.value*s}function Ho(t,n,e,r,i,o){for(var u,a,c,s,f,l,h,p,d,v,g,_=[],y=n.children,m=0,x=0,b=y.length,w=n.value;m<b;){c=i-e,s=o-r;do{f=y[x++].value}while(!f&&x<b);for(l=h=f,g=f*f*(v=Math.max(s/c,c/s)/(w*t)),d=Math.max(h/g,g/l);x<b;++x){if(f+=a=y[x].value,a<l&&(l=a),a>h&&(h=a),g=f*f*v,(p=Math.max(h/g,g/l))>d){f-=a;break}d=p}_.push(u={value:f,dice:c<s,children:y.slice(m,x)}),u.dice?Ro(u,e,r,i,w?r+=s*f/w:o):Bo(u,e,r,w?e+=c*f/w:i,o),w-=f,m=x}return _}function jo(t,n,e){return(n[0]-t[0])*(e[1]-t[1])-(n[1]-t[1])*(e[0]-t[0])}function Xo(t,n){return t[0]-n[0]||t[1]-n[1]}function Vo(t){for(var n=t.length,e=[0,1],r=2,i=2;i<n;++i){for(;r>1&&jo(t[e[r-2]],t[e[r-1]],t[i])<=0;)--r;e[r++]=i}return e.slice(0,r)}function $o(t){this._size=t,this._call=this._error=null,this._tasks=[],this._data=[],this._waiting=this._active=this._ended=this._start=0}function Wo(t){if(!t._start)try{(function(t){for(;t._start=t._waiting&&t._active<t._size;){var n=t._ended+t._active,e=t._tasks[n],r=e.length-1,i=e[r];e[r]=function(t,n){return function(e,r){t._tasks[n]&&(--t._active,++t._ended,t._tasks[n]=null,null==t._error&&(null!=e?Zo(t,e):(t._data[n]=r,t._waiting?Wo(t):Go(t))))}}(t,n),--t._waiting,++t._active,e=i.apply(null,e),t._tasks[n]&&(t._tasks[n]=e||tv)}})(t)}catch(n){if(t._tasks[t._ended+t._active-1])Zo(t,n);else if(!t._data)throw n}}function Zo(t,n){var e,r=t._tasks.length;for(t._error=n,t._data=void 0,t._waiting=NaN;--r>=0;)if((e=t._tasks[r])&&(t._tasks[r]=null,e.abort))try{e.abort()}catch(n){}t._active=NaN,Go(t)}function Go(t){if(!t._active&&t._call){var n=t._data;t._data=void 0,t._call(t._error,n)}}function Qo(t){if(null==t)t=1/0;else if(!((t=+t)>=1))throw new Error("invalid concurrency");return new $o(t)}function Jo(){return Math.random()}function Ko(t,n){function e(t){var n,e=s.status;if(!e&&function(t){var n=t.responseType;return 
n&&"text"!==n?t.response:t.responseText}(s)||e>=200&&e<300||304===e){if(o)try{n=o.call(r,s)}catch(t){return void a.call("error",r,t)}else n=s;a.call("load",r,n)}else a.call("error",r,t)}var r,i,o,u,a=N("beforesend","progress","load","error"),c=ae(),s=new XMLHttpRequest,f=null,l=null,h=0;if("undefined"==typeof XDomainRequest||"withCredentials"in s||!/^(http(s)?:)?\/\//.test(t)||(s=new XDomainRequest),"onload"in s?s.onload=s.onerror=s.ontimeout=e:s.onreadystatechange=function(t){s.readyState>3&&e(t)},s.onprogress=function(t){a.call("progress",r,t)},r={header:function(t,n){return t=(t+"").toLowerCase(),arguments.length<2?c.get(t):(null==n?c.remove(t):c.set(t,n+""),r)},mimeType:function(t){return arguments.length?(i=null==t?null:t+"",r):i},responseType:function(t){return arguments.length?(u=t,r):u},timeout:function(t){return arguments.length?(h=+t,r):h},user:function(t){return arguments.length<1?f:(f=null==t?null:t+"",r)},password:function(t){return arguments.length<1?l:(l=null==t?null:t+"",r)},response:function(t){return o=t,r},get:function(t,n){return r.send("GET",t,n)},post:function(t,n){return r.send("POST",t,n)},send:function(n,e,o){return s.open(n,t,!0,f,l),null==i||c.has("accept")||c.set("accept",i+",*/*"),s.setRequestHeader&&c.each(function(t,n){s.setRequestHeader(n,t)}),null!=i&&s.overrideMimeType&&s.overrideMimeType(i),null!=u&&(s.responseType=u),h>0&&(s.timeout=h),null==o&&"function"==typeof e&&(o=e,e=null),null!=o&&1===o.length&&(o=function(t){return function(n,e){t(null==n?e:null)}}(o)),null!=o&&r.on("error",o).on("load",function(t){o(null,t)}),a.call("beforesend",r,s),s.send(null==e?null:e),r},abort:function(){return s.abort(),r},on:function(){var t=a.on.apply(a,arguments);return t===a?r:t}},null!=n){if("function"!=typeof n)throw new Error("invalid callback: "+n);return r.get(n)}return r}function tu(t,n){return function(e,r){var i=Ko(e).mimeType(t).response(n);if(null!=r){if("function"!=typeof r)throw new Error("invalid callback: "+r);return i.get(r)}return i}}function nu(t,n){return function(e,r,i){arguments.length<3&&(i=r,r=null);var o=Ko(e).mimeType(t);return o.row=function(t){return arguments.length?o.response(function(t,n){return function(e){return t(e.responseText,n)}}(n,r=t)):r},o.row(r),i?o.get(i):o}}function eu(t){function n(n){var o=n+"",u=e.get(o);if(!u){if(i!==gv)return i;e.set(o,u=r.push(n))}return t[(u-1)%t.length]}var e=ae(),r=[],i=gv;return t=null==t?[]:vv.call(t),n.domain=function(t){if(!arguments.length)return r.slice();r=[],e=ae();for(var i,o,u=-1,a=t.length;++u<a;)e.has(o=(i=t[u])+"")||e.set(o,r.push(i));return n},n.range=function(e){return arguments.length?(t=vv.call(e),n):t.slice()},n.unknown=function(t){return arguments.length?(i=t,n):i},n.copy=function(){return eu().domain(r).range(t).unknown(i)},n}function ru(){function t(){var t=i().length,r=u[1]<u[0],h=u[r-0],p=u[1-r];n=(p-h)/Math.max(1,t-c+2*s),a&&(n=Math.floor(n)),h+=(p-h-n*(t-c))*l,e=n*(1-c),a&&(h=Math.round(h),e=Math.round(e));var d=f(t).map(function(t){return h+n*t});return o(r?d.reverse():d)}var n,e,r=eu().unknown(void 0),i=r.domain,o=r.range,u=[0,1],a=!1,c=0,s=0,l=.5;return delete r.unknown,r.domain=function(n){return arguments.length?(i(n),t()):i()},r.range=function(n){return arguments.length?(u=[+n[0],+n[1]],t()):u.slice()},r.rangeRound=function(n){return u=[+n[0],+n[1]],a=!0,t()},r.bandwidth=function(){return e},r.step=function(){return n},r.round=function(n){return arguments.length?(a=!!n,t()):a},r.padding=function(n){return 
arguments.length?(c=s=Math.max(0,Math.min(1,n)),t()):c},r.paddingInner=function(n){return arguments.length?(c=Math.max(0,Math.min(1,n)),t()):c},r.paddingOuter=function(n){return arguments.length?(s=Math.max(0,Math.min(1,n)),t()):s},r.align=function(n){return arguments.length?(l=Math.max(0,Math.min(1,n)),t()):l},r.copy=function(){return ru().domain(i()).range(u).round(a).paddingInner(c).paddingOuter(s).align(l)},t()}function iu(t){var n=t.copy;return t.padding=t.paddingOuter,delete t.paddingInner,delete t.paddingOuter,t.copy=function(){return iu(n())},t}function ou(t){return function(){return t}}function uu(t){return+t}function au(t,n){return(n-=t=+t)?function(e){return(e-t)/n}:ou(n)}function cu(t,n,e,r){var i=t[0],o=t[1],u=n[0],a=n[1];return o<i?(i=e(o,i),u=r(a,u)):(i=e(i,o),u=r(u,a)),function(t){return u(i(t))}}function su(t,n,e,r){var i=Math.min(t.length,n.length)-1,o=new Array(i),u=new Array(i),a=-1;for(t[i]<t[0]&&(t=t.slice().reverse(),n=n.slice().reverse());++a<i;)o[a]=e(t[a],t[a+1]),u[a]=r(n[a],n[a+1]);return function(n){var e=Ds(t,n,1,i)-1;return u[e](o[e](n))}}function fu(t,n){return n.domain(t.domain()).range(t.range()).interpolate(t.interpolate()).clamp(t.clamp())}function lu(t,n){function e(){return i=Math.min(a.length,c.length)>2?su:cu,o=u=null,r}function r(n){return(o||(o=i(a,c,f?function(t){return function(n,e){var r=t(n=+n,e=+e);return function(t){return t<=n?0:t>=e?1:r(t)}}}(t):t,s)))(+n)}var i,o,u,a=_v,c=_v,s=cn,f=!1;return r.invert=function(t){return(u||(u=i(c,a,au,f?function(t){return function(n,e){var r=t(n=+n,e=+e);return function(t){return t<=0?n:t>=1?e:r(t)}}}(n):n)))(+t)},r.domain=function(t){return arguments.length?(a=dv.call(t,uu),e()):a.slice()},r.range=function(t){return arguments.length?(c=vv.call(t),e()):c.slice()},r.rangeRound=function(t){return c=vv.call(t),s=sn,e()},r.clamp=function(t){return arguments.length?(f=!!t,e()):f},r.interpolate=function(t){return arguments.length?(s=t,e()):s},e()}function hu(n){var e=n.domain;return n.ticks=function(t){var n=e();return l(n[0],n[n.length-1],null==t?10:t)},n.tickFormat=function(n,r){return function(n,e,r){var i,o=n[0],u=n[n.length-1],a=p(o,u,null==e?10:e);switch((r=Le(null==r?",f":r)).type){case"s":var c=Math.max(Math.abs(o),Math.abs(u));return null!=r.precision||isNaN(i=Ie(a,c))||(r.precision=i),t.formatPrefix(r,c);case"":case"e":case"g":case"p":case"r":null!=r.precision||isNaN(i=Ye(a,Math.max(Math.abs(o),Math.abs(u))))||(r.precision=i-("e"===r.type));break;case"f":case"%":null!=r.precision||isNaN(i=Fe(a))||(r.precision=i-2*("%"===r.type))}return t.format(r)}(e(),n,r)},n.nice=function(t){null==t&&(t=10);var r,i=e(),o=0,u=i.length-1,a=i[o],c=i[u];return c<a&&(r=a,a=c,c=r,r=o,o=u,u=r),(r=h(a,c,t))>0?r=h(a=Math.floor(a/r)*r,c=Math.ceil(c/r)*r,t):r<0&&(r=h(a=Math.ceil(a*r)/r,c=Math.floor(c*r)/r,t)),r>0?(i[o]=Math.floor(a/r)*r,i[u]=Math.ceil(c/r)*r,e(i)):r<0&&(i[o]=Math.ceil(a*r)/r,i[u]=Math.floor(c*r)/r,e(i)),n},n}function pu(){var t=lu(au,on);return t.copy=function(){return fu(t,pu())},hu(t)}function du(){function t(t){return+t}var n=[0,1];return t.invert=t,t.domain=t.range=function(e){return arguments.length?(n=dv.call(e,uu),t):n.slice()},t.copy=function(){return du().domain(n)},hu(t)}function vu(t,n){var e,r=0,i=(t=t.slice()).length-1,o=t[r],u=t[i];return u<o&&(e=r,r=i,i=e,e=o,o=u,u=e),t[r]=n.floor(o),t[i]=n.ceil(u),t}function gu(t,n){return(n=Math.log(n/t))?function(e){return Math.log(e/t)/n}:ou(n)}function _u(t,n){return t<0?function(e){return-Math.pow(-n,e)*Math.pow(-t,1-e)}:function(e){return 
Math.pow(n,e)*Math.pow(t,1-e)}}function yu(t){return isFinite(t)?+("1e"+t):t<0?0:t}function mu(t){return 10===t?yu:t===Math.E?Math.exp:function(n){return Math.pow(t,n)}}function xu(t){return t===Math.E?Math.log:10===t&&Math.log10||2===t&&Math.log2||(t=Math.log(t),function(n){return Math.log(n)/t})}function bu(t){return function(n){return-t(-n)}}function wu(){function n(){return o=xu(i),u=mu(i),r()[0]<0&&(o=bu(o),u=bu(u)),e}var e=lu(gu,_u).domain([1,10]),r=e.domain,i=10,o=xu(10),u=mu(10);return e.base=function(t){return arguments.length?(i=+t,n()):i},e.domain=function(t){return arguments.length?(r(t),n()):r()},e.ticks=function(t){var n,e=r(),a=e[0],c=e[e.length-1];(n=c<a)&&(p=a,a=c,c=p);var s,f,h,p=o(a),d=o(c),v=null==t?10:+t,g=[];if(!(i%1)&&d-p<v){if(p=Math.round(p)-1,d=Math.round(d)+1,a>0){for(;p<d;++p)for(f=1,s=u(p);f<i;++f)if(!((h=s*f)<a)){if(h>c)break;g.push(h)}}else for(;p<d;++p)for(f=i-1,s=u(p);f>=1;--f)if(!((h=s*f)<a)){if(h>c)break;g.push(h)}}else g=l(p,d,Math.min(d-p,v)).map(u);return n?g.reverse():g},e.tickFormat=function(n,r){if(null==r&&(r=10===i?".0e":","),"function"!=typeof r&&(r=t.format(r)),n===1/0)return r;null==n&&(n=10);var a=Math.max(1,i*n/e.ticks().length);return function(t){var n=t/u(Math.round(o(t)));return n*i<i-.5&&(n*=i),n<=a?r(t):""}},e.nice=function(){return r(vu(r(),{floor:function(t){return u(Math.floor(o(t)))},ceil:function(t){return u(Math.ceil(o(t)))}}))},e.copy=function(){return fu(e,wu().base(i))},e}function Mu(t,n){return t<0?-Math.pow(-t,n):Math.pow(t,n)}function Tu(){var t=1,n=lu(function(n,e){return(e=Mu(e,t)-(n=Mu(n,t)))?function(r){return(Mu(r,t)-n)/e}:ou(e)},function(n,e){return e=Mu(e,t)-(n=Mu(n,t)),function(r){return Mu(n+e*r,1/t)}}),e=n.domain;return n.exponent=function(n){return arguments.length?(t=+n,e(e())):t},n.copy=function(){return fu(n,Tu().exponent(t))},hu(n)}function Nu(){function t(){var t=0,n=Math.max(1,i.length);for(o=new Array(n-1);++t<n;)o[t-1]=v(r,t/n);return e}function e(t){if(!isNaN(t=+t))return i[Ds(o,t)]}var r=[],i=[],o=[];return e.invertExtent=function(t){var n=i.indexOf(t);return n<0?[NaN,NaN]:[n>0?o[n-1]:r[0],n<o.length?o[n]:r[r.length-1]]},e.domain=function(e){if(!arguments.length)return r.slice();r=[];for(var i,o=0,u=e.length;o<u;++o)null==(i=e[o])||isNaN(i=+i)||r.push(i);return r.sort(n),t()},e.range=function(n){return arguments.length?(i=vv.call(n),t()):i.slice()},e.quantiles=function(){return o.slice()},e.copy=function(){return Nu().domain(r).range(i)},e}function ku(){function t(t){if(t<=t)return u[Ds(o,t,0,i)]}function n(){var n=-1;for(o=new Array(i);++n<i;)o[n]=((n+1)*r-(n-i)*e)/(i+1);return t}var e=0,r=1,i=1,o=[.5],u=[0,1];return t.domain=function(t){return arguments.length?(e=+t[0],r=+t[1],n()):[e,r]},t.range=function(t){return arguments.length?(i=(u=vv.call(t)).length-1,n()):u.slice()},t.invertExtent=function(t){var n=u.indexOf(t);return n<0?[NaN,NaN]:n<1?[e,o[0]]:n>=i?[o[i-1],r]:[o[n-1],o[n]]},t.copy=function(){return ku().domain([e,r]).range(u)},hu(t)}function Su(){function t(t){if(t<=t)return e[Ds(n,t,0,r)]}var n=[.5],e=[0,1],r=1;return t.domain=function(i){return arguments.length?(n=vv.call(i),r=Math.min(n.length,e.length-1),t):n.slice()},t.range=function(i){return arguments.length?(e=vv.call(i),r=Math.min(n.length,e.length-1),t):e.slice()},t.invertExtent=function(t){var r=e.indexOf(t);return[n[r-1],n[r]]},t.copy=function(){return Su().domain(n).range(e)},t}function Eu(t,n,e,r){function i(n){return t(n=new Date(+n)),n}return i.floor=i,i.ceil=function(e){return t(e=new 
Date(e-1)),n(e,1),t(e),e},i.round=function(t){var n=i(t),e=i.ceil(t);return t-n<e-t?n:e},i.offset=function(t,e){return n(t=new Date(+t),null==e?1:Math.floor(e)),t},i.range=function(e,r,o){var u,a=[];if(e=i.ceil(e),o=null==o?1:Math.floor(o),!(e<r&&o>0))return a;do{a.push(u=new Date(+e)),n(e,o),t(e)}while(u<e&&e<r);return a},i.filter=function(e){return Eu(function(n){if(n>=n)for(;t(n),!e(n);)n.setTime(n-1)},function(t,r){if(t>=t)if(r<0)for(;++r<=0;)for(;n(t,-1),!e(t););else for(;--r>=0;)for(;n(t,1),!e(t););})},e&&(i.count=function(n,r){return yv.setTime(+n),mv.setTime(+r),t(yv),t(mv),Math.floor(e(yv,mv))},i.every=function(t){return t=Math.floor(t),isFinite(t)&&t>0?t>1?i.filter(r?function(n){return r(n)%t==0}:function(n){return i.count(0,n)%t==0}):i:null}),i}function Au(t){return Eu(function(n){n.setDate(n.getDate()-(n.getDay()+7-t)%7),n.setHours(0,0,0,0)},function(t,n){t.setDate(t.getDate()+7*n)},function(t,n){return(n-t-(n.getTimezoneOffset()-t.getTimezoneOffset())*wv)/Mv})}function Cu(t){return Eu(function(n){n.setUTCDate(n.getUTCDate()-(n.getUTCDay()+7-t)%7),n.setUTCHours(0,0,0,0)},function(t,n){t.setUTCDate(t.getUTCDate()+7*n)},function(t,n){return(n-t)/Mv})}function zu(t){if(0<=t.y&&t.y<100){var n=new Date(-1,t.m,t.d,t.H,t.M,t.S,t.L);return n.setFullYear(t.y),n}return new Date(t.y,t.m,t.d,t.H,t.M,t.S,t.L)}function Pu(t){if(0<=t.y&&t.y<100){var n=new Date(Date.UTC(-1,t.m,t.d,t.H,t.M,t.S,t.L));return n.setUTCFullYear(t.y),n}return new Date(Date.UTC(t.y,t.m,t.d,t.H,t.M,t.S,t.L))}function Ru(t){return{y:t,m:0,d:1,H:0,M:0,S:0,L:0}}function Lu(t){function n(t,n){return function(e){var r,i,o,u=[],a=-1,c=0,s=t.length;for(e instanceof Date||(e=new Date(+e));++a<s;)37===t.charCodeAt(a)&&(u.push(t.slice(c,a)),null!=(i=bg[r=t.charAt(++a)])?r=t.charAt(++a):i="e"===r?" 
":"0",(o=n[r])&&(r=o(e,i)),u.push(r),c=a+1);return u.push(t.slice(c,a)),u.join("")}}function e(t,n){return function(e){var i,o,u=Ru(1900);if(r(u,t,e+="",0)!=e.length)return null;if("Q"in u)return new Date(u.Q);if("p"in u&&(u.H=u.H%12+12*u.p),"V"in u){if(u.V<1||u.V>53)return null;"w"in u||(u.w=1),"Z"in u?(i=(o=(i=Pu(Ru(u.y))).getUTCDay())>4||0===o?rg.ceil(i):rg(i),i=tg.offset(i,7*(u.V-1)),u.y=i.getUTCFullYear(),u.m=i.getUTCMonth(),u.d=i.getUTCDate()+(u.w+6)%7):(i=(o=(i=n(Ru(u.y))).getDay())>4||0===o?Rv.ceil(i):Rv(i),i=Cv.offset(i,7*(u.V-1)),u.y=i.getFullYear(),u.m=i.getMonth(),u.d=i.getDate()+(u.w+6)%7)}else("W"in u||"U"in u)&&("w"in u||(u.w="u"in u?u.u%7:"W"in u?1:0),o="Z"in u?Pu(Ru(u.y)).getUTCDay():n(Ru(u.y)).getDay(),u.m=0,u.d="W"in u?(u.w+6)%7+7*u.W-(o+5)%7:u.w+7*u.U-(o+6)%7);return"Z"in u?(u.H+=u.Z/100|0,u.M+=u.Z%100,Pu(u)):n(u)}}function r(t,n,e,r){for(var i,o,u=0,a=n.length,c=e.length;u<a;){if(r>=c)return-1;if(37===(i=n.charCodeAt(u++))){if(i=n.charAt(u++),!(o=T[i in bg?n.charAt(u++):i])||(r=o(t,e,r))<0)return-1}else if(i!=e.charCodeAt(r++))return-1}return r}var i=t.dateTime,o=t.date,u=t.time,a=t.periods,c=t.days,s=t.shortDays,f=t.months,l=t.shortMonths,h=Uu(a),p=Ou(a),d=Uu(c),v=Ou(c),g=Uu(s),_=Ou(s),y=Uu(f),m=Ou(f),x=Uu(l),b=Ou(l),w={a:function(t){return s[t.getDay()]},A:function(t){return c[t.getDay()]},b:function(t){return l[t.getMonth()]},B:function(t){return f[t.getMonth()]},c:null,d:ia,e:ia,f:sa,H:oa,I:ua,j:aa,L:ca,m:fa,M:la,p:function(t){return a[+(t.getHours()>=12)]},Q:Fa,s:Ia,S:ha,u:pa,U:da,V:va,w:ga,W:_a,x:null,X:null,y:ya,Y:ma,Z:xa,"%":Oa},M={a:function(t){return s[t.getUTCDay()]},A:function(t){return c[t.getUTCDay()]},b:function(t){return l[t.getUTCMonth()]},B:function(t){return f[t.getUTCMonth()]},c:null,d:ba,e:ba,f:ka,H:wa,I:Ma,j:Ta,L:Na,m:Sa,M:Ea,p:function(t){return a[+(t.getUTCHours()>=12)]},Q:Fa,s:Ia,S:Aa,u:Ca,U:za,V:Pa,w:Ra,W:La,x:null,X:null,y:qa,Y:Da,Z:Ua,"%":Oa},T={a:function(t,n,e){var r=g.exec(n.slice(e));return r?(t.w=_[r[0].toLowerCase()],e+r[0].length):-1},A:function(t,n,e){var r=d.exec(n.slice(e));return r?(t.w=v[r[0].toLowerCase()],e+r[0].length):-1},b:function(t,n,e){var r=x.exec(n.slice(e));return r?(t.m=b[r[0].toLowerCase()],e+r[0].length):-1},B:function(t,n,e){var r=y.exec(n.slice(e));return r?(t.m=m[r[0].toLowerCase()],e+r[0].length):-1},c:function(t,n,e){return r(t,i,n,e)},d:Wu,e:Wu,f:ta,H:Gu,I:Gu,j:Zu,L:Ku,m:$u,M:Qu,p:function(t,n,e){var r=h.exec(n.slice(e));return r?(t.p=p[r[0].toLowerCase()],e+r[0].length):-1},Q:ea,s:ra,S:Ju,u:Iu,U:Yu,V:Bu,w:Fu,W:Hu,x:function(t,n,e){return r(t,o,n,e)},X:function(t,n,e){return r(t,u,n,e)},y:Xu,Y:ju,Z:Vu,"%":na};return w.x=n(o,w),w.X=n(u,w),w.c=n(i,w),M.x=n(o,M),M.X=n(u,M),M.c=n(i,M),{format:function(t){var e=n(t+="",w);return e.toString=function(){return t},e},parse:function(t){var n=e(t+="",zu);return n.toString=function(){return t},n},utcFormat:function(t){var e=n(t+="",M);return e.toString=function(){return t},e},utcParse:function(t){var n=e(t,Pu);return n.toString=function(){return t},n}}}function qu(t,n,e){var r=t<0?"-":"",i=(r?-t:t)+"",o=i.length;return r+(o<e?new Array(e-o+1).join(n)+i:i)}function Du(t){return t.replace(Tg,"\\$&")}function Uu(t){return new RegExp("^(?:"+t.map(Du).join("|")+")","i")}function Ou(t){for(var n={},e=-1,r=t.length;++e<r;)n[t[e].toLowerCase()]=e;return n}function Fu(t,n,e){var r=wg.exec(n.slice(e,e+1));return r?(t.w=+r[0],e+r[0].length):-1}function Iu(t,n,e){var r=wg.exec(n.slice(e,e+1));return r?(t.u=+r[0],e+r[0].length):-1}function Yu(t,n,e){var 
r=wg.exec(n.slice(e,e+2));return r?(t.U=+r[0],e+r[0].length):-1}function Bu(t,n,e){var r=wg.exec(n.slice(e,e+2));return r?(t.V=+r[0],e+r[0].length):-1}function Hu(t,n,e){var r=wg.exec(n.slice(e,e+2));return r?(t.W=+r[0],e+r[0].length):-1}function ju(t,n,e){var r=wg.exec(n.slice(e,e+4));return r?(t.y=+r[0],e+r[0].length):-1}function Xu(t,n,e){var r=wg.exec(n.slice(e,e+2));return r?(t.y=+r[0]+(+r[0]>68?1900:2e3),e+r[0].length):-1}function Vu(t,n,e){var r=/^(Z)|([+-]\d\d)(?::?(\d\d))?/.exec(n.slice(e,e+6));return r?(t.Z=r[1]?0:-(r[2]+(r[3]||"00")),e+r[0].length):-1}function $u(t,n,e){var r=wg.exec(n.slice(e,e+2));return r?(t.m=r[0]-1,e+r[0].length):-1}function Wu(t,n,e){var r=wg.exec(n.slice(e,e+2));return r?(t.d=+r[0],e+r[0].length):-1}function Zu(t,n,e){var r=wg.exec(n.slice(e,e+3));return r?(t.m=0,t.d=+r[0],e+r[0].length):-1}function Gu(t,n,e){var r=wg.exec(n.slice(e,e+2));return r?(t.H=+r[0],e+r[0].length):-1}function Qu(t,n,e){var r=wg.exec(n.slice(e,e+2));return r?(t.M=+r[0],e+r[0].length):-1}function Ju(t,n,e){var r=wg.exec(n.slice(e,e+2));return r?(t.S=+r[0],e+r[0].length):-1}function Ku(t,n,e){var r=wg.exec(n.slice(e,e+3));return r?(t.L=+r[0],e+r[0].length):-1}function ta(t,n,e){var r=wg.exec(n.slice(e,e+6));return r?(t.L=Math.floor(r[0]/1e3),e+r[0].length):-1}function na(t,n,e){var r=Mg.exec(n.slice(e,e+1));return r?e+r[0].length:-1}function ea(t,n,e){var r=wg.exec(n.slice(e));return r?(t.Q=+r[0],e+r[0].length):-1}function ra(t,n,e){var r=wg.exec(n.slice(e));return r?(t.Q=1e3*+r[0],e+r[0].length):-1}function ia(t,n){return qu(t.getDate(),n,2)}function oa(t,n){return qu(t.getHours(),n,2)}function ua(t,n){return qu(t.getHours()%12||12,n,2)}function aa(t,n){return qu(1+Cv.count(Wv(t),t),n,3)}function ca(t,n){return qu(t.getMilliseconds(),n,3)}function sa(t,n){return ca(t,n)+"000"}function fa(t,n){return qu(t.getMonth()+1,n,2)}function la(t,n){return qu(t.getMinutes(),n,2)}function ha(t,n){return qu(t.getSeconds(),n,2)}function pa(t){var n=t.getDay();return 0===n?7:n}function da(t,n){return qu(Pv.count(Wv(t),t),n,2)}function va(t,n){var e=t.getDay();return t=e>=4||0===e?Dv(t):Dv.ceil(t),qu(Dv.count(Wv(t),t)+(4===Wv(t).getDay()),n,2)}function ga(t){return t.getDay()}function _a(t,n){return qu(Rv.count(Wv(t),t),n,2)}function ya(t,n){return qu(t.getFullYear()%100,n,2)}function ma(t,n){return qu(t.getFullYear()%1e4,n,4)}function xa(t){var n=t.getTimezoneOffset();return(n>0?"-":(n*=-1,"+"))+qu(n/60|0,"0",2)+qu(n%60,"0",2)}function ba(t,n){return qu(t.getUTCDate(),n,2)}function wa(t,n){return qu(t.getUTCHours(),n,2)}function Ma(t,n){return qu(t.getUTCHours()%12||12,n,2)}function Ta(t,n){return qu(1+tg.count(yg(t),t),n,3)}function Na(t,n){return qu(t.getUTCMilliseconds(),n,3)}function ka(t,n){return Na(t,n)+"000"}function Sa(t,n){return qu(t.getUTCMonth()+1,n,2)}function Ea(t,n){return qu(t.getUTCMinutes(),n,2)}function Aa(t,n){return qu(t.getUTCSeconds(),n,2)}function Ca(t){var n=t.getUTCDay();return 0===n?7:n}function za(t,n){return qu(eg.count(yg(t),t),n,2)}function Pa(t,n){var e=t.getUTCDay();return t=e>=4||0===e?ug(t):ug.ceil(t),qu(ug.count(yg(t),t)+(4===yg(t).getUTCDay()),n,2)}function Ra(t){return t.getUTCDay()}function La(t,n){return qu(rg.count(yg(t),t),n,2)}function qa(t,n){return qu(t.getUTCFullYear()%100,n,2)}function Da(t,n){return qu(t.getUTCFullYear()%1e4,n,4)}function Ua(){return"+0000"}function Oa(){return"%"}function Fa(t){return+t}function Ia(t){return Math.floor(+t/1e3)}function Ya(n){return 
mg=Lu(n),t.timeFormat=mg.format,t.timeParse=mg.parse,t.utcFormat=mg.utcFormat,t.utcParse=mg.utcParse,mg}function Ba(t){return new Date(t)}function Ha(t){return t instanceof Date?+t:+new Date(+t)}function ja(t,n,r,i,o,u,a,c,s){function f(e){return(a(e)<e?g:u(e)<e?_:o(e)<e?y:i(e)<e?m:n(e)<e?r(e)<e?x:b:t(e)<e?w:M)(e)}function l(n,r,i,o){if(null==n&&(n=10),"number"==typeof n){var u=Math.abs(i-r)/n,a=e(function(t){return t[2]}).right(T,u);a===T.length?(o=p(r/Lg,i/Lg,n),n=t):a?(o=(a=T[u/T[a-1][2]<T[a][2]/u?a-1:a])[1],n=a[0]):(o=Math.max(p(r,i,n),1),n=c)}return null==o?n:n.every(o)}var h=lu(au,on),d=h.invert,v=h.domain,g=s(".%L"),_=s(":%S"),y=s("%I:%M"),m=s("%I %p"),x=s("%a %d"),b=s("%b %d"),w=s("%B"),M=s("%Y"),T=[[a,1,Eg],[a,5,5*Eg],[a,15,15*Eg],[a,30,30*Eg],[u,1,Ag],[u,5,5*Ag],[u,15,15*Ag],[u,30,30*Ag],[o,1,Cg],[o,3,3*Cg],[o,6,6*Cg],[o,12,12*Cg],[i,1,zg],[i,2,2*zg],[r,1,Pg],[n,1,Rg],[n,3,3*Rg],[t,1,Lg]];return h.invert=function(t){return new Date(d(t))},h.domain=function(t){return arguments.length?v(dv.call(t,Ha)):v().map(Ba)},h.ticks=function(t,n){var e,r=v(),i=r[0],o=r[r.length-1],u=o<i;return u&&(e=i,i=o,o=e),e=l(t,i,o,n),e=e?e.range(i,o+1):[],u?e.reverse():e},h.tickFormat=function(t,n){return null==n?f:s(n)},h.nice=function(t,n){var e=v();return(t=l(t,e[0],e[e.length-1],n))?v(vu(e,t)):h},h.copy=function(){return fu(h,ja(t,n,r,i,o,u,a,c,s))},h}function Xa(t){return t.match(/.{6}/g).map(function(t){return"#"+t})}function Va(t){var n=t.length;return function(e){return t[Math.max(0,Math.min(n-1,Math.floor(e*n)))]}}function $a(t){function n(n){var o=(n-e)/(r-e);return t(i?Math.max(0,Math.min(1,o)):o)}var e=0,r=1,i=!1;return n.domain=function(t){return arguments.length?(e=+t[0],r=+t[1],n):[e,r]},n.clamp=function(t){return arguments.length?(i=!!t,n):i},n.interpolator=function(e){return arguments.length?(t=e,n):t},n.copy=function(){return $a(t).domain([e,r]).clamp(i)},hu(n)}function Wa(t){return function(){return t}}function Za(t){return t>=1?e_:t<=-1?-e_:Math.asin(t)}function Ga(t){return t.innerRadius}function Qa(t){return t.outerRadius}function Ja(t){return t.startAngle}function Ka(t){return t.endAngle}function tc(t){return t&&t.padAngle}function nc(t,n,e,r,i,o,u){var a=t-e,c=n-r,s=(u?o:-o)/Kg(a*a+c*c),f=s*c,l=-s*a,h=t+f,p=n+l,d=e+f,v=r+l,g=(h+d)/2,_=(p+v)/2,y=d-h,m=v-p,x=y*y+m*m,b=i-o,w=h*v-d*p,M=(m<0?-1:1)*Kg(Gg(0,b*b*x-w*w)),T=(w*m-y*M)/x,N=(-w*y-m*M)/x,k=(w*m+y*M)/x,S=(-w*y+m*M)/x,E=T-g,A=N-_,C=k-g,z=S-_;return E*E+A*A>C*C+z*z&&(T=k,N=S),{cx:T,cy:N,x01:-f,y01:-l,x11:T*(i/b-1),y11:N*(i/b-1)}}function ec(t){this._context=t}function rc(t){return new ec(t)}function ic(t){return t[0]}function oc(t){return t[1]}function uc(){function t(t){var a,c,s,f=t.length,l=!1;for(null==i&&(u=o(s=te())),a=0;a<=f;++a)!(a<f&&r(c=t[a],a,t))===l&&((l=!l)?u.lineStart():u.lineEnd()),l&&u.point(+n(c,a,t),+e(c,a,t));if(s)return u=null,s+""||null}var n=ic,e=oc,r=Wa(!0),i=null,o=rc,u=null;return t.x=function(e){return arguments.length?(n="function"==typeof e?e:Wa(+e),t):n},t.y=function(n){return arguments.length?(e="function"==typeof n?n:Wa(+n),t):e},t.defined=function(n){return arguments.length?(r="function"==typeof n?n:Wa(!!n),t):r},t.curve=function(n){return arguments.length?(o=n,null!=i&&(u=o(i)),t):o},t.context=function(n){return arguments.length?(null==n?i=u=null:u=o(i=n),t):i},t}function ac(){function t(t){var n,f,l,h,p,d=t.length,v=!1,g=new Array(d),_=new 
Array(d);for(null==a&&(s=c(p=te())),n=0;n<=d;++n){if(!(n<d&&u(h=t[n],n,t))===v)if(v=!v)f=n,s.areaStart(),s.lineStart();else{for(s.lineEnd(),s.lineStart(),l=n-1;l>=f;--l)s.point(g[l],_[l]);s.lineEnd(),s.areaEnd()}v&&(g[n]=+e(h,n,t),_[n]=+i(h,n,t),s.point(r?+r(h,n,t):g[n],o?+o(h,n,t):_[n]))}if(p)return s=null,p+""||null}function n(){return uc().defined(u).curve(c).context(a)}var e=ic,r=null,i=Wa(0),o=oc,u=Wa(!0),a=null,c=rc,s=null;return t.x=function(n){return arguments.length?(e="function"==typeof n?n:Wa(+n),r=null,t):e},t.x0=function(n){return arguments.length?(e="function"==typeof n?n:Wa(+n),t):e},t.x1=function(n){return arguments.length?(r=null==n?null:"function"==typeof n?n:Wa(+n),t):r},t.y=function(n){return arguments.length?(i="function"==typeof n?n:Wa(+n),o=null,t):i},t.y0=function(n){return arguments.length?(i="function"==typeof n?n:Wa(+n),t):i},t.y1=function(n){return arguments.length?(o=null==n?null:"function"==typeof n?n:Wa(+n),t):o},t.lineX0=t.lineY0=function(){return n().x(e).y(i)},t.lineY1=function(){return n().x(e).y(o)},t.lineX1=function(){return n().x(r).y(i)},t.defined=function(n){return arguments.length?(u="function"==typeof n?n:Wa(!!n),t):u},t.curve=function(n){return arguments.length?(c=n,null!=a&&(s=c(a)),t):c},t.context=function(n){return arguments.length?(null==n?a=s=null:s=c(a=n),t):a},t}function cc(t,n){return n<t?-1:n>t?1:n>=t?0:NaN}function sc(t){return t}function fc(t){this._curve=t}function lc(t){function n(n){return new fc(t(n))}return n._curve=t,n}function hc(t){var n=t.curve;return t.angle=t.x,delete t.x,t.radius=t.y,delete t.y,t.curve=function(t){return arguments.length?n(lc(t)):n()._curve},t}function pc(){return hc(uc().curve(i_))}function dc(){var t=ac().curve(i_),n=t.curve,e=t.lineX0,r=t.lineX1,i=t.lineY0,o=t.lineY1;return t.angle=t.x,delete t.x,t.startAngle=t.x0,delete t.x0,t.endAngle=t.x1,delete t.x1,t.radius=t.y,delete t.y,t.innerRadius=t.y0,delete t.y0,t.outerRadius=t.y1,delete t.y1,t.lineStartAngle=function(){return hc(e())},delete t.lineX0,t.lineEndAngle=function(){return hc(r())},delete t.lineX1,t.lineInnerRadius=function(){return hc(i())},delete t.lineY0,t.lineOuterRadius=function(){return hc(o())},delete t.lineY1,t.curve=function(t){return arguments.length?n(lc(t)):n()._curve},t}function vc(t,n){return[(n=+n)*Math.cos(t-=Math.PI/2),n*Math.sin(t)]}function gc(t){return t.source}function _c(t){return t.target}function yc(t){function n(){var n,a=o_.call(arguments),c=e.apply(this,a),s=r.apply(this,a);if(u||(u=n=te()),t(u,+i.apply(this,(a[0]=c,a)),+o.apply(this,a),+i.apply(this,(a[0]=s,a)),+o.apply(this,a)),n)return u=null,n+""||null}var e=gc,r=_c,i=ic,o=oc,u=null;return n.source=function(t){return arguments.length?(e=t,n):e},n.target=function(t){return arguments.length?(r=t,n):r},n.x=function(t){return arguments.length?(i="function"==typeof t?t:Wa(+t),n):i},n.y=function(t){return arguments.length?(o="function"==typeof t?t:Wa(+t),n):o},n.context=function(t){return arguments.length?(u=null==t?null:t,n):u},n}function mc(t,n,e,r,i){t.moveTo(n,e),t.bezierCurveTo(n=(n+r)/2,e,n,i,r,i)}function xc(t,n,e,r,i){t.moveTo(n,e),t.bezierCurveTo(n,e=(e+i)/2,r,e,r,i)}function bc(t,n,e,r,i){var o=vc(n,e),u=vc(n,e=(e+i)/2),a=vc(r,e),c=vc(r,i);t.moveTo(o[0],o[1]),t.bezierCurveTo(u[0],u[1],a[0],a[1],c[0],c[1])}function wc(){}function Mc(t,n,e){t._context.bezierCurveTo((2*t._x0+t._x1)/3,(2*t._y0+t._y1)/3,(t._x0+2*t._x1)/3,(t._y0+2*t._y1)/3,(t._x0+4*t._x1+n)/6,(t._y0+4*t._y1+e)/6)}function Tc(t){this._context=t}function Nc(t){this._context=t}function 
kc(t){this._context=t}function Sc(t,n){this._basis=new Tc(t),this._beta=n}function Ec(t,n,e){t._context.bezierCurveTo(t._x1+t._k*(t._x2-t._x0),t._y1+t._k*(t._y2-t._y0),t._x2+t._k*(t._x1-n),t._y2+t._k*(t._y1-e),t._x2,t._y2)}function Ac(t,n){this._context=t,this._k=(1-n)/6}function Cc(t,n){this._context=t,this._k=(1-n)/6}function zc(t,n){this._context=t,this._k=(1-n)/6}function Pc(t,n,e){var r=t._x1,i=t._y1,o=t._x2,u=t._y2;if(t._l01_a>t_){var a=2*t._l01_2a+3*t._l01_a*t._l12_a+t._l12_2a,c=3*t._l01_a*(t._l01_a+t._l12_a);r=(r*a-t._x0*t._l12_2a+t._x2*t._l01_2a)/c,i=(i*a-t._y0*t._l12_2a+t._y2*t._l01_2a)/c}if(t._l23_a>t_){var s=2*t._l23_2a+3*t._l23_a*t._l12_a+t._l12_2a,f=3*t._l23_a*(t._l23_a+t._l12_a);o=(o*s+t._x1*t._l23_2a-n*t._l12_2a)/f,u=(u*s+t._y1*t._l23_2a-e*t._l12_2a)/f}t._context.bezierCurveTo(r,i,o,u,t._x2,t._y2)}function Rc(t,n){this._context=t,this._alpha=n}function Lc(t,n){this._context=t,this._alpha=n}function qc(t,n){this._context=t,this._alpha=n}function Dc(t){this._context=t}function Uc(t){return t<0?-1:1}function Oc(t,n,e){var r=t._x1-t._x0,i=n-t._x1,o=(t._y1-t._y0)/(r||i<0&&-0),u=(e-t._y1)/(i||r<0&&-0),a=(o*i+u*r)/(r+i);return(Uc(o)+Uc(u))*Math.min(Math.abs(o),Math.abs(u),.5*Math.abs(a))||0}function Fc(t,n){var e=t._x1-t._x0;return e?(3*(t._y1-t._y0)/e-n)/2:n}function Ic(t,n,e){var r=t._x0,i=t._y0,o=t._x1,u=t._y1,a=(o-r)/3;t._context.bezierCurveTo(r+a,i+a*n,o-a,u-a*e,o,u)}function Yc(t){this._context=t}function Bc(t){this._context=new Hc(t)}function Hc(t){this._context=t}function jc(t){this._context=t}function Xc(t){var n,e,r=t.length-1,i=new Array(r),o=new Array(r),u=new Array(r);for(i[0]=0,o[0]=2,u[0]=t[0]+2*t[1],n=1;n<r-1;++n)i[n]=1,o[n]=4,u[n]=4*t[n]+2*t[n+1];for(i[r-1]=2,o[r-1]=7,u[r-1]=8*t[r-1]+t[r],n=1;n<r;++n)e=i[n]/o[n-1],o[n]-=e,u[n]-=e*u[n-1];for(i[r-1]=u[r-1]/o[r-1],n=r-2;n>=0;--n)i[n]=(u[n]-i[n+1])/o[n];for(o[r-1]=(t[r]+i[r-1])/2,n=0;n<r-1;++n)o[n]=2*t[n+1]-i[n+1];return[i,o]}function Vc(t,n){this._context=t,this._t=n}function $c(t,n){if((i=t.length)>1)for(var e,r,i,o=1,u=t[n[0]],a=u.length;o<i;++o)for(r=u,u=t[n[o]],e=0;e<a;++e)u[e][1]+=u[e][0]=isNaN(r[e][1])?r[e][0]:r[e][1]}function Wc(t){for(var n=t.length,e=new Array(n);--n>=0;)e[n]=n;return e}function Zc(t,n){return t[n]}function Gc(t){var n=t.map(Qc);return Wc(t).sort(function(t,e){return n[t]-n[e]})}function Qc(t){for(var n,e=0,r=-1,i=t.length;++r<i;)(n=+t[r][1])&&(e+=n);return e}function Jc(t){return function(){return t}}function Kc(t){return t[0]}function ts(t){return t[1]}function ns(){this._=null}function es(t){t.U=t.C=t.L=t.R=t.P=t.N=null}function rs(t,n){var e=n,r=n.R,i=e.U;i?i.L===e?i.L=r:i.R=r:t._=r,r.U=i,e.U=r,e.R=r.L,e.R&&(e.R.U=e),r.L=e}function is(t,n){var e=n,r=n.L,i=e.U;i?i.L===e?i.L=r:i.R=r:t._=r,r.U=i,e.U=r,e.L=r.R,e.L&&(e.L.U=e),r.R=e}function os(t){for(;t.L;)t=t.L;return t}function us(t,n,e,r){var i=[null,null],o=L_.push(i)-1;return i.left=t,i.right=n,e&&cs(i,t,n,e),r&&cs(i,n,t,r),P_[t.index].halfedges.push(o),P_[n.index].halfedges.push(o),i}function as(t,n,e){var r=[n,e];return r.left=t,r}function cs(t,n,e,r){t[0]||t[1]?t.left===e?t[1]=r:t[0]=r:(t[0]=r,t.left=n,t.right=e)}function ss(t,n,e,r,i){var o,u=t[0],a=t[1],c=u[0],s=u[1],f=0,l=1,h=a[0]-c,p=a[1]-s;if(o=n-c,h||!(o>0)){if(o/=h,h<0){if(o<f)return;o<l&&(l=o)}else if(h>0){if(o>l)return;o>f&&(f=o)}if(o=r-c,h||!(o<0)){if(o/=h,h<0){if(o>l)return;o>f&&(f=o)}else if(h>0){if(o<f)return;o<l&&(l=o)}if(o=e-s,p||!(o>0)){if(o/=p,p<0){if(o<f)return;o<l&&(l=o)}else 
if(p>0){if(o>l)return;o>f&&(f=o)}if(o=i-s,p||!(o<0)){if(o/=p,p<0){if(o>l)return;o>f&&(f=o)}else if(p>0){if(o<f)return;o<l&&(l=o)}return!(f>0||l<1)||(f>0&&(t[0]=[c+f*h,s+f*p]),l<1&&(t[1]=[c+l*h,s+l*p]),!0)}}}}}function fs(t,n,e,r,i){var o=t[1];if(o)return!0;var u,a,c=t[0],s=t.left,f=t.right,l=s[0],h=s[1],p=f[0],d=f[1],v=(l+p)/2,g=(h+d)/2;if(d===h){if(v<n||v>=r)return;if(l>p){if(c){if(c[1]>=i)return}else c=[v,e];o=[v,i]}else{if(c){if(c[1]<e)return}else c=[v,i];o=[v,e]}}else if(u=(l-p)/(d-h),a=g-u*v,u<-1||u>1)if(l>p){if(c){if(c[1]>=i)return}else c=[(e-a)/u,e];o=[(i-a)/u,i]}else{if(c){if(c[1]<e)return}else c=[(i-a)/u,i];o=[(e-a)/u,e]}else if(h<d){if(c){if(c[0]>=r)return}else c=[n,u*n+a];o=[r,u*r+a]}else{if(c){if(c[0]<n)return}else c=[r,u*r+a];o=[n,u*n+a]}return t[0]=c,t[1]=o,!0}function ls(t,n){var e=t.site,r=n.left,i=n.right;return e===i&&(i=r,r=e),i?Math.atan2(i[1]-r[1],i[0]-r[0]):(e===r?(r=n[1],i=n[0]):(r=n[0],i=n[1]),Math.atan2(r[0]-i[0],i[1]-r[1]))}function hs(t,n){return n[+(n.left!==t.site)]}function ps(t,n){return n[+(n.left===t.site)]}function ds(t){var n=t.P,e=t.N;if(n&&e){var r=n.site,i=t.site,o=e.site;if(r!==o){var u=i[0],a=i[1],c=r[0]-u,s=r[1]-a,f=o[0]-u,l=o[1]-a,h=2*(c*l-s*f);if(!(h>=-O_)){var p=c*c+s*s,d=f*f+l*l,v=(l*p-s*d)/h,g=(c*d-f*p)/h,_=q_.pop()||new function(){es(this),this.x=this.y=this.arc=this.site=this.cy=null};_.arc=t,_.site=i,_.x=v+u,_.y=(_.cy=g+a)+Math.sqrt(v*v+g*g),t.circle=_;for(var y=null,m=R_._;m;)if(_.y<m.y||_.y===m.y&&_.x<=m.x){if(!m.L){y=m.P;break}m=m.L}else{if(!m.R){y=m;break}m=m.R}R_.insert(y,_),y||(C_=_)}}}}function vs(t){var n=t.circle;n&&(n.P||(C_=n.N),R_.remove(n),q_.push(n),es(n),t.circle=null)}function gs(t){var n=D_.pop()||new function(){es(this),this.edge=this.site=this.circle=null};return n.site=t,n}function _s(t){vs(t),z_.remove(t),D_.push(t),es(t)}function ys(t){var n=t.circle,e=n.x,r=n.cy,i=[e,r],o=t.P,u=t.N,a=[t];_s(t);for(var c=o;c.circle&&Math.abs(e-c.circle.x)<U_&&Math.abs(r-c.circle.cy)<U_;)o=c.P,a.unshift(c),_s(c),c=o;a.unshift(c),vs(c);for(var s=u;s.circle&&Math.abs(e-s.circle.x)<U_&&Math.abs(r-s.circle.cy)<U_;)u=s.N,a.push(s),_s(s),s=u;a.push(s),vs(s);var f,l=a.length;for(f=1;f<l;++f)s=a[f],c=a[f-1],cs(s.edge,c.site,s.site,i);c=a[0],(s=a[l-1]).edge=us(c.site,s.site,null,i),ds(c),ds(s)}function ms(t){for(var n,e,r,i,o=t[0],u=t[1],a=z_._;a;)if((r=xs(a,u)-o)>U_)a=a.L;else{if(!((i=o-function(t,n){var e=t.N;if(e)return xs(e,n);var r=t.site;return r[1]===n?r[0]:1/0}(a,u))>U_)){r>-U_?(n=a.P,e=a):i>-U_?(n=a,e=a.N):n=e=a;break}if(!a.R){n=a;break}a=a.R}(function(t){P_[t.index]={site:t,halfedges:[]}})(t);var c=gs(t);if(z_.insert(n,c),n||e){if(n===e)return vs(n),e=gs(n.site),z_.insert(c,e),c.edge=e.edge=us(n.site,c.site),ds(n),void ds(e);if(e){vs(n),vs(e);var s=n.site,f=s[0],l=s[1],h=t[0]-f,p=t[1]-l,d=e.site,v=d[0]-f,g=d[1]-l,_=2*(h*g-p*v),y=h*h+p*p,m=v*v+g*g,x=[(g*y-p*m)/_+f,(h*m-v*y)/_+l];cs(e.edge,s,d,x),c.edge=us(s,t,null,x),e.edge=us(t,d,null,x),ds(n),ds(e)}else c.edge=us(n.site,c.site)}}function xs(t,n){var e=t.site,r=e[0],i=e[1],o=i-n;if(!o)return r;var u=t.P;if(!u)return-1/0;var a=(e=u.site)[0],c=e[1],s=c-n;if(!s)return a;var f=a-r,l=1/o-1/s,h=f/s;return l?(-h+Math.sqrt(h*h-2*l*(f*f/(-2*s)-c+s/2+i-o/2)))/l+r:(r+a)/2}function bs(t,n,e){return(t[0]-e[0])*(n[1]-t[1])-(t[0]-n[0])*(e[1]-t[1])}function ws(t,n){return n[1]-t[1]||n[0]-t[0]}function Ms(t,n){var e,r,i,o=t.sort(ws).pop();for(L_=[],P_=new Array(t.length),z_=new ns,R_=new 
ns;;)if(i=C_,o&&(!i||o[1]<i.y||o[1]===i.y&&o[0]<i.x))o[0]===e&&o[1]===r||(ms(o),e=o[0],r=o[1]),o=t.pop();else{if(!i)break;ys(i.arc)}if(function(){for(var t,n,e,r,i=0,o=P_.length;i<o;++i)if((t=P_[i])&&(r=(n=t.halfedges).length)){var u=new Array(r),a=new Array(r);for(e=0;e<r;++e)u[e]=e,a[e]=ls(t,L_[n[e]]);for(u.sort(function(t,n){return a[n]-a[t]}),e=0;e<r;++e)a[e]=n[u[e]];for(e=0;e<r;++e)n[e]=a[e]}}(),n){var u=+n[0][0],a=+n[0][1],c=+n[1][0],s=+n[1][1];(function(t,n,e,r){for(var i,o=L_.length;o--;)fs(i=L_[o],t,n,e,r)&&ss(i,t,n,e,r)&&(Math.abs(i[0][0]-i[1][0])>U_||Math.abs(i[0][1]-i[1][1])>U_)||delete L_[o]})(u,a,c,s),function(t,n,e,r){var i,o,u,a,c,s,f,l,h,p,d,v,g=P_.length,_=!0;for(i=0;i<g;++i)if(o=P_[i]){for(u=o.site,a=(c=o.halfedges).length;a--;)L_[c[a]]||c.splice(a,1);for(a=0,s=c.length;a<s;)d=(p=ps(o,L_[c[a]]))[0],v=p[1],l=(f=hs(o,L_[c[++a%s]]))[0],h=f[1],(Math.abs(d-l)>U_||Math.abs(v-h)>U_)&&(c.splice(a,0,L_.push(as(u,p,Math.abs(d-t)<U_&&r-v>U_?[t,Math.abs(l-t)<U_?h:r]:Math.abs(v-r)<U_&&e-d>U_?[Math.abs(h-r)<U_?l:e,r]:Math.abs(d-e)<U_&&v-n>U_?[e,Math.abs(l-e)<U_?h:n]:Math.abs(v-n)<U_&&d-t>U_?[Math.abs(h-n)<U_?l:t,n]:null))-1),++s);s&&(_=!1)}if(_){var y,m,x,b=1/0;for(i=0,_=null;i<g;++i)(o=P_[i])&&(x=(y=(u=o.site)[0]-t)*y+(m=u[1]-n)*m)<b&&(b=x,_=o);if(_){var w=[t,n],M=[t,r],T=[e,r],N=[e,n];_.halfedges.push(L_.push(as(u=_.site,w,M))-1,L_.push(as(u,M,T))-1,L_.push(as(u,T,N))-1,L_.push(as(u,N,w))-1)}}for(i=0;i<g;++i)(o=P_[i])&&(o.halfedges.length||delete P_[i])}(u,a,c,s)}this.edges=L_,this.cells=P_,z_=R_=L_=P_=null}function Ts(t){return function(){return t}}function Ns(t,n,e){this.k=t,this.x=n,this.y=e}function ks(t){return t.__zoom||F_}function Ss(){t.event.stopImmediatePropagation()}function Es(){t.event.preventDefault(),t.event.stopImmediatePropagation()}function As(){return!t.event.button}function Cs(){var t,n,e=this;return e instanceof SVGElement?(t=(e=e.ownerSVGElement||e).width.baseVal.value,n=e.height.baseVal.value):(t=e.clientWidth,n=e.clientHeight),[[0,0],[t,n]]}function zs(){return this.__zoom||F_}function Ps(){return-t.event.deltaY*(t.event.deltaMode?120:1)/500}function Rs(){return"ontouchstart"in this}function Ls(t,n,e){var r=t.invertX(n[0][0])-e[0][0],i=t.invertX(n[1][0])-e[1][0],o=t.invertY(n[0][1])-e[0][1],u=t.invertY(n[1][1])-e[1][1];return t.translate(i>r?(r+i)/2:Math.min(0,r)||Math.max(0,i),u>o?(o+u)/2:Math.min(0,o)||Math.max(0,u))}var qs=e(n),Ds=qs.right,Us=qs.left,Os=Array.prototype,Fs=Os.slice,Is=Os.map,Ys=Math.sqrt(50),Bs=Math.sqrt(10),Hs=Math.sqrt(2),js=Array.prototype.slice,Xs=1,Vs=2,$s=3,Ws=4,Zs=1e-6,Gs={value:function(){}};k.prototype=N.prototype={constructor:k,on:function(t,n){var e,r=this._,i=function(t,n){return t.trim().split(/^|\s+/).map(function(t){var e="",r=t.indexOf(".");if(r>=0&&(e=t.slice(r+1),t=t.slice(0,r)),t&&!n.hasOwnProperty(t))throw new Error("unknown type: "+t);return{type:t,name:e}})}(t+"",r),o=-1,u=i.length;{if(!(arguments.length<2)){if(null!=n&&"function"!=typeof n)throw new Error("invalid callback: "+n);for(;++o<u;)if(e=(t=i[o]).type)r[e]=S(r[e],t.name,n);else if(null==n)for(e in r)r[e]=S(r[e],t.name,null);return this}for(;++o<u;)if((e=(t=i[o]).type)&&(e=function(t,n){for(var e,r=0,i=t.length;r<i;++r)if((e=t[r]).name===n)return e.value}(r[e],t.name)))return e}},copy:function(){var t={},n=this._;for(var e in n)t[e]=n[e].slice();return new k(t)},call:function(t,n){if((e=arguments.length-2)>0)for(var e,r,i=new Array(e),o=0;o<e;++o)i[o]=arguments[o+2];if(!this._.hasOwnProperty(t))throw new Error("unknown type: 
"+t);for(o=0,e=(r=this._[t]).length;o<e;++o)r[o].value.apply(n,i)},apply:function(t,n,e){if(!this._.hasOwnProperty(t))throw new Error("unknown type: "+t);for(var r=this._[t],i=0,o=r.length;i<o;++i)r[i].value.apply(n,e)}};var Qs="http://www.w3.org/1999/xhtml",Js={svg:"http://www.w3.org/2000/svg",xhtml:Qs,xlink:"http://www.w3.org/1999/xlink",xml:"http://www.w3.org/XML/1998/namespace",xmlns:"http://www.w3.org/2000/xmlns/"},Ks=0;z.prototype=C.prototype={constructor:z,get:function(t){for(var n=this._;!(n in t);)if(!(t=t.parentNode))return;return t[n]},set:function(t,n){return t[this._]=n},remove:function(t){return this._ in t&&delete t[this._]},toString:function(){return this._}};var tf=function(t){return function(){return this.matches(t)}};if("undefined"!=typeof document){var nf=document.documentElement;if(!nf.matches){var ef=nf.webkitMatchesSelector||nf.msMatchesSelector||nf.mozMatchesSelector||nf.oMatchesSelector;tf=function(t){return function(){return ef.call(this,t)}}}}var rf=tf,of={};if(t.event=null,"undefined"!=typeof document){"onmouseenter"in document.documentElement||(of={mouseenter:"mouseover",mouseleave:"mouseout"})}X.prototype={constructor:X,appendChild:function(t){return this._parent.insertBefore(t,this._next)},insertBefore:function(t,n){return this._parent.insertBefore(t,n)},querySelector:function(t){return this._parent.querySelector(t)},querySelectorAll:function(t){return this._parent.querySelectorAll(t)}};var uf="$";K.prototype={add:function(t){this._names.indexOf(t)<0&&(this._names.push(t),this._node.setAttribute("class",this._names.join(" ")))},remove:function(t){var n=this._names.indexOf(t);n>=0&&(this._names.splice(n,1),this._node.setAttribute("class",this._names.join(" ")))},contains:function(t){return this._names.indexOf(t)>=0}};var af=[null];st.prototype=ft.prototype={constructor:st,select:function(t){"function"!=typeof t&&(t=Y(t));for(var n=this._groups,e=n.length,r=new Array(e),i=0;i<e;++i)for(var o,u,a=n[i],c=a.length,s=r[i]=new Array(c),f=0;f<c;++f)(o=a[f])&&(u=t.call(o,o.__data__,f,a))&&("__data__"in o&&(u.__data__=o.__data__),s[f]=u);return new st(r,this._parents)},selectAll:function(t){"function"!=typeof t&&(t=H(t));for(var n=this._groups,e=n.length,r=[],i=[],o=0;o<e;++o)for(var u,a=n[o],c=a.length,s=0;s<c;++s)(u=a[s])&&(r.push(t.call(u,u.__data__,s,a)),i.push(u));return new st(r,i)},filter:function(t){"function"!=typeof t&&(t=rf(t));for(var n=this._groups,e=n.length,r=new Array(e),i=0;i<e;++i)for(var o,u=n[i],a=u.length,c=r[i]=[],s=0;s<a;++s)(o=u[s])&&t.call(o,o.__data__,s,u)&&c.push(o);return new st(r,this._parents)},data:function(t,n){if(!t)return p=new Array(this.size()),s=-1,this.each(function(t){p[++s]=t}),p;var e=n?$:V,r=this._parents,i=this._groups;"function"!=typeof t&&(t=function(t){return function(){return t}}(t));for(var o=i.length,u=new Array(o),a=new Array(o),c=new Array(o),s=0;s<o;++s){var f=r[s],l=i[s],h=l.length,p=t.call(f,f&&f.__data__,s,r),d=p.length,v=a[s]=new Array(d),g=u[s]=new Array(d);e(f,l,v,g,c[s]=new Array(h),p,n);for(var _,y,m=0,x=0;m<d;++m)if(_=v[m]){for(m>=x&&(x=m+1);!(y=g[x])&&++x<d;);_._next=y||null}}return u=new st(u,r),u._enter=a,u._exit=c,u},enter:function(){return new st(this._enter||this._groups.map(j),this._parents)},exit:function(){return new st(this._exit||this._groups.map(j),this._parents)},merge:function(t){for(var n=this._groups,e=t._groups,r=n.length,i=e.length,o=Math.min(r,i),u=new Array(r),a=0;a<o;++a)for(var c,s=n[a],f=e[a],l=s.length,h=u[a]=new 
Array(l),p=0;p<l;++p)(c=s[p]||f[p])&&(h[p]=c);for(;a<r;++a)u[a]=n[a];return new st(u,this._parents)},order:function(){for(var t=this._groups,n=-1,e=t.length;++n<e;)for(var r,i=t[n],o=i.length-1,u=i[o];--o>=0;)(r=i[o])&&(u&&u!==r.nextSibling&&u.parentNode.insertBefore(r,u),u=r);return this},sort:function(t){function n(n,e){return n&&e?t(n.__data__,e.__data__):!n-!e}t||(t=W);for(var e=this._groups,r=e.length,i=new Array(r),o=0;o<r;++o){for(var u,a=e[o],c=a.length,s=i[o]=new Array(c),f=0;f<c;++f)(u=a[f])&&(s[f]=u);s.sort(n)}return new st(i,this._parents).order()},call:function(){var t=arguments[0];return arguments[0]=this,t.apply(null,arguments),this},nodes:function(){var t=new Array(this.size()),n=-1;return this.each(function(){t[++n]=this}),t},node:function(){for(var t=this._groups,n=0,e=t.length;n<e;++n)for(var r=t[n],i=0,o=r.length;i<o;++i){var u=r[i];if(u)return u}return null},size:function(){var t=0;return this.each(function(){++t}),t},empty:function(){return!this.node()},each:function(t){for(var n=this._groups,e=0,r=n.length;e<r;++e)for(var i,o=n[e],u=0,a=o.length;u<a;++u)(i=o[u])&&t.call(i,i.__data__,u,o);return this},attr:function(t,n){var e=E(t);if(arguments.length<2){var r=this.node();return e.local?r.getAttributeNS(e.space,e.local):r.getAttribute(e)}return this.each((null==n?e.local?function(t){return function(){this.removeAttributeNS(t.space,t.local)}}:function(t){return function(){this.removeAttribute(t)}}:"function"==typeof n?e.local?function(t,n){return function(){var e=n.apply(this,arguments);null==e?this.removeAttributeNS(t.space,t.local):this.setAttributeNS(t.space,t.local,e)}}:function(t,n){return function(){var e=n.apply(this,arguments);null==e?this.removeAttribute(t):this.setAttribute(t,e)}}:e.local?function(t,n){return function(){this.setAttributeNS(t.space,t.local,n)}}:function(t,n){return function(){this.setAttribute(t,n)}})(e,n))},style:function(t,n,e){return arguments.length>1?this.each((null==n?function(t){return function(){this.style.removeProperty(t)}}:"function"==typeof n?function(t,n,e){return function(){var r=n.apply(this,arguments);null==r?this.style.removeProperty(t):this.style.setProperty(t,r,e)}}:function(t,n,e){return function(){this.style.setProperty(t,n,e)}})(t,n,null==e?"":e)):G(this.node(),t)},property:function(t,n){return arguments.length>1?this.each((null==n?function(t){return function(){delete this[t]}}:"function"==typeof n?function(t,n){return function(){var e=n.apply(this,arguments);null==e?delete this[t]:this[t]=e}}:function(t,n){return function(){this[t]=n}})(t,n)):this.node()[t]},classed:function(t,n){var e=Q(t+"");if(arguments.length<2){for(var r=J(this.node()),i=-1,o=e.length;++i<o;)if(!r.contains(e[i]))return!1;return!0}return this.each(("function"==typeof n?function(t,n){return function(){(n.apply(this,arguments)?tt:nt)(this,t)}}:n?function(t){return function(){tt(this,t)}}:function(t){return function(){nt(this,t)}})(e,n))},text:function(t){return arguments.length?this.each(null==t?et:("function"==typeof t?function(t){return function(){var n=t.apply(this,arguments);this.textContent=null==n?"":n}}:function(t){return function(){this.textContent=t}})(t)):this.node().textContent},html:function(t){return arguments.length?this.each(null==t?rt:("function"==typeof t?function(t){return function(){var n=t.apply(this,arguments);this.innerHTML=null==n?"":n}}:function(t){return function(){this.innerHTML=t}})(t)):this.node().innerHTML},raise:function(){return this.each(it)},lower:function(){return this.each(ot)},append:function(t){var 
n="function"==typeof t?t:A(t);return this.select(function(){return this.appendChild(n.apply(this,arguments))})},insert:function(t,n){var e="function"==typeof t?t:A(t),r=null==n?ut:"function"==typeof n?n:Y(n);return this.select(function(){return this.insertBefore(e.apply(this,arguments),r.apply(this,arguments)||null)})},remove:function(){return this.each(at)},datum:function(t){return arguments.length?this.property("__data__",t):this.node().__data__},on:function(t,n,e){var r,i,o=function(t){return t.trim().split(/^|\s+/).map(function(t){var n="",e=t.indexOf(".");return e>=0&&(n=t.slice(e+1),t=t.slice(0,e)),{type:t,name:n}})}(t+""),u=o.length;if(!(arguments.length<2)){for(a=n?q:L,null==e&&(e=!1),r=0;r<u;++r)this.each(a(o[r],n,e));return this}var a=this.node().__on;if(a)for(var c,s=0,f=a.length;s<f;++s)for(r=0,c=a[s];r<u;++r)if((i=o[r]).type===c.type&&i.name===c.name)return c.value},dispatch:function(t,n){return this.each(("function"==typeof n?function(t,n){return function(){return ct(this,t,n.apply(this,arguments))}}:function(t,n){return function(){return ct(this,t,n)}})(t,n))}},yt.prototype.on=function(){var t=this._.on.apply(this._,arguments);return t===this._?this:t};var cf="\\s*([+-]?\\d+)\\s*",sf="\\s*([+-]?\\d*\\.?\\d+(?:[eE][+-]?\\d+)?)\\s*",ff="\\s*([+-]?\\d*\\.?\\d+(?:[eE][+-]?\\d+)?)%\\s*",lf=/^#([0-9a-f]{3})$/,hf=/^#([0-9a-f]{6})$/,pf=new RegExp("^rgb\\("+[cf,cf,cf]+"\\)$"),df=new RegExp("^rgb\\("+[ff,ff,ff]+"\\)$"),vf=new RegExp("^rgba\\("+[cf,cf,cf,sf]+"\\)$"),gf=new RegExp("^rgba\\("+[ff,ff,ff,sf]+"\\)$"),_f=new RegExp("^hsl\\("+[sf,ff,ff]+"\\)$"),yf=new RegExp("^hsla\\("+[sf,ff,ff,sf]+"\\)$"),mf={aliceblue:15792383,antiquewhite:16444375,aqua:65535,aquamarine:8388564,azure:15794175,beige:16119260,bisque:16770244,black:0,blanchedalmond:16772045,blue:255,blueviolet:9055202,brown:10824234,burlywood:14596231,cadetblue:6266528,chartreuse:8388352,chocolate:13789470,coral:16744272,cornflowerblue:6591981,cornsilk:16775388,crimson:14423100,cyan:65535,darkblue:139,darkcyan:35723,darkgoldenrod:12092939,darkgray:11119017,darkgreen:25600,darkgrey:11119017,darkkhaki:12433259,darkmagenta:9109643,darkolivegreen:5597999,darkorange:16747520,darkorchid:10040012,darkred:9109504,darksalmon:15308410,darkseagreen:9419919,darkslateblue:4734347,darkslategray:3100495,darkslategrey:3100495,darkturquoise:52945,darkviolet:9699539,deeppink:16716947,deepskyblue:49151,dimgray:6908265,dimgrey:6908265,dodgerblue:2003199,firebrick:11674146,floralwhite:16775920,forestgreen:2263842,fuchsia:16711935,gainsboro:14474460,ghostwhite:16316671,gold:16766720,goldenrod:14329120,gray:8421504,green:32768,greenyellow:11403055,grey:8421504,honeydew:15794160,hotpink:16738740,indianred:13458524,indigo:4915330,ivory:16777200,khaki:15787660,lavender:15132410,lavenderblush:16773365,lawngreen:8190976,lemonchiffon:16775885,lightblue:11393254,lightcoral:15761536,lightcyan:14745599,lightgoldenrodyellow:16448210,lightgray:13882323,lightgreen:9498256,lightgrey:13882323,lightpink:16758465,lightsalmon:16752762,lightseagreen:2142890,lightskyblue:8900346,lightslategray:7833753,lightslategrey:7833753,lightsteelblue:11584734,lightyellow:16777184,lime:65280,limegreen:3329330,linen:16445670,magenta:16711935,maroon:8388608,mediumaquamarine:6737322,mediumblue:205,mediumorchid:12211667,mediumpurple:9662683,mediumseagreen:3978097,mediumslateblue:8087790,mediumspringgreen:64154,mediumturquoise:4772300,mediumvioletred:13047173,midnightblue:1644912,mintcream:16121850,mistyrose:16770273,moccasin:16770229,navajowhite:16768685,navy:128,oldlace:16643558,oliv
e:8421376,olivedrab:7048739,orange:16753920,orangered:16729344,orchid:14315734,palegoldenrod:15657130,palegreen:10025880,paleturquoise:11529966,palevioletred:14381203,papayawhip:16773077,peachpuff:16767673,peru:13468991,pink:16761035,plum:14524637,powderblue:11591910,purple:8388736,rebeccapurple:6697881,red:16711680,rosybrown:12357519,royalblue:4286945,saddlebrown:9127187,salmon:16416882,sandybrown:16032864,seagreen:3050327,seashell:16774638,sienna:10506797,silver:12632256,skyblue:8900331,slateblue:6970061,slategray:7372944,slategrey:7372944,snow:16775930,springgreen:65407,steelblue:4620980,tan:13808780,teal:32896,thistle:14204888,tomato:16737095,turquoise:4251856,violet:15631086,wheat:16113331,white:16777215,whitesmoke:16119285,yellow:16776960,yellowgreen:10145074};Mt(Nt,kt,{displayable:function(){return this.rgb().displayable()},toString:function(){return this.rgb()+""}}),Mt(zt,Ct,Tt(Nt,{brighter:function(t){return t=null==t?1/.7:Math.pow(1/.7,t),new zt(this.r*t,this.g*t,this.b*t,this.opacity)},darker:function(t){return t=null==t?.7:Math.pow(.7,t),new zt(this.r*t,this.g*t,this.b*t,this.opacity)},rgb:function(){return this},displayable:function(){return 0<=this.r&&this.r<=255&&0<=this.g&&this.g<=255&&0<=this.b&&this.b<=255&&0<=this.opacity&&this.opacity<=1},toString:function(){var t=this.opacity;return(1===(t=isNaN(t)?1:Math.max(0,Math.min(1,t)))?"rgb(":"rgba(")+Math.max(0,Math.min(255,Math.round(this.r)||0))+", "+Math.max(0,Math.min(255,Math.round(this.g)||0))+", "+Math.max(0,Math.min(255,Math.round(this.b)||0))+(1===t?")":", "+t+")")}})),Mt(Lt,Rt,Tt(Nt,{brighter:function(t){return t=null==t?1/.7:Math.pow(1/.7,t),new Lt(this.h,this.s,this.l*t,this.opacity)},darker:function(t){return t=null==t?.7:Math.pow(.7,t),new Lt(this.h,this.s,this.l*t,this.opacity)},rgb:function(){var t=this.h%360+360*(this.h<0),n=isNaN(t)||isNaN(this.s)?0:this.s,e=this.l,r=e+(e<.5?e:1-e)*n,i=2*e-r;return new zt(qt(t>=240?t-240:t+120,i,r),qt(t,i,r),qt(t<120?t+240:t-120,i,r),this.opacity)},displayable:function(){return(0<=this.s&&this.s<=1||isNaN(this.s))&&0<=this.l&&this.l<=1&&0<=this.opacity&&this.opacity<=1}}));var xf=Math.PI/180,bf=180/Math.PI,wf=.95047,Mf=1,Tf=1.08883,Nf=4/29,kf=6/29,Sf=3*kf*kf,Ef=kf*kf*kf;Mt(Ot,Ut,Tt(Nt,{brighter:function(t){return new Ot(this.l+18*(null==t?1:t),this.a,this.b,this.opacity)},darker:function(t){return new Ot(this.l-18*(null==t?1:t),this.a,this.b,this.opacity)},rgb:function(){var t=(this.l+16)/116,n=isNaN(this.a)?t:t+this.a/500,e=isNaN(this.b)?t:t-this.b/200;return t=Mf*It(t),n=wf*It(n),e=Tf*It(e),new zt(Yt(3.2404542*n-1.5371385*t-.4985314*e),Yt(-.969266*n+1.8760108*t+.041556*e),Yt(.0556434*n-.2040259*t+1.0572252*e),this.opacity)}})),Mt(jt,Ht,Tt(Nt,{brighter:function(t){return new jt(this.h,this.c,this.l+18*(null==t?1:t),this.opacity)},darker:function(t){return new jt(this.h,this.c,this.l-18*(null==t?1:t),this.opacity)},rgb:function(){return Dt(this).rgb()}}));var Af=-.29227,Cf=-.90649,zf=1.97294,Pf=zf*Cf,Rf=1.78277*zf,Lf=1.78277*Af- -.14861*Cf;Mt(Vt,Xt,Tt(Nt,{brighter:function(t){return t=null==t?1/.7:Math.pow(1/.7,t),new Vt(this.h,this.s,this.l*t,this.opacity)},darker:function(t){return t=null==t?.7:Math.pow(.7,t),new Vt(this.h,this.s,this.l*t,this.opacity)},rgb:function(){var t=isNaN(this.h)?0:(this.h+120)*xf,n=+this.l,e=isNaN(this.s)?0:this.s*n*(1-n),r=Math.cos(t),i=Math.sin(t);return new zt(255*(n+e*(-.14861*r+1.78277*i)),255*(n+e*(Af*r+Cf*i)),255*(n+e*(zf*r)),this.opacity)}}));var qf,Df,Uf,Of,Ff,If,Yf=function t(n){function e(t,n){var 
e=r((t=Ct(t)).r,(n=Ct(n)).r),i=r(t.g,n.g),o=r(t.b,n.b),u=tn(t.opacity,n.opacity);return function(n){return t.r=e(n),t.g=i(n),t.b=o(n),t.opacity=u(n),t+""}}var r=Kt(n);return e.gamma=t,e}(1),Bf=nn(Wt),Hf=nn(Zt),jf=/[-+]?(?:\d+\.?\d*|\.?\d+)(?:[eE][-+]?\d+)?/g,Xf=new RegExp(jf.source,"g"),Vf=180/Math.PI,$f={translateX:0,translateY:0,rotate:0,skewX:0,scaleX:1,scaleY:1},Wf=ln(function(t){return"none"===t?$f:(qf||(qf=document.createElement("DIV"),Df=document.documentElement,Uf=document.defaultView),qf.style.transform=t,t=Uf.getComputedStyle(Df.appendChild(qf),null).getPropertyValue("transform"),Df.removeChild(qf),t=t.slice(7,-1).split(","),fn(+t[0],+t[1],+t[2],+t[3],+t[4],+t[5]))},"px, ","px)","deg)"),Zf=ln(function(t){return null==t?$f:(Of||(Of=document.createElementNS("http://www.w3.org/2000/svg","g")),Of.setAttribute("transform",t),(t=Of.transform.baseVal.consolidate())?(t=t.matrix,fn(t.a,t.b,t.c,t.d,t.e,t.f)):$f)},", ",")",")"),Gf=Math.SQRT2,Qf=2,Jf=4,Kf=1e-12,tl=dn(Jt),nl=dn(tn),el=vn(Jt),rl=vn(tn),il=gn(Jt),ol=gn(tn),ul=0,al=0,cl=0,sl=1e3,fl=0,ll=0,hl=0,pl="object"==typeof performance&&performance.now?performance:Date,dl="object"==typeof window&&window.requestAnimationFrame?window.requestAnimationFrame.bind(window):function(t){setTimeout(t,17)};mn.prototype=xn.prototype={constructor:mn,restart:function(t,n,e){if("function"!=typeof t)throw new TypeError("callback is not a function");e=(null==e?_n():+e)+(null==n?0:+n),this._next||If===this||(If?If._next=this:Ff=this,If=this),this._call=t,this._time=e,Tn()},stop:function(){this._call&&(this._call=null,this._time=1/0,Tn())}};var vl=N("start","end","interrupt"),gl=[],_l=0,yl=1,ml=2,xl=3,bl=4,wl=5,Ml=6,Tl=ft.prototype.constructor,Nl=0,kl=ft.prototype;Rn.prototype=Ln.prototype={constructor:Rn,select:function(t){var n=this._name,e=this._id;"function"!=typeof t&&(t=Y(t));for(var r=this._groups,i=r.length,o=new Array(i),u=0;u<i;++u)for(var a,c,s=r[u],f=s.length,l=o[u]=new Array(f),h=0;h<f;++h)(a=s[h])&&(c=t.call(a,a.__data__,h,s))&&("__data__"in a&&(c.__data__=a.__data__),l[h]=c,kn(l[h],n,e,h,l,An(a,e)));return new Rn(o,this._parents,n,e)},selectAll:function(t){var n=this._name,e=this._id;"function"!=typeof t&&(t=H(t));for(var r=this._groups,i=r.length,o=[],u=[],a=0;a<i;++a)for(var c,s=r[a],f=s.length,l=0;l<f;++l)if(c=s[l]){for(var h,p=t.call(c,c.__data__,l,s),d=An(c,e),v=0,g=p.length;v<g;++v)(h=p[v])&&kn(h,n,e,v,p,d);o.push(p),u.push(c)}return new Rn(o,u,n,e)},filter:function(t){"function"!=typeof t&&(t=rf(t));for(var n=this._groups,e=n.length,r=new Array(e),i=0;i<e;++i)for(var o,u=n[i],a=u.length,c=r[i]=[],s=0;s<a;++s)(o=u[s])&&t.call(o,o.__data__,s,u)&&c.push(o);return new Rn(r,this._parents,this._name,this._id)},merge:function(t){if(t._id!==this._id)throw new Error;for(var n=this._groups,e=t._groups,r=n.length,i=e.length,o=Math.min(r,i),u=new Array(r),a=0;a<o;++a)for(var c,s=n[a],f=e[a],l=s.length,h=u[a]=new Array(l),p=0;p<l;++p)(c=s[p]||f[p])&&(h[p]=c);for(;a<r;++a)u[a]=n[a];return new Rn(u,this._parents,this._name,this._id)},selection:function(){return new Tl(this._groups,this._parents)},transition:function(){for(var t=this._name,n=this._id,e=qn(),r=this._groups,i=r.length,o=0;o<i;++o)for(var u,a=r[o],c=a.length,s=0;s<c;++s)if(u=a[s]){var f=An(u,n);kn(u,t,e,s,a,{time:f.time+f.delay+f.duration,delay:0,duration:f.duration,ease:f.ease})}return new Rn(r,this._parents,t,e)},call:kl.call,nodes:kl.nodes,node:kl.node,size:kl.size,empty:kl.empty,each:kl.each,on:function(t,n){var e=this._id;return 
arguments.length<2?An(this.node(),e).on.on(t):this.each(function(t,n,e){var r,i,o=function(t){return(t+"").trim().split(/^|\s+/).every(function(t){var n=t.indexOf(".");return n>=0&&(t=t.slice(0,n)),!t||"start"===t})}(n)?Sn:En;return function(){var u=o(this,t),a=u.on;a!==r&&(i=(r=a).copy()).on(n,e),u.on=i}}(e,t,n))},attr:function(t,n){var e=E(t),r="transform"===e?Zf:Pn;return this.attrTween(t,"function"==typeof n?(e.local?function(t,n,e){var r,i,o;return function(){var u,a=e(this);if(null!=a)return(u=this.getAttributeNS(t.space,t.local))===a?null:u===r&&a===i?o:o=n(r=u,i=a);this.removeAttributeNS(t.space,t.local)}}:function(t,n,e){var r,i,o;return function(){var u,a=e(this);if(null!=a)return(u=this.getAttribute(t))===a?null:u===r&&a===i?o:o=n(r=u,i=a);this.removeAttribute(t)}})(e,r,zn(this,"attr."+t,n)):null==n?(e.local?function(t){return function(){this.removeAttributeNS(t.space,t.local)}}:function(t){return function(){this.removeAttribute(t)}})(e):(e.local?function(t,n,e){var r,i;return function(){var o=this.getAttributeNS(t.space,t.local);return o===e?null:o===r?i:i=n(r=o,e)}}:function(t,n,e){var r,i;return function(){var o=this.getAttribute(t);return o===e?null:o===r?i:i=n(r=o,e)}})(e,r,n+""))},attrTween:function(t,n){var e="attr."+t;if(arguments.length<2)return(e=this.tween(e))&&e._value;if(null==n)return this.tween(e,null);if("function"!=typeof n)throw new Error;var r=E(t);return this.tween(e,(r.local?function(t,n){function e(){var e=this,r=n.apply(e,arguments);return r&&function(n){e.setAttributeNS(t.space,t.local,r(n))}}return e._value=n,e}:function(t,n){function e(){var e=this,r=n.apply(e,arguments);return r&&function(n){e.setAttribute(t,r(n))}}return e._value=n,e})(r,n))},style:function(t,n,e){var r="transform"==(t+="")?Wf:Pn;return null==n?this.styleTween(t,function(t,n){var e,r,i;return function(){var o=G(this,t),u=(this.style.removeProperty(t),G(this,t));return o===u?null:o===e&&u===r?i:i=n(e=o,r=u)}}(t,r)).on("end.style."+t,function(t){return function(){this.style.removeProperty(t)}}(t)):this.styleTween(t,"function"==typeof n?function(t,n,e){var r,i,o;return function(){var u=G(this,t),a=e(this);return null==a&&(this.style.removeProperty(t),a=G(this,t)),u===a?null:u===r&&a===i?o:o=n(r=u,i=a)}}(t,r,zn(this,"style."+t,n)):function(t,n,e){var r,i;return function(){var o=G(this,t);return o===e?null:o===r?i:i=n(r=o,e)}}(t,r,n+""),e)},styleTween:function(t,n,e){var r="style."+(t+="");if(arguments.length<2)return(r=this.tween(r))&&r._value;if(null==n)return this.tween(r,null);if("function"!=typeof n)throw new Error;return this.tween(r,function(t,n,e){function r(){var r=this,i=n.apply(r,arguments);return i&&function(n){r.style.setProperty(t,i(n),e)}}return r._value=n,r}(t,n,null==e?"":e))},text:function(t){return this.tween("text","function"==typeof t?function(t){return function(){var n=t(this);this.textContent=null==n?"":n}}(zn(this,"text",t)):function(t){return function(){this.textContent=t}}(null==t?"":t+""))},remove:function(){return this.on("end.remove",function(t){return function(){var n=this.parentNode;for(var e in this.__transition)if(+e!==t)return;n&&n.removeChild(this)}}(this._id))},tween:function(t,n){var e=this._id;if(t+="",arguments.length<2){for(var r,i=An(this.node(),e).tween,o=0,u=i.length;o<u;++o)if((r=i[o]).name===t)return r.value;return null}return this.each((null==n?function(t,n){var e,r;return function(){var i=En(this,t),o=i.tween;if(o!==e)for(var u=0,a=(r=e=o).length;u<a;++u)if(r[u].name===n){(r=r.slice()).splice(u,1);break}i.tween=r}}:function(t,n,e){var 
r,i;if("function"!=typeof e)throw new Error;return function(){var o=En(this,t),u=o.tween;if(u!==r){i=(r=u).slice();for(var a={name:n,value:e},c=0,s=i.length;c<s;++c)if(i[c].name===n){i[c]=a;break}c===s&&i.push(a)}o.tween=i}})(e,t,n))},delay:function(t){var n=this._id;return arguments.length?this.each(("function"==typeof t?function(t,n){return function(){Sn(this,t).delay=+n.apply(this,arguments)}}:function(t,n){return n=+n,function(){Sn(this,t).delay=n}})(n,t)):An(this.node(),n).delay},duration:function(t){var n=this._id;return arguments.length?this.each(("function"==typeof t?function(t,n){return function(){En(this,t).duration=+n.apply(this,arguments)}}:function(t,n){return n=+n,function(){En(this,t).duration=n}})(n,t)):An(this.node(),n).duration},ease:function(t){var n=this._id;return arguments.length?this.each(function(t,n){if("function"!=typeof n)throw new Error;return function(){En(this,t).ease=n}}(n,t)):An(this.node(),n).ease}};var Sl=function t(n){function e(t){return Math.pow(t,n)}return n=+n,e.exponent=t,e}(3),El=function t(n){function e(t){return 1-Math.pow(1-t,n)}return n=+n,e.exponent=t,e}(3),Al=function t(n){function e(t){return((t*=2)<=1?Math.pow(t,n):2-Math.pow(2-t,n))/2}return n=+n,e.exponent=t,e}(3),Cl=Math.PI,zl=Cl/2,Pl=4/11,Rl=6/11,Ll=8/11,ql=.75,Dl=9/11,Ul=10/11,Ol=.9375,Fl=21/22,Il=63/64,Yl=1/Pl/Pl,Bl=function t(n){function e(t){return t*t*((n+1)*t-n)}return n=+n,e.overshoot=t,e}(1.70158),Hl=function t(n){function e(t){return--t*t*((n+1)*t+n)+1}return n=+n,e.overshoot=t,e}(1.70158),jl=function t(n){function e(t){return((t*=2)<1?t*t*((n+1)*t-n):(t-=2)*t*((n+1)*t+n)+2)/2}return n=+n,e.overshoot=t,e}(1.70158),Xl=2*Math.PI,Vl=function t(n,e){function r(t){return n*Math.pow(2,10*--t)*Math.sin((i-t)/e)}var i=Math.asin(1/(n=Math.max(1,n)))*(e/=Xl);return r.amplitude=function(n){return t(n,e*Xl)},r.period=function(e){return t(n,e)},r}(1,.3),$l=function t(n,e){function r(t){return 1-n*Math.pow(2,-10*(t=+t))*Math.sin((t+i)/e)}var i=Math.asin(1/(n=Math.max(1,n)))*(e/=Xl);return r.amplitude=function(n){return t(n,e*Xl)},r.period=function(e){return t(n,e)},r}(1,.3),Wl=function t(n,e){function r(t){return((t=2*t-1)<0?n*Math.pow(2,10*t)*Math.sin((i-t)/e):2-n*Math.pow(2,-10*t)*Math.sin((i+t)/e))/2}var i=Math.asin(1/(n=Math.max(1,n)))*(e/=Xl);return r.amplitude=function(n){return t(n,e*Xl)},r.period=function(e){return t(n,e)},r}(1,.3),Zl={time:null,delay:0,duration:250,ease:Un};ft.prototype.interrupt=function(t){return this.each(function(){Cn(this,t)})},ft.prototype.transition=function(t){var n,e;t instanceof Rn?(n=t._id,t=t._name):(n=qn(),(e=Zl).time=_n(),t=null==t?null:t+"");for(var r=this._groups,i=r.length,o=0;o<i;++o)for(var u,a=r[o],c=a.length,s=0;s<c;++s)(u=a[s])&&kn(u,t,n,s,a,e||Bn(u,n));return new Rn(r,this._parents,t,n)};var Gl=[null],Ql={name:"drag"},Jl={name:"space"},Kl={name:"handle"},th={name:"center"},nh={name:"x",handles:["e","w"].map(Vn),input:function(t,n){return t&&[[t[0],n[0][1]],[t[1],n[1][1]]]},output:function(t){return t&&[t[0][0],t[1][0]]}},eh={name:"y",handles:["n","s"].map(Vn),input:function(t,n){return t&&[[n[0][0],t[0]],[n[1][0],t[1]]]},output:function(t){return t&&[t[0][1],t[1][1]]}},rh={name:"xy",handles:["n","e","s","w","nw","ne","se","sw"].map(Vn),input:function(t){return t},output:function(t){return 
t}},ih={overlay:"crosshair",selection:"move",n:"ns-resize",e:"ew-resize",s:"ns-resize",w:"ew-resize",nw:"nwse-resize",ne:"nesw-resize",se:"nwse-resize",sw:"nesw-resize"},oh={e:"w",w:"e",nw:"ne",ne:"nw",se:"sw",sw:"se"},uh={n:"s",s:"n",nw:"sw",ne:"se",se:"ne",sw:"nw"},ah={overlay:1,selection:1,n:null,e:1,s:null,w:-1,nw:-1,ne:1,se:1,sw:-1},ch={overlay:1,selection:1,n:-1,e:null,s:1,w:null,nw:-1,ne:-1,se:1,sw:1},sh=Math.cos,fh=Math.sin,lh=Math.PI,hh=lh/2,ph=2*lh,dh=Math.max,vh=Array.prototype.slice,gh=Math.PI,_h=2*gh,yh=_h-1e-6;Kn.prototype=te.prototype={constructor:Kn,moveTo:function(t,n){this._+="M"+(this._x0=this._x1=+t)+","+(this._y0=this._y1=+n)},closePath:function(){null!==this._x1&&(this._x1=this._x0,this._y1=this._y0,this._+="Z")},lineTo:function(t,n){this._+="L"+(this._x1=+t)+","+(this._y1=+n)},quadraticCurveTo:function(t,n,e,r){this._+="Q"+ +t+","+ +n+","+(this._x1=+e)+","+(this._y1=+r)},bezierCurveTo:function(t,n,e,r,i,o){this._+="C"+ +t+","+ +n+","+ +e+","+ +r+","+(this._x1=+i)+","+(this._y1=+o)},arcTo:function(t,n,e,r,i){t=+t,n=+n,e=+e,r=+r,i=+i;var o=this._x1,u=this._y1,a=e-t,c=r-n,s=o-t,f=u-n,l=s*s+f*f;if(i<0)throw new Error("negative radius: "+i);if(null===this._x1)this._+="M"+(this._x1=t)+","+(this._y1=n);else if(l>1e-6)if(Math.abs(f*a-c*s)>1e-6&&i){var h=e-o,p=r-u,d=a*a+c*c,v=h*h+p*p,g=Math.sqrt(d),_=Math.sqrt(l),y=i*Math.tan((gh-Math.acos((d+l-v)/(2*g*_)))/2),m=y/_,x=y/g;Math.abs(m-1)>1e-6&&(this._+="L"+(t+m*s)+","+(n+m*f)),this._+="A"+i+","+i+",0,0,"+ +(f*h>s*p)+","+(this._x1=t+x*a)+","+(this._y1=n+x*c)}else this._+="L"+(this._x1=t)+","+(this._y1=n);else;},arc:function(t,n,e,r,i,o){t=+t,n=+n;var u=(e=+e)*Math.cos(r),a=e*Math.sin(r),c=t+u,s=n+a,f=1^o,l=o?r-i:i-r;if(e<0)throw new Error("negative radius: "+e);null===this._x1?this._+="M"+c+","+s:(Math.abs(this._x1-c)>1e-6||Math.abs(this._y1-s)>1e-6)&&(this._+="L"+c+","+s),e&&(l<0&&(l=l%_h+_h),l>yh?this._+="A"+e+","+e+",0,1,"+f+","+(t-u)+","+(n-a)+"A"+e+","+e+",0,1,"+f+","+(this._x1=c)+","+(this._y1=s):l>1e-6&&(this._+="A"+e+","+e+",0,"+ +(l>=gh)+","+f+","+(this._x1=t+e*Math.cos(i))+","+(this._y1=n+e*Math.sin(i))))},rect:function(t,n,e,r){this._+="M"+(this._x0=this._x1=+t)+","+(this._y0=this._y1=+n)+"h"+ +e+"v"+ +r+"h"+-e+"Z"},toString:function(){return this._}};ue.prototype=ae.prototype={constructor:ue,has:function(t){return"$"+t in this},get:function(t){return this["$"+t]},set:function(t,n){return this["$"+t]=n,this},remove:function(t){var n="$"+t;return n in this&&delete this[n]},clear:function(){for(var t in this)"$"===t[0]&&delete this[t]},keys:function(){var t=[];for(var n in this)"$"===n[0]&&t.push(n.slice(1));return t},values:function(){var t=[];for(var n in this)"$"===n[0]&&t.push(this[n]);return t},entries:function(){var t=[];for(var n in this)"$"===n[0]&&t.push({key:n.slice(1),value:this[n]});return t},size:function(){var t=0;for(var n in this)"$"===n[0]&&++t;return t},empty:function(){for(var t in this)if("$"===t[0])return!1;return!0},each:function(t){for(var n in this)"$"===n[0]&&t(this[n],n.slice(1),this)}};var mh=ae.prototype;he.prototype=pe.prototype={constructor:he,has:mh.has,add:function(t){return t+="",this["$"+t]=t,this},remove:mh.remove,clear:mh.clear,values:mh.keys,size:mh.size,empty:mh.empty,each:mh.each};var xh={},bh={},wh=34,Mh=10,Th=13,Nh=ve(","),kh=Nh.parse,Sh=Nh.parseRows,Eh=Nh.format,Ah=Nh.formatRows,Ch=ve("\t"),zh=Ch.parse,Ph=Ch.parseRows,Rh=Ch.format,Lh=Ch.formatRows,qh=we.prototype=Me.prototype;qh.copy=function(){var t,n,e=new 
Me(this._x,this._y,this._x0,this._y0,this._x1,this._y1),r=this._root;if(!r)return e;if(!r.length)return e._root=Te(r),e;for(t=[{source:r,target:e._root=new Array(4)}];r=t.pop();)for(var i=0;i<4;++i)(n=r.source[i])&&(n.length?t.push({source:n,target:r.target[i]=new Array(4)}):r.target[i]=Te(n));return e},qh.add=function(t){var n=+this._x.call(null,t),e=+this._y.call(null,t);return ye(this.cover(n,e),n,e,t)},qh.addAll=function(t){var n,e,r,i,o=t.length,u=new Array(o),a=new Array(o),c=1/0,s=1/0,f=-1/0,l=-1/0;for(e=0;e<o;++e)isNaN(r=+this._x.call(null,n=t[e]))||isNaN(i=+this._y.call(null,n))||(u[e]=r,a[e]=i,r<c&&(c=r),r>f&&(f=r),i<s&&(s=i),i>l&&(l=i));for(f<c&&(c=this._x0,f=this._x1),l<s&&(s=this._y0,l=this._y1),this.cover(c,s).cover(f,l),e=0;e<o;++e)ye(this,u[e],a[e],t[e]);return this},qh.cover=function(t,n){if(isNaN(t=+t)||isNaN(n=+n))return this;var e=this._x0,r=this._y0,i=this._x1,o=this._y1;if(isNaN(e))i=(e=Math.floor(t))+1,o=(r=Math.floor(n))+1;else{if(!(e>t||t>i||r>n||n>o))return this;var u,a,c=i-e,s=this._root;switch(a=(n<(r+o)/2)<<1|t<(e+i)/2){case 0:do{u=new Array(4),u[a]=s,s=u}while(c*=2,i=e+c,o=r+c,t>i||n>o);break;case 1:do{u=new Array(4),u[a]=s,s=u}while(c*=2,e=i-c,o=r+c,e>t||n>o);break;case 2:do{u=new Array(4),u[a]=s,s=u}while(c*=2,i=e+c,r=o-c,t>i||r>n);break;case 3:do{u=new Array(4),u[a]=s,s=u}while(c*=2,e=i-c,r=o-c,e>t||r>n)}this._root&&this._root.length&&(this._root=s)}return this._x0=e,this._y0=r,this._x1=i,this._y1=o,this},qh.data=function(){var t=[];return this.visit(function(n){if(!n.length)do{t.push(n.data)}while(n=n.next)}),t},qh.extent=function(t){return arguments.length?this.cover(+t[0][0],+t[0][1]).cover(+t[1][0],+t[1][1]):isNaN(this._x0)?void 0:[[this._x0,this._y0],[this._x1,this._y1]]},qh.find=function(t,n,e){var r,i,o,u,a,c,s,f=this._x0,l=this._y0,h=this._x1,p=this._y1,d=[],v=this._root;for(v&&d.push(new me(v,f,l,h,p)),null==e?e=1/0:(f=t-e,l=n-e,h=t+e,p=n+e,e*=e);c=d.pop();)if(!(!(v=c.node)||(i=c.x0)>h||(o=c.y0)>p||(u=c.x1)<f||(a=c.y1)<l))if(v.length){var g=(i+u)/2,_=(o+a)/2;d.push(new me(v[3],g,_,u,a),new me(v[2],i,_,g,a),new me(v[1],g,o,u,_),new me(v[0],i,o,g,_)),(s=(n>=_)<<1|t>=g)&&(c=d[d.length-1],d[d.length-1]=d[d.length-1-s],d[d.length-1-s]=c)}else{var y=t-+this._x.call(null,v.data),m=n-+this._y.call(null,v.data),x=y*y+m*m;if(x<e){var b=Math.sqrt(e=x);f=t-b,l=n-b,h=t+b,p=n+b,r=v.data}}return r},qh.remove=function(t){if(isNaN(o=+this._x.call(null,t))||isNaN(u=+this._y.call(null,t)))return this;var n,e,r,i,o,u,a,c,s,f,l,h,p=this._root,d=this._x0,v=this._y0,g=this._x1,_=this._y1;if(!p)return this;if(p.length)for(;;){if((s=o>=(a=(d+g)/2))?d=a:g=a,(f=u>=(c=(v+_)/2))?v=c:_=c,n=p,!(p=p[l=f<<1|s]))return this;if(!p.length)break;(n[l+1&3]||n[l+2&3]||n[l+3&3])&&(e=n,h=l)}for(;p.data!==t;)if(r=p,!(p=p.next))return this;return(i=p.next)&&delete p.next,r?(i?r.next=i:delete r.next,this):n?(i?n[l]=i:delete n[l],(p=n[0]||n[1]||n[2]||n[3])&&p===(n[3]||n[2]||n[1]||n[0])&&!p.length&&(e?e[h]=p:this._root=p),this):(this._root=i,this)},qh.removeAll=function(t){for(var n=0,e=t.length;n<e;++n)this.remove(t[n]);return this},qh.root=function(){return this._root},qh.size=function(){var t=0;return this.visit(function(n){if(!n.length)do{++t}while(n=n.next)}),t},qh.visit=function(t){var n,e,r,i,o,u,a=[],c=this._root;for(c&&a.push(new me(c,this._x0,this._y0,this._x1,this._y1));n=a.pop();)if(!t(c=n.node,r=n.x0,i=n.y0,o=n.x1,u=n.y1)&&c.length){var s=(r+o)/2,f=(i+u)/2;(e=c[3])&&a.push(new me(e,s,f,o,u)),(e=c[2])&&a.push(new me(e,r,f,s,u)),(e=c[1])&&a.push(new 
me(e,s,i,o,f)),(e=c[0])&&a.push(new me(e,r,i,s,f))}return this},qh.visitAfter=function(t){var n,e=[],r=[];for(this._root&&e.push(new me(this._root,this._x0,this._y0,this._x1,this._y1));n=e.pop();){var i=n.node;if(i.length){var o,u=n.x0,a=n.y0,c=n.x1,s=n.y1,f=(u+c)/2,l=(a+s)/2;(o=i[0])&&e.push(new me(o,u,a,f,l)),(o=i[1])&&e.push(new me(o,f,a,c,l)),(o=i[2])&&e.push(new me(o,u,l,f,s)),(o=i[3])&&e.push(new me(o,f,l,c,s))}r.push(n)}for(;n=r.pop();)t(n.node,n.x0,n.y0,n.x1,n.y1);return this},qh.x=function(t){return arguments.length?(this._x=t,this):this._x},qh.y=function(t){return arguments.length?(this._y=t,this):this._y};var Dh,Uh=10,Oh=Math.PI*(3-Math.sqrt(5)),Fh={"":function(t,n){t:for(var e,r=(t=t.toPrecision(n)).length,i=1,o=-1;i<r;++i)switch(t[i]){case".":o=e=i;break;case"0":0===o&&(o=i),e=i;break;case"e":break t;default:o>0&&(o=0)}return o>0?t.slice(0,o)+t.slice(e+1):t},"%":function(t,n){return(100*t).toFixed(n)},b:function(t){return Math.round(t).toString(2)},c:function(t){return t+""},d:function(t){return Math.round(t).toString(10)},e:function(t,n){return t.toExponential(n)},f:function(t,n){return t.toFixed(n)},g:function(t,n){return t.toPrecision(n)},o:function(t){return Math.round(t).toString(8)},p:function(t,n){return Re(100*t,n)},r:Re,s:function(t,n){var e=ze(t,n);if(!e)return t+"";var r=e[0],i=e[1],o=i-(Dh=3*Math.max(-8,Math.min(8,Math.floor(i/3))))+1,u=r.length;return o===u?r:o>u?r+new Array(o-u+1).join("0"):o>0?r.slice(0,o)+"."+r.slice(o):"0."+new Array(1-o).join("0")+ze(t,Math.max(0,n+o-1))[0]},X:function(t){return Math.round(t).toString(16).toUpperCase()},x:function(t){return Math.round(t).toString(16)}},Ih=/^(?:(.)?([<>=^]))?([+\-\( ])?([$#])?(0)?(\d+)?(,)?(\.\d+)?([a-z%])?$/i;Le.prototype=qe.prototype,qe.prototype.toString=function(){return this.fill+this.align+this.sign+this.symbol+(this.zero?"0":"")+(null==this.width?"":Math.max(1,0|this.width))+(this.comma?",":"")+(null==this.precision?"":"."+Math.max(0,0|this.precision))+this.type};var Yh,Bh=["y","z","a","f","p","n","µ","m","","k","M","G","T","P","E","Z","Y"];Oe({decimal:".",thousands:",",grouping:[3],currency:["$",""]}),He.prototype={constructor:He,reset:function(){this.s=this.t=0},add:function(t){je(xp,t,this.t),je(this,xp.s,this.s),this.s?this.t+=xp.t:this.s=xp.t},valueOf:function(){return this.s}};var Hh,jh,Xh,Vh,$h,Wh,Zh,Gh,Qh,Jh,Kh,tp,np,ep,rp,ip,op,up,ap,cp,sp,fp,lp,hp,pp,dp,vp,gp,_p,yp,mp,xp=new He,bp=1e-6,wp=1e-12,Mp=Math.PI,Tp=Mp/2,Np=Mp/4,kp=2*Mp,Sp=180/Mp,Ep=Mp/180,Ap=Math.abs,Cp=Math.atan,zp=Math.atan2,Pp=Math.cos,Rp=Math.ceil,Lp=Math.exp,qp=Math.log,Dp=Math.pow,Up=Math.sin,Op=Math.sign||function(t){return t>0?1:t<0?-1:0},Fp=Math.sqrt,Ip=Math.tan,Yp={Feature:function(t,n){Ze(t.geometry,n)},FeatureCollection:function(t,n){for(var e=t.features,r=-1,i=e.length;++r<i;)Ze(e[r].geometry,n)}},Bp={Sphere:function(t,n){n.sphere()},Point:function(t,n){t=t.coordinates,n.point(t[0],t[1],t[2])},MultiPoint:function(t,n){for(var e=t.coordinates,r=-1,i=e.length;++r<i;)t=e[r],n.point(t[0],t[1],t[2])},LineString:function(t,n){Ge(t.coordinates,n,0)},MultiLineString:function(t,n){for(var e=t.coordinates,r=-1,i=e.length;++r<i;)Ge(e[r],n,0)},Polygon:function(t,n){Qe(t.coordinates,n)},MultiPolygon:function(t,n){for(var e=t.coordinates,r=-1,i=e.length;++r<i;)Qe(e[r],n)},GeometryCollection:function(t,n){for(var e=t.geometries,r=-1,i=e.length;++r<i;)Ze(e[r],n)}},Hp=Be(),jp=Be(),Xp={point:We,lineStart:We,lineEnd:We,polygonStart:function(){Hp.reset(),Xp.lineStart=Ke,Xp.lineEnd=tr},polygonEnd:function(){var 
t=+Hp;jp.add(t<0?kp+t:t),this.lineStart=this.lineEnd=this.point=We},sphere:function(){jp.add(kp)}},Vp=Be(),$p={point:fr,lineStart:hr,lineEnd:pr,polygonStart:function(){$p.point=dr,$p.lineStart=vr,$p.lineEnd=gr,Vp.reset(),Xp.polygonStart()},polygonEnd:function(){Xp.polygonEnd(),$p.point=fr,$p.lineStart=hr,$p.lineEnd=pr,Hp<0?(Wh=-(Gh=180),Zh=-(Qh=90)):Vp>bp?Qh=90:Vp<-bp&&(Zh=-90),rp[0]=Wh,rp[1]=Gh}},Wp={sphere:We,point:xr,lineStart:wr,lineEnd:Nr,polygonStart:function(){Wp.lineStart=kr,Wp.lineEnd=Sr},polygonEnd:function(){Wp.lineStart=wr,Wp.lineEnd=Nr}};Pr.invert=Pr;var Zp,Gp,Qp,Jp,Kp,td,nd,ed,rd,id,od,ud=Be(),ad=Vr(function(){return!0},function(t){var n,e=NaN,r=NaN,i=NaN;return{lineStart:function(){t.lineStart(),n=1},point:function(o,u){var a=o>0?Mp:-Mp,c=Ap(o-e);Ap(c-Mp)<bp?(t.point(e,r=(r+u)/2>0?Tp:-Tp),t.point(i,r),t.lineEnd(),t.lineStart(),t.point(a,r),t.point(o,r),n=0):i!==a&&c>=Mp&&(Ap(e-i)<bp&&(e-=i*bp),Ap(o-a)<bp&&(o-=a*bp),r=function(t,n,e,r){var i,o,u=Up(t-e);return Ap(u)>bp?Cp((Up(n)*(o=Pp(r))*Up(e)-Up(r)*(i=Pp(n))*Up(t))/(i*o*u)):(n+r)/2}(e,r,o,u),t.point(i,r),t.lineEnd(),t.lineStart(),t.point(a,r),n=0),t.point(e=o,r=u),i=a},lineEnd:function(){t.lineEnd(),e=r=NaN},clean:function(){return 2-n}}},function(t,n,e,r){var i;if(null==t)i=e*Tp,r.point(-Mp,i),r.point(0,i),r.point(Mp,i),r.point(Mp,0),r.point(Mp,-i),r.point(0,-i),r.point(-Mp,-i),r.point(-Mp,0),r.point(-Mp,i);else if(Ap(t[0]-n[0])>bp){var o=t[0]<n[0]?Mp:-Mp;i=e*o/2,r.point(-o,i),r.point(0,i),r.point(o,i)}else r.point(n[0],n[1])},[-Mp,-Tp]),cd=1e9,sd=-cd,fd=Be(),ld={sphere:We,point:We,lineStart:function(){ld.point=Jr,ld.lineEnd=Qr},lineEnd:We,polygonStart:We,polygonEnd:We},hd=[null,null],pd={type:"LineString",coordinates:hd},dd={Feature:function(t,n){return ei(t.geometry,n)},FeatureCollection:function(t,n){for(var e=t.features,r=-1,i=e.length;++r<i;)if(ei(e[r].geometry,n))return!0;return!1}},vd={Sphere:function(){return!0},Point:function(t,n){return ri(t.coordinates,n)},MultiPoint:function(t,n){for(var e=t.coordinates,r=-1,i=e.length;++r<i;)if(ri(e[r],n))return!0;return!1},LineString:function(t,n){return ii(t.coordinates,n)},MultiLineString:function(t,n){for(var e=t.coordinates,r=-1,i=e.length;++r<i;)if(ii(e[r],n))return!0;return!1},Polygon:function(t,n){return oi(t.coordinates,n)},MultiPolygon:function(t,n){for(var e=t.coordinates,r=-1,i=e.length;++r<i;)if(oi(e[r],n))return!0;return!1},GeometryCollection:function(t,n){for(var e=t.geometries,r=-1,i=e.length;++r<i;)if(ei(e[r],n))return!0;return!1}},gd=Be(),_d=Be(),yd={point:We,lineStart:We,lineEnd:We,polygonStart:function(){yd.lineStart=hi,yd.lineEnd=vi},polygonEnd:function(){yd.lineStart=yd.lineEnd=yd.point=We,gd.add(Ap(_d)),_d.reset()},result:function(){var t=gd/2;return gd.reset(),t}},md=1/0,xd=md,bd=-md,wd=bd,Md={point:function(t,n){t<md&&(md=t),t>bd&&(bd=t),n<xd&&(xd=n),n>wd&&(wd=n)},lineStart:We,lineEnd:We,polygonStart:We,polygonEnd:We,result:function(){var t=[[md,xd],[bd,wd]];return bd=wd=-(xd=md=1/0),t}},Td=0,Nd=0,kd=0,Sd=0,Ed=0,Ad=0,Cd=0,zd=0,Pd=0,Rd={point:gi,lineStart:_i,lineEnd:xi,polygonStart:function(){Rd.lineStart=bi,Rd.lineEnd=wi},polygonEnd:function(){Rd.point=gi,Rd.lineStart=_i,Rd.lineEnd=xi},result:function(){var t=Pd?[Cd/Pd,zd/Pd]:Ad?[Sd/Ad,Ed/Ad]:kd?[Td/kd,Nd/kd]:[NaN,NaN];return Td=Nd=kd=Sd=Ed=Ad=Cd=zd=Pd=0,t}};Ni.prototype={_radius:4.5,pointRadius:function(t){return 
this._radius=t,this},polygonStart:function(){this._line=0},polygonEnd:function(){this._line=NaN},lineStart:function(){this._point=0},lineEnd:function(){0===this._line&&this._context.closePath(),this._point=NaN},point:function(t,n){switch(this._point){case 0:this._context.moveTo(t,n),this._point=1;break;case 1:this._context.lineTo(t,n);break;default:this._context.moveTo(t+this._radius,n),this._context.arc(t,n,this._radius,0,kp)}},result:We};var Ld,qd,Dd,Ud,Od,Fd=Be(),Id={point:We,lineStart:function(){Id.point=ki},lineEnd:function(){Ld&&Si(qd,Dd),Id.point=We},polygonStart:function(){Ld=!0},polygonEnd:function(){Ld=null},result:function(){var t=+Fd;return Fd.reset(),t}};Ei.prototype={_radius:4.5,_circle:Ai(4.5),pointRadius:function(t){return(t=+t)!==this._radius&&(this._radius=t,this._circle=null),this},polygonStart:function(){this._line=0},polygonEnd:function(){this._line=NaN},lineStart:function(){this._point=0},lineEnd:function(){0===this._line&&this._string.push("Z"),this._point=NaN},point:function(t,n){switch(this._point){case 0:this._string.push("M",t,",",n),this._point=1;break;case 1:this._string.push("L",t,",",n);break;default:null==this._circle&&(this._circle=Ai(this._radius)),this._string.push("M",t,",",n,this._circle)}},result:function(){if(this._string.length){var t=this._string.join("");return this._string=[],t}return null}},zi.prototype={constructor:zi,point:function(t,n){this.stream.point(t,n)},sphere:function(){this.stream.sphere()},lineStart:function(){this.stream.lineStart()},lineEnd:function(){this.stream.lineEnd()},polygonStart:function(){this.stream.polygonStart()},polygonEnd:function(){this.stream.polygonEnd()}};var Yd=16,Bd=Pp(30*Ep),Hd=Ci({point:function(t,n){this.stream.point(t*Ep,n*Ep)}}),jd=ji(function(t){return Fp(2/(1+t))});jd.invert=Xi(function(t){return 2*Ve(t/2)});var Xd=ji(function(t){return(t=Xe(t))&&t/Up(t)});Xd.invert=Xi(function(t){return t}),Vi.invert=function(t,n){return[t,2*Cp(Lp(n))-Tp]},Gi.invert=Gi,Ji.invert=Xi(Cp),to.invert=function(t,n){var e,r=n,i=25;do{var o=r*r,u=o*o;r-=e=(r*(1.007226+o*(.015085+u*(.028874*o-.044475-.005916*u)))-n)/(1.007226+o*(.045255+u*(.259866*o-.311325-.005916*11*u)))}while(Ap(e)>bp&&--i>0);return[t/(.8707+(o=r*r)*(o*(o*o*o*(.003971-.001529*o)-.013791)-.131979)),r]},no.invert=Xi(Ve),eo.invert=Xi(function(t){return 2*Cp(t)}),ro.invert=function(t,n){return[-n,2*Cp(Lp(t))-Tp]},ho.prototype=co.prototype={constructor:ho,count:function(){return this.eachAfter(ao)},each:function(t){var n,e,r,i,o=this,u=[o];do{for(n=u.reverse(),u=[];o=n.pop();)if(t(o),e=o.children)for(r=0,i=e.length;r<i;++r)u.push(e[r])}while(u.length);return this},eachAfter:function(t){for(var n,e,r,i=this,o=[i],u=[];i=o.pop();)if(u.push(i),n=i.children)for(e=0,r=n.length;e<r;++e)o.push(n[e]);for(;i=u.pop();)t(i);return this},eachBefore:function(t){for(var n,e,r=this,i=[r];r=i.pop();)if(t(r),n=r.children)for(e=n.length-1;e>=0;--e)i.push(n[e]);return this},sum:function(t){return this.eachAfter(function(n){for(var e=+t(n.data)||0,r=n.children,i=r&&r.length;--i>=0;)e+=r[i].value;n.value=e})},sort:function(t){return this.eachBefore(function(n){n.children&&n.children.sort(t)})},path:function(t){for(var n=this,e=function(t,n){if(t===n)return t;var e=t.ancestors(),r=n.ancestors(),i=null;for(t=e.pop(),n=r.pop();t===n;)i=t,t=e.pop(),n=r.pop();return i}(n,t),r=[n];n!==e;)n=n.parent,r.push(n);for(var i=r.length;t!==e;)r.splice(i,0,t),t=t.parent;return r},ancestors:function(){for(var t=this,n=[t];t=t.parent;)n.push(t);return n},descendants:function(){var t=[];return 
this.each(function(n){t.push(n)}),t},leaves:function(){var t=[];return this.eachBefore(function(n){n.children||t.push(n)}),t},links:function(){var t=this,n=[];return t.each(function(e){e!==t&&n.push({source:e.parent,target:e})}),n},copy:function(){return co(this).eachBefore(fo)}};var Vd=Array.prototype.slice,$d="$",Wd={depth:-1},Zd={};Yo.prototype=Object.create(ho.prototype);var Gd=(1+Math.sqrt(5))/2,Qd=function t(n){function e(t,e,r,i,o){Ho(n,t,e,r,i,o)}return e.ratio=function(n){return t((n=+n)>1?n:1)},e}(Gd),Jd=function t(n){function e(t,e,r,i,o){if((u=t._squarify)&&u.ratio===n)for(var u,a,c,s,f,l=-1,h=u.length,p=t.value;++l<h;){for(c=(a=u[l]).children,s=a.value=0,f=c.length;s<f;++s)a.value+=c[s].value;a.dice?Ro(a,e,r,i,r+=(o-r)*a.value/p):Bo(a,e,r,e+=(i-e)*a.value/p,o),p-=a.value}else t._squarify=u=Ho(n,t,e,r,i,o),u.ratio=n}return e.ratio=function(n){return t((n=+n)>1?n:1)},e}(Gd),Kd=[].slice,tv={};$o.prototype=Qo.prototype={constructor:$o,defer:function(t){if("function"!=typeof t)throw new Error("invalid callback");if(this._call)throw new Error("defer after await");if(null!=this._error)return this;var n=Kd.call(arguments,1);return n.push(t),++this._waiting,this._tasks.push(n),Wo(this),this},abort:function(){return null==this._error&&Zo(this,new Error("abort")),this},await:function(t){if("function"!=typeof t)throw new Error("invalid callback");if(this._call)throw new Error("multiple await");return this._call=function(n,e){t.apply(null,[n].concat(e))},Go(this),this},awaitAll:function(t){if("function"!=typeof t)throw new Error("invalid callback");if(this._call)throw new Error("multiple await");return this._call=t,Go(this),this}};var nv=function t(n){function e(t,e){return t=null==t?0:+t,e=null==e?1:+e,1===arguments.length?(e=t,t=0):e-=t,function(){return n()*e+t}}return e.source=t,e}(Jo),ev=function t(n){function e(t,e){var r,i;return t=null==t?0:+t,e=null==e?1:+e,function(){var o;if(null!=r)o=r,r=null;else do{r=2*n()-1,o=2*n()-1,i=r*r+o*o}while(!i||i>1);return t+e*o*Math.sqrt(-2*Math.log(i)/i)}}return e.source=t,e}(Jo),rv=function t(n){function e(){var t=ev.source(n).apply(this,arguments);return function(){return Math.exp(t())}}return e.source=t,e}(Jo),iv=function t(n){function e(t){return function(){for(var e=0,r=0;r<t;++r)e+=n();return e}}return e.source=t,e}(Jo),ov=function t(n){function e(t){var e=iv.source(n)(t);return function(){return e()/t}}return e.source=t,e}(Jo),uv=function t(n){function e(t){return function(){return-Math.log(1-n())/t}}return e.source=t,e}(Jo),av=tu("text/html",function(t){return document.createRange().createContextualFragment(t.responseText)}),cv=tu("application/json",function(t){return JSON.parse(t.responseText)}),sv=tu("text/plain",function(t){return t.responseText}),fv=tu("application/xml",function(t){var n=t.responseXML;if(!n)throw new Error("parse error");return n}),lv=nu("text/csv",kh),hv=nu("text/tab-separated-values",zh),pv=Array.prototype,dv=pv.map,vv=pv.slice,gv={name:"implicit"},_v=[0,1],yv=new Date,mv=new Date,xv=Eu(function(){},function(t,n){t.setTime(+t+n)},function(t,n){return n-t});xv.every=function(t){return t=Math.floor(t),isFinite(t)&&t>0?t>1?Eu(function(n){n.setTime(Math.floor(n/t)*t)},function(n,e){n.setTime(+n+e*t)},function(n,e){return(e-n)/t}):xv:null};var bv=xv.range,wv=6e4,Mv=6048e5,Tv=Eu(function(t){t.setTime(1e3*Math.floor(t/1e3))},function(t,n){t.setTime(+t+1e3*n)},function(t,n){return(n-t)/1e3},function(t){return 
t.getUTCSeconds()}),Nv=Tv.range,kv=Eu(function(t){t.setTime(Math.floor(t/wv)*wv)},function(t,n){t.setTime(+t+n*wv)},function(t,n){return(n-t)/wv},function(t){return t.getMinutes()}),Sv=kv.range,Ev=Eu(function(t){var n=t.getTimezoneOffset()*wv%36e5;n<0&&(n+=36e5),t.setTime(36e5*Math.floor((+t-n)/36e5)+n)},function(t,n){t.setTime(+t+36e5*n)},function(t,n){return(n-t)/36e5},function(t){return t.getHours()}),Av=Ev.range,Cv=Eu(function(t){t.setHours(0,0,0,0)},function(t,n){t.setDate(t.getDate()+n)},function(t,n){return(n-t-(n.getTimezoneOffset()-t.getTimezoneOffset())*wv)/864e5},function(t){return t.getDate()-1}),zv=Cv.range,Pv=Au(0),Rv=Au(1),Lv=Au(2),qv=Au(3),Dv=Au(4),Uv=Au(5),Ov=Au(6),Fv=Pv.range,Iv=Rv.range,Yv=Lv.range,Bv=qv.range,Hv=Dv.range,jv=Uv.range,Xv=Ov.range,Vv=Eu(function(t){t.setDate(1),t.setHours(0,0,0,0)},function(t,n){t.setMonth(t.getMonth()+n)},function(t,n){return n.getMonth()-t.getMonth()+12*(n.getFullYear()-t.getFullYear())},function(t){return t.getMonth()}),$v=Vv.range,Wv=Eu(function(t){t.setMonth(0,1),t.setHours(0,0,0,0)},function(t,n){t.setFullYear(t.getFullYear()+n)},function(t,n){return n.getFullYear()-t.getFullYear()},function(t){return t.getFullYear()});Wv.every=function(t){return isFinite(t=Math.floor(t))&&t>0?Eu(function(n){n.setFullYear(Math.floor(n.getFullYear()/t)*t),n.setMonth(0,1),n.setHours(0,0,0,0)},function(n,e){n.setFullYear(n.getFullYear()+e*t)}):null};var Zv=Wv.range,Gv=Eu(function(t){t.setUTCSeconds(0,0)},function(t,n){t.setTime(+t+n*wv)},function(t,n){return(n-t)/wv},function(t){return t.getUTCMinutes()}),Qv=Gv.range,Jv=Eu(function(t){t.setUTCMinutes(0,0,0)},function(t,n){t.setTime(+t+36e5*n)},function(t,n){return(n-t)/36e5},function(t){return t.getUTCHours()}),Kv=Jv.range,tg=Eu(function(t){t.setUTCHours(0,0,0,0)},function(t,n){t.setUTCDate(t.getUTCDate()+n)},function(t,n){return(n-t)/864e5},function(t){return t.getUTCDate()-1}),ng=tg.range,eg=Cu(0),rg=Cu(1),ig=Cu(2),og=Cu(3),ug=Cu(4),ag=Cu(5),cg=Cu(6),sg=eg.range,fg=rg.range,lg=ig.range,hg=og.range,pg=ug.range,dg=ag.range,vg=cg.range,gg=Eu(function(t){t.setUTCDate(1),t.setUTCHours(0,0,0,0)},function(t,n){t.setUTCMonth(t.getUTCMonth()+n)},function(t,n){return n.getUTCMonth()-t.getUTCMonth()+12*(n.getUTCFullYear()-t.getUTCFullYear())},function(t){return t.getUTCMonth()}),_g=gg.range,yg=Eu(function(t){t.setUTCMonth(0,1),t.setUTCHours(0,0,0,0)},function(t,n){t.setUTCFullYear(t.getUTCFullYear()+n)},function(t,n){return n.getUTCFullYear()-t.getUTCFullYear()},function(t){return t.getUTCFullYear()});yg.every=function(t){return isFinite(t=Math.floor(t))&&t>0?Eu(function(n){n.setUTCFullYear(Math.floor(n.getUTCFullYear()/t)*t),n.setUTCMonth(0,1),n.setUTCHours(0,0,0,0)},function(n,e){n.setUTCFullYear(n.getUTCFullYear()+e*t)}):null};var mg,xg=yg.range,bg={"-":"",_:" ",0:"0"},wg=/^\s*\d+/,Mg=/^%/,Tg=/[\\^$*+?|[\]().{}]/g;Ya({dateTime:"%x, %X",date:"%-m/%-d/%Y",time:"%-I:%M:%S %p",periods:["AM","PM"],days:["Sunday","Monday","Tuesday","Wednesday","Thursday","Friday","Saturday"],shortDays:["Sun","Mon","Tue","Wed","Thu","Fri","Sat"],months:["January","February","March","April","May","June","July","August","September","October","November","December"],shortMonths:["Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec"]});var Ng="%Y-%m-%dT%H:%M:%S.%LZ",kg=Date.prototype.toISOString?function(t){return t.toISOString()}:t.utcFormat(Ng),Sg=+new Date("2000-01-01T00:00:00.000Z")?function(t){var n=new Date(t);return 
isNaN(n)?null:n}:t.utcParse(Ng),Eg=1e3,Ag=60*Eg,Cg=60*Ag,zg=24*Cg,Pg=7*zg,Rg=30*zg,Lg=365*zg,qg=Xa("1f77b4ff7f0e2ca02cd627289467bd8c564be377c27f7f7fbcbd2217becf"),Dg=Xa("393b795254a36b6ecf9c9ede6379398ca252b5cf6bcedb9c8c6d31bd9e39e7ba52e7cb94843c39ad494ad6616be7969c7b4173a55194ce6dbdde9ed6"),Ug=Xa("3182bd6baed69ecae1c6dbefe6550dfd8d3cfdae6bfdd0a231a35474c476a1d99bc7e9c0756bb19e9ac8bcbddcdadaeb636363969696bdbdbdd9d9d9"),Og=Xa("1f77b4aec7e8ff7f0effbb782ca02c98df8ad62728ff98969467bdc5b0d58c564bc49c94e377c2f7b6d27f7f7fc7c7c7bcbd22dbdb8d17becf9edae5"),Fg=ol(Xt(300,.5,0),Xt(-240,.5,1)),Ig=ol(Xt(-100,.75,.35),Xt(80,1.5,.8)),Yg=ol(Xt(260,.75,.35),Xt(80,1.5,.8)),Bg=Xt(),Hg=Va(Xa("44015444025645045745055946075a46085c460a5d460b5e470d60470e6147106347116447136548146748166848176948186a481a6c481b6d481c6e481d6f481f70482071482173482374482475482576482677482878482979472a7a472c7a472d7b472e7c472f7d46307e46327e46337f463480453581453781453882443983443a83443b84433d84433e85423f854240864241864142874144874045884046883f47883f48893e49893e4a893e4c8a3d4d8a3d4e8a3c4f8a3c508b3b518b3b528b3a538b3a548c39558c39568c38588c38598c375a8c375b8d365c8d365d8d355e8d355f8d34608d34618d33628d33638d32648e32658e31668e31678e31688e30698e306a8e2f6b8e2f6c8e2e6d8e2e6e8e2e6f8e2d708e2d718e2c718e2c728e2c738e2b748e2b758e2a768e2a778e2a788e29798e297a8e297b8e287c8e287d8e277e8e277f8e27808e26818e26828e26828e25838e25848e25858e24868e24878e23888e23898e238a8d228b8d228c8d228d8d218e8d218f8d21908d21918c20928c20928c20938c1f948c1f958b1f968b1f978b1f988b1f998a1f9a8a1e9b8a1e9c891e9d891f9e891f9f881fa0881fa1881fa1871fa28720a38620a48621a58521a68522a78522a88423a98324aa8325ab8225ac8226ad8127ad8128ae8029af7f2ab07f2cb17e2db27d2eb37c2fb47c31b57b32b67a34b67935b77937b87838b9773aba763bbb753dbc743fbc7340bd7242be7144bf7046c06f48c16e4ac16d4cc26c4ec36b50c46a52c56954c56856c66758c7655ac8645cc8635ec96260ca6063cb5f65cb5e67cc5c69cd5b6ccd5a6ece5870cf5773d05675d05477d1537ad1517cd2507fd34e81d34d84d44b86d54989d5488bd6468ed64590d74393d74195d84098d83e9bd93c9dd93ba0da39a2da37a5db36a8db34aadc32addc30b0dd2fb2dd2db5de2bb8de29bade28bddf26c0df25c2df23c5e021c8e020cae11fcde11dd0e11cd2e21bd5e21ad8e219dae319dde318dfe318e2e418e5e419e7e419eae51aece51befe51cf1e51df4e61ef6e620f8e621fbe723fde725")),jg=Va(Xa("00000401000501010601010802010902020b02020d03030f03031204041405041606051806051a07061c08071e0907200a08220b09240c09260d0a290e0b2b100b2d110c2f120d31130d34140e36150e38160f3b180f3d19103f1a10421c10441d11471e114920114b21114e22115024125325125527125829115a2a115c2c115f2d11612f116331116533106734106936106b38106c390f6e3b0f703d0f713f0f72400f74420f75440f764510774710784910784a10794c117a4e117b4f127b51127c52137c54137d56147d57157e59157e5a167e5c167f5d177f5f187f601880621980641a80651a80671b80681c816a1c816b1d816d1d816e1e81701f81721f817320817521817621817822817922827b23827c23827e24828025828125818326818426818627818827818928818b29818c29818e2a81902a81912b81932b80942c80962c80982d80992d809b2e7f9c2e7f9e2f7fa02f7fa1307ea3307ea5317ea6317da8327daa337dab337cad347cae347bb0357bb2357bb3367ab5367ab73779b83779ba3878bc3978bd3977bf3a77c03a76c23b75c43c75c53c74c73d73c83e73ca3e72cc3f71cd4071cf4070d0416fd2426fd3436ed5446dd6456cd8456cd9466bdb476adc4869de4968df4a68e04c67e24d66e34e65e44f64e55064e75263e85362e95462ea5661eb5760ec5860ed5a5fee5b5eef5d5ef05f5ef1605df2625df2645cf3655cf4675cf4695cf56b5cf66c5cf66e5cf7705cf7725cf8745cf8765cf9785df9795df97b5dfa7d5efa7f5efa815ffb835ffb8560fb8761fc8961fc8a62fc8c63fc8e64fc9065fd9266fd9467fd9668fd9869fd9a6afd9b6bfe9d6cfe9f6dfea16efea36ffea571fea772fea973feaa74feac76feae77feb078feb27afeb47bfeb67cfeb77efeb97ffebb81febd8
2febf84fec185fec287fec488fec68afec88cfeca8dfecc8ffecd90fecf92fed194fed395fed597fed799fed89afdda9cfddc9efddea0fde0a1fde2a3fde3a5fde5a7fde7a9fde9aafdebacfcecaefceeb0fcf0b2fcf2b4fcf4b6fcf6b8fcf7b9fcf9bbfcfbbdfcfdbf")),Xg=Va(Xa("00000401000501010601010802010a02020c02020e03021004031204031405041706041907051b08051d09061f0a07220b07240c08260d08290e092b10092d110a30120a32140b34150b37160b39180c3c190c3e1b0c411c0c431e0c451f0c48210c4a230c4c240c4f260c51280b53290b552b0b572d0b592f0a5b310a5c320a5e340a5f3609613809623909633b09643d09653e0966400a67420a68440a68450a69470b6a490b6a4a0c6b4c0c6b4d0d6c4f0d6c510e6c520e6d540f6d550f6d57106e59106e5a116e5c126e5d126e5f136e61136e62146e64156e65156e67166e69166e6a176e6c186e6d186e6f196e71196e721a6e741a6e751b6e771c6d781c6d7a1d6d7c1d6d7d1e6d7f1e6c801f6c82206c84206b85216b87216b88226a8a226a8c23698d23698f24699025689225689326679526679727669827669a28659b29649d29649f2a63a02a63a22b62a32c61a52c60a62d60a82e5fa92e5eab2f5ead305dae305cb0315bb1325ab3325ab43359b63458b73557b93556ba3655bc3754bd3853bf3952c03a51c13a50c33b4fc43c4ec63d4dc73e4cc83f4bca404acb4149cc4248ce4347cf4446d04545d24644d34743d44842d54a41d74b3fd84c3ed94d3dda4e3cdb503bdd513ade5238df5337e05536e15635e25734e35933e45a31e55c30e65d2fe75e2ee8602de9612bea632aeb6429eb6628ec6726ed6925ee6a24ef6c23ef6e21f06f20f1711ff1731df2741cf3761bf37819f47918f57b17f57d15f67e14f68013f78212f78410f8850ff8870ef8890cf98b0bf98c0af98e09fa9008fa9207fa9407fb9606fb9706fb9906fb9b06fb9d07fc9f07fca108fca309fca50afca60cfca80dfcaa0ffcac11fcae12fcb014fcb216fcb418fbb61afbb81dfbba1ffbbc21fbbe23fac026fac228fac42afac62df9c72ff9c932f9cb35f8cd37f8cf3af7d13df7d340f6d543f6d746f5d949f5db4cf4dd4ff4df53f4e156f3e35af3e55df2e661f2e865f2ea69f1ec6df1ed71f1ef75f1f179f2f27df2f482f3f586f3f68af4f88ef5f992f6fa96f8fb9af9fc9dfafda1fcffa4")),Vg=Va(Xa("0d088710078813078916078a19068c1b068d1d068e20068f2206902406912605912805922a05932c05942e05952f059631059733059735049837049938049a3a049a3c049b3e049c3f049c41049d43039e44039e46039f48039f4903a04b03a14c02a14e02a25002a25102a35302a35502a45601a45801a45901a55b01a55c01a65e01a66001a66100a76300a76400a76600a76700a86900a86a00a86c00a86e00a86f00a87100a87201a87401a87501a87701a87801a87a02a87b02a87d03a87e03a88004a88104a78305a78405a78606a68707a68808a68a09a58b0aa58d0ba58e0ca48f0da4910ea3920fa39410a29511a19613a19814a099159f9a169f9c179e9d189d9e199da01a9ca11b9ba21d9aa31e9aa51f99a62098a72197a82296aa2395ab2494ac2694ad2793ae2892b02991b12a90b22b8fb32c8eb42e8db52f8cb6308bb7318ab83289ba3388bb3488bc3587bd3786be3885bf3984c03a83c13b82c23c81c33d80c43e7fc5407ec6417dc7427cc8437bc9447aca457acb4679cc4778cc4977cd4a76ce4b75cf4c74d04d73d14e72d24f71d35171d45270d5536fd5546ed6556dd7566cd8576bd9586ada5a6ada5b69db5c68dc5d67dd5e66de5f65de6164df6263e06363e16462e26561e26660e3685fe4695ee56a5de56b5de66c5ce76e5be76f5ae87059e97158e97257ea7457eb7556eb7655ec7754ed7953ed7a52ee7b51ef7c51ef7e50f07f4ff0804ef1814df1834cf2844bf3854bf3874af48849f48948f58b47f58c46f68d45f68f44f79044f79143f79342f89441f89540f9973ff9983ef99a3efa9b3dfa9c3cfa9e3bfb9f3afba139fba238fca338fca537fca636fca835fca934fdab33fdac33fdae32fdaf31fdb130fdb22ffdb42ffdb52efeb72dfeb82cfeba2cfebb2bfebd2afebe2afec029fdc229fdc328fdc527fdc627fdc827fdca26fdcb26fccd25fcce25fcd025fcd225fbd324fbd524fbd724fad824fada24f9dc24f9dd25f8df25f8e125f7e225f7e425f6e626f6e826f5e926f5eb27f4ed27f3ee27f3f027f2f227f1f426f1f525f0f724f0f921")),$g=Math.abs,Wg=Math.atan2,Zg=Math.cos,Gg=Math.max,Qg=Math.min,Jg=Math.sin,Kg=Math.sqrt,t_=1e-12,n_=Math.PI,e_=n_/2,r_=2*n_;ec.prototype={areaStart:function(){this._line=0},areaEnd:function(){this._line=NaN},lineStart:function(){this._point=
0},lineEnd:function(){(this._line||0!==this._line&&1===this._point)&&this._context.closePath(),this._line=1-this._line},point:function(t,n){switch(t=+t,n=+n,this._point){case 0:this._point=1,this._line?this._context.lineTo(t,n):this._context.moveTo(t,n);break;case 1:this._point=2;default:this._context.lineTo(t,n)}}};var i_=lc(rc);fc.prototype={areaStart:function(){this._curve.areaStart()},areaEnd:function(){this._curve.areaEnd()},lineStart:function(){this._curve.lineStart()},lineEnd:function(){this._curve.lineEnd()},point:function(t,n){this._curve.point(n*Math.sin(t),n*-Math.cos(t))}};var o_=Array.prototype.slice,u_={draw:function(t,n){var e=Math.sqrt(n/n_);t.moveTo(e,0),t.arc(0,0,e,0,r_)}},a_={draw:function(t,n){var e=Math.sqrt(n/5)/2;t.moveTo(-3*e,-e),t.lineTo(-e,-e),t.lineTo(-e,-3*e),t.lineTo(e,-3*e),t.lineTo(e,-e),t.lineTo(3*e,-e),t.lineTo(3*e,e),t.lineTo(e,e),t.lineTo(e,3*e),t.lineTo(-e,3*e),t.lineTo(-e,e),t.lineTo(-3*e,e),t.closePath()}},c_=Math.sqrt(1/3),s_=2*c_,f_={draw:function(t,n){var e=Math.sqrt(n/s_),r=e*c_;t.moveTo(0,-e),t.lineTo(r,0),t.lineTo(0,e),t.lineTo(-r,0),t.closePath()}},l_=Math.sin(n_/10)/Math.sin(7*n_/10),h_=Math.sin(r_/10)*l_,p_=-Math.cos(r_/10)*l_,d_={draw:function(t,n){var e=Math.sqrt(.8908130915292852*n),r=h_*e,i=p_*e;t.moveTo(0,-e),t.lineTo(r,i);for(var o=1;o<5;++o){var u=r_*o/5,a=Math.cos(u),c=Math.sin(u);t.lineTo(c*e,-a*e),t.lineTo(a*r-c*i,c*r+a*i)}t.closePath()}},v_={draw:function(t,n){var e=Math.sqrt(n),r=-e/2;t.rect(r,r,e,e)}},g_=Math.sqrt(3),__={draw:function(t,n){var e=-Math.sqrt(n/(3*g_));t.moveTo(0,2*e),t.lineTo(-g_*e,-e),t.lineTo(g_*e,-e),t.closePath()}},y_=Math.sqrt(3)/2,m_=1/Math.sqrt(12),x_=3*(m_/2+1),b_={draw:function(t,n){var e=Math.sqrt(n/x_),r=e/2,i=e*m_,o=r,u=e*m_+e,a=-o,c=u;t.moveTo(r,i),t.lineTo(o,u),t.lineTo(a,c),t.lineTo(-.5*r-y_*i,y_*r+-.5*i),t.lineTo(-.5*o-y_*u,y_*o+-.5*u),t.lineTo(-.5*a-y_*c,y_*a+-.5*c),t.lineTo(-.5*r+y_*i,-.5*i-y_*r),t.lineTo(-.5*o+y_*u,-.5*u-y_*o),t.lineTo(-.5*a+y_*c,-.5*c-y_*a),t.closePath()}},w_=[u_,a_,f_,v_,d_,__,b_];Tc.prototype={areaStart:function(){this._line=0},areaEnd:function(){this._line=NaN},lineStart:function(){this._x0=this._x1=this._y0=this._y1=NaN,this._point=0},lineEnd:function(){switch(this._point){case 3:Mc(this,this._x1,this._y1);case 2:this._context.lineTo(this._x1,this._y1)}(this._line||0!==this._line&&1===this._point)&&this._context.closePath(),this._line=1-this._line},point:function(t,n){switch(t=+t,n=+n,this._point){case 0:this._point=1,this._line?this._context.lineTo(t,n):this._context.moveTo(t,n);break;case 1:this._point=2;break;case 2:this._point=3,this._context.lineTo((5*this._x0+this._x1)/6,(5*this._y0+this._y1)/6);default:Mc(this,t,n)}this._x0=this._x1,this._x1=t,this._y0=this._y1,this._y1=n}},Nc.prototype={areaStart:wc,areaEnd:wc,lineStart:function(){this._x0=this._x1=this._x2=this._x3=this._x4=this._y0=this._y1=this._y2=this._y3=this._y4=NaN,this._point=0},lineEnd:function(){switch(this._point){case 1:this._context.moveTo(this._x2,this._y2),this._context.closePath();break;case 2:this._context.moveTo((this._x2+2*this._x3)/3,(this._y2+2*this._y3)/3),this._context.lineTo((this._x3+2*this._x2)/3,(this._y3+2*this._y2)/3),this._context.closePath();break;case 3:this.point(this._x2,this._y2),this.point(this._x3,this._y3),this.point(this._x4,this._y4)}},point:function(t,n){switch(t=+t,n=+n,this._point){case 0:this._point=1,this._x2=t,this._y2=n;break;case 1:this._point=2,this._x3=t,this._y3=n;break;case 
2:this._point=3,this._x4=t,this._y4=n,this._context.moveTo((this._x0+4*this._x1+t)/6,(this._y0+4*this._y1+n)/6);break;default:Mc(this,t,n)}this._x0=this._x1,this._x1=t,this._y0=this._y1,this._y1=n}},kc.prototype={areaStart:function(){this._line=0},areaEnd:function(){this._line=NaN},lineStart:function(){this._x0=this._x1=this._y0=this._y1=NaN,this._point=0},lineEnd:function(){(this._line||0!==this._line&&3===this._point)&&this._context.closePath(),this._line=1-this._line},point:function(t,n){switch(t=+t,n=+n,this._point){case 0:this._point=1;break;case 1:this._point=2;break;case 2:this._point=3;var e=(this._x0+4*this._x1+t)/6,r=(this._y0+4*this._y1+n)/6;this._line?this._context.lineTo(e,r):this._context.moveTo(e,r);break;case 3:this._point=4;default:Mc(this,t,n)}this._x0=this._x1,this._x1=t,this._y0=this._y1,this._y1=n}},Sc.prototype={lineStart:function(){this._x=[],this._y=[],this._basis.lineStart()},lineEnd:function(){var t=this._x,n=this._y,e=t.length-1;if(e>0)for(var r,i=t[0],o=n[0],u=t[e]-i,a=n[e]-o,c=-1;++c<=e;)r=c/e,this._basis.point(this._beta*t[c]+(1-this._beta)*(i+r*u),this._beta*n[c]+(1-this._beta)*(o+r*a));this._x=this._y=null,this._basis.lineEnd()},point:function(t,n){this._x.push(+t),this._y.push(+n)}};var M_=function t(n){function e(t){return 1===n?new Tc(t):new Sc(t,n)}return e.beta=function(n){return t(+n)},e}(.85);Ac.prototype={areaStart:function(){this._line=0},areaEnd:function(){this._line=NaN},lineStart:function(){this._x0=this._x1=this._x2=this._y0=this._y1=this._y2=NaN,this._point=0},lineEnd:function(){switch(this._point){case 2:this._context.lineTo(this._x2,this._y2);break;case 3:Ec(this,this._x1,this._y1)}(this._line||0!==this._line&&1===this._point)&&this._context.closePath(),this._line=1-this._line},point:function(t,n){switch(t=+t,n=+n,this._point){case 0:this._point=1,this._line?this._context.lineTo(t,n):this._context.moveTo(t,n);break;case 1:this._point=2,this._x1=t,this._y1=n;break;case 2:this._point=3;default:Ec(this,t,n)}this._x0=this._x1,this._x1=this._x2,this._x2=t,this._y0=this._y1,this._y1=this._y2,this._y2=n}};var T_=function t(n){function e(t){return new Ac(t,n)}return e.tension=function(n){return t(+n)},e}(0);Cc.prototype={areaStart:wc,areaEnd:wc,lineStart:function(){this._x0=this._x1=this._x2=this._x3=this._x4=this._x5=this._y0=this._y1=this._y2=this._y3=this._y4=this._y5=NaN,this._point=0},lineEnd:function(){switch(this._point){case 1:this._context.moveTo(this._x3,this._y3),this._context.closePath();break;case 2:this._context.lineTo(this._x3,this._y3),this._context.closePath();break;case 3:this.point(this._x3,this._y3),this.point(this._x4,this._y4),this.point(this._x5,this._y5)}},point:function(t,n){switch(t=+t,n=+n,this._point){case 0:this._point=1,this._x3=t,this._y3=n;break;case 1:this._point=2,this._context.moveTo(this._x4=t,this._y4=n);break;case 2:this._point=3,this._x5=t,this._y5=n;break;default:Ec(this,t,n)}this._x0=this._x1,this._x1=this._x2,this._x2=t,this._y0=this._y1,this._y1=this._y2,this._y2=n}};var N_=function t(n){function e(t){return new Cc(t,n)}return e.tension=function(n){return t(+n)},e}(0);zc.prototype={areaStart:function(){this._line=0},areaEnd:function(){this._line=NaN},lineStart:function(){this._x0=this._x1=this._x2=this._y0=this._y1=this._y2=NaN,this._point=0},lineEnd:function(){(this._line||0!==this._line&&3===this._point)&&this._context.closePath(),this._line=1-this._line},point:function(t,n){switch(t=+t,n=+n,this._point){case 0:this._point=1;break;case 1:this._point=2;break;case 
2:this._point=3,this._line?this._context.lineTo(this._x2,this._y2):this._context.moveTo(this._x2,this._y2);break;case 3:this._point=4;default:Ec(this,t,n)}this._x0=this._x1,this._x1=this._x2,this._x2=t,this._y0=this._y1,this._y1=this._y2,this._y2=n}};var k_=function t(n){function e(t){return new zc(t,n)}return e.tension=function(n){return t(+n)},e}(0);Rc.prototype={areaStart:function(){this._line=0},areaEnd:function(){this._line=NaN},lineStart:function(){this._x0=this._x1=this._x2=this._y0=this._y1=this._y2=NaN,this._l01_a=this._l12_a=this._l23_a=this._l01_2a=this._l12_2a=this._l23_2a=this._point=0},lineEnd:function(){switch(this._point){case 2:this._context.lineTo(this._x2,this._y2);break;case 3:this.point(this._x2,this._y2)}(this._line||0!==this._line&&1===this._point)&&this._context.closePath(),this._line=1-this._line},point:function(t,n){if(t=+t,n=+n,this._point){var e=this._x2-t,r=this._y2-n;this._l23_a=Math.sqrt(this._l23_2a=Math.pow(e*e+r*r,this._alpha))}switch(this._point){case 0:this._point=1,this._line?this._context.lineTo(t,n):this._context.moveTo(t,n);break;case 1:this._point=2;break;case 2:this._point=3;default:Pc(this,t,n)}this._l01_a=this._l12_a,this._l12_a=this._l23_a,this._l01_2a=this._l12_2a,this._l12_2a=this._l23_2a,this._x0=this._x1,this._x1=this._x2,this._x2=t,this._y0=this._y1,this._y1=this._y2,this._y2=n}};var S_=function t(n){function e(t){return n?new Rc(t,n):new Ac(t,0)}return e.alpha=function(n){return t(+n)},e}(.5);Lc.prototype={areaStart:wc,areaEnd:wc,lineStart:function(){this._x0=this._x1=this._x2=this._x3=this._x4=this._x5=this._y0=this._y1=this._y2=this._y3=this._y4=this._y5=NaN,this._l01_a=this._l12_a=this._l23_a=this._l01_2a=this._l12_2a=this._l23_2a=this._point=0},lineEnd:function(){switch(this._point){case 1:this._context.moveTo(this._x3,this._y3),this._context.closePath();break;case 2:this._context.lineTo(this._x3,this._y3),this._context.closePath();break;case 3:this.point(this._x3,this._y3),this.point(this._x4,this._y4),this.point(this._x5,this._y5)}},point:function(t,n){if(t=+t,n=+n,this._point){var e=this._x2-t,r=this._y2-n;this._l23_a=Math.sqrt(this._l23_2a=Math.pow(e*e+r*r,this._alpha))}switch(this._point){case 0:this._point=1,this._x3=t,this._y3=n;break;case 1:this._point=2,this._context.moveTo(this._x4=t,this._y4=n);break;case 2:this._point=3,this._x5=t,this._y5=n;break;default:Pc(this,t,n)}this._l01_a=this._l12_a,this._l12_a=this._l23_a,this._l01_2a=this._l12_2a,this._l12_2a=this._l23_2a,this._x0=this._x1,this._x1=this._x2,this._x2=t,this._y0=this._y1,this._y1=this._y2,this._y2=n}};var E_=function t(n){function e(t){return n?new Lc(t,n):new Cc(t,0)}return e.alpha=function(n){return t(+n)},e}(.5);qc.prototype={areaStart:function(){this._line=0},areaEnd:function(){this._line=NaN},lineStart:function(){this._x0=this._x1=this._x2=this._y0=this._y1=this._y2=NaN,this._l01_a=this._l12_a=this._l23_a=this._l01_2a=this._l12_2a=this._l23_2a=this._point=0},lineEnd:function(){(this._line||0!==this._line&&3===this._point)&&this._context.closePath(),this._line=1-this._line},point:function(t,n){if(t=+t,n=+n,this._point){var e=this._x2-t,r=this._y2-n;this._l23_a=Math.sqrt(this._l23_2a=Math.pow(e*e+r*r,this._alpha))}switch(this._point){case 0:this._point=1;break;case 1:this._point=2;break;case 2:this._point=3,this._line?this._context.lineTo(this._x2,this._y2):this._context.moveTo(this._x2,this._y2);break;case 
3:this._point=4;default:Pc(this,t,n)}this._l01_a=this._l12_a,this._l12_a=this._l23_a,this._l01_2a=this._l12_2a,this._l12_2a=this._l23_2a,this._x0=this._x1,this._x1=this._x2,this._x2=t,this._y0=this._y1,this._y1=this._y2,this._y2=n}};var A_=function t(n){function e(t){return n?new qc(t,n):new zc(t,0)}return e.alpha=function(n){return t(+n)},e}(.5);Dc.prototype={areaStart:wc,areaEnd:wc,lineStart:function(){this._point=0},lineEnd:function(){this._point&&this._context.closePath()},point:function(t,n){t=+t,n=+n,this._point?this._context.lineTo(t,n):(this._point=1,this._context.moveTo(t,n))}},Yc.prototype={areaStart:function(){this._line=0},areaEnd:function(){this._line=NaN},lineStart:function(){this._x0=this._x1=this._y0=this._y1=this._t0=NaN,this._point=0},lineEnd:function(){switch(this._point){case 2:this._context.lineTo(this._x1,this._y1);break;case 3:Ic(this,this._t0,Fc(this,this._t0))}(this._line||0!==this._line&&1===this._point)&&this._context.closePath(),this._line=1-this._line},point:function(t,n){var e=NaN;if(t=+t,n=+n,t!==this._x1||n!==this._y1){switch(this._point){case 0:this._point=1,this._line?this._context.lineTo(t,n):this._context.moveTo(t,n);break;case 1:this._point=2;break;case 2:this._point=3,Ic(this,Fc(this,e=Oc(this,t,n)),e);break;default:Ic(this,this._t0,e=Oc(this,t,n))}this._x0=this._x1,this._x1=t,this._y0=this._y1,this._y1=n,this._t0=e}}},(Bc.prototype=Object.create(Yc.prototype)).point=function(t,n){Yc.prototype.point.call(this,n,t)},Hc.prototype={moveTo:function(t,n){this._context.moveTo(n,t)},closePath:function(){this._context.closePath()},lineTo:function(t,n){this._context.lineTo(n,t)},bezierCurveTo:function(t,n,e,r,i,o){this._context.bezierCurveTo(n,t,r,e,o,i)}},jc.prototype={areaStart:function(){this._line=0},areaEnd:function(){this._line=NaN},lineStart:function(){this._x=[],this._y=[]},lineEnd:function(){var t=this._x,n=this._y,e=t.length;if(e)if(this._line?this._context.lineTo(t[0],n[0]):this._context.moveTo(t[0],n[0]),2===e)this._context.lineTo(t[1],n[1]);else for(var r=Xc(t),i=Xc(n),o=0,u=1;u<e;++o,++u)this._context.bezierCurveTo(r[0][o],i[0][o],r[1][o],i[1][o],t[u],n[u]);(this._line||0!==this._line&&1===e)&&this._context.closePath(),this._line=1-this._line,this._x=this._y=null},point:function(t,n){this._x.push(+t),this._y.push(+n)}},Vc.prototype={areaStart:function(){this._line=0},areaEnd:function(){this._line=NaN},lineStart:function(){this._x=this._y=NaN,this._point=0},lineEnd:function(){0<this._t&&this._t<1&&2===this._point&&this._context.lineTo(this._x,this._y),(this._line||0!==this._line&&1===this._point)&&this._context.closePath(),this._line>=0&&(this._t=1-this._t,this._line=1-this._line)},point:function(t,n){switch(t=+t,n=+n,this._point){case 0:this._point=1,this._line?this._context.lineTo(t,n):this._context.moveTo(t,n);break;case 1:this._point=2;default:if(this._t<=0)this._context.lineTo(this._x,n),this._context.lineTo(t,n);else{var e=this._x*(1-this._t)+t*this._t;this._context.lineTo(e,this._y),this._context.lineTo(e,n)}}this._x=t,this._y=n}},ns.prototype={constructor:ns,insert:function(t,n){var e,r,i;if(t){if(n.P=t,n.N=t.N,t.N&&(t.N.P=n),t.N=n,t.R){for(t=t.R;t.L;)t=t.L;t.L=n}else t.R=n;e=t}else 
this._?(t=os(this._),n.P=null,n.N=t,t.P=t.L=n,e=t):(n.P=n.N=null,this._=n,e=null);for(n.L=n.R=null,n.U=e,n.C=!0,t=n;e&&e.C;)e===(r=e.U).L?(i=r.R)&&i.C?(e.C=i.C=!1,r.C=!0,t=r):(t===e.R&&(rs(this,e),e=(t=e).U),e.C=!1,r.C=!0,is(this,r)):(i=r.L)&&i.C?(e.C=i.C=!1,r.C=!0,t=r):(t===e.L&&(is(this,e),e=(t=e).U),e.C=!1,r.C=!0,rs(this,r)),e=t.U;this._.C=!1},remove:function(t){t.N&&(t.N.P=t.P),t.P&&(t.P.N=t.N),t.N=t.P=null;var n,e,r,i=t.U,o=t.L,u=t.R;if(e=o?u?os(u):o:u,i?i.L===t?i.L=e:i.R=e:this._=e,o&&u?(r=e.C,e.C=t.C,e.L=o,o.U=e,e!==u?(i=e.U,e.U=t.U,t=e.R,i.L=t,e.R=u,u.U=e):(e.U=i,i=e,t=e.R)):(r=t.C,t=e),t&&(t.U=i),!r)if(t&&t.C)t.C=!1;else{do{if(t===this._)break;if(t===i.L){if((n=i.R).C&&(n.C=!1,i.C=!0,rs(this,i),n=i.R),n.L&&n.L.C||n.R&&n.R.C){n.R&&n.R.C||(n.L.C=!1,n.C=!0,is(this,n),n=i.R),n.C=i.C,i.C=n.R.C=!1,rs(this,i),t=this._;break}}else if((n=i.L).C&&(n.C=!1,i.C=!0,is(this,i),n=i.L),n.L&&n.L.C||n.R&&n.R.C){n.L&&n.L.C||(n.R.C=!1,n.C=!0,rs(this,n),n=i.L),n.C=i.C,i.C=n.L.C=!1,is(this,i),t=this._;break}n.C=!0,t=i,i=i.U}while(!t.C);t&&(t.C=!1)}}};var C_,z_,P_,R_,L_,q_=[],D_=[],U_=1e-6,O_=1e-12;Ms.prototype={constructor:Ms,polygons:function(){var t=this.edges;return this.cells.map(function(n){var e=n.halfedges.map(function(e){return hs(n,t[e])});return e.data=n.site.data,e})},triangles:function(){var t=[],n=this.edges;return this.cells.forEach(function(e,r){if(o=(i=e.halfedges).length)for(var i,o,u,a=e.site,c=-1,s=n[i[o-1]],f=s.left===a?s.right:s.left;++c<o;)u=f,f=(s=n[i[c]]).left===a?s.right:s.left,u&&f&&r<u.index&&r<f.index&&bs(a,u,f)<0&&t.push([a.data,u.data,f.data])}),t},links:function(){return this.edges.filter(function(t){return t.right}).map(function(t){return{source:t.left.data,target:t.right.data}})},find:function(t,n,e){for(var r,i,o=this,u=o._found||0,a=o.cells.length;!(i=o.cells[u]);)if(++u>=a)return null;var c=t-i.site[0],s=n-i.site[1],f=c*c+s*s;do{i=o.cells[r=u],u=null,i.halfedges.forEach(function(e){var r=o.edges[e],a=r.left;if(a!==i.site&&a||(a=r.right)){var c=t-a[0],s=n-a[1],l=c*c+s*s;l<f&&(f=l,u=a.index)}})}while(null!==u);return o._found=r,null==e||f<=e*e?i.site:null}},Ns.prototype={constructor:Ns,scale:function(t){return 1===t?this:new Ns(this.k*t,this.x,this.y)},translate:function(t,n){return 0===t&0===n?this:new Ns(this.k,this.x+this.k*t,this.y+this.k*n)},apply:function(t){return[t[0]*this.k+this.x,t[1]*this.k+this.y]},applyX:function(t){return t*this.k+this.x},applyY:function(t){return t*this.k+this.y},invert:function(t){return[(t[0]-this.x)/this.k,(t[1]-this.y)/this.k]},invertX:function(t){return(t-this.x)/this.k},invertY:function(t){return(t-this.y)/this.k},rescaleX:function(t){return t.copy().domain(t.range().map(this.invertX,this).map(t.invert,t))},rescaleY:function(t){return t.copy().domain(t.range().map(this.invertY,this).map(t.invert,t))},toString:function(){return"translate("+this.x+","+this.y+") scale("+this.k+")"}};var F_=new Ns(1,0,0);ks.prototype=Ns.prototype,t.version="4.12.2",t.bisect=Ds,t.bisectRight=Ds,t.bisectLeft=Us,t.ascending=n,t.bisector=e,t.cross=function(t,n,e){var i,o,u,a,c=t.length,s=n.length,f=new Array(c*s);for(null==e&&(e=r),i=u=0;i<c;++i)for(a=t[i],o=0;o<s;++o,++u)f[u]=e(a,n[o]);return f},t.descending=function(t,n){return n<t?-1:n>t?1:n>=t?0:NaN},t.deviation=u,t.extent=a,t.histogram=function(){function t(t){var i,o,u=t.length,a=new Array(u);for(i=0;i<u;++i)a[i]=n(t[i],i,t);var c=e(a),s=c[0],l=c[1],h=r(a,s,l);Array.isArray(h)||(h=p(s,l,h),h=f(Math.ceil(s/h)*h,Math.floor(l/h)*h,h));for(var 
d=h.length;h[0]<=s;)h.shift(),--d;for(;h[d-1]>l;)h.pop(),--d;var v,g=new Array(d+1);for(i=0;i<=d;++i)(v=g[i]=[]).x0=i>0?h[i-1]:s,v.x1=i<d?h[i]:l;for(i=0;i<u;++i)s<=(o=a[i])&&o<=l&&g[Ds(h,o,0,d)].push(t[i]);return g}var n=s,e=a,r=d;return t.value=function(e){return arguments.length?(n="function"==typeof e?e:c(e),t):n},t.domain=function(n){return arguments.length?(e="function"==typeof n?n:c([n[0],n[1]]),t):e},t.thresholds=function(n){return arguments.length?(r="function"==typeof n?n:Array.isArray(n)?c(Fs.call(n)):c(n),t):r},t},t.thresholdFreedmanDiaconis=function(t,e,r){return t=Is.call(t,i).sort(n),Math.ceil((r-e)/(2*(v(t,.75)-v(t,.25))*Math.pow(t.length,-1/3)))},t.thresholdScott=function(t,n,e){return Math.ceil((e-n)/(3.5*u(t)*Math.pow(t.length,-1/3)))},t.thresholdSturges=d,t.max=function(t,n){var e,r,i=t.length,o=-1;if(null==n){for(;++o<i;)if(null!=(e=t[o])&&e>=e)for(r=e;++o<i;)null!=(e=t[o])&&e>r&&(r=e)}else for(;++o<i;)if(null!=(e=n(t[o],o,t))&&e>=e)for(r=e;++o<i;)null!=(e=n(t[o],o,t))&&e>r&&(r=e);return r},t.mean=function(t,n){var e,r=t.length,o=r,u=-1,a=0;if(null==n)for(;++u<r;)isNaN(e=i(t[u]))?--o:a+=e;else for(;++u<r;)isNaN(e=i(n(t[u],u,t)))?--o:a+=e;if(o)return a/o},t.median=function(t,e){var r,o=t.length,u=-1,a=[];if(null==e)for(;++u<o;)isNaN(r=i(t[u]))||a.push(r);else for(;++u<o;)isNaN(r=i(e(t[u],u,t)))||a.push(r);return v(a.sort(n),.5)},t.merge=g,t.min=_,t.pairs=function(t,n){null==n&&(n=r);for(var e=0,i=t.length-1,o=t[0],u=new Array(i<0?0:i);e<i;)u[e]=n(o,o=t[++e]);return u},t.permute=function(t,n){for(var e=n.length,r=new Array(e);e--;)r[e]=t[n[e]];return r},t.quantile=v,t.range=f,t.scan=function(t,e){if(r=t.length){var r,i,o=0,u=0,a=t[u];for(null==e&&(e=n);++o<r;)(e(i=t[o],a)<0||0!==e(a,a))&&(a=i,u=o);return 0===e(a,a)?u:void 0}},t.shuffle=function(t,n,e){for(var r,i,o=(null==e?t.length:e)-(n=null==n?0:+n);o;)i=Math.random()*o--|0,r=t[o+n],t[o+n]=t[i+n],t[i+n]=r;return t},t.sum=function(t,n){var e,r=t.length,i=-1,o=0;if(null==n)for(;++i<r;)(e=+t[i])&&(o+=e);else for(;++i<r;)(e=+n(t[i],i,t))&&(o+=e);return o},t.ticks=l,t.tickIncrement=h,t.tickStep=p,t.transpose=y,t.variance=o,t.zip=function(){return y(arguments)},t.axisTop=function(t){return T(Xs,t)},t.axisRight=function(t){return T(Vs,t)},t.axisBottom=function(t){return T($s,t)},t.axisLeft=function(t){return T(Ws,t)},t.brush=function(){return Qn(rh)},t.brushX=function(){return Qn(nh)},t.brushY=function(){return Qn(eh)},t.brushSelection=function(t){var n=t.__brush;return n?n.dim.output(n.selection):null},t.chord=function(){function t(t){var o,u,a,c,s,l,h=t.length,p=[],d=f(h),v=[],g=[],_=g.groups=new Array(h),y=new Array(h*h);for(o=0,s=-1;++s<h;){for(u=0,l=-1;++l<h;)u+=t[s][l];p.push(u),v.push(f(h)),o+=u}for(e&&d.sort(function(t,n){return e(p[t],p[n])}),r&&v.forEach(function(n,e){n.sort(function(n,i){return r(t[e][n],t[e][i])})}),c=(o=dh(0,ph-n*h)/o)?n:ph/h,u=0,s=-1;++s<h;){for(a=u,l=-1;++l<h;){var m=d[s],x=v[m][l],b=t[m][x],w=u,M=u+=b*o;y[x*h+m]={index:m,subindex:x,startAngle:w,endAngle:M,value:b}}_[m]={index:m,startAngle:a,endAngle:u,value:p[m]},u+=c}for(s=-1;++s<h;)for(l=s-1;++l<h;){var T=y[l*h+s],N=y[s*h+l];(T.value||N.value)&&g.push(T.value<N.value?{source:N,target:T}:{source:T,target:N})}return i?g.sort(i):g}var n=0,e=null,r=null,i=null;return t.padAngle=function(e){return arguments.length?(n=dh(0,e),t):n},t.sortGroups=function(n){return arguments.length?(e=n,t):e},t.sortSubgroups=function(n){return arguments.length?(r=n,t):r},t.sortChords=function(n){return arguments.length?(null==n?i=null:(i=function(t){return 
function(n,e){return t(n.source.value+n.target.value,e.source.value+e.target.value)}}(n))._=n,t):i&&i._},t},t.ribbon=function(){function t(){var t,a=vh.call(arguments),c=n.apply(this,a),s=e.apply(this,a),f=+r.apply(this,(a[0]=c,a)),l=i.apply(this,a)-hh,h=o.apply(this,a)-hh,p=f*sh(l),d=f*fh(l),v=+r.apply(this,(a[0]=s,a)),g=i.apply(this,a)-hh,_=o.apply(this,a)-hh;if(u||(u=t=te()),u.moveTo(p,d),u.arc(0,0,f,l,h),l===g&&h===_||(u.quadraticCurveTo(0,0,v*sh(g),v*fh(g)),u.arc(0,0,v,g,_)),u.quadraticCurveTo(0,0,p,d),u.closePath(),t)return u=null,t+""||null}var n=ne,e=ee,r=re,i=ie,o=oe,u=null;return t.radius=function(n){return arguments.length?(r="function"==typeof n?n:Jn(+n),t):r},t.startAngle=function(n){return arguments.length?(i="function"==typeof n?n:Jn(+n),t):i},t.endAngle=function(n){return arguments.length?(o="function"==typeof n?n:Jn(+n),t):o},t.source=function(e){return arguments.length?(n=e,t):n},t.target=function(n){return arguments.length?(e=n,t):e},t.context=function(n){return arguments.length?(u=null==n?null:n,t):u},t},t.nest=function(){function t(n,i,u,a){if(i>=o.length)return null!=e&&n.sort(e),null!=r?r(n):n;for(var c,s,f,l=-1,h=n.length,p=o[i++],d=ae(),v=u();++l<h;)(f=d.get(c=p(s=n[l])+""))?f.push(s):d.set(c,[s]);return d.each(function(n,e){a(v,e,t(n,i,u,a))}),v}function n(t,e){if(++e>o.length)return t;var i,a=u[e-1];return null!=r&&e>=o.length?i=t.entries():(i=[],t.each(function(t,r){i.push({key:r,values:n(t,e)})})),null!=a?i.sort(function(t,n){return a(t.key,n.key)}):i}var e,r,i,o=[],u=[];return i={object:function(n){return t(n,0,ce,se)},map:function(n){return t(n,0,fe,le)},entries:function(e){return n(t(e,0,fe,le),0)},key:function(t){return o.push(t),i},sortKeys:function(t){return u[o.length-1]=t,i},sortValues:function(t){return e=t,i},rollup:function(t){return r=t,i}}},t.set=pe,t.map=ae,t.keys=function(t){var n=[];for(var e in t)n.push(e);return n},t.values=function(t){var n=[];for(var e in t)n.push(t[e]);return n},t.entries=function(t){var n=[];for(var e in t)n.push({key:e,value:t[e]});return n},t.color=kt,t.rgb=Ct,t.hsl=Rt,t.lab=Ut,t.hcl=Ht,t.cubehelix=Xt,t.dispatch=N,t.drag=function(){function n(t){t.on("mousedown.drag",e).filter(g).on("touchstart.drag",o).on("touchmove.drag",u).on("touchend.drag touchcancel.drag",a).style("touch-action","none").style("-webkit-tap-highlight-color","rgba(0,0,0,0)")}function e(){if(!h&&p.apply(this,arguments)){var n=c("mouse",d.apply(this,arguments),F,this,arguments);n&&(lt(t.event.view).on("mousemove.drag",r,!0).on("mouseup.drag",i,!0),vt(t.event.view),pt(),l=!1,s=t.event.clientX,f=t.event.clientY,n("start"))}}function r(){if(dt(),!l){var n=t.event.clientX-s,e=t.event.clientY-f;l=n*n+e*e>x}_.mouse("drag")}function i(){lt(t.event.view).on("mousemove.drag mouseup.drag",null),gt(t.event.view,l),dt(),_.mouse("end")}function o(){if(p.apply(this,arguments)){var n,e,r=t.event.changedTouches,i=d.apply(this,arguments),o=r.length;for(n=0;n<o;++n)(e=c(r[n].identifier,i,ht,this,arguments))&&(pt(),e("start"))}}function u(){var n,e,r=t.event.changedTouches,i=r.length;for(n=0;n<i;++n)(e=_[r[n].identifier])&&(dt(),e("drag"))}function a(){var n,e,r=t.event.changedTouches,i=r.length;for(h&&clearTimeout(h),h=setTimeout(function(){h=null},500),n=0;n<i;++n)(e=_[r[n].identifier])&&(pt(),e("end"))}function c(e,r,i,o,u){var a,c,s,f=i(r,e),l=y.copy();if(D(new yt(n,"beforestart",a,e,m,f[0],f[1],0,0,l),function(){return null!=(t.event.subject=a=v.apply(o,u))&&(c=a.x-f[0]||0,s=a.y-f[1]||0,!0)}))return function t(h){var 
p,d=f;switch(h){case"start":_[e]=t,p=m++;break;case"end":delete _[e],--m;case"drag":f=i(r,e),p=m}D(new yt(n,h,a,e,p,f[0]+c,f[1]+s,f[0]-d[0],f[1]-d[1],l),l.apply,l,[h,o,u])}}var s,f,l,h,p=mt,d=xt,v=bt,g=wt,_={},y=N("start","drag","end"),m=0,x=0;return n.filter=function(t){return arguments.length?(p="function"==typeof t?t:_t(!!t),n):p},n.container=function(t){return arguments.length?(d="function"==typeof t?t:_t(t),n):d},n.subject=function(t){return arguments.length?(v="function"==typeof t?t:_t(t),n):v},n.touchable=function(t){return arguments.length?(g="function"==typeof t?t:_t(!!t),n):g},n.on=function(){var t=y.on.apply(y,arguments);return t===y?n:t},n.clickDistance=function(t){return arguments.length?(x=(t=+t)*t,n):Math.sqrt(x)},n},t.dragDisable=vt,t.dragEnable=gt,t.dsvFormat=ve,t.csvParse=kh,t.csvParseRows=Sh,t.csvFormat=Eh,t.csvFormatRows=Ah,t.tsvParse=zh,t.tsvParseRows=Ph,t.tsvFormat=Rh,t.tsvFormatRows=Lh,t.easeLinear=function(t){return+t},t.easeQuad=Dn,t.easeQuadIn=function(t){return t*t},t.easeQuadOut=function(t){return t*(2-t)},t.easeQuadInOut=Dn,t.easeCubic=Un,t.easeCubicIn=function(t){return t*t*t},t.easeCubicOut=function(t){return--t*t*t+1},t.easeCubicInOut=Un,t.easePoly=Al,t.easePolyIn=Sl,t.easePolyOut=El,t.easePolyInOut=Al,t.easeSin=On,t.easeSinIn=function(t){return 1-Math.cos(t*zl)},t.easeSinOut=function(t){return Math.sin(t*zl)},t.easeSinInOut=On,t.easeExp=Fn,t.easeExpIn=function(t){return Math.pow(2,10*t-10)},t.easeExpOut=function(t){return 1-Math.pow(2,-10*t)},t.easeExpInOut=Fn,t.easeCircle=In,t.easeCircleIn=function(t){return 1-Math.sqrt(1-t*t)},t.easeCircleOut=function(t){return Math.sqrt(1- --t*t)},t.easeCircleInOut=In,t.easeBounce=Yn,t.easeBounceIn=function(t){return 1-Yn(1-t)},t.easeBounceOut=Yn,t.easeBounceInOut=function(t){return((t*=2)<=1?1-Yn(1-t):Yn(t-1)+1)/2},t.easeBack=jl,t.easeBackIn=Bl,t.easeBackOut=Hl,t.easeBackInOut=jl,t.easeElastic=$l,t.easeElasticIn=Vl,t.easeElasticOut=$l,t.easeElasticInOut=Wl,t.forceCenter=function(t,n){function e(){var e,i,o=r.length,u=0,a=0;for(e=0;e<o;++e)u+=(i=r[e]).x,a+=i.y;for(u=u/o-t,a=a/o-n,e=0;e<o;++e)(i=r[e]).x-=u,i.y-=a}var r;return null==t&&(t=0),null==n&&(n=0),e.initialize=function(t){r=t},e.x=function(n){return arguments.length?(t=+n,e):t},e.y=function(t){return arguments.length?(n=+t,e):n},e},t.forceCollide=function(t){function n(){for(var t,n,r,c,s,f,l,h=i.length,p=0;p<a;++p)for(n=we(i,Ne,ke).visitAfter(e),t=0;t<h;++t)r=i[t],f=o[r.index],l=f*f,c=r.x+r.vx,s=r.y+r.vy,n.visit(function(t,n,e,i,o){var a=t.data,h=t.r,p=f+h;if(!a)return n>c+p||i<c-p||e>s+p||o<s-p;if(a.index>r.index){var d=c-a.x-a.vx,v=s-a.y-a.vy,g=d*d+v*v;g<p*p&&(0===d&&(d=_e(),g+=d*d),0===v&&(v=_e(),g+=v*v),g=(p-(g=Math.sqrt(g)))/g*u,r.vx+=(d*=g)*(p=(h*=h)/(l+h)),r.vy+=(v*=g)*p,a.vx-=d*(p=1-p),a.vy-=v*p)}})}function e(t){if(t.data)return t.r=o[t.data.index];for(var n=t.r=0;n<4;++n)t[n]&&t[n].r>t.r&&(t.r=t[n].r)}function r(){if(i){var n,e,r=i.length;for(o=new Array(r),n=0;n<r;++n)e=i[n],o[e.index]=+t(e,n,i)}}var i,o,u=1,a=1;return"function"!=typeof t&&(t=ge(null==t?1:+t)),n.initialize=function(t){i=t,r()},n.iterations=function(t){return arguments.length?(a=+t,n):a},n.strength=function(t){return arguments.length?(u=+t,n):u},n.radius=function(e){return arguments.length?(t="function"==typeof e?e:ge(+e),r(),n):t},n},t.forceLink=function(t){function n(n){for(var e=0,r=t.length;e<p;++e)for(var 
i,a,c,f,l,h,d,v=0;v<r;++v)a=(i=t[v]).source,f=(c=i.target).x+c.vx-a.x-a.vx||_e(),l=c.y+c.vy-a.y-a.vy||_e(),f*=h=((h=Math.sqrt(f*f+l*l))-u[v])/h*n*o[v],l*=h,c.vx-=f*(d=s[v]),c.vy-=l*d,a.vx+=f*(d=1-d),a.vy+=l*d}function e(){if(a){var n,e,l=a.length,h=t.length,p=ae(a,f);for(n=0,c=new Array(l);n<h;++n)(e=t[n]).index=n,"object"!=typeof e.source&&(e.source=Ee(p,e.source)),"object"!=typeof e.target&&(e.target=Ee(p,e.target)),c[e.source.index]=(c[e.source.index]||0)+1,c[e.target.index]=(c[e.target.index]||0)+1;for(n=0,s=new Array(h);n<h;++n)e=t[n],s[n]=c[e.source.index]/(c[e.source.index]+c[e.target.index]);o=new Array(h),r(),u=new Array(h),i()}}function r(){if(a)for(var n=0,e=t.length;n<e;++n)o[n]=+l(t[n],n,t)}function i(){if(a)for(var n=0,e=t.length;n<e;++n)u[n]=+h(t[n],n,t)}var o,u,a,c,s,f=Se,l=function(t){return 1/Math.min(c[t.source.index],c[t.target.index])},h=ge(30),p=1;return null==t&&(t=[]),n.initialize=function(t){a=t,e()},n.links=function(r){return arguments.length?(t=r,e(),n):t},n.id=function(t){return arguments.length?(f=t,n):f},n.iterations=function(t){return arguments.length?(p=+t,n):p},n.strength=function(t){return arguments.length?(l="function"==typeof t?t:ge(+t),r(),n):l},n.distance=function(t){return arguments.length?(h="function"==typeof t?t:ge(+t),i(),n):h},n},t.forceManyBody=function(){function t(t){var n,a=i.length,c=we(i,Ae,Ce).visitAfter(e);for(u=t,n=0;n<a;++n)o=i[n],c.visit(r)}function n(){if(i){var t,n,e=i.length;for(a=new Array(e),t=0;t<e;++t)n=i[t],a[n.index]=+c(n,t,i)}}function e(t){var n,e,r,i,o,u=0,c=0;if(t.length){for(r=i=o=0;o<4;++o)(n=t[o])&&(e=Math.abs(n.value))&&(u+=n.value,c+=e,r+=e*n.x,i+=e*n.y);t.x=r/c,t.y=i/c}else{(n=t).x=n.data.x,n.y=n.data.y;do{u+=a[n.data.index]}while(n=n.next)}t.value=u}function r(t,n,e,r){if(!t.value)return!0;var i=t.x-o.x,c=t.y-o.y,h=r-n,p=i*i+c*c;if(h*h/l<p)return p<f&&(0===i&&(i=_e(),p+=i*i),0===c&&(c=_e(),p+=c*c),p<s&&(p=Math.sqrt(s*p)),o.vx+=i*t.value*u/p,o.vy+=c*t.value*u/p),!0;if(!(t.length||p>=f)){(t.data!==o||t.next)&&(0===i&&(i=_e(),p+=i*i),0===c&&(c=_e(),p+=c*c),p<s&&(p=Math.sqrt(s*p)));do{t.data!==o&&(h=a[t.data.index]*u/p,o.vx+=i*h,o.vy+=c*h)}while(t=t.next)}}var i,o,u,a,c=ge(-30),s=1,f=1/0,l=.81;return t.initialize=function(t){i=t,n()},t.strength=function(e){return arguments.length?(c="function"==typeof e?e:ge(+e),n(),t):c},t.distanceMin=function(n){return arguments.length?(s=n*n,t):Math.sqrt(s)},t.distanceMax=function(n){return arguments.length?(f=n*n,t):Math.sqrt(f)},t.theta=function(n){return arguments.length?(l=n*n,t):Math.sqrt(l)},t},t.forceRadial=function(t,n,e){function r(t){for(var r=0,i=o.length;r<i;++r){var c=o[r],s=c.x-n||1e-6,f=c.y-e||1e-6,l=Math.sqrt(s*s+f*f),h=(a[r]-l)*u[r]*t/l;c.vx+=s*h,c.vy+=f*h}}function i(){if(o){var n,e=o.length;for(u=new Array(e),a=new Array(e),n=0;n<e;++n)a[n]=+t(o[n],n,o),u[n]=isNaN(a[n])?0:+c(o[n],n,o)}}var o,u,a,c=ge(.1);return"function"!=typeof t&&(t=ge(+t)),null==n&&(n=0),null==e&&(e=0),r.initialize=function(t){o=t,i()},r.strength=function(t){return arguments.length?(c="function"==typeof t?t:ge(+t),i(),r):c},r.radius=function(n){return arguments.length?(t="function"==typeof n?n:ge(+n),i(),r):t},r.x=function(t){return arguments.length?(n=+t,r):n},r.y=function(t){return arguments.length?(e=+t,r):e},r},t.forceSimulation=function(t){function n(){e(),p.call("tick",o),u<a&&(h.stop(),p.call("end",o))}function e(){var n,e,r=t.length;for(u+=(s-u)*c,l.each(function(t){t(u)}),n=0;n<r;++n)null==(e=t[n]).fx?e.x+=e.vx*=f:(e.x=e.fx,e.vx=0),null==e.fy?e.y+=e.vy*=f:(e.y=e.fy,e.vy=0)}function 
r(){for(var n,e=0,r=t.length;e<r;++e){if(n=t[e],n.index=e,isNaN(n.x)||isNaN(n.y)){var i=Uh*Math.sqrt(e),o=e*Oh;n.x=i*Math.cos(o),n.y=i*Math.sin(o)}(isNaN(n.vx)||isNaN(n.vy))&&(n.vx=n.vy=0)}}function i(n){return n.initialize&&n.initialize(t),n}var o,u=1,a=.001,c=1-Math.pow(a,1/300),s=0,f=.6,l=ae(),h=xn(n),p=N("tick","end");return null==t&&(t=[]),r(),o={tick:e,restart:function(){return h.restart(n),o},stop:function(){return h.stop(),o},nodes:function(n){return arguments.length?(t=n,r(),l.each(i),o):t},alpha:function(t){return arguments.length?(u=+t,o):u},alphaMin:function(t){return arguments.length?(a=+t,o):a},alphaDecay:function(t){return arguments.length?(c=+t,o):+c},alphaTarget:function(t){return arguments.length?(s=+t,o):s},velocityDecay:function(t){return arguments.length?(f=1-t,o):1-f},force:function(t,n){return arguments.length>1?(null==n?l.remove(t):l.set(t,i(n)),o):l.get(t)},find:function(n,e,r){var i,o,u,a,c,s=0,f=t.length;for(null==r?r=1/0:r*=r,s=0;s<f;++s)(u=(i=n-(a=t[s]).x)*i+(o=e-a.y)*o)<r&&(c=a,r=u);return c},on:function(t,n){return arguments.length>1?(p.on(t,n),o):p.on(t)}}},t.forceX=function(t){function n(t){for(var n,e=0,u=r.length;e<u;++e)(n=r[e]).vx+=(o[e]-n.x)*i[e]*t}function e(){if(r){var n,e=r.length;for(i=new Array(e),o=new Array(e),n=0;n<e;++n)i[n]=isNaN(o[n]=+t(r[n],n,r))?0:+u(r[n],n,r)}}var r,i,o,u=ge(.1);return"function"!=typeof t&&(t=ge(null==t?0:+t)),n.initialize=function(t){r=t,e()},n.strength=function(t){return arguments.length?(u="function"==typeof t?t:ge(+t),e(),n):u},n.x=function(r){return arguments.length?(t="function"==typeof r?r:ge(+r),e(),n):t},n},t.forceY=function(t){function n(t){for(var n,e=0,u=r.length;e<u;++e)(n=r[e]).vy+=(o[e]-n.y)*i[e]*t}function e(){if(r){var n,e=r.length;for(i=new Array(e),o=new Array(e),n=0;n<e;++n)i[n]=isNaN(o[n]=+t(r[n],n,r))?0:+u(r[n],n,r)}}var r,i,o,u=ge(.1);return"function"!=typeof t&&(t=ge(null==t?0:+t)),n.initialize=function(t){r=t,e()},n.strength=function(t){return arguments.length?(u="function"==typeof t?t:ge(+t),e(),n):u},n.y=function(r){return arguments.length?(t="function"==typeof r?r:ge(+r),e(),n):t},n},t.formatDefaultLocale=Oe,t.formatLocale=Ue,t.formatSpecifier=Le,t.precisionFixed=Fe,t.precisionPrefix=Ie,t.precisionRound=Ye,t.geoArea=function(t){return jp.reset(),Je(t,Xp),2*jp},t.geoBounds=function(t){var n,e,r,i,o,u,a;if(Qh=Gh=-(Wh=Zh=1/0),ep=[],Je(t,$p),e=ep.length){for(ep.sort(yr),n=1,o=[r=ep[0]];n<e;++n)mr(r,(i=ep[n])[0])||mr(r,i[1])?(_r(r[0],i[1])>_r(r[0],r[1])&&(r[1]=i[1]),_r(i[0],r[1])>_r(r[0],r[1])&&(r[0]=i[0])):o.push(r=i);for(u=-1/0,n=0,r=o[e=o.length-1];n<=e;r=i,++n)i=o[n],(a=_r(r[1],i[0]))>u&&(u=a,Wh=i[0],Gh=r[1])}return ep=rp=null,Wh===1/0||Zh===1/0?[[NaN,NaN],[NaN,NaN]]:[[Wh,Zh],[Gh,Qh]]},t.geoCentroid=function(t){ip=op=up=ap=cp=sp=fp=lp=hp=pp=dp=0,Je(t,Wp);var n=hp,e=pp,r=dp,i=n*n+e*e+r*r;return i<wp&&(n=sp,e=fp,r=lp,op<bp&&(n=up,e=ap,r=cp),(i=n*n+e*e+r*r)<wp)?[NaN,NaN]:[zp(e,n)*Sp,Ve(r/Fp(i))*Sp]},t.geoCircle=function(){function t(){var t=r.apply(this,arguments),a=i.apply(this,arguments)*Ep,c=o.apply(this,arguments)*Ep;return n=[],e=Rr(-t[0]*Ep,-t[1]*Ep,0).invert,Or(u,a,c,1),t={type:"Polygon",coordinates:[n]},n=e=null,t}var n,e,r=Cr([0,0]),i=Cr(90),o=Cr(6),u={point:function(t,r){n.push(t=e(t,r)),t[0]*=Sp,t[1]*=Sp}};return t.center=function(n){return arguments.length?(r="function"==typeof n?n:Cr([+n[0],+n[1]]),t):r},t.radius=function(n){return arguments.length?(i="function"==typeof n?n:Cr(+n),t):i},t.precision=function(n){return arguments.length?(o="function"==typeof 
n?n:Cr(+n),t):o},t},t.geoClipAntimeridian=ad,t.geoClipCircle=Zr,t.geoClipExtent=function(){var t,n,e,r=0,i=0,o=960,u=500;return e={stream:function(e){return t&&n===e?t:t=Gr(r,i,o,u)(n=e)},extent:function(a){return arguments.length?(r=+a[0][0],i=+a[0][1],o=+a[1][0],u=+a[1][1],t=n=null,e):[[r,i],[o,u]]}}},t.geoClipRectangle=Gr,t.geoContains=function(t,n){return(t&&dd.hasOwnProperty(t.type)?dd[t.type]:ei)(t,n)},t.geoDistance=ni,t.geoGraticule=fi,t.geoGraticule10=function(){return fi()()},t.geoInterpolate=function(t,n){var e=t[0]*Ep,r=t[1]*Ep,i=n[0]*Ep,o=n[1]*Ep,u=Pp(r),a=Up(r),c=Pp(o),s=Up(o),f=u*Pp(e),l=u*Up(e),h=c*Pp(i),p=c*Up(i),d=2*Ve(Fp($e(o-r)+u*c*$e(i-e))),v=Up(d),g=d?function(t){var n=Up(t*=d)/v,e=Up(d-t)/v,r=e*f+n*h,i=e*l+n*p,o=e*a+n*s;return[zp(i,r)*Sp,zp(o,Fp(r*r+i*i))*Sp]}:function(){return[e*Sp,r*Sp]};return g.distance=d,g},t.geoLength=ti,t.geoPath=function(t,n){function e(t){return t&&("function"==typeof o&&i.pointRadius(+o.apply(this,arguments)),Je(t,r(i))),i.result()}var r,i,o=4.5;return e.area=function(t){return Je(t,r(yd)),yd.result()},e.measure=function(t){return Je(t,r(Id)),Id.result()},e.bounds=function(t){return Je(t,r(Md)),Md.result()},e.centroid=function(t){return Je(t,r(Rd)),Rd.result()},e.projection=function(n){return arguments.length?(r=null==n?(t=null,li):(t=n).stream,e):t},e.context=function(t){return arguments.length?(i=null==t?(n=null,new Ei):new Ni(n=t),"function"!=typeof o&&i.pointRadius(o),e):n},e.pointRadius=function(t){return arguments.length?(o="function"==typeof t?t:(i.pointRadius(+t),+t),e):o},e.projection(t).context(n)},t.geoAlbers=Hi,t.geoAlbersUsa=function(){function t(t){var n=t[0],e=t[1];return a=null,i.point(n,e),a||(o.point(n,e),a)||(u.point(n,e),a)}function n(){return e=r=null,t}var e,r,i,o,u,a,c=Hi(),s=Bi().rotate([154,0]).center([-2,58.5]).parallels([55,65]),f=Bi().rotate([157,0]).center([-3,19.9]).parallels([8,18]),l={point:function(t,n){a=[t,n]}};return t.invert=function(t){var n=c.scale(),e=c.translate(),r=(t[0]-e[0])/n,i=(t[1]-e[1])/n;return(i>=.12&&i<.234&&r>=-.425&&r<-.214?s:i>=.166&&i<.234&&r>=-.214&&r<-.115?f:c).invert(t)},t.stream=function(t){return e&&r===t?e:e=function(t){var n=t.length;return{point:function(e,r){for(var i=-1;++i<n;)t[i].point(e,r)},sphere:function(){for(var e=-1;++e<n;)t[e].sphere()},lineStart:function(){for(var e=-1;++e<n;)t[e].lineStart()},lineEnd:function(){for(var e=-1;++e<n;)t[e].lineEnd()},polygonStart:function(){for(var e=-1;++e<n;)t[e].polygonStart()},polygonEnd:function(){for(var e=-1;++e<n;)t[e].polygonEnd()}}}([c.stream(r=t),s.stream(t),f.stream(t)])},t.precision=function(t){return arguments.length?(c.precision(t),s.precision(t),f.precision(t),n()):c.precision()},t.scale=function(n){return arguments.length?(c.scale(n),s.scale(.35*n),f.scale(n),t.translate(c.translate())):c.scale()},t.translate=function(t){if(!arguments.length)return c.translate();var e=c.scale(),r=+t[0],a=+t[1];return i=c.translate(t).clipExtent([[r-.455*e,a-.238*e],[r+.455*e,a+.238*e]]).stream(l),o=s.translate([r-.307*e,a+.201*e]).clipExtent([[r-.425*e+bp,a+.12*e+bp],[r-.214*e-bp,a+.234*e-bp]]).stream(l),u=f.translate([r-.205*e,a+.212*e]).clipExtent([[r-.214*e+bp,a+.166*e+bp],[r-.115*e-bp,a+.234*e-bp]]).stream(l),n()},t.fitExtent=function(n,e){return Ri(t,n,e)},t.fitSize=function(n,e){return Li(t,n,e)},t.fitWidth=function(n,e){return qi(t,n,e)},t.fitHeight=function(n,e){return Di(t,n,e)},t.scale(1070)},t.geoAzimuthalEqualArea=function(){return 
Oi(jd).scale(124.75).clipAngle(179.999)},t.geoAzimuthalEqualAreaRaw=jd,t.geoAzimuthalEquidistant=function(){return Oi(Xd).scale(79.4188).clipAngle(179.999)},t.geoAzimuthalEquidistantRaw=Xd,t.geoConicConformal=function(){return Ii(Zi).scale(109.5).parallels([30,30])},t.geoConicConformalRaw=Zi,t.geoConicEqualArea=Bi,t.geoConicEqualAreaRaw=Yi,t.geoConicEquidistant=function(){return Ii(Qi).scale(131.154).center([0,13.9389])},t.geoConicEquidistantRaw=Qi,t.geoEquirectangular=function(){return Oi(Gi).scale(152.63)},t.geoEquirectangularRaw=Gi,t.geoGnomonic=function(){return Oi(Ji).scale(144.049).clipAngle(60)},t.geoGnomonicRaw=Ji,t.geoIdentity=function(){function t(){return i=o=null,u}var n,e,r,i,o,u,a=1,c=0,s=0,f=1,l=1,h=li,p=null,d=li;return u={stream:function(t){return i&&o===t?i:i=h(d(o=t))},postclip:function(i){return arguments.length?(d=i,p=n=e=r=null,t()):d},clipExtent:function(i){return arguments.length?(d=null==i?(p=n=e=r=null,li):Gr(p=+i[0][0],n=+i[0][1],e=+i[1][0],r=+i[1][1]),t()):null==p?null:[[p,n],[e,r]]},scale:function(n){return arguments.length?(h=Ki((a=+n)*f,a*l,c,s),t()):a},translate:function(n){return arguments.length?(h=Ki(a*f,a*l,c=+n[0],s=+n[1]),t()):[c,s]},reflectX:function(n){return arguments.length?(h=Ki(a*(f=n?-1:1),a*l,c,s),t()):f<0},reflectY:function(n){return arguments.length?(h=Ki(a*f,a*(l=n?-1:1),c,s),t()):l<0},fitExtent:function(t,n){return Ri(u,t,n)},fitSize:function(t,n){return Li(u,t,n)},fitWidth:function(t,n){return qi(u,t,n)},fitHeight:function(t,n){return Di(u,t,n)}}},t.geoProjection=Oi,t.geoProjectionMutator=Fi,t.geoMercator=function(){return $i(Vi).scale(961/kp)},t.geoMercatorRaw=Vi,t.geoNaturalEarth1=function(){return Oi(to).scale(175.295)},t.geoNaturalEarth1Raw=to,t.geoOrthographic=function(){return Oi(no).scale(249.5).clipAngle(90+bp)},t.geoOrthographicRaw=no,t.geoStereographic=function(){return Oi(eo).scale(250).clipAngle(142)},t.geoStereographicRaw=eo,t.geoTransverseMercator=function(){var t=$i(ro),n=t.center,e=t.rotate;return t.center=function(t){return arguments.length?n([-t[1],t[0]]):(t=n(),[t[1],-t[0]])},t.rotate=function(t){return arguments.length?e([t[0],t[1],t.length>2?t[2]+90:90]):(t=e(),[t[0],t[1],t[2]-90])},e([0,0,90]).scale(159.155)},t.geoTransverseMercatorRaw=ro,t.geoRotation=Ur,t.geoStream=Je,t.geoTransform=function(t){return{stream:Ci(t)}},t.cluster=function(){function t(t){var o,u=0;t.eachAfter(function(t){var e=t.children;e?(t.x=function(t){return t.reduce(oo,0)/t.length}(e),t.y=function(t){return 1+t.reduce(uo,0)}(e)):(t.x=o?u+=n(t,o):0,t.y=0,o=t)});var a=function(t){for(var n;n=t.children;)t=n[0];return t}(t),c=function(t){for(var n;n=t.children;)t=n[n.length-1];return t}(t),s=a.x-n(a,c)/2,f=c.x+n(c,a)/2;return t.eachAfter(i?function(n){n.x=(n.x-t.x)*e,n.y=(t.y-n.y)*r}:function(n){n.x=(n.x-s)/(f-s)*e,n.y=(1-(t.y?n.y/t.y:1))*r})}var n=io,e=1,r=1,i=!1;return t.separation=function(e){return arguments.length?(n=e,t):n},t.size=function(n){return arguments.length?(i=!1,e=+n[0],r=+n[1],t):i?null:[e,r]},t.nodeSize=function(n){return arguments.length?(i=!0,e=+n[0],r=+n[1],t):i?[e,r]:null},t},t.hierarchy=co,t.pack=function(){function t(t){return t.x=e/2,t.y=r/2,n?t.eachBefore(Ao(n)).eachAfter(Co(i,.5)).eachBefore(zo(1)):t.eachBefore(Ao(Eo)).eachAfter(Co(ko,1)).eachAfter(Co(i,t.r/Math.min(e,r))).eachBefore(zo(Math.min(e,r)/(2*t.r))),t}var n=null,e=1,r=1,i=ko;return t.radius=function(e){return arguments.length?(n=function(t){return null==t?null:No(t)}(e),t):n},t.size=function(n){return 
arguments.length?(e=+n[0],r=+n[1],t):[e,r]},t.padding=function(n){return arguments.length?(i="function"==typeof n?n:So(+n),t):i},t},t.packSiblings=function(t){return To(t),t},t.packEnclose=po,t.partition=function(){function t(t){var o=t.height+1;return t.x0=t.y0=r,t.x1=n,t.y1=e/o,t.eachBefore(function(t,n){return function(e){e.children&&Ro(e,e.x0,t*(e.depth+1)/n,e.x1,t*(e.depth+2)/n);var i=e.x0,o=e.y0,u=e.x1-r,a=e.y1-r;u<i&&(i=u=(i+u)/2),a<o&&(o=a=(o+a)/2),e.x0=i,e.y0=o,e.x1=u,e.y1=a}}(e,o)),i&&t.eachBefore(Po),t}var n=1,e=1,r=0,i=!1;return t.round=function(n){return arguments.length?(i=!!n,t):i},t.size=function(r){return arguments.length?(n=+r[0],e=+r[1],t):[n,e]},t.padding=function(n){return arguments.length?(r=+n,t):r},t},t.stratify=function(){function t(t){var r,i,o,u,a,c,s,f=t.length,l=new Array(f),h={};for(i=0;i<f;++i)r=t[i],a=l[i]=new ho(r),null!=(c=n(r,i,t))&&(c+="")&&(h[s=$d+(a.id=c)]=s in h?Zd:a);for(i=0;i<f;++i)if(a=l[i],null!=(c=e(t[i],i,t))&&(c+="")){if(!(u=h[$d+c]))throw new Error("missing: "+c);if(u===Zd)throw new Error("ambiguous: "+c);u.children?u.children.push(a):u.children=[a],a.parent=u}else{if(o)throw new Error("multiple roots");o=a}if(!o)throw new Error("no root");if(o.parent=Wd,o.eachBefore(function(t){t.depth=t.parent.depth+1,--f}).eachBefore(lo),o.parent=null,f>0)throw new Error("cycle");return o}var n=Lo,e=qo;return t.id=function(e){return arguments.length?(n=No(e),t):n},t.parentId=function(n){return arguments.length?(e=No(n),t):e},t},t.tree=function(){function t(t){var c=function(t){for(var n,e,r,i,o,u=new Yo(t,0),a=[u];n=a.pop();)if(r=n._.children)for(n.children=new Array(o=r.length),i=o-1;i>=0;--i)a.push(e=n.children[i]=new Yo(r[i],i)),e.parent=n;return(u.parent=new Yo(null,0)).children=[u],u}(t);if(c.eachAfter(n),c.parent.m=-c.z,c.eachBefore(e),a)t.eachBefore(r);else{var s=t,f=t,l=t;t.eachBefore(function(t){t.x<s.x&&(s=t),t.x>f.x&&(f=t),t.depth>l.depth&&(l=t)});var h=s===f?1:i(s,f)/2,p=h-s.x,d=o/(f.x+h+p),v=u/(l.depth||1);t.eachBefore(function(t){t.x=(t.x+p)*d,t.y=t.depth*v})}return t}function n(t){var n=t.children,e=t.parent.children,r=t.i?e[t.i-1]:null;if(n){(function(t){for(var n,e=0,r=0,i=t.children,o=i.length;--o>=0;)(n=i[o]).z+=e,n.m+=e,e+=n.s+(r+=n.c)})(t);var o=(n[0].z+n[n.length-1].z)/2;r?(t.z=r.z+i(t._,r._),t.m=t.z-o):t.z=o}else r&&(t.z=r.z+i(t._,r._));t.parent.A=function(t,n,e){if(n){for(var r,o=t,u=t,a=n,c=o.parent.children[0],s=o.m,f=u.m,l=a.m,h=c.m;a=Oo(a),o=Uo(o),a&&o;)c=Uo(c),(u=Oo(u)).a=t,(r=a.z+l-o.z-s+i(a._,o._))>0&&(Fo(Io(a,t,e),t,r),s+=r,f+=r),l+=a.m,s+=o.m,h+=c.m,f+=u.m;a&&!Oo(u)&&(u.t=a,u.m+=l-f),o&&!Uo(c)&&(c.t=o,c.m+=s-h,e=t)}return e}(t,r,t.parent.A||e[0])}function e(t){t._.x=t.z+t.parent.m,t.m+=t.parent.m}function r(t){t.x*=o,t.y=t.depth*u}var i=Do,o=1,u=1,a=null;return t.separation=function(n){return arguments.length?(i=n,t):i},t.size=function(n){return arguments.length?(a=!1,o=+n[0],u=+n[1],t):a?null:[o,u]},t.nodeSize=function(n){return arguments.length?(a=!0,o=+n[0],u=+n[1],t):a?[o,u]:null},t},t.treemap=function(){function t(t){return t.x0=t.y0=0,t.x1=i,t.y1=o,t.eachBefore(n),u=[0],r&&t.eachBefore(Po),t}function n(t){var n=u[t.depth],r=t.x0+n,i=t.y0+n,o=t.x1-n,h=t.y1-n;o<r&&(r=o=(r+o)/2),h<i&&(i=h=(i+h)/2),t.x0=r,t.y0=i,t.x1=o,t.y1=h,t.children&&(n=u[t.depth+1]=a(t)/2,r+=l(t)-n,i+=c(t)-n,o-=s(t)-n,h-=f(t)-n,o<r&&(r=o=(r+o)/2),h<i&&(i=h=(i+h)/2),e(t,r,i,o,h))}var e=Qd,r=!1,i=1,o=1,u=[0],a=ko,c=ko,s=ko,f=ko,l=ko;return t.round=function(n){return arguments.length?(r=!!n,t):r},t.size=function(n){return 
arguments.length?(i=+n[0],o=+n[1],t):[i,o]},t.tile=function(n){return arguments.length?(e=No(n),t):e},t.padding=function(n){return arguments.length?t.paddingInner(n).paddingOuter(n):t.paddingInner()},t.paddingInner=function(n){return arguments.length?(a="function"==typeof n?n:So(+n),t):a},t.paddingOuter=function(n){return arguments.length?t.paddingTop(n).paddingRight(n).paddingBottom(n).paddingLeft(n):t.paddingTop()},t.paddingTop=function(n){return arguments.length?(c="function"==typeof n?n:So(+n),t):c},t.paddingRight=function(n){return arguments.length?(s="function"==typeof n?n:So(+n),t):s},t.paddingBottom=function(n){return arguments.length?(f="function"==typeof n?n:So(+n),t):f},t.paddingLeft=function(n){return arguments.length?(l="function"==typeof n?n:So(+n),t):l},t},t.treemapBinary=function(t,n,e,r,i){function o(t,n,e,r,i,u,a){if(t>=n-1){var s=c[t];return s.x0=r,s.y0=i,s.x1=u,void(s.y1=a)}for(var l=f[t],h=e/2+l,p=t+1,d=n-1;p<d;){var v=p+d>>>1;f[v]<h?p=v+1:d=v}h-f[p-1]<f[p]-h&&t+1<p&&--p;var g=f[p]-l,_=e-g;if(u-r>a-i){var y=(r*_+u*g)/e;o(t,p,g,r,i,y,a),o(p,n,_,y,i,u,a)}else{var m=(i*_+a*g)/e;o(t,p,g,r,i,u,m),o(p,n,_,r,m,u,a)}}var u,a,c=t.children,s=c.length,f=new Array(s+1);for(f[0]=a=u=0;u<s;++u)f[u+1]=a+=c[u].value;o(0,s,t.value,n,e,r,i)},t.treemapDice=Ro,t.treemapSlice=Bo,t.treemapSliceDice=function(t,n,e,r,i){(1&t.depth?Bo:Ro)(t,n,e,r,i)},t.treemapSquarify=Qd,t.treemapResquarify=Jd,t.interpolate=cn,t.interpolateArray=en,t.interpolateBasis=Wt,t.interpolateBasisClosed=Zt,t.interpolateDate=rn,t.interpolateNumber=on,t.interpolateObject=un,t.interpolateRound=sn,t.interpolateString=an,t.interpolateTransformCss=Wf,t.interpolateTransformSvg=Zf,t.interpolateZoom=pn,t.interpolateRgb=Yf,t.interpolateRgbBasis=Bf,t.interpolateRgbBasisClosed=Hf,t.interpolateHsl=tl,t.interpolateHslLong=nl,t.interpolateLab=function(t,n){var e=tn((t=Ut(t)).l,(n=Ut(n)).l),r=tn(t.a,n.a),i=tn(t.b,n.b),o=tn(t.opacity,n.opacity);return function(n){return t.l=e(n),t.a=r(n),t.b=i(n),t.opacity=o(n),t+""}},t.interpolateHcl=el,t.interpolateHclLong=rl,t.interpolateCubehelix=il,t.interpolateCubehelixLong=ol,t.quantize=function(t,n){for(var e=new Array(n),r=0;r<n;++r)e[r]=t(r/(n-1));return e},t.path=te,t.polygonArea=function(t){for(var n,e=-1,r=t.length,i=t[r-1],o=0;++e<r;)n=i,i=t[e],o+=n[1]*i[0]-n[0]*i[1];return o/2},t.polygonCentroid=function(t){for(var n,e,r=-1,i=t.length,o=0,u=0,a=t[i-1],c=0;++r<i;)n=a,a=t[r],c+=e=n[0]*a[1]-a[0]*n[1],o+=(n[0]+a[0])*e,u+=(n[1]+a[1])*e;return c*=3,[o/c,u/c]},t.polygonHull=function(t){if((e=t.length)<3)return null;var n,e,r=new Array(e),i=new Array(e);for(n=0;n<e;++n)r[n]=[+t[n][0],+t[n][1],n];for(r.sort(Xo),n=0;n<e;++n)i[n]=[r[n][0],-r[n][1]];var o=Vo(r),u=Vo(i),a=u[0]===o[0],c=u[u.length-1]===o[o.length-1],s=[];for(n=o.length-1;n>=0;--n)s.push(t[r[o[n]][2]]);for(n=+a;n<u.length-c;++n)s.push(t[r[u[n]][2]]);return s},t.polygonContains=function(t,n){for(var e,r,i=t.length,o=t[i-1],u=n[0],a=n[1],c=o[0],s=o[1],f=!1,l=0;l<i;++l)e=(o=t[l])[0],(r=o[1])>a!=s>a&&u<(c-e)*(a-r)/(s-r)+e&&(f=!f),c=e,s=r;return f},t.polygonLength=function(t){for(var n,e,r=-1,i=t.length,o=t[i-1],u=o[0],a=o[1],c=0;++r<i;)n=u,e=a,n-=u=(o=t[r])[0],e-=a=o[1],c+=Math.sqrt(n*n+e*e);return c},t.quadtree=we,t.queue=Qo,t.randomUniform=nv,t.randomNormal=ev,t.randomLogNormal=rv,t.randomBates=ov,t.randomIrwinHall=iv,t.randomExponential=uv,t.request=Ko,t.html=av,t.json=cv,t.text=sv,t.xml=fv,t.csv=lv,t.tsv=hv,t.scaleBand=ru,t.scalePoint=function(){return 
iu(ru().paddingInner(1))},t.scaleIdentity=du,t.scaleLinear=pu,t.scaleLog=wu,t.scaleOrdinal=eu,t.scaleImplicit=gv,t.scalePow=Tu,t.scaleSqrt=function(){return Tu().exponent(.5)},t.scaleQuantile=Nu,t.scaleQuantize=ku,t.scaleThreshold=Su,t.scaleTime=function(){return ja(Wv,Vv,Pv,Cv,Ev,kv,Tv,xv,t.timeFormat).domain([new Date(2e3,0,1),new Date(2e3,0,2)])},t.scaleUtc=function(){return ja(yg,gg,eg,tg,Jv,Gv,Tv,xv,t.utcFormat).domain([Date.UTC(2e3,0,1),Date.UTC(2e3,0,2)])},t.schemeCategory10=qg,t.schemeCategory20b=Dg,t.schemeCategory20c=Ug,t.schemeCategory20=Og,t.interpolateCubehelixDefault=Fg,t.interpolateRainbow=function(t){(t<0||t>1)&&(t-=Math.floor(t));var n=Math.abs(t-.5);return Bg.h=360*t-100,Bg.s=1.5-1.5*n,Bg.l=.8-.9*n,Bg+""},t.interpolateWarm=Ig,t.interpolateCool=Yg,t.interpolateViridis=Hg,t.interpolateMagma=jg,t.interpolateInferno=Xg,t.interpolatePlasma=Vg,t.scaleSequential=$a,t.creator=A,t.local=C,t.matcher=rf,t.mouse=F,t.namespace=E,t.namespaces=Js,t.clientPoint=O,t.select=lt,t.selectAll=function(t){return"string"==typeof t?new st([document.querySelectorAll(t)],[document.documentElement]):new st([null==t?[]:t],af)},t.selection=ft,t.selector=Y,t.selectorAll=H,t.style=G,t.touch=ht,t.touches=function(t,n){null==n&&(n=U().touches);for(var e=0,r=n?n.length:0,i=new Array(r);e<r;++e)i[e]=O(t,n[e]);return i},t.window=Z,t.customEvent=D,t.arc=function(){function t(){var t,s,f=+n.apply(this,arguments),l=+e.apply(this,arguments),h=o.apply(this,arguments)-e_,p=u.apply(this,arguments)-e_,d=$g(p-h),v=p>h;if(c||(c=t=te()),l<f&&(s=l,l=f,f=s),l>t_)if(d>r_-t_)c.moveTo(l*Zg(h),l*Jg(h)),c.arc(0,0,l,h,p,!v),f>t_&&(c.moveTo(f*Zg(p),f*Jg(p)),c.arc(0,0,f,p,h,v));else{var g,_,y=h,m=p,x=h,b=p,w=d,M=d,T=a.apply(this,arguments)/2,N=T>t_&&(i?+i.apply(this,arguments):Kg(f*f+l*l)),k=Qg($g(l-f)/2,+r.apply(this,arguments)),S=k,E=k;if(N>t_){var A=Za(N/f*Jg(T)),C=Za(N/l*Jg(T));(w-=2*A)>t_?(A*=v?1:-1,x+=A,b-=A):(w=0,x=b=(h+p)/2),(M-=2*C)>t_?(C*=v?1:-1,y+=C,m-=C):(M=0,y=m=(h+p)/2)}var z=l*Zg(y),P=l*Jg(y),R=f*Zg(b),L=f*Jg(b);if(k>t_){var q=l*Zg(m),D=l*Jg(m),U=f*Zg(x),O=f*Jg(x);if(d<n_){var F=w>t_?function(t,n,e,r,i,o,u,a){var c=e-t,s=r-n,f=u-i,l=a-o,h=(f*(n-o)-l*(t-i))/(l*c-f*s);return[t+h*c,n+h*s]}(z,P,U,O,q,D,R,L):[R,L],I=z-F[0],Y=P-F[1],B=q-F[0],H=D-F[1],j=1/Jg(function(t){return t>1?0:t<-1?n_:Math.acos(t)}((I*B+Y*H)/(Kg(I*I+Y*Y)*Kg(B*B+H*H)))/2),X=Kg(F[0]*F[0]+F[1]*F[1]);S=Qg(k,(f-X)/(j-1)),E=Qg(k,(l-X)/(j+1))}}M>t_?E>t_?(g=nc(U,O,z,P,l,E,v),_=nc(q,D,R,L,l,E,v),c.moveTo(g.cx+g.x01,g.cy+g.y01),E<k?c.arc(g.cx,g.cy,E,Wg(g.y01,g.x01),Wg(_.y01,_.x01),!v):(c.arc(g.cx,g.cy,E,Wg(g.y01,g.x01),Wg(g.y11,g.x11),!v),c.arc(0,0,l,Wg(g.cy+g.y11,g.cx+g.x11),Wg(_.cy+_.y11,_.cx+_.x11),!v),c.arc(_.cx,_.cy,E,Wg(_.y11,_.x11),Wg(_.y01,_.x01),!v))):(c.moveTo(z,P),c.arc(0,0,l,y,m,!v)):c.moveTo(z,P),f>t_&&w>t_?S>t_?(g=nc(R,L,q,D,f,-S,v),_=nc(z,P,U,O,f,-S,v),c.lineTo(g.cx+g.x01,g.cy+g.y01),S<k?c.arc(g.cx,g.cy,S,Wg(g.y01,g.x01),Wg(_.y01,_.x01),!v):(c.arc(g.cx,g.cy,S,Wg(g.y01,g.x01),Wg(g.y11,g.x11),!v),c.arc(0,0,f,Wg(g.cy+g.y11,g.cx+g.x11),Wg(_.cy+_.y11,_.cx+_.x11),v),c.arc(_.cx,_.cy,S,Wg(_.y11,_.x11),Wg(_.y01,_.x01),!v))):c.arc(0,0,f,b,x,v):c.lineTo(R,L)}else c.moveTo(0,0);if(c.closePath(),t)return c=null,t+""||null}var n=Ga,e=Qa,r=Wa(0),i=null,o=Ja,u=Ka,a=tc,c=null;return t.centroid=function(){var t=(+n.apply(this,arguments)+ +e.apply(this,arguments))/2,r=(+o.apply(this,arguments)+ +u.apply(this,arguments))/2-n_/2;return[Zg(r)*t,Jg(r)*t]},t.innerRadius=function(e){return arguments.length?(n="function"==typeof 
e?e:Wa(+e),t):n},t.outerRadius=function(n){return arguments.length?(e="function"==typeof n?n:Wa(+n),t):e},t.cornerRadius=function(n){return arguments.length?(r="function"==typeof n?n:Wa(+n),t):r},t.padRadius=function(n){return arguments.length?(i=null==n?null:"function"==typeof n?n:Wa(+n),t):i},t.startAngle=function(n){return arguments.length?(o="function"==typeof n?n:Wa(+n),t):o},t.endAngle=function(n){return arguments.length?(u="function"==typeof n?n:Wa(+n),t):u},t.padAngle=function(n){return arguments.length?(a="function"==typeof n?n:Wa(+n),t):a},t.context=function(n){return arguments.length?(c=null==n?null:n,t):c},t},t.area=ac,t.line=uc,t.pie=function(){function t(t){var a,c,s,f,l,h=t.length,p=0,d=new Array(h),v=new Array(h),g=+i.apply(this,arguments),_=Math.min(r_,Math.max(-r_,o.apply(this,arguments)-g)),y=Math.min(Math.abs(_)/h,u.apply(this,arguments)),m=y*(_<0?-1:1);for(a=0;a<h;++a)(l=v[d[a]=a]=+n(t[a],a,t))>0&&(p+=l);for(null!=e?d.sort(function(t,n){return e(v[t],v[n])}):null!=r&&d.sort(function(n,e){return r(t[n],t[e])}),a=0,s=p?(_-h*m)/p:0;a<h;++a,g=f)c=d[a],f=g+((l=v[c])>0?l*s:0)+m,v[c]={data:t[c],index:a,value:l,startAngle:g,endAngle:f,padAngle:y};return v}var n=sc,e=cc,r=null,i=Wa(0),o=Wa(r_),u=Wa(0);return t.value=function(e){return arguments.length?(n="function"==typeof e?e:Wa(+e),t):n},t.sortValues=function(n){return arguments.length?(e=n,r=null,t):e},t.sort=function(n){return arguments.length?(r=n,e=null,t):r},t.startAngle=function(n){return arguments.length?(i="function"==typeof n?n:Wa(+n),t):i},t.endAngle=function(n){return arguments.length?(o="function"==typeof n?n:Wa(+n),t):o},t.padAngle=function(n){return arguments.length?(u="function"==typeof n?n:Wa(+n),t):u},t},t.areaRadial=dc,t.radialArea=dc,t.lineRadial=pc,t.radialLine=pc,t.pointRadial=vc,t.linkHorizontal=function(){return yc(mc)},t.linkVertical=function(){return yc(xc)},t.linkRadial=function(){var t=yc(bc);return t.angle=t.x,delete t.x,t.radius=t.y,delete t.y,t},t.symbol=function(){function t(){var t;if(r||(r=t=te()),n.apply(this,arguments).draw(r,+e.apply(this,arguments)),t)return r=null,t+""||null}var n=Wa(u_),e=Wa(64),r=null;return t.type=function(e){return arguments.length?(n="function"==typeof e?e:Wa(e),t):n},t.size=function(n){return arguments.length?(e="function"==typeof n?n:Wa(+n),t):e},t.context=function(n){return arguments.length?(r=null==n?null:n,t):r},t},t.symbols=w_,t.symbolCircle=u_,t.symbolCross=a_,t.symbolDiamond=f_,t.symbolSquare=v_,t.symbolStar=d_,t.symbolTriangle=__,t.symbolWye=b_,t.curveBasisClosed=function(t){return new Nc(t)},t.curveBasisOpen=function(t){return new kc(t)},t.curveBasis=function(t){return new Tc(t)},t.curveBundle=M_,t.curveCardinalClosed=N_,t.curveCardinalOpen=k_,t.curveCardinal=T_,t.curveCatmullRomClosed=E_,t.curveCatmullRomOpen=A_,t.curveCatmullRom=S_,t.curveLinearClosed=function(t){return new Dc(t)},t.curveLinear=rc,t.curveMonotoneX=function(t){return new Yc(t)},t.curveMonotoneY=function(t){return new Bc(t)},t.curveNatural=function(t){return new jc(t)},t.curveStep=function(t){return new Vc(t,.5)},t.curveStepAfter=function(t){return new Vc(t,1)},t.curveStepBefore=function(t){return new Vc(t,0)},t.stack=function(){function t(t){var o,u,a=n.apply(this,arguments),c=t.length,s=a.length,f=new Array(s);for(o=0;o<s;++o){for(var l,h=a[o],p=f[o]=new Array(c),d=0;d<c;++d)p[d]=l=[0,+i(t[d],h,d,t)],l.data=t[d];p.key=h}for(o=0,u=e(f);o<s;++o)f[u[o]].index=o;return r(f,u),f}var n=Wa([]),e=Wc,r=$c,i=Zc;return t.keys=function(e){return arguments.length?(n="function"==typeof 
e?e:Wa(o_.call(e)),t):n},t.value=function(n){return arguments.length?(i="function"==typeof n?n:Wa(+n),t):i},t.order=function(n){return arguments.length?(e=null==n?Wc:"function"==typeof n?n:Wa(o_.call(n)),t):e},t.offset=function(n){return arguments.length?(r=null==n?$c:n,t):r},t},t.stackOffsetExpand=function(t,n){if((r=t.length)>0){for(var e,r,i,o=0,u=t[0].length;o<u;++o){for(i=e=0;e<r;++e)i+=t[e][o][1]||0;if(i)for(e=0;e<r;++e)t[e][o][1]/=i}$c(t,n)}},t.stackOffsetDiverging=function(t,n){if((a=t.length)>1)for(var e,r,i,o,u,a,c=0,s=t[n[0]].length;c<s;++c)for(o=u=0,e=0;e<a;++e)(i=(r=t[n[e]][c])[1]-r[0])>=0?(r[0]=o,r[1]=o+=i):i<0?(r[1]=u,r[0]=u+=i):r[0]=o},t.stackOffsetNone=$c,t.stackOffsetSilhouette=function(t,n){if((e=t.length)>0){for(var e,r=0,i=t[n[0]],o=i.length;r<o;++r){for(var u=0,a=0;u<e;++u)a+=t[u][r][1]||0;i[r][1]+=i[r][0]=-a/2}$c(t,n)}},t.stackOffsetWiggle=function(t,n){if((i=t.length)>0&&(r=(e=t[n[0]]).length)>0){for(var e,r,i,o=0,u=1;u<r;++u){for(var a=0,c=0,s=0;a<i;++a){for(var f=t[n[a]],l=f[u][1]||0,h=(l-(f[u-1][1]||0))/2,p=0;p<a;++p){var d=t[n[p]];h+=(d[u][1]||0)-(d[u-1][1]||0)}c+=l,s+=h*l}e[u-1][1]+=e[u-1][0]=o,c&&(o-=s/c)}e[u-1][1]+=e[u-1][0]=o,$c(t,n)}},t.stackOrderAscending=Gc,t.stackOrderDescending=function(t){return Gc(t).reverse()},t.stackOrderInsideOut=function(t){var n,e,r=t.length,i=t.map(Qc),o=Wc(t).sort(function(t,n){return i[n]-i[t]}),u=0,a=0,c=[],s=[];for(n=0;n<r;++n)e=o[n],u<a?(u+=i[e],c.push(e)):(a+=i[e],s.push(e));return s.reverse().concat(c)},t.stackOrderNone=Wc,t.stackOrderReverse=function(t){return Wc(t).reverse()},t.timeInterval=Eu,t.timeMillisecond=xv,t.timeMilliseconds=bv,t.utcMillisecond=xv,t.utcMilliseconds=bv,t.timeSecond=Tv,t.timeSeconds=Nv,t.utcSecond=Tv,t.utcSeconds=Nv,t.timeMinute=kv,t.timeMinutes=Sv,t.timeHour=Ev,t.timeHours=Av,t.timeDay=Cv,t.timeDays=zv,t.timeWeek=Pv,t.timeWeeks=Fv,t.timeSunday=Pv,t.timeSundays=Fv,t.timeMonday=Rv,t.timeMondays=Iv,t.timeTuesday=Lv,t.timeTuesdays=Yv,t.timeWednesday=qv,t.timeWednesdays=Bv,t.timeThursday=Dv,t.timeThursdays=Hv,t.timeFriday=Uv,t.timeFridays=jv,t.timeSaturday=Ov,t.timeSaturdays=Xv,t.timeMonth=Vv,t.timeMonths=$v,t.timeYear=Wv,t.timeYears=Zv,t.utcMinute=Gv,t.utcMinutes=Qv,t.utcHour=Jv,t.utcHours=Kv,t.utcDay=tg,t.utcDays=ng,t.utcWeek=eg,t.utcWeeks=sg,t.utcSunday=eg,t.utcSundays=sg,t.utcMonday=rg,t.utcMondays=fg,t.utcTuesday=ig,t.utcTuesdays=lg,t.utcWednesday=og,t.utcWednesdays=hg,t.utcThursday=ug,t.utcThursdays=pg,t.utcFriday=ag,t.utcFridays=dg,t.utcSaturday=cg,t.utcSaturdays=vg,t.utcMonth=gg,t.utcMonths=_g,t.utcYear=yg,t.utcYears=xg,t.timeFormatDefaultLocale=Ya,t.timeFormatLocale=Lu,t.isoFormat=kg,t.isoParse=Sg,t.now=_n,t.timer=xn,t.timerFlush=bn,t.timeout=Nn,t.interval=function(t,n,e){var r=new mn,i=n;return null==n?(r.restart(t,n,e),r):(n=+n,e=null==e?_n():+e,r.restart(function o(u){u+=i,r.restart(o,i+=n,e),t(u)},n,e),r)},t.transition=Ln,t.active=function(t,n){var e,r,i=t.__transition;if(i){n=null==n?null:n+"";for(r in i)if((e=i[r]).state>yl&&e.name===n)return new Rn([[t]],Gl,n,+r)}return null},t.interrupt=Cn,t.voronoi=function(){function t(t){return new Ms(t.map(function(r,i){var o=[Math.round(n(r,i,t)/U_)*U_,Math.round(e(r,i,t)/U_)*U_];return o.index=i,o.data=r,o}),r)}var n=Kc,e=ts,r=null;return t.polygons=function(n){return t(n).polygons()},t.links=function(n){return t(n).links()},t.triangles=function(n){return t(n).triangles()},t.x=function(e){return arguments.length?(n="function"==typeof e?e:Jc(+e),t):n},t.y=function(n){return arguments.length?(e="function"==typeof 
n?n:Jc(+n),t):e},t.extent=function(n){return arguments.length?(r=null==n?null:[[+n[0][0],+n[0][1]],[+n[1][0],+n[1][1]]],t):r&&[[r[0][0],r[0][1]],[r[1][0],r[1][1]]]},t.size=function(n){return arguments.length?(r=null==n?null:[[0,0],[+n[0],+n[1]]],t):r&&[r[1][0]-r[0][0],r[1][1]-r[0][1]]},t},t.zoom=function(){function n(t){t.property("__zoom",zs).on("wheel.zoom",c).on("mousedown.zoom",s).on("dblclick.zoom",f).filter(x).on("touchstart.zoom",l).on("touchmove.zoom",h).on("touchend.zoom touchcancel.zoom",p).style("touch-action","none").style("-webkit-tap-highlight-color","rgba(0,0,0,0)")}function e(t,n){return(n=Math.max(b[0],Math.min(b[1],n)))===t.k?t:new Ns(n,t.x,t.y)}function r(t,n,e){var r=n[0]-e[0]*t.k,i=n[1]-e[1]*t.k;return r===t.x&&i===t.y?t:new Ns(t.k,r,i)}function i(t){return[(+t[0][0]+ +t[1][0])/2,(+t[0][1]+ +t[1][1])/2]}function o(t,n,e){t.on("start.zoom",function(){u(this,arguments).start()}).on("interrupt.zoom end.zoom",function(){u(this,arguments).end()}).tween("zoom",function(){var t=arguments,r=u(this,t),o=_.apply(this,t),a=e||i(o),c=Math.max(o[1][0]-o[0][0],o[1][1]-o[0][1]),s=this.__zoom,f="function"==typeof n?n.apply(this,t):n,l=T(s.invert(a).concat(c/s.k),f.invert(a).concat(c/f.k));return function(t){if(1===t)t=f;else{var n=l(t),e=c/n[2];t=new Ns(e,a[0]-n[0]*e,a[1]-n[1]*e)}r.zoom(null,t)}})}function u(t,n){for(var e,r=0,i=k.length;r<i;++r)if((e=k[r]).that===t)return e;return new a(t,n)}function a(t,n){this.that=t,this.args=n,this.index=-1,this.active=0,this.extent=_.apply(t,n)}function c(){if(g.apply(this,arguments)){var t=u(this,arguments),n=this.__zoom,i=Math.max(b[0],Math.min(b[1],n.k*Math.pow(2,m.apply(this,arguments)))),o=F(this);if(t.wheel)t.mouse[0][0]===o[0]&&t.mouse[0][1]===o[1]||(t.mouse[1]=n.invert(t.mouse[0]=o)),clearTimeout(t.wheel);else{if(n.k===i)return;t.mouse=[o,n.invert(o)],Cn(this),t.start()}Es(),t.wheel=setTimeout(function(){t.wheel=null,t.end()},A),t.zoom("mouse",y(r(e(n,i),t.mouse[0],t.mouse[1]),t.extent,w))}}function s(){if(!v&&g.apply(this,arguments)){var n=u(this,arguments),e=lt(t.event.view).on("mousemove.zoom",function(){if(Es(),!n.moved){var e=t.event.clientX-o,i=t.event.clientY-a;n.moved=e*e+i*i>C}n.zoom("mouse",y(r(n.that.__zoom,n.mouse[0]=F(n.that),n.mouse[1]),n.extent,w))},!0).on("mouseup.zoom",function(){e.on("mousemove.zoom mouseup.zoom",null),gt(t.event.view,n.moved),Es(),n.end()},!0),i=F(this),o=t.event.clientX,a=t.event.clientY;vt(t.event.view),Ss(),n.mouse=[i,this.__zoom.invert(i)],Cn(this),n.start()}}function f(){if(g.apply(this,arguments)){var i=this.__zoom,u=F(this),a=i.invert(u),c=i.k*(t.event.shiftKey?.5:2),s=y(r(e(i,c),u,a),_.apply(this,arguments),w);Es(),M>0?lt(this).transition().duration(M).call(o,s,u):lt(this).call(n.transform,s)}}function l(){if(g.apply(this,arguments)){var n,e,r,i,o=u(this,arguments),a=t.event.changedTouches,c=a.length;for(Ss(),e=0;e<c;++e)i=[i=ht(this,a,(r=a[e]).identifier),this.__zoom.invert(i),r.identifier],o.touch0?o.touch1||(o.touch1=i):(o.touch0=i,n=!0);if(d&&(d=clearTimeout(d),!o.touch1))return o.end(),void((i=lt(this).on("dblclick.zoom"))&&i.apply(this,arguments));n&&(d=setTimeout(function(){d=null},E),Cn(this),o.start())}}function h(){var n,i,o,a,c=u(this,arguments),s=t.event.changedTouches,f=s.length;for(Es(),d&&(d=clearTimeout(d)),n=0;n<f;++n)o=ht(this,s,(i=s[n]).identifier),c.touch0&&c.touch0[2]===i.identifier?c.touch0[0]=o:c.touch1&&c.touch1[2]===i.identifier&&(c.touch1[0]=o);if(i=c.that.__zoom,c.touch1){var 
l=c.touch0[0],h=c.touch0[1],p=c.touch1[0],v=c.touch1[1],g=(g=p[0]-l[0])*g+(g=p[1]-l[1])*g,_=(_=v[0]-h[0])*_+(_=v[1]-h[1])*_;i=e(i,Math.sqrt(g/_)),o=[(l[0]+p[0])/2,(l[1]+p[1])/2],a=[(h[0]+v[0])/2,(h[1]+v[1])/2]}else{if(!c.touch0)return;o=c.touch0[0],a=c.touch0[1]}c.zoom("touch",y(r(i,o,a),c.extent,w))}function p(){var n,e,r=u(this,arguments),i=t.event.changedTouches,o=i.length;for(Ss(),v&&clearTimeout(v),v=setTimeout(function(){v=null},E),n=0;n<o;++n)e=i[n],r.touch0&&r.touch0[2]===e.identifier?delete r.touch0:r.touch1&&r.touch1[2]===e.identifier&&delete r.touch1;r.touch1&&!r.touch0&&(r.touch0=r.touch1,delete r.touch1),r.touch0?r.touch0[1]=this.__zoom.invert(r.touch0[0]):r.end()}var d,v,g=As,_=Cs,y=Ls,m=Ps,x=Rs,b=[0,1/0],w=[[-1/0,-1/0],[1/0,1/0]],M=250,T=pn,k=[],S=N("start","zoom","end"),E=500,A=150,C=0;return n.transform=function(t,n){var e=t.selection?t.selection():t;e.property("__zoom",zs),t!==e?o(t,n):e.interrupt().each(function(){u(this,arguments).start().zoom(null,"function"==typeof n?n.apply(this,arguments):n).end()})},n.scaleBy=function(t,e){n.scaleTo(t,function(){return this.__zoom.k*("function"==typeof e?e.apply(this,arguments):e)})},n.scaleTo=function(t,o){n.transform(t,function(){var t=_.apply(this,arguments),n=this.__zoom,u=i(t),a=n.invert(u),c="function"==typeof o?o.apply(this,arguments):o;return y(r(e(n,c),u,a),t,w)})},n.translateBy=function(t,e,r){n.transform(t,function(){return y(this.__zoom.translate("function"==typeof e?e.apply(this,arguments):e,"function"==typeof r?r.apply(this,arguments):r),_.apply(this,arguments),w)})},n.translateTo=function(t,e,r){n.transform(t,function(){var t=_.apply(this,arguments),n=this.__zoom,o=i(t);return y(F_.translate(o[0],o[1]).scale(n.k).translate("function"==typeof e?-e.apply(this,arguments):-e,"function"==typeof r?-r.apply(this,arguments):-r),t,w)})},a.prototype={start:function(){return 1==++this.active&&(this.index=k.push(this)-1,this.emit("start")),this},zoom:function(t,n){return this.mouse&&"mouse"!==t&&(this.mouse[1]=n.invert(this.mouse[0])),this.touch0&&"touch"!==t&&(this.touch0[1]=n.invert(this.touch0[0])),this.touch1&&"touch"!==t&&(this.touch1[1]=n.invert(this.touch1[0])),this.that.__zoom=n,this.emit("zoom"),this},end:function(){return 0==--this.active&&(k.splice(this.index,1),this.index=-1,this.emit("end")),this},emit:function(t){D(new function(t,n,e){this.target=t,this.type=n,this.transform=e}(n,t,this.that.__zoom),S.apply,S,[t,this.that,this.args])}},n.wheelDelta=function(t){return arguments.length?(m="function"==typeof t?t:Ts(+t),n):m},n.filter=function(t){return arguments.length?(g="function"==typeof t?t:Ts(!!t),n):g},n.touchable=function(t){return arguments.length?(x="function"==typeof t?t:Ts(!!t),n):x},n.extent=function(t){return arguments.length?(_="function"==typeof t?t:Ts([[+t[0][0],+t[0][1]],[+t[1][0],+t[1][1]]]),n):_},n.scaleExtent=function(t){return arguments.length?(b[0]=+t[0],b[1]=+t[1],n):[b[0],b[1]]},n.translateExtent=function(t){return arguments.length?(w[0][0]=+t[0][0],w[1][0]=+t[1][0],w[0][1]=+t[0][1],w[1][1]=+t[1][1],n):[[w[0][0],w[0][1]],[w[1][0],w[1][1]]]},n.constrain=function(t){return arguments.length?(y=t,n):y},n.duration=function(t){return arguments.length?(M=+t,n):M},n.interpolate=function(t){return arguments.length?(T=t,n):T},n.on=function(){var t=S.on.apply(S,arguments);return t===S?n:t},n.clickDistance=function(t){return arguments.length?(C=(t=+t)*t,n):Math.sqrt(C)},n},t.zoomTransform=ks,t.zoomIdentity=F_,Object.defineProperty(t,"__esModule",{value:!0})});
diff --git a/web/gui/lib/d3pie-0.2.1-netdata-3.js b/web/gui/lib/d3pie-0.2.1-netdata-3.js
new file mode 100644
index 0000000..3b00b49
--- /dev/null
+++ b/web/gui/lib/d3pie-0.2.1-netdata-3.js
@@ -0,0 +1,2124 @@
+/*!
+ * d3pie
+ * @author Ben Keen
+ * @version 0.2.1
+ * @date June 17th, 2015
+ * @repo http://github.com/benkeen/d3pie
+ * SPDX-License-Identifier: MIT
+ */
+
+// UMD pattern from https://github.com/umdjs/umd/blob/master/returnExports.js
+(function(root, factory) {
+ if (typeof define === 'function' && define.amd) {
+ // AMD. Register as an anonymous module
+ define([], factory);
+ } else if (typeof exports === 'object') {
+ // Node. Does not work with strict CommonJS, but only CommonJS-like environments that support module.exports,
+ // like Node
+ module.exports = factory();
+ } else {
+ // browser globals (root is window)
+ root.d3pie = factory(root);
+ }
+}(this, function() {
+
+ var _scriptName = "d3pie";
+ var _version = "0.2.1";
+
+ // used to uniquely generate IDs and classes, ensuring no conflict between multiple pies on the same page
+ var _uniqueIDCounter = 0;
+
+
+ // this section includes all helper libs on the d3pie object. They're populated via grunt-template. Note: to keep
+ // the syntax highlighting from getting all messed up, I commented out each line. That REQUIRES each of the files
+ // to have an empty first line. Crumby, yes, but acceptable.
+ //// --------- _default-settings.js -----------
+/**
+ * Contains the out-of-the-box settings for the script. Any of these settings that aren't explicitly overridden for the
+ * d3pie instance will inherit from these. This is also included on the main website for use in the generation script.
+ */
+var defaultSettings = {
+ header: {
+ title: {
+ text: "",
+ color: "#333333",
+ fontSize: 18,
+ fontWeight: "bold",
+ font: "arial"
+ },
+ subtitle: {
+ text: "",
+ color: "#666666",
+ fontSize: 14,
+ fontWeight: "bold",
+ font: "arial"
+ },
+ location: "top-center",
+ titleSubtitlePadding: 8
+ },
+ footer: {
+ text: "",
+ color: "#666666",
+ fontSize: 14,
+ fontWeight: "bold",
+ font: "arial",
+ location: "left"
+ },
+ size: {
+ canvasHeight: 500,
+ canvasWidth: 500,
+ pieInnerRadius: "0%",
+ pieOuterRadius: null
+ },
+ data: {
+ sortOrder: "none",
+ ignoreSmallSegments: {
+ enabled: false,
+ valueType: "percentage",
+ value: null
+ },
+ smallSegmentGrouping: {
+ enabled: false,
+ value: 1,
+ valueType: "percentage",
+ label: "Other",
+ color: "#cccccc"
+ },
+ content: []
+ },
+ labels: {
+ outer: {
+ format: "label",
+ hideWhenLessThanPercentage: null,
+ pieDistance: 30
+ },
+ inner: {
+ format: "percentage",
+ hideWhenLessThanPercentage: null
+ },
+ mainLabel: {
+ color: "#333333",
+ font: "arial",
+ fontWeight: "normal",
+ fontSize: 10
+ },
+ percentage: {
+ color: "#dddddd",
+ font: "arial",
+ fontWeight: "bold",
+ fontSize: 10,
+ decimalPlaces: 0
+ },
+ value: {
+ color: "#cccc44",
+ fontWeight: "bold",
+ font: "arial",
+ fontSize: 10
+ },
+ lines: {
+ enabled: true,
+ style: "curved",
+ color: "segment"
+ },
+ truncation: {
+ enabled: false,
+ truncateLength: 30
+ },
+ formatter: null
+ },
+ effects: {
+ load: {
+ effect: "none", // "default", commented in the code
+ speed: 1000
+ },
+ pullOutSegmentOnClick: {
+ effect: "none", // "bounce", commented in the code
+ speed: 300,
+ size: 10
+ },
+ highlightSegmentOnMouseover: false,
+ highlightLuminosity: -0.2
+ },
+ tooltips: {
+ enabled: false,
+ type: "placeholder", // caption|placeholder
+ string: "",
+ placeholderParser: null,
+ styles: {
+ fadeInSpeed: 250,
+ backgroundColor: "#000000",
+ backgroundOpacity: 0.5,
+ color: "#efefef",
+ borderRadius: 2,
+ font: "arial",
+ fontWeight: "bold",
+ fontSize: 10,
+ padding: 4
+ }
+ },
+ misc: {
+ colors: {
+ background: null,
+ segments: [
+ "#2484c1", "#65a620", "#7b6888", "#a05d56", "#961a1a", "#d8d23a", "#e98125", "#d0743c", "#635222", "#6ada6a",
+ "#0c6197", "#7d9058", "#207f33", "#44b9b0", "#bca44a", "#e4a14b", "#a3acb2", "#8cc3e9", "#69a6f9", "#5b388f",
+ "#546e91", "#8bde95", "#d2ab58", "#273c71", "#98bf6e", "#4daa4b", "#98abc5", "#cc1010", "#31383b", "#006391",
+ "#c2643f", "#b0a474", "#a5a39c", "#a9c2bc", "#22af8c", "#7fcecf", "#987ac6", "#3d3b87", "#b77b1c", "#c9c2b6",
+ "#807ece", "#8db27c", "#be66a2", "#9ed3c6", "#00644b", "#005064", "#77979f", "#77e079", "#9c73ab", "#1f79a7"
+ ],
+ segmentStroke: "#ffffff"
+ },
+ gradient: {
+ enabled: false,
+ percentage: 95,
+ color: "#000000"
+ },
+ canvasPadding: {
+ top: 5,
+ right: 5,
+ bottom: 5,
+ left: 5
+ },
+ pieCenterOffset: {
+ x: 0,
+ y: 0
+ },
+ cssPrefix: null
+ },
+ callbacks: {
+ onload: null,
+ onMouseoverSegment: null,
+ onMouseoutSegment: null,
+ onClickSegment: null
+ }
+};
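+
+// Illustrative usage sketch (not part of upstream d3pie): every setting above can
+// be overridden per instance, and anything omitted inherits from defaultSettings
+// via the deep extend() defined further below. Assuming a container element with
+// id "pieChart" exists, a minimal instantiation might look like:
+//
+//   var pie = new d3pie("pieChart", {
+//       header: { title: { text: "Requests by status" } },
+//       data: { content: [
+//           { label: "2xx", value: 920 },
+//           { label: "4xx", value: 60 },
+//           { label: "5xx", value: 20 }
+//       ]}
+//   });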
+
+ //// --------- validate.js -----------
+var validate = {
+
+ // called whenever a new pie chart is created
+ initialCheck: function(pie) {
+ var cssPrefix = pie.cssPrefix;
+ var element = pie.element;
+ var options = pie.options;
+
+ // confirm d3 is available [check minimum version]
+ if (!window.d3 || !window.d3.hasOwnProperty("version")) {
+ console.error("d3pie error: d3 is not available");
+ return false;
+ }
+
+ // confirm element is either a DOM element or a valid string for a DOM element
+ if (!(element instanceof HTMLElement || element instanceof SVGElement)) {
+ console.error("d3pie error: the first d3pie() param must be a valid DOM element (not jQuery) or a ID string.");
+ return false;
+ }
+
+ // confirm the CSS prefix is valid. It has to start with a letter and contain nothing but a-Z, 0-9, _ and -
+ if (!(/^[a-zA-Z][a-zA-Z0-9_-]*$/.test(cssPrefix))) {
+ console.error("d3pie error: invalid options.misc.cssPrefix");
+ return false;
+ }
+
+ // confirm some data has been supplied
+ if (!helpers.isArray(options.data.content)) {
+ console.error("d3pie error: invalid config structure: missing data.content property.");
+ return false;
+ }
+ if (options.data.content.length === 0) {
+ console.error("d3pie error: no data supplied.");
+ return false;
+ }
+
+ // clear out any invalid data. Each data row needs a valid positive number and a label
+ var data = [];
+ for (var i=0; i<options.data.content.length; i++) {
+ if (typeof options.data.content[i].value !== "number" || isNaN(options.data.content[i].value)) {
+ console.log("not valid: ", options.data.content[i]);
+ continue;
+ }
+ if (options.data.content[i].value <= 0) {
+ console.log("not valid - should have positive value: ", options.data.content[i]);
+ continue;
+ }
+ data.push(options.data.content[i]);
+ }
+ pie.options.data.content = data;
+
+ // labels.outer.hideWhenLessThanPercentage - 1-100
+ // labels.inner.hideWhenLessThanPercentage - 1-100
+
+ return true;
+ }
+};
+
+ //// --------- helpers.js -----------
+var helpers = {
+
+ // creates the SVG element
+ addSVGSpace: function(pie) {
+ var element = pie.element;
+ var canvasWidth = pie.options.size.canvasWidth;
+ var canvasHeight = pie.options.size.canvasHeight;
+ var backgroundColor = pie.options.misc.colors.background;
+
+ var svg = d3.select(element).append("svg:svg")
+ .attr("width", canvasWidth)
+ .attr("height", canvasHeight);
+
+ if (backgroundColor !== "transparent") {
+ svg.style("background-color", function() { return backgroundColor; });
+ }
+
+ return svg;
+ },
+
+ shuffleArray: function(array) {
+ var currentIndex = array.length, tmpVal, randomIndex;
+
+ // while there remain elements to shuffle (a classic Fisher-Yates pass)...
+ while (0 !== currentIndex) {
+ // ...pick one of the remaining elements
+ randomIndex = Math.floor(Math.random() * currentIndex);
+ currentIndex -= 1;
+
+ // and swap it with the current element
+ tmpVal = array[currentIndex];
+ array[currentIndex] = array[randomIndex];
+ array[randomIndex] = tmpVal;
+ }
+ return array;
+ },
+
+ processObj: function(obj, is, value) {
+ if (typeof is === 'string') {
+ return helpers.processObj(obj, is.split('.'), value);
+ } else if (is.length === 1 && value !== undefined) {
+ obj[is[0]] = value;
+ return obj[is[0]];
+ } else if (is.length === 0) {
+ return obj;
+ } else {
+ return helpers.processObj(obj[is[0]], is.slice(1), value);
+ }
+ },
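+
+ // worked example (illustrative): given opts = { labels: { outer: { format: "label" } } },
+ // processObj(opts, "labels.outer.format") walks the dotted path and returns "label",
+ // while processObj(opts, "labels.outer.format", "none") sets the nested property
+ // and returns "none".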
+
+ getDimensions: function(el) {
+ var id = el; // keep the original argument around for the error message below
+ if (typeof el === 'string')
+ el = document.getElementById(el);
+
+ var w = 0, h = 0;
+ if (el) {
+ var dimensions = el.getBBox();
+ w = dimensions.width;
+ h = dimensions.height;
+ }
+ else {
+ console.log("error: getDimensions() " + id + " not found.");
+ }
+
+ return { w: w, h: h };
+ },
+
+ /**
+ * This is based on the SVG coordinate system, where top-left is (0,0) and bottom-right is (n,n).
+ * @param r1 rectangle as {x, y, w, h}
+ * @param r2 rectangle as {x, y, w, h}
+ * @returns {boolean}
+ */
+ rectIntersect: function(r1, r2) {
+ var returnVal = (
+ // r2.left > r1.right
+ (r2.x > (r1.x + r1.w)) ||
+
+ // r2.right < r1.left
+ ((r2.x + r2.w) < r1.x) ||
+
+ // r2.bottom < r1.top
+ ((r2.y + r2.h) < r1.y) ||
+
+ // r2.top > r1.bottom
+ (r2.y > (r1.y + r1.h))
+ );
+
+ return !returnVal;
+ },
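+
+ // worked example (illustrative): for r1 = {x:0, y:0, w:10, h:10} and
+ // r2 = {x:5, y:5, w:10, h:10} none of the four separation tests hold, so the
+ // call returns true; moving r2 to {x:20, y:0, w:5, h:5} makes the first test
+ // (r2.x > r1.x + r1.w) hold, so the rectangles are disjoint and it returns false.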
+
+ /**
+ * Returns a lighter/darker shade of a hex value, based on a luminance value passed.
+ * @param hex a hex color value such as "#abc" or "#123456" (the hash is optional)
+ * @param lum the luminosity factor: -0.1 is 10% darker, 0.2 is 20% lighter, etc.
+ * @returns {string}
+ */
+ getColorShade: function(hex, lum) {
+
+ // validate hex string
+ hex = String(hex).replace(/[^0-9a-f]/gi, '');
+ if (hex.length < 6) {
+ hex = hex[0]+hex[0]+hex[1]+hex[1]+hex[2]+hex[2];
+ }
+ lum = lum || 0;
+
+ // convert to decimal and change luminosity
+ var newHex = "#";
+ for (var i=0; i<3; i++) {
+ var c = parseInt(hex.substr(i * 2, 2), 16);
+ c = Math.round(Math.min(Math.max(0, c + (c * lum)), 255)).toString(16);
+ newHex += ("00" + c).substr(c.length);
+ }
+
+ return newHex;
+ },
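+
+ // worked example (illustrative): getColorShade("#808080", 0.25) processes each
+ // channel as 0x80 = 128, then 128 + 128 * 0.25 = 160 = 0xa0, giving "#a0a0a0";
+ // a negative lum darkens instead: getColorShade("#808080", -0.5) gives "#404040".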
+
+ /**
+ * Users can choose to specify segment colors in three ways (in order of precedence):
+ * 1. include a "color" attribute for each row in data.content
+ * 2. include a misc.colors.segments property which contains an array of hex codes
+ * 3. specify nothing at all and rely on this lib to provide some reasonable defaults
+ *
+ * This function sees what's included and populates this.options.colors with whatever's required
+ * for this pie chart.
+ * @param pie
+ */
+ initSegmentColors: function(pie) {
+ var data = pie.options.data.content;
+ var colors = pie.options.misc.colors.segments;
+
+ // TODO this needs a ton of error handling
+
+ var finalColors = [];
+ for (var i=0; i<data.length; i++) {
+ if (data[i].hasOwnProperty("color")) {
+ finalColors.push(data[i].color);
+ } else {
+ finalColors.push(colors[i]);
+ }
+ }
+
+ return finalColors;
+ },
+
+ applySmallSegmentGrouping: function(data, smallSegmentGrouping) {
+ var totalSize;
+ if (smallSegmentGrouping.valueType === "percentage") {
+ totalSize = math.getTotalPieSize(data);
+ }
+
+ // loop through each data item
+ var newData = [];
+ var groupedData = [];
+ var totalGroupedData = 0;
+ for (var i=0; i<data.length; i++) {
+ if (smallSegmentGrouping.valueType === "percentage") {
+ var dataPercent = (data[i].value / totalSize) * 100;
+ if (dataPercent <= smallSegmentGrouping.value) {
+ groupedData.push(data[i]);
+ totalGroupedData += data[i].value;
+ continue;
+ }
+ data[i].isGrouped = false;
+ newData.push(data[i]);
+ } else {
+ if (data[i].value <= smallSegmentGrouping.value) {
+ groupedData.push(data[i]);
+ totalGroupedData += data[i].value;
+ continue;
+ }
+ data[i].isGrouped = false;
+ newData.push(data[i]);
+ }
+ }
+
+ // we're done! See if there's any small segment groups to add
+ if (groupedData.length) {
+ newData.push({
+ color: smallSegmentGrouping.color,
+ label: smallSegmentGrouping.label,
+ value: totalGroupedData,
+ isGrouped: true,
+ groupedData: groupedData
+ });
+ }
+
+ return newData;
+ },
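+
+ // worked example (illustrative): with data values A=50, B=45, C=5 and a
+ // smallSegmentGrouping of { value: 10, valueType: "percentage", label: "Other", ... },
+ // C's share is 5/100 = 5%, at or below the 10% threshold, so it is folded into a
+ // synthetic "Other" row with value 5 and groupedData = [C]; A and B pass through
+ // with isGrouped = false.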
+
+ // for debugging
+ showPoint: function(svg, x, y) {
+ svg.append("circle").attr("cx", x).attr("cy", y).attr("r", 2).style("fill", "black");
+ },
+
+ isFunction: function(functionToCheck) {
+ var getType = {};
+ return functionToCheck && getType.toString.call(functionToCheck) === '[object Function]';
+ },
+
+ isArray: function(o) {
+ return Object.prototype.toString.call(o) === '[object Array]';
+ }
+};
+
+
+// taken from jQuery
+var extend = function() {
+ var options, name, src, copy, copyIsArray, clone, target = arguments[0] || {},
+ i = 1,
+ length = arguments.length,
+ deep = false,
+ toString = Object.prototype.toString,
+ hasOwn = Object.prototype.hasOwnProperty,
+ class2type = {
+ "[object Boolean]": "boolean",
+ "[object Number]": "number",
+ "[object String]": "string",
+ "[object Function]": "function",
+ "[object Array]": "array",
+ "[object Date]": "date",
+ "[object RegExp]": "regexp",
+ "[object Object]": "object"
+ },
+
+ jQuery = {
+ isFunction: function (obj) {
+ return jQuery.type(obj) === "function";
+ },
+ isArray: Array.isArray ||
+ function (obj) {
+ return jQuery.type(obj) === "array";
+ },
+ isWindow: function (obj) {
+ return obj !== null && obj === obj.window;
+ },
+ isNumeric: function (obj) {
+ return !isNaN(parseFloat(obj)) && isFinite(obj);
+ },
+ type: function (obj) {
+ return obj === null ? String(obj) : class2type[toString.call(obj)] || "object";
+ },
+ isPlainObject: function (obj) {
+ if (!obj || jQuery.type(obj) !== "object" || obj.nodeType) {
+ return false;
+ }
+ try {
+ if (obj.constructor && !hasOwn.call(obj, "constructor") && !hasOwn.call(obj.constructor.prototype, "isPrototypeOf")) {
+ return false;
+ }
+ } catch (e) {
+ return false;
+ }
+ var key;
+ for (key in obj) {}
+ return key === undefined || hasOwn.call(obj, key);
+ }
+ };
+ if (typeof target === "boolean") {
+ deep = target;
+ target = arguments[1] || {};
+ i = 2;
+ }
+ if (typeof target !== "object" && !jQuery.isFunction(target)) {
+ target = {};
+ }
+ if (length === i) {
+ target = this;
+ --i;
+ }
+ for (i; i < length; i++) {
+ if ((options = arguments[i]) !== null) {
+ for (name in options) {
+ src = target[name];
+ copy = options[name];
+ if (target === copy) {
+ continue;
+ }
+ if (deep && copy && (jQuery.isPlainObject(copy) || (copyIsArray = jQuery.isArray(copy)))) {
+ if (copyIsArray) {
+ copyIsArray = false;
+ clone = src && jQuery.isArray(src) ? src : [];
+ } else {
+ clone = src && jQuery.isPlainObject(src) ? src : {};
+ }
+ // WARNING: RECURSION
+ target[name] = extend(deep, clone, copy);
+ } else if (copy !== undefined) {
+ target[name] = copy;
+ }
+ }
+ }
+ }
+ return target;
+};
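+
+// illustrative note: the sort of call d3pie makes when merging user options over
+// the defaults looks like this (a sketch, not a quote from the constructor):
+//
+//   var merged = extend(true, {}, defaultSettings, { size: { canvasWidth: 600 } });
+//   // merged.size.canvasWidth === 600; merged.size.canvasHeight === 500 (inherited)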
+ //// --------- math.js -----------
+var math = {
+
+ toRadians: function(degrees) {
+ return degrees * (Math.PI / 180);
+ },
+
+ toDegrees: function(radians) {
+ return radians * (180 / Math.PI);
+ },
+
+ computePieRadius: function(pie) {
+ var size = pie.options.size;
+ var canvasPadding = pie.options.misc.canvasPadding;
+
+ // outer radius is either specified (e.g. through the generator), or omitted altogether
+ // and calculated based on the canvas dimensions. Right now the estimated version isn't great - it should
+ // be possible to calculate it to precisely generate the maximum sized pie, but it's fussy as heck. Something
+ // for the next release.
+
+ // first, calculate the default _outerRadius
+ var w = size.canvasWidth - canvasPadding.left - canvasPadding.right;
+ var h = size.canvasHeight - canvasPadding.top - canvasPadding.bottom;
+
+ // now factor in the footer, title & subtitle
+ if (pie.options.header.location !== "pie-center") {
+ h -= pie.textComponents.headerHeight;
+ }
+
+ if (pie.textComponents.footer.exists) {
+ h -= pie.textComponents.footer.h;
+ }
+
+ // for really teeny pies, h may be < 0. Adjust it back
+ h = (h < 0) ? 0 : h;
+
+ var outerRadius = ((w < h) ? w : h) / 3;
+ var innerRadius, percent;
+
+ // if the user specified something, use that instead
+ if (size.pieOuterRadius !== null) {
+ if (/%/.test(size.pieOuterRadius)) {
+ percent = parseInt(size.pieOuterRadius.replace(/[\D]/g, ""), 10);
+ percent = (percent > 99) ? 99 : percent;
+ percent = (percent < 0) ? 0 : percent;
+
+ var smallestDimension = (w < h) ? w : h;
+
+ // now factor in the label line size
+ if (pie.options.labels.outer.format !== "none") {
+ var pieDistanceSpace = parseInt(pie.options.labels.outer.pieDistance, 10) * 2;
+ if (smallestDimension - pieDistanceSpace > 0) {
+ smallestDimension -= pieDistanceSpace;
+ }
+ }
+
+ outerRadius = Math.floor((smallestDimension / 100) * percent) / 2;
+ } else {
+ outerRadius = parseInt(size.pieOuterRadius, 10);
+ }
+ }
+
+ // inner radius
+ if (/%/.test(size.pieInnerRadius)) {
+ percent = parseInt(size.pieInnerRadius.replace(/[\D]/g, ""), 10);
+ percent = (percent > 99) ? 99 : percent;
+ percent = (percent < 0) ? 0 : percent;
+ innerRadius = Math.floor((outerRadius / 100) * percent);
+ } else {
+ innerRadius = parseInt(size.pieInnerRadius, 10);
+ }
+
+ pie.innerRadius = innerRadius;
+ pie.outerRadius = outerRadius;
+ },
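+
+ // worked example (illustrative): on a 500x500 canvas with 5px padding all round
+ // and no header/footer text (headerHeight 0), w = h = 490, so the fallback
+ // outerRadius is 490 / 3 = 163.33; with pieOuterRadius = "80%" and outer labels
+ // at pieDistance 30, the smallest dimension shrinks to 490 - 60 = 430 and
+ // outerRadius = Math.floor(430 / 100 * 80) / 2 = 172.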
+
+ getTotalPieSize: function(data) {
+ var totalSize = 0;
+ for (var i=0; i<data.length; i++) {
+ totalSize += data[i].value;
+ }
+ return totalSize;
+ },
+
+ sortPieData: function(pie) {
+ var data = pie.options.data.content;
+ var sortOrder = pie.options.data.sortOrder;
+
+ switch (sortOrder) {
+ case "none":
+ // do nothing
+ break;
+ case "random":
+ data = helpers.shuffleArray(data);
+ break;
+ case "value-asc":
+ data.sort(function(a, b) { return (a.value < b.value) ? -1 : 1; });
+ break;
+ case "value-desc":
+ data.sort(function(a, b) { return (a.value < b.value) ? 1 : -1; });
+ break;
+ case "label-asc":
+ data.sort(function(a, b) { return (a.label.toLowerCase() > b.label.toLowerCase()) ? 1 : -1; });
+ break;
+ case "label-desc":
+ data.sort(function(a, b) { return (a.label.toLowerCase() < b.label.toLowerCase()) ? 1 : -1; });
+ break;
+ }
+
+ return data;
+ },
+
+ // returns an SVG transform string, e.g. getPieTranslateCenter({ x: 250, y: 252.5 }) -> "translate(250,252.5)"
+ getPieTranslateCenter: function(pieCenter) {
+ return "translate(" + pieCenter.x + "," + pieCenter.y + ")";
+ },
+
+ /**
+ * Used to determine where on the canvas the center of the pie chart should be. It takes into account the
+ * height and position of the title, subtitle and footer, and the various paddings.
+ * @private
+ */
+ calculatePieCenter: function(pie) {
+ var pieCenterOffset = pie.options.misc.pieCenterOffset;
+ var hasTopTitle = (pie.textComponents.title.exists && pie.options.header.location !== "pie-center");
+ var hasTopSubtitle = (pie.textComponents.subtitle.exists && pie.options.header.location !== "pie-center");
+
+ var headerOffset = pie.options.misc.canvasPadding.top;
+ if (hasTopTitle && hasTopSubtitle) {
+ headerOffset += pie.textComponents.title.h + pie.options.header.titleSubtitlePadding + pie.textComponents.subtitle.h;
+ } else if (hasTopTitle) {
+ headerOffset += pie.textComponents.title.h;
+ } else if (hasTopSubtitle) {
+ headerOffset += pie.textComponents.subtitle.h;
+ }
+
+ var footerOffset = 0;
+ if (pie.textComponents.footer.exists) {
+ footerOffset = pie.textComponents.footer.h + pie.options.misc.canvasPadding.bottom;
+ }
+
+ var x = ((pie.options.size.canvasWidth - pie.options.misc.canvasPadding.left - pie.options.misc.canvasPadding.right) / 2) + pie.options.misc.canvasPadding.left;
+ var y = ((pie.options.size.canvasHeight - footerOffset - headerOffset) / 2) + headerOffset;
+
+ x += pieCenterOffset.x;
+ y += pieCenterOffset.y;
+
+ pie.pieCenter = { x: x, y: y };
+ },
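+
+ // worked example (illustrative): 500x500 canvas, 5px padding, no title/subtitle/footer:
+ // headerOffset = 5 and footerOffset = 0, so x = ((500 - 5 - 5) / 2) + 5 = 250 and
+ // y = ((500 - 0 - 5) / 2) + 5 = 252.5; a pieCenterOffset of {x: 0, y: -10} would
+ // then lift the center to (250, 242.5).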
+
+
+ /**
+ * Rotates a point (x, y) around an axis (xm, ym) by degrees (a).
+ * @param x
+ * @param y
+ * @param xm
+ * @param ym
+ * @param a angle in degrees
+ * @returns {Object} the rotated point as {x, y}
+ */
+ rotate: function(x, y, xm, ym, a) {
+
+ a = a * Math.PI / 180; // convert to radians
+
+ var cos = Math.cos,
+ sin = Math.sin,
+ // subtract midpoints, so that midpoint is translated to origin and add it in the end again
+ xr = (x - xm) * cos(a) - (y - ym) * sin(a) + xm,
+ yr = (x - xm) * sin(a) + (y - ym) * cos(a) + ym;
+
+ return { x: xr, y: yr };
+ },
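+
+ // worked example (illustrative): rotate(100, 100, 100, 200, 90) spins the point
+ // (100, 100) a quarter turn about (100, 200): xr = 0*cos(90) - (-100)*sin(90) + 100 = 200
+ // and yr = 0*sin(90) + (-100)*cos(90) + 200 = 200, i.e. (200, 200) - a clockwise
+ // rotation in SVG coordinates, because the y axis points down.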
+
+ /**
+ * Translates a point x, y by distance d, and by angle a.
+ * @param x
+ * @param y
+ * @param d the distance to travel
+ * @param a angle in degrees
+ */
+ translate: function(x, y, d, a) {
+ var rads = math.toRadians(a);
+ return {
+ x: x + d * Math.sin(rads),
+ y: y - d * Math.cos(rads)
+ };
+ },
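+
+ // worked example (illustrative): translate(100, 100, 50, 90) moves the point 50px
+ // at 90 degrees measured clockwise from 12 o'clock: x = 100 + 50*sin(90) = 150 and
+ // y = 100 - 50*cos(90) = 100, i.e. straight to the right - the same angle
+ // convention getSegmentAngle() uses further below.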
+
+ // from: http://stackoverflow.com/questions/19792552/d3-put-arc-labels-in-a-pie-chart-if-there-is-enough-space
+ pointIsInArc: function(pt, ptData, d3Arc) {
+ // Center of the arc is assumed to be 0,0
+ // (pt.x, pt.y) are assumed to be relative to the center
+ var r1 = d3Arc.innerRadius()(ptData), // Note: Using the innerRadius
+ r2 = d3Arc.outerRadius()(ptData),
+ theta1 = d3Arc.startAngle()(ptData),
+ theta2 = d3Arc.endAngle()(ptData);
+
+ var dist = pt.x * pt.x + pt.y * pt.y,
+ angle = Math.atan2(pt.x, -pt.y); // Note: different coordinate system
+
+ angle = (angle < 0) ? (angle + Math.PI * 2) : angle;
+
+ return (r1 * r1 <= dist) && (dist <= r2 * r2) &&
+ (theta1 <= angle) && (angle <= theta2);
+ }
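+
+ // illustrative note: the angle above follows d3's pie convention - measured
+ // clockwise from 12 o'clock - hence atan2(pt.x, -pt.y); e.g. a point at (r, 0)
+ // (3 o'clock, with r1 <= r <= r2) yields atan2(r, 0) = PI/2, which lands inside
+ // a segment spanning startAngle 0 to endAngle PI.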
+};
+
+ //// --------- labels.js -----------
+var labels = {
+
+ /**
+ * Adds the labels to the pie chart, but doesn't position them. There are two locations for the
+ * labels: inside (center) of the segments, or outside the segments on the edge.
+ * @param section "inner" or "outer"
+ * @param sectionDisplayType "percentage", "value", "label", "label-value1", etc.
+ * @param pie
+ */
+ add: function(pie, section, sectionDisplayType) {
+ var include = labels.getIncludes(sectionDisplayType);
+ var settings = pie.options.labels;
+
+ // group the label groups (label, percentage, value) into a single element for simpler positioning
+ var outerLabel = pie.svg.insert("g", "." + pie.cssPrefix + "labels-" + section)
+ .attr("class", pie.cssPrefix + "labels-" + section);
+
+ var labelGroup = pie.__labels[section] = outerLabel.selectAll("." + pie.cssPrefix + "labelGroup-" + section)
+ .data(pie.options.data.content)
+ .enter()
+ .append("g")
+ .attr("id", function(d, i) { return pie.cssPrefix + "labelGroup" + i + "-" + section; })
+ .attr("data-index", function(d, i) { return i; })
+ .attr("class", pie.cssPrefix + "labelGroup-" + section)
+ .style("opacity", 0);
+
+ var formatterContext = { section: section, sectionDisplayType: sectionDisplayType };
+
+ // 1. Add the main label
+ if (include.mainLabel) {
+ labelGroup.append("text")
+ .attr("id", function(d, i) { return pie.cssPrefix + "segmentMainLabel" + i + "-" + section; })
+ .attr("class", pie.cssPrefix + "segmentMainLabel-" + section)
+ .text(function(d, i) {
+ var str = d.label;
+
+ // if a custom formatter has been defined, pass it the raw label string - it can do whatever it wants with it.
+ // we only apply truncation if it's not defined
+ if (settings.formatter) {
+ formatterContext.index = i;
+ formatterContext.part = 'mainLabel';
+ formatterContext.value = d.value;
+ formatterContext.label = str;
+ str = settings.formatter(formatterContext);
+ } else if (settings.truncation.enabled && d.label.length > settings.truncation.truncateLength) {
+ str = d.label.substring(0, settings.truncation.truncateLength) + "...";
+ }
+ return str;
+ })
+ .style("font-size", settings.mainLabel.fontSize + "px")
+ .style("font-family", settings.mainLabel.font)
+ .style("font-weight", settings.mainLabel.fontWeight)
+ .style("fill", function(d, i) {
+ return (settings.mainLabel.color === "segment") ? pie.options.colors[i] : settings.mainLabel.color;
+ });
+ }
+
+ // 2. Add the percentage label
+ if (include.percentage) {
+ labelGroup.append("text")
+ .attr("id", function(d, i) { return pie.cssPrefix + "segmentPercentage" + i + "-" + section; })
+ .attr("class", pie.cssPrefix + "segmentPercentage-" + section)
+ .text(function(d, i) {
+ var percentage = d.percentage;
+ if (settings.formatter) {
+ formatterContext.index = i;
+ formatterContext.part = "percentage";
+ formatterContext.value = d.value;
+ formatterContext.label = d.percentage;
+ percentage = settings.formatter(formatterContext);
+ } else {
+ percentage += "%";
+ }
+ return percentage;
+ })
+ .style("font-size", settings.percentage.fontSize + "px")
+ .style("font-family", settings.percentage.font)
+ .style("font-weight", settings.percentage.fontWeight)
+ .style("fill", settings.percentage.color);
+ }
+
+ // 3. Add the value label
+ if (include.value) {
+ labelGroup.append("text")
+ .attr("id", function(d, i) { return pie.cssPrefix + "segmentValue" + i + "-" + section; })
+ .attr("class", pie.cssPrefix + "segmentValue-" + section)
+ .text(function(d, i) {
+ formatterContext.index = i;
+ formatterContext.part = "value";
+ formatterContext.value = d.value;
+ formatterContext.label = d.value;
+ return settings.formatter ? settings.formatter(formatterContext, d.value) : d.value;
+ })
+ .style("font-size", settings.value.fontSize + "px")
+ .style("font-family", settings.value.font)
+ .style("font-weight", settings.value.fontWeight)
+ .style("fill", settings.value.color);
+ }
+ },
+
+ /**
+ * @param section "inner" / "outer"
+ */
+ positionLabelElements: function(pie, section, sectionDisplayType) {
+ labels["dimensions-" + section] = [];
+
+ // get the latest widths, heights
+ var labelGroups = pie.__labels[section];
+ labelGroups.each(function(d, i) {
+ var mainLabel = d3.select(this).selectAll("." + pie.cssPrefix + "segmentMainLabel-" + section);
+ var percentage = d3.select(this).selectAll("." + pie.cssPrefix + "segmentPercentage-" + section);
+ var value = d3.select(this).selectAll("." + pie.cssPrefix + "segmentValue-" + section);
+
+ labels["dimensions-" + section].push({
+ mainLabel: (mainLabel.node() !== null) ? mainLabel.node().getBBox() : null,
+ percentage: (percentage.node() !== null) ? percentage.node().getBBox() : null,
+ value: (value.node() !== null) ? value.node().getBBox() : null
+ });
+ });
+
+ var singleLinePad = 5;
+ var dims = labels["dimensions-" + section];
+ switch (sectionDisplayType) {
+ case "label-value1":
+ pie.svg.selectAll("." + pie.cssPrefix + "segmentValue-" + section)
+ .attr("dx", function(d, i) { return dims[i].mainLabel.width + singleLinePad; });
+ break;
+ case "label-value2":
+ pie.svg.selectAll("." + pie.cssPrefix + "segmentValue-" + section)
+ .attr("dy", function(d, i) { return dims[i].mainLabel.height; });
+ break;
+ case "label-percentage1":
+ pie.svg.selectAll("." + pie.cssPrefix + "segmentPercentage-" + section)
+ .attr("dx", function(d, i) { return dims[i].mainLabel.width + singleLinePad; });
+ break;
+ case "label-percentage2":
+ pie.svg.selectAll("." + pie.cssPrefix + "segmentPercentage-" + section)
+ .attr("dx", function(d, i) { return (dims[i].mainLabel.width / 2) - (dims[i].percentage.width / 2); })
+ .attr("dy", function(d, i) { return dims[i].mainLabel.height; });
+ break;
+ }
+ },
+
+ computeLabelLinePositions: function(pie) {
+ pie.lineCoordGroups = [];
+ pie.__labels.outer
+ .each(function(d, i) { return labels.computeLinePosition(pie, i); });
+ },
+
+ computeLinePosition: function(pie, i) {
+ var angle = segments.getSegmentAngle(i, pie.options.data.content, pie.totalSize, { midpoint: true });
+ var originCoords = math.rotate(pie.pieCenter.x, pie.pieCenter.y - pie.outerRadius, pie.pieCenter.x, pie.pieCenter.y, angle);
+ var heightOffset = pie.outerLabelGroupData[i].h / 5; // TODO check
+ var labelXMargin = 6; // the x-distance of the label from the end of the line [TODO configurable]
+
+ var quarter = Math.floor(angle / 90);
+ var midPoint = 4;
+ var x2, y2, x3, y3;
+
+ // this resolves an issue when the angle is exactly 180 degrees: Math.floor() would
+ // put it in quarter 2, but the label line math expects it at the end of quarter 1
+ if (quarter === 2 && angle === 180) {
+ quarter = 1;
+ }
+
+ switch (quarter) {
+ case 0:
+ x2 = pie.outerLabelGroupData[i].x - labelXMargin - ((pie.outerLabelGroupData[i].x - labelXMargin - originCoords.x) / 2);
+ y2 = pie.outerLabelGroupData[i].y + ((originCoords.y - pie.outerLabelGroupData[i].y) / midPoint);
+ x3 = pie.outerLabelGroupData[i].x - labelXMargin;
+ y3 = pie.outerLabelGroupData[i].y - heightOffset;
+ break;
+ case 1:
+ x2 = originCoords.x + (pie.outerLabelGroupData[i].x - originCoords.x) / midPoint;
+ y2 = originCoords.y + (pie.outerLabelGroupData[i].y - originCoords.y) / midPoint;
+ x3 = pie.outerLabelGroupData[i].x - labelXMargin;
+ y3 = pie.outerLabelGroupData[i].y - heightOffset;
+ break;
+ case 2:
+ var startOfLabelX = pie.outerLabelGroupData[i].x + pie.outerLabelGroupData[i].w + labelXMargin;
+ x2 = originCoords.x - (originCoords.x - startOfLabelX) / midPoint;
+ y2 = originCoords.y + (pie.outerLabelGroupData[i].y - originCoords.y) / midPoint;
+ x3 = pie.outerLabelGroupData[i].x + pie.outerLabelGroupData[i].w + labelXMargin;
+ y3 = pie.outerLabelGroupData[i].y - heightOffset;
+ break;
+ case 3:
+ var startOfLabel = pie.outerLabelGroupData[i].x + pie.outerLabelGroupData[i].w + labelXMargin;
+ x2 = startOfLabel + ((originCoords.x - startOfLabel) / midPoint);
+ y2 = pie.outerLabelGroupData[i].y + (originCoords.y - pie.outerLabelGroupData[i].y) / midPoint;
+ x3 = pie.outerLabelGroupData[i].x + pie.outerLabelGroupData[i].w + labelXMargin;
+ y3 = pie.outerLabelGroupData[i].y - heightOffset;
+ break;
+ }
+
+ /*
+ * x1 / y1: the x/y coords of the start of the line, at the midpoint of the segment's arc on the pie circumference
+ * x2 / y2: if the "curved" line style is being used, this is the midpoint of the line; otherwise it is unused
+ * x3 / y3: the end of the line; closest point to the label
+ */
+ if (pie.options.labels.lines.style === "straight") {
+ pie.lineCoordGroups[i] = [
+ { x: originCoords.x, y: originCoords.y },
+ { x: x3, y: y3 }
+ ];
+ } else {
+ pie.lineCoordGroups[i] = [
+ { x: originCoords.x, y: originCoords.y },
+ { x: x2, y: y2 },
+ { x: x3, y: y3 }
+ ];
+ }
+ },
+
+ addLabelLines: function(pie) {
+ var lineGroups = pie.svg.insert("g", "." + pie.cssPrefix + "pieChart") // meaning, BEFORE .pieChart
+ .attr("class", pie.cssPrefix + "lineGroups")
+ .style("opacity", 1);
+
+ var lineGroup = lineGroups.selectAll("." + pie.cssPrefix + "lineGroup")
+ .data(pie.lineCoordGroups)
+ .enter()
+ .append("g")
+ .attr("class", pie.cssPrefix + "lineGroup");
+
+ var lineFunction = d3.line()
+ .curve(d3.curveBasis)
+ .x(function(d) { return d.x; })
+ .y(function(d) { return d.y; });
+
+ lineGroup.append("path")
+ .attr("d", lineFunction)
+ .attr("stroke", function(d, i) {
+ return (pie.options.labels.lines.color === "segment") ? pie.options.colors[i] : pie.options.labels.lines.color;
+ })
+ .attr("stroke-width", 1)
+ .attr("fill", "none")
+ .style("opacity", function(d, i) {
+ var percentage = pie.options.labels.outer.hideWhenLessThanPercentage;
+ var isHidden = (percentage !== null && d.percentage < percentage) || pie.options.data.content[i].label === "";
+ return isHidden ? 0 : 1;
+ });
+ },
+
+ positionLabelGroups: function(pie, section) {
+ if (pie.options.labels[section].format === "none")
+ return;
+
+ pie.__labels[section]
+ .style("opacity", function(d, i) {
+ var percentage = pie.options.labels[section].hideWhenLessThanPercentage;
+ return (percentage !== null && d.percentage < percentage) ? 0 : 1;
+ })
+ .attr("transform", function(d, i) {
+ var x, y;
+ if (section === "outer") {
+ x = pie.outerLabelGroupData[i].x;
+ y = pie.outerLabelGroupData[i].y;
+ } else {
+ var pieCenterCopy = extend(true, {}, pie.pieCenter);
+
+ // now recompute the "center" based on the current _innerRadius
+ if (pie.innerRadius > 0) {
+ var angle = segments.getSegmentAngle(i, pie.options.data.content, pie.totalSize, { midpoint: true });
+ var newCoords = math.translate(pie.pieCenter.x, pie.pieCenter.y, pie.innerRadius, angle);
+ pieCenterCopy.x = newCoords.x;
+ pieCenterCopy.y = newCoords.y;
+ }
+
+ var dims = helpers.getDimensions(pie.cssPrefix + "labelGroup" + i + "-inner");
+ var xOffset = dims.w / 2;
+ var yOffset = dims.h / 4; // confusing! Why 4? should be 2, but it doesn't look right
+
+ x = pieCenterCopy.x + (pie.lineCoordGroups[i][0].x - pieCenterCopy.x) / 1.8;
+ y = pieCenterCopy.y + (pie.lineCoordGroups[i][0].y - pieCenterCopy.y) / 1.8;
+
+ x = x - xOffset;
+ y = y + yOffset;
+ }
+
+ return "translate(" + x + "," + y + ")";
+ });
+ },
+
+
+ getIncludes: function(val) {
+ var addMainLabel = false;
+ var addValue = false;
+ var addPercentage = false;
+
+ switch (val) {
+ case "label":
+ addMainLabel = true;
+ break;
+ case "value":
+ addValue = true;
+ break;
+ case "percentage":
+ addPercentage = true;
+ break;
+ case "label-value1":
+ case "label-value2":
+ addMainLabel = true;
+ addValue = true;
+ break;
+ case "label-percentage1":
+ case "label-percentage2":
+ addMainLabel = true;
+ addPercentage = true;
+ break;
+ }
+ return {
+ mainLabel: addMainLabel,
+ value: addValue,
+ percentage: addPercentage
+ };
+ },
+
+
+ /**
+ * This does the heavy-lifting to compute the actual coordinates for the outer label groups. It does two things:
+ * 1. Make a first pass and position them in the ideal positions, based on the pie sizes
+ * 2. Do some basic collision avoidance.
+ */
+ computeOuterLabelCoords: function(pie) {
+
+ // 1. figure out the ideal positions for the outer labels
+ pie.__labels.outer
+ .each(function(d, i) {
+ return labels.getIdealOuterLabelPositions(pie, i);
+ });
+
+ // 2. now adjust those positions to try to accommodate conflicts
+ labels.resolveOuterLabelCollisions(pie);
+ },
+
+ /**
+ * This attempts to resolve label positioning collisions.
+ */
+ resolveOuterLabelCollisions: function(pie) {
+ if (pie.options.labels.outer.format === "none") {
+ return;
+ }
+
+ var size = pie.options.data.content.length;
+ labels.checkConflict(pie, 0, "clockwise", size);
+ labels.checkConflict(pie, size-1, "anticlockwise", size);
+ },
+
+ checkConflict: function(pie, currIndex, direction, size) {
+ var i, curr;
+
+ if (size <= 1) {
+ return;
+ }
+
+ var currIndexHemisphere = pie.outerLabelGroupData[currIndex].hs;
+ if (direction === "clockwise" && currIndexHemisphere !== "right") {
+ return;
+ }
+ if (direction === "anticlockwise" && currIndexHemisphere !== "left") {
+ return;
+ }
+ var nextIndex = (direction === "clockwise") ? currIndex+1 : currIndex-1;
+
+ // this is the current label group being looked at. We KNOW it's positioned properly (the first item
+ // is always correct)
+ var currLabelGroup = pie.outerLabelGroupData[currIndex];
+
+ // this one we don't know about. That's the one we're going to look at and move if necessary
+ var examinedLabelGroup = pie.outerLabelGroupData[nextIndex];
+
+ var info = {
+ labelHeights: pie.outerLabelGroupData[0].h,
+ center: pie.pieCenter,
+ lineLength: (pie.outerRadius + pie.options.labels.outer.pieDistance),
+ heightChange: pie.outerLabelGroupData[0].h + 1 // 1 = padding
+ };
+
+ // loop through *ALL* label groups examined so far to check for conflicts. This is because when they're
+ // very tightly fitted, a later label group may still appear high up on the page
+ if (direction === "clockwise") {
+ i = 0;
+ for (; i<=currIndex; i++) {
+ curr = pie.outerLabelGroupData[i];
+
+ // if there's a conflict with this label group, shift the label to be AFTER the last known
+ // one that's been properly placed
+ if (!labels.isLabelHidden(pie, i) && helpers.rectIntersect(curr, examinedLabelGroup)) {
+ labels.adjustLabelPos(pie, nextIndex, currLabelGroup, info);
+ break;
+ }
+ }
+ } else {
+ i = size - 1;
+ for (; i >= currIndex; i--) {
+ curr = pie.outerLabelGroupData[i];
+
+ // if there's a conflict with this label group, shift the label to be AFTER the last known
+ // one that's been properly placed
+ if (!labels.isLabelHidden(pie, i) && helpers.rectIntersect(curr, examinedLabelGroup)) {
+ labels.adjustLabelPos(pie, nextIndex, currLabelGroup, info);
+ break;
+ }
+ }
+ }
+ labels.checkConflict(pie, nextIndex, direction, size);
+ },
+
+ isLabelHidden: function(pie, index) {
+ var percentage = pie.options.labels.outer.hideWhenLessThanPercentage;
+ return (percentage !== null && pie.options.data.content[index].percentage < percentage) || pie.options.data.content[index].label === "";
+ },
+
+ // does a little math to shift a label into a new position based on the last properly placed one
+ adjustLabelPos: function(pie, nextIndex, lastCorrectlyPositionedLabel, info) {
+ var xDiff, yDiff, newXPos, newYPos;
+ newYPos = lastCorrectlyPositionedLabel.y + info.heightChange;
+ yDiff = info.center.y - newYPos;
+
+ if (Math.abs(info.lineLength) > Math.abs(yDiff)) {
+ xDiff = Math.sqrt((info.lineLength * info.lineLength) - (yDiff * yDiff));
+ } else {
+ xDiff = Math.sqrt((yDiff * yDiff) - (info.lineLength * info.lineLength));
+ }
+
+ if (lastCorrectlyPositionedLabel.hs === "right") {
+ newXPos = info.center.x + xDiff;
+ } else {
+ newXPos = info.center.x - xDiff - pie.outerLabelGroupData[nextIndex].w;
+ }
+
+ pie.outerLabelGroupData[nextIndex].x = newXPos;
+ pie.outerLabelGroupData[nextIndex].y = newYPos;
+ },
+
+ /**
+ * @param i 0-N where N is the dataset size - 1.
+ */
+ getIdealOuterLabelPositions: function(pie, i) {
+ var labelGroupNode = pie.svg.select("#" + pie.cssPrefix + "labelGroup" + i + "-outer").node();
+ if (!labelGroupNode) return;
+
+ var labelGroupDims = labelGroupNode.getBBox();
+ var angle = segments.getSegmentAngle(i, pie.options.data.content, pie.totalSize, { midpoint: true });
+
+ var originalX = pie.pieCenter.x;
+ var originalY = pie.pieCenter.y - (pie.outerRadius + pie.options.labels.outer.pieDistance);
+ var newCoords = math.rotate(originalX, originalY, pie.pieCenter.x, pie.pieCenter.y, angle);
+
+ // if the label is on the left half of the pie, adjust the values
+ var hemisphere = "right"; // hemisphere
+ if (angle > 180) {
+ newCoords.x -= (labelGroupDims.width + 8);
+ hemisphere = "left";
+ } else {
+ newCoords.x += 8;
+ }
+
+ pie.outerLabelGroupData[i] = {
+ x: newCoords.x,
+ y: newCoords.y,
+ w: labelGroupDims.width,
+ h: labelGroupDims.height,
+ hs: hemisphere
+ };
+ }
+};
+
+ //// --------- segments.js -----------
+var segments = {
+
+ effectMap: {
+ "none": d3.easeLinear,
+ "bounce": d3.easeBounce,
+ "linear": d3.easeLinear,
+ "sin": d3.easeSin,
+ "elastic": d3.easeElastic,
+ "back": d3.easeBack,
+ "quad": d3.easeQuad,
+ "circle": d3.easeCircle,
+ "exp": d3.easeExp
+ },
+
+ /**
+ * Creates the pie chart segments and displays them according to the desired load effect.
+ * @private
+ */
+ create: function(pie) {
+ var pieCenter = pie.pieCenter;
+ var colors = pie.options.colors;
+ var loadEffects = pie.options.effects.load;
+ var segmentStroke = pie.options.misc.colors.segmentStroke;
+
+ // we insert the pie chart BEFORE the title, to ensure the title overlaps the pie
+ var pieChartElement = pie.svg.insert("g", "#" + pie.cssPrefix + "title")
+ .attr("transform", function() { return math.getPieTranslateCenter(pieCenter); })
+ .attr("class", pie.cssPrefix + "pieChart");
+
+ var arc = d3.arc()
+ .innerRadius(pie.innerRadius)
+ .outerRadius(pie.outerRadius)
+ .startAngle(0)
+ .endAngle(function(d) {
+ return (d.value / pie.totalSize) * 2 * Math.PI;
+ });
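+
+ // illustrative note: every arc starts at angle 0 and only its endAngle varies,
+ // e.g. a value of 25 out of totalSize 100 yields endAngle (25/100) * 2 * PI = PI/2
+ // (a quarter circle); the per-segment rotate() transform applied below then spins
+ // each arc to its true start position.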
+
+ var g = pieChartElement.selectAll("." + pie.cssPrefix + "arc")
+ .data(pie.options.data.content)
+ .enter()
+ .append("g")
+ .attr("class", pie.cssPrefix + "arc");
+
+ // if we're not fading in the pie, just set the load speed to 0
+ //var loadSpeed = loadEffects.speed;
+ //if (loadEffects.effect === "none") {
+ // loadSpeed = 0;
+ //}
+
+ g.append("path")
+ .attr("id", function(d, i) { return pie.cssPrefix + "segment" + i; })
+ .attr("fill", function(d, i) {
+ var color = colors[i];
+ if (pie.options.misc.gradient.enabled) {
+ color = "url(#" + pie.cssPrefix + "grad" + i + ")";
+ }
+ return color;
+ })
+ .style("stroke", segmentStroke)
+ .style("stroke-width", 1)
+ //.transition()
+ //.ease(d3.easeCubicInOut)
+ //.duration(loadSpeed)
+ .attr("data-index", function(d, i) { return i; })
+ .attr("d", arc);
+/*
+ .attrTween("d", function(b) {
+ var i = d3.interpolate({ value: 0 }, b);
+ return function(t) {
+ var ret = pie.arc(i(t));
+ console.log(ret);
+ return ret;
+ };
+ });
+*/
+ pie.svg.selectAll("g." + pie.cssPrefix + "arc")
+ .attr("transform",
+ function(d, i) {
+ var angle = 0;
+ if (i > 0) {
+ angle = segments.getSegmentAngle(i-1, pie.options.data.content, pie.totalSize);
+ }
+ return "rotate(" + angle + ")";
+ }
+ );
+ pie.arc = arc;
+ },
+
+ addGradients: function(pie) {
+ var grads = pie.svg.append("defs")
+ .selectAll("radialGradient")
+ .data(pie.options.data.content)
+ .enter().append("radialGradient")
+ .attr("gradientUnits", "userSpaceOnUse")
+ .attr("cx", 0)
+ .attr("cy", 0)
+ .attr("r", "120%")
+ .attr("id", function(d, i) { return pie.cssPrefix + "grad" + i; });
+
+ grads.append("stop").attr("offset", "0%").style("stop-color", function(d, i) { return pie.options.colors[i]; });
+ grads.append("stop").attr("offset", pie.options.misc.gradient.percentage + "%").style("stop-color", pie.options.misc.gradient.color);
+ },
+
+ addSegmentEventHandlers: function(pie) {
+ var arc = pie.svg.selectAll("." + pie.cssPrefix + "arc");
+ arc = arc.merge(pie.__labels.inner.merge(pie.__labels.outer));
+
+ arc.on("click", function() {
+ var currentEl = d3.select(this);
+ var segment;
+
+ // events fire on both the segments AND the segment labels, hence the following
+ if (currentEl.attr("class") === pie.cssPrefix + "arc") {
+ segment = currentEl.select("path");
+ } else {
+ var index = currentEl.attr("data-index");
+ segment = d3.select("#" + pie.cssPrefix + "segment" + index);
+ }
+
+ var isExpanded = segment.attr("class") === pie.cssPrefix + "expanded";
+ segments.onSegmentEvent(pie, pie.options.callbacks.onClickSegment, segment, isExpanded);
+ if (pie.options.effects.pullOutSegmentOnClick.effect !== "none") {
+ if (isExpanded) {
+ segments.closeSegment(pie, segment.node());
+ } else {
+ segments.openSegment(pie, segment.node());
+ }
+ }
+ });
+
+ arc.on("mouseover", function() {
+ var currentEl = d3.select(this);
+ var segment, index;
+
+ if (currentEl.attr("class") === pie.cssPrefix + "arc") {
+ segment = currentEl.select("path");
+ } else {
+ index = currentEl.attr("data-index");
+ segment = d3.select("#" + pie.cssPrefix + "segment" + index);
+ }
+
+ if (pie.options.effects.highlightSegmentOnMouseover) {
+ index = segment.attr("data-index");
+ var segColor = pie.options.colors[index];
+ segment.style("fill", helpers.getColorShade(segColor, pie.options.effects.highlightLuminosity));
+ }
+
+ if (pie.options.tooltips.enabled) {
+ index = segment.attr("data-index");
+ tt.showTooltip(pie, index);
+ }
+
+ var isExpanded = segment.attr("class") === pie.cssPrefix + "expanded";
+ segments.onSegmentEvent(pie, pie.options.callbacks.onMouseoverSegment, segment, isExpanded);
+ });
+
+ arc.on("mousemove", function() {
+ tt.moveTooltip(pie);
+ });
+
+ arc.on("mouseout", function() {
+ var currentEl = d3.select(this);
+ var segment, index;
+
+ if (currentEl.attr("class") === pie.cssPrefix + "arc") {
+ segment = currentEl.select("path");
+ } else {
+ index = currentEl.attr("data-index");
+ segment = d3.select("#" + pie.cssPrefix + "segment" + index);
+ }
+
+ if (pie.options.effects.highlightSegmentOnMouseover) {
+ index = segment.attr("data-index");
+ var color = pie.options.colors[index];
+ if (pie.options.misc.gradient.enabled) {
+ color = "url(#" + pie.cssPrefix + "grad" + index + ")";
+ }
+ segment.style("fill", color);
+ }
+
+ if (pie.options.tooltips.enabled) {
+ index = segment.attr("data-index");
+ tt.hideTooltip(pie, index);
+ }
+
+ var isExpanded = segment.attr("class") === pie.cssPrefix + "expanded";
+ segments.onSegmentEvent(pie, pie.options.callbacks.onMouseoutSegment, segment, isExpanded);
+ });
+ },
+
+ // helper function used to call the click, mouseover, mouseout segment callback functions
+ onSegmentEvent: function(pie, func, segment, isExpanded) {
+ if (!helpers.isFunction(func)) {
+ return;
+ }
+ var index = parseInt(segment.attr("data-index"), 10);
+ func({
+ segment: segment.node(),
+ index: index,
+ expanded: isExpanded,
+ data: pie.options.data.content[index]
+ });
+ },
+
+ openSegment: function(pie, segment) {
+ if (pie.isOpeningSegment) {
+ return;
+ }
+ pie.isOpeningSegment = true;
+
+ segments.maybeCloseOpenSegment(pie);
+
+ d3.select(segment)
+ .transition()
+ .ease(segments.effectMap[pie.options.effects.pullOutSegmentOnClick.effect])
+ .duration(pie.options.effects.pullOutSegmentOnClick.speed)
+ .attr("transform", function(d, i) {
+ var c = pie.arc.centroid(d),
+ x = c[0],
+ y = c[1],
+ h = Math.sqrt(x*x + y*y),
+ pullOutSize = parseInt(pie.options.effects.pullOutSegmentOnClick.size, 10);
+
+ return "translate(" + ((x/h) * pullOutSize) + ',' + ((y/h) * pullOutSize) + ")";
+ })
+ .on("end", function(d, i) {
+ pie.currentlyOpenSegment = segment;
+ pie.isOpeningSegment = false;
+ d3.select(segment).attr("class", pie.cssPrefix + "expanded");
+ });
+ },
+
+ maybeCloseOpenSegment: function(pie) {
+ if (typeof pie !== 'undefined' && pie.svg.selectAll("." + pie.cssPrefix + "expanded").size() > 0) {
+ segments.closeSegment(pie, pie.svg.select("." + pie.cssPrefix + "expanded").node());
+ }
+ },
+
+ closeSegment: function(pie, segment) {
+ d3.select(segment)
+ .transition()
+ .duration(400)
+ .attr("transform", "translate(0,0)")
+ .on("end", function(d, i) {
+ d3.select(segment).attr("class", "");
+ pie.currentlyOpenSegment = null;
+ });
+ },
+
+ getCentroid: function(el) {
+ var bbox = el.getBBox();
+ return {
+ x: bbox.x + bbox.width / 2,
+ y: bbox.y + bbox.height / 2
+ };
+ },
+
+ /**
+ * General helper function to return a segment's angle, in various different ways.
+ * @param index
+ * @param opts optional object for fine-tuning exactly what you want.
+ */
+ getSegmentAngle: function(index, data, totalSize, opts) {
+ var options = extend({
+ // if true, this returns the full angle from the origin. Otherwise it returns the single segment angle
+ compounded: true,
+
+ // optionally returns the midpoint of the angle instead of the full angle
+ midpoint: false
+ }, opts);
+
+ var currValue = data[index].value;
+ var fullValue;
+ if (options.compounded) {
+ fullValue = 0;
+
+ // get all values up to and including the specified index
+ for (var i=0; i<=index; i++) {
+ fullValue += data[i].value;
+ }
+ }
+
+ if (typeof fullValue === 'undefined') {
+ fullValue = currValue;
+ }
+
+ // now convert the full value to an angle
+ var angle = (fullValue / totalSize) * 360;
+
+ // lastly, if we want the midpoint, factor that sucker in
+ if (options.midpoint) {
+ var currAngle = (currValue / totalSize) * 360;
+ angle -= (currAngle / 2);
+ }
+
+ return angle;
+ }
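+
+ // worked example (illustrative): for data values [10, 20, 30, 40] and totalSize 100,
+ // getSegmentAngle(1, data, 100) compounds 10 + 20 = 30, giving (30/100) * 360 = 108
+ // degrees; with { midpoint: true } it subtracts half of segment 1's own 72 degrees,
+ // returning 108 - 36 = 72.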
+
+};
+
+ //// --------- text.js -----------
+var text = {
+ offscreenCoord: -10000,
+
+ addTitle: function(pie) {
+ pie.__title = pie.svg.selectAll("." + pie.cssPrefix + "title")
+ .data([pie.options.header.title])
+ .enter()
+ .append("text")
+ .text(function(d) { return d.text; })
+ .attr("id", pie.cssPrefix + "title")
+ .attr("class", pie.cssPrefix + "title")
+ .attr("x", text.offscreenCoord)
+ .attr("y", text.offscreenCoord)
+ .attr("text-anchor", function() {
+ var location;
+ if (pie.options.header.location === "top-center" || pie.options.header.location === "pie-center") {
+ location = "middle";
+ } else {
+ location = "left";
+ }
+ return location;
+ })
+ .attr("fill", function(d) { return d.color; })
+ .style("font-size", function(d) { return d.fontSize + "px"; })
+ .style("font-weight", function(d) { return d.fontWeight; })
+ .style("font-family", function(d) { return d.font; });
+ },
+
+ positionTitle: function(pie) {
+ var textComponents = pie.textComponents;
+ var headerLocation = pie.options.header.location;
+ var canvasPadding = pie.options.misc.canvasPadding;
+ var canvasWidth = pie.options.size.canvasWidth;
+ var titleSubtitlePadding = pie.options.header.titleSubtitlePadding;
+
+ var x;
+ if (headerLocation === "top-left") {
+ x = canvasPadding.left;
+ } else {
+ x = ((canvasWidth - canvasPadding.right) / 2) + canvasPadding.left;
+ }
+
+ // add whatever offset has been added by user
+ x += pie.options.misc.pieCenterOffset.x;
+
+ var y = canvasPadding.top + textComponents.title.h;
+
+ if (headerLocation === "pie-center") {
+ y = pie.pieCenter.y;
+
+ // still not fully correct
+ if (textComponents.subtitle.exists) {
+ var totalTitleHeight = textComponents.title.h + titleSubtitlePadding + textComponents.subtitle.h;
+ y = y - (totalTitleHeight / 2) + textComponents.title.h;
+ } else {
+ y += (textComponents.title.h / 4);
+ }
+ }
+
+ pie.__title
+ .attr("x", x)
+ .attr("y", y);
+ },
+
+ addSubtitle: function(pie) {
+ var headerLocation = pie.options.header.location;
+
+ pie.__subtitle = pie.svg.selectAll("." + pie.cssPrefix + "subtitle")
+ .data([pie.options.header.subtitle])
+ .enter()
+ .append("text")
+ .text(function(d) { return d.text; })
+ .attr("x", text.offscreenCoord)
+ .attr("y", text.offscreenCoord)
+ .attr("id", pie.cssPrefix + "subtitle")
+ .attr("class", pie.cssPrefix + "subtitle")
+ .attr("text-anchor", function() {
+ var location;
+ if (headerLocation === "top-center" || headerLocation === "pie-center") {
+ location = "middle";
+ } else {
+ location = "left";
+ }
+ return location;
+ })
+ .attr("fill", function(d) { return d.color; })
+ .style("font-size", function(d) { return d.fontSize + "px"; })
+ .style("font-weight", function(d) { return d.fontWeight; })
+ .style("font-family", function(d) { return d.font; });
+ },
+
+ positionSubtitle: function(pie) {
+ var canvasPadding = pie.options.misc.canvasPadding;
+ var canvasWidth = pie.options.size.canvasWidth;
+
+ var x;
+ if (pie.options.header.location === "top-left") {
+ x = canvasPadding.left;
+ } else {
+ x = ((canvasWidth - canvasPadding.right) / 2) + canvasPadding.left;
+ }
+
+ // add whatever offset has been added by user
+ x += pie.options.misc.pieCenterOffset.x;
+
+ var y = text.getHeaderHeight(pie);
+
+ pie.__subtitle
+ .attr("x", x)
+ .attr("y", y);
+ },
+
+ addFooter: function(pie) {
+ pie.__footer = pie.svg.selectAll("." + pie.cssPrefix + "footer")
+ .data([pie.options.footer])
+ .enter()
+ .append("text")
+ .text(function(d) { return d.text; })
+ .attr("x", text.offscreenCoord)
+ .attr("y", text.offscreenCoord)
+ .attr("id", pie.cssPrefix + "footer")
+ .attr("class", pie.cssPrefix + "footer")
+ .attr("text-anchor", function() {
+ var location = "left";
+ if (pie.options.footer.location === "bottom-center") {
+ location = "middle";
+ } else if (pie.options.footer.location === "bottom-right") {
+ location = "left"; // on purpose. We have to change the x-coord to make it properly right-aligned
+ }
+ return location;
+ })
+ .attr("fill", function(d) { return d.color; })
+ .style("font-size", function(d) { return d.fontSize + "px"; })
+ .style("font-weight", function(d) { return d.fontWeight; })
+ .style("font-family", function(d) { return d.font; });
+ },
+
+ positionFooter: function(pie) {
+ var footerLocation = pie.options.footer.location;
+ var footerWidth = pie.textComponents.footer.w;
+ var canvasWidth = pie.options.size.canvasWidth;
+ var canvasHeight = pie.options.size.canvasHeight;
+ var canvasPadding = pie.options.misc.canvasPadding;
+
+ var x;
+ if (footerLocation === "bottom-left") {
+ x = canvasPadding.left;
+ } else if (footerLocation === "bottom-right") {
+ x = canvasWidth - footerWidth - canvasPadding.right;
+ } else {
+ x = canvasWidth / 2; // TODO - shouldn't this also take into account padding?
+ }
+
+ pie.__footer
+ .attr("x", x)
+ .attr("y", canvasHeight - canvasPadding.bottom);
+ },
+
+ getHeaderHeight: function(pie) {
+ var h;
+ if (pie.textComponents.title.exists) {
+
+ // if the subtitle isn't defined, it'll be set to 0
+ var totalTitleHeight = pie.textComponents.title.h + pie.options.header.titleSubtitlePadding + pie.textComponents.subtitle.h;
+ if (pie.options.header.location === "pie-center") {
+ h = pie.pieCenter.y - (totalTitleHeight / 2) + totalTitleHeight;
+ } else {
+ h = totalTitleHeight + pie.options.misc.canvasPadding.top;
+ }
+ } else {
+ if (pie.options.header.location === "pie-center") {
+ var footerPlusPadding = pie.options.misc.canvasPadding.bottom + pie.textComponents.footer.h;
+ h = ((pie.options.size.canvasHeight - footerPlusPadding) / 2) + pie.options.misc.canvasPadding.top + (pie.textComponents.subtitle.h / 2);
+ } else {
+ h = pie.options.misc.canvasPadding.top + pie.textComponents.subtitle.h;
+ }
+ }
+ return h;
+ }
+};
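+
+// Worked example (illustrative, not part of the library): with a 30px title, no subtitle
+// (h = 0), titleSubtitlePadding of 8 and canvasPadding.top of 5, a "top-left" header gives
+// getHeaderHeight = 30 + 8 + 0 + 5 = 43. Note that titleSubtitlePadding is counted even
+// when the subtitle is absent, because the subtitle height simply contributes 0.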
+
+ //// --------- tooltips.js -----------
+var tt = {
+ addTooltips: function(pie) {
+
+ // wrap all the per-segment tooltips (rect + text) in a single <g> for simpler positioning
+ var tooltips = pie.svg.insert("g")
+ .attr("class", pie.cssPrefix + "tooltips");
+
+ tooltips.selectAll("." + pie.cssPrefix + "tooltip")
+ .data(pie.options.data.content)
+ .enter()
+ .append("g")
+ .attr("class", pie.cssPrefix + "tooltip")
+ .attr("id", function(d, i) { return pie.cssPrefix + "tooltip" + i; })
+ .style("opacity", 0)
+ .append("rect")
+ .attr("rx", pie.options.tooltips.styles.borderRadius)
+ .attr("ry", pie.options.tooltips.styles.borderRadius)
+ .attr("x", -pie.options.tooltips.styles.padding)
+ .attr("opacity", pie.options.tooltips.styles.backgroundOpacity)
+ .style("fill", pie.options.tooltips.styles.backgroundColor);
+
+ tooltips.selectAll("." + pie.cssPrefix + "tooltip")
+ .data(pie.options.data.content)
+ .append("text")
+ .attr("fill", function(d) { return pie.options.tooltips.styles.color; })
+ .style("font-size", function(d) { return pie.options.tooltips.styles.fontSize; })
+ .style("font-weight", function(d) { return pie.options.tooltips.styles.fontWeight; })
+ .style("font-family", function(d) { return pie.options.tooltips.styles.font; })
+ .text(function(d, i) {
+ var caption = pie.options.tooltips.string;
+ if (pie.options.tooltips.type === "caption") {
+ caption = d.caption;
+ }
+ return tt.replacePlaceholders(pie, caption, i, {
+ label: d.label,
+ value: d.value,
+ percentage: d.percentage
+ });
+ });
+
+ tooltips.selectAll("." + pie.cssPrefix + "tooltip rect")
+ .attr("width", function (d, i) {
+ var dims = helpers.getDimensions(pie.cssPrefix + "tooltip" + i);
+ return dims.w + (2 * pie.options.tooltips.styles.padding);
+ })
+ .attr("height", function (d, i) {
+ var dims = helpers.getDimensions(pie.cssPrefix + "tooltip" + i);
+ return dims.h + (2 * pie.options.tooltips.styles.padding);
+ })
+ .attr("y", function (d, i) {
+ var dims = helpers.getDimensions(pie.cssPrefix + "tooltip" + i);
+ return -(dims.h / 2) + 1;
+ });
+ },
+
+ showTooltip: function(pie, index) {
+ var fadeInSpeed = pie.options.tooltips.styles.fadeInSpeed;
+ if (tt.currentTooltip === index) {
+ fadeInSpeed = 1; // re-showing the tooltip that's already current: skip the fade
+ }
+
+ tt.currentTooltip = index;
+ d3.select("#" + pie.cssPrefix + "tooltip" + index)
+ .transition()
+ .duration(fadeInSpeed)
+ .style("opacity", function() { return 1; });
+
+ tt.moveTooltip(pie);
+ },
+
+ moveTooltip: function(pie) {
+ d3.selectAll("#" + pie.cssPrefix + "tooltip" + tt.currentTooltip)
+ .attr("transform", function(d) {
+ var mouseCoords = d3.mouse(this.parentNode);
+ var x = mouseCoords[0] + pie.options.tooltips.styles.padding + 2;
+ var y = mouseCoords[1] - (2 * pie.options.tooltips.styles.padding) - 2;
+ return "translate(" + x + "," + y + ")";
+ });
+ },
+
+ hideTooltip: function(pie, index) {
+ d3.select("#" + pie.cssPrefix + "tooltip" + index)
+ .style("opacity", function() { return 0; });
+
+ // move the tooltip offscreen. This ensures that when the user next mouses over the segment, the hidden
+ // element won't interfere
+ d3.select("#" + pie.cssPrefix + "tooltip" + tt.currentTooltip)
+ .attr("transform", function(d, i) {
+ // klutzy, but it accounts for tooltip padding which could push it onscreen
+ var x = pie.options.size.canvasWidth + 1000;
+ var y = pie.options.size.canvasHeight + 1000;
+ return "translate(" + x + "," + y + ")";
+ });
+ },
+
+ replacePlaceholders: function(pie, str, index, replacements) {
+
+ // if the user has defined a placeholderParser function, call it before doing the replacements
+ if (helpers.isFunction(pie.options.tooltips.placeholderParser)) {
+ pie.options.tooltips.placeholderParser(index, replacements);
+ }
+
+ // String.replace passes (match, captureGroup) to its callback. Return the replacement
+ // when the placeholder is known; otherwise leave the {token} untouched
+ var replacer = function(match, placeholder) {
+ return replacements.hasOwnProperty(placeholder) ? replacements[placeholder] : match;
+ };
+ return str.replace(/\{(\w+)\}/g, replacer);
+ }
+};
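+
+// Illustrative usage sketch (not part of the library): replacePlaceholders swaps each
+// {token} in a tooltip template for the matching key in the replacements object, and
+// returns unmatched tokens verbatim. Assuming no custom placeholderParser is configured:
+//
+//   tt.replacePlaceholders(pie, "{label}: {value} ({percentage}%)", 0, {
+//     label: "apache", value: 104, percentage: 52
+//   });
+//   // -> "apache: 104 (52%)"
+//
+//   tt.replacePlaceholders(pie, "{label} {unknown}", 0, { label: "nginx" });
+//   // -> "nginx {unknown}"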
+
+
+ // --------------------------------------------------------------------------------------------
+
+ // our constructor
+ var d3pie = function(element, options) {
+
+ // element can be an ID or DOM element
+ this.element = element;
+ if (typeof element === "string") {
+ var el = element.replace(/^#/, ""); // replace any jQuery-like ID hash char
+ this.element = document.getElementById(el);
+ }
+
+ var opts = {};
+ extend(true, opts, defaultSettings, options);
+ this.options = opts;
+
+ // if the user specified a custom CSS element prefix (ID, class), use it
+ if (this.options.misc.cssPrefix !== null) {
+ this.cssPrefix = this.options.misc.cssPrefix;
+ } else {
+ this.cssPrefix = "p" + _uniqueIDCounter + "_";
+ _uniqueIDCounter++;
+ }
+
+ // now run some validation on the user-defined info
+ if (!validate.initialCheck(this)) {
+ return;
+ }
+
+ // tag the DOM node with an attribute so anyone can tell it contains a d3pie instance, and which version
+ d3.select(this.element).attr(_scriptName, _version);
+
+ // things that are done once
+ _setupData.call(this);
+ _init.call(this);
+ };
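+
+ // Illustrative usage sketch (not part of the library): the constructor accepts either an
+ // element ID (with or without a leading "#") or a DOM node, and deep-merges the supplied
+ // options over defaultSettings:
+ //
+ //   var pie = new d3pie("#myChart", {
+ //     header: { title: { text: "CPU usage" } },
+ //     data: { content: [
+ //       { label: "user", value: 60 },
+ //       { label: "system", value: 40 }
+ //     ]}
+ //   });
+ //
+ //   // equivalent: new d3pie(document.getElementById("myChart"), { ... });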
+
+ d3pie.prototype.recreate = function() {
+ // now run some validation on the user-defined info
+ if (!validate.initialCheck(this)) {
+ return;
+ }
+
+ _setupData.call(this);
+ _init.call(this);
+ };
+
+ d3pie.prototype.redraw = function() {
+ this.element.innerHTML = "";
+ _init.call(this);
+ };
+
+ d3pie.prototype.destroy = function() {
+ this.element.innerHTML = ""; // clear out the SVG
+ d3.select(this.element).attr(_scriptName, null); // remove the data attr
+ };
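+
+ // Lifecycle sketch (illustrative, not part of the library):
+ //
+ //   pie.redraw();    // clears the container and re-renders from the current options
+ //   pie.recreate();  // re-validates, re-derives the data, then re-renders
+ //   pie.destroy();   // clears the container and removes the version attribute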
+
+ /**
+ * Returns all pertinent info about the currently open segment. Returns null if no segment is open;
+ * otherwise, an object of the following form:
+ * {
+ *   element: DOM NODE,
+ *   index: N,
+ *   data: {}
+ * }
+ */
+ d3pie.prototype.getOpenSegment = function() {
+ var segment = this.currentlyOpenSegment;
+ if (segment !== null && typeof segment !== "undefined") {
+ var index = parseInt(d3.select(segment).attr("data-index"), 10);
+ return {
+ element: segment,
+ index: index,
+ data: this.options.data.content[index]
+ };
+ } else {
+ return null;
+ }
+ };
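+
+ // Illustrative usage sketch (not part of the library):
+ //
+ //   var open = pie.getOpenSegment();
+ //   if (open !== null) {
+ //     console.log("segment " + open.index + " is open: " + open.data.label);
+ //   }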
+
+ d3pie.prototype.openSegment = function(index) {
+ index = parseInt(index, 10);
+ if (index < 0 || index > this.options.data.content.length-1) {
+ return;
+ }
+ segments.openSegment(this, d3.select("#" + this.cssPrefix + "segment" + index).node());
+ };
+
+ d3pie.prototype.closeSegment = function() {
+ segments.maybeCloseOpenSegment(this);
+ };
+
+ // this lets the user dynamically update aspects of the pie chart without forcing a complete redraw. It
+ // re-renders only the part of the pie that the given property affects: some keys trigger a full repaint,
+ // others just update a single element in place
+ d3pie.prototype.updateProp = function(propKey, value) {
+ switch (propKey) {
+ case "header.title.text":
+ var oldVal = helpers.processObj(this.options, propKey);
+ helpers.processObj(this.options, propKey, value);
+ d3.select("#" + this.cssPrefix + "title").html(value);
+ if ((oldVal === "" && value !== "") || (oldVal !== "" && value === "")) {
+ this.redraw();
+ }
+ break;
+
+ case "header.subtitle.text":
+ var oldValue = helpers.processObj(this.options, propKey);
+ helpers.processObj(this.options, propKey, value);
+ d3.select("#" + this.cssPrefix + "subtitle").html(value);
+ if ((oldValue === "" && value !== "") || (oldValue !== "" && value === "")) {
+ this.redraw();
+ }
+ break;
+
+ case "callbacks.onload":
+ case "callbacks.onMouseoverSegment":
+ case "callbacks.onMouseoutSegment":
+ case "callbacks.onClickSegment":
+ case "effects.pullOutSegmentOnClick.effect":
+ case "effects.pullOutSegmentOnClick.speed":
+ case "effects.pullOutSegmentOnClick.size":
+ case "effects.highlightSegmentOnMouseover":
+ case "effects.highlightLuminosity":
+ helpers.processObj(this.options, propKey, value);
+ break;
+
+ // everything else, attempt to update it & do a repaint
+ default:
+ helpers.processObj(this.options, propKey, value);
+
+ this.destroy();
+ this.recreate();
+ break;
+ }
+ };
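+
+ // Illustrative usage sketch (not part of the library), showing the three update paths:
+ //
+ //   pie.updateProp("header.title.text", "Memory");   // updates the title element in place
+ //                                                    // (full redraw only when toggling empty/non-empty)
+ //   pie.updateProp("callbacks.onClickSegment", fn);  // stored only; no redraw
+ //   pie.updateProp("size.pieOuterRadius", "90%");    // falls through to destroy() + recreate()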
+
+
+ // ------------------------------------------------------------------------------------------------
+
+ var _setupData = function () {
+ this.options.data.content = math.sortPieData(this);
+ if (this.options.data.smallSegmentGrouping.enabled) {
+ this.options.data.content = helpers.applySmallSegmentGrouping(this.options.data.content, this.options.data.smallSegmentGrouping);
+ }
+
+ this.options.colors = helpers.initSegmentColors(this);
+ this.totalSize = math.getTotalPieSize(this.options.data.content);
+
+ var dp = this.options.labels.percentage.decimalPlaces;
+
+ // add in percentage data to content
+ for (var i=0; i<this.options.data.content.length; i++) {
+ this.options.data.content[i].percentage = _getPercentage(this.options.data.content[i].value, this.totalSize, dp);
+ }
+
+ // adjust the final item so the percentages always add up to precisely 100%. This is necessary because
+ // rounding each slice's percentage individually can leave the total slightly above or below 100
+ var totalPercentage = 0;
+ for (var j=0; j<this.options.data.content.length; j++) {
+ if (j === this.options.data.content.length - 1) {
+ this.options.data.content[j].percentage = (100 - totalPercentage).toFixed(dp);
+ }
+ totalPercentage += parseFloat(this.options.data.content[j].percentage);
+ }
+ };
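+
+ // Worked example (illustrative, not part of the library): with values [1, 1, 1] and
+ // decimalPlaces = 2, _getPercentage yields 33.33 for every slice (99.99 in total), so the
+ // loop above rewrites the final item as 100 - 33.33 - 33.33 = 33.34 and the displayed
+ // percentages sum to exactly 100.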
+
+ var _init = function() {
+
+ // prep-work
+ this.svg = helpers.addSVGSpace(this);
+
+ // store info about the main text components as part of the d3pie object instance
+ this.textComponents = {
+ headerHeight: 0,
+ title: {
+ exists: this.options.header.title.text !== "",
+ h: 0,
+ w: 0
+ },
+ subtitle: {
+ exists: this.options.header.subtitle.text !== "",
+ h: 0,
+ w: 0
+ },
+ footer: {
+ exists: this.options.footer.text !== "",
+ h: 0,
+ w: 0
+ }
+ };
+
+ this.outerLabelGroupData = [];
+
+ // add the key text components offscreen (title, subtitle, footer). We need to know their widths/heights for later computation
+ if (this.textComponents.title.exists) text.addTitle(this);
+ if (this.textComponents.subtitle.exists) text.addSubtitle(this);
+ text.addFooter(this);
+
+ // the footer never moves. Put it in place now
+ var self = this;
+ text.positionFooter(self);
+ var footerDims = helpers.getDimensions(self.__footer.node()); // renamed from "d3" so it no longer shadows the global d3 object
+ self.textComponents.footer.h = footerDims.h;
+ self.textComponents.footer.w = footerDims.w;
+
+ if (self.textComponents.title.exists) {
+ var d1 = helpers.getDimensions(self.__title.node());
+ self.textComponents.title.h = d1.h;
+ self.textComponents.title.w = d1.w;
+ }
+
+ if (self.textComponents.subtitle.exists) {
+ var d2 = helpers.getDimensions(self.__subtitle.node());
+ self.textComponents.subtitle.h = d2.h;
+ self.textComponents.subtitle.w = d2.w;
+ }
+
+ // now compute the full header height
+ if (self.textComponents.title.exists || self.textComponents.subtitle.exists) {
+ var headerHeight = 0;
+ if (self.textComponents.title.exists) {
+ headerHeight += self.textComponents.title.h;
+ if (self.textComponents.subtitle.exists) {
+ headerHeight += self.options.header.titleSubtitlePadding;
+ }
+ }
+ if (self.textComponents.subtitle.exists) {
+ headerHeight += self.textComponents.subtitle.h;
+ }
+ self.textComponents.headerHeight = headerHeight;
+ }
+
+ // at this point, all main text component dimensions have been calculated
+ math.computePieRadius(self);
+
+ // this value is used all over the place for placing things and calculating locations. We figure it out ONCE
+ // and store it as part of the object
+ math.calculatePieCenter(self);
+
+ // position the title and subtitle
+ text.positionTitle(self);
+ text.positionSubtitle(self);
+
+ // now create the pie chart segments, and gradients if the user desired
+ if (self.options.misc.gradient.enabled) {
+ segments.addGradients(self);
+ }
+ segments.create(self); // also creates this.arc
+
+ self.__labels = {};
+ labels.add(self, "inner", self.options.labels.inner.format);
+ labels.add(self, "outer", self.options.labels.outer.format);
+
+ // position the label elements relatively within their individual group (label, percentage, value)
+ labels.positionLabelElements(self, "inner", self.options.labels.inner.format);
+ labels.positionLabelElements(self, "outer", self.options.labels.outer.format);
+ labels.computeOuterLabelCoords(self);
+
+ // this is (and should be) dumb. It just places the outer groups at their calculated, collision-free positions
+ labels.positionLabelGroups(self, "outer");
+
+ // we use the label line positions for many other calculations, so ALWAYS compute them
+ labels.computeLabelLinePositions(self);
+
+ // only add them if they're actually enabled
+ if (self.options.labels.lines.enabled && self.options.labels.outer.format !== "none") {
+ labels.addLabelLines(self);
+ }
+
+ labels.positionLabelGroups(self, "inner");
+
+ if (helpers.isFunction(self.options.callbacks.onload)) {
+ try {
+ self.options.callbacks.onload();
+ } catch (e) { } // intentionally swallow any error thrown by the user's onload callback
+ }
+
+ // add and position the tooltips
+ if (self.options.tooltips.enabled) {
+ tt.addTooltips(self);
+ }
+
+ segments.addSegmentEventHandlers(self);
+ };
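+
+ // Render pipeline summary (illustrative): _init measures the title, subtitle and footer
+ // offscreen, derives the pie radius and center from the remaining canvas space, positions
+ // the header text, then draws gradients (optional), segments, labels and label lines,
+ // fires the onload callback, adds tooltips (optional) and finally wires up the segment
+ // event handlers.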
+
+ var _getPercentage = function(value, total, decimalPlaces) {
+ var relativeAmount = value / total;
+ if (decimalPlaces <= 0) {
+ return Math.round(relativeAmount * 100);
+ } else {
+ return (relativeAmount * 100).toFixed(decimalPlaces);
+ }
+ };
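+
+ // Illustrative examples (not part of the library); note that the return type differs:
+ //
+ //   _getPercentage(1, 3, 0);  // -> 33       (a number, via Math.round)
+ //   _getPercentage(1, 3, 2);  // -> "33.33"  (a string, via toFixed)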
+
+ return d3pie;
+}));
diff --git a/web/gui/lib/dygraph-c91c859.min.js b/web/gui/lib/dygraph-c91c859.min.js
new file mode 100644
index 0000000..1713fee
--- /dev/null
+++ b/web/gui/lib/dygraph-c91c859.min.js
@@ -0,0 +1,7 @@
+/*! @license Copyright 2017 Dan Vanderkam (danvdk@gmail.com) MIT-licensed (http://opensource.org/licenses/MIT) */
+// SPDX-License-Identifier: MIT
+!function(t){if("object"==typeof exports&&"undefined"!=typeof module)module.exports=t();else if("function"==typeof define&&define.amd)define([],t);else{var e;e="undefined"!=typeof window?window:"undefined"!=typeof global?global:"undefined"!=typeof self?self:this,e.Dygraph=t()}}(function(){return function t(e,a,i){function n(o,s){if(!a[o]){if(!e[o]){var l="function"==typeof require&&require;if(!s&&l)return l(o,!0);if(r)return r(o,!0);var h=new Error("Cannot find module '"+o+"'");throw h.code="MODULE_NOT_FOUND",h}var u=a[o]={exports:{}};e[o][0].call(u.exports,function(t){var a=e[o][1][t];return n(a||t)},u,u.exports,t,e,a,i)}return a[o].exports}for(var r="function"==typeof require&&require,o=0;o<i.length;o++)n(i[o]);return n}({1:[function(t,e,a){function i(){throw new Error("setTimeout has not been defined")}function n(){throw new Error("clearTimeout has not been defined")}function r(t){if(d===setTimeout)return setTimeout(t,0);if((d===i||!d)&&setTimeout)return d=setTimeout,setTimeout(t,0);try{return d(t,0)}catch(e){try{return d.call(null,t,0)}catch(e){return d.call(this,t,0)}}}function o(t){if(c===clearTimeout)return clearTimeout(t);if((c===n||!c)&&clearTimeout)return c=clearTimeout,clearTimeout(t);try{return c(t)}catch(e){try{return c.call(null,t)}catch(e){return c.call(this,t)}}}function s(){v&&g&&(v=!1,g.length?f=g.concat(f):_=-1,f.length&&l())}function l(){if(!v){var t=r(s);v=!0;for(var e=f.length;e;){for(g=f,f=[];++_<e;)g&&g[_].run();_=-1,e=f.length}g=null,v=!1,o(t)}}function h(t,e){this.fun=t,this.array=e}function u(){}var d,c,p=e.exports={};!function(){try{d="function"==typeof setTimeout?setTimeout:i}catch(t){d=i}try{c="function"==typeof clearTimeout?clearTimeout:n}catch(t){c=n}}();var g,f=[],v=!1,_=-1;p.nextTick=function(t){var e=new Array(arguments.length-1);if(arguments.length>1)for(var a=1;a<arguments.length;a++)e[a-1]=arguments[a];f.push(new h(t,e)),1!==f.length||v||r(l)},h.prototype.run=function(){this.fun.apply(null,this.array)},p.title="browser",p.browser=!0,p.env={},p.argv=[],p.version="",p.versions={},p.on=u,p.addListener=u,p.once=u,p.off=u,p.removeListener=u,p.removeAllListeners=u,p.emit=u,p.prependListener=u,p.prependOnceListener=u,p.listeners=function(t){return[]},p.binding=function(t){throw new Error("process.binding is not supported")},p.cwd=function(){return"/"},p.chdir=function(t){throw new Error("process.chdir is not supported")},p.umask=function(){return 0}},{}],2:[function(t,e,a){"use strict";Object.defineProperty(a,"__esModule",{value:!0});var i=t("./bars"),n=function(t){return t&&t.__esModule?t:{default:t}}(i),r=function(){};r.prototype=new n.default,r.prototype.extractSeries=function(t,e,a){for(var i,n,r,o=[],s=a.get("logscale"),l=0;l<t.length;l++)i=t[l][0],r=t[l][e],s&&null!==r&&(r[0]<=0||r[1]<=0||r[2]<=0)&&(r=null),null!==r?(n=r[1],null===n||isNaN(n)?o.push([i,n,[n,n]]):o.push([i,n,[r[0],r[2]]])):o.push([i,null,[null,null]]);return o},r.prototype.rollingAverage=function(t,e,a){e=Math.min(e,t.length);var i,n,r,o,s,l,h,u=[];for(n=0,o=0,r=0,s=0,l=0;l<t.length;l++){if(i=t[l][1],h=t[l][2],u[l]=t[l],null===i||isNaN(i)||(n+=h[0],o+=i,r+=h[1],s+=1),l-e>=0){var d=t[l-e];null===d[1]||isNaN(d[1])||(n-=d[2][0],o-=d[1],r-=d[2][1],s-=1)}u[l]=s?[t[l][0],1*o/s,[1*n/s,1*r/s]]:[t[l][0],null,[null,null]]}return u},a.default=r,e.exports=a.default},{"./bars":5}],3:[function(t,e,a){"use strict";Object.defineProperty(a,"__esModule",{value:!0});var i=t("./bars"),n=function(t){return t&&t.__esModule?t:{default:t}}(i),r=function(){};r.prototype=new 
n.default,r.prototype.extractSeries=function(t,e,a){for(var i,n,r,o,s=[],l=a.get("sigma"),h=a.get("logscale"),u=0;u<t.length;u++)i=t[u][0],o=t[u][e],h&&null!==o&&(o[0]<=0||o[0]-l*o[1]<=0)&&(o=null),null!==o?(n=o[0],null===n||isNaN(n)?s.push([i,n,[n,n,n]]):(r=l*o[1],s.push([i,n,[n-r,n+r,o[1]]]))):s.push([i,null,[null,null,null]]);return s},r.prototype.rollingAverage=function(t,e,a){e=Math.min(e,t.length);var i,n,r,o,s,l,h,u,d,c=[],p=a.get("sigma");for(i=0;i<t.length;i++){for(s=0,u=0,l=0,n=Math.max(0,i-e+1);n<i+1;n++)null===(r=t[n][1])||isNaN(r)||(l++,s+=r,u+=Math.pow(t[n][2][2],2));l?(h=Math.sqrt(u)/l,d=s/l,c[i]=[t[i][0],d,[d-p*h,d+p*h]]):(o=1==e?t[i][1]:null,c[i]=[t[i][0],o,[o,o]])}return c},a.default=r,e.exports=a.default},{"./bars":5}],4:[function(t,e,a){"use strict";Object.defineProperty(a,"__esModule",{value:!0});var i=t("./bars"),n=function(t){return t&&t.__esModule?t:{default:t}}(i),r=function(){};r.prototype=new n.default,r.prototype.extractSeries=function(t,e,a){for(var i,n,r,o,s,l,h,u,d=[],c=a.get("sigma"),p=a.get("logscale"),g=0;g<t.length;g++)i=t[g][0],r=t[g][e],p&&null!==r&&(r[0]<=0||r[1]<=0)&&(r=null),null!==r?(o=r[0],s=r[1],null===o||isNaN(o)?d.push([i,o,[o,o,o,s]]):(l=s?o/s:0,h=s?c*Math.sqrt(l*(1-l)/s):1,u=100*h,n=100*l,d.push([i,n,[n-u,n+u,o,s]]))):d.push([i,null,[null,null,null,null]]);return d},r.prototype.rollingAverage=function(t,e,a){e=Math.min(e,t.length);var i,n,r,o,s=[],l=a.get("sigma"),h=a.get("wilsonInterval"),u=0,d=0;for(r=0;r<t.length;r++){u+=t[r][2][2],d+=t[r][2][3],r-e>=0&&(u-=t[r-e][2][2],d-=t[r-e][2][3]);var c=t[r][0],p=d?u/d:0;if(h)if(d){var g=p<0?0:p,f=d,v=l*Math.sqrt(g*(1-g)/f+l*l/(4*f*f)),_=1+l*l/d;i=(g+l*l/(2*d)-v)/_,n=(g+l*l/(2*d)+v)/_,s[r]=[c,100*g,[100*i,100*n]]}else s[r]=[c,0,[0,0]];else o=d?l*Math.sqrt(p*(1-p)/d):1,s[r]=[c,100*p,[100*(p-o),100*(p+o)]]}return s},a.default=r,e.exports=a.default},{"./bars":5}],5:[function(t,e,a){"use strict";function i(t){return t&&t.__esModule?t:{default:t}}Object.defineProperty(a,"__esModule",{value:!0});var n=t("./datahandler"),r=i(n),o=t("../dygraph-layout"),s=i(o),l=function(){r.default.call(this)};l.prototype=new r.default,l.prototype.extractSeries=function(t,e,a){},l.prototype.rollingAverage=function(t,e,a){},l.prototype.onPointsCreated_=function(t,e){for(var a=0;a<t.length;++a){var i=t[a],n=e[a];n.y_top=NaN,n.y_bottom=NaN,n.yval_minus=r.default.parseFloat(i[2][0]),n.yval_plus=r.default.parseFloat(i[2][1])}},l.prototype.getExtremeYValues=function(t,e,a){for(var i,n=null,r=null,o=t.length-1,s=0;s<=o;s++)if(null!==(i=t[s][1])&&!isNaN(i)){var l=t[s][2][0],h=t[s][2][1];l>i&&(l=i),h<i&&(h=i),(null===r||h>r)&&(r=h),(null===n||l<n)&&(n=l)}return[n,r]},l.prototype.onLineEvaluated=function(t,e,a){for(var i,n=0;n<t.length;n++)i=t[n],i.y_top=s.default.calcYNormal_(e,i.yval_minus,a),i.y_bottom=s.default.calcYNormal_(e,i.yval_plus,a)},a.default=l,e.exports=a.default},{"../dygraph-layout":13,"./datahandler":6}],6:[function(t,e,a){"use strict";Object.defineProperty(a,"__esModule",{value:!0});var i=function(){},n=i;n.X=0,n.Y=1,n.EXTRAS=2,n.prototype.extractSeries=function(t,e,a){},n.prototype.seriesToPoints=function(t,e,a){for(var i=[],r=0;r<t.length;++r){var o=t[r],s=o[1],l=null===s?null:n.parseFloat(s),h={x:NaN,y:NaN,xval:n.parseFloat(o[0]),yval:l,name:e,idx:r+a,canvasx:NaN,canvasy:NaN};i.push(h)}return 
this.onPointsCreated_(t,i),i},n.prototype.onPointsCreated_=function(t,e){},n.prototype.rollingAverage=function(t,e,a){},n.prototype.getExtremeYValues=function(t,e,a){},n.prototype.onLineEvaluated=function(t,e,a){},n.parseFloat=function(t){return null===t?NaN:t},a.default=i,e.exports=a.default},{}],7:[function(t,e,a){"use strict";function i(t){return t&&t.__esModule?t:{default:t}}Object.defineProperty(a,"__esModule",{value:!0});var n=t("./datahandler"),r=(i(n),t("./default")),o=i(r),s=function(){};s.prototype=new o.default,s.prototype.extractSeries=function(t,e,a){for(var i,n,r,o,s,l,h=[],u=a.get("logscale"),d=0;d<t.length;d++)i=t[d][0],r=t[d][e],u&&null!==r&&(r[0]<=0||r[1]<=0)&&(r=null),null!==r?(o=r[0],s=r[1],null===o||isNaN(o)?h.push([i,o,[o,s]]):(l=s?o/s:0,n=100*l,h.push([i,n,[o,s]]))):h.push([i,null,[null,null]]);return h},s.prototype.rollingAverage=function(t,e,a){e=Math.min(e,t.length);var i,n=[],r=0,o=0;for(i=0;i<t.length;i++){r+=t[i][2][0],o+=t[i][2][1],i-e>=0&&(r-=t[i-e][2][0],o-=t[i-e][2][1]);var s=t[i][0],l=o?r/o:0;n[i]=[s,100*l]}return n},a.default=s,e.exports=a.default},{"./datahandler":6,"./default":8}],8:[function(t,e,a){"use strict";Object.defineProperty(a,"__esModule",{value:!0});var i=t("./datahandler"),n=function(t){return t&&t.__esModule?t:{default:t}}(i),r=function(){};r.prototype=new n.default,r.prototype.extractSeries=function(t,e,a){for(var i=[],n=a.get("logscale"),r=0;r<t.length;r++){var o=t[r][0],s=t[r][e];n&&s<=0&&(s=null),i.push([o,s])}return i},r.prototype.rollingAverage=function(t,e,a){e=Math.min(e,t.length);var i,n,r,o,s,l=[];if(1==e)return t;for(i=0;i<t.length;i++){for(o=0,s=0,n=Math.max(0,i-e+1);n<i+1;n++)null===(r=t[n][1])||isNaN(r)||(s++,o+=t[n][1]);l[i]=s?[t[i][0],o/s]:[t[i][0],null]}return l},r.prototype.getExtremeYValues=function(t,e,a){for(var i,n=null,r=null,o=t.length-1,s=0;s<=o;s++)null===(i=t[s][1])||isNaN(i)||((null===r||i>r)&&(r=i),(null===n||i<n)&&(n=i));return[n,r]},a.default=r,e.exports=a.default},{"./datahandler":6}],9:[function(t,e,a){"use strict";Object.defineProperty(a,"__esModule",{value:!0});var i=t("./dygraph-utils"),n=function(t){if(t&&t.__esModule)return t;var e={};if(null!=t)for(var a in t)Object.prototype.hasOwnProperty.call(t,a)&&(e[a]=t[a]);return e.default=t,e}(i),r=t("./dygraph"),o=function(t){return t&&t.__esModule?t:{default:t}}(r),s=function(t,e,a,i){if(this.dygraph_=t,this.layout=i,this.element=e,this.elementContext=a,this.height=t.height_,this.width=t.width_,!n.isCanvasSupported(this.element))throw"Canvas is not supported.";this.area=i.getPlotArea();var r=this.dygraph_.canvas_ctx_;r.beginPath(),r.rect(this.area.x,this.area.y,this.area.w,this.area.h),r.clip(),r=this.dygraph_.hidden_ctx_,r.beginPath(),r.rect(this.area.x,this.area.y,this.area.w,this.area.h),r.clip()};s.prototype.clear=function(){this.elementContext.clearRect(0,0,this.width,this.height)},s.prototype.render=function(){this._updatePoints(),this._renderLineChart()},s._getIteratorPredicate=function(t){return t?s._predicateThatSkipsEmptyPoints:null},s._predicateThatSkipsEmptyPoints=function(t,e){return null!==t[e].yval},s._drawStyledLine=function(t,e,a,i,r,o,l){var h=t.dygraph,u=h.getBooleanOption("stepPlot",t.setName);n.isArrayLike(i)||(i=null);var d=h.getBooleanOption("drawGapEdgePoints",t.setName),c=t.points,p=t.setName,g=n.createIterator(c,0,c.length,s._getIteratorPredicate(h.getBooleanOption("connectSeparatedPoints",p))),f=i&&i.length>=2,v=t.drawingContext;v.save(),f&&v.setLineDash&&v.setLineDash(i);var 
_=s._drawSeries(t,g,a,l,r,d,u,e);s._drawPointsOnLine(t,_,o,e,l),f&&v.setLineDash&&v.setLineDash([]),v.restore()},s._drawSeries=function(t,e,a,i,n,r,o,s){var l,h,u=null,d=null,c=null,p=[],g=!0,f=t.drawingContext;f.beginPath(),f.strokeStyle=s,f.lineWidth=a;for(var v=e.array_,_=e.end_,y=e.predicate_,x=e.start_;x<_;x++){if(h=v[x],y){for(;x<_&&!y(v,x);)x++;if(x==_)break;h=v[x]}if(null===h.canvasy||h.canvasy!=h.canvasy)o&&null!==u&&(f.moveTo(u,d),f.lineTo(h.canvasx,d)),u=d=null;else{if(l=!1,r||null===u){e.nextIdx_=x,e.next(),c=e.hasNext?e.peek.canvasy:null;var m=null===c||c!=c;l=null===u&&m,r&&(!g&&null===u||e.hasNext&&m)&&(l=!0)}null!==u?a&&(o&&(f.moveTo(u,d),f.lineTo(h.canvasx,d)),f.lineTo(h.canvasx,h.canvasy)):f.moveTo(h.canvasx,h.canvasy),(n||l)&&p.push([h.canvasx,h.canvasy,h.idx]),u=h.canvasx,d=h.canvasy}g=!1}return f.stroke(),p},s._drawPointsOnLine=function(t,e,a,i,n){for(var r=t.drawingContext,o=0;o<e.length;o++){var s=e[o];r.save(),a.call(t.dygraph,t.dygraph,t.setName,r,s[0],s[1],i,n,s[2]),r.restore()}},s.prototype._updatePoints=function(){for(var t=this.layout.points,e=t.length;e--;)for(var a=t[e],i=a.length;i--;){var n=a[i];n.canvasx=this.area.w*n.x+this.area.x,n.canvasy=this.area.h*n.y+this.area.y}},s.prototype._renderLineChart=function(t,e){var a,i,r=e||this.elementContext,o=this.layout.points,s=this.layout.setNames;this.colors=this.dygraph_.colorsMap_;var l=this.dygraph_.getOption("plotter"),h=l;n.isArrayLike(h)||(h=[h]);var u={};for(a=0;a<s.length;a++){i=s[a];var d=this.dygraph_.getOption("plotter",i);d!=l&&(u[i]=d)}for(a=0;a<h.length;a++)for(var c=h[a],p=a==h.length-1,g=0;g<o.length;g++)if(i=s[g],!t||i==t){var f=o[g],v=c;if(i in u){if(!p)continue;v=u[i]}var _=this.colors[i],y=this.dygraph_.getOption("strokeWidth",i);r.save(),r.strokeStyle=_,r.lineWidth=y,v({points:f,setName:i,drawingContext:r,color:_,strokeWidth:y,dygraph:this.dygraph_,axis:this.dygraph_.axisPropertiesForSeries(i),plotArea:this.area,seriesIndex:g,seriesCount:o.length,singleSeriesName:t,allSeriesPoints:o}),r.restore()}},s._Plotters={linePlotter:function(t){s._linePlotter(t)},fillPlotter:function(t){s._fillPlotter(t)},errorPlotter:function(t){s._errorPlotter(t)}},s._linePlotter=function(t){var e=t.dygraph,a=t.setName,i=t.strokeWidth,r=e.getNumericOption("strokeBorderWidth",a),o=e.getOption("drawPointCallback",a)||n.Circles.DEFAULT,l=e.getOption("strokePattern",a),h=e.getBooleanOption("drawPoints",a),u=e.getNumericOption("pointSize",a);r&&i&&s._drawStyledLine(t,e.getOption("strokeBorderColor",a),i+2*r,l,h,o,u),s._drawStyledLine(t,t.color,i,l,h,o,u)},s._errorPlotter=function(t){var e=t.dygraph,a=t.setName;if(e.getBooleanOption("errorBars")||e.getBooleanOption("customBars")){e.getBooleanOption("fillGraph",a)&&console.warn("Can't use fillGraph option with error bars");var i,r=t.drawingContext,o=t.color,l=e.getNumericOption("fillAlpha",a),h=e.getBooleanOption("stepPlot",a),u=t.points,d=n.createIterator(u,0,u.length,s._getIteratorPredicate(e.getBooleanOption("connectSeparatedPoints",a))),c=NaN,p=NaN,g=[-1,-1],f=n.toRGB_(o),v="rgba("+f.r+","+f.g+","+f.b+","+l+")";r.fillStyle=v,r.beginPath();for(var _=function(t){return null===t||void 0===t||isNaN(t)};d.hasNext;){var 
y=d.next();!h&&_(y.y)||h&&!isNaN(p)&&_(p)?c=NaN:(i=[y.y_bottom,y.y_top],h&&(p=y.y),isNaN(i[0])&&(i[0]=y.y),isNaN(i[1])&&(i[1]=y.y),i[0]=t.plotArea.h*i[0]+t.plotArea.y,i[1]=t.plotArea.h*i[1]+t.plotArea.y,isNaN(c)||(h?(r.moveTo(c,g[0]),r.lineTo(y.canvasx,g[0]),r.lineTo(y.canvasx,g[1])):(r.moveTo(c,g[0]),r.lineTo(y.canvasx,i[0]),r.lineTo(y.canvasx,i[1])),r.lineTo(c,g[1]),r.closePath()),g=i,c=y.canvasx)}r.fill()}},s._fastCanvasProxy=function(t){var e=[],a=null,i=null,n=0,r=function(t){if(!(e.length<=1)){for(var a=e.length-1;a>0;a--){var i=e[a];if(2==i[0]){var n=e[a-1];n[1]==i[1]&&n[2]==i[2]&&e.splice(a,1)}}for(var a=0;a<e.length-1;){var i=e[a];2==i[0]&&2==e[a+1][0]?e.splice(a,1):a++}if(e.length>2&&!t){var r=0;2==e[0][0]&&r++;for(var o=null,s=null,a=r;a<e.length;a++){var i=e[a];if(1==i[0])if(null===o&&null===s)o=a,s=a;else{var l=i[2];l<e[o][2]?o=a:l>e[s][2]&&(s=a)}}var h=e[o],u=e[s];e.splice(r,e.length-r),o<s?(e.push(h),e.push(u)):o>s?(e.push(u),e.push(h)):e.push(h)}}},o=function(a){r(a);for(var o=0,s=e.length;o<s;o++){var l=e[o];1==l[0]?t.lineTo(l[1],l[2]):2==l[0]&&t.moveTo(l[1],l[2])}e.length&&(i=e[e.length-1][1]),n+=e.length,e=[]},s=function(t,n,r){var s=Math.round(n);if(null===a||s!=a){var l=a-i>1,h=s-a>1;o(l||h),a=s}e.push([t,n,r])};return{moveTo:function(t,e){s(2,t,e)},lineTo:function(t,e){s(1,t,e)},stroke:function(){o(!0),t.stroke()},fill:function(){o(!0),t.fill()},beginPath:function(){o(!0),t.beginPath()},closePath:function(){o(!0),t.closePath()},_count:function(){return n}}},s._fillPlotter=function(t){if(!t.singleSeriesName&&0===t.seriesIndex){for(var e=t.dygraph,a=e.getLabels().slice(1),i=a.length;i>=0;i--)e.visibility()[i]||a.splice(i,1);if(function(){for(var t=0;t<a.length;t++)if(e.getBooleanOption("fillGraph",a[t]))return!0;return!1}())for(var r,l,h=t.plotArea,u=t.allSeriesPoints,d=u.length,c=e.getBooleanOption("stackedGraph"),p=e.getColors(),g={},f=function(t,e,a,i){if(t.lineTo(e,a),c)for(var n=i.length-1;n>=0;n--){var r=i[n];t.lineTo(r[0],r[1])}},v=d-1;v>=0;v--){var _=t.drawingContext,y=a[v];if(e.getBooleanOption("fillGraph",y)){var x=e.getNumericOption("fillAlpha",y),m=e.getBooleanOption("stepPlot",y),b=p[v],w=e.axisPropertiesForSeries(y),A=1+w.minyval*w.yscale;A<0?A=0:A>1&&(A=1),A=h.h*A+h.y;var O,D=u[v],E=n.createIterator(D,0,D.length,s._getIteratorPredicate(e.getBooleanOption("connectSeparatedPoints",y))),L=NaN,T=[-1,-1],S=n.toRGB_(b),P="rgba("+S.r+","+S.g+","+S.b+","+x+")";_.fillStyle=P,_.beginPath();var C,M=!0;(D.length>2*e.width_||o.default.FORCE_FAST_PROXY)&&(_=s._fastCanvasProxy(_));for(var N,F=[];E.hasNext;)if(N=E.next(),n.isOK(N.y)||m){if(c){if(!M&&C==N.xval)continue;M=!1,C=N.xval,r=g[N.canvasx];var k;k=void 0===r?A:l?r[0]:r,O=[N.canvasy,k],m?-1===T[0]?g[N.canvasx]=[N.canvasy,A]:g[N.canvasx]=[N.canvasy,T[0]]:g[N.canvasx]=N.canvasy}else O=isNaN(N.canvasy)&&m?[h.y+h.h,A]:[N.canvasy,A];isNaN(L)?(_.moveTo(N.canvasx,O[1]),_.lineTo(N.canvasx,O[0])):(m?(_.lineTo(N.canvasx,T[0]),_.lineTo(N.canvasx,O[0])):_.lineTo(N.canvasx,O[0]),c&&(F.push([L,T[1]]),l&&r?F.push([N.canvasx,r[1]]):F.push([N.canvasx,O[1]]))),T=O,L=N.canvasx}else f(_,L,T[1],F),F=[],L=NaN,null===N.y_stacked||isNaN(N.y_stacked)||(g[N.canvasx]=h.h*N.y_stacked+h.y);l=m,O&&N&&(f(_,N.canvasx,O[1],F),F=[]),_.fill()}}}},a.default=s,e.exports=a.default},{"./dygraph":18,"./dygraph-utils":17}],10:[function(t,e,a){"use strict";function i(t){return t&&t.__esModule?t:{default:t}}function n(t){if(t&&t.__esModule)return t;var e={};if(null!=t)for(var a in t)Object.prototype.hasOwnProperty.call(t,a)&&(e[a]=t[a]);return 
e.default=t,e}Object.defineProperty(a,"__esModule",{value:!0});var r=t("./dygraph-tickers"),o=n(r),s=t("./dygraph-interaction-model"),l=i(s),h=t("./dygraph-canvas"),u=i(h),d=t("./dygraph-utils"),c=n(d),p={highlightCircleSize:3,highlightSeriesOpts:null,highlightSeriesBackgroundAlpha:.5,highlightSeriesBackgroundColor:"rgb(255, 255, 255)",labelsSeparateLines:!1,labelsShowZeroValues:!0,labelsKMB:!1,labelsKMG2:!1,showLabelsOnHighlight:!0,digitsAfterDecimal:2,maxNumberWidth:6,sigFigs:null,strokeWidth:1,strokeBorderWidth:0,strokeBorderColor:"white",axisTickSize:3,axisLabelFontSize:14,rightGap:5,showRoller:!1,xValueParser:void 0,delimiter:",",sigma:2,errorBars:!1,fractions:!1,wilsonInterval:!0,customBars:!1,fillGraph:!1,fillAlpha:.15,connectSeparatedPoints:!1,stackedGraph:!1,stackedGraphNaNFill:"all",hideOverlayOnMouseOut:!0,legend:"onmouseover",stepPlot:!1,xRangePad:0,yRangePad:null,drawAxesAtZero:!1,titleHeight:28,xLabelHeight:18,yLabelWidth:18,axisLineColor:"black",axisLineWidth:.3,gridLineWidth:.3,axisLabelWidth:50,gridLineColor:"rgb(128,128,128)",interactionModel:l.default.defaultModel,animatedZooms:!1,showRangeSelector:!1,rangeSelectorHeight:40,rangeSelectorPlotStrokeColor:"#808FAB",rangeSelectorPlotFillGradientColor:"white",rangeSelectorPlotFillColor:"#A7B1C4",rangeSelectorBackgroundStrokeColor:"gray",rangeSelectorBackgroundLineWidth:1,rangeSelectorPlotLineWidth:1.5,rangeSelectorForegroundStrokeColor:"black",rangeSelectorForegroundLineWidth:1,rangeSelectorAlpha:.6,showInRangeSelector:null,plotter:[u.default._fillPlotter,u.default._errorPlotter,u.default._linePlotter],plugins:[],axes:{x:{pixelsPerLabel:70,axisLabelWidth:60,axisLabelFormatter:c.dateAxisLabelFormatter,valueFormatter:c.dateValueFormatter,drawGrid:!0,drawAxis:!0,independentTicks:!0,ticker:o.dateTicker},y:{axisLabelWidth:50,pixelsPerLabel:30,valueFormatter:c.numberValueFormatter,axisLabelFormatter:c.numberAxisLabelFormatter,drawGrid:!0,drawAxis:!0,independentTicks:!0,ticker:o.numericTicks},y2:{axisLabelWidth:50,pixelsPerLabel:30,valueFormatter:c.numberValueFormatter,axisLabelFormatter:c.numberAxisLabelFormatter,drawAxis:!0,drawGrid:!1,independentTicks:!1,ticker:o.numericTicks}}};a.default=p,e.exports=a.default},{"./dygraph-canvas":9,"./dygraph-interaction-model":12,"./dygraph-tickers":16,"./dygraph-utils":17}],11:[function(t,e,a){"use strict";Object.defineProperty(a,"__esModule",{value:!0});var i=t("./dygraph"),n=function(t){return t&&t.__esModule?t:{default:t}}(i),r=function(t){this.container=t};r.prototype.draw=function(t,e){this.container.innerHTML="",void 0!==this.date_graph&&this.date_graph.destroy(),this.date_graph=new n.default(this.container,t,e)},r.prototype.setSelection=function(t){var e=!1;t.length&&(e=t[0].row),this.date_graph.setSelection(e)},r.prototype.getSelection=function(){var t=[],e=this.date_graph.getSelection();if(e<0)return t;for(var a=this.date_graph.layout_.points,i=0;i<a.length;++i)t.push({row:e,column:i+1});return t},a.default=r,e.exports=a.default},{"./dygraph":18}],12:[function(t,e,a){"use strict";Object.defineProperty(a,"__esModule",{value:!0});var i=t("./dygraph-utils"),n=function(t){if(t&&t.__esModule)return t;var e={};if(null!=t)for(var a in t)Object.prototype.hasOwnProperty.call(t,a)&&(e[a]=t[a]);return e.default=t,e}(i),r={};r.maybeTreatMouseOpAsClick=function(t,e,a){a.dragEndX=n.dragGetX_(t,a),a.dragEndY=n.dragGetY_(t,a);var i=Math.abs(a.dragEndX-a.dragStartX),o=Math.abs(a.dragEndY-a.dragStartY);i<2&&o<2&&void 
0!==e.lastx_&&-1!=e.lastx_&&r.treatMouseOpAsClick(e,t,a),a.regionWidth=i,a.regionHeight=o},r.startPan=function(t,e,a){var i,r;a.isPanning=!0;var o=e.xAxisRange();if(e.getOptionForAxis("logscale","x")?(a.initialLeftmostDate=n.log10(o[0]),a.dateRange=n.log10(o[1])-n.log10(o[0])):(a.initialLeftmostDate=o[0],a.dateRange=o[1]-o[0]),a.xUnitsPerPixel=a.dateRange/(e.plotter_.area.w-1),e.getNumericOption("panEdgeFraction")){var s=e.width_*e.getNumericOption("panEdgeFraction"),l=e.xAxisExtremes(),h=e.toDomXCoord(l[0])-s,u=e.toDomXCoord(l[1])+s,d=e.toDataXCoord(h),c=e.toDataXCoord(u);a.boundedDates=[d,c];var p=[],g=e.height_*e.getNumericOption("panEdgeFraction");for(i=0;i<e.axes_.length;i++){r=e.axes_[i];var f=r.extremeRange,v=e.toDomYCoord(f[0],i)+g,_=e.toDomYCoord(f[1],i)-g,y=e.toDataYCoord(v,i),x=e.toDataYCoord(_,i);p[i]=[y,x]}a.boundedValues=p}for(a.is2DPan=!1,a.axes=[],i=0;i<e.axes_.length;i++){r=e.axes_[i];var m={},b=e.yAxisRange(i);e.attributes_.getForAxis("logscale",i)?(m.initialTopValue=n.log10(b[1]),m.dragValueRange=n.log10(b[1])-n.log10(b[0])):(m.initialTopValue=b[1],m.dragValueRange=b[1]-b[0]),m.unitsPerPixel=m.dragValueRange/(e.plotter_.area.h-1),a.axes.push(m),r.valueRange&&(a.is2DPan=!0)}},r.movePan=function(t,e,a){a.dragEndX=n.dragGetX_(t,a),a.dragEndY=n.dragGetY_(t,a);var i=a.initialLeftmostDate-(a.dragEndX-a.dragStartX)*a.xUnitsPerPixel;a.boundedDates&&(i=Math.max(i,a.boundedDates[0]));var r=i+a.dateRange;if(a.boundedDates&&r>a.boundedDates[1]&&(i-=r-a.boundedDates[1],r=i+a.dateRange),e.getOptionForAxis("logscale","x")?e.dateWindow_=[Math.pow(n.LOG_SCALE,i),Math.pow(n.LOG_SCALE,r)]:e.dateWindow_=[i,r],a.is2DPan)for(var o=a.dragEndY-a.dragStartY,s=0;s<e.axes_.length;s++){var l=e.axes_[s],h=a.axes[s],u=o*h.unitsPerPixel,d=a.boundedValues?a.boundedValues[s]:null,c=h.initialTopValue+u;d&&(c=Math.min(c,d[1]));var p=c-h.dragValueRange;d&&p<d[0]&&(c-=p-d[0],p=c-h.dragValueRange),e.attributes_.getForAxis("logscale",s)?l.valueRange=[Math.pow(n.LOG_SCALE,p),Math.pow(n.LOG_SCALE,c)]:l.valueRange=[p,c]}e.drawGraph_(!1)},r.endPan=r.maybeTreatMouseOpAsClick,r.startZoom=function(t,e,a){a.isZooming=!0,a.zoomMoved=!1},r.moveZoom=function(t,e,a){a.zoomMoved=!0,a.dragEndX=n.dragGetX_(t,a),a.dragEndY=n.dragGetY_(t,a);var i=Math.abs(a.dragStartX-a.dragEndX),r=Math.abs(a.dragStartY-a.dragEndY);a.dragDirection=i<r/2?n.VERTICAL:n.HORIZONTAL,e.drawZoomRect_(a.dragDirection,a.dragStartX,a.dragEndX,a.dragStartY,a.dragEndY,a.prevDragDirection,a.prevEndX,a.prevEndY),a.prevEndX=a.dragEndX,a.prevEndY=a.dragEndY,a.prevDragDirection=a.dragDirection},r.treatMouseOpAsClick=function(t,e,a){for(var i=t.getFunctionOption("clickCallback"),n=t.getFunctionOption("pointClickCallback"),r=null,o=-1,s=Number.MAX_VALUE,l=0;l<t.selPoints_.length;l++){var h=t.selPoints_[l],u=Math.pow(h.canvasx-a.dragEndX,2)+Math.pow(h.canvasy-a.dragEndY,2);!isNaN(u)&&(-1==o||u<s)&&(s=u,o=l)}var d=t.getNumericOption("highlightCircleSize")+2;if(s<=d*d&&(r=t.selPoints_[o]),r){var c={cancelable:!0,point:r,canvasx:a.dragEndX,canvasy:a.dragEndY};if(t.cascadeEvents_("pointClick",c))return;n&&n.call(t,e,r)}var c={cancelable:!0,xval:t.lastx_,pts:t.selPoints_,canvasx:a.dragEndX,canvasy:a.dragEndY};t.cascadeEvents_("click",c)||i&&i.call(t,e,t.lastx_,t.selPoints_)},r.endZoom=function(t,e,a){e.clearZoomRect_(),a.isZooming=!1,r.maybeTreatMouseOpAsClick(t,e,a);var i=e.getArea();if(a.regionWidth>=10&&a.dragDirection==n.HORIZONTAL){var 
o=Math.min(a.dragStartX,a.dragEndX),s=Math.max(a.dragStartX,a.dragEndX);o=Math.max(o,i.x),s=Math.min(s,i.x+i.w),o<s&&e.doZoomX_(o,s),a.cancelNextDblclick=!0}else if(a.regionHeight>=10&&a.dragDirection==n.VERTICAL){var l=Math.min(a.dragStartY,a.dragEndY),h=Math.max(a.dragStartY,a.dragEndY);l=Math.max(l,i.y),h=Math.min(h,i.y+i.h),l<h&&e.doZoomY_(l,h),a.cancelNextDblclick=!0}a.dragStartX=null,a.dragStartY=null},r.startTouch=function(t,e,a){t.preventDefault(),t.touches.length>1&&(a.startTimeForDoubleTapMs=null);for(var i=[],n=0;n<t.touches.length;n++){var r=t.touches[n];i.push({pageX:r.pageX,pageY:r.pageY,dataX:e.toDataXCoord(r.pageX),dataY:e.toDataYCoord(r.pageY)})}if(a.initialTouches=i,1==i.length)a.initialPinchCenter=i[0],a.touchDirections={x:!0,y:!0};else if(i.length>=2){a.initialPinchCenter={pageX:.5*(i[0].pageX+i[1].pageX),pageY:.5*(i[0].pageY+i[1].pageY),dataX:.5*(i[0].dataX+i[1].dataX),dataY:.5*(i[0].dataY+i[1].dataY)};var o=180/Math.PI*Math.atan2(a.initialPinchCenter.pageY-i[0].pageY,i[0].pageX-a.initialPinchCenter.pageX);o=Math.abs(o),o>90&&(o=90-o),a.touchDirections={x:o<67.5,y:o>22.5}}a.initialRange={x:e.xAxisRange(),y:e.yAxisRange()}},r.moveTouch=function(t,e,a){a.startTimeForDoubleTapMs=null;var i,n=[];for(i=0;i<t.touches.length;i++){var r=t.touches[i];n.push({pageX:r.pageX,pageY:r.pageY})}var o,s=a.initialTouches,l=a.initialPinchCenter;o=1==n.length?n[0]:{pageX:.5*(n[0].pageX+n[1].pageX),pageY:.5*(n[0].pageY+n[1].pageY)};var h={pageX:o.pageX-l.pageX,pageY:o.pageY-l.pageY},u=a.initialRange.x[1]-a.initialRange.x[0],d=a.initialRange.y[0]-a.initialRange.y[1];h.dataX=h.pageX/e.plotter_.area.w*u,h.dataY=h.pageY/e.plotter_.area.h*d;var c,p;if(1==n.length)c=1,p=1;else if(n.length>=2){var g=s[1].pageX-l.pageX;c=(n[1].pageX-o.pageX)/g;var f=s[1].pageY-l.pageY;p=(n[1].pageY-o.pageY)/f}c=Math.min(8,Math.max(.125,c)),p=Math.min(8,Math.max(.125,p));var v=!1;if(a.touchDirections.x&&(e.dateWindow_=[l.dataX-h.dataX+(a.initialRange.x[0]-l.dataX)/c,l.dataX-h.dataX+(a.initialRange.x[1]-l.dataX)/c],v=!0),a.touchDirections.y)for(i=0;i<1;i++){var _=e.axes_[i],y=e.attributes_.getForAxis("logscale",i);y||(_.valueRange=[l.dataY-h.dataY+(a.initialRange.y[0]-l.dataY)/p,l.dataY-h.dataY+(a.initialRange.y[1]-l.dataY)/p],v=!0)}if(e.drawGraph_(!1),v&&n.length>1&&e.getFunctionOption("zoomCallback")){var x=e.xAxisRange();e.getFunctionOption("zoomCallback").call(e,x[0],x[1],e.yAxisRanges())}},r.endTouch=function(t,e,a){if(0!==t.touches.length)r.startTouch(t,e,a);else if(1==t.changedTouches.length){var i=(new Date).getTime(),n=t.changedTouches[0];a.startTimeForDoubleTapMs&&i-a.startTimeForDoubleTapMs<500&&a.doubleTapX&&Math.abs(a.doubleTapX-n.screenX)<50&&a.doubleTapY&&Math.abs(a.doubleTapY-n.screenY)<50?e.resetZoom():(a.startTimeForDoubleTapMs=i,a.doubleTapX=n.screenX,a.doubleTapY=n.screenY)}};var o=function(t,e,a){return t<e?e-t:t>a?t-a:0},s=function(t,e){var a=n.findPos(e.canvas_),i={left:a.x,right:a.x+e.canvas_.offsetWidth,top:a.y,bottom:a.y+e.canvas_.offsetHeight},r={x:n.pageX(t),y:n.pageY(t)},s=o(r.x,i.left,i.right),l=o(r.y,i.top,i.bottom);return Math.max(s,l)};r.defaultModel={mousedown:function(t,e,a){if(!t.button||2!=t.button){a.initializeMouseDown(t,e,a),t.altKey||t.shiftKey?r.startPan(t,e,a):r.startZoom(t,e,a);var i=function(t){if(a.isZooming){s(t,e)<100?r.moveZoom(t,e,a):null!==a.dragEndX&&(a.dragEndX=null,a.dragEndY=null,e.clearZoomRect_())}else a.isPanning&&r.movePan(t,e,a)},o=function 
t(o){a.isZooming?null!==a.dragEndX?r.endZoom(o,e,a):r.maybeTreatMouseOpAsClick(o,e,a):a.isPanning&&r.endPan(o,e,a),n.removeEvent(document,"mousemove",i),n.removeEvent(document,"mouseup",t),a.destroy()};e.addAndTrackEvent(document,"mousemove",i),e.addAndTrackEvent(document,"mouseup",o)}},willDestroyContextMyself:!0,touchstart:function(t,e,a){r.startTouch(t,e,a)},touchmove:function(t,e,a){r.moveTouch(t,e,a)},touchend:function(t,e,a){r.endTouch(t,e,a)},dblclick:function(t,e,a){if(a.cancelNextDblclick)return void(a.cancelNextDblclick=!1);var i={canvasx:a.dragEndX,canvasy:a.dragEndY,cancelable:!0};e.cascadeEvents_("dblclick",i)||t.altKey||t.shiftKey||e.resetZoom()}},r.nonInteractiveModel_={mousedown:function(t,e,a){a.initializeMouseDown(t,e,a)},mouseup:r.maybeTreatMouseOpAsClick},r.dragIsPanInteractionModel={mousedown:function(t,e,a){a.initializeMouseDown(t,e,a),r.startPan(t,e,a)},mousemove:function(t,e,a){a.isPanning&&r.movePan(t,e,a)},mouseup:function(t,e,a){a.isPanning&&r.endPan(t,e,a)}},a.default=r,e.exports=a.default},{"./dygraph-utils":17}],13:[function(t,e,a){"use strict";Object.defineProperty(a,"__esModule",{value:!0});var i=t("./dygraph-utils"),n=function(t){if(t&&t.__esModule)return t;var e={};if(null!=t)for(var a in t)Object.prototype.hasOwnProperty.call(t,a)&&(e[a]=t[a]);return e.default=t,e}(i),r=function(t){this.dygraph_=t,this.points=[],this.setNames=[],this.annotations=[],this.yAxes_=null,this.xTicks_=null,this.yTicks_=null};r.prototype.addDataset=function(t,e){this.points.push(e),this.setNames.push(t)},r.prototype.getPlotArea=function(){return this.area_},r.prototype.computePlotArea=function(){var t={x:0,y:0};t.w=this.dygraph_.width_-t.x-this.dygraph_.getOption("rightGap"),t.h=this.dygraph_.height_;var e={chart_div:this.dygraph_.graphDiv,reserveSpaceLeft:function(e){var a={x:t.x,y:t.y,w:e,h:t.h};return t.x+=e,t.w-=e,a},reserveSpaceRight:function(e){var a={x:t.x+t.w-e,y:t.y,w:e,h:t.h};return t.w-=e,a},reserveSpaceTop:function(e){var a={x:t.x,y:t.y,w:t.w,h:e};return t.y+=e,t.h-=e,a},reserveSpaceBottom:function(e){var a={x:t.x,y:t.y+t.h-e,w:t.w,h:e};return t.h-=e,a},chartRect:function(){return{x:t.x,y:t.y,w:t.w,h:t.h}}};this.dygraph_.cascadeEvents_("layout",e),this.area_=t},r.prototype.setAnnotations=function(t){this.annotations=[];for(var e=this.dygraph_.getOption("xValueParser")||function(t){return t},a=0;a<t.length;a++){var i={};if(!t[a].xval&&void 0===t[a].x)return void console.error("Annotations must have an 'x' property");if(t[a].icon&&(!t[a].hasOwnProperty("width")||!t[a].hasOwnProperty("height")))return void console.error("Must set width and height when setting annotation.icon property");n.update(i,t[a]),i.xval||(i.xval=e(i.x)),this.annotations.push(i)}},r.prototype.setXTicks=function(t){this.xTicks_=t},r.prototype.setYAxes=function(t){this.yAxes_=t},r.prototype.evaluate=function(){this._xAxis={},this._evaluateLimits(),this._evaluateLineCharts(),this._evaluateLineTicks(),this._evaluateAnnotations()},r.prototype._evaluateLimits=function(){var t=this.dygraph_.xAxisRange();this._xAxis.minval=t[0],this._xAxis.maxval=t[1];var e=t[1]-t[0];this._xAxis.scale=0!==e?1/e:1,this.dygraph_.getOptionForAxis("logscale","x")&&(this._xAxis.xlogrange=n.log10(this._xAxis.maxval)-n.log10(this._xAxis.minval),this._xAxis.xlogscale=0!==this._xAxis.xlogrange?1/this._xAxis.xlogrange:1);for(var a=0;a<this.yAxes_.length;a++){var 
i=this.yAxes_[a];i.minyval=i.computedValueRange[0],i.maxyval=i.computedValueRange[1],i.yrange=i.maxyval-i.minyval,i.yscale=0!==i.yrange?1/i.yrange:1,this.dygraph_.getOption("logscale")&&(i.ylogrange=n.log10(i.maxyval)-n.log10(i.minyval),i.ylogscale=0!==i.ylogrange?1/i.ylogrange:1,isFinite(i.ylogrange)&&!isNaN(i.ylogrange)||console.error("axis "+a+" of graph at "+i.g+" can't be displayed in log scale for range ["+i.minyval+" - "+i.maxyval+"]"))}},r.calcXNormal_=function(t,e,a){return a?(n.log10(t)-n.log10(e.minval))*e.xlogscale:(t-e.minval)*e.scale},r.calcYNormal_=function(t,e,a){if(a){var i=1-(n.log10(e)-n.log10(t.minyval))*t.ylogscale;return isFinite(i)?i:NaN}return 1-(e-t.minyval)*t.yscale},r.prototype._evaluateLineCharts=function(){for(var t=this.dygraph_.getOption("stackedGraph"),e=this.dygraph_.getOptionForAxis("logscale","x"),a=0;a<this.points.length;a++){for(var i=this.points[a],n=this.setNames[a],o=this.dygraph_.getOption("connectSeparatedPoints",n),s=this.dygraph_.axisPropertiesForSeries(n),l=this.dygraph_.attributes_.getForSeries("logscale",n),h=0;h<i.length;h++){var u=i[h];u.x=r.calcXNormal_(u.xval,this._xAxis,e);var d=u.yval;t&&(u.y_stacked=r.calcYNormal_(s,u.yval_stacked,l),
+null===d||isNaN(d)||(d=u.yval_stacked)),null===d&&(d=NaN,o||(u.yval=NaN)),u.y=r.calcYNormal_(s,d,l)}this.dygraph_.dataHandler_.onLineEvaluated(i,s,l)}},r.prototype._evaluateLineTicks=function(){var t,e,a,i,n,r;for(this.xticks=[],t=0;t<this.xTicks_.length;t++)e=this.xTicks_[t],a=e.label,r=!("label_v"in e),n=r?e.v:e.label_v,(i=this.dygraph_.toPercentXCoord(n))>=0&&i<1&&this.xticks.push({pos:i,label:a,has_tick:r});for(this.yticks=[],t=0;t<this.yAxes_.length;t++)for(var o=this.yAxes_[t],s=0;s<o.ticks.length;s++)e=o.ticks[s],a=e.label,r=!("label_v"in e),n=r?e.v:e.label_v,(i=this.dygraph_.toPercentYCoord(n,t))>0&&i<=1&&this.yticks.push({axis:t,pos:i,label:a,has_tick:r})},r.prototype._evaluateAnnotations=function(){var t,e={};for(t=0;t<this.annotations.length;t++){var a=this.annotations[t];e[a.xval+","+a.series]=a}if(this.annotated_points=[],this.annotations&&this.annotations.length)for(var i=0;i<this.points.length;i++){var n=this.points[i];for(t=0;t<n.length;t++){var r=n[t],o=r.xval+","+r.name;o in e&&(r.annotation=e[o],this.annotated_points.push(r))}}},r.prototype.removeAllDatasets=function(){delete this.points,delete this.setNames,delete this.setPointsLengths,delete this.setPointsOffsets,this.points=[],this.setNames=[],this.setPointsLengths=[],this.setPointsOffsets=[]},a.default=r,e.exports=a.default},{"./dygraph-utils":17}],14:[function(t,e,a){(function(t){"use strict";Object.defineProperty(a,"__esModule",{value:!0});var i=null;if(void 0!==t);a.default=i,e.exports=a.default}).call(this,t("_process"))},{_process:1}],15:[function(t,e,a){(function(i){"use strict";function n(t){return t&&t.__esModule?t:{default:t}}Object.defineProperty(a,"__esModule",{value:!0});var r=t("./dygraph-utils"),o=function(t){if(t&&t.__esModule)return t;var e={};if(null!=t)for(var a in t)Object.prototype.hasOwnProperty.call(t,a)&&(e[a]=t[a]);return e.default=t,e}(r),s=t("./dygraph-default-attrs"),l=n(s),h=t("./dygraph-options-reference"),u=(n(h),function(t){this.dygraph_=t,this.yAxes_=[],this.xAxis_={},this.series_={},this.global_=this.dygraph_.attrs_,this.user_=this.dygraph_.user_attrs_||{},this.labels_=[],this.highlightSeries_=this.get("highlightSeriesOpts")||{},this.reparseSeries()});if(u.AXIS_STRING_MAPPINGS_={y:0,Y:0,y1:0,Y1:0,y2:1,Y2:1},u.axisToIndex_=function(t){if("string"==typeof t){if(u.AXIS_STRING_MAPPINGS_.hasOwnProperty(t))return u.AXIS_STRING_MAPPINGS_[t];throw"Unknown axis : "+t}if("number"==typeof t){if(0===t||1===t)return t;throw"Dygraphs only supports two y-axes, indexed from 0-1."}if(t)throw"Unknown axis : "+t;return 0},u.prototype.reparseSeries=function(){var t=this.get("labels");if(t){this.labels_=t.slice(1),this.yAxes_=[{series:[],options:{}}],this.xAxis_={options:{}},this.series_={};for(var e=this.user_.series||{},a=0;a<this.labels_.length;a++){var i=this.labels_[a],n=e[i]||{},r=u.axisToIndex_(n.axis);this.series_[i]={idx:a,yAxis:r,options:n},this.yAxes_[r]?this.yAxes_[r].series.push(i):this.yAxes_[r]={series:[i],options:{}}}var s=this.user_.axes||{};o.update(this.yAxes_[0].options,s.y||{}),this.yAxes_.length>1&&o.update(this.yAxes_[1].options,s.y2||{}),o.update(this.xAxis_.options,s.x||{})}},u.prototype.get=function(t){var e=this.getGlobalUser_(t);return null!==e?e:this.getGlobalDefault_(t)},u.prototype.getGlobalUser_=function(t){return this.user_.hasOwnProperty(t)?this.user_[t]:null},u.prototype.getGlobalDefault_=function(t){return this.global_.hasOwnProperty(t)?this.global_[t]:l.default.hasOwnProperty(t)?l.default[t]:null},u.prototype.getForAxis=function(t,e){var a,i;if("number"==typeof 
e)a=e,i=0===a?"y":"y2";else{if("y1"==e&&(e="y"),"y"==e)a=0;else if("y2"==e)a=1;else{if("x"!=e)throw"Unknown axis "+e;a=-1}i=e}var n=-1==a?this.xAxis_:this.yAxes_[a];if(n){var r=n.options;if(r.hasOwnProperty(t))return r[t]}if("x"!==e||"logscale"!==t){var o=this.getGlobalUser_(t);if(null!==o)return o}var s=l.default.axes[i];return s.hasOwnProperty(t)?s[t]:this.getGlobalDefault_(t)},u.prototype.getForSeries=function(t,e){if(e===this.dygraph_.getHighlightSeries()&&this.highlightSeries_.hasOwnProperty(t))return this.highlightSeries_[t];if(!this.series_.hasOwnProperty(e))throw"Unknown series: "+e;var a=this.series_[e],i=a.options;return i.hasOwnProperty(t)?i[t]:this.getForAxis(t,a.yAxis)},u.prototype.numAxes=function(){return this.yAxes_.length},u.prototype.axisForSeries=function(t){return this.series_[t].yAxis},u.prototype.axisOptions=function(t){return this.yAxes_[t].options},u.prototype.seriesForAxis=function(t){return this.yAxes_[t].series},u.prototype.seriesNames=function(){return this.labels_},void 0!==i);a.default=u,e.exports=a.default}).call(this,t("_process"))},{"./dygraph-default-attrs":10,"./dygraph-options-reference":14,"./dygraph-utils":17,_process:1}],16:[function(t,e,a){"use strict";Object.defineProperty(a,"__esModule",{value:!0});var i=t("./dygraph-utils"),n=function(t){if(t&&t.__esModule)return t;var e={};if(null!=t)for(var a in t)Object.prototype.hasOwnProperty.call(t,a)&&(e[a]=t[a]);return e.default=t,e}(i),r=function(t,e,a,i,n,r){return o(t,e,a,function(t){return"logscale"!==t&&i(t)},n,r)};a.numericLinearTicks=r;var o=function(t,e,a,i,r,o){var s,l,h,u,c=i("pixelsPerLabel"),p=[];if(o)for(s=0;s<o.length;s++)p.push({v:o[s]});else{if(i("logscale")){u=Math.floor(a/c);var g=n.binarySearch(t,d,1),f=n.binarySearch(e,d,-1);-1==g&&(g=0),-1==f&&(f=d.length-1);var v=null;if(f-g>=u/4){for(var _=f;_>=g;_--){var y=d[_],x=Math.log(y/t)/Math.log(e/t)*a,m={v:y};null===v?v={tickValue:y,pixel_coord:x}:Math.abs(x-v.pixel_coord)>=c?v={tickValue:y,pixel_coord:x}:m.label="",p.push(m)}p.reverse()}}if(0===p.length){var b,w,A=i("labelsKMG2");A?(b=[1,2,4,8,16,32,64,128,256],w=16):(b=[1,2,5,10,20,50,100],w=10);var O,D,E,L=Math.ceil(a/c),T=Math.abs(e-t)/L,S=Math.floor(Math.log(T)/Math.log(w)),P=Math.pow(w,S);for(l=0;l<b.length&&(O=P*b[l],D=Math.floor(t/O)*O,E=Math.ceil(e/O)*O,u=Math.abs(E-D)/O,!(a/u>c));l++);for(D>E&&(O*=-1),s=0;s<=u;s++)h=D+s*O,p.push({v:h})}}var C=i("axisLabelFormatter");for(s=0;s<p.length;s++)void 0===p[s].label&&(p[s].label=C.call(r,p[s].v,0,i,r));return p};a.numericTicks=o;var s=function(t,e,a,i,n,r){var o=c(t,e,a,i);return o>=0?g(t,e,o,i,n):[]};a.dateTicker=s;var l={MILLISECONDLY:0,TWO_MILLISECONDLY:1,FIVE_MILLISECONDLY:2,TEN_MILLISECONDLY:3,FIFTY_MILLISECONDLY:4,HUNDRED_MILLISECONDLY:5,FIVE_HUNDRED_MILLISECONDLY:6,SECONDLY:7,TWO_SECONDLY:8,FIVE_SECONDLY:9,TEN_SECONDLY:10,THIRTY_SECONDLY:11,MINUTELY:12,TWO_MINUTELY:13,FIVE_MINUTELY:14,TEN_MINUTELY:15,THIRTY_MINUTELY:16,HOURLY:17,TWO_HOURLY:18,SIX_HOURLY:19,DAILY:20,TWO_DAILY:21,WEEKLY:22,MONTHLY:23,QUARTERLY:24,BIANNUAL:25,ANNUAL:26,DECADAL:27,CENTENNIAL:28,NUM_GRANULARITIES:29};a.Granularity=l;var 
h={DATEFIELD_Y:0,DATEFIELD_M:1,DATEFIELD_D:2,DATEFIELD_HH:3,DATEFIELD_MM:4,DATEFIELD_SS:5,DATEFIELD_MS:6,NUM_DATEFIELDS:7},u=[];u[l.MILLISECONDLY]={datefield:h.DATEFIELD_MS,step:1,spacing:1},u[l.TWO_MILLISECONDLY]={datefield:h.DATEFIELD_MS,step:2,spacing:2},u[l.FIVE_MILLISECONDLY]={datefield:h.DATEFIELD_MS,step:5,spacing:5},u[l.TEN_MILLISECONDLY]={datefield:h.DATEFIELD_MS,step:10,spacing:10},u[l.FIFTY_MILLISECONDLY]={datefield:h.DATEFIELD_MS,step:50,spacing:50},u[l.HUNDRED_MILLISECONDLY]={datefield:h.DATEFIELD_MS,step:100,spacing:100},u[l.FIVE_HUNDRED_MILLISECONDLY]={datefield:h.DATEFIELD_MS,step:500,spacing:500},u[l.SECONDLY]={datefield:h.DATEFIELD_SS,step:1,spacing:1e3},u[l.TWO_SECONDLY]={datefield:h.DATEFIELD_SS,step:2,spacing:2e3},u[l.FIVE_SECONDLY]={datefield:h.DATEFIELD_SS,step:5,spacing:5e3},u[l.TEN_SECONDLY]={datefield:h.DATEFIELD_SS,step:10,spacing:1e4},u[l.THIRTY_SECONDLY]={datefield:h.DATEFIELD_SS,step:30,spacing:3e4},u[l.MINUTELY]={datefield:h.DATEFIELD_MM,step:1,spacing:6e4},u[l.TWO_MINUTELY]={datefield:h.DATEFIELD_MM,step:2,spacing:12e4},u[l.FIVE_MINUTELY]={datefield:h.DATEFIELD_MM,step:5,spacing:3e5},u[l.TEN_MINUTELY]={datefield:h.DATEFIELD_MM,step:10,spacing:6e5},u[l.THIRTY_MINUTELY]={datefield:h.DATEFIELD_MM,step:30,spacing:18e5},u[l.HOURLY]={datefield:h.DATEFIELD_HH,step:1,spacing:36e5},u[l.TWO_HOURLY]={datefield:h.DATEFIELD_HH,step:2,spacing:72e5},u[l.SIX_HOURLY]={datefield:h.DATEFIELD_HH,step:6,spacing:216e5},u[l.DAILY]={datefield:h.DATEFIELD_D,step:1,spacing:864e5},u[l.TWO_DAILY]={datefield:h.DATEFIELD_D,step:2,spacing:1728e5},u[l.WEEKLY]={datefield:h.DATEFIELD_D,step:7,spacing:6048e5},u[l.MONTHLY]={datefield:h.DATEFIELD_M,step:1,spacing:2629817280},u[l.QUARTERLY]={datefield:h.DATEFIELD_M,step:3,spacing:216e5*365.2524},u[l.BIANNUAL]={datefield:h.DATEFIELD_M,step:6,spacing:432e5*365.2524},u[l.ANNUAL]={datefield:h.DATEFIELD_Y,step:1,spacing:864e5*365.2524},u[l.DECADAL]={datefield:h.DATEFIELD_Y,step:10,spacing:315578073600},u[l.CENTENNIAL]={datefield:h.DATEFIELD_Y,step:100,spacing:3155780736e3};var d=function(){for(var t=[],e=-39;e<=39;e++)for(var a=Math.pow(10,e),i=1;i<=9;i++){var n=a*i;t.push(n)}return t}(),c=function(t,e,a,i){for(var n=i("pixelsPerLabel"),r=0;r<l.NUM_GRANULARITIES;r++){if(a/p(t,e,r)>=n)return r}return-1},p=function(t,e,a){var i=u[a].spacing;return Math.round(1*(e-t)/i)},g=function(t,e,a,i,r){var o=i("axisLabelFormatter"),s=i("labelsUTC"),d=s?n.DateAccessorsUTC:n.DateAccessorsLocal,c=u[a].datefield,p=u[a].step,g=u[a].spacing,f=new Date(t),v=[];v[h.DATEFIELD_Y]=d.getFullYear(f),v[h.DATEFIELD_M]=d.getMonth(f),v[h.DATEFIELD_D]=d.getDate(f),v[h.DATEFIELD_HH]=d.getHours(f),v[h.DATEFIELD_MM]=d.getMinutes(f),v[h.DATEFIELD_SS]=d.getSeconds(f),v[h.DATEFIELD_MS]=d.getMilliseconds(f);var _=v[c]%p;a==l.WEEKLY&&(_=d.getDay(f)),v[c]-=_;for(var y=c+1;y<h.NUM_DATEFIELDS;y++)v[y]=y===h.DATEFIELD_D?1:0;var x=[],m=d.makeDate.apply(null,v),b=m.getTime();if(a<=l.HOURLY)for(b<t&&(b+=g,m=new Date(b));b<=e;)x.push({v:b,label:o.call(r,m,a,i,r)}),b+=g,m=new Date(b);else for(b<t&&(v[c]+=p,m=d.makeDate.apply(null,v),b=m.getTime());b<=e;)(a>=l.DAILY||d.getHours(m)%p==0)&&x.push({v:b,label:o.call(r,m,a,i,r)}),v[c]+=p,m=d.makeDate.apply(null,v),b=m.getTime();return x};a.getDateAxis=g},{"./dygraph-utils":17}],17:[function(t,e,a){"use strict";function i(t,e,a){t.removeEventListener(e,a,!1)}function n(t){return t=t||window.event,t.stopPropagation&&t.stopPropagation(),t.preventDefault&&t.preventDefault(),t.cancelBubble=!0,t.cancel=!0,t.returnValue=!1,!1}function r(t,e,a){var 
i,n,r;if(0===e)i=a,n=a,r=a;else{var o=Math.floor(6*t),s=6*t-o,l=a*(1-e),h=a*(1-e*s),u=a*(1-e*(1-s));switch(o){case 1:i=h,n=a,r=l;break;case 2:i=l,n=a,r=u;break;case 3:i=l,n=h,r=a;break;case 4:i=u,n=l,r=a;break;case 5:i=a,n=l,r=h;break;case 6:case 0:i=a,n=u,r=l}}return i=Math.floor(255*i+.5),n=Math.floor(255*n+.5),r=Math.floor(255*r+.5),"rgb("+i+","+n+","+r+")"}function o(t){var e=t.getBoundingClientRect(),a=window,i=document.documentElement;return{x:e.left+(a.pageXOffset||i.scrollLeft),y:e.top+(a.pageYOffset||i.scrollTop)}}function s(t){return!t.pageX||t.pageX<0?0:t.pageX}function l(t){return!t.pageY||t.pageY<0?0:t.pageY}function h(t,e){return s(t)-e.px}function u(t,e){return l(t)-e.py}function d(t){return!!t&&!isNaN(t)}function c(t,e){return!!t&&(null!==t.yval&&(null!==t.x&&void 0!==t.x&&(null!==t.y&&void 0!==t.y&&!(isNaN(t.x)||!e&&isNaN(t.y)))))}function p(t,e){var a=Math.min(Math.max(1,e||2),21);return Math.abs(t)<.001&&0!==t?t.toExponential(a-1):t.toPrecision(a)}function g(t){return t<10?"0"+t:""+t}function f(t,e,a,i){var n=g(t)+":"+g(e);if(a&&(n+=":"+g(a),i)){var r=""+i;n+="."+("000"+r).substring(r.length)}return n}function v(t,e){var a=e?tt:$,i=new Date(t),n=a.getFullYear(i),r=a.getMonth(i),o=a.getDate(i),s=a.getHours(i),l=a.getMinutes(i),h=a.getSeconds(i),u=a.getMilliseconds(i),d=""+n,c=g(r+1),p=g(o),v=3600*s+60*l+h+.001*u,_=d+"/"+c+"/"+p;return v&&(_+=" "+f(s,l,h,u)),_}function _(t,e){var a=Math.pow(10,e);return Math.round(t*a)/a}function y(t,e,a,i,n){for(var r=!0;r;){var o=t,s=e,l=a,h=i,u=n;if(r=!1,null!==h&&void 0!==h&&null!==u&&void 0!==u||(h=0,u=s.length-1),h>u)return-1;null!==l&&void 0!==l||(l=0);var d,c=function(t){return t>=0&&t<s.length},p=parseInt((h+u)/2,10),g=s[p];if(g==o)return p;if(g>o){if(l>0&&(d=p-1,c(d)&&s[d]<o))return p;t=o,e=s,a=l,i=h,n=p-1,r=!0,c=p=g=d=void 0}else{if(!(g<o))return-1;if(l<0&&(d=p+1,c(d)&&s[d]>o))return p;t=o,e=s,a=l,i=p+1,n=u,r=!0,c=p=g=d=void 0}}}function x(t){var e,a;if((-1==t.search("-")||-1!=t.search("T")||-1!=t.search("Z"))&&(a=m(t))&&!isNaN(a))return a;if(-1!=t.search("-")){for(e=t.replace("-","/","g");-1!=e.search("-");)e=e.replace("-","/");a=m(e)}else 8==t.length?(e=t.substr(0,4)+"/"+t.substr(4,2)+"/"+t.substr(6,2),a=m(e)):a=m(t);return a&&!isNaN(a)||console.error("Couldn't parse "+t+" as a date"),a}function m(t){return new Date(t).getTime()}function b(t,e){if(void 0!==e&&null!==e)for(var a in e)e.hasOwnProperty(a)&&(t[a]=e[a]);return t}function w(t,e){if(void 0!==e&&null!==e)for(var a in e)e.hasOwnProperty(a)&&(null===e[a]?t[a]=null:A(e[a])?t[a]=e[a].slice():!function(t){return"object"==typeof Node?t instanceof Node:"object"==typeof t&&"number"==typeof t.nodeType&&"string"==typeof t.nodeName}(e[a])&&"object"==typeof e[a]?("object"==typeof t[a]&&null!==t[a]||(t[a]={}),w(t[a],e[a])):t[a]=e[a]);return t}function A(t){var e=typeof t;return("object"==e||"function"==e&&"function"==typeof t.item)&&null!==t&&"number"==typeof t.length&&3!==t.nodeType}function O(t){return"object"==typeof t&&null!==t&&"function"==typeof t.getTime}function D(t){for(var e=[],a=0;a<t.length;a++)A(t[a])?e.push(D(t[a])):e.push(t[a]);return e}function E(){return document.createElement("canvas")}function L(t){try{var e=window.devicePixelRatio,a=t.webkitBackingStorePixelRatio||t.mozBackingStorePixelRatio||t.msBackingStorePixelRatio||t.oBackingStorePixelRatio||t.backingStorePixelRatio||1;return void 0!==e?e/a:1}catch(t){return 1}}function 
T(t,e,a,i){e=e||0,a=a||t.length,this.hasNext=!0,this.peek=null,this.start_=e,this.array_=t,this.predicate_=i,this.end_=Math.min(t.length,e+a),this.nextIdx_=e-1,this.next()}function S(t,e,a,i){return new T(t,e,a,i)}function P(t,e,a,i){var n,r=0,o=(new Date).getTime();if(t(r),1==e)return void i();var s=e-1;!function l(){r>=e||et.call(window,function(){var e=(new Date).getTime(),h=e-o;n=r,r=Math.floor(h/a);var u=r-n;r+u>s||r>=s?(t(s),i()):(0!==u&&t(r),l())})}()}function C(t,e){var a={};if(t)for(var i=1;i<t.length;i++)a[t[i]]=!0;var n=function(t){for(var e in t)if(t.hasOwnProperty(e)&&!at[e])return!0;return!1};for(var r in e)if(e.hasOwnProperty(r))if("highlightSeriesOpts"==r||a[r]&&!e.series){if(n(e[r]))return!0}else if("series"==r||"axes"==r){var o=e[r];for(var s in o)if(o.hasOwnProperty(s)&&n(o[s]))return!0}else if(!at[r])return!0;return!1}function M(t){for(var e=0;e<t.length;e++){var a=t.charAt(e);if("\r"===a)return e+1<t.length&&"\n"===t.charAt(e+1)?"\r\n":a;if("\n"===a)return e+1<t.length&&"\r"===t.charAt(e+1)?"\n\r":a}return null}function N(t,e){if(null===e||null===t)return!1;for(var a=t;a&&a!==e;)a=a.parentNode;return a===e}function F(t,e){return e<0?1/Math.pow(t,-e):Math.pow(t,e)}function k(t){var e=nt.exec(t);if(!e)return null;var a=parseInt(e[1],10),i=parseInt(e[2],10),n=parseInt(e[3],10);return e[4]?{r:a,g:i,b:n,a:parseFloat(e[4])}:{r:a,g:i,b:n}}function R(t){var e=k(t);if(e)return e;var a=document.createElement("div");a.style.backgroundColor=t,a.style.visibility="hidden",document.body.appendChild(a);var i=window.getComputedStyle(a,null).backgroundColor;return document.body.removeChild(a),k(i)}function I(t){try{(t||document.createElement("canvas")).getContext("2d")}catch(t){return!1}return!0}function H(t,e,a){var i=parseFloat(t);if(!isNaN(i))return i;if(/^ *$/.test(t))return null;if(/^ *nan *$/i.test(t))return NaN;var n="Unable to parse '"+t+"' as a number";return void 0!==a&&void 0!==e&&(n+=" on line "+(1+(e||0))+" ('"+a+"') of CSV."),console.error(n),null}function Y(t,e){var a=e("sigFigs");if(null!==a)return p(t,a);var i,n=e("digitsAfterDecimal"),r=e("maxNumberWidth"),o=e("labelsKMB"),s=e("labelsKMG2");if(i=0!==t&&(Math.abs(t)>=Math.pow(10,r)||Math.abs(t)<Math.pow(10,-n))?t.toExponential(n):""+_(t,n),o||s){var l,h=[],u=[];o&&(l=1e3,h=rt),s&&(o&&console.warn("Setting both labelsKMB and labelsKMG2. 
Pick one!"),l=1024,h=ot,u=st);for(var d=Math.abs(t),c=F(l,h.length),g=h.length-1;g>=0;g--,c/=l)if(d>=c){i=_(t/c,n)+h[g];break}if(s){var f=String(t.toExponential()).split("e-");2===f.length&&f[1]>=3&&f[1]<=24&&(i=f[1]%3>0?_(f[0]/F(10,f[1]%3),n):Number(f[0]).toFixed(2),i+=u[Math.floor(f[1]/3)-1])}}return i}function X(t,e,a){return Y.call(this,t,a)}function V(t,e,a){var i=a("labelsUTC"),n=i?tt:$,r=n.getFullYear(t),o=n.getMonth(t),s=n.getDate(t),l=n.getHours(t),h=n.getMinutes(t),u=n.getSeconds(t),d=n.getMilliseconds(t);if(e>=G.Granularity.DECADAL)return""+r;if(e>=G.Granularity.MONTHLY)return lt[o]+"&#160;"+r;if(0===3600*l+60*h+u+.001*d||e>=G.Granularity.DAILY)return g(s)+"&#160;"+lt[o];if(e<G.Granularity.SECONDLY){var c=""+d;return g(u)+"."+("000"+c).substring(c.length)}return e>G.Granularity.MINUTELY?f(l,h,u,0):f(l,h,u,d)}function Z(t,e){return v(t,e("labelsUTC"))}Object.defineProperty(a,"__esModule",{value:!0}),a.removeEvent=i,a.cancelEvent=n,a.hsvToRGB=r,a.findPos=o,a.pageX=s,a.pageY=l,a.dragGetX_=h,a.dragGetY_=u,a.isOK=d,a.isValidPoint=c,a.floatFormat=p,a.zeropad=g,a.hmsString_=f,a.dateString_=v,a.round_=_,a.binarySearch=y,a.dateParser=x,a.dateStrToMillis=m,a.update=b,a.updateDeep=w,a.isArrayLike=A,a.isDateLike=O,a.clone=D,a.createCanvas=E,a.getContextPixelRatio=L,a.Iterator=T,a.createIterator=S,a.repeatAndCleanup=P,a.isPixelChangingOptionList=C,a.detectLineDelimiter=M,a.isNodeContainedBy=N,a.pow=F,a.toRGB_=R,a.isCanvasSupported=I,a.parseFloat_=H,a.numberValueFormatter=Y,a.numberAxisLabelFormatter=X,a.dateAxisLabelFormatter=V,a.dateValueFormatter=Z;var B=t("./dygraph-tickers"),G=function(t){if(t&&t.__esModule)return t;var e={};if(null!=t)for(var a in t)Object.prototype.hasOwnProperty.call(t,a)&&(e[a]=t[a]);return e.default=t,e}(B);a.LOG_SCALE=10;var W=Math.log(10);a.LN_TEN=W;var U=function(t){return Math.log(t)/W};a.log10=U;var z=function(t,e,a){var i=U(t),n=U(e),r=i+a*(n-i);return Math.pow(10,r)};a.logRangeFraction=z;var j=[2,2];a.DOTTED_LINE=j;var K=[7,3];a.DASHED_LINE=K;var q=[7,2,2,2];a.DOT_DASH_LINE=q;a.HORIZONTAL=1;a.VERTICAL=2;var Q=function(t){return t.getContext("2d")};a.getContext=Q;var J=function(t,e,a){t.addEventListener(e,a,!1)};a.addEvent=J;var $={getFullYear:function(t){return t.getFullYear()},getMonth:function(t){return t.getMonth()},getDate:function(t){return t.getDate()},getHours:function(t){return t.getHours()},getMinutes:function(t){return t.getMinutes()},getSeconds:function(t){return t.getSeconds()},getMilliseconds:function(t){return t.getMilliseconds()},getDay:function(t){return t.getDay()},makeDate:function(t,e,a,i,n,r,o){return new Date(t,e,a,i,n,r,o)}};a.DateAccessorsLocal=$;var tt={getFullYear:function(t){return t.getUTCFullYear()},getMonth:function(t){return t.getUTCMonth()},getDate:function(t){return t.getUTCDate()},getHours:function(t){return t.getUTCHours()},getMinutes:function(t){return t.getUTCMinutes()},getSeconds:function(t){return t.getUTCSeconds()},getMilliseconds:function(t){return t.getUTCMilliseconds()},getDay:function(t){return t.getUTCDay()},makeDate:function(t,e,a,i,n,r,o){return new Date(Date.UTC(t,e,a,i,n,r,o))}};a.DateAccessorsUTC=tt,T.prototype.next=function(){if(!this.hasNext)return null;for(var t=this.peek,e=this.nextIdx_+1,a=!1;e<this.end_;){if(!this.predicate_||this.predicate_(this.array_,e)){this.peek=this.array_[e],a=!0;break}e++}return this.nextIdx_=e,a||(this.hasNext=!1,this.peek=null),t};var et=function(){return 
window.requestAnimationFrame||window.webkitRequestAnimationFrame||window.mozRequestAnimationFrame||window.oRequestAnimationFrame||window.msRequestAnimationFrame||function(t){window.setTimeout(t,1e3/60)}}();a.requestAnimFrame=et;var at={annotationClickHandler:!0,annotationDblClickHandler:!0,annotationMouseOutHandler:!0,annotationMouseOverHandler:!0,axisLineColor:!0,axisLineWidth:!0,clickCallback:!0,drawCallback:!0,drawHighlightPointCallback:!0,drawPoints:!0,drawPointCallback:!0,drawGrid:!0,fillAlpha:!0,gridLineColor:!0,gridLineWidth:!0,hideOverlayOnMouseOut:!0,highlightCallback:!0,highlightCircleSize:!0,interactionModel:!0,labelsDiv:!0,labelsKMB:!0,labelsKMG2:!0,labelsSeparateLines:!0,labelsShowZeroValues:!0,legend:!0,panEdgeFraction:!0,pixelsPerYLabel:!0,pointClickCallback:!0,pointSize:!0,rangeSelectorPlotFillColor:!0,rangeSelectorPlotFillGradientColor:!0,rangeSelectorPlotStrokeColor:!0,rangeSelectorBackgroundStrokeColor:!0,rangeSelectorBackgroundLineWidth:!0,rangeSelectorPlotLineWidth:!0,rangeSelectorForegroundStrokeColor:!0,rangeSelectorForegroundLineWidth:!0,rangeSelectorAlpha:!0,showLabelsOnHighlight:!0,showRoller:!0,strokeWidth:!0,underlayCallback:!0,unhighlightCallback:!0,zoomCallback:!0},it={DEFAULT:function(t,e,a,i,n,r,o){a.beginPath(),a.fillStyle=r,a.arc(i,n,o,0,2*Math.PI,!1),a.fill()}};a.Circles=it;var nt=/^rgba?\((\d{1,3}),\s*(\d{1,3}),\s*(\d{1,3})(?:,\s*([01](?:\.\d+)?))?\)$/,rt=["K","M","B","T","Q"],ot=["k","M","G","T","P","E","Z","Y"],st=["m","u","n","p","f","a","z","y"],lt=["Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec"]},{"./dygraph-tickers":16}],18:[function(t,e,a){(function(i){"use strict";function n(t){if(t&&t.__esModule)return t;var e={};if(null!=t)for(var a in t)Object.prototype.hasOwnProperty.call(t,a)&&(e[a]=t[a]);return e.default=t,e}function r(t){return t&&t.__esModule?t:{default:t}}function o(t){var e=t[0],a=e[0];if("number"!=typeof a&&!x.isDateLike(a))throw new Error("Expected number or date but got "+typeof a+": "+a+".");for(var i=1;i<e.length;i++){var n=e[i];if(null!==n&&void 0!==n&&("number"!=typeof n&&!x.isArrayLike(n)))throw new Error("Expected number or array but got "+typeof n+": "+n+".")}}Object.defineProperty(a,"__esModule",{value:!0});var s=function(){function t(t,e){var a=[],i=!0,n=!1,r=void 0;try{for(var o,s=t[Symbol.iterator]();!(i=(o=s.next()).done)&&(a.push(o.value),!e||a.length!==e);i=!0);}catch(t){n=!0,r=t}finally{try{!i&&s.return&&s.return()}finally{if(n)throw r}}return a}return function(e,a){if(Array.isArray(e))return e;if(Symbol.iterator in Object(e))return t(e,a);throw new TypeError("Invalid attempt to destructure non-iterable 
instance")}}(),l=t("./dygraph-layout"),h=r(l),u=t("./dygraph-canvas"),d=r(u),c=t("./dygraph-options"),p=r(c),g=t("./dygraph-interaction-model"),f=r(g),v=t("./dygraph-tickers"),_=n(v),y=t("./dygraph-utils"),x=n(y),m=t("./dygraph-default-attrs"),b=r(m),w=t("./dygraph-options-reference"),A=(r(w),t("./iframe-tarp")),O=r(A),D=t("./datahandler/default"),E=r(D),L=t("./datahandler/bars-error"),T=r(L),S=t("./datahandler/bars-custom"),P=r(S),C=t("./datahandler/default-fractions"),M=r(C),N=t("./datahandler/bars-fractions"),F=r(N),k=t("./datahandler/bars"),R=r(k),I=t("./plugins/annotations"),H=r(I),Y=t("./plugins/axes"),X=r(Y),V=t("./plugins/chart-labels"),Z=r(V),B=t("./plugins/grid"),G=r(B),W=t("./plugins/legend"),U=r(W),z=t("./plugins/range-selector"),j=r(z),K=t("./dygraph-gviz"),q=r(K),Q=function(t,e,a){this.__init__(t,e,a)};Q.NAME="Dygraph",Q.VERSION="2.1.0",Q.DEFAULT_ROLL_PERIOD=1,Q.DEFAULT_WIDTH=480,Q.DEFAULT_HEIGHT=320,Q.ANIMATION_STEPS=12,Q.ANIMATION_DURATION=200,Q.Plotters=d.default._Plotters,Q.addedAnnotationCSS=!1,Q.prototype.__init__=function(t,e,a){if(this.is_initial_draw_=!0,this.readyFns_=[],null!==a&&void 0!==a||(a={}),a=Q.copyUserAttrs_(a),"string"==typeof t&&(t=document.getElementById(t)),!t)throw new Error("Constructing dygraph with a non-existent div!");this.maindiv_=t,this.file_=e,this.rollPeriod_=a.rollPeriod||Q.DEFAULT_ROLL_PERIOD,this.previousVerticalX_=-1,this.fractions_=a.fractions||!1,this.dateWindow_=a.dateWindow||null,this.annotations_=[],t.innerHTML="",""===t.style.width&&a.width&&(t.style.width=a.width+"px"),""===t.style.height&&a.height&&(t.style.height=a.height+"px"),""===t.style.height&&0===t.clientHeight&&(t.style.height=Q.DEFAULT_HEIGHT+"px",""===t.style.width&&(t.style.width=Q.DEFAULT_WIDTH+"px")),this.width_=t.clientWidth||a.width||0,this.height_=t.clientHeight||a.height||0,a.stackedGraph&&(a.fillGraph=!0),this.user_attrs_={},x.update(this.user_attrs_,a),this.attrs_={},x.updateDeep(this.attrs_,b.default),this.boundaryIds_=[],this.setIndexByName_={},this.datasetIndex_=[],this.registeredEvents_=[],this.eventListeners_={},this.attributes_=new p.default(this),this.createInterface_(),this.plugins_=[];for(var i=Q.PLUGINS.concat(this.getOption("plugins")),n=0;n<i.length;n++){var r,o=i[n];r=void 0!==o.activate?o:new o;var s={plugin:r,events:{},options:{},pluginOptions:{}},l=r.activate(this);for(var h in l)l.hasOwnProperty(h)&&(s.events[h]=l[h]);this.plugins_.push(s)}for(var n=0;n<this.plugins_.length;n++){var u=this.plugins_[n];for(var h in u.events)if(u.events.hasOwnProperty(h)){var d=u.events[h],c=[u.plugin,d];h in this.eventListeners_?this.eventListeners_[h].push(c):this.eventListeners_[h]=[c]}}this.createDragInterface_(),this.start_()},Q.prototype.cascadeEvents_=function(t,e){if(!(t in this.eventListeners_))return!1;var a={dygraph:this,cancelable:!1,defaultPrevented:!1,preventDefault:function(){if(!a.cancelable)throw"Cannot call preventDefault on non-cancelable event.";a.defaultPrevented=!0},propagationStopped:!1,stopPropagation:function(){a.propagationStopped=!0}};x.update(a,e);var i=this.eventListeners_[t];if(i)for(var n=i.length-1;n>=0;n--){var r=i[n][0],o=i[n][1];if(o.call(r,a),a.propagationStopped)break}return a.defaultPrevented},Q.prototype.getPluginInstance_=function(t){for(var e=0;e<this.plugins_.length;e++){var a=this.plugins_[e];if(a.plugin instanceof t)return a.plugin}return null},Q.prototype.isZoomed=function(t){var e=!!this.dateWindow_;if("x"===t)return e;var a=this.axes_.map(function(t){return!!t.valueRange}).indexOf(!0)>=0;if(null===t||void 0===t)return 
e||a;if("y"===t)return a;throw new Error("axis parameter is ["+t+"] must be null, 'x' or 'y'.")},Q.prototype.toString=function(){var t=this.maindiv_;return"[Dygraph "+(t&&t.id?t.id:t)+"]"},Q.prototype.attr_=function(t,e){return e?this.attributes_.getForSeries(t,e):this.attributes_.get(t)},Q.prototype.getOption=function(t,e){return this.attr_(t,e)},Q.prototype.getNumericOption=function(t,e){return this.getOption(t,e)},Q.prototype.getStringOption=function(t,e){return this.getOption(t,e)},Q.prototype.getBooleanOption=function(t,e){return this.getOption(t,e)},Q.prototype.getFunctionOption=function(t,e){return this.getOption(t,e)},Q.prototype.getOptionForAxis=function(t,e){return this.attributes_.getForAxis(t,e)},Q.prototype.optionsViewForAxis_=function(t){var e=this;return function(a){var i=e.user_attrs_.axes;return i&&i[t]&&i[t].hasOwnProperty(a)?i[t][a]:("x"!==t||"logscale"!==a)&&(void 0!==e.user_attrs_[a]?e.user_attrs_[a]:(i=e.attrs_.axes,i&&i[t]&&i[t].hasOwnProperty(a)?i[t][a]:"y"==t&&e.axes_[0].hasOwnProperty(a)?e.axes_[0][a]:"y2"==t&&e.axes_[1].hasOwnProperty(a)?e.axes_[1][a]:e.attr_(a)))}},Q.prototype.rollPeriod=function(){return this.rollPeriod_},Q.prototype.xAxisRange=function(){return this.dateWindow_?this.dateWindow_:this.xAxisExtremes()},Q.prototype.xAxisExtremes=function(){var t=this.getNumericOption("xRangePad")/this.plotter_.area.w;if(0===this.numRows())return[0-t,1+t];var e=this.rawData_[0][0],a=this.rawData_[this.rawData_.length-1][0];if(t){var i=a-e;e-=i*t,a+=i*t}return[e,a]},Q.prototype.yAxisExtremes=function(){var t=this.gatherDatasets_(this.rolledSeries_,null),e=t.extremes,a=this.axes_;this.computeYAxisRanges_(e);var i=this.axes_;return this.axes_=a,i.map(function(t){return t.extremeRange})},Q.prototype.yAxisRange=function(t){if(void 0===t&&(t=0),t<0||t>=this.axes_.length)return null;var e=this.axes_[t];return[e.computedValueRange[0],e.computedValueRange[1]]},Q.prototype.yAxisRanges=function(){for(var t=[],e=0;e<this.axes_.length;e++)t.push(this.yAxisRange(e));return t},Q.prototype.toDomCoords=function(t,e,a){return[this.toDomXCoord(t),this.toDomYCoord(e,a)]},Q.prototype.toDomXCoord=function(t){if(null===t)return null;var e=this.plotter_.area,a=this.xAxisRange();return e.x+(t-a[0])/(a[1]-a[0])*e.w},Q.prototype.toDomYCoord=function(t,e){var a=this.toPercentYCoord(t,e);if(null===a)return null;var i=this.plotter_.area;return i.y+a*i.h},Q.prototype.toDataCoords=function(t,e,a){return[this.toDataXCoord(t),this.toDataYCoord(e,a)]},Q.prototype.toDataXCoord=function(t){if(null===t)return null;var e=this.plotter_.area,a=this.xAxisRange();if(this.attributes_.getForAxis("logscale","x")){var i=(t-e.x)/e.w;return x.logRangeFraction(a[0],a[1],i)}return a[0]+(t-e.x)/e.w*(a[1]-a[0])},Q.prototype.toDataYCoord=function(t,e){if(null===t)return null;var a=this.plotter_.area,i=this.yAxisRange(e);if(void 0===e&&(e=0),this.attributes_.getForAxis("logscale",e)){var n=(t-a.y)/a.h;return x.logRangeFraction(i[1],i[0],n)}return i[0]+(a.y+a.h-t)/a.h*(i[1]-i[0])},Q.prototype.toPercentYCoord=function(t,e){if(null===t)return null;void 0===e&&(e=0);var a,i=this.yAxisRange(e);if(this.attributes_.getForAxis("logscale",e)){var n=x.log10(i[0]),r=x.log10(i[1]);a=(r-x.log10(t))/(r-n)}else a=(i[1]-t)/(i[1]-i[0]);return a},Q.prototype.toPercentXCoord=function(t){if(null===t)return null;var e,a=this.xAxisRange();if(!0===this.attributes_.getForAxis("logscale","x")){var i=x.log10(a[0]),n=x.log10(a[1]);e=(x.log10(t)-i)/(n-i)}else e=(t-a[0])/(a[1]-a[0]);return e},Q.prototype.numColumns=function(){return 
this.rawData_?this.rawData_[0]?this.rawData_[0].length:this.attr_("labels").length:0},Q.prototype.numRows=function(){return this.rawData_?this.rawData_.length:0},Q.prototype.getValue=function(t,e){return t<0||t>this.rawData_.length?null:e<0||e>this.rawData_[t].length?null:this.rawData_[t][e]},Q.prototype.createInterface_=function(){var t=this.maindiv_;this.graphDiv=document.createElement("div"),this.graphDiv.style.textAlign="left",this.graphDiv.style.position="relative",t.appendChild(this.graphDiv),this.canvas_=x.createCanvas(),this.canvas_.style.position="absolute",this.hidden_=this.createPlotKitCanvas_(this.canvas_),this.canvas_ctx_=x.getContext(this.canvas_),this.hidden_ctx_=x.getContext(this.hidden_),this.resizeElements_(),this.graphDiv.appendChild(this.hidden_),this.graphDiv.appendChild(this.canvas_),this.mouseEventElement_=this.createMouseEventElement_(),this.layout_=new h.default(this);var e=this;this.mouseMoveHandler_=function(t){e.mouseMove_(t)},this.mouseOutHandler_=function(t){var a=t.target||t.fromElement,i=t.relatedTarget||t.toElement;x.isNodeContainedBy(a,e.graphDiv)&&!x.isNodeContainedBy(i,e.graphDiv)&&e.mouseOut_(t)},this.addAndTrackEvent(window,"mouseout",this.mouseOutHandler_),this.addAndTrackEvent(this.mouseEventElement_,"mousemove",this.mouseMoveHandler_),this.resizeHandler_||(this.resizeHandler_=function(t){e.resize()},this.addAndTrackEvent(window,"resize",this.resizeHandler_))},Q.prototype.resizeElements_=function(){this.graphDiv.style.width=this.width_+"px",this.graphDiv.style.height=this.height_+"px";var t=this.getNumericOption("pixelRatio"),e=t||x.getContextPixelRatio(this.canvas_ctx_);this.canvas_.width=this.width_*e,this.canvas_.height=this.height_*e,this.canvas_.style.width=this.width_+"px",this.canvas_.style.height=this.height_+"px",1!==e&&this.canvas_ctx_.scale(e,e);var a=t||x.getContextPixelRatio(this.hidden_ctx_);this.hidden_.width=this.width_*a,this.hidden_.height=this.height_*a,this.hidden_.style.width=this.width_+"px",this.hidden_.style.height=this.height_+"px",1!==a&&this.hidden_ctx_.scale(a,a)},Q.prototype.destroy=function(){this.canvas_ctx_.restore(),this.hidden_ctx_.restore();for(var t=this.plugins_.length-1;t>=0;t--){var e=this.plugins_.pop();e.plugin.destroy&&e.plugin.destroy()}this.removeTrackedEvents_(),x.removeEvent(window,"mouseout",this.mouseOutHandler_),x.removeEvent(this.mouseEventElement_,"mousemove",this.mouseMoveHandler_),x.removeEvent(window,"resize",this.resizeHandler_),this.resizeHandler_=null,function t(e){for(;e.hasChildNodes();)t(e.firstChild),e.removeChild(e.firstChild)}(this.maindiv_);var a=function(t){for(var e in t)"object"==typeof t[e]&&(t[e]=null)};a(this.layout_),a(this.plotter_),a(this)},Q.prototype.createPlotKitCanvas_=function(t){var e=x.createCanvas();return e.style.position="absolute",e.style.top=t.style.top,e.style.left=t.style.left,
e.width=this.width_,e.height=this.height_,e.style.width=this.width_+"px",e.style.height=this.height_+"px",e},Q.prototype.createMouseEventElement_=function(){return this.canvas_},Q.prototype.setColors_=function(){var t=this.getLabels(),e=t.length-1;this.colors_=[],this.colorsMap_={};for(var a=this.getNumericOption("colorSaturation")||1,i=this.getNumericOption("colorValue")||.5,n=Math.ceil(e/2),r=this.getOption("colors"),o=this.visibility(),s=0;s<e;s++)if(o[s]){var l=t[s+1],h=this.attributes_.getForSeries("color",l);if(!h)if(r)h=r[s%r.length];else{var u=s%2?n+(s+1)/2:Math.ceil((s+1)/2),d=1*u/(1+e);h=x.hsvToRGB(d,a,i)}this.colors_.push(h),this.colorsMap_[l]=h}},Q.prototype.getColors=function(){return this.colors_},Q.prototype.getPropertiesForSeries=function(t){for(var e=-1,a=this.getLabels(),i=1;i<a.length;i++)if(a[i]==t){e=i;break}return-1==e?null:{name:t,column:e,visible:this.visibility()[e-1],color:this.colorsMap_[t],axis:1+this.attributes_.axisForSeries(t)}},Q.prototype.createRollInterface_=function(){var t=this,e=this.roller_;e||(this.roller_=e=document.createElement("input"),e.type="text",e.style.display="none",e.className="dygraph-roller",this.graphDiv.appendChild(e));var a=this.getBooleanOption("showRoller")?"block":"none",i=this.getArea(),n={top:i.y+i.h-25+"px",left:i.x+1+"px",display:a};e.size="2",e.value=this.rollPeriod_,x.update(e.style,n),e.onchange=function(){return t.adjustRoll(e.value)}},Q.prototype.createDragInterface_=function(){var t={isZooming:!1,isPanning:!1,is2DPan:!1,dragStartX:null,dragStartY:null,dragEndX:null,dragEndY:null,dragDirection:null,prevEndX:null,prevEndY:null,prevDragDirection:null,cancelNextDblclick:!1,initialLeftmostDate:null,xUnitsPerPixel:null,dateRange:null,px:0,py:0,boundedDates:null,boundedValues:null,tarp:new O.default,initializeMouseDown:function(t,e,a){t.preventDefault?t.preventDefault():(t.returnValue=!1,t.cancelBubble=!0);var i=x.findPos(e.canvas_);a.px=i.x,a.py=i.y,a.dragStartX=x.dragGetX_(t,a),a.dragStartY=x.dragGetY_(t,a),a.cancelNextDblclick=!1,a.tarp.cover()},destroy:function(){var t=this;if((t.isZooming||t.isPanning)&&(t.isZooming=!1,t.dragStartX=null,t.dragStartY=null),t.isPanning){t.isPanning=!1,t.draggingDate=null,t.dateRange=null;for(var e=0;e<a.axes_.length;e++)delete a.axes_[e].draggingValue,delete a.axes_[e].dragValueRange}t.tarp.uncover()}},e=this.getOption("interactionModel"),a=this;for(var i in e)e.hasOwnProperty(i)&&this.addAndTrackEvent(this.mouseEventElement_,i,function(e){return function(i){e(i,a,t)}}(e[i]));if(!e.willDestroyContextMyself){var n=function(e){t.destroy()};this.addAndTrackEvent(document,"mouseup",n)}},Q.prototype.drawZoomRect_=function(t,e,a,i,n,r,o,s){var l=this.canvas_ctx_;r==x.HORIZONTAL?l.clearRect(Math.min(e,o),this.layout_.getPlotArea().y,Math.abs(e-o),this.layout_.getPlotArea().h):r==x.VERTICAL&&l.clearRect(this.layout_.getPlotArea().x,Math.min(i,s),this.layout_.getPlotArea().w,Math.abs(i-s)),t==x.HORIZONTAL?a&&e&&(l.fillStyle="rgba(128,128,128,0.33)",l.fillRect(Math.min(e,a),this.layout_.getPlotArea().y,Math.abs(a-e),this.layout_.getPlotArea().h)):t==x.VERTICAL&&n&&i&&(l.fillStyle="rgba(128,128,128,0.33)",l.fillRect(this.layout_.getPlotArea().x,Math.min(i,n),this.layout_.getPlotArea().w,Math.abs(n-i)))},Q.prototype.clearZoomRect_=function(){this.currentZoomRectArgs_=null,this.canvas_ctx_.clearRect(0,0,this.width_,this.height_)},Q.prototype.doZoomX_=function(t,e){this.currentZoomRectArgs_=null;var 
a=this.toDataXCoord(t),i=this.toDataXCoord(e);this.doZoomXDates_(a,i)},Q.prototype.doZoomXDates_=function(t,e){var a=this,i=this.xAxisRange(),n=[t,e],r=this.getFunctionOption("zoomCallback");this.doAnimatedZoom(i,n,null,null,function(){r&&r.call(a,t,e,a.yAxisRanges())})},Q.prototype.doZoomY_=function(t,e){var a=this;this.currentZoomRectArgs_=null;for(var i=this.yAxisRanges(),n=[],r=0;r<this.axes_.length;r++){var o=this.toDataYCoord(t,r),l=this.toDataYCoord(e,r);n.push([l,o])}var h=this.getFunctionOption("zoomCallback");this.doAnimatedZoom(null,null,i,n,function(){if(h){var t=a.xAxisRange(),e=s(t,2),i=e[0],n=e[1];h.call(a,i,n,a.yAxisRanges())}})},Q.zoomAnimationFunction=function(t,e){return(1-Math.pow(1.5,-t))/(1-Math.pow(1.5,-e))},Q.prototype.resetZoom=function(){var t=this,e=this.isZoomed("x"),a=this.isZoomed("y"),i=e||a;if(this.clearSelection(),i){var n=this.xAxisExtremes(),r=s(n,2),o=r[0],l=r[1],h=this.getBooleanOption("animatedZooms"),u=this.getFunctionOption("zoomCallback");if(!h)return this.dateWindow_=null,this.axes_.forEach(function(t){t.valueRange&&delete t.valueRange}),this.drawGraph_(),void(u&&u.call(this,o,l,this.yAxisRanges()));var d=null,c=null,p=null,g=null;e&&(d=this.xAxisRange(),c=[o,l]),a&&(p=this.yAxisRanges(),g=this.yAxisExtremes()),this.doAnimatedZoom(d,c,p,g,function(){t.dateWindow_=null,t.axes_.forEach(function(t){t.valueRange&&delete t.valueRange}),u&&u.call(t,o,l,t.yAxisRanges())})}},Q.prototype.doAnimatedZoom=function(t,e,a,i,n){var r,o,s=this,l=this.getBooleanOption("animatedZooms")?Q.ANIMATION_STEPS:1,h=[],u=[];if(null!==t&&null!==e)for(r=1;r<=l;r++)o=Q.zoomAnimationFunction(r,l),h[r-1]=[t[0]*(1-o)+o*e[0],t[1]*(1-o)+o*e[1]];if(null!==a&&null!==i)for(r=1;r<=l;r++){o=Q.zoomAnimationFunction(r,l);for(var d=[],c=0;c<this.axes_.length;c++)d.push([a[c][0]*(1-o)+o*i[c][0],a[c][1]*(1-o)+o*i[c][1]]);u[r-1]=d}x.repeatAndCleanup(function(t){if(u.length)for(var e=0;e<s.axes_.length;e++){var a=u[t][e];s.axes_[e].valueRange=[a[0],a[1]]}h.length&&(s.dateWindow_=h[t]),s.drawGraph_()},l,Q.ANIMATION_DURATION/l,n)},Q.prototype.getArea=function(){return this.plotter_.area},Q.prototype.eventToDomCoords=function(t){if(t.offsetX&&t.offsetY)return[t.offsetX,t.offsetY];var e=x.findPos(this.mouseEventElement_);return[x.pageX(t)-e.x,x.pageY(t)-e.y]},Q.prototype.findClosestRow=function(t){for(var e=1/0,a=-1,i=this.layout_.points,n=0;n<i.length;n++)for(var r=i[n],o=r.length,s=0;s<o;s++){var l=r[s];if(x.isValidPoint(l,!0)){var h=Math.abs(l.canvasx-t);h<e&&(e=h,a=l.idx)}}return a},Q.prototype.findClosestPoint=function(t,e){for(var a,i,n,r,o,s,l,h=1/0,u=this.layout_.points.length-1;u>=0;--u)for(var d=this.layout_.points[u],c=0;c<d.length;++c)r=d[c],x.isValidPoint(r)&&(i=r.canvasx-t,n=r.canvasy-e,(a=i*i+n*n)<h&&(h=a,o=r,s=u,l=r.idx));return{row:l,seriesName:this.layout_.setNames[s],point:o}},Q.prototype.findStackedPoint=function(t,e){for(var a,i,n=this.findClosestRow(t),r=0;r<this.layout_.points.length;++r){var o=this.getLeftBoundary_(r),s=n-o,l=this.layout_.points[r];if(!(s>=l.length)){var h=l[s];if(x.isValidPoint(h)){var u=h.canvasy;if(t>h.canvasx&&s+1<l.length){var d=l[s+1];if(x.isValidPoint(d)){var c=d.canvasx-h.canvasx;if(c>0){var p=(t-h.canvasx)/c;u+=p*(d.canvasy-h.canvasy)}}}else if(t<h.canvasx&&s>0){var g=l[s-1];if(x.isValidPoint(g)){var c=h.canvasx-g.canvasx;if(c>0){var p=(h.canvasx-t)/c;u+=p*(g.canvasy-h.canvasy)}}}(0===r||u<e)&&(a=h,i=r)}}}return{row:n,seriesName:this.layout_.setNames[i],point:a}},Q.prototype.mouseMove_=function(t){var e=this.layout_.points;if(void 
0!==e&&null!==e){var a=this.eventToDomCoords(t),i=a[0],n=a[1],r=this.getOption("highlightSeriesOpts"),o=!1;if(r&&!this.isSeriesLocked()){var s;s=this.getBooleanOption("stackedGraph")?this.findStackedPoint(i,n):this.findClosestPoint(i,n),o=this.setSelection(s.row,s.seriesName)}else{var l=this.findClosestRow(i);o=this.setSelection(l)}var h=this.getFunctionOption("highlightCallback");h&&o&&h.call(this,t,this.lastx_,this.selPoints_,this.lastRow_,this.highlightSet_)}},Q.prototype.getLeftBoundary_=function(t){if(this.boundaryIds_[t])return this.boundaryIds_[t][0];for(var e=0;e<this.boundaryIds_.length;e++)if(void 0!==this.boundaryIds_[e])return this.boundaryIds_[e][0];return 0},Q.prototype.animateSelection_=function(t){void 0===this.fadeLevel&&(this.fadeLevel=0),void 0===this.animateId&&(this.animateId=0);var e=this.fadeLevel,a=t<0?e:10-e;if(a<=0)return void(this.fadeLevel&&this.updateSelection_(1));var i=++this.animateId,n=this,r=function(){0!==n.fadeLevel&&t<0&&(n.fadeLevel=0,n.clearSelection())};x.repeatAndCleanup(function(e){n.animateId==i&&(n.fadeLevel+=t,0===n.fadeLevel?n.clearSelection():n.updateSelection_(n.fadeLevel/10))},a,30,r)},Q.prototype.updateSelection_=function(t){this.cascadeEvents_("select",{selectedRow:-1===this.lastRow_?void 0:this.lastRow_,selectedX:-1===this.lastx_?void 0:this.lastx_,selectedPoints:this.selPoints_});var e,a=this.canvas_ctx_;if(this.getOption("highlightSeriesOpts")){a.clearRect(0,0,this.width_,this.height_);var i=1-this.getNumericOption("highlightSeriesBackgroundAlpha"),n=x.toRGB_(this.getOption("highlightSeriesBackgroundColor"));if(i){if(void 0===t)return void this.animateSelection_(1);i*=t,a.fillStyle="rgba("+n.r+","+n.g+","+n.b+","+i+")",a.fillRect(0,0,this.width_,this.height_)}this.plotter_._renderLineChart(this.highlightSet_,a)}else if(this.previousVerticalX_>=0){var r=0,o=this.attr_("labels");for(e=1;e<o.length;e++){var s=this.getNumericOption("highlightCircleSize",o[e]);s>r&&(r=s)}var l=this.previousVerticalX_;a.clearRect(l-r-1,0,2*r+2,this.height_)}if(this.selPoints_.length>0){var h=this.selPoints_[0].canvasx;for(a.save(),e=0;e<this.selPoints_.length;e++){var u=this.selPoints_[e];if(!isNaN(u.canvasy)){var d=this.getNumericOption("highlightCircleSize",u.name),c=this.getFunctionOption("drawHighlightPointCallback",u.name),p=this.plotter_.colors[u.name];c||(c=x.Circles.DEFAULT),a.lineWidth=this.getNumericOption("strokeWidth",u.name),a.strokeStyle=p,a.fillStyle=p,c.call(this,this,u.name,a,h,u.canvasy,p,d,u.idx)}}a.restore(),this.previousVerticalX_=h}},Q.prototype.setSelection=function(t,e,a){this.selPoints_=[];var i=!1;if(!1!==t&&t>=0){t!=this.lastRow_&&(i=!0),this.lastRow_=t;for(var n=0;n<this.layout_.points.length;++n){var r=this.layout_.points[n],o=t-this.getLeftBoundary_(n);if(o>=0&&o<r.length&&r[o].idx==t){var s=r[o];null!==s.yval&&this.selPoints_.push(s)}else for(var l=0;l<r.length;++l){var s=r[l];if(s.idx==t){null!==s.yval&&this.selPoints_.push(s);break}}}}else this.lastRow_>=0&&(i=!0),this.lastRow_=-1;return this.selPoints_.length?this.lastx_=this.selPoints_[0].xval:this.lastx_=-1,void 0!==e&&(this.highlightSet_!==e&&(i=!0),this.highlightSet_=e),void 0!==a&&(this.lockedSet_=a),i&&this.updateSelection_(void 
0),i},Q.prototype.mouseOut_=function(t){this.getFunctionOption("unhighlightCallback")&&this.getFunctionOption("unhighlightCallback").call(this,t),this.getBooleanOption("hideOverlayOnMouseOut")&&!this.lockedSet_&&this.clearSelection()},Q.prototype.clearSelection=function(){if(this.cascadeEvents_("deselect",{}),this.lockedSet_=!1,this.fadeLevel)return void this.animateSelection_(-1);this.canvas_ctx_.clearRect(0,0,this.width_,this.height_),this.fadeLevel=0,this.selPoints_=[],this.lastx_=-1,this.lastRow_=-1,this.highlightSet_=null},Q.prototype.getSelection=function(){if(!this.selPoints_||this.selPoints_.length<1)return-1;for(var t=0;t<this.layout_.points.length;t++)for(var e=this.layout_.points[t],a=0;a<e.length;a++)if(e[a].x==this.selPoints_[0].x)return e[a].idx;return-1},Q.prototype.getHighlightSeries=function(){return this.highlightSet_},Q.prototype.isSeriesLocked=function(){return this.lockedSet_},Q.prototype.loadedEvent_=function(t){this.rawData_=this.parseCSV_(t),this.cascadeDataDidUpdateEvent_(),this.predraw_()},Q.prototype.addXTicks_=function(){var t;t=this.dateWindow_?[this.dateWindow_[0],this.dateWindow_[1]]:this.xAxisExtremes();var e=this.optionsViewForAxis_("x"),a=e("ticker")(t[0],t[1],this.plotter_.area.w,e,this);this.layout_.setXTicks(a)},Q.prototype.getHandlerClass_=function(){return this.attr_("dataHandler")?this.attr_("dataHandler"):this.fractions_?this.getBooleanOption("errorBars")?F.default:M.default:this.getBooleanOption("customBars")?P.default:this.getBooleanOption("errorBars")?T.default:E.default},Q.prototype.predraw_=function(){var t=new Date;this.dataHandler_=new(this.getHandlerClass_()),this.layout_.computePlotArea(),this.computeYAxes_(),this.is_initial_draw_||(this.canvas_ctx_.restore(),this.hidden_ctx_.restore()),this.canvas_ctx_.save(),this.hidden_ctx_.save(),this.plotter_=new d.default(this,this.hidden_,this.hidden_ctx_,this.layout_),this.createRollInterface_(),this.cascadeEvents_("predraw"),this.rolledSeries_=[null];for(var e=1;e<this.numColumns();e++){var a=this.dataHandler_.extractSeries(this.rawData_,e,this.attributes_);this.rollPeriod_>1&&(a=this.dataHandler_.rollingAverage(a,this.rollPeriod_,this.attributes_)),this.rolledSeries_.push(a)}this.drawGraph_();var i=new Date;this.drawingTimeMs_=i-t},Q.PointType=void 0,Q.stackPoints_=function(t,e,a,i){for(var n=null,r=null,o=null,s=-1,l=0;l<t.length;++l){var h=t[l],u=h.xval;void 0===e[u]&&(e[u]=0);var d=h.yval;isNaN(d)||null===d?"none"==i?d=0:(!function(e){if(!(s>=e))for(var a=e;a<t.length;++a)if(o=null,!isNaN(t[a].yval)&&null!==t[a].yval){s=a,o=t[a];break}}(l),d=r&&o&&"none"!=i?r.yval+(o.yval-r.yval)*((u-r.xval)/(o.xval-r.xval)):r&&"all"==i?r.yval:o&&"all"==i?o.yval:0):r=h;var c=e[u];n!=u&&(c+=d,e[u]=c),n=u,h.yval_stacked=c,c>a[1]&&(a[1]=c),c<a[0]&&(a[0]=c)}},Q.prototype.gatherDatasets_=function(t,e){var a,i,n,r,o,s,l=[],h=[],u=[],d={},c=t.length-1;for(a=c;a>=1;a--)if(this.visibility()[a-1]){if(e){s=t[a];var p=e[0],g=e[1];for(n=null,r=null,i=0;i<s.length;i++)s[i][0]>=p&&null===n&&(n=i),s[i][0]<=g&&(r=i);null===n&&(n=0);for(var f=n,v=!0;v&&f>0;)f--,v=null===s[f][1];null===r&&(r=s.length-1);var _=r;for(v=!0;v&&_<s.length-1;)_++,v=null===s[_][1];f!==n&&(n=f),_!==r&&(r=_),l[a-1]=[n,r],s=s.slice(n,r+1)}else s=t[a],l[a-1]=[0,s.length-1];var y=this.attr_("labels")[a],x=this.dataHandler_.getExtremeYValues(s,e,this.getBooleanOption("stepPlot",y)),m=this.dataHandler_.seriesToPoints(s,y,l[a-1][0]);this.getBooleanOption("stackedGraph")&&(o=this.attributes_.axisForSeries(y),void 
0===u[o]&&(u[o]=[]),Q.stackPoints_(m,u[o],x,this.getBooleanOption("stackedGraphNaNFill"))),d[y]=x,h[a]=m}return{points:h,extremes:d,boundaryIds:l}},Q.prototype.drawGraph_=function(){var t=new Date,e=this.is_initial_draw_;this.is_initial_draw_=!1,this.layout_.removeAllDatasets(),this.setColors_(),this.attrs_.pointSize=.5*this.getNumericOption("highlightCircleSize");var a=this.gatherDatasets_(this.rolledSeries_,this.dateWindow_),i=a.points,n=a.extremes;this.boundaryIds_=a.boundaryIds,this.setIndexByName_={};for(var r=this.attr_("labels"),o=0,s=1;s<i.length;s++)this.visibility()[s-1]&&(this.layout_.addDataset(r[s],i[s]),this.datasetIndex_[s]=o++);for(var s=0;s<r.length;s++)this.setIndexByName_[r[s]]=s;if(this.computeYAxisRanges_(n),this.layout_.setYAxes(this.axes_),this.addXTicks_(),this.layout_.evaluate(),this.renderGraph_(e),this.getStringOption("timingName")){var l=new Date;console.log(this.getStringOption("timingName")+" - drawGraph: "+(l-t)+"ms")}},Q.prototype.renderGraph_=function(t){this.cascadeEvents_("clearChart"),this.plotter_.clear();var e=this.getFunctionOption("underlayCallback");e&&e.call(this,this.hidden_ctx_,this.layout_.getPlotArea(),this,this);var a={canvas:this.hidden_,drawingContext:this.hidden_ctx_};this.cascadeEvents_("willDrawChart",a),this.plotter_.render(),this.cascadeEvents_("didDrawChart",a),this.lastRow_=-1,this.canvas_.getContext("2d").clearRect(0,0,this.width_,this.height_);var i=this.getFunctionOption("drawCallback");if(null!==i&&i.call(this,this,t),t)for(this.readyFired_=!0;this.readyFns_.length>0;){var n=this.readyFns_.pop();n(this)}},Q.prototype.computeYAxes_=function(){var t,e,a;for(this.axes_=[],t=0;t<this.attributes_.numAxes();t++)e={g:this},x.update(e,this.attributes_.axisOptions(t)),this.axes_[t]=e;for(t=0;t<this.axes_.length;t++)if(0===t)e=this.optionsViewForAxis_("y"+(t?"2":"")),(a=e("valueRange"))&&(this.axes_[t].valueRange=a);else{var i=this.user_attrs_.axes;i&&i.y2&&(a=i.y2.valueRange)&&(this.axes_[t].valueRange=a)}},Q.prototype.numAxes=function(){return this.attributes_.numAxes()},Q.prototype.axisPropertiesForSeries=function(t){return this.axes_[this.attributes_.axisForSeries(t)]},Q.prototype.computeYAxisRanges_=function(t){for(var e,a,i,n,r,o=function(t){return isNaN(parseFloat(t))},s=this.attributes_.numAxes(),l=0;l<s;l++){var h=this.axes_[l],u=this.attributes_.getForAxis("logscale",l),d=this.attributes_.getForAxis("includeZero",l),c=this.attributes_.getForAxis("independentTicks",l);i=this.attributes_.seriesForAxis(l),e=!0,n=.1;var p=this.getNumericOption("yRangePad");if(null!==p&&(e=!1,n=p/this.plotter_.area.h),0===i.length)h.extremeRange=[0,1];else{for(var g,f,v=1/0,_=-1/0,y=0;y<i.length;y++)t.hasOwnProperty(i[y])&&(g=t[i[y]][0],null!==g&&(v=Math.min(g,v)),null!==(f=t[i[y]][1])&&(_=Math.max(f,_)));d&&!u&&(v>0&&(v=0),_<0&&(_=0)),v==1/0&&(v=0),_==-1/0&&(_=1),a=_-v,0===a&&(0!==_?a=Math.abs(_):(_=1,a=1));var m=_,b=v;e&&(u?(m=_+n*a,b=v):(m=_+n*a,b=v-n*a,b<0&&v>=0&&(b=0),m>0&&_<=0&&(m=0))),h.extremeRange=[b,m]}if(h.valueRange){var w=o(h.valueRange[0])?h.extremeRange[0]:h.valueRange[0],A=o(h.valueRange[1])?h.extremeRange[1]:h.valueRange[1];h.computedValueRange=[w,A]}else h.computedValueRange=h.extremeRange;if(!e)if(w=h.computedValueRange[0],A=h.computedValueRange[1],w===A&&(w-=.5,A+=.5),u){var O=n/(2*n-1),D=(n-1)/(2*n-1);h.computedValueRange[0]=x.logRangeFraction(w,A,O),h.computedValueRange[1]=x.logRangeFraction(w,A,D)}else a=A-w,h.computedValueRange[0]=w-a*n,h.computedValueRange[1]=A+a*n;if(c){h.independentTicks=c;var 
E=this.optionsViewForAxis_("y"+(l?"2":"")),L=E("ticker");h.ticks=L(h.computedValueRange[0],h.computedValueRange[1],this.plotter_.area.h,E,this),r||(r=h)}}if(void 0===r)throw'Configuration Error: At least one axis has to have the "independentTicks" option activated.';for(var l=0;l<s;l++){var h=this.axes_[l];if(!h.independentTicks){for(var E=this.optionsViewForAxis_("y"+(l?"2":"")),L=E("ticker"),T=r.ticks,S=r.computedValueRange[1]-r.computedValueRange[0],P=h.computedValueRange[1]-h.computedValueRange[0],C=[],M=0;M<T.length;M++){var N=(T[M].v-r.computedValueRange[0])/S,F=h.computedValueRange[0]+N*P;C.push(F)}h.ticks=L(h.computedValueRange[0],h.computedValueRange[1],this.plotter_.area.h,E,this,C)}}},Q.prototype.detectTypeFromString_=function(t){var e=!1,a=t.indexOf("-");a>0&&"e"!=t[a-1]&&"E"!=t[a-1]||t.indexOf("/")>=0||isNaN(parseFloat(t))?e=!0:8==t.length&&t>"19700101"&&t<"20371231"&&(e=!0),this.setXAxisOptions_(e)},Q.prototype.setXAxisOptions_=function(t){t?(this.attrs_.xValueParser=x.dateParser,this.attrs_.axes.x.valueFormatter=x.dateValueFormatter,this.attrs_.axes.x.ticker=_.dateTicker,this.attrs_.axes.x.axisLabelFormatter=x.dateAxisLabelFormatter):(this.attrs_.xValueParser=function(t){return parseFloat(t)},this.attrs_.axes.x.valueFormatter=function(t){return t},this.attrs_.axes.x.ticker=_.numericTicks,this.attrs_.axes.x.axisLabelFormatter=this.attrs_.axes.x.valueFormatter)},Q.prototype.parseCSV_=function(t){var e,a,i=[],n=x.detectLineDelimiter(t),r=t.split(n||"\n"),o=this.getStringOption("delimiter");-1==r[0].indexOf(o)&&r[0].indexOf("\t")>=0&&(o="\t");var s=0;"labels"in this.user_attrs_||(s=1,this.attrs_.labels=r[0].split(o),this.attributes_.reparseSeries());for(var l,h=!1,u=this.attr_("labels").length,d=!1,c=s;c<r.length;c++){var p=r[c];if(c,0!==p.length&&"#"!=p[0]){var g=p.split(o);if(!(g.length<2)){var f=[];if(h||(this.detectTypeFromString_(g[0]),l=this.getFunctionOption("xValueParser"),h=!0),f[0]=l(g[0],this),this.fractions_)for(a=1;a<g.length;a++)e=g[a].split("/"),2!=e.length?(console.error('Expected fractional "num/den" values in CSV data but found a value \''+g[a]+"' on line "+(1+c)+" ('"+p+"') which is not of this form."),f[a]=[0,0]):f[a]=[x.parseFloat_(e[0],c,p),x.parseFloat_(e[1],c,p)];else if(this.getBooleanOption("errorBars"))for(g.length%2!=1&&console.error("Expected alternating (value, stdev.) pairs in CSV data but line "+(1+c)+" has an odd number of values ("+(g.length-1)+"): '"+p+"'"),a=1;a<g.length;a+=2)f[(a+1)/2]=[x.parseFloat_(g[a],c,p),x.parseFloat_(g[a+1],c,p)];else if(this.getBooleanOption("customBars"))for(a=1;a<g.length;a++){var v=g[a];/^ *$/.test(v)?f[a]=[null,null,null]:(e=v.split(";"),3==e.length?f[a]=[x.parseFloat_(e[0],c,p),x.parseFloat_(e[1],c,p),x.parseFloat_(e[2],c,p)]:console.warn('When using customBars, values must be either blank or "low;center;high" tuples (got "'+v+'" on line '+(1+c)))}else for(a=1;a<g.length;a++)f[a]=x.parseFloat_(g[a],c,p);if(i.length>0&&f[0]<i[i.length-1][0]&&(d=!0),f.length!=u&&console.error("Number of columns in line "+c+" ("+f.length+") does not agree with number of labels ("+u+") "+p),0===c&&this.attr_("labels")){var _=!0;for(a=0;_&&a<f.length;a++)f[a]&&(_=!1);if(_){console.warn("The dygraphs 'labels' option is set, but the first row of CSV data ('"+p+"') appears to also contain labels. 
Will drop the CSV labels and use the option labels.");continue}}i.push(f)}}}return d&&(console.warn("CSV is out of order; order it correctly to speed loading."),i.sort(function(t,e){return t[0]-e[0]})),i},Q.prototype.parseArray_=function(t){if(0===t.length)return console.error("Can't plot empty data set"),null;if(0===t[0].length)return console.error("Data set cannot contain an empty row"),null;o(t);var e;if(null===this.attr_("labels")){for(console.warn("Using default labels. Set labels explicitly via 'labels' in the options parameter"),this.attrs_.labels=["X"],e=1;e<t[0].length;e++)this.attrs_.labels.push("Y"+e);this.attributes_.reparseSeries()}else{var a=this.attr_("labels");if(a.length!=t[0].length)return console.error("Mismatch between number of labels ("+a+") and number of columns in array ("+t[0].length+")"),null}if(x.isDateLike(t[0][0])){this.attrs_.axes.x.valueFormatter=x.dateValueFormatter,this.attrs_.axes.x.ticker=_.dateTicker,this.attrs_.axes.x.axisLabelFormatter=x.dateAxisLabelFormatter;var i=x.clone(t);for(e=0;e<t.length;e++){if(0===i[e].length)return console.error("Row "+(1+e)+" of data is empty"),null;if(null===i[e][0]||"function"!=typeof i[e][0].getTime||isNaN(i[e][0].getTime()))return console.error("x value in row "+(1+e)+" is not a Date"),null;i[e][0]=i[e][0].getTime()}return i}return this.attrs_.axes.x.valueFormatter=function(t){return t},this.attrs_.axes.x.ticker=_.numericTicks,this.attrs_.axes.x.axisLabelFormatter=x.numberAxisLabelFormatter,t},Q.prototype.parseDataTable_=function(t){var e=t.getNumberOfColumns(),a=t.getNumberOfRows(),i=t.getColumnType(0);if("date"==i||"datetime"==i)this.attrs_.xValueParser=x.dateParser,this.attrs_.axes.x.valueFormatter=x.dateValueFormatter,this.attrs_.axes.x.ticker=_.dateTicker,this.attrs_.axes.x.axisLabelFormatter=x.dateAxisLabelFormatter;else{if("number"!=i)throw new Error("only 'date', 'datetime' and 'number' types are supported for column 1 of DataTable input (Got '"+i+"')");this.attrs_.xValueParser=function(t){return parseFloat(t)},this.attrs_.axes.x.valueFormatter=function(t){return t},this.attrs_.axes.x.ticker=_.numericTicks,this.attrs_.axes.x.axisLabelFormatter=this.attrs_.axes.x.valueFormatter}var n,r,o=[],s={},l=!1;for(n=1;n<e;n++){var h=t.getColumnType(n);if("number"==h)o.push(n);else{if("string"!=h||!this.getBooleanOption("displayAnnotations"))throw new Error("Only 'number' is supported as a dependent type with Gviz. 
'string' is only supported if displayAnnotations is true");var u=o[o.length-1];s.hasOwnProperty(u)?s[u].push(n):s[u]=[n],l=!0}}var d=[t.getColumnLabel(0)];for(n=0;n<o.length;n++)d.push(t.getColumnLabel(o[n])),this.getBooleanOption("errorBars")&&(n+=1);this.attrs_.labels=d,e=d.length;var c=[],p=!1,g=[];for(n=0;n<a;n++){var f=[];if(void 0!==t.getValue(n,0)&&null!==t.getValue(n,0)){if("date"==i||"datetime"==i?f.push(t.getValue(n,0).getTime()):f.push(t.getValue(n,0)),this.getBooleanOption("errorBars"))for(r=0;r<e-1;r++)f.push([t.getValue(n,1+2*r),t.getValue(n,2+2*r)]);else{for(r=0;r<o.length;r++){var v=o[r];if(f.push(t.getValue(n,v)),l&&s.hasOwnProperty(v)&&null!==t.getValue(n,s[v][0])){var y={};y.series=t.getColumnLabel(v),y.xval=f[0],y.shortText=function(t){var e=String.fromCharCode(65+t%26);for(t=Math.floor(t/26);t>0;)e=String.fromCharCode(65+(t-1)%26)+e.toLowerCase(),t=Math.floor((t-1)/26);return e}(g.length),y.text="";for(var m=0;m<s[v].length;m++)m&&(y.text+="\n"),y.text+=t.getValue(n,s[v][m]);g.push(y)}}for(r=0;r<f.length;r++)isFinite(f[r])||(f[r]=null)}c.length>0&&f[0]<c[c.length-1][0]&&(p=!0),c.push(f)}else console.warn("Ignoring row "+n+" of DataTable because of undefined or null first column.")}p&&(console.warn("DataTable is out of order; order it correctly to speed loading."),c.sort(function(t,e){return t[0]-e[0]})),this.rawData_=c,g.length>0&&this.setAnnotations(g,!0),this.attributes_.reparseSeries()},Q.prototype.cascadeDataDidUpdateEvent_=function(){this.cascadeEvents_("dataDidUpdate",{})},Q.prototype.start_=function(){var t=this.file_;if("function"==typeof t&&(t=t()),x.isArrayLike(t))this.rawData_=this.parseArray_(t),this.cascadeDataDidUpdateEvent_(),this.predraw_();else if("object"==typeof t&&"function"==typeof t.getColumnRange)this.parseDataTable_(t),this.cascadeDataDidUpdateEvent_(),this.predraw_();else if("string"==typeof t){var e=x.detectLineDelimiter(t);if(e)this.loadedEvent_(t);else{var a;a=window.XMLHttpRequest?new XMLHttpRequest:new ActiveXObject("Microsoft.XMLHTTP");var i=this;a.onreadystatechange=function(){4==a.readyState&&(200!==a.status&&0!==a.status||i.loadedEvent_(a.responseText))},a.open("GET",t,!0),a.send(null)}}else console.error("Unknown data format: "+typeof t)},Q.prototype.updateOptions=function(t,e){void 0===e&&(e=!1);var a=t.file,i=Q.copyUserAttrs_(t);"rollPeriod"in i&&(this.rollPeriod_=i.rollPeriod),"dateWindow"in i&&(this.dateWindow_=i.dateWindow);var n=x.isPixelChangingOptionList(this.attr_("labels"),i);x.updateDeep(this.user_attrs_,i),this.attributes_.reparseSeries(),a?(this.cascadeEvents_("dataWillUpdate",{}),this.file_=a,e||this.start_()):e||(n?this.predraw_():this.renderGraph_(!1))},Q.copyUserAttrs_=function(t){var e={};for(var a in t)t.hasOwnProperty(a)&&"file"!=a&&t.hasOwnProperty(a)&&(e[a]=t[a]);return e},Q.prototype.resize=function(t,e){if(!this.resize_lock){this.resize_lock=!0,null===t!=(null===e)&&(console.warn("Dygraph.resize() should be called with zero parameters or two non-NULL parameters. 
Pretending it was zero."),t=e=null);var a=this.width_,i=this.height_;t?(this.maindiv_.style.width=t+"px",this.maindiv_.style.height=e+"px",this.width_=t,this.height_=e):(this.width_=this.maindiv_.clientWidth,this.height_=this.maindiv_.clientHeight),a==this.width_&&i==this.height_||(this.resizeElements_(),this.predraw_()),this.resize_lock=!1}},Q.prototype.adjustRoll=function(t){this.rollPeriod_=t,this.predraw_()},Q.prototype.visibility=function(){for(this.getOption("visibility")||(this.attrs_.visibility=[]);this.getOption("visibility").length<this.numColumns()-1;)this.attrs_.visibility.push(!0);return this.getOption("visibility")},Q.prototype.setVisibility=function(t,e){var a=this.visibility(),i=!1;if(Array.isArray(t)||(null!==t&&"object"==typeof t?i=!0:t=[t]),i)for(var n in t)t.hasOwnProperty(n)&&(n<0||n>=a.length?console.warn("Invalid series number in setVisibility: "+n):a[n]=t[n]);else for(var n=0;n<t.length;n++)"boolean"==typeof t[n]?n>=a.length?console.warn("Invalid series number in setVisibility: "+n):a[n]=t[n]:t[n]<0||t[n]>=a.length?console.warn("Invalid series number in setVisibility: "+t[n]):a[t[n]]=e;this.predraw_()},Q.prototype.size=function(){return{width:this.width_,height:this.height_}},Q.prototype.setAnnotations=function(t,e){if(this.annotations_=t,!this.layout_)return void console.warn("Tried to setAnnotations before dygraph was ready. Try setting them in a ready() block. See dygraphs.com/tests/annotation.html");this.layout_.setAnnotations(this.annotations_),e||this.predraw_()},Q.prototype.annotations=function(){return this.annotations_},Q.prototype.getLabels=function(){var t=this.attr_("labels");return t?t.slice():null},Q.prototype.indexFromSetName=function(t){return this.setIndexByName_[t]},Q.prototype.getRowForX=function(t){for(var e=0,a=this.numRows()-1;e<=a;){var i=a+e>>1,n=this.getValue(i,0);if(n<t)e=i+1;else if(n>t)a=i-1;else{if(e==i)return i;a=i}}return null},Q.prototype.ready=function(t){this.is_initial_draw_?this.readyFns_.push(t):t.call(this,this)},Q.prototype.addAndTrackEvent=function(t,e,a){x.addEvent(t,e,a),this.registeredEvents_.push({elem:t,type:e,fn:a})},Q.prototype.removeTrackedEvents_=function(){if(this.registeredEvents_)for(var t=0;t<this.registeredEvents_.length;t++){var 
e=this.registeredEvents_[t];x.removeEvent(e.elem,e.type,e.fn)}this.registeredEvents_=[]},Q.PLUGINS=[U.default,X.default,j.default,Z.default,H.default,G.default],Q.GVizChart=q.default,Q.DASHED_LINE=x.DASHED_LINE,Q.DOT_DASH_LINE=x.DOT_DASH_LINE,Q.dateAxisLabelFormatter=x.dateAxisLabelFormatter,Q.toRGB_=x.toRGB_,Q.findPos=x.findPos,Q.pageX=x.pageX,Q.pageY=x.pageY,Q.dateString_=x.dateString_,Q.defaultInteractionModel=f.default.defaultModel,Q.nonInteractiveModel=Q.nonInteractiveModel_=f.default.nonInteractiveModel_,Q.Circles=x.Circles,Q.Plugins={Legend:U.default,Axes:X.default,Annotations:H.default,ChartLabels:Z.default,Grid:G.default,RangeSelector:j.default},Q.DataHandlers={DefaultHandler:E.default,BarsHandler:R.default,CustomBarsHandler:P.default,DefaultFractionHandler:M.default,ErrorBarsHandler:T.default,FractionsBarsHandler:F.default},Q.startPan=f.default.startPan,Q.startZoom=f.default.startZoom,Q.movePan=f.default.movePan,Q.moveZoom=f.default.moveZoom,Q.endPan=f.default.endPan,Q.endZoom=f.default.endZoom,Q.numericLinearTicks=_.numericLinearTicks,Q.numericTicks=_.numericTicks,Q.dateTicker=_.dateTicker,Q.Granularity=_.Granularity,Q.getDateAxis=_.getDateAxis,Q.floatFormat=x.floatFormat,a.default=Q,e.exports=a.default}).call(this,t("_process"))},{"./datahandler/bars":5,"./datahandler/bars-custom":2,"./datahandler/bars-error":3,"./datahandler/bars-fractions":4,"./datahandler/default":8,"./datahandler/default-fractions":7,"./dygraph-canvas":9,"./dygraph-default-attrs":10,"./dygraph-gviz":11,"./dygraph-interaction-model":12,"./dygraph-layout":13,"./dygraph-options":15,"./dygraph-options-reference":14,"./dygraph-tickers":16,"./dygraph-utils":17,"./iframe-tarp":19,"./plugins/annotations":20,"./plugins/axes":21,"./plugins/chart-labels":22,"./plugins/grid":23,"./plugins/legend":24,"./plugins/range-selector":25,_process:1}],19:[function(t,e,a){"use strict";function i(){this.tarps=[]}Object.defineProperty(a,"__esModule",{value:!0});var n=t("./dygraph-utils"),r=function(t){if(t&&t.__esModule)return t;var e={};if(null!=t)for(var a in t)Object.prototype.hasOwnProperty.call(t,a)&&(e[a]=t[a]);return e.default=t,e}(n);i.prototype.cover=function(){for(var t=document.getElementsByTagName("iframe"),e=0;e<t.length;e++){var a=t[e],i=r.findPos(a),n=i.x,o=i.y,s=a.offsetWidth,l=a.offsetHeight,h=document.createElement("div");h.style.position="absolute",h.style.left=n+"px",h.style.top=o+"px",h.style.width=s+"px",h.style.height=l+"px",h.style.zIndex=999,document.body.appendChild(h),this.tarps.push(h)}},i.prototype.uncover=function(){for(var t=0;t<this.tarps.length;t++)this.tarps[t].parentNode.removeChild(this.tarps[t]);this.tarps=[]},a.default=i,e.exports=a.default},{"./dygraph-utils":17}],20:[function(t,e,a){"use strict";Object.defineProperty(a,"__esModule",{value:!0});var i=function(){this.annotations_=[]};i.prototype.toString=function(){return"Annotations Plugin"},i.prototype.activate=function(t){return{clearChart:this.clearChart,didDrawChart:this.didDrawChart}},i.prototype.detachLabels=function(){for(var t=0;t<this.annotations_.length;t++){var e=this.annotations_[t];e.parentNode&&e.parentNode.removeChild(e),this.annotations_[t]=null}this.annotations_=[]},i.prototype.clearChart=function(t){this.detachLabels()},i.prototype.didDrawChart=function(t){var e=t.dygraph,a=e.layout_.annotated_points;if(a&&0!==a.length)for(var i=t.canvas.parentNode,n=function(t,a,i){return function(n){var 
r=i.annotation;r.hasOwnProperty(t)?r[t](r,i,e,n):e.getOption(a)&&e.getOption(a)(r,i,e,n)}},r=t.dygraph.getArea(),o={},s=0;s<a.length;s++){var l=a[s];if(!(l.canvasx<r.x||l.canvasx>r.x+r.w||l.canvasy<r.y||l.canvasy>r.y+r.h)){var h=l.annotation,u=6;h.hasOwnProperty("tickHeight")&&(u=h.tickHeight);var d=document.createElement("div");d.style.fontSize=e.getOption("axisLabelFontSize")+"px"
+;var c="dygraph-annotation";h.hasOwnProperty("icon")||(c+=" dygraphDefaultAnnotation dygraph-default-annotation"),h.hasOwnProperty("cssClass")&&(c+=" "+h.cssClass),d.className=c;var p=h.hasOwnProperty("width")?h.width:16,g=h.hasOwnProperty("height")?h.height:16;if(h.hasOwnProperty("icon")){var f=document.createElement("img");f.src=h.icon,f.width=p,f.height=g,d.appendChild(f)}else l.annotation.hasOwnProperty("shortText")&&d.appendChild(document.createTextNode(l.annotation.shortText));var v=l.canvasx-p/2;d.style.left=v+"px";var _=0;if(h.attachAtBottom){var y=r.y+r.h-g-u;o[v]?y-=o[v]:o[v]=0,o[v]+=u+g,_=y}else _=l.canvasy-g-u;d.style.top=_+"px",d.style.width=p+"px",d.style.height=g+"px",d.title=l.annotation.text,d.style.color=e.colorsMap_[l.name],d.style.borderColor=e.colorsMap_[l.name],h.div=d,e.addAndTrackEvent(d,"click",n("clickHandler","annotationClickHandler",l)),e.addAndTrackEvent(d,"mouseover",n("mouseOverHandler","annotationMouseOverHandler",l)),e.addAndTrackEvent(d,"mouseout",n("mouseOutHandler","annotationMouseOutHandler",l)),e.addAndTrackEvent(d,"dblclick",n("dblClickHandler","annotationDblClickHandler",l)),i.appendChild(d),this.annotations_.push(d);var x=t.drawingContext;if(x.save(),x.strokeStyle=h.hasOwnProperty("tickColor")?h.tickColor:e.colorsMap_[l.name],x.lineWidth=h.hasOwnProperty("tickWidth")?h.tickWidth:e.getOption("strokeWidth"),x.beginPath(),h.attachAtBottom){var y=_+g;x.moveTo(l.canvasx,y),x.lineTo(l.canvasx,y+u)}else x.moveTo(l.canvasx,l.canvasy),x.lineTo(l.canvasx,l.canvasy-2-u);x.closePath(),x.stroke(),x.restore()}}},i.prototype.destroy=function(){this.detachLabels()},a.default=i,e.exports=a.default},{}],21:[function(t,e,a){"use strict";Object.defineProperty(a,"__esModule",{value:!0});var i=t("../dygraph-utils"),n=function(t){if(t&&t.__esModule)return t;var e={};if(null!=t)for(var a in t)Object.prototype.hasOwnProperty.call(t,a)&&(e[a]=t[a]);return e.default=t,e}(i),r=function(){this.xlabels_=[],this.ylabels_=[]};r.prototype.toString=function(){return"Axes Plugin"},r.prototype.activate=function(t){return{layout:this.layout,clearChart:this.clearChart,willDrawChart:this.willDrawChart}},r.prototype.layout=function(t){var e=t.dygraph;if(e.getOptionForAxis("drawAxis","y")){var a=e.getOptionForAxis("axisLabelWidth","y")+2*e.getOptionForAxis("axisTickSize","y");t.reserveSpaceLeft(a)}if(e.getOptionForAxis("drawAxis","x")){var i;i=e.getOption("xAxisHeight")?e.getOption("xAxisHeight"):e.getOptionForAxis("axisLabelFontSize","x")+2*e.getOptionForAxis("axisTickSize","x"),t.reserveSpaceBottom(i)}if(2==e.numAxes()){if(e.getOptionForAxis("drawAxis","y2")){var a=e.getOptionForAxis("axisLabelWidth","y2")+2*e.getOptionForAxis("axisTickSize","y2");t.reserveSpaceRight(a)}}else e.numAxes()>2&&e.error("Only two y-axes are supported at this time. 
(Trying to use "+e.numAxes()+")")},r.prototype.detachLabels=function(){function t(t){for(var e=0;e<t.length;e++){var a=t[e];a.parentNode&&a.parentNode.removeChild(a)}}t(this.xlabels_),t(this.ylabels_),this.xlabels_=[],this.ylabels_=[]},r.prototype.clearChart=function(t){this.detachLabels()},r.prototype.willDrawChart=function(t){function e(t){return Math.round(t)+.5}function a(t){return Math.round(t)-.5}var i=this,r=t.dygraph;if(r.getOptionForAxis("drawAxis","x")||r.getOptionForAxis("drawAxis","y")||r.getOptionForAxis("drawAxis","y2")){var o,s,l,h=t.drawingContext,u=t.canvas.parentNode,d=r.width_,c=r.height_,p=function(t){return{position:"absolute",fontSize:r.getOptionForAxis("axisLabelFontSize",t)+"px",width:r.getOptionForAxis("axisLabelWidth",t)+"px"}},g={x:p("x"),y:p("y"),y2:p("y2")},f=function(t,e,a){var i=document.createElement("div"),r=g["y2"==a?"y2":e];n.update(i.style,r);var o=document.createElement("div");return o.className="dygraph-axis-label dygraph-axis-label-"+e+(a?" dygraph-axis-label-"+a:""),o.innerHTML=t,i.appendChild(o),i};h.save();var v=r.layout_,_=t.dygraph.plotter_.area,y=function(t){return function(e){return r.getOptionForAxis(e,t)}};if(r.getOptionForAxis("drawAxis","y")){if(v.yticks&&v.yticks.length>0){var x=r.numAxes(),m=[y("y"),y("y2")];v.yticks.forEach(function(t){if(void 0!==t.label){s=_.x;var e="y1",a=m[0];1==t.axis&&(s=_.x+_.w,-1,e="y2",a=m[1]);var n=a("axisLabelFontSize");l=_.y+t.pos*_.h,o=f(t.label,"y",2==x?e:null);var r=l-n/2;r<0&&(r=0),r+n+3>c?o.style.bottom="0":o.style.top=r+"px",0===t.axis?(o.style.left=_.x-a("axisLabelWidth")-a("axisTickSize")+"px",o.style.textAlign="right"):1==t.axis&&(o.style.left=_.x+_.w+a("axisTickSize")+"px",o.style.textAlign="left"),o.style.width=a("axisLabelWidth")+"px",u.appendChild(o),i.ylabels_.push(o)}});var b=this.ylabels_[0],w=r.getOptionForAxis("axisLabelFontSize","y");parseInt(b.style.top,10)+w>c-w&&(b.style.top=parseInt(b.style.top,10)-w/2+"px")}var A;if(r.getOption("drawAxesAtZero")){var O=r.toPercentXCoord(0);(O>1||O<0||isNaN(O))&&(O=0),A=e(_.x+O*_.w)}else A=e(_.x);h.strokeStyle=r.getOptionForAxis("axisLineColor","y"),h.lineWidth=r.getOptionForAxis("axisLineWidth","y"),h.beginPath(),h.moveTo(A,a(_.y)),h.lineTo(A,a(_.y+_.h)),h.closePath(),h.stroke(),2==r.numAxes()&&(h.strokeStyle=r.getOptionForAxis("axisLineColor","y2"),h.lineWidth=r.getOptionForAxis("axisLineWidth","y2"),h.beginPath(),h.moveTo(a(_.x+_.w),a(_.y)),h.lineTo(a(_.x+_.w),a(_.y+_.h)),h.closePath(),h.stroke())}if(r.getOptionForAxis("drawAxis","x")){if(v.xticks){var D=y("x");v.xticks.forEach(function(t){if(void 0!==t.label){s=_.x+t.pos*_.w,l=_.y+_.h,o=f(t.label,"x"),o.style.textAlign="center",o.style.top=l+D("axisTickSize")+"px";var e=s-D("axisLabelWidth")/2;e+D("axisLabelWidth")>d&&(e=d-D("axisLabelWidth"),o.style.textAlign="right"),e<0&&(e=0,o.style.textAlign="left"),o.style.left=e+"px",o.style.width=D("axisLabelWidth")+"px",u.appendChild(o),i.xlabels_.push(o)}})}h.strokeStyle=r.getOptionForAxis("axisLineColor","x"),h.lineWidth=r.getOptionForAxis("axisLineWidth","x"),h.beginPath();var E;if(r.getOption("drawAxesAtZero")){var O=r.toPercentYCoord(0,0);(O>1||O<0)&&(O=1),E=a(_.y+O*_.h)}else E=a(_.y+_.h);h.moveTo(e(_.x),E),h.lineTo(e(_.x+_.w),E),h.closePath(),h.stroke()}h.restore()}},a.default=r,e.exports=a.default},{"../dygraph-utils":17}],22:[function(t,e,a){"use strict";Object.defineProperty(a,"__esModule",{value:!0});var 
i=function(){this.title_div_=null,this.xlabel_div_=null,this.ylabel_div_=null,this.y2label_div_=null};i.prototype.toString=function(){return"ChartLabels Plugin"},i.prototype.activate=function(t){return{layout:this.layout,didDrawChart:this.didDrawChart}};var n=function(t){var e=document.createElement("div");return e.style.position="absolute",e.style.left=t.x+"px",e.style.top=t.y+"px",e.style.width=t.w+"px",e.style.height=t.h+"px",e};i.prototype.detachLabels_=function(){for(var t=[this.title_div_,this.xlabel_div_,this.ylabel_div_,this.y2label_div_],e=0;e<t.length;e++){var a=t[e];a&&(a.parentNode&&a.parentNode.removeChild(a))}this.title_div_=null,this.xlabel_div_=null,this.ylabel_div_=null,this.y2label_div_=null};var r=function(t,e,a,i,n){var r=document.createElement("div");r.style.position="absolute",r.style.left=1==a?"0px":e.x+"px",r.style.top=e.y+"px",r.style.width=e.w+"px",r.style.height=e.h+"px",r.style.fontSize=t.getOption("yLabelWidth")-2+"px";var o=document.createElement("div");o.style.position="absolute",o.style.width=e.h+"px",o.style.height=e.w+"px",o.style.top=e.h/2-e.w/2+"px",o.style.left=e.w/2-e.h/2+"px",o.className="dygraph-label-rotate-"+(1==a?"right":"left");var s=document.createElement("div");return s.className=i,s.innerHTML=n,o.appendChild(s),r.appendChild(o),r};i.prototype.layout=function(t){this.detachLabels_();var e=t.dygraph,a=t.chart_div;if(e.getOption("title")){var i=t.reserveSpaceTop(e.getOption("titleHeight"));this.title_div_=n(i),this.title_div_.style.fontSize=e.getOption("titleHeight")-8+"px";var o=document.createElement("div");o.className="dygraph-label dygraph-title",o.innerHTML=e.getOption("title"),this.title_div_.appendChild(o),a.appendChild(this.title_div_)}if(e.getOption("xlabel")){var s=t.reserveSpaceBottom(e.getOption("xLabelHeight"));this.xlabel_div_=n(s),this.xlabel_div_.style.fontSize=e.getOption("xLabelHeight")-2+"px";var o=document.createElement("div");o.className="dygraph-label dygraph-xlabel",o.innerHTML=e.getOption("xlabel"),this.xlabel_div_.appendChild(o),a.appendChild(this.xlabel_div_)}if(e.getOption("ylabel")){var l=t.reserveSpaceLeft(0);this.ylabel_div_=r(e,l,1,"dygraph-label dygraph-ylabel",e.getOption("ylabel")),a.appendChild(this.ylabel_div_)}if(e.getOption("y2label")&&2==e.numAxes()){var h=t.reserveSpaceRight(0);this.y2label_div_=r(e,h,2,"dygraph-label dygraph-y2label",e.getOption("y2label")),a.appendChild(this.y2label_div_)}},i.prototype.didDrawChart=function(t){var e=t.dygraph;this.title_div_&&(this.title_div_.children[0].innerHTML=e.getOption("title")),this.xlabel_div_&&(this.xlabel_div_.children[0].innerHTML=e.getOption("xlabel")),this.ylabel_div_&&(this.ylabel_div_.children[0].children[0].innerHTML=e.getOption("ylabel")),this.y2label_div_&&(this.y2label_div_.children[0].children[0].innerHTML=e.getOption("y2label"))},i.prototype.clearChart=function(){},i.prototype.destroy=function(){this.detachLabels_()},a.default=i,e.exports=a.default},{}],23:[function(t,e,a){"use strict";Object.defineProperty(a,"__esModule",{value:!0});var i=function(){};i.prototype.toString=function(){return"Gridline Plugin"},i.prototype.activate=function(t){return{willDrawChart:this.willDrawChart}},i.prototype.willDrawChart=function(t){function e(t){return Math.round(t)+.5}function a(t){return Math.round(t)-.5}var i,n,r,o,s=t.dygraph,l=t.drawingContext,h=s.layout_,u=t.dygraph.plotter_.area;if(s.getOptionForAxis("drawGrid","y")){for(var 
d=["y","y2"],c=[],p=[],g=[],f=[],v=[],r=0;r<d.length;r++)g[r]=s.getOptionForAxis("drawGrid",d[r]),g[r]&&(c[r]=s.getOptionForAxis("gridLineColor",d[r]),p[r]=s.getOptionForAxis("gridLineWidth",d[r]),v[r]=s.getOptionForAxis("gridLinePattern",d[r]),f[r]=v[r]&&v[r].length>=2);o=h.yticks,l.save(),o.forEach(function(t){if(t.has_tick){var r=t.axis;g[r]&&(l.save(),f[r]&&l.setLineDash&&l.setLineDash(v[r]),l.strokeStyle=c[r],l.lineWidth=p[r],i=e(u.x),n=a(u.y+t.pos*u.h),l.beginPath(),l.moveTo(i,n),l.lineTo(i+u.w,n),l.stroke(),l.restore())}}),l.restore()}if(s.getOptionForAxis("drawGrid","x")){o=h.xticks,l.save();var v=s.getOptionForAxis("gridLinePattern","x"),f=v&&v.length>=2;f&&l.setLineDash&&l.setLineDash(v),l.strokeStyle=s.getOptionForAxis("gridLineColor","x"),l.lineWidth=s.getOptionForAxis("gridLineWidth","x"),o.forEach(function(t){t.has_tick&&(i=e(u.x+t.pos*u.w),n=a(u.y+u.h),l.beginPath(),l.moveTo(i,n),l.lineTo(i,u.y),l.closePath(),l.stroke())}),f&&l.setLineDash&&l.setLineDash([]),l.restore()}},i.prototype.destroy=function(){},a.default=i,e.exports=a.default},{}],24:[function(t,e,a){"use strict";function i(t,e,a){if(!t||t.length<=1)return'<div class="dygraph-legend-line" style="border-bottom-color: '+e+';"></div>';var i,n,r,o,s,l=0,h=0,u=[];for(i=0;i<=t.length;i++)l+=t[i%t.length];if((s=Math.floor(a/(l-t[0])))>1){for(i=0;i<t.length;i++)u[i]=t[i]/a;h=u.length}else{for(s=1,i=0;i<t.length;i++)u[i]=t[i]/l;h=u.length+1}var d="";for(n=0;n<s;n++)for(i=0;i<h;i+=2)r=u[i%u.length],o=i<t.length?u[(i+1)%u.length]:0,d+='<div class="dygraph-legend-dash" style="margin-right: '+o+"em; padding-left: "+r+'em;"></div>';return d}Object.defineProperty(a,"__esModule",{value:!0});var n=t("../dygraph-utils"),r=function(t){if(t&&t.__esModule)return t;var e={};if(null!=t)for(var a in t)Object.prototype.hasOwnProperty.call(t,a)&&(e[a]=t[a]);return e.default=t,e}(n),o=function(){this.legend_div_=null,this.is_generated_div_=!1};o.prototype.toString=function(){return"Legend Plugin"},o.prototype.activate=function(t){var e,a=t.getOption("labelsDiv");return a&&null!==a?e="string"==typeof a||a instanceof String?document.getElementById(a):a:(e=document.createElement("div"),e.className="dygraph-legend",t.graphDiv.appendChild(e),this.is_generated_div_=!0),this.legend_div_=e,this.one_em_width_=10,{select:this.select,deselect:this.deselect,predraw:this.predraw,didDrawChart:this.didDrawChart}};var s=function(t){var e=document.createElement("span");e.setAttribute("style","margin: 0; padding: 0 0 0 1em; border: 0;"),t.appendChild(e);var a=e.offsetWidth;return t.removeChild(e),a},l=function(t){return t.replace(/&/g,"&amp;").replace(/"/g,"&quot;").replace(/</g,"&lt;").replace(/>/g,"&gt;")};o.prototype.select=function(t){var e=t.selectedX,a=t.selectedPoints,i=t.selectedRow,n=t.dygraph.getOption("legend");if("never"===n)return void(this.legend_div_.style.display="none");if("follow"===n){var r=t.dygraph.plotter_.area,s=this.legend_div_.offsetWidth,l=t.dygraph.getOptionForAxis("axisLabelWidth","y"),h=a[0].x*r.w+50,u=a[0].y*r.h-50;h+s+1>r.w&&(h=h-100-s-(l-r.x)),t.dygraph.graphDiv.appendChild(this.legend_div_),this.legend_div_.style.left=l+h+"px",this.legend_div_.style.top=u+"px"}var d=o.generateLegendHTML(t.dygraph,e,a,this.one_em_width_,i);this.legend_div_.innerHTML=d,this.legend_div_.style.display=""},o.prototype.deselect=function(t){"always"!==t.dygraph.getOption("legend")&&(this.legend_div_.style.display="none");var e=s(this.legend_div_);this.one_em_width_=e;var a=o.generateLegendHTML(t.dygraph,void 0,void 
0,e,null);this.legend_div_.innerHTML=a},o.prototype.didDrawChart=function(t){this.deselect(t)},o.prototype.predraw=function(t){if(this.is_generated_div_){t.dygraph.graphDiv.appendChild(this.legend_div_);var e=t.dygraph.getArea(),a=this.legend_div_.offsetWidth;this.legend_div_.style.left=e.x+e.w-a-1+"px",this.legend_div_.style.top=e.y+"px"}},o.prototype.destroy=function(){this.legend_div_=null},o.generateLegendHTML=function(t,e,a,n,s){var h={dygraph:t,x:e,series:[]},u={},d=t.getLabels();if(d)for(var c=1;c<d.length;c++){var p=t.getPropertiesForSeries(d[c]),g=t.getOption("strokePattern",d[c]),f={dashHTML:i(g,p.color,n),label:d[c],labelHTML:l(d[c]),isVisible:p.visible,color:p.color};h.series.push(f),u[d[c]]=f}if(void 0!==e){var v=t.optionsViewForAxis_("x"),_=v("valueFormatter");h.xHTML=_.call(t,e,v,d[0],t,s,0);for(var y=[],x=t.numAxes(),c=0;c<x;c++)y[c]=t.optionsViewForAxis_("y"+(c?1+c:""));var m=t.getOption("labelsShowZeroValues"),b=t.getHighlightSeries();for(c=0;c<a.length;c++){var w=a[c],f=u[w.name];if(f.y=w.yval,0===w.yval&&!m||isNaN(w.canvasy))f.isVisible=!1;else{var p=t.getPropertiesForSeries(w.name),A=y[p.axis-1],O=A("valueFormatter"),D=O.call(t,w.yval,A,w.name,t,s,d.indexOf(w.name));r.update(f,{yHTML:D}),w.name==b&&(f.isHighlighted=!0)}}}return(t.getOption("legendFormatter")||o.defaultFormatter).call(t,h)},o.defaultFormatter=function(t){var e=t.dygraph;if(!0!==e.getOption("showLabelsOnHighlight"))return"";var a,i=e.getOption("labelsSeparateLines");if(void 0===t.x){if("always"!=e.getOption("legend"))return"";a="";for(var n=0;n<t.series.length;n++){var r=t.series[n];r.isVisible&&(""!==a&&(a+=i?"<br/>":" "),a+="<span style='font-weight: bold; color: "+r.color+";'>"+r.dashHTML+" "+r.labelHTML+"</span>")}return a}a=t.xHTML+":";for(var n=0;n<t.series.length;n++){var r=t.series[n];if(r.isVisible){i&&(a+="<br>");a+="<span"+(r.isHighlighted?' 
class="highlight"':"")+"> <b><span style='color: "+r.color+";'>"+r.labelHTML+"</span></b>:&#160;"+r.yHTML+"</span>"}}return a},a.default=o,e.exports=a.default},{"../dygraph-utils":17}],25:[function(t,e,a){"use strict";function i(t){return t&&t.__esModule?t:{default:t}}Object.defineProperty(a,"__esModule",{value:!0});var n=t("../dygraph-utils"),r=function(t){if(t&&t.__esModule)return t;var e={};if(null!=t)for(var a in t)Object.prototype.hasOwnProperty.call(t,a)&&(e[a]=t[a]);return e.default=t,e}(n),o=t("../dygraph-interaction-model"),s=i(o),l=t("../iframe-tarp"),h=i(l),u=function(){this.hasTouchInterface_="undefined"!=typeof TouchEvent,this.isMobileDevice_=/mobile|android/gi.test(navigator.appVersion),this.interfaceCreated_=!1};u.prototype.toString=function(){return"RangeSelector Plugin"},u.prototype.activate=function(t){return this.dygraph_=t,this.getOption_("showRangeSelector")&&this.createInterface_(),{layout:this.reserveSpace_,predraw:this.renderStaticLayer_,didDrawChart:this.renderInteractiveLayer_}},u.prototype.destroy=function(){this.bgcanvas_=null,this.fgcanvas_=null,this.leftZoomHandle_=null,this.rightZoomHandle_=null},u.prototype.getOption_=function(t,e){return this.dygraph_.getOption(t,e)},u.prototype.setDefaultOption_=function(t,e){this.dygraph_.attrs_[t]=e},u.prototype.createInterface_=function(){this.createCanvases_(),this.createZoomHandles_(),this.initInteraction_(),this.getOption_("animatedZooms")&&(console.warn("Animated zooms and range selector are not compatible; disabling animatedZooms."),this.dygraph_.updateOptions({animatedZooms:!1},!0)),this.interfaceCreated_=!0,this.addToGraph_()},u.prototype.addToGraph_=function(){var t=this.graphDiv_=this.dygraph_.graphDiv;t.appendChild(this.bgcanvas_),t.appendChild(this.fgcanvas_),t.appendChild(this.leftZoomHandle_),t.appendChild(this.rightZoomHandle_)},u.prototype.removeFromGraph_=function(){var t=this.graphDiv_;t.removeChild(this.bgcanvas_),t.removeChild(this.fgcanvas_),t.removeChild(this.leftZoomHandle_),t.removeChild(this.rightZoomHandle_),this.graphDiv_=null},u.prototype.reserveSpace_=function(t){this.getOption_("showRangeSelector")&&t.reserveSpaceBottom(this.getOption_("rangeSelectorHeight")+4)},u.prototype.renderStaticLayer_=function(){this.updateVisibility_()&&(this.resize_(),this.drawStaticLayer_())},u.prototype.renderInteractiveLayer_=function(){this.updateVisibility_()&&!this.isChangingRange_&&(this.placeZoomHandles_(),this.drawInteractiveLayer_())},u.prototype.updateVisibility_=function(){var t=this.getOption_("showRangeSelector");if(t)this.interfaceCreated_?this.graphDiv_&&this.graphDiv_.parentNode||this.addToGraph_():this.createInterface_();else if(this.graphDiv_){this.removeFromGraph_();var e=this.dygraph_;setTimeout(function(){e.width_=0,e.resize()},1)}return t},u.prototype.resize_=function(){function t(t,e,a,i){var n=i||r.getContextPixelRatio(e);t.style.top=a.y+"px",t.style.left=a.x+"px",t.width=a.w*n,t.height=a.h*n,t.style.width=a.w+"px",t.style.height=a.h+"px",1!=n&&e.scale(n,n)}var e=this.dygraph_.layout_.getPlotArea(),a=0;this.dygraph_.getOptionForAxis("drawAxis","x")&&(a=this.getOption_("xAxisHeight")||this.getOption_("axisLabelFontSize")+2*this.getOption_("axisTickSize")),this.canvasRect_={x:e.x,y:e.y+e.h+a+4,w:e.w,h:this.getOption_("rangeSelectorHeight")};var 
i=this.dygraph_.getNumericOption("pixelRatio");t(this.bgcanvas_,this.bgcanvas_ctx_,this.canvasRect_,i),t(this.fgcanvas_,this.fgcanvas_ctx_,this.canvasRect_,i)},u.prototype.createCanvases_=function(){this.bgcanvas_=r.createCanvas(),this.bgcanvas_.className="dygraph-rangesel-bgcanvas",this.bgcanvas_.style.position="absolute",this.bgcanvas_.style.zIndex=9,this.bgcanvas_ctx_=r.getContext(this.bgcanvas_),this.fgcanvas_=r.createCanvas(),this.fgcanvas_.className="dygraph-rangesel-fgcanvas",this.fgcanvas_.style.position="absolute",this.fgcanvas_.style.zIndex=9,this.fgcanvas_.style.cursor="default",this.fgcanvas_ctx_=r.getContext(this.fgcanvas_)},u.prototype.createZoomHandles_=function(){var t=new Image;t.className="dygraph-rangesel-zoomhandle",t.style.position="absolute",t.style.zIndex=10,t.style.visibility="hidden",t.style.cursor="col-resize",t.width=9,t.height=16,t.src="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAkAAAAQCAYAAADESFVDAAAAAXNSR0IArs4c6QAAAAZiS0dEANAAzwDP4Z7KegAAAAlwSFlzAAAOxAAADsQBlSsOGwAAAAd0SU1FB9sHGw0cMqdt1UwAAAAZdEVYdENvbW1lbnQAQ3JlYXRlZCB3aXRoIEdJTVBXgQ4XAAAAaElEQVQoz+3SsRFAQBCF4Z9WJM8KCDVwownl6YXsTmCUsyKGkZzcl7zkz3YLkypgAnreFmDEpHkIwVOMfpdi9CEEN2nGpFdwD03yEqDtOgCaun7sqSTDH32I1pQA2Pb9sZecAxc5r3IAb21d6878xsAAAAAASUVORK5CYII=",this.isMobileDevice_&&(t.width*=2,t.height*=2),this.leftZoomHandle_=t,this.rightZoomHandle_=t.cloneNode(!1)},u.prototype.initInteraction_=function(){var t,e,a,i,n,o,l,u,d,c,p,g,f,v,_=this,y=document,x=0,m=null,b=!1,w=!1,A=!this.isMobileDevice_,O=new h.default;t=function(t){var e=_.dygraph_.xAxisExtremes(),a=(e[1]-e[0])/_.canvasRect_.w;return[e[0]+(t.leftHandlePos-_.canvasRect_.x)*a,e[0]+(t.rightHandlePos-_.canvasRect_.x)*a]},e=function(t){return r.cancelEvent(t),b=!0,x=t.clientX,m=t.target?t.target:t.srcElement,"mousedown"!==t.type&&"dragstart"!==t.type||(r.addEvent(y,"mousemove",a),r.addEvent(y,"mouseup",i)),_.fgcanvas_.style.cursor="col-resize",O.cover(),!0},a=function(t){if(!b)return!1;r.cancelEvent(t);var e=t.clientX-x;if(Math.abs(e)<4)return!0;x=t.clientX;var a,i=_.getZoomHandleStatus_();m==_.leftZoomHandle_?(a=i.leftHandlePos+e,a=Math.min(a,i.rightHandlePos-m.width-3),a=Math.max(a,_.canvasRect_.x)):(a=i.rightHandlePos+e,a=Math.min(a,_.canvasRect_.x+_.canvasRect_.w),a=Math.max(a,i.leftHandlePos+m.width+3));var o=m.width/2;return m.style.left=a-o+"px",_.drawInteractiveLayer_(),A&&n(),!0},i=function(t){return!!b&&(b=!1,O.uncover(),r.removeEvent(y,"mousemove",a),r.removeEvent(y,"mouseup",i),_.fgcanvas_.style.cursor="default",A||n(),!0)},n=function(){try{var e=_.getZoomHandleStatus_();if(_.isChangingRange_=!0,e.isZoomed){var a=t(e);_.dygraph_.doZoomXDates_(a[0],a[1])}else _.dygraph_.resetZoom()}finally{_.isChangingRange_=!1}},o=function(t){var e=_.leftZoomHandle_.getBoundingClientRect(),a=e.left+e.width/2;e=_.rightZoomHandle_.getBoundingClientRect();var i=e.left+e.width/2;return t.clientX>a&&t.clientX<i},l=function(t){return!(w||!o(t)||!_.getZoomHandleStatus_().isZoomed)&&(r.cancelEvent(t),w=!0,x=t.clientX,"mousedown"===t.type&&(r.addEvent(y,"mousemove",u),r.addEvent(y,"mouseup",d)),!0)},u=function(t){if(!w)return!1;r.cancelEvent(t);var e=t.clientX-x;if(Math.abs(e)<4)return!0;x=t.clientX;var a=_.getZoomHandleStatus_(),i=a.leftHandlePos,n=a.rightHandlePos,o=n-i;i+e<=_.canvasRect_.x?(i=_.canvasRect_.x,n=i+o):n+e>=_.canvasRect_.x+_.canvasRect_.w?(n=_.canvasRect_.x+_.canvasRect_.w,i=n-o):(i+=e,n+=e);var s=_.leftZoomHandle_.width/2;return 
_.leftZoomHandle_.style.left=i-s+"px",_.rightZoomHandle_.style.left=n-s+"px",_.drawInteractiveLayer_(),A&&c(),!0},d=function(t){return!!w&&(w=!1,r.removeEvent(y,"mousemove",u),r.removeEvent(y,"mouseup",d),A||c(),!0)},c=function(){try{_.isChangingRange_=!0,_.dygraph_.dateWindow_=t(_.getZoomHandleStatus_()),_.dygraph_.drawGraph_(!1)}finally{_.isChangingRange_=!1}},p=function(t){if(!b&&!w){var e=o(t)?"move":"default";e!=_.fgcanvas_.style.cursor&&(_.fgcanvas_.style.cursor=e)}},g=function(t){"touchstart"==t.type&&1==t.targetTouches.length?e(t.targetTouches[0])&&r.cancelEvent(t):"touchmove"==t.type&&1==t.targetTouches.length?a(t.targetTouches[0])&&r.cancelEvent(t):i(t)},f=function(t){"touchstart"==t.type&&1==t.targetTouches.length?l(t.targetTouches[0])&&r.cancelEvent(t):"touchmove"==t.type&&1==t.targetTouches.length?u(t.targetTouches[0])&&r.cancelEvent(t):d(t)},v=function(t,e){for(var a=["touchstart","touchend","touchmove","touchcancel"],i=0;i<a.length;i++)_.dygraph_.addAndTrackEvent(t,a[i],e)},this.setDefaultOption_("interactionModel",s.default.dragIsPanInteractionModel),this.setDefaultOption_("panEdgeFraction",1e-4);var D=window.opera?"mousedown":"dragstart";this.dygraph_.addAndTrackEvent(this.leftZoomHandle_,D,e),this.dygraph_.addAndTrackEvent(this.rightZoomHandle_,D,e),this.dygraph_.addAndTrackEvent(this.fgcanvas_,"mousedown",l),this.dygraph_.addAndTrackEvent(this.fgcanvas_,"mousemove",p),this.hasTouchInterface_&&(v(this.leftZoomHandle_,g),v(this.rightZoomHandle_,g),v(this.fgcanvas_,f))},u.prototype.drawStaticLayer_=function(){var t=this.bgcanvas_ctx_;t.clearRect(0,0,this.canvasRect_.w,this.canvasRect_.h);try{this.drawMiniPlot_()}catch(t){console.warn(t)}this.bgcanvas_ctx_.lineWidth=this.getOption_("rangeSelectorBackgroundLineWidth"),t.strokeStyle=this.getOption_("rangeSelectorBackgroundStrokeColor"),t.beginPath(),t.moveTo(.5,.5),t.lineTo(.5,this.canvasRect_.h-.5),t.lineTo(this.canvasRect_.w-.5,this.canvasRect_.h-.5),t.lineTo(this.canvasRect_.w-.5,.5),t.stroke()},u.prototype.drawMiniPlot_=function(){var t=this.getOption_("rangeSelectorPlotFillColor"),e=this.getOption_("rangeSelectorPlotFillGradientColor"),a=this.getOption_("rangeSelectorPlotStrokeColor");if(t||a){var i=this.getOption_("stepPlot"),n=this.computeCombinedSeriesAndLimits_(),r=n.yMax-n.yMin,o=this.bgcanvas_ctx_,s=this.dygraph_.xAxisExtremes(),l=Math.max(s[1]-s[0],1e-30),h=(this.canvasRect_.w-.5)/l,u=(this.canvasRect_.h-.5)/r,d=this.canvasRect_.w-.5,c=this.canvasRect_.h-.5,p=null,g=null;o.beginPath(),o.moveTo(.5,c);for(var f=0;f<n.data.length;f++){var v=n.data[f],_=null!==v[0]?(v[0]-s[0])*h:NaN,y=null!==v[1]?c-(v[1]-n.yMin)*u:NaN;(i||null===p||Math.round(_)!=Math.round(p))&&(isFinite(_)&&isFinite(y)?(null===p?o.lineTo(_,c):i&&o.lineTo(_,g),o.lineTo(_,y),p=_,g=y):(null!==p&&(i?(o.lineTo(_,g),o.lineTo(_,c)):o.lineTo(p,c)),p=g=null))}if(o.lineTo(d,c),o.closePath(),t){var x=this.bgcanvas_ctx_.createLinearGradient(0,0,0,c);e&&x.addColorStop(0,e),x.addColorStop(1,t),this.bgcanvas_ctx_.fillStyle=x,o.fill()}a&&(this.bgcanvas_ctx_.strokeStyle=a,this.bgcanvas_ctx_.lineWidth=this.getOption_("rangeSelectorPlotLineWidth"),o.stroke())}},u.prototype.computeCombinedSeriesAndLimits_=function(){var t,e=this.dygraph_,a=this.getOption_("logscale"),i=e.numColumns(),n=e.getLabels(),o=new Array(i),s=!1,l=e.visibility(),h=[];for(t=1;t<i;t++){var u=this.getOption_("showInRangeSelector",n[t]);h.push(u),null!==u&&(s=!0)}if(s)for(t=1;t<i;t++)o[t]=h[t-1];else for(t=1;t<i;t++)o[t]=l[t-1];var 
d=[],c=e.dataHandler_,p=e.attributes_;for(t=1;t<e.numColumns();t++)if(o[t]){var g=c.extractSeries(e.rawData_,t,p);e.rollPeriod()>1&&(g=c.rollingAverage(g,e.rollPeriod(),p)),d.push(g)}var f=[];for(t=0;t<d[0].length;t++){for(var v=0,_=0,y=0;y<d.length;y++){var x=d[y][t][1];null===x||isNaN(x)||(_++,v+=x)}f.push([d[0][t][0],v/_])}var m=Number.MAX_VALUE,b=-Number.MAX_VALUE;for(t=0;t<f.length;t++){var w=f[t][1];null!==w&&isFinite(w)&&(!a||w>0)&&(m=Math.min(m,w),b=Math.max(b,w))}if(a)for(b=r.log10(b),b+=.25*b,m=r.log10(m),t=0;t<f.length;t++)f[t][1]=r.log10(f[t][1]);else{var A,O=b-m;A=O<=Number.MIN_VALUE?.25*b:.25*O,b+=A,m-=A}return{data:f,yMin:m,yMax:b}},u.prototype.placeZoomHandles_=function(){var t=this.dygraph_.xAxisExtremes(),e=this.dygraph_.xAxisRange(),a=t[1]-t[0],i=Math.max(0,(e[0]-t[0])/a),n=Math.max(0,(t[1]-e[1])/a),r=this.canvasRect_.x+this.canvasRect_.w*i,o=this.canvasRect_.x+this.canvasRect_.w*(1-n),s=Math.max(this.canvasRect_.y,this.canvasRect_.y+(this.canvasRect_.h-this.leftZoomHandle_.height)/2),l=this.leftZoomHandle_.width/2;this.leftZoomHandle_.style.left=r-l+"px",this.leftZoomHandle_.style.top=s+"px",this.rightZoomHandle_.style.left=o-l+"px",this.rightZoomHandle_.style.top=this.leftZoomHandle_.style.top,this.leftZoomHandle_.style.visibility="visible",this.rightZoomHandle_.style.visibility="visible"},u.prototype.drawInteractiveLayer_=function(){var t=this.fgcanvas_ctx_;t.clearRect(0,0,this.canvasRect_.w,this.canvasRect_.h);var e=this.canvasRect_.w-1,a=this.canvasRect_.h-1,i=this.getZoomHandleStatus_();if(t.strokeStyle=this.getOption_("rangeSelectorForegroundStrokeColor"),t.lineWidth=this.getOption_("rangeSelectorForegroundLineWidth"),i.isZoomed){var n=Math.max(1,i.leftHandlePos-this.canvasRect_.x),r=Math.min(e,i.rightHandlePos-this.canvasRect_.x);t.fillStyle="rgba(240, 240, 240, "+this.getOption_("rangeSelectorAlpha").toString()+")",t.fillRect(0,0,n,this.canvasRect_.h),t.fillRect(r,0,this.canvasRect_.w-r,this.canvasRect_.h),t.beginPath(),t.moveTo(1,1),t.lineTo(n,1),t.lineTo(n,a),t.lineTo(r,a),t.lineTo(r,1),t.lineTo(e,1),t.stroke()}else t.beginPath(),t.moveTo(1,1),t.lineTo(1,a),t.lineTo(e,a),t.lineTo(e,1),t.stroke()},u.prototype.getZoomHandleStatus_=function(){var t=this.leftZoomHandle_.width/2,e=parseFloat(this.leftZoomHandle_.style.left)+t,a=parseFloat(this.rightZoomHandle_.style.left)+t;return{leftHandlePos:e,rightHandlePos:a,isZoomed:e-1>this.canvasRect_.x||a+1<this.canvasRect_.x+this.canvasRect_.w}},a.default=u,e.exports=a.default},{"../dygraph-interaction-model":12,"../dygraph-utils":17,"../iframe-tarp":19}]},{},[18])(18)});
+//# sourceMappingURL=dist/dygraph.min.js.map
diff --git a/web/gui/lib/dygraph-smooth-plotter-c91c859.js b/web/gui/lib/dygraph-smooth-plotter-c91c859.js
new file mode 100644
index 0000000..c0b76fb
--- /dev/null
+++ b/web/gui/lib/dygraph-smooth-plotter-c91c859.js
@@ -0,0 +1,141 @@
+// SPDX-License-Identifier: MIT
+(function() {
+"use strict";
+
+var Dygraph;
+if (window.Dygraph) {
+ Dygraph = window.Dygraph;
+} else if (typeof(module) !== 'undefined') {
+ Dygraph = require('../dygraph');
+}
+
+/**
+ * Given three sequential points, p0, p1 and p2, find the left and right
+ * control points for p1.
+ *
+ * The three points are expected to have x and y properties.
+ *
+ * The alpha parameter controls the amount of smoothing.
+ * If α=0, then both control points will be the same as p1 (i.e. no smoothing).
+ *
+ * Returns [l1x, l1y, r1x, r1y]
+ *
+ * It's guaranteed that the line from (l1x, l1y) to (r1x, r1y) passes through p1.
+ * Unless allowFalseExtrema is set, it's also guaranteed that:
+ * l1y ∈ [p0.y, p1.y]
+ * r1y ∈ [p1.y, p2.y]
+ *
+ * The basic algorithm is:
+ * 1. Place the control points l1 and r1 a fraction α of the way from p1
+ *    toward p0 and p2, respectively.
+ * 2. Shift l1 and r1 so that the line l1–r1 passes through p1.
+ * 3. Adjust to prevent false extrema while keeping p1 on the l1–r1 line.
+ *
+ * This is loosely based on the HighCharts algorithm.
+ */
+function getControlPoints(p0, p1, p2, opt_alpha, opt_allowFalseExtrema) {
+ var alpha = (opt_alpha !== undefined) ? opt_alpha : 1/3; // 0=no smoothing, 1=crazy smoothing
+ var allowFalseExtrema = opt_allowFalseExtrema || false;
+
+ if (!p2) {
+ return [p1.x, p1.y, null, null];
+ }
+
+ // Step 1: Position the control points along each line segment.
+ var l1x = (1 - alpha) * p1.x + alpha * p0.x,
+ l1y = (1 - alpha) * p1.y + alpha * p0.y,
+ r1x = (1 - alpha) * p1.x + alpha * p2.x,
+ r1y = (1 - alpha) * p1.y + alpha * p2.y;
+
+ // Step 2: shift the points vertically so that p1 lies on the l1–r1 line.
+ if (l1x != r1x) {
+ // Derivation: the l1–r1 line evaluated at p1.x is
+ // r1y + (p1.x - r1x) * (l1y - r1y) / (l1x - r1x); deltaY is the offset
+ // that makes that value equal p1.y.
+ var deltaY = p1.y - r1y - (p1.x - r1x) * (l1y - r1y) / (l1x - r1x);
+ l1y += deltaY;
+ r1y += deltaY;
+ }
+
+ // Step 3: correct to avoid false extrema.
+ if (!allowFalseExtrema) {
+ if (l1y > p0.y && l1y > p1.y) {
+ l1y = Math.max(p0.y, p1.y);
+ r1y = 2 * p1.y - l1y;
+ } else if (l1y < p0.y && l1y < p1.y) {
+ l1y = Math.min(p0.y, p1.y);
+ r1y = 2 * p1.y - l1y;
+ }
+
+ if (r1y > p1.y && r1y > p2.y) {
+ r1y = Math.max(p1.y, p2.y);
+ l1y = 2 * p1.y - r1y;
+ } else if (r1y < p1.y && r1y < p2.y) {
+ r1y = Math.min(p1.y, p2.y);
+ l1y = 2 * p1.y - r1y;
+ }
+ }
+
+ return [l1x, l1y, r1x, r1y];
+}
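+
+// Example: with p0=(0,0), p1=(1,1), p2=(2,0) and the default alpha=1/3,
+// step 1 gives l1=(2/3, 2/3) and r1=(4/3, 2/3); because l1y == r1y, step 2
+// shifts both up by deltaY = 1/3 so the horizontal line y=1 passes through
+// p1, and step 3 changes nothing since p1 is a genuine extremum:
+//
+//   getControlPoints({x: 0, y: 0}, {x: 1, y: 1}, {x: 2, y: 0})
+//   // -> [0.666..., 1, 1.333..., 1]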
+
+// True iff x is a usable number, i.e. none of (null, undefined, NaN).
+// Note: the truthiness test also rejects 0.
+function isOK(x) {
+ return !!x && !isNaN(x);
+}
+
+// A plotter which uses splines to create a smooth curve.
+// See tests/plotters.html for a demo.
+// The amount of smoothing can be controlled via smoothPlotter.smoothing.
+function smoothPlotter(e) {
+ var ctx = e.drawingContext,
+ points = e.points;
+
+ ctx.beginPath();
+ ctx.moveTo(points[0].canvasx, points[0].canvasy);
+
+ // right control point for previous point
+ var lastRightX = points[0].canvasx, lastRightY = points[0].canvasy;
+
+ for (var i = 1; i < points.length; i++) {
+ var p0 = points[i - 1],
+ p1 = points[i],
+ p2 = points[i + 1];
+ p0 = p0 && isOK(p0.canvasy) ? p0 : null;
+ p1 = p1 && isOK(p1.canvasy) ? p1 : null;
+ p2 = p2 && isOK(p2.canvasy) ? p2 : null;
+ if (p0 && p1) {
+ var controls = getControlPoints({x: p0.canvasx, y: p0.canvasy},
+ {x: p1.canvasx, y: p1.canvasy},
+ p2 && {x: p2.canvasx, y: p2.canvasy},
+ smoothPlotter.smoothing);
+ // Uncomment to show the control points:
+ // ctx.lineTo(lastRightX, lastRightY);
+ // ctx.lineTo(controls[0], controls[1]);
+ // ctx.lineTo(p1.canvasx, p1.canvasy);
+ lastRightX = (lastRightX !== null) ? lastRightX : p0.canvasx;
+ lastRightY = (lastRightY !== null) ? lastRightY : p0.canvasy;
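+ // Draw the cubic segment from p0 to p1: the first control point is p0's
+ // right control (carried over from the previous iteration), the second is
+ // p1's left control computed above.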
+ ctx.bezierCurveTo(lastRightX, lastRightY,
+ controls[0], controls[1],
+ p1.canvasx, p1.canvasy);
+ lastRightX = controls[2];
+ lastRightY = controls[3];
+ } else if (p1) {
+ // We're starting again after a missing point.
+ ctx.moveTo(p1.canvasx, p1.canvasy);
+ lastRightX = p1.canvasx;
+ lastRightY = p1.canvasy;
+ } else {
+ lastRightX = lastRightY = null;
+ }
+ }
+
+ ctx.stroke();
+}
+smoothPlotter.smoothing = 1/3;
+smoothPlotter._getControlPoints = getControlPoints; // for testing
+
+// Older versions exported a global; this will be removed in the future.
+// The preferred way to access smoothPlotter is via Dygraph.smoothPlotter.
+window.smoothPlotter = smoothPlotter;
+Dygraph.smoothPlotter = smoothPlotter;
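+
+// Example usage (a minimal sketch; assumes `div` is a container element and
+// `data` is a dygraphs-compatible data set, both defined elsewhere):
+//
+//   smoothPlotter.smoothing = 0.5;  // optional: smoother than the default 1/3
+//   new Dygraph(div, data, { plotter: smoothPlotter });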
+
+})();
diff --git a/web/gui/lib/fontawesome-all-5.0.1.min.js b/web/gui/lib/fontawesome-all-5.0.1.min.js
new file mode 100644
index 0000000..d2775aa
--- /dev/null
+++ b/web/gui/lib/fontawesome-all-5.0.1.min.js
@@ -0,0 +1,6 @@
+/*!
+ * Font Awesome Free 5.0.1 by @fontawesome - http://fontawesome.com
+ * License - http://fontawesome.com/license (Icons: CC BY 4.0, Fonts: SIL OFL 1.1, Code: MIT License)
+ * SPDX-License-Identifier: MIT
+ */
+!function(){"use strict";function c(c){"function"==typeof s.hooks.addPack?s.hooks.addPack(c,m):s.styles[c]=r({},s.styles[c]||{},m)}var l={};try{"undefined"!=typeof window&&(l=window)}catch(c){}var h=(l.navigator||{}).userAgent,v=void 0===h?"":h,z=l,e=(~v.indexOf("MSIE")||v.indexOf("Trident/"),[1,2,3,4,5,6,7,8,9,10]),a=e.concat([11,12,13,14,15,16,17,18,19,20]),m=(["xs","sm","lg","fw","ul","li","border","pull-left","pull-right","spin","pulse","rotate-90","rotate-180","rotate-270","flip-horizontal","flip-vertical","stack","stack-1x","stack-2x","inverse","layers","layers-text","layers-counter"].concat(e.map(function(c){return c+"x"})).concat(a.map(function(c){return"w-"+c})),{"500px":[448,512,[],"f26e","M103.3 344.3c-6.5-14.2-6.9-18.3 7.4-23.1 25.6-8 8 9.2 43.2 49.2h.3v-93.9c1.2-50.2 44-92.2 97.7-92.2 53.9 0 97.7 43.5 97.7 96.8 0 63.4-60.8 113.2-128.5 93.3-10.5-4.2-2.1-31.7 8.5-28.6 53 0 89.4-10.1 89.4-64.4 0-61-77.1-89.6-116.9-44.6-23.5 26.4-17.6 42.1-17.6 157.6 50.7 31 118.3 22 160.4-20.1 24.8-24.8 38.5-58 38.5-93 0-35.2-13.8-68.2-38.8-93.3-24.8-24.8-57.8-38.5-93.3-38.5s-68.8 13.8-93.5 38.5c-.3.3-16 16.5-21.2 23.9l-.5.6c-3.3 4.7-6.3 9.1-20.1 6.1-6.9-1.7-14.3-5.8-14.3-11.8V20c0-5 3.9-10.5 10.5-10.5h241.3c8.3 0 8.3 11.6 8.3 15.1 0 3.9 0 15.1-8.3 15.1H130.3v132.9h.3c104.2-109.8 282.8-36 282.8 108.9 0 178.1-244.8 220.3-310.1 62.8zm63.3-260.8c-.5 4.2 4.6 24.5 14.6 20.6C306 56.6 384 144.5 390.6 144.5c4.8 0 22.8-15.3 14.3-22.8-93.2-89-234.5-57-238.3-38.2zM393 414.7C283 524.6 94 475.5 61 310.5c0-12.2-30.4-7.4-28.9 3.3 24 173.4 246 256.9 381.6 121.3 6.9-7.8-12.6-28.4-20.7-20.4zM213.6 306.6c0 4 4.3 7.3 5.5 8.5 3 3 6.1 4.4 8.5 4.4 3.8 0 2.6.2 22.3-19.5 19.6 19.3 19.1 19.5 22.3 19.5 5.4 0 18.5-10.4 10.7-18.2L265.6 284l18.2-18.2c6.3-6.8-10.1-21.8-16.2-15.7L249.7 268c-18.6-18.8-18.4-19.5-21.5-19.5-5 0-18 11.7-12.4 17.3L234 284c-18.1 17.9-20.4 19.2-20.4 22.6z"],"accessible-icon":[448,512,[],"f368","M423.9 255.8L411 413.1c-3.3 40.7-63.9 35.1-60.6-4.9l10-122.5-41.1 2.3c10.1 20.7 15.8 43.9 15.8 68.5 0 41.2-16.1 78.7-42.3 106.5l-39.3-39.3c57.9-63.7 13.1-167.2-74-167.2-25.9 0-49.5 9.9-67.2 26L73 243.2c22-20.7 50.1-35.1 81.4-40.2l75.3-85.7-42.6-24.8-51.6 46c-30 26.8-70.6-18.5-40.5-45.4l68-60.7c9.8-8.8 24.1-10.2 35.5-3.6 0 0 139.3 80.9 139.5 81.1 16.2 10.1 20.7 36 6.1 52.6L285.7 229l106.1-5.9c18.5-1.1 33.6 14.4 32.1 32.7zm-64.9-154c28.1 0 50.9-22.8 50.9-50.9C409.9 22.8 387.1 0 359 0c-28.1 0-50.9 22.8-50.9 50.9 0 28.1 22.8 50.9 50.9 50.9zM179.6 456.5c-80.6 0-127.4-90.6-82.7-156.1l-39.7-39.7C36.4 287 24 320.3 24 356.4c0 130.7 150.7 201.4 251.4 122.5l-39.7-39.7c-16 10.9-35.3 17.3-56.1 17.3z"],accusoft:[640,512,[],"f369","M482.2 372.1C476.5 365.2 250 75 242.3 65.5c-13.7-17.2 0-16.8 19.2-16.9 9.7-.1 106.3-.6 116.5-.6 24.1-.1 28.7.6 38.4 12.8 2.1 2.7 205.1 245.8 207.2 248.3 5.5 6.7 15.2 19.1 7.2 23.4-2.4 1.3-114.6 47.7-117.8 48.9-10.1 4-17.5 6.8-30.8-9.3m114.7-5.6s-115 50.4-117.5 51.6c-16 7.3-26.9-3.2-36.7-14.6l-57.1-74c-5.4-.9-60.4-9.6-65.3-9.3-3.1.2-9.6.8-14.4 2.9-4.9 2.1-145.2 52.8-150.2 54.7-5.1 2-11.4 3.6-11.1 7.6.2 2.5 2 2.6 4.6 3.5 2.7.8 300.9 67.6 308 69.1 15.6 3.3 38.5 10.5 53.6 1.7 2.1-1.2 123.8-76.4 125.8-77.8 5.4-4 4.3-6.8-1.7-8.2-2.3-.3-24.6-4.7-38-7.2m-326-181.3s-12 1.6-25 15.1c-9 9.3-242.1 239.1-243.4 240.9-7 10 1.6 6.8 15.7 1.7.8 0 114.5-36.6 114.5-36.6.5-.6-.1-.1.6-.6-.4-5.1-.8-26.2-1-27.7-.6-5.2 2.2-6.9 7-8.9l92.6-33.8c.6-.8 88.5-81.7 90.2-83.3v-1l-51.2-65.8"],adn:[496,512,[],"f170","M248 167.5l64.9 98.8H183.1l64.9-98.8zM496 256c0 136.9-111.1 248-248 248S0 392.9 0 256 111.1 8 248 8s248 111.1 248 
248zm-99.8 82.7L248 115.5 99.8 338.7h30.4l33.6-51.7h168.6l33.6 51.7h30.2z"],adversal:[512,512,[],"f36a","M482.1 32H28.7C5.8 32 0 37.9 0 60.9v390.2C0 474.4 5.8 480 28.7 480h453.4c24.4 0 29.9-5.2 29.9-29.7V62.2c0-24.6-5.4-30.2-29.9-30.2zM178.4 220.3c-27.5-20.2-72.1-8.7-84.2 23.4-4.3 11.1-9.3 9.5-17.5 8.3-9.7-1.5-17.2-3.2-22.5-5.5-28.8-11.4 8.6-55.3 24.9-64.3 41.1-21.4 83.4-22.2 125.3-4.8 40.9 16.8 34.5 59.2 34.5 128.5 2.7 25.8-4.3 58.3 9.3 88.8 1.9 4.4.4 7.9-2.7 10.7-8.4 6.7-39.3 2.2-46.6-7.4-1.9-2.2-1.8-3.6-3.9-6.2-3.6-3.9-7.3-2.2-11.9 1-57.4 36.4-140.3 21.4-147-43.3-3.1-29.3 12.4-57.1 39.6-71 38.2-19.5 112.2-11.8 114-30.9 1.1-10.2-1.9-20.1-11.3-27.3zm286.7 222c0 15.1-11.1 9.9-17.8 9.9H52.4c-7.4 0-18.2 4.8-17.8-10.7.4-13.9 10.5-9.1 17.1-9.1 132.3-.4 264.5-.4 396.8 0 6.8 0 16.6-4.4 16.6 9.9zm3.8-340.5v291c0 5.7-.7 13.9-8.1 13.9-12.4-.4-27.5 7.1-36.1-5.6-5.8-8.7-7.8-4-12.4-1.2-53.4 29.7-128.1 7.1-144.4-85.2-6.1-33.4-.7-67.1 15.7-100 11.8-23.9 56.9-76.1 136.1-30.5v-71c0-26.2-.1-26.2 26-26.2 3.1 0 6.6.4 9.7 0 10.1-.8 13.6 4.4 13.6 14.3-.1.2-.1.3-.1.5zm-51.5 232.3c-19.5 47.6-72.9 43.3-90 5.2-15.1-33.3-15.5-68.2.4-101.5 16.3-34.1 59.7-35.7 81.5-4.8 20.6 28.8 14.9 84.6 8.1 101.1zm-294.8 35.3c-7.5-1.3-33-3.3-33.7-27.8-.4-13.9 7.8-23 19.8-25.8 24.4-5.9 49.3-9.9 73.7-14.7 8.9-2 7.4 4.4 7.8 9.5 1.4 33-26.1 59.2-67.6 58.8z"],affiliatetheme:[512,512,[],"f36b","M159.7 237.4C108.4 308.3 43.1 348.2 14 326.6-15.2 304.9 2.8 230 54.2 159.1c51.3-70.9 116.6-110.8 145.7-89.2 29.1 21.6 11.1 96.6-40.2 167.5zm351.2-57.3C437.1 303.5 319 367.8 246.4 323.7c-25-15.2-41.3-41.2-49-73.8-33.6 64.8-92.8 113.8-164.1 133.2 49.8 59.3 124.1 96.9 207 96.9 150 0 271.6-123.1 271.6-274.9.1-8.5-.3-16.8-1-25z"],algolia:[448,512,[],"f36c","M229.3 182.6c-49.3 0-89.2 39.9-89.2 89.2 0 49.3 39.9 89.2 89.2 89.2s89.2-39.9 89.2-89.2c0-49.3-40-89.2-89.2-89.2zm62.7 56.6l-58.9 30.6c-1.8.9-3.8-.4-3.8-2.3V201c0-1.5 1.3-2.7 2.7-2.6 26.2 1 48.9 15.7 61.1 37.1.7 1.3.2 3-1.1 3.7zM389.1 32H58.9C26.4 32 0 58.4 0 90.9V421c0 32.6 26.4 59 58.9 59H389c32.6 0 58.9-26.4 58.9-58.9V90.9C448 58.4 421.6 32 389.1 32zm-202.6 84.7c0-10.8 8.7-19.5 19.5-19.5h45.3c10.8 0 19.5 8.7 19.5 19.5v15.4c0 1.8-1.7 3-3.3 2.5-12.3-3.4-25.1-5.1-38.1-5.1-13.5 0-26.7 1.8-39.4 5.5-1.7.5-3.4-.8-3.4-2.5v-15.8zm-84.4 37l9.2-9.2c7.6-7.6 19.9-7.6 27.5 0l7.7 7.7c1.1 1.1 1 3-.3 4-6.2 4.5-12.1 9.4-17.6 14.9-5.4 5.4-10.4 11.3-14.8 17.4-1 1.3-2.9 1.5-4 .3l-7.7-7.7c-7.6-7.5-7.6-19.8 0-27.4zm127.2 244.8c-70 0-126.6-56.7-126.6-126.6s56.7-126.6 126.6-126.6c70 0 126.6 56.6 126.6 126.6 0 69.8-56.7 126.6-126.6 126.6z"],amazon:[448,512,[],"f270","M257.2 162.7c-48.7 1.8-169.5 15.5-169.5 117.5 0 109.5 138.3 114 183.5 43.2 6.5 10.2 35.4 37.5 45.3 46.8l56.8-56S341 288.9 341 261.4V114.3C341 89 316.5 32 228.7 32 140.7 32 94 87 94 136.3l73.5 6.8c16.3-49.5 54.2-49.5 54.2-49.5 40.7-.1 35.5 29.8 35.5 69.1zm0 86.8c0 80-84.2 68-84.2 17.2 0-47.2 50.5-56.7 84.2-57.8v40.6zm136 163.5c-7.7 10-70 67-174.5 67S34.2 408.5 9.7 379c-6.8-7.7 1-11.3 5.5-8.3C88.5 415.2 203 488.5 387.7 401c7.5-3.7 13.3 2 5.5 12zm39.8 2.2c-6.5 15.8-16 26.8-21.2 31-5.5 4.5-9.5 2.7-6.5-3.8s19.3-46.5 12.7-55c-6.5-8.3-37-4.3-48-3.2-10.8 1-13 2-14-.3-2.3-5.7 21.7-15.5 37.5-17.5 15.7-1.8 41-.8 46 5.7 3.7 5.1 0 27.1-6.5 43.1z"],amilia:[448,512,[],"f36d","M240.1 32c-61.9 0-131.5 16.9-184.2 55.4-5.1 3.1-9.1 9.2-7.2 19.4 1.1 5.1 5.1 27.4 10.2 39.6 4.1 10.2 14.2 10.2 20.3 6.1 32.5-22.3 96.5-47.7 152.3-47.7 57.9 0 58.9 28.4 58.9 73.1v38.5C203 227.7 78.2 251 46.7 264.2 11.2 280.5 16.3 357.7 16.3 376s15.2 104 124.9 104c47.8 0 113.7-20.7 153.3-42.1v25.4c0 3 
2.1 8.2 6.1 9.1 3.1 1 50.7 2 59.9 2s62.5.3 66.5-.7c4.1-1 5.1-6.1 5.1-9.1V168c-.1-80.3-57.9-136-192-136zm-87.9 327.7c0-12.2-3-42.7 18.3-52.9 24.3-13.2 75.1-29.4 119.8-33.5V380c-21.4 13.2-48.7 24.4-79.1 24.4-52.8 0-58.9-33.5-59-44.7"],android:[448,512,[],"f17b","M89.6 204.5v115.8c0 15.4-12.1 27.7-27.5 27.7-15.3 0-30.1-12.4-30.1-27.7V204.5c0-15.1 14.8-27.5 30.1-27.5 15.1 0 27.5 12.4 27.5 27.5zm10.8 157c0 16.4 13.2 29.6 29.6 29.6h19.9l.3 61.1c0 36.9 55.2 36.6 55.2 0v-61.1h37.2v61.1c0 36.7 55.5 36.8 55.5 0v-61.1h20.2c16.2 0 29.4-13.2 29.4-29.6V182.1H100.4v179.4zm248-189.1H99.3c0-42.8 25.6-80 63.6-99.4l-19.1-35.3c-2.8-4.9 4.3-8 6.7-3.8l19.4 35.6c34.9-15.5 75-14.7 108.3 0L297.5 34c2.5-4.3 9.5-1.1 6.7 3.8L285.1 73c37.7 19.4 63.3 56.6 63.3 99.4zm-170.7-55.5c0-5.7-4.6-10.5-10.5-10.5-5.7 0-10.2 4.8-10.2 10.5s4.6 10.5 10.2 10.5c5.9 0 10.5-4.8 10.5-10.5zm113.4 0c0-5.7-4.6-10.5-10.2-10.5-5.9 0-10.5 4.8-10.5 10.5s4.6 10.5 10.5 10.5c5.6 0 10.2-4.8 10.2-10.5zm94.8 60.1c-15.1 0-27.5 12.1-27.5 27.5v115.8c0 15.4 12.4 27.7 27.5 27.7 15.4 0 30.1-12.4 30.1-27.7V204.5c0-15.4-14.8-27.5-30.1-27.5z"],angellist:[448,512,[],"f209","M347.1 215.4c11.7-32.6 45.4-126.9 45.4-157.1 0-26.6-15.7-48.9-43.7-48.9-44.6 0-84.6 131.7-97.1 163.1C242 144 196.6 0 156.6 0c-31.1 0-45.7 22.9-45.7 51.7 0 35.3 34.2 126.8 46.6 162-6.3-2.3-13.1-4.3-20-4.3-23.4 0-48.3 29.1-48.3 52.6 0 8.9 4.9 21.4 8 29.7-36.9 10-51.1 34.6-51.1 71.7C46 435.6 114.4 512 210.6 512c118 0 191.4-88.6 191.4-202.9 0-43.1-6.9-82-54.9-93.7zM311.7 108c4-12.3 21.1-64.3 37.1-64.3 8.6 0 10.9 8.9 10.9 16 0 19.1-38.6 124.6-47.1 148l-34-6 33.1-93.7zM142.3 48.3c0-11.9 14.5-45.7 46.3 47.1l34.6 100.3c-15.6-1.3-27.7-3-35.4 1.4-10.9-28.8-45.5-119.7-45.5-148.8zM140 244c29.3 0 67.1 94.6 67.1 107.4 0 5.1-4.9 11.4-10.6 11.4-20.9 0-76.9-76.9-76.9-97.7.1-7.7 12.7-21.1 20.4-21.1zm184.3 186.3c-29.1 32-66.3 48.6-109.7 48.6-59.4 0-106.3-32.6-128.9-88.3-17.1-43.4 3.8-68.3 20.6-68.3 11.4 0 54.3 60.3 54.3 73.1 0 4.9-7.7 8.3-11.7 8.3-16.1 0-22.4-15.5-51.1-51.4-29.7 29.7 20.5 86.9 58.3 86.9 26.1 0 43.1-24.2 38-42 3.7 0 8.3.3 11.7-.6 1.1 27.1 9.1 59.4 41.7 61.7 0-.9 2-7.1 2-7.4 0-17.4-10.6-32.6-10.6-50.3 0-28.3 21.7-55.7 43.7-71.7 8-6 17.7-9.7 27.1-13.1 9.7-3.7 20-8 27.4-15.4-1.1-11.2-5.7-21.1-16.9-21.1-27.7 0-120.6 4-120.6-39.7 0-6.7.1-13.1 17.4-13.1 32.3 0 114.3 8 138.3 29.1 18.1 16.1 24.3 113.2-31 174.7zm-98.6-126c9.7 3.1 19.7 4 29.7 6-7.4 5.4-14 12-20.3 19.1-2.8-8.5-6.2-16.8-9.4-25.1z"],angrycreative:[640,512,[],"f36e","M640 238.2l-3.2 28.2-34.5 2.3-2 18.1 34.5-2.3-3.2 28.2-34.4 2.2-2.3 20.1 34.4-2.2-3 26.1-64.7 4.1 12.7-113.2L527 365.2l-31.9 2-23.8-117.8 30.3-2 13.6 79.4 31.7-82.4 93.1-6.2zM426.8 371.5l28.3-1.8L468 249.6l-28.4 1.9-12.8 120zM162 388.1l-19.4-36-3.5 37.4-28.2 1.7 2.7-29.1c-11 18-32 34.3-56.9 35.8C23.9 399.9-3 377 .3 339.7c2.6-29.3 26.7-62.8 67.5-65.4 37.7-2.4 47.6 23.2 51.3 28.8l2.8-30.8 38.9-2.5c20.1-1.3 38.7 3.7 42.5 23.7l2.6-26.6 64.8-4.2-2.7 27.9-36.4 2.4-1.7 17.9 36.4-2.3-2.7 27.9-36.4 2.3-1.9 19.9 36.3-2.3-2.1 20.8 55-117.2 23.8-1.6L370.4 369l8.9-85.6-22.3 1.4 2.9-27.9 75-4.9-3 28-24.3 1.6-9.7 91.9-58 3.7-4.3-15.6-39.4 2.5-8 16.3-126.2 7.7zm-44.3-70.2l-26.4 1.7C84.6 307.2 76.9 303 65 303.8c-19 1.2-33.3 17.5-34.6 33.3-1.4 16 7.3 32.5 28.7 31.2 12.8-.8 21.3-8.6 28.9-18.9l27-1.7 2.7-29.8zm56.1-7.7c1.2-12.9-7.6-13.6-26.1-12.4l-2.7 28.5c14.2-.9 27.5-2.1 28.8-16.1zm21.1 70.8l5.8-60c-5 13.5-14.7 21.1-27.9 26.6l22.1 33.4zm135.4-45l-7.9-37.8-15.8 39.3 23.7-1.5zm-170.1-74.6l-4.3-17.5-39.6 2.6-8.1 18.2-31.9 2.1 57-121.9 23.9-1.6 30.7 102 9.9-104.7 27-1.8 37.8 63.6 6.5-66.6 28.5-1.9-4 
41.2c7.4-13.5 22.9-44.7 63.6-47.5 40.5-2.8 52.4 29.3 53.4 30.3l3.3-32 39.3-2.7c12.7-.9 27.8.3 36.3 9.7l-4.4-11.9 32.2-2.2 12.9 43.2 23-45.7 31-2.2-43.6 78.4-4.8 44.3-28.4 1.9 4.8-44.3-15.8-43c1 22.3-9.2 40.1-32 49.6l25.2 38.8-36.4 2.4-19.2-36.8-4 38.3-28.4 1.9 3.3-31.5c-6.7 9.3-19.7 35.4-59.6 38-26.2 1.7-45.6-10.3-55.4-39.2l-4 40.3-25 1.6-37.6-63.3-6.3 66.2-56.8 3.7zm276.6-82.1c10.2-.7 17.5-2.1 21.6-4.3 4.5-2.4 7-6.4 7.6-12.1.6-5.3-.6-8.8-3.4-10.4-3.6-2.1-10.6-2.8-22.9-2l-2.9 28.8zM327.7 214c5.6 5.9 12.7 8.5 21.3 7.9 4.7-.3 9.1-1.8 13.3-4.1 5.5-3 10.6-8 15.1-14.3l-34.2 2.3 2.4-23.9 63.1-4.3 1.2-12-31.2 2.1c-4.1-3.7-7.8-6.6-11.1-8.1-4-1.7-8.1-2.8-12.2-2.5-8 .5-15.3 3.6-22 9.2-7.7 6.4-12 14.5-12.9 24.4-1.1 9.6 1.4 17.3 7.2 23.3zm-201.3 8.2l23.8-1.6-8.3-37.6-15.5 39.2z"],angular:[415,512,[],"f420","M169.7 268.1h76.2l-38.1-91.6-38.1 91.6zM207.8 32L0 106.4l31.8 275.7 176 97.9 176-97.9 31.8-275.7L207.8 32zM338 373.8h-48.6l-26.2-65.4H152.6l-26.2 65.4H77.7L207.8 81.5 338 373.8z"],"app-store":[512,512,[],"f36f","M255.9 120.9l9.1-15.7c5.6-9.8 18.1-13.1 27.9-7.5 9.8 5.6 13.1 18.1 7.5 27.9l-87.5 151.5h63.3c20.5 0 32 24.1 23.1 40.8H113.8c-11.3 0-20.4-9.1-20.4-20.4 0-11.3 9.1-20.4 20.4-20.4h52l66.6-115.4-20.8-36.1c-5.6-9.8-2.3-22.2 7.5-27.9 9.8-5.6 22.2-2.3 27.9 7.5l8.9 15.7zm-78.7 218l-19.6 34c-5.6 9.8-18.1 13.1-27.9 7.5-9.8-5.6-13.1-18.1-7.5-27.9l14.6-25.2c16.4-5.1 29.8-1.2 40.4 11.6zm168.9-61.7h53.1c11.3 0 20.4 9.1 20.4 20.4 0 11.3-9.1 20.4-20.4 20.4h-29.5l19.9 34.5c5.6 9.8 2.3 22.2-7.5 27.9-9.8 5.6-22.2 2.3-27.9-7.5-33.5-58.1-58.7-101.6-75.4-130.6-17.1-29.5-4.9-59.1 7.2-69.1 13.4 23 33.4 57.7 60.1 104zM256 8C119 8 8 119 8 256s111 248 248 248 248-111 248-248S393 8 256 8zm216 248c0 118.7-96.1 216-216 216-118.7 0-216-96.1-216-216 0-118.7 96.1-216 216-216 118.7 0 216 96.1 216 216z"],"app-store-ios":[448,512,[],"f370","M400 32H48C21.5 32 0 53.5 0 80v352c0 26.5 21.5 48 48 48h352c26.5 0 48-21.5 48-48V80c0-26.5-21.5-48-48-48zM127 384.5c-5.5 9.6-17.8 12.8-27.3 7.3-9.6-5.5-12.8-17.8-7.3-27.3l14.3-24.7c16.1-4.9 29.3-1.1 39.6 11.4L127 384.5zm138.9-53.9H84c-11 0-20-9-20-20s9-20 20-20h51l65.4-113.2-20.5-35.4c-5.5-9.6-2.2-21.8 7.3-27.3 9.6-5.5 21.8-2.2 27.3 7.3l8.9 15.4 8.9-15.4c5.5-9.6 17.8-12.8 27.3-7.3 9.6 5.5 12.8 17.8 7.3 27.3l-85.8 148.6h62.1c20.2 0 31.5 23.7 22.7 40zm98.1 0h-29l19.6 33.9c5.5 9.6 2.2 21.8-7.3 27.3-9.6 5.5-21.8 2.2-27.3-7.3-32.9-56.9-57.5-99.7-74-128.1-16.7-29-4.8-58 7.1-67.8 13.1 22.7 32.7 56.7 58.9 102h52c11 0 20 9 20 20 0 11.1-9 20-20 20z"],apper:[640,512,[],"f371","M42.1 239.1c22.2 0 29 2.8 33.5 14.6h.8v-22.9c0-11.3-4.8-15.4-17.9-15.4-11.3 0-14.4 2.5-15.1 12.8H4.8c.3-13.9 1.5-19.1 5.8-24.4C17.9 195 29.5 192 56.7 192c33 0 47.1 5 53.9 18.9 2 4.3 4 15.6 4 23.7v76.3H76.3l1.3-19.1h-1c-5.3 15.6-13.6 20.4-35.5 20.4-30.3 0-41.1-10.1-41.1-37.3 0-25.2 12.3-35.8 42.1-35.8zm17.1 48.1c13.1 0 16.9-3 16.9-13.4 0-9.1-4.3-11.6-19.6-11.6-13.1 0-17.9 3-17.9 12.1-.1 10.4 3.7 12.9 20.6 12.9zm77.8-94.9h38.3l-1.5 20.6h.8c9.1-17.1 15.9-20.9 37.5-20.9 14.4 0 24.7 3 31.5 9.1 9.8 8.6 12.8 20.4 12.8 48.1 0 30-3 43.1-12.1 52.9-6.8 7.3-16.4 10.1-33.2 10.1-20.4 0-29.2-5.5-33.8-21.2h-.8v70.3H137v-169zm80.9 60.7c0-27.5-3.3-32.5-20.7-32.5-16.9 0-20.7 5-20.7 28.7 0 28 3.5 33.5 21.2 33.5 16.4 0 20.2-5.6 20.2-29.7zm57.9-60.7h38.3l-1.5 20.6h.8c9.1-17.1 15.9-20.9 37.5-20.9 14.4 0 24.7 3 31.5 9.1 9.8 8.6 12.8 20.4 12.8 48.1 0 30-3 43.1-12.1 52.9-6.8 7.3-16.4 10.1-33.3 10.1-20.4 0-29.2-5.5-33.8-21.2h-.8v70.3h-39.5v-169zm80.9 60.7c0-27.5-3.3-32.5-20.7-32.5-16.9 0-20.7 5-20.7 28.7 0 28 3.5 33.5 21.2 33.5 16.4 0 20.2-5.6 
20.2-29.7zm53.8-3.8c0-25.4 3.3-37.8 12.3-45.8 8.8-8.1 22.2-11.3 45.1-11.3 42.8 0 55.7 12.8 55.7 55.7v11.1h-75.3c-.3 2-.3 4-.3 4.8 0 16.9 4.5 21.9 20.1 21.9 13.9 0 17.9-3 17.9-13.9h37.5v2.3c0 9.8-2.5 18.9-6.8 24.7-7.3 9.8-19.6 13.6-44.3 13.6-27.5 0-41.6-3.3-50.6-12.3-8.5-8.5-11.3-21.3-11.3-50.8zm76.4-11.6c-.3-1.8-.3-3.3-.3-3.8 0-12.3-3.3-14.6-19.6-14.6-14.4 0-17.1 3-18.1 15.1l-.3 3.3h38.3zm55.6-45.3h38.3l-1.8 19.9h.7c6.8-14.9 14.4-20.2 29.7-20.2 10.8 0 19.1 3.3 23.4 9.3 5.3 7.3 6.8 14.4 6.8 34 0 1.5 0 5 .2 9.3h-35c.3-1.8.3-3.3.3-4 0-15.4-2-19.4-10.3-19.4-6.3 0-10.8 3.3-13.1 9.3-1 3-1 4.3-1 12.3v68h-38.3V192.3z"],apple:[448,512,[],"f179","M247.2 137.6c-6.2 1.9-15.3 3.5-27.9 4.6 1.1-56.7 29.9-96.6 88-110.1 9.3 41.6-26.1 94.1-60.1 105.5zm121.3 72.7c6.4-9.4 16.6-19.9 30.6-31.7-22.3-27.6-48.1-44.3-85.1-44.3-35.4 0-65.2 18.2-87 18.2-18.5 0-51.9-16.1-84.5-16.1-69.6 0-106.5 68.1-106.5 139C36 354.2 95.7 480 156.2 480c23.8 0 45.2-18 73.5-18 29.3 0 52.8 17.2 80.3 17.2 46 0 88.6-77.5 102-119.7-46.8-14.3-84.4-90.2-43.5-149.2z"],"apple-pay":[640,512,[],"f415","M116.9 158.5c-7.5 8.9-19.5 15.9-31.5 14.9-1.5-12 4.4-24.8 11.3-32.6 7.5-9.1 20.6-15.6 31.3-16.1 1.2 12.4-3.7 24.7-11.1 33.8m10.9 17.2c-17.4-1-32.3 9.9-40.5 9.9-8.4 0-21-9.4-34.8-9.1-17.9.3-34.5 10.4-43.6 26.5-18.8 32.3-4.9 80 13.3 106.3 8.9 13 19.5 27.3 33.5 26.8 13.3-.5 18.5-8.6 34.5-8.6 16.1 0 20.8 8.6 34.8 8.4 14.5-.3 23.6-13 32.5-26 10.1-14.8 14.3-29.1 14.5-29.9-.3-.3-28-10.9-28.3-42.9-.3-26.8 21.9-39.5 22.9-40.3-12.5-18.6-32-20.6-38.8-21.1m100.4-36.2v194.9h30.3v-66.6h41.9c38.3 0 65.1-26.3 65.1-64.3s-26.4-64-64.1-64h-73.2zm30.3 25.5h34.9c26.3 0 41.3 14 41.3 38.6s-15 38.8-41.4 38.8h-34.8V165zm162.2 170.9c19 0 36.6-9.6 44.6-24.9h.6v23.4h28v-97c0-28.1-22.5-46.3-57.1-46.3-32.1 0-55.9 18.4-56.8 43.6h27.3c2.3-12 13.4-19.9 28.6-19.9 18.5 0 28.9 8.6 28.9 24.5v10.8l-37.8 2.3c-35.1 2.1-54.1 16.5-54.1 41.5.1 25.2 19.7 42 47.8 42zm8.2-23.1c-16.1 0-26.4-7.8-26.4-19.6 0-12.3 9.9-19.4 28.8-20.5l33.6-2.1v11c0 18.2-15.5 31.2-36 31.2zm102.5 74.6c29.5 0 43.4-11.3 55.5-45.4L640 193h-30.8l-35.6 115.1h-.6L537.4 193h-31.6L557 334.9l-2.8 8.6c-4.6 14.6-12.1 20.3-25.5 20.3-2.4 0-7-.3-8.9-.5v23.4c1.8.4 9.3.7 11.6.7z"],asymmetrik:[576,512,[],"f372","M517.5 309.2c38.8-40 58.1-80 58.5-116.1.8-65.5-59.4-118.2-169.4-135C277.9 38.4 118.1 73.6 0 140.5 52 114 110.6 92.3 170.7 82.3c74.5-20.5 153-25.4 221.3-14.8C544.5 91.3 588.8 195 490.8 299.2c-10.2 10.8-22 21.1-35 30.6L304.9 103.4 114.7 388.9c-65.6-29.4-76.5-90.2-19.1-151.2 20.8-22.2 48.3-41.9 79.5-58.1 20-12.2 39.7-22.6 62-30.7-65.1 20.3-122.7 52.9-161.6 92.9-27.7 28.6-41.4 57.1-41.7 82.9-.5 35.1 23.4 65.1 68.4 83l-34.5 51.7h101.6l22-34.4c22.2 1 45.3 0 68.6-2.7l-22.8 37.1h135.5L340 406.3c18.6-5.3 36.9-11.5 54.5-18.7l45.9 71.8H542L468.6 349c18.5-12.1 35-25.5 48.9-39.8zm-187.6 80.5l-25-40.6-32.7 53.3c-23.4 3.5-46.7 5.1-69.2 4.4l101.9-159.3 78.7 123c-17.2 7.4-35.3 13.9-53.7 19.2z"],audible:[640,512,[],"f373","M640 199.9v54l-320 200L0 254v-54l320 200 320-200.1zm-194.5 72l47.1-29.4c-37.2-55.8-100.7-92.6-172.7-92.6-72 0-135.5 36.7-172.6 92.4h.3c2.5-2.3 5.1-4.5 7.7-6.7 89.7-74.4 219.4-58.1 290.2 36.3zm-220.1 18.8c16.9-11.9 36.5-18.7 57.4-18.7 34.4 0 65.2 18.4 86.4 47.6l45.4-28.4c-20.9-29.9-55.6-49.5-94.8-49.5-38.9 0-73.4 19.4-94.4 49zM103.6 161.1c131.8-104.3 318.2-76.4 417.5 62.1l.7 1 48.8-30.4C517.1 112.1 424.8 58.1 319.9 58.1c-103.5 0-196.6 53.5-250.5 135.6 9.9-10.5 22.7-23.5 34.2-32.6zm467 32.7z"],autoprefixer:[640,512,[],"f41c","M318.4 16l-161 480h77.5l25.4-81.4h119.5L405 496h77.5L318.4 16zm-40.3 341.9l41.2-130.4h1.5l40.9 
130.4h-83.6zM640 405l-10-31.4L462.1 358l19.4 56.5L640 405zm-462.1-47L10 373.7 0 405l158.5 9.4 19.4-56.4z"],avianex:[512,512,[],"f374","M453.1 32h-312c-38.9 0-76.2 31.2-83.3 69.7L1.2 410.3C-5.9 448.8 19.9 480 58.9 480h312c38.9 0 76.2-31.2 83.3-69.7l56.7-308.5c7-38.6-18.8-69.8-57.8-69.8zm-58.2 347.3l-32 13.5-115.4-110c-14.7 10-29.2 19.5-41.7 27.1l22.1 64.2-17.9 12.7-40.6-61-52.4-48.1 15.7-15.4 58 31.1c9.3-10.5 20.8-22.6 32.8-34.9L203 228.9l-68.8-99.8 18.8-28.9 8.9-4.8L265 207.8l4.9 4.5c19.4-18.8 33.8-32.4 33.8-32.4 7.7-6.5 21.5-2.9 30.7 7.9 9 10.5 10.6 24.7 2.7 31.3-1.8 1.3-15.5 11.4-35.3 25.6l4.5 7.3 94.9 119.4-6.3 7.9z"],aviato:[640,512,[],"f421","M107.2 283.5l-19-41.8H36.1l-19 41.8H0l62.2-131.4 62.2 131.4h-17.2zm-45-98.1l-19.6 42.5h39.2l-19.6-42.5zm112.7 102.4l-62.2-131.4h17.1l45.1 96 45.1-96h17l-62.1 131.4zm80.6-4.3V156.4H271v127.1h-15.5zm209.1-115.6v115.6h-17.3V167.9h-41.2v-11.5h99.6v11.5h-41.1zM640 218.8c0 9.2-1.7 17.8-5.1 25.8-3.4 8-8.2 15.1-14.2 21.1-6 6-13.1 10.8-21.1 14.2-8 3.4-16.6 5.1-25.8 5.1s-17.8-1.7-25.8-5.1c-8-3.4-15.1-8.2-21.1-14.2-6-6-10.8-13-14.2-21.1-3.4-8-5.1-16.6-5.1-25.8s1.7-17.8 5.1-25.8c3.4-8 8.2-15.1 14.2-21.1 6-6 13-8.4 21.1-11.9 8-3.4 16.6-5.1 25.8-5.1s17.8 1.7 25.8 5.1c8 3.4 15.1 5.8 21.1 11.9 6 6 10.7 13.1 14.2 21.1 3.4 8 5.1 16.6 5.1 25.8zm-15.5 0c0-7.3-1.3-14-3.9-20.3-2.6-6.3-6.2-11.7-10.8-16.3-4.6-4.6-10-8.2-16.2-10.9-6.2-2.7-12.8-4-19.8-4s-13.6 1.3-19.8 4c-6.2 2.7-11.6 6.3-16.2 10.9-4.6 4.6-8.2 10-10.8 16.3-2.6 6.3-3.9 13.1-3.9 20.3 0 7.3 1.3 14 3.9 20.3 2.6 6.3 6.2 11.7 10.8 16.3 4.6 4.6 10 8.2 16.2 10.9 6.2 2.7 12.8 4 19.8 4s13.6-1.3 19.8-4c6.2-2.7 11.6-6.3 16.2-10.9 4.6-4.6 8.2-10 10.8-16.3 2.6-6.3 3.9-13.1 3.9-20.3zm-94.8 96.7v-6.3l88.9-10-242.9 13.4c.6-2.2 1.1-4.6 1.4-7.2.3-2 .5-4.2.6-6.5l64.8-8.1-64.9 1.9c0-.4-.1-.7-.1-1.1-2.8-17.2-25.5-23.7-25.5-23.7l-1.1-26.3h23.8l19 41.8h17.1L348.6 152l-62.2 131.4h17.1l19-41.8h23.6L345 268s-22.7 6.5-25.5 23.7c-.1.3-.1.7-.1 1.1l-64.9-1.9 64.8 8.1c.1 2.3.3 4.4.6 6.5.3 2.6.8 5 1.4 7.2L78.4 299.2l88.9 10v6.3c-5.9.9-10.5 6-10.5 12.2 0 6.8 5.6 12.4 12.4 12.4 6.8 0 12.4-5.6 12.4-12.4 0-6.2-4.6-11.3-10.5-12.2v-5.8l80.3 9v5.4c-5.7 1.1-9.9 6.2-9.9 12.1 0 6.8 5.6 10.2 12.4 10.2 6.8 0 12.4-3.4 12.4-10.2 0-6-4.3-11-9.9-12.1v-4.9l28.4 3.2v23.7h-5.9V360h5.9v-6.6h5v6.6h5.9v-13.8h-5.9V323l38.3 4.3c8.1 11.4 19 13.6 19 13.6l-.1 6.7-5.1.2-.1 12.1h4.1l.1-5h5.2l.1 5h4.1l-.1-12.1-5.1-.2-.1-6.7s10.9-2.1 19-13.6l38.3-4.3v23.2h-5.9V360h5.9v-6.6h5v6.6h5.9v-13.8h-5.9v-23.7l28.4-3.2v4.9c-5.7 1.1-9.9 6.2-9.9 12.1 0 6.8 5.6 10.2 12.4 10.2 6.8 0 12.4-3.4 12.4-10.2 0-6-4.3-11-9.9-12.1v-5.4l80.3-9v5.8c-5.9.9-10.5 6-10.5 12.2 0 6.8 5.6 12.4 12.4 12.4 6.8 0 12.4-5.6 12.4-12.4-.2-6.3-4.7-11.4-10.7-12.3zm-200.8-87.6l19.6-42.5 19.6 42.5h-17.9l-1.7-40.3-1.7 40.3h-17.9z"],aws:[512,512,[],"f375","M261.2 136.1c-14 57.5-13.1 54.4-25.8 107-1.6 6.5-4.1 8.4-10.7 8.5h-14.4c-5.8-.1-8.2-1.6-9.9-7.3-12.3-39.4-28.8-94.1-39.9-130.7-4.1-13.5-1.4-13.2 9.3-12.9 3.7.1 7.3 0 11 0 5.1.1 7.7 2 9.1 7.1 3.6 12.9 6 22.8 26.6 104.1.4 1.6.9 3.1 1.4 4.6h1.1c.5-2 1.1-3.9 1.6-5.9 7.8-32.9 15.5-65.9 23.3-98.8 2.4-10.2 6.7-11.2 17-11.2h7.6c6.9.1 9 1.5 10.7 8.3 6 23.4 23.5 101.8 26.7 110.4 5.1-18.3-1.8 7.9 28.5-109 2.1-8.1 4.1-9.7 12.3-9.7h12.7c5.4.1 7 1.8 5.7 7.1-2.4 9.5-2.9 9.9-41.3 132.9-3.1 9.9-4.2 10.8-14.6 10.8h-10.6c-7.3 0-9.2-1.3-11-8.4-4.3-16.2-23.3-95.7-26.4-106.9zM125.4 247.3c4.2 5.8 8.1 6.3 14.1 2.4l6.3-4.2c6.8-4.5 7.3-6.3 3.6-13.5-4.3-8.4-6.4-17.3-6.3-26.9 0-3.1.6-55.7-.9-66.8-2.7-19.3-12.5-32.8-31.7-38.7-10.7-3.4-21.7-3.3-32.7-3-15.1.4-29.4 4.6-42.8 11.4-1.8.9-3.7 
3.1-4.1 4.9-.8 3.9-1.1 8.1-.7 12.1.6 5.9 2.6 7 8.2 5.1 5.1-1.7 10-3.9 15.1-5.4 14.5-4.4 29.2-6.4 44.1-1.7 7.1 2.2 11.7 6.9 14.3 13.8 3 7.9 2.4 16.1 2.4 24.2 0 5.5-.1 5.5-5.5 4.5-13.9-2.6-27.7-5-41.9-3.1-15.2 2.1-28.6 7.3-38.2 20-9.1 12-10 25.6-7.4 39.5 2.8 15 11.8 25.7 26.4 30.4 20.6 6.7 40.1 3.3 57.7-9.5 3.8-2.8 7.2-6.2 11.1-9.5 3.1 5 5.8 9.7 8.9 14zm-15.3-61.6c3 .4 4.5 1.9 4.3 5.1-.2 3.8.1 7.6-.3 11.4-1.2 11.7-7.7 19.7-17.9 24.9-8.2 4.2-16.9 5.8-26.1 5-15.2-1.3-21-13.1-19.6-26.3C51.8 193.2 59 186.2 72 184c13.8-2.4 16-1.1 38.1 1.7zm348.8 65.1c21.3-8.6 32.9-26.2 29.2-50-2.2-14.6-11.8-24.2-25.2-29.5-14.7-5.9-33.8-10.3-48.1-18.2-4.4-2.4-7.4-6.3-7.6-11.9-.4-11.1 4.2-17.2 15.4-19.8 9.3-2.1 18.8-2.2 28.1-.4 7.3 1.4 14.3 4.2 21.4 6.3 2.8.9 5.9 2.1 7.8-1.6 3.8-7.3.4-18.7-7.3-21.8-22.5-9-45.5-11.6-68.2-1.6-14.6 6.4-24.6 17.4-26 34.2-1.6 19.3 6.9 33.4 24.1 41.7 7.7 3.7 16.1 5.9 24.2 8.9 8.1 3 16.2 5.8 24.1 9.1 12.3 5.3 11.6 24.2 1.2 30-27.7 15.3-64.9-2.4-69.2-3.8-3.3-1.1-5.3.2-6.3 3.7-3 11.3.7 18.8 11.6 22.7 21.7 7.9 49.6 10.5 70.8 2zM296 413.5c50.8-5.8 98.7-20.8 142.7-47 8-4.7 15.5-10.3 23.1-15.7 7.3-5.2 3.2-18.4-11.3-12.2-54.4 23.2-111.2 36.1-170.2 38.9-30.5 1.5-60.8-.3-91.1-4.7-63.1-9.2-122.4-29.2-177.6-61.2-2.1-1.2-4.2-2.5-6.5-3-4.9-1.1-7.7 4.7-2.4 9.7 24 22.1 50.3 40.8 79.1 55.7 53.7 27.7 110.5 42.7 171.2 42 14.4-.8 28.8-.9 43-2.5zm174.7-92.2c14.8.8 19.4 5.9 15.7 20.2-3.8 14.8-9.3 29.2-13.9 43.8-.9 2.9-4.2 6.3-.8 8.8 3.7 2.6 6.5-1 9-3.3 10.2-9.5 17.4-21 22.5-33.8 5.4-13.4 9.3-27.2 8.7-41.9-.2-6.2-1.8-8.8-7.8-10.5-5.4-1.5-11-2.8-16.5-3.2-21.6-1.8-42.5.5-62 10.6-3.1 1.6-6 3.7-8.7 5.9-1.1.9-3.2 5.3 2.4 6.1 1.9.3 3.9-.1 5.9-.3 16.9-1.6 28.6-3.3 45.5-2.4z"],bandcamp:[496,512,[],"f2d5","M248 8C111 8 0 119 0 256s111 248 248 248 248-111 248-248S385 8 248 8zm48.2 326.1h-181L199.9 178h181l-84.7 156.1z"],behance:[576,512,[],"f1b4","M232 237.2c31.8-15.2 48.4-38.2 48.4-74 0-70.6-52.6-87.8-113.3-87.8H0v354.4h171.8c64.4 0 124.9-30.9 124.9-102.9 0-44.5-21.1-77.4-64.7-89.7zM77.9 135.9H151c28.1 0 53.4 7.9 53.4 40.5 0 30.1-19.7 42.2-47.5 42.2h-79v-82.7zm83.3 233.7H77.9V272h84.9c34.3 0 56 14.3 56 50.6 0 35.8-25.9 47-57.6 47zm358.5-240.7H376V94h143.7v34.9zM576 305.2c0-75.9-44.4-139.2-124.9-139.2-78.2 0-131.3 58.8-131.3 135.8 0 79.9 50.3 134.7 131.3 134.7 61.3 0 101-27.6 120.1-86.3H509c-6.7 21.9-34.3 33.5-55.7 33.5-41.3 0-63-24.2-63-65.3h185.1c.3-4.2.6-8.7.6-13.2zM390.4 274c2.3-33.7 24.7-54.8 58.5-54.8 35.4 0 53.2 20.8 56.2 54.8H390.4z"],"behance-square":[512,512,[],"f1b5","M186.5 293c0 19.3-14 25.4-31.2 25.4h-45.1v-52.9h46c18.6.1 30.3 7.8 30.3 27.5zm-7.7-82.3c0-17.7-13.7-21.9-28.9-21.9h-39.6v44.8H153c15.1 0 25.8-6.6 25.8-22.9zm132.3 23.2c-18.3 0-30.5 11.4-31.7 29.7h62.2c-1.7-18.5-11.3-29.7-30.5-29.7zM448 80v352c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V80c0-26.5 21.5-48 48-48h352c26.5 0 48 21.5 48 48zM271.7 185h77.8v-18.9h-77.8V185zm-43 110.3c0-24.1-11.4-44.9-35-51.6 17.2-8.2 26.2-17.7 26.2-37 0-38.2-28.5-47.5-61.4-47.5H68v192h93.1c34.9-.2 67.6-16.9 67.6-55.9zM380 280.5c0-41.1-24.1-75.4-67.6-75.4-42.4 0-71.1 31.8-71.1 73.6 0 43.3 27.3 73 71.1 73 33.2 0 54.7-14.9 65.1-46.8h-33.7c-3.7 11.9-18.6 18.1-30.2 18.1-22.4 0-34.1-13.1-34.1-35.3h100.2c.1-2.3.3-4.8.3-7.2z"],bimobject:[448,512,[],"f378","M416 32H32C14.4 32 0 46.4 0 64v384c0 17.6 14.4 32 32 32h384c17.6 0 32-14.4 32-32V64c0-17.6-14.4-32-32-32zm-64 257.4c0 49.4-11.4 82.6-103.8 82.6h-16.9c-44.1 0-62.4-14.9-70.4-38.8h-.9V368H96V136h64v74.7h1.1c4.6-30.5 39.7-38.8 69.7-38.8h17.3c92.4 0 103.8 33.1 103.8 82.5v35zm-64-28.9v22.9c0 21.7-3.4 33.8-38.4 33.8h-45.3c-28.9 
0-44.1-6.5-44.1-35.7v-19c0-29.3 15.2-35.7 44.1-35.7h45.3c35-.2 38.4 12 38.4 33.7z"],bitbucket:[512,512,[],"f171","M23.1 32C14.2 31.9 7 38.9 6.9 47.8c0 .9.1 1.8.2 2.8L74.9 462c1.7 10.4 10.7 18 21.2 18.1h325.1c7.9.1 14.7-5.6 16-13.4l67.8-416c1.4-8.7-4.5-16.9-13.2-18.3-.9-.1-1.8-.2-2.8-.2L23.1 32zm285.3 297.3H204.6l-28.1-146.8h157l-25.1 146.8z"],bitcoin:[512,512,[],"f379","M504 256c0 136.967-111.033 248-248 248S8 392.967 8 256 119.033 8 256 8s248 111.033 248 248zm-141.651-35.33c4.937-32.999-20.191-50.739-54.55-62.573l11.146-44.702-27.213-6.781-10.851 43.524c-7.154-1.783-14.502-3.464-21.803-5.13l10.929-43.81-27.198-6.781-11.153 44.686c-5.922-1.349-11.735-2.682-17.377-4.084l.031-.14-37.53-9.37-7.239 29.062s20.191 4.627 19.765 4.913c11.022 2.751 13.014 10.044 12.68 15.825l-12.696 50.925c.76.194 1.744.473 2.829.907-.907-.225-1.876-.473-2.876-.713l-17.796 71.338c-1.349 3.348-4.767 8.37-12.471 6.464.271.395-19.78-4.937-19.78-4.937l-13.51 31.147 35.414 8.827c6.588 1.651 13.045 3.379 19.4 5.006l-11.262 45.213 27.182 6.781 11.153-44.733a1038.209 1038.209 0 0 0 21.687 5.627l-11.115 44.523 27.213 6.781 11.262-45.128c46.404 8.781 81.299 5.239 95.986-36.727 11.836-33.79-.589-53.281-25.004-65.991 17.78-4.098 31.174-15.792 34.747-39.949zm-62.177 87.179c-8.41 33.79-65.308 15.523-83.755 10.943l14.944-59.899c18.446 4.603 77.6 13.717 68.811 48.956zm8.417-87.667c-7.673 30.736-55.031 15.12-70.393 11.292l13.548-54.327c15.363 3.828 64.836 10.973 56.845 43.035z"],bity:[496,512,[],"f37a","M78.4 67.2C173.8-22 324.5-24 421.5 71c14.3 14.1-6.4 37.1-22.4 21.5-84.8-82.4-215.8-80.3-298.9-3.2-16.3 15.1-36.5-8.3-21.8-22.1zm98.9 418.6c19.3 5.7 29.3-23.6 7.9-30C73 421.9 9.4 306.1 37.7 194.8c5-19.6-24.9-28.1-30.2-7.1-32.1 127.4 41.1 259.8 169.8 298.1zm148.1-2c121.9-40.2 192.9-166.9 164.4-291-4.5-19.7-34.9-13.8-30 7.9 24.2 107.7-37.1 217.9-143.2 253.4-21.2 7-10.4 36 8.8 29.7zm-62.9-79l.2-71.8c0-8.2-6.6-14.8-14.8-14.8-8.2 0-14.8 6.7-14.8 14.8l-.2 71.8c0 8.2 6.6 14.8 14.8 14.8s14.8-6.6 14.8-14.8zm71-269c2.1 90.9 4.7 131.9-85.5 132.5-92.5-.7-86.9-44.3-85.5-132.5 0-21.8-32.5-19.6-32.5 0v71.6c0 69.3 60.7 90.9 118 90.1 57.3.8 118-20.8 118-90.1v-71.6c0-19.6-32.5-21.8-32.5 0z"],"black-tie":[448,512,[],"f27e","M0 32v448h448V32H0zm316.5 325.2L224 445.9l-92.5-88.7 64.5-184-64.5-86.6h184.9L252 173.2l64.5 184z"],blackberry:[512,512,[],"f37b","M166 116.9c0 23.4-16.4 49.1-72.5 49.1H23.4l21-88.8h67.8c42.1 0 53.8 23.3 53.8 39.7zm126.2-39.7h-67.8L205.7 166h70.1c53.8 0 70.1-25.7 70.1-49.1.1-16.4-11.6-39.7-53.7-39.7zM88.8 208.1H21L0 296.9h70.1c56.1 0 72.5-23.4 72.5-49.1 0-16.3-11.7-39.7-53.8-39.7zm180.1 0h-67.8l-18.7 88.8h70.1c53.8 0 70.1-23.4 70.1-49.1 0-16.3-11.7-39.7-53.7-39.7zm189.3-53.8h-67.8l-18.7 88.8h70.1c53.8 0 70.1-23.4 70.1-49.1.1-16.3-11.6-39.7-53.7-39.7zm-28 137.9h-67.8L343.7 381h70.1c56.1 0 70.1-23.4 70.1-49.1 0-16.3-11.6-39.7-53.7-39.7zM240.8 346H173l-18.7 88.8h70.1c56.1 0 70.1-25.7 70.1-49.1.1-16.3-11.6-39.7-53.7-39.7z"],blogger:[448,512,[],"f37c","M162.4 196c4.8-4.9 6.2-5.1 36.4-5.1 27.2 0 28.1.1 32.1 2.1 5.8 2.9 8.3 7 8.3 13.6 0 5.9-2.4 10-7.6 13.4-2.8 1.8-4.5 1.9-31.1 2.1-16.4.1-29.5-.2-31.5-.8-10.3-2.9-14.1-17.7-6.6-25.3zm61.4 94.5c-53.9 0-55.8.2-60.2 4.1-3.5 3.1-5.7 9.4-5.1 13.9.7 4.7 4.8 10.1 9.2 12 2.2 1 14.1 1.7 56.3 1.2l47.9-.6 9.2-1.5c9-5.1 10.5-17.4 3.1-24.4-5.3-4.7-5-4.7-60.4-4.7zm223.4 130.1c-3.5 28.4-23 50.4-51.1 57.5-7.2 1.8-9.7 1.9-172.9 1.8-157.8 0-165.9-.1-172-1.8-8.4-2.2-15.6-5.5-22.3-10-5.6-3.8-13.9-11.8-17-16.4-3.8-5.6-8.2-15.3-10-22C.1 423 0 420.3 0 256.3 0 93.2 0 89.7 1.8 82.6 8.1 57.9 27.7 39 53 33.4c7.3-1.6 
332.1-1.9 340-.3 21.2 4.3 37.9 17.1 47.6 36.4 7.7 15.3 7-1.5 7.3 180.6.2 115.8 0 164.5-.7 170.5zm-85.4-185.2c-1.1-5-4.2-9.6-7.7-11.5-1.1-.6-8-1.3-15.5-1.7-12.4-.6-13.8-.8-17.8-3.1-6.2-3.6-7.9-7.6-8-18.3 0-20.4-8.5-39.4-25.3-56.5-12-12.2-25.3-20.5-40.6-25.1-3.6-1.1-11.8-1.5-39.2-1.8-42.9-.5-52.5.4-67.1 6.2-27 10.7-46.3 33.4-53.4 62.4-1.3 5.4-1.6 14.2-1.9 64.3-.4 62.8 0 72.1 4 84.5 9.7 30.7 37.1 53.4 64.6 58.4 9.2 1.7 122.2 2.1 133.7.5 20.1-2.7 35.9-10.8 50.7-25.9 10.7-10.9 17.4-22.8 21.8-38.5 3.2-10.9 2.9-88.4 1.7-93.9z"],"blogger-b":[448,512,[],"f37d","M446.6 222.7c-1.8-8-6.8-15.4-12.5-18.5-1.8-1-13-2.2-25-2.7-20.1-.9-22.3-1.3-28.7-5-10.1-5.9-12.8-12.3-12.9-29.5-.1-33-13.8-63.7-40.9-91.3-19.3-19.7-40.9-33-65.5-40.5-5.9-1.8-19.1-2.4-63.3-2.9-69.4-.8-84.8.6-108.4 10C45.9 59.5 14.7 96.1 3.3 142.9 1.2 151.7.7 165.8.2 246.8c-.6 101.5.1 116.4 6.4 136.5 15.6 49.6 59.9 86.3 104.4 94.3 14.8 2.7 197.3 3.3 216 .8 32.5-4.4 58-17.5 81.9-41.9 17.3-17.7 28.1-36.8 35.2-62.1 4.9-17.6 4.5-142.8 2.5-151.7zm-322.1-63.6c7.8-7.9 10-8.2 58.8-8.2 43.9 0 45.4.1 51.8 3.4 9.3 4.7 13.4 11.3 13.4 21.9 0 9.5-3.8 16.2-12.3 21.6-4.6 2.9-7.3 3.1-50.3 3.3-26.5.2-47.7-.4-50.8-1.2-16.6-4.7-22.8-28.5-10.6-40.8zm191.8 199.8l-14.9 2.4-77.5.9c-68.1.8-87.3-.4-90.9-2-7.1-3.1-13.8-11.7-14.9-19.4-1.1-7.3 2.6-17.3 8.2-22.4 7.1-6.4 10.2-6.6 97.3-6.7 89.6-.1 89.1-.1 97.6 7.8 12.1 11.3 9.5 31.2-4.9 39.4z"],bluetooth:[448,512,[],"f293","M292.6 171.1L249.7 214l-.3-86 43.2 43.1m-43.2 219.8l43.1-43.1-42.9-42.9-.2 86zM416 259.4C416 465 344.1 512 230.9 512S32 465 32 259.4 115.4 0 228.6 0 416 53.9 416 259.4zm-158.5 0l79.4-88.6L211.8 36.5v176.9L138 139.6l-27 26.9 92.7 93-92.7 93 26.9 26.9 73.8-73.8 2.3 170 127.4-127.5-83.9-88.7z"],"bluetooth-b":[320,512,[],"f294","M196.48 260.023l92.626-103.333L143.125 0v206.33l-86.111-86.111-31.406 31.405 108.061 108.399L25.608 368.422l31.406 31.405 86.111-86.111L145.84 512l148.552-148.644-97.912-103.333zm40.86-102.996l-49.977 49.978-.338-100.295 50.315 50.317zM187.363 313.04l49.977 49.978-50.315 50.316.338-100.294z"],btc:[384,512,[],"f15a","M310.204 242.638c27.73-14.18 45.377-39.39 41.28-81.3-5.358-57.351-52.458-76.573-114.85-81.929V0h-48.528v77.203c-12.605 0-25.525.315-38.444.63V0h-48.528v79.409c-17.842.539-38.622.276-97.37 0v51.678c38.314-.678 58.417-3.14 63.023 21.427v217.429c-2.925 19.492-18.524 16.685-53.255 16.071L3.765 443.68c88.481 0 97.37.315 97.37.315V512h48.528v-67.06c13.234.315 26.154.315 38.444.315V512h48.528v-68.005c81.299-4.412 135.647-24.894 142.895-101.467 5.671-61.446-23.32-88.862-69.326-99.89zM150.608 134.553c27.415 0 113.126-8.507 113.126 48.528 0 54.515-85.71 48.212-113.126 48.212v-96.74zm0 251.776V279.821c32.772 0 133.127-9.138 133.127 53.255-.001 60.186-100.355 53.253-133.127 53.253z"],buromobelexperte:[448,512,[],"f37f","M0 32v128h128V32H0zm120 120H8V40h112v112zm40-120v128h128V32H160zm120 120H168V40h112v112zm40-120v128h128V32H320zm120 120H328V40h112v112zM0 192v128h128V192H0zm120 120H8V200h112v112zm40-120v128h128V192H160zm120 120H168V200h112v112zm40-120v128h128V192H320zm120 120H328V200h112v112zM0 352v128h128V352H0zm120 120H8V360h112v112zm40-120v128h128V352H160zm120 120H168V360h112v112zm40-120v128h128V352H320z"],buysellads:[448,512,[],"f20d","M224 150.7l42.9 160.7h-85.8L224 150.7zM448 80v352c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V80c0-26.5 21.5-48 48-48h352c26.5 0 48 21.5 48 48zm-65.3 325.3l-94.5-298.7H159.8L65.3 405.3H156l111.7-91.6 24.2 91.6h90.8z"],"cc-amex":[576,512,[],"f1f3","M576 255.4c-37.9-.2-44.2-.9-54.5 5v-5c-45.3 0-53.5-1.7-64.9 
5.2v-5.2h-78.2v5.1c-11.4-6.5-21.4-5.1-75.7-5.1v5.6c-6.3-3.7-14.5-5.6-24.3-5.6h-58c-3.5 3.8-12.5 13.7-15.7 17.2-12.7-14.1-10.5-11.6-15.5-17.2h-83.1v92.3h82c3.3-3.5 12.9-13.9 16.1-17.4 12.7 14.3 10.3 11.7 15.4 17.4h48.9c0-14.7.1-8.3.1-23 11.5.2 24.3-.2 34.3-6.2 0 13.9-.1 17.1-.1 29.2h39.6c0-18.5.1-7.4.1-25.3 6.2 0 7.7 0 9.4.1.1 1.3 0 0 0 25.2 152.8 0 145.9 1.1 156.7-4.5v4.5c34.8 0 54.8 2.2 67.5-6.1V432c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V228.3h26.6c4.2-10.1 2.2-5.3 6.4-15.3h19.2c4.2 10 2.2 5.2 6.4 15.3h52.9v-11.4c2.2 5 1.1 2.5 5.1 11.4h29.5c2.4-5.5 2.6-5.8 5.1-11.4v11.4h135.5v-25.1c6.4 0 8-.1 9.8.2 0 0-.2 10.9.1 24.8h66.5v-8.9c7.4 5.9 17.4 8.9 29.7 8.9h26.8c4.2-10.1 2.2-5.3 6.4-15.3h19c6.5 15 .2.5 6.6 15.3h52.8v-21.9c11.8 19.7 7.8 12.9 13.2 21.9h41.6v-92h-39.9v18.4c-12.2-20.2-6.3-10.4-11.2-18.4h-43.3v20.6c-6.2-14.6-4.6-10.8-8.8-20.6h-32.4c-.4 0-2.3.2-2.3-.3h-27.6c-12.8 0-23.1 3.2-30.7 9.3v-9.3h-39.9v5.3c-10.8-6.1-20.7-5.1-64.4-5.3-.1 0-11.6-.1-11.6 0h-103c-2.5 6.1-6.8 16.4-12.6 30-2.8-6-11-23.8-13.9-30h-46V157c-7.4-17.4-4.7-11-9-21.1H22.9c-3.4 7.9-13.7 32-23.1 53.9V80c0-26.5 21.5-48 48-48h480c26.5 0 48 21.5 48 48v175.4zm-186.6-80.6c-.3.2-1.4 2.2-1.4 7.6 0 6 .9 7.7 1.1 7.9.2.1 1.1.5 3.4.5l7.3-16.9c-1.1 0-2.1-.1-3.1-.1-5.6 0-7 .7-7.3 1zm-19.9 130.9c9.2 3.3 11 9.5 11 18.4l-.1 13.8h-16.6l.1-11.5c0-11.8-3.8-13.8-14.8-13.8h-17.6l-.1 25.3h-16.6l.1-69.3h39.4c13 0 27.1 2.3 27.1 18.7-.1 7.6-4.2 15.3-11.9 18.4zm-6.3-15.4c0-6.4-5.6-7.4-10.7-7.4h-21v15.6h20.7c5.6 0 11-1.3 11-8.2zm181.7-7.1H575v-14.6h-32.9c-12.8 0-23.8 6.6-23.8 20.7 0 33 42.7 12.8 42.7 27.4 0 5.1-4.3 6.4-8.4 6.4h-32l-.1 14.8h32c8.4 0 17.6-1.8 22.5-8.9v-25.8c-10.5-13.8-39.3-1.3-39.3-13.5 0-5.8 4.6-6.5 9.2-6.5zm-99.2-.3v-14.3h-55.2l-.1 69.3h55.2l.1-14.3-38.6-.3v-13.8H445v-14.1h-37.8v-12.5h38.5zm42.2 40.1h-32.2l-.1 14.8h32.2c14.8 0 26.2-5.6 26.2-22 0-33.2-42.9-11.2-42.9-26.3 0-5.6 4.9-6.4 9.2-6.4h30.4v-14.6h-33.2c-12.8 0-23.5 6.6-23.5 20.7 0 33 42.7 12.5 42.7 27.4-.1 5.4-4.7 6.4-8.8 6.4zm-78.1-158.7c-17.4-.3-33.2-4.1-33.2 19.7 0 11.8 2.8 19.9 16.1 19.9h7.4l23.5-54.5h24.8l27.9 65.4v-65.4h25.3l29.1 48.1v-48.1h16.9v69H524l-31.2-51.9v51.9h-33.7l-6.6-15.3h-34.3l-6.4 15.3h-19.2c-22.8 0-33-11.8-33-34 0-23.3 10.5-35.3 34-35.3h16.1v15.2zm14.3 24.5h22.8l-11.2-27.6-11.6 27.6zm-72.6-39.6h-16.9v69.3h16.9v-69.3zm-38.1 37.3c9.5 3.3 11 9.2 11 18.4v13.5h-16.6c-.3-14.8 3.6-25.1-14.8-25.1h-18v25.1h-16.4v-69.3l39.1.3c13.3 0 27.4 2 27.4 18.4.1 8-4.3 15.7-11.7 18.7zm-6.7-15.3c0-6.4-5.6-7.4-10.7-7.4h-21v15.3h20.7c5.7 0 11-1.3 11-7.9zm-59.5-7.4v-14.6h-55.5v69.3h55.5v-14.3h-38.9v-13.8h37.8v-14.1h-37.8v-12.5h38.9zm-84.6 54.7v-54.2l-24 54.2H124l-24-54.2v54.2H66.2l-6.4-15.3H25.3l-6.4 15.3H1l29.7-69.3h24.5l28.1 65.7v-65.7h27.1l21.7 47 19.7-47h27.6v69.3h-16.8zM53.9 188.8l-11.5-27.6-11.2 27.6h22.7zm253 102.5c0 27.9-30.4 23.3-49.3 23.3l-.1 23.3h-32.2l-20.4-23-21.3 23h-65.4l.1-69.3h66.5l20.5 22.8 21-22.8H279c15.6 0 27.9 5.4 27.9 22.7zm-112.7 11.8l-17.9-20.2h-41.7v12.5h36.3v14.1h-36.3v13.8h40.6l19-20.2zM241 276l-25.3 27.4 25.3 28.1V276zm48.3 15.3c0-6.1-4.6-8.4-10.2-8.4h-21.5v17.6h21.2c5.9 0 10.5-2.8 10.5-9.2z"],"cc-apple-pay":[576,512,[],"f416","M302.2 218.4c0 17.2-10.5 27.1-29 27.1h-24.3v-54.2h24.4c18.4 0 28.9 9.8 28.9 27.1zm47.5 62.6c0 8.3 7.2 13.7 18.5 13.7 14.4 0 25.2-9.1 25.2-21.9v-7.7l-23.5 1.5c-13.3.9-20.2 5.8-20.2 14.4zM576 79v352c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V79c0-26.5 21.5-48 48-48h480c26.5 0 48 21.5 48 48zM127.8 197.2c8.4.7 16.8-4.2 22.1-10.4 5.2-6.4 8.6-15 7.7-23.7-7.4.3-16.6 4.9-21.9 11.3-4.8 5.5-8.9 14.4-7.9 22.8zm60.6 
74.5c-.2-.2-19.6-7.6-19.8-30-.2-18.7 15.3-27.7 16-28.2-8.8-13-22.4-14.4-27.1-14.7-12.2-.7-22.6 6.9-28.4 6.9-5.9 0-14.7-6.6-24.3-6.4-12.5.2-24.2 7.3-30.5 18.6-13.1 22.6-3.4 56 9.3 74.4 6.2 9.1 13.7 19.1 23.5 18.7 9.3-.4 13-6 24.2-6 11.3 0 14.5 6 24.3 5.9 10.2-.2 16.5-9.1 22.8-18.2 6.9-10.4 9.8-20.4 10-21zm135.4-53.4c0-26.6-18.5-44.8-44.9-44.8h-51.2v136.4h21.2v-46.6h29.3c26.8 0 45.6-18.4 45.6-45zm90 23.7c0-19.7-15.8-32.4-40-32.4-22.5 0-39.1 12.9-39.7 30.5h19.1c1.6-8.4 9.4-13.9 20-13.9 13 0 20.2 6 20.2 17.2v7.5l-26.4 1.6c-24.6 1.5-37.9 11.6-37.9 29.1 0 17.7 13.7 29.4 33.4 29.4 13.3 0 25.6-6.7 31.2-17.4h.4V310h19.6v-68zM516 210.9h-21.5l-24.9 80.6h-.4l-24.9-80.6H422l35.9 99.3-1.9 6c-3.2 10.2-8.5 14.2-17.9 14.2-1.7 0-4.9-.2-6.2-.3v16.4c1.2.4 6.5.5 8.1.5 20.7 0 30.4-7.9 38.9-31.8L516 210.9z"],"cc-diners-club":[576,512,[],"f24c","M239.7 79.9c-96.9 0-175.8 78.6-175.8 175.8 0 96.9 78.9 175.8 175.8 175.8 97.2 0 175.8-78.9 175.8-175.8 0-97.2-78.6-175.8-175.8-175.8zm-39.9 279.6c-41.7-15.9-71.4-56.4-71.4-103.8s29.7-87.9 71.4-104.1v207.9zm79.8.3V151.6c41.7 16.2 71.4 56.7 71.4 104.1s-29.7 87.9-71.4 104.1zM528 32H48C21.5 32 0 53.5 0 80v352c0 26.5 21.5 48 48 48h480c26.5 0 48-21.5 48-48V80c0-26.5-21.5-48-48-48zM329.7 448h-90.3c-106.2 0-193.8-85.5-193.8-190.2C45.6 143.2 133.2 64 239.4 64h90.3c105 0 200.7 79.2 200.7 193.8 0 104.7-95.7 190.2-200.7 190.2z"],"cc-discover":[576,512,[],"f1f2","M83 212.1c0 7.9-3.2 15.5-8.9 20.7-4.9 4.4-11.6 6.4-21.9 6.4H48V185h4.2c10.3 0 16.7 1.7 21.9 6.6 5.7 5 8.9 12.6 8.9 20.5zM504.8 184h-4.9v24.9h4.7c10.3 0 15.8-4.4 15.8-12.8 0-7.9-5.5-12.1-15.6-12.1zM576 80v352c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V80c0-26.5 21.5-48 48-48h480c26.5 0 48 21.5 48 48zM428 253h45.3v-13.8H444V217h28.3v-13.8H444V185h29.3v-14H428v82zm-86.2-82l35 84.2h8.6l35.5-84.2h-17.5l-22.2 55.2-21.9-55.2h-17.5zm-83 41.6c0 24.6 19.9 44.6 44.6 44.6 24.6 0 44.6-19.9 44.6-44.6 0-24.6-19.9-44.6-44.6-44.6-24.6 0-44.6 19.9-44.6 44.6zm-68-.5c0 32.5 33.6 52.5 63.3 38.2v-19c-19.3 19.3-46.8 5.8-46.8-19.2 0-23.7 26.7-39.1 46.8-19v-19c-30.2-15-63.3 6.8-63.3 38zm-33.9 28.3c-7.6 0-13.8-3.7-17.5-10.8l-10.3 9.9c17.8 26.1 56.6 18.2 56.6-11.3 0-13.1-5.4-19-23.6-25.6-9.6-3.4-12.3-5.9-12.3-10.3 0-8.7 14.5-14.1 24.9-2.5l8.4-10.8c-19.1-17.1-49.7-8.9-49.7 14.3 0 11.3 5.2 17.2 20.2 22.7 25.7 9.1 14.7 24.4 3.3 24.4zm-57.4-28.3c0-24.1-18-41.1-44.1-41.1H32v82h23.4c30.9 0 44.1-22.4 44.1-40.9zm23.4-41.1h-16v82h16v-82zM544 288c-33.3 20.8-226.4 124.4-416 160h401c8.2 0 15-6.8 15-15V288zm0-35l-25.9-34.5c12.1-2.5 18.7-10.6 18.7-23.2 0-28.5-30.3-24.4-52.9-24.4v82h16v-32.8h2.2l22.2 32.8H544z"],"cc-jcb":[576,512,[],"f24b","M431.5 244.3V212c41.2 0 38.5.2 38.5.2 7.3 1.3 13.3 7.3 13.3 16 0 8.8-6 14.5-13.3 15.8-1.2.4-3.3.3-38.5.3zm42.8 20.2c-2.8-.7-3.3-.5-42.8-.5v35c39.6 0 40 .2 42.8-.5 7.5-1.5 13.5-8 13.5-17 0-8.7-6-15.5-13.5-17zM576 80v352c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V80c0-26.5 21.5-48 48-48h480c26.5 0 48 21.5 48 48zM182 192.3h-57c0 67.1 10.7 109.7-35.8 109.7-19.5 0-38.8-5.7-57.2-14.8v28c30 8.3 68 8.3 68 8.3 97.9 0 82-47.7 82-131.2zm178.5 4.5c-63.4-16-165-14.9-165 59.3 0 77.1 108.2 73.6 165 59.2V287C312.9 311.7 253 309 253 256s59.8-55.6 107.5-31.2v-28zM544 286.5c0-18.5-16.5-30.5-38-32v-.8c19.5-2.7 30.3-15.5 30.3-30.2 0-19-15.7-30-37-31 0 0 6.3-.3-120.3-.3v127.5h122.7c24.3.1 42.3-12.9 42.3-33.2z"],"cc-mastercard":[576,512,[],"f1f1","M482.9 410.3c0 6.8-4.6 11.7-11.2 11.7-6.8 0-11.2-5.2-11.2-11.7 0-6.5 4.4-11.7 11.2-11.7 6.6 0 11.2 5.2 11.2 11.7zm-310.8-11.7c-7.1 0-11.2 5.2-11.2 11.7 0 6.5 4.1 11.7 11.2 11.7 6.5 0 10.9-4.9 
10.9-11.7-.1-6.5-4.4-11.7-10.9-11.7zm117.5-.3c-5.4 0-8.7 3.5-9.5 8.7h19.1c-.9-5.7-4.4-8.7-9.6-8.7zm107.8.3c-6.8 0-10.9 5.2-10.9 11.7 0 6.5 4.1 11.7 10.9 11.7 6.8 0 11.2-4.9 11.2-11.7 0-6.5-4.4-11.7-11.2-11.7zm105.9 26.1c0 .3.3.5.3 1.1 0 .3-.3.5-.3 1.1-.3.3-.3.5-.5.8-.3.3-.5.5-1.1.5-.3.3-.5.3-1.1.3-.3 0-.5 0-1.1-.3-.3 0-.5-.3-.8-.5-.3-.3-.5-.5-.5-.8-.3-.5-.3-.8-.3-1.1 0-.5 0-.8.3-1.1 0-.5.3-.8.5-1.1.3-.3.5-.3.8-.5.5-.3.8-.3 1.1-.3.5 0 .8 0 1.1.3.5.3.8.3 1.1.5s.2.6.5 1.1zm-2.2 1.4c.5 0 .5-.3.8-.3.3-.3.3-.5.3-.8 0-.3 0-.5-.3-.8-.3 0-.5-.3-1.1-.3h-1.6v3.5h.8V426h.3l1.1 1.4h.8l-1.1-1.3zM576 81v352c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V81c0-26.5 21.5-48 48-48h480c26.5 0 48 21.5 48 48zM64 220.6c0 76.5 62.1 138.5 138.5 138.5 27.2 0 53.9-8.2 76.5-23.1-72.9-59.3-72.4-171.2 0-230.5-22.6-15-49.3-23.1-76.5-23.1-76.4-.1-138.5 62-138.5 138.2zm224 108.8c70.5-55 70.2-162.2 0-217.5-70.2 55.3-70.5 162.6 0 217.5zm-142.3 76.3c0-8.7-5.7-14.4-14.7-14.7-4.6 0-9.5 1.4-12.8 6.5-2.4-4.1-6.5-6.5-12.2-6.5-3.8 0-7.6 1.4-10.6 5.4V392h-8.2v36.7h8.2c0-18.9-2.5-30.2 9-30.2 10.2 0 8.2 10.2 8.2 30.2h7.9c0-18.3-2.5-30.2 9-30.2 10.2 0 8.2 10 8.2 30.2h8.2v-23zm44.9-13.7h-7.9v4.4c-2.7-3.3-6.5-5.4-11.7-5.4-10.3 0-18.2 8.2-18.2 19.3 0 11.2 7.9 19.3 18.2 19.3 5.2 0 9-1.9 11.7-5.4v4.6h7.9V392zm40.5 25.6c0-15-22.9-8.2-22.9-15.2 0-5.7 11.9-4.8 18.5-1.1l3.3-6.5c-9.4-6.1-30.2-6-30.2 8.2 0 14.3 22.9 8.3 22.9 15 0 6.3-13.5 5.8-20.7.8l-3.5 6.3c11.2 7.6 32.6 6 32.6-7.5zm35.4 9.3l-2.2-6.8c-3.8 2.1-12.2 4.4-12.2-4.1v-16.6h13.1V392h-13.1v-11.2h-8.2V392h-7.6v7.3h7.6V416c0 17.6 17.3 14.4 22.6 10.9zm13.3-13.4h27.5c0-16.2-7.4-22.6-17.4-22.6-10.6 0-18.2 7.9-18.2 19.3 0 20.5 22.6 23.9 33.8 14.2l-3.8-6c-7.8 6.4-19.6 5.8-21.9-4.9zm59.1-21.5c-4.6-2-11.6-1.8-15.2 4.4V392h-8.2v36.7h8.2V408c0-11.6 9.5-10.1 12.8-8.4l2.4-7.6zm10.6 18.3c0-11.4 11.6-15.1 20.7-8.4l3.8-6.5c-11.6-9.1-32.7-4.1-32.7 15 0 19.8 22.4 23.8 32.7 15l-3.8-6.5c-9.2 6.5-20.7 2.6-20.7-8.6zm66.7-18.3H408v4.4c-8.3-11-29.9-4.8-29.9 13.9 0 19.2 22.4 24.7 29.9 13.9v4.6h8.2V392zm33.7 0c-2.4-1.2-11-2.9-15.2 4.4V392h-7.9v36.7h7.9V408c0-11 9-10.3 12.8-8.4l2.4-7.6zm40.3-14.9h-7.9v19.3c-8.2-10.9-29.9-5.1-29.9 13.9 0 19.4 22.5 24.6 29.9 13.9v4.6h7.9v-51.7zm7.6-75.1v4.6h.8V302h1.9v-.8h-4.6v.8h1.9zm6.6 123.8c0-.5 0-1.1-.3-1.6-.3-.3-.5-.8-.8-1.1-.3-.3-.8-.5-1.1-.8-.5 0-1.1-.3-1.6-.3-.3 0-.8.3-1.4.3-.5.3-.8.5-1.1.8-.5.3-.8.8-.8 1.1-.3.5-.3 1.1-.3 1.6 0 .3 0 .8.3 1.4 0 .3.3.8.8 1.1.3.3.5.5 1.1.8.5.3 1.1.3 1.4.3.5 0 1.1 0 1.6-.3.3-.3.8-.5 1.1-.8.3-.3.5-.8.8-1.1.3-.6.3-1.1.3-1.4zm3.2-124.7h-1.4l-1.6 3.5-1.6-3.5h-1.4v5.4h.8v-4.1l1.6 3.5h1.1l1.4-3.5v4.1h1.1v-5.4zm4.4-80.5c0-76.2-62.1-138.3-138.5-138.3-27.2 0-53.9 8.2-76.5 23.1 72.1 59.3 73.2 171.5 0 230.5 22.6 15 49.5 23.1 76.5 23.1 76.4.1 138.5-61.9 138.5-138.4z"],"cc-paypal":[576,512,[],"f1f4","M186.3 258.2c0 12.2-9.7 21.5-22 21.5-9.2 0-16-5.2-16-15 0-12.2 9.5-22 21.7-22 9.3 0 16.3 5.7 16.3 15.5zM80.5 209.7h-4.7c-1.5 0-3 1-3.2 2.7l-4.3 26.7 8.2-.3c11 0 19.5-1.5 21.5-14.2 2.3-13.4-6.2-14.9-17.5-14.9zm284 0H360c-1.8 0-3 1-3.2 2.7l-4.2 26.7 8-.3c13 0 22-3 22-18-.1-10.6-9.6-11.1-18.1-11.1zM576 80v352c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V80c0-26.5 21.5-48 48-48h480c26.5 0 48 21.5 48 48zM128.3 215.4c0-21-16.2-28-34.7-28h-40c-2.5 0-5 2-5.2 4.7L32 294.2c-.3 2 1.2 4 3.2 4h19c2.7 0 5.2-2.9 5.5-5.7l4.5-26.6c1-7.2 13.2-4.7 18-4.7 28.6 0 46.1-17 46.1-45.8zm84.2 8.8h-19c-3.8 0-4 5.5-4.2 8.2-5.8-8.5-14.2-10-23.7-10-24.5 0-43.2 21.5-43.2 45.2 0 19.5 12.2 32.2 31.7 32.2 9 0 20.2-4.9 26.5-11.9-.5 1.5-1 4.7-1 6.2 0 2.3 1 4 3.2 4H200c2.7 0 5-2.9 
5.5-5.7l10.2-64.3c.3-1.9-1.2-3.9-3.2-3.9zm40.5 97.9l63.7-92.6c.5-.5.5-1 .5-1.7 0-1.7-1.5-3.5-3.2-3.5h-19.2c-1.7 0-3.5 1-4.5 2.5l-26.5 39-11-37.5c-.8-2.2-3-4-5.5-4h-18.7c-1.7 0-3.2 1.8-3.2 3.5 0 1.2 19.5 56.8 21.2 62.1-2.7 3.8-20.5 28.6-20.5 31.6 0 1.8 1.5 3.2 3.2 3.2h19.2c1.8-.1 3.5-1.1 4.5-2.6zm159.3-106.7c0-21-16.2-28-34.7-28h-39.7c-2.7 0-5.2 2-5.5 4.7l-16.2 102c-.2 2 1.3 4 3.2 4h20.5c2 0 3.5-1.5 4-3.2l4.5-29c1-7.2 13.2-4.7 18-4.7 28.4 0 45.9-17 45.9-45.8zm84.2 8.8h-19c-3.8 0-4 5.5-4.3 8.2-5.5-8.5-14-10-23.7-10-24.5 0-43.2 21.5-43.2 45.2 0 19.5 12.2 32.2 31.7 32.2 9.3 0 20.5-4.9 26.5-11.9-.3 1.5-1 4.7-1 6.2 0 2.3 1 4 3.2 4H484c2.7 0 5-2.9 5.5-5.7l10.2-64.3c.3-1.9-1.2-3.9-3.2-3.9zm47.5-33.3c0-2-1.5-3.5-3.2-3.5h-18.5c-1.5 0-3 1.2-3.2 2.7l-16.2 104-.3.5c0 1.8 1.5 3.5 3.5 3.5h16.5c2.5 0 5-2.9 5.2-5.7L544 191.2v-.3zm-90 51.8c-12.2 0-21.7 9.7-21.7 22 0 9.7 7 15 16.2 15 12 0 21.7-9.2 21.7-21.5.1-9.8-6.9-15.5-16.2-15.5z"],"cc-stripe":[576,512,[],"f1f5","M396.9 256.5c0 19.1-8.8 33.4-21.9 33.4-8.3 0-13.3-3-16.8-6.7l-.2-52.8c3.7-4.1 8.8-7 17-7 12.9-.1 21.9 14.5 21.9 33.1zM576 80v352c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V80c0-26.5 21.5-48 48-48h480c26.5 0 48 21.5 48 48zM122.2 281.1c0-42.3-54.3-34.7-54.3-50.7 0-5.5 4.6-7.7 12.1-7.7 10.8 0 24.5 3.3 35.3 9.1v-33.4c-11.8-4.7-23.5-6.5-35.3-6.5-28.8 0-48 15-48 40.2 0 39.3 54 32.9 54 49.9 0 6.6-5.7 8.7-13.6 8.7-11.8 0-26.9-4.9-38.9-11.3v33.9c13.2 5.7 26.6 8.1 38.8 8.1 29.6-.2 49.9-14.7 49.9-40.3zm68.9-86.9h-27v-30.8l-34.7 7.4-.2 113.9c0 21 15.8 36.5 36.9 36.5 11.6 0 20.2-2.1 24.9-4.7v-28.9c-4.5 1.8-27 8.3-27-12.6v-50.5h27v-30.3zm73.8 0c-4.7-1.7-21.3-4.8-29.6 10.5l-2.2-10.5h-30.7v124.5h35.5v-84.4c8.4-11 22.6-8.9 27.1-7.4v-32.7zm44.2 0h-35.7v124.5h35.7V194.2zm0-47.3l-35.7 7.6v28.9l35.7-7.6v-28.9zm122.7 108.8c0-41.3-23.5-63.8-48.4-63.8-13.9 0-22.9 6.6-27.8 11.1l-1.8-8.8h-31.3V360l35.5-7.5.1-40.2c5.1 3.7 12.7 9 25.1 9 25.4-.1 48.6-20.5 48.6-65.6zm112.2 1.2c0-36.4-17.6-65.1-51.3-65.1-33.8 0-54.3 28.7-54.3 64.9 0 42.8 24.2 64.5 58.8 64.5 17 0 29.7-3.9 39.4-9.2v-28.6c-9.7 4.9-20.8 7.9-34.9 7.9-13.8 0-26-4.9-27.6-21.5h69.5c.1-2 .4-9.4.4-12.9zm-51.6-36.1c-8.9 0-18.7 6.7-18.7 22.7h36.7c0-16-9.3-22.7-18-22.7z"],"cc-visa":[576,512,[],"f1f0","M470.1 231.3s7.6 37.2 9.3 45H446c3.3-8.9 16-43.5 16-43.5-.2.3 3.3-9.1 5.3-14.9l2.8 13.4zM576 80v352c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V80c0-26.5 21.5-48 48-48h480c26.5 0 48 21.5 48 48zM152.5 331.2L215.7 176h-42.5l-39.3 106-4.3-21.5-14-71.4c-2.3-9.9-9.4-12.7-18.2-13.1H32.7l-.7 3.1c15.8 4 29.9 9.8 42.2 17.1l35.8 135h42.5zm94.4.2L272.1 176h-40.2l-25.1 155.4h40.1zm139.9-50.8c.2-17.7-10.6-31.2-33.7-42.3-14.1-7.1-22.7-11.9-22.7-19.2.2-6.6 7.3-13.4 23.1-13.4 13.1-.3 22.7 2.8 29.9 5.9l3.6 1.7 5.5-33.6c-7.9-3.1-20.5-6.6-36-6.6-39.7 0-67.6 21.2-67.8 51.4-.3 22.3 20 34.7 35.2 42.2 15.5 7.6 20.8 12.6 20.8 19.3-.2 10.4-12.6 15.2-24.1 15.2-16 0-24.6-2.5-37.7-8.3l-5.3-2.5-5.6 34.9c9.4 4.3 26.8 8.1 44.8 8.3 42.2.1 69.7-20.8 70-53zM528 331.4L495.6 176h-31.1c-9.6 0-16.9 2.8-21 12.9l-59.7 142.5H426s6.9-19.2 8.4-23.3H486c1.2 5.5 4.8 23.3 4.8 23.3H528z"],centercode:[512,512,[],"f380","M329.2 268.6c-3.8 35.2-35.4 60.6-70.6 56.8-35.2-3.8-60.6-35.4-56.8-70.6 3.8-35.2 35.4-60.6 70.6-56.8 35.1 3.8 60.6 35.4 56.8 70.6zm-85.8 235.1C96.7 496-8.2 365.5 10.1 224.3c11.2-86.6 65.8-156.9 139.1-192 161-77.1 349.7 37.4 354.7 216.6 4.1 147-118.4 262.2-260.5 254.8zm179.9-180c27.9-118-160.5-205.9-237.2-234.2-57.5 56.3-69.1 188.6-33.8 344.4 68.8 15.8 169.1-26.4 271-110.2z"],chrome:[496,512,[],"f268","M131.5 217.5L55.1 100.1c47.6-59.2 119-91.8 192-92.1 
42.3-.3 85.5 10.5 124.8 33.2 43.4 25.2 76.4 61.4 97.4 103L264 133.4c-58.1-3.4-113.4 29.3-132.5 84.1zm32.9 38.5c0 46.2 37.4 83.6 83.6 83.6s83.6-37.4 83.6-83.6-37.4-83.6-83.6-83.6-83.6 37.3-83.6 83.6zm314.9-89.2L339.6 174c37.9 44.3 38.5 108.2 6.6 157.2L234.1 503.6c46.5 2.5 94.4-7.7 137.8-32.9 107.4-62 150.9-192 107.4-303.9zM133.7 303.6L40.4 120.1C14.9 159.1 0 205.9 0 256c0 124 90.8 226.7 209.5 244.9l63.7-124.8c-57.6 10.8-113.2-20.8-139.5-72.5z"],cloudscale:[448,512,[],"f383","M318.1 154l-9.4 7.6c-22.5-19.3-51.5-33.6-83.3-33.6C153.8 128 96 188.8 96 260.3c0 6.6.4 13.1 1.4 19.4-2-56 41.8-97.4 92.6-97.4 24.2 0 46.2 9.4 62.6 24.7l-25.2 20.4c-8.3-.9-16.8 1.8-23.1 8.1-11.1 11-11.1 28.9 0 40 11.1 11 28.9 11 40 0 6.3-6.3 9-14.9 8.1-23.1l75.2-88.8c6.3-6.5-3.3-15.9-9.5-9.6zm-83.8 111.5c-5.6 5.5-14.6 5.5-20.2 0-5.6-5.6-5.6-14.6 0-20.2s14.6-5.6 20.2 0 5.6 14.7 0 20.2zM224 32C100.5 32 0 132.5 0 256s100.5 224 224 224 224-100.5 224-224S347.5 32 224 32zm0 384c-88.2 0-160-71.8-160-160S135.8 96 224 96s160 71.8 160 160-71.8 160-160 160z"],cloudsmith:[332,512,[],"f384","M332.5 419.9c0 46.4-37.6 84.1-84 84.1s-84-37.7-84-84.1 37.6-84 84-84 84 37.6 84 84zm-84-243.9c46.4 0 80-37.6 80-84s-33.6-84-80-84-88 37.6-88 84-29.6 76-76 76-84 41.6-84 88 37.6 80 84 80 84-33.6 84-80 33.6-80 80-80z"],cloudversify:[616,512,[],"f385","M148.6 304c8.2 68.5 67.4 115.5 146 111.3 51.2 43.3 136.8 45.8 186.4-5.6 69.2 1.1 118.5-44.6 131.5-99.5 14.8-62.5-18.2-132.5-92.1-155.1-33-88.1-131.4-101.5-186.5-85-57.3 17.3-84.3 53.2-99.3 109.7-7.8 2.7-26.5 8.9-45 24.1 11.7 0 15.2 8.9 15.2 19.5v20.4c0 10.7-8.7 19.5-19.5 19.5h-20.2c-10.7 0-19.5-6-19.5-16.7V240H98.8C95 240 88 244.3 88 251.9v40.4c0 6.4 5.3 11.8 11.7 11.8h48.9zm227.4 8c-10.7 46.3 21.7 72.4 55.3 86.8C324.1 432.6 259.7 348 296 288c-33.2 21.6-33.7 71.2-29.2 92.9-17.9-12.4-53.8-32.4-57.4-79.8-3-39.9 21.5-75.7 57-93.9C297 191.4 369.9 198.7 400 248c-14.1-48-53.8-70.1-101.8-74.8 30.9-30.7 64.4-50.3 114.2-43.7 69.8 9.3 133.2 82.8 67.7 150.5 35-16.3 48.7-54.4 47.5-76.9l10.5 19.6c11.8 22 15.2 47.6 9.4 72-9.2 39-40.6 68.8-79.7 76.5-32.1 6.3-83.1-5.1-91.8-59.2zM128 208H88.2c-8.9 0-16.2-7.3-16.2-16.2v-39.6c0-8.9 7.3-16.2 16.2-16.2H128c8.9 0 16.2 7.3 16.2 16.2v39.6c0 8.9-7.3 16.2-16.2 16.2zM10.1 168C4.5 168 0 163.5 0 157.9v-27.8c0-5.6 4.5-10.1 10.1-10.1h27.7c5.5 0 10.1 4.5 10.1 10.1v27.8c0 5.6-4.5 10.1-10.1 10.1H10.1zM168 142.7v-21.4c0-5.1 4.2-9.3 9.3-9.3h21.4c5.1 0 9.3 4.2 9.3 9.3v21.4c0 5.1-4.2 9.3-9.3 9.3h-21.4c-5.1 0-9.3-4.2-9.3-9.3zM56 235.5v25c0 6.3-5.1 11.5-11.4 11.5H19.4C13.1 272 8 266.8 8 260.5v-25c0-6.3 5.1-11.5 11.4-11.5h25.1c6.4 0 11.5 5.2 11.5 11.5z"],codepen:[512,512,[],"f1cb","M502.285 159.704l-234-156c-7.987-4.915-16.511-4.96-24.571 0l-234 156C3.714 163.703 0 170.847 0 177.989v155.999c0 7.143 3.714 14.286 9.715 18.286l234 156.022c7.987 4.915 16.511 4.96 24.571 0l234-156.022c6-3.999 9.715-11.143 9.715-18.286V177.989c-.001-7.142-3.715-14.286-9.716-18.285zM278 63.131l172.286 114.858-76.857 51.429L278 165.703V63.131zm-44 0v102.572l-95.429 63.715-76.857-51.429L234 63.131zM44 219.132l55.143 36.857L44 292.846v-73.714zm190 229.715L61.714 333.989l76.857-51.429L234 346.275v102.572zm22-140.858l-77.715-52 77.715-52 77.715 52-77.715 52zm22 140.858V346.275l95.429-63.715 76.857 51.429L278 448.847zm190-156.001l-55.143-36.857L468 219.132v73.714z"],codiepie:[472,512,[],"f284","M422.5 202.9c30.7 0 33.5 53.1-.3 53.1h-10.8v44.3h-26.6v-97.4h37.7zM472 352.6C429.9 444.5 350.4 504 248 504 111 504 0 393 0 256S111 8 248 8c97.4 0 172.8 53.7 218.2 138.4l-186 108.8L472 352.6zm-38.5 12.5l-60.3-30.7c-27.1 44.3-70.4 
71.4-122.4 71.4-82.5 0-149.2-66.7-149.2-148.9 0-82.5 66.7-149.2 149.2-149.2 48.4 0 88.9 23.5 116.9 63.4l59.5-34.6c-40.7-62.6-104.7-100-179.2-100-121.2 0-219.5 98.3-219.5 219.5S126.8 475.5 248 475.5c78.6 0 146.5-42.1 185.5-110.4z"],connectdevelop:[576,512,[],"f20e","M550.5 241l-50.089-86.786c1.071-2.142 1.875-4.553 1.875-7.232 0-8.036-6.696-14.733-14.732-15.001l-55.447-95.893c.536-1.607 1.071-3.214 1.071-4.821 0-8.571-6.964-15.268-15.268-15.268-4.821 0-8.839 2.143-11.786 5.625H299.518C296.839 18.143 292.821 16 288 16s-8.839 2.143-11.518 5.625H170.411C167.464 18.143 163.447 16 158.625 16c-8.303 0-15.268 6.696-15.268 15.268 0 1.607.536 3.482 1.072 4.821l-55.983 97.233c-5.356 2.41-9.107 7.5-9.107 13.661 0 .535.268 1.071.268 1.607l-53.304 92.143c-7.232 1.339-12.59 7.5-12.59 15 0 7.232 5.089 13.393 12.054 15l55.179 95.358c-.536 1.607-.804 2.946-.804 4.821 0 7.232 5.089 13.393 12.054 14.732l51.697 89.732c-.536 1.607-1.071 3.482-1.071 5.357 0 8.571 6.964 15.268 15.268 15.268 4.821 0 8.839-2.143 11.518-5.357h106.875C279.161 493.857 283.447 496 288 496s8.839-2.143 11.518-5.357h107.143c2.678 2.946 6.696 4.821 10.982 4.821 8.571 0 15.268-6.964 15.268-15.268 0-1.607-.267-2.946-.803-4.285l51.697-90.268c6.964-1.339 12.054-7.5 12.054-14.732 0-1.607-.268-3.214-.804-4.821l54.911-95.358c6.964-1.339 12.322-7.5 12.322-15-.002-7.232-5.092-13.393-11.788-14.732zM153.535 450.732l-43.66-75.803h43.66v75.803zm0-83.839h-43.66c-.268-1.071-.804-2.142-1.339-3.214l44.999-47.41v50.624zm0-62.411l-50.357 53.304c-1.339-.536-2.679-1.34-4.018-1.607L43.447 259.75c.535-1.339.535-2.679.535-4.018s0-2.41-.268-3.482l51.965-90c2.679-.268 5.357-1.072 7.768-2.679l50.089 51.965v92.946zm0-102.322l-45.803-47.41c1.339-2.143 2.143-4.821 2.143-7.767 0-.268-.268-.804-.268-1.072l43.928-15.804v72.053zm0-80.625l-43.66 15.804 43.66-75.536v59.732zm326.519 39.108l.804 1.339L445.5 329.125l-63.75-67.232 98.036-101.518.268.268zM291.75 355.107l11.518 11.786H280.5l11.25-11.786zm-.268-11.25l-83.303-85.446 79.553-84.375 83.036 87.589-79.286 82.232zm5.357 5.893l79.286-82.232 67.5 71.25-5.892 28.125H313.714l-16.875-17.143zM410.411 44.393c1.071.536 2.142 1.072 3.482 1.34l57.857 100.714v.536c0 2.946.803 5.624 2.143 7.767L376.393 256l-83.035-87.589L410.411 44.393zm-9.107-2.143L287.732 162.518l-57.054-60.268 166.339-60h4.287zm-123.483 0c2.678 2.678 6.16 4.285 10.179 4.285s7.5-1.607 10.179-4.285h75L224.786 95.821 173.893 42.25h103.928zm-116.249 5.625l1.071-2.142a33.834 33.834 0 0 0 2.679-.804l51.161 53.84-54.911 19.821V47.875zm0 79.286l60.803-21.964 59.732 63.214-79.553 84.107-40.982-42.053v-83.304zm0 92.678L198 257.607l-36.428 38.304v-76.072zm0 87.858l42.053-44.464 82.768 85.982-17.143 17.678H161.572v-59.196zm6.964 162.053c-1.607-1.607-3.482-2.678-5.893-3.482l-1.071-1.607v-89.732h99.91l-91.607 94.821h-1.339zm129.911 0c-2.679-2.41-6.428-4.285-10.447-4.285s-7.767 1.875-10.447 4.285h-96.429l91.607-94.821h38.304l91.607 94.821H298.447zm120-11.786l-4.286 7.5c-1.339.268-2.41.803-3.482 1.339l-89.196-91.875h114.376l-17.412 83.036zm12.856-22.232l12.858-60.803h21.964l-34.822 60.803zm34.822-68.839h-20.357l4.553-21.16 17.143 18.214c-.535.803-1.071 1.874-1.339 2.946zm66.161-107.411l-55.447 96.697c-1.339.535-2.679 1.071-4.018 1.874l-20.625-21.964 34.554-163.928 45.803 79.286c-.267 1.339-.803 2.678-.803 4.285 0 1.339.268 2.411.536 3.75z"],contao:[512,512,[],"f26d","M45.4 305c14.4 67.1 26.4 129 68.2 175H34c-18.7 0-34-15.2-34-34V66c0-18.7 15.2-34 34-34h57.7C77.9 44.6 65.6 59.2 54.8 75.6c-45.4 70-27 146.8-9.4 229.4zM478 32h-90.2c21.4 21.4 39.2 49.5 52.7 84.1l-137.1 
29.3c-14.9-29-37.8-53.3-82.6-43.9-24.6 5.3-41 19.3-48.3 34.6-8.8 18.7-13.2 39.8 8.2 140.3 21.1 100.2 33.7 117.7 49.5 131.2 12.9 11.1 33.4 17 58.3 11.7 44.5-9.4 55.7-40.7 57.4-73.2l137.4-29.6c3.2 71.5-18.7 125.2-57.4 163.6H478c18.7 0 34-15.2 34-34V66c0-18.8-15.2-34-34-34z"],cpanel:[640,512,[],"f388","M52.9 213.7h40l-6.2 23.6c-1.9 6.5-7.4 10.9-14.3 10.9H53.8c-24.9 0-24.7 37.4 0 37.4h11.3c4.2 0 7.6 3.9 6.4 8.3L64.4 320H52c-33.5 0-59-31.4-50.3-65.2 7.3-27 28.3-41.1 51.2-41.1M73.1 320L108 189.9c1.8-6.4 7.2-10.9 14.3-10.9h37c24.1 0 45.4 16.4 51 41.2 6.6 29.1-14.5 65.3-51.7 65.3h-32l6.4-23.8c1.8-6.2 7.3-10.8 14.3-10.8h10.3c12.4 0 20.8-11.7 18.3-22.6-2.1-9.2-9.9-14.8-18.3-14.8h-19.8L112 309.2c-1.9 6.2-7.4 10.7-14.2 10.7l-24.7.1m220.6-69.4c.3-1 1.9-5.3-2.1-5.3h-57.5c-9.7 0-16.6-8.9-14.2-18.5l3.5-13.4h77.9c18.8 0 33.3 17.6 28.5 36.8l-14 51.8c-2.8 10.6-12.2 17.8-23.4 17.8l-57.5-.2c-42.9 0-38.5-63.8.7-63.8H284l-3.5 13.2c-1.9 6.2-7.4 10.8-14.2 10.8h-21.6c-5.3 0-5.3 7.9 0 7.9h34.9c4.6 0 5.1-3.9 5.5-5.3l8.6-31.8m103.1-36.9c34.4 0 59.3 32.3 50.3 65.4l-8.8 33.1c-1.2 4.9-5.7 7.8-10.3 7.8h-19.1c-4.5 0-7.6-4-6.4-8.3l10.6-40c3.3-11.6-5.6-23.4-18.1-23.4h-19.8l-17.2 64c-1.2 4.8-5.6 7.8-10.4 7.8h-18.9c-4.2 0-7.6-3.9-6.4-8.3l26.2-98h48.3M498 251.6l-8 30c-.9 3.3 1.5 6.7 5.1 6.7h73.3l-5.7 21c-1.9 6.2-7.4 10.7-14.2 10.7h-66.7c-20 0-33.3-19-28.3-36.7l10.8-40c4.8-17.6 20.7-29.6 38.6-29.6h47.3c19 0 33.2 17.7 28.3 36.8l-3.2 12c-2.9 11-12.7 17.6-23.2 17.6h-53.4l3.5-13c1.6-6.2 7.2-10.8 14.2-10.8H538c2 0 3.3-1 3.9-3l.7-2.6c.7-2.7-1.3-5.1-3.9-5.1h-32.9c-4.1 0-6.9 2.1-7.8 6zm70.2 68.4l35.6-133.1c1.2-4.7 5.5-7.9 10.4-7.9h18.9c4.5 0 7.7 4 6.5 8.3l-26.5 98.2c-5.1 20.7-24.2 34.5-44.9 34.5"],"creative-commons":[512,512,[],"f25e","M255.547 8C392.884 8 504 114.439 504 256.004 504 405.979 381.106 504 255.562 504 122.319 504 8 394.557 8 256.004 8 124.825 113.486 8 255.547 8zm.899 44.734c-120.341 0-203.727 100.568-203.727 203.278 0 106.515 88.984 202.394 203.727 202.394 101.528 0 202.821-79.442 202.821-202.387-.001-114.773-91.773-203.285-202.821-203.285zm-3.108 162.093l-33.225 17.275c-5.395-11.203-15.25-19.926-27.459-19.926-22.134 0-33.217 14.609-33.217 43.842 0 23.842 9.446 43.842 33.217 43.842 14.469 0 24.653-7.091 30.566-21.259l30.551 15.5c-12.813 23.899-36.887 38.975-65.101 38.975-43.162 0-73.959-27.272-73.959-77.052 0-49.541 32.706-77.059 72.634-77.059 30.714-.013 52.701 11.946 65.993 35.862zm143.044 0l-32.775 17.275c-5.517-11.482-15.324-19.926-27.9-19.926-22.142 0-33.225 14.609-33.225 43.842 0 23.906 9.502 43.842 33.225 43.842 14.454 0 24.645-7.091 30.543-21.259l31 15.5c-13.363 23.869-37.451 38.975-65.086 38.975-43.439 0-73.959-26.988-73.959-77.052 0-49.523 32.698-77.059 72.626-77.059 30.706-.013 52.569 11.946 65.551 35.862z"],css3:[512,512,[],"f13c","M480 32l-64 368-223.3 80L0 400l19.6-94.8h82l-8 40.6L210 390.2l134.1-44.4 18.8-97.1H29.5l16-82h333.7l10.5-52.7H56.3l16.3-82H480z"],"css3-alt":[384,512,[],"f38b","M0 32l34.9 395.8L192 480l157.1-52.2L384 32H0zm313.1 80l-4.8 47.3L193 208.6l-.3.1h111.5l-12.8 146.6-98.2 28.7-98.8-29.2-6.4-73.9h48.9l3.2 38.3 52.6 13.3 54.7-15.4 3.7-61.6-166.3-.5v-.1l-.2.1-3.6-46.3L193.1 162l6.5-2.7H76.7L70.9 112h242.2z"],cuttlefish:[440,512,[],"f38c","M344 305.5c-17.5 31.6-57.4 54.5-96 54.5-56.6 0-104-47.4-104-104s47.4-104 104-104c38.6 0 78.5 22.9 96 54.5 13.7-50.9 41.7-93.3 87-117.8C385.7 39.1 320.5 8 248 8 111 8 0 119 0 256s111 248 248 248c72.5 0 137.7-31.1 183-80.7-45.3-24.5-73.3-66.9-87-117.8z"],"d-and-d":[576,512,[],"f38d","M82.5 98.9c-.6-17.2 2-33.8 12.7-48.2.3 7.4 1.2 14.5 4.2 21.6 5.9-27.5 
19.7-49.3 42.3-65.5-1.9 5.9-3.5 11.8-3 17.7 8.7-7.4 18.8-17.8 44.4-22.7 14.7-2.8 29.7-2 42.1 1 38.5 9.3 61 34.3 69.7 72.3 5.3 23.1.7 45-8.3 66.4-5.2 12.4-12 24.4-20.7 35.1-2-1.9-3.9-3.8-5.8-5.6-42.8-40.8-26.8-25.2-37.4-37.4-1.1-1.2-1-2.2-.1-3.6 8.3-13.5 11.8-28.2 10-44-1.1-9.8-4.3-18.9-11.3-26.2-14.5-15.3-39.2-15-53.5.6-11.4 12.5-14.1 27.4-10.9 43.6.2 1.3.4 2.7 0 3.9-3.4 13.7-4.6 27.6-2.5 41.6.1.5.1 1.1.1 1.6 0 .3-.1.5-.2 1.1-21.8-11-36-28.3-43.2-52.2-8.3 17.8-11.1 35.5-6.6 54.1-15.6-15.2-21.3-34.3-22-55.2zm469.6 123.2c-11.6-11.6-25-20.4-40.1-26.6-12.8-5.2-26-7.9-39.9-7.1-10 .6-19.6 3.1-29 6.4-2.5.9-5.1 1.6-7.7 2.2-4.9 1.2-7.3-3.1-4.7-6.8 3.2-4.6 3.4-4.2 15-12 .6-.4 1.2-.8 2.2-1.5h-2.5c-.6 0-1.2.2-1.9.3-19.3 3.3-30.7 15.5-48.9 29.6-10.4 8.1-13.8 3.8-12-.5 1.4-3.5 3.3-6.7 5.1-10 1-1.8 2.3-3.4 3.5-5.1-.2-.2-.5-.3-.7-.5-27 18.3-46.7 42.4-57.7 73.3.3.3.7.6 1 .9.3-.6.5-1.2.9-1.7 10.4-12.1 22.8-21.8 36.6-29.8 18.2-10.6 37.5-18.3 58.7-20.2 4.3-.4 8.7-.1 13.1-.1-1.8.7-3.5.9-5.3 1.1-18.5 2.4-35.5 9-51.5 18.5-30.2 17.9-54.5 42.2-75.1 70.4-.3.4-.4.9-.7 1.3 14.5 5.3 24 17.3 36.1 25.6.2-.1.3-.2.4-.4l1.2-2.7c12.2-26.9 27-52.3 46.7-74.5 16.7-18.8 38-25.3 62.5-20 5.9 1.3 11.4 4.4 17.2 6.8 2.3-1.4 5.1-3.2 8-4.7 8.4-4.3 17.4-7 26.7-9 14.7-3.1 29.5-4.9 44.5-1.3v-.5c-.5-.4-1.2-.8-1.7-1.4zM316.7 397.6c-39.4-33-22.8-19.5-42.7-35.6-.8.9 0-.2-1.9 3-11.2 19.1-25.5 35.3-44 47.6-10.3 6.8-21.5 11.8-34.1 11.8-21.6 0-38.2-9.5-49.4-27.8-12-19.5-13.3-40.7-8.2-62.6 7.8-33.8 30.1-55.2 38.6-64.3-18.7-6.2-33 1.7-46.4 13.9.8-13.9 4.3-26.2 11.8-37.3-24.3 10.6-45.9 25-64.8 43.9-.3-5.8 5.4-43.7 5.6-44.7.3-2.7-.6-5.3-3-7.4-24.2 24.7-44.5 51.8-56.1 84.6 7.4-5.9 14.9-11.4 23.6-16.2-8.3 22.3-19.6 52.8-7.8 101.1 4.6 19 11.9 36.8 24.1 52.3 2.9 3.7 6.3 6.9 9.5 10.3.2-.2.4-.3.6-.5-1.4-7-2.2-14.1-1.5-21.9 2.2 3.2 3.9 6 5.9 8.6 12.6 16 28.7 27.4 47.2 35.6 25 11.3 51.1 13.3 77.9 8.6 54.9-9.7 90.7-48.6 116-98.8 1-1.8.6-2.9-.9-4.2zm172-46.4c-9.5-3.1-22.2-4.2-28.7-2.9 9.9 4 14.1 6.6 18.8 12 12.6 14.4 10.4 34.7-5.4 45.6-11.7 8.1-24.9 10.5-38.9 9.1-1.2-.1-2.3-.4-3-.6 2.8-3.7 6-7 8.1-10.8 9.4-16.8 5.4-42.1-8.7-56.1-2.1-2.1-4.6-3.9-7-5.9-.3 1.3-.1 2.1.1 2.8 4.2 16.6-8.1 32.4-24.8 31.8-7.6-.3-13.9-3.8-19.6-8.5-19.5-16.1-39.1-32.1-58.5-48.3-5.9-4.9-12.5-8.1-20.1-8.7-4.6-.4-9.3-.6-13.9-.9-5.9-.4-8.8-2.8-10.4-8.4-.9-3.4-1.5-6.8-2.2-10.2-1.5-8.1-6.2-13-14.3-14.2-4.4-.7-8.9-1-13.3-1.5-13-1.4-19.8-7.4-22.6-20.3-5 11-1.6 22.4 7.3 29.9 4.5 3.8 9.3 7.3 13.8 11.2 4.6 3.8 7.4 8.7 7.9 14.8.4 4.7.8 9.5 1.8 14.1 2.2 10.6 8.9 18.4 17 25.1 16.5 13.7 33 27.3 49.5 41.1 17.9 15 13.9 32.8 13 56-.9 22.9 12.2 42.9 33.5 51.2 1 .4 2 .6 3.6 1.1-15.7-18.2-10.1-44.1.7-52.3.3 2.2.4 4.3.9 6.4 9.4 44.1 45.4 64.2 85 56.9 16-2.9 30.6-8.9 42.9-19.8 2-1.8 3.7-4.1 5.9-6.5-19.3 4.6-35.8.1-50.9-10.6.7-.3 1.3-.3 1.9-.3 21.3 1.8 40.6-3.4 57-17.4 19.5-16.6 26.6-42.9 17.4-66-8.3-20.1-23.6-32.3-43.8-38.9zM99.4 179.3c-5.3-9.2-13.2-15.6-22.1-21.3 13.7-.5 26.6.2 39.6 3.7-7-12.2-8.5-24.7-5-38.7 5.3 11.9 13.7 20.1 23.6 26.8 19.7 13.2 35.7 19.6 46.7 30.2 3.4 3.3 6.3 7.1 9.6 10.9-.8-2.1-1.4-4.1-2.2-6-5-10.6-13-18.6-22.6-25-1.8-1.2-2.8-2.5-3.4-4.5-3.3-12.5-3-25.1-.7-37.6 1-5.5 2.8-10.9 4.5-16.3.8-2.4 2.3-4.6 4-6.6.6 6.9 0 25.5 19.6 46 10.8 11.3 22.4 21.9 33.9 32.7 9 8.5 18.3 16.7 25.5 26.8 1.1 1.6 2.2 3.3 3.8 4.7-5-13-14.2-24.1-24.2-33.8-9.6-9.3-19.4-18.4-29.2-27.4-3.3-3-4.6-6.7-5.1-10.9-1.2-10.4 0-20.6 4.3-30.2.5-1 1.1-2 1.9-3.3.5 4.2.6 7.9 1.4 11.6 4.8 23.1 20.4 36.3 49.3 63.5 10 9.4 19.3 19.2 25.6 31.6 4.8 9.3 7.3 19 5.7 29.6-.1.6.5 1.7 1.1 2 6.2 2.6 10 6.9 9.7 14.3 7.7-2.6 12.5-8 16.4-14.5 
4.2 20.2-9.1 50.3-27.2 58.7.4-4.5 5-23.4-16.5-27.7-6.8-1.3-12.8-1.3-22.9-2.1 4.7-9 10.4-20.6.5-22.4-24.9-4.6-52.8 1.9-57.8 4.6 8.2.4 16.3 1 23.5 3.3-2 6.5-4 12.7-5.8 18.9-1.9 6.5 2.1 14.6 9.3 9.6 1.2-.9 2.3-1.9 3.3-2.7-3.1 17.9-2.9 15.9-2.8 18.3.3 10.2 9.5 7.8 15.7 7.3-2.5 11.8-29.5 27.3-45.4 25.8 7-4.7 12.7-10.3 15.9-17.9-6.5.8-12.9 1.6-19.2 2.4l-.3-.9c4.7-3.4 8-7.8 10.2-13.1 8.7-21.1-3.6-38-25-39.9-9.1-.8-17.8.8-25.9 5.5 6.2-15.6 17.2-26.6 32.6-34.5-15.2-4.3-8.9-2.7-24.6-6.3 14.6-9.3 30.2-13.2 46.5-14.6-5.2-3.2-48.1-3.6-70.2 20.9 7.9 1.4 15.5 2.8 23.2 4.2-23.8 7-44 19.7-62.4 35.6 1.1-4.8 2.7-9.5 3.3-14.3.6-4.5.8-9.2.1-13.6-1.5-9.4-8.9-15.1-19.7-16.3-7.9-.9-15.6.1-23.3 1.3-.9.1-1.7.3-2.9 0 15.8-14.8 36-21.7 53.1-33.5 6-4.5 6.8-8.2 3-14.9zm128.4 26.8c3.3 16 12.6 25.5 23.8 24.3-4.6-11.3-12.1-19.5-23.8-24.3z"],dashcube:[384,512,[],"f210","M288.1 97.5H85.5C37.6 97.5 0 138.1 0 185.2v215.1C0 447.7 37.6 480 85.5 480h213c47.9 0 85.5-32.3 85.5-79.7V0l-95.9 97.5zm-161.9 293c-16.6 0-30.4-14.2-30.4-30.8v-134c0-16.6 13.8-30.5 30.4-30.5h131.9c16.6 0 30 13.9 30 30.5v115.7l47.9 49H126.2z"],delicious:[448,512,[],"f1a5","M446.5 68c-.4-1.5-.9-3-1.4-4.5-.9-2.5-2-4.8-3.3-7.1-1.4-2.4-3-4.8-4.7-6.9-2.1-2.5-4.4-4.8-6.9-6.8-1.1-.9-2.2-1.7-3.3-2.5-1.3-.9-2.6-1.7-4-2.4-1.8-1-3.6-1.8-5.5-2.5-1.7-.7-3.5-1.3-5.4-1.7-3.8-1-7.9-1.5-12-1.5H48C21.5 32 0 53.5 0 80v352c0 4.1.5 8.2 1.5 12 2 7.7 5.8 14.6 11 20.3 1 1.1 2.1 2.2 3.3 3.3 5.7 5.2 12.6 9 20.3 11 3.8 1 7.9 1.5 12 1.5h352c26.5 0 48-21.5 48-48V80c-.1-4.1-.6-8.2-1.6-12zM416 432c0 8.8-7.2 16-16 16H224V256H32V80c0-8.8 7.2-16 16-16h176v192h192v176z"],deploydog:[512,512,[],"f38e","M382.2 136h51.7v239.6h-51.7v-20.7c-19.8 24.8-52.8 24.1-73.8 14.7-26.2-11.7-44.3-38.1-44.3-71.8 0-29.8 14.8-57.9 43.3-70.8 20.2-9.1 52.7-10.6 74.8 12.9V136zm-64.7 161.8c0 18.2 13.6 33.5 33.2 33.5 19.8 0 33.2-16.4 33.2-32.9 0-17.1-13.7-33.2-33.2-33.2-19.6 0-33.2 16.4-33.2 32.6zM188.5 136h51.7v239.6h-51.7v-20.7c-19.8 24.8-52.8 24.1-73.8 14.7-26.2-11.7-44.3-38.1-44.3-71.8 0-29.8 14.8-57.9 43.3-70.8 20.2-9.1 52.7-10.6 74.8 12.9V136zm-64.7 161.8c0 18.2 13.6 33.5 33.2 33.5 19.8 0 33.2-16.4 33.2-32.9 0-17.1-13.7-33.2-33.2-33.2-19.7 0-33.2 16.4-33.2 32.6zM448 96c17.5 0 32 14.4 32 32v256c0 17.5-14.4 32-32 32H64c-17.5 0-32-14.4-32-32V128c0-17.5 14.4-32 32-32h384m0-32H64C28.8 64 0 92.8 0 128v256c0 35.2 28.8 64 64 64h384c35.2 0 64-28.8 64-64V128c0-35.2-28.8-64-64-64z"],deskpro:[480,512,[],"f38f","M205.9 512l31.1-38.4c12.3-.2 25.6-1.4 36.5-6.6 38.9-18.6 38.4-61.9 38.3-63.8-.1-5-.8-4.4-28.9-37.4H362c-.2 50.1-7.3 68.5-10.2 75.7-9.4 23.7-43.9 62.8-95.2 69.4-8.7 1.1-32.8 1.2-50.7 1.1zm200.4-167.7c38.6 0 58.5-13.6 73.7-30.9l-175.5-.3-17.4 31.3 119.2-.1zm-43.6-223.9v168.3h-73.5l-32.7 55.5H250c-52.3 0-58.1-56.5-58.3-58.9-1.2-13.2-21.3-11.6-20.1 1.8 1.4 15.8 8.8 40 26.4 57.1h-91c-25.5 0-110.8-26.8-107-114V16.9C0 .9 9.7.3 15 .1h82c.2 0 .3.1.5.1 4.3-.4 50.1-2.1 50.1 43.7 0 13.3 20.2 13.4 20.2 0 0-18.2-5.5-32.8-15.8-43.7h84.2c108.7-.4 126.5 79.4 126.5 120.2zm-132.5 56l64 29.3c13.3-45.5-42.2-71.7-64-29.3z"],deviantart:[320,512,[],"f1bd","M320 93.2l-98.2 179.1 7.4 9.5H320v127.7H159.1l-13.5 9.2-43.7 84c-.3 0-8.6 8.6-9.2 9.2H0v-93.2l93.2-179.4-7.4-9.2H0V102.5h156l13.5-9.2 43.7-84c.3 0 8.6-8.6 9.2-9.2H320v93.1z"],digg:[512,512,[],"f1a6","M81.7 172.3H0v174.4h132.7V96h-51v76.3zm0 133.4H50.9v-92.3h30.8v92.3zm297.2-133.4v174.4h81.8v28.5h-81.8V416H512V172.3H378.9zm81.8 133.4h-30.8v-92.3h30.8v92.3zm-235.6 41h82.1v28.5h-82.1V416h133.3V172.3H225.1v174.4zm51.2-133.3h30.8v92.3h-30.8v-92.3zM153.3 96h51.3v51h-51.3V96zm0 
76.3h51.3v174.4h-51.3V172.3z"],"digital-ocean":[512,512,[],"f391","M256 504v-96.1c101.8 0 180.8-100.9 141.7-208-14.3-39.6-46.1-71.4-85.8-85.7-107.1-38.8-208.1 39.9-208.1 141.7H8C8 93.7 164.9-32.8 335 20.3c74.2 23.3 133.6 82.4 156.6 156.6C544.8 347.2 418.6 504 256 504zm.3-191.4h-95.6v95.6h95.6v-95.6zm-95.6 95.6H87v73.6h73.7v-73.6zM87 346.6H25.4v61.6H87v-61.6z"],discord:[448,512,[],"f392","M297.216 243.2c0 15.616-11.52 28.416-26.112 28.416-14.336 0-26.112-12.8-26.112-28.416s11.52-28.416 26.112-28.416c14.592 0 26.112 12.8 26.112 28.416zm-119.552-28.416c-14.592 0-26.112 12.8-26.112 28.416s11.776 28.416 26.112 28.416c14.592 0 26.112-12.8 26.112-28.416.256-15.616-11.52-28.416-26.112-28.416zM448 52.736V512c-64.494-56.994-43.868-38.128-118.784-107.776l13.568 47.36H52.48C23.552 451.584 0 428.032 0 398.848V52.736C0 23.552 23.552 0 52.48 0h343.04C424.448 0 448 23.552 448 52.736zm-72.96 242.688c0-82.432-36.864-149.248-36.864-149.248-36.864-27.648-71.936-26.88-71.936-26.88l-3.584 4.096c43.52 13.312 63.744 32.512 63.744 32.512-60.811-33.329-132.244-33.335-191.232-7.424-9.472 4.352-15.104 7.424-15.104 7.424s21.248-20.224 67.328-33.536l-2.56-3.072s-35.072-.768-71.936 26.88c0 0-36.864 66.816-36.864 149.248 0 0 21.504 37.12 78.08 38.912 0 0 9.472-11.52 17.152-21.248-32.512-9.728-44.8-30.208-44.8-30.208 3.766 2.636 9.976 6.053 10.496 6.4 43.21 24.198 104.588 32.126 159.744 8.96 8.96-3.328 18.944-8.192 29.44-15.104 0 0-12.8 20.992-46.336 30.464 7.68 9.728 16.896 20.736 16.896 20.736 56.576-1.792 78.336-38.912 78.336-38.912z"],discourse:[448,512,[],"f393","M225.9 32C103.3 32 0 130.5 0 252.1 0 256 .1 480 .1 480l225.8-.2c122.7 0 222.1-102.3 222.1-223.9C448 134.3 348.6 32 225.9 32zM224 384c-19.4 0-37.9-4.3-54.4-12.1L88.5 392l22.9-75c-9.8-18.1-15.4-38.9-15.4-61 0-70.7 57.3-128 128-128s128 57.3 128 128-57.3 128-128 128z"],dochub:[416,512,[],"f394","M397.9 160H256V19.6L397.9 160zM304 192v130c0 66.8-36.5 100.1-113.3 100.1H96V84.8h94.7c12 0 23.1.8 33.1 2.5v-84C212.9 1.1 201.4 0 189.2 0H0v512h189.2C329.7 512 400 447.4 400 318.1V192h-96z"],docker:[640,512,[],"f395","M349.9 236.3h-66.1v-59.4h66.1v59.4zm0-204.3h-66.1v60.7h66.1V32zm78.2 144.8H362v59.4h66.1v-59.4zm-156.3-72.1h-66.1v60.1h66.1v-60.1zm78.1 0h-66.1v60.1h66.1v-60.1zm276.8 100c-14.4-9.7-47.6-13.2-73.1-8.4-3.3-24-16.7-44.9-41.1-63.7l-14-9.3-9.3 14c-18.4 27.8-23.4 73.6-3.7 103.8-8.7 4.7-25.8 11.1-48.4 10.7H2.4c-8.7 50.8 5.8 116.8 44 162.1 37.1 43.9 92.7 66.2 165.4 66.2 157.4 0 273.9-72.5 328.4-204.2 21.4.4 67.6.1 91.3-45.2 1.5-2.5 6.6-13.2 8.5-17.1l-13.3-8.9zm-511.1-27.9h-66v59.4h66.1v-59.4zm78.1 0h-66.1v59.4h66.1v-59.4zm78.1 0h-66.1v59.4h66.1v-59.4zm-78.1-72.1h-66.1v60.1h66.1v-60.1z"],draft2digital:[480,512,[],"f396","M369.9 425.4V371l47.1 27.2-47.1 27.2zM82.4 380.6c25.5-27.3 97.7-104.7 150.9-170 35.1-43.1 40.3-82.4 28.4-112.7-7.4-18.8-17.5-30.2-24.3-35.7 45.3 2.1 68 23.4 82.2 38.3 0 0 42.4 48.2 5.8 113.3-37 65.9-110.9 147.5-128.5 166.7H82.4zm51.8-219.2c0 12.4-10 22.4-22.4 22.4-12.4 0-22.4-10-22.4-22.4 0-12.4 10-22.4 22.4-22.4 12.4 0 22.4 10.1 22.4 22.4M336 315.9v64.7h-91.3c30.8-35 81.8-95.9 111.8-149.3 35.2-62.6 16.1-123.4-12.8-153.3-4.4-4.6-62.2-62.9-166-41.2-59.1 12.4-89.4 43.4-104.3 67.3-13.1 20.9-17 39.8-18.2 47.7-5.5 33 19.4 67.1 56.7 67.1 31.7 0 57.3-25.7 57.3-57.4 0-27.1-19.7-52.1-48-56.8 1.8-7.3 17.7-21.1 26.3-24.7 41.1-17.3 78 5.2 83.3 33.5 8.3 44.3-37.1 90.4-69.7 127.6C84.5 328.1 18.3 396.8 0 415.9l336-.1V480l144-81.9-144-82.2z"],dribbble:[512,512,[],"f17d","M256 8C119.252 8 8 119.252 8 256s111.252 248 248 248 248-111.252 248-248S392.748 8 256 8zm163.97 
114.366c29.503 36.046 47.369 81.957 47.835 131.955-6.984-1.477-77.018-15.682-147.502-6.818-5.752-14.041-11.181-26.393-18.617-41.614 78.321-31.977 113.818-77.482 118.284-83.523zM396.421 97.87c-3.81 5.427-35.697 48.286-111.021 76.519-34.712-63.776-73.185-116.168-79.04-124.008 67.176-16.193 137.966 1.27 190.061 47.489zm-230.48-33.25c5.585 7.659 43.438 60.116 78.537 122.509-99.087 26.313-186.36 25.934-195.834 25.809C62.38 147.205 106.678 92.573 165.941 64.62zM44.17 256.323c0-2.166.043-4.322.108-6.473 9.268.19 111.92 1.513 217.706-30.146 6.064 11.868 11.857 23.915 17.174 35.949-76.599 21.575-146.194 83.527-180.531 142.306C64.794 360.405 44.17 310.73 44.17 256.323zm81.807 167.113c22.127-45.233 82.178-103.622 167.579-132.756 29.74 77.283 42.039 142.053 45.189 160.638-68.112 29.013-150.015 21.053-212.768-27.882zm248.38 8.489c-2.171-12.886-13.446-74.897-41.152-151.033 66.38-10.626 124.7 6.768 131.947 9.055-9.442 58.941-43.273 109.844-90.795 141.978z"],"dribbble-square":[448,512,[],"f397","M90.2 228.2c8.9-42.4 37.4-77.7 75.7-95.7 3.6 4.9 28 38.8 50.7 79-64 17-120.3 16.8-126.4 16.7zM314.6 154c-33.6-29.8-79.3-41.1-122.6-30.6 3.8 5.1 28.6 38.9 51 80 48.6-18.3 69.1-45.9 71.6-49.4zM140.1 364c40.5 31.6 93.3 36.7 137.3 18-2-12-10-53.8-29.2-103.6-55.1 18.8-93.8 56.4-108.1 85.6zm98.8-108.2c-3.4-7.8-7.2-15.5-11.1-23.2C159.6 253 93.4 252.2 87.4 252c0 1.4-.1 2.8-.1 4.2 0 35.1 13.3 67.1 35.1 91.4 22.2-37.9 67.1-77.9 116.5-91.8zm34.9 16.3c17.9 49.1 25.1 89.1 26.5 97.4 30.7-20.7 52.5-53.6 58.6-91.6-4.6-1.5-42.3-12.7-85.1-5.8zm-20.3-48.4c4.8 9.8 8.3 17.8 12 26.8 45.5-5.7 90.7 3.4 95.2 4.4-.3-32.3-11.8-61.9-30.9-85.1-2.9 3.9-25.8 33.2-76.3 53.9zM448 80v352c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V80c0-26.5 21.5-48 48-48h352c26.5 0 48 21.5 48 48zm-64 176c0-88.2-71.8-160-160-160S64 167.8 64 256s71.8 160 160 160 160-71.8 160-160z"],dropbox:[528,512,[],"f16b","M264.4 116.3l-132 84.3 132 84.3-132 84.3L0 284.1l132.3-84.3L0 116.3 132.3 32l132.1 84.3zM131.6 395.7l132-84.3 132 84.3-132 84.3-132-84.3zm132.8-111.6l132-84.3-132-83.6L395.7 32 528 116.3l-132.3 84.3L528 284.8l-132.3 84.3-131.3-85z"],drupal:[448,512,[],"f1a9","M319.5 114.7c-22.2-14-43.5-19.5-64.7-33.5-13-8.8-31.3-30-46.5-48.3-2.7 29.3-11.5 41.2-22 49.5-21.3 17-34.8 22.2-53.5 32.3C117 123 32 181.5 32 290.5 32 399.7 123.8 480 225.8 480 327.5 480 416 406 416 294c0-112.3-83-171-96.5-179.3zm2.5 325.6c-20.1 20.1-90.1 28.7-116.7 4.2-4.8-4.8.3-12 6.5-12 0 0 17 13.3 51.5 13.3 27 0 46-7.7 54.5-14 6.1-4.6 8.4 4.3 4.2 8.5zm-54.5-52.6c8.7-3.6 29-3.8 36.8 1.3 4.1 2.8 16.1 18.8 6.2 23.7-8.4 4.2-1.2-15.7-26.5-15.7-14.7 0-19.5 5.2-26.7 11-7 6-9.8 8-12.2 4.7-6-8.2 15.9-22.3 22.4-25zM360 405c-15.2-1-45.5-48.8-65-49.5-30.9-.9-104.1 80.7-161.3 42-38.8-26.6-14.6-104.8 51.8-105.2 49.5-.5 83.8 49 108.5 48.5 21.3-.3 61.8-41.8 81.8-41.8 48.7 0 23.3 109.3-15.8 106z"],dyalog:[416,512,[],"f399","M0 32v119.2h64V96h107.2C284.6 96 352 176.2 352 255.9 352 332 293.4 416 171.2 416H0v64h171.2C331.9 480 416 367.3 416 255.9c0-58.7-22.1-113.4-62.3-154.3C308.9 56 245.7 32 171.2 32H0z"],earlybirds:[480,512,[],"f39a","M313.2 47.5c1.2-13 21.3-14 36.6-8.7.9.3 26.2 9.7 19 15.2-27.9-7.4-56.4 18.2-55.6-6.5zm-201 6.9c30.7-8.1 62 20 61.1-7.1-1.3-14.2-23.4-15.3-40.2-9.6-1 .3-28.7 10.5-20.9 16.7zM319.4 160c-8.8 0-16 7.2-16 16s7.2 16 16 16 16-7.2 16-16-7.2-16-16-16zm-159.7 0c-8.8 0-16 7.2-16 16s7.2 16 16 16 16-7.2 16-16-7.2-16-16-16zm318.5 163.2c-9.9 24-40.7 11-63.9-1.2-13.5 69.1-58.1 111.4-126.3 124.2.3.9-2-.1 24 1 33.6 1.4 63.8-3.1 97.4-8-19.8-13.8-11.4-37.1-9.8-38.1 1.4-.9 14.7 1.7 21.6 11.5 8.6-12.5 
28.4-14.8 30.2-13.6 1.6 1.1 6.6 20.9-6.9 34.6 4.7-.9 8.2-1.6 9.8-2.1 2.6-.8 17.7 11.3 3.1 13.3-14.3 2.3-22.6 5.1-47.1 10.8-45.9 10.7-85.9 11.8-117.7 12.8l1 11.6c3.8 18.1-23.4 24.3-27.6 6.2.8 17.9-27.1 21.8-28.4-1l-.5 5.3c-.7 18.4-28.4 17.9-28.3-.6-7.5 13.5-28.1 6.8-26.4-8.5l1.2-12.4c-36.7.9-59.7 3.1-61.8 3.1-20.9 0-20.9-31.6 0-31.6 2.4 0 27.7 1.3 63.2 2.8-61.1-15.5-103.7-55-114.9-118.2-25 12.8-57.5 26.8-68.2.8-10.5-25.4 21.5-42.6 66.8-73.4.7-6.6 1.6-13.3 2.7-19.8-14.4-19.6-11.6-36.3-16.1-60.4-16.8 2.4-23.2-9.1-23.6-23.1.3-7.3 2.1-14.9 2.4-15.4 1.1-1.8 10.1-2 12.7-2.6 6-31.7 50.6-33.2 90.9-34.5 19.7-21.8 45.2-41.5 80.9-48.3C203.3 29 215.2 8.5 216.2 8c1.7-.8 21.2 4.3 26.3 23.2 5.2-8.8 18.3-11.4 19.6-10.7 1.1.6 6.4 15-4.9 25.9 40.3 3.5 72.2 24.7 96 50.7 36.1 1.5 71.8 5.9 77.1 34 2.7.6 11.6.8 12.7 2.6.3.5 2.1 8.1 2.4 15.4-.5 13.9-6.8 25.4-23.6 23.1-3.2 17.3-2.7 32.9-8.7 47.7 2.4 11.7 4 23.8 4.8 36.4 37 25.4 70.3 42.5 60.3 66.9zM207.4 159.9c.9-44-37.9-42.2-78.6-40.3-21.7 1-38.9 1.9-45.5 13.9-11.4 20.9 5.9 92.9 23.2 101.2 9.8 4.7 73.4 7.9 86.3-7.1 8.2-9.4 15-49.4 14.6-67.7zm52 58.3c-4.3-12.4-6-30.1-15.3-32.7-2-.5-9-.5-11 0-10 2.8-10.8 22.1-17 37.2 15.4 0 19.3 9.7 23.7 9.7 4.3 0 6.3-11.3 19.6-14.2zm135.7-84.7c-6.6-12.1-24.8-12.9-46.5-13.9-40.2-1.9-78.2-3.8-77.3 40.3-.5 18.3 5 58.3 13.2 67.8 13 14.9 76.6 11.8 86.3 7.1 15.8-7.6 36.5-78.9 24.3-101.3z"],edge:[512,512,[],"f282","M25.714 228.163c.111-.162.23-.323.342-.485-.021.162-.045.323-.065.485h-.277zm460.572 15.508c0-44.032-7.754-84.465-28.801-122.405C416.498 47.879 343.912 8.001 258.893 8.001 118.962 7.724 40.617 113.214 26.056 227.679c42.429-61.312 117.073-121.376 220.375-124.966 0 0 109.666 0 99.419 104.957H169.997c6.369-37.386 18.554-58.986 34.339-78.926-75.048 34.893-121.85 96.096-120.742 188.315.83 71.448 50.124 144.836 120.743 171.976 83.357 31.847 192.776 7.2 240.132-21.324V363.307c-80.864 56.494-270.871 60.925-272.255-67.572h314.073v-52.064z"],ember:[640,512,[],"f423","M639.9 311.7c-1.1-10.7-10.7-6.8-10.7-6.8s-15.6 12.1-29.3 10.7c-13.7-1.3-9.4-32-9.4-32s3-28.1-5.1-30.4c-8.1-2.4-18 7.3-18 7.3s-12.4 13.7-18.3 31.2l-1.6.5s1.9-30.6-.3-37.6c-1.6-3.5-16.4-3.2-18.8 3-2.4 6.2-14.2 49.2-15 67.2 0 0-23.1 19.6-43.3 22.8-20.2 3.2-25-9.4-25-9.4s54.8-15.3 52.9-59.1c-1.9-43.8-44.2-27.6-49-24-4.6 3.5-29.4 18.4-36.6 59.7-.2 1.4-.7 7.5-.7 7.5s-21.2 14.2-33 18c0 0 33-55.6-7.3-80.9-18.3-11-32.8 12.1-32.8 12.1s54.5-60.7 42.5-112c-5.8-24.4-18-27.1-29.2-23.1-17 6.7-23.5 16.7-23.5 16.7s-22 32-27.1 79.5-12.6 105.1-12.6 105.1-10.5 10.2-20.2 10.7-5.4-28.7-5.4-28.7 7.5-44.6 7-52.1-1.1-11.6-9.9-14.2c-8.9-2.7-18.5 8.6-18.5 8.6s-25.5 38.7-27.7 44.6l-1.3 2.4-1.3-1.6s18-52.7.8-53.5-28.5 18.8-28.5 18.8-19.6 32.8-20.4 36.5l-1.3-1.6s8.1-38.2 6.4-47.6c-1.6-9.4-10.5-7.5-10.5-7.5s-11.3-1.3-14.2 5.9c-3 7.3-13.7 55.3-15 70.7 0 0-28.2 20.2-46.8 20.4s-16.7-11.8-16.7-11.8 68-23.3 49.4-69.2c-8.3-11.8-18-15.5-31.7-15.3-13.7.3-30.3 8.6-41.3 33.3-5.3 11.8-6.8 23-7.8 31.5 0 0-12.3 2.4-18.8-2.9-6.4-5.4-10 0-10 0s-11.2 13.9-.1 18.2c11 4.3 28.1 6.1 28.1 6.1 1.6 7.5 6.2 19.5 19.6 29.7 20.2 15.3 58.8-1.3 58.8-1.3l15.9-8.8s.5 14.6 12.1 16.7c11.6 2.1 16.4 1 36.5-47.9 11.8-25 12.6-23.6 12.6-23.6l1.3-.3s-9.1 46.8-5.6 59.7c3.5 12.9 18.8 11.6 18.8 11.6s8.3 2.4 15-21.2c6.7-23.6 19.6-49.9 19.6-49.9h1.6s-5.6 48.1 3 63.7c8.6 15.6 30.9 5.3 30.9 5.3s15.6-7.8 18-10.2c0 0 18.5 15.8 44.6 12.9 58.3-11.5 79.1-25.9 79.1-25.9s10 24.4 41.1 26.7c35.5 2.7 54.8-18.6 54.8-18.6s-.3 13.5 12.1 18.6c12.4 5.1 20.7-22.8 20.7-22.8l20.7-57.2h1.9s1.1 37.3 21.5 43.2c20.4 5.9 47-13.7 47-13.7s6.4-3.7 5.3-14.4zm-578 5.3c.8-32 
21.8-45.9 29-39 7.3 7 4.6 22-9.1 31.4-13.7 9.5-19.9 7.6-19.9 7.6zm272.8-123.9s19.1-49.7 23.6-25.5-40 96.2-40 96.2c.5-16.1 16.4-70.7 16.4-70.7zm22.8 138.4c-12.6 33-43.3 19.6-43.3 19.6s-3.5-11.8 6.4-44.9 33.3-20.2 33.3-20.2 16.2 12.5 3.6 45.5zm84.6-14.5s-3-10.5 8.1-30.6c11-20.2 19.6-9.1 19.6-9.1s9.4 10.2-1.3 25.5c-10.8 15.3-26.4 14.2-26.4 14.2z"],empire:[496,512,[],"f1d1","M287.6 54.2c-10.8-2.2-22.1-3.3-33.5-3.6V32.4c78.1 2.2 146.1 44 184.6 106.6l-15.8 9.1c-6.1-9.7-12.7-18.8-20.2-27.1l-18 15.5c-26-29.6-61.4-50.7-101.9-58.4l4.8-23.9zM53.4 322.4l23-7.7c-6.4-18.3-10-38.2-10-58.7s3.3-40.4 9.7-58.7l-22.7-7.7c3.6-10.8 8.3-21.3 13.6-31l-15.8-9.1C34 181 24.1 217.5 24.1 256s10 75 27.1 106.6l15.8-9.1c-5.3-10-9.7-20.3-13.6-31.1zM213.1 434c-40.4-8-75.8-29.1-101.9-58.7l-18 15.8c-7.5-8.6-14.4-17.7-20.2-27.4l-16 9.4c38.5 62.3 106.8 104.3 184.9 106.6v-18.3c-11.3-.3-22.7-1.7-33.5-3.6l4.7-23.8zM93.3 120.9l18 15.5c26-29.6 61.4-50.7 101.9-58.4l-4.7-23.8c10.8-2.2 22.1-3.3 33.5-3.6V32.4C163.9 34.6 95.9 76.4 57.4 139l15.8 9.1c6-9.7 12.6-18.9 20.1-27.2zm309.4 270.2l-18-15.8c-26 29.6-61.4 50.7-101.9 58.7l4.7 23.8c-10.8 1.9-22.1 3.3-33.5 3.6v18.3c78.1-2.2 146.4-44.3 184.9-106.6l-16.1-9.4c-5.7 9.7-12.6 18.8-20.1 27.4zM496 256c0 137-111 248-248 248S0 393 0 256 111 8 248 8s248 111 248 248zm-12.2 0c0-130.1-105.7-235.8-235.8-235.8S12.2 125.9 12.2 256 117.9 491.8 248 491.8 483.8 386.1 483.8 256zm-39-106.6l-15.8 9.1c5.3 9.7 10 20.2 13.6 31l-22.7 7.7c6.4 18.3 9.7 38.2 9.7 58.7s-3.6 40.4-10 58.7l23 7.7c-3.9 10.8-8.3 21-13.6 31l15.8 9.1C462 331 471.9 294.5 471.9 256s-9.9-75-27.1-106.6zm-183 177.7c16.3-3.3 30.4-11.6 40.7-23.5l51.2 44.8c11.9-13.6 21.3-29.3 27.1-46.8l-64.2-22.1c2.5-7.5 3.9-15.2 3.9-23.5s-1.4-16.1-3.9-23.5l64.5-22.1c-6.1-17.4-15.5-33.2-27.4-46.8l-51.2 44.8c-10.2-11.9-24.4-20.5-40.7-23.8l13.3-66.4c-8.6-1.9-17.7-2.8-27.1-2.8-9.4 0-18.5.8-27.1 2.8l13.3 66.4c-16.3 3.3-30.4 11.9-40.7 23.8l-51.2-44.8c-11.9 13.6-21.3 29.3-27.4 46.8l64.5 22.1c-2.5 7.5-3.9 15.2-3.9 23.5s1.4 16.1 3.9 23.5l-64.2 22.1c5.8 17.4 15.2 33.2 27.1 46.8l51.2-44.8c10.2 11.9 24.4 20.2 40.7 23.5l-13.3 66.7c8.6 1.7 17.7 2.8 27.1 2.8 9.4 0 18.5-1.1 27.1-2.8l-13.3-66.7z"],envira:[448,512,[],"f299","M0 32c477.6 0 366.6 317.3 367.1 366.3L448 480h-26l-70.4-71.2c-39 4.2-124.4 34.5-214.4-37C47 300.3 52 214.7 0 32zm79.7 46c-49.7-23.5-5.2 9.2-5.2 9.2 45.2 31.2 66 73.7 90.2 119.9 31.5 60.2 79 139.7 144.2 167.7 65 28 34.2 12.5 6-8.5-28.2-21.2-68.2-87-91-130.2-31.7-60-61-118.6-144.2-158.1z"],erlang:[640,512,[],"f39d","M21.7 193c-.1 86.8 29 159.5 78.7 212.1H0V.1h87.2C45.7 50.3 21.6 116.2 21.7 193zM640 .1h-83.6c31.4 42.7 48.7 97.5 46.2 162.7.5 6 .5 11.7 0 24.1H230.2c-.2 109.7 38.9 194.9 138.6 195.3 68.5-.3 118-51 151.9-106.1l96.4 48.2c-17.4 30.9-36.5 57.8-57.9 80.8H640V.1zm-80.8 405h-.2.2zM556.1.1h.3l-.1-.1-.2.1zM325.4 9.8c-45.9.1-85.1 33.5-89.2 83.2h169.9C405 43.3 371.6 9.9 325.4 9.8z"],etsy:[384,512,[],"f2d7","M384 348c-1.75 10.75-13.75 110-15.5 132-117.879-4.299-219.895-4.743-368.5 0v-25.5c45.457-8.948 60.627-8.019 61-35.25 1.793-72.322 3.524-244.143 0-322-1.029-28.46-12.13-26.765-61-36v-25.5c73.886 2.358 255.933 8.551 362.999-3.75-3.5 38.25-7.75 126.5-7.75 126.5H332C320.947 115.665 313.241 68 277.25 68h-137c-10.25 0-10.75 3.5-10.75 9.75V241.5c58 .5 88.5-2.5 88.5-2.5 29.77-.951 27.56-8.502 40.75-65.251h25.75c-4.407 101.351-3.91 61.829-1.75 160.25H257c-9.155-40.086-9.065-61.045-39.501-61.5 0 0-21.5-2-88-2v139c0 26 14.25 38.25 44.25 38.25H263c63.636 0 66.564-24.996 98.751-99.75H384z"],expeditedssl:[496,512,[],"f23e","M248 43.4C130.6 43.4 35.4 138.6 35.4 256S130.6 
468.6 248 468.6 460.6 373.4 460.6 256 365.4 43.4 248 43.4zm-97.4 132.9c0-53.7 43.7-97.4 97.4-97.4s97.4 43.7 97.4 97.4v26.6c0 5-3.9 8.9-8.9 8.9h-17.7c-5 0-8.9-3.9-8.9-8.9v-26.6c0-82.1-124-82.1-124 0v26.6c0 5-3.9 8.9-8.9 8.9h-17.7c-5 0-8.9-3.9-8.9-8.9v-26.6zM389.7 380c0 9.7-8 17.7-17.7 17.7H124c-9.7 0-17.7-8-17.7-17.7V238.3c0-9.7 8-17.7 17.7-17.7h248c9.7 0 17.7 8 17.7 17.7V380zm-248-137.3v132.9c0 2.5-1.9 4.4-4.4 4.4h-8.9c-2.5 0-4.4-1.9-4.4-4.4V242.7c0-2.5 1.9-4.4 4.4-4.4h8.9c2.5 0 4.4 1.9 4.4 4.4zm141.7 48.7c0 13-7.2 24.4-17.7 30.4v31.6c0 5-3.9 8.9-8.9 8.9h-17.7c-5 0-8.9-3.9-8.9-8.9v-31.6c-10.5-6.1-17.7-17.4-17.7-30.4 0-19.7 15.8-35.4 35.4-35.4s35.5 15.8 35.5 35.4zM248 8C111 8 0 119 0 256s111 248 248 248 248-111 248-248S385 8 248 8zm0 478.3C121 486.3 17.7 383 17.7 256S121 25.7 248 25.7 478.3 129 478.3 256 375 486.3 248 486.3z"],facebook:[448,512,[],"f09a","M448 56.7v398.5c0 13.7-11.1 24.7-24.7 24.7H309.1V306.5h58.2l8.7-67.6h-67v-43.2c0-19.6 5.4-32.9 33.5-32.9h35.8v-60.5c-6.2-.8-27.4-2.7-52.2-2.7-51.6 0-87 31.5-87 89.4v49.9h-58.4v67.6h58.4V480H24.7C11.1 480 0 468.9 0 455.3V56.7C0 43.1 11.1 32 24.7 32h398.5c13.7 0 24.8 11.1 24.8 24.7z"],"facebook-f":[264,512,[],"f39e","M76.7 512V283H0v-91h76.7v-71.7C76.7 42.4 124.3 0 193.8 0c33.3 0 61.9 2.5 70.2 3.6V85h-48.2c-37.8 0-45.1 18-45.1 44.3V192H256l-11.7 91h-73.6v229"],"facebook-messenger":[448,512,[],"f39f","M224 32C15.9 32-77.5 278 84.6 400.6V480l75.7-42c142.2 39.8 285.4-59.9 285.4-198.7C445.8 124.8 346.5 32 224 32zm23.4 278.1L190 250.5 79.6 311.6l121.1-128.5 57.4 59.6 110.4-61.1-121.1 128.5z"],"facebook-square":[448,512,[],"f082","M448 80v352c0 26.5-21.5 48-48 48h-85.3V302.8h60.6l8.7-67.6h-69.3V192c0-19.6 5.4-32.9 33.5-32.9H384V98.7c-6.2-.8-27.4-2.7-52.2-2.7-51.6 0-87 31.5-87 89.4v49.9H184v67.6h60.9V480H48c-26.5 0-48-21.5-48-48V80c0-26.5 21.5-48 48-48h352c26.5 0 48 21.5 48 48z"],firefox:[480,512,[],"f269","M478.1 235.3c-.7-4.5-1.4-7.1-1.4-7.1s-1.8 2-4.7 5.9c-.9-10.7-2.8-21.2-5.8-31.6-3.7-12.9-8.5-25.4-14.5-37.4-3.8-8-8.2-15.6-13.3-22.8-1.8-2.7-3.7-5.4-5.6-7.9-8.8-14.4-19-23.3-30.7-40-7.6-12.8-12.9-26.9-15.4-41.6-3.2 8.9-5.7 18-7.4 27.3-12.1-12.2-22.5-20.8-28.9-26.7C319.4 24.2 323 9.1 323 9.1S264.7 74.2 289.9 142c8.7 23 23.8 43.1 43.4 57.9 24.4 20.2 50.8 36 64.7 76.6-11.2-21.3-28.1-39.2-48.8-51.5 6.2 14.7 9.4 30.6 9.3 46.5 0 61-49.6 110.5-110.6 110.4-8.3 0-16.5-.9-24.5-2.8-9.5-1.8-18.7-4.9-27.4-9.3-12.9-7.8-24-18.1-32.8-30.3l-.2-.3 2 .7c4.6 1.6 9.2 2.8 14 3.7 18.7 4 38.3 1.7 55.6-6.6 17.5-9.7 28-16.9 36.6-14h.2c8.4 2.7 15-5.5 9-14-10.4-13.4-27.4-20-44.2-17-17.5 2.5-33.5 15-56.4 2.9-1.5-.8-2.9-1.6-4.3-2.5-1.6-.9 4.9 1.3 3.4.3-5-2.5-9.8-5.4-14.4-8.6-.3-.3 3.5 1.1 3.1.8-5.9-4-11-9.2-15-15.2-4.1-7.4-4.5-16.4-1-24.1 2.1-3.8 5.4-6.9 9.3-8.7 3 1.5 4.8 2.6 4.8 2.6s-1.3-2.5-2.1-3.8c.3-.1.5 0 .8-.2 2.6 1.1 8.3 4 11.4 5.8 2.1 1.1 3.8 2.7 5.2 4.7 0 0 1-.5.3-2.7-1.1-2.7-2.9-5-5.4-6.6h.2c2.3 1.2 4.5 2.6 6.6 4.1 1.9-4.4 2.8-9.2 2.6-14 .2-2.6-.2-5.3-1.1-7.8-.8-1.6.5-2.2 1.9-.5-.2-1.3-.7-2.5-1.2-3.7v-.1s.8-1.1 1.2-1.5c1-1 2.1-1.9 3.4-2.7 7.2-4.5 14.8-8.4 22.7-11.6 6.4-2.8 11.7-4.9 12.8-5.6 1.6-1 3.1-2.2 4.5-3.5 5.3-4.5 9-10.8 10.2-17.7.1-.9.2-1.8.3-2.8v-1.5c-.9-3.5-6.9-6.1-38.4-9.1-11.1-1.8-20-10.1-22.5-21.1v.1c-.4 1.1-.9 2.3-1.3 3.5.4-1.2.8-2.3 1.3-3.5v-.2c6-15.7 16.8-29.1 30.8-38.3.8-.7-3.2.2-2.4-.5 2.7-1.3 5.4-2.5 8.2-3.5 1.4-.6-6-3.4-12.6-2.7-4 .2-8 1.2-11.7 2.8 1.6-1.3 6.2-3.1 5.1-3.1-8.4 1.6-16.5 4.7-23.9 9 0-.8.1-1.5.5-2.2-5.9 2.5-11 6.5-15 11.5.1-.9.2-1.8.2-2.7-2.7 2-5.2 4.3-7.3 
6.9l-.1.1c-17.4-6.7-36.3-8.3-54.6-4.7l-.2-.1h.2c-3.8-3.1-7.1-6.7-9.7-10.9l-.2.1-.4-.2c-1.2-1.8-2.4-3.8-3.7-6-.9-1.6-1.8-3.4-2.7-5.2 0-.1-.1-.2-.2-.2-.4 0-.6 1.7-.9 1.3v-.1c-3.2-8.3-4.7-17.2-4.4-26.2l-.2.1c-5.1 3.5-9 8.6-11.1 14.5-.9 2.1-1.6 3.3-2.2 4.5v-.5c.1-1.1.6-3.3.5-3.1-.1.2-.2.3-.3.4-1.5 1.7-2.9 3.7-3.9 5.8-.9 1.9-1.7 3.9-2.3 5.9-.1.3 0-.3 0-1s.1-2 0-1.7l-.3.7c-6.7 14.9-10.9 30.8-12.4 47.1-.4 2.8-.6 5.6-.5 8.3v.2c-4.8 5.2-9 11-12.7 17.1-12.1 20.4-21.1 42.5-26.8 65.6 4-8.8 8.8-17.2 14.3-25.1C5.5 228.5 0 257.4 0 286.6c1.8-8.6 4.2-17 7-25.3-1.7 34.5 4.9 68.9 19.4 100.3 19.4 43.5 51.6 80 92.3 104.7 16.6 11.2 34.7 19.9 53.8 25.8 2.5.9 5.1 1.8 7.7 2.7-.8-.3-1.6-.7-2.4-1 22.6 6.8 46.2 10.3 69.8 10.3 83.7 0 111.3-31.9 113.8-35 4.1-3.7 7.5-8.2 9.9-13.3 1.6-.7 3.2-1.4 4.9-2.1l1-.5 1.9-.9c12.6-5.9 24.5-13.4 35.3-22.1 16.3-11.7 27.9-28.7 32.9-48.1 3-7.1 3.1-15 .4-22.2.9-1.4 1.7-2.8 2.7-4.3 18-28.9 28.2-61.9 29.6-95.9v-2.8c0-7.3-.6-14.5-1.9-21.6z"],"first-order":[448,512,[],"f2b0","M12.9 229.2c.1-.1.2-.3.3-.4 0 .1 0 .3-.1.4h-.2zM224 96.6c-7.1 0-14.6.6-21.4 1.7l3.7 67.4-22-64c-14.3 3.7-27.7 9.4-40 16.6l29.4 61.4-45.1-50.9c-11.4 8.9-21.7 19.1-30.6 30.9l50.6 45.4-61.1-29.7c-7.1 12.3-12.9 25.7-16.6 40l64.3 22.6-68-4c-.9 7.1-1.4 14.6-1.4 22s.6 14.6 1.4 21.7l67.7-4-64 22.6c3.7 14.3 9.4 27.7 16.6 40.3l61.1-29.7L97.7 352c8.9 11.7 19.1 22.3 30.9 30.9l44.9-50.9-29.5 61.4c12.3 7.4 25.7 13.1 40 16.9l22.3-64.6-4 68c7.1 1.1 14.6 1.7 21.7 1.7 7.4 0 14.6-.6 21.7-1.7l-4-68.6 22.6 65.1c14.3-4 27.7-9.4 40-16.9L274.9 332l44.9 50.9c11.7-8.9 22-19.1 30.6-30.9l-50.6-45.1 61.1 29.4c7.1-12.3 12.9-25.7 16.6-40.3l-64-22.3 67.4 4c1.1-7.1 1.4-14.3 1.4-21.7s-.3-14.9-1.4-22l-67.7 4 64-22.3c-3.7-14.3-9.1-28-16.6-40.3l-60.9 29.7 50.6-45.4c-8.9-11.7-19.1-22-30.6-30.9l-45.1 50.9 29.4-61.1c-12.3-7.4-25.7-13.1-40-16.9L241.7 166l4-67.7c-7.1-1.2-14.3-1.7-21.7-1.7zM443.4 128v256L224 512 4.6 384V128L224 0l219.4 128zm-17.1 10.3L224 20.9 21.7 138.3v235.1L224 491.1l202.3-117.7V138.3zM224 37.1l187.7 109.4v218.9L224 474.9 36.3 365.4V146.6L224 37.1zm0 50.9c-92.3 0-166.9 75.1-166.9 168 0 92.6 74.6 167.7 166.9 167.7 92 0 166.9-75.1 166.9-167.7 0-92.9-74.9-168-166.9-168z"],firstdraft:[384,512,[],"f3a1","M384 192h-64v128H192v128H0v-25.6h166.4v-128h128v-128H384V192zm-25.6 38.4v128h-128v128H64V512h192V384h128V230.4h-25.6zm25.6 192h-89.6V512H320v-64h64v-25.6zM0 0v384h128V256h128V128h128V0H0z"],flickr:[448,512,[],"f16e","M400 32H48C21.5 32 0 53.5 0 80v352c0 26.5 21.5 48 48 48h352c26.5 0 48-21.5 48-48V80c0-26.5-21.5-48-48-48zM144.5 319c-35.1 0-63.5-28.4-63.5-63.5s28.4-63.5 63.5-63.5 63.5 28.4 63.5 63.5-28.4 63.5-63.5 63.5zm159 0c-35.1 0-63.5-28.4-63.5-63.5s28.4-63.5 63.5-63.5 63.5 28.4 63.5 63.5-28.4 63.5-63.5 63.5z"],fly:[384,512,[],"f417","M197.8 427.8c12.9 11.7 33.7 33.3 33.2 50.7 0 .8-.1 1.6-.1 2.5-1.8 19.8-18.8 31.1-39.1 31-25-.1-39.9-16.8-38.7-35.8 1-16.2 20.5-36.7 32.4-47.6 2.3-2.1 2.7-2.7 5.6-3.6 3.4 0 3.9.3 6.7 2.8zM331.9 67.3c-16.3-25.7-38.6-40.6-63.3-52.1C243.1 4.5 214-.2 192 0c-44.1 0-71.2 13.2-81.1 17.3C57.3 45.2 26.5 87.2 28 158.6c7.1 82.2 97 176 155.8 233.8 1.7 1.6 4.5 4.5 6.2 5.1l3.3.1c2.1-.7 1.8-.5 3.5-2.1 52.3-49.2 140.7-145.8 155.9-215.7 7-39.2 3.1-72.5-20.8-112.5zM186.8 351.9c-28-51.1-65.2-130.7-69.3-189-3.4-47.5 11.4-131.2 69.3-136.7v325.7zM328.7 180c-16.4 56.8-77.3 128-118.9 170.3C237.6 298.4 275 217 277 158.4c1.6-45.9-9.8-105.8-48-131.4 88.8 18.3 115.5 98.1 99.7 153z"],"font-awesome":[448,512,[],"f2b4","M397.8 32H50.2C22.7 32 0 54.7 0 82.2v347.6C0 457.3 22.7 480 50.2 480h347.6c27.5 0 50.2-22.7 
50.2-50.2V82.2c0-27.5-22.7-50.2-50.2-50.2zm-45.4 284.3c0 4.2-3.6 6-7.8 7.8-16.7 7.2-34.6 13.7-53.8 13.7-26.9 0-39.4-16.7-71.7-16.7-23.3 0-47.8 8.4-67.5 17.3-1.2.6-2.4.6-3.6 1.2V385c0 1.8 0 3.6-.6 4.8v1.2c-2.4 8.4-10.2 14.3-19.1 14.3-11.3 0-20.3-9-20.3-20.3V166.4c-7.8-6-13.1-15.5-13.1-26.3 0-18.5 14.9-33.5 33.5-33.5 18.5 0 33.5 14.9 33.5 33.5 0 10.8-4.8 20.3-13.1 26.3v18.5c1.8-.6 3.6-1.2 5.4-2.4 18.5-7.8 40.6-14.3 61.5-14.3 22.7 0 40.6 6 60.9 13.7 4.2 1.8 8.4 2.4 13.1 2.4 22.7 0 47.8-16.1 53.8-16.1 4.8 0 9 3.6 9 7.8v140.3z"],"font-awesome-alt":[448,512,[],"f35c","M397.8 67.8c7.8 0 14.3 6.6 14.3 14.3v347.6c0 7.8-6.6 14.3-14.3 14.3H50.2c-7.8 0-14.3-6.6-14.3-14.3V82.2c0-7.8 6.6-14.3 14.3-14.3h347.6m0-35.9H50.2C22.7 32 0 54.7 0 82.2v347.6C0 457.3 22.7 480 50.2 480h347.6c27.5 0 50.2-22.7 50.2-50.2V82.2c0-27.5-22.7-50.2-50.2-50.2zm-58.5 139.2c-6 0-29.9 15.5-52.6 15.5-4.2 0-8.4-.6-12.5-2.4-19.7-7.8-37-13.7-59.1-13.7-20.3 0-41.8 6.6-59.7 13.7-1.8.6-3.6 1.2-4.8 1.8v-17.9c7.8-6 12.5-14.9 12.5-25.7 0-17.9-14.3-32.3-32.3-32.3s-32.3 14.3-32.3 32.3c0 10.2 4.8 19.7 12.5 25.7v212.1c0 10.8 9 19.7 19.7 19.7 9 0 16.1-6 18.5-13.7V385c.6-1.8.6-3 .6-4.8V336c1.2 0 2.4-.6 3-1.2 19.7-8.4 43-16.7 65.7-16.7 31.1 0 43 16.1 69.3 16.1 18.5 0 36.4-6.6 52-13.7 4.2-1.8 7.2-3.6 7.2-7.8V178.3c1.8-4.1-2.3-7.1-7.7-7.1z"],"font-awesome-flag":[448,512,[],"f425","M444.373 359.424c0 7.168-6.144 10.24-13.312 13.312-28.672 12.288-59.392 23.552-92.16 23.552-46.08 0-67.584-28.672-122.88-28.672-39.936 0-81.92 14.336-115.712 29.696-2.048 1.024-4.096 1.024-6.144 2.048v77.824c0 21.405-16.122 34.816-33.792 34.816-19.456 0-34.816-15.36-34.816-34.816V102.4C12.245 92.16 3.029 75.776 3.029 57.344 3.029 25.6 28.629 0 60.373 0s57.344 25.6 57.344 57.344c0 18.432-8.192 34.816-22.528 45.056v31.744c4.124-1.374 58.768-28.672 114.688-28.672 65.27 0 97.676 27.648 126.976 27.648 38.912 0 81.92-27.648 92.16-27.648 8.192 0 15.36 6.144 15.36 13.312v240.64z"],fonticons:[448,512,[],"f280","M0 32v448h448V32H0zm167.4 196h67.4l-11.1 37.3H168v112.9c0 5.8-2 6.7 3.2 7.3l43.5 4.1v25.1H84V389l21.3-2c5.2-.6 6.7-2.3 6.7-7.9V267.7c0-2.3-2.9-2.3-5.8-2.3H84V228h28v-21c0-49.6 26.5-70 77.3-70 34.1 0 64.7 8.2 64.7 52.8l-50.7 6.1c.3-18.7-4.4-23-16.3-23-18.4 0-19 9.9-19 27.4v23.3c0 2.4-3.5 4.4-.6 4.4zM364 414.7H261.3v-25.1l20.4-2.6c5.2-.6 7.6-1.7 7.6-7.3V271.8c0-4.1-2.9-6.7-6.7-7.9l-24.2-6.4 6.7-29.5h80.2v151.7c0 5.8-2.6 6.4 2.9 7.3l15.7 2.6v25.1zm-21.9-255.5l9 33.2-7.3 7.3-31.2-16.6-31.2 16.6-7.3-7.3 9-33.2-21.8-24.2 3.5-9.6h27.7l15.5-28h9.3l15.5 28h27.7l3.5 9.6-21.9 24.2z"],"fonticons-fi":[384,512,[],"f3a2","M114.4 224h92.4l-15.2 51.2h-76.4V433c0 8-2.8 9.2 4.4 10l59.6 5.6V483H0v-35.2l29.2-2.8c7.2-.8 9.2-3.2 9.2-10.8V278.4c0-3.2-4-3.2-8-3.2H0V224h38.4v-28.8c0-68 36.4-96 106-96 46.8 0 88.8 11.2 88.8 72.4l-69.6 8.4c.4-25.6-6-31.6-22.4-31.6-25.2 0-26 13.6-26 37.6v32c0 3.2-4.8 6-.8 6zM384 483H243.2v-34.4l28-3.6c7.2-.8 10.4-2.4 10.4-10V287c0-5.6-4-9.2-9.2-10.8l-33.2-8.8 9.2-40.4h110v208c0 8-3.6 8.8 4 10l21.6 3.6V483zm-30-347.2l12.4 45.6-10 10-42.8-22.8-42.8 22.8-10-10 12.4-45.6-30-36.4 4.8-10h38L307.2 51H320l21.2 38.4h38l4.8 13.2-30 33.2z"],"fort-awesome":[448,512,[],"f286","M412 284h-24c-2.25 0-4 1.75-4 4v28h-32V160c0-2.25-1.75-4-4-4h-24c-2.25 0-4 1.75-4 4v28h-32v-28c0-2.25-1.75-4-4-4h-24c-2.25 0-4 1.75-4 4v28h-32v-28c0-5.25-7-4-10.25-4v-33.25c7.25-1.75 15-3 22.5-3 9.501 0 18.251 3.75 27.5 3.75 4 0 24.25-1 24.25-7V64c0-2.25-1.75-4-4-4-4.5 0-13.25 3.75-21 3.75-8.499 0-18.25-3.75-28.501-3.75-7 0-14 1-20.75 2.5v-4.25c4.75-2.25 8-7.25 8-12.5 0-18.149-27.499-18.167-27.499 0 0 5.25 
3.25 10.25 8 12.5V156c-3.25 0-10.25-1.25-10.25 4v28h-32v-28c0-2.25-1.75-4-4-4h-24c-2.25 0-4 1.75-4 4v28H96v-28c0-2.25-1.75-4-4-4H68c-2.25 0-4 1.75-4 4v156H32v-28c0-2.25-1.75-4-4-4H4c-2.25 0-4 1.75-4 4v192h160v-84c0-63.507 96-63.525 96 0v84h160V288c0-2.25-1.75-4-4-4zm-252-4.001c0 2.25-1.75 4-4 4h-24c-2.25 0-4-1.75-4-4V224c0-2.25 1.75-4 4-4h24c2.25 0 4 1.75 4 4v55.999zm128 0c0 2.25-1.75 4-4 4h-24c-2.25 0-4-1.75-4-4V224c0-2.25 1.75-4 4-4h24c2.25 0 4 1.75 4 4v55.999z"],"fort-awesome-alt":[512,512,[],"f3a3","M211.7 241.1v51.7c0 2.1-1.6 3.7-3.7 3.7h-22.2c-2.1 0-3.7-1.6-3.7-3.7v-51.7c0-2.1 1.6-3.7 3.7-3.7H208c2.1 0 3.7 1.6 3.7 3.7zm114.5-3.7H304c-2.1 0-3.7 1.6-3.7 3.7v51.7c0 2.1 1.6 3.7 3.7 3.7h22.2c2.1 0 3.7-1.6 3.7-3.7v-51.7c-.1-2.1-1.7-3.7-3.7-3.7zm-29.1 263.2c-.9.1-1.7.3-2.6.4-1 .2-2.1.3-3.1.5-.9.1-1.8.3-2.8.4-1 .1-2 .3-3 .4-1 .1-2 .2-2.9.3-1 .1-1.9.2-2.9.3-1 .1-2.1.2-3.1.3-.9.1-1.8.2-2.7.2-1.1.1-2.3.1-3.4.2-.8 0-1.7.1-2.5.1-1.3.1-2.6.1-3.9.1-.7 0-1.4.1-2.1.1-2 0-4 .1-6 .1s-4 0-6-.1c-.7 0-1.4 0-2.1-.1-1.3 0-2.6-.1-3.9-.1-.8 0-1.7-.1-2.5-.1-1.1-.1-2.3-.1-3.4-.2-.9-.1-1.8-.1-2.7-.2-1-.1-2.1-.2-3.1-.3-1-.1-1.9-.2-2.9-.3-1-.1-2-.2-2.9-.3-1-.1-2-.2-3-.4-.9-.1-1.8-.3-2.8-.4-1-.1-2.1-.3-3.1-.5-.9-.1-1.7-.3-2.6-.4-65.6-10.9-122.5-47.7-160-99.4-.2-.2-.3-.5-.5-.7-.8-1.1-1.6-2.2-2.3-3.3-.3-.4-.6-.8-.8-1.2-.7-1.1-1.4-2.1-2.1-3.2-.3-.5-.6-.9-.9-1.4-.7-1.1-1.4-2.1-2-3.2-.3-.5-.6-.9-.9-1.4-.7-1.1-1.3-2.2-2-3.3-.2-.4-.5-.8-.7-1.2-2.4-4-4.6-8.1-6.8-12.2-.1-.2-.2-.3-.3-.5-.6-1.1-1.1-2.2-1.7-3.3-.3-.6-.6-1.1-.8-1.7-.5-1-1-2.1-1.5-3.1-.3-.7-.6-1.3-.9-2-.5-1-.9-2-1.4-3l-.9-2.1c-.4-1-.9-2-1.3-3-.3-.7-.6-1.5-.9-2.2l-1.2-3c-.3-.8-.6-1.5-.9-2.3-.4-1-.8-2-1.1-3-.3-.9-.6-1.8-1-2.8-.6-1.6-1.1-3.3-1.7-4.9-.3-.9-.6-1.8-.9-2.8-.3-.9-.5-1.8-.8-2.7-.3-.9-.6-1.9-.8-2.8-.3-.9-.5-1.8-.8-2.7-.3-1-.5-1.9-.8-2.9-.2-.9-.5-1.8-.7-2.7-.3-1-.5-2-.7-3-.2-.9-.4-1.7-.6-2.6-.2-1.1-.5-2.2-.7-3.2-.2-.8-.3-1.6-.5-2.4-.3-1.3-.5-2.7-.8-4-.1-.6-.2-1.1-.3-1.7l-.9-5.7c-.1-.6-.2-1.3-.3-1.9-.2-1.3-.4-2.6-.5-3.9-.1-.8-.2-1.5-.3-2.3-.1-1.2-.3-2.4-.4-3.6-.1-.8-.2-1.6-.2-2.4-.1-1.2-.2-2.4-.3-3.5-.1-.8-.1-1.6-.2-2.4-.1-1.2-.2-2.4-.2-3.7 0-.8-.1-1.5-.1-2.3-.1-1.3-.1-2.7-.2-4 0-.7 0-1.3-.1-2 0-2-.1-4-.1-6 0-53.5 16.9-103 45.8-143.6 2.3-3.2 4.7-6.4 7.1-9.5 4.9-6.2 10.1-12.3 15.6-18 2.7-2.9 5.5-5.7 8.4-8.4 2.9-2.7 5.8-5.4 8.8-8 4.5-3.9 9.1-7.6 13.9-11.2 1.6-1.2 3.2-2.4 4.8-3.5C140 34.2 171.7 20.1 206 13c16.1-3.3 32.9-5 50-5s33.8 1.7 50 5c34.3 7 66 21.1 93.6 40.7 1.6 1.2 3.2 2.3 4.8 3.5 4.8 3.6 9.4 7.3 13.9 11.2 12 10.4 23 21.9 32.8 34.4 2.5 3.1 4.8 6.3 7.1 9.5C487.1 153 504 202.5 504 256c0 2 0 4-.1 6 0 .7 0 1.3-.1 2 0 1.3-.1 2.7-.2 4 0 .8-.1 1.5-.1 2.3-.1 1.2-.1 2.4-.2.7-.1.8-.1 1.6-.2 2.4-.1 1.2-.2 2.4-.3 3.5-.1.8-.2 1.6-.2 2.4-.1 1.2-.3 2.4-.4 3.6-.1.8-.2 1.5-.3 2.3-.2 1.3-.4 2.6-.5 3.9-.1.6-.2 1.3-.3 1.9l-.9 5.7c-.1.6-.2 1.1-.3 1.7-.2 1.3-.5 2.7-.8 4-.2.8-.3 1.6-.5 2.4-.2 1.1-.5 2.2-.7 3.2-.2.9-.4 1.7-.6 2.6-.2 1-.5 2-.7 3-.2.9-.5 1.8-.7 2.7-.3 1-.5 1.9-.8 2.9-.2.9-.5 1.8-.8 2.7-.3.9-.6 1.9-.8 2.8-.3.9-.5 1.8-.8 2.7-.3.9-.6 1.8-.9 2.8-.5 1.6-1.1 3.3-1.7 4.9-.3.9-.6 1.8-1 2.8-.4 1-.7 2-1.1 3-.3.8-.6 1.5-.9 2.3l-1.2 3c-.3.7-.6 1.5-.9 2.2-.4 1-.8 2-1.3 3l-.9 2.1c-.4 1-.9 2-1.4 3-.3.7-.6 1.3-.9 2-.5 1-1 2.1-1.5 3.1-.3.6-.6 1.1-.8 1.7-.6 1.1-1.1 2.2-1.7 3.3-.1.2-.2.3-.3.5-2.2 4.1-4.4 8.2-6.8 12.2-.2.4-.5.8-.7 1.2-.7 1.1-1.3 2.2-2 3.3-.3.5-.6.9-.9 1.4-.7 1.1-1.4 2.1-2 3.2-.3.5-.6.9-.9 1.4-.7 1.1-1.4 2.1-2.1 3.2-.3.4-.6.8-.8 1.2-.8 1.1-1.5 2.2-2.3 3.3-.2.2-.3.5-.5.7-37.6 54.7-94.5 91.4-160.1 102.4zm117.3-86.2c13-13 24.2-27.4 
33.6-42.9v-71.3c0-2.1-1.6-3.7-3.7-3.7h-22.2c-2.1 0-3.7 1.6-3.7 3.7V326h-29.5V182c0-2.1-1.6-3.7-3.7-3.7h-22.1c-2.1 0-3.7 1.6-3.7 3.7v25.9h-29.5V182c0-2.1-1.6-3.7-3.7-3.7H304c-2.1 0-3.7 1.6-3.7 3.7v25.9h-29.5V182c0-4.8-6.5-3.7-9.5-3.7v-30.7c6.7-1.6 13.8-2.8 20.8-2.8 8.8 0 16.8 3.5 25.4 3.5 3.7 0 22.4-.9 22.4-6.5V93.4c0-2.1-1.6-3.7-3.7-3.7-4.2 0-12.2 3.5-19.4 3.5-7.9 0-16.9-3.5-26.3-3.5-6.5 0-12.9.9-19.2 2.3v-3.9c4.4-2.1 7.4-6.7 7.4-11.5 0-16.8-25.4-16.8-25.4 0 0 4.8 3 9.5 7.4 11.5v90.2c-3 0-9.5-1.1-9.5 3.7v25.9h-29.5V182c0-2.1-1.6-3.7-3.7-3.7h-22.2c-2.1 0-3.7 1.6-3.7 3.7v25.9h-29.5V182c0-2.1-1.6-3.7-3.7-3.7h-22.1c-2.1 0-3.7 1.6-3.7 3.7v144H93.5v-25.8c0-2.1-1.6-3.7-3.7-3.7H67.7c-2.1 0-3.7 1.6-3.7 3.7v71.3c9.4 15.5 20.6 29.9 33.6 42.9 20.6 20.6 44.5 36.7 71.2 48 13.9 5.9 28.2 10.3 42.9 13.2v-75.8c0-58.6 88.6-58.6 88.6 0v75.8c14.7-2.9 29-7.4 42.9-13.2 26.7-11.3 50.6-27.4 71.2-48"],forumbee:[448,512,[],"f211","M5.8 309.7C2 292.7 0 275.5 0 258.3 0 135 99.8 35 223.1 35c16.6 0 33.3 2 49.3 5.5C149 87.5 51.9 186 5.8 309.7zm392.9-189.2C385 103 369 87.8 350.9 75.2c-149.6 44.3-266.3 162.1-309.7 312 12.5 18.1 28 35.6 45.2 49 43.1-151.3 161.2-271.7 312.3-315.7zm15.8 252.7c15.2-25.1 25.4-53.7 29.5-82.8-79.4 42.9-145 110.6-187.6 190.3 30-4.4 58.9-15.3 84.6-31.3 35 13.1 70.9 24.3 107 33.6-9.3-36.5-20.4-74.5-33.5-109.8zm29.7-145.5c-2.6-19.5-7.9-38.7-15.8-56.8C290.5 216.7 182 327.5 137.1 466c18.1 7.6 37 12.5 56.6 15.2C240 367.1 330.5 274.4 444.2 227.7z"],foursquare:[368,512,[],"f180","M323.1 3H49.9C12.4 3 0 31.3 0 49.1v433.8c0 20.3 12.1 27.7 18.2 30.1 6.2 2.5 22.8 4.6 32.9-7.1C180 356.5 182.2 354 182.2 354c3.1-3.4 3.4-3.1 6.8-3.1h83.4c35.1 0 40.6-25.2 44.3-39.7l48.6-243C373.8 25.8 363.1 3 323.1 3zm-16.3 73.8l-11.4 59.7c-1.2 6.5-9.5 13.2-16.9 13.2H172.1c-12 0-20.6 8.3-20.6 20.3v13c0 12 8.6 20.6 20.6 20.6h90.4c8.3 0 16.6 9.2 14.8 18.2-1.8 8.9-10.5 53.8-11.4 58.8-.9 4.9-6.8 13.5-16.9 13.5h-73.5c-13.5 0-17.2 1.8-26.5 12.6 0 0-8.9 11.4-89.5 108.3-.9.9-1.8.6-1.8-.3V75.9c0-7.7 6.8-16.6 16.6-16.6h219c8.2 0 15.6 7.7 13.5 17.5z"],"free-code-camp":[576,512,[],"f2c5","M69.3 144.5c-41 68.5-36.4 163 1 227C92.5 409.7 120 423.9 120 438c0 6.8-6 13-12.8 13C87.7 451 8 375.5 8 253.2c0-111.5 78-186 97.1-186 6 0 14.9 4.8 14.9 11.1 0 12.7-28.3 28.6-50.7 66.2zm195.8 213.8c4.5 1.8 12.3 5.2 12.3-1.2 0-2.7-2.2-2.9-4.3-3.6-8.5-3.4-14-7.7-19.1-15.2-8.2-12.1-10.1-24.2-10.1-38.6 0-32.1 44.2-37.9 44.2-70 0-12.3-7.7-15.9-7.7-19.3 0-2.2.7-2.2 2.9-2.2 8 0 19.1 13.3 22.5 19.8 2.2 4.6 2.4 6 2.4 11.1 0 7-.7 14.2-.7 21.3 0 27 31.9 19.8 31.9 6.8 0-6-3.6-11.6-3.6-17.4 0-.7 0-1.2.7-1.2 3.4 0 9.4 7.7 11.1 10.1 5.8 8.9 8.5 20.8 8.5 31.4 0 32.4-29.5 49-29.5 56 0 1 2.9 7.7 12.1 1.9 29.7-15.1 53.1-47.6 53.1-89.8 0-33.6-8.7-57.7-32.1-82.6-3.9-4.1-16.4-16.9-22.5-16.9-8.2 0 7.2 18.6 7.2 31.2 0 7.2-4.8 12.3-12.3 12.3-11.6 0-14.5-25.4-15.9-33.3-5.8-33.8-12.8-58.2-46.4-74.1-10.4-5-36.5-11.8-36.5-2.2 0 2.4 2.7 4.1 4.6 5.1 9.2 5.6 19.6 21.4 19.6 38.2 0 46.1-57.7 88.2-57.7 136.2-.2 40.3 28.1 72.6 65.3 86.2zM470.4 67c-6 0-14.4 6.5-14.4 12.6 0 8.7 12.1 19.6 17.6 25.4 81.6 85.1 78.6 214.3 17.6 291-7 8.9-35.3 35.3-35.3 43.5 0 5.1 8.2 11.4 13.2 11.4 25.4 0 98.8-80.8 98.8-185.7C568 145.9 491.8 67 470.4 67zm-42.3 323.1H167c-9.4 0-15.5 7.5-15.5 16.4 0 8.5 7 15.5 15.5 15.5h261.1c9.4 0 11.9-7.5 11.9-16.4 0-8.5-3.5-15.5-11.9-15.5z"],freebsd:[448,512,[],"f3a4","M303.7 96.2c11.1-11.1 115.5-77 139.2-53.2 23.7 23.7-42.1 128.1-53.2 139.2-11.1 11.1-39.4.9-63.1-22.9-23.8-23.7-34.1-52-22.9-63.1zM109.9 68.1C73.6 47.5 22 24.6 5.6 41.1c-16.6 16.6 7.1 69.4 27.9 105.7 18.5-32.2 44.8-59.3 
76.4-78.7zM406.7 174c3.3 11.3 2.7 20.7-2.7 26.1-20.3 20.3-87.5-27-109.3-70.1-18-32.3-11.1-53.4 14.9-48.7 5.7-3.6 12.3-7.6 19.6-11.6-29.8-15.5-63.6-24.3-99.5-24.3-119.1 0-215.6 96.5-215.6 215.6 0 119 96.5 215.6 215.6 215.6S445.3 380.1 445.3 261c0-38.4-10.1-74.5-27.7-105.8-3.9 7-7.6 13.3-10.9 18.8z"],"get-pocket":[448,512,[],"f265","M407.6 64h-367C18.5 64 0 82.5 0 104.6v135.2C0 364.5 99.7 464 224.2 464c124 0 223.8-99.5 223.8-224.2V104.6c0-22.4-17.7-40.6-40.4-40.6zm-162 268.5c-12.4 11.8-31.4 11.1-42.4 0C89.5 223.6 88.3 227.4 88.3 209.3c0-16.9 13.8-30.7 30.7-30.7 17 0 16.1 3.8 105.2 89.3 90.6-86.9 88.6-89.3 105.5-89.3 16.9 0 30.7 13.8 30.7 30.7 0 17.8-2.9 15.7-114.8 123.2z"],gg:[512,512,[],"f260","M179.2 230.4l102.4 102.4-102.4 102.4L0 256 179.2 76.8l44.8 44.8-25.6 25.6-19.2-19.2-128 128 128 128 51.5-51.5-77.1-76.5 25.6-25.6zM332.8 76.8L230.4 179.2l102.4 102.4 25.6-25.6-77.1-76.5 51.5-51.5 128 128-128 128-19.2-19.2-25.6 25.6 44.8 44.8L512 256 332.8 76.8z"],"gg-circle":[512,512,[],"f261","M257 8C120 8 9 119 9 256s111 248 248 248 248-111 248-248S394 8 257 8zm-49.5 374.8L81.8 257.1l125.7-125.7 35.2 35.4-24.2 24.2-11.1-11.1-77.2 77.2 77.2 77.2 26.6-26.6-53.1-52.9 24.4-24.4 77.2 77.2-75 75.2zm99-2.2l-35.2-35.2 24.1-24.4 11.1 11.1 77.2-77.2-77.2-77.2-26.5 26.5 53.1 52.9-24.4 24.4-77.2-77.2 75-75L432.2 255 306.5 380.6z"],git:[448,512,[],"f1d3","M18.8 221.7c0 25.3 16.2 60 41.5 68.5v1c-18.8 8.3-24 50.6 1 65.8v1C34 367 16 384.3 16 414.2c0 51.5 48.8 65.8 91.5 65.8 52 0 90.7-18.7 90.7-76 0-70.5-101-44.5-101-82.8 0-13.5 7.2-18.7 19.7-21.3 41.5-7.7 67.5-40 67.5-82.2 0-7.3-1.5-14.2-4-21 6.7-1.5 13.2-3.3 19.7-5.5v-50.5c-17.2 6.8-35.7 11.8-54.5 11.8-53.8-31-126.8 1.3-126.8 69.2zm87.7 163.8c17 0 41.2 3 41.2 25 0 21.8-19.5 26.3-37.7 26.3-17.3 0-43.3-2.7-43.3-25.2.1-22.3 22.1-26.1 39.8-26.1zM103.3 256c-22 0-31.3-13-31.3-33.8 0-49.3 61-48.8 61-.5 0 20.3-8 34.3-29.7 34.3zM432 305.5v49c-13.3 7.3-30.5 9.8-45.5 9.8-53.5 0-59.8-42.2-59.8-85.7v-87.7h.5v-1c-7 0-7.3-1.6-24 1v-47.5h24c0-22.3.3-31-1.5-41.2h56.7c-2 13.8-1.5 27.5-1.5 41.2h51v47.5s-19.3-1-51-1V281c0 14.8 3.3 32.8 21.8 32.8 9.8 0 21.3-2.8 29.3-8.3zM286 68.7c0 18.7-14.5 36.2-33.8 36.2-19.8 0-34.5-17.2-34.5-36.2 0-19.3 14.5-36.7 34.5-36.7C272 32 286 50 286 68.7zm-6.2 74.5c-1.8 14.6-1.6 199.8 0 217.8h-55.5c1.6-18.1 1.8-203 0-217.8h55.5z"],"git-square":[448,512,[],"f1d2","M140.1 348.5c12.1 0 29.5 2.1 29.5 17.9 0 15.5-13.9 18.8-27 18.8-12.3 0-30.9-2-30.9-18s15.7-18.7 28.4-18.7zm-24.7-116.6c0 14.8 6.6 24.1 22.3 24.1 15.5 0 21.2-10 21.2-24.5.1-34.4-43.5-34.8-43.5.4zM448 80v352c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V80c0-26.5 21.5-48 48-48h352c26.5 0 48 21.5 48 48zm-241 93.7c-12.3 4.8-25.5 8.4-38.9 8.4-38.5-22.1-90.7.9-90.7 49.5 0 18 11.6 42.9 29.6 48.9v.7c-13.4 5.9-17.1 36.1.7 47v.7c-19.5 6.4-32.3 18.8-32.3 40.2 0 36.8 34.8 47 65.4 47 37.1 0 64.8-13.4 64.8-54.3 0-50.4-72.1-31.8-72.1-59.1 0-9.6 5.2-13.4 14.1-15.2 29.6-5.5 48.2-28.6 48.2-58.7 0-5.2-1.1-10.2-2.9-15 4.8-1.1 9.5-2.3 14.1-3.9v-36.2zm56.8 1.8h-39.6c1.3 10.6 1.1 142.6 0 155.5h39.6c-1.1-12.8-1.2-145.1 0-155.5zm4.5-53.3c0-13.4-10-26.2-24.1-26.2-14.3 0-24.6 12.5-24.6 26.2 0 13.6 10.5 25.9 24.6 25.9 13.7 0 24.1-12.5 24.1-25.9zm104.3 53.3h-36.4c0-9.8-.4-19.6 1.1-29.5h-40.5c1.3 7.3 1.1 13.6 1.1 29.5h-17.1v33.9c11.9-1.9 12.1-.7 17.1-.7v.7h-.4v62.7c0 31.1 4.5 61.2 42.7 61.2 10.7 0 23-1.8 32.5-7v-35c-5.7 3.9-13.9 5.9-20.9 5.9-13.2 0-15.5-12.9-15.5-23.4v-65.2c22.7 0 36.4.7 36.4.7v-33.8z"],github:[496,512,[],"f09b","M165.9 397.4c0 2-2.3 3.6-5.2 3.6-3.3.3-5.6-1.3-5.6-3.6 0-2 2.3-3.6 5.2-3.6 3-.3 5.6 1.3 5.6 
3.6zm-31.1-4.5c-.7 2 1.3 4.3 4.3 4.9 2.6 1 5.6 0 6.2-2s-1.3-4.3-4.3-5.2c-2.6-.7-5.5.3-6.2 2.3zm44.2-1.7c-2.9.7-4.9 2.6-4.6 4.9.3 2 2.9 3.3 5.9 2.6 2.9-.7 4.9-2.6 4.6-4.6-.3-1.9-3-3.2-5.9-2.9zM244.8 8C106.1 8 0 113.3 0 252c0 110.9 69.8 205.8 169.5 239.2 12.8 2.3 17.3-5.6 17.3-12.1 0-6.2-.3-40.4-.3-61.4 0 0-70 15-84.7-29.8 0 0-11.4-29.1-27.8-36.6 0 0-22.9-15.7 1.6-15.4 0 0 24.9 2 38.6 25.8 21.9 38.6 58.6 27.5 72.9 20.9 2.3-16 8.8-27.1 16-33.7-55.9-6.2-112.3-14.3-112.3-110.5 0-27.5 7.6-41.3 23.6-58.9-2.6-6.5-11.1-33.3 2.6-67.9 20.9-6.5 69 27 69 27 20-5.6 41.5-8.5 62.8-8.5s42.8 2.9 62.8 8.5c0 0 48.1-33.6 69-27 13.7 34.7 5.2 61.4 2.6 67.9 16 17.7 25.8 31.5 25.8 58.9 0 96.5-58.9 104.2-114.8 110.5 9.2 7.9 17 22.9 17 46.4 0 33.7-.3 75.4-.3 83.6 0 6.5 4.6 14.4 17.3 12.1C428.2 457.8 496 362.9 496 252 496 113.3 383.5 8 244.8 8zM97.2 352.9c-1.3 1-1 3.3.7 5.2 1.6 1.6 3.9 2.3 5.2 1 1.3-1 1-3.3-.7-5.2-1.6-1.6-3.9-2.3-5.2-1zm-10.8-8.1c-.7 1.3.3 2.9 2.3 3.9 1.6 1 3.6.7 4.3-.7.7-1.3-.3-2.9-2.3-3.9-2-.6-3.6-.3-4.3.7zm32.4 35.6c-1.6 1.3-1 4.3 1.3 6.2 2.3 2.3 5.2 2.6 6.5 1 1.3-1.3.7-4.3-1.3-6.2-2.2-2.3-5.2-2.6-6.5-1zm-11.4-14.7c-1.6 1-1.6 3.6 0 5.9 1.6 2.3 4.3 3.3 5.6 2.3 1.6-1.3 1.6-3.9 0-6.2-1.4-2.3-4-3.3-5.6-2z"],"github-alt":[480,512,[],"f113","M186.1 328.7c0 20.9-10.9 55.1-36.7 55.1s-36.7-34.2-36.7-55.1 10.9-55.1 36.7-55.1 36.7 34.2 36.7 55.1zM480 278.2c0 31.9-3.2 65.7-17.5 95-37.9 76.6-142.1 74.8-216.7 74.8-75.8 0-186.2 2.7-225.6-74.8-14.6-29-20.2-63.1-20.2-95 0-41.9 13.9-81.5 41.5-113.6-5.2-15.8-7.7-32.4-7.7-48.8 0-21.5 4.9-32.3 14.6-51.8 45.3 0 74.3 9 108.8 36 29-6.9 58.8-10 88.7-10 27 0 54.2 2.9 80.4 9.2 34-26.7 63-35.2 107.8-35.2 9.8 19.5 14.6 30.3 14.6 51.8 0 16.4-2.6 32.7-7.7 48.2 27.5 32.4 39 72.3 39 114.2zm-64.3 50.5c0-43.9-26.7-82.6-73.5-82.6-18.9 0-37 3.4-56 6-14.9 2.3-29.8 3.2-45.1 3.2-15.2 0-30.1-.9-45.1-3.2-18.7-2.6-37-6-56-6-46.8 0-73.5 38.7-73.5 82.6 0 87.8 80.4 101.3 150.4 101.3h48.2c70.3 0 150.6-13.4 150.6-101.3zm-82.6-55.1c-25.8 0-36.7 34.2-36.7 55.1s10.9 55.1 36.7 55.1 36.7-34.2 36.7-55.1-10.9-55.1-36.7-55.1z"],"github-square":[448,512,[],"f092","M400 32H48C21.5 32 0 53.5 0 80v352c0 26.5 21.5 48 48 48h352c26.5 0 48-21.5 48-48V80c0-26.5-21.5-48-48-48zM277.3 415.7c-8.4 1.5-11.5-3.7-11.5-8 0-5.4.2-33 .2-55.3 0-15.6-5.2-25.5-11.3-30.7 37-4.1 76-9.2 76-73.1 0-18.2-6.5-27.3-17.1-39 1.7-4.3 7.4-22-1.7-45-13.9-4.3-45.7 17.9-45.7 17.9-13.2-3.7-27.5-5.6-41.6-5.6-14.1 0-28.4 1.9-41.6 5.6 0 0-31.8-22.2-45.7-17.9-9.1 22.9-3.5 40.6-1.7 45-10.6 11.7-15.6 20.8-15.6 39 0 63.6 37.3 69 74.3 73.1-4.8 4.3-9.1 11.7-10.6 22.3-9.5 4.3-33.8 11.7-48.3-13.9-9.1-15.8-25.5-17.1-25.5-17.1-16.2-.2-1.1 10.2-1.1 10.2 10.8 5 18.4 24.2 18.4 24.2 9.7 29.7 56.1 19.7 56.1 19.7 0 13.9.2 36.5.2 40.6 0 4.3-3 9.5-11.5 8-66-22.1-112.2-84.9-112.2-158.3 0-91.8 70.2-161.5 162-161.5S388 165.6 388 257.4c.1 73.4-44.7 136.3-110.7 158.3zm-98.1-61.1c-1.9.4-3.7-.4-3.9-1.7-.2-1.5 1.1-2.8 3-3.2 1.9-.2 3.7.6 3.9 1.9.3 1.3-1 2.6-3 3zm-9.5-.9c0 1.3-1.5 2.4-3.5 2.4-2.2.2-3.7-.9-3.7-2.4 0-1.3 1.5-2.4 3.5-2.4 1.9-.2 3.7.9 3.7 2.4zm-13.7-1.1c-.4 1.3-2.4 1.9-4.1 1.3-1.9-.4-3.2-1.9-2.8-3.2.4-1.3 2.4-1.9 4.1-1.5 2 .6 3.3 2.1 2.8 3.4zm-12.3-5.4c-.9 1.1-2.8.9-4.3-.6-1.5-1.3-1.9-3.2-.9-4.1.9-1.1 2.8-.9 4.3.6 1.3 1.3 1.8 3.3.9 4.1zm-9.1-9.1c-.9.6-2.6 0-3.7-1.5s-1.1-3.2 0-3.9c1.1-.9 2.8-.2 3.7 1.3 1.1 1.5 1.1 3.3 0 4.1zm-6.5-9.7c-.9.9-2.4.4-3.5-.6-1.1-1.3-1.3-2.8-.4-3.5.9-.9 2.4-.4 3.5.6 1.1 1.3 1.3 2.8.4 3.5zm-6.7-7.4c-.4.9-1.7 1.1-2.8.4-1.3-.6-1.9-1.7-1.5-2.6.4-.6 1.5-.9 2.8-.4 1.3.7 1.9 1.8 1.5 2.6z"],gitkraken:[592,512,[],"f3a6","M565.7 
118.1c-2.3-6.1-9.3-9.2-15.3-6.6-5.7 2.4-8.5 8.9-6.3 14.6 10.9 29 16.9 60.5 16.9 93.3 0 134.6-100.3 245.7-230.2 262.7V358.4c7.9-1.5 15.5-3.6 23-6.2v104c106.7-25.9 185.9-122.1 185.9-236.8 0-91.8-50.8-171.8-125.8-213.3-5.7-3.2-13-.9-15.9 5-2.7 5.5-.6 12.2 4.7 15.1 67.9 37.6 113.9 110 113.9 193.2 0 93.3-57.9 173.1-139.8 205.4v-92.2c14.2-4.5 24.9-17.7 24.9-33.5 0-13.1-6.8-24.4-17.3-30.5 8.3-79.5 44.5-58.6 44.5-83.9V170c0-38-87.9-161.8-129-164.7-2.5-.2-5-.2-7.6 0C251.1 8.3 163.2 132 163.2 170v14.8c0 25.3 36.3 4.3 44.5 83.9-10.6 6.1-17.3 17.4-17.3 30.5 0 15.8 10.6 29 24.8 33.5v92.2c-81.9-32.2-139.8-112-139.8-205.4 0-83.1 46-155.5 113.9-193.2 5.4-3 7.4-9.6 4.7-15.1-2.9-5.9-10.1-8.2-15.9-5-75 41.5-125.8 121.5-125.8 213.3 0 114.7 79.2 210.8 185.9 236.8v-104c7.6 2.5 15.1 4.6 23 6.2v123.7C131.4 465.2 31 354.1 31 219.5c0-32.8 6-64.3 16.9-93.3 2.2-5.8-.6-12.2-6.3-14.6-6-2.6-13 .4-15.3 6.6C14.5 149.7 8 183.8 8 219.5c0 155.1 122.6 281.6 276.3 287.8V361.4c6.8.4 15 .5 23.4 0v145.8C461.4 501.1 584 374.6 584 219.5c0-35.7-6.5-69.8-18.3-101.4zM365.9 275.5c13 0 23.7 10.5 23.7 23.7 0 13.1-10.6 23.7-23.7 23.7-13 0-23.7-10.5-23.7-23.7 0-13.1 10.6-23.7 23.7-23.7zm-139.8 47.3c-13.2 0-23.7-10.7-23.7-23.7s10.5-23.7 23.7-23.7c13.1 0 23.7 10.6 23.7 23.7 0 13-10.5 23.7-23.7 23.7z"],gitlab:[512,512,[],"f296","M29.782 199.732L256 493.714 8.074 309.699c-6.856-5.142-9.712-13.996-7.141-21.993l28.849-87.974zm75.405-174.806c-3.142-8.854-15.709-8.854-18.851 0L29.782 199.732h131.961L105.187 24.926zm56.556 174.806L256 493.714l94.257-293.982H161.743zm349.324 87.974l-28.849-87.974L256 493.714l247.926-184.015c6.855-5.142 9.711-13.996 7.141-21.993zm-85.404-262.78c-3.142-8.854-15.709-8.854-18.851 0l-56.555 174.806h131.961L425.663 24.926z"],gitter:[384,512,[],"f426","M66.4 322.5H16V0h50.4v322.5zM166.9 76.1h-50.4V512h50.4V76.1zm100.6 0h-50.4V512h50.4V76.1zM368 76h-50.4v247H368V76z"],glide:[448,512,[],"f2a5","M252.8 148.6c0 8.8-1.6 17.7-3.4 26.4-5.8 27.8-11.6 55.8-17.3 83.6-1.4 6.3-8.3 4.9-13.7 4.9-23.8 0-30.5-26-30.5-45.5 0-29.3 11.2-68.1 38.5-83.1 4.3-2.5 9.2-4.2 14.1-4.2 11.4 0 12.3 8.3 12.3 17.9zM448 80v352c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V80c0-26.5 21.5-48 48-48h352c26.5 0 48 21.5 48 48zm-64 187c0-5.1-20.8-37.7-25.5-39.5-2.2-.9-7.2-2.3-9.6-2.3-23.1 0-38.7 10.5-58.2 21.5l-.5-.5c4.3-29.4 14.6-57.2 14.6-87.4 0-44.6-23.8-62.7-67.5-62.7-71.7 0-108 70.8-108 123.5 0 54.7 32 85 86.3 85 7.5 0 6.9-.6 6.9 2.3-10.5 80.3-56.5 82.9-56.5 58.9 0-24.4 28-36.5 28.3-38-.2-7.6-29.3-17.2-36.7-17.2-21.1 0-32.7 33-32.7 50.6 0 32.3 20.4 54.7 53.3 54.7 48.2 0 83.4-49.7 94.3-91.7 9.4-37.7 7-39.4 12.3-42.1 20-10.1 35.8-16.8 58.4-16.8 11.1 0 19 2.3 36.7 5.2 1.8.1 4.1-1.7 4.1-3.5z"],"glide-g":[448,512,[],"f2a6","M407.1 211.2c-3.5-1.4-11.6-3.8-15.4-3.8-37.1 0-62.2 16.8-93.5 34.5l-.9-.9c7-47.3 23.5-91.9 23.5-140.4C320.8 29.1 282.6 0 212.4 0 97.3 0 39 113.7 39 198.4 39 286.3 90.3 335 177.6 335c12 0 11-1 11 3.8-16.9 128.9-90.8 133.1-90.8 94.6 0-39.2 45-58.6 45.5-61-.3-12.2-47-27.6-58.9-27.6-33.9.1-52.4 51.2-52.4 79.3C32 476 64.8 512 117.5 512c77.4 0 134-77.8 151.4-145.4 15.1-60.5 11.2-63.3 19.7-67.6 32.2-16.2 57.5-27 93.8-27 17.8 0 30.5 3.7 58.9 8.4 2.9 0 6.7-2.9 6.7-5.8 0-8-33.4-60.5-40.9-63.4zm-175.3-84.4c-9.3 44.7-18.6 89.6-27.8 134.3-2.3 10.2-13.3 7.8-22 7.8-38.3 0-49-41.8-49-73.1 0-47 18-109.3 61.8-133.4 7-4.1 14.8-6.7 22.6-6.7 18.6 0 20 13.3 20 28.7-.1 14.3-2.7 28.5-5.6 42.4z"],gofore:[400,512,[],"f3a7","M324 319.8h-13.2v34.7c-24.5 23.1-56.3 35.8-89.9 35.8-73.2 0-132.4-60.2-132.4-134.4 0-74.1 59.2-134.4 132.4-134.4 35.3 0 68.6 14 93.6 
39.4l62.3-63.3C335 55.3 279.7 32 220.7 32 98 32 0 132.6 0 256c0 122.5 97 224 220.7 224 63.2 0 124.5-26.2 171-82.5-2-27.6-13.4-77.7-67.7-77.7zm-12.1-112.5H205.6v89H324c33.5 0 60.5 15.1 76 41.8v-30.6c0-65.2-40.4-100.2-88.1-100.2z"],goodreads:[448,512,[],"f3a8","M299.9 191.2c5.1 37.3-4.7 79-35.9 100.7-22.3 15.5-52.8 14.1-70.8 5.7-37.1-17.3-49.5-58.6-46.8-97.2 4.3-60.9 40.9-87.9 75.3-87.5 46.9-.2 71.8 31.8 78.2 78.3zM448 88v336c0 30.9-25.1 56-56 56H56c-30.9 0-56-25.1-56-56V88c0-30.9 25.1-56 56-56h336c30.9 0 56 25.1 56 56zM330 313.2s-.1-34-.1-217.3h-29v40.3c-.8.3-1.2-.5-1.6-1.2-9.6-20.7-35.9-46.3-76-46-51.9.4-87.2 31.2-100.6 77.8-4.3 14.9-5.8 30.1-5.5 45.6 1.7 77.9 45.1 117.8 112.4 115.2 28.9-1.1 54.5-17 69-45.2.5-1 1.1-1.9 1.7-2.9.2.1.4.1.6.2.3 3.8.2 30.7.1 34.5-.2 14.8-2 29.5-7.2 43.5-7.8 21-22.3 34.7-44.5 39.5-17.8 3.9-35.6 3.8-53.2-1.2-21.5-6.1-36.5-19-41.1-41.8-.3-1.6-1.3-1.3-2.3-1.3h-26.8c.8 10.6 3.2 20.3 8.5 29.2 24.2 40.5 82.7 48.5 128.2 37.4 49.9-12.3 67.3-54.9 67.4-106.3z"],"goodreads-g":[384,512,[],"f3a9","M42.6 403.3h2.8c12.7 0 25.5 0 38.2.1 1.6 0 3.1-.4 3.6 2.1 7.1 34.9 30 54.6 62.9 63.9 26.9 7.6 54.1 7.8 81.3 1.8 33.8-7.4 56-28.3 68-60.4 8-21.5 10.7-43.8 11-66.5.1-5.8.3-47-.2-52.8l-.9-.3c-.8 1.5-1.7 2.9-2.5 4.4-22.1 43.1-61.3 67.4-105.4 69.1-103 4-169.4-57-172-176.2-.5-23.7 1.8-46.9 8.3-69.7C58.3 47.7 112.3.6 191.6 0c61.3-.4 101.5 38.7 116.2 70.3.5 1.1 1.3 2.3 2.4 1.9V10.6h44.3c0 280.3.1 332.2.1 332.2-.1 78.5-26.7 143.7-103 162.2-69.5 16.9-159 4.8-196-57.2-8-13.5-11.8-28.3-13-44.5zM188.9 36.5c-52.5-.5-108.5 40.7-115 133.8-4.1 59 14.8 122.2 71.5 148.6 27.6 12.9 74.3 15 108.3-8.7 47.6-33.2 62.7-97 54.8-154-9.7-71.1-47.8-120-119.6-119.7z"],google:[488,512,[],"f1a0","M488 261.8C488 403.3 391.1 504 248 504 110.8 504 0 393.2 0 256S110.8 8 248 8c66.8 0 123 24.5 166.3 64.9l-67.5 64.9C258.5 52.6 94.3 116.6 94.3 256c0 86.5 69.1 156.6 153.7 156.6 98.2 0 135-70.4 140.8-106.9H248v-85.3h236.1c2.3 12.7 3.9 24.9 3.9 41.4z"],"google-drive":[512,512,[],"f3aa","M339 314.9L175.4 32h161.2l163.6 282.9H339zm-137.5 23.6L120.9 480h310.5L512 338.5H201.5zM154.1 67.4L0 338.5 80.6 480 237 208.8 154.1 67.4z"],"google-play":[512,512,[],"f3ab","M325.3 234.3L104.6 13l280.8 161.2-60.1 60.1zM47 0C34 6.8 25.3 19.2 25.3 35.3v441.3c0 16.1 8.7 28.5 21.7 35.3l256.6-256L47 0zm425.2 225.6l-58.9-34.1-65.7 64.5 65.7 64.5 60.1-34.1c18-14.3 18-46.5-1.2-60.8zM104.6 499l280.8-161.2-60.1-60.1L104.6 499z"],"google-plus":[496,512,[],"f2b3","M248 8C111.1 8 0 119.1 0 256s111.1 248 248 248 248-111.1 248-248S384.9 8 248 8zm-70.7 372c-68.8 0-124-55.5-124-124s55.2-124 124-124c31.3 0 60.1 11 83 32.3l-33.6 32.6c-13.2-12.9-31.3-19.1-49.4-19.1-42.9 0-77.2 35.5-77.2 78.1s34.2 78.1 77.2 78.1c32.6 0 64.9-19.1 70.1-53.3h-70.1v-42.6h116.9c1.3 6.8 1.9 13.6 1.9 20.7 0 70.8-47.5 121.2-118.8 121.2zm230.2-106.2v35.5H372v-35.5h-35.5v-35.5H372v-35.5h35.5v35.5h35.2v35.5h-35.2z"],"google-plus-g":[640,512,[],"f0d5","M386.061 228.496c1.834 9.692 3.143 19.384 3.143 31.956C389.204 370.205 315.599 448 204.8 448c-106.084 0-192-85.915-192-192s85.916-192 192-192c51.864 0 95.083 18.859 128.611 50.292l-52.126 50.03c-14.145-13.621-39.028-29.599-76.485-29.599-65.484 0-118.92 54.221-118.92 121.277 0 67.056 53.436 121.277 118.92 121.277 75.961 0 104.513-54.745 108.965-82.773H204.8v-66.009h181.261zm185.406 6.437V179.2h-56.001v55.733h-55.733v56.001h55.733v55.733h56.001v-55.733H627.2v-56.001h-55.733z"],"google-plus-square":[448,512,[],"f0d4","M400 32H48C21.5 32 0 53.5 0 80v352c0 26.5 21.5 48 48 48h352c26.5 0 48-21.5 48-48V80c0-26.5-21.5-48-48-48zM164 356c-55.3 
0-100-44.7-100-100s44.7-100 100-100c27 0 49.5 9.8 67 26.2l-27.1 26.1c-7.4-7.1-20.3-15.4-39.8-15.4-34.1 0-61.9 28.2-61.9 63.2 0 34.9 27.8 63.2 61.9 63.2 39.6 0 54.4-28.5 56.8-43.1H164v-34.4h94.4c1 5 1.6 10.1 1.6 16.6 0 57.1-38.3 97.6-96 97.6zm220-81.8h-29v29h-29.2v-29h-29V245h29v-29H355v29h29v29.2z"],"google-wallet":[448,512,[],"f1ee","M156.8 126.8c37.6 60.6 64.2 113.1 84.3 162.5-8.3 33.8-18.8 66.5-31.3 98.3-13.2-52.3-26.5-101.3-56-148.5 6.5-36.4 2.3-73.6 3-112.3zM109.3 200H16.1c-6.5 0-10.5 7.5-6.5 12.7C51.8 267 81.3 330.5 101.3 400h103.5c-16.2-69.7-38.7-133.7-82.5-193.5-3-4-8-6.5-13-6.5zm47.8-88c68.5 108 130 234.5 138.2 368H409c-12-138-68.4-265-143.2-368H157.1zm251.8-68.5c-1.8-6.8-8.2-11.5-15.2-11.5h-88.3c-5.3 0-9 5-7.8 10.3 13.2 46.5 22.3 95.5 26.5 146 48.2 86.2 79.7 178.3 90.6 270.8 15.8-60.5 25.3-133.5 25.3-203 0-73.6-12.1-145.1-31.1-212.6z"],gratipay:[496,512,[],"f184","M248 8C111.1 8 0 119.1 0 256s111.1 248 248 248 248-111.1 248-248S384.9 8 248 8zm114.6 226.4l-113 152.7-112.7-152.7c-8.7-11.9-19.1-50.4 13.6-72 28.1-18.1 54.6-4.2 68.5 11.9 15.9 17.9 46.6 16.9 61.7 0 13.9-16.1 40.4-30 68.1-11.9 32.9 21.6 22.6 60 13.8 72z"],grav:[512,512,[],"f2d6","M301.1 212c4.4 4.4 4.4 11.9 0 16.3l-9.7 9.7c-4.4 4.7-11.9 4.7-16.6 0l-10.5-10.5c-4.4-4.7-4.4-11.9 0-16.6l9.7-9.7c4.4-4.4 11.9-4.4 16.6 0l10.5 10.8zm-30.2-19.7c3-3 3-7.8 0-10.5-2.8-3-7.5-3-10.5 0-2.8 2.8-2.8 7.5 0 10.5 3.1 2.8 7.8 2.8 10.5 0zm-26 5.3c-3 2.8-3 7.5 0 10.2 2.8 3 7.5 3 10.5 0 2.8-2.8 2.8-7.5 0-10.2-3-3-7.7-3-10.5 0zm72.5-13.3c-19.9-14.4-33.8-43.2-11.9-68.1 21.6-24.9 40.7-17.2 59.8.8 11.9 11.3 29.3 24.9 17.2 48.2-12.5 23.5-45.1 33.2-65.1 19.1zm47.7-44.5c-8.9-10-23.3 6.9-15.5 16.1 7.4 9 32.1 2.4 15.5-16.1zM504 256c0 137-111 248-248 248S8 393 8 256 119 8 256 8s248 111 248 248zm-66.2 42.6c2.5-16.1-20.2-16.6-25.2-25.7-13.6-24.1-27.7-36.8-54.5-30.4 11.6-8 23.5-6.1 23.5-6.1.3-6.4 0-13-9.4-24.9 3.9-12.5.3-22.4.3-22.4 15.5-8.6 26.8-24.4 29.1-43.2 3.6-31-18.8-59.2-49.8-62.8-22.1-2.5-43.7 7.7-54.3 25.7-23.2 40.1 1.4 70.9 22.4 81.4-14.4-1.4-34.3-11.9-40.1-34.3-6.6-25.7 2.8-49.8 8.9-61.4 0 0-4.4-5.8-8-8.9 0 0-13.8 0-24.6 5.3 11.9-15.2 25.2-14.4 25.2-14.4 0-6.4-.6-14.9-3.6-21.6-5.4-11-23.8-12.9-31.7 2.8.1-.2.3-.4.4-.5-5 11.9-1.1 55.9 16.9 87.2-2.5 1.4-9.1 6.1-13 10-21.6 9.7-56.2 60.3-56.2 60.3-28.2 10.8-77.2 50.9-70.6 79.7.3 3 1.4 5.5 3 7.5-2.8 2.2-5.5 5-8.3 8.3-11.9 13.8-5.3 35.2 17.7 24.4 15.8-7.2 29.6-20.2 36.3-30.4 0 0-5.5-5-16.3-4.4 27.7-6.6 34.3-9.4 46.2-9.1 8 3.9 8-34.3 8-34.3 0-14.7-2.2-31-11.1-41.5 12.5 12.2 29.1 32.7 28 60.6-.8 18.3-15.2 23-15.2 23-9.1 16.6-43.2 65.9-30.4 106 0 0-9.7-14.9-10.2-22.1-17.4 19.4-46.5 52.3-24.6 64.5 26.6 14.7 108.8-88.6 126.2-142.3 34.6-20.8 55.4-47.3 63.9-65 22 43.5 95.3 94.5 101.1 59z"],gripfire:[384,512,[],"f3ac","M171.8 503.8c0-5.3 4.8-12.2 4.8-22.3 0-15.2-13-39.9-78.1-86.6C64.2 365.8 32 336.4 32 286.6 32 171.9 179.1 110.1 179.1 18c0-3.3-.2-6.7-.6-10 5.1 2.4 39.1 43.3 39.1 90.4 0 80.5-105.1 129.2-105.1 203 0 26.9 16.6 47.2 32.6 69.5 22.5 30.2 44.2 56.9 44.2 86.5-.1 14.5-4.4 29.7-17.5 46.4zm146-241.4c1.5 8.4 2.2 16.6 2.2 24.6 0 51.8-29.4 97.5-67.3 136.8-1 1-2.2 2.4-3.2 2.4-3.6 0-35.5-41.6-35.5-53.2 0 0 41.8-55.7 41.8-96.9 0-10.8-2.7-21.7-9.1-33.4-1.5 32.3-55.7 87.7-58.1 87.7-2.7 0-17.9-22-17.9-42.1 0-5.3 1-10.7 3.2-15.8 2.4-5.5 56.6-72 56.6-116.7 0-6.2-1-12-3.4-17.1l-4-7.2c16.7 6.5 82.6 64.1 94.7 130.9"],grunt:[384,512,[],"f3ad","M61.3 189.3c-1.1 10 5.2 19.1 5.2 19.1.7-7.5 2.2-12.8 4-16.6.4 10.3 3.2 23.5 12.8 34.1 6.9 7.6 35.6 23.3 54.9 6.1 1 2.4 2.1 5.3 3 8.5 2.9 10.3-2.7 25.3-2.7 25.3s15.1-17.1 
13.9-32.5c10.8-.5 21.4-8.4 21.1-19.5 0 0-18.9 10.4-35.5-8.8-9.7-11.2-40.9-42-83.1-31.8 4.3 1 8.9 2.4 13.5 4.1h-.1c-4.2 2-6.5 7.1-7 12zm28.3-1.8c19.5 11 37.4 25.7 44.9 37-5.7 3.3-21.7 10.4-38-1.7-10.3-7.6-9.8-26.2-6.9-35.3zm79.2 233.7c2.2 2.3 1.5 5.3.9 6.8-1.1 2.7-5.5 11.6-13 19.8-2.7 2.9-6.6 4.6-11 4.6-4.3 0-8.7-1.6-11.8-4.3-2.3-2.1-10.2-9.5-13.7-18.6-1.3-3.4-1-6.1.9-8.1 1.3-1.3 4-2.9 9.5-2.9H160c4.1 0 7 .9 8.8 2.7zm62.9-187.9c-1.2 15.5 13.9 32.5 13.9 32.5s-5.6-15-2.7-25.3c.9-3.2 2-6 3-8.5 19.3 17.3 48 1.5 54.8-6.1 9.6-10.6 12.3-23.8 12.8-34.1 1.8 3.8 3.4 9.1 4 16.6 0 0 6.4-9.1 5.2-19.1-.6-5-2.9-10-7-11.8h-.1c4.6-1.8 9.2-3.2 13.5-4.1-42.3-10.2-73.4 20.6-83.1 31.8-16.7 19.2-35.5 8.8-35.5 8.8-.2 10.9 10.4 18.9 21.2 19.3zm17.8-8.8c7.5-11.4 25.4-26 44.9-37 3 9.1 3.4 27.7-7 35.4-16.3 12.1-32.2 5-37.9 1.6-.1.1 0 0 0 0zM263 421.4c1.9 1.9 2.2 4.6.9 7.9-3.5 8.9-11.4 16.1-13.7 18.1-3.1 2.6-7.4 4.2-11.8 4.2s-8.3-1.6-11-4.5c-7.5-8-12-16.7-13-19.3-.6-1.5-1.3-4.4.9-6.7 1.7-1.8 4.7-2.7 8.9-2.7h29.4c5.4.1 8.1 1.7 9.4 3zm-98.3-251.5c9.9 6 18.8 8.1 27.3 8.3 8.5-.2 17.4-2.3 27.3-8.3 0 0-14.5 17.7-27.2 17.8h-.2c-12.7-.2-27.2-17.8-27.2-17.8zm184.5 147.4c-2.4 17.9-13 33.8-24.6 43.7-3.1-22.7-3.7-55.5-3.7-62.4 0-14.7 9.5-24.5 12.2-26.1 2.5-1.5 5.4-3 8.3-4.6 18-9.6 40.4-21.6 40.4-43.7 0-16.2-9.3-23.2-15.4-27.8-.8-.6-1.5-1.1-2.2-1.7-2.1-1.7-3.7-3-4.3-4.4-4.4-9.8-3.6-34.2-1.7-37.6.6-.6 16.7-20.9 11.8-39.2-2-7.4-6.9-13.3-14.1-17-5.3-2.7-11.9-4.2-19.5-4.5-.1-2-.5-3.9-.9-5.9-.6-2.6-1.1-5.3-.9-8.1.4-4.7.8-9 2.2-11.3 8.4-13.3 28.8-17.6 29-17.6l12.3-2.4-8.1-9.5c-.1-.2-17.3-17.5-46.3-17.5-7.9 0-16 1.3-24.1 3.9-24.2 7.8-42.9 30.5-49.4 39.3-3.1-1-6.3-1.9-9.6-2.7-4.2-15.8 9-38.5 9-38.5s-13.6-3-33.7 15.2c-2.6-6.5-8.1-20.5-1.8-37.2C184.6 10.1 177.2 26 175 40.4c-7.6-5.4-6.7-23.1-7.2-27.6-7.5.9-29.2 21.9-28.2 48.3-2 .5-3.9 1.1-5.9 1.7-6.5-8.8-25.1-31.5-49.4-39.3-7.9-2.2-16-3.5-23.9-3.5-29 0-46.1 17.3-46.3 17.5L6 46.9l12.3 2.4c.2 0 20.6 4.3 29 17.6 1.4 2.2 1.8 6.6 2.2 11.3.2 2.8-.4 5.5-.9 8.1-.4 1.9-.8 3.9-.9 5.9-7.7.3-14.2 1.8-19.5 4.5-7.2 3.7-12.1 9.6-14.1 17-5 18.2 11.2 38.5 11.8 39.2 1.9 3.4 2.7 27.8-1.7 37.6-.6 1.4-2.2 2.7-4.3 4.4-.7.5-1.4 1.1-2.2 1.7-6.1 4.6-15.4 11.7-15.4 27.8 0 22.1 22.4 34.1 40.4 43.7 3 1.6 5.8 3.1 8.3 4.6 2.7 1.6 12.2 11.4 12.2 26.1 0 6.9-.6 39.7-3.7 62.4-11.6-9.9-22.2-25.9-24.6-43.8 0 0-29.2 22.6-20.6 70.8 5.2 29.5 23.2 46.1 47 54.7 8.8 19.1 29.4 45.7 67.3 49.6C143 504.3 163 512 192.2 512h.2c29.1 0 49.1-7.7 63.6-19.5 37.9-3.9 58.5-30.5 67.3-49.6 23.8-8.7 41.7-25.2 47-54.7 8.2-48.4-21.1-70.9-21.1-70.9zM305.7 37.7c5.6-1.8 11.6-2.7 17.7-2.7 11 0 19.9 3 24.7 5-3.1 1.4-6.4 3.2-9.7 5.3-2.4-.4-5.6-.8-9.2-.8-10.5 0-20.5 3.1-28.7 8.9-12.3 8.7-18 16.9-20.7 22.4-2.2-1.3-4.5-2.5-7.1-3.7-1.6-.8-3.1-1.5-4.7-2.2 6.1-9.1 19.9-26.5 37.7-32.2zm21 18.2c-.8 1-1.6 2.1-2.3 3.2-3.3 5.2-3.9 11.6-4.4 17.8-.5 6.4-1.1 12.5-4.4 17-4.2.8-8.1 1.7-11.5 2.7-2.3-3.1-5.6-7-10.5-11.2 1.4-4.8 5.5-16.1 13.5-22.5 5.6-4.3 12.2-6.7 19.6-7zM45.6 45.3c-3.3-2.2-6.6-4-9.7-5.3 4.8-2 13.7-5 24.7-5 6.1 0 12 .9 17.7 2.7 17.8 5.8 31.6 23.2 37.7 32.1-1.6.7-3.2 1.4-4.8 2.2-2.5 1.2-4.9 2.5-7.1 3.7-2.6-5.4-8.3-13.7-20.7-22.4-8.3-5.8-18.2-8.9-28.8-8.9-3.4.1-6.6.5-9 .9zm44.7 40.1c-4.9 4.2-8.3 8-10.5 11.2-3.4-.9-7.3-1.9-11.5-2.7C65 89.5 64.5 83.4 64 77c-.5-6.2-1.1-12.6-4.4-17.8-.7-1.1-1.5-2.2-2.3-3.2 7.4.3 14 2.6 19.5 7 8 6.3 12.1 17.6 13.5 22.4zM58.1 259.9c-2.7-1.6-5.6-3.1-8.4-4.6-14.9-8-30.2-16.3-30.2-30.5 0-11.1 4.3-14.6 8.9-18.2l.5-.4c.7-.6 1.4-1.2 2.2-1.8-.9 7.2-1.9 13.3-2.7 14.9 0 0 12.1-15 15.7-44.3 1.4-11.5-1.1-34.3-5.1-43 .2 4.9 0 9.8-.3 
14.4-.4-.8-.8-1.6-1.3-2.2-3.2-4-11.8-17.5-9.4-26.6.9-3.5 3.1-6 6.7-7.8 3.8-1.9 8.8-2.9 15.1-2.9 12.3 0 25.9 3.7 32.9 6 25.1 8 55.4 30.9 64.1 37.7.2.2.4.3.4.3l5.6 3.9-3.5-5.8c-.2-.3-19.1-31.4-53.2-46.5 2-2.9 7.4-8.1 21.6-15.1 21.4-10.5 46.5-15.8 74.3-15.8 27.9 0 52.9 5.3 74.3 15.8 14.2 6.9 19.6 12.2 21.6 15.1-34 15.1-52.9 46.2-53.1 46.5l-3.5 5.8 5.6-3.9s.2-.1.4-.3c8.7-6.8 39-29.8 64.1-37.7 7-2.2 20.6-6 32.9-6 6.3 0 11.3 1 15.1 2.9 3.5 1.8 5.7 4.4 6.7 7.8 2.5 9.1-6.1 22.6-9.4 26.6-.5.6-.9 1.3-1.3 2.2-.3-4.6-.5-9.5-.3-14.4-4 8.8-6.5 31.5-5.1 43 3.6 29.3 15.7 44.3 15.7 44.3-.8-1.6-1.8-7.7-2.7-14.9.7.6 1.5 1.2 2.2 1.8l.5.4c4.6 3.7 8.9 7.1 8.9 18.2 0 14.2-15.4 22.5-30.2 30.5-2.9 1.5-5.7 3.1-8.4 4.6-8.7 5-18 16.7-19.1 34.2-.9 14.6.9 49.9 3.4 75.9-12.4 4.8-26.7 6.4-39.7 6.8-2-4.1-3.9-8.5-5.5-13.1-.7-2-19.6-51.1-26.4-62.2 5.5 39 17.5 73.7 23.5 89.6-3.5-.5-7.3-.7-11.7-.7h-117c-4.4 0-8.3.3-11.7.7 6-15.9 18.1-50.6 23.5-89.6-6.8 11.2-25.7 60.3-26.4 62.2-1.6 4.6-3.5 9-5.5 13.1-13-.4-27.2-2-39.7-6.8 2.5-26 4.3-61.2 3.4-75.9-.9-17.4-10.3-29.2-19-34.2zM34.8 404.6c-12.1-20-8.7-54.1-3.7-59.1 10.9 34.4 47.2 44.3 74.4 45.4-2.7 4.2-5.2 7.6-7 10l-1.4 1.4c-7.2 7.8-8.6 18.5-4.1 31.8-22.7-.1-46.3-9.8-58.2-29.5zm45.7 43.5c6 1.1 12.2 1.9 18.6 2.4 3.5 8 7.4 15.9 12.3 23.1-14.4-5.9-24.4-16-30.9-25.5zM192 498.2c-60.6-.1-78.3-45.8-84.9-64.7-3.7-10.5-3.4-18.2.9-23.1 2.9-3.3 9.5-7.2 24.6-7.2h118.8c15.1 0 21.8 3.9 24.6 7.2 4.2 4.8 4.5 12.6.9 23.1-6.6 18.8-24.3 64.6-84.9 64.7zm80.6-24.6c4.9-7.2 8.8-15.1 12.3-23.1 6.4-.5 12.6-1.3 18.6-2.4-6.5 9.5-16.5 19.6-30.9 25.5zm76.6-69c-12 19.7-35.6 29.3-58.1 29.7 4.5-13.3 3.1-24.1-4.1-31.8-.4-.5-.9-1-1.4-1.5-1.8-2.4-4.3-5.8-7-10 27.2-1.2 63.5-11 74.4-45.4 5 5 8.4 39.1-3.8 59z"],gulp:[256,512,[],"f3ae","M209.8 391.1l-14.1 24.6-4.6 80.2c0 8.9-28.3 16.1-63.1 16.1s-63.1-7.2-63.1-16.1l-5.8-79.4-14.9-25.4c41.2 17.3 126 16.7 165.6 0zm-196-253.3l13.6 125.5c5.9-20 20.8-47 40-55.2 6.3-2.7 12.7-2.7 18.7.9 5.2 3 9.6 9.3 10.1 11.8 1.2 6.5-2 9.1-4.5 9.1-3 0-5.3-4.6-6.8-7.3-4.1-7.3-10.3-7.6-16.9-2.8-6.9 5-12.9 13.4-17.1 20.7-5.1 8.8-9.4 18.5-12 28.2-1.5 5.6-2.9 14.6-.6 19.9 1 2.2 2.5 3.6 4.9 3.6 5 0 12.3-6.6 15.8-10.1 4.5-4.5 10.3-11.5 12.5-16l5.2-15.5c2.6-6.8 9.9-5.6 9.9 0 0 10.2-3.7 13.6-10 34.7-5.8 19.5-7.6 25.8-7.6 25.8-.7 2.8-3.4 7.5-6.3 7.5-1.2 0-2.1-.4-2.6-1.2-1-1.4-.9-5.3-.8-6.3.2-3.2 6.3-22.2 7.3-25.2-2 2.2-4.1 4.4-6.4 6.6-5.4 5.1-14.1 11.8-21.5 11.8-3.4 0-5.6-.9-7.7-2.4l7.6 79.6c2 5 39.2 17.1 88.2 17.1 49.1 0 86.3-12.2 88.2-17.1l10.9-94.6c-5.7 5.2-12.3 11.6-19.6 14.8-5.4 2.3-17.4 3.8-17.4-5.7 0-5.2 9.1-14.8 14.4-21.5 1.4-1.7 4.7-5.9 4.7-8.1 0-2.9-6-2.2-11.7 2.5-3.2 2.7-6.2 6.3-8.7 9.7-4.3 6-6.6 11.2-8.5 15.5-6.2 14.2-4.1 8.6-9.1 22-5 13.3-4.2 11.8-5.2 14-.9 1.9-2.2 3.5-4 4.5-1.9 1-4.5.9-6.1-.3-.9-.6-1.3-1.9-1.3-3.7 0-.9.1-1.8.3-2.7 1.5-6.1 7.8-18.1 15-34.3 1.6-3.7 1-2.6.8-2.3-6.2 6-10.9 8.9-14.4 10.5-5.8 2.6-13 2.6-14.5-4.1-.1-.4-.1-.8-.2-1.2-11.8 9.2-24.3 11.7-20-8.1-4.6 8.2-12.6 14.9-22.4 14.9-4.1 0-7.1-1.4-8.6-5.1-2.3-5.5 1.3-14.9 4.6-23.8 1.7-4.5 4-9.9 7.1-16.2 1.6-3.4 4.2-5.4 7.6-4.5.6.2 1.1.4 1.6.7 2.6 1.8 1.6 4.5.3 7.2-3.8 7.5-7.1 13-9.3 20.8-.9 3.3-2 9 1.5 9 2.4 0 4.7-.8 6.9-2.4 4.6-3.4 8.3-8.5 11.1-13.5 2-3.6 4.4-8.3 5.6-12.3.5-1.7 1.1-3.3 1.8-4.8 1.1-2.5 2.6-5.1 5.2-5.1 1.3 0 2.4.5 3.2 1.5 1.7 2.2 1.3 4.5.4 6.9-2 5.6-4.7 10.6-6.9 16.7-1.3 3.5-2.7 8-2.7 11.7 0 3.4 3.7 2.6 6.8 1.2 2.4-1.1 4.8-2.8 6.8-4.5 1.2-4.9.9-3.8 26.4-68.2 1.3-3.3 3.7-4.7 6.1-4.7 1.2 0 2.2.4 3.2 1.1 1.7 1.3 1.7 4.1 1 6.2-.7 1.9-.6 1.3-4.5 10.5-5.2 12.1-8.6 20.8-13.2 31.9-1.9 4.6-7.7 18.9-8.7 22.3-.6 2.2-1.3 5.8 
1 5.8 5.4 0 19.3-13.1 23.1-17 .2-.3.5-.4.9-.6.6-1.9 1.2-3.7 1.7-5.5 1.4-3.8 2.7-8.2 5.3-11.3.8-1 1.7-1.6 2.7-1.6 2.8 0 4.2 1.2 4.2 4 0 1.1-.7 5.1-1.1 6.2 1.4-1.5 2.9-3 4.5-4.5 15-13.9 25.7-6.8 25.7.2 0 7.4-8.9 17.7-13.8 23.4-1.6 1.9-4.9 5.4-5 6.4 0 1.3.9 1.8 2.2 1.8 2 0 6.4-3.5 8-4.7 5-3.9 11.8-9.9 16.6-14.1l14.8-136.8c-30.5 17.1-197.6 17.2-228.3.2zm229.7-8.5c0 21-231.2 21-231.2 0 0-8.8 51.8-15.9 115.6-15.9 9 0 17.8.1 26.3.4l12.6-48.7L228.1.6c1.4-1.4 5.8-.2 9.9 3.5s6.6 7.9 5.3 9.3l-.1.1L185.9 74l-10 40.7c39.9 2.6 67.6 8.1 67.6 14.6zm-69.4 4.6c0-.8-.9-1.5-2.5-2.1l-.2.8c0 1.3-5 2.4-11.1 2.4s-11.1-1.1-11.1-2.4c0-.1 0-.2.1-.3l.2-.7c-1.8.6-3 1.4-3 2.3 0 2.1 6.2 3.7 13.7 3.7 7.7.1 13.9-1.6 13.9-3.7z"],"hacker-news":[448,512,[],"f1d4","M0 32v448h448V32H0zm21.2 197.2H21c.1-.1.2-.3.3-.4 0 .1 0 .3-.1.4zm218 53.9V384h-31.4V281.3L128 128h37.3c52.5 98.3 49.2 101.2 59.3 125.6 12.3-27 5.8-24.4 60.6-125.6H320l-80.8 155.1z"],"hacker-news-square":[448,512,[],"f3af","M400 32H48C21.5 32 0 53.5 0 80v352c0 26.5 21.5 48 48 48h352c26.5 0 48-21.5 48-48V80c0-26.5-21.5-48-48-48zM21.2 229.2H21c.1-.1.2-.3.3-.4 0 .1 0 .3-.1.4zm218 53.9V384h-31.4V281.3L128 128h37.3c52.5 98.3 49.2 101.2 59.3 125.6 12.3-27 5.8-24.4 60.6-125.6H320l-80.8 155.1z"],"hire-a-helper":[512,512,[],"f3b0","M443.1 0H71.9C67.9 37.3 37.4 67.8 0 71.7v371.5c37.4 4.9 66 32.4 71.9 68.8h372.2c3-36.4 32.5-65.8 67.9-69.8V71.7c-36.4-5.9-65-35.3-68.9-71.7zm-37 404.9c-36.3 0-18.8-2-55.1-2-35.8 0-21 2-56.1 2-5.9 0-4.9-8.2 0-9.8 22.8-7.6 22.9-10.2 24.6-12.8 10.4-15.6 5.9-83 5.9-113 0-5.3-6.4-12.8-13.8-12.8H200.4c-7.4 0-13.8 7.5-13.8 12.8 0 30-4.5 97.4 5.9 113 1.7 2.5 1.8 5.2 24.6 12.8 4.9 1.6 6 9.8 0 9.8-35.1 0-20.3-2-56.1-2-36.3 0-18.8 2-55.1 2-7.9 0-5.8-10.8 0-10.8 10.2-3.4 13.5-3.5 21.7-13.8 7.7-12.9 7.9-44.4 7.9-127.8V151.3c0-22.2-12.2-28.3-28.6-32.4-8.8-2.2-4-11.8 1-11.8 36.5 0 20.6 2 57.1 2 32.7 0 16.5-2 49.2-2 3.3 0 8.5 8.3 1 10.8-4.9 1.6-27.6 3.7-27.6 39.3 0 45.6-.2 55.8 1 68.8 0 1.3 2.3 12.8 12.8 12.8h109.2c10.5 0 12.8-11.5 12.8-12.8 1.2-13 1-23.2 1-68.8 0-35.6-22.7-37.7-27.6-39.3-7.5-2.5-2.3-10.8 1-10.8 32.7 0 16.5 2 49.2 2 36.5 0 20.6-2 57.1-2 4.9 0 9.9 9.6 1 11.8-16.4 4.1-28.6 10.3-28.6 32.4v101.2c0 83.4.1 114.9 7.9 127.8 8.2 10.2 11.4 10.4 21.7 13.8 5.8 0 7.8 10.8 0 10.8z"],hooli:[640,512,[],"f427","M508.4 352h57.9V156.7L508.4 184v168zm73.7-110.5V352H640V241.5h-57.9zm-250.7-8.9c-18.2-18.2-50.4-17.1-50.4-17.1s-32.2-1.1-50.4 17.1c-1.9 1.9-3.7 3.9-5.3 6-38.2-29.6-72.5-46.5-102.1-61.1v-20.7l-22.5 10.6c-54.4-22.1-89-18.2-97.3.1 0 0-24.9 32.8 61.9 110.9v-31c-48.8-54.6-39-76.1-35.3-79.2 13.5-11.4 37.5-8 64.4 2.1L65.2 184v63.3c13.1 14.7 30.5 31.5 53.5 50.4l4.5 3.6v-29.8c0-6.9 1.7-18.2 10.8-18.2s10.6 6.9 10.6 15V317c18 12.2 37.3 22.1 57.7 29.6v-93.9c0-18.7-13.4-37.4-40.6-37.4-15.8-.1-30.5 8.2-38.5 21.9v-54.3c41.9 20.9 83.9 46.5 99.9 58.3-10.2 14.6-9.3 28.1-9.3 43.7 0 18.7-1.4 34.3 16.8 52.5 18.2 18.2 50.4 17.1 50.4 17.1s32.3 1.1 50.4-17.1c18.2-18.2 16.7-33.8 16.7-52.5 0-18.5 1.5-34.2-16.7-52.3zm-39.7 71.9c0 3.6-1.8 12.5-10.7 12.5-8.9 0-10.7-8.9-10.7-12.5v-40.4c0-8.7 7.3-10.9 10.7-10.9 3.4 0 10.7 2.1 10.7 10.9v40.4zm185.7-71.9c-18.2-18.2-50.4-17.1-50.4-17.1s-32.3-1.1-50.4 17.1c-18.2 18.2-16.8 33.9-16.8 52.6 0 18.7-1.4 34.3 16.8 52.5 18.2 18.2 50.4 17.1 50.4 17.1s32.3 1.1 50.4-17.1c18.2-18.2 16.8-33.8 16.8-52.5-.1-18.8 1.3-34.5-16.8-52.6zm-39.8 71.9c0 3.6-1.8 12.5-10.7 12.5-8.9 0-10.7-8.9-10.7-12.5v-40.4c0-8.7 7.3-10.9 10.7-10.9 3.4 0 10.7 2.1 10.7 10.9v40.4zm173.5-73c15.9 0 28.9-12.9 28.9-28.9s-12.9-24.5-28.9-24.5c-15.9 0-28.9 8.6-28.9 24.5s12.9 28.9 28.9 
28.9zM144.5 352l38.3.8c-13.2-4.6-26-10.2-38.3-16.8v16zm-21.4 0v-28.6c-6.5-4.2-13-8.7-19.4-13.6-14.8-11.2-27.5-21.7-38.5-31.5V352h57.9zm59.7.8c36.5 12.5 69.9 14.2 94.7 7.2-19.9.2-45.8-2.6-75.3-13.3v5.3l-19.4.8z"],hotjar:[448,512,[],"f3b1","M414.9 161.5C340.2 29 121.1 0 121.1 0S222.2 110.4 93 197.7C11.3 252.8-21 324.4 14 402.6c26.8 59.9 83.5 84.3 144.6 93.4-29.2-55.1-6.6-122.4-4.1-129.6 57.1 86.4 165 0 110.8-93.9 71 15.4 81.6 138.6 27.1 215.5 80.5-25.3 134.1-88.9 148.8-145.6 15.5-59.3 3.7-127.9-26.3-180.9z"],houzz:[320,512,[],"f27c","M12.2 256L160 341.1 12.2 426.6V256M160 512l147.8-85.4V256L160 341.1V512zm0-512L12.2 85.4V256L160 170.6V0zm0 170.6L307.8 256V85.4L160 170.6z"],html5:[384,512,[],"f13b","M0 32l34.9 395.8L191.5 480l157.6-52.2L384 32H0zm308.2 127.9H124.4l4.1 49.4h175.6l-13.6 148.4-97.9 27v.3h-1.1l-98.7-27.3-6-75.8h47.7L138 320l53.5 14.5 53.7-14.5 6-62.2H84.3L71.5 112.2h241.1l-4.4 47.7z"],hubspot:[512,512,[],"f3b2","M267.4 211.6c-25.1 23.7-40.8 57.3-40.8 94.6 0 29.3 9.7 56.3 26 78L203.1 434c-4.4-1.6-9.1-2.5-14-2.5-10.8 0-20.9 4.2-28.5 11.8-7.6 7.6-11.8 17.8-11.8 28.6s4.2 20.9 11.8 28.5c7.6 7.6 17.8 11.6 28.5 11.6 10.8 0 20.9-3.9 28.6-11.6 7.6-7.6 11.8-17.8 11.8-28.5 0-4.2-.6-8.2-1.9-12.1l50-50.2c22 16.9 49.4 26.9 79.3 26.9 71.9 0 130-58.3 130-130.2 0-65.2-47.7-119.2-110.2-128.7V116c17.5-7.4 28.2-23.8 28.2-42.9 0-26.1-20.9-47.9-47-47.9S311.2 47 311.2 73.1c0 19.1 10.7 35.5 28.2 42.9v61.2c-15.2 2.1-29.6 6.7-42.7 13.6-27.6-20.9-117.5-85.7-168.9-124.8 1.2-4.4 2-9 2-13.8C129.8 23.4 106.3 0 77.4 0 48.6 0 25.2 23.4 25.2 52.2c0 28.9 23.4 52.3 52.2 52.3 9.8 0 18.9-2.9 26.8-7.6l163.2 114.7zm89.5 163.6c-38.1 0-69-30.9-69-69s30.9-69 69-69 69 30.9 69 69-30.9 69-69 69z"],imdb:[448,512,[],"f2d8","M350.5 288.7c0 5.4 1.6 14.4-6.2 14.4-1.6 0-3-.8-3.8-2.4-2.2-5.1-1.1-44.1-1.1-44.7 0-3.8-1.1-12.7 4.9-12.7 7.3 0 6.2 7.3 6.2 12.7v32.7zM265 229.9c0-9.7 1.6-16-10.3-16v83.7c12.2.3 10.3-8.7 10.3-18.4v-49.3zM448 80v352c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V80c0-26.5 21.5-48 48-48h352c26.5 0 48 21.5 48 48zM21.3 228.8c-.1.1-.2.3-.3.4h.3v-.4zM97 192H64v127.8h33V192zm113.3 0h-43.1l-7.6 59.9c-2.7-20-5.4-40.1-8.7-59.9h-42.8v127.8h29v-84.5l12.2 84.5h20.6l11.6-86.4v86.4h28.7V192zm86.3 45.3c0-8.1.3-16.8-1.4-24.4-4.3-22.5-31.4-20.9-49-20.9h-24.6v127.8c86.1.1 75 6 75-82.5zm85.9 17.3c0-17.3-.8-30.1-22.2-30.1-8.9 0-14.9 2.7-20.9 9.2V192h-31.7v127.8h29.8l1.9-8.1c5.7 6.8 11.9 9.8 20.9 9.8 19.8 0 22.2-15.2 22.2-30.9v-36z"],instagram:[448,512,[],"f16d","M224.1 141c-63.6 0-114.9 51.3-114.9 114.9s51.3 114.9 114.9 114.9S339 319.5 339 255.9 287.7 141 224.1 141zm0 189.6c-41.1 0-74.7-33.5-74.7-74.7s33.5-74.7 74.7-74.7 74.7 33.5 74.7 74.7-33.6 74.7-74.7 74.7zm146.4-194.3c0 14.9-12 26.8-26.8 26.8-14.9 0-26.8-12-26.8-26.8s12-26.8 26.8-26.8 26.8 12 26.8 26.8zm76.1 27.2c-1.7-35.9-9.9-67.7-36.2-93.9-26.2-26.2-58-34.4-93.9-36.2-37-2.1-147.9-2.1-184.9 0-35.8 1.7-67.6 9.9-93.9 36.1s-34.4 58-36.2 93.9c-2.1 37-2.1 147.9 0 184.9 1.7 35.9 9.9 67.7 36.2 93.9s58 34.4 93.9 36.2c37 2.1 147.9 2.1 184.9 0 35.9-1.7 67.7-9.9 93.9-36.2 26.2-26.2 34.4-58 36.2-93.9 2.1-37 2.1-147.8 0-184.8zM398.8 388c-7.8 19.6-22.9 34.7-42.6 42.6-29.5 11.7-99.5 9-132.1 9s-102.7 2.6-132.1-9c-19.6-7.8-34.7-22.9-42.6-42.6-11.7-29.5-9-99.5-9-132.1s-2.6-102.7 9-132.1c7.8-19.6 22.9-34.7 42.6-42.6 29.5-11.7 99.5-9 132.1-9s102.7-2.6 132.1 9c19.6 7.8 34.7 22.9 42.6 42.6 11.7 29.5 9 99.5 9 132.1s2.7 102.7-9 132.1z"],"internet-explorer":[512,512,[],"f26b","M483.049 159.706c10.855-24.575 21.424-60.438 21.424-87.871 0-72.722-79.641-98.371-209.673-38.577-107.632-7.181-211.221 
73.67-237.098 186.457 30.852-34.862 78.271-82.298 121.977-101.158C125.404 166.85 79.128 228.002 43.992 291.725 23.246 329.651 0 390.94 0 436.747c0 98.575 92.854 86.5 180.251 42.006 31.423 15.43 66.559 15.573 101.695 15.573 97.124 0 184.249-54.294 216.814-146.022H377.927c-52.509 88.593-196.819 52.996-196.819-47.436H509.9c6.407-43.581-1.655-95.715-26.851-141.162zM64.559 346.877c17.711 51.15 53.703 95.871 100.266 123.304-88.741 48.94-173.267 29.096-100.266-123.304zm115.977-108.873c2-55.151 50.276-94.871 103.98-94.871 53.418 0 101.981 39.72 103.981 94.871H180.536zm184.536-187.6c21.425-10.287 48.563-22.003 72.558-22.003 31.422 0 54.274 21.717 54.274 53.722 0 20.003-7.427 49.007-14.569 67.867-26.28-42.292-65.986-81.584-112.263-99.586z"],ioxhost:[640,512,[],"f208","M616 160h-67.3C511.2 70.7 422.9 8 320 8 183 8 72 119 72 256c0 16.4 1.6 32.5 4.7 48H24c-13.3 0-24 10.8-24 24 0 13.3 10.7 24 24 24h67.3c37.5 89.3 125.8 152 228.7 152 137 0 248-111 248-248 0-16.4-1.6-32.5-4.7-48H616c13.3 0 24-10.8 24-24 0-13.3-10.7-24-24-24zm-96 96c0 110.5-89.5 200-200 200-75.7 0-141.6-42-175.5-104H424c13.3 0 24-10.8 24-24 0-13.3-10.7-24-24-24H125.8c-3.8-15.4-5.8-31.4-5.8-48 0-110.5 89.5-200 200-200 75.7 0 141.6 42 175.5 104H216c-13.3 0-24 10.8-24 24 0 13.3 10.7 24 24 24h298.2c3.8 15.4 5.8 31.4 5.8 48zm-304-24h208c13.3 0 24 10.7 24 24 0 13.2-10.7 24-24 24H216c-13.3 0-24-10.7-24-24 0-13.2 10.7-24 24-24z"],itunes:[448,512,[],"f3b4","M223.6 80.3C129 80.3 52.5 157 52.5 251.5S129 422.8 223.6 422.8s171.2-76.7 171.2-171.2c0-94.6-76.7-171.3-171.2-171.3zm79.4 240c-3.2 13.6-13.5 21.2-27.3 23.8-12.1 2.2-22.2 2.8-31.9-5-11.8-10-12-26.4-1.4-36.8 8.4-8 20.3-9.6 38-12.8 3-.5 5.6-1.2 7.7-3.7 3.2-3.6 2.2-2 2.2-80.8 0-5.6-2.7-7.1-8.4-6.1-4 .7-91.9 17.1-91.9 17.1-5 1.1-6.7 2.6-6.7 8.3 0 116.1.5 110.8-1.2 118.5-2.1 9-7.6 15.8-14.9 19.6-8.3 4.6-23.4 6.6-31.4 5.2-21.4-4-28.9-28.7-14.4-42.9 8.4-8 20.3-9.6 38-12.8 3-.5 5.6-1.2 7.7-3.7 5-5.7.9-127 2.6-133.7.4-2.6 1.5-4.8 3.5-6.4 2.1-1.7 5.8-2.7 6.7-2.7 101-19 113.3-21.4 115.1-21.4 5.7-.4 9 3 9 8.7-.1 170.6.4 161.4-1 167.6zM345.2 32H102.8C45.9 32 0 77.9 0 134.8v242.4C0 434.1 45.9 480 102.8 480h242.4c57 0 102.8-45.9 102.8-102.8V134.8C448 77.9 402.1 32 345.2 32zM223.6 444c-106.3 0-192.5-86.2-192.5-192.5S117.3 59 223.6 59s192.5 86.2 192.5 192.5S329.9 444 223.6 444z"],"itunes-note":[384,512,[],"f3b5","M381.9 388.2c-6.4 27.4-27.2 42.8-55.1 48-24.5 4.5-44.9 5.6-64.5-10.2-23.9-20.1-24.2-53.4-2.7-74.4 17-16.2 40.9-19.5 76.8-25.8 6-1.1 11.2-2.5 15.6-7.4 6.4-7.2 4.4-4.1 4.4-163.2 0-11.2-5.5-14.3-17-12.3-8.2 1.4-185.7 34.6-185.7 34.6-10.2 2.2-13.4 5.2-13.4 16.7 0 234.7 1.1 223.9-2.5 239.5-4.2 18.2-15.4 31.9-30.2 39.5-16.8 9.3-47.2 13.4-63.4 10.4-43.2-8.1-58.4-58-29.1-86.6 17-16.2 40.9-19.5 76.8-25.8 6-1.1 11.2-2.5 15.6-7.4 10.1-11.5 1.8-256.6 5.2-270.2.8-5.2 3-9.6 7.1-12.9 4.2-3.5 11.8-5.5 13.4-5.5 204-38.2 228.9-43.1 232.4-43.1 11.5-.8 18.1 6 18.1 17.6.2 344.5 1.1 326-1.8 338.5z"],jenkins:[512,512,[],"f3b6","M487.1 425c-1.4-11.2-19-23.1-28.2-31.9-5.1-5-29-23.1-30.4-29.9-1.4-6.6 9.7-21.5 13.3-28.9 5.1-10.7 8.8-23.7 11.3-32.6 18.8-66.1 20.7-156.9-6.2-211.2-10.2-20.6-38.6-49-56.4-62.5-42-31.7-119.6-35.3-170.1-16.6-14.1 5.2-27.8 9.8-40.1 17.1-33.1 19.4-68.3 32.5-78.1 71.6-24.2 10.8-31.5 41.8-30.3 77.8.2 7 4.1 15.8 2.7 22.4-.7 3.3-5.2 7.6-6.1 9.8-11.6 27.7-2.3 64 11.1 83.7 8.1 11.9 21.5 22.4 39.2 25.2.7 10.6 3.3 19.7 8.2 30.4 3.1 6.8 14.7 19 10.4 27.7-2.2 4.4-21 13.8-27.3 17.6C89 407.2 73.7 415 54.2 429c-12.6 9-32.3 10.2-29.2 31.1 2.1 14.1 10.1 31.6 14.7 45.8.7 2 1.4 4.1 2.1 6h422c4.9-15.3 9.7-30.9 14.6-47.2 
3.4-11.4 10.2-27.8 8.7-39.7zM205.9 33.7c1.8-.5 3.4.7 4.9 2.4-.2 5.2-5.4 5.1-8.9 6.8-5.4 6.7-13.4 9.8-20 17.2-6.8 7.5-14.4 27.7-23.4 30-4.5 1.1-9.7-.8-13.6-.5-10.4.7-17.7 6-28.3 7.5 13.6-29.9 56.1-54 89.3-63.4zm-104.8 93.6c13.5-14.9 32.1-24.1 54.8-25.9 11.7 29.7-8.4 65-.9 97.6 2.3 9.9 10.2 25.4-2.4 25.7.3-28.3-34.8-46.3-61.3-29.6-1.8-21.5-4.9-51.7 9.8-67.8zm36.7 200.2c-1-4.1-2.7-12.9-2.3-15.1 1.6-8.7 17.1-12.5 11-24.7-11.3-.1-13.8 10.2-24.1 11.3-26.7 2.6-45.6-35.4-44.4-58.4 1-19.5 17.6-38.2 40.1-35.8 16 1.8 21.4 19.2 24.5 34.7 9.2.5 22.5-.4 26.9-7.6-.6-17.5-8.8-31.6-8.2-47.7 1-30.3 17.5-57.6 4.8-87.4 13.6-30.9 53.5-55.3 83.1-70 36.6-18.3 94.9-3.7 129.3 15.8 19.7 11.1 34.4 32.7 48.3 50.7-19.5-5.8-36.1 4.2-33.1 20.3 16.3-14.9 44.2-.2 52.5 16.4 7.9 15.8 7.8 39.3 9 62.8 2.9 57-10.4 115.9-39.1 157.1-7.7 11-14.1 23-24.9 30.6-26 18.2-65.4 34.7-99.2 23.4-44.7-15-65-44.8-89.5-78.8.7 18.7 13.8 34.1 26.8 48.4 11.3 12.5 25 26.6 39.7 32.4-12.3-2.9-31.1-3.8-36.2 7.2-28.6-1.9-55.1-4.8-68.7-24.2-10.6-15.4-21.4-41.4-26.3-61.4zm222 124.1c4.1-3 11.1-2.9 17.4-3.6-5.4-2.7-13-3.7-19.3-2.2-.1-4.2-2-6.8-3.2-10.2 10.6-3.8 35.5-28.5 49.6-20.3 6.7 3.9 9.5 26.2 10.1 37 .4 9-.8 18-4.5 22.8-18.8-.6-35.8-2.8-50.7-7 .9-6.1-1-12.1.6-16.5zm-17.2-20c-16.8.8-26-1.2-38.3-10.8.2-.8 1.4-.5 1.5-1.4 18 8 40.8-3.3 59-4.9-7.9 5.1-14.6 11.6-22.2 17.1zm-12.1 33.2c-1.6-9.4-3.5-12-2.8-20.2 25-16.6 29.7 28.6 2.8 20.2zM226 438.6c-11.6-.7-48.1-14-38.5-23.7 9.4 6.5 27.5 4.9 41.3 7.3.8 4.4-2.8 10.2-2.8 16.4zM57.7 497.1c-4.3-12.7-9.2-25.1-14.8-36.9 30.8-23.8 65.3-48.9 102.2-63.5 2.8-1.1 23.2 25.4 26.2 27.6 16.5 11.7 37 21 56.2 30.2 1.2 8.8 3.9 20.2 8.7 35.5.7 2.3 1.4 4.7 2.2 7.2H57.7zm240.6 5.7h-.8c.3-.2.5-.4.8-.5v.5zm7.5-5.7c2.1-1.4 4.3-2.8 6.4-4.3 1.1 1.4 2.2 2.8 3.2 4.3h-9.6zm15.1-24.7c-10.8 7.3-20.6 18.3-33.3 25.2-6 3.3-27 11.7-33.4 10.2-3.6-.8-3.9-5.3-5.4-9.5-3.1-9-10.1-23.4-10.8-37-.8-17.2-2.5-46 16-42.4 14.9 2.9 32.3 9.7 43.9 16.1 7.1 3.9 11.1 8.6 21.9 9.5-.1 1.4-.1 2.8-.2 4.3-5.9 3.9-15.3 3.8-21.8 7.1 9.5.4 17 2.7 23.5 5.9-.1 3.4-.3 7-.4 10.6zm53.4 24.7h-14c-.1-3.2-2.8-5.8-6.1-5.8s-5.9 2.6-6.1 5.8h-17.4c-2.8-4.4-5.7-8.6-8.9-12.5 2.1-2.2 4-4.7 6-6.9 9 3.7 14.8-4.9 21.7-4.2 7.9.8 14.2 11.7 25.4 11l-.6 12.6zm8.7 0c.2-4 .4-7.8.6-11.5 15.6-7.3 29 1.3 35.7 11.5H383zm83.4-37c-2.3 11.2-5.8 24-9.9 37.1-.2-.1-.4-.1-.6-.1H428c.6-1.1 1.2-2.2 1.9-3.3-2.6-6.1-9-8.7-10.9-15.5 12.1-22.7 6.5-93.4-24.2-78.5 4.3-6.3 15.6-11.5 20.8-19.3 13 10.4 20.8 20.3 33.2 31.4 6.8 6 20 13.3 21.4 23.1.8 5.5-2.6 18.9-3.8 25.1zM222.2 130.5c5.4-14.9 27.2-34.7 45-32 7.7 1.2 18 8.2 12.2 17.7-30.2-7-45.2 12.6-54.4 33.1-8.1-2-4.9-13.1-2.8-18.8zm184.1 63.1c8.2-3.6 22.4-.7 29.6-5.3-4.2-11.5-10.3-21.4-9.3-37.7.5 0 1 0 1.4.1 6.8 14.2 12.7 29.2 21.4 41.7-5.7 13.5-43.6 25.4-43.1 1.2zm20.4-43zm-117.2 45.7c-6.8-10.9-19-32.5-14.5-45.3 6.5 11.9 8.6 24.4 17.8 33.3 4.1 4 12.2 9 8.2 20.2-.9 2.7-7.8 8.6-11.7 9.7-14.4 4.3-47.9.9-36.6-17.1 11.9.7 27.9 7.8 36.8-.8zm27.3 70c3.8 6.6 1.4 18.7 12.1 20.6 20.2 3.4 43.6-12.3 58.1-17.8 9-15.2-.8-20.7-8.9-30.5-16.6-20-38.8-44.8-38-74.7 6.7-4.9 7.3 7.4 8.2 9.7 8.7 20.3 30.4 46.2 46.3 63.5 3.9 4.3 10.3 8.4 11 11.2 2.1 8.2-5.4 18-4.5 23.5-21.7 13.9-45.8 29.1-81.4 25.6-7.4-6.7-10.3-21.4-2.9-31.1zm-201.3-9.2c-6.8-3.9-8.4-21-16.4-21.4-11.4-.7-9.3 22.2-9.3 35.5-7.8-7.1-9.2-29.1-3.5-40.3-6.6-3.2-9.5 3.6-13.1 5.9 4.7-34.1 49.8-15.8 42.3 20.3zm299.6 28.8c-10.1 19.2-24.4 40.4-54 41-.6-6.2-1.1-15.6 0-19.4 22.7-2.2 36.6-13.7 54-21.6zm-141.9 12.4c18.9 9.9 53.6 11 79.3 10.2 1.4 5.6 1.3 12.6 1.4 19.4-33 1.8-72-6.4-80.7-29.6zm92.2 46.7c-1.7 4.3-5.3 9.3-9.8 11.1-12.1 
4.9-45.6 8.7-62.4-.3-10.7-5.7-17.5-18.5-23.4-26-2.8-3.6-16.9-12.9-.2-12.9 13.1 32.7 58 29 95.8 28.1z"],joget:[496,512,[],"f3b7","M227.5 468.7c-9-13.6-19.9-33.3-23.7-42.4-5.7-13.7-27.2-45.6 31.2-67.1 51.7-19.1 176.7-16.5 208.8-17.6-4 9-8.6 17.9-13.9 26.6-40.4 65.5-110.4 101.5-182 101.5-6.8 0-13.6-.4-20.4-1M66.1 143.9C128 43.4 259.6 12.2 360.1 74.1c74.8 46.1 111.2 130.9 99.3 212.7-24.9-.5-179.3-3.6-230.3-4.9-55.5-1.4-81.7-20.8-58.5-48.2 23.2-27.4 51.1-40.7 68.9-51.2 17.9-10.5 27.3-33.7-23.6-29.7C87.3 161.5 48.6 252.1 37.6 293c-8.8-49.7-.1-102.7 28.5-149.1m-29.2-18c-71.9 116.6-35.6 269.3 81 341.2 116.6 71.9 269.3 35.6 341.2-80.9 71.9-116.6 35.6-269.4-81-341.2-40.5-25.1-85.5-37-129.9-37C165 8 83.8 49.9 36.9 125.9m244.4 110.4c-31.5 20.5-65.3 31.3-65.3 31.3l169.5-1.6 46.5-23.4s3.6-9.5-19.1-15.5c-22.7-6-57 11.3-86.7 27.2-29.7 15.8-31.1 8.2-31.1 8.2s40.2-28.1 50.7-34.5c10.5-6.4 31.9-14 13.4-24.6-3.2-1.8-6.7-2.7-10.4-2.7-17.8 0-41.5 18.7-67.5 35.6"],joomla:[448,512,[],"f1aa","M.6 92.1C.6 58.8 27.4 32 60.4 32c30 0 54.5 21.9 59.2 50.2 32.6-7.6 67.1.6 96.5 30l-44.3 44.3c-20.5-20.5-42.6-16.3-55.4-3.5-14.3 14.3-14.3 37.9 0 52.2l99.5 99.5-44 44.3c-87.7-87.2-49.7-49.7-99.8-99.7-26.8-26.5-35-64.8-24.8-98.9C20.4 144.6.6 120.7.6 92.1zm129.5 116.4l44.3 44.3c10-10 89.7-89.7 99.7-99.8 14.3-14.3 37.6-14.3 51.9 0 12.8 12.8 17 35-3.5 55.4l44 44.3c31.2-31.2 38.5-67.6 28.9-101.2 29.2-4.1 51.9-29.2 51.9-59.5 0-33.2-26.8-60.1-59.8-60.1-30.3 0-55.4 22.5-59.5 51.6-33.8-9.9-71.7-1.5-98.3 25.1-18.3 19.1-71.1 71.5-99.6 99.9zm266.3 152.2c8.2-32.7-.9-68.5-26.3-93.9-11.8-12.2 5 4.7-99.5-99.7l-44.3 44.3 99.7 99.7c14.3 14.3 14.3 37.6 0 51.9-12.8 12.8-35 17-55.4-3.5l-44 44.3c27.6 30.2 68 38.8 102.7 28 5.5 27.4 29.7 48.1 58.9 48.1 33 0 59.8-26.8 59.8-60.1 0-30.2-22.5-55-51.6-59.1zm-84.3-53.1l-44-44.3c-87 86.4-50.4 50.4-99.7 99.8-14.3 14.3-37.6 14.3-51.9 0-13.1-13.4-16.9-35.3 3.2-55.4l-44-44.3c-30.2 30.2-38 65.2-29.5 98.3-26.7 6-46.2 29.9-46.2 58.2C0 453.2 26.8 480 59.8 480c28.6 0 52.5-19.8 58.6-46.7 32.7 8.2 68.5-.6 94.2-26 32.1-32 12.2-12.4 99.5-99.7z"],js:[448,512,[],"f3b8","M0 32v448h448V32H0zm243.8 349.4c0 43.6-25.6 63.5-62.9 63.5-33.7 0-53.2-17.4-63.2-38.5l34.3-20.7c6.6 11.7 12.6 21.6 27.1 21.6 13.8 0 22.6-5.4 22.6-26.5V237.7h42.1v143.7zm99.6 63.5c-39.1 0-64.4-18.6-76.7-43l34.3-19.8c9 14.7 20.8 25.6 41.5 25.6 17.4 0 28.6-8.7 28.6-20.8 0-14.4-11.4-19.5-30.7-28l-10.5-4.5c-30.4-12.9-50.5-29.2-50.5-63.5 0-31.6 24.1-55.6 61.6-55.6 26.8 0 46 9.3 59.8 33.7L368 290c-7.2-12.9-15-18-27.1-18-12.3 0-20.1 7.8-20.1 18 0 12.6 7.8 17.7 25.9 25.6l10.5 4.5c35.8 15.3 55.9 31 55.9 66.2 0 37.8-29.8 58.6-69.7 58.6z"],"js-square":[512,512,[],"f3b9","M432 32H80c-26.5 0-48 21.5-48 48v352c0 26.5 21.5 48 48 48h352c26.5 0 48-21.5 48-48V80c0-26.5-21.5-48-48-48zM275.8 381.4c0 43.6-25.6 63.5-62.9 63.5-33.7 0-53.2-17.4-63.2-38.5l34.3-20.7c6.6 11.7 12.6 21.6 27.1 21.6 13.8 0 22.6-5.4 22.6-26.5V237.7h42.1v143.7zm99.6 63.5c-39.1 0-64.4-18.6-76.7-43l34.3-19.8c9 14.7 20.8 25.6 41.5 25.6 17.4 0 28.6-8.7 28.6-20.8 0-14.4-11.4-19.5-30.7-28l-10.5-4.5c-30.4-12.9-50.5-29.2-50.5-63.5 0-31.6 24.1-55.6 61.6-55.6 26.8 0 46 9.3 59.8 33.7L400 290c-7.2-12.9-15-18-27.1-18-12.3 0-20.1 7.8-20.1 18 0 12.6 7.8 17.7 25.9 25.6l10.5 4.5c35.8 15.3 55.9 31 55.9 66.2 0 37.8-29.8 58.6-69.7 58.6z"],jsfiddle:[576,512,[],"f1cc","M510.634 237.462c-4.727-2.621-5.664-5.748-6.381-10.776-2.352-16.488-3.539-33.619-9.097-49.095-35.895-99.957-153.99-143.386-246.849-91.646-27.37 15.25-48.971 36.369-65.493 63.903-3.184-1.508-5.458-2.71-7.824-3.686-30.102-12.421-59.049-10.121-85.331 9.167-25.531 
18.737-36.422 44.548-32.676 76.408.355 3.025-1.967 7.621-4.514 9.545-39.712 29.992-56.031 78.065-41.902 124.615 13.831 45.569 57.514 79.796 105.608 81.433 30.291 1.031 60.637.546 90.959.539 84.041-.021 168.09.531 252.12-.48 52.664-.634 96.108-36.873 108.212-87.293 11.54-48.074-11.144-97.3-56.832-122.634zm21.107 156.88c-18.23 22.432-42.343 35.253-71.28 35.65-56.874.781-113.767.23-170.652.23 0 .7-163.028.159-163.728.154-43.861-.332-76.739-19.766-95.175-59.995-18.902-41.245-4.004-90.848 34.186-116.106 9.182-6.073 12.505-11.566 10.096-23.136-5.49-26.361 4.453-47.956 26.42-62.981 22.987-15.723 47.422-16.146 72.034-3.083 10.269 5.45 14.607 11.564 22.198-2.527 14.222-26.399 34.557-46.727 60.671-61.294 97.46-54.366 228.37 7.568 230.24 132.697.122 8.15 2.412 12.428 9.848 15.894 57.56 26.829 74.456 96.122 35.142 144.497zm-87.789-80.499c-5.848 31.157-34.622 55.096-66.666 55.095-16.953-.001-32.058-6.545-44.079-17.705-27.697-25.713-71.141-74.98-95.937-93.387-20.056-14.888-41.99-12.333-60.272 3.782-49.996 44.071 15.859 121.775 67.063 77.188 4.548-3.96 7.84-9.543 12.744-12.844 8.184-5.509 20.766-.884 13.168 10.622-17.358 26.284-49.33 38.197-78.863 29.301-28.897-8.704-48.84-35.968-48.626-70.179 1.225-22.485 12.364-43.06 35.414-55.965 22.575-12.638 46.369-13.146 66.991 2.474C295.68 280.7 320.467 323.97 352.185 343.47c24.558 15.099 54.254 7.363 68.823-17.506 28.83-49.209-34.592-105.016-78.868-63.46-3.989 3.744-6.917 8.932-11.41 11.72-10.975 6.811-17.333-4.113-12.809-10.353 20.703-28.554 50.464-40.44 83.271-28.214 31.429 11.714 49.108 44.366 42.76 78.186z"],keycdn:[512,512,[],"f3ba","M63.8 409.3l60.5-59c32.1 42.8 71.1 66 126.6 67.4 30.5.7 60.3-7 86.4-22.4 5.1 5.3 18.5 19.5 20.9 22-32.2 20.7-69.6 31.1-108.1 30.2-43.3-1.1-84.6-16.7-117.7-44.4.3-.6-38.2 37.5-38.6 37.9 9.5 29.8-13.1 62.4-46.3 62.4C20.7 503.3 0 481.7 0 454.9c0-34.3 33.1-56.6 63.8-45.6zm354.9-252.4c19.1 31.3 29.6 67.4 28.7 104-1.1 44.8-19 87.5-48.6 121 .3.3 23.8 25.2 24.1 25.5 9.6-1.3 19.2 2 25.9 9.1 11.3 12 10.9 30.9-1.1 42.4-12 11.3-30.9 10.9-42.4-1.1-6.7-7-9.4-16.8-7.6-26.3-24.9-26.6-44.4-47.2-44.4-47.2 42.7-34.1 63.3-79.6 64.4-124.2.7-28.9-7.2-57.2-21.1-82.2l22.1-21zM104 53.1c6.7 7 9.4 16.8 7.6 26.3l45.9 48.1c-4.7 3.8-13.3 10.4-22.8 21.3-25.4 28.5-39.6 64.8-40.7 102.9-.7 28.9 6.1 57.2 20 82.4l-22 21.5C72.7 324 63.1 287.9 64.2 250.9c1-44.6 18.3-87.6 47.5-121.1l-25.3-26.4c-9.6 1.3-19.2-2-25.9-9.1-11.3-12-10.9-30.9 1.1-42.4C73.5 40.7 92.2 41 104 53.1zM464.9 8c26 0 47.1 22.4 47.1 48.3S490.9 104 464.9 104c-6.3.1-14-1.1-15.9-1.8l-62.9 59.7c-32.7-43.6-76.7-65.9-126.9-67.2-30.5-.7-60.3 6.8-86.2 22.4l-21.1-22C184.1 74.3 221.5 64 260 64.9c43.3 1.1 84.6 16.7 117.7 44.6l41.1-38.6c-1.5-4.7-2.2-9.6-2.2-14.5C416.5 29.7 438.9 8 464.9 8zM256.7 113.4c5.5 0 10.9.4 16.4 1.1 78.1 9.8 133.4 81.1 123.8 159.1-9.8 78.1-81.1 133.4-159.1 123.8-78.1-9.8-133.4-81.1-123.8-159.2 9.3-72.4 70.1-124.6 142.7-124.8zm-59 119.4c.6 22.7 12.2 41.8 32.4 52.2l-11 51.7h73.7l-11-51.7c20.1-10.9 32.1-29 32.4-52.2-.4-32.8-25.8-57.5-58.3-58.3-32.1.8-57.3 24.8-58.2 58.3zM256 160"],kickstarter:[448,512,[],"f3bb","M400 480H48c-26.4 0-48-21.6-48-48V80c0-26.4 21.6-48 48-48h352c26.4 0 48 21.6 48 48v352c0 26.4-21.6 48-48 48zM199.6 178.5c0-30.7-17.6-45.1-39.7-45.1-25.8 0-40 19.8-40 44.5v154.8c0 25.8 13.7 45.6 40.5 45.6 21.5 0 39.2-14 39.2-45.6v-41.8l60.6 75.7c12.3 14.9 39 16.8 55.8 0 14.6-15.1 14.8-36.8 4-50.4l-49.1-62.8 40.5-58.7c9.4-13.5 9.5-34.5-5.6-49.1-16.4-15.9-44.6-17.3-61.4 7l-44.8 64.7v-38.8z"],"kickstarter-k":[384,512,[],"f3bc","M147.3 114.4c0-56.2-32.5-82.4-73.4-82.4C26.2 32 0 68.2 0 
113.4v283c0 47.3 25.3 83.4 74.9 83.4 39.8 0 72.4-25.6 72.4-83.4v-76.5l112.1 138.3c22.7 27.2 72.1 30.7 103.2 0 27-27.6 27.3-67.4 7.4-92.2l-90.8-114.8 74.9-107.4c17.4-24.7 17.5-63.1-10.4-89.8-30.3-29-82.4-31.6-113.6 12.8L147.3 185v-70.6z"],laravel:[640,512,[],"f3bd","M637.5 204.7c-4.2-4.8-62.8-78.1-73.1-90.5-10.3-12.4-15.4-10.2-21.7-9.3s-80.5 13.4-89.1 14.8c-8.6 1.5-14 4.9-8.7 12.3 4.7 6.6 53.4 75.7 64.2 90.9l-193.7 46.4L161.2 11.7C155.1 2.6 153.8-.6 139.8.1 125.9.7 19 9.6 11.4 10.2c-7.6.6-16 4-8.4 22s129 279.6 132.4 287.2c3.4 7.6 12.2 20 32.8 15 21.1-5.1 94.3-24.2 134.3-34.7 21.1 38.3 64.2 115.9 72.2 127 10.6 14.9 18 12.4 34.3 7.4 12.8-3.9 199.6-71.1 208-74.5 8.4-3.5 13.6-5.9 7.9-14.4-4.2-6.2-53.5-72.2-79.3-106.8 17.7-4.7 80.6-21.4 87.3-23.3 7.8-1.8 8.9-5.7 4.6-10.4zm-352.2 72c-2.3.5-110.8 26.5-116.6 27.8-5.8 1.3-5.8.7-6.5-1.3-.7-2-129-266.7-130.8-270-1.8-3.3-1.7-5.9 0-5.9s102.5-9 106-9.2c3.6-.2 3.2.6 4.5 2.8 0 0 142.2 245.4 144.6 249.7 2.6 4.3 1.1 5.6-1.2 6.1zm306 57.3c1.7 2.7 3.5 4.5-2 6.4-5.4 2-183.7 62.1-187.1 63.6-3.5 1.5-6.2 2-10.6-4.5-4.5-6.4-62.4-106.8-62.4-106.8l188.8-49c4.7-1.5 6.2-2.5 9.2 2.2 2.9 4.7 62.4 85.4 64.1 88.1zm12.1-134.1c-4.2.9-73.6 18.1-73.6 18.1l-56.7-77.8c-1.6-2.2-2.9-4.5 1.1-5s68.4-12.2 71.3-12.8c2.9-.7 5.4-1.5 9 3.4 3.6 4.9 52.6 67 54.5 69.4 1.8 2.3-1.4 3.8-5.6 4.7z"],lastfm:[512,512,[],"f202","M225.8 367.1l-18.8-51s-30.5 34-76.2 34c-40.5 0-69.2-35.2-69.2-91.5 0-72.1 36.4-97.9 72.1-97.9 66.5 0 74.8 53.3 100.9 134.9 18.8 56.9 54 102.6 155.4 102.6 72.7 0 122-22.3 122-80.9 0-72.9-62.7-80.6-115-92.1-25.8-5.9-33.4-16.4-33.4-34 0-19.9 15.8-31.7 41.6-31.7 28.2 0 43.4 10.6 45.7 35.8l58.6-7c-4.7-52.8-41.1-74.5-100.9-74.5-52.8 0-104.4 19.9-104.4 83.9 0 39.9 19.4 65.1 68 76.8 44.9 10.6 79.8 13.8 79.8 45.7 0 21.7-21.1 30.5-61 30.5-59.2 0-83.9-31.1-97.9-73.9-32-96.8-43.6-163-161.3-163C45.7 113.8 0 168.3 0 261c0 89.1 45.7 137.2 127.9 137.2 66.2 0 97.9-31.1 97.9-31.1z"],"lastfm-square":[448,512,[],"f203","M400 32H48C21.5 32 0 53.5 0 80v352c0 26.5 21.5 48 48 48h352c26.5 0 48-21.5 48-48V80c0-26.5-21.5-48-48-48zm-92.2 312.9c-63.4 0-85.4-28.6-97.1-64.1-16.3-51-21.5-84.3-63-84.3-22.4 0-45.1 16.1-45.1 61.2 0 35.2 18 57.2 43.3 57.2 28.6 0 47.6-21.3 47.6-21.3l11.7 31.9s-19.8 19.4-61.2 19.4c-51.3 0-79.9-30.1-79.9-85.8 0-57.9 28.6-92 82.5-92 73.5 0 80.8 41.4 100.8 101.9 8.8 26.8 24.2 46.2 61.2 46.2 24.9 0 38.1-5.5 38.1-19.1 0-19.9-21.8-22-49.9-28.6-30.4-7.3-42.5-23.1-42.5-48 0-40 32.3-52.4 65.2-52.4 37.4 0 60.1 13.6 63 46.6l-36.7 4.4c-1.5-15.8-11-22.4-28.6-22.4-16.1 0-26 7.3-26 19.8 0 11 4.8 17.6 20.9 21.3 32.7 7.1 71.8 12 71.8 57.5.1 36.7-30.7 50.6-76.1 50.6z"],leanpub:[576,512,[],"f212","M386.539 111.485l15.096 248.955-10.979-.275c-36.232-.824-71.64 8.783-102.657 27.997-31.016-19.214-66.424-27.997-102.657-27.997-45.564 0-82.07 10.705-123.516 27.723L93.117 129.6c28.546-11.803 61.484-18.115 92.226-18.115 41.173 0 73.836 13.175 102.657 42.544 27.723-28.271 59.013-41.721 98.539-42.544zM569.07 448c-25.526 0-47.485-5.215-70.542-15.645-34.31-15.645-69.993-24.978-107.871-24.978-38.977 0-74.934 12.901-102.657 40.623-27.723-27.723-63.68-40.623-102.657-40.623-37.878 0-73.561 9.333-107.871 24.978C55.239 442.236 32.731 448 8.303 448H6.93L49.475 98.859C88.726 76.626 136.486 64 181.775 64 218.83 64 256.984 71.685 288 93.095 319.016 71.685 357.17 64 394.225 64c45.289 0 93.049 12.626 132.3 34.859L569.07 448zm-43.368-44.741l-34.036-280.246c-30.742-13.999-67.248-21.41-101.009-21.41-38.428 0-74.385 12.077-102.657 38.702-28.272-26.625-64.228-38.702-102.657-38.702-33.761 0-70.267 7.411-101.009 21.41L50.298 
403.259c47.211-19.487 82.894-33.486 135.045-33.486 37.604 0 70.817 9.606 102.657 29.644 31.84-20.038 65.052-29.644 102.657-29.644 52.151 0 87.834 13.999 135.045 33.486z"],less:[640,512,[],"f41d","M612.7 219c0-20.5 3.2-32.6 3.2-54.6 0-34.2-12.6-45.2-40.5-45.2h-20.5v24.2h6.3c14.2 0 17.3 4.7 17.3 22.1 0 16.3-1.6 32.6-1.6 51.5 0 24.2 7.9 33.6 23.6 37.3v1.6c-15.8 3.7-23.6 13.1-23.6 37.3 0 18.9 1.6 34.2 1.6 51.5 0 17.9-3.7 22.6-17.3 22.6v.5h-6.3V393h20.5c27.8 0 40.5-11 40.5-45.2 0-22.6-3.2-34.2-3.2-54.6 0-11 6.8-22.6 27.3-23.6v-27.3c-20.5-.7-27.3-12.3-27.3-23.3zm-105.6 32c-15.8-6.3-30.5-10-30.5-20.5 0-7.9 6.3-12.6 17.9-12.6s22.1 4.7 33.6 13.1l21-27.8c-13.1-10-31-20.5-55.2-20.5-35.7 0-59.9 20.5-59.9 49.4 0 25.7 22.6 38.9 41.5 46.2 16.3 6.3 32.1 11.6 32.1 22.1 0 7.9-6.3 13.1-20.5 13.1-13.1 0-26.3-5.3-40.5-16.3l-21 30.5c15.8 13.1 39.9 22.1 59.9 22.1 42 0 64.6-22.1 64.6-51s-22.5-41-43-47.8zm-358.9 59.4c-3.7 0-8.4-3.2-8.4-13.1V119.1H65.2c-28.4 0-41 11-41 45.2 0 22.6 3.2 35.2 3.2 54.6 0 11-6.8 22.6-27.3 23.6v27.3c20.5.5 27.3 12.1 27.3 23.1 0 19.4-3.2 31-3.2 53.6 0 34.2 12.6 45.2 40.5 45.2h20.5v-24.2h-6.3c-13.1 0-17.3-5.3-17.3-22.6s1.6-32.1 1.6-51.5c0-24.2-7.9-33.6-23.6-37.3v-1.6c15.8-3.7 23.6-13.1 23.6-37.3 0-18.9-1.6-34.2-1.6-51.5s3.7-22.1 17.3-22.1H93v150.8c0 32.1 11 53.1 43.1 53.1 10 0 17.9-1.6 23.6-3.7l-5.3-34.2c-3.1.8-4.6.8-6.2.8zM379.9 251c-16.3-6.3-31-10-31-20.5 0-7.9 6.3-12.6 17.9-12.6 11.6 0 22.1 4.7 33.6 13.1l21-27.8c-13.1-10-31-20.5-55.2-20.5-35.7 0-59.9 20.5-59.9 49.4 0 25.7 22.6 38.9 41.5 46.2 16.3 6.3 32.1 11.6 32.1 22.1 0 7.9-6.3 13.1-20.5 13.1-13.1 0-26.3-5.3-40.5-16.3l-20.5 30.5c15.8 13.1 39.9 22.1 59.9 22.1 42 0 64.6-22.1 64.6-51 .1-28.9-22.5-41-43-47.8zm-155-68.8c-38.4 0-75.1 32.1-74.1 82.5 0 52 34.2 82.5 79.3 82.5 18.9 0 39.9-6.8 56.2-17.9l-15.8-27.8c-11.6 6.8-22.6 10-34.2 10-21 0-37.3-10-41.5-34.2H290c.5-3.7 1.6-11 1.6-19.4.6-42.6-22.6-75.7-66.7-75.7zm-30 66.2c3.2-21 15.8-31 30.5-31 18.9 0 26.3 13.1 26.3 31h-56.8z"],line:[448,512,[],"f3c0","M272.1 204.2v71.1c0 1.8-1.4 3.2-3.2 3.2h-11.4c-1.1 0-2.1-.6-2.6-1.3l-32.6-44v42.2c0 1.8-1.4 3.2-3.2 3.2h-11.4c-1.8 0-3.2-1.4-3.2-3.2v-71.1c0-1.8 1.4-3.2 3.2-3.2H219c1 0 2.1.5 2.6 1.4l32.6 44v-42.2c0-1.8 1.4-3.2 3.2-3.2h11.4c1.8-.1 3.3 1.4 3.3 3.1zm-82-3.2h-11.4c-1.8 0-3.2 1.4-3.2 3.2v71.1c0 1.8 1.4 3.2 3.2 3.2h11.4c1.8 0 3.2-1.4 3.2-3.2v-71.1c0-1.7-1.4-3.2-3.2-3.2zm-27.5 59.6h-31.1v-56.4c0-1.8-1.4-3.2-3.2-3.2h-11.4c-1.8 0-3.2 1.4-3.2 3.2v71.1c0 .9.3 1.6.9 2.2.6.5 1.3.9 2.2.9h45.7c1.8 0 3.2-1.4 3.2-3.2v-11.4c0-1.7-1.4-3.2-3.1-3.2zM332.1 201h-45.7c-1.7 0-3.2 1.4-3.2 3.2v71.1c0 1.7 1.4 3.2 3.2 3.2h45.7c1.8 0 3.2-1.4 3.2-3.2v-11.4c0-1.8-1.4-3.2-3.2-3.2H301v-12h31.1c1.8 0 3.2-1.4 3.2-3.2V234c0-1.8-1.4-3.2-3.2-3.2H301v-12h31.1c1.8 0 3.2-1.4 3.2-3.2v-11.4c-.1-1.7-1.5-3.2-3.2-3.2zM448 113.7V399c-.1 44.8-36.8 81.1-81.7 81H81c-44.8-.1-81.1-36.9-81-81.7V113c.1-44.8 36.9-81.1 81.7-81H367c44.8.1 81.1 36.8 81 81.7zm-61.6 122.6c0-73-73.2-132.4-163.1-132.4-89.9 0-163.1 59.4-163.1 132.4 0 65.4 58 120.2 136.4 130.6 19.1 4.1 16.9 11.1 12.6 36.8-.7 4.1-3.3 16.1 14.1 8.8 17.4-7.3 93.9-55.3 128.2-94.7 23.6-26 34.9-52.3 34.9-81.5z"],linkedin:[448,512,[],"f08c","M416 32H31.9C14.3 32 0 46.5 0 64.3v383.4C0 465.5 14.3 480 31.9 480H416c17.6 0 32-14.5 32-32.3V64.3c0-17.8-14.4-32.3-32-32.3zM135.4 416H69V202.2h66.5V416zm-33.2-243c-21.3 0-38.5-17.3-38.5-38.5S80.9 96 102.2 96c21.2 0 38.5 17.3 38.5 38.5 0 21.3-17.2 38.5-38.5 38.5zm282.1 243h-66.4V312c0-24.8-.5-56.7-34.5-56.7-34.6 0-39.9 27-39.9 54.9V416h-66.4V202.2h63.7v29.2h.9c8.9-16.8 30.6-34.5 62.9-34.5 67.2 0 79.7 44.3 79.7 
101.9V416z"],"linkedin-in":[448,512,[],"f0e1","M100.3 480H7.4V180.9h92.9V480zM53.8 140.1C24.1 140.1 0 115.5 0 85.8 0 56.1 24.1 32 53.8 32c29.7 0 53.8 24.1 53.8 53.8 0 29.7-24.1 54.3-53.8 54.3zM448 480h-92.7V334.4c0-34.7-.7-79.2-48.3-79.2-48.3 0-55.7 37.7-55.7 76.7V480h-92.8V180.9h89.1v40.8h1.3c12.4-23.5 42.7-48.3 87.9-48.3 94 0 111.3 61.9 111.3 142.3V480z"],linode:[448,512,[],"f2b8","M437.4 226.3c-.3-.9-.9-1.4-1.4-2l-70-38.6c-.9-.6-2-.6-3.1 0l-58.9 36c-.9.6-1.4 1.7-1.4 2.6l-.9 31.4-24-16c-.9-.6-2.3-.6-3.1 0L240 260.9l-1.4-35.1c0-.9-.6-2-1.4-2.3l-36-24.3 33.7-17.4c1.1-.6 1.7-1.7 1.7-2.9l-5.7-132.3c0-.9-.9-2-1.7-2.6L138.6.3c-.9-.3-1.7-.3-2.3-.3L12.6 38.6c-1.4.6-2.3 2-2 3.7L38 175.4c.9 3.4 34 27.4 38.6 30.9l-26.9 12.9c-1.4.9-2 2.3-1.7 3.4l20.6 100.3c.6 2.9 23.7 23.1 27.1 26.3l-17.4 10.6c-.9.6-1.7 2-1.4 3.1 1.4 7.1 15.4 77.7 16.9 79.1l65.1 69.1c.6.6 1.4.6 2.3.9.6 0 1.1-.3 1.7-.6l83.7-66.9c.9-.6 1.1-1.4 1.1-2.3l-2-46 28 23.7c1.1.9 2.9.9 4 0l66.9-53.4c.9-.6 1.1-1.4 1.1-2.3l2.3-33.4 20.3 14c1.1.9 2.6.9 3.7 0l54.6-43.7c.6-.3 1.1-1.1 1.1-2 .9-6.5 10.3-70.8 9.7-72.8zm-204.8 4.8l4 92.6-90.6 61.2-14-96.6 100.6-57.2zm-7.7-180l5.4 126-106.6 55.4L104 97.7l120.9-46.6zM44 173.1L18 48l79.7 49.4 19.4 132.9L44 173.1zm30.6 147.8L55.7 230l70 58.3 13.7 93.4-64.8-60.8zm24.3 117.7l-13.7-67.1 61.7 60.9 9.7 67.4-57.7-61.2zm64.5 64.5l-10.6-70.9 85.7-61.4 3.1 70-78.2 62.3zm82-115.1c0-3.4.9-22.9-2-25.1l-24.3-20 22.3-14.9c2.3-1.7 1.1-5.7 1.1-8l29.4 22.6.6 68.3-27.1-22.9zm94.3-25.4l-60.9 48.6-.6-68.6 65.7-46.9-4.2 66.9zm27.7-25.7l-19.1-13.4 2-34c.3-.9-.3-2-1.1-2.6L308 259.7l.6-30 64.6 40.6-5.8 66.6zm54.6-39.8l-48.3 38.3 5.7-65.1 51.1-36.6-8.5 63.4z"],linux:[448,512,[],"f17c","M196.1 123.6c-.2-1.4 1.9-2.3 3.2-2.9 1.7-.7 3.9-1 5.5-.1.4.2.8.7.6 1.1-.4 1.2-2.4 1-3.5 1.6-1 .5-1.8 1.7-3 1.7-1 .1-2.7-.4-2.8-1.4zm24.7-.3c1 .5 1.8 1.7 3 1.7 1.1 0 2.8-.4 2.9-1.5.2-1.4-1.9-2.3-3.2-2.9-1.7-.7-3.9-1-5.5-.1-.4.2-.8.7-.6 1.1.3 1.3 2.3 1.1 3.4 1.7zm214.7 310.2c-.5 8.2-6.5 13.8-13.9 18.3-14.9 9-37.3 15.8-50.9 32.2l-2.6-2.2 2.6 2.2c-14.2 16.9-31.7 26.6-48.3 27.9-16.5 1.3-32-6.3-40.3-23v-.1c-1.1-2.1-1.9-4.4-2.5-6.7-21.5 1.2-40.2-5.3-55.1-4.1-22 1.2-35.8 6.5-48.3 6.6-4.8 10.6-14.3 17.6-25.9 20.2-16 3.7-36.1 0-55.9-10.4l1.6-3-1.6 3c-18.5-9.8-42-8.9-59.3-12.5-8.7-1.8-16.3-5-20.1-12.3-3.7-7.3-3-17.3 2.2-31.7 1.7-5.1.4-12.7-.8-20.8-.6-3.9-1.2-7.9-1.2-11.8 0-4.3.7-8.5 2.8-12.4 4.5-8.5 11.8-12.1 18.5-14.5 6.7-2.4 12.8-4 17-8.3 5.2-5.5 10.1-14.4 16.6-20.2-2.6-17.2.2-35.4 6.2-53.3 12.6-37.9 39.2-74.2 58.1-96.7 16.1-22.9 20.8-41.3 22.5-64.7C158 103.4 132.4-.2 234.8 0c80.9.1 76.3 85.4 75.8 131.3-.3 30.1 16.3 50.5 33.4 72 15.2 18 35.1 44.3 46.5 74.4 9.3 24.6 12.9 51.8 3.7 79.1 1.4.5 2.8 1.2 4.1 2 1.4.8 2.7 1.8 4 2.9 6.6 5.6 8.7 14.3 10.5 22.4 1.9 8.1 3.6 15.7 7.2 19.7 11.1 12.4 15.9 21.5 15.5 29.7zM220.8 109.1c3.6.9 8.9 2.4 13 4.4-2.1-12.2 4.5-23.5 11.8-23 8.9.3 13.9 15.5 9.1 27.3-.8 1.9-2.8 3.4-3.9 4.6 6.7 2.3 11 4.1 12.6 4.9 7.9-9.5 10.8-26.2 4.3-40.4-9.8-21.4-34.2-21.8-44 .4-3.2 7.2-3.9 14.9-2.9 21.8zm-46.2 18.8c7.8-5.7 6.9-4.7 5.9-5.5-8-6.9-6.6-27.4 1.8-28.1 6.3-.5 10.8 10.7 9.6 19.6 3.1-2.1 6.7-3.6 10.2-4.6 1.7-19.3-9-33.5-19.1-33.5-18.9 0-24 37.5-8.4 52.1zm-9.4 20.9c1.5 4.9 6.1 10.5 14.7 15.3 7.8 4.6 12 11.5 20 15 2.6 1.1 5.7 1.9 9.6 2.1 18.4 1.1 27.1-11.3 38.2-14.9 11.7-3.7 20.1-11 22.7-18.1 3.2-8.5-2.1-14.7-10.5-18.2-11.3-4.9-16.3-5.2-22.6-9.3-10.3-6.6-18.8-8.9-25.9-8.9-14.4 0-23.2 9.8-27.9 14.2-.5.5-7.9 5.9-14.1 10.5-4.2 3.3-5.6 7.4-4.2 12.3zm-33.5 252.8L112.1 366c-6.8-9.2-13.8-14.8-21.9-16-7.7-1.2-12.6 1.4-17.7 6.9-4.8 5.1-8.8 12.3-14.3 
18-7.8 6.5-9.3 6.2-19.6 9.9-6.3 2.2-11.3 4.6-14.8 11.3-2.7 5-2.1 12.2-.9 20 1.2 7.9 3 16.3.6 23.9v.2c-5 13.7-5 21.7-2.6 26.4 7.9 15.4 46.6 6.1 76.5 21.9 31.4 16.4 72.6 17.1 75.3-18 2.1-20.5-31.5-49-41-68.9zm153.9 35.8c3.2-11 6.3-21.3 6.8-29 .8-15.2 1.6-28.7 4.4-39.9 3.1-12.6 9.3-23.1 21.4-27.3 2.3-21.1 18.7-21.1 38.3-12.5 18.9 8.5 26 16 22.8 26.1 1 0 2-.1 4.2 0 5.2-16.9-14.3-28-30.7-34.8 2.9-12 2.4-24.1-.4-35.7-6-25.3-22.6-47.8-35.2-59-2.3-.1-2.1 1.9 2.6 6.5 11.6 10.7 37.1 49.2 23.3 84.9-3.9-1-7.6-1.5-10.9-1.4-5.3-29.1-17.5-53.2-23.6-64.6-11.5-21.4-29.5-65.3-37.2-95.7-4.5 6.4-12.4 11.9-22.3 15-4.7 1.5-9.7 5.5-15.9 9-13.9 8-30 8.8-42.4-1.2-4.5-3.6-8-7.6-12.6-10.3-1.6-.9-5.1-3.3-6.2-4.1-2 37.8-27.3 85.3-39.3 112.7-8.3 19.7-13.2 40.8-13.8 61.5-21.8-29.1-5.9-66.3 2.6-82.4 9.5-17.6 11-22.5 8.7-20.8-8.6 14-22 36.3-27.2 59.2-2.7 11.9-3.2 24 .3 35.2 3.5 11.2 11.1 21.5 24.6 29.9 0 0 24.8 14.3 38.3 32.5 7.4 10 9.7 18.7 7.4 24.9-2.5 6.7-9.6 8.9-16.7 8.9 4.8 6 10.3 13 14.4 19.6 37.6 25.7 82.2 15.7 114.3-7.2zM415 408.5c-10-11.3-7.2-33.1-17.1-41.6-6.9-6-13.6-5.4-22.6-5.1-7.7 8.8-25.8 19.6-38.4 16.3-11.5-2.9-18-16.3-18.8-29.5-.3.2-.7.3-1 .5-7.1 3.9-11.1 10.8-13.7 21.1-2.5 10.2-3.4 23.5-4.2 38.7-.7 11.8-6.2 26.4-9.9 40.6-3.5 13.2-5.8 25.2-1.1 36.3 7.2 14.5 19.5 20.4 33.7 19.3 14.2-1.1 30.4-9.8 43.6-25.5 22-26.6 62.3-29.7 63.2-46.5.3-5.1-3.1-13-13.7-24.6zM173.3 148.7c2 1.9 4.7 4.5 8 7.1 6.6 5.2 15.8 10.6 27.3 10.6 11.6 0 22.5-5.9 31.8-10.8 4.9-2.6 10.9-7 14.8-10.4 3.9-3.4 5.9-6.3 3.1-6.6-2.8-.3-2.6 2.6-6 5.1-4.4 3.2-9.7 7.4-13.9 9.8-7.4 4.2-19.5 10.2-29.9 10.2-10.4 0-18.7-4.8-24.9-9.7-3.1-2.5-5.7-5-7.7-6.9-1.5-1.4-1.9-4.6-4.3-4.9-1.4-.1-1.8 3.7 1.7 6.5z"],lyft:[512,512,[],"f3c3","M0 81.1h77.8v208.7c0 33.1 15 52.8 27.2 61-12.7 11.1-51.2 20.9-80.2-2.8C7.8 334 0 310.7 0 289V81.1zm485.9 173.5v-22h23.8v-76.8h-26.1c-10.1-46.3-51.2-80.7-100.3-80.7-56.6 0-102.7 46-102.7 102.7V357c16 2.3 35.4-.3 51.7-14 17.1-14 24.8-37.2 24.8-59v-6.7h38.8v-76.8h-38.8v-23.3c0-34.6 52.2-34.6 52.2 0v77.1c0 56.6 46 102.7 102.7 102.7v-76.5c-14.5 0-26.1-11.7-26.1-25.9zm-294.3-99v113c0 15.4-23.8 15.4-23.8 0v-113H91v132.7c0 23.8 8 54 45 63.9 37 9.8 58.2-10.6 58.2-10.6-2.1 13.4-14.5 23.3-34.9 25.3-15.5 1.6-35.2-3.6-45-7.8v70.3c25.1 7.5 51.5 9.8 77.6 4.7 47.1-9.1 76.8-48.4 76.8-100.8V155.1h-77.1v.5z"],magento:[448,512,[],"f3c4","M445.7 127.9V384l-63.4 36.5V164.7L223.8 73.1 65.2 164.7l.4 255.9L2.3 384V128.1L224.2 0l221.5 127.9zM255.6 420.5L224 438.9l-31.8-18.2v-256l-63.3 36.6.1 255.9 94.9 54.9 95.1-54.9v-256l-63.4-36.6v255.9z"],maxcdn:[512,512,[],"f136","M461.1 442.7h-97.4L415.6 200c2.3-10.2.9-19.5-4.4-25.7-5-6.1-13.7-9.6-24.2-9.6h-49.3l-59.5 278h-97.4l59.5-278h-83.4l-59.5 278H0l59.5-278-44.6-95.4H387c39.4 0 75.3 16.3 98.3 44.9 23.3 28.6 31.8 67.4 23.6 105.9l-47.8 222.6z"],medapps:[320,512,[],"f3c6","M118.3 238.4c3.5-12.5 6.9-33.6 13.2-33.6 8.3 1.8 9.6 23.4 18.6 36.6 4.6-23.5 5.3-85.1 14.1-86.7 9-.7 19.7 66.5 22 77.5 9.9 4.1 48.9 6.6 48.9 6.6 1.9 7.3-24 7.6-40 7.8-4.6 14.8-5.4 27.7-11.4 28-4.7.2-8.2-28.8-17.5-49.6l-9.4 65.5c-4.4 13-15.5-22.5-21.9-39.3-3.3-.1-62.4-1.6-47.6-7.8l31-5zM228 448c21.2 0 21.2-32 0-32H92c-21.2 0-21.2 32 0 32h136zm-24 64c21.2 0 21.2-32 0-32h-88c-21.2 0-21.2 32 0 32h88zm34.2-141.5c3.2-18.9 5.2-36.4 11.9-48.8 7.9-14.7 16.1-28.1 24-41 24.6-40.4 45.9-75.2 45.9-125.5C320 69.6 248.2 0 160 0S0 69.6 0 155.2c0 50.2 21.3 85.1 45.9 125.5 7.9 12.9 16 26.3 24 41 6.7 12.5 8.7 29.8 11.9 48.9 3.5 21 36.1 15.7 32.6-5.1-3.6-21.7-5.6-40.7-15.3-58.6C66.5 246.5 33 211.3 33 155.2 33 87.3 90 32 160 32s127 55.3 127 123.2c0 56.1-33.5 91.3-66.1 
151.6-9.7 18-11.7 37.4-15.3 58.6-3.4 20.6 29 26.4 32.6 5.1z"],medium:[448,512,[],"f23a","M0 32v448h448V32H0zm372.2 106.1l-24 23c-2.1 1.6-3.1 4.2-2.7 6.7v169.3c-.4 2.6.6 5.2 2.7 6.7l23.5 23v5.1h-118V367l24.3-23.6c2.4-2.4 2.4-3.1 2.4-6.7V199.8l-67.6 171.6h-9.1L125 199.8v115c-.7 4.8 1 9.7 4.4 13.2l31.6 38.3v5.1H71.2v-5.1l31.6-38.3c3.4-3.5 4.9-8.4 4.1-13.2v-133c.4-3.7-1-7.3-3.8-9.8L75 138.1V133h87.3l67.4 148L289 133.1h83.2v5z"],"medium-m":[512,512,[],"f3c7","M71.5 142.3c.6-5.9-1.7-11.8-6.1-15.8L20.3 72.1V64h140.2l108.4 237.7L364.2 64h133.7v8.1l-38.6 37c-3.3 2.5-5 6.7-4.3 10.8v272c-.7 4.1 1 8.3 4.3 10.8l37.7 37v8.1H307.3v-8.1l39.1-37.9c3.8-3.8 3.8-5 3.8-10.8V171.2L241.5 447.1h-14.7L100.4 171.2v184.9c-1.1 7.8 1.5 15.6 7 21.2l50.8 61.6v8.1h-144v-8L65 377.3c5.4-5.6 7.9-13.5 6.5-21.2V142.3z"],medrt:[544,512,[],"f3c8","M113.7 256c0 121.8 83.9 222.8 193.5 241.1-18.7 4.5-38.2 6.9-58.2 6.9C111.4 504 0 393 0 256S111.4 8 248.9 8c20.1 0 39.6 2.4 58.2 6.9C197.5 33.2 113.7 134.2 113.7 256m297.4 100.3c-77.7 55.4-179.6 47.5-240.4-14.6 5.5 14.1 12.7 27.7 21.7 40.5 61.6 88.2 182.4 109.3 269.7 47 87.3-62.3 108.1-184.3 46.5-272.6-9-12.9-19.3-24.3-30.5-34.2 37.4 78.8 10.7 178.5-67 233.9m-218.8-244c-1.4 1-2.7 2.1-4 3.1 64.3-17.8 135.9 4 178.9 60.5 35.7 47 42.9 106.6 24.4 158 56.7-56.2 67.6-142.1 22.3-201.8-50-65.5-149.1-74.4-221.6-19.8M296 224c-4.4 0-8-3.6-8-8v-40c0-4.4-3.6-8-8-8h-48c-4.4 0-8 3.6-8 8v40c0 4.4-3.6 8-8 8h-40c-4.4 0-8 3.6-8 8v48c0 4.4 3.6 8 8 8h40c4.4 0 8 3.6 8 8v40c0 4.4 3.6 8 8 8h48c4.4 0 8-3.6 8-8v-40c0-4.4 3.6-8 8-8h40c4.4 0 8-3.6 8-8v-48c0-4.4-3.6-8-8-8h-40z"],meetup:[512,512,[],"f2e0","M99 414.3c1.1 5.7-2.3 11.1-8 12.3-5.4 1.1-10.9-2.3-12-8-1.1-5.4 2.3-11.1 7.7-12.3 5.4-1.2 11.1 2.3 12.3 8zm143.1 71.4c-6.3 4.6-8 13.4-3.7 20 4.6 6.6 13.4 8.3 20 3.7 6.3-4.6 8-13.4 3.4-20-4.2-6.5-13.1-8.3-19.7-3.7zm-86-462.3c6.3-1.4 10.3-7.7 8.9-14-1.1-6.6-7.4-10.6-13.7-9.1-6.3 1.4-10.3 7.7-9.1 14 1.4 6.6 7.6 10.6 13.9 9.1zM34.4 226.3c-10-6.9-23.7-4.3-30.6 6-6.9 10-4.3 24 5.7 30.9 10 7.1 23.7 4.6 30.6-5.7 6.9-10.4 4.3-24.1-5.7-31.2zm272-170.9c10.6-6.3 13.7-20 7.7-30.3-6.3-10.6-19.7-14-30-7.7s-13.7 20-7.4 30.6c6 10.3 19.4 13.7 29.7 7.4zm-191.1 58c7.7-5.4 9.4-16 4.3-23.7s-15.7-9.4-23.1-4.3c-7.7 5.4-9.4 16-4.3 23.7 5.1 7.8 15.6 9.5 23.1 4.3zm372.3 156c-7.4 1.7-12.3 9.1-10.6 16.9 1.4 7.4 8.9 12.3 16.3 10.6 7.4-1.4 12.3-8.9 10.6-16.6-1.5-7.4-8.9-12.3-16.3-10.9zm39.7-56.8c-1.1-5.7-6.6-9.1-12-8-5.7 1.1-9.1 6.9-8 12.6 1.1 5.4 6.6 9.1 12.3 8 5.4-1.5 9.1-6.9 7.7-12.6zM447 138.9c-8.6 6-10.6 17.7-4.9 26.3 5.7 8.6 17.4 10.6 26 4.9 8.3-6 10.3-17.7 4.6-26.3-5.7-8.7-17.4-10.9-25.7-4.9zm-6.3 139.4c26.3 43.1 15.1 100-26.3 129.1-17.4 12.3-37.1 17.7-56.9 17.1-12 47.1-69.4 64.6-105.1 32.6-1.1.9-2.6 1.7-3.7 2.9-39.1 27.1-92.3 17.4-119.4-22.3-9.7-14.3-14.6-30.6-15.1-46.9-65.4-10.9-90-94-41.1-139.7-28.3-46.9.6-107.4 53.4-114.9C151.6 70 234.1 38.6 290.1 82c67.4-22.3 136.3 29.4 130.9 101.1 41.1 12.6 52.8 66.9 19.7 95.2zm-70 74.3c-3.1-20.6-40.9-4.6-43.1-27.1-3.1-32 43.7-101.1 40-128-3.4-24-19.4-29.1-33.4-29.4-13.4-.3-16.9 2-21.4 4.6-2.9 1.7-6.6 4.9-11.7-.3-6.3-6-11.1-11.7-19.4-12.9-12.3-2-17.7 2-26.6 9.7-3.4 2.9-12 12.9-20 9.1-3.4-1.7-15.4-7.7-24-11.4-16.3-7.1-40 4.6-48.6 20-12.9 22.9-38 113.1-41.7 125.1-8.6 26.6 10.9 48.6 36.9 47.1 11.1-.6 18.3-4.6 25.4-17.4 4-7.4 41.7-107.7 44.6-112.6 2-3.4 8.9-8 14.6-5.1 5.7 3.1 6.9 9.4 6 15.1-1.1 9.7-28 70.9-28.9 77.7-3.4 22.9 26.9 26.6 38.6 4 3.7-7.1 45.7-92.6 49.4-98.3 4.3-6.3 7.4-8.3 11.7-8 3.1 0 8.3.9 7.1 10.9-1.4 9.4-35.1 72.3-38.9 87.7-4.6 20.6 6.6 41.4 24.9 50.6 11.4 5.7 62.5 15.7 58.5-11.1zm5.7 
92.3c-10.3 7.4-12.9 22-5.7 32.6 7.1 10.6 21.4 13.1 32 6 10.6-7.4 13.1-22 6-32.6-7.4-10.6-21.7-13.5-32.3-6z"],microsoft:[448,512,[],"f3ca","M0 32h214.6v214.6H0V32zm233.4 0H448v214.6H233.4V32zM0 265.4h214.6V480H0V265.4zm233.4 0H448V480H233.4V265.4z"],mix:[416,512,[],"f3cb","M0 64v348.9c0 56.2 88 58.1 88 0V174.3c7.9-52.9 88-50.4 88 6.5v175.3c0 57.9 96 58 96 0V240c5.3-54.7 88-52.5 88 4.3v23.8c0 59.9 88 56.6 88 0V64H0z"],mixcloud:[640,512,[],"f289","M424.43 219.729C416.124 134.727 344.135 68 256.919 68c-72.266 0-136.224 46.516-159.205 114.074-54.545 8.029-96.63 54.822-96.63 111.582 0 62.298 50.668 112.966 113.243 112.966h289.614c52.329 0 94.969-42.362 94.969-94.693 0-45.131-32.118-83.063-74.48-92.2zm-20.489 144.53H114.327c-39.04 0-70.881-31.564-70.881-70.604s31.841-70.604 70.881-70.604c18.827 0 36.548 7.475 49.838 20.766 19.963 19.963 50.133-10.227 30.18-30.18-14.675-14.398-32.672-24.365-52.053-29.349 19.935-44.3 64.79-73.926 114.628-73.926 69.496 0 125.979 56.483 125.979 125.702 0 13.568-2.215 26.857-6.369 39.594-8.943 27.517 32.133 38.939 40.147 13.29 2.769-8.306 4.984-16.889 6.369-25.472 19.381 7.476 33.502 26.303 33.502 48.453 0 28.795-23.535 52.33-52.607 52.33zm235.069-52.33c0 44.024-12.737 86.386-37.102 122.657-4.153 6.092-10.798 9.414-17.72 9.414-16.317 0-27.127-18.826-17.443-32.949 19.381-29.349 29.903-63.682 29.903-99.122s-10.521-69.773-29.903-98.845c-15.655-22.831 19.361-47.24 35.163-23.534 24.366 35.993 37.102 78.356 37.102 122.379zm-70.88 0c0 31.565-9.137 62.021-26.857 88.325-4.153 6.091-10.798 9.136-17.72 9.136-17.201 0-27.022-18.979-17.443-32.948 13.013-19.104 19.658-41.255 19.658-64.513 0-22.981-6.645-45.408-19.658-64.512-15.761-22.986 19.008-47.095 35.163-23.535 17.719 26.026 26.857 56.483 26.857 88.047z"],mizuni:[496,512,[],"f3cc","M248 8C111 8 0 119.1 0 256c0 137 111 248 248 248s248-111 248-248C496 119.1 385 8 248 8zm-80 351.9c-31.4 10.6-58.8 27.3-80 48.2V136c0-22.1 17.9-40 40-40s40 17.9 40 40v223.9zm120-9.9c-12.9-2-26.2-3.1-39.8-3.1-13.8 0-27.2 1.1-40.2 3.1V136c0-22.1 17.9-40 40-40s40 17.9 40 40v214zm120 57.7c-21.2-20.8-48.6-37.4-80-48V136c0-22.1 17.9-40 40-40s40 17.9 40 40v271.7z"],modx:[448,512,[],"f285","M356 241.8l36.7 23.7V480l-133-83.8L356 241.8zM440 75H226.3l-23 37.8 153.5 96.5L440 75zm-89 142.8L55.2 32v214.5l46 29L351 217.8zM97 294.2L8 437h213.7l125-200.5L97 294.2z"],monero:[496,512,[],"f3d0","M352 384h108.4C417 455.9 338.1 504 248 504S79 455.9 35.6 384H144V256.2L248 361l104-105v128zM88 336V128l159.4 159.4L408 128v208h74.8c8.5-25.1 13.2-52 13.2-80C496 119 385 8 248 8S0 119 0 256c0 28 4.6 54.9 13.2 80H88z"],napster:[496,512,[],"f3d2","M298.3 373.6c-14.2 13.6-31.3 24.1-50.4 30.5-19-6.4-36.2-16.9-50.3-30.5h100.7zm44-199.6c20-16.9 43.6-29.2 69.6-36.2V299c0 219.4-328 217.6-328 .3V137.7c25.9 6.9 49.6 19.6 69.5 36.4 56.8-40 132.5-39.9 188.9-.1zm-208.8-58.5c64.4-60 164.3-60.1 228.9-.2-7.1 3.5-13.9 7.3-20.6 11.5-58.7-30.5-129.2-30.4-187.9.1-6.3-4-13.9-8.2-20.4-11.4zM43.8 93.2v69.3c-58.4 36.5-58.4 121.1.1 158.3 26.4 245.1 381.7 240.3 407.6 1.5l.3-1.7c58.7-36.3 58.9-121.7.2-158.2V93.2c-17.3.5-34 3-50.1 7.4-82-91.5-225.5-91.5-307.5.1-16.3-4.4-33.1-7-50.6-7.5zM259.2 352s36-.3 61.3-1.5c10.2-.5 21.1-4 25.5-6.5 26.3-15.1 25.4-39.2 26.2-47.4-79.5-.6-99.9-3.9-113 55.4zm-135.5-55.3c.8 8.2-.1 32.3 26.2 47.4 4.4 2.5 15.2 6 25.5 6.5 25.3 1.1 61.3 1.5 61.3 1.5-13.2-59.4-33.7-56.1-113-55.4zm169.1 123.4c-3.2-5.3-6.9-7.3-6.9-7.3-24.8 7.3-52.2 6.9-75.9 0 0 0-2.9 1.5-6.4 6.6-2.8 4.1-3.7 9.6-3.7 9.6 29.1 17.6 67.1 17.6 96.2 0-.1-.1-.3-4-3.3-8.9z"],"nintendo-switch":[448,512,[],"f418","M95.9 
33.5c-44.6 8-80.5 41-91.8 84.4C0 133.6-.3 142.8.2 264.4.4 376 .5 378.6 2.4 387.3c10.3 46.5 43.3 79.6 90.3 90.5 6.1 1.4 13.9 1.7 64.1 1.9 51.9.4 57.3.3 58.7-1.1 1.4-1.4 1.5-19.3 1.5-222.2 0-150.5-.3-221.3-.9-222.6-.9-1.7-2.5-1.8-56.9-1.7-44.2.1-57.5.4-63.3 1.4zm83.9 222.6V444l-37.8-.5c-34.8-.4-38.5-.6-45.5-2.3-29.9-7.7-52-30.7-58.3-60.7-2-9.4-2-240.1-.1-249.3 5.6-26.1 23.7-47.7 48-57.4 12.2-4.9 17.9-5.5 57.6-5.6l35.9-.1v188zm-75.9-131.2c-5.8 1.1-14.7 5.6-19.5 9.7-9.7 8.4-14.6 20.4-13.8 34.5.4 7.3.8 9.3 3.8 15.2 4.4 9 10.9 15.6 19.9 20 6.2 3.1 7.8 3.4 15.9 3.7 7.3.3 9.9 0 14.8-1.7 20.1-6.8 32.3-26.3 28.8-46.4-3.9-23.7-26.6-39.7-49.9-35zm158.2-92.3c-.4.3-.6 100.8-.6 223.5 0 202.3.1 222.8 1.5 223.4 2.5.9 74.5.6 83.4-.4 37.7-4.3 71-27.2 89-61.2 2.3-4.4 5.4-11.7 7-16.2 5.8-17.4 5.7-12.8 5.7-146.1 0-106.4-.2-122.3-1.5-129-9.2-48.3-46.1-84.8-94.5-93.1-6.5-1.1-16.5-1.4-48.8-1.4-22.4-.1-40.9.2-41.2.5zm99.1 202.1c14.5 3.8 26.3 14.8 31.2 28.9 3.1 8.7 3 21.5-.1 29.5-5.7 14.7-16.8 25-31.1 28.8-23.2 6-47.9-8-54.6-31-2-7-1.9-18.9.4-26.2 6.9-22.7 31-36.1 54.2-30z"],node:[640,512,[],"f419","M316.3 452c-2.1 0-4.2-.6-6.1-1.6L291 439c-2.9-1.6-1.5-2.2-.5-2.5 3.8-1.3 4.6-1.6 8.7-4 .4-.2 1-.1 1.4.1l14.8 8.8c.5.3 1.3.3 1.8 0L375 408c.5-.3.9-.9.9-1.6v-66.7c0-.7-.3-1.3-.9-1.6l-57.8-33.3c-.5-.3-1.2-.3-1.8 0l-57.8 33.3c-.6.3-.9 1-.9 1.6v66.7c0 .6.4 1.2.9 1.5l15.8 9.1c8.6 4.3 13.9-.8 13.9-5.8v-65.9c0-.9.7-1.7 1.7-1.7h7.3c.9 0 1.7.7 1.7 1.7v65.9c0 11.5-6.2 18-17.1 18-3.3 0-6 0-13.3-3.6l-15.2-8.7c-3.7-2.2-6.1-6.2-6.1-10.5v-66.7c0-4.3 2.3-8.4 6.1-10.5l57.8-33.4c3.7-2.1 8.5-2.1 12.1 0l57.8 33.4c3.7 2.2 6.1 6.2 6.1 10.5v66.7c0 4.3-2.3 8.4-6.1 10.5l-57.8 33.4c-1.7 1.1-3.8 1.7-6 1.7zm46.7-65.8c0-12.5-8.4-15.8-26.2-18.2-18-2.4-19.8-3.6-19.8-7.8 0-3.5 1.5-8.1 14.8-8.1 11.9 0 16.3 2.6 18.1 10.6.2.8.8 1.3 1.6 1.3h7.5c.5 0 .9-.2 1.2-.5.3-.4.5-.8.4-1.3-1.2-13.8-10.3-20.2-28.8-20.2-16.5 0-26.3 7-26.3 18.6 0 12.7 9.8 16.1 25.6 17.7 18.9 1.9 20.4 4.6 20.4 8.3 0 6.5-5.2 9.2-17.4 9.2-15.3 0-18.7-3.8-19.8-11.4-.1-.8-.8-1.4-1.7-1.4h-7.5c-.9 0-1.7.7-1.7 1.7 0 9.7 5.3 21.3 30.6 21.3 18.5 0 29-7.2 29-19.8zm54.5-50.1c0 6.1-5 11.1-11.1 11.1s-11.1-5-11.1-11.1c0-6.3 5.2-11.1 11.1-11.1 6-.1 11.1 4.8 11.1 11.1zm-1.8 0c0-5.2-4.2-9.3-9.4-9.3-5.1 0-9.3 4.1-9.3 9.3 0 5.2 4.2 9.4 9.3 9.4 5.2-.1 9.4-4.3 9.4-9.4zm-4.5 6.2h-2.6c-.1-.6-.5-3.8-.5-3.9-.2-.7-.4-1.1-1.3-1.1h-2.2v5h-2.4v-12.5h4.3c1.5 0 4.4 0 4.4 3.3 0 2.3-1.5 2.8-2.4 3.1 1.7.1 1.8 1.2 2.1 2.8.1 1 .3 2.7.6 3.3zm-2.8-8.8c0-1.7-1.2-1.7-1.8-1.7h-2v3.5h1.9c1.6 0 1.9-1.1 1.9-1.8zM137.3 191c0-2.7-1.4-5.1-3.7-6.4l-61.3-35.3c-1-.6-2.2-.9-3.4-1h-.6c-1.2 0-2.3.4-3.4 1L3.7 184.6C1.4 185.9 0 188.4 0 191l.1 95c0 1.3.7 2.5 1.8 3.2 1.1.7 2.5.7 3.7 0L42 268.3c2.3-1.4 3.7-3.8 3.7-6.4v-44.4c0-2.6 1.4-5.1 3.7-6.4l15.5-8.9c1.2-.7 2.4-1 3.7-1 1.3 0 2.6.3 3.7 1l15.5 8.9c2.3 1.3 3.7 3.8 3.7 6.4v44.4c0 2.6 1.4 5.1 3.7 6.4l36.4 20.9c1.1.7 2.6.7 3.7 0 1.1-.6 1.8-1.9 1.8-3.2l.2-95zM472.5 87.3v176.4c0 2.6-1.4 5.1-3.7 6.4l-61.3 35.4c-2.3 1.3-5.1 1.3-7.4 0l-61.3-35.4c-2.3-1.3-3.7-3.8-3.7-6.4v-70.8c0-2.6 1.4-5.1 3.7-6.4l61.3-35.4c2.3-1.3 5.1-1.3 7.4 0l15.3 8.8c1.7 1 3.9-.3 3.9-2.2v-94c0-2.8 3-4.6 5.5-3.2l36.5 20.4c2.3 1.2 3.8 3.7 3.8 6.4zm-46 128.9c0-.7-.4-1.3-.9-1.6l-21-12.2c-.6-.3-1.3-.3-1.9 0l-21 12.2c-.6.3-.9.9-.9 1.6v24.3c0 .7.4 1.3.9 1.6l21 12.1c.6.3 1.3.3 1.8 0l21-12.1c.6-.3.9-.9.9-1.6v-24.3zm209.8-.7c2.3-1.3 3.7-3.8 3.7-6.4V192c0-2.6-1.4-5.1-3.7-6.4l-60.9-35.4c-2.3-1.3-5.1-1.3-7.4 0l-61.3 35.4c-2.3 1.3-3.7 3.8-3.7 6.4v70.8c0 2.7 1.4 5.1 3.7 6.4l60.9 34.7c2.2 1.3 5 1.3 7.3 0l36.8-20.5c2.5-1.4 2.5-5 0-6.4L550 
241.6c-1.2-.7-1.9-1.9-1.9-3.2v-22.2c0-1.3.7-2.5 1.9-3.2l19.2-11.1c1.1-.7 2.6-.7 3.7 0l19.2 11.1c1.1.7 1.9 1.9 1.9 3.2v17.4c0 2.8 3.1 4.6 5.6 3.2l36.7-21.3zM559 219c-.4.3-.7.7-.7 1.2v13.6c0 .5.3 1 .7 1.2l11.8 6.8c.4.3 1 .3 1.4 0L584 235c.4-.3.7-.7.7-1.2v-13.6c0-.5-.3-1-.7-1.2l-11.8-6.8c-.4-.3-1-.3-1.4 0L559 219zm-254.2 43.5v-70.4c0-2.6-1.6-5.1-3.9-6.4l-61.1-35.2c-2.1-1.2-5-1.4-7.4 0l-61.1 35.2c-2.3 1.3-3.9 3.7-3.9 6.4v70.4c0 2.8 1.9 5.2 4 6.4l61.2 35.2c2.4 1.4 5.2 1.3 7.4 0l61-35.2c1.8-1 3.1-2.7 3.6-4.7.1-.5.2-1.1.2-1.7zm-74.3-124.9l-.8.5h1.1l-.3-.5zm76.2 130.2l-.4-.7v.9l.4-.2z"],"node-js":[448,512,[],"f3d3","M224 480c-6 0-12-1.6-17.2-4.6L151.9 443c-8.2-4.6-4.2-6.2-1.5-7.1 10.9-3.8 13.1-4.7 24.8-11.3 1.2-.7 2.8-.4 4.1.3l42.1 25c1.5.8 3.7.8 5.1 0l164.2-94.8c1.5-.9 2.5-2.6 2.5-4.4V161.2c0-1.9-1-3.6-2.5-4.5L226.5 62c-1.5-.9-3.5-.9-5.1 0l-164 94.7c-1.6.9-2.6 2.7-2.6 4.5v189.5c0 1.8 1 3.5 2.6 4.4l45 26c24.4 12.2 39.3-2.2 39.3-16.6V177.4c0-2.6 2.1-4.7 4.8-4.7h20.8c2.6 0 4.8 2.1 4.8 4.7v187.1c0 32.6-17.7 51.2-48.6 51.2-9.5 0-17 0-37.8-10.3l-43.1-24.8C32 374.5 25.4 363 25.4 350.7V161.2c0-12.3 6.6-23.8 17.2-29.9l164.2-94.9c10.4-5.9 24.2-5.9 34.5 0l164.2 94.9c10.6 6.1 17.2 17.6 17.2 29.9v189.5c0 12.3-6.6 23.8-17.2 29.9l-164.2 94.8c-5.3 3-11.3 4.6-17.3 4.6zm132.5-186.8c0-35.5-24-44.9-74.4-51.6-51-6.7-56.2-10.2-56.2-22.2 0-9.9 4.4-23 42.2-23 33.7 0 46.2 7.3 51.3 30 .4 2.1 2.4 3.7 4.6 3.7h21.3c1.3 0 2.6-.6 3.5-1.5.9-1 1.4-2.3 1.3-3.7-3.3-39.2-29.3-57.4-81.9-57.4-46.8 0-74.7 19.8-74.7 52.9 0 35.9 27.8 45.9 72.7 50.3 53.8 5.3 57.9 13.1 57.9 23.7 0 18.3-14.7 26.2-49.3 26.2-43.4 0-53-10.9-56.2-32.5-.4-2.3-2.3-4-4.7-4h-21.2c-2.6 0-4.7 2.1-4.7 4.7 0 27.7 15.1 60.6 86.9 60.6 51.8.1 81.6-20.4 81.6-56.2z"],npm:[576,512,[],"f3d4","M288 288h-32v-64h32v64zm288-128v192H288v32H160v-32H0V160h576zm-416 32H32v128h64v-96h32v96h32V192zm160 0H192v160h64v-32h64V192zm224 0H352v128h64v-96h32v96h32v-96h32v96h32V192z"],ns8:[640,512,[],"f3d5","M187.1 159.9l-34.2 113.7-54.5-113.7H49L0 320h44.9L76 213.5 126.6 320h56.9L232 159.9h-44.9zm452.5-.9c-2.9-18-23.9-28.1-42.1-31.3-44.6-7.8-101.9 16.3-88.5 58.8v.1c-43.8 8.7-74.3 26.8-94.2 48.2-3-9.8-13.6-16.6-34-16.6h-87.6c-9.3 0-12.9-2.3-11.5-7.4 1.6-5.5 1.9-6.8 3.7-12.2 2.1-6.4 7.8-7.1 13.3-7.1h133.5l9.7-31.5c-139.7 0-144.5-.5-160.1 1.2-12.3 1.3-23.5 4.8-30.6 15-6.8 9.9-14.4 35.6-17.6 47.1-5.4 19.4-.6 28.6 32.8 28.6h87.3c7.8 0 8.8 2.7 7.7 6.6-1.1 4.4-2.8 10-4.5 14.6-1.6 4.2-4.7 7.4-13.8 7.4H216.3L204.7 320c139.9 0 145.3-.6 160.9-2.3 6.6-.7 13-2.1 18.5-4.9.2 3.7.5 7.3 1.2 10.8 5.4 30.5 27.4 52.3 56.8 59.5 48.6 11.9 108.7-16.8 135.1-68 18.7-36.2 14.1-76.2-3.4-105.5h.1c29.6-5.9 70.3-22 65.7-50.6zM530.7 263.7c-5.9 29.5-36.6 47.8-61.6 43.9-30.9-4.8-38.5-39.5-14.1-64.8 16.2-16.8 45.2-24 68.5-26.9 6.7 14.1 10.3 32 7.2 47.8zm21.8-83.1c-4.2-6-9.8-18.5-2.5-26.3 6.7-7.2 20.9-10.1 31.8-7.7 15.3 3.4 19.7 15.9 4.9 24.4-10.7 6.1-23.6 8.1-34.2 9.6z"],nutritionix:[400,512,[],"f3d6","M88 8.1S221.4-.1 209 112.5c0 0 19.1-74.9 103-40.6 0 0-17.7 74-88 56 0 0 14.6-54.6 66.1-56.6 0 0-39.9-10.3-82.1 48.8 0 0-19.8-94.5-93.6-99.7 0 0 75.2 19.4 77.6 107.5 0 .1-106.4 7-104-119.8zm312 315.6c0 48.5-9.7 95.3-32 132.3-42.2 30.9-105 48-168 48-62.9 0-125.8-17.1-168-48C9.7 419 0 372.2 0 323.7 0 275.3 17.7 229 40 192c42.2-30.9 97.1-48.6 160-48.6 63 0 117.8 17.6 160 48.6 22.3 37 40 83.3 40 131.7zM120 428c0-15.5-12.5-28-28-28s-28 12.5-28 28 12.5 28 28 28 28-12.5 28-28zm0-66.2c0-15.5-12.5-28-28-28s-28 12.5-28 28 12.5 28 28 28 28-12.5 28-28zm0-66.2c0-15.5-12.5-28-28-28s-28 12.5-28 28 12.5 28 28 28 28-12.5 28-28zM192 
428c0-15.5-12.5-28-28-28s-28 12.5-28 28 12.5 28 28 28 28-12.5 28-28zm0-66.2c0-15.5-12.5-28-28-28s-28 12.5-28 28 12.5 28 28 28 28-12.5 28-28zm0-66.2c0-15.5-12.5-28-28-28s-28 12.5-28 28 12.5 28 28 28 28-12.5 28-28zM264 428c0-15.5-12.5-28-28-28s-28 12.5-28 28 12.5 28 28 28 28-12.5 28-28zm0-66.2c0-15.5-12.5-28-28-28s-28 12.5-28 28 12.5 28 28 28 28-12.5 28-28zm0-66.2c0-15.5-12.5-28-28-28s-28 12.5-28 28 12.5 28 28 28 28-12.5 28-28zM336 428c0-15.5-12.5-28-28-28s-28 12.5-28 28 12.5 28 28 28 28-12.5 28-28zm0-66.2c0-15.5-12.5-28-28-28s-28 12.5-28 28 12.5 28 28 28 28-12.5 28-28zm0-66.2c0-15.5-12.5-28-28-28s-28 12.5-28 28 12.5 28 28 28 28-12.5 28-28zm24-39.6c-4.8-22.3-7.4-36.9-16-56-38.8-19.9-90.5-32-144-32S94.8 180.1 56 200c-8.8 19.5-11.2 33.9-16 56 42.2-7.9 98.7-14.8 160-14.8s117.8 6.9 160 14.8z"],odnoklassniki:[320,512,[],"f263","M275.1 334c-27.4 17.4-65.1 24.3-90 26.9l20.9 20.6 76.3 76.3c27.9 28.6-17.5 73.3-45.7 45.7-19.1-19.4-47.1-47.4-76.3-76.6L84 503.4c-28.2 27.5-73.6-17.6-45.4-45.7 19.4-19.4 47.1-47.4 76.3-76.3l20.6-20.6c-24.6-2.6-62.9-9.1-90.6-26.9-32.6-21-46.9-33.3-34.3-59 7.4-14.6 27.7-26.9 54.6-5.7 0 0 36.3 28.9 94.9 28.9s94.9-28.9 94.9-28.9c26.9-21.1 47.1-8.9 54.6 5.7 12.4 25.7-1.9 38-34.5 59.1zM30.3 129.7C30.3 58 88.6 0 160 0s129.7 58 129.7 129.7c0 71.4-58.3 129.4-129.7 129.4s-129.7-58-129.7-129.4zm66 0c0 35.1 28.6 63.7 63.7 63.7s63.7-28.6 63.7-63.7c0-35.4-28.6-64-63.7-64s-63.7 28.6-63.7 64z"],"odnoklassniki-square":[448,512,[],"f264","M184.2 177.1c0-22.1 17.9-40 39.8-40s39.8 17.9 39.8 40c0 22-17.9 39.8-39.8 39.8s-39.8-17.9-39.8-39.8zM448 80v352c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V80c0-26.5 21.5-48 48-48h352c26.5 0 48 21.5 48 48zm-305.1 97.1c0 44.6 36.4 80.9 81.1 80.9s81.1-36.2 81.1-80.9c0-44.8-36.4-81.1-81.1-81.1s-81.1 36.2-81.1 81.1zm174.5 90.7c-4.6-9.1-17.3-16.8-34.1-3.6 0 0-22.7 18-59.3 18s-59.3-18-59.3-18c-16.8-13.2-29.5-5.5-34.1 3.6-7.9 16.1 1.1 23.7 21.4 37 17.3 11.1 41.2 15.2 56.6 16.8l-12.9 12.9c-18.2 18-35.5 35.5-47.7 47.7-17.6 17.6 10.7 45.8 28.4 28.6l47.7-47.9c18.2 18.2 35.7 35.7 47.7 47.9 17.6 17.2 46-10.7 28.6-28.6l-47.7-47.7-13-12.9c15.5-1.6 39.1-5.9 56.2-16.8 20.4-13.3 29.3-21 21.5-37z"],opencart:[640,512,[],"f23d","M423.3 440.7c0 25.3-20.3 45.6-45.6 45.6s-45.8-20.3-45.8-45.6 20.6-45.8 45.8-45.8c25.4 0 45.6 20.5 45.6 45.8zm-253.9-45.8c-25.3 0-45.6 20.6-45.6 45.8s20.3 45.6 45.6 45.6 45.8-20.3 45.8-45.6-20.5-45.8-45.8-45.8zm291.7-270C158.9 124.9 81.9 112.1 0 25.7c34.4 51.7 53.3 148.9 373.1 144.2 333.3-5 130 86.1 70.8 188.9 186.7-166.7 319.4-233.9 17.2-233.9z"],openid:[448,512,[],"f19b","M271.5 432l-68 32C88.5 453.7 0 392.5 0 318.2c0-71.5 82.5-131 191.7-144.3v43c-71.5 12.5-124 53-124 101.3 0 51 58.5 93.3 135.7 103v-340l68-33.2v384zM448 291l-131.3-28.5 36.8-20.7c-19.5-11.5-43.5-20-70-24.8v-43c46.2 5.5 87.7 19.5 120.3 39.3l35-19.8L448 291z"],opera:[496,512,[],"f26a","M313.9 32.7c-170.2 0-252.6 223.8-147.5 355.1 36.5 45.4 88.6 75.6 147.5 75.6 36.3 0 70.3-11.1 99.4-30.4-43.8 39.2-101.9 63-165.3 63-3.9 0-8 0-11.9-.3C104.6 489.6 0 381.1 0 248 0 111 111 0 248 0h.8c63.1.3 120.7 24.1 164.4 63.1-29-19.4-63.1-30.4-99.3-30.4zm101.8 397.7c-40.9 24.7-90.7 23.6-132-5.8 56.2-20.5 97.7-91.6 97.7-176.6 0-84.7-41.2-155.8-97.4-176.6 41.8-29.2 91.2-30.3 132.9-5 105.9 98.7 105.5 265.7-1.2 364z"],"optin-monster":[576,512,[],"f23c","M550.671 450.303c0 11.62-15.673 19.457-32.158 14.863-12.16-3.243-31.346-17.565-36.211-27.294-5.674-11.62 4.054-32.698 18.916-30.806 15.674 1.621 49.453 25.401 49.453 43.237zM372.86 75.223c-3.783-72.151-100.796-79.718-125.928-23.51 44.588-24.321 90.257-15.673 
125.928 23.51zM74.795 407.066c-15.673 1.621-49.452 25.401-49.452 43.237 0 11.62 15.673 19.457 32.157 14.863 12.16-3.243 31.076-17.565 35.94-27.294 5.946-11.62-3.782-32.698-18.645-30.806zm497.765 14.322c1.081 3.513 1.892 7.026 1.892 10.809.81 31.616-44.317 64.045-73.503 65.125-17.295.81-34.59-8.377-42.696-23.51-113.497 4.053-226.994 4.864-340.22 0-8.377 15.133-25.672 24.05-42.967 23.51-28.915-1.081-74.043-33.509-73.503-65.125.27-3.783.811-7.296 1.892-10.809-5.566-9.463-4.845-15.282 5.405-11.62 3.243-5.134 7.026-9.458 11.08-13.782-2.57-10.917 1.27-14.094 11.079-9.188 4.594-3.243 9.998-6.485 15.944-9.188 0-15.757 11.839-11.131 17.295-5.675 12.467-1.78 20.129.709 26.753 5.675v-19.726c-12.987 0-40.641-11.375-45.94-36.212-4.974-20.725 2.607-38.075 25.132-47.56.81-5.945 8.107-14.052 14.862-15.944 7.567-1.892 12.431 4.594 14.052 10.269 7.425 0 17.757 1.465 21.078 8.107 5.405-.541 11.079-1.352 16.484-1.892-2.432-1.892-5.134-3.513-8.107-4.594-5.134-8.917-13.782-11.079-24.591-11.62 0-.81 0-1.621.27-2.702-19.727-.541-44.048-5.675-54.857-17.835-21.321-23.638-15.935-83.577 12.16-103.498 8.377-5.675 21.618-.811 22.699 9.728 2.425 20.598.399 26.833 26.212 25.942 8.107-7.836 16.755-14.592 26.483-19.997-14.862-1.352-28.914 1.621-43.778 3.783 12.752-12.48 23.953-25.442 56.748-42.427 23.511-11.89 49.993-20.808 76.205-23.239-18.646-7.837-39.993-11.891-59.721-16.484 76.475-16.214 174.569-22.159 244.289 37.562 18.105 15.403 32.427 36.211 42.696 59.992 39.799 4.853 36.47-5.581 38.643-25.132 1.081-10.269 14.322-15.403 22.699-9.458 14.862 10.539 22.159 30.806 24.59 48.101 2.162 17.835.27 41.345-12.43 55.127-10.809 12.16-34.32 17.565-53.776 18.105v2.703c-11.08.27-20.268 2.432-25.673 11.62-2.972 1.081-5.674 2.703-8.377 4.594 5.675.54 11.35 1.351 16.755 1.891 1.869-5.619 12.535-8.377 21.077-8.377 1.621-5.405 6.756-11.89 14.052-10.269s14.052 9.998 14.863 15.944c10.809 4.324 22.159 12.16 25.131 25.672 1.892 8.107 1.621 15.133.27 21.888-5.726 25.262-33.361 36.212-45.939 36.212 0 6.756 0 13.241-.27 19.726 8.01-6.006 16.367-7.158 26.752-5.675 5.919-5.919 17.565-9.41 17.565 5.675 5.675 2.703 11.349 5.945 15.944 9.188 10.1-5.051 13.669-.539 10.809 9.188 4.053 4.323 8.107 8.917 11.079 13.782 10.136-3.62 11.021 2.078 5.409 11.62zm-73.773-254.016c17.295 6.756 26.212 22.159 30.265 35.67 1.081-10.539-2.702-39.453-13.782-51.073-7.296-7.296-14.052-5.134-14.052.81.001 6.216-1.35 11.62-2.431 14.593zm-18.646 12.43c12.971 15.673 17.024 41.615 12.7 62.963 10.809-2.162 20.537-6.215 26.212-12.16 1.892-2.162 3.783-4.864 4.864-7.566-1.081-21.348-10.269-42.697-29.725-48.912-3.242 3.243-9.187 4.864-14.051 5.675zm-21.889.811c7.567 20.537 12.431 42.696 14.322 64.585 3.513 0 7.567-.27 11.62-.811 5.945-24.321-.27-51.614-14.052-63.504-3.783 0-8.107 0-11.89-.27zM77.768 167.372c-1.081-2.973-2.432-8.377-2.432-14.593 0-5.945-7.026-8.107-14.052-.81-11.35 11.62-14.863 40.534-13.782 51.073 4.053-13.512 12.971-28.915 30.266-35.67zm5.675 75.394c-4.324-21.348-.27-47.291 12.701-62.963-4.865-.811-10.809-2.432-14.052-5.675-19.457 6.215-28.375 27.563-29.726 48.912 1.351 2.702 2.972 5.404 4.864 7.566 5.675 6.215 15.403 9.998 26.213 12.16zm41.345-61.073c-5.134 1.081-9.998 2.973-14.862 4.865l-12.16 5.134v-.27c-7.296 14.052-9.999 34.319-5.405 52.965 4.594.541 8.647.811 12.7.811 2.432-22.159 9.188-43.778 19.727-63.505zm88.095-23.239c0 42.155 34.319 76.205 76.205 76.205s76.205-34.05 76.205-76.205c0-41.886-34.319-75.935-76.205-75.935s-76.205 34.049-76.205 75.935zm152.41 97.283c9.969 50.608 3.299 64.692 16.484 58.099 15.944-8.107 22.699-39.183 
22.97-57.019-12.971-.81-26.213-.81-39.454-1.08zm-71.611-.541v-.27c-.27 5.134.27 38.103 4.324 41.075 11.079 5.405 39.453 4.594 51.073 1.081 5.405-1.621 2.432-37.022 1.621-41.886-18.916-.27-38.102-.27-57.018 0zm-14.053 0v-.27c-19.456.27-38.642.27-57.829.811-1.892 9.187-4.594 48.911 1.892 51.614 12.971 5.675 41.616 5.134 54.586 1.621 4.595-2.432 2.433-45.399 1.351-53.776zm-85.662 57.56c5.405 2.432 8.647 2.432 9.728-4.324 1.892-8.647 2.432-36.752 4.865-52.155-12.16.27-24.591.811-36.752 1.621-5.405 19.727.27 45.129 22.159 54.858zm-65.666-11.08c43.778 47.02 92.689 85.663 155.923 106.47 67.558-19.186 115.659-59.991 163.219-107.011-11.095-4.315-7.715-10.363-7.296-11.62-8.918-.81-17.835-1.892-26.483-2.702-9.458 32.968-35.94 52.965-46.75 31.616-2.702-5.134-3.513-11.62-4.594-16.754-3.783 8.377-13.242 8.107-24.591 8.918-13.241 1.081-31.617 1.351-44.048-2.972-2.972 12.971-11.079 12.971-26.752 14.322-14.052 1.352-48.642 4.054-54.857-10.809-1.081 28.644-35.13 9.998-45.129-7.026-3.243-5.675-5.405-11.35-7.026-17.565-7.837.81-15.673 1.621-23.511 2.702 2.443 3.663 1.549 9.052-8.105 12.431zM115.6 453.545c-5.674-23.239-18.646-49.722-33.508-54.046-22.429-6.756-68.909 23.51-66.207 54.586 12.701 19.457 39.994 35.67 59.181 36.481 17.835.81 35.94-11.08 39.724-28.914.539-2.432.81-5.134.81-8.107zm7.296-5.944c33.509-19.457 69.179-35.671 105.931-47.02-38.643-20.537-68.098-47.831-97.283-77.016-2.162 1.352-5.134 2.432-7.836 3.513-1.637 4.91 8.718 5.33 5.405 12.431-2.162 4.054-8.648 7.567-15.133 9.188-2.161 2.702-5.134 4.864-7.836 6.485h-.27c-.27 13.511-.27 27.024.27 40.535 8.939 15.964 15.426 33.314 16.752 51.884zm320.764 12.7c-36.752-21.348-74.044-41.345-115.659-52.965-13.782 6.215-27.833 11.349-42.155 15.403-2.162.811-2.162.811-4.324 0-11.89-3.783-23.239-8.107-34.859-13.241-40.265 11.62-77.286 29.185-112.416 50.803h-.27v.27c.27 0 .27 0 .27-.27 103.227 4.054 206.455 3.513 309.413 0zm27.023-64.045l-.27.27c.541-13.782.811-27.563.811-41.345-2.973-1.621-5.675-4.054-8.107-6.756-6.485-1.351-12.971-5.134-15.133-8.918-1.892-4.053 1.351-7.566 5.945-10.269-.27-.541-.541-1.621-.541-2.432-2.972-.811-5.405-1.892-7.567-3.243-31.616 29.455-65.396 56.749-103.498 76.746 38.914 11.62 75.935 28.104 111.875 47.561 1.05-14.692 7.231-35.749 16.485-51.614zm23.24 3.244c-14.593 4.323-27.834 30.806-33.509 54.046 0 23.826 21.278 37.897 40.534 37.022 19.186-.811 46.48-17.024 59.181-36.481 2.973-31.077-43.507-61.344-66.206-54.587zM290.709 134.133c.045 0 .089.003.134.003.046 0 .09-.003.136-.003h-.27zm0 96.743c28.645 0 51.884-21.618 51.884-48.371 0-36.092-40.507-58.079-72.151-44.318 9.458 2.972 16.484 11.62 16.484 21.618 0 23.257-33.291 31.955-46.48 11.35-7.297 34.067 19.368 59.721 50.263 59.721zM68.039 474.083c.54 6.486 12.16 12.701 21.618 9.458 6.756-2.703 14.593-10.539 17.295-16.214 2.973-7.026-1.081-19.997-9.728-18.375-8.917 1.621-29.725 16.754-29.185 25.131zm410.75-25.131c-8.377-1.621-12.431 11.349-9.458 18.375 2.432 5.675 10.269 13.511 17.295 16.214 9.187 3.243 21.078-2.972 21.348-9.458.811-8.377-20.267-23.51-29.185-25.131z"],osi:[495,512,[],"f41a","M0 259.2C2.3 123.4 97.4 26.8 213.8 11.1c138.8-18.6 255.6 75.8 278 201.1 21.3 118.8-44 230-151.6 274-9.3 3.8-14.4 1.7-18-7.7-17.8-46.3-35.6-92.7-53.4-139-3.1-8.1-1-13.2 7-16.8 24.2-11 39.3-29.4 43.3-55.8 6.4-42.4-24.5-78.7-64.5-82.2-39-3.4-71.8 23.7-77.5 59.7-5.2 33 11.1 63.7 41.9 77.7 9.6 4.4 11.5 8.6 7.8 18.4-17.9 46.6-35.8 93.2-53.7 139.9-2.6 6.9-8.3 9.3-15.5 6.5-52.6-20.3-101.4-61-130.8-119C1.9 318.7 1.6 280.2 0 259.2zm20.9-1.9c.4 6.6.6 14.3 1.3 22.1 6.3 71.9 49.6 143.5 131 183.1 3.2 1.5 4.4.8 
5.6-2.3 14.9-39.1 29.9-78.2 45-117.3 1.3-3.3.6-4.8-2.4-6.7-31.6-19.9-47.3-48.5-45.6-86 1-21.6 9.3-40.5 23.8-56.3 30-32.7 77-39.8 115.5-17.6 31.9 18.4 49.5 53.8 45.2 90.4-3.6 30.6-19.3 53.9-45.7 69.8-2.7 1.6-3.5 2.9-2.3 6 15.2 39.2 30.2 78.4 45.2 117.7 1.2 3.1 2.4 3.8 5.6 2.3 35.5-16.6 65.2-40.3 88.1-72 34.8-48.2 49.1-101.9 42.3-161C459.8 112 354.1 14.7 218 31.5 111.9 44.5 22.7 134 20.9 257.3z"],page4:[496,512,[],"f3d7","M248 504C111 504 0 393 0 256S111 8 248 8c20.9 0 41.3 2.6 60.7 7.5L42.3 392H248v112zm0-143.6V146.8L98.6 360.4H248zm96 31.6v92.7c45.7-19.2 84.5-51.7 111.4-92.7H344zm57.4-138.2l-21.2 8.4 21.2 8.3v-16.7zm-20.3 54.5c-6.7 0-8 6.3-8 12.9v7.7h16.2v-10c0-5.9-2.3-10.6-8.2-10.6zM496 256c0 37.3-8.2 72.7-23 104.4H344V27.3C433.3 64.8 496 153.1 496 256zM360.4 143.6h68.2V96h-13.9v32.6h-13.9V99h-13.9v29.6h-12.7V96h-13.9v47.6zm68.1 185.3H402v-11c0-15.4-5.6-25.2-20.9-25.2-15.4 0-20.7 10.6-20.7 25.9v25.3h68.2v-15zm0-103l-68.2 29.7V268l68.2 29.5v-16.6l-14.4-5.7v-26.5l14.4-5.9v-16.9zm-4.8-68.5h-35.6V184H402v-12.2h11c8.6 15.8 1.3 35.3-18.6 35.3-22.5 0-28.3-25.3-15.5-37.7l-11.6-10.6c-16.2 17.5-12.2 63.9 27.1 63.9 34 0 44.7-35.9 29.3-65.3z"],pagelines:[384,512,[],"f18c","M384 312.7c-55.1 136.7-187.1 54-187.1 54-40.5 81.8-107.4 134.4-184.6 134.7-16.1 0-16.6-24.4 0-24.4 64.4-.3 120.5-42.7 157.2-110.1-41.1 15.9-118.6 27.9-161.6-82.2 109-44.9 159.1 11.2 178.3 45.5 9.9-24.4 17-50.9 21.6-79.7 0 0-139.7 21.9-149.5-98.1 119.1-47.9 152.6 76.7 152.6 76.7 1.6-16.7 3.3-52.6 3.3-53.4 0 0-106.3-73.7-38.1-165.2 124.6 43 61.4 162.4 61.4 162.4.5 1.6.5 23.8 0 33.4 0 0 45.2-89 136.4-57.5-4.2 134-141.9 106.4-141.9 106.4-4.4 27.4-11.2 53.4-20 77.5 0 0 83-91.8 172-20z"],palfed:[560,512,[],"f3d8","M376.9 194.1c0-47.4-55.2-44.2-95.4-29.8-1.3 39.4-2.5 80.7-3 119.8.7 2.8 2.6 6.2 15.1 6.2 36.7-.1 83.3-42.9 83.3-96.2zm-194.5 72.2c.2 0 6.5-2.7 11.2-2.7 26.6 0 20.7 44.1-14.4 44.1-21.5 0-37.1-18.1-37.1-43 0-42 42.9-95.6 100.7-126.5 1-12.4 3-22 10.5-28.2 11.2-9 26.6-3.5 29.5 11.1 72.2-22.2 135.2 1 135.2 72 0 77.9-79.3 152.6-140.1 138.2-.1 39.4.9 74.4 2.7 100v.2c.2 3.4.6 12.5-5.3 19.1-9.6 10.6-33.4 10-36.4-22.3-4.1-44.4.2-206.1 1.4-242.5-21.5 15-58.5 50.3-58.5 75.9.1 2.4.3 3.9.6 4.6zM0 181.3s-.1 37.4 38.4 37.4h30l22.4 217.2s0 44.3 44.7 44.3h288.9s44.7-.4 44.7-44.3l22.4-217.2h30s38.4 1.2 38.4-37.4c0 0 .1-37.4-38.4-37.4h-30.1c-7.3-25.6-30.2-74.3-119.4-74.3h-28V50.5s-2.7-18.4-21.1-18.4h-85.8s-21.1 0-21.1 18.4v19.1h-28.1s-105 4.2-120.5 74.3h-29S0 142.7 0 181.3z"],patreon:[512,512,[],"f3d9","M489.6 200.2c0 92.5-75.2 167.7-167.7 167.7-92.7 0-168.2-75.2-168.2-167.7 0-92.7 75.5-168.2 168.2-168.2 92.5 0 167.7 75.4 167.7 168.2zM22.4 480h82.1V32H22.4v448z"],paypal:[384,512,[],"f1ed","M111.4 295.9c-3.5 19.2-17.4 108.7-21.5 134-.3 1.8-1 2.5-3 2.5H12.3c-7.6 0-13.1-6.6-12.1-13.9L58.8 46.6c1.5-9.6 10.1-16.9 20-16.9 152.3 0 165.1-3.7 204 11.4 60.1 23.3 65.6 79.5 44 140.3-21.5 62.6-72.5 89.5-140.1 90.3-43.4.7-69.5-7-75.3 24.2zM357.1 152c-1.8-1.3-2.5-1.8-3 1.3-2 11.4-5.1 22.5-8.8 33.6-39.9 113.8-150.5 103.9-204.5 103.9-6.1 0-10.1 3.3-10.9 9.4-22.6 140.4-27.1 169.7-27.1 169.7-1 7.1 3.5 12.9 10.6 12.9h63.5c8.6 0 15.7-6.3 17.4-14.9.7-5.4-1.1 6.1 14.4-91.3 4.6-22 14.3-19.7 29.3-19.7 71 0 126.4-28.8 142.9-112.3 6.5-34.8 4.6-71.4-23.8-92.6z"],periscope:[448,512,[],"f3da","M370 63.6C331.4 22.6 280.5 0 226.6 0 111.9 0 18.5 96.2 18.5 214.4c0 75.1 57.8 159.8 82.7 192.7C137.8 455.5 192.6 512 226.6 512c41.6 0 112.9-94.2 120.9-105 24.6-33.1 82-118.3 82-192.6 0-56.5-21.1-110.1-59.5-150.8zM226.6 493.9c-42.5 0-190-167.3-190-279.4 0-107.4 83.9-196.3 190-196.3 100.8 
0 184.7 89 184.7 196.3.1 112.1-147.4 279.4-184.7 279.4zM338 206.8c0 59.1-51.1 109.7-110.8 109.7-100.6 0-150.7-108.2-92.9-181.8v.4c0 24.5 20.1 44.4 44.8 44.4 24.7 0 44.8-19.9 44.8-44.4 0-18.2-11.1-33.8-26.9-40.7 76.6-19.2 141 39.3 141 112.4z"],phabricator:[496,512,[],"f3db","M323 262.1l-.1-13s21.7-19.8 21.1-21.2l-9.5-20c-.6-1.4-29.5-.5-29.5-.5l-9.4-9.3s.2-28.5-1.2-29.1l-20.1-9.2c-1.4-.6-20.7 21-20.7 21l-13.1-.2s-20.5-21.4-21.9-20.8l-20 8.3c-1.4.5.2 28.9.2 28.9l-9.1 9.1s-29.2-.9-29.7.4l-8.1 19.8c-.6 1.4 21 21 21 21l.1 12.9s-21.7 19.8-21.1 21.2l9.5 20c.6 1.4 29.5.5 29.5.5l9.4 9.3s-.2 31.8 1.2 32.3l20.1 8.3c1.4.6 20.7-23.5 20.7-23.5l13.1.2s20.5 23.8 21.8 23.3l20-7.5c1.4-.6-.2-32.1-.2-32.1l9.1-9.1s29.2.9 29.7-.5l8.1-19.8c.7-1.1-20.9-20.7-20.9-20.7zm-44.9-8.7c.7 17.1-12.8 31.6-30.1 32.4-17.3.8-32.1-12.5-32.8-29.6-.7-17.1 12.8-31.6 30.1-32.3 17.3-.8 32.1 12.5 32.8 29.5zm201.2-37.9l-97-97-.1.1c-75.1-73.3-195.4-72.8-269.8 1.6-50.9 51-27.8 27.9-95.7 95.3-22.3 22.3-22.3 58.7 0 81 69.9 69.4 46.4 46 97.4 97l.1-.1c75.1 73.3 195.4 72.9 269.8-1.6 51-50.9 27.9-27.9 95.3-95.3 22.3-22.3 22.3-58.7 0-81zM140.4 363.8c-59.6-59.5-59.6-156 0-215.5 59.5-59.6 156-59.5 215.6 0 59.5 59.5 59.6 156 0 215.6-59.6 59.5-156 59.4-215.6-.1z"],"phoenix-framework":[640,512,[],"f3dc","M213.2 339.2c3.8-.1 22.9-1.4 25.6-2.2-2.4-2.7-43.6-1-68.1-49.7-4.3-8.7-7.5-17.6-6.4-27.6 2.9-25.5 32.9-30 52.1-18.5 36 21.6 63.4 91.5 113.8 97.6 37.1 4.5 84.7-17 108.3-45.4-.6-.1-.8-.2-1-.1-.4.1-.8.2-1.1.3-33.4 12.1-94.4 9.7-134.8-14.8-37.7-22.8-53.2-58.8-51.9-74.7 1.8-21.4 22.9-23.2 36-19.6 14.4 4 24.4 17.6 39 27.4 15.6 10.4 33 13.7 51.3 10.3 14.9-2.7 34.4-12.3 36.5-14.5-1.1-.1-1.8-.1-2.5-.2-6.2-.6-12.4-.8-18.6-1.7C280.1 189.3 262.5 42 138.7 32.5c-44.4-3.4-99.6 8.1-136.5 35-.8.6-1.5 1.2-2.2 1.8.1.2.1.3.2.5.8-.1 1.6-.1 2.4-.2 6.3-1 12.5-.8 18.8.3 23.9 4.3 47.8 23.1 56 76.6 5.3 34.3-.7 50.9 8 86.2 18.9 77.2 91 107.8 127.8 106.5zM75.4 59.5c-.9-1-.9-1.2-1.3-2 12.1-2.6 24.3-4.1 36.7-4.8-1.1 14.7-22.3 21.4-35.4 6.8zm197.2 350.9c-42.9 1.2-92.1-26.8-123.7-61.5-4.6-5-16.8-20.3-18.6-23.4l.4-.4c6.6 4.1 25.7 18.7 54.9 27.1 24.2 7 48.1 6.3 71.7-3.3 22.7-9.3 41-.5 43.1 2.9-18.5 3.8-20.1 4.4-24.1 7.9-5.1 4.4-4.6 11.7 7 17.2 26.2 12.4 63-2.8 97.3 25.4 2.4 2 8.1 7.8 10.1 10.7-.1.2-.3.3-.4.5-4.8-1.5-16.5-7.5-40.2-9.3-24.7-2-46.3 5.4-77.5 6.2zm175-252.2c16.4-5.2 41.4-13.4 66.6-3.3 16.1 6.5 26.2 18.7 32.1 34.7 3.5 9.4 5.1 19.7 5.1 28.8-.2 0-.4 0-.6.1-.2-.4-.4-.9-.5-1.3-5-22-29.9-43.8-67.7-29.9-50.2 18.6-130.5 9.7-177.2-48-.7-.9-2.4-1.7-1.3-3.2.1-.2 2.1.6 3 1.3 18.1 13.5 38.3 21.9 60.4 26.2 30.6 5.9 54.7 2.6 80.1-5.4zm102.8 117.6c-32.4.2-33.8 50.2-103.7 64.4-18.3 3.7-38.7 4.6-45 4.2v-.4c2.8-1.5 14.7-2.6 29.7-16.6 7.9-7.3 15.3-15.2 22.8-22.9 19.6-20.3 41.5-42.3 82-39 23.1 1.8 29.3 8.2 36.2 12.7.3.2.4.5.7.9-.5 0-.7.1-.9 0-7-2.7-14.3-3.3-21.8-3.3zm-12.3-24.2c-.1.2-.1.4-.2.6-29-4.4-48.1-7.9-68.6 4-17 9.9-31.5 20.6-62.1 24.4-27.1 3.4-45.2 2.4-66.2-8-.3-.2-.6-.4-1-.6 0-.2.1-.3.1-.5 24.9 3.8 36.5 5.2 55.6-5.9 22.4-12.9 40.2-26.7 71.4-31 29.6-3.9 51.3 2.7 71 17zM269 91.9c-.6-.6-1.1-1.2-2.1-2.3 7.6 0 29.7-1.2 53.4 8.4 19.7 8 32.3 21.1 50.3 33 11.1 7.3 23.5 9.3 36.5 8.1 4.3-.4 8.5-1.2 12.8-1.7.4-.1.9 0 1.5.3-.6.4-1.2.9-1.8 1.2-8.1 4-16.7 6.3-25.6 7.2-26.1 2.6-50.4-3.7-73.5-15.4-19.4-10-36.5-23-51.5-38.8zm371.8 238.7c-3.5 3.1-22.7 11.6-42.8 5.3-12.3-3.9-19.5-14.9-31.6-24.1-10-7.6-20.9-7.9-28.2-8.4.6-.8.9-1.2 1.2-1.4 14.8-9.2 30.5-12.2 47.4-6.5 12.5 4.2 19.3 13.5 30.4 24.2 10.8 10.4 21 9.9 23.2 10.5.1-.1.2.1.4.4zM428 467.8c2.2 1.2 1.6 1.5 1.5 
2-18.5-1.4-33.9-7.6-46.8-22.2-21.8-24.7-41.8-27.9-48.7-29.7.5-.2.8-.4 1.1-.4 13.1.1 26.2.7 39 3.9 25.3 6.4 35 25.4 41.6 35.4 3.2 4.7 7.4 8.3 12.3 11z"],"pied-piper":[640,512,[],"f2ae","M640 24.9c-80.8 53.6-89.4 92.5-96.4 104.4-6.7 12.2-11.7 60.3-23.3 83.6-11.7 23.6-54.2 42.2-66.1 50-11.7 7.8-28.3 38.1-41.9 64.2-108.1-4.4-167.4 38.8-259.2 93.6 29.4-9.7 43.3-16.7 43.3-16.7 94.2-36 139.3-68.3 281.1-49.2 1.1 0 1.9.6 2.8.8 3.9 2.2 5.3 6.9 3.1 10.8l-53.9 95.8c-2.5 4.7-7.8 7.2-13.1 6.1-126.8-23.8-226.9 17.3-318.9 18.6C24.1 488 0 453.4 0 451.8c0-1.1.6-1.7 1.7-1.7 0 0 38.3 0 103.1-15.3C178.4 294.5 244 245.4 315.4 245.4c0 0 71.7 0 90.6 61.9 22.8-39.7 28.3-49.2 28.3-49.2 5.3-9.4 35-77.2 86.4-141.4 51.5-64 90.4-79.9 119.3-91.8z"],"pied-piper-alt":[576,512,[],"f1a8","M242 187c6.3-11.8 13.2-17 25.9-21.8 27.3-10.3 40.2-30.5 58.9-51.1 11.9 8.4 12 24.6 31.6 23v21.8l6.3.3c37.4-14.4 74.7-30.2 106.6-54.6 48.3-36.8 52.9-50 81.3-100l2-2.6c-.6 14.1-6.3 27.3-12.4 39.9-30.5 63.8-78.7 100.3-146.8 116.7-12.4 2.9-26.4 3.2-37.6 8.9 1.4 9.8 13.2 18.1 13.2 23 0 3.4-5.5 7.2-7.5 8.6-11.2-12.9-16.1-19.3-22.7-22.1-7.6-3.5-63.9-6.4-98.8 10zm137.9 256.9c-19 0-64.1 9.5-79.9 19.8l6.9 45.1c35.7 6.1 70.1 3.6 106-9.8-4.8-10-23.5-55.1-33-55.1zM244 246c-3.2-2-6.3-2.9-10.1-2.9-6.6 0-12.6 3.2-19.3 3.7l1.7 4.9L244 246zm-12.6 31.8l24.1 61.2 21-13.8-31.3-50.9-13.8 3.5zM555.5 0l-.6 1.1-.3.9.6-.6.3-1.4zm-59.2 382.1c-33.9-56.9-75.3-118.4-150-115.5l-.3-6c-1.1-13.5 32.8 3.2 35.1-31l-14.4 7.2c-19.8-45.7-8.6-54.3-65.5-54.3-14.7 0-26.7 1.7-41.4 4.6 2.9 18.6 2.2 36.7-10.9 50.3l19.5 5.5c-1.7 3.2-2.9 6.3-2.9 9.8 0 21 42.8 2.9 42.8 33.6 0 18.4-36.8 60.1-54.9 60.1-8 0-53.7-50-53.4-60.1l.3-4.6 52.3-11.5c13-2.6 12.3-22.7-2.9-22.7-3.7 0-43.1 9.2-49.4 10.6-2-5.2-7.5-14.1-13.8-14.1-3.2 0-6.3 3.2-9.5 4-9.2 2.6-31 2.9-21.5 20.1L15.9 298.5c-5.5 1.1-8.9 6.3-8.9 11.8 0 6 5.5 10.9 11.5 10.9 8 0 131.3-28.4 147.4-32.2 2.6 3.2 4.6 6.3 7.8 8.6 20.1 14.4 59.8 85.9 76.4 85.9 24.1 0 58-22.4 71.3-41.9 3.2-4.3 6.9-7.5 12.4-6.9.6 13.8-31.6 34.2-33 43.7-1.4 10.2-1 35.2-.3 41.1 26.7 8.1 52-3.6 77.9-2.9 4.3-21 10.6-41.9 9.8-63.5l-.3-9.5c-1.4-34.2-10.9-38.5-34.8-58.6-1.1-1.1-2.6-2.6-3.7-4 2.2-1.4 1.1-1 4.6-1.7 88.5 0 56.3 183.6 111.5 229.9 33.1-15 72.5-27.9 103.5-47.2-29-25.6-52.6-45.7-72.7-79.9zm-196.2 46v27.3l11.8-3.4-2.9-23.8h-8.9zm76.1 2.9c0-1.4-.6-3.2-.9-4.6-26.8 0-36.9 3.8-59.5 6.3l2 12.4c9-1.5 58.4-6.6 58.4-14.1z"],"pied-piper-pp":[448,512,[],"f1a7","M205.3 174.6c0 21.1-14.2 38.1-31.7 38.1-7.1 0-12.8-1.2-17.2-3.7v-68c4.4-2.7 10.1-4.2 17.2-4.2 17.5 0 31.7 16.9 31.7 37.8zm52.6 67c-7.1 0-12.8 1.5-17.2 4.2v68c4.4 2.5 10.1 3.7 17.2 3.7 17.4 0 31.7-16.9 31.7-37.8 0-21.1-14.3-38.1-31.7-38.1zM448 80v352c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V80c0-26.5 21.5-48 48-48h352c26.5 0 48 21.5 48 48zM185 255.1c41 0 74.2-35.6 74.2-79.6 0-44-33.2-79.6-74.2-79.6-12 0-24.1 3.2-34.6 8.8h-45.7V311l51.8-10.1v-50.6c8.6 3.1 18.1 4.8 28.5 4.8zm158.4 25.3c0-44-33.2-79.6-73.9-79.6-3.2 0-6.4.2-9.6.7-3.7 12.5-10.1 23.8-19.2 33.4-13.8 15-32.2 23.8-51.8 24.8V416l51.8-10.1v-50.6c8.6 3.2 18.2 4.7 28.7 4.7 40.8 0 74-35.6 74-79.6z"],pinterest:[496,512,[],"f0d2","M496 256c0 137-111 248-248 248-25.6 0-50.2-3.9-73.4-11.1 10.1-16.5 25.2-43.5 30.8-65 3-11.6 15.4-59 15.4-59 8.1 15.4 31.7 28.5 56.8 28.5 74.8 0 128.7-68.8 128.7-154.3 0-81.9-66.9-143.2-152.9-143.2-107 0-163.9 71.8-163.9 150.1 0 36.4 19.4 81.7 50.3 96.1 4.7 2.2 7.2 1.2 8.3-3.3.8-3.4 5-20.3 6.9-28.1.6-2.5.3-4.7-1.7-7.1-10.1-12.5-18.3-35.3-18.3-56.6 0-54.7 41.4-107.6 112-107.6 60.9 0 103.6 41.5 103.6 100.9 0 67.1-33.9 113.6-78 113.6-24.3 
0-42.6-20.1-36.7-44.8 7-29.5 20.5-61.3 20.5-82.6 0-19-10.2-34.9-31.4-34.9-24.9 0-44.9 25.7-44.9 60.2 0 22 7.4 36.8 7.4 36.8s-24.5 103.8-29 123.2c-5 21.4-3 51.6-.9 71.2C65.4 450.9 0 361.1 0 256 0 119 111 8 248 8s248 111 248 248z"],"pinterest-p":[384,512,[],"f231","M204 6.5C101.4 6.5 0 74.9 0 185.6 0 256 39.6 296 63.6 296c9.9 0 15.6-27.6 15.6-35.4 0-9.3-23.7-29.1-23.7-67.8 0-80.4 61.2-137.4 140.4-137.4 68.1 0 118.5 38.7 118.5 109.8 0 53.1-21.3 152.7-90.3 152.7-24.9 0-46.2-18-46.2-43.8 0-37.8 26.4-74.4 26.4-113.4 0-66.2-93.9-54.2-93.9 25.8 0 16.8 2.1 35.4 9.6 50.7-13.8 59.4-42 147.9-42 209.1 0 18.9 2.7 37.5 4.5 56.4 3.4 3.8 1.7 3.4 6.9 1.5 50.4-69 48.6-82.5 71.4-172.8 12.3 23.4 44.1 36 69.3 36 106.2 0 153.9-103.5 153.9-196.8C384 71.3 298.2 6.5 204 6.5z"],"pinterest-square":[448,512,[],"f0d3","M448 80v352c0 26.5-21.5 48-48 48H154.4c9.8-16.4 22.4-40 27.4-59.3 3-11.5 15.3-58.4 15.3-58.4 8 15.3 31.4 28.2 56.3 28.2 74.1 0 127.4-68.1 127.4-152.7 0-81.1-66.2-141.8-151.4-141.8-106 0-162.2 71.1-162.2 148.6 0 36 19.2 80.8 49.8 95.1 4.7 2.2 7.1 1.2 8.2-3.3.8-3.4 5-20.1 6.8-27.8.6-2.5.3-4.6-1.7-7-10.1-12.3-18.3-34.9-18.3-56 0-54.2 41-106.6 110.9-106.6 60.3 0 102.6 41.1 102.6 99.9 0 66.4-33.5 112.4-77.2 112.4-24.1 0-42.1-19.9-36.4-44.4 6.9-29.2 20.3-60.7 20.3-81.8 0-53-75.5-45.7-75.5 25 0 21.7 7.3 36.5 7.3 36.5-31.4 132.8-36.1 134.5-29.6 192.6l2.2.8H48c-26.5 0-48-21.5-48-48V80c0-26.5 21.5-48 48-48h352c26.5 0 48 21.5 48 48z"],playstation:[576,512,[],"f3df","M570.9 372.3c-11.3 14.2-38.8 24.3-38.8 24.3L327 470.2v-54.3l150.9-53.8c17.1-6.1 19.8-14.8 5.8-19.4-13.9-4.6-39.1-3.3-56.2 2.9L327 381.1v-56.4c23.2-7.8 47.1-13.6 75.7-16.8 40.9-4.5 90.9.6 130.2 15.5 44.2 14 49.2 34.7 38 48.9zm-224.4-92.5v-139c0-16.3-3-31.3-18.3-35.6-11.7-3.8-19 7.1-19 23.4v347.9l-93.8-29.8V32c39.9 7.4 98 24.9 129.2 35.4C424.1 94.7 451 128.7 451 205.2c0 74.5-46 102.8-104.5 74.6zM43.2 410.2c-45.4-12.8-53-39.5-32.3-54.8 19.1-14.2 51.7-24.9 51.7-24.9l134.5-47.8v54.5l-96.8 34.6c-17.1 6.1-19.7 14.8-5.8 19.4 13.9 4.6 39.1 3.3 56.2-2.9l46.4-16.9v48.8c-51.6 9.3-101.4 7.3-153.9-10z"],"product-hunt":[512,512,[],"f288","M326.3 218.8c0 20.5-16.7 37.2-37.2 37.2h-70.3v-74.4h70.3c20.5 0 37.2 16.7 37.2 37.2zM504 256c0 137-111 248-248 248S8 393 8 256 119 8 256 8s248 111 248 248zm-128.1-37.2c0-47.9-38.9-86.8-86.8-86.8H169.2v248h49.6v-74.4h70.3c47.9 0 86.8-38.9 86.8-86.8z"],pushed:[432,512,[],"f3e1","M407 111.9l-98.5-9 14-33.4c10.4-23.5-10.8-40.4-28.7-37L22.5 76.9c-15.1 2.7-26 18.3-21.4 36.6l105.1 348.3c6.5 21.3 36.7 24.2 47.7 7l35.3-80.8 235.2-231.3c16.4-16.8 4.3-42.9-17.4-44.8zM297.6 53.6c5.1-.7 7.5 2.5 5.2 7.4L286 100.9 108.6 84.6l189-31zM22.7 107.9c-3.1-5.1 1-10 6.1-9.1l248.7 22.7-96.9 230.7L22.7 107.9zM136 456.4c-2.6 4-7.9 3.1-9.4-1.2L43.5 179.7l127.7 197.6c-7 15-35.2 79.1-35.2 79.1zm272.8-314.5L210.1 337.3l89.7-213.7 106.4 9.7c4 1.1 5.7 5.3 2.6 8.6z"],python:[448,512,[],"f3e2","M167.8 36.4c-45.2 8-53.4 24.7-53.4 55.6v40.7h106.9v13.6h-147c-31.1 0-58.3 18.7-66.8 54.2-9.8 40.7-10.2 66.1 0 108.6 7.6 31.6 25.7 54.2 56.8 54.2H101v-48.8c0-35.3 30.5-66.4 66.8-66.4h106.8c29.7 0 53.4-24.5 53.4-54.3V91.9c0-29-24.4-50.7-53.4-55.6-35.8-5.9-74.7-5.6-106.8.1zm-6.7 28.4c11 0 20.1 9.2 20.1 20.4s-9 20.3-20.1 20.3c-11.1 0-20.1-9.1-20.1-20.3.1-11.3 9-20.4 20.1-20.4zm185.2 81.4v47.5c0 36.8-31.2 67.8-66.8 67.8H172.7c-29.2 0-53.4 25-53.4 54.3v101.8c0 29 25.2 46 53.4 54.3 33.8 9.9 66.3 11.7 106.8 0 26.9-7.8 53.4-23.5 53.4-54.3v-40.7H226.2v-13.6h160.2c31.1 0 42.6-21.7 53.4-54.2 11.2-33.5 10.7-65.7 0-108.6-7.7-30.9-22.3-54.2-53.4-54.2h-40.1zM286.2 404c11.1 0 20.1 9.1 20.1 20.3 
0 11.3-9 20.4-20.1 20.4-11 0-20.1-9.2-20.1-20.4.1-11.3 9.1-20.3 20.1-20.3z"],qq:[448,512,[],"f1d6","M433.754 420.445c-11.526 1.393-44.86-52.741-44.86-52.741 0 31.345-16.136 72.247-51.051 101.786 16.842 5.192 54.843 19.167 45.803 34.421-7.316 12.343-125.51 7.881-159.632 4.037-34.122 3.844-152.316 8.306-159.632-4.037-9.045-15.25 28.918-29.214 45.783-34.415-34.92-29.539-51.059-70.445-51.059-101.792 0 0-33.334 54.134-44.859 52.741-5.37-.65-12.424-29.644 9.347-99.704 10.261-33.024 21.995-60.478 40.144-105.779C60.683 98.063 108.982.006 224 0c113.737.006 163.156 96.133 160.264 214.963 18.118 45.223 29.912 72.85 40.144 105.778 21.768 70.06 14.716 99.053 9.346 99.704z"],quora:[448,512,[],"f2c4","M440.5 386.7h-29.3c-1.5 13.5-10.5 30.8-33 30.8-20.5 0-35.3-14.2-49.5-35.8 44.2-34.2 74.7-87.5 74.7-153C403.5 111.2 306.8 32 205 32 105.3 32 7.3 111.7 7.3 228.7c0 134.1 131.3 221.6 249 189C276 451.3 302 480 351.5 480c81.8 0 90.8-75.3 89-93.3zM297 329.2C277.5 300 253.3 277 205.5 277c-30.5 0-54.3 10-69 22.8l12.2 24.3c6.2-3 13-4 19.8-4 35.5 0 53.7 30.8 69.2 61.3-10 3-20.7 4.2-32.7 4.2-75 0-107.5-53-107.5-156.7C97.5 124.5 130 71 205 71c76.2 0 108.7 53.5 108.7 157.7.1 41.8-5.4 75.6-16.7 100.5z"],ravelry:[512,512,[],"f2d9","M407.4 61.5C331.6 22.1 257.8 31 182.9 66c-11.3 5.2-15.5 10.6-19.9 19-10.3 19.2-16.2 37.4-19.9 52.7-21.2 25.6-36.4 56.1-43.3 89.9-10.6 18-20.9 41.4-23.1 71.4 0 0-.7 7.6-.5 7.9-35.3-4.6-76.2-27-76.2-27 9.1 14.5 61.3 32.3 76.3 37.9 0 0 1.7 98 64.5 131.2-11.3-17.2-13.3-20.2-13.3-20.2S94.8 369 100.4 324.7c.7 0 1.5.2 2.2.2 23.9 87.4 103.2 151.4 196.9 151.4 6.2 0 12.1-.2 18-.7 14 1.5 27.6.5 40.1-3.9 6.9-2.2 13.8-6.4 20.2-10.8 70.2-39.1 100.9-82 123.1-147.7 5.4-16 8.1-35.5 9.8-52.2 8.7-82.3-30.6-161.6-103.3-199.5zM138.8 163.2s-1.2 12.3-.7 19.7c-3.4 2.5-10.1 8.1-18.2 16.7 5.2-12.8 11.3-25.1 18.9-36.4zm-31.2 121.9c4.4-17.2 13.3-39.1 29.8-55.1 0 0 1.7 48 15.8 90.1l-41.4-6.9c-2.2-9.2-3.5-18.5-4.2-28.1zm7.9 42.8c14.8 3.2 34 7.6 43.1 9.1 27.3 76.8 108.3 124.3 108.3 124.3 1 .5 1.7.7 2.7 1-73.1-11.6-132.7-64.7-154.1-134.4zM386 444.1c-14.5 4.7-36.2 8.4-64.7 3.7 0 0-91.1-23.1-127.5-107.8 38.2.7 52.4-.2 78-3.9 39.4-5.7 79-16.2 115-33 11.8-5.4 11.1-19.4 9.6-29.8-2-12.8-11.1-12.1-21.4-4.7 0 0-82 58.6-189.8 53.7-18.7-32-26.8-110.8-26.8-110.8 41.4-35.2 83.2-59.6 168.4-52.4.2-6.4 3-27.1-20.4-28.1 0 0-93.5-11.1-146 33.5 2.5-16.5 5.9-29.3 11.1-39.4 34.2-30.8 79-49.5 128.3-49.5 106.4 0 193 87.1 193 194.5-.2 76-43.8 142-106.8 174z"],react:[512,512,[],"f41b","M418.2 177.2c-5.4-1.8-10.8-3.5-16.2-5.1.9-3.7 1.7-7.4 2.5-11.1 12.3-59.6 4.2-107.5-23.1-123.3-26.3-15.1-69.2.6-112.6 38.4-4.3 3.7-8.5 7.6-12.5 11.5-2.7-2.6-5.5-5.2-8.3-7.7-45.5-40.4-91.1-57.4-118.4-41.5-26.2 15.2-34 60.3-23 116.7 1.1 5.6 2.3 11.1 3.7 16.7-6.4 1.8-12.7 3.8-18.6 5.9C38.3 196.2 0 225.4 0 255.6c0 31.2 40.8 62.5 96.3 81.5 4.5 1.5 9 3 13.6 4.3-1.5 6-2.8 11.9-4 18-10.5 55.5-2.3 99.5 23.9 114.6 27 15.6 72.4-.4 116.6-39.1 3.5-3.1 7-6.3 10.5-9.7 4.4 4.3 9 8.4 13.6 12.4 42.8 36.8 85.1 51.7 111.2 36.6 27-15.6 35.8-62.9 24.4-120.5-.9-4.4-1.9-8.9-3-13.5 3.2-.9 6.3-1.9 9.4-2.9 57.7-19.1 99.5-50 99.5-81.7 0-30.3-39.4-59.7-93.8-78.4zM282.9 92.3c37.2-32.4 71.9-45.1 87.7-36 16.9 9.7 23.4 48.9 12.8 100.4-.7 3.4-1.4 6.7-2.3 10-22.2-5-44.7-8.6-67.3-10.6-13-18.6-27.2-36.4-42.6-53.1 3.9-3.7 7.7-7.2 11.7-10.7zm-130 189.1c4.6 8.8 9.3 17.5 14.3 26.1 5.1 8.7 10.3 17.4 15.8 25.9-15.6-1.7-31.1-4.2-46.4-7.5 4.4-14.4 9.9-29.3 16.3-44.5zm0-50.6c-6.3-14.9-11.6-29.5-16-43.6 14.4-3.2 29.7-5.8 45.6-7.8-5.3 8.3-10.5 16.8-15.4 25.4-4.9 8.5-9.7 17.2-14.2 26zm11.4 25.3c6.6-13.8 13.8-27.3 
21.4-40.6 7.6-13.3 15.8-26.2 24.4-38.9 15-1.1 30.3-1.7 45.9-1.7 15.6 0 31 .6 45.9 1.7 8.5 12.6 16.6 25.5 24.3 38.7 7.7 13.2 14.9 26.7 21.7 40.4-6.7 13.8-13.9 27.4-21.6 40.8-7.6 13.3-15.7 26.2-24.2 39-14.9 1.1-30.4 1.6-46.1 1.6-15.7 0-30.9-.5-45.6-1.4-8.7-12.7-16.9-25.7-24.6-39-7.7-13.3-14.8-26.8-21.5-40.6zm180.6 51.2c5.1-8.8 9.9-17.7 14.6-26.7 6.4 14.5 12 29.2 16.9 44.3-15.5 3.5-31.2 6.2-47 8 5.4-8.4 10.5-17 15.5-25.6zm14.4-76.5c-4.7-8.8-9.5-17.6-14.5-26.2-4.9-8.5-10-16.9-15.3-25.2 16.1 2 31.5 4.7 45.9 8-4.6 14.8-10 29.2-16.1 43.4zM256.2 118.3c10.5 11.4 20.4 23.4 29.6 35.8-19.8-.9-39.7-.9-59.5 0 9.8-12.9 19.9-24.9 29.9-35.8zM140.2 57c16.8-9.8 54.1 4.2 93.4 39 2.5 2.2 5 4.6 7.6 7-15.5 16.7-29.8 34.5-42.9 53.1-22.6 2-45 5.5-67.2 10.4-1.3-5.1-2.4-10.3-3.5-15.5-9.4-48.4-3.2-84.9 12.6-94zm-24.5 263.6c-4.2-1.2-8.3-2.5-12.4-3.9-21.3-6.7-45.5-17.3-63-31.2-10.1-7-16.9-17.8-18.8-29.9 0-18.3 31.6-41.7 77.2-57.6 5.7-2 11.5-3.8 17.3-5.5 6.8 21.7 15 43 24.5 63.6-9.6 20.9-17.9 42.5-24.8 64.5zm116.6 98c-16.5 15.1-35.6 27.1-56.4 35.3-11.1 5.3-23.9 5.8-35.3 1.3-15.9-9.2-22.5-44.5-13.5-92 1.1-5.6 2.3-11.2 3.7-16.7 22.4 4.8 45 8.1 67.9 9.8 13.2 18.7 27.7 36.6 43.2 53.4-3.2 3.1-6.4 6.1-9.6 8.9zm24.5-24.3c-10.2-11-20.4-23.2-30.3-36.3 9.6.4 19.5.6 29.5.6 10.3 0 20.4-.2 30.4-.7-9.2 12.7-19.1 24.8-29.6 36.4zm130.7 30c-.9 12.2-6.9 23.6-16.5 31.3-15.9 9.2-49.8-2.8-86.4-34.2-4.2-3.6-8.4-7.5-12.7-11.5 15.3-16.9 29.4-34.8 42.2-53.6 22.9-1.9 45.7-5.4 68.2-10.5 1 4.1 1.9 8.2 2.7 12.2 4.9 21.6 5.7 44.1 2.5 66.3zm18.2-107.5c-2.8.9-5.6 1.8-8.5 2.6-7-21.8-15.6-43.1-25.5-63.8 9.6-20.4 17.7-41.4 24.5-62.9 5.2 1.5 10.2 3.1 15 4.7 46.6 16 79.3 39.8 79.3 58 0 19.6-34.9 44.9-84.8 61.4zM256 210.2c25.3 0 45.8 20.5 45.8 45.8 0 25.3-20.5 45.8-45.8 45.8-25.3 0-45.8-20.5-45.8-45.8 0-25.3 20.5-45.8 45.8-45.8"],rebel:[512,512,[],"f1d0","M256.5 504C117.2 504 9 387.8 13.2 249.9 16 170.7 56.4 97.7 129.7 49.5c.3 0 1.9-.6 1.1.8-5.8 5.5-111.3 129.8-14.1 226.4 49.8 49.5 90 2.5 90 2.5 38.5-50.1-.6-125.9-.6-125.9-10-24.9-45.7-40.1-45.7-40.1l28.8-31.8c24.4 10.5 43.2 38.7 43.2 38.7.8-29.6-21.9-61.4-21.9-61.4L255.1 8l44.3 50.1c-20.5 28.8-21.9 62.6-21.9 62.6 13.8-23 43.5-39.3 43.5-39.3l28.5 31.8c-27.4 8.9-45.4 39.9-45.4 39.9-15.8 28.5-27.1 89.4.6 127.3 32.4 44.6 87.7-2.8 87.7-2.8 102.7-91.9-10.5-225-10.5-225-6.1-5.5.8-2.8.8-2.8 50.1 36.5 114.6 84.4 116.2 204.8C500.9 400.2 399 504 256.5 504z"],"red-river":[448,512,[],"f3e3","M353.2 32H94.8C42.4 32 0 74.4 0 126.8v258.4C0 437.6 42.4 480 94.8 480h258.4c52.4 0 94.8-42.4 94.8-94.8V126.8c0-52.4-42.4-94.8-94.8-94.8zM144.9 200.9v56.3c0 27-21.9 48.9-48.9 48.9V151.9c0-13.2 10.7-23.9 23.9-23.9h154.2c0 27-21.9 48.9-48.9 48.9h-56.3c-12.3-.6-24.6 11.6-24 24zm176.3 72h-56.3c-12.3-.6-24.6 11.6-24 24v56.3c0 27-21.9 48.9-48.9 48.9V247.9c0-13.2 10.7-23.9 23.9-23.9h154.2c0 27-21.9 48.9-48.9 48.9z"],reddit:[512,512,[],"f1a1","M201.5 305.5c-13.8 0-24.9-11.1-24.9-24.6 0-13.8 11.1-24.9 24.9-24.9 13.6 0 24.6 11.1 24.6 24.9 0 13.6-11.1 24.6-24.6 24.6zM504 256c0 137-111 248-248 248S8 393 8 256 119 8 256 8s248 111 248 248zm-132.3-41.2c-9.4 0-17.7 3.9-23.8 10-22.4-15.5-52.6-25.5-86.1-26.6l17.4-78.3 55.4 12.5c0 13.6 11.1 24.6 24.6 24.6 13.8 0 24.9-11.3 24.9-24.9s-11.1-24.9-24.9-24.9c-9.7 0-18 5.8-22.1 13.8l-61.2-13.6c-3-.8-6.1 1.4-6.9 4.4l-19.1 86.4c-33.2 1.4-63.1 11.3-85.5 26.8-6.1-6.4-14.7-10.2-24.1-10.2-34.9 0-46.3 46.9-14.4 62.8-1.1 5-1.7 10.2-1.7 15.5 0 52.6 59.2 95.2 132 95.2 73.1 0 132.3-42.6 132.3-95.2 0-5.3-.6-10.8-1.9-15.8 31.3-16 19.8-62.5-14.9-62.5zM302.8 331c-18.2 18.2-76.1 17.9-93.6 0-2.2-2.2-6.1-2.2-8.3 0-2.5 2.5-2.5 
6.4 0 8.6 22.8 22.8 87.3 22.8 110.2 0 2.5-2.2 2.5-6.1 0-8.6-2.2-2.2-6.1-2.2-8.3 0zm7.7-75c-13.6 0-24.6 11.1-24.6 24.9 0 13.6 11.1 24.6 24.6 24.6 13.8 0 24.9-11.1 24.9-24.6 0-13.8-11-24.9-24.9-24.9z"],"reddit-alien":[512,512,[],"f281","M440.3 203.5c-15 0-28.2 6.2-37.9 15.9-35.7-24.7-83.8-40.6-137.1-42.3L293 52.3l88.2 19.8c0 21.6 17.6 39.2 39.2 39.2 22 0 39.7-18.1 39.7-39.7s-17.6-39.7-39.7-39.7c-15.4 0-28.7 9.3-35.3 22l-97.4-21.6c-4.9-1.3-9.7 2.2-11 7.1L246.3 177c-52.9 2.2-100.5 18.1-136.3 42.8-9.7-10.1-23.4-16.3-38.4-16.3-55.6 0-73.8 74.6-22.9 100.1-1.8 7.9-2.6 16.3-2.6 24.7 0 83.8 94.4 151.7 210.3 151.7 116.4 0 210.8-67.9 210.8-151.7 0-8.4-.9-17.2-3.1-25.1 49.9-25.6 31.5-99.7-23.8-99.7zM129.4 308.9c0-22 17.6-39.7 39.7-39.7 21.6 0 39.2 17.6 39.2 39.7 0 21.6-17.6 39.2-39.2 39.2-22 .1-39.7-17.6-39.7-39.2zm214.3 93.5c-36.4 36.4-139.1 36.4-175.5 0-4-3.5-4-9.7 0-13.7 3.5-3.5 9.7-3.5 13.2 0 27.8 28.5 120 29 149 0 3.5-3.5 9.7-3.5 13.2 0 4.1 4 4.1 10.2.1 13.7zm-.8-54.2c-21.6 0-39.2-17.6-39.2-39.2 0-22 17.6-39.7 39.2-39.7 22 0 39.7 17.6 39.7 39.7-.1 21.5-17.7 39.2-39.7 39.2z"],"reddit-square":[448,512,[],"f1a2","M283.2 345.5c2.7 2.7 2.7 6.8 0 9.2-24.5 24.5-93.8 24.6-118.4 0-2.7-2.4-2.7-6.5 0-9.2 2.4-2.4 6.5-2.4 8.9 0 18.7 19.2 81 19.6 100.5 0 2.4-2.3 6.6-2.3 9 0zm-91.3-53.8c0-14.9-11.9-26.8-26.5-26.8-14.9 0-26.8 11.9-26.8 26.8 0 14.6 11.9 26.5 26.8 26.5 14.6 0 26.5-11.9 26.5-26.5zm90.7-26.8c-14.6 0-26.5 11.9-26.5 26.8 0 14.6 11.9 26.5 26.5 26.5 14.9 0 26.8-11.9 26.8-26.5 0-14.9-11.9-26.8-26.8-26.8zM448 80v352c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V80c0-26.5 21.5-48 48-48h352c26.5 0 48 21.5 48 48zm-99.7 140.6c-10.1 0-19 4.2-25.6 10.7-24.1-16.7-56.5-27.4-92.5-28.6l18.7-84.2 59.5 13.4c0 14.6 11.9 26.5 26.5 26.5 14.9 0 26.8-12.2 26.8-26.8 0-14.6-11.9-26.8-26.8-26.8-10.4 0-19.3 6.2-23.8 14.9l-65.7-14.6c-3.3-.9-6.5 1.5-7.4 4.8l-20.5 92.8c-35.7 1.5-67.8 12.2-91.9 28.9-6.5-6.8-15.8-11-25.9-11-37.5 0-49.8 50.4-15.5 67.5-1.2 5.4-1.8 11-1.8 16.7 0 56.5 63.7 102.3 141.9 102.3 78.5 0 142.2-45.8 142.2-102.3 0-5.7-.6-11.6-2.1-17 33.6-17.2 21.2-67.2-16.1-67.2z"],rendact:[496,512,[],"f3e4","M248 8C111 8 0 119 0 256s111 248 248 248c18.6 0 36.7-2.1 54.1-5.9-5.6-7.4-10.8-14.4-15.9-21.3-12.4 2.1-25.2 3.3-38.3 3.3C124.3 480 24 379.7 24 256S124.3 32 248 32s224 100.3 224 224c0 71-33 134.2-84.5 175.3-25.9 18.8-39.1 21.4-83.5-44.2-78.7-112.9-48-71.1-73.7-108.3 72.8 8.9 228.5-72 168.6-168.6C314-26.8 15 93.8 59.7 226.4c3.2 9.8 14.4 38.6 45.6 38.6 2 0 2.6-.6 2-1.7-4.4-8.7-20.1-9.8-20.1-37.4 0-40.5 40.5-89.6 100.3-120 66.1-32.3 131.9-30.2 158.2 5.4 27.2 38.3-20.9 119.2-120.4 136.9 7.5-9.4 57-75.2 62.8-84 22.7-34.6 23.6-49 14-59.2-15.5-16.9-29.5-10.3-50.7-11.7-10.8-.9-113.7 181.2-136.4 216.9-5.9 9-21.2 34.1-21.2 50.9 0 21.3 2.8 51.4 20.6 51.4 10.6 0 8-18.7 8-26.6 0-12.9 27.4-49.4 74.8-104.6 20.4 36.1 57.7 114.3 130.2 209.7 98-33.1 168.5-125.8 168.5-235C496 119 385 8 248 8z"],renren:[512,512,[],"f18b","M214 169.1c0 110.4-61 205.4-147.6 247.4C30 373.2 8 317.7 8 256.6 8 133.9 97.1 32.2 214 12.5v156.6zM255 504c-42.9 0-83.3-11-118.5-30.4C193.7 437.5 239.9 382.9 255 319c15.5 63.9 61.7 118.5 118.8 154.7C338.7 493 298.3 504 255 504zm190.6-87.5C359 374.5 298 279.6 298 169.1V12.5c116.9 19.7 206 121.4 206 244.1 0 61.1-22 116.6-58.4 159.9z"],replyd:[448,512,[],"f3e6","M320 480H128C57.6 480 0 422.4 0 352V160C0 89.6 57.6 32 128 32h192c70.4 0 128 57.6 128 128v192c0 70.4-57.6 128-128 128zM193.4 273.2c-6.1-2-11.6-3.1-16.4-3.1-7.2 0-13.5 1.9-18.9 5.6-5.4 3.7-9.6 9-12.8 15.8h-1.1l-4.2-18.3h-28v138.9h36.1v-89.7c1.5-5.4 4.4-9.8 8.7-13.2 
4.3-3.4 9.8-5.1 16.2-5.1 4.6 0 9.8 1 15.6 3.1l4.8-34zm115.2 103.4c-3.2 2.4-7.7 4.8-13.7 7.1-6 2.3-12.8 3.5-20.4 3.5-12.2 0-21.1-3-26.5-8.9-5.5-5.9-8.5-14.7-9-26.4h83.3c.9-4.8 1.6-9.4 2.1-13.9.5-4.4.7-8.6.7-12.5 0-10.7-1.6-19.7-4.7-26.9-3.2-7.2-7.3-13-12.5-17.2-5.2-4.3-11.1-7.3-17.8-9.2-6.7-1.8-13.5-2.8-20.6-2.8-21.1 0-37.5 6.1-49.2 18.3s-17.5 30.5-17.5 55c0 22.8 5.2 40.7 15.6 53.7 10.4 13.1 26.8 19.6 49.2 19.6 10.7 0 20.9-1.5 30.4-4.6 9.5-3.1 17.1-6.8 22.6-11.2l-12-23.6zm-21.8-70.3c3.8 5.4 5.3 13.1 4.6 23.1h-51.7c.9-9.4 3.7-17 8.2-22.6 4.5-5.6 11.5-8.5 21-8.5 8.2-.1 14.1 2.6 17.9 8zm79.9 2.5c4.1 3.9 9.4 5.8 16.1 5.8 7 0 12.6-1.9 16.7-5.8s6.1-9.1 6.1-15.6-2-11.6-6.1-15.4c-4.1-3.8-9.6-5.7-16.7-5.7-6.7 0-12 1.9-16.1 5.7-4.1 3.8-6.1 8.9-6.1 15.4s2 11.7 6.1 15.6zm0 100.5c4.1 3.9 9.4 5.8 16.1 5.8 7 0 12.6-1.9 16.7-5.8s6.1-9.1 6.1-15.6-2-11.6-6.1-15.4c-4.1-3.8-9.6-5.7-16.7-5.7-6.7 0-12 1.9-16.1 5.7-4.1 3.8-6.1 8.9-6.1 15.4 0 6.6 2 11.7 6.1 15.6z"],resolving:[496,512,[],"f3e7","M281.2 278.2c46-13.3 49.6-23.5 44-43.4L314 195.5c-6.1-20.9-18.4-28.1-71.1-12.8L54.7 236.8l28.6 98.6 197.9-57.2zM248.5 8C131.4 8 33.2 88.7 7.2 197.5l221.9-63.9c34.8-10.2 54.2-11.7 79.3-8.2 36.3 6.1 52.7 25 61.4 55.2l10.7 37.8c8.2 28.1 1 50.6-23.5 73.6-19.4 17.4-31.2 24.5-61.4 33.2L203 351.8l220.4 27.1 9.7 34.2-48.1 13.3-286.8-37.3 23 80.2c36.8 22 80.3 34.7 126.3 34.7 137 0 248.5-111.4 248.5-248.3C497 119.4 385.5 8 248.5 8zM38.3 388.6L0 256.8c0 48.5 14.3 93.4 38.3 131.8z"],rocketchat:[448,512,[],"f3e8","M448 256.2c0-87.2-99.6-153.3-219.8-153.3-18.8 0-37.3 1.6-55.3 4.8-11.1-10.5-24.2-20-38-27.4C61.2 44.2 0 79.4 0 79.4s56.9 47.1 47.6 88.3c-52.3 52.3-52.5 124.1 0 176.6C56.9 385.6 0 432.6 0 432.6s61.2 35.2 134.9-.8c13.8-7.5 26.9-16.9 38-27.4 18 3.2 36.5 4.8 55.3 4.8 120.3-.1 219.8-65.8 219.8-153zm-219.7 124c-23.7 0-46.3-2.8-67.3-7.8-21.3 25.8-68.1 61.7-113.6 50.1 14.8-16 36.7-43.1 32-87.6-27.3-21.4-43.6-48.7-43.6-78.5 0-68.4 86.2-123.9 192.5-123.9S420.8 188 420.8 256.4c0 68.3-86.2 123.8-192.5 123.8zm25.6-123.9c0 14.2-11.5 25.8-25.6 25.8-14.1 0-25.6-11.5-25.6-25.8 0-14.2 11.5-25.8 25.6-25.8 14.1 0 25.6 11.6 25.6 25.8zm88.9 0c0 14.2-11.4 25.8-25.6 25.8-14.1 0-25.6-11.5-25.6-25.8 0-14.2 11.4-25.8 25.6-25.8 14.1 0 25.6 11.6 25.6 25.8zm-177.9 0c0 14.2-11.4 25.8-25.6 25.8-14.1 0-25.6-11.5-25.6-25.8 0-14.2 11.4-25.8 25.6-25.8 14.2 0 25.6 11.6 25.6 25.8z"],rockrms:[496,512,[],"f3e9","M248 8C111 8 0 119 0 256s111 248 248 248 248-111 248-248S385 8 248 8zm157.4 419.5h-90l-112-131.3c-17.9-20.4-3.9-56.1 26.6-56.1h75.3l-84.6-99.3-84.3 98.9h-90L193.5 67.2c14.4-18.4 41.3-17.3 54.5 0l157.7 185.1c19 22.8 2 57.2-27.6 56.1-.6 0-74.2.2-74.2.2l101.5 118.9z"],safari:[512,512,[],"f267","M236.9 256.8c0-9.1 6.6-17.7 16.3-17.7 8.9 0 17.4 6.4 17.4 16.1 0 9.1-6.4 17.7-16.1 17.7-9 0-17.6-6.7-17.6-16.1zM504 256c0 137-111 248-248 248S8 393 8 256 119 8 256 8s248 111 248 248zm-26.6 0c0-122.3-99.1-221.4-221.4-221.4S34.6 133.7 34.6 256 133.7 477.4 256 477.4 477.4 378.3 477.4 256zm-72.5 96.6c0 3.6 13 10.2 16.3 12.2-27.4 41.5-69.8 71.4-117.9 83.3l-4.4-18.5c-.3-2.5-1.9-2.8-4.2-2.8-1.9 0-3 2.8-2.8 4.2l4.4 18.8c-13.3 2.8-26.8 4.2-40.4 4.2-36.3 0-72-10.2-103-29.1 1.7-2.8 12.2-18 12.2-20.2 0-1.9-1.7-3.6-3.6-3.6-3.9 0-12.2 16.6-14.7 19.9-41.8-27.7-72-70.6-83.6-119.6l19.1-4.2c2.2-.6 2.8-2.2 2.8-4.2 0-1.9-2.8-3-4.4-2.8L62 294.5c-2.5-12.7-3.9-25.5-3.9-38.5 0-37.1 10.5-73.6 30.2-104.9 2.8 1.7 16.1 10.8 18.3 10.8 1.9 0 3.6-1.4 3.6-3.3 0-3.9-14.7-11.3-18-13.6 28.2-41.2 71.1-70.9 119.8-81.9l4.2 18.5c.6 2.2 2.2 2.8 4.2 2.8s3-2.8 2.8-4.4L219 61.7c12.2-2.2 24.6-3.6 37.1-3.6 37.1 0 
73.3 10.5 104.9 30.2-1.9 2.8-10.8 15.8-10.8 18 0 1.9 1.4 3.6 3.3 3.6 3.9 0 11.3-14.4 13.3-17.7 41 27.7 70.3 70 81.7 118.2l-15.5 3.3c-2.5.6-2.8 2.2-2.8 4.4 0 1.9 2.8 3 4.2 2.8l15.8-3.6c2.5 12.7 3.9 25.7 3.9 38.7 0 36.3-10 72-28.8 102.7-2.8-1.4-14.4-9.7-16.6-9.7-2.1 0-3.8 1.7-3.8 3.6zm-33.2-242.2c-13 12.2-134.2 123.7-137.6 129.5l-96.6 160.5c12.7-11.9 134.2-124 137.3-129.3l96.9-160.7z"],sass:[640,512,[],"f41e","M551.1 291.9c-22.4.1-41.8 5.5-58 13.5-5.9-11.9-12-22.3-13-30.1-1.2-9.1-2.5-14.5-1.1-25.3s7.7-26.1 7.6-27.2c-.1-1.1-1.4-6.6-14.3-6.7-12.9-.1-24 2.5-25.3 5.9-1.3 3.4-3.8 11.1-5.3 19.1-2.3 11.7-25.8 53.5-39.1 75.3-4.4-8.5-8.1-16-8.9-22-1.2-9.1-2.5-14.5-1.1-25.3s7.7-26.1 7.6-27.2c-.1-1.1-1.4-6.6-14.3-6.7-12.9-.1-24 2.5-25.3 5.9-1.3 3.4-2.7 11.4-5.3 19.1-2.6 7.7-33.9 77.3-42.1 95.4-4.2 9.2-7.8 16.6-10.4 21.6s-.2.3-.4.9c-2.2 4.3-3.5 6.7-3.5 6.7v.1c-1.7 3.2-3.6 6.1-4.5 6.1-.6 0-1.9-8.4.3-19.9 4.7-24.2 15.8-61.8 15.7-63.1-.1-.7 2.1-7.2-7.3-10.7-9.1-3.3-12.4 2.2-13.2 2.2-.8 0-1.4 2-1.4 2s10.1-42.4-19.4-42.4c-18.4 0-44 20.2-56.6 38.5-7.9 4.3-25 13.6-43 23.5-6.9 3.8-14 7.7-20.7 11.4-.5-.5-.9-1-1.4-1.5-35.8-38.2-101.9-65.2-99.1-116.5 1-18.7 7.5-67.8 127.1-127.4 98-48.8 176.4-35.4 189.9-5.6 19.4 42.5-41.9 121.6-143.7 133-38.8 4.3-59.2-10.7-64.3-16.3-5.3-5.9-6.1-6.2-8.1-5.1-3.3 1.8-1.2 7 0 10.1 3 7.9 15.5 21.9 36.8 28.9 18.7 6.1 64.2 9.5 119.2-11.8C367 196.5 415.1 130.2 401 74.7 386.6 18.3 293.1-.2 204.6 31.2 151.9 49.9 94.9 79.3 53.9 117.6 5.2 163.2-2.6 202.9.6 219.5c11.4 58.9 92.6 97.3 125.1 125.7-1.6.9-3.1 1.7-4.5 2.5-16.3 8.1-78.2 40.5-93.7 74.7-17.5 38.8 2.9 66.6 16.3 70.4 41.8 11.6 84.6-9.3 107.6-43.6s20.2-79.1 9.6-99.5c-.1-.3-.3-.5-.4-.8 4.2-2.5 8.5-5 12.8-7.5 8.3-4.9 16.4-9.4 23.5-13.3-4 10.8-6.9 23.8-8.4 42.6-1.8 22 7.3 50.5 19.1 61.7 5.2 4.9 11.5 5 15.4 5 13.8 0 20-11.4 26.9-25 8.5-16.6 16-35.9 16-35.9s-9.4 52.2 16.3 52.2c9.4 0 18.8-12.1 23-18.3v.1s.2-.4.7-1.2c1-1.5 1.5-2.4 1.5-2.4v-.3c3.8-6.5 12.1-21.4 24.6-46 16.2-31.8 31.7-71.5 31.7-71.5s1.4 9.7 6.2 25.8c2.8 9.5 8.7 19.9 13.4 30-3.8 5.2-6.1 8.2-6.1 8.2s0 .1.1.2c-3 4-6.4 8.3-9.9 12.5-12.8 15.2-28 32.6-30 37.6-2.4 5.9-1.8 10.3 2.8 13.7 3.4 2.6 9.4 3 15.7 2.5 11.5-.8 19.6-3.6 23.5-5.4 6.2-2.2 13.4-5.7 20.2-10.6 12.5-9.2 20.1-22.4 19.4-39.8-.4-9.6-3.5-19.2-7.3-28.2 1.1-1.6 2.3-3.3 3.4-5 19.8-28.9 35.1-60.6 35.1-60.6s1.4 9.7 6.2 25.8c2.4 8.1 7.1 17 11.4 25.7-18.6 15.1-30.1 32.6-34.1 44.1-7.4 21.3-1.6 30.9 9.3 33.1 4.9 1 11.9-1.3 17.1-3.5 6.5-2.2 14.3-5.7 21.6-11.1 12.5-9.2 24.6-22.1 23.8-39.6-.3-7.9-2.5-15.8-5.4-23.4 15.7-6.6 36.1-10.2 62.1-7.2 55.7 6.5 66.6 41.3 64.5 55.8-2.1 14.6-13.8 22.6-17.7 25-3.9 2.4-5.1 3.3-4.8 5.1.5 2.6 2.3 2.5 5.6 1.9 4.6-.8 29.2-11.8 30.3-38.7 1.6-34-31.1-71.4-89-71.1zM121.8 436.6c-18.4 20.1-44.2 27.7-55.3 21.3C54.6 451 59.3 421.4 82 400c13.8-13 31.6-25 43.4-32.4 2.7-1.6 6.6-4 11.4-6.9.8-.5 1.2-.7 1.2-.7.9-.6 1.9-1.1 2.9-1.7 8.3 30.4.3 57.2-19.1 78.3zm134.4-91.4c-6.4 15.7-19.9 55.7-28.1 53.6-7-1.8-11.3-32.3-1.4-62.3 5-15.1 15.6-33.1 21.9-40.1 10.1-11.3 21.2-14.9 23.8-10.4 3.5 5.9-12.2 49.4-16.2 59.2zm111 53c-2.7 1.4-5.2 2.3-6.4 1.6-.9-.5 1.1-2.4 1.1-2.4s13.9-14.9 19.4-21.7c3.2-4 6.9-8.7 10.9-13.9 0 .5.1 1 .1 1.6-.1 17.9-17.3 30-25.1 34.8zm85.6-19.5c-2-1.4-1.7-6.1 5-20.7 2.6-5.7 8.6-15.3 19-24.5 1.2 3.8 1.9 7.4 1.9 10.8-.1 22.5-16.2 30.9-25.9 34.4z"],schlix:[448,512,[],"f3ea","M350.5 157.7l-54.2-46.1 73.4-39 78.3 44.2-97.5 40.9zM192 122.1l45.7-28.2 34.7 34.6-55.4 29-25-35.4zm-65.1 6.6l31.9-22.1L176 135l-36.7 22.5-12.4-28.8zm-23.3 88.2l-8.8-34.8 29.6-18.3 13.1 35.3-33.9 17.8zm-21.2-83.7l23.9-18.1 8.9 24-26.7 
18.3-6.1-24.2zM59 206.5l-3.6-28.4 22.3-15.5 6.1 28.7L59 206.5zm-30.6 16.6l20.8-12.8 3.3 33.4-22.9 12-1.2-32.6zM1.4 268l19.2-10.2.4 38.2-21 8.8L1.4 268zm59.1 59.3l-28.3 8.3-1.6-46.8 25.1-10.7 4.8 49.2zM99 263.2l-31.1 13-5.2-40.8L90.1 221l8.9 42.2zM123.2 377l-41.6 5.9-8.1-63.5 35.2-10.8 14.5 68.4zm28.5-139.9l21.2 57.1-46.2 13.6-13.7-54.1 38.7-16.6zm85.7 230.5l-70.9-3.3-24.3-95.8 55.2-8.6 40 107.7zm-84.9-279.7l42.2-22.4 28 45.9-50.8 21.3-19.4-44.8zm41 94.9l61.3-18.7 52.8 86.6-79.8 11.3-34.3-79.2zm51.4-85.6l67.3-28.8 65.5 65.4-88.6 26.2-44.2-62.8z"],scribd:[384,512,[],"f28a","M42.3 252.7c-16.1-19-24.7-45.9-24.8-79.9 0-100.4 75.2-153.1 167.2-153.1 98.6-1.6 156.8 49 184.3 70.6l-50.5 72.1-37.3-24.6 26.9-38.6c-36.5-24-79.4-36.5-123-35.8-50.7-.8-111.7 27.2-111.7 76.2 0 18.7 11.2 20.7 28.6 15.6 23.3-5.3 41.9.6 55.8 14 26.4 24.3 23.2 67.6-.7 91.9-29.2 29.5-85.2 27.3-114.8-8.4zm317.7 5.9c-15.5-18.8-38.9-29.4-63.2-28.6-38.1-2-71.1 28-70.5 67.2-.7 16.8 6 33 18.4 44.3 14.1 13.9 33 19.7 56.3 14.4 17.4-5.1 28.6-3.1 28.6 15.6 0 4.3-.5 8.5-1.4 12.7-16.7 40.9-59.5 64.4-121.4 64.4-51.9.2-102.4-16.4-144.1-47.3l33.7-39.4-35.6-27.4L0 406.3l15.4 13.8c52.5 46.8 120.4 72.5 190.7 72.2 51.4 0 94.4-10.5 133.6-44.1 57.1-51.4 54.2-149.2 20.3-189.6z"],searchengin:[460,512,[],"f3eb","M220.6 130.3l-67.2 28.2V43.2L98.7 233.5l54.7-24.2v130.3l67.2-209.3zm-83.2-96.7l-1.3 4.7-15.2 52.9C80.6 106.7 52 145.8 52 191.5c0 52.3 34.3 95.9 83.4 105.5v53.6C57.5 340.1 0 272.4 0 191.6c0-80.5 59.8-147.2 137.4-158zm311.4 447.2c-11.2 11.2-23.1 12.3-28.6 10.5-5.4-1.8-27.1-19.9-60.4-44.4-33.3-24.6-33.6-35.7-43-56.7-9.4-20.9-30.4-42.6-57.5-52.4l-9.7-14.7c-24.7 16.9-53 26.9-81.3 28.7l2.1-6.6 15.9-49.5c46.5-11.9 80.9-54 80.9-104.2 0-54.5-38.4-102.1-96-107.1V32.3C254.4 37.4 320 106.8 320 191.6c0 33.6-11.2 64.7-29 90.4l14.6 9.6c9.8 27.1 31.5 48 52.4 57.4s32.2 9.7 56.8 43c24.6 33.2 42.7 54.9 44.5 60.3s.7 17.3-10.5 28.5zm-9.9-17.9c0-4.4-3.6-8-8-8s-8 3.6-8 8 3.6 8 8 8 8-3.6 8-8z"],sellcast:[448,512,[],"f2da","M353.4 32H94.7C42.6 32 0 74.6 0 126.6v258.7C0 437.4 42.6 480 94.7 480h258.7c52.1 0 94.7-42.6 94.7-94.6V126.6c0-52-42.6-94.6-94.7-94.6zm-50 316.4c-27.9 48.2-89.9 64.9-138.2 37.2-22.9 39.8-54.9 8.6-42.3-13.2l15.7-27.2c5.9-10.3 19.2-13.9 29.5-7.9 18.6 10.8-.1-.1 18.5 10.7 27.6 15.9 63.4 6.3 79.4-21.3 15.9-27.6 6.3-63.4-21.3-79.4-17.8-10.2-.6-.4-18.6-10.6-24.6-14.2-3.4-51.9 21.6-37.5 18.6 10.8-.1-.1 18.5 10.7 48.4 28 65.1 90.3 37.2 138.5zm21.8-208.8c-17 29.5-16.3 28.8-19 31.5-6.5 6.5-16.3 8.7-26.5 3.6-18.6-10.8.1.1-18.5-10.7-27.6-15.9-63.4-6.3-79.4 21.3s-6.3 63.4 21.3 79.4c0 0 18.5 10.6 18.6 10.6 24.6 14.2 3.4 51.9-21.6 37.5-18.6-10.8.1.1-18.5-10.7-48.2-27.8-64.9-90.1-37.1-138.4 27.9-48.2 89.9-64.9 138.2-37.2l4.8-8.4c14.3-24.9 52-3.3 37.7 21.5z"],sellsy:[640,512,[],"f213","M539.71 237.308c3.064-12.257 4.29-24.821 4.29-37.384C544 107.382 468.618 32 376.076 32c-77.22 0-144.634 53.012-163.02 127.781-15.322-13.176-34.934-20.53-55.157-20.53-46.271 0-83.962 37.69-83.962 83.961 0 7.354.92 15.015 3.065 22.369-42.9 20.225-70.785 63.738-70.785 111.234C6.216 424.843 61.68 480 129.401 480h381.198c67.72 0 123.184-55.157 123.184-123.184.001-56.384-38.916-106.025-94.073-119.508zM199.88 401.554c0 8.274-7.048 15.321-15.321 15.321H153.61c-8.274 0-15.321-7.048-15.321-15.321V290.626c0-8.273 7.048-15.321 15.321-15.321h30.949c8.274 0 15.321 7.048 15.321 15.321v110.928zm89.477 0c0 8.274-7.048 15.321-15.322 15.321h-30.949c-8.274 0-15.321-7.048-15.321-15.321V270.096c0-8.274 7.048-15.321 15.321-15.321h30.949c8.274 0 15.322 7.048 15.322 15.321v131.458zm89.477 0c0 8.274-7.047 
15.321-15.321 15.321h-30.949c-8.274 0-15.322-7.048-15.322-15.321V238.84c0-8.274 7.048-15.321 15.322-15.321h30.949c8.274 0 15.321 7.048 15.321 15.321v162.714zm87.027 0c0 8.274-7.048 15.321-15.322 15.321h-28.497c-8.274 0-15.321-7.048-15.321-15.321V176.941c0-8.579 7.047-15.628 15.321-15.628h28.497c8.274 0 15.322 7.048 15.322 15.628v224.613z"],servicestack:[496,512,[],"f3ec","M88 216c81.7 10.2 273.7 102.3 304 232H0c99.5-8.1 184.5-137 88-232zm32-152c32.3 35.6 47.7 83.9 46.4 133.6C249.3 231.3 373.7 321.3 400 448h96C455.3 231.9 222.8 79.5 120 64z"],shirtsinbulk:[448,512,[],"f214","M395.208 221.583H406v33.542h-10.792v-33.542zm0-9.625H406v-33.542h-10.792v33.542zm0 86.333H406V264.75h-10.792v33.541zM358.75 135.25h-33.542v10.5h33.542v-10.5zm36.458 206.208H406v-33.542h-10.792v33.542zM311.5 135.25h-33.542v10.5H311.5v-10.5zm-47.25 0H231v10.5h33.25v-10.5zm-47.25 0h-33.25v10.5H217v-10.5zm178.208 33.542H406V135.25h-33.542v10.5h22.75v23.042zm-255.792 259l30.625 13.417 4.375-9.917-30.625-13.417-4.375 9.917zM179.083 445l30.334 13.708 4.374-9.916-30.333-13.417-4.375 9.625zm216.125-60.375H406v-33.542h-10.792v33.542zm-334.833 8.167L91 406.208l4.375-9.624-30.625-13.709-4.375 9.917zm39.666 17.499l30.625 13.417 4.375-9.917-30.625-13.416-4.375 9.916zm132.417 38.501l4.375 9.916L267.459 445l-4.375-9.625-30.626 13.417zm118.417-52.208l4.375 9.624 30.624-13.416-4.374-9.917-30.625 13.709zM311.5 413.791l4.375 9.917 30.625-13.417-4.374-9.916-30.626 13.416zm-39.667 17.501l4.375 9.917 30.625-13.417-4.375-9.917-30.625 13.417zM311.5 46.583h-33.542v10.5H311.5v-10.5zm94.209 0h-33.251v10.5h33.251v-10.5zm-188.709 0h-33.25v10.5H217v-10.5zm141.75 0h-33.542v10.5h33.542v-10.5zm-94.5 0H231v10.5h33.25v-10.5zM448 3.708v406l-226.334 98.584L0 409.708v-406h448zm-29.166 116.958H29.166V390.75l192.792 85.75 196.875-85.75V120.666zm0-87.791H29.166V91.5h389.667V32.875zM75.542 46.583H42.291v10.5h33.251v-10.5zm94.5 0H136.5v10.5h33.542v-10.5zm-47.251 0H89.25v10.5h33.542v-10.5zm7.584 236.542c0-50.167 41.125-91.292 91.292-91.292 50.458 0 91.292 41.125 91.292 91.292 0 50.458-40.833 91.292-91.292 91.292-50.167-.001-91.292-40.834-91.292-91.292zm120.75 18.084c0 13.125-23.917 14.291-32.666 14.291-12.25 0-29.75-2.625-35.875-14.875h-.875L172.666 319c14.876 9.333 29.167 12.25 47.25 12.25 19.542 0 51.042-5.833 51.042-31.209 0-48.125-78.458-16.333-78.458-37.916 0-13.125 20.708-14.875 29.75-14.875 10.791 0 29.166 3.208 35.583 13.124h.875l8.751-16.916c-15.167-6.125-27.417-11.959-44.334-11.959-20.125 0-49.583 6.417-49.583 31.792 0 44.334 77.583 11.959 77.583 37.918zM122.791 135.25H89.25v10.5h33.542v-10.5zm-69.999 10.5h22.75v-10.5H42v33.542h10.792V145.75zm0 32.666H42v33.542h10.792v-33.542zm117.25-43.166H136.5v10.5h33.542v-10.5zm-117.25 86.333H42v33.542h10.792v-33.542zm0 86.334H42v33.542h10.792v-33.542zm0-43.167H42v33.542h10.792V264.75zm0 86.333H42v33.542h10.792v-33.542z"],simplybuilt:[512,512,[],"f215","M481.2 64h-106c-14.5 0-26.6 11.8-26.6 26.3v39.6H163.3V90.3c0-14.5-12-26.3-26.6-26.3h-106C16.1 64 4.3 75.8 4.3 90.3v331.4c0 14.5 11.8 26.3 26.6 26.3h450.4c14.8 0 26.6-11.8 26.6-26.3V90.3c-.2-14.5-12-26.3-26.7-26.3zM149.8 355.8c-36.6 0-66.4-29.7-66.4-66.4 0-36.9 29.7-66.6 66.4-66.6 36.9 0 66.6 29.7 66.6 66.6 0 36.7-29.7 66.4-66.6 66.4zm212.4 0c-36.9 0-66.6-29.7-66.6-66.6 0-36.6 29.7-66.4 66.6-66.4 36.6 0 66.4 29.7 66.4 66.4 0 36.9-29.8 66.6-66.4 66.6z"],sistrix:[448,512,[],"f3ee","M448 449L301.2 300.2c20-27.9 31.9-62.2 31.9-99.2 0-93.1-74.7-168.9-166.5-168.9C74.7 32 0 107.8 0 200.9s74.7 168.9 166.5 168.9c39.8 0 76.3-14.2 105-37.9l146 148.1 30.5-31zM166.5 330.8c-70.6 
0-128.1-58.3-128.1-129.9S95.9 71 166.5 71s128.1 58.3 128.1 129.9-57.4 129.9-128.1 129.9z"],skyatlas:[576,512,[],"f216","M545.7 318.5c0 56.2-44.8 97.5-100.2 97.5-141.5 0-167.6-212.9-306.7-212.9-125.2 0-125.4 180.9 4.8 180.9 36.2 0 77.5-15.2 106.8-36.2 4.8-3.5 14.4-13.9 19.5-13.9s9.3 4.3 9.3 9.3c0 6.7-11.2 16.3-16 20.5-34.9 30.4-85.5 52.2-131.9 52.2C60.2 416 0 365.6 0 292.4s57.6-127.1 130.3-127.1c158 0 189.7 209.7 308.5 209.7 85.2 0 80.8-119.1 2.9-119.1-14.9 0-29.8 9.9-40 9.9-7.2 0-13.6-6.1-13.6-13.3 0-9.9 4.5-20.2 4.5-30.9 0-56.8-43.4-97.8-99.7-97.8-45.3 0-68.2 31.4-75.7 31.4-5.3 0-9.6-4.3-9.6-9.6 0-4.8 3.5-8.8 6.7-12.3C235.9 108.8 269.5 96 302 96c67.7 0 118.6 49.8 118.6 117.5 0 5.9-.3 11.7-1.1 17.6 10.1-2.7 20.5-4 30.6-4 51.9 0 95.6 38.6 95.6 91.4z"],skype:[448,512,[],"f17e","M424.7 299.8c2.9-14 4.7-28.9 4.7-43.8 0-113.5-91.9-205.3-205.3-205.3-14.9 0-29.7 1.7-43.8 4.7C161.3 40.7 137.7 32 112 32 50.2 32 0 82.2 0 144c0 25.7 8.7 49.3 23.3 68.2-2.9 14-4.7 28.9-4.7 43.8 0 113.5 91.9 205.3 205.3 205.3 14.9 0 29.7-1.7 43.8-4.7 19 14.6 42.6 23.3 68.2 23.3 61.8 0 112-50.2 112-112 .1-25.6-8.6-49.2-23.2-68.1zm-194.6 91.5c-65.6 0-120.5-29.2-120.5-65 0-16 9-30.6 29.5-30.6 31.2 0 34.1 44.9 88.1 44.9 25.7 0 42.3-11.4 42.3-26.3 0-18.7-16-21.6-42-28-62.5-15.4-117.8-22-117.8-87.2 0-59.2 58.6-81.1 109.1-81.1 55.1 0 110.8 21.9 110.8 55.4 0 16.9-11.4 31.8-30.3 31.8-28.3 0-29.2-33.5-75-33.5-25.7 0-42 7-42 22.5 0 19.8 20.8 21.8 69.1 33 41.4 9.3 90.7 26.8 90.7 77.6 0 59.1-57.1 86.5-112 86.5z"],slack:[448,512,[],"f198","M244.2 217.5l19.3 57.7-59.8 20-19.3-57.7 59.8-20zm41.4 243.7C131.6 507.4 65 471.6 18.8 317.6S8.4 97 162.4 50.8C316.4 4.6 383 40.4 429.2 194.4c46.2 154 10.4 220.6-143.6 266.8zM366.2 265c-3.9-12.2-17.2-18.6-29.4-14.7l-29 9.7-19.3-57.7 29-9.7c12.2-3.9 18.6-17.2 14.7-29.4-3.9-12.2-17.2-18.6-29.4-14.7l-29 9.7-10-30.1c-3.9-12.2-17.2-18.6-29.4-14.7-12.2 3.9-18.6 17.2-14.7 29.4l10 30.1-59.8 20.1-10-30.1c-3.9-12.2-17.2-18.6-29.4-14.7-12.2 3.9-18.6 17.2-14.7 29.4l10 30.1-29 9.7c-12.2 3.9-18.6 17.2-14.7 29.4 3.2 9.3 12.2 15.4 21.5 15.8 4.3.6 7.7-1 36.9-10.7l19.3 57.7-29 9.7c-12.2 3.9-18.6 17.2-14.7 29.4 3.2 9.3 12.2 15.4 21.5 15.8 4.3.6 7.7-1 36.9-10.7l10 30.1c3.7 10.8 15.8 18.6 29.4 14.7 12.2-3.9 18.6-17.2 14.7-29.4l-10-30.1 59.8-20.1 10 30.1c3.7 10.8 15.8 18.6 29.4 14.7 12.2-3.9 18.6-17.2 14.7-29.4l-10-30.1 29-9.7c12.2-4.2 18.6-17.5 14.7-29.6z"],"slack-hash":[448,512,[],"f3ef","M446.2 270.4c-6.2-19-26.9-29.1-46-22.9l-45.4 15.1-30.3-90 45.4-15.1c19.1-6.2 29.1-26.8 23-45.9-6.2-19-26.9-29.1-46-22.9l-45.4 15.1-15.7-47c-6.2-19-26.9-29.1-46-22.9-19.1 6.2-29.1 26.8-23 45.9l15.7 47-93.4 31.2-15.7-47c-6.2-19-26.9-29.1-46-22.9-19.1 6.2-29.1 26.8-23 45.9l15.7 47-45.3 15c-19.1 6.2-29.1 26.8-23 45.9 5 14.5 19.1 24 33.6 24.6 6.8 1 12-1.6 57.7-16.8l30.3 90L78 354.8c-19 6.2-29.1 26.9-23 45.9 5 14.5 19.1 24 33.6 24.6 6.8 1 12-1.6 57.7-16.8l15.7 47c5.9 16.9 24.7 29 46 22.9 19.1-6.2 29.1-26.8 23-45.9l-15.7-47 93.6-31.3 15.7 47c5.9 16.9 24.7 29 46 22.9 19.1-6.2 29.1-26.8 23-45.9l-15.7-47 45.4-15.1c19-6 29.1-26.7 22.9-45.7zm-254.1 47.2l-30.3-90.2 93.5-31.3 30.3 90.2-93.5 31.3z"],slideshare:[512,512,[],"f1e7","M249.429 211.436c0 31.716-27.715 57.717-61.717 57.717-34.001 0-61.716-26.001-61.716-57.717 0-32.001 27.715-57.716 61.716-57.716 34.001 0 61.717 25.715 61.717 57.716zm254.294 50.002c-18.286 22.573-53.144 50.288-106.289 72.003C453.722 525.163 260 555.735 263.143 457.446c0 1.714-.286-52.859-.286-93.432-4.285-.858-8.571-2-13.714-3.143 0 40.858-.286 98.289-.286 96.575C252 555.735 58.278 525.163 114.566 
333.441c-53.145-21.715-88.003-49.43-106.29-72.003-9.143-13.714.858-28.287 16.001-17.715 2 1.428 4.285 2.857 6.285 4.285V49.716C30.563 22.287 51.135 0 76.565 0h359.157c25.429 0 46.002 22.287 46.002 49.716v198.293l6-4.285c15.143-10.573 25.143 4 15.999 17.714zm-46.572-189.15c0-32.858-10.572-45.716-40.859-45.716H98.566c-31.716 0-40.573 10.858-40.573 45.716v192.293c67.717 35.43 125.72 29.144 157.435 28.001 13.429-.286 22.001 2.286 27.144 7.715 1.689 1.687 10.023 9.446 20.287 17.143 1.143-15.715 10.001-25.715 33.716-24.858 32.287 1.428 91.718 7.715 160.577-29.716V72.288zM331.146 153.72c-34.002 0-61.716 25.715-61.716 57.716 0 31.716 27.715 57.717 61.716 57.717 34.287 0 61.716-26.001 61.716-57.717 0-32.001-27.429-57.716-61.716-57.716z"],snapchat:[496,512,[],"f2ab","M248 8C111 8 0 119 0 256s111 248 248 248 248-111 248-248S385 8 248 8zm169.5 338.9c-3.5 8.1-18.1 14-44.8 18.2-1.4 1.9-2.5 9.8-4.3 15.9-1.1 3.7-3.7 5.9-8.1 5.9h-.2c-6.2 0-12.8-2.9-25.8-2.9-17.6 0-23.7 4-37.4 13.7-14.5 10.3-28.4 19.1-49.2 18.2-21 1.6-38.6-11.2-48.5-18.2-13.8-9.7-19.8-13.7-37.4-13.7-12.5 0-20.4 3.1-25.8 3.1-5.4 0-7.5-3.3-8.3-6-1.8-6.1-2.9-14.1-4.3-16-13.8-2.1-44.8-7.5-45.5-21.4-.2-3.6 2.3-6.8 5.9-7.4 46.3-7.6 67.1-55.1 68-57.1 0-.1.1-.2.2-.3 2.5-5 3-9.2 1.6-12.5-3.4-7.9-17.9-10.7-24-13.2-15.8-6.2-18-13.4-17-18.3 1.6-8.5 14.4-13.8 21.9-10.3 5.9 2.8 11.2 4.2 15.7 4.2 3.3 0 5.5-.8 6.6-1.4-1.4-23.9-4.7-58 3.8-77.1C183.1 100 230.7 96 244.7 96c.6 0 6.1-.1 6.7-.1 34.7 0 68 17.8 84.3 54.3 8.5 19.1 5.2 53.1 3.8 77.1 1.1.6 2.9 1.3 5.7 1.4 4.3-.2 9.2-1.6 14.7-4.2 4-1.9 9.6-1.6 13.6 0 6.3 2.3 10.3 6.8 10.4 11.9.1 6.5-5.7 12.1-17.2 16.6-1.4.6-3.1 1.1-4.9 1.7-6.5 2.1-16.4 5.2-19 11.5-1.4 3.3-.8 7.5 1.6 12.5.1.1.1.2.2.3.9 2 21.7 49.5 68 57.1 4 1 7.1 5.5 4.9 10.8z"],"snapchat-ghost":[512,512,[],"f2ac","M510.846 392.673c-5.211 12.157-27.239 21.089-67.36 27.318-2.064 2.786-3.775 14.686-6.507 23.956-1.625 5.566-5.623 8.869-12.128 8.869l-.297-.005c-9.395 0-19.203-4.323-38.852-4.323-26.521 0-35.662 6.043-56.254 20.588-21.832 15.438-42.771 28.764-74.027 27.399-31.646 2.334-58.025-16.908-72.871-27.404-20.714-14.643-29.828-20.582-56.241-20.582-18.864 0-30.736 4.72-38.852 4.72-8.073 0-11.213-4.922-12.422-9.04-2.703-9.189-4.404-21.263-6.523-24.13-20.679-3.209-67.31-11.344-68.498-32.15a10.627 10.627 0 0 1 8.877-11.069c69.583-11.455 100.924-82.901 102.227-85.934.074-.176.155-.344.237-.515 3.713-7.537 4.544-13.849 2.463-18.753-5.05-11.896-26.872-16.164-36.053-19.796-23.715-9.366-27.015-20.128-25.612-27.504 2.437-12.836 21.725-20.735 33.002-15.453 8.919 4.181 16.843 6.297 23.547 6.297 5.022 0 8.212-1.204 9.96-2.171-2.043-35.936-7.101-87.29 5.687-115.969C158.122 21.304 229.705 15.42 250.826 15.42c.944 0 9.141-.089 10.11-.089 52.148 0 102.254 26.78 126.723 81.643 12.777 28.65 7.749 79.792 5.695 116.009 1.582.872 4.357 1.942 8.599 2.139 6.397-.286 13.815-2.389 22.069-6.257 6.085-2.846 14.406-2.461 20.48.058l.029.01c9.476 3.385 15.439 10.215 15.589 17.87.184 9.747-8.522 18.165-25.878 25.018-2.118.835-4.694 1.655-7.434 2.525-9.797 3.106-24.6 7.805-28.616 17.271-2.079 4.904-1.256 11.211 2.46 18.748.087.168.166.342.239.515 1.301 3.03 32.615 74.46 102.23 85.934 6.427 1.058 11.163 7.877 7.725 15.859z"],"snapchat-square":[448,512,[],"f2ad","M400 32H48C21.5 32 0 53.5 0 80v352c0 26.5 21.5 48 48 48h352c26.5 0 48-21.5 48-48V80c0-26.5-21.5-48-48-48zm-6.5 314.9c-3.5 8.1-18.1 14-44.8 18.2-1.4 1.9-2.5 9.8-4.3 15.9-1.1 3.7-3.7 5.9-8.1 5.9h-.2c-6.2 0-12.8-2.9-25.8-2.9-17.6 0-23.7 4-37.4 13.7-14.5 10.3-28.4 19.1-49.2 18.2-21 
1.6-38.6-11.2-48.5-18.2-13.8-9.7-19.8-13.7-37.4-13.7-12.5 0-20.4 3.1-25.8 3.1-5.4 0-7.5-3.3-8.3-6-1.8-6.1-2.9-14.1-4.3-16-13.8-2.1-44.8-7.5-45.5-21.4-.2-3.6 2.3-6.8 5.9-7.4 46.3-7.6 67.1-55.1 68-57.1 0-.1.1-.2.2-.3 2.5-5 3-9.2 1.6-12.5-3.4-7.9-17.9-10.7-24-13.2-15.8-6.2-18-13.4-17-18.3 1.6-8.5 14.4-13.8 21.9-10.3 5.9 2.8 11.2 4.2 15.7 4.2 3.3 0 5.5-.8 6.6-1.4-1.4-23.9-4.7-58 3.8-77.1C159.1 100 206.7 96 220.7 96c.6 0 6.1-.1 6.7-.1 34.7 0 68 17.8 84.3 54.3 8.5 19.1 5.2 53.1 3.8 77.1 1.1.6 2.9 1.3 5.7 1.4 4.3-.2 9.2-1.6 14.7-4.2 4-1.9 9.6-1.6 13.6 0 6.3 2.3 10.3 6.8 10.4 11.9.1 6.5-5.7 12.1-17.2 16.6-1.4.6-3.1 1.1-4.9 1.7-6.5 2.1-16.4 5.2-19 11.5-1.4 3.3-.8 7.5 1.6 12.5.1.1.1.2.2.3.9 2 21.7 49.5 68 57.1 4 1 7.1 5.5 4.9 10.8z"],soundcloud:[640,512,[],"f1be","M111.4 256.3l5.8 65-5.8 68.3c-.3 2.5-2.2 4.4-4.4 4.4s-4.2-1.9-4.2-4.4l-5.6-68.3 5.6-65c0-2.2 1.9-4.2 4.2-4.2 2.2 0 4.1 2 4.4 4.2zm21.4-45.6c-2.8 0-4.7 2.2-5 5l-5 105.6 5 68.3c.3 2.8 2.2 5 5 5 2.5 0 4.7-2.2 4.7-5l5.8-68.3-5.8-105.6c0-2.8-2.2-5-4.7-5zm25.5-24.1c-3.1 0-5.3 2.2-5.6 5.3l-4.4 130 4.4 67.8c.3 3.1 2.5 5.3 5.6 5.3 2.8 0 5.3-2.2 5.3-5.3l5.3-67.8-5.3-130c0-3.1-2.5-5.3-5.3-5.3zM7.2 283.2c-1.4 0-2.2 1.1-2.5 2.5L0 321.3l4.7 35c.3 1.4 1.1 2.5 2.5 2.5s2.2-1.1 2.5-2.5l5.6-35-5.6-35.6c-.3-1.4-1.1-2.5-2.5-2.5zm23.6-21.9c-1.4 0-2.5 1.1-2.5 2.5l-6.4 57.5 6.4 56.1c0 1.7 1.1 2.8 2.5 2.8s2.5-1.1 2.8-2.5l7.2-56.4-7.2-57.5c-.3-1.4-1.4-2.5-2.8-2.5zm25.3-11.4c-1.7 0-3.1 1.4-3.3 3.3L47 321.3l5.8 65.8c.3 1.7 1.7 3.1 3.3 3.1 1.7 0 3.1-1.4 3.1-3.1l6.9-65.8-6.9-68.1c0-1.9-1.4-3.3-3.1-3.3zm25.3-2.2c-1.9 0-3.6 1.4-3.6 3.6l-5.8 70 5.8 67.8c0 2.2 1.7 3.6 3.6 3.6s3.6-1.4 3.9-3.6l6.4-67.8-6.4-70c-.3-2.2-2-3.6-3.9-3.6zm241.4-110.9c-1.1-.8-2.8-1.4-4.2-1.4-2.2 0-4.2.8-5.6 1.9-1.9 1.7-3.1 4.2-3.3 6.7v.8l-3.3 176.7 1.7 32.5 1.7 31.7c.3 4.7 4.2 8.6 8.9 8.6s8.6-3.9 8.6-8.6l3.9-64.2-3.9-177.5c-.4-3-2-5.8-4.5-7.2zm-26.7 15.3c-1.4-.8-2.8-1.4-4.4-1.4s-3.1.6-4.4 1.4c-2.2 1.4-3.6 3.9-3.6 6.7l-.3 1.7-2.8 160.8s0 .3 3.1 65.6v.3c0 1.7.6 3.3 1.7 4.7 1.7 1.9 3.9 3.1 6.4 3.1 2.2 0 4.2-1.1 5.6-2.5 1.7-1.4 2.5-3.3 2.5-5.6l.3-6.7 3.1-58.6-3.3-162.8c-.3-2.8-1.7-5.3-3.9-6.7zm-111.4 22.5c-3.1 0-5.8 2.8-5.8 6.1l-4.4 140.6 4.4 67.2c.3 3.3 2.8 5.8 5.8 5.8 3.3 0 5.8-2.5 6.1-5.8l5-67.2-5-140.6c-.2-3.3-2.7-6.1-6.1-6.1zm376.7 62.8c-10.8 0-21.1 2.2-30.6 6.1-6.4-70.8-65.8-126.4-138.3-126.4-17.8 0-35 3.3-50.3 9.4-6.1 2.2-7.8 4.4-7.8 9.2v249.7c0 5 3.9 8.6 8.6 9.2h218.3c43.3 0 78.6-35 78.6-78.3.1-43.6-35.2-78.9-78.5-78.9zm-296.7-60.3c-4.2 0-7.5 3.3-7.8 7.8l-3.3 136.7 3.3 65.6c.3 4.2 3.6 7.5 7.8 7.5 4.2 0 7.5-3.3 7.5-7.5l3.9-65.6-3.9-136.7c-.3-4.5-3.3-7.8-7.5-7.8zm-53.6-7.8c-3.3 0-6.4 3.1-6.4 6.7l-3.9 145.3 3.9 66.9c.3 3.6 3.1 6.4 6.4 6.4 3.6 0 6.4-2.8 6.7-6.4l4.4-66.9-4.4-145.3c-.3-3.6-3.1-6.7-6.7-6.7zm26.7 3.4c-3.9 0-6.9 3.1-6.9 6.9L227 321.3l3.9 66.4c.3 3.9 3.1 6.9 6.9 6.9s6.9-3.1 6.9-6.9l4.2-66.4-4.2-141.7c0-3.9-3-6.9-6.9-6.9z"],speakap:[448,512,[],"f3f3","M352 32H96C43.2 32 0 75.2 0 128v256c0 52.8 43.2 96 96 96h256c52.8 0 96-43.2 96-96V128c0-52.8-43.2-96-96-96zM221 382.9c-39.6 0-81.9-17.8-81.9-53.7V302H179v17.8c0 15.1 19.5 24.5 41.9 24.5 24.2 0 41.3-10.4 41.3-29.5 0-23.8-27.2-31.9-54.7-42.6-31.9-12.4-63.1-26.2-63.1-69.1 0-48 38.6-66.4 79.9-66.4 37.6 0 75.5 14.1 75.5 41.9v31.2h-39.9v-16.1c0-12.1-17.8-18.5-35.6-18.5-19.5 0-35.6 8.1-35.6 26.2 0 22.1 22.5 29.2 47 38.9 35.9 12.4 71.1 27.2 71.1 71.5.1 48.6-40.8 71.1-85.8 71.1z"],spotify:[496,512,[],"f1bc","M248 8C111.1 8 0 119.1 0 256s111.1 248 248 248 248-111.1 248-248S384.9 8 248 8zm100.7 364.9c-4.2 
0-6.8-1.3-10.7-3.6-62.4-37.6-135-39.2-206.7-24.5-3.9 1-9 2.6-11.9 2.6-9.7 0-15.8-7.7-15.8-15.8 0-10.3 6.1-15.2 13.6-16.8 81.9-18.1 165.6-16.5 237 26.2 6.1 3.9 9.7 7.4 9.7 16.5s-7.1 15.4-15.2 15.4zm26.9-65.6c-5.2 0-8.7-2.3-12.3-4.2-62.5-37-155.7-51.9-238.6-29.4-4.8 1.3-7.4 2.6-11.9 2.6-10.7 0-19.4-8.7-19.4-19.4s5.2-17.8 15.5-20.7c27.8-7.8 56.2-13.6 97.8-13.6 64.9 0 127.6 16.1 177 45.5 8.1 4.8 11.3 11 11.3 19.7-.1 10.8-8.5 19.5-19.4 19.5zm31-76.2c-5.2 0-8.4-1.3-12.9-3.9-71.2-42.5-198.5-52.7-280.9-29.7-3.6 1-8.1 2.6-12.9 2.6-13.2 0-23.3-10.3-23.3-23.6 0-13.6 8.4-21.3 17.4-23.9 35.2-10.3 74.6-15.2 117.5-15.2 73 0 149.5 15.2 205.4 47.8 7.8 4.5 12.9 10.7 12.9 22.6 0 13.6-11 23.3-23.2 23.3z"],"stack-exchange":[448,512,[],"f18d","M43.5 322.8h361.1V342c0 33-25.7 59.5-57.2 59.5h-16.6L254.9 480v-78.5H100.6c-31.5 0-57.2-26.5-57.2-59.5v-19.2zm0-20.7h361.1v-74.4H43.5v74.4zm0-95.7h361.1V132H43.5v74.4zM347.4 32H100.6c-31.5 0-57.2 26.5-57.2 59.2v19.5h361.1V91.2c0-32.7-25.6-59.2-57.1-59.2z"],"stack-overflow":[384,512,[],"f16c","M293.7 300l-181.2-84.5 16.7-36.5 181.3 84.7-16.8 36.3zm48-76L188.2 95.7l-25.5 30.8 153.5 128.3 25.5-30.8zm39.6-31.7L262 32l-32 24 119.3 160.3 32-24zM290.7 311L95 269.7 86.8 309l195.7 41 8.2-39zm31.6 129H42.7V320h-40v160h359.5V320h-40v120zm-39.8-80h-200v39.7h200V360z"],staylinked:[440,512,[],"f3f5","M201.6 127.4c4.1-3.2 10.3-3 13.8.5l170 167.3-2.7-2.7 44.3 41.3c3.7 3.5 3.3 9-.7 12.2l-198 163.9c-9.9 7.6-17.3.8-17.3.8L2.3 314.6c-3.5-3.5-3-9 1.2-12.2l45.8-34.9c4.2-3.2 10.4-3 13.9.5l151.9 147.5c3.7 3.5 10 3.7 14.2.4l93.2-74c4.1-3.2 4.5-8.7.9-12.2l-84-81.3c-3.6-3.5-9.9-3.7-14-.5l-.1.1c-4.1 3.2-10.4 3-14-.5l-68.1-64.3c-3.5-3.5-3.1-9 1.1-12.2l57.3-43.6m14.8 257.3c3.7 3.5 10.1 3.7 14.3.4l50.2-38.8-.3-.3 7.7-6c4.2-3.2 4.6-8.7.9-12.2l-57.1-54.4c-3.6-3.5-10-3.7-14.2-.5l-.1.1c-4.2 3.2-10.5 3.1-14.2-.4L109 180.8c-3.6-3.5-3.1-8.9 1.1-12.2l92.2-71.5c4.1-3.2 10.3-3 13.9.5l160.4 159c3.7 3.5 10 3.7 14.1.5l45.8-35.8c4.1-3.2 4.4-8.7.7-12.2L226.7 2.5c-1.5-1.2-8-5.5-16.3 1.1L3.6 165.7c-4.2 3.2-4.8 8.7-1.2 12.2l42.3 41.7"],steam:[496,512,[],"f1b6","M496 256c0 137-111.2 248-248.4 248-113.8 0-209.6-76.3-239-180.4l95.2 39.3c6.4 32.1 34.9 56.4 68.9 56.4 39.2 0 71.9-32.4 70.2-73.5l84.5-60.2c52.1 1.3 95.8-40.9 95.8-93.5 0-51.6-42-93.5-93.7-93.5s-93.7 42-93.7 93.5v1.2L176.6 279c-15.5-.9-30.7 3.4-43.5 12.1L0 236.1C10.2 108.4 117.1 8 247.6 8 384.8 8 496 119 496 256zM155.7 384.3l-30.5-12.6a52.79 52.79 0 0 0 27.2 25.8c26.9 11.2 57.8-1.6 69-28.4 5.4-13 5.5-27.3.1-40.3-5.4-13-15.5-23.2-28.5-28.6-12.9-5.4-26.7-5.2-38.9-.6l31.5 13c19.8 8.2 29.2 30.9 20.9 50.7-8.3 19.9-31 29.2-50.8 21zm173.8-129.9c-34.4 0-62.4-28-62.4-62.3s28-62.3 62.4-62.3 62.4 28 62.4 62.3-27.9 62.3-62.4 62.3zm.1-15.6c25.9 0 46.9-21 46.9-46.8 0-25.9-21-46.8-46.9-46.8s-46.9 21-46.9 46.8c.1 25.8 21.1 46.8 46.9 46.8z"],"steam-square":[448,512,[],"f1b7","M185.2 356.5c7.7-18.5-1-39.7-19.6-47.4l-29.5-12.2c11.4-4.3 24.3-4.5 36.4.5 12.2 5.1 21.6 14.6 26.7 26.7 5 12.2 5 25.6-.1 37.7-10.5 25.1-39.4 37-64.6 26.5-11.6-4.8-20.4-13.6-25.4-24.2l28.5 11.8c18.6 7.8 39.9-.9 47.6-19.4zM400 32H48C21.5 32 0 53.5 0 80v160.7l116.6 48.1c12-8.2 26.2-12.1 40.7-11.3l55.4-80.2v-1.1c0-48.2 39.3-87.5 87.6-87.5s87.6 39.3 87.6 87.5c0 49.2-40.9 88.7-89.6 87.5l-79 56.3c1.6 38.5-29.1 68.8-65.7 68.8-31.8 0-58.5-22.7-64.5-52.7L0 319.2V432c0 26.5 21.5 48 48 48h352c26.5 0 48-21.5 48-48V80c0-26.5-21.5-48-48-48zm-99.7 222.5c-32.2 0-58.4-26.1-58.4-58.3s26.2-58.3 58.4-58.3 58.4 26.2 58.4 58.3-26.2 58.3-58.4 58.3zm.1-14.6c24.2 0 43.9-19.6 43.9-43.8 0-24.2-19.6-43.8-43.9-43.8-24.2 0-43.9 19.6-43.9 
43.8 0 24.2 19.7 43.8 43.9 43.8z"],"steam-symbol":[448,512,[],"f3f6","M395.5 177.5c0 33.8-27.5 61-61 61-33.8 0-61-27.3-61-61s27.3-61 61-61c33.5 0 61 27.2 61 61zm52.5.2c0 63-51 113.8-113.7 113.8L225 371.3c-4 43-40.5 76.8-84.5 76.8-40.5 0-74.7-28.8-83-67L0 358V250.7L97.2 290c15.1-9.2 32.2-13.3 52-11.5l71-101.7c.5-62.3 51.5-112.8 114-112.8C397 64 448 115 448 177.7zM203 363c0-34.7-27.8-62.5-62.5-62.5-4.5 0-9 .5-13.5 1.5l26 10.5c25.5 10.2 38 39 27.7 64.5-10.2 25.5-39.2 38-64.7 27.5-10.2-4-20.5-8.3-30.7-12.2 10.5 19.7 31.2 33.2 55.2 33.2 34.7 0 62.5-27.8 62.5-62.5zm207.5-185.3c0-42-34.3-76.2-76.2-76.2-42.3 0-76.5 34.2-76.5 76.2 0 42.2 34.3 76.2 76.5 76.2 41.9.1 76.2-33.9 76.2-76.2z"],"sticker-mule":[576,512,[],"f3f7","M353.1 509.8c-5.9 2.9-32.1 3.2-36.5-.5-4.1-3-2.2-11.9-1.5-15 2.2-15-2.5-7.9-9.8-11.5-3.1-1.5-4.1-5.5-4.6-10-.5-1.5-1-2.5-1.5-3.5-1.7-10.7 6.8-33.6 8.2-43.4 4.9-23.7-.7-37.2 1.5-46.9 3.7-16.2 4.1-3.5 4.1-29.9-1.4-25.9 3.3-36.9.5-38.9-14.8 0-64.3 10.7-112.2 2-46.1-8.9-59.4-29-65.4-30.9-10.3-4.5-23.2.5-27.3 7-.1.1-35 70.6-39.6 87.8-6.2 20.5-.5 47.4 4.1 66.8 0 .1 4.5 14.6 10.3 19.5 2.1 1.5 5.1 2.5 7.2 4.5 2.8 2.7 9.4 15.2 9.8 16 2.6 4.5 3.6 8-1.5 10.5-3.6 2-9.3 2.5-14.4 2.5-2.6.5-1.5 3.5-3.1 5-2.9 2.8-20.7 6.1-29.9 2.5-2.6-1-5.7-3-6.2-5-1.5-4 2.1-9-1-12.5-4.5-2.9-13.1-2-17-12-2.2-5.4-2.6-7.6-2.6-49.4 0-9.7-5.9-38.7-8.2-46.9-1.5-5.5-1.5-11.5 0-16 .3-.9 4.1-4.6 4.1-13-1-1.5-4.6-.5-5.1-1.5-10.4-80.6-5.9-79-7.7-98.3-1.5-16-10.9-43.9-6.7-64.3.5-2.4 3.4-21 24.2-38.9 31-26.7 48.4-38.3 159-11.5 1.1.4 66.3 21.1 110.7-9 15.5-11.3 28.8-11.3 35.5-16 .1-.1 61.7-52.1 87-65.3 47.2-29.4 69.9-16.7 75.1-18 4.7-1 13.4-25.8 17-25.8 5.5 0 1.6 20.2 3.6 25.9.5 2 3.6 5 6.2 5 2.3 0 1.7-.8 10.3-5 8.4-5.4 14.9-17.6 20.6-17 11.7 1.6-19 41.6-19 46.9 0 2 .2.8 4.6 9.5 2.6 5.5 4.6 13.5 6.2 20 8.3 29.7 5.7 14.6 13.4 36.9 20.2 50.1 20.6 45.2 20.6 52.9 0 7.5-4.1 11-7.2 16.5-1.5 3-4.6 7.5-7.2 8-2.7.7 7-1.5-13.4 2.5-7.2 1-13.4-4.5-14.9-9.5-1.6-4.7 2.8-10.1-11.8-22.9-10.3-10-21.1-11.3-31.9-17-9.8-5.7-11.9 1-18 8-18 22.9-34 46.9-52 69.8-11.8 15-24.2 30.4-33.5 47.4-3.9 6.8-9.5 28.1-10.3 29.9-6.2 17.7-5.5 25.8-16.5 68.3-3.1 10-5.7 21.4-8.7 32.4-2.2 6.8-7.4 49.3-.5 59.4 2.1 3.5 8.7 4.5 11.3 8 .1.1 9.6 18.2 9.3 20 0 6.1-9.4 5.6-11.3 6.5-4.8 2.9-3.8 5.9-6.4 7.4"],strava:[369,512,[],"f428","M301.6 292l-43.9 88.2-44.6-88.2h-67.6l112.2 220 111.5-220h-67.6zM151.4 0L0 292h89.2l62.2-116.1L213.1 292h88.5L151.4 0z"],stripe:[640,512,[],"f429","M640 233.6c0-45.5-22-81.4-64.2-81.4s-67.9 35.9-67.9 81.1c0 53.5 30.3 78.2 73.5 78.2 21.2 0 37.1-4.8 49.2-11.5v-33.4c-12.1 6.1-26 9.8-43.6 9.8-17.3 0-32.5-6.1-34.5-26.9h86.9c.2-2.3.6-11.6.6-15.9m-87.9-16.8c0-20 12.3-28.4 23.4-28.4 10.9 0 22.5 8.4 22.5 28.4h-45.9zm-112.9-64.6c-17.4 0-28.6 8.2-34.8 13.9l-2.3-11H363v204.8l44.4-9.4.1-50.2c6.4 4.7 15.9 11.2 31.4 11.2 31.8 0 60.8-23.2 60.8-79.6.1-51.6-29.3-79.7-60.5-79.7m-10.6 122.5c-10.4 0-16.6-3.8-20.9-8.4l-.3-66c4.6-5.1 11-8.8 21.2-8.8 16.2 0 27.4 18.2 27.4 41.4.1 23.9-10.9 41.8-27.4 41.8M346.4 96v36.2l-44.6 9.5v-36.2l44.6-9.5m-44.5 59.2h44.6v153.2h-44.6V155.2zm-47.8 13.1c10.4-19.1 31.1-15.2 37.1-13.1V196c-5.7-1.8-23.4-4.5-33.9 9.3v103.1H213V155.2h38.4l2.7 13.1m-89-13.1h33.7V193h-33.7v63.2c0 26.2 28 18 33.7 15.7v33.8c-5.9 3.2-16.6 5.9-31.2 5.9-26.3 0-46.1-17-46.1-43.3l.2-142.4 43.3-9.2.1 38.5zM44.9 200.3c0 20 67.9 10.5 67.9 63.4 0 32-25.4 47.8-62.3 47.8-15.3 0-32-3-48.5-10.1v-40c14.9 8.1 33.9 14.2 48.6 14.2 9.9 0 17-2.7 17-10.9 0-21.2-67.5-13.2-67.5-62.4 0-31.4 24-50.2 60-50.2 14.7 0 29.4 2.3 44.1 8.1V202c-13.5-7.3-30.7-11.4-44.2-11.4-9.3.1-15.1 
2.8-15.1 9.7"],"stripe-s":[362,512,[],"f42a","M144.3 154.6c0-22.3 18.6-30.9 48.4-30.9 43.4 0 98.5 13.3 141.9 36.7V26.1C287.3 7.2 240.1 0 192.8 0 77.1 0 0 60.4 0 161.4c0 157.9 216.8 132.3 216.8 200.4 0 26.4-22.9 34.9-54.7 34.9-47.2 0-108.2-19.5-156.1-45.5v128.5c53 22.8 106.8 32.4 156 32.4 118.6 0 200.3-51 200.3-153.6 0-170.2-218-139.7-218-203.9"],studiovinari:[512,512,[],"f3f8","M480.3 187.7l4.2 28v28l-25.1 44.1-39.8 78.4-56.1 67.5-79.1 37.8-17.7 24.5-7.7 12-9.6 4s17.3-63.6 19.4-63.6c2.1 0 20.3.7 20.3.7l66.7-38.6-92.5 26.1-55.9 36.8-22.8 28-6.6 1.4 20.8-73.6 6.9-5.5 20.7 12.9 88.3-45.2 56.8-51.5 14.8-68.4-125.4 23.3 15.2-18.2-173.4-53.3 81.9-10.5-166-122.9L133.5 108 32.2 0l252.9 126.6-31.5-38L378 163 234.7 64l18.7 38.4-49.6-18.1L158.3 0l194.6 122L310 66.2l108 96.4 12-8.9-21-16.4 4.2-37.8L451 89.1l29.2 24.7 11.5 4.2-7 6.2 8.5 12-13.1 7.4-10.3 20.2 10.5 23.9z"],stumbleupon:[512,512,[],"f1a4","M502.9 266v69.7c0 62.1-50.3 112.4-112.4 112.4-61.8 0-112.4-49.8-112.4-111.3v-70.2l34.3 16 51.1-15.2V338c0 14.7 12 26.5 26.7 26.5S417 352.7 417 338v-72h85.9zm-224.7-58.2l34.3 16 51.1-15.2V173c0-60.5-51.1-109-112.1-109-60.8 0-112.1 48.2-112.1 108.2v162.4c0 14.9-12 26.7-26.7 26.7S86 349.5 86 334.6V266H0v69.7C0 397.7 50.3 448 112.4 448c61.6 0 112.4-49.5 112.4-110.8V176.9c0-14.7 12-26.7 26.7-26.7s26.7 12 26.7 26.7v30.9z"],"stumbleupon-circle":[496,512,[],"f1a3","M256 8C119 8 8 119 8 256s111 248 248 248 248-111 248-248S393 8 256 8zm0 177.5c-9.8 0-17.8 8-17.8 17.8v106.9c0 40.9-33.9 73.9-74.9 73.9-41.4 0-74.9-33.5-74.9-74.9v-46.5h57.3v45.8c0 10 8 17.8 17.8 17.8s17.8-7.9 17.8-17.8V200.1c0-40 34.2-72.1 74.7-72.1 40.7 0 74.7 32.3 74.7 72.6v23.7l-34.1 10.1-22.9-10.7v-20.6c.1-9.6-7.9-17.6-17.7-17.6zm167.6 123.6c0 41.4-33.5 74.9-74.9 74.9-41.2 0-74.9-33.2-74.9-74.2V263l22.9 10.7 34.1-10.1v47.1c0 9.8 8 17.6 17.8 17.6s17.8-7.9 17.8-17.6v-48h57.3c-.1 45.9-.1 46.4-.1 46.4z"],superpowers:[448,512,[],"f2dd","M448 32c-83.3 11-166.8 22-250 33-92 12.5-163.3 86.7-169 180-3.3 55.5 18 109.5 57.8 148.2L0 480c83.3-11 166.5-22 249.8-33 91.8-12.5 163.3-86.8 168.7-179.8 3.5-55.5-18-109.5-57.7-148.2L448 32zm-79.7 232.3c-4.2 79.5-74 139.2-152.8 134.5-79.5-4.7-140.7-71-136.3-151 4.5-79.2 74.3-139.3 153-134.5 79.3 4.7 140.5 71 136.1 151z"],supple:[640,512,[],"f3f9","M640 262.5c0 64.1-109 116.1-243.5 116.1-24.8 0-48.6-1.8-71.1-5 7.7.4 15.5.6 23.4.6 134.5 0 243.5-56.9 243.5-127.1 0-29.4-19.1-56.4-51.2-78 60 21.1 98.9 55.1 98.9 93.4zM47.7 227.9c-.1-70.2 108.8-127.3 243.3-127.6 7.9 0 15.6.2 23.3.5-22.5-3.2-46.3-4.9-71-4.9C108.8 96.3-.1 148.5 0 212.6c.1 38.3 39.1 72.3 99.3 93.3-32.3-21.5-51.5-48.6-51.6-78zm60.2 39.9s10.5 13.2 29.3 13.2c17.9 0 28.4-11.5 28.4-25.1 0-28-40.2-25.1-40.2-39.7 0-5.4 5.3-9.1 12.5-9.1 5.7 0 11.3 2.6 11.3 6.6v3.9h14.2v-7.9c0-12.1-15.4-16.8-25.4-16.8-16.5 0-28.5 10.2-28.5 24.1 0 26.6 40.2 25.4 40.2 39.9 0 6.6-5.8 10.1-12.3 10.1-11.9 0-20.7-10.1-20.7-10.1l-8.8 10.9zm120.8-73.6v54.4c0 11.3-7.1 17.8-17.8 17.8-10.7 0-17.8-6.5-17.8-17.7v-54.5h-15.8v55c0 18.9 13.4 31.9 33.7 31.9 20.1 0 33.4-13 33.4-31.9v-55h-15.7zm34.4 85.4h15.8v-29.5h15.5c16 0 27.2-11.5 27.2-28.1s-11.2-27.8-27.2-27.8h-39.1v13.4h7.8v72zm15.8-43v-29.1h12.9c8.7 0 13.7 5.7 13.7 14.4 0 8.9-5.1 14.7-14 14.7h-12.6zm57 43h15.8v-29.5h15.5c16 0 27.2-11.5 27.2-28.1s-11.2-27.8-27.2-27.8h-39.1v13.4h7.8v72zm15.7-43v-29.1h12.9c8.7 0 13.7 5.7 13.7 14.4 0 8.9-5 14.7-14 14.7h-12.6zm57.1 34.8c0 5.8 2.4 8.2 8.2 8.2h37.6c5.8 0 8.2-2.4 8.2-8.2v-13h-14.3v5.2c0 1.7-1 2.6-2.6 2.6h-18.6c-1.7 0-2.6-1-2.6-2.6v-61.2c0-5.7-2.4-8.2-8.2-8.2H401v13.4h5.2c1.7 0 2.6 1 2.6 2.6v61.2zm63.4 0c0 
5.8 2.4 8.2 8.2 8.2H519c5.7 0 8.2-2.4 8.2-8.2v-13h-14.3v5.2c0 1.7-1 2.6-2.6 2.6h-19.7c-1.7 0-2.6-1-2.6-2.6v-20.3h27.7v-13.4H488v-22.4h19.2c1.7 0 2.6 1 2.6 2.6v5.2H524v-13c0-5.7-2.5-8.2-8.2-8.2h-51.6v13.4h7.8v63.9zm58.9-76v5.9h1.6v-5.9h2.7v-1.2h-7v1.2h2.7zm5.7-1.2v7.1h1.5v-5.7l2.3 5.7h1.3l2.3-5.7v5.7h1.5v-7.1h-2.3l-2.1 5.1-2.1-5.1h-2.4z"],telegram:[496,512,[],"f2c6","M248 8C111 8 0 119 0 256s111 248 248 248 248-111 248-248S385 8 248 8zm121.8 169.9l-40.7 191.8c-3 13.6-11.1 16.9-22.4 10.5l-62-45.7-29.9 28.8c-3.3 3.3-6.1 6.1-12.5 6.1l4.4-63.1 114.9-103.8c5-4.4-1.1-6.9-7.7-2.5l-142 89.4-61.2-19.1c-13.3-4.2-13.6-13.3 2.8-19.7l239.1-92.2c11.1-4 20.8 2.7 17.2 19.5z"],"telegram-plane":[448,512,[],"f3fe","M446.7 98.6l-67.6 318.8c-5.1 22.5-18.4 28.1-37.3 17.5l-103-75.9-49.7 47.8c-5.5 5.5-10.1 10.1-20.7 10.1l7.4-104.9 190.9-172.5c8.3-7.4-1.8-11.5-12.9-4.1L117.8 284 16.2 252.2c-22.1-6.9-22.5-22.1 4.6-32.7L418.2 66.4c18.4-6.9 34.5 4.1 28.5 32.2z"],"tencent-weibo":[384,512,[],"f1d5","M72.3 495.8c1.4 19.9-27.6 22.2-29.7 2.9C31 368.8 73.7 259.2 144 185.5c-15.6-34 9.2-77.1 50.6-77.1 30.3 0 55.1 24.6 55.1 55.1 0 44-49.5 70.8-86.9 45.1-65.7 71.3-101.4 169.8-90.5 287.2zM192 .1C66.1.1-12.3 134.3 43.7 242.4 52.4 259.8 79 246.9 70 229 23.7 136.4 91 29.8 192 29.8c75.4 0 136.9 61.4 136.9 136.9 0 90.8-86.9 153.9-167.7 133.1-19.1-4.1-25.6 24.4-6.6 29.1 110.7 23.2 204-60 204-162.3C358.6 74.7 284 .1 192 .1z"],themeisle:[512,512,[],"f2b2","M208 88.286c0-10 6.286-21.714 17.715-21.714 11.142 0 17.714 11.714 17.714 21.714 0 10.285-6.572 21.714-17.714 21.714C214.286 110 208 98.571 208 88.286zm304 160c0 36.001-11.429 102.286-36.286 129.714-22.858 24.858-87.428 61.143-120.857 70.572l-1.143.286v32.571c0 16.286-12.572 30.571-29.143 30.571-10 0-19.429-5.714-24.572-14.286-5.427 8.572-14.856 14.286-24.856 14.286-10 0-19.429-5.714-24.858-14.286-5.142 8.572-14.571 14.286-24.57 14.286-10.286 0-19.429-5.714-24.858-14.286-5.143 8.572-14.571 14.286-24.571 14.286-18.857 0-29.429-15.714-29.429-32.857-16.286 12.285-35.715 19.428-56.571 19.428-22 0-43.429-8.285-60.286-22.857 10.285-.286 20.571-2.286 30.285-5.714-20.857-5.714-39.428-18.857-52-36.286 21.37 4.645 46.209 1.673 67.143-11.143-22-22-56.571-58.857-68.572-87.428C1.143 321.714 0 303.714 0 289.429c0-49.714 20.286-160 86.286-160 10.571 0 18.857 4.858 23.143 14.857a158.792 158.792 0 0 1 12-15.428c2-2.572 5.714-5.429 7.143-8.286 7.999-12.571 11.714-21.142 21.714-34C182.571 45.428 232 17.143 285.143 17.143c6 0 12 .285 17.714 1.143C313.714 6.571 328.857 0 344.572 0c14.571 0 29.714 6 40 16.286.857.858 1.428 2.286 1.428 3.428 0 3.714-10.285 13.429-12.857 16.286 4.286 1.429 15.714 6.858 15.714 12 0 2.857-2.857 5.143-4.571 7.143 31.429 27.714 49.429 67.143 56.286 108 4.286-5.143 10.285-8.572 17.143-8.572 10.571 0 20.857 7.144 28.571 14.001C507.143 187.143 512 221.714 512 248.286zM188 89.428c0 18.286 12.571 37.143 32.286 37.143 19.714 0 32.285-18.857 32.285-37.143 0-18-12.571-36.857-32.285-36.857-19.715 0-32.286 18.858-32.286 36.857zM237.714 194c0-19.714 3.714-39.143 8.571-58.286-52.039 79.534-13.531 184.571 68.858 184.571 21.428 0 42.571-7.714 60-20 2-7.429 3.714-14.857 3.714-22.572 0-14.286-6.286-21.428-20.572-21.428-4.571 0-9.143.857-13.429 1.714-63.343 12.668-107.142 3.669-107.142-63.999zm-41.142 254.858c0-11.143-8.858-20.857-20.286-20.857-11.429 0-20 9.715-20 20.857v32.571c0 11.143 8.571 21.142 20 21.142 11.428 0 20.286-9.715 20.286-21.142v-32.571zm49.143 0c0-11.143-8.572-20.857-20-20.857-11.429 0-20.286 9.715-20.286 20.857v32.571c0 11.143 8.857 21.142 20.286 21.142 11.428 0 20-10 
20-21.142v-32.571zm49.713 0c0-11.143-8.857-20.857-20.285-20.857-11.429 0-20.286 9.715-20.286 20.857v32.571c0 11.143 8.857 21.142 20.286 21.142 11.428 0 20.285-9.715 20.285-21.142v-32.571zm49.715 0c0-11.143-8.857-20.857-20.286-20.857-11.428 0-20.286 9.715-20.286 20.857v32.571c0 11.143 8.858 21.142 20.286 21.142 11.429 0 20.286-10 20.286-21.142v-32.571zM421.714 286c-30.857 59.142-90.285 102.572-158.571 102.572-96.571 0-160.571-84.572-160.571-176.572 0-16.857 2-33.429 6-49.714-20 33.715-29.714 72.572-29.714 111.429 0 60.286 24.857 121.715 71.429 160.857 5.143-9.714 14.857-16.286 26-16.286 10 0 19.428 5.714 24.571 14.286 5.429-8.571 14.571-14.286 24.858-14.286 10 0 19.428 5.714 24.571 14.286 5.429-8.571 14.857-14.286 24.858-14.286 10 0 19.428 5.714 24.857 14.286 5.143-8.571 14.571-14.286 24.572-14.286 10.857 0 20.857 6.572 25.714 16 43.427-36.286 68.569-92 71.426-148.286zm10.572-99.714c0-53.714-34.571-105.714-92.572-105.714-30.285 0-58.571 15.143-78.857 36.857C240.862 183.812 233.41 254 302.286 254c28.805 0 97.357-28.538 84.286 36.857 28.857-26 45.714-65.714 45.714-104.571z"],trello:[448,512,[],"f181","M392 32H56C25.1 32 0 57.1 0 88v336c0 30.9 25.1 56 56 56h336c30.9 0 56-25.1 56-56V88c0-30.9-25.1-56-56-56zM194.9 371.4c0 14.8-12 26.9-26.9 26.9H85.1c-14.8 0-26.9-12-26.9-26.9V117.1c0-14.8 12-26.9 26.9-26.9H168c14.8 0 26.9 12 26.9 26.9v254.3zm194.9-112c0 14.8-12 26.9-26.9 26.9H280c-14.8 0-26.9-12-26.9-26.9V117.1c0-14.8 12-26.9 26.9-26.9h82.9c14.8 0 26.9 12 26.9 26.9v142.3z"],tripadvisor:[576,512,[],"f262","M166.4 280.521c0 13.236-10.73 23.966-23.966 23.966s-23.966-10.73-23.966-23.966 10.73-23.966 23.966-23.966 23.966 10.729 23.966 23.966zm264.962-23.956c-13.23 0-23.956 10.725-23.956 23.956 0 13.23 10.725 23.956 23.956 23.956 13.23 0 23.956-10.725 23.956-23.956-.001-13.231-10.726-23.956-23.956-23.956zm89.388 139.49c-62.667 49.104-153.276 38.109-202.379-24.559l-30.979 46.325-30.683-45.939c-48.277 60.39-135.622 71.891-197.885 26.055-64.058-47.158-77.759-137.316-30.601-201.374A186.762 186.762 0 0 0 0 139.416l90.286-.05a358.48 358.48 0 0 1 197.065-54.03 350.382 350.382 0 0 1 192.181 53.349l96.218.074a185.713 185.713 0 0 0-28.352 57.649c46.793 62.747 34.964 151.37-26.648 199.647zM259.366 281.761c-.007-63.557-51.535-115.075-115.092-115.068C80.717 166.7 29.2 218.228 29.206 281.785c.007 63.557 51.535 115.075 115.092 115.068 63.513-.075 114.984-51.539 115.068-115.052v-.04zm28.591-10.455c5.433-73.44 65.51-130.884 139.12-133.022a339.146 339.146 0 0 0-139.727-27.812 356.31 356.31 0 0 0-140.164 27.253c74.344 1.582 135.299 59.424 140.771 133.581zm251.706-28.767c-21.992-59.634-88.162-90.148-147.795-68.157-59.634 21.992-90.148 88.162-68.157 147.795v.032c22.038 59.607 88.198 90.091 147.827 68.113 59.615-22.004 90.113-88.162 68.125-147.783zm-326.039 37.975v.115c-.057 39.328-31.986 71.163-71.314 71.106-39.328-.057-71.163-31.986-71.106-71.314.057-39.328 31.986-71.163 71.314-71.106 39.259.116 71.042 31.94 71.106 71.199zm-24.512 0v-.084c-.051-25.784-20.994-46.645-46.778-46.594-25.784.051-46.645 20.994-46.594 46.777.051 25.784 20.994 46.645 46.777 46.594 25.726-.113 46.537-20.968 46.595-46.693zm313.423 0v.048c-.02 39.328-31.918 71.194-71.247 71.173s-71.194-31.918-71.173-71.247c.02-39.328 31.918-71.194 71.247-71.173 39.29.066 71.121 31.909 71.173 71.199zm-24.504-.008c-.009-25.784-20.918-46.679-46.702-46.67-25.784.009-46.679 20.918-46.67 46.702.009 25.784 20.918 46.678 46.702 46.67 25.765-.046 46.636-20.928 46.67-46.693v-.009z"],tumblr:[320,512,[],"f173","M309.8 480.3c-13.6 14.5-50 31.7-97.4 31.7-120.8 
0-147-88.8-147-140.6v-144H17.9c-5.5 0-10-4.5-10-10v-68c0-7.2 4.5-13.6 11.3-16 62-21.8 81.5-76 84.3-117.1.8-11 6.5-16.3 16.1-16.3h70.9c5.5 0 10 4.5 10 10v115.2h83c5.5 0 10 4.4 10 9.9v81.7c0 5.5-4.5 10-10 10h-83.4V360c0 34.2 23.7 53.6 68 35.8 4.8-1.9 9-3.2 12.7-2.2 3.5.9 5.8 3.4 7.4 7.9l22 64.3c1.8 5 3.3 10.6-.4 14.5z"],"tumblr-square":[448,512,[],"f174","M400 32H48C21.5 32 0 53.5 0 80v352c0 26.5 21.5 48 48 48h352c26.5 0 48-21.5 48-48V80c0-26.5-21.5-48-48-48zm-82.3 364.2c-8.5 9.1-31.2 19.8-60.9 19.8-75.5 0-91.9-55.5-91.9-87.9v-90h-29.7c-3.4 0-6.2-2.8-6.2-6.2v-42.5c0-4.5 2.8-8.5 7.1-10 38.8-13.7 50.9-47.5 52.7-73.2.5-6.9 4.1-10.2 10-10.2h44.3c3.4 0 6.2 2.8 6.2 6.2v72h51.9c3.4 0 6.2 2.8 6.2 6.2v51.1c0 3.4-2.8 6.2-6.2 6.2h-52.1V321c0 21.4 14.8 33.5 42.5 22.4 3-1.2 5.6-2 8-1.4 2.2.5 3.6 2.1 4.6 4.9l13.8 40.2c1 3.2 2 6.7-.3 9.1z"],twitch:[448,512,[],"f1e8","M40.1 32L10 108.9v314.3h107V480h60.2l56.8-56.8h87l117-117V32H40.1zm357.8 254.1L331 353H224l-56.8 56.8V353H76.9V72.1h321v214zM331 149v116.9h-40.1V149H331zm-107 0v116.9h-40.1V149H224z"],twitter:[512,512,[],"f099","M459.37 151.716c.325 4.548.325 9.097.325 13.645 0 138.72-105.583 298.558-298.558 298.558-59.452 0-114.68-17.219-161.137-47.106 8.447.974 16.568 1.299 25.34 1.299 49.055 0 94.213-16.568 130.274-44.832-46.132-.975-84.792-31.188-98.112-72.772 6.498.974 12.995 1.624 19.818 1.624 9.421 0 18.843-1.3 27.614-3.573-48.081-9.747-84.143-51.98-84.143-102.985v-1.299c13.969 7.797 30.214 12.67 47.431 13.319-28.264-18.843-46.781-51.005-46.781-87.391 0-19.492 5.197-37.36 14.294-52.954 51.655 63.675 129.3 105.258 216.365 109.807-1.624-7.797-2.599-15.918-2.599-24.04 0-57.828 46.782-104.934 104.934-104.934 30.213 0 57.502 12.67 76.67 33.137 23.715-4.548 46.456-13.32 66.599-25.34-7.798 24.366-24.366 44.833-46.132 57.827 21.117-2.273 41.584-8.122 60.426-16.243-14.292 20.791-32.161 39.308-52.628 54.253z"],"twitter-square":[448,512,[],"f081","M400 32H48C21.5 32 0 53.5 0 80v352c0 26.5 21.5 48 48 48h352c26.5 0 48-21.5 48-48V80c0-26.5-21.5-48-48-48zm-48.9 158.8c.2 2.8.2 5.7.2 8.5 0 86.7-66 186.6-186.6 186.6-37.2 0-71.7-10.8-100.7-29.4 5.3.6 10.4.8 15.8.8 30.7 0 58.9-10.4 81.4-28-28.8-.6-53-19.5-61.3-45.5 10.1 1.5 19.2 1.5 29.6-1.2-30-6.1-52.5-32.5-52.5-64.4v-.8c8.7 4.9 18.9 7.9 29.6 8.3a65.447 65.447 0 0 1-29.2-54.6c0-12.2 3.2-23.4 8.9-33.1 32.3 39.8 80.8 65.8 135.2 68.6-9.3-44.5 24-80.6 64-80.6 18.9 0 35.9 7.9 47.9 20.7 14.8-2.8 29-8.3 41.6-15.8-4.9 15.2-15.2 28-28.8 36.1 13.2-1.4 26-5.1 37.8-10.2-8.9 13.1-20.1 24.7-32.9 34z"],typo3:[433,512,[],"f42b","M330.8 341c-7 2.3-11.6 2.3-18.5 2.3-57.2 0-140.6-198.5-140.6-264.9 0-24.7 5.4-32.4 13.9-39.4-69.5 8.5-149.3 34-176.3 66.4-5.4 7.7-9.3 20.8-9.3 37.1C0 246 106.8 480 184.1 480c36.3 0 97.3-59.5 146.7-139M294.5 32c71.8 0 138.8 11.6 138.8 52.5 0 82.6-52.5 182.3-78.8 182.3-47.9 0-101.7-132.1-101.7-198.5 0-30.9 11.6-36.3 41.7-36.3"],uber:[448,512,[],"f402","M414.1 32H33.9C15.2 32 0 47.2 0 65.9V446c0 18.8 15.2 34 33.9 34H414c18.7 0 33.9-15.2 33.9-33.9V65.9C448 47.2 432.8 32 414.1 32zM237.6 391.1C163 398.6 96.4 344.2 88.9 269.6h94.4V290c0 3.7 3 6.8 6.8 6.8H258c3.7 0 6.8-3 6.8-6.8v-67.9c0-3.7-3-6.8-6.8-6.8h-67.9c-3.7 0-6.8 3-6.8 6.8v20.4H88.9c7-69.4 65.4-122.2 135.1-122.2 69.7 0 128.1 52.8 135.1 122.2 7.5 74.5-46.9 141.1-121.5 148.6z"],uikit:[448,512,[],"f403","M443.9 128v256L218 512 0 384V169.7l87.6 45.1v117l133.5 75.5 135.8-75.5v-151l-101.1-57.6 87.6-53.1L443.9 128zM308.6 49.1L223.8 0l-88.6 54.8 86 47.3 87.4-53z"],uniregistry:[384,512,[],"f404","M281.1 220.1H384v-14.8H281.1v14.8zm0-37.1H384v-12.4H281.1V183zm0 
74.2H384v-17.3H281.1v17.3zm-157.7 86.7H8.5c2.6 8.5 5.8 16.8 9.6 24.8h138.3c-12.9-5.7-24.1-14.2-33-24.8m145.7-12.4h109.7c1.8-7.3 3.1-14.7 3.9-22.3H278.3c-2.1 7.9-5.2 15.4-9.2 22.3m-41.5 37.1H367c3.7-8 5.8-16.2 8.5-24.8h-115c-8.8 10.7-20.1 19.2-32.9 24.8M384 32H281.1v2.5H384V32zM192 480c39.5 0 76.2-11.8 106.8-32.2H85.3C115.8 468.2 152.5 480 192 480m89.1-334.2H384V136H281.1v9.8zm0-37.1H384v-7.4H281.1v7.4zm0-37.1H384v-4.9H281.1v4.9zm-178.2 99H0V183h102.9v-12.4zM38.8 405.7h305.3c6.7-8.5 12.6-17.6 17.8-27.2H23c5.2 9.6 9.2 18.7 15.8 27.2m64.1-118.8v-12.4H0v12.4c0 2.5 0 5 .1 7.4h103.1c-.2-2.4-.3-4.9-.3-7.4m178.2 0c0 2.5-.1 5-.4 7.4h103.1c.1-2.5.2-4.9.2-7.4v-12.4H281.1v12.4zm-203 156h227.7c11.8-8.7 22.7-18.6 32.2-29.7H44.9c9.6 11 21.4 21 33.2 29.7m24.8-376.2H0v4.9h102.9v-4.9zm0-34.7H0v2.5h102.9V32zm0 173.3H0v14.8h102.9v-14.8zm0 34.6H0v17.3h102.9v-17.3zm0-103.9H0v9.9h102.9V136zm0-34.7H0v7.4h102.9v-7.4zm2.8 207.9H1.3c.9 7.6 2.2 15 3.9 22.3h109.7c-4-6.9-7.2-14.4-9.2-22.3"],untappd:[640,512,[],"f405","M401.3 49.9c-79.8 160.1-84.6 152.5-87.9 173.2l-5.2 32.8c-1.9 12-6.6 23.5-13.7 33.4L145.6 497.1c-7.6 10.6-20.4 16.2-33.4 14.6-40.3-5-77.8-32.2-95.3-68.5-5.7-11.8-4.5-25.8 3.1-36.4l148.9-207.9c7.1-9.9 16.4-18 27.2-23.7l29.3-15.5c18.5-9.8 9.7-11.9 135.6-138.9 1-4.8 1-7.3 3.6-8 3-.7 6.6-1 6.3-4.6l-.4-4.6c-.2-1.9 1.3-3.6 3.2-3.6 4.5-.1 13.2 1.2 25.6 10 12.3 8.9 16.4 16.8 17.7 21.1.6 1.8-.6 3.7-2.4 4.2l-4.5 1.1c-3.4.9-2.5 4.4-2.3 7.4.1 2.8-2.3 3.6-6.5 6.1zM230.1 36.4c3.4.9 2.5 4.4 2.3 7.4-.2 2.7 2.1 3.5 6.4 6 7.9 15.9 15.3 30.5 22.2 44 .7 1.3 2.3 1.5 3.3.5 11.2-12 24.6-26.2 40.5-42.6 1.3-1.4 1.4-3.5.1-4.9-8-8.2-16.5-16.9-25.6-26.1-1-4.7-1-7.3-3.6-8-3-.8-6.6-1-6.3-4.6.3-3.3 1.4-8.1-2.8-8.2-4.5-.1-13.2 1.1-25.6 10-12.3 8.9-16.4 16.8-17.7 21.1-1.4 4.2 3.6 4.6 6.8 5.4zM620 406.7L471.2 198.8c-13.2-18.5-26.6-23.4-56.4-39.1-11.2-5.9-14.2-10.9-30.5-28.9-1-1.1-2.9-.9-3.6.5-46.3 88.8-47.1 82.8-49 94.8-1.7 10.7-1.3 20 .3 29.8 1.9 12 6.6 23.5 13.7 33.4l148.9 207.9c7.6 10.6 20.2 16.2 33.1 14.7 40.3-4.9 78-32 95.7-68.6 5.4-11.9 4.3-25.9-3.4-36.6z"],usb:[640,512,[],"f287","M641.5 256c0 3.1-1.7 6.1-4.5 7.5L547.9 317c-1.4.8-2.8 1.4-4.5 1.4-1.4 0-3.1-.3-4.5-1.1-2.8-1.7-4.5-4.5-4.5-7.8v-35.6H295.7c25.3 39.6 40.5 106.9 69.6 106.9H392V354c0-5 3.9-8.9 8.9-8.9H490c5 0 8.9 3.9 8.9 8.9v89.1c0 5-3.9 8.9-8.9 8.9h-89.1c-5 0-8.9-3.9-8.9-8.9v-26.7h-26.7c-75.4 0-81.1-142.5-124.7-142.5H140.3c-8.1 30.6-35.9 53.5-69 53.5C32 327.3 0 295.3 0 256s32-71.3 71.3-71.3c33.1 0 61 22.8 69 53.5 39.1 0 43.9 9.5 74.6-60.4C255 88.7 273 95.7 323.8 95.7c7.5-20.9 27-35.6 50.4-35.6 29.5 0 53.5 23.9 53.5 53.5s-23.9 53.5-53.5 53.5c-23.4 0-42.9-14.8-50.4-35.6H294c-29.1 0-44.3 67.4-69.6 106.9h310.1v-35.6c0-3.3 1.7-6.1 4.5-7.8 2.8-1.7 6.4-1.4 8.9.3l89.1 53.5c2.8 1.1 4.5 4.1 4.5 7.2z"],ussunnah:[512,512,[],"f407","M156.8 285.1l5.7 14.4h-8.2c-1.3-3.2-3.1-7.7-3.8-9.5-2.5-6.3-1.1-8.4 0-10 1.9-2.7 3.2-4.4 3.6-5.2 0 2.2.8 5.7 2.7 10.3zm297.3 18.8c-2.1 13.8-5.7 27.1-10.5 39.7l43 23.4-44.8-18.8c-5.3 13.2-12 25.6-19.9 37.2l34.2 30.2-36.8-26.4c-8.4 11.8-18 22.6-28.7 32.3l24.9 34.7-28.1-31.8c-11 9.6-23.1 18-36.1 25.1l15.7 37.2-19.3-35.3c-13.1 6.8-27 12.1-41.6 15.9l6.7 38.4-10.5-37.4c-14.3 3.4-29.2 5.3-44.5 5.4L256 512l-1.9-38.4c-15.3-.1-30.2-2-44.5-5.3L199 505.6l6.7-38.2c-14.6-3.7-28.6-9.1-41.7-15.8l-19.2 35.1 15.6-37c-13-7-25.2-15.4-36.2-25.1l-27.9 31.6 24.7-34.4c-10.7-9.7-20.4-20.5-28.8-32.3l-36.5 26.2 33.9-29.9c-7.9-11.6-14.6-24.1-20-37.3l-44.4 18.7L67.8 344c-4.8-12.7-8.4-26.1-10.5-39.9l-51 9 50.3-14.2c-1.1-8.5-1.7-17.1-1.7-25.9 0-4.7.2-9.4.5-14.1L0 256l56-2.8c1.3-13.1 
3.8-25.8 7.5-38.1L6.4 199l58.9 10.4c4-12 9.1-23.5 15.2-34.4l-55.1-30 58.3 24.6C90 159 97.2 149.2 105.3 140L55.8 96.4l53.9 38.7c8.1-8.6 17-16.5 26.6-23.6l-40-55.6 45.6 51.6c9.5-6.6 19.7-12.3 30.3-17.2l-27.3-64.9 33.8 62.1c10.5-4.4 21.4-7.9 32.7-10.4L199 6.4l19.5 69.2c11-2.1 22.3-3.2 33.8-3.4L256 0l3.6 72.2c11.5.2 22.8 1.4 33.8 3.5L313 6.4l-12.4 70.7c11.3 2.6 22.2 6.1 32.6 10.5l33.9-62.2-27.4 65.1c10.6 4.9 20.7 10.7 30.2 17.2l45.8-51.8-40.1 55.9c9.5 7.1 18.4 15 26.5 23.6l54.2-38.9-49.7 43.9c8 9.1 15.2 18.9 21.5 29.4l58.7-24.7-55.5 30.2c6.1 10.9 11.1 22.3 15.1 34.3l59.3-10.4-57.5 16.2c3.7 12.2 6.2 24.9 7.5 37.9L512 256l-56 2.8c.3 4.6.5 9.3.5 14.1 0 8.7-.6 17.3-1.6 25.8l50.7 14.3-51.5-9.1zm-21.8-31c0-97.5-79-176.5-176.5-176.5s-176.5 79-176.5 176.5 79 176.5 176.5 176.5 176.5-79 176.5-176.5zm-24 0c0 84.3-68.3 152.6-152.6 152.6s-152.6-68.3-152.6-152.6 68.3-152.6 152.6-152.6 152.6 68.3 152.6 152.6zM195 241c0 2.1 1.3 3.8 3.6 5.1 3.3 1.9 6.2 4.6 8.2 8.2 2.8-5.7 4.3-9.5 4.3-11.2 0-2.2-1.1-4.4-3.2-7-2.1-2.5-3.2-5.2-3.3-7.7-6.5 6.8-9.6 10.9-9.6 12.6zm-40.7-19c0 2.1 1.3 3.8 3.6 5.1 3.5 1.9 6.2 4.6 8.2 8.2 2.8-5.7 4.3-9.5 4.3-11.2 0-2.2-1.1-4.4-3.2-7-2.1-2.5-3.2-5.2-3.3-7.7-6.5 6.8-9.6 10.9-9.6 12.6zm-19 0c0 2.1 1.3 3.8 3.6 5.1 3.3 1.9 6.2 4.6 8.2 8.2 2.8-5.7 4.3-9.5 4.3-11.2 0-2.2-1.1-4.4-3.2-7-2.1-2.5-3.2-5.2-3.3-7.7-6.4 6.8-9.6 10.9-9.6 12.6zm204.9 87.9c-8.4-3-8.7-6.8-8.7-15.6V182c-8.2 12.5-14.2 18.6-18 18.6 6.3 14.4 9.5 23.9 9.5 28.3v64.3c0 2.2-2.2 6.5-4.7 6.5h-18c-2.8-7.5-10.2-26.9-15.3-40.3-2 2.5-7.2 9.2-10.7 13.7 2.4 1.6 4.1 3.6 5.2 6.3 2.6 6.7 6.4 16.5 7.9 20.2h-9.2c-3.9-10.4-9.6-25.4-11.8-31.1-2 2.5-7.2 9.2-10.7 13.7 2.4 1.6 4.1 3.6 5.2 6.3.8 2 2.8 7.3 4.3 10.9H256c-1.5-4.1-5.6-14.6-8.4-22-2 2.5-7.2 9.2-10.7 13.7 2.5 1.6 4.3 3.6 5.2 6.3.2.6.5 1.4.6 1.7H225c-4.6-13.9-11.4-27.7-11.4-34.1 0-2.2.3-5.1 1.1-8.2-8.8 10.8-14 15.9-14 25 0 7.5 10.4 28.3 10.4 33.3 0 1.7-.5 3.3-1.4 4.9-9.6-12.7-15.5-20.7-18.8-20.7h-12l-11.2-28c-3.8-9.6-5.7-16-5.7-18.8 0-3.8.5-7.7 1.7-12.2-1 1.3-3.7 4.7-5.5 7.1-.8-2.1-3.1-7.7-4.6-11.5-2.1 2.5-7.5 9.1-11.2 13.6.9 2.3 3.3 8.1 4.9 12.2-2.5 3.3-9.1 11.8-13.6 17.7-4 5.3-5.8 13.3-2.7 21.8 2.5 6.7 2 7.9-1.7 14.1H191c5.5 0 14.3 14 15.5 22 13.2-16 15.4-19.6 16.8-21.6h107c3.9 0 7.2-1.9 9.9-5.8zm20.1-26.6V181.7c-9 12.5-15.9 18.6-20.7 18.6 7.1 14.4 10.7 23.9 10.7 28.3v66.3c0 17.5 8.6 20.4 24 20.4 8.1 0 12.5-.8 13.7-2.7-4.3-1.6-7.6-2.5-9.9-3.3-8.1-3.2-17.8-7.4-17.8-26z"],vaadin:[448,512,[],"f408","M224.5 140.7c1.5-17.6 4.9-52.7 49.8-52.7h98.6c20.7 0 32.1-7.8 32.1-21.6V54.1c0-12.2 9.3-22.1 21.5-22.1S448 41.9 448 54.1v36.5c0 42.9-21.5 62-66.8 62H280.7c-30.1 0-33 14.7-33 27.1 0 1.3-.1 2.5-.2 3.7-.7 12.3-10.9 22.2-23.4 22.2s-22.7-9.8-23.4-22.2c-.1-1.2-.2-2.4-.2-3.7 0-12.3-3-27.1-33-27.1H66.8c-45.3 0-66.8-19.1-66.8-62V54.1C0 41.9 9.4 32 21.6 32s21.5 9.9 21.5 22.1v12.3C43.1 80.2 54.5 88 75.2 88h98.6c44.8 0 48.3 35.1 49.8 52.7h.9zM224 456c11.5 0 21.4-7 25.7-16.3 1.1-1.8 97.1-169.6 98.2-171.4 11.9-19.6-3.2-44.3-27.2-44.3-13.9 0-23.3 6.4-29.8 20.3L224 362l-66.9-117.7c-6.4-13.9-15.9-20.3-29.8-20.3-24 0-39.1 24.6-27.2 44.3 1.1 1.9 97.1 169.6 98.2 171.4 4.3 9.3 14.2 16.3 25.7 16.3z"],viacoin:[384,512,[],"f237","M384 32h-64l-80.7 192h-94.5L64 32H0l48 112H0v48h68.5l13.8 32H0v48h102.8L192 480l89.2-208H384v-48h-82.3l13.8-32H384v-48h-48l48-112zM192 336l-27-64h54l-27 64z"],viadeo:[448,512,[],"f2a9","M276.2 150.5v.7C258.3 98.6 233.6 47.8 205.4 0c43.3 29.2 67 100 70.8 150.5zm32.7 121.7c7.6 18.2 11 37.5 11 57 0 77.7-57.8 141-137.8 139.4l3.8-.3c74.2-46.7 109.3-118.6 109.3-205.1 0-38.1-6.5-75.9-18.9-112 1 11.7 
1 23.7 1 35.4 0 91.8-18.1 241.6-116.6 280C95 455.2 49.4 398 49.4 329.2c0-75.6 57.4-142.3 135.4-142.3 16.8 0 33.7 3.1 49.1 9.6 1.7-15.1 6.5-29.9 13.4-43.3-19.9-7.2-41.2-10.7-62.5-10.7-161.5 0-238.7 195.9-129.9 313.7 67.9 74.6 192 73.9 259.8 0 56.6-61.3 60.9-142.4 36.4-201-12.7 8-27.1 13.9-42.2 17zM418.1 11.7c-31 66.5-81.3 47.2-115.8 80.1-12.4 12-20.6 34-20.6 50.5 0 14.1 4.5 27.1 12 38.8 47.4-11 98.3-46 118.2-90.7-.7 5.5-4.8 14.4-7.2 19.2-20.3 35.7-64.6 65.6-99.7 84.9 14.8 14.4 33.7 25.8 55 25.8 79 0 110.1-134.6 58.1-208.6z"],"viadeo-square":[448,512,[],"f2aa","M400 32H48C21.5 32 0 53.5 0 80v352c0 26.5 21.5 48 48 48h352c26.5 0 48-21.5 48-48V80c0-26.5-21.5-48-48-48zM280.7 381.2c-42.4 46.2-120 46.6-162.4 0-68-73.6-19.8-196.1 81.2-196.1 13.3 0 26.6 2.1 39.1 6.7-4.3 8.4-7.3 17.6-8.4 27.1-9.7-4.1-20.2-6-30.7-6-48.8 0-84.6 41.7-84.6 88.9 0 43 28.5 78.7 69.5 85.9 61.5-24 72.9-117.6 72.9-175 0-7.3 0-14.8-.6-22.1-11.2-32.9-26.6-64.6-44.2-94.5 27.1 18.3 41.9 62.5 44.2 94.1v.4c7.7 22.5 11.8 46.2 11.8 70 0 54.1-21.9 99-68.3 128.2l-2.4.2c50 1 86.2-38.6 86.2-87.2 0-12.2-2.1-24.3-6.9-35.7 9.5-1.9 18.5-5.6 26.4-10.5 15.3 36.6 12.6 87.3-22.8 125.6zM309 233.7c-13.3 0-25.1-7.1-34.4-16.1 21.9-12 49.6-30.7 62.3-53 1.5-3 4.1-8.6 4.5-12-12.5 27.9-44.2 49.8-73.9 56.7-4.7-7.3-7.5-15.5-7.5-24.3 0-10.3 5.2-24.1 12.9-31.6 21.6-20.5 53-8.5 72.4-50 32.5 46.2 13.1 130.3-36.3 130.3z"],viber:[512,512,[],"f409","M430.7 49.9C418 38.2 366.6.9 252.1.4c0 0-135.1-8.1-200.9 52.3C14.6 89.3 1.7 142.9.3 209.4c-1.4 66.5-3.1 191.1 117 224.9h.1l-.1 51.6s-.8 20.9 13 25.1c16.6 5.2 26.4-10.7 42.3-27.8 8.7-9.4 20.7-23.2 29.8-33.7 82.2 6.9 145.3-8.9 152.5-11.2 16.6-5.4 110.5-17.4 125.7-142 15.8-128.5-7.6-209.7-49.9-246.4zM444.6 287c-12.9 104-89 110.6-103 115.1-6 1.9-61.5 15.7-131.2 11.2 0 0-52 62.7-68.2 79-5.3 5.3-11.1 4.8-11-5.7 0-6.9.4-85.7.4-85.7-.1 0-.1 0 0 0C29.9 372.7 35.8 266.6 37 211.1c1.1-55.5 11.6-101 42.6-131.6C135.3 29 250 36.5 250 36.5c96.9.4 143.3 29.6 154.1 39.4 35.7 30.6 53.9 103.8 40.5 211.1zm-138.9-80.8c.4 8.6-12.5 9.2-12.9.6-1.1-22-11.4-32.7-32.6-33.9-8.6-.5-7.8-13.4.7-12.9 27.9 1.5 43.4 17.5 44.8 46.2zm20.3 11.3c1-42.4-25.5-75.6-75.8-79.3-8.5-.6-7.6-13.5.9-12.9 58 4.2 88.9 44.1 87.8 92.5-.2 8.6-13.1 8.2-12.9-.3zm46.9 13.4c.1 8.6-12.9 8.7-12.9.1-.6-81.5-54.9-125.9-120.8-126.4-8.5-.1-8.5-12.9 0-12.9 73.8.5 133.1 51.4 133.7 139.2zM361.7 329v.2c-10.8 19-31 40-51.8 33.3l-.2-.3c-21.1-5.9-70.8-31.5-102.2-56.5-16.2-12.8-31-27.9-42.4-42.4-10.3-12.9-20.7-28.2-30.8-46.6-21.3-38.5-26-55.7-26-55.7-6.7-20.8 14.2-41 33.3-51.8h.2c9.2-4.8 18-3.2 23.9 3.9 0 0 12.4 14.8 17.7 22.1 5 6.8 11.7 17.7 15.2 23.8 6.1 10.9 2.3 22-3.7 26.6l-12 9.6c-6.1 4.9-5.3 14-5.3 14s17.8 67.3 84.3 84.3c0 0 9.1.8 14-5.3l9.6-12c4.6-6 15.7-9.8 26.6-3.7 14.7 8.3 33.4 21.2 45.8 32.9 7 5.7 8.6 14.4 3.8 23.6z"],vimeo:[448,512,[],"f40a","M403.2 32H44.8C20.1 32 0 52.1 0 76.8v358.4C0 459.9 20.1 480 44.8 480h358.4c24.7 0 44.8-20.1 44.8-44.8V76.8c0-24.7-20.1-44.8-44.8-44.8zM377 180.8c-1.4 31.5-23.4 74.7-66 129.4-44 57.2-81.3 85.8-111.7 85.8-18.9 0-34.8-17.4-47.9-52.3-25.5-93.3-36.4-148-57.4-148-2.4 0-10.9 5.1-25.4 15.2l-15.2-19.6c37.3-32.8 72.9-69.2 95.2-71.2 25.2-2.4 40.7 14.8 46.5 51.7 20.7 131.2 29.9 151 67.6 91.6 13.5-21.4 20.8-37.7 21.8-48.9 3.5-33.2-25.9-30.9-45.8-22.4 15.9-52.1 46.3-77.4 91.2-76 33.3.9 49 22.5 47.1 64.7z"],"vimeo-square":[448,512,[],"f194","M400 32H48C21.5 32 0 53.5 0 80v352c0 26.5 21.5 48 48 48h352c26.5 0 48-21.5 48-48V80c0-26.5-21.5-48-48-48zm-16.2 149.6c-1.4 31.1-23.2 73.8-65.3 127.9-43.5 56.5-80.3 84.8-110.4 84.8-18.7 
0-34.4-17.2-47.3-51.6-25.2-92.3-35.9-146.4-56.7-146.4-2.4 0-10.8 5-25.1 15.1L64 192c36.9-32.4 72.1-68.4 94.1-70.4 24.9-2.4 40.2 14.6 46 51.1 20.5 129.6 29.6 149.2 66.8 90.5 13.4-21.2 20.6-37.2 21.5-48.3 3.4-32.8-25.6-30.6-45.2-22.2 15.7-51.5 45.8-76.5 90.1-75.1 32.9 1 48.4 22.4 46.5 64z"],"vimeo-v":[448,512,[],"f27d","M447.8 153.6c-2 43.6-32.4 103.3-91.4 179.1-60.9 79.2-112.4 118.8-154.6 118.8-26.1 0-48.2-24.1-66.3-72.3C100.3 250 85.3 174.3 56.2 174.3c-3.4 0-15.1 7.1-35.2 21.1L0 168.2c51.6-45.3 100.9-95.7 131.8-98.5 34.9-3.4 56.3 20.5 64.4 71.5 28.7 181.5 41.4 208.9 93.6 126.7 18.7-29.6 28.8-52.1 30.2-67.6 4.8-45.9-35.8-42.8-63.3-31 22-72.1 64.1-107.1 126.2-105.1 45.8 1.2 67.5 31.1 64.9 89.4z"],vine:[384,512,[],"f1ca","M384 254.7v52.1c-18.4 4.2-36.9 6.1-52.1 6.1-36.9 77.4-103 143.8-125.1 156.2-14 7.9-27.1 8.4-42.7-.8C137 452 34.2 367.7 0 102.7h74.5C93.2 261.8 139 343.4 189.3 404.5c27.9-27.9 54.8-65.1 75.6-106.9-49.8-25.3-80.1-80.9-80.1-145.6 0-65.6 37.7-115.1 102.2-115.1 114.9 0 106.2 127.9 81.6 181.5 0 0-46.4 9.2-63.5-20.5 3.4-11.3 8.2-30.8 8.2-48.5 0-31.3-11.3-46.6-28.4-46.6-18.2 0-30.8 17.1-30.8 50 .1 79.2 59.4 118.7 129.9 101.9z"],vk:[576,512,[],"f189","M545 117.7c3.7-12.5 0-21.7-17.8-21.7h-58.9c-15 0-21.9 7.9-25.6 16.7 0 0-30 73.1-72.4 120.5-13.7 13.7-20 18.1-27.5 18.1-3.7 0-9.4-4.4-9.4-16.9V117.7c0-15-4.2-21.7-16.6-21.7h-92.6c-9.4 0-15 7-15 13.5 0 14.2 21.2 17.5 23.4 57.5v86.8c0 19-3.4 22.5-10.9 22.5-20 0-68.6-73.4-97.4-157.4-5.8-16.3-11.5-22.9-26.6-22.9H38.8c-16.8 0-20.2 7.9-20.2 16.7 0 15.6 20 93.1 93.1 195.5C160.4 378.1 229 416 291.4 416c37.5 0 42.1-8.4 42.1-22.9 0-66.8-3.4-73.1 15.4-73.1 8.7 0 23.7 4.4 58.7 38.1 40 40 46.6 57.9 69 57.9h58.9c16.8 0 25.3-8.4 20.4-25-11.2-34.9-86.9-106.7-90.3-111.5-8.7-11.2-6.2-16.2 0-26.2.1-.1 72-101.3 79.4-135.6z"],vnv:[640,512,[],"f40b","M104.9 352c-34.1 0-46.4-30.4-46.4-30.4L2.6 210.1S-7.8 192 13 192h32.8c10.4 0 13.2 8.7 18.8 18.1l36.7 74.5s5.2 13.1 21.1 13.1 21.1-13.1 21.1-13.1l36.7-74.5c5.6-9.5 8.4-18.1 18.8-18.1h32.8c20.8 0 10.4 18.1 10.4 18.1l-55.8 111.5S174.2 352 140 352h-35.1zm395 0c-34.1 0-46.4-30.4-46.4-30.4l-55.9-111.5S387.2 192 408 192h32.8c10.4 0 13.2 8.7 18.8 18.1l36.7 74.5s5.2 13.1 21.1 13.1 21.1-13.1 21.1-13.1l36.8-74.5c5.6-9.5 8.4-18.1 18.8-18.1H627c20.8 0 10.4 18.1 10.4 18.1l-55.9 111.5S569.3 352 535.1 352h-35.2zM337.6 192c34.1 0 46.4 30.4 46.4 30.4l55.9 111.5s10.4 18.1-10.4 18.1h-32.8c-10.4 0-13.2-8.7-18.8-18.1l-36.7-74.5s-5.2-13.1-21.1-13.1c-15.9 0-21.1 13.1-21.1 13.1l-36.7 74.5c-5.6 9.4-8.4 18.1-18.8 18.1h-32.9c-20.8 0-10.4-18.1-10.4-18.1l55.9-111.5s12.2-30.4 46.4-30.4h35.1z"],vuejs:[448,512,[],"f41f","M356.9 64.3H280l-56 88.6-48-88.6H0L224 448 448 64.3h-91.1zm-301.2 32h53.8L224 294.5 338.4 96.3h53.8L224 384.5 55.7 96.3z"],weibo:[512,512,[],"f18a","M407 177.6c7.6-24-13.4-46.8-37.4-41.7-22 4.8-28.8-28.1-7.1-32.8 50.1-10.9 92.3 37.1 76.5 84.8-6.8 21.2-38.8 10.8-32-10.3zM214.8 446.7C108.5 446.7 0 395.3 0 310.4c0-44.3 28-95.4 76.3-143.7C176 67 279.5 65.8 249.9 161c-4 13.1 12.3 5.7 12.3 6 79.5-33.6 140.5-16.8 114 51.4-3.7 9.4 1.1 10.9 8.3 13.1 135.7 42.3 34.8 215.2-169.7 215.2zm143.7-146.3c-5.4-55.7-78.5-94-163.4-85.7-84.8 8.6-148.8 60.3-143.4 116s78.5 94 163.4 85.7c84.8-8.6 148.8-60.3 143.4-116zM347.9 35.1c-25.9 5.6-16.8 43.7 8.3 38.3 72.3-15.2 134.8 52.8 111.7 124-7.4 24.2 29.1 37 37.4 12 31.9-99.8-55.1-195.9-157.4-174.3zm-78.5 311c-17.1 38.8-66.8 60-109.1 46.3-40.8-13.1-58-53.4-40.3-89.7 17.7-35.4 63.1-55.4 103.4-45.1 42 10.8 63.1 50.2 46 88.5zm-86.3-30c-12.9-5.4-30 .3-38 12.9-8.3 12.9-4.3 28 8.6 34 13.1 6 30.8.3 39.1-12.9 
8-13.1 3.7-28.3-9.7-34zm32.6-13.4c-5.1-1.7-11.4.6-14.3 5.4-2.9 5.1-1.4 10.6 3.7 12.9 5.1 2 11.7-.3 14.6-5.4 2.8-5.2 1.1-10.9-4-12.9z"],weixin:[576,512,[],"f1d7","M372.3 167.6c6.4 0 12.6.3 18.8 1.1C374.4 90.3 290.3 32 194.7 32 87.6 32 0 104.8 0 197.4c0 53.4 29.3 97.5 77.9 131.6l-19.3 58.6 68-34.1c24.4 4.8 43.8 9.7 68.2 9.7 6.2 0 12.1-.3 18.3-.8-4-12.9-6.2-26.6-6.2-40.8-.1-84.9 73-154 165.4-154zm-104.5-52.9c14.5 0 24.2 9.7 24.2 24.4 0 14.5-9.7 24.2-24.2 24.2-14.8 0-29.3-9.7-29.3-24.2 0-14.7 14.5-24.4 29.3-24.4zm-136.5 48.6c-14.5 0-29.3-9.7-29.3-24.2 0-14.8 14.8-24.4 29.3-24.4 14.8 0 24.4 9.7 24.4 24.4.1 14.6-9.6 24.2-24.4 24.2zm418.8 156.1c0-77.9-77.9-141.3-165.4-141.3-92.7 0-165.4 63.4-165.4 141.3S292 460.7 384.6 460.7c19.3 0 38.9-5.1 58.6-9.9l53.4 29.3-14.8-48.6c39.3-29.4 68.3-68.3 68.3-112.1zm-219.2-24.5c-9.7 0-19.3-9.7-19.3-19.6 0-9.7 9.7-19.3 19.3-19.3 14.8 0 24.4 9.7 24.4 19.3 0 10-9.6 19.6-24.4 19.6zm107.2 0c-9.7 0-19.3-9.7-19.3-19.6 0-9.7 9.7-19.3 19.3-19.3 14.5 0 24.4 9.7 24.4 19.3 0 10-9.9 19.6-24.4 19.6z"],whatsapp:[448,512,[],"f232","M380.9 97.1C339 55.1 283.2 32 223.9 32c-122.4 0-222 99.6-222 222 0 39.1 10.2 77.3 29.6 111L0 480l117.7-30.9c32.4 17.7 68.9 27 106.1 27h.1c122.3 0 224.1-99.6 224.1-222 0-59.3-25.2-115-67.1-157zm-157 341.6c-33.2 0-65.7-8.9-94-25.7l-6.7-4-69.8 18.3L72 359.2l-4.4-7c-18.5-29.4-28.2-63.3-28.2-98.2 0-101.7 82.8-184.5 184.6-184.5 49.3 0 95.6 19.2 130.4 54.1 34.8 34.9 56.2 81.2 56.1 130.5 0 101.8-84.9 184.6-186.6 184.6zm101.2-138.2c-5.5-2.8-32.8-16.2-37.9-18-5.1-1.9-8.8-2.8-12.5 2.8-3.7 5.6-14.3 18-17.6 21.8-3.2 3.7-6.5 4.2-12 1.4-32.6-16.3-54-29.1-75.5-66-5.7-9.8 5.7-9.1 16.3-30.3 1.8-3.7.9-6.9-.5-9.7-1.4-2.8-12.5-30.1-17.1-41.2-4.5-10.8-9.1-9.3-12.5-9.5-3.2-.2-6.9-.2-10.6-.2-3.7 0-9.7 1.4-14.8 6.9-5.1 5.6-19.4 19-19.4 46.3 0 27.3 19.9 53.7 22.6 57.4 2.8 3.7 39.1 59.7 94.8 83.8 35.2 15.2 49 16.5 66.6 13.9 10.7-1.6 32.8-13.4 37.4-26.4 4.6-13 4.6-24.1 3.2-26.4-1.3-2.5-5-3.9-10.5-6.6z"],"whatsapp-square":[448,512,[],"f40c","M224 122.8c-72.7 0-131.8 59.1-131.9 131.8 0 24.9 7 49.2 20.2 70.1l3.1 5-13.3 48.6 49.9-13.1 4.8 2.9c20.2 12 43.4 18.4 67.1 18.4h.1c72.6 0 133.3-59.1 133.3-131.8 0-35.2-15.2-68.3-40.1-93.2-25-25-58-38.7-93.2-38.7zm77.5 188.4c-3.3 9.3-19.1 17.7-26.7 18.8-12.6 1.9-22.4.9-47.5-9.9-39.7-17.2-65.7-57.2-67.7-59.8-2-2.6-16.2-21.5-16.2-41s10.2-29.1 13.9-33.1c3.6-4 7.9-5 10.6-5 2.6 0 5.3 0 7.6.1 2.4.1 5.7-.9 8.9 6.8 3.3 7.9 11.2 27.4 12.2 29.4s1.7 4.3.3 6.9c-7.6 15.2-15.7 14.6-11.6 21.6 15.3 26.3 30.6 35.4 53.9 47.1 4 2 6.3 1.7 8.6-1 2.3-2.6 9.9-11.6 12.5-15.5 2.6-4 5.3-3.3 8.9-2 3.6 1.3 23.1 10.9 27.1 12.9s6.6 3 7.6 4.6c.9 1.9.9 9.9-2.4 19.1zM400 32H48C21.5 32 0 53.5 0 80v352c0 26.5 21.5 48 48 48h352c26.5 0 48-21.5 48-48V80c0-26.5-21.5-48-48-48zM223.9 413.2c-26.6 0-52.7-6.7-75.8-19.3L64 416l22.5-82.2c-13.9-24-21.2-51.3-21.2-79.3C65.4 167.1 136.5 96 223.9 96c42.4 0 82.2 16.5 112.2 46.5 29.9 30 47.9 69.8 47.9 112.2 0 87.4-72.7 158.5-160.1 158.5z"],whmcs:[448,512,[],"f40d","M448 161v-21.3l-28.5-8.8-2.2-10.4 20.1-20.7L427 80.4l-29 7.5-7.2-7.5 7.5-28.2-19.1-11.6-21.3 21-10.7-3.2-7-26.4h-22.6l-6.2 26.4-12.1 3.2-19.7-21-19.4 11 8.1 27.7-8.1 8.4-28.5-7.5-11 19.1 20.7 21-2.9 10.4-28.5 7.8-.3 21.7 28.8 7.5 2.4 12.1-20.1 19.9 10.4 18.5 29.6-7.5 7.2 8.6-8.1 26.9 19.9 11.6 19.4-20.4 11.6 2.9 6.7 28.5 22.6.3 6.7-28.8 11.6-3.5 20.7 21.6 20.4-12.1-8.8-28 7.8-8.1 28.8 8.8 10.3-20.1-20.9-18.8 2.2-12.1 29.1-7zm-119.2 45.2c-31.3 0-56.8-25.4-56.8-56.8s25.4-56.8 56.8-56.8 56.8 25.4 56.8 56.8c0 31.5-25.4 56.8-56.8 56.8zm72.3 16.4l46.9 14.5V277l-55.1 13.4-4.1 22.7 38.9 
35.3-19.2 37.9-54-16.7-14.6 15.2 16.7 52.5-38.3 22.7-38.9-40.5-21.7 6.6-12.6 54-42.4-.5-12.6-53.6-21.7-5.6-36.4 38.4-37.4-21.7 15.2-50.5-13.7-16.1-55.5 14.1-19.7-34.8 37.9-37.4-4.8-22.8-54-14.1.5-40.9L54 219.9l5.7-19.7-38.9-39.4L41.5 125l53.6 14.1 15.2-15.7-15.2-52 36.4-20.7 36.8 39.4L191 84l11.6-52H245l11.6 45.9L234 72l-6.3-1.7-3.3 5.7-11 19.1-3.3 5.6 4.6 4.6 17.2 17.4-.3 1-23.8 6.5-6.2 1.7-.1 6.4-.2 12.9C153.8 161.6 118 204 118 254.7c0 58.3 47.3 105.7 105.7 105.7 50.5 0 92.7-35.4 103.2-82.8l13.2.2 6.9.1 1.6-6.7 5.6-24 1.9-.6 17.1 17.8 4.7 4.9 5.8-3.4 20.4-12.1 5.8-3.5-2-6.5-6.8-21.2z"],"wikipedia-w":[640,512,[],"f266","M640 51.2l-.3 12.2c-28.1.8-45 15.8-55.8 40.3-25 57.8-103.3 240-155.3 358.6H415l-81.9-193.1c-32.5 63.6-68.3 130-99.2 193.1-.3.3-15 0-15-.3C172 352.3 122.8 243.4 75.8 133.4 64.4 106.7 26.4 63.4.2 63.7c0-3.1-.3-10-.3-14.2h161.9v13.9c-19.2 1.1-52.8 13.3-43.3 34.2 21.9 49.7 103.6 240.3 125.6 288.6 15-29.7 57.8-109.2 75.3-142.8-13.9-28.3-58.6-133.9-72.8-160-9.7-17.8-36.1-19.4-55.8-19.7V49.8l142.5.3v13.1c-19.4.6-38.1 7.8-29.4 26.1 18.9 40 30.6 68.1 48.1 104.7 5.6-10.8 34.7-69.4 48.1-100.8 8.9-20.6-3.9-28.6-38.6-29.4.3-3.6 0-10.3.3-13.6 44.4-.3 111.1-.3 123.1-.6v13.6c-22.5.8-45.8 12.8-58.1 31.7l-59.2 122.8c6.4 16.1 63.3 142.8 69.2 156.7L559.2 91.8c-8.6-23.1-36.4-28.1-47.2-28.3V49.6l127.8 1.1.2.5z"],windows:[448,512,[],"f17a","M0 93.7l183.6-25.3v177.4H0V93.7zm0 324.6l183.6 25.3V268.4H0v149.9zm203.8 28L448 480V268.4H203.8v177.9zm0-380.6v180.1H448V32L203.8 65.7z"],wordpress:[512,512,[],"f19a","M61.7 169.4l101.5 278C92.2 413 43.3 340.2 43.3 256c0-30.9 6.6-60.1 18.4-86.6zm337.9 75.9c0-26.3-9.4-44.5-17.5-58.7-10.8-17.5-20.9-32.4-20.9-49.9 0-19.6 14.8-37.8 35.7-37.8.9 0 1.8.1 2.8.2-37.9-34.7-88.3-55.9-143.7-55.9-74.3 0-139.7 38.1-177.8 95.9 5 .2 9.7.3 13.7.3 22.2 0 56.7-2.7 56.7-2.7 11.5-.7 12.8 16.2 1.4 17.5 0 0-11.5 1.3-24.3 2l77.5 230.4L249.8 247l-33.1-90.8c-11.5-.7-22.3-2-22.3-2-11.5-.7-10.1-18.2 1.3-17.5 0 0 35.1 2.7 56 2.7 22.2 0 56.7-2.7 56.7-2.7 11.5-.7 12.8 16.2 1.4 17.5 0 0-11.5 1.3-24.3 2l76.9 228.7 21.2-70.9c9-29.4 16-50.5 16-68.7zm-139.9 29.3l-63.8 185.5c19.1 5.6 39.2 8.7 60.1 8.7 24.8 0 48.5-4.3 70.6-12.1-.6-.9-1.1-1.9-1.5-2.9l-65.4-179.2zm183-120.7c.9 6.8 1.4 14 1.4 21.9 0 21.6-4 45.8-16.2 76.2l-65 187.9C426.2 403 468.7 334.5 468.7 256c0-37-9.4-71.8-26-102.1zM504 256c0 136.8-111.3 248-248 248C119.2 504 8 392.7 8 256 8 119.2 119.2 8 256 8c136.7 0 248 111.2 248 248zm-11.4 0c0-130.5-106.2-236.6-236.6-236.6C125.5 19.4 19.4 125.5 19.4 256S125.6 492.6 256 492.6c130.5 0 236.6-106.1 236.6-236.6z"],"wordpress-simple":[512,512,[],"f411","M256 8C119.3 8 8 119.2 8 256c0 136.7 111.3 248 248 248s248-111.3 248-248C504 119.2 392.7 8 256 8zM33 256c0-32.3 6.9-63 19.3-90.7l106.4 291.4C84.3 420.5 33 344.2 33 256zm223 223c-21.9 0-43-3.2-63-9.1l66.9-194.4 68.5 187.8c.5 1.1 1 2.1 1.6 3.1-23.1 8.1-48 12.6-74 12.6zm30.7-327.5c13.4-.7 25.5-2.1 25.5-2.1 12-1.4 10.6-19.1-1.4-18.4 0 0-36.1 2.8-59.4 2.8-21.9 0-58.7-2.8-58.7-2.8-12-.7-13.4 17.7-1.4 18.4 0 0 11.4 1.4 23.4 2.1l34.7 95.2L200.6 393l-81.2-241.5c13.4-.7 25.5-2.1 25.5-2.1 12-1.4 10.6-19.1-1.4-18.4 0 0-36.1 2.8-59.4 2.8-4.2 0-9.1-.1-14.4-.3C109.6 73 178.1 33 256 33c58 0 110.9 22.2 150.6 58.5-1-.1-1.9-.2-2.9-.2-21.9 0-37.4 19.1-37.4 39.6 0 18.4 10.6 33.9 21.9 52.3 8.5 14.8 18.4 33.9 18.4 61.5 0 19.1-7.3 41.2-17 72.1l-22.2 74.3-80.7-239.6zm81.4 297.2l68.1-196.9c12.7-31.8 17-57.2 17-79.9 0-8.2-.5-15.8-1.5-22.9 17.4 31.8 27.3 68.2 27.3 107 0 82.3-44.6 154.1-110.9 192.7z"],wpbeginner:[512,512,[],"f297","M462.799 322.374C519.01 386.682 466.961 480 
370.944 480c-39.602 0-78.824-17.687-100.142-50.04-6.887.356-22.702.356-29.59 0C219.848 462.381 180.588 480 141.069 480c-95.49 0-148.348-92.996-91.855-157.626C-29.925 190.523 80.479 32 256.006 32c175.632 0 285.87 158.626 206.793 290.374zm-339.647-82.972h41.529v-58.075h-41.529v58.075zm217.18 86.072v-23.839c-60.506 20.915-132.355 9.198-187.589-33.971l.246 24.897c51.101 46.367 131.746 57.875 187.343 32.913zm-150.753-86.072h166.058v-58.075H189.579v58.075z"],wpexplorer:[512,512,[],"f2de","M512 256c0 141.2-114.7 256-256 256C114.8 512 0 397.3 0 256S114.7 0 256 0s256 114.7 256 256zm-32 0c0-123.2-100.3-224-224-224C132.5 32 32 132.5 32 256s100.5 224 224 224 224-100.5 224-224zM160.9 124.6l86.9 37.1-37.1 86.9-86.9-37.1 37.1-86.9zm110 169.1l46.6 94h-14.6l-50-100-48.9 100h-14l51.1-106.9-22.3-9.4 6-14 68.6 29.1-6 14.3-16.5-7.1zm-11.8-116.3l68.6 29.4-29.4 68.3L230 246l29.1-68.6zm80.3 42.9l54.6 23.1-23.4 54.3-54.3-23.1 23.1-54.3z"],wpforms:[448,512,[],"f298","M448 75.2v361.7c0 24.3-19 43.2-43.2 43.2H43.2C19.3 480 0 461.4 0 436.8V75.2C0 51.1 18.8 32 43.2 32h361.7c24 0 43.1 18.8 43.1 43.2zm-37.3 361.6V75.2c0-3-2.6-5.8-5.8-5.8h-9.3L285.3 144 224 94.1 162.8 144 52.5 69.3h-9.3c-3.2 0-5.8 2.8-5.8 5.8v361.7c0 3 2.6 5.8 5.8 5.8h361.7c3.2.1 5.8-2.7 5.8-5.8zM150.2 186v37H76.7v-37h73.5zm0 74.4v37.3H76.7v-37.3h73.5zm11.1-147.3l54-43.7H96.8l64.5 43.7zm210 72.9v37h-196v-37h196zm0 74.4v37.3h-196v-37.3h196zm-84.6-147.3l64.5-43.7H232.8l53.9 43.7zM371.3 335v37.3h-99.4V335h99.4z"],xbox:[512,512,[],"f412","M369.9 318.2c44.3 54.3 64.7 98.8 54.4 118.7-7.9 15.1-56.7 44.6-92.6 55.9-29.6 9.3-68.4 13.3-100.4 10.2-38.2-3.7-76.9-17.4-110.1-39C93.3 445.8 87 438.3 87 423.4c0-29.9 32.9-82.3 89.2-142.1 32-33.9 76.5-73.7 81.4-72.6 9.4 2.1 84.3 75.1 112.3 109.5zM188.6 143.8c-29.7-26.9-58.1-53.9-86.4-63.4-15.2-5.1-16.3-4.8-28.7 8.1-29.2 30.4-53.5 79.7-60.3 122.4-5.4 34.2-6.1 43.8-4.2 60.5 5.6 50.5 17.3 85.4 40.5 120.9 9.5 14.6 12.1 17.3 9.3 9.9-4.2-11-.3-37.5 9.5-64 14.3-39 53.9-112.9 120.3-194.4zm311.6 63.5C483.3 127.3 432.7 77 425.6 77c-7.3 0-24.2 6.5-36 13.9-23.3 14.5-41 31.4-64.3 52.8C367.7 197 427.5 283.1 448.2 346c6.8 20.7 9.7 41.1 7.4 52.3-1.7 8.5-1.7 8.5 1.4 4.6 6.1-7.7 19.9-31.3 25.4-43.5 7.4-16.2 15-40.2 18.6-58.7 4.3-22.5 3.9-70.8-.8-93.4zM141.3 43C189 40.5 251 77.5 255.6 78.4c.7.1 10.4-4.2 21.6-9.7 63.9-31.1 94-25.8 107.4-25.2-63.9-39.3-152.7-50-233.9-11.7-23.4 11.1-24 11.9-9.4 11.2z"],xing:[384,512,[],"f168","M162.7 210c-1.8 3.3-25.2 44.4-70.1 123.5-4.9 8.3-10.8 12.5-17.7 12.5H9.8c-7.7 0-12.1-7.5-8.5-14.4l69-121.3c.2 0 .2-.1 0-.3l-43.9-75.6c-4.3-7.8.3-14.1 8.5-14.1H100c7.3 0 13.3 4.1 18 12.2l44.7 77.5zM382.6 46.1l-144 253v.3L330.2 466c3.9 7.1.2 14.1-8.5 14.1h-65.2c-7.6 0-13.6-4-18-12.2l-92.4-168.5c3.3-5.8 51.5-90.8 144.8-255.2 4.6-8.1 10.4-12.2 17.5-12.2h65.7c8 0 12.3 6.7 8.5 14.1z"],"xing-square":[448,512,[],"f169","M400 32H48C21.5 32 0 53.5 0 80v352c0 26.5 21.5 48 48 48h352c26.5 0 48-21.5 48-48V80c0-26.5-21.5-48-48-48zM140.4 320.2H93.8c-5.5 0-8.7-5.3-6-10.3l49.3-86.7c.1 0 .1-.1 0-.2l-31.4-54c-3-5.6.2-10.1 6-10.1h46.6c5.2 0 9.5 2.9 12.9 8.7l31.9 55.3c-1.3 2.3-18 31.7-50.1 88.2-3.5 6.2-7.7 9.1-12.6 9.1zm219.7-214.1L257.3 286.8v.2l65.5 119c2.8 5.1.1 10.1-6 10.1h-46.6c-5.5 0-9.7-2.9-12.9-8.7l-66-120.3c2.3-4.1 36.8-64.9 103.4-182.3 3.3-5.8 7.4-8.7 12.5-8.7h46.9c5.7-.1 8.8 4.7 6 10z"],"y-combinator":[448,512,[],"f23b","M448 32v448H0V32h448zM236 287.5L313.5 142h-32.7L235 233c-4.7 9.3-9 18.3-12.8 26.8L210 233l-45.2-91h-35l76.7 143.8v94.5H236v-92.8z"],yahoo:[360,512,[],"f19e","M204.9 288l3.5 195.5c-11.3-2-20.9-3.5-28.7-3.5-7.5 0-17 
1.5-28.7 3.5l3.5-195.5C105.7 203.7 56.5 113.1 0 28.5 10.6 31.3 20.4 32 29.5 32c8 0 18.1-.7 30.3-3.5 36.4 64.2 72.9 123.2 119.9 200.4 33.2-54.7 80.9-128.1 119.9-200.4 9.8 2.6 19.6 3.5 29.2 3.5 10.2 0 20.6-.9 31.1-3.5C329.4 71.1 243 221.3 204.9 288z"],yandex:[256,512,[],"f413","M153.1 315.8L65.7 512H2l96-209.8c-45.1-22.9-75.2-64.4-75.2-141.1C22.7 53.7 90.8 0 171.7 0H254v512h-55.1V315.8h-45.8zm45.8-269.3h-29.4c-44.4 0-87.4 29.4-87.4 114.6 0 82.3 39.4 108.8 87.4 108.8h29.4V46.5z"],"yandex-international":[320,512,[],"f414","M129.5 512V345.9L18.5 48h55.8l81.8 229.7L250.2 0h51.3L180.8 347.8V512h-51.3z"],yelp:[384,512,[],"f1e9","M136.9 328c-1 .3-109.2 35.7-115.8 35.7-15.2-.9-18.5-16.2-19.9-31.2-1.5-14.2-1.4-29.8.3-46.8 1.9-18.8 5.5-45.1 24.2-44 4.8 0 67.1 25.9 112.7 44.4 17.1 6.8 18.6 35.8-1.5 41.9zm57.9-113.9c1.8 38.2-25.5 48.5-47.2 14.3L41.3 60.4c-1.5-6.6.3-12.4 5.3-17.4C62.2 26.5 146 3.2 168.1 8.9c7.5 1.9 12.1 6.1 13.8 12.6 1.3 8.3 11.5 167.4 12.9 192.6zm-1.4 164.8c0 4.6.2 116.4-1.7 121.5-2.3 6-7 9.7-14.3 11.2-10.1 1.7-27.1-1.9-51-10.7-22-8.1-56.7-21.5-49.3-42.5 2.8-6.9 51.4-62.8 77.3-93.6 12-15.2 39.8-5.5 39 14.1zm180.2-117.8c-5.6 3.7-110.8 28.2-118.1 30.6l.3-.6c-18.1 4.7-35.4-18.5-23.3-34.6 3.7-3.7 65.9-92.4 72.8-97 5.2-3.6 11.3-3.8 18.3-.6 18.4 8.8 55.1 63.1 57.4 84.6-.1 2.9 1.2 11.7-7.4 17.6zm10.1 130.7c-2.7 20.6-44.5 73.4-63.8 81-6.9 2.6-12.9 2-17.7-2-5-3.5-61.8-97.1-64.9-102.3-10.9-16.2 6.8-39.8 25.6-33.2 0 0 110.5 35.7 114.7 39.4 5.2 4.1 7.2 9.8 6.1 17.1z"],yoast:[448,512,[],"f2b1","M91.265 96h186.043l-7.008 18.878H91.265c-39.658 0-71.889 31.556-71.889 70.292v205.373c0 35.401 24.882 70.311 84.001 70.311V480H91.265C41.165 480 0 439.83 0 390.544V185.17C0 135.937 40.709 96 91.265 96zm229.114-56h66.49C243.146 418.092 241.192 438.918 202.18 479.331c-20.779 21.646-49.294 31.719-78.328 32.669v-51.146c49.234-7.662 64.606-49.855 64.606-75.284 0-20.078.577-12.645-82.117-223.219h61.386l50.354 156.58L320.379 40zM448 181.465V480H233.963c6.635-9.621 10.679-16.277 12.112-19.413h182.529V181.465c0-32.543-17.097-51.945-48.194-62.914l6.733-17.578C428.763 114.636 448 144.059 448 181.465z"],youtube:[576,512,[],"f167","M549.655 124.083c-6.281-23.65-24.787-42.276-48.284-48.597C458.781 64 288 64 288 64S117.22 64 74.629 75.486c-23.497 6.322-42.003 24.947-48.284 48.597-11.412 42.867-11.412 132.305-11.412 132.305s0 89.438 11.412 132.305c6.281 23.65 24.787 41.5 48.284 47.821C117.22 448 288 448 288 448s170.78 0 213.371-11.486c23.497-6.321 42.003-24.171 48.284-47.821 11.412-42.867 11.412-132.305 11.412-132.305s0-89.438-11.412-132.305zm-317.51 213.508V175.185l142.739 81.205-142.739 81.201z"]}),t=z||{};t.___FONT_AWESOME___||(t.___FONT_AWESOME___={}),t.___FONT_AWESOME___.styles||(t.___FONT_AWESOME___.styles={}),t.___FONT_AWESOME___.hooks||(t.___FONT_AWESOME___.hooks={}),t.___FONT_AWESOME___.shims||(t.___FONT_AWESOME___.shims=[]);var s=t.___FONT_AWESOME___,r=Object.assign||function(c){for(var l=1;l<arguments.length;l++){var h=arguments[l];for(var v in h)Object.prototype.hasOwnProperty.call(h,v)&&(c[v]=h[v])}return c};!function(c){try{c()}catch(c){}}(function(){c("fab")})}(),function(){"use strict";function c(c){"function"==typeof s.hooks.addPack?s.hooks.addPack(c,m):s.styles[c]=r({},s.styles[c]||{},m)}var l={};try{"undefined"!=typeof window&&(l=window)}catch(c){}var h=(l.navigator||{}).userAgent,v=void 
0===h?"":h,z=l,e=(~v.indexOf("MSIE")||v.indexOf("Trident/"),[1,2,3,4,5,6,7,8,9,10]),a=e.concat([11,12,13,14,15,16,17,18,19,20]),m=(["xs","sm","lg","fw","ul","li","border","pull-left","pull-right","spin","pulse","rotate-90","rotate-180","rotate-270","flip-horizontal","flip-vertical","stack","stack-1x","stack-2x","inverse","layers","layers-text","layers-counter"].concat(e.map(function(c){return c+"x"})).concat(a.map(function(c){return"w-"+c})),{"address-book":[448,512,[],"f2b9","M320 320v72c0 13.255-10.745 24-24 24H152c-13.255 0-24-10.745-24-24v-72c0-21.431 14.207-40.266 34.813-46.153l18.064-5.161C193.629 275.884 208.342 280 224 280s30.371-4.116 43.122-11.314l18.064 5.161C305.793 279.734 320 298.569 320 320zm-96-64c35.346 0 64-28.654 64-64s-28.654-64-64-64-64 28.654-64 64 28.654 64 64 64zm192-96v64h20c6.627 0 12 5.373 12 12v40c0 6.627-5.373 12-12 12h-20v64h20c6.627 0 12 5.373 12 12v40c0 6.627-5.373 12-12 12h-20v48c0 26.51-21.49 48-48 48H80c-26.51 0-48-21.49-48-48V48C32 21.49 53.49 0 80 0h288c26.51 0 48 21.49 48 48v48h20c6.627 0 12 5.373 12 12v40c0 6.627-5.373 12-12 12h-20zm-48 298V54a6 6 0 0 0-6-6H86a6 6 0 0 0-6 6v404a6 6 0 0 0 6 6h276a6 6 0 0 0 6-6z"],"address-card":[512,512,[],"f2bb","M464 64H48C21.49 64 0 85.49 0 112v288c0 26.51 21.49 48 48 48h416c26.51 0 48-21.49 48-48V112c0-26.51-21.49-48-48-48zm-6 336H54a6 6 0 0 1-6-6V118a6 6 0 0 1 6-6h404a6 6 0 0 1 6 6v276a6 6 0 0 1-6 6zm-54-176H300c-6.627 0-12-5.373-12-12v-24c0-6.627 5.373-12 12-12h104c6.627 0 12 5.373 12 12v24c0 6.627-5.373 12-12 12zm0 80H300c-6.627 0-12-5.373-12-12v-24c0-6.627 5.373-12 12-12h104c6.627 0 12 5.373 12 12v24c0 6.627-5.373 12-12 12zm-284-96c0-30.928 25.072-56 56-56s56 25.072 56 56-25.072 56-56 56-56-25.072-56-56zm136 89.857V340c0 6.627-5.373 12-12 12H108c-6.627 0-12-5.373-12-12v-42.143a24 24 0 0 1 17.104-22.988l13.464-4.039C140.186 281.568 157.351 288 176 288s35.814-6.432 49.433-17.17l13.464 4.039A24 24 0 0 1 256 297.857z"],"arrow-alt-circle-down":[512,512,[],"f358","M256 8C119 8 8 119 8 256s111 248 248 248 248-111 248-248S393 8 256 8zm0 448c-110.5 0-200-89.5-200-200S145.5 56 256 56s200 89.5 200 200-89.5 200-200 200zm-32-316v116h-67c-10.7 0-16 12.9-8.5 20.5l99 99c4.7 4.7 12.3 4.7 17 0l99-99c7.6-7.6 2.2-20.5-8.5-20.5h-67V140c0-6.6-5.4-12-12-12h-40c-6.6 0-12 5.4-12 12z"],"arrow-alt-circle-left":[512,512,[],"f359","M8 256c0 137 111 248 248 248s248-111 248-248S393 8 256 8 8 119 8 256zm448 0c0 110.5-89.5 200-200 200S56 366.5 56 256 145.5 56 256 56s200 89.5 200 200zm-72-20v40c0 6.6-5.4 12-12 12H256v67c0 10.7-12.9 16-20.5 8.5l-99-99c-4.7-4.7-4.7-12.3 0-17l99-99c7.6-7.6 20.5-2.2 20.5 8.5v67h116c6.6 0 12 5.4 12 12z"],"arrow-alt-circle-right":[512,512,[],"f35a","M504 256C504 119 393 8 256 8S8 119 8 256s111 248 248 248 248-111 248-248zm-448 0c0-110.5 89.5-200 200-200s200 89.5 200 200-89.5 200-200 200S56 366.5 56 256zm72 20v-40c0-6.6 5.4-12 12-12h116v-67c0-10.7 12.9-16 20.5-8.5l99 99c4.7 4.7 4.7 12.3 0 17l-99 99c-7.6 7.6-20.5 2.2-20.5-8.5v-67H140c-6.6 0-12-5.4-12-12z"],"arrow-alt-circle-up":[512,512,[],"f35b","M256 504c137 0 248-111 248-248S393 8 256 8 8 119 8 256s111 248 248 248zm0-448c110.5 0 200 89.5 200 200s-89.5 200-200 200S56 366.5 56 256 145.5 56 256 56zm20 328h-40c-6.6 0-12-5.4-12-12V256h-67c-10.7 0-16-12.9-8.5-20.5l99-99c4.7-4.7 12.3-4.7 17 0l99 99c7.6 7.6 2.2 20.5-8.5 20.5h-67v116c0 6.6-5.4 12-12 12z"],bell:[448,512,[],"f0f3","M425.403 330.939c-16.989-16.785-34.546-34.143-34.546-116.083 0-83.026-60.958-152.074-140.467-164.762A31.843 31.843 0 0 0 256 32c0-17.673-14.327-32-32-32s-32 14.327-32 32a31.848 31.848 0 0 0 5.609 
18.095C118.101 62.783 57.143 131.831 57.143 214.857c0 81.933-17.551 99.292-34.543 116.078C-25.496 378.441 9.726 448 66.919 448H160c0 35.346 28.654 64 64 64 35.346 0 64-28.654 64-64h93.08c57.19 0 92.415-69.583 44.323-117.061zM224 472c-13.234 0-24-10.766-24-24h48c0 13.234-10.766 24-24 24zm157.092-72H66.9c-16.762 0-25.135-20.39-13.334-32.191 28.585-28.585 51.577-55.724 51.577-152.952C105.143 149.319 158.462 96 224 96s118.857 53.319 118.857 118.857c0 97.65 23.221 124.574 51.568 152.952C406.278 379.661 397.783 400 381.092 400z"],"bell-slash":[576,512,[],"f1f6","M130.9 400c-16.762 0-25.135-20.39-13.334-32.191 25.226-25.226 46.094-49.338 50.649-121.48l-46.777-41.274a168.48 168.48 0 0 0-.296 9.802c0 81.933-17.551 99.292-34.543 116.078C38.504 378.441 73.726 448 130.919 448H224c0 35.346 28.654 64 64 64s64-28.654 64-64h44.777l-54.4-48H130.9zM288 472c-13.234 0-24-10.766-24-24h48c0 13.234-10.766 24-24 24zm283.867.553l-67.931-59.571c13.104-24.118 11.524-56.318-14.532-82.042-16.989-16.785-34.546-34.143-34.546-116.083 0-83.026-60.958-152.074-140.467-164.762A31.848 31.848 0 0 0 320 32c0-17.673-14.327-32-32-32s-32 14.327-32 32a31.848 31.848 0 0 0 5.609 18.095c-41.471 6.618-77.891 28.571-103.249 59.841L36.459 3.037c-5.058-4.436-12.777-3.956-17.24 1.071L3.056 22.313C-1.407 27.34-.925 35.012 4.134 39.447l535.408 469.516c5.058 4.436 12.777 3.956 17.24-1.071l16.163-18.205c4.462-5.027 3.98-12.699-1.078-17.134zM288 96c65.538 0 118.857 53.319 118.857 118.857 0 97.65 23.221 124.574 51.568 152.952 2.908 2.908 4.573 6.328 5.209 9.832L194.482 141.612C216.258 113.867 250.075 96 288 96z"],bookmark:[384,512,[],"f02e","M336 0H48C21.49 0 0 21.49 0 48v464l192-112 192 112V48c0-26.51-21.49-48-48-48zm0 428.43l-144-84-144 84V54a6 6 0 0 1 6-6h276c3.314 0 6 2.683 6 5.996V428.43z"],building:[448,512,[],"f1ad","M128 148v-40c0-6.6 5.4-12 12-12h40c6.6 0 12 5.4 12 12v40c0 6.6-5.4 12-12 12h-40c-6.6 0-12-5.4-12-12zm140 12h40c6.6 0 12-5.4 12-12v-40c0-6.6-5.4-12-12-12h-40c-6.6 0-12 5.4-12 12v40c0 6.6 5.4 12 12 12zm-128 96h40c6.6 0 12-5.4 12-12v-40c0-6.6-5.4-12-12-12h-40c-6.6 0-12 5.4-12 12v40c0 6.6 5.4 12 12 12zm128 0h40c6.6 0 12-5.4 12-12v-40c0-6.6-5.4-12-12-12h-40c-6.6 0-12 5.4-12 12v40c0 6.6 5.4 12 12 12zm-76 84v-40c0-6.6-5.4-12-12-12h-40c-6.6 0-12 5.4-12 12v40c0 6.6 5.4 12 12 12h40c6.6 0 12-5.4 12-12zm76 12h40c6.6 0 12-5.4 12-12v-40c0-6.6-5.4-12-12-12h-40c-6.6 0-12 5.4-12 12v40c0 6.6 5.4 12 12 12zm180 124v36H0v-36c0-6.6 5.4-12 12-12h19.5V24c0-13.3 10.7-24 24-24h337c13.3 0 24 10.7 24 24v440H436c6.6 0 12 5.4 12 12zM79.5 463H192v-67c0-6.6 5.4-12 12-12h40c6.6 0 12 5.4 12 12v67h112.5V49L80 48l-.5 415z"],calendar:[448,512,[],"f133","M400 64h-48V12c0-6.6-5.4-12-12-12h-40c-6.6 0-12 5.4-12 12v52H160V12c0-6.6-5.4-12-12-12h-40c-6.6 0-12 5.4-12 12v52H48C21.5 64 0 85.5 0 112v352c0 26.5 21.5 48 48 48h352c26.5 0 48-21.5 48-48V112c0-26.5-21.5-48-48-48zm-6 400H54c-3.3 0-6-2.7-6-6V160h352v298c0 3.3-2.7 6-6 6z"],"calendar-alt":[448,512,[],"f073","M148 288h-40c-6.6 0-12-5.4-12-12v-40c0-6.6 5.4-12 12-12h40c6.6 0 12 5.4 12 12v40c0 6.6-5.4 12-12 12zm108-12v-40c0-6.6-5.4-12-12-12h-40c-6.6 0-12 5.4-12 12v40c0 6.6 5.4 12 12 12h40c6.6 0 12-5.4 12-12zm96 0v-40c0-6.6-5.4-12-12-12h-40c-6.6 0-12 5.4-12 12v40c0 6.6 5.4 12 12 12h40c6.6 0 12-5.4 12-12zm-96 96v-40c0-6.6-5.4-12-12-12h-40c-6.6 0-12 5.4-12 12v40c0 6.6 5.4 12 12 12h40c6.6 0 12-5.4 12-12zm-96 0v-40c0-6.6-5.4-12-12-12h-40c-6.6 0-12 5.4-12 12v40c0 6.6 5.4 12 12 12h40c6.6 0 12-5.4 12-12zm192 0v-40c0-6.6-5.4-12-12-12h-40c-6.6 0-12 5.4-12 12v40c0 6.6 5.4 12 12 12h40c6.6 0 12-5.4 12-12zm96-260v352c0 26.5-21.5 
48-48 48H48c-26.5 0-48-21.5-48-48V112c0-26.5 21.5-48 48-48h48V12c0-6.6 5.4-12 12-12h40c6.6 0 12 5.4 12 12v52h128V12c0-6.6 5.4-12 12-12h40c6.6 0 12 5.4 12 12v52h48c26.5 0 48 21.5 48 48zm-48 346V160H48v298c0 3.3 2.7 6 6 6h340c3.3 0 6-2.7 6-6z"],"calendar-check":[448,512,[],"f274","M400 64h-48V12c0-6.627-5.373-12-12-12h-40c-6.627 0-12 5.373-12 12v52H160V12c0-6.627-5.373-12-12-12h-40c-6.627 0-12 5.373-12 12v52H48C21.49 64 0 85.49 0 112v352c0 26.51 21.49 48 48 48h352c26.51 0 48-21.49 48-48V112c0-26.51-21.49-48-48-48zm-6 400H54a6 6 0 0 1-6-6V160h352v298a6 6 0 0 1-6 6zm-52.849-200.65L198.842 404.519c-4.705 4.667-12.303 4.637-16.971-.068l-75.091-75.699c-4.667-4.705-4.637-12.303.068-16.971l22.719-22.536c4.705-4.667 12.303-4.637 16.97.069l44.104 44.461 111.072-110.181c4.705-4.667 12.303-4.637 16.971.068l22.536 22.718c4.667 4.705 4.636 12.303-.069 16.97z"],"calendar-minus":[448,512,[],"f272","M124 328c-6.6 0-12-5.4-12-12v-24c0-6.6 5.4-12 12-12h200c6.6 0 12 5.4 12 12v24c0 6.6-5.4 12-12 12H124zm324-216v352c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V112c0-26.5 21.5-48 48-48h48V12c0-6.6 5.4-12 12-12h40c6.6 0 12 5.4 12 12v52h128V12c0-6.6 5.4-12 12-12h40c6.6 0 12 5.4 12 12v52h48c26.5 0 48 21.5 48 48zm-48 346V160H48v298c0 3.3 2.7 6 6 6h340c3.3 0 6-2.7 6-6z"],"calendar-plus":[448,512,[],"f271","M336 292v24c0 6.6-5.4 12-12 12h-76v76c0 6.6-5.4 12-12 12h-24c-6.6 0-12-5.4-12-12v-76h-76c-6.6 0-12-5.4-12-12v-24c0-6.6 5.4-12 12-12h76v-76c0-6.6 5.4-12 12-12h24c6.6 0 12 5.4 12 12v76h76c6.6 0 12 5.4 12 12zm112-180v352c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V112c0-26.5 21.5-48 48-48h48V12c0-6.6 5.4-12 12-12h40c6.6 0 12 5.4 12 12v52h128V12c0-6.6 5.4-12 12-12h40c6.6 0 12 5.4 12 12v52h48c26.5 0 48 21.5 48 48zm-48 346V160H48v298c0 3.3 2.7 6 6 6h340c3.3 0 6-2.7 6-6z"],"calendar-times":[448,512,[],"f273","M311.7 374.7l-17 17c-4.7 4.7-12.3 4.7-17 0L224 337.9l-53.7 53.7c-4.7 4.7-12.3 4.7-17 0l-17-17c-4.7-4.7-4.7-12.3 0-17l53.7-53.7-53.7-53.7c-4.7-4.7-4.7-12.3 0-17l17-17c4.7-4.7 12.3-4.7 17 0l53.7 53.7 53.7-53.7c4.7-4.7 12.3-4.7 17 0l17 17c4.7 4.7 4.7 12.3 0 17L257.9 304l53.7 53.7c4.8 4.7 4.8 12.3.1 17zM448 112v352c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V112c0-26.5 21.5-48 48-48h48V12c0-6.6 5.4-12 12-12h40c6.6 0 12 5.4 12 12v52h128V12c0-6.6 5.4-12 12-12h40c6.6 0 12 5.4 12 12v52h48c26.5 0 48 21.5 48 48zm-48 346V160H48v298c0 3.3 2.7 6 6 6h340c3.3 0 6-2.7 6-6z"],"caret-square-down":[448,512,[],"f150","M125.1 208h197.8c10.7 0 16.1 13 8.5 20.5l-98.9 98.3c-4.7 4.7-12.2 4.7-16.9 0l-98.9-98.3c-7.7-7.5-2.3-20.5 8.4-20.5zM448 80v352c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V80c0-26.5 21.5-48 48-48h352c26.5 0 48 21.5 48 48zm-48 346V86c0-3.3-2.7-6-6-6H54c-3.3 0-6 2.7-6 6v340c0 3.3 2.7 6 6 6h340c3.3 0 6-2.7 6-6z"],"caret-square-left":[448,512,[],"f191","M272 157.1v197.8c0 10.7-13 16.1-20.5 8.5l-98.3-98.9c-4.7-4.7-4.7-12.2 0-16.9l98.3-98.9c7.5-7.7 20.5-2.3 20.5 8.4zM448 80v352c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V80c0-26.5 21.5-48 48-48h352c26.5 0 48 21.5 48 48zm-48 346V86c0-3.3-2.7-6-6-6H54c-3.3 0-6 2.7-6 6v340c0 3.3 2.7 6 6 6h340c3.3 0 6-2.7 6-6z"],"caret-square-right":[448,512,[],"f152","M176 354.9V157.1c0-10.7 13-16.1 20.5-8.5l98.3 98.9c4.7 4.7 4.7 12.2 0 16.9l-98.3 98.9c-7.5 7.7-20.5 2.3-20.5-8.4zM448 80v352c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V80c0-26.5 21.5-48 48-48h352c26.5 0 48 21.5 48 48zm-48 346V86c0-3.3-2.7-6-6-6H54c-3.3 0-6 2.7-6 6v340c0 3.3 2.7 6 6 6h340c3.3 0 6-2.7 6-6z"],"caret-square-up":[448,512,[],"f151","M322.9 304H125.1c-10.7 0-16.1-13-8.5-20.5l98.9-98.3c4.7-4.7 12.2-4.7 16.9 0l98.9 
98.3c7.7 7.5 2.3 20.5-8.4 20.5zM448 80v352c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V80c0-26.5 21.5-48 48-48h352c26.5 0 48 21.5 48 48zm-48 346V86c0-3.3-2.7-6-6-6H54c-3.3 0-6 2.7-6 6v340c0 3.3 2.7 6 6 6h340c3.3 0 6-2.7 6-6z"],"chart-bar":[512,512,[],"f080","M500 400c6.6 0 12 5.4 12 12v24c0 6.6-5.4 12-12 12H12c-6.6 0-12-5.4-12-12V76c0-6.6 5.4-12 12-12h24c6.6 0 12 5.4 12 12v324h452zm-356-60v-72c0-6.6-5.4-12-12-12h-24c-6.6 0-12 5.4-12 12v72c0 6.6 5.4 12 12 12h24c6.6 0 12-5.4 12-12zm96 0V140c0-6.6-5.4-12-12-12h-24c-6.6 0-12 5.4-12 12v200c0 6.6 5.4 12 12 12h24c6.6 0 12-5.4 12-12zm96 0V204c0-6.6-5.4-12-12-12h-24c-6.6 0-12 5.4-12 12v136c0 6.6 5.4 12 12 12h24c6.6 0 12-5.4 12-12zm96 0V108c0-6.6-5.4-12-12-12h-24c-6.6 0-12 5.4-12 12v232c0 6.6 5.4 12 12 12h24c6.6 0 12-5.4 12-12z"],"check-circle":[512,512,[],"f058","M256 8C119.033 8 8 119.033 8 256s111.033 248 248 248 248-111.033 248-248S392.967 8 256 8zm0 48c110.532 0 200 89.451 200 200 0 110.532-89.451 200-200 200-110.532 0-200-89.451-200-200 0-110.532 89.451-200 200-200m140.204 130.267l-22.536-22.718c-4.667-4.705-12.265-4.736-16.97-.068L215.346 303.697l-59.792-60.277c-4.667-4.705-12.265-4.736-16.97-.069l-22.719 22.536c-4.705 4.667-4.736 12.265-.068 16.971l90.781 91.516c4.667 4.705 12.265 4.736 16.97.068l172.589-171.204c4.704-4.668 4.734-12.266.067-16.971z"],"check-square":[448,512,[],"f14a","M400 32H48C21.49 32 0 53.49 0 80v352c0 26.51 21.49 48 48 48h352c26.51 0 48-21.49 48-48V80c0-26.51-21.49-48-48-48zm0 400H48V80h352v352zm-35.864-241.724L191.547 361.48c-4.705 4.667-12.303 4.637-16.97-.068l-90.781-91.516c-4.667-4.705-4.637-12.303.069-16.971l22.719-22.536c4.705-4.667 12.303-4.637 16.97.069l59.792 60.277 141.352-140.216c4.705-4.667 12.303-4.637 16.97.068l22.536 22.718c4.667 4.706 4.637 12.304-.068 16.971z"],circle:[512,512,[],"f111","M256 8C119 8 8 119 8 256s111 248 248 248 248-111 248-248S393 8 256 8zm0 448c-110.5 0-200-89.5-200-200S145.5 56 256 56s200 89.5 200 200-89.5 200-200 200z"],clipboard:[384,512,[],"f328","M336 64h-80c0-35.29-28.71-64-64-64s-64 28.71-64 64H48C21.49 64 0 85.49 0 112v352c0 26.51 21.49 48 48 48h288c26.51 0 48-21.49 48-48V112c0-26.51-21.49-48-48-48zm-6 400H54a6 6 0 0 1-6-6V118a6 6 0 0 1 6-6h42v36c0 6.627 5.373 12 12 12h168c6.627 0 12-5.373 12-12v-36h42a6 6 0 0 1 6 6v340a6 6 0 0 1-6 6zM192 40c13.255 0 24 10.745 24 24s-10.745 24-24 24-24-10.745-24-24 10.745-24 24-24"],clock:[512,512,[],"f017","M256 8C119 8 8 119 8 256s111 248 248 248 248-111 248-248S393 8 256 8zm0 448c-110.5 0-200-89.5-200-200S145.5 56 256 56s200 89.5 200 200-89.5 200-200 200zm61.8-104.4l-84.9-61.7c-3.1-2.3-4.9-5.9-4.9-9.7V116c0-6.6 5.4-12 12-12h32c6.6 0 12 5.4 12 12v141.7l66.8 48.6c5.4 3.9 6.5 11.4 2.6 16.8L334.6 349c-3.9 5.3-11.4 6.5-16.8 2.6z"],clone:[512,512,[],"f24d","M464 0H144c-26.51 0-48 21.49-48 48v48H48c-26.51 0-48 21.49-48 48v320c0 26.51 21.49 48 48 48h320c26.51 0 48-21.49 48-48v-48h48c26.51 0 48-21.49 48-48V48c0-26.51-21.49-48-48-48zM362 464H54a6 6 0 0 1-6-6V150a6 6 0 0 1 6-6h42v224c0 26.51 21.49 48 48 48h224v42a6 6 0 0 1-6 6zm96-96H150a6 6 0 0 1-6-6V54a6 6 0 0 1 6-6h308a6 6 0 0 1 6 6v308a6 6 0 0 1-6 6z"],"closed-captioning":[512,512,[],"f20a","M464 64H48C21.5 64 0 85.5 0 112v288c0 26.5 21.5 48 48 48h416c26.5 0 48-21.5 48-48V112c0-26.5-21.5-48-48-48zm-6 336H54c-3.3 0-6-2.7-6-6V118c0-3.3 2.7-6 6-6h404c3.3 0 6 2.7 6 6v276c0 3.3-2.7 6-6 6zm-211.1-85.7c1.7 2.4 1.5 5.6-.5 7.7-53.6 56.8-172.8 32.1-172.8-67.9 0-97.3 121.7-119.5 172.5-70.1 2.1 2 2.5 3.2 1 5.7l-17.5 30.5c-1.9 3.1-6.2 4-9.1 1.7-40.8-32-94.6-14.9-94.6 31.2 0 48 51 70.5 92.2 32.6 2.8-2.5 
7.1-2.1 9.2.9l19.6 27.7zm190.4 0c1.7 2.4 1.5 5.6-.5 7.7-53.6 56.9-172.8 32.1-172.8-67.9 0-97.3 121.7-119.5 172.5-70.1 2.1 2 2.5 3.2 1 5.7L420 220.2c-1.9 3.1-6.2 4-9.1 1.7-40.8-32-94.6-14.9-94.6 31.2 0 48 51 70.5 92.2 32.6 2.8-2.5 7.1-2.1 9.2.9l19.6 27.7z"],comment:[576,512,[],"f075","M288 32C129 32 0 125.1 0 240c0 49.3 23.7 94.5 63.3 130.2-8.7 23.3-22.1 32.7-37.1 43.1C15.1 421-6 433 1.6 456.5c5.1 15.4 20.9 24.7 38.1 23.3 57.7-4.6 111.2-19.2 157-42.5 28.7 6.9 59.4 10.7 91.2 10.7 159.1 0 288-93 288-208C576 125.1 447.1 32 288 32zm0 368c-32.5 0-65.4-4.4-97.3-14-32.3 19-78.7 46-134.7 54 32-24 56.8-61.6 61.2-88.4C79.1 325.6 48 286.7 48 240c0-70.9 86.3-160 240-160s240 89.1 240 160c0 71-86.3 160-240 160z"],"comment-alt":[576,512,[],"f27a","M288 32C129 32 0 125.1 0 240c0 49.3 23.7 94.5 63.3 130.2-8.7 23.3-22.1 32.7-37.1 43.1C15.1 421-6 433 1.6 456.5c5.1 15.4 20.9 24.7 38.1 23.3 57.7-4.6 111.2-19.2 157-42.5 28.7 6.9 59.4 10.7 91.2 10.7 159.1 0 288-93 288-208C576 125.1 447.1 32 288 32zm0 368c-32.5 0-65.4-4.4-97.3-14-32.3 19-78.7 46-134.7 54 32-24 56.8-61.6 61.2-88.4C79.1 325.6 48 286.7 48 240c0-70.9 86.3-160 240-160s240 89.1 240 160c0 71-86.3 160-240 160zm-64-160c0 26.5-21.5 48-48 48s-48-21.5-48-48 21.5-48 48-48 48 21.5 48 48zm112 0c0 26.5-21.5 48-48 48s-48-21.5-48-48 21.5-48 48-48 48 21.5 48 48zm112 0c0 26.5-21.5 48-48 48s-48-21.5-48-48 21.5-48 48-48 48 21.5 48 48z"],comments:[576,512,[],"f086","M574.507 443.86c-5.421 21.261-24.57 36.14-46.511 36.14-32.246 0-66.511-9.99-102.1-29.734-50.64 11.626-109.151 7.877-157.96-13.437 41.144-2.919 80.361-12.339 116.331-28.705 16.322-1.22 32.674-4.32 48.631-9.593C454.404 412.365 490.663 432 527.996 432c-32-17.455-43.219-38.958-46.159-58.502 25.443-18.848 46.159-47.183 46.159-81.135 0-10.495-2.383-21.536-7.041-32.467 7.405-25.93 8.656-50.194 5.185-73.938 32.164 30.461 49.856 69.128 49.856 106.405 0 33.893-12.913 65.047-34.976 91.119 2.653 2.038 5.924 4.176 9.962 6.378 19.261 10.508 28.947 32.739 23.525 54zM240.002 80C117.068 80 48.004 152.877 48.004 210.909c0 38.196 24.859 70.072 55.391 91.276-3.527 21.988-16.991 46.179-55.391 65.815 44.8 0 88.31-22.089 114.119-37.653 25.52 7.906 51.883 11.471 77.879 11.471C362.998 341.818 432 268.976 432 210.909 432 152.882 362.943 80 240.002 80m0-48C390.193 32 480 126.026 480 210.909c0 22.745-6.506 46.394-18.816 68.391-11.878 21.226-28.539 40.294-49.523 56.674-21.593 16.857-46.798 30.045-74.913 39.197-29.855 9.719-62.405 14.646-96.746 14.646-24.449 0-48.34-2.687-71.292-8.004C126.311 404.512 85.785 416 48.004 416c-22.18 0-41.472-15.197-46.665-36.761-5.194-21.563 5.064-43.878 24.811-53.976 7.663-3.918 13.324-7.737 17.519-11.294-7.393-7.829-13.952-16.124-19.634-24.844C8.09 264.655.005 238.339.005 210.909.005 126.259 89.508 32 240.002 32z"],compass:[512,512,[],"f14e","M256 8C119.033 8 8 119.033 8 256s111.033 248 248 248 248-111.033 248-248S392.967 8 256 8zm0 448c-110.532 0-200-89.451-200-200 0-110.531 89.451-200 200-200 110.532 0 200 89.451 200 200 0 110.532-89.451 200-200 200zm91.326-312.131l-33.359 137.779a24.005 24.005 0 0 1-6.772 11.729l-102.64 97.779c-17.104 16.293-45.56.434-39.88-23.024l33.359-137.779a23.997 23.997 0 0 1 6.772-11.729l102.642-97.779c17.285-16.47 45.494-.175 39.878 23.024zM256 224c-17.673 0-32 14.327-32 32s14.327 32 32 32 32-14.327 32-32-14.327-32-32-32z"],copy:[448,512,[],"f0c5","M433.941 65.941l-51.882-51.882A48 48 0 0 0 348.118 0H176c-26.51 0-48 21.49-48 48v48H48c-26.51 0-48 21.49-48 48v320c0 26.51 21.49 48 48 48h224c26.51 0 48-21.49 48-48v-48h80c26.51 0 48-21.49 48-48V99.882a48 48 0 0 0-14.059-33.941zM266 
464H54a6 6 0 0 1-6-6V150a6 6 0 0 1 6-6h74v224c0 26.51 21.49 48 48 48h96v42a6 6 0 0 1-6 6zm128-96H182a6 6 0 0 1-6-6V54a6 6 0 0 1 6-6h106v88c0 13.255 10.745 24 24 24h88v202a6 6 0 0 1-6 6zm6-256h-64V48h9.632c1.591 0 3.117.632 4.243 1.757l48.368 48.368a6 6 0 0 1 1.757 4.243V112z"],copyright:[512,512,[],"f1f9","M256 8C119.033 8 8 119.033 8 256s111.033 248 248 248 248-111.033 248-248S392.967 8 256 8zm0 448c-110.532 0-200-89.451-200-200 0-110.531 89.451-200 200-200 110.532 0 200 89.451 200 200 0 110.532-89.451 200-200 200zm107.351-101.064c-9.614 9.712-45.53 41.396-104.065 41.396-82.43 0-140.484-61.425-140.484-141.567 0-79.152 60.275-139.401 139.762-139.401 55.531 0 88.738 26.62 97.593 34.779a11.965 11.965 0 0 1 1.936 15.322l-18.155 28.113c-3.841 5.95-11.966 7.282-17.499 2.921-8.595-6.776-31.814-22.538-61.708-22.538-48.303 0-77.916 35.33-77.916 80.082 0 41.589 26.888 83.692 78.277 83.692 32.657 0 56.843-19.039 65.726-27.225 5.27-4.857 13.596-4.039 17.82 1.738l19.865 27.17a11.947 11.947 0 0 1-1.152 15.518z"],"credit-card":[576,512,[],"f09d","M527.9 32H48.1C21.5 32 0 53.5 0 80v352c0 26.5 21.5 48 48.1 48h479.8c26.6 0 48.1-21.5 48.1-48V80c0-26.5-21.5-48-48.1-48zM54.1 80h467.8c3.3 0 6 2.7 6 6v42H48.1V86c0-3.3 2.7-6 6-6zm467.8 352H54.1c-3.3 0-6-2.7-6-6V256h479.8v170c0 3.3-2.7 6-6 6zM192 332v40c0 6.6-5.4 12-12 12h-72c-6.6 0-12-5.4-12-12v-40c0-6.6 5.4-12 12-12h72c6.6 0 12 5.4 12 12zm192 0v40c0 6.6-5.4 12-12 12H236c-6.6 0-12-5.4-12-12v-40c0-6.6 5.4-12 12-12h136c6.6 0 12 5.4 12 12z"],"dot-circle":[512,512,[],"f192","M256 56c110.532 0 200 89.451 200 200 0 110.532-89.451 200-200 200-110.532 0-200-89.451-200-200 0-110.532 89.451-200 200-200m0-48C119.033 8 8 119.033 8 256s111.033 248 248 248 248-111.033 248-248S392.967 8 256 8zm0 168c-44.183 0-80 35.817-80 80s35.817 80 80 80 80-35.817 80-80-35.817-80-80-80z"],edit:[576,512,[],"f044","M402.3 344.9l32-32c5-5 13.7-1.5 13.7 5.7V464c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V112c0-26.5 21.5-48 48-48h273.5c7.1 0 10.7 8.6 5.7 13.7l-32 32c-1.5 1.5-3.5 2.3-5.7 2.3H48v352h352V350.5c0-2.1.8-4.1 2.3-5.6zm156.6-201.8L296.3 405.7l-90.4 10c-26.2 2.9-48.5-19.2-45.6-45.6l10-90.4L432.9 17.1c22.9-22.9 59.9-22.9 82.7 0l43.2 43.2c22.9 22.9 22.9 60 .1 82.8zM460.1 174L402 115.9 216.2 301.8l-7.3 65.3 65.3-7.3L460.1 174zm64.8-79.7l-43.2-43.2c-4.1-4.1-10.8-4.1-14.8 0L436 82l58.1 58.1 30.9-30.9c4-4.2 4-10.8-.1-14.9z"],envelope:[512,512,[],"f0e0","M464 64H48C21.49 64 0 85.49 0 112v288c0 26.51 21.49 48 48 48h416c26.51 0 48-21.49 48-48V112c0-26.51-21.49-48-48-48zm0 48v40.805c-22.422 18.259-58.168 46.651-134.587 106.49-16.841 13.247-50.201 45.072-73.413 44.701-23.208.375-56.579-31.459-73.413-44.701C106.18 199.465 70.425 171.067 48 152.805V112h416zM48 400V214.398c22.914 18.251 55.409 43.862 104.938 82.646 21.857 17.205 60.134 55.186 103.062 54.955 42.717.231 80.509-37.199 103.053-54.947 49.528-38.783 82.032-64.401 104.947-82.653V400H48z"],"envelope-open":[512,512,[],"f2b6","M494.586 164.516c-4.697-3.883-111.723-89.95-135.251-108.657C337.231 38.191 299.437 0 256 0c-43.205 0-80.636 37.717-103.335 55.859-24.463 19.45-131.07 105.195-135.15 108.549A48.004 48.004 0 0 0 0 201.485V464c0 26.51 21.49 48 48 48h416c26.51 0 48-21.49 48-48V201.509a48 48 0 0 0-17.414-36.993zM464 458a6 6 0 0 1-6 6H54a6 6 0 0 1-6-6V204.347c0-1.813.816-3.526 2.226-4.665 15.87-12.814 108.793-87.554 132.364-106.293C200.755 78.88 232.398 48 256 48c23.693 0 55.857 31.369 73.41 45.389 23.573 18.741 116.503 93.493 132.366 106.316a5.99 5.99 0 0 1 2.224 4.663V458zm-31.991-187.704c4.249 5.159 3.465 12.795-1.745 16.981-28.975 
23.283-59.274 47.597-70.929 56.863C336.636 362.283 299.205 400 256 400c-43.452 0-81.287-38.237-103.335-55.86-11.279-8.967-41.744-33.413-70.927-56.865-5.21-4.187-5.993-11.822-1.745-16.981l15.258-18.528c4.178-5.073 11.657-5.843 16.779-1.726 28.618 23.001 58.566 47.035 70.56 56.571C200.143 320.631 232.307 352 256 352c23.602 0 55.246-30.88 73.41-45.389 11.994-9.535 41.944-33.57 70.563-56.568 5.122-4.116 12.601-3.346 16.778 1.727l15.258 18.526z"],"eye-slash":[576,512,[],"f070","M272.702 359.139c-80.483-9.011-136.212-86.886-116.93-167.042l116.93 167.042zM288 392c-102.556 0-192.092-54.701-240-136 21.755-36.917 52.1-68.342 88.344-91.658l-27.541-39.343C67.001 152.234 31.921 188.741 6.646 231.631a47.999 47.999 0 0 0 0 48.739C63.004 376.006 168.14 440 288 440a332.89 332.89 0 0 0 39.648-2.367l-32.021-45.744A284.16 284.16 0 0 1 288 392zm281.354-111.631c-33.232 56.394-83.421 101.742-143.554 129.492l48.116 68.74c3.801 5.429 2.48 12.912-2.949 16.712L450.23 509.83c-5.429 3.801-12.912 2.48-16.712-2.949L102.084 33.399c-3.801-5.429-2.48-12.912 2.949-16.712L125.77 2.17c5.429-3.801 12.912-2.48 16.712 2.949l55.526 79.325C226.612 76.343 256.808 72 288 72c119.86 0 224.996 63.994 281.354 159.631a48.002 48.002 0 0 1 0 48.738zM528 256c-44.157-74.933-123.677-127.27-216.162-135.007C302.042 131.078 296 144.83 296 160c0 30.928 25.072 56 56 56s56-25.072 56-56l-.001-.042c30.632 57.277 16.739 130.26-36.928 171.719l26.695 38.135C452.626 346.551 498.308 306.386 528 256z"],file:[384,512,[],"f15b","M369.9 97.9L286 14C277 5 264.8-.1 252.1-.1H48C21.5 0 0 21.5 0 48v416c0 26.5 21.5 48 48 48h288c26.5 0 48-21.5 48-48V131.9c0-12.7-5.1-25-14.1-34zM332.1 128H256V51.9l76.1 76.1zM48 464V48h160v104c0 13.3 10.7 24 24 24h104v288H48z"],"file-alt":[384,512,[],"f15c","M288 248v28c0 6.6-5.4 12-12 12H108c-6.6 0-12-5.4-12-12v-28c0-6.6 5.4-12 12-12h168c6.6 0 12 5.4 12 12zm-12 72H108c-6.6 0-12 5.4-12 12v28c0 6.6 5.4 12 12 12h168c6.6 0 12-5.4 12-12v-28c0-6.6-5.4-12-12-12zm108-188.1V464c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V48C0 21.5 21.5 0 48 0h204.1C264.8 0 277 5.1 286 14.1L369.9 98c9 8.9 14.1 21.2 14.1 33.9zm-128-80V128h76.1L256 51.9zM336 464V176H232c-13.3 0-24-10.7-24-24V48H48v416h288z"],"file-archive":[384,512,[],"f1c6","M369.941 97.941l-83.882-83.882A48 48 0 0 0 252.118 0H48C21.49 0 0 21.49 0 48v416c0 26.51 21.49 48 48 48h288c26.51 0 48-21.49 48-48V131.882a48 48 0 0 0-14.059-33.941zM256 51.882L332.118 128H256V51.882zM336 464H48V48h79.714v16h32V48H208v104c0 13.255 10.745 24 24 24h104v288zM192.27 96h-32V64h32v32zm-32 0v32h-32V96h32zm0 64v32h-32v-32h32zm32 0h-32v-32h32v32zm1.909 105.678A12 12 0 0 0 182.406 256H160.27v-32h-32v32l-19.69 97.106C101.989 385.611 126.834 416 160 416c33.052 0 57.871-30.192 51.476-62.62l-17.297-87.702zM160.27 390.073c-17.918 0-32.444-12.105-32.444-27.036 0-14.932 14.525-27.036 32.444-27.036s32.444 12.105 32.444 27.036c0 14.931-14.526 27.036-32.444 27.036zm32-166.073h-32v-32h32v32z"],"file-audio":[384,512,[],"f1c7","M369.941 97.941l-83.882-83.882A48 48 0 0 0 252.118 0H48C21.49 0 0 21.49 0 48v416c0 26.51 21.49 48 48 48h288c26.51 0 48-21.49 48-48V131.882a48 48 0 0 0-14.059-33.941zM332.118 128H256V51.882L332.118 128zM48 464V48h160v104c0 13.255 10.745 24 24 24h104v288H48zm144-76.024c0 10.691-12.926 16.045-20.485 8.485L136 360.486h-28c-6.627 0-12-5.373-12-12v-56c0-6.627 5.373-12 12-12h28l35.515-36.947c7.56-7.56 20.485-2.206 20.485 8.485v135.952zm41.201-47.13c9.051-9.297 9.06-24.133.001-33.439-22.149-22.752 12.235-56.246 34.395-33.481 27.198 27.94 27.212 72.444.001 100.401-21.793 
22.386-56.947-10.315-34.397-33.481z"],"file-code":[384,512,[],"f1c9","M369.941 97.941l-83.882-83.882A48 48 0 0 0 252.118 0H48C21.49 0 0 21.49 0 48v416c0 26.51 21.49 48 48 48h288c26.51 0 48-21.49 48-48V131.882a48 48 0 0 0-14.059-33.941zM332.118 128H256V51.882L332.118 128zM48 464V48h160v104c0 13.255 10.745 24 24 24h104v288H48zm101.677-115.115L116.854 320l32.822-28.885a8.793 8.793 0 0 0 .605-12.624l-17.403-18.564c-3.384-3.613-8.964-3.662-12.438-.401L62.78 313.58c-3.703 3.474-3.704 9.367.001 12.84l57.659 54.055a8.738 8.738 0 0 0 6.012 2.381 8.746 8.746 0 0 0 6.427-2.782l17.403-18.563a8.795 8.795 0 0 0-.605-12.626zm84.284-127.85l-24.401-7.084a8.796 8.796 0 0 0-10.905 5.998L144.04 408.061c-1.353 4.66 1.338 9.552 5.998 10.905l24.403 7.084c4.68 1.355 9.557-1.354 10.905-5.998l54.612-188.112c1.354-4.66-1.337-9.552-5.997-10.905zm87.258 92.545l-57.658-54.055c-3.526-3.307-9.099-3.165-12.439.401l-17.403 18.563a8.795 8.795 0 0 0 .605 12.625L267.146 320l-32.822 28.885a8.793 8.793 0 0 0-.605 12.624l17.403 18.564a8.797 8.797 0 0 0 12.439.401h-.001l57.66-54.055c3.703-3.473 3.703-9.366-.001-12.839z"],"file-excel":[384,512,[],"f1c3","M369.9 97.9L286 14C277 5 264.8-.1 252.1-.1H48C21.5 0 0 21.5 0 48v416c0 26.5 21.5 48 48 48h288c26.5 0 48-21.5 48-48V131.9c0-12.7-5.1-25-14.1-34zM332.1 128H256V51.9l76.1 76.1zM48 464V48h160v104c0 13.3 10.7 24 24 24h104v288H48zm212-240h-28.8c-4.4 0-8.4 2.4-10.5 6.3-18 33.1-22.2 42.4-28.6 57.7-13.9-29.1-6.9-17.3-28.6-57.7-2.1-3.9-6.2-6.3-10.6-6.3H124c-9.3 0-15 10-10.4 18l46.3 78-46.3 78c-4.7 8 1.1 18 10.4 18h28.9c4.4 0 8.4-2.4 10.5-6.3 21.7-40 23-45 28.6-57.7 14.9 30.2 5.9 15.9 28.6 57.7 2.1 3.9 6.2 6.3 10.6 6.3H260c9.3 0 15-10 10.4-18L224 320c.7-1.1 30.3-50.5 46.3-78 4.7-8-1.1-18-10.3-18z"],"file-image":[384,512,[],"f1c5","M369.9 97.9L286 14C277 5 264.8-.1 252.1-.1H48C21.5 0 0 21.5 0 48v416c0 26.5 21.5 48 48 48h288c26.5 0 48-21.5 48-48V131.9c0-12.7-5.1-25-14.1-34zM332.1 128H256V51.9l76.1 76.1zM48 464V48h160v104c0 13.3 10.7 24 24 24h104v288H48zm32-48h224V288l-23.5-23.5c-4.7-4.7-12.3-4.7-17 0L176 352l-39.5-39.5c-4.7-4.7-12.3-4.7-17 0L80 352v64zm48-240c-26.5 0-48 21.5-48 48s21.5 48 48 48 48-21.5 48-48-21.5-48-48-48z"],"file-pdf":[384,512,[],"f1c1","M369.9 97.9L286 14C277 5 264.8-.1 252.1-.1H48C21.5 0 0 21.5 0 48v416c0 26.5 21.5 48 48 48h288c26.5 0 48-21.5 48-48V131.9c0-12.7-5.1-25-14.1-34zM332.1 128H256V51.9l76.1 76.1zM48 464V48h160v104c0 13.3 10.7 24 24 24h104v288H48zm250.2-143.7c-12.2-12-47-8.7-64.4-6.5-17.2-10.5-28.7-25-36.8-46.3 3.9-16.1 10.1-40.6 5.4-56-4.2-26.2-37.8-23.6-42.6-5.9-4.4 16.1-.4 38.5 7 67.1-10 23.9-24.9 56-35.4 74.4-20 10.3-47 26.2-51 46.2-3.3 15.8 26 55.2 76.1-31.2 22.4-7.4 46.8-16.5 68.4-20.1 18.9 10.2 41 17 55.8 17 25.5 0 28-28.2 17.5-38.7zm-198.1 77.8c5.1-13.7 24.5-29.5 30.4-35-19 30.3-30.4 35.7-30.4 35zm81.6-190.6c7.4 0 6.7 32.1 1.8 40.8-4.4-13.9-4.3-40.8-1.8-40.8zm-24.4 136.6c9.7-16.9 18-37 24.7-54.7 8.3 15.1 18.9 27.2 30.1 35.5-20.8 4.3-38.9 13.1-54.8 19.2zm131.6-5s-5 6-37.3-7.8c35.1-2.6 40.9 5.4 37.3 7.8z"],"file-powerpoint":[384,512,[],"f1c4","M369.9 97.9L286 14C277 5 264.8-.1 252.1-.1H48C21.5 0 0 21.5 0 48v416c0 26.5 21.5 48 48 48h288c26.5 0 48-21.5 48-48V131.9c0-12.7-5.1-25-14.1-34zM332.1 128H256V51.9l76.1 76.1zM48 464V48h160v104c0 13.3 10.7 24 24 24h104v288H48zm72-60V236c0-6.6 5.4-12 12-12h69.2c36.7 0 62.8 27 62.8 66.3 0 74.3-68.7 66.5-95.5 66.5V404c0 6.6-5.4 12-12 12H132c-6.6 0-12-5.4-12-12zm48.5-87.4h23c7.9 0 13.9-2.4 18.1-7.2 8.5-9.8 8.4-28.5.1-37.8-4.1-4.6-9.9-7-17.4-7h-23.9v52z"],"file-video":[384,512,[],"f1c8","M369.941 97.941l-83.882-83.882A48 48 0 
0 0 252.118 0H48C21.49 0 0 21.49 0 48v416c0 26.51 21.49 48 48 48h288c26.51 0 48-21.49 48-48V131.882a48 48 0 0 0-14.059-33.941zM332.118 128H256V51.882L332.118 128zM48 464V48h160v104c0 13.255 10.745 24 24 24h104v288H48zm228.687-211.303L224 305.374V268c0-11.046-8.954-20-20-20H100c-11.046 0-20 8.954-20 20v104c0 11.046 8.954 20 20 20h104c11.046 0 20-8.954 20-20v-37.374l52.687 52.674C286.704 397.318 304 390.28 304 375.986V264.011c0-14.311-17.309-21.319-27.313-11.314z"],"file-word":[384,512,[],"f1c2","M369.9 97.9L286 14C277 5 264.8-.1 252.1-.1H48C21.5 0 0 21.5 0 48v416c0 26.5 21.5 48 48 48h288c26.5 0 48-21.5 48-48V131.9c0-12.7-5.1-25-14.1-34zM332.1 128H256V51.9l76.1 76.1zM48 464V48h160v104c0 13.3 10.7 24 24 24h104v288H48zm220.1-208c-5.7 0-10.6 4-11.7 9.5-20.6 97.7-20.4 95.4-21 103.5-.2-1.2-.4-2.6-.7-4.3-.8-5.1.3.2-23.6-99.5-1.3-5.4-6.1-9.2-11.7-9.2h-13.3c-5.5 0-10.3 3.8-11.7 9.1-24.4 99-24 96.2-24.8 103.7-.1-1.1-.2-2.5-.5-4.2-.7-5.2-14.1-73.3-19.1-99-1.1-5.6-6-9.7-11.8-9.7h-16.8c-7.8 0-13.5 7.3-11.7 14.8 8 32.6 26.7 109.5 33.2 136 1.3 5.4 6.1 9.1 11.7 9.1h25.2c5.5 0 10.3-3.7 11.6-9.1l17.9-71.4c1.5-6.2 2.5-12 3-17.3l2.9 17.3c.1.4 12.6 50.5 17.9 71.4 1.3 5.3 6.1 9.1 11.6 9.1h24.7c5.5 0 10.3-3.7 11.6-9.1 20.8-81.9 30.2-119 34.5-136 1.9-7.6-3.8-14.9-11.6-14.9h-15.8z"],flag:[512,512,[],"f024","M336.174 80c-49.132 0-93.305-32-161.913-32-31.301 0-58.303 6.482-80.721 15.168a48.04 48.04 0 0 0 2.142-20.727C93.067 19.575 74.167 1.594 51.201.104 23.242-1.71 0 20.431 0 48c0 17.764 9.657 33.262 24 41.562V496c0 8.837 7.163 16 16 16h16c8.837 0 16-7.163 16-16v-83.443C109.869 395.28 143.259 384 199.826 384c49.132 0 93.305 32 161.913 32 58.479 0 101.972-22.617 128.548-39.981C503.846 367.161 512 352.051 512 335.855V95.937c0-34.459-35.264-57.768-66.904-44.117C409.193 67.309 371.641 80 336.174 80zM464 336c-21.783 15.412-60.824 32-102.261 32-59.945 0-102.002-32-161.913-32-43.361 0-96.379 9.403-127.826 24V128c21.784-15.412 60.824-32 102.261-32 59.945 0 102.002 32 161.913 32 43.271 0 96.32-17.366 127.826-32v240z"],folder:[512,512,[],"f07b","M464 128H272l-64-64H48C21.49 64 0 85.49 0 112v288c0 26.51 21.49 48 48 48h416c26.51 0 48-21.49 48-48V176c0-26.51-21.49-48-48-48zm-6 272H54c-3.314 0-6-2.678-6-5.992V117.992A5.993 5.993 0 0 1 54 112h134.118l64 64H458a6 6 0 0 1 6 6v212a6 6 0 0 1-6 6z"],"folder-open":[576,512,[],"f07c","M527.943 224H480v-48c0-26.51-21.49-48-48-48H272l-64-64H48C21.49 64 0 85.49 0 112v288c0 26.51 21.49 48 48 48h400a48.001 48.001 0 0 0 40.704-22.56l79.942-128c19.948-31.917-3.038-73.44-40.703-73.44zM54 112h134.118l64 64H426a6 6 0 0 1 6 6v42H152a48 48 0 0 0-41.098 23.202L48 351.449V117.993A5.993 5.993 0 0 1 54 112zm394 288H72l77.234-128H528l-80 128z"],frown:[512,512,[],"f119","M256 56c110.532 0 200 89.451 200 200 0 110.532-89.451 200-200 200-110.532 0-200-89.451-200-200 0-110.532 89.451-200 200-200m0-48C119.033 8 8 119.033 8 256s111.033 248 248 248 248-111.033 248-248S392.967 8 256 8zm64 136c-9.535 0-18.512 2.386-26.37 6.589h.017c12.735 0 23.059 10.324 23.059 23.059 0 12.735-10.324 23.059-23.059 23.059s-23.059-10.324-23.059-23.059v-.017C266.386 181.488 264 190.465 264 200c0 30.928 25.072 56 56 56s56-25.072 56-56-25.072-56-56-56zm-128 0c-9.535 0-18.512 2.386-26.37 6.589h.017c12.735 0 23.059 10.324 23.059 23.059 0 12.735-10.324 23.059-23.059 23.059-12.735 0-23.059-10.324-23.059-23.059v-.017C138.386 181.488 136 190.465 136 200c0 30.928 25.072 56 56 56s56-25.072 56-56-25.072-56-56-56zm171.547 201.782c-56.595-76.964-158.383-77.065-215.057-.001-18.82 25.593 19.858 54.018 38.67 28.438 37.511-51.01 100.365-50.796 
137.717-.001 18.509 25.172 57.821-2.395 38.67-28.436z"],futbol:[512,512,[],"f1e3","M207.898 325.571l-29.894-91.312L256 177.732l77.996 56.527-29.622 91.312h-96.476zM504 256c0 136.967-111.033 248-248 248S8 392.967 8 256 119.033 8 256 8s248 111.033 248 248zm-455.998-.19L48 256c0 44.7 14.015 87.242 39.95 122.626l7.982-35.118 88.595 10.871 37.775 80.985-30.978 18.427c41.832 13.631 87.598 13.606 129.354 0L289.7 435.364l37.775-80.985 88.594-10.871 7.982 35.118C449.985 343.242 464 300.7 464 256l-.002-.19-27.003 23.561-65.223-60.875 17.122-87.779 35.538 3.183c-25.216-34.63-61.309-62.053-104.577-75.951l14.143 33.091L256 134.25l-77.996-43.21 14.144-33.091C148.868 71.851 112.782 99.277 87.57 133.9l35.81-3.183 16.849 87.779-65.223 60.875-27.004-23.561z"],gem:[576,512,[],"f3a5","M464 0H112c-4 0-7.8 2-10 5.4L2 152.6c-2.9 4.4-2.6 10.2.7 14.2l276 340.8c4.8 5.9 13.8 5.9 18.6 0l276-340.8c3.3-4.1 3.6-9.8.7-14.2L474.1 5.4C471.8 2 468.1 0 464 0zm-19.3 48l63.3 96h-68.4l-51.7-96h56.8zm-202.1 0h90.7l51.7 96H191l51.6-96zm-111.3 0h56.8l-51.7 96H68l63.3-96zm-43 144h51.4L208 352 88.3 192zm102.9 0h193.6L288 435.3 191.2 192zM368 352l68.2-160h51.4L368 352z"],"hand-lizard":[576,512,[],"f258","M556.686 290.542L410.328 64.829C397.001 44.272 374.417 32 349.917 32H56C25.121 32 0 57.122 0 88v8c0 44.112 35.888 80 80 80h196.042l-18.333 48H144c-48.523 0-88 39.477-88 88 0 30.879 25.121 56 56 56h131.552c2.987 0 5.914.549 8.697 1.631L352 408.418V480h224V355.829c0-23.225-6.679-45.801-19.314-65.287zM528 432H400v-23.582c0-19.948-12.014-37.508-30.604-44.736l-99.751-38.788A71.733 71.733 0 0 0 243.552 320H112c-4.411 0-8-3.589-8-8 0-22.056 17.944-40 40-40h113.709c19.767 0 37.786-12.407 44.84-30.873l24.552-64.281c8.996-23.553-8.428-48.846-33.63-48.846H80c-17.645 0-32-14.355-32-32v-8c0-4.411 3.589-8 8-8h293.917c8.166 0 15.693 4.09 20.137 10.942l146.358 225.715A71.84 71.84 0 0 1 528 355.829V432z"],"hand-paper":[448,512,[],"f256","M372.57 112.641v-10.825c0-43.612-40.52-76.691-83.039-65.546-25.629-49.5-94.09-47.45-117.982.747C130.269 26.456 89.144 57.945 89.144 102v126.13c-19.953-7.427-43.308-5.068-62.083 8.871-29.355 21.796-35.794 63.333-14.55 93.153L132.48 498.569a32 32 0 0 0 26.062 13.432h222.897c14.904 0 27.835-10.289 31.182-24.813l30.184-130.958A203.637 203.637 0 0 0 448 310.564V179c0-40.62-35.523-71.992-75.43-66.359zm27.427 197.922c0 11.731-1.334 23.469-3.965 34.886L368.707 464h-201.92L51.591 302.303c-14.439-20.27 15.023-42.776 29.394-22.605l27.128 38.079c8.995 12.626 29.031 6.287 29.031-9.283V102c0-25.645 36.571-24.81 36.571.691V256c0 8.837 7.163 16 16 16h6.856c8.837 0 16-7.163 16-16V67c0-25.663 36.571-24.81 36.571.691V256c0 8.837 7.163 16 16 16h6.856c8.837 0 16-7.163 16-16V101.125c0-25.672 36.57-24.81 36.57.691V256c0 8.837 7.163 16 16 16h6.857c8.837 0 16-7.163 16-16v-76.309c0-26.242 36.57-25.64 36.57-.691v131.563z"],"hand-peace":[448,512,[],"f25b","M362.146 191.976c-13.71-21.649-38.761-34.016-65.006-30.341V74c0-40.804-32.811-74-73.141-74-40.33 0-73.14 33.196-73.14 74L160 168l-18.679-78.85C126.578 50.843 83.85 32.11 46.209 47.208 8.735 62.238-9.571 104.963 5.008 142.85l55.757 144.927c-30.557 24.956-43.994 57.809-24.733 92.218l54.853 97.999C102.625 498.97 124.73 512 148.575 512h205.702c30.744 0 57.558-21.44 64.555-51.797l27.427-118.999a67.801 67.801 0 0 0 1.729-15.203L448 256c0-44.956-43.263-77.343-85.854-64.024zM399.987 326c0 1.488-.169 2.977-.502 4.423l-27.427 119.001c-1.978 8.582-9.29 14.576-17.782 14.576H148.575c-6.486 0-12.542-3.621-15.805-9.449l-54.854-98c-4.557-8.141-2.619-18.668 4.508-24.488l26.647-21.764a16 16 0 0 0 
4.812-18.139l-64.09-166.549C37.226 92.956 84.37 74.837 96.51 106.389l59.784 155.357A16 16 0 0 0 171.227 272h11.632c8.837 0 16-7.163 16-16V74c0-34.375 50.281-34.43 50.281 0v182c0 8.837 7.163 16 16 16h6.856c8.837 0 16-7.163 16-16v-28c0-25.122 36.567-25.159 36.567 0v28c0 8.837 7.163 16 16 16h6.856c8.837 0 16-7.163 16-16 0-25.12 36.567-25.16 36.567 0v70z"],"hand-point-down":[448,512,[],"f0a7","M188.8 512c45.616 0 83.2-37.765 83.2-83.2v-35.647a93.148 93.148 0 0 0 22.064-7.929c22.006 2.507 44.978-3.503 62.791-15.985C409.342 368.1 448 331.841 448 269.299V248c0-60.063-40-98.512-40-127.2v-2.679c4.952-5.747 8-13.536 8-22.12V32c0-17.673-12.894-32-28.8-32H156.8C140.894 0 128 14.327 128 32v64c0 8.584 3.048 16.373 8 22.12v2.679c0 6.964-6.193 14.862-23.668 30.183l-.148.129-.146.131c-9.937 8.856-20.841 18.116-33.253 25.851C48.537 195.798 0 207.486 0 252.8c0 56.928 35.286 92 83.2 92 8.026 0 15.489-.814 22.4-2.176V428.8c0 45.099 38.101 83.2 83.2 83.2zm0-48c-18.7 0-35.2-16.775-35.2-35.2V270.4c-17.325 0-35.2 26.4-70.4 26.4-26.4 0-35.2-20.625-35.2-44 0-8.794 32.712-20.445 56.1-34.926 14.575-9.074 27.225-19.524 39.875-30.799 18.374-16.109 36.633-33.836 39.596-59.075h176.752C364.087 170.79 400 202.509 400 248v21.299c0 40.524-22.197 57.124-61.325 50.601-8.001 14.612-33.979 24.151-53.625 12.925-18.225 19.365-46.381 17.787-61.05 4.95V428.8c0 18.975-16.225 35.2-35.2 35.2zM328 64c0-13.255 10.745-24 24-24s24 10.745 24 24-10.745 24-24 24-24-10.745-24-24z"],"hand-point-left":[512,512,[],"f0a5","M0 220.8C0 266.416 37.765 304 83.2 304h35.647a93.148 93.148 0 0 0 7.929 22.064c-2.507 22.006 3.503 44.978 15.985 62.791C143.9 441.342 180.159 480 242.701 480H264c60.063 0 98.512-40 127.2-40h2.679c5.747 4.952 13.536 8 22.12 8h64c17.673 0 32-12.894 32-28.8V188.8c0-15.906-14.327-28.8-32-28.8h-64c-8.584 0-16.373 3.048-22.12 8H391.2c-6.964 0-14.862-6.193-30.183-23.668l-.129-.148-.131-.146c-8.856-9.937-18.116-20.841-25.851-33.253C316.202 80.537 304.514 32 259.2 32c-56.928 0-92 35.286-92 83.2 0 8.026.814 15.489 2.176 22.4H83.2C38.101 137.6 0 175.701 0 220.8zm48 0c0-18.7 16.775-35.2 35.2-35.2h158.4c0-17.325-26.4-35.2-26.4-70.4 0-26.4 20.625-35.2 44-35.2 8.794 0 20.445 32.712 34.926 56.1 9.074 14.575 19.524 27.225 30.799 39.875 16.109 18.374 33.836 36.633 59.075 39.596v176.752C341.21 396.087 309.491 432 264 432h-21.299c-40.524 0-57.124-22.197-50.601-61.325-14.612-8.001-24.151-33.979-12.925-53.625-19.365-18.225-17.787-46.381-4.95-61.05H83.2C64.225 256 48 239.775 48 220.8zM448 360c13.255 0 24 10.745 24 24s-10.745 24-24 24-24-10.745-24-24 10.745-24 24-24z"],"hand-point-right":[512,512,[],"f0a4","M428.8 137.6h-86.177a115.52 115.52 0 0 0 2.176-22.4c0-47.914-35.072-83.2-92-83.2-45.314 0-57.002 48.537-75.707 78.784-7.735 12.413-16.994 23.317-25.851 33.253l-.131.146-.129.148C135.662 161.807 127.764 168 120.8 168h-2.679c-5.747-4.952-13.536-8-22.12-8H32c-17.673 0-32 12.894-32 28.8v230.4C0 435.106 14.327 448 32 448h64c8.584 0 16.373-3.048 22.12-8h2.679c28.688 0 67.137 40 127.2 40h21.299c62.542 0 98.8-38.658 99.94-91.145 12.482-17.813 18.491-40.785 15.985-62.791A93.148 93.148 0 0 0 393.152 304H428.8c45.435 0 83.2-37.584 83.2-83.2 0-45.099-38.101-83.2-83.2-83.2zm0 118.4h-91.026c12.837 14.669 14.415 42.825-4.95 61.05 11.227 19.646 1.687 45.624-12.925 53.625 6.524 39.128-10.076 61.325-50.6 61.325H248c-45.491 0-77.21-35.913-120-39.676V215.571c25.239-2.964 42.966-21.222 59.075-39.596 11.275-12.65 21.725-25.3 30.799-39.875C232.355 112.712 244.006 80 252.8 80c23.375 0 44 8.8 44 35.2 0 35.2-26.4 53.075-26.4 70.4h158.4c18.425 0 35.2 16.5 35.2 35.2 0 
18.975-16.225 35.2-35.2 35.2zM88 384c0 13.255-10.745 24-24 24s-24-10.745-24-24 10.745-24 24-24 24 10.745 24 24z"],"hand-point-up":[448,512,[],"f0a6","M105.6 83.2v86.177a115.52 115.52 0 0 0-22.4-2.176c-47.914 0-83.2 35.072-83.2 92 0 45.314 48.537 57.002 78.784 75.707 12.413 7.735 23.317 16.994 33.253 25.851l.146.131.148.129C129.807 376.338 136 384.236 136 391.2v2.679c-4.952 5.747-8 13.536-8 22.12v64c0 17.673 12.894 32 28.8 32h230.4c15.906 0 28.8-14.327 28.8-32v-64c0-8.584-3.048-16.373-8-22.12V391.2c0-28.688 40-67.137 40-127.2v-21.299c0-62.542-38.658-98.8-91.145-99.94-17.813-12.482-40.785-18.491-62.791-15.985A93.148 93.148 0 0 0 272 118.847V83.2C272 37.765 234.416 0 188.8 0c-45.099 0-83.2 38.101-83.2 83.2zm118.4 0v91.026c14.669-12.837 42.825-14.415 61.05 4.95 19.646-11.227 45.624-1.687 53.625 12.925 39.128-6.524 61.325 10.076 61.325 50.6V264c0 45.491-35.913 77.21-39.676 120H183.571c-2.964-25.239-21.222-42.966-39.596-59.075-12.65-11.275-25.3-21.725-39.875-30.799C80.712 279.645 48 267.994 48 259.2c0-23.375 8.8-44 35.2-44 35.2 0 53.075 26.4 70.4 26.4V83.2c0-18.425 16.5-35.2 35.2-35.2 18.975 0 35.2 16.225 35.2 35.2zM352 424c13.255 0 24 10.745 24 24s-10.745 24-24 24-24-10.745-24-24 10.745-24 24-24z"],"hand-pointer":[448,512,[],"f25a","M358.182 179.361c-19.493-24.768-52.679-31.945-79.872-19.098-15.127-15.687-36.182-22.487-56.595-19.629V67c0-36.944-29.736-67-66.286-67S89.143 30.056 89.143 67v161.129c-19.909-7.41-43.272-5.094-62.083 8.872-29.355 21.795-35.793 63.333-14.55 93.152l109.699 154.001C134.632 501.59 154.741 512 176 512h178.286c30.802 0 57.574-21.5 64.557-51.797l27.429-118.999A67.873 67.873 0 0 0 448 326v-84c0-46.844-46.625-79.273-89.818-62.639zM80.985 279.697l27.126 38.079c8.995 12.626 29.031 6.287 29.031-9.283V67c0-25.12 36.571-25.16 36.571 0v175c0 8.836 7.163 16 16 16h6.857c8.837 0 16-7.164 16-16v-35c0-25.12 36.571-25.16 36.571 0v35c0 8.836 7.163 16 16 16H272c8.837 0 16-7.164 16-16v-21c0-25.12 36.571-25.16 36.571 0v21c0 8.836 7.163 16 16 16h6.857c8.837 0 16-7.164 16-16 0-25.121 36.571-25.16 36.571 0v84c0 1.488-.169 2.977-.502 4.423l-27.43 119.001c-1.978 8.582-9.29 14.576-17.782 14.576H176c-5.769 0-11.263-2.878-14.697-7.697l-109.712-154c-14.406-20.223 14.994-42.818 29.394-22.606zM176.143 400v-96c0-8.837 6.268-16 14-16h6c7.732 0 14 7.163 14 16v96c0 8.837-6.268 16-14 16h-6c-7.733 0-14-7.163-14-16zm75.428 0v-96c0-8.837 6.268-16 14-16h6c7.732 0 14 7.163 14 16v96c0 8.837-6.268 16-14 16h-6c-7.732 0-14-7.163-14-16zM327 400v-96c0-8.837 6.268-16 14-16h6c7.732 0 14 7.163 14 16v96c0 8.837-6.268 16-14 16h-6c-7.732 0-14-7.163-14-16z"],"hand-rock":[512,512,[],"f255","M408.864 79.052c-22.401-33.898-66.108-42.273-98.813-23.588-29.474-31.469-79.145-31.093-108.334-.022-47.16-27.02-108.71 5.055-110.671 60.806C44.846 105.407 0 140.001 0 187.429v56.953c0 32.741 14.28 63.954 39.18 85.634l97.71 85.081c4.252 3.702 3.11 5.573 3.11 32.903 0 17.673 14.327 32 32 32h252c17.673 0 32-14.327 32-32 0-23.513-1.015-30.745 3.982-42.37l42.835-99.656c6.094-14.177 9.183-29.172 9.183-44.568V146.963c0-52.839-54.314-88.662-103.136-67.911zM464 261.406a64.505 64.505 0 0 1-5.282 25.613l-42.835 99.655c-5.23 12.171-7.883 25.04-7.883 38.25V432H188v-10.286c0-16.37-7.14-31.977-19.59-42.817l-97.71-85.08C56.274 281.255 48 263.236 48 244.381v-56.953c0-33.208 52-33.537 52 .677v41.228a16 16 0 0 0 5.493 12.067l7 6.095A16 16 0 0 0 139 235.429V118.857c0-33.097 52-33.725 52 .677v26.751c0 8.836 7.164 16 16 16h7c8.836 0 16-7.164 16-16v-41.143c0-33.134 52-33.675 52 .677v40.466c0 8.836 7.163 16 16 16h7c8.837 0 16-7.164 16-16v-27.429c0-33.03 52-33.78 
52 .677v26.751c0 8.836 7.163 16 16 16h7c8.837 0 16-7.164 16-16 0-33.146 52-33.613 52 .677v114.445z"],"hand-scissors":[512,512,[],"f257","M256 480l70-.013c5.114 0 10.231-.583 15.203-1.729l118.999-27.427C490.56 443.835 512 417.02 512 386.277V180.575c0-23.845-13.03-45.951-34.005-57.69l-97.999-54.853c-34.409-19.261-67.263-5.824-92.218 24.733L142.85 37.008c-37.887-14.579-80.612 3.727-95.642 41.201-15.098 37.642 3.635 80.37 41.942 95.112L168 192l-94-9.141c-40.804 0-74 32.811-74 73.14 0 40.33 33.196 73.141 74 73.141h87.635c-3.675 26.245 8.692 51.297 30.341 65.006C178.657 436.737 211.044 480 256 480zm0-48.013c-25.16 0-25.12-36.567 0-36.567 8.837 0 16-7.163 16-16v-6.856c0-8.837-7.163-16-16-16h-28c-25.159 0-25.122-36.567 0-36.567h28c8.837 0 16-7.163 16-16v-6.856c0-8.837-7.163-16-16-16H74c-34.43 0-34.375-50.281 0-50.281h182c8.837 0 16-7.163 16-16v-11.632a16 16 0 0 0-10.254-14.933L106.389 128.51c-31.552-12.14-13.432-59.283 19.222-46.717l166.549 64.091a16.001 16.001 0 0 0 18.139-4.812l21.764-26.647c5.82-7.127 16.348-9.064 24.488-4.508l98 54.854c5.828 3.263 9.449 9.318 9.449 15.805v205.701c0 8.491-5.994 15.804-14.576 17.782l-119.001 27.427a19.743 19.743 0 0 1-4.423.502h-70z"],"hand-spock":[512,512,[],"f259","M21.096 381.79l129.092 121.513a32 32 0 0 0 21.932 8.698h237.6c14.17 0 26.653-9.319 30.68-22.904l31.815-107.313A115.955 115.955 0 0 0 477 348.811v-36.839c0-4.051.476-8.104 1.414-12.045l31.73-133.41c10.099-42.412-22.316-82.738-65.544-82.525-4.144-24.856-22.543-47.165-49.85-53.992-35.803-8.952-72.227 12.655-81.25 48.75L296.599 184 274.924 52.01c-8.286-36.07-44.303-58.572-80.304-50.296-29.616 6.804-50.138 32.389-51.882 61.295-42.637.831-73.455 40.563-64.071 81.844l31.04 136.508c-27.194-22.515-67.284-19.992-91.482 5.722-25.376 26.961-24.098 69.325 2.871 94.707zm32.068-61.811l.002-.001c7.219-7.672 19.241-7.98 26.856-.813l53.012 49.894C143.225 378.649 160 371.4 160 357.406v-69.479c0-1.193-.134-2.383-.397-3.546l-34.13-150.172c-5.596-24.617 31.502-32.86 37.054-8.421l30.399 133.757a16 16 0 0 0 15.603 12.454h8.604c10.276 0 17.894-9.567 15.594-19.583l-41.62-181.153c-5.623-24.469 31.39-33.076 37.035-8.508l45.22 196.828A16 16 0 0 0 288.956 272h13.217a16 16 0 0 0 15.522-12.119l42.372-169.49c6.104-24.422 42.962-15.159 36.865 9.217L358.805 252.12c-2.521 10.088 5.115 19.88 15.522 19.88h9.694a16 16 0 0 0 15.565-12.295L426.509 146.6c5.821-24.448 42.797-15.687 36.966 8.802L431.72 288.81a100.094 100.094 0 0 0-2.72 23.162v36.839c0 6.548-.943 13.051-2.805 19.328L397.775 464h-219.31L53.978 346.836c-7.629-7.18-7.994-19.229-.814-26.857z"],handshake:[640,512,[],"f2b5","M616 96h-48c-7.107 0-13.49 3.091-17.884 8H526.59l-31.13-36.3-.16-.18A103.974 103.974 0 0 0 417.03 32h-46.55c-17.75 0-34.9 4.94-49.69 14.01C304.33 36.93 285.67 32 266.62 32h-32.11c-28.903 0-57.599 11.219-79.2 32.8L116.12 104H89.884C85.49 99.091 79.107 96 72 96H24c-13.255 0-24 10.745-24 24v240c0 13.255 10.745 24 24 24h48c10.449 0 19.334-6.68 22.629-16h18.801l75.35 67.57c25.542 26.45 59.925 44.43 96.58 44.43 16.39 0 32.28-3.85 46.1-10.93 24.936.496 51.101-10.368 69.07-31.41 19.684-5.579 37.503-17.426 50.72-34.6 20.989-4.401 40.728-16.492 53.42-35.06h40.701c3.295 9.32 12.18 16 22.629 16h48c13.255 0 24-10.745 24-24V120c0-13.255-10.745-24-24-24zM48 352c-8.837 0-16-7.163-16-16s7.163-16 16-16 16 7.163 16 16-7.163 16-16 16zm412.52-5.76c-15.35 14.295-36.884 11.328-39.95 8 1.414 13.382-18.257 41.043-49.08 38.88-5.541 18.523-28.218 33.826-51.49 25.75-8.89 8.89-22.46 13.13-34.64 13.13-24.95 0-47.77-14.54-63.14-30.91l-81.3-72.91a31.976 31.976 0 0 
0-21.36-8.18H96V152h26.75c8.48 0 16.62-3.37 22.62-9.37l43.88-43.88A64.004 64.004 0 0 1 234.51 80h32.11c5.8 0 11.51.79 17 2.3l-43.27 50.49c-23.56 27.48-23.84 67.62-.66 95.44 32.388 38.866 91.378 39.228 124.48 1.98l25.98-30.08L462.59 296c13.44 14.6 10.95 38.13-2.07 50.24zM544 320h-24.458c.104-20.261-6.799-39.33-19.762-54.4L421.7 162.28c4.51-9.51 2.34-21.23-6.01-28.45-10.075-8.691-25.23-7.499-33.86 2.48l-53.63 62.12c-13.828 15.41-38.223 15.145-51.64-.93a25.857 25.857 0 0 1 .23-33.47l57.92-67.58A47.09 47.09 0 0 1 370.48 80h46.55c16.11 0 31.44 6.94 42.07 19.04L504.52 152H544v168zm48 32c-8.837 0-16-7.163-16-16s7.163-16 16-16 16 7.163 16 16-7.163 16-16 16z"],hdd:[576,512,[],"f0a0","M567.403 235.642L462.323 84.589A48 48 0 0 0 422.919 64H153.081a48 48 0 0 0-39.404 20.589L8.597 235.642A48.001 48.001 0 0 0 0 263.054V400c0 26.51 21.49 48 48 48h480c26.51 0 48-21.49 48-48V263.054c0-9.801-3-19.366-8.597-27.412zM153.081 112h269.838l77.913 112H75.168l77.913-112zM528 400H48V272h480v128zm-32-64c0 17.673-14.327 32-32 32s-32-14.327-32-32 14.327-32 32-32 32 14.327 32 32zm-96 0c0 17.673-14.327 32-32 32s-32-14.327-32-32 14.327-32 32-32 32 14.327 32 32z"],heart:[576,512,[],"f004","M257.3 475.4L92.5 313.6C85.4 307 24 248.1 24 174.8 24 84.1 80.8 24 176 24c41.4 0 80.6 22.8 112 49.8 31.3-27 70.6-49.8 112-49.8 91.7 0 152 56.5 152 150.8 0 52-31.8 103.5-68.1 138.7l-.4.4-164.8 161.5a43.7 43.7 0 0 1-61.4 0zM125.9 279.1L288 438.3l161.8-158.7c27.3-27 54.2-66.3 54.2-104.8C504 107.9 465.8 72 400 72c-47.2 0-92.8 49.3-112 68.4-17-17-64-68.4-112-68.4-65.9 0-104 35.9-104 102.8 0 37.3 26.7 78.9 53.9 104.3z"],hospital:[448,512,[],"f0f8","M128 244v-40c0-6.627 5.373-12 12-12h40c6.627 0 12 5.373 12 12v40c0 6.627-5.373 12-12 12h-40c-6.627 0-12-5.373-12-12zm140 12h40c6.627 0 12-5.373 12-12v-40c0-6.627-5.373-12-12-12h-40c-6.627 0-12 5.373-12 12v40c0 6.627 5.373 12 12 12zm-76 84v-40c0-6.627-5.373-12-12-12h-40c-6.627 0-12 5.373-12 12v40c0 6.627 5.373 12 12 12h40c6.627 0 12-5.373 12-12zm76 12h40c6.627 0 12-5.373 12-12v-40c0-6.627-5.373-12-12-12h-40c-6.627 0-12 5.373-12 12v40c0 6.627 5.373 12 12 12zm180 124v36H0v-36c0-6.627 5.373-12 12-12h19.5V85.035C31.5 73.418 42.245 64 55.5 64H144V24c0-13.255 10.745-24 24-24h112c13.255 0 24 10.745 24 24v40h88.5c13.255 0 24 9.418 24 21.035V464H436c6.627 0 12 5.373 12 12zM79.5 463H192v-67c0-6.627 5.373-12 12-12h40c6.627 0 12 5.373 12 12v67h112.5V112H304v24c0 13.255-10.745 24-24 24H168c-13.255 0-24-10.745-24-24v-24H79.5v351zM266 64h-26V38a6 6 0 0 0-6-6h-20a6 6 0 0 0-6 6v26h-26a6 6 0 0 0-6 6v20a6 6 0 0 0 6 6h26v26a6 6 0 0 0 6 6h20a6 6 0 0 0 6-6V96h26a6 6 0 0 0 6-6V70a6 6 0 0 0-6-6z"],hourglass:[384,512,[],"f254","M368 48h4c6.627 0 12-5.373 12-12V12c0-6.627-5.373-12-12-12H12C5.373 0 0 5.373 0 12v24c0 6.627 5.373 12 12 12h4c0 80.564 32.188 165.807 97.18 208C47.899 298.381 16 383.9 16 464h-4c-6.627 0-12 5.373-12 12v24c0 6.627 5.373 12 12 12h360c6.627 0 12-5.373 12-12v-24c0-6.627-5.373-12-12-12h-4c0-80.564-32.188-165.807-97.18-208C336.102 213.619 368 128.1 368 48zM64 48h256c0 101.62-57.307 184-128 184S64 149.621 64 48zm256 416H64c0-101.62 57.308-184 128-184s128 82.38 128 184z"],"id-badge":[384,512,[],"f2c1","M192 128c35.346 0 64 28.654 64 64s-28.654 64-64 64-64-28.654-64-64 28.654-64 64-64m61.187 145.847l-18.064-5.161C222.371 275.884 207.658 280 192 280s-30.371-4.116-43.122-11.314l-18.064 5.161C110.207 279.734 96 298.569 96 320v72c0 13.255 10.745 24 24 24h144c13.255 0 24-10.745 24-24v-72c0-21.431-14.207-40.266-34.813-46.153zM0 48v416c0 26.51 21.49 48 48 48h288c26.51 0 48-21.49 
48-48V48c0-26.51-21.49-48-48-48H48C21.49 0 0 21.49 0 48zm336 32v378a6 6 0 0 1-6 6H54a6 6 0 0 1-6-6V80h288z"],"id-card":[512,512,[],"f2c2","M404 256H300c-6.627 0-12-5.373-12-12v-24c0-6.627 5.373-12 12-12h104c6.627 0 12 5.373 12 12v24c0 6.627-5.373 12-12 12zm12 68v-24c0-6.627-5.373-12-12-12H300c-6.627 0-12 5.373-12 12v24c0 6.627 5.373 12 12 12h104c6.627 0 12-5.373 12-12zm96-212v288c0 26.51-21.49 48-48 48H48c-26.51 0-48-21.49-48-48V112c0-26.51 21.49-48 48-48h416c26.51 0 48 21.49 48 48zm-48 282V144H48v250a6 6 0 0 0 6 6h404a6 6 0 0 0 6-6zm-288-98c30.928 0 56-25.072 56-56s-25.072-56-56-56-56 25.072-56 56 25.072 56 56 56zm62.896 10.869l-13.464-4.039C211.814 313.568 194.649 320 176 320s-35.814-6.432-49.433-17.17l-13.464 4.039A24 24 0 0 0 96 329.857V372c0 6.627 5.373 12 12 12h136c6.627 0 12-5.373 12-12v-42.143a24 24 0 0 0-17.104-22.988z"],image:[512,512,[],"f03e","M464 64H48C21.49 64 0 85.49 0 112v288c0 26.51 21.49 48 48 48h416c26.51 0 48-21.49 48-48V112c0-26.51-21.49-48-48-48zm-6 336H54a6 6 0 0 1-6-6V118a6 6 0 0 1 6-6h404a6 6 0 0 1 6 6v276a6 6 0 0 1-6 6zM128 152c-22.091 0-40 17.909-40 40s17.909 40 40 40 40-17.909 40-40-17.909-40-40-40zM96 352h320v-80l-87.515-87.515c-4.686-4.686-12.284-4.686-16.971 0L192 304l-39.515-39.515c-4.686-4.686-12.284-4.686-16.971 0L96 304v48z"],images:[576,512,[],"f302","M480 416v16c0 26.51-21.49 48-48 48H48c-26.51 0-48-21.49-48-48V176c0-26.51 21.49-48 48-48h16v48H54a6 6 0 0 0-6 6v244a6 6 0 0 0 6 6h372a6 6 0 0 0 6-6v-10h48zm42-336H150a6 6 0 0 0-6 6v244a6 6 0 0 0 6 6h372a6 6 0 0 0 6-6V86a6 6 0 0 0-6-6zm6-48c26.51 0 48 21.49 48 48v256c0 26.51-21.49 48-48 48H144c-26.51 0-48-21.49-48-48V80c0-26.51 21.49-48 48-48h384zM264 144c0 22.091-17.909 40-40 40s-40-17.909-40-40 17.909-40 40-40 40 17.909 40 40zm-72 96l39.515-39.515c4.686-4.686 12.284-4.686 16.971 0L288 240l103.515-103.515c4.686-4.686 12.284-4.686 16.971 0L480 208v80H192v-48z"],keyboard:[576,512,[],"f11c","M528 64H48C21.49 64 0 85.49 0 112v288c0 26.51 21.49 48 48 48h480c26.51 0 48-21.49 48-48V112c0-26.51-21.49-48-48-48zm8 336c0 4.411-3.589 8-8 8H48c-4.411 0-8-3.589-8-8V112c0-4.411 3.589-8 8-8h480c4.411 0 8 3.589 8 8v288zM170 270v-28c0-6.627-5.373-12-12-12h-28c-6.627 0-12 5.373-12 12v28c0 6.627 5.373 12 12 12h28c6.627 0 12-5.373 12-12zm96 0v-28c0-6.627-5.373-12-12-12h-28c-6.627 0-12 5.373-12 12v28c0 6.627 5.373 12 12 12h28c6.627 0 12-5.373 12-12zm96 0v-28c0-6.627-5.373-12-12-12h-28c-6.627 0-12 5.373-12 12v28c0 6.627 5.373 12 12 12h28c6.627 0 12-5.373 12-12zm96 0v-28c0-6.627-5.373-12-12-12h-28c-6.627 0-12 5.373-12 12v28c0 6.627 5.373 12 12 12h28c6.627 0 12-5.373 12-12zm-336 82v-28c0-6.627-5.373-12-12-12H82c-6.627 0-12 5.373-12 12v28c0 6.627 5.373 12 12 12h28c6.627 0 12-5.373 12-12zm384 0v-28c0-6.627-5.373-12-12-12h-28c-6.627 0-12 5.373-12 12v28c0 6.627 5.373 12 12 12h28c6.627 0 12-5.373 12-12zM122 188v-28c0-6.627-5.373-12-12-12H82c-6.627 0-12 5.373-12 12v28c0 6.627 5.373 12 12 12h28c6.627 0 12-5.373 12-12zm96 0v-28c0-6.627-5.373-12-12-12h-28c-6.627 0-12 5.373-12 12v28c0 6.627 5.373 12 12 12h28c6.627 0 12-5.373 12-12zm96 0v-28c0-6.627-5.373-12-12-12h-28c-6.627 0-12 5.373-12 12v28c0 6.627 5.373 12 12 12h28c6.627 0 12-5.373 12-12zm96 0v-28c0-6.627-5.373-12-12-12h-28c-6.627 0-12 5.373-12 12v28c0 6.627 5.373 12 12 12h28c6.627 0 12-5.373 12-12zm96 0v-28c0-6.627-5.373-12-12-12h-28c-6.627 0-12 5.373-12 12v28c0 6.627 5.373 12 12 12h28c6.627 0 12-5.373 12-12zm-98 158v-16c0-6.627-5.373-12-12-12H180c-6.627 0-12 5.373-12 12v16c0 6.627 5.373 12 12 12h216c6.627 0 12-5.373 12-12z"],lemon:[512,512,[],"f094","M484.112 27.889C455.989-.233 
416.108-8.057 387.059 8.865 347.604 31.848 223.504-41.111 91.196 91.197-41.277 223.672 31.923 347.472 8.866 387.058c-16.922 29.051-9.1 68.932 19.022 97.054 28.135 28.135 68.011 35.938 97.057 19.021 39.423-22.97 163.557 49.969 295.858-82.329 132.474-132.477 59.273-256.277 82.331-295.861 16.922-29.05 9.1-68.931-19.022-97.054zm-22.405 72.894c-38.8 66.609 45.6 165.635-74.845 286.08-120.44 120.443-219.475 36.048-286.076 74.843-22.679 13.207-64.035-27.241-50.493-50.488 38.8-66.609-45.6-165.635 74.845-286.08C245.573 4.702 344.616 89.086 411.219 50.292c22.73-13.24 64.005 27.288 50.488 50.491zm-169.861 8.736c1.37 10.96-6.404 20.957-17.365 22.327-54.846 6.855-135.779 87.787-142.635 142.635-1.373 10.989-11.399 18.734-22.326 17.365-10.961-1.37-18.735-11.366-17.365-22.326 9.162-73.286 104.167-168.215 177.365-177.365 10.953-1.368 20.956 6.403 22.326 17.364z"],"life-ring":[512,512,[],"f1cd","M256 504c136.967 0 248-111.033 248-248S392.967 8 256 8 8 119.033 8 256s111.033 248 248 248zm-103.398-76.72l53.411-53.411c31.806 13.506 68.128 13.522 99.974 0l53.411 53.411c-63.217 38.319-143.579 38.319-206.796 0zM336 256c0 44.112-35.888 80-80 80s-80-35.888-80-80 35.888-80 80-80 80 35.888 80 80zm91.28 103.398l-53.411-53.411c13.505-31.806 13.522-68.128 0-99.974l53.411-53.411c38.319 63.217 38.319 143.579 0 206.796zM359.397 84.72l-53.411 53.411c-31.806-13.505-68.128-13.522-99.973 0L152.602 84.72c63.217-38.319 143.579-38.319 206.795 0zM84.72 152.602l53.411 53.411c-13.506 31.806-13.522 68.128 0 99.974L84.72 359.398c-38.319-63.217-38.319-143.579 0-206.796z"],lightbulb:[384,512,[],"f0eb","M272 428v28c0 10.449-6.68 19.334-16 22.629V488c0 13.255-10.745 24-24 24h-80c-13.255 0-24-10.745-24-24v-9.371c-9.32-3.295-16-12.18-16-22.629v-28c0-6.627 5.373-12 12-12h136c6.627 0 12 5.373 12 12zM128 176c0-35.29 28.71-64 64-64 8.837 0 16-7.164 16-16s-7.163-16-16-16c-52.935 0-96 43.065-96 96 0 8.836 7.164 16 16 16s16-7.164 16-16zm64-128c70.734 0 128 57.254 128 128 0 77.602-37.383 60.477-80.98 160h-94.04C101.318 236.33 64 253.869 64 176c0-70.735 57.254-128 128-128m0-48C94.805 0 16 78.803 16 176c0 101.731 51.697 91.541 90.516 192.674 3.55 9.249 12.47 15.326 22.376 15.326h126.215c9.906 0 18.826-6.078 22.376-15.326C316.303 267.541 368 277.731 368 176 368 78.803 289.195 0 192 0z"],"list-alt":[512,512,[],"f022","M464 32H48C21.49 32 0 53.49 0 80v352c0 26.51 21.49 48 48 48h416c26.51 0 48-21.49 48-48V80c0-26.51-21.49-48-48-48zm-6 400H54a6 6 0 0 1-6-6V86a6 6 0 0 1 6-6h404a6 6 0 0 1 6 6v340a6 6 0 0 1-6 6zm-42-92v24c0 6.627-5.373 12-12 12H204c-6.627 0-12-5.373-12-12v-24c0-6.627 5.373-12 12-12h200c6.627 0 12 5.373 12 12zm0-96v24c0 6.627-5.373 12-12 12H204c-6.627 0-12-5.373-12-12v-24c0-6.627 5.373-12 12-12h200c6.627 0 12 5.373 12 12zm0-96v24c0 6.627-5.373 12-12 12H204c-6.627 0-12-5.373-12-12v-24c0-6.627 5.373-12 12-12h200c6.627 0 12 5.373 12 12zm-252 12c0 19.882-16.118 36-36 36s-36-16.118-36-36 16.118-36 36-36 36 16.118 36 36zm0 96c0 19.882-16.118 36-36 36s-36-16.118-36-36 16.118-36 36-36 36 16.118 36 36zm0 96c0 19.882-16.118 36-36 36s-36-16.118-36-36 16.118-36 36-36 36 16.118 36 36z"],map:[576,512,[],"f279","M508.505 36.17L381.517 92.576 207.179 34.463a47.992 47.992 0 0 0-34.674 1.674l-144 64A48 48 0 0 0 0 144v287.967c0 34.938 35.991 57.864 67.495 43.863l126.988-56.406 174.339 58.113a47.992 47.992 0 0 0 34.674-1.674l144-64A48 48 0 0 0 576 368V80.033c0-34.938-35.991-57.864-67.495-43.863zM360 424l-144-48V88l144 48v288zm-312 8V144l120-53.333v288L48 432zm480-64l-120 53.333v-288L528 80v288z"],meh:[512,512,[],"f11a","M256 56c110.532 0 200 89.451 200 200 0 
110.532-89.451 200-200 200-110.532 0-200-89.451-200-200 0-110.532 89.451-200 200-200m0-48C119.033 8 8 119.033 8 256s111.033 248 248 248 248-111.033 248-248S392.967 8 256 8zm64 136c-9.535 0-18.512 2.386-26.37 6.589h.017c12.735 0 23.059 10.324 23.059 23.059 0 12.735-10.324 23.059-23.059 23.059s-23.059-10.324-23.059-23.059v-.017C266.386 181.488 264 190.465 264 200c0 30.928 25.072 56 56 56s56-25.072 56-56-25.072-56-56-56zm-128 0c-9.535 0-18.512 2.386-26.37 6.589h.017c12.735 0 23.059 10.324 23.059 23.059 0 12.735-10.324 23.059-23.059 23.059-12.735 0-23.059-10.324-23.059-23.059v-.017C138.386 181.488 136 190.465 136 200c0 30.928 25.072 56 56 56s56-25.072 56-56-25.072-56-56-56zm136 184H184c-31.776 0-31.749 48 0 48h144c31.776 0 31.749-48 0-48z"],"minus-square":[448,512,[],"f146","M108 284c-6.6 0-12-5.4-12-12v-32c0-6.6 5.4-12 12-12h232c6.6 0 12 5.4 12 12v32c0 6.6-5.4 12-12 12H108zM448 80v352c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V80c0-26.5 21.5-48 48-48h352c26.5 0 48 21.5 48 48zm-48 346V86c0-3.3-2.7-6-6-6H54c-3.3 0-6 2.7-6 6v340c0 3.3 2.7 6 6 6h340c3.3 0 6-2.7 6-6z"],"money-bill-alt":[640,512,[],"f3d1","M320 144c-53.021 0-96 50.143-96 112 0 61.847 42.977 112 96 112 53 0 96-50.13 96-112 0-61.857-42.979-112-96-112zm48 164.428c0 7.477-3.917 11.572-11.572 11.572h-67.293c-7.656 0-11.573-4.095-11.573-11.572v-8.901c0-7.477 3.917-11.572 11.573-11.572h15.131v-39.878c0-5.163.534-10.503.534-10.503h-.356s-1.779 2.67-2.848 3.738c-4.451 4.273-10.504 4.451-15.666-1.068l-5.518-6.231c-5.342-5.341-4.984-11.216.534-16.379l21.72-19.939c4.449-4.095 8.366-5.697 14.42-5.697h12.105c7.656 0 11.75 3.916 11.75 11.572v84.384h15.488c7.655 0 11.572 4.094 11.572 11.572v8.902zM616 64H24C10.745 64 0 74.745 0 88v335c0 13.255 10.745 24 24 24h592c13.255 0 24-10.745 24-24V88c0-13.255-10.745-24-24-24zM512 400H128c0-44.183-35.817-80-80-80V192c44.183 0 80-35.817 80-80h384c0 44.183 35.817 80 80 80v128c-44.183 0-80 35.817-80 80z"],moon:[512,512,[],"f186","M279.135 512c78.756 0 150.982-35.804 198.844-94.775 28.27-34.831-2.558-85.722-46.249-77.401-82.348 15.683-158.272-47.268-158.272-130.792 0-48.424 26.06-92.292 67.434-115.836 38.745-22.05 28.999-80.788-15.022-88.919A257.936 257.936 0 0 0 279.135 0c-141.36 0-256 114.575-256 256 0 141.36 114.576 256 256 256zm0-464c12.985 0 25.689 1.201 38.016 3.478-54.76 31.163-91.693 90.042-91.693 157.554 0 113.848 103.641 199.2 215.252 177.944C402.574 433.964 344.366 464 279.135 464c-114.875 0-208-93.125-208-208s93.125-208 208-208z"],newspaper:[576,512,[],"f1ea","M552 64H112c-20.858 0-38.643 13.377-45.248 32H24c-13.255 0-24 10.745-24 24v272c0 30.928 25.072 56 56 56h496c13.255 0 24-10.745 24-24V88c0-13.255-10.745-24-24-24zM48 392V144h16v248c0 4.411-3.589 8-8 8s-8-3.589-8-8zm480 8H111.422c.374-2.614.578-5.283.578-8V112h416v288zM172 280h136c6.627 0 12-5.373 12-12v-96c0-6.627-5.373-12-12-12H172c-6.627 0-12 5.373-12 12v96c0 6.627 5.373 12 12 12zm28-80h80v40h-80v-40zm-40 140v-24c0-6.627 5.373-12 12-12h136c6.627 0 12 5.373 12 12v24c0 6.627-5.373 12-12 12H172c-6.627 0-12-5.373-12-12zm192 0v-24c0-6.627 5.373-12 12-12h104c6.627 0 12 5.373 12 12v24c0 6.627-5.373 12-12 12H364c-6.627 0-12-5.373-12-12zm0-144v-24c0-6.627 5.373-12 12-12h104c6.627 0 12 5.373 12 12v24c0 6.627-5.373 12-12 12H364c-6.627 0-12-5.373-12-12zm0 72v-24c0-6.627 5.373-12 12-12h104c6.627 0 12 5.373 12 12v24c0 6.627-5.373 12-12 12H364c-6.627 0-12-5.373-12-12z"],"object-group":[512,512,[],"f247","M500 128c6.627 0 12-5.373 12-12V44c0-6.627-5.373-12-12-12h-72c-6.627 0-12 5.373-12 12v12H96V44c0-6.627-5.373-12-12-12H12C5.373 32 0 37.373 0 44v72c0 
6.627 5.373 12 12 12h12v256H12c-6.627 0-12 5.373-12 12v72c0 6.627 5.373 12 12 12h72c6.627 0 12-5.373 12-12v-12h320v12c0 6.627 5.373 12 12 12h72c6.627 0 12-5.373 12-12v-72c0-6.627-5.373-12-12-12h-12V128h12zm-52-64h32v32h-32V64zM32 64h32v32H32V64zm32 384H32v-32h32v32zm416 0h-32v-32h32v32zm-40-64h-12c-6.627 0-12 5.373-12 12v12H96v-12c0-6.627-5.373-12-12-12H72V128h12c6.627 0 12-5.373 12-12v-12h320v12c0 6.627 5.373 12 12 12h12v256zm-36-192h-84v-52c0-6.628-5.373-12-12-12H108c-6.627 0-12 5.372-12 12v168c0 6.628 5.373 12 12 12h84v52c0 6.628 5.373 12 12 12h200c6.627 0 12-5.372 12-12V204c0-6.628-5.373-12-12-12zm-268-24h144v112H136V168zm240 176H232v-24h76c6.627 0 12-5.372 12-12v-76h56v112z"],"object-ungroup":[576,512,[],"f248","M564 224c6.627 0 12-5.373 12-12v-72c0-6.627-5.373-12-12-12h-72c-6.627 0-12 5.373-12 12v12h-88v-24h12c6.627 0 12-5.373 12-12V44c0-6.627-5.373-12-12-12h-72c-6.627 0-12 5.373-12 12v12H96V44c0-6.627-5.373-12-12-12H12C5.373 32 0 37.373 0 44v72c0 6.627 5.373 12 12 12h12v160H12c-6.627 0-12 5.373-12 12v72c0 6.627 5.373 12 12 12h72c6.627 0 12-5.373 12-12v-12h88v24h-12c-6.627 0-12 5.373-12 12v72c0 6.627 5.373 12 12 12h72c6.627 0 12-5.373 12-12v-12h224v12c0 6.627 5.373 12 12 12h72c6.627 0 12-5.373 12-12v-72c0-6.627-5.373-12-12-12h-12V224h12zM352 64h32v32h-32V64zm0 256h32v32h-32v-32zM64 352H32v-32h32v32zm0-256H32V64h32v32zm32 216v-12c0-6.627-5.373-12-12-12H72V128h12c6.627 0 12-5.373 12-12v-12h224v12c0 6.627 5.373 12 12 12h12v160h-12c-6.627 0-12 5.373-12 12v12H96zm128 136h-32v-32h32v32zm280-64h-12c-6.627 0-12 5.373-12 12v12H256v-12c0-6.627-5.373-12-12-12h-12v-24h88v12c0 6.627 5.373 12 12 12h72c6.627 0 12-5.373 12-12v-72c0-6.627-5.373-12-12-12h-12v-88h88v12c0 6.627 5.373 12 12 12h12v160zm40 64h-32v-32h32v32zm0-256h-32v-32h32v32z"],"paper-plane":[512,512,[],"f1d8","M440 6.5L24 246.4c-34.4 19.9-31.1 70.8 5.7 85.9L144 379.6V464c0 46.4 59.2 65.5 86.6 28.6l43.8-59.1 111.9 46.2c5.9 2.4 12.1 3.6 18.3 3.6 8.2 0 16.3-2.1 23.6-6.2 12.8-7.2 21.6-20 23.9-34.5l59.4-387.2c6.1-40.1-36.9-68.8-71.5-48.9zM192 464v-64.6l36.6 15.1L192 464zm212.6-28.7l-153.8-63.5L391 169.5c10.7-15.5-9.5-33.5-23.7-21.2L155.8 332.6 48 288 464 48l-59.4 387.3z"],"pause-circle":[512,512,[],"f28b","M256 8C119 8 8 119 8 256s111 248 248 248 248-111 248-248S393 8 256 8zm0 448c-110.5 0-200-89.5-200-200S145.5 56 256 56s200 89.5 200 200-89.5 200-200 200zm96-280v160c0 8.8-7.2 16-16 16h-48c-8.8 0-16-7.2-16-16V176c0-8.8 7.2-16 16-16h48c8.8 0 16 7.2 16 16zm-112 0v160c0 8.8-7.2 16-16 16h-48c-8.8 0-16-7.2-16-16V176c0-8.8 7.2-16 16-16h48c8.8 0 16 7.2 16 16z"],"play-circle":[512,512,[],"f144","M371.7 238l-176-107c-15.8-8.8-35.7 2.5-35.7 21v208c0 18.4 19.8 29.8 35.7 21l176-101c16.4-9.1 16.4-32.8 0-42zM504 256C504 119 393 8 256 8S8 119 8 256s111 248 248 248 248-111 248-248zm-448 0c0-110.5 89.5-200 200-200s200 89.5 200 200-89.5 200-200 200S56 366.5 56 256z"],"plus-square":[448,512,[],"f0fe","M352 240v32c0 6.6-5.4 12-12 12h-88v88c0 6.6-5.4 12-12 12h-32c-6.6 0-12-5.4-12-12v-88h-88c-6.6 0-12-5.4-12-12v-32c0-6.6 5.4-12 12-12h88v-88c0-6.6 5.4-12 12-12h32c6.6 0 12 5.4 12 12v88h88c6.6 0 12 5.4 12 12zm96-160v352c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V80c0-26.5 21.5-48 48-48h352c26.5 0 48 21.5 48 48zm-48 346V86c0-3.3-2.7-6-6-6H54c-3.3 0-6 2.7-6 6v340c0 3.3 2.7 6 6 6h340c3.3 0 6-2.7 6-6z"],"question-circle":[512,512,[],"f059","M256 8C119.043 8 8 119.083 8 256c0 136.997 111.043 248 248 248s248-111.003 248-248C504 119.083 392.957 8 256 8zm0 448c-110.532 0-200-89.431-200-200 0-110.495 89.472-200 200-200 110.491 0 200 89.471 200 200 0 110.53-89.431 200-200 
200zm107.244-255.2c0 67.052-72.421 68.084-72.421 92.863V300c0 6.627-5.373 12-12 12h-45.647c-6.627 0-12-5.373-12-12v-8.659c0-35.745 27.1-50.034 47.579-61.516 17.561-9.845 28.324-16.541 28.324-29.579 0-17.246-21.999-28.693-39.784-28.693-23.189 0-33.894 10.977-48.942 29.969-4.057 5.12-11.46 6.071-16.666 2.124l-27.824-21.098c-5.107-3.872-6.251-11.066-2.644-16.363C184.846 131.491 214.94 112 261.794 112c49.071 0 101.45 38.304 101.45 88.8zM298 368c0 23.159-18.841 42-42 42s-42-18.841-42-42 18.841-42 42-42 42 18.841 42 42z"],registered:[512,512,[],"f25d","M256 8C119.033 8 8 119.033 8 256s111.033 248 248 248 248-111.033 248-248S392.967 8 256 8zm0 448c-110.532 0-200-89.451-200-200 0-110.531 89.451-200 200-200 110.532 0 200 89.451 200 200 0 110.532-89.451 200-200 200zm110.442-81.791c-53.046-96.284-50.25-91.468-53.271-96.085 24.267-13.879 39.482-41.563 39.482-73.176 0-52.503-30.247-85.252-101.498-85.252h-78.667c-6.617 0-12 5.383-12 12V380c0 6.617 5.383 12 12 12h38.568c6.617 0 12-5.383 12-12v-83.663h31.958l47.515 89.303a11.98 11.98 0 0 0 10.593 6.36h42.81c9.14 0 14.914-9.799 10.51-17.791zM256.933 239.906h-33.875v-64.14h27.377c32.417 0 38.929 12.133 38.929 31.709-.001 20.913-11.518 32.431-32.431 32.431z"],save:[448,512,[],"f0c7","M433.941 129.941l-83.882-83.882A48 48 0 0 0 316.118 32H48C21.49 32 0 53.49 0 80v352c0 26.51 21.49 48 48 48h352c26.51 0 48-21.49 48-48V163.882a48 48 0 0 0-14.059-33.941zM272 80v80H144V80h128zm122 352H54a6 6 0 0 1-6-6V86a6 6 0 0 1 6-6h42v104c0 13.255 10.745 24 24 24h176c13.255 0 24-10.745 24-24V83.882l78.243 78.243a6 6 0 0 1 1.757 4.243V426a6 6 0 0 1-6 6zM224 232c-48.523 0-88 39.477-88 88s39.477 88 88 88 88-39.477 88-88-39.477-88-88-88zm0 128c-22.056 0-40-17.944-40-40s17.944-40 40-40 40 17.944 40 40-17.944 40-40 40z"],"share-square":[576,512,[],"f14d","M561.938 158.06L417.94 14.092C387.926-15.922 336 5.097 336 48.032v57.198c-42.45 1.88-84.03 6.55-120.76 17.99-35.17 10.95-63.07 27.58-82.91 49.42C108.22 199.2 96 232.6 96 271.94c0 61.697 33.178 112.455 84.87 144.76 37.546 23.508 85.248-12.651 71.02-55.74-15.515-47.119-17.156-70.923 84.11-78.76V336c0 42.993 51.968 63.913 81.94 33.94l143.998-144c18.75-18.74 18.75-49.14 0-67.88zM384 336V232.16C255.309 234.082 166.492 255.35 206.31 376 176.79 357.55 144 324.08 144 271.94c0-109.334 129.14-118.947 240-119.85V48l144 144-144 144zm24.74 84.493a82.658 82.658 0 0 0 20.974-9.303c7.976-4.952 18.286.826 18.286 10.214V464c0 26.51-21.49 48-48 48H48c-26.51 0-48-21.49-48-48V112c0-26.51 21.49-48 48-48h132c6.627 0 12 5.373 12 12v4.486c0 4.917-2.987 9.369-7.569 11.152-13.702 5.331-26.396 11.537-38.05 18.585a12.138 12.138 0 0 1-6.28 1.777H54a6 6 0 0 0-6 6v340a6 6 0 0 0 6 6h340a6 6 0 0 0 6-6v-25.966c0-5.37 3.579-10.059 8.74-11.541z"],smile:[512,512,[],"f118","M256 56c110.532 0 200 89.451 200 200 0 110.532-89.451 200-200 200-110.532 0-200-89.451-200-200 0-110.532 89.451-200 200-200m0-48C119.033 8 8 119.033 8 256s111.033 248 248 248 248-111.033 248-248S392.967 8 256 8zm64 136c-9.535 0-18.512 2.386-26.37 6.589h.017c12.735 0 23.059 10.324 23.059 23.059 0 12.735-10.324 23.059-23.059 23.059s-23.059-10.324-23.059-23.059v-.017C266.386 181.488 264 190.465 264 200c0 30.928 25.072 56 56 56s56-25.072 56-56-25.072-56-56-56zm-128 0c-9.535 0-18.512 2.386-26.37 6.589h.017c12.735 0 23.059 10.324 23.059 23.059 0 12.735-10.324 23.059-23.059 23.059-12.735 0-23.059-10.324-23.059-23.059v-.017C138.386 181.488 136 190.465 136 200c0 30.928 25.072 56 56 56s56-25.072 56-56-25.072-56-56-56zm195.372 182.219c18.819-25.592-19.856-54.017-38.67-28.438-50.135 68.177-135.229 68.18-185.367 
0-18.828-25.601-57.478 2.861-38.67 28.438 69.298 94.231 193.323 94.351 262.707 0z"],snowflake:[448,512,[],"f2dc","M438.237 355.927l-66.574-38.54 59.448-10.327c5.846-1.375 10.609-5.183 13.458-10.13 2.48-4.307 3.506-9.478 2.524-14.651-2.11-11.115-12.686-18.039-23.621-15.467l-85.423 31.115L255.914 256l82.136-41.926 85.423 31.115c10.936 2.572 21.512-4.352 23.621-15.467 2.111-11.115-5.046-22.209-15.981-24.781l-59.448-10.327 66.573-38.54c9.54-5.523 12.615-18.092 6.867-28.074-5.748-9.982-18.14-13.596-27.68-8.074l-66.574 38.54 20.805-56.787c3.246-10.782-2.758-22.542-13.413-26.268-10.654-3.725-21.922 1.997-25.168 12.779l-15.838 89.735-72.423 41.926V136l69.585-58.621c7.689-8.21 6.997-20.856-1.548-28.245-8.545-7.391-21.705-6.723-29.394 1.486l-38.644 46.46V20c0-11.046-9.318-20-20.813-20s-20.813 8.954-20.813 20v77.08l-38.644-46.46c-7.689-8.21-20.849-8.876-29.394-1.486-8.544 7.389-9.236 20.035-1.547 28.245L203.187 136v83.853l-72.423-41.926-15.838-89.736c-3.247-10.782-14.515-16.504-25.169-12.779-10.656 3.725-16.659 15.486-13.413 26.268l20.805 56.787-66.573-38.54c-9.54-5.523-21.933-1.908-27.68 8.074s-2.673 22.551 6.867 28.074l66.574 38.54-59.449 10.328C5.953 207.515-1.202 218.609.907 229.724c2.11 11.114 12.686 18.038 23.622 15.466l85.422-31.115L192.086 256l-82.136 41.926-85.423-31.115c-10.936-2.572-21.511 4.352-23.622 15.466-2.109 11.113 5.046 22.209 15.981 24.781l59.449 10.328-66.574 38.54C.223 361.449-2.852 374.018 2.896 384s18.14 13.597 27.68 8.074l66.574-38.54-20.805 56.786c-1.735 5.764-.828 11.805 2.02 16.751 2.48 4.307 6.433 7.784 11.392 9.517 10.655 3.725 21.923-1.997 25.169-12.779l15.838-89.736 72.423-41.926V376l-69.585 58.621c-7.69 8.21-6.997 20.855 1.547 28.245 8.544 7.388 21.705 6.723 29.394-1.487l38.644-46.46V492c0 11.046 9.318 20 20.813 20s20.813-8.954 20.813-20v-77.081l38.644 46.46c4.111 4.389 9.782 6.621 15.478 6.621 4.96 0 9.939-1.694 13.916-5.134 8.545-7.39 9.237-20.035 1.548-28.245L244.813 376v-83.853l72.423 41.926 15.838 89.736c3.246 10.782 14.514 16.504 25.168 12.779 10.653-3.726 16.659-15.487 13.412-26.268l-20.805-56.787 66.574 38.54c9.54 5.523 21.933 1.908 27.68-8.074 5.749-9.981 2.675-22.55-6.866-28.072z"],square:[448,512,[],"f0c8","M400 32H48C21.5 32 0 53.5 0 80v352c0 26.5 21.5 48 48 48h352c26.5 0 48-21.5 48-48V80c0-26.5-21.5-48-48-48zm-6 400H54c-3.3 0-6-2.7-6-6V86c0-3.3 2.7-6 6-6h340c3.3 0 6 2.7 6 6v340c0 3.3-2.7 6-6 6z"],star:[576,512,[],"f005","M528.1 171.5L382 150.2 316.7 17.8c-11.7-23.6-45.6-23.9-57.4 0L194 150.2 47.9 171.5c-26.2 3.8-36.7 36.1-17.7 54.6l105.7 103-25 145.5c-4.5 26.3 23.2 46 46.4 33.7L288 439.6l130.7 68.7c23.2 12.2 50.9-7.4 46.4-33.7l-25-145.5 105.7-103c19-18.5 8.5-50.8-17.7-54.6zM388.6 312.3l23.7 138.4L288 385.4l-124.3 65.3 23.7-138.4-100.6-98 139-20.2 62.2-126 62.2 126 139 20.2-100.6 98z"],"star-half":[576,512,[],"f089","M288 385.3l-124.3 65.4 23.7-138.4-100.6-98 139-20.2 62.2-126V0c-11.4 0-22.8 5.9-28.7 17.8L194 150.2 47.9 171.4c-26.2 3.8-36.7 36.1-17.7 54.6l105.7 103-25 145.5c-4.5 26.1 23 46 46.4 33.7L288 439.6v-54.3z"],"sticky-note":[448,512,[],"f249","M448 348.106V80c0-26.51-21.49-48-48-48H48C21.49 32 0 53.49 0 80v351.988c0 26.51 21.49 48 48 48h268.118a48 48 0 0 0 33.941-14.059l83.882-83.882A48 48 0 0 0 448 348.106zm-128 80v-76.118h76.118L320 428.106zM400 80v223.988H296c-13.255 0-24 10.745-24 24v104H48V80h352z"],"stop-circle":[512,512,[],"f28d","M504 256C504 119 393 8 256 8S8 119 8 256s111 248 248 248 248-111 248-248zm-448 0c0-110.5 89.5-200 200-200s200 89.5 200 200-89.5 200-200 200S56 366.5 56 256zm296-80v160c0 8.8-7.2 16-16 16H176c-8.8 
0-16-7.2-16-16V176c0-8.8 7.2-16 16-16h160c8.8 0 16 7.2 16 16z"],sun:[512,512,[],"f185","M220.116 487.936l-20.213-49.425a3.992 3.992 0 0 0-5.808-1.886l-45.404 28.104c-29.466 18.24-66.295-8.519-58.054-42.179l12.699-51.865a3.993 3.993 0 0 0-3.59-4.941l-53.251-3.951c-34.554-2.562-48.632-45.855-22.174-68.247L65.08 259.05a3.992 3.992 0 0 0 0-6.106l-40.76-34.497c-26.45-22.384-12.39-65.682 22.174-68.246l53.251-3.951a3.993 3.993 0 0 0 3.59-4.941L90.637 89.443c-8.239-33.656 28.581-60.42 58.054-42.179l45.403 28.104a3.993 3.993 0 0 0 5.808-1.887l20.213-49.425c13.116-32.071 58.638-32.081 71.758 0l20.212 49.424a3.994 3.994 0 0 0 5.809 1.887l45.403-28.104c29.464-18.236 66.297 8.513 58.054 42.179l-12.699 51.865a3.995 3.995 0 0 0 3.59 4.941l53.251 3.951c34.553 2.563 48.633 45.854 22.175 68.246l-40.76 34.497a3.993 3.993 0 0 0 0 6.107l40.76 34.496c26.511 22.441 12.322 65.689-22.175 68.247l-53.251 3.951a3.993 3.993 0 0 0-3.589 4.942l12.698 51.864c8.241 33.658-28.583 60.421-58.054 42.18l-45.403-28.104a3.994 3.994 0 0 0-5.809 1.887l-20.212 49.424c-13.159 32.178-58.675 31.993-71.757 0zm16.814-64.568l19.064 46.616 19.064-46.615c10.308-25.2 40.778-35.066 63.892-20.759l42.822 26.507-11.976-48.919c-6.475-26.444 12.38-52.339 39.487-54.349l50.226-3.726-38.444-32.536c-20.782-17.591-20.747-49.621.001-67.18l38.442-32.536-50.225-3.727c-27.151-2.015-45.95-27.948-39.488-54.349l11.978-48.919-42.823 26.507c-23.151 14.327-53.603 4.4-63.892-20.76l-19.064-46.615-19.064 46.617c-10.305 25.198-40.778 35.066-63.891 20.76l-42.823-26.508 11.977 48.918c6.474 26.446-12.381 52.338-39.488 54.35l-50.224 3.726 38.443 32.537c20.782 17.588 20.747 49.619 0 67.178L52.48 322.123l50.226 3.726c27.151 2.014 45.95 27.947 39.487 54.349l-11.977 48.919 42.823-26.507c23.188-14.355 53.622-4.352 63.891 20.758zM256 384c-70.58 0-128-57.421-128-128 0-70.58 57.42-128 128-128 70.579 0 128 57.42 128 128 0 70.579-57.421 128-128 128zm0-208c-44.112 0-80 35.888-80 80s35.888 80 80 80 80-35.888 80-80-35.888-80-80-80z"],"thumbs-down":[512,512,[],"f165","M466.27 225.31c4.674-22.647.864-44.538-8.99-62.99 2.958-23.868-4.021-48.565-17.34-66.99C438.986 39.423 404.117 0 327 0c-7 0-15 .01-22.22.01C201.195.01 168.997 40 128 40h-10.845c-5.64-4.975-13.042-8-21.155-8H32C14.327 32 0 46.327 0 64v240c0 17.673 14.327 32 32 32h64c11.842 0 22.175-6.438 27.708-16h7.052c19.146 16.953 46.013 60.653 68.76 83.4 13.667 13.667 10.153 108.6 71.76 108.6 57.58 0 95.27-31.936 95.27-104.73 0-18.41-3.93-33.73-8.85-46.54h36.48c48.602 0 85.82-41.565 85.82-85.58 0-19.15-4.96-34.99-13.73-49.84zM64 296c-13.255 0-24-10.745-24-24s10.745-24 24-24 24 10.745 24 24-10.745 24-24 24zm330.18 16.73H290.19c0 37.82 28.36 55.37 28.36 94.54 0 23.75 0 56.73-47.27 56.73-18.91-18.91-9.46-66.18-37.82-94.54C206.9 342.89 167.28 272 138.92 272H128V85.83c53.611 0 100.001-37.82 171.64-37.82h37.82c35.512 0 60.82 17.12 53.12 65.9 15.2 8.16 26.5 36.44 13.94 57.57 21.581 20.384 18.699 51.065 5.21 65.62 9.45 0 22.36 18.91 22.27 37.81-.09 18.91-16.71 37.82-37.82 37.82z"],"thumbs-up":[512,512,[],"f164","M466.27 286.69C475.04 271.84 480 256 480 236.85c0-44.015-37.218-85.58-85.82-85.58H357.7c4.92-12.81 8.85-28.13 8.85-46.54C366.55 31.936 328.86 0 271.28 0c-61.607 0-58.093 94.933-71.76 108.6-22.747 22.747-49.615 66.447-68.76 83.4H32c-17.673 0-32 14.327-32 32v240c0 17.673 14.327 32 32 32h64c14.893 0 27.408-10.174 30.978-23.95 44.509 1.001 75.06 39.94 177.802 39.94 7.22 0 15.22.01 22.22.01 77.117 0 111.986-39.423 112.94-95.33 13.319-18.425 20.299-43.122 17.34-66.99 9.854-18.452 13.664-40.343 8.99-62.99zm-61.75 53.83c12.56 21.13 1.26 
49.41-13.94 57.57 7.7 48.78-17.608 65.9-53.12 65.9h-37.82c-71.639 0-118.029-37.82-171.64-37.82V240h10.92c28.36 0 67.98-70.89 94.54-97.46 28.36-28.36 18.91-75.63 37.82-94.54 47.27 0 47.27 32.98 47.27 56.73 0 39.17-28.36 56.72-28.36 94.54h103.99c21.11 0 37.73 18.91 37.82 37.82.09 18.9-12.82 37.81-22.27 37.81 13.489 14.555 16.371 45.236-5.21 65.62zM88 432c0 13.255-10.745 24-24 24s-24-10.745-24-24 10.745-24 24-24 24 10.745 24 24z"],"times-circle":[512,512,[],"f057","M256 8C119 8 8 119 8 256s111 248 248 248 248-111 248-248S393 8 256 8zm0 448c-110.5 0-200-89.5-200-200S145.5 56 256 56s200 89.5 200 200-89.5 200-200 200zm101.8-262.2L295.6 256l62.2 62.2c4.7 4.7 4.7 12.3 0 17l-22.6 22.6c-4.7 4.7-12.3 4.7-17 0L256 295.6l-62.2 62.2c-4.7 4.7-12.3 4.7-17 0l-22.6-22.6c-4.7-4.7-4.7-12.3 0-17l62.2-62.2-62.2-62.2c-4.7-4.7-4.7-12.3 0-17l22.6-22.6c4.7-4.7 12.3-4.7 17 0l62.2 62.2 62.2-62.2c4.7-4.7 12.3-4.7 17 0l22.6 22.6c4.7 4.7 4.7 12.3 0 17z"],"trash-alt":[448,512,[],"f2ed","M192 188v216c0 6.627-5.373 12-12 12h-24c-6.627 0-12-5.373-12-12V188c0-6.627 5.373-12 12-12h24c6.627 0 12 5.373 12 12zm100-12h-24c-6.627 0-12 5.373-12 12v216c0 6.627 5.373 12 12 12h24c6.627 0 12-5.373 12-12V188c0-6.627-5.373-12-12-12zm132-96c13.255 0 24 10.745 24 24v12c0 6.627-5.373 12-12 12h-20v336c0 26.51-21.49 48-48 48H80c-26.51 0-48-21.49-48-48V128H12c-6.627 0-12-5.373-12-12v-12c0-13.255 10.745-24 24-24h74.411l34.018-56.696A48 48 0 0 1 173.589 0h100.823a48 48 0 0 1 41.16 23.304L349.589 80H424zm-269.611 0h139.223L276.16 50.913A6 6 0 0 0 271.015 48h-94.028a6 6 0 0 0-5.145 2.913L154.389 80zM368 128H80v330a6 6 0 0 0 6 6h276a6 6 0 0 0 6-6V128z"],user:[512,512,[],"f007","M399.326 288.908C422.188 258.886 436 221.085 436 180 436 80.591 355.414 0 256 0 156.591 0 76 80.586 76 180c0 41.073 13.806 78.878 36.674 108.908C50.028 296.336 0 349.651 0 416v28.5C0 481.72 30.28 512 67.5 512h377c37.22 0 67.5-30.28 67.5-67.5V416c0-66.374-50.052-119.667-112.674-127.092zM256 48c72.902 0 132 59.098 132 132s-59.098 132-132 132-132-59.098-132-132S183.098 48 256 48zm208 396.5c0 10.77-8.73 19.5-19.5 19.5h-377c-10.77 0-19.5-8.73-19.5-19.5V416c0-44.183 35.817-80 80-80h38.14c55.486 31.968 124.026 32.087 179.72 0H384c44.183 0 80 35.817 80 80v28.5z"],"user-circle":[512,512,[],"f2bd","M256 8C119.033 8 8 119.033 8 256s111.033 248 248 248 248-111.033 248-248S392.967 8 256 8zm0 48c110.532 0 200 89.451 200 200 0 34.48-8.706 66.909-24.04 95.213-13.403-26.393-37.525-47.542-67.384-56.572C378.19 273.809 385.5 249.468 385.5 224c0-71.569-57.919-129.5-129.5-129.5-71.569 0-129.5 57.919-129.5 129.5 0 25.468 7.31 49.809 20.924 70.641-29.821 9.018-53.962 30.142-67.385 56.572C64.706 322.911 56 290.482 56 256c0-110.531 89.451-200 200-200zm-80 168c0-44.183 35.817-80 80-80s80 35.817 80 80-35.817 80-80 80-80-35.817-80-80zm-59.927 174.943c1.519-33.998 29.554-61.097 63.927-61.097h14.171c38.337 20.889 85.337 20.881 123.659 0H332c34.373 0 62.408 27.099 63.927 61.097-77.746 76.114-202.156 76.065-279.854 0z"],"window-close":[512,512,[],"f410","M464 32H48C21.5 32 0 53.5 0 80v352c0 26.5 21.5 48 48 48h416c26.5 0 48-21.5 48-48V80c0-26.5-21.5-48-48-48zm0 394c0 3.3-2.7 6-6 6H54c-3.3 0-6-2.7-6-6V86c0-3.3 2.7-6 6-6h404c3.3 0 6 2.7 6 6v340zM356.5 194.6L295.1 256l61.4 61.4c4.6 4.6 4.6 12.1 0 16.8l-22.3 22.3c-4.6 4.6-12.1 4.6-16.8 0L256 295.1l-61.4 61.4c-4.6 4.6-12.1 4.6-16.8 0l-22.3-22.3c-4.6-4.6-4.6-12.1 0-16.8l61.4-61.4-61.4-61.4c-4.6-4.6-4.6-12.1 0-16.8l22.3-22.3c4.6-4.6 12.1-4.6 16.8 0l61.4 61.4 61.4-61.4c4.6-4.6 12.1-4.6 16.8 0l22.3 22.3c4.7 4.6 4.7 12.1 0 
16.8z"],"window-maximize":[512,512,[],"f2d0","M464 32H48C21.5 32 0 53.5 0 80v352c0 26.5 21.5 48 48 48h416c26.5 0 48-21.5 48-48V80c0-26.5-21.5-48-48-48zm0 394c0 3.3-2.7 6-6 6H54c-3.3 0-6-2.7-6-6V192h416v234z"],"window-restore":[512,512,[],"f2d2","M464 0H144c-26.5 0-48 21.5-48 48v48H48c-26.5 0-48 21.5-48 48v320c0 26.5 21.5 48 48 48h320c26.5 0 48-21.5 48-48v-48h48c26.5 0 48-21.5 48-48V48c0-26.5-21.5-48-48-48zm-96 464H48V256h320v208zm96-96h-48V144c0-26.5-21.5-48-48-48H144V48h320v320z"]}),t=z||{};t.___FONT_AWESOME___||(t.___FONT_AWESOME___={}),t.___FONT_AWESOME___.styles||(t.___FONT_AWESOME___.styles={}),t.___FONT_AWESOME___.hooks||(t.___FONT_AWESOME___.hooks={}),t.___FONT_AWESOME___.shims||(t.___FONT_AWESOME___.shims=[]);var s=t.___FONT_AWESOME___,r=Object.assign||function(c){for(var l=1;l<arguments.length;l++){var h=arguments[l];for(var v in h)Object.prototype.hasOwnProperty.call(h,v)&&(c[v]=h[v])}return c};!function(c){try{c()}catch(c){}}(function(){c("far")})}(),function(){"use strict";function c(c){"function"==typeof s.hooks.addPack?s.hooks.addPack(c,m):s.styles[c]=r({},s.styles[c]||{},m)}var l={};try{"undefined"!=typeof window&&(l=window)}catch(c){}var h=(l.navigator||{}).userAgent,v=void 0===h?"":h,z=l,e=(~v.indexOf("MSIE")||v.indexOf("Trident/"),[1,2,3,4,5,6,7,8,9,10]),a=e.concat([11,12,13,14,15,16,17,18,19,20]),m=(["xs","sm","lg","fw","ul","li","border","pull-left","pull-right","spin","pulse","rotate-90","rotate-180","rotate-270","flip-horizontal","flip-vertical","stack","stack-1x","stack-2x","inverse","layers","layers-text","layers-counter"].concat(e.map(function(c){return c+"x"})).concat(a.map(function(c){return"w-"+c})),{"address-book":[448,512,[],"f2b9","M436 160c6.627 0 12-5.373 12-12v-40c0-6.627-5.373-12-12-12h-20V48c0-26.51-21.49-48-48-48H80C53.49 0 32 21.49 32 48v416c0 26.51 21.49 48 48 48h288c26.51 0 48-21.49 48-48v-48h20c6.627 0 12-5.373 12-12v-40c0-6.627-5.373-12-12-12h-20v-64h20c6.627 0 12-5.373 12-12v-40c0-6.627-5.373-12-12-12h-20v-64h20zM224 96c53.019 0 96 42.981 96 96s-42.981 96-96 96-96-42.981-96-96 42.981-96 96-96zm128 304c0 26.51-21.49 48-48 48H144c-26.51 0-48-21.49-48-48v-48.711c0-20.994 13.644-39.553 33.683-45.815l22.954-7.173C173.563 312.413 198.198 320 224 320s50.437-7.587 71.363-21.699l22.954 7.173C338.356 311.736 352 330.295 352 351.289V400z"],"address-card":[512,512,[],"f2bb","M464 64H48C21.49 64 0 85.49 0 112v288c0 26.51 21.49 48 48 48h416c26.51 0 48-21.49 48-48V112c0-26.51-21.49-48-48-48zm-288 64c44.183 0 80 35.817 80 80s-35.817 80-80 80-80-35.817-80-80 35.817-80 80-80zm112 232c0 13.255-10.745 24-24 24H88c-13.255 0-24-10.745-24-24v-29.897a24 24 0 0 1 17.407-23.077l28.938-8.268C129.323 312.549 152.087 320 176 320s46.677-7.451 65.656-21.241l28.938 8.268A23.999 23.999 0 0 1 288 330.103V360zm160-52c0 6.627-5.373 12-12 12H332c-6.627 0-12-5.373-12-12v-8c0-6.627 5.373-12 12-12h104c6.627 0 12 5.373 12 12v8zm0-64c0 6.627-5.373 12-12 12H332c-6.627 0-12-5.373-12-12v-8c0-6.627 5.373-12 12-12h104c6.627 0 12 5.373 12 12v8zm0-64c0 6.627-5.373 12-12 12H332c-6.627 0-12-5.373-12-12v-8c0-6.627 5.373-12 12-12h104c6.627 0 12 5.373 12 12v8z"],adjust:[512,512,[],"f042","M8 256c0 136.966 111.033 248 248 248s248-111.034 248-248S392.966 8 256 8 8 119.033 8 256zm248 184V72c101.705 0 184 82.311 184 184 0 101.705-82.311 184-184 184z"],"align-center":[448,512,[],"f037","M352 44v40c0 8.837-7.163 16-16 16H112c-8.837 0-16-7.163-16-16V44c0-8.837 7.163-16 16-16h224c8.837 0 16 7.163 16 16zM16 228h416c8.837 0 16-7.163 16-16v-40c0-8.837-7.163-16-16-16H16c-8.837 0-16 7.163-16 16v40c0 8.837 7.163 
16 16 16zm0 256h416c8.837 0 16-7.163 16-16v-40c0-8.837-7.163-16-16-16H16c-8.837 0-16 7.163-16 16v40c0 8.837 7.163 16 16 16zm320-200H112c-8.837 0-16 7.163-16 16v40c0 8.837 7.163 16 16 16h224c8.837 0 16-7.163 16-16v-40c0-8.837-7.163-16-16-16z"],"align-justify":[448,512,[],"f039","M0 84V44c0-8.837 7.163-16 16-16h416c8.837 0 16 7.163 16 16v40c0 8.837-7.163 16-16 16H16c-8.837 0-16-7.163-16-16zm16 144h416c8.837 0 16-7.163 16-16v-40c0-8.837-7.163-16-16-16H16c-8.837 0-16 7.163-16 16v40c0 8.837 7.163 16 16 16zm0 256h416c8.837 0 16-7.163 16-16v-40c0-8.837-7.163-16-16-16H16c-8.837 0-16 7.163-16 16v40c0 8.837 7.163 16 16 16zm0-128h416c8.837 0 16-7.163 16-16v-40c0-8.837-7.163-16-16-16H16c-8.837 0-16 7.163-16 16v40c0 8.837 7.163 16 16 16z"],"align-left":[448,512,[],"f036","M288 44v40c0 8.837-7.163 16-16 16H16c-8.837 0-16-7.163-16-16V44c0-8.837 7.163-16 16-16h256c8.837 0 16 7.163 16 16zM0 172v40c0 8.837 7.163 16 16 16h416c8.837 0 16-7.163 16-16v-40c0-8.837-7.163-16-16-16H16c-8.837 0-16 7.163-16 16zm16 312h416c8.837 0 16-7.163 16-16v-40c0-8.837-7.163-16-16-16H16c-8.837 0-16 7.163-16 16v40c0 8.837 7.163 16 16 16zm256-200H16c-8.837 0-16 7.163-16 16v40c0 8.837 7.163 16 16 16h256c8.837 0 16-7.163 16-16v-40c0-8.837-7.163-16-16-16z"],"align-right":[448,512,[],"f038","M160 84V44c0-8.837 7.163-16 16-16h256c8.837 0 16 7.163 16 16v40c0 8.837-7.163 16-16 16H176c-8.837 0-16-7.163-16-16zM16 228h416c8.837 0 16-7.163 16-16v-40c0-8.837-7.163-16-16-16H16c-8.837 0-16 7.163-16 16v40c0 8.837 7.163 16 16 16zm0 256h416c8.837 0 16-7.163 16-16v-40c0-8.837-7.163-16-16-16H16c-8.837 0-16 7.163-16 16v40c0 8.837 7.163 16 16 16zm160-128h256c8.837 0 16-7.163 16-16v-40c0-8.837-7.163-16-16-16H176c-8.837 0-16 7.163-16 16v40c0 8.837 7.163 16 16 16z"],ambulance:[640,512,[],"f0f9","M592 0H272c-26.51 0-48 21.49-48 48v48h-44.118a48 48 0 0 0-33.941 14.059l-99.882 99.882A48 48 0 0 0 32 243.882V352h-8c-13.255 0-24 10.745-24 24v16c0 13.255 10.745 24 24 24h40c0 53.019 42.981 96 96 96s96-42.981 96-96h128c0 53.019 42.981 96 96 96s96-42.981 96-96h40c13.255 0 24-10.745 24-24V48c0-26.51-21.49-48-48-48zM160 464c-26.467 0-48-21.533-48-48s21.533-48 48-48 48 21.533 48 48-21.533 48-48 48zm64-208H80v-12.118L179.882 144H224v112zm256 208c-26.467 0-48-21.533-48-48s21.533-48 48-48 48 21.533 48 48-21.533 48-48 48zm32-288v32c0 6.627-5.373 12-12 12h-56v56c0 6.627-5.373 12-12 12h-32c-6.627 0-12-5.373-12-12v-56h-56c-6.627 0-12-5.373-12-12v-32c0-6.627 5.373-12 12-12h56v-56c0-6.627 5.373-12 12-12h32c6.627 0 12 5.373 12 12v56h56c6.627 0 12 5.373 12 12z"],"american-sign-language-interpreting":[640,512,[],"f2a3","M290.547 189.039c-20.295-10.149-44.147-11.199-64.739-3.89 42.606 0 71.208 20.475 85.578 50.576 8.576 17.899-5.148 38.071-23.617 38.071 18.429 0 32.211 20.136 23.617 38.071-14.725 30.846-46.123 50.854-80.298 50.854-.557 0-94.471-8.615-94.471-8.615l-66.406 33.347c-9.384 4.693-19.815.379-23.895-7.781L1.86 290.747c-4.167-8.615-1.111-18.897 6.946-23.621l58.072-33.069L108 159.861c6.39-57.245 34.731-109.767 79.743-146.726 11.391-9.448 28.341-7.781 37.51 3.613 9.446 11.394 7.78 28.067-3.612 37.516-12.503 10.559-23.618 22.509-32.509 35.57 21.672-14.729 46.679-24.732 74.186-28.067 14.725-1.945 28.063 8.336 29.73 23.065 1.945 14.728-8.336 28.067-23.062 29.734-16.116 1.945-31.12 7.503-44.178 15.284 26.114-5.713 58.712-3.138 88.079 11.115 13.336 6.669 18.893 22.509 12.224 35.848-6.389 13.06-22.504 18.617-35.564 12.226zm-27.229 69.472c-6.112-12.505-18.338-20.286-32.231-20.286a35.46 35.46 0 0 0-35.565 35.57c0 21.428 17.808 35.57 35.565 35.57 13.893 0 26.119-7.781 32.231-20.286 
4.446-9.449 13.614-15.006 23.339-15.284-9.725-.277-18.893-5.835-23.339-15.284zm374.821-37.237c4.168 8.615 1.111 18.897-6.946 23.621l-58.071 33.069L532 352.16c-6.39 57.245-34.731 109.767-79.743 146.726-10.932 9.112-27.799 8.144-37.51-3.613-9.446-11.394-7.78-28.067 3.613-37.516 12.503-10.559 23.617-22.509 32.508-35.57-21.672 14.729-46.679 24.732-74.186 28.067-10.021 2.506-27.552-5.643-29.73-23.065-1.945-14.728 8.336-28.067 23.062-29.734 16.116-1.946 31.12-7.503 44.178-15.284-26.114 5.713-58.712 3.138-88.079-11.115-13.336-6.669-18.893-22.509-12.224-35.848 6.389-13.061 22.505-18.619 35.565-12.227 20.295 10.149 44.147 11.199 64.739 3.89-42.606 0-71.208-20.475-85.578-50.576-8.576-17.899 5.148-38.071 23.617-38.071-18.429 0-32.211-20.136-23.617-38.071 14.033-29.396 44.039-50.887 81.966-50.854l92.803 8.615 66.406-33.347c9.408-4.704 19.828-.354 23.894 7.781l44.455 88.926zm-229.227-18.618c-13.893 0-26.119 7.781-32.231 20.286-4.446 9.449-13.614 15.006-23.339 15.284 9.725.278 18.893 5.836 23.339 15.284 6.112 12.505 18.338 20.286 32.231 20.286a35.46 35.46 0 0 0 35.565-35.57c0-21.429-17.808-35.57-35.565-35.57z"],anchor:[576,512,[],"f13d","M12.971 352h32.394C67.172 454.735 181.944 512 288 512c106.229 0 220.853-57.38 242.635-160h32.394c10.691 0 16.045-12.926 8.485-20.485l-67.029-67.029c-4.686-4.686-12.284-4.686-16.971 0l-67.029 67.029c-7.56 7.56-2.206 20.485 8.485 20.485h35.146c-20.29 54.317-84.963 86.588-144.117 94.015V256h52c6.627 0 12-5.373 12-12v-40c0-6.627-5.373-12-12-12h-52v-5.47c37.281-13.178 63.995-48.725 64-90.518C384.005 43.772 341.605.738 289.37.01 235.723-.739 192 42.525 192 96c0 41.798 26.716 77.35 64 90.53V192h-52c-6.627 0-12 5.373-12 12v40c0 6.627 5.373 12 12 12h52v190.015c-58.936-7.399-123.82-39.679-144.117-94.015h35.146c10.691 0 16.045-12.926 8.485-20.485l-67.029-67.029c-4.686-4.686-12.284-4.686-16.971 0L4.485 331.515C-3.074 339.074 2.28 352 12.971 352zM288 64c17.645 0 32 14.355 32 32s-14.355 32-32 32-32-14.355-32-32 14.355-32 32-32z"],"angle-double-down":[320,512,[],"f103","M143 256.3L7 120.3c-9.4-9.4-9.4-24.6 0-33.9l22.6-22.6c9.4-9.4 24.6-9.4 33.9 0l96.4 96.4 96.4-96.4c9.4-9.4 24.6-9.4 33.9 0L313 86.3c9.4 9.4 9.4 24.6 0 33.9l-136 136c-9.4 9.5-24.6 9.5-34 .1zm34 192l136-136c9.4-9.4 9.4-24.6 0-33.9l-22.6-22.6c-9.4-9.4-24.6-9.4-33.9 0L160 352.1l-96.4-96.4c-9.4-9.4-24.6-9.4-33.9 0L7 278.3c-9.4 9.4-9.4 24.6 0 33.9l136 136c9.4 9.5 24.6 9.5 34 .1z"],"angle-double-left":[448,512,[],"f100","M223.7 239l136-136c9.4-9.4 24.6-9.4 33.9 0l22.6 22.6c9.4 9.4 9.4 24.6 0 33.9L319.9 256l96.4 96.4c9.4 9.4 9.4 24.6 0 33.9L393.7 409c-9.4 9.4-24.6 9.4-33.9 0l-136-136c-9.5-9.4-9.5-24.6-.1-34zm-192 34l136 136c9.4 9.4 24.6 9.4 33.9 0l22.6-22.6c9.4-9.4 9.4-24.6 0-33.9L127.9 256l96.4-96.4c9.4-9.4 9.4-24.6 0-33.9L201.7 103c-9.4-9.4-24.6-9.4-33.9 0l-136 136c-9.5 9.4-9.5 24.6-.1 34z"],"angle-double-right":[448,512,[],"f101","M224.3 273l-136 136c-9.4 9.4-24.6 9.4-33.9 0l-22.6-22.6c-9.4-9.4-9.4-24.6 0-33.9l96.4-96.4-96.4-96.4c-9.4-9.4-9.4-24.6 0-33.9L54.3 103c9.4-9.4 24.6-9.4 33.9 0l136 136c9.5 9.4 9.5 24.6.1 34zm192-34l-136-136c-9.4-9.4-24.6-9.4-33.9 0l-22.6 22.6c-9.4 9.4-9.4 24.6 0 33.9l96.4 96.4-96.4 96.4c-9.4 9.4-9.4 24.6 0 33.9l22.6 22.6c9.4 9.4 24.6 9.4 33.9 0l136-136c9.4-9.2 9.4-24.4 0-33.8z"],"angle-double-up":[320,512,[],"f102","M177 255.7l136 136c9.4 9.4 9.4 24.6 0 33.9l-22.6 22.6c-9.4 9.4-24.6 9.4-33.9 0L160 351.9l-96.4 96.4c-9.4 9.4-24.6 9.4-33.9 0L7 425.7c-9.4-9.4-9.4-24.6 0-33.9l136-136c9.4-9.5 24.6-9.5 34-.1zm-34-192L7 199.7c-9.4 9.4-9.4 24.6 0 33.9l22.6 22.6c9.4 9.4 24.6 9.4 33.9 0l96.4-96.4 96.4 96.4c9.4 
9.4 24.6 9.4 33.9 0l22.6-22.6c9.4-9.4 9.4-24.6 0-33.9l-136-136c-9.2-9.4-24.4-9.4-33.8 0z"],"angle-down":[320,512,[],"f107","M143 352.3L7 216.3c-9.4-9.4-9.4-24.6 0-33.9l22.6-22.6c9.4-9.4 24.6-9.4 33.9 0l96.4 96.4 96.4-96.4c9.4-9.4 24.6-9.4 33.9 0l22.6 22.6c9.4 9.4 9.4 24.6 0 33.9l-136 136c-9.2 9.4-24.4 9.4-33.8 0z"],"angle-left":[256,512,[],"f104","M31.7 239l136-136c9.4-9.4 24.6-9.4 33.9 0l22.6 22.6c9.4 9.4 9.4 24.6 0 33.9L127.9 256l96.4 96.4c9.4 9.4 9.4 24.6 0 33.9L201.7 409c-9.4 9.4-24.6 9.4-33.9 0l-136-136c-9.5-9.4-9.5-24.6-.1-34z"],"angle-right":[256,512,[],"f105","M224.3 273l-136 136c-9.4 9.4-24.6 9.4-33.9 0l-22.6-22.6c-9.4-9.4-9.4-24.6 0-33.9l96.4-96.4-96.4-96.4c-9.4-9.4-9.4-24.6 0-33.9L54.3 103c9.4-9.4 24.6-9.4 33.9 0l136 136c9.5 9.4 9.5 24.6.1 34z"],"angle-up":[320,512,[],"f106","M177 159.7l136 136c9.4 9.4 9.4 24.6 0 33.9l-22.6 22.6c-9.4 9.4-24.6 9.4-33.9 0L160 255.9l-96.4 96.4c-9.4 9.4-24.6 9.4-33.9 0L7 329.7c-9.4-9.4-9.4-24.6 0-33.9l136-136c9.4-9.5 24.6-9.5 34-.1z"],archive:[512,512,[],"f187","M488 128H24c-13.255 0-24-10.745-24-24V56c0-13.255 10.745-24 24-24h464c13.255 0 24 10.745 24 24v48c0 13.255-10.745 24-24 24zm-8 328V184c0-13.255-10.745-24-24-24H56c-13.255 0-24 10.745-24 24v272c0 13.255 10.745 24 24 24h400c13.255 0 24-10.745 24-24zM308 256H204c-6.627 0-12-5.373-12-12v-8c0-6.627 5.373-12 12-12h104c6.627 0 12 5.373 12 12v8c0 6.627-5.373 12-12 12z"],"arrow-alt-circle-down":[512,512,[],"f358","M504 256c0 137-111 248-248 248S8 393 8 256 119 8 256 8s248 111 248 248zM212 140v116h-70.9c-10.7 0-16.1 13-8.5 20.5l114.9 114.3c4.7 4.7 12.2 4.7 16.9 0l114.9-114.3c7.6-7.6 2.2-20.5-8.5-20.5H300V140c0-6.6-5.4-12-12-12h-64c-6.6 0-12 5.4-12 12z"],"arrow-alt-circle-left":[512,512,[],"f359","M256 504C119 504 8 393 8 256S119 8 256 8s248 111 248 248-111 248-248 248zm116-292H256v-70.9c0-10.7-13-16.1-20.5-8.5L121.2 247.5c-4.7 4.7-4.7 12.2 0 16.9l114.3 114.9c7.6 7.6 20.5 2.2 20.5-8.5V300h116c6.6 0 12-5.4 12-12v-64c0-6.6-5.4-12-12-12z"],"arrow-alt-circle-right":[512,512,[],"f35a","M256 8c137 0 248 111 248 248S393 504 256 504 8 393 8 256 119 8 256 8zM140 300h116v70.9c0 10.7 13 16.1 20.5 8.5l114.3-114.9c4.7-4.7 4.7-12.2 0-16.9l-114.3-115c-7.6-7.6-20.5-2.2-20.5 8.5V212H140c-6.6 0-12 5.4-12 12v64c0 6.6 5.4 12 12 12z"],"arrow-alt-circle-up":[512,512,[],"f35b","M8 256C8 119 119 8 256 8s248 111 248 248-111 248-248 248S8 393 8 256zm292 116V256h70.9c10.7 0 16.1-13 8.5-20.5L264.5 121.2c-4.7-4.7-12.2-4.7-16.9 0l-115 114.3c-7.6 7.6-2.2 20.5 8.5 20.5H212v116c0 6.6 5.4 12 12 12h64c6.6 0 12-5.4 12-12z"],"arrow-circle-down":[512,512,[],"f0ab","M504 256c0 137-111 248-248 248S8 393 8 256 119 8 256 8s248 111 248 248zm-143.6-28.9L288 302.6V120c0-13.3-10.7-24-24-24h-16c-13.3 0-24 10.7-24 24v182.6l-72.4-75.5c-9.3-9.7-24.8-9.9-34.3-.4l-10.9 11c-9.4 9.4-9.4 24.6 0 33.9L239 404.3c9.4 9.4 24.6 9.4 33.9 0l132.7-132.7c9.4-9.4 9.4-24.6 0-33.9l-10.9-11c-9.5-9.5-25-9.3-34.3.4z"],"arrow-circle-left":[512,512,[],"f0a8","M256 504C119 504 8 393 8 256S119 8 256 8s248 111 248 248-111 248-248 248zm28.9-143.6L209.4 288H392c13.3 0 24-10.7 24-24v-16c0-13.3-10.7-24-24-24H209.4l75.5-72.4c9.7-9.3 9.9-24.8.4-34.3l-11-10.9c-9.4-9.4-24.6-9.4-33.9 0L107.7 239c-9.4 9.4-9.4 24.6 0 33.9l132.7 132.7c9.4 9.4 24.6 9.4 33.9 0l11-10.9c9.5-9.5 9.3-25-.4-34.3z"],"arrow-circle-right":[512,512,[],"f0a9","M256 8c137 0 248 111 248 248S393 504 256 504 8 393 8 256 119 8 256 8zm-28.9 143.6l75.5 72.4H120c-13.3 0-24 10.7-24 24v16c0 13.3 10.7 24 24 24h182.6l-75.5 72.4c-9.7 9.3-9.9 24.8-.4 34.3l11 10.9c9.4 9.4 24.6 9.4 33.9 0L404.3 273c9.4-9.4 9.4-24.6 0-33.9L271.6 
106.3c-9.4-9.4-24.6-9.4-33.9 0l-11 10.9c-9.5 9.6-9.3 25.1.4 34.4z"],"arrow-circle-up":[512,512,[],"f0aa","M8 256C8 119 119 8 256 8s248 111 248 248-111 248-248 248S8 393 8 256zm143.6 28.9l72.4-75.5V392c0 13.3 10.7 24 24 24h16c13.3 0 24-10.7 24-24V209.4l72.4 75.5c9.3 9.7 24.8 9.9 34.3.4l10.9-11c9.4-9.4 9.4-24.6 0-33.9L273 107.7c-9.4-9.4-24.6-9.4-33.9 0L106.3 240.4c-9.4 9.4-9.4 24.6 0 33.9l10.9 11c9.6 9.5 25.1 9.3 34.4-.4z"],"arrow-down":[448,512,[],"f063","M413.1 222.5l22.2 22.2c9.4 9.4 9.4 24.6 0 33.9L241 473c-9.4 9.4-24.6 9.4-33.9 0L12.7 278.6c-9.4-9.4-9.4-24.6 0-33.9l22.2-22.2c9.5-9.5 25-9.3 34.3.4L184 343.4V56c0-13.3 10.7-24 24-24h32c13.3 0 24 10.7 24 24v287.4l114.8-120.5c9.3-9.8 24.8-10 34.3-.4z"],"arrow-left":[448,512,[],"f060","M257.5 445.1l-22.2 22.2c-9.4 9.4-24.6 9.4-33.9 0L7 273c-9.4-9.4-9.4-24.6 0-33.9L201.4 44.7c9.4-9.4 24.6-9.4 33.9 0l22.2 22.2c9.5 9.5 9.3 25-.4 34.3L136.6 216H424c13.3 0 24 10.7 24 24v32c0 13.3-10.7 24-24 24H136.6l120.5 114.8c9.8 9.3 10 24.8.4 34.3z"],"arrow-right":[448,512,[],"f061","M190.5 66.9l22.2-22.2c9.4-9.4 24.6-9.4 33.9 0L441 239c9.4 9.4 9.4 24.6 0 33.9L246.6 467.3c-9.4 9.4-24.6 9.4-33.9 0l-22.2-22.2c-9.5-9.5-9.3-25 .4-34.3L311.4 296H24c-13.3 0-24-10.7-24-24v-32c0-13.3 10.7-24 24-24h287.4L190.9 101.2c-9.8-9.3-10-24.8-.4-34.3z"],"arrow-up":[448,512,[],"f062","M34.9 289.5l-22.2-22.2c-9.4-9.4-9.4-24.6 0-33.9L207 39c9.4-9.4 24.6-9.4 33.9 0l194.3 194.3c9.4 9.4 9.4 24.6 0 33.9L413 289.4c-9.5 9.5-25 9.3-34.3-.4L264 168.6V456c0 13.3-10.7 24-24 24h-32c-13.3 0-24-10.7-24-24V168.6L69.2 289.1c-9.3 9.8-24.8 10-34.3.4z"],"arrows-alt":[512,512,[],"f0b2","M352.201 425.775l-79.196 79.196c-9.373 9.373-24.568 9.373-33.941 0l-79.196-79.196c-15.119-15.119-4.411-40.971 16.971-40.97h51.162L228 284H127.196v51.162c0 21.382-25.851 32.09-40.971 16.971L7.029 272.937c-9.373-9.373-9.373-24.569 0-33.941L86.225 159.8c15.119-15.119 40.971-4.411 40.971 16.971V228H228V127.196h-51.23c-21.382 0-32.09-25.851-16.971-40.971l79.196-79.196c9.373-9.373 24.568-9.373 33.941 0l79.196 79.196c15.119 15.119 4.411 40.971-16.971 40.971h-51.162V228h100.804v-51.162c0-21.382 25.851-32.09 40.97-16.971l79.196 79.196c9.373 9.373 9.373 24.569 0 33.941L425.773 352.2c-15.119 15.119-40.971 4.411-40.97-16.971V284H284v100.804h51.23c21.382 0 32.09 25.851 16.971 40.971z"],"arrows-alt-h":[512,512,[],"f337","M377.941 169.941V216H134.059v-46.059c0-21.382-25.851-32.09-40.971-16.971L7.029 239.029c-9.373 9.373-9.373 24.568 0 33.941l86.059 86.059c15.119 15.119 40.971 4.411 40.971-16.971V296h243.882v46.059c0 21.382 25.851 32.09 40.971 16.971l86.059-86.059c9.373-9.373 9.373-24.568 0-33.941l-86.059-86.059c-15.119-15.12-40.971-4.412-40.971 16.97z"],"arrows-alt-v":[256,512,[],"f338","M214.059 377.941H168V134.059h46.059c21.382 0 32.09-25.851 16.971-40.971L144.971 7.029c-9.373-9.373-24.568-9.373-33.941 0L24.971 93.088c-15.119 15.119-4.411 40.971 16.971 40.971H88v243.882H41.941c-21.382 0-32.09 25.851-16.971 40.971l86.059 86.059c9.373 9.373 24.568 9.373 33.941 0l86.059-86.059c15.12-15.119 4.412-40.971-16.97-40.971z"],"assistive-listening-systems":[512,512,[],"f2a2","M216 260c0 15.464-12.536 28-28 28s-28-12.536-28-28c0-44.112 35.888-80 80-80s80 35.888 80 80c0 15.464-12.536 28-28 28s-28-12.536-28-28c0-13.234-10.767-24-24-24s-24 10.766-24 24zm24-176c-97.047 0-176 78.953-176 176 0 15.464 12.536 28 28 28s28-12.536 28-28c0-66.168 53.832-120 120-120s120 53.832 120 120c0 75.164-71.009 70.311-71.997 143.622L288 404c0 28.673-23.327 52-52 52-15.464 0-28 12.536-28 28s12.536 28 28 28c59.475 0 107.876-48.328 108-107.774.595-34.428 72-48.24 
72-144.226 0-97.047-78.953-176-176-176zm-80 236c-17.673 0-32 14.327-32 32s14.327 32 32 32 32-14.327 32-32-14.327-32-32-32zM32 448c-17.673 0-32 14.327-32 32s14.327 32 32 32 32-14.327 32-32-14.327-32-32-32zm480-187.993c0-1.518-.012-3.025-.045-4.531C510.076 140.525 436.157 38.47 327.994 1.511c-14.633-4.998-30.549 2.809-35.55 17.442-5 14.633 2.81 30.549 17.442 35.55 85.906 29.354 144.61 110.513 146.077 201.953l.003.188c.026 1.118.033 2.236.033 3.363 0 15.464 12.536 28 28 28s28.001-12.536 28.001-28zM152.971 439.029l-80-80L39.03 392.97l80 80 33.941-33.941z"],asterisk:[512,512,[],"f069","M478.21 334.093L336 256l142.21-78.093c11.795-6.477 15.961-21.384 9.232-33.037l-19.48-33.741c-6.728-11.653-21.72-15.499-33.227-8.523L296 186.718l3.475-162.204C299.763 11.061 288.937 0 275.48 0h-38.96c-13.456 0-24.283 11.061-23.994 24.514L216 186.718 77.265 102.607c-11.506-6.976-26.499-3.13-33.227 8.523l-19.48 33.741c-6.728 11.653-2.562 26.56 9.233 33.037L176 256 33.79 334.093c-11.795 6.477-15.961 21.384-9.232 33.037l19.48 33.741c6.728 11.653 21.721 15.499 33.227 8.523L216 325.282l-3.475 162.204C212.237 500.939 223.064 512 236.52 512h38.961c13.456 0 24.283-11.061 23.995-24.514L296 325.282l138.735 84.111c11.506 6.976 26.499 3.13 33.227-8.523l19.48-33.741c6.728-11.653 2.563-26.559-9.232-33.036z"],at:[512,512,[],"f1fa","M256 8C118.941 8 8 118.919 8 256c0 137.059 110.919 248 248 248 48.154 0 95.342-14.14 135.408-40.223 12.005-7.815 14.625-24.288 5.552-35.372l-10.177-12.433c-7.671-9.371-21.179-11.667-31.373-5.129C325.92 429.757 291.314 440 256 440c-101.458 0-184-82.542-184-184S154.542 72 256 72c100.139 0 184 57.619 184 160 0 38.786-21.093 79.742-58.17 83.693-17.349-.454-16.91-12.857-13.476-30.024l23.433-121.11C394.653 149.75 383.308 136 368.225 136h-44.981a13.518 13.518 0 0 0-13.432 11.993l-.01.092c-14.697-17.901-40.448-21.775-59.971-21.775-74.58 0-137.831 62.234-137.831 151.46 0 65.303 36.785 105.87 96 105.87 26.984 0 57.369-15.637 74.991-38.333 9.522 34.104 40.613 34.103 70.71 34.103C462.609 379.41 504 307.798 504 232 504 95.653 394.023 8 256 8zm-21.68 304.43c-22.249 0-36.07-15.623-36.07-40.771 0-44.993 30.779-72.729 58.63-72.729 22.292 0 35.601 15.241 35.601 40.77 0 45.061-33.875 72.73-58.161 72.73z"],"audio-description":[512,512,[],"f29e","M162.925 238.709l8.822 30.655h-25.606l9.041-30.652c1.277-4.421 2.651-9.994 3.872-15.245 1.22 5.251 2.594 10.823 3.871 15.242zm166.474-32.099h-14.523v98.781h14.523c29.776 0 46.175-17.678 46.175-49.776 0-32.239-17.49-49.005-46.175-49.005zM512 112v288c0 26.51-21.49 48-48 48H48c-26.51 0-48-21.49-48-48V112c0-26.51 21.49-48 48-48h416c26.51 0 48 21.49 48 48zM245.459 336.139l-57.097-168A12.001 12.001 0 0 0 177 160h-35.894a12.001 12.001 0 0 0-11.362 8.139l-57.097 168C70.003 343.922 75.789 352 84.009 352h29.133a12 12 0 0 0 11.535-8.693l8.574-29.906h51.367l8.793 29.977A12 12 0 0 0 204.926 352h29.172c8.22 0 14.006-8.078 11.361-15.861zm184.701-80.525c0-58.977-37.919-95.614-98.96-95.614h-57.366c-6.627 0-12 5.373-12 12v168c0 6.627 5.373 12 12 12H331.2c61.041 0 98.96-36.933 98.96-96.386z"],backward:[512,512,[],"f04a","M11.5 280.6l192 160c20.6 17.2 52.5 2.8 52.5-24.6V96c0-27.4-31.9-41.8-52.5-24.6l-192 160c-15.3 12.8-15.3 36.4 0 49.2zm256 0l192 160c20.6 17.2 52.5 2.8 52.5-24.6V96c0-27.4-31.9-41.8-52.5-24.6l-192 160c-15.3 12.8-15.3 36.4 0 49.2z"],"balance-scale":[640,512,[],"f24e","M352 448h168c13.255 0 24 10.745 24 24v16c0 13.255-10.745 24-24 24H120c-13.255 0-24-10.745-24-24v-16c0-13.255 10.745-24 24-24h168V153.324C264.469 143.04 246.836 121.778 241.603 96H120c-13.255 0-24-10.745-24-24V56c0-13.255 
10.745-24 24-24h135.999C270.594 12.57 293.828 0 320 0s49.406 12.57 64.001 32H520c13.255 0 24 10.745 24 24v16c0 13.255-10.745 24-24 24H398.397c-5.233 25.778-22.866 47.04-46.397 57.324V448zm287.981-112c.001-16.182 1.342-8.726-85.048-181.506-17.647-35.294-68.186-35.358-85.865 0C381.94 328.75 384.019 320.331 384.019 336H384c0 44.183 57.308 80 128 80s128-35.817 128-80h-.019zM512 176l72 144H440l72-144zM255.981 336c.001-16.182 1.342-8.726-85.048-181.506-17.647-35.294-68.186-35.358-85.865 0C-2.06 328.75.019 320.331.019 336H0c0 44.183 57.308 80 128 80s128-35.817 128-80h-.019zM128 176l72 144H56l72-144z"],ban:[512,512,[],"f05e","M256 8C119.034 8 8 119.033 8 256s111.034 248 248 248 248-111.034 248-248S392.967 8 256 8zm130.108 117.892c65.448 65.448 70 165.481 20.677 235.637L150.47 105.216c70.204-49.356 170.226-44.735 235.638 20.676zM125.892 386.108c-65.448-65.448-70-165.481-20.677-235.637L361.53 406.784c-70.203 49.356-170.226 44.736-235.638-20.676z"],barcode:[512,512,[],"f02a","M0 448V64h18v384H0zm26.857-.273V64H36v383.727h-9.143zm27.143 0V64h8.857v383.727H54zm44.857 0V64h8.857v383.727h-8.857zm36 0V64h17.714v383.727h-17.714zm44.857 0V64h8.857v383.727h-8.857zm18 0V64h8.857v383.727h-8.857zm18 0V64h8.857v383.727h-8.857zm35.715 0V64h18v383.727h-18zm44.857 0V64h18v383.727h-18zm35.999 0V64h18.001v383.727h-18.001zm36.001 0V64h18.001v383.727h-18.001zm26.857 0V64h18v383.727h-18zm45.143 0V64h26.857v383.727h-26.857zm35.714 0V64h9.143v383.727H476zm18 .273V64h18v384h-18z"],bars:[448,512,[],"f0c9","M16 132h416c8.837 0 16-7.163 16-16V76c0-8.837-7.163-16-16-16H16C7.163 60 0 67.163 0 76v40c0 8.837 7.163 16 16 16zm0 160h416c8.837 0 16-7.163 16-16v-40c0-8.837-7.163-16-16-16H16c-8.837 0-16 7.163-16 16v40c0 8.837 7.163 16 16 16zm0 160h416c8.837 0 16-7.163 16-16v-40c0-8.837-7.163-16-16-16H16c-8.837 0-16 7.163-16 16v40c0 8.837 7.163 16 16 16z"],bath:[512,512,[],"f2cd","M488 256H80V112c0-17.645 14.355-32 32-32 11.351 0 21.332 5.945 27.015 14.88-16.492 25.207-14.687 59.576 6.838 83.035-4.176 4.713-4.021 11.916.491 16.428l11.314 11.314c4.686 4.686 12.284 4.686 16.971 0l95.03-95.029c4.686-4.686 4.686-12.284 0-16.971l-11.314-11.314c-4.512-4.512-11.715-4.666-16.428-.491-17.949-16.469-42.294-21.429-64.178-15.365C163.281 45.667 139.212 32 112 32c-44.112 0-80 35.888-80 80v144h-8c-13.255 0-24 10.745-24 24v16c0 13.255 10.745 24 24 24h8v32c0 28.43 12.362 53.969 32 71.547V456c0 13.255 10.745 24 24 24h16c13.255 0 24-10.745 24-24v-8h256v8c0 13.255 10.745 24 24 24h16c13.255 0 24-10.745 24-24v-32.453c19.638-17.578 32-43.117 32-71.547v-32h8c13.255 0 24-10.745 24-24v-16c0-13.255-10.745-24-24-24z"],"battery-empty":[640,512,[],"f244","M544 160v64h32v64h-32v64H64V160h480m16-64H48c-26.51 0-48 21.49-48 48v224c0 26.51 21.49 48 48 48h512c26.51 0 48-21.49 48-48v-16h8c13.255 0 24-10.745 24-24V184c0-13.255-10.745-24-24-24h-8v-16c0-26.51-21.49-48-48-48z"],"battery-full":[640,512,[],"f240","M544 160v64h32v64h-32v64H64V160h480m16-64H48c-26.51 0-48 21.49-48 48v224c0 26.51 21.49 48 48 48h512c26.51 0 48-21.49 48-48v-16h8c13.255 0 24-10.745 24-24V184c0-13.255-10.745-24-24-24h-8v-16c0-26.51-21.49-48-48-48zm-48 96H96v128h416V192z"],"battery-half":[640,512,[],"f242","M544 160v64h32v64h-32v64H64V160h480m16-64H48c-26.51 0-48 21.49-48 48v224c0 26.51 21.49 48 48 48h512c26.51 0 48-21.49 48-48v-16h8c13.255 0 24-10.745 24-24V184c0-13.255-10.745-24-24-24h-8v-16c0-26.51-21.49-48-48-48zm-240 96H96v128h224V192z"],"battery-quarter":[640,512,[],"f243","M544 160v64h32v64h-32v64H64V160h480m16-64H48c-26.51 0-48 21.49-48 48v224c0 26.51 21.49 48 48 48h512c26.51 0 48-21.49 
48-48v-16h8c13.255 0 24-10.745 24-24V184c0-13.255-10.745-24-24-24h-8v-16c0-26.51-21.49-48-48-48zm-336 96H96v128h128V192z"],"battery-three-quarters":[640,512,[],"f241","M544 160v64h32v64h-32v64H64V160h480m16-64H48c-26.51 0-48 21.49-48 48v224c0 26.51 21.49 48 48 48h512c26.51 0 48-21.49 48-48v-16h8c13.255 0 24-10.745 24-24V184c0-13.255-10.745-24-24-24h-8v-16c0-26.51-21.49-48-48-48zm-144 96H96v128h320V192z"],bed:[576,512,[],"f236","M552 288c13.255 0 24 10.745 24 24v136h-96v-64H96v64H0V88c0-13.255 10.745-24 24-24h48c13.255 0 24 10.745 24 24v200h456zM192 96c-44.183 0-80 35.817-80 80s35.817 80 80 80 80-35.817 80-80-35.817-80-80-80zm384 128c0-53.019-42.981-96-96-96H312c-13.255 0-24 10.745-24 24v104h288v-32z"],beer:[448,512,[],"f0fc","M368 96h-48V56c0-13.255-10.745-24-24-24H24C10.745 32 0 42.745 0 56v400c0 13.255 10.745 24 24 24h272c13.255 0 24-10.745 24-24v-42.11l80.606-35.977C429.396 365.063 448 336.388 448 304.86V176c0-44.112-35.888-80-80-80zm16 208.86a16.018 16.018 0 0 1-9.479 14.611L320 343.805V160h48c8.822 0 16 7.178 16 16v128.86zM208 384c-8.836 0-16-7.164-16-16V144c0-8.836 7.164-16 16-16s16 7.164 16 16v224c0 8.836-7.164 16-16 16zm-96 0c-8.836 0-16-7.164-16-16V144c0-8.836 7.164-16 16-16s16 7.164 16 16v224c0 8.836-7.164 16-16 16z"],bell:[448,512,[],"f0f3","M433.884 366.059C411.634 343.809 384 316.118 384 208c0-79.394-57.831-145.269-133.663-157.83A31.845 31.845 0 0 0 256 32c0-17.673-14.327-32-32-32s-32 14.327-32 32c0 6.75 2.095 13.008 5.663 18.17C121.831 62.731 64 128.606 64 208c0 108.118-27.643 135.809-49.893 158.059C-16.042 396.208 5.325 448 48.048 448H160c0 35.346 28.654 64 64 64s64-28.654 64-64h111.943c42.638 0 64.151-51.731 33.941-81.941zM224 472a8 8 0 0 1 0 16c-22.056 0-40-17.944-40-40h16c0 13.234 10.766 24 24 24z"],"bell-slash":[576,512,[],"f1f6","M78.107 366.059C47.958 396.208 69.325 448 112.048 448H224c0 35.346 28.654 64 64 64 35.346 0 64-28.654 64-64h32.685L127.848 221.379c-2.198 97.078-28.439 123.378-49.741 144.68zM264 448c0 13.234 10.766 24 24 24a8 8 0 0 1 0 16c-22.056 0-40-17.944-40-40h16zm305.896 43.733l-10.762 12.086c-8.915 10.012-24.333 10.967-34.437 2.133L8.256 54.393C-1.848 45.558-2.811 30.28 6.104 20.267L16.865 8.181C25.781-1.831 41.199-2.786 51.303 6.049l113.81 99.512c24.017-28.778 57.946-48.996 96.55-55.39A31.85 31.85 0 0 1 256 32c0-17.673 14.327-32 32-32s32 14.327 32 32c0 6.75-2.095 13.008-5.663 18.17C390.169 62.731 448 128.606 448 208c0 108.118 27.634 135.809 49.884 158.059 12.149 12.149 15.923 27.776 13.33 42.121l56.53 49.427c10.104 8.835 11.067 24.113 2.152 34.126z"],bicycle:[640,512,[],"f206","M512.509 192.001c-16.373-.064-32.03 2.955-46.436 8.495l-77.68-125.153A24 24 0 0 0 368.001 64h-64c-8.837 0-16 7.163-16 16v16c0 8.837 7.163 16 16 16h50.649l14.896 24H256.002v-16c0-8.837-7.163-16-16-16h-87.459c-13.441 0-24.777 10.999-24.536 24.437.232 13.044 10.876 23.563 23.995 23.563h48.726l-29.417 47.52c-13.433-4.83-27.904-7.483-42.992-7.52C58.094 191.83.412 249.012.002 319.236-.413 390.279 57.055 448 128.002 448c59.642 0 109.758-40.793 123.967-96h52.033a24 24 0 0 0 20.406-11.367L410.37 201.77l14.938 24.067c-25.455 23.448-41.385 57.081-41.307 94.437.145 68.833 57.899 127.051 126.729 127.719 70.606.685 128.181-55.803 129.255-125.996 1.086-70.941-56.526-129.72-127.476-129.996zM186.75 265.772c9.727 10.529 16.673 23.661 19.642 38.228h-43.306l23.664-38.228zM128.002 400c-44.112 0-80-35.888-80-80s35.888-80 80-80c5.869 0 11.586.653 17.099 1.859l-45.505 73.509C89.715 331.327 101.213 352 120.002 352h81.3c-12.37 28.225-40.562 48-73.3 
48zm162.63-96h-35.624c-3.96-31.756-19.556-59.894-42.383-80.026L237.371 184h127.547l-74.286 120zm217.057 95.886c-41.036-2.165-74.049-35.692-75.627-76.755-.812-21.121 6.633-40.518 19.335-55.263l44.433 71.586c4.66 7.508 14.524 9.816 22.032 5.156l13.594-8.437c7.508-4.66 9.817-14.524 5.156-22.032l-44.468-71.643a79.901 79.901 0 0 1 19.858-2.497c44.112 0 80 35.888 80 80-.001 45.54-38.252 82.316-84.313 79.885z"],binoculars:[512,512,[],"f1e5","M192 104H96V56c0-13.255 10.745-24 24-24h48c13.255 0 24 10.745 24 24v48zm224-48c0-13.255-10.745-24-24-24h-48c-13.255 0-24 10.745-24 24v48h96V56zM0 456c0 13.255 10.745 24 24 24h120c13.255 0 24-10.745 24-24v-16H0v16zm88-328c-13.255 0-24 10.745-24 24C64 256 0 272 0 416h168V312c0-13.255 10.745-24 24-24V128H88zm256 328c0 13.255 10.745 24 24 24h120c13.255 0 24-10.745 24-24v-16H344v16zM216 128v160h80V128h-80zm128 288h168c0-144-64-160-64-264 0-13.255-10.745-24-24-24H320v160c13.255 0 24 10.745 24 24v104z"],"birthday-cake":[448,512,[],"f1fd","M448 384c-28.02 0-31.26-32-74.5-32-43.43 0-46.825 32-74.75 32-27.695 0-31.454-32-74.75-32-42.842 0-47.218 32-74.5 32-28.148 0-31.202-32-74.75-32-43.547 0-46.653 32-74.75 32v-80c0-26.5 21.5-48 48-48h16V112h64v144h64V112h64v144h64V112h64v144h16c26.5 0 48 21.5 48 48v80zm0 128H0v-96c43.356 0 46.767-32 74.75-32 27.951 0 31.253 32 74.75 32 42.843 0 47.217-32 74.5-32 28.148 0 31.201 32 74.75 32 43.357 0 46.767-32 74.75-32 27.488 0 31.252 32 74.5 32v96zM96 96c-17.75 0-32-14.25-32-32 0-31 32-23 32-64 12 0 32 29.5 32 56s-14.25 40-32 40zm128 0c-17.75 0-32-14.25-32-32 0-31 32-23 32-64 12 0 32 29.5 32 56s-14.25 40-32 40zm128 0c-17.75 0-32-14.25-32-32 0-31 32-23 32-64 12 0 32 29.5 32 56s-14.25 40-32 40z"],blind:[384,512,[],"f29d","M380.15 510.837a8 8 0 0 1-10.989-2.687l-125.33-206.427a31.923 31.923 0 0 0 12.958-9.485l126.048 207.608a8 8 0 0 1-2.687 10.991zM142.803 314.338l-32.54 89.485 36.12 88.285c6.693 16.36 25.377 24.192 41.733 17.501 16.357-6.692 24.193-25.376 17.501-41.734l-62.814-153.537zM96 88c24.301 0 44-19.699 44-44S120.301 0 96 0 52 19.699 52 44s19.699 44 44 44zm154.837 169.128l-120-152c-4.733-5.995-11.75-9.108-18.837-9.112V96H80v.026c-7.146.003-14.217 3.161-18.944 9.24L0 183.766v95.694c0 13.455 11.011 24.791 24.464 24.536C37.505 303.748 48 293.1 48 280v-79.766l16-20.571v140.698L9.927 469.055c-6.04 16.609 2.528 34.969 19.138 41.009 16.602 6.039 34.968-2.524 41.009-19.138L136 309.638V202.441l-31.406-39.816a4 4 0 1 1 6.269-4.971l102.3 129.217c9.145 11.584 24.368 11.339 33.708 3.965 10.41-8.216 12.159-23.334 3.966-33.708z"],bold:[384,512,[],"f032","M304.793 243.891c33.639-18.537 53.657-54.16 53.657-95.693 0-48.236-26.25-87.626-68.626-104.179C265.138 34.01 240.849 32 209.661 32H24c-8.837 0-16 7.163-16 16v33.049c0 8.837 7.163 16 16 16h33.113v318.53H24c-8.837 0-16 7.163-16 16V464c0 8.837 7.163 16 16 16h195.69c24.203 0 44.834-1.289 66.866-7.584C337.52 457.193 376 410.647 376 350.014c0-52.168-26.573-91.684-71.207-106.123zM142.217 100.809h67.444c16.294 0 27.536 2.019 37.525 6.717 15.828 8.479 24.906 26.502 24.906 49.446 0 35.029-20.32 56.79-53.029 56.79h-76.846V100.809zm112.642 305.475c-10.14 4.056-22.677 4.907-31.409 4.907h-81.233V281.943h84.367c39.645 0 63.057 25.38 63.057 63.057.001 28.425-13.66 52.483-34.782 61.284z"],bolt:[320,512,[],"f0e7","M295.973 160H180.572L215.19 30.184C219.25 14.956 207.756 0 192 0H56C43.971 0 33.8 8.905 32.211 20.828l-31.996 240C-1.704 275.217 9.504 288 24.004 288h118.701L96.646 482.466C93.05 497.649 104.659 512 119.992 512c8.35 0 16.376-4.374 
20.778-11.978l175.973-303.997c9.244-15.967-2.288-36.025-20.77-36.025z"],bomb:[512,512,[],"f1e2","M440.5 88.5l-52 52L415 167c9.4 9.4 9.4 24.6 0 33.9l-17.4 17.4c11.8 26.1 18.4 55.1 18.4 85.6 0 114.9-93.1 208-208 208S0 418.9 0 304 93.1 96 208 96c30.5 0 59.5 6.6 85.6 18.4L311 97c9.4-9.4 24.6-9.4 33.9 0l26.5 26.5 52-52 17.1 17zM500 60h-24c-6.6 0-12 5.4-12 12s5.4 12 12 12h24c6.6 0 12-5.4 12-12s-5.4-12-12-12zM440 0c-6.6 0-12 5.4-12 12v24c0 6.6 5.4 12 12 12s12-5.4 12-12V12c0-6.6-5.4-12-12-12zm33.9 55l17-17c4.7-4.7 4.7-12.3 0-17-4.7-4.7-12.3-4.7-17 0l-17 17c-4.7 4.7-4.7 12.3 0 17 4.8 4.7 12.4 4.7 17 0zm-67.8 0c4.7 4.7 12.3 4.7 17 0 4.7-4.7 4.7-12.3 0-17l-17-17c-4.7-4.7-12.3-4.7-17 0-4.7 4.7-4.7 12.3 0 17l17 17zm67.8 34c-4.7-4.7-12.3-4.7-17 0-4.7 4.7-4.7 12.3 0 17l17 17c4.7 4.7 12.3 4.7 17 0 4.7-4.7 4.7-12.3 0-17l-17-17zM112 272c0-35.3 28.7-64 64-64 8.8 0 16-7.2 16-16s-7.2-16-16-16c-52.9 0-96 43.1-96 96 0 8.8 7.2 16 16 16s16-7.2 16-16z"],book:[448,512,[],"f02d","M448 360V24c0-13.3-10.7-24-24-24H96C43 0 0 43 0 96v320c0 53 43 96 96 96h328c13.3 0 24-10.7 24-24v-16c0-7.5-3.5-14.3-8.9-18.7-4.2-15.4-4.2-59.3 0-74.7 5.4-4.3 8.9-11.1 8.9-18.6zM128 134c0-3.3 2.7-6 6-6h212c3.3 0 6 2.7 6 6v20c0 3.3-2.7 6-6 6H134c-3.3 0-6-2.7-6-6v-20zm0 64c0-3.3 2.7-6 6-6h212c3.3 0 6 2.7 6 6v20c0 3.3-2.7 6-6 6H134c-3.3 0-6-2.7-6-6v-20zm253.4 250H96c-17.7 0-32-14.3-32-32 0-17.6 14.4-32 32-32h285.4c-1.9 17.1-1.9 46.9 0 64z"],bookmark:[384,512,[],"f02e","M0 512V48C0 21.49 21.49 0 48 0h288c26.51 0 48 21.49 48 48v464L192 400 0 512z"],braille:[640,512,[],"f2a1","M128 256c0 35.346-28.654 64-64 64S0 291.346 0 256s28.654-64 64-64 64 28.654 64 64zM64 384c-17.673 0-32 14.327-32 32s14.327 32 32 32 32-14.327 32-32-14.327-32-32-32zm0-352C28.654 32 0 60.654 0 96s28.654 64 64 64 64-28.654 64-64-28.654-64-64-64zm160 192c-17.673 0-32 14.327-32 32s14.327 32 32 32 32-14.327 32-32-14.327-32-32-32zm0 160c-17.673 0-32 14.327-32 32s14.327 32 32 32 32-14.327 32-32-14.327-32-32-32zm0-352c-35.346 0-64 28.654-64 64s28.654 64 64 64 64-28.654 64-64-28.654-64-64-64zm224 192c-17.673 0-32 14.327-32 32s14.327 32 32 32 32-14.327 32-32-14.327-32-32-32zm0 160c-17.673 0-32 14.327-32 32s14.327 32 32 32 32-14.327 32-32-14.327-32-32-32zm0-352c-35.346 0-64 28.654-64 64s28.654 64 64 64 64-28.654 64-64-28.654-64-64-64zm160 192c-17.673 0-32 14.327-32 32s14.327 32 32 32 32-14.327 32-32-14.327-32-32-32zm0 160c-17.673 0-32 14.327-32 32s14.327 32 32 32 32-14.327 32-32-14.327-32-32-32zm0-320c-17.673 0-32 14.327-32 32s14.327 32 32 32 32-14.327 32-32-14.327-32-32-32z"],briefcase:[512,512,[],"f0b1","M320 288h192v144c0 26.51-21.49 48-48 48H48c-26.51 0-48-21.49-48-48V288h192v20c0 6.627 5.373 12 12 12h104c6.627 0 12-5.373 12-12v-20zm192-112v80H0v-80c0-26.51 21.49-48 48-48h80V80c0-26.51 21.49-48 48-48h160c26.51 0 48 21.49 48 48v48h80c26.51 0 48 21.49 48 48zM320 96H192v32h128V96z"],bug:[512,512,[],"f188","M511.988 288.9c-.478 17.43-15.217 31.1-32.653 31.1H424v16c0 21.864-4.882 42.584-13.6 61.145l60.228 60.228c12.496 12.497 12.496 32.758 0 45.255-12.498 12.497-32.759 12.496-45.256 0l-54.736-54.736C345.886 467.965 314.351 480 280 480V236c0-6.627-5.373-12-12-12h-24c-6.627 0-12 5.373-12 12v244c-34.351 0-65.886-12.035-90.636-32.108l-54.736 54.736c-12.498 12.497-32.759 12.496-45.256 0-12.496-12.497-12.496-32.758 0-45.255l60.228-60.228C92.882 378.584 88 357.864 88 336v-16H32.666C15.23 320 .491 306.33.013 288.9-.484 270.816 14.028 256 32 256h56v-58.745l-46.628-46.628c-12.496-12.497-12.496-32.758 0-45.255 12.498-12.497 32.758-12.497 45.256 0L141.255 160h229.489l54.627-54.627c12.498-12.497 
32.758-12.497 45.256 0 12.496 12.497 12.496 32.758 0 45.255L424 197.255V256h56c17.972 0 32.484 14.816 31.988 32.9zM257 0c-61.856 0-112 50.144-112 112h224C369 50.144 318.856 0 257 0z"],building:[448,512,[],"f1ad","M436 480h-20V24c0-13.255-10.745-24-24-24H56C42.745 0 32 10.745 32 24v456H12c-6.627 0-12 5.373-12 12v20h448v-20c0-6.627-5.373-12-12-12zM128 76c0-6.627 5.373-12 12-12h40c6.627 0 12 5.373 12 12v40c0 6.627-5.373 12-12 12h-40c-6.627 0-12-5.373-12-12V76zm0 96c0-6.627 5.373-12 12-12h40c6.627 0 12 5.373 12 12v40c0 6.627-5.373 12-12 12h-40c-6.627 0-12-5.373-12-12v-40zm52 148h-40c-6.627 0-12-5.373-12-12v-40c0-6.627 5.373-12 12-12h40c6.627 0 12 5.373 12 12v40c0 6.627-5.373 12-12 12zm76 160h-64v-84c0-6.627 5.373-12 12-12h40c6.627 0 12 5.373 12 12v84zm64-172c0 6.627-5.373 12-12 12h-40c-6.627 0-12-5.373-12-12v-40c0-6.627 5.373-12 12-12h40c6.627 0 12 5.373 12 12v40zm0-96c0 6.627-5.373 12-12 12h-40c-6.627 0-12-5.373-12-12v-40c0-6.627 5.373-12 12-12h40c6.627 0 12 5.373 12 12v40zm0-96c0 6.627-5.373 12-12 12h-40c-6.627 0-12-5.373-12-12V76c0-6.627 5.373-12 12-12h40c6.627 0 12 5.373 12 12v40z"],bullhorn:[576,512,[],"f0a1","M576 224c0-20.896-13.36-38.666-32-45.258V64c0-35.346-28.654-64-64-64-64.985 56-142.031 128-272 128H48c-26.51 0-48 21.49-48 48v96c0 26.51 21.49 48 48 48h43.263c-18.742 64.65 2.479 116.379 18.814 167.44 1.702 5.32 5.203 9.893 9.922 12.88 20.78 13.155 68.355 15.657 93.773 5.151 16.046-6.633 19.96-27.423 7.522-39.537-18.508-18.026-30.136-36.91-19.795-60.858a12.278 12.278 0 0 0-1.045-11.673c-16.309-24.679-3.581-62.107 28.517-72.752C346.403 327.887 418.591 395.081 480 448c35.346 0 64-28.654 64-64V269.258c18.64-6.592 32-24.362 32-45.258zm-96 139.855c-54.609-44.979-125.033-92.94-224-104.982v-69.747c98.967-12.042 169.391-60.002 224-104.982v279.711z"],bullseye:[512,512,[],"f140","M256 72c101.689 0 184 82.295 184 184 0 101.689-82.295 184-184 184-101.689 0-184-82.295-184-184 0-101.689 82.295-184 184-184m0-64C119.033 8 8 119.033 8 256s111.033 248 248 248 248-111.033 248-248S392.967 8 256 8zm0 184c35.29 0 64 28.71 64 64s-28.71 64-64 64-64-28.71-64-64 28.71-64 64-64m0-64c-70.692 0-128 57.308-128 128s57.308 128 128 128 128-57.308 128-128-57.308-128-128-128z"],bus:[512,512,[],"f207","M512 152v80c0 13.255-10.745 24-24 24h-8v168c0 13.255-10.745 24-24 24h-8v40c0 13.255-10.745 24-24 24h-48c-13.255 0-24-10.745-24-24v-40H160v40c0 13.255-10.745 24-24 24H88c-13.255 0-24-10.745-24-24v-40h-8c-13.255 0-24-10.745-24-24V256h-8c-13.255 0-24-10.745-24-24v-80c0-13.255 10.745-24 24-24h8V80C32 35.817 132.288 0 256 0s224 35.817 224 80v48h8c13.255 0 24 10.745 24 24zM112 320c-22.091 0-40 17.909-40 40s17.909 40 40 40 40-17.909 40-40-17.909-40-40-40zm288 0c-22.091 0-40 17.909-40 40s17.909 40 40 40 40-17.909 40-40-17.909-40-40-40zm32-56V120c0-13.255-10.745-24-24-24H104c-13.255 0-24 10.745-24 24v144c0 13.255 10.745 24 24 24h304c13.255 0 24-10.745 24-24z"],calculator:[448,512,[],"f1ec","M0 464V48C0 21.49 21.49 0 48 0h352c26.51 0 48 21.49 48 48v416c0 26.51-21.49 48-48 48H48c-26.51 0-48-21.49-48-48zm384-284V76c0-6.627-5.373-12-12-12H76c-6.627 0-12 5.373-12 12v104c0 6.627 5.373 12 12 12h296c6.627 0 12-5.373 12-12zM128 308v-40c0-6.627-5.373-12-12-12H76c-6.627 0-12 5.373-12 12v40c0 6.627 5.373 12 12 12h40c6.627 0 12-5.373 12-12zm256 128V268c0-6.627-5.373-12-12-12h-40c-6.627 0-12 5.373-12 12v168c0 6.627 5.373 12 12 12h40c6.627 0 12-5.373 12-12zm-256 0v-40c0-6.627-5.373-12-12-12H76c-6.627 0-12 5.373-12 12v40c0 6.627 5.373 12 12 12h40c6.627 0 12-5.373 12-12zm128-128v-40c0-6.627-5.373-12-12-12h-40c-6.627 0-12 5.373-12 12v40c0 6.627 
5.373 12 12 12h40c6.627 0 12-5.373 12-12zm0 128v-40c0-6.627-5.373-12-12-12h-40c-6.627 0-12 5.373-12 12v40c0 6.627 5.373 12 12 12h40c6.627 0 12-5.373 12-12z"],calendar:[448,512,[],"f133","M12 192h424c6.6 0 12 5.4 12 12v260c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V204c0-6.6 5.4-12 12-12zm436-44v-36c0-26.5-21.5-48-48-48h-48V12c0-6.6-5.4-12-12-12h-40c-6.6 0-12 5.4-12 12v52H160V12c0-6.6-5.4-12-12-12h-40c-6.6 0-12 5.4-12 12v52H48C21.5 64 0 85.5 0 112v36c0 6.6 5.4 12 12 12h424c6.6 0 12-5.4 12-12z"],"calendar-alt":[448,512,[],"f073","M436 160H12c-6.6 0-12-5.4-12-12v-36c0-26.5 21.5-48 48-48h48V12c0-6.6 5.4-12 12-12h40c6.6 0 12 5.4 12 12v52h128V12c0-6.6 5.4-12 12-12h40c6.6 0 12 5.4 12 12v52h48c26.5 0 48 21.5 48 48v36c0 6.6-5.4 12-12 12zM12 192h424c6.6 0 12 5.4 12 12v260c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V204c0-6.6 5.4-12 12-12zm116 204c0-6.6-5.4-12-12-12H76c-6.6 0-12 5.4-12 12v40c0 6.6 5.4 12 12 12h40c6.6 0 12-5.4 12-12v-40zm0-128c0-6.6-5.4-12-12-12H76c-6.6 0-12 5.4-12 12v40c0 6.6 5.4 12 12 12h40c6.6 0 12-5.4 12-12v-40zm128 128c0-6.6-5.4-12-12-12h-40c-6.6 0-12 5.4-12 12v40c0 6.6 5.4 12 12 12h40c6.6 0 12-5.4 12-12v-40zm0-128c0-6.6-5.4-12-12-12h-40c-6.6 0-12 5.4-12 12v40c0 6.6 5.4 12 12 12h40c6.6 0 12-5.4 12-12v-40zm128 128c0-6.6-5.4-12-12-12h-40c-6.6 0-12 5.4-12 12v40c0 6.6 5.4 12 12 12h40c6.6 0 12-5.4 12-12v-40zm0-128c0-6.6-5.4-12-12-12h-40c-6.6 0-12 5.4-12 12v40c0 6.6 5.4 12 12 12h40c6.6 0 12-5.4 12-12v-40z"],"calendar-check":[448,512,[],"f274","M436 160H12c-6.627 0-12-5.373-12-12v-36c0-26.51 21.49-48 48-48h48V12c0-6.627 5.373-12 12-12h40c6.627 0 12 5.373 12 12v52h128V12c0-6.627 5.373-12 12-12h40c6.627 0 12 5.373 12 12v52h48c26.51 0 48 21.49 48 48v36c0 6.627-5.373 12-12 12zM12 192h424c6.627 0 12 5.373 12 12v260c0 26.51-21.49 48-48 48H48c-26.51 0-48-21.49-48-48V204c0-6.627 5.373-12 12-12zm333.296 95.947l-28.169-28.398c-4.667-4.705-12.265-4.736-16.97-.068L194.12 364.665l-45.98-46.352c-4.667-4.705-12.266-4.736-16.971-.068l-28.397 28.17c-4.705 4.667-4.736 12.265-.068 16.97l82.601 83.269c4.667 4.705 12.265 4.736 16.97.068l142.953-141.805c4.705-4.667 4.736-12.265.068-16.97z"],"calendar-minus":[448,512,[],"f272","M436 160H12c-6.6 0-12-5.4-12-12v-36c0-26.5 21.5-48 48-48h48V12c0-6.6 5.4-12 12-12h40c6.6 0 12 5.4 12 12v52h128V12c0-6.6 5.4-12 12-12h40c6.6 0 12 5.4 12 12v52h48c26.5 0 48 21.5 48 48v36c0 6.6-5.4 12-12 12zM12 192h424c6.6 0 12 5.4 12 12v260c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V204c0-6.6 5.4-12 12-12zm304 192c6.6 0 12-5.4 12-12v-40c0-6.6-5.4-12-12-12H132c-6.6 0-12 5.4-12 12v40c0 6.6 5.4 12 12 12h184z"],"calendar-plus":[448,512,[],"f271","M436 160H12c-6.6 0-12-5.4-12-12v-36c0-26.5 21.5-48 48-48h48V12c0-6.6 5.4-12 12-12h40c6.6 0 12 5.4 12 12v52h128V12c0-6.6 5.4-12 12-12h40c6.6 0 12 5.4 12 12v52h48c26.5 0 48 21.5 48 48v36c0 6.6-5.4 12-12 12zM12 192h424c6.6 0 12 5.4 12 12v260c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V204c0-6.6 5.4-12 12-12zm316 140c0-6.6-5.4-12-12-12h-60v-60c0-6.6-5.4-12-12-12h-40c-6.6 0-12 5.4-12 12v60h-60c-6.6 0-12 5.4-12 12v40c0 6.6 5.4 12 12 12h60v60c0 6.6 5.4 12 12 12h40c6.6 0 12-5.4 12-12v-60h60c6.6 0 12-5.4 12-12v-40z"],"calendar-times":[448,512,[],"f273","M436 160H12c-6.6 0-12-5.4-12-12v-36c0-26.5 21.5-48 48-48h48V12c0-6.6 5.4-12 12-12h40c6.6 0 12 5.4 12 12v52h128V12c0-6.6 5.4-12 12-12h40c6.6 0 12 5.4 12 12v52h48c26.5 0 48 21.5 48 48v36c0 6.6-5.4 12-12 12zM12 192h424c6.6 0 12 5.4 12 12v260c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V204c0-6.6 5.4-12 12-12zm257.3 160l48.1-48.1c4.7-4.7 4.7-12.3 0-17l-28.3-28.3c-4.7-4.7-12.3-4.7-17 0L224 
306.7l-48.1-48.1c-4.7-4.7-12.3-4.7-17 0l-28.3 28.3c-4.7 4.7-4.7 12.3 0 17l48.1 48.1-48.1 48.1c-4.7 4.7-4.7 12.3 0 17l28.3 28.3c4.7 4.7 12.3 4.7 17 0l48.1-48.1 48.1 48.1c4.7 4.7 12.3 4.7 17 0l28.3-28.3c4.7-4.7 4.7-12.3 0-17L269.3 352z"],camera:[512,512,[],"f030","M512 144v288c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V144c0-26.5 21.5-48 48-48h88l12.3-32.9c7-18.7 24.9-31.1 44.9-31.1h125.5c20 0 37.9 12.4 44.9 31.1L376 96h88c26.5 0 48 21.5 48 48zM376 288c0-66.2-53.8-120-120-120s-120 53.8-120 120 53.8 120 120 120 120-53.8 120-120zm-32 0c0 48.5-39.5 88-88 88s-88-39.5-88-88 39.5-88 88-88 88 39.5 88 88z"],"camera-retro":[512,512,[],"f083","M48 32C21.5 32 0 53.5 0 80v352c0 26.5 21.5 48 48 48h416c26.5 0 48-21.5 48-48V80c0-26.5-21.5-48-48-48H48zm0 32h106c3.3 0 6 2.7 6 6v20c0 3.3-2.7 6-6 6H38c-3.3 0-6-2.7-6-6V80c0-8.8 7.2-16 16-16zm426 96H38c-3.3 0-6-2.7-6-6v-36c0-3.3 2.7-6 6-6h138l30.2-45.3c1.1-1.7 3-2.7 5-2.7H464c8.8 0 16 7.2 16 16v74c0 3.3-2.7 6-6 6zM256 424c-66.2 0-120-53.8-120-120s53.8-120 120-120 120 53.8 120 120-53.8 120-120 120zm0-208c-48.5 0-88 39.5-88 88s39.5 88 88 88 88-39.5 88-88-39.5-88-88-88zm-48 104c-8.8 0-16-7.2-16-16 0-35.3 28.7-64 64-64 8.8 0 16 7.2 16 16s-7.2 16-16 16c-17.6 0-32 14.4-32 32 0 8.8-7.2 16-16 16z"],car:[512,512,[],"f1b9","M499.991 168h-54.815l-7.854-20.944c-9.192-24.513-25.425-45.351-46.942-60.263S343.651 64 317.472 64H194.528c-26.18 0-51.391 7.882-72.908 22.793-21.518 14.912-37.75 35.75-46.942 60.263L66.824 168H12.009c-8.191 0-13.974 8.024-11.384 15.795l8 24A12 12 0 0 0 20.009 216h28.815l-.052.14C29.222 227.093 16 247.997 16 272v48c0 16.225 6.049 31.029 16 42.309V424c0 13.255 10.745 24 24 24h48c13.255 0 24-10.745 24-24v-40h256v40c0 13.255 10.745 24 24 24h48c13.255 0 24-10.745 24-24v-61.691c9.951-11.281 16-26.085 16-42.309v-48c0-24.003-13.222-44.907-32.772-55.86l-.052-.14h28.815a12 12 0 0 0 11.384-8.205l8-24c2.59-7.771-3.193-15.795-11.384-15.795zm-365.388 1.528C143.918 144.689 168 128 194.528 128h122.944c26.528 0 50.61 16.689 59.925 41.528L391.824 208H120.176l14.427-38.472zM88 328c-17.673 0-32-14.327-32-32 0-17.673 14.327-32 32-32s48 30.327 48 48-30.327 16-48 16zm336 0c-17.673 0-48 1.673-48-16 0-17.673 30.327-48 48-48s32 14.327 32 32c0 17.673-14.327 32-32 32z"],"caret-down":[320,512,[],"f0d7","M31.3 192h257.3c17.8 0 26.7 21.5 14.1 34.1L174.1 354.8c-7.8 7.8-20.5 7.8-28.3 0L17.2 226.1C4.6 213.5 13.5 192 31.3 192z"],"caret-left":[192,512,[],"f0d9","M192 127.338v257.324c0 17.818-21.543 26.741-34.142 14.142L29.196 270.142c-7.81-7.81-7.81-20.474 0-28.284l128.662-128.662c12.599-12.6 34.142-3.676 34.142 14.142z"],"caret-right":[192,512,[],"f0da","M0 384.662V127.338c0-17.818 21.543-26.741 34.142-14.142l128.662 128.662c7.81 7.81 7.81 20.474 0 28.284L34.142 398.804C21.543 411.404 0 402.48 0 384.662z"],"caret-square-down":[448,512,[],"f150","M448 80v352c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V80c0-26.5 21.5-48 48-48h352c26.5 0 48 21.5 48 48zM92.5 220.5l123 123c4.7 4.7 12.3 4.7 17 0l123-123c7.6-7.6 2.2-20.5-8.5-20.5H101c-10.7 0-16.1 12.9-8.5 20.5z"],"caret-square-left":[448,512,[],"f191","M400 480H48c-26.51 0-48-21.49-48-48V80c0-26.51 21.49-48 48-48h352c26.51 0 48 21.49 48 48v352c0 26.51-21.49 48-48 48zM259.515 124.485l-123.03 123.03c-4.686 4.686-4.686 12.284 0 16.971l123.029 123.029c7.56 7.56 20.485 2.206 20.485-8.485V132.971c.001-10.691-12.925-16.045-20.484-8.486z"],"caret-square-right":[448,512,[],"f152","M48 32h352c26.51 0 48 21.49 48 48v352c0 26.51-21.49 48-48 48H48c-26.51 0-48-21.49-48-48V80c0-26.51 21.49-48 48-48zm140.485 355.515l123.029-123.029c4.686-4.686 
4.686-12.284 0-16.971l-123.029-123.03c-7.56-7.56-20.485-2.206-20.485 8.485v246.059c0 10.691 12.926 16.045 20.485 8.486z"],"caret-square-up":[448,512,[],"f151","M0 432V80c0-26.51 21.49-48 48-48h352c26.51 0 48 21.49 48 48v352c0 26.51-21.49 48-48 48H48c-26.51 0-48-21.49-48-48zm355.515-140.485l-123.03-123.03c-4.686-4.686-12.284-4.686-16.971 0L92.485 291.515c-7.56 7.56-2.206 20.485 8.485 20.485h246.059c10.691 0 16.045-12.926 8.486-20.485z"],"caret-up":[320,512,[],"f0d8","M288.662 352H31.338c-17.818 0-26.741-21.543-14.142-34.142l128.662-128.662c7.81-7.81 20.474-7.81 28.284 0l128.662 128.662c12.6 12.599 3.676 34.142-14.142 34.142z"],"cart-arrow-down":[576,512,[],"f218","M504.717 320H211.572l6.545 32h268.418c15.401 0 26.816 14.301 23.403 29.319l-5.517 24.276C523.112 414.668 536 433.828 536 456c0 31.202-25.519 56.444-56.824 55.994-29.823-.429-54.35-24.631-55.155-54.447-.44-16.287 6.085-31.049 16.803-41.548H231.176C241.553 426.165 248 440.326 248 456c0 31.813-26.528 57.431-58.67 55.938-28.54-1.325-51.751-24.385-53.251-52.917-1.158-22.034 10.436-41.455 28.051-51.586L93.883 64H24C10.745 64 0 53.255 0 40V24C0 10.745 10.745 0 24 0h102.529c11.401 0 21.228 8.021 23.513 19.19L159.208 64H551.99c15.401 0 26.816 14.301 23.403 29.319l-47.273 208C525.637 312.246 515.923 320 504.717 320zM403.029 192H360v-60c0-6.627-5.373-12-12-12h-24c-6.627 0-12 5.373-12 12v60h-43.029c-10.691 0-16.045 12.926-8.485 20.485l67.029 67.029c4.686 4.686 12.284 4.686 16.971 0l67.029-67.029c7.559-7.559 2.205-20.485-8.486-20.485z"],"cart-plus":[576,512,[],"f217","M504.717 320H211.572l6.545 32h268.418c15.401 0 26.816 14.301 23.403 29.319l-5.517 24.276C523.112 414.668 536 433.828 536 456c0 31.202-25.519 56.444-56.824 55.994-29.823-.429-54.35-24.631-55.155-54.447-.44-16.287 6.085-31.049 16.803-41.548H231.176C241.553 426.165 248 440.326 248 456c0 31.813-26.528 57.431-58.67 55.938-28.54-1.325-51.751-24.385-53.251-52.917-1.158-22.034 10.436-41.455 28.051-51.586L93.883 64H24C10.745 64 0 53.255 0 40V24C0 10.745 10.745 0 24 0h102.529c11.401 0 21.228 8.021 23.513 19.19L159.208 64H551.99c15.401 0 26.816 14.301 23.403 29.319l-47.273 208C525.637 312.246 515.923 320 504.717 320zM408 168h-48v-40c0-8.837-7.163-16-16-16h-16c-8.837 0-16 7.163-16 16v40h-48c-8.837 0-16 7.163-16 16v16c0 8.837 7.163 16 16 16h48v40c0 8.837 7.163 16 16 16h16c8.837 0 16-7.163 16-16v-40h48c8.837 0 16-7.163 16-16v-16c0-8.837-7.163-16-16-16z"],certificate:[512,512,[],"f0a3","M458.622 255.92l45.985-45.005c13.708-12.977 7.316-36.039-10.664-40.339l-62.65-15.99 17.661-62.015c4.991-17.838-11.829-34.663-29.661-29.671l-61.994 17.667-15.984-62.671C337.085.197 313.765-6.276 300.99 7.228L256 53.57 211.011 7.229c-12.63-13.351-36.047-7.234-40.325 10.668l-15.984 62.671-61.995-17.667C74.87 57.907 58.056 74.738 63.046 92.572l17.661 62.015-62.65 15.99C.069 174.878-6.31 197.944 7.392 210.915l45.985 45.005-45.985 45.004c-13.708 12.977-7.316 36.039 10.664 40.339l62.65 15.99-17.661 62.015c-4.991 17.838 11.829 34.663 29.661 29.671l61.994-17.667 15.984 62.671c4.439 18.575 27.696 24.018 40.325 10.668L256 458.61l44.989 46.001c12.5 13.488 35.987 7.486 40.325-10.668l15.984-62.671 61.994 17.667c17.836 4.994 34.651-11.837 29.661-29.671l-17.661-62.015 62.65-15.99c17.987-4.302 24.366-27.367 10.664-40.339l-45.984-45.004z"],"chart-area":[512,512,[],"f1fe","M500 384c6.6 0 12 5.4 12 12v40c0 6.6-5.4 12-12 12H12c-6.6 0-12-5.4-12-12V76c0-6.6 5.4-12 12-12h40c6.6 0 12 5.4 12 12v308h436zM372.7 159.5L288 216l-85.3-113.7c-5.1-6.8-15.5-6.3-19.9 1L96 
248v104h384l-89.9-187.8c-3.2-6.5-11.4-8.7-17.4-4.7z"],"chart-bar":[512,512,[],"f080","M500 384c6.6 0 12 5.4 12 12v40c0 6.6-5.4 12-12 12H12c-6.6 0-12-5.4-12-12V76c0-6.6 5.4-12 12-12h40c6.6 0 12 5.4 12 12v308h436zm-308-44v-72c0-6.6-5.4-12-12-12h-40c-6.6 0-12 5.4-12 12v72c0 6.6 5.4 12 12 12h40c6.6 0 12-5.4 12-12zm192 0V204c0-6.6-5.4-12-12-12h-40c-6.6 0-12 5.4-12 12v136c0 6.6 5.4 12 12 12h40c6.6 0 12-5.4 12-12zm-96 0V140c0-6.6-5.4-12-12-12h-40c-6.6 0-12 5.4-12 12v200c0 6.6 5.4 12 12 12h40c6.6 0 12-5.4 12-12zm192 0V108c0-6.6-5.4-12-12-12h-40c-6.6 0-12 5.4-12 12v232c0 6.6 5.4 12 12 12h40c6.6 0 12-5.4 12-12z"],"chart-line":[512,512,[],"f201","M500 384c6.6 0 12 5.4 12 12v40c0 6.6-5.4 12-12 12H12c-6.6 0-12-5.4-12-12V76c0-6.6 5.4-12 12-12h40c6.6 0 12 5.4 12 12v308h436zM456 96H344c-21.4 0-32.1 25.9-17 41l32.9 32.9-72 72.9-55.6-55.6c-4.7-4.7-12.2-4.7-16.9 0L96.4 305c-4.7 4.6-4.8 12.2-.2 16.9l28.5 29.4c4.7 4.8 12.4 4.9 17.1.1l82.1-82.1 55.5 55.5c4.7 4.7 12.3 4.7 17 0l109.2-109.2L439 249c15.1 15.1 41 4.4 41-17V120c0-13.3-10.7-24-24-24z"],"chart-pie":[576,512,[],"f200","M288 12.3V240h227.7c6.9 0 12.3-5.8 12-12.7-6.4-122.4-104.5-220.6-227-227-6.9-.3-12.7 5.1-12.7 12zM552.7 288c6.9 0 12.3 5.8 12 12.7-2.8 53.2-23.2 105.6-61.2 147.8-4.6 5.1-12.6 5.4-17.5.5L325 288h227.7zM401 433c4.8 4.8 4.7 12.8-.4 17.3-42.6 38.4-99 61.7-160.8 61.7C107.6 511.9-.2 403.8 0 271.5.2 143.4 100.8 38.9 227.3 32.3c6.9-.4 12.7 5.1 12.7 12V272l161 161z"],check:[512,512,[],"f00c","M173.898 439.404l-166.4-166.4c-9.997-9.997-9.997-26.206 0-36.204l36.203-36.204c9.997-9.998 26.207-9.998 36.204 0L192 312.69 432.095 72.596c9.997-9.997 26.207-9.997 36.204 0l36.203 36.204c9.997 9.997 9.997 26.206 0 36.204l-294.4 294.401c-9.998 9.997-26.207 9.997-36.204-.001z"],"check-circle":[512,512,[],"f058","M504 256c0 136.967-111.033 248-248 248S8 392.967 8 256 119.033 8 256 8s248 111.033 248 248zM227.314 387.314l184-184c6.248-6.248 6.248-16.379 0-22.627l-22.627-22.627c-6.248-6.249-16.379-6.249-22.628 0L216 308.118l-70.059-70.059c-6.248-6.248-16.379-6.248-22.628 0l-22.627 22.627c-6.248 6.248-6.248 16.379 0 22.627l104 104c6.249 6.249 16.379 6.249 22.628.001z"],"check-square":[448,512,[],"f14a","M400 480H48c-26.51 0-48-21.49-48-48V80c0-26.51 21.49-48 48-48h352c26.51 0 48 21.49 48 48v352c0 26.51-21.49 48-48 48zm-204.686-98.059l184-184c6.248-6.248 6.248-16.379 0-22.627l-22.627-22.627c-6.248-6.248-16.379-6.249-22.628 0L184 302.745l-70.059-70.059c-6.248-6.248-16.379-6.248-22.628 0l-22.627 22.627c-6.248 6.248-6.248 16.379 0 22.627l104 104c6.249 6.25 16.379 6.25 22.628.001z"],"chevron-circle-down":[512,512,[],"f13a","M504 256c0 137-111 248-248 248S8 393 8 256 119 8 256 8s248 111 248 248zM273 369.9l135.5-135.5c9.4-9.4 9.4-24.6 0-33.9l-17-17c-9.4-9.4-24.6-9.4-33.9 0L256 285.1 154.4 183.5c-9.4-9.4-24.6-9.4-33.9 0l-17 17c-9.4 9.4-9.4 24.6 0 33.9L239 369.9c9.4 9.4 24.6 9.4 34 0z"],"chevron-circle-left":[512,512,[],"f137","M256 504C119 504 8 393 8 256S119 8 256 8s248 111 248 248-111 248-248 248zM142.1 273l135.5 135.5c9.4 9.4 24.6 9.4 33.9 0l17-17c9.4-9.4 9.4-24.6 0-33.9L226.9 256l101.6-101.6c9.4-9.4 9.4-24.6 0-33.9l-17-17c-9.4-9.4-24.6-9.4-33.9 0L142.1 239c-9.4 9.4-9.4 24.6 0 34z"],"chevron-circle-right":[512,512,[],"f138","M256 8c137 0 248 111 248 248S393 504 256 504 8 393 8 256 119 8 256 8zm113.9 231L234.4 103.5c-9.4-9.4-24.6-9.4-33.9 0l-17 17c-9.4 9.4-9.4 24.6 0 33.9L285.1 256 183.5 357.6c-9.4 9.4-9.4 24.6 0 33.9l17 17c9.4 9.4 24.6 9.4 33.9 0L369.9 273c9.4-9.4 9.4-24.6 0-34z"],"chevron-circle-up":[512,512,[],"f139","M8 256C8 119 119 8 256 8s248 111 248 248-111 
248-248 248S8 393 8 256zm231-113.9L103.5 277.6c-9.4 9.4-9.4 24.6 0 33.9l17 17c9.4 9.4 24.6 9.4 33.9 0L256 226.9l101.6 101.6c9.4 9.4 24.6 9.4 33.9 0l17-17c9.4-9.4 9.4-24.6 0-33.9L273 142.1c-9.4-9.4-24.6-9.4-34 0z"],"chevron-down":[448,512,[],"f078","M207.029 381.476L12.686 187.132c-9.373-9.373-9.373-24.569 0-33.941l22.667-22.667c9.357-9.357 24.522-9.375 33.901-.04L224 284.505l154.745-154.021c9.379-9.335 24.544-9.317 33.901.04l22.667 22.667c9.373 9.373 9.373 24.569 0 33.941L240.971 381.476c-9.373 9.372-24.569 9.372-33.942 0z"],"chevron-left":[320,512,[],"f053","M34.52 239.03L228.87 44.69c9.37-9.37 24.57-9.37 33.94 0l22.67 22.67c9.36 9.36 9.37 24.52.04 33.9L131.49 256l154.02 154.75c9.34 9.38 9.32 24.54-.04 33.9l-22.67 22.67c-9.37 9.37-24.57 9.37-33.94 0L34.52 272.97c-9.37-9.37-9.37-24.57 0-33.94z"],"chevron-right":[320,512,[],"f054","M285.476 272.971L91.132 467.314c-9.373 9.373-24.569 9.373-33.941 0l-22.667-22.667c-9.357-9.357-9.375-24.522-.04-33.901L188.505 256 34.484 101.255c-9.335-9.379-9.317-24.544.04-33.901l22.667-22.667c9.373-9.373 24.569-9.373 33.941 0L285.475 239.03c9.373 9.372 9.373 24.568.001 33.941z"],"chevron-up":[448,512,[],"f077","M240.971 130.524l194.343 194.343c9.373 9.373 9.373 24.569 0 33.941l-22.667 22.667c-9.357 9.357-24.522 9.375-33.901.04L224 227.495 69.255 381.516c-9.379 9.335-24.544 9.317-33.901-.04l-22.667-22.667c-9.373-9.373-9.373-24.569 0-33.941L207.03 130.525c9.372-9.373 24.568-9.373 33.941-.001z"],child:[384,512,[],"f1ae","M120 72c0-39.765 32.235-72 72-72s72 32.235 72 72c0 39.764-32.235 72-72 72s-72-32.236-72-72zm254.627 1.373c-12.496-12.497-32.758-12.497-45.254 0L242.745 160H141.254L54.627 73.373c-12.496-12.497-32.758-12.497-45.254 0-12.497 12.497-12.497 32.758 0 45.255L104 213.254V480c0 17.673 14.327 32 32 32h16c17.673 0 32-14.327 32-32V368h16v112c0 17.673 14.327 32 32 32h16c17.673 0 32-14.327 32-32V213.254l94.627-94.627c12.497-12.497 12.497-32.757 0-45.254z"],circle:[512,512,[],"f111","M256 8C119 8 8 119 8 256s111 248 248 248 248-111 248-248S393 8 256 8z"],"circle-notch":[512,512,[],"f1ce","M288 39.056v16.659c0 10.804 7.281 20.159 17.686 23.066C383.204 100.434 440 171.518 440 256c0 101.689-82.295 184-184 184-101.689 0-184-82.295-184-184 0-84.47 56.786-155.564 134.312-177.219C216.719 75.874 224 66.517 224 55.712V39.064c0-15.709-14.834-27.153-30.046-23.234C86.603 43.482 7.394 141.206 8.003 257.332c.72 137.052 111.477 246.956 248.531 246.667C393.255 503.711 504 392.788 504 256c0-115.633-79.14-212.779-186.211-240.236C302.678 11.889 288 23.456 288 39.056z"],clipboard:[384,512,[],"f328","M384 112v352c0 26.51-21.49 48-48 48H48c-26.51 0-48-21.49-48-48V112c0-26.51 21.49-48 48-48h80c0-35.29 28.71-64 64-64s64 28.71 64 64h80c26.51 0 48 21.49 48 48zM192 40c-13.255 0-24 10.745-24 24s10.745 24 24 24 24-10.745 24-24-10.745-24-24-24m96 114v-20a6 6 0 0 0-6-6H102a6 6 0 0 0-6 6v20a6 6 0 0 0 6 6h180a6 6 0 0 0 6-6z"],clock:[512,512,[],"f017","M256 8C119 8 8 119 8 256s111 248 248 248 248-111 248-248S393 8 256 8zm57.1 350.1L224.9 294c-3.1-2.3-4.9-5.9-4.9-9.7V116c0-6.6 5.4-12 12-12h48c6.6 0 12 5.4 12 12v137.7l63.5 46.2c5.4 3.9 6.5 11.4 2.6 16.8l-28.2 38.8c-3.9 5.3-11.4 6.5-16.8 2.6z"],clone:[512,512,[],"f24d","M464 0c26.51 0 48 21.49 48 48v288c0 26.51-21.49 48-48 48H176c-26.51 0-48-21.49-48-48V48c0-26.51 21.49-48 48-48h288M176 416c-44.112 0-80-35.888-80-80V128H48c-26.51 0-48 21.49-48 48v288c0 26.51 21.49 48 48 48h288c26.51 0 48-21.49 48-48v-48H176z"],"closed-captioning":[512,512,[],"f20a","M464 64H48C21.5 64 0 85.5 0 112v288c0 26.5 21.5 48 48 48h416c26.5 0 48-21.5 
48-48V112c0-26.5-21.5-48-48-48zM218.1 287.7c2.8-2.5 7.1-2.1 9.2.9l19.5 27.7c1.7 2.4 1.5 5.6-.5 7.7-53.6 56.8-172.8 32.1-172.8-67.9 0-97.3 121.7-119.5 172.5-70.1 2.1 2 2.5 3.2 1 5.7l-17.5 30.5c-1.9 3.1-6.2 4-9.1 1.7-40.8-32-94.6-14.9-94.6 31.2.1 48 51.1 70.5 92.3 32.6zm190.4 0c2.8-2.5 7.1-2.1 9.2.9l19.5 27.7c1.7 2.4 1.5 5.6-.5 7.7-53.5 56.9-172.7 32.1-172.7-67.9 0-97.3 121.7-119.5 172.5-70.1 2.1 2 2.5 3.2 1 5.7L420 222.2c-1.9 3.1-6.2 4-9.1 1.7-40.8-32-94.6-14.9-94.6 31.2 0 48 51 70.5 92.2 32.6z"],cloud:[640,512,[],"f0c2","M537.585 226.56C541.725 215.836 544 204.184 544 192c0-53.019-42.981-96-96-96-19.729 0-38.065 5.954-53.316 16.159C367.042 64.248 315.288 32 256 32c-88.366 0-160 71.634-160 160 0 2.728.07 5.439.204 8.133C40.171 219.845 0 273.227 0 336c0 79.529 64.471 144 144 144h368c70.692 0 128-57.308 128-128 0-61.93-43.983-113.586-102.415-125.44z"],"cloud-download-alt":[640,512,[],"f381","M640 352c0 70.692-57.308 128-128 128H144C64.471 480 0 415.529 0 336c0-62.773 40.171-116.155 96.204-135.867A163.68 163.68 0 0 1 96 192c0-88.366 71.634-160 160-160 59.288 0 111.042 32.248 138.684 80.159C409.935 101.954 428.271 96 448 96c53.019 0 96 42.981 96 96 0 12.184-2.275 23.836-6.415 34.56C596.017 238.414 640 290.07 640 352zm-246.627-64H328V176c0-8.837-7.164-16-16-16h-48c-8.836 0-16 7.163-16 16v112h-65.373c-14.254 0-21.393 17.234-11.314 27.314l105.373 105.373c6.248 6.248 16.379 6.248 22.627 0l105.373-105.373c10.08-10.08 2.941-27.314-11.313-27.314z"],"cloud-upload-alt":[640,512,[],"f382","M640 352c0 70.692-57.308 128-128 128H144C64.471 480 0 415.529 0 336c0-62.773 40.171-116.155 96.204-135.867A163.68 163.68 0 0 1 96 192c0-88.366 71.634-160 160-160 59.288 0 111.042 32.248 138.684 80.159C409.935 101.954 428.271 96 448 96c53.019 0 96 42.981 96 96 0 12.184-2.275 23.836-6.415 34.56C596.017 238.414 640 290.07 640 352zm-235.314-91.314L299.314 155.314c-6.248-6.248-16.379-6.248-22.627 0L171.314 260.686c-10.08 10.08-2.941 27.314 11.313 27.314H248v112c0 8.837 7.164 16 16 16h48c8.836 0 16-7.163 16-16V288h65.373c14.254 0 21.393-17.234 11.313-27.314z"],code:[640,512,[],"f121","M278.9 511.5l-61-17.7c-6.4-1.8-10-8.5-8.2-14.9L346.2 8.7c1.8-6.4 8.5-10 14.9-8.2l61 17.7c6.4 1.8 10 8.5 8.2 14.9L293.8 503.3c-1.9 6.4-8.5 10.1-14.9 8.2zm-114-112.2l43.5-46.4c4.6-4.9 4.3-12.7-.8-17.2L117 256l90.6-79.7c5.1-4.5 5.5-12.3.8-17.2l-43.5-46.4c-4.5-4.8-12.1-5.1-17-.5L3.8 247.2c-5.1 4.7-5.1 12.8 0 17.5l144.1 135.1c4.9 4.6 12.5 4.4 17-.5zm327.2.6l144.1-135.1c5.1-4.7 5.1-12.8 0-17.5L492.1 112.1c-4.8-4.5-12.4-4.3-17 .5L431.6 159c-4.6 4.9-4.3 12.7.8 17.2L523 256l-90.6 79.7c-5.1 4.5-5.5 12.3-.8 17.2l43.5 46.4c4.5 4.9 12.1 5.1 17 .6z"],"code-branch":[384,512,[],"f126","M384 144c0-44.2-35.8-80-80-80s-80 35.8-80 80c0 36.4 24.3 67.1 57.5 76.8-.6 16.1-4.2 28.5-11 36.9-15.4 19.2-49.3 22.4-85.2 25.7-28.2 2.6-57.4 5.4-81.3 16.9v-144c32.5-10.2 56-40.5 56-76.3 0-44.2-35.8-80-80-80S0 35.8 0 80c0 35.8 23.5 66.1 56 76.3v199.3C23.5 365.9 0 396.2 0 432c0 44.2 35.8 80 80 80s80-35.8 80-80c0-34-21.2-63.1-51.2-74.6 3.1-5.2 7.8-9.8 14.9-13.4 16.2-8.2 40.4-10.4 66.1-12.8 42.2-3.9 90-8.4 118.2-43.4 14-17.4 21.1-39.8 21.6-67.9 31.6-10.8 54.4-40.7 54.4-75.9zM80 64c8.8 0 16 7.2 16 16s-7.2 16-16 16-16-7.2-16-16 7.2-16 16-16zm0 384c-8.8 0-16-7.2-16-16s7.2-16 16-16 16 7.2 16 16-7.2 16-16 16zm224-320c8.8 0 16 7.2 16 16s-7.2 16-16 16-16-7.2-16-16 7.2-16 16-16z"],coffee:[640,512,[],"f0f4","M192 384h192c53 0 96-43 96-96h32c70.6 0 128-57.4 128-128S582.6 32 512 32H120c-13.3 0-24 10.7-24 24v232c0 53 43 96 96 96zM512 96c35.3 0 64 28.7 64 64s-28.7 64-64 64h-32V96h32zm47.7 
384H48.3c-47.6 0-61-64-36-64h583.3c25 0 11.8 64-35.9 64z"],cog:[512,512,[],"f013","M444.788 291.1l42.616 24.599c4.867 2.809 7.126 8.618 5.459 13.985-11.07 35.642-29.97 67.842-54.689 94.586a12.016 12.016 0 0 1-14.832 2.254l-42.584-24.595a191.577 191.577 0 0 1-60.759 35.13v49.182a12.01 12.01 0 0 1-9.377 11.718c-34.956 7.85-72.499 8.256-109.219.007-5.49-1.233-9.403-6.096-9.403-11.723v-49.184a191.555 191.555 0 0 1-60.759-35.13l-42.584 24.595a12.016 12.016 0 0 1-14.832-2.254c-24.718-26.744-43.619-58.944-54.689-94.586-1.667-5.366.592-11.175 5.459-13.985L67.212 291.1a193.48 193.48 0 0 1 0-70.199l-42.616-24.599c-4.867-2.809-7.126-8.618-5.459-13.985 11.07-35.642 29.97-67.842 54.689-94.586a12.016 12.016 0 0 1 14.832-2.254l42.584 24.595a191.577 191.577 0 0 1 60.759-35.13V25.759a12.01 12.01 0 0 1 9.377-11.718c34.956-7.85 72.499-8.256 109.219-.007 5.49 1.233 9.403 6.096 9.403 11.723v49.184a191.555 191.555 0 0 1 60.759 35.13l42.584-24.595a12.016 12.016 0 0 1 14.832 2.254c24.718 26.744 43.619 58.944 54.689 94.586 1.667 5.366-.592 11.175-5.459 13.985L444.788 220.9a193.485 193.485 0 0 1 0 70.2zM336 256c0-44.112-35.888-80-80-80s-80 35.888-80 80 35.888 80 80 80 80-35.888 80-80z"],cogs:[640,512,[],"f085","M512.1 191l-8.2 14.3c-3 5.3-9.4 7.5-15.1 5.4-11.8-4.4-22.6-10.7-32.1-18.6-4.6-3.8-5.8-10.5-2.8-15.7l8.2-14.3c-6.9-8-12.3-17.3-15.9-27.4h-16.5c-6 0-11.2-4.3-12.2-10.3-2-12-2.1-24.6 0-37.1 1-6 6.2-10.4 12.2-10.4h16.5c3.6-10.1 9-19.4 15.9-27.4l-8.2-14.3c-3-5.2-1.9-11.9 2.8-15.7 9.5-7.9 20.4-14.2 32.1-18.6 5.7-2.1 12.1.1 15.1 5.4l8.2 14.3c10.5-1.9 21.2-1.9 31.7 0L552 6.3c3-5.3 9.4-7.5 15.1-5.4 11.8 4.4 22.6 10.7 32.1 18.6 4.6 3.8 5.8 10.5 2.8 15.7l-8.2 14.3c6.9 8 12.3 17.3 15.9 27.4h16.5c6 0 11.2 4.3 12.2 10.3 2 12 2.1 24.6 0 37.1-1 6-6.2 10.4-12.2 10.4h-16.5c-3.6 10.1-9 19.4-15.9 27.4l8.2 14.3c3 5.2 1.9 11.9-2.8 15.7-9.5 7.9-20.4 14.2-32.1 18.6-5.7 2.1-12.1-.1-15.1-5.4l-8.2-14.3c-10.4 1.9-21.2 1.9-31.7 0zm-10.5-58.8c38.5 29.6 82.4-14.3 52.8-52.8-38.5-29.7-82.4 14.3-52.8 52.8zM386.3 286.1l33.7 16.8c10.1 5.8 14.5 18.1 10.5 29.1-8.9 24.2-26.4 46.4-42.6 65.8-7.4 8.9-20.2 11.1-30.3 5.3l-29.1-16.8c-16 13.7-34.6 24.6-54.9 31.7v33.6c0 11.6-8.3 21.6-19.7 23.6-24.6 4.2-50.4 4.4-75.9 0-11.5-2-20-11.9-20-23.6V418c-20.3-7.2-38.9-18-54.9-31.7L74 403c-10 5.8-22.9 3.6-30.3-5.3-16.2-19.4-33.3-41.6-42.2-65.7-4-10.9.4-23.2 10.5-29.1l33.3-16.8c-3.9-20.9-3.9-42.4 0-63.4L12 205.8c-10.1-5.8-14.6-18.1-10.5-29 8.9-24.2 26-46.4 42.2-65.8 7.4-8.9 20.2-11.1 30.3-5.3l29.1 16.8c16-13.7 34.6-24.6 54.9-31.7V57.1c0-11.5 8.2-21.5 19.6-23.5 24.6-4.2 50.5-4.4 76-.1 11.5 2 20 11.9 20 23.6v33.6c20.3 7.2 38.9 18 54.9 31.7l29.1-16.8c10-5.8 22.9-3.6 30.3 5.3 16.2 19.4 33.2 41.6 42.1 65.8 4 10.9.1 23.2-10 29.1l-33.7 16.8c3.9 21 3.9 42.5 0 63.5zm-117.6 21.1c59.2-77-28.7-164.9-105.7-105.7-59.2 77 28.7 164.9 105.7 105.7zm243.4 182.7l-8.2 14.3c-3 5.3-9.4 7.5-15.1 5.4-11.8-4.4-22.6-10.7-32.1-18.6-4.6-3.8-5.8-10.5-2.8-15.7l8.2-14.3c-6.9-8-12.3-17.3-15.9-27.4h-16.5c-6 0-11.2-4.3-12.2-10.3-2-12-2.1-24.6 0-37.1 1-6 6.2-10.4 12.2-10.4h16.5c3.6-10.1 9-19.4 15.9-27.4l-8.2-14.3c-3-5.2-1.9-11.9 2.8-15.7 9.5-7.9 20.4-14.2 32.1-18.6 5.7-2.1 12.1.1 15.1 5.4l8.2 14.3c10.5-1.9 21.2-1.9 31.7 0l8.2-14.3c3-5.3 9.4-7.5 15.1-5.4 11.8 4.4 22.6 10.7 32.1 18.6 4.6 3.8 5.8 10.5 2.8 15.7l-8.2 14.3c6.9 8 12.3 17.3 15.9 27.4h16.5c6 0 11.2 4.3 12.2 10.3 2 12 2.1 24.6 0 37.1-1 6-6.2 10.4-12.2 10.4h-16.5c-3.6 10.1-9 19.4-15.9 27.4l8.2 14.3c3 5.2 1.9 11.9-2.8 15.7-9.5 7.9-20.4 14.2-32.1 18.6-5.7 2.1-12.1-.1-15.1-5.4l-8.2-14.3c-10.4 1.9-21.2 1.9-31.7 0zM501.6 431c38.5 29.6 82.4-14.3 
52.8-52.8-38.5-29.6-82.4 14.3-52.8 52.8z"],columns:[512,512,[],"f0db","M464 32H48C21.49 32 0 53.49 0 80v352c0 26.51 21.49 48 48 48h416c26.51 0 48-21.49 48-48V80c0-26.51-21.49-48-48-48zM224 416H64V160h160v256zm224 0H288V160h160v256z"],comment:[576,512,[],"f075","M576 240c0 115-129 208-288 208-48.3 0-93.9-8.6-133.9-23.8-40.3 31.2-89.8 50.3-142.4 55.7-5.2.6-10.2-2.8-11.5-7.7-1.3-5 2.7-8.1 6.6-11.8 19.3-18.4 42.7-32.8 51.9-94.6C21.9 330.9 0 287.3 0 240 0 125.1 129 32 288 32s288 93.1 288 208z"],"comment-alt":[576,512,[],"f27a","M576 240c0 115-129 208-288 208-48.3 0-93.9-8.6-133.9-23.8-40.3 31.2-89.8 50.3-142.4 55.7-5.2.6-10.2-2.8-11.5-7.7-1.3-5 2.7-8.1 6.6-11.8 19.3-18.4 42.7-32.8 51.9-94.6C21.9 330.9 0 287.3 0 240 0 125.1 129 32 288 32s288 93.1 288 208zm-416-48c-26.5 0-48 21.5-48 48s21.5 48 48 48 48-21.5 48-48-21.5-48-48-48zm128 0c-26.5 0-48 21.5-48 48s21.5 48 48 48 48-21.5 48-48-21.5-48-48-48zm128 0c-26.5 0-48 21.5-48 48s21.5 48 48 48 48-21.5 48-48-21.5-48-48-48z"],comments:[576,512,[],"f086","M224 358.857c-37.599 0-73.027-6.763-104.143-18.7-31.375 24.549-69.869 39.508-110.764 43.796a8.632 8.632 0 0 1-.89.047c-3.736 0-7.111-2.498-8.017-6.061-.98-3.961 2.088-6.399 5.126-9.305 15.017-14.439 33.222-25.79 40.342-74.297C17.015 266.886 0 232.622 0 195.429 0 105.16 100.297 32 224 32s224 73.159 224 163.429c-.001 90.332-100.297 163.428-224 163.428zm347.067 107.174c-13.944-13.127-30.849-23.446-37.46-67.543 68.808-64.568 52.171-156.935-37.674-207.065.031 1.334.066 2.667.066 4.006 0 122.493-129.583 216.394-284.252 211.222 38.121 30.961 93.989 50.492 156.252 50.492 34.914 0 67.811-6.148 96.704-17 29.134 22.317 64.878 35.916 102.853 39.814 3.786.395 7.363-1.973 8.27-5.467.911-3.601-1.938-5.817-4.759-8.459z"],compass:[512,512,[],"f14e","M504 256c0 136.967-111.033 248-248 248S8 392.967 8 256 119.033 8 256 8s248 111.033 248 248zM307.446 120.844l-102.642 97.779a23.997 23.997 0 0 0-6.772 11.729l-33.359 137.779c-5.68 23.459 22.777 39.318 39.88 23.024l102.64-97.779a23.99 23.99 0 0 0 6.772-11.729l33.359-137.779c5.618-23.198-22.591-39.493-39.878-23.024zM256 224c-17.673 0-32 14.327-32 32s14.327 32 32 32 32-14.327 32-32-14.327-32-32-32z"],compress:[448,512,[],"f066","M436 192H312c-13.3 0-24-10.7-24-24V44c0-6.6 5.4-12 12-12h40c6.6 0 12 5.4 12 12v84h84c6.6 0 12 5.4 12 12v40c0 6.6-5.4 12-12 12zm-276-24V44c0-6.6-5.4-12-12-12h-40c-6.6 0-12 5.4-12 12v84H12c-6.6 0-12 5.4-12 12v40c0 6.6 5.4 12 12 12h124c13.3 0 24-10.7 24-24zm0 300V344c0-13.3-10.7-24-24-24H12c-6.6 0-12 5.4-12 12v40c0 6.6 5.4 12 12 12h84v84c0 6.6 5.4 12 12 12h40c6.6 0 12-5.4 12-12zm192 0v-84h84c6.6 0 12-5.4 12-12v-40c0-6.6-5.4-12-12-12H312c-13.3 0-24 10.7-24 24v124c0 6.6 5.4 12 12 12h40c6.6 0 12-5.4 12-12z"],copy:[448,512,[],"f0c5","M320 448v40c0 13.255-10.745 24-24 24H24c-13.255 0-24-10.745-24-24V120c0-13.255 10.745-24 24-24h72v296c0 30.879 25.121 56 56 56h168zm0-344V0H152c-13.255 0-24 10.745-24 24v368c0 13.255 10.745 24 24 24h272c13.255 0 24-10.745 24-24V128H344c-13.2 0-24-10.8-24-24zm120.971-31.029L375.029 7.029A24 24 0 0 0 358.059 0H352v96h96v-6.059a24 24 0 0 0-7.029-16.97z"],copyright:[512,512,[],"f1f9","M256 8C119.033 8 8 119.033 8 256s111.033 248 248 248 248-111.033 248-248S392.967 8 256 8zm117.134 346.753c-1.592 1.867-39.776 45.731-109.851 45.731-84.692 0-144.484-63.26-144.484-145.567 0-81.303 62.004-143.401 143.762-143.401 66.957 0 101.965 37.315 103.422 38.904a12 12 0 0 1 1.238 14.623l-22.38 34.655c-4.049 6.267-12.774 7.351-18.234 2.295-.233-.214-26.529-23.88-61.88-23.88-46.116 0-73.916 33.575-73.916 76.082 0 39.602 25.514 79.692 74.277 79.692 38.697 
0 65.28-28.338 65.544-28.625 5.132-5.565 14.059-5.033 18.508 1.053l24.547 33.572a12.001 12.001 0 0 1-.553 14.866z"],"credit-card":[576,512,[],"f09d","M0 432c0 26.5 21.5 48 48 48h480c26.5 0 48-21.5 48-48V256H0v176zm192-68c0-6.6 5.4-12 12-12h136c6.6 0 12 5.4 12 12v40c0 6.6-5.4 12-12 12H204c-6.6 0-12-5.4-12-12v-40zm-128 0c0-6.6 5.4-12 12-12h72c6.6 0 12 5.4 12 12v40c0 6.6-5.4 12-12 12H76c-6.6 0-12-5.4-12-12v-40zM576 80v48H0V80c0-26.5 21.5-48 48-48h480c26.5 0 48 21.5 48 48z"],crop:[512,512,[],"f125","M488 352h-40V109.3l57-57c9.4-9.4 9.4-24.6 0-33.9L493.7 7c-9.4-9.4-24.6-9.4-33.9 0l-57 57H160V24c0-13.3-10.7-24-24-24H88C74.7 0 64 10.7 64 24v40H24C10.7 64 0 74.7 0 88v48c0 13.3 10.7 24 24 24h40v264c0 13.3 10.7 24 24 24h264v40c0 13.3 10.7 24 24 24h48c13.3 0 24-10.7 24-24v-40h40c13.3 0 24-10.7 24-24v-48c0-13.3-10.7-24-24-24zM306.7 160L160 306.7V160h146.7zM205.3 352L352 205.3V352H205.3z"],crosshairs:[512,512,[],"f05b","M500 224h-30.364C455.724 130.325 381.675 56.276 288 42.364V12c0-6.627-5.373-12-12-12h-40c-6.627 0-12 5.373-12 12v30.364C130.325 56.276 56.276 130.325 42.364 224H12c-6.627 0-12 5.373-12 12v40c0 6.627 5.373 12 12 12h30.364C56.276 381.675 130.325 455.724 224 469.636V500c0 6.627 5.373 12 12 12h40c6.627 0 12-5.373 12-12v-30.364C381.675 455.724 455.724 381.675 469.636 288H500c6.627 0 12-5.373 12-12v-40c0-6.627-5.373-12-12-12zM288 404.634V364c0-6.627-5.373-12-12-12h-40c-6.627 0-12 5.373-12 12v40.634C165.826 392.232 119.783 346.243 107.366 288H148c6.627 0 12-5.373 12-12v-40c0-6.627-5.373-12-12-12h-40.634C119.768 165.826 165.757 119.783 224 107.366V148c0 6.627 5.373 12 12 12h40c6.627 0 12-5.373 12-12v-40.634C346.174 119.768 392.217 165.757 404.634 224H364c-6.627 0-12 5.373-12 12v40c0 6.627 5.373 12 12 12h40.634C392.232 346.174 346.243 392.217 288 404.634zM288 256c0 17.673-14.327 32-32 32s-32-14.327-32-32c0-17.673 14.327-32 32-32s32 14.327 32 32z"],cube:[512,512,[],"f1b2","M239.1 6.3l-208 78c-18.7 7-31.1 25-31.1 45v225.1c0 18.2 10.3 34.8 26.5 42.9l208 104c13.5 6.8 29.4 6.8 42.9 0l208-104c16.3-8.1 26.5-24.8 26.5-42.9V129.3c0-20-12.4-37.9-31.1-44.9l-208-78C262 2.2 250 2.2 239.1 6.3zM256 68.4l192 72v1.1l-192 78-192-78v-1.1l192-72zm32 356V275.5l160-65v133.9l-160 80z"],cubes:[512,512,[],"f1b3","M488.6 250.2L392 214V105.5c0-15-9.3-28.4-23.4-33.7l-100-37.5c-8.1-3.1-17.1-3.1-25.3 0l-100 37.5c-14.1 5.3-23.4 18.7-23.4 33.7V214l-96.6 36.2C9.3 255.5 0 268.9 0 283.9V394c0 13.6 7.7 26.1 19.9 32.2l100 50c10.1 5.1 22.1 5.1 32.2 0l103.9-52 103.9 52c10.1 5.1 22.1 5.1 32.2 0l100-50c12.2-6.1 19.9-18.6 19.9-32.2V283.9c0-15-9.3-28.4-23.4-33.7zM358 214.8l-85 31.9v-68.2l85-37v73.3zM154 104.1l102-38.2 102 38.2v.6l-102 41.4-102-41.4v-.6zm84 291.1l-85 42.5v-79.1l85-38.8v75.4zm0-112l-102 41.4-102-41.4v-.6l102-38.2 102 38.2v.6zm240 112l-85 42.5v-79.1l85-38.8v75.4zm0-112l-102 41.4-102-41.4v-.6l102-38.2 102 38.2v.6z"],cut:[448,512,[],"f0c4","M444.485 422.426c4.689 4.689 4.684 12.287 0 16.971-32.804 32.804-85.991 32.804-118.795 0L210.176 323.883l-24.859 24.859C189.63 359.657 192 371.552 192 384c0 53.019-42.981 96-96 96S0 437.019 0 384s42.981-96 96-96c4.536 0 8.995.322 13.363.93l32.93-32.93-32.93-32.93c-4.368.608-8.827.93-13.363.93-53.019 0-96-42.981-96-96s42.981-96 96-96 96 42.981 96 96c0 12.448-2.37 24.343-6.682 35.258l24.859 24.859L325.69 72.603c32.804-32.804 85.991-32.804 118.795 0 4.684 4.684 4.689 12.282 0 16.971L278.059 256l166.426 166.426zM96 96c-17.645 0-32 14.355-32 32s14.355 32 32 32 32-14.355 32-32-14.355-32-32-32m0 256c-17.645 0-32 14.355-32 32s14.355 32 32 32 32-14.355 32-32-14.355-32-32-32m112-108c-6.627 0-12 
5.373-12 12s5.373 12 12 12 12-5.373 12-12-5.373-12-12-12z"],database:[448,512,[],"f1c0","M448 73.143v45.714C448 159.143 347.667 192 224 192S0 159.143 0 118.857V73.143C0 32.857 100.333 0 224 0s224 32.857 224 73.143zM448 176v102.857C448 319.143 347.667 352 224 352S0 319.143 0 278.857V176c48.125 33.143 136.208 48.572 224 48.572S399.874 209.143 448 176zm0 160v102.857C448 479.143 347.667 512 224 512S0 479.143 0 438.857V336c48.125 33.143 136.208 48.572 224 48.572S399.874 369.143 448 336z"],deaf:[512,512,[],"f2a4","M216 260c0 15.464-12.536 28-28 28s-28-12.536-28-28c0-44.112 35.888-80 80-80s80 35.888 80 80c0 15.464-12.536 28-28 28s-28-12.536-28-28c0-13.234-10.767-24-24-24s-24 10.766-24 24zm24-176c-97.047 0-176 78.953-176 176 0 15.464 12.536 28 28 28s28-12.536 28-28c0-66.168 53.832-120 120-120s120 53.832 120 120c0 75.164-71.009 70.311-71.997 143.622L288 404c0 28.673-23.327 52-52 52-15.464 0-28 12.536-28 28s12.536 28 28 28c59.475 0 107.876-48.328 108-107.774.595-34.428 72-48.24 72-144.226 0-97.047-78.953-176-176-176zm268.485-52.201L480.2 3.515c-4.687-4.686-12.284-4.686-16.971 0L376.2 90.544c-4.686 4.686-4.686 12.284 0 16.971l28.285 28.285c4.686 4.686 12.284 4.686 16.97 0l87.03-87.029c4.687-4.688 4.687-12.286 0-16.972zM168.97 314.745c-4.686-4.686-12.284-4.686-16.97 0L3.515 463.23c-4.686 4.686-4.686 12.284 0 16.971L31.8 508.485c4.687 4.686 12.284 4.686 16.971 0L197.256 360c4.686-4.686 4.686-12.284 0-16.971l-28.286-28.284z"],desktop:[576,512,[],"f108","M528 0H48C21.5 0 0 21.5 0 48v320c0 26.5 21.5 48 48 48h192l-16 48h-72c-13.3 0-24 10.7-24 24s10.7 24 24 24h272c13.3 0 24-10.7 24-24s-10.7-24-24-24h-72l-16-48h192c26.5 0 48-21.5 48-48V48c0-26.5-21.5-48-48-48zm-16 352H64V64h448v288z"],"dollar-sign":[320,512,[],"f155","M113.411 169.375c0-23.337 21.536-38.417 54.865-38.417 26.726 0 54.116 12.263 76.461 28.333 5.88 4.229 14.13 2.354 17.575-4.017l23.552-43.549c2.649-4.898 1.596-10.991-2.575-14.68-24.281-21.477-59.135-34.09-91.289-37.806V12c0-6.627-5.373-12-12-12h-40c-6.627 0-12 5.373-12 12v49.832c-58.627 13.29-97.299 55.917-97.299 108.639 0 123.533 184.765 110.81 184.765 169.414 0 19.823-16.311 41.158-52.124 41.158-30.751 0-62.932-15.88-87.848-35.887-5.31-4.264-13.082-3.315-17.159 2.14l-30.389 40.667c-3.627 4.854-3.075 11.657 1.302 15.847 24.049 23.02 59.249 41.255 98.751 47.973V500c0 6.627 5.373 12 12 12h40c6.627 0 12-5.373 12-12v-47.438c65.72-10.215 106.176-59.186 106.176-116.516.001-119.688-184.764-103.707-184.764-166.671z"],"dot-circle":[512,512,[],"f192","M256 8C119.033 8 8 119.033 8 256s111.033 248 248 248 248-111.033 248-248S392.967 8 256 8zm80 248c0 44.112-35.888 80-80 80s-80-35.888-80-80 35.888-80 80-80 80 35.888 80 80z"],download:[512,512,[],"f019","M216 0h80c13.3 0 24 10.7 24 24v168h87.7c17.8 0 26.7 21.5 14.1 34.1L269.7 378.3c-7.5 7.5-19.8 7.5-27.3 0L90.1 226.1c-12.6-12.6-3.7-34.1 14.1-34.1H192V24c0-13.3 10.7-24 24-24zm296 376v112c0 13.3-10.7 24-24 24H24c-13.3 0-24-10.7-24-24V376c0-13.3 10.7-24 24-24h146.7l49 49c20.1 20.1 52.5 20.1 72.6 0l49-49H488c13.3 0 24 10.7 24 24zm-124 88c0-11-9-20-20-20s-20 9-20 20 9 20 20 20 20-9 20-20zm64 0c0-11-9-20-20-20s-20 9-20 20 9 20 20 20 20-9 20-20z"],edit:[576,512,[],"f044","M402.6 83.2l90.2 90.2c3.8 3.8 3.8 10 0 13.8L274.4 405.6l-92.8 10.3c-12.4 1.4-22.9-9.1-21.5-21.5l10.3-92.8L388.8 83.2c3.8-3.8 10-3.8 13.8 0zm162-22.9l-48.8-48.8c-15.2-15.2-39.9-15.2-55.2 0l-35.4 35.4c-3.8 3.8-3.8 10 0 13.8l90.2 90.2c3.8 3.8 10 3.8 13.8 0l35.4-35.4c15.2-15.3 15.2-40 0-55.2zM384 346.2V448H64V128h229.8c3.2 0 6.2-1.3 8.5-3.5l40-40c7.6-7.6 2.2-20.5-8.5-20.5H48C21.5 64 0 85.5 0 
112v352c0 26.5 21.5 48 48 48h352c26.5 0 48-21.5 48-48V306.2c0-10.7-12.9-16-20.5-8.5l-40 40c-2.2 2.3-3.5 5.3-3.5 8.5z"],eject:[448,512,[],"f052","M448 384v64c0 17.673-14.327 32-32 32H32c-17.673 0-32-14.327-32-32v-64c0-17.673 14.327-32 32-32h384c17.673 0 32 14.327 32 32zM48.053 320h351.886c41.651 0 63.581-49.674 35.383-80.435L259.383 47.558c-19.014-20.743-51.751-20.744-70.767 0L12.67 239.565C-15.475 270.268 6.324 320 48.053 320z"],"ellipsis-h":[512,512,[],"f141","M328 256c0 39.8-32.2 72-72 72s-72-32.2-72-72 32.2-72 72-72 72 32.2 72 72zm104-72c-39.8 0-72 32.2-72 72s32.2 72 72 72 72-32.2 72-72-32.2-72-72-72zm-352 0c-39.8 0-72 32.2-72 72s32.2 72 72 72 72-32.2 72-72-32.2-72-72-72z"],"ellipsis-v":[192,512,[],"f142","M96 184c39.8 0 72 32.2 72 72s-32.2 72-72 72-72-32.2-72-72 32.2-72 72-72zM24 80c0 39.8 32.2 72 72 72s72-32.2 72-72S135.8 8 96 8 24 40.2 24 80zm0 352c0 39.8 32.2 72 72 72s72-32.2 72-72-32.2-72-72-72-72 32.2-72 72z"],envelope:[512,512,[],"f0e0","M502.3 190.8c3.9-3.1 9.7-.2 9.7 4.7V400c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V195.6c0-5 5.7-7.8 9.7-4.7 22.4 17.4 52.1 39.5 154.1 113.6 21.1 15.4 56.7 47.8 92.2 47.6 35.7.3 72-32.8 92.3-47.6 102-74.1 131.6-96.3 154-113.7zM256 320c23.2.4 56.6-29.2 73.4-41.4 132.7-96.3 142.8-104.7 173.4-128.7 5.8-4.5 9.2-11.5 9.2-18.9v-19c0-26.5-21.5-48-48-48H48C21.5 64 0 85.5 0 112v19c0 7.4 3.4 14.3 9.2 18.9 30.6 23.9 40.7 32.4 173.4 128.7 16.8 12.2 50.2 41.8 73.4 41.4z"],"envelope-open":[512,512,[],"f2b6","M512 464c0 26.51-21.49 48-48 48H48c-26.51 0-48-21.49-48-48V200.724a48 48 0 0 1 18.387-37.776c24.913-19.529 45.501-35.365 164.2-121.511C199.412 29.17 232.797-.347 256 .003c23.198-.354 56.596 29.172 73.413 41.433 118.687 86.137 139.303 101.995 164.2 121.512A48 48 0 0 1 512 200.724V464zm-65.666-196.605c-2.563-3.728-7.7-4.595-11.339-1.907-22.845 16.873-55.462 40.705-105.582 77.079-16.825 12.266-50.21 41.781-73.413 41.43-23.211.344-56.559-29.143-73.413-41.43-50.114-36.37-82.734-60.204-105.582-77.079-3.639-2.688-8.776-1.821-11.339 1.907l-9.072 13.196a7.998 7.998 0 0 0 1.839 10.967c22.887 16.899 55.454 40.69 105.303 76.868 20.274 14.781 56.524 47.813 92.264 47.573 35.724.242 71.961-32.771 92.263-47.573 49.85-36.179 82.418-59.97 105.303-76.868a7.998 7.998 0 0 0 1.839-10.967l-9.071-13.196z"],"envelope-square":[448,512,[],"f199","M400 32H48C21.49 32 0 53.49 0 80v352c0 26.51 21.49 48 48 48h352c26.51 0 48-21.49 48-48V80c0-26.51-21.49-48-48-48zM178.117 262.104C87.429 196.287 88.353 196.121 64 177.167V152c0-13.255 10.745-24 24-24h272c13.255 0 24 10.745 24 24v25.167c-24.371 18.969-23.434 19.124-114.117 84.938-10.5 7.655-31.392 26.12-45.883 25.894-14.503.218-35.367-18.227-45.883-25.895zM384 217.775V360c0 13.255-10.745 24-24 24H88c-13.255 0-24-10.745-24-24V217.775c13.958 10.794 33.329 25.236 95.303 70.214 14.162 10.341 37.975 32.145 64.694 32.01 26.887.134 51.037-22.041 64.72-32.025 61.958-44.965 81.325-59.406 95.283-70.199z"],eraser:[512,512,[],"f12d","M497.941 273.941c18.745-18.745 18.745-49.137 0-67.882l-160-160c-18.745-18.745-49.136-18.746-67.883 0l-256 256c-18.745 18.745-18.745 49.137 0 67.882l96 96A48.004 48.004 0 0 0 144 480h356c6.627 0 12-5.373 12-12v-40c0-6.627-5.373-12-12-12H355.883l142.058-142.059zm-302.627-62.627l137.373 137.373L265.373 416H150.628l-80-80 124.686-124.686z"],"euro-sign":[320,512,[],"f153","M310.706 413.765c-1.314-6.63-7.835-10.872-14.424-9.369-10.692 2.439-27.422 5.413-45.426 5.413-56.763 0-101.929-34.79-121.461-85.449h113.689a12 12 0 0 0 
11.708-9.369l6.373-28.36c1.686-7.502-4.019-14.631-11.708-14.631H115.22c-1.21-14.328-1.414-28.287.137-42.245H261.95a12 12 0 0 0 11.723-9.434l6.512-29.755c1.638-7.484-4.061-14.566-11.723-14.566H130.184c20.633-44.991 62.69-75.03 117.619-75.03 14.486 0 28.564 2.25 37.851 4.145 6.216 1.268 12.347-2.498 14.002-8.623l11.991-44.368c1.822-6.741-2.465-13.616-9.326-14.917C290.217 34.912 270.71 32 249.635 32 152.451 32 74.03 92.252 45.075 176H12c-6.627 0-12 5.373-12 12v29.755c0 6.627 5.373 12 12 12h21.569c-1.009 13.607-1.181 29.287-.181 42.245H12c-6.627 0-12 5.373-12 12v28.36c0 6.627 5.373 12 12 12h30.114C67.139 414.692 145.264 480 249.635 480c26.301 0 48.562-4.544 61.101-7.788 6.167-1.595 10.027-7.708 8.788-13.957l-8.818-44.49z"],"exchange-alt":[512,512,[],"f362","M0 168v-16c0-13.255 10.745-24 24-24h360V80c0-21.367 25.899-32.042 40.971-16.971l80 80c9.372 9.373 9.372 24.569 0 33.941l-80 80C409.956 271.982 384 261.456 384 240v-48H24c-13.255 0-24-10.745-24-24zm488 152H128v-48c0-21.314-25.862-32.08-40.971-16.971l-80 80c-9.372 9.373-9.372 24.569 0 33.941l80 80C102.057 463.997 128 453.437 128 432v-48h360c13.255 0 24-10.745 24-24v-16c0-13.255-10.745-24-24-24z"],exclamation:[192,512,[],"f12a","M176 432c0 44.112-35.888 80-80 80s-80-35.888-80-80 35.888-80 80-80 80 35.888 80 80zM25.26 25.199l13.6 272C39.499 309.972 50.041 320 62.83 320h66.34c12.789 0 23.331-10.028 23.97-22.801l13.6-272C167.425 11.49 156.496 0 142.77 0H49.23C35.504 0 24.575 11.49 25.26 25.199z"],"exclamation-circle":[512,512,[],"f06a","M504 256c0 136.997-111.043 248-248 248S8 392.997 8 256C8 119.083 119.043 8 256 8s248 111.083 248 248zm-248 50c-25.405 0-46 20.595-46 46s20.595 46 46 46 46-20.595 46-46-20.595-46-46-46zm-43.673-165.346l7.418 136c.347 6.364 5.609 11.346 11.982 11.346h48.546c6.373 0 11.635-4.982 11.982-11.346l7.418-136c.375-6.874-5.098-12.654-11.982-12.654h-63.383c-6.884 0-12.356 5.78-11.981 12.654z"],"exclamation-triangle":[576,512,[],"f071","M569.517 440.013C587.975 472.007 564.806 512 527.94 512H48.054c-36.937 0-59.999-40.055-41.577-71.987L246.423 23.985c18.467-32.009 64.72-31.951 83.154 0l239.94 416.028zM288 354c-25.405 0-46 20.595-46 46s20.595 46 46 46 46-20.595 46-46-20.595-46-46-46zm-43.673-165.346l7.418 136c.347 6.364 5.609 11.346 11.982 11.346h48.546c6.373 0 11.635-4.982 11.982-11.346l7.418-136c.375-6.874-5.098-12.654-11.982-12.654h-63.383c-6.884 0-12.356 5.78-11.981 12.654z"],expand:[448,512,[],"f065","M0 180V56c0-13.3 10.7-24 24-24h124c6.6 0 12 5.4 12 12v40c0 6.6-5.4 12-12 12H64v84c0 6.6-5.4 12-12 12H12c-6.6 0-12-5.4-12-12zM288 44v40c0 6.6 5.4 12 12 12h84v84c0 6.6 5.4 12 12 12h40c6.6 0 12-5.4 12-12V56c0-13.3-10.7-24-24-24H300c-6.6 0-12 5.4-12 12zm148 276h-40c-6.6 0-12 5.4-12 12v84h-84c-6.6 0-12 5.4-12 12v40c0 6.6 5.4 12 12 12h124c13.3 0 24-10.7 24-24V332c0-6.6-5.4-12-12-12zM160 468v-40c0-6.6-5.4-12-12-12H64v-84c0-6.6-5.4-12-12-12H12c-6.6 0-12 5.4-12 12v124c0 13.3 10.7 24 24 24h124c6.6 0 12-5.4 12-12z"],"expand-arrows-alt":[448,512,[],"f31e","M448.1 344v112c0 13.3-10.7 24-24 24h-112c-21.4 0-32.1-25.9-17-41l36.2-36.2L224 295.6 116.8 402.9 153 439c15.1 15.1 4.4 41-17 41H24c-13.3 0-24-10.7-24-24V344c0-21.4 25.9-32.1 41-17l36.2 36.2L184.5 256 77.2 148.7 41 185c-15.1 15.1-41 4.4-41-17V56c0-13.3 10.7-24 24-24h112c21.4 0 32.1 25.9 17 41l-36.2 36.2L224 216.4l107.3-107.3L295.1 73c-15.1-15.1-4.4-41 17-41h112c13.3 0 24 10.7 24 24v112c0 21.4-25.9 32.1-41 17l-36.2-36.2L263.6 256l107.3 107.3 36.2-36.2c15.1-15.2 41-4.5 41 16.9z"],"external-link-alt":[576,512,[],"f35d","M576 24v127.984c0 21.461-25.96 31.98-40.971 
16.971l-35.707-35.709-243.523 243.523c-9.373 9.373-24.568 9.373-33.941 0l-22.627-22.627c-9.373-9.373-9.373-24.569 0-33.941L442.756 76.676l-35.703-35.705C391.982 25.9 402.656 0 424.024 0H552c13.255 0 24 10.745 24 24zM407.029 270.794l-16 16A23.999 23.999 0 0 0 384 303.765V448H64V128h264a24.003 24.003 0 0 0 16.97-7.029l16-16C376.089 89.851 365.381 64 344 64H48C21.49 64 0 85.49 0 112v352c0 26.51 21.49 48 48 48h352c26.51 0 48-21.49 48-48V287.764c0-21.382-25.852-32.09-40.971-16.97z"],"external-link-square-alt":[448,512,[],"f360","M448 80v352c0 26.51-21.49 48-48 48H48c-26.51 0-48-21.49-48-48V80c0-26.51 21.49-48 48-48h352c26.51 0 48 21.49 48 48zm-88 16H248.029c-21.313 0-32.08 25.861-16.971 40.971l31.984 31.987L67.515 364.485c-4.686 4.686-4.686 12.284 0 16.971l31.029 31.029c4.687 4.686 12.285 4.686 16.971 0l195.526-195.526 31.988 31.991C358.058 263.977 384 253.425 384 231.979V120c0-13.255-10.745-24-24-24z"],eye:[576,512,[],"f06e","M569.354 231.631C512.969 135.949 407.81 72 288 72 168.14 72 63.004 135.994 6.646 231.631a47.999 47.999 0 0 0 0 48.739C63.031 376.051 168.19 440 288 440c119.86 0 224.996-63.994 281.354-159.631a47.997 47.997 0 0 0 0-48.738zM288 392c-75.162 0-136-60.827-136-136 0-75.162 60.826-136 136-136 75.162 0 136 60.826 136 136 0 75.162-60.826 136-136 136zm104-136c0 57.438-46.562 104-104 104s-104-46.562-104-104c0-17.708 4.431-34.379 12.236-48.973l-.001.032c0 23.651 19.173 42.823 42.824 42.823s42.824-19.173 42.824-42.823c0-23.651-19.173-42.824-42.824-42.824l-.032.001C253.621 156.431 270.292 152 288 152c57.438 0 104 46.562 104 104z"],"eye-dropper":[512,512,[],"f1fb","M177.38 206.64L39.03 344.97A24.01 24.01 0 0 0 32 361.94V424L0 480l32 32 56-32h62.06c6.36 0 12.47-2.53 16.97-7.03l138.35-138.33-128-128zm225.552 30.47l16.952 16.95c9.37 9.37 9.37 24.57 0 33.94l-40.973 40.97c-9.292 9.312-24.506 9.434-33.94 0L183.028 167.03c-9.37-9.37-9.37-24.57 0-33.94L224 92.12c9.289-9.309 24.502-9.438 33.94 0l16.992 16.99 82.606-82.601c35.19-35.19 92.5-35.5 128 0 40.49 48.08 29.66 98.34 0 128l-82.606 82.601z"],"eye-slash":[576,512,[],"f070","M286.693 391.984l32.579 46.542A333.958 333.958 0 0 1 288 440C168.19 440 63.031 376.051 6.646 280.369a47.999 47.999 0 0 1 0-48.739c24.023-40.766 56.913-75.775 96.024-102.537l57.077 81.539C154.736 224.82 152 240.087 152 256c0 74.736 60.135 135.282 134.693 135.984zm282.661-111.615c-31.667 53.737-78.747 97.46-135.175 125.475l.011.015 41.47 59.2c7.6 10.86 4.96 25.82-5.9 33.42l-13.11 9.18c-10.86 7.6-25.82 4.96-33.42-5.9L100.34 46.94c-7.6-10.86-4.96-25.82 5.9-33.42l13.11-9.18c10.86-7.6 25.82-4.96 33.42 5.9l51.038 72.617C230.68 75.776 258.905 72 288 72c119.81 0 224.969 63.949 281.354 159.631a48.002 48.002 0 0 1 0 48.738zM424 256c0-75.174-60.838-136-136-136-17.939 0-35.056 3.473-50.729 9.772l19.299 27.058c25.869-8.171 55.044-6.163 80.4 7.41h-.03c-23.65 0-42.82 19.17-42.82 42.82 0 23.626 19.147 42.82 42.82 42.82 23.65 0 42.82-19.17 42.82-42.82v-.03c18.462 34.49 16.312 77.914-8.25 110.95v.01l19.314 27.061C411.496 321.2 424 290.074 424 256zM262.014 356.727l-77.53-110.757c-5.014 52.387 29.314 98.354 77.53 110.757z"],"fast-backward":[512,512,[],"f049","M0 436V76c0-6.6 5.4-12 12-12h40c6.6 0 12 5.4 12 12v151.9L235.5 71.4C256.1 54.3 288 68.6 288 96v131.9L459.5 71.4C480.1 54.3 512 68.6 512 96v320c0 27.4-31.9 41.7-52.5 24.6L288 285.3V416c0 27.4-31.9 41.7-52.5 24.6L64 285.3V436c0 6.6-5.4 12-12 12H12c-6.6 0-12-5.4-12-12z"],"fast-forward":[512,512,[],"f050","M512 76v360c0 6.6-5.4 12-12 12h-40c-6.6 0-12-5.4-12-12V284.1L276.5 440.6c-20.6 17.2-52.5 2.8-52.5-24.6V284.1L52.5 440.6C31.9 457.8 0 
443.4 0 416V96c0-27.4 31.9-41.7 52.5-24.6L224 226.8V96c0-27.4 31.9-41.7 52.5-24.6L448 226.8V76c0-6.6 5.4-12 12-12h40c6.6 0 12 5.4 12 12z"],fax:[512,512,[],"f1ac","M128 144v320c0 26.51-21.49 48-48 48H48c-26.51 0-48-21.49-48-48V144c0-26.51 21.49-48 48-48h32c26.51 0 48 21.49 48 48zm384 64v256c0 26.51-21.49 48-48 48H192c-26.51 0-48-21.49-48-48V40c0-22.091 17.909-40 40-40h207.432a39.996 39.996 0 0 1 28.284 11.716l48.569 48.569A39.999 39.999 0 0 1 480 88.568v74.174c18.641 6.591 32 24.36 32 45.258zm-320-16h240V96h-24c-13.203 0-24-10.797-24-24V48H192v144zm96 204c0-6.627-5.373-12-12-12h-40c-6.627 0-12 5.373-12 12v40c0 6.627 5.373 12 12 12h40c6.627 0 12-5.373 12-12v-40zm0-128c0-6.627-5.373-12-12-12h-40c-6.627 0-12 5.373-12 12v40c0 6.627 5.373 12 12 12h40c6.627 0 12-5.373 12-12v-40zm128 128c0-6.627-5.373-12-12-12h-40c-6.627 0-12 5.373-12 12v40c0 6.627 5.373 12 12 12h40c6.627 0 12-5.373 12-12v-40zm0-128c0-6.627-5.373-12-12-12h-40c-6.627 0-12 5.373-12 12v40c0 6.627 5.373 12 12 12h40c6.627 0 12-5.373 12-12v-40z"],female:[256,512,[],"f182","M128 0c35.346 0 64 28.654 64 64s-28.654 64-64 64c-35.346 0-64-28.654-64-64S92.654 0 128 0m119.283 354.179l-48-192A24 24 0 0 0 176 144h-11.36c-22.711 10.443-49.59 10.894-73.28 0H80a24 24 0 0 0-23.283 18.179l-48 192C4.935 369.305 16.383 384 32 384h56v104c0 13.255 10.745 24 24 24h32c13.255 0 24-10.745 24-24V384h56c15.591 0 27.071-14.671 23.283-29.821z"],"fighter-jet":[640,512,[],"f0fb","M544 224l-128-16-48-16h-24L227.158 44h39.509C278.333 44 288 41.375 288 38s-9.667-6-21.333-6H152v12h16v164h-48l-66.667-80H18.667L8 138.667V208h8v16h48v2.666l-64 8v42.667l64 8V288H16v16H8v69.333L18.667 384h34.667L120 304h48v164h-16v12h114.667c11.667 0 21.333-2.625 21.333-6s-9.667-6-21.333-6h-39.509L344 320h24l48-16 128-16c96-21.333 96-26.583 96-32 0-5.417 0-10.667-96-32z"],file:[384,512,[],"f15b","M224 136V0H24C10.7 0 0 10.7 0 24v464c0 13.3 10.7 24 24 24h336c13.3 0 24-10.7 24-24V160H248c-13.2 0-24-10.8-24-24zm160-14.1v6.1H256V0h6.1c6.4 0 12.5 2.5 17 7l97.9 98c4.5 4.5 7 10.6 7 16.9z"],"file-alt":[384,512,[],"f15c","M224 136V0H24C10.7 0 0 10.7 0 24v464c0 13.3 10.7 24 24 24h336c13.3 0 24-10.7 24-24V160H248c-13.2 0-24-10.8-24-24zm64 236c0 6.6-5.4 12-12 12H108c-6.6 0-12-5.4-12-12v-8c0-6.6 5.4-12 12-12h168c6.6 0 12 5.4 12 12v8zm0-64c0 6.6-5.4 12-12 12H108c-6.6 0-12-5.4-12-12v-8c0-6.6 5.4-12 12-12h168c6.6 0 12 5.4 12 12v8zm0-72v8c0 6.6-5.4 12-12 12H108c-6.6 0-12-5.4-12-12v-8c0-6.6 5.4-12 12-12h168c6.6 0 12 5.4 12 12zm96-114.1v6.1H256V0h6.1c6.4 0 12.5 2.5 17 7l97.9 98c4.5 4.5 7 10.6 7 16.9z"],"file-archive":[384,512,[],"f1c6","M224 136V0h-63.6v32h-32V0H24C10.7 0 0 10.7 0 24v464c0 13.3 10.7 24 24 24h336c13.3 0 24-10.7 24-24V160H248c-13.2 0-24-10.8-24-24zM95.9 32h32v32h-32V32zm32.3 384c-33.2 0-58-30.4-51.4-62.9L96.4 256v-32h32v-32h-32v-32h32v-32h-32V96h32V64h32v32h-32v32h32v32h-32v32h32v32h-32v32h22.1c5.7 0 10.7 4.1 11.8 9.7l17.3 87.7c6.4 32.4-18.4 62.6-51.4 62.6zm32.7-53c0 14.9-14.5 27-32.4 27S96 378 96 363c0-14.9 14.5-27 32.4-27s32.5 12.1 32.5 27zM384 121.9v6.1H256V0h6.1c6.4 0 12.5 2.5 17 7l97.9 98c4.5 4.5 7 10.6 7 16.9z"],"file-audio":[384,512,[],"f1c7","M224 136V0H24C10.7 0 0 10.7 0 24v464c0 13.3 10.7 24 24 24h336c13.3 0 24-10.7 24-24V160H248c-13.2 0-24-10.8-24-24zm-64 268c0 10.7-12.9 16-20.5 8.5L104 376H76c-6.6 0-12-5.4-12-12v-56c0-6.6 5.4-12 12-12h28l35.5-36.5c7.6-7.6 20.5-2.2 20.5 8.5v136zm33.2-47.6c9.1-9.3 9.1-24.1 0-33.4-22.1-22.8 12.2-56.2 34.4-33.5 27.2 27.9 27.2 72.4 0 100.4-21.8 22.3-56.9-10.4-34.4-33.5zm86-117.1c54.4 55.9 54.4 144.8 0 200.8-21.8 22.4-57-10.3-34.4-33.5 36.2-37.2 
36.3-96.5 0-133.8-22.1-22.8 12.3-56.3 34.4-33.5zM384 121.9v6.1H256V0h6.1c6.4 0 12.5 2.5 17 7l97.9 98c4.5 4.5 7 10.6 7 16.9z"],"file-code":[384,512,[],"f1c9","M384 121.941V128H256V0h6.059c6.365 0 12.47 2.529 16.971 7.029l97.941 97.941A24.005 24.005 0 0 1 384 121.941zM248 160c-13.2 0-24-10.8-24-24V0H24C10.745 0 0 10.745 0 24v464c0 13.255 10.745 24 24 24h336c13.255 0 24-10.745 24-24V160H248zM123.206 400.505a5.4 5.4 0 0 1-7.633.246l-64.866-60.812a5.4 5.4 0 0 1 0-7.879l64.866-60.812a5.4 5.4 0 0 1 7.633.246l19.579 20.885a5.4 5.4 0 0 1-.372 7.747L101.65 336l40.763 35.874a5.4 5.4 0 0 1 .372 7.747l-19.579 20.884zm51.295 50.479l-27.453-7.97a5.402 5.402 0 0 1-3.681-6.692l61.44-211.626a5.402 5.402 0 0 1 6.692-3.681l27.452 7.97a5.4 5.4 0 0 1 3.68 6.692l-61.44 211.626a5.397 5.397 0 0 1-6.69 3.681zm160.792-111.045l-64.866 60.812a5.4 5.4 0 0 1-7.633-.246l-19.58-20.885a5.4 5.4 0 0 1 .372-7.747L284.35 336l-40.763-35.874a5.4 5.4 0 0 1-.372-7.747l19.58-20.885a5.4 5.4 0 0 1 7.633-.246l64.866 60.812a5.4 5.4 0 0 1-.001 7.879z"],"file-excel":[384,512,[],"f1c3","M224 136V0H24C10.7 0 0 10.7 0 24v464c0 13.3 10.7 24 24 24h336c13.3 0 24-10.7 24-24V160H248c-13.2 0-24-10.8-24-24zm60.1 106.5L224 336l60.1 93.5c5.1 8-.6 18.5-10.1 18.5h-34.9c-4.4 0-8.5-2.4-10.6-6.3C208.9 405.5 192 373 192 373c-6.4 14.8-10 20-36.6 68.8-2.1 3.9-6.1 6.3-10.5 6.3H110c-9.5 0-15.2-10.5-10.1-18.5l60.3-93.5-60.3-93.5c-5.2-8 .6-18.5 10.1-18.5h34.8c4.4 0 8.5 2.4 10.6 6.3 26.1 48.8 20 33.6 36.6 68.5 0 0 6.1-11.7 36.6-68.5 2.1-3.9 6.2-6.3 10.6-6.3H274c9.5-.1 15.2 10.4 10.1 18.4zM384 121.9v6.1H256V0h6.1c6.4 0 12.5 2.5 17 7l97.9 98c4.5 4.5 7 10.6 7 16.9z"],"file-image":[384,512,[],"f1c5","M384 121.941V128H256V0h6.059a24 24 0 0 1 16.97 7.029l97.941 97.941a24.002 24.002 0 0 1 7.03 16.971zM248 160c-13.2 0-24-10.8-24-24V0H24C10.745 0 0 10.745 0 24v464c0 13.255 10.745 24 24 24h336c13.255 0 24-10.745 24-24V160H248zm-135.455 16c26.51 0 48 21.49 48 48s-21.49 48-48 48-48-21.49-48-48 21.491-48 48-48zm208 240h-256l.485-48.485L104.545 328c4.686-4.686 11.799-4.201 16.485.485L160.545 368 264.06 264.485c4.686-4.686 12.284-4.686 16.971 0L320.545 304v112z"],"file-pdf":[384,512,[],"f1c1","M181.9 256.1c-5-16-4.9-46.9-2-46.9 8.4 0 7.6 36.9 2 46.9zm-1.7 47.2c-7.7 20.2-17.3 43.3-28.4 62.7 18.3-7 39-17.2 62.9-21.9-12.7-9.6-24.9-23.4-34.5-40.8zM86.1 428.1c0 .8 13.2-5.4 34.9-40.2-6.7 6.3-29.1 24.5-34.9 40.2zM248 160h136v328c0 13.3-10.7 24-24 24H24c-13.3 0-24-10.7-24-24V24C0 10.7 10.7 0 24 0h200v136c0 13.2 10.8 24 24 24zm-8 171.8c-20-12.2-33.3-29-42.7-53.8 4.5-18.5 11.6-46.6 6.2-64.2-4.7-29.4-42.4-26.5-47.8-6.8-5 18.3-.4 44.1 8.1 77-11.6 27.6-28.7 64.6-40.8 85.8-.1 0-.1.1-.2.1-27.1 13.9-73.6 44.5-54.5 68 5.6 6.9 16 10 21.5 10 17.9 0 35.7-18 61.1-61.8 25.8-8.5 54.1-19.1 79-23.2 21.7 11.8 47.1 19.5 64 19.5 29.2 0 31.2-32 19.7-43.4-13.9-13.6-54.3-9.7-73.6-7.2zM377 105L279 7c-4.5-4.5-10.6-7-17-7h-6v128h128v-6.1c0-6.3-2.5-12.4-7-16.9zm-74.1 255.3c4.1-2.7-2.5-11.9-42.8-9 37.1 15.8 42.8 9 42.8 9z"],"file-powerpoint":[384,512,[],"f1c4","M193.7 271.2c8.8 0 15.5 2.7 20.3 8.1 9.6 10.9 9.8 32.7-.2 44.1-4.9 5.6-11.9 8.5-21.1 8.5h-26.9v-60.7h27.9zM377 105L279 7c-4.5-4.5-10.6-7-17-7h-6v128h128v-6.1c0-6.3-2.5-12.4-7-16.9zm-153 31V0H24C10.7 0 0 10.7 0 24v464c0 13.3 10.7 24 24 24h336c13.3 0 24-10.7 24-24V160H248c-13.2 0-24-10.8-24-24zm53 165.2c0 90.3-88.8 77.6-111.1 77.6V436c0 6.6-5.4 12-12 12h-30.8c-6.6 0-12-5.4-12-12V236.2c0-6.6 5.4-12 12-12h81c44.5 0 72.9 32.8 72.9 77z"],"file-video":[384,512,[],"f1c8","M384 121.941V128H256V0h6.059c6.365 0 12.47 2.529 16.971 7.029l97.941 97.941A24.005 24.005 0 
0 1 384 121.941zM224 136V0H24C10.745 0 0 10.745 0 24v464c0 13.255 10.745 24 24 24h336c13.255 0 24-10.745 24-24V160H248c-13.2 0-24-10.8-24-24zm96 144.016v111.963c0 21.445-25.943 31.998-40.971 16.971L224 353.941V392c0 13.255-10.745 24-24 24H88c-13.255 0-24-10.745-24-24V280c0-13.255 10.745-24 24-24h112c13.255 0 24 10.745 24 24v38.059l55.029-55.013c15.011-15.01 40.971-4.491 40.971 16.97z"],"file-word":[384,512,[],"f1c2","M224 136V0H24C10.7 0 0 10.7 0 24v464c0 13.3 10.7 24 24 24h336c13.3 0 24-10.7 24-24V160H248c-13.2 0-24-10.8-24-24zm57.1 120H305c7.7 0 13.4 7.1 11.7 14.7l-38 168c-1.2 5.5-6.1 9.3-11.7 9.3h-38c-5.5 0-10.3-3.8-11.6-9.1-25.8-103.5-20.8-81.2-25.6-110.5h-.5c-1.1 14.3-2.4 17.4-25.6 110.5-1.3 5.3-6.1 9.1-11.6 9.1H117c-5.6 0-10.5-3.9-11.7-9.4l-37.8-168c-1.7-7.5 4-14.6 11.7-14.6h24.5c5.7 0 10.7 4 11.8 9.7 15.6 78 20.1 109.5 21 122.2 1.6-10.2 7.3-32.7 29.4-122.7 1.3-5.4 6.1-9.1 11.7-9.1h29.1c5.6 0 10.4 3.8 11.7 9.2 24 100.4 28.8 124 29.6 129.4-.2-11.2-2.6-17.8 21.6-129.2 1-5.6 5.9-9.5 11.5-9.5zM384 121.9v6.1H256V0h6.1c6.4 0 12.5 2.5 17 7l97.9 98c4.5 4.5 7 10.6 7 16.9z"],film:[512,512,[],"f008","M488 64h-8v20c0 6.6-5.4 12-12 12h-40c-6.6 0-12-5.4-12-12V64H96v20c0 6.6-5.4 12-12 12H44c-6.6 0-12-5.4-12-12V64h-8C10.7 64 0 74.7 0 88v336c0 13.3 10.7 24 24 24h8v-20c0-6.6 5.4-12 12-12h40c6.6 0 12 5.4 12 12v20h320v-20c0-6.6 5.4-12 12-12h40c6.6 0 12 5.4 12 12v20h8c13.3 0 24-10.7 24-24V88c0-13.3-10.7-24-24-24zM96 372c0 6.6-5.4 12-12 12H44c-6.6 0-12-5.4-12-12v-40c0-6.6 5.4-12 12-12h40c6.6 0 12 5.4 12 12v40zm0-96c0 6.6-5.4 12-12 12H44c-6.6 0-12-5.4-12-12v-40c0-6.6 5.4-12 12-12h40c6.6 0 12 5.4 12 12v40zm0-96c0 6.6-5.4 12-12 12H44c-6.6 0-12-5.4-12-12v-40c0-6.6 5.4-12 12-12h40c6.6 0 12 5.4 12 12v40zm272 208c0 6.6-5.4 12-12 12H156c-6.6 0-12-5.4-12-12v-96c0-6.6 5.4-12 12-12h200c6.6 0 12 5.4 12 12v96zm0-168c0 6.6-5.4 12-12 12H156c-6.6 0-12-5.4-12-12v-96c0-6.6 5.4-12 12-12h200c6.6 0 12 5.4 12 12v96zm112 152c0 6.6-5.4 12-12 12h-40c-6.6 0-12-5.4-12-12v-40c0-6.6 5.4-12 12-12h40c6.6 0 12 5.4 12 12v40zm0-96c0 6.6-5.4 12-12 12h-40c-6.6 0-12-5.4-12-12v-40c0-6.6 5.4-12 12-12h40c6.6 0 12 5.4 12 12v40zm0-96c0 6.6-5.4 12-12 12h-40c-6.6 0-12-5.4-12-12v-40c0-6.6 5.4-12 12-12h40c6.6 0 12 5.4 12 12v40z"],filter:[512,512,[],"f0b0","M487.976 0H24.028C2.71 0-8.047 25.866 7.058 40.971L192 225.941V432c0 7.831 3.821 15.17 10.237 19.662l80 55.98C298.02 518.69 320 507.493 320 487.98V225.941l184.947-184.97C520.021 25.896 509.338 0 487.976 0z"],fire:[384,512,[],"f06d","M216 23.858c0-23.802-30.653-32.765-44.149-13.038C48 191.851 224 200 224 288c0 35.629-29.114 64.458-64.85 63.994C123.98 351.538 96 322.22 96 287.046v-85.51c0-21.703-26.471-32.225-41.432-16.504C27.801 213.158 0 261.332 0 320c0 105.869 86.131 192 192 192s192-86.131 192-192c0-170.29-168-193.003-168-296.142z"],"fire-extinguisher":[448,512,[],"f134","M434.027 26.329l-168 28C254.693 56.218 256 67.8 256 72h-58.332C208.353 36.108 181.446 0 144 0c-39.435 0-66.368 39.676-52.228 76.203-52.039 13.051-75.381 54.213-90.049 90.884-4.923 12.307 1.063 26.274 13.37 31.197 12.317 4.926 26.279-1.075 31.196-13.37C75.058 112.99 106.964 120 168 120v27.076c-41.543 10.862-72 49.235-72 94.129V488c0 13.255 10.745 24 24 24h144c13.255 0 24-10.745 24-24V240c0-44.731-30.596-82.312-72-92.97V120h40c0 2.974-1.703 15.716 10.027 17.671l168 28C441.342 166.89 448 161.25 448 153.834V38.166c0-7.416-6.658-13.056-13.973-11.837zM144 72c-8.822 0-16-7.178-16-16s7.178-16 16-16 16 7.178 16 16-7.178 16-16 16z"],flag:[512,512,[],"f024","M349.565 98.783C295.978 98.783 251.721 64 184.348 64c-24.955 0-47.309 4.384-68.045 
12.013a55.947 55.947 0 0 0 3.586-23.562C118.117 24.015 94.806 1.206 66.338.048 34.345-1.254 8 24.296 8 56c0 19.026 9.497 35.825 24 45.945V488c0 13.255 10.745 24 24 24h16c13.255 0 24-10.745 24-24v-94.4c28.311-12.064 63.582-22.122 114.435-22.122 53.588 0 97.844 34.783 165.217 34.783 48.169 0 86.667-16.294 122.505-40.858C506.84 359.452 512 349.571 512 339.045v-243.1c0-23.393-24.269-38.87-45.485-29.016-34.338 15.948-76.454 31.854-116.95 31.854z"],"flag-checkered":[512,512,[],"f11e","M466.515 66.928C487.731 57.074 512 72.551 512 95.944v243.1c0 10.526-5.161 20.407-13.843 26.358-35.837 24.564-74.335 40.858-122.505 40.858-67.373 0-111.63-34.783-165.217-34.783-50.853 0-86.124 10.058-114.435 22.122V488c0 13.255-10.745 24-24 24H56c-13.255 0-24-10.745-24-24V101.945C17.497 91.825 8 75.026 8 56 8 24.296 34.345-1.254 66.338.048c28.468 1.158 51.779 23.968 53.551 52.404.52 8.342-.81 16.31-3.586 23.562C137.039 68.384 159.393 64 184.348 64c67.373 0 111.63 34.783 165.217 34.783 40.496 0 82.612-15.906 116.95-31.855zM96 134.63v70.49c29-10.67 51.18-17.83 73.6-20.91v-71.57c-23.5 2.17-40.44 9.79-73.6 21.99zm220.8 9.19c-26.417-4.672-49.886-13.979-73.6-21.34v67.42c24.175 6.706 47.566 16.444 73.6 22.31v-68.39zm-147.2 40.39v70.04c32.796-2.978 53.91-.635 73.6 3.8V189.9c-25.247-7.035-46.581-9.423-73.6-5.69zm73.6 142.23c26.338 4.652 49.732 13.927 73.6 21.34v-67.41c-24.277-6.746-47.54-16.45-73.6-22.32v68.39zM96 342.1c23.62-8.39 47.79-13.84 73.6-16.56v-71.29c-26.11 2.35-47.36 8.04-73.6 17.36v70.49zm368-221.6c-21.3 8.85-46.59 17.64-73.6 22.47v71.91c27.31-4.36 50.03-14.1 73.6-23.89V120.5zm0 209.96v-70.49c-22.19 14.2-48.78 22.61-73.6 26.02v71.58c25.07-2.38 48.49-11.04 73.6-27.11zM316.8 212.21v68.16c25.664 7.134 46.616 9.342 73.6 5.62v-71.11c-25.999 4.187-49.943 2.676-73.6-2.67z"],flask:[448,512,[],"f0c3","M437.2 403.5L320 215V64h8c13.3 0 24-10.7 24-24V24c0-13.3-10.7-24-24-24H120c-13.3 0-24 10.7-24 24v16c0 13.3 10.7 24 24 24h8v151L10.8 403.5C-18.5 450.6 15.3 512 70.9 512h306.2c55.7 0 89.4-61.5 60.1-108.5zM137.9 320l48.2-77.6c3.7-5.2 5.8-11.6 5.8-18.4V64h64v160c0 6.9 2.2 13.2 5.8 18.4l48.2 77.6h-172z"],folder:[512,512,[],"f07b","M464 128H272l-64-64H48C21.49 64 0 85.49 0 112v288c0 26.51 21.49 48 48 48h416c26.51 0 48-21.49 48-48V176c0-26.51-21.49-48-48-48z"],"folder-open":[576,512,[],"f07c","M572.694 292.093L500.27 416.248A63.997 63.997 0 0 1 444.989 448H45.025c-18.523 0-30.064-20.093-20.731-36.093l72.424-124.155A64 64 0 0 1 152 256h399.964c18.523 0 30.064 20.093 20.73 36.093zM152 224h328v-48c0-26.51-21.49-48-48-48H272l-64-64H48C21.49 64 0 85.49 0 112v278.046l69.077-118.418C86.214 242.25 117.989 224 152 224z"],font:[448,512,[],"f031","M152 416h-24.013l26.586-80.782H292.8L319.386 416H296c-8.837 0-16 7.163-16 16v32c0 8.837 7.163 16 16 16h136c8.837 0 16-7.163 16-16v-32c0-8.837-7.163-16-16-16h-26.739L275.495 42.746A16 16 0 0 0 260.382 32h-72.766a16 16 0 0 0-15.113 10.746L42.739 416H16c-8.837 0-16 7.163-16 16v32c0 8.837 7.163 16 16 16h136c8.837 0 16-7.163 16-16v-32c0-8.837-7.163-16-16-16zm64.353-271.778c4.348-15.216 6.61-28.156 7.586-34.644.839 6.521 2.939 19.476 7.727 34.706l41.335 124.006h-98.619l41.971-124.068z"],forward:[512,512,[],"f04e","M500.5 231.4l-192-160C287.9 54.3 256 68.6 256 96v320c0 27.4 31.9 41.8 52.5 24.6l192-160c15.3-12.8 15.3-36.4 0-49.2zm-256 0l-192-160C31.9 54.3 0 68.6 0 96v320c0 27.4 31.9 41.8 52.5 24.6l192-160c15.3-12.8 15.3-36.4 0-49.2z"],frown:[512,512,[],"f119","M504 256c0 136.967-111.033 248-248 248S8 392.967 8 256 119.033 8 256 8s248 111.033 248 248zm-396-64c0 37.497 30.503 68 68 68s68-30.503 
68-68-30.503-68-68-68-68 30.503-68 68zm160.5 0c0 37.221 30.279 67.5 67.5 67.5s67.5-30.279 67.5-67.5-30.279-67.5-67.5-67.5-67.5 30.279-67.5 67.5zm67.5-48a47.789 47.789 0 0 0-22.603 5.647h.015c10.916 0 19.765 8.849 19.765 19.765s-8.849 19.765-19.765 19.765-19.765-8.849-19.765-19.765v-.015A47.789 47.789 0 0 0 288 192c0 26.51 21.49 48 48 48s48-21.49 48-48-21.49-48-48-48zm-160 0a47.789 47.789 0 0 0-22.603 5.647h.015c10.916 0 19.765 8.849 19.765 19.765s-8.849 19.765-19.765 19.765-19.765-8.849-19.765-19.765v-.015A47.789 47.789 0 0 0 128 192c0 26.51 21.49 48 48 48s48-21.49 48-48-21.49-48-48-48zm192.551 212.66c-59.128-91.455-165.846-91.594-225.064 0-11.502 17.79 15.383 35.148 26.873 17.374 46.626-72.118 124.862-71.855 171.318 0 11.328 17.524 38.548.684 26.873-17.374z"],futbol:[512,512,[],"f1e3","M504 256c0 136.967-111.033 248-248 248S8 392.967 8 256 119.033 8 256 8s248 111.033 248 248zm-48 0l-.003-.282-26.064 22.741-62.679-58.5 16.454-84.355 34.303 3.072c-24.889-34.216-60.004-60.089-100.709-73.141l13.651 31.939L256 139l-74.953-41.525 13.651-31.939c-40.631 13.028-75.78 38.87-100.709 73.141l34.565-3.073 16.192 84.355-62.678 58.5-26.064-22.741-.003.282c0 43.015 13.497 83.952 38.472 117.991l7.704-33.897 85.138 10.447 36.301 77.826-29.902 17.786c40.202 13.122 84.29 13.148 124.572 0l-29.902-17.786 36.301-77.826 85.138-10.447 7.704 33.897C442.503 339.952 456 299.015 456 256zm-248.102 69.571l-29.894-91.312L256 177.732l77.996 56.527-29.622 91.312h-96.476z"],gamepad:[640,512,[],"f11b","M480 96H160C71.6 96 0 167.6 0 256s71.6 160 160 160c44.8 0 85.2-18.4 114.2-48h91.5c29 29.6 69.5 48 114.2 48 88.4 0 160-71.6 160-160S568.4 96 480 96zM256 276c0 6.6-5.4 12-12 12h-52v52c0 6.6-5.4 12-12 12h-40c-6.6 0-12-5.4-12-12v-52H76c-6.6 0-12-5.4-12-12v-40c0-6.6 5.4-12 12-12h52v-52c0-6.6 5.4-12 12-12h40c6.6 0 12 5.4 12 12v52h52c6.6 0 12 5.4 12 12v40zm184 68c-26.5 0-48-21.5-48-48s21.5-48 48-48 48 21.5 48 48-21.5 48-48 48zm80-80c-26.5 0-48-21.5-48-48s21.5-48 48-48 48 21.5 48 48-21.5 48-48 48z"],gavel:[512,512,[],"f0e3","M504.971 199.362l-22.627-22.627c-9.373-9.373-24.569-9.373-33.941 0l-5.657 5.657L329.608 69.255l5.657-5.657c9.373-9.373 9.373-24.569 0-33.941L312.638 7.029c-9.373-9.373-24.569-9.373-33.941 0L154.246 131.48c-9.373 9.373-9.373 24.569 0 33.941l22.627 22.627c9.373 9.373 24.569 9.373 33.941 0l5.657-5.657 39.598 39.598-81.04 81.04-5.657-5.657c-12.497-12.497-32.758-12.497-45.255 0L9.373 412.118c-12.497 12.497-12.497 32.758 0 45.255l45.255 45.255c12.497 12.497 32.758 12.497 45.255 0l114.745-114.745c12.497-12.497 12.497-32.758 0-45.255l-5.657-5.657 81.04-81.04 39.598 39.598-5.657 5.657c-9.373 9.373-9.373 24.569 0 33.941l22.627 22.627c9.373 9.373 24.569 9.373 33.941 0l124.451-124.451c9.372-9.372 9.372-24.568 0-33.941z"],gem:[576,512,[],"f3a5","M485.5 0L576 160H474.9L405.7 0h79.8zm-128 0l69.2 160H149.3L218.5 0h139zm-267 0h79.8l-69.2 160H0L90.5 0zM0 192h100.7l123 251.7c1.5 3.1-2.7 5.9-5 3.3L0 192zm148.2 0h279.6l-137 318.2c-1 2.4-4.5 2.4-5.5 0L148.2 192zm204.1 251.7l123-251.7H576L357.3 446.9c-2.3 2.7-6.5-.1-5-3.2z"],genderless:[288,512,[],"f22d","M144 176c44.1 0 80 35.9 80 80s-35.9 80-80 80-80-35.9-80-80 35.9-80 80-80m0-64C64.5 112 0 176.5 0 256s64.5 144 144 144 144-64.5 144-144-64.5-144-144-144z"],gift:[512,512,[],"f06b","M488 192h-64.512C438.72 175.003 448 152.566 448 128c0-52.935-43.065-96-96-96-41.997 0-68.742 20.693-95.992 54.15C226.671 50.192 199.613 32 160 32c-52.935 0-96 43.065-96 96 0 24.566 9.28 47.003 24.512 64H24c-13.255 0-24 10.745-24 24v80c0 13.255 10.745 24 24 24h8v112c0 26.51 21.49 48 48 48h352c26.51 0 48-21.49 
48-48V320h8c13.255 0 24-10.745 24-24v-80c0-13.255-10.745-24-24-24zm-208-32c24-56 55.324-64 72-64 17.645 0 32 14.355 32 32s-14.355 32-32 32h-72zM160 96c16.676 0 48 8 72 64h-72c-17.645 0-32-14.355-32-32s14.355-32 32-32zm48 128h96v184c0 13.255-10.745 24-24 24h-48c-13.255 0-24-10.745-24-24V224z"],"glass-martini":[512,512,[],"f000","M507.3 27.3c10-10 2.9-27.3-11.3-27.3H16C1.8 0-5.4 17.2 4.7 27.3L216 238.6V472h-92c-15.5 0-28 12.5-28 28 0 6.6 5.4 12 12 12h296c6.6 0 12-5.4 12-12 0-15.5-12.5-28-28-28h-92V238.6L507.3 27.3z"],globe:[512,512,[],"f0ac","M364.215 192h131.43c5.439 20.419 8.354 41.868 8.354 64s-2.915 43.581-8.354 64h-131.43c5.154-43.049 4.939-86.746 0-128zM185.214 352c10.678 53.68 33.173 112.514 70.125 151.992.221.001.44.008.661.008s.44-.008.661-.008c37.012-39.543 59.467-98.414 70.125-151.992H185.214zm174.13-192h125.385C452.802 84.024 384.128 27.305 300.95 12.075c30.238 43.12 48.821 96.332 58.394 147.925zm-27.35 32H180.006c-5.339 41.914-5.345 86.037 0 128h151.989c5.339-41.915 5.345-86.037-.001-128zM152.656 352H27.271c31.926 75.976 100.6 132.695 183.778 147.925-30.246-43.136-48.823-96.35-58.393-147.925zm206.688 0c-9.575 51.605-28.163 104.814-58.394 147.925 83.178-15.23 151.852-71.949 183.778-147.925H359.344zm-32.558-192c-10.678-53.68-33.174-112.514-70.125-151.992-.221 0-.44-.008-.661-.008s-.44.008-.661.008C218.327 47.551 195.872 106.422 185.214 160h141.572zM16.355 192C10.915 212.419 8 233.868 8 256s2.915 43.581 8.355 64h131.43c-4.939-41.254-5.154-84.951 0-128H16.355zm136.301-32c9.575-51.602 28.161-104.81 58.394-147.925C127.872 27.305 59.198 84.024 27.271 160h125.385z"],"graduation-cap":[640,512,[],"f19d","M622.884 199.005l-275.817 85.1a96 96 0 0 1-54.134 0L92.398 222.232c-8.564 11.438-11.018 23.05-11.918 38.335C89.778 266.165 96 276.355 96 288c0 11.952-6.557 22.366-16.265 27.861l16.197 123.096c.63 4.786-3.1 9.043-7.932 9.043H40c-4.828 0-8.562-4.253-7.932-9.044L48.265 315.86C38.557 310.366 32 299.952 32 288c0-12.034 6.646-22.511 16.465-27.976.947-17.951 3.974-33.231 12.152-47.597l-43.502-13.422c-22.876-6.801-22.766-39.241 0-46.01l275.817-85.1a96 96 0 0 1 54.134 0l275.817 85.1c22.877 6.801 22.767 39.241.001 46.01zM356.503 314.682l-.207.064-.207.061a127.998 127.998 0 0 1-72.177 0l-.207-.061-.207-.064-150.914-46.57L120 352c0 35.346 89.543 64 200 64s200-28.654 200-64l-12.583-83.888-150.914 46.57z"],"h-square":[448,512,[],"f0fd","M448 80v352c0 26.51-21.49 48-48 48H48c-26.51 0-48-21.49-48-48V80c0-26.51 21.49-48 48-48h352c26.51 0 48 21.49 48 48zm-112 48h-32c-8.837 0-16 7.163-16 16v80H160v-80c0-8.837-7.163-16-16-16h-32c-8.837 0-16 7.163-16 16v224c0 8.837 7.163 16 16 16h32c8.837 0 16-7.163 16-16v-80h128v80c0 8.837 7.163 16 16 16h32c8.837 0 16-7.163 16-16V144c0-8.837-7.163-16-16-16z"],"hand-lizard":[576,512,[],"f258","M384 480h192V363.778a95.998 95.998 0 0 0-14.833-51.263L398.127 54.368A48 48 0 0 0 357.544 32H24C10.745 32 0 42.745 0 56v16c0 30.928 25.072 56 56 56h229.981c12.844 0 21.556 13.067 16.615 24.923l-21.41 51.385A32 32 0 0 1 251.648 224H128c-35.346 0-64 28.654-64 64v8c0 13.255 10.745 24 24 24h147.406a47.995 47.995 0 0 1 25.692 7.455l111.748 70.811A24.001 24.001 0 0 1 384 418.539V480z"],"hand-paper":[448,512,[],"f256","M408.781 128.007C386.356 127.578 368 146.36 368 168.79V256h-8V79.79c0-22.43-18.356-41.212-40.781-40.783C297.488 39.423 280 57.169 280 79v177h-8V40.79C272 18.36 253.644-.422 231.219.007 209.488.423 192 18.169 192 40v216h-8V80.79c0-22.43-18.356-41.212-40.781-40.783C121.488 40.423 104 58.169 104 80v235.992l-31.648-43.519c-12.993-17.866-38.009-21.817-55.877-8.823-17.865 
12.994-21.815 38.01-8.822 55.877l125.601 172.705A48 48 0 0 0 172.073 512h197.59c22.274 0 41.622-15.324 46.724-37.006l26.508-112.66a192.011 192.011 0 0 0 5.104-43.975V168c.001-21.831-17.487-39.577-39.218-39.993z"],"hand-peace":[448,512,[],"f25b","M408 216c-22.092 0-40 17.909-40 40h-8v-32c0-22.091-17.908-40-40-40s-40 17.909-40 40v32h-8V48c0-26.51-21.49-48-48-48s-48 21.49-48 48v208h-13.572L92.688 78.449C82.994 53.774 55.134 41.63 30.461 51.324 5.787 61.017-6.356 88.877 3.337 113.551l74.765 190.342-31.09 24.872c-15.381 12.306-19.515 33.978-9.741 51.081l64 112A39.998 39.998 0 0 0 136 512h240c18.562 0 34.686-12.77 38.937-30.838l32-136A39.97 39.97 0 0 0 448 336v-80c0-22.091-17.908-40-40-40z"],"hand-point-down":[384,512,[],"f0a7","M91.826 467.2V317.966c-8.248 5.841-16.558 10.57-24.918 14.153C35.098 345.752-.014 322.222 0 288c.008-18.616 10.897-32.203 29.092-40 28.286-12.122 64.329-78.648 77.323-107.534 7.956-17.857 25.479-28.453 43.845-28.464l.001-.002h171.526c11.812 0 21.897 8.596 23.703 20.269 7.25 46.837 38.483 61.76 38.315 123.731-.007 2.724.195 13.254.195 16 0 50.654-22.122 81.574-71.263 72.6-9.297 18.597-39.486 30.738-62.315 16.45-21.177 24.645-53.896 22.639-70.944 6.299V467.2c0 24.15-20.201 44.8-43.826 44.8-23.283 0-43.826-21.35-43.826-44.8zM112 72V24c0-13.255 10.745-24 24-24h192c13.255 0 24 10.745 24 24v48c0 13.255-10.745 24-24 24H136c-13.255 0-24-10.745-24-24zm212-24c0-11.046-8.954-20-20-20s-20 8.954-20 20 8.954 20 20 20 20-8.954 20-20z"],"hand-point-left":[512,512,[],"f0a5","M44.8 155.826h149.234c-5.841-8.248-10.57-16.558-14.153-24.918C166.248 99.098 189.778 63.986 224 64c18.616.008 32.203 10.897 40 29.092 12.122 28.286 78.648 64.329 107.534 77.323 17.857 7.956 28.453 25.479 28.464 43.845l.002.001v171.526c0 11.812-8.596 21.897-20.269 23.703-46.837 7.25-61.76 38.483-123.731 38.315-2.724-.007-13.254.195-16 .195-50.654 0-81.574-22.122-72.6-71.263-18.597-9.297-30.738-39.486-16.45-62.315-24.645-21.177-22.639-53.896-6.299-70.944H44.8c-24.15 0-44.8-20.201-44.8-43.826 0-23.283 21.35-43.826 44.8-43.826zM440 176h48c13.255 0 24 10.745 24 24v192c0 13.255-10.745 24-24 24h-48c-13.255 0-24-10.745-24-24V200c0-13.255 10.745-24 24-24zm24 212c11.046 0 20-8.954 20-20s-8.954-20-20-20-20 8.954-20 20 8.954 20 20 20z"],"hand-point-right":[512,512,[],"f0a4","M512 199.652c0 23.625-20.65 43.826-44.8 43.826h-99.851c16.34 17.048 18.346 49.766-6.299 70.944 14.288 22.829 2.147 53.017-16.45 62.315C353.574 425.878 322.654 448 272 448c-2.746 0-13.276-.203-16-.195-61.971.168-76.894-31.065-123.731-38.315C120.596 407.683 112 397.599 112 385.786V214.261l.002-.001c.011-18.366 10.607-35.889 28.464-43.845 28.886-12.994 95.413-49.038 107.534-77.323 7.797-18.194 21.384-29.084 40-29.092 34.222-.014 57.752 35.098 44.119 66.908-3.583 8.359-8.312 16.67-14.153 24.918H467.2c23.45 0 44.8 20.543 44.8 43.826zM96 200v192c0 13.255-10.745 24-24 24H24c-13.255 0-24-10.745-24-24V200c0-13.255 10.745-24 24-24h48c13.255 0 24 10.745 24 24zM68 368c0-11.046-8.954-20-20-20s-20 8.954-20 20 8.954 20 20 20 20-8.954 20-20z"],"hand-point-up":[384,512,[],"f0a6","M135.652 0c23.625 0 43.826 20.65 43.826 44.8v99.851c17.048-16.34 49.766-18.346 70.944 6.299 22.829-14.288 53.017-2.147 62.315 16.45C361.878 158.426 384 189.346 384 240c0 2.746-.203 13.276-.195 16 .168 61.971-31.065 76.894-38.315 123.731C343.683 391.404 333.599 400 321.786 400H150.261l-.001-.002c-18.366-.011-35.889-10.607-43.845-28.464C93.421 342.648 57.377 276.122 29.092 264 10.897 256.203.008 242.616 0 224c-.014-34.222 35.098-57.752 66.908-44.119 8.359 3.583 16.67 8.312 24.918 14.153V44.8c0-23.45 
20.543-44.8 43.826-44.8zM136 416h192c13.255 0 24 10.745 24 24v48c0 13.255-10.745 24-24 24H136c-13.255 0-24-10.745-24-24v-48c0-13.255 10.745-24 24-24zm168 28c-11.046 0-20 8.954-20 20s8.954 20 20 20 20-8.954 20-20-8.954-20-20-20z"],"hand-pointer":[448,512,[],"f25a","M448 240v96c0 3.084-.356 6.159-1.063 9.162l-32 136C410.686 499.23 394.562 512 376 512H168a40.004 40.004 0 0 1-32.35-16.473l-127.997-176c-12.993-17.866-9.043-42.883 8.822-55.876 17.867-12.994 42.884-9.043 55.877 8.823L104 315.992V40c0-22.091 17.908-40 40-40s40 17.909 40 40v200h8v-40c0-22.091 17.908-40 40-40s40 17.909 40 40v40h8v-24c0-22.091 17.908-40 40-40s40 17.909 40 40v24h8c0-22.091 17.908-40 40-40s40 17.909 40 40zm-256 80h-8v96h8v-96zm88 0h-8v96h8v-96zm88 0h-8v96h8v-96z"],"hand-rock":[512,512,[],"f255","M512 128.79c0-26.322-20.861-48.344-47.18-48.783C437.935 79.558 416 101.217 416 128h-8V96.79c0-26.322-20.861-48.344-47.18-48.783C333.935 47.558 312 69.217 312 96v32h-8V80.79c0-26.322-20.861-48.344-47.18-48.783C229.935 31.558 208 53.217 208 80v48h-8V96.79c0-26.322-20.861-48.344-47.18-48.783C125.935 47.558 104 69.217 104 96v136l-8-7.111V176.79c0-26.322-20.861-48.344-47.18-48.783C21.935 127.558 0 149.217 0 176v66.445a95.998 95.998 0 0 0 32.221 71.751l111.668 99.261A47.999 47.999 0 0 1 160 449.333V456c0 13.255 10.745 24 24 24h240c13.255 0 24-10.745 24-24v-2.921a96.01 96.01 0 0 1 7.523-37.254l48.954-116.265A96.002 96.002 0 0 0 512 262.306V128.79z"],"hand-scissors":[512,512,[],"f257","M216 440c0-22.092 17.909-40 40-40v-8h-32c-22.091 0-40-17.908-40-40s17.909-40 40-40h32v-8H48c-26.51 0-48-21.49-48-48s21.49-48 48-48h208v-13.572l-177.551-69.74c-24.674-9.694-36.818-37.555-27.125-62.228 9.693-24.674 37.554-36.817 62.228-27.124l190.342 74.765 24.872-31.09c12.306-15.381 33.978-19.515 51.081-9.741l112 64A40.002 40.002 0 0 1 512 168v240c0 18.562-12.77 34.686-30.838 38.937l-136 32A39.982 39.982 0 0 1 336 480h-80c-22.091 0-40-17.908-40-40z"],"hand-spock":[512,512,[],"f259","M10.872 316.585c15.139-16.086 40.454-16.854 56.543-1.713L128 371.893v-79.405L88.995 120.865c-4.896-21.542 8.598-42.974 30.14-47.87 21.549-4.894 42.975 8.599 47.87 30.141L201.747 256h9.833L164.016 48.966c-4.946-21.531 8.498-42.994 30.028-47.94 21.532-4.95 42.994 8.498 47.94 30.028L293.664 256h15.105l48.425-193.702c5.357-21.432 27.075-34.462 48.507-29.104 21.432 5.358 34.463 27.075 29.104 48.507L391.231 256h11.08l30.768-129.265c5.117-21.491 26.685-34.768 48.177-29.647 21.491 5.117 34.765 26.686 29.647 48.177l-36.292 152.467A96.024 96.024 0 0 0 472 319.967v42.102a96.002 96.002 0 0 1-3.96 27.287l-26.174 88.287C435.825 498.022 417.101 512 395.846 512H179.172a48.002 48.002 0 0 1-32.898-13.046L12.585 373.128c-16.087-15.141-16.853-40.456-1.713-56.543z"],handshake:[640,512,[],"f2b5","M72 112H24c-13.255 0-24 10.745-24 24v208c0 13.255 10.745 24 24 24h48c13.255 0 24-10.745 24-24V136c0-13.255-10.745-24-24-24zM48 340c-11.046 0-20-8.954-20-20s8.954-20 20-20 20 8.954 20 20-8.954 20-20 20zm568-228h-48c-13.255 0-24 10.745-24 24v208c0 13.255 10.745 24 24 24h48c13.255 0 24-10.745 24-24V136c0-13.255-10.745-24-24-24zm-24 228c-11.046 0-20-8.954-20-20s8.954-20 20-20 20 8.954 20 20-8.954 20-20 20zM485.94 92.67L528 140.74V320h-19.17c.56-14.96-4.38-28.98-14-39.71l-80.92-98.91c2.93-3.2 2.76-8.16-.38-11.16-2.82-2.7-7.08-2.92-10.14-.76-.42.3-60.35 62.93-60.35 62.93l-.2.21c-23.904 26.905-66.127 26.204-89.15-1.42-15.48-18.58-15.29-45.39.45-63.76l66.57-77.67C334.304 73.88 354.534 64 376.7 64h46.05a83.98 83.98 0 0 1 63.19 28.67zm-3.37 197.92c15.46 16.78 12.59 43.83-2.37 57.75-17.711 16.462-42.433 13.004-45.93 
9.2 1.653 15.658-21.389 47.249-56.42 44.68-6.325 21.185-32.298 38.909-59.18 29.61-10.22 10.21-25.82 14.97-39.81 14.97-28.69 0-54.92-11.99-72.58-30.8L112 320V135.52l61.36-50.57A71.52 71.52 0 0 1 223.93 64h37.42c16.73 0 32.68 6.84 44.21 18.85l-63.57 74.16c-20.84 24.31-21.09 59.81-.59 84.42 29.375 35.247 83.007 35.853 113.31 1.92L402.82 193l79.75 97.59z"],hashtag:[448,512,[],"f292","M440.667 182.109l7.143-40c1.313-7.355-4.342-14.109-11.813-14.109h-74.81l14.623-81.891C377.123 38.754 371.468 32 363.997 32h-40.632a12 12 0 0 0-11.813 9.891L296.175 128H197.54l14.623-81.891C213.477 38.754 207.822 32 200.35 32h-40.632a12 12 0 0 0-11.813 9.891L132.528 128H53.432a12 12 0 0 0-11.813 9.891l-7.143 40C33.163 185.246 38.818 192 46.289 192h74.81L98.242 320H19.146a12 12 0 0 0-11.813 9.891l-7.143 40C-1.123 377.246 4.532 384 12.003 384h74.81L72.19 465.891C70.877 473.246 76.532 480 84.003 480h40.632a12 12 0 0 0 11.813-9.891L151.826 384h98.634l-14.623 81.891C234.523 473.246 240.178 480 247.65 480h40.632a12 12 0 0 0 11.813-9.891L315.472 384h79.096a12 12 0 0 0 11.813-9.891l7.143-40c1.313-7.355-4.342-14.109-11.813-14.109h-74.81l22.857-128h79.096a12 12 0 0 0 11.813-9.891zM261.889 320h-98.634l22.857-128h98.634l-22.857 128z"],hdd:[576,512,[],"f0a0","M576 304v96c0 26.51-21.49 48-48 48H48c-26.51 0-48-21.49-48-48v-96c0-26.51 21.49-48 48-48h480c26.51 0 48 21.49 48 48zm-48-80a79.557 79.557 0 0 1 30.777 6.165L462.25 85.374A48.003 48.003 0 0 0 422.311 64H153.689a48 48 0 0 0-39.938 21.374L17.223 230.165A79.557 79.557 0 0 1 48 224h480zm-48 96c-17.673 0-32 14.327-32 32s14.327 32 32 32 32-14.327 32-32-14.327-32-32-32zm-96 0c-17.673 0-32 14.327-32 32s14.327 32 32 32 32-14.327 32-32-14.327-32-32-32z"],heading:[512,512,[],"f1dc","M496 80V48c0-8.837-7.163-16-16-16H320c-8.837 0-16 7.163-16 16v32c0 8.837 7.163 16 16 16h37.621v128H154.379V96H192c8.837 0 16-7.163 16-16V48c0-8.837-7.163-16-16-16H32c-8.837 0-16 7.163-16 16v32c0 8.837 7.163 16 16 16h37.275v320H32c-8.837 0-16 7.163-16 16v32c0 8.837 7.163 16 16 16h160c8.837 0 16-7.163 16-16v-32c0-8.837-7.163-16-16-16h-37.621V288H357.62v128H320c-8.837 0-16 7.163-16 16v32c0 8.837 7.163 16 16 16h160c8.837 0 16-7.163 16-16v-32c0-8.837-7.163-16-16-16h-37.275V96H480c8.837 0 16-7.163 16-16z"],headphones:[512,512,[],"f025","M256 32C114.52 32 0 146.496 0 288v48a32 32 0 0 0 17.689 28.622l14.383 7.191C34.083 431.903 83.421 480 144 480h24c13.255 0 24-10.745 24-24V280c0-13.255-10.745-24-24-24h-24c-31.342 0-59.671 12.879-80 33.627V288c0-105.869 86.131-192 192-192s192 86.131 192 192v1.627C427.671 268.879 399.342 256 368 256h-24c-13.255 0-24 10.745-24 24v176c0 13.255 10.745 24 24 24h24c60.579 0 109.917-48.098 111.928-108.187l14.382-7.191A32 32 0 0 0 512 336v-48c0-141.479-114.496-256-256-256z"],heart:[576,512,[],"f004","M414.9 24C361.8 24 312 65.7 288 89.3 264 65.7 214.2 24 161.1 24 70.3 24 16 76.9 16 165.5c0 72.6 66.8 133.3 69.2 135.4l187 180.8c8.8 8.5 22.8 8.5 31.6 0l186.7-180.2c2.7-2.7 69.5-63.5 69.5-136C560 76.9 505.7 24 414.9 24z"],heartbeat:[576,512,[],"f21e","M47.9 257C31.6 232.7 16 200.5 16 165.5 16 76.9 70.3 24 161.1 24 214.2 24 264 65.7 288 89.3 312 65.7 361.8 24 414.9 24 505.7 24 560 76.9 560 165.5c0 35-15.5 67.2-31.9 91.5H408l-26.4-58.6c-4.7-8.9-17.6-8.5-21.6.7l-53.3 134.6L235.4 120c-3.7-10.6-18.7-10.7-22.6-.2l-48 137.2H47.9zm348 32c-4.5 0-8.6-2.5-10.6-6.4l-12.8-32.5-56.9 142.8c-4.4 9.9-18.7 9.4-22.3-.9l-69.7-209.2-33.6 98.4c-1.7 4.7-6.2 7.8-11.2 7.8H73.4c5.3 5.7-12.8-12 198.9 192.6 8.8 8.5 22.8 8.5 31.6 0 204.3-197.2 191-184 199-192.6h-107z"],history:[512,512,[],"f1da","M504 255.531c.253 
136.64-111.18 248.372-247.82 248.468-59.015.042-113.223-20.53-155.822-54.911-11.077-8.94-11.905-25.541-1.839-35.607l11.267-11.267c8.609-8.609 22.353-9.551 31.891-1.984C173.062 425.135 212.781 440 256 440c101.705 0 184-82.311 184-184 0-101.705-82.311-184-184-184-48.814 0-93.149 18.969-126.068 49.932l50.754 50.754c10.08 10.08 2.941 27.314-11.313 27.314H24c-8.837 0-16-7.163-16-16V38.627c0-14.254 17.234-21.393 27.314-11.314l49.372 49.372C129.209 34.136 189.552 8 256 8c136.81 0 247.747 110.78 248 247.531zm-180.912 78.784l9.823-12.63c8.138-10.463 6.253-25.542-4.21-33.679L288 256.349V152c0-13.255-10.745-24-24-24h-16c-13.255 0-24 10.745-24 24v135.651l65.409 50.874c10.463 8.137 25.541 6.253 33.679-4.21z"],home:[576,512,[],"f015","M488 312.7V456c0 13.3-10.7 24-24 24H348c-6.6 0-12-5.4-12-12V356c0-6.6-5.4-12-12-12h-72c-6.6 0-12 5.4-12 12v112c0 6.6-5.4 12-12 12H112c-13.3 0-24-10.7-24-24V312.7c0-3.6 1.6-7 4.4-9.3l188-154.8c4.4-3.6 10.8-3.6 15.3 0l188 154.8c2.7 2.3 4.3 5.7 4.3 9.3zm83.6-60.9L488 182.9V44.4c0-6.6-5.4-12-12-12h-56c-6.6 0-12 5.4-12 12V117l-89.5-73.7c-17.7-14.6-43.3-14.6-61 0L4.4 251.8c-5.1 4.2-5.8 11.8-1.6 16.9l25.5 31c4.2 5.1 11.8 5.8 16.9 1.6l235.2-193.7c4.4-3.6 10.8-3.6 15.3 0l235.2 193.7c5.1 4.2 12.7 3.5 16.9-1.6l25.5-31c4.2-5.2 3.4-12.7-1.7-16.9z"],hospital:[448,512,[],"f0f8","M448 492v20H0v-20c0-6.627 5.373-12 12-12h20V120c0-13.255 10.745-24 24-24h88V24c0-13.255 10.745-24 24-24h112c13.255 0 24 10.745 24 24v72h88c13.255 0 24 10.745 24 24v360h20c6.627 0 12 5.373 12 12zM308 192h-40c-6.627 0-12 5.373-12 12v40c0 6.627 5.373 12 12 12h40c6.627 0 12-5.373 12-12v-40c0-6.627-5.373-12-12-12zm-168 64h40c6.627 0 12-5.373 12-12v-40c0-6.627-5.373-12-12-12h-40c-6.627 0-12 5.373-12 12v40c0 6.627 5.373 12 12 12zm104 128h-40c-6.627 0-12 5.373-12 12v84h64v-84c0-6.627-5.373-12-12-12zm64-96h-40c-6.627 0-12 5.373-12 12v40c0 6.627 5.373 12 12 12h40c6.627 0 12-5.373 12-12v-40c0-6.627-5.373-12-12-12zm-116 12c0-6.627-5.373-12-12-12h-40c-6.627 0-12 5.373-12 12v40c0 6.627 5.373 12 12 12h40c6.627 0 12-5.373 12-12v-40zM182 96h26v26a6 6 0 0 0 6 6h20a6 6 0 0 0 6-6V96h26a6 6 0 0 0 6-6V70a6 6 0 0 0-6-6h-26V38a6 6 0 0 0-6-6h-20a6 6 0 0 0-6 6v26h-26a6 6 0 0 0-6 6v20a6 6 0 0 0 6 6z"],hourglass:[384,512,[],"f254","M360 64c13.255 0 24-10.745 24-24V24c0-13.255-10.745-24-24-24H24C10.745 0 0 10.745 0 24v16c0 13.255 10.745 24 24 24 0 90.965 51.016 167.734 120.842 192C75.016 280.266 24 357.035 24 448c-13.255 0-24 10.745-24 24v16c0 13.255 10.745 24 24 24h336c13.255 0 24-10.745 24-24v-16c0-13.255-10.745-24-24-24 0-90.965-51.016-167.734-120.842-192C308.984 231.734 360 154.965 360 64z"],"hourglass-end":[384,512,[],"f253","M360 64c13.255 0 24-10.745 24-24V24c0-13.255-10.745-24-24-24H24C10.745 0 0 10.745 0 24v16c0 13.255 10.745 24 24 24 0 90.965 51.016 167.734 120.842 192C75.016 280.266 24 357.035 24 448c-13.255 0-24 10.745-24 24v16c0 13.255 10.745 24 24 24h336c13.255 0 24-10.745 24-24v-16c0-13.255-10.745-24-24-24 0-90.965-51.016-167.734-120.842-192C308.984 231.734 360 154.965 360 64zM192 208c-57.787 0-104-66.518-104-144h208c0 77.945-46.51 144-104 144z"],"hourglass-half":[384,512,[],"f252","M360 0H24C10.745 0 0 10.745 0 24v16c0 13.255 10.745 24 24 24 0 90.965 51.016 167.734 120.842 192C75.016 280.266 24 357.035 24 448c-13.255 0-24 10.745-24 24v16c0 13.255 10.745 24 24 24h336c13.255 0 24-10.745 24-24v-16c0-13.255-10.745-24-24-24 0-90.965-51.016-167.734-120.842-192C308.984 231.734 360 154.965 360 64c13.255 0 24-10.745 24-24V24c0-13.255-10.745-24-24-24zm-75.078 384H99.08c17.059-46.797 52.096-80 92.92-80 40.821 0 75.862 33.196 92.922 
80zm.019-256H99.078C91.988 108.548 88 86.748 88 64h208c0 22.805-3.987 44.587-11.059 64z"],"hourglass-start":[384,512,[],"f251","M360 0H24C10.745 0 0 10.745 0 24v16c0 13.255 10.745 24 24 24 0 90.965 51.016 167.734 120.842 192C75.016 280.266 24 357.035 24 448c-13.255 0-24 10.745-24 24v16c0 13.255 10.745 24 24 24h336c13.255 0 24-10.745 24-24v-16c0-13.255-10.745-24-24-24 0-90.965-51.016-167.734-120.842-192C308.984 231.734 360 154.965 360 64c13.255 0 24-10.745 24-24V24c0-13.255-10.745-24-24-24zm-64 448H88c0-77.458 46.204-144 104-144 57.786 0 104 66.517 104 144z"],"i-cursor":[256,512,[],"f246","M256 52.048V12.065C256 5.496 250.726.148 244.158.066 211.621-.344 166.469.011 128 37.959 90.266.736 46.979-.114 11.913.114 5.318.157 0 5.519 0 12.114v39.645c0 6.687 5.458 12.078 12.145 11.998C38.111 63.447 96 67.243 96 112.182V224H60c-6.627 0-12 5.373-12 12v40c0 6.627 5.373 12 12 12h36v112c0 44.932-56.075 48.031-83.95 47.959C5.404 447.942 0 453.306 0 459.952v39.983c0 6.569 5.274 11.917 11.842 11.999 32.537.409 77.689.054 116.158-37.894 37.734 37.223 81.021 38.073 116.087 37.845 6.595-.043 11.913-5.405 11.913-12V460.24c0-6.687-5.458-12.078-12.145-11.998C217.889 448.553 160 444.939 160 400V288h36c6.627 0 12-5.373 12-12v-40c0-6.627-5.373-12-12-12h-36V112.182c0-44.932 56.075-48.213 83.95-48.142 6.646.018 12.05-5.346 12.05-11.992z"],"id-badge":[384,512,[],"f2c1","M0 464V48C0 21.49 21.49 0 48 0h288c26.51 0 48 21.49 48 48v416c0 26.51-21.49 48-48 48H48c-26.51 0-48-21.49-48-48zm192-144c53.019 0 96-42.981 96-96s-42.981-96-96-96-96 42.981-96 96 42.981 96 96 96zm94.317 17.474l-22.954-7.173C242.437 344.413 217.802 352 192 352s-50.437-7.587-71.363-21.699l-22.954 7.173C77.644 343.736 64 362.295 64 383.289V424c0 13.255 10.745 24 24 24h208c13.255 0 24-10.745 24-24v-40.711c0-20.994-13.644-39.553-33.683-45.815zM352 52v-8c0-6.627-5.373-12-12-12H44c-6.627 0-12 5.373-12 12v8c0 6.627 5.373 12 12 12h296c6.627 0 12-5.373 12-12z"],"id-card":[512,512,[],"f2c2","M464 448H48c-26.51 0-48-21.49-48-48V112c0-26.51 21.49-48 48-48h416c26.51 0 48 21.49 48 48v288c0 26.51-21.49 48-48 48zM160 160c-35.346 0-64 28.654-64 64s28.654 64 64 64 64-28.654 64-64-28.654-64-64-64m79.589 154.53l-28.281-9.427C196.458 314.532 178.856 320 160 320s-36.458-5.468-51.309-14.897L80.41 314.53A24 24 0 0 0 64 337.298V360c0 13.255 10.745 24 24 24h144c13.255 0 24-10.745 24-24v-22.702a24 24 0 0 0-16.411-22.768zM448 340v-8c0-6.627-5.373-12-12-12H300c-6.627 0-12 5.373-12 12v8c0 6.627 5.373 12 12 12h136c6.627 0 12-5.373 12-12zm0-64v-8c0-6.627-5.373-12-12-12H300c-6.627 0-12 5.373-12 12v8c0 6.627 5.373 12 12 12h136c6.627 0 12-5.373 12-12zm0-64v-8c0-6.627-5.373-12-12-12H300c-6.627 0-12 5.373-12 12v8c0 6.627 5.373 12 12 12h136c6.627 0 12-5.373 12-12zm32-96v-8c0-6.627-5.373-12-12-12H44c-6.627 0-12 5.373-12 12v8c0 6.627 5.373 12 12 12h424c6.627 0 12-5.373 12-12z"],image:[512,512,[],"f03e","M464 448H48c-26.51 0-48-21.49-48-48V112c0-26.51 21.49-48 48-48h416c26.51 0 48 21.49 48 48v288c0 26.51-21.49 48-48 48zM112 120c-30.928 0-56 25.072-56 56s25.072 56 56 56 56-25.072 56-56-25.072-56-56-56zM64 384h384V272l-87.515-87.515c-4.686-4.686-12.284-4.686-16.971 0L208 320l-55.515-55.515c-4.686-4.686-12.284-4.686-16.971 0L64 336v48z"],images:[576,512,[],"f302","M480 416v16c0 26.51-21.49 48-48 48H48c-26.51 0-48-21.49-48-48V176c0-26.51 21.49-48 48-48h16v208c0 44.112 35.888 80 80 80h336zm96-80V80c0-26.51-21.49-48-48-48H144c-26.51 0-48 21.49-48 48v256c0 26.51 21.49 48 48 48h384c26.51 0 48-21.49 48-48zM256 128c0 26.51-21.49 48-48 48s-48-21.49-48-48 21.49-48 48-48 48 21.49 48 48zm-96 
144l55.515-55.515c4.686-4.686 12.284-4.686 16.971 0L272 256l135.515-135.515c4.686-4.686 12.284-4.686 16.971 0L512 208v112H160v-48z"],inbox:[576,512,[],"f01c","M567.938 243.908L462.25 85.374A48.003 48.003 0 0 0 422.311 64H153.689a48 48 0 0 0-39.938 21.374L8.062 243.908A47.994 47.994 0 0 0 0 270.533V400c0 26.51 21.49 48 48 48h480c26.51 0 48-21.49 48-48V270.533a47.994 47.994 0 0 0-8.062-26.625zM162.252 128h251.497l85.333 128H376l-32 64H232l-32-64H76.918l85.334-128z"],indent:[448,512,[],"f03c","M0 84V44c0-8.837 7.163-16 16-16h416c8.837 0 16 7.163 16 16v40c0 8.837-7.163 16-16 16H16c-8.837 0-16-7.163-16-16zm176 144h256c8.837 0 16-7.163 16-16v-40c0-8.837-7.163-16-16-16H176c-8.837 0-16 7.163-16 16v40c0 8.837 7.163 16 16 16zM16 484h416c8.837 0 16-7.163 16-16v-40c0-8.837-7.163-16-16-16H16c-8.837 0-16 7.163-16 16v40c0 8.837 7.163 16 16 16zm160-128h256c8.837 0 16-7.163 16-16v-40c0-8.837-7.163-16-16-16H176c-8.837 0-16 7.163-16 16v40c0 8.837 7.163 16 16 16zm-52.687-111.313l-96-95.984C17.266 138.652 0 145.776 0 160.016v191.975c0 14.329 17.325 21.304 27.313 11.313l96-95.992c6.249-6.247 6.249-16.377 0-22.625z"],industry:[512,512,[],"f275","M475.115 163.781L336 252.309v-68.28c0-18.916-20.931-30.399-36.885-20.248L160 252.309V56c0-13.255-10.745-24-24-24H24C10.745 32 0 42.745 0 56v400c0 13.255 10.745 24 24 24h464c13.255 0 24-10.745 24-24V184.029c0-18.917-20.931-30.399-36.885-20.248z"],info:[192,512,[],"f129","M20 424.229h20V279.771H20c-11.046 0-20-8.954-20-20V212c0-11.046 8.954-20 20-20h112c11.046 0 20 8.954 20 20v212.229h20c11.046 0 20 8.954 20 20V492c0 11.046-8.954 20-20 20H20c-11.046 0-20-8.954-20-20v-47.771c0-11.046 8.954-20 20-20zM96 0C56.235 0 24 32.235 24 72s32.235 72 72 72 72-32.235 72-72S135.764 0 96 0z"],"info-circle":[512,512,[],"f05a","M256 8C119.043 8 8 119.083 8 256c0 136.997 111.043 248 248 248s248-111.003 248-248C504 119.083 392.957 8 256 8zm0 110c23.196 0 42 18.804 42 42s-18.804 42-42 42-42-18.804-42-42 18.804-42 42-42zm56 254c0 6.627-5.373 12-12 12h-88c-6.627 0-12-5.373-12-12v-24c0-6.627 5.373-12 12-12h12v-64h-12c-6.627 0-12-5.373-12-12v-24c0-6.627 5.373-12 12-12h64c6.627 0 12 5.373 12 12v100h12c6.627 0 12 5.373 12 12v24z"],italic:[320,512,[],"f033","M204.758 416h-33.849l62.092-320h40.725a16 16 0 0 0 15.704-12.937l6.242-32C297.599 41.184 290.034 32 279.968 32H120.235a16 16 0 0 0-15.704 12.937l-6.242 32C96.362 86.816 103.927 96 113.993 96h33.846l-62.09 320H46.278a16 16 0 0 0-15.704 12.935l-6.245 32C22.402 470.815 29.967 480 40.034 480h158.479a16 16 0 0 0 15.704-12.935l6.245-32c1.927-9.88-5.638-19.065-15.704-19.065z"],key:[512,512,[],"f084","M512 176.001C512 273.203 433.202 352 336 352c-11.22 0-22.19-1.062-32.827-3.069l-24.012 27.014A23.999 23.999 0 0 1 261.223 384H224v40c0 13.255-10.745 24-24 24h-40v40c0 13.255-10.745 24-24 24H24c-13.255 0-24-10.745-24-24v-78.059c0-6.365 2.529-12.47 7.029-16.971l161.802-161.802C163.108 213.814 160 195.271 160 176 160 78.798 238.797.001 335.999 0 433.488-.001 512 78.511 512 176.001zM336 128c0 26.51 21.49 48 48 48s48-21.49 48-48-21.49-48-48-48-48 21.49-48 48z"],keyboard:[576,512,[],"f11c","M528 448H48c-26.51 0-48-21.49-48-48V112c0-26.51 21.49-48 48-48h480c26.51 0 48 21.49 48 48v288c0 26.51-21.49 48-48 48zM128 180v-40c0-6.627-5.373-12-12-12H76c-6.627 0-12 5.373-12 12v40c0 6.627 5.373 12 12 12h40c6.627 0 12-5.373 12-12zm96 0v-40c0-6.627-5.373-12-12-12h-40c-6.627 0-12 5.373-12 12v40c0 6.627 5.373 12 12 12h40c6.627 0 12-5.373 12-12zm96 0v-40c0-6.627-5.373-12-12-12h-40c-6.627 0-12 5.373-12 12v40c0 6.627 5.373 12 12 12h40c6.627 0 12-5.373 12-12zm96 
0v-40c0-6.627-5.373-12-12-12h-40c-6.627 0-12 5.373-12 12v40c0 6.627 5.373 12 12 12h40c6.627 0 12-5.373 12-12zm96 0v-40c0-6.627-5.373-12-12-12h-40c-6.627 0-12 5.373-12 12v40c0 6.627 5.373 12 12 12h40c6.627 0 12-5.373 12-12zm-336 96v-40c0-6.627-5.373-12-12-12h-40c-6.627 0-12 5.373-12 12v40c0 6.627 5.373 12 12 12h40c6.627 0 12-5.373 12-12zm96 0v-40c0-6.627-5.373-12-12-12h-40c-6.627 0-12 5.373-12 12v40c0 6.627 5.373 12 12 12h40c6.627 0 12-5.373 12-12zm96 0v-40c0-6.627-5.373-12-12-12h-40c-6.627 0-12 5.373-12 12v40c0 6.627 5.373 12 12 12h40c6.627 0 12-5.373 12-12zm96 0v-40c0-6.627-5.373-12-12-12h-40c-6.627 0-12 5.373-12 12v40c0 6.627 5.373 12 12 12h40c6.627 0 12-5.373 12-12zm-336 96v-40c0-6.627-5.373-12-12-12H76c-6.627 0-12 5.373-12 12v40c0 6.627 5.373 12 12 12h40c6.627 0 12-5.373 12-12zm288 0v-40c0-6.627-5.373-12-12-12H172c-6.627 0-12 5.373-12 12v40c0 6.627 5.373 12 12 12h232c6.627 0 12-5.373 12-12zm96 0v-40c0-6.627-5.373-12-12-12h-40c-6.627 0-12 5.373-12 12v40c0 6.627 5.373 12 12 12h40c6.627 0 12-5.373 12-12z"],language:[640,512,[],"f1ab","M304 416H24c-13.255 0-24-10.745-24-24V120c0-13.255 10.745-24 24-24h280v320zm-120.676-72.622A12 12 0 0 0 194.839 352h22.863c8.22 0 14.007-8.078 11.362-15.861L171.61 167.085a12 12 0 0 0-11.362-8.139h-32.489a12.001 12.001 0 0 0-11.362 8.139L58.942 336.139C56.297 343.922 62.084 352 70.304 352h22.805a12 12 0 0 0 11.535-8.693l9.118-31.807h60.211l9.351 31.878zm-39.051-140.42s4.32 21.061 7.83 33.21l10.8 37.531h-38.07l11.07-37.531c3.51-12.15 7.83-33.21 7.83-33.21h.54zM616 416H336V96h280c13.255 0 24 10.745 24 24v272c0 13.255-10.745 24-24 24zm-36-228h-64v-16c0-6.627-5.373-12-12-12h-16c-6.627 0-12 5.373-12 12v16h-64c-6.627 0-12 5.373-12 12v16c0 6.627 5.373 12 12 12h114.106c-6.263 14.299-16.518 28.972-30.023 43.206-6.56-6.898-12.397-13.91-17.365-20.933-3.639-5.144-10.585-6.675-15.995-3.446l-7.28 4.346-6.498 3.879c-5.956 3.556-7.693 11.421-3.735 17.117 6.065 8.729 13.098 17.336 20.984 25.726-8.122 6.226-16.841 12.244-26.103 17.964-5.521 3.41-7.381 10.556-4.162 16.19l7.941 13.896c3.362 5.883 10.935 7.826 16.706 4.276 12.732-7.831 24.571-16.175 35.443-24.891 10.917 8.761 22.766 17.102 35.396 24.881 5.774 3.556 13.353 1.618 16.717-4.27l7.944-13.903c3.213-5.623 1.37-12.76-4.135-16.171a312.737 312.737 0 0 1-26.06-18.019c21.024-22.425 35.768-46.289 42.713-69.85H580c6.627 0 12-5.373 12-12v-16c0-6.625-5.373-11.998-12-11.998z"],laptop:[640,512,[],"f109","M512 64v256H128V64h384m16-64H112C85.5 0 64 21.5 64 48v288c0 26.5 21.5 48 48 48h416c26.5 0 48-21.5 48-48V48c0-26.5-21.5-48-48-48zm100 416H389.5c-3 0-5.5 2.1-5.9 5.1C381.2 436.3 368 448 352 448h-64c-16 0-29.2-11.7-31.6-26.9-.5-2.9-3-5.1-5.9-5.1H12c-6.6 0-12 5.4-12 12v36c0 26.5 21.5 48 48 48h544c26.5 0 48-21.5 48-48v-36c0-6.6-5.4-12-12-12z"],leaf:[576,512,[],"f06c","M395.4 420.8c-43.4 21.6-91.9 34.4-140.8 34.4-82.2 0-151.1-40.1-151.1-40.1-16.1 0-35.4 64.9-63.3 64.9-27 0-40.2-24-40.2-38.5 0-33.1 63.6-58.9 63.6-77.3 0 0-12.5-21.2-12.5-59.2 0-101.2 81.3-173.4 172.6-203.3 65.9-21.6 206 3.5 250.7-38.5C492.1 47 500.8 32 527.8 32c36.3 0 48.2 93.2 48.2 120.3 0 110.9-54.5 206.5-180.6 268.5zm-254.3-75.6c63.5-89.9 144.5-128.8 257.7-120 8.8.7 16.5-5.9 17.2-14.7.7-8.8-5.9-16.5-14.7-17.2-124-9.6-215.9 33.9-286.3 133.5-5.1 7.2-3.4 17.2 3.8 22.3 7.2 5.1 17.2 3.4 22.3-3.9z"],lemon:[512,512,[],"f094","M489.038 22.963C465.944-.13 434.648-5.93 413.947 6.129c-58.906 34.312-181.25-53.077-321.073 86.746S40.441 355.041 6.129 413.945c-12.059 20.702-6.26 51.999 16.833 75.093 23.095 23.095 54.392 28.891 75.095 16.832 58.901-34.31 181.246 53.079 
321.068-86.743S471.56 156.96 505.871 98.056c12.059-20.702 6.261-51.999-16.833-75.093zM243.881 95.522c-58.189 14.547-133.808 90.155-148.358 148.358-1.817 7.27-8.342 12.124-15.511 12.124-1.284 0-2.59-.156-3.893-.481-8.572-2.144-13.784-10.83-11.642-19.403C81.901 166.427 166.316 81.93 236.119 64.478c8.575-2.143 17.261 3.069 19.403 11.642s-3.069 17.259-11.641 19.402z"],"level-down-alt":[320,512,[],"f3be","M313.553 392.331L209.587 504.334c-9.485 10.214-25.676 10.229-35.174 0L70.438 392.331C56.232 377.031 67.062 352 88.025 352H152V80H68.024a11.996 11.996 0 0 1-8.485-3.515l-56-56C-4.021 12.926 1.333 0 12.024 0H208c13.255 0 24 10.745 24 24v328h63.966c20.878 0 31.851 24.969 17.587 40.331z"],"level-up-alt":[320,512,[],"f3bf","M313.553 119.669L209.587 7.666c-9.485-10.214-25.676-10.229-35.174 0L70.438 119.669C56.232 134.969 67.062 160 88.025 160H152v272H68.024a11.996 11.996 0 0 0-8.485 3.515l-56 56C-4.021 499.074 1.333 512 12.024 512H208c13.255 0 24-10.745 24-24V160h63.966c20.878 0 31.851-24.969 17.587-40.331z"],"life-ring":[512,512,[],"f1cd","M256 8C119.033 8 8 119.033 8 256s111.033 248 248 248 248-111.033 248-248S392.967 8 256 8zm173.696 119.559l-63.399 63.399c-10.987-18.559-26.67-34.252-45.255-45.255l63.399-63.399a218.396 218.396 0 0 1 45.255 45.255zM256 352c-53.019 0-96-42.981-96-96s42.981-96 96-96 96 42.981 96 96-42.981 96-96 96zM127.559 82.304l63.399 63.399c-18.559 10.987-34.252 26.67-45.255 45.255l-63.399-63.399a218.372 218.372 0 0 1 45.255-45.255zM82.304 384.441l63.399-63.399c10.987 18.559 26.67 34.252 45.255 45.255l-63.399 63.399a218.396 218.396 0 0 1-45.255-45.255zm302.137 45.255l-63.399-63.399c18.559-10.987 34.252-26.67 45.255-45.255l63.399 63.399a218.403 218.403 0 0 1-45.255 45.255z"],lightbulb:[384,512,[],"f0eb","M272 428v28c0 10.449-6.68 19.334-16 22.629V488c0 13.255-10.745 24-24 24h-80c-13.255 0-24-10.745-24-24v-9.371c-9.32-3.295-16-12.18-16-22.629v-28c0-6.627 5.373-12 12-12h136c6.627 0 12 5.373 12 12zm-143.107-44c-9.907 0-18.826-6.078-22.376-15.327C67.697 267.541 16 277.731 16 176 16 78.803 94.805 0 192 0s176 78.803 176 176c0 101.731-51.697 91.541-90.516 192.673-3.55 9.249-12.47 15.327-22.376 15.327H128.893zM112 176c0-44.112 35.888-80 80-80 8.837 0 16-7.164 16-16s-7.163-16-16-16c-61.757 0-112 50.243-112 112 0 8.836 7.164 16 16 16s16-7.164 16-16z"],link:[512,512,[],"f0c1","M326.612 185.391c59.747 59.809 58.927 155.698.36 214.59-.11.12-.24.25-.36.37l-67.2 67.2c-59.27 59.27-155.699 59.262-214.96 0-59.27-59.26-59.27-155.7 0-214.96l37.106-37.106c9.84-9.84 26.786-3.3 27.294 10.606.648 17.722 3.826 35.527 9.69 52.721 1.986 5.822.567 12.262-3.783 16.612l-13.087 13.087c-28.026 28.026-28.905 73.66-1.155 101.96 28.024 28.579 74.086 28.749 102.325.51l67.2-67.19c28.191-28.191 28.073-73.757 0-101.83-3.701-3.694-7.429-6.564-10.341-8.569a16.037 16.037 0 0 1-6.947-12.606c-.396-10.567 3.348-21.456 11.698-29.806l21.054-21.055c5.521-5.521 14.182-6.199 20.584-1.731a152.482 152.482 0 0 1 20.522 17.197zM467.547 44.449c-59.261-59.262-155.69-59.27-214.96 0l-67.2 67.2c-.12.12-.25.25-.36.37-58.566 58.892-59.387 154.781.36 214.59a152.454 152.454 0 0 0 20.521 17.196c6.402 4.468 15.064 3.789 20.584-1.731l21.054-21.055c8.35-8.35 12.094-19.239 11.698-29.806a16.037 16.037 0 0 0-6.947-12.606c-2.912-2.005-6.64-4.875-10.341-8.569-28.073-28.073-28.191-73.639 0-101.83l67.2-67.19c28.239-28.239 74.3-28.069 102.325.51 27.75 28.3 26.872 73.934-1.155 101.96l-13.087 13.087c-4.35 4.35-5.769 10.79-3.783 16.612 5.864 17.194 9.042 34.999 9.69 52.721.509 13.906 17.454 20.446 27.294 10.606l37.106-37.106c59.271-59.259 
59.271-155.699.001-214.959z"],"lira-sign":[384,512,[],"f195","M371.994 256h-48.019C317.64 256 312 260.912 312 267.246 312 368 230.179 416 144 416V256.781l134.603-29.912A12 12 0 0 0 288 215.155v-40.976c0-7.677-7.109-13.38-14.603-11.714L144 191.219V160.78l134.603-29.912A12 12 0 0 0 288 119.154V78.179c0-7.677-7.109-13.38-14.603-11.714L144 95.219V44c0-6.627-5.373-12-12-12H76c-6.627 0-12 5.373-12 12v68.997L9.397 125.131A12 12 0 0 0 0 136.845v40.976c0 7.677 7.109 13.38 14.603 11.714L64 178.558v30.439L9.397 221.131A12 12 0 0 0 0 232.845v40.976c0 7.677 7.109 13.38 14.603 11.714L64 274.558V468c0 6.627 5.373 12 12 12h79.583c134.091 0 223.255-77.834 228.408-211.592.261-6.782-5.211-12.408-11.997-12.408z"],list:[512,512,[],"f03a","M128 116V76c0-8.837 7.163-16 16-16h352c8.837 0 16 7.163 16 16v40c0 8.837-7.163 16-16 16H144c-8.837 0-16-7.163-16-16zm16 176h352c8.837 0 16-7.163 16-16v-40c0-8.837-7.163-16-16-16H144c-8.837 0-16 7.163-16 16v40c0 8.837 7.163 16 16 16zm0 160h352c8.837 0 16-7.163 16-16v-40c0-8.837-7.163-16-16-16H144c-8.837 0-16 7.163-16 16v40c0 8.837 7.163 16 16 16zM16 144h64c8.837 0 16-7.163 16-16V64c0-8.837-7.163-16-16-16H16C7.163 48 0 55.163 0 64v64c0 8.837 7.163 16 16 16zm0 160h64c8.837 0 16-7.163 16-16v-64c0-8.837-7.163-16-16-16H16c-8.837 0-16 7.163-16 16v64c0 8.837 7.163 16 16 16zm0 160h64c8.837 0 16-7.163 16-16v-64c0-8.837-7.163-16-16-16H16c-8.837 0-16 7.163-16 16v64c0 8.837 7.163 16 16 16z"],"list-alt":[512,512,[],"f022","M464 480H48c-26.51 0-48-21.49-48-48V80c0-26.51 21.49-48 48-48h416c26.51 0 48 21.49 48 48v352c0 26.51-21.49 48-48 48zM128 120c-22.091 0-40 17.909-40 40s17.909 40 40 40 40-17.909 40-40-17.909-40-40-40zm0 96c-22.091 0-40 17.909-40 40s17.909 40 40 40 40-17.909 40-40-17.909-40-40-40zm0 96c-22.091 0-40 17.909-40 40s17.909 40 40 40 40-17.909 40-40-17.909-40-40-40zm288-136v-32c0-6.627-5.373-12-12-12H204c-6.627 0-12 5.373-12 12v32c0 6.627 5.373 12 12 12h200c6.627 0 12-5.373 12-12zm0 96v-32c0-6.627-5.373-12-12-12H204c-6.627 0-12 5.373-12 12v32c0 6.627 5.373 12 12 12h200c6.627 0 12-5.373 12-12zm0 96v-32c0-6.627-5.373-12-12-12H204c-6.627 0-12 5.373-12 12v32c0 6.627 5.373 12 12 12h200c6.627 0 12-5.373 12-12z"],"list-ol":[512,512,[],"f0cb","M3.263 139.527c0-7.477 3.917-11.572 11.573-11.572h15.131V88.078c0-5.163.534-10.503.534-10.503h-.356s-1.779 2.67-2.848 3.738c-4.451 4.273-10.504 4.451-15.666-1.068l-5.518-6.231c-5.342-5.341-4.984-11.216.534-16.379l21.72-19.938C32.815 33.602 36.732 32 42.785 32H54.89c7.656 0 11.749 3.916 11.749 11.572v84.384h15.488c7.655 0 11.572 4.094 11.572 11.572v8.901c0 7.477-3.917 11.572-11.572 11.572H14.836c-7.656 0-11.573-4.095-11.573-11.572v-8.902zM2.211 304.591c0-47.278 50.955-56.383 50.955-69.165 0-7.18-5.954-8.755-9.28-8.755-3.153 0-6.479 1.051-9.455 3.852-5.079 4.903-10.507 7.004-16.111 2.451l-8.579-6.829c-5.779-4.553-7.18-9.805-2.803-15.409C13.592 201.981 26.025 192 47.387 192c19.437 0 44.476 10.506 44.476 39.573 0 38.347-46.753 46.402-48.679 56.909h39.049c7.529 0 11.557 4.027 11.557 11.382v8.755c0 7.354-4.028 11.382-11.557 11.382h-67.94c-7.005 0-12.083-4.028-12.083-11.382v-4.028zM5.654 454.61l5.603-9.28c3.853-6.654 9.105-7.004 15.584-3.152 4.903 2.101 9.63 3.152 14.359 3.152 10.155 0 14.358-3.502 14.358-8.23 0-6.654-5.604-9.106-15.934-9.106h-4.728c-5.954 0-9.28-2.101-12.258-7.88l-1.05-1.926c-2.451-4.728-1.226-9.806 2.801-14.884l5.604-7.004c6.829-8.405 12.257-13.483 12.257-13.483v-.35s-4.203 1.051-12.608 1.051H16.685c-7.53 0-11.383-4.028-11.383-11.382v-8.755c0-7.53 3.853-11.382 11.383-11.382h58.484c7.529 0 11.382 4.027 11.382 11.382v3.327c0 5.778-1.401 
9.806-5.079 14.183l-17.509 20.137c19.611 5.078 28.716 20.487 28.716 34.845 0 21.363-14.358 44.126-48.503 44.126-16.636 0-28.192-4.728-35.896-9.455-5.779-4.202-6.304-9.805-2.626-15.934zM144 132h352c8.837 0 16-7.163 16-16V76c0-8.837-7.163-16-16-16H144c-8.837 0-16 7.163-16 16v40c0 8.837 7.163 16 16 16zm0 160h352c8.837 0 16-7.163 16-16v-40c0-8.837-7.163-16-16-16H144c-8.837 0-16 7.163-16 16v40c0 8.837 7.163 16 16 16zm0 160h352c8.837 0 16-7.163 16-16v-40c0-8.837-7.163-16-16-16H144c-8.837 0-16 7.163-16 16v40c0 8.837 7.163 16 16 16z"],"list-ul":[512,512,[],"f0ca","M96 96c0 26.51-21.49 48-48 48S0 122.51 0 96s21.49-48 48-48 48 21.49 48 48zM48 208c-26.51 0-48 21.49-48 48s21.49 48 48 48 48-21.49 48-48-21.49-48-48-48zm0 160c-26.51 0-48 21.49-48 48s21.49 48 48 48 48-21.49 48-48-21.49-48-48-48zm96-236h352c8.837 0 16-7.163 16-16V76c0-8.837-7.163-16-16-16H144c-8.837 0-16 7.163-16 16v40c0 8.837 7.163 16 16 16zm0 160h352c8.837 0 16-7.163 16-16v-40c0-8.837-7.163-16-16-16H144c-8.837 0-16 7.163-16 16v40c0 8.837 7.163 16 16 16zm0 160h352c8.837 0 16-7.163 16-16v-40c0-8.837-7.163-16-16-16H144c-8.837 0-16 7.163-16 16v40c0 8.837 7.163 16 16 16z"],"location-arrow":[512,512,[],"f124","M443.683 4.529L27.818 196.418C-18.702 217.889-3.39 288 47.933 288H224v175.993c0 51.727 70.161 66.526 91.582 20.115L507.38 68.225c18.905-40.961-23.752-82.133-63.697-63.696z"],lock:[448,512,[],"f023","M400 224h-24v-72C376 68.2 307.8 0 224 0S72 68.2 72 152v72H48c-26.5 0-48 21.5-48 48v192c0 26.5 21.5 48 48 48h352c26.5 0 48-21.5 48-48V272c0-26.5-21.5-48-48-48zm-104 0H152v-72c0-39.7 32.3-72 72-72s72 32.3 72 72v72z"],"lock-open":[576,512,[],"f3c1","M423.5 0C339.5.3 272 69.5 272 153.5V224H48c-26.5 0-48 21.5-48 48v192c0 26.5 21.5 48 48 48h352c26.5 0 48-21.5 48-48V272c0-26.5-21.5-48-48-48h-48v-71.1c0-39.6 31.7-72.5 71.3-72.9 40-.4 72.7 32.1 72.7 72v80c0 13.3 10.7 24 24 24h32c13.3 0 24-10.7 24-24v-80C576 68 507.5-.3 423.5 0z"],"long-arrow-alt-down":[256,512,[],"f309","M168 345.941V44c0-6.627-5.373-12-12-12h-56c-6.627 0-12 5.373-12 12v301.941H41.941c-21.382 0-32.09 25.851-16.971 40.971l86.059 86.059c9.373 9.373 24.569 9.373 33.941 0l86.059-86.059c15.119-15.119 4.411-40.971-16.971-40.971H168z"],"long-arrow-alt-left":[448,512,[],"f30a","M134.059 296H436c6.627 0 12-5.373 12-12v-56c0-6.627-5.373-12-12-12H134.059v-46.059c0-21.382-25.851-32.09-40.971-16.971L7.029 239.029c-9.373 9.373-9.373 24.569 0 33.941l86.059 86.059c15.119 15.119 40.971 4.411 40.971-16.971V296z"],"long-arrow-alt-right":[448,512,[],"f30b","M313.941 216H12c-6.627 0-12 5.373-12 12v56c0 6.627 5.373 12 12 12h301.941v46.059c0 21.382 25.851 32.09 40.971 16.971l86.059-86.059c9.373-9.373 9.373-24.569 0-33.941l-86.059-86.059c-15.119-15.119-40.971-4.411-40.971 16.971V216z"],"long-arrow-alt-up":[256,512,[],"f30c","M88 166.059V468c0 6.627 5.373 12 12 12h56c6.627 0 12-5.373 12-12V166.059h46.059c21.382 0 32.09-25.851 16.971-40.971l-86.059-86.059c-9.373-9.373-24.569-9.373-33.941 0l-86.059 86.059c-15.119 15.119-4.411 40.971 16.971 40.971H88z"],"low-vision":[576,512,[],"f2a8","M569.344 231.631C512.96 135.949 407.81 72 288 72c-28.468 0-56.102 3.619-82.451 10.409L152.778 10.24c-7.601-10.858-22.564-13.5-33.423-5.9l-13.114 9.178c-10.86 7.601-13.502 22.566-5.9 33.426l43.131 58.395C89.449 131.73 40.228 174.683 6.682 231.581c-.01.017-.023.033-.034.05-8.765 14.875-8.964 33.528 0 48.739 38.5 65.332 99.742 115.862 172.859 141.349L55.316 244.302A272.194 272.194 0 0 1 83.61 208.39l119.4 170.58h.01l40.63 58.04a330.055 330.055 0 0 0 78.94 1.17l-189.98-271.4a277.628 277.628 0 0 1 38.777-21.563l251.836 356.544c7.601 
10.858 22.564 13.499 33.423 5.9l13.114-9.178c10.86-7.601 13.502-22.567 5.9-33.426l-43.12-58.377-.007-.009c57.161-27.978 104.835-72.04 136.81-126.301a47.938 47.938 0 0 0 .001-48.739zM390.026 345.94l-19.066-27.23c24.682-32.567 27.711-76.353 8.8-111.68v.03c0 23.65-19.17 42.82-42.82 42.82-23.828 0-42.82-19.349-42.82-42.82 0-23.65 19.17-42.82 42.82-42.82h.03c-24.75-13.249-53.522-15.643-79.51-7.68l-19.068-27.237C253.758 123.306 270.488 120 288 120c75.162 0 136 60.826 136 136 0 34.504-12.833 65.975-33.974 89.94z"],magic:[512,512,[],"f0d0","M101.1 505L7 410.9c-9.4-9.4-9.4-24.6 0-33.9L377 7c9.4-9.4 24.6-9.4 33.9 0l94.1 94.1c9.4 9.4 9.4 24.6 0 33.9L135 505c-9.3 9.3-24.5 9.3-33.9 0zM304 159.2l48.8 48.8 89.9-89.9-48.8-48.8-89.9 89.9zM138.9 39.3l-11.7 23.8-26.2 3.8c-4.7.7-6.6 6.5-3.2 9.8l19 18.5-4.5 26.1c-.8 4.7 4.1 8.3 8.3 6.1L144 115l23.4 12.3c4.2 2.2 9.1-1.4 8.3-6.1l-4.5-26.1 19-18.5c3.4-3.3 1.5-9.1-3.2-9.8L160.8 63l-11.7-23.8c-2-4.1-8.1-4.1-10.2.1zm97.7-20.7l-7.8 15.8-17.5 2.6c-3.1.5-4.4 4.3-2.1 6.5l12.6 12.3-3 17.4c-.5 3.1 2.8 5.5 5.6 4L240 69l15.6 8.2c2.8 1.5 6.1-.9 5.6-4l-3-17.4 12.6-12.3c2.3-2.2 1-6.1-2.1-6.5l-17.5-2.5-7.8-15.8c-1.4-3-5.4-3-6.8-.1zm-192 0l-7.8 15.8L19.3 37c-3.1.5-4.4 4.3-2.1 6.5l12.6 12.3-3 17.4c-.5 3.1 2.8 5.5 5.6 4L48 69l15.6 8.2c2.8 1.5 6.1-.9 5.6-4l-3-17.4 12.6-12.3c2.3-2.2 1-6.1-2.1-6.5l-17.5-2.5-7.8-15.8c-1.4-3-5.4-3-6.8-.1zm416 223.5l-7.8 15.8-17.5 2.5c-3.1.5-4.4 4.3-2.1 6.5l12.6 12.3-3 17.4c-.5 3.1 2.8 5.5 5.6 4l15.6-8.2 15.6 8.2c2.8 1.5 6.1-.9 5.6-4l-3-17.4 12.6-12.3c2.3-2.2 1-6.1-2.1-6.5l-17.5-2.5-7.8-15.8c-1.4-2.8-5.4-2.8-6.8 0z"],magnet:[512,512,[],"f076","M164.1 160H12c-6.6 0-12-5.4-12-12V68c0-19.9 16.1-36 36-36h104c19.9 0 36 16.1 36 36v80c.1 6.6-5.3 12-11.9 12zm348-12V67.9c0-19.9-16.1-36-36-36h-104c-19.9 0-36 16.1-36 36v80c0 6.6 5.4 12 12 12h152c6.6.1 12-5.3 12-11.9zm-164 44c-6.6 0-12 5.4-12 12v52c0 128.1-160 127.9-160 0v-52c0-6.6-5.4-12-12-12h-152c-6.7 0-12 5.4-12 12.1.1 21.4.6 40.3 0 53.3C.1 408 136.3 504 256.9 504 377.5 504 512 408 512 257.3c-.6-12.8-.2-33 0-53.2 0-6.7-5.3-12.1-12-12.1H348.1z"],male:[192,512,[],"f183","M96 0c35.346 0 64 28.654 64 64s-28.654 64-64 64-64-28.654-64-64S60.654 0 96 0m48 144h-11.36c-22.711 10.443-49.59 10.894-73.28 0H48c-26.51 0-48 21.49-48 48v136c0 13.255 10.745 24 24 24h16v136c0 13.255 10.745 24 24 24h64c13.255 0 24-10.745 24-24V352h16c13.255 0 24-10.745 24-24V192c0-26.51-21.49-48-48-48z"],map:[576,512,[],"f279","M576 56.015v335.97a23.998 23.998 0 0 1-13.267 21.466l-128 64C418.948 485.344 400 473.992 400 455.985v-335.97a23.998 23.998 0 0 1 13.267-21.466l128-64C557.052 26.656 576 38.008 576 56.015zm-206.253 42.07l-144-64c-15.751-7-33.747 4.461-33.747 21.932v335.967a24 24 0 0 0 14.253 21.931l144 64c15.751 7 33.747-4.461 33.747-21.931V120.017a24 24 0 0 0-14.253-21.932zm-228.48-63.536l-128 63.985A23.998 23.998 0 0 0 0 120v335.985c0 18.007 18.948 29.359 34.733 21.466l128-63.985A23.998 23.998 0 0 0 176 392V56.015c0-18.007-18.948-29.359-34.733-21.466z"],"map-marker":[384,512,[],"f041","M172.268 501.67C26.97 291.031 0 269.413 0 192 0 85.961 85.961 0 192 0s192 85.961 192 192c0 77.413-26.97 99.031-172.268 309.67-9.535 13.774-29.93 13.773-39.464 0z"],"map-marker-alt":[384,512,[],"f3c5","M172.268 501.67C26.97 291.031 0 269.413 0 192 0 85.961 85.961 0 192 0s192 85.961 192 192c0 77.413-26.97 99.031-172.268 309.67-9.535 13.774-29.93 13.773-39.464 0zM192 272c44.183 0 80-35.817 80-80s-35.817-80-80-80-80 35.817-80 80 35.817 80 80 80z"],"map-pin":[320,512,[],"f276","M192 300.813v172.82l-22.015 33.023c-4.75 7.125-15.219 7.125-19.969 0L128 
473.633v-172.82a162.221 162.221 0 0 0 64 0zM160 0c79.529 0 144 64.471 144 144s-64.471 144-144 144S16 223.529 16 144 80.471 0 160 0M80 136c0-39.701 32.299-72 72-72a8 8 0 0 0 0-16c-48.523 0-88 39.477-88 88a8 8 0 0 0 16 0z"],"map-signs":[512,512,[],"f277","M487.515 104.485L439.03 152.97a23.998 23.998 0 0 1-16.97 7.029H56c-13.255 0-24-10.745-24-24V56c0-13.255 10.745-24 24-24h160v-8c0-13.255 10.745-24 24-24h32c13.255 0 24 10.745 24 24v8h126.059a24 24 0 0 1 16.97 7.029l48.485 48.485c4.687 4.687 4.687 12.285.001 16.971zM216 368v120c0 13.255 10.745 24 24 24h32c13.255 0 24-10.745 24-24V368h-80zm240-144H296v-48h-80v48H89.941a24 24 0 0 0-16.97 7.029l-48.485 48.485c-4.686 4.686-4.686 12.284 0 16.971l48.485 48.485a23.998 23.998 0 0 0 16.97 7.029H456c13.255 0 24-10.745 24-24v-80C480 234.745 469.255 224 456 224z"],mars:[384,512,[],"f222","M372 64h-79c-10.7 0-16 12.9-8.5 20.5l16.9 16.9-80.7 80.7c-22.2-14-48.5-22.1-76.7-22.1C64.5 160 0 224.5 0 304s64.5 144 144 144 144-64.5 144-144c0-28.2-8.1-54.5-22.1-76.7l80.7-80.7 16.9 16.9c7.6 7.6 20.5 2.2 20.5-8.5V76c0-6.6-5.4-12-12-12zM144 384c-44.1 0-80-35.9-80-80s35.9-80 80-80 80 35.9 80 80-35.9 80-80 80z"],"mars-double":[512,512,[],"f227","M340 0h-79c-10.7 0-16 12.9-8.5 20.5l16.9 16.9-48.7 48.7C198.5 72.1 172.2 64 144 64 64.5 64 0 128.5 0 208s64.5 144 144 144 144-64.5 144-144c0-28.2-8.1-54.5-22.1-76.7l48.7-48.7 16.9 16.9c2.4 2.4 5.5 3.5 8.4 3.5 6.2 0 12.1-4.8 12.1-12V12c0-6.6-5.4-12-12-12zM144 288c-44.1 0-80-35.9-80-80s35.9-80 80-80 80 35.9 80 80-35.9 80-80 80zm356-128.1h-79c-10.7 0-16 12.9-8.5 20.5l16.9 16.9-48.7 48.7c-18.2-11.4-39-18.9-61.5-21.3-2.1 21.8-8.2 43.3-18.4 63.3 1.1 0 2.2-.1 3.2-.1 44.1 0 80 35.9 80 80s-35.9 80-80 80-80-35.9-80-80c0-1.1 0-2.2.1-3.2-20 10.2-41.5 16.4-63.3 18.4C168.4 455.6 229.6 512 304 512c79.5 0 144-64.5 144-144 0-28.2-8.1-54.5-22.1-76.7l48.7-48.7 16.9 16.9c2.4 2.4 5.4 3.5 8.4 3.5 6.2 0 12.1-4.8 12.1-12v-79c0-6.7-5.4-12.1-12-12.1z"],"mars-stroke":[384,512,[],"f229","M372 64h-79c-10.7 0-16 12.9-8.5 20.5l16.9 16.9-17.5 17.5-14.1-14.1c-4.7-4.7-12.3-4.7-17 0L224.5 133c-4.7 4.7-4.7 12.3 0 17l14.1 14.1-18 18c-22.2-14-48.5-22.1-76.7-22.1C64.5 160 0 224.5 0 304s64.5 144 144 144 144-64.5 144-144c0-28.2-8.1-54.5-22.1-76.7l18-18 14.1 14.1c4.7 4.7 12.3 4.7 17 0l28.3-28.3c4.7-4.7 4.7-12.3 0-17L329.2 164l17.5-17.5 16.9 16.9c7.6 7.6 20.5 2.2 20.5-8.5V76c-.1-6.6-5.5-12-12.1-12zM144 384c-44.1 0-80-35.9-80-80s35.9-80 80-80 80 35.9 80 80-35.9 80-80 80z"],"mars-stroke-h":[480,512,[],"f22b","M476.2 247.5l-55.9-55.9c-7.6-7.6-20.5-2.2-20.5 8.5V224H376v-20c0-6.6-5.4-12-12-12h-40c-6.6 0-12 5.4-12 12v20h-27.6c-5.8-25.6-18.7-49.9-38.6-69.8C189.6 98 98.4 98 42.2 154.2c-56.2 56.2-56.2 147.4 0 203.6 56.2 56.2 147.4 56.2 203.6 0 19.9-19.9 32.8-44.2 38.6-69.8H312v20c0 6.6 5.4 12 12 12h40c6.6 0 12-5.4 12-12v-20h23.9v23.9c0 10.7 12.9 16 20.5 8.5l55.9-55.9c4.6-4.7 4.6-12.3-.1-17zm-275.6 65.1c-31.2 31.2-81.9 31.2-113.1 0-31.2-31.2-31.2-81.9 0-113.1 31.2-31.2 81.9-31.2 113.1 0 31.2 31.1 31.2 81.9 0 113.1z"],"mars-stroke-v":[288,512,[],"f22a","M245.8 234.2c-19.9-19.9-44.2-32.8-69.8-38.6v-25.4h20c6.6 0 12-5.4 12-12v-40c0-6.6-5.4-12-12-12h-20V81.4h23.9c10.7 0 16-12.9 8.5-20.5L152.5 5.1c-4.7-4.7-12.3-4.7-17 0L79.6 61c-7.6 7.6-2.2 20.5 8.5 20.5H112v24.7H92c-6.6 0-12 5.4-12 12v40c0 6.6 5.4 12 12 12h20v25.4c-25.6 5.8-49.9 18.7-69.8 38.6-56.2 56.2-56.2 147.4 0 203.6 56.2 56.2 147.4 56.2 203.6 0 56.3-56.2 56.3-147.4 0-203.6zm-45.2 158.4c-31.2 31.2-81.9 31.2-113.1 0-31.2-31.2-31.2-81.9 0-113.1 31.2-31.2 81.9-31.2 113.1 0 31.2 31.1 31.2 81.9 0 113.1z"],medkit:[512,512,[],"f0fa","M96 
480h320V128h-32V80c0-26.51-21.49-48-48-48H176c-26.51 0-48 21.49-48 48v48H96v352zm96-384h128v32H192V96zm320 80v256c0 26.51-21.49 48-48 48h-16V128h16c26.51 0 48 21.49 48 48zM64 480H48c-26.51 0-48-21.49-48-48V176c0-26.51 21.49-48 48-48h16v352zm288-208v32c0 8.837-7.163 16-16 16h-48v48c0 8.837-7.163 16-16 16h-32c-8.837 0-16-7.163-16-16v-48h-48c-8.837 0-16-7.163-16-16v-32c0-8.837 7.163-16 16-16h48v-48c0-8.837 7.163-16 16-16h32c8.837 0 16 7.163 16 16v48h48c8.837 0 16 7.163 16 16z"],meh:[512,512,[],"f11a","M504 256c0 136.967-111.033 248-248 248S8 392.967 8 256 119.033 8 256 8s248 111.033 248 248zm-396-64c0 37.497 30.503 68 68 68s68-30.503 68-68-30.503-68-68-68-68 30.503-68 68zm160.5 0c0 37.221 30.279 67.5 67.5 67.5s67.5-30.279 67.5-67.5-30.279-67.5-67.5-67.5-67.5 30.279-67.5 67.5zm67.5-48a47.789 47.789 0 0 0-22.603 5.647h.015c10.916 0 19.765 8.849 19.765 19.765s-8.849 19.765-19.765 19.765-19.765-8.849-19.765-19.765v-.015A47.789 47.789 0 0 0 288 192c0 26.51 21.49 48 48 48s48-21.49 48-48-21.49-48-48-48zm-160 0a47.789 47.789 0 0 0-22.603 5.647h.015c10.916 0 19.765 8.849 19.765 19.765s-8.849 19.765-19.765 19.765-19.765-8.849-19.765-19.765v-.015A47.789 47.789 0 0 0 128 192c0 26.51 21.49 48 48 48s48-21.49 48-48-21.49-48-48-48zm160 208H176c-21.178 0-21.169 32 0 32h160c21.178 0 21.169-32 0-32z"],mercury:[288,512,[],"f223","M288 208c0-44.2-19.9-83.7-51.2-110.1 2.5-1.8 4.9-3.8 7.2-5.8 24.7-21.2 39.8-48.8 43.2-78.8.9-7.1-4.7-13.3-11.9-13.3h-40.5C229 0 224.1 4.1 223 9.8c-2.4 12.5-9.6 24.3-20.7 33.8C187 56.8 166.3 64 144 64s-43-7.2-58.4-20.4C74.5 34.1 67.4 22.3 64.9 9.8 63.8 4.1 58.9 0 53.2 0H12.7C5.5 0-.1 6.2.8 13.3 4.2 43.4 19.2 71 44 92.2c2.3 2 4.7 3.9 7.2 5.8C19.9 124.3 0 163.8 0 208c0 68.5 47.9 125.9 112 140.4V400H76c-6.6 0-12 5.4-12 12v40c0 6.6 5.4 12 12 12h36v36c0 6.6 5.4 12 12 12h40c6.6 0 12-5.4 12-12v-36h36c6.6 0 12-5.4 12-12v-40c0-6.6-5.4-12-12-12h-36v-51.6c64.1-14.5 112-71.9 112-140.4zm-224 0c0-44.1 35.9-80 80-80s80 35.9 80 80-35.9 80-80 80-80-35.9-80-80z"],microchip:[512,512,[],"f2db","M416 48v416c0 26.51-21.49 48-48 48H144c-26.51 0-48-21.49-48-48V48c0-26.51 21.49-48 48-48h224c26.51 0 48 21.49 48 48zm96 58v12a6 6 0 0 1-6 6h-18v6a6 6 0 0 1-6 6h-42V88h42a6 6 0 0 1 6 6v6h18a6 6 0 0 1 6 6zm0 96v12a6 6 0 0 1-6 6h-18v6a6 6 0 0 1-6 6h-42v-48h42a6 6 0 0 1 6 6v6h18a6 6 0 0 1 6 6zm0 96v12a6 6 0 0 1-6 6h-18v6a6 6 0 0 1-6 6h-42v-48h42a6 6 0 0 1 6 6v6h18a6 6 0 0 1 6 6zm0 96v12a6 6 0 0 1-6 6h-18v6a6 6 0 0 1-6 6h-42v-48h42a6 6 0 0 1 6 6v6h18a6 6 0 0 1 6 6zM30 376h42v48H30a6 6 0 0 1-6-6v-6H6a6 6 0 0 1-6-6v-12a6 6 0 0 1 6-6h18v-6a6 6 0 0 1 6-6zm0-96h42v48H30a6 6 0 0 1-6-6v-6H6a6 6 0 0 1-6-6v-12a6 6 0 0 1 6-6h18v-6a6 6 0 0 1 6-6zm0-96h42v48H30a6 6 0 0 1-6-6v-6H6a6 6 0 0 1-6-6v-12a6 6 0 0 1 6-6h18v-6a6 6 0 0 1 6-6zm0-96h42v48H30a6 6 0 0 1-6-6v-6H6a6 6 0 0 1-6-6v-12a6 6 0 0 1 6-6h18v-6a6 6 0 0 1 6-6z"],microphone:[384,512,[],"f130","M96 256V96c0-53.019 42.981-96 96-96s96 42.981 96 96v160c0 53.019-42.981 96-96 96s-96-42.981-96-96zm252-56h-24c-6.627 0-12 5.373-12 12v42.68c0 66.217-53.082 120.938-119.298 121.318C126.213 376.38 72 322.402 72 256v-44c0-6.627-5.373-12-12-12H36c-6.627 0-12 5.373-12 12v44c0 84.488 62.693 154.597 144 166.278V468h-68c-6.627 0-12 5.373-12 12v20c0 6.627 5.373 12 12 12h184c6.627 0 12-5.373 12-12v-20c0-6.627-5.373-12-12-12h-68v-45.722c81.307-11.681 144-81.79 144-166.278v-44c0-6.627-5.373-12-12-12z"],"microphone-slash":[512,512,[],"f131","M421.45 285.195L376 239.746V212c0-6.627 5.373-12 12-12h24c6.627 0 12 5.373 12 12v44c0 9.957-.881 19.71-2.55 29.195zM352 96c0-53.019-42.981-96-96-96-32.574 0-61.354 
16.227-78.71 41.035L352 215.746V96zm152.971 363.716L52.284 7.029c-9.373-9.373-24.569-9.373-33.941 0L7.029 18.343c-9.372 9.373-9.372 24.568 0 33.941L160 205.254v49.577c0 53.089 43.436 97.452 96.524 97.167 14.626-.078 28.471-3.44 40.854-9.366l17.746 17.746c-17.529 9.971-37.794 15.666-59.372 15.622C189.355 375.864 136 321.053 136 254.656V212c0-6.627-5.373-12-12-12h-24c-6.627 0-12 5.373-12 12v44c0 84.488 62.693 154.597 144 166.278V468h-68c-6.627 0-12 5.373-12 12v20c0 6.627 5.373 12 12 12h184c6.627 0 12-5.373 12-12v-20c0-6.627-5.373-12-12-12h-68v-45.722c25.625-3.682 49.396-13.172 69.942-27.083L459.717 504.97c9.373 9.373 24.569 9.373 33.941 0l11.313-11.313c9.372-9.373 9.372-24.568 0-33.941z"],minus:[448,512,[],"f068","M424 318.2c13.3 0 24-10.7 24-24v-76.4c0-13.3-10.7-24-24-24H24c-13.3 0-24 10.7-24 24v76.4c0 13.3 10.7 24 24 24h400z"],"minus-circle":[512,512,[],"f056","M256 8C119 8 8 119 8 256s111 248 248 248 248-111 248-248S393 8 256 8zM124 296c-6.6 0-12-5.4-12-12v-56c0-6.6 5.4-12 12-12h264c6.6 0 12 5.4 12 12v56c0 6.6-5.4 12-12 12H124z"],"minus-square":[448,512,[],"f146","M400 32H48C21.5 32 0 53.5 0 80v352c0 26.5 21.5 48 48 48h352c26.5 0 48-21.5 48-48V80c0-26.5-21.5-48-48-48zM92 296c-6.6 0-12-5.4-12-12v-56c0-6.6 5.4-12 12-12h264c6.6 0 12 5.4 12 12v56c0 6.6-5.4 12-12 12H92z"],mobile:[320,512,[],"f10b","M272 0H48C21.5 0 0 21.5 0 48v416c0 26.5 21.5 48 48 48h224c26.5 0 48-21.5 48-48V48c0-26.5-21.5-48-48-48zM160 480c-17.7 0-32-14.3-32-32s14.3-32 32-32 32 14.3 32 32-14.3 32-32 32z"],"mobile-alt":[320,512,[],"f3cd","M272 0H48C21.5 0 0 21.5 0 48v416c0 26.5 21.5 48 48 48h224c26.5 0 48-21.5 48-48V48c0-26.5-21.5-48-48-48zM160 480c-17.7 0-32-14.3-32-32s14.3-32 32-32 32 14.3 32 32-14.3 32-32 32zm112-108c0 6.6-5.4 12-12 12H60c-6.6 0-12-5.4-12-12V60c0-6.6 5.4-12 12-12h200c6.6 0 12 5.4 12 12v312z"],"money-bill-alt":[640,512,[],"f3d1","M640 120v272c0 13.255-10.745 24-24 24H24c-13.255 0-24-10.745-24-24V120c0-13.255 10.745-24 24-24h592c13.255 0 24 10.745 24 24zM96 384c0-35.346-28.654-64-64-64v64h64zm0-256H32v64c35.346 0 64-28.654 64-64zm304 128c0-53.021-35.816-96-80-96s-80 42.979-80 96c0 53.012 35.814 96 80 96 44.167 0 80-42.969 80-96zm208 64c-35.346 0-64 28.654-64 64h64v-64zm0-192h-64c0 35.346 28.654 64 64 64v-64zM277.563 299.527c0-7.477 3.917-11.572 11.573-11.572h15.131v-39.878c0-5.163.534-10.503.534-10.503h-.356s-1.779 2.67-2.848 3.738c-4.451 4.273-10.504 4.451-15.666-1.068l-5.518-6.231c-5.342-5.341-4.984-11.216.534-16.379l21.72-19.939c4.449-4.095 8.366-5.697 14.42-5.697h12.105c7.656 0 11.749 3.916 11.749 11.572v84.384h15.488c7.655 0 11.572 4.094 11.572 11.572v8.901c0 7.477-3.917 11.572-11.572 11.572h-67.293c-7.656 0-11.573-4.095-11.573-11.572v-8.9z"],moon:[512,512,[],"f186","M283.211 512c78.962 0 151.079-35.925 198.857-94.792 7.068-8.708-.639-21.43-11.562-19.35-124.203 23.654-238.262-71.576-238.262-196.954 0-72.222 38.662-138.635 101.498-174.394 9.686-5.512 7.25-20.197-3.756-22.23A258.156 258.156 0 0 0 283.211 0c-141.309 0-256 114.511-256 256 0 141.309 114.511 256 256 256z"],motorcycle:[640,512,[],"f21c","M512.949 192.003c-14.862-.108-29.14 2.322-42.434 6.874L437.589 144H520c13.255 0 24-10.745 24-24V88c0-13.255-10.745-24-24-24h-45.311a24 24 0 0 0-17.839 7.945l-37.496 41.663-22.774-37.956A24 24 0 0 0 376 64h-80c-8.837 0-16 7.163-16 16v16c0 8.837 7.163 16 16 16h66.411l19.2 32H227.904c-17.727-23.073-44.924-40-99.904-40H72.54c-13.455 0-24.791 11.011-24.536 24.464C48.252 141.505 58.9 152 72 152h56c24.504 0 38.686 10.919 47.787 24.769l-11.291 20.529c-13.006-3.865-26.871-5.736-41.251-5.21C55.857 194.549 1.565 
249.605.034 317.021-1.603 389.076 56.317 448 128 448c59.642 0 109.744-40.794 123.953-96h84.236c13.673 0 24.589-11.421 23.976-25.077-2.118-47.12 17.522-93.665 56.185-125.026l12.485 20.808c-27.646 23.654-45.097 58.88-44.831 98.179.47 69.556 57.203 126.452 126.758 127.11 71.629.678 129.839-57.487 129.234-129.099-.588-69.591-57.455-126.386-127.047-126.892zM128 400c-44.112 0-80-35.888-80-80s35.888-80 80-80c4.242 0 8.405.341 12.469.982L98.97 316.434C90.187 332.407 101.762 352 120 352h81.297c-12.37 28.225-40.56 48-73.297 48zm388.351-.116C470.272 402.337 432 365.554 432 320c0-21.363 8.434-40.781 22.125-55.144l49.412 82.352c4.546 7.577 14.375 10.034 21.952 5.488l13.72-8.232c7.577-4.546 10.034-14.375 5.488-21.952l-48.556-80.927A80.005 80.005 0 0 1 512 240c45.554 0 82.338 38.273 79.884 84.352-2.16 40.558-34.974 73.372-75.533 75.532z"],"mouse-pointer":[320,512,[],"f245","M302.189 329.126H196.105l55.831 135.993c3.889 9.428-.555 19.999-9.444 23.999l-49.165 21.427c-9.165 4-19.443-.571-23.332-9.714l-53.053-129.136-86.664 89.138C18.729 472.71 0 463.554 0 447.977V18.299C0 1.899 19.921-6.096 30.277 5.443l284.412 292.542c11.472 11.179 3.007 31.141-12.5 31.141z"],music:[512,512,[],"f001","M470.4 1.5l-304 96C153.1 101.7 144 114 144 128v264.6c-14.1-5.4-30.5-8.6-48-8.6-53 0-96 28.7-96 64s43 64 96 64 96-28.7 96-64V220.5l272-85.9v194c-14.1-5.4-30.5-8.6-48-8.6-53 0-96 28.7-96 64s43 64 96 64 96-28.7 96-64V32c0-21.7-21.1-37-41.6-30.5z"],neuter:[288,512,[],"f22c","M288 176c0-79.5-64.5-144-144-144S0 96.5 0 176c0 68.5 47.9 125.9 112 140.4V468c0 6.6 5.4 12 12 12h40c6.6 0 12-5.4 12-12V316.4c64.1-14.5 112-71.9 112-140.4zm-144 80c-44.1 0-80-35.9-80-80s35.9-80 80-80 80 35.9 80 80-35.9 80-80 80z"],newspaper:[576,512,[],"f1ea","M552 64H88c-13.255 0-24 10.745-24 24v8H24c-13.255 0-24 10.745-24 24v272c0 30.928 25.072 56 56 56h472c26.51 0 48-21.49 48-48V88c0-13.255-10.745-24-24-24zM56 400a8 8 0 0 1-8-8V144h16v248a8 8 0 0 1-8 8zm236-16H140c-6.627 0-12-5.373-12-12v-8c0-6.627 5.373-12 12-12h152c6.627 0 12 5.373 12 12v8c0 6.627-5.373 12-12 12zm208 0H348c-6.627 0-12-5.373-12-12v-8c0-6.627 5.373-12 12-12h152c6.627 0 12 5.373 12 12v8c0 6.627-5.373 12-12 12zm-208-96H140c-6.627 0-12-5.373-12-12v-8c0-6.627 5.373-12 12-12h152c6.627 0 12 5.373 12 12v8c0 6.627-5.373 12-12 12zm208 0H348c-6.627 0-12-5.373-12-12v-8c0-6.627 5.373-12 12-12h152c6.627 0 12 5.373 12 12v8c0 6.627-5.373 12-12 12zm0-96H140c-6.627 0-12-5.373-12-12v-40c0-6.627 5.373-12 12-12h360c6.627 0 12 5.373 12 12v40c0 6.627-5.373 12-12 12z"],"object-group":[512,512,[],"f247","M480 128V96h20c6.627 0 12-5.373 12-12V44c0-6.627-5.373-12-12-12h-40c-6.627 0-12 5.373-12 12v20H64V44c0-6.627-5.373-12-12-12H12C5.373 32 0 37.373 0 44v40c0 6.627 5.373 12 12 12h20v320H12c-6.627 0-12 5.373-12 12v40c0 6.627 5.373 12 12 12h40c6.627 0 12-5.373 12-12v-20h384v20c0 6.627 5.373 12 12 12h40c6.627 0 12-5.373 12-12v-40c0-6.627-5.373-12-12-12h-20V128zM96 276V140c0-6.627 5.373-12 12-12h168c6.627 0 12 5.373 12 12v136c0 6.627-5.373 12-12 12H108c-6.627 0-12-5.373-12-12zm320 96c0 6.627-5.373 12-12 12H236c-6.627 0-12-5.373-12-12v-52h72c13.255 0 24-10.745 24-24v-72h84c6.627 0 12 5.373 12 12v136z"],"object-ungroup":[576,512,[],"f248","M64 320v26a6 6 0 0 1-6 6H6a6 6 0 0 1-6-6v-52a6 6 0 0 1 6-6h26V96H6a6 6 0 0 1-6-6V38a6 6 0 0 1 6-6h52a6 6 0 0 1 6 6v26h288V38a6 6 0 0 1 6-6h52a6 6 0 0 1 6 6v52a6 6 0 0 1-6 6h-26v192h26a6 6 0 0 1 6 6v52a6 6 0 0 1-6 6h-52a6 6 0 0 1-6-6v-26H64zm480-64v-32h26a6 6 0 0 0 6-6v-52a6 6 0 0 0-6-6h-52a6 6 0 0 0-6 6v26H408v72h8c13.255 0 24 10.745 24 24v64c0 13.255-10.745 24-24 24h-64c-13.255 
0-24-10.745-24-24v-8H192v72h-26a6 6 0 0 0-6 6v52a6 6 0 0 0 6 6h52a6 6 0 0 0 6-6v-26h288v26a6 6 0 0 0 6 6h52a6 6 0 0 0 6-6v-52a6 6 0 0 0-6-6h-26V256z"],outdent:[448,512,[],"f03b","M0 84V44c0-8.837 7.163-16 16-16h416c8.837 0 16 7.163 16 16v40c0 8.837-7.163 16-16 16H16c-8.837 0-16-7.163-16-16zm208 144h224c8.837 0 16-7.163 16-16v-40c0-8.837-7.163-16-16-16H208c-8.837 0-16 7.163-16 16v40c0 8.837 7.163 16 16 16zM16 484h416c8.837 0 16-7.163 16-16v-40c0-8.837-7.163-16-16-16H16c-8.837 0-16 7.163-16 16v40c0 8.837 7.163 16 16 16zm192-128h224c8.837 0 16-7.163 16-16v-40c0-8.837-7.163-16-16-16H208c-8.837 0-16 7.163-16 16v40c0 8.837 7.163 16 16 16zM4.687 267.313l96 95.984C110.734 373.348 128 366.224 128 351.984V160.008c0-14.329-17.325-21.304-27.313-11.313l-96 95.992c-6.249 6.248-6.249 16.378 0 22.626z"],"paint-brush":[512,512,[],"f1fc","M269.9 364.6c1.4 6.4 2.1 13 2.1 19.7 0 81.2-54.2 127.7-134.8 127.7C41.5 512 0 435.1 0 347.6c10.4 7.1 46.9 36.5 58.7 36.5 7 0 13-4 15.5-10.6 23.6-62.2 66.5-76.5 112.9-77.4 15.6 33.8 46.1 59.6 82.8 68.5zM460.6 0c-14.4 0-27.9 6.4-38.2 15.7C228.2 190 208 194.1 208 245.4c0 48.8 40.5 90.6 90.2 90.6 59 0 93.2-43.4 200.6-244.8 7-13.7 13.2-28.5 13.2-43.9C512 19.7 487.3 0 460.6 0z"],"paper-plane":[512,512,[],"f1d8","M476 3.2L12.5 270.6c-18.1 10.4-15.8 35.6 2.2 43.2L121 358.4l287.3-253.2c5.5-4.9 13.3 2.6 8.6 8.3L176 407v80.5c0 23.6 28.5 32.9 42.5 15.8L282 426l124.6 52.2c14.2 6 30.4-2.9 33-18.2l72-432C515 7.8 493.3-6.8 476 3.2z"],paperclip:[448,512,[],"f0c6","M43.246 466.142c-58.43-60.289-57.341-157.511 1.386-217.581L254.392 34c44.316-45.332 116.351-45.336 160.671 0 43.89 44.894 43.943 117.329 0 162.276L232.214 383.128c-29.855 30.537-78.633 30.111-107.982-.998-28.275-29.97-27.368-77.473 1.452-106.953l143.743-146.835c6.182-6.314 16.312-6.422 22.626-.241l22.861 22.379c6.315 6.182 6.422 16.312.241 22.626L171.427 319.927c-4.932 5.045-5.236 13.428-.648 18.292 4.372 4.634 11.245 4.711 15.688.165l182.849-186.851c19.613-20.062 19.613-52.725-.011-72.798-19.189-19.627-49.957-19.637-69.154 0L90.39 293.295c-34.763 35.56-35.299 93.12-1.191 128.313 34.01 35.093 88.985 35.137 123.058.286l172.06-175.999c6.177-6.319 16.307-6.433 22.626-.256l22.877 22.364c6.319 6.177 6.434 16.307.256 22.626l-172.06 175.998c-59.576 60.938-155.943 60.216-214.77-.485z"],paragraph:[448,512,[],"f1dd","M408 32H177.531C88.948 32 16.045 103.335 16 191.918 15.956 280.321 87.607 352 176 352v104c0 13.255 10.745 24 24 24h32c13.255 0 24-10.745 24-24V112h32v344c0 13.255 10.745 24 24 24h32c13.255 0 24-10.745 24-24V112h40c13.255 0 24-10.745 24-24V56c0-13.255-10.745-24-24-24z"],paste:[448,512,[],"f0ea","M128 184c0-30.879 25.122-56 56-56h136V56c0-13.255-10.745-24-24-24h-80.61C204.306 12.89 183.637 0 160 0s-44.306 12.89-55.39 32H24C10.745 32 0 42.745 0 56v336c0 13.255 10.745 24 24 24h104V184zm32-144c13.255 0 24 10.745 24 24s-10.745 24-24 24-24-10.745-24-24 10.745-24 24-24zm184 248h104v200c0 13.255-10.745 24-24 24H184c-13.255 0-24-10.745-24-24V184c0-13.255 10.745-24 24-24h136v104c0 13.2 10.8 24 24 24zm104-38.059V256h-96v-96h6.059a24 24 0 0 1 16.97 7.029l65.941 65.941a24.002 24.002 0 0 1 7.03 16.971z"],pause:[448,512,[],"f04c","M144 479H48c-26.5 0-48-21.5-48-48V79c0-26.5 21.5-48 48-48h96c26.5 0 48 21.5 48 48v352c0 26.5-21.5 48-48 48zm304-48V79c0-26.5-21.5-48-48-48h-96c-26.5 0-48 21.5-48 48v352c0 26.5 21.5 48 48 48h96c26.5 0 48-21.5 48-48z"],"pause-circle":[512,512,[],"f28b","M256 8C119 8 8 119 8 256s111 248 248 248 248-111 248-248S393 8 256 8zm-16 328c0 8.8-7.2 16-16 16h-48c-8.8 0-16-7.2-16-16V176c0-8.8 7.2-16 16-16h48c8.8 0 16 7.2 16 
16v160zm112 0c0 8.8-7.2 16-16 16h-48c-8.8 0-16-7.2-16-16V176c0-8.8 7.2-16 16-16h48c8.8 0 16 7.2 16 16v160z"],paw:[512,512,[],"f1b0","M85.231 330.958C36 330.958 0 273.792 0 231.5c0-28.292 16-58.042 49.538-58.042 49.231 0 85.231 57.458 85.231 99.75 0 28.292-15.692 57.75-49.538 57.75zm348 106.167c0 37.042-32 42.875-63.385 42.875-41.231 0-74.462-26.25-113.846-26.25-41.231 0-76.308 25.958-120.923 25.958-29.847 0-56.308-9.625-56.308-42.583C78.769 368 180.616 265.333 256 265.333s177.231 102.959 177.231 171.792zM182.462 203.792c-49.847 0-80-59.5-80-100.333C102.462 70.792 120.308 32 160 32c50.154 0 80 59.5 80 100.333 0 32.667-17.846 71.459-57.538 71.459zM272 132.333C272 91.5 301.846 32 352 32c39.692 0 57.539 38.792 57.539 71.458 0 40.833-30.154 100.333-80.001 100.333C289.846 203.792 272 165 272 132.333zM512 231.5c0 42.292-36 99.458-85.231 99.458-33.847 0-49.538-29.458-49.538-57.75 0-42.291 35.999-99.75 85.231-99.75C496 173.458 512 203.208 512 231.5z"],"pen-square":[448,512,[],"f14b","M400 480H48c-26.5 0-48-21.5-48-48V80c0-26.5 21.5-48 48-48h352c26.5 0 48 21.5 48 48v352c0 26.5-21.5 48-48 48zM238.1 177.9L102.4 313.6l-6.3 57.1c-.8 7.6 5.6 14.1 13.3 13.3l57.1-6.3L302.2 242c2.3-2.3 2.3-6.1 0-8.5L246.7 178c-2.5-2.4-6.3-2.4-8.6-.1zM345 165.1L314.9 135c-9.4-9.4-24.6-9.4-33.9 0l-23.1 23.1c-2.3 2.3-2.3 6.1 0 8.5l55.5 55.5c2.3 2.3 6.1 2.3 8.5 0L345 199c9.3-9.3 9.3-24.5 0-33.9z"],"pencil-alt":[512,512,[],"f303","M497.9 142.1l-46.1 46.1c-4.7 4.7-12.3 4.7-17 0l-111-111c-4.7-4.7-4.7-12.3 0-17l46.1-46.1c18.7-18.7 49.1-18.7 67.9 0l60.1 60.1c18.8 18.7 18.8 49.1 0 67.9zM284.2 99.8L21.6 362.4.4 483.9c-2.9 16.4 11.4 30.6 27.8 27.8l121.5-21.3 262.6-262.6c4.7-4.7 4.7-12.3 0-17l-111-111c-4.8-4.7-12.4-4.7-17.1 0zM124.1 339.9c-5.5-5.5-5.5-14.3 0-19.8l154-154c5.5-5.5 14.3-5.5 19.8 0s5.5 14.3 0 19.8l-154 154c-5.5 5.5-14.3 5.5-19.8 0zM88 424h48v36.3l-64.5 11.3-31.1-31.1L51.7 376H88v48z"],percent:[448,512,[],"f295","M112 224c61.9 0 112-50.1 112-112S173.9 0 112 0 0 50.1 0 112s50.1 112 112 112zm0-160c26.5 0 48 21.5 48 48s-21.5 48-48 48-48-21.5-48-48 21.5-48 48-48zm224 224c-61.9 0-112 50.1-112 112s50.1 112 112 112 112-50.1 112-112-50.1-112-112-112zm0 160c-26.5 0-48-21.5-48-48s21.5-48 48-48 48 21.5 48 48-21.5 48-48 48zM392.3.2l31.6-.1c19.4-.1 30.9 21.8 19.7 37.8L77.4 501.6a23.95 23.95 0 0 1-19.6 10.2l-33.4.1c-19.5 0-30.9-21.9-19.7-37.8l368-463.7C377.2 4 384.5.2 392.3.2z"],phone:[512,512,[],"f095","M493.397 24.615l-104-23.997c-11.314-2.611-22.879 3.252-27.456 13.931l-48 111.997a24 24 0 0 0 6.862 28.029l60.617 49.596c-35.973 76.675-98.938 140.508-177.249 177.248l-49.596-60.616a24 24 0 0 0-28.029-6.862l-111.997 48C3.873 366.516-1.994 378.08.618 389.397l23.997 104C27.109 504.204 36.748 512 48 512c256.087 0 464-207.532 464-464 0-11.176-7.714-20.873-18.603-23.385z"],"phone-square":[448,512,[],"f098","M400 32H48C21.49 32 0 53.49 0 80v352c0 26.51 21.49 48 48 48h352c26.51 0 48-21.49 48-48V80c0-26.51-21.49-48-48-48zM94 416c-7.033 0-13.057-4.873-14.616-11.627l-14.998-65a15 15 0 0 1 8.707-17.16l69.998-29.999a15 15 0 0 1 17.518 4.289l30.997 37.885c48.944-22.963 88.297-62.858 110.781-110.78l-37.886-30.997a15.001 15.001 0 0 1-4.289-17.518l30-69.998a15 15 0 0 1 17.16-8.707l65 14.998A14.997 14.997 0 0 1 384 126c0 160.292-129.945 290-290 290z"],"phone-volume":[384,512,[],"f2a0","M97.333 506.966c-129.874-129.874-129.681-340.252 0-469.933 5.698-5.698 14.527-6.632 21.263-2.422l64.817 40.513a17.187 17.187 0 0 1 6.849 20.958l-32.408 81.021a17.188 17.188 0 0 1-17.669 10.719l-55.81-5.58c-21.051 58.261-20.612 122.471 0 179.515l55.811-5.581a17.188 17.188 0 0 
1 17.669 10.719l32.408 81.022a17.188 17.188 0 0 1-6.849 20.958l-64.817 40.513a17.19 17.19 0 0 1-21.264-2.422zM247.126 95.473c11.832 20.047 11.832 45.008 0 65.055-3.95 6.693-13.108 7.959-18.718 2.581l-5.975-5.726c-3.911-3.748-4.793-9.622-2.261-14.41a32.063 32.063 0 0 0 0-29.945c-2.533-4.788-1.65-10.662 2.261-14.41l5.975-5.726c5.61-5.378 14.768-4.112 18.718 2.581zm91.787-91.187c60.14 71.604 60.092 175.882 0 247.428-4.474 5.327-12.53 5.746-17.552.933l-5.798-5.557c-4.56-4.371-4.977-11.529-.93-16.379 49.687-59.538 49.646-145.933 0-205.422-4.047-4.85-3.631-12.008.93-16.379l5.798-5.557c5.022-4.813 13.078-4.394 17.552.933zm-45.972 44.941c36.05 46.322 36.108 111.149 0 157.546-4.39 5.641-12.697 6.251-17.856 1.304l-5.818-5.579c-4.4-4.219-4.998-11.095-1.285-15.931 26.536-34.564 26.534-82.572 0-117.134-3.713-4.836-3.115-11.711 1.285-15.931l5.818-5.579c5.159-4.947 13.466-4.337 17.856 1.304z"],plane:[576,512,[],"f072","M472 200H360.211L256.013 5.711A12 12 0 0 0 245.793 0h-57.787c-7.85 0-13.586 7.413-11.616 15.011L209.624 200H99.766l-34.904-58.174A12 12 0 0 0 54.572 136H12.004c-7.572 0-13.252 6.928-11.767 14.353l21.129 105.648L.237 361.646c-1.485 7.426 4.195 14.354 11.768 14.353l42.568-.002c4.215 0 8.121-2.212 10.289-5.826L99.766 312h109.858L176.39 496.989c-1.97 7.599 3.766 15.011 11.616 15.011h57.787a12 12 0 0 0 10.22-5.711L360.212 312H472c57.438 0 104-25.072 104-56s-46.562-56-104-56z"],play:[448,512,[],"f04b","M424.4 214.7L72.4 6.6C43.8-10.3 0 6.1 0 47.9V464c0 37.5 40.7 60.1 72.4 41.3l352-208c31.4-18.5 31.5-64.1 0-82.6z"],"play-circle":[512,512,[],"f144","M256 8C119 8 8 119 8 256s111 248 248 248 248-111 248-248S393 8 256 8zm115.7 272l-176 101c-15.8 8.8-35.7-2.5-35.7-21V152c0-18.4 19.8-29.8 35.7-21l176 107c16.4 9.2 16.4 32.9 0 42z"],plug:[384,512,[],"f1e6","M256 144V32c0-17.673 14.327-32 32-32s32 14.327 32 32v112h-64zm112 16H16c-8.837 0-16 7.163-16 16v32c0 8.837 7.163 16 16 16h16v32c0 77.406 54.969 141.971 128 156.796V512h64v-99.204c73.031-14.825 128-79.39 128-156.796v-32h16c8.837 0 16-7.163 16-16v-32c0-8.837-7.163-16-16-16zm-240-16V32c0-17.673-14.327-32-32-32S64 14.327 64 32v112h64z"],plus:[448,512,[],"f067","M448 294.2v-76.4c0-13.3-10.7-24-24-24H286.2V56c0-13.3-10.7-24-24-24h-76.4c-13.3 0-24 10.7-24 24v137.8H24c-13.3 0-24 10.7-24 24v76.4c0 13.3 10.7 24 24 24h137.8V456c0 13.3 10.7 24 24 24h76.4c13.3 0 24-10.7 24-24V318.2H424c13.3 0 24-10.7 24-24z"],"plus-circle":[512,512,[],"f055","M256 8C119 8 8 119 8 256s111 248 248 248 248-111 248-248S393 8 256 8zm144 276c0 6.6-5.4 12-12 12h-92v92c0 6.6-5.4 12-12 12h-56c-6.6 0-12-5.4-12-12v-92h-92c-6.6 0-12-5.4-12-12v-56c0-6.6 5.4-12 12-12h92v-92c0-6.6 5.4-12 12-12h56c6.6 0 12 5.4 12 12v92h92c6.6 0 12 5.4 12 12v56z"],"plus-square":[448,512,[],"f0fe","M400 32H48C21.5 32 0 53.5 0 80v352c0 26.5 21.5 48 48 48h352c26.5 0 48-21.5 48-48V80c0-26.5-21.5-48-48-48zm-32 252c0 6.6-5.4 12-12 12h-92v92c0 6.6-5.4 12-12 12h-56c-6.6 0-12-5.4-12-12v-92H92c-6.6 0-12-5.4-12-12v-56c0-6.6 5.4-12 12-12h92v-92c0-6.6 5.4-12 12-12h56c6.6 0 12 5.4 12 12v92h92c6.6 0 12 5.4 12 12v56z"],podcast:[448,512,[],"f2ce","M267.429 488.563C262.286 507.573 242.858 512 224 512c-18.857 0-38.286-4.427-43.428-23.437C172.927 460.134 160 388.898 160 355.75c0-35.156 31.142-43.75 64-43.75s64 8.594 64 43.75c0 32.949-12.871 104.179-20.571 132.813zM156.867 288.554c-18.693-18.308-29.958-44.173-28.784-72.599 2.054-49.724 42.395-89.956 92.124-91.881C274.862 121.958 320 165.807 320 220c0 26.827-11.064 51.116-28.866 68.552-2.675 2.62-2.401 6.986.628 9.187 9.312 6.765 16.46 15.343 21.234 25.363 1.741 3.654 6.497 4.66 9.449 
1.891 28.826-27.043 46.553-65.783 45.511-108.565-1.855-76.206-63.595-138.208-139.793-140.369C146.869 73.753 80 139.215 80 220c0 41.361 17.532 78.7 45.55 104.989 2.953 2.771 7.711 1.77 9.453-1.887 4.774-10.021 11.923-18.598 21.235-25.363 3.029-2.2 3.304-6.566.629-9.185zM224 0C100.204 0 0 100.185 0 224c0 89.992 52.602 165.647 125.739 201.408 4.333 2.118 9.267-1.544 8.535-6.31-2.382-15.512-4.342-30.946-5.406-44.339-.146-1.836-1.149-3.486-2.678-4.512-47.4-31.806-78.564-86.016-78.187-147.347.592-96.237 79.29-174.648 175.529-174.899C320.793 47.747 400 126.797 400 224c0 61.932-32.158 116.49-80.65 147.867-.999 14.037-3.069 30.588-5.624 47.23-.732 4.767 4.203 8.429 8.535 6.31C395.227 389.727 448 314.187 448 224 448 100.205 347.815 0 224 0zm0 160c-35.346 0-64 28.654-64 64s28.654 64 64 64 64-28.654 64-64-28.654-64-64-64z"],"pound-sign":[320,512,[],"f154","M308 352h-45.495c-6.627 0-12 5.373-12 12v50.848H128V288h84c6.627 0 12-5.373 12-12v-40c0-6.627-5.373-12-12-12h-84v-63.556c0-32.266 24.562-57.086 61.792-57.086 23.658 0 45.878 11.505 57.652 18.849 5.151 3.213 11.888 2.051 15.688-2.685l28.493-35.513c4.233-5.276 3.279-13.005-2.119-17.081C273.124 54.56 236.576 32 187.931 32 106.026 32 48 84.742 48 157.961V224H20c-6.627 0-12 5.373-12 12v40c0 6.627 5.373 12 12 12h28v128H12c-6.627 0-12 5.373-12 12v40c0 6.627 5.373 12 12 12h296c6.627 0 12-5.373 12-12V364c0-6.627-5.373-12-12-12z"],"power-off":[512,512,[],"f011","M400 54.1c63 45 104 118.6 104 201.9 0 136.8-110.8 247.7-247.5 248C120 504.3 8.2 393 8 256.4 7.9 173.1 48.9 99.3 111.8 54.2c11.7-8.3 28-4.8 35 7.7L162.6 90c5.9 10.5 3.1 23.8-6.6 31-41.5 30.8-68 79.6-68 134.9-.1 92.3 74.5 168.1 168 168.1 91.6 0 168.6-74.2 168-169.1-.3-51.8-24.7-101.8-68.1-134-9.7-7.2-12.4-20.5-6.5-30.9l15.8-28.1c7-12.4 23.2-16.1 34.8-7.8zM296 264V24c0-13.3-10.7-24-24-24h-32c-13.3 0-24 10.7-24 24v240c0 13.3 10.7 24 24 24h32c13.3 0 24-10.7 24-24z"],print:[512,512,[],"f02f","M464 192h-16V81.941a24 24 0 0 0-7.029-16.97L383.029 7.029A24 24 0 0 0 366.059 0H88C74.745 0 64 10.745 64 24v168H48c-26.51 0-48 21.49-48 48v132c0 6.627 5.373 12 12 12h52v104c0 13.255 10.745 24 24 24h336c13.255 0 24-10.745 24-24V384h52c6.627 0 12-5.373 12-12V240c0-26.51-21.49-48-48-48zm-80 256H128v-96h256v96zM128 224V64h192v40c0 13.2 10.8 24 24 24h40v96H128zm304 72c-13.254 0-24-10.746-24-24s10.746-24 24-24 24 10.746 24 24-10.746 24-24 24z"],"puzzle-piece":[576,512,[],"f12e","M519.442 288.651c-41.519 0-59.5 31.593-82.058 31.593C377.409 320.244 432 144 432 144s-196.288 80-196.288-3.297c0-35.827 36.288-46.25 36.288-85.985C272 19.216 243.885 0 210.539 0c-34.654 0-66.366 18.891-66.366 56.346 0 41.364 31.711 59.277 31.711 81.75C175.885 207.719 0 166.758 0 166.758v333.237s178.635 41.047 178.635-28.662c0-22.473-40-40.107-40-81.471 0-37.456 29.25-56.346 63.577-56.346 33.673 0 61.788 19.216 61.788 54.717 0 39.735-36.288 50.158-36.288 85.985 0 60.803 129.675 25.73 181.23 25.73 0 0-34.725-120.101 25.827-120.101 35.962 0 46.423 36.152 86.308 36.152C556.712 416 576 387.99 576 354.443c0-34.199-18.962-65.792-56.558-65.792z"],qrcode:[448,512,[],"f029","M0 224h192V32H0v192zM64 96h64v64H64V96zm192-64v192h192V32H256zm128 128h-64V96h64v64zM0 480h192V288H0v192zm64-128h64v64H64v-64zm352-64h32v128h-96v-32h-32v96h-64V288h96v32h64v-32zm0 160h32v32h-32v-32zm-64 0h32v32h-32v-32z"],question:[384,512,[],"f128","M202.021 0C122.202 0 70.503 32.703 29.914 91.026c-7.363 10.58-5.093 25.086 5.178 32.874l43.138 32.709c10.373 7.865 25.132 6.026 33.253-4.148 25.049-31.381 43.63-49.449 82.757-49.449 30.764 0 68.816 19.799 68.816 49.631 0 22.552-18.617 
34.134-48.993 51.164-35.423 19.86-82.299 44.576-82.299 106.405V320c0 13.255 10.745 24 24 24h72.471c13.255 0 24-10.745 24-24v-5.773c0-42.86 125.268-44.645 125.268-160.627C377.504 66.256 286.902 0 202.021 0zM192 373.459c-38.196 0-69.271 31.075-69.271 69.271 0 38.195 31.075 69.27 69.271 69.27s69.271-31.075 69.271-69.271-31.075-69.27-69.271-69.27z"],"question-circle":[512,512,[],"f059","M504 256c0 136.997-111.043 248-248 248S8 392.997 8 256C8 119.083 119.043 8 256 8s248 111.083 248 248zM262.655 90c-54.497 0-89.255 22.957-116.549 63.758-3.536 5.286-2.353 12.415 2.715 16.258l34.699 26.31c5.205 3.947 12.621 3.008 16.665-2.122 17.864-22.658 30.113-35.797 57.303-35.797 20.429 0 45.698 13.148 45.698 32.958 0 14.976-12.363 22.667-32.534 33.976C247.128 238.528 216 254.941 216 296v4c0 6.627 5.373 12 12 12h56c6.627 0 12-5.373 12-12v-1.333c0-28.462 83.186-29.647 83.186-106.667 0-58.002-60.165-102-116.531-102zM256 338c-25.365 0-46 20.635-46 46 0 25.364 20.635 46 46 46s46-20.636 46-46c0-25.365-20.635-46-46-46z"],"quote-left":[512,512,[],"f10d","M0 432V304C0 166.982 63.772 67.676 193.827 32.828 209.052 28.748 224 40.265 224 56.027v33.895c0 10.057-6.228 19.133-15.687 22.55C142.316 136.312 104 181.946 104 256h72c26.51 0 48 21.49 48 48v128c0 26.51-21.49 48-48 48H48c-26.51 0-48-21.49-48-48zm336 48h128c26.51 0 48-21.49 48-48V304c0-26.51-21.49-48-48-48h-72c0-74.054 38.316-119.688 104.313-143.528C505.772 109.055 512 99.979 512 89.922V56.027c0-15.762-14.948-27.279-30.173-23.199C351.772 67.676 288 166.982 288 304v128c0 26.51 21.49 48 48 48z"],"quote-right":[512,512,[],"f10e","M512 80v128c0 137.018-63.772 236.324-193.827 271.172-15.225 4.08-30.173-7.437-30.173-23.199v-33.895c0-10.057 6.228-19.133 15.687-22.55C369.684 375.688 408 330.054 408 256h-72c-26.51 0-48-21.49-48-48V80c0-26.51 21.49-48 48-48h128c26.51 0 48 21.49 48 48zM176 32H48C21.49 32 0 53.49 0 80v128c0 26.51 21.49 48 48 48h72c0 74.054-38.316 119.688-104.313 143.528C6.228 402.945 0 412.021 0 422.078v33.895c0 15.762 14.948 27.279 30.173 23.199C160.228 444.324 224 345.018 224 208V80c0-26.51-21.49-48-48-48z"],random:[512,512,[],"f074","M504.971 359.029c9.373 9.373 9.373 24.569 0 33.941l-80 79.984c-15.01 15.01-40.971 4.49-40.971-16.971V416h-58.785a12.004 12.004 0 0 1-8.773-3.812l-70.556-75.596 53.333-57.143L352 336h32v-39.981c0-21.438 25.943-31.998 40.971-16.971l80 79.981zM12 176h84l52.781 56.551 53.333-57.143-70.556-75.596A11.999 11.999 0 0 0 122.785 96H12c-6.627 0-12 5.373-12 12v56c0 6.627 5.373 12 12 12zm372 0v39.984c0 21.46 25.961 31.98 40.971 16.971l80-79.984c9.373-9.373 9.373-24.569 0-33.941l-80-79.981C409.943 24.021 384 34.582 384 56.019V96h-58.785a12.004 12.004 0 0 0-8.773 3.812L96 336H12c-6.627 0-12 5.373-12 12v56c0 6.627 5.373 12 12 12h110.785c3.326 0 6.503-1.381 8.773-3.812L352 176h32z"],recycle:[512,512,[],"f1b8","M184.561 261.903c3.232 13.997-12.123 24.635-24.068 17.168l-40.736-25.455-50.867 81.402C55.606 356.273 70.96 384 96.012 384H148c6.627 0 12 5.373 12 12v40c0 6.627-5.373 12-12 12H96.115c-75.334 0-121.302-83.048-81.408-146.88l50.822-81.388-40.725-25.448c-12.081-7.547-8.966-25.961 4.879-29.158l110.237-25.45c8.611-1.988 17.201 3.381 19.189 11.99l25.452 110.237zm98.561-182.915l41.289 66.076-40.74 25.457c-12.051 7.528-9 25.953 4.879 29.158l110.237 25.45c8.672 1.999 17.215-3.438 19.189-11.99l25.45-110.237c3.197-13.844-11.99-24.719-24.068-17.168l-40.687 25.424-41.263-66.082c-37.521-60.033-125.209-60.171-162.816 0l-17.963 28.766c-3.51 5.62-1.8 13.021 3.82 16.533l33.919 21.195c5.62 3.512 13.024 1.803 16.536-3.817l17.961-28.743c12.712-20.341 
41.973-19.676 54.257-.022zM497.288 301.12l-27.515-44.065c-3.511-5.623-10.916-7.334-16.538-3.821l-33.861 21.159c-5.62 3.512-7.33 10.915-3.818 16.536l27.564 44.112c13.257 21.211-2.057 48.96-27.136 48.96H320V336.02c0-14.213-17.242-21.383-27.313-11.313l-80 79.981c-6.249 6.248-6.249 16.379 0 22.627l80 79.989C302.689 517.308 320 510.3 320 495.989V448h95.88c75.274 0 121.335-82.997 81.408-146.88z"],redo:[512,512,[],"f01e","M500.333 0h-47.411c-6.853 0-12.314 5.729-11.986 12.574l3.966 82.759C399.416 41.899 331.672 8 256.001 8 119.34 8 7.899 119.526 8 256.187 8.101 393.068 119.096 504 256 504c63.926 0 122.202-24.187 166.178-63.908 5.113-4.618 5.354-12.561.482-17.433l-33.971-33.971c-4.466-4.466-11.64-4.717-16.38-.543C341.308 415.448 300.606 432 256 432c-97.267 0-176-78.716-176-176 0-97.267 78.716-176 176-176 60.892 0 114.506 30.858 146.099 77.8l-101.525-4.865c-6.845-.328-12.574 5.133-12.574 11.986v47.411c0 6.627 5.373 12 12 12h200.333c6.627 0 12-5.373 12-12V12c0-6.627-5.373-12-12-12z"],"redo-alt":[512,512,[],"f2f9","M256.455 8c66.269.119 126.437 26.233 170.859 68.685l35.715-35.715C478.149 25.851 504 36.559 504 57.941V192c0 13.255-10.745 24-24 24H345.941c-21.382 0-32.09-25.851-16.971-40.971l41.75-41.75c-30.864-28.899-70.801-44.907-113.23-45.273-92.398-.798-170.283 73.977-169.484 169.442C88.764 348.009 162.184 424 256 424c41.127 0 79.997-14.678 110.629-41.556 4.743-4.161 11.906-3.908 16.368.553l39.662 39.662c4.872 4.872 4.631 12.815-.482 17.433C378.202 479.813 319.926 504 256 504 119.034 504 8.001 392.967 8 256.002 7.999 119.193 119.646 7.755 256.455 8z"],registered:[512,512,[],"f25d","M285.363 207.475c0 18.6-9.831 28.431-28.431 28.431h-29.876v-56.14h23.378c28.668 0 34.929 8.773 34.929 27.709zM504 256c0 136.967-111.033 248-248 248S8 392.967 8 256 119.033 8 256 8s248 111.033 248 248zM363.411 360.414c-46.729-84.825-43.299-78.636-44.702-80.98 23.432-15.172 37.945-42.979 37.945-74.486 0-54.244-31.5-89.252-105.498-89.252h-70.667c-13.255 0-24 10.745-24 24V372c0 13.255 10.745 24 24 24h22.567c13.255 0 24-10.745 24-24v-71.663h25.556l44.129 82.937a24.001 24.001 0 0 0 21.188 12.727h24.464c18.261-.001 29.829-19.591 21.018-35.587z"],reply:[512,512,[],"f3e5","M8.309 189.836L184.313 37.851C199.719 24.546 224 35.347 224 56.015v80.053c160.629 1.839 288 34.032 288 186.258 0 61.441-39.581 122.309-83.333 154.132-13.653 9.931-33.111-2.533-28.077-18.631 45.344-145.012-21.507-183.51-176.59-185.742V360c0 20.7-24.3 31.453-39.687 18.164l-176.004-152c-11.071-9.562-11.086-26.753 0-36.328z"],"reply-all":[576,512,[],"f122","M136.309 189.836L312.313 37.851C327.72 24.546 352 35.348 352 56.015v82.763c129.182 10.231 224 52.212 224 183.548 0 61.441-39.582 122.309-83.333 154.132-13.653 9.931-33.111-2.533-28.077-18.631 38.512-123.162-3.922-169.482-112.59-182.015v84.175c0 20.701-24.3 31.453-39.687 18.164L136.309 226.164c-11.071-9.561-11.086-26.753 0-36.328zm-128 36.328L184.313 378.15C199.7 391.439 224 380.687 224 359.986v-15.818l-108.606-93.785A55.96 55.96 0 0 1 96 207.998a55.953 55.953 0 0 1 19.393-42.38L224 71.832V56.015c0-20.667-24.28-31.469-39.687-18.164L8.309 189.836c-11.086 9.575-11.071 26.767 0 36.328z"],retweet:[640,512,[],"f079","M629.657 343.598L528.971 444.284c-9.373 9.372-24.568 9.372-33.941 0L394.343 343.598c-9.373-9.373-9.373-24.569 0-33.941l10.823-10.823c9.562-9.562 25.133-9.34 34.419.492L480 342.118V160H292.451a24.005 24.005 0 0 1-16.971-7.029l-16-16C244.361 121.851 255.069 96 276.451 96H520c13.255 0 24 10.745 24 24v222.118l40.416-42.792c9.285-9.831 24.856-10.054 34.419-.492l10.823 10.823c9.372 9.372 9.372 24.569-.001 
33.941zm-265.138 15.431A23.999 23.999 0 0 0 347.548 352H160V169.881l40.416 42.792c9.286 9.831 24.856 10.054 34.419.491l10.822-10.822c9.373-9.373 9.373-24.569 0-33.941L144.971 67.716c-9.373-9.373-24.569-9.373-33.941 0L10.343 168.402c-9.373 9.373-9.373 24.569 0 33.941l10.822 10.822c9.562 9.562 25.133 9.34 34.419-.491L96 169.881V392c0 13.255 10.745 24 24 24h243.549c21.382 0 32.09-25.851 16.971-40.971l-16.001-16z"],road:[576,512,[],"f018","M567.3 383.6L429.9 78.2C426 69.5 417.4 64 408 64h-96.1l1.9 18.8c.7 7.1-4.8 13.2-11.9 13.2H274c-7.1 0-12.7-6.2-11.9-13.2L264 64h-96c-9.4 0-18 5.5-21.9 14.2L8.7 383.6C3.2 395.8 0 409.6 0 424c0 13.3 10.7 24 24 24h213.6c-7.1 0-12.7-6.2-11.9-13.2l10.8-104c.6-6.1 5.8-10.8 11.9-10.8h79.2c6.1 0 11.3 4.6 11.9 10.8l10.8 104c.7 7.1-4.8 13.2-11.9 13.2H552c13.2 0 24-10.7 24-24 0-13.9-3-27.7-8.7-40.4zM254.7 154.8l3.3-32c.6-6.1 5.8-10.8 11.9-10.8h36.2c6.1 0 11.3 4.6 11.9 10.8l3.3 32c.7 7.1-4.8 13.2-11.9 13.2h-42.8c-7.1 0-12.7-6.2-11.9-13.2zM321.8 288h-67.6c-7.1 0-12.7-6.2-11.9-13.2l7.4-72c.6-6.1 5.8-10.8 11.9-10.8h52.7c6.1 0 11.3 4.6 11.9 10.8l7.4 72c.9 7-4.7 13.2-11.8 13.2z"],rocket:[512,512,[],"f135","M505.1 19.1C503.8 13 499 8.2 492.9 6.9 460.7 0 435.5 0 410.4 0 307.2 0 245.3 55.2 199.1 128H94.9c-18.2 0-34.8 10.3-42.9 26.5L2.6 253.3c-8 16 3.6 34.7 21.5 34.7h95.1c-5.9 12.8-11.9 25.5-18 37.7-3.1 6.2-1.9 13.6 3 18.5l63.6 63.6c4.9 4.9 12.3 6.1 18.5 3 12.2-6.1 24.9-12 37.7-17.9V488c0 17.8 18.8 29.4 34.7 21.5l98.7-49.4c16.3-8.1 26.5-24.8 26.5-42.9V312.8c72.6-46.3 128-108.4 128-211.1.1-25.2.1-50.4-6.8-82.6zM400 160c-26.5 0-48-21.5-48-48s21.5-48 48-48 48 21.5 48 48-21.5 48-48 48z"],rss:[448,512,[],"f09e","M128.081 415.959c0 35.369-28.672 64.041-64.041 64.041S0 451.328 0 415.959s28.672-64.041 64.041-64.041 64.04 28.673 64.04 64.041zm175.66 47.25c-8.354-154.6-132.185-278.587-286.95-286.95C7.656 175.765 0 183.105 0 192.253v48.069c0 8.415 6.49 15.472 14.887 16.018 111.832 7.284 201.473 96.702 208.772 208.772.547 8.397 7.604 14.887 16.018 14.887h48.069c9.149.001 16.489-7.655 15.995-16.79zm144.249.288C439.596 229.677 251.465 40.445 16.503 32.01 7.473 31.686 0 38.981 0 48.016v48.068c0 8.625 6.835 15.645 15.453 15.999 191.179 7.839 344.627 161.316 352.465 352.465.353 8.618 7.373 15.453 15.999 15.453h48.068c9.034-.001 16.329-7.474 16.005-16.504z"],"rss-square":[448,512,[],"f143","M400 32H48C21.49 32 0 53.49 0 80v352c0 26.51 21.49 48 48 48h352c26.51 0 48-21.49 48-48V80c0-26.51-21.49-48-48-48zM112 416c-26.51 0-48-21.49-48-48s21.49-48 48-48 48 21.49 48 48-21.49 48-48 48zm157.533 0h-34.335c-6.011 0-11.051-4.636-11.442-10.634-5.214-80.05-69.243-143.92-149.123-149.123-5.997-.39-10.633-5.431-10.633-11.441v-34.335c0-6.535 5.468-11.777 11.994-11.425 110.546 5.974 198.997 94.536 204.964 204.964.352 6.526-4.89 11.994-11.425 11.994zm103.027 0h-34.334c-6.161 0-11.175-4.882-11.427-11.038-5.598-136.535-115.204-246.161-251.76-251.76C68.882 152.949 64 147.935 64 141.774V107.44c0-6.454 5.338-11.664 11.787-11.432 167.83 6.025 302.21 141.191 308.205 308.205.232 6.449-4.978 11.787-11.432 11.787z"],"ruble-sign":[384,512,[],"f158","M239.36 320C324.48 320 384 260.542 384 175.071S324.48 32 239.36 32H76c-6.627 0-12 5.373-12 12v206.632H12c-6.627 0-12 5.373-12 12V308c0 6.627 5.373 12 12 12h52v32H12c-6.627 0-12 5.373-12 12v40c0 6.627 5.373 12 12 12h52v52c0 6.627 5.373 12 12 12h58.56c6.627 0 12-5.373 12-12v-52H308c6.627 0 12-5.373 12-12v-40c0-6.627-5.373-12-12-12H146.56v-32h92.8zm-92.8-219.252h78.72c46.72 0 74.88 29.11 74.88 74.323 0 45.832-28.16 75.561-76.16 
75.561h-77.44V100.748z"],"rupee-sign":[320,512,[],"f156","M308 96c6.627 0 12-5.373 12-12V44c0-6.627-5.373-12-12-12H12C5.373 32 0 37.373 0 44v44.748c0 6.627 5.373 12 12 12h85.28c27.308 0 48.261 9.958 60.97 27.252H12c-6.627 0-12 5.373-12 12v40c0 6.627 5.373 12 12 12h158.757c-6.217 36.086-32.961 58.632-74.757 58.632H12c-6.627 0-12 5.373-12 12v53.012c0 3.349 1.4 6.546 3.861 8.818l165.052 152.356a12.001 12.001 0 0 0 8.139 3.182h82.562c10.924 0 16.166-13.408 8.139-20.818L116.871 319.906c76.499-2.34 131.144-53.395 138.318-127.906H308c6.627 0 12-5.373 12-12v-40c0-6.627-5.373-12-12-12h-58.69c-3.486-11.541-8.28-22.246-14.252-32H308z"],save:[448,512,[],"f0c7","M433.941 129.941l-83.882-83.882A48 48 0 0 0 316.118 32H48C21.49 32 0 53.49 0 80v352c0 26.51 21.49 48 48 48h352c26.51 0 48-21.49 48-48V163.882a48 48 0 0 0-14.059-33.941zM224 416c-35.346 0-64-28.654-64-64 0-35.346 28.654-64 64-64s64 28.654 64 64c0 35.346-28.654 64-64 64zm96-304.52V212c0 6.627-5.373 12-12 12H76c-6.627 0-12-5.373-12-12V108c0-6.627 5.373-12 12-12h228.52c3.183 0 6.235 1.264 8.485 3.515l3.48 3.48A11.996 11.996 0 0 1 320 111.48z"],search:[512,512,[],"f002","M505 442.7L405.3 343c-4.5-4.5-10.6-7-17-7H372c27.6-35.3 44-79.7 44-128C416 93.1 322.9 0 208 0S0 93.1 0 208s93.1 208 208 208c48.3 0 92.7-16.4 128-44v16.3c0 6.4 2.5 12.5 7 17l99.7 99.7c9.4 9.4 24.6 9.4 33.9 0l28.3-28.3c9.4-9.4 9.4-24.6.1-34zM208 336c-70.7 0-128-57.2-128-128 0-70.7 57.2-128 128-128 70.7 0 128 57.2 128 128 0 70.7-57.2 128-128 128z"],"search-minus":[512,512,[],"f010","M304 192v32c0 6.6-5.4 12-12 12H124c-6.6 0-12-5.4-12-12v-32c0-6.6 5.4-12 12-12h168c6.6 0 12 5.4 12 12zm201 284.7L476.7 505c-9.4 9.4-24.6 9.4-33.9 0L343 405.3c-4.5-4.5-7-10.6-7-17V372c-35.3 27.6-79.7 44-128 44C93.1 416 0 322.9 0 208S93.1 0 208 0s208 93.1 208 208c0 48.3-16.4 92.7-44 128h16.3c6.4 0 12.5 2.5 17 7l99.7 99.7c9.3 9.4 9.3 24.6 0 34zM344 208c0-75.2-60.8-136-136-136S72 132.8 72 208s60.8 136 136 136 136-60.8 136-136z"],"search-plus":[512,512,[],"f00e","M304 192v32c0 6.6-5.4 12-12 12h-56v56c0 6.6-5.4 12-12 12h-32c-6.6 0-12-5.4-12-12v-56h-56c-6.6 0-12-5.4-12-12v-32c0-6.6 5.4-12 12-12h56v-56c0-6.6 5.4-12 12-12h32c6.6 0 12 5.4 12 12v56h56c6.6 0 12 5.4 12 12zm201 284.7L476.7 505c-9.4 9.4-24.6 9.4-33.9 0L343 405.3c-4.5-4.5-7-10.6-7-17V372c-35.3 27.6-79.7 44-128 44C93.1 416 0 322.9 0 208S93.1 0 208 0s208 93.1 208 208c0 48.3-16.4 92.7-44 128h16.3c6.4 0 12.5 2.5 17 7l99.7 99.7c9.3 9.4 9.3 24.6 0 34zM344 208c0-75.2-60.8-136-136-136S72 132.8 72 208s60.8 136 136 136 136-60.8 136-136z"],server:[512,512,[],"f233","M480 160H32c-17.673 0-32-14.327-32-32V64c0-17.673 14.327-32 32-32h448c17.673 0 32 14.327 32 32v64c0 17.673-14.327 32-32 32zm-48-88c-13.255 0-24 10.745-24 24s10.745 24 24 24 24-10.745 24-24-10.745-24-24-24zm-64 0c-13.255 0-24 10.745-24 24s10.745 24 24 24 24-10.745 24-24-10.745-24-24-24zm112 248H32c-17.673 0-32-14.327-32-32v-64c0-17.673 14.327-32 32-32h448c17.673 0 32 14.327 32 32v64c0 17.673-14.327 32-32 32zm-48-88c-13.255 0-24 10.745-24 24s10.745 24 24 24 24-10.745 24-24-10.745-24-24-24zm-64 0c-13.255 0-24 10.745-24 24s10.745 24 24 24 24-10.745 24-24-10.745-24-24-24zm112 248H32c-17.673 0-32-14.327-32-32v-64c0-17.673 14.327-32 32-32h448c17.673 0 32 14.327 32 32v64c0 17.673-14.327 32-32 32zm-48-88c-13.255 0-24 10.745-24 24s10.745 24 24 24 24-10.745 24-24-10.745-24-24-24zm-64 0c-13.255 0-24 10.745-24 24s10.745 24 24 24 24-10.745 24-24-10.745-24-24-24z"],share:[512,512,[],"f064","M503.691 189.836L327.687 37.851C312.281 24.546 288 35.347 288 56.015v80.053C127.371 137.907 0 170.1 0 322.326c0 61.441 39.581 
122.309 83.333 154.132 13.653 9.931 33.111-2.533 28.077-18.631C66.066 312.814 132.917 274.316 288 272.085V360c0 20.7 24.3 31.453 39.687 18.164l176.004-152c11.071-9.562 11.086-26.753 0-36.328z"],"share-alt":[448,512,[],"f1e0","M352 320c-22.608 0-43.387 7.819-59.79 20.895l-102.486-64.054a96.551 96.551 0 0 0 0-41.683l102.486-64.054C308.613 184.181 329.392 192 352 192c53.019 0 96-42.981 96-96S405.019 0 352 0s-96 42.981-96 96c0 7.158.79 14.13 2.276 20.841L155.79 180.895C139.387 167.819 118.608 160 96 160c-53.019 0-96 42.981-96 96s42.981 96 96 96c22.608 0 43.387-7.819 59.79-20.895l102.486 64.054A96.301 96.301 0 0 0 256 416c0 53.019 42.981 96 96 96s96-42.981 96-96-42.981-96-96-96z"],"share-alt-square":[448,512,[],"f1e1","M448 80v352c0 26.51-21.49 48-48 48H48c-26.51 0-48-21.49-48-48V80c0-26.51 21.49-48 48-48h352c26.51 0 48 21.49 48 48zM304 296c-14.562 0-27.823 5.561-37.783 14.671l-67.958-40.775a56.339 56.339 0 0 0 0-27.793l67.958-40.775C276.177 210.439 289.438 216 304 216c30.928 0 56-25.072 56-56s-25.072-56-56-56-56 25.072-56 56c0 4.797.605 9.453 1.74 13.897l-67.958 40.775C171.823 205.561 158.562 200 144 200c-30.928 0-56 25.072-56 56s25.072 56 56 56c14.562 0 27.823-5.561 37.783-14.671l67.958 40.775a56.088 56.088 0 0 0-1.74 13.897c0 30.928 25.072 56 56 56s56-25.072 56-56C360 321.072 334.928 296 304 296z"],"share-square":[576,512,[],"f14d","M568.482 177.448L424.479 313.433C409.3 327.768 384 317.14 384 295.985v-71.963c-144.575.97-205.566 35.113-164.775 171.353 4.483 14.973-12.846 26.567-25.006 17.33C155.252 383.105 120 326.488 120 269.339c0-143.937 117.599-172.5 264-173.312V24.012c0-21.174 25.317-31.768 40.479-17.448l144.003 135.988c10.02 9.463 10.028 25.425 0 34.896zM384 379.128V448H64V128h50.916a11.99 11.99 0 0 0 8.648-3.693c14.953-15.568 32.237-27.89 51.014-37.676C185.708 80.83 181.584 64 169.033 64H48C21.49 64 0 85.49 0 112v352c0 26.51 21.49 48 48 48h352c26.51 0 48-21.49 48-48v-88.806c0-8.288-8.197-14.066-16.011-11.302a71.83 71.83 0 0 1-34.189 3.377c-7.27-1.046-13.8 4.514-13.8 11.859z"],"shekel-sign":[448,512,[],"f20b","M170.12 96H80v372c0 6.627-5.373 12-12 12H12c-6.627 0-12-5.373-12-12V44c0-6.627 5.373-12 12-12h168.36C265.48 32 325 89.6 325 175.071V359c0 6.627-5.373 12-12 12h-44c-13.255 0-24-10.745-24-24V170.323C245 125.11 216.839 96 170.12 96zM436 32h-56c-6.627 0-12 5.373-12 12v372h-90.12c-46.72 0-74.88-29.11-74.88-74.323V165c0-13.255-10.745-24-24-24h-44c-6.627 0-12 5.373-12 12v183.929C123 422.4 182.52 480 267.64 480H436c6.627 0 12-5.373 12-12V44c0-6.627-5.373-12-12-12z"],"shield-alt":[512,512,[],"f3ed","M496 128c0 221.282-135.934 344.645-221.539 380.308a48 48 0 0 1-36.923 0C130.495 463.713 16 326.487 16 128a48 48 0 0 1 29.539-44.308l192-80a48 48 0 0 1 36.923 0l192 80A48 48 0 0 1 496 128zM256 446.313l.066.034c93.735-46.689 172.497-156.308 175.817-307.729L256 65.333v380.98z"],ship:[640,512,[],"f21a","M496.616 372.639l70.012-70.012c16.899-16.9 9.942-45.771-12.836-53.092L512 236.102V96c0-17.673-14.327-32-32-32h-64V24c0-13.255-10.745-24-24-24H248c-13.255 0-24 10.745-24 24v40h-64c-17.673 0-32 14.327-32 32v140.102l-41.792 13.433c-22.753 7.313-29.754 36.173-12.836 53.092l70.012 70.012C125.828 416.287 85.587 448 24 448c-13.255 0-24 10.745-24 24v16c0 13.255 10.745 24 24 24 61.023 0 107.499-20.61 143.258-59.396C181.677 487.432 216.021 512 256 512h128c39.979 0 74.323-24.568 88.742-59.396C508.495 491.384 554.968 512 616 512c13.255 0 24-10.745 24-24v-16c0-13.255-10.745-24-24-24-60.817 0-101.542-31.001-119.384-75.361zM192 128h256v87.531l-118.208-37.995a31.995 31.995 0 0 0-19.584 0L192 
215.531V128z"],"shopping-bag":[448,512,[],"f290","M352 160v-32C352 57.42 294.579 0 224 0 153.42 0 96 57.42 96 128v32H0v272c0 44.183 35.817 80 80 80h288c44.183 0 80-35.817 80-80V160h-96zm-192-32c0-35.29 28.71-64 64-64s64 28.71 64 64v32H160v-32zm160 120c-13.255 0-24-10.745-24-24s10.745-24 24-24 24 10.745 24 24-10.745 24-24 24zm-192 0c-13.255 0-24-10.745-24-24s10.745-24 24-24 24 10.745 24 24-10.745 24-24 24z"],"shopping-basket":[576,512,[],"f291","M576 216v16c0 13.255-10.745 24-24 24h-8l-26.113 182.788C514.509 462.435 494.257 480 470.37 480H105.63c-23.887 0-44.139-17.565-47.518-41.212L32 256h-8c-13.255 0-24-10.745-24-24v-16c0-13.255 10.745-24 24-24h67.341l106.78-146.821c10.395-14.292 30.407-17.453 44.701-7.058 14.293 10.395 17.453 30.408 7.058 44.701L170.477 192h235.046L326.12 82.821c-10.395-14.292-7.234-34.306 7.059-44.701 14.291-10.395 34.306-7.235 44.701 7.058L484.659 192H552c13.255 0 24 10.745 24 24zM312 392V280c0-13.255-10.745-24-24-24s-24 10.745-24 24v112c0 13.255 10.745 24 24 24s24-10.745 24-24zm112 0V280c0-13.255-10.745-24-24-24s-24 10.745-24 24v112c0 13.255 10.745 24 24 24s24-10.745 24-24zm-224 0V280c0-13.255-10.745-24-24-24s-24 10.745-24 24v112c0 13.255 10.745 24 24 24s24-10.745 24-24z"],"shopping-cart":[576,512,[],"f07a","M528.12 301.319l47.273-208C578.806 78.301 567.391 64 551.99 64H159.208l-9.166-44.81C147.758 8.021 137.93 0 126.529 0H24C10.745 0 0 10.745 0 24v16c0 13.255 10.745 24 24 24h69.883l70.248 343.435C147.325 417.1 136 435.222 136 456c0 30.928 25.072 56 56 56s56-25.072 56-56c0-15.674-6.447-29.835-16.824-40h209.647C430.447 426.165 424 440.326 424 456c0 30.928 25.072 56 56 56s56-25.072 56-56c0-22.172-12.888-41.332-31.579-50.405l5.517-24.276c3.413-15.018-8.002-29.319-23.403-29.319H218.117l-6.545-32h293.145c11.206 0 20.92-7.754 23.403-18.681z"],shower:[512,512,[],"f2cc","M389.66 135.6L231.6 293.66c-9.37 9.37-24.57 9.37-33.94 0l-11.32-11.32c-9.37-9.37-9.37-24.57 0-33.94l.11-.11c-34.03-40.21-35.16-98.94-3.39-140.38-11.97-7.55-26.14-11.91-41.3-11.91C98.88 96 64 130.88 64 173.76V480H0V173.76C0 95.59 63.59 32 141.76 32c36.93 0 70.61 14.2 95.86 37.42 35.9-11.51 76.5-4.5 106.67 21.03l.11-.11c9.37-9.37 24.57-9.37 33.94 0l11.32 11.32c9.37 9.37 9.37 24.57 0 33.94zM384 208c0 8.837-7.163 16-16 16s-16-7.163-16-16 7.163-16 16-16 16 7.163 16 16zm32 0c0-8.837 7.163-16 16-16s16 7.163 16 16-7.163 16-16 16-16-7.163-16-16zm96 0c0 8.837-7.163 16-16 16s-16-7.163-16-16 7.163-16 16-16 16 7.163 16 16zm-160 32c0 8.837-7.163 16-16 16s-16-7.163-16-16 7.163-16 16-16 16 7.163 16 16zm48-16c8.837 0 16 7.163 16 16s-7.163 16-16 16-16-7.163-16-16 7.163-16 16-16zm80 16c0 8.837-7.163 16-16 16s-16-7.163-16-16 7.163-16 16-16 16 7.163 16 16zm-160 32c0 8.837-7.163 16-16 16s-16-7.163-16-16 7.163-16 16-16 16 7.163 16 16zm32 0c0-8.837 7.163-16 16-16s16 7.163 16 16-7.163 16-16 16-16-7.163-16-16zm96 0c0 8.837-7.163 16-16 16s-16-7.163-16-16 7.163-16 16-16 16 7.163 16 16zm-128 32c0-8.837 7.163-16 16-16s16 7.163 16 16-7.163 16-16 16-16-7.163-16-16zm96 0c0 8.837-7.163 16-16 16s-16-7.163-16-16 7.163-16 16-16 16 7.163 16 16zm-96 32c0 8.837-7.163 16-16 16s-16-7.163-16-16 7.163-16 16-16 16 7.163 16 16zm64 0c0 8.837-7.163 16-16 16s-16-7.163-16-16 7.163-16 16-16 16 7.163 16 16zm-32 32c0 8.837-7.163 16-16 16s-16-7.163-16-16 7.163-16 16-16 16 7.163 16 16zm-32 32c0 8.837-7.163 16-16 16s-16-7.163-16-16 7.163-16 16-16 16 7.163 16 16z"],"sign-in-alt":[512,512,[],"f2f6","M416 448h-84c-6.6 0-12-5.4-12-12v-40c0-6.6 5.4-12 12-12h84c17.7 0 32-14.3 32-32V160c0-17.7-14.3-32-32-32h-84c-6.6 0-12-5.4-12-12V76c0-6.6 5.4-12 12-12h84c53 0 96 43 96 
96v192c0 53-43 96-96 96zm-47-201L201 79c-15-15-41-4.5-41 17v96H24c-13.3 0-24 10.7-24 24v96c0 13.3 10.7 24 24 24h136v96c0 21.5 26 32 41 17l168-168c9.3-9.4 9.3-24.6 0-34z"],"sign-language":[448,512,[],"f2a7","M91.434 483.987c-.307-16.018 13.109-29.129 29.13-29.129h62.293v-5.714H56.993c-16.021 0-29.437-13.111-29.13-29.129C28.16 404.491 40.835 392 56.428 392h126.429v-5.714H29.136c-16.021 0-29.437-13.111-29.13-29.129.297-15.522 12.973-28.013 28.566-28.013h154.286v-5.714H57.707c-16.021 0-29.437-13.111-29.13-29.129.297-15.522 12.973-28.013 28.566-28.013h168.566l-31.085-22.606c-12.762-9.281-15.583-27.149-6.302-39.912 9.281-12.761 27.15-15.582 39.912-6.302l123.361 89.715a34.287 34.287 0 0 1 14.12 27.728v141.136c0 15.91-10.946 29.73-26.433 33.374l-80.471 18.934a137.16 137.16 0 0 1-31.411 3.646H120c-15.593-.001-28.269-12.492-28.566-28.014zm73.249-225.701h36.423l-11.187-8.136c-18.579-13.511-20.313-40.887-3.17-56.536l-13.004-16.7c-9.843-12.641-28.43-15.171-40.88-5.088-12.065 9.771-14.133 27.447-4.553 39.75l36.371 46.71zm283.298-2.103l-5.003-152.452c-.518-15.771-13.722-28.136-29.493-27.619-15.773.518-28.137 13.722-27.619 29.493l1.262 38.415L283.565 11.019c-9.58-12.303-27.223-14.63-39.653-5.328-12.827 9.599-14.929 28.24-5.086 40.881l76.889 98.745-4.509 3.511-94.79-121.734c-9.58-12.303-27.223-14.63-39.653-5.328-12.827 9.599-14.929 28.24-5.086 40.881l94.443 121.288-4.509 3.511-77.675-99.754c-9.58-12.303-27.223-14.63-39.653-5.328-12.827 9.599-14.929 28.24-5.086 40.881l52.053 66.849c12.497-8.257 29.055-8.285 41.69.904l123.36 89.714c10.904 7.93 17.415 20.715 17.415 34.198v16.999l61.064-47.549a34.285 34.285 0 0 0 13.202-28.177z"],"sign-out-alt":[512,512,[],"f2f5","M497 273L329 441c-15 15-41 4.5-41-17v-96H152c-13.3 0-24-10.7-24-24v-96c0-13.3 10.7-24 24-24h136V88c0-21.4 25.9-32 41-17l168 168c9.3 9.4 9.3 24.6 0 34zM192 436v-40c0-6.6-5.4-12-12-12H96c-17.7 0-32-14.3-32-32V160c0-17.7 14.3-32 32-32h84c6.6 0 12-5.4 12-12V76c0-6.6-5.4-12-12-12H96c-53 0-96 43-96 96v192c0 53 43 96 96 96h84c6.6 0 12-5.4 12-12z"],signal:[640,512,[],"f012","M36 384h56c6.6 0 12 5.4 12 12v104c0 6.6-5.4 12-12 12H36c-6.6 0-12-5.4-12-12V396c0-6.6 5.4-12 12-12zm116-36v152c0 6.6 5.4 12 12 12h56c6.6 0 12-5.4 12-12V348c0-6.6-5.4-12-12-12h-56c-6.6 0-12 5.4-12 12zm128-80v232c0 6.6 5.4 12 12 12h56c6.6 0 12-5.4 12-12V268c0-6.6-5.4-12-12-12h-56c-6.6 0-12 5.4-12 12zm128-112v344c0 6.6 5.4 12 12 12h56c6.6 0 12-5.4 12-12V156c0-6.6-5.4-12-12-12h-56c-6.6 0-12 5.4-12 12zM536 12v488c0 6.6 5.4 12 12 12h56c6.6 0 12-5.4 12-12V12c0-6.6-5.4-12-12-12h-56c-6.6 0-12 5.4-12 12z"],sitemap:[640,512,[],"f0e8","M616 320h-48v-48c0-22.056-17.944-40-40-40H344v-40h48c13.255 0 24-10.745 24-24V24c0-13.255-10.745-24-24-24H248c-13.255 0-24 10.745-24 24v144c0 13.255 10.745 24 24 24h48v40H112c-22.056 0-40 17.944-40 40v48H24c-13.255 0-24 10.745-24 24v144c0 13.255 10.745 24 24 24h144c13.255 0 24-10.745 24-24V344c0-13.255-10.745-24-24-24h-48v-40h176v40h-48c-13.255 0-24 10.745-24 24v144c0 13.255 10.745 24 24 24h144c13.255 0 24-10.745 24-24V344c0-13.255-10.745-24-24-24h-48v-40h176v40h-48c-13.255 0-24 10.745-24 24v144c0 13.255 10.745 24 24 24h144c13.255 0 24-10.745 24-24V344c0-13.255-10.745-24-24-24z"],"sliders-h":[576,512,[],"f1de","M576 80v40c0 6.6-5.4 12-12 12H160v8c0 13.3-10.7 24-24 24h-16c-13.3 0-24-10.7-24-24v-8H12c-6.6 0-12-5.4-12-12V80c0-6.6 5.4-12 12-12h84v-8c0-13.3 10.7-24 24-24h16c13.3 0 24 10.7 24 24v8h404c6.6 0 12 5.4 12 12zm-12 148h-84v-8c0-13.3-10.7-24-24-24h-16c-13.3 0-24 10.7-24 24v8H12c-6.6 0-12 5.4-12 12v40c0 6.6 5.4 12 12 12h404v8c0 13.3 10.7 24 24 24h16c13.3 0 24-10.7 
24-24v-8h84c6.6 0 12-5.4 12-12v-40c0-6.6-5.4-12-12-12zm0 160H288v-8c0-13.3-10.7-24-24-24h-16c-13.3 0-24 10.7-24 24v8H12c-6.6 0-12 5.4-12 12v40c0 6.6 5.4 12 12 12h212v8c0 13.3 10.7 24 24 24h16c13.3 0 24-10.7 24-24v-8h276c6.6 0 12-5.4 12-12v-40c0-6.6-5.4-12-12-12z"],smile:[512,512,[],"f118","M504 256c0 136.967-111.033 248-248 248S8 392.967 8 256 119.033 8 256 8s248 111.033 248 248zm-122.526 75.34c11.479-17.755-15.349-35.194-26.873-17.374-53.418 82.627-143.71 82.681-197.164 0-11.502-17.79-38.364-.401-26.873 17.374 66.014 102.107 184.795 102.265 250.91 0zM108 192c0 37.497 30.503 68 68 68s68-30.503 68-68-30.503-68-68-68-68 30.503-68 68zm160.5 0c0 37.221 30.279 67.5 67.5 67.5s67.5-30.279 67.5-67.5-30.279-67.5-67.5-67.5-67.5 30.279-67.5 67.5zm67.5-48a47.789 47.789 0 0 0-22.603 5.647h.015c10.916 0 19.765 8.849 19.765 19.765s-8.849 19.765-19.765 19.765-19.765-8.849-19.765-19.765v-.015A47.789 47.789 0 0 0 288 192c0 26.51 21.49 48 48 48s48-21.49 48-48-21.49-48-48-48zm-160 0a47.789 47.789 0 0 0-22.603 5.647h.015c10.916 0 19.765 8.849 19.765 19.765s-8.849 19.765-19.765 19.765-19.765-8.849-19.765-19.765v-.015A47.789 47.789 0 0 0 128 192c0 26.51 21.49 48 48 48s48-21.49 48-48-21.49-48-48-48z"],snowflake:[448,512,[],"f2dc","M444.816 301.639a24.12 24.12 0 0 0 2.661-16.978c-2.725-12.966-15.339-21.245-28.174-18.492l-87.407 25.046L264 256l67.896-35.215 87.407 25.046c12.835 2.753 25.449-5.526 28.174-18.492 2.725-12.966-5.471-25.708-18.305-28.461l-47.477-7.137 53.077-30.956c11.363-6.627 15.257-21.306 8.696-32.785-6.561-11.479-21.091-15.412-32.454-8.785l-53.077 30.956 17.621-45.104c4.057-12.606-2.768-26.146-15.247-30.245-12.478-4.099-25.883 2.797-29.94 15.402l-22.232 88.99-60.38 35.215V144l65.175-63.945c8.778-9.852 7.987-25.027-1.766-33.894-9.753-8.867-24.775-8.068-33.552 1.784l-29.857 37.967V24c0-13.255-10.637-24-23.758-24s-23.758 10.745-23.758 24v61.912l-29.857-37.967c-8.779-9.852-23.799-10.652-33.552-1.784-9.753 8.867-10.543 24.042-1.766 33.894L200.242 144v70.431l-60.38-35.215-22.232-88.99c-4.057-12.605-17.462-19.501-29.94-15.402-12.478 4.099-19.304 17.64-15.247 30.245l17.62 45.104-53.077-30.956c-11.363-6.627-25.893-2.694-32.454 8.785s-2.667 26.157 8.696 32.785l53.077 30.956-47.477 7.137C5.993 201.634-2.203 214.375.523 227.341c2.725 12.965 15.339 21.245 28.174 18.492l87.407-25.046L184 256l-67.896 35.215-87.406-25.045c-12.835-2.753-25.449 5.526-28.174 18.492-2.725 12.967 5.47 25.708 18.305 28.461l47.477 7.137-53.077 30.956C1.866 357.843-2.027 372.521 4.533 384s21.091 15.412 32.454 8.785l53.077-30.956-17.62 45.104a24.157 24.157 0 0 0 2.022 19.428c2.831 4.953 7.416 8.909 13.224 10.816 12.478 4.099 25.883-2.797 29.94-15.402l22.232-88.99 60.38-35.215V368l-65.175 63.945c-8.778 9.852-7.987 25.027 1.766 33.894 9.754 8.868 24.774 8.068 33.552-1.784l29.857-37.967V488c0 13.255 10.637 24 23.758 24s23.758-10.745 23.758-24v-61.912l29.857 37.967A23.59 23.59 0 0 0 295.282 472a23.534 23.534 0 0 0 15.885-6.161c9.753-8.867 10.544-24.042 1.766-33.894L247.758 368v-70.431l60.38 35.215 22.232 88.99c4.057 12.605 17.462 19.501 29.94 15.402 12.479-4.099 19.304-17.64 15.247-30.245l-17.621-45.104 53.077 30.956c11.363 6.627 25.893 2.694 32.454-8.785s2.667-26.157-8.696-32.785l-53.077-30.956 47.477-7.137c6.86-1.469 12.394-5.793 15.645-11.481z"],sort:[320,512,[],"f0dc","M41 288h238c21.4 0 32.1 25.9 17 41L177 448c-9.4 9.4-24.6 9.4-33.9 0L24 329c-15.1-15.1-4.4-41 17-41zm255-105L177 64c-9.4-9.4-24.6-9.4-33.9 0L24 183c-15.1 15.1-4.4 41 17 41h238c21.4 0 32.1-25.9 17-41z"],"sort-alpha-down":[448,512,[],"f15d","M187.298 395.314l-79.984 
80.002c-6.248 6.247-16.383 6.245-22.627 0L4.705 395.314C-5.365 385.244 1.807 368 16.019 368H64V48c0-8.837 7.163-16 16-16h32c8.837 0 16 7.163 16 16v320h47.984c14.241 0 21.363 17.264 11.314 27.314zm119.075-180.007A12 12 0 0 1 294.838 224h-35.717c-8.22 0-14.007-8.078-11.362-15.861l57.096-168A12 12 0 0 1 316.217 32h39.566c5.139 0 9.708 3.273 11.362 8.139l57.096 168C426.886 215.922 421.1 224 412.879 224h-35.735a12 12 0 0 1-11.515-8.622l-8.301-28.299h-42.863l-8.092 28.228zm22.857-78.697h13.367l-6.6-22.937-6.767 22.937zm12.575 287.323l67.451-95.698a12 12 0 0 0 2.192-6.913V300c0-6.627-5.373-12-12-12H274.522c-6.627 0-12 5.373-12 12v28.93c0 6.627 5.373 12 12 12h56.469c-.739.991-1.497 2.036-2.27 3.133l-67.203 95.205a12.001 12.001 0 0 0-2.196 6.92V468c0 6.627 5.373 12 12 12h129.355c6.627 0 12-5.373 12-12v-28.93c0-6.627-5.373-12-12-12h-61.146c.74-.993 1.5-2.039 2.274-3.137z"],"sort-alpha-up":[448,512,[],"f15e","M4.702 116.686l79.984-80.002c6.248-6.247 16.383-6.245 22.627 0l79.981 80.002c10.07 10.07 2.899 27.314-11.314 27.314H128v320c0 8.837-7.163 16-16 16H80c-8.837 0-16-7.163-16-16V144H16.016c-14.241 0-21.363-17.264-11.314-27.314zm301.671 98.621A12 12 0 0 1 294.838 224h-35.717c-8.22 0-14.007-8.078-11.362-15.861l57.096-168A12 12 0 0 1 316.217 32h39.566c5.139 0 9.708 3.273 11.362 8.139l57.096 168C426.886 215.922 421.1 224 412.879 224h-35.735a12 12 0 0 1-11.515-8.622l-8.301-28.299h-42.863l-8.092 28.228zm22.857-78.697h13.367l-6.6-22.937-6.767 22.937zm12.575 287.323l67.451-95.698a12 12 0 0 0 2.192-6.913V300c0-6.627-5.373-12-12-12H274.522c-6.627 0-12 5.373-12 12v28.93c0 6.627 5.373 12 12 12h56.469c-.739.991-1.497 2.036-2.27 3.133l-67.203 95.205a12.001 12.001 0 0 0-2.196 6.92V468c0 6.627 5.373 12 12 12h129.355c6.627 0 12-5.373 12-12v-28.93c0-6.627-5.373-12-12-12h-61.146c.74-.993 1.5-2.039 2.274-3.137z"],"sort-amount-down":[512,512,[],"f160","M187.298 395.314l-79.984 80.002c-6.248 6.247-16.383 6.245-22.627 0L4.705 395.314C-5.365 385.244 1.807 368 16.019 368H64V48c0-8.837 7.163-16 16-16h32c8.837 0 16 7.163 16 16v320h47.984c14.241 0 21.363 17.264 11.314 27.314zM240 96h256c8.837 0 16-7.163 16-16V48c0-8.837-7.163-16-16-16H240c-8.837 0-16 7.163-16 16v32c0 8.837 7.163 16 16 16zm-16 112v-32c0-8.837 7.163-16 16-16h192c8.837 0 16 7.163 16 16v32c0 8.837-7.163 16-16 16H240c-8.837 0-16-7.163-16-16zm0 256v-32c0-8.837 7.163-16 16-16h64c8.837 0 16 7.163 16 16v32c0 8.837-7.163 16-16 16h-64c-8.837 0-16-7.163-16-16zm0-128v-32c0-8.837 7.163-16 16-16h128c8.837 0 16 7.163 16 16v32c0 8.837-7.163 16-16 16H240c-8.837 0-16-7.163-16-16z"],"sort-amount-up":[512,512,[],"f161","M4.702 116.686l79.984-80.002c6.248-6.247 16.383-6.245 22.627 0l79.981 80.002c10.07 10.07 2.899 27.314-11.314 27.314H128v320c0 8.837-7.163 16-16 16H80c-8.837 0-16-7.163-16-16V144H16.016c-14.241 0-21.363-17.264-11.314-27.314zM240 96h256c8.837 0 16-7.163 16-16V48c0-8.837-7.163-16-16-16H240c-8.837 0-16 7.163-16 16v32c0 8.837 7.163 16 16 16zm-16 112v-32c0-8.837 7.163-16 16-16h192c8.837 0 16 7.163 16 16v32c0 8.837-7.163 16-16 16H240c-8.837 0-16-7.163-16-16zm0 256v-32c0-8.837 7.163-16 16-16h64c8.837 0 16 7.163 16 16v32c0 8.837-7.163 16-16 16h-64c-8.837 0-16-7.163-16-16zm0-128v-32c0-8.837 7.163-16 16-16h128c8.837 0 16 7.163 16 16v32c0 8.837-7.163 16-16 16H240c-8.837 0-16-7.163-16-16z"],"sort-down":[320,512,[],"f0dd","M41 288h238c21.4 0 32.1 25.9 17 41L177 448c-9.4 9.4-24.6 9.4-33.9 0L24 329c-15.1-15.1-4.4-41 17-41z"],"sort-numeric-down":[448,512,[],"f162","M308.811 113.787l-19.448-20.795c-4.522-4.836-4.274-12.421.556-16.95l43.443-40.741a11.999 11.999 0 0 1 
8.209-3.247h31.591c6.627 0 12 5.373 12 12v127.07h25.66c6.627 0 12 5.373 12 12v28.93c0 6.627-5.373 12-12 12H301.649c-6.627 0-12-5.373-12-12v-28.93c0-6.627 5.373-12 12-12h25.414v-57.938c-7.254 6.58-14.211 4.921-18.252.601zm-30.57 238.569c0-32.653 23.865-67.356 68.094-67.356 38.253 0 79.424 28.861 79.424 92.228 0 51.276-32.237 105.772-91.983 105.772-17.836 0-30.546-3.557-38.548-6.781-5.79-2.333-8.789-8.746-6.922-14.703l9.237-29.48c2.035-6.496 9.049-9.983 15.467-7.716 13.029 4.602 27.878 5.275 38.103-4.138-38.742 5.072-72.872-25.36-72.872-67.826zm92.273 19.338c0-22.285-15.302-36.505-25.835-36.505-8.642 0-13.164 7.965-13.164 15.832 0 5.669 1.815 24.168 25.168 24.168 9.973 0 13.377-2.154 13.744-2.731.021-.046.087-.291.087-.764zM175.984 368H128V48c0-8.837-7.163-16-16-16H80c-8.837 0-16 7.163-16 16v320H16.019c-14.212 0-21.384 17.244-11.314 27.314l79.981 80.002c6.245 6.245 16.38 6.247 22.627 0l79.984-80.002c10.05-10.05 2.928-27.314-11.313-27.314z"],"sort-numeric-up":[448,512,[],"f163","M308.811 113.787l-19.448-20.795c-4.522-4.836-4.274-12.421.556-16.95l43.443-40.741a11.999 11.999 0 0 1 8.209-3.247h31.591c6.627 0 12 5.373 12 12v127.07h25.66c6.627 0 12 5.373 12 12v28.93c0 6.627-5.373 12-12 12H301.649c-6.627 0-12-5.373-12-12v-28.93c0-6.627 5.373-12 12-12h25.414v-57.938c-7.254 6.58-14.211 4.921-18.252.601zm-30.57 238.569c0-32.653 23.865-67.356 68.094-67.356 38.253 0 79.424 28.861 79.424 92.228 0 51.276-32.237 105.772-91.983 105.772-17.836 0-30.546-3.557-38.548-6.781-5.79-2.333-8.789-8.746-6.922-14.703l9.237-29.48c2.035-6.496 9.049-9.983 15.467-7.716 13.029 4.602 27.878 5.275 38.103-4.138-38.742 5.072-72.872-25.36-72.872-67.826zm92.273 19.338c0-22.285-15.302-36.505-25.835-36.505-8.642 0-13.164 7.965-13.164 15.832 0 5.669 1.815 24.168 25.168 24.168 9.973 0 13.377-2.154 13.744-2.731.021-.046.087-.291.087-.764zM16.016 144H64v320c0 8.837 7.163 16 16 16h32c8.837 0 16-7.163 16-16V144h47.981c14.212 0 21.384-17.244 11.314-27.314l-79.981-80.002c-6.245-6.245-16.38-6.247-22.627 0L4.702 116.686C-5.347 126.736 1.775 144 16.016 144z"],"sort-up":[320,512,[],"f0de","M279 224H41c-21.4 0-32.1-25.9-17-41L143 64c9.4-9.4 24.6-9.4 33.9 0l119 119c15.2 15.1 4.5 41-16.9 41z"],"space-shuttle":[640,512,[],"f197","M592.604 208.244C559.735 192.836 515.777 184 472 184H186.327c-4.952-6.555-10.585-11.978-16.72-16H376C229.157 137.747 219.403 32 96.003 32H96v128H80V32c-26.51 0-48 28.654-48 64v64c-23.197 0-32 10.032-32 24v40c0 13.983 8.819 24 32 24v16c-23.197 0-32 10.032-32 24v40c0 13.983 8.819 24 32 24v64c0 35.346 21.49 64 48 64V352h16v128h.003c123.4 0 133.154-105.747 279.997-136H169.606c6.135-4.022 11.768-9.445 16.72-16H472c43.777 0 87.735-8.836 120.604-24.244C622.282 289.845 640 271.992 640 256s-17.718-33.845-47.396-47.756zM488 296a8 8 0 0 1-8-8v-64a8 8 0 0 1 8-8c31.909 0 31.942 80 0 80z"],spinner:[512,512,[],"f110","M304 48c0 26.51-21.49 48-48 48s-48-21.49-48-48 21.49-48 48-48 48 21.49 48 48zm-48 368c-26.51 0-48 21.49-48 48s21.49 48 48 48 48-21.49 48-48-21.49-48-48-48zm208-208c-26.51 0-48 21.49-48 48s21.49 48 48 48 48-21.49 48-48-21.49-48-48-48zM96 256c0-26.51-21.49-48-48-48S0 229.49 0 256s21.49 48 48 48 48-21.49 48-48zm12.922 99.078c-26.51 0-48 21.49-48 48s21.49 48 48 48 48-21.49 48-48c0-26.509-21.491-48-48-48zm294.156 0c-26.51 0-48 21.49-48 48s21.49 48 48 48 48-21.49 48-48c0-26.509-21.49-48-48-48zM108.922 60.922c-26.51 0-48 21.49-48 48s21.49 48 48 48 48-21.49 48-48-21.491-48-48-48z"],square:[448,512,[],"f0c8","M400 32H48C21.5 32 0 53.5 0 80v352c0 26.5 21.5 48 48 48h352c26.5 0 48-21.5 
48-48V80c0-26.5-21.5-48-48-48z"],star:[576,512,[],"f005","M259.3 17.8L194 150.2 47.9 171.5c-26.2 3.8-36.7 36.1-17.7 54.6l105.7 103-25 145.5c-4.5 26.3 23.2 46 46.4 33.7L288 439.6l130.7 68.7c23.2 12.2 50.9-7.4 46.4-33.7l-25-145.5 105.7-103c19-18.5 8.5-50.8-17.7-54.6L382 150.2 316.7 17.8c-11.7-23.6-45.6-23.9-57.4 0z"],"star-half":[576,512,[],"f089","M288 0c-11.4 0-22.8 5.9-28.7 17.8L194 150.2 47.9 171.4c-26.2 3.8-36.7 36.1-17.7 54.6l105.7 103-25 145.5c-4.5 26.1 23 46 46.4 33.7L288 439.6V0z"],"step-backward":[448,512,[],"f048","M64 468V44c0-6.6 5.4-12 12-12h48c6.6 0 12 5.4 12 12v176.4l195.5-181C352.1 22.3 384 36.6 384 64v384c0 27.4-31.9 41.7-52.5 24.6L136 292.7V468c0 6.6-5.4 12-12 12H76c-6.6 0-12-5.4-12-12z"],"step-forward":[448,512,[],"f051","M384 44v424c0 6.6-5.4 12-12 12h-48c-6.6 0-12-5.4-12-12V291.6l-195.5 181C95.9 489.7 64 475.4 64 448V64c0-27.4 31.9-41.7 52.5-24.6L312 219.3V44c0-6.6 5.4-12 12-12h48c6.6 0 12 5.4 12 12z"],stethoscope:[512,512,[],"f0f1","M512 176c0-35.659-29.164-64.507-64.941-63.993-34.21.492-62.296 28.357-63.043 62.562-.531 24.282 12.476 45.558 31.984 56.848V344c0 57.346-50.243 104-112 104-60.039 0-109.189-44.096-111.878-99.24C265.005 333.847 320 269.225 320 192V36.584c0-11.44-8.075-21.29-19.293-23.534L237.81.471c-12.997-2.599-25.641 5.83-28.241 18.827l-3.138 15.689c-2.6 12.997 5.83 25.641 18.827 28.241L256 69.376v121.4c0 52.852-42.203 96.707-95.053 97.22C107.58 288.513 64 245.25 64 192V69.376l30.742-6.149c12.997-2.6 21.427-15.243 18.827-28.241l-3.138-15.689C107.831 6.3 95.188-2.129 82.19.471L19.293 13.05C8.075 15.294 0 25.144 0 36.584V192c0 77.295 55.096 141.961 128.076 156.798C130.747 439.223 208.634 512 304 512c97.047 0 176-75.364 176-168V231.417c19.124-11.068 32-31.732 32-55.417zm-64-16c8.822 0 16 7.178 16 16s-7.178 16-16 16-16-7.178-16-16 7.178-16 16-16z"],"sticky-note":[448,512,[],"f249","M312 320h136V56c0-13.3-10.7-24-24-24H24C10.7 32 0 42.7 0 56v400c0 13.3 10.7 24 24 24h264V344c0-13.2 10.8-24 24-24zm129 55l-98 98c-4.5 4.5-10.6 7-17 7h-6V352h128v6.1c0 6.3-2.5 12.4-7 16.9z"],stop:[448,512,[],"f04d","M400 32H48C21.5 32 0 53.5 0 80v352c0 26.5 21.5 48 48 48h352c26.5 0 48-21.5 48-48V80c0-26.5-21.5-48-48-48z"],"stop-circle":[512,512,[],"f28d","M256 8C119 8 8 119 8 256s111 248 248 248 248-111 248-248S393 8 256 8zm96 328c0 8.8-7.2 16-16 16H176c-8.8 0-16-7.2-16-16V176c0-8.8 7.2-16 16-16h160c8.8 0 16 7.2 16 16v160z"],"street-view":[512,512,[],"f21d","M192 64c0-35.346 28.654-64 64-64s64 28.654 64 64c0 35.346-28.654 64-64 64s-64-28.654-64-64zm112 80h-11.36c-22.711 10.443-49.59 10.894-73.28 0H208c-26.51 0-48 21.49-48 48v104c0 13.255 10.745 24 24 24h16v104c0 13.255 10.745 24 24 24h64c13.255 0 24-10.745 24-24V320h16c13.255 0 24-10.745 24-24V192c0-26.51-21.49-48-48-48zm85.642 189.152a72.503 72.503 0 0 1-29.01 27.009C391.133 365.251 480 385.854 480 416c0 46.304-167.656 64-224 64-70.303 0-224-20.859-224-64 0-30.123 88.361-50.665 119.367-55.839a72.516 72.516 0 0 1-29.01-27.009C74.959 343.395 0 367.599 0 416c0 77.111 178.658 96 256 96 77.249 0 256-18.865 256-96 0-48.403-74.967-72.606-122.358-82.848z"],strikethrough:[512,512,[],"f0cc","M496 288H16c-8.837 0-16-7.163-16-16v-32c0-8.837 7.163-16 16-16h480c8.837 0 16 7.163 16 16v32c0 8.837-7.163 16-16 16zm-214.666 16c27.258 12.937 46.524 28.683 46.524 56.243 0 33.108-28.977 53.676-75.621 53.676-32.325 0-76.874-12.08-76.874-44.271V368c0-8.837-7.164-16-16-16H113.75c-8.836 0-16 7.163-16 16v19.204c0 66.845 77.717 101.82 154.487 101.82 88.578 0 162.013-45.438 162.013-134.424 
0-19.815-3.618-36.417-10.143-50.6H281.334zm-30.952-96c-32.422-13.505-56.836-28.946-56.836-59.683 0-33.92 30.901-47.406 64.962-47.406 42.647 0 64.962 16.593 64.962 32.985V136c0 8.837 7.164 16 16 16h45.613c8.836 0 16-7.163 16-16v-30.318c0-52.438-71.725-79.875-142.575-79.875-85.203 0-150.726 40.972-150.726 125.646 0 22.71 4.665 41.176 12.777 56.547h129.823z"],subscript:[512,512,[],"f12c","M395.198 416c3.461-10.526 18.796-21.28 36.265-32.425 16.625-10.605 35.467-22.626 50.341-38.862 17.458-19.054 25.944-40.175 25.944-64.567 0-60.562-50.702-88.146-97.81-88.146-42.491 0-76.378 22.016-94.432 50.447-4.654 7.329-2.592 17.036 4.623 21.865l30.328 20.296c7.032 4.706 16.46 3.084 21.63-3.614 8.022-10.394 18.818-18.225 31.667-18.225 19.387 0 26.266 12.901 26.266 23.948 0 36.159-119.437 57.023-119.437 160.024 0 6.654.561 13.014 1.415 19.331 1.076 7.964 7.834 13.928 15.87 13.928H496c8.837 0 16-7.163 16-16v-32c0-8.837-7.163-16-16-16H395.198zM272 256c8.837 0 16 7.163 16 16v32c0 8.837-7.163 16-16 16h-62.399a16 16 0 0 1-13.541-7.478l-45.701-72.615c-2.297-3.352-4.422-6.969-6.195-10.209-1.65 3.244-3.647 6.937-5.874 10.582l-44.712 72.147a15.999 15.999 0 0 1-13.6 7.572H16c-8.837 0-16-7.163-16-16v-32c0-8.837 7.163-16 16-16h26.325l56.552-82.709L46.111 96H16C7.163 96 0 88.837 0 80V48c0-8.837 7.163-16 16-16h68.806a16 16 0 0 1 13.645 7.644l39.882 65.126c2.072 3.523 4.053 7.171 5.727 10.37 1.777-3.244 3.92-6.954 6.237-10.537l40.332-65.035A15.999 15.999 0 0 1 204.226 32H272c8.837 0 16 7.163 16 16v32c0 8.837-7.163 16-16 16h-27.979l-52.69 75.671L249.974 256H272z"],subway:[448,512,[],"f239","M448 96v256c0 51.815-61.624 96-130.022 96l62.98 49.721C386.905 502.417 383.562 512 376 512H72c-7.578 0-10.892-9.594-4.957-14.279L130.022 448C61.82 448 0 403.954 0 352V96C0 42.981 64 0 128 0h192c65 0 128 42.981 128 96zM200 232V120c0-13.255-10.745-24-24-24H72c-13.255 0-24 10.745-24 24v112c0 13.255 10.745 24 24 24h104c13.255 0 24-10.745 24-24zm200 0V120c0-13.255-10.745-24-24-24H272c-13.255 0-24 10.745-24 24v112c0 13.255 10.745 24 24 24h104c13.255 0 24-10.745 24-24zm-48 56c-26.51 0-48 21.49-48 48s21.49 48 48 48 48-21.49 48-48-21.49-48-48-48zm-256 0c-26.51 0-48 21.49-48 48s21.49 48 48 48 48-21.49 48-48-21.49-48-48-48z"],suitcase:[512,512,[],"f0f2","M96 480h320V128h-32V80c0-26.51-21.49-48-48-48H176c-26.51 0-48 21.49-48 48v48H96v352zm96-384h128v32H192V96zm320 80v256c0 26.51-21.49 48-48 48h-16V128h16c26.51 0 48 21.49 48 48zM64 480H48c-26.51 0-48-21.49-48-48V176c0-26.51 21.49-48 48-48h16v352z"],sun:[512,512,[],"f185","M274.835 12.646l25.516 62.393c4.213 10.301 16.671 14.349 26.134 8.492l57.316-35.479c15.49-9.588 34.808 4.447 30.475 22.142l-16.03 65.475c-2.647 10.81 5.053 21.408 16.152 22.231l67.224 4.987c18.167 1.348 25.546 24.057 11.641 35.826L441.81 242.26c-8.495 7.19-8.495 20.289 0 27.479l51.454 43.548c13.906 11.769 6.527 34.478-11.641 35.826l-67.224 4.987c-11.099.823-18.799 11.421-16.152 22.231l16.03 65.475c4.332 17.695-14.986 31.73-30.475 22.142l-57.316-35.479c-9.463-5.858-21.922-1.81-26.134 8.492l-25.516 62.393c-6.896 16.862-30.774 16.862-37.67 0l-25.516-62.393c-4.213-10.301-16.671-14.349-26.134-8.492l-57.317 35.479c-15.49 9.588-34.808-4.447-30.475-22.142l16.03-65.475c2.647-10.81-5.053-21.408-16.152-22.231l-67.224-4.987c-18.167-1.348-25.546-24.057-11.641-35.826L70.19 269.74c8.495-7.19 8.495-20.289 0-27.479l-51.454-43.548c-13.906-11.769-6.527-34.478 11.641-35.826l67.224-4.987c11.099-.823 18.799-11.421 16.152-22.231l-16.03-65.475c-4.332-17.695 14.986-31.73 30.475-22.142l57.317 35.479c9.463 5.858 21.921 1.81 
26.134-8.492l25.516-62.393c6.896-16.861 30.774-16.861 37.67 0zM392 256c0-74.991-61.01-136-136-136-74.991 0-136 61.009-136 136s61.009 136 136 136c74.99 0 136-61.009 136-136zm-32 0c0 57.346-46.654 104-104 104s-104-46.654-104-104 46.654-104 104-104 104 46.654 104 104z"],superscript:[512,512,[],"f12b","M395.198 256c3.461-10.526 18.796-21.28 36.265-32.425 16.625-10.605 35.467-22.626 50.341-38.862 17.458-19.054 25.944-40.175 25.944-64.567 0-60.562-50.702-88.146-97.81-88.146-42.491 0-76.378 22.016-94.432 50.447-4.654 7.329-2.592 17.036 4.623 21.865l30.328 20.296c7.032 4.706 16.46 3.084 21.63-3.614 8.022-10.394 18.818-18.225 31.667-18.225 19.387 0 26.266 12.901 26.266 23.948 0 36.159-119.437 57.023-119.437 160.024 0 6.654.561 13.014 1.415 19.331 1.076 7.964 7.834 13.928 15.87 13.928H496c8.837 0 16-7.163 16-16v-32c0-8.837-7.163-16-16-16H395.198zM272 416c8.837 0 16 7.163 16 16v32c0 8.837-7.163 16-16 16h-62.399a16 16 0 0 1-13.541-7.478l-45.701-72.615c-2.297-3.352-4.422-6.969-6.195-10.209-1.65 3.244-3.647 6.937-5.874 10.582l-44.712 72.147a15.999 15.999 0 0 1-13.6 7.572H16c-8.837 0-16-7.163-16-16v-32c0-8.837 7.163-16 16-16h26.325l56.552-82.709L46.111 256H16c-8.837 0-16-7.163-16-16v-32c0-8.837 7.163-16 16-16h68.806a16 16 0 0 1 13.645 7.644l39.882 65.126c2.072 3.523 4.053 7.171 5.727 10.37 1.777-3.244 3.92-6.954 6.237-10.537l40.332-65.035a16 16 0 0 1 13.598-7.567H272c8.837 0 16 7.163 16 16v32c0 8.837-7.163 16-16 16h-27.979l-52.69 75.671L249.974 416H272z"],sync:[512,512,[],"f021","M440.935 12.574l3.966 82.766C399.416 41.904 331.674 8 256 8 134.813 8 33.933 94.924 12.296 209.824 10.908 217.193 16.604 224 24.103 224h49.084c5.57 0 10.377-3.842 11.676-9.259C103.407 137.408 172.931 80 256 80c60.893 0 114.512 30.856 146.104 77.801l-101.53-4.865c-6.845-.328-12.574 5.133-12.574 11.986v47.411c0 6.627 5.373 12 12 12h200.333c6.627 0 12-5.373 12-12V12c0-6.627-5.373-12-12-12h-47.411c-6.853 0-12.315 5.729-11.987 12.574zM256 432c-60.895 0-114.517-30.858-146.109-77.805l101.868 4.871c6.845.327 12.573-5.134 12.573-11.986v-47.412c0-6.627-5.373-12-12-12H12c-6.627 0-12 5.373-12 12V500c0 6.627 5.373 12 12 12h47.385c6.863 0 12.328-5.745 11.985-12.599l-4.129-82.575C112.725 470.166 180.405 504 256 504c121.187 0 222.067-86.924 243.704-201.824 1.388-7.369-4.308-14.176-11.807-14.176h-49.084c-5.57 0-10.377 3.842-11.676 9.259C408.593 374.592 339.069 432 256 432z"],"sync-alt":[512,512,[],"f2f1","M370.72 133.28C339.458 104.008 298.888 87.962 255.848 88c-77.458.068-144.328 53.178-162.791 126.85-1.344 5.363-6.122 9.15-11.651 9.15H24.103c-7.498 0-13.194-6.807-11.807-14.176C33.933 94.924 134.813 8 256 8c66.448 0 126.791 26.136 171.315 68.685L463.03 40.97C478.149 25.851 504 36.559 504 57.941V192c0 13.255-10.745 24-24 24H345.941c-21.382 0-32.09-25.851-16.971-40.971l41.75-41.749zM32 296h134.059c21.382 0 32.09 25.851 16.971 40.971l-41.75 41.75c31.262 29.273 71.835 45.319 114.876 45.28 77.418-.07 144.315-53.144 162.787-126.849 1.344-5.363 6.122-9.15 11.651-9.15h57.304c7.498 0 13.194 6.807 11.807 14.176C478.067 417.076 377.187 504 256 504c-66.448 0-126.791-26.136-171.315-68.685L48.97 471.03C33.851 486.149 8 475.441 8 454.059V320c0-13.255 10.745-24 24-24z"],table:[512,512,[],"f0ce","M464 32H48C21.49 32 0 53.49 0 80v352c0 26.51 21.49 48 48 48h416c26.51 0 48-21.49 48-48V80c0-26.51-21.49-48-48-48zM224 416H64v-96h160v96zm0-160H64v-96h160v96zm224 160H288v-96h160v96zm0-160H288v-96h160v96z"],tablet:[448,512,[],"f10a","M400 0H48C21.5 0 0 21.5 0 48v416c0 26.5 21.5 48 48 48h352c26.5 0 48-21.5 48-48V48c0-26.5-21.5-48-48-48zM224 480c-17.7 
0-32-14.3-32-32s14.3-32 32-32 32 14.3 32 32-14.3 32-32 32z"],"tablet-alt":[448,512,[],"f3fa","M400 0H48C21.5 0 0 21.5 0 48v416c0 26.5 21.5 48 48 48h352c26.5 0 48-21.5 48-48V48c0-26.5-21.5-48-48-48zM224 480c-17.7 0-32-14.3-32-32s14.3-32 32-32 32 14.3 32 32-14.3 32-32 32zm176-108c0 6.6-5.4 12-12 12H60c-6.6 0-12-5.4-12-12V60c0-6.6 5.4-12 12-12h328c6.6 0 12 5.4 12 12v312z"],"tachometer-alt":[576,512,[],"f3fd","M75.694 480a48.02 48.02 0 0 1-42.448-25.571C12.023 414.3 0 368.556 0 320 0 160.942 128.942 32 288 32s288 128.942 288 288c0 48.556-12.023 94.3-33.246 134.429A48.018 48.018 0 0 1 500.306 480H75.694zM512 288c-17.673 0-32 14.327-32 32 0 17.673 14.327 32 32 32s32-14.327 32-32c0-17.673-14.327-32-32-32zM288 128c17.673 0 32-14.327 32-32 0-17.673-14.327-32-32-32s-32 14.327-32 32c0 17.673 14.327 32 32 32zM64 288c-17.673 0-32 14.327-32 32 0 17.673 14.327 32 32 32s32-14.327 32-32c0-17.673-14.327-32-32-32zm65.608-158.392c-17.673 0-32 14.327-32 32 0 17.673 14.327 32 32 32s32-14.327 32-32c0-17.673-14.327-32-32-32zm316.784 0c-17.673 0-32 14.327-32 32 0 17.673 14.327 32 32 32s32-14.327 32-32c0-17.673-14.327-32-32-32zm-87.078 31.534c-12.627-4.04-26.133 2.92-30.173 15.544l-45.923 143.511C250.108 322.645 224 350.264 224 384c0 35.346 28.654 64 64 64 35.346 0 64-28.654 64-64 0-19.773-8.971-37.447-23.061-49.187l45.919-143.498c4.039-12.625-2.92-26.133-15.544-30.173z"],tag:[512,512,[],"f02b","M0 252.118V48C0 21.49 21.49 0 48 0h204.118a48 48 0 0 1 33.941 14.059l211.882 211.882c18.745 18.745 18.745 49.137 0 67.882L293.823 497.941c-18.745 18.745-49.137 18.745-67.882 0L14.059 286.059A48 48 0 0 1 0 252.118zM112 64c-26.51 0-48 21.49-48 48s21.49 48 48 48 48-21.49 48-48-21.49-48-48-48z"],tags:[640,512,[],"f02c","M497.941 225.941L286.059 14.059A48 48 0 0 0 252.118 0H48C21.49 0 0 21.49 0 48v204.118a48 48 0 0 0 14.059 33.941l211.882 211.882c18.744 18.745 49.136 18.746 67.882 0l204.118-204.118c18.745-18.745 18.745-49.137 0-67.882zM112 160c-26.51 0-48-21.49-48-48s21.49-48 48-48 48 21.49 48 48-21.49 48-48 48zm513.941 133.823L421.823 497.941c-18.745 18.745-49.137 18.745-67.882 0l-.36-.36L527.64 323.522c16.999-16.999 26.36-39.6 26.36-63.64s-9.362-46.641-26.36-63.64L331.397 0h48.721a48 48 0 0 1 33.941 14.059l211.882 211.882c18.745 18.745 18.745 49.137 0 67.882z"],tasks:[512,512,[],"f0ae","M208 132h288c8.8 0 16-7.2 16-16V76c0-8.8-7.2-16-16-16H208c-8.8 0-16 7.2-16 16v40c0 8.8 7.2 16 16 16zm0 160h288c8.8 0 16-7.2 16-16v-40c0-8.8-7.2-16-16-16H208c-8.8 0-16 7.2-16 16v40c0 8.8 7.2 16 16 16zm0 160h288c8.8 0 16-7.2 16-16v-40c0-8.8-7.2-16-16-16H208c-8.8 0-16 7.2-16 16v40c0 8.8 7.2 16 16 16zM64 368c-26.5 0-48.6 21.5-48.6 48s22.1 48 48.6 48 48-21.5 48-48-21.5-48-48-48zm92.5-299l-72.2 72.2-15.6 15.6c-4.7 4.7-12.9 4.7-17.6 0L3.5 109.4c-4.7-4.7-4.7-12.3 0-17l15.7-15.7c4.7-4.7 12.3-4.7 17 0l22.7 22.1 63.7-63.3c4.7-4.7 12.3-4.7 17 0l17 16.5c4.6 4.7 4.6 12.3-.1 17zm0 159.6l-72.2 72.2-15.7 15.7c-4.7 4.7-12.9 4.7-17.6 0L3.5 269c-4.7-4.7-4.7-12.3 0-17l15.7-15.7c4.7-4.7 12.3-4.7 17 0l22.7 22.1 63.7-63.7c4.7-4.7 12.3-4.7 17 0l17 17c4.6 4.6 4.6 12.2-.1 16.9z"],taxi:[512,512,[],"f1ba","M461.951 243.865l-21.816-87.268A79.885 79.885 0 0 0 362.522 96H352V56c0-13.255-10.745-24-24-24H184c-13.255 0-24 10.745-24 24v40h-10.522a79.885 79.885 0 0 0-77.612 60.597L50.05 243.865C25.515 252.823 8 276.366 8 304v48c0 20.207 9.374 38.214 24 49.943V456c0 13.255 10.745 24 24 24h48c13.255 0 24-10.745 24-24v-40h256v40c0 13.255 10.745 24 24 24h48c13.255 0 24-10.745 24-24v-54.057c14.626-11.729 24-29.737 24-49.943v-48c0-27.634-17.515-51.177-42.049-60.135zM149.478 
160h213.045a15.975 15.975 0 0 1 15.522 12.12l16.97 67.88h-278.03l16.97-67.881A15.976 15.976 0 0 1 149.478 160zM132 336c0 19.882-16.118 36-36 36s-36-16.118-36-36 16.118-36 36-36 36 16.118 36 36zm320 0c0 19.882-16.118 36-36 36s-36-16.118-36-36 16.118-36 36-36 36 16.118 36 36z"],terminal:[640,512,[],"f120","M257.981 272.971L63.638 467.314c-9.373 9.373-24.569 9.373-33.941 0L7.029 444.647c-9.357-9.357-9.375-24.522-.04-33.901L161.011 256 6.99 101.255c-9.335-9.379-9.317-24.544.04-33.901l22.667-22.667c9.373-9.373 24.569-9.373 33.941 0L257.981 239.03c9.373 9.372 9.373 24.568 0 33.941zM640 456v-32c0-13.255-10.745-24-24-24H312c-13.255 0-24 10.745-24 24v32c0 13.255 10.745 24 24 24h304c13.255 0 24-10.745 24-24z"],"text-height":[576,512,[],"f034","M16 32h288c8.837 0 16 7.163 16 16v96c0 8.837-7.163 16-16 16h-35.496c-8.837 0-16-7.163-16-16V96h-54.761v320H232c8.837 0 16 7.163 16 16v32c0 8.837-7.163 16-16 16H88c-8.837 0-16-7.163-16-16v-32c0-8.837 7.163-16 16-16h34.257V96H67.496v48c0 8.837-7.163 16-16 16H16c-8.837 0-16-7.163-16-16V48c0-8.837 7.163-16 16-16zm475.308 4.685l79.995 80.001C581.309 126.693 574.297 144 559.99 144H512v224h48c15.639 0 20.635 17.991 11.313 27.314l-79.995 80.001c-6.247 6.247-16.381 6.245-22.626 0l-79.995-80.001C378.691 385.307 385.703 368 400.01 368H448V144h-48c-15.639 0-20.635-17.991-11.313-27.314l79.995-80.001c6.247-6.248 16.381-6.245 22.626 0z"],"text-width":[448,512,[],"f035","M16 32h416c8.837 0 16 7.163 16 16v96c0 8.837-7.163 16-16 16h-35.496c-8.837 0-16-7.163-16-16V96H261.743v128H296c8.837 0 16 7.163 16 16v32c0 8.837-7.163 16-16 16H152c-8.837 0-16-7.163-16-16v-32c0-8.837 7.163-16 16-16h34.257V96H67.496v48c0 8.837-7.163 16-16 16H16c-8.837 0-16-7.163-16-16V48c0-8.837 7.163-16 16-16zm427.315 340.682l-80.001-79.995C353.991 283.365 336 288.362 336 304v48H112v-47.99c0-14.307-17.307-21.319-27.314-11.313L4.685 372.692c-6.245 6.245-6.247 16.379 0 22.626l80.001 79.995C94.009 484.635 112 479.638 112 464v-48h224v47.99c0 14.307 17.307 21.319 27.314 11.313l80.001-79.995c6.245-6.245 6.248-16.379 0-22.626z"],th:[512,512,[],"f00a","M149.333 56v80c0 13.255-10.745 24-24 24H24c-13.255 0-24-10.745-24-24V56c0-13.255 10.745-24 24-24h101.333c13.255 0 24 10.745 24 24zm181.334 240v-80c0-13.255-10.745-24-24-24H205.333c-13.255 0-24 10.745-24 24v80c0 13.255 10.745 24 24 24h101.333c13.256 0 24.001-10.745 24.001-24zm32-240v80c0 13.255 10.745 24 24 24H488c13.255 0 24-10.745 24-24V56c0-13.255-10.745-24-24-24H386.667c-13.255 0-24 10.745-24 24zm-32 80V56c0-13.255-10.745-24-24-24H205.333c-13.255 0-24 10.745-24 24v80c0 13.255 10.745 24 24 24h101.333c13.256 0 24.001-10.745 24.001-24zm-205.334 56H24c-13.255 0-24 10.745-24 24v80c0 13.255 10.745 24 24 24h101.333c13.255 0 24-10.745 24-24v-80c0-13.255-10.745-24-24-24zM0 376v80c0 13.255 10.745 24 24 24h101.333c13.255 0 24-10.745 24-24v-80c0-13.255-10.745-24-24-24H24c-13.255 0-24 10.745-24 24zm386.667-56H488c13.255 0 24-10.745 24-24v-80c0-13.255-10.745-24-24-24H386.667c-13.255 0-24 10.745-24 24v80c0 13.255 10.745 24 24 24zm0 160H488c13.255 0 24-10.745 24-24v-80c0-13.255-10.745-24-24-24H386.667c-13.255 0-24 10.745-24 24v80c0 13.255 10.745 24 24 24zM181.333 376v80c0 13.255 10.745 24 24 24h101.333c13.255 0 24-10.745 24-24v-80c0-13.255-10.745-24-24-24H205.333c-13.255 0-24 10.745-24 24z"],"th-large":[512,512,[],"f009","M296 32h192c13.255 0 24 10.745 24 24v160c0 13.255-10.745 24-24 24H296c-13.255 0-24-10.745-24-24V56c0-13.255 10.745-24 24-24zm-80 0H24C10.745 32 0 42.745 0 56v160c0 13.255 10.745 24 24 24h192c13.255 0 24-10.745 24-24V56c0-13.255-10.745-24-24-24zM0 296v160c0 13.255 
10.745 24 24 24h192c13.255 0 24-10.745 24-24V296c0-13.255-10.745-24-24-24H24c-13.255 0-24 10.745-24 24zm296 184h192c13.255 0 24-10.745 24-24V296c0-13.255-10.745-24-24-24H296c-13.255 0-24 10.745-24 24v160c0 13.255 10.745 24 24 24z"],"th-list":[512,512,[],"f00b","M149.333 216v80c0 13.255-10.745 24-24 24H24c-13.255 0-24-10.745-24-24v-80c0-13.255 10.745-24 24-24h101.333c13.255 0 24 10.745 24 24zM0 376v80c0 13.255 10.745 24 24 24h101.333c13.255 0 24-10.745 24-24v-80c0-13.255-10.745-24-24-24H24c-13.255 0-24 10.745-24 24zM125.333 32H24C10.745 32 0 42.745 0 56v80c0 13.255 10.745 24 24 24h101.333c13.255 0 24-10.745 24-24V56c0-13.255-10.745-24-24-24zm80 448H488c13.255 0 24-10.745 24-24v-80c0-13.255-10.745-24-24-24H205.333c-13.255 0-24 10.745-24 24v80c0 13.255 10.745 24 24 24zm-24-424v80c0 13.255 10.745 24 24 24H488c13.255 0 24-10.745 24-24V56c0-13.255-10.745-24-24-24H205.333c-13.255 0-24 10.745-24 24zm24 264H488c13.255 0 24-10.745 24-24v-80c0-13.255-10.745-24-24-24H205.333c-13.255 0-24 10.745-24 24v80c0 13.255 10.745 24 24 24z"],"thermometer-empty":[256,512,[],"f2cb","M192 384c0 35.346-28.654 64-64 64s-64-28.654-64-64c0-35.346 28.654-64 64-64s64 28.654 64 64zm32-84.653c19.912 22.563 32 52.194 32 84.653 0 70.696-57.303 128-128 128-.299 0-.609-.001-.909-.003C56.789 511.509-.357 453.636.002 383.333.166 351.135 12.225 321.755 32 299.347V96c0-53.019 42.981-96 96-96s96 42.981 96 96v203.347zM208 384c0-34.339-19.37-52.19-32-66.502V96c0-26.467-21.533-48-48-48S80 69.533 80 96v221.498c-12.732 14.428-31.825 32.1-31.999 66.08-.224 43.876 35.563 80.116 79.423 80.42L128 464c44.112 0 80-35.888 80-80z"],"thermometer-full":[256,512,[],"f2c7","M224 96c0-53.019-42.981-96-96-96S32 42.981 32 96v203.347C12.225 321.756.166 351.136.002 383.333c-.359 70.303 56.787 128.176 127.089 128.664.299.002.61.003.909.003 70.698 0 128-57.304 128-128 0-32.459-12.088-62.09-32-84.653V96zm-96 368l-.576-.002c-43.86-.304-79.647-36.544-79.423-80.42.173-33.98 19.266-51.652 31.999-66.08V96c0-26.467 21.533-48 48-48s48 21.533 48 48v221.498c12.63 14.312 32 32.164 32 66.502 0 44.112-35.888 80-80 80zm64-80c0 35.346-28.654 64-64 64s-64-28.654-64-64c0-23.685 12.876-44.349 32-55.417V96c0-17.673 14.327-32 32-32s32 14.327 32 32v232.583c19.124 11.068 32 31.732 32 55.417z"],"thermometer-half":[256,512,[],"f2c9","M192 384c0 35.346-28.654 64-64 64s-64-28.654-64-64c0-23.685 12.876-44.349 32-55.417V224c0-17.673 14.327-32 32-32s32 14.327 32 32v104.583c19.124 11.068 32 31.732 32 55.417zm32-84.653c19.912 22.563 32 52.194 32 84.653 0 70.696-57.303 128-128 128-.299 0-.609-.001-.909-.003C56.789 511.509-.357 453.636.002 383.333.166 351.135 12.225 321.755 32 299.347V96c0-53.019 42.981-96 96-96s96 42.981 96 96v203.347zM208 384c0-34.339-19.37-52.19-32-66.502V96c0-26.467-21.533-48-48-48S80 69.533 80 96v221.498c-12.732 14.428-31.825 32.1-31.999 66.08-.224 43.876 35.563 80.116 79.423 80.42L128 464c44.112 0 80-35.888 80-80z"],"thermometer-quarter":[256,512,[],"f2ca","M192 384c0 35.346-28.654 64-64 64s-64-28.654-64-64c0-23.685 12.876-44.349 32-55.417V288c0-17.673 14.327-32 32-32s32 14.327 32 32v40.583c19.124 11.068 32 31.732 32 55.417zm32-84.653c19.912 22.563 32 52.194 32 84.653 0 70.696-57.303 128-128 128-.299 0-.609-.001-.909-.003C56.789 511.509-.357 453.636.002 383.333.166 351.135 12.225 321.755 32 299.347V96c0-53.019 42.981-96 96-96s96 42.981 96 96v203.347zM208 384c0-34.339-19.37-52.19-32-66.502V96c0-26.467-21.533-48-48-48S80 69.533 80 96v221.498c-12.732 14.428-31.825 32.1-31.999 66.08-.224 43.876 35.563 80.116 79.423 80.42L128 464c44.112 0 80-35.888 
80-80z"],"thermometer-three-quarters":[256,512,[],"f2c8","M192 384c0 35.346-28.654 64-64 64-35.346 0-64-28.654-64-64 0-23.685 12.876-44.349 32-55.417V160c0-17.673 14.327-32 32-32s32 14.327 32 32v168.583c19.124 11.068 32 31.732 32 55.417zm32-84.653c19.912 22.563 32 52.194 32 84.653 0 70.696-57.303 128-128 128-.299 0-.609-.001-.909-.003C56.789 511.509-.357 453.636.002 383.333.166 351.135 12.225 321.755 32 299.347V96c0-53.019 42.981-96 96-96s96 42.981 96 96v203.347zM208 384c0-34.339-19.37-52.19-32-66.502V96c0-26.467-21.533-48-48-48S80 69.533 80 96v221.498c-12.732 14.428-31.825 32.1-31.999 66.08-.224 43.876 35.563 80.116 79.423 80.42L128 464c44.112 0 80-35.888 80-80z"],"thumbs-down":[512,512,[],"f165","M0 56v240c0 13.255 10.745 24 24 24h80c13.255 0 24-10.745 24-24V56c0-13.255-10.745-24-24-24H24C10.745 32 0 42.745 0 56zm40 200c0-13.255 10.745-24 24-24s24 10.745 24 24-10.745 24-24 24-24-10.745-24-24zm272 256c-20.183 0-29.485-39.293-33.931-57.795-5.206-21.666-10.589-44.07-25.393-58.902-32.469-32.524-49.503-73.967-89.117-113.111a11.98 11.98 0 0 1-3.558-8.521V59.901c0-6.541 5.243-11.878 11.783-11.998 15.831-.29 36.694-9.079 52.651-16.178C256.189 17.598 295.709.017 343.995 0h2.844c42.777 0 93.363.413 113.774 29.737 8.392 12.057 10.446 27.034 6.148 44.632 16.312 17.053 25.063 48.863 16.382 74.757 17.544 23.432 19.143 56.132 9.308 79.469l.11.11c11.893 11.949 19.523 31.259 19.439 49.197-.156 30.352-26.157 58.098-59.553 58.098H350.723C358.03 364.34 384 388.132 384 430.548 384 504 336 512 312 512z"],"thumbs-up":[512,512,[],"f164","M104 224H24c-13.255 0-24 10.745-24 24v240c0 13.255 10.745 24 24 24h80c13.255 0 24-10.745 24-24V248c0-13.255-10.745-24-24-24zM64 472c-13.255 0-24-10.745-24-24s10.745-24 24-24 24 10.745 24 24-10.745 24-24 24zM384 81.452c0 42.416-25.97 66.208-33.277 94.548h101.723c33.397 0 59.397 27.746 59.553 58.098.084 17.938-7.546 37.249-19.439 49.197l-.11.11c9.836 23.337 8.237 56.037-9.308 79.469 8.681 25.895-.069 57.704-16.382 74.757 4.298 17.598 2.244 32.575-6.148 44.632C440.202 511.587 389.616 512 346.839 512l-2.845-.001c-48.287-.017-87.806-17.598-119.56-31.725-15.957-7.099-36.821-15.887-52.651-16.178-6.54-.12-11.783-5.457-11.783-11.998v-213.77c0-3.2 1.282-6.271 3.558-8.521 39.614-39.144 56.648-80.587 89.117-113.111 14.804-14.832 20.188-37.236 25.393-58.902C282.515 39.293 291.817 0 312 0c24 0 72 8 72 81.452z"],thumbtack:[384,512,[],"f08d","M298.028 214.267L285.793 96H328c13.255 0 24-10.745 24-24V24c0-13.255-10.745-24-24-24H56C42.745 0 32 10.745 32 24v48c0 13.255 10.745 24 24 24h42.207L85.972 214.267C37.465 236.82 0 277.261 0 328c0 13.255 10.745 24 24 24h136v104.007c0 1.242.289 2.467.845 3.578l24 48c2.941 5.882 11.364 5.893 14.311 0l24-48a8.008 8.008 0 0 0 .845-3.578V352h136c13.255 0 24-10.745 24-24-.001-51.183-37.983-91.42-85.973-113.733z"],"ticket-alt":[576,512,[],"f3ff","M128 160h320v192H128V160zm400 96c0 26.51 21.49 48 48 48v96c0 26.51-21.49 48-48 48H48c-26.51 0-48-21.49-48-48v-96c26.51 0 48-21.49 48-48s-21.49-48-48-48v-96c0-26.51 21.49-48 48-48h480c26.51 0 48 21.49 48 48v96c-26.51 0-48 21.49-48 48zm-48-104c0-13.255-10.745-24-24-24H120c-13.255 0-24 10.745-24 24v208c0 13.255 10.745 24 24 24h336c13.255 0 24-10.745 24-24V152z"],times:[384,512,[],"f00d","M323.1 441l53.9-53.9c9.4-9.4 9.4-24.5 0-33.9L279.8 256l97.2-97.2c9.4-9.4 9.4-24.5 0-33.9L323.1 71c-9.4-9.4-24.5-9.4-33.9 0L192 168.2 94.8 71c-9.4-9.4-24.5-9.4-33.9 0L7 124.9c-9.4 9.4-9.4 24.5 0 33.9l97.2 97.2L7 353.2c-9.4 9.4-9.4 24.5 0 33.9L60.9 441c9.4 9.4 24.5 9.4 33.9 0l97.2-97.2 97.2 97.2c9.3 9.3 24.5 9.3 33.9 
0z"],"times-circle":[512,512,[],"f057","M256 8C119 8 8 119 8 256s111 248 248 248 248-111 248-248S393 8 256 8zm121.6 313.1c4.7 4.7 4.7 12.3 0 17L338 377.6c-4.7 4.7-12.3 4.7-17 0L256 312l-65.1 65.6c-4.7 4.7-12.3 4.7-17 0L134.4 338c-4.7-4.7-4.7-12.3 0-17l65.6-65-65.6-65.1c-4.7-4.7-4.7-12.3 0-17l39.6-39.6c4.7-4.7 12.3-4.7 17 0l65 65.7 65.1-65.6c4.7-4.7 12.3-4.7 17 0l39.6 39.6c4.7 4.7 4.7 12.3 0 17L312 256l65.6 65.1z"],tint:[384,512,[],"f043","M192 512c-98.435 0-178.087-79.652-178.087-178.087 0-111.196 101.194-154.065 148.522-311.825 9.104-30.116 51.099-28.778 59.13 0 47.546 158.486 148.522 200.069 148.522 311.825C370.087 432.348 290.435 512 192 512zm-42.522-171.826c-1.509-5.533-9.447-5.532-10.956 0-9.223 29.425-27.913 37.645-27.913 58.435C110.609 417.13 125.478 432 144 432s33.391-14.87 33.391-33.391c0-20.839-18.673-28.956-27.913-58.435z"],"toggle-off":[576,512,[],"f204","M384 64H192C85.961 64 0 149.961 0 256s85.961 192 192 192h192c106.039 0 192-85.961 192-192S490.039 64 384 64zM64 256c0-70.741 57.249-128 128-128 70.741 0 128 57.249 128 128 0 70.741-57.249 128-128 128-70.741 0-128-57.249-128-128zm320 128h-48.905c65.217-72.858 65.236-183.12 0-256H384c70.741 0 128 57.249 128 128 0 70.74-57.249 128-128 128z"],"toggle-on":[576,512,[],"f205","M576 256c0 106.039-85.961 192-192 192H192C85.961 448 0 362.039 0 256S85.961 64 192 64h192c106.039 0 192 85.961 192 192zM384 128c-70.741 0-128 57.249-128 128 0 70.741 57.249 128 128 128 70.741 0 128-57.249 128-128 0-70.741-57.249-128-128-128"],trademark:[640,512,[],"f25c","M97.119 163.133H12c-6.627 0-12-5.373-12-12V108c0-6.627 5.373-12 12-12h248.559c6.627 0 12 5.373 12 12v43.133c0 6.627-5.373 12-12 12H175.44V404c0 6.627-5.373 12-12 12h-54.322c-6.627 0-12-5.373-12-12V163.133zM329.825 96h65.425a12 12 0 0 1 11.346 8.093l43.759 127.068c7.161 20.588 16.111 52.812 16.111 52.812h.896s8.95-32.224 16.111-52.812l43.758-127.068A12 12 0 0 1 538.577 96h65.41a12 12 0 0 1 11.961 11.03l24.012 296c.567 6.987-4.951 12.97-11.961 12.97h-54.101a12 12 0 0 1-11.972-11.182l-9.082-132.93c-1.79-24.168 0-53.706 0-53.706h-.896s-10.741 33.566-17.902 53.706l-30.7 84.731a12 12 0 0 1-11.282 7.912h-50.302a12 12 0 0 1-11.282-7.912l-30.7-84.731c-7.161-20.14-17.903-53.706-17.903-53.706h-.895s1.79 29.538 0 53.706l-9.082 132.93c-.428 6.295-5.66 11.182-11.97 11.182H305.4c-7.017 0-12.536-5.994-11.959-12.987l24.425-296A11.999 11.999 0 0 1 329.825 96z"],train:[448,512,[],"f238","M448 96v256c0 51.815-61.624 96-130.022 96l62.98 49.721C386.905 502.417 383.562 512 376 512H72c-7.578 0-10.892-9.594-4.957-14.279L130.022 448C61.82 448 0 403.954 0 352V96C0 42.981 64 0 128 0h192c65 0 128 42.981 128 96zm-48 136V120c0-13.255-10.745-24-24-24H72c-13.255 0-24 10.745-24 24v112c0 13.255 10.745 24 24 24h304c13.255 0 24-10.745 24-24zm-176 64c-30.928 0-56 25.072-56 56s25.072 56 56 56 56-25.072 56-56-25.072-56-56-56z"],transgender:[384,512,[],"f224","M372 0h-79c-10.7 0-16 12.9-8.5 20.5l16.9 16.9-80.7 80.7C198.5 104.1 172.2 96 144 96 64.5 96 0 160.5 0 240c0 68.5 47.9 125.9 112 140.4V408H76c-6.6 0-12 5.4-12 12v40c0 6.6 5.4 12 12 12h36v28c0 6.6 5.4 12 12 12h40c6.6 0 12-5.4 12-12v-28h36c6.6 0 12-5.4 12-12v-40c0-6.6-5.4-12-12-12h-36v-27.6c64.1-14.6 112-71.9 112-140.4 0-28.2-8.1-54.5-22.1-76.7l80.7-80.7 16.9 16.9c7.6 7.6 20.5 2.2 20.5-8.5V12c0-6.6-5.4-12-12-12zM144 320c-44.1 0-80-35.9-80-80s35.9-80 80-80 80 35.9 80 80-35.9 80-80 80z"],"transgender-alt":[480,512,[],"f225","M468 0h-79c-10.7 0-16 12.9-8.5 20.5l16.9 16.9-80.7 80.7C294.5 104.1 268.2 96 240 96c-28.2 0-54.5 8.1-76.7 22.1l-16.5-16.5 19.8-19.8c4.7-4.7 4.7-12.3 
0-17l-28.3-28.3c-4.7-4.7-12.3-4.7-17 0l-19.8 19.8-19-19 16.9-16.9C107.1 12.9 101.7 0 91 0H12C5.4 0 0 5.4 0 12v79c0 10.7 12.9 16 20.5 8.5l16.9-16.9 19 19-19.8 19.8c-4.7 4.7-4.7 12.3 0 17l28.3 28.3c4.7 4.7 12.3 4.7 17 0l19.8-19.8 16.5 16.5C104.1 185.5 96 211.8 96 240c0 68.5 47.9 125.9 112 140.4V408h-36c-6.6 0-12 5.4-12 12v40c0 6.6 5.4 12 12 12h36v28c0 6.6 5.4 12 12 12h40c6.6 0 12-5.4 12-12v-28h36c6.6 0 12-5.4 12-12v-40c0-6.6-5.4-12-12-12h-36v-27.6c64.1-14.6 112-71.9 112-140.4 0-28.2-8.1-54.5-22.1-76.7l80.7-80.7 16.9 16.9c7.6 7.6 20.5 2.2 20.5-8.5V12c0-6.6-5.4-12-12-12zM240 320c-44.1 0-80-35.9-80-80s35.9-80 80-80 80 35.9 80 80-35.9 80-80 80z"],trash:[448,512,[],"f1f8","M0 84V56c0-13.3 10.7-24 24-24h112l9.4-18.7c4-8.2 12.3-13.3 21.4-13.3h114.3c9.1 0 17.4 5.1 21.5 13.3L312 32h112c13.3 0 24 10.7 24 24v28c0 6.6-5.4 12-12 12H12C5.4 96 0 90.6 0 84zm415.2 56.7L394.8 467c-1.6 25.3-22.6 45-47.9 45H101.1c-25.3 0-46.3-19.7-47.9-45L32.8 140.7c-.4-6.9 5.1-12.7 12-12.7h358.5c6.8 0 12.3 5.8 11.9 12.7z"],"trash-alt":[448,512,[],"f2ed","M0 84V56c0-13.3 10.7-24 24-24h112l9.4-18.7c4-8.2 12.3-13.3 21.4-13.3h114.3c9.1 0 17.4 5.1 21.5 13.3L312 32h112c13.3 0 24 10.7 24 24v28c0 6.6-5.4 12-12 12H12C5.4 96 0 90.6 0 84zm416 56v324c0 26.5-21.5 48-48 48H80c-26.5 0-48-21.5-48-48V140c0-6.6 5.4-12 12-12h360c6.6 0 12 5.4 12 12zm-272 68c0-8.8-7.2-16-16-16s-16 7.2-16 16v224c0 8.8 7.2 16 16 16s16-7.2 16-16V208zm96 0c0-8.8-7.2-16-16-16s-16 7.2-16 16v224c0 8.8 7.2 16 16 16s16-7.2 16-16V208zm96 0c0-8.8-7.2-16-16-16s-16 7.2-16 16v224c0 8.8 7.2 16 16 16s16-7.2 16-16V208z"],tree:[384,512,[],"f1bb","M377.33 375.429L293.906 288H328c21.017 0 31.872-25.207 17.448-40.479L262.79 160H296c20.878 0 31.851-24.969 17.587-40.331l-104-112.003c-9.485-10.214-25.676-10.229-35.174 0l-104 112.003C56.206 134.969 67.037 160 88 160h33.21l-82.659 87.521C24.121 262.801 34.993 288 56 288h34.094L6.665 375.429C-7.869 390.655 2.925 416 24.025 416H144c0 32.781-11.188 49.26-33.995 67.506C98.225 492.93 104.914 512 120 512h144c15.086 0 21.776-19.069 9.995-28.494-19.768-15.814-33.992-31.665-33.995-67.496V416h119.97c21.05 0 31.929-25.309 17.36-40.571z"],trophy:[576,512,[],"f091","M552 64H448V24c0-13.3-10.7-24-24-24H152c-13.3 0-24 10.7-24 24v40H24C10.7 64 0 74.7 0 88v56c0 35.7 22.5 72.4 61.9 100.7 31.5 22.7 69.8 37.1 110 41.7C203.3 338.5 240 360 240 360v72h-48c-35.3 0-64 20.7-64 56v12c0 6.6 5.4 12 12 12h296c6.6 0 12-5.4 12-12v-12c0-35.3-28.7-56-64-56h-48v-72s36.7-21.5 68.1-73.6c40.3-4.6 78.6-19 110-41.7 39.3-28.3 61.9-65 61.9-100.7V88c0-13.3-10.7-24-24-24zM99.3 192.8C74.9 175.2 64 155.6 64 144v-16h64.2c1 32.6 5.8 61.2 12.8 86.2-15.1-5.2-29.2-12.4-41.7-21.4zM512 144c0 16.1-17.7 36.1-35.3 48.8-12.5 9-26.7 16.2-41.8 21.4 7-25 11.8-53.6 12.8-86.2H512v16z"],truck:[640,512,[],"f0d1","M592 0H272c-26.51 0-48 21.49-48 48v48h-44.118a48 48 0 0 0-33.941 14.059l-99.882 99.882A48 48 0 0 0 32 243.882V352h-8c-13.255 0-24 10.745-24 24v16c0 13.255 10.745 24 24 24h40c0 53.019 42.981 96 96 96s96-42.981 96-96h128c0 53.019 42.981 96 96 96s96-42.981 96-96h40c13.255 0 24-10.745 24-24V48c0-26.51-21.49-48-48-48zM160 464c-26.467 0-48-21.533-48-48s21.533-48 48-48 48 21.533 48 48-21.533 48-48 48zm64-208H80v-12.118L179.882 144H224v112zm256 208c-26.467 0-48-21.533-48-48s21.533-48 48-48 48 21.533 48 48-21.533 48-48 48z"],tty:[512,512,[],"f1e4","M5.37 103.822c138.532-138.532 362.936-138.326 501.262 0 6.078 6.078 7.074 15.496 2.583 22.681l-43.214 69.138a18.332 18.332 0 0 1-22.356 7.305l-86.422-34.569a18.335 18.335 0 0 1-11.434-18.846L351.741 90c-62.145-22.454-130.636-21.986-191.483 0l5.953 
59.532a18.331 18.331 0 0 1-11.434 18.846l-86.423 34.568a18.334 18.334 0 0 1-22.356-7.305L2.787 126.502a18.333 18.333 0 0 1 2.583-22.68zM96 308v-40c0-6.627-5.373-12-12-12H44c-6.627 0-12 5.373-12 12v40c0 6.627 5.373 12 12 12h40c6.627 0 12-5.373 12-12zm96 0v-40c0-6.627-5.373-12-12-12h-40c-6.627 0-12 5.373-12 12v40c0 6.627 5.373 12 12 12h40c6.627 0 12-5.373 12-12zm96 0v-40c0-6.627-5.373-12-12-12h-40c-6.627 0-12 5.373-12 12v40c0 6.627 5.373 12 12 12h40c6.627 0 12-5.373 12-12zm96 0v-40c0-6.627-5.373-12-12-12h-40c-6.627 0-12 5.373-12 12v40c0 6.627 5.373 12 12 12h40c6.627 0 12-5.373 12-12zm96 0v-40c0-6.627-5.373-12-12-12h-40c-6.627 0-12 5.373-12 12v40c0 6.627 5.373 12 12 12h40c6.627 0 12-5.373 12-12zm-336 96v-40c0-6.627-5.373-12-12-12H92c-6.627 0-12 5.373-12 12v40c0 6.627 5.373 12 12 12h40c6.627 0 12-5.373 12-12zm96 0v-40c0-6.627-5.373-12-12-12h-40c-6.627 0-12 5.373-12 12v40c0 6.627 5.373 12 12 12h40c6.627 0 12-5.373 12-12zm96 0v-40c0-6.627-5.373-12-12-12h-40c-6.627 0-12 5.373-12 12v40c0 6.627 5.373 12 12 12h40c6.627 0 12-5.373 12-12zm96 0v-40c0-6.627-5.373-12-12-12h-40c-6.627 0-12 5.373-12 12v40c0 6.627 5.373 12 12 12h40c6.627 0 12-5.373 12-12zM96 500v-40c0-6.627-5.373-12-12-12H44c-6.627 0-12 5.373-12 12v40c0 6.627 5.373 12 12 12h40c6.627 0 12-5.373 12-12zm288 0v-40c0-6.627-5.373-12-12-12H140c-6.627 0-12 5.373-12 12v40c0 6.627 5.373 12 12 12h232c6.627 0 12-5.373 12-12zm96 0v-40c0-6.627-5.373-12-12-12h-40c-6.627 0-12 5.373-12 12v40c0 6.627 5.373 12 12 12h40c6.627 0 12-5.373 12-12z"],tv:[640,512,[],"f26c","M592 0H48C21.5 0 0 21.5 0 48v320c0 26.5 21.5 48 48 48h245.1v32h-160c-17.7 0-32 14.3-32 32s14.3 32 32 32h384c17.7 0 32-14.3 32-32s-14.3-32-32-32h-160v-32H592c26.5 0 48-21.5 48-48V48c0-26.5-21.5-48-48-48zm-16 352H64V64h512v288z"],umbrella:[576,512,[],"f0e9","M557.011 267.631c-51.432-45.217-107.572-43.698-158.567 30.731-5.298 7.861-14.906 7.165-19.736 0-2.483-3.624-32.218-60.808-90.708-60.808-45.766 0-70.542 31.378-90.709 60.808-4.829 7.165-14.436 7.861-19.734 0-50.904-74.285-106.613-76.406-158.567-30.731-10.21 8.264-20.912-1.109-18.696-9.481C32.146 134.573 158.516 64.612 288.001 64.612c128.793 0 256.546 69.961 287.706 193.538 2.206 8.322-8.426 17.793-18.696 9.481zM256 261.001V416c0 17.645-14.355 32-32 32s-32-14.355-32-32c0-17.673-14.327-32-32-32s-32 14.327-32 32c0 52.935 43.065 96 96 96s96-43.065 96-96V261.288c-21.836-10.806-45.425-9.737-64-.287zm64-211.007V32c0-17.673-14.327-32-32-32s-32 14.327-32 32v17.987a372.105 372.105 0 0 1 64 .007z"],underline:[448,512,[],"f0cd","M224.264 388.24c-91.669 0-156.603-51.165-156.603-151.392V64H39.37c-8.837 0-16-7.163-16-16V16c0-8.837 7.163-16 16-16h137.39c8.837 0 16 7.163 16 16v32c0 8.837-7.163 16-16 16h-28.813v172.848c0 53.699 28.314 79.444 76.317 79.444 46.966 0 75.796-25.434 75.796-79.965V64h-28.291c-8.837 0-16-7.163-16-16V16c0-8.837 7.163-16 16-16h136.868c8.837 0 16 7.163 16 16v32c0 8.837-7.163 16-16 16h-28.291v172.848c0 99.405-64.881 151.392-156.082 151.392zM16 448h416c8.837 0 16 7.163 16 16v32c0 8.837-7.163 16-16 16H16c-8.837 0-16-7.163-16-16v-32c0-8.837 7.163-16 16-16z"],undo:[512,512,[],"f0e2","M212.333 224.333H12c-6.627 0-12-5.373-12-12V12C0 5.373 5.373 0 12 0h48c6.627 0 12 5.373 12 12v78.112C117.773 39.279 184.26 7.47 258.175 8.007c136.906.994 246.448 111.623 246.157 248.532C504.041 393.258 393.12 504 256.333 504c-64.089 0-122.496-24.313-166.51-64.215-5.099-4.622-5.334-12.554-.467-17.42l33.967-33.967c4.474-4.474 11.662-4.717 16.401-.525C170.76 415.336 211.58 432 256.333 432c97.268 0 176-78.716 176-176 0-97.267-78.716-176-176-176-58.496 0-110.28 
28.476-142.274 72.333h98.274c6.627 0 12 5.373 12 12v48c0 6.627-5.373 12-12 12z"],"undo-alt":[512,512,[],"f2ea","M255.545 8c-66.269.119-126.438 26.233-170.86 68.685L48.971 40.971C33.851 25.851 8 36.559 8 57.941V192c0 13.255 10.745 24 24 24h134.059c21.382 0 32.09-25.851 16.971-40.971l-41.75-41.75c30.864-28.899 70.801-44.907 113.23-45.273 92.398-.798 170.283 73.977 169.484 169.442C423.236 348.009 349.816 424 256 424c-41.127 0-79.997-14.678-110.63-41.556-4.743-4.161-11.906-3.908-16.368.553L89.34 422.659c-4.872 4.872-4.631 12.815.482 17.433C133.798 479.813 192.074 504 256 504c136.966 0 247.999-111.033 248-247.998C504.001 119.193 392.354 7.755 255.545 8z"],"universal-access":[512,512,[],"f29a","M256 48c114.953 0 208 93.029 208 208 0 114.953-93.029 208-208 208-114.953 0-208-93.029-208-208 0-114.953 93.029-208 208-208m0-40C119.033 8 8 119.033 8 256s111.033 248 248 248 248-111.033 248-248S392.967 8 256 8zm0 56C149.961 64 64 149.961 64 256s85.961 192 192 192 192-85.961 192-192S362.039 64 256 64zm0 44c19.882 0 36 16.118 36 36s-16.118 36-36 36-36-16.118-36-36 16.118-36 36-36zm117.741 98.023c-28.712 6.779-55.511 12.748-82.14 15.807.851 101.023 12.306 123.052 25.037 155.621 3.617 9.26-.957 19.698-10.217 23.315-9.261 3.617-19.699-.957-23.316-10.217-8.705-22.308-17.086-40.636-22.261-78.549h-9.686c-5.167 37.851-13.534 56.208-22.262 78.549-3.615 9.255-14.05 13.836-23.315 10.217-9.26-3.617-13.834-14.056-10.217-23.315 12.713-32.541 24.185-54.541 25.037-155.621-26.629-3.058-53.428-9.027-82.141-15.807-8.6-2.031-13.926-10.648-11.895-19.249s10.647-13.926 19.249-11.895c96.686 22.829 124.283 22.783 220.775 0 8.599-2.03 17.218 3.294 19.249 11.895 2.029 8.601-3.297 17.219-11.897 19.249z"],university:[512,512,[],"f19c","M480 128v16a8 8 0 0 1-8 8h-24v12c0 6.627-5.373 12-12 12H44c-6.627 0-12-5.373-12-12v-12H8a8 8 0 0 1-8-8v-16a8 8 0 0 1 4.941-7.392l232-88a7.996 7.996 0 0 1 6.118 0l232 88A8 8 0 0 1 480 128zm-24 304H24c-13.255 0-24 10.745-24 24v16a8 8 0 0 0 8 8h464a8 8 0 0 0 8-8v-16c0-13.255-10.745-24-24-24zM64 192v192H44c-6.627 0-12 5.373-12 12v20h416v-20c0-6.627-5.373-12-12-12h-20V192h-64v192h-32V192h-64v192h-32V192h-64v192h-32V192H64z"],unlink:[512,512,[],"f127","M304.083 405.907c4.686 4.686 4.686 12.284 0 16.971l-44.674 44.674c-59.263 59.262-155.693 59.266-214.961 0-59.264-59.265-59.264-155.696 0-214.96l44.675-44.675c4.686-4.686 12.284-4.686 16.971 0l39.598 39.598c4.686 4.686 4.686 12.284 0 16.971l-44.675 44.674c-28.072 28.073-28.072 73.75 0 101.823 28.072 28.072 73.75 28.073 101.824 0l44.674-44.674c4.686-4.686 12.284-4.686 16.971 0l39.597 39.598zm-56.568-260.216c4.686 4.686 12.284 4.686 16.971 0l44.674-44.674c28.072-28.075 73.75-28.073 101.824 0 28.072 28.073 28.072 73.75 0 101.823l-44.675 44.674c-4.686 4.686-4.686 12.284 0 16.971l39.598 39.598c4.686 4.686 12.284 4.686 16.971 0l44.675-44.675c59.265-59.265 59.265-155.695 0-214.96-59.266-59.264-155.695-59.264-214.961 0l-44.674 44.674c-4.686 4.686-4.686 12.284 0 16.971l39.597 39.598zm234.828 359.28l22.627-22.627c9.373-9.373 9.373-24.569 0-33.941L63.598 7.029c-9.373-9.373-24.569-9.373-33.941 0L7.029 29.657c-9.373 9.373-9.373 24.569 0 33.941l441.373 441.373c9.373 9.372 24.569 9.372 33.941 0z"],unlock:[448,512,[],"f09c","M400 256H152V152.9c0-39.6 31.7-72.5 71.3-72.9 40-.4 72.7 32.1 72.7 72v16c0 13.3 10.7 24 24 24h32c13.3 0 24-10.7 24-24v-16C376 68 307.5-.3 223.5 0 139.5.3 72 69.5 72 153.5V256H48c-26.5 0-48 21.5-48 48v160c0 26.5 21.5 48 48 48h352c26.5 0 48-21.5 48-48V304c0-26.5-21.5-48-48-48z"],"unlock-alt":[448,512,[],"f13e","M400 256H152V152.9c0-39.6 31.7-72.5 71.3-72.9 
40-.4 72.7 32.1 72.7 72v16c0 13.3 10.7 24 24 24h32c13.3 0 24-10.7 24-24v-16C376 68 307.5-.3 223.5 0 139.5.3 72 69.5 72 153.5V256H48c-26.5 0-48 21.5-48 48v160c0 26.5 21.5 48 48 48h352c26.5 0 48-21.5 48-48V304c0-26.5-21.5-48-48-48zM264 408c0 22.1-17.9 40-40 40s-40-17.9-40-40v-48c0-22.1 17.9-40 40-40s40 17.9 40 40v48z"],upload:[512,512,[],"f093","M296 384h-80c-13.3 0-24-10.7-24-24V192h-87.7c-17.8 0-26.7-21.5-14.1-34.1L242.3 5.7c7.5-7.5 19.8-7.5 27.3 0l152.2 152.2c12.6 12.6 3.7 34.1-14.1 34.1H320v168c0 13.3-10.7 24-24 24zm216-8v112c0 13.3-10.7 24-24 24H24c-13.3 0-24-10.7-24-24V376c0-13.3 10.7-24 24-24h136v8c0 30.9 25.1 56 56 56h80c30.9 0 56-25.1 56-56v-8h136c13.3 0 24 10.7 24 24zm-124 88c0-11-9-20-20-20s-20 9-20 20 9 20 20 20 20-9 20-20zm64 0c0-11-9-20-20-20s-20 9-20 20 9 20 20 20 20-9 20-20z"],user:[512,512,[],"f007","M96 160C96 71.634 167.635 0 256 0s160 71.634 160 160-71.635 160-160 160S96 248.366 96 160zm304 192h-28.556c-71.006 42.713-159.912 42.695-230.888 0H112C50.144 352 0 402.144 0 464v24c0 13.255 10.745 24 24 24h464c13.255 0 24-10.745 24-24v-24c0-61.856-50.144-112-112-112z"],"user-circle":[512,512,[],"f2bd","M256 8C119.033 8 8 119.033 8 256s111.033 248 248 248 248-111.033 248-248S392.967 8 256 8zM144 208c0-61.856 50.144-112 112-112s112 50.144 112 112-50.144 112-112 112-112-50.144-112-112zm268.408 172.663c-80.346 100.411-232.375 100.53-312.817 0C117.003 362.973 141.218 352 168 352h18.204c44.03 21.336 95.495 21.368 139.592 0H344c26.782 0 50.997 10.973 68.408 28.663z"],"user-md":[448,512,[],"f0f0","M96 128C96 57.308 153.308 0 224 0s128 57.308 128 128-57.308 128-128 128S96 198.692 96 128zm256 160v33.61c36.471 7.433 64 39.756 64 78.39v49.441c0 11.44-8.075 21.29-19.293 23.534l-21.802 4.361c-6.499 1.3-12.821-2.915-14.12-9.414l-1.569-7.845c-1.3-6.499 2.915-12.821 9.414-14.12l15.37-3.074v-42.078c0-26.283-20.793-48.297-47.071-48.797C310.039 351.498 288 373.224 288 400v42.883l15.371 3.074c6.499 1.3 10.713 7.622 9.414 14.12l-1.569 7.845c-1.3 6.499-7.622 10.714-14.12 9.414l-21.802-4.361C264.075 470.732 256 460.882 256 449.441V400c0-38.634 27.529-70.957 64-78.39V288h-22.624c-45.669 20.945-99.331 21.749-146.752 0H128v66.025c28.495 7.361 49.359 33.906 47.931 64.977-1.506 32.778-28.097 59.392-60.874 60.926C78.383 481.644 48 452.303 48 416c0-29.767 20.427-54.852 48-61.975V288c-53.019 0-96 42.981-96 96v104c0 13.255 10.745 24 24 24h400c13.255 0 24-10.745 24-24V384c0-53.019-42.981-96-96-96zM80 416c0 17.645 14.355 32 32 32s32-14.355 32-32-14.355-32-32-32-32 14.355-32 32z"],"user-plus":[640,512,[],"f234","M616 332c0-6.627-5.373-12-12-12h-60v-60c0-6.627-5.373-12-12-12h-40c-6.627 0-12 5.373-12 12v60h-60c-6.627 0-12 5.373-12 12v40c0 6.627 5.373 12 12 12h60v60c0 6.627 5.373 12 12 12h40c6.627 0 12-5.373 12-12v-60h60c6.627 0 12-5.373 12-12v-40zM448 444v15c0 11.598-9.402 21-21 21H21c-11.598 0-21-9.402-21-21v-21c0-54.124 43.876-98 98-98h24.986c62.104 37.358 139.897 37.374 202.027 0H350a98.09 98.09 0 0 1 26 3.493V372c0 24.262 19.738 44 44 44h25.519c2.768 12.064 2.481 20.659 2.481 28zM84 172c0-77.32 62.68-140 140-140s140 62.68 140 140-62.68 140-140 140S84 249.32 84 172z"],"user-secret":[448,512,[],"f21b","M388.829 295.324l20.972-55.052c2.992-7.854-2.809-16.272-11.214-16.272H340.39c7.45-16.236 11.61-34.297 11.61-53.333 0-3.631-.16-7.224-.456-10.778C391.083 152.074 416 140.684 416 128c0-13.263-27.231-25.112-69.947-32.937-9.185-32.805-27.178-65.797-40.714-82.85-9.452-11.908-25.873-15.634-39.471-8.834l-27.557 13.779a31.997 31.997 0 0 1-28.622 0l-27.557-13.78c-13.599-6.799-30.02-3.074-39.471 8.834-13.536 17.053-31.529 
50.045-40.714 82.85C59.231 102.888 32 114.737 32 128c0 12.684 24.917 24.074 64.456 31.889A129.362 129.362 0 0 0 96 170.667c0 19.037 4.159 37.098 11.608 53.333h-57.41c-8.615 0-14.423 8.809-11.029 16.727l22.906 53.447C25.799 307.882 0 342.925 0 384v104c0 13.255 10.745 24 24 24h400c13.255 0 24-10.745 24-24V384c0-39.97-24.43-74.231-59.171-88.676zM184 488l-48-192 48 24 24 40-24 128zm80 0l-24-128 24-40 48-24-48 192zm54.778-303.746c-.008.043-4.299 3.231-5.125 5.771-3.861 11.864-7.026 24.572-16.514 33.359-10.071 9.327-47.957 22.405-63.996-25.029-2.837-8.395-15.447-8.398-18.285 0-16.963 50.168-56.019 32.417-63.996 25.029-9.488-8.786-12.653-21.495-16.514-33.359-.826-2.54-5.118-5.728-5.125-5.771-.554-2.925-.981-5.884-1.22-8.85-.309-3.848 10.078-3.658 11.078-3.747 26.303-2.326 52.303-.579 78.023 5.497 2.563.606 11.553.529 13.793 0 25.72-6.076 51.72-7.824 78.023-5.497 1.002.089 11.387-.102 11.078 3.747-.239 2.966-.666 5.925-1.22 8.85z"],"user-times":[640,512,[],"f235","M599.681 411.397c4.686-4.686 4.686-12.284 0-16.971L557.255 352l42.426-42.426c4.686-4.686 4.686-12.284 0-16.971l-28.284-28.284c-4.686-4.686-12.284-4.686-16.971 0L512 306.745l-42.426-42.426c-4.686-4.686-12.284-4.686-16.971 0l-28.284 28.284c-4.686 4.686-4.686 12.284 0 16.971L466.745 352l-42.426 42.426c-4.686 4.686-4.686 12.284 0 16.971l28.284 28.284c4.686 4.686 12.284 4.686 16.971 0L512 397.255l42.426 42.426c4.686 4.686 12.284 4.686 16.971 0l28.284-28.284zM84 172c0-77.32 62.68-140 140-140s140 62.68 140 140-62.68 140-140 140S84 249.32 84 172zm359.737 299.645C439.904 476.712 433.843 480 427 480H21c-11.598 0-21-9.402-21-21v-21c0-54.124 43.876-98 98-98h24.986c62.104 37.358 139.897 37.374 202.027 0H350c23.366 0 44.818 8.183 61.658 21.832l-9.967 9.967c-17.156 17.156-17.156 45.07 0 62.225l28.284 28.284a43.946 43.946 0 0 0 13.762 9.337z"],users:[640,512,[],"f0c0","M220 164c0-55.229 44.772-100 100-100s100 44.771 100 100-44.772 100-100 100-100-44.771-100-100zM48 208c0-44.183 35.817-80 80-80s80 35.817 80 80-35.817 80-80 80-80-35.817-80-80zm384 0c0-44.183 35.817-80 80-80s80 35.817 80 80-35.817 80-80 80-80-35.817-80-80zm-22 76c38.66 0 70 31.34 70 70v70c0 13.255-10.745 24-24 24H184c-13.255 0-24-10.745-24-24v-70c0-38.66 31.34-70 70-70h17.848c44.364 26.687 99.93 26.693 144.305 0H410m-282 70c0-11.975 2.081-23.472 5.889-34.156-21.93 1.152-44.122-4.121-63.611-15.844H56c-30.928 0-56 25.072-56 56v32c0 13.255 10.745 24 24 24h104v-62zm456-50h-14.278c-19.495 11.727-41.686 16.996-63.611 15.844A101.542 101.542 0 0 1 512 354v62h104c13.255 0 24-10.745 24-24v-32c0-30.928-25.072-56-56-56z"],"utensil-spoon":[512,512,[],"f2e5","M480.1 31.9c-55-55.1-164.9-34.5-227.8 28.5-49.3 49.3-55.1 110-28.8 160.4L9 413.2c-11.6 10.5-12.1 28.5-1 39.5L59.3 504c11 11 29.1 10.5 39.5-1.1l192.4-214.4c50.4 26.3 111.1 20.5 160.4-28.8 63-62.9 83.6-172.8 28.5-227.8z"],utensils:[416,512,[],"f2e7","M207.9 15.2c.8 4.7 16.1 94.5 16.1 128.8 0 52.3-27.8 89.6-68.9 104.6L168 486.7c.7 13.7-10.2 25.3-24 25.3H80c-13.7 0-24.7-11.5-24-25.3l12.9-238.1C27.7 233.6 0 196.2 0 144 0 109.6 15.3 19.9 16.1 15.2 19.3-5.1 61.4-5.4 64 16.3v141.2c1.3 3.4 15.1 3.2 16 0 1.4-25.3 7.9-139.2 8-141.8 3.3-20.8 44.7-20.8 47.9 0 .2 2.7 6.6 116.5 8 141.8.9 3.2 14.8 3.4 16 0V16.3c2.6-21.6 44.8-21.4 48-1.1zm119.2 285.7l-15 185.1c-1.2 14 9.9 26 23.9 26h56c13.3 0 24-10.7 24-24V24c0-13.2-10.7-24-24-24-82.5 0-221.4 178.5-64.9 300.9z"],venus:[288,512,[],"f221","M288 176c0-79.5-64.5-144-144-144S0 96.5 0 176c0 68.5 47.9 125.9 112 140.4V368H76c-6.6 0-12 5.4-12 12v40c0 6.6 5.4 12 12 12h36v36c0 6.6 5.4 12 12 12h40c6.6 0 12-5.4 12-12v-36h36c6.6 
0 12-5.4 12-12v-40c0-6.6-5.4-12-12-12h-36v-51.6c64.1-14.5 112-71.9 112-140.4zm-224 0c0-44.1 35.9-80 80-80s80 35.9 80 80-35.9 80-80 80-80-35.9-80-80z"],"venus-double":[512,512,[],"f226","M288 176c0-79.5-64.5-144-144-144S0 96.5 0 176c0 68.5 47.9 125.9 112 140.4V368H76c-6.6 0-12 5.4-12 12v40c0 6.6 5.4 12 12 12h36v36c0 6.6 5.4 12 12 12h40c6.6 0 12-5.4 12-12v-36h36c6.6 0 12-5.4 12-12v-40c0-6.6-5.4-12-12-12h-36v-51.6c64.1-14.5 112-71.9 112-140.4zm-224 0c0-44.1 35.9-80 80-80s80 35.9 80 80-35.9 80-80 80-80-35.9-80-80zm336 140.4V368h36c6.6 0 12 5.4 12 12v40c0 6.6-5.4 12-12 12h-36v36c0 6.6-5.4 12-12 12h-40c-6.6 0-12-5.4-12-12v-36h-36c-6.6 0-12-5.4-12-12v-40c0-6.6 5.4-12 12-12h36v-51.6c-21.2-4.8-40.6-14.3-57.2-27.3 14-16.7 25-36 32.1-57.1 14.5 14.8 34.7 24 57.1 24 44.1 0 80-35.9 80-80s-35.9-80-80-80c-22.3 0-42.6 9.2-57.1 24-7.1-21.1-18-40.4-32.1-57.1C303.4 43.6 334.3 32 368 32c79.5 0 144 64.5 144 144 0 68.5-47.9 125.9-112 140.4z"],"venus-mars":[576,512,[],"f228","M564 0h-79c-10.7 0-16 12.9-8.5 20.5l16.9 16.9-48.7 48.7C422.5 72.1 396.2 64 368 64c-33.7 0-64.6 11.6-89.2 30.9 14 16.7 25 36 32.1 57.1 14.5-14.8 34.7-24 57.1-24 44.1 0 80 35.9 80 80s-35.9 80-80 80c-22.3 0-42.6-9.2-57.1-24-7.1 21.1-18 40.4-32.1 57.1 24.5 19.4 55.5 30.9 89.2 30.9 79.5 0 144-64.5 144-144 0-28.2-8.1-54.5-22.1-76.7l48.7-48.7 16.9 16.9c2.4 2.4 5.4 3.5 8.4 3.5 6.2 0 12.1-4.8 12.1-12V12c0-6.6-5.4-12-12-12zM144 64C64.5 64 0 128.5 0 208c0 68.5 47.9 125.9 112 140.4V400H76c-6.6 0-12 5.4-12 12v40c0 6.6 5.4 12 12 12h36v36c0 6.6 5.4 12 12 12h40c6.6 0 12-5.4 12-12v-36h36c6.6 0 12-5.4 12-12v-40c0-6.6-5.4-12-12-12h-36v-51.6c64.1-14.6 112-71.9 112-140.4 0-79.5-64.5-144-144-144zm0 224c-44.1 0-80-35.9-80-80s35.9-80 80-80 80 35.9 80 80-35.9 80-80 80z"],video:[576,512,[],"f03d","M528 64h-12.118a48 48 0 0 0-33.941 14.059L384 176v-64c0-26.51-21.49-48-48-48H48C21.49 64 0 85.49 0 112v288c0 26.51 21.49 48 48 48h288c26.51 0 48-21.49 48-48v-64l97.941 97.941A48 48 0 0 0 515.882 448H528c26.51 0 48-21.49 48-48V112c0-26.51-21.49-48-48-48z"],"volume-down":[384,512,[],"f027","M256 88.017v335.964c0 21.438-25.943 31.998-40.971 16.971L126.059 352H24c-13.255 0-24-10.745-24-24V184c0-13.255 10.745-24 24-24h102.059l88.971-88.954c15.01-15.01 40.97-4.49 40.97 16.971zM384 256c0-33.717-17.186-64.35-45.972-81.944-15.079-9.214-34.775-4.463-43.992 10.616s-4.464 34.775 10.615 43.992C314.263 234.538 320 244.757 320 256a32.056 32.056 0 0 1-13.802 26.332c-14.524 10.069-18.136 30.006-8.067 44.53 10.07 14.525 30.008 18.136 44.53 8.067C368.546 316.983 384 287.478 384 256z"],"volume-off":[256,512,[],"f026","M256 88.017v335.964c0 21.438-25.943 31.998-40.971 16.971L126.059 352H24c-13.255 0-24-10.745-24-24V184c0-13.255 10.745-24 24-24h102.059l88.971-88.954c15.01-15.01 40.97-4.49 40.97 16.971z"],"volume-up":[576,512,[],"f028","M256 88.017v335.964c0 21.438-25.943 31.998-40.971 16.971L126.059 352H24c-13.255 0-24-10.745-24-24V184c0-13.255 10.745-24 24-24h102.059l88.971-88.954c15.01-15.01 40.97-4.49 40.97 16.971zm182.056-77.876C422.982.92 403.283 5.668 394.061 20.745c-9.221 15.077-4.473 34.774 10.604 43.995C468.967 104.063 512 174.983 512 256c0 73.431-36.077 142.292-96.507 184.206-14.522 10.072-18.129 30.01-8.057 44.532 10.076 14.528 30.016 18.126 44.531 8.057C529.633 438.927 576 350.406 576 256c0-103.244-54.579-194.877-137.944-245.859zM480 256c0-68.547-36.15-129.777-91.957-163.901-15.076-9.22-34.774-4.471-43.994 10.607-9.22 15.078-4.471 34.774 10.607 43.994C393.067 170.188 416 211.048 416 256c0 41.964-20.62 81.319-55.158 105.276-14.521 10.073-18.128 30.01-8.056 44.532 6.216 8.96 16.185 
13.765 26.322 13.765a31.862 31.862 0 0 0 18.21-5.709C449.091 377.953 480 318.938 480 256zm-96 0c0-33.717-17.186-64.35-45.972-81.944-15.079-9.214-34.775-4.463-43.992 10.616s-4.464 34.775 10.615 43.992C314.263 234.538 320 244.757 320 256a32.056 32.056 0 0 1-13.802 26.332c-14.524 10.069-18.136 30.006-8.067 44.53 10.07 14.525 30.008 18.136 44.53 8.067C368.546 316.983 384 287.478 384 256z"],wheelchair:[512,512,[],"f193","M496.101 385.669l14.227 28.663c3.929 7.915.697 17.516-7.218 21.445l-65.465 32.886c-16.049 7.967-35.556 1.194-43.189-15.055L331.679 320H192c-15.925 0-29.426-11.71-31.679-27.475C126.433 55.308 128.38 70.044 128 64c0-36.358 30.318-65.635 67.052-63.929 33.271 1.545 60.048 28.905 60.925 62.201.868 32.933-23.152 60.423-54.608 65.039l4.67 32.69H336c8.837 0 16 7.163 16 16v32c0 8.837-7.163 16-16 16H215.182l4.572 32H352a32 32 0 0 1 28.962 18.392L438.477 396.8l36.178-18.349c7.915-3.929 17.517-.697 21.446 7.218zM311.358 352h-24.506c-7.788 54.204-54.528 96-110.852 96-61.757 0-112-50.243-112-112 0-41.505 22.694-77.809 56.324-97.156-3.712-25.965-6.844-47.86-9.488-66.333C45.956 198.464 0 261.963 0 336c0 97.047 78.953 176 176 176 71.87 0 133.806-43.308 161.11-105.192L311.358 352z"],wifi:[640,512,[],"f1eb","M384 416c0 35.346-28.654 64-64 64s-64-28.654-64-64c0-35.346 28.654-64 64-64s64 28.654 64 64zm136.659-124.443c6.465-6.465 6.245-17.065-.564-23.167-113.793-101.985-286.526-101.869-400.19 0-6.809 6.102-7.029 16.702-.564 23.167l34.006 34.006c5.927 5.927 15.464 6.32 21.769.796 82.88-72.609 207.074-72.447 289.768 0 6.305 5.524 15.842 5.132 21.769-.796l34.006-34.006zm112.11-113.718c6.385-6.385 6.254-16.816-.35-22.973-175.768-163.86-449.134-163.8-624.837 0-6.604 6.157-6.735 16.589-.35 22.973l33.966 33.966c6.095 6.095 15.891 6.231 22.224.383 144.763-133.668 368.356-133.702 513.156 0 6.333 5.848 16.129 5.712 22.224-.383l33.967-33.966z"],"window-close":[512,512,[],"f410","M464 32H48C21.5 32 0 53.5 0 80v352c0 26.5 21.5 48 48 48h416c26.5 0 48-21.5 48-48V80c0-26.5-21.5-48-48-48zm-83.6 290.5c4.8 4.8 4.8 12.6 0 17.4l-40.5 40.5c-4.8 4.8-12.6 4.8-17.4 0L256 313.3l-66.5 67.1c-4.8 4.8-12.6 4.8-17.4 0l-40.5-40.5c-4.8-4.8-4.8-12.6 0-17.4l67.1-66.5-67.1-66.5c-4.8-4.8-4.8-12.6 0-17.4l40.5-40.5c4.8-4.8 12.6-4.8 17.4 0l66.5 67.1 66.5-67.1c4.8-4.8 12.6-4.8 17.4 0l40.5 40.5c4.8 4.8 4.8 12.6 0 17.4L313.3 256l67.1 66.5z"],"window-maximize":[512,512,[],"f2d0","M464 32H48C21.5 32 0 53.5 0 80v352c0 26.5 21.5 48 48 48h416c26.5 0 48-21.5 48-48V80c0-26.5-21.5-48-48-48zm-16 160H64v-84c0-6.6 5.4-12 12-12h360c6.6 0 12 5.4 12 12v84z"],"window-minimize":[512,512,[],"f2d1","M464 352H48c-26.5 0-48 21.5-48 48v32c0 26.5 21.5 48 48 48h416c26.5 0 48-21.5 48-48v-32c0-26.5-21.5-48-48-48z"],"window-restore":[512,512,[],"f2d2","M512 48v288c0 26.5-21.5 48-48 48h-48V176c0-44.1-35.9-80-80-80H128V48c0-26.5 21.5-48 48-48h288c26.5 0 48 21.5 48 48zM384 176v288c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V176c0-26.5 21.5-48 48-48h288c26.5 0 48 21.5 48 48zm-68 28c0-6.6-5.4-12-12-12H76c-6.6 0-12 5.4-12 12v52h252v-52z"],"won-sign":[576,512,[],"f159","M564 192c6.627 0 12-5.373 12-12v-40c0-6.627-5.373-12-12-12h-48.028l18.572-80.61c1.732-7.518-3.978-14.694-11.693-14.694h-46.107a11.998 11.998 0 0 0-11.736 9.5L450.73 128H340.839l-19.725-85.987a12 12 0 0 0-11.696-9.317H265.43a12 12 0 0 0-11.687 9.277L233.696 128H124.975L107.5 42.299a12 12 0 0 0-11.758-9.602H53.628c-7.686 0-13.39 7.124-11.709 14.624L60 128H12c-6.627 0-12 5.373-12 12v40c0 6.627 5.373 12 12 12h62.342l7.171 32H12c-6.627 0-12 5.373-12 12v40c0 6.627 5.373 12 12 12h83.856l40.927 182.624A12 12 0 0 0 
148.492 480h56.767c5.583 0 10.428-3.85 11.689-9.288L259.335 288h55.086l42.386 182.712A12 12 0 0 0 368.496 480h56.826a12 12 0 0 0 11.694-9.306L479.108 288H564c6.627 0 12-5.373 12-12v-40c0-6.627-5.373-12-12-12h-70.146l7.373-32H564zm-425.976 0h80.757l-7.457 32h-66.776l-6.524-32zm45.796 150.029c-6.194 25.831-6.758 47.25-7.321 47.25h-1.126s-1.689-22.05-6.758-47.25L157.599 288h38.812l-12.591 54.029zM274.182 224l1.996-8.602c1.856-7.962 3.457-15.968 4.803-23.398h11.794c1.347 7.43 2.947 15.436 4.803 23.398l1.996 8.602h-25.392zm130.959 118.029c-5.068 25.2-6.758 47.25-6.758 47.25h-1.126c-.563 0-1.126-21.42-7.321-47.25L377.542 288h39.107l-11.508 54.029zM430.281 224h-67.42l-7.34-32h81.577l-6.817 32z"],wrench:[512,512,[],"f0ad","M481.156 200c9.3 0 15.12 10.155 10.325 18.124C466.295 259.992 420.419 288 368 288c-79.222 0-143.501-63.974-143.997-143.079C223.505 65.469 288.548-.001 368.002 0c52.362.001 98.196 27.949 123.4 69.743C496.24 77.766 490.523 88 481.154 88H376l-40 56 40 56h105.156zm-171.649 93.003L109.255 493.255c-24.994 24.993-65.515 24.994-90.51 0-24.993-24.994-24.993-65.516 0-90.51L218.991 202.5c16.16 41.197 49.303 74.335 90.516 90.503zM104 432c0-13.255-10.745-24-24-24s-24 10.745-24 24 10.745 24 24 24 24-10.745 24-24z"],"yen-sign":[384,512,[],"f157","M351.208 32h-65.277a12 12 0 0 0-10.778 6.724l-55.39 113.163c-14.513 34.704-27.133 71.932-27.133 71.932h-1.262s-12.62-37.228-27.133-71.932l-55.39-113.163A11.997 11.997 0 0 0 98.068 32H32.792c-9.057 0-14.85 9.65-10.59 17.643L102.322 200H44c-6.627 0-12 5.373-12 12v32c0 6.627 5.373 12 12 12h88.162L152 293.228V320H44c-6.627 0-12 5.373-12 12v32c0 6.627 5.373 12 12 12h108v92c0 6.627 5.373 12 12 12h56c6.627 0 12-5.373 12-12v-92h108c6.627 0 12-5.373 12-12v-32c0-6.627-5.373-12-12-12H232v-26.772L251.838 256H340c6.627 0 12-5.373 12-12v-32c0-6.627-5.373-12-12-12h-58.322l80.12-150.357C366.058 41.65 360.266 32 351.208 32z"]}),t=z||{};t.___FONT_AWESOME___||(t.___FONT_AWESOME___={}),t.___FONT_AWESOME___.styles||(t.___FONT_AWESOME___.styles={}),t.___FONT_AWESOME___.hooks||(t.___FONT_AWESOME___.hooks={}),t.___FONT_AWESOME___.shims||(t.___FONT_AWESOME___.shims=[]);var s=t.___FONT_AWESOME___,r=Object.assign||function(c){for(var l=1;l<arguments.length;l++){var h=arguments[l];for(var v in h)Object.prototype.hasOwnProperty.call(h,v)&&(c[v]=h[v])}return c};!function(c){try{c()}catch(c){}}(function(){c("fas"),c("fa")})}(),function(){"use strict";function c(c){var l=Object.keys(Hc);Object.keys(c).forEach(function(h){~l.indexOf(h)&&(Hc[h]=c[h])})}function l(c){return~mc.indexOf(c)}function h(c){if(c&&void 0!==K.createElement){var l=K.createElement("style");l.setAttribute("type","text/css"),l.innerHTML=c;for(var h=K.head.childNodes,v=null,z=h.length-1;z>-1;z--){var e=h[z],a=(e.tagName||"").toUpperCase();["STYLE","LINK"].indexOf(a)>-1&&(v=e)}return K.head.insertBefore(l,v),c}}function v(){return++Cc}function z(c){for(var l=[],h=(c||[]).length>>>0;h--;)l[h]=c[h];return l}function e(c){return c.classList?z(c.classList):(c.getAttribute("class")||"").split(" ").filter(function(c){return c})}function a(c,h){var v=h.split("-"),z=v[0],e=v.slice(1).join("-");return z!==c||""===e||l(e)?null:e}function m(c){return(""+c).replace(/&/g,"&amp;").replace(/"/g,"&quot;").replace(/'/g,"&#39;").replace(/</g,"&lt;").replace(/>/g,"&gt;")}function t(c){return Object.keys(c||{}).reduce(function(l,h){return l+(h+'="')+m(c[h])+'" '},"").trim()}function s(c){return Object.keys(c||{}).reduce(function(l,h){return l+(h+": ")+c[h]+";"},"")}function r(c){return 
c.size!==Vc.size||c.x!==Vc.x||c.y!==Vc.y||c.rotate!==Vc.rotate||c.flipX||c.flipY}function f(c){var l=c.transform,h=c.containerWidth,v=c.iconWidth;return{outer:{transform:"translate("+h/2+" 256)"},inner:{transform:"translate("+32*l.x+", "+32*l.y+") "+" "+("scale("+l.size/16*(l.flipX?-1:1)+", "+l.size/16*(l.flipY?-1:1)+") ")+" "+("rotate("+l.rotate+" 0 0)")},path:{transform:"translate("+v/2*-1+" -256)"}}}function M(c){var l=c.transform,h=c.width,v=void 0===h?$:h,z=c.height,e=void 0===z?$:z,a=c.startCentered,m=void 0!==a&&a,t="";return t+=m&&Z?"translate("+(l.x/oc-v/2)+"em, "+(l.y/oc-e/2)+"em) ":m?"translate(calc(-50% + "+l.x/oc+"em), calc(-50% + "+l.y/oc+"em)) ":"translate("+l.x/oc+"em, "+l.y/oc+"em) ",t+="scale("+l.size/oc*(l.flipX?-1:1)+", "+l.size/oc*(l.flipY?-1:1)+") ",t+="rotate("+l.rotate+"deg) "}function i(c){var l,h=c.icons,z=h.main,e=h.mask,a=c.prefix,m=c.iconName,t=c.transform,s=c.symbol,r=c.title,f=c.extra,M=e.found?e:z,i=M.width,n=M.height,H="fa-w-"+Math.ceil(i/n*16),o=[Hc.replacementClass,m?Hc.familyPrefix+"-"+m:"",H].concat(f.classes).join(" "),V={children:[],attributes:fc({},f.attributes,(l={},rc(l,cc,""),rc(l,"data-prefix",a),rc(l,"data-icon",m),rc(l,"class",o),rc(l,"role","img"),rc(l,"xmlns","http://www.w3.org/2000/svg"),rc(l,"viewBox","0 0 "+i+" "+n),l))};r&&V.children.push({tag:"title",attributes:{id:V.attributes["aria-labelledby"]||"title-"+v()},children:[r]});var C=fc({},V,{prefix:a,iconName:m,main:z,mask:e,transform:t,symbol:s,styles:f.styles}),L=e.found&&z.found?uc(C):dc(C),u=L.children,d=L.attributes;return C.children=u,C.attributes=d,s?gc(C):pc(C)}function n(c){var l,h=c.content,v=c.width,z=c.height,e=c.transform,a=c.title,m=c.extra,t=fc({},m.attributes,a?{title:a}:{},(l={},rc(l,cc,""),rc(l,"class",m.classes.join(" ")),l)),f=fc({},m.styles);r(e)&&(f.transform=M({transform:e,startCentered:!0,width:v,height:z}),f["-webkit-transform"]=f.transform);var i=s(f);i.length>0&&(t.style=i);var n=[];return n.push({tag:"span",attributes:t,children:[h]}),a&&n.push({tag:"span",attributes:{class:"sr-only"},children:[a]}),n}function H(c,l){return Nc[c][l]}function o(c,l){return qc[c][l]}function V(c){return Tc[c]||{prefix:null,iconName:null}}function C(c){return c.reduce(function(c,l){var h=a(Hc.familyPrefix,l);if(Fc[l])c.prefix=l;else if(h){var v="fa"===c.prefix?V(h):{};c.iconName=v.iconName||h,c.prefix=v.prefix||c.prefix}else l!==Hc.replacementClass&&0!==l.indexOf("fa-w-")&&c.rest.push(l);return c},Wc())}function L(c,l,h){if(c&&c[l]&&c[l][h])return{prefix:l,iconName:h,icon:c[l][h]}}function u(c){var l=c.tag,h=c.attributes,v=void 0===h?{}:h,z=c.children,e=void 0===z?[]:z;return"string"==typeof c?m(c):"<"+l+" "+t(v)+">"+e.map(u).join("")+"</"+l+">"}function d(c){var l=c.getAttribute?c.getAttribute("class"):null;return!!l&&(!!~l.toString().indexOf(Hc.replacementClass)||~l.toString().indexOf("fa-layers-text"))}function p(){return!0===Hc.autoReplaceSvg?Ic.replace:Ic[Hc.autoReplaceSvg]||Ic.replace}function g(c,l){var h="function"==typeof l?l:Pc;0===c.length?h():(U.requestAnimationFrame||function(c){return c()})(function(){var l=p(),v=kc.begin("mutate");c.map(l),v(),h()})}function b(c){Rc=!0,c(),Rc=!1}function w(c){if(G){var l=c.treeCallback,h=c.nodeCallback,v=c.pseudoElementsCallback,a=new 
G(function(c){Rc||z(c).forEach(function(c){if("childList"===c.type&&c.addedNodes.length>0&&!d(c.addedNodes[0])&&(Hc.searchPseudoElements&&v(c.target),l(c.target)),"attributes"===c.type&&"class"===c.attributeName&&c.target.parentNode&&Hc.searchPseudoElements&&v(c.target.parentNode),"attributes"===c.type&&d(c.target)&&~ac.indexOf(c.attributeName))if("class"===c.attributeName){var z=C(e(c.target)),a=z.prefix,m=z.iconName;a&&c.target.setAttribute("data-prefix",a),m&&c.target.setAttribute("data-icon",m)}else h(c.target)})});K.getElementsByTagName&&a.observe(K.getElementsByTagName("body")[0],{childList:!0,attributes:!0,characterData:!0,subtree:!0})}}function y(c){for(var l="",h=0;h<c.length;h++)l+=("000"+c.charCodeAt(h).toString(16)).slice(-4);return l}function S(c){var l=Xc(c),h=l.iconName,v=l.prefix,z=l.rest,e=Bc(c),a=Dc(c),m=Uc(c),t=Kc(c),s=Gc(c);return{iconName:h,title:c.getAttribute("title"),prefix:v,transform:a,symbol:m,mask:s,extra:{classes:z,styles:e,attributes:t}}}function _(c){this.name="MissingIcon",this.message=c||"Icon unavailable",this.stack=(new Error).stack}function k(c,l){var h={found:!1,width:512,height:512,icon:cl};if(c&&l&&ll[l]&&ll[l][c]){var v=ll[l][c];h={found:!0,width:v[0],height:v[1],icon:{tag:"path",attributes:{fill:"currentColor",d:v.slice(4)[0]}}}}else if(c&&l&&!Hc.showMissingIcons)throw new _("Icon is missing for prefix "+l+" with icon name "+c);return h}function A(c,l){var h=l.iconName,v=l.title,z=l.prefix,e=l.transform,a=l.symbol,m=l.mask,t=l.extra;return[c,i({icons:{main:k(h,z),mask:k(m.iconName,m.prefix)},prefix:z,iconName:h,transform:e,symbol:a,mask:m,title:v,extra:t})]}function x(c,l){var h=l.title,v=l.transform,z=l.extra,e=null,a=null;if(Z){var m=parseInt(getComputedStyle(c).fontSize,10),t=c.getBoundingClientRect();e=t.width/m,a=t.height/m}return Hc.autoA11y&&!h&&(z.attributes["aria-hidden"]="true"),[c,n({content:c.innerHTML,width:e,height:a,transform:v,title:h,extra:z})]}function O(c){var l=S(c);return~l.extra.classes.indexOf(hl)?x(c,l):A(c,l)}function E(c){var l=kc.begin("searchPseudoElements");b(function(){z(c.querySelectorAll("*")).forEach(function(c){[":before",":after"].forEach(function(l){var h=U.getComputedStyle(c,l),v=h.getPropertyValue("font-family").match(vl),e=z(c.children).filter(function(c){return c.getAttribute(lc)===l})[0];if(!v&&e&&e.remove(),v&&!e){var a=h.getPropertyValue("content"),m=K.createElement("i");m.setAttribute("class",""+zl[v[1]]),m.setAttribute(lc,l),m.innerText=3===a.length?a.substr(1,1):a,":before"===l?c.insertBefore(m,c.firstChild):c.appendChild(m)}})})}),l()}function N(c){var l=arguments.length>1&&void 0!==arguments[1]?arguments[1]:null,h=K.documentElement.classList,v=function(c){return h.add(hc+"-"+c)},e=function(c){return h.remove(hc+"-"+c)},a=Object.keys(ll),m=["."+hl+":not(["+cc+"])"].concat(a.map(function(c){return"."+c+":not(["+cc+"])"})).join(", ");if(0!==m.length){var t=z(c.querySelectorAll(m));if(t.length>0){v("pending"),e("complete");var s=kc.begin("onTree"),r=t.reduce(function(c,l){try{var h=O(l);h&&c.push(h)}catch(c){vc||c instanceof _&&console.error(c)}return c},[]);s(),g(r,function(){v("active"),v("complete"),e("pending"),"function"==typeof l&&l()})}}}function q(c){var l=arguments.length>1&&void 0!==arguments[1]?arguments[1]:null,h=O(c);h&&g([h],l)}function T(c){return{found:!0,width:c[0],height:c[1],icon:{tag:"path",attributes:{fill:"currentColor",d:c.slice(4)[0]}}}}function j(){Hc.autoAddCss&&(sl||h(tl()),sl=!0)}function F(c,l){return 
Object.defineProperty(c,"abstract",{get:l}),Object.defineProperty(c,"html",{get:function(){return c.abstract.map(function(c){return u(c)})}}),Object.defineProperty(c,"node",{get:function(){if(K.createElement){var l=K.createElement("div");return l.innerHTML=c.html,l.children}}}),c}function W(c){var l=c.prefix,h=void 0===l?"fa":l,v=c.iconName;if(v)return L(rl.definitions,h,v)||L(wc.styles,h,v)}var P=function(){},I={},R={},B=null,X={mark:P,measure:P};try{"undefined"!=typeof window&&(I=window),"undefined"!=typeof document&&(R=document),"undefined"!=typeof MutationObserver&&(B=MutationObserver),"undefined"!=typeof performance&&(X=performance)}catch(c){}var Y=(I.navigator||{}).userAgent,D=void 0===Y?"":Y,U=I,K=R,G=B,J=X,Q=!!U.document,Z=~D.indexOf("MSIE")||~D.indexOf("Trident/"),$=16,cc="data-fa-processed",lc="data-fa-pseudo-element",hc="fontawesome-i2svg",vc=function(){try{return!0}catch(c){return!1}}(),zc=[1,2,3,4,5,6,7,8,9,10],ec=zc.concat([11,12,13,14,15,16,17,18,19,20]),ac=["class","data-prefix","data-icon","data-fa-transform","data-fa-mask"],mc=["xs","sm","lg","fw","ul","li","border","pull-left","pull-right","spin","pulse","rotate-90","rotate-180","rotate-270","flip-horizontal","flip-vertical","stack","stack-1x","stack-2x","inverse","layers","layers-text","layers-counter"].concat(zc.map(function(c){return c+"x"})).concat(ec.map(function(c){return"w-"+c})),tc=function(c,l){if(!(c instanceof l))throw new TypeError("Cannot call a class as a function")},sc=function(){function c(c,l){for(var h=0;h<l.length;h++){var v=l[h];v.enumerable=v.enumerable||!1,v.configurable=!0,"value"in v&&(v.writable=!0),Object.defineProperty(c,v.key,v)}}return function(l,h,v){return h&&c(l.prototype,h),v&&c(l,v),l}}(),rc=function(c,l,h){return l in c?Object.defineProperty(c,l,{value:h,enumerable:!0,configurable:!0,writable:!0}):c[l]=h,c},fc=Object.assign||function(c){for(var l=1;l<arguments.length;l++){var h=arguments[l];for(var v in h)Object.prototype.hasOwnProperty.call(h,v)&&(c[v]=h[v])}return c},Mc=function(c,l){var h={};for(var v in c)l.indexOf(v)>=0||Object.prototype.hasOwnProperty.call(c,v)&&(h[v]=c[v]);return h},ic=function(c){if(Array.isArray(c)){for(var l=0,h=Array(c.length);l<c.length;l++)h[l]=c[l];return h}return Array.from(c)},nc=fc({familyPrefix:"fa",replacementClass:"svg-inline--fa",autoReplaceSvg:!0,autoAddCss:!0,autoA11y:!0,searchPseudoElements:!1,observeMutations:!0,keepOriginalSource:!0,measurePerformance:!1,showMissingIcons:!0},U.FontAwesomeConfig||{});nc.autoReplaceSvg||(nc.observeMutations=!1);var Hc=fc({},nc);U.FontAwesomeConfig=Hc;var oc=$,Vc={size:16,x:0,y:0,rotate:0,flipX:!1,flipY:!1},Cc=0,Lc={x:0,y:0,width:"100%",height:"100%"},uc=function(c){var l=c.children,h=c.attributes,z=c.main,e=c.mask,a=c.transform,m=z.width,t=z.icon,s=e.width,r=e.icon,M=f({transform:a,containerWidth:s,iconWidth:m}),i={tag:"rect",attributes:fc({},Lc,{fill:"white"})},n={tag:"g",attributes:fc({},M.inner),children:[{tag:"path",attributes:fc({},t.attributes,M.path,{fill:"black"})}]},H={tag:"g",attributes:fc({},M.outer),children:[n]},o="mask-"+v(),V="clip-"+v(),C={tag:"defs",children:[{tag:"clipPath",attributes:{id:V},children:[r]},{tag:"mask",attributes:fc({},Lc,{id:o,maskUnits:"userSpaceOnUse",maskContentUnits:"userSpaceOnUse"}),children:[i,H]}]};return l.push(C,{tag:"rect",attributes:fc({fill:"currentColor","clip-path":"url(#"+V+")",mask:"url(#"+o+")"},Lc)}),{children:l,attributes:h}},dc=function(c){var l=c.children,h=c.attributes,v=c.main,z=c.transform,e=s(c.styles);if(e.length>0&&(h.style=e),r(z)){var 
a=f({transform:z,containerWidth:v.width,iconWidth:v.width});l.push({tag:"g",attributes:fc({},a.outer),children:[{tag:"g",attributes:fc({},a.inner),children:[{tag:v.icon.tag,children:v.icon.children,attributes:fc({},v.icon.attributes,a.path)}]}]})}else l.push(v.icon);return{children:l,attributes:h}},pc=function(c){var l=c.children,h=c.main,v=c.mask,z=c.attributes,e=c.styles,a=c.transform;if(r(a)&&h.found&&!v.found){var m={x:h.width/h.height/2,y:.5};z.style=s(fc({},e,{"transform-origin":m.x+a.x/16+"em "+(m.y+a.y/16)+"em"}))}return[{tag:"svg",attributes:z,children:l}]},gc=function(c){var l=c.prefix,h=c.iconName,v=c.children,z=c.attributes,e=c.symbol,a=!0===e?l+"-"+Hc.familyPrefix+"-"+h:e;return[{tag:"svg",attributes:{style:"display: none;"},children:[{tag:"symbol",attributes:fc({},z,{id:a}),children:v}]}]},bc=U||{};bc.___FONT_AWESOME___||(bc.___FONT_AWESOME___={}),bc.___FONT_AWESOME___.styles||(bc.___FONT_AWESOME___.styles={}),bc.___FONT_AWESOME___.hooks||(bc.___FONT_AWESOME___.hooks={}),bc.___FONT_AWESOME___.shims||(bc.___FONT_AWESOME___.shims=[]);var wc=bc.___FONT_AWESOME___,yc=function(){},Sc=Hc.measurePerformance&&J&&J.mark&&J.measure?J:{mark:yc,measure:yc},_c=function(c){Sc.mark('FA "5.0.1" '+c+" ends"),Sc.measure('FA "5.0.1" '+c,'FA "5.0.1" '+c+" begins",'FA "5.0.1" '+c+" ends")},kc={begin:function(c){return Sc.mark('FA "5.0.1" '+c+" begins"),function(){return _c(c)}},end:_c},Ac=function(c,l){return function(h,v,z,e){return c.call(l,h,v,z,e)}},xc=function(c,l,h,v){var z,e,a,m=Object.keys(c),t=m.length,s=void 0!==v?Ac(l,v):l;for(void 0===h?(z=1,a=c[m[0]]):(z=0,a=h);z<t;z++)a=s(a,c[e=m[z]],e,c);return a},Oc=wc.styles,Ec=wc.shims,Nc={},qc={},Tc={},jc=function(){var c=function(c){return xc(Oc,function(l,h,v){return l[v]=xc(h,c,{}),l},{})};Nc=c(function(c,l,h){return c[l[3]]=h,c}),qc=c(function(c,l,h){var v=l[2];return c[h]=h,v.forEach(function(l){c[l]=h}),c});var l="far"in Oc;Tc=xc(Ec,function(c,h){var v=h[0],z=h[1],e=h[2];return"far"!==z||l||(z="fas"),c[v]={prefix:z,iconName:e},c},{})};jc();var Fc=wc.styles,Wc=function(){return{prefix:null,iconName:null,rest:[]}},Pc=function(){},Ic={replace:function(c){var l=c[0],h=c[1].map(function(c){return u(c)}).join("\n");if(l.parentNode&&l.outerHTML)l.outerHTML=h+(Hc.keepOriginalSource&&"svg"!==l.tagName.toLowerCase()?"\x3c!-- "+l.outerHTML+" --\x3e":"");else if(l.parentNode){var v=document.createElement("span");l.parentNode.replaceChild(v,l),v.outerHTML=h}},nest:function(c){var l=c[0],h=c[1];if(~e(l).indexOf(Hc.replacementClass))return Ic.replace(c);var v=new RegExp(Hc.familyPrefix+"-.*");delete h[0].attributes.style;var z=h[0].attributes.class.split(" ").reduce(function(c,l){return l===Hc.replacementClass||l.match(v)?c.toSvg.push(l):c.toNode.push(l),c},{toNode:[],toSvg:[]});h[0].attributes.class=z.toSvg.join(" ");var a=h.map(function(c){return u(c)}).join("\n");l.setAttribute("class",z.toNode.join(" ")),l.setAttribute(cc,""),l.innerHTML=a}},Rc=!1,Bc=function(c){var l=c.getAttribute("style"),h=[];return l&&(h=l.split(";").reduce(function(c,l){var h=l.split(":"),v=h[0],z=h.slice(1);return v&&z.length>0&&(c[v]=z.join(":").trim()),c},{})),h},Xc=function(c){var l=c.getAttribute("data-prefix"),h=c.getAttribute("data-icon"),v=void 0!==c.innerText?c.innerText.trim():"",z=C(e(c));return l&&h&&(z.prefix=l,z.iconName=h),z.prefix&&v.length>1?z.iconName=o(z.prefix,c.innerText):z.prefix&&1===v.length&&(z.iconName=H(z.prefix,y(c.innerText))),z},Yc=function(c){var l={size:16,x:0,y:0,flipX:!1,flipY:!1,rotate:0};return c?c.toLowerCase().split(" 
").reduce(function(c,l){var h=l.toLowerCase().split("-"),v=h[0],z=h.slice(1).join("-");if(v&&"h"===z)return c.flipX=!0,c;if(v&&"v"===z)return c.flipY=!0,c;if(z=parseFloat(z),isNaN(z))return c;switch(v){case"grow":c.size=c.size+z;break;case"shrink":c.size=c.size-z;break;case"left":c.x=c.x-z;break;case"right":c.x=c.x+z;break;case"up":c.y=c.y-z;break;case"down":c.y=c.y+z;break;case"rotate":c.rotate=c.rotate+z}return c},l):l},Dc=function(c){return Yc(c.getAttribute("data-fa-transform"))},Uc=function(c){var l=c.getAttribute("data-fa-symbol");return null!==l&&(""===l||l)},Kc=function(c){var l=z(c.attributes).reduce(function(c,l){return"class"!==c.name&&"style"!==c.name&&(c[l.name]=l.value),c},{}),h=c.getAttribute("title");return Hc.autoA11y&&(h?l["aria-labelledby"]=Hc.replacementClass+"-title-"+v():l["aria-hidden"]="true"),l},Gc=function(c){var l=c.getAttribute("data-fa-mask");return l?C(l.split(" ").map(function(c){return c.trim()})):Wc()};_.prototype=Object.create(Error.prototype),_.prototype.constructor=_;var Jc={fill:"currentColor"},Qc={attributeType:"XML",repeatCount:"indefinite",dur:"2s"},Zc={tag:"path",attributes:fc({},Jc,{d:"M156.5,447.7l-12.6,29.5c-18.7-9.5-35.9-21.2-51.5-34.9l22.7-22.7C127.6,430.5,141.5,440,156.5,447.7z M40.6,272H8.5 c1.4,21.2,5.4,41.7,11.7,61.1L50,321.2C45.1,305.5,41.8,289,40.6,272z M40.6,240c1.4-18.8,5.2-37,11.1-54.1l-29.5-12.6 C14.7,194.3,10,216.7,8.5,240H40.6z M64.3,156.5c7.8-14.9,17.2-28.8,28.1-41.5L69.7,92.3c-13.7,15.6-25.5,32.8-34.9,51.5 L64.3,156.5z M397,419.6c-13.9,12-29.4,22.3-46.1,30.4l11.9,29.8c20.7-9.9,39.8-22.6,56.9-37.6L397,419.6z M115,92.4 c13.9-12,29.4-22.3,46.1-30.4l-11.9-29.8c-20.7,9.9-39.8,22.6-56.8,37.6L115,92.4z M447.7,355.5c-7.8,14.9-17.2,28.8-28.1,41.5 l22.7,22.7c13.7-15.6,25.5-32.9,34.9-51.5L447.7,355.5z M471.4,272c-1.4,18.8-5.2,37-11.1,54.1l29.5,12.6 c7.5-21.1,12.2-43.5,13.6-66.8H471.4z M321.2,462c-15.7,5-32.2,8.2-49.2,9.4v32.1c21.2-1.4,41.7-5.4,61.1-11.7L321.2,462z M240,471.4c-18.8-1.4-37-5.2-54.1-11.1l-12.6,29.5c21.1,7.5,43.5,12.2,66.8,13.6V471.4z M462,190.8c5,15.7,8.2,32.2,9.4,49.2h32.1 c-1.4-21.2-5.4-41.7-11.7-61.1L462,190.8z M92.4,397c-12-13.9-22.3-29.4-30.4-46.1l-29.8,11.9c9.9,20.7,22.6,39.8,37.6,56.9 L92.4,397z M272,40.6c18.8,1.4,36.9,5.2,54.1,11.1l12.6-29.5C317.7,14.7,295.3,10,272,8.5V40.6z M190.8,50 c15.7-5,32.2-8.2,49.2-9.4V8.5c-21.2,1.4-41.7,5.4-61.1,11.7L190.8,50z M442.3,92.3L419.6,115c12,13.9,22.3,29.4,30.5,46.1 l29.8-11.9C470,128.5,457.3,109.4,442.3,92.3z M397,92.4l22.7-22.7c-15.6-13.7-32.8-25.5-51.5-34.9l-12.6,29.5 C370.4,72.1,384.4,81.5,397,92.4z"})},$c=fc({},Qc,{attributeName:"opacity"}),cl={tag:"g",children:[Zc,{tag:"circle",attributes:fc({},Jc,{cx:"256",cy:"364",r:"28"}),children:[{tag:"animate",attributes:fc({},Qc,{attributeName:"r",values:"28;14;28;28;14;28;"})},{tag:"animate",attributes:fc({},$c,{values:"1;0;1;1;0;1;"})}]},{tag:"path",attributes:fc({},Jc,{opacity:"1",d:"M263.7,312h-16c-6.6,0-12-5.4-12-12c0-71,77.4-63.9,77.4-107.8c0-20-17.8-40.2-57.4-40.2c-29.1,0-44.3,9.6-59.2,28.7 c-3.9,5-11.1,6-16.2,2.4l-13.1-9.2c-5.6-3.9-6.9-11.8-2.6-17.2c21.2-27.2,46.4-44.7,91.2-44.7c52.3,0,97.4,29.8,97.4,80.2 c0,67.6-77.4,63.5-77.4,107.8C275.7,306.6,270.3,312,263.7,312z"}),children:[{tag:"animate",attributes:fc({},$c,{values:"1;0;0;0;0;1;"})}]},{tag:"path",attributes:fc({},Jc,{opacity:"0",d:"M232.5,134.5l7,168c0.3,6.4,5.6,11.5,12,11.5h9c6.4,0,11.7-5.1,12-11.5l7-168c0.3-6.8-5.2-12.5-12-12.5h-23 
C237.7,122,232.2,127.7,232.5,134.5z"}),children:[{tag:"animate",attributes:fc({},$c,{values:"0;0;1;1;0;0;"})}]}]},ll=wc.styles,hl="fa-layers-text",vl=/Font Awesome 5 (Solid|Regular|Light|Brands)/,zl={Solid:"fas",Regular:"far",Light:"fal",Brands:"fab"},el=[],al=(K.documentElement.doScroll?/^loaded|^c/:/^loaded|^i|^c/).test(K.readyState);al||K.addEventListener("DOMContentLoaded",function c(){K.removeEventListener("DOMContentLoaded",c),al=1,el.map(function(c){return c()})});var ml=function(c){K&&(al?setTimeout(c,0):el.push(c))},tl=function(){var c="svg-inline--fa",l=Hc.familyPrefix,h=Hc.replacementClass,v="svg:not(:root).svg-inline--fa{overflow:visible}.svg-inline--fa{display:inline-block;font-size:inherit;height:1em;overflow:visible;vertical-align:-.125em}.svg-inline--fa.fa-lg{vertical-align:-.225em}.svg-inline--fa.fa-w-1{width:.0625em}.svg-inline--fa.fa-w-2{width:.125em}.svg-inline--fa.fa-w-3{width:.1875em}.svg-inline--fa.fa-w-4{width:.25em}.svg-inline--fa.fa-w-5{width:.3125em}.svg-inline--fa.fa-w-6{width:.375em}.svg-inline--fa.fa-w-7{width:.4375em}.svg-inline--fa.fa-w-8{width:.5em}.svg-inline--fa.fa-w-9{width:.5625em}.svg-inline--fa.fa-w-10{width:.625em}.svg-inline--fa.fa-w-11{width:.6875em}.svg-inline--fa.fa-w-12{width:.75em}.svg-inline--fa.fa-w-13{width:.8125em}.svg-inline--fa.fa-w-14{width:.875em}.svg-inline--fa.fa-w-15{width:.9375em}.svg-inline--fa.fa-w-16{width:1em}.svg-inline--fa.fa-w-17{width:1.0625em}.svg-inline--fa.fa-w-18{width:1.125em}.svg-inline--fa.fa-w-19{width:1.1875em}.svg-inline--fa.fa-w-20{width:1.25em}.svg-inline--fa.fa-pull-left{margin-right:.3em;width:auto}.svg-inline--fa.fa-pull-right{margin-left:.3em;width:auto}.svg-inline--fa.fa-border{height:1.5em}.svg-inline--fa.fa-li{width:2em}.svg-inline--fa.fa-fw{width:1.25em}.fa-layers svg.svg-inline--fa{bottom:0;left:0;margin:auto;position:absolute;right:0;top:0}.fa-layers{display:inline-block;height:1em;position:relative;text-align:center;vertical-align:-12.5%;width:1em}.fa-layers svg.svg-inline--fa{-webkit-transform-origin:center center;transform-origin:center center}.fa-layers-counter,.fa-layers-text{display:inline-block;position:absolute;text-align:center}.fa-layers-text{left:50%;top:50%;-webkit-transform:translate(-50%,-50%);transform:translate(-50%,-50%);-webkit-transform-origin:center center;transform-origin:center center}.fa-layers-counter{background-color:#ff253a;border-radius:1em;color:#fff;height:1.5em;line-height:1;max-width:5em;min-width:1.5em;overflow:hidden;padding:.25em;right:0;text-overflow:ellipsis;top:0;-webkit-transform:scale(.25);transform:scale(.25);-webkit-transform-origin:top right;transform-origin:top right}.fa-layers-bottom-right{bottom:0;right:0;top:auto;-webkit-transform:scale(.25);transform:scale(.25);-webkit-transform-origin:bottom right;transform-origin:bottom right}.fa-layers-bottom-left{bottom:0;left:0;right:auto;top:auto;-webkit-transform:scale(.25);transform:scale(.25);-webkit-transform-origin:bottom left;transform-origin:bottom left}.fa-layers-top-right{right:0;top:0;-webkit-transform:scale(.25);transform:scale(.25);-webkit-transform-origin:top right;transform-origin:top right}.fa-layers-top-left{left:0;right:auto;top:0;-webkit-transform:scale(.25);transform:scale(.25);-webkit-transform-origin:top left;transform-origin:top 
left}.fa-lg{font-size:1.33333em;line-height:.75em;vertical-align:-.0667em}.fa-xs{font-size:.75em}.fa-sm{font-size:.875em}.fa-1x{font-size:1em}.fa-2x{font-size:2em}.fa-3x{font-size:3em}.fa-4x{font-size:4em}.fa-5x{font-size:5em}.fa-6x{font-size:6em}.fa-7x{font-size:7em}.fa-8x{font-size:8em}.fa-9x{font-size:9em}.fa-10x{font-size:10em}.fa-fw{text-align:center;width:1.25em}.fa-ul{list-style-type:none;margin-left:2.5em;padding-left:0}.fa-ul>li{position:relative}.fa-li{left:-2em;position:absolute;text-align:center;width:2em;line-height:inherit}.fa-border{border:solid .08em #eee;border-radius:.1em;padding:.2em .25em .15em}.fa-pull-left{float:left}.fa-pull-right{float:right}.fa.fa-pull-left,.fab.fa-pull-left,.fal.fa-pull-left,.far.fa-pull-left,.fas.fa-pull-left{margin-right:.3em}.fa.fa-pull-right,.fab.fa-pull-right,.fal.fa-pull-right,.far.fa-pull-right,.fas.fa-pull-right{margin-left:.3em}.fa-spin{-webkit-animation:fa-spin 2s infinite linear;animation:fa-spin 2s infinite linear}.fa-pulse{-webkit-animation:fa-spin 1s infinite steps(8);animation:fa-spin 1s infinite steps(8)}@-webkit-keyframes fa-spin{0%{-webkit-transform:rotate(0);transform:rotate(0)}100%{-webkit-transform:rotate(360deg);transform:rotate(360deg)}}@keyframes fa-spin{0%{-webkit-transform:rotate(0);transform:rotate(0)}100%{-webkit-transform:rotate(360deg);transform:rotate(360deg)}}.fa-rotate-90{-webkit-transform:rotate(90deg);transform:rotate(90deg)}.fa-rotate-180{-webkit-transform:rotate(180deg);transform:rotate(180deg)}.fa-rotate-270{-webkit-transform:rotate(270deg);transform:rotate(270deg)}.fa-flip-horizontal{-webkit-transform:scale(-1,1);transform:scale(-1,1)}.fa-flip-vertical{-webkit-transform:scale(1,-1);transform:scale(1,-1)}.fa-flip-horizontal.fa-flip-vertical{-webkit-transform:scale(-1,-1);transform:scale(-1,-1)}:root .fa-flip-horizontal,:root .fa-flip-vertical,:root .fa-rotate-180,:root .fa-rotate-270,:root .fa-rotate-90{-webkit-filter:none;filter:none}.fa-stack{display:inline-block;height:2em;position:relative;width:2em}.fa-stack-1x,.fa-stack-2x{bottom:0;left:0;margin:auto;position:absolute;right:0;top:0}.svg-inline--fa.fa-stack-1x{height:1em;width:1em}.svg-inline--fa.fa-stack-2x{height:2em;width:2em}.fa-inverse{color:#fff}.sr-only{border:0;clip:rect(0,0,0,0);height:1px;margin:-1px;overflow:hidden;padding:0;position:absolute;width:1px}.sr-only-focusable:active,.sr-only-focusable:focus{clip:auto;height:auto;margin:0;overflow:visible;position:static;width:auto}";if("fa"!==l||h!==c){var z=new RegExp("\\.fa\\-","g"),e=new RegExp("\\."+c,"g");v=v.replace(z,"."+l+"-").replace(e,"."+h)}return v},sl=!1,rl=new(function(){function c(){tc(this,c),this.definitions={}}return sc(c,[{key:"add",value:function(){for(var c=this,l=arguments.length,h=Array(l),v=0;v<l;v++)h[v]=arguments[v];var z=h.reduce(this._pullDefinitions,{});Object.keys(z).forEach(function(l){c.definitions[l]=fc({},c.definitions[l]||{},z[l])})}},{key:"reset",value:function(){this.definitions={}}},{key:"_pullDefinitions",value:function(c,l){var h=l.prefix&&l.iconName&&l.icon?{0:l}:l;return Object.keys(h).map(function(l){var v=h[l],z=v.prefix,e=v.iconName,a=v.icon;c[z]||(c[z]={}),c[z][e]=a}),c}}]),c}()),fl={dom:{i2svg:function(){var c=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};j();var l=c.node,h=void 0===l?K:l,v=c.callback,z=void 0===v?function(){}:v;Hc.searchPseudoElements&&E(h),N(h,z)},css:tl,insertCss:function(){h(tl())}},library:rl,parse:{transform:function(c){return Yc(c)}},findIconDefinition:W,icon:function(c){return function(l){var 
h=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{},v=(l||{}).icon?l:W(l||{}),z=h.mask;return z&&(z=(z||{}).icon?z:W(z||{})),c(v,fc({},h,{mask:z}))}}(function(c){var l=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{},h=l.transform,z=void 0===h?Vc:h,e=l.symbol,a=void 0!==e&&e,m=l.mask,t=void 0===m?null:m,s=l.title,r=void 0===s?null:s,f=l.classes,M=void 0===f?[]:f,n=l.attributes,H=void 0===n?{}:n,o=l.styles,V=void 0===o?{}:o;if(c){var C=c.prefix,L=c.iconName,u=c.icon;return F(fc({type:"icon"},c),function(){return j(),Hc.autoA11y&&(r?H["aria-labelledby"]=Hc.replacementClass+"-title-"+v():H["aria-hidden"]="true"),i({icons:{main:T(u),mask:t?T(t.icon):{found:!1,width:null,height:null,icon:{}}},prefix:C,iconName:L,transform:fc({},Vc,z),symbol:a,title:r,extra:{attributes:H,styles:V,classes:M}})})}}),text:function(c){var l=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{},h=l.transform,v=void 0===h?Vc:h,z=l.title,e=void 0===z?null:z,a=l.classes,m=void 0===a?[]:a,t=l.attributes,s=void 0===t?{}:t,r=l.styles,f=void 0===r?{}:r;return F({type:"text",content:c},function(){return j(),n({content:c,transform:fc({},Vc,v),title:e,extra:{attributes:s,styles:f,classes:[Hc.familyPrefix+"-layers-text"].concat(ic(m))}})})},layer:function(c){return F({type:"layer"},function(){j();var l=[];return c(function(c){l=Array.isArray(c)?c.map(function(c){l=l.concat(c.abstract)}):l.concat(c.abstract)}),[{tag:"span",attributes:{class:Hc.familyPrefix+"-layers"},children:l}]})}};Object.defineProperty(fl,"config",{get:function(){Hc.autoReplaceSvg,Hc.observeMutations,Hc.showMissingIcons;return Mc(Hc,["autoReplaceSvg","observeMutations","showMissingIcons"])},set:function(l){c(l)}}),function(c){try{c()}catch(c){}}(function(){var c=function(){Hc.autoReplaceSvg&&fl.dom.i2svg({node:K})};Q&&(U.FontAwesome||(U.FontAwesome=fl),ml(function(){Object.keys(wc.styles).length>0&&c(),Hc.observeMutations&&"function"==typeof MutationObserver&&w({treeCallback:N,nodeCallback:q,pseudoElementsCallback:E})})),wc.hooks=fc({},wc.hooks,{addPack:function(l,h){wc.styles[l]=fc({},wc.styles[l]||{},h),jc(),c()},addShims:function(l){var h;(h=wc.shims).push.apply(h,ic(l)),jc(),c()}})})}();
diff --git a/web/gui/lib/gauge-1.3.2.min.js b/web/gui/lib/gauge-1.3.2.min.js
new file mode 100644
index 0000000..7d9e163
--- /dev/null
+++ b/web/gui/lib/gauge-1.3.2.min.js
@@ -0,0 +1,2 @@
+// SPDX-License-Identifier: MIT
+(function(){var a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p=[].slice,q={}.hasOwnProperty,r=function(a,b){function d(){this.constructor=a}for(var c in b)q.call(b,c)&&(a[c]=b[c]);return d.prototype=b.prototype,a.prototype=new d,a.__super__=b.prototype,a};!function(){var a,b,c,d,e,f,g;for(g=["ms","moz","webkit","o"],c=0,e=g.length;c<e&&(f=g[c],!window.requestAnimationFrame);c++)window.requestAnimationFrame=window[f+"RequestAnimationFrame"],window.cancelAnimationFrame=window[f+"CancelAnimationFrame"]||window[f+"CancelRequestAnimationFrame"];return a=null,d=0,b={},requestAnimationFrame?window.cancelAnimationFrame?void 0:(a=window.requestAnimationFrame,window.requestAnimationFrame=function(c,e){var f;return f=++d,a(function(){if(!b[f])return c()},e),f},window.cancelAnimationFrame=function(a){return b[a]=!0}):(window.requestAnimationFrame=function(a,b){var c,d,e,f;return c=(new Date).getTime(),f=Math.max(0,16-(c-e)),d=window.setTimeout(function(){return a(c+f)},f),e=c+f,d},window.cancelAnimationFrame=function(a){return clearTimeout(a)})}(),String.prototype.hashCode=function(){var a,b,c,d,e;if(b=0,0===this.length)return b;for(c=d=0,e=this.length;0<=e?d<e:d>e;c=0<=e?++d:--d)a=this.charCodeAt(c),b=(b<<5)-b+a,b&=b;return b},o=function(a){var b,c;for(b=Math.floor(a/3600),c=Math.floor((a-3600*b)/60),a-=3600*b+60*c,a+="",c+="";c.length<2;)c="0"+c;for(;a.length<2;)a="0"+a;return b=b?b+":":"",b+c+":"+a},m=function(){var a,b,c;return b=1<=arguments.length?p.call(arguments,0):[],c=b[0],a=b[1],k(c.toFixed(a))},n=function(a,b){var c,d,e;d={};for(c in a)q.call(a,c)&&(e=a[c],d[c]=e);for(c in b)q.call(b,c)&&(e=b[c],d[c]=e);return d},k=function(a){var b,c,d,e;for(a+="",c=a.split("."),d=c[0],e="",c.length>1&&(e="."+c[1]),b=/(\d+)(\d{3})/;b.test(d);)d=d.replace(b,"$1,$2");return d+e},l=function(a){return"#"===a.charAt(0)?a.substring(1,7):a},j=function(){function a(a,b){null==a&&(a=!0),this.clear=null==b||b,a&&AnimationUpdater.add(this)}return a.prototype.animationSpeed=32,a.prototype.update=function(a){var b;return null==a&&(a=!1),!(!a&&this.displayedValue===this.value)&&(this.ctx&&this.clear&&this.ctx.clearRect(0,0,this.canvas.width,this.canvas.height),b=this.value-this.displayedValue,Math.abs(b/this.animationSpeed)<=.001?this.displayedValue=this.value:this.displayedValue=this.displayedValue+b/this.animationSpeed,this.render(),!0)},a}(),e=function(a){function b(){return b.__super__.constructor.apply(this,arguments)}return r(b,a),b.prototype.displayScale=1,b.prototype.setTextField=function(a,b){return this.textField=a instanceof i?a:new i(a,b)},b.prototype.setMinValue=function(a,b){var c,d,e,f,g;if(this.minValue=a,null==b&&(b=!0),b){for(this.displayedValue=this.minValue,f=this.gp||[],g=[],d=0,e=f.length;d<e;d++)c=f[d],g.push(c.displayedValue=this.minValue);return g}},b.prototype.setOptions=function(a){return null==a&&(a=null),this.options=n(this.options,a),this.textField&&(this.textField.el.style.fontSize=a.fontSize+"px"),this.options.angle>.5&&(this.options.angle=.5),this.configDisplayScale(),this},b.prototype.configDisplayScale=function(){var a,b,c,d,e;return d=this.displayScale,this.options.highDpiSupport===!1?delete 
this.displayScale:(b=window.devicePixelRatio||1,a=this.ctx.webkitBackingStorePixelRatio||this.ctx.mozBackingStorePixelRatio||this.ctx.msBackingStorePixelRatio||this.ctx.oBackingStorePixelRatio||this.ctx.backingStorePixelRatio||1,this.displayScale=b/a),this.displayScale!==d&&(e=this.canvas.G__width||this.canvas.width,c=this.canvas.G__height||this.canvas.height,this.canvas.width=e*this.displayScale,this.canvas.height=c*this.displayScale,this.canvas.style.width=e+"px",this.canvas.style.height=c+"px",this.canvas.G__width=e,this.canvas.G__height=c),this},b}(j),i=function(){function a(a,b){this.el=a,this.fractionDigits=b}return a.prototype.render=function(a){return this.el.innerHTML=m(a.displayedValue,this.fractionDigits)},a}(),a=function(a){function b(a,b){this.elem=a,this.text=null!=b&&b,this.value=1*this.elem.innerHTML,this.text&&(this.value=0)}return r(b,a),b.prototype.displayedValue=0,b.prototype.value=0,b.prototype.setVal=function(a){return this.value=1*a},b.prototype.render=function(){var a;return a=this.text?o(this.displayedValue.toFixed(0)):k(m(this.displayedValue)),this.elem.innerHTML=a},b}(j),b={create:function(b){var c,d,e,f;for(f=[],d=0,e=b.length;d<e;d++)c=b[d],f.push(new a(c));return f}},h=function(a){function b(a){this.gauge=a,this.ctx=this.gauge.ctx,this.canvas=this.gauge.canvas,b.__super__.constructor.call(this,!1,!1),this.setOptions()}return r(b,a),b.prototype.displayedValue=0,b.prototype.value=0,b.prototype.options={strokeWidth:.035,length:.1,color:"#000000"},b.prototype.setOptions=function(a){return null==a&&(a=null),this.options=n(this.options,a),this.length=2*this.gauge.radius*this.gauge.options.radiusScale*this.options.length,this.strokeWidth=this.canvas.height*this.options.strokeWidth,this.maxValue=this.gauge.maxValue,this.minValue=this.gauge.minValue,this.animationSpeed=this.gauge.animationSpeed,this.options.angle=this.gauge.options.angle},b.prototype.render=function(){var a,b,c,d,e,f,g;return a=this.gauge.getAngle.call(this,this.displayedValue),f=Math.round(this.length*Math.cos(a)),g=Math.round(this.length*Math.sin(a)),d=Math.round(this.strokeWidth*Math.cos(a-Math.PI/2)),e=Math.round(this.strokeWidth*Math.sin(a-Math.PI/2)),b=Math.round(this.strokeWidth*Math.cos(a+Math.PI/2)),c=Math.round(this.strokeWidth*Math.sin(a+Math.PI/2)),this.ctx.fillStyle=this.options.color,this.ctx.beginPath(),this.ctx.arc(0,0,this.strokeWidth,0,2*Math.PI,!0),this.ctx.fill(),this.ctx.beginPath(),this.ctx.moveTo(d,e),this.ctx.lineTo(f,g),this.ctx.lineTo(b,c),this.ctx.fill()},b}(j),c=function(){function a(a){this.elem=a}return a.prototype.updateValues=function(a){return this.value=a[0],this.maxValue=a[1],this.avgValue=a[2],this.render()},a.prototype.render=function(){var a,b;return this.textField&&this.textField.text(m(this.value)),0===this.maxValue&&(this.maxValue=2*this.avgValue),b=this.value/this.maxValue*100,a=this.avgValue/this.maxValue*100,$(".bar-value",this.elem).css({width:b+"%"}),$(".typical-value",this.elem).css({width:a+"%"})},a}(),g=function(a){function b(a){var c,d;this.canvas=a,b.__super__.constructor.call(this),this.percentColors=null,this.forceUpdate=!0,"undefined"!=typeof G_vmlCanvasManager&&(this.canvas=window.G_vmlCanvasManager.initElement(this.canvas)),this.ctx=this.canvas.getContext("2d"),c=this.canvas.clientHeight,d=this.canvas.clientWidth,this.canvas.height=c,this.canvas.width=d,this.gp=[new h(this)],this.setOptions(),this.render()}return 
r(b,a),b.prototype.elem=null,b.prototype.value=[20],b.prototype.maxValue=80,b.prototype.minValue=0,b.prototype.displayedAngle=0,b.prototype.displayedValue=0,b.prototype.lineWidth=40,b.prototype.paddingTop=.1,b.prototype.paddingBottom=.1,b.prototype.percentColors=null,b.prototype.options={colorStart:"#6fadcf",colorStop:void 0,gradientType:0,strokeColor:"#e0e0e0",pointer:{length:.8,strokeWidth:.035},angle:.15,lineWidth:.44,radiusScale:1,fontSize:40,limitMax:!1,limitMin:!1},b.prototype.setOptions=function(a){var c,d,e,f,g;for(null==a&&(a=null),b.__super__.setOptions.call(this,a),this.configPercentColors(),this.extraPadding=0,this.options.angle<0&&(f=Math.PI*(1+this.options.angle),this.extraPadding=Math.sin(f)),this.availableHeight=this.canvas.height*(1-this.paddingTop-this.paddingBottom),this.lineWidth=this.availableHeight*this.options.lineWidth,this.radius=(this.availableHeight-this.lineWidth/2)/(1+this.extraPadding),this.ctx.clearRect(0,0,this.canvas.width,this.canvas.height),g=this.gp,d=0,e=g.length;d<e;d++)c=g[d],c.setOptions(this.options.pointer),c.render();return this},b.prototype.configPercentColors=function(){var a,b,c,d,e,f,g;if(this.percentColors=null,void 0!==this.options.percentColors){for(this.percentColors=new Array,f=[],c=d=0,e=this.options.percentColors.length-1;0<=e?d<=e:d>=e;c=0<=e?++d:--d)g=parseInt(l(this.options.percentColors[c][1]).substring(0,2),16),b=parseInt(l(this.options.percentColors[c][1]).substring(2,4),16),a=parseInt(l(this.options.percentColors[c][1]).substring(4,6),16),f.push(this.percentColors[c]={pct:this.options.percentColors[c][0],color:{r:g,g:b,b:a}});return f}},b.prototype.set=function(a){var b,c,d,e,f,g,i;if(a instanceof Array||(a=[a]),a.length>this.gp.length)for(c=d=0,g=a.length-this.gp.length;0<=g?d<g:d>g;c=0<=g?++d:--d)b=new h(this),b.setOptions(this.options.pointer),this.gp.push(b);else a.length<this.gp.length&&(this.gp=this.gp.slice(this.gp.length-a.length));for(c=0,e=0,f=a.length;e<f;e++)i=a[e],i>this.maxValue?this.options.limitMax?i=this.maxValue:this.maxValue=i+1:i<this.minValue&&(this.options.limitMin?i=this.minValue:this.minValue=i-1),this.gp[c].value=i,this.gp[c++].setOptions({minValue:this.minValue,maxValue:this.maxValue,angle:this.options.angle});return this.value=Math.max(Math.min(a[a.length-1],this.maxValue),this.minValue),AnimationUpdater.run(this.forceUpdate),this.forceUpdate=!1},b.prototype.getAngle=function(a){return(1+this.options.angle)*Math.PI+(a-this.minValue)/(this.maxValue-this.minValue)*(1-2*this.options.angle)*Math.PI},b.prototype.getColorForPercentage=function(a,b){var c,d,e,f,g,h,i;if(0===a)c=this.percentColors[0].color;else for(c=this.percentColors[this.percentColors.length-1].color,e=f=0,h=this.percentColors.length-1;0<=h?f<=h:f>=h;e=0<=h?++f:--f)if(a<=this.percentColors[e].pct){b===!0?(i=this.percentColors[e-1]||this.percentColors[0],d=this.percentColors[e],g=(a-i.pct)/(d.pct-i.pct),c={r:Math.floor(i.color.r*(1-g)+d.color.r*g),g:Math.floor(i.color.g*(1-g)+d.color.g*g),b:Math.floor(i.color.b*(1-g)+d.color.b*g)}):c=this.percentColors[e].color;break}return"rgb("+[c.r,c.g,c.b].join(",")+")"},b.prototype.getColorForValue=function(a,b){var c;return c=(a-this.minValue)/(this.maxValue-this.minValue),this.getColorForPercentage(c,b)},b.prototype.renderStaticLabels=function(a,b,c,d){var e,f,g,h,i,j,k,l,n,o;for(this.ctx.save(),this.ctx.translate(b,c),e=a.font||"10px 
Times",j=/\d+\.?\d?/,i=e.match(j)[0],l=e.slice(i.length),f=parseFloat(i)*this.displayScale,this.ctx.font=f+l,this.ctx.fillStyle=a.color||"#000000",this.ctx.textBaseline="bottom",this.ctx.textAlign="center",k=a.labels,g=0,h=k.length;g<h;g++)o=k[g],(!this.options.limitMin||o>=this.minValue)&&(!this.options.limitMax||o<=this.maxValue)&&(n=this.getAngle(o)-3*Math.PI/2,this.ctx.rotate(n),this.ctx.fillText(m(o,a.fractionDigits),0,-d-this.lineWidth/2),this.ctx.rotate(-n));return this.ctx.restore()},b.prototype.render=function(){var a,b,c,d,e,f,g,h,i,j,k,l,m,n,o;if(n=this.canvas.width/2,d=this.canvas.height*this.paddingTop+this.availableHeight-(this.radius+this.lineWidth/2)*this.extraPadding,a=this.getAngle(this.displayedValue),this.textField&&this.textField.render(this),this.ctx.lineCap="butt",k=this.radius*this.options.radiusScale,this.options.staticLabels&&this.renderStaticLabels(this.options.staticLabels,n,d,k),this.options.staticZones){for(this.ctx.save(),this.ctx.translate(n,d),this.ctx.lineWidth=this.lineWidth,l=this.options.staticZones,e=0,g=l.length;e<g;e++)o=l[e],j=o.min,this.options.limitMin&&j<this.minValue&&(j=this.minValue),i=o.max,this.options.limitMax&&i>this.maxValue&&(i=this.maxValue),this.ctx.strokeStyle=o.strokeStyle,this.ctx.beginPath(),this.ctx.arc(0,0,k,this.getAngle(j),this.getAngle(i),!1),this.ctx.stroke();this.ctx.restore()}else void 0!==this.options.customFillStyle?b=this.options.customFillStyle(this):null!==this.percentColors?b=this.getColorForValue(this.displayedValue,!0):void 0!==this.options.colorStop?(b=0===this.options.gradientType?this.ctx.createRadialGradient(n,d,9,n,d,70):this.ctx.createLinearGradient(0,0,n,0),b.addColorStop(0,this.options.colorStart),b.addColorStop(1,this.options.colorStop)):b=this.options.colorStart,this.ctx.strokeStyle=b,this.ctx.beginPath(),this.ctx.arc(n,d,k,(1+this.options.angle)*Math.PI,a,!1),this.ctx.lineWidth=this.lineWidth,this.ctx.stroke(),this.ctx.strokeStyle=this.options.strokeColor,this.ctx.beginPath(),this.ctx.arc(n,d,k,a,(2-this.options.angle)*Math.PI,!1),this.ctx.stroke();for(this.ctx.translate(n,d),m=this.gp,f=0,h=m.length;f<h;f++)c=m[f],c.update(!0);return this.ctx.translate(-n,-d)},b}(e),d=function(a){function b(a){this.canvas=a,b.__super__.constructor.call(this),"undefined"!=typeof G_vmlCanvasManager&&(this.canvas=window.G_vmlCanvasManager.initElement(this.canvas)),this.ctx=this.canvas.getContext("2d"),this.setOptions(),this.render()}return r(b,a),b.prototype.lineWidth=15,b.prototype.displayedValue=0,b.prototype.value=33,b.prototype.maxValue=80,b.prototype.minValue=0,b.prototype.options={lineWidth:.1,colorStart:"#6f6ea0",colorStop:"#c0c0db",strokeColor:"#eeeeee",shadowColor:"#d5d5d5",angle:.35,radiusScale:1},b.prototype.getAngle=function(a){return(1-this.options.angle)*Math.PI+(a-this.minValue)/(this.maxValue-this.minValue)*(2+this.options.angle-(1-this.options.angle))*Math.PI},b.prototype.setOptions=function(a){return null==a&&(a=null),b.__super__.setOptions.call(this,a),this.lineWidth=this.canvas.height*this.options.lineWidth,this.radius=this.options.radiusScale*(this.canvas.height/2-this.lineWidth/2),this},b.prototype.set=function(a){return this.value=a,this.value>this.maxValue&&(this.maxValue=1.1*this.value),AnimationUpdater.run()},b.prototype.render=function(){var a,b,c,d,e,f;return 
a=this.getAngle(this.displayedValue),f=this.canvas.width/2,c=this.canvas.height/2,this.textField&&this.textField.render(this),b=this.ctx.createRadialGradient(f,c,39,f,c,70),b.addColorStop(0,this.options.colorStart),b.addColorStop(1,this.options.colorStop),d=this.radius-this.lineWidth/2,e=this.radius+this.lineWidth/2,this.ctx.strokeStyle=this.options.strokeColor,this.ctx.beginPath(),this.ctx.arc(f,c,this.radius,(1-this.options.angle)*Math.PI,(2+this.options.angle)*Math.PI,!1),this.ctx.lineWidth=this.lineWidth,this.ctx.lineCap="round",this.ctx.stroke(),this.ctx.strokeStyle=b,this.ctx.beginPath(),this.ctx.arc(f,c,this.radius,(1-this.options.angle)*Math.PI,a,!1),this.ctx.stroke()},b}(e),f=function(a){function b(){return b.__super__.constructor.apply(this,arguments)}return r(b,a),b.prototype.strokeGradient=function(a,b,c,d){var e;return e=this.ctx.createRadialGradient(a,b,c,a,b,d),e.addColorStop(0,this.options.shadowColor),e.addColorStop(.12,this.options._orgStrokeColor),e.addColorStop(.88,this.options._orgStrokeColor),e.addColorStop(1,this.options.shadowColor),e},b.prototype.setOptions=function(a){var c,d,e,f;return null==a&&(a=null),b.__super__.setOptions.call(this,a),f=this.canvas.width/2,c=this.canvas.height/2,d=this.radius-this.lineWidth/2,e=this.radius+this.lineWidth/2,this.options._orgStrokeColor=this.options.strokeColor,this.options.strokeColor=this.strokeGradient(f,c,d,e),this},b}(d),window.AnimationUpdater={elements:[],animId:null,addAll:function(a){var b,c,d,e;for(e=[],c=0,d=a.length;c<d;c++)b=a[c],e.push(AnimationUpdater.elements.push(b));return e},add:function(a){return AnimationUpdater.elements.push(a)},run:function(a){var b,c,d,e,f;for(null==a&&(a=!1),b=!0,f=AnimationUpdater.elements,d=0,e=f.length;d<e;d++)c=f[d],c.update(a===!0)&&(b=!1);return b?cancelAnimationFrame(AnimationUpdater.animId):AnimationUpdater.animId=requestAnimationFrame(AnimationUpdater.run)}},"function"==typeof window.define&&null!=window.define.amd?define(function(){return{Gauge:g,Donut:f,BaseDonut:d,TextRenderer:i}}):"undefined"!=typeof module&&null!=module.exports?module.exports={Gauge:g,Donut:f,BaseDonut:d,TextRenderer:i}:(window.Gauge=g,window.Donut=f,window.BaseDonut=d,window.TextRenderer=i)}).call(this);
diff --git a/web/gui/lib/jquery-2.2.4.min.js b/web/gui/lib/jquery-2.2.4.min.js
new file mode 100644
index 0000000..c641fda
--- /dev/null
+++ b/web/gui/lib/jquery-2.2.4.min.js
@@ -0,0 +1,5 @@
+/*! jQuery v2.2.4 | (c) jQuery Foundation | jquery.org/license */
+// SPDX-License-Identifier: MIT
+!function(a,b){"object"==typeof module&&"object"==typeof module.exports?module.exports=a.document?b(a,!0):function(a){if(!a.document)throw new Error("jQuery requires a window with a document");return b(a)}:b(a)}("undefined"!=typeof window?window:this,function(a,b){var c=[],d=a.document,e=c.slice,f=c.concat,g=c.push,h=c.indexOf,i={},j=i.toString,k=i.hasOwnProperty,l={},m="2.2.4",n=function(a,b){return new n.fn.init(a,b)},o=/^[\s\uFEFF\xA0]+|[\s\uFEFF\xA0]+$/g,p=/^-ms-/,q=/-([\da-z])/gi,r=function(a,b){return b.toUpperCase()};n.fn=n.prototype={jquery:m,constructor:n,selector:"",length:0,toArray:function(){return e.call(this)},get:function(a){return null!=a?0>a?this[a+this.length]:this[a]:e.call(this)},pushStack:function(a){var b=n.merge(this.constructor(),a);return b.prevObject=this,b.context=this.context,b},each:function(a){return n.each(this,a)},map:function(a){return this.pushStack(n.map(this,function(b,c){return a.call(b,c,b)}))},slice:function(){return this.pushStack(e.apply(this,arguments))},first:function(){return this.eq(0)},last:function(){return this.eq(-1)},eq:function(a){var b=this.length,c=+a+(0>a?b:0);return this.pushStack(c>=0&&b>c?[this[c]]:[])},end:function(){return this.prevObject||this.constructor()},push:g,sort:c.sort,splice:c.splice},n.extend=n.fn.extend=function(){var a,b,c,d,e,f,g=arguments[0]||{},h=1,i=arguments.length,j=!1;for("boolean"==typeof g&&(j=g,g=arguments[h]||{},h++),"object"==typeof g||n.isFunction(g)||(g={}),h===i&&(g=this,h--);i>h;h++)if(null!=(a=arguments[h]))for(b in a)c=g[b],d=a[b],g!==d&&(j&&d&&(n.isPlainObject(d)||(e=n.isArray(d)))?(e?(e=!1,f=c&&n.isArray(c)?c:[]):f=c&&n.isPlainObject(c)?c:{},g[b]=n.extend(j,f,d)):void 0!==d&&(g[b]=d));return g},n.extend({expando:"jQuery"+(m+Math.random()).replace(/\D/g,""),isReady:!0,error:function(a){throw new Error(a)},noop:function(){},isFunction:function(a){return"function"===n.type(a)},isArray:Array.isArray,isWindow:function(a){return null!=a&&a===a.window},isNumeric:function(a){var b=a&&a.toString();return!n.isArray(a)&&b-parseFloat(b)+1>=0},isPlainObject:function(a){var b;if("object"!==n.type(a)||a.nodeType||n.isWindow(a))return!1;if(a.constructor&&!k.call(a,"constructor")&&!k.call(a.constructor.prototype||{},"isPrototypeOf"))return!1;for(b in a);return void 0===b||k.call(a,b)},isEmptyObject:function(a){var b;for(b in a)return!1;return!0},type:function(a){return null==a?a+"":"object"==typeof a||"function"==typeof a?i[j.call(a)]||"object":typeof a},globalEval:function(a){var b,c=eval;a=n.trim(a),a&&(1===a.indexOf("use strict")?(b=d.createElement("script"),b.text=a,d.head.appendChild(b).parentNode.removeChild(b)):c(a))},camelCase:function(a){return a.replace(p,"ms-").replace(q,r)},nodeName:function(a,b){return a.nodeName&&a.nodeName.toLowerCase()===b.toLowerCase()},each:function(a,b){var c,d=0;if(s(a)){for(c=a.length;c>d;d++)if(b.call(a[d],d,a[d])===!1)break}else for(d in a)if(b.call(a[d],d,a[d])===!1)break;return a},trim:function(a){return null==a?"":(a+"").replace(o,"")},makeArray:function(a,b){var c=b||[];return null!=a&&(s(Object(a))?n.merge(c,"string"==typeof a?[a]:a):g.call(c,a)),c},inArray:function(a,b,c){return null==b?-1:h.call(b,a,c)},merge:function(a,b){for(var c=+b.length,d=0,e=a.length;c>d;d++)a[e++]=b[d];return a.length=e,a},grep:function(a,b,c){for(var d,e=[],f=0,g=a.length,h=!c;g>f;f++)d=!b(a[f],f),d!==h&&e.push(a[f]);return e},map:function(a,b,c){var d,e,g=0,h=[];if(s(a))for(d=a.length;d>g;g++)e=b(a[g],g,c),null!=e&&h.push(e);else for(g in a)e=b(a[g],g,c),null!=e&&h.push(e);return 
f.apply([],h)},guid:1,proxy:function(a,b){var c,d,f;return"string"==typeof b&&(c=a[b],b=a,a=c),n.isFunction(a)?(d=e.call(arguments,2),f=function(){return a.apply(b||this,d.concat(e.call(arguments)))},f.guid=a.guid=a.guid||n.guid++,f):void 0},now:Date.now,support:l}),"function"==typeof Symbol&&(n.fn[Symbol.iterator]=c[Symbol.iterator]),n.each("Boolean Number String Function Array Date RegExp Object Error Symbol".split(" "),function(a,b){i["[object "+b+"]"]=b.toLowerCase()});function s(a){var b=!!a&&"length"in a&&a.length,c=n.type(a);return"function"===c||n.isWindow(a)?!1:"array"===c||0===b||"number"==typeof b&&b>0&&b-1 in a}var t=function(a){var b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q,r,s,t,u="sizzle"+1*new Date,v=a.document,w=0,x=0,y=ga(),z=ga(),A=ga(),B=function(a,b){return a===b&&(l=!0),0},C=1<<31,D={}.hasOwnProperty,E=[],F=E.pop,G=E.push,H=E.push,I=E.slice,J=function(a,b){for(var c=0,d=a.length;d>c;c++)if(a[c]===b)return c;return-1},K="checked|selected|async|autofocus|autoplay|controls|defer|disabled|hidden|ismap|loop|multiple|open|readonly|required|scoped",L="[\\x20\\t\\r\\n\\f]",M="(?:\\\\.|[\\w-]|[^\\x00-\\xa0])+",N="\\["+L+"*("+M+")(?:"+L+"*([*^$|!~]?=)"+L+"*(?:'((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\"|("+M+"))|)"+L+"*\\]",O=":("+M+")(?:\\((('((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\")|((?:\\\\.|[^\\\\()[\\]]|"+N+")*)|.*)\\)|)",P=new RegExp(L+"+","g"),Q=new RegExp("^"+L+"+|((?:^|[^\\\\])(?:\\\\.)*)"+L+"+$","g"),R=new RegExp("^"+L+"*,"+L+"*"),S=new RegExp("^"+L+"*([>+~]|"+L+")"+L+"*"),T=new RegExp("="+L+"*([^\\]'\"]*?)"+L+"*\\]","g"),U=new RegExp(O),V=new RegExp("^"+M+"$"),W={ID:new RegExp("^#("+M+")"),CLASS:new RegExp("^\\.("+M+")"),TAG:new RegExp("^("+M+"|[*])"),ATTR:new RegExp("^"+N),PSEUDO:new RegExp("^"+O),CHILD:new RegExp("^:(only|first|last|nth|nth-last)-(child|of-type)(?:\\("+L+"*(even|odd|(([+-]|)(\\d*)n|)"+L+"*(?:([+-]|)"+L+"*(\\d+)|))"+L+"*\\)|)","i"),bool:new RegExp("^(?:"+K+")$","i"),needsContext:new RegExp("^"+L+"*[>+~]|:(even|odd|eq|gt|lt|nth|first|last)(?:\\("+L+"*((?:-\\d)?\\d*)"+L+"*\\)|)(?=[^-]|$)","i")},X=/^(?:input|select|textarea|button)$/i,Y=/^h\d$/i,Z=/^[^{]+\{\s*\[native \w/,$=/^(?:#([\w-]+)|(\w+)|\.([\w-]+))$/,_=/[+~]/,aa=/'|\\/g,ba=new RegExp("\\\\([\\da-f]{1,6}"+L+"?|("+L+")|.)","ig"),ca=function(a,b,c){var d="0x"+b-65536;return d!==d||c?b:0>d?String.fromCharCode(d+65536):String.fromCharCode(d>>10|55296,1023&d|56320)},da=function(){m()};try{H.apply(E=I.call(v.childNodes),v.childNodes),E[v.childNodes.length].nodeType}catch(ea){H={apply:E.length?function(a,b){G.apply(a,I.call(b))}:function(a,b){var c=a.length,d=0;while(a[c++]=b[d++]);a.length=c-1}}}function fa(a,b,d,e){var f,h,j,k,l,o,r,s,w=b&&b.ownerDocument,x=b?b.nodeType:9;if(d=d||[],"string"!=typeof a||!a||1!==x&&9!==x&&11!==x)return d;if(!e&&((b?b.ownerDocument||b:v)!==n&&m(b),b=b||n,p)){if(11!==x&&(o=$.exec(a)))if(f=o[1]){if(9===x){if(!(j=b.getElementById(f)))return d;if(j.id===f)return d.push(j),d}else if(w&&(j=w.getElementById(f))&&t(b,j)&&j.id===f)return d.push(j),d}else{if(o[2])return H.apply(d,b.getElementsByTagName(a)),d;if((f=o[3])&&c.getElementsByClassName&&b.getElementsByClassName)return H.apply(d,b.getElementsByClassName(f)),d}if(c.qsa&&!A[a+" "]&&(!q||!q.test(a))){if(1!==x)w=b,s=a;else if("object"!==b.nodeName.toLowerCase()){(k=b.getAttribute("id"))?k=k.replace(aa,"\\$&"):b.setAttribute("id",k=u),r=g(a),h=r.length,l=V.test(k)?"#"+k:"[id='"+k+"']";while(h--)r[h]=l+" "+qa(r[h]);s=r.join(","),w=_.test(a)&&oa(b.parentNode)||b}if(s)try{return 
H.apply(d,w.querySelectorAll(s)),d}catch(y){}finally{k===u&&b.removeAttribute("id")}}}return i(a.replace(Q,"$1"),b,d,e)}function ga(){var a=[];function b(c,e){return a.push(c+" ")>d.cacheLength&&delete b[a.shift()],b[c+" "]=e}return b}function ha(a){return a[u]=!0,a}function ia(a){var b=n.createElement("div");try{return!!a(b)}catch(c){return!1}finally{b.parentNode&&b.parentNode.removeChild(b),b=null}}function ja(a,b){var c=a.split("|"),e=c.length;while(e--)d.attrHandle[c[e]]=b}function ka(a,b){var c=b&&a,d=c&&1===a.nodeType&&1===b.nodeType&&(~b.sourceIndex||C)-(~a.sourceIndex||C);if(d)return d;if(c)while(c=c.nextSibling)if(c===b)return-1;return a?1:-1}function la(a){return function(b){var c=b.nodeName.toLowerCase();return"input"===c&&b.type===a}}function ma(a){return function(b){var c=b.nodeName.toLowerCase();return("input"===c||"button"===c)&&b.type===a}}function na(a){return ha(function(b){return b=+b,ha(function(c,d){var e,f=a([],c.length,b),g=f.length;while(g--)c[e=f[g]]&&(c[e]=!(d[e]=c[e]))})})}function oa(a){return a&&"undefined"!=typeof a.getElementsByTagName&&a}c=fa.support={},f=fa.isXML=function(a){var b=a&&(a.ownerDocument||a).documentElement;return b?"HTML"!==b.nodeName:!1},m=fa.setDocument=function(a){var b,e,g=a?a.ownerDocument||a:v;return g!==n&&9===g.nodeType&&g.documentElement?(n=g,o=n.documentElement,p=!f(n),(e=n.defaultView)&&e.top!==e&&(e.addEventListener?e.addEventListener("unload",da,!1):e.attachEvent&&e.attachEvent("onunload",da)),c.attributes=ia(function(a){return a.className="i",!a.getAttribute("className")}),c.getElementsByTagName=ia(function(a){return a.appendChild(n.createComment("")),!a.getElementsByTagName("*").length}),c.getElementsByClassName=Z.test(n.getElementsByClassName),c.getById=ia(function(a){return o.appendChild(a).id=u,!n.getElementsByName||!n.getElementsByName(u).length}),c.getById?(d.find.ID=function(a,b){if("undefined"!=typeof b.getElementById&&p){var c=b.getElementById(a);return c?[c]:[]}},d.filter.ID=function(a){var b=a.replace(ba,ca);return function(a){return a.getAttribute("id")===b}}):(delete d.find.ID,d.filter.ID=function(a){var b=a.replace(ba,ca);return function(a){var c="undefined"!=typeof a.getAttributeNode&&a.getAttributeNode("id");return c&&c.value===b}}),d.find.TAG=c.getElementsByTagName?function(a,b){return"undefined"!=typeof b.getElementsByTagName?b.getElementsByTagName(a):c.qsa?b.querySelectorAll(a):void 0}:function(a,b){var c,d=[],e=0,f=b.getElementsByTagName(a);if("*"===a){while(c=f[e++])1===c.nodeType&&d.push(c);return d}return f},d.find.CLASS=c.getElementsByClassName&&function(a,b){return"undefined"!=typeof b.getElementsByClassName&&p?b.getElementsByClassName(a):void 0},r=[],q=[],(c.qsa=Z.test(n.querySelectorAll))&&(ia(function(a){o.appendChild(a).innerHTML="<a id='"+u+"'></a><select id='"+u+"-\r\\' msallowcapture=''><option selected=''></option></select>",a.querySelectorAll("[msallowcapture^='']").length&&q.push("[*^$]="+L+"*(?:''|\"\")"),a.querySelectorAll("[selected]").length||q.push("\\["+L+"*(?:value|"+K+")"),a.querySelectorAll("[id~="+u+"-]").length||q.push("~="),a.querySelectorAll(":checked").length||q.push(":checked"),a.querySelectorAll("a#"+u+"+*").length||q.push(".#.+[+~]")}),ia(function(a){var 
b=n.createElement("input");b.setAttribute("type","hidden"),a.appendChild(b).setAttribute("name","D"),a.querySelectorAll("[name=d]").length&&q.push("name"+L+"*[*^$|!~]?="),a.querySelectorAll(":enabled").length||q.push(":enabled",":disabled"),a.querySelectorAll("*,:x"),q.push(",.*:")})),(c.matchesSelector=Z.test(s=o.matches||o.webkitMatchesSelector||o.mozMatchesSelector||o.oMatchesSelector||o.msMatchesSelector))&&ia(function(a){c.disconnectedMatch=s.call(a,"div"),s.call(a,"[s!='']:x"),r.push("!=",O)}),q=q.length&&new RegExp(q.join("|")),r=r.length&&new RegExp(r.join("|")),b=Z.test(o.compareDocumentPosition),t=b||Z.test(o.contains)?function(a,b){var c=9===a.nodeType?a.documentElement:a,d=b&&b.parentNode;return a===d||!(!d||1!==d.nodeType||!(c.contains?c.contains(d):a.compareDocumentPosition&&16&a.compareDocumentPosition(d)))}:function(a,b){if(b)while(b=b.parentNode)if(b===a)return!0;return!1},B=b?function(a,b){if(a===b)return l=!0,0;var d=!a.compareDocumentPosition-!b.compareDocumentPosition;return d?d:(d=(a.ownerDocument||a)===(b.ownerDocument||b)?a.compareDocumentPosition(b):1,1&d||!c.sortDetached&&b.compareDocumentPosition(a)===d?a===n||a.ownerDocument===v&&t(v,a)?-1:b===n||b.ownerDocument===v&&t(v,b)?1:k?J(k,a)-J(k,b):0:4&d?-1:1)}:function(a,b){if(a===b)return l=!0,0;var c,d=0,e=a.parentNode,f=b.parentNode,g=[a],h=[b];if(!e||!f)return a===n?-1:b===n?1:e?-1:f?1:k?J(k,a)-J(k,b):0;if(e===f)return ka(a,b);c=a;while(c=c.parentNode)g.unshift(c);c=b;while(c=c.parentNode)h.unshift(c);while(g[d]===h[d])d++;return d?ka(g[d],h[d]):g[d]===v?-1:h[d]===v?1:0},n):n},fa.matches=function(a,b){return fa(a,null,null,b)},fa.matchesSelector=function(a,b){if((a.ownerDocument||a)!==n&&m(a),b=b.replace(T,"='$1']"),c.matchesSelector&&p&&!A[b+" "]&&(!r||!r.test(b))&&(!q||!q.test(b)))try{var d=s.call(a,b);if(d||c.disconnectedMatch||a.document&&11!==a.document.nodeType)return d}catch(e){}return fa(b,n,null,[a]).length>0},fa.contains=function(a,b){return(a.ownerDocument||a)!==n&&m(a),t(a,b)},fa.attr=function(a,b){(a.ownerDocument||a)!==n&&m(a);var e=d.attrHandle[b.toLowerCase()],f=e&&D.call(d.attrHandle,b.toLowerCase())?e(a,b,!p):void 0;return void 0!==f?f:c.attributes||!p?a.getAttribute(b):(f=a.getAttributeNode(b))&&f.specified?f.value:null},fa.error=function(a){throw new Error("Syntax error, unrecognized expression: "+a)},fa.uniqueSort=function(a){var b,d=[],e=0,f=0;if(l=!c.detectDuplicates,k=!c.sortStable&&a.slice(0),a.sort(B),l){while(b=a[f++])b===a[f]&&(e=d.push(f));while(e--)a.splice(d[e],1)}return k=null,a},e=fa.getText=function(a){var b,c="",d=0,f=a.nodeType;if(f){if(1===f||9===f||11===f){if("string"==typeof a.textContent)return a.textContent;for(a=a.firstChild;a;a=a.nextSibling)c+=e(a)}else if(3===f||4===f)return a.nodeValue}else while(b=a[d++])c+=e(b);return c},d=fa.selectors={cacheLength:50,createPseudo:ha,match:W,attrHandle:{},find:{},relative:{">":{dir:"parentNode",first:!0}," ":{dir:"parentNode"},"+":{dir:"previousSibling",first:!0},"~":{dir:"previousSibling"}},preFilter:{ATTR:function(a){return a[1]=a[1].replace(ba,ca),a[3]=(a[3]||a[4]||a[5]||"").replace(ba,ca),"~="===a[2]&&(a[3]=" "+a[3]+" "),a.slice(0,4)},CHILD:function(a){return a[1]=a[1].toLowerCase(),"nth"===a[1].slice(0,3)?(a[3]||fa.error(a[0]),a[4]=+(a[4]?a[5]+(a[6]||1):2*("even"===a[3]||"odd"===a[3])),a[5]=+(a[7]+a[8]||"odd"===a[3])):a[3]&&fa.error(a[0]),a},PSEUDO:function(a){var b,c=!a[6]&&a[2];return 
W.CHILD.test(a[0])?null:(a[3]?a[2]=a[4]||a[5]||"":c&&U.test(c)&&(b=g(c,!0))&&(b=c.indexOf(")",c.length-b)-c.length)&&(a[0]=a[0].slice(0,b),a[2]=c.slice(0,b)),a.slice(0,3))}},filter:{TAG:function(a){var b=a.replace(ba,ca).toLowerCase();return"*"===a?function(){return!0}:function(a){return a.nodeName&&a.nodeName.toLowerCase()===b}},CLASS:function(a){var b=y[a+" "];return b||(b=new RegExp("(^|"+L+")"+a+"("+L+"|$)"))&&y(a,function(a){return b.test("string"==typeof a.className&&a.className||"undefined"!=typeof a.getAttribute&&a.getAttribute("class")||"")})},ATTR:function(a,b,c){return function(d){var e=fa.attr(d,a);return null==e?"!="===b:b?(e+="","="===b?e===c:"!="===b?e!==c:"^="===b?c&&0===e.indexOf(c):"*="===b?c&&e.indexOf(c)>-1:"$="===b?c&&e.slice(-c.length)===c:"~="===b?(" "+e.replace(P," ")+" ").indexOf(c)>-1:"|="===b?e===c||e.slice(0,c.length+1)===c+"-":!1):!0}},CHILD:function(a,b,c,d,e){var f="nth"!==a.slice(0,3),g="last"!==a.slice(-4),h="of-type"===b;return 1===d&&0===e?function(a){return!!a.parentNode}:function(b,c,i){var j,k,l,m,n,o,p=f!==g?"nextSibling":"previousSibling",q=b.parentNode,r=h&&b.nodeName.toLowerCase(),s=!i&&!h,t=!1;if(q){if(f){while(p){m=b;while(m=m[p])if(h?m.nodeName.toLowerCase()===r:1===m.nodeType)return!1;o=p="only"===a&&!o&&"nextSibling"}return!0}if(o=[g?q.firstChild:q.lastChild],g&&s){m=q,l=m[u]||(m[u]={}),k=l[m.uniqueID]||(l[m.uniqueID]={}),j=k[a]||[],n=j[0]===w&&j[1],t=n&&j[2],m=n&&q.childNodes[n];while(m=++n&&m&&m[p]||(t=n=0)||o.pop())if(1===m.nodeType&&++t&&m===b){k[a]=[w,n,t];break}}else if(s&&(m=b,l=m[u]||(m[u]={}),k=l[m.uniqueID]||(l[m.uniqueID]={}),j=k[a]||[],n=j[0]===w&&j[1],t=n),t===!1)while(m=++n&&m&&m[p]||(t=n=0)||o.pop())if((h?m.nodeName.toLowerCase()===r:1===m.nodeType)&&++t&&(s&&(l=m[u]||(m[u]={}),k=l[m.uniqueID]||(l[m.uniqueID]={}),k[a]=[w,t]),m===b))break;return t-=e,t===d||t%d===0&&t/d>=0}}},PSEUDO:function(a,b){var c,e=d.pseudos[a]||d.setFilters[a.toLowerCase()]||fa.error("unsupported pseudo: "+a);return e[u]?e(b):e.length>1?(c=[a,a,"",b],d.setFilters.hasOwnProperty(a.toLowerCase())?ha(function(a,c){var d,f=e(a,b),g=f.length;while(g--)d=J(a,f[g]),a[d]=!(c[d]=f[g])}):function(a){return e(a,0,c)}):e}},pseudos:{not:ha(function(a){var b=[],c=[],d=h(a.replace(Q,"$1"));return d[u]?ha(function(a,b,c,e){var f,g=d(a,null,e,[]),h=a.length;while(h--)(f=g[h])&&(a[h]=!(b[h]=f))}):function(a,e,f){return b[0]=a,d(b,null,f,c),b[0]=null,!c.pop()}}),has:ha(function(a){return function(b){return fa(a,b).length>0}}),contains:ha(function(a){return a=a.replace(ba,ca),function(b){return(b.textContent||b.innerText||e(b)).indexOf(a)>-1}}),lang:ha(function(a){return V.test(a||"")||fa.error("unsupported lang: "+a),a=a.replace(ba,ca).toLowerCase(),function(b){var c;do if(c=p?b.lang:b.getAttribute("xml:lang")||b.getAttribute("lang"))return c=c.toLowerCase(),c===a||0===c.indexOf(a+"-");while((b=b.parentNode)&&1===b.nodeType);return!1}}),target:function(b){var c=a.location&&a.location.hash;return c&&c.slice(1)===b.id},root:function(a){return a===o},focus:function(a){return a===n.activeElement&&(!n.hasFocus||n.hasFocus())&&!!(a.type||a.href||~a.tabIndex)},enabled:function(a){return a.disabled===!1},disabled:function(a){return a.disabled===!0},checked:function(a){var b=a.nodeName.toLowerCase();return"input"===b&&!!a.checked||"option"===b&&!!a.selected},selected:function(a){return 
a.parentNode&&a.parentNode.selectedIndex,a.selected===!0},empty:function(a){for(a=a.firstChild;a;a=a.nextSibling)if(a.nodeType<6)return!1;return!0},parent:function(a){return!d.pseudos.empty(a)},header:function(a){return Y.test(a.nodeName)},input:function(a){return X.test(a.nodeName)},button:function(a){var b=a.nodeName.toLowerCase();return"input"===b&&"button"===a.type||"button"===b},text:function(a){var b;return"input"===a.nodeName.toLowerCase()&&"text"===a.type&&(null==(b=a.getAttribute("type"))||"text"===b.toLowerCase())},first:na(function(){return[0]}),last:na(function(a,b){return[b-1]}),eq:na(function(a,b,c){return[0>c?c+b:c]}),even:na(function(a,b){for(var c=0;b>c;c+=2)a.push(c);return a}),odd:na(function(a,b){for(var c=1;b>c;c+=2)a.push(c);return a}),lt:na(function(a,b,c){for(var d=0>c?c+b:c;--d>=0;)a.push(d);return a}),gt:na(function(a,b,c){for(var d=0>c?c+b:c;++d<b;)a.push(d);return a})}},d.pseudos.nth=d.pseudos.eq;for(b in{radio:!0,checkbox:!0,file:!0,password:!0,image:!0})d.pseudos[b]=la(b);for(b in{submit:!0,reset:!0})d.pseudos[b]=ma(b);function pa(){}pa.prototype=d.filters=d.pseudos,d.setFilters=new pa,g=fa.tokenize=function(a,b){var c,e,f,g,h,i,j,k=z[a+" "];if(k)return b?0:k.slice(0);h=a,i=[],j=d.preFilter;while(h){c&&!(e=R.exec(h))||(e&&(h=h.slice(e[0].length)||h),i.push(f=[])),c=!1,(e=S.exec(h))&&(c=e.shift(),f.push({value:c,type:e[0].replace(Q," ")}),h=h.slice(c.length));for(g in d.filter)!(e=W[g].exec(h))||j[g]&&!(e=j[g](e))||(c=e.shift(),f.push({value:c,type:g,matches:e}),h=h.slice(c.length));if(!c)break}return b?h.length:h?fa.error(a):z(a,i).slice(0)};function qa(a){for(var b=0,c=a.length,d="";c>b;b++)d+=a[b].value;return d}function ra(a,b,c){var d=b.dir,e=c&&"parentNode"===d,f=x++;return b.first?function(b,c,f){while(b=b[d])if(1===b.nodeType||e)return a(b,c,f)}:function(b,c,g){var h,i,j,k=[w,f];if(g){while(b=b[d])if((1===b.nodeType||e)&&a(b,c,g))return!0}else while(b=b[d])if(1===b.nodeType||e){if(j=b[u]||(b[u]={}),i=j[b.uniqueID]||(j[b.uniqueID]={}),(h=i[d])&&h[0]===w&&h[1]===f)return k[2]=h[2];if(i[d]=k,k[2]=a(b,c,g))return!0}}}function sa(a){return a.length>1?function(b,c,d){var e=a.length;while(e--)if(!a[e](b,c,d))return!1;return!0}:a[0]}function ta(a,b,c){for(var d=0,e=b.length;e>d;d++)fa(a,b[d],c);return c}function ua(a,b,c,d,e){for(var f,g=[],h=0,i=a.length,j=null!=b;i>h;h++)(f=a[h])&&(c&&!c(f,d,e)||(g.push(f),j&&b.push(h)));return g}function va(a,b,c,d,e,f){return d&&!d[u]&&(d=va(d)),e&&!e[u]&&(e=va(e,f)),ha(function(f,g,h,i){var j,k,l,m=[],n=[],o=g.length,p=f||ta(b||"*",h.nodeType?[h]:h,[]),q=!a||!f&&b?p:ua(p,m,a,h,i),r=c?e||(f?a:o||d)?[]:g:q;if(c&&c(q,r,h,i),d){j=ua(r,n),d(j,[],h,i),k=j.length;while(k--)(l=j[k])&&(r[n[k]]=!(q[n[k]]=l))}if(f){if(e||a){if(e){j=[],k=r.length;while(k--)(l=r[k])&&j.push(q[k]=l);e(null,r=[],j,i)}k=r.length;while(k--)(l=r[k])&&(j=e?J(f,l):m[k])>-1&&(f[j]=!(g[j]=l))}}else r=ua(r===g?r.splice(o,r.length):r),e?e(null,g,r,i):H.apply(g,r)})}function wa(a){for(var b,c,e,f=a.length,g=d.relative[a[0].type],h=g||d.relative[" "],i=g?1:0,k=ra(function(a){return a===b},h,!0),l=ra(function(a){return J(b,a)>-1},h,!0),m=[function(a,c,d){var e=!g&&(d||c!==j)||((b=c).nodeType?k(a,c,d):l(a,c,d));return b=null,e}];f>i;i++)if(c=d.relative[a[i].type])m=[ra(sa(m),c)];else{if(c=d.filter[a[i].type].apply(null,a[i].matches),c[u]){for(e=++i;f>e;e++)if(d.relative[a[e].type])break;return va(i>1&&sa(m),i>1&&qa(a.slice(0,i-1).concat({value:" "===a[i-2].type?"*":""})).replace(Q,"$1"),c,e>i&&wa(a.slice(i,e)),f>e&&wa(a=a.slice(e)),f>e&&qa(a))}m.push(c)}return 
sa(m)}function xa(a,b){var c=b.length>0,e=a.length>0,f=function(f,g,h,i,k){var l,o,q,r=0,s="0",t=f&&[],u=[],v=j,x=f||e&&d.find.TAG("*",k),y=w+=null==v?1:Math.random()||.1,z=x.length;for(k&&(j=g===n||g||k);s!==z&&null!=(l=x[s]);s++){if(e&&l){o=0,g||l.ownerDocument===n||(m(l),h=!p);while(q=a[o++])if(q(l,g||n,h)){i.push(l);break}k&&(w=y)}c&&((l=!q&&l)&&r--,f&&t.push(l))}if(r+=s,c&&s!==r){o=0;while(q=b[o++])q(t,u,g,h);if(f){if(r>0)while(s--)t[s]||u[s]||(u[s]=F.call(i));u=ua(u)}H.apply(i,u),k&&!f&&u.length>0&&r+b.length>1&&fa.uniqueSort(i)}return k&&(w=y,j=v),t};return c?ha(f):f}return h=fa.compile=function(a,b){var c,d=[],e=[],f=A[a+" "];if(!f){b||(b=g(a)),c=b.length;while(c--)f=wa(b[c]),f[u]?d.push(f):e.push(f);f=A(a,xa(e,d)),f.selector=a}return f},i=fa.select=function(a,b,e,f){var i,j,k,l,m,n="function"==typeof a&&a,o=!f&&g(a=n.selector||a);if(e=e||[],1===o.length){if(j=o[0]=o[0].slice(0),j.length>2&&"ID"===(k=j[0]).type&&c.getById&&9===b.nodeType&&p&&d.relative[j[1].type]){if(b=(d.find.ID(k.matches[0].replace(ba,ca),b)||[])[0],!b)return e;n&&(b=b.parentNode),a=a.slice(j.shift().value.length)}i=W.needsContext.test(a)?0:j.length;while(i--){if(k=j[i],d.relative[l=k.type])break;if((m=d.find[l])&&(f=m(k.matches[0].replace(ba,ca),_.test(j[0].type)&&oa(b.parentNode)||b))){if(j.splice(i,1),a=f.length&&qa(j),!a)return H.apply(e,f),e;break}}}return(n||h(a,o))(f,b,!p,e,!b||_.test(a)&&oa(b.parentNode)||b),e},c.sortStable=u.split("").sort(B).join("")===u,c.detectDuplicates=!!l,m(),c.sortDetached=ia(function(a){return 1&a.compareDocumentPosition(n.createElement("div"))}),ia(function(a){return a.innerHTML="<a href='#'></a>","#"===a.firstChild.getAttribute("href")})||ja("type|href|height|width",function(a,b,c){return c?void 0:a.getAttribute(b,"type"===b.toLowerCase()?1:2)}),c.attributes&&ia(function(a){return a.innerHTML="<input/>",a.firstChild.setAttribute("value",""),""===a.firstChild.getAttribute("value")})||ja("value",function(a,b,c){return c||"input"!==a.nodeName.toLowerCase()?void 0:a.defaultValue}),ia(function(a){return null==a.getAttribute("disabled")})||ja(K,function(a,b,c){var d;return c?void 0:a[b]===!0?b.toLowerCase():(d=a.getAttributeNode(b))&&d.specified?d.value:null}),fa}(a);n.find=t,n.expr=t.selectors,n.expr[":"]=n.expr.pseudos,n.uniqueSort=n.unique=t.uniqueSort,n.text=t.getText,n.isXMLDoc=t.isXML,n.contains=t.contains;var u=function(a,b,c){var d=[],e=void 0!==c;while((a=a[b])&&9!==a.nodeType)if(1===a.nodeType){if(e&&n(a).is(c))break;d.push(a)}return d},v=function(a,b){for(var c=[];a;a=a.nextSibling)1===a.nodeType&&a!==b&&c.push(a);return c},w=n.expr.match.needsContext,x=/^<([\w-]+)\s*\/?>(?:<\/\1>|)$/,y=/^.[^:#\[\.,]*$/;function z(a,b,c){if(n.isFunction(b))return n.grep(a,function(a,d){return!!b.call(a,d,a)!==c});if(b.nodeType)return n.grep(a,function(a){return a===b!==c});if("string"==typeof b){if(y.test(b))return n.filter(b,a,c);b=n.filter(b,a)}return n.grep(a,function(a){return h.call(b,a)>-1!==c})}n.filter=function(a,b,c){var d=b[0];return c&&(a=":not("+a+")"),1===b.length&&1===d.nodeType?n.find.matchesSelector(d,a)?[d]:[]:n.find.matches(a,n.grep(b,function(a){return 1===a.nodeType}))},n.fn.extend({find:function(a){var b,c=this.length,d=[],e=this;if("string"!=typeof a)return this.pushStack(n(a).filter(function(){for(b=0;c>b;b++)if(n.contains(e[b],this))return!0}));for(b=0;c>b;b++)n.find(a,e[b],d);return d=this.pushStack(c>1?n.unique(d):d),d.selector=this.selector?this.selector+" "+a:a,d},filter:function(a){return this.pushStack(z(this,a||[],!1))},not:function(a){return 
this.pushStack(z(this,a||[],!0))},is:function(a){return!!z(this,"string"==typeof a&&w.test(a)?n(a):a||[],!1).length}});var A,B=/^(?:\s*(<[\w\W]+>)[^>]*|#([\w-]*))$/,C=n.fn.init=function(a,b,c){var e,f;if(!a)return this;if(c=c||A,"string"==typeof a){if(e="<"===a[0]&&">"===a[a.length-1]&&a.length>=3?[null,a,null]:B.exec(a),!e||!e[1]&&b)return!b||b.jquery?(b||c).find(a):this.constructor(b).find(a);if(e[1]){if(b=b instanceof n?b[0]:b,n.merge(this,n.parseHTML(e[1],b&&b.nodeType?b.ownerDocument||b:d,!0)),x.test(e[1])&&n.isPlainObject(b))for(e in b)n.isFunction(this[e])?this[e](b[e]):this.attr(e,b[e]);return this}return f=d.getElementById(e[2]),f&&f.parentNode&&(this.length=1,this[0]=f),this.context=d,this.selector=a,this}return a.nodeType?(this.context=this[0]=a,this.length=1,this):n.isFunction(a)?void 0!==c.ready?c.ready(a):a(n):(void 0!==a.selector&&(this.selector=a.selector,this.context=a.context),n.makeArray(a,this))};C.prototype=n.fn,A=n(d);var D=/^(?:parents|prev(?:Until|All))/,E={children:!0,contents:!0,next:!0,prev:!0};n.fn.extend({has:function(a){var b=n(a,this),c=b.length;return this.filter(function(){for(var a=0;c>a;a++)if(n.contains(this,b[a]))return!0})},closest:function(a,b){for(var c,d=0,e=this.length,f=[],g=w.test(a)||"string"!=typeof a?n(a,b||this.context):0;e>d;d++)for(c=this[d];c&&c!==b;c=c.parentNode)if(c.nodeType<11&&(g?g.index(c)>-1:1===c.nodeType&&n.find.matchesSelector(c,a))){f.push(c);break}return this.pushStack(f.length>1?n.uniqueSort(f):f)},index:function(a){return a?"string"==typeof a?h.call(n(a),this[0]):h.call(this,a.jquery?a[0]:a):this[0]&&this[0].parentNode?this.first().prevAll().length:-1},add:function(a,b){return this.pushStack(n.uniqueSort(n.merge(this.get(),n(a,b))))},addBack:function(a){return this.add(null==a?this.prevObject:this.prevObject.filter(a))}});function F(a,b){while((a=a[b])&&1!==a.nodeType);return a}n.each({parent:function(a){var b=a.parentNode;return b&&11!==b.nodeType?b:null},parents:function(a){return u(a,"parentNode")},parentsUntil:function(a,b,c){return u(a,"parentNode",c)},next:function(a){return F(a,"nextSibling")},prev:function(a){return F(a,"previousSibling")},nextAll:function(a){return u(a,"nextSibling")},prevAll:function(a){return u(a,"previousSibling")},nextUntil:function(a,b,c){return u(a,"nextSibling",c)},prevUntil:function(a,b,c){return u(a,"previousSibling",c)},siblings:function(a){return v((a.parentNode||{}).firstChild,a)},children:function(a){return v(a.firstChild)},contents:function(a){return a.contentDocument||n.merge([],a.childNodes)}},function(a,b){n.fn[a]=function(c,d){var e=n.map(this,b,c);return"Until"!==a.slice(-5)&&(d=c),d&&"string"==typeof d&&(e=n.filter(d,e)),this.length>1&&(E[a]||n.uniqueSort(e),D.test(a)&&e.reverse()),this.pushStack(e)}});var G=/\S+/g;function H(a){var b={};return n.each(a.match(G)||[],function(a,c){b[c]=!0}),b}n.Callbacks=function(a){a="string"==typeof a?H(a):n.extend({},a);var b,c,d,e,f=[],g=[],h=-1,i=function(){for(e=a.once,d=b=!0;g.length;h=-1){c=g.shift();while(++h<f.length)f[h].apply(c[0],c[1])===!1&&a.stopOnFalse&&(h=f.length,c=!1)}a.memory||(c=!1),b=!1,e&&(f=c?[]:"")},j={add:function(){return f&&(c&&!b&&(h=f.length-1,g.push(c)),function d(b){n.each(b,function(b,c){n.isFunction(c)?a.unique&&j.has(c)||f.push(c):c&&c.length&&"string"!==n.type(c)&&d(c)})}(arguments),c&&!b&&i()),this},remove:function(){return n.each(arguments,function(a,b){var c;while((c=n.inArray(b,f,c))>-1)f.splice(c,1),h>=c&&h--}),this},has:function(a){return a?n.inArray(a,f)>-1:f.length>0},empty:function(){return 
f&&(f=[]),this},disable:function(){return e=g=[],f=c="",this},disabled:function(){return!f},lock:function(){return e=g=[],c||(f=c=""),this},locked:function(){return!!e},fireWith:function(a,c){return e||(c=c||[],c=[a,c.slice?c.slice():c],g.push(c),b||i()),this},fire:function(){return j.fireWith(this,arguments),this},fired:function(){return!!d}};return j},n.extend({Deferred:function(a){var b=[["resolve","done",n.Callbacks("once memory"),"resolved"],["reject","fail",n.Callbacks("once memory"),"rejected"],["notify","progress",n.Callbacks("memory")]],c="pending",d={state:function(){return c},always:function(){return e.done(arguments).fail(arguments),this},then:function(){var a=arguments;return n.Deferred(function(c){n.each(b,function(b,f){var g=n.isFunction(a[b])&&a[b];e[f[1]](function(){var a=g&&g.apply(this,arguments);a&&n.isFunction(a.promise)?a.promise().progress(c.notify).done(c.resolve).fail(c.reject):c[f[0]+"With"](this===d?c.promise():this,g?[a]:arguments)})}),a=null}).promise()},promise:function(a){return null!=a?n.extend(a,d):d}},e={};return d.pipe=d.then,n.each(b,function(a,f){var g=f[2],h=f[3];d[f[1]]=g.add,h&&g.add(function(){c=h},b[1^a][2].disable,b[2][2].lock),e[f[0]]=function(){return e[f[0]+"With"](this===e?d:this,arguments),this},e[f[0]+"With"]=g.fireWith}),d.promise(e),a&&a.call(e,e),e},when:function(a){var b=0,c=e.call(arguments),d=c.length,f=1!==d||a&&n.isFunction(a.promise)?d:0,g=1===f?a:n.Deferred(),h=function(a,b,c){return function(d){b[a]=this,c[a]=arguments.length>1?e.call(arguments):d,c===i?g.notifyWith(b,c):--f||g.resolveWith(b,c)}},i,j,k;if(d>1)for(i=new Array(d),j=new Array(d),k=new Array(d);d>b;b++)c[b]&&n.isFunction(c[b].promise)?c[b].promise().progress(h(b,j,i)).done(h(b,k,c)).fail(g.reject):--f;return f||g.resolveWith(k,c),g.promise()}});var I;n.fn.ready=function(a){return n.ready.promise().done(a),this},n.extend({isReady:!1,readyWait:1,holdReady:function(a){a?n.readyWait++:n.ready(!0)},ready:function(a){(a===!0?--n.readyWait:n.isReady)||(n.isReady=!0,a!==!0&&--n.readyWait>0||(I.resolveWith(d,[n]),n.fn.triggerHandler&&(n(d).triggerHandler("ready"),n(d).off("ready"))))}});function J(){d.removeEventListener("DOMContentLoaded",J),a.removeEventListener("load",J),n.ready()}n.ready.promise=function(b){return I||(I=n.Deferred(),"complete"===d.readyState||"loading"!==d.readyState&&!d.documentElement.doScroll?a.setTimeout(n.ready):(d.addEventListener("DOMContentLoaded",J),a.addEventListener("load",J))),I.promise(b)},n.ready.promise();var K=function(a,b,c,d,e,f,g){var h=0,i=a.length,j=null==c;if("object"===n.type(c)){e=!0;for(h in c)K(a,b,h,c[h],!0,f,g)}else if(void 0!==d&&(e=!0,n.isFunction(d)||(g=!0),j&&(g?(b.call(a,d),b=null):(j=b,b=function(a,b,c){return j.call(n(a),c)})),b))for(;i>h;h++)b(a[h],c,g?d:d.call(a[h],h,b(a[h],c)));return e?a:j?b.call(a):i?b(a[0],c):f},L=function(a){return 1===a.nodeType||9===a.nodeType||!+a.nodeType};function M(){this.expando=n.expando+M.uid++}M.uid=1,M.prototype={register:function(a,b){var c=b||{};return a.nodeType?a[this.expando]=c:Object.defineProperty(a,this.expando,{value:c,writable:!0,configurable:!0}),a[this.expando]},cache:function(a){if(!L(a))return{};var b=a[this.expando];return b||(b={},L(a)&&(a.nodeType?a[this.expando]=b:Object.defineProperty(a,this.expando,{value:b,configurable:!0}))),b},set:function(a,b,c){var d,e=this.cache(a);if("string"==typeof b)e[b]=c;else for(d in b)e[d]=b[d];return e},get:function(a,b){return void 0===b?this.cache(a):a[this.expando]&&a[this.expando][b]},access:function(a,b,c){var d;return void 
0===b||b&&"string"==typeof b&&void 0===c?(d=this.get(a,b),void 0!==d?d:this.get(a,n.camelCase(b))):(this.set(a,b,c),void 0!==c?c:b)},remove:function(a,b){var c,d,e,f=a[this.expando];if(void 0!==f){if(void 0===b)this.register(a);else{n.isArray(b)?d=b.concat(b.map(n.camelCase)):(e=n.camelCase(b),b in f?d=[b,e]:(d=e,d=d in f?[d]:d.match(G)||[])),c=d.length;while(c--)delete f[d[c]]}(void 0===b||n.isEmptyObject(f))&&(a.nodeType?a[this.expando]=void 0:delete a[this.expando])}},hasData:function(a){var b=a[this.expando];return void 0!==b&&!n.isEmptyObject(b)}};var N=new M,O=new M,P=/^(?:\{[\w\W]*\}|\[[\w\W]*\])$/,Q=/[A-Z]/g;function R(a,b,c){var d;if(void 0===c&&1===a.nodeType)if(d="data-"+b.replace(Q,"-$&").toLowerCase(),c=a.getAttribute(d),"string"==typeof c){try{c="true"===c?!0:"false"===c?!1:"null"===c?null:+c+""===c?+c:P.test(c)?n.parseJSON(c):c;
+}catch(e){}O.set(a,b,c)}else c=void 0;return c}n.extend({hasData:function(a){return O.hasData(a)||N.hasData(a)},data:function(a,b,c){return O.access(a,b,c)},removeData:function(a,b){O.remove(a,b)},_data:function(a,b,c){return N.access(a,b,c)},_removeData:function(a,b){N.remove(a,b)}}),n.fn.extend({data:function(a,b){var c,d,e,f=this[0],g=f&&f.attributes;if(void 0===a){if(this.length&&(e=O.get(f),1===f.nodeType&&!N.get(f,"hasDataAttrs"))){c=g.length;while(c--)g[c]&&(d=g[c].name,0===d.indexOf("data-")&&(d=n.camelCase(d.slice(5)),R(f,d,e[d])));N.set(f,"hasDataAttrs",!0)}return e}return"object"==typeof a?this.each(function(){O.set(this,a)}):K(this,function(b){var c,d;if(f&&void 0===b){if(c=O.get(f,a)||O.get(f,a.replace(Q,"-$&").toLowerCase()),void 0!==c)return c;if(d=n.camelCase(a),c=O.get(f,d),void 0!==c)return c;if(c=R(f,d,void 0),void 0!==c)return c}else d=n.camelCase(a),this.each(function(){var c=O.get(this,d);O.set(this,d,b),a.indexOf("-")>-1&&void 0!==c&&O.set(this,a,b)})},null,b,arguments.length>1,null,!0)},removeData:function(a){return this.each(function(){O.remove(this,a)})}}),n.extend({queue:function(a,b,c){var d;return a?(b=(b||"fx")+"queue",d=N.get(a,b),c&&(!d||n.isArray(c)?d=N.access(a,b,n.makeArray(c)):d.push(c)),d||[]):void 0},dequeue:function(a,b){b=b||"fx";var c=n.queue(a,b),d=c.length,e=c.shift(),f=n._queueHooks(a,b),g=function(){n.dequeue(a,b)};"inprogress"===e&&(e=c.shift(),d--),e&&("fx"===b&&c.unshift("inprogress"),delete f.stop,e.call(a,g,f)),!d&&f&&f.empty.fire()},_queueHooks:function(a,b){var c=b+"queueHooks";return N.get(a,c)||N.access(a,c,{empty:n.Callbacks("once memory").add(function(){N.remove(a,[b+"queue",c])})})}}),n.fn.extend({queue:function(a,b){var c=2;return"string"!=typeof a&&(b=a,a="fx",c--),arguments.length<c?n.queue(this[0],a):void 0===b?this:this.each(function(){var c=n.queue(this,a,b);n._queueHooks(this,a),"fx"===a&&"inprogress"!==c[0]&&n.dequeue(this,a)})},dequeue:function(a){return this.each(function(){n.dequeue(this,a)})},clearQueue:function(a){return this.queue(a||"fx",[])},promise:function(a,b){var c,d=1,e=n.Deferred(),f=this,g=this.length,h=function(){--d||e.resolveWith(f,[f])};"string"!=typeof a&&(b=a,a=void 0),a=a||"fx";while(g--)c=N.get(f[g],a+"queueHooks"),c&&c.empty&&(d++,c.empty.add(h));return h(),e.promise(b)}});var S=/[+-]?(?:\d*\.|)\d+(?:[eE][+-]?\d+|)/.source,T=new RegExp("^(?:([+-])=|)("+S+")([a-z%]*)$","i"),U=["Top","Right","Bottom","Left"],V=function(a,b){return a=b||a,"none"===n.css(a,"display")||!n.contains(a.ownerDocument,a)};function W(a,b,c,d){var e,f=1,g=20,h=d?function(){return d.cur()}:function(){return n.css(a,b,"")},i=h(),j=c&&c[3]||(n.cssNumber[b]?"":"px"),k=(n.cssNumber[b]||"px"!==j&&+i)&&T.exec(n.css(a,b));if(k&&k[3]!==j){j=j||k[3],c=c||[],k=+i||1;do f=f||".5",k/=f,n.style(a,b,k+j);while(f!==(f=h()/i)&&1!==f&&--g)}return c&&(k=+k||+i||0,e=c[1]?k+(c[1]+1)*c[2]:+c[2],d&&(d.unit=j,d.start=k,d.end=e)),e}var X=/^(?:checkbox|radio)$/i,Y=/<([\w:-]+)/,Z=/^$|\/(?:java|ecma)script/i,$={option:[1,"<select multiple='multiple'>","</select>"],thead:[1,"<table>","</table>"],col:[2,"<table><colgroup>","</colgroup></table>"],tr:[2,"<table><tbody>","</tbody></table>"],td:[3,"<table><tbody><tr>","</tr></tbody></table>"],_default:[0,"",""]};$.optgroup=$.option,$.tbody=$.tfoot=$.colgroup=$.caption=$.thead,$.th=$.td;function _(a,b){var c="undefined"!=typeof a.getElementsByTagName?a.getElementsByTagName(b||"*"):"undefined"!=typeof a.querySelectorAll?a.querySelectorAll(b||"*"):[];return void 0===b||b&&n.nodeName(a,b)?n.merge([a],c):c}function 
aa(a,b){for(var c=0,d=a.length;d>c;c++)N.set(a[c],"globalEval",!b||N.get(b[c],"globalEval"))}var ba=/<|&#?\w+;/;function ca(a,b,c,d,e){for(var f,g,h,i,j,k,l=b.createDocumentFragment(),m=[],o=0,p=a.length;p>o;o++)if(f=a[o],f||0===f)if("object"===n.type(f))n.merge(m,f.nodeType?[f]:f);else if(ba.test(f)){g=g||l.appendChild(b.createElement("div")),h=(Y.exec(f)||["",""])[1].toLowerCase(),i=$[h]||$._default,g.innerHTML=i[1]+n.htmlPrefilter(f)+i[2],k=i[0];while(k--)g=g.lastChild;n.merge(m,g.childNodes),g=l.firstChild,g.textContent=""}else m.push(b.createTextNode(f));l.textContent="",o=0;while(f=m[o++])if(d&&n.inArray(f,d)>-1)e&&e.push(f);else if(j=n.contains(f.ownerDocument,f),g=_(l.appendChild(f),"script"),j&&aa(g),c){k=0;while(f=g[k++])Z.test(f.type||"")&&c.push(f)}return l}!function(){var a=d.createDocumentFragment(),b=a.appendChild(d.createElement("div")),c=d.createElement("input");c.setAttribute("type","radio"),c.setAttribute("checked","checked"),c.setAttribute("name","t"),b.appendChild(c),l.checkClone=b.cloneNode(!0).cloneNode(!0).lastChild.checked,b.innerHTML="<textarea>x</textarea>",l.noCloneChecked=!!b.cloneNode(!0).lastChild.defaultValue}();var da=/^key/,ea=/^(?:mouse|pointer|contextmenu|drag|drop)|click/,fa=/^([^.]*)(?:\.(.+)|)/;function ga(){return!0}function ha(){return!1}function ia(){try{return d.activeElement}catch(a){}}function ja(a,b,c,d,e,f){var g,h;if("object"==typeof b){"string"!=typeof c&&(d=d||c,c=void 0);for(h in b)ja(a,h,c,d,b[h],f);return a}if(null==d&&null==e?(e=c,d=c=void 0):null==e&&("string"==typeof c?(e=d,d=void 0):(e=d,d=c,c=void 0)),e===!1)e=ha;else if(!e)return a;return 1===f&&(g=e,e=function(a){return n().off(a),g.apply(this,arguments)},e.guid=g.guid||(g.guid=n.guid++)),a.each(function(){n.event.add(this,b,e,d,c)})}n.event={global:{},add:function(a,b,c,d,e){var f,g,h,i,j,k,l,m,o,p,q,r=N.get(a);if(r){c.handler&&(f=c,c=f.handler,e=f.selector),c.guid||(c.guid=n.guid++),(i=r.events)||(i=r.events={}),(g=r.handle)||(g=r.handle=function(b){return"undefined"!=typeof n&&n.event.triggered!==b.type?n.event.dispatch.apply(a,arguments):void 0}),b=(b||"").match(G)||[""],j=b.length;while(j--)h=fa.exec(b[j])||[],o=q=h[1],p=(h[2]||"").split(".").sort(),o&&(l=n.event.special[o]||{},o=(e?l.delegateType:l.bindType)||o,l=n.event.special[o]||{},k=n.extend({type:o,origType:q,data:d,handler:c,guid:c.guid,selector:e,needsContext:e&&n.expr.match.needsContext.test(e),namespace:p.join(".")},f),(m=i[o])||(m=i[o]=[],m.delegateCount=0,l.setup&&l.setup.call(a,d,p,g)!==!1||a.addEventListener&&a.addEventListener(o,g)),l.add&&(l.add.call(a,k),k.handler.guid||(k.handler.guid=c.guid)),e?m.splice(m.delegateCount++,0,k):m.push(k),n.event.global[o]=!0)}},remove:function(a,b,c,d,e){var f,g,h,i,j,k,l,m,o,p,q,r=N.hasData(a)&&N.get(a);if(r&&(i=r.events)){b=(b||"").match(G)||[""],j=b.length;while(j--)if(h=fa.exec(b[j])||[],o=q=h[1],p=(h[2]||"").split(".").sort(),o){l=n.event.special[o]||{},o=(d?l.delegateType:l.bindType)||o,m=i[o]||[],h=h[2]&&new RegExp("(^|\\.)"+p.join("\\.(?:.*\\.|)")+"(\\.|$)"),g=f=m.length;while(f--)k=m[f],!e&&q!==k.origType||c&&c.guid!==k.guid||h&&!h.test(k.namespace)||d&&d!==k.selector&&("**"!==d||!k.selector)||(m.splice(f,1),k.selector&&m.delegateCount--,l.remove&&l.remove.call(a,k));g&&!m.length&&(l.teardown&&l.teardown.call(a,p,r.handle)!==!1||n.removeEvent(a,o,r.handle),delete i[o])}else for(o in i)n.event.remove(a,o+b[j],c,d,!0);n.isEmptyObject(i)&&N.remove(a,"handle events")}},dispatch:function(a){a=n.event.fix(a);var 
b,c,d,f,g,h=[],i=e.call(arguments),j=(N.get(this,"events")||{})[a.type]||[],k=n.event.special[a.type]||{};if(i[0]=a,a.delegateTarget=this,!k.preDispatch||k.preDispatch.call(this,a)!==!1){h=n.event.handlers.call(this,a,j),b=0;while((f=h[b++])&&!a.isPropagationStopped()){a.currentTarget=f.elem,c=0;while((g=f.handlers[c++])&&!a.isImmediatePropagationStopped())a.rnamespace&&!a.rnamespace.test(g.namespace)||(a.handleObj=g,a.data=g.data,d=((n.event.special[g.origType]||{}).handle||g.handler).apply(f.elem,i),void 0!==d&&(a.result=d)===!1&&(a.preventDefault(),a.stopPropagation()))}return k.postDispatch&&k.postDispatch.call(this,a),a.result}},handlers:function(a,b){var c,d,e,f,g=[],h=b.delegateCount,i=a.target;if(h&&i.nodeType&&("click"!==a.type||isNaN(a.button)||a.button<1))for(;i!==this;i=i.parentNode||this)if(1===i.nodeType&&(i.disabled!==!0||"click"!==a.type)){for(d=[],c=0;h>c;c++)f=b[c],e=f.selector+" ",void 0===d[e]&&(d[e]=f.needsContext?n(e,this).index(i)>-1:n.find(e,this,null,[i]).length),d[e]&&d.push(f);d.length&&g.push({elem:i,handlers:d})}return h<b.length&&g.push({elem:this,handlers:b.slice(h)}),g},props:"altKey bubbles cancelable ctrlKey currentTarget detail eventPhase metaKey relatedTarget shiftKey target timeStamp view which".split(" "),fixHooks:{},keyHooks:{props:"char charCode key keyCode".split(" "),filter:function(a,b){return null==a.which&&(a.which=null!=b.charCode?b.charCode:b.keyCode),a}},mouseHooks:{props:"button buttons clientX clientY offsetX offsetY pageX pageY screenX screenY toElement".split(" "),filter:function(a,b){var c,e,f,g=b.button;return null==a.pageX&&null!=b.clientX&&(c=a.target.ownerDocument||d,e=c.documentElement,f=c.body,a.pageX=b.clientX+(e&&e.scrollLeft||f&&f.scrollLeft||0)-(e&&e.clientLeft||f&&f.clientLeft||0),a.pageY=b.clientY+(e&&e.scrollTop||f&&f.scrollTop||0)-(e&&e.clientTop||f&&f.clientTop||0)),a.which||void 0===g||(a.which=1&g?1:2&g?3:4&g?2:0),a}},fix:function(a){if(a[n.expando])return a;var b,c,e,f=a.type,g=a,h=this.fixHooks[f];h||(this.fixHooks[f]=h=ea.test(f)?this.mouseHooks:da.test(f)?this.keyHooks:{}),e=h.props?this.props.concat(h.props):this.props,a=new n.Event(g),b=e.length;while(b--)c=e[b],a[c]=g[c];return a.target||(a.target=d),3===a.target.nodeType&&(a.target=a.target.parentNode),h.filter?h.filter(a,g):a},special:{load:{noBubble:!0},focus:{trigger:function(){return this!==ia()&&this.focus?(this.focus(),!1):void 0},delegateType:"focusin"},blur:{trigger:function(){return this===ia()&&this.blur?(this.blur(),!1):void 0},delegateType:"focusout"},click:{trigger:function(){return"checkbox"===this.type&&this.click&&n.nodeName(this,"input")?(this.click(),!1):void 0},_default:function(a){return n.nodeName(a.target,"a")}},beforeunload:{postDispatch:function(a){void 0!==a.result&&a.originalEvent&&(a.originalEvent.returnValue=a.result)}}}},n.removeEvent=function(a,b,c){a.removeEventListener&&a.removeEventListener(b,c)},n.Event=function(a,b){return this instanceof n.Event?(a&&a.type?(this.originalEvent=a,this.type=a.type,this.isDefaultPrevented=a.defaultPrevented||void 0===a.defaultPrevented&&a.returnValue===!1?ga:ha):this.type=a,b&&n.extend(this,b),this.timeStamp=a&&a.timeStamp||n.now(),void(this[n.expando]=!0)):new n.Event(a,b)},n.Event.prototype={constructor:n.Event,isDefaultPrevented:ha,isPropagationStopped:ha,isImmediatePropagationStopped:ha,isSimulated:!1,preventDefault:function(){var a=this.originalEvent;this.isDefaultPrevented=ga,a&&!this.isSimulated&&a.preventDefault()},stopPropagation:function(){var 
a=this.originalEvent;this.isPropagationStopped=ga,a&&!this.isSimulated&&a.stopPropagation()},stopImmediatePropagation:function(){var a=this.originalEvent;this.isImmediatePropagationStopped=ga,a&&!this.isSimulated&&a.stopImmediatePropagation(),this.stopPropagation()}},n.each({mouseenter:"mouseover",mouseleave:"mouseout",pointerenter:"pointerover",pointerleave:"pointerout"},function(a,b){n.event.special[a]={delegateType:b,bindType:b,handle:function(a){var c,d=this,e=a.relatedTarget,f=a.handleObj;return e&&(e===d||n.contains(d,e))||(a.type=f.origType,c=f.handler.apply(this,arguments),a.type=b),c}}}),n.fn.extend({on:function(a,b,c,d){return ja(this,a,b,c,d)},one:function(a,b,c,d){return ja(this,a,b,c,d,1)},off:function(a,b,c){var d,e;if(a&&a.preventDefault&&a.handleObj)return d=a.handleObj,n(a.delegateTarget).off(d.namespace?d.origType+"."+d.namespace:d.origType,d.selector,d.handler),this;if("object"==typeof a){for(e in a)this.off(e,b,a[e]);return this}return b!==!1&&"function"!=typeof b||(c=b,b=void 0),c===!1&&(c=ha),this.each(function(){n.event.remove(this,a,c,b)})}});var ka=/<(?!area|br|col|embed|hr|img|input|link|meta|param)(([\w:-]+)[^>]*)\/>/gi,la=/<script|<style|<link/i,ma=/checked\s*(?:[^=]|=\s*.checked.)/i,na=/^true\/(.*)/,oa=/^\s*<!(?:\[CDATA\[|--)|(?:\]\]|--)>\s*$/g;function pa(a,b){return n.nodeName(a,"table")&&n.nodeName(11!==b.nodeType?b:b.firstChild,"tr")?a.getElementsByTagName("tbody")[0]||a.appendChild(a.ownerDocument.createElement("tbody")):a}function qa(a){return a.type=(null!==a.getAttribute("type"))+"/"+a.type,a}function ra(a){var b=na.exec(a.type);return b?a.type=b[1]:a.removeAttribute("type"),a}function sa(a,b){var c,d,e,f,g,h,i,j;if(1===b.nodeType){if(N.hasData(a)&&(f=N.access(a),g=N.set(b,f),j=f.events)){delete g.handle,g.events={};for(e in j)for(c=0,d=j[e].length;d>c;c++)n.event.add(b,e,j[e][c])}O.hasData(a)&&(h=O.access(a),i=n.extend({},h),O.set(b,i))}}function ta(a,b){var c=b.nodeName.toLowerCase();"input"===c&&X.test(a.type)?b.checked=a.checked:"input"!==c&&"textarea"!==c||(b.defaultValue=a.defaultValue)}function ua(a,b,c,d){b=f.apply([],b);var e,g,h,i,j,k,m=0,o=a.length,p=o-1,q=b[0],r=n.isFunction(q);if(r||o>1&&"string"==typeof q&&!l.checkClone&&ma.test(q))return a.each(function(e){var f=a.eq(e);r&&(b[0]=q.call(this,e,f.html())),ua(f,b,c,d)});if(o&&(e=ca(b,a[0].ownerDocument,!1,a,d),g=e.firstChild,1===e.childNodes.length&&(e=g),g||d)){for(h=n.map(_(e,"script"),qa),i=h.length;o>m;m++)j=e,m!==p&&(j=n.clone(j,!0,!0),i&&n.merge(h,_(j,"script"))),c.call(a[m],j,m);if(i)for(k=h[h.length-1].ownerDocument,n.map(h,ra),m=0;i>m;m++)j=h[m],Z.test(j.type||"")&&!N.access(j,"globalEval")&&n.contains(k,j)&&(j.src?n._evalUrl&&n._evalUrl(j.src):n.globalEval(j.textContent.replace(oa,"")))}return a}function va(a,b,c){for(var d,e=b?n.filter(b,a):a,f=0;null!=(d=e[f]);f++)c||1!==d.nodeType||n.cleanData(_(d)),d.parentNode&&(c&&n.contains(d.ownerDocument,d)&&aa(_(d,"script")),d.parentNode.removeChild(d));return a}n.extend({htmlPrefilter:function(a){return a.replace(ka,"<$1></$2>")},clone:function(a,b,c){var d,e,f,g,h=a.cloneNode(!0),i=n.contains(a.ownerDocument,a);if(!(l.noCloneChecked||1!==a.nodeType&&11!==a.nodeType||n.isXMLDoc(a)))for(g=_(h),f=_(a),d=0,e=f.length;e>d;d++)ta(f[d],g[d]);if(b)if(c)for(f=f||_(a),g=g||_(h),d=0,e=f.length;e>d;d++)sa(f[d],g[d]);else sa(a,h);return g=_(h,"script"),g.length>0&&aa(g,!i&&_(a,"script")),h},cleanData:function(a){for(var b,c,d,e=n.event.special,f=0;void 0!==(c=a[f]);f++)if(L(c)){if(b=c[N.expando]){if(b.events)for(d in 
b.events)e[d]?n.event.remove(c,d):n.removeEvent(c,d,b.handle);c[N.expando]=void 0}c[O.expando]&&(c[O.expando]=void 0)}}}),n.fn.extend({domManip:ua,detach:function(a){return va(this,a,!0)},remove:function(a){return va(this,a)},text:function(a){return K(this,function(a){return void 0===a?n.text(this):this.empty().each(function(){1!==this.nodeType&&11!==this.nodeType&&9!==this.nodeType||(this.textContent=a)})},null,a,arguments.length)},append:function(){return ua(this,arguments,function(a){if(1===this.nodeType||11===this.nodeType||9===this.nodeType){var b=pa(this,a);b.appendChild(a)}})},prepend:function(){return ua(this,arguments,function(a){if(1===this.nodeType||11===this.nodeType||9===this.nodeType){var b=pa(this,a);b.insertBefore(a,b.firstChild)}})},before:function(){return ua(this,arguments,function(a){this.parentNode&&this.parentNode.insertBefore(a,this)})},after:function(){return ua(this,arguments,function(a){this.parentNode&&this.parentNode.insertBefore(a,this.nextSibling)})},empty:function(){for(var a,b=0;null!=(a=this[b]);b++)1===a.nodeType&&(n.cleanData(_(a,!1)),a.textContent="");return this},clone:function(a,b){return a=null==a?!1:a,b=null==b?a:b,this.map(function(){return n.clone(this,a,b)})},html:function(a){return K(this,function(a){var b=this[0]||{},c=0,d=this.length;if(void 0===a&&1===b.nodeType)return b.innerHTML;if("string"==typeof a&&!la.test(a)&&!$[(Y.exec(a)||["",""])[1].toLowerCase()]){a=n.htmlPrefilter(a);try{for(;d>c;c++)b=this[c]||{},1===b.nodeType&&(n.cleanData(_(b,!1)),b.innerHTML=a);b=0}catch(e){}}b&&this.empty().append(a)},null,a,arguments.length)},replaceWith:function(){var a=[];return ua(this,arguments,function(b){var c=this.parentNode;n.inArray(this,a)<0&&(n.cleanData(_(this)),c&&c.replaceChild(b,this))},a)}}),n.each({appendTo:"append",prependTo:"prepend",insertBefore:"before",insertAfter:"after",replaceAll:"replaceWith"},function(a,b){n.fn[a]=function(a){for(var c,d=[],e=n(a),f=e.length-1,h=0;f>=h;h++)c=h===f?this:this.clone(!0),n(e[h])[b](c),g.apply(d,c.get());return this.pushStack(d)}});var wa,xa={HTML:"block",BODY:"block"};function ya(a,b){var c=n(b.createElement(a)).appendTo(b.body),d=n.css(c[0],"display");return c.detach(),d}function za(a){var b=d,c=xa[a];return c||(c=ya(a,b),"none"!==c&&c||(wa=(wa||n("<iframe frameborder='0' width='0' height='0'/>")).appendTo(b.documentElement),b=wa[0].contentDocument,b.write(),b.close(),c=ya(a,b),wa.detach()),xa[a]=c),c}var Aa=/^margin/,Ba=new RegExp("^("+S+")(?!px)[a-z%]+$","i"),Ca=function(b){var c=b.ownerDocument.defaultView;return c&&c.opener||(c=a),c.getComputedStyle(b)},Da=function(a,b,c,d){var e,f,g={};for(f in b)g[f]=a.style[f],a.style[f]=b[f];e=c.apply(a,d||[]);for(f in b)a.style[f]=g[f];return e},Ea=d.documentElement;!function(){var b,c,e,f,g=d.createElement("div"),h=d.createElement("div");if(h.style){h.style.backgroundClip="content-box",h.cloneNode(!0).style.backgroundClip="",l.clearCloneStyle="content-box"===h.style.backgroundClip,g.style.cssText="border:0;width:8px;height:0;top:0;left:-9999px;padding:0;margin-top:1px;position:absolute",g.appendChild(h);function i(){h.style.cssText="-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box;position:relative;display:block;margin:auto;border:1px;padding:1px;top:1%;width:50%",h.innerHTML="",Ea.appendChild(g);var d=a.getComputedStyle(h);b="1%"!==d.top,f="2px"===d.marginLeft,c="4px"===d.width,h.style.marginRight="50%",e="4px"===d.marginRight,Ea.removeChild(g)}n.extend(l,{pixelPosition:function(){return 
i(),b},boxSizingReliable:function(){return null==c&&i(),c},pixelMarginRight:function(){return null==c&&i(),e},reliableMarginLeft:function(){return null==c&&i(),f},reliableMarginRight:function(){var b,c=h.appendChild(d.createElement("div"));return c.style.cssText=h.style.cssText="-webkit-box-sizing:content-box;box-sizing:content-box;display:block;margin:0;border:0;padding:0",c.style.marginRight=c.style.width="0",h.style.width="1px",Ea.appendChild(g),b=!parseFloat(a.getComputedStyle(c).marginRight),Ea.removeChild(g),h.removeChild(c),b}})}}();function Fa(a,b,c){var d,e,f,g,h=a.style;return c=c||Ca(a),g=c?c.getPropertyValue(b)||c[b]:void 0,""!==g&&void 0!==g||n.contains(a.ownerDocument,a)||(g=n.style(a,b)),c&&!l.pixelMarginRight()&&Ba.test(g)&&Aa.test(b)&&(d=h.width,e=h.minWidth,f=h.maxWidth,h.minWidth=h.maxWidth=h.width=g,g=c.width,h.width=d,h.minWidth=e,h.maxWidth=f),void 0!==g?g+"":g}function Ga(a,b){return{get:function(){return a()?void delete this.get:(this.get=b).apply(this,arguments)}}}var Ha=/^(none|table(?!-c[ea]).+)/,Ia={position:"absolute",visibility:"hidden",display:"block"},Ja={letterSpacing:"0",fontWeight:"400"},Ka=["Webkit","O","Moz","ms"],La=d.createElement("div").style;function Ma(a){if(a in La)return a;var b=a[0].toUpperCase()+a.slice(1),c=Ka.length;while(c--)if(a=Ka[c]+b,a in La)return a}function Na(a,b,c){var d=T.exec(b);return d?Math.max(0,d[2]-(c||0))+(d[3]||"px"):b}function Oa(a,b,c,d,e){for(var f=c===(d?"border":"content")?4:"width"===b?1:0,g=0;4>f;f+=2)"margin"===c&&(g+=n.css(a,c+U[f],!0,e)),d?("content"===c&&(g-=n.css(a,"padding"+U[f],!0,e)),"margin"!==c&&(g-=n.css(a,"border"+U[f]+"Width",!0,e))):(g+=n.css(a,"padding"+U[f],!0,e),"padding"!==c&&(g+=n.css(a,"border"+U[f]+"Width",!0,e)));return g}function Pa(a,b,c){var d=!0,e="width"===b?a.offsetWidth:a.offsetHeight,f=Ca(a),g="border-box"===n.css(a,"boxSizing",!1,f);if(0>=e||null==e){if(e=Fa(a,b,f),(0>e||null==e)&&(e=a.style[b]),Ba.test(e))return e;d=g&&(l.boxSizingReliable()||e===a.style[b]),e=parseFloat(e)||0}return e+Oa(a,b,c||(g?"border":"content"),d,f)+"px"}function Qa(a,b){for(var c,d,e,f=[],g=0,h=a.length;h>g;g++)d=a[g],d.style&&(f[g]=N.get(d,"olddisplay"),c=d.style.display,b?(f[g]||"none"!==c||(d.style.display=""),""===d.style.display&&V(d)&&(f[g]=N.access(d,"olddisplay",za(d.nodeName)))):(e=V(d),"none"===c&&e||N.set(d,"olddisplay",e?c:n.css(d,"display"))));for(g=0;h>g;g++)d=a[g],d.style&&(b&&"none"!==d.style.display&&""!==d.style.display||(d.style.display=b?f[g]||"":"none"));return a}n.extend({cssHooks:{opacity:{get:function(a,b){if(b){var c=Fa(a,"opacity");return""===c?"1":c}}}},cssNumber:{animationIterationCount:!0,columnCount:!0,fillOpacity:!0,flexGrow:!0,flexShrink:!0,fontWeight:!0,lineHeight:!0,opacity:!0,order:!0,orphans:!0,widows:!0,zIndex:!0,zoom:!0},cssProps:{"float":"cssFloat"},style:function(a,b,c,d){if(a&&3!==a.nodeType&&8!==a.nodeType&&a.style){var e,f,g,h=n.camelCase(b),i=a.style;return b=n.cssProps[h]||(n.cssProps[h]=Ma(h)||h),g=n.cssHooks[b]||n.cssHooks[h],void 0===c?g&&"get"in g&&void 0!==(e=g.get(a,!1,d))?e:i[b]:(f=typeof c,"string"===f&&(e=T.exec(c))&&e[1]&&(c=W(a,b,e),f="number"),null!=c&&c===c&&("number"===f&&(c+=e&&e[3]||(n.cssNumber[h]?"":"px")),l.clearCloneStyle||""!==c||0!==b.indexOf("background")||(i[b]="inherit"),g&&"set"in g&&void 0===(c=g.set(a,c,d))||(i[b]=c)),void 0)}},css:function(a,b,c,d){var e,f,g,h=n.camelCase(b);return b=n.cssProps[h]||(n.cssProps[h]=Ma(h)||h),g=n.cssHooks[b]||n.cssHooks[h],g&&"get"in g&&(e=g.get(a,!0,c)),void 0===e&&(e=Fa(a,b,d)),"normal"===e&&b in 
Ja&&(e=Ja[b]),""===c||c?(f=parseFloat(e),c===!0||isFinite(f)?f||0:e):e}}),n.each(["height","width"],function(a,b){n.cssHooks[b]={get:function(a,c,d){return c?Ha.test(n.css(a,"display"))&&0===a.offsetWidth?Da(a,Ia,function(){return Pa(a,b,d)}):Pa(a,b,d):void 0},set:function(a,c,d){var e,f=d&&Ca(a),g=d&&Oa(a,b,d,"border-box"===n.css(a,"boxSizing",!1,f),f);return g&&(e=T.exec(c))&&"px"!==(e[3]||"px")&&(a.style[b]=c,c=n.css(a,b)),Na(a,c,g)}}}),n.cssHooks.marginLeft=Ga(l.reliableMarginLeft,function(a,b){return b?(parseFloat(Fa(a,"marginLeft"))||a.getBoundingClientRect().left-Da(a,{marginLeft:0},function(){return a.getBoundingClientRect().left}))+"px":void 0}),n.cssHooks.marginRight=Ga(l.reliableMarginRight,function(a,b){return b?Da(a,{display:"inline-block"},Fa,[a,"marginRight"]):void 0}),n.each({margin:"",padding:"",border:"Width"},function(a,b){n.cssHooks[a+b]={expand:function(c){for(var d=0,e={},f="string"==typeof c?c.split(" "):[c];4>d;d++)e[a+U[d]+b]=f[d]||f[d-2]||f[0];return e}},Aa.test(a)||(n.cssHooks[a+b].set=Na)}),n.fn.extend({css:function(a,b){return K(this,function(a,b,c){var d,e,f={},g=0;if(n.isArray(b)){for(d=Ca(a),e=b.length;e>g;g++)f[b[g]]=n.css(a,b[g],!1,d);return f}return void 0!==c?n.style(a,b,c):n.css(a,b)},a,b,arguments.length>1)},show:function(){return Qa(this,!0)},hide:function(){return Qa(this)},toggle:function(a){return"boolean"==typeof a?a?this.show():this.hide():this.each(function(){V(this)?n(this).show():n(this).hide()})}});function Ra(a,b,c,d,e){return new Ra.prototype.init(a,b,c,d,e)}n.Tween=Ra,Ra.prototype={constructor:Ra,init:function(a,b,c,d,e,f){this.elem=a,this.prop=c,this.easing=e||n.easing._default,this.options=b,this.start=this.now=this.cur(),this.end=d,this.unit=f||(n.cssNumber[c]?"":"px")},cur:function(){var a=Ra.propHooks[this.prop];return a&&a.get?a.get(this):Ra.propHooks._default.get(this)},run:function(a){var b,c=Ra.propHooks[this.prop];return this.options.duration?this.pos=b=n.easing[this.easing](a,this.options.duration*a,0,1,this.options.duration):this.pos=b=a,this.now=(this.end-this.start)*b+this.start,this.options.step&&this.options.step.call(this.elem,this.now,this),c&&c.set?c.set(this):Ra.propHooks._default.set(this),this}},Ra.prototype.init.prototype=Ra.prototype,Ra.propHooks={_default:{get:function(a){var b;return 1!==a.elem.nodeType||null!=a.elem[a.prop]&&null==a.elem.style[a.prop]?a.elem[a.prop]:(b=n.css(a.elem,a.prop,""),b&&"auto"!==b?b:0)},set:function(a){n.fx.step[a.prop]?n.fx.step[a.prop](a):1!==a.elem.nodeType||null==a.elem.style[n.cssProps[a.prop]]&&!n.cssHooks[a.prop]?a.elem[a.prop]=a.now:n.style(a.elem,a.prop,a.now+a.unit)}}},Ra.propHooks.scrollTop=Ra.propHooks.scrollLeft={set:function(a){a.elem.nodeType&&a.elem.parentNode&&(a.elem[a.prop]=a.now)}},n.easing={linear:function(a){return a},swing:function(a){return.5-Math.cos(a*Math.PI)/2},_default:"swing"},n.fx=Ra.prototype.init,n.fx.step={};var Sa,Ta,Ua=/^(?:toggle|show|hide)$/,Va=/queueHooks$/;function Wa(){return a.setTimeout(function(){Sa=void 0}),Sa=n.now()}function Xa(a,b){var c,d=0,e={height:a};for(b=b?1:0;4>d;d+=2-b)c=U[d],e["margin"+c]=e["padding"+c]=a;return b&&(e.opacity=e.width=a),e}function Ya(a,b,c){for(var d,e=(_a.tweeners[b]||[]).concat(_a.tweeners["*"]),f=0,g=e.length;g>f;f++)if(d=e[f].call(c,b,a))return d}function Za(a,b,c){var 
d,e,f,g,h,i,j,k,l=this,m={},o=a.style,p=a.nodeType&&V(a),q=N.get(a,"fxshow");c.queue||(h=n._queueHooks(a,"fx"),null==h.unqueued&&(h.unqueued=0,i=h.empty.fire,h.empty.fire=function(){h.unqueued||i()}),h.unqueued++,l.always(function(){l.always(function(){h.unqueued--,n.queue(a,"fx").length||h.empty.fire()})})),1===a.nodeType&&("height"in b||"width"in b)&&(c.overflow=[o.overflow,o.overflowX,o.overflowY],j=n.css(a,"display"),k="none"===j?N.get(a,"olddisplay")||za(a.nodeName):j,"inline"===k&&"none"===n.css(a,"float")&&(o.display="inline-block")),c.overflow&&(o.overflow="hidden",l.always(function(){o.overflow=c.overflow[0],o.overflowX=c.overflow[1],o.overflowY=c.overflow[2]}));for(d in b)if(e=b[d],Ua.exec(e)){if(delete b[d],f=f||"toggle"===e,e===(p?"hide":"show")){if("show"!==e||!q||void 0===q[d])continue;p=!0}m[d]=q&&q[d]||n.style(a,d)}else j=void 0;if(n.isEmptyObject(m))"inline"===("none"===j?za(a.nodeName):j)&&(o.display=j);else{q?"hidden"in q&&(p=q.hidden):q=N.access(a,"fxshow",{}),f&&(q.hidden=!p),p?n(a).show():l.done(function(){n(a).hide()}),l.done(function(){var b;N.remove(a,"fxshow");for(b in m)n.style(a,b,m[b])});for(d in m)g=Ya(p?q[d]:0,d,l),d in q||(q[d]=g.start,p&&(g.end=g.start,g.start="width"===d||"height"===d?1:0))}}function $a(a,b){var c,d,e,f,g;for(c in a)if(d=n.camelCase(c),e=b[d],f=a[c],n.isArray(f)&&(e=f[1],f=a[c]=f[0]),c!==d&&(a[d]=f,delete a[c]),g=n.cssHooks[d],g&&"expand"in g){f=g.expand(f),delete a[d];for(c in f)c in a||(a[c]=f[c],b[c]=e)}else b[d]=e}function _a(a,b,c){var d,e,f=0,g=_a.prefilters.length,h=n.Deferred().always(function(){delete i.elem}),i=function(){if(e)return!1;for(var b=Sa||Wa(),c=Math.max(0,j.startTime+j.duration-b),d=c/j.duration||0,f=1-d,g=0,i=j.tweens.length;i>g;g++)j.tweens[g].run(f);return h.notifyWith(a,[j,f,c]),1>f&&i?c:(h.resolveWith(a,[j]),!1)},j=h.promise({elem:a,props:n.extend({},b),opts:n.extend(!0,{specialEasing:{},easing:n.easing._default},c),originalProperties:b,originalOptions:c,startTime:Sa||Wa(),duration:c.duration,tweens:[],createTween:function(b,c){var d=n.Tween(a,j.opts,b,c,j.opts.specialEasing[b]||j.opts.easing);return j.tweens.push(d),d},stop:function(b){var c=0,d=b?j.tweens.length:0;if(e)return this;for(e=!0;d>c;c++)j.tweens[c].run(1);return b?(h.notifyWith(a,[j,1,0]),h.resolveWith(a,[j,b])):h.rejectWith(a,[j,b]),this}}),k=j.props;for($a(k,j.opts.specialEasing);g>f;f++)if(d=_a.prefilters[f].call(j,a,k,j.opts))return n.isFunction(d.stop)&&(n._queueHooks(j.elem,j.opts.queue).stop=n.proxy(d.stop,d)),d;return n.map(k,Ya,j),n.isFunction(j.opts.start)&&j.opts.start.call(a,j),n.fx.timer(n.extend(i,{elem:a,anim:j,queue:j.opts.queue})),j.progress(j.opts.progress).done(j.opts.done,j.opts.complete).fail(j.opts.fail).always(j.opts.always)}n.Animation=n.extend(_a,{tweeners:{"*":[function(a,b){var c=this.createTween(a,b);return W(c.elem,a,T.exec(b),c),c}]},tweener:function(a,b){n.isFunction(a)?(b=a,a=["*"]):a=a.match(G);for(var c,d=0,e=a.length;e>d;d++)c=a[d],_a.tweeners[c]=_a.tweeners[c]||[],_a.tweeners[c].unshift(b)},prefilters:[Za],prefilter:function(a,b){b?_a.prefilters.unshift(a):_a.prefilters.push(a)}}),n.speed=function(a,b,c){var d=a&&"object"==typeof a?n.extend({},a):{complete:c||!c&&b||n.isFunction(a)&&a,duration:a,easing:c&&b||b&&!n.isFunction(b)&&b};return d.duration=n.fx.off?0:"number"==typeof d.duration?d.duration:d.duration in 
n.fx.speeds?n.fx.speeds[d.duration]:n.fx.speeds._default,null!=d.queue&&d.queue!==!0||(d.queue="fx"),d.old=d.complete,d.complete=function(){n.isFunction(d.old)&&d.old.call(this),d.queue&&n.dequeue(this,d.queue)},d},n.fn.extend({fadeTo:function(a,b,c,d){return this.filter(V).css("opacity",0).show().end().animate({opacity:b},a,c,d)},animate:function(a,b,c,d){var e=n.isEmptyObject(a),f=n.speed(b,c,d),g=function(){var b=_a(this,n.extend({},a),f);(e||N.get(this,"finish"))&&b.stop(!0)};return g.finish=g,e||f.queue===!1?this.each(g):this.queue(f.queue,g)},stop:function(a,b,c){var d=function(a){var b=a.stop;delete a.stop,b(c)};return"string"!=typeof a&&(c=b,b=a,a=void 0),b&&a!==!1&&this.queue(a||"fx",[]),this.each(function(){var b=!0,e=null!=a&&a+"queueHooks",f=n.timers,g=N.get(this);if(e)g[e]&&g[e].stop&&d(g[e]);else for(e in g)g[e]&&g[e].stop&&Va.test(e)&&d(g[e]);for(e=f.length;e--;)f[e].elem!==this||null!=a&&f[e].queue!==a||(f[e].anim.stop(c),b=!1,f.splice(e,1));!b&&c||n.dequeue(this,a)})},finish:function(a){return a!==!1&&(a=a||"fx"),this.each(function(){var b,c=N.get(this),d=c[a+"queue"],e=c[a+"queueHooks"],f=n.timers,g=d?d.length:0;for(c.finish=!0,n.queue(this,a,[]),e&&e.stop&&e.stop.call(this,!0),b=f.length;b--;)f[b].elem===this&&f[b].queue===a&&(f[b].anim.stop(!0),f.splice(b,1));for(b=0;g>b;b++)d[b]&&d[b].finish&&d[b].finish.call(this);delete c.finish})}}),n.each(["toggle","show","hide"],function(a,b){var c=n.fn[b];n.fn[b]=function(a,d,e){return null==a||"boolean"==typeof a?c.apply(this,arguments):this.animate(Xa(b,!0),a,d,e)}}),n.each({slideDown:Xa("show"),slideUp:Xa("hide"),slideToggle:Xa("toggle"),fadeIn:{opacity:"show"},fadeOut:{opacity:"hide"},fadeToggle:{opacity:"toggle"}},function(a,b){n.fn[a]=function(a,c,d){return this.animate(b,a,c,d)}}),n.timers=[],n.fx.tick=function(){var a,b=0,c=n.timers;for(Sa=n.now();b<c.length;b++)a=c[b],a()||c[b]!==a||c.splice(b--,1);c.length||n.fx.stop(),Sa=void 0},n.fx.timer=function(a){n.timers.push(a),a()?n.fx.start():n.timers.pop()},n.fx.interval=13,n.fx.start=function(){Ta||(Ta=a.setInterval(n.fx.tick,n.fx.interval))},n.fx.stop=function(){a.clearInterval(Ta),Ta=null},n.fx.speeds={slow:600,fast:200,_default:400},n.fn.delay=function(b,c){return b=n.fx?n.fx.speeds[b]||b:b,c=c||"fx",this.queue(c,function(c,d){var e=a.setTimeout(c,b);d.stop=function(){a.clearTimeout(e)}})},function(){var a=d.createElement("input"),b=d.createElement("select"),c=b.appendChild(d.createElement("option"));a.type="checkbox",l.checkOn=""!==a.value,l.optSelected=c.selected,b.disabled=!0,l.optDisabled=!c.disabled,a=d.createElement("input"),a.value="t",a.type="radio",l.radioValue="t"===a.value}();var ab,bb=n.expr.attrHandle;n.fn.extend({attr:function(a,b){return K(this,n.attr,a,b,arguments.length>1)},removeAttr:function(a){return this.each(function(){n.removeAttr(this,a)})}}),n.extend({attr:function(a,b,c){var d,e,f=a.nodeType;if(3!==f&&8!==f&&2!==f)return"undefined"==typeof a.getAttribute?n.prop(a,b,c):(1===f&&n.isXMLDoc(a)||(b=b.toLowerCase(),e=n.attrHooks[b]||(n.expr.match.bool.test(b)?ab:void 0)),void 0!==c?null===c?void n.removeAttr(a,b):e&&"set"in e&&void 0!==(d=e.set(a,c,b))?d:(a.setAttribute(b,c+""),c):e&&"get"in e&&null!==(d=e.get(a,b))?d:(d=n.find.attr(a,b),null==d?void 0:d))},attrHooks:{type:{set:function(a,b){if(!l.radioValue&&"radio"===b&&n.nodeName(a,"input")){var c=a.value;return a.setAttribute("type",b),c&&(a.value=c),b}}}},removeAttr:function(a,b){var 
c,d,e=0,f=b&&b.match(G);if(f&&1===a.nodeType)while(c=f[e++])d=n.propFix[c]||c,n.expr.match.bool.test(c)&&(a[d]=!1),a.removeAttribute(c)}}),ab={set:function(a,b,c){return b===!1?n.removeAttr(a,c):a.setAttribute(c,c),c}},n.each(n.expr.match.bool.source.match(/\w+/g),function(a,b){var c=bb[b]||n.find.attr;bb[b]=function(a,b,d){var e,f;return d||(f=bb[b],bb[b]=e,e=null!=c(a,b,d)?b.toLowerCase():null,bb[b]=f),e}});var cb=/^(?:input|select|textarea|button)$/i,db=/^(?:a|area)$/i;n.fn.extend({prop:function(a,b){return K(this,n.prop,a,b,arguments.length>1)},removeProp:function(a){return this.each(function(){delete this[n.propFix[a]||a]})}}),n.extend({prop:function(a,b,c){var d,e,f=a.nodeType;if(3!==f&&8!==f&&2!==f)return 1===f&&n.isXMLDoc(a)||(b=n.propFix[b]||b,e=n.propHooks[b]),
+void 0!==c?e&&"set"in e&&void 0!==(d=e.set(a,c,b))?d:a[b]=c:e&&"get"in e&&null!==(d=e.get(a,b))?d:a[b]},propHooks:{tabIndex:{get:function(a){var b=n.find.attr(a,"tabindex");return b?parseInt(b,10):cb.test(a.nodeName)||db.test(a.nodeName)&&a.href?0:-1}}},propFix:{"for":"htmlFor","class":"className"}}),l.optSelected||(n.propHooks.selected={get:function(a){var b=a.parentNode;return b&&b.parentNode&&b.parentNode.selectedIndex,null},set:function(a){var b=a.parentNode;b&&(b.selectedIndex,b.parentNode&&b.parentNode.selectedIndex)}}),n.each(["tabIndex","readOnly","maxLength","cellSpacing","cellPadding","rowSpan","colSpan","useMap","frameBorder","contentEditable"],function(){n.propFix[this.toLowerCase()]=this});var eb=/[\t\r\n\f]/g;function fb(a){return a.getAttribute&&a.getAttribute("class")||""}n.fn.extend({addClass:function(a){var b,c,d,e,f,g,h,i=0;if(n.isFunction(a))return this.each(function(b){n(this).addClass(a.call(this,b,fb(this)))});if("string"==typeof a&&a){b=a.match(G)||[];while(c=this[i++])if(e=fb(c),d=1===c.nodeType&&(" "+e+" ").replace(eb," ")){g=0;while(f=b[g++])d.indexOf(" "+f+" ")<0&&(d+=f+" ");h=n.trim(d),e!==h&&c.setAttribute("class",h)}}return this},removeClass:function(a){var b,c,d,e,f,g,h,i=0;if(n.isFunction(a))return this.each(function(b){n(this).removeClass(a.call(this,b,fb(this)))});if(!arguments.length)return this.attr("class","");if("string"==typeof a&&a){b=a.match(G)||[];while(c=this[i++])if(e=fb(c),d=1===c.nodeType&&(" "+e+" ").replace(eb," ")){g=0;while(f=b[g++])while(d.indexOf(" "+f+" ")>-1)d=d.replace(" "+f+" "," ");h=n.trim(d),e!==h&&c.setAttribute("class",h)}}return this},toggleClass:function(a,b){var c=typeof a;return"boolean"==typeof b&&"string"===c?b?this.addClass(a):this.removeClass(a):n.isFunction(a)?this.each(function(c){n(this).toggleClass(a.call(this,c,fb(this),b),b)}):this.each(function(){var b,d,e,f;if("string"===c){d=0,e=n(this),f=a.match(G)||[];while(b=f[d++])e.hasClass(b)?e.removeClass(b):e.addClass(b)}else void 0!==a&&"boolean"!==c||(b=fb(this),b&&N.set(this,"__className__",b),this.setAttribute&&this.setAttribute("class",b||a===!1?"":N.get(this,"__className__")||""))})},hasClass:function(a){var b,c,d=0;b=" "+a+" ";while(c=this[d++])if(1===c.nodeType&&(" "+fb(c)+" ").replace(eb," ").indexOf(b)>-1)return!0;return!1}});var gb=/\r/g,hb=/[\x20\t\r\n\f]+/g;n.fn.extend({val:function(a){var b,c,d,e=this[0];{if(arguments.length)return d=n.isFunction(a),this.each(function(c){var e;1===this.nodeType&&(e=d?a.call(this,c,n(this).val()):a,null==e?e="":"number"==typeof e?e+="":n.isArray(e)&&(e=n.map(e,function(a){return null==a?"":a+""})),b=n.valHooks[this.type]||n.valHooks[this.nodeName.toLowerCase()],b&&"set"in b&&void 0!==b.set(this,e,"value")||(this.value=e))});if(e)return b=n.valHooks[e.type]||n.valHooks[e.nodeName.toLowerCase()],b&&"get"in b&&void 0!==(c=b.get(e,"value"))?c:(c=e.value,"string"==typeof c?c.replace(gb,""):null==c?"":c)}}}),n.extend({valHooks:{option:{get:function(a){var b=n.find.attr(a,"value");return null!=b?b:n.trim(n.text(a)).replace(hb," ")}},select:{get:function(a){for(var b,c,d=a.options,e=a.selectedIndex,f="select-one"===a.type||0>e,g=f?null:[],h=f?e+1:d.length,i=0>e?h:f?e:0;h>i;i++)if(c=d[i],(c.selected||i===e)&&(l.optDisabled?!c.disabled:null===c.getAttribute("disabled"))&&(!c.parentNode.disabled||!n.nodeName(c.parentNode,"optgroup"))){if(b=n(c).val(),f)return b;g.push(b)}return g},set:function(a,b){var c,d,e=a.options,f=n.makeArray(b),g=e.length;while(g--)d=e[g],(d.selected=n.inArray(n.valHooks.option.get(d),f)>-1)&&(c=!0);return 
c||(a.selectedIndex=-1),f}}}}),n.each(["radio","checkbox"],function(){n.valHooks[this]={set:function(a,b){return n.isArray(b)?a.checked=n.inArray(n(a).val(),b)>-1:void 0}},l.checkOn||(n.valHooks[this].get=function(a){return null===a.getAttribute("value")?"on":a.value})});var ib=/^(?:focusinfocus|focusoutblur)$/;n.extend(n.event,{trigger:function(b,c,e,f){var g,h,i,j,l,m,o,p=[e||d],q=k.call(b,"type")?b.type:b,r=k.call(b,"namespace")?b.namespace.split("."):[];if(h=i=e=e||d,3!==e.nodeType&&8!==e.nodeType&&!ib.test(q+n.event.triggered)&&(q.indexOf(".")>-1&&(r=q.split("."),q=r.shift(),r.sort()),l=q.indexOf(":")<0&&"on"+q,b=b[n.expando]?b:new n.Event(q,"object"==typeof b&&b),b.isTrigger=f?2:3,b.namespace=r.join("."),b.rnamespace=b.namespace?new RegExp("(^|\\.)"+r.join("\\.(?:.*\\.|)")+"(\\.|$)"):null,b.result=void 0,b.target||(b.target=e),c=null==c?[b]:n.makeArray(c,[b]),o=n.event.special[q]||{},f||!o.trigger||o.trigger.apply(e,c)!==!1)){if(!f&&!o.noBubble&&!n.isWindow(e)){for(j=o.delegateType||q,ib.test(j+q)||(h=h.parentNode);h;h=h.parentNode)p.push(h),i=h;i===(e.ownerDocument||d)&&p.push(i.defaultView||i.parentWindow||a)}g=0;while((h=p[g++])&&!b.isPropagationStopped())b.type=g>1?j:o.bindType||q,m=(N.get(h,"events")||{})[b.type]&&N.get(h,"handle"),m&&m.apply(h,c),m=l&&h[l],m&&m.apply&&L(h)&&(b.result=m.apply(h,c),b.result===!1&&b.preventDefault());return b.type=q,f||b.isDefaultPrevented()||o._default&&o._default.apply(p.pop(),c)!==!1||!L(e)||l&&n.isFunction(e[q])&&!n.isWindow(e)&&(i=e[l],i&&(e[l]=null),n.event.triggered=q,e[q](),n.event.triggered=void 0,i&&(e[l]=i)),b.result}},simulate:function(a,b,c){var d=n.extend(new n.Event,c,{type:a,isSimulated:!0});n.event.trigger(d,null,b)}}),n.fn.extend({trigger:function(a,b){return this.each(function(){n.event.trigger(a,b,this)})},triggerHandler:function(a,b){var c=this[0];return c?n.event.trigger(a,b,c,!0):void 0}}),n.each("blur focus focusin focusout load resize scroll unload click dblclick mousedown mouseup mousemove mouseover mouseout mouseenter mouseleave change select submit keydown keypress keyup error contextmenu".split(" "),function(a,b){n.fn[b]=function(a,c){return arguments.length>0?this.on(b,null,a,c):this.trigger(b)}}),n.fn.extend({hover:function(a,b){return this.mouseenter(a).mouseleave(b||a)}}),l.focusin="onfocusin"in a,l.focusin||n.each({focus:"focusin",blur:"focusout"},function(a,b){var c=function(a){n.event.simulate(b,a.target,n.event.fix(a))};n.event.special[b]={setup:function(){var d=this.ownerDocument||this,e=N.access(d,b);e||d.addEventListener(a,c,!0),N.access(d,b,(e||0)+1)},teardown:function(){var d=this.ownerDocument||this,e=N.access(d,b)-1;e?N.access(d,b,e):(d.removeEventListener(a,c,!0),N.remove(d,b))}}});var jb=a.location,kb=n.now(),lb=/\?/;n.parseJSON=function(a){return JSON.parse(a+"")},n.parseXML=function(b){var c;if(!b||"string"!=typeof b)return null;try{c=(new a.DOMParser).parseFromString(b,"text/xml")}catch(d){c=void 0}return c&&!c.getElementsByTagName("parsererror").length||n.error("Invalid XML: "+b),c};var mb=/#.*$/,nb=/([?&])_=[^&]*/,ob=/^(.*?):[ \t]*([^\r\n]*)$/gm,pb=/^(?:about|app|app-storage|.+-extension|file|res|widget):$/,qb=/^(?:GET|HEAD)$/,rb=/^\/\//,sb={},tb={},ub="*/".concat("*"),vb=d.createElement("a");vb.href=jb.href;function wb(a){return function(b,c){"string"!=typeof b&&(c=b,b="*");var d,e=0,f=b.toLowerCase().match(G)||[];if(n.isFunction(c))while(d=f[e++])"+"===d[0]?(d=d.slice(1)||"*",(a[d]=a[d]||[]).unshift(c)):(a[d]=a[d]||[]).push(c)}}function xb(a,b,c,d){var e={},f=a===tb;function g(h){var i;return 
e[h]=!0,n.each(a[h]||[],function(a,h){var j=h(b,c,d);return"string"!=typeof j||f||e[j]?f?!(i=j):void 0:(b.dataTypes.unshift(j),g(j),!1)}),i}return g(b.dataTypes[0])||!e["*"]&&g("*")}function yb(a,b){var c,d,e=n.ajaxSettings.flatOptions||{};for(c in b)void 0!==b[c]&&((e[c]?a:d||(d={}))[c]=b[c]);return d&&n.extend(!0,a,d),a}function zb(a,b,c){var d,e,f,g,h=a.contents,i=a.dataTypes;while("*"===i[0])i.shift(),void 0===d&&(d=a.mimeType||b.getResponseHeader("Content-Type"));if(d)for(e in h)if(h[e]&&h[e].test(d)){i.unshift(e);break}if(i[0]in c)f=i[0];else{for(e in c){if(!i[0]||a.converters[e+" "+i[0]]){f=e;break}g||(g=e)}f=f||g}return f?(f!==i[0]&&i.unshift(f),c[f]):void 0}function Ab(a,b,c,d){var e,f,g,h,i,j={},k=a.dataTypes.slice();if(k[1])for(g in a.converters)j[g.toLowerCase()]=a.converters[g];f=k.shift();while(f)if(a.responseFields[f]&&(c[a.responseFields[f]]=b),!i&&d&&a.dataFilter&&(b=a.dataFilter(b,a.dataType)),i=f,f=k.shift())if("*"===f)f=i;else if("*"!==i&&i!==f){if(g=j[i+" "+f]||j["* "+f],!g)for(e in j)if(h=e.split(" "),h[1]===f&&(g=j[i+" "+h[0]]||j["* "+h[0]])){g===!0?g=j[e]:j[e]!==!0&&(f=h[0],k.unshift(h[1]));break}if(g!==!0)if(g&&a["throws"])b=g(b);else try{b=g(b)}catch(l){return{state:"parsererror",error:g?l:"No conversion from "+i+" to "+f}}}return{state:"success",data:b}}n.extend({active:0,lastModified:{},etag:{},ajaxSettings:{url:jb.href,type:"GET",isLocal:pb.test(jb.protocol),global:!0,processData:!0,async:!0,contentType:"application/x-www-form-urlencoded; charset=UTF-8",accepts:{"*":ub,text:"text/plain",html:"text/html",xml:"application/xml, text/xml",json:"application/json, text/javascript"},contents:{xml:/\bxml\b/,html:/\bhtml/,json:/\bjson\b/},responseFields:{xml:"responseXML",text:"responseText",json:"responseJSON"},converters:{"* text":String,"text html":!0,"text json":n.parseJSON,"text xml":n.parseXML},flatOptions:{url:!0,context:!0}},ajaxSetup:function(a,b){return b?yb(yb(a,n.ajaxSettings),b):yb(n.ajaxSettings,a)},ajaxPrefilter:wb(sb),ajaxTransport:wb(tb),ajax:function(b,c){"object"==typeof b&&(c=b,b=void 0),c=c||{};var e,f,g,h,i,j,k,l,m=n.ajaxSetup({},c),o=m.context||m,p=m.context&&(o.nodeType||o.jquery)?n(o):n.event,q=n.Deferred(),r=n.Callbacks("once memory"),s=m.statusCode||{},t={},u={},v=0,w="canceled",x={readyState:0,getResponseHeader:function(a){var b;if(2===v){if(!h){h={};while(b=ob.exec(g))h[b[1].toLowerCase()]=b[2]}b=h[a.toLowerCase()]}return null==b?null:b},getAllResponseHeaders:function(){return 2===v?g:null},setRequestHeader:function(a,b){var c=a.toLowerCase();return v||(a=u[c]=u[c]||a,t[a]=b),this},overrideMimeType:function(a){return v||(m.mimeType=a),this},statusCode:function(a){var b;if(a)if(2>v)for(b in a)s[b]=[s[b],a[b]];else x.always(a[x.status]);return this},abort:function(a){var b=a||w;return e&&e.abort(b),z(0,b),this}};if(q.promise(x).complete=r.add,x.success=x.done,x.error=x.fail,m.url=((b||m.url||jb.href)+"").replace(mb,"").replace(rb,jb.protocol+"//"),m.type=c.method||c.type||m.method||m.type,m.dataTypes=n.trim(m.dataType||"*").toLowerCase().match(G)||[""],null==m.crossDomain){j=d.createElement("a");try{j.href=m.url,j.href=j.href,m.crossDomain=vb.protocol+"//"+vb.host!=j.protocol+"//"+j.host}catch(y){m.crossDomain=!0}}if(m.data&&m.processData&&"string"!=typeof m.data&&(m.data=n.param(m.data,m.traditional)),xb(sb,m,c,x),2===v)return x;k=n.event&&m.global,k&&0===n.active++&&n.event.trigger("ajaxStart"),m.type=m.type.toUpperCase(),m.hasContent=!qb.test(m.type),f=m.url,m.hasContent||(m.data&&(f=m.url+=(lb.test(f)?"&":"?")+m.data,delete 
m.data),m.cache===!1&&(m.url=nb.test(f)?f.replace(nb,"$1_="+kb++):f+(lb.test(f)?"&":"?")+"_="+kb++)),m.ifModified&&(n.lastModified[f]&&x.setRequestHeader("If-Modified-Since",n.lastModified[f]),n.etag[f]&&x.setRequestHeader("If-None-Match",n.etag[f])),(m.data&&m.hasContent&&m.contentType!==!1||c.contentType)&&x.setRequestHeader("Content-Type",m.contentType),x.setRequestHeader("Accept",m.dataTypes[0]&&m.accepts[m.dataTypes[0]]?m.accepts[m.dataTypes[0]]+("*"!==m.dataTypes[0]?", "+ub+"; q=0.01":""):m.accepts["*"]);for(l in m.headers)x.setRequestHeader(l,m.headers[l]);if(m.beforeSend&&(m.beforeSend.call(o,x,m)===!1||2===v))return x.abort();w="abort";for(l in{success:1,error:1,complete:1})x[l](m[l]);if(e=xb(tb,m,c,x)){if(x.readyState=1,k&&p.trigger("ajaxSend",[x,m]),2===v)return x;m.async&&m.timeout>0&&(i=a.setTimeout(function(){x.abort("timeout")},m.timeout));try{v=1,e.send(t,z)}catch(y){if(!(2>v))throw y;z(-1,y)}}else z(-1,"No Transport");function z(b,c,d,h){var j,l,t,u,w,y=c;2!==v&&(v=2,i&&a.clearTimeout(i),e=void 0,g=h||"",x.readyState=b>0?4:0,j=b>=200&&300>b||304===b,d&&(u=zb(m,x,d)),u=Ab(m,u,x,j),j?(m.ifModified&&(w=x.getResponseHeader("Last-Modified"),w&&(n.lastModified[f]=w),w=x.getResponseHeader("etag"),w&&(n.etag[f]=w)),204===b||"HEAD"===m.type?y="nocontent":304===b?y="notmodified":(y=u.state,l=u.data,t=u.error,j=!t)):(t=y,!b&&y||(y="error",0>b&&(b=0))),x.status=b,x.statusText=(c||y)+"",j?q.resolveWith(o,[l,y,x]):q.rejectWith(o,[x,y,t]),x.statusCode(s),s=void 0,k&&p.trigger(j?"ajaxSuccess":"ajaxError",[x,m,j?l:t]),r.fireWith(o,[x,y]),k&&(p.trigger("ajaxComplete",[x,m]),--n.active||n.event.trigger("ajaxStop")))}return x},getJSON:function(a,b,c){return n.get(a,b,c,"json")},getScript:function(a,b){return n.get(a,void 0,b,"script")}}),n.each(["get","post"],function(a,b){n[b]=function(a,c,d,e){return n.isFunction(c)&&(e=e||d,d=c,c=void 0),n.ajax(n.extend({url:a,type:b,dataType:e,data:c,success:d},n.isPlainObject(a)&&a))}}),n._evalUrl=function(a){return n.ajax({url:a,type:"GET",dataType:"script",async:!1,global:!1,"throws":!0})},n.fn.extend({wrapAll:function(a){var b;return n.isFunction(a)?this.each(function(b){n(this).wrapAll(a.call(this,b))}):(this[0]&&(b=n(a,this[0].ownerDocument).eq(0).clone(!0),this[0].parentNode&&b.insertBefore(this[0]),b.map(function(){var a=this;while(a.firstElementChild)a=a.firstElementChild;return a}).append(this)),this)},wrapInner:function(a){return n.isFunction(a)?this.each(function(b){n(this).wrapInner(a.call(this,b))}):this.each(function(){var b=n(this),c=b.contents();c.length?c.wrapAll(a):b.append(a)})},wrap:function(a){var b=n.isFunction(a);return this.each(function(c){n(this).wrapAll(b?a.call(this,c):a)})},unwrap:function(){return this.parent().each(function(){n.nodeName(this,"body")||n(this).replaceWith(this.childNodes)}).end()}}),n.expr.filters.hidden=function(a){return!n.expr.filters.visible(a)},n.expr.filters.visible=function(a){return a.offsetWidth>0||a.offsetHeight>0||a.getClientRects().length>0};var Bb=/%20/g,Cb=/\[\]$/,Db=/\r?\n/g,Eb=/^(?:submit|button|image|reset|file)$/i,Fb=/^(?:input|select|textarea|keygen)/i;function Gb(a,b,c,d){var e;if(n.isArray(b))n.each(b,function(b,e){c||Cb.test(a)?d(a,e):Gb(a+"["+("object"==typeof e&&null!=e?b:"")+"]",e,c,d)});else if(c||"object"!==n.type(b))d(a,b);else for(e in b)Gb(a+"["+e+"]",b[e],c,d)}n.param=function(a,b){var c,d=[],e=function(a,b){b=n.isFunction(b)?b():null==b?"":b,d[d.length]=encodeURIComponent(a)+"="+encodeURIComponent(b)};if(void 
0===b&&(b=n.ajaxSettings&&n.ajaxSettings.traditional),n.isArray(a)||a.jquery&&!n.isPlainObject(a))n.each(a,function(){e(this.name,this.value)});else for(c in a)Gb(c,a[c],b,e);return d.join("&").replace(Bb,"+")},n.fn.extend({serialize:function(){return n.param(this.serializeArray())},serializeArray:function(){return this.map(function(){var a=n.prop(this,"elements");return a?n.makeArray(a):this}).filter(function(){var a=this.type;return this.name&&!n(this).is(":disabled")&&Fb.test(this.nodeName)&&!Eb.test(a)&&(this.checked||!X.test(a))}).map(function(a,b){var c=n(this).val();return null==c?null:n.isArray(c)?n.map(c,function(a){return{name:b.name,value:a.replace(Db,"\r\n")}}):{name:b.name,value:c.replace(Db,"\r\n")}}).get()}}),n.ajaxSettings.xhr=function(){try{return new a.XMLHttpRequest}catch(b){}};var Hb={0:200,1223:204},Ib=n.ajaxSettings.xhr();l.cors=!!Ib&&"withCredentials"in Ib,l.ajax=Ib=!!Ib,n.ajaxTransport(function(b){var c,d;return l.cors||Ib&&!b.crossDomain?{send:function(e,f){var g,h=b.xhr();if(h.open(b.type,b.url,b.async,b.username,b.password),b.xhrFields)for(g in b.xhrFields)h[g]=b.xhrFields[g];b.mimeType&&h.overrideMimeType&&h.overrideMimeType(b.mimeType),b.crossDomain||e["X-Requested-With"]||(e["X-Requested-With"]="XMLHttpRequest");for(g in e)h.setRequestHeader(g,e[g]);c=function(a){return function(){c&&(c=d=h.onload=h.onerror=h.onabort=h.onreadystatechange=null,"abort"===a?h.abort():"error"===a?"number"!=typeof h.status?f(0,"error"):f(h.status,h.statusText):f(Hb[h.status]||h.status,h.statusText,"text"!==(h.responseType||"text")||"string"!=typeof h.responseText?{binary:h.response}:{text:h.responseText},h.getAllResponseHeaders()))}},h.onload=c(),d=h.onerror=c("error"),void 0!==h.onabort?h.onabort=d:h.onreadystatechange=function(){4===h.readyState&&a.setTimeout(function(){c&&d()})},c=c("abort");try{h.send(b.hasContent&&b.data||null)}catch(i){if(c)throw i}},abort:function(){c&&c()}}:void 0}),n.ajaxSetup({accepts:{script:"text/javascript, application/javascript, application/ecmascript, application/x-ecmascript"},contents:{script:/\b(?:java|ecma)script\b/},converters:{"text script":function(a){return n.globalEval(a),a}}}),n.ajaxPrefilter("script",function(a){void 0===a.cache&&(a.cache=!1),a.crossDomain&&(a.type="GET")}),n.ajaxTransport("script",function(a){if(a.crossDomain){var b,c;return{send:function(e,f){b=n("<script>").prop({charset:a.scriptCharset,src:a.url}).on("load error",c=function(a){b.remove(),c=null,a&&f("error"===a.type?404:200,a.type)}),d.head.appendChild(b[0])},abort:function(){c&&c()}}}});var Jb=[],Kb=/(=)\?(?=&|$)|\?\?/;n.ajaxSetup({jsonp:"callback",jsonpCallback:function(){var a=Jb.pop()||n.expando+"_"+kb++;return this[a]=!0,a}}),n.ajaxPrefilter("json jsonp",function(b,c,d){var e,f,g,h=b.jsonp!==!1&&(Kb.test(b.url)?"url":"string"==typeof b.data&&0===(b.contentType||"").indexOf("application/x-www-form-urlencoded")&&Kb.test(b.data)&&"data");return h||"jsonp"===b.dataTypes[0]?(e=b.jsonpCallback=n.isFunction(b.jsonpCallback)?b.jsonpCallback():b.jsonpCallback,h?b[h]=b[h].replace(Kb,"$1"+e):b.jsonp!==!1&&(b.url+=(lb.test(b.url)?"&":"?")+b.jsonp+"="+e),b.converters["script json"]=function(){return g||n.error(e+" was not called"),g[0]},b.dataTypes[0]="json",f=a[e],a[e]=function(){g=arguments},d.always(function(){void 0===f?n(a).removeProp(e):a[e]=f,b[e]&&(b.jsonpCallback=c.jsonpCallback,Jb.push(e)),g&&n.isFunction(f)&&f(g[0]),g=f=void 0}),"script"):void 0}),n.parseHTML=function(a,b,c){if(!a||"string"!=typeof a)return null;"boolean"==typeof b&&(c=b,b=!1),b=b||d;var 
e=x.exec(a),f=!c&&[];return e?[b.createElement(e[1])]:(e=ca([a],b,f),f&&f.length&&n(f).remove(),n.merge([],e.childNodes))};var Lb=n.fn.load;n.fn.load=function(a,b,c){if("string"!=typeof a&&Lb)return Lb.apply(this,arguments);var d,e,f,g=this,h=a.indexOf(" ");return h>-1&&(d=n.trim(a.slice(h)),a=a.slice(0,h)),n.isFunction(b)?(c=b,b=void 0):b&&"object"==typeof b&&(e="POST"),g.length>0&&n.ajax({url:a,type:e||"GET",dataType:"html",data:b}).done(function(a){f=arguments,g.html(d?n("<div>").append(n.parseHTML(a)).find(d):a)}).always(c&&function(a,b){g.each(function(){c.apply(this,f||[a.responseText,b,a])})}),this},n.each(["ajaxStart","ajaxStop","ajaxComplete","ajaxError","ajaxSuccess","ajaxSend"],function(a,b){n.fn[b]=function(a){return this.on(b,a)}}),n.expr.filters.animated=function(a){return n.grep(n.timers,function(b){return a===b.elem}).length};function Mb(a){return n.isWindow(a)?a:9===a.nodeType&&a.defaultView}n.offset={setOffset:function(a,b,c){var d,e,f,g,h,i,j,k=n.css(a,"position"),l=n(a),m={};"static"===k&&(a.style.position="relative"),h=l.offset(),f=n.css(a,"top"),i=n.css(a,"left"),j=("absolute"===k||"fixed"===k)&&(f+i).indexOf("auto")>-1,j?(d=l.position(),g=d.top,e=d.left):(g=parseFloat(f)||0,e=parseFloat(i)||0),n.isFunction(b)&&(b=b.call(a,c,n.extend({},h))),null!=b.top&&(m.top=b.top-h.top+g),null!=b.left&&(m.left=b.left-h.left+e),"using"in b?b.using.call(a,m):l.css(m)}},n.fn.extend({offset:function(a){if(arguments.length)return void 0===a?this:this.each(function(b){n.offset.setOffset(this,a,b)});var b,c,d=this[0],e={top:0,left:0},f=d&&d.ownerDocument;if(f)return b=f.documentElement,n.contains(b,d)?(e=d.getBoundingClientRect(),c=Mb(f),{top:e.top+c.pageYOffset-b.clientTop,left:e.left+c.pageXOffset-b.clientLeft}):e},position:function(){if(this[0]){var a,b,c=this[0],d={top:0,left:0};return"fixed"===n.css(c,"position")?b=c.getBoundingClientRect():(a=this.offsetParent(),b=this.offset(),n.nodeName(a[0],"html")||(d=a.offset()),d.top+=n.css(a[0],"borderTopWidth",!0),d.left+=n.css(a[0],"borderLeftWidth",!0)),{top:b.top-d.top-n.css(c,"marginTop",!0),left:b.left-d.left-n.css(c,"marginLeft",!0)}}},offsetParent:function(){return this.map(function(){var a=this.offsetParent;while(a&&"static"===n.css(a,"position"))a=a.offsetParent;return a||Ea})}}),n.each({scrollLeft:"pageXOffset",scrollTop:"pageYOffset"},function(a,b){var c="pageYOffset"===b;n.fn[a]=function(d){return K(this,function(a,d,e){var f=Mb(a);return void 0===e?f?f[b]:a[d]:void(f?f.scrollTo(c?f.pageXOffset:e,c?e:f.pageYOffset):a[d]=e)},a,d,arguments.length)}}),n.each(["top","left"],function(a,b){n.cssHooks[b]=Ga(l.pixelPosition,function(a,c){return c?(c=Fa(a,b),Ba.test(c)?n(a).position()[b]+"px":c):void 0})}),n.each({Height:"height",Width:"width"},function(a,b){n.each({padding:"inner"+a,content:b,"":"outer"+a},function(c,d){n.fn[d]=function(d,e){var f=arguments.length&&(c||"boolean"!=typeof d),g=c||(d===!0||e===!0?"margin":"border");return K(this,function(b,c,d){var e;return n.isWindow(b)?b.document.documentElement["client"+a]:9===b.nodeType?(e=b.documentElement,Math.max(b.body["scroll"+a],e["scroll"+a],b.body["offset"+a],e["offset"+a],e["client"+a])):void 0===d?n.css(b,c,g):n.style(b,c,d,g)},b,f?d:void 0,f,null)}})}),n.fn.extend({bind:function(a,b,c){return this.on(a,null,b,c)},unbind:function(a,b){return this.off(a,null,b)},delegate:function(a,b,c,d){return this.on(b,a,c,d)},undelegate:function(a,b,c){return 1===arguments.length?this.off(a,"**"):this.off(b,a||"**",c)},size:function(){return 
this.length}}),n.fn.andSelf=n.fn.addBack,"function"==typeof define&&define.amd&&define("jquery",[],function(){return n});var Nb=a.jQuery,Ob=a.$;return n.noConflict=function(b){return a.$===n&&(a.$=Ob),b&&a.jQuery===n&&(a.jQuery=Nb),n},b||(a.jQuery=a.$=n),n});
diff --git a/web/gui/lib/jquery.easypiechart-97b5824.min.js b/web/gui/lib/jquery.easypiechart-97b5824.min.js
new file mode 100644
index 0000000..b6f6811
--- /dev/null
+++ b/web/gui/lib/jquery.easypiechart-97b5824.min.js
@@ -0,0 +1,10 @@
+/**!
+ * easy-pie-chart
+ * Lightweight plugin to render simple, animated and retina optimized pie charts
+ *
+ * @license
+ * @author Robert Fleischmann <rendro87@gmail.com> (http://robert-fleischmann.de)
+ * @version 2.1.7
+ * SPDX-License-Identifier: MIT
+ **/
+!function(a,b){"function"==typeof define&&define.amd?define(["jquery"],function(a){return b(a)}):"object"==typeof exports?module.exports=b(require("jquery")):b(jQuery)}(this,function(a){var b=function(a,b){var c,d=document.createElement("canvas");a.appendChild(d),"object"==typeof G_vmlCanvasManager&&G_vmlCanvasManager.initElement(d);var e=d.getContext("2d");d.width=d.height=b.size;var f=1;window.devicePixelRatio>1&&(f=window.devicePixelRatio,d.style.width=d.style.height=[b.size,"px"].join(""),d.width=d.height=b.size*f,e.scale(f,f)),e.translate(b.size/2,b.size/2),e.rotate((-0.5+b.rotate/180)*Math.PI);var g=(b.size-b.lineWidth)/2;b.scaleColor&&b.scaleLength&&(g-=b.scaleLength+2),Date.now=Date.now||function(){return+new Date};var h=function(a,b,c){c=Math.min(Math.max(-1,c||0),1);var d=0>=c?!0:!1;e.beginPath(),e.arc(0,0,g,0,2*Math.PI*c,d),e.strokeStyle=a,e.lineWidth=b,e.stroke()},i=function(){var a,c;e.lineWidth=1,e.fillStyle=b.scaleColor,e.save();for(var d=24;d>0;--d)d%6===0?(c=b.scaleLength,a=0):(c=.6*b.scaleLength,a=b.scaleLength-c),e.fillRect(-b.size/2+a,0,c,1),e.rotate(Math.PI/12);e.restore()},j=function(){return window.requestAnimationFrame||window.webkitRequestAnimationFrame||window.mozRequestAnimationFrame||function(a){window.setTimeout(a,1e3/60)}}(),k=function(){b.scaleColor&&i(),b.trackColor&&h(b.trackColor,b.trackWidth||b.lineWidth,1)};this.getCanvas=function(){return d},this.getCtx=function(){return e},this.clear=function(){e.clearRect(b.size/-2,b.size/-2,b.size,b.size)},this.draw=function(a){b.scaleColor||b.trackColor?e.getImageData&&e.putImageData?c?e.putImageData(c,0,0):(k(),c=e.getImageData(0,0,b.size*f,b.size*f)):(this.clear(),k()):this.clear(),e.lineCap=b.lineCap;var d;d="function"==typeof b.barColor?b.barColor(a):b.barColor,h(d,b.lineWidth,a/100)}.bind(this),this.animate=function(a,c){var d=Date.now();b.onStart(a,c);var e=function(){var f=Math.min(Date.now()-d,b.animate.duration),g=b.easing(this,f,a,c-a,b.animate.duration);this.draw(g),b.onStep(a,c,g),f>=b.animate.duration?b.onStop(a,c):j(e)}.bind(this);j(e)}.bind(this)},c=function(a,c){var d={barColor:"#ef1e25",trackColor:"#f9f9f9",scaleColor:"#dfe0e0",scaleLength:5,lineCap:"round",lineWidth:3,trackWidth:void 0,size:110,rotate:0,animate:{duration:1e3,enabled:!0},easing:function(a,b,c,d,e){return b/=e/2,1>b?d/2*b*b+c:-d/2*(--b*(b-2)-1)+c},onStart:function(a,b){},onStep:function(a,b,c){},onStop:function(a,b){}};if("undefined"!=typeof b)d.renderer=b;else{if("undefined"==typeof SVGRenderer)throw new Error("Please load either the SVG- or the CanvasRenderer");d.renderer=SVGRenderer}var e={},f=0,g=function(){this.el=a,this.options=e;for(var b in d)d.hasOwnProperty(b)&&(e[b]=c&&"undefined"!=typeof c[b]?c[b]:d[b],"function"==typeof e[b]&&(e[b]=e[b].bind(this)));"string"==typeof e.easing&&"undefined"!=typeof jQuery&&jQuery.isFunction(jQuery.easing[e.easing])?e.easing=jQuery.easing[e.easing]:e.easing=d.easing,"number"==typeof e.animate&&(e.animate={duration:e.animate,enabled:!0}),"boolean"!=typeof e.animate||e.animate||(e.animate={duration:1e3,enabled:e.animate}),this.renderer=new e.renderer(a,e),this.renderer.draw(f),a.dataset&&a.dataset.percent?this.update(parseFloat(a.dataset.percent)):a.getAttribute&&a.getAttribute("data-percent")&&this.update(parseFloat(a.getAttribute("data-percent")))}.bind(this);this.update=function(a){return a=parseFloat(a),e.animate.enabled?this.renderer.animate(f,a):this.renderer.draw(a),f=a,this}.bind(this),this.disableAnimation=function(){return 
e.animate.enabled=!1,this},this.enableAnimation=function(){return e.animate.enabled=!0,this},g()};a.fn.easyPieChart=function(b){return this.each(function(){var d;a.data(this,"easyPieChart")||(d=a.extend({},b,a(this).data()),a.data(this,"easyPieChart",new c(this,d)))})}});
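For orientation, the easy-pie-chart file above registers a $.fn.easyPieChart plugin that draws a canvas gauge and reads its initial value from a data-percent attribute. A minimal usage sketch, assuming jQuery and the file above are loaded; the #cpu-gauge id and the option values are illustrative, not part of this patch (the options shown are the plugin's own defaults):

  // Markup: <div id="cpu-gauge" data-percent="73"></div>
  $('#cpu-gauge').easyPieChart({
      barColor: '#ef1e25',    // color of the value arc
      trackColor: '#f9f9f9',  // color of the background ring
      lineWidth: 3,           // arc thickness in px
      size: 110,              // canvas width/height in px
      animate: { duration: 1000, enabled: true }
  });
  // The instance is stored via $.data(); update() animates to a new percentage.
  $('#cpu-gauge').data('easyPieChart').update(42);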
diff --git a/web/gui/lib/jquery.peity-3.2.0.min.js b/web/gui/lib/jquery.peity-3.2.0.min.js
new file mode 100644
index 0000000..a0a9169
--- /dev/null
+++ b/web/gui/lib/jquery.peity-3.2.0.min.js
@@ -0,0 +1,14 @@
+// Peity jQuery plugin version 3.2.0
+// (c) 2015 Ben Pickles
+//
+// http://benpickles.github.io/peity
+//
+// Released under MIT license.
+// SPDX-License-Identifier: MIT
+(function(k,w,h,v){var d=k.fn.peity=function(a,b){y&&this.each(function(){var e=k(this),c=e.data("_peity");c?(a&&(c.type=a),k.extend(c.opts,b)):(c=new x(e,a,k.extend({},d.defaults[a],e.data("peity"),b)),e.change(function(){c.draw()}).data("_peity",c));c.draw()});return this},x=function(a,b,e){this.$el=a;this.type=b;this.opts=e},o=x.prototype,q=o.svgElement=function(a,b){return k(w.createElementNS("http://www.w3.org/2000/svg",a)).attr(b)},y="createElementNS"in w&&q("svg",{})[0].createSVGRect;o.draw=
+function(){var a=this.opts;d.graphers[this.type].call(this,a);a.after&&a.after.call(this,a)};o.fill=function(){var a=this.opts.fill;return k.isFunction(a)?a:function(b,e){return a[e%a.length]}};o.prepare=function(a,b){this.$svg||this.$el.hide().after(this.$svg=q("svg",{"class":"peity"}));return this.$svg.empty().data("peity",this).attr({height:b,width:a})};o.values=function(){return k.map(this.$el.text().split(this.opts.delimiter),function(a){return parseFloat(a)})};d.defaults={};d.graphers={};d.register=
+function(a,b,e){this.defaults[a]=b;this.graphers[a]=e};d.register("pie",{fill:["#ff9900","#fff4dd","#ffc66e"],radius:8},function(a){if(!a.delimiter){var b=this.$el.text().match(/[^0-9\.]/);a.delimiter=b?b[0]:","}b=k.map(this.values(),function(a){return 0<a?a:0});if("/"==a.delimiter)var e=b[0],b=[e,h.max(0,b[1]-e)];for(var c=0,e=b.length,t=0;c<e;c++)t+=b[c];t||(e=2,t=1,b=[0,1]);var l=2*a.radius,l=this.prepare(a.width||l,a.height||l),c=l.width(),f=l.height(),j=c/2,d=f/2,f=h.min(j,d),a=a.innerRadius;
+"donut"==this.type&&!a&&(a=0.5*f);for(var r=h.PI,s=this.fill(),g=this.scale=function(a,b){var c=a/t*r*2-r/2;return[b*h.cos(c)+j,b*h.sin(c)+d]},m=0,c=0;c<e;c++){var u=b[c],i=u/t;if(0!=i){if(1==i)if(a)var i=j-0.01,p=d-f,n=d-a,i=q("path",{d:["M",j,p,"A",f,f,0,1,1,i,p,"L",i,n,"A",a,a,0,1,0,j,n].join(" ")});else i=q("circle",{cx:j,cy:d,r:f});else p=m+u,n=["M"].concat(g(m,f),"A",f,f,0,0.5<i?1:0,1,g(p,f),"L"),a?n=n.concat(g(p,a),"A",a,a,0,0.5<i?1:0,0,g(m,a)):n.push(j,d),m+=u,i=q("path",{d:n.join(" ")});
+i.attr("fill",s.call(this,u,c,b));l.append(i)}}});d.register("donut",k.extend(!0,{},d.defaults.pie),function(a){d.graphers.pie.call(this,a)});d.register("line",{delimiter:",",fill:"#c6d9fd",height:16,min:0,stroke:"#4d89f9",strokeWidth:1,width:32},function(a){var b=this.values();1==b.length&&b.push(b[0]);for(var e=h.max.apply(h,a.max==v?b:b.concat(a.max)),c=h.min.apply(h,a.min==v?b:b.concat(a.min)),d=this.prepare(a.width,a.height),l=a.strokeWidth,f=d.width(),j=d.height()-l,k=e-c,e=this.x=function(a){return a*
+(f/(b.length-1))},r=this.y=function(a){var b=j;k&&(b-=(a-c)/k*j);return b+l/2},s=r(h.max(c,0)),g=[0,s],m=0;m<b.length;m++)g.push(e(m),r(b[m]));g.push(f,s);a.fill&&d.append(q("polygon",{fill:a.fill,points:g.join(" ")}));l&&d.append(q("polyline",{fill:"none",points:g.slice(2,g.length-2).join(" "),stroke:a.stroke,"stroke-width":l,"stroke-linecap":"square"}))});d.register("bar",{delimiter:",",fill:["#4D89F9"],height:16,min:0,padding:0.1,width:32},function(a){for(var b=this.values(),e=h.max.apply(h,a.max==
+v?b:b.concat(a.max)),c=h.min.apply(h,a.min==v?b:b.concat(a.min)),d=this.prepare(a.width,a.height),l=d.width(),f=d.height(),j=e-c,a=a.padding,k=this.fill(),r=this.x=function(a){return a*l/b.length},s=this.y=function(a){return f-(j?(a-c)/j*f:1)},g=0;g<b.length;g++){var m=r(g+a),u=r(g+1-a)-m,i=b[g],p=s(i),n=p,o;j?0>i?n=s(h.min(e,0)):p=s(h.max(c,0)):o=1;o=p-n;0==o&&(o=1,0<e&&j&&n--);d.append(q("rect",{fill:k.call(this,i,g,b),x:m,y:n,width:u,height:o}))}})})(jQuery,document,Math);
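The peity plugin above turns an element's text content into an inline SVG chart, splitting the text on a delimiter to obtain the data. A hedged sketch of typical calls (selectors and values are illustrative, assuming jQuery and the file above are loaded):

  // <span class="pie">3/5</span> — a "/" delimiter is read as value/total.
  $('span.pie').peity('pie');
  // <span class="line">5,3,9,6,5,9,7</span> — line charts default to 32x16 px.
  $('span.line').peity('line', { width: 48 });
  // The chart redraws automatically when the element fires a change event.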
diff --git a/web/gui/lib/jquery.sparkline-2.1.2.min.js b/web/gui/lib/jquery.sparkline-2.1.2.min.js
new file mode 100644
index 0000000..f1973df
--- /dev/null
+++ b/web/gui/lib/jquery.sparkline-2.1.2.min.js
@@ -0,0 +1,6 @@
+/* jquery.sparkline 2.1.2 - http://omnipotent.net/jquery.sparkline/
+** SPDX-License-Identifier: BSD-3-Clause
+** Licensed under the New BSD License - see above site for details */
+
+(function(a,b,c){(function(a){typeof define=="function"&&define.amd?define(["jquery"],a):jQuery&&!jQuery.fn.sparkline&&a(jQuery)})(function(d){"use strict";var e={},f,g,h,i,j,k,l,m,n,o,p,q,r,s,t,u,v,w,x,y,z,A,B,C,D,E,F,G,H,I,J,K,L=0;f=function(){return{common:{type:"line",lineColor:"#00f",fillColor:"#cdf",defaultPixelsPerValue:3,width:"auto",height:"auto",composite:!1,tagValuesAttribute:"values",tagOptionsPrefix:"spark",enableTagOptions:!1,enableHighlight:!0,highlightLighten:1.4,tooltipSkipNull:!0,tooltipPrefix:"",tooltipSuffix:"",disableHiddenCheck:!1,numberFormatter:!1,numberDigitGroupCount:3,numberDigitGroupSep:",",numberDecimalMark:".",disableTooltips:!1,disableInteraction:!1},line:{spotColor:"#f80",highlightSpotColor:"#5f5",highlightLineColor:"#f22",spotRadius:1.5,minSpotColor:"#f80",maxSpotColor:"#f80",lineWidth:1,normalRangeMin:c,normalRangeMax:c,normalRangeColor:"#ccc",drawNormalOnTop:!1,chartRangeMin:c,chartRangeMax:c,chartRangeMinX:c,chartRangeMaxX:c,tooltipFormat:new h('<span style="color: {{color}}">&#9679;</span> {{prefix}}{{y}}{{suffix}}')},bar:{barColor:"#3366cc",negBarColor:"#f44",stackedBarColor:["#3366cc","#dc3912","#ff9900","#109618","#66aa00","#dd4477","#0099c6","#990099"],zeroColor:c,nullColor:c,zeroAxis:!0,barWidth:4,barSpacing:1,chartRangeMax:c,chartRangeMin:c,chartRangeClip:!1,colorMap:c,tooltipFormat:new h('<span style="color: {{color}}">&#9679;</span> {{prefix}}{{value}}{{suffix}}')},tristate:{barWidth:4,barSpacing:1,posBarColor:"#6f6",negBarColor:"#f44",zeroBarColor:"#999",colorMap:{},tooltipFormat:new h('<span style="color: {{color}}">&#9679;</span> {{value:map}}'),tooltipValueLookups:{map:{"-1":"Loss",0:"Draw",1:"Win"}}},discrete:{lineHeight:"auto",thresholdColor:c,thresholdValue:0,chartRangeMax:c,chartRangeMin:c,chartRangeClip:!1,tooltipFormat:new h("{{prefix}}{{value}}{{suffix}}")},bullet:{targetColor:"#f33",targetWidth:3,performanceColor:"#33f",rangeColors:["#d3dafe","#a8b6ff","#7f94ff"],base:c,tooltipFormat:new h("{{fieldkey:fields}} - {{value}}"),tooltipValueLookups:{fields:{r:"Range",p:"Performance",t:"Target"}}},pie:{offset:0,sliceColors:["#3366cc","#dc3912","#ff9900","#109618","#66aa00","#dd4477","#0099c6","#990099"],borderWidth:0,borderColor:"#000",tooltipFormat:new h('<span style="color: {{color}}">&#9679;</span> {{value}} ({{percent.1}}%)')},box:{raw:!1,boxLineColor:"#000",boxFillColor:"#cdf",whiskerColor:"#000",outlierLineColor:"#333",outlierFillColor:"#fff",medianColor:"#f00",showOutliers:!0,outlierIQR:1.5,spotRadius:1.5,target:c,targetColor:"#4a2",chartRangeMax:c,chartRangeMin:c,tooltipFormat:new h("{{field:fields}}: {{value}}"),tooltipFormatFieldlistKey:"field",tooltipValueLookups:{fields:{lq:"Lower Quartile",med:"Median",uq:"Upper Quartile",lo:"Left Outlier",ro:"Right Outlier",lw:"Left Whisker",rw:"Right Whisker"}}}}},E='.jqstooltip { position: absolute;left: 0px;top: 0px;visibility: hidden;background: rgb(0, 0, 0) transparent;background-color: rgba(0,0,0,0.6);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr=#99000000, endColorstr=#99000000);-ms-filter: "progid:DXImageTransform.Microsoft.gradient(startColorstr=#99000000, endColorstr=#99000000)";color: white;font: 10px arial, san serif;text-align: left;white-space: nowrap;padding: 5px;border: 1px solid white;z-index: 10000;}.jqsfield { color: white;font: 10px arial, san serif;text-align: left;}',g=function(){var a,b;return a=function(){this.init.apply(this,arguments)},arguments.length>1?(arguments[0]?(a.prototype=d.extend(new 
arguments[0],arguments[arguments.length-1]),a._super=arguments[0].prototype):a.prototype=arguments[arguments.length-1],arguments.length>2&&(b=Array.prototype.slice.call(arguments,1,-1),b.unshift(a.prototype),d.extend.apply(d,b))):a.prototype=arguments[0],a.prototype.cls=a,a},d.SPFormatClass=h=g({fre:/\{\{([\w.]+?)(:(.+?))?\}\}/g,precre:/(\w+)\.(\d+)/,init:function(a,b){this.format=a,this.fclass=b},render:function(a,b,d){var e=this,f=a,g,h,i,j,k;return this.format.replace(this.fre,function(){var a;return h=arguments[1],i=arguments[3],g=e.precre.exec(h),g?(k=g[2],h=g[1]):k=!1,j=f[h],j===c?"":i&&b&&b[i]?(a=b[i],a.get?b[i].get(j)||j:b[i][j]||j):(n(j)&&(d.get("numberFormatter")?j=d.get("numberFormatter")(j):j=s(j,k,d.get("numberDigitGroupCount"),d.get("numberDigitGroupSep"),d.get("numberDecimalMark"))),j)})}}),d.spformat=function(a,b){return new h(a,b)},i=function(a,b,c){return a<b?b:a>c?c:a},j=function(a,c){var d;return c===2?(d=b.floor(a.length/2),a.length%2?a[d]:(a[d-1]+a[d])/2):a.length%2?(d=(a.length*c+c)/4,d%1?(a[b.floor(d)]+a[b.floor(d)-1])/2:a[d-1]):(d=(a.length*c+2)/4,d%1?(a[b.floor(d)]+a[b.floor(d)-1])/2:a[d-1])},k=function(a){var b;switch(a){case"undefined":a=c;break;case"null":a=null;break;case"true":a=!0;break;case"false":a=!1;break;default:b=parseFloat(a),a==b&&(a=b)}return a},l=function(a){var b,c=[];for(b=a.length;b--;)c[b]=k(a[b]);return c},m=function(a,b){var c,d,e=[];for(c=0,d=a.length;c<d;c++)a[c]!==b&&e.push(a[c]);return e},n=function(a){return!isNaN(parseFloat(a))&&isFinite(a)},s=function(a,b,c,e,f){var g,h;a=(b===!1?parseFloat(a).toString():a.toFixed(b)).split(""),g=(g=d.inArray(".",a))<0?a.length:g,g<a.length&&(a[g]=f);for(h=g-c;h>0;h-=c)a.splice(h,0,e);return a.join("")},o=function(a,b,c){var d;for(d=b.length;d--;){if(c&&b[d]===null)continue;if(b[d]!==a)return!1}return!0},p=function(a){var b=0,c;for(c=a.length;c--;)b+=typeof a[c]=="number"?a[c]:0;return b},r=function(a){return d.isArray(a)?a:[a]},q=function(b){var c;a.createStyleSheet?a.createStyleSheet().cssText=b:(c=a.createElement("style"),c.type="text/css",a.getElementsByTagName("head")[0].appendChild(c),c[typeof a.body.style.WebkitAppearance=="string"?"innerText":"innerHTML"]=b)},d.fn.simpledraw=function(b,e,f,g){var h,i;if(f&&(h=this.data("_jqs_vcanvas")))return h;if(d.fn.sparkline.canvas===!1)return!1;if(d.fn.sparkline.canvas===c){var j=a.createElement("canvas");if(!j.getContext||!j.getContext("2d")){if(!a.namespaces||!!a.namespaces.v)return d.fn.sparkline.canvas=!1,!1;a.namespaces.add("v","urn:schemas-microsoft-com:vml","#default#VML"),d.fn.sparkline.canvas=function(a,b,c,d){return new J(a,b,c)}}else d.fn.sparkline.canvas=function(a,b,c,d){return new I(a,b,c,d)}}return b===c&&(b=d(this).innerWidth()),e===c&&(e=d(this).innerHeight()),h=d.fn.sparkline.canvas(b,e,this,g),i=d(this).data("_jqs_mhandler"),i&&i.registerCanvas(h),h},d.fn.cleardraw=function(){var a=this.data("_jqs_vcanvas");a&&a.reset()},d.RangeMapClass=t=g({init:function(a){var b,c,d=[];for(b in a)a.hasOwnProperty(b)&&typeof b=="string"&&b.indexOf(":")>-1&&(c=b.split(":"),c[0]=c[0].length===0?-Infinity:parseFloat(c[0]),c[1]=c[1].length===0?Infinity:parseFloat(c[1]),c[2]=a[b],d.push(c));this.map=a,this.rangelist=d||!1},get:function(a){var b=this.rangelist,d,e,f;if((f=this.map[a])!==c)return f;if(b)for(d=b.length;d--;){e=b[d];if(e[0]<=a&&e[1]>=a)return e[2]}return c}}),d.range_map=function(a){return new t(a)},u=g({init:function(a,b){var 
c=d(a);this.$el=c,this.options=b,this.currentPageX=0,this.currentPageY=0,this.el=a,this.splist=[],this.tooltip=null,this.over=!1,this.displayTooltips=!b.get("disableTooltips"),this.highlightEnabled=!b.get("disableHighlight")},registerSparkline:function(a){this.splist.push(a),this.over&&this.updateDisplay()},registerCanvas:function(a){var b=d(a.canvas);this.canvas=a,this.$canvas=b,b.mouseenter(d.proxy(this.mouseenter,this)),b.mouseleave(d.proxy(this.mouseleave,this)),b.click(d.proxy(this.mouseclick,this))},reset:function(a){this.splist=[],this.tooltip&&a&&(this.tooltip.remove(),this.tooltip=c)},mouseclick:function(a){var b=d.Event("sparklineClick");b.originalEvent=a,b.sparklines=this.splist,this.$el.trigger(b)},mouseenter:function(b){d(a.body).unbind("mousemove.jqs"),d(a.body).bind("mousemove.jqs",d.proxy(this.mousemove,this)),this.over=!0,this.currentPageX=b.pageX,this.currentPageY=b.pageY,this.currentEl=b.target,!this.tooltip&&this.displayTooltips&&(this.tooltip=new v(this.options),this.tooltip.updatePosition(b.pageX,b.pageY)),this.updateDisplay()},mouseleave:function(){d(a.body).unbind("mousemove.jqs");var b=this.splist,c=b.length,e=!1,f,g;this.over=!1,this.currentEl=null,this.tooltip&&(this.tooltip.remove(),this.tooltip=null);for(g=0;g<c;g++)f=b[g],f.clearRegionHighlight()&&(e=!0);e&&this.canvas.render()},mousemove:function(a){this.currentPageX=a.pageX,this.currentPageY=a.pageY,this.currentEl=a.target,this.tooltip&&this.tooltip.updatePosition(a.pageX,a.pageY),this.updateDisplay()},updateDisplay:function(){var a=this.splist,b=a.length,c=!1,e=this.$canvas.offset(),f=this.currentPageX-e.left,g=this.currentPageY-e.top,h,i,j,k,l;if(!this.over)return;for(j=0;j<b;j++)i=a[j],k=i.setRegionHighlight(this.currentEl,f,g),k&&(c=!0);if(c){l=d.Event("sparklineRegionChange"),l.sparklines=this.splist,this.$el.trigger(l);if(this.tooltip){h="";for(j=0;j<b;j++)i=a[j],h+=i.getCurrentRegionTooltip();this.tooltip.setContent(h)}this.disableHighlight||this.canvas.render()}k===null&&this.mouseleave()}}),v=g({sizeStyle:"position: static !important;display: block !important;visibility: hidden !important;float: left !important;",init:function(b){var c=b.get("tooltipClassname","jqstooltip"),e=this.sizeStyle,f;this.container=b.get("tooltipContainer")||a.body,this.tooltipOffsetX=b.get("tooltipOffsetX",10),this.tooltipOffsetY=b.get("tooltipOffsetY",12),d("#jqssizetip").remove(),d("#jqstooltip").remove(),this.sizetip=d("<div/>",{id:"jqssizetip",style:e,"class":c}),this.tooltip=d("<div/>",{id:"jqstooltip","class":c}).appendTo(this.container),f=this.tooltip.offset(),this.offsetLeft=f.left,this.offsetTop=f.top,this.hidden=!0,d(window).unbind("resize.jqs scroll.jqs"),d(window).bind("resize.jqs scroll.jqs",d.proxy(this.updateWindowDims,this)),this.updateWindowDims()},updateWindowDims:function(){this.scrollTop=d(window).scrollTop(),this.scrollLeft=d(window).scrollLeft(),this.scrollRight=this.scrollLeft+d(window).width(),this.updatePosition()},getSize:function(a){this.sizetip.html(a).appendTo(this.container),this.width=this.sizetip.width()+1,this.height=this.sizetip.height(),this.sizetip.remove()},setContent:function(a){if(!a){this.tooltip.css("visibility","hidden"),this.hidden=!0;return}this.getSize(a),this.tooltip.html(a).css({width:this.width,height:this.height,visibility:"visible"}),this.hidden&&(this.hidden=!1,this.updatePosition())},updatePosition:function(a,b){if(a===c){if(this.mousex===c)return;a=this.mousex-this.offsetLeft,b=this.mousey-this.offsetTop}else 
this.mousex=a-=this.offsetLeft,this.mousey=b-=this.offsetTop;if(!this.height||!this.width||this.hidden)return;b-=this.height+this.tooltipOffsetY,a+=this.tooltipOffsetX,b<this.scrollTop&&(b=this.scrollTop),a<this.scrollLeft?a=this.scrollLeft:a+this.width>this.scrollRight&&(a=this.scrollRight-this.width),this.tooltip.css({left:a,top:b})},remove:function(){this.tooltip.remove(),this.sizetip.remove(),this.sizetip=this.tooltip=c,d(window).unbind("resize.jqs scroll.jqs")}}),F=function(){q(E)},d(F),K=[],d.fn.sparkline=function(b,e){return this.each(function(){var f=new d.fn.sparkline.options(this,e),g=d(this),h,i;h=function(){var e,h,i,j,k,l,m;if(b==="html"||b===c){m=this.getAttribute(f.get("tagValuesAttribute"));if(m===c||m===null)m=g.html();e=m.replace(/(^\s*<!--)|(-->\s*$)|\s+/g,"").split(",")}else e=b;h=f.get("width")==="auto"?e.length*f.get("defaultPixelsPerValue"):f.get("width");if(f.get("height")==="auto"){if(!f.get("composite")||!d.data(this,"_jqs_vcanvas"))j=a.createElement("span"),j.innerHTML="a",g.html(j),i=d(j).innerHeight()||d(j).height(),d(j).remove(),j=null}else i=f.get("height");f.get("disableInteraction")?k=!1:(k=d.data(this,"_jqs_mhandler"),k?f.get("composite")||k.reset():(k=new u(this,f),d.data(this,"_jqs_mhandler",k)));if(f.get("composite")&&!d.data(this,"_jqs_vcanvas")){d.data(this,"_jqs_errnotify")||(alert("Attempted to attach a composite sparkline to an element with no existing sparkline"),d.data(this,"_jqs_errnotify",!0));return}l=new(d.fn.sparkline[f.get("type")])(this,e,f,h,i),l.render(),k&&k.registerSparkline(l)};if(d(this).html()&&!f.get("disableHiddenCheck")&&d(this).is(":hidden")||!d(this).parents("body").length){if(!f.get("composite")&&d.data(this,"_jqs_pending"))for(i=K.length;i;i--)K[i-1][0]==this&&K.splice(i-1,1);K.push([this,h]),d.data(this,"_jqs_pending",!0)}else h.call(this)})},d.fn.sparkline.defaults=f(),d.sparkline_display_visible=function(){var a,b,c,e=[];for(b=0,c=K.length;b<c;b++)a=K[b][0],d(a).is(":visible")&&!d(a).parents().is(":hidden")?(K[b][1].call(a),d.data(K[b][0],"_jqs_pending",!1),e.push(b)):!d(a).closest("html").length&&!d.data(a,"_jqs_pending")&&(d.data(K[b][0],"_jqs_pending",!1),e.push(b));for(b=e.length;b;b--)K.splice(e[b-1],1)},d.fn.sparkline.options=g({init:function(a,b){var c,f,g,h;this.userOptions=b=b||{},this.tag=a,this.tagValCache={},f=d.fn.sparkline.defaults,g=f.common,this.tagOptionsPrefix=b.enableTagOptions&&(b.tagOptionsPrefix||g.tagOptionsPrefix),h=this.getTagSetting("type"),h===e?c=f[b.type||g.type]:c=f[h],this.mergedOptions=d.extend({},g,c,b)},getTagSetting:function(a){var b=this.tagOptionsPrefix,d,f,g,h;if(b===!1||b===c)return e;if(this.tagValCache.hasOwnProperty(a))d=this.tagValCache.key;else{d=this.tag.getAttribute(b+a);if(d===c||d===null)d=e;else if(d.substr(0,1)==="["){d=d.substr(1,d.length-2).split(",");for(f=d.length;f--;)d[f]=k(d[f].replace(/(^\s*)|(\s*$)/g,""))}else if(d.substr(0,1)==="{"){g=d.substr(1,d.length-2).split(","),d={};for(f=g.length;f--;)h=g[f].split(":",2),d[h[0].replace(/(^\s*)|(\s*$)/g,"")]=k(h[1].replace(/(^\s*)|(\s*$)/g,""))}else d=k(d);this.tagValCache.key=d}return d},get:function(a,b){var d=this.getTagSetting(a),f;return d!==e?d:(f=this.mergedOptions[a])===c?b:f}}),d.fn.sparkline._base=g({disabled:!1,init:function(a,b,e,f,g){this.el=a,this.$el=d(a),this.values=b,this.options=e,this.width=f,this.height=g,this.currentRegion=c},initTarget:function(){var 
a=!this.options.get("disableInteraction");(this.target=this.$el.simpledraw(this.width,this.height,this.options.get("composite"),a))?(this.canvasWidth=this.target.pixelWidth,this.canvasHeight=this.target.pixelHeight):this.disabled=!0},render:function(){return this.disabled?(this.el.innerHTML="",!1):!0},getRegion:function(a,b){},setRegionHighlight:function(a,b,d){var e=this.currentRegion,f=!this.options.get("disableHighlight"),g;return b>this.canvasWidth||d>this.canvasHeight||b<0||d<0?null:(g=this.getRegion(a,b,d),e!==g?(e!==c&&f&&this.removeHighlight(),this.currentRegion=g,g!==c&&f&&this.renderHighlight(),!0):!1)},clearRegionHighlight:function(){return this.currentRegion!==c?(this.removeHighlight(),this.currentRegion=c,!0):!1},renderHighlight:function(){this.changeHighlight(!0)},removeHighlight:function(){this.changeHighlight(!1)},changeHighlight:function(a){},getCurrentRegionTooltip:function(){var a=this.options,b="",e=[],f,g,i,j,k,l,m,n,o,p,q,r,s,t;if(this.currentRegion===c)return"";f=this.getCurrentRegionFields(),q=a.get("tooltipFormatter");if(q)return q(this,a,f);a.get("tooltipChartTitle")&&(b+='<div class="jqs jqstitle">'+a.get("tooltipChartTitle")+"</div>\n"),g=this.options.get("tooltipFormat");if(!g)return"";d.isArray(g)||(g=[g]),d.isArray(f)||(f=[f]),m=this.options.get("tooltipFormatFieldlist"),n=this.options.get("tooltipFormatFieldlistKey");if(m&&n){o=[];for(l=f.length;l--;)p=f[l][n],(t=d.inArray(p,m))!=-1&&(o[t]=f[l]);f=o}i=g.length,s=f.length;for(l=0;l<i;l++){r=g[l],typeof r=="string"&&(r=new h(r)),j=r.fclass||"jqsfield";for(t=0;t<s;t++)if(!f[t].isNull||!a.get("tooltipSkipNull"))d.extend(f[t],{prefix:a.get("tooltipPrefix"),suffix:a.get("tooltipSuffix")}),k=r.render(f[t],a.get("tooltipValueLookups"),a),e.push('<div class="'+j+'">'+k+"</div>")}return e.length?b+e.join("\n"):""},getCurrentRegionFields:function(){},calcHighlightColor:function(a,c){var d=c.get("highlightColor"),e=c.get("highlightLighten"),f,g,h,j;if(d)return d;if(e){f=/^#([0-9a-f])([0-9a-f])([0-9a-f])$/i.exec(a)||/^#([0-9a-f]{2})([0-9a-f]{2})([0-9a-f]{2})$/i.exec(a);if(f){h=[],g=a.length===4?16:1;for(j=0;j<3;j++)h[j]=i(b.round(parseInt(f[j+1],16)*g*e),0,255);return"rgb("+h.join(",")+")"}}return a}}),w={changeHighlight:function(a){var b=this.currentRegion,c=this.target,e=this.regionShapes[b],f;e&&(f=this.renderRegion(b,a),d.isArray(f)||d.isArray(e)?(c.replaceWithShapes(e,f),this.regionShapes[b]=d.map(f,function(a){return a.id})):(c.replaceWithShape(e,f),this.regionShapes[b]=f.id))},render:function(){var a=this.values,b=this.target,c=this.regionShapes,e,f,g,h;if(!this.cls._super.render.call(this))return;for(g=a.length;g--;){e=this.renderRegion(g);if(e)if(d.isArray(e)){f=[];for(h=e.length;h--;)e[h].append(),f.push(e[h].id);c[g]=f}else e.append(),c[g]=e.id;else c[g]=null}b.render()}},d.fn.sparkline.line=x=g(d.fn.sparkline._base,{type:"line",init:function(a,b,c,d,e){x._super.init.call(this,a,b,c,d,e),this.vertices=[],this.regionMap=[],this.xvalues=[],this.yvalues=[],this.yminmax=[],this.hightlightSpotId=null,this.lastShapeId=null,this.initTarget()},getRegion:function(a,b,d){var e,f=this.regionMap;for(e=f.length;e--;)if(f[e]!==null&&b>=f[e][0]&&b<=f[e][1])return f[e][2];return c},getCurrentRegionFields:function(){var a=this.currentRegion;return{isNull:this.yvalues[a]===null,x:this.xvalues[a],y:this.yvalues[a],color:this.options.get("lineColor"),fillColor:this.options.get("fillColor"),offset:a}},renderHighlight:function(){var 
a=this.currentRegion,b=this.target,d=this.vertices[a],e=this.options,f=e.get("spotRadius"),g=e.get("highlightSpotColor"),h=e.get("highlightLineColor"),i,j;if(!d)return;f&&g&&(i=b.drawCircle(d[0],d[1],f,c,g),this.highlightSpotId=i.id,b.insertAfterShape(this.lastShapeId,i)),h&&(j=b.drawLine(d[0],this.canvasTop,d[0],this.canvasTop+this.canvasHeight,h),this.highlightLineId=j.id,b.insertAfterShape(this.lastShapeId,j))},removeHighlight:function(){var a=this.target;this.highlightSpotId&&(a.removeShapeId(this.highlightSpotId),this.highlightSpotId=null),this.highlightLineId&&(a.removeShapeId(this.highlightLineId),this.highlightLineId=null)},scanValues:function(){var a=this.values,c=a.length,d=this.xvalues,e=this.yvalues,f=this.yminmax,g,h,i,j,k;for(g=0;g<c;g++)h=a[g],i=typeof a[g]=="string",j=typeof a[g]=="object"&&a[g]instanceof Array,k=i&&a[g].split(":"),i&&k.length===2?(d.push(Number(k[0])),e.push(Number(k[1])),f.push(Number(k[1]))):j?(d.push(h[0]),e.push(h[1]),f.push(h[1])):(d.push(g),a[g]===null||a[g]==="null"?e.push(null):(e.push(Number(h)),f.push(Number(h))));this.options.get("xvalues")&&(d=this.options.get("xvalues")),this.maxy=this.maxyorg=b.max.apply(b,f),this.miny=this.minyorg=b.min.apply(b,f),this.maxx=b.max.apply(b,d),this.minx=b.min.apply(b,d),this.xvalues=d,this.yvalues=e,this.yminmax=f},processRangeOptions:function(){var a=this.options,b=a.get("normalRangeMin"),d=a.get("normalRangeMax");b!==c&&(b<this.miny&&(this.miny=b),d>this.maxy&&(this.maxy=d)),a.get("chartRangeMin")!==c&&(a.get("chartRangeClip")||a.get("chartRangeMin")<this.miny)&&(this.miny=a.get("chartRangeMin")),a.get("chartRangeMax")!==c&&(a.get("chartRangeClip")||a.get("chartRangeMax")>this.maxy)&&(this.maxy=a.get("chartRangeMax")),a.get("chartRangeMinX")!==c&&(a.get("chartRangeClipX")||a.get("chartRangeMinX")<this.minx)&&(this.minx=a.get("chartRangeMinX")),a.get("chartRangeMaxX")!==c&&(a.get("chartRangeClipX")||a.get("chartRangeMaxX")>this.maxx)&&(this.maxx=a.get("chartRangeMaxX"))},drawNormalRange:function(a,d,e,f,g){var h=this.options.get("normalRangeMin"),i=this.options.get("normalRangeMax"),j=d+b.round(e-e*((i-this.miny)/g)),k=b.round(e*(i-h)/g);this.target.drawRect(a,j,f,k,c,this.options.get("normalRangeColor")).append()},render:function(){var 
a=this.options,e=this.target,f=this.canvasWidth,g=this.canvasHeight,h=this.vertices,i=a.get("spotRadius"),j=this.regionMap,k,l,m,n,o,p,q,r,s,u,v,w,y,z,A,B,C,D,E,F,G,H,I,J,K;if(!x._super.render.call(this))return;this.scanValues(),this.processRangeOptions(),I=this.xvalues,J=this.yvalues;if(!this.yminmax.length||this.yvalues.length<2)return;n=o=0,k=this.maxx-this.minx===0?1:this.maxx-this.minx,l=this.maxy-this.miny===0?1:this.maxy-this.miny,m=this.yvalues.length-1,i&&(f<i*4||g<i*4)&&(i=0);if(i){G=a.get("highlightSpotColor")&&!a.get("disableInteraction");if(G||a.get("minSpotColor")||a.get("spotColor")&&J[m]===this.miny)g-=b.ceil(i);if(G||a.get("maxSpotColor")||a.get("spotColor")&&J[m]===this.maxy)g-=b.ceil(i),n+=b.ceil(i);if(G||(a.get("minSpotColor")||a.get("maxSpotColor"))&&(J[0]===this.miny||J[0]===this.maxy))o+=b.ceil(i),f-=b.ceil(i);if(G||a.get("spotColor")||a.get("minSpotColor")||a.get("maxSpotColor")&&(J[m]===this.miny||J[m]===this.maxy))f-=b.ceil(i)}g--,a.get("normalRangeMin")!==c&&!a.get("drawNormalOnTop")&&this.drawNormalRange(o,n,g,f,l),q=[],r=[q],z=A=null,B=J.length;for(K=0;K<B;K++)s=I[K],v=I[K+1],u=J[K],w=o+b.round((s-this.minx)*(f/k)),y=K<B-1?o+b.round((v-this.minx)*(f/k)):f,A=w+(y-w)/2,j[K]=[z||0,A,K],z=A,u===null?K&&(J[K-1]!==null&&(q=[],r.push(q)),h.push(null)):(u<this.miny&&(u=this.miny),u>this.maxy&&(u=this.maxy),q.length||q.push([w,n+g]),p=[w,n+b.round(g-g*((u-this.miny)/l))],q.push(p),h.push(p));C=[],D=[],E=r.length;for(K=0;K<E;K++)q=r[K],q.length&&(a.get("fillColor")&&(q.push([q[q.length-1][0],n+g]),D.push(q.slice(0)),q.pop()),q.length>2&&(q[0]=[q[0][0],q[1][1]]),C.push(q));E=D.length;for(K=0;K<E;K++)e.drawShape(D[K],a.get("fillColor"),a.get("fillColor")).append();a.get("normalRangeMin")!==c&&a.get("drawNormalOnTop")&&this.drawNormalRange(o,n,g,f,l),E=C.length;for(K=0;K<E;K++)e.drawShape(C[K],a.get("lineColor"),c,a.get("lineWidth")).append();if(i&&a.get("valueSpots")){F=a.get("valueSpots"),F.get===c&&(F=new t(F));for(K=0;K<B;K++)H=F.get(J[K]),H&&e.drawCircle(o+b.round((I[K]-this.minx)*(f/k)),n+b.round(g-g*((J[K]-this.miny)/l)),i,c,H).append()}i&&a.get("spotColor")&&J[m]!==null&&e.drawCircle(o+b.round((I[I.length-1]-this.minx)*(f/k)),n+b.round(g-g*((J[m]-this.miny)/l)),i,c,a.get("spotColor")).append(),this.maxy!==this.minyorg&&(i&&a.get("minSpotColor")&&(s=I[d.inArray(this.minyorg,J)],e.drawCircle(o+b.round((s-this.minx)*(f/k)),n+b.round(g-g*((this.minyorg-this.miny)/l)),i,c,a.get("minSpotColor")).append()),i&&a.get("maxSpotColor")&&(s=I[d.inArray(this.maxyorg,J)],e.drawCircle(o+b.round((s-this.minx)*(f/k)),n+b.round(g-g*((this.maxyorg-this.miny)/l)),i,c,a.get("maxSpotColor")).append())),this.lastShapeId=e.getLastShapeId(),this.canvasTop=n,e.render()}}),d.fn.sparkline.bar=y=g(d.fn.sparkline._base,w,{type:"bar",init:function(a,e,f,g,h){var j=parseInt(f.get("barWidth"),10),n=parseInt(f.get("barSpacing"),10),o=f.get("chartRangeMin"),p=f.get("chartRangeMax"),q=f.get("chartRangeClip"),r=Infinity,s=-Infinity,u,v,w,x,z,A,B,C,D,E,F,G,H,I,J,K,L,M,N,O,P,Q,R;y._super.init.call(this,a,e,f,g,h);for(A=0,B=e.length;A<B;A++){O=e[A],u=typeof O=="string"&&O.indexOf(":")>-1;if(u||d.isArray(O))J=!0,u&&(O=e[A]=l(O.split(":"))),O=m(O,null),v=b.min.apply(b,O),w=b.max.apply(b,O),v<r&&(r=v),w>s&&(s=w)}this.stacked=J,this.regionShapes={},this.barWidth=j,this.barSpacing=n,this.totalBarWidth=j+n,this.width=g=e.length*j+(e.length-1)*n,this.initTarget(),q&&(H=o===c?-Infinity:o,I=p===c?Infinity:p),z=[],x=J?[]:z;var 
S=[],T=[];for(A=0,B=e.length;A<B;A++)if(J){K=e[A],e[A]=N=[],S[A]=0,x[A]=T[A]=0;for(L=0,M=K.length;L<M;L++)O=N[L]=q?i(K[L],H,I):K[L],O!==null&&(O>0&&(S[A]+=O),r<0&&s>0?O<0?T[A]+=b.abs(O):x[A]+=O:x[A]+=b.abs(O-(O<0?s:r)),z.push(O))}else O=q?i(e[A],H,I):e[A],O=e[A]=k(O),O!==null&&z.push(O);this.max=G=b.max.apply(b,z),this.min=F=b.min.apply(b,z),this.stackMax=s=J?b.max.apply(b,S):G,this.stackMin=r=J?b.min.apply(b,z):F,f.get("chartRangeMin")!==c&&(f.get("chartRangeClip")||f.get("chartRangeMin")<F)&&(F=f.get("chartRangeMin")),f.get("chartRangeMax")!==c&&(f.get("chartRangeClip")||f.get("chartRangeMax")>G)&&(G=f.get("chartRangeMax")),this.zeroAxis=D=f.get("zeroAxis",!0),F<=0&&G>=0&&D?E=0:D==0?E=F:F>0?E=F:E=G,this.xaxisOffset=E,C=J?b.max.apply(b,x)+b.max.apply(b,T):G-F,this.canvasHeightEf=D&&F<0?this.canvasHeight-2:this.canvasHeight-1,F<E?(Q=J&&G>=0?s:G,P=(Q-E)/C*this.canvasHeight,P!==b.ceil(P)&&(this.canvasHeightEf-=2,P=b.ceil(P))):P=this.canvasHeight,this.yoffset=P,d.isArray(f.get("colorMap"))?(this.colorMapByIndex=f.get("colorMap"),this.colorMapByValue=null):(this.colorMapByIndex=null,this.colorMapByValue=f.get("colorMap"),this.colorMapByValue&&this.colorMapByValue.get===c&&(this.colorMapByValue=new t(this.colorMapByValue))),this.range=C},getRegion:function(a,d,e){var f=b.floor(d/this.totalBarWidth);return f<0||f>=this.values.length?c:f},getCurrentRegionFields:function(){var a=this.currentRegion,b=r(this.values[a]),c=[],d,e;for(e=b.length;e--;)d=b[e],c.push({isNull:d===null,value:d,color:this.calcColor(e,d,a),offset:a});return c},calcColor:function(a,b,e){var f=this.colorMapByIndex,g=this.colorMapByValue,h=this.options,i,j;return this.stacked?i=h.get("stackedBarColor"):i=b<0?h.get("negBarColor"):h.get("barColor"),b===0&&h.get("zeroColor")!==c&&(i=h.get("zeroColor")),g&&(j=g.get(b))?i=j:f&&f.length>e&&(i=f[e]),d.isArray(i)?i[a%i.length]:i},renderRegion:function(a,e){var f=this.values[a],g=this.options,h=this.xaxisOffset,i=[],j=this.range,k=this.stacked,l=this.target,m=a*this.totalBarWidth,n=this.canvasHeightEf,p=this.yoffset,q,r,s,t,u,v,w,x,y,z;f=d.isArray(f)?f:[f],w=f.length,x=f[0],t=o(null,f),z=o(h,f,!0);if(t)return g.get("nullColor")?(s=e?g.get("nullColor"):this.calcHighlightColor(g.get("nullColor"),g),q=p>0?p-1:p,l.drawRect(m,q,this.barWidth-1,0,s,s)):c;u=p;for(v=0;v<w;v++){x=f[v];if(k&&x===h){if(!z||y)continue;y=!0}j>0?r=b.floor(n*(b.abs(x-h)/j))+1:r=1,x<h||x===h&&p===0?(q=u,u+=r):(q=p-r,p-=r),s=this.calcColor(v,x,a),e&&(s=this.calcHighlightColor(s,g)),i.push(l.drawRect(m,q,this.barWidth-1,r-1,s,s))}return i.length===1?i[0]:i}}),d.fn.sparkline.tristate=z=g(d.fn.sparkline._base,w,{type:"tristate",init:function(a,b,e,f,g){var h=parseInt(e.get("barWidth"),10),i=parseInt(e.get("barSpacing"),10);z._super.init.call(this,a,b,e,f,g),this.regionShapes={},this.barWidth=h,this.barSpacing=i,this.totalBarWidth=h+i,this.values=d.map(b,Number),this.width=f=b.length*h+(b.length-1)*i,d.isArray(e.get("colorMap"))?(this.colorMapByIndex=e.get("colorMap"),this.colorMapByValue=null):(this.colorMapByIndex=null,this.colorMapByValue=e.get("colorMap"),this.colorMapByValue&&this.colorMapByValue.get===c&&(this.colorMapByValue=new t(this.colorMapByValue))),this.initTarget()},getRegion:function(a,c,d){return b.floor(c/this.totalBarWidth)},getCurrentRegionFields:function(){var a=this.currentRegion;return{isNull:this.values[a]===c,value:this.values[a],color:this.calcColor(this.values[a],a),offset:a}},calcColor:function(a,b){var c=this.values,d=this.options,e=this.colorMapByIndex,f=this.colorMapByValue,g,h;return 
f&&(h=f.get(a))?g=h:e&&e.length>b?g=e[b]:c[b]<0?g=d.get("negBarColor"):c[b]>0?g=d.get("posBarColor"):g=d.get("zeroBarColor"),g},renderRegion:function(a,c){var d=this.values,e=this.options,f=this.target,g,h,i,j,k,l;g=f.pixelHeight,i=b.round(g/2),j=a*this.totalBarWidth,d[a]<0?(k=i,h=i-1):d[a]>0?(k=0,h=i-1):(k=i-1,h=2),l=this.calcColor(d[a],a);if(l===null)return;return c&&(l=this.calcHighlightColor(l,e)),f.drawRect(j,k,this.barWidth-1,h-1,l,l)}}),d.fn.sparkline.discrete=A=g(d.fn.sparkline._base,w,{type:"discrete",init:function(a,e,f,g,h){A._super.init.call(this,a,e,f,g,h),this.regionShapes={},this.values=e=d.map(e,Number),this.min=b.min.apply(b,e),this.max=b.max.apply(b,e),this.range=this.max-this.min,this.width=g=f.get("width")==="auto"?e.length*2:this.width,this.interval=b.floor(g/e.length),this.itemWidth=g/e.length,f.get("chartRangeMin")!==c&&(f.get("chartRangeClip")||f.get("chartRangeMin")<this.min)&&(this.min=f.get("chartRangeMin")),f.get("chartRangeMax")!==c&&(f.get("chartRangeClip")||f.get("chartRangeMax")>this.max)&&(this.max=f.get("chartRangeMax")),this.initTarget(),this.target&&(this.lineHeight=f.get("lineHeight")==="auto"?b.round(this.canvasHeight*.3):f.get("lineHeight"))},getRegion:function(a,c,d){return b.floor(c/this.itemWidth)},getCurrentRegionFields:function(){var a=this.currentRegion;return{isNull:this.values[a]===c,value:this.values[a],offset:a}},renderRegion:function(a,c){var d=this.values,e=this.options,f=this.min,g=this.max,h=this.range,j=this.interval,k=this.target,l=this.canvasHeight,m=this.lineHeight,n=l-m,o,p,q,r;return p=i(d[a],f,g),r=a*j,o=b.round(n-n*((p-f)/h)),q=e.get("thresholdColor")&&p<e.get("thresholdValue")?e.get("thresholdColor"):e.get("lineColor"),c&&(q=this.calcHighlightColor(q,e)),k.drawLine(r,o,r,o+m,q)}}),d.fn.sparkline.bullet=B=g(d.fn.sparkline._base,{type:"bullet",init:function(a,d,e,f,g){var h,i,j;B._super.init.call(this,a,d,e,f,g),this.values=d=l(d),j=d.slice(),j[0]=j[0]===null?j[2]:j[0],j[1]=d[1]===null?j[2]:j[1],h=b.min.apply(b,d),i=b.max.apply(b,d),e.get("base")===c?h=h<0?h:0:h=e.get("base"),this.min=h,this.max=i,this.range=i-h,this.shapes={},this.valueShapes={},this.regiondata={},this.width=f=e.get("width")==="auto"?"4.0em":f,this.target=this.$el.simpledraw(f,g,e.get("composite")),d.length||(this.disabled=!0),this.initTarget()},getRegion:function(a,b,d){var e=this.target.getShapeAt(a,b,d);return e!==c&&this.shapes[e]!==c?this.shapes[e]:c},getCurrentRegionFields:function(){var a=this.currentRegion;return{fieldkey:a.substr(0,1),value:this.values[a.substr(1)],region:a}},changeHighlight:function(a){var b=this.currentRegion,c=this.valueShapes[b],d;delete this.shapes[c];switch(b.substr(0,1)){case"r":d=this.renderRange(b.substr(1),a);break;case"p":d=this.renderPerformance(a);break;case"t":d=this.renderTarget(a)}this.valueShapes[b]=d.id,this.shapes[d.id]=b,this.target.replaceWithShape(c,d)},renderRange:function(a,c){var d=this.values[a],e=b.round(this.canvasWidth*((d-this.min)/this.range)),f=this.options.get("rangeColors")[a-2];return c&&(f=this.calcHighlightColor(f,this.options)),this.target.drawRect(0,0,e-1,this.canvasHeight-1,f,f)},renderPerformance:function(a){var c=this.values[1],d=b.round(this.canvasWidth*((c-this.min)/this.range)),e=this.options.get("performanceColor");return a&&(e=this.calcHighlightColor(e,this.options)),this.target.drawRect(0,b.round(this.canvasHeight*.3),d-1,b.round(this.canvasHeight*.4)-1,e,e)},renderTarget:function(a){var 
c=this.values[0],d=b.round(this.canvasWidth*((c-this.min)/this.range)-this.options.get("targetWidth")/2),e=b.round(this.canvasHeight*.1),f=this.canvasHeight-e*2,g=this.options.get("targetColor");return a&&(g=this.calcHighlightColor(g,this.options)),this.target.drawRect(d,e,this.options.get("targetWidth")-1,f-1,g,g)},render:function(){var a=this.values.length,b=this.target,c,d;if(!B._super.render.call(this))return;for(c=2;c<a;c++)d=this.renderRange(c).append(),this.shapes[d.id]="r"+c,this.valueShapes["r"+c]=d.id;this.values[1]!==null&&(d=this.renderPerformance().append(),this.shapes[d.id]="p1",this.valueShapes.p1=d.id),this.values[0]!==null&&(d=this.renderTarget().append(),this.shapes[d.id]="t0",this.valueShapes.t0=d.id),b.render()}}),d.fn.sparkline.pie=C=g(d.fn.sparkline._base,{type:"pie",init:function(a,c,e,f,g){var h=0,i;C._super.init.call(this,a,c,e,f,g),this.shapes={},this.valueShapes={},this.values=c=d.map(c,Number),e.get("width")==="auto"&&(this.width=this.height);if(c.length>0)for(i=c.length;i--;)h+=c[i];this.total=h,this.initTarget(),this.radius=b.floor(b.min(this.canvasWidth,this.canvasHeight)/2)},getRegion:function(a,b,d){var e=this.target.getShapeAt(a,b,d);return e!==c&&this.shapes[e]!==c?this.shapes[e]:c},getCurrentRegionFields:function(){var a=this.currentRegion;return{isNull:this.values[a]===c,value:this.values[a],percent:this.values[a]/this.total*100,color:this.options.get("sliceColors")[a%this.options.get("sliceColors").length],offset:a}},changeHighlight:function(a){var b=this.currentRegion,c=this.renderSlice(b,a),d=this.valueShapes[b];delete this.shapes[d],this.target.replaceWithShape(d,c),this.valueShapes[b]=c.id,this.shapes[c.id]=b},renderSlice:function(a,d){var e=this.target,f=this.options,g=this.radius,h=f.get("borderWidth"),i=f.get("offset"),j=2*b.PI,k=this.values,l=this.total,m=i?2*b.PI*(i/360):0,n,o,p,q,r;q=k.length;for(p=0;p<q;p++){n=m,o=m,l>0&&(o=m+j*(k[p]/l));if(a===p)return r=f.get("sliceColors")[p%f.get("sliceColors").length],d&&(r=this.calcHighlightColor(r,f)),e.drawPieSlice(g,g,g-h,n,o,c,r);m=o}},render:function(){var a=this.target,d=this.values,e=this.options,f=this.radius,g=e.get("borderWidth"),h,i;if(!C._super.render.call(this))return;g&&a.drawCircle(f,f,b.floor(f-g/2),e.get("borderColor"),c,g).append();for(i=d.length;i--;)d[i]&&(h=this.renderSlice(i).append(),this.valueShapes[i]=h.id,this.shapes[h.id]=i);a.render()}}),d.fn.sparkline.box=D=g(d.fn.sparkline._base,{type:"box",init:function(a,b,c,e,f){D._super.init.call(this,a,b,c,e,f),this.values=d.map(b,Number),this.width=c.get("width")==="auto"?"4.0em":e,this.initTarget(),this.values.length||(this.disabled=1)},getRegion:function(){return 1},getCurrentRegionFields:function(){var a=[{field:"lq",value:this.quartiles[0]},{field:"med",value:this.quartiles
+[1]},{field:"uq",value:this.quartiles[2]}];return this.loutlier!==c&&a.push({field:"lo",value:this.loutlier}),this.routlier!==c&&a.push({field:"ro",value:this.routlier}),this.lwhisker!==c&&a.push({field:"lw",value:this.lwhisker}),this.rwhisker!==c&&a.push({field:"rw",value:this.rwhisker}),a},render:function(){var a=this.target,d=this.values,e=d.length,f=this.options,g=this.canvasWidth,h=this.canvasHeight,i=f.get("chartRangeMin")===c?b.min.apply(b,d):f.get("chartRangeMin"),k=f.get("chartRangeMax")===c?b.max.apply(b,d):f.get("chartRangeMax"),l=0,m,n,o,p,q,r,s,t,u,v,w;if(!D._super.render.call(this))return;if(f.get("raw"))f.get("showOutliers")&&d.length>5?(n=d[0],m=d[1],p=d[2],q=d[3],r=d[4],s=d[5],t=d[6]):(m=d[0],p=d[1],q=d[2],r=d[3],s=d[4]);else{d.sort(function(a,b){return a-b}),p=j(d,1),q=j(d,2),r=j(d,3),o=r-p;if(f.get("showOutliers")){m=s=c;for(u=0;u<e;u++)m===c&&d[u]>p-o*f.get("outlierIQR")&&(m=d[u]),d[u]<r+o*f.get("outlierIQR")&&(s=d[u]);n=d[0],t=d[e-1]}else m=d[0],s=d[e-1]}this.quartiles=[p,q,r],this.lwhisker=m,this.rwhisker=s,this.loutlier=n,this.routlier=t,w=g/(k-i+1),f.get("showOutliers")&&(l=b.ceil(f.get("spotRadius")),g-=2*b.ceil(f.get("spotRadius")),w=g/(k-i+1),n<m&&a.drawCircle((n-i)*w+l,h/2,f.get("spotRadius"),f.get("outlierLineColor"),f.get("outlierFillColor")).append(),t>s&&a.drawCircle((t-i)*w+l,h/2,f.get("spotRadius"),f.get("outlierLineColor"),f.get("outlierFillColor")).append()),a.drawRect(b.round((p-i)*w+l),b.round(h*.1),b.round((r-p)*w),b.round(h*.8),f.get("boxLineColor"),f.get("boxFillColor")).append(),a.drawLine(b.round((m-i)*w+l),b.round(h/2),b.round((p-i)*w+l),b.round(h/2),f.get("lineColor")).append(),a.drawLine(b.round((m-i)*w+l),b.round(h/4),b.round((m-i)*w+l),b.round(h-h/4),f.get("whiskerColor")).append(),a.drawLine(b.round((s-i)*w+l),b.round(h/2),b.round((r-i)*w+l),b.round(h/2),f.get("lineColor")).append(),a.drawLine(b.round((s-i)*w+l),b.round(h/4),b.round((s-i)*w+l),b.round(h-h/4),f.get("whiskerColor")).append(),a.drawLine(b.round((q-i)*w+l),b.round(h*.1),b.round((q-i)*w+l),b.round(h*.9),f.get("medianColor")).append(),f.get("target")&&(v=b.ceil(f.get("spotRadius")),a.drawLine(b.round((f.get("target")-i)*w+l),b.round(h/2-v),b.round((f.get("target")-i)*w+l),b.round(h/2+v),f.get("targetColor")).append(),a.drawLine(b.round((f.get("target")-i)*w+l-v),b.round(h/2),b.round((f.get("target")-i)*w+l+v),b.round(h/2),f.get("targetColor")).append()),a.render()}}),G=g({init:function(a,b,c,d){this.target=a,this.id=b,this.type=c,this.args=d},append:function(){return this.target.appendShape(this),this}}),H=g({_pxregex:/(\d+)(px)?\s*$/i,init:function(a,b,c){if(!a)return;this.width=a,this.height=b,this.target=c,this.lastShapeId=null,c[0]&&(c=c[0]),d.data(c,"_jqs_vcanvas",this)},drawLine:function(a,b,c,d,e,f){return this.drawShape([[a,b],[c,d]],e,f)},drawShape:function(a,b,c,d){return this._genShape("Shape",[a,b,c,d])},drawCircle:function(a,b,c,d,e,f){return this._genShape("Circle",[a,b,c,d,e,f])},drawPieSlice:function(a,b,c,d,e,f,g){return this._genShape("PieSlice",[a,b,c,d,e,f,g])},drawRect:function(a,b,c,d,e,f){return this._genShape("Rect",[a,b,c,d,e,f])},getElement:function(){return this.canvas},getLastShapeId:function(){return this.lastShapeId},reset:function(){alert("reset not implemented")},_insert:function(a,b){d(b).html(a)},_calculatePixelDims:function(a,b,c){var e;e=this._pxregex.exec(b),e?this.pixelHeight=e[1]:this.pixelHeight=d(c).height(),e=this._pxregex.exec(a),e?this.pixelWidth=e[1]:this.pixelWidth=d(c).width()},_genShape:function(a,b){var c=L++;return 
b.unshift(c),new G(this,c,a,b)},appendShape:function(a){alert("appendShape not implemented")},replaceWithShape:function(a,b){alert("replaceWithShape not implemented")},insertAfterShape:function(a,b){alert("insertAfterShape not implemented")},removeShapeId:function(a){alert("removeShapeId not implemented")},getShapeAt:function(a,b,c){alert("getShapeAt not implemented")},render:function(){alert("render not implemented")}}),I=g(H,{init:function(b,e,f,g){I._super.init.call(this,b,e,f),this.canvas=a.createElement("canvas"),f[0]&&(f=f[0]),d.data(f,"_jqs_vcanvas",this),d(this.canvas).css({display:"inline-block",width:b,height:e,verticalAlign:"top"}),this._insert(this.canvas,f),this._calculatePixelDims(b,e,this.canvas),this.canvas.width=this.pixelWidth,this.canvas.height=this.pixelHeight,this.interact=g,this.shapes={},this.shapeseq=[],this.currentTargetShapeId=c,d(this.canvas).css({width:this.pixelWidth,height:this.pixelHeight})},_getContext:function(a,b,d){var e=this.canvas.getContext("2d");return a!==c&&(e.strokeStyle=a),e.lineWidth=d===c?1:d,b!==c&&(e.fillStyle=b),e},reset:function(){var a=this._getContext();a.clearRect(0,0,this.pixelWidth,this.pixelHeight),this.shapes={},this.shapeseq=[],this.currentTargetShapeId=c},_drawShape:function(a,b,d,e,f){var g=this._getContext(d,e,f),h,i;g.beginPath(),g.moveTo(b[0][0]+.5,b[0][1]+.5);for(h=1,i=b.length;h<i;h++)g.lineTo(b[h][0]+.5,b[h][1]+.5);d!==c&&g.stroke(),e!==c&&g.fill(),this.targetX!==c&&this.targetY!==c&&g.isPointInPath(this.targetX,this.targetY)&&(this.currentTargetShapeId=a)},_drawCircle:function(a,d,e,f,g,h,i){var j=this._getContext(g,h,i);j.beginPath(),j.arc(d,e,f,0,2*b.PI,!1),this.targetX!==c&&this.targetY!==c&&j.isPointInPath(this.targetX,this.targetY)&&(this.currentTargetShapeId=a),g!==c&&j.stroke(),h!==c&&j.fill()},_drawPieSlice:function(a,b,d,e,f,g,h,i){var j=this._getContext(h,i);j.beginPath(),j.moveTo(b,d),j.arc(b,d,e,f,g,!1),j.lineTo(b,d),j.closePath(),h!==c&&j.stroke(),i&&j.fill(),this.targetX!==c&&this.targetY!==c&&j.isPointInPath(this.targetX,this.targetY)&&(this.currentTargetShapeId=a)},_drawRect:function(a,b,c,d,e,f,g){return this._drawShape(a,[[b,c],[b+d,c],[b+d,c+e],[b,c+e],[b,c]],f,g)},appendShape:function(a){return this.shapes[a.id]=a,this.shapeseq.push(a.id),this.lastShapeId=a.id,a.id},replaceWithShape:function(a,b){var c=this.shapeseq,d;this.shapes[b.id]=b;for(d=c.length;d--;)c[d]==a&&(c[d]=b.id);delete this.shapes[a]},replaceWithShapes:function(a,b){var c=this.shapeseq,d={},e,f,g;for(f=a.length;f--;)d[a[f]]=!0;for(f=c.length;f--;)e=c[f],d[e]&&(c.splice(f,1),delete this.shapes[e],g=f);for(f=b.length;f--;)c.splice(g,0,b[f].id),this.shapes[b[f].id]=b[f]},insertAfterShape:function(a,b){var c=this.shapeseq,d;for(d=c.length;d--;)if(c[d]===a){c.splice(d+1,0,b.id),this.shapes[b.id]=b;return}},removeShapeId:function(a){var b=this.shapeseq,c;for(c=b.length;c--;)if(b[c]===a){b.splice(c,1);break}delete this.shapes[a]},getShapeAt:function(a,b,c){return this.targetX=b,this.targetY=c,this.render(),this.currentTargetShapeId},render:function(){var a=this.shapeseq,b=this.shapes,c=a.length,d=this._getContext(),e,f,g;d.clearRect(0,0,this.pixelWidth,this.pixelHeight);for(g=0;g<c;g++)e=a[g],f=b[e],this["_draw"+f.type].apply(this,f.args);this.interact||(this.shapes={},this.shapeseq=[])}}),J=g(H,{init:function(b,c,e){var 
f;J._super.init.call(this,b,c,e),e[0]&&(e=e[0]),d.data(e,"_jqs_vcanvas",this),this.canvas=a.createElement("span"),d(this.canvas).css({display:"inline-block",position:"relative",overflow:"hidden",width:b,height:c,margin:"0px",padding:"0px",verticalAlign:"top"}),this._insert(this.canvas,e),this._calculatePixelDims(b,c,this.canvas),this.canvas.width=this.pixelWidth,this.canvas.height=this.pixelHeight,f='<v:group coordorigin="0 0" coordsize="'+this.pixelWidth+" "+this.pixelHeight+'"'+' style="position:absolute;top:0;left:0;width:'+this.pixelWidth+"px;height="+this.pixelHeight+'px;"></v:group>',this.canvas.insertAdjacentHTML("beforeEnd",f),this.group=d(this.canvas).children()[0],this.rendered=!1,this.prerender=""},_drawShape:function(a,b,d,e,f){var g=[],h,i,j,k,l,m,n;for(n=0,m=b.length;n<m;n++)g[n]=""+b[n][0]+","+b[n][1];return h=g.splice(0,1),f=f===c?1:f,i=d===c?' stroked="false" ':' strokeWeight="'+f+'px" strokeColor="'+d+'" ',j=e===c?' filled="false"':' fillColor="'+e+'" filled="true" ',k=g[0]===g[g.length-1]?"x ":"",l='<v:shape coordorigin="0 0" coordsize="'+this.pixelWidth+" "+this.pixelHeight+'" '+' id="jqsshape'+a+'" '+i+j+' style="position:absolute;left:0px;top:0px;height:'+this.pixelHeight+"px;width:"+this.pixelWidth+'px;padding:0px;margin:0px;" '+' path="m '+h+" l "+g.join(", ")+" "+k+'e">'+" </v:shape>",l},_drawCircle:function(a,b,d,e,f,g,h){var i,j,k;return b-=e,d-=e,i=f===c?' stroked="false" ':' strokeWeight="'+h+'px" strokeColor="'+f+'" ',j=g===c?' filled="false"':' fillColor="'+g+'" filled="true" ',k='<v:oval id="jqsshape'+a+'" '+i+j+' style="position:absolute;top:'+d+"px; left:"+b+"px; width:"+e*2+"px; height:"+e*2+'px"></v:oval>',k},_drawPieSlice:function(a,d,e,f,g,h,i,j){var k,l,m,n,o,p,q,r;if(g===h)return"";h-g===2*b.PI&&(g=0,h=2*b.PI),l=d+b.round(b.cos(g)*f),m=e+b.round(b.sin(g)*f),n=d+b.round(b.cos(h)*f),o=e+b.round(b.sin(h)*f);if(l===n&&m===o){if(h-g<b.PI)return"";l=n=d+f,m=o=e}return l===n&&m===o&&h-g<b.PI?"":(k=[d-f,e-f,d+f,e+f,l,m,n,o],p=i===c?' stroked="false" ':' strokeWeight="1px" strokeColor="'+i+'" ',q=j===c?' filled="false"':' fillColor="'+j+'" filled="true" ',r='<v:shape coordorigin="0 0" coordsize="'+this.pixelWidth+" "+this.pixelHeight+'" '+' id="jqsshape'+a+'" '+p+q+' style="position:absolute;left:0px;top:0px;height:'+this.pixelHeight+"px;width:"+this.pixelWidth+'px;padding:0px;margin:0px;" '+' path="m '+d+","+e+" wa "+k.join(", ")+' x e">'+" </v:shape>",r)},_drawRect:function(a,b,c,d,e,f,g){return this._drawShape(a,[[b,c],[b,c+e],[b+d,c+e],[b+d,c],[b,c]],f,g)},reset:function(){this.group.innerHTML=""},appendShape:function(a){var b=this["_draw"+a.type].apply(this,a.args);return this.rendered?this.group.insertAdjacentHTML("beforeEnd",b):this.prerender+=b,this.lastShapeId=a.id,a.id},replaceWithShape:function(a,b){var c=d("#jqsshape"+a),e=this["_draw"+b.type].apply(this,b.args);c[0].outerHTML=e},replaceWithShapes:function(a,b){var c=d("#jqsshape"+a[0]),e="",f=b.length,g;for(g=0;g<f;g++)e+=this["_draw"+b[g].type].apply(this,b[g].args);c[0].outerHTML=e;for(g=1;g<a.length;g++)d("#jqsshape"+a[g]).remove()},insertAfterShape:function(a,b){var c=d("#jqsshape"+a),e=this["_draw"+b.type].apply(this,b.args);c[0].insertAdjacentHTML("afterEnd",e)},removeShapeId:function(a){var b=d("#jqsshape"+a);this.group.removeChild(b[0])},getShapeAt:function(a,b,c){var d=a.id.substr(8);return d},render:function(){this.rendered||(this.group.innerHTML=this.prerender,this.rendered=!0)}})})})(document,Math);
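jquery.sparkline above exposes $.fn.sparkline(values, options) plus a $.sparkline_display_visible() helper for charts requested while their container was hidden. A usage sketch under assumed markup (#load-trend and the data are illustrative; the colors shown are the plugin defaults):

  // Inline line chart from an explicit array of values.
  $('#load-trend').sparkline([4, 6, 7, 7, 4, 3, 2, 1, 4], {
      type: 'line',        // default type; 'bar', 'pie', 'box', etc. are also registered
      lineColor: '#00f',
      fillColor: '#cdf',
      width: 'auto',       // auto = values.length * 3px (defaultPixelsPerValue)
      height: 'auto'
  });
  // Sparklines created while hidden are queued; flush them once visible.
  $.sparkline_display_visible();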
diff --git a/web/gui/lib/lz-string-1.4.4.min.js b/web/gui/lib/lz-string-1.4.4.min.js
new file mode 100644
index 0000000..c7de0d5
--- /dev/null
+++ b/web/gui/lib/lz-string-1.4.4.min.js
@@ -0,0 +1,2 @@
+// SPDX-License-Identifier: WTFPL
+var LZString=function(){function o(o,r){if(!t[o]){t[o]={};for(var n=0;n<o.length;n++)t[o][o.charAt(n)]=n}return t[o][r]}var r=String.fromCharCode,n="ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/=",e="ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+-$",t={},i={compressToBase64:function(o){if(null==o)return"";var r=i._compress(o,6,function(o){return n.charAt(o)});switch(r.length%4){default:case 0:return r;case 1:return r+"===";case 2:return r+"==";case 3:return r+"="}},decompressFromBase64:function(r){return null==r?"":""==r?null:i._decompress(r.length,32,function(e){return o(n,r.charAt(e))})},compressToUTF16:function(o){return null==o?"":i._compress(o,15,function(o){return r(o+32)})+" "},decompressFromUTF16:function(o){return null==o?"":""==o?null:i._decompress(o.length,16384,function(r){return o.charCodeAt(r)-32})},compressToUint8Array:function(o){for(var r=i.compress(o),n=new Uint8Array(2*r.length),e=0,t=r.length;t>e;e++){var s=r.charCodeAt(e);n[2*e]=s>>>8,n[2*e+1]=s%256}return n},decompressFromUint8Array:function(o){if(null===o||void 0===o)return i.decompress(o);for(var n=new Array(o.length/2),e=0,t=n.length;t>e;e++)n[e]=256*o[2*e]+o[2*e+1];var s=[];return n.forEach(function(o){s.push(r(o))}),i.decompress(s.join(""))},compressToEncodedURIComponent:function(o){return null==o?"":i._compress(o,6,function(o){return e.charAt(o)})},decompressFromEncodedURIComponent:function(r){return null==r?"":""==r?null:(r=r.replace(/ /g,"+"),i._decompress(r.length,32,function(n){return o(e,r.charAt(n))}))},compress:function(o){return i._compress(o,16,function(o){return r(o)})},_compress:function(o,r,n){if(null==o)return"";var e,t,i,s={},p={},u="",c="",a="",l=2,f=3,h=2,d=[],m=0,v=0;for(i=0;i<o.length;i+=1)if(u=o.charAt(i),Object.prototype.hasOwnProperty.call(s,u)||(s[u]=f++,p[u]=!0),c=a+u,Object.prototype.hasOwnProperty.call(s,c))a=c;else{if(Object.prototype.hasOwnProperty.call(p,a)){if(a.charCodeAt(0)<256){for(e=0;h>e;e++)m<<=1,v==r-1?(v=0,d.push(n(m)),m=0):v++;for(t=a.charCodeAt(0),e=0;8>e;e++)m=m<<1|1&t,v==r-1?(v=0,d.push(n(m)),m=0):v++,t>>=1}else{for(t=1,e=0;h>e;e++)m=m<<1|t,v==r-1?(v=0,d.push(n(m)),m=0):v++,t=0;for(t=a.charCodeAt(0),e=0;16>e;e++)m=m<<1|1&t,v==r-1?(v=0,d.push(n(m)),m=0):v++,t>>=1}l--,0==l&&(l=Math.pow(2,h),h++),delete p[a]}else for(t=s[a],e=0;h>e;e++)m=m<<1|1&t,v==r-1?(v=0,d.push(n(m)),m=0):v++,t>>=1;l--,0==l&&(l=Math.pow(2,h),h++),s[c]=f++,a=String(u)}if(""!==a){if(Object.prototype.hasOwnProperty.call(p,a)){if(a.charCodeAt(0)<256){for(e=0;h>e;e++)m<<=1,v==r-1?(v=0,d.push(n(m)),m=0):v++;for(t=a.charCodeAt(0),e=0;8>e;e++)m=m<<1|1&t,v==r-1?(v=0,d.push(n(m)),m=0):v++,t>>=1}else{for(t=1,e=0;h>e;e++)m=m<<1|t,v==r-1?(v=0,d.push(n(m)),m=0):v++,t=0;for(t=a.charCodeAt(0),e=0;16>e;e++)m=m<<1|1&t,v==r-1?(v=0,d.push(n(m)),m=0):v++,t>>=1}l--,0==l&&(l=Math.pow(2,h),h++),delete p[a]}else for(t=s[a],e=0;h>e;e++)m=m<<1|1&t,v==r-1?(v=0,d.push(n(m)),m=0):v++,t>>=1;l--,0==l&&(l=Math.pow(2,h),h++)}for(t=2,e=0;h>e;e++)m=m<<1|1&t,v==r-1?(v=0,d.push(n(m)),m=0):v++,t>>=1;for(;;){if(m<<=1,v==r-1){d.push(n(m));break}v++}return d.join("")},decompress:function(o){return null==o?"":""==o?null:i._decompress(o.length,32768,function(r){return o.charCodeAt(r)})},_decompress:function(o,n,e){var t,i,s,p,u,c,a,l,f=[],h=4,d=4,m=3,v="",w=[],A={val:e(0),position:n,index:1};for(i=0;3>i;i+=1)f[i]=i;for(p=0,c=Math.pow(2,2),a=1;a!=c;)u=A.val&A.position,A.position>>=1,0==A.position&&(A.position=n,A.val=e(A.index++)),p|=(u>0?1:0)*a,a<<=1;switch(t=p){case 
0:for(p=0,c=Math.pow(2,8),a=1;a!=c;)u=A.val&A.position,A.position>>=1,0==A.position&&(A.position=n,A.val=e(A.index++)),p|=(u>0?1:0)*a,a<<=1;l=r(p);break;case 1:for(p=0,c=Math.pow(2,16),a=1;a!=c;)u=A.val&A.position,A.position>>=1,0==A.position&&(A.position=n,A.val=e(A.index++)),p|=(u>0?1:0)*a,a<<=1;l=r(p);break;case 2:return""}for(f[3]=l,s=l,w.push(l);;){if(A.index>o)return"";for(p=0,c=Math.pow(2,m),a=1;a!=c;)u=A.val&A.position,A.position>>=1,0==A.position&&(A.position=n,A.val=e(A.index++)),p|=(u>0?1:0)*a,a<<=1;switch(l=p){case 0:for(p=0,c=Math.pow(2,8),a=1;a!=c;)u=A.val&A.position,A.position>>=1,0==A.position&&(A.position=n,A.val=e(A.index++)),p|=(u>0?1:0)*a,a<<=1;f[d++]=r(p),l=d-1,h--;break;case 1:for(p=0,c=Math.pow(2,16),a=1;a!=c;)u=A.val&A.position,A.position>>=1,0==A.position&&(A.position=n,A.val=e(A.index++)),p|=(u>0?1:0)*a,a<<=1;f[d++]=r(p),l=d-1,h--;break;case 2:return w.join("")}if(0==h&&(h=Math.pow(2,m),m++),f[l])v=f[l];else{if(l!==d)return null;v=s+s.charAt(0)}w.push(v),f[d++]=s+v.charAt(0),h--,s=v,0==h&&(h=Math.pow(2,m),m++)}}};return i}();"function"==typeof define&&define.amd?define(function(){return LZString}):"undefined"!=typeof module&&null!=module&&(module.exports=LZString);
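LZString above offers paired compress/decompress entry points whose output alphabet varies by target (Base64, UTF-16, URI-safe). A round-trip sketch; the payload string is illustrative only:

  var original = JSON.stringify({ chart: 'system.cpu', after: -600 });
  var packed = LZString.compressToBase64(original);      // ASCII-safe form
  console.assert(LZString.decompressFromBase64(packed) === original);
  // compressToEncodedURIComponent() yields a variant safe to embed in URLs.
  var inUrl = LZString.compressToEncodedURIComponent(original);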
diff --git a/web/gui/lib/pako-1.0.6.min.js b/web/gui/lib/pako-1.0.6.min.js
new file mode 100644
index 0000000..165fc26
--- /dev/null
+++ b/web/gui/lib/pako-1.0.6.min.js
@@ -0,0 +1,2 @@
+// SPDX-License-Identifier: MIT
+!function(t){if("object"==typeof exports&&"undefined"!=typeof module)module.exports=t();else if("function"==typeof define&&define.amd)define([],t);else{("undefined"!=typeof window?window:"undefined"!=typeof global?global:"undefined"!=typeof self?self:this).pako=t()}}(function(){return function t(e,a,i){function n(s,o){if(!a[s]){if(!e[s]){var l="function"==typeof require&&require;if(!o&&l)return l(s,!0);if(r)return r(s,!0);var h=new Error("Cannot find module '"+s+"'");throw h.code="MODULE_NOT_FOUND",h}var d=a[s]={exports:{}};e[s][0].call(d.exports,function(t){var a=e[s][1][t];return n(a||t)},d,d.exports,t,e,a,i)}return a[s].exports}for(var r="function"==typeof require&&require,s=0;s<i.length;s++)n(i[s]);return n}({1:[function(t,e,a){"use strict";function i(t){if(!(this instanceof i))return new i(t);this.options=s.assign({level:_,method:c,chunkSize:16384,windowBits:15,memLevel:8,strategy:u,to:""},t||{});var e=this.options;e.raw&&e.windowBits>0?e.windowBits=-e.windowBits:e.gzip&&e.windowBits>0&&e.windowBits<16&&(e.windowBits+=16),this.err=0,this.msg="",this.ended=!1,this.chunks=[],this.strm=new h,this.strm.avail_out=0;var a=r.deflateInit2(this.strm,e.level,e.method,e.windowBits,e.memLevel,e.strategy);if(a!==f)throw new Error(l[a]);if(e.header&&r.deflateSetHeader(this.strm,e.header),e.dictionary){var n;if(n="string"==typeof e.dictionary?o.string2buf(e.dictionary):"[object ArrayBuffer]"===d.call(e.dictionary)?new Uint8Array(e.dictionary):e.dictionary,(a=r.deflateSetDictionary(this.strm,n))!==f)throw new Error(l[a]);this._dict_set=!0}}function n(t,e){var a=new i(e);if(a.push(t,!0),a.err)throw a.msg||l[a.err];return a.result}var r=t("./zlib/deflate"),s=t("./utils/common"),o=t("./utils/strings"),l=t("./zlib/messages"),h=t("./zlib/zstream"),d=Object.prototype.toString,f=0,_=-1,u=0,c=8;i.prototype.push=function(t,e){var a,i,n=this.strm,l=this.options.chunkSize;if(this.ended)return!1;i=e===~~e?e:!0===e?4:0,"string"==typeof t?n.input=o.string2buf(t):"[object ArrayBuffer]"===d.call(t)?n.input=new Uint8Array(t):n.input=t,n.next_in=0,n.avail_in=n.input.length;do{if(0===n.avail_out&&(n.output=new s.Buf8(l),n.next_out=0,n.avail_out=l),1!==(a=r.deflate(n,i))&&a!==f)return this.onEnd(a),this.ended=!0,!1;0!==n.avail_out&&(0!==n.avail_in||4!==i&&2!==i)||("string"===this.options.to?this.onData(o.buf2binstring(s.shrinkBuf(n.output,n.next_out))):this.onData(s.shrinkBuf(n.output,n.next_out)))}while((n.avail_in>0||0===n.avail_out)&&1!==a);return 4===i?(a=r.deflateEnd(this.strm),this.onEnd(a),this.ended=!0,a===f):2!==i||(this.onEnd(f),n.avail_out=0,!0)},i.prototype.onData=function(t){this.chunks.push(t)},i.prototype.onEnd=function(t){t===f&&("string"===this.options.to?this.result=this.chunks.join(""):this.result=s.flattenChunks(this.chunks)),this.chunks=[],this.err=t,this.msg=this.strm.msg},a.Deflate=i,a.deflate=n,a.deflateRaw=function(t,e){return e=e||{},e.raw=!0,n(t,e)},a.gzip=function(t,e){return e=e||{},e.gzip=!0,n(t,e)}},{"./utils/common":3,"./utils/strings":4,"./zlib/deflate":8,"./zlib/messages":13,"./zlib/zstream":15}],2:[function(t,e,a){"use strict";function i(t){if(!(this instanceof i))return new i(t);this.options=s.assign({chunkSize:16384,windowBits:0,to:""},t||{});var e=this.options;e.raw&&e.windowBits>=0&&e.windowBits<16&&(e.windowBits=-e.windowBits,0===e.windowBits&&(e.windowBits=-15)),!(e.windowBits>=0&&e.windowBits<16)||t&&t.windowBits||(e.windowBits+=32),e.windowBits>15&&e.windowBits<48&&0==(15&e.windowBits)&&(e.windowBits|=15),this.err=0,this.msg="",this.ended=!1,this.chunks=[],this.strm=new 
d,this.strm.avail_out=0;var a=r.inflateInit2(this.strm,e.windowBits);if(a!==l.Z_OK)throw new Error(h[a]);this.header=new f,r.inflateGetHeader(this.strm,this.header)}function n(t,e){var a=new i(e);if(a.push(t,!0),a.err)throw a.msg||h[a.err];return a.result}var r=t("./zlib/inflate"),s=t("./utils/common"),o=t("./utils/strings"),l=t("./zlib/constants"),h=t("./zlib/messages"),d=t("./zlib/zstream"),f=t("./zlib/gzheader"),_=Object.prototype.toString;i.prototype.push=function(t,e){var a,i,n,h,d,f,u=this.strm,c=this.options.chunkSize,b=this.options.dictionary,g=!1;if(this.ended)return!1;i=e===~~e?e:!0===e?l.Z_FINISH:l.Z_NO_FLUSH,"string"==typeof t?u.input=o.binstring2buf(t):"[object ArrayBuffer]"===_.call(t)?u.input=new Uint8Array(t):u.input=t,u.next_in=0,u.avail_in=u.input.length;do{if(0===u.avail_out&&(u.output=new s.Buf8(c),u.next_out=0,u.avail_out=c),(a=r.inflate(u,l.Z_NO_FLUSH))===l.Z_NEED_DICT&&b&&(f="string"==typeof b?o.string2buf(b):"[object ArrayBuffer]"===_.call(b)?new Uint8Array(b):b,a=r.inflateSetDictionary(this.strm,f)),a===l.Z_BUF_ERROR&&!0===g&&(a=l.Z_OK,g=!1),a!==l.Z_STREAM_END&&a!==l.Z_OK)return this.onEnd(a),this.ended=!0,!1;u.next_out&&(0!==u.avail_out&&a!==l.Z_STREAM_END&&(0!==u.avail_in||i!==l.Z_FINISH&&i!==l.Z_SYNC_FLUSH)||("string"===this.options.to?(n=o.utf8border(u.output,u.next_out),h=u.next_out-n,d=o.buf2string(u.output,n),u.next_out=h,u.avail_out=c-h,h&&s.arraySet(u.output,u.output,n,h,0),this.onData(d)):this.onData(s.shrinkBuf(u.output,u.next_out)))),0===u.avail_in&&0===u.avail_out&&(g=!0)}while((u.avail_in>0||0===u.avail_out)&&a!==l.Z_STREAM_END);return a===l.Z_STREAM_END&&(i=l.Z_FINISH),i===l.Z_FINISH?(a=r.inflateEnd(this.strm),this.onEnd(a),this.ended=!0,a===l.Z_OK):i!==l.Z_SYNC_FLUSH||(this.onEnd(l.Z_OK),u.avail_out=0,!0)},i.prototype.onData=function(t){this.chunks.push(t)},i.prototype.onEnd=function(t){t===l.Z_OK&&("string"===this.options.to?this.result=this.chunks.join(""):this.result=s.flattenChunks(this.chunks)),this.chunks=[],this.err=t,this.msg=this.strm.msg},a.Inflate=i,a.inflate=n,a.inflateRaw=function(t,e){return e=e||{},e.raw=!0,n(t,e)},a.ungzip=n},{"./utils/common":3,"./utils/strings":4,"./zlib/constants":6,"./zlib/gzheader":9,"./zlib/inflate":11,"./zlib/messages":13,"./zlib/zstream":15}],3:[function(t,e,a){"use strict";function i(t,e){return Object.prototype.hasOwnProperty.call(t,e)}var n="undefined"!=typeof Uint8Array&&"undefined"!=typeof Uint16Array&&"undefined"!=typeof Int32Array;a.assign=function(t){for(var e=Array.prototype.slice.call(arguments,1);e.length;){var a=e.shift();if(a){if("object"!=typeof a)throw new TypeError(a+"must be non-object");for(var n in a)i(a,n)&&(t[n]=a[n])}}return t},a.shrinkBuf=function(t,e){return t.length===e?t:t.subarray?t.subarray(0,e):(t.length=e,t)};var r={arraySet:function(t,e,a,i,n){if(e.subarray&&t.subarray)t.set(e.subarray(a,a+i),n);else for(var r=0;r<i;r++)t[n+r]=e[a+r]},flattenChunks:function(t){var e,a,i,n,r,s;for(i=0,e=0,a=t.length;e<a;e++)i+=t[e].length;for(s=new Uint8Array(i),n=0,e=0,a=t.length;e<a;e++)r=t[e],s.set(r,n),n+=r.length;return s}},s={arraySet:function(t,e,a,i,n){for(var r=0;r<i;r++)t[n+r]=e[a+r]},flattenChunks:function(t){return[].concat.apply([],t)}};a.setTyped=function(t){t?(a.Buf8=Uint8Array,a.Buf16=Uint16Array,a.Buf32=Int32Array,a.assign(a,r)):(a.Buf8=Array,a.Buf16=Array,a.Buf32=Array,a.assign(a,s))},a.setTyped(n)},{}],4:[function(t,e,a){"use strict";function i(t,e){if(e<65537&&(t.subarray&&s||!t.subarray&&r))return String.fromCharCode.apply(null,n.shrinkBuf(t,e));for(var 
a="",i=0;i<e;i++)a+=String.fromCharCode(t[i]);return a}var n=t("./common"),r=!0,s=!0;try{String.fromCharCode.apply(null,[0])}catch(t){r=!1}try{String.fromCharCode.apply(null,new Uint8Array(1))}catch(t){s=!1}for(var o=new n.Buf8(256),l=0;l<256;l++)o[l]=l>=252?6:l>=248?5:l>=240?4:l>=224?3:l>=192?2:1;o[254]=o[254]=1,a.string2buf=function(t){var e,a,i,r,s,o=t.length,l=0;for(r=0;r<o;r++)55296==(64512&(a=t.charCodeAt(r)))&&r+1<o&&56320==(64512&(i=t.charCodeAt(r+1)))&&(a=65536+(a-55296<<10)+(i-56320),r++),l+=a<128?1:a<2048?2:a<65536?3:4;for(e=new n.Buf8(l),s=0,r=0;s<l;r++)55296==(64512&(a=t.charCodeAt(r)))&&r+1<o&&56320==(64512&(i=t.charCodeAt(r+1)))&&(a=65536+(a-55296<<10)+(i-56320),r++),a<128?e[s++]=a:a<2048?(e[s++]=192|a>>>6,e[s++]=128|63&a):a<65536?(e[s++]=224|a>>>12,e[s++]=128|a>>>6&63,e[s++]=128|63&a):(e[s++]=240|a>>>18,e[s++]=128|a>>>12&63,e[s++]=128|a>>>6&63,e[s++]=128|63&a);return e},a.buf2binstring=function(t){return i(t,t.length)},a.binstring2buf=function(t){for(var e=new n.Buf8(t.length),a=0,i=e.length;a<i;a++)e[a]=t.charCodeAt(a);return e},a.buf2string=function(t,e){var a,n,r,s,l=e||t.length,h=new Array(2*l);for(n=0,a=0;a<l;)if((r=t[a++])<128)h[n++]=r;else if((s=o[r])>4)h[n++]=65533,a+=s-1;else{for(r&=2===s?31:3===s?15:7;s>1&&a<l;)r=r<<6|63&t[a++],s--;s>1?h[n++]=65533:r<65536?h[n++]=r:(r-=65536,h[n++]=55296|r>>10&1023,h[n++]=56320|1023&r)}return i(h,n)},a.utf8border=function(t,e){var a;for((e=e||t.length)>t.length&&(e=t.length),a=e-1;a>=0&&128==(192&t[a]);)a--;return a<0?e:0===a?e:a+o[t[a]]>e?a:e}},{"./common":3}],5:[function(t,e,a){"use strict";e.exports=function(t,e,a,i){for(var n=65535&t|0,r=t>>>16&65535|0,s=0;0!==a;){a-=s=a>2e3?2e3:a;do{r=r+(n=n+e[i++]|0)|0}while(--s);n%=65521,r%=65521}return n|r<<16|0}},{}],6:[function(t,e,a){"use strict";e.exports={Z_NO_FLUSH:0,Z_PARTIAL_FLUSH:1,Z_SYNC_FLUSH:2,Z_FULL_FLUSH:3,Z_FINISH:4,Z_BLOCK:5,Z_TREES:6,Z_OK:0,Z_STREAM_END:1,Z_NEED_DICT:2,Z_ERRNO:-1,Z_STREAM_ERROR:-2,Z_DATA_ERROR:-3,Z_BUF_ERROR:-5,Z_NO_COMPRESSION:0,Z_BEST_SPEED:1,Z_BEST_COMPRESSION:9,Z_DEFAULT_COMPRESSION:-1,Z_FILTERED:1,Z_HUFFMAN_ONLY:2,Z_RLE:3,Z_FIXED:4,Z_DEFAULT_STRATEGY:0,Z_BINARY:0,Z_TEXT:1,Z_UNKNOWN:2,Z_DEFLATED:8}},{}],7:[function(t,e,a){"use strict";var i=function(){for(var t,e=[],a=0;a<256;a++){t=a;for(var i=0;i<8;i++)t=1&t?3988292384^t>>>1:t>>>1;e[a]=t}return e}();e.exports=function(t,e,a,n){var r=i,s=n+a;t^=-1;for(var o=n;o<s;o++)t=t>>>8^r[255&(t^e[o])];return-1^t}},{}],8:[function(t,e,a){"use strict";function i(t,e){return t.msg=A[e],e}function n(t){return(t<<1)-(t>4?9:0)}function r(t){for(var e=t.length;--e>=0;)t[e]=0}function s(t){var e=t.state,a=e.pending;a>t.avail_out&&(a=t.avail_out),0!==a&&(z.arraySet(t.output,e.pending_buf,e.pending_out,a,t.next_out),t.next_out+=a,e.pending_out+=a,t.total_out+=a,t.avail_out-=a,e.pending-=a,0===e.pending&&(e.pending_out=0))}function o(t,e){B._tr_flush_block(t,t.block_start>=0?t.block_start:-1,t.strstart-t.block_start,e),t.block_start=t.strstart,s(t.strm)}function l(t,e){t.pending_buf[t.pending++]=e}function h(t,e){t.pending_buf[t.pending++]=e>>>8&255,t.pending_buf[t.pending++]=255&e}function d(t,e,a,i){var n=t.avail_in;return n>i&&(n=i),0===n?0:(t.avail_in-=n,z.arraySet(e,t.input,t.next_in,n,a),1===t.state.wrap?t.adler=S(t.adler,e,n,a):2===t.state.wrap&&(t.adler=E(t.adler,e,n,a)),t.next_in+=n,t.total_in+=n,n)}function f(t,e){var 
a,i,n=t.max_chain_length,r=t.strstart,s=t.prev_length,o=t.nice_match,l=t.strstart>t.w_size-it?t.strstart-(t.w_size-it):0,h=t.window,d=t.w_mask,f=t.prev,_=t.strstart+at,u=h[r+s-1],c=h[r+s];t.prev_length>=t.good_match&&(n>>=2),o>t.lookahead&&(o=t.lookahead);do{if(a=e,h[a+s]===c&&h[a+s-1]===u&&h[a]===h[r]&&h[++a]===h[r+1]){r+=2,a++;do{}while(h[++r]===h[++a]&&h[++r]===h[++a]&&h[++r]===h[++a]&&h[++r]===h[++a]&&h[++r]===h[++a]&&h[++r]===h[++a]&&h[++r]===h[++a]&&h[++r]===h[++a]&&r<_);if(i=at-(_-r),r=_-at,i>s){if(t.match_start=e,s=i,i>=o)break;u=h[r+s-1],c=h[r+s]}}}while((e=f[e&d])>l&&0!=--n);return s<=t.lookahead?s:t.lookahead}function _(t){var e,a,i,n,r,s=t.w_size;do{if(n=t.window_size-t.lookahead-t.strstart,t.strstart>=s+(s-it)){z.arraySet(t.window,t.window,s,s,0),t.match_start-=s,t.strstart-=s,t.block_start-=s,e=a=t.hash_size;do{i=t.head[--e],t.head[e]=i>=s?i-s:0}while(--a);e=a=s;do{i=t.prev[--e],t.prev[e]=i>=s?i-s:0}while(--a);n+=s}if(0===t.strm.avail_in)break;if(a=d(t.strm,t.window,t.strstart+t.lookahead,n),t.lookahead+=a,t.lookahead+t.insert>=et)for(r=t.strstart-t.insert,t.ins_h=t.window[r],t.ins_h=(t.ins_h<<t.hash_shift^t.window[r+1])&t.hash_mask;t.insert&&(t.ins_h=(t.ins_h<<t.hash_shift^t.window[r+et-1])&t.hash_mask,t.prev[r&t.w_mask]=t.head[t.ins_h],t.head[t.ins_h]=r,r++,t.insert--,!(t.lookahead+t.insert<et)););}while(t.lookahead<it&&0!==t.strm.avail_in)}function u(t,e){for(var a,i;;){if(t.lookahead<it){if(_(t),t.lookahead<it&&e===Z)return _t;if(0===t.lookahead)break}if(a=0,t.lookahead>=et&&(t.ins_h=(t.ins_h<<t.hash_shift^t.window[t.strstart+et-1])&t.hash_mask,a=t.prev[t.strstart&t.w_mask]=t.head[t.ins_h],t.head[t.ins_h]=t.strstart),0!==a&&t.strstart-a<=t.w_size-it&&(t.match_length=f(t,a)),t.match_length>=et)if(i=B._tr_tally(t,t.strstart-t.match_start,t.match_length-et),t.lookahead-=t.match_length,t.match_length<=t.max_lazy_match&&t.lookahead>=et){t.match_length--;do{t.strstart++,t.ins_h=(t.ins_h<<t.hash_shift^t.window[t.strstart+et-1])&t.hash_mask,a=t.prev[t.strstart&t.w_mask]=t.head[t.ins_h],t.head[t.ins_h]=t.strstart}while(0!=--t.match_length);t.strstart++}else t.strstart+=t.match_length,t.match_length=0,t.ins_h=t.window[t.strstart],t.ins_h=(t.ins_h<<t.hash_shift^t.window[t.strstart+1])&t.hash_mask;else i=B._tr_tally(t,0,t.window[t.strstart]),t.lookahead--,t.strstart++;if(i&&(o(t,!1),0===t.strm.avail_out))return _t}return t.insert=t.strstart<et-1?t.strstart:et-1,e===N?(o(t,!0),0===t.strm.avail_out?ct:bt):t.last_lit&&(o(t,!1),0===t.strm.avail_out)?_t:ut}function c(t,e){for(var a,i,n;;){if(t.lookahead<it){if(_(t),t.lookahead<it&&e===Z)return _t;if(0===t.lookahead)break}if(a=0,t.lookahead>=et&&(t.ins_h=(t.ins_h<<t.hash_shift^t.window[t.strstart+et-1])&t.hash_mask,a=t.prev[t.strstart&t.w_mask]=t.head[t.ins_h],t.head[t.ins_h]=t.strstart),t.prev_length=t.match_length,t.prev_match=t.match_start,t.match_length=et-1,0!==a&&t.prev_length<t.max_lazy_match&&t.strstart-a<=t.w_size-it&&(t.match_length=f(t,a),t.match_length<=5&&(t.strategy===H||t.match_length===et&&t.strstart-t.match_start>4096)&&(t.match_length=et-1)),t.prev_length>=et&&t.match_length<=t.prev_length){n=t.strstart+t.lookahead-et,i=B._tr_tally(t,t.strstart-1-t.prev_match,t.prev_length-et),t.lookahead-=t.prev_length-1,t.prev_length-=2;do{++t.strstart<=n&&(t.ins_h=(t.ins_h<<t.hash_shift^t.window[t.strstart+et-1])&t.hash_mask,a=t.prev[t.strstart&t.w_mask]=t.head[t.ins_h],t.head[t.ins_h]=t.strstart)}while(0!=--t.prev_length);if(t.match_available=0,t.match_length=et-1,t.strstart++,i&&(o(t,!1),0===t.strm.avail_out))return _t}else 
if(t.match_available){if((i=B._tr_tally(t,0,t.window[t.strstart-1]))&&o(t,!1),t.strstart++,t.lookahead--,0===t.strm.avail_out)return _t}else t.match_available=1,t.strstart++,t.lookahead--}return t.match_available&&(i=B._tr_tally(t,0,t.window[t.strstart-1]),t.match_available=0),t.insert=t.strstart<et-1?t.strstart:et-1,e===N?(o(t,!0),0===t.strm.avail_out?ct:bt):t.last_lit&&(o(t,!1),0===t.strm.avail_out)?_t:ut}function b(t,e){for(var a,i,n,r,s=t.window;;){if(t.lookahead<=at){if(_(t),t.lookahead<=at&&e===Z)return _t;if(0===t.lookahead)break}if(t.match_length=0,t.lookahead>=et&&t.strstart>0&&(n=t.strstart-1,(i=s[n])===s[++n]&&i===s[++n]&&i===s[++n])){r=t.strstart+at;do{}while(i===s[++n]&&i===s[++n]&&i===s[++n]&&i===s[++n]&&i===s[++n]&&i===s[++n]&&i===s[++n]&&i===s[++n]&&n<r);t.match_length=at-(r-n),t.match_length>t.lookahead&&(t.match_length=t.lookahead)}if(t.match_length>=et?(a=B._tr_tally(t,1,t.match_length-et),t.lookahead-=t.match_length,t.strstart+=t.match_length,t.match_length=0):(a=B._tr_tally(t,0,t.window[t.strstart]),t.lookahead--,t.strstart++),a&&(o(t,!1),0===t.strm.avail_out))return _t}return t.insert=0,e===N?(o(t,!0),0===t.strm.avail_out?ct:bt):t.last_lit&&(o(t,!1),0===t.strm.avail_out)?_t:ut}function g(t,e){for(var a;;){if(0===t.lookahead&&(_(t),0===t.lookahead)){if(e===Z)return _t;break}if(t.match_length=0,a=B._tr_tally(t,0,t.window[t.strstart]),t.lookahead--,t.strstart++,a&&(o(t,!1),0===t.strm.avail_out))return _t}return t.insert=0,e===N?(o(t,!0),0===t.strm.avail_out?ct:bt):t.last_lit&&(o(t,!1),0===t.strm.avail_out)?_t:ut}function m(t,e,a,i,n){this.good_length=t,this.max_lazy=e,this.nice_length=a,this.max_chain=i,this.func=n}function w(t){t.window_size=2*t.w_size,r(t.head),t.max_lazy_match=x[t.level].max_lazy,t.good_match=x[t.level].good_length,t.nice_match=x[t.level].nice_length,t.max_chain_length=x[t.level].max_chain,t.strstart=0,t.block_start=0,t.lookahead=0,t.insert=0,t.match_length=t.prev_length=et-1,t.match_available=0,t.ins_h=0}function p(){this.strm=null,this.status=0,this.pending_buf=null,this.pending_buf_size=0,this.pending_out=0,this.pending=0,this.wrap=0,this.gzhead=null,this.gzindex=0,this.method=q,this.last_flush=-1,this.w_size=0,this.w_bits=0,this.w_mask=0,this.window=null,this.window_size=0,this.prev=null,this.head=null,this.ins_h=0,this.hash_size=0,this.hash_bits=0,this.hash_mask=0,this.hash_shift=0,this.block_start=0,this.match_length=0,this.prev_match=0,this.match_available=0,this.strstart=0,this.match_start=0,this.lookahead=0,this.prev_length=0,this.max_chain_length=0,this.max_lazy_match=0,this.level=0,this.strategy=0,this.good_match=0,this.nice_match=0,this.dyn_ltree=new z.Buf16(2*$),this.dyn_dtree=new z.Buf16(2*(2*Q+1)),this.bl_tree=new z.Buf16(2*(2*V+1)),r(this.dyn_ltree),r(this.dyn_dtree),r(this.bl_tree),this.l_desc=null,this.d_desc=null,this.bl_desc=null,this.bl_count=new z.Buf16(tt+1),this.heap=new z.Buf16(2*J+1),r(this.heap),this.heap_len=0,this.heap_max=0,this.depth=new z.Buf16(2*J+1),r(this.depth),this.l_buf=0,this.lit_bufsize=0,this.last_lit=0,this.d_buf=0,this.opt_len=0,this.static_len=0,this.matches=0,this.insert=0,this.bi_buf=0,this.bi_valid=0}function v(t){var e;return t&&t.state?(t.total_in=t.total_out=0,t.data_type=Y,e=t.state,e.pending=0,e.pending_out=0,e.wrap<0&&(e.wrap=-e.wrap),e.status=e.wrap?rt:dt,t.adler=2===e.wrap?0:1,e.last_flush=Z,B._tr_init(e),D):i(t,U)}function k(t){var e=v(t);return e===D&&w(t.state),e}function y(t,e,a,n,r,s){if(!t)return U;var 
o=1;if(e===L&&(e=6),n<0?(o=0,n=-n):n>15&&(o=2,n-=16),r<1||r>G||a!==q||n<8||n>15||e<0||e>9||s<0||s>M)return i(t,U);8===n&&(n=9);var l=new p;return t.state=l,l.strm=t,l.wrap=o,l.gzhead=null,l.w_bits=n,l.w_size=1<<l.w_bits,l.w_mask=l.w_size-1,l.hash_bits=r+7,l.hash_size=1<<l.hash_bits,l.hash_mask=l.hash_size-1,l.hash_shift=~~((l.hash_bits+et-1)/et),l.window=new z.Buf8(2*l.w_size),l.head=new z.Buf16(l.hash_size),l.prev=new z.Buf16(l.w_size),l.lit_bufsize=1<<r+6,l.pending_buf_size=4*l.lit_bufsize,l.pending_buf=new z.Buf8(l.pending_buf_size),l.d_buf=1*l.lit_bufsize,l.l_buf=3*l.lit_bufsize,l.level=e,l.strategy=s,l.method=a,k(t)}var x,z=t("../utils/common"),B=t("./trees"),S=t("./adler32"),E=t("./crc32"),A=t("./messages"),Z=0,R=1,C=3,N=4,O=5,D=0,I=1,U=-2,T=-3,F=-5,L=-1,H=1,j=2,K=3,M=4,P=0,Y=2,q=8,G=9,X=15,W=8,J=286,Q=30,V=19,$=2*J+1,tt=15,et=3,at=258,it=at+et+1,nt=32,rt=42,st=69,ot=73,lt=91,ht=103,dt=113,ft=666,_t=1,ut=2,ct=3,bt=4,gt=3;x=[new m(0,0,0,0,function(t,e){var a=65535;for(a>t.pending_buf_size-5&&(a=t.pending_buf_size-5);;){if(t.lookahead<=1){if(_(t),0===t.lookahead&&e===Z)return _t;if(0===t.lookahead)break}t.strstart+=t.lookahead,t.lookahead=0;var i=t.block_start+a;if((0===t.strstart||t.strstart>=i)&&(t.lookahead=t.strstart-i,t.strstart=i,o(t,!1),0===t.strm.avail_out))return _t;if(t.strstart-t.block_start>=t.w_size-it&&(o(t,!1),0===t.strm.avail_out))return _t}return t.insert=0,e===N?(o(t,!0),0===t.strm.avail_out?ct:bt):(t.strstart>t.block_start&&(o(t,!1),t.strm.avail_out),_t)}),new m(4,4,8,4,u),new m(4,5,16,8,u),new m(4,6,32,32,u),new m(4,4,16,16,c),new m(8,16,32,32,c),new m(8,16,128,128,c),new m(8,32,128,256,c),new m(32,128,258,1024,c),new m(32,258,258,4096,c)],a.deflateInit=function(t,e){return y(t,e,q,X,W,P)},a.deflateInit2=y,a.deflateReset=k,a.deflateResetKeep=v,a.deflateSetHeader=function(t,e){return t&&t.state?2!==t.state.wrap?U:(t.state.gzhead=e,D):U},a.deflate=function(t,e){var a,o,d,f;if(!t||!t.state||e>O||e<0)return t?i(t,U):U;if(o=t.state,!t.output||!t.input&&0!==t.avail_in||o.status===ft&&e!==N)return i(t,0===t.avail_out?F:U);if(o.strm=t,a=o.last_flush,o.last_flush=e,o.status===rt)if(2===o.wrap)t.adler=0,l(o,31),l(o,139),l(o,8),o.gzhead?(l(o,(o.gzhead.text?1:0)+(o.gzhead.hcrc?2:0)+(o.gzhead.extra?4:0)+(o.gzhead.name?8:0)+(o.gzhead.comment?16:0)),l(o,255&o.gzhead.time),l(o,o.gzhead.time>>8&255),l(o,o.gzhead.time>>16&255),l(o,o.gzhead.time>>24&255),l(o,9===o.level?2:o.strategy>=j||o.level<2?4:0),l(o,255&o.gzhead.os),o.gzhead.extra&&o.gzhead.extra.length&&(l(o,255&o.gzhead.extra.length),l(o,o.gzhead.extra.length>>8&255)),o.gzhead.hcrc&&(t.adler=E(t.adler,o.pending_buf,o.pending,0)),o.gzindex=0,o.status=st):(l(o,0),l(o,0),l(o,0),l(o,0),l(o,0),l(o,9===o.level?2:o.strategy>=j||o.level<2?4:0),l(o,gt),o.status=dt);else{var _=q+(o.w_bits-8<<4)<<8;_|=(o.strategy>=j||o.level<2?0:o.level<6?1:6===o.level?2:3)<<6,0!==o.strstart&&(_|=nt),_+=31-_%31,o.status=dt,h(o,_),0!==o.strstart&&(h(o,t.adler>>>16),h(o,65535&t.adler)),t.adler=1}if(o.status===st)if(o.gzhead.extra){for(d=o.pending;o.gzindex<(65535&o.gzhead.extra.length)&&(o.pending!==o.pending_buf_size||(o.gzhead.hcrc&&o.pending>d&&(t.adler=E(t.adler,o.pending_buf,o.pending-d,d)),s(t),d=o.pending,o.pending!==o.pending_buf_size));)l(o,255&o.gzhead.extra[o.gzindex]),o.gzindex++;o.gzhead.hcrc&&o.pending>d&&(t.adler=E(t.adler,o.pending_buf,o.pending-d,d)),o.gzindex===o.gzhead.extra.length&&(o.gzindex=0,o.status=ot)}else 
o.status=ot;if(o.status===ot)if(o.gzhead.name){d=o.pending;do{if(o.pending===o.pending_buf_size&&(o.gzhead.hcrc&&o.pending>d&&(t.adler=E(t.adler,o.pending_buf,o.pending-d,d)),s(t),d=o.pending,o.pending===o.pending_buf_size)){f=1;break}f=o.gzindex<o.gzhead.name.length?255&o.gzhead.name.charCodeAt(o.gzindex++):0,l(o,f)}while(0!==f);o.gzhead.hcrc&&o.pending>d&&(t.adler=E(t.adler,o.pending_buf,o.pending-d,d)),0===f&&(o.gzindex=0,o.status=lt)}else o.status=lt;if(o.status===lt)if(o.gzhead.comment){d=o.pending;do{if(o.pending===o.pending_buf_size&&(o.gzhead.hcrc&&o.pending>d&&(t.adler=E(t.adler,o.pending_buf,o.pending-d,d)),s(t),d=o.pending,o.pending===o.pending_buf_size)){f=1;break}f=o.gzindex<o.gzhead.comment.length?255&o.gzhead.comment.charCodeAt(o.gzindex++):0,l(o,f)}while(0!==f);o.gzhead.hcrc&&o.pending>d&&(t.adler=E(t.adler,o.pending_buf,o.pending-d,d)),0===f&&(o.status=ht)}else o.status=ht;if(o.status===ht&&(o.gzhead.hcrc?(o.pending+2>o.pending_buf_size&&s(t),o.pending+2<=o.pending_buf_size&&(l(o,255&t.adler),l(o,t.adler>>8&255),t.adler=0,o.status=dt)):o.status=dt),0!==o.pending){if(s(t),0===t.avail_out)return o.last_flush=-1,D}else if(0===t.avail_in&&n(e)<=n(a)&&e!==N)return i(t,F);if(o.status===ft&&0!==t.avail_in)return i(t,F);if(0!==t.avail_in||0!==o.lookahead||e!==Z&&o.status!==ft){var u=o.strategy===j?g(o,e):o.strategy===K?b(o,e):x[o.level].func(o,e);if(u!==ct&&u!==bt||(o.status=ft),u===_t||u===ct)return 0===t.avail_out&&(o.last_flush=-1),D;if(u===ut&&(e===R?B._tr_align(o):e!==O&&(B._tr_stored_block(o,0,0,!1),e===C&&(r(o.head),0===o.lookahead&&(o.strstart=0,o.block_start=0,o.insert=0))),s(t),0===t.avail_out))return o.last_flush=-1,D}return e!==N?D:o.wrap<=0?I:(2===o.wrap?(l(o,255&t.adler),l(o,t.adler>>8&255),l(o,t.adler>>16&255),l(o,t.adler>>24&255),l(o,255&t.total_in),l(o,t.total_in>>8&255),l(o,t.total_in>>16&255),l(o,t.total_in>>24&255)):(h(o,t.adler>>>16),h(o,65535&t.adler)),s(t),o.wrap>0&&(o.wrap=-o.wrap),0!==o.pending?D:I)},a.deflateEnd=function(t){var e;return t&&t.state?(e=t.state.status)!==rt&&e!==st&&e!==ot&&e!==lt&&e!==ht&&e!==dt&&e!==ft?i(t,U):(t.state=null,e===dt?i(t,T):D):U},a.deflateSetDictionary=function(t,e){var a,i,n,s,o,l,h,d,f=e.length;if(!t||!t.state)return U;if(a=t.state,2===(s=a.wrap)||1===s&&a.status!==rt||a.lookahead)return U;for(1===s&&(t.adler=S(t.adler,e,f,0)),a.wrap=0,f>=a.w_size&&(0===s&&(r(a.head),a.strstart=0,a.block_start=0,a.insert=0),d=new z.Buf8(a.w_size),z.arraySet(d,e,f-a.w_size,a.w_size,0),e=d,f=a.w_size),o=t.avail_in,l=t.next_in,h=t.input,t.avail_in=f,t.next_in=0,t.input=e,_(a);a.lookahead>=et;){i=a.strstart,n=a.lookahead-(et-1);do{a.ins_h=(a.ins_h<<a.hash_shift^a.window[i+et-1])&a.hash_mask,a.prev[i&a.w_mask]=a.head[a.ins_h],a.head[a.ins_h]=i,i++}while(--n);a.strstart=i,a.lookahead=et-1,_(a)}return a.strstart+=a.lookahead,a.block_start=a.strstart,a.insert=a.lookahead,a.lookahead=0,a.match_length=a.prev_length=et-1,a.match_available=0,t.next_in=l,t.input=h,t.avail_in=o,a.wrap=s,D},a.deflateInfo="pako deflate (from Nodeca project)"},{"../utils/common":3,"./adler32":5,"./crc32":7,"./messages":13,"./trees":14}],9:[function(t,e,a){"use strict";e.exports=function(){this.text=0,this.time=0,this.xflags=0,this.os=0,this.extra=null,this.extra_len=0,this.name="",this.comment="",this.hcrc=0,this.done=!1}},{}],10:[function(t,e,a){"use strict";e.exports=function(t,e){var 
a,i,n,r,s,o,l,h,d,f,_,u,c,b,g,m,w,p,v,k,y,x,z,B,S;a=t.state,i=t.next_in,B=t.input,n=i+(t.avail_in-5),r=t.next_out,S=t.output,s=r-(e-t.avail_out),o=r+(t.avail_out-257),l=a.dmax,h=a.wsize,d=a.whave,f=a.wnext,_=a.window,u=a.hold,c=a.bits,b=a.lencode,g=a.distcode,m=(1<<a.lenbits)-1,w=(1<<a.distbits)-1;t:do{c<15&&(u+=B[i++]<<c,c+=8,u+=B[i++]<<c,c+=8),p=b[u&m];e:for(;;){if(v=p>>>24,u>>>=v,c-=v,0===(v=p>>>16&255))S[r++]=65535&p;else{if(!(16&v)){if(0==(64&v)){p=b[(65535&p)+(u&(1<<v)-1)];continue e}if(32&v){a.mode=12;break t}t.msg="invalid literal/length code",a.mode=30;break t}k=65535&p,(v&=15)&&(c<v&&(u+=B[i++]<<c,c+=8),k+=u&(1<<v)-1,u>>>=v,c-=v),c<15&&(u+=B[i++]<<c,c+=8,u+=B[i++]<<c,c+=8),p=g[u&w];a:for(;;){if(v=p>>>24,u>>>=v,c-=v,!(16&(v=p>>>16&255))){if(0==(64&v)){p=g[(65535&p)+(u&(1<<v)-1)];continue a}t.msg="invalid distance code",a.mode=30;break t}if(y=65535&p,v&=15,c<v&&(u+=B[i++]<<c,(c+=8)<v&&(u+=B[i++]<<c,c+=8)),(y+=u&(1<<v)-1)>l){t.msg="invalid distance too far back",a.mode=30;break t}if(u>>>=v,c-=v,v=r-s,y>v){if((v=y-v)>d&&a.sane){t.msg="invalid distance too far back",a.mode=30;break t}if(x=0,z=_,0===f){if(x+=h-v,v<k){k-=v;do{S[r++]=_[x++]}while(--v);x=r-y,z=S}}else if(f<v){if(x+=h+f-v,(v-=f)<k){k-=v;do{S[r++]=_[x++]}while(--v);if(x=0,f<k){k-=v=f;do{S[r++]=_[x++]}while(--v);x=r-y,z=S}}}else if(x+=f-v,v<k){k-=v;do{S[r++]=_[x++]}while(--v);x=r-y,z=S}for(;k>2;)S[r++]=z[x++],S[r++]=z[x++],S[r++]=z[x++],k-=3;k&&(S[r++]=z[x++],k>1&&(S[r++]=z[x++]))}else{x=r-y;do{S[r++]=S[x++],S[r++]=S[x++],S[r++]=S[x++],k-=3}while(k>2);k&&(S[r++]=S[x++],k>1&&(S[r++]=S[x++]))}break}}break}}while(i<n&&r<o);i-=k=c>>3,u&=(1<<(c-=k<<3))-1,t.next_in=i,t.next_out=r,t.avail_in=i<n?n-i+5:5-(i-n),t.avail_out=r<o?o-r+257:257-(r-o),a.hold=u,a.bits=c}},{}],11:[function(t,e,a){"use strict";function i(t){return(t>>>24&255)+(t>>>8&65280)+((65280&t)<<8)+((255&t)<<24)}function n(){this.mode=0,this.last=!1,this.wrap=0,this.havedict=!1,this.flags=0,this.dmax=0,this.check=0,this.total=0,this.head=null,this.wbits=0,this.wsize=0,this.whave=0,this.wnext=0,this.window=null,this.hold=0,this.bits=0,this.length=0,this.offset=0,this.extra=0,this.lencode=null,this.distcode=null,this.lenbits=0,this.distbits=0,this.ncode=0,this.nlen=0,this.ndist=0,this.have=0,this.next=null,this.lens=new u.Buf16(320),this.work=new u.Buf16(288),this.lendyn=null,this.distdyn=null,this.sane=0,this.back=0,this.was=0}function r(t){var e;return t&&t.state?(e=t.state,t.total_in=t.total_out=e.total=0,t.msg="",e.wrap&&(t.adler=1&e.wrap),e.mode=N,e.last=0,e.havedict=0,e.dmax=32768,e.head=null,e.hold=0,e.bits=0,e.lencode=e.lendyn=new u.Buf32(dt),e.distcode=e.distdyn=new u.Buf32(ft),e.sane=1,e.back=-1,z):E}function s(t){var e;return t&&t.state?(e=t.state,e.wsize=0,e.whave=0,e.wnext=0,r(t)):E}function o(t,e){var a,i;return t&&t.state?(i=t.state,e<0?(a=0,e=-e):(a=1+(e>>4),e<48&&(e&=15)),e&&(e<8||e>15)?E:(null!==i.window&&i.wbits!==e&&(i.window=null),i.wrap=a,i.wbits=e,s(t))):E}function l(t,e){var a,i;return t?(i=new n,t.state=i,i.window=null,(a=o(t,e))!==z&&(t.state=null),a):E}function h(t){if(ut){var e;for(f=new u.Buf32(512),_=new u.Buf32(32),e=0;e<144;)t.lens[e++]=8;for(;e<256;)t.lens[e++]=9;for(;e<280;)t.lens[e++]=7;for(;e<288;)t.lens[e++]=8;for(m(p,t.lens,0,288,f,0,t.work,{bits:9}),e=0;e<32;)t.lens[e++]=5;m(v,t.lens,0,32,_,0,t.work,{bits:5}),ut=!1}t.lencode=f,t.lenbits=9,t.distcode=_,t.distbits=5}function d(t,e,a,i){var n,r=t.state;return null===r.window&&(r.wsize=1<<r.wbits,r.wnext=0,r.whave=0,r.window=new 
u.Buf8(r.wsize)),i>=r.wsize?(u.arraySet(r.window,e,a-r.wsize,r.wsize,0),r.wnext=0,r.whave=r.wsize):((n=r.wsize-r.wnext)>i&&(n=i),u.arraySet(r.window,e,a-i,n,r.wnext),(i-=n)?(u.arraySet(r.window,e,a-i,i,0),r.wnext=i,r.whave=r.wsize):(r.wnext+=n,r.wnext===r.wsize&&(r.wnext=0),r.whave<r.wsize&&(r.whave+=n))),0}var f,_,u=t("../utils/common"),c=t("./adler32"),b=t("./crc32"),g=t("./inffast"),m=t("./inftrees"),w=0,p=1,v=2,k=4,y=5,x=6,z=0,B=1,S=2,E=-2,A=-3,Z=-4,R=-5,C=8,N=1,O=2,D=3,I=4,U=5,T=6,F=7,L=8,H=9,j=10,K=11,M=12,P=13,Y=14,q=15,G=16,X=17,W=18,J=19,Q=20,V=21,$=22,tt=23,et=24,at=25,it=26,nt=27,rt=28,st=29,ot=30,lt=31,ht=32,dt=852,ft=592,_t=15,ut=!0;a.inflateReset=s,a.inflateReset2=o,a.inflateResetKeep=r,a.inflateInit=function(t){return l(t,_t)},a.inflateInit2=l,a.inflate=function(t,e){var a,n,r,s,o,l,f,_,dt,ft,_t,ut,ct,bt,gt,mt,wt,pt,vt,kt,yt,xt,zt,Bt,St=0,Et=new u.Buf8(4),At=[16,17,18,0,8,7,9,6,10,5,11,4,12,3,13,2,14,1,15];if(!t||!t.state||!t.output||!t.input&&0!==t.avail_in)return E;(a=t.state).mode===M&&(a.mode=P),o=t.next_out,r=t.output,f=t.avail_out,s=t.next_in,n=t.input,l=t.avail_in,_=a.hold,dt=a.bits,ft=l,_t=f,xt=z;t:for(;;)switch(a.mode){case N:if(0===a.wrap){a.mode=P;break}for(;dt<16;){if(0===l)break t;l--,_+=n[s++]<<dt,dt+=8}if(2&a.wrap&&35615===_){a.check=0,Et[0]=255&_,Et[1]=_>>>8&255,a.check=b(a.check,Et,2,0),_=0,dt=0,a.mode=O;break}if(a.flags=0,a.head&&(a.head.done=!1),!(1&a.wrap)||(((255&_)<<8)+(_>>8))%31){t.msg="incorrect header check",a.mode=ot;break}if((15&_)!==C){t.msg="unknown compression method",a.mode=ot;break}if(_>>>=4,dt-=4,yt=8+(15&_),0===a.wbits)a.wbits=yt;else if(yt>a.wbits){t.msg="invalid window size",a.mode=ot;break}a.dmax=1<<yt,t.adler=a.check=1,a.mode=512&_?j:M,_=0,dt=0;break;case O:for(;dt<16;){if(0===l)break t;l--,_+=n[s++]<<dt,dt+=8}if(a.flags=_,(255&a.flags)!==C){t.msg="unknown compression method",a.mode=ot;break}if(57344&a.flags){t.msg="unknown header flags set",a.mode=ot;break}a.head&&(a.head.text=_>>8&1),512&a.flags&&(Et[0]=255&_,Et[1]=_>>>8&255,a.check=b(a.check,Et,2,0)),_=0,dt=0,a.mode=D;case D:for(;dt<32;){if(0===l)break t;l--,_+=n[s++]<<dt,dt+=8}a.head&&(a.head.time=_),512&a.flags&&(Et[0]=255&_,Et[1]=_>>>8&255,Et[2]=_>>>16&255,Et[3]=_>>>24&255,a.check=b(a.check,Et,4,0)),_=0,dt=0,a.mode=I;case I:for(;dt<16;){if(0===l)break t;l--,_+=n[s++]<<dt,dt+=8}a.head&&(a.head.xflags=255&_,a.head.os=_>>8),512&a.flags&&(Et[0]=255&_,Et[1]=_>>>8&255,a.check=b(a.check,Et,2,0)),_=0,dt=0,a.mode=U;case U:if(1024&a.flags){for(;dt<16;){if(0===l)break t;l--,_+=n[s++]<<dt,dt+=8}a.length=_,a.head&&(a.head.extra_len=_),512&a.flags&&(Et[0]=255&_,Et[1]=_>>>8&255,a.check=b(a.check,Et,2,0)),_=0,dt=0}else a.head&&(a.head.extra=null);a.mode=T;case T:if(1024&a.flags&&((ut=a.length)>l&&(ut=l),ut&&(a.head&&(yt=a.head.extra_len-a.length,a.head.extra||(a.head.extra=new Array(a.head.extra_len)),u.arraySet(a.head.extra,n,s,ut,yt)),512&a.flags&&(a.check=b(a.check,n,ut,s)),l-=ut,s+=ut,a.length-=ut),a.length))break t;a.length=0,a.mode=F;case F:if(2048&a.flags){if(0===l)break t;ut=0;do{yt=n[s+ut++],a.head&&yt&&a.length<65536&&(a.head.name+=String.fromCharCode(yt))}while(yt&&ut<l);if(512&a.flags&&(a.check=b(a.check,n,ut,s)),l-=ut,s+=ut,yt)break t}else a.head&&(a.head.name=null);a.length=0,a.mode=L;case L:if(4096&a.flags){if(0===l)break t;ut=0;do{yt=n[s+ut++],a.head&&yt&&a.length<65536&&(a.head.comment+=String.fromCharCode(yt))}while(yt&&ut<l);if(512&a.flags&&(a.check=b(a.check,n,ut,s)),l-=ut,s+=ut,yt)break t}else a.head&&(a.head.comment=null);a.mode=H;case 
H:if(512&a.flags){for(;dt<16;){if(0===l)break t;l--,_+=n[s++]<<dt,dt+=8}if(_!==(65535&a.check)){t.msg="header crc mismatch",a.mode=ot;break}_=0,dt=0}a.head&&(a.head.hcrc=a.flags>>9&1,a.head.done=!0),t.adler=a.check=0,a.mode=M;break;case j:for(;dt<32;){if(0===l)break t;l--,_+=n[s++]<<dt,dt+=8}t.adler=a.check=i(_),_=0,dt=0,a.mode=K;case K:if(0===a.havedict)return t.next_out=o,t.avail_out=f,t.next_in=s,t.avail_in=l,a.hold=_,a.bits=dt,S;t.adler=a.check=1,a.mode=M;case M:if(e===y||e===x)break t;case P:if(a.last){_>>>=7&dt,dt-=7&dt,a.mode=nt;break}for(;dt<3;){if(0===l)break t;l--,_+=n[s++]<<dt,dt+=8}switch(a.last=1&_,_>>>=1,dt-=1,3&_){case 0:a.mode=Y;break;case 1:if(h(a),a.mode=Q,e===x){_>>>=2,dt-=2;break t}break;case 2:a.mode=X;break;case 3:t.msg="invalid block type",a.mode=ot}_>>>=2,dt-=2;break;case Y:for(_>>>=7&dt,dt-=7&dt;dt<32;){if(0===l)break t;l--,_+=n[s++]<<dt,dt+=8}if((65535&_)!=(_>>>16^65535)){t.msg="invalid stored block lengths",a.mode=ot;break}if(a.length=65535&_,_=0,dt=0,a.mode=q,e===x)break t;case q:a.mode=G;case G:if(ut=a.length){if(ut>l&&(ut=l),ut>f&&(ut=f),0===ut)break t;u.arraySet(r,n,s,ut,o),l-=ut,s+=ut,f-=ut,o+=ut,a.length-=ut;break}a.mode=M;break;case X:for(;dt<14;){if(0===l)break t;l--,_+=n[s++]<<dt,dt+=8}if(a.nlen=257+(31&_),_>>>=5,dt-=5,a.ndist=1+(31&_),_>>>=5,dt-=5,a.ncode=4+(15&_),_>>>=4,dt-=4,a.nlen>286||a.ndist>30){t.msg="too many length or distance symbols",a.mode=ot;break}a.have=0,a.mode=W;case W:for(;a.have<a.ncode;){for(;dt<3;){if(0===l)break t;l--,_+=n[s++]<<dt,dt+=8}a.lens[At[a.have++]]=7&_,_>>>=3,dt-=3}for(;a.have<19;)a.lens[At[a.have++]]=0;if(a.lencode=a.lendyn,a.lenbits=7,zt={bits:a.lenbits},xt=m(w,a.lens,0,19,a.lencode,0,a.work,zt),a.lenbits=zt.bits,xt){t.msg="invalid code lengths set",a.mode=ot;break}a.have=0,a.mode=J;case J:for(;a.have<a.nlen+a.ndist;){for(;St=a.lencode[_&(1<<a.lenbits)-1],gt=St>>>24,mt=St>>>16&255,wt=65535&St,!(gt<=dt);){if(0===l)break t;l--,_+=n[s++]<<dt,dt+=8}if(wt<16)_>>>=gt,dt-=gt,a.lens[a.have++]=wt;else{if(16===wt){for(Bt=gt+2;dt<Bt;){if(0===l)break t;l--,_+=n[s++]<<dt,dt+=8}if(_>>>=gt,dt-=gt,0===a.have){t.msg="invalid bit length repeat",a.mode=ot;break}yt=a.lens[a.have-1],ut=3+(3&_),_>>>=2,dt-=2}else if(17===wt){for(Bt=gt+3;dt<Bt;){if(0===l)break t;l--,_+=n[s++]<<dt,dt+=8}dt-=gt,yt=0,ut=3+(7&(_>>>=gt)),_>>>=3,dt-=3}else{for(Bt=gt+7;dt<Bt;){if(0===l)break t;l--,_+=n[s++]<<dt,dt+=8}dt-=gt,yt=0,ut=11+(127&(_>>>=gt)),_>>>=7,dt-=7}if(a.have+ut>a.nlen+a.ndist){t.msg="invalid bit length repeat",a.mode=ot;break}for(;ut--;)a.lens[a.have++]=yt}}if(a.mode===ot)break;if(0===a.lens[256]){t.msg="invalid code -- missing end-of-block",a.mode=ot;break}if(a.lenbits=9,zt={bits:a.lenbits},xt=m(p,a.lens,0,a.nlen,a.lencode,0,a.work,zt),a.lenbits=zt.bits,xt){t.msg="invalid literal/lengths set",a.mode=ot;break}if(a.distbits=6,a.distcode=a.distdyn,zt={bits:a.distbits},xt=m(v,a.lens,a.nlen,a.ndist,a.distcode,0,a.work,zt),a.distbits=zt.bits,xt){t.msg="invalid distances set",a.mode=ot;break}if(a.mode=Q,e===x)break t;case Q:a.mode=V;case V:if(l>=6&&f>=258){t.next_out=o,t.avail_out=f,t.next_in=s,t.avail_in=l,a.hold=_,a.bits=dt,g(t,_t),o=t.next_out,r=t.output,f=t.avail_out,s=t.next_in,n=t.input,l=t.avail_in,_=a.hold,dt=a.bits,a.mode===M&&(a.back=-1);break}for(a.back=0;St=a.lencode[_&(1<<a.lenbits)-1],gt=St>>>24,mt=St>>>16&255,wt=65535&St,!(gt<=dt);){if(0===l)break t;l--,_+=n[s++]<<dt,dt+=8}if(mt&&0==(240&mt)){for(pt=gt,vt=mt,kt=wt;St=a.lencode[kt+((_&(1<<pt+vt)-1)>>pt)],gt=St>>>24,mt=St>>>16&255,wt=65535&St,!(pt+gt<=dt);){if(0===l)break 
t;l--,_+=n[s++]<<dt,dt+=8}_>>>=pt,dt-=pt,a.back+=pt}if(_>>>=gt,dt-=gt,a.back+=gt,a.length=wt,0===mt){a.mode=it;break}if(32&mt){a.back=-1,a.mode=M;break}if(64&mt){t.msg="invalid literal/length code",a.mode=ot;break}a.extra=15&mt,a.mode=$;case $:if(a.extra){for(Bt=a.extra;dt<Bt;){if(0===l)break t;l--,_+=n[s++]<<dt,dt+=8}a.length+=_&(1<<a.extra)-1,_>>>=a.extra,dt-=a.extra,a.back+=a.extra}a.was=a.length,a.mode=tt;case tt:for(;St=a.distcode[_&(1<<a.distbits)-1],gt=St>>>24,mt=St>>>16&255,wt=65535&St,!(gt<=dt);){if(0===l)break t;l--,_+=n[s++]<<dt,dt+=8}if(0==(240&mt)){for(pt=gt,vt=mt,kt=wt;St=a.distcode[kt+((_&(1<<pt+vt)-1)>>pt)],gt=St>>>24,mt=St>>>16&255,wt=65535&St,!(pt+gt<=dt);){if(0===l)break t;l--,_+=n[s++]<<dt,dt+=8}_>>>=pt,dt-=pt,a.back+=pt}if(_>>>=gt,dt-=gt,a.back+=gt,64&mt){t.msg="invalid distance code",a.mode=ot;break}a.offset=wt,a.extra=15&mt,a.mode=et;case et:if(a.extra){for(Bt=a.extra;dt<Bt;){if(0===l)break t;l--,_+=n[s++]<<dt,dt+=8}a.offset+=_&(1<<a.extra)-1,_>>>=a.extra,dt-=a.extra,a.back+=a.extra}if(a.offset>a.dmax){t.msg="invalid distance too far back",a.mode=ot;break}a.mode=at;case at:if(0===f)break t;if(ut=_t-f,a.offset>ut){if((ut=a.offset-ut)>a.whave&&a.sane){t.msg="invalid distance too far back",a.mode=ot;break}ut>a.wnext?(ut-=a.wnext,ct=a.wsize-ut):ct=a.wnext-ut,ut>a.length&&(ut=a.length),bt=a.window}else bt=r,ct=o-a.offset,ut=a.length;ut>f&&(ut=f),f-=ut,a.length-=ut;do{r[o++]=bt[ct++]}while(--ut);0===a.length&&(a.mode=V);break;case it:if(0===f)break t;r[o++]=a.length,f--,a.mode=V;break;case nt:if(a.wrap){for(;dt<32;){if(0===l)break t;l--,_|=n[s++]<<dt,dt+=8}if(_t-=f,t.total_out+=_t,a.total+=_t,_t&&(t.adler=a.check=a.flags?b(a.check,r,_t,o-_t):c(a.check,r,_t,o-_t)),_t=f,(a.flags?_:i(_))!==a.check){t.msg="incorrect data check",a.mode=ot;break}_=0,dt=0}a.mode=rt;case rt:if(a.wrap&&a.flags){for(;dt<32;){if(0===l)break t;l--,_+=n[s++]<<dt,dt+=8}if(_!==(4294967295&a.total)){t.msg="incorrect length check",a.mode=ot;break}_=0,dt=0}a.mode=st;case st:xt=B;break t;case ot:xt=A;break t;case lt:return Z;case ht:default:return E}return t.next_out=o,t.avail_out=f,t.next_in=s,t.avail_in=l,a.hold=_,a.bits=dt,(a.wsize||_t!==t.avail_out&&a.mode<ot&&(a.mode<nt||e!==k))&&d(t,t.output,t.next_out,_t-t.avail_out)?(a.mode=lt,Z):(ft-=t.avail_in,_t-=t.avail_out,t.total_in+=ft,t.total_out+=_t,a.total+=_t,a.wrap&&_t&&(t.adler=a.check=a.flags?b(a.check,r,_t,t.next_out-_t):c(a.check,r,_t,t.next_out-_t)),t.data_type=a.bits+(a.last?64:0)+(a.mode===M?128:0)+(a.mode===Q||a.mode===q?256:0),(0===ft&&0===_t||e===k)&&xt===z&&(xt=R),xt)},a.inflateEnd=function(t){if(!t||!t.state)return E;var e=t.state;return e.window&&(e.window=null),t.state=null,z},a.inflateGetHeader=function(t,e){var a;return t&&t.state?0==(2&(a=t.state).wrap)?E:(a.head=e,e.done=!1,z):E},a.inflateSetDictionary=function(t,e){var a,i,n=e.length;return t&&t.state?0!==(a=t.state).wrap&&a.mode!==K?E:a.mode===K&&(i=1,(i=c(i,e,n,0))!==a.check)?A:d(t,e,n,n)?(a.mode=lt,Z):(a.havedict=1,z):E},a.inflateInfo="pako inflate (from Nodeca project)"},{"../utils/common":3,"./adler32":5,"./crc32":7,"./inffast":10,"./inftrees":12}],12:[function(t,e,a){"use strict";var 
i=t("../utils/common"),n=[3,4,5,6,7,8,9,10,11,13,15,17,19,23,27,31,35,43,51,59,67,83,99,115,131,163,195,227,258,0,0],r=[16,16,16,16,16,16,16,16,17,17,17,17,18,18,18,18,19,19,19,19,20,20,20,20,21,21,21,21,16,72,78],s=[1,2,3,4,5,7,9,13,17,25,33,49,65,97,129,193,257,385,513,769,1025,1537,2049,3073,4097,6145,8193,12289,16385,24577,0,0],o=[16,16,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,24,24,25,25,26,26,27,27,28,28,29,29,64,64];e.exports=function(t,e,a,l,h,d,f,_){var u,c,b,g,m,w,p,v,k,y=_.bits,x=0,z=0,B=0,S=0,E=0,A=0,Z=0,R=0,C=0,N=0,O=null,D=0,I=new i.Buf16(16),U=new i.Buf16(16),T=null,F=0;for(x=0;x<=15;x++)I[x]=0;for(z=0;z<l;z++)I[e[a+z]]++;for(E=y,S=15;S>=1&&0===I[S];S--);if(E>S&&(E=S),0===S)return h[d++]=20971520,h[d++]=20971520,_.bits=1,0;for(B=1;B<S&&0===I[B];B++);for(E<B&&(E=B),R=1,x=1;x<=15;x++)if(R<<=1,(R-=I[x])<0)return-1;if(R>0&&(0===t||1!==S))return-1;for(U[1]=0,x=1;x<15;x++)U[x+1]=U[x]+I[x];for(z=0;z<l;z++)0!==e[a+z]&&(f[U[e[a+z]]++]=z);if(0===t?(O=T=f,w=19):1===t?(O=n,D-=257,T=r,F-=257,w=256):(O=s,T=o,w=-1),N=0,z=0,x=B,m=d,A=E,Z=0,b=-1,C=1<<E,g=C-1,1===t&&C>852||2===t&&C>592)return 1;for(;;){p=x-Z,f[z]<w?(v=0,k=f[z]):f[z]>w?(v=T[F+f[z]],k=O[D+f[z]]):(v=96,k=0),u=1<<x-Z,B=c=1<<A;do{h[m+(N>>Z)+(c-=u)]=p<<24|v<<16|k|0}while(0!==c);for(u=1<<x-1;N&u;)u>>=1;if(0!==u?(N&=u-1,N+=u):N=0,z++,0==--I[x]){if(x===S)break;x=e[a+f[z]]}if(x>E&&(N&g)!==b){for(0===Z&&(Z=E),m+=B,R=1<<(A=x-Z);A+Z<S&&!((R-=I[A+Z])<=0);)A++,R<<=1;if(C+=1<<A,1===t&&C>852||2===t&&C>592)return 1;h[b=N&g]=E<<24|A<<16|m-d|0}}return 0!==N&&(h[m+N]=x-Z<<24|64<<16|0),_.bits=E,0}},{"../utils/common":3}],13:[function(t,e,a){"use strict";e.exports={2:"need dictionary",1:"stream end",0:"","-1":"file error","-2":"stream error","-3":"data error","-4":"insufficient memory","-5":"buffer error","-6":"incompatible version"}},{}],14:[function(t,e,a){"use strict";function i(t){for(var e=t.length;--e>=0;)t[e]=0}function n(t,e,a,i,n){this.static_tree=t,this.extra_bits=e,this.extra_base=a,this.elems=i,this.max_length=n,this.has_stree=t&&t.length}function r(t,e){this.dyn_tree=t,this.max_code=0,this.stat_desc=e}function s(t){return t<256?et[t]:et[256+(t>>>7)]}function o(t,e){t.pending_buf[t.pending++]=255&e,t.pending_buf[t.pending++]=e>>>8&255}function l(t,e,a){t.bi_valid>M-a?(t.bi_buf|=e<<t.bi_valid&65535,o(t,t.bi_buf),t.bi_buf=e>>M-t.bi_valid,t.bi_valid+=a-M):(t.bi_buf|=e<<t.bi_valid&65535,t.bi_valid+=a)}function h(t,e,a){l(t,a[2*e],a[2*e+1])}function d(t,e){var a=0;do{a|=1&t,t>>>=1,a<<=1}while(--e>0);return a>>>1}function f(t){16===t.bi_valid?(o(t,t.bi_buf),t.bi_buf=0,t.bi_valid=0):t.bi_valid>=8&&(t.pending_buf[t.pending++]=255&t.bi_buf,t.bi_buf>>=8,t.bi_valid-=8)}function _(t,e){var a,i,n,r,s,o,l=e.dyn_tree,h=e.max_code,d=e.stat_desc.static_tree,f=e.stat_desc.has_stree,_=e.stat_desc.extra_bits,u=e.stat_desc.extra_base,c=e.stat_desc.max_length,b=0;for(r=0;r<=K;r++)t.bl_count[r]=0;for(l[2*t.heap[t.heap_max]+1]=0,a=t.heap_max+1;a<j;a++)(r=l[2*l[2*(i=t.heap[a])+1]+1]+1)>c&&(r=c,b++),l[2*i+1]=r,i>h||(t.bl_count[r]++,s=0,i>=u&&(s=_[i-u]),o=l[2*i],t.opt_len+=o*(r+s),f&&(t.static_len+=o*(d[2*i+1]+s)));if(0!==b){do{for(r=c-1;0===t.bl_count[r];)r--;t.bl_count[r]--,t.bl_count[r+1]+=2,t.bl_count[c]--,b-=2}while(b>0);for(r=c;0!==r;r--)for(i=t.bl_count[r];0!==i;)(n=t.heap[--a])>h||(l[2*n+1]!==r&&(t.opt_len+=(r-l[2*n+1])*l[2*n],l[2*n+1]=r),i--)}}function u(t,e,a){var i,n,r=new Array(K+1),s=0;for(i=1;i<=K;i++)r[i]=s=s+a[i-1]<<1;for(n=0;n<=e;n++){var o=t[2*n+1];0!==o&&(t[2*n]=d(r[o]++,o))}}function c(){var t,e,a,i,r,s=new 
Array(K+1);for(a=0,i=0;i<U-1;i++)for(it[i]=a,t=0;t<1<<W[i];t++)at[a++]=i;for(at[a-1]=i,r=0,i=0;i<16;i++)for(nt[i]=r,t=0;t<1<<J[i];t++)et[r++]=i;for(r>>=7;i<L;i++)for(nt[i]=r<<7,t=0;t<1<<J[i]-7;t++)et[256+r++]=i;for(e=0;e<=K;e++)s[e]=0;for(t=0;t<=143;)$[2*t+1]=8,t++,s[8]++;for(;t<=255;)$[2*t+1]=9,t++,s[9]++;for(;t<=279;)$[2*t+1]=7,t++,s[7]++;for(;t<=287;)$[2*t+1]=8,t++,s[8]++;for(u($,F+1,s),t=0;t<L;t++)tt[2*t+1]=5,tt[2*t]=d(t,5);rt=new n($,W,T+1,F,K),st=new n(tt,J,0,L,K),ot=new n(new Array(0),Q,0,H,P)}function b(t){var e;for(e=0;e<F;e++)t.dyn_ltree[2*e]=0;for(e=0;e<L;e++)t.dyn_dtree[2*e]=0;for(e=0;e<H;e++)t.bl_tree[2*e]=0;t.dyn_ltree[2*Y]=1,t.opt_len=t.static_len=0,t.last_lit=t.matches=0}function g(t){t.bi_valid>8?o(t,t.bi_buf):t.bi_valid>0&&(t.pending_buf[t.pending++]=t.bi_buf),t.bi_buf=0,t.bi_valid=0}function m(t,e,a,i){g(t),i&&(o(t,a),o(t,~a)),A.arraySet(t.pending_buf,t.window,e,a,t.pending),t.pending+=a}function w(t,e,a,i){var n=2*e,r=2*a;return t[n]<t[r]||t[n]===t[r]&&i[e]<=i[a]}function p(t,e,a){for(var i=t.heap[a],n=a<<1;n<=t.heap_len&&(n<t.heap_len&&w(e,t.heap[n+1],t.heap[n],t.depth)&&n++,!w(e,i,t.heap[n],t.depth));)t.heap[a]=t.heap[n],a=n,n<<=1;t.heap[a]=i}function v(t,e,a){var i,n,r,o,d=0;if(0!==t.last_lit)do{i=t.pending_buf[t.d_buf+2*d]<<8|t.pending_buf[t.d_buf+2*d+1],n=t.pending_buf[t.l_buf+d],d++,0===i?h(t,n,e):(h(t,(r=at[n])+T+1,e),0!==(o=W[r])&&l(t,n-=it[r],o),h(t,r=s(--i),a),0!==(o=J[r])&&l(t,i-=nt[r],o))}while(d<t.last_lit);h(t,Y,e)}function k(t,e){var a,i,n,r=e.dyn_tree,s=e.stat_desc.static_tree,o=e.stat_desc.has_stree,l=e.stat_desc.elems,h=-1;for(t.heap_len=0,t.heap_max=j,a=0;a<l;a++)0!==r[2*a]?(t.heap[++t.heap_len]=h=a,t.depth[a]=0):r[2*a+1]=0;for(;t.heap_len<2;)r[2*(n=t.heap[++t.heap_len]=h<2?++h:0)]=1,t.depth[n]=0,t.opt_len--,o&&(t.static_len-=s[2*n+1]);for(e.max_code=h,a=t.heap_len>>1;a>=1;a--)p(t,r,a);n=l;do{a=t.heap[1],t.heap[1]=t.heap[t.heap_len--],p(t,r,1),i=t.heap[1],t.heap[--t.heap_max]=a,t.heap[--t.heap_max]=i,r[2*n]=r[2*a]+r[2*i],t.depth[n]=(t.depth[a]>=t.depth[i]?t.depth[a]:t.depth[i])+1,r[2*a+1]=r[2*i+1]=n,t.heap[1]=n++,p(t,r,1)}while(t.heap_len>=2);t.heap[--t.heap_max]=t.heap[1],_(t,e),u(r,h,t.bl_count)}function y(t,e,a){var i,n,r=-1,s=e[1],o=0,l=7,h=4;for(0===s&&(l=138,h=3),e[2*(a+1)+1]=65535,i=0;i<=a;i++)n=s,s=e[2*(i+1)+1],++o<l&&n===s||(o<h?t.bl_tree[2*n]+=o:0!==n?(n!==r&&t.bl_tree[2*n]++,t.bl_tree[2*q]++):o<=10?t.bl_tree[2*G]++:t.bl_tree[2*X]++,o=0,r=n,0===s?(l=138,h=3):n===s?(l=6,h=3):(l=7,h=4))}function x(t,e,a){var i,n,r=-1,s=e[1],o=0,d=7,f=4;for(0===s&&(d=138,f=3),i=0;i<=a;i++)if(n=s,s=e[2*(i+1)+1],!(++o<d&&n===s)){if(o<f)do{h(t,n,t.bl_tree)}while(0!=--o);else 0!==n?(n!==r&&(h(t,n,t.bl_tree),o--),h(t,q,t.bl_tree),l(t,o-3,2)):o<=10?(h(t,G,t.bl_tree),l(t,o-3,3)):(h(t,X,t.bl_tree),l(t,o-11,7));o=0,r=n,0===s?(d=138,f=3):n===s?(d=6,f=3):(d=7,f=4)}}function z(t){var e;for(y(t,t.dyn_ltree,t.l_desc.max_code),y(t,t.dyn_dtree,t.d_desc.max_code),k(t,t.bl_desc),e=H-1;e>=3&&0===t.bl_tree[2*V[e]+1];e--);return t.opt_len+=3*(e+1)+5+5+4,e}function B(t,e,a,i){var n;for(l(t,e-257,5),l(t,a-1,5),l(t,i-4,4),n=0;n<i;n++)l(t,t.bl_tree[2*V[n]+1],3);x(t,t.dyn_ltree,e-1),x(t,t.dyn_dtree,a-1)}function S(t){var e,a=4093624447;for(e=0;e<=31;e++,a>>>=1)if(1&a&&0!==t.dyn_ltree[2*e])return R;if(0!==t.dyn_ltree[18]||0!==t.dyn_ltree[20]||0!==t.dyn_ltree[26])return C;for(e=32;e<T;e++)if(0!==t.dyn_ltree[2*e])return C;return R}function E(t,e,a,i){l(t,(O<<1)+(i?1:0),3),m(t,e,a,!0)}var 
A=t("../utils/common"),Z=4,R=0,C=1,N=2,O=0,D=1,I=2,U=29,T=256,F=T+1+U,L=30,H=19,j=2*F+1,K=15,M=16,P=7,Y=256,q=16,G=17,X=18,W=[0,0,0,0,0,0,0,0,1,1,1,1,2,2,2,2,3,3,3,3,4,4,4,4,5,5,5,5,0],J=[0,0,0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,8,8,9,9,10,10,11,11,12,12,13,13],Q=[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,3,7],V=[16,17,18,0,8,7,9,6,10,5,11,4,12,3,13,2,14,1,15],$=new Array(2*(F+2));i($);var tt=new Array(2*L);i(tt);var et=new Array(512);i(et);var at=new Array(256);i(at);var it=new Array(U);i(it);var nt=new Array(L);i(nt);var rt,st,ot,lt=!1;a._tr_init=function(t){lt||(c(),lt=!0),t.l_desc=new r(t.dyn_ltree,rt),t.d_desc=new r(t.dyn_dtree,st),t.bl_desc=new r(t.bl_tree,ot),t.bi_buf=0,t.bi_valid=0,b(t)},a._tr_stored_block=E,a._tr_flush_block=function(t,e,a,i){var n,r,s=0;t.level>0?(t.strm.data_type===N&&(t.strm.data_type=S(t)),k(t,t.l_desc),k(t,t.d_desc),s=z(t),n=t.opt_len+3+7>>>3,(r=t.static_len+3+7>>>3)<=n&&(n=r)):n=r=a+5,a+4<=n&&-1!==e?E(t,e,a,i):t.strategy===Z||r===n?(l(t,(D<<1)+(i?1:0),3),v(t,$,tt)):(l(t,(I<<1)+(i?1:0),3),B(t,t.l_desc.max_code+1,t.d_desc.max_code+1,s+1),v(t,t.dyn_ltree,t.dyn_dtree)),b(t),i&&g(t)},a._tr_tally=function(t,e,a){return t.pending_buf[t.d_buf+2*t.last_lit]=e>>>8&255,t.pending_buf[t.d_buf+2*t.last_lit+1]=255&e,t.pending_buf[t.l_buf+t.last_lit]=255&a,t.last_lit++,0===e?t.dyn_ltree[2*a]++:(t.matches++,e--,t.dyn_ltree[2*(at[a]+T+1)]++,t.dyn_dtree[2*s(e)]++),t.last_lit===t.lit_bufsize-1},a._tr_align=function(t){l(t,D<<1,3),h(t,Y,$),f(t)}},{"../utils/common":3}],15:[function(t,e,a){"use strict";e.exports=function(){this.input=null,this.next_in=0,this.avail_in=0,this.total_in=0,this.output=null,this.next_out=0,this.avail_out=0,this.total_out=0,this.msg="",this.state=null,this.data_type=2,this.adler=0}},{}],"/":[function(t,e,a){"use strict";var i={};(0,t("./lib/utils/common").assign)(i,t("./lib/deflate"),t("./lib/inflate"),t("./lib/zlib/constants")),e.exports=i},{"./lib/deflate":1,"./lib/inflate":2,"./lib/utils/common":3,"./lib/zlib/constants":6}]},{},[])("/")});
diff --git a/web/gui/lib/perfect-scrollbar-0.6.15.min.js b/web/gui/lib/perfect-scrollbar-0.6.15.min.js
new file mode 100644
index 0000000..7fabff5
--- /dev/null
+++ b/web/gui/lib/perfect-scrollbar-0.6.15.min.js
@@ -0,0 +1,3 @@
+// SPDX-License-Identifier: MIT
+/* perfect-scrollbar v0.6.15 */
+!function t(e,n,r){function o(i,s){if(!n[i]){if(!e[i]){var a="function"==typeof require&&require;if(!s&&a)return a(i,!0);if(l)return l(i,!0);var c=new Error("Cannot find module '"+i+"'");throw c.code="MODULE_NOT_FOUND",c}var u=n[i]={exports:{}};e[i][0].call(u.exports,function(t){var n=e[i][1][t];return o(n?n:t)},u,u.exports,t,e,n,r)}return n[i].exports}for(var l="function"==typeof require&&require,i=0;i<r.length;i++)o(r[i]);return o}({1:[function(t,e,n){"use strict";var r=t("../main");"function"==typeof define&&define.amd?define(r):(window.PerfectScrollbar=r,"undefined"==typeof window.Ps&&(window.Ps=r))},{"../main":7}],2:[function(t,e,n){"use strict";function r(t,e){var n=t.className.split(" ");n.indexOf(e)<0&&n.push(e),t.className=n.join(" ")}function o(t,e){var n=t.className.split(" "),r=n.indexOf(e);r>=0&&n.splice(r,1),t.className=n.join(" ")}n.add=function(t,e){t.classList?t.classList.add(e):r(t,e)},n.remove=function(t,e){t.classList?t.classList.remove(e):o(t,e)},n.list=function(t){return t.classList?Array.prototype.slice.apply(t.classList):t.className.split(" ")}},{}],3:[function(t,e,n){"use strict";function r(t,e){return window.getComputedStyle(t)[e]}function o(t,e,n){return"number"==typeof n&&(n=n.toString()+"px"),t.style[e]=n,t}function l(t,e){for(var n in e){var r=e[n];"number"==typeof r&&(r=r.toString()+"px"),t.style[n]=r}return t}var i={};i.e=function(t,e){var n=document.createElement(t);return n.className=e,n},i.appendTo=function(t,e){return e.appendChild(t),t},i.css=function(t,e,n){return"object"==typeof e?l(t,e):"undefined"==typeof n?r(t,e):o(t,e,n)},i.matches=function(t,e){return"undefined"!=typeof t.matches?t.matches(e):"undefined"!=typeof t.matchesSelector?t.matchesSelector(e):"undefined"!=typeof t.webkitMatchesSelector?t.webkitMatchesSelector(e):"undefined"!=typeof t.mozMatchesSelector?t.mozMatchesSelector(e):"undefined"!=typeof t.msMatchesSelector?t.msMatchesSelector(e):void 0},i.remove=function(t){"undefined"!=typeof t.remove?t.remove():t.parentNode&&t.parentNode.removeChild(t)},i.queryChildren=function(t,e){return Array.prototype.filter.call(t.childNodes,function(t){return i.matches(t,e)})},e.exports=i},{}],4:[function(t,e,n){"use strict";var r=function(t){this.element=t,this.events={}};r.prototype.bind=function(t,e){"undefined"==typeof this.events[t]&&(this.events[t]=[]),this.events[t].push(e),this.element.addEventListener(t,e,!1)},r.prototype.unbind=function(t,e){var n="undefined"!=typeof e;this.events[t]=this.events[t].filter(function(r){return!(!n||r===e)||(this.element.removeEventListener(t,r,!1),!1)},this)},r.prototype.unbindAll=function(){for(var t in this.events)this.unbind(t)};var o=function(){this.eventElements=[]};o.prototype.eventElement=function(t){var e=this.eventElements.filter(function(e){return e.element===t})[0];return"undefined"==typeof e&&(e=new r(t),this.eventElements.push(e)),e},o.prototype.bind=function(t,e,n){this.eventElement(t).bind(e,n)},o.prototype.unbind=function(t,e,n){this.eventElement(t).unbind(e,n)},o.prototype.unbindAll=function(){for(var t=0;t<this.eventElements.length;t++)this.eventElements[t].unbindAll()},o.prototype.once=function(t,e,n){var r=this.eventElement(t),o=function(t){r.unbind(e,o),n(t)};r.bind(e,o)},e.exports=o},{}],5:[function(t,e,n){"use strict";e.exports=function(){function t(){return Math.floor(65536*(1+Math.random())).toString(16).substring(1)}return function(){return t()+t()+"-"+t()+"-"+t()+"-"+t()+"-"+t()+t()+t()}}()},{}],6:[function(t,e,n){"use strict";var r=t("./class"),o=t("./dom"),l=n.toInt=function(t){return 
parseInt(t,10)||0},i=n.clone=function(t){if(t){if(t.constructor===Array)return t.map(i);if("object"==typeof t){var e={};for(var n in t)e[n]=i(t[n]);return e}return t}return null};n.extend=function(t,e){var n=i(t);for(var r in e)n[r]=i(e[r]);return n},n.isEditable=function(t){return o.matches(t,"input,[contenteditable]")||o.matches(t,"select,[contenteditable]")||o.matches(t,"textarea,[contenteditable]")||o.matches(t,"button,[contenteditable]")},n.removePsClasses=function(t){for(var e=r.list(t),n=0;n<e.length;n++){var o=e[n];0===o.indexOf("ps-")&&r.remove(t,o)}},n.outerWidth=function(t){return l(o.css(t,"width"))+l(o.css(t,"paddingLeft"))+l(o.css(t,"paddingRight"))+l(o.css(t,"borderLeftWidth"))+l(o.css(t,"borderRightWidth"))},n.startScrolling=function(t,e){r.add(t,"ps-in-scrolling"),"undefined"!=typeof e?r.add(t,"ps-"+e):(r.add(t,"ps-x"),r.add(t,"ps-y"))},n.stopScrolling=function(t,e){r.remove(t,"ps-in-scrolling"),"undefined"!=typeof e?r.remove(t,"ps-"+e):(r.remove(t,"ps-x"),r.remove(t,"ps-y"))},n.env={isWebKit:"WebkitAppearance"in document.documentElement.style,supportsTouch:"ontouchstart"in window||window.DocumentTouch&&document instanceof window.DocumentTouch,supportsIePointer:null!==window.navigator.msMaxTouchPoints}},{"./class":2,"./dom":3}],7:[function(t,e,n){"use strict";var r=t("./plugin/destroy"),o=t("./plugin/initialize"),l=t("./plugin/update");e.exports={initialize:o,update:l,destroy:r}},{"./plugin/destroy":9,"./plugin/initialize":17,"./plugin/update":21}],8:[function(t,e,n){"use strict";e.exports={handlers:["click-rail","drag-scrollbar","keyboard","wheel","touch"],maxScrollbarLength:null,minScrollbarLength:null,scrollXMarginOffset:0,scrollYMarginOffset:0,suppressScrollX:!1,suppressScrollY:!1,swipePropagation:!0,useBothWheelAxes:!1,wheelPropagation:!1,wheelSpeed:1,theme:"default"}},{}],9:[function(t,e,n){"use strict";var r=t("../lib/helper"),o=t("../lib/dom"),l=t("./instances");e.exports=function(t){var e=l.get(t);e&&(e.event.unbindAll(),o.remove(e.scrollbarX),o.remove(e.scrollbarY),o.remove(e.scrollbarXRail),o.remove(e.scrollbarYRail),r.removePsClasses(t),l.remove(t))}},{"../lib/dom":3,"../lib/helper":6,"./instances":18}],10:[function(t,e,n){"use strict";function r(t,e){function n(t){return t.getBoundingClientRect()}var r=function(t){t.stopPropagation()};e.event.bind(e.scrollbarY,"click",r),e.event.bind(e.scrollbarYRail,"click",function(r){var o=r.pageY-window.pageYOffset-n(e.scrollbarYRail).top,s=o>e.scrollbarYTop?1:-1;i(t,"top",t.scrollTop+s*e.containerHeight),l(t),r.stopPropagation()}),e.event.bind(e.scrollbarX,"click",r),e.event.bind(e.scrollbarXRail,"click",function(r){var o=r.pageX-window.pageXOffset-n(e.scrollbarXRail).left,s=o>e.scrollbarXLeft?1:-1;i(t,"left",t.scrollLeft+s*e.containerWidth),l(t),r.stopPropagation()})}var o=t("../instances"),l=t("../update-geometry"),i=t("../update-scroll");e.exports=function(t){var e=o.get(t);r(t,e)}},{"../instances":18,"../update-geometry":19,"../update-scroll":20}],11:[function(t,e,n){"use strict";function r(t,e){function n(n){var o=r+n*e.railXRatio,i=Math.max(0,e.scrollbarXRail.getBoundingClientRect().left)+e.railXRatio*(e.railXWidth-e.scrollbarXWidth);o<0?e.scrollbarXLeft=0:o>i?e.scrollbarXLeft=i:e.scrollbarXLeft=o;var s=l.toInt(e.scrollbarXLeft*(e.contentWidth-e.containerWidth)/(e.containerWidth-e.railXRatio*e.scrollbarXWidth))-e.negativeScrollAdjustment;c(t,"left",s)}var 
r=null,o=null,s=function(e){n(e.pageX-o),a(t),e.stopPropagation(),e.preventDefault()},u=function(){l.stopScrolling(t,"x"),e.event.unbind(e.ownerDocument,"mousemove",s)};e.event.bind(e.scrollbarX,"mousedown",function(n){o=n.pageX,r=l.toInt(i.css(e.scrollbarX,"left"))*e.railXRatio,l.startScrolling(t,"x"),e.event.bind(e.ownerDocument,"mousemove",s),e.event.once(e.ownerDocument,"mouseup",u),n.stopPropagation(),n.preventDefault()})}function o(t,e){function n(n){var o=r+n*e.railYRatio,i=Math.max(0,e.scrollbarYRail.getBoundingClientRect().top)+e.railYRatio*(e.railYHeight-e.scrollbarYHeight);o<0?e.scrollbarYTop=0:o>i?e.scrollbarYTop=i:e.scrollbarYTop=o;var s=l.toInt(e.scrollbarYTop*(e.contentHeight-e.containerHeight)/(e.containerHeight-e.railYRatio*e.scrollbarYHeight));c(t,"top",s)}var r=null,o=null,s=function(e){n(e.pageY-o),a(t),e.stopPropagation(),e.preventDefault()},u=function(){l.stopScrolling(t,"y"),e.event.unbind(e.ownerDocument,"mousemove",s)};e.event.bind(e.scrollbarY,"mousedown",function(n){o=n.pageY,r=l.toInt(i.css(e.scrollbarY,"top"))*e.railYRatio,l.startScrolling(t,"y"),e.event.bind(e.ownerDocument,"mousemove",s),e.event.once(e.ownerDocument,"mouseup",u),n.stopPropagation(),n.preventDefault()})}var l=t("../../lib/helper"),i=t("../../lib/dom"),s=t("../instances"),a=t("../update-geometry"),c=t("../update-scroll");e.exports=function(t){var e=s.get(t);r(t,e),o(t,e)}},{"../../lib/dom":3,"../../lib/helper":6,"../instances":18,"../update-geometry":19,"../update-scroll":20}],12:[function(t,e,n){"use strict";function r(t,e){function n(n,r){var o=t.scrollTop;if(0===n){if(!e.scrollbarYActive)return!1;if(0===o&&r>0||o>=e.contentHeight-e.containerHeight&&r<0)return!e.settings.wheelPropagation}var l=t.scrollLeft;if(0===r){if(!e.scrollbarXActive)return!1;if(0===l&&n<0||l>=e.contentWidth-e.containerWidth&&n>0)return!e.settings.wheelPropagation}return!0}var r=!1;e.event.bind(t,"mouseenter",function(){r=!0}),e.event.bind(t,"mouseleave",function(){r=!1});var i=!1;e.event.bind(e.ownerDocument,"keydown",function(c){if(!(c.isDefaultPrevented&&c.isDefaultPrevented()||c.defaultPrevented)){var u=l.matches(e.scrollbarX,":focus")||l.matches(e.scrollbarY,":focus");if(r||u){var d=document.activeElement?document.activeElement:e.ownerDocument.activeElement;if(d){if("IFRAME"===d.tagName)d=d.contentDocument.activeElement;else for(;d.shadowRoot;)d=d.shadowRoot.activeElement;if(o.isEditable(d))return}var p=0,f=0;switch(c.which){case 37:p=c.metaKey?-e.contentWidth:c.altKey?-e.containerWidth:-30;break;case 38:f=c.metaKey?e.contentHeight:c.altKey?e.containerHeight:30;break;case 39:p=c.metaKey?e.contentWidth:c.altKey?e.containerWidth:30;break;case 40:f=c.metaKey?-e.contentHeight:c.altKey?-e.containerHeight:-30;break;case 33:f=90;break;case 32:f=c.shiftKey?90:-90;break;case 34:f=-90;break;case 35:f=c.ctrlKey?-e.contentHeight:-e.containerHeight;break;case 36:f=c.ctrlKey?t.scrollTop:e.containerHeight;break;default:return}a(t,"top",t.scrollTop-f),a(t,"left",t.scrollLeft+p),s(t),i=n(p,f),i&&c.preventDefault()}}})}var o=t("../../lib/helper"),l=t("../../lib/dom"),i=t("../instances"),s=t("../update-geometry"),a=t("../update-scroll");e.exports=function(t){var e=i.get(t);r(t,e)}},{"../../lib/dom":3,"../../lib/helper":6,"../instances":18,"../update-geometry":19,"../update-scroll":20}],13:[function(t,e,n){"use strict";function r(t,e){function n(n,r){var o=t.scrollTop;if(0===n){if(!e.scrollbarYActive)return!1;if(0===o&&r>0||o>=e.contentHeight-e.containerHeight&&r<0)return!e.settings.wheelPropagation}var 
l=t.scrollLeft;if(0===r){if(!e.scrollbarXActive)return!1;if(0===l&&n<0||l>=e.contentWidth-e.containerWidth&&n>0)return!e.settings.wheelPropagation}return!0}function r(t){var e=t.deltaX,n=-1*t.deltaY;return"undefined"!=typeof e&&"undefined"!=typeof n||(e=-1*t.wheelDeltaX/6,n=t.wheelDeltaY/6),t.deltaMode&&1===t.deltaMode&&(e*=10,n*=10),e!==e&&n!==n&&(e=0,n=t.wheelDelta),t.shiftKey?[-n,-e]:[e,n]}function o(e,n){var r=t.querySelector("textarea:hover, select[multiple]:hover, .ps-child:hover");if(r){if(!window.getComputedStyle(r).overflow.match(/(scroll|auto)/))return!1;var o=r.scrollHeight-r.clientHeight;if(o>0&&!(0===r.scrollTop&&n>0||r.scrollTop===o&&n<0))return!0;var l=r.scrollLeft-r.clientWidth;if(l>0&&!(0===r.scrollLeft&&e<0||r.scrollLeft===l&&e>0))return!0}return!1}function s(s){var c=r(s),u=c[0],d=c[1];o(u,d)||(a=!1,e.settings.useBothWheelAxes?e.scrollbarYActive&&!e.scrollbarXActive?(d?i(t,"top",t.scrollTop-d*e.settings.wheelSpeed):i(t,"top",t.scrollTop+u*e.settings.wheelSpeed),a=!0):e.scrollbarXActive&&!e.scrollbarYActive&&(u?i(t,"left",t.scrollLeft+u*e.settings.wheelSpeed):i(t,"left",t.scrollLeft-d*e.settings.wheelSpeed),a=!0):(i(t,"top",t.scrollTop-d*e.settings.wheelSpeed),i(t,"left",t.scrollLeft+u*e.settings.wheelSpeed)),l(t),a=a||n(u,d),a&&(s.stopPropagation(),s.preventDefault()))}var a=!1;"undefined"!=typeof window.onwheel?e.event.bind(t,"wheel",s):"undefined"!=typeof window.onmousewheel&&e.event.bind(t,"mousewheel",s)}var o=t("../instances"),l=t("../update-geometry"),i=t("../update-scroll");e.exports=function(t){var e=o.get(t);r(t,e)}},{"../instances":18,"../update-geometry":19,"../update-scroll":20}],14:[function(t,e,n){"use strict";function r(t,e){e.event.bind(t,"scroll",function(){l(t)})}var o=t("../instances"),l=t("../update-geometry");e.exports=function(t){var e=o.get(t);r(t,e)}},{"../instances":18,"../update-geometry":19}],15:[function(t,e,n){"use strict";function r(t,e){function n(){var t=window.getSelection?window.getSelection():document.getSelection?document.getSelection():"";return 0===t.toString().length?null:t.getRangeAt(0).commonAncestorContainer}function r(){c||(c=setInterval(function(){return l.get(t)?(s(t,"top",t.scrollTop+u.top),s(t,"left",t.scrollLeft+u.left),void i(t)):void clearInterval(c)},50))}function a(){c&&(clearInterval(c),c=null),o.stopScrolling(t)}var c=null,u={top:0,left:0},d=!1;e.event.bind(e.ownerDocument,"selectionchange",function(){t.contains(n())?d=!0:(d=!1,a())}),e.event.bind(window,"mouseup",function(){d&&(d=!1,a())}),e.event.bind(window,"keyup",function(){d&&(d=!1,a())}),e.event.bind(window,"mousemove",function(e){if(d){var n={x:e.pageX,y:e.pageY},l={left:t.offsetLeft,right:t.offsetLeft+t.offsetWidth,top:t.offsetTop,bottom:t.offsetTop+t.offsetHeight};n.x<l.left+3?(u.left=-5,o.startScrolling(t,"x")):n.x>l.right-3?(u.left=5,o.startScrolling(t,"x")):u.left=0,n.y<l.top+3?(l.top+3-n.y<5?u.top=-5:u.top=-20,o.startScrolling(t,"y")):n.y>l.bottom-3?(n.y-l.bottom+3<5?u.top=5:u.top=20,o.startScrolling(t,"y")):u.top=0,0===u.top&&0===u.left?a():r()}})}var o=t("../../lib/helper"),l=t("../instances"),i=t("../update-geometry"),s=t("../update-scroll");e.exports=function(t){var e=l.get(t);r(t,e)}},{"../../lib/helper":6,"../instances":18,"../update-geometry":19,"../update-scroll":20}],16:[function(t,e,n){"use strict";function r(t,e,n,r){function o(n,r){var o=t.scrollTop,l=t.scrollLeft,i=Math.abs(n),s=Math.abs(r);if(s>i){if(r<0&&o===e.contentHeight-e.containerHeight||r>0&&0===o)return!e.settings.swipePropagation}else 
if(i>s&&(n<0&&l===e.contentWidth-e.containerWidth||n>0&&0===l))return!e.settings.swipePropagation;return!0}function a(e,n){s(t,"top",t.scrollTop-n),s(t,"left",t.scrollLeft-e),i(t)}function c(){w=!0}function u(){w=!1}function d(t){return t.targetTouches?t.targetTouches[0]:t}function p(t){return!(!t.targetTouches||1!==t.targetTouches.length)||!(!t.pointerType||"mouse"===t.pointerType||t.pointerType===t.MSPOINTER_TYPE_MOUSE)}function f(t){if(p(t)){Y=!0;var e=d(t);g.pageX=e.pageX,g.pageY=e.pageY,v=(new Date).getTime(),null!==y&&clearInterval(y),t.stopPropagation()}}function h(t){if(!Y&&e.settings.swipePropagation&&f(t),!w&&Y&&p(t)){var n=d(t),r={pageX:n.pageX,pageY:n.pageY},l=r.pageX-g.pageX,i=r.pageY-g.pageY;a(l,i),g=r;var s=(new Date).getTime(),c=s-v;c>0&&(m.x=l/c,m.y=i/c,v=s),o(l,i)&&(t.stopPropagation(),t.preventDefault())}}function b(){!w&&Y&&(Y=!1,clearInterval(y),y=setInterval(function(){return l.get(t)&&(m.x||m.y)?Math.abs(m.x)<.01&&Math.abs(m.y)<.01?void clearInterval(y):(a(30*m.x,30*m.y),m.x*=.8,void(m.y*=.8)):void clearInterval(y)},10))}var g={},v=0,m={},y=null,w=!1,Y=!1;n&&(e.event.bind(window,"touchstart",c),e.event.bind(window,"touchend",u),e.event.bind(t,"touchstart",f),e.event.bind(t,"touchmove",h),e.event.bind(t,"touchend",b)),r&&(window.PointerEvent?(e.event.bind(window,"pointerdown",c),e.event.bind(window,"pointerup",u),e.event.bind(t,"pointerdown",f),e.event.bind(t,"pointermove",h),e.event.bind(t,"pointerup",b)):window.MSPointerEvent&&(e.event.bind(window,"MSPointerDown",c),e.event.bind(window,"MSPointerUp",u),e.event.bind(t,"MSPointerDown",f),e.event.bind(t,"MSPointerMove",h),e.event.bind(t,"MSPointerUp",b)))}var o=t("../../lib/helper"),l=t("../instances"),i=t("../update-geometry"),s=t("../update-scroll");e.exports=function(t){if(o.env.supportsTouch||o.env.supportsIePointer){var e=l.get(t);r(t,e,o.env.supportsTouch,o.env.supportsIePointer)}}},{"../../lib/helper":6,"../instances":18,"../update-geometry":19,"../update-scroll":20}],17:[function(t,e,n){"use strict";var r=t("../lib/helper"),o=t("../lib/class"),l=t("./instances"),i=t("./update-geometry"),s={"click-rail":t("./handler/click-rail"),"drag-scrollbar":t("./handler/drag-scrollbar"),keyboard:t("./handler/keyboard"),wheel:t("./handler/mouse-wheel"),touch:t("./handler/touch"),selection:t("./handler/selection")},a=t("./handler/native-scroll");e.exports=function(t,e){e="object"==typeof e?e:{},o.add(t,"ps-container");var n=l.add(t);n.settings=r.extend(n.settings,e),o.add(t,"ps-theme-"+n.settings.theme),n.settings.handlers.forEach(function(e){s[e](t)}),a(t),i(t)}},{"../lib/class":2,"../lib/helper":6,"./handler/click-rail":10,"./handler/drag-scrollbar":11,"./handler/keyboard":12,"./handler/mouse-wheel":13,"./handler/native-scroll":14,"./handler/selection":15,"./handler/touch":16,"./instances":18,"./update-geometry":19}],18:[function(t,e,n){"use strict";function r(t){function e(){a.add(t,"ps-focus")}function n(){a.remove(t,"ps-focus")}var r=this;r.settings=s.clone(c),r.containerWidth=null,r.containerHeight=null,r.contentWidth=null,r.contentHeight=null,r.isRtl="rtl"===u.css(t,"direction"),r.isNegativeScroll=function(){var e=t.scrollLeft,n=null;return t.scrollLeft=-1,n=t.scrollLeft<0,t.scrollLeft=e,n}(),r.negativeScrollAdjustment=r.isNegativeScroll?t.scrollWidth-t.clientWidth:0,r.event=new 
d,r.ownerDocument=t.ownerDocument||document,r.scrollbarXRail=u.appendTo(u.e("div","ps-scrollbar-x-rail"),t),r.scrollbarX=u.appendTo(u.e("div","ps-scrollbar-x"),r.scrollbarXRail),r.scrollbarX.setAttribute("tabindex",0),r.event.bind(r.scrollbarX,"focus",e),r.event.bind(r.scrollbarX,"blur",n),r.scrollbarXActive=null,r.scrollbarXWidth=null,r.scrollbarXLeft=null,r.scrollbarXBottom=s.toInt(u.css(r.scrollbarXRail,"bottom")),r.isScrollbarXUsingBottom=r.scrollbarXBottom===r.scrollbarXBottom,r.scrollbarXTop=r.isScrollbarXUsingBottom?null:s.toInt(u.css(r.scrollbarXRail,"top")),r.railBorderXWidth=s.toInt(u.css(r.scrollbarXRail,"borderLeftWidth"))+s.toInt(u.css(r.scrollbarXRail,"borderRightWidth")),u.css(r.scrollbarXRail,"display","block"),r.railXMarginWidth=s.toInt(u.css(r.scrollbarXRail,"marginLeft"))+s.toInt(u.css(r.scrollbarXRail,"marginRight")),u.css(r.scrollbarXRail,"display",""),r.railXWidth=null,r.railXRatio=null,r.scrollbarYRail=u.appendTo(u.e("div","ps-scrollbar-y-rail"),t),r.scrollbarY=u.appendTo(u.e("div","ps-scrollbar-y"),r.scrollbarYRail),r.scrollbarY.setAttribute("tabindex",0),r.event.bind(r.scrollbarY,"focus",e),r.event.bind(r.scrollbarY,"blur",n),r.scrollbarYActive=null,r.scrollbarYHeight=null,r.scrollbarYTop=null,r.scrollbarYRight=s.toInt(u.css(r.scrollbarYRail,"right")),r.isScrollbarYUsingRight=r.scrollbarYRight===r.scrollbarYRight,r.scrollbarYLeft=r.isScrollbarYUsingRight?null:s.toInt(u.css(r.scrollbarYRail,"left")),r.scrollbarYOuterWidth=r.isRtl?s.outerWidth(r.scrollbarY):null,r.railBorderYWidth=s.toInt(u.css(r.scrollbarYRail,"borderTopWidth"))+s.toInt(u.css(r.scrollbarYRail,"borderBottomWidth")),u.css(r.scrollbarYRail,"display","block"),r.railYMarginHeight=s.toInt(u.css(r.scrollbarYRail,"marginTop"))+s.toInt(u.css(r.scrollbarYRail,"marginBottom")),u.css(r.scrollbarYRail,"display",""),r.railYHeight=null,r.railYRatio=null}function o(t){return t.getAttribute("data-ps-id")}function l(t,e){t.setAttribute("data-ps-id",e)}function i(t){t.removeAttribute("data-ps-id")}var s=t("../lib/helper"),a=t("../lib/class"),c=t("./default-setting"),u=t("../lib/dom"),d=t("../lib/event-manager"),p=t("../lib/guid"),f={};n.add=function(t){var e=p();return l(t,e),f[e]=new r(t),f[e]},n.remove=function(t){delete f[o(t)],i(t)},n.get=function(t){return f[o(t)]}},{"../lib/class":2,"../lib/dom":3,"../lib/event-manager":4,"../lib/guid":5,"../lib/helper":6,"./default-setting":8}],19:[function(t,e,n){"use strict";function r(t,e){return t.settings.minScrollbarLength&&(e=Math.max(e,t.settings.minScrollbarLength)),t.settings.maxScrollbarLength&&(e=Math.min(e,t.settings.maxScrollbarLength)),e}function o(t,e){var n={width:e.railXWidth};e.isRtl?n.left=e.negativeScrollAdjustment+t.scrollLeft+e.containerWidth-e.contentWidth:n.left=t.scrollLeft,e.isScrollbarXUsingBottom?n.bottom=e.scrollbarXBottom-t.scrollTop:n.top=e.scrollbarXTop+t.scrollTop,s.css(e.scrollbarXRail,n);var r={top:t.scrollTop,height:e.railYHeight};e.isScrollbarYUsingRight?e.isRtl?r.right=e.contentWidth-(e.negativeScrollAdjustment+t.scrollLeft)-e.scrollbarYRight-e.scrollbarYOuterWidth:r.right=e.scrollbarYRight-t.scrollLeft:e.isRtl?r.left=e.negativeScrollAdjustment+t.scrollLeft+2*e.containerWidth-e.contentWidth-e.scrollbarYLeft-e.scrollbarYOuterWidth:r.left=e.scrollbarYLeft+t.scrollLeft,s.css(e.scrollbarYRail,r),s.css(e.scrollbarX,{left:e.scrollbarXLeft,width:e.scrollbarXWidth-e.railBorderXWidth}),s.css(e.scrollbarY,{top:e.scrollbarYTop,height:e.scrollbarYHeight-e.railBorderYWidth})}var 
l=t("../lib/helper"),i=t("../lib/class"),s=t("../lib/dom"),a=t("./instances"),c=t("./update-scroll");e.exports=function(t){var e=a.get(t);e.containerWidth=t.clientWidth,e.containerHeight=t.clientHeight,e.contentWidth=t.scrollWidth,e.contentHeight=t.scrollHeight;var n;t.contains(e.scrollbarXRail)||(n=s.queryChildren(t,".ps-scrollbar-x-rail"),n.length>0&&n.forEach(function(t){s.remove(t)}),s.appendTo(e.scrollbarXRail,t)),t.contains(e.scrollbarYRail)||(n=s.queryChildren(t,".ps-scrollbar-y-rail"),n.length>0&&n.forEach(function(t){s.remove(t)}),s.appendTo(e.scrollbarYRail,t)),!e.settings.suppressScrollX&&e.containerWidth+e.settings.scrollXMarginOffset<e.contentWidth?(e.scrollbarXActive=!0,e.railXWidth=e.containerWidth-e.railXMarginWidth,e.railXRatio=e.containerWidth/e.railXWidth,e.scrollbarXWidth=r(e,l.toInt(e.railXWidth*e.containerWidth/e.contentWidth)),e.scrollbarXLeft=l.toInt((e.negativeScrollAdjustment+t.scrollLeft)*(e.railXWidth-e.scrollbarXWidth)/(e.contentWidth-e.containerWidth))):e.scrollbarXActive=!1,!e.settings.suppressScrollY&&e.containerHeight+e.settings.scrollYMarginOffset<e.contentHeight?(e.scrollbarYActive=!0,e.railYHeight=e.containerHeight-e.railYMarginHeight,e.railYRatio=e.containerHeight/e.railYHeight,e.scrollbarYHeight=r(e,l.toInt(e.railYHeight*e.containerHeight/e.contentHeight)),e.scrollbarYTop=l.toInt(t.scrollTop*(e.railYHeight-e.scrollbarYHeight)/(e.contentHeight-e.containerHeight))):e.scrollbarYActive=!1,e.scrollbarXLeft>=e.railXWidth-e.scrollbarXWidth&&(e.scrollbarXLeft=e.railXWidth-e.scrollbarXWidth),e.scrollbarYTop>=e.railYHeight-e.scrollbarYHeight&&(e.scrollbarYTop=e.railYHeight-e.scrollbarYHeight),o(t,e),e.scrollbarXActive?i.add(t,"ps-active-x"):(i.remove(t,"ps-active-x"),e.scrollbarXWidth=0,e.scrollbarXLeft=0,c(t,"left",0)),e.scrollbarYActive?i.add(t,"ps-active-y"):(i.remove(t,"ps-active-y"),e.scrollbarYHeight=0,e.scrollbarYTop=0,c(t,"top",0))}},{"../lib/class":2,"../lib/dom":3,"../lib/helper":6,"./instances":18,"./update-scroll":20}],20:[function(t,e,n){"use strict";var r,o,l=t("./instances"),i=function(t){var e=document.createEvent("Event");return e.initEvent(t,!0,!0),e};e.exports=function(t,e,n){if("undefined"==typeof t)throw"You must provide an element to the update-scroll function";if("undefined"==typeof e)throw"You must provide an axis to the update-scroll function";if("undefined"==typeof n)throw"You must provide a value to the update-scroll function";"top"===e&&n<=0&&(t.scrollTop=n=0,t.dispatchEvent(i("ps-y-reach-start"))),"left"===e&&n<=0&&(t.scrollLeft=n=0,t.dispatchEvent(i("ps-x-reach-start")));var s=l.get(t);"top"===e&&n>=s.contentHeight-s.containerHeight&&(n=s.contentHeight-s.containerHeight,n-t.scrollTop<=1?n=t.scrollTop:t.scrollTop=n,t.dispatchEvent(i("ps-y-reach-end"))),"left"===e&&n>=s.contentWidth-s.containerWidth&&(n=s.contentWidth-s.containerWidth,n-t.scrollLeft<=1?n=t.scrollLeft:t.scrollLeft=n,t.dispatchEvent(i("ps-x-reach-end"))),r||(r=t.scrollTop),o||(o=t.scrollLeft),"top"===e&&n<r&&t.dispatchEvent(i("ps-scroll-up")),"top"===e&&n>r&&t.dispatchEvent(i("ps-scroll-down")),"left"===e&&n<o&&t.dispatchEvent(i("ps-scroll-left")),"left"===e&&n>o&&t.dispatchEvent(i("ps-scroll-right")),"top"===e&&(t.scrollTop=r=n,t.dispatchEvent(i("ps-scroll-y"))),"left"===e&&(t.scrollLeft=o=n,t.dispatchEvent(i("ps-scroll-x")))}},{"./instances":18}],21:[function(t,e,n){"use strict";var r=t("../lib/helper"),o=t("../lib/dom"),l=t("./instances"),i=t("./update-geometry"),s=t("./update-scroll");e.exports=function(t){var 
e=l.get(t);e&&(e.negativeScrollAdjustment=e.isNegativeScroll?t.scrollWidth-t.clientWidth:0,o.css(e.scrollbarXRail,"display","block"),o.css(e.scrollbarYRail,"display","block"),e.railXMarginWidth=r.toInt(o.css(e.scrollbarXRail,"marginLeft"))+r.toInt(o.css(e.scrollbarXRail,"marginRight")),e.railYMarginHeight=r.toInt(o.css(e.scrollbarYRail,"marginTop"))+r.toInt(o.css(e.scrollbarYRail,"marginBottom")),o.css(e.scrollbarXRail,"display","none"),o.css(e.scrollbarYRail,"display","none"),i(t),s(t,"top",t.scrollTop),s(t,"left",t.scrollLeft),o.css(e.scrollbarXRail,"display",""),o.css(e.scrollbarYRail,"display",""))}},{"../lib/dom":3,"../lib/helper":6,"./instances":18,"./update-geometry":19,"./update-scroll":20}]},{},[1]);
diff --git a/web/gui/lib/tableExport-1.6.0.min.js b/web/gui/lib/tableExport-1.6.0.min.js
new file mode 100644
index 0000000..4841309
--- /dev/null
+++ b/web/gui/lib/tableExport-1.6.0.min.js
@@ -0,0 +1,55 @@
+/*
+ tableExport.jquery.plugin
+
+ Copyright (c) 2015,2016 hhurz, https://github.com/hhurz/tableExport.jquery.plugin
+ Original work Copyright (c) 2014 Giri Raj, https://github.com/kayalshri/
+
+ Licensed under the MIT License, http://opensource.org/licenses/mit-license
+
+ SPDX-License-Identifier: MIT
+*/
+(function(b){b.fn.extend({tableExport:function(q){function R(d){var a=[];b(d).find("thead").first().find("th").each(function(d,f){void 0!==b(f).attr("data-field")?a[d]=b(f).attr("data-field"):a[d]=d.toString()});return a}function w(d,g,e,f,k){if(-1==b.inArray(e,a.ignoreRow)&&-1==b.inArray(e-f,a.ignoreRow)){var B=b(d).filter(function(){return"none"!=b(this).data("tableexport-display")&&(b(this).is(":visible")||"always"==b(this).data("tableexport-display")||"always"==b(this).closest("table").data("tableexport-display"))}).find(g),
+F=0;B.each(function(d){if("always"==b(this).data("tableexport-display")||"none"!=b(this).css("display")&&"hidden"!=b(this).css("visibility")&&"none"!=b(this).data("tableexport-display")){var g=d,f=!1;0<a.ignoreColumn.length&&("string"==typeof a.ignoreColumn[0]?I.length>g&&"undefined"!=typeof I[g]&&-1!=b.inArray(I[g],a.ignoreColumn)&&(f=!0):"number"!=typeof a.ignoreColumn[0]||-1==b.inArray(g,a.ignoreColumn)&&-1==b.inArray(g-B.length,a.ignoreColumn)||(f=!0));if(0==f&&"function"===typeof k){var f=0,
+h,l=0;if("undefined"!=typeof x[e]&&0<x[e].length)for(g=0;g<=d;g++)"undefined"!=typeof x[e][g]&&(k(null,e,g),delete x[e][g],d++);b(this).is("[colspan]")&&(f=parseInt(b(this).attr("colspan")),F+=0<f?f-1:0);b(this).is("[rowspan]")&&(l=parseInt(b(this).attr("rowspan")));k(this,e,d);for(g=0;g<f-1;g++)k(null,e,d+g);if(l)for(h=1;h<l;h++)for("undefined"==typeof x[e+h]&&(x[e+h]=[]),x[e+h][d+F]="",g=1;g<f;g++)x[e+h][d+F-g]=""}}});if("undefined"!=typeof x[e]&&0<x[e].length)for(c=0;c<=x[e].length;c++)"undefined"!=
+typeof x[e][c]&&(k(null,e,c),delete x[e][c])}}function Y(d){!0===a.consoleLog&&console.log(d.output());if("string"===a.outputMode)return d.output();if("base64"===a.outputMode)return G(d.output());try{var g=d.output("blob");saveAs(g,a.fileName+".pdf")}catch(e){C(a.fileName+".pdf","data:application/pdf;base64,",d.output())}}function Z(d,a,e){var f=0;"undefined"!=typeof e&&(f=e.colspan);if(0<=f){for(var k=d.width,b=d.textPos.x,F=a.table.columns.indexOf(a.column),h=1;h<f;h++)k+=a.table.columns[F+h].width;
+1<f&&("right"===d.styles.halign?b=d.textPos.x+k-d.width:"center"===d.styles.halign&&(b=d.textPos.x+(k-d.width)/2));d.width=k;d.textPos.x=b;"undefined"!=typeof e&&1<e.rowspan&&(d.height*=e.rowspan);if("middle"===d.styles.valign||"bottom"===d.styles.valign)e=("string"===typeof d.text?d.text.split(/\r\n|\r|\n/g):d.text).length||1,2<e&&(d.textPos.y-=(2-1.15)/2*a.row.styles.fontSize*(e-2)/3);return!0}return!1}function aa(d,g,e){g.each(function(){var g=b(this).children();if(b(this).is("div")){var k=M(D(this,
+"background-color"),[255,255,255]),B=M(D(this,"border-top-color"),[0,0,0]),F=N(this,"border-top-width",a.jspdf.unit),h=this.getBoundingClientRect(),l=this.offsetLeft*e.dw,m=this.offsetTop*e.dh,n=h.width*e.dw,h=h.height*e.dh;e.doc.setDrawColor.apply(void 0,B);e.doc.setFillColor.apply(void 0,k);e.doc.setLineWidth(F);e.doc.rect(d.x+l,d.y+m,n,h,F?"FD":"F")}"undefined"!=typeof g&&0<g.length&&aa(d,g,e)})}function S(d,a,e){return d.replace(new RegExp(a.replace(/([.*+?^=!:${}()|\[\]\/\\])/g,"\\$1"),"g"),
+e)}function fa(d){d=S(d||"0",a.numbers.html.decimalMark,".");d=S(d,a.numbers.html.thousandsSeparator,"");return"number"===typeof d||!1!==jQuery.isNumeric(d)?d:!1}function v(d,g,e){var f="";if(null!=d){d=b(d);var k;k=d[0].hasAttribute("data-tableexport-value")?d.data("tableexport-value"):d.html();"function"===typeof a.onCellHtmlData&&(k=a.onCellHtmlData(d,g,e,k));if(!0===a.htmlContent)f=b.trim(k);else{var B=k.replace(/\n/g,"\u2028").replace(/<br\s*[\/]?>/gi,"\u2060");k=b("<div/>").html(B).contents();
+B="";b.each(k.text().split("\u2028"),function(d,a){0<d&&(B+=" ");B+=b.trim(a)});b.each(B.split("\u2060"),function(d,a){0<d&&(f+="\n");f+=b.trim(a).replace(/\u00AD/g,"")});if(a.numbers.html.decimalMark!=a.numbers.output.decimalMark||a.numbers.html.thousandsSeparator!=a.numbers.output.thousandsSeparator)if(k=fa(f),!1!==k){var h=(""+k).split(".");1==h.length&&(h[1]="");var l=3<h[0].length?h[0].length%3:0,f=(0>k?"-":"")+(a.numbers.output.thousandsSeparator?(l?h[0].substr(0,l)+a.numbers.output.thousandsSeparator:
+"")+h[0].substr(l).replace(/(\d{3})(?=\d)/g,"$1"+a.numbers.output.thousandsSeparator):h[0])+(h[1].length?a.numbers.output.decimalMark+h[1]:"")}}!0===a.escape&&(f=escape(f));"function"===typeof a.onCellData&&(f=a.onCellData(d,g,e,f))}return f}function ga(d,a,e){return a+"-"+e.toLowerCase()}function M(d,a){var e=/^rgb\((\d{1,3}),\s*(\d{1,3}),\s*(\d{1,3})\)$/.exec(d),f=a;e&&(f=[parseInt(e[1]),parseInt(e[2]),parseInt(e[3])]);return f}function ba(d){var a=D(d,"text-align"),e=D(d,"font-weight"),f=D(d,"font-style"),
+k="";"start"==a&&(a="rtl"==D(d,"direction")?"right":"left");700<=e&&(k="bold");"italic"==f&&(k+=f);""==k&&(k="normal");a={style:{align:a,bcolor:M(D(d,"background-color"),[255,255,255]),color:M(D(d,"color"),[0,0,0]),fstyle:k},colspan:parseInt(b(d).attr("colspan"))||0,rowspan:parseInt(b(d).attr("rowspan"))||0};null!==d&&(d=d.getBoundingClientRect(),a.rect={width:d.width,height:d.height});return a}function D(d,a){try{return window.getComputedStyle?(a=a.replace(/([a-z])([A-Z])/,ga),window.getComputedStyle(d,
+null).getPropertyValue(a)):d.currentStyle?d.currentStyle[a]:d.style[a]}catch(e){}return""}function N(a,g,e){g=D(a,g).match(/\d+/);if(null!==g){g=g[0];a=a.parentElement;var f=document.createElement("div");f.style.overflow="hidden";f.style.visibility="hidden";a.appendChild(f);f.style.width=100+e;e=100/f.offsetWidth;a.removeChild(f);return g*e}return 0}function T(){if(!(this instanceof T))return new T;this.SheetNames=[];this.Sheets={}}function ha(a){for(var g=new ArrayBuffer(a.length),e=new Uint8Array(g),
+f=0;f!=a.length;++f)e[f]=a.charCodeAt(f)&255;return g}function ia(a){for(var g={},e={s:{c:1E7,r:1E7},e:{c:0,r:0}},f=0;f!=a.length;++f)for(var k=0;k!=a[f].length;++k){e.s.r>f&&(e.s.r=f);e.s.c>k&&(e.s.c=k);e.e.r<f&&(e.e.r=f);e.e.c<k&&(e.e.c=k);var b={v:a[f][k]};if(null!=b.v){var h=XLSX.utils.encode_cell({c:k,r:f});"number"===typeof b.v?b.t="n":"boolean"===typeof b.v?b.t="b":b.v instanceof Date?(b.t="n",b.z=XLSX.SSF._table[14],b.v=datenum(b.v)):b.t="s";g[h]=b}}1E7>e.s.c&&(g["!ref"]=XLSX.utils.encode_range(e));
+return g}function C(a,g,e){var f=window.navigator.userAgent;if(0<f.indexOf("MSIE ")||f.match(/Trident.*rv\:11\./)){if(g=document.createElement("iframe"))document.body.appendChild(g),g.setAttribute("style","display:none"),g.contentDocument.open("txt/html","replace"),g.contentDocument.write(e),g.contentDocument.close(),g.focus(),g.contentDocument.execCommand("SaveAs",!0,a),document.body.removeChild(g)}else if(f=document.createElement("a")){f.style.display="none";f.download=a;0<=g.toLowerCase().indexOf("base64,")?
+f.href=g+G(e):f.href=g+encodeURIComponent(e);document.body.appendChild(f);if(document.createEvent)null==O&&(O=document.createEvent("MouseEvents")),O.initEvent("click",!0,!1),f.dispatchEvent(O);else if(document.createEventObject)f.fireEvent("onclick");else if("function"==typeof f.onclick)f.onclick();document.body.removeChild(f)}}function G(a){var g="",e,f,k,b,h,l,m=0;a=a.replace(/\x0d\x0a/g,"\n");f="";for(k=0;k<a.length;k++)b=a.charCodeAt(k),128>b?f+=String.fromCharCode(b):(127<b&&2048>b?f+=String.fromCharCode(b>>
+6|192):(f+=String.fromCharCode(b>>12|224),f+=String.fromCharCode(b>>6&63|128)),f+=String.fromCharCode(b&63|128));for(a=f;m<a.length;)e=a.charCodeAt(m++),f=a.charCodeAt(m++),k=a.charCodeAt(m++),b=e>>2,e=(e&3)<<4|f>>4,h=(f&15)<<2|k>>6,l=k&63,isNaN(f)?h=l=64:isNaN(k)&&(l=64),g=g+"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/=".charAt(b)+"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/=".charAt(e)+"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/=".charAt(h)+
+"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/=".charAt(l);return g}var a={consoleLog:!1,csvEnclosure:'"',csvSeparator:",",csvUseBOM:!0,displayTableName:!1,escape:!1,excelstyles:[],fileName:"tableExport",htmlContent:!1,ignoreColumn:[],ignoreRow:[],jsonScope:"all",jspdf:{orientation:"p",unit:"pt",format:"a4",margins:{left:20,right:10,top:10,bottom:10},autotable:{styles:{cellPadding:2,rowHeight:12,fontSize:8,fillColor:255,textColor:50,fontStyle:"normal",overflow:"ellipsize",halign:"left",
+valign:"middle"},headerStyles:{fillColor:[52,73,94],textColor:255,fontStyle:"bold",halign:"center"},alternateRowStyles:{fillColor:245},tableExport:{onAfterAutotable:null,onBeforeAutotable:null,onTable:null}}},numbers:{html:{decimalMark:".",thousandsSeparator:","},output:{decimalMark:".",thousandsSeparator:","}},onCellData:null,onCellHtmlData:null,outputMode:"file",pdfmake:{enabled:!1},tbodySelector:"tr",tfootSelector:"tr",theadSelector:"tr",tableName:"myTableName",type:"csv",worksheetName:"xlsWorksheetName"},
+r=this,O=null,t=[],l=[],m=0,x=[],n="",I=[];b.extend(!0,a,q);I=R(r);if("csv"==a.type||"txt"==a.type){q=function(d,g,e,f){l=b(r).find(d).first().find(g);l.each(function(){n="";w(this,e,m,f+l.length,function(d,e,g){var b=n,f="";if(null!=d)if(d=v(d,e,g),e=null===d||""==d?"":d.toString(),d instanceof Date)f=a.csvEnclosure+d.toLocaleString()+a.csvEnclosure;else if(f=S(e,a.csvEnclosure,a.csvEnclosure+a.csvEnclosure),0<=f.indexOf(a.csvSeparator)||/[\r\n ]/g.test(f))f=a.csvEnclosure+f+a.csvEnclosure;n=b+(f+
+a.csvSeparator)});n=b.trim(n).substring(0,n.length-1);0<n.length&&(0<E.length&&(E+="\n"),E+=n);m++});return l.length};var E="",y=0,m=0,y=y+q("thead",a.theadSelector,"th,td",y),y=y+q("tbody",a.tbodySelector,"td",y);a.tfootSelector.length&&q("tfoot",a.tfootSelector,"td",y);E+="\n";!0===a.consoleLog&&console.log(E);if("string"===a.outputMode)return E;if("base64"===a.outputMode)return G(E);try{var z=new Blob([E],{type:"text/"+("csv"==a.type?"csv":"plain")+";charset=utf-8"});saveAs(z,a.fileName+"."+a.type,
+"csv"!=a.type||!1===a.csvUseBOM)}catch(d){C(a.fileName+"."+a.type,"data:text/"+("csv"==a.type?"csv":"plain")+";charset=utf-8,"+("csv"==a.type&&a.csvUseBOM?"\ufeff":""),E)}}else if("sql"==a.type){var m=0,u="INSERT INTO `"+a.tableName+"` (",t=b(r).find("thead").first().find(a.theadSelector);t.each(function(){w(this,"th,td",m,t.length,function(a,g,e){u+="'"+v(a,g,e)+"',"});m++;u=b.trim(u);u=b.trim(u).substring(0,u.length-1)});u+=") VALUES ";l=b(r).find("tbody").first().find(a.tbodySelector);a.tfootSelector.length&&
+l.push.apply(l,b(r).find("tfoot").find(a.tfootSelector));l.each(function(){n="";w(this,"td",m,t.length+l.length,function(a,g,e){n+="'"+v(a,g,e)+"',"});3<n.length&&(u+="("+n,u=b.trim(u).substring(0,u.length-1),u+="),");m++});u=b.trim(u).substring(0,u.length-1);u+=";";!0===a.consoleLog&&console.log(u);if("string"===a.outputMode)return u;if("base64"===a.outputMode)return G(u);try{z=new Blob([u],{type:"text/plain;charset=utf-8"}),saveAs(z,a.fileName+".sql")}catch(d){C(a.fileName+".sql","data:application/sql;charset=utf-8,",
+u)}}else if("json"==a.type){var J=[],t=b(r).find("thead").first().find(a.theadSelector);t.each(function(){var a=[];w(this,"th,td",m,t.length,function(g,e,b){a.push(v(g,e,b))});J.push(a)});var U=[],l=b(r).find("tbody").first().find(a.tbodySelector);a.tfootSelector.length&&l.push.apply(l,b(r).find("tfoot").find(a.tfootSelector));l.each(function(){var a={},g=0;w(this,"td",m,t.length+l.length,function(e,b,k){J.length?a[J[J.length-1][g]]=v(e,b,k):a[g]=v(e,b,k);g++});0==b.isEmptyObject(a)&&U.push(a);m++});
+q="";q="head"==a.jsonScope?JSON.stringify(J):"data"==a.jsonScope?JSON.stringify(U):JSON.stringify({header:J,data:U});!0===a.consoleLog&&console.log(q);if("string"===a.outputMode)return q;if("base64"===a.outputMode)return G(q);try{z=new Blob([q],{type:"application/json;charset=utf-8"}),saveAs(z,a.fileName+".json")}catch(d){C(a.fileName+".json","data:application/json;charset=utf-8;base64,",q)}}else if("xml"===a.type){var m=0,A='<?xml version="1.0" encoding="utf-8"?>',A=A+"<tabledata><fields>",t=b(r).find("thead").first().find(a.theadSelector);
+t.each(function(){w(this,"th,td",m,l.length,function(a,b,e){A+="<field>"+v(a,b,e)+"</field>"});m++});var A=A+"</fields><data>",ca=1,l=b(r).find("tbody").first().find(a.tbodySelector);a.tfootSelector.length&&l.push.apply(l,b(r).find("tfoot").find(a.tfootSelector));l.each(function(){var a=1;n="";w(this,"td",m,t.length+l.length,function(b,e,f){n+="<column-"+a+">"+v(b,e,f)+"</column-"+a+">";a++});0<n.length&&"<column-1></column-1>"!=n&&(A+='<row id="'+ca+'">'+n+"</row>",ca++);m++});A+="</data></tabledata>";
+!0===a.consoleLog&&console.log(A);if("string"===a.outputMode)return A;if("base64"===a.outputMode)return G(A);try{z=new Blob([A],{type:"application/xml;charset=utf-8"}),saveAs(z,a.fileName+".xml")}catch(d){C(a.fileName+".xml","data:application/xml;charset=utf-8;base64,",A)}}else if("excel"==a.type||"xls"==a.type||"word"==a.type||"doc"==a.type){q="excel"==a.type||"xls"==a.type?"excel":"word";var y="excel"==q?"xls":"doc",p='xmlns:x="urn:schemas-microsoft-com:office:'+q+'"',H="";b(r).filter(function(){return"none"!=
+b(this).data("tableexport-display")&&(b(this).is(":visible")||"always"==b(this).data("tableexport-display"))}).each(function(){m=0;I=R(this);H+="<table><thead>";t=b(this).find("thead").first().find(a.theadSelector);t.each(function(){n="";w(this,"th,td",m,t.length,function(d,g,e){if(null!=d){var f="";n+="<th";for(var k in a.excelstyles)if(a.excelstyles.hasOwnProperty(k)){var h=b(d).css(a.excelstyles[k]);""!=h&&"0px none rgb(0, 0, 0)"!=h&&(""==f&&(f='style="'),f+=a.excelstyles[k]+":"+h+";")}""!=f&&
+(n+=" "+f+'"');b(d).is("[colspan]")&&(n+=' colspan="'+b(d).attr("colspan")+'"');b(d).is("[rowspan]")&&(n+=' rowspan="'+b(d).attr("rowspan")+'"');n+=">"+v(d,g,e)+"</th>"}});0<n.length&&(H+="<tr>"+n+"</tr>");m++});H+="</thead><tbody>";l=b(this).find("tbody").first().find(a.tbodySelector);a.tfootSelector.length&&l.push.apply(l,b(r).find("tfoot").find(a.tfootSelector));l.each(function(){n="";w(this,"td",m,t.length+l.length,function(d,g,e){if(null!=d){var f="",k=b(d).data("tableexport-msonumberformat");
+"undefined"==typeof k&&"function"===typeof a.onMsoNumberFormat&&(k=a.onMsoNumberFormat(d,g,e));"undefined"!=typeof k&&""!=k&&(f="style=\"mso-number-format:'"+k+"'");n+="<td";for(var h in a.excelstyles)a.excelstyles.hasOwnProperty(h)&&(k=b(d).css(a.excelstyles[h]),""!=k&&"0px none rgb(0, 0, 0)"!=k&&(""==f&&(f='style="'),f+=a.excelstyles[h]+":"+k+";"));""!=f&&(n+=" "+f+'"');b(d).is("[colspan]")&&(n+=' colspan="'+b(d).attr("colspan")+'"');b(d).is("[rowspan]")&&(n+=' rowspan="'+b(d).attr("rowspan")+'"');
+n+=">"+v(d,g,e)+"</td>"}});0<n.length&&(H+="<tr>"+n+"</tr>");m++});a.displayTableName&&(H+="<tr><td></td></tr><tr><td></td></tr><tr><td>"+v(b("<p>"+a.tableName+"</p>"))+"</td></tr>");H+="</tbody></table>";!0===a.consoleLog&&console.log(H)});p='<html xmlns:o="urn:schemas-microsoft-com:office:office" '+p+' xmlns="http://www.w3.org/TR/REC-html40">'+('<meta http-equiv="content-type" content="application/vnd.ms-'+q+'; charset=UTF-8">')+"<head>";"excel"===q&&(p+="\x3c!--[if gte mso 9]>",p+="<xml>",p+="<x:ExcelWorkbook>",
+p+="<x:ExcelWorksheets>",p+="<x:ExcelWorksheet>",p+="<x:Name>",p+=a.worksheetName,p+="</x:Name>",p+="<x:WorksheetOptions>",p+="<x:DisplayGridlines/>",p+="</x:WorksheetOptions>",p+="</x:ExcelWorksheet>",p+="</x:ExcelWorksheets>",p+="</x:ExcelWorkbook>",p+="</xml>",p+="<![endif]--\x3e");p+="</head>";p+="<body>";p+=H;p+="</body>";p+="</html>";!0===a.consoleLog&&console.log(p);if("string"===a.outputMode)return p;if("base64"===a.outputMode)return G(p);try{z=new Blob([p],{type:"application/vnd.ms-"+a.type}),
+saveAs(z,a.fileName+"."+y)}catch(d){C(a.fileName+"."+y,"data:application/vnd.ms-"+q+";base64,",p)}}else if("xlsx"==a.type){var P=[],V=[],m=0,l=b(r).find("thead").first().find(a.theadSelector);l.push.apply(l,b(r).find("tbody").first().find(a.tbodySelector));a.tfootSelector.length&&l.push.apply(l,b(r).find("tfoot").find(a.tfootSelector));l.each(function(){var a=[];w(this,"th,td",m,l.length,function(b,e,f){if("undefined"!==typeof b&&null!=b){var k=b.getAttribute("colspan"),h=b.getAttribute("rowspan");
+b=v(b,e,f);""!==b&&b==+b&&(b=+b);V.forEach(function(b){if(m>=b.s.r&&m<=b.e.r&&a.length>=b.s.c&&a.length<=b.e.c)for(var e=0;e<=b.e.c-b.s.c;++e)a.push(null)});if(h||k)k=k||1,V.push({s:{r:m,c:a.length},e:{r:m+(h||1)-1,c:a.length+k-1}});a.push(""!==b?b:null);if(k)for(h=0;h<k-1;++h)a.push(null)}});P.push(a);m++});console.log(P);q=new T;y=ia(P);y["!merges"]=V;q.SheetNames.push(a.worksheetName);q.Sheets[a.worksheetName]=y;q=XLSX.write(q,{bookType:a.type,bookSST:!1,type:"binary"});try{z=new Blob([ha(q)],
+{type:"application/vnd.openxmlformats-officedocument.spreadsheetml.sheet; charset=UTF-8"}),saveAs(z,a.fileName+"."+a.type)}catch(d){C(a.fileName+"."+a.type,"application/vnd.openxmlformats-officedocument.spreadsheetml.sheet; charset=UTF-8",P)}}else if("png"==a.type)html2canvas(b(r)[0]).then(function(b){b=b.toDataURL();b=b.substring(22);for(var g=atob(b),e=new ArrayBuffer(g.length),f=new Uint8Array(e),k=0;k<g.length;k++)f[k]=g.charCodeAt(k);!0===a.consoleLog&&console.log(g);if("string"===a.outputMode)return g;
+if("base64"===a.outputMode)return G(b);try{var h=new Blob([e],{type:"image/png"});saveAs(h,a.fileName+".png")}catch(l){C(a.fileName+".png","data:image/png,",b)}});else if("pdf"==a.type)if(!0===a.pdfmake.enabled){var W=[],X=[],m=0,t=b(this).find("thead").find(a.theadSelector);t.each(function(){var a=[];w(this,"th,td",m,t.length,function(b,f,k){a.push(v(b,f,k))});a.length&&X.push(a);for(var b=W.length;b<a.length;b++)W.push("*");m++});l=b(this).find("tbody").find(a.tbodySelector);a.tfootSelector.length&&
+l.push.apply(l,b(r).find("tfoot").find(a.tfootSelector));l.each(function(){var a=[];w(this,"td",m,t.length+l.length,function(b,e,f){a.push(v(b,e,f))});a.length&&X.push(a);m++});pdfMake.createPdf({pageOrientation:"landscape",content:[{table:{headerRows:t.length,widths:W,body:X}}]}).getBuffer(function(b){try{var g=new Blob([b],{type:"application/pdf"});saveAs(g,a.fileName+".pdf")}catch(e){C(a.fileName+".pdf","data:application/pdf;base64,",b)}})}else if(!1===a.jspdf.autotable){var z={dim:{w:N(b(r).first().get(0),
+"width","mm"),h:N(b(r).first().get(0),"height","mm")},pagesplit:!1},da=new jsPDF(a.jspdf.orientation,a.jspdf.unit,a.jspdf.format);da.addHTML(b(r).first(),a.jspdf.margins.left,a.jspdf.margins.top,z,function(){Y(da)})}else{var h=a.jspdf.autotable.tableExport;if("string"===typeof a.jspdf.format&&"bestfit"===a.jspdf.format.toLowerCase()){var K={a0:[2383.94,3370.39],a1:[1683.78,2383.94],a2:[1190.55,1683.78],a3:[841.89,1190.55],a4:[595.28,841.89]},Q="",L="",ea=0;b(r).filter(":visible").each(function(){if("none"!=
+b(this).css("display")){var a=N(b(this).get(0),"width","pt");if(a>ea){a>K.a0[0]&&(Q="a0",L="l");for(var g in K)K.hasOwnProperty(g)&&K[g][1]>a&&(Q=g,L="l",K[g][0]>a&&(L="p"));ea=a}}});a.jspdf.format=""==Q?"a4":Q;a.jspdf.orientation=""==L?"w":L}h.doc=new jsPDF(a.jspdf.orientation,a.jspdf.unit,a.jspdf.format);b(r).filter(function(){return"none"!=b(this).data("tableexport-display")&&(b(this).is(":visible")||"always"==b(this).data("tableexport-display"))}).each(function(){var d,g=0;I=R(this);h.columns=
+[];h.rows=[];h.rowoptions={};if("function"===typeof h.onTable&&!1===h.onTable(b(this),a))return!0;a.jspdf.autotable.tableExport=null;var e=b.extend(!0,{},a.jspdf.autotable);a.jspdf.autotable.tableExport=h;e.margin={};b.extend(!0,e.margin,a.jspdf.margins);e.tableExport=h;"function"!==typeof e.beforePageContent&&(e.beforePageContent=function(a){1==a.pageCount&&a.table.rows.concat(a.table.headerRow).forEach(function(b){0<b.height&&(b.height+=(2-1.15)/2*b.styles.fontSize,a.table.height+=(2-1.15)/2*b.styles.fontSize)})});
+"function"!==typeof e.createdHeaderCell&&(e.createdHeaderCell=function(a,d){a.styles=b.extend({},d.row.styles);if("undefined"!=typeof h.columns[d.column.dataKey]){var f=h.columns[d.column.dataKey];if("undefined"!=typeof f.rect){var g;a.contentWidth=f.rect.width;if("undefined"==typeof h.heightRatio||0==h.heightRatio)g=d.row.raw[d.column.dataKey].rowspan?d.row.raw[d.column.dataKey].rect.height/d.row.raw[d.column.dataKey].rowspan:d.row.raw[d.column.dataKey].rect.height,h.heightRatio=a.styles.rowHeight/
+g;g=d.row.raw[d.column.dataKey].rect.height*h.heightRatio;g>a.styles.rowHeight&&(a.styles.rowHeight=g)}"undefined"!=typeof f.style&&!0!==f.style.hidden&&(a.styles.halign=f.style.align,"inherit"===e.styles.fillColor&&(a.styles.fillColor=f.style.bcolor),"inherit"===e.styles.textColor&&(a.styles.textColor=f.style.color),"inherit"===e.styles.fontStyle&&(a.styles.fontStyle=f.style.fstyle))}});"function"!==typeof e.createdCell&&(e.createdCell=function(a,b){var d=h.rowoptions[b.row.index+":"+b.column.dataKey];
+"undefined"!=typeof d&&"undefined"!=typeof d.style&&!0!==d.style.hidden&&(a.styles.halign=d.style.align,"inherit"===e.styles.fillColor&&(a.styles.fillColor=d.style.bcolor),"inherit"===e.styles.textColor&&(a.styles.textColor=d.style.color),"inherit"===e.styles.fontStyle&&(a.styles.fontStyle=d.style.fstyle))});"function"!==typeof e.drawHeaderCell&&(e.drawHeaderCell=function(a,b){var d=h.columns[b.column.dataKey];return(1!=d.style.hasOwnProperty("hidden")||!0!==d.style.hidden)&&0<=d.rowIndex?Z(a,b,d):
+!1});"function"!==typeof e.drawCell&&(e.drawCell=function(a,b){var d=h.rowoptions[b.row.index+":"+b.column.dataKey];if(Z(a,b,d)){h.doc.rect(a.x,a.y,a.width,a.height,a.styles.fillStyle);if("undefined"!=typeof d&&"undefined"!=typeof d.kids&&0<d.kids.length){var e=a.height/d.rect.height;if(e>h.dh||"undefined"==typeof h.dh)h.dh=e;h.dw=a.width/d.rect.width;aa(a,d.kids,h)}h.doc.autoTableText(a.text,a.textPos.x,a.textPos.y,{halign:a.styles.halign,valign:a.styles.valign})}return!1});h.headerrows=[];t=b(this).find("thead").find(a.theadSelector);
+t.each(function(){d=0;h.headerrows[g]=[];w(this,"th,td",g,t.length,function(a,b,e){var f=ba(a);f.title=v(a,b,e);f.key=d++;f.rowIndex=g;h.headerrows[g].push(f)});g++});0<g&&b.each(h.headerrows[g-1],function(){obj=1<g&&null==this.rect?h.headerrows[g-2][this.key]:this;null!=obj&&h.columns.push(obj)});var f=0;l=b(this).find("tbody").find(a.tbodySelector);a.tfootSelector.length&&l.push.apply(l,b(r).find("tfoot").find(a.tfootSelector));l.each(function(){var a=[];d=0;w(this,"td",g,t.length+l.length,function(e,
+g,l){if("undefined"===typeof h.columns[d]){var m={title:"",key:d,style:{hidden:!0}};h.columns.push(m)}"undefined"!==typeof e&&null!=e?(m=ba(e),m.kids=b(e).children()):(m=b.extend(!0,{},h.rowoptions[f+":"+(d-1)]),m.colspan=-1);h.rowoptions[f+":"+d++]=m;a.push(v(e,g,l))});a.length&&(h.rows.push(a),f++);g++});if("function"===typeof h.onBeforeAutotable)h.onBeforeAutotable(b(this),h.columns,h.rows,e);h.doc.autoTable(h.columns,h.rows,e);if("function"===typeof h.onAfterAutotable)h.onAfterAutotable(b(this),
+e);a.jspdf.autotable.startY=h.doc.autoTableEndPosY()+e.margin.top});Y(h.doc);"undefined"!=typeof h.headerrows&&(h.headerrows.length=0);"undefined"!=typeof h.columns&&(h.columns.length=0);"undefined"!=typeof h.rows&&(h.rows.length=0);delete h.doc;h.doc=null}return this}})})(jQuery);
diff --git a/web/gui/main.css b/web/gui/main.css
new file mode 100644
index 0000000..870a1fd
--- /dev/null
+++ b/web/gui/main.css
@@ -0,0 +1,764 @@
+/* hide the vertical window scrollbar */
+html {
+ overflow-y: hidden;
+}
+
+/* prevent body from hiding under the navbar */
+body {
+ padding-top: 50px;
+}
+
+.loadOverlay {
+ position: absolute;
+ top: 0px;
+ left: 0px;
+ width: 100%;
+ height: 100%;
+ z-index: 2000;
+ font-size: 10vh;
+ font-family: sans-serif;
+ padding: 40vh 0 40vh 0;
+ font-weight: bold;
+ text-align: center;
+}
+
+.navbar-highlight {
+ display: none;
+ position: fixed;
+ margin-top: 5px;
+ height: 26px;
+ width: 100%;
+ text-align: center;
+ overflow: hidden;
+ z-index: 30;
+ pointer-events: none !important;
+}
+
+.navbar-highlight-content {
+ position: relative;
+ display: inline-block;
+ margin: 0 auto;
+ height: 26px;
+ min-width: 500px;
+ background-color: rgba(0, 0, 0, 0.7);
+ padding-top: 2px;
+ padding-bottom: 2px;
+ padding-left: 15px;
+ padding-right: 15px;
+ border-radius: 10px;
+ color: lightgrey;
+ pointer-events: auto !important;
+}
+
+.navbar-highlight-bar {
+ cursor: pointer;
+}
+
+.navbar-highlight-button-right {
+ cursor: pointer;
+ padding-left: 10px;
+}
+
+.modal-wide .modal-dialog {
+ width: 80%;
+}
+
+/* fix # anchors scrolling under the navbar
+ https://github.com/twbs/bootstrap/issues/1768#issuecomment-46519033
+ */
+h1 {
+ position: relative;
+ z-index: -1;
+}
+
+h2 {
+ position: relative;
+ z-index: -2;
+}
+
+h1:before, h2:before {
+ display: block;
+ content: " ";
+ margin-top: -70px;
+ height: 70px;
+ visibility: hidden;
+}
+
+.p {
+ display: block;
+ margin-top: 15px;
+}
+
+.option-row,
+.option-control {
+ vertical-align: top;
+ padding: 10px;
+ padding-top: 30px;
+ padding-left: 30px;
+}
+
+.option-info {
+ padding: 10px;
+}
+
+.dashboard-submenu-info {
+ display: block;
+ margin-top: 10px;
+}
+
+.dashboard-context-info {
+ display: block;
+ margin-top: 10px;
+}
+
+#masthead h1 {
+ /*font-size: 30px;*/
+ line-height: 1;
+ padding-top: 30px;
+}
+
+#masthead .well {
+ margin-top: 4%;
+}
+
+/* fix the navbar shifting when a modal is open */
+/* https://github.com/twbs/bootstrap/issues/14040#issuecomment-159891033 */
+body.modal-open {
+ width: 100% !important;
+ padding-right: 0 !important;
+ /* overflow-y: scroll !important; */
+ /* position: fixed !important;*/
+ overflow: visible;
+}
+
+/* make accordion use the whole header bar for expand/collapse */
+.panel-title a {
+ display: block;
+ padding: 10px 15px;
+ margin: -10px -15px;
+}
+
+/*
+ * Side navigation
+ *
+ * Scrollspy and affixed enhanced navigation to highlight sections and secondary
+ * sections of docs content.
+ */
+
+.affix {
+ position: static;
+ top: 70px !important;
+ /*width: 220px;*/
+}
+
+.dashboard-sidebar {
+ max-height: calc(100% - 70px) !important;
+ overflow-y: auto;
+ /*width: 220px !important;*/
+}
+
+/* By default it's not affixed in mobile views, so undo that */
+.dashboard-sidebar.affix {
+ position: static;
+}
+
+/* First level of nav */
+.dashboard-sidenav {
+ margin-top: 20px;
+ margin-bottom: 20px;
+}
+
+/* All levels of nav */
+.dashboard-sidebar .nav > li > a {
+ display: block;
+ padding: 4px 20px;
+ font-size: 13px;
+ font-weight: 500;
+ color: #767676;
+}
+
+.dashboard-sidebar .nav > li > a > .svg-inline--fa {
+ width: 20px;
+ text-align: center;
+}
+
+.dashboard-sidebar .nav > li > a:hover,
+.dashboard-sidebar .nav > li > a:focus {
+ padding-left: 19px;
+ color: #563d7c;
+ text-decoration: none;
+ background-color: transparent;
+ border-left: 1px solid #563d7c;
+}
+
+.dashboard-sidebar .nav > .active > a,
+.dashboard-sidebar .nav > .active:hover > a,
+.dashboard-sidebar .nav > .active:focus > a {
+ padding-left: 18px;
+ font-weight: bold;
+ color: #563d7c;
+ background-color: transparent;
+ border-left: 2px solid #563d7c;
+}
+
+/* Nav: second level (shown on .active) */
+.dashboard-sidebar .nav .nav {
+ display: none; /* Hide by default, but at >768px, show it */
+ padding-bottom: 10px;
+}
+
+.dashboard-sidebar .nav .nav > li > a {
+ padding-top: 1px;
+ padding-bottom: 1px;
+ padding-left: 30px;
+ font-size: 12px;
+ font-weight: normal;
+}
+
+.dashboard-sidebar .nav .nav > li > a:hover,
+.dashboard-sidebar .nav .nav > li > a:focus {
+ padding-left: 29px;
+}
+
+.dashboard-sidebar .nav .nav > .active > a,
+.dashboard-sidebar .nav .nav > .active:hover > a,
+.dashboard-sidebar .nav .nav > .active:focus > a {
+ padding-left: 28px;
+ font-weight: 500;
+}
+
+.dropdown-menu {
+ min-width: 200px;
+}
+
+.dropdown-menu.columns-2 {
+ margin: 0;
+ padding: 0;
+ width: 400px;
+}
+
+.dropdown-menu li a {
+ padding: 5px 15px;
+ font-weight: 300;
+}
+
+.dropdown-menu.multi-column {
+ overflow-x: hidden;
+}
+
+.multi-column-dropdown {
+ list-style: none;
+ padding: 0;
+}
+
+.multi-column-dropdown li a {
+ display: inline-block;
+ clear: both;
+ line-height: 1.428571429;
+ white-space: normal;
+}
+
+.multi-column-dropdown li a:hover {
+ text-decoration: none;
+ color: #f5f5f5;
+ background-color: #262626;
+}
+
+.scrollable-menu {
+ height: auto;
+ max-height: 80vh;
+ overflow-x: hidden;
+}
+
+.scrollable-menu-50 {
+ height: auto;
+ max-height: 50vh;
+ overflow-x: hidden;
+}
+
+/* Back to top (hidden on mobile) */
+.back-to-top,
+.dashboard-theme-toggle {
+ display: none;
+ padding: 4px 10px;
+ margin-top: 10px;
+ margin-left: 10px;
+ font-size: 12px;
+ font-weight: 500;
+ color: #999;
+}
+
+.back-to-top:hover,
+.dashboard-theme-toggle:hover {
+ color: #563d7c;
+ text-decoration: none;
+}
+
+.dashboard-theme-toggle {
+ margin-top: 0;
+}
+
+.container {
+ width: calc(100% - 20px) !important;
+}
+
+.charts-body {
+ display: inline-block;
+ width: 100%;
+}
+
+.sidebar-body {
+ position: absolute;
+ display: none;
+ height: 100vh;
+}
+
+.dashboard-section-container {
+ display: block;
+ width: 100%;
+ page-break-before: auto;
+ page-break-after: auto;
+ page-break-inside: auto;
+}
+
+.dashboard-print-row {
+ display: block;
+ width: 100%;
+ page-break-before: auto;
+ page-break-after: auto;
+ page-break-inside: avoid;
+}
+
+.netdata-chartblock-container {
+ display: inline-block;
+}
+
+/* https://github.com/seiyria/bootstrap-slider/issues/746 */
+.tooltip {
+ -webkit-user-select: none;
+ -moz-user-select: none;
+ -ms-user-select: none;
+ user-select: none;
+}
+
+.action-button {
+ position: relative;
+ display: inline-block;
+ color: gray;
+ cursor: pointer;
+ margin: 0 auto;
+ width: 30px;
+ height: 30px;
+ font-size: 25px;
+}
+
+.ripple {
+ position: relative;
+ /*overflow: hidden;*/
+ transform: translate3d(0, 0, 0)
+}
+
+.ripple:after {
+ content: "";
+ display: block;
+ position: absolute;
+ width: 100%;
+ height: 100%;
+ top: 0;
+ left: 0;
+ pointer-events: none;
+ background-image: radial-gradient(circle, #000 10%, transparent 10.01%);
+ background-repeat: no-repeat;
+ background-position: 50%;
+ transform: scale(18, 18); /* the size of the ripple */
+ opacity: 0;
+ transition: transform .5s, opacity 1s
+}
+
+.ripple:active:after {
+ transform: scale(0, 0);
+ opacity: .2;
+ transition: 0s
+}
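+
+/* usage sketch (hypothetical markup, not part of this file): the ripple is
+   painted by the :after pseudo-element, so the class goes straight onto a
+   clickable element, e.g. <div class="action-button ripple"></div>. On press
+   the pseudo-element snaps to scale(0) at opacity .2; on release the .5s/1s
+   transitions above expand and fade it back out, producing the ripple. */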
+
+/* -------------------------------------------------------------------------- */
+
+#alarms_log_table tbody tr {
+ cursor: pointer;
+}
+
+#my-netdata-dropdown-content {
+ width: 500px;
+}
+
+#my-netdata-dropdown-content a:hover {
+ color: #fff;
+}
+
+#my-netdata-dropdown-content .info-item {
+ height: 32px;
+ line-height: 32px;
+ padding-left: 14px;
+}
+
+#my-netdata-dropdown-content .agent-item {
+ display: flex;
+ align-items: center;
+ min-height: 32px;
+ font-weight: 300;
+}
+
+#my-netdata-dropdown-content .agent-item .__title {
+ cursor: pointer;
+}
+
+#my-netdata-dropdown-content .agent-item:hover {
+ background-color: #262626;
+}
+
+#my-netdata-dropdown-content .agent-item a:hover {
+ text-decoration: none;
+}
+
+#my-netdata-dropdown-content .agent-item > :first-child {
+ width: 40px;
+ text-align: center;
+}
+
+#my-netdata-dropdown-content .agent-item > :last-child {
+ width: 40px;
+ text-align: center;
+}
+
+#my-netdata-dropdown-content .agent-item :nth-child(2) {
+ min-width: 420px;
+ line-height: 32px;
+}
+
+.agent-item--separated {
+ border-top: 1px solid #333;
+}
+
+.agent-item--alternate a {
+ color: #999;
+}
+
+#my-netdata-dropdown-content .agent-alternate-urls.collapsed {
+ display: none;
+}
+
+#my-netdata-dropdown-content hr {
+ display: block;
+ margin-top: 5px;
+ margin-bottom: 0;
+ border-top: 1px solid #333;
+ height: 4px;
+}
+
+/* white theme overrides */
+
+#my-netdata-dropdown-content.theme-white hr {
+ border-top: 1px solid #ddd;
+}
+
+#my-netdata-dropdown-content.theme-white .agent-item:hover {
+ background-color: #e6e6e6;
+}
+
+#my-netdata-dropdown-content.theme-white a {
+ color: #888;
+}
+
+#my-netdata-dropdown-content.theme-white a:hover {
+ color: #000;
+}
+
+#sign-in-iframe {
+ background-color: #fff;
+ border: none;
+}
+
+#cloud-menu {
+}
+
+#cloud-menu.dropdown-menu > li > a {
+ text-align: left;
+}
+
+#my-netdata-menu-filter-input {
+ color: #fff;
+ border: none;
+ background-color: #4b4f55;
+ width: 472px;
+ margin: 5px 14px;
+ margin-right: 0;
+ padding: 2px 5px;
+ outline: none;
+}
+
+#my-netdata-menu-filter-input::placeholder {
+ opacity: 0.7;
+}
+
+#my-netdata-dropdown-content.theme-white #my-netdata-menu-filter-input {
+ background-color: #e7e7e7;
+ color: #555;
+}
+
+.filter-control {
+ position: relative;
+}
+
+.filter-control .filter-control__clear {
+ cursor: pointer;
+ position: absolute;
+ top: 7px;
+ right: 19px;
+}
+
+#myNetdataDropdownParent {
+ float: left;
+}
+
+#hostname {
+ font-size: 18px;
+ overflow: hidden;
+ text-overflow: ellipsis;
+ max-width: 220px;
+}
+
+#hostnametext {
+ white-space: pre;
+ float: left;
+ text-overflow: ellipsis;
+ overflow: hidden;
+ max-width: 160px;
+}
+
+.sign-in-btn {
+ background-color: #1E2126;
+}
+
+.sign-in-btn.theme-white {
+ background-color: #e6e6e6;
+}
+
+.sign-in-btn.theme-white span {
+ color: #000;
+}
+
+#sign-in-banner {
+ position: fixed;
+ bottom: 0;
+ left: 0;
+ right: 0;
+ z-index: 9999;
+ /* margin: 5px 0; */
+ background-color: #1E2126;
+ height: 44px;
+ line-height: 44px;
+ text-align: center;
+}
+
+#sign-in-banner.theme-white {
+ background-color: #e6e6e6;
+}
+
+#sign-in-banner .__link {
+ color: #17CE8A;
+}
+
+#sign-in-banner .__link:hover {
+ text-decoration: underline;
+}
+
+#sign-in-banner .__close {
+ cursor: pointer;
+ float: right;
+}
+
+#sign-in-banner .__close .fa-times {
+ margin-left: 5px;
+ color: #17CE8A;
+}
+
+.beta {
+ color: #FFCC00;
+}
+
+@media (min-width: 1400px) {
+ #hostname {
+ max-width: 600px !important;
+ }
+
+ #hostnametext {
+ max-width: 540px !important;
+ }
+}
+
+@media (min-width: 1360px) {
+ .container {
+ padding-left: 3% !important;
+ }
+
+ #hostname {
+ max-width: 280px !important;
+ }
+
+ #hostnametext {
+ max-width: 220px !important;
+ }
+
+ .charts-body {
+ width: calc(100% - 263px) !important;
+ padding-left: 1% !important;
+ padding-right: 2% !important;
+ }
+
+ .sidebar-body {
+ display: inline-block !important;
+ width: 263px !important;
+ }
+
+ /* Widen the fixed sidebar again */
+ .dashboard-sidebar.affix,
+ .dashboard-sidebar.affix-top,
+ .dashboard-sidebar.affix-bottom {
+ width: 263px !important;
+ }
+}
+
+@media (min-width: 1200px) {
+ #hostname {
+ max-width: 100px;
+ }
+
+ #hostnametext {
+ max-width: 40px;
+ }
+
+ .container {
+ padding-left: 2% !important;
+ }
+
+ .charts-body {
+ width: calc(100% - 233px) !important;
+ padding-left: 1% !important;
+ padding-right: 1% !important;
+ }
+
+ .sidebar-body {
+ display: inline-block !important;
+ width: 233px !important;
+ }
+
+ /* Widen the fixed sidebar again */
+ .dashboard-sidebar.affix,
+ .dashboard-sidebar.affix-top,
+ .dashboard-sidebar.affix-bottom {
+ width: 233px !important;
+ }
+}
+
+@media (min-width: 992px) {
+ .container {
+ padding-left: 0% !important;
+ }
+
+ .charts-body {
+ width: calc(100% - 213px) !important;
+ padding-left: 1% !important;
+ padding-right: 0% !important;
+ }
+
+ .sidebar-body {
+ display: inline-block !important;
+ width: 213px !important;
+ }
+
+ .dashboard-sidebar .nav > .active > ul {
+ display: block;
+ }
+
+ /* Widen the fixed sidebar */
+ .dashboard-sidebar.affix,
+ .dashboard-sidebar.affix-top,
+ .dashboard-sidebar.affix-bottom {
+ width: 213px !important;
+ }
+
+ .dashboard-sidebar.affix {
+ position: fixed; /* Undo the static from mobile first approach */
+ top: 20px;
+ }
+
+ .dashboard-sidebar.affix-bottom {
+ position: absolute; /* Undo the static from mobile first approach */
+ }
+
+ .dashboard-sidebar.affix-bottom .dashboard-sidenav,
+ .dashboard-sidebar.affix .dashboard-sidenav {
+ margin-top: 0;
+ margin-bottom: 0;
+ }
+}
+
+@media (min-width: 860px) {
+ .dashboard-sidebar {
+ padding-left: 20px;
+ }
+
+}
+
+@media (min-width: 768px) {
+ .dashboard-sidebar {
+ padding-left: 20px;
+ }
+
+ .charts-body {
+ padding-left: 0%;
+ padding-right: 0%;
+ }
+
+ .back-to-top,
+ .dashboard-theme-toggle {
+ display: block;
+ }
+}
+
+@media print {
+ body {
+ overflow: visible !important;
+ -webkit-print-color-adjust: exact;
+ page-break-inside: auto;
+ page-break-before: auto;
+ page-break-after: auto;
+ }
+
+ .dashboard-section {
+ page-break-inside: auto;
+ page-break-before: auto;
+ page-break-after: auto;
+ }
+
+ .dashboard-subsection {
+ page-break-before: avoid;
+ page-break-after: auto;
+ page-break-inside: auto;
+ }
+
+ .charts-body {
+ padding-left: 0%;
+ padding-right: 0%;
+ display: block;
+ page-break-inside: auto;
+ page-break-before: auto;
+ page-break-after: auto;
+ }
+
+ .back-to-top,
+ .dashboard-theme-toggle {
+ display: block;
+ }
+}
diff --git a/web/gui/main.js b/web/gui/main.js
new file mode 100644
index 0000000..5bf11e5
--- /dev/null
+++ b/web/gui/main.js
@@ -0,0 +1,5155 @@
+// Main JavaScript file for the Netdata GUI.
+
+// Codacy declarations
+/* global NETDATA */
+
+// netdata snapshot data
+var netdataSnapshotData = null;
+
+// enable alarms checking and notifications
+var netdataShowAlarms = true;
+
+// enable registry updates
+var netdataRegistry = true;
+
+// forward definition only - not used here
+var netdataServer = undefined;
+var netdataServerStatic = undefined;
+var netdataCheckXSS = undefined;
+
+// control the welcome modal and analytics
+var this_is_demo = null;
+
+function escapeUserInputHTML(s) {
+ return s.toString()
+ .replace(/&/g, '&amp;')
+ .replace(/</g, '&lt;')
+ .replace(/>/g, '&gt;')
+ .replace(/"/g, '&quot;')
+ .replace(/#/g, '&#35;')
+ .replace(/'/g, '&#39;')
+ .replace(/\(/g, '&#40;')
+ .replace(/\)/g, '&#41;')
+ .replace(/\//g, '&#47;');
+}
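+
+// usage sketch (illustrative input only):
+//   escapeUserInputHTML('<a href="#">O\'Brien</a>')
+//   => '&lt;a href=&quot;&#35;&quot;&gt;O&#39;Brien&lt;&#47;a&gt;'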
+
+function verifyURL(s) {
+ if (typeof (s) === 'string' && (s.startsWith('http://') || s.startsWith('https://'))) {
+ return s
+ .replace(/'/g, '%27')
+ .replace(/"/g, '%22')
+ .replace(/\(/g, '%28')
+ .replace(/\)/g, '%29');
+ }
+
+ console.log('invalid URL detected:');
+ console.log(s);
+ return 'javascript:alert("invalid url");';
+}
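+
+// usage sketch (hypothetical value): verifyURL("http://h:19999/#a('b')")
+// => "http://h:19999/#a%28%27b%27%29"; anything not starting with http:// or
+// https:// falls through to the harmless alert() href above.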
+
+// --------------------------------------------------------------------
+// urlOptions
+
+var urlOptions = {
+ hash: '#',
+ theme: null,
+ help: null,
+ mode: 'live', // 'live', 'print'
+ update_always: false,
+ pan_and_zoom: false,
+ server: null,
+ after: 0,
+ before: 0,
+ highlight: false,
+ highlight_after: 0,
+ highlight_before: 0,
+ nowelcome: false,
+ show_alarms: false,
+ chart: null,
+ family: null,
+ alarm: null,
+ alarm_unique_id: 0,
+ alarm_id: 0,
+ alarm_event_id: 0,
+ alarm_when: 0,
+
+ hasProperty: function (property) {
+ // console.log('checking property ' + property + ' of type ' + typeof(this[property]));
+ return typeof this[property] !== 'undefined';
+ },
+
+ genHash: function (forReload) {
+ var hash = urlOptions.hash;
+
+ if (urlOptions.pan_and_zoom === true) {
+ hash += ';after=' + urlOptions.after.toString() +
+ ';before=' + urlOptions.before.toString();
+ }
+
+ if (urlOptions.highlight === true) {
+ hash += ';highlight_after=' + urlOptions.highlight_after.toString() +
+ ';highlight_before=' + urlOptions.highlight_before.toString();
+ }
+
+ if (urlOptions.theme !== null) {
+ hash += ';theme=' + urlOptions.theme.toString();
+ }
+
+ if (urlOptions.help !== null) {
+ hash += ';help=' + urlOptions.help.toString();
+ }
+
+ if (urlOptions.update_always === true) {
+ hash += ';update_always=true';
+ }
+
+ if (forReload === true && urlOptions.server !== null) {
+ hash += ';server=' + urlOptions.server.toString();
+ }
+
+ if (urlOptions.mode !== 'live') {
+ hash += ';mode=' + urlOptions.mode;
+ }
+
+ return hash;
+ },
+
+ parseHash: function () {
+ var variables = document.location.hash.split(';');
+ var len = variables.length;
+ while (len--) {
+ if (len !== 0) {
+ var p = variables[len].split('=');
+ if (urlOptions.hasProperty(p[0]) && typeof p[1] !== 'undefined') {
+ urlOptions[p[0]] = decodeURIComponent(p[1]);
+ }
+ } else {
+ if (variables[len].length > 0) {
+ urlOptions.hash = variables[len];
+ }
+ }
+ }
+
+ var booleans = ['nowelcome', 'show_alarms', 'update_always'];
+ len = booleans.length;
+ while (len--) {
+ if (urlOptions[booleans[len]] === 'true' || urlOptions[booleans[len]] === true || urlOptions[booleans[len]] === '1' || urlOptions[booleans[len]] === 1) {
+ urlOptions[booleans[len]] = true;
+ } else {
+ urlOptions[booleans[len]] = false;
+ }
+ }
+
+ var numeric = ['after', 'before', 'highlight_after', 'highlight_before', 'alarm_when'];
+ len = numeric.length;
+ while (len--) {
+ if (typeof urlOptions[numeric[len]] === 'string') {
+ // parseInt() never throws - it returns NaN on failure
+ var v = parseInt(urlOptions[numeric[len]], 10);
+ if (isNaN(v)) {
+ console.log('failed to parse URL hash parameter ' + numeric[len]);
+ v = 0;
+ }
+ urlOptions[numeric[len]] = v;
+ }
+ }
+
+ if (urlOptions.alarm_when) {
+ // if alarm_when exists, create after/before params
+ // -/+ 2 minutes from the alarm, and reload the page
+ const alarmTime = new Date(urlOptions.alarm_when * 1000).valueOf();
+ const timeMarginMs = 120000; // 2 mins
+
+ const after = alarmTime - timeMarginMs;
+ const before = alarmTime + timeMarginMs;
+ const newHash = document.location.hash.replace(
+ /;alarm_when=[0-9]*/i,
+ ";after=" + after + ";before=" + before,
+ );
+ history.replaceState(null, '', newHash);
+ location.reload();
+ }
+
+ if (urlOptions.server !== null && urlOptions.server !== '') {
+ netdataServerStatic = document.location.origin.toString() + document.location.pathname.toString();
+ netdataServer = urlOptions.server;
+ netdataCheckXSS = true;
+ } else {
+ urlOptions.server = null;
+ }
+
+ if (urlOptions.before > 0 && urlOptions.after > 0) {
+ urlOptions.pan_and_zoom = true;
+ urlOptions.nowelcome = true;
+ } else {
+ urlOptions.pan_and_zoom = false;
+ }
+
+ if (urlOptions.highlight_before > 0 && urlOptions.highlight_after > 0) {
+ urlOptions.highlight = true;
+ } else {
+ urlOptions.highlight = false;
+ }
+
+ switch (urlOptions.mode) {
+ case 'print':
+ urlOptions.theme = 'white';
+ urlOptions.welcome = false;
+ urlOptions.help = false;
+ urlOptions.show_alarms = false;
+
+ if (urlOptions.pan_and_zoom === false) {
+ urlOptions.pan_and_zoom = true;
+ urlOptions.before = Date.now();
+ urlOptions.after = urlOptions.before - 600000;
+ }
+
+ netdataShowAlarms = false;
+ netdataRegistry = false;
+ this_is_demo = false;
+ break;
+
+ case 'live':
+ default:
+ urlOptions.mode = 'live';
+ break;
+ }
+
+ // console.log(urlOptions);
+ },
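+
+    // Example (illustrative values, not produced by the code above): a hash
+    // such as
+    //     #menu_system;after=-600;before=0;theme=slate;help=true
+    // is split on ';' by parseHash(), leaving urlOptions.hash = '#menu_system'
+    // and filling in after/before/theme/help, while genHash() re-serializes
+    // the same fields back into a hash string of this form.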
+
+ hashUpdate: function () {
+ history.replaceState(null, '', urlOptions.genHash(true));
+ },
+
+ netdataPanAndZoomCallback: function (status, after, before) {
+ //console.log(1);
+ //console.log(new Error().stack);
+
+ if (netdataSnapshotData === null) {
+ urlOptions.pan_and_zoom = status;
+ urlOptions.after = after;
+ urlOptions.before = before;
+ urlOptions.hashUpdate();
+ }
+ },
+
+ netdataHighlightCallback: function (status, after, before) {
+ //console.log(2);
+ //console.log(new Error().stack);
+
+ if (status === true && (after === null || before === null || after <= 0 || before <= 0 || after >= before)) {
+ status = false;
+ after = 0;
+ before = 0;
+ }
+
+ if (netdataSnapshotData === null) {
+ urlOptions.highlight = status;
+ } else {
+ urlOptions.highlight = false;
+ }
+
+ urlOptions.highlight_after = Math.round(after);
+ urlOptions.highlight_before = Math.round(before);
+ urlOptions.hashUpdate();
+
+ var show_eye = NETDATA.globalChartUnderlay.hasViewport();
+
+ if (status === true && after > 0 && before > 0 && after < before) {
+ var d1 = NETDATA.dateTime.localeDateString(after);
+ var d2 = NETDATA.dateTime.localeDateString(before);
+ if (d1 === d2) {
+ d2 = '';
+ }
+ document.getElementById('navbar-highlight-content').innerHTML =
+ ((show_eye === true) ? '<span class="navbar-highlight-bar highlight-tooltip" onclick="urlOptions.showHighlight();" title="restore the highlighted view" data-toggle="tooltip" data-placement="bottom">' : '<span>').toString()
+ + 'highlighted time-frame'
+ + ' <b>' + d1 + ' <code>' + NETDATA.dateTime.localeTimeString(after) + '</code></b> to '
+ + ' <b>' + d2 + ' <code>' + NETDATA.dateTime.localeTimeString(before) + '</code></b>, '
+ + 'duration <b>' + NETDATA.seconds4human(Math.round((before - after) / 1000)) + '</b>'
+ + '</span>'
+ + '<span class="navbar-highlight-button-right highlight-tooltip" onclick="urlOptions.clearHighlight();" title="clear the highlighted time-frame" data-toggle="tooltip" data-placement="bottom"><i class="fas fa-times"></i></span>';
+
+ $('.navbar-highlight').show();
+
+ $('.highlight-tooltip').tooltip({
+ html: true,
+ delay: { show: 500, hide: 0 },
+ container: 'body'
+ });
+ } else {
+ $('.navbar-highlight').hide();
+ }
+ },
+
+ clearHighlight: function () {
+ NETDATA.globalChartUnderlay.clear();
+
+ if (NETDATA.globalPanAndZoom.isActive() === true) {
+ NETDATA.globalPanAndZoom.clearMaster();
+ }
+ },
+
+ showHighlight: function () {
+ NETDATA.globalChartUnderlay.focus();
+ }
+};
+
+urlOptions.parseHash();
+
+// --------------------------------------------------------------------
+// check options that should be processed before loading netdata.js
+
+// cached result of localStorageTest(): -1 = not tested yet, otherwise true/false
+var localStorageTested = -1;
+
+function localStorageTest() {
+ if (localStorageTested !== -1) {
+ return localStorageTested;
+ }
+
+ if (typeof Storage !== "undefined" && typeof localStorage === 'object') {
+ var test = 'test';
+ try {
+ localStorage.setItem(test, test);
+ localStorage.removeItem(test);
+ localStorageTested = true;
+ }
+ catch (e) {
+ console.log(e);
+ localStorageTested = false;
+ }
+ } else {
+ localStorageTested = false;
+ }
+
+ return localStorageTested;
+}
+
+function loadLocalStorage(name) {
+ var ret = null;
+
+ try {
+ if (localStorageTest() === true) {
+ ret = localStorage.getItem(name);
+ } else {
+ console.log('localStorage is not available');
+ }
+ }
+ catch (error) {
+ console.log(error);
+ return null;
+ }
+
+ if (typeof ret === 'undefined' || ret === null) {
+ return null;
+ }
+
+ // console.log('loaded: ' + name.toString() + ' = ' + ret.toString());
+
+ return ret;
+}
+
+function saveLocalStorage(name, value) {
+ // console.log('saving: ' + name.toString() + ' = ' + value.toString());
+ try {
+ if (localStorageTest() === true) {
+ localStorage.setItem(name, value.toString());
+ return true;
+ }
+ }
+ catch (error) {
+ console.log(error);
+ }
+
+ return false;
+}
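+
+// Usage sketch (hypothetical values): both helpers degrade gracefully when
+// localStorage is unavailable (e.g. private browsing), so callers can rely
+// on the fallbacks:
+//
+//     var theme = loadLocalStorage('netdataTheme') || 'slate'; // null on failure
+//     saveLocalStorage('netdataTheme', 'white');               // false on failure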
+
+function getTheme(def) {
+ if (urlOptions.mode === 'print') {
+ return 'white';
+ }
+
+ var ret = loadLocalStorage('netdataTheme');
+ if (typeof ret === 'undefined' || ret === null || ret === 'undefined') {
+ return def;
+ } else {
+ return ret;
+ }
+}
+
+function setTheme(theme) {
+ if (urlOptions.mode === 'print') {
+ return false;
+ }
+
+ if (theme === netdataTheme) {
+ return false;
+ }
+ return saveLocalStorage('netdataTheme', theme);
+}
+
+var netdataTheme = getTheme('slate');
+var netdataShowHelp = true;
+
+if (urlOptions.theme !== null) {
+ setTheme(urlOptions.theme);
+ netdataTheme = urlOptions.theme;
+} else {
+ urlOptions.theme = netdataTheme;
+}
+
+if (urlOptions.help !== null) {
+ saveLocalStorage('options.show_help', urlOptions.help);
+ netdataShowHelp = urlOptions.help;
+} else {
+ urlOptions.help = loadLocalStorage('options.show_help');
+}
+
+// --------------------------------------------------------------------
+// natural sorting
+// http://www.davekoelle.com/files/alphanum.js - LGPL
+
+// split a string into an array of alternating non-numeric and numeric chunks
+function naturalSortChunkify(t) {
+    var tz = [];
+    var x = 0, y = -1, n = 0, i, j;
+
+    // charCodeAt('') is NaN, which is falsy and terminates the loop
+    while (i = (j = t.charAt(x++)).charCodeAt(0)) {
+        var m = (i >= 48 && i <= 57); // is this character a decimal digit?
+        if (m !== n) {
+            // the character class changed - start a new chunk
+            tz[++y] = "";
+            n = m;
+        }
+        tz[y] += j;
+    }
+
+    return tz;
+}
+
+function naturalSortCompare(a, b) {
+ var aa = naturalSortChunkify(a.toLowerCase());
+ var bb = naturalSortChunkify(b.toLowerCase());
+
+ for (var x = 0; aa[x] && bb[x]; x++) {
+ if (aa[x] !== bb[x]) {
+ var c = Number(aa[x]), d = Number(bb[x]);
+ if (c.toString() === aa[x] && d.toString() === bb[x]) {
+ return c - d;
+ } else {
+ return (aa[x] > bb[x]) ? 1 : -1;
+ }
+ }
+ }
+
+ return aa.length - bb.length;
+}
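+
+// Example (illustrative): a plain lexicographic sort orders
+//     ['cpu1', 'cpu10', 'cpu2']
+// because '1' < '2' as characters, while comparing the numeric chunks as
+// numbers gives the expected order:
+//     ['cpu10', 'cpu2', 'cpu1'].sort(naturalSortCompare) // ['cpu1', 'cpu2', 'cpu10']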
+
+// --------------------------------------------------------------------
+// saving files to client
+
+function saveTextToClient(data, filename) {
+ var blob = new Blob([data], {
+ type: 'application/octet-stream'
+ });
+
+ var url = URL.createObjectURL(blob);
+ var link = document.createElement('a');
+ link.setAttribute('href', url);
+ link.setAttribute('download', filename);
+
+ var el = document.getElementById('hiddenDownloadLinks');
+ el.innerHTML = '';
+ el.appendChild(link);
+
+ setTimeout(function () {
+ el.removeChild(link);
+ URL.revokeObjectURL(url);
+ }, 60);
+
+ link.click();
+}
+
+function saveObjectToClient(data, filename) {
+ saveTextToClient(JSON.stringify(data), filename);
+}
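+
+// Usage sketch (hypothetical object and filename): any JSON-serializable
+// state can be offered to the user as a download, provided the page contains
+// the #hiddenDownloadLinks element used above:
+//     saveObjectToClient({ theme: netdataTheme }, 'netdata-options.json');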
+
+// -----------------------------------------------------------------------------
+// registry call back to render my-netdata menu
+
+function toggleExpandIcon(svgEl) {
+ if (svgEl.getAttribute('data-icon') === 'caret-down') {
+ svgEl.setAttribute('data-icon', 'caret-up');
+ } else {
+ svgEl.setAttribute('data-icon', 'caret-down');
+ }
+}
+
+function toggleAgentItem(e, guid) {
+ e.stopPropagation();
+ e.preventDefault();
+
+ toggleExpandIcon(e.currentTarget.children[0]);
+
+ const el = document.querySelector(`.agent-alternate-urls.agent-${guid}`);
+ if (el) {
+ el.classList.toggle('collapsed');
+ }
+}
+
+// When you stream metrics from netdata to netdata, the receiving netdata
+// has multiple host databases: its own, plus one for each mirrored host.
+// Mirrored databases can be accessed at <http://localhost:19999/host/NAME/>
+const OLD_DASHBOARD_SUFFIX = "old"
+let isOldSuffix = true
+try {
+ const currentScriptMainJs = document.currentScript;
+ const mainJsSrc = currentScriptMainJs.getAttribute("src")
+ isOldSuffix = mainJsSrc.startsWith("../main.js")
+} catch {
+  console.warn("cannot detect the current script, assuming the dashboard is running with the /old suffix")
+}
+
+function transformWithOldSuffix(url) {
+ return isOldSuffix ? `../${url}` : url
+}
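+
+// Example (illustrative): when the old dashboard is served under /old, its
+// relative assets live one directory up:
+//     transformWithOldSuffix('css/bootstrap-slider-10.0.0.min.css')
+//     // => '../css/bootstrap-slider-10.0.0.min.css' when isOldSuffix is true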
+
+function renderStreamedHosts(options) {
+ let html = `<div class="info-item">Databases streamed to this agent</div>`;
+
+ var base = document.location.origin.toString() +
+ document.location.pathname.toString()
+ .replace(isOldSuffix ? `/${OLD_DASHBOARD_SUFFIX}` : "", "");
+ if (base.endsWith("/host/" + options.hostname + "/")) {
+ base = base.substring(0, base.length - ("/host/" + options.hostname + "/").toString().length);
+ }
+
+ if (base.endsWith("/")) {
+ base = base.substring(0, base.length - 1);
+ }
+
+ var master = options.hosts[0].hostname;
+ // We sort a clone of options.hosts, to keep the master as the first element
+ // for future calls.
+ var sorted = options.hosts.slice(0).sort(function (a, b) {
+ if (a.hostname === master) {
+ return -1;
+ }
+ return naturalSortCompare(a.hostname, b.hostname);
+ });
+
+ let displayedDatabases = false;
+
+ for (var s of sorted) {
+ let url, icon;
+ const hostname = s.hostname;
+
+ if (myNetdataMenuFilterValue !== "") {
+ if (!hostname.includes(myNetdataMenuFilterValue)) {
+ continue;
+ }
+ }
+
+ displayedDatabases = true;
+
+ if (hostname === master) {
+ url = isOldSuffix ? `${base}/${OLD_DASHBOARD_SUFFIX}/` : `${base}/`;
+ icon = 'home';
+ } else {
+ url = isOldSuffix ? `${base}/host/${hostname}/${OLD_DASHBOARD_SUFFIX}/` : `${base}/host/${hostname}/`;
+ icon = 'window-restore';
+ }
+
+ html += (
+ `<div class="agent-item">
+ <a class="registry_link" href="${url}#" onClick="return gotoHostedModalHandler('${url}');">
+ <i class="fas fa-${icon}" style="color: #999;"></i>
+ </a>
+ <span class="__title" onClick="return gotoHostedModalHandler('${url}');">
+ <a class="registry_link" href="${url}#">${hostname}</a>
+ </span>
+ <div></div>
+ </div>`
+ )
+ }
+
+ if (!displayedDatabases) {
+ html += (
+ `<div class="info-item">
+ <i class="fas fa-filter"></i>
+                <span style="margin-left: 8px">no databases match the filter criteria.</span>
+ </div>`
+ )
+ }
+
+ return html;
+}
+
+function renderMachines(machinesArray) {
+ let html = `<div class="info-item">My nodes</div>`;
+
+ if (machinesArray === null) {
+ let ret = loadLocalStorage("registryCallback");
+ if (ret) {
+ machinesArray = JSON.parse(ret);
+ console.log("failed to contact the registry - loaded registry data from browser local storage");
+ }
+ }
+
+ let found = false;
+ let displayedAgents = false;
+
+ const maskedURL = NETDATA.registry.MASKED_DATA;
+
+ if (machinesArray) {
+ saveLocalStorage("registryCallback", JSON.stringify(machinesArray));
+
+ var machines = machinesArray.sort(function (a, b) {
+ return naturalSortCompare(a.name, b.name);
+ });
+
+ for (var machine of machines) {
+ found = true;
+
+ if (myNetdataMenuFilterValue !== "") {
+ if (!machine.name.includes(myNetdataMenuFilterValue)) {
+ continue;
+ }
+ }
+
+ displayedAgents = true;
+
+ const alternateUrlItems = (
+ `<div class="agent-alternate-urls agent-${machine.guid} collapsed">
+ ${machine.alternate_urls.reduce((str, url) => {
+ if (url === maskedURL) {
+ return str
+ }
+
+ return str + (
+ `<div class="agent-item agent-item--alternate">
+ <div></div>
+ <a href="${url}" title="${url}">${truncateString(url, 64)}</a>
+ <a href="#" onclick="deleteRegistryModalHandler('${machine.guid}', '${machine.name}', '${url}'); return false;">
+ <i class="fas fa-trash" style="color: #777;"></i>
+ </a>
+ </div>`
+ )
+ },
+ ''
+ )}
+ </div>`
+ )
+
+ html += (
+ `<div class="agent-item agent-${machine.guid}">
+                    <i class="fas fa-chart-bar" style="color: #fff"></i>
+ <span class="__title" onClick="return gotoServerModalHandler('${machine.guid}');">
+ <a class="registry_link" href="${machine.url}#">${machine.name}</a>
+ </span>
+ <a href="#" onClick="toggleAgentItem(event, '${machine.guid}');">
+ <i class="fas fa-caret-down" style="color: #999"></i>
+ </a>
+ </div>
+ ${alternateUrlItems}`
+ )
+ }
+
+ if (found && (!displayedAgents)) {
+ html += (
+ `<div class="info-item">
+ <i class="fas fa-filter"></i>
+                <span style="margin-left: 8px">no nodes match the filter value.</span>
+ </div>`
+ )
+ }
+ }
+
+ if (!found) {
+ if (machines) {
+ html += (
+ `<div class="info-item">
+ <a href="https://github.com/netdata/netdata/tree/master/registry#netdata-registry" target="_blank">Your nodes list is empty</a>
+ </div>`
+ )
+ } else {
+ html += (
+ `<div class="info-item">
+ <a href="https://github.com/netdata/netdata/tree/master/registry#netdata-registry" target="_blank">Failed to contact the registry</a>
+ </div>`
+ )
+ }
+
+ html += `<hr />`;
+ html += `<div class="info-item">Demo netdata nodes</div>`;
+
+ const demoServers = [
+ { url: "//london.netdata.rocks/default.html", title: "UK - London (DigitalOcean.com)" },
+ { url: "//newyork.netdata.rocks/default.html", title: "US - New York (DigitalOcean.com)" },
+ { url: "//sanfrancisco.netdata.rocks/default.html", title: "US - San Francisco (DigitalOcean.com)" },
+ { url: "//atlanta.netdata.rocks/default.html", title: "US - Atlanta (CDN77.com)" },
+ { url: "//frankfurt.netdata.rocks/default.html", title: "Germany - Frankfurt (DigitalOcean.com)" },
+ { url: "//toronto.netdata.rocks/default.html", title: "Canada - Toronto (DigitalOcean.com)" },
+        { url: "//singapore.netdata.rocks/default.html", title: "Singapore (DigitalOcean.com)" },
+        { url: "//bangalore.netdata.rocks/default.html", title: "India - Bangalore (DigitalOcean.com)" },
+    ]
+
+ for (var server of demoServers) {
+ html += (
+ `<div class="agent-item">
+ <i class="fas fa-chart-bar" style="color: #fff"></i>
+ <a href="${server.url}">${server.title}</a>
+ <div></div>
+ </div>
+ `
+ );
+ }
+ }
+
+ return html;
+}
+
+function setMyNetdataMenu(html) {
+ const el = document.getElementById('my-netdata-dropdown-content')
+ el.innerHTML = html;
+}
+
+function clearMyNetdataMenu() {
+ setMyNetdataMenu(`<div class="agent-item" style="white-space: nowrap">
+ <i class="fas fa-hourglass-half"></i>
+ Loading, please wait...
+ <div></div>
+ </div>`);
+}
+
+function errorMyNetdataMenu() {
+ setMyNetdataMenu(`<div class="agent-item" style="padding: 0 8px">
+ <i class="fas fa-exclamation-triangle" style="color: red"></i>
+ Cannot load known Netdata agents from Netdata Cloud! Please make sure you have the latest version of Netdata.
+ </div>`);
+}
+
+function restrictMyNetdataMenu() {
+ setMyNetdataMenu(`<div class="info-item" style="white-space: nowrap">
+ <span>Please <a href="#" onclick="signInDidClick(event); return false">sign in to netdata.cloud</a> to view your nodes!</span>
+ <div></div>
+ </div>`);
+}
+
+function openAuthenticatedUrl(url) {
+ if (isSignedIn()) {
+ window.open(url);
+ } else {
+ window.open(`${NETDATA.registry.cloudBaseURL}/account/sign-in-agent?id=${NETDATA.registry.machine_guid}&name=${encodeURIComponent(NETDATA.registry.hostname)}&origin=${encodeURIComponent(window.location.origin + "/")}&redirect_uri=${encodeURIComponent(window.location.origin + "/" + url)}`);
+ }
+}
+
+function renderMyNetdataMenu(machinesArray) {
+ const el = document.getElementById('my-netdata-dropdown-content');
+ el.classList.add(`theme-${netdataTheme}`);
+
+ if (machinesArray == registryAgents) {
+ console.log("Rendering my-netdata menu from registry");
+ } else {
+ console.log("Rendering my-netdata menu from netdata.cloud", machinesArray);
+ }
+
+ let html = '';
+
+ if (!isSignedIn()) {
+ if (!NETDATA.registry.isRegistryEnabled()) {
+ html += (
+ `<div class="info-item" style="white-space: nowrap">
+ <span>Please <a href="#" onclick="signInDidClick(event); return false">sign in to netdata.cloud</a> to view your nodes!</span>
+ <div></div>
+ </div>
+ <hr />`
+ );
+ }
+ }
+
+ if (isSignedIn()) {
+ html += (
+ `<div class="filter-control">
+ <input
+ id="my-netdata-menu-filter-input"
+ type="text"
+ placeholder="filter nodes..."
+ autofocus
+ autocomplete="off"
+ value="${myNetdataMenuFilterValue}"
+ onkeydown="myNetdataFilterDidChange(event)"
+ />
+                <span class="filter-control__clear" onclick="myNetdataFilterClearDidClick(event)"><i class="fas fa-times"></i></span>
+ </div>
+ <hr />`
+ );
+ }
+
+ // options.hosts = [
+ // {
+ // hostname: "streamed1",
+ // },
+ // {
+ // hostname: "streamed2",
+ // },
+ // ]
+
+ if (options.hosts.length > 1) {
+ html += `<div id="my-netdata-menu-streamed">${renderStreamedHosts(options)}</div><hr />`;
+ }
+
+ if (isSignedIn() || NETDATA.registry.isRegistryEnabled()) {
+ html += `<div id="my-netdata-menu-machines">${renderMachines(machinesArray)}</div><hr />`;
+ }
+
+ if (!isSignedIn()) {
+ html += (
+ `<div class="agent-item">
+            <i class="fas fa-cog"></i>
+ <a href="#" onclick="switchRegistryModalHandler(); return false;">Switch Identity</a>
+ <div></div>
+ </div>
+ <div class="agent-item">
+            <i class="fas fa-question-circle"></i>
+ <a href="https://github.com/netdata/netdata/tree/master/registry#netdata-registry" target="_blank">What is this?</a>
+ <div></div>
+ </div>`
+ )
+ } else {
+ html += (
+ `<div class="agent-item">
+ <i class="fas fa-tv"></i>
+ <a onclick="openAuthenticatedUrl('console.html');" target="_blank">Nodes<sup class="beta"> beta</sup></a>
+ <div></div>
+ </div>
+ <div class="agent-item">
+ <i class="fas fa-sync"></i>
+ <a href="#" onclick="showSyncModal(); return false">Synchronize with netdata.cloud</a>
+ <div></div>
+ </div>
+ <div class="agent-item">
+            <i class="fas fa-question-circle"></i>
+ <a href="https://netdata.cloud/about" target="_blank">What is this?</a>
+ <div></div>
+ </div>`
+ )
+ }
+
+ el.innerHTML = html;
+
+ gotoServerInit();
+}
+
+function isdemo() {
+ if (this_is_demo !== null) {
+ return this_is_demo;
+ }
+ this_is_demo = false;
+
+ try {
+ if (typeof document.location.hostname === 'string') {
+ if (document.location.hostname.endsWith('.my-netdata.io') ||
+ document.location.hostname.endsWith('.mynetdata.io') ||
+ document.location.hostname.endsWith('.netdata.rocks') ||
+ document.location.hostname.endsWith('.netdata.ai') ||
+ document.location.hostname.endsWith('.netdata.live') ||
+ document.location.hostname.endsWith('.firehol.org') ||
+ document.location.hostname.endsWith('.netdata.online') ||
+ document.location.hostname.endsWith('.netdata.cloud')) {
+ this_is_demo = true;
+ }
+ }
+ }
+    catch (error) {
+        // ignore - this check must never break the dashboard
+    }
+ return this_is_demo;
+}
+
+function netdataURL(url, forReload) {
+    if (typeof url === 'undefined') {
+        // url = document.location.toString();
+        url = '';
+    }
+
+ if (url.indexOf('#') !== -1) {
+ url = url.substring(0, url.indexOf('#'));
+ }
+
+ var hash = urlOptions.genHash(forReload);
+
+ // console.log('netdataURL: ' + url + hash);
+
+ return url + hash;
+}
+
+function netdataReload(url) {
+ document.location = verifyURL(netdataURL(url, true));
+
+ // since we play with hash
+ // this is needed to reload the page
+ location.reload();
+}
+
+function gotoHostedModalHandler(url) {
+ document.location = verifyURL(url + urlOptions.genHash());
+ return false;
+}
+
+var gotoServerValidateRemaining = 0;
+var gotoServerMiddleClick = false;
+var gotoServerStop = false;
+
+function gotoServerValidateUrl(id, guid, url) {
+ var penalty = 0;
+ var error = 'failed';
+
+    if (document.location.toString().startsWith('http://') && url.toString().startsWith('https://')) {
+        // we penalize https URLs only if the current URL is http,
+        // to allow the user to walk through all their servers
+        penalty = 500;
+ } else if (document.location.toString().startsWith('https://') && url.toString().startsWith('http://')) {
+ error = 'can\'t check';
+ }
+
+ var finalURL = netdataURL(url);
+
+ setTimeout(function () {
+ document.getElementById('gotoServerList').innerHTML += '<tr><td style="padding-left: 20px;"><a href="' + verifyURL(finalURL) + '" target="_blank">' + escapeUserInputHTML(url) + '</a></td><td style="padding-left: 30px;"><code id="' + guid + '-' + id + '-status">checking...</code></td></tr>';
+
+ NETDATA.registry.hello(url, function (data) {
+ if (typeof data !== 'undefined' && data !== null && typeof data.machine_guid === 'string' && data.machine_guid === guid) {
+ // console.log('OK ' + id + ' URL: ' + url);
+ document.getElementById(guid + '-' + id + '-status').innerHTML = "OK";
+
+ if (!gotoServerStop) {
+ gotoServerStop = true;
+
+ if (gotoServerMiddleClick) {
+ window.open(verifyURL(finalURL), '_blank');
+ gotoServerMiddleClick = false;
+ document.getElementById('gotoServerResponse').innerHTML = '<b>Opening new window to ' + NETDATA.registry.machines[guid].name + '<br/><a href="' + verifyURL(finalURL) + '">' + escapeUserInputHTML(url) + '</a></b><br/>(check your pop-up blocker if it fails)';
+ } else {
+ document.getElementById('gotoServerResponse').innerHTML += 'found it! It is at:<br/><small>' + escapeUserInputHTML(url) + '</small>';
+ document.location = verifyURL(finalURL);
+ $('#gotoServerModal').modal('hide');
+ }
+ }
+ } else {
+ if (typeof data !== 'undefined' && data !== null && typeof data.machine_guid === 'string' && data.machine_guid !== guid) {
+ error = 'wrong machine';
+ }
+
+ document.getElementById(guid + '-' + id + '-status').innerHTML = error;
+ gotoServerValidateRemaining--;
+ if (gotoServerValidateRemaining <= 0) {
+ gotoServerMiddleClick = false;
+ document.getElementById('gotoServerResponse').innerHTML = '<b>Sorry! I cannot find any operational URL for this server</b>';
+ }
+ }
+ });
+ }, (id * 50) + penalty);
+}
+
+function gotoServerModalHandler(guid) {
+ // console.log('goto server: ' + guid);
+
+ gotoServerStop = false;
+ var checked = {};
+ var len = NETDATA.registry.machines[guid].alternate_urls.length;
+ var count = 0;
+
+ document.getElementById('gotoServerResponse').innerHTML = '';
+ document.getElementById('gotoServerList').innerHTML = '';
+ document.getElementById('gotoServerName').innerHTML = NETDATA.registry.machines[guid].name;
+ $('#gotoServerModal').modal('show');
+
+ gotoServerValidateRemaining = len;
+ while (len--) {
+ var url = NETDATA.registry.machines[guid].alternate_urls[len];
+ checked[url] = true;
+ gotoServerValidateUrl(count++, guid, url);
+ }
+
+ if (!isSignedIn()) {
+ // When the registry is enabled, if the user's known URLs are not working
+ // we consult the registry to get additional URLs.
+ setTimeout(function () {
+ if (gotoServerStop === false) {
+ document.getElementById('gotoServerResponse').innerHTML = '<b>Added all the known URLs for this machine.</b>';
+ NETDATA.registry.search(guid, function (data) {
+ // console.log(data);
+ len = data.urls.length;
+ while (len--) {
+ var url = data.urls[len][1];
+ // console.log(url);
+ if (typeof checked[url] === 'undefined') {
+ gotoServerValidateRemaining++;
+ checked[url] = true;
+ gotoServerValidateUrl(count++, guid, url);
+ }
+ }
+ });
+ }
+ }, 2000);
+ }
+
+ return false;
+}
+
+function gotoServerInit() {
+ $(".registry_link").on('click', function (e) {
+ if (e.which === 2) {
+ e.preventDefault();
+ gotoServerMiddleClick = true;
+ } else {
+ gotoServerMiddleClick = false;
+ }
+
+ return true;
+ });
+}
+
+function switchRegistryModalHandler() {
+ document.getElementById('switchRegistryPersonGUID').value = NETDATA.registry.person_guid;
+ document.getElementById('switchRegistryURL').innerHTML = NETDATA.registry.server;
+ document.getElementById('switchRegistryResponse').innerHTML = '';
+ $('#switchRegistryModal').modal('show');
+}
+
+function notifyForSwitchRegistry() {
+ var n = document.getElementById('switchRegistryPersonGUID').value;
+
+ if (n !== '' && n.length === 36) {
+ NETDATA.registry.switch(n, function (result) {
+ if (result !== null) {
+ $('#switchRegistryModal').modal('hide');
+ NETDATA.registry.init();
+ } else {
+ document.getElementById('switchRegistryResponse').innerHTML = "<b>Sorry! The registry rejected your request.</b>";
+ }
+ });
+ } else {
+ document.getElementById('switchRegistryResponse').innerHTML = "<b>The ID you have entered is not a GUID.</b>";
+ }
+}
+
+var deleteRegistryGuid = null;
+var deleteRegistryUrl = null;
+
+function deleteRegistryModalHandler(guid, name, url) {
+ // void (guid);
+
+ deleteRegistryGuid = guid;
+ deleteRegistryUrl = url;
+
+ document.getElementById('deleteRegistryServerName').innerHTML = name;
+ document.getElementById('deleteRegistryServerName2').innerHTML = name;
+ document.getElementById('deleteRegistryServerURL').innerHTML = url;
+ document.getElementById('deleteRegistryResponse').innerHTML = '';
+
+ $('#deleteRegistryModal').modal('show');
+}
+
+function notifyForDeleteRegistry() {
+ const responseEl = document.getElementById('deleteRegistryResponse');
+
+ if (deleteRegistryUrl) {
+ if (isSignedIn()) {
+ deleteCloudAgentURL(deleteRegistryGuid, deleteRegistryUrl)
+ .then((count) => {
+ if (!count) {
+ responseEl.innerHTML = "<b>Sorry, this command was rejected by netdata.cloud!</b>";
+ return;
+ }
+ NETDATA.registry.delete(deleteRegistryUrl, function (result) {
+ if (result === null) {
+ console.log("Received error from registry", result);
+ }
+
+ deleteRegistryUrl = null;
+ $('#deleteRegistryModal').modal('hide');
+ NETDATA.registry.init();
+ });
+ });
+ } else {
+ NETDATA.registry.delete(deleteRegistryUrl, function (result) {
+ if (result !== null) {
+ deleteRegistryUrl = null;
+ $('#deleteRegistryModal').modal('hide');
+ NETDATA.registry.init();
+ } else {
+ responseEl.innerHTML = "<b>Sorry, this command was rejected by the registry server!</b>";
+ }
+ });
+ }
+ }
+}
+
+var options = {
+ menus: {},
+ submenu_names: {},
+ data: null,
+ hostname: 'netdata_server', // will be overwritten by the netdata server
+ version: 'unknown',
+ release_channel: 'unknown',
+ hosts: [],
+
+ duration: 0, // the default duration of the charts
+ update_every: 1,
+
+ chartsPerRow: 0,
+ // chartsMinWidth: 1450,
+ chartsHeight: 180,
+};
+
+function chartsPerRow(total) {
+ void (total);
+
+ if (options.chartsPerRow === 0) {
+ return 1;
+ //var width = Math.floor(total / options.chartsMinWidth);
+ //if(width === 0) width = 1;
+ //return width;
+ } else {
+ return options.chartsPerRow;
+ }
+}
+
+function prioritySort(a, b) {
+ if (a.priority < b.priority) {
+ return -1;
+ }
+ if (a.priority > b.priority) {
+ return 1;
+ }
+ return naturalSortCompare(a.name, b.name);
+}
+
+function sortObjectByPriority(object) {
+ var idx = {};
+ var sorted = [];
+
+ for (var i in object) {
+ if (!object.hasOwnProperty(i)) {
+ continue;
+ }
+
+ if (typeof idx[i] === 'undefined') {
+ idx[i] = object[i];
+ sorted.push(i);
+ }
+ }
+
+ sorted.sort(function (a, b) {
+ if (idx[a].priority < idx[b].priority) {
+ return -1;
+ }
+ if (idx[a].priority > idx[b].priority) {
+ return 1;
+ }
+ return naturalSortCompare(a, b);
+ });
+
+ return sorted;
+}
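+
+// Example (illustrative): the keys of an object come back sorted by the
+// 'priority' attribute of their values, with natural name ordering breaking
+// ties:
+//     sortObjectByPriority({ b: { priority: 2 }, a: { priority: 1 } }) // ['a', 'b']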
+
+// ----------------------------------------------------------------------------
+// scroll to a section, without changing the browser history
+
+function scrollToId(hash) {
+ if (hash && hash !== '' && document.getElementById(hash) !== null) {
+ var offset = $('#' + hash).offset();
+ if (typeof offset !== 'undefined') {
+ //console.log('scrolling to ' + hash + ' at ' + offset.top.toString());
+ $('html, body').animate({ scrollTop: offset.top - 30 }, 0);
+ }
+ }
+
+ // we must return false to prevent the default action
+ return false;
+}
+
+// ----------------------------------------------------------------------------
+
+// user editable information
+var customDashboard = {
+ menu: {},
+ submenu: {},
+ context: {}
+};
+
+// netdata standard information
+var netdataDashboard = {
+ sparklines_registry: {},
+ os: 'unknown',
+
+ menu: {},
+ submenu: {},
+ context: {},
+
+ // generate a sparkline
+ // used in the documentation
+ sparkline: function (prefix, chart, dimension, units, suffix) {
+ if (options.data === null || typeof options.data.charts === 'undefined') {
+ return '';
+ }
+
+ if (typeof options.data.charts[chart] === 'undefined') {
+ return '';
+ }
+
+ if (typeof options.data.charts[chart].dimensions === 'undefined') {
+ return '';
+ }
+
+ if (typeof options.data.charts[chart].dimensions[dimension] === 'undefined') {
+ return '';
+ }
+
+ var key = chart + '.' + dimension;
+
+ if (typeof units === 'undefined') {
+ units = '';
+ }
+
+ if (typeof this.sparklines_registry[key] === 'undefined') {
+ this.sparklines_registry[key] = { count: 1 };
+ } else {
+ this.sparklines_registry[key].count++;
+ }
+
+ key = key + '.' + this.sparklines_registry[key].count;
+
+ return prefix + '<div class="netdata-container" data-netdata="' + chart + '" data-after="-120" data-width="25%" data-height="15px" data-chart-library="dygraph" data-dygraph-theme="sparkline" data-dimensions="' + dimension + '" data-show-value-of-' + dimension + '-at="' + key + '"></div> (<span id="' + key + '" style="display: inline-block; min-width: 50px; text-align: right;">X</span>' + units + ')' + suffix;
+ },
+
+ gaugeChart: function (title, width, dimensions, colors) {
+ if (typeof colors === 'undefined') {
+ colors = '';
+ }
+
+ if (typeof dimensions === 'undefined') {
+ dimensions = '';
+ }
+
+ return '<div class="netdata-container" data-netdata="CHART_UNIQUE_ID"'
+ + ' data-dimensions="' + dimensions + '"'
+ + ' data-chart-library="gauge"'
+ + ' data-gauge-adjust="width"'
+ + ' data-title="' + title + '"'
+ + ' data-width="' + width + '"'
+ + ' data-before="0"'
+ + ' data-after="-CHART_DURATION"'
+ + ' data-points="CHART_DURATION"'
+ + ' data-colors="' + colors + '"'
+ + ' role="application"></div>';
+ },
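+
+    // Example (illustrative): gaugeChart('Used', '12%') returns a placeholder
+    // whose CHART_UNIQUE_ID and CHART_DURATION markers are substituted
+    // per-chart later, in generateHeadCharts().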
+
+ anyAttribute: function (obj, attr, key, def) {
+ if (typeof (obj[key]) !== 'undefined') {
+ var x = obj[key][attr];
+
+ if (typeof (x) === 'undefined') {
+ return def;
+ }
+
+ if (typeof (x) === 'function') {
+ return x(netdataDashboard.os);
+ }
+
+ return x;
+ }
+
+ return def;
+ },
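+
+    // Example (illustrative): with this.menu = { cpu: { title: 'CPUs' } },
+    //     anyAttribute(this.menu, 'title', 'cpu', 'cpu')  // => 'CPUs'
+    //     anyAttribute(this.menu, 'icon', 'cpu', 'none')  // => 'none' (attr missing)
+    // and if the attribute is a function, it is called with the host OS.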
+
+ menuTitle: function (chart) {
+ if (typeof chart.menu_pattern !== 'undefined') {
+ return (this.anyAttribute(this.menu, 'title', chart.menu_pattern, chart.menu_pattern).toString()
+ + '&nbsp;' + chart.type.slice(-(chart.type.length - chart.menu_pattern.length - 1)).toString()).replace(/_/g, ' ');
+ }
+
+ return (this.anyAttribute(this.menu, 'title', chart.menu, chart.menu)).toString().replace(/_/g, ' ');
+ },
+
+ menuIcon: function (chart) {
+ if (typeof chart.menu_pattern !== 'undefined') {
+ return this.anyAttribute(this.menu, 'icon', chart.menu_pattern, '<i class="fas fa-puzzle-piece"></i>').toString();
+ }
+
+ return this.anyAttribute(this.menu, 'icon', chart.menu, '<i class="fas fa-puzzle-piece"></i>');
+ },
+
+ menuInfo: function (chart) {
+ if (typeof chart.menu_pattern !== 'undefined') {
+ return this.anyAttribute(this.menu, 'info', chart.menu_pattern, null);
+ }
+
+ return this.anyAttribute(this.menu, 'info', chart.menu, null);
+ },
+
+ menuHeight: function (chart) {
+ if (typeof chart.menu_pattern !== 'undefined') {
+ return this.anyAttribute(this.menu, 'height', chart.menu_pattern, 1.0);
+ }
+
+ return this.anyAttribute(this.menu, 'height', chart.menu, 1.0);
+ },
+
+ submenuTitle: function (menu, submenu) {
+ var key = menu + '.' + submenu;
+ // console.log(key);
+ var title = this.anyAttribute(this.submenu, 'title', key, submenu).toString().replace(/_/g, ' ');
+ if (title.length > 28) {
+ var a = title.substring(0, 13);
+ var b = title.substring(title.length - 12, title.length);
+ return a + '...' + b;
+ }
+ return title;
+ },
+
+ submenuInfo: function (menu, submenu) {
+ var key = menu + '.' + submenu;
+ return this.anyAttribute(this.submenu, 'info', key, null);
+ },
+
+ submenuHeight: function (menu, submenu, relative) {
+ var key = menu + '.' + submenu;
+ return this.anyAttribute(this.submenu, 'height', key, 1.0) * relative;
+ },
+
+ contextInfo: function (id) {
+ var x = this.anyAttribute(this.context, 'info', id, null);
+
+ if (x !== null) {
+ return '<div class="shorten dashboard-context-info netdata-chart-alignment" role="document">' + x + '</div>';
+ } else {
+ return '';
+ }
+ },
+
+ contextValueRange: function (id) {
+ if (typeof this.context[id] !== 'undefined' && typeof this.context[id].valueRange !== 'undefined') {
+ return this.context[id].valueRange;
+ } else {
+ return '[null, null]';
+ }
+ },
+
+ contextHeight: function (id, def) {
+ if (typeof this.context[id] !== 'undefined' && typeof this.context[id].height !== 'undefined') {
+ return def * this.context[id].height;
+ } else {
+ return def;
+ }
+ },
+
+ contextDecimalDigits: function (id, def) {
+ if (typeof this.context[id] !== 'undefined' && typeof this.context[id].decimalDigits !== 'undefined') {
+ return this.context[id].decimalDigits;
+ } else {
+ return def;
+ }
+ }
+};
+
+// ----------------------------------------------------------------------------
+
+// enrich the data structure returned by netdata
+// to reflect our menu system and content
+// TODO: this is a shame - we should fix charts naming (issue #807)
+function enrichChartData(chart) {
+ var parts = chart.type.split('_');
+ var tmp = parts[0];
+
+ switch (tmp) {
+ case 'ap':
+ case 'net':
+ case 'disk':
+ case 'powersupply':
+ case 'statsd':
+ chart.menu = tmp;
+ break;
+
+ case 'apache':
+ chart.menu = chart.type;
+ if (parts.length > 2 && parts[1] === 'cache') {
+ chart.menu_pattern = tmp + '_' + parts[1];
+ } else if (parts.length > 1) {
+ chart.menu_pattern = tmp;
+ }
+ break;
+
+ case 'bind':
+ chart.menu = chart.type;
+ if (parts.length > 2 && parts[1] === 'rndc') {
+ chart.menu_pattern = tmp + '_' + parts[1];
+ } else if (parts.length > 1) {
+ chart.menu_pattern = tmp;
+ }
+ break;
+
+ case 'cgroup':
+ chart.menu = chart.type;
+ if (chart.id.match(/.*[\._\/-:]qemu[\._\/-:]*/) || chart.id.match(/.*[\._\/-:]kvm[\._\/-:]*/)) {
+ chart.menu_pattern = 'cgqemu';
+ } else {
+ chart.menu_pattern = 'cgroup';
+ }
+ break;
+
+ case 'go':
+ chart.menu = chart.type;
+ if (parts.length > 2 && parts[1] === 'expvar') {
+ chart.menu_pattern = tmp + '_' + parts[1];
+ } else if (parts.length > 1) {
+ chart.menu_pattern = tmp;
+ }
+ break;
+
+ case 'isc':
+ chart.menu = chart.type;
+ if (parts.length > 2 && parts[1] === 'dhcpd') {
+ chart.menu_pattern = tmp + '_' + parts[1];
+ } else if (parts.length > 1) {
+ chart.menu_pattern = tmp;
+ }
+ break;
+
+ case 'ovpn':
+ chart.menu = chart.type;
+ if (parts.length > 3 && parts[1] === 'status' && parts[2] === 'log') {
+ chart.menu_pattern = tmp + '_' + parts[1];
+ } else if (parts.length > 1) {
+ chart.menu_pattern = tmp;
+ }
+ break;
+
+ case 'smartd':
+ case 'web':
+ chart.menu = chart.type;
+ if (parts.length > 2 && parts[1] === 'log') {
+ chart.menu_pattern = tmp + '_' + parts[1];
+ } else if (parts.length > 1) {
+ chart.menu_pattern = tmp;
+ }
+ break;
+
+ case 'tc':
+ chart.menu = tmp;
+
+ // find a name for this device from fireqos info
+ // we strip '_(in|out)' or '(in|out)_'
+ if (chart.context === 'tc.qos' && (typeof options.submenu_names[chart.family] === 'undefined' || options.submenu_names[chart.family] === chart.family)) {
+ var n = chart.name.split('.')[1];
+ if (n.endsWith('_in')) {
+ options.submenu_names[chart.family] = n.slice(0, n.lastIndexOf('_in'));
+ } else if (n.endsWith('_out')) {
+ options.submenu_names[chart.family] = n.slice(0, n.lastIndexOf('_out'));
+ } else if (n.startsWith('in_')) {
+ options.submenu_names[chart.family] = n.slice(3, n.length);
+ } else if (n.startsWith('out_')) {
+ options.submenu_names[chart.family] = n.slice(4, n.length);
+ } else {
+ options.submenu_names[chart.family] = n;
+ }
+ }
+
+ // increase the priority of IFB devices
+ // to have inbound appear before outbound
+ if (chart.id.match(/.*-ifb$/)) {
+ chart.priority--;
+ }
+
+ break;
+
+ default:
+ chart.menu = chart.type;
+ if (parts.length > 1) {
+ chart.menu_pattern = tmp;
+ }
+ break;
+ }
+
+ chart.submenu = chart.family;
+}
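+
+// Example (illustrative): a chart of type 'mysql_local' falls into the
+// default case above, so it gets menu = 'mysql_local' and menu_pattern =
+// 'mysql', and menuTitle() then derives its title from the 'mysql' pattern.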
+
+// ----------------------------------------------------------------------------
+
+function headMain(os, charts, duration) {
+ void (os);
+
+ if (urlOptions.mode === 'print') {
+ return '';
+ }
+
+ var head = '';
+
+ if (typeof charts['system.swap'] !== 'undefined') {
+ head += '<div class="netdata-container" style="margin-right: 10px;" data-netdata="system.swap"'
+ + ' data-dimensions="used"'
+ + ' data-append-options="percentage"'
+ + ' data-chart-library="easypiechart"'
+ + ' data-title="Used Swap"'
+ + ' data-units="%"'
+ + ' data-easypiechart-max-value="100"'
+ + ' data-width="9%"'
+ + ' data-before="0"'
+ + ' data-after="-' + duration.toString() + '"'
+ + ' data-points="' + duration.toString() + '"'
+ + ' data-colors="#DD4400"'
+ + ' role="application"></div>';
+ }
+
+ if (typeof charts['system.io'] !== 'undefined') {
+ head += '<div class="netdata-container" style="margin-right: 10px;" data-netdata="system.io"'
+ + ' data-dimensions="in"'
+ + ' data-chart-library="easypiechart"'
+ + ' data-title="Disk Read"'
+ + ' data-width="11%"'
+ + ' data-before="0"'
+ + ' data-after="-' + duration.toString() + '"'
+ + ' data-points="' + duration.toString() + '"'
+ + ' data-common-units="system.io.mainhead"'
+ + ' role="application"></div>';
+
+ head += '<div class="netdata-container" style="margin-right: 10px;" data-netdata="system.io"'
+ + ' data-dimensions="out"'
+ + ' data-chart-library="easypiechart"'
+ + ' data-title="Disk Write"'
+ + ' data-width="11%"'
+ + ' data-before="0"'
+ + ' data-after="-' + duration.toString() + '"'
+ + ' data-points="' + duration.toString() + '"'
+ + ' data-common-units="system.io.mainhead"'
+ + ' role="application"></div>';
+ }
+ else if (typeof charts['system.pgpgio'] !== 'undefined') {
+ head += '<div class="netdata-container" style="margin-right: 10px;" data-netdata="system.pgpgio"'
+ + ' data-dimensions="in"'
+ + ' data-chart-library="easypiechart"'
+ + ' data-title="Disk Read"'
+ + ' data-width="11%"'
+ + ' data-before="0"'
+ + ' data-after="-' + duration.toString() + '"'
+ + ' data-points="' + duration.toString() + '"'
+ + ' data-common-units="system.pgpgio.mainhead"'
+ + ' role="application"></div>';
+
+ head += '<div class="netdata-container" style="margin-right: 10px;" data-netdata="system.pgpgio"'
+ + ' data-dimensions="out"'
+ + ' data-chart-library="easypiechart"'
+ + ' data-title="Disk Write"'
+ + ' data-width="11%"'
+ + ' data-before="0"'
+ + ' data-after="-' + duration.toString() + '"'
+ + ' data-points="' + duration.toString() + '"'
+ + ' data-common-units="system.pgpgio.mainhead"'
+ + ' role="application"></div>';
+ }
+
+ if (typeof charts['system.cpu'] !== 'undefined') {
+ head += '<div class="netdata-container" style="margin-right: 10px;" data-netdata="system.cpu"'
+ + ' data-chart-library="gauge"'
+ + ' data-title="CPU"'
+ + ' data-units="%"'
+ + ' data-gauge-max-value="100"'
+ + ' data-width="20%"'
+ + ' data-after="-' + duration.toString() + '"'
+ + ' data-points="' + duration.toString() + '"'
+ + ' data-colors="' + NETDATA.colors[12] + '"'
+ + ' role="application"></div>';
+ }
+
+ if (typeof charts['system.net'] !== 'undefined') {
+ head += '<div class="netdata-container" style="margin-right: 10px;" data-netdata="system.net"'
+ + ' data-dimensions="received"'
+ + ' data-chart-library="easypiechart"'
+ + ' data-title="Net Inbound"'
+ + ' data-width="11%"'
+ + ' data-before="0"'
+ + ' data-after="-' + duration.toString() + '"'
+ + ' data-points="' + duration.toString() + '"'
+ + ' data-common-units="system.net.mainhead"'
+ + ' role="application"></div>';
+
+ head += '<div class="netdata-container" style="margin-right: 10px;" data-netdata="system.net"'
+ + ' data-dimensions="sent"'
+ + ' data-chart-library="easypiechart"'
+ + ' data-title="Net Outbound"'
+ + ' data-width="11%"'
+ + ' data-before="0"'
+ + ' data-after="-' + duration.toString() + '"'
+ + ' data-points="' + duration.toString() + '"'
+ + ' data-common-units="system.net.mainhead"'
+ + ' role="application"></div>';
+ }
+ else if (typeof charts['system.ip'] !== 'undefined') {
+ head += '<div class="netdata-container" style="margin-right: 10px;" data-netdata="system.ip"'
+ + ' data-dimensions="received"'
+ + ' data-chart-library="easypiechart"'
+ + ' data-title="IP Inbound"'
+ + ' data-width="11%"'
+ + ' data-before="0"'
+ + ' data-after="-' + duration.toString() + '"'
+ + ' data-points="' + duration.toString() + '"'
+ + ' data-common-units="system.ip.mainhead"'
+ + ' role="application"></div>';
+
+ head += '<div class="netdata-container" style="margin-right: 10px;" data-netdata="system.ip"'
+ + ' data-dimensions="sent"'
+ + ' data-chart-library="easypiechart"'
+ + ' data-title="IP Outbound"'
+ + ' data-width="11%"'
+ + ' data-before="0"'
+ + ' data-after="-' + duration.toString() + '"'
+ + ' data-points="' + duration.toString() + '"'
+ + ' data-common-units="system.ip.mainhead"'
+ + ' role="application"></div>';
+ }
+ else if (typeof charts['system.ipv4'] !== 'undefined') {
+ head += '<div class="netdata-container" style="margin-right: 10px;" data-netdata="system.ipv4"'
+ + ' data-dimensions="received"'
+ + ' data-chart-library="easypiechart"'
+ + ' data-title="IPv4 Inbound"'
+ + ' data-width="11%"'
+ + ' data-before="0"'
+ + ' data-after="-' + duration.toString() + '"'
+ + ' data-points="' + duration.toString() + '"'
+ + ' data-common-units="system.ipv4.mainhead"'
+ + ' role="application"></div>';
+
+ head += '<div class="netdata-container" style="margin-right: 10px;" data-netdata="system.ipv4"'
+ + ' data-dimensions="sent"'
+ + ' data-chart-library="easypiechart"'
+ + ' data-title="IPv4 Outbound"'
+ + ' data-width="11%"'
+ + ' data-before="0"'
+ + ' data-after="-' + duration.toString() + '"'
+ + ' data-points="' + duration.toString() + '"'
+ + ' data-common-units="system.ipv4.mainhead"'
+ + ' role="application"></div>';
+ }
+ else if (typeof charts['system.ipv6'] !== 'undefined') {
+ head += '<div class="netdata-container" style="margin-right: 10px;" data-netdata="system.ipv6"'
+ + ' data-dimensions="received"'
+ + ' data-chart-library="easypiechart"'
+ + ' data-title="IPv6 Inbound"'
+ + ' data-units="kbps"'
+ + ' data-width="11%"'
+ + ' data-before="0"'
+ + ' data-after="-' + duration.toString() + '"'
+ + ' data-points="' + duration.toString() + '"'
+ + ' data-common-units="system.ipv6.mainhead"'
+ + ' role="application"></div>';
+
+ head += '<div class="netdata-container" style="margin-right: 10px;" data-netdata="system.ipv6"'
+ + ' data-dimensions="sent"'
+ + ' data-chart-library="easypiechart"'
+ + ' data-title="IPv6 Outbound"'
+ + ' data-units="kbps"'
+ + ' data-width="11%"'
+ + ' data-before="0"'
+ + ' data-after="-' + duration.toString() + '"'
+ + ' data-points="' + duration.toString() + '"'
+ + ' data-common-units="system.ipv6.mainhead"'
+ + ' role="application"></div>';
+ }
+
+ if (typeof charts['system.ram'] !== 'undefined') {
+ head += '<div class="netdata-container" style="margin-right: 10px;" data-netdata="system.ram"'
+ + ' data-dimensions="used|buffers|active|wired"' // active and wired are FreeBSD stats
+ + ' data-append-options="percentage"'
+ + ' data-chart-library="easypiechart"'
+ + ' data-title="Used RAM"'
+ + ' data-units="%"'
+ + ' data-easypiechart-max-value="100"'
+ + ' data-width="9%"'
+ + ' data-after="-' + duration.toString() + '"'
+ + ' data-points="' + duration.toString() + '"'
+ + ' data-colors="' + NETDATA.colors[7] + '"'
+ + ' role="application"></div>';
+ }
+
+ return head;
+}
+
+function generateHeadCharts(type, chart, duration) {
+ if (urlOptions.mode === 'print') {
+ return '';
+ }
+
+ var head = '';
+ var hcharts = netdataDashboard.anyAttribute(netdataDashboard.context, type, chart.context, []);
+ if (hcharts.length > 0) {
+ var hi = 0, hlen = hcharts.length;
+ while (hi < hlen) {
+ if (typeof hcharts[hi] === 'function') {
+ head += hcharts[hi](netdataDashboard.os, chart.id).replace(/CHART_DURATION/g, duration.toString()).replace(/CHART_UNIQUE_ID/g, chart.id);
+ } else {
+ head += hcharts[hi].replace(/CHART_DURATION/g, duration.toString()).replace(/CHART_UNIQUE_ID/g, chart.id);
+ }
+ hi++;
+ }
+ }
+ return head;
+}
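+
+// Example (illustrative): an entry registered by the dashboard configuration
+// such as
+//     netdataDashboard.context['system.cpu'] = { heads: [ netdataDashboard.gaugeChart('CPU', '12%') ] };
+// is picked up here, has its CHART_DURATION / CHART_UNIQUE_ID placeholders
+// filled in for each matching chart, and is prepended to the chart rows by
+// renderPage() below.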
+
+function renderPage(menus, data) {
+ var div = document.getElementById('charts_div');
+ var pcent_width = Math.floor(100 / chartsPerRow($(div).width()));
+
+ // find the proper duration for per-second updates
+ var duration = Math.round(($(div).width() * pcent_width / 100 * data.update_every / 3) / 60) * 60;
+ options.duration = duration;
+ options.update_every = data.update_every;
+
+ var html = '';
+ var sidebar = '<ul class="nav dashboard-sidenav" data-spy="affix" id="sidebar_ul">';
+ var mainhead = headMain(netdataDashboard.os, data.charts, duration);
+
+ // sort the menus
+ var main = sortObjectByPriority(menus);
+ var i = 0, len = main.length;
+ while (i < len) {
+ var menu = main[i++];
+
+ // generate an entry at the main menu
+
+ var menuid = NETDATA.name2id('menu_' + menu);
+ sidebar += '<li class=""><a href="#' + menuid + '" onClick="return scrollToId(\'' + menuid + '\');">' + menus[menu].icon + ' ' + menus[menu].title + '</a><ul class="nav">';
+ html += '<div role="section" class="dashboard-section"><div role="sectionhead"><h1 id="' + menuid + '" role="heading">' + menus[menu].icon + ' ' + menus[menu].title + '</h1></div><div role="section" class="dashboard-subsection">';
+
+ if (menus[menu].info !== null) {
+ html += menus[menu].info;
+ }
+
+ // console.log(' >> ' + menu + ' (' + menus[menu].priority + '): ' + menus[menu].title);
+
+ var shtml = '';
+ var mhead = '<div class="netdata-chart-row">' + mainhead;
+ mainhead = '';
+
+ // sort the submenus of this menu
+ var sub = sortObjectByPriority(menus[menu].submenus);
+ var si = 0, slen = sub.length;
+ while (si < slen) {
+ var submenu = sub[si++];
+
+ // generate an entry at the submenu
+ var submenuid = NETDATA.name2id('menu_' + menu + '_submenu_' + submenu);
+            sidebar += '<li class=""><a href="#' + submenuid + '" onClick="return scrollToId(\'' + submenuid + '\');">' + menus[menu].submenus[submenu].title + '</a></li>';
+ shtml += '<div role="section" class="dashboard-section-container" id="' + submenuid + '"><h2 id="' + submenuid + '" class="netdata-chart-alignment" role="heading">' + menus[menu].submenus[submenu].title + '</h2>';
+
+ if (menus[menu].submenus[submenu].info !== null) {
+ shtml += '<div class="dashboard-submenu-info netdata-chart-alignment" role="document">' + menus[menu].submenus[submenu].info + '</div>';
+ }
+
+ var head = '<div class="netdata-chart-row">';
+ var chtml = '';
+
+ // console.log(' \------- ' + submenu + ' (' + menus[menu].submenus[submenu].priority + '): ' + menus[menu].submenus[submenu].title);
+
+ // sort the charts in this submenu of this menu
+ menus[menu].submenus[submenu].charts.sort(prioritySort);
+ var ci = 0, clen = menus[menu].submenus[submenu].charts.length;
+ while (ci < clen) {
+ var chart = menus[menu].submenus[submenu].charts[ci++];
+
+ // generate the submenu heading charts
+ mhead += generateHeadCharts('mainheads', chart, duration);
+ head += generateHeadCharts('heads', chart, duration);
+
+ function chartCommonMin(family, context, units) {
+ var x = netdataDashboard.anyAttribute(netdataDashboard.context, 'commonMin', context, undefined);
+ if (typeof x !== 'undefined') {
+ return ' data-common-min="' + family + '/' + context + '/' + units + '"';
+ } else {
+ return '';
+ }
+ }
+
+ function chartCommonMax(family, context, units) {
+ var x = netdataDashboard.anyAttribute(netdataDashboard.context, 'commonMax', context, undefined);
+ if (typeof x !== 'undefined') {
+ return ' data-common-max="' + family + '/' + context + '/' + units + '"';
+ } else {
+ return '';
+ }
+ }
+
+ // generate the chart
+ if (urlOptions.mode === 'print') {
+ chtml += '<div role="row" class="dashboard-print-row">';
+ }
+
+ chtml += '<div class="netdata-chartblock-container" style="width: ' + pcent_width.toString() + '%;">' + netdataDashboard.contextInfo(chart.context) + '<div class="netdata-container" id="chart_' + NETDATA.name2id(chart.id) + '" data-netdata="' + chart.id + '"'
+ + ' data-width="100%"'
+ + ' data-height="' + netdataDashboard.contextHeight(chart.context, options.chartsHeight).toString() + 'px"'
+ + ' data-dygraph-valuerange="' + netdataDashboard.contextValueRange(chart.context) + '"'
+ + ' data-before="0"'
+ + ' data-after="-' + duration.toString() + '"'
+ + ' data-id="' + NETDATA.name2id(options.hostname + '/' + chart.id) + '"'
+ + ' data-colors="' + netdataDashboard.anyAttribute(netdataDashboard.context, 'colors', chart.context, '') + '"'
+ + ' data-decimal-digits="' + netdataDashboard.contextDecimalDigits(chart.context, -1) + '"'
+ + chartCommonMin(chart.family, chart.context, chart.units)
+ + chartCommonMax(chart.family, chart.context, chart.units)
+ + ' role="application"></div></div>';
+
+ if (urlOptions.mode === 'print') {
+ chtml += '</div>';
+ }
+ }
+
+ head += '</div>';
+ shtml += head + chtml + '</div>';
+ }
+
+ mhead += '</div>';
+ sidebar += '</ul></li>';
+ html += mhead + shtml + '</div></div><hr role="separator"/>';
+ }
+
+ const isMemoryModeDbEngine = data.memory_mode === "dbengine";
+
+ sidebar += '<li class="" style="padding-top:15px;"><a href="https://docs.netdata.cloud/collectors/quickstart/" target="_blank"><i class="fas fa-plus"></i> Add more charts</a></li>';
+ sidebar += '<li class=""><a href="https://docs.netdata.cloud/health/quickstart/" target="_blank"><i class="fas fa-plus"></i> Add more alarms</a></li>';
+ sidebar += '<li class="" style="margin:20px;color:#666;"><small>Every ' +
+ ((data.update_every === 1) ? 'second' : data.update_every.toString() + ' seconds') + ', ' +
+ 'Netdata collects <strong>' + data.dimensions_count.toLocaleString() + '</strong> metrics on ' +
+ data.hostname.toString() + ', presents them in <strong>' +
+ data.charts_count.toLocaleString() + '</strong> charts' +
+ (isMemoryModeDbEngine ? '' : ',') + // oxford comma
+ ' and monitors them with <strong>' +
+ data.alarms_count.toLocaleString() + '</strong> alarms.';
+
+ if (!isMemoryModeDbEngine) {
+ sidebar += '<br />&nbsp;<br />Get more history by ' +
+ '<a href="https://docs.netdata.cloud/docs/configuration-guide/#increase-the-metrics-retention-period" target=_blank>configuring Netdata\'s <strong>history</strong></a> or using the <a href="https://docs.netdata.cloud/database/engine/" target=_blank>DB engine.</a>';
+ }
+
+ sidebar += '<br/>&nbsp;<br/><strong>netdata</strong><br/>' + data.version.toString() + '</small></li>';
+
+ sidebar += '</ul>';
+ div.innerHTML = html;
+ document.getElementById('sidebar').innerHTML = sidebar;
+
+ if (urlOptions.highlight === true) {
+ NETDATA.globalChartUnderlay.init(null
+ , urlOptions.highlight_after
+ , urlOptions.highlight_before
+ , (urlOptions.after > 0) ? urlOptions.after : null
+ , (urlOptions.before > 0) ? urlOptions.before : null
+ );
+ } else {
+ NETDATA.globalChartUnderlay.clear();
+ }
+
+ if (urlOptions.mode === 'print') {
+ printPage();
+ } else {
+ finalizePage();
+ }
+}
+
+function renderChartsAndMenu(data) {
+ options.menus = {};
+ options.submenu_names = {};
+
+ var menus = options.menus;
+ var charts = data.charts;
+ var m, menu_key;
+
+ for (var c in charts) {
+ if (!charts.hasOwnProperty(c)) {
+ continue;
+ }
+
+ var chart = charts[c];
+ enrichChartData(chart);
+ m = chart.menu;
+
+ // create the menu
+ if (typeof menus[m] === 'undefined') {
+ menus[m] = {
+ menu_pattern: chart.menu_pattern,
+ priority: chart.priority,
+ submenus: {},
+ title: netdataDashboard.menuTitle(chart),
+ icon: netdataDashboard.menuIcon(chart),
+ info: netdataDashboard.menuInfo(chart),
+ height: netdataDashboard.menuHeight(chart) * options.chartsHeight
+ };
+ } else {
+ if (typeof (menus[m].menu_pattern) === 'undefined') {
+ menus[m].menu_pattern = chart.menu_pattern;
+ }
+
+ if (chart.priority < menus[m].priority) {
+ menus[m].priority = chart.priority;
+ }
+ }
+
+ menu_key = (typeof (menus[m].menu_pattern) !== 'undefined') ? menus[m].menu_pattern : m;
+
+ // create the submenu
+ if (typeof menus[m].submenus[chart.submenu] === 'undefined') {
+ menus[m].submenus[chart.submenu] = {
+ priority: chart.priority,
+ charts: [],
+ title: null,
+ info: netdataDashboard.submenuInfo(menu_key, chart.submenu),
+ height: netdataDashboard.submenuHeight(menu_key, chart.submenu, menus[m].height)
+ };
+ } else {
+ if (chart.priority < menus[m].submenus[chart.submenu].priority) {
+ menus[m].submenus[chart.submenu].priority = chart.priority;
+ }
+ }
+
+ // index the chart in the menu/submenu
+ menus[m].submenus[chart.submenu].charts.push(chart);
+ }
+
+ // propagate the descriptive subname given to QoS
+ // to all the other submenus with the same name
+ for (var m in menus) {
+ if (!menus.hasOwnProperty(m)) {
+ continue;
+ }
+
+ for (var s in menus[m].submenus) {
+ if (!menus[m].submenus.hasOwnProperty(s)) {
+ continue;
+ }
+
+ // set the family using a name
+ if (typeof options.submenu_names[s] !== 'undefined') {
+ menus[m].submenus[s].title = s + ' (' + options.submenu_names[s] + ')';
+ } else {
+ menu_key = (typeof (menus[m].menu_pattern) !== 'undefined') ? menus[m].menu_pattern : m;
+ menus[m].submenus[s].title = netdataDashboard.submenuTitle(menu_key, s);
+ }
+ }
+ }
+
+ renderPage(menus, data);
+}
+
+// ----------------------------------------------------------------------------
+
+function loadJs(url, callback) {
+ $.ajax({
+ url: url.startsWith("http") ? url : transformWithOldSuffix(url),
+ cache: true,
+ dataType: "script",
+ xhrFields: { withCredentials: true } // required for the cookie
+ })
+ .fail(function () {
+ alert('Cannot load required JS library: ' + url);
+ })
+ .always(function () {
+ if (typeof callback === 'function') {
+ callback();
+ }
+ })
+}
+
+var clipboardLoaded = false;
+
+function loadClipboard(callback) {
+ if (clipboardLoaded === false) {
+ clipboardLoaded = true;
+ loadJs('lib/clipboard-polyfill-be05dad.js', callback);
+ } else {
+ callback();
+ }
+}
+
+var bootstrapTableLoaded = false;
+
+function loadBootstrapTable(callback) {
+ if (bootstrapTableLoaded === false) {
+ bootstrapTableLoaded = true;
+ loadJs('lib/bootstrap-table-1.11.0.min.js', function () {
+ loadJs('lib/bootstrap-table-export-1.11.0.min.js', function () {
+ loadJs('lib/tableExport-1.6.0.min.js', callback);
+ })
+ });
+ } else {
+ callback();
+ }
+}
+
+var bootstrapSliderLoaded = false;
+
+function loadBootstrapSlider(callback) {
+ if (bootstrapSliderLoaded === false) {
+ bootstrapSliderLoaded = true;
+ loadJs('lib/bootstrap-slider-10.0.0.min.js', function () {
+ NETDATA._loadCSS(transformWithOldSuffix("css/bootstrap-slider-10.0.0.min.css"));
+ callback();
+ });
+ } else {
+ callback();
+ }
+}
+
+var lzStringLoaded = false;
+
+function loadLzString(callback) {
+ if (lzStringLoaded === false) {
+ lzStringLoaded = true;
+ loadJs('lib/lz-string-1.4.4.min.js', callback);
+ } else {
+ callback();
+ }
+}
+
+var pakoLoaded = false;
+
+function loadPako(callback) {
+ if (pakoLoaded === false) {
+ pakoLoaded = true;
+ loadJs('lib/pako-1.0.6.min.js', callback);
+ } else {
+ callback();
+ }
+}
+
+// ----------------------------------------------------------------------------
+
+function clipboardCopy(text) {
+ clipboard.writeText(text);
+}
+
+function clipboardCopyBadgeEmbed(url) {
+ clipboard.writeText('<embed src="' + url + '" type="image/svg+xml" height="20"/>');
+}
+
+// ----------------------------------------------------------------------------
+
+function alarmsUpdateModal() {
+ var active = '<h3>Raised Alarms</h3><table class="table">';
+ var all = '<h3>All Running Alarms</h3><div class="panel-group" id="alarms_all_accordion" role="tablist" aria-multiselectable="true">';
+ var footer = '<hr/><a href="https://github.com/netdata/netdata/tree/master/web/api/badges#netdata-badges" target="_blank">netdata badges</a> refresh automatically. Their color indicates the state of the alarm: <span style="color: #e05d44"><b>&nbsp;red&nbsp;</b></span> is critical, <span style="color:#fe7d37"><b>&nbsp;orange&nbsp;</b></span> is warning, <span style="color: #4c1"><b>&nbsp;bright green&nbsp;</b></span> is ok, <span style="color: #9f9f9f"><b>&nbsp;light grey&nbsp;</b></span> is undefined (i.e. no data or no status), <span style="color: #000"><b>&nbsp;black&nbsp;</b></span> is not initialized. You can copy and paste their URLs to embed them in any web page.<br/>netdata can send notifications for these alarms. Check <a href="https://github.com/netdata/netdata/blob/master/health/notifications/health_alarm_notify.conf" target="_blank">this configuration file</a> for more information.';
+
+    loadClipboard(function () {
+        // preload the clipboard polyfill - the badge copy buttons below need it
+    });
+
+ NETDATA.alarms.get('all', function (data) {
+ options.alarm_families = [];
+
+ alarmsCallback(data);
+
+ if (data === null) {
+ document.getElementById('alarms_active').innerHTML =
+ document.getElementById('alarms_all').innerHTML =
+ document.getElementById('alarms_log').innerHTML =
+ 'failed to load alarm data!';
+ return;
+ }
+
+ function alarmid4human(id) {
+ if (id === 0) {
+ return '-';
+ }
+
+ return id.toString();
+ }
+
+ function timestamp4human(timestamp, space) {
+ if (timestamp === 0) {
+ return '-';
+ }
+
+ if (typeof space === 'undefined') {
+ space = '&nbsp;';
+ }
+
+ var t = new Date(timestamp * 1000);
+ var now = new Date();
+
+ if (t.toDateString() === now.toDateString()) {
+ return t.toLocaleTimeString();
+ }
+
+ return t.toLocaleDateString() + space + t.toLocaleTimeString();
+ }
+
+ function alarm_lookup_explain(alarm, chart) {
+ var dimensions = ' of all values ';
+
+ if (chart.dimensions.length > 1) {
+ dimensions = ' of the sum of all dimensions ';
+ }
+
+ if (typeof alarm.lookup_dimensions !== 'undefined') {
+                var d = alarm.lookup_dimensions.replace(/\|/g, ','); // escape the '|' - /|/g matches the empty string
+ var x = d.split(',');
+ if (x.length > 1) {
+ dimensions = 'of the sum of dimensions <code>' + alarm.lookup_dimensions + '</code> ';
+ } else {
+ dimensions = 'of all values of dimension <code>' + alarm.lookup_dimensions + '</code> ';
+ }
+ }
+
+ return '<code>' + alarm.lookup_method + '</code> '
+ + dimensions + ', of chart <code>' + alarm.chart + '</code>'
+ + ', starting <code>' + NETDATA.seconds4human(alarm.lookup_after + alarm.lookup_before, { space: '&nbsp;' }) + '</code> and up to <code>' + NETDATA.seconds4human(alarm.lookup_before, { space: '&nbsp;' }) + '</code>'
+ + ((alarm.lookup_options) ? (', with options <code>' + alarm.lookup_options.replace(/ /g, ',&nbsp;') + '</code>') : '')
+ + '.';
+ }
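+
+ // For a hypothetical alarm with lookup_method 'average', lookup_dimensions
+ // 'in|out' and chart 'system.net', this renders roughly:
+ // "average of the sum of dimensions in|out, of chart system.net,
+ // starting ... and up to ...", with the values wrapped in <code> tags
+ // and the time offsets humanized by NETDATA.seconds4human().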
+
+ function alarm_to_html(alarm, full) {
+ var chart = options.data.charts[alarm.chart];
+ if (typeof (chart) === 'undefined') {
+ chart = options.data.charts_by_name[alarm.chart];
+ if (typeof (chart) === 'undefined') {
+ // this means the list of loaded charts is incomplete:
+ // probably netdata was restarted and new charts (and
+ // their alarms) are now available.
+ console.log('Cannot find chart ' + alarm.chart + ', you probably need to refresh the page.');
+ return '';
+ }
+ }
+
+ var has_alarm = (typeof alarm.warn !== 'undefined' || typeof alarm.crit !== 'undefined');
+ var badge_url = NETDATA.alarms.server + '/api/v1/badge.svg?chart=' + alarm.chart + '&alarm=' + alarm.name + '&refresh=auto';
+
+ var action_buttons = '<br/>&nbsp;<br/>role: <b>' + alarm.recipient + '</b><br/>&nbsp;<br/>'
+ + '<div class="action-button ripple" title="click to scroll the dashboard to the chart of this alarm" data-toggle="tooltip" data-placement="bottom" onClick="scrollToChartAfterHidingModal(\'' + alarm.chart + '\', ' + alarm.last_status_change * 1000 + ', \'' + alarm.status + '\'); $(\'#alarmsModal\').modal(\'hide\'); return false;"><i class="fab fa-periscope"></i></div>'
+ + '<div class="action-button ripple" title="click to copy to the clipboard the URL of this badge" data-toggle="tooltip" data-placement="bottom" onClick="clipboardCopy(\'' + badge_url + '\'); return false;"><i class="far fa-copy"></i></div>'
+ + '<div class="action-button ripple" title="click to copy to the clipboard an auto-refreshing <code>embed</code> html element for this badge" data-toggle="tooltip" data-placement="bottom" onClick="clipboardCopyBadgeEmbed(\'' + badge_url + '\'); return false;"><i class="fas fa-copy"></i></div>';
+
+ var html = '<tr><td class="text-center" style="vertical-align:middle" width="40%"><b>' + alarm.chart + '</b><br/>&nbsp;<br/><embed src="' + badge_url + '" type="image/svg+xml" height="20"/><br/>&nbsp;<br/><span style="font-size: 18px">' + alarm.info + '</span>' + action_buttons + '</td>'
+ + '<td><table class="table">'
+ + ((typeof alarm.warn !== 'undefined') ? ('<tr><td width="10%" style="text-align:right">warning&nbsp;when</td><td><span style="font-family: monospace; color:#fe7d37; font-weight: bold;">' + alarm.warn + '</span></td></tr>') : '')
+ + ((typeof alarm.crit !== 'undefined') ? ('<tr><td width="10%" style="text-align:right">critical&nbsp;when</td><td><span style="font-family: monospace; color: #e05d44; font-weight: bold;">' + alarm.crit + '</span></td></tr>') : '');
+
+ if (full === true) {
+ var units = chart.units;
+ if (units === '%') {
+ units = '&#37;';
+ }
+
+ html += ((typeof alarm.lookup_after !== 'undefined') ? ('<tr><td width="10%" style="text-align:right">db&nbsp;lookup</td><td>' + alarm_lookup_explain(alarm, chart) + '</td></tr>') : '')
+ + ((typeof alarm.calc !== 'undefined') ? ('<tr><td width="10%" style="text-align:right">calculation</td><td><span style="font-family: monospace;">' + alarm.calc + '</span></td></tr>') : '')
+ + ((chart.green !== null) ? ('<tr><td width="10%" style="text-align:right">green&nbsp;threshold</td><td><code>' + chart.green + ' ' + units + '</code></td></tr>') : '')
+ + ((chart.red !== null) ? ('<tr><td width="10%" style="text-align:right">red&nbsp;threshold</td><td><code>' + chart.red + ' ' + units + '</code></td></tr>') : '');
+ }
+
+ if (alarm.warn_repeat_every > 0) {
+ html += '<tr><td width="10%" style="text-align:right">repeat&nbsp;warning</td><td>' + NETDATA.seconds4human(alarm.warn_repeat_every) + '</td></tr>';
+ }
+
+ if (alarm.crit_repeat_every > 0) {
+ html += '<tr><td width="10%" style="text-align:right">repeat&nbsp;critical</td><td>' + NETDATA.seconds4human(alarm.crit_repeat_every) + '</td></tr>';
+ }
+
+ var delay = '';
+ if ((alarm.delay_up_duration > 0 || alarm.delay_down_duration > 0) && alarm.delay_multiplier !== 0 && alarm.delay_max_duration > 0) {
+ if (alarm.delay_up_duration === alarm.delay_down_duration) {
+ delay += '<small><br/>hysteresis ' + NETDATA.seconds4human(alarm.delay_up_duration, {
+ space: '&nbsp;',
+ negative_suffix: ''
+ });
+ } else {
+ delay = '<small><br/>hysteresis ';
+ if (alarm.delay_up_duration > 0) {
+ delay += 'on&nbsp;escalation&nbsp;<code>' + NETDATA.seconds4human(alarm.delay_up_duration, {
+ space: '&nbsp;',
+ negative_suffix: ''
+ }) + '</code>, ';
+ }
+ if (alarm.delay_down_duration > 0) {
+ delay += 'on&nbsp;recovery&nbsp;<code>' + NETDATA.seconds4human(alarm.delay_down_duration, {
+ space: '&nbsp;',
+ negative_suffix: ''
+ }) + '</code>, ';
+ }
+ }
+ if (alarm.delay_multiplier !== 1.0) {
+ delay += 'multiplied&nbsp;by&nbsp;<code>' + alarm.delay_multiplier.toString() + '</code>';
+ delay += ',&nbsp;up&nbsp;to&nbsp;<code>' + NETDATA.seconds4human(alarm.delay_max_duration, {
+ space: '&nbsp;',
+ negative_suffix: ''
+ }) + '</code>';
+ }
+ delay += '</small>';
+ }
+
+ html += '<tr><td width="10%" style="text-align:right">check&nbsp;every</td><td>' + NETDATA.seconds4human(alarm.update_every, {
+ space: '&nbsp;',
+ negative_suffix: ''
+ }) + '</td></tr>'
+ + ((has_alarm === true) ? ('<tr><td width="10%" style="text-align:right">execute</td><td><span style="font-family: monospace;">' + alarm.exec + '</span>' + delay + '</td></tr>') : '')
+ + '<tr><td width="10%" style="text-align:right">source</td><td><span style="font-family: monospace;">' + alarm.source + '</span></td></tr>'
+ + '</table></td></tr>';
+
+ return html;
+ }
+
+ function alarm_family_show(id) {
+ var html = '<table class="table">';
+ var family = options.alarm_families[id];
+ var len = family.arr.length;
+ while (len--) {
+ var alarm = family.arr[len];
+ html += alarm_to_html(alarm, true);
+ }
+ html += '</table>';
+
+ $('#alarm_all_' + id.toString()).html(html);
+ enableTooltipsAndPopovers();
+ }
+
+ // find the proper family of each alarm
+ var x, family, alarm;
+ var count_active = 0;
+ var count_all = 0;
+ var families = {};
+ var families_sort = [];
+ for (x in data.alarms) {
+ if (!data.alarms.hasOwnProperty(x)) {
+ continue;
+ }
+
+ alarm = data.alarms[x];
+ family = alarm.family;
+
+ // find the chart
+ var chart = options.data.charts[alarm.chart];
+ if (typeof chart === 'undefined') {
+ chart = options.data.charts_by_name[alarm.chart];
+ }
+
+ // not found - this should never happen!
+ if (typeof chart === 'undefined') {
+ console.log('WARNING: alarm ' + x + ' is linked to chart ' + alarm.chart + ', which was not found in the list of charts received from the server.');
+ chart = { priority: 9999999 };
+ }
+ else if (typeof chart.menu !== 'undefined' && typeof chart.submenu !== 'undefined') {
+ // derive the family from the chart's menu placement
+ family = chart.menu + ' - ' + chart.submenu;
+ }
+
+ if (typeof families[family] === 'undefined') {
+ families[family] = {
+ name: family,
+ arr: [],
+ priority: chart.priority
+ };
+
+ families_sort.push(families[family]);
+ }
+
+ if (chart.priority < families[family].priority) {
+ families[family].priority = chart.priority;
+ }
+
+ families[family].arr.unshift(alarm);
+ }
+
+ // sort the families, like the dashboard menu does
+ var families_sorted = families_sort.sort(function (a, b) {
+ if (a.priority < b.priority) {
+ return -1;
+ }
+ if (a.priority > b.priority) {
+ return 1;
+ }
+ return naturalSortCompare(a.name, b.name);
+ });
+
+ var i = 0;
+ var fc = 0;
+ var len = families_sorted.length;
+ while (len--) {
+ family = families_sorted[i++].name;
+ var active_family_added = false;
+ var expanded = 'true';
+ var collapsed = '';
+ var cin = 'in';
+
+ if (fc !== 0) {
+ all += "</table></div></div></div>";
+ expanded = 'false';
+ collapsed = 'class="collapsed"';
+ cin = '';
+ }
+
+ all += '<div class="panel panel-default"><div class="panel-heading" role="tab" id="alarm_all_heading_' + fc.toString() + '"><h4 class="panel-title"><a ' + collapsed + ' role="button" data-toggle="collapse" data-parent="#alarms_all_accordion" href="#alarm_all_' + fc.toString() + '" aria-expanded="' + expanded + '" aria-controls="alarm_all_' + fc.toString() + '">' + family.toString() + '</a></h4></div><div id="alarm_all_' + fc.toString() + '" class="panel-collapse collapse ' + cin + '" role="tabpanel" aria-labelledby="alarm_all_heading_' + fc.toString() + '" data-alarm-id="' + fc.toString() + '"><div class="panel-body" id="alarm_all_body_' + fc.toString() + '">';
+
+ options.alarm_families[fc] = families[family];
+
+ fc++;
+
+ var arr = families[family].arr;
+ var c = arr.length;
+ while (c--) {
+ alarm = arr[c];
+ if (alarm.status === 'WARNING' || alarm.status === 'CRITICAL') {
+ if (!active_family_added) {
+ active_family_added = true;
+ active += '<tr><th class="text-center" colspan="2"><h4>' + family + '</h4></th></tr>';
+ }
+ count_active++;
+ active += alarm_to_html(alarm, true);
+ }
+
+ count_all++;
+ }
+ }
+ active += "</table>";
+ if (families_sorted.length > 0) {
+ all += "</div></div></div>";
+ }
+ all += "</div>";
+
+ if (!count_active) {
+ active += '<div style="width:100%; height: 100px; text-align: center;"><span style="font-size: 50px;"><i class="fas fa-thumbs-up"></i></span><br/>Everything is normal. No raised alarms.</div>';
+ } else {
+ active += footer;
+ }
+
+ if (!count_all) {
+ all += "<h4>No alarms are running in this system.</h4>";
+ } else {
+ all += footer;
+ }
+
+ document.getElementById('alarms_active').innerHTML = active;
+ document.getElementById('alarms_all').innerHTML = all;
+ enableTooltipsAndPopovers();
+
+ if (families_sorted.length > 0) {
+ alarm_family_show(0);
+ }
+
+ // register bootstrap events
+ var $accordion = $('#alarms_all_accordion');
+ $accordion.on('show.bs.collapse', function (d) {
+ var target = $(d.target);
+ var id = $(target).data('alarm-id');
+ alarm_family_show(id);
+ });
+ $accordion.on('hidden.bs.collapse', function (d) {
+ var target = $(d.target);
+ var id = $(target).data('alarm-id');
+ $('#alarm_all_' + id.toString()).html('');
+ });
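+
+ // The accordion builds each family's table lazily: alarm_family_show()
+ // renders a panel's contents only when it is expanded, and the hidden
+ // handler empties it again on collapse, keeping the modal's DOM small
+ // when many alarm families exist.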
+
+ document.getElementById('alarms_log').innerHTML = '<h3>Alarm Log</h3><table id="alarms_log_table"></table>';
+
+ loadBootstrapTable(function () {
+ $('#alarms_log_table').bootstrapTable({
+ url: NETDATA.alarms.server + '/api/v1/alarm_log?all',
+ cache: false,
+ pagination: true,
+ pageSize: 10,
+ showPaginationSwitch: false,
+ search: true,
+ searchTimeOut: 300,
+ searchAlign: 'left',
+ showColumns: true,
+ showExport: true,
+ exportDataType: 'basic',
+ exportOptions: {
+ fileName: 'netdata_alarm_log'
+ },
+ onClickRow: function (row, $element, field) {
+ void (field);
+ void ($element);
+ let main_url;
+ let common_url = "&host=" + encodeURIComponent(row['hostname']) + "&chart=" + encodeURIComponent(row['chart']) + "&family=" + encodeURIComponent(row['family']) + "&alarm=" + encodeURIComponent(row['name']) + "&alarm_unique_id=" + row['unique_id'] + "&alarm_id=" + row['alarm_id'] + "&alarm_event_id=" + row['alarm_event_id'] + "&alarm_when=" + row['when'];
+ if (NETDATA.registry.isUsingGlobalRegistry() && NETDATA.registry.machine_guid != null) {
+ main_url = "https://netdata.cloud/alarms/redirect?agentID=" + NETDATA.registry.machine_guid + common_url;
+ } else {
+ main_url = NETDATA.registry.server + "/goto-host-from-alarm.html?" + common_url;
+ }
+ window.open(main_url, "_blank");
+ },
+ rowStyle: function (row, index) {
+ void (index);
+
+ switch (row.status) {
+ case 'CRITICAL':
+ return { classes: 'danger' };
+ case 'WARNING':
+ return { classes: 'warning' };
+ case 'UNDEFINED':
+ return { classes: 'info' };
+ case 'CLEAR':
+ return { classes: 'success' };
+ }
+ return {};
+ },
+ showFooter: false,
+ showHeader: true,
+ showRefresh: true,
+ showToggle: false,
+ sortable: true,
+ silentSort: false,
+ columns: [
+ {
+ field: 'when',
+ title: 'Event Date',
+ valign: 'middle',
+ titleTooltip: 'The date and time the event took place',
+ formatter: function (value, row, index) {
+ void (row);
+ void (index);
+ return timestamp4human(value, ' ');
+ },
+ align: 'center',
+ switchable: false,
+ sortable: true
+ },
+ {
+ field: 'hostname',
+ title: 'Host',
+ valign: 'middle',
+ titleTooltip: 'The host that generated this event',
+ align: 'center',
+ visible: false,
+ sortable: true
+ },
+ {
+ field: 'unique_id',
+ title: 'Unique ID',
+ titleTooltip: 'The host unique ID for this event',
+ formatter: function (value, row, index) {
+ void (row);
+ void (index);
+ return alarmid4human(value);
+ },
+ align: 'center',
+ valign: 'middle',
+ visible: false,
+ sortable: true
+ },
+ {
+ field: 'alarm_id',
+ title: 'Alarm ID',
+ titleTooltip: 'The ID of the alarm that generated this event',
+ formatter: function (value, row, index) {
+ void (row);
+ void (index);
+ return alarmid4human(value);
+ },
+ align: 'center',
+ valign: 'middle',
+ visible: false,
+ sortable: true
+ },
+ {
+ field: 'alarm_event_id',
+ title: 'Alarm Event ID',
+ titleTooltip: 'The incremental ID of this event for the given alarm',
+ formatter: function (value, row, index) {
+ void (row);
+ void (index);
+ return alarmid4human(value);
+ },
+ align: 'center',
+ valign: 'middle',
+ visible: false,
+ sortable: true
+ },
+ {
+ field: 'chart',
+ title: 'Chart',
+ titleTooltip: 'The chart the alarm is attached to',
+ align: 'center',
+ valign: 'middle',
+ switchable: false,
+ sortable: true
+ },
+ {
+ field: 'family',
+ title: 'Family',
+ titleTooltip: 'The family of the chart the alarm is attached to',
+ align: 'center',
+ valign: 'middle',
+ visible: false,
+ sortable: true
+ },
+ {
+ field: 'name',
+ title: 'Alarm',
+ titleTooltip: 'The alarm name that generated this event',
+ formatter: function (value, row, index) {
+ void (row);
+ void (index);
+ return value.toString().replace(/_/g, ' ');
+ },
+ align: 'center',
+ valign: 'middle',
+ switchable: false,
+ sortable: true
+ },
+ {
+ field: 'value_string',
+ title: 'Friendly Value',
+ titleTooltip: 'The value of the alarm that triggered this event',
+ align: 'right',
+ valign: 'middle',
+ sortable: true
+ },
+ {
+ field: 'old_value_string',
+ title: 'Friendly Old Value',
+ titleTooltip: 'The value of the alarm, just before this event',
+ align: 'right',
+ valign: 'middle',
+ visible: false,
+ sortable: true
+ },
+ {
+ field: 'old_value',
+ title: 'Old Value',
+ titleTooltip: 'The value of the alarm, just before this event',
+ formatter: function (value, row, index) {
+ void (row);
+ void (index);
+ return ((value !== null) ? Math.round(value * 100) / 100 : 'NaN').toString();
+ },
+ align: 'center',
+ valign: 'middle',
+ visible: false,
+ sortable: true
+ },
+ {
+ field: 'value',
+ title: 'Value',
+ titleTooltip: 'The value of the alarm that triggered this event',
+ formatter: function (value, row, index) {
+ void (row);
+ void (index);
+ return ((value !== null) ? Math.round(value * 100) / 100 : 'NaN').toString();
+ },
+ align: 'right',
+ valign: 'middle',
+ visible: false,
+ sortable: true
+ },
+ {
+ field: 'units',
+ title: 'Units',
+ titleTooltip: 'The units of the value of the alarm',
+ align: 'left',
+ valign: 'middle',
+ visible: false,
+ sortable: true
+ },
+ {
+ field: 'old_status',
+ title: 'Old Status',
+ titleTooltip: 'The status of the alarm, just before this event',
+ align: 'center',
+ valign: 'middle',
+ visible: false,
+ sortable: true
+ },
+ {
+ field: 'status',
+ title: 'Status',
+ titleTooltip: 'The status of the alarm that was set due to this event',
+ align: 'center',
+ valign: 'middle',
+ switchable: false,
+ sortable: true
+ },
+ {
+ field: 'duration',
+ title: 'Last Duration',
+ titleTooltip: 'The duration the alarm was in its previous state, just before this event',
+ formatter: function (value, row, index) {
+ void (row);
+ void (index);
+ return NETDATA.seconds4human(value, { negative_suffix: '', space: ' ', now: 'no time' });
+ },
+ align: 'center',
+ valign: 'middle',
+ visible: false,
+ sortable: true
+ },
+ {
+ field: 'non_clear_duration',
+ title: 'Raised Duration',
+ titleTooltip: 'The duration the alarm was raised, just before this event',
+ formatter: function (value, row, index) {
+ void (row);
+ void (index);
+ return NETDATA.seconds4human(value, { negative_suffix: '', space: ' ', now: 'no time' });
+ },
+ align: 'center',
+ valign: 'middle',
+ visible: false,
+ sortable: true
+ },
+ {
+ field: 'recipient',
+ title: 'Recipient',
+ titleTooltip: 'The recipient of this event',
+ align: 'center',
+ valign: 'middle',
+ visible: false,
+ sortable: true
+ },
+ {
+ field: 'processed',
+ title: 'Processed Status',
+ titleTooltip: 'True when this event is processed',
+ formatter: function (value, row, index) {
+ void (row);
+ void (index);
+
+ if (value === true) {
+ return 'DONE';
+ } else {
+ return 'PENDING';
+ }
+ },
+ align: 'center',
+ valign: 'middle',
+ visible: false,
+ sortable: true
+ },
+ {
+ field: 'updated',
+ title: 'Updated Status',
+ titleTooltip: 'True when this event has been updated by another event',
+ formatter: function (value, row, index) {
+ void (row);
+ void (index);
+
+ if (value === true) {
+ return 'UPDATED';
+ } else {
+ return 'CURRENT';
+ }
+ },
+ align: 'center',
+ valign: 'middle',
+ visible: false,
+ sortable: true
+ },
+ {
+ field: 'updated_by_id',
+ title: 'Updated By ID',
+ titleTooltip: 'The unique ID of the event that obsoleted this one',
+ formatter: function (value, row, index) {
+ void (row);
+ void (index);
+ return alarmid4human(value);
+ },
+ align: 'center',
+ valign: 'middle',
+ visible: false,
+ sortable: true
+ },
+ {
+ field: 'updates_id',
+ title: 'Updates ID',
+ titleTooltip: 'The unique ID of the event obsoleted by this event',
+ formatter: function (value, row, index) {
+ void (row);
+ void (index);
+ return alarmid4human(value);
+ },
+ align: 'center',
+ valign: 'middle',
+ visible: false,
+ sortable: true
+ },
+ {
+ field: 'exec',
+ title: 'Script',
+ titleTooltip: 'The script to handle the event notification',
+ align: 'center',
+ valign: 'middle',
+ visible: false,
+ sortable: true
+ },
+ {
+ field: 'exec_run',
+ title: 'Script Run At',
+ titleTooltip: 'The date and time the script was run',
+ formatter: function (value, row, index) {
+ void (row);
+ void (index);
+ return timestamp4human(value, ' ');
+ },
+ align: 'center',
+ valign: 'middle',
+ visible: false,
+ sortable: true
+ },
+ {
+ field: 'exec_code',
+ title: 'Script Return Value',
+ titleTooltip: 'The return code of the script',
+ formatter: function (value, row, index) {
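+ // Pack one chart's data into the snapshot: the chart state's JSON is
+ // compressed with the selected method and stored under the chart's
+ // unique ID. Returns the compressed size (used by the progress log),
+ // or 0 when there is nothing to pack.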
+ void (row);
+ void (index);
+
+ if (value === 0) {
+ return 'OK (returned 0)';
+ } else {
+ return 'FAILED (with code ' + value.toString() + ')';
+ }
+ },
+ align: 'center',
+ valign: 'middle',
+ visible: false,
+ sortable: true
+ },
+ {
+ field: 'delay',
+ title: 'Script Delay',
+ titleTooltip: 'The hysteresis of the notification',
+ formatter: function (value, row, index) {
+ void (row);
+ void (index);
+
+ return NETDATA.seconds4human(value, { negative_suffix: '', space: ' ', now: 'no time' });
+ },
+ align: 'center',
+ valign: 'middle',
+ visible: false,
+ sortable: true
+ },
+ {
+ field: 'delay_up_to_timestamp',
+ title: 'Script Delay Run At',
+ titleTooltip: 'The date and time the script should be run, after hysteresis',
+ formatter: function (value, row, index) {
+ void (row);
+ void (index);
+ return timestamp4human(value, ' ');
+ },
+ align: 'center',
+ valign: 'middle',
+ visible: false,
+ sortable: true
+ },
+ {
+ field: 'info',
+ title: 'Description',
+ titleTooltip: 'A short description of the alarm',
+ align: 'center',
+ valign: 'middle',
+ visible: false,
+ sortable: true
+ },
+ {
+ field: 'source',
+ title: 'Alarm Source',
+ titleTooltip: 'The source of configuration of the alarm',
+ align: 'center',
+ valign: 'middle',
+ visible: false,
+ sortable: true
+ }
+ ]
+ });
+ // console.log($('#alarms_log_table').bootstrapTable('getOptions'));
+ });
+ });
+}
+
+function alarmsCallback(data) {
+ var count = 0, x;
+ for (x in data.alarms) {
+ if (!data.alarms.hasOwnProperty(x)) {
+ continue;
+ }
+
+ var alarm = data.alarms[x];
+ if (alarm.status === 'WARNING' || alarm.status === 'CRITICAL') {
+ count++;
+ }
+ }
+
+ if (count > 0) {
+ document.getElementById('alarms_count_badge').innerHTML = count.toString();
+ } else {
+ document.getElementById('alarms_count_badge').innerHTML = '';
+ }
+}
+
+function initializeDynamicDashboardWithData(data) {
+ if (data !== null) {
+ options.hostname = data.hostname;
+ options.data = data;
+ options.version = data.version;
+ options.release_channel = data.release_channel;
+ netdataDashboard.os = data.os;
+
+ if (typeof data.hosts !== 'undefined') {
+ options.hosts = data.hosts;
+ }
+
+ // update the dashboard hostname
+ document.getElementById('hostname').innerHTML = '<span id="hostnametext">' + options.hostname + ((netdataSnapshotData !== null) ? ' (snap)' : '').toString() + '</span>&nbsp;&nbsp;<strong class="caret"></strong>';
+ document.getElementById('hostname').href = NETDATA.serverDefault;
+ document.getElementById('netdataVersion').innerHTML = options.version;
+
+ if (netdataSnapshotData !== null) {
+ $('#alarmsButton').hide();
+ $('#updateButton').hide();
+ // $('#loadButton').hide();
+ $('#saveButton').hide();
+ $('#printButton').hide();
+ }
+
+ // update the dashboard title
+ document.title = options.hostname + ' netdata dashboard';
+
+ // close the splash screen
+ $("#loadOverlay").css("display", "none");
+
+ // create a chart_by_name index
+ data.charts_by_name = {};
+ var charts = data.charts;
+ var x;
+ for (x in charts) {
+ if (!charts.hasOwnProperty(x)) {
+ continue;
+ }
+
+ var chart = charts[x];
+ data.charts_by_name[chart.name] = chart;
+ }
+
+ // render all charts
+ renderChartsAndMenu(data);
+
+ // Ensure MyNetdata menu is rendered with latest host info #5370
+ renderMyNetdataMenu(isSignedIn() ? cloudAgents : registryAgents);
+ }
+}
+
+// an object to keep the initialization configuration,
+// needed due to the async nature of the XSS modal
+var initializeConfig = {
+ url: null,
+ custom_info: true,
+};
+
+function loadCustomDashboardInfo(url, callback) {
+ loadJs(url, function () {
+ $.extend(true, netdataDashboard, customDashboard);
+ callback();
+ });
+}
+
+function initializeChartsAndCustomInfo() {
+ NETDATA.alarms.callback = alarmsCallback;
+
+ // download all the charts the server knows
+ NETDATA.chartRegistry.downloadAll(initializeConfig.url, function (data) {
+ if (data !== null) {
+ if (initializeConfig.custom_info === true && typeof data.custom_info !== 'undefined' && data.custom_info !== "" && netdataSnapshotData === null) {
+ //console.log('loading custom dashboard decorations from server ' + initializeConfig.url);
+ loadCustomDashboardInfo(NETDATA.serverDefault + data.custom_info, function () {
+ initializeDynamicDashboardWithData(data);
+ });
+ } else {
+ //console.log('not loading custom dashboard decorations from server ' + initializeConfig.url);
+ initializeDynamicDashboardWithData(data);
+ }
+ }
+ });
+}
+
+function xssModalDisableXss() {
+ //console.log('disabling xss checks');
+ NETDATA.xss.enabled = false;
+ NETDATA.xss.enabled_for_data = false;
+ initializeConfig.custom_info = true;
+ initializeChartsAndCustomInfo();
+ return false;
+}
+
+function xssModalKeepXss() {
+ //console.log('keeping xss checks');
+ NETDATA.xss.enabled = true;
+ NETDATA.xss.enabled_for_data = true;
+ initializeConfig.custom_info = false;
+ initializeChartsAndCustomInfo();
+ return false;
+}
+
+function initializeDynamicDashboard(netdata_url) {
+ if (typeof netdata_url === 'undefined' || netdata_url === null) {
+ netdata_url = NETDATA.serverDefault;
+ }
+
+ initializeConfig.url = netdata_url;
+
+ // initialize clickable alarms
+ NETDATA.alarms.chart_div_offset = -50;
+ NETDATA.alarms.chart_div_id_prefix = 'chart_';
+ NETDATA.alarms.chart_div_animation_duration = 0;
+
+ NETDATA.pause(function () {
+ if (typeof netdataCheckXSS !== 'undefined' && netdataCheckXSS === true) {
+ //$("#loadOverlay").css("display","none");
+ document.getElementById('netdataXssModalServer').innerText = initializeConfig.url;
+ $('#xssModal').modal('show');
+ } else {
+ initializeChartsAndCustomInfo();
+ }
+ });
+}
+
+// ----------------------------------------------------------------------------
+
+function versionLog(msg) {
+ document.getElementById('versionCheckLog').innerHTML = msg;
+}
+
+// New way of checking for updates, based only on versions
+
+// returns true when v1 is the same as, or newer than, v2
+function versionsMatch(v1, v2) {
+ if (v1 === v2) {
+ return true;
+ } else {
+ let s1 = v1.split('.');
+ let s2 = v2.split('.');
+ // Check major version
+ let n1 = parseInt(s1[0].substring(1), 10); // strip the leading 'v', keep multi-digit majors
+ let n2 = parseInt(s2[0].substring(1), 10);
+ if (n1 < n2) return false;
+ else if (n1 > n2) return true;
+
+ // Check minor version
+ n1 = parseInt(s1[1], 10);
+ n2 = parseInt(s2[1], 10);
+ if (n1 < n2) return false;
+ else if (n1 > n2) return true;
+
+ // Split patch: format could be e.g. 0-22-nightly
+ s1 = s1[2].split('-');
+ s2 = s2[2].split('-');
+
+ n1 = parseInt(s1[0], 10);
+ n2 = parseInt(s2[0], 10);
+ if (n1 < n2) return false;
+ else if (n1 > n2) return true;
+
+ n1 = (s1.length > 1) ? parseInt(s1[1], 10) : 0;
+ n2 = (s2.length > 1) ? parseInt(s2[1], 10) : 0;
+ if (n1 < n2) return false;
+ else return true;
+ }
+}
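+
+// Illustrative comparisons (hypothetical version strings):
+// versionsMatch('v1.29.3', 'v1.29.3') -> true (identical)
+// versionsMatch('v1.29.3', 'v1.28.0') -> true (installed version is newer)
+// versionsMatch('v1.28.0', 'v1.29.3') -> false (an update is available)
+// versionsMatch('v1.29.0-22-nightly', 'v1.29.0-10-nightly') -> true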
+
+function getGithubLatestVersion(callback) {
+ versionLog('Downloading latest version id from github...');
+
+ $.ajax({
+ url: 'https://api.github.com/repos/netdata/netdata/releases/latest',
+ async: true,
+ cache: false
+ })
+ .done(function (data) {
+ data = data.tag_name.replace(/(\r\n|\n|\r| |\t)/gm, "");
+ versionLog('Latest stable version from github is ' + data);
+ callback(data);
+ })
+ .fail(function () {
+ versionLog('Failed to download the latest stable version id from github!');
+ callback(null);
+ });
+}
+
+function getGCSLatestVersion(callback) {
+ versionLog('Downloading latest version id from GCS...');
+ $.ajax({
+ url: "https://www.googleapis.com/storage/v1/b/netdata-nightlies/o/latest-version.txt",
+ async: true,
+ cache: false
+ })
+ .done(function (response) {
+ $.ajax({
+ url: response.mediaLink,
+ async: true,
+ cache: false
+ })
+ .done(function (data) {
+ data = data.replace(/(\r\n|\n|\r| |\t)/gm, "");
+ versionLog('Latest nightly version from GCS is ' + data);
+ callback(data);
+ })
+ .fail(function (xhr, textStatus, errorThrown) {
+ versionLog('Failed to download the latest nightly version id from GCS!');
+ callback(null);
+ });
+ })
+ .fail(function (xhr, textStatus, errorThrown) {
+ versionLog('Failed to download the latest nightly version from GCS!');
+ callback(null);
+ });
+}
+
+
+function checkForUpdateByVersion(force, callback) {
+ if (options.release_channel === 'stable') {
+ getGithubLatestVersion(function (sha2) {
+ callback(options.version, sha2);
+ });
+ } else {
+ getGCSLatestVersion(function (sha2) {
+ callback(options.version, sha2);
+ });
+ }
+ return null;
+}
+
+function notifyForUpdate(force) {
+ versionLog('<p>checking for updates...</p>');
+
+ var now = Date.now();
+
+ if (typeof force === 'undefined' || force !== true) {
+ var last = loadLocalStorage('last_update_check');
+
+ if (typeof last === 'string') {
+ last = parseInt(last);
+ } else {
+ last = 0;
+ }
+
+ if (now - last < 3600000 * 8) {
+ // no need to check it - too soon
+ return;
+ }
+ }
+
+ checkForUpdateByVersion(force, function (sha1, sha2) {
+ var save = false;
+
+ if (sha1 === null) {
+ save = false;
+ versionLog('<p><big>Failed to get your netdata version!</big></p><p>You can always get the latest netdata from <a href="https://github.com/netdata/netdata" target="_blank">its github page</a>.</p>');
+ } else if (sha2 === null) {
+ save = false;
+ versionLog('<p><big>Failed to get the latest netdata version.</big></p><p>You can always get the latest netdata from <a href="https://github.com/netdata/netdata" target="_blank">its github page</a>.</p>');
+ } else if (versionsMatch(sha1, sha2)) {
+ save = true;
+ versionLog('<p><big>You already have the latest netdata!</big></p><p>No update yet?<br/>We probably need some motivation to keep going!</p><p>If you haven\'t already, <a href="https://github.com/netdata/netdata" target="_blank">give netdata a <b><i class="fas fa-star"></i></b> at its github page</a>.</p>');
+ } else {
+ save = true;
+ var compare = 'https://docs.netdata.cloud/changelog/';
+ versionLog('<p><big><strong>New version of netdata available!</strong></big></p><p>Latest version: <b><code>' + sha2 + '</code></b></p><p><a href="' + compare + '" target="_blank">Click here for the change log</a> and<br/><a href="https://github.com/netdata/netdata/tree/master/packaging/installer/UPDATE.md" target="_blank">click here for directions on updating</a> your netdata installation.</p><p>We suggest reviewing the change log for new features you may be interested in, or important bug fixes you may need.<br/>Keeping your netdata updated is generally a good idea.</p>');
+
+ document.getElementById('update_badge').innerHTML = '!';
+ }
+
+ if (save) {
+ saveLocalStorage('last_update_check', now.toString());
+ }
+ });
+}
+
+// ----------------------------------------------------------------------------
+// printing dashboards
+
+function showPageFooter() {
+ document.getElementById('footer').style.display = 'block';
+}
+
+function printPreflight() {
+ var url = document.location.origin.toString() + document.location.pathname.toString() + document.location.search.toString() + urlOptions.genHash() + ';mode=print';
+ var width = 990;
+ var height = screen.height * 90 / 100;
+ //console.log(url);
+ //console.log(document.location);
+ window.open(url, '', 'width=' + width.toString() + ',height=' + height.toString() + ',menubar=no,toolbar=no,personalbar=no,location=no,resizable=no,scrollbars=yes,status=no,chrome=yes,centerscreen=yes,attention=yes,dialog=yes');
+ $('#printPreflightModal').modal('hide');
+}
+
+function printPage() {
+ var print_is_rendering = true;
+
+ $('#printModal').on('hide.bs.modal', function (e) {
+ if (print_is_rendering === true) {
+ e.preventDefault();
+ return false;
+ }
+
+ return true;
+ });
+
+ $('#printModal').on('show.bs.modal', function () {
+ var print_options = {
+ stop_updates_when_focus_is_lost: false,
+ update_only_visible: false,
+ sync_selection: false,
+ eliminate_zero_dimensions: false,
+ pan_and_zoom_data_padding: false,
+ show_help: false,
+ legend_toolbox: false,
+ resize_charts: false,
+ pixels_per_point: 1
+ };
+
+ var x;
+ for (x in print_options) {
+ if (print_options.hasOwnProperty(x)) {
+ NETDATA.options.current[x] = print_options[x];
+ }
+ }
+
+ NETDATA.parseDom();
+ showPageFooter();
+
+ NETDATA.globalSelectionSync.stop();
+ NETDATA.globalPanAndZoom.setMaster(NETDATA.options.targets[0], urlOptions.after, urlOptions.before);
+ // NETDATA.onresize();
+
+ var el = document.getElementById('printModalProgressBar');
+ var eltxt = document.getElementById('printModalProgressBarText');
+
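+ // Render the charts one per event-loop turn, counting down from the
+ // last target to the first: each chart is refreshed and resized for
+ // print and the progress bar advances; when the countdown hits zero
+ // the modal is hidden and the browser's print dialog opens.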
+ function update_chart(idx) {
+ var state = NETDATA.options.targets[--idx];
+
+ var pcent = (NETDATA.options.targets.length - idx) * 100 / NETDATA.options.targets.length;
+ $(el).css('width', pcent + '%').attr('aria-valuenow', pcent);
+ eltxt.innerText = Math.round(pcent).toString() + '%, ' + state.id;
+
+ setTimeout(function () {
+ state.updateChart(function () {
+ NETDATA.options.targets[idx].resizeForPrint();
+
+ if (idx > 0) {
+ update_chart(idx);
+ } else {
+ print_is_rendering = false;
+ $('#printModal').modal('hide');
+ window.print();
+ window.close();
+ }
+ })
+ }, 0);
+ }
+
+ print_is_rendering = true;
+ update_chart(NETDATA.options.targets.length);
+ });
+
+ $('#printModal').modal('show');
+}
+
+// --------------------------------------------------------------------
+
+function jsonStringifyFn(obj) {
+ return JSON.stringify(obj, function (key, value) {
+ return (typeof value === 'function') ? value.toString() : value;
+ });
+}
+
+function jsonParseFn(str) {
+ return JSON.parse(str, function (key, value) {
+ if (typeof value !== 'string') {
+ return value;
+ }
+ return (value.substring(0, 8) === 'function') ? eval('(' + value + ')') : value;
+ });
+}
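+
+// Together these helpers round-trip objects that contain functions, which
+// plain JSON cannot represent: functions are serialized as source text and
+// revived with eval(). A sketch (hypothetical object, illustration only):
+//
+//     var obj = { suffix: '%', fmt: function (v) { return v + this.suffix; } };
+//     var str = jsonStringifyFn(obj);  // the function becomes a string
+//     var copy = jsonParseFn(str);     // strings starting with 'function' are eval()'ed back
+//     copy.fmt(42);                    // -> '42%'
+//
+// This is how snapshots carry the dashboard's menu/submenu/context
+// customizations (see saveSnapshot below), at the cost of eval()'ing
+// snapshot-provided code on import.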
+
+// --------------------------------------------------------------------
+
+var snapshotOptions = {
+ bytes_per_chart: 2048,
+ compressionDefault: 'pako.deflate.base64',
+
+ compressions: {
+ 'none': {
+ bytes_per_point_memory: 5.2,
+ bytes_per_point_disk: 5.6,
+
+ compress: function (s) {
+ return s;
+ },
+
+ compressed_length: function (s) {
+ return s.length;
+ },
+
+ uncompress: function (s) {
+ return s;
+ }
+ },
+
+ 'pako.deflate.base64': {
+ bytes_per_point_memory: 1.8,
+ bytes_per_point_disk: 1.9,
+
+ compress: function (s) {
+ return btoa(pako.deflate(s, { to: 'string' }));
+ },
+
+ compressed_length: function (s) {
+ return s.length;
+ },
+
+ uncompress: function (s) {
+ return pako.inflate(atob(s), { to: 'string' });
+ }
+ },
+
+ 'pako.deflate': {
+ bytes_per_point_memory: 1.4,
+ bytes_per_point_disk: 3.2,
+
+ compress: function (s) {
+ return pako.deflate(s, { to: 'string' });
+ },
+
+ compressed_length: function (s) {
+ return s.length;
+ },
+
+ uncompress: function (s) {
+ return pako.inflate(s, { to: 'string' });
+ }
+ },
+
+ 'lzstring.utf16': {
+ bytes_per_point_memory: 1.7,
+ bytes_per_point_disk: 2.6,
+
+ compress: function (s) {
+ return LZString.compressToUTF16(s);
+ },
+
+ compressed_length: function (s) {
+ return s.length * 2;
+ },
+
+ uncompress: function (s) {
+ return LZString.decompressFromUTF16(s);
+ }
+ },
+
+ 'lzstring.base64': {
+ bytes_per_point_memory: 2.1,
+ bytes_per_point_disk: 2.3,
+
+ compress: function (s) {
+ return LZString.compressToBase64(s);
+ },
+
+ compressed_length: function (s) {
+ return s.length;
+ },
+
+ uncompress: function (s) {
+ return LZString.decompressFromBase64(s);
+ }
+ },
+
+ 'lzstring.uri': {
+ bytes_per_point_memory: 2.1,
+ bytes_per_point_disk: 2.3,
+
+ compress: function (s) {
+ return LZString.compressToEncodedURIComponent(s);
+ },
+
+ compressed_length: function (s) {
+ return s.length;
+ },
+
+ uncompress: function (s) {
+ return LZString.decompressFromEncodedURIComponent(s);
+ }
+ }
+ }
+};
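+
+// Every compression method exposes the same three-function interface
+// (compress / compressed_length / uncompress), so the snapshot code can use
+// them interchangeably. A round-trip sketch (assumes pako has already been
+// loaded via loadPako()):
+//
+//     var method = snapshotOptions.compressions['pako.deflate.base64'];
+//     var packed = method.compress('{"data":[1,2,3]}');   // deflate + base64
+//     var bytes = method.compressed_length(packed);       // size for the progress log
+//     var json = method.uncompress(packed);               // the original string again
+//
+// The bytes_per_point_* figures are empirical estimates, used only by
+// saveSnapshotModalShowExpectedSize() below to predict snapshot size.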
+
+// --------------------------------------------------------------------
+// loading snapshots
+
+function loadSnapshotModalLog(priority, msg) {
+ document.getElementById('loadSnapshotStatus').className = "alert alert-" + priority;
+ document.getElementById('loadSnapshotStatus').innerHTML = msg;
+}
+
+var tmpSnapshotData = null;
+
+function loadSnapshot() {
+ $('#loadSnapshotImport').addClass('disabled');
+
+ if (tmpSnapshotData === null) {
+ loadSnapshotPreflightEmpty();
+ loadSnapshotModalLog('danger', 'no data has been loaded');
+ return;
+ }
+
+ loadPako(function () {
+ loadLzString(function () {
+ loadSnapshotModalLog('info', 'Please wait, activating snapshot...');
+ $('#loadSnapshotModal').modal('hide');
+
+ netdataShowAlarms = false;
+ netdataRegistry = false;
+ netdataServer = tmpSnapshotData.server;
+ NETDATA.serverDefault = netdataServer;
+
+ document.getElementById('charts_div').innerHTML = '';
+ document.getElementById('sidebar').innerHTML = '';
+ NETDATA.globalReset();
+
+ if (typeof tmpSnapshotData.hash !== 'undefined') {
+ urlOptions.hash = tmpSnapshotData.hash;
+ } else {
+ urlOptions.hash = '#';
+ }
+
+ if (typeof tmpSnapshotData.info !== 'undefined') {
+ var info = jsonParseFn(tmpSnapshotData.info);
+ if (typeof info.menu !== 'undefined') {
+ netdataDashboard.menu = info.menu;
+ }
+
+ if (typeof info.submenu !== 'undefined') {
+ netdataDashboard.submenu = info.submenu;
+ }
+
+ if (typeof info.context !== 'undefined') {
+ netdataDashboard.context = info.context;
+ }
+ }
+
+ if (typeof tmpSnapshotData.compression !== 'string') {
+ tmpSnapshotData.compression = 'none';
+ }
+
+ if (typeof snapshotOptions.compressions[tmpSnapshotData.compression] === 'undefined') {
+ alert('unknown compression method: ' + tmpSnapshotData.compression);
+ tmpSnapshotData.compression = 'none';
+ }
+
+ tmpSnapshotData.uncompress = snapshotOptions.compressions[tmpSnapshotData.compression].uncompress;
+ netdataSnapshotData = tmpSnapshotData;
+
+ urlOptions.after = tmpSnapshotData.after_ms;
+ urlOptions.before = tmpSnapshotData.before_ms;
+
+ if (typeof tmpSnapshotData.highlight_after_ms !== 'undefined'
+ && tmpSnapshotData.highlight_after_ms !== null
+ && tmpSnapshotData.highlight_after_ms > 0
+ && typeof tmpSnapshotData.highlight_before_ms !== 'undefined'
+ && tmpSnapshotData.highlight_before_ms !== null
+ && tmpSnapshotData.highlight_before_ms > 0
+ ) {
+ urlOptions.highlight_after = tmpSnapshotData.highlight_after_ms;
+ urlOptions.highlight_before = tmpSnapshotData.highlight_before_ms;
+ urlOptions.highlight = true;
+ } else {
+ urlOptions.highlight_after = 0;
+ urlOptions.highlight_before = 0;
+ urlOptions.highlight = false;
+ }
+
+ netdataCheckXSS = false; // disable the modal - this does not affect XSS checks, since dashboard.js is already loaded
+ NETDATA.xss.enabled = true; // we should not do any remote requests, but if we do, check them
+ NETDATA.xss.enabled_for_data = true; // check also snapshot data - that have been excluded from the initial check, due to compression
+ loadSnapshotPreflightEmpty();
+ initializeDynamicDashboard();
+ });
+ });
+}
+
+function loadSnapshotPreflightFile(file) {
+ var filename = NETDATA.xss.string(file.name);
+ var fr = new FileReader();
+ fr.onload = function (e) {
+ document.getElementById('loadSnapshotFilename').innerHTML = filename;
+ var result = null;
+ try {
+ result = NETDATA.xss.checkAlways('snapshot', JSON.parse(e.target.result), /^(snapshot\.info|snapshot\.data)$/);
+
+ //console.log(result);
+ var date_after = new Date(result.after_ms);
+ var date_before = new Date(result.before_ms);
+
+ if (typeof result.charts_ok === 'undefined') {
+ result.charts_ok = 'unknown';
+ }
+
+ if (typeof result.charts_failed === 'undefined') {
+ result.charts_failed = 0;
+ }
+
+ if (typeof result.compression === 'undefined') {
+ result.compression = 'none';
+ }
+
+ if (typeof result.data_size === 'undefined') {
+ result.data_size = 0;
+ }
+
+ document.getElementById('loadSnapshotFilename').innerHTML = '<code>' + filename + '</code>';
+ document.getElementById('loadSnapshotHostname').innerHTML = '<b>' + result.hostname + '</b>, netdata version: <b>' + result.netdata_version.toString() + '</b>';
+ document.getElementById('loadSnapshotURL').innerHTML = result.url;
+ document.getElementById('loadSnapshotCharts').innerHTML = result.charts.charts_count.toString() + ' charts, ' + result.charts.dimensions_count.toString() + ' dimensions, ' + result.data_points.toString() + ' points per dimension, ' + Math.round(result.duration_ms / result.data_points).toString() + ' ms per point';
+ document.getElementById('loadSnapshotInfo').innerHTML = 'version: <b>' + result.snapshot_version.toString() + '</b>, includes <b>' + result.charts_ok.toString() + '</b> unique chart data queries ' + ((result.charts_failed > 0) ? ('<b>' + result.charts_failed.toString() + '</b> failed') : '').toString() + ', compressed with <code>' + result.compression.toString() + '</code>, data size ' + (Math.round(result.data_size * 100 / 1024 / 1024) / 100).toString() + ' MB';
+ document.getElementById('loadSnapshotTimeRange').innerHTML = '<b>' + NETDATA.dateTime.localeDateString(date_after) + ' ' + NETDATA.dateTime.localeTimeString(date_after) + '</b> to <b>' + NETDATA.dateTime.localeDateString(date_before) + ' ' + NETDATA.dateTime.localeTimeString(date_before) + '</b>';
+ document.getElementById('loadSnapshotComments').innerHTML = ((result.comments) ? result.comments : '').toString();
+ loadSnapshotModalLog('success', 'File loaded, click <b>Import</b> to render it!');
+ $('#loadSnapshotImport').removeClass('disabled');
+
+ tmpSnapshotData = result;
+ }
+ catch (e) {
+ console.log(e);
+ document.getElementById('loadSnapshotStatus').className = "alert alert-danger";
+ document.getElementById('loadSnapshotStatus').innerHTML = "Failed to parse this file!";
+ $('#loadSnapshotImport').addClass('disabled');
+ }
+ }
+
+ //console.log(file);
+ fr.readAsText(file);
+}
+
+function loadSnapshotPreflightEmpty() {
+ document.getElementById('loadSnapshotFilename').innerHTML = '';
+ document.getElementById('loadSnapshotHostname').innerHTML = '';
+ document.getElementById('loadSnapshotURL').innerHTML = '';
+ document.getElementById('loadSnapshotCharts').innerHTML = '';
+ document.getElementById('loadSnapshotInfo').innerHTML = '';
+ document.getElementById('loadSnapshotTimeRange').innerHTML = '';
+ document.getElementById('loadSnapshotComments').innerHTML = '';
+ loadSnapshotModalLog('success', 'Browse for a snapshot file (or drag and drop it here), then click <b>Import</b> to render it.');
+ $('#loadSnapshotImport').addClass('disabled');
+}
+
+var loadSnapshotDragAndDropInitialized = false;
+
+function loadSnapshotDragAndDropSetup() {
+ if (loadSnapshotDragAndDropInitialized === false) {
+ loadSnapshotDragAndDropInitialized = true;
+ $('#loadSnapshotDragAndDrop')
+ .on('drag dragstart dragend dragover dragenter dragleave drop', function (e) {
+ e.preventDefault();
+ e.stopPropagation();
+ })
+ .on('drop', function (e) {
+ if (e.originalEvent.dataTransfer.files.length) {
+ loadSnapshotPreflightFile(e.originalEvent.dataTransfer.files.item(0));
+ } else {
+ loadSnapshotPreflightEmpty();
+ loadSnapshotModalLog('danger', 'No file selected');
+ }
+ });
+ }
+}
+
+function loadSnapshotPreflight() {
+ var files = document.getElementById('loadSnapshotSelectFiles').files;
+ if (files.length <= 0) {
+ loadSnapshotPreflightEmpty();
+ loadSnapshotModalLog('danger', 'No file selected');
+ return;
+ }
+
+ loadSnapshotModalLog('info', 'Loading file...');
+
+ loadSnapshotPreflightFile(files.item(0));
+}
+
+// --------------------------------------------------------------------
+// saving snapshots
+
+var saveSnapshotStop = false;
+
+function saveSnapshotCancel() {
+ saveSnapshotStop = true;
+}
+
+var saveSnapshotModalInitialized = false;
+
+function saveSnapshotModalSetup() {
+ if (saveSnapshotModalInitialized === false) {
+ saveSnapshotModalInitialized = true;
+ $('#saveSnapshotModal')
+ .on('hide.bs.modal', saveSnapshotCancel)
+ .on('show.bs.modal', saveSnapshotModalInit)
+ .on('shown.bs.modal', function () {
+ $('#saveSnapshotResolutionSlider').find(".slider-handle:first").attr("tabindex", 1);
+ document.getElementById('saveSnapshotComments').focus();
+ });
+ }
+}
+
+function saveSnapshotModalLog(priority, msg) {
+ document.getElementById('saveSnapshotStatus').className = "alert alert-" + priority;
+ document.getElementById('saveSnapshotStatus').innerHTML = msg;
+}
+
+function saveSnapshotModalShowExpectedSize() {
+ var points = Math.round(saveSnapshotViewDuration / saveSnapshotSelectedSecondsPerPoint);
+ var priority = 'info';
+ var msg = 'A moderate snapshot.';
+
+ var sizemb = Math.round(
+ (options.data.charts_count * snapshotOptions.bytes_per_chart
+ + options.data.dimensions_count * points * snapshotOptions.compressions[saveSnapshotCompression].bytes_per_point_disk)
+ * 10 / 1024 / 1024) / 10;
+
+ var memmb = Math.round(
+ (options.data.charts_count * snapshotOptions.bytes_per_chart
+ + options.data.dimensions_count * points * snapshotOptions.compressions[saveSnapshotCompression].bytes_per_point_memory)
+ * 10 / 1024 / 1024) / 10;
+
+ if (sizemb < 10) {
+ priority = 'success';
+ msg = 'A nice small snapshot!';
+ }
+ if (sizemb > 50) {
+ priority = 'warning';
+ msg = 'Will stress your browser...';
+ }
+ if (sizemb > 100) {
+ priority = 'danger';
+ msg = 'Hm... good luck...';
+ }
+
+ saveSnapshotModalLog(priority, 'The snapshot will have ' + points.toString() + ' points per dimension. Expected size on disk ' + sizemb + ' MB, at browser memory ' + memmb + ' MB.<br/>' + msg);
+}
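+
+// Worked example (hypothetical dashboard with 200 charts and 2,000
+// dimensions, 'pako.deflate.base64' compression, 900 points per dimension):
+//
+//     disk:   (200 * 2048 + 2000 * 900 * 1.9) bytes ~ 3.7 MB
+//     memory: (200 * 2048 + 2000 * 900 * 1.8) bytes ~ 3.5 MB
+//
+// Both are under the 10 MB threshold, so the modal reports
+// 'A nice small snapshot!'.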
+
+var saveSnapshotCompression = snapshotOptions.compressionDefault;
+
+function saveSnapshotSetCompression(name) {
+ saveSnapshotCompression = name;
+ document.getElementById('saveSnapshotCompressionName').innerHTML = saveSnapshotCompression;
+ saveSnapshotModalShowExpectedSize();
+}
+
+var saveSnapshotSlider = null;
+var saveSnapshotSelectedSecondsPerPoint = 1;
+var saveSnapshotViewDuration = 1;
+
+function saveSnapshotModalInit() {
+ $('#saveSnapshotModalProgressSection').hide();
+ $('#saveSnapshotResolutionRadio').show();
+ saveSnapshotModalLog('info', 'Select resolution and click <b>Save</b>');
+ $('#saveSnapshotExport').removeClass('disabled');
+
+ loadBootstrapSlider(function () {
+ saveSnapshotViewDuration = options.duration;
+ var start_ms = Math.round(Date.now() - saveSnapshotViewDuration * 1000);
+
+ if (NETDATA.globalPanAndZoom.isActive() === true) {
+ saveSnapshotViewDuration = Math.round((NETDATA.globalPanAndZoom.force_before_ms - NETDATA.globalPanAndZoom.force_after_ms) / 1000);
+ start_ms = NETDATA.globalPanAndZoom.force_after_ms;
+ }
+
+ var start_date = new Date(start_ms);
+ var yyyymmddhhssmm = start_date.getFullYear() + NETDATA.zeropad(start_date.getMonth() + 1) + NETDATA.zeropad(start_date.getDate()) + '-' + NETDATA.zeropad(start_date.getHours()) + NETDATA.zeropad(start_date.getMinutes()) + NETDATA.zeropad(start_date.getSeconds());
+
+ document.getElementById('saveSnapshotFilename').value = 'netdata-' + options.hostname.toString() + '-' + yyyymmddhhssmm.toString() + '-' + saveSnapshotViewDuration.toString() + '.snapshot';
+ saveSnapshotSetCompression(saveSnapshotCompression);
+
+ var min = options.update_every;
+ var max = Math.round(saveSnapshotViewDuration / 100);
+
+ if (NETDATA.globalPanAndZoom.isActive() === false) {
+ max = Math.round(saveSnapshotViewDuration / 50);
+ }
+
+ var view = Math.round(saveSnapshotViewDuration / Math.round($(document.getElementById('charts_div')).width() / 2));
+
+ // console.log('view duration: ' + saveSnapshotViewDuration + ', min: ' + min + ', max: ' + max + ', view: ' + view);
+
+ if (max < 10) {
+ max = 10;
+ }
+ if (max < min) {
+ max = min;
+ }
+ if (view < min) {
+ view = min;
+ }
+ if (view > max) {
+ view = max;
+ }
+
+ if (saveSnapshotSlider !== null) {
+ saveSnapshotSlider.destroy();
+ }
+
+ saveSnapshotSlider = new Slider('#saveSnapshotResolutionSlider', {
+ ticks: [min, view, max],
+ min: min,
+ max: max,
+ step: options.update_every,
+ value: view,
+ scale: (max > 100) ? 'logarithmic' : 'linear',
+ tooltip: 'always',
+ formatter: function (value) {
+ if (value < 1) {
+ value = 1;
+ }
+
+ if (value < options.data.update_every) {
+ value = options.data.update_every;
+ }
+
+ saveSnapshotSelectedSecondsPerPoint = value;
+ saveSnapshotModalShowExpectedSize();
+
+ var seconds = ' seconds ';
+ if (value === 1) {
+ seconds = ' second ';
+ }
+
+ return value + seconds + 'per point' + ((value === options.data.update_every) ? ', server default' : '').toString();
+ }
+ });
+ });
+}
+
+function saveSnapshot() {
+ loadPako(function () {
+ loadLzString(function () {
+ saveSnapshotStop = false;
+ $('#saveSnapshotModalProgressSection').show();
+ $('#saveSnapshotResolutionRadio').hide();
+ $('#saveSnapshotExport').addClass('disabled');
+
+ var filename = document.getElementById('saveSnapshotFilename').value;
+ // console.log(filename);
+ saveSnapshotModalLog('info', 'Generating snapshot as <code>' + filename.toString() + '</code>');
+
+ var save_options = {
+ stop_updates_when_focus_is_lost: false,
+ update_only_visible: false,
+ sync_selection: false,
+ eliminate_zero_dimensions: true,
+ pan_and_zoom_data_padding: false,
+ show_help: false,
+ legend_toolbox: false,
+ resize_charts: false,
+ pixels_per_point: 1
+ };
+ var backedup_options = {};
+
+ var x;
+ for (x in save_options) {
+ if (save_options.hasOwnProperty(x)) {
+ backedup_options[x] = NETDATA.options.current[x];
+ NETDATA.options.current[x] = save_options[x];
+ }
+ }
+
+ var el = document.getElementById('saveSnapshotModalProgressBar');
+ var eltxt = document.getElementById('saveSnapshotModalProgressBarText');
+
+ options.data.charts_by_name = null;
+
+ var saveData = {
+ hostname: options.hostname,
+ server: NETDATA.serverDefault,
+ netdata_version: options.data.version,
+ snapshot_version: 1,
+ after_ms: Date.now() - options.duration * 1000,
+ before_ms: Date.now(),
+ highlight_after_ms: urlOptions.highlight_after,
+ highlight_before_ms: urlOptions.highlight_before,
+ duration_ms: options.duration * 1000,
+ update_every_ms: options.update_every * 1000,
+ data_points: 0,
+ url: ((urlOptions.server !== null) ? urlOptions.server : document.location.origin.toString() + document.location.pathname.toString() + document.location.search.toString()).toString(),
+ comments: document.getElementById('saveSnapshotComments').value.toString(),
+ hash: urlOptions.hash,
+ charts: options.data,
+ info: jsonStringifyFn({
+ menu: netdataDashboard.menu,
+ submenu: netdataDashboard.submenu,
+ context: netdataDashboard.context
+ }),
+ charts_ok: 0,
+ charts_failed: 0,
+ compression: saveSnapshotCompression,
+ data_size: 0,
+ data: {}
+ };
+
+ if (typeof snapshotOptions.compressions[saveData.compression] === 'undefined') {
+ alert('unknown compression method: ' + saveData.compression);
+ saveData.compression = 'none';
+ }
+
+ var compress = snapshotOptions.compressions[saveData.compression].compress;
+ var compressed_length = snapshotOptions.compressions[saveData.compression].compressed_length;
+
+ function pack_api1_v1_chart_data(state) {
+ if (state.library_name === null || state.data === null) {
+ return;
+ }
+
+ var data = state.data;
+ state.data = null;
+ data.state = null;
+ var str = JSON.stringify(data);
+
+ if (typeof str === 'string') {
+ var cstr = compress(str);
+ saveData.data[state.chartDataUniqueID()] = cstr;
+ return compressed_length(cstr);
+ } else {
+ return 0;
+ }
+ }
+
+ var clearPanAndZoom = false;
+ if (NETDATA.globalPanAndZoom.isActive() === false) {
+ NETDATA.globalPanAndZoom.setMaster(NETDATA.options.targets[0], saveData.after_ms, saveData.before_ms);
+ clearPanAndZoom = true;
+ }
+
+ saveData.after_ms = NETDATA.globalPanAndZoom.force_after_ms;
+ saveData.before_ms = NETDATA.globalPanAndZoom.force_before_ms;
+ saveData.duration_ms = saveData.before_ms - saveData.after_ms;
+ saveData.data_points = Math.round((saveData.before_ms - saveData.after_ms) / (saveSnapshotSelectedSecondsPerPoint * 1000));
+ saveSnapshotModalLog('info', 'Generating snapshot with ' + saveData.data_points.toString() + ' data points per dimension...');
+
+ var charts_count = 0;
+ var charts_ok = 0;
+ var charts_failed = 0;
+
+ function saveSnapshotRestore() {
+ $('#saveSnapshotModal').modal('hide');
+
+ // restore the options
+ var x;
+ for (x in backedup_options) {
+ if (backedup_options.hasOwnProperty(x)) {
+ NETDATA.options.current[x] = backedup_options[x];
+ }
+ }
+
+ $(el).css('width', '0%').attr('aria-valuenow', 0);
+ eltxt.innerText = '0%';
+
+ if (clearPanAndZoom) {
+ NETDATA.globalPanAndZoom.clearMaster();
+ }
+
+ NETDATA.options.force_data_points = 0;
+ NETDATA.options.fake_chart_rendering = false;
+ NETDATA.onscroll_updater_enabled = true;
+ NETDATA.onresize();
+ NETDATA.unpause();
+
+ $('#saveSnapshotExport').removeClass('disabled');
+ }
+
+ NETDATA.globalSelectionSync.stop();
+ NETDATA.options.force_data_points = saveData.data_points;
+ NETDATA.options.fake_chart_rendering = true;
+ NETDATA.onscroll_updater_enabled = false;
+ NETDATA.abortAllRefreshes();
+
+ var size = 0;
+ var info = ' Resolution: <b>' + saveSnapshotSelectedSecondsPerPoint.toString() + ((saveSnapshotSelectedSecondsPerPoint === 1) ? ' second ' : ' seconds ').toString() + 'per point</b>.';
+
+ function update_chart(idx) {
+ if (saveSnapshotStop === true) {
+ saveSnapshotModalLog('info', 'Cancelled!');
+ saveSnapshotRestore();
+ return;
+ }
+
+ var state = NETDATA.options.targets[--idx];
+
+ var pcent = (NETDATA.options.targets.length - idx) * 100 / NETDATA.options.targets.length;
+ $(el).css('width', pcent + '%').attr('aria-valuenow', pcent);
+ eltxt.innerText = Math.round(pcent).toString() + '%, ' + state.id;
+
+ setTimeout(function () {
+ charts_count++;
+ state.isVisible(true);
+ state.current.force_after_ms = saveData.after_ms;
+ state.current.force_before_ms = saveData.before_ms;
+
+ state.updateChart(function (status, reason) {
+ state.current.force_after_ms = null;
+ state.current.force_before_ms = null;
+
+ if (status === true) {
+ charts_ok++;
+ // state.log('ok');
+ size += pack_api1_v1_chart_data(state);
+ } else {
+ charts_failed++;
+ state.log('failed to be updated: ' + reason);
+ }
+
+ saveSnapshotModalLog((charts_failed) ? 'danger' : 'info', 'Generated snapshot data size <b>' + (Math.round(size * 100 / 1024 / 1024) / 100).toString() + ' MB</b>. ' + ((charts_failed) ? (charts_failed.toString() + ' charts have failed to be downloaded') : '').toString() + info);
+
+ if (idx > 0) {
+ update_chart(idx);
+ } else {
+ saveData.charts_ok = charts_ok;
+ saveData.charts_failed = charts_failed;
+ saveData.data_size = size;
+ // console.log(saveData.compression + ': ' + (size / (options.data.dimensions_count * Math.round(saveSnapshotViewDuration / saveSnapshotSelectedSecondsPerPoint))).toString());
+
+ // save it
+ // console.log(saveData);
+ saveObjectToClient(saveData, filename);
+
+ if (charts_failed > 0) {
+ alert(charts_failed.toString() + ' failed to be downloaded');
+ }
+
+ saveSnapshotRestore();
+ saveData = null;
+ }
+ })
+ }, 0);
+ }
+
+ update_chart(NETDATA.options.targets.length);
+ });
+ });
+}
+
+// --------------------------------------------------------------------
+// activate netdata on the page
+
+function dashboardSettingsSetup() {
+ var update_options_modal = function () {
+ // console.log('update_options_modal');
+
+ var sync_option = function (option) {
+ var self = $('#' + option);
+
+ if (self.prop('checked') !== NETDATA.getOption(option)) {
+ // console.log('switching ' + option.toString());
+ self.bootstrapToggle(NETDATA.getOption(option) ? 'on' : 'off');
+ }
+ };
+
+ var theme_sync_option = function (option) {
+ var self = $('#' + option);
+
+ self.bootstrapToggle(netdataTheme === 'slate' ? 'on' : 'off');
+ };
+ var units_sync_option = function (option) {
+ var self = $('#' + option);
+
+ if (self.prop('checked') !== (NETDATA.getOption('units') === 'auto')) {
+ self.bootstrapToggle(NETDATA.getOption('units') === 'auto' ? 'on' : 'off');
+ }
+
+ if (self.prop('checked') === true) {
+ $('#settingsLocaleTempRow').show();
+ $('#settingsLocaleTimeRow').show();
+ } else {
+ $('#settingsLocaleTempRow').hide();
+ $('#settingsLocaleTimeRow').hide();
+ }
+ };
+ var temp_sync_option = function (option) {
+ var self = $('#' + option);
+
+ if (self.prop('checked') !== (NETDATA.getOption('temperature') === 'celsius')) {
+ self.bootstrapToggle(NETDATA.getOption('temperature') === 'celsius' ? 'on' : 'off');
+ }
+ };
+ var timezone_sync_option = function (option) {
+ var self = $('#' + option);
+
+ document.getElementById('browser_timezone').innerText = NETDATA.options.browser_timezone;
+ document.getElementById('server_timezone').innerText = NETDATA.options.server_timezone;
+ document.getElementById('current_timezone').innerText = (NETDATA.options.current.timezone === 'default') ? 'unset, using browser default' : NETDATA.options.current.timezone;
+
+ if (self.prop('checked') === NETDATA.dateTime.using_timezone) {
+ self.bootstrapToggle(NETDATA.dateTime.using_timezone ? 'off' : 'on');
+ }
+ };
+
+ sync_option('eliminate_zero_dimensions');
+ sync_option('destroy_on_hide');
+ sync_option('async_on_scroll');
+ sync_option('parallel_refresher');
+ sync_option('concurrent_refreshes');
+ sync_option('sync_selection');
+ sync_option('sync_pan_and_zoom');
+ sync_option('stop_updates_when_focus_is_lost');
+ sync_option('smooth_plot');
+ sync_option('pan_and_zoom_data_padding');
+ sync_option('show_help');
+ sync_option('seconds_as_time');
+ theme_sync_option('netdata_theme_control');
+ units_sync_option('units_conversion');
+ temp_sync_option('units_temp');
+ timezone_sync_option('local_timezone');
+
+ if (NETDATA.getOption('parallel_refresher') === false) {
+ $('#concurrent_refreshes_row').hide();
+ } else {
+ $('#concurrent_refreshes_row').show();
+ }
+ };
+ NETDATA.setOption('setOptionCallback', update_options_modal);
+
+ // handle options changes
+ $('#eliminate_zero_dimensions').change(function () {
+ NETDATA.setOption('eliminate_zero_dimensions', $(this).prop('checked'));
+ });
+ $('#destroy_on_hide').change(function () {
+ NETDATA.setOption('destroy_on_hide', $(this).prop('checked'));
+ });
+ $('#async_on_scroll').change(function () {
+ NETDATA.setOption('async_on_scroll', $(this).prop('checked'));
+ });
+ $('#parallel_refresher').change(function () {
+ NETDATA.setOption('parallel_refresher', $(this).prop('checked'));
+ });
+ $('#concurrent_refreshes').change(function () {
+ NETDATA.setOption('concurrent_refreshes', $(this).prop('checked'));
+ });
+ $('#sync_selection').change(function () {
+ NETDATA.setOption('sync_selection', $(this).prop('checked'));
+ });
+ $('#sync_pan_and_zoom').change(function () {
+ NETDATA.setOption('sync_pan_and_zoom', $(this).prop('checked'));
+ });
+ $('#stop_updates_when_focus_is_lost').change(function () {
+ urlOptions.update_always = !$(this).prop('checked');
+ urlOptions.hashUpdate();
+
+ NETDATA.setOption('stop_updates_when_focus_is_lost', !urlOptions.update_always);
+ });
+ $('#smooth_plot').change(function () {
+ NETDATA.setOption('smooth_plot', $(this).prop('checked'));
+ });
+ $('#pan_and_zoom_data_padding').change(function () {
+ NETDATA.setOption('pan_and_zoom_data_padding', $(this).prop('checked'));
+ });
+ $('#seconds_as_time').change(function () {
+ NETDATA.setOption('seconds_as_time', $(this).prop('checked'));
+ });
+ $('#local_timezone').change(function () {
+ if ($(this).prop('checked')) {
+ selected_server_timezone('default', true);
+ } else {
+ selected_server_timezone('default', false);
+ }
+ });
+
+ $('#units_conversion').change(function () {
+ NETDATA.setOption('units', $(this).prop('checked') ? 'auto' : 'original');
+ });
+ $('#units_temp').change(function () {
+ NETDATA.setOption('temperature', $(this).prop('checked') ? 'celsius' : 'fahrenheit');
+ });
+
+ $('#show_help').change(function () {
+ urlOptions.help = $(this).prop('checked');
+ urlOptions.hashUpdate();
+
+ NETDATA.setOption('show_help', urlOptions.help);
+ netdataReload();
+ });
+
+ // this has to be the last
+ // it reloads the page
+ $('#netdata_theme_control').change(function () {
+ urlOptions.theme = $(this).prop('checked') ? 'slate' : 'white';
+ urlOptions.hashUpdate();
+
+ if (setTheme(urlOptions.theme)) {
+ netdataReload();
+ }
+ });
+}
+
+function scrollDashboardTo() {
+ if (netdataSnapshotData !== null && typeof netdataSnapshotData.hash !== 'undefined') {
+ //console.log(netdataSnapshotData.hash);
+ scrollToId(netdataSnapshotData.hash.replace('#', ''));
+ } else {
+ // check if we have to jump to a specific section
+ scrollToId(urlOptions.hash.replace('#', ''));
+
+ if (urlOptions.chart !== null) {
+ NETDATA.alarms.scrollToChart(urlOptions.chart);
+ //urlOptions.hash = '#' + NETDATA.name2id('menu_' + charts[c].menu + '_submenu_' + charts[c].submenu);
+ //urlOptions.hash = '#chart_' + NETDATA.name2id(urlOptions.chart);
+ //console.log('hash = ' + urlOptions.hash);
+ }
+ }
+}
+
+var modalHiddenCallback = null;
+
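+// register a callback that the 'hidden.bs.modal' handler below invokes once the
+// open modal is fully hidden: scroll to the alarm's chart and, for raised
+// (WARNING/CRITICAL) alarms, pan/zoom to a 4-minute window centered on the alarm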
+function scrollToChartAfterHidingModal(chart, alarmDate, alarmStatus) {
+ modalHiddenCallback = function () {
+ NETDATA.alarms.scrollToChart(chart, alarmDate);
+
+ if (['WARNING', 'CRITICAL'].includes(alarmStatus)) {
+ const currentChartState = NETDATA.options.targets.find(
+ (chartState) => chartState.id === chart,
+ )
+ const twoMinutes = 2 * 60 * 1000
+ NETDATA.globalPanAndZoom.setMaster(
+ currentChartState,
+ alarmDate - twoMinutes,
+ alarmDate + twoMinutes,
+ )
+ }
+ };
+}
+
+// ----------------------------------------------------------------------------
+
+function enableTooltipsAndPopovers() {
+ $('[data-toggle="tooltip"]').tooltip({
+ animated: 'fade',
+ trigger: 'hover',
+ html: true,
+ delay: { show: 500, hide: 0 },
+ container: 'body'
+ });
+ $('[data-toggle="popover"]').popover();
+}
+
+// ----------------------------------------------------------------------------
+
+var runOnceOnDashboardLastRun = 0;
+
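+// one-time, jQuery-dependent dashboard setup (modals, sidebar affix, scrollspy,
+// menus); on any subsequent call it only restores the scroll position and
+// refreshes the scrollspy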
+function runOnceOnDashboardWithjQuery() {
+ if (runOnceOnDashboardLastRun !== 0) {
+ scrollDashboardTo();
+
+ // restore the scrollspy at the proper position
+ $(document.body).scrollspy('refresh');
+ $(document.body).scrollspy('process');
+
+ return;
+ }
+
+ runOnceOnDashboardLastRun = Date.now();
+
+ // ------------------------------------------------------------------------
+ // bootstrap modals
+
+ // prevent bootstrap modals from scrolling the page
+ // maintains the current scroll position
+ // https://stackoverflow.com/a/34754029/4525767
+
+ var scrollPos = 0;
+ var modal_depth = 0; // how many modals are currently open
+ var modal_shown = false; // set to true, if a modal is shown
+ var netdata_paused_on_modal = false; // set to true, if the modal paused netdata
+    var scrollspyOffset = $(window).height() / 3; // clamped below - the scrollspy offset at which an item is selected
+
+ $('.modal')
+ .on('show.bs.modal', function () {
+ if (modal_depth === 0) {
+ scrollPos = window.scrollY;
+
+ $('body').css({
+ overflow: 'hidden',
+ position: 'fixed',
+ top: -scrollPos
+ });
+
+ modal_shown = true;
+
+ if (NETDATA.options.pauseCallback === null) {
+ NETDATA.pause(function () {
+ });
+ netdata_paused_on_modal = true;
+ } else {
+ netdata_paused_on_modal = false;
+ }
+ }
+
+ modal_depth++;
+ //console.log(urlOptions.after);
+
+ })
+ .on('hide.bs.modal', function () {
+
+ modal_depth--;
+
+ if (modal_depth <= 0) {
+ modal_depth = 0;
+
+ $('body')
+ .css({
+ overflow: '',
+ position: '',
+ top: ''
+ });
+
+ // scroll to the position we had open before the modal
+ $('html, body')
+ .animate({ scrollTop: scrollPos }, 0);
+
+ // unpause netdata, if we paused it
+ if (netdata_paused_on_modal === true) {
+ NETDATA.unpause();
+ netdata_paused_on_modal = false;
+ }
+
+ // restore the scrollspy at the proper position
+ $(document.body).scrollspy('process');
+ }
+ //console.log(urlOptions.after);
+ })
+ .on('hidden.bs.modal', function () {
+ if (modal_depth === 0) {
+ modal_shown = false;
+ }
+
+ if (typeof modalHiddenCallback === 'function') {
+ modalHiddenCallback();
+ }
+
+ modalHiddenCallback = null;
+ //console.log(urlOptions.after);
+ });
+
+ // ------------------------------------------------------------------------
+ // sidebar / affix
+
+ if (shouldShowSignInBanner()) {
+ const el = document.getElementById("sign-in-banner");
+ if (el) {
+ el.style.display = "initial";
+ el.classList.add(`theme-${netdataTheme}`);
+ }
+ }
+
+ $('#sidebar')
+ .affix({
+ offset: {
+ top: (isdemo()) ? 150 : 0,
+ bottom: 0
+ }
+ })
+ .on('affixed.bs.affix', function () {
+ // fix scrolling of very long affix lists
+ // http://stackoverflow.com/questions/21691585/bootstrap-3-1-0-affix-too-long
+
+ $(this).removeAttr('style');
+ })
+ .on('affix-top.bs.affix', function () {
+ // fix bootstrap affix click bug
+ // https://stackoverflow.com/a/37847981/4525767
+
+ if (modal_shown) {
+ return false;
+ }
+ })
+ .on('activate.bs.scrollspy', function (e) {
+ // change the URL based on the current position of the screen
+
+ if (modal_shown === false) {
+ var el = $(e.target);
+ var hash = el.find('a').attr('href');
+ if (typeof hash === 'string' && hash.substring(0, 1) === '#' && urlOptions.hash.startsWith(hash + '_submenu_') === false) {
+ urlOptions.hash = hash;
+ urlOptions.hashUpdate();
+ }
+ }
+ });
+
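+    // attach the custom scrollbar library (Ps) to the sidebar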
+ Ps.initialize(document.getElementById('sidebar'), {
+ wheelSpeed: 0.5,
+ wheelPropagation: true,
+ swipePropagation: true,
+ minScrollbarLength: null,
+ maxScrollbarLength: null,
+ useBothWheelAxes: false,
+ suppressScrollX: true,
+ suppressScrollY: false,
+ scrollXMarginOffset: 0,
+ scrollYMarginOffset: 0,
+ theme: 'default'
+ });
+
+ // ------------------------------------------------------------------------
+ // scrollspy
+
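+    // clamp the scrollspy offset (initially 1/3 of the window height) to 75 - 250 pixels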
+ if (scrollspyOffset > 250) {
+ scrollspyOffset = 250;
+ }
+ if (scrollspyOffset < 75) {
+ scrollspyOffset = 75;
+ }
+ document.body.setAttribute('data-offset', scrollspyOffset);
+
+ // scroll the dashboard, before activating the scrollspy, so that our
+ // hash will not be updated before we got the chance to scroll to it
+ scrollDashboardTo();
+
+ $(document.body).scrollspy({
+ target: '#sidebar',
+        offset: scrollspyOffset // the distance of a heading from the top of the viewport at which it gets selected
+ });
+
+ // ------------------------------------------------------------------------
+ // my-netdata menu
+
+ Ps.initialize(document.getElementById('my-netdata-dropdown-content'), {
+ wheelSpeed: 1,
+ wheelPropagation: false,
+ swipePropagation: false,
+ minScrollbarLength: null,
+ maxScrollbarLength: null,
+ useBothWheelAxes: false,
+ suppressScrollX: true,
+ suppressScrollY: false,
+ scrollXMarginOffset: 0,
+ scrollYMarginOffset: 0,
+ theme: 'default'
+ });
+
+ $('#myNetdataDropdownParent')
+ .on('show.bs.dropdown', function () {
+ var hash = urlOptions.genHash();
+ $('.registry_link').each(function (idx) {
+ this.setAttribute('href', this.getAttribute("href").replace(/#.*$/, hash));
+ });
+
+ NETDATA.pause(function () {
+ });
+ })
+ .on('shown.bs.dropdown', function () {
+ Ps.update(document.getElementById('my-netdata-dropdown-content'));
+ myNetdataMenuDidShow();
+ })
+ .on('hidden.bs.dropdown', function () {
+ NETDATA.unpause();
+ });
+
+ $('#deleteRegistryModal')
+ .on('hidden.bs.modal', function () {
+ deleteRegistryGuid = null;
+ });
+
+ // ------------------------------------------------------------------------
+ // update modal
+
+ $('#updateModal')
+ .on('show.bs.modal', function () {
+ versionLog('checking, please wait...');
+ })
+ .on('shown.bs.modal', function () {
+ notifyForUpdate(true);
+ });
+
+ // ------------------------------------------------------------------------
+ // alarms modal
+
+ $('#alarmsModal')
+ .on('shown.bs.modal', function () {
+ alarmsUpdateModal();
+ })
+ .on('hidden.bs.modal', function () {
+ document.getElementById('alarms_active').innerHTML =
+ document.getElementById('alarms_all').innerHTML =
+ document.getElementById('alarms_log').innerHTML =
+ 'loading...';
+ });
+
+ // ------------------------------------------------------------------------
+
+ dashboardSettingsSetup();
+ loadSnapshotDragAndDropSetup();
+ saveSnapshotModalSetup();
+ showPageFooter();
+
+ // ------------------------------------------------------------------------
+ // https://github.com/viralpatel/jquery.shorten/blob/master/src/jquery.shorten.js
+
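+    // jQuery plugin: collapse long element contents down to config.showChars
+    // visible characters (HTML-aware), appending a "show more" / "show less" toggle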
+ $.fn.shorten = function (settings) {
+ "use strict";
+
+ var config = {
+ showChars: 750,
+ minHideChars: 10,
+ ellipsesText: "...",
+ moreText: '<i class="fas fa-expand"></i> show more information',
+ lessText: '<i class="fas fa-compress"></i> show less information',
+ onLess: function () {
+ NETDATA.onscroll();
+ },
+ onMore: function () {
+ NETDATA.onscroll();
+ },
+ errMsg: null,
+ force: false
+ };
+
+ if (settings) {
+ $.extend(config, settings);
+ }
+
+ if ($(this).data('jquery.shorten') && !config.force) {
+ return false;
+ }
+ $(this).data('jquery.shorten', true);
+
+ $(document).off("click", '.morelink');
+
+ $(document).on({
+ click: function () {
+
+ var $this = $(this);
+ if ($this.hasClass('less')) {
+ $this.removeClass('less');
+ $this.html(config.moreText);
+ $this.parent().prev().animate({ 'height': '0' + '%' }, 0, function () {
+ $this.parent().prev().prev().show();
+ }).hide(0, function () {
+ config.onLess();
+ });
+ } else {
+ $this.addClass('less');
+ $this.html(config.lessText);
+ $this.parent().prev().animate({ 'height': '100' + '%' }, 0, function () {
+ $this.parent().prev().prev().hide();
+ }).show(0, function () {
+ config.onMore();
+ });
+ }
+ return false;
+ }
+ }, '.morelink');
+
+ return this.each(function () {
+ var $this = $(this);
+
+ var content = $this.html();
+ var contentlen = $this.text().length;
+ if (contentlen > config.showChars + config.minHideChars) {
+ var c = content.substr(0, config.showChars);
+            if (c.indexOf('<') >= 0) // if there is HTML, we must not cut inside a tag
+            {
+                var inTag = false; // are we currently inside a tag?
+                var bag = ''; // the characters to be shown are collected here
+                var countChars = 0; // current bag size
+                var openTags = []; // stack of opened tags, so we can close them later
+                var tagName = null;
+
+ for (var i = 0, r = 0; r <= config.showChars; i++) {
+ if (content[i] === '<' && !inTag) {
+ inTag = true;
+
+ // This could be "tag" or "/tag"
+ tagName = content.substring(i + 1, content.indexOf('>', i));
+
+                        // if it's a closing tag
+                        if (tagName[0] === '/') {
+
+                            if (tagName !== ('/' + openTags[0])) {
+                                config.errMsg = 'ERROR in HTML: the tag being closed should match the top of the stack';
+                            } else {
+                                openTags.shift(); // pop the most recently opened tag from the stack (it is now closed in the result HTML)
+ }
+
+ } else {
+                            // some nasty tags have no closing tag, like <br/>
+                            if (tagName.toLowerCase() !== 'br') {
+                                openTags.unshift(tagName); // push the newly opened tag onto the front of the stack
+ }
+ }
+ }
+
+ if (inTag && content[i] === '>') {
+ inTag = false;
+ }
+
+ if (inTag) {
+ bag += content.charAt(i);
+ } else {
+                        // count a visible (non-markup) character
+                        r++;
+                        if (countChars <= config.showChars) {
+                            bag += content.charAt(i); // use charAt() - IE7 does not allow indexing strings with []
+ countChars++;
+ } else {
+                            // we now have all the visible characters we need
+                            if (openTags.length > 0) {
+                                // there are still unclosed tags
+
+                                //console.log('There were open tags');
+                                //console.log(openTags);
+                                for (var j = 0; j < openTags.length; j++) {
+                                    //console.log('Closing tag ' + openTags[j]);
+                                    bag += '</' + openTags[j] + '>'; // close all the tags that were left open
+
+                                    // you could also shift each tag off the stack and verify it ends up empty, i.e. that all open tags were closed
+ }
+ break;
+ }
+ }
+ }
+ }
+ c = $('<div/>').html(bag + '<span class="ellip">' + config.ellipsesText + '</span>').html();
+ } else {
+ c += config.ellipsesText;
+ }
+
+ var html = '<div class="shortcontent">' + c +
+ '</div><div class="allcontent">' + content +
+ '</div><span><a href="javascript://nop/" class="morelink">' + config.moreText + '</a></span>';
+
+ $this.html(html);
+ $this.find(".allcontent").hide(); // Hide all text
+ $('.shortcontent p:last', $this).css('margin-bottom', 0); //Remove bottom margin on last paragraph as it's likely shortened
+ }
+ });
+ };
+}
+
+function finalizePage() {
+ // resize all charts - without starting the background thread
+ // this has to be done while NETDATA is paused
+    // if we omit this, the affix menu will be wrong, since all
+    // the DOM elements are initially zero-sized
+ NETDATA.parseDom();
+
+ // ------------------------------------------------------------------------
+
+ NETDATA.globalPanAndZoom.callback = null;
+ NETDATA.globalChartUnderlay.callback = null;
+
+ if (urlOptions.pan_and_zoom === true && NETDATA.options.targets.length > 0) {
+ NETDATA.globalPanAndZoom.setMaster(NETDATA.options.targets[0], urlOptions.after, urlOptions.before);
+ }
+
+ // callback for us to track PanAndZoom operations
+ NETDATA.globalPanAndZoom.callback = urlOptions.netdataPanAndZoomCallback;
+ NETDATA.globalChartUnderlay.callback = urlOptions.netdataHighlightCallback;
+
+ // ------------------------------------------------------------------------
+
+ // let it run (update the charts)
+ NETDATA.unpause();
+
+ runOnceOnDashboardWithjQuery();
+ $(".shorten").shorten();
+ enableTooltipsAndPopovers();
+
+ if (isdemo()) {
+        // do not give errors on netdata demo servers for 60 seconds
+ NETDATA.options.current.retries_on_data_failures = 60;
+
+ // google analytics when this is used for the home page of the demo sites
+ // this does not run on user's installations
+ setTimeout(function () {
+ (function (i, s, o, g, r, a, m) {
+ i['GoogleAnalyticsObject'] = r;
+ i[r] = i[r] || function () {
+ (i[r].q = i[r].q || []).push(arguments)
+ }, i[r].l = 1 * new Date();
+ a = s.createElement(o),
+ m = s.getElementsByTagName(o)[0];
+ a.async = 1;
+ a.src = g;
+ m.parentNode.insertBefore(a, m)
+ })(window, document, 'script', 'https://www.google-analytics.com/analytics.js', 'ga');
+
+ ga('create', 'UA-64295674-3', 'auto');
+ ga('send', 'pageview', '/demosite/' + window.location.host);
+ }, 2000);
+ } else {
+ notifyForUpdate();
+ }
+
+ if (urlOptions.show_alarms === true) {
+ setTimeout(function () {
+ $('#alarmsModal').modal('show');
+ }, 1000);
+ }
+
+ NETDATA.onresizeCallback = function () {
+ Ps.update(document.getElementById('sidebar'));
+ Ps.update(document.getElementById('my-netdata-dropdown-content'));
+ };
+ NETDATA.onresizeCallback();
+
+ if (netdataSnapshotData !== null) {
+ NETDATA.globalPanAndZoom.setMaster(NETDATA.options.targets[0], netdataSnapshotData.after_ms, netdataSnapshotData.before_ms);
+ }
+
+ //if (urlOptions.nowelcome !== true) {
+ // setTimeout(function () {
+ // $('#welcomeModal').modal();
+ // }, 2000);
+ //}
+
+ // var netdataEnded = performance.now();
+ // console.log('start up time: ' + (netdataEnded - netdataStarted).toString() + ' ms');
+}
+
+function resetDashboardOptions() {
+ var help = NETDATA.options.current.show_help;
+
+ NETDATA.resetOptions();
+ if (setTheme('slate')) {
+ netdataReload();
+ }
+
+ if (help !== NETDATA.options.current.show_help) {
+ netdataReload();
+ }
+}
+
+// callback to add the dashboard info to the
+// parallel javascript downloader in netdata
+var netdataPrepCallback = function () {
+ NETDATA.requiredCSS.push({
+ url: NETDATA.serverStatic + 'css/bootstrap-toggle-2.2.2.min.css',
+ isAlreadyLoaded: function () {
+ return false;
+ }
+ });
+
+ NETDATA.requiredJs.push({
+ url: NETDATA.serverStatic + 'lib/bootstrap-toggle-2.2.2.min.js',
+ isAlreadyLoaded: function () {
+ return false;
+ }
+ });
+
+ NETDATA.requiredJs.push({
+ url: NETDATA.serverStatic + 'dashboard_info.js?v20181019-1',
+ async: false,
+ isAlreadyLoaded: function () {
+ return false;
+ }
+ });
+
+ if (isdemo()) {
+ document.getElementById('masthead').style.display = 'block';
+ } else {
+ if (urlOptions.update_always === true) {
+ NETDATA.setOption('stop_updates_when_focus_is_lost', !urlOptions.update_always);
+ }
+ }
+};
+
+var selected_server_timezone = function (timezone, status) {
+ //console.log('called with timezone: ' + timezone + ", status: " + ((typeof status === 'undefined')?'undefined':status).toString());
+
+ // clear the error
+ document.getElementById('timezone_error_message').innerHTML = '';
+
+ if (typeof status === 'undefined') {
+ // the user selected a timezone from the menu
+
+ NETDATA.setOption('user_set_server_timezone', timezone);
+
+ if (NETDATA.dateTime.init(timezone) === false) {
+ NETDATA.dateTime.init();
+
+ if (!$('#local_timezone').prop('checked')) {
+ $('#local_timezone').bootstrapToggle('on');
+ }
+
+            document.getElementById('timezone_error_message').innerHTML = 'Oops! That timezone was not accepted by your browser. Please open a GitHub issue to help us fix it.';
+ NETDATA.setOption('user_set_server_timezone', NETDATA.options.server_timezone);
+ } else {
+ if ($('#local_timezone').prop('checked')) {
+ $('#local_timezone').bootstrapToggle('off');
+ }
+ }
+ } else if (status === true) {
+ // the user wants the browser default timezone to be activated
+
+ NETDATA.dateTime.init();
+ } else {
+ // the user wants the server default timezone to be activated
+ //console.log('found ' + NETDATA.options.current.user_set_server_timezone);
+
+ if (NETDATA.options.current.user_set_server_timezone === 'default') {
+ NETDATA.options.current.user_set_server_timezone = NETDATA.options.server_timezone;
+ }
+
+ timezone = NETDATA.options.current.user_set_server_timezone;
+
+ if (NETDATA.dateTime.init(timezone) === false) {
+ NETDATA.dateTime.init();
+
+ if (!$('#local_timezone').prop('checked')) {
+ $('#local_timezone').bootstrapToggle('on');
+ }
+
+ document.getElementById('timezone_error_message').innerHTML = 'Sorry. The timezone "' + timezone.toString() + '" is not accepted by your browser. Please select one from the list.';
+ NETDATA.setOption('user_set_server_timezone', NETDATA.options.server_timezone);
+ }
+ }
+
+ document.getElementById('current_timezone').innerText = (NETDATA.options.current.timezone === 'default') ? 'unset, using browser default' : NETDATA.options.current.timezone;
+ return false;
+};
+
+// our entry point
+// var netdataStarted = performance.now();
+
+var netdataCallback = initializeDynamicDashboard;
+
+// =================================================================================================
+// netdata.cloud
+
+let registryAgents = [];
+
+let cloudAgents = [];
+
+let myNetdataMenuFilterValue = "";
+
+let cloudAccountID = null;
+
+let cloudAccountName = null;
+
+let cloudToken = null;
+
+/// Enforces a maximum string length while retaining the prefix and the postfix of
+/// the string.
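+/// e.g. truncateString("0123456789", 9) === "012...789"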
+function truncateString(str, maxLength) {
+ if (str.length <= maxLength) {
+ return str;
+ }
+
+ const spanLength = Math.floor((maxLength - 3) / 2);
+ return `${str.substring(0, spanLength)}...${str.substring(str.length - spanLength)}`;
+}
+
+// -------------------------------------------------------------------------------------------------
+// netdata.cloud API Client
+// -------------------------------------------------------------------------------------------------
+
+function isValidAgent(a) {
+ return a.urls != null && a.urls.length > 0;
+}
+
+// https://github.com/netdata/hub/issues/146
+function getCloudAccountAgents() {
+ if (!isSignedIn()) {
+ return [];
+ }
+
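+    // GET the agents known to this cloud account and normalise them to the
+    // registry format: {guid, name, url, alternate_urls}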
+ return fetch(
+ `${NETDATA.registry.cloudBaseURL}/api/v1/accounts/${cloudAccountID}/agents`,
+ {
+ method: "GET",
+ mode: "cors",
+ headers: {
+ "Authorization": `Bearer ${cloudToken}`
+ }
+ }
+ ).then((response) => {
+ if (!response.ok) {
+            throw Error("Cannot fetch the account's agents");
+ }
+ return response.json();
+ }).then((payload) => {
+ const agents = payload.result ? payload.result.agents : null;
+
+ if (!agents) {
+ return [];
+ }
+
+ return agents.filter((a) => isValidAgent(a)).map((a) => {
+ return {
+ "guid": a.id,
+ "name": a.name,
+ "url": a.urls[0],
+ "alternate_urls": a.urls
+ }
+ })
+ }).catch(function (error) {
+ console.log(error);
+ return null;
+ });
+}
+
+/** Updates the lastAccessTime and accessCount properties of the agent for the account. */
+function touchAgent() {
+ if (!isSignedIn()) {
+ return [];
+ }
+
+ const touchUrl = `${NETDATA.registry.cloudBaseURL}/api/v1/agents/${NETDATA.registry.machine_guid}/touch?account_id=${cloudAccountID}`;
+ return fetch(
+ touchUrl,
+ {
+ method: "post",
+ body: "",
+ mode: "cors",
+ headers: {
+ "Authorization": `Bearer ${cloudToken}`
+ }
+ }
+ ).then((response) => {
+ if (!response.ok) {
+            throw Error("Cannot touch agent: " + JSON.stringify(response));
+ }
+ return response.json();
+ }).then((payload) => {
+
+ }).catch(function (error) {
+ console.log(error);
+ return null;
+ });
+}
+
+// https://github.com/netdata/hub/issues/128
+function postCloudAccountAgents(agentsToSync) {
+ if (!isSignedIn()) {
+ return [];
+ }
+
+ const maskedURL = NETDATA.registry.MASKED_DATA;
+
+ const agents = agentsToSync.map((a) => {
+ const urls = a.alternate_urls.filter((url) => url != maskedURL);
+
+ return {
+ "id": a.guid,
+ "name": a.name,
+ "urls": urls
+ }
+ }).filter((a) => isValidAgent(a))
+
+ const payload = {
+ "accountID": cloudAccountID,
+ "agents": agents,
+ "merge": false,
+ };
+
+ return fetch(
+ `${NETDATA.registry.cloudBaseURL}/api/v1/accounts/${cloudAccountID}/agents`,
+ {
+ method: "POST",
+ mode: "cors",
+ headers: {
+ "Content-Type": "application/json; charset=utf-8",
+ "Authorization": `Bearer ${cloudToken}`
+ },
+ body: JSON.stringify(payload)
+ }
+ ).then((response) => {
+ return response.json();
+ }).then((payload) => {
+ const agents = payload.result ? payload.result.agents : null;
+
+ if (!agents) {
+ return [];
+ }
+
+ return agents.filter((a) => isValidAgent(a)).map((a) => {
+ return {
+ "guid": a.id,
+ "name": a.name,
+ "url": a.urls[0],
+ "alternate_urls": a.urls
+ }
+ })
+ });
+}
+
+function deleteCloudAgentURL(agentID, url) {
+ if (!isSignedIn()) {
+ return [];
+ }
+
+ return fetch(
+ `${NETDATA.registry.cloudBaseURL}/api/v1/accounts/${cloudAccountID}/agents/${agentID}/url?value=${encodeURIComponent(url)}`,
+ {
+ method: "DELETE",
+ mode: "cors",
+ headers: {
+ "Content-Type": "application/json; charset=utf-8",
+ "Authorization": `Bearer ${cloudToken}`
+ },
+ }
+ ).then((response) => {
+ return response.json();
+ }).then((payload) => {
+ const count = payload.result ? payload.result.count : 0;
+ return count;
+ });
+}
+
+// -------------------------------------------------------------------------------------------------
+
+function signInDidClick(e) {
+ e.preventDefault();
+ e.stopPropagation();
+
+ if (!NETDATA.registry.isUsingGlobalRegistry()) {
+        // if the user is using a private registry, request their consent
+        // before synchronizing with the cloud
+ showSignInModal();
+ return;
+ }
+
+ signIn();
+}
+
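+// the sign-in banner is currently disabled unconditionally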
+function shouldShowSignInBanner() {
+ return false;
+}
+
+function closeSignInBanner() {
+ localStorage.setItem("signInBannerClosed", "true");
+ const el = document.getElementById("sign-in-banner");
+ if (el) {
+ el.style.display = "none";
+ }
+}
+
+function closeSignInBannerDidClick(e) {
+ closeSignInBanner();
+}
+
+function signOutDidClick(e) {
+ e.preventDefault();
+ e.stopPropagation();
+ signOut();
+}
+
+// -------------------------------------------------------------------------------------------------
+
+function updateMyNetdataAfterFilterChange() {
+ const machinesEl = document.getElementById("my-netdata-menu-machines")
+ machinesEl.innerHTML = renderMachines(cloudAgents);
+
+ if (options.hosts.length > 1) {
+ const streamedEl = document.getElementById("my-netdata-menu-streamed")
+ streamedEl.innerHTML = renderStreamedHosts(options);
+ }
+}
+
+function myNetdataMenuDidShow() {
+ const filterEl = document.getElementById("my-netdata-menu-filter-input");
+ if (filterEl) {
+ filterEl.focus();
+ }
+}
+
+function myNetdataFilterDidChange(e) {
+ const inputEl = e.target;
+ setTimeout(() => {
+ myNetdataMenuFilterValue = inputEl.value;
+ updateMyNetdataAfterFilterChange();
+ }, 1);
+}
+
+function myNetdataFilterClearDidClick(e) {
+ e.preventDefault();
+ e.stopPropagation();
+
+ const inputEl = document.getElementById("my-netdata-menu-filter-input");
+ inputEl.value = "";
+ myNetdataMenuFilterValue = "";
+
+ updateMyNetdataAfterFilterChange();
+
+ inputEl.focus();
+}
+
+// -------------------------------------------------------------------------------------------------
+
+function clearCloudVariables() {
+ cloudAccountID = null;
+ cloudAccountName = null;
+ cloudToken = null;
+}
+
+function clearCloudLocalStorageItems() {
+ localStorage.removeItem("cloud.baseURL");
+ localStorage.removeItem("cloud.agentID");
+ localStorage.removeItem("cloud.sync");
+}
+
+function signIn() {
+ const url = `${NETDATA.registry.cloudBaseURL}/account/sign-in-agent?id=${NETDATA.registry.machine_guid}&name=${encodeURIComponent(NETDATA.registry.hostname)}&origin=${encodeURIComponent(window.location.origin + "/")}`;
+ window.open(url);
+}
+
+function signOut() {
+ cloudSSOSignOut();
+}
+
+function handleMessage(e) {
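+    // "message" events posted back by the netdata.cloud sign-in window and the SSO iframe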
+ switch (e.data.type) {
+ case "sign-in":
+ handleSignInMessage(e);
+ break;
+
+ case "sign-out":
+ handleSignOutMessage(e);
+ break;
+
+ default:
+ return;
+ }
+}
+
+function handleSignInMessage(e) {
+ closeSignInBanner();
+ localStorage.setItem("cloud.baseURL", NETDATA.registry.cloudBaseURL);
+
+ cloudAccountID = e.data.accountID;
+ cloudAccountName = e.data.accountName;
+ cloudToken = e.data.token;
+
+ netdataRegistryCallback(registryAgents);
+ if (e.data.redirectURI && !window.location.href.includes(e.data.redirectURI)) {
+ // lgtm false-positive - redirectURI does not come from user input, but from iframe callback
+ window.location.replace(e.data.redirectURI); // lgtm[js/client-side-unvalidated-url-redirection]
+ }
+}
+
+function handleSignOutMessage(e) {
+ clearCloudVariables();
+ renderMyNetdataMenu(registryAgents);
+}
+
+function isSignedIn() {
+ return cloudToken != null && cloudAccountID != null;
+}
+
+function sortedArraysEqual(a, b) {
+ if (a.length != b.length) return false;
+
+ for (var i = 0; i < a.length; ++i) {
+ if (a[i] !== b[i]) return false;
+ }
+
+ return true;
+}
+
+// If merging is needed, returns the merged set of agents; otherwise returns null.
+function mergeAgents(cloud, local) {
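+    // union the two agent lists, keyed by guid; "dirty" becomes true only when
+    // the local registry knows a guid or URL that the cloud does not, i.e. when
+    // the union actually has to be pushed back to the cloud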
+ let dirty = false;
+
+ const union = new Map();
+
+ for (const cagent of cloud) {
+ union.set(cagent.guid, cagent);
+ }
+
+ for (const lagent of local) {
+ const cagent = union.get(lagent.guid);
+ if (cagent) {
+ for (const u of lagent.alternate_urls) {
+ if (u === NETDATA.registry.MASKED_DATA) { // TODO: temp until registry is updated.
+ continue;
+ }
+
+ if (!cagent.alternate_urls.includes(u)) {
+ dirty = true;
+ cagent.alternate_urls.push(u);
+ }
+ }
+ } else {
+ dirty = true;
+ union.set(lagent.guid, lagent);
+ }
+ }
+
+ if (dirty) {
+ return Array.from(union.values());
+ }
+
+ return null;
+}
+
+function showSignInModal() {
+ document.getElementById("sim-registry").innerHTML = NETDATA.registry.server;
+ $("#signInModal").modal("show");
+}
+
+function explicitlySignIn() {
+ $("#signInModal").modal("hide");
+ signIn();
+}
+
+function showSyncModal() {
+ document.getElementById("sync-registry-modal-registry").innerHTML = NETDATA.registry.server;
+ $("#syncRegistryModal").modal("show");
+}
+
+function explicitlySyncAgents() {
+ $("#syncRegistryModal").modal("hide");
+
+ const json = localStorage.getItem("cloud.sync");
+ const sync = json ? JSON.parse(json) : {};
+ delete sync[cloudAccountID];
+ localStorage.setItem("cloud.sync", JSON.stringify(sync));
+
+ NETDATA.registry.init();
+}
+
+function syncAgents(callback) {
+ const json = localStorage.getItem("cloud.sync");
+ const sync = json ? JSON.parse(json) : {};
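+    // "cloud.sync" maps account IDs to the time of their last sync; once this
+    // account has synced, only the current agent is treated as local state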
+
+ const currentAgent = {
+ guid: NETDATA.registry.machine_guid,
+ name: NETDATA.registry.hostname,
+ url: NETDATA.serverDefault,
+ alternate_urls: [NETDATA.serverDefault],
+ }
+
+ const localAgents = sync[cloudAccountID]
+ ? [currentAgent]
+ : registryAgents.concat([currentAgent]);
+
+ console.log("Checking if sync is needed.", localAgents);
+
+ const agentsToSync = mergeAgents(cloudAgents, localAgents);
+
+ if ((!sync[cloudAccountID]) || agentsToSync) {
+ sync[cloudAccountID] = new Date().getTime();
+ localStorage.setItem("cloud.sync", JSON.stringify(sync));
+ }
+
+ if (agentsToSync) {
+ console.log("Synchronizing with netdata.cloud.");
+
+ postCloudAccountAgents(agentsToSync).then((agents) => {
+ // TODO: clear syncTime on error!
+ cloudAgents = agents;
+ callback(cloudAgents);
+ });
+
+ return
+ }
+
+ callback(cloudAgents);
+}
+
+let isCloudSSOInitialized = false;
+
+function cloudSSOInit() {
+ const iframeEl = document.getElementById("ssoifrm");
+ const url = `${NETDATA.registry.cloudBaseURL}/account/sso-agent?id=${NETDATA.registry.machine_guid}`;
+ iframeEl.src = url;
+ isCloudSSOInitialized = true;
+}
+
+function cloudSSOSignOut() {
+ const iframe = document.getElementById("ssoifrm");
+ const url = `${NETDATA.registry.cloudBaseURL}/account/sign-out-agent`;
+ iframe.src = url;
+}
+
+function initCloud() {
+ if (!NETDATA.registry.isCloudEnabled) {
+ clearCloudVariables();
+ clearCloudLocalStorageItems();
+ return;
+ }
+
+ if (NETDATA.registry.cloudBaseURL != localStorage.getItem("cloud.baseURL")) {
+ clearCloudVariables();
+ clearCloudLocalStorageItems();
+ if (NETDATA.registry.cloudBaseURL) {
+ localStorage.setItem("cloud.baseURL", NETDATA.registry.cloudBaseURL);
+ }
+ }
+
+ if (!isCloudSSOInitialized) {
+ cloudSSOInit();
+ }
+
+ touchAgent();
+}
+
+// This callback is called after NETDATA.registry is initialized.
+function netdataRegistryCallback(machinesArray) {
+ localStorage.setItem("cloud.agentID", NETDATA.registry.machine_guid);
+
+ initCloud();
+
+ registryAgents = machinesArray;
+
+ if (isSignedIn()) {
+ // We call getCloudAccountAgents() here because it requires that
+ // NETDATA.registry is initialized.
+ clearMyNetdataMenu();
+ getCloudAccountAgents().then((agents) => {
+ if (!agents) {
+ errorMyNetdataMenu();
+ return;
+ }
+ cloudAgents = agents;
+ syncAgents((agents) => {
+ const agentsMap = {}
+ for (const agent of agents) {
+ agentsMap[agent.guid] = agent;
+ }
+
+ NETDATA.registry.machines = agentsMap;
+ NETDATA.registry.machines_array = agents;
+
+ renderMyNetdataMenu(agents);
+ });
+ });
+ } else {
+ renderMyNetdataMenu(machinesArray)
+ }
+};
+
+// If we know the cloudBaseURL and agentID from local storage, eagerly render
+// the account UI before receiving the definitive response from the web server.
+// This improves the perceived performance.
+function tryFastInitCloud() {
+ const baseURL = localStorage.getItem("cloud.baseURL");
+ const agentID = localStorage.getItem("cloud.agentID");
+
+ if (baseURL && agentID) {
+ NETDATA.registry.cloudBaseURL = baseURL;
+ NETDATA.registry.machine_guid = agentID;
+ NETDATA.registry.isCloudEnabled = true;
+
+ initCloud();
+ }
+}
+
+function initializeApp() {
+ window.addEventListener("message", handleMessage, false);
+
+ // tryFastInitCloud();
+}
+
+if (document.readyState === "complete") {
+ initializeApp();
+} else {
+ document.addEventListener("readystatechange", () => {
+ if (document.readyState === "complete") {
+ initializeApp();
+ }
+ });
+}
diff --git a/web/gui/manifest.json b/web/gui/manifest.json
new file mode 100644
index 0000000..52cb483
--- /dev/null
+++ b/web/gui/manifest.json
@@ -0,0 +1,41 @@
+{
+ "name": "App",
+ "icons": [
+ {
+ "src": "images\/android-icon-36x36.png",
+ "sizes": "36x36",
+ "type": "image\/png",
+ "density": "0.75"
+ },
+ {
+ "src": "images\/android-icon-48x48.png",
+ "sizes": "48x48",
+ "type": "image\/png",
+ "density": "1.0"
+ },
+ {
+ "src": "images\/android-icon-72x72.png",
+ "sizes": "72x72",
+ "type": "image\/png",
+ "density": "1.5"
+ },
+ {
+ "src": "images\/android-icon-96x96.png",
+ "sizes": "96x96",
+ "type": "image\/png",
+ "density": "2.0"
+ },
+ {
+ "src": "images\/android-icon-144x144.png",
+ "sizes": "144x144",
+ "type": "image\/png",
+ "density": "3.0"
+ },
+ {
+ "src": "images\/android-icon-192x192.png",
+ "sizes": "192x192",
+ "type": "image\/png",
+ "density": "4.0"
+ }
+ ]
+}
diff --git a/web/gui/old/index.html b/web/gui/old/index.html
new file mode 100644
index 0000000..94ec12b
--- /dev/null
+++ b/web/gui/old/index.html
@@ -0,0 +1,1321 @@
+<!DOCTYPE html>
+<!-- SPDX-License-Identifier: GPL-3.0-or-later -->
+<html lang="en">
+<head>
+ <title>netdata dashboard</title>
+ <meta name="application-name" content="netdata">
+
+ <meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
+ <meta charset="utf-8">
+ <meta http-equiv="X-UA-Compatible" content="IE=edge,chrome=1">
+ <meta name="viewport" content="width=device-width, initial-scale=1">
+ <meta name="apple-mobile-web-app-capable" content="yes">
+ <meta name="apple-mobile-web-app-status-bar-style" content="black-translucent">
+ <meta name="author" content="costa@tsaousis.gr">
+
+ <link rel="stylesheet" type="text/css" href="../main.css?v=5">
+
+ <link rel="icon" href="data:image/x-icon;base64,iVBORw0KGgoAAAANSUhEUgAAACAAAAAgCAYAAABzenr0AAAAGXRFWHRTb2Z0d2FyZQBBZG9iZSBJbWFnZVJlYWR5ccllPAAAAP9JREFUeNpiYBgFo+A/w34gpiZ8DzWzAYgNiHGAA5UdgA73g+2gcyhgg/0DGQoweB6IBQYyFCCOGOBQwBMd/xnW09ERDtgcoEBHB+zHFQrz6egIBUasocDAcJ9OxWAhE4YQI8MDILmATg7wZ8QRDfQKhQf4Cie6pAVGPA4AhQKo0BCgZRAw4ZSBpIWJNI6CD4wEKikBaFqgVSgcYMIrzcjwgcahcIGRiPYCLUPBkNhWUwP9akVcoQBpatG4MsLviAIqWj6f3Absfdq2igg7IIEKDVQKEzN5ofAenJCp1I8gJRTug5tfkGIdR1FDniMI+QZUjF8Amn5htOdHCAAEGACE6B0cS6mrEwAAAABJRU5ErkJggg==" />
+
+ <meta property="og:locale" content="en_US" />
+ <meta property="og:url" content="https://my-netdata.io" />
+ <meta property="og:type" content="website" />
+ <meta property="og:site_name" content="netdata"/>
+ <meta property="og:title" content="Get control of your Linux Servers. Simple. Effective. Awesome." />
+ <meta property="og:description" content="Unparalleled insights, in real-time, of everything happening on your Linux systems and applications, with stunning, interactive web dashboards and powerful performance and health alarms." />
+ <meta property="og:image" content="https://cloud.githubusercontent.com/assets/2662304/22945737/e98cd0c6-f2fd-11e6-96f1-5501934b0955.png" />
+ <meta property="og:image:type" content="image/png" />
+ <meta property="fb:app_id" content="1200089276712916" />
+
+ <meta name="twitter:card" content="summary" />
+ <meta name="twitter:site" content="@linuxnetdata" />
+ <meta name="twitter:title" content="Get control of your Linux Servers. Simple. Effective. Awesome." />
+ <meta name="twitter:description" content="Unparalleled insights, in real-time, of everything happening on your Linux systems and applications, with stunning, interactive web dashboards and powerful performance and health alarms." />
+ <meta name="twitter:image" content="https://cloud.githubusercontent.com/assets/2662304/14092712/93b039ea-f551-11e5-822c-beadbf2b2a2e.gif" />
+
+ <script src="../main.js?v20200429-0"></script>
+</head>
+
+<body data-spy="scroll" data-target="#sidebar" data-offset="100">
+ <div id="loadOverlay" class="loadOverlay" style="background-color: #fff; color: #888;">
+ <div style="font-size: 3vh;">
+ You must enable JavaScript in order to use Netdata!<br />
+ You can do this in <a href="https://enable-javascript.com/" target="_blank">your browser settings</a>.
+ </div>
+ </div>
+ <script type="text/javascript">
+ // Cleanup JS warning.
+ document.documentElement.style.overflowY = "scroll";
+
+ // Change the loadOverlay colors ASAP to match the theme.
+ let theme;
+ const hash = document.location.hash;
+ if (hash.includes('theme=slate')) {
+ theme = 'slate';
+ } else if (hash.includes('theme=white')) {
+ theme = 'white';
+ } else {
+ theme = localStorage.getItem('netdataTheme') || 'slate';
+ }
+ const overlayEl = document.getElementById('loadOverlay');
+ overlayEl.innerHTML = 'netdata<br/><div style="font-size: 3vh;">Real-time performance monitoring, done right!</div>';
+ overlayEl.style = theme == 'slate' ? "background-color: #272b30; color: #373b40;" : "background-color: #fff; color: #ddd;";
+ </script>
+ <nav class="navbar navbar-default navbar-fixed-top" role="banner">
+ <div class="container">
+ <div class="navbar-header">
+ <button class="navbar-toggle" type="button" data-toggle="collapse" data-target=".navbar-collapse">
+ <span class="sr-only">Toggle navigation</span>
+ <span class="icon-bar"></span>
+ <span class="icon-bar"></span>
+ <span class="icon-bar"></span>
+ </button>
+ <ul class="nav navbar-nav" style="display: inline-block">
+ <li data-placement="right" class="hidden-xs hidden-sm" style="line-height: 50px; margin-right: 15px" title="Netdata Agent">
+ <img src="../images/netdata-logomark.svg" height="32"/>
+ </li>
+ <li class="dropdown" id="myNetdataDropdownParent" title="your other netdata servers" data-toggle="tooltip" data-placement="right">
+ <a href="#" id="hostname" class="dropdown-toggle" data-toggle="dropdown">
+ my-netdata
+ <strong class="caret"></strong></a>
+ <div id="my-netdata-dropdown-content" class="dropdown-menu scrollable-menu inpagemenu">
+ <div class="agent-item" style="white-space: nowrap">
+ <i class="fas fa-hourglass-half"></i>
+ Loading, please wait...
+ <div></div>
+ </div>
+ </div>
+ </li>
+ </ul>
+ </div>
+ <nav class="collapse navbar-collapse navbar-right" role="navigation">
+ <ul class="nav navbar-nav">
+ <li id="alarmsButton" title="check the health monitoring alarms and their log" data-toggle="tooltip" data-placement="bottom"><a href="#" class="btn" data-toggle="modal" data-target="#alarmsModal"><i class="fas fa-bell"></i>&nbsp;<span class="hidden-sm hidden-md">Alarms&nbsp;</span><span id="alarms_count_badge" class="badge"></span></a></li>
+ <li title="change dashboard settings" data-toggle="tooltip" data-placement="bottom"><a href="#" class="btn" data-toggle="modal" data-target="#optionsModal"><i class="fas fa-cog"></i>&nbsp;<span class="hidden-sm hidden-md">Settings</span></a></li>
+ <li title="check for netdata updates<br/>you should keep your netdata updated" data-toggle="tooltip" data-placement="bottom" class="hidden-sm" id="updateButton"><a href="#" class="btn" data-toggle="modal" data-target="#updateModal"><i class="fas fa-cloud-download-alt"></i> <span class="hidden-sm hidden-md">Update </span><span id="update_badge" class="badge"></span></a></li>
+ <li title="the netdata wiki home at github<br/>remember to <b>give netdata a <i class=&quot;fas fa-star&quot;></i></b> !" data-toggle="tooltip" data-placement="bottom" class="hidden-xs hidden-sm hidden-md"><a href="https://github.com/netdata/netdata" class="btn" target="_blank"><i class="fab fa-github"></i></a></li>
+ <li title="follow netdata on twitter" data-toggle="tooltip" data-placement="bottom" class="hidden-xs hidden-sm hidden-md"><a href="https://twitter.com/linuxnetdata" class="btn" target="_blank"><i class="fab fa-twitter"></i></a></li>
+ <li title="like netdata on facebook" data-toggle="tooltip" data-placement="bottom" class="hidden-xs hidden-sm hidden-md"><a href="https://www.facebook.com/linuxnetdata/" class="btn" target="_blank"><i class="fab fa-facebook"></i></a></li>
+ <li title="import / load a netdata snapshot" data-toggle="tooltip" data-placement="bottom" id="loadButton"><a href="#" class="btn" data-toggle="modal" data-target="#loadSnapshotModal"><i class="fas fa-download"></i>&nbsp;<span class="hidden-sm hidden-md hidden-lg">Import</span></a></li>
+ <li title="export / save a netdata snapshot" data-toggle="tooltip" data-placement="bottom" id="saveButton"><a href="#" class="btn" data-toggle="modal" data-target="#saveSnapshotModal"><i class="fas fa-upload"></i>&nbsp;<span class="hidden-sm hidden-md hidden-lg">Export</span></a></li>
+ <li title="print this dashboard to PDF" data-toggle="tooltip" data-placement="bottom" id="printButton"><a href="#" class="btn" data-toggle="modal" data-target="#printPreflightModal"><i class="fas fa-print"></i>&nbsp;<span class="hidden-sm hidden-md hidden-lg">Print</span></a></li>
+ <li title="get help on using the charts" data-toggle="tooltip" data-placement="bottom" class="hidden-sm"><a href="#" class="btn" data-toggle="modal" data-target="#helpModal"><i class="fas fa-question-circle"></i>&nbsp;<span class="hidden-sm hidden-md">Help</span></a></li>
+ <li id="account-menu-container" class="dropdown" data-toggle="tooltip"></li>
+ </ul>
+ </nav>
+ </div>
+ </nav>
+ <div class="navbar-highlight">
+ <div id="navbar-highlight-content" class="navbar-highlight-content"></div>
+ </div>
+
+ <div id="sign-in-banner" style="display: none">
+ <div class="container">
+ Like what you see?
+ <strong><a href="#" class="__link" onclick="signInDidClick(event); return false">Sign in</a>
+ to experience the full-range of netdata capabilities!</strong>
+ <div class="__close" onclick="closeSignInBannerDidClick(event)">
+ Close <i class="fas fa-times"></i>
+ </div>
+ </div>
+ </div>
+
+ <div id="masthead" style="display: none;">
+ <div class="container">
+ <div class="row">
+ <div class="col-md-7">
+ <h1>Netdata
+ <p class="lead">Real-time performance monitoring, in the greatest possible detail</p>
+ </h1>
+ </div>
+ <div class="col-md-5">
+ <div class="well well-lg">
+ <div class="row">
+ <div class="col-md-6">
+ <b>Drag</b> charts to pan.
+ <b>Shift + wheel</b> on them, to zoom in and out.
+ <b>Double-click</b> on them, to reset.
+ <b>Hover</b> on them too!
+ </div>
+ <div class="col-md-6">
+ <div class="netdata-container" data-netdata="system.intr" data-chart-library="dygraph" data-dygraph-theme="sparkline" data-dygraph-type="line" data-dygraph-strokewidth="3" data-dygraph-smooth="true" data-dygraph-highlightcirclesize="6" data-after="-90" data-height="60px" data-colors="#C66"></div>
+ </div>
+ </div>
+ </div>
+ </div>
+ </div>
+ </div>
+ </div>
+
+ <div class="container">
+ <div class="row">
+ <div class="charts-body" role="main">
+ <div id="charts_div"></div>
+ </div>
+ <div class="sidebar-body hidden-xs hidden-sm hidden-print" id="sidebar-body" role="complementary">
+ <nav class="dashboard-sidebar hidden-print hidden-xs hidden-sm" id="sidebar" role="menu"></nav>
+ </div>
+ </div>
+ </div>
+
+ <div id="footer" class="container" style="display: none;">
+ <div class="row">
+ <div class="col-md-10" role="main">
+ <div class="p">
+ <big><a href="https://github.com/netdata/netdata/wiki" target="_blank">Netdata</a></big>
+ <br /><br />
+ <i class="fas fa-copyright"></i> Copyright 2018, <a href="mailto:info@netdata.cloud">Netdata, Inc</a>.<br/>
+ <i class="fas fa-copyright"></i> Copyright 2016-2018, <a href="mailto:costa@tsaousis.gr">Costa Tsaousis</a>.<br/>
+ </div>
+ <div class="p">
+ Released under <a href="http://www.gnu.org/licenses/gpl-3.0.en.html" target="_blank">GPL v3 or later</a>.
+ Netdata uses <a href="https://github.com/netdata/netdata/blob/master/REDISTRIBUTED.md" target="_blank">third party tools</a>.
+ <br /><br />
+ </div>
+ </div>
+ </div>
+ </div>
+
+ <div class="modal fade" id="xssModal" tabindex="-1" role="dialog" aria-labelledby="xssModalLabel" data-keyboard="false" data-backdrop="static" style="z-index: 3000">
+ <div class="modal-dialog modal-lg" role="document">
+ <div class="modal-content">
+ <div class="modal-header">
+ <h4 class="modal-title" id="xssModalLabel">XSS Protection</h4>
+ </div>
+ <div class="modal-body">
+ <p>
+ This dashboard is about to render data from server:
+ </p>
+ <p style="font-size: 1.25em;">
+ <code id="netdataXssModalServer"></code>
+ </p>
+ <p>
+ To protect your privacy, the dashboard will <b>check all data transferred</b> for cross site scripting (XSS).
+ <br/>This is CPU intensive, so your browser might be a bit slower.
+ </p>
+ <p>
+ If you <b>trust</b> the remote server, you can disable XSS protection.<br/>
+ In this case, any remote dashboard decoration code (javascript) will also run.
+ </p>
+ <p>
+ If you <b>don't trust</b> the remote server, you should keep the protection on.<br/>
+ The dashboard will run slower and remote dashboard decoration code will not run, but better be safe than sorry...
+ </p>
+ </div>
+ <div class="modal-footer">
+ <a href="#" onclick="return xssModalKeepXss();" type="button" class="btn btn-success" data-dismiss="modal">Keep protecting me</a>
+ <a href="#" onclick="return xssModalDisableXss();" type="button" class="btn btn-danger" data-dismiss="modal">I don't need this, the server is mine</a>
+ </div>
+ </div>
+ </div>
+ </div>
+
+ <div class="modal fade" id="printPreflightModal" tabindex="-1" role="dialog" aria-labelledby="printPreflightModalLabel">
+ <div class="modal-dialog modal-lg" role="document">
+ <div class="modal-content">
+ <div class="modal-header">
+ <button type="button" class="close" data-dismiss="modal" aria-label="Close"><span aria-hidden="true">&times;</span></button>
+ <h4 class="modal-title" id="printPreflightModalLabel">Print this netdata dashboard</h4>
+ </div>
+ <div class="modal-body">
+ <p>
+ netdata dashboards cannot be captured, since we are lazy loading and hiding all but the visible charts.
+ <br/>
+                        To capture the whole page with all the charts rendered, a new browser window will pop up and render all the charts at once.
+                        The new browser window will maintain the current pan and zoom settings of the charts, so align the charts before proceeding.
+ </p>
+ <p>
+ <small>
+ This process will put some CPU and memory pressure on your browser.<br/>
+                            For the netdata server, we will sequentially download all the charts, to avoid congesting network and server resources.<br/>
+ <b>Please, do not print netdata dashboards on paper!</b>
+ </small>
+ </p>
+ </div>
+ <div class="modal-footer">
+ <a href="#" onclick="printPreflight(); return false;" type="button" class="btn btn-default">Print</a>
+ <button type="button" class="btn btn-default" data-dismiss="modal">Close</button>
+ </div>
+ </div>
+ </div>
+ </div>
+
+ <div class="modal fade" id="printModal" tabindex="-1" role="dialog" aria-labelledby="printModalLabel" data-keyboard="false" data-backdrop="static">
+ <div class="modal-dialog modal-lg" role="document">
+ <div class="modal-content">
+ <div class="modal-header">
+ <button type="button" class="close" data-dismiss="modal" aria-label="Close"><span aria-hidden="true">&times;</span></button>
+ <h4 class="modal-title" id="printModalLabel">Preparing dashboard for printing...</h4>
+ </div>
+ <div class="modal-body">
+ Please wait while we initialize and render all the charts on the dashboard.
+ <div class="progress progress-striped active" style="height: 2em !important;">
+ <div id="printModalProgressBar" class="progress-bar progress-bar-info" role="progressbar" aria-valuenow="0" aria-valuemin="0" aria-valuemax="100" style="min-width: 2em;">
+ <span id="printModalProgressBarText" style="padding-left: 10px; padding-top: 4px; font-size: 1.2em; text-align: left; width: 100%; position: absolute; display: block; color: black;"></span>
+ </div>
+ </div>
+ The print dialog will appear as soon as we finish rendering the page.
+ </div>
+ <div class="modal-footer">
+ </div>
+ </div>
+ </div>
+ </div>
+
+ <div class="modal fade" id="loadSnapshotModal" tabindex="-1" role="dialog" aria-labelledby="loadSnapshotModalLabel">
+ <div class="modal-dialog modal-lg" role="document">
+ <div class="modal-content">
+ <div class="modal-header">
+ <button type="button" class="close" data-dismiss="modal" aria-label="Close"><span aria-hidden="true">&times;</span></button>
+ <h4 class="modal-title" id="loadSnapshotModalLabel">Import a netdata snapshot</h4>
+ </div>
+ <div id="loadSnapshotDragAndDrop" class="modal-body">
+ <p>
+ netdata can export and import dashboard snapshots.
+ Any netdata can import the snapshot of any other netdata.
+ The snapshots are not uploaded to a server. They are handled entirely by your web browser, on your computer.
+ </p>
+ <p style="text-align: center;">
+ <label class="btn btn-default">
+ Click here to select the netdata snapshot file to import
+ <input type="file" id="loadSnapshotSelectFiles" value="Import" style="display: none;"
+ onchange="loadSnapshotPreflight();">
+ </label>
+ </p>
+ <div id="loadSnapshotStatus" class="alert alert-info" role="alert">
+ Browse for a snapshot file (or drag it and drop it here), then click <b>Import</b> to render it.
+ </div>
+ <p>
+ <table class="table">
+ <tbody>
+ <tr><th>Filename</th><td id="loadSnapshotFilename"></td></tr>
+ <tr><th>Hostname</th><td id="loadSnapshotHostname"></td></tr>
+ <tr><th>Origin URL</th><td id="loadSnapshotURL"></td></tr>
+ <tr><th>Charts Info</th><td id="loadSnapshotCharts"></td></tr>
+ <tr><th>Snapshot Info</th><td id="loadSnapshotInfo"></td></tr>
+ <tr><th>Time Range</th><td id="loadSnapshotTimeRange"></td></tr>
+ <tr><th>Comments</th><td id="loadSnapshotComments"></td></tr>
+ </tbody>
+ </table>
+ </p>
+ </div>
+ <div class="modal-footer">
+ <span style="display: inline-block; padding-right: 20px;">Snapshot files contain both data and javascript code. Make sure <b>you trust the files</b> you import!</span>
+ <a id="loadSnapshotImport" href="#" onclick="loadSnapshot(); return false;" type="button" class="btn btn-success disabled">Import</a>
+ <button type="button" class="btn btn-default" data-dismiss="modal">Close</button>
+ </div>
+ </div>
+ </div>
+ </div>
+
+ <div class="modal fade" id="saveSnapshotModal" tabindex="-1" role="dialog" aria-labelledby="saveSnapshotModalLabel" data-keyboard="false" data-backdrop="static">
+ <div class="modal-dialog modal-lg" role="document">
+ <div class="modal-content">
+ <div class="modal-header">
+ <button type="button" class="close" data-dismiss="modal" aria-label="Close"><span aria-hidden="true">&times;</span></button>
+ <h4 class="modal-title" id="saveSnapshotModalLabel">Export a snapshot</h4>
+ </div>
+ <div class="modal-body">
+ <div id="saveSnapshotModalProgressSection" hidden>
+ Please wait while we collect all the dashboard data...
+ <div class="progress progress-striped active" style="height: 2em !important;">
+ <div id="saveSnapshotModalProgressBar" class="progress-bar progress-bar-info" role="progressbar" aria-valuenow="0" aria-valuemin="0" aria-valuemax="100" style="min-width: 2em;">
+ <span id="saveSnapshotModalProgressBarText" style="padding-left: 10px; padding-top: 4px; font-size: 1.2em; text-align: left; width: 100%; position: absolute; display: block;"></span>
+ </div>
+ </div>
+ </div>
+
+ <div id="saveSnapshotResolutionRadio" style="text-align: center;">
+ Select the desired resolution of the snapshot. This is the <b>seconds of data per point</b>.
+ <br/>
+ &nbsp;
+ <br/>
+ &nbsp;
+ <br/>
+ <input id="saveSnapshotResolutionSlider" data-slider-id='saveSnapshotResolutionSlider' type="text" style="width: 80%;" tabindex="0"/>
+ <br/>&nbsp;<br/>
+ <div class="input-group">
+ <span class="input-group-addon" id="sizing-saveSnapshotFilename" style="width: 100px;">Filename</span>
+ <input id="saveSnapshotFilename" class="form-control" placeholder="Filename of the generated snapshot" aria-describedby="sizing-saveSnapshotFilename" tabindex="2"/>
+ <div class="input-group-btn">
+ <div class="input-group-btn">
+ <button type="button" class="btn btn-default dropdown-toggle" data-toggle="dropdown" aria-haspopup="true" aria-expanded="false"><span id="saveSnapshotCompressionName">Compression</span> <span class="caret"></span></button>
+ <ul class="dropdown-menu dropdown-menu-right">
+ <li class="disabled"><a href="#" class="disabled">Select Compression</a></li>
+ <li role="separator" class="divider"></li>
+ <li><a href="#" onclick="saveSnapshotSetCompression('none'); return false;">uncompressed</a></li>
+ <li role="separator" class="divider"></li>
+ <li><a href="#" onclick="saveSnapshotSetCompression('pako.deflate'); return false;">pako.deflate (gzip, binary)</a></li>
+ <li><a href="#" onclick="saveSnapshotSetCompression('pako.deflate.base64'); return false;">pako.deflate.base64 (gzip, ascii)</a></li>
+ <li role="separator" class="divider"></li>
+ <li><a href="#" onclick="saveSnapshotSetCompression('lzstring.uri'); return false;">lzstring.uri (LZ, ascii)</a></li>
+ <li><a href="#" onclick="saveSnapshotSetCompression('lzstring.utf16'); return false;">lzstring.utf16 (LZ, utf16)</a></li>
+ <li><a href="#" onclick="saveSnapshotSetCompression('lzstring.base64'); return false;">lzstring.base64 (LZ, ascii)</a></li>
+ </ul>
+ </div><!-- /btn-group -->
+ </div>
+ </div>
+ <div class="input-group" style="padding-top: 10px; width: 100%">
+ <span class="input-group-addon" id="sizing-saveSnapshotComments" style="width: 100px;">Comments</span>
+ <input id="saveSnapshotComments" class="form-control" placeholder="Any comments about this snapshot?" aria-describedby="sizing-saveSnapshotComments" tabindex="3"/>
+ </div>
+ </div>
+
+ &nbsp;
+ <div id="saveSnapshotStatus" class="alert alert-info" role="alert">
+                        Select the snapshot resolution. This controls the size of the snapshot file.
+ </div>
+ <p>
+ The generated snapshot will include all charts of this dashboard, <b>for the visible timeframe</b>, so align, pan and zoom the charts as needed.
+ The scroll position of the dashboard will also be saved.
+ The snapshot will be downloaded as a file, to your computer, that can be imported back into any netdata dashboard (no need to import it back on this server).
+ </p>
+ <p>
+ <small>
+ Snapshot files include all the information of the dashboard, including the URL of the origin server, its netdata unique ID, etc.
+ So, if you share the snapshot file with third parties, they will be able to access the origin server, if this server is exposed on the internet.
+ <br/>
+ Snapshots are handled entirely by the web browser. The netdata servers are not aware of them.
+ </small>
+ </p>
+ </div>
+ <div class="modal-footer">
+ <a id="saveSnapshotExport" href="#" onclick="saveSnapshot(); return false;" type="button" class="btn btn-success" tabindex="4">Export</a>
+ <button type="button" class="btn btn-default" data-dismiss="modal" tabindex="5">Cancel</button>
+ </div>
+ </div>
+ </div>
+ </div>
+
+ <div class="modal fade" id="welcomeModal" tabindex="-1" role="dialog" aria-labelledby="welcomeModalLabel">
+ <div class="modal-dialog modal-lg" role="document">
+ <div class="modal-content">
+ <div class="modal-header">
+ <button type="button" class="close" data-dismiss="modal" aria-label="Close"><span aria-hidden="true">&times;</span></button>
+ <h4 class="modal-title" id="welcomeModalLabel">Welcome to the world of netdata</h4>
+ </div>
+ <div class="modal-body">
+ <div class="p">
+ <div style="width: 100%; text-align: center; padding-top: 10px; padding-bottom: 10px; font-size: 18px;">
+ if there is a metric for something, we want it visualised<br/>
+ and we want this visualisation to be <strong>real-time</strong>, <strong>efficient</strong> and <strong>awesome</strong>
+ </div>
+ </div>
+ <div class="p">
+ <b><a href="https://github.com/netdata/netdata/wiki" target="_blank">netdata</a></b>
+ is a new way to monitor your systems and applications, to get <strong>real-time insights</strong>
+ of what is really happening and what affects performance.
+                    It is carefully optimised to be a real-time system, without interfering in any way
+                    with the core function of your systems.
+ </div>
+ <div class="p">
+ <b><a href="https://github.com/netdata/netdata/wiki" target="_blank">netdata</a></b>
+ has been designed to monitor <strong>massive amounts of metrics, per server, per second</strong>.
+ When installed, it might come up with 1k to 3k metrics, but we have been testing it with 100k
+                    metrics, all collected per second, and still the CPU utilisation remained negligible.
+                    <br/>
+                    We have also tried to give each metric a meaning and a context.
+ We have grouped and categorized all metrics into meaningful charts, providing a
+ better understanding of the underlying technologies and mechanisms.
+ </div>
+ <div class="p">
+ <b><a href="https://github.com/netdata/netdata/wiki" target="_blank">netdata</a></b> is free,
+ open-source software. If you decide to use it,
+ <strong><a href="https://github.com/netdata/netdata/tree/master/docs/a-github-star-is-important.md" target="_blank">it is important to give netdata a star on GitHub</a></strong>.
+ </div>
+ <div class="p">
+ Enjoy real-time performance monitoring!
+ </div>
+ <div class="p">
+ Costa Tsaousis
+ </div>
+ </div>
+ <div class="modal-footer">
+ <button type="button" class="btn btn-default" data-dismiss="modal">Close</button>
+ </div>
+ </div>
+ </div>
+ </div>
+
+ <div class="modal fade" id="helpModal" tabindex="-1" role="dialog" aria-labelledby="helpModalLabel">
+ <div class="modal-dialog modal-lg" role="document">
+ <div class="modal-content">
+ <div class="modal-header">
+ <button type="button" class="close" data-dismiss="modal" aria-label="Close"><span aria-hidden="true">&times;</span></button>
+ <h4 class="modal-title" id="helpModalLabel">Dashboard Help</h4>
+ </div>
+ <div class="modal-body">
+
+ <h4>Dygraphs (line, area and stacked area charts)</h4>
+
+ <!-- Nav tabs -->
+ <ul class="nav nav-tabs" role="tablist">
+ <li role="presentation" class="active"><a href="#help_mouse" aria-controls="help_mouse" role="tab" data-toggle="tab">Mouse Interface</a></li>
+ <li role="presentation"><a href="#help_touch" aria-controls="help_touch" role="tab" data-toggle="tab">Touch Interface</a></li>
+ </ul>
+
+ <!-- Tab panes -->
+ <div class="tab-content">
+ <div role="tabpanel" class="tab-pane active" id="help_mouse">
+ <div class="p">
+ <h4>Mouse Over / Hover</h4>
+ Hover over a chart to show, at its legend, the values for the timestamp under the mouse (the chart will also highlight the corresponding point on the chart).
+ <br/>
+ All the other visible charts will also show and highlight their values for the same timestamp.
+ </div>
+ <hr/>
+ <div class="p">
+ <h4>Drag Chart Contents</h4>
+ Drag the contents of a chart, by pressing the left mouse button and moving the mouse, to pan it horizontally.
+ <br/>
+ All the charts will follow shortly after you release the chart (this little delay is by design: it speeds up your browser and lets you focus on what you are exploring).
+ <br/>
+ Once a chart is panned, auto refreshing stops for all charts. To enable it again, <b>double click</b> a panned chart.
+ </div>
+ <hr/>
+ <div class="p">
+ <h4>Double Click</h4>
+ Double Click a chart to reset all the charts to their default auto-refreshing state.
+ </div>
+ <hr/>
+ <div class="p">
+ <h4>SHIFT + Drag</h4>
+ While pressing the <code>SHIFT</code> key, press the left mouse button on the contents of a chart and move the mouse to select an area, to zoom in. The other charts will follow too. Zooming is performed in two phases:
+ <ul>
+ <li>The already loaded chart contents are zoomed (low resolution)</li>
+ <li>New data are transferred from the netdata server, to refresh the chart with possibly more detail.</li>
+ </ul>
+ Once a chart is zoomed, auto refreshing stops for all charts. To enable it again, <b>double click</b> a zoomed chart.
+ </div>
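+ <!--
+ Developer note: a minimal sketch of the two zoom phases described above, assuming
+ the chart is a Dygraph instance and that phase two queries the netdata v1 data API.
+ The chart id and point count are placeholders.
+
+ function zoomTo(chart, afterMs, beforeMs) {
+     // phase 1: redraw only the already loaded points (low resolution, instant)
+     chart.updateOptions({ dateWindow: [afterMs, beforeMs] });
+
+     // phase 2: fetch fresh, possibly more detailed data for the new window
+     var url = '/api/v1/data?chart=system.cpu'
+         + '&after=' + Math.round(afterMs / 1000)
+         + '&before=' + Math.round(beforeMs / 1000)
+         + '&points=300&format=json';
+     fetch(url).then(function(r) { return r.json(); })
+               .then(function(d) { chart.updateOptions({ file: d.data }); });
+ }
+ -->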
+ <hr/>
+ <div class="p">
+ <h4>Highlight Timeframe</h4>
+ While pressing the <code>ALT</code> key, press the left mouse button on the contents of a chart and move the mouse to select an area. The selected area will be highlighted on all charts.
+ </div>
+ <hr/>
+ <div class="p">
+ <h4>SHIFT + Mouse Wheel</h4>
+ While pressing the <code>SHIFT</code> key and with the mouse pointer over the contents of a chart, scroll the mouse wheel to zoom in or out. This kind of zooming is centered on the mouse pointer. The other charts will follow too.
+ <br/>
+ Once a chart is zoomed, auto refreshing stops for all charts. To enable it again, <b>double click</b> a zoomed chart.
+ </div>
+ <hr/>
+ <div class="p">
+ <h4>Legend Operations</h4>
+ Clicking on the label or value of a dimension will select / un-select this dimension.
+ <br/>
+ You can hold the SHIFT or CONTROL key and click on legend labels or values to select / un-select multiple dimensions.
+ </div>
+ </div>
+ <div role="tabpanel" class="tab-pane" id="help_touch">
+ <div class="p">
+ <h4>Single Tap</h4>
+ Single tap on the contents of a chart to show, at its legend, the values for the timestamp tapped (the chart will also highlight the corresponding point on the chart).
+ <br/>
+ All the other visible charts will also show and highlight their values for the same timestamp.
+ </div>
+ <hr/>
+ <div class="p">
+ <h4>Drag Chart Contents</h4>
+ Touch and Drag the contents of a chart to pan it horizontally.
+ <br/>
+ All the charts will follow shortly after you stop dragging (this little delay is by design: it speeds up your browser and lets you focus on what you are exploring).
+ <br/>
+ Once a chart is panned, auto refreshing stops for all charts. To enable it again, <b>double tap</b> a panned chart.
+ </div>
+ <hr/>
+ <div class="p">
+ <h4>Double Tap</h4>
+ Double tap a chart to reset all the charts to their default auto-refreshing state.
+ </div>
+ <hr/>
+ <div class="p">
+ <h4>Zoom <small>(does not work on Firefox and IE/Edge)</small></h4>
+ With two fingers, zoom in or out.
+ <br/>
+ Once a chart is zoomed, auto refreshing stops for all charts. To enable it again, <b>double tap</b> a zoomed chart.
+ </div>
+ <hr/>
+ <div class="p">
+ <h4>Legend Operations</h4>
+ Tapping on the label or value of a dimension will select / un-select this dimension.
+ </div>
+ </div>
+ </div>
+ </div>
+ <div class="modal-footer">
+ <button type="button" class="btn btn-default" data-dismiss="modal">Close</button>
+ </div>
+ </div>
+ </div>
+ </div>
+
+ <div class="modal fade" id="alarmsModal" tabindex="-1" role="dialog" aria-labelledby="alarmsModalLabel">
+ <div class="modal-dialog modal-lg" role="document" style="display: table;"> <!-- allow the modal to expand horizontally -->
+ <div class="modal-content">
+ <div class="modal-header">
+ <button type="button" class="close" data-dismiss="modal" aria-label="Close"><span aria-hidden="true">&times;</span></button>
+ <h4 class="modal-title" id="alarmsModalLabel">netdata alarms</h4>
+ </div>
+ <div class="modal-body">
+ <!-- Nav tabs -->
+ <ul class="nav nav-tabs" role="tablist">
+ <li role="presentation" class="active"><a href="#alarms_active" aria-controls="alarms_active" role="tab" data-toggle="tab">Active</a></li>
+ <li role="presentation"><a href="#alarms_all" aria-controls="alarms_all" role="tab" data-toggle="tab">All</a></li>
+ <li role="presentation"><a href="#alarms_log" aria-controls="alarms_log" role="tab" data-toggle="tab">Log</a></li>
+ </ul>
+
+ <!-- Tab panes -->
+ <div class="tab-content">
+ <div role="tabpanel" class="tab-pane active" id="alarms_active">
+ loading...
+ </div>
+ <div role="tabpanel" class="tab-pane" id="alarms_all">
+ loading...
+ </div>
+ <div role="tabpanel" class="tab-pane" id="alarms_log">
+ loading...
+ </div>
+ </div>
+ </div>
+ <div class="modal-footer">
+ <!-- <a href="#" onclick="alarmsUpdateModal(); return false;" type="button" class="btn btn-default">Update</a> -->
+ <button type="button" class="btn btn-default" data-dismiss="modal">Close</button>
+ </div>
+ </div>
+ </div>
+ </div>
+
+ <div class="modal fade" id="optionsModal" tabindex="-1" role="dialog" aria-labelledby="optionsModalLabel">
+ <div class="modal-dialog modal-lg" role="document">
+ <div class="modal-content">
+ <div class="modal-header">
+ <button type="button" class="close" data-dismiss="modal" aria-label="Close"><span aria-hidden="true">&times;</span></button>
+ <h4 class="modal-title" id="optionsModalLabel">netdata dashboard options</h4>
+ </div>
+ <div class="modal-body">
+ <center>
+ <small style="color: #BBBBBB;">These are browser settings; each viewer has their own. They do not affect the operation of your netdata server.
+ <br/>
+ Settings take effect immediately and are saved permanently to browser local storage (except the refresh on focus / always option).
+ <br/>
+ To reset all options (including charts sizes) to their defaults, click <a href="#" onclick="resetDashboardOptions(); return false;">here</a>.</small>
+ </center>
+ <div style="padding: 10px;"></div>
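+ <!--
+ Developer note: a minimal sketch of the "saved permanently to browser local storage"
+ behaviour mentioned above. The storage key prefix and option names are illustrative
+ assumptions, not the dashboard's actual identifiers.
+
+ function saveOption(name, value) {
+     localStorage.setItem('netdataOption.' + name, JSON.stringify(value));
+ }
+ function loadOption(name, defaultValue) {
+     var v = localStorage.getItem('netdataOption.' + name);
+     return (v === null) ? defaultValue : JSON.parse(v);
+ }
+ // e.g. saveOption('theme', 'slate'); loadOption('theme', 'white');
+ -->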
+
+ <!-- Nav tabs -->
+ <ul class="nav nav-tabs" role="tablist">
+ <li role="presentation" class="active"><a href="#settings_performance" aria-controls="settings_performance" role="tab" data-toggle="tab">Performance</a></li>
+ <li role="presentation"><a href="#settings_sync" aria-controls="settings_sync" role="tab" data-toggle="tab">Synchronization</a></li>
+ <li role="presentation"><a href="#settings_visual" aria-controls="settings_visual" role="tab" data-toggle="tab">Visual</a></li>
+ <li role="presentation"><a href="#settings_locale" aria-controls="settings_locale" role="tab" data-toggle="tab">Locale</a></li>
+ </ul>
+
+ <!-- Tab panes -->
+ <div class="tab-content">
+ <div role="tabpanel" class="tab-pane active" id="settings_performance">
+ <form id="optionsForm1" method="get" class="form-horizontal">
+ <div class="form-group">
+ <table>
+ <tr class="option-row">
+ <td class="option-control"><input id="stop_updates_when_focus_is_lost" type="checkbox" checked data-toggle="toggle" data-offstyle="danger" data-onstyle="success" data-on="On Focus" data-off="Always" data-width="110px"></td>
+ <td class="option-info"><strong>When to refresh the charts?</strong><br/>
+ <small>When set to <b>On Focus</b>, the charts will stop being updated if the page / tab does not have the focus of the user. When set to <b>Always</b>, the charts will always be refreshed. Set it to <b>On Focus</b> to lower the CPU requirements of the browser (and extend the battery life of laptops and tablets) when this page does not have your focus. Set it to <b>Always</b> to work in another window (e.g. to change the settings of something) and have the charts auto-refresh in this window.</small>
+ </td>
+ </tr>
+ <tr class="option-row">
+ <td class="option-control">
+ <input id="eliminate_zero_dimensions" type="checkbox" checked data-toggle="toggle" data-on="Non Zero" data-off="All" data-width="110px">
+ </td>
+ <td class="option-info"><strong>Which dimensions to show?</strong><br/>
+ <small>When set to <b>Non Zero</b>, dimensions that have all their values (within the current view) set to zero will not be transferred from the netdata server (except if all dimensions of the chart are zero, in which case this setting does nothing - all dimensions are transferred and shown). When set to <b>All</b>, all dimensions will always be shown. Set it to <b>Non Zero</b> to lower the data transferred between netdata and your browser, lower the CPU requirements of your browser (fewer lines to draw) and increase the focus on the legends (fewer entries at the legends).</small>
+ </td>
+ </tr>
+ <tr class="option-row">
+ <td class="option-control"><input id="destroy_on_hide" type="checkbox" data-toggle="toggle" data-on="Destroy" data-off="Hide" data-width="110px"></td>
+ <td class="option-info"><strong>How to handle hidden charts?</strong><br/>
+ <small>When set to <b>Destroy</b>, charts that are not in the current viewport of the browser (are above or below the visible area of the page) will be destroyed and re-created if and when they become visible again. When set to <b>Hide</b>, the not-visible charts will just be hidden, to simplify the DOM and speed up your browser. Set it to <b>Destroy</b> to lower the memory requirements of your browser. Set it to <b>Hide</b> for faster restoration of charts on page scrolling.</small>
+ </td>
+ </tr>
+ <tr class="option-row">
+ <td class="option-control"><input id="async_on_scroll" type="checkbox" data-toggle="toggle" data-on="Async" data-off="Sync" data-width="110px"></td>
+ <td class="option-info"><strong>Page scroll handling?</strong><br/>
+ <small>When set to <b>Sync</b>, charts will be examined for their visibility immediately after scrolling. On slow computers this may impact the smoothness of page scrolling. To update the page when scrolling ends, set it to <b>Async</b>. Set it to <b>Sync</b> for immediate chart updates when scrolling. Set it to <b>Async</b> for smoother page scrolling on slower computers.</small>
+ </td>
+ </tr>
+ </table>
+ </div>
+ </form>
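+ <!--
+ Developer note: a minimal sketch of the "On Focus" refresh policy in the first row
+ above, using the standard Page Visibility API. pauseChartRefreshes() and
+ resumeChartRefreshes() are placeholders.
+
+ var stopUpdatesWhenFocusIsLost = true; // the toggle above
+ document.addEventListener('visibilitychange', function() {
+     if (!stopUpdatesWhenFocusIsLost) return; // "Always": keep refreshing
+     if (document.hidden) pauseChartRefreshes();
+     else resumeChartRefreshes();
+ });
+ -->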
+ </div>
+ <div role="tabpanel" class="tab-pane" id="settings_sync">
+ <form id="optionsForm2" method="get" class="form-horizontal">
+ <div class="form-group">
+ <table>
+ <tr class="option-row">
+ <td class="option-control"><input id="parallel_refresher" type="checkbox" checked data-toggle="toggle" data-on="Parallel" data-off="Sequential" data-width="110px"></td>
+ <td class="option-info"><strong>Which chart refresh policy to use?</strong><br/>
+ <small>When set to <b>Parallel</b>, visible charts are refreshed in parallel (all queries are sent to the netdata server in parallel) and are rendered asynchronously. When set to <b>Sequential</b>, charts are refreshed one after another. Set it to <b>Parallel</b> if your browser can cope with it (most modern browsers do); set it to <b>Sequential</b> if you work on an older/slower computer.</small>
+ </td>
+ </tr>
+ <tr class="option-row" id="concurrent_refreshes_row">
+ <td class="option-control"><input id="concurrent_refreshes" type="checkbox" checked data-toggle="toggle" data-on="Resync" data-off="Best Effort" data-width="110px"></td>
+ <td class="option-info"><strong>Shall we re-sync chart refreshes?</strong><br/>
+ <small>When set to <b>Resync</b>, the dashboard will attempt to re-synchronize all the charts so that they are refreshed concurrently. When set to <b>Best Effort</b>, each chart may be refreshed with a slight time difference from the others. Normally, the dashboard starts refreshing them in parallel, but depending on the speed of your computer and the network latencies, the charts may drift slightly apart over time. Setting this to <b>Resync</b> will attempt to re-synchronize the charts on every update. Setting it to <b>Best Effort</b> may lower the pressure on your browser and the network.</small>
+ </td>
+ </tr>
+ <tr class="option-row">
+ <td class="option-control"><input id="sync_selection" type="checkbox" checked data-toggle="toggle" data-on="Sync" data-off="Don't Sync" data-onstyle="success" data-offstyle="danger" data-width="110px"></td>
+ <td class="option-info"><strong>Sync hover selection on all charts?</strong><br/>
+ <small>When enabled, a selection on one chart will automatically select the same time on all other visible charts, and the legends of all visible charts will be updated to show the selected values. When disabled, only the chart getting the user's attention will be selected. Enable it to get better insights into the data. Disable it if you are on a very slow computer that cannot keep up.</small>
+ </td>
+ </tr>
+ <tr class="option-row">
+ <td class="option-control"><input id="sync_pan_and_zoom" type="checkbox" checked data-toggle="toggle" data-on="Sync" data-off="Don't Sync" data-onstyle="success" data-offstyle="danger" data-width="110px"></td>
+ <td class="option-info"><strong>Sync pan and zoom on all charts?</strong><br/>
+ <small>When enabled, pan and zoom operations on a chart will be replicated to all charts, even the ones that are not currently visible (they will be updated once they become visible). When disabled, pan and zoom operations will not be propagated to other charts.</small>
+ </td>
+ </tr>
+ </table>
+ </div>
+ </form>
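+ <!--
+ Developer note: a minimal sketch of the Parallel / Sequential refresh policy in the
+ first row above. refreshChart() is a placeholder returning a Promise that resolves
+ when the chart's data query and rendering are done.
+
+ async function refreshAll(charts, parallel) {
+     if (parallel) {
+         // all data queries are sent to the netdata server at once
+         await Promise.all(charts.map(function(c) { return refreshChart(c); }));
+     } else {
+         // one after another, for older/slower computers
+         for (var i = 0; i < charts.length; i++) await refreshChart(charts[i]);
+     }
+ }
+ -->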
+ </div>
+ <div role="tabpanel" class="tab-pane" id="settings_visual">
+ <form id="optionsForm3" method="get" class="form-horizontal">
+ <div class="form-group">
+ <table>
+ <tr class="option-row">
+ <td class="option-control"><input id="netdata_theme_control" type="checkbox" checked data-toggle="toggle" data-offstyle="danger" data-onstyle="success" data-on="Dark" data-off="White" data-width="110px"></td>
+ <td class="option-info"><strong>Which theme to use?</strong><br/>
+ <small>Netdata comes with two themes: <b>Dark</b> (the default) and <b>White</b>.
+ <br/>
+ <b>Switching this will reload the dashboard</b>.
+ </small>
+ </td>
+ </tr>
+ <tr class="option-row">
+ <td class="option-control"><input id="show_help" type="checkbox" checked data-toggle="toggle" data-on="Help Me" data-off="No Help" data-width="110px"></td>
+ <td class="option-info"><strong>Do you need help?</strong><br/>
+ <small>Netdata can show help balloons in several areas of the dashboard. If these balloons bother you, disable them using this switch.
+ <br/>
+ <b>Switching this will reload the dashboard</b>.
+ </small>
+ </td>
+ </tr>
+ <tr class="option-row">
+ <td class="option-control"><input id="pan_and_zoom_data_padding" type="checkbox" checked data-toggle="toggle" data-on="Pad" data-off="Don't Pad" data-width="110px"></td>
+ <td class="option-info"><strong>Enable data padding when panning and zooming?</strong><br/>
+ <small>When set to <b>Pad</b>, the charts will be padded with more data, both before and after the visible area, thus giving the impression the whole database is loaded. This padding happens only after the first pan or zoom operation on the chart (initially all charts have only the visible data). When set to <b>Don't Pad</b>, only the visible data will be transferred from the netdata server, even after the first pan and zoom operation.</small>
+ </td>
+ </tr>
+ <tr class="option-row">
+ <td class="option-control"><input id="smooth_plot" type="checkbox" checked data-toggle="toggle" data-on="Smooth" data-off="Rough" data-width="110px"></td>
+ <td class="option-info"><strong>Enable Bézier lines on charts?</strong><br/>
+ <small>When set to <b>Smooth</b>, the chart libraries that support it will plot smooth curves instead of simple straight lines to connect the points.
+ <br/>
+ Keep in mind <a href="http://dygraphs.com" target="_blank">dygraphs</a>, the main charting library in netdata dashboards, can only smooth line charts. It cannot smooth area or stacked charts. When set to <b>Rough</b>, this setting can lower the CPU resources consumed by your browser.</small>
+ </td>
+ </tr>
+ </table>
+ </div>
+ </form>
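+ <!--
+ Developer note: a minimal sketch of the "Pad" behaviour described above: requesting
+ extra data before and after the visible window, so pans feel instant. The padding
+ factor is an illustrative assumption.
+
+ function paddedWindow(afterMs, beforeMs, pad) {
+     var duration = beforeMs - afterMs;
+     var extra = Math.round(duration * (pad || 0.5)); // e.g. 50% on each side
+     return { after: afterMs - extra, before: beforeMs + extra };
+ }
+ // e.g. paddedWindow(0, 600000) returns { after: -300000, before: 900000 }
+ -->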
+ </div>
+ <div role="tabpanel" class="tab-pane" id="settings_locale">
+ <form id="optionsForm4" method="get" class="form-horizontal">
+ <div class="form-group">
+ <table>
+ <tr class="option-row">
+ <td colspan="2" align="center">
+ <small>
+ <b>These settings are applied gradually, as charts are updated. To force them, refresh the dashboard now</b>.
+ </small>
+ </td>
+ </tr>
+ <tr class="option-row">
+ <td class="option-control"><input id="units_conversion" type="checkbox" checked data-toggle="toggle" data-on="Scale Units" data-off="Fixed Units" data-onstyle="success" data-width="110px"></td>
+ <td class="option-info"><strong>Enable auto-scaling of select units?</strong><br/>
+ <small>When set to <b>Scale Units</b>, the values shown will be dynamically scaled (e.g. 1000 kilobits will be shown as 1 megabit).
+ Netdata can auto-scale these original units: <code>kilobits/s</code>, <code>kilobytes/s</code>, <code>KB/s</code>,
+ <code>KB</code>, <code>MB</code>, and <code>GB</code>.
+ When set to <b>Fixed Units</b> all the values will be rendered using the original units maintained by the netdata server.
+ </small>
+ </td>
+ </tr>
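+ <!--
+ Developer note: a minimal sketch of the auto-scaling described above (e.g. 1000
+ kilobits shown as 1 megabit). The unit ladders are illustrative; only the base
+ units listed above are scaled.
+
+ function scaleUnits(value, unit) {
+     var ladders = {
+         'kilobits/s': ['kilobits/s', 'megabits/s', 'gigabits/s'],
+         'KB/s':       ['KB/s', 'MB/s', 'GB/s'],
+         'KB':         ['KB', 'MB', 'GB']
+     };
+     var ladder = ladders[unit];
+     if (!ladder) return { value: value, unit: unit }; // fixed units
+     var i = 0;
+     while (i < ladder.length - 1 && Math.abs(value) >= 1000) {
+         value /= 1000;
+         i += 1;
+     }
+     return { value: value, unit: ladder[i] };
+ }
+ // e.g. scaleUnits(2500000, 'kilobits/s') returns { value: 2.5, unit: 'gigabits/s' }
+ -->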
+ <tr id="settingsLocaleTempRow" class="option-row">
+ <td class="option-control"><input id="units_temp" type="checkbox" checked data-toggle="toggle" data-on="Celsius" data-off="Fahrenheit" data-width="110px"></td>
+ <td class="option-info"><strong>Which units to use for temperatures?</strong><br/>
+ <small>Set the temperature units of the dashboard.
+ </small>
+ </td>
+ </tr>
+ <tr id="settingsLocaleTimeRow" class="option-row">
+ <td class="option-control"><input id="seconds_as_time" type="checkbox" checked data-toggle="toggle" data-on="Time" data-off="Seconds" data-onstyle="success" data-width="110px"></td>
+ <td class="option-info"><strong>Convert seconds to time?</strong><br/>
+ <small>When set to <b>Time</b>, charts that present <code>seconds</code> will show <code>DDd:HH:MM:SS</code>.
+ When set to <b>Seconds</b>, the raw number of seconds will be presented.
+ </small>
+ </td>
+ </tr>
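+ <!--
+ Developer note: a minimal sketch of the seconds-to-time conversion described above,
+ producing the DDd:HH:MM:SS form.
+
+ function secondsAsTime(seconds) {
+     seconds = Math.round(seconds);
+     var d = Math.floor(seconds / 86400);
+     var h = Math.floor((seconds % 86400) / 3600);
+     var m = Math.floor((seconds % 3600) / 60);
+     var s = seconds % 60;
+     function pad(n) { return (n < 10 ? '0' : '') + n; }
+     return d + 'd:' + pad(h) + ':' + pad(m) + ':' + pad(s);
+ }
+ // e.g. secondsAsTime(90061) returns '1d:01:01:01'
+ -->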
+ <tr class="option-row">
+ <td class="option-control"><input id="local_timezone" type="checkbox" checked data-toggle="toggle" data-on="Your Time" data-off="Server Time" data-onstyle="success" data-offstyle="danger" data-width="110px"></td>
+ <td class="option-info"><strong>Show browser local time or server time?</strong><br/>
+ <small>When set to <b>Your Time</b>, the charts will use your browser's local time. When set to <b>Server Time</b>, the charts will use the server time.
+ <br/>
+ Set it to <b>Your Time</b> to have a common time-reference, no matter where the server is and which time zone it uses (all your dashboards will report your local time).
+ Set it to <b>Server Time</b> when you need to compare netdata charts with server log files.
+ <br/>
+ Your browser's time zone is: <b><span id="browser_timezone">-</span></b>.<br/>
+ The time zone reported by the server is: <b><span id="server_timezone">-</span></b> (you can set this in netdata.conf <code>[global].timezone</code>).<br/>
+ The current time zone on the dashboard is: <b><span id="current_timezone">-</span></b>.
+ <br/>
+ <div class="dropup">
+ <button class="btn btn-default dropdown-toggle" type="button" id="dropdownTimeZone" data-toggle="dropdown" aria-haspopup="true" aria-expanded="true">
+ Select Server Time Zone
+ <span class="caret"></span>
+ </button>
+ <ul class="dropdown-menu scrollable-menu-50" aria-labelledby="dropdownTimeZone">
+ <li><a href=# onclick="return selected_server_timezone('UTC');">Universal Time Coordinated (UTC)</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Africa/Abidjan');">Africa/Abidjan</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Africa/Accra');">Africa/Accra</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Africa/Algiers');">Africa/Algiers</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Africa/Bissau');">Africa/Bissau</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Africa/Cairo');">Africa/Cairo</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Africa/Casablanca');">Africa/Casablanca</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Africa/Ceuta');">Africa/Ceuta</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Africa/El_Aaiun');">Africa/El_Aaiun</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Africa/Johannesburg');">Africa/Johannesburg</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Africa/Khartoum');">Africa/Khartoum</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Africa/Lagos');">Africa/Lagos</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Africa/Maputo');">Africa/Maputo</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Africa/Monrovia');">Africa/Monrovia</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Africa/Nairobi');">Africa/Nairobi</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Africa/Ndjamena');">Africa/Ndjamena</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Africa/Tripoli');">Africa/Tripoli</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Africa/Tunis');">Africa/Tunis</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Africa/Windhoek');">Africa/Windhoek</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Adak');">America/Adak</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Anchorage');">America/Anchorage</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Araguaina');">America/Araguaina</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Argentina/Buenos_Aires');">America/Argentina/Buenos_Aires</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Argentina/Catamarca');">America/Argentina/Catamarca</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Argentina/Cordoba');">America/Argentina/Cordoba</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Argentina/Jujuy');">America/Argentina/Jujuy</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Argentina/La_Rioja');">America/Argentina/La_Rioja</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Argentina/Mendoza');">America/Argentina/Mendoza</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Argentina/Rio_Gallegos');">America/Argentina/Rio_Gallegos</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Argentina/Salta');">America/Argentina/Salta</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Argentina/San_Juan');">America/Argentina/San_Juan</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Argentina/San_Luis');">America/Argentina/San_Luis</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Argentina/Tucuman');">America/Argentina/Tucuman</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Argentina/Ushuaia');">America/Argentina/Ushuaia</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Asuncion');">America/Asuncion</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Atikokan');">America/Atikokan</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Bahia');">America/Bahia</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Bahia_Banderas');">America/Bahia_Banderas</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Barbados');">America/Barbados</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Belem');">America/Belem</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Belize');">America/Belize</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Blanc-Sablon');">America/Blanc-Sablon</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Boa_Vista');">America/Boa_Vista</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Bogota');">America/Bogota</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Boise');">America/Boise</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Cambridge_Bay');">America/Cambridge_Bay</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Campo_Grande');">America/Campo_Grande</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Cancun');">America/Cancun</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Caracas');">America/Caracas</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Cayenne');">America/Cayenne</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Chicago');">America/Chicago</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Chihuahua');">America/Chihuahua</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Costa_Rica');">America/Costa_Rica</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Creston');">America/Creston</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Cuiaba');">America/Cuiaba</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Curacao');">America/Curacao</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Danmarkshavn');">America/Danmarkshavn</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Dawson');">America/Dawson</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Dawson_Creek');">America/Dawson_Creek</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Denver');">America/Denver</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Detroit');">America/Detroit</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Edmonton');">America/Edmonton</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Eirunepe');">America/Eirunepe</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/El_Salvador');">America/El_Salvador</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Fortaleza');">America/Fortaleza</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Fort_Nelson');">America/Fort_Nelson</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Glace_Bay');">America/Glace_Bay</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Godthab');">America/Godthab</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Goose_Bay');">America/Goose_Bay</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Grand_Turk');">America/Grand_Turk</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Guatemala');">America/Guatemala</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Guayaquil');">America/Guayaquil</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Guyana');">America/Guyana</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Halifax');">America/Halifax</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Havana');">America/Havana</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Hermosillo');">America/Hermosillo</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Indiana/Indianapolis');">America/Indiana/Indianapolis</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Indiana/Knox');">America/Indiana/Knox</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Indiana/Marengo');">America/Indiana/Marengo</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Indiana/Petersburg');">America/Indiana/Petersburg</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Indiana/Tell_City');">America/Indiana/Tell_City</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Indiana/Vevay');">America/Indiana/Vevay</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Indiana/Vincennes');">America/Indiana/Vincennes</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Indiana/Winamac');">America/Indiana/Winamac</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Inuvik');">America/Inuvik</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Iqaluit');">America/Iqaluit</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Jamaica');">America/Jamaica</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Juneau');">America/Juneau</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Kentucky/Louisville');">America/Kentucky/Louisville</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Kentucky/Monticello');">America/Kentucky/Monticello</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/La_Paz');">America/La_Paz</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Lima');">America/Lima</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Los_Angeles');">America/Los_Angeles</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Maceio');">America/Maceio</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Managua');">America/Managua</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Manaus');">America/Manaus</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Martinique');">America/Martinique</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Matamoros');">America/Matamoros</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Mazatlan');">America/Mazatlan</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Menominee');">America/Menominee</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Merida');">America/Merida</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Metlakatla');">America/Metlakatla</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Mexico_City');">America/Mexico_City</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Miquelon');">America/Miquelon</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Moncton');">America/Moncton</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Monterrey');">America/Monterrey</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Montevideo');">America/Montevideo</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Nassau');">America/Nassau</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/New_York');">America/New_York</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Nipigon');">America/Nipigon</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Nome');">America/Nome</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Noronha');">America/Noronha</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/North_Dakota/Beulah');">America/North_Dakota/Beulah</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/North_Dakota/Center');">America/North_Dakota/Center</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/North_Dakota/New_Salem');">America/North_Dakota/New_Salem</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Ojinaga');">America/Ojinaga</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Panama');">America/Panama</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Pangnirtung');">America/Pangnirtung</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Paramaribo');">America/Paramaribo</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Phoenix');">America/Phoenix</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Port-au-Prince');">America/Port-au-Prince</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Port_of_Spain');">America/Port_of_Spain</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Porto_Velho');">America/Porto_Velho</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Puerto_Rico');">America/Puerto_Rico</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Punta_Arenas');">America/Punta_Arenas</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Rainy_River');">America/Rainy_River</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Rankin_Inlet');">America/Rankin_Inlet</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Recife');">America/Recife</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Regina');">America/Regina</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Resolute');">America/Resolute</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Rio_Branco');">America/Rio_Branco</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Santarem');">America/Santarem</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Santiago');">America/Santiago</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Santo_Domingo');">America/Santo_Domingo</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Sao_Paulo');">America/Sao_Paulo</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Scoresbysund');">America/Scoresbysund</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Sitka');">America/Sitka</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/St_Johns');">America/St_Johns</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Swift_Current');">America/Swift_Current</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Tegucigalpa');">America/Tegucigalpa</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Thule');">America/Thule</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Thunder_Bay');">America/Thunder_Bay</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Tijuana');">America/Tijuana</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Toronto');">America/Toronto</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Vancouver');">America/Vancouver</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Whitehorse');">America/Whitehorse</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Winnipeg');">America/Winnipeg</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Yakutat');">America/Yakutat</a></li>
+ <li><a href=# onclick="return selected_server_timezone('America/Yellowknife');">America/Yellowknife</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Antarctica/Casey');">Antarctica/Casey</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Antarctica/Davis');">Antarctica/Davis</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Antarctica/DumontDUrville');">Antarctica/DumontDUrville</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Antarctica/Macquarie');">Antarctica/Macquarie</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Antarctica/Mawson');">Antarctica/Mawson</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Antarctica/Palmer');">Antarctica/Palmer</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Antarctica/Rothera');">Antarctica/Rothera</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Antarctica/Syowa');">Antarctica/Syowa</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Antarctica/Troll');">Antarctica/Troll</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Antarctica/Vostok');">Antarctica/Vostok</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Asia/Almaty');">Asia/Almaty</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Asia/Amman');">Asia/Amman</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Asia/Anadyr');">Asia/Anadyr</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Asia/Aqtau');">Asia/Aqtau</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Asia/Aqtobe');">Asia/Aqtobe</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Asia/Ashgabat');">Asia/Ashgabat</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Asia/Atyrau');">Asia/Atyrau</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Asia/Baghdad');">Asia/Baghdad</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Asia/Baku');">Asia/Baku</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Asia/Bangkok');">Asia/Bangkok</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Asia/Barnaul');">Asia/Barnaul</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Asia/Beirut');">Asia/Beirut</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Asia/Bishkek');">Asia/Bishkek</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Asia/Brunei');">Asia/Brunei</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Asia/Chita');">Asia/Chita</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Asia/Choibalsan');">Asia/Choibalsan</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Asia/Colombo');">Asia/Colombo</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Asia/Damascus');">Asia/Damascus</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Asia/Dhaka');">Asia/Dhaka</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Asia/Dili');">Asia/Dili</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Asia/Dubai');">Asia/Dubai</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Asia/Dushanbe');">Asia/Dushanbe</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Asia/Famagusta');">Asia/Famagusta</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Asia/Gaza');">Asia/Gaza</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Asia/Hebron');">Asia/Hebron</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Asia/Ho_Chi_Minh');">Asia/Ho_Chi_Minh</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Asia/Hong_Kong');">Asia/Hong_Kong</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Asia/Hovd');">Asia/Hovd</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Asia/Irkutsk');">Asia/Irkutsk</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Asia/Jakarta');">Asia/Jakarta</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Asia/Jayapura');">Asia/Jayapura</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Asia/Jerusalem');">Asia/Jerusalem</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Asia/Kabul');">Asia/Kabul</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Asia/Kamchatka');">Asia/Kamchatka</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Asia/Karachi');">Asia/Karachi</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Asia/Kathmandu');">Asia/Kathmandu</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Asia/Khandyga');">Asia/Khandyga</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Asia/Kolkata');">Asia/Kolkata</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Asia/Krasnoyarsk');">Asia/Krasnoyarsk</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Asia/Kuala_Lumpur');">Asia/Kuala_Lumpur</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Asia/Kuching');">Asia/Kuching</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Asia/Macau');">Asia/Macau</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Asia/Magadan');">Asia/Magadan</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Asia/Makassar');">Asia/Makassar</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Asia/Manila');">Asia/Manila</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Asia/Nicosia');">Asia/Nicosia</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Asia/Novokuznetsk');">Asia/Novokuznetsk</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Asia/Novosibirsk');">Asia/Novosibirsk</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Asia/Omsk');">Asia/Omsk</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Asia/Oral');">Asia/Oral</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Asia/Pontianak');">Asia/Pontianak</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Asia/Pyongyang');">Asia/Pyongyang</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Asia/Qatar');">Asia/Qatar</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Asia/Qyzylorda');">Asia/Qyzylorda</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Asia/Riyadh');">Asia/Riyadh</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Asia/Sakhalin');">Asia/Sakhalin</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Asia/Samarkand');">Asia/Samarkand</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Asia/Seoul');">Asia/Seoul</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Asia/Shanghai');">Asia/Shanghai</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Asia/Singapore');">Asia/Singapore</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Asia/Srednekolymsk');">Asia/Srednekolymsk</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Asia/Taipei');">Asia/Taipei</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Asia/Tashkent');">Asia/Tashkent</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Asia/Tbilisi');">Asia/Tbilisi</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Asia/Tehran');">Asia/Tehran</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Asia/Thimphu');">Asia/Thimphu</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Asia/Tokyo');">Asia/Tokyo</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Asia/Tomsk');">Asia/Tomsk</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Asia/Ulaanbaatar');">Asia/Ulaanbaatar</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Asia/Urumqi');">Asia/Urumqi</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Asia/Ust-Nera');">Asia/Ust-Nera</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Asia/Vladivostok');">Asia/Vladivostok</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Asia/Yakutsk');">Asia/Yakutsk</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Asia/Yangon');">Asia/Yangon</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Asia/Yekaterinburg');">Asia/Yekaterinburg</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Asia/Yerevan');">Asia/Yerevan</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Atlantic/Azores');">Atlantic/Azores</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Atlantic/Bermuda');">Atlantic/Bermuda</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Atlantic/Canary');">Atlantic/Canary</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Atlantic/Cape_Verde');">Atlantic/Cape_Verde</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Atlantic/Faroe');">Atlantic/Faroe</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Atlantic/Madeira');">Atlantic/Madeira</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Atlantic/Reykjavik');">Atlantic/Reykjavik</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Atlantic/South_Georgia');">Atlantic/South_Georgia</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Atlantic/Stanley');">Atlantic/Stanley</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Australia/Adelaide');">Australia/Adelaide</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Australia/Brisbane');">Australia/Brisbane</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Australia/Broken_Hill');">Australia/Broken_Hill</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Australia/Currie');">Australia/Currie</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Australia/Darwin');">Australia/Darwin</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Australia/Eucla');">Australia/Eucla</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Australia/Hobart');">Australia/Hobart</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Australia/Lindeman');">Australia/Lindeman</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Australia/Lord_Howe');">Australia/Lord_Howe</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Australia/Melbourne');">Australia/Melbourne</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Australia/Perth');">Australia/Perth</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Australia/Sydney');">Australia/Sydney</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Europe/Amsterdam');">Europe/Amsterdam</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Europe/Andorra');">Europe/Andorra</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Europe/Astrakhan');">Europe/Astrakhan</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Europe/Athens');">Europe/Athens</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Europe/Belgrade');">Europe/Belgrade</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Europe/Berlin');">Europe/Berlin</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Europe/Brussels');">Europe/Brussels</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Europe/Bucharest');">Europe/Bucharest</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Europe/Budapest');">Europe/Budapest</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Europe/Chisinau');">Europe/Chisinau</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Europe/Copenhagen');">Europe/Copenhagen</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Europe/Dublin');">Europe/Dublin</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Europe/Gibraltar');">Europe/Gibraltar</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Europe/Helsinki');">Europe/Helsinki</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Europe/Istanbul');">Europe/Istanbul</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Europe/Kaliningrad');">Europe/Kaliningrad</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Europe/Kiev');">Europe/Kiev</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Europe/Kirov');">Europe/Kirov</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Europe/Lisbon');">Europe/Lisbon</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Europe/London');">Europe/London</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Europe/Luxembourg');">Europe/Luxembourg</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Europe/Madrid');">Europe/Madrid</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Europe/Malta');">Europe/Malta</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Europe/Minsk');">Europe/Minsk</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Europe/Monaco');">Europe/Monaco</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Europe/Moscow');">Europe/Moscow</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Europe/Oslo');">Europe/Oslo</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Europe/Paris');">Europe/Paris</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Europe/Prague');">Europe/Prague</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Europe/Riga');">Europe/Riga</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Europe/Rome');">Europe/Rome</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Europe/Samara');">Europe/Samara</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Europe/Saratov');">Europe/Saratov</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Europe/Simferopol');">Europe/Simferopol</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Europe/Sofia');">Europe/Sofia</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Europe/Stockholm');">Europe/Stockholm</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Europe/Tallinn');">Europe/Tallinn</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Europe/Tirane');">Europe/Tirane</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Europe/Ulyanovsk');">Europe/Ulyanovsk</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Europe/Uzhgorod');">Europe/Uzhgorod</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Europe/Vienna');">Europe/Vienna</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Europe/Vilnius');">Europe/Vilnius</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Europe/Volgograd');">Europe/Volgograd</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Europe/Warsaw');">Europe/Warsaw</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Europe/Zaporozhye');">Europe/Zaporozhye</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Europe/Zurich');">Europe/Zurich</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Indian/Chagos');">Indian/Chagos</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Indian/Christmas');">Indian/Christmas</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Indian/Cocos');">Indian/Cocos</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Indian/Kerguelen');">Indian/Kerguelen</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Indian/Mahe');">Indian/Mahe</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Indian/Maldives');">Indian/Maldives</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Indian/Mauritius');">Indian/Mauritius</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Indian/Reunion');">Indian/Reunion</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Pacific/Apia');">Pacific/Apia</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Pacific/Auckland');">Pacific/Auckland</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Pacific/Bougainville');">Pacific/Bougainville</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Pacific/Chatham');">Pacific/Chatham</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Pacific/Chuuk');">Pacific/Chuuk</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Pacific/Easter');">Pacific/Easter</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Pacific/Efate');">Pacific/Efate</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Pacific/Enderbury');">Pacific/Enderbury</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Pacific/Fakaofo');">Pacific/Fakaofo</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Pacific/Fiji');">Pacific/Fiji</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Pacific/Funafuti');">Pacific/Funafuti</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Pacific/Galapagos');">Pacific/Galapagos</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Pacific/Gambier');">Pacific/Gambier</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Pacific/Guadalcanal');">Pacific/Guadalcanal</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Pacific/Guam');">Pacific/Guam</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Pacific/Honolulu');">Pacific/Honolulu</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Pacific/Kiritimati');">Pacific/Kiritimati</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Pacific/Kosrae');">Pacific/Kosrae</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Pacific/Kwajalein');">Pacific/Kwajalein</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Pacific/Majuro');">Pacific/Majuro</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Pacific/Marquesas');">Pacific/Marquesas</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Pacific/Nauru');">Pacific/Nauru</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Pacific/Niue');">Pacific/Niue</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Pacific/Norfolk');">Pacific/Norfolk</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Pacific/Noumea');">Pacific/Noumea</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Pacific/Pago_Pago');">Pacific/Pago_Pago</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Pacific/Palau');">Pacific/Palau</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Pacific/Pitcairn');">Pacific/Pitcairn</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Pacific/Pohnpei');">Pacific/Pohnpei</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Pacific/Port_Moresby');">Pacific/Port_Moresby</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Pacific/Rarotonga');">Pacific/Rarotonga</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Pacific/Tahiti');">Pacific/Tahiti</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Pacific/Tarawa');">Pacific/Tarawa</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Pacific/Tongatapu');">Pacific/Tongatapu</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Pacific/Wake');">Pacific/Wake</a></li>
+ <li><a href=# onclick="return selected_server_timezone('Pacific/Wallis');">Pacific/Wallis</a></li>
+ </ul>
+ </div>
+ <b><span id="timezone_error_message"></span></b>
+
+ </small>
+ </td>
+ </tr>
+ </table>
+ </div>
+ </form>
+ </div>
+ </div>
+ </div>
+ <div class="modal-footer">
+ <button type="button" class="btn btn-default" data-dismiss="modal">Close</button>
+ </div>
+ </div>
+ </div>
+ </div>
+
+
+ <div class="modal fade" id="updateModal" tabindex="-1" role="dialog" aria-labelledby="updateModalLabel">
+ <div class="modal-dialog" role="document">
+ <div class="modal-content">
+ <div class="modal-header">
+ <button type="button" class="close" data-dismiss="modal" aria-label="Close"><span aria-hidden="true">&times;</span></button>
+ <h4 class="modal-title" id="updateModalLabel">Update Check</h4>
+ </div>
+ <div class="modal-body">
+ Your netdata version: <b><code><span id="netdataVersion">Unknown</span></code></b><br/>
+ <br/>
+ <div style="padding: 10px;"></div>
+ <div id="versionCheckLog">Not checked yet. Please press the Check Now button.</div>
+ <div>
+ <hr/>
+ </div>
+ <div>
+                    For progress reports and key netdata updates: <strong><a href="https://twitter.com/linuxnetdata" target="_blank">follow netdata on <i class="fab fa-twitter"></i> Twitter</a></strong>.
+                    <br/>
+                    You can also <a href="https://www.facebook.com/linuxnetdata/" target="_blank">follow netdata on <i class="fab fa-facebook"></i> Facebook</a>,
+                    or <a href="https://github.com/netdata/netdata" target="_blank">watch netdata on <i class="fab fa-github"></i> GitHub</a>.
+ </div>
+ </div>
+ <div class="modal-footer">
+ <a href="#" onclick="notifyForUpdate(true); return false;" type="button" class="btn btn-default">Check Now</a>
+ <button type="button" class="btn btn-default" data-dismiss="modal">Close</button>
+ </div>
+ </div>
+ </div>
+ </div>
+
+ <div class="modal fade" id="signInModal" tabindex="-1" role="dialog" aria-labelledby="signInModalLabel">
+ <div class="modal-dialog" role="document">
+ <div class="modal-content">
+ <div class="modal-header">
+ <button type="button" class="close" data-dismiss="modal" aria-label="Close"><span aria-hidden="true">&times;</span></button>
+ <h4 class="modal-title" id="signInModalLabel">Sign In</h4>
+ </div>
+ <div class="modal-body">
+ <p>
+                        Signing in to netdata.cloud will synchronize the list of
+                        your netdata monitored nodes known to the registry at
+                        <strong><span id="sim-registry"></span></strong>. This
+                        may include server hostnames, URLs and identification
+                        GUIDs.
+ </p>
+ <p>
+                        After you upgrade all your netdata servers, your private
+                        registry will no longer be needed.
+ </p>
+ <p>
+ Are you sure you want to proceed?
+ </p>
+ </div>
+ <div class="modal-footer">
+ <button type="button" class="btn btn-default" data-dismiss="modal">Cancel</button>
+ <a href="#" onclick="explicitlySignIn(); return false;" type="button" class="btn btn-success">Sign In</a>
+ </div>
+ </div>
+ </div>
+ </div>
+
+ <div class="modal fade" id="syncRegistryModal" tabindex="-1" role="dialog" aria-labelledby="syncRegistryModalLabel">
+ <div class="modal-dialog" role="document">
+ <div class="modal-content">
+ <div class="modal-header">
+ <button type="button" class="close" data-dismiss="modal" aria-label="Close"><span aria-hidden="true">&times;</span></button>
+ <h4 class="modal-title" id="syncRegistryModalLabel">Synchronize netdata.cloud with registry?</h4>
+ </div>
+ <div class="modal-body">
+ <p>
+                        You are about to synchronize your netdata.cloud account with data from the registry at <strong><span id="sync-registry-modal-registry"></span></strong>.
+                        This may include server hostnames, URLs and identification GUIDs.
+ </p>
+ <p>
+ Are you sure you want to proceed?
+ </p>
+ </div>
+ <div class="modal-footer">
+ <button type="button" class="btn btn-default" data-dismiss="modal">Cancel</button>
+ <a href="#" onclick="explicitlySyncAgents(); return false;" type="button" class="btn btn-success">Synchronize</a>
+ </div>
+ </div>
+ </div>
+ </div>
+
+ <div class="modal fade" id="deleteRegistryModal" tabindex="-1" role="dialog" aria-labelledby="deleteRegistryModalLabel">
+ <div class="modal-dialog" role="document">
+ <div class="modal-content">
+ <div class="modal-header">
+ <button type="button" class="close" data-dismiss="modal" aria-label="Close"><span aria-hidden="true">&times;</span></button>
+ <h4 class="modal-title" id="deleteRegistryModalLabel">Delete <span id="deleteRegistryServerName"></span>?</h4>
+ </div>
+ <div class="modal-body">
+                    You are about to delete the following server from your personal list of netdata servers:
+ <p style="text-align: center; padding-top: 10px; padding-bottom: 10px; line-height: 2;">
+ <b><span id="deleteRegistryServerName2"></span></b>
+ <br/>
+ <b><span id="deleteRegistryServerURL"></span></b>
+ </p>
+ Are you sure you want to do this?
+ <br/>
+ <div style="padding: 10px;"></div>
+                    <small>Keep in mind that this server will be added back if and when you visit it again.</small>
+ <br/>
+ <div id="deleteRegistryResponse" style="display: block; width: 100%; text-align: center; padding-top: 20px;"></div>
+ </div>
+ <div class="modal-footer">
+                    <button type="button" class="btn btn-success" data-dismiss="modal">Keep it</button>
+                    <a href="#" onclick="notifyForDeleteRegistry(); return false;" type="button" class="btn btn-danger">Delete it</a>
+ </div>
+ </div>
+ </div>
+ </div>
+
+ <div class="modal fade" id="switchRegistryModal" tabindex="-1" role="dialog" aria-labelledby="switchRegistryModalLabel">
+ <div class="modal-dialog" role="document">
+ <div class="modal-content">
+ <div class="modal-header">
+ <button type="button" class="close" data-dismiss="modal" aria-label="Close"><span aria-hidden="true">&times;</span></button>
+ <h4 class="modal-title" id="switchRegistryModalLabel">Switch Netdata Registry Identity</h4>
+ </div>
+ <div class="modal-body">
+                    You can copy and paste the following ID into all your browsers (e.g. work and home).
+ <br/>
+ All the browsers with the same ID will identify <b>you</b>, so please don't share this with others.
+ <div style="text-align: center; padding-top: 10px; padding-bottom: 10px; line-height: 2;">
+ <form action="#">
+ <input type="text" class="form-control" id="switchRegistryPersonGUID" placeholder="your personal ID" maxlength="36" autocomplete="off" style="text-align: center; font-size: 1.4em;">
+ </form>
+ </div>
+                    Either copy this ID and paste it into another browser, or paste the ID you copied from another browser here.
+ <div style="padding-top: 10px;"><small>
+ Keep in mind that:
+ <ul>
+                            <li>when you switch IDs, your previous ID is lost forever - this is irreversible.</li>
+                            <li>both IDs (the old and the new one) must include this netdata in their personal lists.</li>
+                            <li>both IDs must be known to the registry: <b><span id="switchRegistryURL"></span></b>.</li>
+                            <li>to get a new ID, just clear your browser cookies.</li>
+ </ul>
+ </small></div>
+ <div id="switchRegistryResponse" style="display: block; width: 100%; text-align: center; padding-top: 20px;"></div>
+ </div>
+ <div class="modal-footer">
+                    <button type="button" class="btn btn-success" data-dismiss="modal">Cancel</button>
+                    <a href="#" onclick="notifyForSwitchRegistry(); return false;" type="button" class="btn btn-danger">Impersonate</a>
+ </div>
+ </div>
+ </div>
+ </div>
+
+ <div class="modal fade" id="gotoServerModal" tabindex="-1" role="dialog" aria-labelledby="gotoServerModalLabel">
+ <div class="modal-dialog" role="document">
+ <div class="modal-content">
+ <div class="modal-header">
+ <button type="button" class="close" data-dismiss="modal" aria-label="Close"><span aria-hidden="true">&times;</span></button>
+ <h4 class="modal-title" id="gotoServerModalLabel"><span id="gotoServerName"></span></h4>
+ </div>
+ <div class="modal-body">
+ Checking known URLs for this server...
+ <div style="padding-top: 20px;">
+ <table id="gotoServerList">
+ </table>
+ </div>
+ <p style="padding-top: 10px;"><small>
+ Checks may fail if you are viewing an HTTPS page and the server to be checked is HTTP only.
+ </small></p>
+ <div id="gotoServerResponse" style="display: block; width: 100%; text-align: center; padding-top: 20px;"></div>
+ </div>
+ <div class="modal-footer">
+ <button type="button" class="btn btn-default" data-dismiss="modal">Close</button>
+ </div>
+ </div>
+ </div>
+ </div>
+ <iframe id="ssoifrm" width="0" height="0"></iframe>
+ <div id="hiddenDownloadLinks" style="display: none;" hidden></div>
+ <script type="text/javascript" src="../dashboard.js?v20190902-0"></script>
+</body>
+</html>
diff --git a/web/gui/refresh-badges.js b/web/gui/refresh-badges.js
new file mode 100644
index 0000000..00dd4da
--- /dev/null
+++ b/web/gui/refresh-badges.js
@@ -0,0 +1,98 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+// ----------------------------------------------------------------------------
+// This script periodically updates all the netdata badges you have added to a
+// page as images. You don't need this script if you add the badges with
+// <embed src="..."/> - embedded badges auto-refresh by themselves.
+//
+// You can set the following variables before loading this script:
+
+/*global netdata_update_every *//* number, the time in seconds to update the badges
+ * (default: 15) */
+/*global netdata_live_callback *//* function, callback to be called on each iteration while updating the badges
+ * (default: null) */
+/*global netdata_paused_callback *//* function, callback to be called when the update pauses
+ * (default: null) */
+
+/*
+// EXAMPLE HTML PAGE:
+
+<html>
+<head>
+<script>
+// how frequently to update the badges?
+var netdata_update_every = 15;
+
+// show a count-down for badge refreshes
+var netdata_live_callback = function(secs, count) {
+ document.body.style.opacity = 1;
+ if(count)
+ document.getElementById("pageliveinfo").innerHTML = "This page is live - updated <b>" + count + "</b> badges...";
+ else
+ document.getElementById("pageliveinfo").innerHTML = "This page is live - badges will be updated in <b>" + secs + "</b> seconds...";
+};
+
+// show that we paused refreshes
+var netdata_paused_callback = function() {
+ document.body.style.opacity = 0.5;
+ document.getElementById("pageliveinfo").innerHTML = "Refresh paused - the page does not have your focus";
+};
+</script>
+<script src="https://localhost:19999/refresh-badges.js"></script>
+
+</head>
+<body>
+<div id="pageliveinfo">Please wait... loading...</div>
+<img src="http://localhost:19999/api/v1/badge.svg?chart=system.cpu"/>
+</body>
+</html>
+
+*/
+
+if(typeof netdata_update_every === 'undefined')
+ netdata_update_every = 15;
+
+var netdata_was_live = false;
+var netdata_is_live = true;
+var netdata_loops = 0;
+
+function update_netdata_badges() {
+ netdata_loops++;
+ netdata_is_live = false;
+
+ var updated = 0;
+ var focus = document.hasFocus();
+
+ if(focus && netdata_loops >= netdata_update_every) {
+ var len = document.images.length;
+ while(len--) {
+ var url = document.images[len].src;
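+            // refresh only netdata badge images: bump (or append) a cacheBuster
+            // query parameter on the src so the browser re-fetches the SVG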
+            if(url.match(/\/api\/v1\/badge\.svg/)) {
+ if(url.match(/\?/))
+ url = url.replace(/&cacheBuster=\d*/, "") + "&cacheBuster=" + new Date().getTime().toString();
+ else
+ url = url.replace(/\?cacheBuster=\d*/, "") + "?cacheBuster=" + new Date().getTime().toString();
+
+ document.images[len].src = url;
+ updated++;
+ }
+ }
+ netdata_loops = 0;
+ }
+
+ if(focus || updated)
+ netdata_is_live = true;
+
+ try {
+ if(netdata_is_live && typeof netdata_live_callback === 'function')
+ netdata_live_callback(netdata_update_every - netdata_loops, updated);
+ else if(netdata_was_live !== netdata_is_live && typeof netdata_paused_callback === 'function')
+ netdata_paused_callback();
+ }
+ catch(e) {
+ console.log(e);
+ }
+ netdata_was_live = netdata_is_live;
+
+ setTimeout(update_netdata_badges, 1000);
+}
+setTimeout(update_netdata_badges, 1000);
diff --git a/web/gui/robots.txt b/web/gui/robots.txt
new file mode 100644
index 0000000..e434d9c
--- /dev/null
+++ b/web/gui/robots.txt
@@ -0,0 +1,7 @@
+User-agent: *
+Allow: /$
+Allow: /index.html
+Allow: /default.html
+Allow: /demosites.html
+Allow: /sitemap.xml
+Disallow: /
diff --git a/web/gui/sitemap.xml b/web/gui/sitemap.xml
new file mode 100644
index 0000000..4438e8b
--- /dev/null
+++ b/web/gui/sitemap.xml
@@ -0,0 +1,9 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- SPDX-License-Identifier: GPL-3.0-or-later -->
+<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
+ <url>
+ <loc>https://my-netdata.io/</loc>
+ <lastmod>2017-01-02</lastmod>
+ <changefreq>always</changefreq>
+ </url>
+</urlset>
diff --git a/web/gui/src/dashboard.js/alarms.js b/web/gui/src/dashboard.js/alarms.js
new file mode 100644
index 0000000..8247767
--- /dev/null
+++ b/web/gui/src/dashboard.js/alarms.js
@@ -0,0 +1,422 @@
+
+// Alarms handling and browser notifications
+
+NETDATA.alarms = {
+ onclick: null, // the callback to handle the click - it will be called with the alarm log entry
+    chart_div_offset: -50,          // leave this much space above the chart when scrolling to it
+ chart_div_id_prefix: 'chart_', // the chart DIV IDs have this prefix (they should be NETDATA.name2id(chart.id))
+ chart_div_animation_duration: 0,// the duration of the animation while scrolling to a chart
+
+ ms_penalty: 0, // the time penalty of the next alarm
+    ms_between_notifications: 500,  // Firefox moves the alarms off-screen (above the top of the screen)
+                                    // if they are shown faster than one per 500 ms
+
+ update_every: 10000, // the time in ms between alarm checks
+
+ notifications: false, // when true, the browser supports notifications (may not be granted though)
+ last_notification_id: 0, // the id of the last alarm_log we have raised an alarm for
+ first_notification_id: 0, // the id of the first alarm_log entry for this session
+ // this is used to prevent CLEAR notifications for past events
+ // notifications_shown: [],
+
+ server: null, // the server to connect to for fetching alarms
+ current: null, // the list of raised alarms - updated in the background
+
+ // a callback function to call every time the list of raised alarms is refreshed
+ callback: (typeof netdataAlarmsActiveCallback === 'function') ? netdataAlarmsActiveCallback : null,
+
+ // a callback function to call every time a notification is shown
+ // the return value is used to decide if the notification will be shown
+ notificationCallback: (typeof netdataAlarmsNotifCallback === 'function') ? netdataAlarmsNotifCallback : null,
+
+ recipients: null, // the list (array) of recipients to show alarms for, or null
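+    // Illustrative only: a page embedding the dashboard could set, before
+    // dashboard.js is loaded, something like (hypothetical recipient names):
+    //     var netdataAlarmsRecipients = ['sysadmin', 'webmaster'];
+    // so that only alarms routed to those recipients raise notifications.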
+
+ recipientMatches: function (to_string, wanted_array) {
+ if (typeof wanted_array === 'undefined' || wanted_array === null || Array.isArray(wanted_array) === false) {
+ return true;
+ }
+
+ let r = ' ' + to_string.toString() + ' ';
+ let len = wanted_array.length;
+ while (len--) {
+ if (r.indexOf(' ' + wanted_array[len] + ' ') >= 0) {
+ return true;
+ }
+ }
+
+ return false;
+ },
+
+ activeForRecipients: function () {
+ let active = {};
+ let data = NETDATA.alarms.current;
+
+ if (typeof data === 'undefined' || data === null) {
+ return active;
+ }
+
+ for (let x in data.alarms) {
+ if (!data.alarms.hasOwnProperty(x)) {
+ continue;
+ }
+
+ let alarm = data.alarms[x];
+ if ((alarm.status === 'WARNING' || alarm.status === 'CRITICAL') && NETDATA.alarms.recipientMatches(alarm.recipient, NETDATA.alarms.recipients)) {
+ active[x] = alarm;
+ }
+ }
+
+ return active;
+ },
+
+ notify: function (entry) {
+ // console.log('alarm ' + entry.unique_id);
+
+ if (entry.updated) {
+ // console.log('alarm ' + entry.unique_id + ' has been updated by another alarm');
+ return;
+ }
+
+ let value_string = entry.value_string;
+
+ if (NETDATA.alarms.current !== null) {
+ // get the current value_string
+ let t = NETDATA.alarms.current.alarms[entry.chart + '.' + entry.name];
+ if (typeof t !== 'undefined' && entry.status === t.status && typeof t.value_string !== 'undefined') {
+ value_string = t.value_string;
+ }
+ }
+
+ let name = entry.name.replace(/_/g, ' ');
+ let status = entry.status.toLowerCase();
+ let title = name + ' = ' + value_string.toString();
+ let tag = entry.alarm_id;
+ let icon = 'images/banner-icon-144x144.png';
+ let interaction = false;
+ let data = entry;
+ let show = true;
+
+ // console.log('alarm ' + entry.unique_id + ' ' + entry.chart + '.' + entry.name + ' is ' + entry.status);
+
+ switch (entry.status) {
+ case 'REMOVED':
+ show = false;
+ break;
+
+ case 'UNDEFINED':
+ return;
+
+ case 'UNINITIALIZED':
+ return;
+
+ case 'CLEAR':
+ if (entry.unique_id < NETDATA.alarms.first_notification_id) {
+ // console.log('alarm ' + entry.unique_id + ' is not current');
+ return;
+ }
+ if (entry.old_status === 'UNINITIALIZED' || entry.old_status === 'UNDEFINED') {
+ // console.log('alarm' + entry.unique_id + ' switch to CLEAR from ' + entry.old_status);
+ return;
+ }
+ if (entry.no_clear_notification) {
+ // console.log('alarm' + entry.unique_id + ' is CLEAR but has no_clear_notification flag');
+ return;
+ }
+ title = name + ' back to normal (' + value_string.toString() + ')';
+ icon = 'images/check-mark-2-128-green.png';
+ interaction = false;
+ break;
+
+ case 'WARNING':
+ if (entry.old_status === 'CRITICAL') {
+ status = 'demoted to ' + entry.status.toLowerCase();
+ }
+
+ icon = 'images/alert-128-orange.png';
+ interaction = false;
+ break;
+
+ case 'CRITICAL':
+ if (entry.old_status === 'WARNING') {
+ status = 'escalated to ' + entry.status.toLowerCase();
+ }
+
+ icon = 'images/alert-128-red.png';
+ interaction = true;
+ break;
+
+ default:
+ console.log('invalid alarm status ' + entry.status);
+ return;
+ }
+
+ // filter recipients
+ if (show) {
+ show = NETDATA.alarms.recipientMatches(entry.recipient, NETDATA.alarms.recipients);
+ }
+
+ /*
+ // cleanup old notifications with the same alarm_id as this one
+ // it does not seem to work on any web browser - so notifications cannot be removed
+
+ let len = NETDATA.alarms.notifications_shown.length;
+ while (len--) {
+ let n = NETDATA.alarms.notifications_shown[len];
+ if (n.data.alarm_id === entry.alarm_id) {
+ console.log('removing old alarm ' + n.data.unique_id);
+
+ // close the notification
+ n.close.bind(n);
+
+ // remove it from the array
+ NETDATA.alarms.notifications_shown.splice(len, 1);
+ len = NETDATA.alarms.notifications_shown.length;
+ }
+ }
+ */
+
+ if (show) {
+ if (typeof NETDATA.alarms.notificationCallback === 'function') {
+ show = NETDATA.alarms.notificationCallback(entry);
+ }
+
+ if (show) {
+ setTimeout(function () {
+ // show this notification
+ // console.log('new notification: ' + title);
+ let n = new Notification(title, {
+ body: entry.hostname + ' - ' + entry.chart + ' (' + entry.family + ') - ' + status + ': ' + entry.info,
+ tag: tag,
+ requireInteraction: interaction,
+ icon: NETDATA.serverStatic + icon,
+ data: data
+ });
+
+ n.onclick = function (event) {
+ event.preventDefault();
+ NETDATA.alarms.onclick(event.target.data);
+ };
+
+ // console.log(n);
+ // NETDATA.alarms.notifications_shown.push(n);
+ // console.log(entry);
+ }, NETDATA.alarms.ms_penalty);
+
+ NETDATA.alarms.ms_penalty += NETDATA.alarms.ms_between_notifications;
+ }
+ }
+ },
+
+ scrollToChart: function (chart_id) {
+ if (typeof chart_id === 'string') {
+ let offset = $('#' + NETDATA.alarms.chart_div_id_prefix + NETDATA.name2id(chart_id)).offset();
+ if (typeof offset !== 'undefined') {
+ $('html, body').animate({scrollTop: offset.top + NETDATA.alarms.chart_div_offset}, NETDATA.alarms.chart_div_animation_duration);
+ return true;
+ }
+ }
+ return false;
+ },
+
+ scrollToAlarm: function (alarm) {
+ if (typeof alarm === 'object') {
+ let ret = NETDATA.alarms.scrollToChart(alarm.chart);
+
+ if (ret && NETDATA.options.page_is_visible === false) {
+ window.focus();
+ }
+ // alert('netdata dashboard will now scroll to chart: ' + alarm.chart + '\n\nThis alarm opened to bring the browser window in front of the screen. Click on the dashboard to prevent it from appearing again.');
+ }
+
+ },
+
+ notifyAll: function () {
+ // console.log('FETCHING ALARM LOG');
+ NETDATA.alarms.get_log(NETDATA.alarms.last_notification_id, function (data) {
+ // console.log('ALARM LOG FETCHED');
+
+ if (data === null || typeof data !== 'object') {
+ console.log('invalid alarms log response');
+ return;
+ }
+
+ if (data.length === 0) {
+ console.log('received empty alarm log');
+ return;
+ }
+
+ // console.log('received alarm log of ' + data.length + ' entries, from ' + data[data.length - 1].unique_id.toString() + ' to ' + data[0].unique_id.toString());
+
+ data.sort(function (a, b) {
+ if (a.unique_id > b.unique_id) {
+ return -1;
+ }
+ if (a.unique_id < b.unique_id) {
+ return 1;
+ }
+ return 0;
+ });
+
+ NETDATA.alarms.ms_penalty = 0;
+
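+            // the log was sorted newest-first above; walking the array backwards
+            // notifies the oldest new entries first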
+ let len = data.length;
+ while (len--) {
+ if (data[len].unique_id > NETDATA.alarms.last_notification_id) {
+ NETDATA.alarms.notify(data[len]);
+ }
+ //else
+ // console.log('ignoring alarm (older) with id ' + data[len].unique_id.toString());
+ }
+
+ NETDATA.alarms.last_notification_id = data[0].unique_id;
+
+ if (typeof netdataAlarmsRemember === 'undefined' || netdataAlarmsRemember) {
+ NETDATA.localStorageSet('last_notification_id', NETDATA.alarms.last_notification_id, null);
+ }
+ // console.log('last notification id = ' + NETDATA.alarms.last_notification_id);
+ })
+ },
+
+ check_notifications: function () {
+ // returns true if we should fire 1+ notifications
+
+ if (NETDATA.alarms.notifications !== true) {
+ // console.log('web notifications are not available');
+ return false;
+ }
+
+ if (Notification.permission !== 'granted') {
+ // console.log('web notifications are not granted');
+ return false;
+ }
+
+        if (NETDATA.alarms.current !== null && typeof NETDATA.alarms.current.alarms === 'object') {
+ // console.log('can do alarms: old id = ' + NETDATA.alarms.last_notification_id + ' new id = ' + NETDATA.alarms.current.latest_alarm_log_unique_id);
+
+ if (NETDATA.alarms.current.latest_alarm_log_unique_id > NETDATA.alarms.last_notification_id) {
+ // console.log('new alarms detected');
+ return true;
+ }
+ //else console.log('no new alarms');
+ }
+ // else console.log('cannot process alarms');
+
+ return false;
+ },
+
+ get: function (what, callback) {
+ $.ajax({
+ url: NETDATA.alarms.server + '/api/v1/alarms?' + what.toString(),
+ async: true,
+ cache: false,
+ headers: {
+ 'Cache-Control': 'no-cache, no-store',
+ 'Pragma': 'no-cache'
+ },
+ xhrFields: {withCredentials: true} // required for the cookie
+ })
+ .done(function (data) {
+ data = NETDATA.xss.checkOptional('/api/v1/alarms', data /*, '.*\.(calc|calc_parsed|warn|warn_parsed|crit|crit_parsed)$' */);
+
+ if (NETDATA.alarms.first_notification_id === 0 && typeof data.latest_alarm_log_unique_id === 'number') {
+ NETDATA.alarms.first_notification_id = data.latest_alarm_log_unique_id;
+ }
+
+ if (typeof callback === 'function') {
+ return callback(data);
+ }
+ })
+ .fail(function () {
+ NETDATA.error(415, NETDATA.alarms.server);
+
+ if (typeof callback === 'function') {
+ return callback(null);
+ }
+ });
+ },
+
+ update_forever: function () {
+ if (netdataShowAlarms !== true || netdataSnapshotData !== null) {
+ return;
+ }
+
+ NETDATA.alarms.get('active', function (data) {
+ if (data !== null) {
+ NETDATA.alarms.current = data;
+
+ if (NETDATA.alarms.check_notifications()) {
+ NETDATA.alarms.notifyAll();
+ }
+
+ if (typeof NETDATA.alarms.callback === 'function') {
+ NETDATA.alarms.callback(data);
+ }
+
+ // Health monitoring is disabled on this netdata
+ if (data.status === false) {
+ return;
+ }
+ }
+
+ setTimeout(NETDATA.alarms.update_forever, NETDATA.alarms.update_every);
+ });
+ },
+
+ get_log: function (last_id, callback) {
+ // console.log('fetching all log after ' + last_id.toString());
+ $.ajax({
+ url: NETDATA.alarms.server + '/api/v1/alarm_log?after=' + last_id.toString(),
+ async: true,
+ cache: false,
+ headers: {
+ 'Cache-Control': 'no-cache, no-store',
+ 'Pragma': 'no-cache'
+ },
+ xhrFields: {withCredentials: true} // required for the cookie
+ })
+ .done(function (data) {
+ data = NETDATA.xss.checkOptional('/api/v1/alarm_log', data);
+
+ if (typeof callback === 'function') {
+ return callback(data);
+ }
+ })
+ .fail(function () {
+ NETDATA.error(416, NETDATA.alarms.server);
+
+ if (typeof callback === 'function') {
+ return callback(null);
+ }
+ });
+ },
+
+ init: function () {
+ NETDATA.alarms.server = NETDATA.fixHost(NETDATA.serverDefault);
+
+ if (typeof netdataAlarmsRemember === 'undefined' || netdataAlarmsRemember) {
+ NETDATA.alarms.last_notification_id =
+ NETDATA.localStorageGet('last_notification_id', NETDATA.alarms.last_notification_id, null);
+ }
+
+ if (NETDATA.alarms.onclick === null) {
+ NETDATA.alarms.onclick = NETDATA.alarms.scrollToAlarm;
+ }
+
+ if (typeof netdataAlarmsRecipients !== 'undefined' && Array.isArray(netdataAlarmsRecipients)) {
+ NETDATA.alarms.recipients = netdataAlarmsRecipients;
+ }
+
+ if (netdataShowAlarms) {
+ NETDATA.alarms.update_forever();
+
+ if ('Notification' in window) {
+ // console.log('notifications available');
+ NETDATA.alarms.notifications = true;
+
+ if (Notification.permission === 'default') {
+ Notification.requestPermission();
+ }
+ }
+ }
+ }
+};
diff --git a/web/gui/src/dashboard.js/boot.js b/web/gui/src/dashboard.js/boot.js
new file mode 100644
index 0000000..c448213
--- /dev/null
+++ b/web/gui/src/dashboard.js/boot.js
@@ -0,0 +1,142 @@
+
+// Load required JS libraries and CSS
+
+NETDATA.requiredJs = [
+ {
+ url: NETDATA.serverStatic + 'lib/bootstrap-3.3.7.min.js',
+ async: false,
+ isAlreadyLoaded: function () {
+ // check if bootstrap is loaded
+ if (typeof $().emulateTransitionEnd === 'function') {
+ return true;
+ } else {
+ return typeof netdataNoBootstrap !== 'undefined' && netdataNoBootstrap;
+ }
+ }
+ },
+ {
+ url: NETDATA.serverStatic + 'lib/fontawesome-all-5.0.1.min.js',
+ async: true,
+ isAlreadyLoaded: function () {
+ return typeof netdataNoFontAwesome !== 'undefined' && netdataNoFontAwesome;
+ }
+ },
+ {
+ url: NETDATA.serverStatic + 'lib/perfect-scrollbar-0.6.15.min.js',
+ isAlreadyLoaded: function () {
+ return false;
+ }
+ }
+];
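+
+// Each entry provides a url, an optional async flag, and an isAlreadyLoaded()
+// check that lets a page opt out of loading it. An entry has this shape
+// (illustrative only - hypothetical url and global):
+//
+//     {
+//         url: NETDATA.serverStatic + 'lib/my-plugin.min.js',
+//         async: true,
+//         isAlreadyLoaded: function () { return typeof myPlugin !== 'undefined'; }
+//     }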
+
+NETDATA.requiredCSS = [
+ {
+ url: NETDATA.themes.current.bootstrap_css,
+ isAlreadyLoaded: function () {
+ return typeof netdataNoBootstrap !== 'undefined' && netdataNoBootstrap;
+ }
+ },
+ {
+ url: NETDATA.themes.current.dashboard_css,
+ isAlreadyLoaded: function () {
+ return false;
+ }
+ }
+];
+
+NETDATA.loadedRequiredJs = 0;
+NETDATA.loadRequiredJs = function (index, callback) {
+ if (index >= NETDATA.requiredJs.length) {
+ if (typeof callback === 'function') {
+ return callback();
+ }
+ return;
+ }
+
+ if (NETDATA.requiredJs[index].isAlreadyLoaded()) {
+ NETDATA.loadedRequiredJs++;
+ NETDATA.loadRequiredJs(++index, callback);
+ return;
+ }
+
+ if (NETDATA.options.debug.main_loop) {
+ console.log('loading ' + NETDATA.requiredJs[index].url);
+ }
+
+ let async = true;
+ if (typeof NETDATA.requiredJs[index].async !== 'undefined' && NETDATA.requiredJs[index].async === false) {
+ async = false;
+ }
+
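+    // synchronous entries (async: false) advance to the next script only after
+    // this one finishes loading (see .always() below); asynchronous entries
+    // advance immediately, so they load in parallel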
+ $.ajax({
+ url: NETDATA.requiredJs[index].url,
+ cache: true,
+ dataType: "script",
+ xhrFields: {withCredentials: true} // required for the cookie
+ })
+ .done(function () {
+ if (NETDATA.options.debug.main_loop) {
+ console.log('loaded ' + NETDATA.requiredJs[index].url);
+ }
+ })
+ .fail(function () {
+ alert('Cannot load required JS library: ' + NETDATA.requiredJs[index].url);
+ })
+ .always(function () {
+ NETDATA.loadedRequiredJs++;
+
+ // if (async === false)
+ if (!async) {
+ NETDATA.loadRequiredJs(++index, callback);
+ }
+ });
+
+ // if (async === true)
+ if (async) {
+ NETDATA.loadRequiredJs(++index, callback);
+ }
+};
+
+NETDATA.loadRequiredCSS = function (index) {
+ if (index >= NETDATA.requiredCSS.length) {
+ return;
+ }
+
+ if (NETDATA.requiredCSS[index].isAlreadyLoaded()) {
+ NETDATA.loadRequiredCSS(++index);
+ return;
+ }
+
+ if (NETDATA.options.debug.main_loop) {
+ console.log('loading ' + NETDATA.requiredCSS[index].url);
+ }
+
+ NETDATA._loadCSS(NETDATA.requiredCSS[index].url);
+ NETDATA.loadRequiredCSS(++index);
+};
+
+// Boot it!
+
+if (typeof netdataPrepCallback === 'function') {
+ netdataPrepCallback();
+}
+
+NETDATA.errorReset();
+NETDATA.loadRequiredCSS(0);
+
+NETDATA._loadjQuery(function () {
+ NETDATA.loadRequiredJs(0, function () {
+ if (typeof $().emulateTransitionEnd !== 'function') {
+ // bootstrap is not available
+ NETDATA.options.current.show_help = false;
+ }
+
+ if (typeof netdataDontStart === 'undefined' || !netdataDontStart) {
+ if (NETDATA.options.debug.main_loop) {
+ console.log('starting chart refresh thread');
+ }
+
+ NETDATA.start();
+ }
+ });
+});
diff --git a/web/gui/src/dashboard.js/chart-registry.js b/web/gui/src/dashboard.js/chart-registry.js
new file mode 100644
index 0000000..542f4e0
--- /dev/null
+++ b/web/gui/src/dashboard.js/chart-registry.js
@@ -0,0 +1,94 @@
+
+// *** src/dashboard.js/chart-registry.js
+
+// Chart Registry
+
+// When multiple charts need the same chart definition, we avoid downloading it
+// multiple times (and keeping it in browser memory multiple times)
+// by using this registry.
+
+// Every time we download a chart definition, we save it here with .add()
+// Then we try to get it back with .get(). If that fails, we download it.
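+//
+// Illustrative usage (hypothetical caller; 'system.cpu' and fetchedDefinition
+// are placeholders):
+//
+//     let chart = NETDATA.chartRegistry.get(host, 'system.cpu');
+//     if (chart === null) {
+//         // not cached yet - fetch the definition, then cache it
+//         NETDATA.chartRegistry.add(host, 'system.cpu', fetchedDefinition);
+//     }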
+
+NETDATA.fixHost = function (host) {
+ while (host.slice(-1) === '/') {
+ host = host.substring(0, host.length - 1);
+ }
+
+ return host;
+};
+
+NETDATA.chartRegistry = {
+ charts: {},
+
+ globalReset: function () {
+ this.charts = {};
+ },
+
+ add: function (host, id, data) {
+ if (typeof this.charts[host] === 'undefined') {
+ this.charts[host] = {};
+ }
+
+ //console.log('added ' + host + '/' + id);
+ this.charts[host][id] = data;
+ },
+
+ get: function (host, id) {
+ if (typeof this.charts[host] === 'undefined') {
+ return null;
+ }
+
+ if (typeof this.charts[host][id] === 'undefined') {
+ return null;
+ }
+
+ //console.log('cached ' + host + '/' + id);
+ return this.charts[host][id];
+ },
+
+ downloadAll: function (host, callback) {
+ host = NETDATA.fixHost(host);
+
+ let self = this;
+
+ function got_data(h, data, callback) {
+ if (data !== null) {
+ self.charts[h] = data.charts;
+
+ // update the server timezone in our options
+ if (typeof data.timezone === 'string') {
+ NETDATA.options.server_timezone = data.timezone;
+ }
+ } else {
+ NETDATA.error(406, h + '/api/v1/charts');
+ }
+
+ if (typeof callback === 'function') {
+ callback(data);
+ }
+ }
+
+ if (netdataSnapshotData !== null) {
+ got_data(host, netdataSnapshotData.charts, callback);
+ } else {
+ $.ajax({
+ url: host + '/api/v1/charts',
+ async: true,
+ cache: false,
+ xhrFields: {withCredentials: true} // required for the cookie
+ })
+ .done(function (data) {
+ data = NETDATA.xss.checkOptional('/api/v1/charts', data);
+ got_data(host, data, callback);
+ })
+ .fail(function () {
+ NETDATA.error(405, host + '/api/v1/charts');
+
+ if (typeof callback === 'function') {
+ callback(null);
+ }
+ });
+ }
+ }
+};
diff --git a/web/gui/src/dashboard.js/charting.js b/web/gui/src/dashboard.js/charting.js
new file mode 100644
index 0000000..1035ff0
--- /dev/null
+++ b/web/gui/src/dashboard.js/charting.js
@@ -0,0 +1,492 @@
+
+// Charts Libraries Registration
+
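+// Each chart library is described by an entry implementing the same interface:
+// initialize/create/update hooks, optional resize/setSelection/clearSelection,
+// the data format and URL options it needs, and presentation hints such as
+// pixels_per_point and container_class.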
+NETDATA.chartLibraries = {
+ "dygraph": {
+ initialize: NETDATA.dygraphInitialize,
+ create: NETDATA.dygraphChartCreate,
+ update: NETDATA.dygraphChartUpdate,
+ resize: function (state) {
+ if (typeof state.tmp.dygraph_instance !== 'undefined' && typeof state.tmp.dygraph_instance.resize === 'function') {
+ state.tmp.dygraph_instance.resize();
+ }
+ },
+ setSelection: NETDATA.dygraphSetSelection,
+ clearSelection: NETDATA.dygraphClearSelection,
+ toolboxPanAndZoom: NETDATA.dygraphToolboxPanAndZoom,
+ initialized: false,
+ enabled: true,
+        xssRegexIgnore: new RegExp('^/api/v1/data\\.result\\.data$'),
+ format: function (state) {
+ void(state);
+ return 'json';
+ },
+ options: function (state) {
+ return 'ms' + '%7C' + 'flip' + (this.isLogScale(state) ? ('%7C' + 'abs') : '').toString();
+ },
+ legend: function (state) {
+ return (this.isSparkline(state) === false && NETDATA.dataAttributeBoolean(state.element, 'legend', true) === true) ? 'right-side' : null;
+ },
+ autoresize: function (state) {
+ void(state);
+ return true;
+ },
+ max_updates_to_recreate: function (state) {
+ void(state);
+ return 5000;
+ },
+ track_colors: function (state) {
+ void(state);
+ return true;
+ },
+ pixels_per_point: function (state) {
+ return (this.isSparkline(state) === false) ? 3 : 2;
+ },
+ isSparkline: function (state) {
+ if (typeof state.tmp.dygraph_sparkline === 'undefined') {
+ state.tmp.dygraph_sparkline = (this.theme(state) === 'sparkline');
+ }
+ return state.tmp.dygraph_sparkline;
+ },
+ isLogScale: function (state) {
+ if (typeof state.tmp.dygraph_logscale === 'undefined') {
+ state.tmp.dygraph_logscale = (this.theme(state) === 'logscale');
+ }
+ return state.tmp.dygraph_logscale;
+ },
+ theme: function (state) {
+ if (typeof state.tmp.dygraph_theme === 'undefined') {
+ state.tmp.dygraph_theme = NETDATA.dataAttribute(state.element, 'dygraph-theme', 'default');
+ }
+ return state.tmp.dygraph_theme;
+ },
+ container_class: function (state) {
+ if (this.legend(state) !== null) {
+ return 'netdata-container-with-legend';
+ }
+ return 'netdata-container';
+ }
+ },
+ "sparkline": {
+ initialize: NETDATA.sparklineInitialize,
+ create: NETDATA.sparklineChartCreate,
+ update: NETDATA.sparklineChartUpdate,
+ resize: null,
+ setSelection: undefined, // function(state, t) { void(state); return true; },
+ clearSelection: undefined, // function(state) { void(state); return true; },
+ toolboxPanAndZoom: null,
+ initialized: false,
+ enabled: true,
+        xssRegexIgnore: new RegExp('^/api/v1/data\\.result$'),
+ format: function (state) {
+ void(state);
+ return 'array';
+ },
+ options: function (state) {
+ void(state);
+ return 'flip' + '%7C' + 'abs';
+ },
+ legend: function (state) {
+ void(state);
+ return null;
+ },
+ autoresize: function (state) {
+ void(state);
+ return false;
+ },
+ max_updates_to_recreate: function (state) {
+ void(state);
+ return 5000;
+ },
+ track_colors: function (state) {
+ void(state);
+ return false;
+ },
+ pixels_per_point: function (state) {
+ void(state);
+ return 3;
+ },
+ container_class: function (state) {
+ void(state);
+ return 'netdata-container';
+ }
+ },
+ "peity": {
+ initialize: NETDATA.peityInitialize,
+ create: NETDATA.peityChartCreate,
+ update: NETDATA.peityChartUpdate,
+ resize: null,
+ setSelection: undefined, // function(state, t) { void(state); return true; },
+ clearSelection: undefined, // function(state) { void(state); return true; },
+ toolboxPanAndZoom: null,
+ initialized: false,
+ enabled: true,
+        xssRegexIgnore: new RegExp('^/api/v1/data\\.result$'),
+ format: function (state) {
+ void(state);
+ return 'ssvcomma';
+ },
+ options: function (state) {
+ void(state);
+ return 'null2zero' + '%7C' + 'flip' + '%7C' + 'abs';
+ },
+ legend: function (state) {
+ void(state);
+ return null;
+ },
+ autoresize: function (state) {
+ void(state);
+ return false;
+ },
+ max_updates_to_recreate: function (state) {
+ void(state);
+ return 5000;
+ },
+ track_colors: function (state) {
+ void(state);
+ return false;
+ },
+ pixels_per_point: function (state) {
+ void(state);
+ return 3;
+ },
+ container_class: function (state) {
+ void(state);
+ return 'netdata-container';
+ }
+ },
+ // "morris": {
+ // initialize: NETDATA.morrisInitialize,
+ // create: NETDATA.morrisChartCreate,
+ // update: NETDATA.morrisChartUpdate,
+ // resize: null,
+ // setSelection: undefined, // function(state, t) { void(state); return true; },
+ // clearSelection: undefined, // function(state) { void(state); return true; },
+ // toolboxPanAndZoom: null,
+ // initialized: false,
+ // enabled: true,
+ // xssRegexIgnore: new RegExp('^/api/v1/data\.result.data$'),
+ // format: function(state) { void(state); return 'json'; },
+ // options: function(state) { void(state); return 'objectrows' + '%7C' + 'ms'; },
+ // legend: function(state) { void(state); return null; },
+ // autoresize: function(state) { void(state); return false; },
+ // max_updates_to_recreate: function(state) { void(state); return 50; },
+ // track_colors: function(state) { void(state); return false; },
+ // pixels_per_point: function(state) { void(state); return 15; },
+ // container_class: function(state) { void(state); return 'netdata-container'; }
+ // },
+ "google": {
+ initialize: NETDATA.googleInitialize,
+ create: NETDATA.googleChartCreate,
+ update: NETDATA.googleChartUpdate,
+ resize: null,
+ setSelection: undefined, //function(state, t) { void(state); return true; },
+ clearSelection: undefined, //function(state) { void(state); return true; },
+ toolboxPanAndZoom: null,
+ initialized: false,
+ enabled: true,
+        xssRegexIgnore: new RegExp('^/api/v1/data\\.result\\.rows$'),
+ format: function (state) {
+ void(state);
+ return 'datatable';
+ },
+ options: function (state) {
+ void(state);
+ return '';
+ },
+ legend: function (state) {
+ void(state);
+ return null;
+ },
+ autoresize: function (state) {
+ void(state);
+ return false;
+ },
+ max_updates_to_recreate: function (state) {
+ void(state);
+ return 300;
+ },
+ track_colors: function (state) {
+ void(state);
+ return false;
+ },
+ pixels_per_point: function (state) {
+ void(state);
+ return 4;
+ },
+ container_class: function (state) {
+ void(state);
+ return 'netdata-container';
+ }
+ },
+ // "raphael": {
+ // initialize: NETDATA.raphaelInitialize,
+ // create: NETDATA.raphaelChartCreate,
+ // update: NETDATA.raphaelChartUpdate,
+ // resize: null,
+ // setSelection: undefined, // function(state, t) { void(state); return true; },
+ // clearSelection: undefined, // function(state) { void(state); return true; },
+ // toolboxPanAndZoom: null,
+ // initialized: false,
+ // enabled: true,
+ // xssRegexIgnore: new RegExp('^/api/v1/data\.result.data$'),
+ // format: function(state) { void(state); return 'json'; },
+ // options: function(state) { void(state); return ''; },
+ // legend: function(state) { void(state); return null; },
+ // autoresize: function(state) { void(state); return false; },
+ // max_updates_to_recreate: function(state) { void(state); return 5000; },
+ // track_colors: function(state) { void(state); return false; },
+ // pixels_per_point: function(state) { void(state); return 3; },
+ // container_class: function(state) { void(state); return 'netdata-container'; }
+ // },
+ // "c3": {
+ // initialize: NETDATA.c3Initialize,
+ // create: NETDATA.c3ChartCreate,
+ // update: NETDATA.c3ChartUpdate,
+ // resize: null,
+ // setSelection: undefined, // function(state, t) { void(state); return true; },
+ // clearSelection: undefined, // function(state) { void(state); return true; },
+ // toolboxPanAndZoom: null,
+ // initialized: false,
+ // enabled: true,
+ // xssRegexIgnore: new RegExp('^/api/v1/data\.result$'),
+ // format: function(state) { void(state); return 'csvjsonarray'; },
+ // options: function(state) { void(state); return 'milliseconds'; },
+ // legend: function(state) { void(state); return null; },
+ // autoresize: function(state) { void(state); return false; },
+ // max_updates_to_recreate: function(state) { void(state); return 5000; },
+ // track_colors: function(state) { void(state); return false; },
+ // pixels_per_point: function(state) { void(state); return 15; },
+ // container_class: function(state) { void(state); return 'netdata-container'; }
+ // },
+ "d3pie": {
+ initialize: NETDATA.d3pieInitialize,
+ create: NETDATA.d3pieChartCreate,
+ update: NETDATA.d3pieChartUpdate,
+ resize: null,
+ setSelection: NETDATA.d3pieSetSelection,
+ clearSelection: NETDATA.d3pieClearSelection,
+ toolboxPanAndZoom: null,
+ initialized: false,
+ enabled: true,
+        xssRegexIgnore: new RegExp('^/api/v1/data\\.result\\.data$'),
+ format: function (state) {
+ void(state);
+ return 'json';
+ },
+ options: function (state) {
+ void(state);
+ return 'objectrows' + '%7C' + 'ms';
+ },
+ legend: function (state) {
+ void(state);
+ return null;
+ },
+ autoresize: function (state) {
+ void(state);
+ return false;
+ },
+ max_updates_to_recreate: function (state) {
+ void(state);
+ return 5000;
+ },
+ track_colors: function (state) {
+ void(state);
+ return false;
+ },
+ pixels_per_point: function (state) {
+ void(state);
+ return 15;
+ },
+ container_class: function (state) {
+ void(state);
+ return 'netdata-container';
+ }
+ },
+ "d3": {
+ initialize: NETDATA.d3Initialize,
+ create: NETDATA.d3ChartCreate,
+ update: NETDATA.d3ChartUpdate,
+ resize: null,
+ setSelection: undefined, // function(state, t) { void(state); return true; },
+ clearSelection: undefined, // function(state) { void(state); return true; },
+ toolboxPanAndZoom: null,
+ initialized: false,
+ enabled: true,
+        xssRegexIgnore: new RegExp('^/api/v1/data\\.result\\.data$'),
+ format: function (state) {
+ void(state);
+ return 'json';
+ },
+ options: function (state) {
+ void(state);
+ return '';
+ },
+ legend: function (state) {
+ void(state);
+ return null;
+ },
+ autoresize: function (state) {
+ void(state);
+ return false;
+ },
+ max_updates_to_recreate: function (state) {
+ void(state);
+ return 5000;
+ },
+ track_colors: function (state) {
+ void(state);
+ return false;
+ },
+ pixels_per_point: function (state) {
+ void(state);
+ return 3;
+ },
+ container_class: function (state) {
+ void(state);
+ return 'netdata-container';
+ }
+ },
+ "easypiechart": {
+ initialize: NETDATA.easypiechartInitialize,
+ create: NETDATA.easypiechartChartCreate,
+ update: NETDATA.easypiechartChartUpdate,
+ resize: null,
+ setSelection: NETDATA.easypiechartSetSelection,
+ clearSelection: NETDATA.easypiechartClearSelection,
+ toolboxPanAndZoom: null,
+ initialized: false,
+ enabled: true,
+        xssRegexIgnore: new RegExp('^/api/v1/data\\.result$'),
+ format: function (state) {
+ void(state);
+ return 'array';
+ },
+ options: function (state) {
+ void(state);
+ return 'absolute';
+ },
+ legend: function (state) {
+ void(state);
+ return null;
+ },
+ autoresize: function (state) {
+ void(state);
+ return false;
+ },
+ max_updates_to_recreate: function (state) {
+ void(state);
+ return 5000;
+ },
+ track_colors: function (state) {
+ void(state);
+ return true;
+ },
+ pixels_per_point: function (state) {
+ void(state);
+ return 3;
+ },
+ aspect_ratio: 100,
+ container_class: function (state) {
+ void(state);
+ return 'netdata-container-easypiechart';
+ }
+ },
+ "gauge": {
+ initialize: NETDATA.gaugeInitialize,
+ create: NETDATA.gaugeChartCreate,
+ update: NETDATA.gaugeChartUpdate,
+ resize: null,
+ setSelection: NETDATA.gaugeSetSelection,
+ clearSelection: NETDATA.gaugeClearSelection,
+ toolboxPanAndZoom: null,
+ initialized: false,
+ enabled: true,
+        xssRegexIgnore: new RegExp('^/api/v1/data\\.result$'),
+ format: function (state) {
+ void(state);
+ return 'array';
+ },
+ options: function (state) {
+ void(state);
+ return 'absolute';
+ },
+ legend: function (state) {
+ void(state);
+ return null;
+ },
+ autoresize: function (state) {
+ void(state);
+ return false;
+ },
+ max_updates_to_recreate: function (state) {
+ void(state);
+ return 5000;
+ },
+ track_colors: function (state) {
+ void(state);
+ return true;
+ },
+ pixels_per_point: function (state) {
+ void(state);
+ return 3;
+ },
+ aspect_ratio: 60,
+ container_class: function (state) {
+ void(state);
+ return 'netdata-container-gauge';
+ }
+ },
+ "textonly": {
+ autoresize: function (state) {
+ void(state);
+ return false;
+ },
+ container_class: function (state) {
+ void(state);
+ return 'netdata-container';
+ },
+ create: NETDATA.textOnlyCreate,
+ enabled: true,
+ format: function (state) {
+ void(state);
+ return 'array';
+ },
+ initialized: true,
+ initialize: function (callback) {
+ callback();
+ },
+ legend: function (state) {
+ void(state);
+ return null;
+ },
+ max_updates_to_recreate: function (state) {
+ void(state);
+ return 5000;
+ },
+ options: function (state) {
+ void(state);
+ return 'absolute';
+ },
+ pixels_per_point: function (state) {
+ void(state);
+ return 3;
+ },
+ track_colors: function (state) {
+ void(state);
+ return false;
+ },
+ update: NETDATA.textOnlyUpdate,
+        xssRegexIgnore: new RegExp('^/api/v1/data\\.result$'),
+ }
+};
+
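+// A chart library calls this once its script has loaded - for example,
+// NETDATA.d3pieInitialize calls NETDATA.registerChartLibrary('d3pie', NETDATA.d3pie_js)
+// after the d3pie script is fetched.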
+NETDATA.registerChartLibrary = function (library, url) {
+ if (NETDATA.options.debug.libraries) {
+ console.log("registering chart library: " + library);
+ }
+
+ NETDATA.chartLibraries[library].url = url;
+ NETDATA.chartLibraries[library].initialized = true;
+ NETDATA.chartLibraries[library].enabled = true;
+};
diff --git a/web/gui/src/dashboard.js/charting/_c3.js b/web/gui/src/dashboard.js/charting/_c3.js
new file mode 100644
index 0000000..6688bbc
--- /dev/null
+++ b/web/gui/src/dashboard.js/charting/_c3.js
@@ -0,0 +1,114 @@
+
+// DEPRECATED: will be removed!
+
+// c3
+
+NETDATA.c3Initialize = function(callback) {
+ if (typeof netdataNoC3 === 'undefined' || !netdataNoC3) {
+
+ // C3 requires D3
+ if (!NETDATA.chartLibraries.d3.initialized) {
+ if (NETDATA.chartLibraries.d3.enabled) {
+ NETDATA.d3Initialize(function() {
+ NETDATA.c3Initialize(callback);
+ });
+ } else {
+ NETDATA.chartLibraries.c3.enabled = false;
+ if (typeof callback === "function")
+ return callback();
+ }
+ } else {
+ NETDATA._loadCSS(NETDATA.c3_css);
+
+ $.ajax({
+ url: NETDATA.c3_js,
+ cache: true,
+ dataType: "script",
+ xhrFields: { withCredentials: true } // required for the cookie
+ })
+ .done(function() {
+ NETDATA.registerChartLibrary('c3', NETDATA.c3_js);
+ })
+ .fail(function() {
+ NETDATA.chartLibraries.c3.enabled = false;
+ NETDATA.error(100, NETDATA.c3_js);
+ })
+ .always(function() {
+ if (typeof callback === "function")
+ return callback();
+ });
+ }
+ } else {
+ NETDATA.chartLibraries.c3.enabled = false;
+ if (typeof callback === "function")
+ return callback();
+ }
+};
+
+NETDATA.c3ChartUpdate = function(state, data) {
+ state.c3_instance.destroy();
+ return NETDATA.c3ChartCreate(state, data);
+
+ //state.c3_instance.load({
+ // rows: data.result,
+ // unload: true
+ //});
+
+ //return true;
+};
+
+NETDATA.c3ChartCreate = function(state, data) {
+
+ state.element_chart.id = 'c3-' + state.uuid;
+ // console.log('id = ' + state.element_chart.id);
+
+ state.c3_instance = c3.generate({
+ bindto: '#' + state.element_chart.id,
+ size: {
+ width: state.chartWidth(),
+ height: state.chartHeight()
+ },
+ color: {
+ pattern: state.chartColors()
+ },
+ data: {
+ x: 'time',
+ rows: data.result,
+ type: (state.chart.chart_type === 'line')?'spline':'area-spline'
+ },
+ axis: {
+ x: {
+ type: 'timeseries',
+ tick: {
+ format: function(x) {
+ return NETDATA.dateTime.xAxisTimeString(x);
+ }
+ }
+ }
+ },
+ grid: {
+ x: {
+ show: true
+ },
+ y: {
+ show: true
+ }
+ },
+ point: {
+ show: false
+ },
+ line: {
+ connectNull: false
+ },
+ transition: {
+ duration: 0
+ },
+ interaction: {
+ enabled: true
+ }
+ });
+
+ // console.log(state.c3_instance);
+
+ return true;
+};
diff --git a/web/gui/src/dashboard.js/charting/_morris.js b/web/gui/src/dashboard.js/charting/_morris.js
new file mode 100644
index 0000000..30789e4
--- /dev/null
+++ b/web/gui/src/dashboard.js/charting/_morris.js
@@ -0,0 +1,81 @@
+
+// DEPRECATED: will be removed!
+
+// morris
+
+NETDATA.morrisInitialize = function(callback) {
+ if (typeof netdataNoMorris === 'undefined' || !netdataNoMorris) {
+
+ // morris requires raphael
+ if (!NETDATA.chartLibraries.raphael.initialized) {
+ if (NETDATA.chartLibraries.raphael.enabled) {
+ NETDATA.raphaelInitialize(function() {
+ NETDATA.morrisInitialize(callback);
+ });
+ } else {
+ NETDATA.chartLibraries.morris.enabled = false;
+ if (typeof callback === "function")
+ return callback();
+ }
+ } else {
+ NETDATA._loadCSS(NETDATA.morris_css);
+
+ $.ajax({
+ url: NETDATA.morris_js,
+ cache: true,
+ dataType: "script",
+ xhrFields: { withCredentials: true } // required for the cookie
+ })
+ .done(function() {
+ NETDATA.registerChartLibrary('morris', NETDATA.morris_js);
+ })
+ .fail(function() {
+ NETDATA.chartLibraries.morris.enabled = false;
+ NETDATA.error(100, NETDATA.morris_js);
+ })
+ .always(function() {
+ if (typeof callback === "function")
+ return callback();
+ });
+ }
+ } else {
+ NETDATA.chartLibraries.morris.enabled = false;
+ if (typeof callback === "function")
+ return callback();
+ }
+};
+
+NETDATA.morrisChartUpdate = function(state, data) {
+ state.morris_instance.setData(data.result.data);
+ return true;
+};
+
+NETDATA.morrisChartCreate = function(state, data) {
+
+ state.morris_options = {
+ element: state.element_chart.id,
+ data: data.result.data,
+ xkey: 'time',
+ ykeys: data.dimension_names,
+ labels: data.dimension_names,
+ lineWidth: 2,
+ pointSize: 3,
+ smooth: true,
+ hideHover: 'auto',
+ parseTime: true,
+ continuousLine: false,
+ behaveLikeLine: false
+ };
+
+ if (state.chart.chart_type === 'line')
+ state.morris_instance = new Morris.Line(state.morris_options);
+
+ else if (state.chart.chart_type === 'area') {
+ state.morris_options.behaveLikeLine = true;
+ state.morris_instance = new Morris.Area(state.morris_options);
+ }
+ else // stacked
+ state.morris_instance = new Morris.Area(state.morris_options);
+
+ return true;
+};
diff --git a/web/gui/src/dashboard.js/charting/_raphael.js b/web/gui/src/dashboard.js/charting/_raphael.js
new file mode 100644
index 0000000..2d89a22
--- /dev/null
+++ b/web/gui/src/dashboard.js/charting/_raphael.js
@@ -0,0 +1,48 @@
+
+// DEPRECATED: will be removed!
+
+// raphael
+
+NETDATA.raphaelInitialize = function(callback) {
+ if (typeof netdataStopRaphael === 'undefined' || !netdataStopRaphael) {
+ $.ajax({
+ url: NETDATA.raphael_js,
+ cache: true,
+ dataType: "script",
+ xhrFields: { withCredentials: true } // required for the cookie
+ })
+ .done(function() {
+ NETDATA.registerChartLibrary('raphael', NETDATA.raphael_js);
+ })
+ .fail(function() {
+ NETDATA.chartLibraries.raphael.enabled = false;
+ NETDATA.error(100, NETDATA.raphael_js);
+ })
+ .always(function() {
+ if (typeof callback === "function")
+ return callback();
+ });
+ } else {
+ NETDATA.chartLibraries.raphael.enabled = false;
+ if (typeof callback === "function")
+ return callback();
+ }
+};
+
+NETDATA.raphaelChartUpdate = function(state, data) {
+ $(state.element_chart).raphael(data.result, {
+ width: state.chartWidth(),
+ height: state.chartHeight()
+ });
+
+ return false;
+};
+
+NETDATA.raphaelChartCreate = function(state, data) {
+ $(state.element_chart).raphael(data.result, {
+ width: state.chartWidth(),
+ height: state.chartHeight()
+ });
+
+ return false;
+};
diff --git a/web/gui/src/dashboard.js/charting/d3.js b/web/gui/src/dashboard.js/charting/d3.js
new file mode 100644
index 0000000..6528208
--- /dev/null
+++ b/web/gui/src/dashboard.js/charting/d3.js
@@ -0,0 +1,43 @@
+
+// ----------------------------------------------------------------------------------------------------------------
+// D3
+
+NETDATA.d3Initialize = function(callback) {
+ if (typeof netdataStopD3 === 'undefined' || !netdataStopD3) {
+ $.ajax({
+ url: NETDATA.d3_js,
+ cache: true,
+ dataType: "script",
+ xhrFields: { withCredentials: true } // required for the cookie
+ })
+ .done(function() {
+ NETDATA.registerChartLibrary('d3', NETDATA.d3_js);
+ })
+ .fail(function() {
+ NETDATA.chartLibraries.d3.enabled = false;
+ NETDATA.error(100, NETDATA.d3_js);
+ })
+ .always(function() {
+ if (typeof callback === "function")
+ return callback();
+ });
+ } else {
+ NETDATA.chartLibraries.d3.enabled = false;
+ if (typeof callback === "function")
+ return callback();
+ }
+};
+
+NETDATA.d3ChartUpdate = function(state, data) {
+ void(state);
+ void(data);
+
+ return false;
+};
+
+NETDATA.d3ChartCreate = function(state, data) {
+ void(state);
+ void(data);
+
+ return false;
+};
diff --git a/web/gui/src/dashboard.js/charting/d3pie.js b/web/gui/src/dashboard.js/charting/d3pie.js
new file mode 100644
index 0000000..27cff85
--- /dev/null
+++ b/web/gui/src/dashboard.js/charting/d3pie.js
@@ -0,0 +1,341 @@
+
+// d3pie
+
+NETDATA.d3pieInitialize = function (callback) {
+ if (typeof netdataNoD3pie === 'undefined' || !netdataNoD3pie) {
+
+ // d3pie requires D3
+ if (!NETDATA.chartLibraries.d3.initialized) {
+ if (NETDATA.chartLibraries.d3.enabled) {
+ NETDATA.d3Initialize(function () {
+ NETDATA.d3pieInitialize(callback);
+ });
+ } else {
+ NETDATA.chartLibraries.d3pie.enabled = false;
+ if (typeof callback === "function") {
+ return callback();
+ }
+ }
+ } else {
+ $.ajax({
+ url: NETDATA.d3pie_js,
+ cache: true,
+ dataType: "script",
+ xhrFields: {withCredentials: true} // required for the cookie
+ })
+ .done(function () {
+ NETDATA.registerChartLibrary('d3pie', NETDATA.d3pie_js);
+ })
+ .fail(function () {
+ NETDATA.chartLibraries.d3pie.enabled = false;
+ NETDATA.error(100, NETDATA.d3pie_js);
+ })
+ .always(function () {
+ if (typeof callback === "function") {
+ return callback();
+ }
+ });
+ }
+ } else {
+ NETDATA.chartLibraries.d3pie.enabled = false;
+ if (typeof callback === "function") {
+ return callback();
+ }
+ }
+};
+
+NETDATA.d3pieSetContent = function (state, data, index) {
+ state.legendFormatValueDecimalsFromMinMax(
+ data.min,
+ data.max
+ );
+
+ let content = [];
+ let colors = state.chartColors();
+ let len = data.result.labels.length;
+ for (let i = 1; i < len; i++) {
+ let label = data.result.labels[i];
+ let value = data.result.data[index][label];
+ let color = colors[i - 1];
+
+ if (value !== null && value > 0) {
+ content.push({
+ label: label,
+ value: value,
+ color: color
+ });
+ }
+ }
+
+ if (content.length === 0) {
+ content.push({
+ label: 'no data',
+ value: 100,
+ color: '#666666'
+ });
+ }
+
+ state.tmp.d3pie_last_slot = index;
+ return content;
+};
+
+NETDATA.d3pieDateRange = function (state, data, index) {
+ let dt = Math.round((data.before - data.after + 1) / data.points);
+ let dt_str = NETDATA.seconds4human(dt);
+
+ let before = data.result.data[index].time;
+ let after = before - (dt * 1000);
+
+ let d1 = NETDATA.dateTime.localeDateString(after);
+ let t1 = NETDATA.dateTime.localeTimeString(after);
+ let d2 = NETDATA.dateTime.localeDateString(before);
+ let t2 = NETDATA.dateTime.localeTimeString(before);
+
+ if (d1 === d2) {
+ return d1 + ' ' + t1 + ' to ' + t2 + ', ' + dt_str;
+ }
+
+ return d1 + ' ' + t1 + ' to ' + d2 + ' ' + t2 + ', ' + dt_str;
+};
+
+NETDATA.d3pieSetSelection = function (state, t) {
+ if (state.timeIsVisible(t) !== true) {
+ return NETDATA.d3pieClearSelection(state, true);
+ }
+
+ let slot = state.calculateRowForTime(t);
+ slot = state.data.result.data.length - slot - 1;
+
+    if (slot < 0 || slot >= state.data.result.data.length) {
+ return NETDATA.d3pieClearSelection(state, true);
+ }
+
+ if (state.tmp.d3pie_last_slot === slot) {
+ // we already show this slot, don't do anything
+ return true;
+ }
+
+ if (state.tmp.d3pie_timer === undefined) {
+ state.tmp.d3pie_timer = NETDATA.timeout.set(function () {
+ state.tmp.d3pie_timer = undefined;
+ NETDATA.d3pieChange(state, NETDATA.d3pieSetContent(state, state.data, slot), NETDATA.d3pieDateRange(state, state.data, slot));
+ }, 0);
+ }
+
+ return true;
+};
+
+NETDATA.d3pieClearSelection = function (state, force) {
+ if (typeof state.tmp.d3pie_timer !== 'undefined') {
+ NETDATA.timeout.clear(state.tmp.d3pie_timer);
+ state.tmp.d3pie_timer = undefined;
+ }
+
+ if (state.isAutoRefreshable() && state.data !== null && force !== true) {
+ NETDATA.d3pieChartUpdate(state, state.data);
+ } else {
+ if (state.tmp.d3pie_last_slot !== -1) {
+ state.tmp.d3pie_last_slot = -1;
+ NETDATA.d3pieChange(state, [{label: 'no data', value: 1, color: '#666666'}], 'no data available');
+ }
+ }
+
+ return true;
+};
+
+NETDATA.d3pieChange = function (state, content, footer) {
+ if (state.d3pie_forced_subtitle === null) {
+ //state.d3pie_instance.updateProp("header.subtitle.text", state.units_current);
+ state.d3pie_instance.options.header.subtitle.text = state.units_current;
+ }
+
+ if (state.d3pie_forced_footer === null) {
+ //state.d3pie_instance.updateProp("footer.text", footer);
+ state.d3pie_instance.options.footer.text = footer;
+ }
+
+ //state.d3pie_instance.updateProp("data.content", content);
+ state.d3pie_instance.options.data.content = content;
+ state.d3pie_instance.destroy();
+ state.d3pie_instance.recreate();
+ return true;
+};
+
+NETDATA.d3pieChartUpdate = function (state, data) {
+ return NETDATA.d3pieChange(state, NETDATA.d3pieSetContent(state, data, 0), NETDATA.d3pieDateRange(state, data, 0));
+};
+
+NETDATA.d3pieChartCreate = function (state, data) {
+
+ state.element_chart.id = 'd3pie-' + state.uuid;
+ // console.log('id = ' + state.element_chart.id);
+
+ let content = NETDATA.d3pieSetContent(state, data, 0);
+
+ state.d3pie_forced_title = NETDATA.dataAttribute(state.element, 'd3pie-title', null);
+ state.d3pie_forced_subtitle = NETDATA.dataAttribute(state.element, 'd3pie-subtitle', null);
+ state.d3pie_forced_footer = NETDATA.dataAttribute(state.element, 'd3pie-footer', null);
+
+ state.d3pie_options = {
+ header: {
+ title: {
+ text: (state.d3pie_forced_title !== null) ? state.d3pie_forced_title : state.title,
+ color: NETDATA.dataAttribute(state.element, 'd3pie-title-color', NETDATA.themes.current.d3pie.title),
+ fontSize: NETDATA.dataAttribute(state.element, 'd3pie-title-fontsize', 12),
+ fontWeight: NETDATA.dataAttribute(state.element, 'd3pie-title-fontweight', "bold"),
+ font: NETDATA.dataAttribute(state.element, 'd3pie-title-font', "arial")
+ },
+ subtitle: {
+ text: (state.d3pie_forced_subtitle !== null) ? state.d3pie_forced_subtitle : state.units_current,
+ color: NETDATA.dataAttribute(state.element, 'd3pie-subtitle-color', NETDATA.themes.current.d3pie.subtitle),
+ fontSize: NETDATA.dataAttribute(state.element, 'd3pie-subtitle-fontsize', 10),
+ fontWeight: NETDATA.dataAttribute(state.element, 'd3pie-subtitle-fontweight', "normal"),
+ font: NETDATA.dataAttribute(state.element, 'd3pie-subtitle-font', "arial")
+ },
+ titleSubtitlePadding: 1
+ },
+ footer: {
+ text: (state.d3pie_forced_footer !== null) ? state.d3pie_forced_footer : NETDATA.d3pieDateRange(state, data, 0),
+ color: NETDATA.dataAttribute(state.element, 'd3pie-footer-color', NETDATA.themes.current.d3pie.footer),
+ fontSize: NETDATA.dataAttribute(state.element, 'd3pie-footer-fontsize', 9),
+ fontWeight: NETDATA.dataAttribute(state.element, 'd3pie-footer-fontweight', "bold"),
+ font: NETDATA.dataAttribute(state.element, 'd3pie-footer-font', "arial"),
+ location: NETDATA.dataAttribute(state.element, 'd3pie-footer-location', "bottom-center") // bottom-left, bottom-center, bottom-right
+ },
+ size: {
+ canvasHeight: state.chartHeight(),
+ canvasWidth: state.chartWidth(),
+ pieInnerRadius: NETDATA.dataAttribute(state.element, 'd3pie-pieinnerradius', "45%"),
+ pieOuterRadius: NETDATA.dataAttribute(state.element, 'd3pie-pieouterradius', "80%")
+ },
+ data: {
+ // none, random, value-asc, value-desc, label-asc, label-desc
+ sortOrder: NETDATA.dataAttribute(state.element, 'd3pie-sortorder', "value-desc"),
+ smallSegmentGrouping: {
+ enabled: NETDATA.dataAttributeBoolean(state.element, "d3pie-smallsegmentgrouping-enabled", false),
+ value: NETDATA.dataAttribute(state.element, 'd3pie-smallsegmentgrouping-value', 1),
+ // percentage, value
+ valueType: NETDATA.dataAttribute(state.element, 'd3pie-smallsegmentgrouping-valuetype', "percentage"),
+ label: NETDATA.dataAttribute(state.element, 'd3pie-smallsegmentgrouping-label', "other"),
+ color: NETDATA.dataAttribute(state.element, 'd3pie-smallsegmentgrouping-color', NETDATA.themes.current.d3pie.other)
+ },
+
+ // REQUIRED! This is where you enter your pie data; it needs to be an array of objects
+ // of this form: { label: "label", value: 1.5, color: "#000000" } - color is optional
+ content: content
+ },
+ labels: {
+ outer: {
+ // label, value, percentage, label-value1, label-value2, label-percentage1, label-percentage2
+ format: NETDATA.dataAttribute(state.element, 'd3pie-labels-outer-format', "label-value1"),
+ hideWhenLessThanPercentage: NETDATA.dataAttribute(state.element, 'd3pie-labels-outer-hidewhenlessthanpercentage', null),
+ pieDistance: NETDATA.dataAttribute(state.element, 'd3pie-labels-outer-piedistance', 15)
+ },
+ inner: {
+ // label, value, percentage, label-value1, label-value2, label-percentage1, label-percentage2
+ format: NETDATA.dataAttribute(state.element, 'd3pie-labels-inner-format', "percentage"),
+ hideWhenLessThanPercentage: NETDATA.dataAttribute(state.element, 'd3pie-labels-inner-hidewhenlessthanpercentage', 2)
+ },
+ mainLabel: {
+ color: NETDATA.dataAttribute(state.element, 'd3pie-labels-mainLabel-color', NETDATA.themes.current.d3pie.mainlabel), // or 'segment' for dynamic color
+ font: NETDATA.dataAttribute(state.element, 'd3pie-labels-mainLabel-font', "arial"),
+ fontSize: NETDATA.dataAttribute(state.element, 'd3pie-labels-mainLabel-fontsize', 10),
+ fontWeight: NETDATA.dataAttribute(state.element, 'd3pie-labels-mainLabel-fontweight', "normal")
+ },
+ percentage: {
+ color: NETDATA.dataAttribute(state.element, 'd3pie-labels-percentage-color', NETDATA.themes.current.d3pie.percentage),
+ font: NETDATA.dataAttribute(state.element, 'd3pie-labels-percentage-font', "arial"),
+ fontSize: NETDATA.dataAttribute(state.element, 'd3pie-labels-percentage-fontsize', 10),
+ fontWeight: NETDATA.dataAttribute(state.element, 'd3pie-labels-percentage-fontweight', "bold"),
+ decimalPlaces: 0
+ },
+ value: {
+ color: NETDATA.dataAttribute(state.element, 'd3pie-labels-value-color', NETDATA.themes.current.d3pie.value),
+ font: NETDATA.dataAttribute(state.element, 'd3pie-labels-value-font', "arial"),
+ fontSize: NETDATA.dataAttribute(state.element, 'd3pie-labels-value-fontsize', 10),
+ fontWeight: NETDATA.dataAttribute(state.element, 'd3pie-labels-value-fontweight', "bold")
+ },
+ lines: {
+ enabled: NETDATA.dataAttributeBoolean(state.element, 'd3pie-labels-lines-enabled', true),
+ style: NETDATA.dataAttribute(state.element, 'd3pie-labels-lines-style', "curved"),
+ color: NETDATA.dataAttribute(state.element, 'd3pie-labels-lines-color', "segment") // "segment" or a hex color
+ },
+ truncation: {
+ enabled: NETDATA.dataAttributeBoolean(state.element, 'd3pie-labels-truncation-enabled', false),
+ truncateLength: NETDATA.dataAttribute(state.element, 'd3pie-labels-truncation-truncatelength', 30)
+ },
+ formatter: function (context) {
+ // console.log(context);
+ if (context.part === 'value') {
+ return state.legendFormatValue(context.value);
+ }
+ if (context.part === 'percentage') {
+ return context.label + '%';
+ }
+
+ return context.label;
+ }
+ },
+ effects: {
+ load: {
+ effect: "none", // none / default
+ speed: 0 // commented in the d3pie code to speed it up
+ },
+ pullOutSegmentOnClick: {
+ effect: "bounce", // none / linear / bounce / elastic / back
+ speed: 400,
+ size: 5
+ },
+ highlightSegmentOnMouseover: true,
+ highlightLuminosity: -0.2
+ },
+ tooltips: {
+ enabled: false,
+ type: "placeholder", // caption|placeholder
+ string: "",
+ placeholderParser: null, // function
+ styles: {
+ fadeInSpeed: 250,
+ backgroundColor: NETDATA.themes.current.d3pie.tooltip_bg,
+ backgroundOpacity: 0.5,
+ color: NETDATA.themes.current.d3pie.tooltip_fg,
+ borderRadius: 2,
+ font: "arial",
+ fontSize: 12,
+ padding: 4
+ }
+ },
+ misc: {
+ colors: {
+ background: 'transparent', // transparent or color #
+ // segments: state.chartColors(),
+ segmentStroke: NETDATA.dataAttribute(state.element, 'd3pie-misc-colors-segmentstroke', NETDATA.themes.current.d3pie.segment_stroke)
+ },
+ gradient: {
+ enabled: NETDATA.dataAttributeBoolean(state.element, 'd3pie-misc-gradient-enabled', false),
+ percentage: NETDATA.dataAttribute(state.element, 'd3pie-misc-colors-percentage', 95),
+ color: NETDATA.dataAttribute(state.element, 'd3pie-misc-gradient-color', NETDATA.themes.current.d3pie.gradient_color)
+ },
+ canvasPadding: {
+ top: 5,
+ right: 5,
+ bottom: 5,
+ left: 5
+ },
+ pieCenterOffset: {
+ x: 0,
+ y: 0
+ },
+ cssPrefix: NETDATA.dataAttribute(state.element, 'd3pie-cssprefix', null)
+ },
+ callbacks: {
+ onload: null,
+ onMouseoverSegment: null,
+ onMouseoutSegment: null,
+ onClickSegment: null
+ }
+ };
+
+ state.d3pie_instance = new d3pie(state.element_chart, state.d3pie_options);
+ return true;
+};
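+
+// Usage sketch (illustrative markup only): the data-d3pie-* attribute names
+// match the NETDATA.dataAttribute() lookups above; the data-netdata value and
+// the remaining attributes are assumptions, not taken from this file:
+//
+//     <div data-netdata="example.chart"
+//          data-chart-library="d3pie"
+//          data-d3pie-title="My pie"
+//          data-d3pie-pieinnerradius="45%"></div>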
diff --git a/web/gui/src/dashboard.js/charting/dygraph.js b/web/gui/src/dashboard.js/charting/dygraph.js
new file mode 100644
index 0000000..4b44e7c
--- /dev/null
+++ b/web/gui/src/dashboard.js/charting/dygraph.js
@@ -0,0 +1,1065 @@
+// dygraph
+
+// Codacy declarations
+/* global smoothPlotter */
+/* global Dygraph */
+
+NETDATA.dygraph = {
+ smooth: false
+};
+
+NETDATA.dygraphToolboxPanAndZoom = function (state, after, before) {
+ if (after < state.netdata_first) {
+ after = state.netdata_first;
+ }
+
+ if (before > state.netdata_last) {
+ before = state.netdata_last;
+ }
+
+ state.setMode('zoom');
+ NETDATA.globalSelectionSync.stop();
+ NETDATA.globalSelectionSync.delay();
+ state.tmp.dygraph_user_action = true;
+ state.tmp.dygraph_force_zoom = true;
+ // state.log('toolboxPanAndZoom');
+ state.updateChartPanOrZoom(after, before);
+ NETDATA.globalPanAndZoom.setMaster(state, after, before);
+};
+
+NETDATA.dygraphSetSelection = function (state, t) {
+ if (typeof state.tmp.dygraph_instance !== 'undefined') {
+ let r = state.calculateRowForTime(t);
+ if (r !== -1) {
+ state.tmp.dygraph_instance.setSelection(r);
+ return true;
+ } else {
+ state.tmp.dygraph_instance.clearSelection();
+ state.legendShowUndefined();
+ }
+ }
+
+ return false;
+};
+
+NETDATA.dygraphClearSelection = function (state) {
+ if (typeof state.tmp.dygraph_instance !== 'undefined') {
+ state.tmp.dygraph_instance.clearSelection();
+ }
+ return true;
+};
+
+NETDATA.dygraphSmoothInitialize = function (callback) {
+ $.ajax({
+ url: NETDATA.dygraph_smooth_js,
+ cache: true,
+ dataType: "script",
+ xhrFields: {withCredentials: true} // required for the cookie
+ })
+ .done(function () {
+ NETDATA.dygraph.smooth = true;
+ smoothPlotter.smoothing = 0.3;
+ })
+ .fail(function () {
+ NETDATA.dygraph.smooth = false;
+ })
+ .always(function () {
+ if (typeof callback === "function") {
+ return callback();
+ }
+ });
+};
+
+NETDATA.dygraphInitialize = function (callback) {
+ if (typeof netdataNoDygraphs === 'undefined' || !netdataNoDygraphs) {
+ $.ajax({
+ url: NETDATA.dygraph_js,
+ cache: true,
+ dataType: "script",
+ xhrFields: {withCredentials: true} // required for the cookie
+ })
+ .done(function () {
+ NETDATA.registerChartLibrary('dygraph', NETDATA.dygraph_js);
+ })
+ .fail(function () {
+ NETDATA.chartLibraries.dygraph.enabled = false;
+ NETDATA.error(100, NETDATA.dygraph_js);
+ })
+ .always(function () {
+ if (NETDATA.chartLibraries.dygraph.enabled && NETDATA.options.current.smooth_plot) {
+ NETDATA.dygraphSmoothInitialize(callback);
+ } else if (typeof callback === "function") {
+ return callback();
+ }
+ });
+ } else {
+ NETDATA.chartLibraries.dygraph.enabled = false;
+ if (typeof callback === "function") {
+ return callback();
+ }
+ }
+};
+
+NETDATA.dygraphChartUpdate = function (state, data) {
+ let dygraph = state.tmp.dygraph_instance;
+
+ if (typeof dygraph === 'undefined') {
+ return NETDATA.dygraphChartCreate(state, data);
+ }
+
+    // when the chart is hidden and a window resize occurs,
+    // dygraph detects its element size as 0x0.
+    // resizing it here makes it re-appear properly.
+
+ if (state.tm.last_unhidden > state.tmp.dygraph_last_rendered) {
+ dygraph.resize();
+ }
+
+ let options = {
+ file: data.result.data,
+ colors: state.chartColors(),
+ labels: data.result.labels,
+ //labelsDivWidth: state.chartWidth() - 70,
+ includeZero: state.tmp.dygraph_include_zero,
+ visibility: state.dimensions_visibility.selected2BooleanArray(state.data.dimension_names)
+ };
+
+ if (state.tmp.dygraph_chart_type === 'stacked') {
+ if (options.includeZero && state.dimensions_visibility.countSelected() < options.visibility.length) {
+ options.includeZero = 0;
+ }
+ }
+
+ if (!NETDATA.chartLibraries.dygraph.isSparkline(state)) {
+ options.ylabel = state.units_current; // (state.units_desired === 'auto')?"":state.units_current;
+ }
+
+ if (state.tmp.dygraph_force_zoom) {
+ if (NETDATA.options.debug.dygraph || state.debug) {
+ state.log('dygraphChartUpdate() forced zoom update');
+ }
+
+ options.dateWindow = (state.requested_padding !== null) ? [state.view_after, state.view_before] : null;
+ //options.isZoomedIgnoreProgrammaticZoom = true;
+ state.tmp.dygraph_force_zoom = false;
+ } else if (state.current.name !== 'auto') {
+ if (NETDATA.options.debug.dygraph || state.debug) {
+ state.log('dygraphChartUpdate() loose update');
+ }
+ } else {
+ if (NETDATA.options.debug.dygraph || state.debug) {
+ state.log('dygraphChartUpdate() strict update');
+ }
+
+ options.dateWindow = (state.requested_padding !== null) ? [state.view_after, state.view_before] : null;
+ //options.isZoomedIgnoreProgrammaticZoom = true;
+ }
+
+ options.valueRange = state.tmp.dygraph_options.valueRange;
+
+ let oldMax = null, oldMin = null;
+ if (state.tmp.__commonMin !== null) {
+ state.data.min = state.tmp.dygraph_instance.axes_[0].extremeRange[0];
+ oldMin = options.valueRange[0] = NETDATA.commonMin.get(state);
+ }
+ if (state.tmp.__commonMax !== null) {
+ state.data.max = state.tmp.dygraph_instance.axes_[0].extremeRange[1];
+ oldMax = options.valueRange[1] = NETDATA.commonMax.get(state);
+ }
+
+ if (state.tmp.dygraph_smooth_eligible) {
+ if ((NETDATA.options.current.smooth_plot && state.tmp.dygraph_options.plotter !== smoothPlotter)
+ || (NETDATA.options.current.smooth_plot === false && state.tmp.dygraph_options.plotter === smoothPlotter)) {
+            return NETDATA.dygraphChartCreate(state, data);
+ }
+ }
+
+ if (netdataSnapshotData !== null && NETDATA.globalPanAndZoom.isActive() && NETDATA.globalPanAndZoom.isMaster(state) === false) {
+ // pan and zoom on snapshots
+ options.dateWindow = [NETDATA.globalPanAndZoom.force_after_ms, NETDATA.globalPanAndZoom.force_before_ms];
+ //options.isZoomedIgnoreProgrammaticZoom = true;
+ }
+
+ if (NETDATA.chartLibraries.dygraph.isLogScale(state)) {
+ if (Array.isArray(options.valueRange) && options.valueRange[0] <= 0) {
+ options.valueRange[0] = null;
+ }
+ }
+
+ dygraph.updateOptions(options);
+
+ let redraw = false;
+ if (oldMin !== null && oldMin > state.tmp.dygraph_instance.axes_[0].extremeRange[0]) {
+ state.data.min = state.tmp.dygraph_instance.axes_[0].extremeRange[0];
+ options.valueRange[0] = NETDATA.commonMin.get(state);
+ redraw = true;
+ }
+ if (oldMax !== null && oldMax < state.tmp.dygraph_instance.axes_[0].extremeRange[1]) {
+ state.data.max = state.tmp.dygraph_instance.axes_[0].extremeRange[1];
+ options.valueRange[1] = NETDATA.commonMax.get(state);
+ redraw = true;
+ }
+
+ if (redraw) {
+ // state.log('forcing redraw to adapt to common- min/max');
+ dygraph.updateOptions(options);
+ }
+
+ state.tmp.dygraph_last_rendered = Date.now();
+ return true;
+};
+
+NETDATA.dygraphChartCreate = function (state, data) {
+ if (NETDATA.options.debug.dygraph || state.debug) {
+ state.log('dygraphChartCreate()');
+ }
+
+ state.tmp.dygraph_chart_type = NETDATA.dataAttribute(state.element, 'dygraph-type', state.chart.chart_type);
+ if (state.tmp.dygraph_chart_type === 'stacked' && data.dimensions === 1) {
+ state.tmp.dygraph_chart_type = 'area';
+ }
+ if (state.tmp.dygraph_chart_type === 'stacked' && NETDATA.chartLibraries.dygraph.isLogScale(state)) {
+ state.tmp.dygraph_chart_type = 'area';
+ }
+
+ let highlightCircleSize = NETDATA.chartLibraries.dygraph.isSparkline(state) ? 3 : 4;
+
+ let smooth = NETDATA.dygraph.smooth
+ ? (NETDATA.dataAttributeBoolean(state.element, 'dygraph-smooth', (state.tmp.dygraph_chart_type === 'line' && NETDATA.chartLibraries.dygraph.isSparkline(state) === false)))
+ : false;
+
+ state.tmp.dygraph_include_zero = NETDATA.dataAttribute(state.element, 'dygraph-includezero', (state.tmp.dygraph_chart_type === 'stacked'));
+ let drawAxis = NETDATA.dataAttributeBoolean(state.element, 'dygraph-drawaxis', true);
+
+ state.tmp.dygraph_options = {
+ colors: NETDATA.dataAttribute(state.element, 'dygraph-colors', state.chartColors()),
+
+ // leave a few pixels empty on the right of the chart
+ rightGap: NETDATA.dataAttribute(state.element, 'dygraph-rightgap', 5),
+ showRangeSelector: NETDATA.dataAttributeBoolean(state.element, 'dygraph-showrangeselector', false),
+ showRoller: NETDATA.dataAttributeBoolean(state.element, 'dygraph-showroller', false),
+ title: NETDATA.dataAttribute(state.element, 'dygraph-title', state.title),
+ titleHeight: NETDATA.dataAttribute(state.element, 'dygraph-titleheight', 19),
+ legend: NETDATA.dataAttribute(state.element, 'dygraph-legend', 'always'), // we need this to get selection events
+ labels: data.result.labels,
+ labelsDiv: NETDATA.dataAttribute(state.element, 'dygraph-labelsdiv', state.element_legend_childs.hidden),
+ //labelsDivStyles: NETDATA.dataAttribute(state.element, 'dygraph-labelsdivstyles', { 'fontSize':'1px' }),
+ //labelsDivWidth: NETDATA.dataAttribute(state.element, 'dygraph-labelsdivwidth', state.chartWidth() - 70),
+ labelsSeparateLines: NETDATA.dataAttributeBoolean(state.element, 'dygraph-labelsseparatelines', true),
+ labelsShowZeroValues: NETDATA.chartLibraries.dygraph.isLogScale(state) ? false : NETDATA.dataAttributeBoolean(state.element, 'dygraph-labelsshowzerovalues', true),
+ labelsKMB: false,
+ labelsKMG2: false,
+ showLabelsOnHighlight: NETDATA.dataAttributeBoolean(state.element, 'dygraph-showlabelsonhighlight', true),
+ hideOverlayOnMouseOut: NETDATA.dataAttributeBoolean(state.element, 'dygraph-hideoverlayonmouseout', true),
+ includeZero: state.tmp.dygraph_include_zero,
+ xRangePad: NETDATA.dataAttribute(state.element, 'dygraph-xrangepad', 0),
+ yRangePad: NETDATA.dataAttribute(state.element, 'dygraph-yrangepad', 1),
+ valueRange: NETDATA.dataAttribute(state.element, 'dygraph-valuerange', [null, null]),
+ ylabel: state.units_current, // (state.units_desired === 'auto')?"":state.units_current,
+ yLabelWidth: NETDATA.dataAttribute(state.element, 'dygraph-ylabelwidth', 12),
+
+ // the function to plot the chart
+ plotter: null,
+
+ // The width of the lines connecting data points.
+        // This can be used to increase the contrast of some graphs.
+ strokeWidth: NETDATA.dataAttribute(state.element, 'dygraph-strokewidth', ((state.tmp.dygraph_chart_type === 'stacked') ? 0.1 : ((smooth === true) ? 1.5 : 0.7))),
+ strokePattern: NETDATA.dataAttribute(state.element, 'dygraph-strokepattern', undefined),
+
+ // The size of the dot to draw on each point in pixels (see drawPoints).
+ // A dot is always drawn when a point is "isolated",
+ // i.e. there is a missing point on either side of it.
+ // This also controls the size of those dots.
+ drawPoints: NETDATA.dataAttributeBoolean(state.element, 'dygraph-drawpoints', false),
+
+ // Draw points at the edges of gaps in the data.
+ // This improves visibility of small data segments or other data irregularities.
+ drawGapEdgePoints: NETDATA.dataAttributeBoolean(state.element, 'dygraph-drawgapedgepoints', true),
+ connectSeparatedPoints: NETDATA.chartLibraries.dygraph.isLogScale(state) ? false : NETDATA.dataAttributeBoolean(state.element, 'dygraph-connectseparatedpoints', false),
+ pointSize: NETDATA.dataAttribute(state.element, 'dygraph-pointsize', 1),
+
+        // enabling this renders the chart with stepped (little square) lines
+ stepPlot: NETDATA.dataAttributeBoolean(state.element, 'dygraph-stepplot', false),
+
+ // Draw a border around graph lines to make crossing lines more easily
+ // distinguishable. Useful for graphs with many lines.
+ strokeBorderColor: NETDATA.dataAttribute(state.element, 'dygraph-strokebordercolor', NETDATA.themes.current.background),
+ strokeBorderWidth: NETDATA.dataAttribute(state.element, 'dygraph-strokeborderwidth', (state.tmp.dygraph_chart_type === 'stacked') ? 0.0 : 0.0),
+ fillGraph: NETDATA.dataAttribute(state.element, 'dygraph-fillgraph', (state.tmp.dygraph_chart_type === 'area' || state.tmp.dygraph_chart_type === 'stacked')),
+ fillAlpha: NETDATA.dataAttribute(state.element, 'dygraph-fillalpha',
+ ((state.tmp.dygraph_chart_type === 'stacked')
+ ? NETDATA.options.current.color_fill_opacity_stacked
+ : NETDATA.options.current.color_fill_opacity_area)
+ ),
+ stackedGraph: NETDATA.dataAttribute(state.element, 'dygraph-stackedgraph', (state.tmp.dygraph_chart_type === 'stacked')),
+ stackedGraphNaNFill: NETDATA.dataAttribute(state.element, 'dygraph-stackedgraphnanfill', 'none'),
+ drawAxis: drawAxis,
+ axisLabelFontSize: NETDATA.dataAttribute(state.element, 'dygraph-axislabelfontsize', 10),
+ axisLineColor: NETDATA.dataAttribute(state.element, 'dygraph-axislinecolor', NETDATA.themes.current.axis),
+ axisLineWidth: NETDATA.dataAttribute(state.element, 'dygraph-axislinewidth', 1.0),
+ drawGrid: NETDATA.dataAttributeBoolean(state.element, 'dygraph-drawgrid', true),
+ gridLinePattern: NETDATA.dataAttribute(state.element, 'dygraph-gridlinepattern', null),
+ gridLineWidth: NETDATA.dataAttribute(state.element, 'dygraph-gridlinewidth', 1.0),
+ gridLineColor: NETDATA.dataAttribute(state.element, 'dygraph-gridlinecolor', NETDATA.themes.current.grid),
+ maxNumberWidth: NETDATA.dataAttribute(state.element, 'dygraph-maxnumberwidth', 8),
+ sigFigs: NETDATA.dataAttribute(state.element, 'dygraph-sigfigs', null),
+ digitsAfterDecimal: NETDATA.dataAttribute(state.element, 'dygraph-digitsafterdecimal', 2),
+ valueFormatter: NETDATA.dataAttribute(state.element, 'dygraph-valueformatter', undefined),
+ highlightCircleSize: NETDATA.dataAttribute(state.element, 'dygraph-highlightcirclesize', highlightCircleSize),
+ highlightSeriesOpts: NETDATA.dataAttribute(state.element, 'dygraph-highlightseriesopts', null), // TOO SLOW: { strokeWidth: 1.5 },
+ highlightSeriesBackgroundAlpha: NETDATA.dataAttribute(state.element, 'dygraph-highlightseriesbackgroundalpha', null), // TOO SLOW: (state.tmp.dygraph_chart_type === 'stacked')?0.7:0.5,
+ pointClickCallback: NETDATA.dataAttribute(state.element, 'dygraph-pointclickcallback', undefined),
+ visibility: state.dimensions_visibility.selected2BooleanArray(state.data.dimension_names),
+ logscale: NETDATA.chartLibraries.dygraph.isLogScale(state) ? 'y' : undefined,
+
+ // Expects a string in the format "<series name>: <style>" where each series is separated by a |
+ perSeriesStyle: NETDATA.dataAttribute(state.element, 'dygraph-per-series-style', ''),
+
+ axes: {
+ x: {
+ pixelsPerLabel: NETDATA.dataAttribute(state.element, 'dygraph-xpixelsperlabel', 50),
+ ticker: Dygraph.dateTicker,
+ axisLabelWidth: NETDATA.dataAttribute(state.element, 'dygraph-xaxislabelwidth', 60),
+ drawAxis: NETDATA.dataAttributeBoolean(state.element, 'dygraph-drawxaxis', drawAxis),
+ axisLabelFormatter: function (d, gran) {
+ void(gran);
+ return NETDATA.dateTime.xAxisTimeString(d);
+ }
+ },
+ y: {
+ logscale: NETDATA.chartLibraries.dygraph.isLogScale(state) ? true : undefined,
+ pixelsPerLabel: NETDATA.dataAttribute(state.element, 'dygraph-ypixelsperlabel', 15),
+ axisLabelWidth: NETDATA.dataAttribute(state.element, 'dygraph-yaxislabelwidth', 50),
+ drawAxis: NETDATA.dataAttributeBoolean(state.element, 'dygraph-drawyaxis', drawAxis),
+ axisLabelFormatter: function (y) {
+
+ // unfortunately, we have to call this every single time
+ state.legendFormatValueDecimalsFromMinMax(
+ this.axes_[0].extremeRange[0],
+ this.axes_[0].extremeRange[1]
+ );
+
+ let old_units = this.user_attrs_.ylabel;
+ let v = state.legendFormatValue(y);
+ let new_units = state.units_current;
+
+ if (state.units_desired === 'auto' && typeof old_units !== 'undefined' && new_units !== old_units && !NETDATA.chartLibraries.dygraph.isSparkline(state)) {
+ // console.log(this);
+ // state.log('units discrepancy: old = ' + old_units + ', new = ' + new_units);
+ let len = this.plugins_.length;
+ while (len--) {
+ // console.log(this.plugins_[len]);
+ if (typeof this.plugins_[len].plugin.ylabel_div_ !== 'undefined'
+ && this.plugins_[len].plugin.ylabel_div_ !== null
+ && typeof this.plugins_[len].plugin.ylabel_div_.children !== 'undefined'
+ && this.plugins_[len].plugin.ylabel_div_.children !== null
+ && typeof this.plugins_[len].plugin.ylabel_div_.children[0].children !== 'undefined'
+ && this.plugins_[len].plugin.ylabel_div_.children[0].children !== null
+ ) {
+ this.plugins_[len].plugin.ylabel_div_.children[0].children[0].innerHTML = new_units;
+ this.user_attrs_.ylabel = new_units;
+ break;
+ }
+ }
+
+ if (len < 0) {
+ state.log('units discrepancy, but cannot find dygraphs div to change: old = ' + old_units + ', new = ' + new_units);
+ }
+ }
+
+ return v;
+ }
+ }
+ },
+ legendFormatter: function (data) {
+ if (state.tmp.dygraph_mouse_down) {
+ return;
+ }
+
+ let elements = state.element_legend_childs;
+
+ // if the hidden div is not there
+ // we are not managing the legend
+ if (elements.hidden === null) {
+ return;
+ }
+
+ if (typeof data.x !== 'undefined') {
+ state.legendSetDate(data.x);
+ let i = data.series.length;
+ while (i--) {
+ let series = data.series[i];
+ if (series.isVisible) {
+ state.legendSetLabelValue(series.label, series.y);
+ } else {
+ state.legendSetLabelValue(series.label, null);
+ }
+ }
+ }
+
+ return '';
+ },
+ drawCallback: function (dygraph, is_initial) {
+
+ // the user has panned the chart and this is called to re-draw the chart
+ // 1. refresh this chart by adding data to it
+ // 2. notify all the other charts about the update they need
+
+ // to prevent an infinite loop (feedback), we use
+ // state.tmp.dygraph_user_action
+ // - when true, this is initiated by a user
+ // - when false, this is feedback
+
+ if (state.current.name !== 'auto' && state.tmp.dygraph_user_action) {
+ state.tmp.dygraph_user_action = false;
+
+ let x_range = dygraph.xAxisRange();
+ let after = Math.round(x_range[0]);
+ let before = Math.round(x_range[1]);
+
+ if (NETDATA.options.debug.dygraph) {
+ state.log('dygraphDrawCallback(dygraph, ' + is_initial + '): mode ' + state.current.name + ' ' + (after / 1000).toString() + ' - ' + (before / 1000).toString());
+ //console.log(state);
+ }
+
+ if (before <= state.netdata_last && after >= state.netdata_first) {
+ // update only when we are within the data limits
+ state.updateChartPanOrZoom(after, before);
+ }
+ }
+ },
+ zoomCallback: function (minDate, maxDate, yRanges) {
+
+ // the user has selected a range on the chart
+ // 1. refresh this chart by adding data to it
+ // 2. notify all the other charts about the update they need
+
+ void(yRanges);
+
+ if (NETDATA.options.debug.dygraph) {
+ state.log('dygraphZoomCallback(): ' + state.current.name);
+ }
+
+ NETDATA.globalSelectionSync.stop();
+ NETDATA.globalSelectionSync.delay();
+ state.setMode('zoom');
+
+ // refresh it to the greatest possible zoom level
+ state.tmp.dygraph_user_action = true;
+ state.tmp.dygraph_force_zoom = true;
+ state.updateChartPanOrZoom(minDate, maxDate);
+ },
+ highlightCallback: function (event, x, points, row, seriesName) {
+ void(seriesName);
+
+ state.pauseChart();
+
+            // there is a bug in dygraph: when the chart is zoomed enough,
+            // the time it thinks is selected is wrong.
+            // here we calculate the time t based on the selected row number,
+            // which is reliable.
+ // let t = state.data_after + row * state.data_update_every;
+ // console.log('row = ' + row + ', x = ' + x + ', t = ' + t + ' ' + ((t === x)?'SAME':(Math.abs(x-t)<=state.data_update_every)?'SIMILAR':'DIFFERENT') + ', rows in db: ' + state.data_points + ' visible(x) = ' + state.timeIsVisible(x) + ' visible(t) = ' + state.timeIsVisible(t) + ' r(x) = ' + state.calculateRowForTime(x) + ' r(t) = ' + state.calculateRowForTime(t) + ' range: ' + state.data_after + ' - ' + state.data_before + ' real: ' + state.data.after + ' - ' + state.data.before + ' every: ' + state.data_update_every);
+
+ if (state.tmp.dygraph_mouse_down !== true) {
+ NETDATA.globalSelectionSync.sync(state, x);
+ }
+
+ // fix legend zIndex using the internal structures of dygraph legend module
+ // this works, but it is a hack!
+ // state.tmp.dygraph_instance.plugins_[0].plugin.legend_div_.style.zIndex = 10000;
+ },
+ unhighlightCallback: function (event) {
+ void(event);
+
+ if (state.tmp.dygraph_mouse_down) {
+ return;
+ }
+
+ if (NETDATA.options.debug.dygraph || state.debug) {
+ state.log('dygraphUnhighlightCallback()');
+ }
+
+ state.unpauseChart();
+ NETDATA.globalSelectionSync.stop();
+ },
+ underlayCallback: function (canvas, area, g) {
+ // the chart is about to be drawn
+
+ // update history_tip_element
+ if (state.tmp.dygraph_history_tip_element) {
+ const xHookRightSide = g.toDomXCoord(state.netdata_first);
+ if (xHookRightSide > area.x) {
+ state.tmp.dygraph_history_tip_element_displayed = true;
+ // group the styles for possible better performance
+ state.tmp.dygraph_history_tip_element.setAttribute(
+ 'style',
+ `display: block; left: ${area.x}px; right: calc(100% - ${xHookRightSide}px);`
+ )
+ } else {
+ if (state.tmp.dygraph_history_tip_element_displayed) {
+ // additional check just for performance
+ // don't update the DOM when it's not needed
+ state.tmp.dygraph_history_tip_element.style.display = 'none';
+ state.tmp.dygraph_history_tip_element_displayed = false;
+ }
+ }
+ }
+
+ // this function renders global highlighted time-frame
+
+ if (NETDATA.globalChartUnderlay.isActive()) {
+ let after = NETDATA.globalChartUnderlay.after;
+ let before = NETDATA.globalChartUnderlay.before;
+
+ if (after < state.view_after) {
+ after = state.view_after;
+ }
+
+ if (before > state.view_before) {
+ before = state.view_before;
+ }
+
+ if (after < before) {
+ let bottom_left = g.toDomCoords(after, -20);
+ let top_right = g.toDomCoords(before, +20);
+
+ let left = bottom_left[0];
+ let right = top_right[0];
+
+ canvas.fillStyle = NETDATA.themes.current.highlight;
+ canvas.fillRect(left, area.y, right - left, area.h);
+ }
+ }
+ },
+ interactionModel: {
+ mousedown: function (event, dygraph, context) {
+ if (NETDATA.options.debug.dygraph || state.debug) {
+ state.log('interactionModel.mousedown()');
+ }
+
+ state.tmp.dygraph_user_action = true;
+
+ if (NETDATA.options.debug.dygraph) {
+ state.log('dygraphMouseDown()');
+ }
+
+ // Right-click should not initiate anything.
+ if (event.button && event.button === 2) {
+ return;
+ }
+
+ NETDATA.globalSelectionSync.stop();
+ NETDATA.globalSelectionSync.delay();
+
+ state.tmp.dygraph_mouse_down = true;
+ context.initializeMouseDown(event, dygraph, context);
+
+ //console.log(event);
+ if (event.button && event.button === 1) {
+ if (event.shiftKey) {
+ //console.log('middle mouse button dragging (PAN)');
+
+ state.setMode('pan');
+ // NETDATA.globalSelectionSync.delay();
+ state.tmp.dygraph_highlight_after = null;
+ Dygraph.startPan(event, dygraph, context);
+ } else if (event.altKey || event.ctrlKey || event.metaKey) {
+ //console.log('middle mouse button highlight');
+
+ if (!(event.offsetX && event.offsetY)) {
+ event.offsetX = event.layerX - event.target.offsetLeft;
+ event.offsetY = event.layerY - event.target.offsetTop;
+ }
+ state.tmp.dygraph_highlight_after = dygraph.toDataXCoord(event.offsetX);
+ Dygraph.startZoom(event, dygraph, context);
+ } else {
+ //console.log('middle mouse button selection for zoom (ZOOM)');
+
+ state.setMode('zoom');
+ // NETDATA.globalSelectionSync.delay();
+ state.tmp.dygraph_highlight_after = null;
+ Dygraph.startZoom(event, dygraph, context);
+ }
+ } else {
+ if (event.shiftKey) {
+ //console.log('left mouse button selection for zoom (ZOOM)');
+
+ state.setMode('zoom');
+ // NETDATA.globalSelectionSync.delay();
+ state.tmp.dygraph_highlight_after = null;
+ Dygraph.startZoom(event, dygraph, context);
+ } else if (event.altKey || event.ctrlKey || event.metaKey) {
+ //console.log('left mouse button highlight');
+
+ if (!(event.offsetX && event.offsetY)) {
+ event.offsetX = event.layerX - event.target.offsetLeft;
+ event.offsetY = event.layerY - event.target.offsetTop;
+ }
+ state.tmp.dygraph_highlight_after = dygraph.toDataXCoord(event.offsetX);
+ Dygraph.startZoom(event, dygraph, context);
+ } else {
+ //console.log('left mouse button dragging (PAN)');
+
+ state.setMode('pan');
+ // NETDATA.globalSelectionSync.delay();
+ state.tmp.dygraph_highlight_after = null;
+ Dygraph.startPan(event, dygraph, context);
+ }
+ }
+ },
+ mousemove: function (event, dygraph, context) {
+ if (NETDATA.options.debug.dygraph || state.debug) {
+ state.log('interactionModel.mousemove()');
+ }
+
+ if (state.tmp.dygraph_highlight_after !== null) {
+ //console.log('highlight selection...');
+
+ NETDATA.globalSelectionSync.stop();
+ NETDATA.globalSelectionSync.delay();
+
+ state.tmp.dygraph_user_action = true;
+ Dygraph.moveZoom(event, dygraph, context);
+ event.preventDefault();
+ } else if (context.isPanning) {
+ //console.log('panning...');
+
+ NETDATA.globalSelectionSync.stop();
+ NETDATA.globalSelectionSync.delay();
+
+ state.tmp.dygraph_user_action = true;
+ //NETDATA.globalSelectionSync.stop();
+ //NETDATA.globalSelectionSync.delay();
+ state.setMode('pan');
+ context.is2DPan = false;
+ Dygraph.movePan(event, dygraph, context);
+ } else if (context.isZooming) {
+ //console.log('zooming...');
+
+ NETDATA.globalSelectionSync.stop();
+ NETDATA.globalSelectionSync.delay();
+
+ state.tmp.dygraph_user_action = true;
+ //NETDATA.globalSelectionSync.stop();
+ //NETDATA.globalSelectionSync.delay();
+ state.setMode('zoom');
+ Dygraph.moveZoom(event, dygraph, context);
+ }
+ },
+ mouseup: function (event, dygraph, context) {
+ state.tmp.dygraph_mouse_down = false;
+
+ if (NETDATA.options.debug.dygraph || state.debug) {
+ state.log('interactionModel.mouseup()');
+ }
+
+ if (state.tmp.dygraph_highlight_after !== null) {
+ //console.log('done highlight selection');
+
+ NETDATA.globalSelectionSync.stop();
+ NETDATA.globalSelectionSync.delay();
+
+ if (!(event.offsetX && event.offsetY)) {
+ event.offsetX = event.layerX - event.target.offsetLeft;
+ event.offsetY = event.layerY - event.target.offsetTop;
+ }
+
+ NETDATA.globalChartUnderlay.set(state
+ , state.tmp.dygraph_highlight_after
+ , dygraph.toDataXCoord(event.offsetX)
+ , state.view_after
+ , state.view_before
+ );
+
+ state.tmp.dygraph_highlight_after = null;
+
+ context.isZooming = false;
+ dygraph.clearZoomRect_();
+ dygraph.drawGraph_(false);
+
+ // refresh all the charts immediately
+ NETDATA.options.auto_refresher_stop_until = 0;
+ } else if (context.isPanning) {
+ //console.log('done panning');
+
+ NETDATA.globalSelectionSync.stop();
+ NETDATA.globalSelectionSync.delay();
+
+ state.tmp.dygraph_user_action = true;
+ Dygraph.endPan(event, dygraph, context);
+
+ // refresh all the charts immediately
+ NETDATA.options.auto_refresher_stop_until = 0;
+ } else if (context.isZooming) {
+                //console.log('done zooming');
+
+ NETDATA.globalSelectionSync.stop();
+ NETDATA.globalSelectionSync.delay();
+
+ state.tmp.dygraph_user_action = true;
+ Dygraph.endZoom(event, dygraph, context);
+
+ // refresh all the charts immediately
+ NETDATA.options.auto_refresher_stop_until = 0;
+ }
+ },
+ click: function (event, dygraph, context) {
+ void(dygraph);
+ void(context);
+
+ if (NETDATA.options.debug.dygraph || state.debug) {
+ state.log('interactionModel.click()');
+ }
+
+ event.preventDefault();
+ },
+ dblclick: function (event, dygraph, context) {
+ void(event);
+ void(dygraph);
+ void(context);
+
+ if (NETDATA.options.debug.dygraph || state.debug) {
+ state.log('interactionModel.dblclick()');
+ }
+ NETDATA.resetAllCharts(state);
+ },
+ wheel: function (event, dygraph, context) {
+ void(context);
+
+ if (NETDATA.options.debug.dygraph || state.debug) {
+ state.log('interactionModel.wheel()');
+ }
+
+ // Take the offset of a mouse event on the dygraph canvas and
+ // convert it to a pair of percentages from the bottom left.
+ // (Not top left, bottom is where the lower value is.)
+ function offsetToPercentage(g, offsetX, offsetY) {
+ // This is calculating the pixel offset of the leftmost date.
+ let xOffset = g.toDomCoords(g.xAxisRange()[0], null)[0];
+ let yar0 = g.yAxisRange(0);
+
+ // This is calculating the pixel of the highest value. (Top pixel)
+ let yOffset = g.toDomCoords(null, yar0[1])[1];
+
+ // x y w and h are relative to the corner of the drawing area,
+ // so that the upper corner of the drawing area is (0, 0).
+ let x = offsetX - xOffset;
+ let y = offsetY - yOffset;
+
+ // This is computing the rightmost pixel, effectively defining the
+ // width.
+ let w = g.toDomCoords(g.xAxisRange()[1], null)[0] - xOffset;
+
+ // This is computing the lowest pixel, effectively defining the height.
+ let h = g.toDomCoords(null, yar0[0])[1] - yOffset;
+
+ // Percentage from the left.
+ let xPct = w === 0 ? 0 : (x / w);
+ // Percentage from the top.
+ let yPct = h === 0 ? 0 : (y / h);
+
+ // The (1-) part below changes it from "% distance down from the top"
+ // to "% distance up from the bottom".
+ return [xPct, (1 - yPct)];
+ }
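+
+                    // Worked example (illustrative numbers): for a 600px-wide
+                    // drawing area with the cursor 150px from its left edge and
+                    // at the vertical middle, this returns [0.25, 0.5].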
+
+                    // Adjusts [x, y] toward each other by zoomInPercentage%.
+                    // Split it so the left/bottom axis gets xBias/yBias of that change and
+                    // right/top gets (1-xBias)/(1-yBias) of that change.
+ //
+ // If a bias is missing it splits it down the middle.
+ function zoomRange(g, zoomInPercentage, xBias, yBias) {
+ xBias = xBias || 0.5;
+ yBias = yBias || 0.5;
+
+ function adjustAxis(axis, zoomInPercentage, bias) {
+ let delta = axis[1] - axis[0];
+ let increment = delta * zoomInPercentage;
+ let foo = [increment * bias, increment * (1 - bias)];
+
+ return [axis[0] + foo[0], axis[1] - foo[1]];
+ }
+
+ let yAxes = g.yAxisRanges();
+ let newYAxes = [];
+ for (let i = 0; i < yAxes.length; i++) {
+ newYAxes[i] = adjustAxis(yAxes[i], zoomInPercentage, yBias);
+ }
+
+ return adjustAxis(g.xAxisRange(), zoomInPercentage, xBias);
+ }
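+
+                    // Worked example (illustrative numbers): with an x-axis
+                    // range of [0, 100], zoomInPercentage 0.1 and bias 0.5,
+                    // adjustAxis() returns [5, 95], i.e. a 10% zoom split
+                    // evenly between both ends.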
+
+ if (event.altKey || event.shiftKey) {
+ state.tmp.dygraph_user_action = true;
+
+ NETDATA.globalSelectionSync.stop();
+ NETDATA.globalSelectionSync.delay();
+
+ // http://dygraphs.com/gallery/interaction-api.js
+ let normal_def;
+                    if (typeof event.wheelDelta === 'number' && !isNaN(event.wheelDelta)) {
+                        // chrome
+                        normal_def = event.wheelDelta / 40;
+                    } else {
+                        // firefox
+                        normal_def = event.deltaY * -1.2;
+                    }
+
+ let normal = (event.detail) ? event.detail * -1 : normal_def;
+ let percentage = normal / 50;
+
+ if (!(event.offsetX && event.offsetY)) {
+ event.offsetX = event.layerX - event.target.offsetLeft;
+ event.offsetY = event.layerY - event.target.offsetTop;
+ }
+
+ let percentages = offsetToPercentage(dygraph, event.offsetX, event.offsetY);
+ let xPct = percentages[0];
+ let yPct = percentages[1];
+
+ let new_x_range = zoomRange(dygraph, percentage, xPct, yPct);
+ let after = new_x_range[0];
+ let before = new_x_range[1];
+
+ let first = state.netdata_first + state.data_update_every;
+ let last = state.netdata_last + state.data_update_every;
+
+ if (before > last) {
+ after -= (before - last);
+ before = last;
+ }
+ if (after < first) {
+ after = first;
+ }
+
+ state.setMode('zoom');
+ state.updateChartPanOrZoom(after, before, function () {
+ dygraph.updateOptions({dateWindow: [after, before]});
+ });
+
+ event.preventDefault();
+ }
+ },
+ touchstart: function (event, dygraph, context) {
+ state.tmp.dygraph_mouse_down = true;
+
+ if (NETDATA.options.debug.dygraph || state.debug) {
+ state.log('interactionModel.touchstart()');
+ }
+
+ state.tmp.dygraph_user_action = true;
+ state.setMode('zoom');
+ state.pauseChart();
+
+ NETDATA.globalSelectionSync.stop();
+ NETDATA.globalSelectionSync.delay();
+
+ Dygraph.defaultInteractionModel.touchstart(event, dygraph, context);
+
+                // we overwrite the touch directions at the end,
+                // to override dygraph's internal default
+ context.touchDirections = {x: true, y: false};
+
+ state.dygraph_last_touch_start = Date.now();
+ state.dygraph_last_touch_move = 0;
+
+ if (typeof event.touches[0].pageX === 'number') {
+ state.dygraph_last_touch_page_x = event.touches[0].pageX;
+ } else {
+ state.dygraph_last_touch_page_x = 0;
+ }
+ },
+ touchmove: function (event, dygraph, context) {
+ if (NETDATA.options.debug.dygraph || state.debug) {
+ state.log('interactionModel.touchmove()');
+ }
+
+ NETDATA.globalSelectionSync.stop();
+ NETDATA.globalSelectionSync.delay();
+
+ state.tmp.dygraph_user_action = true;
+ Dygraph.defaultInteractionModel.touchmove(event, dygraph, context);
+
+ state.dygraph_last_touch_move = Date.now();
+ },
+ touchend: function (event, dygraph, context) {
+ state.tmp.dygraph_mouse_down = false;
+
+ if (NETDATA.options.debug.dygraph || state.debug) {
+ state.log('interactionModel.touchend()');
+ }
+
+ NETDATA.globalSelectionSync.stop();
+ NETDATA.globalSelectionSync.delay();
+
+ state.tmp.dygraph_user_action = true;
+ Dygraph.defaultInteractionModel.touchend(event, dygraph, context);
+
+ // if it didn't move, it is a selection
+ if (state.dygraph_last_touch_move === 0 && state.dygraph_last_touch_page_x !== 0) {
+ NETDATA.globalSelectionSync.dontSyncBefore = 0;
+ NETDATA.globalSelectionSync.setMaster(state);
+
+ // internal api of dygraph
+ let pct = (state.dygraph_last_touch_page_x - (dygraph.plotter_.area.x + state.element.getBoundingClientRect().left)) / dygraph.plotter_.area.w;
+                    // console.log('pct: ' + pct.toString());
+
+ let t = Math.round(state.view_after + (state.view_before - state.view_after) * pct);
+ if (NETDATA.dygraphSetSelection(state, t)) {
+ NETDATA.globalSelectionSync.sync(state, t);
+ }
+ }
+
+                // if it was a double tap within the double-click time, reset the charts
+ let now = Date.now();
+ if (typeof state.dygraph_last_touch_end !== 'undefined') {
+ if (state.dygraph_last_touch_move === 0) {
+ let dt = now - state.dygraph_last_touch_end;
+ if (dt <= NETDATA.options.current.double_click_speed) {
+ NETDATA.resetAllCharts(state);
+ }
+ }
+ }
+
+ // remember the timestamp of the last touch end
+ state.dygraph_last_touch_end = now;
+
+ // refresh all the charts immediately
+ NETDATA.options.auto_refresher_stop_until = 0;
+ }
+ }
+ };
+
+ if (NETDATA.chartLibraries.dygraph.isLogScale(state)) {
+ if (Array.isArray(state.tmp.dygraph_options.valueRange) && state.tmp.dygraph_options.valueRange[0] <= 0) {
+ state.tmp.dygraph_options.valueRange[0] = null;
+ }
+ }
+
+ if (NETDATA.chartLibraries.dygraph.isSparkline(state)) {
+ state.tmp.dygraph_options.drawGrid = false;
+ state.tmp.dygraph_options.drawAxis = false;
+ state.tmp.dygraph_options.title = undefined;
+ state.tmp.dygraph_options.ylabel = undefined;
+ state.tmp.dygraph_options.yLabelWidth = 0;
+ //state.tmp.dygraph_options.labelsDivWidth = 120;
+ //state.tmp.dygraph_options.labelsDivStyles.width = '120px';
+ state.tmp.dygraph_options.labelsSeparateLines = true;
+ state.tmp.dygraph_options.rightGap = 0;
+ state.tmp.dygraph_options.yRangePad = 1;
+ state.tmp.dygraph_options.axes.x.drawAxis = false;
+ state.tmp.dygraph_options.axes.y.drawAxis = false;
+ }
+
+ if (smooth) {
+ state.tmp.dygraph_smooth_eligible = true;
+
+ if (NETDATA.options.current.smooth_plot) {
+ state.tmp.dygraph_options.plotter = smoothPlotter;
+ }
+ }
+ else {
+ state.tmp.dygraph_smooth_eligible = false;
+ }
+
+ if (netdataSnapshotData !== null && NETDATA.globalPanAndZoom.isActive() && NETDATA.globalPanAndZoom.isMaster(state) === false) {
+ // pan and zoom on snapshots
+ state.tmp.dygraph_options.dateWindow = [NETDATA.globalPanAndZoom.force_after_ms, NETDATA.globalPanAndZoom.force_before_ms];
+ //state.tmp.dygraph_options.isZoomedIgnoreProgrammaticZoom = true;
+ }
+
+ let seriesStyles = NETDATA.dygraphGetSeriesStyle(state.tmp.dygraph_options);
+ state.tmp.dygraph_options.series = seriesStyles;
+
+ state.tmp.dygraph_instance = new Dygraph(
+ state.element_chart,
+ data.result.data,
+ state.tmp.dygraph_options
+ );
+
+ state.tmp.dygraph_history_tip_element = document.createElement('div');
+ state.tmp.dygraph_history_tip_element.innerHTML = `
+ <span class="dygraph__history-tip-content">
+ Want to extend your history of real-time metrics?
+ <br />
+ <a href="https://docs.netdata.cloud/docs/configuration-guide/#increase-the-metrics-retention-period" target=_blank>
+ Configure Netdata's <b>history</b></a>
+ or use the <a href="https://docs.netdata.cloud/database/engine/" target=_blank>DB engine</a>.
+ </span>
+ `;
+ state.tmp.dygraph_history_tip_element.className = 'dygraph__history-tip';
+ state.element_chart.appendChild(state.tmp.dygraph_history_tip_element);
+
+
+ state.tmp.dygraph_force_zoom = false;
+ state.tmp.dygraph_user_action = false;
+ state.tmp.dygraph_last_rendered = Date.now();
+ state.tmp.dygraph_highlight_after = null;
+
+ if (state.tmp.dygraph_options.valueRange[0] === null && state.tmp.dygraph_options.valueRange[1] === null) {
+ if (typeof state.tmp.dygraph_instance.axes_[0].extremeRange !== 'undefined') {
+ state.tmp.__commonMin = NETDATA.dataAttribute(state.element, 'common-min', null);
+ state.tmp.__commonMax = NETDATA.dataAttribute(state.element, 'common-max', null);
+ } else {
+ state.log('incompatible version of Dygraph detected');
+ state.tmp.__commonMin = null;
+ state.tmp.__commonMax = null;
+ }
+ } else {
+ // if the user gave a valueRange, respect it
+ state.tmp.__commonMin = null;
+ state.tmp.__commonMax = null;
+ }
+
+ return true;
+};
+
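+// Usage sketch (illustrative markup only): the data-dygraph-* attribute names
+// match the NETDATA.dataAttribute() lookups above; the data-netdata value and
+// data-chart-library are assumptions, not taken from this file:
+//
+//     <div data-netdata="example.chart"
+//          data-chart-library="dygraph"
+//          data-dygraph-type="area"
+//          data-dygraph-per-series-style="in:line|out:area"></div>
+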
+NETDATA.dygraphGetSeriesStyle = function(dygraphOptions) {
+ const seriesStyleStr = dygraphOptions.perSeriesStyle;
+ let formattedStyles = {};
+
+ if (seriesStyleStr === '') {
+ return formattedStyles;
+ }
+
+    // Strip spaces and split the config string into per-series entries
+    let styles = seriesStyleStr.replace(/ /g, '').split('|');
+
+ styles.forEach(style => {
+ const keys = style.split(':');
+ formattedStyles[keys[0]] = keys[1];
+ });
+
+ for (let key in formattedStyles) {
+ if (formattedStyles.hasOwnProperty(key)) {
+ let settings;
+
+ switch (formattedStyles[key]) {
+ case 'line':
+ settings = { fillGraph: false };
+ break;
+ case 'area':
+ settings = { fillGraph: true };
+ break;
+ case 'dot':
+ settings = {
+ fillGraph: false,
+ drawPoints: true,
+ pointSize: dygraphOptions.pointSize
+ };
+ break;
+ default:
+ settings = undefined;
+ }
+
+ formattedStyles[key] = settings;
+ }
+ }
+
+ return formattedStyles;
+};
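+
+// For reference (illustrative input): a style string such as
+// 'in:line|out:area|used:dot' is parsed into:
+//
+//     {
+//         in:   { fillGraph: false },
+//         out:  { fillGraph: true },
+//         used: { fillGraph: false, drawPoints: true, pointSize: dygraphOptions.pointSize }
+//     }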
diff --git a/web/gui/src/dashboard.js/charting/easy-pie-chart.js b/web/gui/src/dashboard.js/charting/easy-pie-chart.js
new file mode 100644
index 0000000..6905a10
--- /dev/null
+++ b/web/gui/src/dashboard.js/charting/easy-pie-chart.js
@@ -0,0 +1,281 @@
+// ----------------------------------------------------------------------------------------------------------------
+
+NETDATA.easypiechartPercentFromValueMinMax = function (state, value, min, max) {
+ if (typeof value !== 'number') {
+ value = 0;
+ }
+ if (typeof min !== 'number') {
+ min = 0;
+ }
+ if (typeof max !== 'number') {
+ max = 0;
+ }
+
+ if (min > max) {
+ let t = min;
+ min = max;
+ max = t;
+ }
+
+ if (min > value) {
+ min = value;
+ }
+ if (max < value) {
+ max = value;
+ }
+
+ state.legendFormatValueDecimalsFromMinMax(min, max);
+
+ if (state.tmp.easyPieChartMin === null && min > 0) {
+ min = 0;
+ }
+ if (state.tmp.easyPieChartMax === null && max < 0) {
+ max = 0;
+ }
+
+ let pcent;
+
+ if (min < 0 && max > 0) {
+ // it is both positive and negative
+ // zero at the top center of the chart
+ max = (-min > max) ? -min : max;
+ pcent = Math.round(value * 100 / max);
+ } else if (value >= 0 && min >= 0 && max >= 0) {
+ // clockwise
+ pcent = Math.round((value - min) * 100 / (max - min));
+ if (pcent === 0) {
+ pcent = 0.1;
+ }
+ } else {
+ // counter clockwise
+ pcent = Math.round((value - max) * 100 / (max - min));
+ if (pcent === 0) {
+ pcent = -0.1;
+ }
+ }
+
+ return pcent;
+};
+
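+// Worked examples (illustrative numbers, assuming the user set explicit
+// min/max so the zero-based adjustments above do not apply):
+//   value 25,  min 0,    max 100 -> clockwise:         pcent = 25
+//   value -25, min -100, max 0   -> counter-clockwise: pcent = -25
+//   value 50,  min -100, max 100 -> mixed sign:        pcent = 50
+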
+// ----------------------------------------------------------------------------------------------------------------
+// easy-pie-chart
+
+NETDATA.easypiechartInitialize = function (callback) {
+ if (typeof netdataNoEasyPieChart === 'undefined' || !netdataNoEasyPieChart) {
+ $.ajax({
+ url: NETDATA.easypiechart_js,
+ cache: true,
+ dataType: "script",
+ xhrFields: {withCredentials: true} // required for the cookie
+ })
+ .done(function () {
+ NETDATA.registerChartLibrary('easypiechart', NETDATA.easypiechart_js);
+ })
+ .fail(function () {
+ NETDATA.chartLibraries.easypiechart.enabled = false;
+ NETDATA.error(100, NETDATA.easypiechart_js);
+ })
+ .always(function () {
+ if (typeof callback === "function") {
+ return callback();
+ }
+ })
+ } else {
+ NETDATA.chartLibraries.easypiechart.enabled = false;
+ if (typeof callback === "function") {
+ return callback();
+ }
+ }
+};
+
+NETDATA.easypiechartClearSelection = function (state, force) {
+ if (typeof state.tmp.easyPieChartEvent !== 'undefined' && typeof state.tmp.easyPieChartEvent.timer !== 'undefined') {
+ NETDATA.timeout.clear(state.tmp.easyPieChartEvent.timer);
+ state.tmp.easyPieChartEvent.timer = undefined;
+ }
+
+ if (state.isAutoRefreshable() && state.data !== null && force !== true) {
+ NETDATA.easypiechartChartUpdate(state, state.data);
+ }
+ else {
+ state.tmp.easyPieChartLabel.innerText = state.legendFormatValue(null);
+ state.tmp.easyPieChart_instance.update(0);
+ }
+ state.tmp.easyPieChart_instance.enableAnimation();
+
+ return true;
+};
+
+NETDATA.easypiechartSetSelection = function (state, t) {
+ if (state.timeIsVisible(t) !== true) {
+ return NETDATA.easypiechartClearSelection(state, true);
+ }
+
+ let slot = state.calculateRowForTime(t);
+ if (slot < 0 || slot >= state.data.result.length) {
+ return NETDATA.easypiechartClearSelection(state, true);
+ }
+
+ if (typeof state.tmp.easyPieChartEvent === 'undefined') {
+ state.tmp.easyPieChartEvent = {
+ timer: undefined,
+ value: 0,
+ pcent: 0
+ };
+ }
+
+ let value = state.data.result[state.data.result.length - 1 - slot];
+ let min = (state.tmp.easyPieChartMin === null) ? NETDATA.commonMin.get(state) : state.tmp.easyPieChartMin;
+ let max = (state.tmp.easyPieChartMax === null) ? NETDATA.commonMax.get(state) : state.tmp.easyPieChartMax;
+ let pcent = NETDATA.easypiechartPercentFromValueMinMax(state, value, min, max);
+
+ state.tmp.easyPieChartEvent.value = value;
+ state.tmp.easyPieChartEvent.pcent = pcent;
+ state.tmp.easyPieChartLabel.innerText = state.legendFormatValue(value);
+
+ if (state.tmp.easyPieChartEvent.timer === undefined) {
+ state.tmp.easyPieChart_instance.disableAnimation();
+
+ state.tmp.easyPieChartEvent.timer = NETDATA.timeout.set(function () {
+ state.tmp.easyPieChartEvent.timer = undefined;
+ state.tmp.easyPieChart_instance.update(state.tmp.easyPieChartEvent.pcent);
+ }, 0);
+ }
+
+ return true;
+};
+
+NETDATA.easypiechartChartUpdate = function (state, data) {
+ let value, min, max, pcent;
+
+ if (NETDATA.globalPanAndZoom.isActive() || state.isAutoRefreshable() === false) {
+ value = null;
+ pcent = 0;
+ }
+ else {
+ value = data.result[0];
+ min = (state.tmp.easyPieChartMin === null) ? NETDATA.commonMin.get(state) : state.tmp.easyPieChartMin;
+ max = (state.tmp.easyPieChartMax === null) ? NETDATA.commonMax.get(state) : state.tmp.easyPieChartMax;
+ pcent = NETDATA.easypiechartPercentFromValueMinMax(state, value, min, max);
+ }
+
+ state.tmp.easyPieChartLabel.innerText = state.legendFormatValue(value);
+ state.tmp.easyPieChart_instance.update(pcent);
+ return true;
+};
+
+NETDATA.easypiechartChartCreate = function (state, data) {
+ let chart = $(state.element_chart);
+
+ let value = data.result[0];
+ let min = NETDATA.dataAttribute(state.element, 'easypiechart-min-value', null);
+ let max = NETDATA.dataAttribute(state.element, 'easypiechart-max-value', null);
+
+ if (min === null) {
+ min = NETDATA.commonMin.get(state);
+ state.tmp.easyPieChartMin = null;
+ }
+ else {
+ state.tmp.easyPieChartMin = min;
+ }
+
+ if (max === null) {
+ max = NETDATA.commonMax.get(state);
+ state.tmp.easyPieChartMax = null;
+ }
+ else {
+ state.tmp.easyPieChartMax = max;
+ }
+
+ let size = state.chartWidth();
+ let stroke = Math.floor(size / 22);
+ if (stroke < 3) {
+ stroke = 2;
+ }
+
+ let valuefontsize = Math.floor((size * 2 / 3) / 5);
+ let valuetop = Math.round((size - valuefontsize - (size / 40)) / 2);
+ state.tmp.easyPieChartLabel = document.createElement('span');
+ state.tmp.easyPieChartLabel.className = 'easyPieChartLabel';
+ state.tmp.easyPieChartLabel.innerText = state.legendFormatValue(value);
+ state.tmp.easyPieChartLabel.style.fontSize = valuefontsize + 'px';
+ state.tmp.easyPieChartLabel.style.top = valuetop.toString() + 'px';
+ state.element_chart.appendChild(state.tmp.easyPieChartLabel);
+
+ let titlefontsize = Math.round(valuefontsize * 1.6 / 3);
+ let titletop = Math.round(valuetop - (titlefontsize * 2) - (size / 40));
+ state.tmp.easyPieChartTitle = document.createElement('span');
+ state.tmp.easyPieChartTitle.className = 'easyPieChartTitle';
+ state.tmp.easyPieChartTitle.innerText = state.title;
+ state.tmp.easyPieChartTitle.style.fontSize = titlefontsize + 'px';
+ state.tmp.easyPieChartTitle.style.lineHeight = titlefontsize + 'px';
+ state.tmp.easyPieChartTitle.style.top = titletop.toString() + 'px';
+ state.element_chart.appendChild(state.tmp.easyPieChartTitle);
+
+ let unitfontsize = Math.round(titlefontsize * 0.9);
+ let unittop = Math.round(valuetop + (valuefontsize + unitfontsize) + (size / 40));
+ state.tmp.easyPieChartUnits = document.createElement('span');
+ state.tmp.easyPieChartUnits.className = 'easyPieChartUnits';
+ state.tmp.easyPieChartUnits.innerText = state.units_current;
+ state.tmp.easyPieChartUnits.style.fontSize = unitfontsize + 'px';
+ state.tmp.easyPieChartUnits.style.top = unittop.toString() + 'px';
+ state.element_chart.appendChild(state.tmp.easyPieChartUnits);
+
+ let barColor = NETDATA.dataAttribute(state.element, 'easypiechart-barcolor', undefined);
+ if (typeof barColor === 'undefined' || barColor === null) {
+ barColor = state.chartCustomColors()[0];
+ } else {
+ // <div ... data-easypiechart-barcolor="(function(percent){return(percent < 50 ? '#5cb85c' : percent < 85 ? '#f0ad4e' : '#cb3935');})" ...></div>
+ let tmp = eval(barColor);
+ if (typeof tmp === 'function') {
+ barColor = tmp;
+ }
+ }
+
+ let pcent = NETDATA.easypiechartPercentFromValueMinMax(state, value, min, max);
+ chart.data('data-percent', pcent);
+
+ chart.easyPieChart({
+ barColor: barColor,
+ trackColor: NETDATA.dataAttribute(state.element, 'easypiechart-trackcolor', NETDATA.themes.current.easypiechart_track),
+ scaleColor: NETDATA.dataAttribute(state.element, 'easypiechart-scalecolor', NETDATA.themes.current.easypiechart_scale),
+ scaleLength: NETDATA.dataAttribute(state.element, 'easypiechart-scalelength', 5),
+ lineCap: NETDATA.dataAttribute(state.element, 'easypiechart-linecap', 'round'),
+ lineWidth: NETDATA.dataAttribute(state.element, 'easypiechart-linewidth', stroke),
+ trackWidth: NETDATA.dataAttribute(state.element, 'easypiechart-trackwidth', undefined),
+ size: NETDATA.dataAttribute(state.element, 'easypiechart-size', size),
+ rotate: NETDATA.dataAttribute(state.element, 'easypiechart-rotate', 0),
+ animate: NETDATA.dataAttribute(state.element, 'easypiechart-animate', {duration: 500, enabled: true}),
+ easing: NETDATA.dataAttribute(state.element, 'easypiechart-easing', undefined)
+ });
+
+ // when we just re-create the chart
+ // do not animate the first update
+ let animate = true;
+ if (typeof state.tmp.easyPieChart_instance !== 'undefined') {
+ animate = false;
+ }
+
+ state.tmp.easyPieChart_instance = chart.data('easyPieChart');
+ if (animate === false) {
+ state.tmp.easyPieChart_instance.disableAnimation();
+ }
+ state.tmp.easyPieChart_instance.update(pcent);
+ if (animate === false) {
+ state.tmp.easyPieChart_instance.enableAnimation();
+ }
+
+ state.legendSetUnitsString = function (units) {
+ if (typeof state.tmp.easyPieChartUnits !== 'undefined' && state.tmp.units !== units) {
+ state.tmp.easyPieChartUnits.innerText = units;
+ state.tmp.units = units;
+ }
+ };
+ state.legendShowUndefined = function () {
+ if (typeof state.tmp.easyPieChart_instance !== 'undefined') {
+ NETDATA.easypiechartClearSelection(state);
+ }
+ };
+
+ return true;
+};
diff --git a/web/gui/src/dashboard.js/charting/gauge.js b/web/gui/src/dashboard.js/charting/gauge.js
new file mode 100644
index 0000000..53ed46f
--- /dev/null
+++ b/web/gui/src/dashboard.js/charting/gauge.js
@@ -0,0 +1,406 @@
+// gauge.js
+
+NETDATA.gaugeInitialize = function (callback) {
+ if (typeof netdataNoGauge === 'undefined' || !netdataNoGauge) {
+ $.ajax({
+ url: NETDATA.gauge_js,
+ cache: true,
+ dataType: "script",
+ xhrFields: {withCredentials: true} // required for the cookie
+ })
+ .done(function () {
+ NETDATA.registerChartLibrary('gauge', NETDATA.gauge_js);
+ })
+ .fail(function () {
+ NETDATA.chartLibraries.gauge.enabled = false;
+ NETDATA.error(100, NETDATA.gauge_js);
+ })
+ .always(function () {
+ if (typeof callback === "function") {
+ return callback();
+ }
+ })
+ }
+ else {
+ NETDATA.chartLibraries.gauge.enabled = false;
+ if (typeof callback === "function") {
+ return callback();
+ }
+ }
+};
+
+NETDATA.gaugeAnimation = function (state, status) {
+ let speed = 32;
+
+ if (typeof status === 'boolean' && status === false) {
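+        // an extreme speed value effectively disables the needle animation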
+ speed = 1000000000;
+ } else if (typeof status === 'number') {
+ speed = status;
+ }
+
+ // console.log('gauge speed ' + speed);
+ state.tmp.gauge_instance.animationSpeed = speed;
+ state.tmp.___gaugeOld__.speed = speed;
+};
+
+NETDATA.gaugeSet = function (state, value, min, max) {
+ if (typeof value !== 'number') {
+ value = 0;
+ }
+ if (typeof min !== 'number') {
+ min = 0;
+ }
+ if (typeof max !== 'number') {
+ max = 0;
+ }
+ if (value > max) {
+ max = value;
+ }
+ if (value < min) {
+ min = value;
+ }
+ if (min > max) {
+ let t = min;
+ min = max;
+ max = t;
+ }
+ else if (min === max) {
+ max = min + 1;
+ }
+
+ state.legendFormatValueDecimalsFromMinMax(min, max);
+
+    // gauge.js misbehaves when the value we set is
+    // below min or above max: the needle goes crazy
+
+    // to prevent that, we always feed it a percentage,
+    // so the needle always stays between min and max
+ let pcent = (value - min) * 100 / (max - min);
+
+ // bug fix for gauge.js 1.3.1
+ // if the value is the absolute min or max, the chart is broken
+ if (pcent < 0.001) {
+ pcent = 0.001;
+ }
+ if (pcent > 99.999) {
+ pcent = 99.999;
+ }
+
+ state.tmp.gauge_instance.set(pcent);
+ // console.log('gauge set ' + pcent + ', value ' + value + ', min ' + min + ', max ' + max);
+
+ state.tmp.___gaugeOld__.value = value;
+ state.tmp.___gaugeOld__.min = min;
+ state.tmp.___gaugeOld__.max = max;
+};
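+
+// Worked example (illustrative): with value = 25, min = 0, max = 100,
+// pcent = (25 - 0) * 100 / (100 - 0) = 25. Values at the exact edges
+// are clamped: value = min yields 0.001 and value = max yields 99.999,
+// avoiding the gauge.js rendering issue described above.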
+
+NETDATA.gaugeSetLabels = function (state, value, min, max) {
+ if (state.tmp.___gaugeOld__.valueLabel !== value) {
+ state.tmp.___gaugeOld__.valueLabel = value;
+ state.tmp.gaugeChartLabel.innerText = state.legendFormatValue(value);
+ }
+ if (state.tmp.___gaugeOld__.minLabel !== min) {
+ state.tmp.___gaugeOld__.minLabel = min;
+ state.tmp.gaugeChartMin.innerText = state.legendFormatValue(min);
+ }
+ if (state.tmp.___gaugeOld__.maxLabel !== max) {
+ state.tmp.___gaugeOld__.maxLabel = max;
+ state.tmp.gaugeChartMax.innerText = state.legendFormatValue(max);
+ }
+};
+
+NETDATA.gaugeClearSelection = function (state, force) {
+ if (typeof state.tmp.gaugeEvent !== 'undefined' && typeof state.tmp.gaugeEvent.timer !== 'undefined') {
+ NETDATA.timeout.clear(state.tmp.gaugeEvent.timer);
+ state.tmp.gaugeEvent.timer = undefined;
+ }
+
+ if (state.isAutoRefreshable() && state.data !== null && force !== true) {
+ NETDATA.gaugeChartUpdate(state, state.data);
+ } else {
+ NETDATA.gaugeAnimation(state, false);
+ NETDATA.gaugeSetLabels(state, null, null, null);
+ NETDATA.gaugeSet(state, null, null, null);
+ }
+
+ NETDATA.gaugeAnimation(state, true);
+ return true;
+};
+
+NETDATA.gaugeSetSelection = function (state, t) {
+ if (state.timeIsVisible(t) !== true) {
+ return NETDATA.gaugeClearSelection(state, true);
+ }
+
+ let slot = state.calculateRowForTime(t);
+ if (slot < 0 || slot >= state.data.result.length) {
+ return NETDATA.gaugeClearSelection(state, true);
+ }
+
+ if (typeof state.tmp.gaugeEvent === 'undefined') {
+ state.tmp.gaugeEvent = {
+ timer: undefined,
+ value: 0,
+ min: 0,
+ max: 0
+ };
+ }
+
+ let value = state.data.result[state.data.result.length - 1 - slot];
+ let min = (state.tmp.gaugeMin === null) ? NETDATA.commonMin.get(state) : state.tmp.gaugeMin;
+ let max = (state.tmp.gaugeMax === null) ? NETDATA.commonMax.get(state) : state.tmp.gaugeMax;
+
+ // make sure it is zero based
+ // but only if it has not been set by the user
+ if (state.tmp.gaugeMin === null && min > 0) {
+ min = 0;
+ }
+ if (state.tmp.gaugeMax === null && max < 0) {
+ max = 0;
+ }
+
+ state.tmp.gaugeEvent.value = value;
+ state.tmp.gaugeEvent.min = min;
+ state.tmp.gaugeEvent.max = max;
+ NETDATA.gaugeSetLabels(state, value, min, max);
+
+ if (state.tmp.gaugeEvent.timer === undefined) {
+ NETDATA.gaugeAnimation(state, false);
+
+ state.tmp.gaugeEvent.timer = NETDATA.timeout.set(function () {
+ state.tmp.gaugeEvent.timer = undefined;
+ NETDATA.gaugeSet(state, state.tmp.gaugeEvent.value, state.tmp.gaugeEvent.min, state.tmp.gaugeEvent.max);
+ }, 0);
+ }
+
+ return true;
+};
+
+NETDATA.gaugeChartUpdate = function (state, data) {
+ let value, min, max;
+
+ if (NETDATA.globalPanAndZoom.isActive() || state.isAutoRefreshable() === false) {
+ NETDATA.gaugeSetLabels(state, null, null, null);
+ state.tmp.gauge_instance.set(0);
+ } else {
+ value = data.result[0];
+ min = (state.tmp.gaugeMin === null) ? NETDATA.commonMin.get(state) : state.tmp.gaugeMin;
+ max = (state.tmp.gaugeMax === null) ? NETDATA.commonMax.get(state) : state.tmp.gaugeMax;
+ if (value < min) {
+ min = value;
+ }
+ if (value > max) {
+ max = value;
+ }
+
+ // make sure it is zero based
+ // but only if it has not been set by the user
+ if (state.tmp.gaugeMin === null && min > 0) {
+ min = 0;
+ }
+ if (state.tmp.gaugeMax === null && max < 0) {
+ max = 0;
+ }
+
+ NETDATA.gaugeSet(state, value, min, max);
+ NETDATA.gaugeSetLabels(state, value, min, max);
+ }
+
+ return true;
+};
+
+NETDATA.gaugeChartCreate = function (state, data) {
+ // let chart = $(state.element_chart);
+
+ let value = data.result[0];
+ let min = NETDATA.dataAttribute(state.element, 'gauge-min-value', null);
+ let max = NETDATA.dataAttribute(state.element, 'gauge-max-value', null);
+ // let adjust = NETDATA.dataAttribute(state.element, 'gauge-adjust', null);
+ let pointerColor = NETDATA.dataAttribute(state.element, 'gauge-pointer-color', NETDATA.themes.current.gauge_pointer);
+ let strokeColor = NETDATA.dataAttribute(state.element, 'gauge-stroke-color', NETDATA.themes.current.gauge_stroke);
+ let startColor = NETDATA.dataAttribute(state.element, 'gauge-start-color', state.chartCustomColors()[0]);
+ let stopColor = NETDATA.dataAttribute(state.element, 'gauge-stop-color', void 0);
+ let generateGradient = NETDATA.dataAttribute(state.element, 'gauge-generate-gradient', false);
+
+ if (min === null) {
+ min = NETDATA.commonMin.get(state);
+ state.tmp.gaugeMin = null;
+ } else {
+ state.tmp.gaugeMin = min;
+ }
+
+ if (max === null) {
+ max = NETDATA.commonMax.get(state);
+ state.tmp.gaugeMax = null;
+ } else {
+ state.tmp.gaugeMax = max;
+ }
+
+ // make sure it is zero based
+ // but only if it has not been set by the user
+ if (state.tmp.gaugeMin === null && min > 0) {
+ min = 0;
+ }
+ if (state.tmp.gaugeMax === null && max < 0) {
+ max = 0;
+ }
+
+ let width = state.chartWidth(), height = state.chartHeight(); //, ratio = 1.5;
+ // console.log('gauge width: ' + width.toString() + ', height: ' + height.toString());
+ //switch(adjust) {
+ // case 'width': width = height * ratio; break;
+ // case 'height':
+ // default: height = width / ratio; break;
+ //}
+ //state.element.style.width = width.toString() + 'px';
+ //state.element.style.height = height.toString() + 'px';
+
+ let lum_d = 0.05;
+
+ let options = {
+ lines: 12, // The number of lines to draw
+ angle: 0.14, // The span of the gauge arc
+ lineWidth: 0.57, // The line thickness
+ radiusScale: 1.0, // Relative radius
+ pointer: {
+            length: 0.85, // pointer length, relative to the gauge radius
+            strokeWidth: 0.045, // pointer thickness
+ color: pointerColor // Fill color
+ },
+        limitMax: true, // if false, the max value of the gauge is raised automatically when the value surpasses it
+        limitMin: true, // if true, the min value of the gauge stays fixed unless set manually
+        colorStart: startColor, // color of the filled arc at the start of the gauge
+        colorStop: stopColor, // color of the filled arc at the end of the gauge
+        strokeColor: strokeColor, // color of the unfilled track behind the arc
+        generateGradient: (generateGradient === true), // let gauge.js generate a gradient (see percentColors below)
+ gradientType: 0,
+ highDpiSupport: true // High resolution support
+ };
+
+ if (generateGradient.constructor === Array) {
+ // example options:
+ // data-gauge-generate-gradient="[0, 50, 100]"
+ // data-gauge-gradient-percent-color-0="#FFFFFF"
+ // data-gauge-gradient-percent-color-50="#999900"
+ // data-gauge-gradient-percent-color-100="#000000"
+
+ options.percentColors = [];
+ let len = generateGradient.length;
+ while (len--) {
+ let pcent = generateGradient[len];
+ let color = NETDATA.dataAttribute(state.element, 'gauge-gradient-percent-color-' + pcent.toString(), false);
+ if (color !== false) {
+ let a = [];
+ a[0] = pcent / 100;
+ a[1] = color;
+ options.percentColors.unshift(a);
+ }
+ }
+ if (options.percentColors.length === 0) {
+ delete options.percentColors;
+ }
+ } else if (generateGradient === false && NETDATA.themes.current.gauge_gradient) {
+ //noinspection PointlessArithmeticExpressionJS
+ options.percentColors = [
+ [0.0, NETDATA.colorLuminance(startColor, (lum_d * 10) - (lum_d * 0))],
+ [0.1, NETDATA.colorLuminance(startColor, (lum_d * 10) - (lum_d * 1))],
+ [0.2, NETDATA.colorLuminance(startColor, (lum_d * 10) - (lum_d * 2))],
+ [0.3, NETDATA.colorLuminance(startColor, (lum_d * 10) - (lum_d * 3))],
+ [0.4, NETDATA.colorLuminance(startColor, (lum_d * 10) - (lum_d * 4))],
+ [0.5, NETDATA.colorLuminance(startColor, (lum_d * 10) - (lum_d * 5))],
+ [0.6, NETDATA.colorLuminance(startColor, (lum_d * 10) - (lum_d * 6))],
+ [0.7, NETDATA.colorLuminance(startColor, (lum_d * 10) - (lum_d * 7))],
+ [0.8, NETDATA.colorLuminance(startColor, (lum_d * 10) - (lum_d * 8))],
+ [0.9, NETDATA.colorLuminance(startColor, (lum_d * 10) - (lum_d * 9))],
+ [1.0, NETDATA.colorLuminance(startColor, 0.0)]];
+ }
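+
+// Note (illustrative): with lum_d = 0.05, the ramp above runs from
+// NETDATA.colorLuminance(startColor, 0.5) at 0% down to the unmodified
+// startColor at 100%, so the arc fades from a strongly lightened shade
+// of the start color into the start color itself.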
+
+ state.tmp.gauge_canvas = document.createElement('canvas');
+ state.tmp.gauge_canvas.id = 'gauge-' + state.uuid + '-canvas';
+ state.tmp.gauge_canvas.className = 'gaugeChart';
+ state.tmp.gauge_canvas.width = width;
+ state.tmp.gauge_canvas.height = height;
+ state.element_chart.appendChild(state.tmp.gauge_canvas);
+
+ let valuefontsize = Math.floor(height / 5);
+ let valuetop = Math.round((height - valuefontsize) / 3.2);
+ state.tmp.gaugeChartLabel = document.createElement('span');
+ state.tmp.gaugeChartLabel.className = 'gaugeChartLabel';
+ state.tmp.gaugeChartLabel.style.fontSize = valuefontsize + 'px';
+ state.tmp.gaugeChartLabel.style.top = valuetop.toString() + 'px';
+ state.element_chart.appendChild(state.tmp.gaugeChartLabel);
+
+ let titlefontsize = Math.round(valuefontsize / 2.1);
+ let titletop = 0;
+ state.tmp.gaugeChartTitle = document.createElement('span');
+ state.tmp.gaugeChartTitle.className = 'gaugeChartTitle';
+ state.tmp.gaugeChartTitle.innerText = state.title;
+ state.tmp.gaugeChartTitle.style.fontSize = titlefontsize + 'px';
+ state.tmp.gaugeChartTitle.style.lineHeight = titlefontsize + 'px';
+ state.tmp.gaugeChartTitle.style.top = titletop.toString() + 'px';
+ state.element_chart.appendChild(state.tmp.gaugeChartTitle);
+
+ let unitfontsize = Math.round(titlefontsize * 0.9);
+ state.tmp.gaugeChartUnits = document.createElement('span');
+ state.tmp.gaugeChartUnits.className = 'gaugeChartUnits';
+ state.tmp.gaugeChartUnits.innerText = state.units_current;
+ state.tmp.gaugeChartUnits.style.fontSize = unitfontsize + 'px';
+ state.element_chart.appendChild(state.tmp.gaugeChartUnits);
+
+ state.tmp.gaugeChartMin = document.createElement('span');
+ state.tmp.gaugeChartMin.className = 'gaugeChartMin';
+ state.tmp.gaugeChartMin.style.fontSize = Math.round(valuefontsize * 0.75).toString() + 'px';
+ state.element_chart.appendChild(state.tmp.gaugeChartMin);
+
+ state.tmp.gaugeChartMax = document.createElement('span');
+ state.tmp.gaugeChartMax.className = 'gaugeChartMax';
+ state.tmp.gaugeChartMax.style.fontSize = Math.round(valuefontsize * 0.75).toString() + 'px';
+ state.element_chart.appendChild(state.tmp.gaugeChartMax);
+
+ // when we just re-create the chart
+ // do not animate the first update
+ let animate = true;
+ if (typeof state.tmp.gauge_instance !== 'undefined') {
+ animate = false;
+ }
+
+ state.tmp.gauge_instance = new Gauge(state.tmp.gauge_canvas).setOptions(options); // create sexy gauge!
+
+ state.tmp.___gaugeOld__ = {
+ value: value,
+ min: min,
+ max: max,
+ valueLabel: null,
+ minLabel: null,
+ maxLabel: null
+ };
+
+ // we will always feed a percentage
+ state.tmp.gauge_instance.minValue = 0;
+ state.tmp.gauge_instance.maxValue = 100;
+
+ NETDATA.gaugeAnimation(state, animate);
+ NETDATA.gaugeSet(state, value, min, max);
+ NETDATA.gaugeSetLabels(state, value, min, max);
+ NETDATA.gaugeAnimation(state, true);
+
+ state.legendSetUnitsString = function (units) {
+ if (typeof state.tmp.gaugeChartUnits !== 'undefined' && state.tmp.units !== units) {
+ state.tmp.gaugeChartUnits.innerText = units;
+ state.tmp.___gaugeOld__.valueLabel = null;
+ state.tmp.___gaugeOld__.minLabel = null;
+ state.tmp.___gaugeOld__.maxLabel = null;
+ state.tmp.units = units;
+ }
+ };
+ state.legendShowUndefined = function () {
+ if (typeof state.tmp.gauge_instance !== 'undefined') {
+ NETDATA.gaugeClearSelection(state);
+ }
+ };
+
+ return true;
+};
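+
+// Illustrative usage (not part of the upstream file): the 'gauge-*'
+// names read above map to 'data-*' attributes, so a fixed-range gauge
+// could be declared like this (the chart id is a placeholder):
+//
+//   <div data-netdata="system.load"
+//        data-chart-library="gauge"
+//        data-gauge-min-value="0"
+//        data-gauge-max-value="100"></div>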
diff --git a/web/gui/src/dashboard.js/charting/google-charts.js b/web/gui/src/dashboard.js/charting/google-charts.js
new file mode 100644
index 0000000..7497daf
--- /dev/null
+++ b/web/gui/src/dashboard.js/charting/google-charts.js
@@ -0,0 +1,132 @@
+// google charts
+
+// Codacy declarations
+/* global google */
+
+NETDATA.googleInitialize = function (callback) {
+ if (typeof netdataNoGoogleCharts === 'undefined' || !netdataNoGoogleCharts) {
+ $.ajax({
+ url: NETDATA.google_js,
+ cache: true,
+ dataType: "script",
+ xhrFields: {withCredentials: true} // required for the cookie
+ })
+ .done(function () {
+ NETDATA.registerChartLibrary('google', NETDATA.google_js);
+ google.load('visualization', '1.1', {
+ 'packages': ['corechart', 'controls'],
+ 'callback': callback
+ });
+ })
+ .fail(function () {
+ NETDATA.chartLibraries.google.enabled = false;
+ NETDATA.error(100, NETDATA.google_js);
+ if (typeof callback === "function") {
+ return callback();
+ }
+ });
+ } else {
+ NETDATA.chartLibraries.google.enabled = false;
+ if (typeof callback === "function") {
+ return callback();
+ }
+ }
+};
+
+NETDATA.googleChartUpdate = function (state, data) {
+ let datatable = new google.visualization.DataTable(data.result);
+ state.google_instance.draw(datatable, state.google_options);
+ return true;
+};
+
+NETDATA.googleChartCreate = function (state, data) {
+ let datatable = new google.visualization.DataTable(data.result);
+
+ state.google_options = {
+ colors: state.chartColors(),
+
+ // do not set width, height - the chart resizes itself
+ //width: state.chartWidth(),
+ //height: state.chartHeight(),
+ lineWidth: 1,
+ title: state.title,
+ fontSize: 11,
+ hAxis: {
+ // title: "Time of Day",
+ // format:'HH:mm:ss',
+ viewWindowMode: 'maximized',
+ slantedText: false,
+ format: 'HH:mm:ss',
+ textStyle: {
+ fontSize: 9
+ },
+ gridlines: {
+ color: '#EEE'
+ }
+ },
+ vAxis: {
+ title: state.units_current,
+ viewWindowMode: 'pretty',
+ minValue: -0.1,
+ maxValue: 0.1,
+ direction: 1,
+ textStyle: {
+ fontSize: 9
+ },
+ gridlines: {
+ color: '#EEE'
+ }
+ },
+ chartArea: {
+ width: '65%',
+ height: '80%'
+ },
+ focusTarget: 'category',
+ annotation: {
+ '1': {
+ style: 'line'
+ }
+ },
+ pointsVisible: 0,
+ titlePosition: 'out',
+ titleTextStyle: {
+ fontSize: 11
+ },
+ tooltip: {
+ isHtml: false,
+ ignoreBounds: true,
+ textStyle: {
+ fontSize: 9
+ }
+ },
+ curveType: 'function',
+ areaOpacity: 0.3,
+ isStacked: false
+ };
+
+ switch (state.chart.chart_type) {
+ case "area":
+ state.google_options.vAxis.viewWindowMode = 'maximized';
+ state.google_options.areaOpacity = NETDATA.options.current.color_fill_opacity_area;
+ state.google_instance = new google.visualization.AreaChart(state.element_chart);
+ break;
+
+ case "stacked":
+ state.google_options.isStacked = true;
+ state.google_options.areaOpacity = NETDATA.options.current.color_fill_opacity_stacked;
+ state.google_options.vAxis.viewWindowMode = 'maximized';
+ state.google_options.vAxis.minValue = null;
+ state.google_options.vAxis.maxValue = null;
+ state.google_instance = new google.visualization.AreaChart(state.element_chart);
+ break;
+
+ default:
+ case "line":
+ state.google_options.lineWidth = 2;
+ state.google_instance = new google.visualization.LineChart(state.element_chart);
+ break;
+ }
+
+ state.google_instance.draw(datatable, state.google_options);
+ return true;
+};
diff --git a/web/gui/src/dashboard.js/charting/peity.js b/web/gui/src/dashboard.js/charting/peity.js
new file mode 100644
index 0000000..012fb9c
--- /dev/null
+++ b/web/gui/src/dashboard.js/charting/peity.js
@@ -0,0 +1,62 @@
+
+// peity
+
+NETDATA.peityInitialize = function (callback) {
+ if (typeof netdataNoPeitys === 'undefined' || !netdataNoPeitys) {
+ $.ajax({
+ url: NETDATA.peity_js,
+ cache: true,
+ dataType: "script",
+ xhrFields: {withCredentials: true} // required for the cookie
+ })
+ .done(function () {
+ NETDATA.registerChartLibrary('peity', NETDATA.peity_js);
+ })
+ .fail(function () {
+ NETDATA.chartLibraries.peity.enabled = false;
+ NETDATA.error(100, NETDATA.peity_js);
+ })
+ .always(function () {
+ if (typeof callback === "function") {
+ return callback();
+ }
+ });
+ } else {
+ NETDATA.chartLibraries.peity.enabled = false;
+ if (typeof callback === "function") {
+ return callback();
+ }
+ }
+};
+
+NETDATA.peityChartUpdate = function (state, data) {
+ state.peity_instance.innerHTML = data.result;
+
+ if (state.peity_options.stroke !== state.chartCustomColors()[0]) {
+ state.peity_options.stroke = state.chartCustomColors()[0];
+ if (state.chart.chart_type === 'line') {
+ state.peity_options.fill = NETDATA.themes.current.background;
+ } else {
+ state.peity_options.fill = NETDATA.colorLuminance(state.chartCustomColors()[0], NETDATA.chartDefaults.fill_luminance);
+ }
+ }
+
+ $(state.peity_instance).peity('line', state.peity_options);
+ return true;
+};
+
+NETDATA.peityChartCreate = function (state, data) {
+ state.peity_instance = document.createElement('div');
+ state.element_chart.appendChild(state.peity_instance);
+
+ state.peity_options = {
+ stroke: NETDATA.themes.current.foreground,
+ strokeWidth: NETDATA.dataAttribute(state.element, 'peity-strokewidth', 1),
+ width: state.chartWidth(),
+ height: state.chartHeight(),
+ fill: NETDATA.themes.current.foreground
+ };
+
+ NETDATA.peityChartUpdate(state, data);
+ return true;
+};
diff --git a/web/gui/src/dashboard.js/charting/sparkline.js b/web/gui/src/dashboard.js/charting/sparkline.js
new file mode 100644
index 0000000..5d8a9e6
--- /dev/null
+++ b/web/gui/src/dashboard.js/charting/sparkline.js
@@ -0,0 +1,155 @@
+// ----------------------------------------------------------------------------------------------------------------
+// sparkline
+
+NETDATA.sparklineInitialize = function (callback) {
+ if (typeof netdataNoSparklines === 'undefined' || !netdataNoSparklines) {
+ $.ajax({
+ url: NETDATA.sparkline_js,
+ cache: true,
+ dataType: "script",
+ xhrFields: {withCredentials: true} // required for the cookie
+ })
+ .done(function () {
+ NETDATA.registerChartLibrary('sparkline', NETDATA.sparkline_js);
+ })
+ .fail(function () {
+ NETDATA.chartLibraries.sparkline.enabled = false;
+ NETDATA.error(100, NETDATA.sparkline_js);
+ })
+ .always(function () {
+ if (typeof callback === "function") {
+ return callback();
+ }
+ });
+ } else {
+ NETDATA.chartLibraries.sparkline.enabled = false;
+ if (typeof callback === "function") {
+ return callback();
+ }
+ }
+};
+
+NETDATA.sparklineChartUpdate = function (state, data) {
+ state.sparkline_options.width = state.chartWidth();
+ state.sparkline_options.height = state.chartHeight();
+
+ $(state.element_chart).sparkline(data.result, state.sparkline_options);
+ return true;
+};
+
+NETDATA.sparklineChartCreate = function (state, data) {
+ let type = NETDATA.dataAttribute(state.element, 'sparkline-type', 'line');
+ let lineColor = NETDATA.dataAttribute(state.element, 'sparkline-linecolor', state.chartCustomColors()[0]);
+ let fillColor = NETDATA.dataAttribute(state.element, 'sparkline-fillcolor', ((state.chart.chart_type === 'line') ? NETDATA.themes.current.background : NETDATA.colorLuminance(lineColor, NETDATA.chartDefaults.fill_luminance)));
+ let chartRangeMin = NETDATA.dataAttribute(state.element, 'sparkline-chartrangemin', undefined);
+ let chartRangeMax = NETDATA.dataAttribute(state.element, 'sparkline-chartrangemax', undefined);
+ let composite = NETDATA.dataAttribute(state.element, 'sparkline-composite', undefined);
+ let enableTagOptions = NETDATA.dataAttribute(state.element, 'sparkline-enabletagoptions', undefined);
+ let tagOptionPrefix = NETDATA.dataAttribute(state.element, 'sparkline-tagoptionprefix', undefined);
+ let tagValuesAttribute = NETDATA.dataAttribute(state.element, 'sparkline-tagvaluesattribute', undefined);
+ let disableHiddenCheck = NETDATA.dataAttribute(state.element, 'sparkline-disablehiddencheck', undefined);
+ let defaultPixelsPerValue = NETDATA.dataAttribute(state.element, 'sparkline-defaultpixelspervalue', undefined);
+ let spotColor = NETDATA.dataAttribute(state.element, 'sparkline-spotcolor', undefined);
+ let minSpotColor = NETDATA.dataAttribute(state.element, 'sparkline-minspotcolor', undefined);
+ let maxSpotColor = NETDATA.dataAttribute(state.element, 'sparkline-maxspotcolor', undefined);
+ let spotRadius = NETDATA.dataAttribute(state.element, 'sparkline-spotradius', undefined);
+ let valueSpots = NETDATA.dataAttribute(state.element, 'sparkline-valuespots', undefined);
+ let highlightSpotColor = NETDATA.dataAttribute(state.element, 'sparkline-highlightspotcolor', undefined);
+ let highlightLineColor = NETDATA.dataAttribute(state.element, 'sparkline-highlightlinecolor', undefined);
+ let lineWidth = NETDATA.dataAttribute(state.element, 'sparkline-linewidth', undefined);
+ let normalRangeMin = NETDATA.dataAttribute(state.element, 'sparkline-normalrangemin', undefined);
+ let normalRangeMax = NETDATA.dataAttribute(state.element, 'sparkline-normalrangemax', undefined);
+ let drawNormalOnTop = NETDATA.dataAttribute(state.element, 'sparkline-drawnormalontop', undefined);
+ let xvalues = NETDATA.dataAttribute(state.element, 'sparkline-xvalues', undefined);
+ let chartRangeClip = NETDATA.dataAttribute(state.element, 'sparkline-chartrangeclip', undefined);
+ let chartRangeMinX = NETDATA.dataAttribute(state.element, 'sparkline-chartrangeminx', undefined);
+ let chartRangeMaxX = NETDATA.dataAttribute(state.element, 'sparkline-chartrangemaxx', undefined);
+ let disableInteraction = NETDATA.dataAttributeBoolean(state.element, 'sparkline-disableinteraction', false);
+ let disableTooltips = NETDATA.dataAttributeBoolean(state.element, 'sparkline-disabletooltips', false);
+ let disableHighlight = NETDATA.dataAttributeBoolean(state.element, 'sparkline-disablehighlight', false);
+ let highlightLighten = NETDATA.dataAttribute(state.element, 'sparkline-highlightlighten', 1.4);
+ let highlightColor = NETDATA.dataAttribute(state.element, 'sparkline-highlightcolor', undefined);
+ let tooltipContainer = NETDATA.dataAttribute(state.element, 'sparkline-tooltipcontainer', undefined);
+ let tooltipClassname = NETDATA.dataAttribute(state.element, 'sparkline-tooltipclassname', undefined);
+ let tooltipFormat = NETDATA.dataAttribute(state.element, 'sparkline-tooltipformat', undefined);
+ let tooltipPrefix = NETDATA.dataAttribute(state.element, 'sparkline-tooltipprefix', undefined);
+ let tooltipSuffix = NETDATA.dataAttribute(state.element, 'sparkline-tooltipsuffix', ' ' + state.units_current);
+ let tooltipSkipNull = NETDATA.dataAttributeBoolean(state.element, 'sparkline-tooltipskipnull', true);
+ let tooltipValueLookups = NETDATA.dataAttribute(state.element, 'sparkline-tooltipvaluelookups', undefined);
+ let tooltipFormatFieldlist = NETDATA.dataAttribute(state.element, 'sparkline-tooltipformatfieldlist', undefined);
+ let tooltipFormatFieldlistKey = NETDATA.dataAttribute(state.element, 'sparkline-tooltipformatfieldlistkey', undefined);
+ let numberFormatter = NETDATA.dataAttribute(state.element, 'sparkline-numberformatter', function (n) {
+ return n.toFixed(2);
+ });
+ let numberDigitGroupSep = NETDATA.dataAttribute(state.element, 'sparkline-numberdigitgroupsep', undefined);
+ let numberDecimalMark = NETDATA.dataAttribute(state.element, 'sparkline-numberdecimalmark', undefined);
+ let numberDigitGroupCount = NETDATA.dataAttribute(state.element, 'sparkline-numberdigitgroupcount', undefined);
+ let animatedZooms = NETDATA.dataAttributeBoolean(state.element, 'sparkline-animatedzooms', false);
+
+ if (spotColor === 'disable') {
+ spotColor = '';
+ }
+ if (minSpotColor === 'disable') {
+ minSpotColor = '';
+ }
+ if (maxSpotColor === 'disable') {
+ maxSpotColor = '';
+ }
+
+ // state.log('sparkline type ' + type + ', lineColor: ' + lineColor + ', fillColor: ' + fillColor);
+
+ state.sparkline_options = {
+ type: type,
+ lineColor: lineColor,
+ fillColor: fillColor,
+ chartRangeMin: chartRangeMin,
+ chartRangeMax: chartRangeMax,
+ composite: composite,
+ enableTagOptions: enableTagOptions,
+ tagOptionPrefix: tagOptionPrefix,
+ tagValuesAttribute: tagValuesAttribute,
+ disableHiddenCheck: disableHiddenCheck,
+ defaultPixelsPerValue: defaultPixelsPerValue,
+ spotColor: spotColor,
+ minSpotColor: minSpotColor,
+ maxSpotColor: maxSpotColor,
+ spotRadius: spotRadius,
+ valueSpots: valueSpots,
+ highlightSpotColor: highlightSpotColor,
+ highlightLineColor: highlightLineColor,
+ lineWidth: lineWidth,
+ normalRangeMin: normalRangeMin,
+ normalRangeMax: normalRangeMax,
+ drawNormalOnTop: drawNormalOnTop,
+ xvalues: xvalues,
+ chartRangeClip: chartRangeClip,
+ chartRangeMinX: chartRangeMinX,
+ chartRangeMaxX: chartRangeMaxX,
+ disableInteraction: disableInteraction,
+ disableTooltips: disableTooltips,
+ disableHighlight: disableHighlight,
+ highlightLighten: highlightLighten,
+ highlightColor: highlightColor,
+ tooltipContainer: tooltipContainer,
+ tooltipClassname: tooltipClassname,
+ tooltipChartTitle: state.title,
+ tooltipFormat: tooltipFormat,
+ tooltipPrefix: tooltipPrefix,
+ tooltipSuffix: tooltipSuffix,
+ tooltipSkipNull: tooltipSkipNull,
+ tooltipValueLookups: tooltipValueLookups,
+ tooltipFormatFieldlist: tooltipFormatFieldlist,
+ tooltipFormatFieldlistKey: tooltipFormatFieldlistKey,
+ numberFormatter: numberFormatter,
+ numberDigitGroupSep: numberDigitGroupSep,
+ numberDecimalMark: numberDecimalMark,
+ numberDigitGroupCount: numberDigitGroupCount,
+ animatedZooms: animatedZooms,
+ width: state.chartWidth(),
+ height: state.chartHeight()
+ };
+
+ $(state.element_chart).sparkline(data.result, state.sparkline_options);
+
+ return true;
+};
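+
+// Illustrative usage (not part of the upstream file): each
+// 'sparkline-*' name read above maps to a 'data-*' attribute, e.g.
+// (the chart id is a placeholder):
+//
+//   <div data-netdata="system.cpu"
+//        data-chart-library="sparkline"
+//        data-sparkline-type="bar"
+//        data-sparkline-linecolor="#888"></div>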
diff --git a/web/gui/src/dashboard.js/charting/textonly.js b/web/gui/src/dashboard.js/charting/textonly.js
new file mode 100644
index 0000000..e7b9aa9
--- /dev/null
+++ b/web/gui/src/dashboard.js/charting/textonly.js
@@ -0,0 +1,18 @@
+
+// ----------------------------------------------------------------------------------------------------------------
+// "Text-only" chart - Just renders the raw value to the DOM
+
+NETDATA.textOnlyCreate = function (state, data) {
+    let decimalPlaces = NETDATA.dataAttribute(state.element, 'textonly-decimal-places', 1);
+    let prefix = NETDATA.dataAttribute(state.element, 'textonly-prefix', '');
+    let suffix = NETDATA.dataAttribute(state.element, 'textonly-suffix', '');
+
+    // round based on the number of decimal places to show
+    let precision = Math.pow(10, decimalPlaces);
+    let value = Math.round(data.result[0] * precision) / precision;
+
+    state.element.textContent = prefix + value + suffix;
+    return true;
+};
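+
+// Worked example (illustrative): with decimalPlaces = 1, precision is
+// 10, so a raw value of 3.14159 renders as Math.round(31.4159) / 10 = 3.1.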
+
+NETDATA.textOnlyUpdate = NETDATA.textOnlyCreate;
\ No newline at end of file
diff --git a/web/gui/src/dashboard.js/colors.js b/web/gui/src/dashboard.js/colors.js
new file mode 100644
index 0000000..4b98c01
--- /dev/null
+++ b/web/gui/src/dashboard.js/colors.js
@@ -0,0 +1,34 @@
+NETDATA.colorHex2Rgb = function (hex) {
+ // Expand shorthand form (e.g. "03F") to full form (e.g. "0033FF")
+ let shorthandRegex = /^#?([a-f\d])([a-f\d])([a-f\d])$/i;
+ hex = hex.replace(shorthandRegex, function (m, r, g, b) {
+ return r + r + g + g + b + b;
+ });
+
+ let result = /^#?([a-f\d]{2})([a-f\d]{2})([a-f\d]{2})$/i.exec(hex);
+ return result ? {
+ r: parseInt(result[1], 16),
+ g: parseInt(result[2], 16),
+ b: parseInt(result[3], 16)
+ } : null;
+};
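+
+// Worked example (illustrative): NETDATA.colorHex2Rgb('#03F') expands
+// the shorthand to '0033FF' and returns { r: 0, g: 51, b: 255 };
+// strings that do not parse as hex colors return null.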
+
+NETDATA.colorLuminance = function (hex, lum) {
+ // validate hex string
+ hex = String(hex).replace(/[^0-9a-f]/gi, '');
+ if (hex.length < 6) {
+ hex = hex[0] + hex[0] + hex[1] + hex[1] + hex[2] + hex[2];
+ }
+
+ lum = lum || 0;
+
+ // convert to decimal and change luminosity
+ let rgb = "#";
+ for (let i = 0; i < 3; i++) {
+ let c = parseInt(hex.substr(i * 2, 2), 16);
+ c = Math.round(Math.min(Math.max(0, c + (c * lum)), 255)).toString(16);
+ rgb += ("00" + c).substr(c.length);
+ }
+
+ return rgb;
+};
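+
+// Worked example (illustrative): NETDATA.colorLuminance('#336699', -0.2)
+// scales each channel by 0.8: 0x33 (51) -> 41 (0x29), 0x66 (102) -> 82
+// (0x52), 0x99 (153) -> 122 (0x7a), so it returns '#29527a'.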
diff --git a/web/gui/src/dashboard.js/common.js b/web/gui/src/dashboard.js/common.js
new file mode 100644
index 0000000..4a97bab
--- /dev/null
+++ b/web/gui/src/dashboard.js/common.js
@@ -0,0 +1,249 @@
+
+// Compute common (joint) values over multiple charts.
+
+
+// commonMin & commonMax
+
+NETDATA.commonMin = {
+ keys: {},
+ latest: {},
+
+ globalReset: function () {
+ this.keys = {};
+ this.latest = {};
+ },
+
+ get: function (state) {
+ if (typeof state.tmp.__commonMin === 'undefined') {
+ // get the commonMin setting
+ state.tmp.__commonMin = NETDATA.dataAttribute(state.element, 'common-min', null);
+ }
+
+ let min = state.data.min;
+ let name = state.tmp.__commonMin;
+
+ if (name === null) {
+ // we don't need commonMin
+ //state.log('no need for commonMin');
+ return min;
+ }
+
+ let t = this.keys[name];
+ if (typeof t === 'undefined') {
+ // add our commonMin
+ this.keys[name] = {};
+ t = this.keys[name];
+ }
+
+ let uuid = state.uuid;
+ if (typeof t[uuid] !== 'undefined') {
+ if (t[uuid] === min) {
+ //state.log('commonMin ' + state.tmp.__commonMin + ' not changed: ' + this.latest[name]);
+ return this.latest[name];
+ } else if (min < this.latest[name]) {
+ //state.log('commonMin ' + state.tmp.__commonMin + ' increased: ' + min);
+ t[uuid] = min;
+ this.latest[name] = min;
+ return min;
+ }
+ }
+
+ // add our min
+ t[uuid] = min;
+
+ // find the common min
+ let m = min;
+ // for (let i in t) {
+ // if (t.hasOwnProperty(i) && t[i] < m) m = t[i];
+ // }
+ for (var ti of Object.values(t)) {
+ if (ti < m) {
+ m = ti;
+ }
+ }
+
+ //state.log('commonMin ' + state.tmp.__commonMin + ' updated: ' + m);
+ this.latest[name] = m;
+ return m;
+ }
+};
+
+NETDATA.commonMax = {
+ keys: {},
+ latest: {},
+
+ globalReset: function () {
+ this.keys = {};
+ this.latest = {};
+ },
+
+ get: function (state) {
+ if (typeof state.tmp.__commonMax === 'undefined') {
+ // get the commonMax setting
+ state.tmp.__commonMax = NETDATA.dataAttribute(state.element, 'common-max', null);
+ }
+
+ let max = state.data.max;
+ let name = state.tmp.__commonMax;
+
+ if (name === null) {
+ // we don't need commonMax
+ //state.log('no need for commonMax');
+ return max;
+ }
+
+ let t = this.keys[name];
+ if (typeof t === 'undefined') {
+ // add our commonMax
+ this.keys[name] = {};
+ t = this.keys[name];
+ }
+
+ let uuid = state.uuid;
+ if (typeof t[uuid] !== 'undefined') {
+ if (t[uuid] === max) {
+ //state.log('commonMax ' + state.tmp.__commonMax + ' not changed: ' + this.latest[name]);
+ return this.latest[name];
+ } else if (max > this.latest[name]) {
+ //state.log('commonMax ' + state.tmp.__commonMax + ' increased: ' + max);
+ t[uuid] = max;
+ this.latest[name] = max;
+ return max;
+ }
+ }
+
+ // add our max
+ t[uuid] = max;
+
+ // find the common max
+ let m = max;
+ // for (let i in t) {
+ // if (t.hasOwnProperty(i) && t[i] > m) m = t[i];
+ // }
+ for (var ti of Object.values(t)) {
+ if (ti > m) {
+ m = ti;
+ }
+ }
+
+ //state.log('commonMax ' + state.tmp.__commonMax + ' updated: ' + m);
+ this.latest[name] = m;
+ return m;
+ }
+};
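+
+// Illustrative usage (not part of the upstream file): charts that
+// declare the same group name via data-common-min / data-common-max
+// share one scale; e.g. two disk charts both carrying
+// data-common-max="disks" are drawn against the largest max either of
+// them currently reports.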
+
+NETDATA.commonColors = {
+ keys: {},
+
+ globalReset: function () {
+ this.keys = {};
+ },
+
+ get: function (state, label) {
+ let ret = this.refill(state);
+
+ if (typeof ret.assigned[label] === 'undefined') {
+ ret.assigned[label] = ret.available.shift();
+ }
+
+ return ret.assigned[label];
+ },
+
+ refill: function (state) {
+ let ret, len;
+
+ if (typeof state.tmp.__commonColors === 'undefined') {
+ ret = this.prepare(state);
+ } else {
+ ret = this.keys[state.tmp.__commonColors];
+ if (typeof ret === 'undefined') {
+ ret = this.prepare(state);
+ }
+ }
+
+ if (ret.available.length === 0) {
+ if (ret.copy_theme || ret.custom.length === 0) {
+ // copy the theme colors
+ len = NETDATA.themes.current.colors.length;
+ while (len--) {
+ ret.available.unshift(NETDATA.themes.current.colors[len]);
+ }
+ }
+
+ // copy the custom colors
+ len = ret.custom.length;
+ while (len--) {
+ ret.available.unshift(ret.custom[len]);
+ }
+ }
+
+ state.colors_assigned = ret.assigned;
+ state.colors_available = ret.available;
+ state.colors_custom = ret.custom;
+
+ return ret;
+ },
+
+ __read_custom_colors: function (state, ret) {
+ // add the user supplied colors
+ let c = NETDATA.dataAttribute(state.element, 'colors', undefined);
+ if (typeof c === 'string' && c.length > 0) {
+ c = c.split(' ');
+ let len = c.length;
+
+ if (len > 0 && c[len - 1] === 'ONLY') {
+ len--;
+ ret.copy_theme = false;
+ }
+
+ while (len--) {
+ ret.custom.unshift(c[len]);
+ }
+ }
+ },
+
+ prepare: function (state) {
+ let has_custom_colors = false;
+
+ if (typeof state.tmp.__commonColors === 'undefined') {
+ let defname = state.chart.context;
+
+ // if this chart has data-colors=""
+ // we should use the chart uuid as the default key (private palette)
+            // (data-common-colors="NAME" will be used anyway, if present)
+ let c = NETDATA.dataAttribute(state.element, 'colors', undefined);
+ if (typeof c === 'string' && c.length > 0) {
+ defname = state.uuid;
+ has_custom_colors = true;
+ }
+
+ // get the commonColors setting
+ state.tmp.__commonColors = NETDATA.dataAttribute(state.element, 'common-colors', defname);
+ }
+
+ let name = state.tmp.__commonColors;
+ let ret = this.keys[name];
+
+ if (typeof ret === 'undefined') {
+            // add our commonColors key
+ this.keys[name] = {
+ assigned: {}, // name-value of dimensions and their colors
+ available: [], // an array of colors available to be used
+ custom: [], // the array of colors defined by the user
+ charts: {}, // the charts linked to this
+ copy_theme: true
+ };
+ ret = this.keys[name];
+ }
+
+ if (typeof ret.charts[state.uuid] === 'undefined') {
+ ret.charts[state.uuid] = state;
+
+ if (has_custom_colors) {
+ this.__read_custom_colors(state, ret);
+ }
+ }
+
+ return ret;
+ }
+};
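+
+// Illustrative usage (not part of the upstream file): per
+// __read_custom_colors() above, data-colors="#FF0000 #00FF00" prepends
+// two custom colors ahead of the theme palette, while appending the
+// keyword ONLY (data-colors="#FF0000 #00FF00 ONLY") sets
+// copy_theme = false and disables the theme colors entirely.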
diff --git a/web/gui/src/dashboard.js/compatibility.js b/web/gui/src/dashboard.js/compatibility.js
new file mode 100644
index 0000000..e1ecfbd
--- /dev/null
+++ b/web/gui/src/dashboard.js/compatibility.js
@@ -0,0 +1,31 @@
+// *** src/dashboard.js/compatibility.js
+
+// Compatibility fixes.
+
+// fix IE issue with console
+if (!window.console) {
+ window.console = {
+ log: function () {
+ }
+ };
+}
+
+// if string.endsWith is not defined, define it
+if (typeof String.prototype.endsWith !== 'function') {
+ String.prototype.endsWith = function (s) {
+ if (s.length > this.length) {
+ return false;
+ }
+ return this.slice(-s.length) === s;
+ };
+}
+
+// if string.startsWith is not defined, define it
+if (typeof String.prototype.startsWith !== 'function') {
+ String.prototype.startsWith = function (s) {
+ if (s.length > this.length) {
+ return false;
+ }
+        return this.slice(0, s.length) === s;
+ };
+}
diff --git a/web/gui/src/dashboard.js/dependencies.js b/web/gui/src/dashboard.js/dependencies.js
new file mode 100644
index 0000000..fff7818
--- /dev/null
+++ b/web/gui/src/dashboard.js/dependencies.js
@@ -0,0 +1,21 @@
+
+// *** src/dashboard.js/dependencies.js
+
+// default URLs for all the external files we need.
+// keep them RELATIVE, so that the whole dashboard can also be
+// installed under any path of a web server
+NETDATA.jQuery = NETDATA.serverStatic + 'lib/jquery-2.2.4.min.js';
+NETDATA.peity_js = NETDATA.serverStatic + 'lib/jquery.peity-3.2.0.min.js';
+NETDATA.sparkline_js = NETDATA.serverStatic + 'lib/jquery.sparkline-2.1.2.min.js';
+NETDATA.easypiechart_js = NETDATA.serverStatic + 'lib/jquery.easypiechart-97b5824.min.js';
+NETDATA.gauge_js = NETDATA.serverStatic + 'lib/gauge-1.3.2.min.js';
+NETDATA.dygraph_js = NETDATA.serverStatic + 'lib/dygraph-c91c859.min.js';
+NETDATA.dygraph_smooth_js = NETDATA.serverStatic + 'lib/dygraph-smooth-plotter-c91c859.js';
+// NETDATA.raphael_js = NETDATA.serverStatic + 'lib/raphael-2.2.4-min.js';
+// NETDATA.c3_js = NETDATA.serverStatic + 'lib/c3-0.4.18.min.js';
+// NETDATA.c3_css = NETDATA.serverStatic + 'css/c3-0.4.18.min.css';
+NETDATA.d3pie_js = NETDATA.serverStatic + 'lib/d3pie-0.2.1-netdata-3.js';
+NETDATA.d3_js = NETDATA.serverStatic + 'lib/d3-4.12.2.min.js';
+// NETDATA.morris_js = NETDATA.serverStatic + 'lib/morris-0.5.1.min.js';
+// NETDATA.morris_css = NETDATA.serverStatic + 'css/morris-0.5.1.css';
+NETDATA.google_js = 'https://www.google.com/jsapi';
diff --git a/web/gui/src/dashboard.js/epilogue.js.inc b/web/gui/src/dashboard.js/epilogue.js.inc
new file mode 100644
index 0000000..c612988
--- /dev/null
+++ b/web/gui/src/dashboard.js/epilogue.js.inc
@@ -0,0 +1 @@
+})(window, document, (typeof jQuery === 'function')?jQuery:undefined);
diff --git a/web/gui/src/dashboard.js/error-handling.js b/web/gui/src/dashboard.js/error-handling.js
new file mode 100644
index 0000000..abc7c61
--- /dev/null
+++ b/web/gui/src/dashboard.js/error-handling.js
@@ -0,0 +1,52 @@
+// Error Handling
+
+NETDATA.errorCodes = {
+ 100: {message: "Cannot load chart library", alert: true},
+ 101: {message: "Cannot load jQuery", alert: true},
+ 402: {message: "Chart library not found", alert: false},
+    403: {message: "Chart library not enabled or failed to initialize", alert: false},
+ 404: {message: "Chart not found", alert: false},
+ 405: {message: "Cannot download charts index from server", alert: true},
+ 406: {message: "Invalid charts index downloaded from server", alert: true},
+ 407: {message: "Cannot HELLO netdata server", alert: false},
+    408: {message: "Netdata server sent an invalid response to HELLO", alert: false},
+ 409: {message: "Cannot ACCESS netdata registry", alert: false},
+ 410: {message: "Netdata registry ACCESS failed", alert: false},
+    411: {message: "Netdata registry server sent an invalid response to DELETE", alert: false},
+    412: {message: "Netdata registry DELETE failed", alert: false},
+    413: {message: "Netdata registry server sent an invalid response to SWITCH", alert: false},
+    414: {message: "Netdata registry SWITCH failed", alert: false},
+ 415: {message: "Netdata alarms download failed", alert: false},
+ 416: {message: "Netdata alarms log download failed", alert: false},
+    417: {message: "Netdata registry server sent an invalid response to SEARCH", alert: false},
+ 418: {message: "Netdata registry SEARCH failed", alert: false}
+};
+
+NETDATA.errorLast = {
+ code: 0,
+ message: "",
+ datetime: 0
+};
+
+NETDATA.error = function (code, msg) {
+ NETDATA.errorLast.code = code;
+ NETDATA.errorLast.message = msg;
+ NETDATA.errorLast.datetime = Date.now();
+
+ console.log("ERROR " + code + ": " + NETDATA.errorCodes[code].message + ": " + msg);
+
+ let ret = true;
+ if (typeof netdataErrorCallback === 'function') {
+ ret = netdataErrorCallback('system', code, msg);
+ }
+
+ if (ret && NETDATA.errorCodes[code].alert) {
+ alert("ERROR " + code + ": " + NETDATA.errorCodes[code].message + ": " + msg);
+ }
+};
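+
+// Illustrative hook (a sketch, not part of the upstream file): a page
+// can intercept errors by defining netdataErrorCallback before this
+// script loads; returning false suppresses the blocking alert() above
+// for codes flagged with alert: true.
+//
+//   window.netdataErrorCallback = function (source, code, msg) {
+//       console.warn('netdata error', source, code, msg);
+//       return false;
+//   };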
+
+NETDATA.errorReset = function () {
+ NETDATA.errorLast.code = 0;
+ NETDATA.errorLast.message = "You are doing fine!";
+ NETDATA.errorLast.datetime = 0;
+};
diff --git a/web/gui/src/dashboard.js/localstorage.js b/web/gui/src/dashboard.js/localstorage.js
new file mode 100644
index 0000000..5bbf5a2
--- /dev/null
+++ b/web/gui/src/dashboard.js/localstorage.js
@@ -0,0 +1,173 @@
+
+// local storage options
+
+NETDATA.localStorage = {
+ default: {},
+ current: {},
+ callback: {} // only used for resetting back to defaults
+};
+
+NETDATA.localStorageTested = -1;
+NETDATA.localStorageTest = function () {
+ if (NETDATA.localStorageTested !== -1) {
+ return NETDATA.localStorageTested;
+ }
+
+ if (typeof Storage !== "undefined" && typeof localStorage === 'object') {
+ let test = 'test';
+ try {
+ localStorage.setItem(test, test);
+ localStorage.removeItem(test);
+ NETDATA.localStorageTested = true;
+ } catch (e) {
+ NETDATA.localStorageTested = false;
+ }
+ } else {
+ NETDATA.localStorageTested = false;
+ }
+
+ return NETDATA.localStorageTested;
+};
+
+NETDATA.localStorageGet = function (key, def, callback) {
+ let ret = def;
+
+ if (typeof NETDATA.localStorage.default[key.toString()] === 'undefined') {
+ NETDATA.localStorage.default[key.toString()] = def;
+ NETDATA.localStorage.callback[key.toString()] = callback;
+ }
+
+ if (NETDATA.localStorageTest()) {
+ try {
+ // console.log('localStorage: loading "' + key.toString() + '"');
+ ret = localStorage.getItem(key.toString());
+ // console.log('netdata loaded: ' + key.toString() + ' = ' + ret.toString());
+ if (ret === null || ret === 'undefined') {
+ // console.log('localStorage: cannot load it, saving "' + key.toString() + '" with value "' + JSON.stringify(def) + '"');
+ localStorage.setItem(key.toString(), JSON.stringify(def));
+ ret = def;
+ } else {
+ // console.log('localStorage: got "' + key.toString() + '" with value "' + ret + '"');
+ ret = JSON.parse(ret);
+ // console.log('localStorage: loaded "' + key.toString() + '" as value ' + ret + ' of type ' + typeof(ret));
+ }
+ } catch (error) {
+ console.log('localStorage: failed to read "' + key.toString() + '", using default: "' + def.toString() + '"');
+ ret = def;
+ }
+ }
+
+ if (typeof ret === 'undefined' || ret === 'undefined') {
+ console.log('localStorage: LOADED UNDEFINED "' + key.toString() + '" as value ' + ret + ' of type ' + typeof(ret));
+ ret = def;
+ }
+
+ NETDATA.localStorage.current[key.toString()] = ret;
+ return ret;
+};
+
+NETDATA.localStorageSet = function (key, value, callback) {
+ if (typeof value === 'undefined' || value === 'undefined') {
+ console.log('localStorage: ATTEMPT TO SET UNDEFINED "' + key.toString() + '" as value ' + value + ' of type ' + typeof(value));
+ }
+
+ if (typeof NETDATA.localStorage.default[key.toString()] === 'undefined') {
+ NETDATA.localStorage.default[key.toString()] = value;
+ NETDATA.localStorage.current[key.toString()] = value;
+ NETDATA.localStorage.callback[key.toString()] = callback;
+ }
+
+ if (NETDATA.localStorageTest()) {
+ // console.log('localStorage: saving "' + key.toString() + '" with value "' + JSON.stringify(value) + '"');
+ try {
+ localStorage.setItem(key.toString(), JSON.stringify(value));
+ } catch (e) {
+ console.log('localStorage: failed to save "' + key.toString() + '" with value: "' + value.toString() + '"');
+ }
+ }
+
+ NETDATA.localStorage.current[key.toString()] = value;
+ return value;
+};
+
+NETDATA.localStorageGetRecursive = function (obj, prefix, callback) {
+ let keys = Object.keys(obj);
+ let len = keys.length;
+ while (len--) {
+ let i = keys[len];
+
+ if (typeof obj[i] === 'object') {
+ //console.log('object ' + prefix + '.' + i.toString());
+ NETDATA.localStorageGetRecursive(obj[i], prefix + '.' + i.toString(), callback);
+ continue;
+ }
+
+ obj[i] = NETDATA.localStorageGet(prefix + '.' + i.toString(), obj[i], callback);
+ }
+};
+
+NETDATA.setOption = function (key, value) {
+ if (key.toString() === 'setOptionCallback') {
+ if (typeof NETDATA.options.current.setOptionCallback === 'function') {
+ NETDATA.options.current[key.toString()] = value;
+ NETDATA.options.current.setOptionCallback();
+ }
+ } else if (NETDATA.options.current[key.toString()] !== value) {
+ let name = 'options.' + key.toString();
+
+ if (typeof NETDATA.localStorage.default[name.toString()] === 'undefined') {
+ console.log('localStorage: setOption() on unsaved option: "' + name.toString() + '", value: ' + value);
+ }
+
+ //console.log(NETDATA.localStorage);
+ //console.log('setOption: setting "' + key.toString() + '" to "' + value + '" of type ' + typeof(value) + ' original type ' + typeof(NETDATA.options.current[key.toString()]));
+ //console.log(NETDATA.options);
+ NETDATA.options.current[key.toString()] = NETDATA.localStorageSet(name.toString(), value, null);
+
+ if (typeof NETDATA.options.current.setOptionCallback === 'function') {
+ NETDATA.options.current.setOptionCallback();
+ }
+ }
+
+ return true;
+};
+
+NETDATA.getOption = function (key) {
+ return NETDATA.options.current[key.toString()];
+};
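+
+// Illustrative usage (not part of the upstream file): options set this
+// way are persisted to localStorage under an 'options.' prefix, e.g.
+//
+//   NETDATA.setOption('stop_updates_when_focus_is_lost', false);
+//   NETDATA.getOption('stop_updates_when_focus_is_lost'); // -> false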
+
+// read settings from local storage
+NETDATA.localStorageGetRecursive(NETDATA.options.current, 'options', null);
+
+// always start with this option enabled.
+NETDATA.setOption('stop_updates_when_focus_is_lost', true);
+
+NETDATA.resetOptions = function () {
+ let keys = Object.keys(NETDATA.localStorage.default);
+ let len = keys.length;
+
+ while (len--) {
+ let i = keys[len];
+ let a = i.split('.');
+
+ if (a[0] === 'options') {
+ if (a[1] === 'setOptionCallback') {
+ continue;
+ }
+ if (typeof NETDATA.localStorage.default[i] === 'undefined') {
+ continue;
+ }
+ if (NETDATA.options.current[i] === NETDATA.localStorage.default[i]) {
+ continue;
+ }
+
+ NETDATA.setOption(a[1], NETDATA.localStorage.default[i]);
+ } else if (a[0] === 'chart_heights') {
+ if (typeof NETDATA.localStorage.callback[i] === 'function' && typeof NETDATA.localStorage.default[i] !== 'undefined') {
+ NETDATA.localStorage.callback[i](NETDATA.localStorage.default[i]);
+ }
+ }
+ }
+
+ NETDATA.dateTime.init(NETDATA.options.current.timezone);
+};
diff --git a/web/gui/src/dashboard.js/main.js b/web/gui/src/dashboard.js/main.js
new file mode 100644
index 0000000..c1cfba5
--- /dev/null
+++ b/web/gui/src/dashboard.js/main.js
@@ -0,0 +1,4279 @@
+
+// *** src/dashboard.js/main.js
+
+// Codacy declarations
+/* global clipboard */
+/* global Ps */
+
+if (NETDATA.options.debug.main_loop) {
+ console.log('welcome to NETDATA');
+}
+
+NETDATA.onresizeCallback = null;
+NETDATA.onresize = function () {
+ NETDATA.options.last_page_resize = Date.now();
+ NETDATA.onscroll();
+
+ if (typeof NETDATA.onresizeCallback === 'function') {
+ NETDATA.onresizeCallback();
+ }
+};
+
+NETDATA.abortAllRefreshes = function () {
+ let targets = NETDATA.options.targets;
+ let len = targets.length;
+
+ while (len--) {
+ if (targets[len].fetching_data) {
+ if (typeof targets[len].xhr !== 'undefined') {
+ targets[len].xhr.abort();
+ targets[len].running = false;
+ targets[len].fetching_data = false;
+ }
+ }
+ }
+};
+
+NETDATA.onscrollStartDelay = function () {
+ NETDATA.options.last_page_scroll = Date.now();
+
+ NETDATA.options.on_scroll_refresher_stop_until =
+ NETDATA.options.last_page_scroll
+ + (NETDATA.options.current.async_on_scroll ? 1000 : 0);
+};
+
+NETDATA.onscrollEndDelay = function () {
+ NETDATA.options.on_scroll_refresher_stop_until =
+ Date.now()
+ + (NETDATA.options.current.async_on_scroll ? NETDATA.options.current.onscroll_worker_duration_threshold : 0);
+};
+
+NETDATA.onscroll_updater_timeout_id = undefined;
+NETDATA.onscrollUpdater = function () {
+ NETDATA.globalSelectionSync.stop();
+
+ if (NETDATA.options.abort_ajax_on_scroll) {
+ NETDATA.abortAllRefreshes();
+ }
+
+    // while the user scrolls, charts that are not visible are hidden;
+    // this little function switches them back to visible as quickly
+    // as possible
+
+ if (!NETDATA.intersectionObserver.enabled()) {
+ if (!NETDATA.options.current.parallel_refresher) {
+ let targets = NETDATA.options.targets;
+ let len = targets.length;
+
+ while (len--) {
+ if (!targets[len].running) {
+ targets[len].isVisible();
+ }
+ }
+ }
+ }
+
+ NETDATA.onscrollEndDelay();
+};
+
+NETDATA.scrollUp = false;
+NETDATA.scrollY = window.scrollY;
+NETDATA.onscroll = function () {
+ //console.log('onscroll() begin');
+
+ NETDATA.onscrollStartDelay();
+ NETDATA.chartRefresherReschedule();
+
+ NETDATA.scrollUp = (window.scrollY > NETDATA.scrollY);
+ NETDATA.scrollY = window.scrollY;
+
+ if (NETDATA.onscroll_updater_timeout_id) {
+ NETDATA.timeout.clear(NETDATA.onscroll_updater_timeout_id);
+ }
+
+ NETDATA.onscroll_updater_timeout_id = NETDATA.timeout.set(NETDATA.onscrollUpdater, 0);
+ //console.log('onscroll() end');
+};
+
+NETDATA.supportsPassiveEvents = function () {
+ if (NETDATA.options.passive_events === null) {
+ let supportsPassive = false;
+ try {
+ let opts = Object.defineProperty({}, 'passive', {
+ get: function () {
+ supportsPassive = true;
+ }
+ });
+ window.addEventListener("test", null, opts);
+ } catch (e) {
+ console.log('browser does not support passive events');
+ }
+
+ NETDATA.options.passive_events = supportsPassive;
+ }
+
+ // console.log('passive ' + NETDATA.options.passive_events);
+ return NETDATA.options.passive_events;
+};
+
+window.addEventListener('resize', NETDATA.onresize, NETDATA.supportsPassiveEvents() ? {passive: true} : false);
+window.addEventListener('scroll', NETDATA.onscroll, NETDATA.supportsPassiveEvents() ? {passive: true} : false);
+// window.onresize = NETDATA.onresize;
+// window.onscroll = NETDATA.onscroll;
+
+// ----------------------------------------------------------------------------------------------------------------
+// Global Pan and Zoom on charts
+
+// This structure synchronizes all the charts: when you pan or zoom
+// one, all the others are automatically refreshed to the same
+// timespan.
+
+NETDATA.globalPanAndZoom = {
+ seq: 0, // timestamp ms
+ // every time a chart is panned or zoomed
+ // we set the timestamp here
+ // then we use it as a sequence number
+ // to find if other charts are synchronized
+ // to this time-range
+
+ master: null, // the master chart (state), to which all others
+ // are synchronized
+
+ force_before_ms: null, // the timespan to sync all other charts
+ force_after_ms: null,
+
+ callback: null,
+
+ globalReset: function () {
+ this.clearMaster();
+ this.seq = 0;
+ this.master = null;
+ this.force_after_ms = null;
+ this.force_before_ms = null;
+ this.callback = null;
+ },
+
+ delay: function () {
+ if (NETDATA.options.debug.globalPanAndZoom) {
+ console.log('globalPanAndZoom.delay()');
+ }
+
+ NETDATA.options.auto_refresher_stop_until = Date.now() + NETDATA.options.current.global_pan_sync_time;
+ },
+
+ // set a new master
+ setMaster: function (state, after, before) {
+ this.delay();
+
+ if (!NETDATA.options.current.sync_pan_and_zoom) {
+ return;
+ }
+
+ if (this.master === null) {
+ if (NETDATA.options.debug.globalPanAndZoom) {
+ console.log('globalPanAndZoom.setMaster(' + state.id + ', ' + after + ', ' + before + ') SET MASTER');
+ }
+ } else if (this.master !== state) {
+ if (NETDATA.options.debug.globalPanAndZoom) {
+ console.log('globalPanAndZoom.setMaster(' + state.id + ', ' + after + ', ' + before + ') CHANGED MASTER');
+ }
+
+ this.master.resetChart(true, true);
+ }
+
+ let now = Date.now();
+ this.master = state;
+ this.seq = now;
+ this.force_after_ms = after;
+ this.force_before_ms = before;
+
+ if (typeof this.callback === 'function') {
+ this.callback(true, after, before);
+ }
+ },
+
+ // clear the master
+ clearMaster: function () {
+ // if (NETDATA.options.debug.globalPanAndZoom === true)
+ // console.log('globalPanAndZoom.clearMaster()');
+ if (NETDATA.options.debug.globalPanAndZoom) {
+ console.log('globalPanAndZoom.clearMaster()');
+ }
+
+ if (this.master !== null) {
+ let st = this.master;
+ this.master = null;
+ st.resetChart();
+ }
+
+ this.master = null;
+ this.seq = 0;
+ this.force_after_ms = null;
+ this.force_before_ms = null;
+ NETDATA.options.auto_refresher_stop_until = 0;
+
+ if (typeof this.callback === 'function') {
+ this.callback(false, 0, 0);
+ }
+ },
+
+ // is the given state the master of the global
+ // pan and zoom sync?
+ isMaster: function (state) {
+ return (this.master === state);
+ },
+
+    // is a global pan and zoom sync currently active?
+ isActive: function () {
+ return (this.master !== null && this.force_before_ms !== null && this.force_after_ms !== null && this.seq !== 0);
+ },
+
+ // check if a chart, other than the master
+ // needs to be refreshed, due to the global pan and zoom
+ shouldBeAutoRefreshed: function (state) {
+ if (this.master === null || this.seq === 0) {
+ return false;
+ }
+
+ //if (state.needsRecreation())
+ // return true;
+
+ return (state.tm.pan_and_zoom_seq !== this.seq);
+ }
+};
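+
+// Illustrative flow (a sketch): the chart being panned or zoomed calls
+// setMaster(state, after, before); every other chart then notices,
+// via shouldBeAutoRefreshed(), that its tm.pan_and_zoom_seq differs
+// from .seq and refetches the same [after, before] window; clearMaster()
+// returns all charts to normal auto-refresh.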
+
+// ----------------------------------------------------------------------------------------------------------------
+// global chart underlay (time-frame highlighting)
+
+NETDATA.globalChartUnderlay = {
+    callback: null, // what to call when a highlighted range is set up
+    after: null, // highlight after this time
+    before: null, // highlight before this time
+    view_after: null, // the chart's after_ms viewport when the highlight was set up
+    view_before: null, // the chart's before_ms viewport when the highlight was set up
+    state: null, // the chart on which the highlight was set up
+
+ isActive: function () {
+ return (this.after !== null && this.before !== null);
+ },
+
+ hasViewport: function () {
+ return (this.state !== null && this.view_after !== null && this.view_before !== null);
+ },
+
+ init: function (state, after, before, view_after, view_before) {
+ this.state = (typeof state !== 'undefined') ? state : null;
+ this.after = (typeof after !== 'undefined' && after !== null && after > 0) ? after : null;
+ this.before = (typeof before !== 'undefined' && before !== null && before > 0) ? before : null;
+ this.view_after = (typeof view_after !== 'undefined' && view_after !== null && view_after > 0) ? view_after : null;
+ this.view_before = (typeof view_before !== 'undefined' && view_before !== null && view_before > 0) ? view_before : null;
+ },
+
+ setup: function () {
+ if (this.isActive()) {
+ if (this.state === null) {
+ this.state = NETDATA.options.targets[0];
+ }
+
+ if (typeof this.callback === 'function') {
+ this.callback(true, this.after, this.before);
+ }
+ } else {
+ if (typeof this.callback === 'function') {
+ this.callback(false, 0, 0);
+ }
+ }
+ },
+
+ set: function (state, after, before, view_after, view_before) {
+ if (after > before) {
+ let t = after;
+ after = before;
+ before = t;
+ }
+
+ this.init(state, after, before, view_after, view_before);
+
+ // if (this.hasViewport() === true)
+ // NETDATA.globalPanAndZoom.setMaster(this.state, this.view_after, this.view_before);
+ if (this.hasViewport()) {
+ NETDATA.globalPanAndZoom.setMaster(this.state, this.view_after, this.view_before);
+ }
+
+ this.setup();
+ },
+
+ clear: function () {
+ this.after = null;
+ this.before = null;
+ this.state = null;
+ this.view_after = null;
+ this.view_before = null;
+
+ if (typeof this.callback === 'function') {
+ this.callback(false, 0, 0);
+ }
+ },
+
+ focus: function () {
+ if (this.isActive() && this.hasViewport()) {
+ if (this.state === null) {
+ this.state = NETDATA.options.targets[0];
+ }
+
+ if (NETDATA.globalPanAndZoom.isMaster(this.state)) {
+ NETDATA.globalPanAndZoom.clearMaster();
+ }
+
+ NETDATA.globalPanAndZoom.setMaster(this.state, this.view_after, this.view_before, true);
+ }
+ }
+};
+
+// ----------------------------------------------------------------------------------------------------------------
+// dimensions selection
+
+// TODO
+// move color assignment to dimensions, here
+
+let dimensionStatus = function (parent, label, name_div, value_div, color) {
+ this.enabled = false;
+ this.parent = parent;
+ this.label = label;
+ this.name_div = null;
+ this.value_div = null;
+ this.color = NETDATA.themes.current.foreground;
+ this.selected = (parent.unselected_count === 0);
+
+ this.setOptions(name_div, value_div, color);
+};
+
+dimensionStatus.prototype.invalidate = function () {
+ this.name_div = null;
+ this.value_div = null;
+ this.enabled = false;
+};
+
+dimensionStatus.prototype.setOptions = function (name_div, value_div, color) {
+ this.color = color;
+
+ if (this.name_div !== name_div) {
+ this.name_div = name_div;
+ this.name_div.title = this.label;
+ this.name_div.style.setProperty('color', this.color, 'important');
+ if (!this.selected) {
+ this.name_div.className = 'netdata-legend-name not-selected';
+ } else {
+ this.name_div.className = 'netdata-legend-name selected';
+ }
+ }
+
+ if (this.value_div !== value_div) {
+ this.value_div = value_div;
+ this.value_div.title = this.label;
+ this.value_div.style.setProperty('color', this.color, 'important');
+ if (!this.selected) {
+ this.value_div.className = 'netdata-legend-value not-selected';
+ } else {
+ this.value_div.className = 'netdata-legend-value selected';
+ }
+ }
+
+ this.enabled = true;
+ this.setHandler();
+};
+
+dimensionStatus.prototype.setHandler = function () {
+ if (!this.enabled) {
+ return;
+ }
+
+ let ds = this;
+
+ // this.name_div.onmousedown = this.value_div.onmousedown = function(e) {
+ this.name_div.onclick = this.value_div.onclick = function (e) {
+ e.preventDefault();
+ if (ds.isSelected()) {
+ // this is selected
+ if (e.shiftKey || e.ctrlKey) {
+                // control or shift key is pressed -> unselect this (unless none would remain selected, in which case select all)
+ ds.unselect();
+
+ if (ds.parent.countSelected() === 0) {
+ ds.parent.selectAll();
+ }
+ } else {
+ // no key is pressed -> select only this (except if it is the only selected already, in which case select all)
+ if (ds.parent.countSelected() === 1) {
+ ds.parent.selectAll();
+ } else {
+ ds.parent.selectNone();
+ ds.select();
+ }
+ }
+ }
+ else {
+ // this is not selected
+ if (e.shiftKey || e.ctrlKey) {
+ // control or shift key is pressed -> select this too
+ ds.select();
+ } else {
+ // no key is pressed -> select only this
+ ds.parent.selectNone();
+ ds.select();
+ }
+ }
+
+ ds.parent.state.redrawChart();
+ }
+};
+
+dimensionStatus.prototype.select = function () {
+ if (!this.enabled) {
+ return;
+ }
+
+ this.name_div.className = 'netdata-legend-name selected';
+ this.value_div.className = 'netdata-legend-value selected';
+ this.selected = true;
+};
+
+dimensionStatus.prototype.unselect = function () {
+ if (!this.enabled) {
+ return;
+ }
+
+ this.name_div.className = 'netdata-legend-name not-selected';
+ this.value_div.className = 'netdata-legend-value hidden';
+ this.selected = false;
+};
+
+dimensionStatus.prototype.isSelected = function () {
+    return this.enabled && this.selected;
+};
+
+// ----------------------------------------------------------------------------------------------------------------
+
+let dimensionsVisibility = function (state) {
+ this.state = state;
+ this.len = 0;
+ this.dimensions = {};
+ this.selected_count = 0;
+ this.unselected_count = 0;
+};
+
+dimensionsVisibility.prototype.dimensionAdd = function (label, name_div, value_div, color) {
+ if (typeof this.dimensions[label] === 'undefined') {
+ this.len++;
+ this.dimensions[label] = new dimensionStatus(this, label, name_div, value_div, color);
+ } else {
+ this.dimensions[label].setOptions(name_div, value_div, color);
+ }
+
+ return this.dimensions[label];
+};
+
+dimensionsVisibility.prototype.dimensionGet = function (label) {
+ return this.dimensions[label];
+};
+
+dimensionsVisibility.prototype.invalidateAll = function () {
+ let keys = Object.keys(this.dimensions);
+ let len = keys.length;
+ while (len--) {
+ this.dimensions[keys[len]].invalidate();
+ }
+};
+
+dimensionsVisibility.prototype.selectAll = function () {
+ let keys = Object.keys(this.dimensions);
+ let len = keys.length;
+ while (len--) {
+ this.dimensions[keys[len]].select();
+ }
+};
+
+dimensionsVisibility.prototype.countSelected = function () {
+ let selected = 0;
+ let keys = Object.keys(this.dimensions);
+ let len = keys.length;
+ while (len--) {
+ if (this.dimensions[keys[len]].isSelected()) {
+ selected++;
+ }
+ }
+
+ return selected;
+};
+
+dimensionsVisibility.prototype.selectNone = function () {
+ let keys = Object.keys(this.dimensions);
+ let len = keys.length;
+ while (len--) {
+ this.dimensions[keys[len]].unselect();
+ }
+};
+
+dimensionsVisibility.prototype.selected2BooleanArray = function (array) {
+ let ret = [];
+ this.selected_count = 0;
+ this.unselected_count = 0;
+
+ let len = array.length;
+ while (len--) {
+ let ds = this.dimensions[array[len]];
+ if (typeof ds === 'undefined') {
+            // console.log(array[len] + ' is not found');
+ ret.unshift(false);
+ } else if (ds.isSelected()) {
+ ret.unshift(true);
+ this.selected_count++;
+ } else {
+ ret.unshift(false);
+ this.unselected_count++;
+ }
+ }
+
+ if (this.selected_count === 0 && this.unselected_count !== 0) {
+ this.selectAll();
+ return this.selected2BooleanArray(array);
+ }
+
+ return ret;
+};
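+
+// Illustrative example (not part of the library): assuming 'dv' is a
+// dimensionsVisibility instance and the name/value divs already exist in the
+// legend DOM, selected2BooleanArray() maps the chart's dimension order to
+// per-dimension booleans:
+//
+//   dv.dimensionAdd('in', name_div1, value_div1, '#00ff00');
+//   dv.dimensionAdd('out', name_div2, value_div2, '#ff0000');
+//   dv.dimensionGet('out').unselect();
+//   dv.selected2BooleanArray(['in', 'out']);   // -> [true, false]
+//
+// If nothing remains selected, it selects all and recurses once, so the
+// caller always receives at least one true entry.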
+
+// ----------------------------------------------------------------------------------------------------------------
+// date/time conversion
+
+NETDATA.dateTime = {
+ using_timezone: false,
+
+    // these are the old netdata functions;
+    // we fall back to these if the new ones fail
+
+ localeDateStringNative: function (d) {
+ return d.toLocaleDateString();
+ },
+
+ localeTimeStringNative: function (d) {
+ return d.toLocaleTimeString();
+ },
+
+ xAxisTimeStringNative: function (d) {
+ return NETDATA.zeropad(d.getHours()) + ":"
+ + NETDATA.zeropad(d.getMinutes()) + ":"
+ + NETDATA.zeropad(d.getSeconds());
+ },
+
+    // initialize the new date/time conversion functions.
+    // if this fails, we fall back to the native ones above.
+ init: function (timezone) {
+ //console.log('init with timezone: ' + timezone);
+
+ // detect browser timezone
+ try {
+ NETDATA.options.browser_timezone = Intl.DateTimeFormat().resolvedOptions().timeZone;
+ } catch (e) {
+ console.log('failed to detect browser timezone: ' + e.toString());
+ NETDATA.options.browser_timezone = 'cannot-detect-it';
+ }
+
+ let ret = false;
+
+ try {
+ let dateOptions = {
+ localeMatcher: 'best fit',
+ formatMatcher: 'best fit',
+ weekday: 'short',
+ year: 'numeric',
+ month: 'short',
+ day: '2-digit'
+ };
+
+ let timeOptions = {
+ localeMatcher: 'best fit',
+ hour12: false,
+ formatMatcher: 'best fit',
+ hour: '2-digit',
+ minute: '2-digit',
+ second: '2-digit'
+ };
+
+ let xAxisOptions = {
+ localeMatcher: 'best fit',
+ hour12: false,
+ formatMatcher: 'best fit',
+ hour: '2-digit',
+ minute: '2-digit',
+ second: '2-digit'
+ };
+
+ if (typeof timezone === 'string' && timezone !== '' && timezone !== 'default') {
+ dateOptions.timeZone = timezone;
+ timeOptions.timeZone = timezone;
+ timeOptions.timeZoneName = 'short';
+ xAxisOptions.timeZone = timezone;
+ this.using_timezone = true;
+ } else {
+ timezone = 'default';
+ this.using_timezone = false;
+ }
+
+ this.dateFormat = new Intl.DateTimeFormat(navigator.language, dateOptions);
+ this.timeFormat = new Intl.DateTimeFormat(navigator.language, timeOptions);
+ this.xAxisFormat = new Intl.DateTimeFormat(navigator.language, xAxisOptions);
+
+ this.localeDateString = function (d) {
+ return this.dateFormat.format(d);
+ };
+
+ this.localeTimeString = function (d) {
+ return this.timeFormat.format(d);
+ };
+
+ this.xAxisTimeString = function (d) {
+ return this.xAxisFormat.format(d);
+ };
+
+ ret = true;
+ } catch (e) {
+ console.log('Cannot setup Date/Time formatting: ' + e.toString());
+
+ timezone = 'default';
+ this.localeDateString = this.localeDateStringNative;
+ this.localeTimeString = this.localeTimeStringNative;
+ this.xAxisTimeString = this.xAxisTimeStringNative;
+ this.using_timezone = false;
+
+ ret = false;
+ }
+
+ // save it
+ //console.log('init setOption timezone: ' + timezone);
+ NETDATA.setOption('timezone', timezone);
+
+ return ret;
+ }
+};
+NETDATA.dateTime.init(NETDATA.options.current.timezone);
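+
+// Illustrative usage (not part of the library): after init(), the three
+// formatters behave like their *Native fallbacks, but honor the timezone:
+//
+//   NETDATA.dateTime.init('Europe/Athens');   // or 'default'
+//   let d = new Date(0);                      // the epoch
+//   NETDATA.dateTime.localeDateString(d);     // e.g. 'Thu, Jan 01, 1970'
+//   NETDATA.dateTime.localeTimeString(d);     // e.g. '02:00:00 EET'
+//   NETDATA.dateTime.xAxisTimeString(d);      // e.g. '02:00:00'
+//
+// The exact output depends on navigator.language; the values above assume an
+// English locale and the Europe/Athens timezone.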
+
+// ----------------------------------------------------------------------------------------------------------------
+// global selection sync
+
+NETDATA.globalSelectionSync = {
+ state: null,
+ dontSyncBefore: 0,
+ last_t: 0,
+ slaves: [],
+ timeoutId: undefined,
+
+ globalReset: function () {
+ this.stop();
+ this.state = null;
+ this.dontSyncBefore = 0;
+ this.last_t = 0;
+ this.slaves = [];
+ this.timeoutId = undefined;
+ },
+
+ active: function () {
+ return (this.state !== null);
+ },
+
+ // return true if global selection sync can be enabled now
+ enabled: function () {
+ // console.log('enabled()');
+ // can we globally apply selection sync?
+ if (!NETDATA.options.current.sync_selection) {
+ return false;
+ }
+
+ return (this.dontSyncBefore <= Date.now());
+ },
+
+ // set the global selection sync master
+ setMaster: function (state) {
+ if (!this.enabled()) {
+ this.stop();
+ return;
+ }
+
+ if (this.state === state) {
+ return;
+ }
+
+ if (this.state !== null) {
+ this.stop();
+ }
+
+ if (NETDATA.options.debug.globalSelectionSync) {
+ console.log('globalSelectionSync.setMaster(' + state.id + ')');
+ }
+
+ state.selected = true;
+ this.state = state;
+ this.last_t = 0;
+
+ // find all slaves
+ let targets = NETDATA.intersectionObserver.targets();
+ this.slaves = [];
+ let len = targets.length;
+ while (len--) {
+ let st = targets[len];
+ if (this.state !== st && st.globalSelectionSyncIsEligible()) {
+ this.slaves.push(st);
+ }
+ }
+
+ // this.delay(100);
+ },
+
+ // stop global selection sync
+ stop: function () {
+ if (this.state !== null) {
+ if (NETDATA.options.debug.globalSelectionSync) {
+ console.log('globalSelectionSync.stop()');
+ }
+
+ let len = this.slaves.length;
+ while (len--) {
+ this.slaves[len].clearSelection();
+ }
+
+ this.state.clearSelection();
+
+ this.last_t = 0;
+ this.slaves = [];
+ this.state = null;
+ }
+ },
+
+ // delay global selection sync for some time
+ delay: function (ms) {
+ if (NETDATA.options.current.sync_selection) {
+            if (NETDATA.options.debug.globalSelectionSync) {
+ console.log('globalSelectionSync.delay()');
+ }
+
+ if (typeof ms === 'number') {
+ this.dontSyncBefore = Date.now() + ms;
+ } else {
+ this.dontSyncBefore = Date.now() + NETDATA.options.current.sync_selection_delay;
+ }
+ }
+ },
+
+ __syncSlaves: function () {
+        if (NETDATA.globalSelectionSync.enabled()) {
+            if (NETDATA.options.debug.globalSelectionSync) {
+ console.log('globalSelectionSync.__syncSlaves()');
+ }
+
+ let t = NETDATA.globalSelectionSync.last_t;
+ let len = NETDATA.globalSelectionSync.slaves.length;
+ while (len--) {
+ NETDATA.globalSelectionSync.slaves[len].setSelection(t);
+ }
+
+ this.timeoutId = undefined;
+ }
+ },
+
+ // sync all the visible charts to the given time
+ // this is to be called from the chart libraries
+ sync: function (state, t) {
+        if (NETDATA.options.current.sync_selection) {
+            if (NETDATA.options.debug.globalSelectionSync) {
+ console.log('globalSelectionSync.sync(' + state.id + ', ' + t.toString() + ')');
+ }
+
+ this.setMaster(state);
+
+ if (t === this.last_t) {
+ return;
+ }
+
+ this.last_t = t;
+
+ if (state.foreignElementSelection !== null) {
+ state.foreignElementSelection.innerText = NETDATA.dateTime.localeDateString(t) + ' ' + NETDATA.dateTime.localeTimeString(t);
+ }
+
+ if (this.timeoutId) {
+ NETDATA.timeout.clear(this.timeoutId);
+ }
+
+ this.timeoutId = NETDATA.timeout.set(this.__syncSlaves, 0);
+ }
+ }
+};
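+
+// Illustrative call sequence (not part of the library): a chart library's
+// (hypothetical) hover handler is expected to drive the sync like this:
+//
+//   NETDATA.globalSelectionSync.sync(state, timestamp_ms); // highlight all charts
+//   // ...and when the pointer leaves the chart:
+//   NETDATA.globalSelectionSync.stop();                    // clear all selections
+//   NETDATA.globalSelectionSync.delay();                   // debounce re-syncing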
+
+NETDATA.intersectionObserver = {
+ observer: null,
+ visible_targets: [],
+
+ options: {
+ root: null,
+ rootMargin: "0px",
+ threshold: null
+ },
+
+ enabled: function () {
+ return this.observer !== null;
+ },
+
+ globalReset: function () {
+ if (this.observer !== null) {
+ this.visible_targets = [];
+ this.observer.disconnect();
+ this.init();
+ }
+ },
+
+ targets: function () {
+ if (this.enabled() && this.visible_targets.length > 0) {
+ return this.visible_targets;
+ } else {
+ return NETDATA.options.targets;
+ }
+ },
+
+ switchChartVisibility: function () {
+ let old = this.__visibilityRatioOld;
+
+ if (old !== this.__visibilityRatio) {
+ if (old === 0 && this.__visibilityRatio > 0) {
+ this.unhideChart();
+ } else if (old > 0 && this.__visibilityRatio === 0) {
+ this.hideChart();
+ }
+
+ this.__visibilityRatioOld = this.__visibilityRatio;
+ }
+ },
+
+ handler: function (entries, observer) {
+ entries.forEach(function (entry) {
+ let state = NETDATA.chartState(entry.target);
+
+ let idx;
+ if (entry.intersectionRatio > 0) {
+ idx = NETDATA.intersectionObserver.visible_targets.indexOf(state);
+ if (idx === -1) {
+ if (NETDATA.scrollUp) {
+ NETDATA.intersectionObserver.visible_targets.push(state);
+ } else {
+ NETDATA.intersectionObserver.visible_targets.unshift(state);
+ }
+ }
+ else if (state.__visibilityRatio === 0) {
+ state.log("was not visible until now, but was already in visible_targets");
+ }
+ } else {
+ idx = NETDATA.intersectionObserver.visible_targets.indexOf(state);
+ if (idx !== -1) {
+ NETDATA.intersectionObserver.visible_targets.splice(idx, 1);
+ } else if (state.__visibilityRatio > 0) {
+ state.log("was visible, but not found in visible_targets");
+ }
+ }
+
+ state.__visibilityRatio = entry.intersectionRatio;
+
+ if (!NETDATA.options.current.async_on_scroll) {
+ if (window.requestIdleCallback) {
+ window.requestIdleCallback(function () {
+ NETDATA.intersectionObserver.switchChartVisibility.call(state);
+ }, {timeout: 100});
+ } else {
+ NETDATA.intersectionObserver.switchChartVisibility.call(state);
+ }
+ }
+ });
+ },
+
+ observe: function (state) {
+ if (this.enabled()) {
+ state.__visibilityRatioOld = 0;
+ state.__visibilityRatio = 0;
+ this.observer.observe(state.element);
+
+ state.isVisible = function () {
+ if (!NETDATA.options.current.update_only_visible) {
+ return true;
+ }
+
+ NETDATA.intersectionObserver.switchChartVisibility.call(this);
+
+ return this.__visibilityRatio > 0;
+ }
+ }
+ },
+
+ init: function () {
+ if (typeof netdataIntersectionObserver === 'undefined' || netdataIntersectionObserver) {
+ try {
+ this.observer = new IntersectionObserver(this.handler, this.options);
+ } catch (e) {
+ console.log("IntersectionObserver is not supported on this browser");
+ this.observer = null;
+ }
+ }
+ //else {
+ // console.log("IntersectionObserver is disabled");
+ //}
+ }
+};
+NETDATA.intersectionObserver.init();
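+
+// Illustrative note (not part of the library): when IntersectionObserver is
+// available, targets() returns only the charts currently in the viewport;
+// otherwise it falls back to every registered chart:
+//
+//   NETDATA.intersectionObserver.observe(state);    // once per chart state
+//   let charts = NETDATA.intersectionObserver.targets();
+//   // -> the visible charts, or NETDATA.options.targets as a fallback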
+
+// ----------------------------------------------------------------------------------------------------------------
+// Our state object, where all per-chart values are stored
+
+let chartState = function (element) {
+ this.element = element;
+
+ // IMPORTANT:
+ // all private functions should use 'that', instead of 'this'
+ // Alternatively, you can use arrow functions (related issue #4514)
+ let that = this;
+
+ // ============================================================================================================
+ // ERROR HANDLING
+
+ /* error() - private
+ * show an error instead of the chart
+ */
+ let error = (msg) => {
+ let ret = true;
+
+ if (typeof netdataErrorCallback === 'function') {
+ ret = netdataErrorCallback('chart', this.id, msg);
+ }
+
+ if (ret) {
+ this.element.innerHTML = this.id + ': ' + msg;
+ this.enabled = false;
+ this.current = this.pan;
+ }
+ };
+
+ // console logging
+ this.log = function (msg) {
+ console.log(this.id + ' (' + this.library_name + ' ' + this.uuid + '): ' + msg);
+ };
+
+ this.debugLog = function (msg) {
+ if (this.debug) {
+ this.log(msg);
+ }
+ };
+
+ // ============================================================================================================
+ // EARLY INITIALIZATION
+
+ // These are variables that should exist even if the chart is never to be rendered.
+ // Be careful what you add here - there may be thousands of charts on the page.
+
+ // GUID - a unique identifier for the chart
+ this.uuid = NETDATA.guid();
+
+ // string - the name of chart
+ this.id = NETDATA.dataAttribute(this.element, 'netdata', undefined);
+ if (typeof this.id === 'undefined') {
+ error("netdata elements need data-netdata");
+ return;
+ }
+
+ // string - the key for localStorage settings
+ this.settings_id = NETDATA.dataAttribute(this.element, 'id', null);
+
+ // the user given dimensions of the element
+ this.width = NETDATA.dataAttribute(this.element, 'width', NETDATA.chartDefaults.width);
+ this.height = NETDATA.dataAttribute(this.element, 'height', NETDATA.chartDefaults.height);
+ this.height_original = this.height;
+
+ if (this.settings_id !== null) {
+ this.height = NETDATA.localStorageGet('chart_heights.' + this.settings_id, this.height, function (height) {
+ // this is the callback that will be called
+ // if and when the user resets all localStorage variables
+ // to their defaults
+
+ resizeChartToHeight(height);
+ });
+ }
+
+ // the chart library requested by the user
+ this.library_name = NETDATA.dataAttribute(this.element, 'chart-library', NETDATA.chartDefaults.library);
+
+ // check the requested library is available
+ // we don't initialize it here - it will be initialized when
+ // this chart will be first used
+ if (typeof NETDATA.chartLibraries[this.library_name] === 'undefined') {
+ NETDATA.error(402, this.library_name);
+ error('chart library "' + this.library_name + '" is not found');
+ this.enabled = false;
+ } else if (!NETDATA.chartLibraries[this.library_name].enabled) {
+ NETDATA.error(403, this.library_name);
+ error('chart library "' + this.library_name + '" is not enabled');
+ this.enabled = false;
+ } else {
+ this.library = NETDATA.chartLibraries[this.library_name];
+ }
+
+ this.auto = {
+ name: 'auto',
+ autorefresh: true,
+ force_update_at: 0, // the timestamp to force the update at
+ force_before_ms: null,
+ force_after_ms: null
+ };
+ this.pan = {
+ name: 'pan',
+ autorefresh: false,
+ force_update_at: 0, // the timestamp to force the update at
+ force_before_ms: null,
+ force_after_ms: null
+ };
+ this.zoom = {
+ name: 'zoom',
+ autorefresh: false,
+ force_update_at: 0, // the timestamp to force the update at
+ force_before_ms: null,
+ force_after_ms: null
+ };
+
+ // this is a pointer to one of the sub-classes below
+ // auto, pan, zoom
+ this.current = this.auto;
+
+ this.running = false; // boolean - true when the chart is being refreshed now
+ this.enabled = true; // boolean - is the chart enabled for refresh?
+
+ this.force_update_every = null; // number - overwrite the visualization update frequency of the chart
+
+ this.tmp = {};
+
+ this.foreignElementBefore = null;
+ this.foreignElementAfter = null;
+ this.foreignElementDuration = null;
+ this.foreignElementUpdateEvery = null;
+ this.foreignElementSelection = null;
+
+ // ============================================================================================================
+ // PRIVATE FUNCTIONS
+
+ // reset the runtime status variables to their defaults
+ const runtimeInit = () => {
+ this.paused = false; // boolean - is the chart paused for any reason?
+ this.selected = false; // boolean - is the chart shown a selection?
+
+ this.chart_created = false; // boolean - is the library.create() been called?
+ this.dom_created = false; // boolean - is the chart DOM been created?
+ this.fetching_data = false; // boolean - true while we fetch data via ajax
+
+ this.updates_counter = 0; // numeric - the number of refreshes made so far
+ this.updates_since_last_unhide = 0; // numeric - the number of refreshes made since the last time the chart was unhidden
+ this.updates_since_last_creation = 0; // numeric - the number of refreshes made since the last time the chart was created
+
+ this.tm = {
+ last_initialized: 0, // milliseconds - the timestamp it was last initialized
+ last_dom_created: 0, // milliseconds - the timestamp its DOM was last created
+ last_mode_switch: 0, // milliseconds - the timestamp it switched modes
+
+ last_info_downloaded: 0, // milliseconds - the timestamp we downloaded the chart
+ last_updated: 0, // the timestamp the chart last updated with data
+ pan_and_zoom_seq: 0, // the sequence number of the global synchronization
+                                        // between charts.
+ // Used with NETDATA.globalPanAndZoom.seq
+ last_visible_check: 0, // the time we last checked if it is visible
+ last_resized: 0, // the time the chart was resized
+ last_hidden: 0, // the time the chart was hidden
+ last_unhidden: 0, // the time the chart was unhidden
+ last_autorefreshed: 0 // the time the chart was last refreshed
+ };
+
+ this.data = null; // the last data as downloaded from the netdata server
+ this.data_url = 'invalid://'; // string - the last url used to update the chart
+ this.data_points = 0; // number - the number of points returned from netdata
+ this.data_after = 0; // milliseconds - the first timestamp of the data
+ this.data_before = 0; // milliseconds - the last timestamp of the data
+ this.data_update_every = 0; // milliseconds - the frequency to update the data
+
+ this.tmp = {}; // members that can be destroyed to save memory
+ };
+
+ // initialize all the variables that are required for the chart to be rendered
+ const lateInitialization = () => {
+ if (typeof this.host !== 'undefined') {
+ return;
+ }
+
+ // string - the netdata server URL, without any path
+ this.host = NETDATA.dataAttribute(this.element, 'host', NETDATA.serverDefault);
+
+ // make sure the host does not end with /
+ // all netdata API requests use absolute paths
+ while (this.host.slice(-1) === '/') {
+ this.host = this.host.substring(0, this.host.length - 1);
+ }
+
+ // string - the grouping method requested by the user
+ this.method = NETDATA.dataAttribute(this.element, 'method', NETDATA.chartDefaults.method);
+ this.gtime = NETDATA.dataAttribute(this.element, 'gtime', 0);
+
+ // the time-range requested by the user
+ this.after = NETDATA.dataAttribute(this.element, 'after', NETDATA.chartDefaults.after);
+ this.before = NETDATA.dataAttribute(this.element, 'before', NETDATA.chartDefaults.before);
+
+ // the pixels per point requested by the user
+ this.pixels_per_point = NETDATA.dataAttribute(this.element, 'pixels-per-point', 1);
+ this.points = NETDATA.dataAttribute(this.element, 'points', null);
+
+ // the forced update_every
+ this.force_update_every = NETDATA.dataAttribute(this.element, 'update-every', null);
+ if (typeof this.force_update_every !== 'number' || this.force_update_every <= 1) {
+ if (this.force_update_every !== null) {
+ this.log('ignoring invalid value of property data-update-every');
+ }
+
+ this.force_update_every = null;
+ } else {
+ this.force_update_every *= 1000;
+ }
+
+ // the dimensions requested by the user
+ this.dimensions = NETDATA.encodeURIComponent(NETDATA.dataAttribute(this.element, 'dimensions', null));
+
+ this.title = NETDATA.dataAttribute(this.element, 'title', null); // the title of the chart
+ this.units = NETDATA.dataAttribute(this.element, 'units', null); // the units of the chart dimensions
+ this.units_desired = NETDATA.dataAttribute(this.element, 'desired-units', NETDATA.options.current.units); // the units of the chart dimensions
+ this.units_current = this.units;
+ this.units_common = NETDATA.dataAttribute(this.element, 'common-units', null);
+
+ // additional options to pass to netdata
+ this.append_options = NETDATA.encodeURIComponent(NETDATA.dataAttribute(this.element, 'append-options', null));
+
+ // override options to pass to netdata
+ this.override_options = NETDATA.encodeURIComponent(NETDATA.dataAttribute(this.element, 'override-options', null));
+
+ this.debug = NETDATA.dataAttributeBoolean(this.element, 'debug', false);
+
+ this.value_decimal_detail = -1;
+ let d = NETDATA.dataAttribute(this.element, 'decimal-digits', -1);
+ if (typeof d === 'number') {
+ this.value_decimal_detail = d;
+ } else if (typeof d !== 'undefined') {
+ this.log('ignoring decimal-digits value: ' + d.toString());
+ }
+
+ // if we need to report the rendering speed
+ // find the element that needs to be updated
+ let refresh_dt_element_name = NETDATA.dataAttribute(this.element, 'dt-element-name', null); // string - the element to print refresh_dt_ms
+
+ if (refresh_dt_element_name !== null) {
+ this.refresh_dt_element = document.getElementById(refresh_dt_element_name) || null;
+ }
+ else {
+ this.refresh_dt_element = null;
+ }
+
+ this.dimensions_visibility = new dimensionsVisibility(that);
+
+ this.netdata_first = 0; // milliseconds - the first timestamp in netdata
+ this.netdata_last = 0; // milliseconds - the last timestamp in netdata
+ this.requested_after = null; // milliseconds - the timestamp of the request after param
+ this.requested_before = null; // milliseconds - the timestamp of the request before param
+ this.requested_padding = null;
+ this.view_after = 0;
+ this.view_before = 0;
+
+ this.refresh_dt_ms = 0; // milliseconds - the time the last refresh took
+
+ // how many retries we have made to load chart data from the server
+ this.retries_on_data_failures = 0;
+
+ // color management
+ this.colors = null;
+ this.colors_assigned = null;
+ this.colors_available = null;
+ this.colors_custom = null;
+
+ this.element_message = null; // the element already created by the user
+ this.element_chart = null; // the element with the chart
+ this.element_legend = null; // the element with the legend of the chart (if created by us)
+ this.element_legend_childs = {
+ content: null,
+ hidden: null,
+ title_date: null,
+ title_time: null,
+ title_units: null,
+ perfect_scroller: null, // the container to apply perfect scroller to
+ series: null
+ };
+
+ this.chart_url = null; // string - the url to download chart info
+ this.chart = null; // object - the chart as downloaded from the server
+
+ const getForeignElementById = (opt) => {
+ let id = NETDATA.dataAttribute(this.element, opt, null);
+ if (id === null) {
+ //this.log('option "' + opt + '" is undefined');
+ return null;
+ }
+
+ let el = document.getElementById(id);
+            if (el === null) {
+                this.log('cannot find an element with id "' + id.toString() + '"');
+ return null;
+ }
+
+ return el;
+ };
+
+ this.foreignElementBefore = getForeignElementById('show-before-at');
+ this.foreignElementAfter = getForeignElementById('show-after-at');
+ this.foreignElementDuration = getForeignElementById('show-duration-at');
+ this.foreignElementUpdateEvery = getForeignElementById('show-update-every-at');
+ this.foreignElementSelection = getForeignElementById('show-selection-at');
+ };
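+
+    // Illustrative markup (not part of the library): the foreign elements
+    // above are resolved from data attributes on the chart element, e.g.:
+    //
+    //   <div data-netdata="system.cpu" data-show-before-at="cpu-before"></div>
+    //   <span id="cpu-before"></span>  <!-- receives the view 'before' time -->
+    //
+    // 'system.cpu' and 'cpu-before' are hypothetical names for this example.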
+
+ const destroyDOM = () => {
+ if (!this.enabled) {
+ return;
+ }
+
+ if (this.debug) {
+ this.log('destroyDOM()');
+ }
+
+ // this.element.className = 'netdata-message icon';
+ // this.element.innerHTML = '<i class="fas fa-sync"></i> netdata';
+ this.element.innerHTML = '';
+ this.element_message = null;
+ this.element_legend = null;
+ this.element_chart = null;
+ this.element_legend_childs.series = null;
+
+ this.chart_created = false;
+ this.dom_created = false;
+
+ this.tm.last_resized = 0;
+ this.tm.last_dom_created = 0;
+ };
+
+ const maxMessageFontSize = () => {
+ let screenHeight = screen.height;
+ let el = this.element;
+
+        // normally we want a font size as tall as the element
+ let h = el.clientHeight;
+
+ // but give it some air, 20% let's say, or 5 pixels min
+ let lost = Math.max(h * 0.2, 5);
+ h -= lost;
+
+ // center the text, vertically
+ let paddingTop = (lost - 5) / 2;
+
+ // but check the width too
+ // it should fit 10 characters in it
+ let w = el.clientWidth / 10;
+ if (h > w) {
+ paddingTop += (h - w) / 2;
+ h = w;
+ }
+
+ // and don't make it too huge
+ // 5% of the screen size is good
+ if (h > screenHeight / 20) {
+ paddingTop += (h - (screenHeight / 20)) / 2;
+ h = screenHeight / 20;
+ }
+
+ // set it
+ this.element_message.style.fontSize = h.toString() + 'px';
+ this.element_message.style.paddingTop = paddingTop.toString() + 'px';
+ };
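+
+    // Worked example (illustrative): for a 100px-tall, 300px-wide element on
+    // a 1080px-tall screen: lost = max(20, 5) = 20, so h = 80 and
+    // paddingTop = 7.5; the width cap is 300 / 10 = 30 < 80, so paddingTop
+    // grows by (80 - 30) / 2 = 25; the screen cap of 1080 / 20 = 54 does not
+    // apply, leaving a 30px font with 32.5px of top padding.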
+
+ const showMessageIcon = (icon) => {
+ this.element_message.innerHTML = icon;
+ maxMessageFontSize();
+ $(this.element_message).removeClass('hidden');
+ this.tmp.___messageHidden___ = undefined;
+ };
+
+ const showLoading = () => {
+ if (!this.chart_created) {
+ showMessageIcon(NETDATA.icons.loading + ' netdata');
+ return true;
+ }
+ return false;
+ };
+
+ let createDOM = () => {
+ if (!this.enabled) {
+ return;
+ }
+ lateInitialization();
+
+ destroyDOM();
+
+ if (this.debug) {
+ this.log('createDOM()');
+ }
+
+ this.element_message = document.createElement('div');
+ this.element_message.className = 'netdata-message icon hidden';
+ this.element.appendChild(this.element_message);
+
+ this.dom_created = true;
+ this.chart_created = false;
+
+ this.tm.last_dom_created = this.tm.last_resized = Date.now();
+
+ showLoading();
+ };
+
+ const initDOM = () => {
+ this.element.className = this.library.container_class(that);
+
+ if (typeof(this.width) === 'string') {
+ this.element.style.width = this.width;
+ } else if (typeof(this.width) === 'number') {
+ this.element.style.width = this.width.toString() + 'px';
+ }
+
+ if (typeof(this.library.aspect_ratio) === 'undefined') {
+ if (typeof(this.height) === 'string') {
+ this.element.style.height = this.height;
+ } else if (typeof(this.height) === 'number') {
+ this.element.style.height = this.height.toString() + 'px';
+ }
+ }
+
+ if (NETDATA.chartDefaults.min_width !== null) {
+ this.element.style.min_width = NETDATA.chartDefaults.min_width;
+ }
+ };
+
+ const invisibleSearchableText = () => {
+ return '<span style="position:absolute; opacity: 0; width: 0px;">' + this.id + '</span>';
+ };
+
+ /* init() private
+ * initialize state variables
+ * destroy all (possibly) created state elements
+ * create the basic DOM for a chart
+ */
+ const init = (opt) => {
+ if (!this.enabled) {
+ return;
+ }
+
+ runtimeInit();
+ this.element.innerHTML = invisibleSearchableText();
+
+ this.tm.last_initialized = Date.now();
+ this.setMode('auto');
+
+ if (opt !== 'fast') {
+ if (this.isVisible(true) || opt === 'force') {
+ createDOM();
+ }
+ }
+ };
+
+ const hideMessage = () => {
+ if (typeof this.tmp.___messageHidden___ === 'undefined') {
+ this.tmp.___messageHidden___ = true;
+ $(this.element_message).addClass('hidden');
+ }
+ };
+
+ const showRendering = () => {
+ let icon;
+ if (this.chart !== null) {
+ if (this.chart.chart_type === 'line') {
+ icon = NETDATA.icons.lineChart;
+ } else {
+ icon = NETDATA.icons.areaChart;
+ }
+ }
+ else {
+ icon = NETDATA.icons.noChart;
+ }
+
+ showMessageIcon(icon + ' netdata' + invisibleSearchableText());
+ };
+
+ const isHidden = () => {
+ return (typeof this.tmp.___chartIsHidden___ !== 'undefined');
+ };
+
+ // hide the chart, when it is not visible - called from isVisible()
+ this.hideChart = function () {
+ // hide it, if it is not already hidden
+ if (isHidden()) {
+ return;
+ }
+
+ if (this.chart_created) {
+ if (NETDATA.options.current.show_help) {
+ if (this.element_legend_childs.toolbox !== null) {
+ if (this.debug) {
+                        this.log('hideChart(): hiding legend popovers');
+ }
+
+ $(this.element_legend_childs.toolbox_left).popover('hide');
+ $(this.element_legend_childs.toolbox_reset).popover('hide');
+ $(this.element_legend_childs.toolbox_right).popover('hide');
+ $(this.element_legend_childs.toolbox_zoomin).popover('hide');
+ $(this.element_legend_childs.toolbox_zoomout).popover('hide');
+ }
+
+ if (this.element_legend_childs.resize_handler !== null) {
+ $(this.element_legend_childs.resize_handler).popover('hide');
+ }
+
+ if (this.element_legend_childs.content !== null) {
+ $(this.element_legend_childs.content).popover('hide');
+ }
+ }
+
+ if (NETDATA.options.current.destroy_on_hide) {
+ if (this.debug) {
+ this.log('hideChart(): initializing chart');
+ }
+
+ // we should destroy it
+ init('force');
+ } else {
+ if (this.debug) {
+ this.log('hideChart(): hiding chart');
+ }
+
+ showRendering();
+ this.element_chart.style.display = 'none';
+ this.element.style.willChange = 'auto';
+ if (this.element_legend !== null) {
+ this.element_legend.style.display = 'none';
+ }
+ if (this.element_legend_childs.toolbox !== null) {
+ this.element_legend_childs.toolbox.style.display = 'none';
+ }
+ if (this.element_legend_childs.resize_handler !== null) {
+ this.element_legend_childs.resize_handler.style.display = 'none';
+ }
+
+ this.tm.last_hidden = Date.now();
+
+ // de-allocate data
+                // This works, but I'm not sure there are no corner cases somewhere,
+                // so it stays commented out - if the user has memory issues, they can
+                // set "Destroy on Hide" for all charts.
+ // this.data = null;
+ }
+ }
+
+ this.tmp.___chartIsHidden___ = true;
+ };
+
+ // unhide the chart, when it is visible - called from isVisible()
+ this.unhideChart = function () {
+ if (!isHidden()) {
+ return;
+ }
+
+ this.tmp.___chartIsHidden___ = undefined;
+ this.updates_since_last_unhide = 0;
+
+ if (!this.chart_created) {
+ if (this.debug) {
+ this.log('unhideChart(): initializing chart');
+ }
+
+ // we need to re-initialize it, to show our background
+ // logo in bootstrap tabs, until the chart loads
+ init('force');
+ } else {
+ if (this.debug) {
+ this.log('unhideChart(): unhiding chart');
+ }
+
+ this.element.style.willChange = 'transform';
+ this.tm.last_unhidden = Date.now();
+ this.element_chart.style.display = '';
+ if (this.element_legend !== null) {
+ this.element_legend.style.display = '';
+ }
+ if (this.element_legend_childs.toolbox !== null) {
+ this.element_legend_childs.toolbox.style.display = '';
+ }
+ if (this.element_legend_childs.resize_handler !== null) {
+ this.element_legend_childs.resize_handler.style.display = '';
+ }
+ resizeChart();
+ hideMessage();
+ }
+
+ if (this.__redraw_on_unhide) {
+ if (this.debug) {
+ this.log("redrawing chart on unhide");
+ }
+
+ this.__redraw_on_unhide = undefined;
+ this.redrawChart();
+ }
+ };
+
+ const canBeRendered = (uncached_visibility) => {
+ if (this.debug) {
+ this.log('canBeRendered() called');
+ }
+
+ if (!NETDATA.options.current.update_only_visible) {
+ return true;
+ }
+
+ let ret = (
+ (
+ NETDATA.options.page_is_visible ||
+ NETDATA.options.current.stop_updates_when_focus_is_lost === false ||
+ this.updates_since_last_unhide === 0
+ )
+ && isHidden() === false && this.isVisible(uncached_visibility)
+ );
+
+ if (this.debug) {
+ this.log('canBeRendered(): ' + ret);
+ }
+
+ return ret;
+ };
+
+ // https://github.com/petkaantonov/bluebird/wiki/Optimization-killers
+ const callChartLibraryUpdateSafely = (data) => {
+ let status;
+
+ // we should not do this here
+ // if we prevent rendering the chart then:
+ // 1. globalSelectionSync will be wrong
+ // 2. globalPanAndZoom will be wrong
+ //if (canBeRendered(true) === false)
+ // return false;
+
+ if (NETDATA.options.fake_chart_rendering) {
+ return true;
+ }
+
+ this.updates_counter++;
+ this.updates_since_last_unhide++;
+ this.updates_since_last_creation++;
+
+ if (NETDATA.options.debug.chart_errors) {
+ status = this.library.update(that, data);
+ } else {
+ try {
+ status = this.library.update(that, data);
+ } catch (err) {
+ status = false;
+ }
+ }
+
+ if (!status) {
+ error('chart failed to be updated as ' + this.library_name);
+ return false;
+ }
+
+ return true;
+ };
+
+ // https://github.com/petkaantonov/bluebird/wiki/Optimization-killers
+ const callChartLibraryCreateSafely = (data) => {
+ let status;
+
+ // we should not do this here
+ // if we prevent rendering the chart then:
+ // 1. globalSelectionSync will be wrong
+ // 2. globalPanAndZoom will be wrong
+ //if (canBeRendered(true) === false)
+ // return false;
+
+ if (NETDATA.options.fake_chart_rendering) {
+ return true;
+ }
+
+ this.updates_counter++;
+ this.updates_since_last_unhide++;
+ this.updates_since_last_creation++;
+
+ if (NETDATA.options.debug.chart_errors) {
+ status = this.library.create(that, data);
+ } else {
+ try {
+ status = this.library.create(that, data);
+ } catch (err) {
+ status = false;
+ }
+ }
+
+ if (!status) {
+ error('chart failed to be created as ' + this.library_name);
+ return false;
+ }
+
+ this.chart_created = true;
+ this.updates_since_last_creation = 0;
+ return true;
+ };
+
+ // ----------------------------------------------------------------------------------------------------------------
+ // Chart Resize
+
+ // resizeChart() - private
+ // to be called just before the chart library to make sure that
+ // a properly sized dom is available
+ const resizeChart = () => {
+ if (this.tm.last_resized < NETDATA.options.last_page_resize) {
+ if (!this.chart_created) {
+ return;
+ }
+
+ if (this.needsRecreation()) {
+ if (this.debug) {
+ this.log('resizeChart(): initializing chart');
+ }
+
+ init('force');
+ } else if (typeof this.library.resize === 'function') {
+ if (this.debug) {
+ this.log('resizeChart(): resizing chart');
+ }
+
+ this.library.resize(that);
+
+ if (this.element_legend_childs.perfect_scroller !== null) {
+ Ps.update(this.element_legend_childs.perfect_scroller);
+ }
+
+ maxMessageFontSize();
+ }
+
+ this.tm.last_resized = Date.now();
+ }
+ };
+
+ // this is the actual chart resize algorithm
+ // it will:
+ // - resize the entire container
+ // - update the internal states
+ // - resize the chart as the div changes height
+ // - update the scrollbar of the legend
+ const resizeChartToHeight = (h) => {
+ // console.log(h);
+ this.element.style.height = h;
+
+ if (this.settings_id !== null) {
+ NETDATA.localStorageSet('chart_heights.' + this.settings_id, h);
+ }
+
+ let now = Date.now();
+ NETDATA.options.last_page_scroll = now;
+ NETDATA.options.auto_refresher_stop_until = now + NETDATA.options.current.stop_updates_while_resizing;
+
+ // force a resize
+ this.tm.last_resized = 0;
+ resizeChart();
+ };
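+
+    // Illustrative usage (not part of the library): within this closure,
+    // resizeChartToHeight('240px') sets the element height, persists it under
+    // 'chart_heights.<settings_id>' in localStorage (when the element carries
+    // a data-id attribute), and forces a resizeChart() on the next pass.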
+
+ this.resizeForPrint = function () {
+ if (typeof this.element_legend_childs !== 'undefined' && this.element_legend_childs.perfect_scroller !== null) {
+ let current = this.element.clientHeight;
+ let optimal = current
+ + this.element_legend_childs.perfect_scroller.scrollHeight
+ - this.element_legend_childs.perfect_scroller.clientHeight;
+
+ if (optimal > current) {
+ // this.log('resized');
+ this.element.style.height = optimal + 'px';
+ this.library.resize(this);
+ }
+ }
+ };
+
+ this.resizeHandler = function (e) {
+ e.preventDefault();
+
+        if (typeof this.event_resize === 'undefined'
+            || typeof this.event_resize.chart_original_w === 'undefined'
+            || typeof this.event_resize.chart_original_h === 'undefined') {
+ this.event_resize = {
+ chart_original_w: this.element.clientWidth,
+ chart_original_h: this.element.clientHeight,
+ last: 0
+ };
+ }
+
+ if (e.type === 'touchstart') {
+ this.event_resize.mouse_start_x = e.touches.item(0).pageX;
+ this.event_resize.mouse_start_y = e.touches.item(0).pageY;
+ } else {
+ this.event_resize.mouse_start_x = e.clientX;
+ this.event_resize.mouse_start_y = e.clientY;
+ }
+
+ this.event_resize.chart_start_w = this.element.clientWidth;
+ this.event_resize.chart_start_h = this.element.clientHeight;
+ this.event_resize.chart_last_w = this.element.clientWidth;
+ this.event_resize.chart_last_h = this.element.clientHeight;
+
+ let now = Date.now();
+ if (now - this.event_resize.last <= NETDATA.options.current.double_click_speed && this.element_legend_childs.perfect_scroller !== null) {
+ // double click / double tap event
+
+ // console.dir(this.element_legend_childs.content);
+ // console.dir(this.element_legend_childs.perfect_scroller);
+
+ // the optimal height of the chart
+ // showing the entire legend
+ let optimal = this.event_resize.chart_last_h
+ + this.element_legend_childs.perfect_scroller.scrollHeight
+ - this.element_legend_childs.perfect_scroller.clientHeight;
+
+ // if we are not optimal, be optimal
+ if (this.event_resize.chart_last_h !== optimal) {
+ // this.log('resize to optimal, current = ' + this.event_resize.chart_last_h.toString() + 'px, original = ' + this.event_resize.chart_original_h.toString() + 'px, optimal = ' + optimal.toString() + 'px, internal = ' + this.height_original.toString());
+ resizeChartToHeight(optimal.toString() + 'px');
+ }
+
+ // else if the current height is not the original/saved height
+ // reset to the original/saved height
+ else if (this.event_resize.chart_last_h !== this.event_resize.chart_original_h) {
+ // this.log('resize to original, current = ' + this.event_resize.chart_last_h.toString() + 'px, original = ' + this.event_resize.chart_original_h.toString() + 'px, optimal = ' + optimal.toString() + 'px, internal = ' + this.height_original.toString());
+ resizeChartToHeight(this.event_resize.chart_original_h.toString() + 'px');
+ }
+
+ // else if the current height is not the internal default height
+ // reset to the internal default height
+ else if ((this.event_resize.chart_last_h.toString() + 'px') !== this.height_original) {
+ // this.log('resize to internal default, current = ' + this.event_resize.chart_last_h.toString() + 'px, original = ' + this.event_resize.chart_original_h.toString() + 'px, optimal = ' + optimal.toString() + 'px, internal = ' + this.height_original.toString());
+ resizeChartToHeight(this.height_original.toString());
+ }
+
+            // else if the current height is not the firstChild's clientHeight
+ // resize to it
+ else if (typeof this.element_legend_childs.perfect_scroller.firstChild !== 'undefined') {
+ let parent_rect = this.element.getBoundingClientRect();
+ let content_rect = this.element_legend_childs.perfect_scroller.firstElementChild.getBoundingClientRect();
+                let wanted = content_rect.top - parent_rect.top + this.element_legend_childs.perfect_scroller.firstChild.clientHeight + 18; // 18 = toolbox (15px) + 3px spacing
+
+ // console.log(parent_rect);
+ // console.log(content_rect);
+ // console.log(wanted);
+
+ // this.log('resize to firstChild, current = ' + this.event_resize.chart_last_h.toString() + 'px, original = ' + this.event_resize.chart_original_h.toString() + 'px, optimal = ' + optimal.toString() + 'px, internal = ' + this.height_original.toString() + 'px, firstChild = ' + wanted.toString() + 'px' );
+ if (this.event_resize.chart_last_h !== wanted) {
+ resizeChartToHeight(wanted.toString() + 'px');
+ }
+ }
+ } else {
+ this.event_resize.last = now;
+
+ // process movement event
+ document.onmousemove =
+ document.ontouchmove =
+ this.element_legend_childs.resize_handler.onmousemove =
+ this.element_legend_childs.resize_handler.ontouchmove =
+ function (e) {
+ let y = null;
+
+ switch (e.type) {
+ case 'mousemove':
+ y = e.clientY;
+ break;
+ case 'touchmove':
+                        y = e.touches.item(e.touches.length - 1).pageY;
+ break;
+ }
+
+ if (y !== null) {
+ let newH = that.event_resize.chart_start_h + y - that.event_resize.mouse_start_y;
+
+ if (newH >= 70 && newH !== that.event_resize.chart_last_h) {
+ resizeChartToHeight(newH.toString() + 'px');
+ that.event_resize.chart_last_h = newH;
+ }
+ }
+ };
+
+ // process end event
+ document.onmouseup =
+ document.ontouchend =
+ this.element_legend_childs.resize_handler.onmouseup =
+ this.element_legend_childs.resize_handler.ontouchend =
+ function (e) {
+ void(e);
+
+ // remove all the hooks
+ document.onmouseup =
+ document.onmousemove =
+ document.ontouchmove =
+ document.ontouchend =
+ that.element_legend_childs.resize_handler.onmousemove =
+ that.element_legend_childs.resize_handler.ontouchmove =
+ that.element_legend_childs.resize_handler.onmouseout =
+ that.element_legend_childs.resize_handler.onmouseup =
+ that.element_legend_childs.resize_handler.ontouchend =
+ null;
+
+ // allow auto-refreshes
+ NETDATA.options.auto_refresher_stop_until = 0;
+ };
+ }
+ };
+
+ const noDataToShow = () => {
+ showMessageIcon(NETDATA.icons.noData + ' empty');
+ this.legendUpdateDOM();
+ this.tm.last_autorefreshed = Date.now();
+ // this.data_update_every = 30 * 1000;
+ //this.element_chart.style.display = 'none';
+ //if (this.element_legend !== null) this.element_legend.style.display = 'none';
+ //this.tmp.___chartIsHidden___ = true;
+ };
+
+ // ============================================================================================================
+ // PUBLIC FUNCTIONS
+
+ this.error = function (msg) {
+ error(msg);
+ };
+
+ this.setMode = function (m) {
+ if (this.current !== null && this.current.name === m) {
+ return;
+ }
+
+ if (m === 'auto') {
+ this.current = this.auto;
+ } else if (m === 'pan') {
+ this.current = this.pan;
+ } else if (m === 'zoom') {
+ this.current = this.zoom;
+ } else {
+ this.current = this.auto;
+ }
+
+ this.current.force_update_at = 0;
+ this.current.force_before_ms = null;
+ this.current.force_after_ms = null;
+
+ this.tm.last_mode_switch = Date.now();
+ };
+
+ // ----------------------------------------------------------------------------------------------------------------
+ // global selection sync for slaves
+
+ // can the chart participate to the global selection sync as a slave?
+ this.globalSelectionSyncIsEligible = function () {
+ return (
+ this.enabled &&
+ this.library !== null &&
+ typeof this.library.setSelection === 'function' &&
+ this.isVisible() &&
+ this.chart_created
+ );
+ };
+
+ this.setSelection = function (t) {
+ if (typeof this.library.setSelection === 'function') {
+ this.selected = this.library.setSelection(this, t);
+ } else {
+ this.selected = true;
+ }
+
+ if (this.selected && this.debug) {
+ this.log('selection set to ' + t.toString());
+ }
+
+ if (this.foreignElementSelection !== null) {
+ this.foreignElementSelection.innerText = NETDATA.dateTime.localeDateString(t) + ' ' + NETDATA.dateTime.localeTimeString(t);
+ }
+
+ return this.selected;
+ };
+
+ this.clearSelection = function () {
+ if (this.selected) {
+ if (typeof this.library.clearSelection === 'function') {
+ this.selected = (this.library.clearSelection(this) !== true);
+ } else {
+ this.selected = false;
+ }
+
+ if (this.selected === false && this.debug) {
+ this.log('selection cleared');
+ }
+
+ if (this.foreignElementSelection !== null) {
+ this.foreignElementSelection.innerText = '';
+ }
+
+ this.legendReset();
+ }
+
+ return this.selected;
+ };
+
+ // ----------------------------------------------------------------------------------------------------------------
+
+ // find if a timestamp (ms) is shown in the current chart
+ this.timeIsVisible = function (t) {
+ return (t >= this.data_after && t <= this.data_before);
+ };
+
+ this.calculateRowForTime = function (t) {
+ if (!this.timeIsVisible(t)) {
+ return -1;
+ }
+ return Math.floor((t - this.data_after) / this.data_update_every);
+ };
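+
+    // Worked example (illustrative): with data_after = 1000000,
+    // data_before = 1060000 and data_update_every = 1000 (all milliseconds),
+    // calculateRowForTime(1005500) = floor(5500 / 1000) = 5, i.e. the sixth
+    // data row, while calculateRowForTime(1060001) returns -1 (not visible).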
+
+ // ----------------------------------------------------------------------------------------------------------------
+
+ this.pauseChart = function () {
+ if (!this.paused) {
+ if (this.debug) {
+ this.log('pauseChart()');
+ }
+
+ this.paused = true;
+ }
+ };
+
+ this.unpauseChart = function () {
+ if (this.paused) {
+ if (this.debug) {
+ this.log('unpauseChart()');
+ }
+
+ this.paused = false;
+ }
+ };
+
+ this.resetChart = function (dontClearMaster, dontUpdate) {
+ if (this.debug) {
+ this.log('resetChart(' + dontClearMaster + ', ' + dontUpdate + ') called');
+ }
+
+ if (typeof dontClearMaster === 'undefined') {
+ dontClearMaster = false;
+ }
+
+ if (typeof dontUpdate === 'undefined') {
+ dontUpdate = false;
+ }
+
+ if (dontClearMaster !== true && NETDATA.globalPanAndZoom.isMaster(this)) {
+ if (this.debug) {
+ this.log('resetChart() diverting to clearMaster().');
+ }
+ // this will call us back with master === true
+ NETDATA.globalPanAndZoom.clearMaster();
+ return;
+ }
+
+ this.clearSelection();
+
+ this.tm.pan_and_zoom_seq = 0;
+
+ this.setMode('auto');
+ this.current.force_update_at = 0;
+ this.current.force_before_ms = null;
+ this.current.force_after_ms = null;
+ this.tm.last_autorefreshed = 0;
+ this.paused = false;
+ this.selected = false;
+ this.enabled = true;
+ // this.debug = false;
+
+ // do not update the chart here
+ // or the chart will flip-flop when it is the master
+ // of a selection sync and another chart becomes
+ // the new master
+
+ if (dontUpdate !== true && this.isVisible()) {
+ this.updateChart();
+ }
+ };
+
+ this.updateChartPanOrZoom = function (after, before, callback) {
+ let logme = 'updateChartPanOrZoom(' + after + ', ' + before + '): ';
+ let ret = true;
+
+ NETDATA.globalPanAndZoom.delay();
+ NETDATA.globalSelectionSync.delay();
+
+ if (this.debug) {
+ this.log(logme);
+ }
+
+ if (before < after) {
+ if (this.debug) {
+ this.log(logme + 'flipped parameters, rejecting it.');
+ }
+ return false;
+ }
+
+ if (typeof this.fixed_min_duration === 'undefined') {
+ this.fixed_min_duration = Math.round((this.chartWidth() / 30) * this.chart.update_every * 1000);
+ }
+
+ let min_duration = this.fixed_min_duration;
+ let current_duration = Math.round(this.view_before - this.view_after);
+
+ // round the numbers
+ after = Math.round(after);
+ before = Math.round(before);
+
+ // align them to update_every
+ // stretching them further away
+ after -= after % this.data_update_every;
+ before += this.data_update_every - (before % this.data_update_every);
+
+ // the final wanted duration
+ let wanted_duration = before - after;
+
+ // to allow panning, accept just a point below our minimum
+ if ((current_duration - this.data_update_every) < min_duration) {
+ min_duration = current_duration - this.data_update_every;
+ }
+
+ // we do it, but we adjust to minimum size and return false
+ // when the wanted size is below the current and the minimum
+ // and we zoom
+ if (wanted_duration < current_duration && wanted_duration < min_duration) {
+ if (this.debug) {
+ this.log(logme + 'too small: min_duration: ' + (min_duration / 1000).toString() + ', wanted: ' + (wanted_duration / 1000).toString());
+ }
+
+ min_duration = this.fixed_min_duration;
+
+ let dt = (min_duration - wanted_duration) / 2;
+ before += dt;
+ after -= dt;
+ wanted_duration = before - after;
+ ret = false;
+ }
+
+ let tolerance = this.data_update_every * 2;
+ let movement = Math.abs(before - this.view_before);
+
+ if (Math.abs(current_duration - wanted_duration) <= tolerance && movement <= tolerance && ret) {
+ if (this.debug) {
+ this.log(logme + 'REJECTING UPDATE: current/min duration: ' + (current_duration / 1000).toString() + '/' + (this.fixed_min_duration / 1000).toString() + ', wanted duration: ' + (wanted_duration / 1000).toString() + ', duration diff: ' + (Math.round(Math.abs(current_duration - wanted_duration) / 1000)).toString() + ', movement: ' + (movement / 1000).toString() + ', tolerance: ' + (tolerance / 1000).toString() + ', returning: ' + false);
+ }
+ return false;
+ }
+
+ if (this.current.name === 'auto') {
+ this.log(logme + 'caller called me with mode: ' + this.current.name);
+ this.setMode('pan');
+ }
+
+ if (this.debug) {
+ this.log(logme + 'ACCEPTING UPDATE: current/min duration: ' + (current_duration / 1000).toString() + '/' + (this.fixed_min_duration / 1000).toString() + ', wanted duration: ' + (wanted_duration / 1000).toString() + ', duration diff: ' + (Math.round(Math.abs(current_duration - wanted_duration) / 1000)).toString() + ', movement: ' + (movement / 1000).toString() + ', tolerance: ' + (tolerance / 1000).toString() + ', returning: ' + ret);
+ }
+
+ this.current.force_update_at = Date.now() + NETDATA.options.current.pan_and_zoom_delay;
+ this.current.force_after_ms = after;
+ this.current.force_before_ms = before;
+ NETDATA.globalPanAndZoom.setMaster(this, after, before);
+
+ if (ret && typeof callback === 'function') {
+ callback();
+ }
+
+ return ret;
+ };
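+
+    // Worked example (illustrative): with data_update_every = 1000ms, a
+    // request for after = 1000300, before = 1004700 is rounded and stretched
+    // to the update grid: after -= 300 -> 1000000 and
+    // before += (1000 - 700) -> 1005000, so wanted_duration = 5000ms. If that
+    // is below the minimum zoom duration, both edges are pushed apart
+    // symmetrically and the function returns false while still applying the
+    // adjusted range.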
+
+ this.updateChartPanOrZoomAsyncTimeOutId = undefined;
+ this.updateChartPanOrZoomAsync = function (after, before, callback) {
+ NETDATA.globalPanAndZoom.delay();
+ NETDATA.globalSelectionSync.delay();
+
+ if (!NETDATA.globalPanAndZoom.isMaster(this)) {
+ this.pauseChart();
+ NETDATA.globalPanAndZoom.setMaster(this, after, before);
+ // NETDATA.globalSelectionSync.stop();
+ NETDATA.globalSelectionSync.setMaster(this);
+ }
+
+ if (this.updateChartPanOrZoomAsyncTimeOutId) {
+ NETDATA.timeout.clear(this.updateChartPanOrZoomAsyncTimeOutId);
+ }
+
+ NETDATA.timeout.set(function () {
+ that.updateChartPanOrZoomAsyncTimeOutId = undefined;
+ that.updateChartPanOrZoom(after, before, callback);
+ }, 0);
+ };
+
+ let _unitsConversionLastUnits = undefined;
+ let _unitsConversionLastUnitsDesired = undefined;
+ let _unitsConversionLastMin = undefined;
+ let _unitsConversionLastMax = undefined;
+ let _unitsConversion = function (value) {
+ return value;
+ };
+ this.unitsConversionSetup = function (min, max) {
+ if (this.units !== _unitsConversionLastUnits
+ || this.units_desired !== _unitsConversionLastUnitsDesired
+ || min !== _unitsConversionLastMin
+ || max !== _unitsConversionLastMax) {
+
+ _unitsConversionLastUnits = this.units;
+ _unitsConversionLastUnitsDesired = this.units_desired;
+ _unitsConversionLastMin = min;
+ _unitsConversionLastMax = max;
+
+ _unitsConversion = NETDATA.unitsConversion.get(this.uuid, min, max, this.units, this.units_desired, this.units_common, function (units) {
+ // console.log('switching units from ' + that.units.toString() + ' to ' + units.toString());
+ that.units_current = units;
+ that.legendSetUnitsString(that.units_current);
+ });
+ }
+ };
+
+ let _legendFormatValueChartDecimalsLastMin = undefined;
+ let _legendFormatValueChartDecimalsLastMax = undefined;
+ let _legendFormatValueChartDecimals = -1;
+ let _intlNumberFormat = null;
+ this.legendFormatValueDecimalsFromMinMax = function (min, max) {
+ if (min === _legendFormatValueChartDecimalsLastMin && max === _legendFormatValueChartDecimalsLastMax) {
+ return;
+ }
+
+ this.unitsConversionSetup(min, max);
+ if (_unitsConversion !== null) {
+ min = _unitsConversion(min);
+ max = _unitsConversion(max);
+
+ if (typeof min !== 'number' || typeof max !== 'number') {
+ return;
+ }
+ }
+
+ _legendFormatValueChartDecimalsLastMin = min;
+ _legendFormatValueChartDecimalsLastMax = max;
+
+ let old = _legendFormatValueChartDecimals;
+
+        if (this.data !== null && this.data.min === this.data.max) {
+            // it is a fixed number, let the visualizer decide based on the value
+            _legendFormatValueChartDecimals = -1;
+        } else if (this.value_decimal_detail !== -1) {
+            // there is an override
+            _legendFormatValueChartDecimals = this.value_decimal_detail;
+        } else {
+ // ok, let's calculate the proper number of decimal points
+ let delta;
+
+ if (min === max) {
+ delta = Math.abs(min);
+ } else {
+ delta = Math.abs(max - min);
+ }
+
+ if (delta > 1000) {
+ _legendFormatValueChartDecimals = 0;
+ } else if (delta > 10) {
+ _legendFormatValueChartDecimals = 1;
+ } else if (delta > 1) {
+ _legendFormatValueChartDecimals = 2;
+ } else if (delta > 0.1) {
+ _legendFormatValueChartDecimals = 2;
+ } else if (delta > 0.01) {
+ _legendFormatValueChartDecimals = 4;
+ } else if (delta > 0.001) {
+ _legendFormatValueChartDecimals = 5;
+ } else if (delta > 0.0001) {
+ _legendFormatValueChartDecimals = 6;
+ } else {
+ _legendFormatValueChartDecimals = 7;
+ }
+ }
+
+ if (_legendFormatValueChartDecimals !== old) {
+ if (_legendFormatValueChartDecimals < 0) {
+ _intlNumberFormat = null;
+ } else {
+ _intlNumberFormat = NETDATA.fastNumberFormat.get(
+ _legendFormatValueChartDecimals,
+ _legendFormatValueChartDecimals
+ );
+ }
+ }
+ };
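+
+    // Worked example (illustrative): for min = 0.02 and max = 0.97,
+    // delta = 0.95 falls in the (0.1, 1] bucket, so two decimal places are
+    // used and values render like '0.43'. When this.data.min equals
+    // this.data.max (a flat line), the decision is deferred to the per-value
+    // logic in legendFormatValue() by setting the decimals to -1.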
+
+ this.legendFormatValue = function (value) {
+ if (typeof value !== 'number') {
+ return '-';
+ }
+
+ value = _unitsConversion(value);
+
+ if (typeof value !== 'number') {
+ return value;
+ }
+
+ if (_intlNumberFormat !== null) {
+ return _intlNumberFormat.format(value);
+ }
+
+ let dmin, dmax;
+ if (this.value_decimal_detail !== -1) {
+ dmin = dmax = this.value_decimal_detail;
+ } else {
+ dmin = 0;
+ let abs = (value < 0) ? -value : value;
+ if (abs > 1000) {
+ dmax = 0;
+ } else if (abs > 10) {
+ dmax = 1;
+ } else if (abs > 1) {
+ dmax = 2;
+ } else if (abs > 0.1) {
+ dmax = 2;
+ } else if (abs > 0.01) {
+ dmax = 4;
+ } else if (abs > 0.001) {
+ dmax = 5;
+ } else if (abs > 0.0001) {
+ dmax = 6;
+ } else {
+ dmax = 7;
+ }
+ }
+
+ return NETDATA.fastNumberFormat.get(dmin, dmax).format(value);
+ };
+
+ this.legendSetLabelValue = function (label, value) {
+ let series = this.element_legend_childs.series[label];
+ if (typeof series === 'undefined') {
+ return;
+ }
+ if (series.value === null && series.user === null) {
+ return;
+ }
+
+ /*
+ // this slows down firefox and edge significantly
+ // since it requires to use innerHTML(), instead of innerText()
+
+ // if the value has not changed, skip DOM update
+ //if (series.last === value) return;
+
+ let s, r;
+ if (typeof value === 'number') {
+ let v = Math.abs(value);
+ s = r = this.legendFormatValue(value);
+
+ if (typeof series.last === 'number') {
+ if (v > series.last) s += '<i class="fas fa-angle-up" style="width: 8px; text-align: center; overflow: hidden; vertical-align: middle;"></i>';
+ else if (v < series.last) s += '<i class="fas fa-angle-down" style="width: 8px; text-align: center; overflow: hidden; vertical-align: middle;"></i>';
+ else s += '<i class="fas fa-angle-left" style="width: 8px; text-align: center; overflow: hidden; vertical-align: middle;"></i>';
+ }
+ else s += '<i class="fas fa-angle-right" style="width: 8px; text-align: center; overflow: hidden; vertical-align: middle;"></i>';
+
+ series.last = v;
+ }
+ else {
+ if (value === null)
+ s = r = '';
+ else
+ s = r = value;
+
+ series.last = value;
+ }
+ */
+
+ let s = this.legendFormatValue(value);
+
+        // caching: do not update the DOM to show the same value again
+ if (s === series.last_shown_value) {
+ return;
+ }
+ series.last_shown_value = s;
+
+ if (series.value !== null) {
+ series.value.innerText = s;
+ }
+ if (series.user !== null) {
+ series.user.innerText = s;
+ }
+ };
+
+ this.legendSetDateString = function (date) {
+ if (this.element_legend_childs.title_date !== null && date !== this.tmp.__last_shown_legend_date) {
+ this.element_legend_childs.title_date.innerText = date;
+ this.tmp.__last_shown_legend_date = date;
+ }
+ };
+
+ this.legendSetTimeString = function (time) {
+ if (this.element_legend_childs.title_time !== null && time !== this.tmp.__last_shown_legend_time) {
+ this.element_legend_childs.title_time.innerText = time;
+ this.tmp.__last_shown_legend_time = time;
+ }
+ };
+
+ this.legendSetUnitsString = function (units) {
+ if (this.element_legend_childs.title_units !== null && units !== this.tmp.__last_shown_legend_units) {
+ this.element_legend_childs.title_units.innerText = units;
+ this.tmp.__last_shown_legend_units = units;
+ }
+ };
+
+ this.legendSetDateLast = {
+ ms: 0,
+ date: undefined,
+ time: undefined
+ };
+
+ this.legendSetDate = function (ms) {
+ if (typeof ms !== 'number') {
+ this.legendShowUndefined();
+ return;
+ }
+
+ if (this.legendSetDateLast.ms !== ms) {
+ let d = new Date(ms);
+ this.legendSetDateLast.ms = ms;
+ this.legendSetDateLast.date = NETDATA.dateTime.localeDateString(d);
+ this.legendSetDateLast.time = NETDATA.dateTime.localeTimeString(d);
+ }
+
+ this.legendSetDateString(this.legendSetDateLast.date);
+ this.legendSetTimeString(this.legendSetDateLast.time);
+        this.legendSetUnitsString(this.units_current);
+ };
+
+ this.legendShowUndefined = function () {
+ this.legendSetDateString(this.legendPluginModuleString(false));
+ this.legendSetTimeString(this.chart.context.toString());
+ // this.legendSetUnitsString(' ');
+
+ if (this.data && this.element_legend_childs.series !== null) {
+ let labels = this.data.dimension_names;
+ let i = labels.length;
+ while (i--) {
+ let label = labels[i];
+
+ if (typeof label === 'undefined' || typeof this.element_legend_childs.series[label] === 'undefined') {
+ continue;
+ }
+ this.legendSetLabelValue(label, null);
+ }
+ }
+ };
+
+ this.legendShowLatestValues = function () {
+ if (this.chart === null) {
+ return;
+ }
+ if (this.selected) {
+ return;
+ }
+
+ if (this.data === null || this.element_legend_childs.series === null) {
+ this.legendShowUndefined();
+ return;
+ }
+
+ let show_undefined = true;
+ if (Math.abs(this.netdata_last - this.view_before) <= this.data_update_every) {
+ show_undefined = false;
+ }
+
+ if (show_undefined) {
+ this.legendShowUndefined();
+ return;
+ }
+
+ this.legendSetDate(this.view_before);
+
+ let labels = this.data.dimension_names;
+ let i = labels.length;
+ while (i--) {
+ let label = labels[i];
+
+ if (typeof label === 'undefined') {
+ continue;
+ }
+ if (typeof this.element_legend_childs.series[label] === 'undefined') {
+ continue;
+ }
+
+ this.legendSetLabelValue(label, this.data.view_latest_values[i]);
+ }
+ };
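+
+ // Worked example (illustrative numbers): with netdata_last = 1600000000000,
+ // view_before = 1599999999500 and data_update_every = 1000 (a 1-second
+ // chart), the difference is 500 <= 1000, so the chart counts as being at
+ // its latest position and real values are shown; panned further into the
+ // past, the legend falls back to the undefined state instead.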
+
+ this.legendReset = function () {
+ this.legendShowLatestValues();
+ };
+
+ // this should be called just ONCE per dimension per chart
+ this.__chartDimensionColor = function (label) {
+ let c = NETDATA.commonColors.get(this, label);
+
+ // it is important to maintain a list of colors
+ // for this chart only, since the chart library
+ // uses it to assign colors to dimensions in the
+ // same order the dimensions are given to it
+ this.colors.push(c);
+
+ return c;
+ };
+
+ this.chartPrepareColorPalette = function () {
+ NETDATA.commonColors.refill(this);
+ };
+
+ // get the ordered list of chart colors
+ // this includes user defined colors
+ this.chartCustomColors = function () {
+ this.chartPrepareColorPalette();
+
+ let colors;
+ if (this.colors_custom.length) {
+ colors = this.colors_custom;
+ } else {
+ colors = this.colors;
+ }
+
+ if (this.debug) {
+ this.log("chartCustomColors() returns:");
+ this.log(colors);
+ }
+
+ return colors;
+ };
+
+ // get the ordered list of chart ASSIGNED colors
+ // (this returns only the colors that have been
+ // assigned to dimensions, prepended with any
+ // custom colors defined)
+ this.chartColors = function () {
+ this.chartPrepareColorPalette();
+
+ if (this.debug) {
+ this.log("chartColors() returns:");
+ this.log(this.colors);
+ }
+
+ return this.colors;
+ };
+
+ this.legendPluginModuleString = function (withContext) {
+ let str = ' ';
+ let context = '';
+
+ if (typeof this.chart !== 'undefined') {
+ if (withContext && typeof this.chart.context === 'string') {
+ context = this.chart.context;
+ }
+
+ if (typeof this.chart.plugin === 'string' && this.chart.plugin !== '') {
+ str = this.chart.plugin;
+
+ if (str.endsWith(".plugin")) {
+ str = str.substring(0, str.length - 7);
+ }
+
+ if (typeof this.chart.module === 'string' && this.chart.module !== '') {
+ str += ':' + this.chart.module;
+ }
+
+ if (withContext && context !== '') {
+ str += ', ' + context;
+ }
+ }
+ else if (withContext && context !== '') {
+ str = context;
+ }
+ }
+
+ return str;
+ };
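+
+ // Illustrative examples (plugin/module/context names are hypothetical):
+ //   plugin "proc.plugin", no module, withContext=false
+ //       -> "proc"
+ //   plugin "python.d.plugin", module "nginx", context "web.requests",
+ //   withContext=true
+ //       -> "python.d:nginx, web.requests"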
+
+ this.legendResolutionTooltip = function () {
+ if (!this.chart) {
+ return '';
+ }
+
+ let collected = this.chart.update_every;
+ let viewed = (this.data) ? this.data.view_update_every : collected;
+
+ if (collected === viewed) {
+ return "resolution " + NETDATA.seconds4human(collected);
+ }
+
+ return "resolution " + NETDATA.seconds4human(viewed) + ", collected every " + NETDATA.seconds4human(collected);
+ };
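+
+ // Example (illustrative): a chart collected every 1 second but viewed at
+ // a 10-second granularity would produce, assuming seconds4human() renders
+ // these as "10 seconds" and "1 second":
+ //   "resolution 10 seconds, collected every 1 second"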
+
+ this.legendUpdateDOM = function () {
+ let needed = false, dim, keys, len;
+
+ // check that the legend DOM is up to date for the downloaded dimensions
+ if (typeof this.element_legend_childs.series !== 'object' || this.element_legend_childs.series === null) {
+ // this.log('the legend does not have any series - requesting legend update');
+ needed = true;
+ } else if (this.data === null) {
+ // this.log('the chart does not have any data - requesting legend update');
+ needed = true;
+ } else if (typeof this.element_legend_childs.series.labels_key === 'undefined') {
+ needed = true;
+ } else {
+ let labels = this.data.dimension_names.toString();
+ if (labels !== this.element_legend_childs.series.labels_key) {
+ needed = true;
+
+ if (this.debug) {
+ this.log('NEW LABELS: "' + labels + '" NOT EQUAL OLD LABELS: "' + this.element_legend_childs.series.labels_key + '"');
+ }
+ }
+ }
+
+ if (!needed) {
+ // make sure colors are available
+ this.chartPrepareColorPalette();
+
+ // do we have to update the current values?
+ // we do this only when the visible chart is at its latest position
+ if (Math.abs(this.netdata_last - this.view_before) <= this.data_update_every) {
+ if (this.debug) {
+ this.log('chart is in latest position... updating values on legend...');
+ }
+
+ //let labels = this.data.dimension_names;
+ //let i = labels.length;
+ //while (i--)
+ // this.legendSetLabelValue(labels[i], this.data.view_latest_values[i]);
+ }
+ return;
+ }
+
+ if (this.colors === null) {
+ // this is the first time we update the chart
+ // let's assign colors to all dimensions
+ if (this.library.track_colors()) {
+ this.colors = [];
+ keys = Object.keys(this.chart.dimensions);
+ len = keys.length;
+ for (let i = 0; i < len; i++) {
+ NETDATA.commonColors.get(this, this.chart.dimensions[keys[i]].name);
+ }
+ }
+ }
+
+ // we will re-generate the colors for the chart
+ // based on the dimensions this result has data for
+ this.colors = [];
+
+ if (this.debug) {
+ this.log('updating Legend DOM');
+ }
+
+ // mark all dimensions as invalid
+ this.dimensions_visibility.invalidateAll();
+
+ const genLabel = function (state, parent, dim, name, count) {
+ let color = state.__chartDimensionColor(name);
+
+ let user_element = null;
+ let user_id = NETDATA.dataAttribute(state.element, 'show-value-of-' + name.toLowerCase() + '-at', null);
+ if (user_id === null) {
+ user_id = NETDATA.dataAttribute(state.element, 'show-value-of-' + dim.toLowerCase() + '-at', null);
+ }
+ if (user_id !== null) {
+ user_element = document.getElementById(user_id) || null;
+ if (user_element === null) {
+ state.log('Cannot find element with id: ' + user_id);
+ }
+ }
+
+ state.element_legend_childs.series[name] = {
+ name: document.createElement('span'),
+ value: document.createElement('span'),
+ user: user_element,
+ last: null,
+ last_shown_value: null
+ };
+
+ let label = state.element_legend_childs.series[name];
+
+ // create the dimension visibility tracking for this label
+ state.dimensions_visibility.dimensionAdd(name, label.name, label.value, color);
+
+ let rgb = NETDATA.colorHex2Rgb(color);
+ label.name.innerHTML = '<table class="netdata-legend-name-table-'
+ + state.chart.chart_type
+ + '" style="background-color: '
+ + 'rgba(' + rgb.r + ',' + rgb.g + ',' + rgb.b + ',' + NETDATA.options.current['color_fill_opacity_' + state.chart.chart_type] + ') !important'
+ + '"><tr class="netdata-legend-name-tr"><td class="netdata-legend-name-td"></td></tr></table>';
+
+ let text = document.createTextNode(' ' + name);
+ label.name.appendChild(text);
+
+ if (count > 0) {
+ parent.appendChild(document.createElement('br'));
+ }
+
+ parent.appendChild(label.name);
+ parent.appendChild(label.value);
+ };
+
+ let content = document.createElement('div');
+
+ if (this.element_chart === null) {
+ this.element_chart = document.createElement('div');
+ this.element_chart.id = this.library_name + '-' + this.uuid + '-chart';
+ this.element.appendChild(this.element_chart);
+
+ if (this.hasLegend()) {
+ this.element_chart.className = 'netdata-chart-with-legend-right netdata-' + this.library_name + '-chart-with-legend-right';
+ } else {
+ this.element_chart.className = ' netdata-chart netdata-' + this.library_name + '-chart';
+ }
+ }
+
+ if (this.hasLegend()) {
+ if (this.element_legend === null) {
+ this.element_legend = document.createElement('div');
+ this.element_legend.className = 'netdata-chart-legend netdata-' + this.library_name + '-legend';
+ this.element.appendChild(this.element_legend);
+ } else {
+ this.element_legend.innerHTML = '';
+ }
+
+ this.element_legend_childs = {
+ content: content,
+ resize_handler: null,
+ toolbox: null,
+ toolbox_left: null,
+ toolbox_right: null,
+ toolbox_reset: null,
+ toolbox_zoomin: null,
+ toolbox_zoomout: null,
+ toolbox_volume: null,
+ title_date: document.createElement('span'),
+ title_time: document.createElement('span'),
+ title_units: document.createElement('span'),
+ perfect_scroller: document.createElement('div'),
+ series: {}
+ };
+
+ if (NETDATA.options.current.legend_toolbox && this.library.toolboxPanAndZoom !== null) {
+ this.element_legend_childs.toolbox = document.createElement('div');
+ this.element_legend_childs.toolbox_left = document.createElement('div');
+ this.element_legend_childs.toolbox_right = document.createElement('div');
+ this.element_legend_childs.toolbox_reset = document.createElement('div');
+ this.element_legend_childs.toolbox_zoomin = document.createElement('div');
+ this.element_legend_childs.toolbox_zoomout = document.createElement('div');
+ this.element_legend_childs.toolbox_volume = document.createElement('div');
+
+ const getPanAndZoomStep = function (event) {
+ if (event.ctrlKey) {
+ return NETDATA.options.current.pan_and_zoom_factor * NETDATA.options.current.pan_and_zoom_factor_multiplier_control;
+ } else if (event.shiftKey) {
+ return NETDATA.options.current.pan_and_zoom_factor * NETDATA.options.current.pan_and_zoom_factor_multiplier_shift;
+ } else if (event.altKey) {
+ return NETDATA.options.current.pan_and_zoom_factor * NETDATA.options.current.pan_and_zoom_factor_multiplier_alt;
+ } else {
+ return NETDATA.options.current.pan_and_zoom_factor;
+ }
+ };
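+
+ // Illustrative sketch: if pan_and_zoom_factor were 0.25 and the CTRL
+ // multiplier were 4 (both assumptions - the real values live in
+ // NETDATA.options.current, not in this file), a plain click would pan by
+ // 25% of the visible window and a CTRL+click by 100% of it.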
+
+ this.element_legend_childs.toolbox.className += ' netdata-legend-toolbox';
+ this.element.appendChild(this.element_legend_childs.toolbox);
+
+ this.element_legend_childs.toolbox_left.className += ' netdata-legend-toolbox-button';
+ this.element_legend_childs.toolbox_left.innerHTML = NETDATA.icons.left;
+ this.element_legend_childs.toolbox.appendChild(this.element_legend_childs.toolbox_left);
+ this.element_legend_childs.toolbox_left.onclick = function (e) {
+ e.preventDefault();
+
+ let step = (that.view_before - that.view_after) * getPanAndZoomStep(e);
+ let before = that.view_before - step;
+ let after = that.view_after - step;
+ if (after >= that.netdata_first) {
+ that.library.toolboxPanAndZoom(that, after, before);
+ }
+ };
+ if (NETDATA.options.current.show_help) {
+ $(this.element_legend_childs.toolbox_left).popover({
+ container: "body",
+ animation: false,
+ html: true,
+ trigger: 'hover',
+ placement: 'bottom',
+ delay: {
+ show: NETDATA.options.current.show_help_delay_show_ms,
+ hide: NETDATA.options.current.show_help_delay_hide_ms
+ },
+ title: 'Pan Left',
+ content: 'Pan the chart to the left. You can also <b>drag it</b> with your mouse or your finger (on touch devices).<br/><small>Help can be disabled from the settings.</small>'
+ });
+ }
+
+ this.element_legend_childs.toolbox_reset.className += ' netdata-legend-toolbox-button';
+ this.element_legend_childs.toolbox_reset.innerHTML = NETDATA.icons.reset;
+ this.element_legend_childs.toolbox.appendChild(this.element_legend_childs.toolbox_reset);
+ this.element_legend_childs.toolbox_reset.onclick = function (e) {
+ e.preventDefault();
+ NETDATA.resetAllCharts(that);
+ };
+ if (NETDATA.options.current.show_help) {
+ $(this.element_legend_childs.toolbox_reset).popover({
+ container: "body",
+ animation: false,
+ html: true,
+ trigger: 'hover',
+ placement: 'bottom',
+ delay: {
+ show: NETDATA.options.current.show_help_delay_show_ms,
+ hide: NETDATA.options.current.show_help_delay_hide_ms
+ },
+ title: 'Chart Reset',
+ content: 'Reset all the charts to their default auto-refreshing state. You can also <b>double click</b> the chart contents with your mouse or your finger (on touch devices).<br/><small>Help can be disabled from the settings.</small>'
+ });
+ }
+
+ this.element_legend_childs.toolbox_right.className += ' netdata-legend-toolbox-button';
+ this.element_legend_childs.toolbox_right.innerHTML = NETDATA.icons.right;
+ this.element_legend_childs.toolbox.appendChild(this.element_legend_childs.toolbox_right);
+ this.element_legend_childs.toolbox_right.onclick = function (e) {
+ e.preventDefault();
+ let step = (that.view_before - that.view_after) * getPanAndZoomStep(e);
+ let before = that.view_before + step;
+ let after = that.view_after + step;
+ if (before <= that.netdata_last) {
+ that.library.toolboxPanAndZoom(that, after, before);
+ }
+ };
+ if (NETDATA.options.current.show_help) {
+ $(this.element_legend_childs.toolbox_right).popover({
+ container: "body",
+ animation: false,
+ html: true,
+ trigger: 'hover',
+ placement: 'bottom',
+ delay: {
+ show: NETDATA.options.current.show_help_delay_show_ms,
+ hide: NETDATA.options.current.show_help_delay_hide_ms
+ },
+ title: 'Pan Right',
+ content: 'Pan the chart to the right. You can also <b>drag it</b> with your mouse or your finger (on touch devices).<br/><small>Help can be disabled from the settings.</small>'
+ });
+ }
+
+ this.element_legend_childs.toolbox_zoomin.className += ' netdata-legend-toolbox-button';
+ this.element_legend_childs.toolbox_zoomin.innerHTML = NETDATA.icons.zoomIn;
+ this.element_legend_childs.toolbox.appendChild(this.element_legend_childs.toolbox_zoomin);
+ this.element_legend_childs.toolbox_zoomin.onclick = function (e) {
+ e.preventDefault();
+ let dt = ((that.view_before - that.view_after) * (getPanAndZoomStep(e) * 0.8) / 2);
+ let before = that.view_before - dt;
+ let after = that.view_after + dt;
+ that.library.toolboxPanAndZoom(that, after, before);
+ };
+ if (NETDATA.options.current.show_help) {
+ $(this.element_legend_childs.toolbox_zoomin).popover({
+ container: "body",
+ animation: false,
+ html: true,
+ trigger: 'hover',
+ placement: 'bottom',
+ delay: {
+ show: NETDATA.options.current.show_help_delay_show_ms,
+ hide: NETDATA.options.current.show_help_delay_hide_ms
+ },
+ title: 'Chart Zoom In',
+ content: 'Zoom in the chart. You can also press SHIFT and select an area of the chart, or press SHIFT or ALT and use the mouse wheel or 2-finger touchpad scroll to zoom in or out.<br/><small>Help can be disabled from the settings.</small>'
+ });
+ }
+
+ this.element_legend_childs.toolbox_zoomout.className += ' netdata-legend-toolbox-button';
+ this.element_legend_childs.toolbox_zoomout.innerHTML = NETDATA.icons.zoomOut;
+ this.element_legend_childs.toolbox.appendChild(this.element_legend_childs.toolbox_zoomout);
+ this.element_legend_childs.toolbox_zoomout.onclick = function (e) {
+ e.preventDefault();
+ let dt = (((that.view_before - that.view_after) / (1.0 - (getPanAndZoomStep(e) * 0.8)) - (that.view_before - that.view_after)) / 2);
+ let before = that.view_before + dt;
+ let after = that.view_after - dt;
+
+ that.library.toolboxPanAndZoom(that, after, before);
+ };
+ if (NETDATA.options.current.show_help) {
+ $(this.element_legend_childs.toolbox_zoomout).popover({
+ container: "body",
+ animation: false,
+ html: true,
+ trigger: 'hover',
+ placement: 'bottom',
+ delay: {
+ show: NETDATA.options.current.show_help_delay_show_ms,
+ hide: NETDATA.options.current.show_help_delay_hide_ms
+ },
+ title: 'Chart Zoom Out',
+ content: 'Zoom out the chart. You can also press SHIFT or ALT and use the mouse wheel, or 2-finger touchpad scroll to zoom in or out.<br/><small>Help can be disabled from the settings.</small>'
+ });
+ }
+
+ //this.element_legend_childs.toolbox_volume.className += ' netdata-legend-toolbox-button';
+ //this.element_legend_childs.toolbox_volume.innerHTML = '<i class="fas fa-sort-amount-down"></i>';
+ //this.element_legend_childs.toolbox_volume.title = 'Visible Volume';
+ //this.element_legend_childs.toolbox.appendChild(this.element_legend_childs.toolbox_volume);
+ //this.element_legend_childs.toolbox_volume.onclick = function(e) {
+ //e.preventDefault();
+ //alert('clicked toolbox_volume on ' + that.id);
+ //}
+ }
+
+ if (NETDATA.options.current.resize_charts) {
+ this.element_legend_childs.resize_handler = document.createElement('div');
+
+ this.element_legend_childs.resize_handler.className += " netdata-legend-resize-handler";
+ this.element_legend_childs.resize_handler.innerHTML = NETDATA.icons.resize;
+ this.element.appendChild(this.element_legend_childs.resize_handler);
+ if (NETDATA.options.current.show_help) {
+ $(this.element_legend_childs.resize_handler).popover({
+ container: "body",
+ animation: false,
+ html: true,
+ trigger: 'hover',
+ placement: 'bottom',
+ delay: {
+ show: NETDATA.options.current.show_help_delay_show_ms,
+ hide: NETDATA.options.current.show_help_delay_hide_ms
+ },
+ title: 'Chart Resize',
+ content: 'Drag this point with your mouse or your finger (on touch devices), to resize the chart vertically. You can also <b>double click it</b> or <b>double tap it</b> to reset between 2 states: the default and the one that fits all the values.<br/><small>Help can be disabled from the settings.</small>'
+ });
+ }
+
+ // mousedown event
+ this.element_legend_childs.resize_handler.onmousedown =
+ function (e) {
+ that.resizeHandler(e);
+ };
+
+ // touchstart event
+ this.element_legend_childs.resize_handler.addEventListener('touchstart', function (e) {
+ that.resizeHandler(e);
+ }, false);
+ }
+
+ if (this.chart) {
+ this.element_legend_childs.title_date.title = this.legendPluginModuleString(true);
+ this.element_legend_childs.title_time.title = this.legendResolutionTooltip();
+ }
+
+ this.element_legend_childs.title_date.className += " netdata-legend-title-date";
+ this.element_legend.appendChild(this.element_legend_childs.title_date);
+ this.tmp.__last_shown_legend_date = undefined;
+
+ this.element_legend.appendChild(document.createElement('br'));
+
+ this.element_legend_childs.title_time.className += " netdata-legend-title-time";
+ this.element_legend.appendChild(this.element_legend_childs.title_time);
+ this.tmp.__last_shown_legend_time = undefined;
+
+ this.element_legend.appendChild(document.createElement('br'));
+
+ this.element_legend_childs.title_units.className += " netdata-legend-title-units";
+ this.element_legend_childs.title_units.innerText = this.units_current;
+ this.element_legend.appendChild(this.element_legend_childs.title_units);
+ this.tmp.__last_shown_legend_units = undefined;
+
+ this.element_legend.appendChild(document.createElement('br'));
+
+ this.element_legend_childs.perfect_scroller.className = 'netdata-legend-series';
+ this.element_legend.appendChild(this.element_legend_childs.perfect_scroller);
+
+ content.className = 'netdata-legend-series-content';
+ this.element_legend_childs.perfect_scroller.appendChild(content);
+
+ this.element_legend_childs.content = content;
+
+ if (NETDATA.options.current.show_help) {
+ $(content).popover({
+ container: "body",
+ animation: false,
+ html: true,
+ trigger: 'hover',
+ placement: 'bottom',
+ title: 'Chart Legend',
+ delay: {
+ show: NETDATA.options.current.show_help_delay_show_ms,
+ hide: NETDATA.options.current.show_help_delay_hide_ms
+ },
+ content: 'You can click or tap on the values or the labels to select dimensions. By pressing SHIFT or CONTROL, you can enable or disable multiple dimensions.<br/><small>Help can be disabled from the settings.</small>'
+ });
+ }
+ } else {
+ this.element_legend_childs = {
+ content: content,
+ resize_handler: null,
+ toolbox: null,
+ toolbox_left: null,
+ toolbox_right: null,
+ toolbox_reset: null,
+ toolbox_zoomin: null,
+ toolbox_zoomout: null,
+ toolbox_volume: null,
+ title_date: null,
+ title_time: null,
+ title_units: null,
+ perfect_scroller: null,
+ series: {}
+ };
+ }
+
+ if (this.data) {
+ this.element_legend_childs.series.labels_key = this.data.dimension_names.toString();
+ if (this.debug) {
+ this.log('labels from data: "' + this.element_legend_childs.series.labels_key + '"');
+ }
+
+ for (let i = 0, len = this.data.dimension_names.length; i < len; i++) {
+ genLabel(this, content, this.data.dimension_ids[i], this.data.dimension_names[i], i);
+ }
+ } else {
+ let tmp = [];
+ keys = Object.keys(this.chart.dimensions);
+ for (let i = 0, len = keys.length; i < len; i++) {
+ dim = keys[i];
+ tmp.push(this.chart.dimensions[dim].name);
+ genLabel(this, content, dim, this.chart.dimensions[dim].name, i);
+ }
+ this.element_legend_childs.series.labels_key = tmp.toString();
+ if (this.debug) {
+ this.log('labels from chart: "' + this.element_legend_childs.series.labels_key + '"');
+ }
+ }
+
+ // create a hidden div to be used for hiding
+ // the original legend of the chart library
+ let el = document.createElement('div');
+ if (this.element_legend !== null) {
+ this.element_legend.appendChild(el);
+ }
+ el.style.display = 'none';
+
+ this.element_legend_childs.hidden = document.createElement('div');
+ el.appendChild(this.element_legend_childs.hidden);
+
+ if (this.element_legend_childs.perfect_scroller !== null) {
+ Ps.initialize(this.element_legend_childs.perfect_scroller, {
+ wheelSpeed: 0.2,
+ wheelPropagation: true,
+ swipePropagation: true,
+ minScrollbarLength: null,
+ maxScrollbarLength: null,
+ useBothWheelAxes: false,
+ suppressScrollX: true,
+ suppressScrollY: false,
+ scrollXMarginOffset: 0,
+ scrollYMarginOffset: 0,
+ theme: 'default'
+ });
+ Ps.update(this.element_legend_childs.perfect_scroller);
+ }
+
+ this.legendShowLatestValues();
+ };
+
+ this.hasLegend = function () {
+ if (typeof this.tmp.___hasLegendCache___ !== 'undefined') {
+ return this.tmp.___hasLegendCache___;
+ }
+
+ let leg = false;
+ if (this.library && this.library.legend(this) === 'right-side') {
+ leg = true;
+ }
+
+ this.tmp.___hasLegendCache___ = leg;
+ return leg;
+ };
+
+ this.legendWidth = function () {
+ return (this.hasLegend()) ? 140 : 0;
+ };
+
+ this.legendHeight = function () {
+ return $(this.element).height();
+ };
+
+ this.chartWidth = function () {
+ return $(this.element).width() - this.legendWidth();
+ };
+
+ this.chartHeight = function () {
+ return $(this.element).height();
+ };
+
+ this.chartPixelsPerPoint = function () {
+ // start from the pixels-per-point detail provided via options
+ let px = this.pixels_per_point;
+
+ if (this.library && px < this.library.pixels_per_point(this)) {
+ px = this.library.pixels_per_point(this);
+ }
+
+ if (px < NETDATA.options.current.pixels_per_point) {
+ px = NETDATA.options.current.pixels_per_point;
+ }
+
+ return px;
+ };
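+
+ // In effect the detail used is the coarsest of the three sources, e.g.
+ // (illustrative numbers) element attribute 1 px/point, library minimum
+ // 3 px/point, global option 2 px/point -> 3 px/point wins.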
+
+ this.needsRecreation = function () {
+ let ret = (
+ this.chart_created &&
+ this.library &&
+ this.library.autoresize() === false &&
+ this.tm.last_resized < NETDATA.options.last_page_resize
+ );
+
+ if (this.debug) {
+ this.log('needsRecreation(): ' + ret.toString() + ', chart_created = ' + this.chart_created.toString());
+ }
+
+ return ret;
+ };
+
+ this.chartDataUniqueID = function () {
+ return this.id + ',' + this.library_name + ',' + this.dimensions + ',' + this.chartURLOptions(true);
+ };
+
+ this.chartURLOptions = function (isForUniqueId) {
+ let ret = '';
+
+ if (this.override_options !== null) {
+ ret = this.override_options.toString();
+ } else {
+ ret = this.library.options(this);
+ }
+
+ if (this.append_options !== null) {
+ ret += '%7C' + this.append_options.toString();
+ }
+
+ ret += '%7C' + 'jsonwrap';
+
+ // always add `nonzero` when it's used to create a chartDataUniqueID
+ // we cannot just remove `nonzero` because of backwards compatibility with old snapshots
+ if (isForUniqueId || NETDATA.options.current.eliminate_zero_dimensions) {
+ ret += '%7C' + 'nonzero';
+ }
+
+ return ret;
+ };
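+
+ // Note: '%7C' is the URL-encoded pipe ('|') separating API options, so an
+ // illustrative result looks like "absolute%7Cjsonwrap%7Cnonzero" (the
+ // leading option string depends on the chart library).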
+
+ this.chartURL = function () {
+ let after, before, points_multiplier = 1;
+ if (NETDATA.globalPanAndZoom.isActive()) {
+ if (this.current.force_before_ms !== null && this.current.force_after_ms !== null) {
+ this.tm.pan_and_zoom_seq = 0;
+
+ before = Math.round(this.current.force_before_ms / 1000);
+ after = Math.round(this.current.force_after_ms / 1000);
+ this.view_after = after * 1000;
+ this.view_before = before * 1000;
+
+ if (NETDATA.options.current.pan_and_zoom_data_padding) {
+ this.requested_padding = Math.round((before - after) / 2);
+ after -= this.requested_padding;
+ before += this.requested_padding;
+ this.requested_padding *= 1000;
+ points_multiplier = 2;
+ }
+
+ this.current.force_before_ms = null;
+ this.current.force_after_ms = null;
+ } else {
+ this.tm.pan_and_zoom_seq = NETDATA.globalPanAndZoom.seq;
+
+ after = Math.round(NETDATA.globalPanAndZoom.force_after_ms / 1000);
+ before = Math.round(NETDATA.globalPanAndZoom.force_before_ms / 1000);
+ this.view_after = after * 1000;
+ this.view_before = before * 1000;
+
+ this.requested_padding = null;
+ points_multiplier = 1;
+ }
+ } else {
+ this.tm.pan_and_zoom_seq = 0;
+
+ before = this.before;
+ after = this.after;
+ this.view_after = after * 1000;
+ this.view_before = before * 1000;
+
+ this.requested_padding = null;
+ points_multiplier = 1;
+ }
+
+ this.requested_after = after * 1000;
+ this.requested_before = before * 1000;
+
+ let data_points;
+ if (NETDATA.options.force_data_points !== 0) {
+ data_points = NETDATA.options.force_data_points;
+ this.data_points = data_points;
+ } else {
+ this.data_points = this.points || Math.round(this.chartWidth() / this.chartPixelsPerPoint());
+ data_points = this.data_points * points_multiplier;
+ }
+
+ // build the data URL
+ this.data_url = this.host + this.chart.data_url;
+ this.data_url += "&format=" + this.library.format();
+ this.data_url += "&points=" + (data_points).toString();
+ this.data_url += "&group=" + this.method;
+ this.data_url += "&gtime=" + this.gtime;
+ this.data_url += "&options=" + this.chartURLOptions();
+
+ if (after) {
+ this.data_url += "&after=" + after.toString();
+ }
+
+ if (before) {
+ this.data_url += "&before=" + before.toString();
+ }
+
+ if (this.dimensions) {
+ this.data_url += "&dimensions=" + this.dimensions;
+ }
+
+ if (NETDATA.options.debug.chart_data_url || this.debug) {
+ this.log('chartURL(): ' + this.data_url + ' WxH:' + this.chartWidth() + 'x' + this.chartHeight() + ' points: ' + data_points.toString() + ' library: ' + this.library_name);
+ }
+ };
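+
+ // Illustrative sketch of a resulting URL (host, chart id and values are
+ // hypothetical; chart.data_url is assumed to already carry
+ // "/api/v1/data?chart=..."):
+ //   http://host:19999/api/v1/data?chart=system.cpu&format=json&points=300
+ //     &group=average&gtime=0&options=jsonwrap%7Cnonzero&after=-600
+ // (a zero 'before' is falsy and is therefore omitted from the URL)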
+
+ this.redrawChart = function () {
+ if (this.data !== null) {
+ this.updateChartWithData(this.data);
+ }
+ };
+
+ this.updateChartWithData = function (data) {
+ if (this.debug) {
+ this.log('updateChartWithData() called.');
+ }
+
+ // this may force the chart to be re-created
+ resizeChart();
+
+ this.data = data;
+
+ let started = Date.now();
+ let view_update_every = data.view_update_every * 1000;
+
+ if (this.data_update_every !== view_update_every) {
+ if (this.element_legend_childs.title_time) {
+ this.element_legend_childs.title_time.title = this.legendResolutionTooltip();
+ }
+ }
+
+ // if the result is JSON, find the latest update-every
+ this.data_update_every = view_update_every;
+ this.data_after = data.after * 1000;
+ this.data_before = data.before * 1000;
+ this.netdata_first = data.first_entry * 1000;
+ this.netdata_last = data.last_entry * 1000;
+ this.data_points = data.points;
+
+ data.state = this;
+
+ if (NETDATA.options.current.pan_and_zoom_data_padding && this.requested_padding !== null) {
+ if (this.view_after < this.data_after) {
+ // console.log('adjusting view_after from ' + this.view_after + ' to ' + this.data_after);
+ this.view_after = this.data_after;
+ }
+
+ if (this.view_before > this.data_before) {
+ // console.log('adjusting view_before from ' + this.view_before + ' to ' + this.data_before);
+ this.view_before = this.data_before;
+ }
+ } else {
+ this.view_after = this.data_after;
+ this.view_before = this.data_before;
+ }
+
+ if (this.debug) {
+ this.log('UPDATE No ' + this.updates_counter + ' COMPLETED');
+
+ if (this.current.force_after_ms) {
+ this.log('STATUS: forced : ' + (this.current.force_after_ms / 1000).toString() + ' - ' + (this.current.force_before_ms / 1000).toString());
+ } else {
+ this.log('STATUS: forced : unset');
+ }
+
+ this.log('STATUS: requested : ' + (this.requested_after / 1000).toString() + ' - ' + (this.requested_before / 1000).toString());
+ this.log('STATUS: downloaded: ' + (this.data_after / 1000).toString() + ' - ' + (this.data_before / 1000).toString());
+ this.log('STATUS: rendered : ' + (this.view_after / 1000).toString() + ' - ' + (this.view_before / 1000).toString());
+ this.log('STATUS: points : ' + (this.data_points).toString());
+ }
+
+ if (this.data_points === 0) {
+ noDataToShow();
+ return;
+ }
+
+ if (this.updates_since_last_creation >= this.library.max_updates_to_recreate()) {
+ if (this.debug) {
+ this.log('max updates of ' + this.updates_since_last_creation.toString() + ' reached. Forcing re-generation.');
+ }
+
+ init('force');
+ return;
+ }
+
+ // check and update the legend
+ this.legendUpdateDOM();
+
+ if (this.chart_created && typeof this.library.update === 'function') {
+ if (this.debug) {
+ this.log('updating chart...');
+ }
+
+ if (!callChartLibraryUpdateSafely(data)) {
+ return;
+ }
+ } else {
+ if (this.debug) {
+ this.log('creating chart...');
+ }
+
+ if (!callChartLibraryCreateSafely(data)) {
+ return;
+ }
+ }
+
+ if (this.isVisible()) {
+ hideMessage();
+ this.legendShowLatestValues();
+ } else {
+ this.__redraw_on_unhide = true;
+
+ if (this.debug) {
+ this.log("drawn while not visible");
+ }
+ }
+
+ if (this.selected) {
+ NETDATA.globalSelectionSync.stop();
+ }
+
+ // update the performance counters
+ let now = Date.now();
+ this.tm.last_updated = now;
+
+ // don't update last_autorefreshed if this chart is
+ // forced to be updated with global PanAndZoom
+ if (NETDATA.globalPanAndZoom.isActive()) {
+ this.tm.last_autorefreshed = 0;
+ } else {
+ if (NETDATA.options.current.parallel_refresher && NETDATA.options.current.concurrent_refreshes && typeof this.force_update_every !== 'number') {
+ this.tm.last_autorefreshed = now - (now % this.data_update_every);
+ } else {
+ this.tm.last_autorefreshed = now;
+ }
+ }
+
+ this.refresh_dt_ms = now - started;
+ NETDATA.options.auto_refresher_fast_weight += this.refresh_dt_ms;
+
+ if (this.refresh_dt_element !== null) {
+ this.refresh_dt_element.innerText = this.refresh_dt_ms.toString();
+ }
+
+ if (this.foreignElementBefore !== null) {
+ this.foreignElementBefore.innerText = NETDATA.dateTime.localeDateString(this.view_before) + ' ' + NETDATA.dateTime.localeTimeString(this.view_before);
+ }
+
+ if (this.foreignElementAfter !== null) {
+ this.foreignElementAfter.innerText = NETDATA.dateTime.localeDateString(this.view_after) + ' ' + NETDATA.dateTime.localeTimeString(this.view_after);
+ }
+
+ if (this.foreignElementDuration !== null) {
+ this.foreignElementDuration.innerText = NETDATA.seconds4human(Math.floor((this.view_before - this.view_after) / 1000) + 1);
+ }
+
+ if (this.foreignElementUpdateEvery !== null) {
+ this.foreignElementUpdateEvery.innerText = NETDATA.seconds4human(Math.floor(this.data_update_every / 1000));
+ }
+ };
+
+ this.getSnapshotData = function (key) {
+ if (this.debug) {
+ this.log('updating from snapshot: ' + key);
+ }
+
+ if (typeof netdataSnapshotData.data[key] === 'undefined') {
+ this.log('snapshot does not include data for key "' + key + '"');
+ return null;
+ }
+
+ if (typeof netdataSnapshotData.data[key] !== 'string') {
+ this.log('snapshot data for key "' + key + '" is not string');
+ return null;
+ }
+
+ let uncompressed;
+ try {
+ uncompressed = netdataSnapshotData.uncompress(netdataSnapshotData.data[key]);
+
+ if (uncompressed === null) {
+ this.log('uncompressed snapshot data for key ' + key + ' is null');
+ return null;
+ }
+
+ if (typeof uncompressed === 'undefined') {
+ this.log('uncompressed snapshot data for key ' + key + ' is undefined');
+ return null;
+ }
+ } catch (e) {
+ this.log('decompression of snapshot data for key ' + key + ' failed');
+ console.log(e);
+ uncompressed = null;
+ }
+
+ if (typeof uncompressed !== 'string') {
+ this.log('uncompressed snapshot data for key ' + key + ' is not string');
+ return null;
+ }
+
+ let data;
+ try {
+ data = JSON.parse(uncompressed);
+ } catch (e) {
+ this.log('parsing snapshot data for key ' + key + ' failed');
+ console.log(e);
+ data = null;
+ }
+
+ return data;
+ };
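+
+ // Summary of the snapshot path: look the compressed payload up by key,
+ // uncompress it to a JSON string, then JSON.parse it; a failure at any
+ // stage is logged and surfaces as a null return, which updateChart()
+ // reports as "cannot get data from snapshot".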
+
+ this.updateChart = function (callback) {
+ if (this.debug) {
+ this.log('updateChart()');
+ }
+
+ if (this.fetching_data) {
+ if (this.debug) {
+ this.log('updateChart(): I am already updating...');
+ }
+
+ if (typeof callback === 'function') {
+ return callback(false, 'already running');
+ }
+
+ return;
+ }
+
+ // due to late initialization of charts and libraries
+ // we need to check this too
+ if (!this.enabled) {
+ if (this.debug) {
+ this.log('updateChart(): I am not enabled');
+ }
+
+ if (typeof callback === 'function') {
+ return callback(false, 'not enabled');
+ }
+
+ return;
+ }
+
+ if (!canBeRendered()) {
+ if (this.debug) {
+ this.log('updateChart(): cannot be rendered');
+ }
+
+ if (typeof callback === 'function') {
+ return callback(false, 'cannot be rendered');
+ }
+
+ return;
+ }
+
+ if (that.dom_created !== true) {
+ if (this.debug) {
+ this.log('updateChart(): creating DOM');
+ }
+
+ createDOM();
+ }
+
+ if (this.chart === null) {
+ if (this.debug) {
+ this.log('updateChart(): getting chart');
+ }
+
+ return this.getChart(function () {
+ return that.updateChart(callback);
+ });
+ }
+
+ if (!this.library.initialized) {
+ if (this.library.enabled) {
+ if (this.debug) {
+ this.log('updateChart(): initializing chart library');
+ }
+
+ return this.library.initialize(function () {
+ return that.updateChart(callback);
+ });
+ } else {
+ error('chart library "' + this.library_name + '" is not available.');
+
+ if (typeof callback === 'function') {
+ return callback(false, 'library not available');
+ }
+
+ return;
+ }
+ }
+
+ this.clearSelection();
+ this.chartURL();
+
+ NETDATA.statistics.refreshes_total++;
+ NETDATA.statistics.refreshes_active++;
+
+ if (NETDATA.statistics.refreshes_active > NETDATA.statistics.refreshes_active_max) {
+ NETDATA.statistics.refreshes_active_max = NETDATA.statistics.refreshes_active;
+ }
+
+ let ok = false;
+ this.fetching_data = true;
+
+ if (netdataSnapshotData !== null) {
+ let key = this.chartDataUniqueID();
+ let data = this.getSnapshotData(key);
+ if (data !== null) {
+ ok = true;
+ data = NETDATA.xss.checkData('/api/v1/data', data, this.library.xssRegexIgnore);
+ this.updateChartWithData(data);
+ } else {
+ ok = false;
+ error('cannot get data from snapshot for key: "' + key + '"');
+ that.tm.last_autorefreshed = Date.now();
+ }
+
+ NETDATA.statistics.refreshes_active--;
+ this.fetching_data = false;
+
+ if (typeof callback === 'function') {
+ callback(ok, 'snapshot');
+ }
+
+ return;
+ }
+
+ if (this.debug) {
+ this.log('updating from ' + this.data_url);
+ }
+
+ this.xhr = $.ajax({
+ url: this.data_url,
+ cache: false,
+ async: true,
+ headers: {
+ 'Cache-Control': 'no-cache, no-store',
+ 'Pragma': 'no-cache'
+ },
+ xhrFields: {withCredentials: true} // required for the cookie
+ })
+ .done(function (data) {
+ data = NETDATA.xss.checkData('/api/v1/data', data, that.library.xssRegexIgnore);
+
+ that.xhr = undefined;
+ that.retries_on_data_failures = 0;
+ ok = true;
+
+ if (that.debug) {
+ that.log('data received. updating chart.');
+ }
+
+ that.updateChartWithData(data);
+ })
+ .fail(function (msg) {
+ that.xhr = undefined;
+
+ if (msg.statusText !== 'abort') {
+ that.retries_on_data_failures++;
+ if (that.retries_on_data_failures > NETDATA.options.current.retries_on_data_failures) {
+ // that.log('failed ' + that.retries_on_data_failures.toString() + ' times - giving up');
+ that.retries_on_data_failures = 0;
+ error('data download failed for url: ' + that.data_url);
+ }
+ else {
+ that.tm.last_autorefreshed = Date.now();
+ // that.log('failed ' + that.retries_on_data_failures.toString() + ' times, but I will retry');
+ }
+ }
+ })
+ .always(function () {
+ that.xhr = undefined;
+
+ NETDATA.statistics.refreshes_active--;
+ that.fetching_data = false;
+
+ if (typeof callback === 'function') {
+ return callback(ok, 'download');
+ }
+ });
+ };
+
+ const __isVisible = function () {
+ let ret = true;
+
+ if (NETDATA.options.current.update_only_visible !== false) {
+ // tolerance is the number of pixels a chart can be off-screen
+ // and still be considered visible (and refreshed as if it were visible)
+ let tolerance = 0;
+
+ that.tm.last_visible_check = Date.now();
+
+ let rect = that.element.getBoundingClientRect();
+
+ let screenTop = window.scrollY;
+ let screenBottom = screenTop + window.innerHeight;
+
+ let chartTop = rect.top + screenTop;
+ let chartBottom = chartTop + rect.height;
+
+ ret = !(rect.width === 0 || rect.height === 0 || chartBottom + tolerance < screenTop || chartTop - tolerance > screenBottom);
+ }
+
+ if (that.debug) {
+ that.log('__isVisible(): ' + ret);
+ }
+
+ return ret;
+ };
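+
+ // Worked example (illustrative): with window.scrollY = 0, innerHeight =
+ // 800 and a chart whose bounding rect is { top: 900, height: 200 }, the
+ // chart occupies document pixels 900..1100 while the viewport covers
+ // 0..800; with tolerance 0, chartTop (900) > screenBottom (800), so the
+ // chart is reported as not visible.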
+
+ this.isVisible = function (nocache) {
+ // this.log('last_visible_check: ' + this.tm.last_visible_check + ', last_page_scroll: ' + NETDATA.options.last_page_scroll);
+
+ // caching - we do not re-evaluate the chart's visibility
+ // if the page has not been scrolled since the last check
+ if ((typeof nocache !== 'undefined' && nocache)
+ || typeof this.tmp.___isVisible___ === 'undefined'
+ || this.tm.last_visible_check <= NETDATA.options.last_page_scroll) {
+ this.tmp.___isVisible___ = __isVisible();
+ if (this.tmp.___isVisible___) {
+ this.unhideChart();
+ } else {
+ this.hideChart();
+ }
+ }
+
+ if (this.debug) {
+ this.log('isVisible(' + nocache + '): ' + this.tmp.___isVisible___);
+ }
+
+ return this.tmp.___isVisible___;
+ };
+
+ this.isAutoRefreshable = function () {
+ return (this.current.autorefresh);
+ };
+
+ this.canBeAutoRefreshed = function () {
+ if (!this.enabled) {
+ if (this.debug) {
+ this.log('canBeAutoRefreshed() -> not enabled');
+ }
+
+ return false;
+ }
+
+ if (this.running) {
+ if (this.debug) {
+ this.log('canBeAutoRefreshed() -> already running');
+ }
+
+ return false;
+ }
+
+ if (this.library === null || this.library.enabled === false) {
+ error('charting library "' + this.library_name + '" is not available');
+ if (this.debug) {
+ this.log('canBeAutoRefreshed() -> chart library ' + this.library_name + ' is not available');
+ }
+
+ return false;
+ }
+
+ if (!this.isVisible()) {
+ if (NETDATA.options.debug.visibility || this.debug) {
+ this.log('canBeAutoRefreshed() -> not visible');
+ }
+
+ return false;
+ }
+
+ let now = Date.now();
+
+ if (this.current.force_update_at !== 0 && this.current.force_update_at < now) {
+ if (this.debug) {
+ this.log('canBeAutoRefreshed() -> timed force update - allowing this update');
+ }
+
+ this.current.force_update_at = 0;
+ return true;
+ }
+
+ if (!this.isAutoRefreshable()) {
+ if (this.debug) {
+ this.log('canBeAutoRefreshed() -> not auto-refreshable');
+ }
+
+ return false;
+ }
+
+ // allow the first update, even if the page is not visible
+ if (NETDATA.options.page_is_visible === false && this.updates_counter && this.updates_since_last_unhide) {
+ if (NETDATA.options.debug.focus || this.debug) {
+ this.log('canBeAutoRefreshed() -> not the first update, and page does not have focus');
+ }
+
+ return false;
+ }
+
+ if (this.needsRecreation()) {
+ if (this.debug) {
+ this.log('canBeAutoRefreshed() -> needs re-creation.');
+ }
+
+ return true;
+ }
+
+ if (NETDATA.options.auto_refresher_stop_until >= now) {
+ if (this.debug) {
+ this.log('canBeAutoRefreshed() -> stopped until is in future.');
+ }
+
+ return false;
+ }
+
+ // options valid only for autoRefresh()
+ if (NETDATA.globalPanAndZoom.isActive()) {
+ if (NETDATA.globalPanAndZoom.shouldBeAutoRefreshed(this)) {
+ if (this.debug) {
+ this.log('canBeAutoRefreshed(): global panning: I need an update.');
+ }
+
+ return true;
+ }
+ else {
+ if (this.debug) {
+ this.log('canBeAutoRefreshed(): global panning: I am already up to date.');
+ }
+
+ return false;
+ }
+ }
+
+ if (this.selected) {
+ if (this.debug) {
+ this.log('canBeAutoRefreshed(): I have a selection in place.');
+ }
+
+ return false;
+ }
+
+ if (this.paused) {
+ if (this.debug) {
+ this.log('canBeAutoRefreshed(): I am paused.');
+ }
+
+ return false;
+ }
+
+ let data_update_every = this.data_update_every;
+ if (typeof this.force_update_every === 'number') {
+ data_update_every = this.force_update_every;
+ }
+
+ if (now - this.tm.last_autorefreshed >= data_update_every) {
+ if (this.debug) {
+ this.log('canBeAutoRefreshed(): It is time to update me. Now: ' + now.toString() + ', last_autorefreshed: ' + this.tm.last_autorefreshed + ', data_update_every: ' + data_update_every + ', delta: ' + (now - this.tm.last_autorefreshed).toString());
+ }
+
+ return true;
+ }
+
+ return false;
+ };
+
+ this.autoRefresh = function (callback) {
+ let state = that;
+
+ if (state.canBeAutoRefreshed() && state.running === false) {
+ state.running = true;
+ state.updateChart(function () {
+ state.running = false;
+
+ if (typeof callback === 'function') {
+ return callback();
+ }
+ });
+ } else {
+ if (typeof callback === 'function') {
+ return callback();
+ }
+ }
+ };
+
+ this.__defaultsFromDownloadedChart = function (chart) {
+ this.chart = chart;
+ this.chart_url = chart.url;
+ this.data_update_every = chart.update_every * 1000;
+ this.data_points = Math.round(this.chartWidth() / this.chartPixelsPerPoint());
+ this.tm.last_info_downloaded = Date.now();
+
+ if (this.title === null) {
+ this.title = chart.title;
+ }
+
+ if (this.units === null) {
+ this.units = chart.units;
+ this.units_current = this.units;
+ }
+ };
+
+ // fetch the chart description from the netdata server
+ this.getChart = function (callback) {
+ this.chart = NETDATA.chartRegistry.get(this.host, this.id);
+ if (this.chart) {
+ this.__defaultsFromDownloadedChart(this.chart);
+
+ if (typeof callback === 'function') {
+ return callback();
+ }
+ } else if (netdataSnapshotData !== null) {
+ // console.log(this);
+ // console.log(NETDATA.chartRegistry);
+ NETDATA.error(404, 'host: ' + this.host + ', chart: ' + this.id);
+ error('chart not found in snapshot');
+
+ if (typeof callback === 'function') {
+ return callback();
+ }
+ } else {
+ this.chart_url = "/api/v1/chart?chart=" + this.id;
+
+ if (this.debug) {
+ this.log('downloading ' + this.chart_url);
+ }
+
+ $.ajax({
+ url: this.host + this.chart_url,
+ cache: false,
+ async: true,
+ xhrFields: {withCredentials: true} // required for the cookie
+ })
+ .done(function (chart) {
+ chart = NETDATA.xss.checkOptional('/api/v1/chart', chart);
+
+ chart.url = that.chart_url;
+ that.__defaultsFromDownloadedChart(chart);
+ NETDATA.chartRegistry.add(that.host, that.id, chart);
+ })
+ .fail(function () {
+ NETDATA.error(404, that.chart_url);
+ error('chart not found on url "' + that.chart_url + '"');
+ })
+ .always(function () {
+ if (typeof callback === 'function') {
+ return callback();
+ }
+ });
+ }
+ };
+
+ // ============================================================================================================
+ // INITIALIZATION
+
+ initDOM();
+ init('fast');
+};
+
+NETDATA.resetAllCharts = function (state) {
+ // first clear the global selection sync
+ // to make sure no chart is in selected state
+ NETDATA.globalSelectionSync.stop();
+
+ // there are 2 possibilities here
+ // a. state is the global Pan and Zoom master
+ // b. state is not the global Pan and Zoom master
+
+ // let master = true;
+ // if (NETDATA.globalPanAndZoom.isMaster(state) === false) {
+ // master = false;
+ // }
+ const master = NETDATA.globalPanAndZoom.isMaster(state);
+
+ // clear the global Pan and Zoom
+ // this will also refresh the master
+ // and unblock any charts currently mirroring the master
+ NETDATA.globalPanAndZoom.clearMaster();
+
+ // if we were not the master, reset our status too
+ // this is required because most probably the mouse
+ // is over this chart, blocking it from auto-refreshing
+ if (master === false && (state.paused || state.selected)) {
+ state.resetChart();
+ }
+};
+
+// get or create a chart state, given a DOM element
+NETDATA.chartState = function (element) {
+ let self = $(element);
+
+ let state = self.data('netdata-state-object') || null;
+ if (state === null) {
+ state = new chartState(element);
+ self.data('netdata-state-object', state);
+ }
+ return state;
+};
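+
+// Usage sketch (illustrative): repeated calls on the same element return
+// the same state object:
+//   let el = document.querySelector('div[data-netdata]');
+//   let state = NETDATA.chartState(el);
+//   state === NETDATA.chartState(el); // true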
+
+// ----------------------------------------------------------------------------------------------------------------
+// Library functions
+
+// Load a script without jquery
+// This is used to load jquery - after it is loaded, we use jquery
+NETDATA._loadjQuery = function (callback) {
+ if (typeof jQuery === 'undefined') {
+ if (NETDATA.options.debug.main_loop) {
+ console.log('loading ' + NETDATA.jQuery);
+ }
+
+ let script = document.createElement('script');
+ script.type = 'text/javascript';
+ script.async = true;
+ script.src = NETDATA.jQuery;
+
+ // script.onabort = onError;
+ script.onerror = function () {
+ NETDATA.error(101, NETDATA.jQuery);
+ };
+ if (typeof callback === "function") {
+ script.onload = function () {
+ $ = jQuery;
+ return callback();
+ };
+ }
+
+ let s = document.getElementsByTagName('script')[0];
+ s.parentNode.insertBefore(script, s);
+ }
+ else if (typeof callback === "function") {
+ $ = jQuery;
+ return callback();
+ }
+};
+
+NETDATA._loadCSS = function (filename) {
+ // don't use jQuery here
+ // styles are loaded before jQuery
+ // to eliminate showing an unstyled page to the user
+
+ let fileref = document.createElement("link");
+ fileref.setAttribute("rel", "stylesheet");
+ fileref.setAttribute("type", "text/css");
+ fileref.setAttribute("href", filename);
+
+ if (typeof fileref !== 'undefined') {
+ document.getElementsByTagName("head")[0].appendChild(fileref);
+ }
+};
+
+// user function to signal us that the DOM has been updated
+NETDATA.updatedDom = function () {
+ NETDATA.options.updated_dom = true;
+};
+
+NETDATA.ready = function (callback) {
+ NETDATA.options.pauseCallback = callback;
+};
+
+NETDATA.pause = function (callback) {
+ if (typeof callback === 'function') {
+ if (NETDATA.options.pause) {
+ return callback();
+ } else {
+ NETDATA.options.pauseCallback = callback;
+ }
+ }
+};
+
+NETDATA.unpause = function () {
+ NETDATA.options.pauseCallback = null;
+ NETDATA.options.updated_dom = true;
+ NETDATA.options.pause = false;
+};
+
+// ----------------------------------------------------------------------------------------------------------------
+
+// this is purely sequential charts refresher
+// it is meant to be autonomous
+NETDATA.chartRefresherNoParallel = function (index, callback) {
+ let targets = NETDATA.intersectionObserver.targets();
+
+ if (NETDATA.options.debug.main_loop) {
+ console.log('NETDATA.chartRefresherNoParallel(' + index + ')');
+ }
+
+ if (NETDATA.options.updated_dom) {
+ // the dom has been updated
+ // get the dom parts again
+ NETDATA.parseDom(callback);
+ return;
+ }
+ if (index >= targets.length) {
+ if (NETDATA.options.debug.main_loop) {
+ console.log('waiting to restart main loop...');
+ }
+
+ NETDATA.options.auto_refresher_fast_weight = 0;
+ callback();
+ } else {
+ let state = targets[index];
+
+ if (NETDATA.options.auto_refresher_fast_weight < NETDATA.options.current.fast_render_timeframe) {
+ if (NETDATA.options.debug.main_loop) {
+ console.log('fast rendering...');
+ }
+
+ if (state.isVisible()) {
+ NETDATA.timeout.set(function () {
+ state.autoRefresh(function () {
+ NETDATA.chartRefresherNoParallel(++index, callback);
+ });
+ }, 0);
+ } else {
+ NETDATA.chartRefresherNoParallel(++index, callback);
+ }
+ } else {
+ if (NETDATA.options.debug.main_loop) {
+ console.log('waiting for next refresh...');
+ }
+ NETDATA.options.auto_refresher_fast_weight = 0;
+
+ NETDATA.timeout.set(function () {
+ state.autoRefresh(function () {
+ NETDATA.chartRefresherNoParallel(++index, callback);
+ });
+ }, NETDATA.options.current.idle_between_charts);
+ }
+ }
+};
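+
+// Control-flow note: each visible chart is refreshed through a zero-delay
+// timeout whose callback recurses with ++index, so charts render strictly
+// one after another; once auto_refresher_fast_weight exceeds
+// fast_render_timeframe, pacing switches to idle_between_charts.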
+
+NETDATA.chartRefresherWaitTime = function () {
+ return NETDATA.options.current.idle_parallel_loops;
+};
+
+// the default refresher
+NETDATA.chartRefresherLastRun = 0;
+NETDATA.chartRefresherRunsAfterParseDom = 0;
+NETDATA.chartRefresherTimeoutId = undefined;
+
+NETDATA.chartRefresherReschedule = function () {
+ if (NETDATA.options.current.async_on_scroll) {
+ if (NETDATA.chartRefresherTimeoutId) {
+ NETDATA.timeout.clear(NETDATA.chartRefresherTimeoutId);
+ }
+ NETDATA.chartRefresherTimeoutId = NETDATA.timeout.set(NETDATA.chartRefresher, NETDATA.options.current.onscroll_worker_duration_threshold);
+ //console.log('chartRefresherReschedule()');
+ }
+};
+
+NETDATA.chartRefresher = function () {
+ // console.log('chartRefresher() begin ' + (Date.now() - NETDATA.chartRefresherLastRun).toString() + ' ms since last run');
+
+ if (NETDATA.options.page_is_visible === false
+ && NETDATA.options.current.stop_updates_when_focus_is_lost
+ && NETDATA.chartRefresherLastRun > NETDATA.options.last_page_resize
+ && NETDATA.chartRefresherLastRun > NETDATA.options.last_page_scroll
+ && NETDATA.chartRefresherRunsAfterParseDom > 10
+ ) {
+ setTimeout(
+ NETDATA.chartRefresher,
+ NETDATA.options.current.idle_lost_focus
+ );
+
+ // console.log('chartRefresher() page without focus, will run in ' + NETDATA.options.current.idle_lost_focus.toString() + ' ms, ' + NETDATA.chartRefresherRunsAfterParseDom.toString());
+ return;
+ }
+ NETDATA.chartRefresherRunsAfterParseDom++;
+
+ let now = Date.now();
+ NETDATA.chartRefresherLastRun = now;
+
+ if (now < NETDATA.options.on_scroll_refresher_stop_until) {
+ NETDATA.chartRefresherTimeoutId = NETDATA.timeout.set(
+ NETDATA.chartRefresher,
+ NETDATA.chartRefresherWaitTime()
+ );
+
+ // console.log('chartRefresher() end1 will run in ' + NETDATA.chartRefresherWaitTime().toString() + ' ms');
+ return;
+ }
+
+ if (now < NETDATA.options.auto_refresher_stop_until) {
+ NETDATA.chartRefresherTimeoutId = NETDATA.timeout.set(
+ NETDATA.chartRefresher,
+ NETDATA.chartRefresherWaitTime()
+ );
+
+ // console.log('chartRefresher() end2 will run in ' + NETDATA.chartRefresherWaitTime().toString() + ' ms');
+ return;
+ }
+
+ if (NETDATA.options.pause) {
+ // console.log('auto-refresher is paused');
+ NETDATA.chartRefresherTimeoutId = NETDATA.timeout.set(
+ NETDATA.chartRefresher,
+ NETDATA.chartRefresherWaitTime()
+ );
+
+ // console.log('chartRefresher() end3 will run in ' + NETDATA.chartRefresherWaitTime().toString() + ' ms');
+ return;
+ }
+
+ if (typeof NETDATA.options.pauseCallback === 'function') {
+ // console.log('auto-refresher is calling pauseCallback');
+
+ NETDATA.options.pause = true;
+ NETDATA.options.pauseCallback();
+ NETDATA.chartRefresher();
+
+ // console.log('chartRefresher() end4 (nested)');
+ return;
+ }
+
+ if (!NETDATA.options.current.parallel_refresher) {
+ // console.log('auto-refresher is calling chartRefresherNoParallel(0)');
+ NETDATA.chartRefresherNoParallel(0, function () {
+ NETDATA.chartRefresherTimeoutId = NETDATA.timeout.set(
+ NETDATA.chartRefresher,
+ NETDATA.options.current.idle_between_loops
+ );
+ });
+ // console.log('chartRefresher() end5 (no parallel, nested)');
+ return;
+ }
+
+ if (NETDATA.options.updated_dom) {
+ // the dom has been updated
+ // get the dom parts again
+ // console.log('auto-refresher is calling parseDom()');
+ NETDATA.parseDom(NETDATA.chartRefresher);
+ // console.log('chartRefresher() end6 (parseDom)');
+ return;
+ }
+
+ if (!NETDATA.globalSelectionSync.active()) {
+ let parallel = [];
+ let targets = NETDATA.intersectionObserver.targets();
+ let len = targets.length;
+ let state;
+ while (len--) {
+ state = targets[len];
+ if (state.running || state.isVisible() === false) {
+ continue;
+ }
+
+ if (!state.library.initialized) {
+ if (state.library.enabled) {
+ state.library.initialize(NETDATA.chartRefresher);
+ //console.log('chartRefresher() end6 (library init)');
+ return;
+ }
+ else {
+ state.error('chart library "' + state.library_name + '" is not enabled.');
+ }
+ }
+
+ if (NETDATA.scrollUp) {
+ parallel.unshift(state);
+ } else {
+ parallel.push(state);
+ }
+ }
+
+ len = parallel.length;
+ while (len--) {
+ state = parallel[len];
+ // console.log('auto-refresher executing in parallel for ' + parallel.length.toString() + ' charts');
+ // this will execute the jobs in parallel
+
+ if (!state.running) {
+ NETDATA.timeout.set(state.autoRefresh, 0);
+ }
+ }
+ //else {
+ // console.log('auto-refresher nothing to do');
+ //}
+ }
+
+ // run the next refresh iteration
+ NETDATA.chartRefresherTimeoutId = NETDATA.timeout.set(
+ NETDATA.chartRefresher,
+ NETDATA.chartRefresherWaitTime()
+ );
+
+ //console.log('chartRefresher() completed in ' + (Date.now() - now).toString() + ' ms');
+};
+
+NETDATA.parseDom = function (callback) {
+ //console.log('parseDom()');
+
+ NETDATA.options.last_page_scroll = Date.now();
+ NETDATA.options.updated_dom = false;
+ NETDATA.chartRefresherRunsAfterParseDom = 0;
+
+ let targets = $('div[data-netdata]'); //.filter(':visible');
+
+ if (NETDATA.options.debug.main_loop) {
+ console.log('DOM updated - there are ' + targets.length + ' charts on page.');
+ }
+
+ NETDATA.intersectionObserver.globalReset();
+ NETDATA.options.targets = [];
+ let len = targets.length;
+ while (len--) {
+ // the initialization will take care of sizing
+ // and the "loading..." message
+ let state = NETDATA.chartState(targets[len]);
+ NETDATA.options.targets.push(state);
+ NETDATA.intersectionObserver.observe(state);
+ }
+
+ if (NETDATA.globalChartUnderlay.isActive()) {
+ NETDATA.globalChartUnderlay.setup();
+ } else {
+ NETDATA.globalChartUnderlay.clear();
+ }
+
+ if (typeof callback === 'function') {
+ return callback();
+ }
+};
+
+// this is the main function - where everything starts
+NETDATA.started = false;
+NETDATA.start = function () {
+ // this should be called only once
+
+ if (NETDATA.started) {
+ console.log('netdata is already started');
+ return;
+ }
+
+ NETDATA.started = true;
+ NETDATA.options.page_is_visible = true;
+
+ $(window).blur(function () {
+ if (NETDATA.options.current.stop_updates_when_focus_is_lost) {
+ NETDATA.options.page_is_visible = false;
+ if (NETDATA.options.debug.focus) {
+ console.log('Lost Focus!');
+ }
+ }
+ });
+
+ $(window).focus(function () {
+ if (NETDATA.options.current.stop_updates_when_focus_is_lost) {
+ NETDATA.options.page_is_visible = true;
+ if (NETDATA.options.debug.focus) {
+ console.log('Focus restored!');
+ }
+ }
+ });
+
+ if (typeof document.hasFocus === 'function' && !document.hasFocus()) {
+ if (NETDATA.options.current.stop_updates_when_focus_is_lost) {
+ NETDATA.options.page_is_visible = false;
+ if (NETDATA.options.debug.focus) {
+ console.log('Document has no focus!');
+ }
+ }
+ }
+
+ // bootstrap tab switching
+ $('a[data-toggle="tab"]').on('shown.bs.tab', NETDATA.onscroll);
+
+ // bootstrap modal switching
+ let $modal = $('.modal');
+ $modal.on('hidden.bs.modal', NETDATA.onscroll);
+ $modal.on('shown.bs.modal', NETDATA.onscroll);
+
+ // bootstrap collapse switching
+ let $collapse = $('.collapse');
+ $collapse.on('hidden.bs.collapse', NETDATA.onscroll);
+ $collapse.on('shown.bs.collapse', NETDATA.onscroll);
+
+ NETDATA.parseDom(NETDATA.chartRefresher);
+
+ // Alarms initialization
+ setTimeout(NETDATA.alarms.init, 1000);
+
+ // Registry initialization
+ setTimeout(NETDATA.registry.init, netdataRegistryAfterMs);
+
+ if (typeof netdataCallback === 'function') {
+ netdataCallback();
+ }
+};
+
+NETDATA.globalReset = function () {
+ NETDATA.intersectionObserver.globalReset();
+ NETDATA.globalSelectionSync.globalReset();
+ NETDATA.globalPanAndZoom.globalReset();
+ NETDATA.chartRegistry.globalReset();
+ NETDATA.commonMin.globalReset();
+ NETDATA.commonMax.globalReset();
+ NETDATA.commonColors.globalReset();
+ NETDATA.unitsConversion.globalReset();
+ NETDATA.options.targets = [];
+ NETDATA.parseDom();
+ NETDATA.unpause();
+};
diff --git a/web/gui/src/dashboard.js/options.js b/web/gui/src/dashboard.js/options.js
new file mode 100644
index 0000000..68132e7
--- /dev/null
+++ b/web/gui/src/dashboard.js/options.js
@@ -0,0 +1,225 @@
+
+NETDATA.icons = {
+ left: '<i class="fas fa-backward"></i>',
+ reset: '<i class="fas fa-play"></i>',
+ right: '<i class="fas fa-forward"></i>',
+ zoomIn: '<i class="fas fa-plus"></i>',
+ zoomOut: '<i class="fas fa-minus"></i>',
+ resize: '<i class="fas fa-sort"></i>',
+ lineChart: '<i class="fas fa-chart-line"></i>',
+ areaChart: '<i class="fas fa-chart-area"></i>',
+ noChart: '<i class="fas fa-chart-area"></i>',
+ loading: '<i class="fas fa-sync-alt"></i>',
+ noData: '<i class="fas fa-exclamation-triangle"></i>'
+};
+
+if (typeof netdataIcons === 'object') {
+ // for (let icon in NETDATA.icons) {
+ // if (NETDATA.icons.hasOwnProperty(icon) && typeof(netdataIcons[icon]) === 'string')
+ // NETDATA.icons[icon] = netdataIcons[icon];
+ // }
+ for (let icon of Object.keys(NETDATA.icons)) {
+ if (typeof(netdataIcons[icon]) === 'string') {
+ NETDATA.icons[icon] = netdataIcons[icon];
+ }
+ }
+}
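+
+// Illustrative sketch (not executed here): a page can replace any of the
+// icons above by defining `netdataIcons` before dashboard.js is loaded,
+// e.g. with a hypothetical spinner override:
+//
+// var netdataIcons = {
+//     loading: '<i class="fas fa-spinner"></i>'
+// };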
+
+if (typeof netdataSnapshotData === 'undefined') {
+ netdataSnapshotData = null;
+}
+
+if (typeof netdataShowHelp === 'undefined') {
+ netdataShowHelp = true;
+}
+
+if (typeof netdataShowAlarms === 'undefined') {
+ netdataShowAlarms = false;
+}
+
+if (typeof netdataRegistryAfterMs !== 'number' || netdataRegistryAfterMs < 0) {
+ netdataRegistryAfterMs = 0; // 1500;
+}
+
+if (typeof netdataRegistry === 'undefined') {
+ // backward compatibility
+ netdataRegistry = (typeof netdataNoRegistry !== 'undefined' && netdataNoRegistry === false);
+}
+
+if (netdataRegistry === false && typeof netdataRegistryCallback === 'function') {
+ netdataRegistry = true;
+}
+
+// ----------------------------------------------------------------------------------------------------------------
+// the defaults for all charts
+
+// if the user does not specify any of these, the following will be used
+
+NETDATA.chartDefaults = {
+ width: '100%', // the chart width - can be null
+ height: '100%', // the chart height - can be null
+ min_width: null, // the chart minimum width - can be null
+ library: 'dygraph', // the graphing library to use
+ method: 'average', // the grouping method
+ before: 0, // panning
+ after: -600, // panning
+ pixels_per_point: 1, // the detail of the chart
+ fill_luminance: 0.8 // luminance of colors in solid areas
+};
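+
+// Illustrative sketch (not executed here): any of these defaults can be
+// overridden per chart with data attributes on the chart element, e.g.:
+//
+// <div data-netdata="system.cpu"
+//      data-chart-library="dygraph"
+//      data-after="-300"></div>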
+
+// ----------------------------------------------------------------------------------------------------------------
+// global options
+
+NETDATA.options = {
+ pauseCallback: null, // a callback when we are really paused
+
+ pause: false, // when enabled we don't auto-refresh the charts
+
+ targets: [], // an array of all the state objects that are
+ // currently active (independently of their
+ // viewport visibility)
+
+ updated_dom: true, // when true, the DOM has been updated with
+ // new elements we have to check.
+
+ auto_refresher_fast_weight: 0, // this is the current time in ms, spent
+ // rendering charts continuously.
+ // used with .current.fast_render_timeframe
+
+ page_is_visible: true, // when true, this page is visible
+
+ auto_refresher_stop_until: 0, // timestamp in ms - used internally, to stop the
+ // auto-refresher for some time (when a chart is
+ // performing pan or zoom, we need to stop refreshing
+ // all other charts, to have the maximum speed for
+ // rendering the chart that is panned or zoomed).
+ // Used with .current.global_pan_sync_time
+
+ on_scroll_refresher_stop_until: 0, // timestamp in ms - used to stop evaluating
+ // charts for some time, after a page scroll
+
+ last_page_resize: Date.now(), // the timestamp of the last resize request
+
+ last_page_scroll: 0, // the timestamp the last time the page was scrolled
+
+ browser_timezone: 'unknown', // timezone detected by javascript
+ server_timezone: 'unknown', // timezone reported by the server
+
+ force_data_points: 0, // force the number of points to be returned for charts
+ fake_chart_rendering: false, // when set to true, the dashboard will download data but will not render the charts
+
+ passive_events: null, // true if the browser supports passive events
+
+ // the current profile
+ // we may have many...
+ current: {
+ units: 'auto', // can be 'auto' or 'original'
+ temperature: 'celsius', // can be 'celsius' or 'fahrenheit'
+ seconds_as_time: true, // show seconds as DDd:HH:MM:SS ?
+ timezone: 'default', // the timezone to use, or 'default'
+ user_set_server_timezone: 'default', // as set by the user on the dashboard
+
+ legend_toolbox: true, // show the legend toolbox on charts
+ resize_charts: true, // show the resize handler on charts
+
+ pixels_per_point: isSlowDevice() ? 5 : 1, // the minimum pixels per point for all charts
+ // increase this to speed javascript up
+ // each chart library has its own limit too
+ // the max of this and the chart library is used
+ // the final is calculated every time, so a change
+ // here will have immediate effect on the next chart
+ // update
+
+ idle_between_charts: 100, // ms - how much time to wait between chart updates
+
+ fast_render_timeframe: 200, // ms - render continuously until this time of continuous
+ // rendering has been reached
+ // this setting is used to make it render e.g. 10
+ // charts at once, sleep idle_between_charts time
+ // and continue for another 10 charts.
+
+ idle_between_loops: 500, // ms - if all charts have been updated, wait this
+ // time before starting again.
+
+ idle_parallel_loops: 100, // ms - the time between parallel refresher updates
+
+ idle_lost_focus: 500, // ms - when the window does not have focus, check
+ // if focus has been regained, every this time
+
+ global_pan_sync_time: 300, // ms - when you pan or zoom a chart, the background
+ // auto-refreshing of charts is paused for this amount
+ // of time
+
+ sync_selection_delay: 400, // ms - when you pan or zoom a chart, wait this amount
+ // of time before setting up synchronized selections
+ // on hover.
+
+ sync_selection: true, // enable or disable selection sync
+
+ pan_and_zoom_delay: 50, // when panning or zooming, how often to update the chart
+
+ sync_pan_and_zoom: true, // enable or disable pan and zoom sync
+
+ pan_and_zoom_data_padding: true, // fetch more data for the master chart when panning or zooming
+
+ update_only_visible: true, // enable or disable visibility management / used for printing
+
+ parallel_refresher: !isSlowDevice(), // enable parallel refresh of charts
+
+ concurrent_refreshes: true, // when parallel_refresher is enabled, sync also the charts
+
+ destroy_on_hide: isSlowDevice(), // destroy charts when they are not visible
+
+ show_help: netdataShowHelp, // when enabled the charts will show some help
+ show_help_delay_show_ms: 500,
+ show_help_delay_hide_ms: 0,
+
+ eliminate_zero_dimensions: true, // do not show dimensions with just zeros
+
+ stop_updates_when_focus_is_lost: true, // boolean - shall we stop auto-refreshes when document does not have user focus
+ stop_updates_while_resizing: 1000, // ms - time to stop auto-refreshes while resizing the charts
+
+ double_click_speed: 500, // ms - time between clicks / taps to detect double click/tap
+
+ smooth_plot: !isSlowDevice(), // enable smooth plot, where possible
+
+ color_fill_opacity_line: 1.0,
+ color_fill_opacity_area: 0.2,
+ color_fill_opacity_stacked: 0.8,
+
+ pan_and_zoom_factor: 0.25, // the increment when panning and zooming with the toolbox
+ pan_and_zoom_factor_multiplier_control: 2.0,
+ pan_and_zoom_factor_multiplier_shift: 3.0,
+ pan_and_zoom_factor_multiplier_alt: 4.0,
+
+ abort_ajax_on_scroll: false, // kill pending ajax page scroll
+ async_on_scroll: false, // sync/async onscroll handler
+ onscroll_worker_duration_threshold: 30, // time in ms, for async scroll handler
+
+ retries_on_data_failures: 3, // how many retries to make if we can't fetch chart data from the server
+
+ setOptionCallback: function () {
+ }
+ },
+
+ debug: {
+ show_boxes: false,
+ main_loop: false,
+ focus: false,
+ visibility: false,
+ chart_data_url: false,
+ chart_errors: true, // remember to set it to false before merging
+ chart_timing: false,
+ chart_calls: false,
+ libraries: false,
+ dygraph: false,
+ globalSelectionSync: false,
+ globalPanAndZoom: false
+ }
+};
+
+NETDATA.statistics = {
+ refreshes_total: 0,
+ refreshes_active: 0,
+ refreshes_active_max: 0
+};
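+
+// Illustrative sketch (not executed here): custom dashboards typically tune
+// these options after dashboard.js has loaded, e.g. to lower browser load:
+//
+// NETDATA.options.current.destroy_on_hide = true;
+// NETDATA.options.current.parallel_refresher = false;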
diff --git a/web/gui/src/dashboard.js/prologue.js.inc b/web/gui/src/dashboard.js/prologue.js.inc
new file mode 100644
index 0000000..afa1f0e
--- /dev/null
+++ b/web/gui/src/dashboard.js/prologue.js.inc
@@ -0,0 +1,84 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+// DO NOT EDIT: This file is automatically generated from the source files in src/
+
+// ----------------------------------------------------------------------------
+// You can set the following variables before loading this script:
+
+// 'use strict';
+
+/*global netdataNoDygraphs *//* boolean, disable dygraph charts
+ * (default: false) */
+/*global netdataNoSparklines *//* boolean, disable sparkline charts
+ * (default: false) */
+/*global netdataNoPeitys *//* boolean, disable peity charts
+ * (default: false) */
+/*global netdataNoGoogleCharts *//* boolean, disable google charts
+ * (default: false) */
+/*global netdataNoMorris *//* boolean, disable morris charts
+ * (default: false) */
+/*global netdataNoEasyPieChart *//* boolean, disable easypiechart charts
+ * (default: false) */
+/*global netdataNoGauge *//* boolean, disable gauge.js charts
+ * (default: false) */
+/*global netdataNoD3 *//* boolean, disable d3 charts
+ * (default: false) */
+/*global netdataNoC3 *//* boolean, disable c3 charts
+ * (default: false) */
+/*global netdataNoD3pie *//* boolean, disable d3pie charts
+ * (default: false) */
+/*global netdataNoBootstrap *//* boolean, disable bootstrap - disables help too
+ * (default: false) */
+/*global netdataNoFontAwesome *//* boolean, disable fontawesome (do not load it)
+ * (default: false) */
+/*global netdataIcons *//* object, overwrite netdata fontawesome icons
+ * (default: null) */
+/*global netdataDontStart *//* boolean, do not start the thread to process the charts
+ * (default: false) */
+/*global netdataErrorCallback *//* function, callback to be called when the dashboard encounters an error
+ * (default: null) */
+/*global netdataRegistry:true *//* boolean, use the netdata registry
+ * (default: false) */
+/*global netdataNoRegistry *//* boolean, included only for compatibility with existing custom dashboards
+ * (obsolete - do not use this any more) */
+/*global netdataRegistryCallback *//* function, callback that will be invoked with one param: the URLs from the registry
+ * (default: null) */
+/*global netdataShowHelp:true *//* boolean, disable charts help
+ * (default: true) */
+/*global netdataShowAlarms:true *//* boolean, enable alarms checks and notifications
+ * (default: false) */
+/*global netdataRegistryAfterMs:true *//* ms, delay registry use at startup
+ * (default: 1500) */
+/*global netdataCallback *//* function, callback to be called when netdata is ready to start
+ * (default: null)
+ * netdata will be running while this is called
+ * (call NETDATA.pause to stop it) */
+/*global netdataPrepCallback *//* function, callback to be called before netdata does anything else
+ * (default: null) */
+/*global netdataServer *//* string, the URL of the netdata server to use
+ * (default: the URL the page is hosted at) */
+/*global netdataServerStatic *//* string, the URL of the netdata server to use for static files
+ * (default: netdataServer) */
+/*global netdataSnapshotData *//* object, a netdata snapshot loaded
+ * (default: null) */
+/*global netdataAlarmsRecipients *//* array, an array of alarm recipients to show notifications for
+ * (default: null) */
+/*global netdataAlarmsRemember *//* boolean, keep our position in the alarm log in browser local storage
+ * (default: true) */
+/*global netdataAlarmsActiveCallback *//* function, a hook for the alarm logs
+ * (default: undefined) */
+/*global netdataAlarmsNotifCallback *//* function, a hook for alarm notifications
+ * (default: undefined) */
+/*global netdataIntersectionObserver *//* boolean, enable or disable the use of intersection observer
+ * (default: true) */
+/*global netdataCheckXSS *//* boolean, enable or disable checking for XSS issues
+ * (default: false) */
+
+// ----------------------------------------------------------------------------
+// global namespace
+
+// Should stay var!
+var NETDATA = window.NETDATA || {};
+
+(function(window, document, $, undefined) {
+
diff --git a/web/gui/src/dashboard.js/registry.js b/web/gui/src/dashboard.js/registry.js
new file mode 100644
index 0000000..090ef52
--- /dev/null
+++ b/web/gui/src/dashboard.js/registry.js
@@ -0,0 +1,311 @@
+
+// Registry of netdata hosts
+
+NETDATA.registry = {
+ server: null, // the netdata registry server
+ isCloudEnabled: false, // is netdata.cloud functionality enabled?
+ cloudBaseURL: null, // the netdata cloud base url
+ person_guid: null, // the unique ID of this browser / user
+ machine_guid: null, // the unique ID of the netdata server that served dashboard.js
+ hostname: 'unknown', // the hostname of the netdata server that served dashboard.js
+ machines: null, // the user's other URLs
+ machines_array: null, // the user's other URLs in an array
+ person_urls: null,
+ anonymous_statistics_checked: false,
+ MASKED_DATA: "***",
+
+ isUsingGlobalRegistry: function() {
+ return NETDATA.registry.server === "https://registry.my-netdata.io";
+ },
+
+ isRegistryEnabled: function() {
+ return !(NETDATA.registry.isUsingGlobalRegistry() || isSignedIn());
+ },
+
+ parsePersonUrls: function (person_urls) {
+ NETDATA.registry.person_urls = person_urls;
+
+ if (person_urls) {
+ NETDATA.registry.machines = {};
+ NETDATA.registry.machines_array = [];
+
+ let apu = person_urls;
+ let i = apu.length;
+ while (i--) {
+ if (typeof NETDATA.registry.machines[apu[i][0]] === 'undefined') {
+ // console.log('adding: ' + apu[i][4] + ', ' + ((now - apu[i][2]) / 1000).toString());
+
+ let obj = {
+ guid: apu[i][0],
+ url: apu[i][1],
+ last_t: apu[i][2],
+ accesses: apu[i][3],
+ name: apu[i][4],
+ alternate_urls: []
+ };
+ obj.alternate_urls.push(apu[i][1]);
+
+ NETDATA.registry.machines[apu[i][0]] = obj;
+ NETDATA.registry.machines_array.push(obj);
+ } else {
+ // console.log('appending: ' + apu[i][4] + ', ' + ((now - apu[i][2]) / 1000).toString());
+
+ let pu = NETDATA.registry.machines[apu[i][0]];
+ if (pu.last_t < apu[i][2]) {
+ pu.url = apu[i][1];
+ pu.last_t = apu[i][2];
+ pu.name = apu[i][4];
+ }
+ pu.accesses += apu[i][3];
+ pu.alternate_urls.push(apu[i][1]);
+ }
+ }
+ }
+
+ if (typeof netdataRegistryCallback === 'function') {
+ netdataRegistryCallback(NETDATA.registry.machines_array);
+ }
+ },
+
+ init: function () {
+ if (netdataRegistry !== true) {
+ return;
+ }
+
+ NETDATA.registry.hello(NETDATA.serverDefault, function (data) {
+ if (data) {
+ NETDATA.registry.server = data.registry;
+ if (data.cloud_base_url !== "") {
+ NETDATA.registry.isCloudEnabled = true;
+ NETDATA.registry.cloudBaseURL = data.cloud_base_url;
+ } else {
+ NETDATA.registry.isCloudEnabled = false;
+ NETDATA.registry.cloudBaseURL = "";
+ }
+ NETDATA.registry.machine_guid = data.machine_guid;
+ NETDATA.registry.hostname = data.hostname;
+ if (!NETDATA.registry.anonymous_statistics_checked) {
+ NETDATA.registry.anonymous_statistics_checked = true;
+ if (data.anonymous_statistics) {
+ (function(w,d,s,l,i){w[l]=w[l]||[];w[l].push({'gtm.start':
+ new Date().getTime(),event:'gtm.js'});var f=d.getElementsByTagName(s)[0],
+ j=d.createElement(s),dl=l!='dataLayer'?'&l='+l:'';j.async=false;j.src=
+ 'https://www.googletagmanager.com/gtm.js?id='+i+dl;f.parentNode.insertBefore(j,f);
+ })(window,document,'script','dataLayer','GTM-N6CBMJD');
+ dataLayer.push({"anonymous_statistics" : "true", "machine_guid" : data.machine_guid});
+ }
+ }
+ NETDATA.registry.access(2, function (person_urls) {
+ NETDATA.registry.parsePersonUrls(person_urls);
+ });
+ }
+ });
+ },
+
+ hello: function (host, callback) {
+ host = NETDATA.fixHost(host);
+
+ // send HELLO to a netdata server:
+ // 1. verifies the server is reachable
+ // 2. responds with the registry URL, the machine GUID of this netdata server and its hostname
+ $.ajax({
+ url: host + '/api/v1/registry?action=hello',
+ async: true,
+ cache: false,
+ headers: {
+ 'Cache-Control': 'no-cache, no-store',
+ 'Pragma': 'no-cache'
+ },
+ xhrFields: {withCredentials: true} // required for the cookie
+ })
+ .done(function (data) {
+ data = NETDATA.xss.checkOptional('/api/v1/registry?action=hello', data);
+
+ if (typeof data.status !== 'string' || data.status !== 'ok') {
+ NETDATA.error(408, host + ' response: ' + JSON.stringify(data));
+ data = null;
+ }
+
+ if (typeof callback === 'function') {
+ return callback(data);
+ }
+ })
+ .fail(function () {
+ NETDATA.error(407, host);
+
+ if (typeof callback === 'function') {
+ return callback(null);
+ }
+ });
+ },
+
+ access: function (max_redirects, callback) {
+ let name = NETDATA.registry.MASKED_DATA;
+ let url = NETDATA.registry.MASKED_DATA;
+
+ if (!NETDATA.registry.isUsingGlobalRegistry()) {
+ // If the user is using a private registry keep sending identifiable
+ // data.
+ name = NETDATA.registry.hostname;
+ url = NETDATA.serverDefault;
+ }
+
+ console.log("ACCESS", name, url);
+
+ // send ACCESS to a netdata registry:
+ // 1. it lets it know we are accessing a netdata server (its machine GUID and its URL)
+ // 2. it responds with a list of netdata servers we know
+ // the registry identifies us using a cookie it sets the first time we access it
+ // the registry may respond with a redirect URL to send us to another registry
+ $.ajax({
+ url: NETDATA.registry.server + '/api/v1/registry?action=access&machine=' + NETDATA.registry.machine_guid + '&name=' + encodeURIComponent(name) + '&url=' + encodeURIComponent(url), // + '&visible_url=' + encodeURIComponent(document.location),
+ async: true,
+ cache: false,
+ headers: {
+ 'Cache-Control': 'no-cache, no-store',
+ 'Pragma': 'no-cache'
+ },
+ xhrFields: {withCredentials: true} // required for the cookie
+ })
+ .done(function (data) {
+ data = NETDATA.xss.checkAlways('/api/v1/registry?action=access', data);
+
+ let redirect = null;
+ if (typeof data.registry === 'string') {
+ redirect = data.registry;
+ }
+
+ if (typeof data.status !== 'string' || data.status !== 'ok') {
+ NETDATA.error(409, NETDATA.registry.server + ' responded with: ' + JSON.stringify(data));
+ data = null;
+ }
+
+ if (data === null) {
+ if (redirect !== null && max_redirects > 0) {
+ NETDATA.registry.server = redirect;
+ NETDATA.registry.access(max_redirects - 1, callback);
+ }
+ else {
+ if (typeof callback === 'function') {
+ return callback(null);
+ }
+ }
+ } else {
+ if (typeof data.person_guid === 'string') {
+ NETDATA.registry.person_guid = data.person_guid;
+ }
+
+ if (typeof callback === 'function') {
+ const urls = data.urls.filter((u) => u[1] !== NETDATA.registry.MASKED_DATA);
+ return callback(urls);
+ }
+ }
+ })
+ .fail(function () {
+ NETDATA.error(410, NETDATA.registry.server);
+
+ if (typeof callback === 'function') {
+ return callback(null);
+ }
+ });
+ },
+
+ delete: function (delete_url, callback) {
+ // send DELETE to a netdata registry:
+ $.ajax({
+ url: NETDATA.registry.server + '/api/v1/registry?action=delete&machine=' + NETDATA.registry.machine_guid + '&name=' + encodeURIComponent(NETDATA.registry.hostname) + '&url=' + encodeURIComponent(NETDATA.serverDefault) + '&delete_url=' + encodeURIComponent(delete_url),
+ async: true,
+ cache: false,
+ headers: {
+ 'Cache-Control': 'no-cache, no-store',
+ 'Pragma': 'no-cache'
+ },
+ xhrFields: {withCredentials: true} // required for the cookie
+ })
+ .done(function (data) {
+ data = NETDATA.xss.checkAlways('/api/v1/registry?action=delete', data);
+
+ if (typeof data.status !== 'string' || data.status !== 'ok') {
+ NETDATA.error(411, NETDATA.registry.server + ' responded with: ' + JSON.stringify(data));
+ data = null;
+ }
+
+ if (typeof callback === 'function') {
+ return callback(data);
+ }
+ })
+ .fail(function () {
+ NETDATA.error(412, NETDATA.registry.server);
+
+ if (typeof callback === 'function') {
+ return callback(null);
+ }
+ });
+ },
+
+ search: function (machine_guid, callback) {
+ // SEARCH for the URLs of a machine:
+ $.ajax({
+ url: NETDATA.registry.server + '/api/v1/registry?action=search&machine=' + NETDATA.registry.machine_guid + '&name=' + encodeURIComponent(NETDATA.registry.hostname) + '&url=' + encodeURIComponent(NETDATA.serverDefault) + '&for=' + machine_guid,
+ async: true,
+ cache: false,
+ headers: {
+ 'Cache-Control': 'no-cache, no-store',
+ 'Pragma': 'no-cache'
+ },
+ xhrFields: {withCredentials: true} // required for the cookie
+ })
+ .done(function (data) {
+ data = NETDATA.xss.checkAlways('/api/v1/registry?action=search', data);
+
+ if (typeof data.status !== 'string' || data.status !== 'ok') {
+ NETDATA.error(417, NETDATA.registry.server + ' responded with: ' + JSON.stringify(data));
+ data = null;
+ }
+
+ if (typeof callback === 'function') {
+ return callback(data);
+ }
+ })
+ .fail(function () {
+ NETDATA.error(418, NETDATA.registry.server);
+
+ if (typeof callback === 'function') {
+ return callback(null);
+ }
+ });
+ },
+
+ switch: function (new_person_guid, callback) {
+ // impersonate
+ $.ajax({
+ url: NETDATA.registry.server + '/api/v1/registry?action=switch&machine=' + NETDATA.registry.machine_guid + '&name=' + encodeURIComponent(NETDATA.registry.hostname) + '&url=' + encodeURIComponent(NETDATA.serverDefault) + '&to=' + new_person_guid,
+ async: true,
+ cache: false,
+ headers: {
+ 'Cache-Control': 'no-cache, no-store',
+ 'Pragma': 'no-cache'
+ },
+ xhrFields: {withCredentials: true} // required for the cookie
+ })
+ .done(function (data) {
+ data = NETDATA.xss.checkAlways('/api/v1/registry?action=switch', data);
+
+ if (typeof data.status !== 'string' || data.status !== 'ok') {
+ NETDATA.error(413, NETDATA.registry.server + ' responded with: ' + JSON.stringify(data));
+ data = null;
+ }
+
+ if (typeof callback === 'function') {
+ return callback(data);
+ }
+ })
+ .fail(function () {
+ NETDATA.error(414, NETDATA.registry.server);
+
+ if (typeof callback === 'function') {
+ return callback(null);
+ }
+ });
+ }
+};
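+
+// Illustrative sketch (not executed here): hello() can probe any netdata
+// server; the callback receives the parsed response, or null on failure:
+//
+// NETDATA.registry.hello('http://my.server:19999', function (data) {
+//     if (data) console.log('reachable, machine_guid: ' + data.machine_guid);
+// });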
diff --git a/web/gui/src/dashboard.js/server-detection.js b/web/gui/src/dashboard.js/server-detection.js
new file mode 100644
index 0000000..472ad48
--- /dev/null
+++ b/web/gui/src/dashboard.js/server-detection.js
@@ -0,0 +1,29 @@
+
+// *** src/dashboard.js/server-detection.js
+
+if (typeof netdataServer !== 'undefined') {
+ NETDATA.serverDefault = netdataServer;
+} else {
+ let s = NETDATA._scriptSource();
+ if (s) {
+ NETDATA.serverDefault = s.replace(/\/dashboard\.js(\?.*)?$/g, "");
+ } else {
+ console.log('WARNING: Cannot detect the URL of the netdata server.');
+ NETDATA.serverDefault = null;
+ }
+}
+
+if (NETDATA.serverDefault === null) {
+ NETDATA.serverDefault = '';
+} else if (NETDATA.serverDefault.slice(-1) !== '/') {
+ NETDATA.serverDefault += '/';
+}
+
+if (typeof netdataServerStatic !== 'undefined' && netdataServerStatic !== null && netdataServerStatic !== '') {
+ NETDATA.serverStatic = netdataServerStatic;
+ if (NETDATA.serverStatic.slice(-1) !== '/') {
+ NETDATA.serverStatic += '/';
+ }
+} else {
+ NETDATA.serverStatic = NETDATA.serverDefault;
+}
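+
+// Illustrative note: with dashboard.js served from
+// http://my.server:19999/dashboard.js, the code above sets
+// NETDATA.serverDefault to 'http://my.server:19999/' (trailing slash included).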
diff --git a/web/gui/src/dashboard.js/themes.js b/web/gui/src/dashboard.js/themes.js
new file mode 100644
index 0000000..d5126a6
--- /dev/null
+++ b/web/gui/src/dashboard.js/themes.js
@@ -0,0 +1,92 @@
+// Codacy declarations
+/* global netdataTheme */
+
+NETDATA.themes = {
+ white: {
+ bootstrap_css: NETDATA.serverStatic + 'css/bootstrap-3.3.7.css',
+ dashboard_css: NETDATA.serverStatic + 'dashboard.css?v20190902-0',
+ background: '#FFFFFF',
+ foreground: '#000000',
+ grid: '#F0F0F0',
+ axis: '#F0F0F0',
+ highlight: '#F5F5F5',
+ colors: ['#3366CC', '#DC3912', '#109618', '#FF9900', '#990099', '#DD4477',
+ '#3B3EAC', '#66AA00', '#0099C6', '#B82E2E', '#AAAA11', '#5574A6',
+ '#994499', '#22AA99', '#6633CC', '#E67300', '#316395', '#8B0707',
+ '#329262', '#3B3EAC'],
+ easypiechart_track: '#f0f0f0',
+ easypiechart_scale: '#dfe0e0',
+ gauge_pointer: '#C0C0C0',
+ gauge_stroke: '#F0F0F0',
+ gauge_gradient: false,
+ d3pie: {
+ title: '#333333',
+ subtitle: '#666666',
+ footer: '#888888',
+ other: '#aaaaaa',
+ mainlabel: '#333333',
+ percentage: '#dddddd',
+ value: '#aaaa22',
+ tooltip_bg: '#000000',
+ tooltip_fg: '#efefef',
+ segment_stroke: "#ffffff",
+ gradient_color: '#000000'
+ }
+ },
+ slate: {
+ bootstrap_css: NETDATA.serverStatic + 'css/bootstrap-slate-flat-3.3.7.css?v20161229-1',
+ dashboard_css: NETDATA.serverStatic + 'dashboard.slate.css?v20190902-0',
+ background: '#272b30',
+ foreground: '#C8C8C8',
+ grid: '#283236',
+ axis: '#283236',
+ highlight: '#383838',
+ /* colors: [ '#55bb33', '#ff2222', '#0099C6', '#faa11b', '#adbce0', '#DDDD00',
+ '#4178ba', '#f58122', '#a5cc39', '#f58667', '#f5ef89', '#cf93c0',
+ '#a5d18a', '#b8539d', '#3954a3', '#c8a9cf', '#c7de8a', '#fad20a',
+ '#a6a479', '#a66da8' ],
+ */
+ colors: ['#66AA00', '#FE3912', '#3366CC', '#D66300', '#0099C6', '#DDDD00',
+ '#5054e6', '#EE9911', '#BB44CC', '#e45757', '#ef0aef', '#CC7700',
+ '#22AA99', '#109618', '#905bfd', '#f54882', '#4381bf', '#ff3737',
+ '#329262', '#3B3EFF'],
+ easypiechart_track: '#373b40',
+ easypiechart_scale: '#373b40',
+ gauge_pointer: '#474b50',
+ gauge_stroke: '#373b40',
+ gauge_gradient: false,
+ d3pie: {
+ title: '#C8C8C8',
+ subtitle: '#283236',
+ footer: '#283236',
+ other: '#283236',
+ mainlabel: '#C8C8C8',
+ percentage: '#dddddd',
+ value: '#cccc44',
+ tooltip_bg: '#272b30',
+ tooltip_fg: '#C8C8C8',
+ segment_stroke: "#283236",
+ gradient_color: '#000000'
+ }
+ }
+};
+
+if (typeof netdataTheme !== 'undefined' && typeof NETDATA.themes[netdataTheme] !== 'undefined') {
+ NETDATA.themes.current = NETDATA.themes[netdataTheme];
+} else {
+ NETDATA.themes.current = NETDATA.themes.white;
+}
+
+NETDATA.colors = NETDATA.themes.current.colors;
+
+// these are the colors Google Charts are using
+// we have them here to attempt emulate their look and feel on the other chart libraries
+// http://there4.io/2012/05/02/google-chart-color-list/
+//NETDATA.colors = [ '#3366CC', '#DC3912', '#FF9900', '#109618', '#990099', '#3B3EAC', '#0099C6',
+// '#DD4477', '#66AA00', '#B82E2E', '#316395', '#994499', '#22AA99', '#AAAA11',
+// '#6633CC', '#E67300', '#8B0707', '#329262', '#5574A6', '#3B3EAC' ];
+
+// an alternative set
+// http://www.mulinblog.com/a-color-palette-optimized-for-data-visualization/
+// (blue) (red) (orange) (green) (pink) (brown) (purple) (yellow) (gray)
+//NETDATA.colors = [ '#5DA5DA', '#F15854', '#FAA43A', '#60BD68', '#F17CB0', '#B2912F', '#B276B2', '#DECF3F', '#4D4D4D' ];
diff --git a/web/gui/src/dashboard.js/timeout.js b/web/gui/src/dashboard.js/timeout.js
new file mode 100644
index 0000000..4adf9bb
--- /dev/null
+++ b/web/gui/src/dashboard.js/timeout.js
@@ -0,0 +1,100 @@
+
+// *** src/dashboard.js/timeout.js
+
+// TODO: Better name needed
+
+NETDATA.timeout = {
+ // by default, these are just wrappers to setTimeout() / clearTimeout()
+
+ step: function (callback) {
+ return window.setTimeout(callback, 1000 / 60);
+ },
+
+ set: function (callback, delay) {
+ return window.setTimeout(callback, delay);
+ },
+
+ clear: function (id) {
+ return window.clearTimeout(id);
+ },
+
+ init: function () {
+ let custom = true;
+
+ if (window.requestAnimationFrame) {
+ this.step = function (callback) {
+ return window.requestAnimationFrame(callback);
+ };
+
+ this.clear = function (handle) {
+ return window.cancelAnimationFrame(handle.value);
+ };
+ // } else if (window.webkitRequestAnimationFrame) {
+ // this.step = function (callback) {
+ // return window.webkitRequestAnimationFrame(callback);
+ // };
+
+ // if (window.webkitCancelAnimationFrame) {
+ // this.clear = function (handle) {
+ // return window.webkitCancelAnimationFrame(handle.value);
+ // };
+ // } else if (window.webkitCancelRequestAnimationFrame) {
+ // this.clear = function (handle) {
+ // return window.webkitCancelRequestAnimationFrame(handle.value);
+ // };
+ // }
+ // } else if (window.mozRequestAnimationFrame) {
+ // this.step = function (callback) {
+ // return window.mozRequestAnimationFrame(callback);
+ // };
+
+ // this.clear = function (handle) {
+ // return window.mozCancelRequestAnimationFrame(handle.value);
+ // };
+ // } else if (window.oRequestAnimationFrame) {
+ // this.step = function (callback) {
+ // return window.oRequestAnimationFrame(callback);
+ // };
+
+ // this.clear = function (handle) {
+ // return window.oCancelRequestAnimationFrame(handle.value);
+ // };
+ // } else if (window.msRequestAnimationFrame) {
+ // this.step = function (callback) {
+ // return window.msRequestAnimationFrame(callback);
+ // };
+
+ // this.clear = function (handle) {
+ // return window.msCancelRequestAnimationFrame(handle.value);
+ // };
+ } else {
+ custom = false;
+ }
+
+ if (custom) {
+ // we have installed custom .step() / .clear() functions
+ // overwrite the .set() too
+
+ this.set = function (callback, delay) {
+ let start = Date.now(),
+ handle = {};
+
+ const loop = () => {
+ let current = Date.now(),
+ delta = current - start;
+
+ if (delta >= delay) {
+ callback.call();
+ } else {
+ handle.value = this.step(loop);
+ }
+ };
+
+ handle.value = this.step(loop);
+ return handle;
+ };
+ }
+ }
+};
+
+NETDATA.timeout.init();
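+
+// Illustrative sketch (not executed here): after init(), set()/clear() keep
+// the setTimeout()/clearTimeout() API, but ride on requestAnimationFrame
+// when the browser provides it:
+//
+// let handle = NETDATA.timeout.set(function () { console.log('tick'); }, 250);
+// NETDATA.timeout.clear(handle);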
diff --git a/web/gui/src/dashboard.js/units-conversion.js b/web/gui/src/dashboard.js/units-conversion.js
new file mode 100644
index 0000000..b0dbd29
--- /dev/null
+++ b/web/gui/src/dashboard.js/units-conversion.js
@@ -0,0 +1,528 @@
+NETDATA.unitsConversion = {
+ keys: {}, // keys for data-common-units
+ latest: {}, // latest selected units for data-common-units
+
+ globalReset: function () {
+ this.keys = {};
+ this.latest = {};
+ },
+
+ scalableUnits: {
+ 'packets/s': {
+ 'pps': 1,
+ 'Kpps': 1000,
+ 'Mpps': 1000000
+ },
+ 'pps': {
+ 'pps': 1,
+ 'Kpps': 1000,
+ 'Mpps': 1000000
+ },
+ 'kilobits/s': {
+ 'bits/s': 1 / 1000,
+ 'kilobits/s': 1,
+ 'megabits/s': 1000,
+ 'gigabits/s': 1000000,
+ 'terabits/s': 1000000000
+ },
+ 'bytes/s': {
+ 'bytes/s': 1,
+ 'kilobytes/s': 1024,
+ 'megabytes/s': 1024 * 1024,
+ 'gigabytes/s': 1024 * 1024 * 1024,
+ 'terabytes/s': 1024 * 1024 * 1024 * 1024
+ },
+ 'kilobytes/s': {
+ 'bytes/s': 1 / 1024,
+ 'kilobytes/s': 1,
+ 'megabytes/s': 1024,
+ 'gigabytes/s': 1024 * 1024,
+ 'terabytes/s': 1024 * 1024 * 1024
+ },
+ 'B/s': {
+ 'B/s': 1,
+ 'KiB/s': 1024,
+ 'MiB/s': 1024 * 1024,
+ 'GiB/s': 1024 * 1024 * 1024,
+ 'TiB/s': 1024 * 1024 * 1024 * 1024
+ },
+ 'KB/s': {
+ 'B/s': 1 / 1024,
+ 'KB/s': 1,
+ 'MB/s': 1024,
+ 'GB/s': 1024 * 1024,
+ 'TB/s': 1024 * 1024 * 1024
+ },
+ 'KiB/s': {
+ 'B/s': 1 / 1024,
+ 'KiB/s': 1,
+ 'MiB/s': 1024,
+ 'GiB/s': 1024 * 1024,
+ 'TiB/s': 1024 * 1024 * 1024
+ },
+ 'B': {
+ 'B': 1,
+ 'KiB': 1024,
+ 'MiB': 1024 * 1024,
+ 'GiB': 1024 * 1024 * 1024,
+ 'TiB': 1024 * 1024 * 1024 * 1024,
+ 'PiB': 1024 * 1024 * 1024 * 1024 * 1024
+ },
+ 'KB': {
+ 'B': 1 / 1024,
+ 'KB': 1,
+ 'MB': 1024,
+ 'GB': 1024 * 1024,
+ 'TB': 1024 * 1024 * 1024
+ },
+ 'KiB': {
+ 'B': 1 / 1024,
+ 'KiB': 1,
+ 'MiB': 1024,
+ 'GiB': 1024 * 1024,
+ 'TiB': 1024 * 1024 * 1024
+ },
+ 'MB': {
+ 'B': 1 / (1024 * 1024),
+ 'KB': 1 / 1024,
+ 'MB': 1,
+ 'GB': 1024,
+ 'TB': 1024 * 1024,
+ 'PB': 1024 * 1024 * 1024
+ },
+ 'MiB': {
+ 'B': 1 / (1024 * 1024),
+ 'KiB': 1 / 1024,
+ 'MiB': 1,
+ 'GiB': 1024,
+ 'TiB': 1024 * 1024,
+ 'PiB': 1024 * 1024 * 1024
+ },
+ 'GB': {
+ 'B': 1 / (1024 * 1024 * 1024),
+ 'KB': 1 / (1024 * 1024),
+ 'MB': 1 / 1024,
+ 'GB': 1,
+ 'TB': 1024,
+ 'PB': 1024 * 1024,
+ 'EB': 1024 * 1024 * 1024
+ },
+ 'GiB': {
+ 'B': 1 / (1024 * 1024 * 1024),
+ 'KiB': 1 / (1024 * 1024),
+ 'MiB': 1 / 1024,
+ 'GiB': 1,
+ 'TiB': 1024,
+ 'PiB': 1024 * 1024,
+ 'EiB': 1024 * 1024 * 1024
+ },
+ 'num': {
+ 'num': 1,
+ 'num (K)': 1000,
+ 'num (M)': 1000000,
+ 'num (G)': 1000000000,
+ 'num (T)': 1000000000000
+ }
+ /*
+ 'milliseconds': {
+ 'seconds': 1000
+ },
+ 'seconds': {
+ 'milliseconds': 0.001,
+ 'seconds': 1,
+ 'minutes': 60,
+ 'hours': 3600,
+ 'days': 86400
+ }
+ */
+ },
+
+ convertibleUnits: {
+ 'Celsius': {
+ 'Fahrenheit': {
+ check: function (max) {
+ void(max);
+ return NETDATA.options.current.temperature === 'fahrenheit';
+ },
+ convert: function (value) {
+ return value * 9 / 5 + 32;
+ }
+ }
+ },
+ 'celsius': {
+ 'fahrenheit': {
+ check: function (max) {
+ void(max);
+ return NETDATA.options.current.temperature === 'fahrenheit';
+ },
+ convert: function (value) {
+ return value * 9 / 5 + 32;
+ }
+ }
+ },
+ 'seconds': {
+ 'time': {
+ check: function (max) {
+ void(max);
+ return NETDATA.options.current.seconds_as_time;
+ },
+ convert: function (seconds) {
+ return NETDATA.unitsConversion.seconds2time(seconds);
+ }
+ }
+ },
+ 'milliseconds': {
+ 'milliseconds': {
+ check: function (max) {
+ return NETDATA.options.current.seconds_as_time && max < 1000;
+ },
+ convert: function (milliseconds) {
+ let tms = Math.round(milliseconds * 10);
+ milliseconds = Math.floor(tms / 10);
+
+ tms -= milliseconds * 10;
+
+ return (milliseconds).toString() + '.' + tms.toString();
+ }
+ },
+ 'seconds': {
+ check: function (max) {
+ return NETDATA.options.current.seconds_as_time && max >= 1000 && max < 60000;
+ },
+ convert: function (milliseconds) {
+ milliseconds = Math.round(milliseconds);
+
+ let seconds = Math.floor(milliseconds / 1000);
+ milliseconds -= seconds * 1000;
+
+ milliseconds = Math.round(milliseconds / 10);
+
+ return seconds.toString() + '.'
+ + NETDATA.zeropad(milliseconds);
+ }
+ },
+ 'M:SS.ms': {
+ check: function (max) {
+ return NETDATA.options.current.seconds_as_time && max >= 60000;
+ },
+ convert: function (milliseconds) {
+ milliseconds = Math.round(milliseconds);
+
+ let minutes = Math.floor(milliseconds / 60000);
+ milliseconds -= minutes * 60000;
+
+ let seconds = Math.floor(milliseconds / 1000);
+ milliseconds -= seconds * 1000;
+
+ milliseconds = Math.round(milliseconds / 10);
+
+ return minutes.toString() + ':'
+ + NETDATA.zeropad(seconds) + '.'
+ + NETDATA.zeropad(milliseconds);
+ }
+ }
+ },
+ 'nanoseconds': {
+ 'nanoseconds': {
+ check: function (max) {
+ return NETDATA.options.current.seconds_as_time && max < 1000;
+ },
+ convert: function (nanoseconds) {
+ let tms = Math.round(nanoseconds * 10);
+ nanoseconds = Math.floor(tms / 10);
+
+ tms -= nanoseconds * 10;
+
+ return (nanoseconds).toString() + '.' + tms.toString();
+ }
+ },
+ 'microseconds': {
+ check: function (max) {
+ return NETDATA.options.current.seconds_as_time
+ && max >= 1000 && max < 1000 * 1000;
+ },
+ convert: function (nanoseconds) {
+ nanoseconds = Math.round(nanoseconds);
+
+ let microseconds = Math.floor(nanoseconds / 1000);
+ nanoseconds -= microseconds * 1000;
+
+ nanoseconds = Math.round(nanoseconds / 10);
+
+ return microseconds.toString() + '.'
+ + NETDATA.zeropad(nanoseconds);
+ }
+ },
+ 'milliseconds': {
+ check: function (max) {
+ return NETDATA.options.current.seconds_as_time
+ && max >= 1000 * 1000 && max < 1000 * 1000 * 1000;
+ },
+ convert: function (nanoseconds) {
+ nanoseconds = Math.round(nanoseconds);
+
+ let milliseconds = Math.floor(nanoseconds / 1000 / 1000);
+ nanoseconds -= milliseconds * 1000 * 1000;
+
+ nanoseconds = Math.round(nanoseconds / 1000 / 10);
+
+ return milliseconds.toString() + '.'
+ + NETDATA.zeropad(nanoseconds);
+ }
+ },
+ 'seconds': {
+ check: function (max) {
+ return NETDATA.options.current.seconds_as_time
+ && max >= 1000 * 1000 * 1000;
+ },
+ convert: function (nanoseconds) {
+ nanoseconds = Math.round(nanoseconds);
+
+ let seconds = Math.floor(nanoseconds / 1000 / 1000 / 1000);
+ nanoseconds -= seconds * 1000 * 1000 * 1000;
+
+ nanoseconds = Math.round(nanoseconds / 1000 / 1000 / 10);
+
+ return seconds.toString() + '.'
+ + NETDATA.zeropad(nanoseconds);
+ }
+ },
+ }
+ },
+
+ seconds2time: function (seconds) {
+ seconds = Math.abs(seconds);
+
+ let days = Math.floor(seconds / 86400);
+ seconds -= days * 86400;
+
+ let hours = Math.floor(seconds / 3600);
+ seconds -= hours * 3600;
+
+ let minutes = Math.floor(seconds / 60);
+ seconds -= minutes * 60;
+
+ seconds = Math.round(seconds);
+
+ let ms_txt = '';
+ /*
+ let ms = seconds - Math.floor(seconds);
+ seconds -= ms;
+ ms = Math.round(ms * 1000);
+
+ if (ms > 1) {
+ if (ms < 10)
+ ms_txt = '.00' + ms.toString();
+ else if (ms < 100)
+ ms_txt = '.0' + ms.toString();
+ else
+ ms_txt = '.' + ms.toString();
+ }
+ */
+
+ return ((days > 0) ? days.toString() + 'd:' : '').toString()
+ + NETDATA.zeropad(hours) + ':'
+ + NETDATA.zeropad(minutes) + ':'
+ + NETDATA.zeropad(seconds)
+ + ms_txt;
+ },
+
+ // get a function that converts the units
+ // + every time units are switched call the callback
+ get: function (uuid, min, max, units, desired_units, common_units_name, switch_units_callback) {
+ // validate the parameters
+ if (typeof units === 'undefined') {
+ units = 'undefined';
+ }
+
+ // check if we support units conversion
+ if (typeof this.scalableUnits[units] === 'undefined' && typeof this.convertibleUnits[units] === 'undefined') {
+ // we can't convert these units
+ //console.log('DEBUG: ' + uuid.toString() + ' can\'t convert units: ' + units.toString());
+ return function (value) {
+ return value;
+ };
+ }
+
+ // check if the caller wants the original units
+ if (typeof desired_units === 'undefined' || desired_units === null || desired_units === 'original' || desired_units === units) {
+ //console.log('DEBUG: ' + uuid.toString() + ' original units wanted');
+ switch_units_callback(units);
+ return function (value) {
+ return value;
+ };
+ }
+
+ // now we know we can convert the units
+ // and the caller wants some kind of conversion
+
+ let tunits = null;
+ let tdivider = 0;
+
+ if (typeof this.scalableUnits[units] !== 'undefined') {
+ // units that can be scaled
+ // we decide a divider
+
+ // console.log('NETDATA.unitsConversion.get(' + units.toString() + ', ' + desired_units.toString() + ', function()) decide divider with min = ' + min.toString() + ', max = ' + max.toString());
+
+ if (desired_units === 'auto') {
+ // the caller wants to auto-scale the units
+
+ // find the absolute maximum value that is rendered on the chart
+ // based on this we decide the scale
+ min = Math.abs(min);
+ max = Math.abs(max);
+ if (min > max) {
+ max = min;
+ }
+
+ // find the smallest scale that provides integers
+ // for (x in this.scalableUnits[units]) {
+ // if (this.scalableUnits[units].hasOwnProperty(x)) {
+ // let m = this.scalableUnits[units][x];
+ // if (m <= max && m > tdivider) {
+ // tunits = x;
+ // tdivider = m;
+ // }
+ // }
+ // }
+ const sunit = this.scalableUnits[units];
+ for (var x of Object.keys(sunit)) {
+ let m = sunit[x];
+ if (m <= max && m > tdivider) {
+ tunits = x;
+ tdivider = m;
+ }
+ }
+
+ if (tunits === null || tdivider <= 0) {
+ // we couldn't find one
+ //console.log('DEBUG: ' + uuid.toString() + ' cannot find an auto-scaling candidate for units: ' + units.toString() + ' (max: ' + max.toString() + ')');
+ switch_units_callback(units);
+ return function (value) {
+ return value;
+ };
+ }
+
+ if (typeof common_units_name === 'string' && typeof uuid === 'string') {
+ // the caller wants several charts to have the same units
+ // data-common-units
+
+ let common_units_key = common_units_name + '-' + units;
+
+ // add our divider into the list of keys
+ let t = this.keys[common_units_key];
+ if (typeof t === 'undefined') {
+ this.keys[common_units_key] = {};
+ t = this.keys[common_units_key];
+ }
+ t[uuid] = {
+ units: tunits,
+ divider: tdivider
+ };
+
+ // find the max divider of all charts
+ let common_units = t[uuid];
+ for (var x in t) {
+ if (t.hasOwnProperty(x) && t[x].divider > common_units.divider) {
+ common_units = t[x];
+ }
+ }
+
+ // save our common_max to the latest keys
+ let latest = this.latest[common_units_key];
+ if (typeof latest === 'undefined') {
+ this.latest[common_units_key] = {};
+ latest = this.latest[common_units_key];
+ }
+ latest.units = common_units.units;
+ latest.divider = common_units.divider;
+
+ tunits = latest.units;
+ tdivider = latest.divider;
+
+ //console.log('DEBUG: ' + uuid.toString() + ' converted units: ' + units.toString() + ' to units: ' + tunits.toString() + ' with divider ' + tdivider.toString() + ', common-units=' + common_units_name.toString() + ((t[uuid].divider !== tdivider)?' USED COMMON, mine was ' + t[uuid].units:' set common').toString());
+
+ // apply it to this chart
+ switch_units_callback(tunits);
+ return function (value) {
+ if (tdivider !== latest.divider) {
+ // another chart switched our common units
+ // we should switch them too
+ //console.log('DEBUG: ' + uuid + ' switching units due to a common-units change, from ' + tunits.toString() + ' to ' + latest.units.toString());
+ tunits = latest.units;
+ tdivider = latest.divider;
+ switch_units_callback(tunits);
+ }
+
+ return value / tdivider;
+ };
+ } else {
+ // the caller did not give data-common-units
+ // this chart auto-scales independently of all others
+ //console.log('DEBUG: ' + uuid.toString() + ' converted units: ' + units.toString() + ' to units: ' + tunits.toString() + ' with divider ' + tdivider.toString() + ', autonomously');
+
+ switch_units_callback(tunits);
+ return function (value) {
+ return value / tdivider;
+ };
+ }
+ } else {
+ // the caller wants specific units
+
+ if (typeof this.scalableUnits[units][desired_units] !== 'undefined') {
+ // all good, set the new units
+ tdivider = this.scalableUnits[units][desired_units];
+ // console.log('DEBUG: ' + uuid.toString() + ' converted units: ' + units.toString() + ' to units: ' + desired_units.toString() + ' with divider ' + tdivider.toString() + ', by reference');
+ switch_units_callback(desired_units);
+ return function (value) {
+ return value / tdivider;
+ };
+ } else {
+ // oops! switch back to original units
+ console.log('Units conversion from ' + units.toString() + ' to ' + desired_units.toString() + ' is not supported.');
+ switch_units_callback(units);
+ return function (value) {
+ return value;
+ };
+ }
+ }
+ } else if (typeof this.convertibleUnits[units] !== 'undefined') {
+ // units that can be converted
+ if (desired_units === 'auto') {
+ for (var x in this.convertibleUnits[units]) {
+ if (this.convertibleUnits[units].hasOwnProperty(x)) {
+ if (this.convertibleUnits[units][x].check(max)) {
+ //console.log('DEBUG: ' + uuid.toString() + ' converting ' + units.toString() + ' to: ' + x.toString());
+ switch_units_callback(x);
+ return this.convertibleUnits[units][x].convert;
+ }
+ }
+ }
+
+ // none checked ok
+ //console.log('DEBUG: ' + uuid.toString() + ' no conversion available for ' + units.toString() + ' to: ' + desired_units.toString());
+ switch_units_callback(units);
+ return function (value) {
+ return value;
+ };
+ } else if (typeof this.convertibleUnits[units][desired_units] !== 'undefined') {
+ switch_units_callback(desired_units);
+ return this.convertibleUnits[units][desired_units].convert;
+ } else {
+ console.log('Units conversion from ' + units.toString() + ' to ' + desired_units.toString() + ' is not supported.');
+ switch_units_callback(units);
+ return function (value) {
+ return value;
+ };
+ }
+ } else {
+ // hm... did we forget to implement the new type?
+ console.log(`Unmatched unit conversion method for units ${units.toString()}`);
+ switch_units_callback(units);
+ return function (value) {
+ return value;
+ };
+ }
+ }
+};
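+
+// Illustrative sketch (not executed here): get() returns a converter function
+// and reports the units it picked through the callback; e.g. auto-scaling
+// 'KB' with a maximum of 2048 selects 'MB' (divider 1024):
+//
+// let convert = NETDATA.unitsConversion.get('some-uuid', 0, 2048, 'KB', 'auto',
+//     null, function (units) { console.log('switched to ' + units); });
+// convert(2048); // => 2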
diff --git a/web/gui/src/dashboard.js/utils.js b/web/gui/src/dashboard.js/utils.js
new file mode 100644
index 0000000..8014aaf
--- /dev/null
+++ b/web/gui/src/dashboard.js/utils.js
@@ -0,0 +1,432 @@
+// *** src/dashboard.js/utils.js
+
+NETDATA.name2id = function (s) {
+ return s
+ .replace(/ /g, '_')
+ .replace(/:/g, '_')
+ .replace(/\(/g, '_')
+ .replace(/\)/g, '_')
+ .replace(/\./g, '_')
+ .replace(/\//g, '_');
+};
+
+NETDATA.encodeURIComponent = function (s) {
+ if (typeof(s) === 'string') {
+ return encodeURIComponent(s);
+ }
+
+ return s;
+};
+
+// A heuristic for detecting slow devices, memoized after the first call.
+let isSlowDeviceResult = undefined;
+const isSlowDevice = function () {
+ if (isSlowDeviceResult !== undefined) {
+ return isSlowDeviceResult;
+ }
+
+ try {
+ let ua = navigator.userAgent.toLowerCase();
+
+ let iOS = /ipad|iphone|ipod/.test(ua) && !window.MSStream;
+ let android = /android/.test(ua) && !window.MSStream;
+ isSlowDeviceResult = (iOS || android);
+ } catch (e) {
+ isSlowDeviceResult = false;
+ }
+
+ return isSlowDeviceResult;
+};
+
+NETDATA.guid = function () {
+ function s4() {
+ return Math.floor((1 + Math.random()) * 0x10000)
+ .toString(16)
+ .substring(1);
+ }
+
+ return s4() + s4() + '-' + s4() + '-' + s4() + '-' + s4() + '-' + s4() + s4() + s4();
+};
+
+NETDATA.zeropad = function (x) {
+ if (x > -10 && x < 10) {
+ return '0' + x.toString();
+ } else {
+ return x.toString();
+ }
+};
+
+NETDATA.seconds4human = function (seconds, options) {
+ let defaultOptions = {
+ now: 'now',
+ space: ' ',
+ negative_suffix: 'ago',
+ day: 'day',
+ days: 'days',
+ hour: 'hour',
+ hours: 'hours',
+ minute: 'min',
+ minutes: 'mins',
+ second: 'sec',
+ seconds: 'secs',
+ and: 'and'
+ };
+
+ if (typeof options !== 'object') {
+ options = defaultOptions;
+ } else {
+ for (var x in defaultOptions) {
+ if (typeof options[x] !== 'string') {
+ options[x] = defaultOptions[x];
+ }
+ }
+ }
+
+ if (typeof seconds === 'string') {
+ seconds = parseInt(seconds, 10);
+ }
+
+ if (seconds === 0) {
+ return options.now;
+ }
+
+ let suffix = '';
+ if (seconds < 0) {
+ seconds = -seconds;
+ if (options.negative_suffix !== '') {
+ suffix = options.space + options.negative_suffix;
+ }
+ }
+
+ let days = Math.floor(seconds / 86400);
+ seconds -= (days * 86400);
+
+ let hours = Math.floor(seconds / 3600);
+ seconds -= (hours * 3600);
+
+ let minutes = Math.floor(seconds / 60);
+ seconds -= (minutes * 60);
+
+ let strings = [];
+
+ if (days > 1) {
+ strings.push(days.toString() + options.space + options.days);
+ } else if (days === 1) {
+ strings.push(days.toString() + options.space + options.day);
+ }
+
+ if (hours > 1) {
+ strings.push(hours.toString() + options.space + options.hours);
+ } else if (hours === 1) {
+ strings.push(hours.toString() + options.space + options.hour);
+ }
+
+ if (minutes > 1) {
+ strings.push(minutes.toString() + options.space + options.minutes);
+ } else if (minutes === 1) {
+ strings.push(minutes.toString() + options.space + options.minute);
+ }
+
+ if (seconds > 1) {
+ strings.push(Math.floor(seconds).toString() + options.space + options.seconds);
+ } else if (seconds === 1) {
+ strings.push(Math.floor(seconds).toString() + options.space + options.second);
+ }
+
+ if (strings.length === 1) {
+ return strings.pop() + suffix;
+ }
+
+ let last = strings.pop();
+ return strings.join(", ") + " " + options.and + " " + last + suffix;
+};
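+
+// Illustrative sketch (not executed here):
+//
+// NETDATA.seconds4human(3725); // => '1 hour, 2 mins and 5 secs'
+// NETDATA.seconds4human(-60, { negative_suffix: 'earlier' }); // => '1 min earlier'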
+
+// ----------------------------------------------------------------------------------------------------------------
+// element data attributes
+
+NETDATA.dataAttribute = function (element, attribute, def) {
+ let key = 'data-' + attribute.toString();
+ if (element.hasAttribute(key)) {
+ let data = element.getAttribute(key);
+
+ if (data === 'true') {
+ return true;
+ }
+ if (data === 'false') {
+ return false;
+ }
+ if (data === 'null') {
+ return null;
+ }
+
+ // Only convert to a number if it doesn't change the string
+ if (data === +data + '') {
+ return +data;
+ }
+
+ if (/^(?:\{[\w\W]*\}|\[[\w\W]*\])$/.test(data)) {
+ return JSON.parse(data);
+ }
+
+ return data;
+ } else {
+ return def;
+ }
+};
+
+NETDATA.dataAttributeBoolean = function (element, attribute, def) {
+ let value = NETDATA.dataAttribute(element, attribute, def);
+
+ if (value === true || value === false) // gmosx: Love this :)
+ {
+ return value;
+ }
+
+ if (typeof(value) === 'string') {
+ if (value === 'yes' || value === 'on') {
+ return true;
+ }
+
+ if (value === '' || value === 'no' || value === 'off' || value === 'null') {
+ return false;
+ }
+
+ return def;
+ }
+
+ if (typeof(value) === 'number') {
+ return value !== 0;
+ }
+
+ return def;
+};
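+
+// Illustrative sketch (not executed here), for an element like
+// <div data-after="-300" data-legend="yes">:
+//
+// NETDATA.dataAttribute(element, 'after', -600);          // => -300 (as a number)
+// NETDATA.dataAttributeBoolean(element, 'legend', false); // => true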
+
+// ----------------------------------------------------------------------------------------------------------------
+// fast numbers formatting
+
+NETDATA.fastNumberFormat = {
+ formattersFixed: [],
+ formattersZeroBased: [],
+
+ // this is the fastest and the preferred
+ getIntlNumberFormat: function (min, max) {
+ let key = max;
+ if (min === max) {
+ if (typeof this.formattersFixed[key] === 'undefined') {
+ this.formattersFixed[key] = new Intl.NumberFormat(undefined, {
+ // style: 'decimal',
+ // minimumIntegerDigits: 1,
+ // minimumSignificantDigits: 1,
+ // maximumSignificantDigits: 1,
+ useGrouping: true,
+ minimumFractionDigits: min,
+ maximumFractionDigits: max
+ });
+ }
+
+ return this.formattersFixed[key];
+ } else if (min === 0) {
+ if (typeof this.formattersZeroBased[key] === 'undefined') {
+ this.formattersZeroBased[key] = new Intl.NumberFormat(undefined, {
+ // style: 'decimal',
+ // minimumIntegerDigits: 1,
+ // minimumSignificantDigits: 1,
+ // maximumSignificantDigits: 1,
+ useGrouping: true,
+ minimumFractionDigits: min,
+ maximumFractionDigits: max
+ });
+ }
+
+ return this.formattersZeroBased[key];
+ } else {
+ // this is never used
+ // it is added just for completeness
+ return new Intl.NumberFormat(undefined, {
+ // style: 'decimal',
+ // minimumIntegerDigits: 1,
+ // minimumSignificantDigits: 1,
+ // maximumSignificantDigits: 1,
+ useGrouping: true,
+ minimumFractionDigits: min,
+ maximumFractionDigits: max
+ });
+ }
+ },
+
+ // this respects locale
+ getLocaleString: function (min, max) {
+ let key = max;
+ if (min === max) {
+ if (typeof this.formattersFixed[key] === 'undefined') {
+ this.formattersFixed[key] = {
+ format: function (value) {
+ return value.toLocaleString(undefined, {
+ // style: 'decimal',
+ // minimumIntegerDigits: 1,
+ // minimumSignificantDigits: 1,
+ // maximumSignificantDigits: 1,
+ useGrouping: true,
+ minimumFractionDigits: min,
+ maximumFractionDigits: max
+ });
+ }
+ };
+ }
+
+ return this.formattersFixed[key];
+ } else if (min === 0) {
+ if (typeof this.formattersZeroBased[key] === 'undefined') {
+ this.formattersZeroBased[key] = {
+ format: function (value) {
+ return value.toLocaleString(undefined, {
+ // style: 'decimal',
+ // minimumIntegerDigits: 1,
+ // minimumSignificantDigits: 1,
+ // maximumSignificantDigits: 1,
+ useGrouping: true,
+ minimumFractionDigits: min,
+ maximumFractionDigits: max
+ });
+ }
+ };
+ }
+
+ return this.formattersZeroBased[key];
+ } else {
+ return {
+ format: function (value) {
+ return value.toLocaleString(undefined, {
+ // style: 'decimal',
+ // minimumIntegerDigits: 1,
+ // minimumSignificantDigits: 1,
+ // maximumSignificantDigits: 1,
+ useGrouping: true,
+ minimumFractionDigits: min,
+ maximumFractionDigits: max
+ });
+ }
+ };
+ }
+ },
+
+ // the fallback
+ getFixed: function (min, max) {
+ let key = max;
+ if (min === max) {
+ if (typeof this.formattersFixed[key] === 'undefined') {
+ this.formattersFixed[key] = {
+ format: function (value) {
+ if (value === 0) {
+ return "0";
+ }
+ return value.toFixed(max);
+ }
+ };
+ }
+
+ return this.formattersFixed[key];
+ } else if (min === 0) {
+ if (typeof this.formattersZeroBased[key] === 'undefined') {
+ this.formattersZeroBased[key] = {
+ format: function (value) {
+ if (value === 0) {
+ return "0";
+ }
+ return value.toFixed(max);
+ }
+ };
+ }
+
+ return this.formattersZeroBased[key];
+ } else {
+ return {
+ format: function (value) {
+ if (value === 0) {
+ return "0";
+ }
+ return value.toFixed(max);
+ }
+ };
+ }
+ },
+
+ testIntlNumberFormat: function () {
+ let value = 1.12345;
+ let e1 = "1.12", e2 = "1,12";
+ let s = "";
+
+ try {
+ let x = new Intl.NumberFormat(undefined, {
+ useGrouping: true,
+ minimumFractionDigits: 2,
+ maximumFractionDigits: 2
+ });
+
+ s = x.format(value);
+ } catch (e) {
+ s = "";
+ }
+
+ // console.log('NumberFormat: ', s);
+ return (s === e1 || s === e2);
+ },
+
+ testLocaleString: function () {
+ let value = 1.12345;
+ let e1 = "1.12", e2 = "1,12";
+ let s = "";
+
+ try {
+ s = value.toLocaleString(undefined, {
+ useGrouping: true,
+ minimumFractionDigits: 2,
+ maximumFractionDigits: 2
+ });
+ } catch (e) {
+ s = "";
+ }
+
+ // console.log('localeString: ', s);
+ return (s === e1 || s === e2);
+ },
+
+ // on first run we decide which formatter to use
+ get: function (min, max) {
+ if (this.testIntlNumberFormat()) {
+ // console.log('numberformat');
+ this.get = this.getIntlNumberFormat;
+ } else if (this.testLocaleString()) {
+ // console.log('localestring');
+ this.get = this.getLocaleString;
+ } else {
+ // console.log('fixed');
+ this.get = this.getFixed;
+ }
+ return this.get(min, max);
+ }
+};
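+
+// Illustrative sketch (not executed here): the first call picks the fastest
+// supported formatter and replaces .get() with it for all later calls:
+//
+// let formatter = NETDATA.fastNumberFormat.get(2, 2);
+// formatter.format(1234.5); // => '1,234.50' in an en-US locale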
+
+// ----------------------------------------------------------------------------------------------------------------
+// Detect the netdata server
+
+// http://stackoverflow.com/questions/984510/what-is-my-script-src-url
+// http://stackoverflow.com/questions/6941533/get-protocol-domain-and-port-from-url
+NETDATA._scriptSource = function () {
+ let script = null;
+
+ if (typeof document.currentScript !== 'undefined') {
+ script = document.currentScript;
+ } else {
+ const all_scripts = document.getElementsByTagName('script');
+ script = all_scripts[all_scripts.length - 1];
+ }
+
+ if (typeof script.getAttribute.length !== 'undefined') {
+ script = script.src;
+ } else {
+ script = script.getAttribute('src', -1);
+ }
+
+ return script;
+};
diff --git a/web/gui/src/dashboard.js/xss.js b/web/gui/src/dashboard.js/xss.js
new file mode 100644
index 0000000..fa66f34
--- /dev/null
+++ b/web/gui/src/dashboard.js/xss.js
@@ -0,0 +1,84 @@
+// ----------------------------------------------------------------------------------------------------------------
+// XSS checks
+
+NETDATA.xss = {
+ enabled: (typeof netdataCheckXSS === 'undefined') ? false : netdataCheckXSS,
+ enabled_for_data: (typeof netdataCheckXSS === 'undefined') ? false : netdataCheckXSS,
+
+ string: function (s) {
+ return s.toString()
+ .replace(/</g, '&lt;')
+ .replace(/>/g, '&gt;')
+ .replace(/"/g, '&quot;')
+ .replace(/'/g, '&#39;');
+ },
+
+ object: function (name, obj, ignore_regex) {
+ if (typeof ignore_regex !== 'undefined' && ignore_regex.test(name)) {
+ // console.log('XSS: ignoring "' + name + '"');
+ return obj;
+ }
+
+ switch (typeof(obj)) {
+ case 'string':
+ const ret = this.string(obj);
+ if (ret !== obj) {
+ console.log('XSS protection changed string ' + name + ' from "' + obj + '" to "' + ret + '"');
+ }
+ return ret;
+
+ case 'object':
+ if (obj === null) {
+ return obj;
+ }
+
+ if (Array.isArray(obj)) {
+ // console.log('checking array "' + name + '"');
+
+ let len = obj.length;
+ while (len--) {
+ obj[len] = this.object(name + '[' + len + ']', obj[len], ignore_regex);
+ }
+ } else {
+ // console.log('checking object "' + name + '"');
+
+ for (var i in obj) {
+ if (obj.hasOwnProperty(i) === false) {
+ continue;
+ }
+ if (this.string(i) !== i) {
+ console.log('XSS protection removed invalid object member "' + name + '.' + i + '"');
+ delete obj[i];
+ } else {
+ obj[i] = this.object(name + '.' + i, obj[i], ignore_regex);
+ }
+ }
+ }
+ return obj;
+
+ default:
+ return obj;
+ }
+ },
+
+ checkOptional: function (name, obj, ignore_regex) {
+ if (this.enabled) {
+ //console.log('XSS: checking optional "' + name + '"...');
+ return this.object(name, obj, ignore_regex);
+ }
+ return obj;
+ },
+
+ checkAlways: function (name, obj, ignore_regex) {
+ //console.log('XSS: checking always "' + name + '"...');
+ return this.object(name, obj, ignore_regex);
+ },
+
+ checkData: function (name, obj, ignore_regex) {
+ if (this.enabled_for_data) {
+ //console.log('XSS: checking data "' + name + '"...');
+ return this.object(name, obj, ignore_regex);
+ }
+ return obj;
+ }
+};
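+
+// Illustrative sketch (not executed here): string() escapes HTML-sensitive
+// characters, while the check*() helpers walk entire API responses:
+//
+// NETDATA.xss.string('<img src=x onerror=alert(1)>');
+// // => '&lt;img src=x onerror=alert(1)&gt;'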
diff --git a/web/gui/static/img/netdata-logomark.svg b/web/gui/static/img/netdata-logomark.svg
new file mode 100644
index 0000000..87fb2bd
--- /dev/null
+++ b/web/gui/static/img/netdata-logomark.svg
@@ -0,0 +1,3 @@
+<svg width="1723" height="1723" viewBox="0 0 1723 1723" fill="none" xmlns="http://www.w3.org/2000/svg">
+<path fill-rule="evenodd" clip-rule="evenodd" d="M0.628784 849.678C0.628784 473.909 235.042 153.621 563.766 30.7914C701.438 19.0613 843.892 50.2449 970.557 129.297C1052.47 180.42 1119.71 246.528 1170.96 321.982C1161.21 207.568 1122.97 96.4678 1058.94 0.187012C1220.56 38.587 1364.64 123.126 1476.91 239.343C1518.34 297.634 1548.55 365.545 1563.67 440.489C1578.54 514.244 1577.35 587.545 1562.5 656.661C1601.04 613.105 1632.22 563.24 1654.63 509.251C1698.41 613.852 1722.63 728.899 1722.63 849.678C1722.63 1331.55 1337.15 1722.19 861.629 1722.19C386.112 1722.19 0.628784 1331.55 0.628784 849.678ZM1178.87 1369.04C1286.71 1369.04 1374.13 1280.45 1374.13 1171.17C1374.13 1061.88 1286.71 973.293 1178.87 973.293C1071.03 973.293 983.603 1061.88 983.603 1171.17C983.603 1280.45 1071.03 1369.04 1178.87 1369.04Z" fill="#00C853"/>
+</svg>
diff --git a/web/gui/tv.html b/web/gui/tv.html
new file mode 100644
index 0000000..e15fb2e
--- /dev/null
+++ b/web/gui/tv.html
@@ -0,0 +1,279 @@
+<!DOCTYPE html>
+<!-- SPDX-License-Identifier: GPL-3.0-or-later -->
+<html lang="en">
+<head>
+ <title>NetData TV Dashboard</title>
+ <meta name="application-name" content="netdata">
+
+ <meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
+ <meta charset="utf-8">
+ <meta http-equiv="X-UA-Compatible" content="IE=edge,chrome=1">
+ <meta name="viewport" content="width=device-width, initial-scale=1">
+ <meta name="apple-mobile-web-app-capable" content="yes">
+ <meta name="apple-mobile-web-app-status-bar-style" content="black-translucent">
+
+ <meta property="og:locale" content="en_US" />
+ <meta property="og:image" content="https://cloud.githubusercontent.com/assets/2662304/22945737/e98cd0c6-f2fd-11e6-96f1-5501934b0955.png"/>
+ <meta property="og:url" content="http://my-netdata.io/"/>
+ <meta property="og:type" content="website"/>
+ <meta property="og:site_name" content="netdata"/>
+ <meta property="og:title" content="netdata - real-time performance monitoring, done right!"/>
+ <meta property="og:description" content="Stunning real-time dashboards, blazingly fast and extremely interactive. Zero configuration, zero dependencies, zero maintenance." />
+
+</head>
+<script>
+// this section has to appear before loading dashboard.js
+
+// Select a theme.
+// uncomment one of the two themes:
+
+// var netdataTheme = 'default'; // this is white
+var netdataTheme = 'slate'; // this is dark
+
+
+// Set the default netdata server.
+// on charts without a 'data-host', this one will be used.
+// the default is the server that dashboard.js is downloaded from.
+
+// var netdataServer = 'http://my.server:19999/';
+</script>
+
+<!--
+ Load dashboard.js
+
+ to host this HTML file on your web server,
+ you have to load dashboard.js from the netdata server.
+
+    So, pick one of the two below.
+    If you pick the first, set the server name/IP.
+
+    The second assumes you host this file under /usr/share/netdata/web
+    and that you have changed its ownership to netdata:netdata.
+-->
+<!-- <script type="text/javascript" src="http://my.server:19999/dashboard.js"></script> -->
+<script type="text/javascript" src="dashboard.js?v20190902-0"></script>
+
+<script>
+// Set options for TV operation
+// This has to be done, after dashboard.js is loaded
+
+// destroy charts not shown (lowers memory on the browser)
+NETDATA.options.current.destroy_on_hide = true;
+
+// set this to false, to always show all dimensions
+NETDATA.options.current.eliminate_zero_dimensions = true;
+
+// lower the pressure on this browser
+NETDATA.options.current.concurrent_refreshes = false;
+
+// if the tv browser is too slow (a pi?)
+// set this to false
+NETDATA.options.current.parallel_refresher = true;
+
+// always update the charts, even if focus is lost
+// NETDATA.options.current.stop_updates_when_focus_is_lost = false;
+
+// Since you may render charts from many servers, and any of them may
+// go offline for some time, the charts would break.
+// To recover, the page is reloaded every RELOAD_EVERY minutes.
+
+var RELOAD_EVERY = 5;
+setTimeout(function(){
+ location.reload();
+}, RELOAD_EVERY * 60 * 1000);
+
+</script>
+<body>
+
+<div style="width: 100%; text-align: center; display: inline-block;">
+
+ <div style="width: 100%; height: 24vh; text-align: center; display: inline-block;">
+ <div style="width: 100%; height: 15px; text-align: center; display: inline-block;">
+            <b>CPU on both servers</b>
+ </div>
+ <div style="width: 100%; height: calc(100% - 15px); text-align: center; display: inline-block;">
+ <br/>
+ <div data-netdata="system.cpu"
+ data-host="https://registry.my-netdata.io"
+ data-title="CPU usage of registry.my-netdata.io"
+ data-chart-library="dygraph"
+ data-width="49%"
+ data-height="100%"
+ data-after="-300"
+ data-dygraph-valuerange="[0, 100]"
+ ></div>
+ <div data-netdata="system.cpu"
+ data-title="CPU usage of your netdata server"
+ data-chart-library="dygraph"
+ data-width="49%"
+ data-height="100%"
+ data-after="-300"
+ data-dygraph-valuerange="[0, 100]"
+ ></div>
+ </div>
+ </div>
+
+
+ <div style="width: 100%; height: 24vh; text-align: center; display: inline-block;">
+ <div style="width: 100%; height: 15px; text-align: center; display: inline-block;">
+ <b>Disk I/O on both servers</b>
+ </div>
+ <div style="width: 100%; height: calc(100% - 15px); text-align: center; display: inline-block;">
+ <div data-netdata="system.io"
+ data-host="https://registry.my-netdata.io"
+ data-common-max="io"
+ data-common-min="io"
+ data-title="I/O on registry.my-netdata.io"
+ data-chart-library="dygraph"
+ data-width="49%"
+ data-height="100%"
+ data-after="-300"
+ ></div>
+ <div data-netdata="system.io"
+ data-title="I/O on your netdata server"
+ data-common-max="io"
+ data-common-min="io"
+ data-chart-library="dygraph"
+ data-width="49%"
+ data-height="100%"
+ data-after="-300"
+ ></div>
+ </div>
+ </div>
+
+
+ <div style="width: 100%; height: 24vh; text-align: center; display: inline-block;">
+ <div style="width: 100%; height: 15px; text-align: center; display: inline-block;">
+ <b>IPv4 traffic on both servers</b>
+ </div>
+ <div style="width: 100%; height: calc(100% - 15px); text-align: center; display: inline-block;">
+ <div data-netdata="system.net"
+ data-host="https://registry.my-netdata.io"
+ data-common-max="traffic"
+ data-common-min="traffic"
+ data-title="Network traffic on registry.my-netdata.io"
+ data-chart-library="dygraph"
+ data-width="49%"
+ data-height="100%"
+ data-after="-300"
+ ></div>
+ <div data-netdata="system.net"
+ data-title="Network traffic on your netdata server"
+ data-common-max="traffic"
+ data-common-min="traffic"
+ data-chart-library="dygraph"
+ data-width="49%"
+ data-height="100%"
+ data-after="-300"
+ ></div>
+ </div>
+ </div>
+
+ <div style="width: 100%; height: 23vh; text-align: center; display: inline-block;">
+ <div style="width: 100%; height: 15px; text-align: center; display: inline-block;">
+ <b>Netdata statistics on both servers</b>
+ </div>
+ <div style="width: 100%; max-height: calc(100% - 15px); text-align: center; display: inline-block;">
+ <div style="width: 49%; height:100%; align: center; display: inline-block;">
+ registry.my-netdata.io
+ <br/>
+ <div data-netdata="netdata.requests"
+ data-host="https://registry.my-netdata.io"
+ data-common-max="netdata-requests"
+ data-decimal-digits="0"
+ data-title="Chart Refreshes/s"
+ data-chart-library="gauge"
+ data-width="20%"
+ data-height="100%"
+ data-after="-300"
+ data-points="300"
+ ></div>
+ <div data-netdata="netdata.clients"
+ data-host="https://registry.my-netdata.io"
+ data-common-max="netdata-clients"
+ data-decimal-digits="0"
+ data-title="Sockets"
+ data-chart-library="gauge"
+ data-width="20%"
+ data-height="100%"
+ data-after="-300"
+ data-points="300"
+ data-colors="#AA5500"
+ ></div>
+ <div data-netdata="netdata.net"
+ data-dimensions="in"
+ data-common-max="netdata-net-in"
+ data-decimal-digits="0"
+ data-host="https://registry.my-netdata.io"
+ data-title="Requests Traffic"
+ data-chart-library="easypiechart"
+ data-width="15%"
+ data-height="100%"
+ data-after="-300"
+ data-points="300"
+ ></div>
+ <div data-netdata="netdata.net"
+ data-dimensions="out"
+ data-common-max="netdata-net-out"
+ data-decimal-digits="0"
+ data-host="https://registry.my-netdata.io"
+ data-title="Chart Data Traffic"
+ data-chart-library="easypiechart"
+ data-width="15%"
+ data-height="100%"
+ data-after="-300"
+ data-points="300"
+ ></div>
+ </div>
+ <div style="width: 49%; height:100%; align: center; display: inline-block;">
+ your netdata server
+ <br/>
+ <div data-netdata="netdata.requests"
+ data-title="Chart Refreshes/s"
+ data-common-max="netdata-requests"
+ data-decimal-digits="0"
+ data-chart-library="gauge"
+ data-width="20%"
+ data-height="100%"
+ data-after="-300"
+ data-points="300"
+ ></div>
+ <div data-netdata="netdata.clients"
+ data-common-max="netdata-clients"
+ data-decimal-digits="0"
+ data-title="Sockets"
+ data-chart-library="gauge"
+ data-width="20%"
+ data-height="100%"
+ data-after="-300"
+ data-points="300"
+ data-colors="#AA5500"
+ ></div>
+ <div data-netdata="netdata.net"
+ data-dimensions="in"
+ data-common-max="netdata-net-in"
+ data-decimal-digits="0"
+ data-title="Requests Traffic"
+ data-chart-library="easypiechart"
+ data-width="15%"
+ data-height="100%"
+ data-after="-300"
+ data-points="300"
+ ></div>
+ <div data-netdata="netdata.net"
+ data-dimensions="out"
+ data-common-max="netdata-net-out"
+ data-decimal-digits="0"
+ data-title="Chart Data Traffic"
+ data-chart-library="easypiechart"
+ data-width="15%"
+ data-height="100%"
+ data-after="-300"
+ data-points="300"
+ ></div>
+ </div>
+ </div>
+ </div>
+</div>
+</body>
+</html>
diff --git a/web/server/Makefile.am b/web/server/Makefile.am
new file mode 100644
index 0000000..1ee01bb
--- /dev/null
+++ b/web/server/Makefile.am
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+SUBDIRS = \
+ static \
+ $(NULL)
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/web/server/README.md b/web/server/README.md
new file mode 100644
index 0000000..fbf3151
--- /dev/null
+++ b/web/server/README.md
@@ -0,0 +1,245 @@
+<!--
+title: "Web server"
+description: "The Netdata Agent's local static-threaded web server serves dashboards and real-time visualizations with security and DDoS protection."
+custom_edit_url: https://github.com/netdata/netdata/edit/master/web/server/README.md
+-->
+
+# Web server
+
+The Netdata web server runs as `static-threaded`, i.e. with a fixed, configurable number of threads.
+It uses non-blocking I/O and respects the `keep-alive` HTTP header to serve multiple HTTP requests via the same connection.
+
+## Configuration
+
+Disable the web server by editing `netdata.conf` and setting:
+
+```
+[web]
+ mode = none
+```
+
+With the web server enabled, control the number of threads and sockets with the following settings:
+
+```
+[web]
+ web server threads = 4
+ web server max sockets = 512
+```
+
+The default number of processor threads is `min(cpu cores, 6)`.
+
+The `web server max sockets` setting is automatically adjusted to 50% of the max number of open files Netdata is allowed to use (via `/etc/security/limits.conf` or systemd), to allow enough file descriptors to be available for data collection.
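+
+To raise the ceiling this setting is derived from, increase the open-files limit of the user running Netdata. A minimal sketch for `/etc/security/limits.conf`, assuming Netdata runs as the `netdata` user (the values are illustrative):
+
+```
+netdata soft nofile 65536
+netdata hard nofile 65536
+```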
+
+### Binding Netdata to multiple ports
+
+Netdata can bind to multiple IPs and ports, offering access to different services on each. Up to 100 sockets can be used (increase it at compile time with `CFLAGS="-DMAX_LISTEN_FDS=200" ./netdata-installer.sh ...`).
+
+The ports to bind are controlled via `[web].bind to`, like this:
+
+```
+[web]
+ default port = 19999
+ bind to = 127.0.0.1=dashboard^SSL=optional 10.1.1.1:19998=management|netdata.conf hostname:19997=badges [::]:19996=streaming^SSL=force localhost:19995=registry *:http=dashboard unix:/run/netdata/netdata.sock
+```
+
+Using the above, Netdata will bind to:
+
+- IPv4 127.0.0.1 at port 19999 (the port is taken from `default port`). Only the UI (dashboard) and the read API will be accessible on this port. Both HTTP and HTTPS requests will be accepted.
+- IPv4 10.1.1.1 at port 19998. The management API and `netdata.conf` will be accessible on this port.
+- All the IPs `hostname` resolves to (both IPv4 and IPv6 depending on the resolved IPs) at port 19997. Only badges will be accessible on this port.
+- All IPv6 IPs at port 19996. Only metric streaming requests from other Netdata agents will be accepted on this port. Only encrypted streams will be allowed (i.e. child nodes also need to be [configured for TLS](/streaming/README.md)).
+- All the IPs `localhost` resolves to (both IPv4 and IPv6 depending on the resolved IPs) at port 19995. This port will only accept registry API requests.
+- All IPv4 and IPv6 IPs at port `http` as set in `/etc/services`. Only the UI (dashboard) and the read API will be accessible on this port.
+- Unix domain socket `/run/netdata/netdata.sock`. All requests are serviceable on this socket. Note that in some OSs like Fedora, every service sees a different `/tmp`, so don't create a Unix socket under `/tmp`. `/run` or `/var/run` is suggested.
+
+The option `[web].default port` is used when entries in `[web].bind to` do not specify a port.
+
+Note that the access permissions specified with the `=request type|request type|...` format are available from version 1.12 onwards.
+As shown in the example above, these permissions are optional, with the default being to permit all request types on the specified port.
+The request types are strings identical to the `allow X from` directives of the access lists, i.e. `dashboard`, `streaming`, `registry`, `netdata.conf`, `badges` and `management`.
+The access lists themselves and the general setting `allow connections from` in the next section are applied regardless of the ports that are configured to provide these services.
+The API requests are serviced as follows:
+
+- `dashboard` gives access to the UI, the read API and badges API calls.
+- `badges` gives access only to the badges API calls.
+- `management` gives access only to the management API calls.
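+
+For example, a hypothetical configuration that serves only badges on a dedicated port, while the default port keeps serving the dashboard:
+
+```
+[web]
+    bind to = 127.0.0.1=dashboard *:19997=badges
+```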
+
+### Enabling TLS support
+
+Since v1.16.0, Netdata supports encrypted HTTP connections to the web server, plus encryption of streaming data to a
+parent from its child nodes, via the TLS protocol.
+
+Inbound Unix socket connections are unaffected, regardless of the TLS settings.
+
+> While Netdata uses Transport Layer Security (TLS) 1.2 to encrypt communications rather than the obsolete SSL protocol,
+> it's still common practice to refer to encrypted web connections as `SSL`. Many vendors, like Nginx and even Netdata
+> itself, use `SSL` in configuration files, whereas documentation will always refer to encrypted communications as `TLS`
+> or `TLS/SSL`.
+
+To enable TLS, provide the path to your certificate and private key in the `[web]` section of `netdata.conf`:
+
+```conf
+[web]
+ ssl key = /etc/netdata/ssl/key.pem
+ ssl certificate = /etc/netdata/ssl/cert.pem
+```
+
+Both files must be readable by the `netdata` user. If either file does not exist or is unreadable, Netdata will fall back to HTTP. For a parent-child connection, only the parent needs these settings.
+
+For test purposes, generate self-signed certificates with the following command:
+
+```bash
+openssl req -newkey rsa:2048 -nodes -sha512 -x509 -days 365 -keyout key.pem -out cert.pem
+```
+
+> If you use 4096 bits for your key and the certificate, Netdata will need more CPU to process the communication.
+> `rsa4096` can be up to 4 times slower than `rsa2048`, so we recommend using 2048 bits. Verify the difference
+> by running:
+>
+> ```sh
+> openssl speed rsa2048 rsa4096
+> ```
+
+### Select TLS version
+
+Beginning with version 1.21, specify the TLS version and the ciphers that you want to use:
+
+```conf
+[web]
+ tls version = 1.3
+ tls ciphers = TLS_AES_256_GCM_SHA384:TLS_CHACHA20_POLY1305_SHA256:TLS_AES_128_GCM_SHA256
+```
+
+If you do not specify these options, Netdata will use the highest available protocol version on your system and the default cipher list for that protocol provided by your TLS implementation.
+
+While Netdata accepts all TLS versions as arguments (`1` or `1.0`, `1.1`, `1.2` and `1.3`), we recommend using `1.3` for the most secure encryption.
+
+#### TLS/SSL enforcement
+
+When certificates are defined and no other options are provided, a Netdata server will:
+
+- Redirect all incoming HTTP web server requests to HTTPS. Applies to the dashboard, the API, `netdata.conf` and badges.
+- Allow incoming child connections to use both unencrypted and encrypted communications for streaming.
+
+To change this behavior, you need to modify the `bind to` setting in the `[web]` section of `netdata.conf`. At the end of each port definition, append `^SSL=force` or `^SSL=optional`. What happens with these settings differs, depending on whether the port is used for HTTP/S requests, or for streaming.
+
+| SSL setting | HTTP requests|HTTPS requests|Unencrypted Streams|Encrypted Streams|
+|:---------:|:-----------:|:------------:|:-----------------:|:----------------:|
+| none | Redirected to HTTPS|Accepted|Accepted|Accepted|
+| `force`| Redirected to HTTPS|Accepted|Denied|Accepted|
+| `optional`| Accepted|Accepted|Accepted|Accepted|
+
+Example:
+
+```
+[web]
+ bind to = *=dashboard|registry|badges|management|streaming|netdata.conf^SSL=force
+```
+
+For information on how to configure the child to use TLS, check [securing the communication](/streaming/README.md#securing-streaming-communications) in the streaming documentation. There you will find additional details on the expected behavior of client and server nodes when their respective TLS options are enabled.
+
+When SSL is configured differently for multiple ports, Netdata applies the behavior specified for each port individually. For example, using the configuration line below:
+
+```
+[web]
+ bind to = *=dashboard|registry|badges|management|streaming|netdata.conf^SSL=force *:20000=netdata.conf^SSL=optional *:20001=dashboard|registry
+```
+
+Netdata will:
+
+- Force all HTTP requests to the default port to be redirected to HTTPS (same port).
+- Refuse unencrypted streaming connections from child nodes on the default port.
+- Allow both HTTP and HTTPS requests to port 20000 for `netdata.conf`.
+- Force HTTP requests to port 20001 to be redirected to HTTPS (same port). Only allow requests for the dashboard, the read API and the registry on port 20001.
+
+#### TLS/SSL errors
+
+When you start using Netdata with TLS, you may find errors in the Netdata log, which is stored at `/var/log/netdata/error.log` by default.
+
+Most of the time, these errors are due to incompatibilities between your browser's options related to TLS/SSL protocols and Netdata's internal configuration. The most common error is `error:00000006:lib(0):func(0):EVP lib`.
+
+In the near future, Netdata will allow users to change this internal configuration to avoid similar errors. Until then, we recommend using only the most common and safe encryption protocols, listed above.
+
+### Access lists
+
+Netdata supports access lists in `netdata.conf`:
+
+```
+[web]
+ allow connections from = localhost *
+ allow dashboard from = localhost *
+ allow badges from = *
+ allow streaming from = *
+ allow netdata.conf from = localhost fd* 10.* 192.168.* 172.16.* 172.17.* 172.18.* 172.19.* 172.20.* 172.21.* 172.22.* 172.23.* 172.24.* 172.25.* 172.26.* 172.27.* 172.28.* 172.29.* 172.30.* 172.31.*
+ allow management from = localhost
+```
+
+`*` performs string matching on the IPs or FQDNs of the clients.
+
+- `allow connections from` matches anyone that connects on the Netdata port(s).
+  A client that is not allowed is connected and disconnected immediately, without even
+  a single byte being read from its connection. This is a global setting with higher priority than any of the ones below.
+
+- `allow dashboard from` receives the request and checks whether it is for a static dashboard file or for
+  one of the API calls the dashboards make.
+
+- `allow badges from` checks if the API request is for a badge. Badges are not matched by `allow dashboard from`.
+
+- `allow streaming from` checks whether a child node attempting to stream metrics to this Netdata agent is allowed to do so.
+ This can be controlled per API KEY and MACHINE GUID in `stream.conf`.
+ The setting in `netdata.conf` is checked before the ones in `stream.conf`.
+
+- `allow netdata.conf from` checks the IPs allowed to fetch `http://netdata.host:19999/netdata.conf`.
+  The IPs listed above cover all the private IPv4 ranges and the IPv6 unique local prefix (`fd*`). Keep in mind that connections to Netdata API ports are filtered by `allow connections from`, so IPs allowed by `allow netdata.conf from` should also be allowed by `allow connections from`.
+
+- `allow management from` checks the IPs allowed to make API management calls. Management via the API is currently supported for [health](/web/api/health/README.md#health-management-api).
+
+In order to check the FQDN of the connection without opening the Netdata agent to DNS spoofing, a reverse DNS record
+must be set up for the connecting host. At connection time, the reverse DNS of the peer IP address is resolved, and
+a forward DNS resolution is then made to validate that the IP address matches the name pattern.
+
+Please note that this process can be expensive on a machine that is serving many connections. Each access list has an
+associated configuration option to turn off DNS-based patterns completely to avoid incurring this cost at run-time:
+
+```
+ allow connections by dns = heuristic
+ allow dashboard by dns = heuristic
+ allow badges by dns = heuristic
+ allow streaming by dns = heuristic
+ allow netdata.conf by dns = no
+ allow management by dns = heuristic
+```
+
+The three possible values for each of these options are `yes`, `no` and `heuristic`. The `heuristic` option disables
+the check when the pattern only contains IPv4/IPv6 addresses or `localhost`, and enables it when wildcards are
+present that may match DNS FQDNs.
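+
+For example, if an access list contains only literal addresses and `localhost`, the DNS check adds nothing and can also be disabled explicitly (a sketch using the options above):
+
+```
+[web]
+    allow management from = localhost 10.1.1.1
+    allow management by dns = no
+```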
+
+### Other netdata.conf [web] section options
+
+|setting|default|info|
+|:-----:|:-----:|:---|
+|ses max window|`15`|See [single exponential smoothing](/web/api/queries/ses/README.md)|
+|des max window|`15`|See [double exponential smoothing](/web/api/queries/des/README.md)|
+|listen backlog|`4096`|The port backlog. Check `man 2 listen`.|
+|web files owner|`netdata`|The user that owns the web static files. Netdata will refuse to serve a file that is not owned by this user, even if it has read access to that file. If the user given is not found, Netdata will only serve files owned by user given in `run as user`.|
+|web files group|`netdata`|If this is set, Netdata will check if the file is owned by this group and refuse to serve the file if it's not.|
+|disconnect idle clients after seconds|`60`|The time in seconds to disconnect web clients after being totally idle.|
+|timeout for first request|`60`|How long to wait for a client to send a request before closing the socket. Prevents slow request attacks.|
+|accept a streaming request every seconds|`0`|Can be used to set a limit on how often a parent node will accept streaming requests from child nodes in a [streaming and replication setup](/streaming/README.md)|
+|respect do not track policy|`no`|If set to `yes`, will respect the client's browser preferences on storing cookies.|
+|x-frame-options response header||[Avoid clickjacking attacks, by ensuring that the content is not embedded into other sites](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Frame-Options).|
+|enable gzip compression|`yes`|When set to `yes`, Netdata web responses will be GZIP compressed, if the web client accepts such responses.|
+|gzip compression strategy|`default`|Valid strategies are `default`, `filtered`, `huffman only`, `rle` and `fixed`|
+|gzip compression level|`3`|Valid levels are 1 (fastest) to 9 (best ratio)|
+
+## DDoS protection
+
+If you publish your Netdata to the internet, you may want to apply some protection against DDoS:
+
+1. Use the `static-threaded` web server (it is the default)
+2. Use a reasonable `[web].web server max sockets` (the default is computed from the max number of open files Netdata is allowed to use)
+3. Don't use all your CPU cores for Netdata (lower `[web].web server threads`)
+4. Run the `netdata` process with a low process scheduling priority (the default is the lowest)
+5. If possible, proxy Netdata via a full-featured web server (nginx, Apache, etc.); a sketch follows this list
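+
+As a sketch of the last point, a minimal nginx reverse proxy in front of a local agent could look like the following; the server name is illustrative and TLS termination is omitted:
+
+```
+server {
+    listen 80;
+    server_name netdata.example.com;
+
+    location / {
+        proxy_pass http://127.0.0.1:19999;
+        proxy_set_header Host $host;
+    }
+}
+```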
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fweb%2Fserver%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/web/server/static/Makefile.am b/web/server/static/Makefile.am
new file mode 100644
index 0000000..59250a9
--- /dev/null
+++ b/web/server/static/Makefile.am
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+SUBDIRS = \
+ $(NULL)
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/web/server/static/README.md b/web/server/static/README.md
new file mode 100644
index 0000000..e095f2e
--- /dev/null
+++ b/web/server/static/README.md
@@ -0,0 +1,17 @@
+<!--
+title: "`static-threaded` web server"
+description: "The Netdata Agent's static-threaded web server spawns a fixed number of threads that listen to web requests and uses non-blocking I/O."
+custom_edit_url: https://github.com/netdata/netdata/edit/master/web/server/static/README.md
+-->
+
+# `static-threaded` web server
+
+The `static-threaded` web server spawns a fixed number of threads.
+All the threads are concurrently listening for web requests on the same sockets.
+The kernel distributes the incoming requests to them.
+
+Each thread uses non-blocking I/O so it can serve any number of web requests in parallel.
+
+This web server respects the `keep-alive` HTTP header to serve multiple HTTP requests via the same connection.
+
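+To control the number of threads, use the `[web]` section of `netdata.conf`, documented in the [web server README](/web/server/README.md); a minimal sketch:
+
+```
+[web]
+    mode = static-threaded
+    web server threads = 4
+```
+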
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fweb%2Fserver%2Fstatic%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/web/server/static/static-threaded.c b/web/server/static/static-threaded.c
new file mode 100644
index 0000000..93e36de
--- /dev/null
+++ b/web/server/static/static-threaded.c
@@ -0,0 +1,503 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#define WEB_SERVER_INTERNALS 1
+#include "static-threaded.h"
+
+int web_client_timeout = DEFAULT_DISCONNECT_IDLE_WEB_CLIENTS_AFTER_SECONDS;
+int web_client_first_request_timeout = DEFAULT_TIMEOUT_TO_RECEIVE_FIRST_WEB_REQUEST;
+long web_client_streaming_rate_t = 0L;
+
+/*
+ * --------------------------------------------------------------------------------------------------------------------
+ * Build web_client state from the pollinfo that describes an accepted connection.
+ */
+static struct web_client *web_client_create_on_fd(POLLINFO *pi) {
+ struct web_client *w;
+
+ w = web_client_get_from_cache_or_allocate();
+ w->ifd = w->ofd = pi->fd;
+
+ strncpyz(w->client_ip, pi->client_ip, sizeof(w->client_ip) - 1);
+ strncpyz(w->client_port, pi->client_port, sizeof(w->client_port) - 1);
+ strncpyz(w->client_host, pi->client_host, sizeof(w->client_host) - 1);
+
+ if(unlikely(!*w->client_ip)) strcpy(w->client_ip, "-");
+ if(unlikely(!*w->client_port)) strcpy(w->client_port, "-");
+ w->port_acl = pi->port_acl;
+
+ web_client_initialize_connection(w);
+ w->pollinfo_slot = pi->slot;
+ return(w);
+}
+
+// --------------------------------------------------------------------------------------
+// the main socket listener - STATIC-THREADED
+
+struct web_server_static_threaded_worker {
+ netdata_thread_t thread;
+
+ int id;
+ int running;
+
+ size_t max_sockets;
+
+ volatile size_t connected;
+ volatile size_t disconnected;
+ volatile size_t receptions;
+ volatile size_t sends;
+ volatile size_t max_concurrent;
+
+ volatile size_t files_read;
+ volatile size_t file_reads;
+};
+
+static long long static_threaded_workers_count = 1;
+
+static struct web_server_static_threaded_worker *static_workers_private_data = NULL;
+static __thread struct web_server_static_threaded_worker *worker_private = NULL;
+
+// ----------------------------------------------------------------------------
+
+static inline int web_server_check_client_status(struct web_client *w) {
+ if(unlikely(web_client_check_dead(w) || (!web_client_has_wait_receive(w) && !web_client_has_wait_send(w))))
+ return -1;
+
+ return 0;
+}
+
+// ----------------------------------------------------------------------------
+// web server files
+
+static void *web_server_file_add_callback(POLLINFO *pi, short int *events, void *data) {
+ struct web_client *w = (struct web_client *)data;
+
+ worker_private->files_read++;
+
+ debug(D_WEB_CLIENT, "%llu: ADDED FILE READ ON FD %d", w->id, pi->fd);
+ *events = POLLIN;
+ pi->data = w;
+ return w;
+}
+
+static void web_server_file_del_callback(POLLINFO *pi) {
+ struct web_client *w = (struct web_client *)pi->data;
+ debug(D_WEB_CLIENT, "%llu: RELEASE FILE READ ON FD %d", w->id, pi->fd);
+
+ w->pollinfo_filecopy_slot = 0;
+
+ if(unlikely(!w->pollinfo_slot)) {
+ debug(D_WEB_CLIENT, "%llu: CROSS WEB CLIENT CLEANUP (iFD %d, oFD %d)", w->id, pi->fd, w->ofd);
+ web_client_release(w);
+ }
+}
+
+static int web_server_file_read_callback(POLLINFO *pi, short int *events) {
+ struct web_client *w = (struct web_client *)pi->data;
+
+ // if there is no POLLINFO linked to this, it means the client disconnected
+ // stop the file reading too
+ if(unlikely(!w->pollinfo_slot)) {
+ debug(D_WEB_CLIENT, "%llu: PREVENTED ATTEMPT TO READ FILE ON FD %d, ON CLOSED WEB CLIENT", w->id, pi->fd);
+ return -1;
+ }
+
+ if(unlikely(w->mode != WEB_CLIENT_MODE_FILECOPY || w->ifd == w->ofd)) {
+ debug(D_WEB_CLIENT, "%llu: PREVENTED ATTEMPT TO READ FILE ON FD %d, ON NON-FILECOPY WEB CLIENT", w->id, pi->fd);
+ return -1;
+ }
+
+ debug(D_WEB_CLIENT, "%llu: READING FILE ON FD %d", w->id, pi->fd);
+
+ worker_private->file_reads++;
+    ssize_t ret = web_client_read_file(w);
+
+ if(likely(web_client_has_wait_send(w))) {
+ POLLJOB *p = pi->p; // our POLLJOB
+ POLLINFO *wpi = pollinfo_from_slot(p, w->pollinfo_slot); // POLLINFO of the client socket
+
+ debug(D_WEB_CLIENT, "%llu: SIGNALING W TO SEND (iFD %d, oFD %d)", w->id, pi->fd, wpi->fd);
+ p->fds[wpi->slot].events |= POLLOUT;
+ }
+
+ if(unlikely(ret <= 0 || w->ifd == w->ofd)) {
+ debug(D_WEB_CLIENT, "%llu: DONE READING FILE ON FD %d", w->id, pi->fd);
+ return -1;
+ }
+
+ *events = POLLIN;
+ return 0;
+}
+
+static int web_server_file_write_callback(POLLINFO *pi, short int *events) {
+ (void)pi;
+ (void)events;
+
+ error("Writing to web files is not supported!");
+
+ return -1;
+}
+
+// ----------------------------------------------------------------------------
+// web server clients
+
+static void *web_server_add_callback(POLLINFO *pi, short int *events, void *data) {
+    (void)data; // Suppress warning on unused argument
+
+ worker_private->connected++;
+
+ size_t concurrent = worker_private->connected - worker_private->disconnected;
+ if(unlikely(concurrent > worker_private->max_concurrent))
+ worker_private->max_concurrent = concurrent;
+
+ *events = POLLIN;
+
+ debug(D_WEB_CLIENT_ACCESS, "LISTENER on %d: new connection.", pi->fd);
+ struct web_client *w = web_client_create_on_fd(pi);
+
+ if (!strncmp(pi->client_port, "UNIX", 4)) {
+ web_client_set_unix(w);
+ } else {
+ web_client_set_tcp(w);
+ }
+
+#ifdef ENABLE_HTTPS
+ if ((!web_client_check_unix(w)) && ( netdata_srv_ctx )) {
+ if( sock_delnonblock(w->ifd) < 0 ){
+ error("Web server cannot remove the non-blocking flag from socket %d",w->ifd);
+ }
+
+        // Peek at the first 7 bytes of the message; MSG_PEEK leaves the
+        // data in the socket queue.
+ char test[8];
+ if ( recv(w->ifd,test, 7,MSG_PEEK) == 7 ) {
+ test[7] = 0x00;
+ }
+        else {
+            // We could not read 7 bytes, so the message has not been received
+            // completely yet and cannot be identified.
+ sock_setnonblock(w->ifd);
+ return w;
+ }
+
+        // The next two ifs are kept separate because the SSL structure is reused.
+ if (!w->ssl.conn)
+ {
+ w->ssl.conn = SSL_new(netdata_srv_ctx);
+ if ( w->ssl.conn ) {
+ SSL_set_accept_state(w->ssl.conn);
+ } else {
+ error("Failed to create SSL context on socket fd %d.", w->ifd);
+ if (test[0] < 0x18){
+ WEB_CLIENT_IS_DEAD(w);
+ sock_setnonblock(w->ifd);
+ return w;
+ }
+ }
+ }
+
+ if (w->ssl.conn) {
+ if (SSL_set_fd(w->ssl.conn, w->ifd) != 1) {
+ error("Failed to set the socket to the SSL on socket fd %d.", w->ifd);
+                // The client is not marked dead, because a plain HTTP request
+                // was received instead of a TLS Client Hello.
+ if ( test[0] < 0x18 ){
+ WEB_CLIENT_IS_DEAD(w);
+ }
+ }
+ else{
+ w->ssl.flags = security_process_accept(w->ssl.conn, (int)test[0]);
+ }
+ }
+
+ sock_setnonblock(w->ifd);
+ } else{
+ w->ssl.flags = NETDATA_SSL_NO_HANDSHAKE;
+ }
+#endif
+
+ debug(D_WEB_CLIENT, "%llu: ADDED CLIENT FD %d", w->id, pi->fd);
+ return w;
+}
+
+// TCP client disconnected
+static void web_server_del_callback(POLLINFO *pi) {
+ worker_private->disconnected++;
+
+ struct web_client *w = (struct web_client *)pi->data;
+
+ w->pollinfo_slot = 0;
+ if(unlikely(w->pollinfo_filecopy_slot)) {
+ POLLINFO *fpi = pollinfo_from_slot(pi->p, w->pollinfo_filecopy_slot); // POLLINFO of the client socket
+ (void)fpi;
+
+        debug(D_WEB_CLIENT, "%llu: THE CLIENT WILL BE FREED BY THE FILE READING JOB ON FD %d", w->id, fpi->fd);
+ }
+ else {
+ if(web_client_flag_check(w, WEB_CLIENT_FLAG_DONT_CLOSE_SOCKET))
+ pi->flags |= POLLINFO_FLAG_DONT_CLOSE;
+
+ debug(D_WEB_CLIENT, "%llu: CLOSING CLIENT FD %d", w->id, pi->fd);
+ web_client_release(w);
+ }
+}
+
+static int web_server_rcv_callback(POLLINFO *pi, short int *events) {
+ worker_private->receptions++;
+
+ struct web_client *w = (struct web_client *)pi->data;
+ int fd = pi->fd;
+
+ if(unlikely(web_client_receive(w) < 0))
+ return -1;
+
+ debug(D_WEB_CLIENT, "%llu: processing received data on fd %d.", w->id, fd);
+ web_client_process_request(w);
+
+ if(unlikely(w->mode == WEB_CLIENT_MODE_FILECOPY)) {
+ if(w->pollinfo_filecopy_slot == 0) {
+ debug(D_WEB_CLIENT, "%llu: FILECOPY DETECTED ON FD %d", w->id, pi->fd);
+
+ if (unlikely(w->ifd != -1 && w->ifd != w->ofd && w->ifd != fd)) {
+ // add a new socket to poll_events, with the same
+ debug(D_WEB_CLIENT, "%llu: CREATING FILECOPY SLOT ON FD %d", w->id, pi->fd);
+
+ POLLINFO *fpi = poll_add_fd(
+ pi->p
+ , w->ifd
+ , pi->port_acl
+ , 0
+ , POLLINFO_FLAG_CLIENT_SOCKET
+ , "FILENAME"
+ , ""
+ , ""
+ , web_server_file_add_callback
+ , web_server_file_del_callback
+ , web_server_file_read_callback
+ , web_server_file_write_callback
+ , (void *) w
+ );
+
+ if(fpi)
+ w->pollinfo_filecopy_slot = fpi->slot;
+ else {
+ error("Failed to add filecopy fd. Closing client.");
+ return -1;
+ }
+ }
+ }
+ }
+ else {
+ if(unlikely(w->ifd == fd && web_client_has_wait_receive(w)))
+ *events |= POLLIN;
+ }
+
+ if(unlikely(w->ofd == fd && web_client_has_wait_send(w)))
+ *events |= POLLOUT;
+
+ return web_server_check_client_status(w);
+}
+
+static int web_server_snd_callback(POLLINFO *pi, short int *events) {
+ worker_private->sends++;
+
+ struct web_client *w = (struct web_client *)pi->data;
+ int fd = pi->fd;
+
+ debug(D_WEB_CLIENT, "%llu: sending data on fd %d.", w->id, fd);
+
+ if(unlikely(web_client_send(w) < 0))
+ return -1;
+
+ if(unlikely(w->ifd == fd && web_client_has_wait_receive(w)))
+ *events |= POLLIN;
+
+ if(unlikely(w->ofd == fd && web_client_has_wait_send(w)))
+ *events |= POLLOUT;
+
+ return web_server_check_client_status(w);
+}
+
+static void web_server_tmr_callback(void *timer_data) {
+ worker_private = (struct web_server_static_threaded_worker *)timer_data;
+
+ static __thread RRDSET *st = NULL;
+ static __thread RRDDIM *rd_user = NULL, *rd_system = NULL;
+
+ if(unlikely(netdata_exit)) return;
+
+ if(unlikely(!st)) {
+ char id[100 + 1];
+ char title[100 + 1];
+
+ snprintfz(id, 100, "web_thread%d_cpu", worker_private->id + 1);
+ snprintfz(title, 100, "NetData web server thread No %d CPU usage", worker_private->id + 1);
+
+ st = rrdset_create_localhost(
+ "netdata"
+ , id
+ , NULL
+ , "web"
+ , "netdata.web_cpu"
+ , title
+ , "milliseconds/s"
+ , "web"
+ , "stats"
+ , 132000 + worker_private->id
+ , default_rrd_update_every
+ , RRDSET_TYPE_STACKED
+ );
+
+ rd_user = rrddim_add(st, "user", NULL, 1, 1000, RRD_ALGORITHM_INCREMENTAL);
+ rd_system = rrddim_add(st, "system", NULL, 1, 1000, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else
+ rrdset_next(st);
+
+ struct rusage rusage;
+ getrusage(RUSAGE_THREAD, &rusage);
+ rrddim_set_by_pointer(st, rd_user, rusage.ru_utime.tv_sec * 1000000ULL + rusage.ru_utime.tv_usec);
+ rrddim_set_by_pointer(st, rd_system, rusage.ru_stime.tv_sec * 1000000ULL + rusage.ru_stime.tv_usec);
+ rrdset_done(st);
+}
+
+// ----------------------------------------------------------------------------
+// web server worker thread
+
+static void socket_listen_main_static_threaded_worker_cleanup(void *ptr) {
+ worker_private = (struct web_server_static_threaded_worker *)ptr;
+
+ info("freeing local web clients cache...");
+ web_client_cache_destroy();
+
+ info("stopped after %zu connects, %zu disconnects (max concurrent %zu), %zu receptions and %zu sends",
+ worker_private->connected,
+ worker_private->disconnected,
+ worker_private->max_concurrent,
+ worker_private->receptions,
+ worker_private->sends
+ );
+
+ worker_private->running = 0;
+}
+
+void *socket_listen_main_static_threaded_worker(void *ptr) {
+ worker_private = (struct web_server_static_threaded_worker *)ptr;
+ worker_private->running = 1;
+
+ netdata_thread_cleanup_push(socket_listen_main_static_threaded_worker_cleanup, ptr);
+
+ poll_events(&api_sockets
+ , web_server_add_callback
+ , web_server_del_callback
+ , web_server_rcv_callback
+ , web_server_snd_callback
+ , web_server_tmr_callback
+ , web_allow_connections_from
+ , web_allow_connections_dns
+ , NULL
+ , web_client_first_request_timeout
+ , web_client_timeout
+ , default_rrd_update_every * 1000 // timer_milliseconds
+ , ptr // timer_data
+ , worker_private->max_sockets
+ );
+
+ netdata_thread_cleanup_pop(1);
+ return NULL;
+}
+
+
+// ----------------------------------------------------------------------------
+// web server main thread - also becomes a worker
+
+static void socket_listen_main_static_threaded_cleanup(void *ptr) {
+ struct netdata_static_thread *static_thread = (struct netdata_static_thread *)ptr;
+ static_thread->enabled = NETDATA_MAIN_THREAD_EXITING;
+
+ int i, found = 0;
+ usec_t max = 2 * USEC_PER_SEC, step = 50000;
+
+ // we start from 1, - 0 is self
+ for(i = 1; i < static_threaded_workers_count; i++) {
+ if(static_workers_private_data[i].running) {
+ found++;
+ info("stopping worker %d", i + 1);
+ netdata_thread_cancel(static_workers_private_data[i].thread);
+ }
+ else
+ info("found stopped worker %d", i + 1);
+ }
+
+ while(found && max > 0) {
+ max -= step;
+ info("Waiting %d static web threads to finish...", found);
+ sleep_usec(step);
+ found = 0;
+
+ // we start from 1, - 0 is self
+ for(i = 1; i < static_threaded_workers_count; i++) {
+ if (static_workers_private_data[i].running)
+ found++;
+ }
+ }
+
+ if(found)
+ error("%d static web threads are taking too long to finish. Giving up.", found);
+
+ info("closing all web server sockets...");
+ listen_sockets_close(&api_sockets);
+
+ info("all static web threads stopped.");
+ static_thread->enabled = NETDATA_MAIN_THREAD_EXITED;
+}
+
+void *socket_listen_main_static_threaded(void *ptr) {
+ netdata_thread_cleanup_push(socket_listen_main_static_threaded_cleanup, ptr);
+ web_server_mode = WEB_SERVER_MODE_STATIC_THREADED;
+
+ if(!api_sockets.opened)
+ fatal("LISTENER: no listen sockets available.");
+
+#ifdef ENABLE_HTTPS
+ security_start_ssl(NETDATA_SSL_CONTEXT_SERVER);
+#endif
+    // 6 threads is the optimal value, since browsers open up to 6 parallel
+    // connections per server; if the machine has more CPUs, avoid using
+    // resources unnecessarily.
+ int def_thread_count = (processors > 6)?6:processors;
+
+ if (!strcmp(config_get(CONFIG_SECTION_WEB, "mode", ""),"single-threaded")) {
+ info("Running web server with one thread, because mode is single-threaded");
+ config_set(CONFIG_SECTION_WEB, "mode", "static-threaded");
+ def_thread_count = 1;
+ }
+ static_threaded_workers_count = config_get_number(CONFIG_SECTION_WEB, "web server threads", def_thread_count);
+
+ if(static_threaded_workers_count < 1) static_threaded_workers_count = 1;
+
+ size_t max_sockets = (size_t)config_get_number(CONFIG_SECTION_WEB, "web server max sockets", (long long int)(rlimit_nofile.rlim_cur / 4));
+
+ static_workers_private_data = callocz((size_t)static_threaded_workers_count, sizeof(struct web_server_static_threaded_worker));
+
+ web_server_is_multithreaded = (static_threaded_workers_count > 1);
+
+ int i;
+ for(i = 1; i < static_threaded_workers_count; i++) {
+ static_workers_private_data[i].id = i;
+ static_workers_private_data[i].max_sockets = max_sockets / static_threaded_workers_count;
+
+ char tag[50 + 1];
+ snprintfz(tag, 50, "WEB_SERVER[static%d]", i+1);
+
+ info("starting worker %d", i+1);
+ netdata_thread_create(&static_workers_private_data[i].thread, tag, NETDATA_THREAD_OPTION_DEFAULT, socket_listen_main_static_threaded_worker, (void *)&static_workers_private_data[i]);
+ }
+
+ // and the main one
+ static_workers_private_data[0].max_sockets = max_sockets / static_threaded_workers_count;
+ socket_listen_main_static_threaded_worker((void *)&static_workers_private_data[0]);
+
+ netdata_thread_cleanup_pop(1);
+ return NULL;
+}
diff --git a/web/server/static/static-threaded.h b/web/server/static/static-threaded.h
new file mode 100644
index 0000000..5f4862e
--- /dev/null
+++ b/web/server/static/static-threaded.h
@@ -0,0 +1,10 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_WEB_SERVER_STATIC_THREADED_H
+#define NETDATA_WEB_SERVER_STATIC_THREADED_H
+
+#include "web/server/web_server.h"
+
+extern void *socket_listen_main_static_threaded(void *ptr);
+
+#endif //NETDATA_WEB_SERVER_STATIC_THREADED_H
diff --git a/web/server/web_client.c b/web/server/web_client.c
new file mode 100644
index 0000000..f0856fb
--- /dev/null
+++ b/web/server/web_client.c
@@ -0,0 +1,2035 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "web_client.h"
+
+// this is an async I/O implementation of the web server request parser
+// it is used by all netdata web servers
+
+int respect_web_browser_do_not_track_policy = 0;
+char *web_x_frame_options = NULL;
+
+#ifdef NETDATA_WITH_ZLIB
+int web_enable_gzip = 1, web_gzip_level = 3, web_gzip_strategy = Z_DEFAULT_STRATEGY;
+#endif /* NETDATA_WITH_ZLIB */
+
+inline int web_client_permission_denied(struct web_client *w) {
+ w->response.data->contenttype = CT_TEXT_PLAIN;
+ buffer_flush(w->response.data);
+ buffer_strcat(w->response.data, "You are not allowed to access this resource.");
+ w->response.code = HTTP_RESP_FORBIDDEN;
+ return HTTP_RESP_FORBIDDEN;
+}
+
+static inline int web_client_crock_socket(struct web_client *w) {
+#ifdef TCP_CORK
+ if(likely(web_client_is_corkable(w) && !w->tcp_cork && w->ofd != -1)) {
+ w->tcp_cork = 1;
+ if(unlikely(setsockopt(w->ofd, IPPROTO_TCP, TCP_CORK, (char *) &w->tcp_cork, sizeof(int)) != 0)) {
+ error("%llu: failed to enable TCP_CORK on socket.", w->id);
+
+ w->tcp_cork = 0;
+ return -1;
+ }
+ }
+#else
+ (void)w;
+#endif /* TCP_CORK */
+
+ return 0;
+}
+
+static inline int web_client_uncrock_socket(struct web_client *w) {
+#ifdef TCP_CORK
+ if(likely(w->tcp_cork && w->ofd != -1)) {
+ w->tcp_cork = 0;
+ if(unlikely(setsockopt(w->ofd, IPPROTO_TCP, TCP_CORK, (char *) &w->tcp_cork, sizeof(int)) != 0)) {
+ error("%llu: failed to disable TCP_CORK on socket.", w->id);
+ w->tcp_cork = 1;
+ return -1;
+ }
+ }
+#else
+ (void)w;
+#endif /* TCP_CORK */
+
+ return 0;
+}
+
+static inline char *strip_control_characters(char *url) {
+ char *s = url;
+ if(!s) return "";
+
+ if(iscntrl(*s)) *s = ' ';
+ while(*++s) {
+ if(iscntrl(*s)) *s = ' ';
+ }
+
+ return url;
+}
+
+void web_client_request_done(struct web_client *w) {
+ web_client_uncrock_socket(w);
+
+ debug(D_WEB_CLIENT, "%llu: Resetting client.", w->id);
+
+ if(likely(w->last_url[0])) {
+ struct timeval tv;
+ now_realtime_timeval(&tv);
+
+ size_t size = (w->mode == WEB_CLIENT_MODE_FILECOPY)?w->response.rlen:w->response.data->len;
+ size_t sent = size;
+#ifdef NETDATA_WITH_ZLIB
+ if(likely(w->response.zoutput)) sent = (size_t)w->response.zstream.total_out;
+#endif
+
+ // --------------------------------------------------------------------
+ // global statistics
+
+ finished_web_request_statistics(dt_usec(&tv, &w->tv_in),
+ w->stats_received_bytes,
+ w->stats_sent_bytes,
+ size,
+ sent);
+
+ w->stats_received_bytes = 0;
+ w->stats_sent_bytes = 0;
+
+
+ // --------------------------------------------------------------------
+
+ const char *mode;
+ switch(w->mode) {
+ case WEB_CLIENT_MODE_FILECOPY:
+ mode = "FILECOPY";
+ break;
+
+ case WEB_CLIENT_MODE_OPTIONS:
+ mode = "OPTIONS";
+ break;
+
+ case WEB_CLIENT_MODE_STREAM:
+ mode = "STREAM";
+ break;
+
+ case WEB_CLIENT_MODE_NORMAL:
+ mode = "DATA";
+ break;
+
+ default:
+ mode = "UNKNOWN";
+ break;
+ }
+
+ // access log
+ log_access("%llu: %d '[%s]:%s' '%s' (sent/all = %zu/%zu bytes %0.0f%%, prep/sent/total = %0.2f/%0.2f/%0.2f ms) %d '%s'",
+ w->id
+ , gettid()
+ , w->client_ip
+ , w->client_port
+ , mode
+ , sent
+ , size
+ , -((size > 0) ? ((size - sent) / (double) size * 100.0) : 0.0)
+ , dt_usec(&w->tv_ready, &w->tv_in) / 1000.0
+ , dt_usec(&tv, &w->tv_ready) / 1000.0
+ , dt_usec(&tv, &w->tv_in) / 1000.0
+ , w->response.code
+ , strip_control_characters(w->last_url)
+ );
+ }
+
+ if(unlikely(w->mode == WEB_CLIENT_MODE_FILECOPY)) {
+ if(w->ifd != w->ofd) {
+ debug(D_WEB_CLIENT, "%llu: Closing filecopy input file descriptor %d.", w->id, w->ifd);
+
+ if(web_server_mode != WEB_SERVER_MODE_STATIC_THREADED) {
+ if (w->ifd != -1){
+ close(w->ifd);
+ }
+ }
+
+ w->ifd = w->ofd;
+ }
+ }
+
+ w->last_url[0] = '\0';
+ w->cookie1[0] = '\0';
+ w->cookie2[0] = '\0';
+ w->origin[0] = '*';
+ w->origin[1] = '\0';
+
+ freez(w->user_agent); w->user_agent = NULL;
+ if (w->auth_bearer_token) {
+ freez(w->auth_bearer_token);
+ w->auth_bearer_token = NULL;
+ }
+
+ w->mode = WEB_CLIENT_MODE_NORMAL;
+
+ w->tcp_cork = 0;
+ web_client_disable_donottrack(w);
+ web_client_disable_tracking_required(w);
+ web_client_disable_keepalive(w);
+ w->decoded_url[0] = '\0';
+
+ buffer_reset(w->response.header_output);
+ buffer_reset(w->response.header);
+ buffer_reset(w->response.data);
+ w->response.rlen = 0;
+ w->response.sent = 0;
+ w->response.code = 0;
+
+ w->header_parse_tries = 0;
+ w->header_parse_last_size = 0;
+
+ web_client_enable_wait_receive(w);
+ web_client_disable_wait_send(w);
+
+ w->response.zoutput = 0;
+
+ // if we had enabled compression, release it
+#ifdef NETDATA_WITH_ZLIB
+ if(w->response.zinitialized) {
+ debug(D_DEFLATE, "%llu: Freeing compression resources.", w->id);
+ deflateEnd(&w->response.zstream);
+ w->response.zsent = 0;
+ w->response.zhave = 0;
+ w->response.zstream.avail_in = 0;
+ w->response.zstream.avail_out = 0;
+ w->response.zstream.total_in = 0;
+ w->response.zstream.total_out = 0;
+ w->response.zinitialized = 0;
+ w->flags &= ~WEB_CLIENT_CHUNKED_TRANSFER;
+ }
+#endif // NETDATA_WITH_ZLIB
+}
+
+uid_t web_files_uid(void) {
+ static char *web_owner = NULL;
+ static uid_t owner_uid = 0;
+
+ if(unlikely(!web_owner)) {
+ // getpwuid() is not thread safe,
+ // but we have called this function once
+ // while single threaded
+ struct passwd *pw = getpwuid(geteuid());
+ web_owner = config_get(CONFIG_SECTION_WEB, "web files owner", (pw)?(pw->pw_name?pw->pw_name:""):"");
+ if(!web_owner || !*web_owner)
+ owner_uid = geteuid();
+ else {
+ // getpwnam() is not thread safe,
+ // but we have called this function once
+ // while single threaded
+ pw = getpwnam(web_owner);
+ if(!pw) {
+ error("User '%s' is not present. Ignoring option.", web_owner);
+ owner_uid = geteuid();
+ }
+ else {
+ debug(D_WEB_CLIENT, "Web files owner set to %s.", web_owner);
+ owner_uid = pw->pw_uid;
+ }
+ }
+ }
+
+ return(owner_uid);
+}
+
+gid_t web_files_gid(void) {
+ static char *web_group = NULL;
+ static gid_t owner_gid = 0;
+
+ if(unlikely(!web_group)) {
+ // getgrgid() is not thread safe,
+ // but we have called this function once
+ // while single threaded
+ struct group *gr = getgrgid(getegid());
+ web_group = config_get(CONFIG_SECTION_WEB, "web files group", (gr)?(gr->gr_name?gr->gr_name:""):"");
+ if(!web_group || !*web_group)
+ owner_gid = getegid();
+ else {
+ // getgrnam() is not thread safe,
+ // but we have called this function once
+ // while single threaded
+ gr = getgrnam(web_group);
+ if(!gr) {
+ error("Group '%s' is not present. Ignoring option.", web_group);
+ owner_gid = getegid();
+ }
+ else {
+ debug(D_WEB_CLIENT, "Web files group set to %s.", web_group);
+ owner_gid = gr->gr_gid;
+ }
+ }
+ }
+
+ return(owner_gid);
+}
+
+static struct {
+ const char *extension;
+ uint32_t hash;
+ uint8_t contenttype;
+} mime_types[] = {
+ { "html" , 0 , CT_TEXT_HTML}
+ , {"js" , 0 , CT_APPLICATION_X_JAVASCRIPT}
+ , {"css" , 0 , CT_TEXT_CSS}
+ , {"xml" , 0 , CT_TEXT_XML}
+ , {"xsl" , 0 , CT_TEXT_XSL}
+ , {"txt" , 0 , CT_TEXT_PLAIN}
+ , {"svg" , 0 , CT_IMAGE_SVG_XML}
+ , {"ttf" , 0 , CT_APPLICATION_X_FONT_TRUETYPE}
+ , {"otf" , 0 , CT_APPLICATION_X_FONT_OPENTYPE}
+ , {"woff2", 0 , CT_APPLICATION_FONT_WOFF2}
+ , {"woff" , 0 , CT_APPLICATION_FONT_WOFF}
+ , {"eot" , 0 , CT_APPLICATION_VND_MS_FONTOBJ}
+ , {"png" , 0 , CT_IMAGE_PNG}
+ , {"jpg" , 0 , CT_IMAGE_JPG}
+ , {"jpeg" , 0 , CT_IMAGE_JPG}
+ , {"gif" , 0 , CT_IMAGE_GIF}
+ , {"bmp" , 0 , CT_IMAGE_BMP}
+ , {"ico" , 0 , CT_IMAGE_XICON}
+ , {"icns" , 0 , CT_IMAGE_ICNS}
+ , { NULL, 0, 0}
+};
+
+static inline uint8_t contenttype_for_filename(const char *filename) {
+ // info("checking filename '%s'", filename);
+
+ static int initialized = 0;
+ int i;
+
+ if(unlikely(!initialized)) {
+ for (i = 0; mime_types[i].extension; i++)
+ mime_types[i].hash = simple_hash(mime_types[i].extension);
+
+ initialized = 1;
+ }
+
+ const char *s = filename, *last_dot = NULL;
+
+ // find the last dot
+ while(*s) {
+ if(unlikely(*s == '.')) last_dot = s;
+ s++;
+ }
+
+ if(unlikely(!last_dot || !*last_dot || !last_dot[1])) {
+ // info("no extension for filename '%s'", filename);
+ return CT_APPLICATION_OCTET_STREAM;
+ }
+ last_dot++;
+
+ // info("extension for filename '%s' is '%s'", filename, last_dot);
+
+ uint32_t hash = simple_hash(last_dot);
+ for(i = 0; mime_types[i].extension ; i++) {
+ if(unlikely(hash == mime_types[i].hash && !strcmp(last_dot, mime_types[i].extension))) {
+ // info("matched extension for filename '%s': '%s'", filename, last_dot);
+ return mime_types[i].contenttype;
+ }
+ }
+
+ // info("not matched extension for filename '%s': '%s'", filename, last_dot);
+ return CT_APPLICATION_OCTET_STREAM;
+}
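+
+// Illustrative examples (not in the original source) of the mapping above:
+//   contenttype_for_filename("dashboard.js")   -> CT_APPLICATION_X_JAVASCRIPT
+//   contenttype_for_filename("archive.tar.gz") -> CT_APPLICATION_OCTET_STREAM ("gz" is not listed)
+//   contenttype_for_filename("README")         -> CT_APPLICATION_OCTET_STREAM (no extension)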
+
+static inline int access_to_file_is_not_permitted(struct web_client *w, const char *filename) {
+ w->response.data->contenttype = CT_TEXT_HTML;
+ buffer_strcat(w->response.data, "Access to file is not permitted: ");
+ buffer_strcat_htmlescape(w->response.data, filename);
+ return HTTP_RESP_FORBIDDEN;
+}
+
+// Work around a bug in the CMocka library by removing this function during testing.
+#ifndef REMOVE_MYSENDFILE
+int mysendfile(struct web_client *w, char *filename) {
+ debug(D_WEB_CLIENT, "%llu: Looking for file '%s/%s'", w->id, netdata_configured_web_dir, filename);
+
+ if(!web_client_can_access_dashboard(w))
+ return web_client_permission_denied(w);
+
+ // skip leading slashes
+ while (*filename == '/') filename++;
+
+ // if the filename contains "strange" characters, refuse to serve it
+ char *s;
+ for(s = filename; *s ;s++) {
+ if( !isalnum(*s) && *s != '/' && *s != '.' && *s != '-' && *s != '_') {
+ debug(D_WEB_CLIENT_ACCESS, "%llu: File '%s' is not acceptable.", w->id, filename);
+ w->response.data->contenttype = CT_TEXT_HTML;
+ buffer_sprintf(w->response.data, "Filename contains invalid characters: ");
+ buffer_strcat_htmlescape(w->response.data, filename);
+ return HTTP_RESP_BAD_REQUEST;
+ }
+ }
+
+ // if the filename contains a .. refuse to serve it
+ if(strstr(filename, "..") != 0) {
+ debug(D_WEB_CLIENT_ACCESS, "%llu: File '%s' is not acceptable.", w->id, filename);
+ w->response.data->contenttype = CT_TEXT_HTML;
+ buffer_strcat(w->response.data, "Relative filenames are not supported: ");
+ buffer_strcat_htmlescape(w->response.data, filename);
+ return HTTP_RESP_BAD_REQUEST;
+ }
+
+ // find the physical file on disk
+ char webfilename[FILENAME_MAX + 1];
+ snprintfz(webfilename, FILENAME_MAX, "%s/%s", netdata_configured_web_dir, filename);
+
+ struct stat statbuf;
+ int done = 0;
+ while(!done) {
+ // check if the file exists
+ if (lstat(webfilename, &statbuf) != 0) {
+ debug(D_WEB_CLIENT_ACCESS, "%llu: File '%s' is not found.", w->id, webfilename);
+ w->response.data->contenttype = CT_TEXT_HTML;
+ buffer_strcat(w->response.data, "File does not exist, or is not accessible: ");
+ buffer_strcat_htmlescape(w->response.data, webfilename);
+ return HTTP_RESP_NOT_FOUND;
+ }
+
+ if ((statbuf.st_mode & S_IFMT) == S_IFDIR) {
+ snprintfz(webfilename, FILENAME_MAX, "%s/%s/index.html", netdata_configured_web_dir, filename);
+ continue;
+ }
+
+ if ((statbuf.st_mode & S_IFMT) != S_IFREG) {
+ error("%llu: File '%s' is not a regular file. Access Denied.", w->id, webfilename);
+ return access_to_file_is_not_permitted(w, webfilename);
+ }
+
+ // check if the file is owned by expected user
+ if (statbuf.st_uid != web_files_uid()) {
+ error("%llu: File '%s' is owned by user %u (expected user %u). Access Denied.", w->id, webfilename, statbuf.st_uid, web_files_uid());
+ return access_to_file_is_not_permitted(w, webfilename);
+ }
+
+ // check if the file is owned by expected group
+ if (statbuf.st_gid != web_files_gid()) {
+ error("%llu: File '%s' is owned by group %u (expected group %u). Access Denied.", w->id, webfilename, statbuf.st_gid, web_files_gid());
+ return access_to_file_is_not_permitted(w, webfilename);
+ }
+
+ done = 1;
+ }
+
+ // open the file
+    w->ifd = open(webfilename, O_RDONLY | O_NONBLOCK);
+ if(w->ifd == -1) {
+ w->ifd = w->ofd;
+
+ if(errno == EBUSY || errno == EAGAIN) {
+            error("%llu: File '%s' is busy, sending 307 Temporary Redirect to force retry.", w->id, webfilename);
+ w->response.data->contenttype = CT_TEXT_HTML;
+ buffer_sprintf(w->response.header, "Location: /%s\r\n", filename);
+ buffer_strcat(w->response.data, "File is currently busy, please try again later: ");
+ buffer_strcat_htmlescape(w->response.data, webfilename);
+ return HTTP_RESP_REDIR_TEMP;
+ }
+ else {
+ error("%llu: Cannot open file '%s'.", w->id, webfilename);
+ w->response.data->contenttype = CT_TEXT_HTML;
+ buffer_strcat(w->response.data, "Cannot open file: ");
+ buffer_strcat_htmlescape(w->response.data, webfilename);
+ return HTTP_RESP_NOT_FOUND;
+ }
+ }
+
+ sock_setnonblock(w->ifd);
+
+ w->response.data->contenttype = contenttype_for_filename(webfilename);
+ debug(D_WEB_CLIENT_ACCESS, "%llu: Sending file '%s' (%ld bytes, ifd %d, ofd %d).", w->id, webfilename, statbuf.st_size, w->ifd, w->ofd);
+
+ w->mode = WEB_CLIENT_MODE_FILECOPY;
+ web_client_enable_wait_receive(w);
+ web_client_disable_wait_send(w);
+ buffer_flush(w->response.data);
+ buffer_need_bytes(w->response.data, (size_t)statbuf.st_size);
+ w->response.rlen = (size_t)statbuf.st_size;
+#ifdef __APPLE__
+ w->response.data->date = statbuf.st_mtimespec.tv_sec;
+#else
+ w->response.data->date = statbuf.st_mtim.tv_sec;
+#endif
+ buffer_cacheable(w->response.data);
+
+ return HTTP_RESP_OK;
+}
+#endif
+
+
+
+#ifdef NETDATA_WITH_ZLIB
+void web_client_enable_deflate(struct web_client *w, int gzip) {
+ if(unlikely(w->response.zinitialized)) {
+        debug(D_DEFLATE, "%llu: Compression has already been initialized for this client.", w->id);
+ return;
+ }
+
+ if(unlikely(w->response.sent)) {
+ error("%llu: Cannot enable compression in the middle of a conversation.", w->id);
+ return;
+ }
+
+ w->response.zstream.zalloc = Z_NULL;
+ w->response.zstream.zfree = Z_NULL;
+ w->response.zstream.opaque = Z_NULL;
+
+ w->response.zstream.next_in = (Bytef *)w->response.data->buffer;
+ w->response.zstream.avail_in = 0;
+ w->response.zstream.total_in = 0;
+
+ w->response.zstream.next_out = w->response.zbuffer;
+ w->response.zstream.avail_out = 0;
+ w->response.zstream.total_out = 0;
+
+// if(deflateInit(&w->response.zstream, Z_DEFAULT_COMPRESSION) != Z_OK) {
+// error("%llu: Failed to initialize zlib. Proceeding without compression.", w->id);
+// return;
+// }
+
+ // Select GZIP compression: windowbits = 15 + 16 = 31
+ if(deflateInit2(&w->response.zstream, web_gzip_level, Z_DEFLATED, 15 + ((gzip)?16:0), 8, web_gzip_strategy) != Z_OK) {
+ error("%llu: Failed to initialize zlib. Proceeding without compression.", w->id);
+ return;
+ }
+
+ w->response.zsent = 0;
+ w->response.zoutput = 1;
+ w->response.zinitialized = 1;
+ w->flags |= WEB_CLIENT_CHUNKED_TRANSFER;
+
+ debug(D_DEFLATE, "%llu: Initialized compression.", w->id);
+}
+#endif // NETDATA_WITH_ZLIB
+
+void buffer_data_options2string(BUFFER *wb, uint32_t options) {
+ int count = 0;
+
+ if(options & RRDR_OPTION_NONZERO) {
+ if(count++) buffer_strcat(wb, " ");
+ buffer_strcat(wb, "nonzero");
+ }
+
+ if(options & RRDR_OPTION_REVERSED) {
+ if(count++) buffer_strcat(wb, " ");
+ buffer_strcat(wb, "flip");
+ }
+
+ if(options & RRDR_OPTION_JSON_WRAP) {
+ if(count++) buffer_strcat(wb, " ");
+ buffer_strcat(wb, "jsonwrap");
+ }
+
+ if(options & RRDR_OPTION_MIN2MAX) {
+ if(count++) buffer_strcat(wb, " ");
+ buffer_strcat(wb, "min2max");
+ }
+
+ if(options & RRDR_OPTION_MILLISECONDS) {
+ if(count++) buffer_strcat(wb, " ");
+ buffer_strcat(wb, "ms");
+ }
+
+ if(options & RRDR_OPTION_ABSOLUTE) {
+ if(count++) buffer_strcat(wb, " ");
+ buffer_strcat(wb, "absolute");
+ }
+
+ if(options & RRDR_OPTION_SECONDS) {
+ if(count++) buffer_strcat(wb, " ");
+ buffer_strcat(wb, "seconds");
+ }
+
+ if(options & RRDR_OPTION_NULL2ZERO) {
+ if(count++) buffer_strcat(wb, " ");
+ buffer_strcat(wb, "null2zero");
+ }
+
+ if(options & RRDR_OPTION_OBJECTSROWS) {
+ if(count++) buffer_strcat(wb, " ");
+ buffer_strcat(wb, "objectrows");
+ }
+
+ if(options & RRDR_OPTION_GOOGLE_JSON) {
+ if(count++) buffer_strcat(wb, " ");
+ buffer_strcat(wb, "google_json");
+ }
+
+ if(options & RRDR_OPTION_PERCENTAGE) {
+ if(count++) buffer_strcat(wb, " ");
+ buffer_strcat(wb, "percentage");
+ }
+
+ if(options & RRDR_OPTION_NOT_ALIGNED) {
+ if(count++) buffer_strcat(wb, " ");
+ buffer_strcat(wb, "unaligned");
+ }
+}
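+
+// Example (illustrative): for options = RRDR_OPTION_NONZERO | RRDR_OPTION_SECONDS
+// this appends "nonzero seconds" to wb, i.e. the names of the enabled options
+// separated by single spaces.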
+
+static inline int check_host_and_call(RRDHOST *host, struct web_client *w, char *url, int (*func)(RRDHOST *, struct web_client *, char *)) {
+ //if(unlikely(host->rrd_memory_mode == RRD_MEMORY_MODE_NONE)) {
+ // buffer_flush(w->response.data);
+ // buffer_strcat(w->response.data, "This host does not maintain a database");
+ // return HTTP_RESP_BAD_REQUEST;
+ //}
+
+ return func(host, w, url);
+}
+
+static inline int check_host_and_dashboard_acl_and_call(RRDHOST *host, struct web_client *w, char *url, int (*func)(RRDHOST *, struct web_client *, char *)) {
+ if(!web_client_can_access_dashboard(w))
+ return web_client_permission_denied(w);
+
+ return check_host_and_call(host, w, url, func);
+}
+
+static inline int check_host_and_mgmt_acl_and_call(RRDHOST *host, struct web_client *w, char *url, int (*func)(RRDHOST *, struct web_client *, char *)) {
+ if(!web_client_can_access_mgmt(w))
+ return web_client_permission_denied(w);
+
+ return check_host_and_call(host, w, url, func);
+}
+
+int web_client_api_request(RRDHOST *host, struct web_client *w, char *url)
+{
+ // get the api version
+ char *tok = mystrsep(&url, "/");
+ if(tok && *tok) {
+ debug(D_WEB_CLIENT, "%llu: Searching for API version '%s'.", w->id, tok);
+ if(strcmp(tok, "v1") == 0)
+ return web_client_api_request_v1(host, w, url);
+ else {
+ buffer_flush(w->response.data);
+ w->response.data->contenttype = CT_TEXT_HTML;
+ buffer_strcat(w->response.data, "Unsupported API version: ");
+ buffer_strcat_htmlescape(w->response.data, tok);
+ return HTTP_RESP_NOT_FOUND;
+ }
+ }
+ else {
+ buffer_flush(w->response.data);
+ buffer_sprintf(w->response.data, "Which API version?");
+ return HTTP_RESP_BAD_REQUEST;
+ }
+}
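+
+// Example (illustrative): a request for /api/v1/info reaches this function
+// with url = "v1/info"; the "v1" token is consumed here and the remainder
+// ("info") is forwarded to web_client_api_request_v1().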
+
+const char *web_content_type_to_string(uint8_t contenttype) {
+ switch(contenttype) {
+ case CT_TEXT_HTML:
+ return "text/html; charset=utf-8";
+
+ case CT_APPLICATION_XML:
+ return "application/xml; charset=utf-8";
+
+ case CT_APPLICATION_JSON:
+ return "application/json; charset=utf-8";
+
+ case CT_APPLICATION_X_JAVASCRIPT:
+ return "application/x-javascript; charset=utf-8";
+
+ case CT_TEXT_CSS:
+ return "text/css; charset=utf-8";
+
+ case CT_TEXT_XML:
+ return "text/xml; charset=utf-8";
+
+ case CT_TEXT_XSL:
+ return "text/xsl; charset=utf-8";
+
+ case CT_APPLICATION_OCTET_STREAM:
+ return "application/octet-stream";
+
+ case CT_IMAGE_SVG_XML:
+ return "image/svg+xml";
+
+ case CT_APPLICATION_X_FONT_TRUETYPE:
+ return "application/x-font-truetype";
+
+ case CT_APPLICATION_X_FONT_OPENTYPE:
+ return "application/x-font-opentype";
+
+ case CT_APPLICATION_FONT_WOFF:
+ return "application/font-woff";
+
+ case CT_APPLICATION_FONT_WOFF2:
+ return "application/font-woff2";
+
+ case CT_APPLICATION_VND_MS_FONTOBJ:
+ return "application/vnd.ms-fontobject";
+
+ case CT_IMAGE_PNG:
+ return "image/png";
+
+ case CT_IMAGE_JPG:
+ return "image/jpeg";
+
+ case CT_IMAGE_GIF:
+ return "image/gif";
+
+ case CT_IMAGE_XICON:
+ return "image/x-icon";
+
+ case CT_IMAGE_BMP:
+ return "image/bmp";
+
+ case CT_IMAGE_ICNS:
+ return "image/icns";
+
+ case CT_PROMETHEUS:
+ return "text/plain; version=0.0.4";
+
+ default:
+ case CT_TEXT_PLAIN:
+ return "text/plain; charset=utf-8";
+ }
+}
+
+
+const char *web_response_code_to_string(int code) {
+ switch(code) {
+ case HTTP_RESP_OK:
+ return "OK";
+
+ case HTTP_RESP_MOVED_PERM:
+ return "Moved Permanently";
+
+ case HTTP_RESP_REDIR_TEMP:
+ return "Temporary Redirect";
+
+ case HTTP_RESP_BAD_REQUEST:
+ return "Bad Request";
+
+ case HTTP_RESP_FORBIDDEN:
+ return "Forbidden";
+
+ case HTTP_RESP_NOT_FOUND:
+ return "Not Found";
+
+ case HTTP_RESP_PRECOND_FAIL:
+ return "Preconditions Failed";
+
+ default:
+ if(code >= 100 && code < 200)
+ return "Informational";
+
+ if(code >= 200 && code < 300)
+ return "Successful";
+
+ if(code >= 300 && code < 400)
+ return "Redirection";
+
+ if(code >= 400 && code < 500)
+ return "Bad Request";
+
+ if(code >= 500 && code < 600)
+ return "Server Error";
+
+ return "Undefined Error";
+ }
+}
+
+static inline char *http_header_parse(struct web_client *w, char *s, int parse_useragent) {
+ static uint32_t hash_origin = 0, hash_connection = 0, hash_donottrack = 0, hash_useragent = 0,
+ hash_authorization = 0, hash_host = 0, hash_forwarded_proto = 0, hash_forwarded_host = 0;
+#ifdef NETDATA_WITH_ZLIB
+ static uint32_t hash_accept_encoding = 0;
+#endif
+
+ if(unlikely(!hash_origin)) {
+ hash_origin = simple_uhash("Origin");
+ hash_connection = simple_uhash("Connection");
+#ifdef NETDATA_WITH_ZLIB
+ hash_accept_encoding = simple_uhash("Accept-Encoding");
+#endif
+ hash_donottrack = simple_uhash("DNT");
+ hash_useragent = simple_uhash("User-Agent");
+ hash_authorization = simple_uhash("X-Auth-Token");
+ hash_host = simple_uhash("Host");
+ hash_forwarded_proto = simple_uhash("X-Forwarded-Proto");
+ hash_forwarded_host = simple_uhash("X-Forwarded-Host");
+ }
+
+ char *e = s;
+
+ // find the :
+ while(*e && *e != ':') e++;
+ if(!*e) return e;
+
+ // get the name
+ *e = '\0';
+
+ // find the value
+ char *v = e + 1, *ve;
+
+ // skip leading spaces from value
+ while(*v == ' ') v++;
+ ve = v;
+
+ // find the \r
+ while(*ve && *ve != '\r') ve++;
+ if(!*ve || ve[1] != '\n') {
+ *e = ':';
+ return ve;
+ }
+
+ // terminate the value
+ *ve = '\0';
+
+ uint32_t hash = simple_uhash(s);
+
+ if(hash == hash_origin && !strcasecmp(s, "Origin"))
+ strncpyz(w->origin, v, NETDATA_WEB_REQUEST_ORIGIN_HEADER_SIZE);
+
+ else if(hash == hash_connection && !strcasecmp(s, "Connection")) {
+ if(strcasestr(v, "keep-alive"))
+ web_client_enable_keepalive(w);
+ }
+ else if(respect_web_browser_do_not_track_policy && hash == hash_donottrack && !strcasecmp(s, "DNT")) {
+ if(*v == '0') web_client_disable_donottrack(w);
+ else if(*v == '1') web_client_enable_donottrack(w);
+ }
+ else if(parse_useragent && hash == hash_useragent && !strcasecmp(s, "User-Agent")) {
+ w->user_agent = strdupz(v);
+    } else if(hash == hash_authorization && !strcasecmp(s, "X-Auth-Token")) {
+ w->auth_bearer_token = strdupz(v);
+ }
+ else if(hash == hash_host && !strcasecmp(s, "Host")){
+ strncpyz(w->server_host, v, ((size_t)(ve - v) < sizeof(w->server_host)-1 ? (size_t)(ve - v) : sizeof(w->server_host)-1));
+ }
+#ifdef NETDATA_WITH_ZLIB
+ else if(hash == hash_accept_encoding && !strcasecmp(s, "Accept-Encoding")) {
+ if(web_enable_gzip) {
+ if(strcasestr(v, "gzip"))
+ web_client_enable_deflate(w, 1);
+ //
+ // does not seem to work
+ // else if(strcasestr(v, "deflate"))
+ // web_client_enable_deflate(w, 0);
+ }
+ }
+#endif /* NETDATA_WITH_ZLIB */
+#ifdef ENABLE_HTTPS
+ else if(hash == hash_forwarded_proto && !strcasecmp(s, "X-Forwarded-Proto")) {
+ if(strcasestr(v, "https"))
+ w->ssl.flags |= NETDATA_SSL_PROXY_HTTPS;
+ }
+#endif
+ else if(hash == hash_forwarded_host && !strcasecmp(s, "X-Forwarded-Host")){
+        strncpyz(w->forwarded_host, v, ((size_t)(ve - v) < sizeof(w->forwarded_host)-1 ? (size_t)(ve - v) : sizeof(w->forwarded_host)-1));
+ }
+
+ *e = ':';
+ *ve = '\r';
+ return ve;
+}
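+
+// Example (illustrative): given the header line "Accept-Encoding: gzip\r\n",
+// s points at 'A' and e stops at the ':'; v points at 'g' (leading spaces
+// skipped) and ve at the '\r'. The name and value are temporarily
+// NUL-terminated for hashing and comparison, then the ':' and '\r' are
+// restored before ve is returned to the caller.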
+
+/**
+ * Valid Method
+ *
+ * Netdata accepts only three methods: GET, OPTIONS and STREAM; the last one (STREAM) is an internal method.
+ *
+ * @param w is the structure with the client request
+ * @param s is the start string to parse
+ *
+ * @return the next address to parse when the method is valid, NULL otherwise.
+ */
+static inline char *web_client_valid_method(struct web_client *w, char *s) {
+    // is it a valid request?
+ if(!strncmp(s, "GET ", 4)) {
+ s = &s[4];
+ w->mode = WEB_CLIENT_MODE_NORMAL;
+ }
+ else if(!strncmp(s, "OPTIONS ", 8)) {
+ s = &s[8];
+ w->mode = WEB_CLIENT_MODE_OPTIONS;
+ }
+ else if(!strncmp(s, "STREAM ", 7)) {
+ s = &s[7];
+
+#ifdef ENABLE_HTTPS
+ if (w->ssl.flags && web_client_is_using_ssl_force(w)){
+ w->header_parse_tries = 0;
+ w->header_parse_last_size = 0;
+ web_client_disable_wait_receive(w);
+
+            char hostname[256];
+            char *copyme = strstr(s, "hostname=");
+            if (copyme) {
+                copyme += 9;
+                char *end = strchr(copyme, '&');
+                if (end) {
+                    size_t length = MIN(255, end - copyme);
+                    memcpy(hostname, copyme, length);
+                    hostname[length] = 0x00;
+                }
+                else {
+                    memcpy(hostname, "not available", 13);
+                    hostname[13] = 0x00;
+                }
+            }
+            else {
+                memcpy(hostname, "not available", 13);
+                hostname[13] = 0x00;
+            }
+ error("The server is configured to always use encrypted connections, please enable the SSL on child with hostname '%s'.",hostname);
+ s = NULL;
+ }
+#endif
+
+ w->mode = WEB_CLIENT_MODE_STREAM;
+ }
+ else {
+ s = NULL;
+ }
+
+ return s;
+}
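+
+// Example (illustrative): for the request line "GET /api/v1/charts HTTP/1.1"
+// this returns a pointer to "/api/v1/charts HTTP/1.1" with w->mode set to
+// WEB_CLIENT_MODE_NORMAL; an unsupported method such as "PUT " returns NULL.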
+
+/**
+ * Set Path Query
+ *
+ * Set the pointers to the path and query string according to the input.
+ *
+ * @param w is the structure with the client request
+ * @param s is the first address of the string.
+ * @param ptr is the address of the separator.
+ */
+static void web_client_set_path_query(struct web_client *w, char *s, char *ptr) {
+    w->url_path_length = (size_t)(ptr - s);
+
+ w->url_search_path = ptr;
+}
+
+/**
+ * Split path query
+ *
+ * Do the separation between path and query string
+ *
+ * @param w is the structure with the client request
+ * @param s is the string to parse
+ */
+void web_client_split_path_query(struct web_client *w, char *s) {
+    // first possibility: assume the separator character ('?') is not percent-encoded
+ char *ptr = strchr(s, '?');
+ if(ptr) {
+ w->separator = '?';
+ web_client_set_path_query(w, s, ptr);
+ return;
+ }
+
+    // Second possibility: the URL is fully percent-encoded by the client.
+    // strcasestr() is avoided because scanning for '%' and comparing the next
+    // two characters is faster.
+    // Tests with "encodeURI(uri);" (https://www.w3schools.com/jsref/jsref_encodeuri.asp,
+    // July 1st, 2019) showed that '?', '=' and '&' are not encoded, but custom
+    // encoders may not follow this rule, so the encoded form is handled anyway.
+ char *moveme = s;
+ while (moveme) {
+ ptr = strchr(moveme, '%');
+ if(ptr) {
+ char *test = (ptr+1);
+ if (!strncmp(test, "3f", 2) || !strncmp(test, "3F", 2)) {
+ w->separator = *ptr;
+ web_client_set_path_query(w, s, ptr);
+ return;
+ }
+ ptr++;
+ }
+
+ moveme = ptr;
+ }
+
+ w->separator = 0x00;
+ w->url_path_length = strlen(s);
+}
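+
+// Example (illustrative): for s = "/api/v1/data?chart=system.cpu" this sets
+// w->url_path_length = 12 (the length of "/api/v1/data"), w->separator = '?'
+// and w->url_search_path pointing at "?chart=system.cpu". For the encoded
+// form "/api/v1/data%3Fchart=system.cpu", w->separator becomes '%'.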
+
+/**
+ * Request validate
+ *
+ * @param w is the structure with the client request
+ *
+ * @return It returns HTTP_VALIDATION_OK on success and another code present
+ * in the enum HTTP_VALIDATION otherwise.
+ */
+static inline HTTP_VALIDATION http_request_validate(struct web_client *w) {
+ char *s = (char *)buffer_tostring(w->response.data), *encoded_url = NULL;
+
+ size_t last_pos = w->header_parse_last_size;
+
+ w->header_parse_tries++;
+ w->header_parse_last_size = buffer_strlen(w->response.data);
+
+ int is_it_valid;
+ if(w->header_parse_tries > 1) {
+ if(last_pos > 4) last_pos -= 4; // allow searching for \r\n\r\n
+ else last_pos = 0;
+
+ if(w->header_parse_last_size < last_pos)
+ last_pos = 0;
+
+ is_it_valid = url_is_request_complete(s, &s[last_pos], w->header_parse_last_size);
+ if(!is_it_valid) {
+ if(w->header_parse_tries > 10) {
+ info("Disabling slow client after %zu attempts to read the request (%zu bytes received)", w->header_parse_tries, buffer_strlen(w->response.data));
+ w->header_parse_tries = 0;
+ w->header_parse_last_size = 0;
+ web_client_disable_wait_receive(w);
+ return HTTP_VALIDATION_NOT_SUPPORTED;
+ }
+
+ return HTTP_VALIDATION_INCOMPLETE;
+ }
+
+ is_it_valid = 1;
+ } else {
+ last_pos = w->header_parse_last_size;
+ is_it_valid = url_is_request_complete(s, &s[last_pos], w->header_parse_last_size);
+ }
+
+ s = web_client_valid_method(w, s);
+ if (!s) {
+ w->header_parse_tries = 0;
+ w->header_parse_last_size = 0;
+ web_client_disable_wait_receive(w);
+
+ return HTTP_VALIDATION_NOT_SUPPORTED;
+ } else if (!is_it_valid) {
+        // invalid request: there is more data after the end of the message
+ char *check = strstr((char *)buffer_tostring(w->response.data), "\r\n\r\n");
+ if(check) {
+ check += 4;
+ if (*check) {
+ w->header_parse_tries = 0;
+ w->header_parse_last_size = 0;
+ web_client_disable_wait_receive(w);
+ return HTTP_VALIDATION_NOT_SUPPORTED;
+ }
+ }
+
+ web_client_enable_wait_receive(w);
+ return HTTP_VALIDATION_INCOMPLETE;
+ }
+
+ //After the method we have the path and query string together
+ encoded_url = s;
+
+    // search for " HTTP/", which terminates the request line
+ s = url_find_protocol(s);
+
+ // incomplete requests
+ if(unlikely(!*s)) {
+ web_client_enable_wait_receive(w);
+ return HTTP_VALIDATION_INCOMPLETE;
+ }
+
+ // we have the end of encoded_url - remember it
+ char *ue = s;
+
+    // variables used to map the query string parameters, when a query string is present
+ int total_variables;
+ char *ptr_variables[WEB_FIELDS_MAX];
+
+ // make sure we have complete request
+ // complete requests contain: \r\n\r\n
+ while(*s) {
+ // find a line feed
+ while(*s && *s++ != '\r');
+
+ // did we reach the end?
+ if(unlikely(!*s)) break;
+
+ // is it \r\n ?
+ if(likely(*s++ == '\n')) {
+
+ // is it again \r\n ? (header end)
+ if(unlikely(*s == '\r' && s[1] == '\n')) {
+ // a valid complete HTTP request found
+
+ *ue = '\0';
+                // reset the search path pointer, so stale data is never dereferenced below
+ w->url_search_path = NULL;
+ if(w->mode != WEB_CLIENT_MODE_NORMAL) {
+ if(!url_decode_r(w->decoded_url, encoded_url, NETDATA_WEB_REQUEST_URL_SIZE + 1))
+ return HTTP_VALIDATION_MALFORMED_URL;
+ } else {
+ web_client_split_path_query(w, encoded_url);
+
+ if (w->url_search_path && w->separator) {
+ *w->url_search_path = 0x00;
+ }
+
+ if(!url_decode_r(w->decoded_url, encoded_url, NETDATA_WEB_REQUEST_URL_SIZE + 1))
+ return HTTP_VALIDATION_MALFORMED_URL;
+
+ if (w->url_search_path && w->separator) {
+ *w->url_search_path = w->separator;
+
+ char *from = (encoded_url + w->url_path_length);
+ total_variables = url_map_query_string(ptr_variables, from);
+
+ if (url_parse_query_string(w->decoded_query_string, NETDATA_WEB_REQUEST_URL_SIZE + 1, ptr_variables, total_variables)) {
+ return HTTP_VALIDATION_MALFORMED_URL;
+ }
+ }
+ }
+ *ue = ' ';
+
+ // copy the URL - we are going to overwrite parts of it
+                    // TODO -- ideally we should avoid copying buffers around
+ strncpyz(w->last_url, w->decoded_url, NETDATA_WEB_REQUEST_URL_SIZE);
+#ifdef ENABLE_HTTPS
+ if ( (!web_client_check_unix(w)) && (netdata_srv_ctx) ) {
+ if ((w->ssl.conn) && ((w->ssl.flags & NETDATA_SSL_NO_HANDSHAKE) && (web_client_is_using_ssl_force(w) || web_client_is_using_ssl_default(w)) && (w->mode != WEB_CLIENT_MODE_STREAM)) ) {
+ w->header_parse_tries = 0;
+ w->header_parse_last_size = 0;
+                        // the client will be redirected to HTTPS, so preserve the original request
+ *ue = '\0';
+ strncpyz(w->last_url, encoded_url, NETDATA_WEB_REQUEST_URL_SIZE);
+ *ue = ' ';
+ web_client_disable_wait_receive(w);
+ return HTTP_VALIDATION_REDIRECT;
+ }
+ }
+#endif
+
+ w->header_parse_tries = 0;
+ w->header_parse_last_size = 0;
+ web_client_disable_wait_receive(w);
+ return HTTP_VALIDATION_OK;
+ }
+
+ // another header line
+ s = http_header_parse(w, s,
+ (w->mode == WEB_CLIENT_MODE_STREAM) // parse user agent
+ );
+ }
+ }
+
+ // incomplete request
+ web_client_enable_wait_receive(w);
+ return HTTP_VALIDATION_INCOMPLETE;
+}
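+
+// Example (illustrative): a request is considered complete only when the
+// buffer contains the header terminator "\r\n\r\n", e.g.:
+//
+//     GET /api/v1/info HTTP/1.1\r\n
+//     Host: localhost:19999\r\n
+//     \r\n
+//
+// anything short of this yields HTTP_VALIDATION_INCOMPLETE, and the client is
+// asked to send more data.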
+
+static inline ssize_t web_client_send_data(struct web_client *w, const void *buf, size_t len, int flags)
+{
+ ssize_t bytes;
+#ifdef ENABLE_HTTPS
+ if ( (!web_client_check_unix(w)) && (netdata_srv_ctx) ) {
+ if ( ( w->ssl.conn ) && ( !w->ssl.flags ) ){
+            bytes = SSL_write(w->ssl.conn, buf, len);
+        } else {
+            bytes = send(w->ofd, buf, len, flags);
+        }
+    } else {
+        bytes = send(w->ofd, buf, len, flags);
+ }
+#else
+ bytes = send(w->ofd, buf, len, flags);
+#endif
+
+ return bytes;
+}
+
+void web_client_build_http_header(struct web_client *w) {
+ if(unlikely(w->response.code != HTTP_RESP_OK))
+ buffer_no_cacheable(w->response.data);
+
+ // set a proper expiration date, if not already set
+ if(unlikely(!w->response.data->expires)) {
+ if(w->response.data->options & WB_CONTENT_NO_CACHEABLE)
+ w->response.data->expires = w->tv_ready.tv_sec + localhost->rrd_update_every;
+ else
+ w->response.data->expires = w->tv_ready.tv_sec + 86400;
+ }
+
+ // prepare the HTTP response header
+ debug(D_WEB_CLIENT, "%llu: Generating HTTP header with response %d.", w->id, w->response.code);
+
+ const char *content_type_string = web_content_type_to_string(w->response.data->contenttype);
+ const char *code_msg = web_response_code_to_string(w->response.code);
+
+ // prepare the last modified and expiration dates
+ char date[32], edate[32];
+ {
+ struct tm tmbuf, *tm;
+
+ tm = gmtime_r(&w->response.data->date, &tmbuf);
+ strftime(date, sizeof(date), "%a, %d %b %Y %H:%M:%S %Z", tm);
+
+ tm = gmtime_r(&w->response.data->expires, &tmbuf);
+ strftime(edate, sizeof(edate), "%a, %d %b %Y %H:%M:%S %Z", tm);
+ }
+
+ if (w->response.code == HTTP_RESP_MOVED_PERM) {
+ buffer_sprintf(w->response.header_output,
+ "HTTP/1.1 %d %s\r\n"
+ "Location: https://%s%s\r\n",
+ w->response.code, code_msg,
+ w->server_host,
+ w->last_url);
+    } else {
+ buffer_sprintf(w->response.header_output,
+ "HTTP/1.1 %d %s\r\n"
+ "Connection: %s\r\n"
+ "Server: NetData Embedded HTTP Server %s\r\n"
+ "Access-Control-Allow-Origin: %s\r\n"
+ "Access-Control-Allow-Credentials: true\r\n"
+ "Content-Type: %s\r\n"
+ "Date: %s\r\n",
+ w->response.code,
+ code_msg,
+ web_client_has_keepalive(w)?"keep-alive":"close",
+ VERSION,
+ w->origin,
+ content_type_string,
+ date);
+ }
+
+ if(unlikely(web_x_frame_options))
+ buffer_sprintf(w->response.header_output, "X-Frame-Options: %s\r\n", web_x_frame_options);
+
+ if(w->cookie1[0] || w->cookie2[0]) {
+ if(w->cookie1[0]) {
+ buffer_sprintf(w->response.header_output,
+ "Set-Cookie: %s\r\n",
+ w->cookie1);
+ }
+
+ if(w->cookie2[0]) {
+ buffer_sprintf(w->response.header_output,
+ "Set-Cookie: %s\r\n",
+ w->cookie2);
+ }
+
+ if(respect_web_browser_do_not_track_policy)
+ buffer_sprintf(w->response.header_output,
+ "Tk: T;cookies\r\n");
+ }
+ else {
+ if(respect_web_browser_do_not_track_policy) {
+ if(web_client_has_tracking_required(w))
+ buffer_sprintf(w->response.header_output,
+ "Tk: T;cookies\r\n");
+ else
+ buffer_sprintf(w->response.header_output,
+ "Tk: N\r\n");
+ }
+ }
+
+ if(w->mode == WEB_CLIENT_MODE_OPTIONS) {
+ buffer_strcat(w->response.header_output,
+ "Access-Control-Allow-Methods: GET, OPTIONS\r\n"
+ "Access-Control-Allow-Headers: accept, x-requested-with, origin, content-type, cookie, pragma, cache-control, x-auth-token\r\n"
+ "Access-Control-Max-Age: 1209600\r\n" // 86400 * 14
+ );
+ }
+ else {
+ buffer_sprintf(w->response.header_output,
+ "Cache-Control: %s\r\n"
+ "Expires: %s\r\n",
+ (w->response.data->options & WB_CONTENT_NO_CACHEABLE)?"no-cache, no-store, must-revalidate\r\nPragma: no-cache":"public",
+ edate);
+ }
+
+ // copy a possibly available custom header
+ if(unlikely(buffer_strlen(w->response.header)))
+ buffer_strcat(w->response.header_output, buffer_tostring(w->response.header));
+
+ // headers related to the transfer method
+ if(likely(w->response.zoutput))
+ buffer_strcat(w->response.header_output, "Content-Encoding: gzip\r\n");
+
+ if(likely(w->flags & WEB_CLIENT_CHUNKED_TRANSFER))
+ buffer_strcat(w->response.header_output, "Transfer-Encoding: chunked\r\n");
+ else {
+ if(likely((w->response.data->len || w->response.rlen))) {
+ // we know the content length, put it
+ buffer_sprintf(w->response.header_output, "Content-Length: %zu\r\n", w->response.data->len? w->response.data->len: w->response.rlen);
+ }
+ else {
+ // we don't know the content length, disable keep-alive
+ web_client_disable_keepalive(w);
+ }
+ }
+
+ // end of HTTP header
+ buffer_strcat(w->response.header_output, "\r\n");
+}
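+
+// Example (illustrative): for a typical successful request the generated
+// header looks like the following (version, origin, type and dates vary):
+//
+//     HTTP/1.1 200 OK\r\n
+//     Connection: keep-alive\r\n
+//     Server: NetData Embedded HTTP Server <VERSION>\r\n
+//     Access-Control-Allow-Origin: <origin>\r\n
+//     Access-Control-Allow-Credentials: true\r\n
+//     Content-Type: application/json; charset=utf-8\r\n
+//     Date: <date>\r\n
+//     Cache-Control: public\r\n
+//     Expires: <edate>\r\n
+//     Content-Length: <len>\r\n
+//     \r\n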
+
+static inline void web_client_send_http_header(struct web_client *w) {
+ web_client_build_http_header(w);
+
+    // send the HTTP header
+ debug(D_WEB_DATA, "%llu: Sending response HTTP header of size %zu: '%s'"
+ , w->id
+ , buffer_strlen(w->response.header_output)
+ , buffer_tostring(w->response.header_output)
+ );
+
+ web_client_crock_socket(w);
+
+ size_t count = 0;
+ ssize_t bytes;
+#ifdef ENABLE_HTTPS
+ if ( (!web_client_check_unix(w)) && (netdata_srv_ctx) ) {
+ if ( ( w->ssl.conn ) && ( !w->ssl.flags ) ){
+ while((bytes = SSL_write(w->ssl.conn, buffer_tostring(w->response.header_output), buffer_strlen(w->response.header_output))) < 0) {
+ count++;
+ if(count > 100 || (errno != EAGAIN && errno != EWOULDBLOCK)) {
+ error("Cannot send HTTPS headers to web client.");
+ break;
+ }
+ }
+ } else {
+ while((bytes = send(w->ofd, buffer_tostring(w->response.header_output), buffer_strlen(w->response.header_output), 0)) == -1) {
+ count++;
+
+ if(count > 100 || (errno != EAGAIN && errno != EWOULDBLOCK)) {
+ error("Cannot send HTTP headers to web client.");
+ break;
+ }
+ }
+ }
+ } else {
+ while((bytes = send(w->ofd, buffer_tostring(w->response.header_output), buffer_strlen(w->response.header_output), 0)) == -1) {
+ count++;
+
+ if(count > 100 || (errno != EAGAIN && errno != EWOULDBLOCK)) {
+ error("Cannot send HTTP headers to web client.");
+ break;
+ }
+ }
+ }
+#else
+ while((bytes = send(w->ofd, buffer_tostring(w->response.header_output), buffer_strlen(w->response.header_output), 0)) == -1) {
+ count++;
+
+ if(count > 100 || (errno != EAGAIN && errno != EWOULDBLOCK)) {
+ error("Cannot send HTTP headers to web client.");
+ break;
+ }
+ }
+#endif
+
+ if(bytes != (ssize_t) buffer_strlen(w->response.header_output)) {
+ if(bytes > 0)
+ w->stats_sent_bytes += bytes;
+
+ error("HTTP headers failed to be sent (I sent %zu bytes but the system sent %zd bytes). Closing web client."
+ , buffer_strlen(w->response.header_output)
+ , bytes);
+
+ WEB_CLIENT_IS_DEAD(w);
+ return;
+ }
+ else
+ w->stats_sent_bytes += bytes;
+}
+
+static inline int web_client_process_url(RRDHOST *host, struct web_client *w, char *url);
+
+static inline int web_client_switch_host(RRDHOST *host, struct web_client *w, char *url) {
+ if(host != localhost) {
+ buffer_flush(w->response.data);
+ buffer_strcat(w->response.data, "Nesting of hosts is not allowed.");
+ return HTTP_RESP_BAD_REQUEST;
+ }
+
+ char *tok = mystrsep(&url, "/");
+ if(tok && *tok) {
+ debug(D_WEB_CLIENT, "%llu: Searching for host with name '%s'.", w->id, tok);
+
+ if(!url) { //no delim found
+ debug(D_WEB_CLIENT, "%llu: URL doesn't end with / generating redirect.", w->id);
+ char *protocol, *url_host;
+#ifdef ENABLE_HTTPS
+ protocol = ((w->ssl.conn && !w->ssl.flags) || w->ssl.flags & NETDATA_SSL_PROXY_HTTPS) ? "https" : "http";
+#else
+ protocol = "http";
+#endif
+ url_host = (!w->forwarded_host[0])?w->server_host:w->forwarded_host;
+ buffer_sprintf(w->response.header, "Location: %s://%s%s/\r\n", protocol, url_host, w->last_url);
+ buffer_strcat(w->response.data, "Permanent redirect");
+ return HTTP_RESP_REDIR_PERM;
+ }
+
+ // copy the URL, we need it to serve files
+ w->last_url[0] = '/';
+
+ if(url && *url) strncpyz(&w->last_url[1], url, NETDATA_WEB_REQUEST_URL_SIZE - 1);
+ else w->last_url[1] = '\0';
+
+ uint32_t hash = simple_hash(tok);
+
+ host = rrdhost_find_by_hostname(tok, hash);
+ if(!host) host = rrdhost_find_by_guid(tok, hash);
+
+#ifdef ENABLE_DBENGINE
+ int release_host = 0;
+ if (!host) {
+ host = sql_create_host_by_uuid(tok);
+ if (likely(host)) {
+ rrdhost_flag_set(host, RRDHOST_FLAG_ARCHIVED);
+ release_host = 1;
+ }
+ }
+ if(host) {
+ int rc = web_client_process_url(host, w, url);
+ if (release_host) {
+ freez(host->hostname);
+ freez((char *) host->os);
+ freez((char *) host->tags);
+ freez((char *) host->timezone);
+ freez(host->program_name);
+ freez(host->program_version);
+ freez(host->registry_hostname);
+ freez(host);
+ }
+ return rc;
+ }
+#else
+ if (host) return web_client_process_url(host, w, url);
+#endif
+ }
+
+ buffer_flush(w->response.data);
+ w->response.data->contenttype = CT_TEXT_HTML;
+ buffer_strcat(w->response.data, "This netdata does not maintain a database for host: ");
+ buffer_strcat_htmlescape(w->response.data, tok?tok:"");
+ return HTTP_RESP_NOT_FOUND;
+}
+
+static inline int web_client_process_url(RRDHOST *host, struct web_client *w, char *url) {
+ static uint32_t
+ hash_api = 0,
+ hash_netdata_conf = 0,
+ hash_host = 0;
+
+#ifdef NETDATA_INTERNAL_CHECKS
+ static uint32_t hash_exit = 0, hash_debug = 0, hash_mirror = 0;
+#endif
+
+ if(unlikely(!hash_api)) {
+ hash_api = simple_hash("api");
+ hash_netdata_conf = simple_hash("netdata.conf");
+ hash_host = simple_hash("host");
+#ifdef NETDATA_INTERNAL_CHECKS
+ hash_exit = simple_hash("exit");
+ hash_debug = simple_hash("debug");
+ hash_mirror = simple_hash("mirror");
+#endif
+ }
+
+ char *tok = mystrsep(&url, "/?");
+ if(likely(tok && *tok)) {
+ uint32_t hash = simple_hash(tok);
+ debug(D_WEB_CLIENT, "%llu: Processing command '%s'.", w->id, tok);
+
+ if(unlikely(hash == hash_api && strcmp(tok, "api") == 0)) { // current API
+ debug(D_WEB_CLIENT_ACCESS, "%llu: API request ...", w->id);
+ return check_host_and_call(host, w, url, web_client_api_request);
+ }
+ else if(unlikely(hash == hash_host && strcmp(tok, "host") == 0)) { // host switching
+ debug(D_WEB_CLIENT_ACCESS, "%llu: host switch request ...", w->id);
+ return web_client_switch_host(host, w, url);
+ }
+ else if(unlikely(hash == hash_netdata_conf && strcmp(tok, "netdata.conf") == 0)) { // netdata.conf
+ if(unlikely(!web_client_can_access_netdataconf(w)))
+ return web_client_permission_denied(w);
+
+ debug(D_WEB_CLIENT_ACCESS, "%llu: generating netdata.conf ...", w->id);
+ w->response.data->contenttype = CT_TEXT_PLAIN;
+ buffer_flush(w->response.data);
+ config_generate(w->response.data, 0);
+ return HTTP_RESP_OK;
+ }
+#ifdef NETDATA_INTERNAL_CHECKS
+ else if(unlikely(hash == hash_exit && strcmp(tok, "exit") == 0)) {
+ if(unlikely(!web_client_can_access_netdataconf(w)))
+ return web_client_permission_denied(w);
+
+ w->response.data->contenttype = CT_TEXT_PLAIN;
+ buffer_flush(w->response.data);
+
+ if(!netdata_exit)
+ buffer_strcat(w->response.data, "ok, will do...");
+ else
+ buffer_strcat(w->response.data, "I am doing it already");
+
+ error("web request to exit received.");
+ netdata_cleanup_and_exit(0);
+ return HTTP_RESP_OK;
+ }
+ else if(unlikely(hash == hash_debug && strcmp(tok, "debug") == 0)) {
+ if(unlikely(!web_client_can_access_netdataconf(w)))
+ return web_client_permission_denied(w);
+
+ buffer_flush(w->response.data);
+
+ // get the name of the data to show
+ tok = mystrsep(&url, "&");
+ if(tok && *tok) {
+ debug(D_WEB_CLIENT, "%llu: Searching for RRD data with name '%s'.", w->id, tok);
+
+ // do we have such a data set?
+ RRDSET *st = rrdset_find_byname(host, tok);
+ if(!st) st = rrdset_find(host, tok);
+ if(!st) {
+ w->response.data->contenttype = CT_TEXT_HTML;
+ buffer_strcat(w->response.data, "Chart is not found: ");
+ buffer_strcat_htmlescape(w->response.data, tok);
+ debug(D_WEB_CLIENT_ACCESS, "%llu: %s is not found.", w->id, tok);
+ return HTTP_RESP_NOT_FOUND;
+ }
+
+ debug_flags |= D_RRD_STATS;
+
+ if(rrdset_flag_check(st, RRDSET_FLAG_DEBUG))
+ rrdset_flag_clear(st, RRDSET_FLAG_DEBUG);
+ else
+ rrdset_flag_set(st, RRDSET_FLAG_DEBUG);
+
+ w->response.data->contenttype = CT_TEXT_HTML;
+ buffer_sprintf(w->response.data, "Chart has now debug %s: ", rrdset_flag_check(st, RRDSET_FLAG_DEBUG)?"enabled":"disabled");
+ buffer_strcat_htmlescape(w->response.data, tok);
+ debug(D_WEB_CLIENT_ACCESS, "%llu: debug for %s is %s.", w->id, tok, rrdset_flag_check(st, RRDSET_FLAG_DEBUG)?"enabled":"disabled");
+ return HTTP_RESP_OK;
+ }
+
+ buffer_flush(w->response.data);
+ buffer_strcat(w->response.data, "debug which chart?\r\n");
+ return HTTP_RESP_BAD_REQUEST;
+ }
+ else if(unlikely(hash == hash_mirror && strcmp(tok, "mirror") == 0)) {
+ if(unlikely(!web_client_can_access_netdataconf(w)))
+ return web_client_permission_denied(w);
+
+ debug(D_WEB_CLIENT_ACCESS, "%llu: Mirroring...", w->id);
+
+ // replace the zero bytes with spaces
+ buffer_char_replace(w->response.data, '\0', ' ');
+
+ // just leave the buffer as is
+ // it will be copied back to the client
+
+ return HTTP_RESP_OK;
+ }
+#endif /* NETDATA_INTERNAL_CHECKS */
+ }
+
+ char filename[FILENAME_MAX+1];
+ url = filename;
+ strncpyz(filename, w->last_url, FILENAME_MAX);
+ tok = mystrsep(&url, "?");
+ buffer_flush(w->response.data);
+ return mysendfile(w, (tok && *tok)?tok:"/");
+}
+
+void web_client_process_request(struct web_client *w) {
+
+ // start timing us
+ now_realtime_timeval(&w->tv_in);
+
+ switch(http_request_validate(w)) {
+ case HTTP_VALIDATION_OK:
+ switch(w->mode) {
+ case WEB_CLIENT_MODE_STREAM:
+ if(unlikely(!web_client_can_access_stream(w))) {
+ web_client_permission_denied(w);
+ return;
+ }
+
+ w->response.code = rrdpush_receiver_thread_spawn(w, w->decoded_url);
+ return;
+
+ case WEB_CLIENT_MODE_OPTIONS:
+ if(unlikely(
+ !web_client_can_access_dashboard(w) &&
+ !web_client_can_access_registry(w) &&
+ !web_client_can_access_badges(w) &&
+ !web_client_can_access_mgmt(w) &&
+ !web_client_can_access_netdataconf(w)
+ )) {
+ web_client_permission_denied(w);
+ break;
+ }
+
+ w->response.data->contenttype = CT_TEXT_PLAIN;
+ buffer_flush(w->response.data);
+ buffer_strcat(w->response.data, "OK");
+ w->response.code = HTTP_RESP_OK;
+ break;
+
+ case WEB_CLIENT_MODE_FILECOPY:
+ case WEB_CLIENT_MODE_NORMAL:
+ if(unlikely(
+ !web_client_can_access_dashboard(w) &&
+ !web_client_can_access_registry(w) &&
+ !web_client_can_access_badges(w) &&
+ !web_client_can_access_mgmt(w) &&
+ !web_client_can_access_netdataconf(w)
+ )) {
+ web_client_permission_denied(w);
+ break;
+ }
+
+ w->response.code = web_client_process_url(localhost, w, w->decoded_url);
+ break;
+ }
+ break;
+
+ case HTTP_VALIDATION_INCOMPLETE:
+ if(w->response.data->len > NETDATA_WEB_REQUEST_MAX_SIZE) {
+ strcpy(w->last_url, "too big request");
+
+ debug(D_WEB_CLIENT_ACCESS, "%llu: Received request is too big (%zu bytes).", w->id, w->response.data->len);
+
+ buffer_flush(w->response.data);
+ buffer_sprintf(w->response.data, "Received request is too big (%zu bytes).\r\n", w->response.data->len);
+ w->response.code = HTTP_RESP_BAD_REQUEST;
+ }
+ else {
+ // wait for more data
+ return;
+ }
+ break;
+#ifdef ENABLE_HTTPS
+ case HTTP_VALIDATION_REDIRECT:
+ {
+ buffer_flush(w->response.data);
+ w->response.data->contenttype = CT_TEXT_HTML;
+ buffer_strcat(w->response.data,
+ "<!DOCTYPE html><!-- SPDX-License-Identifier: GPL-3.0-or-later --><html>"
+ "<body onload=\"window.location.href ='https://'+ window.location.hostname +"
+ " ':' + window.location.port + window.location.pathname + window.location.search\">"
+ "Redirecting to safety connection, case your browser does not support redirection, please"
+ " click <a onclick=\"window.location.href ='https://'+ window.location.hostname + ':' "
+ " + window.location.port + window.location.pathname + window.location.search\">here</a>."
+ "</body></html>");
+ w->response.code = HTTP_RESP_MOVED_PERM;
+ break;
+ }
+#endif
+ case HTTP_VALIDATION_MALFORMED_URL:
+ debug(D_WEB_CLIENT_ACCESS, "%llu: URL parsing failed (malformed URL). Cannot understand '%s'.", w->id, w->response.data->buffer);
+
+ buffer_flush(w->response.data);
+ buffer_strcat(w->response.data, "URL not valid. I don't understand you...\r\n");
+ w->response.code = HTTP_RESP_BAD_REQUEST;
+ break;
+ case HTTP_VALIDATION_NOT_SUPPORTED:
+ debug(D_WEB_CLIENT_ACCESS, "%llu: Cannot understand '%s'.", w->id, w->response.data->buffer);
+
+ buffer_flush(w->response.data);
+ buffer_strcat(w->response.data, "I don't understand you...\r\n");
+ w->response.code = HTTP_RESP_BAD_REQUEST;
+ break;
+ }
+
+    // keep track of the time we finished processing
+ now_realtime_timeval(&w->tv_ready);
+
+ w->response.sent = 0;
+
+ // set a proper last modified date
+ if(unlikely(!w->response.data->date))
+ w->response.data->date = w->tv_ready.tv_sec;
+
+ web_client_send_http_header(w);
+
+ // enable sending immediately if we have data
+ if(w->response.data->len) web_client_enable_wait_send(w);
+ else web_client_disable_wait_send(w);
+
+ switch(w->mode) {
+ case WEB_CLIENT_MODE_STREAM:
+ debug(D_WEB_CLIENT, "%llu: STREAM done.", w->id);
+ break;
+
+ case WEB_CLIENT_MODE_OPTIONS:
+ debug(D_WEB_CLIENT, "%llu: Done preparing the OPTIONS response. Sending data (%zu bytes) to client.", w->id, w->response.data->len);
+ break;
+
+ case WEB_CLIENT_MODE_NORMAL:
+ debug(D_WEB_CLIENT, "%llu: Done preparing the response. Sending data (%zu bytes) to client.", w->id, w->response.data->len);
+ break;
+
+ case WEB_CLIENT_MODE_FILECOPY:
+ if(w->response.rlen) {
+ debug(D_WEB_CLIENT, "%llu: Done preparing the response. Will be sending data file of %zu bytes to client.", w->id, w->response.rlen);
+ web_client_enable_wait_receive(w);
+
+ /*
+ // utilize the kernel sendfile() for copying the file to the socket.
+            // this block can stay commented out without losing functionality:
+            // when it is commented out, the data is copied using async I/O instead.
+ {
+ long len = sendfile(w->ofd, w->ifd, NULL, w->response.data->rbytes);
+ if(len != w->response.data->rbytes)
+ error("%llu: sendfile() should copy %ld bytes, but copied %ld. Falling back to manual copy.", w->id, w->response.data->rbytes, len);
+ else
+ web_client_request_done(w);
+ }
+ */
+ }
+ else
+ debug(D_WEB_CLIENT, "%llu: Done preparing the response. Will be sending an unknown amount of bytes to client.", w->id);
+ break;
+
+ default:
+ fatal("%llu: Unknown client mode %u.", w->id, w->mode);
+ break;
+ }
+}
+
+ssize_t web_client_send_chunk_header(struct web_client *w, size_t len)
+{
+ debug(D_DEFLATE, "%llu: OPEN CHUNK of %zu bytes (hex: %zx).", w->id, len, len);
+ char buf[24];
+ ssize_t bytes;
+    bytes = (ssize_t)sprintf(buf, "%zX\r\n", len); // sprintf() NUL-terminates buf
+
+    bytes = web_client_send_data(w, buf, (size_t)bytes, 0);
+ if(bytes > 0) {
+ debug(D_DEFLATE, "%llu: Sent chunk header %zd bytes.", w->id, bytes);
+ w->stats_sent_bytes += bytes;
+ }
+
+ else if(bytes == 0) {
+ debug(D_WEB_CLIENT, "%llu: Did not send chunk header to the client.", w->id);
+ WEB_CLIENT_IS_DEAD(w);
+ }
+ else {
+ debug(D_WEB_CLIENT, "%llu: Failed to send chunk header to client.", w->id);
+ WEB_CLIENT_IS_DEAD(w);
+ }
+
+ return bytes;
+}
+
+ssize_t web_client_send_chunk_close(struct web_client *w)
+{
+ //debug(D_DEFLATE, "%llu: CLOSE CHUNK.", w->id);
+
+ ssize_t bytes;
+ bytes = web_client_send_data(w,"\r\n",2,0);
+ if(bytes > 0) {
+ debug(D_DEFLATE, "%llu: Sent chunk suffix %zd bytes.", w->id, bytes);
+ w->stats_sent_bytes += bytes;
+ }
+
+ else if(bytes == 0) {
+ debug(D_WEB_CLIENT, "%llu: Did not send chunk suffix to the client.", w->id);
+ WEB_CLIENT_IS_DEAD(w);
+ }
+ else {
+ debug(D_WEB_CLIENT, "%llu: Failed to send chunk suffix to client.", w->id);
+ WEB_CLIENT_IS_DEAD(w);
+ }
+
+ return bytes;
+}
+
+ssize_t web_client_send_chunk_finalize(struct web_client *w)
+{
+ //debug(D_DEFLATE, "%llu: FINALIZE CHUNK.", w->id);
+
+ ssize_t bytes;
+ bytes = web_client_send_data(w,"\r\n0\r\n\r\n",7,0);
+ if(bytes > 0) {
+ debug(D_DEFLATE, "%llu: Sent chunk suffix %zd bytes.", w->id, bytes);
+ w->stats_sent_bytes += bytes;
+ }
+
+ else if(bytes == 0) {
+ debug(D_WEB_CLIENT, "%llu: Did not send chunk finalize suffix to the client.", w->id);
+ WEB_CLIENT_IS_DEAD(w);
+ }
+ else {
+ debug(D_WEB_CLIENT, "%llu: Failed to send chunk finalize suffix to client.", w->id);
+ WEB_CLIENT_IS_DEAD(w);
+ }
+
+ return bytes;
+}
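+
+// Illustrative wire format: web_client_send_chunk_header() emits
+// "<hex length>\r\n", the chunk payload follows, web_client_send_chunk_close()
+// emits the "\r\n" that terminates a chunk, and web_client_send_chunk_finalize()
+// emits "\r\n0\r\n\r\n" (last chunk terminator plus the zero-length chunk).
+// For a single 26-byte chunk the stream is:
+//
+//     1A\r\n
+//     <26 bytes of payload>
+//     \r\n0\r\n\r\n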
+
+#ifdef NETDATA_WITH_ZLIB
+ssize_t web_client_send_deflate(struct web_client *w)
+{
+ ssize_t len = 0, t = 0;
+
+ // when using compression,
+ // w->response.sent is the amount of bytes passed through compression
+
+ debug(D_DEFLATE, "%llu: web_client_send_deflate(): w->response.data->len = %zu, w->response.sent = %zu, w->response.zhave = %zu, w->response.zsent = %zu, w->response.zstream.avail_in = %u, w->response.zstream.avail_out = %u, w->response.zstream.total_in = %lu, w->response.zstream.total_out = %lu.",
+ w->id, w->response.data->len, w->response.sent, w->response.zhave, w->response.zsent, w->response.zstream.avail_in, w->response.zstream.avail_out, w->response.zstream.total_in, w->response.zstream.total_out);
+
+ if(w->response.data->len - w->response.sent == 0 && w->response.zstream.avail_in == 0 && w->response.zhave == w->response.zsent && w->response.zstream.avail_out != 0) {
+ // there is nothing to send
+
+ debug(D_WEB_CLIENT, "%llu: Out of output data.", w->id);
+
+ // finalize the chunk
+ if(w->response.sent != 0) {
+ t = web_client_send_chunk_finalize(w);
+ if(t < 0) return t;
+ }
+
+ if(w->mode == WEB_CLIENT_MODE_FILECOPY && web_client_has_wait_receive(w) && w->response.rlen && w->response.rlen > w->response.data->len) {
+ // we have to wait, more data will come
+ debug(D_WEB_CLIENT, "%llu: Waiting for more data to become available.", w->id);
+ web_client_disable_wait_send(w);
+ return t;
+ }
+
+ if(unlikely(!web_client_has_keepalive(w))) {
+ debug(D_WEB_CLIENT, "%llu: Closing (keep-alive is not enabled). %zu bytes sent.", w->id, w->response.sent);
+ WEB_CLIENT_IS_DEAD(w);
+ return t;
+ }
+
+ // reset the client
+ web_client_request_done(w);
+ debug(D_WEB_CLIENT, "%llu: Done sending all data on socket.", w->id);
+ return t;
+ }
+
+ if(w->response.zhave == w->response.zsent) {
+ // compress more input data
+
+ // close the previous open chunk
+ if(w->response.sent != 0) {
+ t = web_client_send_chunk_close(w);
+ if(t < 0) return t;
+ }
+
+ debug(D_DEFLATE, "%llu: Compressing %zu new bytes starting from %zu (and %u left behind).", w->id, (w->response.data->len - w->response.sent), w->response.sent, w->response.zstream.avail_in);
+
+ // give the compressor all the data not passed through the compressor yet
+ if(w->response.data->len > w->response.sent) {
+ w->response.zstream.next_in = (Bytef *)&w->response.data->buffer[w->response.sent - w->response.zstream.avail_in];
+ w->response.zstream.avail_in += (uInt) (w->response.data->len - w->response.sent);
+ }
+
+ // reset the compressor output buffer
+ w->response.zstream.next_out = w->response.zbuffer;
+ w->response.zstream.avail_out = NETDATA_WEB_RESPONSE_ZLIB_CHUNK_SIZE;
+
+ // ask for FINISH if we have all the input
+ int flush = Z_SYNC_FLUSH;
+ if(w->mode == WEB_CLIENT_MODE_NORMAL
+ || (w->mode == WEB_CLIENT_MODE_FILECOPY && !web_client_has_wait_receive(w) && w->response.data->len == w->response.rlen)) {
+ flush = Z_FINISH;
+ debug(D_DEFLATE, "%llu: Requesting Z_FINISH, if possible.", w->id);
+ }
+ else {
+ debug(D_DEFLATE, "%llu: Requesting Z_SYNC_FLUSH.", w->id);
+ }
+
+ // compress
+ if(deflate(&w->response.zstream, flush) == Z_STREAM_ERROR) {
+ error("%llu: Compression failed. Closing down client.", w->id);
+ web_client_request_done(w);
+ return(-1);
+ }
+
+ w->response.zhave = NETDATA_WEB_RESPONSE_ZLIB_CHUNK_SIZE - w->response.zstream.avail_out;
+ w->response.zsent = 0;
+
+ // keep track of the bytes passed through the compressor
+ w->response.sent = w->response.data->len;
+
+ debug(D_DEFLATE, "%llu: Compression produced %zu bytes.", w->id, w->response.zhave);
+
+ // open a new chunk
+ ssize_t t2 = web_client_send_chunk_header(w, w->response.zhave);
+ if(t2 < 0) return t2;
+ t += t2;
+ }
+
+ debug(D_WEB_CLIENT, "%llu: Sending %zu bytes of data (+%zd of chunk header).", w->id, w->response.zhave - w->response.zsent, t);
+
+ len = web_client_send_data(w,&w->response.zbuffer[w->response.zsent], (size_t) (w->response.zhave - w->response.zsent), MSG_DONTWAIT);
+ if(len > 0) {
+ w->stats_sent_bytes += len;
+ w->response.zsent += len;
+ len += t;
+ debug(D_WEB_CLIENT, "%llu: Sent %zd bytes.", w->id, len);
+ }
+ else if(len == 0) {
+ debug(D_WEB_CLIENT, "%llu: Did not send any bytes to the client (zhave = %zu, zsent = %zu, need to send = %zu).",
+ w->id, w->response.zhave, w->response.zsent, w->response.zhave - w->response.zsent);
+
+ WEB_CLIENT_IS_DEAD(w);
+ }
+ else {
+ debug(D_WEB_CLIENT, "%llu: Failed to send data to client.", w->id);
+ WEB_CLIENT_IS_DEAD(w);
+ }
+
+ return(len);
+}
+#endif // NETDATA_WITH_ZLIB
+
+ssize_t web_client_send(struct web_client *w) {
+#ifdef NETDATA_WITH_ZLIB
+ if(likely(w->response.zoutput)) return web_client_send_deflate(w);
+#endif // NETDATA_WITH_ZLIB
+
+ ssize_t bytes;
+
+ if(unlikely(w->response.data->len - w->response.sent == 0)) {
+ // there is nothing to send
+
+ debug(D_WEB_CLIENT, "%llu: Out of output data.", w->id);
+
+ // there can be two cases for this
+ // A. we have done everything
+ // B. we temporarily have nothing to send, waiting for the buffer to be filled by ifd
+
+ if(w->mode == WEB_CLIENT_MODE_FILECOPY && web_client_has_wait_receive(w) && w->response.rlen && w->response.rlen > w->response.data->len) {
+ // we have to wait, more data will come
+ debug(D_WEB_CLIENT, "%llu: Waiting for more data to become available.", w->id);
+ web_client_disable_wait_send(w);
+ return 0;
+ }
+
+ if(unlikely(!web_client_has_keepalive(w))) {
+ debug(D_WEB_CLIENT, "%llu: Closing (keep-alive is not enabled). %zu bytes sent.", w->id, w->response.sent);
+ WEB_CLIENT_IS_DEAD(w);
+ return 0;
+ }
+
+ web_client_request_done(w);
+ debug(D_WEB_CLIENT, "%llu: Done sending all data on socket. Waiting for next request on the same socket.", w->id);
+ return 0;
+ }
+
+ bytes = web_client_send_data(w,&w->response.data->buffer[w->response.sent], w->response.data->len - w->response.sent, MSG_DONTWAIT);
+ if(likely(bytes > 0)) {
+ w->stats_sent_bytes += bytes;
+ w->response.sent += bytes;
+ debug(D_WEB_CLIENT, "%llu: Sent %zd bytes.", w->id, bytes);
+ }
+ else if(likely(bytes == 0)) {
+ debug(D_WEB_CLIENT, "%llu: Did not send any bytes to the client.", w->id);
+ WEB_CLIENT_IS_DEAD(w);
+ }
+ else {
+ debug(D_WEB_CLIENT, "%llu: Failed to send data to client.", w->id);
+ WEB_CLIENT_IS_DEAD(w);
+ }
+
+ return(bytes);
+}
+
+ssize_t web_client_read_file(struct web_client *w)
+{
+ if(unlikely(w->response.rlen > w->response.data->size))
+ buffer_need_bytes(w->response.data, w->response.rlen - w->response.data->size);
+
+ if(unlikely(w->response.rlen <= w->response.data->len))
+ return 0;
+
+ ssize_t left = w->response.rlen - w->response.data->len;
+ ssize_t bytes = read(w->ifd, &w->response.data->buffer[w->response.data->len], (size_t)left);
+ if(likely(bytes > 0)) {
+ size_t old = w->response.data->len;
+ (void)old;
+
+ w->response.data->len += bytes;
+ w->response.data->buffer[w->response.data->len] = '\0';
+
+ debug(D_WEB_CLIENT, "%llu: Read %zd bytes.", w->id, bytes);
+ debug(D_WEB_DATA, "%llu: Read data: '%s'.", w->id, &w->response.data->buffer[old]);
+
+ web_client_enable_wait_send(w);
+
+ if(w->response.rlen && w->response.data->len >= w->response.rlen)
+ web_client_disable_wait_receive(w);
+ }
+ else if(likely(bytes == 0)) {
+ debug(D_WEB_CLIENT, "%llu: Out of input file data.", w->id);
+
+ // if we cannot read, it means we have an error on input.
+ // if however, we are copying a file from ifd to ofd, we should not return an error.
+ // in this case, the error should be generated when the file has been sent to the client.
+
+ // we are copying data from ifd to ofd
+ // let it finish copying...
+ web_client_disable_wait_receive(w);
+
+ debug(D_WEB_CLIENT, "%llu: Read the whole file.", w->id);
+
+ if(web_server_mode != WEB_SERVER_MODE_STATIC_THREADED) {
+ if (w->ifd != w->ofd) close(w->ifd);
+ }
+
+ w->ifd = w->ofd;
+ }
+ else {
+ debug(D_WEB_CLIENT, "%llu: read data failed.", w->id);
+ WEB_CLIENT_IS_DEAD(w);
+ }
+
+ return(bytes);
+}
+
+ssize_t web_client_receive(struct web_client *w)
+{
+ if(unlikely(w->mode == WEB_CLIENT_MODE_FILECOPY))
+ return web_client_read_file(w);
+
+    ssize_t bytes;
+
+    // make sure there is space for more data
+    buffer_need_bytes(w->response.data, NETDATA_WEB_REQUEST_RECEIVE_SIZE);
+
+    // compute the available space after the buffer may have been resized
+    ssize_t left = w->response.data->size - w->response.data->len;
+
+#ifdef ENABLE_HTTPS
+ if ( (!web_client_check_unix(w)) && (netdata_srv_ctx) ) {
+ if ( ( w->ssl.conn ) && (!w->ssl.flags)) {
+ bytes = SSL_read(w->ssl.conn, &w->response.data->buffer[w->response.data->len], (size_t) (left - 1));
+        } else {
+ bytes = recv(w->ifd, &w->response.data->buffer[w->response.data->len], (size_t) (left - 1), MSG_DONTWAIT);
+ }
+ }
+    else {
+ bytes = recv(w->ifd, &w->response.data->buffer[w->response.data->len], (size_t) (left - 1), MSG_DONTWAIT);
+ }
+#else
+ bytes = recv(w->ifd, &w->response.data->buffer[w->response.data->len], (size_t) (left - 1), MSG_DONTWAIT);
+#endif
+
+ if(likely(bytes > 0)) {
+ w->stats_received_bytes += bytes;
+
+ size_t old = w->response.data->len;
+ (void)old;
+
+ w->response.data->len += bytes;
+ w->response.data->buffer[w->response.data->len] = '\0';
+
+ debug(D_WEB_CLIENT, "%llu: Received %zd bytes.", w->id, bytes);
+ debug(D_WEB_DATA, "%llu: Received data: '%s'.", w->id, &w->response.data->buffer[old]);
+ }
+ else {
+ debug(D_WEB_CLIENT, "%llu: receive data failed.", w->id);
+ WEB_CLIENT_IS_DEAD(w);
+ }
+
+ return(bytes);
+}
diff --git a/web/server/web_client.h b/web/server/web_client.h
new file mode 100644
index 0000000..48bf1ac
--- /dev/null
+++ b/web/server/web_client.h
@@ -0,0 +1,217 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_WEB_CLIENT_H
+#define NETDATA_WEB_CLIENT_H 1
+
+#include "libnetdata/libnetdata.h"
+
+#ifdef NETDATA_WITH_ZLIB
+extern int web_enable_gzip, web_gzip_level, web_gzip_strategy;
+#endif /* NETDATA_WITH_ZLIB */
+
+// HTTP_CODES 2XX Success
+#define HTTP_RESP_OK 200
+
+// HTTP_CODES 3XX Redirections
+#define HTTP_RESP_MOVED_PERM 301
+#define HTTP_RESP_REDIR_TEMP 307
+#define HTTP_RESP_REDIR_PERM 308
+
+// HTTP_CODES 4XX Client Errors
+#define HTTP_RESP_BAD_REQUEST 400
+#define HTTP_RESP_FORBIDDEN 403
+#define HTTP_RESP_NOT_FOUND 404
+#define HTTP_RESP_PRECOND_FAIL 412
+
+// HTTP_CODES 5XX Server Errors
+#define HTTP_RESP_INTERNAL_SERVER_ERROR 500
+#define HTTP_RESP_BACKEND_FETCH_FAILED 503
+
+extern int respect_web_browser_do_not_track_policy;
+extern char *web_x_frame_options;
+
+typedef enum web_client_mode {
+ WEB_CLIENT_MODE_NORMAL = 0,
+ WEB_CLIENT_MODE_FILECOPY = 1,
+ WEB_CLIENT_MODE_OPTIONS = 2,
+ WEB_CLIENT_MODE_STREAM = 3
+} WEB_CLIENT_MODE;
+
+typedef enum {
+ HTTP_VALIDATION_OK,
+ HTTP_VALIDATION_NOT_SUPPORTED,
+ HTTP_VALIDATION_MALFORMED_URL,
+#ifdef ENABLE_HTTPS
+ HTTP_VALIDATION_INCOMPLETE,
+ HTTP_VALIDATION_REDIRECT
+#else
+ HTTP_VALIDATION_INCOMPLETE
+#endif
+} HTTP_VALIDATION;
+
+typedef enum web_client_flags {
+ WEB_CLIENT_FLAG_DEAD = 1 << 1, // if set, this client is dead
+
+ WEB_CLIENT_FLAG_KEEPALIVE = 1 << 2, // if set, the web client will be re-used
+
+ WEB_CLIENT_FLAG_WAIT_RECEIVE = 1 << 3, // if set, we are waiting more input data
+ WEB_CLIENT_FLAG_WAIT_SEND = 1 << 4, // if set, we have data to send to the client
+
+ WEB_CLIENT_FLAG_DO_NOT_TRACK = 1 << 5, // if set, we should not set cookies on this client
+ WEB_CLIENT_FLAG_TRACKING_REQUIRED = 1 << 6, // if set, we need to send cookies
+
+ WEB_CLIENT_FLAG_TCP_CLIENT = 1 << 7, // if set, the client is using a TCP socket
+ WEB_CLIENT_FLAG_UNIX_CLIENT = 1 << 8, // if set, the client is using a UNIX socket
+
+ WEB_CLIENT_FLAG_DONT_CLOSE_SOCKET = 1 << 9, // don't close the socket when cleaning up (static-threaded web server)
+
+ WEB_CLIENT_CHUNKED_TRANSFER = 1 << 10, // chunked transfer (used with zlib compression)
+} WEB_CLIENT_FLAGS;
+
+//#ifdef HAVE_C___ATOMIC
+//#define web_client_flag_check(w, flag) (__atomic_load_n(&((w)->flags), __ATOMIC_SEQ_CST) & flag)
+//#define web_client_flag_set(w, flag) __atomic_or_fetch(&((w)->flags), flag, __ATOMIC_SEQ_CST)
+//#define web_client_flag_clear(w, flag) __atomic_and_fetch(&((w)->flags), ~flag, __ATOMIC_SEQ_CST)
+//#else
+#define web_client_flag_check(w, flag) ((w)->flags & (flag))
+#define web_client_flag_set(w, flag) (w)->flags |= flag
+#define web_client_flag_clear(w, flag) (w)->flags &= ~flag
+//#endif
+
+#define WEB_CLIENT_IS_DEAD(w) web_client_flag_set(w, WEB_CLIENT_FLAG_DEAD)
+#define web_client_check_dead(w) web_client_flag_check(w, WEB_CLIENT_FLAG_DEAD)
+
+#define web_client_has_keepalive(w) web_client_flag_check(w, WEB_CLIENT_FLAG_KEEPALIVE)
+#define web_client_enable_keepalive(w) web_client_flag_set(w, WEB_CLIENT_FLAG_KEEPALIVE)
+#define web_client_disable_keepalive(w) web_client_flag_clear(w, WEB_CLIENT_FLAG_KEEPALIVE)
+
+#define web_client_has_donottrack(w) web_client_flag_check(w, WEB_CLIENT_FLAG_DO_NOT_TRACK)
+#define web_client_enable_donottrack(w) web_client_flag_set(w, WEB_CLIENT_FLAG_DO_NOT_TRACK)
+#define web_client_disable_donottrack(w) web_client_flag_clear(w, WEB_CLIENT_FLAG_DO_NOT_TRACK)
+
+#define web_client_has_tracking_required(w) web_client_flag_check(w, WEB_CLIENT_FLAG_TRACKING_REQUIRED)
+#define web_client_enable_tracking_required(w) web_client_flag_set(w, WEB_CLIENT_FLAG_TRACKING_REQUIRED)
+#define web_client_disable_tracking_required(w) web_client_flag_clear(w, WEB_CLIENT_FLAG_TRACKING_REQUIRED)
+
+#define web_client_has_wait_receive(w) web_client_flag_check(w, WEB_CLIENT_FLAG_WAIT_RECEIVE)
+#define web_client_enable_wait_receive(w) web_client_flag_set(w, WEB_CLIENT_FLAG_WAIT_RECEIVE)
+#define web_client_disable_wait_receive(w) web_client_flag_clear(w, WEB_CLIENT_FLAG_WAIT_RECEIVE)
+
+#define web_client_has_wait_send(w) web_client_flag_check(w, WEB_CLIENT_FLAG_WAIT_SEND)
+#define web_client_enable_wait_send(w) web_client_flag_set(w, WEB_CLIENT_FLAG_WAIT_SEND)
+#define web_client_disable_wait_send(w) web_client_flag_clear(w, WEB_CLIENT_FLAG_WAIT_SEND)
+
+#define web_client_set_tcp(w) web_client_flag_set(w, WEB_CLIENT_FLAG_TCP_CLIENT)
+#define web_client_set_unix(w) web_client_flag_set(w, WEB_CLIENT_FLAG_UNIX_CLIENT)
+#define web_client_check_unix(w) web_client_flag_check(w, WEB_CLIENT_FLAG_UNIX_CLIENT)
+#define web_client_check_tcp(w) web_client_flag_check(w, WEB_CLIENT_FLAG_TCP_CLIENT)
+
+#define web_client_is_corkable(w) web_client_flag_check(w, WEB_CLIENT_FLAG_TCP_CLIENT)
+
+#define NETDATA_WEB_REQUEST_URL_SIZE 8192
+#define NETDATA_WEB_RESPONSE_ZLIB_CHUNK_SIZE 16384
+#define NETDATA_WEB_RESPONSE_HEADER_SIZE 4096
+#define NETDATA_WEB_REQUEST_COOKIE_SIZE 1024
+#define NETDATA_WEB_REQUEST_ORIGIN_HEADER_SIZE 1024
+#define NETDATA_WEB_RESPONSE_INITIAL_SIZE 16384
+#define NETDATA_WEB_REQUEST_RECEIVE_SIZE 16384
+#define NETDATA_WEB_REQUEST_MAX_SIZE 16384
+
+struct response {
+ BUFFER *header; // our response header
+ BUFFER *header_output; // internal use
+ BUFFER *data; // our response data buffer
+
+ int code; // the HTTP response code
+
+    size_t rlen; // if non-zero, the expected size of ifd (input of filecopy)
+ size_t sent; // current data length sent to output
+
+ int zoutput; // if set to 1, web_client_send() will send compressed data
+#ifdef NETDATA_WITH_ZLIB
+ z_stream zstream; // zlib stream for sending compressed output to client
+ Bytef zbuffer[NETDATA_WEB_RESPONSE_ZLIB_CHUNK_SIZE]; // temporary buffer for storing compressed output
+ size_t zsent; // the compressed bytes we have sent to the client
+ size_t zhave; // the compressed bytes that we have received from zlib
+ unsigned int zinitialized : 1;
+#endif /* NETDATA_WITH_ZLIB */
+};
+
+struct web_client {
+ unsigned long long id;
+
+ WEB_CLIENT_FLAGS flags; // status flags for the client
+ WEB_CLIENT_MODE mode; // the operational mode of the client
+ WEB_CLIENT_ACL acl; // the access list of the client
+ int port_acl; // the operations permitted on the port the client connected to
+ char *auth_bearer_token; // the Bearer auth token (if sent)
+ size_t header_parse_tries;
+ size_t header_parse_last_size;
+
+ int tcp_cork; // 1 = we have a cork on the socket
+
+ int ifd;
+ int ofd;
+
+ char client_ip[INET6_ADDRSTRLEN]; // Defined buffer sizes include null-terminators
+ char client_port[NI_MAXSERV];
+ char server_host[NI_MAXHOST];
+ char client_host[NI_MAXHOST];
+ char forwarded_host[NI_MAXHOST]; //Used with proxy
+
+ char decoded_url[NETDATA_WEB_REQUEST_URL_SIZE + 1]; // we decode the URL in this buffer
+ char decoded_query_string[NETDATA_WEB_REQUEST_URL_SIZE + 1]; // we decode the Query String in this buffer
+ char last_url[NETDATA_WEB_REQUEST_URL_SIZE + 1]; // we keep a copy of the decoded URL here
+ size_t url_path_length;
+    char separator; // the path/query separator: '?', or '%' when the URL is percent-encoded, or 0 when there is no query string
+    char *url_search_path; // a pointer to the search path sent by the client
+
+ struct timeval tv_in, tv_ready;
+
+ char cookie1[NETDATA_WEB_REQUEST_COOKIE_SIZE + 1];
+ char cookie2[NETDATA_WEB_REQUEST_COOKIE_SIZE + 1];
+ char origin[NETDATA_WEB_REQUEST_ORIGIN_HEADER_SIZE + 1];
+ char *user_agent;
+
+ struct response response;
+
+ size_t stats_received_bytes;
+ size_t stats_sent_bytes;
+
+ // cache of web_client allocations
+ struct web_client *prev; // maintain a linked list of web clients
+ struct web_client *next; // for the web servers that need it
+
+ // MULTI-THREADED WEB SERVER MEMBERS
+ netdata_thread_t thread; // the thread servicing this client
+ volatile int running; // 1 when the thread runs, 0 otherwise
+
+ // STATIC-THREADED WEB SERVER MEMBERS
+ size_t pollinfo_slot; // POLLINFO slot of the web client
+ size_t pollinfo_filecopy_slot; // POLLINFO slot of the file read
+#ifdef ENABLE_HTTPS
+ struct netdata_ssl ssl;
+#endif
+};
+
+extern uid_t web_files_uid(void);
+extern uid_t web_files_gid(void);
+
+extern int web_client_permission_denied(struct web_client *w);
+
+extern ssize_t web_client_send(struct web_client *w);
+extern ssize_t web_client_receive(struct web_client *w);
+extern ssize_t web_client_read_file(struct web_client *w);
+
+extern void web_client_process_request(struct web_client *w);
+extern void web_client_request_done(struct web_client *w);
+
+extern void buffer_data_options2string(BUFFER *wb, uint32_t options);
+
+extern int mysendfile(struct web_client *w, char *filename);
+
+extern void web_client_build_http_header(struct web_client *w);
+
+#include "daemon/common.h"
+
+#endif /* NETDATA_WEB_CLIENT_H */
diff --git a/web/server/web_client_cache.c b/web/server/web_client_cache.c
new file mode 100644
index 0000000..afd51d8
--- /dev/null
+++ b/web/server/web_client_cache.c
@@ -0,0 +1,269 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#define WEB_SERVER_INTERNALS 1
+#include "web_client_cache.h"
+
+// ----------------------------------------------------------------------------
+// allocate and free web_clients
+
+#ifdef ENABLE_HTTPS
+
+static void web_client_reuse_ssl(struct web_client *w) {
+ if (netdata_srv_ctx) {
+ if (w->ssl.conn) {
+ SSL_clear(w->ssl.conn);
+ }
+ }
+}
+#endif
+
+
+static void web_client_zero(struct web_client *w) {
+ // zero everything about it - but keep the buffers
+
+ // remember the pointers to the buffers
+ BUFFER *b1 = w->response.data;
+ BUFFER *b2 = w->response.header;
+ BUFFER *b3 = w->response.header_output;
+
+ // empty the buffers
+ buffer_flush(b1);
+ buffer_flush(b2);
+ buffer_flush(b3);
+
+ freez(w->user_agent);
+
+ // zero everything
+ memset(w, 0, sizeof(struct web_client));
+
+ // restore the pointers of the buffers
+ w->response.data = b1;
+ w->response.header = b2;
+ w->response.header_output = b3;
+}
+
+static void web_client_free(struct web_client *w) {
+ buffer_free(w->response.header_output);
+ buffer_free(w->response.header);
+ buffer_free(w->response.data);
+ freez(w->user_agent);
+#ifdef ENABLE_HTTPS
+ if ((!web_client_check_unix(w)) && ( netdata_srv_ctx )) {
+ if (w->ssl.conn) {
+ SSL_free(w->ssl.conn);
+ w->ssl.conn = NULL;
+ }
+ }
+#endif
+ freez(w);
+}
+
+static struct web_client *web_client_alloc(void) {
+ struct web_client *w = callocz(1, sizeof(struct web_client));
+ w->response.data = buffer_create(NETDATA_WEB_RESPONSE_INITIAL_SIZE);
+ w->response.header = buffer_create(NETDATA_WEB_RESPONSE_HEADER_SIZE);
+ w->response.header_output = buffer_create(NETDATA_WEB_RESPONSE_HEADER_SIZE);
+ return w;
+}
+
+// ----------------------------------------------------------------------------
+// web clients caching
+
+// When clients connect and disconnect, avoid allocating and releasing memory.
+// Instead, when new clients get connected, reuse any memory previously allocated
+// for serving web clients that are now disconnected.
+
+// The size of the cache is adaptive. It caches the structures of 2x
+// the number of currently connected clients.
+
+// Comments per server:
+// SINGLE-THREADED : 1 cache is maintained
+// MULTI-THREADED : 1 cache is maintained
+// STATIC-THREADED : 1 cache for each thread of the web server
+
+__thread struct clients_cache web_clients_cache = {
+ .pid = 0,
+ .used = NULL,
+ .used_count = 0,
+ .avail = NULL,
+ .avail_count = 0,
+ .allocated = 0,
+ .reused = 0
+};
+
+inline void web_client_cache_verify(int force) {
+#ifdef NETDATA_INTERNAL_CHECKS
+ static __thread size_t count = 0;
+ count++;
+
+ if(unlikely(force || count > 1000)) {
+ count = 0;
+
+ struct web_client *w;
+ size_t used = 0, avail = 0;
+ for(w = web_clients_cache.used; w ; w = w->next) used++;
+ for(w = web_clients_cache.avail; w ; w = w->next) avail++;
+
+ info("web_client_cache has %zu (%zu) used and %zu (%zu) available clients, allocated %zu, reused %zu (hit %zu%%)."
+ , used, web_clients_cache.used_count
+ , avail, web_clients_cache.avail_count
+ , web_clients_cache.allocated
+ , web_clients_cache.reused
+ , (web_clients_cache.allocated + web_clients_cache.reused)?(web_clients_cache.reused * 100 / (web_clients_cache.allocated + web_clients_cache.reused)):0
+ );
+ }
+#else
+ if(unlikely(force)) {
+ info("web_client_cache has %zu used and %zu available clients, allocated %zu, reused %zu (hit %zu%%)."
+ , web_clients_cache.used_count
+ , web_clients_cache.avail_count
+ , web_clients_cache.allocated
+ , web_clients_cache.reused
+ , (web_clients_cache.allocated + web_clients_cache.reused)?(web_clients_cache.reused * 100 / (web_clients_cache.allocated + web_clients_cache.reused)):0
+ );
+ }
+#endif
+}
+
+// destroy the cache and free all the memory it uses
+void web_client_cache_destroy(void) {
+#ifdef NETDATA_INTERNAL_CHECKS
+ if(unlikely(web_clients_cache.pid != 0 && web_clients_cache.pid != gettid()))
+ error("Oops! wrong thread accessing the cache. Expected %d, found %d", (int)web_clients_cache.pid, (int)gettid());
+
+ web_client_cache_verify(1);
+#endif
+
+ netdata_thread_disable_cancelability();
+
+ struct web_client *w, *t;
+
+ w = web_clients_cache.used;
+ while(w) {
+ t = w;
+ w = w->next;
+ web_client_free(t);
+ }
+ web_clients_cache.used = NULL;
+ web_clients_cache.used_count = 0;
+
+ w = web_clients_cache.avail;
+ while(w) {
+ t = w;
+ w = w->next;
+ web_client_free(t);
+ }
+ web_clients_cache.avail = NULL;
+ web_clients_cache.avail_count = 0;
+
+ netdata_thread_enable_cancelability();
+}
+
+struct web_client *web_client_get_from_cache_or_allocate(void) {
+
+#ifdef NETDATA_INTERNAL_CHECKS
+ if(unlikely(web_clients_cache.pid == 0))
+ web_clients_cache.pid = gettid();
+
+ if(unlikely(web_clients_cache.pid != 0 && web_clients_cache.pid != gettid()))
+ error("Oops! wrong thread accessing the cache. Expected %d, found %d", (int)web_clients_cache.pid, (int)gettid());
+#endif
+
+ netdata_thread_disable_cancelability();
+
+ struct web_client *w = web_clients_cache.avail;
+
+ if(w) {
+ // get it from avail
+ if (w == web_clients_cache.avail) web_clients_cache.avail = w->next;
+ if(w->prev) w->prev->next = w->next;
+ if(w->next) w->next->prev = w->prev;
+ web_clients_cache.avail_count--;
+#ifdef ENABLE_HTTPS
+ web_client_reuse_ssl(w);
+ SSL *ssl = w->ssl.conn;
+#endif
+ web_client_zero(w);
+ web_clients_cache.reused++;
+#ifdef ENABLE_HTTPS
+ w->ssl.conn = ssl;
+ w->ssl.flags = NETDATA_SSL_START;
+ debug(D_WEB_CLIENT_ACCESS,"Reusing SSL structure with (w->ssl = NULL, w->accepted = %u)", w->ssl.flags);
+#endif
+ }
+ else {
+ // allocate it
+ w = web_client_alloc();
+#ifdef ENABLE_HTTPS
+ w->ssl.flags = NETDATA_SSL_START;
+ debug(D_WEB_CLIENT_ACCESS,"Starting SSL structure with (w->ssl = NULL, w->accepted = %u)", w->ssl.flags);
+#endif
+ web_clients_cache.allocated++;
+ }
+
+ // link it to used web clients
+ if (web_clients_cache.used) web_clients_cache.used->prev = w;
+ w->next = web_clients_cache.used;
+ w->prev = NULL;
+ web_clients_cache.used = w;
+ web_clients_cache.used_count++;
+
+ // initialize it
+ w->id = web_client_connected();
+ w->mode = WEB_CLIENT_MODE_NORMAL;
+
+ netdata_thread_enable_cancelability();
+
+ return w;
+}
+
+void web_client_release(struct web_client *w) {
+#ifdef NETDATA_INTERNAL_CHECKS
+ if(unlikely(web_clients_cache.pid != 0 && web_clients_cache.pid != gettid()))
+ error("Oops! wrong thread accessing the cache. Expected %d, found %d", (int)web_clients_cache.pid, (int)gettid());
+
+ if(unlikely(w->running))
+ error("%llu: releasing web client from %s port %s, but it still running.", w->id, w->client_ip, w->client_port);
+#endif
+
+ debug(D_WEB_CLIENT_ACCESS, "%llu: Closing web client from %s port %s.", w->id, w->client_ip, w->client_port);
+
+ web_server_log_connection(w, "DISCONNECTED");
+ web_client_request_done(w);
+ web_client_disconnected();
+
+ netdata_thread_disable_cancelability();
+
+ if(web_server_mode != WEB_SERVER_MODE_STATIC_THREADED) {
+ if (w->ifd != -1) close(w->ifd);
+ if (w->ofd != -1 && w->ofd != w->ifd) close(w->ofd);
+ w->ifd = w->ofd = -1;
+#ifdef ENABLE_HTTPS
+ web_client_reuse_ssl(w);
+ w->ssl.flags = NETDATA_SSL_START;
+#endif
+
+ }
+
+ // unlink it from the used
+ if (w == web_clients_cache.used) web_clients_cache.used = w->next;
+ if(w->prev) w->prev->next = w->next;
+ if(w->next) w->next->prev = w->prev;
+ web_clients_cache.used_count--;
+
+ if(web_clients_cache.avail_count >= 2 * web_clients_cache.used_count) {
+ // we have too many of them - free it
+ web_client_free(w);
+ }
+ else {
+ // link it to the avail
+ if (web_clients_cache.avail) web_clients_cache.avail->prev = w;
+ w->next = web_clients_cache.avail;
+ w->prev = NULL;
+ web_clients_cache.avail = w;
+ web_clients_cache.avail_count++;
+ }
+
+ netdata_thread_enable_cancelability();
+}
+
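The cache implemented above is two intrusive doubly-linked lists (used and avail) plus a trimming rule: on release, a client structure is freed outright once the available list already holds at least twice as many entries as there are connected clients, and is otherwise parked for reuse. The following standalone sketch models just that policy; node, cache, acquire and release are hypothetical names for illustration, not part of the netdata sources.

#include <stdlib.h>

/* hypothetical stand-ins for struct web_client and struct clients_cache */
struct node  { struct node *prev, *next; };
struct cache { struct node *used, *avail; size_t used_count, avail_count; };

static void list_unlink(struct node **head, struct node *n, size_t *count) {
    if(*head == n) *head = n->next;
    if(n->prev) n->prev->next = n->next;
    if(n->next) n->next->prev = n->prev;
    n->prev = n->next = NULL;
    (*count)--;
}

static void list_push(struct node **head, struct node *n, size_t *count) {
    if(*head) (*head)->prev = n;
    n->next = *head;
    n->prev = NULL;
    *head = n;
    (*count)++;
}

/* mirrors web_client_get_from_cache_or_allocate(): prefer a cached node */
static struct node *acquire(struct cache *c) {
    struct node *n = c->avail;
    if(n) list_unlink(&c->avail, n, &c->avail_count);
    else  n = calloc(1, sizeof(*n));
    list_push(&c->used, n, &c->used_count);
    return n;
}

/* mirrors web_client_release(): free once the cache already holds 2x the
 * number of connected clients, otherwise keep the node for reuse */
static void release(struct cache *c, struct node *n) {
    list_unlink(&c->used, n, &c->used_count);
    if(c->avail_count >= 2 * c->used_count)
        free(n);
    else
        list_push(&c->avail, n, &c->avail_count);
}

int main(void) {
    struct cache c = { NULL, NULL, 0, 0 };
    struct node *a = acquire(&c), *b = acquire(&c);
    release(&c, a);               /* parked: avail(0) < 2 * used(1) */
    a = acquire(&c);              /* cache hit - 'a' is reused      */
    release(&c, a);               /* parked again                   */
    release(&c, b);               /* freed: avail(1) >= 2 * used(0) */
    release(&c, acquire(&c));     /* drained: reused once, then freed */
    return 0;
}

Compiled on its own, the sketch reuses the first released node and frees the second, matching the allocated/reused hit-rate accounting that web_client_cache_verify() reports.
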
diff --git a/web/server/web_client_cache.h b/web/server/web_client_cache.h
new file mode 100644
index 0000000..f638880
--- /dev/null
+++ b/web/server/web_client_cache.h
@@ -0,0 +1,31 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_WEB_CLIENT_CACHE_H
+#define NETDATA_WEB_CLIENT_CACHE_H
+
+#include "libnetdata/libnetdata.h"
+#include "web_client.h"
+
+struct clients_cache {
+ pid_t pid;
+
+ struct web_client *used; // the structures of the currently connected clients
+    size_t used_count;              // the number of currently connected clients
+
+ struct web_client *avail; // the cached structures, available for future clients
+ size_t avail_count; // the number of cached structures
+
+ size_t reused; // the number of re-uses
+ size_t allocated; // the number of allocations
+};
+
+extern __thread struct clients_cache web_clients_cache;
+
+extern void web_client_release(struct web_client *w);
+extern struct web_client *web_client_get_from_cache_or_allocate(void);
+extern void web_client_cache_destroy(void);
+extern void web_client_cache_verify(int force);
+
+#include "web_server.h"
+
+#endif //NETDATA_WEB_CLIENT_CACHE_H
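Because web_clients_cache is declared __thread, every web server worker thread owns a private cache and no locking is needed; the pid field only detects, under NETDATA_INTERNAL_CHECKS, a structure being touched from the wrong thread. A minimal sketch of that thread-local pattern, independent of the netdata sources (build with -pthread):

#include <pthread.h>
#include <stdio.h>

/* each thread gets its own copy, so no mutex is needed - the same
 * property the web server relies on for web_clients_cache */
static __thread size_t allocated = 0;

static void *worker(void *arg) {
    (void)arg;
    for(int i = 0; i < 1000; i++)
        allocated++;                                      /* thread-private */
    printf("thread-local allocated = %zu\n", allocated);  /* always 1000 */
    return NULL;
}

int main(void) {
    pthread_t t1, t2;
    pthread_create(&t1, NULL, worker, NULL);
    pthread_create(&t2, NULL, worker, NULL);
    pthread_join(t1, NULL);
    pthread_join(t2, NULL);
    return 0;
}
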
diff --git a/web/server/web_server.c b/web/server/web_server.c
new file mode 100644
index 0000000..4da08d4
--- /dev/null
+++ b/web/server/web_server.c
@@ -0,0 +1,159 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#define WEB_SERVER_INTERNALS 1
+#include "web_server.h"
+
+WEB_SERVER_MODE web_server_mode = WEB_SERVER_MODE_STATIC_THREADED;
+
+// --------------------------------------------------------------------------------------
+
+WEB_SERVER_MODE web_server_mode_id(const char *mode) {
+    if(!strcmp(mode, "none"))
+        return WEB_SERVER_MODE_NONE;
+
+    // anything else selects the static-threaded web server
+    return WEB_SERVER_MODE_STATIC_THREADED;
+}
+
+const char *web_server_mode_name(WEB_SERVER_MODE id) {
+ switch(id) {
+ case WEB_SERVER_MODE_NONE:
+ return "none";
+ default:
+ case WEB_SERVER_MODE_STATIC_THREADED:
+ return "static-threaded";
+ }
+}
+
+// --------------------------------------------------------------------------------------
+// API sockets
+
+LISTEN_SOCKETS api_sockets = {
+ .config = &netdata_config,
+ .config_section = CONFIG_SECTION_WEB,
+ .default_bind_to = "*",
+ .default_port = API_LISTEN_PORT,
+ .backlog = API_LISTEN_BACKLOG
+};
+
+void debug_sockets(void) {
+ BUFFER *wb = buffer_create(256 * sizeof(char));
+ int i;
+
+ for(i = 0 ; i < (int)api_sockets.opened ; i++) {
+ buffer_strcat(wb, (api_sockets.fds_acl_flags[i] & WEB_CLIENT_ACL_NOCHECK)?"NONE ":"");
+ buffer_strcat(wb, (api_sockets.fds_acl_flags[i] & WEB_CLIENT_ACL_DASHBOARD)?"dashboard ":"");
+ buffer_strcat(wb, (api_sockets.fds_acl_flags[i] & WEB_CLIENT_ACL_REGISTRY)?"registry ":"");
+ buffer_strcat(wb, (api_sockets.fds_acl_flags[i] & WEB_CLIENT_ACL_BADGE)?"badges ":"");
+ buffer_strcat(wb, (api_sockets.fds_acl_flags[i] & WEB_CLIENT_ACL_MGMT)?"management ":"");
+ buffer_strcat(wb, (api_sockets.fds_acl_flags[i] & WEB_CLIENT_ACL_STREAMING)?"streaming ":"");
+ buffer_strcat(wb, (api_sockets.fds_acl_flags[i] & WEB_CLIENT_ACL_NETDATACONF)?"netdata.conf ":"");
+ debug(D_WEB_CLIENT, "Socket fd %d name '%s' acl_flags: %s",
+ i,
+ api_sockets.fds_names[i],
+ buffer_tostring(wb));
+ buffer_reset(wb);
+ }
+ buffer_free(wb);
+}
+
+void api_listen_sockets_setup(void) {
+    int socks = listen_sockets_setup(&api_sockets);
+
+    // without at least one listening socket the API is unreachable,
+    // so this is a fatal condition
+    if(!socks)
+        fatal("LISTENER: Cannot listen on any API socket. Exiting...");
+
+    if(unlikely(debug_flags & D_WEB_CLIENT))
+        debug_sockets();
+}
+
+
+// --------------------------------------------------------------------------------------
+// access lists
+
+SIMPLE_PATTERN *web_allow_connections_from = NULL;
+int web_allow_connections_dns;
+
+// WEB_CLIENT_ACL
+SIMPLE_PATTERN *web_allow_dashboard_from = NULL;
+int web_allow_dashboard_dns;
+SIMPLE_PATTERN *web_allow_registry_from = NULL;
+int web_allow_registry_dns;
+SIMPLE_PATTERN *web_allow_badges_from = NULL;
+int web_allow_badges_dns;
+SIMPLE_PATTERN *web_allow_mgmt_from = NULL;
+int web_allow_mgmt_dns;
+SIMPLE_PATTERN *web_allow_streaming_from = NULL;
+int web_allow_streaming_dns;
+SIMPLE_PATTERN *web_allow_netdataconf_from = NULL;
+int web_allow_netdataconf_dns;
+
+void web_client_update_acl_matches(struct web_client *w) {
+ w->acl = WEB_CLIENT_ACL_NONE;
+
+ if (!web_allow_dashboard_from ||
+ connection_allowed(w->ifd, w->client_ip, w->client_host, sizeof(w->client_host),
+ web_allow_dashboard_from, "dashboard", web_allow_dashboard_dns))
+ w->acl |= WEB_CLIENT_ACL_DASHBOARD;
+
+ if (!web_allow_registry_from ||
+ connection_allowed(w->ifd, w->client_ip, w->client_host, sizeof(w->client_host),
+ web_allow_registry_from, "registry", web_allow_registry_dns))
+ w->acl |= WEB_CLIENT_ACL_REGISTRY;
+
+ if (!web_allow_badges_from ||
+ connection_allowed(w->ifd, w->client_ip, w->client_host, sizeof(w->client_host),
+ web_allow_badges_from, "badges", web_allow_badges_dns))
+ w->acl |= WEB_CLIENT_ACL_BADGE;
+
+ if (!web_allow_mgmt_from ||
+ connection_allowed(w->ifd, w->client_ip, w->client_host, sizeof(w->client_host),
+ web_allow_mgmt_from, "management", web_allow_mgmt_dns))
+ w->acl |= WEB_CLIENT_ACL_MGMT;
+
+ if (!web_allow_streaming_from ||
+ connection_allowed(w->ifd, w->client_ip, w->client_host, sizeof(w->client_host),
+ web_allow_streaming_from, "streaming", web_allow_streaming_dns))
+ w->acl |= WEB_CLIENT_ACL_STREAMING;
+
+ if (!web_allow_netdataconf_from ||
+ connection_allowed(w->ifd, w->client_ip, w->client_host, sizeof(w->client_host),
+ web_allow_netdataconf_from, "netdata.conf", web_allow_netdataconf_dns))
+ w->acl |= WEB_CLIENT_ACL_NETDATACONF;
+
+ w->acl &= w->port_acl;
+}
+
+
+// --------------------------------------------------------------------------------------
+
+void web_server_log_connection(struct web_client *w, const char *msg) {
+ log_access("%llu: %d '[%s]:%s' '%s'", w->id, gettid(), w->client_ip, w->client_port, msg);
+}
+
+// --------------------------------------------------------------------------------------
+
+void web_client_initialize_connection(struct web_client *w) {
+ int flag = 1;
+
+ if(unlikely(web_client_check_tcp(w) && setsockopt(w->ifd, IPPROTO_TCP, TCP_NODELAY, (char *) &flag, sizeof(int)) != 0))
+ debug(D_WEB_CLIENT, "%llu: failed to enable TCP_NODELAY on socket fd %d.", w->id, w->ifd);
+
+ flag = 1;
+ if(unlikely(setsockopt(w->ifd, SOL_SOCKET, SO_KEEPALIVE, (char *) &flag, sizeof(int)) != 0))
+ debug(D_WEB_CLIENT, "%llu: failed to enable SO_KEEPALIVE on socket fd %d.", w->id, w->ifd);
+
+ web_client_update_acl_matches(w);
+
+ w->origin[0] = '*'; w->origin[1] = '\0';
+ w->cookie1[0] = '\0'; w->cookie2[0] = '\0';
+ freez(w->user_agent); w->user_agent = NULL;
+
+ web_client_enable_wait_receive(w);
+
+ web_server_log_connection(w, "CONNECTED");
+
+ web_client_cache_verify(0);
+}
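web_client_update_acl_matches() builds the effective ACL in two steps: every endpoint class whose allow pattern is unset or matches the client contributes one bit, and the result is then ANDed with the ACL of the listening port, so a request is served only when both the client pattern and the port permit it. Below is a reduced sketch of that two-step intersection; the flag names and the toy matcher are hypothetical stand-ins for WEB_CLIENT_ACL_* and connection_allowed().

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* hypothetical ACL bits, mirroring the WEB_CLIENT_ACL_* flags */
typedef enum {
    ACL_NONE      = 0,
    ACL_DASHBOARD = 1 << 0,
    ACL_REGISTRY  = 1 << 1,
} acl_flags;

/* toy stand-in for connection_allowed(): a NULL pattern allows everyone */
static bool allowed(const char *pattern, const char *ip) {
    return !pattern || !strcmp(pattern, ip);
}

static acl_flags effective_acl(const char *ip, acl_flags port_acl,
                               const char *dashboard_from, const char *registry_from) {
    acl_flags acl = ACL_NONE;

    if(allowed(dashboard_from, ip)) acl |= ACL_DASHBOARD;
    if(allowed(registry_from, ip))  acl |= ACL_REGISTRY;

    return acl & port_acl;   /* the port ACL caps what the client may do */
}

int main(void) {
    /* the client matches everything, but this port only serves the dashboard */
    acl_flags acl = effective_acl("10.0.0.1", ACL_DASHBOARD, NULL, NULL);

    printf("dashboard: %s\n", (acl & ACL_DASHBOARD) ? "yes" : "no");  /* yes */
    printf("registry : %s\n", (acl & ACL_REGISTRY)  ? "yes" : "no");  /* no  */
    return 0;
}
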
diff --git a/web/server/web_server.h b/web/server/web_server.h
new file mode 100644
index 0000000..7bfc81c
--- /dev/null
+++ b/web/server/web_server.h
@@ -0,0 +1,64 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_WEB_SERVER_H
+#define NETDATA_WEB_SERVER_H 1
+
+#include "libnetdata/libnetdata.h"
+#include "web_client.h"
+
+#ifndef API_LISTEN_PORT
+#define API_LISTEN_PORT 19999
+#endif
+
+#ifndef API_LISTEN_BACKLOG
+#define API_LISTEN_BACKLOG 4096
+#endif
+
+typedef enum web_server_mode {
+ WEB_SERVER_MODE_STATIC_THREADED,
+ WEB_SERVER_MODE_NONE
+} WEB_SERVER_MODE;
+
+extern SIMPLE_PATTERN *web_allow_connections_from;
+extern int web_allow_connections_dns;
+extern SIMPLE_PATTERN *web_allow_dashboard_from;
+extern int web_allow_dashboard_dns;
+extern SIMPLE_PATTERN *web_allow_registry_from;
+extern int web_allow_registry_dns;
+extern SIMPLE_PATTERN *web_allow_badges_from;
+extern int web_allow_badges_dns;
+extern SIMPLE_PATTERN *web_allow_streaming_from;
+extern int web_allow_streaming_dns;
+extern SIMPLE_PATTERN *web_allow_netdataconf_from;
+extern int web_allow_netdataconf_dns;
+extern SIMPLE_PATTERN *web_allow_mgmt_from;
+extern int web_allow_mgmt_dns;
+
+extern WEB_SERVER_MODE web_server_mode;
+
+extern WEB_SERVER_MODE web_server_mode_id(const char *mode);
+extern const char *web_server_mode_name(WEB_SERVER_MODE id);
+
+extern void api_listen_sockets_setup(void);
+
+#define DEFAULT_TIMEOUT_TO_RECEIVE_FIRST_WEB_REQUEST 60
+#define DEFAULT_DISCONNECT_IDLE_WEB_CLIENTS_AFTER_SECONDS 60
+extern int web_client_timeout;
+extern int web_client_first_request_timeout;
+extern long web_client_streaming_rate_t;
+
+#ifdef WEB_SERVER_INTERNALS
+extern LISTEN_SOCKETS api_sockets;
+extern void web_client_update_acl_matches(struct web_client *w);
+extern void web_server_log_connection(struct web_client *w, const char *msg);
+extern void web_client_initialize_connection(struct web_client *w);
+extern struct web_client *web_client_create_on_listenfd(int listener);
+
+#include "web_client_cache.h"
+#endif // WEB_SERVER_INTERNALS
+
+#include "static/static-threaded.h"
+
+#include "daemon/common.h"
+
+#endif /* NETDATA_WEB_SERVER_H */
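As web_server_mode_id() in web_server.c shows, any configured value other than "none" selects the static-threaded server, so a round-trip through web_server_mode_name() normalizes unknown or legacy mode strings. A self-contained sketch of that normalization follows; the mapping is re-implemented locally for illustration, and mode_id/mode_name are hypothetical names.

#include <stdio.h>
#include <string.h>

/* local re-implementation of the mode mapping, for illustration only */
typedef enum { SERVER_STATIC_THREADED, SERVER_NONE } server_mode;

static server_mode mode_id(const char *s) {
    return strcmp(s, "none") ? SERVER_STATIC_THREADED : SERVER_NONE;
}

static const char *mode_name(server_mode m) {
    return (m == SERVER_NONE) ? "none" : "static-threaded";
}

int main(void) {
    /* any unknown or legacy value falls back to the static-threaded server */
    const char *inputs[] = { "none", "static-threaded", "multi-threaded" };

    for(size_t i = 0; i < sizeof(inputs) / sizeof(inputs[0]); i++)
        printf("%-16s -> %s\n", inputs[i], mode_name(mode_id(inputs[i])));

    return 0;
}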